summaryrefslogtreecommitdiff
path: root/deps
diff options
context:
space:
mode:
authordcorbacho <dparracorbacho@piotal.io>2020-11-18 14:27:41 +0000
committerdcorbacho <dparracorbacho@piotal.io>2020-11-18 14:27:41 +0000
commitf23a51261d9502ec39df0f8db47ba6b22aa7659f (patch)
tree53dcdf46e7dc2c14e81ee960bce8793879b488d3 /deps
parentafa2c2bf6c7e0e9b63f4fb53dc931c70388e1c82 (diff)
parent9f6d64ec4a4b1eeac24d7846c5c64fd96798d892 (diff)
downloadrabbitmq-server-git-stream-timestamp-offset.tar.gz
Merge remote-tracking branch 'origin/master' into stream-timestamp-offsetstream-timestamp-offset
Diffstat (limited to 'deps')
-rw-r--r--deps/amqp10_client/.gitignore28
-rw-r--r--deps/amqp10_client/CODE_OF_CONDUCT.md44
-rw-r--r--deps/amqp10_client/CONTRIBUTING.md105
-rw-r--r--deps/amqp10_client/LICENSE4
-rw-r--r--deps/amqp10_client/LICENSE-MPL-RabbitMQ373
-rw-r--r--deps/amqp10_client/Makefile66
-rw-r--r--deps/amqp10_client/README.md159
-rw-r--r--deps/amqp10_client/elvis.config26
-rw-r--r--deps/amqp10_client/erlang.mk7808
-rw-r--r--deps/amqp10_client/rabbitmq-components.mk359
-rw-r--r--deps/amqp10_client/src/amqp10_client.erl560
-rw-r--r--deps/amqp10_client/src/amqp10_client.hrl24
-rw-r--r--deps/amqp10_client/src/amqp10_client_app.erl38
-rw-r--r--deps/amqp10_client/src/amqp10_client_connection.erl489
-rw-r--r--deps/amqp10_client/src/amqp10_client_connection_sup.erl42
-rw-r--r--deps/amqp10_client/src/amqp10_client_connections_sup.erl38
-rw-r--r--deps/amqp10_client/src/amqp10_client_frame_reader.erl338
-rw-r--r--deps/amqp10_client/src/amqp10_client_session.erl1121
-rw-r--r--deps/amqp10_client/src/amqp10_client_sessions_sup.erl36
-rw-r--r--deps/amqp10_client/src/amqp10_client_sup.erl34
-rw-r--r--deps/amqp10_client/src/amqp10_client_types.erl69
-rw-r--r--deps/amqp10_client/src/amqp10_msg.erl453
-rw-r--r--deps/amqp10_client/test/activemq_ct_helpers.erl119
-rw-r--r--deps/amqp10_client/test/mock_server.erl84
-rw-r--r--deps/amqp10_client/test/msg_SUITE.erl314
-rw-r--r--deps/amqp10_client/test/system_SUITE.erl740
-rw-r--r--deps/amqp10_client/test/system_SUITE_data/conf/activemq.xml135
-rw-r--r--deps/amqp10_client/test/system_SUITE_data/conf/activemq_no_anon.xml135
-rw-r--r--deps/amqp10_common/.gitignore29
-rw-r--r--deps/amqp10_common/.travis.yml61
-rw-r--r--deps/amqp10_common/CODE_OF_CONDUCT.md44
-rw-r--r--deps/amqp10_common/CONTRIBUTING.md38
-rw-r--r--deps/amqp10_common/LICENSE5
-rw-r--r--deps/amqp10_common/LICENSE-MPL-RabbitMQ373
-rw-r--r--deps/amqp10_common/Makefile34
-rwxr-xr-xdeps/amqp10_common/codegen.py131
-rw-r--r--deps/amqp10_common/development.post.mk28
-rw-r--r--deps/amqp10_common/development.pre.mk12
-rw-r--r--deps/amqp10_common/erlang.mk7808
-rw-r--r--deps/amqp10_common/rabbitmq-components.mk359
-rw-r--r--deps/amqp10_common/src/amqp10_binary_generator.erl213
-rw-r--r--deps/amqp10_common/src/amqp10_binary_parser.erl170
-rw-r--r--deps/amqp10_common/src/amqp10_framing.erl217
-rw-r--r--deps/amqp10_common/test/binary_generator_SUITE.erl167
-rw-r--r--deps/amqp_client/.gitignore22
-rw-r--r--deps/amqp_client/.travis.yml61
-rw-r--r--deps/amqp_client/CODE_OF_CONDUCT.md44
-rw-r--r--deps/amqp_client/CONTRIBUTING.md38
-rw-r--r--deps/amqp_client/LICENSE4
-rw-r--r--deps/amqp_client/LICENSE-MPL-RabbitMQ373
-rw-r--r--deps/amqp_client/Makefile198
-rwxr-xr-xdeps/amqp_client/README.in10
-rw-r--r--deps/amqp_client/erlang.mk7746
-rw-r--r--deps/amqp_client/include/amqp_client.hrl47
-rw-r--r--deps/amqp_client/include/amqp_client_internal.hrl30
-rw-r--r--deps/amqp_client/include/amqp_gen_consumer_spec.hrl32
-rw-r--r--deps/amqp_client/include/rabbit_routing_prefixes.hrl15
-rw-r--r--deps/amqp_client/rabbitmq-components.mk359
-rw-r--r--deps/amqp_client/src/amqp_auth_mechanisms.erl44
-rw-r--r--deps/amqp_client/src/amqp_channel.erl1010
-rw-r--r--deps/amqp_client/src/amqp_channel_sup.erl73
-rw-r--r--deps/amqp_client/src/amqp_channel_sup_sup.erl36
-rw-r--r--deps/amqp_client/src/amqp_channels_manager.erl249
-rw-r--r--deps/amqp_client/src/amqp_client.erl37
-rw-r--r--deps/amqp_client/src/amqp_connection.erl395
-rw-r--r--deps/amqp_client/src/amqp_connection_sup.erl41
-rw-r--r--deps/amqp_client/src/amqp_connection_type_sup.erl91
-rw-r--r--deps/amqp_client/src/amqp_direct_connection.erl232
-rw-r--r--deps/amqp_client/src/amqp_direct_consumer.erl103
-rw-r--r--deps/amqp_client/src/amqp_gen_connection.erl387
-rw-r--r--deps/amqp_client/src/amqp_gen_consumer.erl284
-rw-r--r--deps/amqp_client/src/amqp_main_reader.erl179
-rw-r--r--deps/amqp_client/src/amqp_network_connection.erl380
-rw-r--r--deps/amqp_client/src/amqp_rpc_client.erl176
-rw-r--r--deps/amqp_client/src/amqp_rpc_server.erl138
-rw-r--r--deps/amqp_client/src/amqp_selective_consumer.erl265
-rw-r--r--deps/amqp_client/src/amqp_ssl.erl113
-rw-r--r--deps/amqp_client/src/amqp_sup.erl38
-rw-r--r--deps/amqp_client/src/amqp_uri.erl273
-rw-r--r--deps/amqp_client/src/amqp_util.erl17
-rw-r--r--deps/amqp_client/src/overview.edoc.in27
-rw-r--r--deps/amqp_client/src/rabbit_routing_util.erl222
-rw-r--r--deps/amqp_client/src/uri_parser.erl125
-rw-r--r--deps/amqp_client/test/system_SUITE.erl1485
-rw-r--r--deps/amqp_client/test/unit_SUITE.erl399
-rw-r--r--deps/rabbit/.gitignore42
-rw-r--r--deps/rabbit/.travis.yml61
-rw-r--r--deps/rabbit/.travis.yml.patch11
-rw-r--r--deps/rabbit/CODE_OF_CONDUCT.md44
-rw-r--r--deps/rabbit/CONTRIBUTING.md123
-rw-r--r--deps/rabbit/INSTALL2
-rw-r--r--deps/rabbit/LICENSE5
-rw-r--r--deps/rabbit/LICENSE-MPL-RabbitMQ373
-rw-r--r--deps/rabbit/Makefile303
-rw-r--r--deps/rabbit/README.md65
-rw-r--r--deps/rabbit/SECURITY.md24
-rw-r--r--deps/rabbit/apps/rabbitmq_prelaunch/.gitignore12
-rw-r--r--deps/rabbit/apps/rabbitmq_prelaunch/Makefile11
-rw-r--r--deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_boot_state.erl76
-rw-r--r--deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_boot_state_sup.erl38
-rw-r--r--deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_boot_state_systemd.erl174
-rw-r--r--deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_prelaunch.erl228
-rw-r--r--deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_prelaunch_app.erl11
-rw-r--r--deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_prelaunch_conf.erl520
-rw-r--r--deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_prelaunch_dist.erl104
-rw-r--r--deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_prelaunch_early_logging.erl115
-rw-r--r--deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_prelaunch_erlang_compat.erl47
-rw-r--r--deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_prelaunch_errors.erl114
-rw-r--r--deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_prelaunch_sighandler.erl93
-rw-r--r--deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_prelaunch_sup.erl22
-rw-r--r--deps/rabbit/docs/README-for-packages30
-rw-r--r--deps/rabbit/docs/README.md35
-rw-r--r--deps/rabbit/docs/advanced.config.example109
-rw-r--r--deps/rabbit/docs/rabbitmq-diagnostics.8725
-rw-r--r--deps/rabbit/docs/rabbitmq-echopid.870
-rw-r--r--deps/rabbit/docs/rabbitmq-env.conf.586
-rw-r--r--deps/rabbit/docs/rabbitmq-plugins.8254
-rw-r--r--deps/rabbit/docs/rabbitmq-queues.8202
-rw-r--r--deps/rabbit/docs/rabbitmq-server.898
-rw-r--r--deps/rabbit/docs/rabbitmq-server.service.example27
-rw-r--r--deps/rabbit/docs/rabbitmq-service.8152
-rw-r--r--deps/rabbit/docs/rabbitmq-upgrade.8108
-rw-r--r--deps/rabbit/docs/rabbitmq.conf.example1002
-rw-r--r--deps/rabbit/docs/rabbitmqctl.82424
-rw-r--r--deps/rabbit/docs/set_rabbitmq_policy.sh.example4
-rw-r--r--deps/rabbit/erlang.mk7808
-rw-r--r--deps/rabbit/include/amqqueue.hrl132
-rw-r--r--deps/rabbit/include/amqqueue_v1.hrl20
-rw-r--r--deps/rabbit/include/amqqueue_v2.hrl22
-rw-r--r--deps/rabbit/include/gm_specs.hrl15
-rw-r--r--deps/rabbit/include/vhost.hrl6
-rw-r--r--deps/rabbit/include/vhost_v1.hrl4
-rw-r--r--deps/rabbit/include/vhost_v2.hrl5
-rw-r--r--deps/rabbit/priv/schema/.gitignore4
-rw-r--r--deps/rabbit/priv/schema/rabbit.schema1791
-rw-r--r--deps/rabbit/rabbitmq-components.mk359
-rwxr-xr-xdeps/rabbit/scripts/rabbitmq-defaults18
-rw-r--r--deps/rabbit/scripts/rabbitmq-defaults.bat21
-rwxr-xr-xdeps/rabbit/scripts/rabbitmq-diagnostics23
-rw-r--r--deps/rabbit/scripts/rabbitmq-diagnostics.bat55
-rw-r--r--deps/rabbit/scripts/rabbitmq-echopid.bat57
-rwxr-xr-xdeps/rabbit/scripts/rabbitmq-env190
-rw-r--r--deps/rabbit/scripts/rabbitmq-env.bat173
-rwxr-xr-xdeps/rabbit/scripts/rabbitmq-plugins23
-rw-r--r--deps/rabbit/scripts/rabbitmq-plugins.bat56
-rwxr-xr-xdeps/rabbit/scripts/rabbitmq-queues23
-rw-r--r--deps/rabbit/scripts/rabbitmq-queues.bat56
-rwxr-xr-xdeps/rabbit/scripts/rabbitmq-rel58
-rwxr-xr-xdeps/rabbit/scripts/rabbitmq-server155
-rw-r--r--deps/rabbit/scripts/rabbitmq-server.bat91
-rw-r--r--deps/rabbit/scripts/rabbitmq-service.bat271
-rwxr-xr-xdeps/rabbit/scripts/rabbitmq-streams32
-rw-r--r--deps/rabbit/scripts/rabbitmq-streams.bat63
-rwxr-xr-xdeps/rabbit/scripts/rabbitmq-upgrade23
-rw-r--r--deps/rabbit/scripts/rabbitmq-upgrade.bat55
-rwxr-xr-xdeps/rabbit/scripts/rabbitmqctl23
-rw-r--r--deps/rabbit/scripts/rabbitmqctl.bat56
-rw-r--r--deps/rabbit/src/amqqueue.erl762
-rw-r--r--deps/rabbit/src/amqqueue_v1.erl584
-rw-r--r--deps/rabbit/src/background_gc.erl78
-rw-r--r--deps/rabbit/src/code_server_cache.erl81
-rw-r--r--deps/rabbit/src/gatherer.erl151
-rw-r--r--deps/rabbit/src/gm.erl1650
-rw-r--r--deps/rabbit/src/internal_user.erl216
-rw-r--r--deps/rabbit/src/internal_user_v1.erl151
-rw-r--r--deps/rabbit/src/lager_exchange_backend.erl233
-rw-r--r--deps/rabbit/src/lqueue.erl102
-rw-r--r--deps/rabbit/src/mirrored_supervisor_sups.erl34
-rw-r--r--deps/rabbit/src/pg_local.erl249
-rw-r--r--deps/rabbit/src/rabbit.erl1511
-rw-r--r--deps/rabbit/src/rabbit_access_control.erl257
-rw-r--r--deps/rabbit/src/rabbit_alarm.erl365
-rw-r--r--deps/rabbit/src/rabbit_amqqueue.erl1889
-rw-r--r--deps/rabbit/src/rabbit_amqqueue_process.erl1849
-rw-r--r--deps/rabbit/src/rabbit_amqqueue_sup.erl35
-rw-r--r--deps/rabbit/src/rabbit_amqqueue_sup_sup.erl84
-rw-r--r--deps/rabbit/src/rabbit_auth_backend_internal.erl1076
-rw-r--r--deps/rabbit/src/rabbit_auth_mechanism_amqplain.erl54
-rw-r--r--deps/rabbit/src/rabbit_auth_mechanism_cr_demo.erl48
-rw-r--r--deps/rabbit/src/rabbit_auth_mechanism_plain.erl60
-rw-r--r--deps/rabbit/src/rabbit_autoheal.erl456
-rw-r--r--deps/rabbit/src/rabbit_backing_queue.erl264
-rw-r--r--deps/rabbit/src/rabbit_basic.erl354
-rw-r--r--deps/rabbit/src/rabbit_binding.erl691
-rw-r--r--deps/rabbit/src/rabbit_boot_steps.erl91
-rw-r--r--deps/rabbit/src/rabbit_channel.erl2797
-rw-r--r--deps/rabbit/src/rabbit_channel_interceptor.erl104
-rw-r--r--deps/rabbit/src/rabbit_channel_sup.erl92
-rw-r--r--deps/rabbit/src/rabbit_channel_sup_sup.erl42
-rw-r--r--deps/rabbit/src/rabbit_channel_tracking.erl291
-rw-r--r--deps/rabbit/src/rabbit_channel_tracking_handler.erl71
-rw-r--r--deps/rabbit/src/rabbit_classic_queue.erl527
-rw-r--r--deps/rabbit/src/rabbit_client_sup.erl43
-rw-r--r--deps/rabbit/src/rabbit_config.erl46
-rw-r--r--deps/rabbit/src/rabbit_confirms.erl152
-rw-r--r--deps/rabbit/src/rabbit_connection_helper_sup.erl57
-rw-r--r--deps/rabbit/src/rabbit_connection_sup.erl66
-rw-r--r--deps/rabbit/src/rabbit_connection_tracking.erl515
-rw-r--r--deps/rabbit/src/rabbit_connection_tracking_handler.erl80
-rw-r--r--deps/rabbit/src/rabbit_control_pbe.erl82
-rw-r--r--deps/rabbit/src/rabbit_core_ff.erl179
-rw-r--r--deps/rabbit/src/rabbit_core_metrics_gc.erl199
-rw-r--r--deps/rabbit/src/rabbit_credential_validation.erl44
-rw-r--r--deps/rabbit/src/rabbit_credential_validator.erl19
-rw-r--r--deps/rabbit/src/rabbit_credential_validator_accept_everything.erl23
-rw-r--r--deps/rabbit/src/rabbit_credential_validator_min_password_length.erl50
-rw-r--r--deps/rabbit/src/rabbit_credential_validator_password_regexp.erl42
-rw-r--r--deps/rabbit/src/rabbit_dead_letter.erl253
-rw-r--r--deps/rabbit/src/rabbit_definitions.erl767
-rw-r--r--deps/rabbit/src/rabbit_diagnostics.erl119
-rw-r--r--deps/rabbit/src/rabbit_direct.erl235
-rw-r--r--deps/rabbit/src/rabbit_disk_monitor.erl317
-rw-r--r--deps/rabbit/src/rabbit_epmd_monitor.erl104
-rw-r--r--deps/rabbit/src/rabbit_event_consumer.erl197
-rw-r--r--deps/rabbit/src/rabbit_exchange.erl592
-rw-r--r--deps/rabbit/src/rabbit_exchange_decorator.erl105
-rw-r--r--deps/rabbit/src/rabbit_exchange_parameters.erl39
-rw-r--r--deps/rabbit/src/rabbit_exchange_type_direct.erl46
-rw-r--r--deps/rabbit/src/rabbit_exchange_type_fanout.erl45
-rw-r--r--deps/rabbit/src/rabbit_exchange_type_headers.erl136
-rw-r--r--deps/rabbit/src/rabbit_exchange_type_invalid.erl45
-rw-r--r--deps/rabbit/src/rabbit_exchange_type_topic.erl266
-rw-r--r--deps/rabbit/src/rabbit_feature_flags.erl2470
-rw-r--r--deps/rabbit/src/rabbit_ff_extra.erl244
-rw-r--r--deps/rabbit/src/rabbit_ff_registry.erl189
-rw-r--r--deps/rabbit/src/rabbit_fhc_helpers.erl45
-rw-r--r--deps/rabbit/src/rabbit_fifo.erl2124
-rw-r--r--deps/rabbit/src/rabbit_fifo.hrl210
-rw-r--r--deps/rabbit/src/rabbit_fifo_client.erl888
-rw-r--r--deps/rabbit/src/rabbit_fifo_index.erl119
-rw-r--r--deps/rabbit/src/rabbit_fifo_v0.erl1961
-rw-r--r--deps/rabbit/src/rabbit_fifo_v0.hrl195
-rw-r--r--deps/rabbit/src/rabbit_file.erl321
-rw-r--r--deps/rabbit/src/rabbit_framing.erl36
-rw-r--r--deps/rabbit/src/rabbit_guid.erl181
-rw-r--r--deps/rabbit/src/rabbit_health_check.erl80
-rw-r--r--deps/rabbit/src/rabbit_lager.erl723
-rw-r--r--deps/rabbit/src/rabbit_limiter.erl448
-rw-r--r--deps/rabbit/src/rabbit_log_tail.erl102
-rw-r--r--deps/rabbit/src/rabbit_looking_glass.erl48
-rw-r--r--deps/rabbit/src/rabbit_maintenance.erl354
-rw-r--r--deps/rabbit/src/rabbit_memory_monitor.erl259
-rw-r--r--deps/rabbit/src/rabbit_metrics.erl45
-rw-r--r--deps/rabbit/src/rabbit_mirror_queue_coordinator.erl460
-rw-r--r--deps/rabbit/src/rabbit_mirror_queue_master.erl578
-rw-r--r--deps/rabbit/src/rabbit_mirror_queue_misc.erl680
-rw-r--r--deps/rabbit/src/rabbit_mirror_queue_mode.erl42
-rw-r--r--deps/rabbit/src/rabbit_mirror_queue_mode_all.erl32
-rw-r--r--deps/rabbit/src/rabbit_mirror_queue_mode_exactly.erl45
-rw-r--r--deps/rabbit/src/rabbit_mirror_queue_mode_nodes.erl69
-rw-r--r--deps/rabbit/src/rabbit_mirror_queue_slave.erl1093
-rw-r--r--deps/rabbit/src/rabbit_mirror_queue_sync.erl420
-rw-r--r--deps/rabbit/src/rabbit_mnesia.erl1117
-rw-r--r--deps/rabbit/src/rabbit_mnesia_rename.erl276
-rw-r--r--deps/rabbit/src/rabbit_msg_file.erl114
-rw-r--r--deps/rabbit/src/rabbit_msg_record.erl400
-rw-r--r--deps/rabbit/src/rabbit_msg_store.erl2245
-rw-r--r--deps/rabbit/src/rabbit_msg_store_ets_index.erl76
-rw-r--r--deps/rabbit/src/rabbit_msg_store_gc.erl125
-rw-r--r--deps/rabbit/src/rabbit_networking.erl663
-rw-r--r--deps/rabbit/src/rabbit_node_monitor.erl926
-rw-r--r--deps/rabbit/src/rabbit_nodes.erl157
-rw-r--r--deps/rabbit/src/rabbit_osiris_metrics.erl103
-rw-r--r--deps/rabbit/src/rabbit_parameter_validation.erl88
-rw-r--r--deps/rabbit/src/rabbit_password.erl52
-rw-r--r--deps/rabbit/src/rabbit_password_hashing_md5.erl19
-rw-r--r--deps/rabbit/src/rabbit_password_hashing_sha256.erl15
-rw-r--r--deps/rabbit/src/rabbit_password_hashing_sha512.erl15
-rw-r--r--deps/rabbit/src/rabbit_peer_discovery.erl326
-rw-r--r--deps/rabbit/src/rabbit_peer_discovery_classic_config.erl75
-rw-r--r--deps/rabbit/src/rabbit_peer_discovery_dns.erl113
-rw-r--r--deps/rabbit/src/rabbit_plugins.erl699
-rw-r--r--deps/rabbit/src/rabbit_policies.erl179
-rw-r--r--deps/rabbit/src/rabbit_policy.erl557
-rw-r--r--deps/rabbit/src/rabbit_policy_merge_strategy.erl19
-rw-r--r--deps/rabbit/src/rabbit_prelaunch_cluster.erl22
-rw-r--r--deps/rabbit/src/rabbit_prelaunch_enabled_plugins_file.erl53
-rw-r--r--deps/rabbit/src/rabbit_prelaunch_feature_flags.erl32
-rw-r--r--deps/rabbit/src/rabbit_prelaunch_logging.erl75
-rw-r--r--deps/rabbit/src/rabbit_prequeue.erl100
-rw-r--r--deps/rabbit/src/rabbit_priority_queue.erl688
-rw-r--r--deps/rabbit/src/rabbit_queue_consumers.erl568
-rw-r--r--deps/rabbit/src/rabbit_queue_decorator.erl72
-rw-r--r--deps/rabbit/src/rabbit_queue_index.erl1521
-rw-r--r--deps/rabbit/src/rabbit_queue_location_client_local.erl39
-rw-r--r--deps/rabbit/src/rabbit_queue_location_min_masters.erl70
-rw-r--r--deps/rabbit/src/rabbit_queue_location_random.erl42
-rw-r--r--deps/rabbit/src/rabbit_queue_location_validator.erl67
-rw-r--r--deps/rabbit/src/rabbit_queue_master_location_misc.erl108
-rw-r--r--deps/rabbit/src/rabbit_queue_master_locator.erl19
-rw-r--r--deps/rabbit/src/rabbit_queue_type.erl581
-rw-r--r--deps/rabbit/src/rabbit_queue_type_util.erl74
-rw-r--r--deps/rabbit/src/rabbit_quorum_memory_manager.erl67
-rw-r--r--deps/rabbit/src/rabbit_quorum_queue.erl1523
-rw-r--r--deps/rabbit/src/rabbit_ra_registry.erl25
-rw-r--r--deps/rabbit/src/rabbit_reader.erl1803
-rw-r--r--deps/rabbit/src/rabbit_recovery_terms.erl240
-rw-r--r--deps/rabbit/src/rabbit_restartable_sup.erl33
-rw-r--r--deps/rabbit/src/rabbit_router.erl65
-rw-r--r--deps/rabbit/src/rabbit_runtime_parameters.erl412
-rw-r--r--deps/rabbit/src/rabbit_ssl.erl195
-rw-r--r--deps/rabbit/src/rabbit_stream_coordinator.erl949
-rw-r--r--deps/rabbit/src/rabbit_stream_queue.erl734
-rw-r--r--deps/rabbit/src/rabbit_sup.erl109
-rw-r--r--deps/rabbit/src/rabbit_sysmon_handler.erl235
-rw-r--r--deps/rabbit/src/rabbit_sysmon_minder.erl156
-rw-r--r--deps/rabbit/src/rabbit_table.erl416
-rw-r--r--deps/rabbit/src/rabbit_trace.erl128
-rw-r--r--deps/rabbit/src/rabbit_tracking.erl103
-rw-r--r--deps/rabbit/src/rabbit_upgrade.erl314
-rw-r--r--deps/rabbit/src/rabbit_upgrade_functions.erl662
-rw-r--r--deps/rabbit/src/rabbit_upgrade_preparation.erl51
-rw-r--r--deps/rabbit/src/rabbit_variable_queue.erl3015
-rw-r--r--deps/rabbit/src/rabbit_version.erl227
-rw-r--r--deps/rabbit/src/rabbit_vhost.erl422
-rw-r--r--deps/rabbit/src/rabbit_vhost_limit.erl205
-rw-r--r--deps/rabbit/src/rabbit_vhost_msg_store.erl68
-rw-r--r--deps/rabbit/src/rabbit_vhost_process.erl96
-rw-r--r--deps/rabbit/src/rabbit_vhost_sup.erl22
-rw-r--r--deps/rabbit/src/rabbit_vhost_sup_sup.erl271
-rw-r--r--deps/rabbit/src/rabbit_vhost_sup_wrapper.erl57
-rw-r--r--deps/rabbit/src/rabbit_vm.erl427
-rw-r--r--deps/rabbit/src/supervised_lifecycle.erl53
-rw-r--r--deps/rabbit/src/tcp_listener.erl90
-rw-r--r--deps/rabbit/src/tcp_listener_sup.erl54
-rw-r--r--deps/rabbit/src/term_to_binary_compat.erl15
-rw-r--r--deps/rabbit/src/vhost.erl172
-rw-r--r--deps/rabbit/src/vhost_v1.erl106
-rw-r--r--deps/rabbit/test/amqqueue_backward_compatibility_SUITE.erl302
-rw-r--r--deps/rabbit/test/backing_queue_SUITE.erl1632
-rw-r--r--deps/rabbit/test/channel_interceptor_SUITE.erl108
-rw-r--r--deps/rabbit/test/channel_operation_timeout_SUITE.erl198
-rw-r--r--deps/rabbit/test/channel_operation_timeout_test_queue.erl323
-rw-r--r--deps/rabbit/test/cluster_SUITE.erl307
-rw-r--r--deps/rabbit/test/cluster_rename_SUITE.erl301
-rw-r--r--deps/rabbit/test/clustering_management_SUITE.erl861
-rw-r--r--deps/rabbit/test/config_schema_SUITE.erl54
-rw-r--r--deps/rabbit/test/config_schema_SUITE_data/certs/cacert.pem1
-rw-r--r--deps/rabbit/test/config_schema_SUITE_data/certs/cert.pem1
-rw-r--r--deps/rabbit/test/config_schema_SUITE_data/certs/key.pem1
-rw-r--r--deps/rabbit/test/config_schema_SUITE_data/rabbit.snippets797
-rw-r--r--deps/rabbit/test/confirms_rejects_SUITE.erl412
-rw-r--r--deps/rabbit/test/consumer_timeout_SUITE.erl262
-rw-r--r--deps/rabbit/test/crashing_queues_SUITE.erl267
-rw-r--r--deps/rabbit/test/dead_lettering_SUITE.erl1174
-rw-r--r--deps/rabbit/test/definition_import_SUITE.erl257
-rw-r--r--deps/rabbit/test/definition_import_SUITE_data/case1.json99
-rw-r--r--deps/rabbit/test/definition_import_SUITE_data/case10/case10a.json67
-rw-r--r--deps/rabbit/test/definition_import_SUITE_data/case10/case10b.json595
-rw-r--r--deps/rabbit/test/definition_import_SUITE_data/case11.json24
-rw-r--r--deps/rabbit/test/definition_import_SUITE_data/case13.json55
-rw-r--r--deps/rabbit/test/definition_import_SUITE_data/case2.json49
-rw-r--r--deps/rabbit/test/definition_import_SUITE_data/case3.json1
-rw-r--r--deps/rabbit/test/definition_import_SUITE_data/case4.json49
-rw-r--r--deps/rabbit/test/definition_import_SUITE_data/case5.json63
-rw-r--r--deps/rabbit/test/definition_import_SUITE_data/case6.json47
-rw-r--r--deps/rabbit/test/definition_import_SUITE_data/case7.json398
-rw-r--r--deps/rabbit/test/definition_import_SUITE_data/case8.json17
-rw-r--r--deps/rabbit/test/definition_import_SUITE_data/case9/case9a.json1
-rw-r--r--deps/rabbit/test/definition_import_SUITE_data/case9/case9b.json1
-rw-r--r--deps/rabbit/test/definition_import_SUITE_data/failing_case12.json24
-rw-r--r--deps/rabbit/test/disconnect_detected_during_alarm_SUITE.erl111
-rw-r--r--deps/rabbit/test/dummy_event_receiver.erl49
-rw-r--r--deps/rabbit/test/dummy_interceptor.erl26
-rw-r--r--deps/rabbit/test/dummy_runtime_parameters.erl63
-rw-r--r--deps/rabbit/test/dummy_supervisor2.erl32
-rw-r--r--deps/rabbit/test/dynamic_ha_SUITE.erl1034
-rw-r--r--deps/rabbit/test/dynamic_qq_SUITE.erl248
-rw-r--r--deps/rabbit/test/eager_sync_SUITE.erl271
-rw-r--r--deps/rabbit/test/failing_dummy_interceptor.erl27
-rw-r--r--deps/rabbit/test/feature_flags_SUITE.erl1156
-rw-r--r--deps/rabbit/test/feature_flags_SUITE_data/my_plugin/.gitignore7
-rw-r--r--deps/rabbit/test/feature_flags_SUITE_data/my_plugin/Makefile15
-rw-r--r--deps/rabbit/test/feature_flags_SUITE_data/my_plugin/erlang.mk1
-rw-r--r--deps/rabbit/test/feature_flags_SUITE_data/my_plugin/rabbitmq-components.mk1
-rw-r--r--deps/rabbit/test/feature_flags_SUITE_data/my_plugin/src/my_plugin.erl10
-rw-r--r--deps/rabbit/test/honeycomb_cth.erl105
-rw-r--r--deps/rabbit/test/lazy_queue_SUITE.erl215
-rw-r--r--deps/rabbit/test/list_consumers_sanity_check_SUITE.erl125
-rw-r--r--deps/rabbit/test/list_queues_online_and_offline_SUITE.erl99
-rw-r--r--deps/rabbit/test/maintenance_mode_SUITE.erl284
-rw-r--r--deps/rabbit/test/many_node_ha_SUITE.erl112
-rw-r--r--deps/rabbit/test/message_size_limit_SUITE.erl145
-rw-r--r--deps/rabbit/test/metrics_SUITE.erl404
-rw-r--r--deps/rabbit/test/mirrored_supervisor_SUITE.erl328
-rw-r--r--deps/rabbit/test/mirrored_supervisor_SUITE_gs.erl57
-rw-r--r--deps/rabbit/test/msg_store_SUITE.erl53
-rw-r--r--deps/rabbit/test/peer_discovery_classic_config_SUITE.erl179
-rw-r--r--deps/rabbit/test/peer_discovery_dns_SUITE.erl104
-rw-r--r--deps/rabbit/test/per_user_connection_channel_limit_SUITE.erl1651
-rw-r--r--deps/rabbit/test/per_user_connection_channel_limit_partitions_SUITE.erl182
-rw-r--r--deps/rabbit/test/per_user_connection_channel_tracking_SUITE.erl850
-rw-r--r--deps/rabbit/test/per_user_connection_tracking_SUITE.erl269
-rw-r--r--deps/rabbit/test/per_vhost_connection_limit_SUITE.erl751
-rw-r--r--deps/rabbit/test/per_vhost_connection_limit_partitions_SUITE.erl150
-rw-r--r--deps/rabbit/test/per_vhost_msg_store_SUITE.erl245
-rw-r--r--deps/rabbit/test/per_vhost_queue_limit_SUITE.erl682
-rw-r--r--deps/rabbit/test/policy_SUITE.erl204
-rw-r--r--deps/rabbit/test/priority_queue_SUITE.erl771
-rw-r--r--deps/rabbit/test/priority_queue_recovery_SUITE.erl144
-rw-r--r--deps/rabbit/test/product_info_SUITE.erl171
-rw-r--r--deps/rabbit/test/proxy_protocol_SUITE.erl91
-rw-r--r--deps/rabbit/test/publisher_confirms_parallel_SUITE.erl380
-rw-r--r--deps/rabbit/test/queue_length_limits_SUITE.erl382
-rw-r--r--deps/rabbit/test/queue_master_location_SUITE.erl487
-rw-r--r--deps/rabbit/test/queue_parallel_SUITE.erl725
-rw-r--r--deps/rabbit/test/queue_type_SUITE.erl275
-rw-r--r--deps/rabbit/test/quorum_queue_SUITE.erl2792
-rw-r--r--deps/rabbit/test/quorum_queue_utils.erl112
-rw-r--r--deps/rabbit/test/rabbit_auth_backend_context_propagation_mock.erl46
-rw-r--r--deps/rabbit/test/rabbit_confirms_SUITE.erl154
-rw-r--r--deps/rabbit/test/rabbit_core_metrics_gc_SUITE.erl392
-rw-r--r--deps/rabbit/test/rabbit_dummy_protocol_connection_info.erl19
-rw-r--r--deps/rabbit/test/rabbit_fifo_SUITE.erl1667
-rw-r--r--deps/rabbit/test/rabbit_fifo_int_SUITE.erl661
-rw-r--r--deps/rabbit/test/rabbit_fifo_prop_SUITE.erl1211
-rw-r--r--deps/rabbit/test/rabbit_fifo_v0_SUITE.erl1392
-rw-r--r--deps/rabbit/test/rabbit_foo_protocol_connection_info.erl25
-rw-r--r--deps/rabbit/test/rabbit_ha_test_consumer.erl108
-rw-r--r--deps/rabbit/test/rabbit_ha_test_producer.erl131
-rw-r--r--deps/rabbit/test/rabbit_msg_record_SUITE.erl213
-rw-r--r--deps/rabbit/test/rabbit_stream_queue_SUITE.erl1610
-rw-r--r--deps/rabbit/test/rabbitmq-env.bats128
-rw-r--r--deps/rabbit/test/rabbitmq_queues_cli_integration_SUITE.erl139
-rw-r--r--deps/rabbit/test/rabbitmqctl_integration_SUITE.erl164
-rw-r--r--deps/rabbit/test/rabbitmqctl_shutdown_SUITE.erl110
-rw-r--r--deps/rabbit/test/signal_handling_SUITE.erl160
-rw-r--r--deps/rabbit/test/simple_ha_SUITE.erl371
-rw-r--r--deps/rabbit/test/single_active_consumer_SUITE.erl376
-rw-r--r--deps/rabbit/test/sync_detection_SUITE.erl243
-rwxr-xr-xdeps/rabbit/test/temp/head_message_timestamp_tests.py131
-rwxr-xr-xdeps/rabbit/test/temp/rabbitmqadmin.py934
-rw-r--r--deps/rabbit/test/term_to_binary_compat_prop_SUITE.erl105
-rw-r--r--deps/rabbit/test/test_util.erl28
-rw-r--r--deps/rabbit/test/topic_permission_SUITE.erl244
-rw-r--r--deps/rabbit/test/unit_access_control_SUITE.erl445
-rw-r--r--deps/rabbit/test/unit_access_control_authn_authz_context_propagation_SUITE.erl127
-rw-r--r--deps/rabbit/test/unit_access_control_credential_validation_SUITE.erl269
-rw-r--r--deps/rabbit/test/unit_amqp091_content_framing_SUITE.erl231
-rw-r--r--deps/rabbit/test/unit_amqp091_server_properties_SUITE.erl144
-rw-r--r--deps/rabbit/test/unit_app_management_SUITE.erl105
-rw-r--r--deps/rabbit/test/unit_cluster_formation_locking_mocks_SUITE.erl71
-rw-r--r--deps/rabbit/test/unit_collections_SUITE.erl51
-rw-r--r--deps/rabbit/test/unit_config_value_encryption_SUITE.erl233
-rw-r--r--deps/rabbit/test/unit_config_value_encryption_SUITE_data/lib/rabbit_shovel_test/ebin/rabbit_shovel_test.app46
-rw-r--r--deps/rabbit/test/unit_config_value_encryption_SUITE_data/rabbit_shovel_test.passphrase1
-rw-r--r--deps/rabbit/test/unit_connection_tracking_SUITE.erl119
-rw-r--r--deps/rabbit/test/unit_credit_flow_SUITE.erl90
-rw-r--r--deps/rabbit/test/unit_disk_monitor_SUITE.erl90
-rw-r--r--deps/rabbit/test/unit_disk_monitor_mocks_SUITE.erl112
-rw-r--r--deps/rabbit/test/unit_file_handle_cache_SUITE.erl278
-rw-r--r--deps/rabbit/test/unit_gen_server2_SUITE.erl152
-rw-r--r--deps/rabbit/test/unit_gm_SUITE.erl242
-rw-r--r--deps/rabbit/test/unit_log_config_SUITE.erl837
-rw-r--r--deps/rabbit/test/unit_log_management_SUITE.erl413
-rw-r--r--deps/rabbit/test/unit_operator_policy_SUITE.erl107
-rw-r--r--deps/rabbit/test/unit_pg_local_SUITE.erl103
-rw-r--r--deps/rabbit/test/unit_plugin_directories_SUITE.erl76
-rw-r--r--deps/rabbit/test/unit_plugin_versioning_SUITE.erl170
-rw-r--r--deps/rabbit/test/unit_policy_validators_SUITE.erl207
-rw-r--r--deps/rabbit/test/unit_priority_queue_SUITE.erl184
-rw-r--r--deps/rabbit/test/unit_queue_consumers_SUITE.erl121
-rw-r--r--deps/rabbit/test/unit_stats_and_metrics_SUITE.erl266
-rw-r--r--deps/rabbit/test/unit_supervisor2_SUITE.erl69
-rw-r--r--deps/rabbit/test/unit_vm_memory_monitor_SUITE.erl95
-rw-r--r--deps/rabbit/test/upgrade_preparation_SUITE.erl109
-rw-r--r--deps/rabbit/test/vhost_SUITE.erl381
-rw-r--r--deps/rabbit_common/.gitignore31
-rw-r--r--deps/rabbit_common/.travis.yml61
-rw-r--r--deps/rabbit_common/CODE_OF_CONDUCT.md44
-rw-r--r--deps/rabbit_common/CONTRIBUTING.md38
-rw-r--r--deps/rabbit_common/LICENSE11
-rw-r--r--deps/rabbit_common/LICENSE-BSD-recon27
-rw-r--r--deps/rabbit_common/LICENSE-MIT-Erlware-Commons21
-rw-r--r--deps/rabbit_common/LICENSE-MIT-Mochi9
-rw-r--r--deps/rabbit_common/LICENSE-MPL-RabbitMQ373
-rw-r--r--deps/rabbit_common/Makefile53
-rw-r--r--deps/rabbit_common/README.md4
-rwxr-xr-xdeps/rabbit_common/codegen.py582
-rw-r--r--deps/rabbit_common/development.post.mk33
-rw-r--r--deps/rabbit_common/development.pre.mk14
-rw-r--r--deps/rabbit_common/erlang.mk7746
-rw-r--r--deps/rabbit_common/include/rabbit.hrl267
-rw-r--r--deps/rabbit_common/include/rabbit_core_metrics.hrl52
-rw-r--r--deps/rabbit_common/include/rabbit_log.hrl8
-rw-r--r--deps/rabbit_common/include/rabbit_memory.hrl16
-rw-r--r--deps/rabbit_common/include/rabbit_misc.hrl9
-rw-r--r--deps/rabbit_common/include/rabbit_msg_store.hrl12
-rw-r--r--deps/rabbit_common/include/resource.hrl14
-rw-r--r--deps/rabbit_common/mk/rabbitmq-build.mk42
-rw-r--r--deps/rabbit_common/mk/rabbitmq-components.hexpm.mk36
-rw-r--r--deps/rabbit_common/mk/rabbitmq-components.mk359
-rw-r--r--deps/rabbit_common/mk/rabbitmq-dist.mk365
-rw-r--r--deps/rabbit_common/mk/rabbitmq-early-plugin.mk3
-rw-r--r--deps/rabbit_common/mk/rabbitmq-early-test.mk130
-rw-r--r--deps/rabbit_common/mk/rabbitmq-hexpm.mk67
-rw-r--r--deps/rabbit_common/mk/rabbitmq-macros.mk22
-rw-r--r--deps/rabbit_common/mk/rabbitmq-plugin.mk23
-rw-r--r--deps/rabbit_common/mk/rabbitmq-run.mk428
-rw-r--r--deps/rabbit_common/mk/rabbitmq-test.mk80
-rw-r--r--deps/rabbit_common/mk/rabbitmq-tools.mk429
-rwxr-xr-xdeps/rabbit_common/mk/xrefr338
-rw-r--r--deps/rabbit_common/src/app_utils.erl167
-rw-r--r--deps/rabbit_common/src/code_version.erl348
-rw-r--r--deps/rabbit_common/src/credit_flow.erl210
-rw-r--r--deps/rabbit_common/src/delegate.erl277
-rw-r--r--deps/rabbit_common/src/delegate_sup.erl55
-rw-r--r--deps/rabbit_common/src/file_handle_cache.erl1564
-rw-r--r--deps/rabbit_common/src/file_handle_cache_stats.erl57
-rw-r--r--deps/rabbit_common/src/gen_server2.erl1419
-rw-r--r--deps/rabbit_common/src/lager_forwarder_backend.erl120
-rw-r--r--deps/rabbit_common/src/mirrored_supervisor.erl513
-rw-r--r--deps/rabbit_common/src/mnesia_sync.erl64
-rw-r--r--deps/rabbit_common/src/pmon.erl96
-rw-r--r--deps/rabbit_common/src/priority_queue.erl234
-rw-r--r--deps/rabbit_common/src/rabbit_amqp_connection.erl34
-rw-r--r--deps/rabbit_common/src/rabbit_amqqueue_common.erl39
-rw-r--r--deps/rabbit_common/src/rabbit_auth_backend_dummy.erl39
-rw-r--r--deps/rabbit_common/src/rabbit_auth_mechanism.erl41
-rw-r--r--deps/rabbit_common/src/rabbit_authn_backend.erl27
-rw-r--r--deps/rabbit_common/src/rabbit_authz_backend.erl88
-rw-r--r--deps/rabbit_common/src/rabbit_basic_common.erl41
-rw-r--r--deps/rabbit_common/src/rabbit_binary_generator.erl235
-rw-r--r--deps/rabbit_common/src/rabbit_binary_parser.erl172
-rw-r--r--deps/rabbit_common/src/rabbit_cert_info.erl270
-rw-r--r--deps/rabbit_common/src/rabbit_channel_common.erl25
-rw-r--r--deps/rabbit_common/src/rabbit_command_assembler.erl124
-rw-r--r--deps/rabbit_common/src/rabbit_control_misc.erl179
-rw-r--r--deps/rabbit_common/src/rabbit_core_metrics.erl437
-rw-r--r--deps/rabbit_common/src/rabbit_data_coercion.erl47
-rw-r--r--deps/rabbit_common/src/rabbit_env.erl1850
-rw-r--r--deps/rabbit_common/src/rabbit_error_logger_handler.erl169
-rw-r--r--deps/rabbit_common/src/rabbit_event.erl164
-rw-r--r--deps/rabbit_common/src/rabbit_exchange_type.erl68
-rw-r--r--deps/rabbit_common/src/rabbit_heartbeat.erl184
-rw-r--r--deps/rabbit_common/src/rabbit_http_util.erl967
-rw-r--r--deps/rabbit_common/src/rabbit_json.erl63
-rw-r--r--deps/rabbit_common/src/rabbit_log.erl164
-rw-r--r--deps/rabbit_common/src/rabbit_log_osiris_shim.erl26
-rw-r--r--deps/rabbit_common/src/rabbit_log_ra_shim.erl16
-rw-r--r--deps/rabbit_common/src/rabbit_misc.erl1434
-rw-r--r--deps/rabbit_common/src/rabbit_msg_store_index.erl89
-rw-r--r--deps/rabbit_common/src/rabbit_net.erl321
-rw-r--r--deps/rabbit_common/src/rabbit_nodes_common.erl227
-rw-r--r--deps/rabbit_common/src/rabbit_numerical.erl358
-rw-r--r--deps/rabbit_common/src/rabbit_password_hashing.erl11
-rw-r--r--deps/rabbit_common/src/rabbit_pbe.erl54
-rw-r--r--deps/rabbit_common/src/rabbit_peer_discovery_backend.erl59
-rw-r--r--deps/rabbit_common/src/rabbit_policy_validator.erl22
-rw-r--r--deps/rabbit_common/src/rabbit_queue_collector.erl80
-rw-r--r--deps/rabbit_common/src/rabbit_registry.erl165
-rw-r--r--deps/rabbit_common/src/rabbit_registry_class.erl12
-rw-r--r--deps/rabbit_common/src/rabbit_resource_monitor_misc.erl39
-rw-r--r--deps/rabbit_common/src/rabbit_runtime.erl66
-rw-r--r--deps/rabbit_common/src/rabbit_runtime_parameter.erl25
-rw-r--r--deps/rabbit_common/src/rabbit_semver.erl730
-rw-r--r--deps/rabbit_common/src/rabbit_semver_parser.erl306
-rw-r--r--deps/rabbit_common/src/rabbit_ssl_options.erl86
-rw-r--r--deps/rabbit_common/src/rabbit_types.erl196
-rw-r--r--deps/rabbit_common/src/rabbit_writer.erl437
-rw-r--r--deps/rabbit_common/src/supervisor2.erl1651
-rw-r--r--deps/rabbit_common/src/vm_memory_monitor.erl576
-rw-r--r--deps/rabbit_common/src/worker_pool.erl172
-rw-r--r--deps/rabbit_common/src/worker_pool_sup.erl69
-rw-r--r--deps/rabbit_common/src/worker_pool_worker.erl192
-rw-r--r--deps/rabbit_common/test/gen_server2_test_server.erl72
-rw-r--r--deps/rabbit_common/test/rabbit_env_SUITE.erl1098
-rw-r--r--deps/rabbit_common/test/supervisor2_SUITE.erl128
-rw-r--r--deps/rabbit_common/test/unit_SUITE.erl446
-rw-r--r--deps/rabbit_common/test/unit_priority_queue_SUITE.erl35
-rw-r--r--deps/rabbit_common/test/worker_pool_SUITE.erl220
-rw-r--r--deps/rabbitmq_amqp1_0/.gitignore35
-rw-r--r--deps/rabbitmq_amqp1_0/.travis.yml66
-rw-r--r--deps/rabbitmq_amqp1_0/.travis.yml.patch16
-rw-r--r--deps/rabbitmq_amqp1_0/CODE_OF_CONDUCT.md44
-rw-r--r--deps/rabbitmq_amqp1_0/CONTRIBUTING.md38
-rw-r--r--deps/rabbitmq_amqp1_0/LICENSE4
-rw-r--r--deps/rabbitmq_amqp1_0/LICENSE-MPL-RabbitMQ373
-rw-r--r--deps/rabbitmq_amqp1_0/Makefile52
-rw-r--r--deps/rabbitmq_amqp1_0/README.md224
-rw-r--r--deps/rabbitmq_amqp1_0/erlang.mk7808
-rw-r--r--deps/rabbitmq_amqp1_0/include/rabbit_amqp1_0.hrl52
-rw-r--r--deps/rabbitmq_amqp1_0/priv/schema/rabbitmq_amqp1_0.schema31
-rw-r--r--deps/rabbitmq_amqp1_0/rabbitmq-components.mk359
-rw-r--r--deps/rabbitmq_amqp1_0/src/Elixir.RabbitMQ.CLI.Ctl.Commands.ListAmqp10ConnectionsCommand.erl79
-rw-r--r--deps/rabbitmq_amqp1_0/src/rabbit_amqp1_0.erl40
-rw-r--r--deps/rabbitmq_amqp1_0/src/rabbit_amqp1_0_channel.erl61
-rw-r--r--deps/rabbitmq_amqp1_0/src/rabbit_amqp1_0_incoming_link.erl228
-rw-r--r--deps/rabbitmq_amqp1_0/src/rabbit_amqp1_0_link_util.erl67
-rw-r--r--deps/rabbitmq_amqp1_0/src/rabbit_amqp1_0_message.erl261
-rw-r--r--deps/rabbitmq_amqp1_0/src/rabbit_amqp1_0_outgoing_link.erl240
-rw-r--r--deps/rabbitmq_amqp1_0/src/rabbit_amqp1_0_reader.erl809
-rw-r--r--deps/rabbitmq_amqp1_0/src/rabbit_amqp1_0_session.erl398
-rw-r--r--deps/rabbitmq_amqp1_0/src/rabbit_amqp1_0_session_process.erl419
-rw-r--r--deps/rabbitmq_amqp1_0/src/rabbit_amqp1_0_session_sup.erl62
-rw-r--r--deps/rabbitmq_amqp1_0/src/rabbit_amqp1_0_session_sup_sup.erl38
-rw-r--r--deps/rabbitmq_amqp1_0/src/rabbit_amqp1_0_util.erl72
-rw-r--r--deps/rabbitmq_amqp1_0/src/rabbit_amqp1_0_writer.erl292
-rw-r--r--deps/rabbitmq_amqp1_0/test/amqp10_client_SUITE.erl203
-rw-r--r--deps/rabbitmq_amqp1_0/test/command_SUITE.erl69
-rw-r--r--deps/rabbitmq_amqp1_0/test/proxy_protocol_SUITE.erl139
-rw-r--r--deps/rabbitmq_amqp1_0/test/system_SUITE.erl243
-rwxr-xr-xdeps/rabbitmq_amqp1_0/test/system_SUITE_data/fsharp-tests/Program.fs481
-rwxr-xr-xdeps/rabbitmq_amqp1_0/test/system_SUITE_data/fsharp-tests/fsharp-tests.fsproj14
-rw-r--r--deps/rabbitmq_amqp1_0/test/system_SUITE_data/java-tests/.gitignore1
-rwxr-xr-xdeps/rabbitmq_amqp1_0/test/system_SUITE_data/java-tests/.mvn/wrapper/MavenWrapperDownloader.java110
-rwxr-xr-xdeps/rabbitmq_amqp1_0/test/system_SUITE_data/java-tests/.mvn/wrapper/maven-wrapper.jarbin0 -> 48337 bytes
-rwxr-xr-xdeps/rabbitmq_amqp1_0/test/system_SUITE_data/java-tests/.mvn/wrapper/maven-wrapper.properties1
-rwxr-xr-xdeps/rabbitmq_amqp1_0/test/system_SUITE_data/java-tests/mvnw286
-rwxr-xr-xdeps/rabbitmq_amqp1_0/test/system_SUITE_data/java-tests/mvnw.cmd161
-rw-r--r--deps/rabbitmq_amqp1_0/test/system_SUITE_data/java-tests/pom.xml58
-rw-r--r--deps/rabbitmq_amqp1_0/test/system_SUITE_data/java-tests/src/test/java/com/rabbitmq/amqp1_0/tests/jms/RoundTripTest.java84
-rw-r--r--deps/rabbitmq_amqp1_0/test/unit_SUITE.erl39
-rw-r--r--deps/rabbitmq_auth_backend_cache/.gitignore19
-rw-r--r--deps/rabbitmq_auth_backend_cache/.travis.yml61
-rw-r--r--deps/rabbitmq_auth_backend_cache/CODE_OF_CONDUCT.md44
-rw-r--r--deps/rabbitmq_auth_backend_cache/CONTRIBUTING.md103
-rw-r--r--deps/rabbitmq_auth_backend_cache/LICENSE4
-rw-r--r--deps/rabbitmq_auth_backend_cache/LICENSE-MPL-RabbitMQ373
-rw-r--r--deps/rabbitmq_auth_backend_cache/Makefile32
-rw-r--r--deps/rabbitmq_auth_backend_cache/README.md225
-rw-r--r--deps/rabbitmq_auth_backend_cache/erlang.mk7808
-rw-r--r--deps/rabbitmq_auth_backend_cache/priv/schema/rabbitmq_auth_backend_cache.schema62
-rw-r--r--deps/rabbitmq_auth_backend_cache/rabbitmq-components.mk359
-rw-r--r--deps/rabbitmq_auth_backend_cache/src/rabbit_auth_backend_cache.erl107
-rw-r--r--deps/rabbitmq_auth_backend_cache/src/rabbit_auth_backend_cache_app.erl37
-rw-r--r--deps/rabbitmq_auth_backend_cache/src/rabbit_auth_cache.erl35
-rw-r--r--deps/rabbitmq_auth_backend_cache/src/rabbit_auth_cache_dict.erl61
-rw-r--r--deps/rabbitmq_auth_backend_cache/src/rabbit_auth_cache_ets.erl71
-rw-r--r--deps/rabbitmq_auth_backend_cache/src/rabbit_auth_cache_ets_segmented.erl116
-rw-r--r--deps/rabbitmq_auth_backend_cache/src/rabbit_auth_cache_ets_segmented_stateless.erl129
-rw-r--r--deps/rabbitmq_auth_backend_cache/test/config_schema_SUITE.erl55
-rw-r--r--deps/rabbitmq_auth_backend_cache/test/config_schema_SUITE_data/rabbitmq_auth_backend_cache.snippets55
-rw-r--r--deps/rabbitmq_auth_backend_cache/test/rabbit_auth_backend_cache_SUITE.erl169
-rw-r--r--deps/rabbitmq_auth_backend_cache/test/rabbit_auth_cache_SUITE.erl200
-rw-r--r--deps/rabbitmq_auth_backend_http/.gitignore22
-rw-r--r--deps/rabbitmq_auth_backend_http/.travis.yml61
-rw-r--r--deps/rabbitmq_auth_backend_http/CODE_OF_CONDUCT.md44
-rw-r--r--deps/rabbitmq_auth_backend_http/CONTRIBUTING.md103
-rw-r--r--deps/rabbitmq_auth_backend_http/LICENSE4
-rw-r--r--deps/rabbitmq_auth_backend_http/LICENSE-MPL-RabbitMQ373
-rw-r--r--deps/rabbitmq_auth_backend_http/Makefile33
-rw-r--r--deps/rabbitmq_auth_backend_http/README.md170
-rw-r--r--deps/rabbitmq_auth_backend_http/TODO1
-rw-r--r--deps/rabbitmq_auth_backend_http/erlang.mk7808
-rw-r--r--deps/rabbitmq_auth_backend_http/examples/README.md235
-rw-r--r--deps/rabbitmq_auth_backend_http/examples/docker-compose.yml12
-rw-r--r--deps/rabbitmq_auth_backend_http/examples/docker/enabled_plugins1
-rw-r--r--deps/rabbitmq_auth_backend_http/examples/docker/nodered/Dockerfile5
-rw-r--r--deps/rabbitmq_auth_backend_http/examples/docker/nodered/docker-compose.yml8
-rw-r--r--deps/rabbitmq_auth_backend_http/examples/docker/nodered/flows.json1
-rw-r--r--deps/rabbitmq_auth_backend_http/examples/docker/nodered/flows_cred.json1
-rw-r--r--deps/rabbitmq_auth_backend_http/examples/docker/nodered/settings.js260
-rw-r--r--deps/rabbitmq_auth_backend_http/examples/docker/rabbitmq.conf22
-rw-r--r--deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_django/.gitignore1
-rw-r--r--deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_django/Dockerfile15
-rw-r--r--deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_django/docker-compose.yml6
-rwxr-xr-xdeps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_django/manage.py22
-rw-r--r--deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_django/rabbitmq_auth_backend_django/__init__.py0
-rw-r--r--deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_django/rabbitmq_auth_backend_django/auth/__init__.py0
-rw-r--r--deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_django/rabbitmq_auth_backend_django/auth/models.py3
-rw-r--r--deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_django/rabbitmq_auth_backend_django/auth/tests.py23
-rw-r--r--deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_django/rabbitmq_auth_backend_django/auth/views.py41
-rw-r--r--deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_django/rabbitmq_auth_backend_django/settings.py155
-rw-r--r--deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_django/rabbitmq_auth_backend_django/urls.py26
-rw-r--r--deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_django/rabbitmq_auth_backend_django/wsgi.py16
-rwxr-xr-xdeps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_django/start.sh3
-rw-r--r--deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_php/.gitignore3
-rw-r--r--deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_php/composer.json19
-rw-r--r--deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_php/src/bootstrap.php136
-rw-r--r--deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_php/src/resource.php14
-rw-r--r--deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_php/src/topic.php14
-rw-r--r--deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_php/src/user.php14
-rw-r--r--deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_php/src/vhost.php14
-rwxr-xr-xdeps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_php/var/log.log0
-rw-r--r--deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/.gitignore3
-rw-r--r--deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/.mvn/wrapper/MavenWrapperDownloader.java117
-rw-r--r--deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/.mvn/wrapper/maven-wrapper.jarbin0 -> 50710 bytes
-rw-r--r--deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/.mvn/wrapper/maven-wrapper.properties2
-rwxr-xr-xdeps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/mvnw310
-rw-r--r--deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/mvnw.cmd182
-rw-r--r--deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/pom.xml44
-rw-r--r--deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/src/main/java/com/rabbitmq/examples/AuthBackendHttpController.java68
-rw-r--r--deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/src/main/java/com/rabbitmq/examples/BaseCheck.java40
-rw-r--r--deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/src/main/java/com/rabbitmq/examples/RabbitMqAuthBackendHttp.java68
-rw-r--r--deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/src/main/java/com/rabbitmq/examples/ResourceCheck.java47
-rw-r--r--deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/src/main/java/com/rabbitmq/examples/TopicCheck.java29
-rw-r--r--deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/src/main/java/com/rabbitmq/examples/User.java62
-rw-r--r--deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/src/main/java/com/rabbitmq/examples/VirtualHostCheck.java15
-rw-r--r--deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/src/test/java/com/rabbitmq/examples/AuthBackendHttpControllerTest.java66
-rw-r--r--deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/src/test/resources/logback-test.xml16
-rw-r--r--deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot_kotlin/.gitignore25
-rw-r--r--deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot_kotlin/.mvn/wrapper/maven-wrapper.jarbin0 -> 48337 bytes
-rw-r--r--deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot_kotlin/.mvn/wrapper/maven-wrapper.properties1
-rw-r--r--deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot_kotlin/README.md34
-rwxr-xr-xdeps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot_kotlin/mvnw286
-rw-r--r--deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot_kotlin/mvnw.cmd161
-rw-r--r--deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot_kotlin/pom.xml118
-rw-r--r--deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot_kotlin/src/main/kotlin/com/rabbitmq/examples/AuthController.kt73
-rw-r--r--deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot_kotlin/src/main/kotlin/com/rabbitmq/examples/Configuration.kt20
-rw-r--r--deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot_kotlin/src/main/kotlin/com/rabbitmq/examples/RabbitmqAuthBackendApplication.kt18
-rw-r--r--deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot_kotlin/src/main/resources/application.properties2
-rw-r--r--deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot_kotlin/src/main/resources/rabbitmq.conf11
-rw-r--r--deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot_kotlin/src/test/kotlin/com/rabbitmq/examples/AuthApiTest.kt127
-rw-r--r--deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnet/App_Start/WebApiConfig.cs12
-rw-r--r--deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnet/Controllers/AuthController.cs143
-rw-r--r--deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnet/Global.asax1
-rw-r--r--deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnet/Global.asax.cs12
-rw-r--r--deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnet/Properties/AssemblyInfo.cs35
-rw-r--r--deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnet/Web.Debug.config30
-rw-r--r--deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnet/Web.Release.config31
-rw-r--r--deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnet/Web.config45
-rw-r--r--deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnet/WebApiHttpAuthService.csproj136
-rw-r--r--deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnet/WebApiHttpAuthService.csproj.user43
-rw-r--r--deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnet/WebApiHttpAuthService.sln25
-rw-r--r--deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnet/packages.config8
-rw-r--r--deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnetcore/AuthResult.cs22
-rw-r--r--deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnetcore/Controllers/AuthController.cs110
-rw-r--r--deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnetcore/Program.cs17
-rw-r--r--deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnetcore/Properties/launchSettings.json30
-rw-r--r--deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnetcore/RabbitMqAuthBackendHttp.csproj18
-rw-r--r--deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnetcore/RabbitMqAuthBackendHttp.sln37
-rw-r--r--deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnetcore/Requests/Resource.cs11
-rw-r--r--deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnetcore/Requests/ResourceAuthRequest.cs24
-rw-r--r--deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnetcore/Requests/TopicAuthRequest.cs27
-rw-r--r--deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnetcore/Requests/UserAuthRequest.cs9
-rw-r--r--deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnetcore/Requests/VhostAuthRequest.cs12
-rw-r--r--deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnetcore/Startup.cs38
-rw-r--r--deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnetcore/appsettings.Development.json9
-rw-r--r--deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnetcore/appsettings.json8
-rw-r--r--deps/rabbitmq_auth_backend_http/priv/schema/rabbitmq_auth_backend_http.schema21
-rw-r--r--deps/rabbitmq_auth_backend_http/rabbitmq-components.mk359
-rw-r--r--deps/rabbitmq_auth_backend_http/src/rabbit_auth_backend_http.erl188
-rw-r--r--deps/rabbitmq_auth_backend_http/src/rabbit_auth_backend_http_app.erl25
-rw-r--r--deps/rabbitmq_auth_backend_http/test/auth_SUITE.erl62
-rw-r--r--deps/rabbitmq_auth_backend_http/test/auth_http_mock.erl26
-rw-r--r--deps/rabbitmq_auth_backend_http/test/config_schema_SUITE.erl55
-rw-r--r--deps/rabbitmq_auth_backend_http/test/config_schema_SUITE_data/rabbitmq_auth_backend_http.snippets26
-rw-r--r--deps/rabbitmq_auth_backend_http/test/unit_SUITE.erl54
-rw-r--r--deps/rabbitmq_auth_backend_ldap/.gitignore20
-rw-r--r--deps/rabbitmq_auth_backend_ldap/.travis.yml63
-rw-r--r--deps/rabbitmq_auth_backend_ldap/.travis.yml.patch11
-rw-r--r--deps/rabbitmq_auth_backend_ldap/CODE_OF_CONDUCT.md44
-rw-r--r--deps/rabbitmq_auth_backend_ldap/CONTRIBUTING.md103
-rw-r--r--deps/rabbitmq_auth_backend_ldap/LICENSE4
-rw-r--r--deps/rabbitmq_auth_backend_ldap/LICENSE-MPL-RabbitMQ373
-rw-r--r--deps/rabbitmq_auth_backend_ldap/Makefile52
-rw-r--r--deps/rabbitmq_auth_backend_ldap/README-authorisation.md1
-rw-r--r--deps/rabbitmq_auth_backend_ldap/README.md37
-rw-r--r--deps/rabbitmq_auth_backend_ldap/TESTING.md25
-rw-r--r--deps/rabbitmq_auth_backend_ldap/erlang.mk7808
-rw-r--r--deps/rabbitmq_auth_backend_ldap/example/README.md2
-rw-r--r--deps/rabbitmq_auth_backend_ldap/example/global.ldif27
-rw-r--r--deps/rabbitmq_auth_backend_ldap/example/memberof_init.ldif17
-rw-r--r--deps/rabbitmq_auth_backend_ldap/example/refint_1.ldif3
-rw-r--r--deps/rabbitmq_auth_backend_ldap/example/refint_2.ldif7
-rw-r--r--deps/rabbitmq_auth_backend_ldap/priv/schema/rabbitmq_auth_backend_ldap.schema339
-rw-r--r--deps/rabbitmq_auth_backend_ldap/rabbitmq-components.mk359
-rw-r--r--deps/rabbitmq_auth_backend_ldap/src/rabbit_auth_backend_ldap.erl928
-rw-r--r--deps/rabbitmq_auth_backend_ldap/src/rabbit_auth_backend_ldap_app.erl55
-rw-r--r--deps/rabbitmq_auth_backend_ldap/src/rabbit_auth_backend_ldap_util.erl34
-rw-r--r--deps/rabbitmq_auth_backend_ldap/test/config_schema_SUITE.erl55
-rw-r--r--deps/rabbitmq_auth_backend_ldap/test/config_schema_SUITE_data/certs/cacert.pem1
-rw-r--r--deps/rabbitmq_auth_backend_ldap/test/config_schema_SUITE_data/certs/cert.pem1
-rw-r--r--deps/rabbitmq_auth_backend_ldap/test/config_schema_SUITE_data/certs/key.pem1
-rw-r--r--deps/rabbitmq_auth_backend_ldap/test/config_schema_SUITE_data/rabbitmq_auth_backend_ldap.snippets276
-rw-r--r--deps/rabbitmq_auth_backend_ldap/test/rabbit_ldap_seed.erl201
-rw-r--r--deps/rabbitmq_auth_backend_ldap/test/system_SUITE.erl949
-rwxr-xr-xdeps/rabbitmq_auth_backend_ldap/test/system_SUITE_data/init-slapd.sh98
-rw-r--r--deps/rabbitmq_auth_backend_ldap/test/unit_SUITE.erl47
-rw-r--r--deps/rabbitmq_auth_backend_oauth2/.gitignore17
-rw-r--r--deps/rabbitmq_auth_backend_oauth2/.travis.yml59
-rw-r--r--deps/rabbitmq_auth_backend_oauth2/CODE_OF_CONDUCT.md44
-rw-r--r--deps/rabbitmq_auth_backend_oauth2/CONTRIBUTING.md103
-rw-r--r--deps/rabbitmq_auth_backend_oauth2/LICENSE4
-rw-r--r--deps/rabbitmq_auth_backend_oauth2/LICENSE-MPL-RabbitMQ373
-rw-r--r--deps/rabbitmq_auth_backend_oauth2/Makefile20
-rw-r--r--deps/rabbitmq_auth_backend_oauth2/README.md280
-rw-r--r--deps/rabbitmq_auth_backend_oauth2/demo/declare_queues.rb19
-rw-r--r--deps/rabbitmq_auth_backend_oauth2/demo/rsa_docker/Makefile101
-rw-r--r--deps/rabbitmq_auth_backend_oauth2/demo/rsa_docker/check_token.rb29
-rw-r--r--deps/rabbitmq_auth_backend_oauth2/demo/rsa_docker/rabbitmq/enabled_plugins1
-rw-r--r--deps/rabbitmq_auth_backend_oauth2/demo/rsa_docker/rabbitmq/rabbitmq.config30
-rw-r--r--deps/rabbitmq_auth_backend_oauth2/demo/rsa_docker/uaa/log4j2.properties28
-rw-r--r--deps/rabbitmq_auth_backend_oauth2/demo/rsa_docker/uaa/uaa.yml104
-rw-r--r--deps/rabbitmq_auth_backend_oauth2/demo/rsa_keys/rabbitmq.config23
-rw-r--r--deps/rabbitmq_auth_backend_oauth2/demo/rsa_keys/uaa.yml41
-rwxr-xr-xdeps/rabbitmq_auth_backend_oauth2/demo/setup.sh81
-rw-r--r--deps/rabbitmq_auth_backend_oauth2/demo/symmetric_keys/rabbitmq.config31
-rw-r--r--deps/rabbitmq_auth_backend_oauth2/demo/symmetric_keys/uaa.yml4
-rw-r--r--deps/rabbitmq_auth_backend_oauth2/erlang.mk7808
-rw-r--r--deps/rabbitmq_auth_backend_oauth2/rabbitmq-components.mk359
-rwxr-xr-xdeps/rabbitmq_auth_backend_oauth2/scripts/seed.sh43
-rw-r--r--deps/rabbitmq_auth_backend_oauth2/src/Elixir.RabbitMQ.CLI.Ctl.Commands.AddUaaKeyCommand.erl143
-rw-r--r--deps/rabbitmq_auth_backend_oauth2/src/rabbit_auth_backend_oauth2.erl318
-rw-r--r--deps/rabbitmq_auth_backend_oauth2/src/rabbit_auth_backend_oauth2_app.erl26
-rw-r--r--deps/rabbitmq_auth_backend_oauth2/src/rabbit_oauth2_scope.erl90
-rw-r--r--deps/rabbitmq_auth_backend_oauth2/src/uaa_jwt.erl122
-rw-r--r--deps/rabbitmq_auth_backend_oauth2/src/uaa_jwt_jwk.erl84
-rw-r--r--deps/rabbitmq_auth_backend_oauth2/src/uaa_jwt_jwt.erl48
-rw-r--r--deps/rabbitmq_auth_backend_oauth2/src/wildcard.erl58
-rw-r--r--deps/rabbitmq_auth_backend_oauth2/test/add_uaa_key_command_SUITE.erl75
-rw-r--r--deps/rabbitmq_auth_backend_oauth2/test/rabbit_auth_backend_oauth2_test_util.erl99
-rw-r--r--deps/rabbitmq_auth_backend_oauth2/test/scope_SUITE.erl341
-rw-r--r--deps/rabbitmq_auth_backend_oauth2/test/system_SUITE.erl373
-rw-r--r--deps/rabbitmq_auth_backend_oauth2/test/unit_SUITE.erl551
-rw-r--r--deps/rabbitmq_auth_backend_oauth2/test/wildcard_match_SUITE.erl105
-rw-r--r--deps/rabbitmq_auth_mechanism_ssl/.gitignore17
-rw-r--r--deps/rabbitmq_auth_mechanism_ssl/.travis.yml61
-rw-r--r--deps/rabbitmq_auth_mechanism_ssl/CODE_OF_CONDUCT.md44
-rw-r--r--deps/rabbitmq_auth_mechanism_ssl/CONTRIBUTING.md103
-rw-r--r--deps/rabbitmq_auth_mechanism_ssl/LICENSE4
-rw-r--r--deps/rabbitmq_auth_mechanism_ssl/LICENSE-MPL-RabbitMQ373
-rw-r--r--deps/rabbitmq_auth_mechanism_ssl/Makefile27
-rw-r--r--deps/rabbitmq_auth_mechanism_ssl/README.md86
-rw-r--r--deps/rabbitmq_auth_mechanism_ssl/erlang.mk7808
-rw-r--r--deps/rabbitmq_auth_mechanism_ssl/rabbitmq-components.mk359
-rw-r--r--deps/rabbitmq_auth_mechanism_ssl/src/rabbit_auth_mechanism_ssl.erl68
-rw-r--r--deps/rabbitmq_auth_mechanism_ssl/src/rabbit_auth_mechanism_ssl_app.erl26
-rw-r--r--deps/rabbitmq_aws/.editorconfig15
-rw-r--r--deps/rabbitmq_aws/.gitignore30
-rw-r--r--deps/rabbitmq_aws/.travis.yml59
-rw-r--r--deps/rabbitmq_aws/LICENSE6
-rw-r--r--deps/rabbitmq_aws/LICENSE-erlcloud22
-rw-r--r--deps/rabbitmq_aws/LICENSE-httpc_aws29
-rw-r--r--deps/rabbitmq_aws/LICENSE-rabbitmq_aws29
-rw-r--r--deps/rabbitmq_aws/Makefile22
-rw-r--r--deps/rabbitmq_aws/README.md104
-rw-r--r--deps/rabbitmq_aws/erlang.mk7808
-rw-r--r--deps/rabbitmq_aws/include/rabbitmq_aws.hrl115
-rw-r--r--deps/rabbitmq_aws/rabbitmq-components.mk359
-rw-r--r--deps/rabbitmq_aws/rabbitmq_aws.iml25
-rw-r--r--deps/rabbitmq_aws/src/rabbitmq_aws.erl472
-rw-r--r--deps/rabbitmq_aws/src/rabbitmq_aws_app.erl22
-rw-r--r--deps/rabbitmq_aws/src/rabbitmq_aws_config.erl694
-rw-r--r--deps/rabbitmq_aws/src/rabbitmq_aws_json.erl62
-rw-r--r--deps/rabbitmq_aws/src/rabbitmq_aws_sign.erl289
-rw-r--r--deps/rabbitmq_aws/src/rabbitmq_aws_sup.erl20
-rw-r--r--deps/rabbitmq_aws/src/rabbitmq_aws_urilib.erl122
-rw-r--r--deps/rabbitmq_aws/src/rabbitmq_aws_xml.erl46
-rw-r--r--deps/rabbitmq_aws/test/src/rabbitmq_aws_all_tests.erl18
-rw-r--r--deps/rabbitmq_aws/test/src/rabbitmq_aws_app_tests.erl24
-rw-r--r--deps/rabbitmq_aws/test/src/rabbitmq_aws_config_tests.erl344
-rw-r--r--deps/rabbitmq_aws/test/src/rabbitmq_aws_json_tests.erl63
-rw-r--r--deps/rabbitmq_aws/test/src/rabbitmq_aws_sign_tests.erl291
-rw-r--r--deps/rabbitmq_aws/test/src/rabbitmq_aws_sup_tests.erl27
-rw-r--r--deps/rabbitmq_aws/test/src/rabbitmq_aws_tests.erl479
-rw-r--r--deps/rabbitmq_aws/test/src/rabbitmq_aws_urilib_tests.erl154
-rw-r--r--deps/rabbitmq_aws/test/src/rabbitmq_aws_xml_tests.erl40
-rw-r--r--deps/rabbitmq_aws/test/src/test_aws_config.ini25
-rw-r--r--deps/rabbitmq_aws/test/src/test_aws_credentials.ini16
-rw-r--r--deps/rabbitmq_cli/.gitignore12
-rw-r--r--deps/rabbitmq_cli/.travis.yml57
-rw-r--r--deps/rabbitmq_cli/.travis.yml.patch62
-rw-r--r--deps/rabbitmq_cli/CODE_OF_CONDUCT.md44
-rw-r--r--deps/rabbitmq_cli/COMMAND_TUTORIAL.md459
-rw-r--r--deps/rabbitmq_cli/CONTRIBUTING.md143
-rw-r--r--deps/rabbitmq_cli/DESIGN.md483
-rw-r--r--deps/rabbitmq_cli/LICENSE4
-rw-r--r--deps/rabbitmq_cli/LICENSE-MPL-RabbitMQ373
-rw-r--r--deps/rabbitmq_cli/Makefile160
-rw-r--r--deps/rabbitmq_cli/README.md129
-rw-r--r--deps/rabbitmq_cli/config/config.exs37
-rw-r--r--deps/rabbitmq_cli/erlang.mk7296
-rw-r--r--deps/rabbitmq_cli/include/.gitkeep0
-rw-r--r--deps/rabbitmq_cli/lib/rabbit_common/records.ex19
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/auto_complete.ex118
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/command_behaviour.ex170
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/core/accepts_default_switches_and_timeout.ex16
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/core/accepts_no_positional_arguments.ex21
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/core/accepts_one_positional_argument.ex25
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/core/accepts_one_positive_integer_argument.ex34
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/core/accepts_two_positional_arguments.ex25
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/core/alarms.ex88
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/core/ansi.ex35
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/core/code_path.ex108
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/core/command_modules.ex196
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/core/config.ex200
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/core/data_coercion.ex21
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/core/distribution.ex138
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/core/doc_guide.ex67
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/core/erl_eval.ex26
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/core/exit_codes.ex56
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/core/feature_flags.ex19
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/core/helpers.ex148
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/core/input.ex39
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/core/listeners.ex312
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/core/log_files.ex52
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/core/memory.ex105
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/core/merges_default_virtual_host.ex15
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/core/merges_no_defaults.ex15
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/core/networking.ex73
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/core/node_name.ex198
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/core/os_pid.ex56
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/core/output.ex72
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/core/parser.ex311
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/core/paths.ex55
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/core/platform.ex37
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/core/requires_rabbit_app_running.ex17
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/core/requires_rabbit_app_stopped.ex17
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/core/validators.ex115
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/core/version.ex24
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/add_user_command.ex98
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/add_vhost_command.ex62
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/authenticate_user_command.ex79
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/autocomplete_command.ex53
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/await_online_nodes_command.ex62
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/await_startup_command.ex45
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/cancel_sync_queue_command.ex52
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/change_cluster_node_type_command.ex87
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/change_password_command.ex76
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/clear_global_parameter_command.ex48
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/clear_operator_policy_command.ex49
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/clear_parameter_command.ex60
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/clear_password_command.ex46
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/clear_permissions_command.ex61
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/clear_policy_command.ex49
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/clear_topic_permissions_command.ex85
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/clear_user_limits_command.ex50
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/clear_vhost_limits_command.ex43
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/close_all_connections_command.ex124
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/close_connection_command.ex46
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/cluster_status_command.ex285
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/decode_command.ex116
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/delete_queue_command.ex125
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/delete_user_command.ex52
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/delete_vhost_command.ex41
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/enable_feature_flag_command.ex60
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/encode_command.ex102
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/environment_command.ex38
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/eval_command.ex107
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/eval_file_command.ex75
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/exec_command.ex80
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/export_definitions_command.ex143
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/force_boot_command.ex57
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/force_gc_command.ex35
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/force_reset_command.ex40
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/forget_cluster_node_command.ex122
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/help_command.ex343
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/hipe_compile_command.ex98
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/import_definitions_command.ex136
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/join_cluster_command.ex95
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_bindings_command.ex74
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_channels_command.ex85
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_ciphers_command.ex40
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_connections_command.ex84
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_consumers_command.ex108
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_exchanges_command.ex67
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_feature_flags_command.ex94
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_global_parameters_command.ex48
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_hashes_command.ex40
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_operator_policies_command.ex49
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_parameters_command.ex49
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_permissions_command.ex49
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_policies_command.ex49
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_queues_command.ex143
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_topic_permissions_command.ex48
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_unresponsive_queues_command.ex95
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_user_limits_command.ex91
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_user_permissions_command.ex58
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_user_topic_permissions_command.ex54
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_users_command.ex43
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_vhost_limits_command.ex90
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_vhosts_command.ex84
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/node_health_check_command.ex87
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/ping_command.ex90
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/purge_queue_command.ex73
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/rename_cluster_node_command.ex108
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/report_command.ex118
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/reset_command.ex40
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/restart_vhost_command.ex62
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/resume_listeners_command.ex45
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/rotate_logs_command.ex34
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_cluster_name_command.ex47
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_disk_free_limit_command.ex140
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_global_parameter_command.ex57
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_log_level_command.ex74
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_operator_policy_command.ex79
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_parameter_command.ex68
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_permissions_command.ex78
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_policy_command.ex76
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_topic_permissions_command.ex75
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_user_limits_command.ex48
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_user_tags_command.ex61
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_vhost_limits_command.ex50
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_vm_memory_high_watermark_command.ex146
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/shutdown_command.ex106
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/start_app_command.ex25
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/status_command.ex253
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/stop_app_command.ex26
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/stop_command.ex72
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/suspend_listeners_command.ex46
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/sync_queue_command.ex54
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/trace_off_command.ex42
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/trace_on_command.ex42
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/update_cluster_nodes_command.ex62
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/version_command.ex40
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/wait_command.ex269
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/info_keys.ex62
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/rpc_stream.ex124
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/default_output.ex94
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/alarms_command.ex77
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/certificates_command.ex55
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/check_alarms_command.ex86
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/check_certificate_expiration_command.ex101
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/check_local_alarms_command.ex85
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/check_port_connectivity_command.ex119
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/check_port_listener_command.ex82
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/check_protocol_listener_command.ex90
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/check_running_command.ex46
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/check_virtual_hosts_command.ex71
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/cipher_suites_command.ex122
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/command_line_arguments_command.ex41
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/consume_event_stream_command.ex71
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/disable_auth_attempt_source_tracking_command.ex35
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/discover_peers_command.ex36
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/enable_auth_attempt_source_tracking_command.ex36
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/erlang_cookie_hash_command.ex35
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/erlang_cookie_sources_command.ex116
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/erlang_version_command.ex72
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/is_booting_command.ex53
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/is_running_command.ex45
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/list_network_interfaces_command.ex77
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/list_node_auth_attempt_stats_command.ex75
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/listeners_command.ex92
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/log_location_command.ex56
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/log_tail_command.ex50
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/log_tail_stream_command.ex73
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/maybe_stuck_command.ex29
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/memory_breakdown_command.ex103
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/observer_command.ex48
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/os_env_command.ex67
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/reset_node_auth_attempt_metrics_command.ex37
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/resolve_hostname_command.ex94
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/resolver_info_command.ex84
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/runtime_thread_stats_command.ex70
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/schema_info_command.ex73
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/server_version_command.ex37
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/tls_versions_command.ex39
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/diagnostics_helpers.ex38
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/formatter_behaviour.ex42
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/csv.ex127
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/erlang.ex18
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/formatter_helpers.ex182
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/inspect.ex40
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/json.ex69
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/json_stream.ex75
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/msacc.ex19
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/plugins.ex247
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/pretty_table.ex87
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/report.ex52
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/string.ex26
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/string_per_line.ex42
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/table.ex138
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/information_unit.ex68
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/plugins/commands/directories_command.ex134
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/plugins/commands/disable_command.ex146
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/plugins/commands/enable_command.ex156
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/plugins/commands/is_enabled.ex154
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/plugins/commands/list_command.ex188
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/plugins/commands/set_command.ex133
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/plugins/error_output.ex55
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/plugins/plugins_helpers.ex232
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/printer_behaviour.ex21
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/printers/file.ex36
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/printers/std_io.ex28
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/printers/std_io_raw.ex28
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/queues/commands/add_member_command.ex70
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/queues/commands/check_if_node_is_mirror_sync_critical_command.ex105
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/queues/commands/check_if_node_is_quorum_critical_command.ex118
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/queues/commands/delete_member_command.ex58
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/queues/commands/grow_command.ex126
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/queues/commands/peek_command.ex112
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/queues/commands/quorum_status_command.ex54
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/queues/commands/rebalance_command.ex84
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/queues/commands/reclaim_quorum_memory_command.ex69
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/queues/commands/shrink_command.ex97
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/streams/commands/add_replica_command.ex64
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/streams/commands/delete_replica_command.ex61
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/streams/commands/set_stream_retention_policy_command.ex58
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/time_unit.ex48
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/upgrade/commands/await_online_quorum_plus_one_command.ex65
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/upgrade/commands/await_online_synchronized_mirror_command.ex65
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/upgrade/commands/drain_command.ex54
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/upgrade/commands/post_upgrade_command.ex39
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmq/cli/upgrade/commands/revive_command.ex56
-rw-r--r--deps/rabbitmq_cli/lib/rabbitmqctl.ex620
-rw-r--r--deps/rabbitmq_cli/mix.exs209
-rw-r--r--deps/rabbitmq_cli/rabbitmq-components.mk359
-rw-r--r--deps/rabbitmq_cli/test/core/args_processing_test.exs90
-rw-r--r--deps/rabbitmq_cli/test/core/auto_complete_test.exs85
-rw-r--r--deps/rabbitmq_cli/test/core/command_modules_test.exs202
-rw-r--r--deps/rabbitmq_cli/test/core/default_output_test.exs114
-rw-r--r--deps/rabbitmq_cli/test/core/distribution_test.exs48
-rw-r--r--deps/rabbitmq_cli/test/core/helpers_test.exs140
-rw-r--r--deps/rabbitmq_cli/test/core/information_unit_test.exs44
-rw-r--r--deps/rabbitmq_cli/test/core/json_stream_test.exs24
-rw-r--r--deps/rabbitmq_cli/test/core/listeners_test.exs64
-rw-r--r--deps/rabbitmq_cli/test/core/node_name_test.exs73
-rw-r--r--deps/rabbitmq_cli/test/core/os_pid_test.exs54
-rw-r--r--deps/rabbitmq_cli/test/core/parser_test.exs369
-rw-r--r--deps/rabbitmq_cli/test/core/rpc_stream_test.exs94
-rw-r--r--deps/rabbitmq_cli/test/core/table_formatter_test.exs46
-rw-r--r--deps/rabbitmq_cli/test/ctl/add_user_command_test.exs86
-rw-r--r--deps/rabbitmq_cli/test/ctl/add_vhost_command_test.exs68
-rw-r--r--deps/rabbitmq_cli/test/ctl/authenticate_user_command_test.exs81
-rw-r--r--deps/rabbitmq_cli/test/ctl/autocomplete_command_test.exs52
-rw-r--r--deps/rabbitmq_cli/test/ctl/await_online_nodes_command_test.exs44
-rw-r--r--deps/rabbitmq_cli/test/ctl/await_startup_command_test.exs49
-rw-r--r--deps/rabbitmq_cli/test/ctl/cancel_sync_command_test.exs64
-rw-r--r--deps/rabbitmq_cli/test/ctl/change_cluster_node_type_command_test.exs84
-rw-r--r--deps/rabbitmq_cli/test/ctl/change_password_command_test.exs80
-rw-r--r--deps/rabbitmq_cli/test/ctl/clear_global_parameter_command_test.exs86
-rw-r--r--deps/rabbitmq_cli/test/ctl/clear_operator_policy_command_test.exs127
-rw-r--r--deps/rabbitmq_cli/test/ctl/clear_parameter_command_test.exs138
-rw-r--r--deps/rabbitmq_cli/test/ctl/clear_password_command_test.exs64
-rw-r--r--deps/rabbitmq_cli/test/ctl/clear_permissions_command_test.exs100
-rw-r--r--deps/rabbitmq_cli/test/ctl/clear_policy_command_test.exs129
-rw-r--r--deps/rabbitmq_cli/test/ctl/clear_topic_permissions_command_test.exs107
-rw-r--r--deps/rabbitmq_cli/test/ctl/clear_user_limits_command_test.exs115
-rw-r--r--deps/rabbitmq_cli/test/ctl/clear_vhost_limits_command_test.exs103
-rw-r--r--deps/rabbitmq_cli/test/ctl/close_all_connections_command_test.exs147
-rw-r--r--deps/rabbitmq_cli/test/ctl/close_connection_command_test.exs96
-rw-r--r--deps/rabbitmq_cli/test/ctl/cluster_status_command_test.exs50
-rw-r--r--deps/rabbitmq_cli/test/ctl/decode_command_test.exs95
-rw-r--r--deps/rabbitmq_cli/test/ctl/delete_queue_command_test.exs119
-rw-r--r--deps/rabbitmq_cli/test/ctl/delete_user_command_test.exs59
-rw-r--r--deps/rabbitmq_cli/test/ctl/delete_vhost_command_test.exs67
-rw-r--r--deps/rabbitmq_cli/test/ctl/enable_feature_flag_test.exs70
-rw-r--r--deps/rabbitmq_cli/test/ctl/encode_command_test.exs92
-rw-r--r--deps/rabbitmq_cli/test/ctl/environment_command_test.exs45
-rw-r--r--deps/rabbitmq_cli/test/ctl/eval_command_test.exs74
-rw-r--r--deps/rabbitmq_cli/test/ctl/eval_file_command_test.exs72
-rw-r--r--deps/rabbitmq_cli/test/ctl/exec_command_test.exs47
-rw-r--r--deps/rabbitmq_cli/test/ctl/export_definitions_command_test.exs138
-rw-r--r--deps/rabbitmq_cli/test/ctl/force_boot_command_test.exs63
-rw-r--r--deps/rabbitmq_cli/test/ctl/force_gc_command_test.exs46
-rw-r--r--deps/rabbitmq_cli/test/ctl/force_reset_command_test.exs68
-rw-r--r--deps/rabbitmq_cli/test/ctl/forget_cluster_node_command_test.exs132
-rw-r--r--deps/rabbitmq_cli/test/ctl/help_command_test.exs76
-rw-r--r--deps/rabbitmq_cli/test/ctl/import_definitions_command_test.exs88
-rw-r--r--deps/rabbitmq_cli/test/ctl/join_cluster_command_test.exs104
-rw-r--r--deps/rabbitmq_cli/test/ctl/list_bindings_command_test.exs85
-rw-r--r--deps/rabbitmq_cli/test/ctl/list_channels_command_test.exs118
-rw-r--r--deps/rabbitmq_cli/test/ctl/list_ciphers_command_test.exs29
-rw-r--r--deps/rabbitmq_cli/test/ctl/list_connections_command_test.exs90
-rw-r--r--deps/rabbitmq_cli/test/ctl/list_consumers_command_test.exs213
-rw-r--r--deps/rabbitmq_cli/test/ctl/list_exchanges_command_test.exs160
-rw-r--r--deps/rabbitmq_cli/test/ctl/list_feature_flags_command_test.exs122
-rw-r--r--deps/rabbitmq_cli/test/ctl/list_global_parameters_command_test.exs86
-rw-r--r--deps/rabbitmq_cli/test/ctl/list_hashes_command_test.exs29
-rw-r--r--deps/rabbitmq_cli/test/ctl/list_operator_policies_command_test.exs142
-rw-r--r--deps/rabbitmq_cli/test/ctl/list_parameters_command_test.exs154
-rw-r--r--deps/rabbitmq_cli/test/ctl/list_permissions_command_test.exs92
-rw-r--r--deps/rabbitmq_cli/test/ctl/list_policies_command_test.exs144
-rw-r--r--deps/rabbitmq_cli/test/ctl/list_queues_command_test.exs145
-rw-r--r--deps/rabbitmq_cli/test/ctl/list_topic_permissions_command_test.exs85
-rw-r--r--deps/rabbitmq_cli/test/ctl/list_user_limits_command_test.exs103
-rw-r--r--deps/rabbitmq_cli/test/ctl/list_user_permissions_command_test.exs91
-rw-r--r--deps/rabbitmq_cli/test/ctl/list_user_topic_permissions_command_test.exs72
-rw-r--r--deps/rabbitmq_cli/test/ctl/list_users_command_test.exs74
-rw-r--r--deps/rabbitmq_cli/test/ctl/list_vhost_limits_command_test.exs111
-rw-r--r--deps/rabbitmq_cli/test/ctl/list_vhosts_command_test.exs160
-rw-r--r--deps/rabbitmq_cli/test/ctl/node_health_check_command_test.exs65
-rw-r--r--deps/rabbitmq_cli/test/ctl/ping_command_test.exs56
-rw-r--r--deps/rabbitmq_cli/test/ctl/purge_queue_command_test.exs88
-rw-r--r--deps/rabbitmq_cli/test/ctl/rename_cluster_node_command_test.exs102
-rw-r--r--deps/rabbitmq_cli/test/ctl/report_command_test.exs44
-rw-r--r--deps/rabbitmq_cli/test/ctl/reset_command_test.exs68
-rw-r--r--deps/rabbitmq_cli/test/ctl/restart_vhost_command_test.exs95
-rw-r--r--deps/rabbitmq_cli/test/ctl/resume_listeners_command_test.exs67
-rw-r--r--deps/rabbitmq_cli/test/ctl/rotate_logs_command_test.exs40
-rw-r--r--deps/rabbitmq_cli/test/ctl/set_cluster_name_command_test.exs63
-rw-r--r--deps/rabbitmq_cli/test/ctl/set_disk_free_limit_command_test.exs173
-rw-r--r--deps/rabbitmq_cli/test/ctl/set_global_parameter_command_test.exs82
-rw-r--r--deps/rabbitmq_cli/test/ctl/set_log_level_command_test.exs44
-rw-r--r--deps/rabbitmq_cli/test/ctl/set_operator_policy_command_test.exs153
-rw-r--r--deps/rabbitmq_cli/test/ctl/set_parameter_command_test.exs136
-rw-r--r--deps/rabbitmq_cli/test/ctl/set_permissions_command_test.exs114
-rw-r--r--deps/rabbitmq_cli/test/ctl/set_policy_command_test.exs217
-rw-r--r--deps/rabbitmq_cli/test/ctl/set_topic_permissions_command_test.exs114
-rw-r--r--deps/rabbitmq_cli/test/ctl/set_user_limits_command_test.exs137
-rw-r--r--deps/rabbitmq_cli/test/ctl/set_user_tags_command_test.exs144
-rw-r--r--deps/rabbitmq_cli/test/ctl/set_vhost_limits_command_test.exs137
-rw-r--r--deps/rabbitmq_cli/test/ctl/set_vm_memory_high_watermark_command_test.exs162
-rw-r--r--deps/rabbitmq_cli/test/ctl/shutdown_command_test.exs53
-rw-r--r--deps/rabbitmq_cli/test/ctl/start_app_command_test.exs50
-rw-r--r--deps/rabbitmq_cli/test/ctl/status_command_test.exs40
-rw-r--r--deps/rabbitmq_cli/test/ctl/stop_app_command_test.exs49
-rw-r--r--deps/rabbitmq_cli/test/ctl/stop_command_test.exs52
-rw-r--r--deps/rabbitmq_cli/test/ctl/suspend_listeners_command_test.exs67
-rw-r--r--deps/rabbitmq_cli/test/ctl/sync_queue_command_test.exs64
-rw-r--r--deps/rabbitmq_cli/test/ctl/trace_off_command_test.exs78
-rw-r--r--deps/rabbitmq_cli/test/ctl/trace_on_command_test.exs79
-rw-r--r--deps/rabbitmq_cli/test/ctl/update_cluster_nodes_command_test.exs80
-rw-r--r--deps/rabbitmq_cli/test/ctl/version_command_test.exs24
-rw-r--r--deps/rabbitmq_cli/test/ctl/wait_command_test.exs114
-rw-r--r--deps/rabbitmq_cli/test/diagnostics/alarms_command_test.exs69
-rw-r--r--deps/rabbitmq_cli/test/diagnostics/check_alarms_command_test.exs118
-rw-r--r--deps/rabbitmq_cli/test/diagnostics/check_local_alarms_command_test.exs111
-rw-r--r--deps/rabbitmq_cli/test/diagnostics/check_port_connectivity_command_test.exs59
-rw-r--r--deps/rabbitmq_cli/test/diagnostics/check_port_listener_command_test.exs62
-rw-r--r--deps/rabbitmq_cli/test/diagnostics/check_protocol_listener_command_test.exs68
-rw-r--r--deps/rabbitmq_cli/test/diagnostics/check_running_command_test.exs72
-rw-r--r--deps/rabbitmq_cli/test/diagnostics/check_virtual_hosts_command_test.exs50
-rw-r--r--deps/rabbitmq_cli/test/diagnostics/cipher_suites_command_test.exs101
-rw-r--r--deps/rabbitmq_cli/test/diagnostics/command_line_arguments_command_test.exs44
-rw-r--r--deps/rabbitmq_cli/test/diagnostics/consume_event_stream_command_test.exs73
-rw-r--r--deps/rabbitmq_cli/test/diagnostics/disable_auth_attempt_source_tracking_command_test.exs39
-rw-r--r--deps/rabbitmq_cli/test/diagnostics/discover_peers_command_test.exs39
-rw-r--r--deps/rabbitmq_cli/test/diagnostics/enable_auth_attempt_source_tracking_command_test.exs39
-rw-r--r--deps/rabbitmq_cli/test/diagnostics/erlang_cookie_hash_command_test.exs50
-rw-r--r--deps/rabbitmq_cli/test/diagnostics/erlang_cookie_sources_command_test.exs37
-rw-r--r--deps/rabbitmq_cli/test/diagnostics/erlang_version_command_test.exs72
-rw-r--r--deps/rabbitmq_cli/test/diagnostics/is_booting_command_test.exs72
-rw-r--r--deps/rabbitmq_cli/test/diagnostics/is_running_command_test.exs72
-rw-r--r--deps/rabbitmq_cli/test/diagnostics/list_network_interfaces_command_test.exs39
-rw-r--r--deps/rabbitmq_cli/test/diagnostics/list_node_auth_attempt_stats_command_test.exs39
-rw-r--r--deps/rabbitmq_cli/test/diagnostics/listeners_command_test.exs78
-rw-r--r--deps/rabbitmq_cli/test/diagnostics/log_location_command_test.exs98
-rw-r--r--deps/rabbitmq_cli/test/diagnostics/log_tail_command_test.exs115
-rw-r--r--deps/rabbitmq_cli/test/diagnostics/log_tail_stream_command_test.exs107
-rw-r--r--deps/rabbitmq_cli/test/diagnostics/maybe_stuck_command_test.exs48
-rw-r--r--deps/rabbitmq_cli/test/diagnostics/memory_breakdown_command_test.exs72
-rw-r--r--deps/rabbitmq_cli/test/diagnostics/observer_command_test.exs44
-rw-r--r--deps/rabbitmq_cli/test/diagnostics/os_env_command_test.exs62
-rw-r--r--deps/rabbitmq_cli/test/diagnostics/resolve_hostname_command_test.exs85
-rw-r--r--deps/rabbitmq_cli/test/diagnostics/resolver_info_command_test.exs65
-rw-r--r--deps/rabbitmq_cli/test/diagnostics/runtime_thread_stats_command_test.exs50
-rw-r--r--deps/rabbitmq_cli/test/diagnostics/schema_info_command_test.exs69
-rw-r--r--deps/rabbitmq_cli/test/diagnostics/server_version_command_test.exs48
-rw-r--r--deps/rabbitmq_cli/test/diagnostics/tls_versions_command_test.exs60
-rw-r--r--deps/rabbitmq_cli/test/fixtures/files/definitions.json40
-rw-r--r--deps/rabbitmq_cli/test/fixtures/files/empty_pidfile.pid0
-rw-r--r--deps/rabbitmq_cli/test/fixtures/files/invalid_erl_expression.escript1
-rw-r--r--deps/rabbitmq_cli/test/fixtures/files/invalid_pidfile.pid1
-rw-r--r--deps/rabbitmq_cli/test/fixtures/files/loaded_applications.escript1
-rw-r--r--deps/rabbitmq_cli/test/fixtures/files/valid_erl_expression.escript1
-rw-r--r--deps/rabbitmq_cli/test/fixtures/files/valid_pidfile.pid1
-rw-r--r--deps/rabbitmq_cli/test/fixtures/files/valid_pidfile_with_spaces.pid1
-rw-r--r--deps/rabbitmq_cli/test/fixtures/plugins/.gitignore1
-rw-r--r--deps/rabbitmq_cli/test/fixtures/plugins/plugins-subdirectory-01/mock_rabbitmq_plugins_01-0.1.0.ezbin0 -> 3281 bytes
-rw-r--r--deps/rabbitmq_cli/test/fixtures/plugins/plugins-subdirectory-01/mock_rabbitmq_plugins_01-0.2.0.ezbin0 -> 3281 bytes
-rw-r--r--deps/rabbitmq_cli/test/fixtures/plugins/plugins-subdirectory-01/mock_rabbitmq_plugins_02-0.1.0.ezbin0 -> 3288 bytes
-rw-r--r--deps/rabbitmq_cli/test/fixtures/plugins/plugins-subdirectory-02/mock_rabbitmq_plugins_02-0.2.0.ezbin0 -> 3288 bytes
-rw-r--r--deps/rabbitmq_cli/test/fixtures/plugins/plugins-subdirectory-03/mock_rabbitmq_plugins_03-0.1.0.ezbin0 -> 2518 bytes
-rw-r--r--deps/rabbitmq_cli/test/fixtures/plugins/plugins-subdirectory-04/mock_rabbitmq_plugins_04.ezbin0 -> 2460 bytes
-rw-r--r--deps/rabbitmq_cli/test/fixtures/plugins/plugins_with_version_requirements/mock_rabbitmq_plugin_for_3_7-0.1.0.ezbin0 -> 3357 bytes
-rw-r--r--deps/rabbitmq_cli/test/fixtures/plugins/plugins_with_version_requirements/mock_rabbitmq_plugin_for_3_8-0.1.0.ezbin0 -> 3363 bytes
-rw-r--r--deps/rabbitmq_cli/test/fixtures/plugins/plugins_with_version_requirements/mock_rabbitmq_plugin_for_3_8-0.1.0/ebin/mock_rabbitmq_plugin_for_3_8.app10
-rw-r--r--deps/rabbitmq_cli/test/fixtures/plugins/plugins_with_version_requirements/mock_rabbitmq_plugin_for_3_8-0.1.0/ebin/mock_rabbitmq_plugins_01_app.beambin0 -> 1316 bytes
-rw-r--r--deps/rabbitmq_cli/test/fixtures/plugins/plugins_with_version_requirements/mock_rabbitmq_plugin_for_3_8-0.1.0/ebin/mock_rabbitmq_plugins_01_sup.beambin0 -> 1460 bytes
-rw-r--r--deps/rabbitmq_cli/test/fixtures/plugins/plugins_with_version_requirements/mock_rabbitmq_plugins_01-0.1.0.ezbin0 -> 3276 bytes
-rw-r--r--deps/rabbitmq_cli/test/fixtures/plugins/plugins_with_version_requirements/mock_rabbitmq_plugins_02-0.1.0.ezbin0 -> 3283 bytes
-rw-r--r--deps/rabbitmq_cli/test/json_formatting.exs59
-rw-r--r--deps/rabbitmq_cli/test/plugins/directories_command_test.exs103
-rw-r--r--deps/rabbitmq_cli/test/plugins/disable_plugins_command_test.exs187
-rw-r--r--deps/rabbitmq_cli/test/plugins/enable_plugins_command_test.exs243
-rw-r--r--deps/rabbitmq_cli/test/plugins/is_enabled_command_test.exs103
-rw-r--r--deps/rabbitmq_cli/test/plugins/list_plugins_command_test.exs235
-rw-r--r--deps/rabbitmq_cli/test/plugins/plugins_formatter_test.exs45
-rw-r--r--deps/rabbitmq_cli/test/plugins/set_plugins_command_test.exs157
-rw-r--r--deps/rabbitmq_cli/test/queues/add_member_command_test.exs49
-rw-r--r--deps/rabbitmq_cli/test/queues/check_if_node_is_mirror_sync_critical_command_test.exs40
-rw-r--r--deps/rabbitmq_cli/test/queues/check_if_node_is_quorum_critical_command_test.exs40
-rw-r--r--deps/rabbitmq_cli/test/queues/delete_member_command_test.exs49
-rw-r--r--deps/rabbitmq_cli/test/queues/grow_command_test.exs67
-rw-r--r--deps/rabbitmq_cli/test/queues/peek_command_test.exs59
-rw-r--r--deps/rabbitmq_cli/test/queues/quorum_status_command_test.exs45
-rw-r--r--deps/rabbitmq_cli/test/queues/reclaim_quorum_memory_command_test.exs45
-rw-r--r--deps/rabbitmq_cli/test/queues/shrink_command_test.exs55
-rw-r--r--deps/rabbitmq_cli/test/rabbitmqctl_test.exs301
-rw-r--r--deps/rabbitmq_cli/test/streams/add_replica_command_test.exs57
-rw-r--r--deps/rabbitmq_cli/test/streams/delete_replica_command_test.exs57
-rw-r--r--deps/rabbitmq_cli/test/streams/set_stream_retention_policy_command_test.exs63
-rw-r--r--deps/rabbitmq_cli/test/test_helper.exs620
-rw-r--r--deps/rabbitmq_cli/test/upgrade/await_online_quorum_plus_one_command_test.exs45
-rw-r--r--deps/rabbitmq_cli/test/upgrade/await_online_synchronized_mirror_command_test.exs45
-rw-r--r--deps/rabbitmq_cli/test/upgrade/drain_command_test.exs57
-rw-r--r--deps/rabbitmq_cli/test/upgrade/post_upgrade_command_test.exs49
-rw-r--r--deps/rabbitmq_cli/test/upgrade/revive_command_test.exs57
-rw-r--r--deps/rabbitmq_codegen/.gitignore11
-rw-r--r--deps/rabbitmq_codegen/CODE_OF_CONDUCT.md44
-rw-r--r--deps/rabbitmq_codegen/CONTRIBUTING.md38
-rw-r--r--deps/rabbitmq_codegen/LICENSE6
-rw-r--r--deps/rabbitmq_codegen/LICENSE-MPL-RabbitMQ373
-rw-r--r--deps/rabbitmq_codegen/Makefile42
-rw-r--r--deps/rabbitmq_codegen/README.extensions.md189
-rw-r--r--deps/rabbitmq_codegen/amqp-1.0/messaging.xml168
-rw-r--r--deps/rabbitmq_codegen/amqp-1.0/security.xml76
-rw-r--r--deps/rabbitmq_codegen/amqp-1.0/transactions.xml73
-rw-r--r--deps/rabbitmq_codegen/amqp-1.0/transport.xml200
-rw-r--r--deps/rabbitmq_codegen/amqp-1.0/types.xml125
-rw-r--r--deps/rabbitmq_codegen/amqp-rabbitmq-0.8.json659
-rw-r--r--deps/rabbitmq_codegen/amqp-rabbitmq-0.9.1.json483
-rw-r--r--deps/rabbitmq_codegen/amqp_codegen.py287
-rw-r--r--deps/rabbitmq_codegen/credit_extension.json54
-rw-r--r--deps/rabbitmq_codegen/demo_extension.json18
-rw-r--r--deps/rabbitmq_codegen/license_info4
-rw-r--r--deps/rabbitmq_consistent_hash_exchange/.gitignore19
-rw-r--r--deps/rabbitmq_consistent_hash_exchange/.travis.yml61
-rw-r--r--deps/rabbitmq_consistent_hash_exchange/CODE_OF_CONDUCT.md44
-rw-r--r--deps/rabbitmq_consistent_hash_exchange/CONTRIBUTING.md99
-rw-r--r--deps/rabbitmq_consistent_hash_exchange/LICENSE4
-rw-r--r--deps/rabbitmq_consistent_hash_exchange/LICENSE-MPL-RabbitMQ373
-rw-r--r--deps/rabbitmq_consistent_hash_exchange/Makefile21
-rw-r--r--deps/rabbitmq_consistent_hash_exchange/README.md821
-rw-r--r--deps/rabbitmq_consistent_hash_exchange/erlang.mk7808
-rw-r--r--deps/rabbitmq_consistent_hash_exchange/examples/python/README.md50
-rw-r--r--deps/rabbitmq_consistent_hash_exchange/examples/python/example1.py33
-rw-r--r--deps/rabbitmq_consistent_hash_exchange/examples/python/example2.py43
-rw-r--r--deps/rabbitmq_consistent_hash_exchange/examples/python/example3.py42
-rw-r--r--deps/rabbitmq_consistent_hash_exchange/examples/python/requirements.txt1
-rw-r--r--deps/rabbitmq_consistent_hash_exchange/examples/ruby/Gemfile5
-rw-r--r--deps/rabbitmq_consistent_hash_exchange/examples/ruby/Gemfile.lock15
-rw-r--r--deps/rabbitmq_consistent_hash_exchange/examples/ruby/README.md50
-rw-r--r--deps/rabbitmq_consistent_hash_exchange/examples/ruby/example1.rb35
-rw-r--r--deps/rabbitmq_consistent_hash_exchange/examples/ruby/example2.rb37
-rw-r--r--deps/rabbitmq_consistent_hash_exchange/examples/ruby/example3.rb37
-rw-r--r--deps/rabbitmq_consistent_hash_exchange/include/rabbitmq_consistent_hash_exchange.hrl7
-rw-r--r--deps/rabbitmq_consistent_hash_exchange/rabbitmq-components.mk359
-rw-r--r--deps/rabbitmq_consistent_hash_exchange/src/Elixir.RabbitMQ.CLI.Diagnostics.Commands.ConsistentHashExchangeRingStateCommand.erl134
-rw-r--r--deps/rabbitmq_consistent_hash_exchange/src/rabbit_exchange_type_consistent_hash.erl343
-rw-r--r--deps/rabbitmq_consistent_hash_exchange/test/rabbit_exchange_type_consistent_hash_SUITE.erl605
-rw-r--r--deps/rabbitmq_event_exchange/.gitignore19
-rw-r--r--deps/rabbitmq_event_exchange/.travis.yml61
-rw-r--r--deps/rabbitmq_event_exchange/CODE_OF_CONDUCT.md44
-rw-r--r--deps/rabbitmq_event_exchange/CONTRIBUTING.md123
-rw-r--r--deps/rabbitmq_event_exchange/LICENSE4
-rw-r--r--deps/rabbitmq_event_exchange/LICENSE-MPL-RabbitMQ373
-rw-r--r--deps/rabbitmq_event_exchange/Makefile20
-rw-r--r--deps/rabbitmq_event_exchange/README.md169
-rw-r--r--deps/rabbitmq_event_exchange/erlang.mk7808
-rw-r--r--deps/rabbitmq_event_exchange/examples/java/QueueEvents.java44
-rw-r--r--deps/rabbitmq_event_exchange/include/rabbit_event_exchange.hrl1
-rw-r--r--deps/rabbitmq_event_exchange/priv/schema/rabbitmq_event_exchange.schema7
-rw-r--r--deps/rabbitmq_event_exchange/rabbitmq-components.mk359
-rw-r--r--deps/rabbitmq_event_exchange/src/rabbit_event_exchange_decorator.erl68
-rw-r--r--deps/rabbitmq_event_exchange/src/rabbit_exchange_type_event.erl247
-rw-r--r--deps/rabbitmq_event_exchange/test/config_schema_SUITE.erl54
-rw-r--r--deps/rabbitmq_event_exchange/test/config_schema_SUITE_data/rabbitmq_event_exchange.snippets19
-rw-r--r--deps/rabbitmq_event_exchange/test/config_schema_SUITE_data/schema/rabbitmq_event_exchange.schema7
-rw-r--r--deps/rabbitmq_event_exchange/test/system_SUITE.erl490
-rw-r--r--deps/rabbitmq_event_exchange/test/unit_SUITE.erl54
-rw-r--r--deps/rabbitmq_federation/.gitignore18
-rw-r--r--deps/rabbitmq_federation/.travis.yml61
-rw-r--r--deps/rabbitmq_federation/CODE_OF_CONDUCT.md44
-rw-r--r--deps/rabbitmq_federation/CONTRIBUTING.md99
-rw-r--r--deps/rabbitmq_federation/LICENSE4
-rw-r--r--deps/rabbitmq_federation/LICENSE-MPL-RabbitMQ373
-rw-r--r--deps/rabbitmq_federation/Makefile29
-rw-r--r--deps/rabbitmq_federation/README-hacking143
-rw-r--r--deps/rabbitmq_federation/README.md25
-rw-r--r--deps/rabbitmq_federation/erlang.mk7808
-rw-r--r--deps/rabbitmq_federation/etc/rabbit-test.sh24
-rwxr-xr-xdeps/rabbitmq_federation/etc/setup-rabbit-test.sh2
-rw-r--r--deps/rabbitmq_federation/include/rabbit_federation.hrl44
-rw-r--r--deps/rabbitmq_federation/rabbitmq-components.mk359
-rw-r--r--deps/rabbitmq_federation/src/Elixir.RabbitMQ.CLI.Ctl.Commands.FederationStatusCommand.erl117
-rw-r--r--deps/rabbitmq_federation/src/Elixir.RabbitMQ.CLI.Ctl.Commands.RestartFederationLinkCommand.erl84
-rw-r--r--deps/rabbitmq_federation/src/rabbit_federation_app.erl38
-rw-r--r--deps/rabbitmq_federation/src/rabbit_federation_db.erl47
-rw-r--r--deps/rabbitmq_federation/src/rabbit_federation_event.erl54
-rw-r--r--deps/rabbitmq_federation/src/rabbit_federation_exchange.erl105
-rw-r--r--deps/rabbitmq_federation/src/rabbit_federation_exchange_link.erl696
-rw-r--r--deps/rabbitmq_federation/src/rabbit_federation_exchange_link_sup_sup.erl75
-rw-r--r--deps/rabbitmq_federation/src/rabbit_federation_link_sup.erl109
-rw-r--r--deps/rabbitmq_federation/src/rabbit_federation_link_util.erl364
-rw-r--r--deps/rabbitmq_federation/src/rabbit_federation_parameters.erl139
-rw-r--r--deps/rabbitmq_federation/src/rabbit_federation_queue.erl111
-rw-r--r--deps/rabbitmq_federation/src/rabbit_federation_queue_link.erl330
-rw-r--r--deps/rabbitmq_federation/src/rabbit_federation_queue_link_sup_sup.erl87
-rw-r--r--deps/rabbitmq_federation/src/rabbit_federation_status.erl175
-rw-r--r--deps/rabbitmq_federation/src/rabbit_federation_sup.erl63
-rw-r--r--deps/rabbitmq_federation/src/rabbit_federation_upstream.erl164
-rw-r--r--deps/rabbitmq_federation/src/rabbit_federation_upstream_exchange.erl75
-rw-r--r--deps/rabbitmq_federation/src/rabbit_federation_util.erl102
-rw-r--r--deps/rabbitmq_federation/test/exchange_SUITE.erl1319
-rw-r--r--deps/rabbitmq_federation/test/federation_status_command_SUITE.erl168
-rw-r--r--deps/rabbitmq_federation/test/queue_SUITE.erl328
-rw-r--r--deps/rabbitmq_federation/test/rabbit_federation_status_SUITE.erl129
-rw-r--r--deps/rabbitmq_federation/test/rabbit_federation_test_util.erl354
-rw-r--r--deps/rabbitmq_federation/test/restart_federation_link_command_SUITE.erl101
-rw-r--r--deps/rabbitmq_federation/test/unit_SUITE.erl65
-rw-r--r--deps/rabbitmq_federation/test/unit_inbroker_SUITE.erl230
-rw-r--r--deps/rabbitmq_federation_management/.gitignore17
-rw-r--r--deps/rabbitmq_federation_management/.travis.yml61
-rw-r--r--deps/rabbitmq_federation_management/CODE_OF_CONDUCT.md44
-rw-r--r--deps/rabbitmq_federation_management/CONTRIBUTING.md99
-rw-r--r--deps/rabbitmq_federation_management/LICENSE13
-rw-r--r--deps/rabbitmq_federation_management/LICENSE-APACHE2-ExplorerCanvas202
-rw-r--r--deps/rabbitmq_federation_management/LICENSE-BSD-base64js25
-rw-r--r--deps/rabbitmq_federation_management/LICENSE-MIT-EJS1023
-rw-r--r--deps/rabbitmq_federation_management/LICENSE-MIT-Flot22
-rw-r--r--deps/rabbitmq_federation_management/LICENSE-MIT-Sammy06025
-rw-r--r--deps/rabbitmq_federation_management/LICENSE-MIT-jQuery16421
-rw-r--r--deps/rabbitmq_federation_management/LICENSE-MPL-RabbitMQ373
-rw-r--r--deps/rabbitmq_federation_management/Makefile21
-rw-r--r--deps/rabbitmq_federation_management/README.md44
-rw-r--r--deps/rabbitmq_federation_management/erlang.mk7808
-rw-r--r--deps/rabbitmq_federation_management/priv/www/js/federation.js89
-rw-r--r--deps/rabbitmq_federation_management/priv/www/js/tmpl/federation-upstream.ejs95
-rw-r--r--deps/rabbitmq_federation_management/priv/www/js/tmpl/federation-upstreams.ejs262
-rw-r--r--deps/rabbitmq_federation_management/priv/www/js/tmpl/federation.ejs103
-rw-r--r--deps/rabbitmq_federation_management/rabbitmq-components.mk359
-rw-r--r--deps/rabbitmq_federation_management/src/rabbit_federation_mgmt.erl143
-rw-r--r--deps/rabbitmq_federation_management/test/federation_mgmt_SUITE.erl257
-rw-r--r--deps/rabbitmq_jms_topic_exchange/.gitignore17
-rw-r--r--deps/rabbitmq_jms_topic_exchange/.travis.yml61
-rw-r--r--deps/rabbitmq_jms_topic_exchange/CODE_OF_CONDUCT.md44
-rw-r--r--deps/rabbitmq_jms_topic_exchange/CONTRIBUTING.md38
-rw-r--r--deps/rabbitmq_jms_topic_exchange/LICENSE4
-rw-r--r--deps/rabbitmq_jms_topic_exchange/LICENSE-MPL-RabbitMQ373
-rw-r--r--deps/rabbitmq_jms_topic_exchange/LICENSES.txt736
-rw-r--r--deps/rabbitmq_jms_topic_exchange/Makefile17
-rw-r--r--deps/rabbitmq_jms_topic_exchange/README.md64
-rw-r--r--deps/rabbitmq_jms_topic_exchange/erlang.mk7808
-rw-r--r--deps/rabbitmq_jms_topic_exchange/include/rabbit_jms_topic_exchange.hrl39
-rw-r--r--deps/rabbitmq_jms_topic_exchange/rabbitmq-components.mk359
-rw-r--r--deps/rabbitmq_jms_topic_exchange/src/rabbit_jms_topic_exchange.erl300
-rw-r--r--deps/rabbitmq_jms_topic_exchange/src/sjx_evaluator.erl169
-rw-r--r--deps/rabbitmq_jms_topic_exchange/test/rjms_topic_selector_SUITE.erl131
-rw-r--r--deps/rabbitmq_jms_topic_exchange/test/rjms_topic_selector_unit_SUITE.erl105
-rw-r--r--deps/rabbitmq_jms_topic_exchange/test/sjx_evaluation_SUITE.erl122
-rw-r--r--deps/rabbitmq_management/.gitignore36
-rw-r--r--deps/rabbitmq_management/.travis.yml61
-rw-r--r--deps/rabbitmq_management/CODE_OF_CONDUCT.md44
-rw-r--r--deps/rabbitmq_management/CONTRIBUTING.md99
-rw-r--r--deps/rabbitmq_management/Caddyfile5
-rw-r--r--deps/rabbitmq_management/LICENSE16
-rw-r--r--deps/rabbitmq_management/LICENSE-APACHE2-excanvas202
-rw-r--r--deps/rabbitmq_management/LICENSE-BSD-base64js25
-rw-r--r--deps/rabbitmq_management/LICENSE-ISC-cowboy13
-rw-r--r--deps/rabbitmq_management/LICENSE-MIT-EJS21
-rw-r--r--deps/rabbitmq_management/LICENSE-MIT-Flot22
-rw-r--r--deps/rabbitmq_management/LICENSE-MIT-Sammy22
-rw-r--r--deps/rabbitmq_management/LICENSE-MIT-jQuery20
-rw-r--r--deps/rabbitmq_management/LICENSE-MPL-RabbitMQ373
-rw-r--r--deps/rabbitmq_management/Makefile52
-rw-r--r--deps/rabbitmq_management/README.md21
-rwxr-xr-xdeps/rabbitmq_management/bin/rabbitmqadmin1184
-rw-r--r--deps/rabbitmq_management/erlang.mk7808
-rw-r--r--deps/rabbitmq_management/include/rabbit_mgmt.hrl19
-rw-r--r--deps/rabbitmq_management/priv/schema/rabbitmq_management.schema447
-rw-r--r--deps/rabbitmq_management/priv/www/api/index.html2093
-rw-r--r--deps/rabbitmq_management/priv/www/cli/index.html45
-rw-r--r--deps/rabbitmq_management/priv/www/css/evil.css1
-rw-r--r--deps/rabbitmq_management/priv/www/css/main.css361
-rw-r--r--deps/rabbitmq_management/priv/www/favicon.icobin0 -> 102856 bytes
-rw-r--r--deps/rabbitmq_management/priv/www/img/bg-binary.pngbin0 -> 433 bytes
-rw-r--r--deps/rabbitmq_management/priv/www/img/bg-green-dark.pngbin0 -> 190 bytes
-rw-r--r--deps/rabbitmq_management/priv/www/img/bg-red-dark.pngbin0 -> 190 bytes
-rw-r--r--deps/rabbitmq_management/priv/www/img/bg-red.pngbin0 -> 220 bytes
-rw-r--r--deps/rabbitmq_management/priv/www/img/bg-yellow-dark.pngbin0 -> 190 bytes
-rw-r--r--deps/rabbitmq_management/priv/www/img/collapse.pngbin0 -> 226 bytes
-rw-r--r--deps/rabbitmq_management/priv/www/img/expand.pngbin0 -> 221 bytes
-rw-r--r--deps/rabbitmq_management/priv/www/img/rabbitmqlogo-master-copy.svg122
-rw-r--r--deps/rabbitmq_management/priv/www/img/rabbitmqlogo.svg124
-rw-r--r--deps/rabbitmq_management/priv/www/index.html70
-rw-r--r--deps/rabbitmq_management/priv/www/js/base64.js154
-rw-r--r--deps/rabbitmq_management/priv/www/js/charts.js329
-rw-r--r--deps/rabbitmq_management/priv/www/js/dispatcher.js345
-rw-r--r--deps/rabbitmq_management/priv/www/js/ejs-1.0.js505
-rw-r--r--deps/rabbitmq_management/priv/www/js/ejs-1.0.min.js1
-rw-r--r--deps/rabbitmq_management/priv/www/js/excanvas.js1428
-rw-r--r--deps/rabbitmq_management/priv/www/js/excanvas.min.js1
-rw-r--r--deps/rabbitmq_management/priv/www/js/formatters.js1067
-rw-r--r--deps/rabbitmq_management/priv/www/js/global.js798
-rw-r--r--deps/rabbitmq_management/priv/www/js/jquery-3.5.1.js10872
-rw-r--r--deps/rabbitmq_management/priv/www/js/jquery-3.5.1.min.js2
-rw-r--r--deps/rabbitmq_management/priv/www/js/jquery.flot-0.8.1.js3061
-rw-r--r--deps/rabbitmq_management/priv/www/js/jquery.flot-0.8.1.min.js29
-rw-r--r--deps/rabbitmq_management/priv/www/js/jquery.flot-0.8.1.time.js431
-rw-r--r--deps/rabbitmq_management/priv/www/js/jquery.flot-0.8.1.time.min.js9
-rw-r--r--deps/rabbitmq_management/priv/www/js/json2-2016.10.28.js506
-rw-r--r--deps/rabbitmq_management/priv/www/js/main.js1717
-rw-r--r--deps/rabbitmq_management/priv/www/js/prefs.js164
-rw-r--r--deps/rabbitmq_management/priv/www/js/sammy-0.7.6.js2156
-rw-r--r--deps/rabbitmq_management/priv/www/js/sammy-0.7.6.min.js5
-rw-r--r--deps/rabbitmq_management/priv/www/js/singular/client_frame.html21
-rw-r--r--deps/rabbitmq_management/priv/www/js/singular/postaccess.html10
-rw-r--r--deps/rabbitmq_management/priv/www/js/singular/postauth.html10
-rw-r--r--deps/rabbitmq_management/priv/www/js/singular/rpFrame.js1
-rw-r--r--deps/rabbitmq_management/priv/www/js/singular/singular.js1
-rw-r--r--deps/rabbitmq_management/priv/www/js/singular/singular.umd.js1
-rw-r--r--deps/rabbitmq_management/priv/www/js/tmpl/404.ejs3
-rw-r--r--deps/rabbitmq_management/priv/www/js/tmpl/add-binding.ejs49
-rw-r--r--deps/rabbitmq_management/priv/www/js/tmpl/binary.ejs61
-rw-r--r--deps/rabbitmq_management/priv/www/js/tmpl/bindings.ejs66
-rw-r--r--deps/rabbitmq_management/priv/www/js/tmpl/channel.ejs144
-rw-r--r--deps/rabbitmq_management/priv/www/js/tmpl/channels-list.ejs203
-rw-r--r--deps/rabbitmq_management/priv/www/js/tmpl/channels.ejs7
-rw-r--r--deps/rabbitmq_management/priv/www/js/tmpl/cluster-name.ejs31
-rw-r--r--deps/rabbitmq_management/priv/www/js/tmpl/columns-options.ejs25
-rw-r--r--deps/rabbitmq_management/priv/www/js/tmpl/connection.ejs194
-rw-r--r--deps/rabbitmq_management/priv/www/js/tmpl/connections.ejs155
-rw-r--r--deps/rabbitmq_management/priv/www/js/tmpl/consumers.ejs43
-rw-r--r--deps/rabbitmq_management/priv/www/js/tmpl/exchange.ejs97
-rw-r--r--deps/rabbitmq_management/priv/www/js/tmpl/exchanges.ejs169
-rw-r--r--deps/rabbitmq_management/priv/www/js/tmpl/feature-flags.ejs60
-rw-r--r--deps/rabbitmq_management/priv/www/js/tmpl/layout.ejs58
-rw-r--r--deps/rabbitmq_management/priv/www/js/tmpl/limits.ejs177
-rw-r--r--deps/rabbitmq_management/priv/www/js/tmpl/list-exchanges.ejs5
-rw-r--r--deps/rabbitmq_management/priv/www/js/tmpl/login.ejs21
-rw-r--r--deps/rabbitmq_management/priv/www/js/tmpl/login_uaa.ejs5
-rw-r--r--deps/rabbitmq_management/priv/www/js/tmpl/memory-bar.ejs24
-rw-r--r--deps/rabbitmq_management/priv/www/js/tmpl/memory-table.ejs28
-rw-r--r--deps/rabbitmq_management/priv/www/js/tmpl/memory.ejs97
-rw-r--r--deps/rabbitmq_management/priv/www/js/tmpl/messages.ejs39
-rw-r--r--deps/rabbitmq_management/priv/www/js/tmpl/msg-detail-deliveries.ejs30
-rw-r--r--deps/rabbitmq_management/priv/www/js/tmpl/msg-detail-publishes.ejs46
-rw-r--r--deps/rabbitmq_management/priv/www/js/tmpl/node.ejs416
-rw-r--r--deps/rabbitmq_management/priv/www/js/tmpl/overview.ejs399
-rw-r--r--deps/rabbitmq_management/priv/www/js/tmpl/partition.ejs105
-rw-r--r--deps/rabbitmq_management/priv/www/js/tmpl/permissions.ejs91
-rw-r--r--deps/rabbitmq_management/priv/www/js/tmpl/policies.ejs305
-rw-r--r--deps/rabbitmq_management/priv/www/js/tmpl/policy.ejs37
-rw-r--r--deps/rabbitmq_management/priv/www/js/tmpl/popup.ejs6
-rw-r--r--deps/rabbitmq_management/priv/www/js/tmpl/publish.ejs67
-rw-r--r--deps/rabbitmq_management/priv/www/js/tmpl/queue.ejs431
-rw-r--r--deps/rabbitmq_management/priv/www/js/tmpl/queues.ejs353
-rw-r--r--deps/rabbitmq_management/priv/www/js/tmpl/rate-options.ejs56
-rw-r--r--deps/rabbitmq_management/priv/www/js/tmpl/registry.ejs25
-rw-r--r--deps/rabbitmq_management/priv/www/js/tmpl/status.ejs1
-rw-r--r--deps/rabbitmq_management/priv/www/js/tmpl/topic-permissions.ejs96
-rw-r--r--deps/rabbitmq_management/priv/www/js/tmpl/user.ejs100
-rw-r--r--deps/rabbitmq_management/priv/www/js/tmpl/users.ejs98
-rw-r--r--deps/rabbitmq_management/priv/www/js/tmpl/vhost.ejs62
-rw-r--r--deps/rabbitmq_management/priv/www/js/tmpl/vhosts.ejs163
-rw-r--r--deps/rabbitmq_management/rabbitmq-components.mk359
-rwxr-xr-xdeps/rabbitmq_management/scripts/seed.sh29
-rw-r--r--deps/rabbitmq_management/src/rabbit_mgmt_app.erl206
-rw-r--r--deps/rabbitmq_management/src/rabbit_mgmt_cors.erl84
-rw-r--r--deps/rabbitmq_management/src/rabbit_mgmt_csp.erl28
-rw-r--r--deps/rabbitmq_management/src/rabbit_mgmt_db.erl799
-rw-r--r--deps/rabbitmq_management/src/rabbit_mgmt_db_cache.erl143
-rw-r--r--deps/rabbitmq_management/src/rabbit_mgmt_db_cache_sup.erl29
-rw-r--r--deps/rabbitmq_management/src/rabbit_mgmt_dispatcher.erl185
-rw-r--r--deps/rabbitmq_management/src/rabbit_mgmt_extension.erl16
-rw-r--r--deps/rabbitmq_management/src/rabbit_mgmt_headers.erl34
-rw-r--r--deps/rabbitmq_management/src/rabbit_mgmt_hsts.erl28
-rw-r--r--deps/rabbitmq_management/src/rabbit_mgmt_load_definitions.erl27
-rw-r--r--deps/rabbitmq_management/src/rabbit_mgmt_reset_handler.erl79
-rw-r--r--deps/rabbitmq_management/src/rabbit_mgmt_stats.erl739
-rw-r--r--deps/rabbitmq_management/src/rabbit_mgmt_sup.erl42
-rw-r--r--deps/rabbitmq_management/src/rabbit_mgmt_sup_sup.erl28
-rw-r--r--deps/rabbitmq_management/src/rabbit_mgmt_util.erl1220
-rw-r--r--deps/rabbitmq_management/src/rabbit_mgmt_wm_aliveness_test.erl71
-rw-r--r--deps/rabbitmq_management/src/rabbit_mgmt_wm_auth.erl51
-rw-r--r--deps/rabbitmq_management/src/rabbit_mgmt_wm_auth_attempts.erl79
-rw-r--r--deps/rabbitmq_management/src/rabbit_mgmt_wm_binding.erl141
-rw-r--r--deps/rabbitmq_management/src/rabbit_mgmt_wm_bindings.erl149
-rw-r--r--deps/rabbitmq_management/src/rabbit_mgmt_wm_channel.erl68
-rw-r--r--deps/rabbitmq_management/src/rabbit_mgmt_wm_channels.erl50
-rw-r--r--deps/rabbitmq_management/src/rabbit_mgmt_wm_channels_vhost.erl54
-rw-r--r--deps/rabbitmq_management/src/rabbit_mgmt_wm_cluster_name.erl59
-rw-r--r--deps/rabbitmq_management/src/rabbit_mgmt_wm_connection.erl98
-rw-r--r--deps/rabbitmq_management/src/rabbit_mgmt_wm_connection_channels.erl54
-rw-r--r--deps/rabbitmq_management/src/rabbit_mgmt_wm_connections.erl54
-rw-r--r--deps/rabbitmq_management/src/rabbit_mgmt_wm_connections_vhost.erl49
-rw-r--r--deps/rabbitmq_management/src/rabbit_mgmt_wm_consumers.erl59
-rw-r--r--deps/rabbitmq_management/src/rabbit_mgmt_wm_definitions.erl298
-rw-r--r--deps/rabbitmq_management/src/rabbit_mgmt_wm_exchange.erl90
-rw-r--r--deps/rabbitmq_management/src/rabbit_mgmt_wm_exchange_publish.erl105
-rw-r--r--deps/rabbitmq_management/src/rabbit_mgmt_wm_exchanges.erl75
-rw-r--r--deps/rabbitmq_management/src/rabbit_mgmt_wm_extensions.erl33
-rw-r--r--deps/rabbitmq_management/src/rabbit_mgmt_wm_feature_flag_enable.erl55
-rw-r--r--deps/rabbitmq_management/src/rabbit_mgmt_wm_feature_flags.erl45
-rw-r--r--deps/rabbitmq_management/src/rabbit_mgmt_wm_global_parameter.erl71
-rw-r--r--deps/rabbitmq_management/src/rabbit_mgmt_wm_global_parameters.erl36
-rw-r--r--deps/rabbitmq_management/src/rabbit_mgmt_wm_health_check_alarms.erl56
-rw-r--r--deps/rabbitmq_management/src/rabbit_mgmt_wm_health_check_certificate_expiration.erl178
-rw-r--r--deps/rabbitmq_management/src/rabbit_mgmt_wm_health_check_local_alarms.erl56
-rw-r--r--deps/rabbitmq_management/src/rabbit_mgmt_wm_health_check_node_is_mirror_sync_critical.erl54
-rw-r--r--deps/rabbitmq_management/src/rabbit_mgmt_wm_health_check_node_is_quorum_critical.erl54
-rw-r--r--deps/rabbitmq_management/src/rabbit_mgmt_wm_health_check_port_listener.erl66
-rw-r--r--deps/rabbitmq_management/src/rabbit_mgmt_wm_health_check_protocol_listener.erl127
-rw-r--r--deps/rabbitmq_management/src/rabbit_mgmt_wm_health_check_virtual_hosts.erl48
-rw-r--r--deps/rabbitmq_management/src/rabbit_mgmt_wm_healthchecks.erl76
-rw-r--r--deps/rabbitmq_management/src/rabbit_mgmt_wm_limit.erl64
-rw-r--r--deps/rabbitmq_management/src/rabbit_mgmt_wm_limits.erl59
-rw-r--r--deps/rabbitmq_management/src/rabbit_mgmt_wm_login.erl53
-rw-r--r--deps/rabbitmq_management/src/rabbit_mgmt_wm_node.erl78
-rw-r--r--deps/rabbitmq_management/src/rabbit_mgmt_wm_node_memory.erl75
-rw-r--r--deps/rabbitmq_management/src/rabbit_mgmt_wm_node_memory_ets.erl85
-rw-r--r--deps/rabbitmq_management/src/rabbit_mgmt_wm_nodes.erl56
-rw-r--r--deps/rabbitmq_management/src/rabbit_mgmt_wm_operator_policies.erl49
-rw-r--r--deps/rabbitmq_management/src/rabbit_mgmt_wm_operator_policy.erl83
-rw-r--r--deps/rabbitmq_management/src/rabbit_mgmt_wm_overview.erl182
-rw-r--r--deps/rabbitmq_management/src/rabbit_mgmt_wm_parameter.erl88
-rw-r--r--deps/rabbitmq_management/src/rabbit_mgmt_wm_parameters.erl78
-rw-r--r--deps/rabbitmq_management/src/rabbit_mgmt_wm_permission.erl102
-rw-r--r--deps/rabbitmq_management/src/rabbit_mgmt_wm_permissions.erl38
-rw-r--r--deps/rabbitmq_management/src/rabbit_mgmt_wm_permissions_user.erl47
-rw-r--r--deps/rabbitmq_management/src/rabbit_mgmt_wm_permissions_vhost.erl44
-rw-r--r--deps/rabbitmq_management/src/rabbit_mgmt_wm_policies.erl49
-rw-r--r--deps/rabbitmq_management/src/rabbit_mgmt_wm_policy.erl82
-rw-r--r--deps/rabbitmq_management/src/rabbit_mgmt_wm_queue.erl113
-rw-r--r--deps/rabbitmq_management/src/rabbit_mgmt_wm_queue_actions.erl68
-rw-r--r--deps/rabbitmq_management/src/rabbit_mgmt_wm_queue_get.erl166
-rw-r--r--deps/rabbitmq_management/src/rabbit_mgmt_wm_queue_purge.erl42
-rw-r--r--deps/rabbitmq_management/src/rabbit_mgmt_wm_queues.erl113
-rw-r--r--deps/rabbitmq_management/src/rabbit_mgmt_wm_rebalance_queues.erl58
-rw-r--r--deps/rabbitmq_management/src/rabbit_mgmt_wm_redirect.erl13
-rw-r--r--deps/rabbitmq_management/src/rabbit_mgmt_wm_reset.erl54
-rw-r--r--deps/rabbitmq_management/src/rabbit_mgmt_wm_static.erl65
-rw-r--r--deps/rabbitmq_management/src/rabbit_mgmt_wm_topic_permission.erl103
-rw-r--r--deps/rabbitmq_management/src/rabbit_mgmt_wm_topic_permissions.erl38
-rw-r--r--deps/rabbitmq_management/src/rabbit_mgmt_wm_topic_permissions_user.erl47
-rw-r--r--deps/rabbitmq_management/src/rabbit_mgmt_wm_topic_permissions_vhost.erl44
-rw-r--r--deps/rabbitmq_management/src/rabbit_mgmt_wm_user.erl73
-rw-r--r--deps/rabbitmq_management/src/rabbit_mgmt_wm_user_limit.erl62
-rw-r--r--deps/rabbitmq_management/src/rabbit_mgmt_wm_user_limits.erl63
-rw-r--r--deps/rabbitmq_management/src/rabbit_mgmt_wm_users.erl59
-rw-r--r--deps/rabbitmq_management/src/rabbit_mgmt_wm_users_bulk_delete.erl47
-rw-r--r--deps/rabbitmq_management/src/rabbit_mgmt_wm_vhost.erl97
-rw-r--r--deps/rabbitmq_management/src/rabbit_mgmt_wm_vhost_restart.erl56
-rw-r--r--deps/rabbitmq_management/src/rabbit_mgmt_wm_vhosts.erl69
-rw-r--r--deps/rabbitmq_management/src/rabbit_mgmt_wm_whoami.erl36
-rw-r--r--deps/rabbitmq_management/test/cache_SUITE.erl109
-rw-r--r--deps/rabbitmq_management/test/clustering_SUITE.erl874
-rw-r--r--deps/rabbitmq_management/test/clustering_prop_SUITE.erl280
-rw-r--r--deps/rabbitmq_management/test/config_schema_SUITE.erl55
-rw-r--r--deps/rabbitmq_management/test/config_schema_SUITE_data/certs/cacert.pem1
-rw-r--r--deps/rabbitmq_management/test/config_schema_SUITE_data/certs/cert.pem1
-rw-r--r--deps/rabbitmq_management/test/config_schema_SUITE_data/certs/key.pem1
-rw-r--r--deps/rabbitmq_management/test/config_schema_SUITE_data/rabbit-mgmt/access.log0
-rw-r--r--deps/rabbitmq_management/test/config_schema_SUITE_data/rabbitmq_management.snippets525
-rw-r--r--deps/rabbitmq_management/test/listener_config_SUITE.erl135
-rw-r--r--deps/rabbitmq_management/test/rabbit_mgmt_http_SUITE.erl3545
-rw-r--r--deps/rabbitmq_management/test/rabbit_mgmt_http_health_checks_SUITE.erl399
-rw-r--r--deps/rabbitmq_management/test/rabbit_mgmt_only_http_SUITE.erl1716
-rw-r--r--deps/rabbitmq_management/test/rabbit_mgmt_rabbitmqadmin_SUITE.erl512
-rw-r--r--deps/rabbitmq_management/test/rabbit_mgmt_runtime_parameters_util.erl63
-rw-r--r--deps/rabbitmq_management/test/rabbit_mgmt_stats_SUITE.erl458
-rw-r--r--deps/rabbitmq_management/test/rabbit_mgmt_test_db_SUITE.erl469
-rw-r--r--deps/rabbitmq_management/test/rabbit_mgmt_test_unit_SUITE.erl88
-rw-r--r--deps/rabbitmq_management/test/stats_SUITE.erl178
-rw-r--r--deps/rabbitmq_management_agent/.gitignore18
-rw-r--r--deps/rabbitmq_management_agent/.travis.yml61
-rw-r--r--deps/rabbitmq_management_agent/CODE_OF_CONDUCT.md44
-rw-r--r--deps/rabbitmq_management_agent/CONTRIBUTING.md38
-rw-r--r--deps/rabbitmq_management_agent/LICENSE4
-rw-r--r--deps/rabbitmq_management_agent/LICENSE-MPL-RabbitMQ373
-rw-r--r--deps/rabbitmq_management_agent/Makefile34
-rw-r--r--deps/rabbitmq_management_agent/erlang.mk7808
-rw-r--r--deps/rabbitmq_management_agent/include/rabbit_mgmt_metrics.hrl198
-rw-r--r--deps/rabbitmq_management_agent/include/rabbit_mgmt_records.hrl25
-rw-r--r--deps/rabbitmq_management_agent/priv/schema/rabbitmq_management_agent.schema4
-rw-r--r--deps/rabbitmq_management_agent/rabbitmq-components.mk359
-rw-r--r--deps/rabbitmq_management_agent/src/Elixir.RabbitMQ.CLI.Ctl.Commands.ResetStatsDbCommand.erl54
-rw-r--r--deps/rabbitmq_management_agent/src/exometer_slide.erl551
-rw-r--r--deps/rabbitmq_management_agent/src/rabbit_mgmt_agent_app.erl17
-rw-r--r--deps/rabbitmq_management_agent/src/rabbit_mgmt_agent_config.erl22
-rw-r--r--deps/rabbitmq_management_agent/src/rabbit_mgmt_agent_sup.erl55
-rw-r--r--deps/rabbitmq_management_agent/src/rabbit_mgmt_agent_sup_sup.erl28
-rw-r--r--deps/rabbitmq_management_agent/src/rabbit_mgmt_data.erl572
-rw-r--r--deps/rabbitmq_management_agent/src/rabbit_mgmt_data_compat.erl80
-rw-r--r--deps/rabbitmq_management_agent/src/rabbit_mgmt_db_handler.erl99
-rw-r--r--deps/rabbitmq_management_agent/src/rabbit_mgmt_external_stats.erl501
-rw-r--r--deps/rabbitmq_management_agent/src/rabbit_mgmt_ff.erl20
-rw-r--r--deps/rabbitmq_management_agent/src/rabbit_mgmt_format.erl559
-rw-r--r--deps/rabbitmq_management_agent/src/rabbit_mgmt_gc.erl230
-rw-r--r--deps/rabbitmq_management_agent/src/rabbit_mgmt_metrics_collector.erl712
-rw-r--r--deps/rabbitmq_management_agent/src/rabbit_mgmt_metrics_gc.erl175
-rw-r--r--deps/rabbitmq_management_agent/src/rabbit_mgmt_storage.erl57
-rw-r--r--deps/rabbitmq_management_agent/test/exometer_slide_SUITE.erl631
-rw-r--r--deps/rabbitmq_management_agent/test/metrics_SUITE.erl157
-rw-r--r--deps/rabbitmq_management_agent/test/rabbit_mgmt_gc_SUITE.erl626
-rw-r--r--deps/rabbitmq_management_agent/test/rabbit_mgmt_slide_SUITE.erl174
-rw-r--r--deps/rabbitmq_mqtt/.gitignore24
-rw-r--r--deps/rabbitmq_mqtt/.travis.yml62
-rw-r--r--deps/rabbitmq_mqtt/.travis.yml.patch10
-rw-r--r--deps/rabbitmq_mqtt/CODE_OF_CONDUCT.md44
-rw-r--r--deps/rabbitmq_mqtt/CONTRIBUTING.md99
-rw-r--r--deps/rabbitmq_mqtt/LICENSE4
-rw-r--r--deps/rabbitmq_mqtt/LICENSE-MPL-RabbitMQ373
-rw-r--r--deps/rabbitmq_mqtt/Makefile54
-rw-r--r--deps/rabbitmq_mqtt/README.md38
-rw-r--r--deps/rabbitmq_mqtt/erlang.mk7808
-rw-r--r--deps/rabbitmq_mqtt/include/mqtt_machine.hrl8
-rw-r--r--deps/rabbitmq_mqtt/include/rabbit_mqtt.hrl92
-rw-r--r--deps/rabbitmq_mqtt/include/rabbit_mqtt_frame.hrl90
-rw-r--r--deps/rabbitmq_mqtt/include/rabbit_mqtt_retained_msg_store.hrl6
-rw-r--r--deps/rabbitmq_mqtt/priv/schema/rabbitmq_mqtt.schema259
-rw-r--r--deps/rabbitmq_mqtt/rabbitmq-components.mk359
-rw-r--r--deps/rabbitmq_mqtt/src/Elixir.RabbitMQ.CLI.Ctl.Commands.DecommissionMqttNodeCommand.erl68
-rw-r--r--deps/rabbitmq_mqtt/src/Elixir.RabbitMQ.CLI.Ctl.Commands.ListMqttConnectionsCommand.erl87
-rw-r--r--deps/rabbitmq_mqtt/src/mqtt_machine.erl134
-rw-r--r--deps/rabbitmq_mqtt/src/mqtt_node.erl132
-rw-r--r--deps/rabbitmq_mqtt/src/rabbit_mqtt.erl55
-rw-r--r--deps/rabbitmq_mqtt/src/rabbit_mqtt_collector.erl88
-rw-r--r--deps/rabbitmq_mqtt/src/rabbit_mqtt_connection_info.erl25
-rw-r--r--deps/rabbitmq_mqtt/src/rabbit_mqtt_connection_sup.erl43
-rw-r--r--deps/rabbitmq_mqtt/src/rabbit_mqtt_frame.erl224
-rw-r--r--deps/rabbitmq_mqtt/src/rabbit_mqtt_internal_event_handler.erl45
-rw-r--r--deps/rabbitmq_mqtt/src/rabbit_mqtt_processor.erl1054
-rw-r--r--deps/rabbitmq_mqtt/src/rabbit_mqtt_reader.erl480
-rw-r--r--deps/rabbitmq_mqtt/src/rabbit_mqtt_retained_msg_store.erl23
-rw-r--r--deps/rabbitmq_mqtt/src/rabbit_mqtt_retained_msg_store_dets.erl54
-rw-r--r--deps/rabbitmq_mqtt/src/rabbit_mqtt_retained_msg_store_ets.erl54
-rw-r--r--deps/rabbitmq_mqtt/src/rabbit_mqtt_retained_msg_store_noop.erl31
-rw-r--r--deps/rabbitmq_mqtt/src/rabbit_mqtt_retainer.erl98
-rw-r--r--deps/rabbitmq_mqtt/src/rabbit_mqtt_retainer_sup.erl60
-rw-r--r--deps/rabbitmq_mqtt/src/rabbit_mqtt_sup.erl73
-rw-r--r--deps/rabbitmq_mqtt/src/rabbit_mqtt_util.erl139
-rw-r--r--deps/rabbitmq_mqtt/test/auth_SUITE.erl493
-rw-r--r--deps/rabbitmq_mqtt/test/cluster_SUITE.erl188
-rw-r--r--deps/rabbitmq_mqtt/test/command_SUITE.erl158
-rw-r--r--deps/rabbitmq_mqtt/test/config_schema_SUITE.erl55
-rw-r--r--deps/rabbitmq_mqtt/test/config_schema_SUITE_data/certs/cacert.pem1
-rw-r--r--deps/rabbitmq_mqtt/test/config_schema_SUITE_data/certs/cert.pem1
-rw-r--r--deps/rabbitmq_mqtt/test/config_schema_SUITE_data/certs/key.pem1
-rw-r--r--deps/rabbitmq_mqtt/test/config_schema_SUITE_data/rabbitmq_mqtt.snippets144
-rw-r--r--deps/rabbitmq_mqtt/test/java_SUITE.erl127
-rw-r--r--deps/rabbitmq_mqtt/test/java_SUITE_data/.gitignore3
-rwxr-xr-xdeps/rabbitmq_mqtt/test/java_SUITE_data/.mvn/wrapper/MavenWrapperDownloader.java110
-rwxr-xr-xdeps/rabbitmq_mqtt/test/java_SUITE_data/.mvn/wrapper/maven-wrapper.jarbin0 -> 48337 bytes
-rwxr-xr-xdeps/rabbitmq_mqtt/test/java_SUITE_data/.mvn/wrapper/maven-wrapper.properties1
-rw-r--r--deps/rabbitmq_mqtt/test/java_SUITE_data/Makefile27
-rwxr-xr-xdeps/rabbitmq_mqtt/test/java_SUITE_data/mvnw286
-rwxr-xr-xdeps/rabbitmq_mqtt/test/java_SUITE_data/mvnw.cmd161
-rw-r--r--deps/rabbitmq_mqtt/test/java_SUITE_data/pom.xml137
-rw-r--r--deps/rabbitmq_mqtt/test/java_SUITE_data/src/test.config14
-rw-r--r--deps/rabbitmq_mqtt/test/java_SUITE_data/src/test/java/com/rabbitmq/mqtt/test/MqttTest.java1030
-rwxr-xr-xdeps/rabbitmq_mqtt/test/java_SUITE_data/src/test/java/com/rabbitmq/mqtt/test/rabbit-test.sh8
-rw-r--r--deps/rabbitmq_mqtt/test/java_SUITE_data/src/test/java/com/rabbitmq/mqtt/test/setup-rabbit-test.sh2
-rw-r--r--deps/rabbitmq_mqtt/test/java_SUITE_data/src/test/java/com/rabbitmq/mqtt/test/tls/MqttSSLTest.java157
-rw-r--r--deps/rabbitmq_mqtt/test/java_SUITE_data/src/test/java/com/rabbitmq/mqtt/test/tls/MutualAuth.java89
-rw-r--r--deps/rabbitmq_mqtt/test/java_SUITE_data/src/test/scripts/remove_old_test_keystores.groovy10
-rw-r--r--deps/rabbitmq_mqtt/test/mqtt_machine_SUITE.erl73
-rw-r--r--deps/rabbitmq_mqtt/test/processor_SUITE.erl211
-rw-r--r--deps/rabbitmq_mqtt/test/proxy_protocol_SUITE.erl125
-rw-r--r--deps/rabbitmq_mqtt/test/rabbit_auth_backend_mqtt_mock.erl45
-rw-r--r--deps/rabbitmq_mqtt/test/rabbitmq_mqtt.app19
-rw-r--r--deps/rabbitmq_mqtt/test/reader_SUITE.erl166
-rw-r--r--deps/rabbitmq_mqtt/test/retainer_SUITE.erl144
-rw-r--r--deps/rabbitmq_mqtt/test/util_SUITE.erl80
-rw-r--r--deps/rabbitmq_peer_discovery_aws/.gitignore25
-rw-r--r--deps/rabbitmq_peer_discovery_aws/.travis.yml61
-rw-r--r--deps/rabbitmq_peer_discovery_aws/CODE_OF_CONDUCT.md44
-rw-r--r--deps/rabbitmq_peer_discovery_aws/CONTRIBUTING.md128
-rw-r--r--deps/rabbitmq_peer_discovery_aws/LICENSE4
-rw-r--r--deps/rabbitmq_peer_discovery_aws/LICENSE-MPL-RabbitMQ373
-rw-r--r--deps/rabbitmq_peer_discovery_aws/Makefile19
-rw-r--r--deps/rabbitmq_peer_discovery_aws/README.md56
-rw-r--r--deps/rabbitmq_peer_discovery_aws/erlang.mk7808
-rw-r--r--deps/rabbitmq_peer_discovery_aws/priv/schema/rabbitmq_peer_discovery_aws.schema96
-rw-r--r--deps/rabbitmq_peer_discovery_aws/rabbitmq-components.mk359
-rw-r--r--deps/rabbitmq_peer_discovery_aws/src/rabbit_peer_discovery_aws.erl379
-rw-r--r--deps/rabbitmq_peer_discovery_aws/src/rabbitmq_peer_discovery_aws.erl54
-rw-r--r--deps/rabbitmq_peer_discovery_aws/test/config_schema_SUITE.erl55
-rw-r--r--deps/rabbitmq_peer_discovery_aws/test/config_schema_SUITE_data/rabbitmq_peer_discovery_aws.snippets95
-rw-r--r--deps/rabbitmq_peer_discovery_aws/test/integration_SUITE.erl190
-rw-r--r--deps/rabbitmq_peer_discovery_aws/test/rabbitmq_peer_discovery_aws_SUITE.erl116
-rw-r--r--deps/rabbitmq_peer_discovery_common/.gitignore25
-rw-r--r--deps/rabbitmq_peer_discovery_common/.travis.yml61
-rw-r--r--deps/rabbitmq_peer_discovery_common/CODE_OF_CONDUCT.md44
-rw-r--r--deps/rabbitmq_peer_discovery_common/CONTRIBUTING.md123
-rw-r--r--deps/rabbitmq_peer_discovery_common/LICENSE4
-rw-r--r--deps/rabbitmq_peer_discovery_common/LICENSE-MPL-RabbitMQ373
-rw-r--r--deps/rabbitmq_peer_discovery_common/Makefile20
-rw-r--r--deps/rabbitmq_peer_discovery_common/README.md27
-rw-r--r--deps/rabbitmq_peer_discovery_common/erlang.mk7808
-rw-r--r--deps/rabbitmq_peer_discovery_common/include/rabbit_peer_discovery.hrl26
-rw-r--r--deps/rabbitmq_peer_discovery_common/priv/schema/rabbitmq_peer_discovery_common.schema75
-rw-r--r--deps/rabbitmq_peer_discovery_common/rabbitmq-components.mk359
-rw-r--r--deps/rabbitmq_peer_discovery_common/src/rabbit_peer_discovery_cleanup.erl298
-rw-r--r--deps/rabbitmq_peer_discovery_common/src/rabbit_peer_discovery_common_app.erl19
-rw-r--r--deps/rabbitmq_peer_discovery_common/src/rabbit_peer_discovery_common_sup.erl24
-rw-r--r--deps/rabbitmq_peer_discovery_common/src/rabbit_peer_discovery_config.erl131
-rw-r--r--deps/rabbitmq_peer_discovery_common/src/rabbit_peer_discovery_httpc.erl421
-rw-r--r--deps/rabbitmq_peer_discovery_common/src/rabbit_peer_discovery_util.erl404
-rw-r--r--deps/rabbitmq_peer_discovery_common/test/config_schema_SUITE.erl54
-rw-r--r--deps/rabbitmq_peer_discovery_common/test/config_schema_SUITE_data/rabbitmq_peer_discovery_common.snippets68
-rw-r--r--deps/rabbitmq_peer_discovery_consul/.gitignore25
-rw-r--r--deps/rabbitmq_peer_discovery_consul/.travis.yml61
-rw-r--r--deps/rabbitmq_peer_discovery_consul/CODE_OF_CONDUCT.md44
-rw-r--r--deps/rabbitmq_peer_discovery_consul/CONTRIBUTING.md123
-rw-r--r--deps/rabbitmq_peer_discovery_consul/LICENSE4
-rw-r--r--deps/rabbitmq_peer_discovery_consul/LICENSE-MPL-RabbitMQ373
-rw-r--r--deps/rabbitmq_peer_discovery_consul/Makefile19
-rw-r--r--deps/rabbitmq_peer_discovery_consul/README.md56
-rw-r--r--deps/rabbitmq_peer_discovery_consul/erlang.mk7808
-rw-r--r--deps/rabbitmq_peer_discovery_consul/examples/compose_consul_haproxy/README.md31
-rw-r--r--deps/rabbitmq_peer_discovery_consul/examples/compose_consul_haproxy/conf/enabled_plugins1
-rw-r--r--deps/rabbitmq_peer_discovery_consul/examples/compose_consul_haproxy/conf/rabbitmq.conf11
-rw-r--r--deps/rabbitmq_peer_discovery_consul/examples/compose_consul_haproxy/docker-compose.yml44
-rw-r--r--deps/rabbitmq_peer_discovery_consul/include/rabbit_peer_discovery_consul.hrl111
-rw-r--r--deps/rabbitmq_peer_discovery_consul/priv/schema/rabbitmq_peer_discovery_consul.schema324
-rw-r--r--deps/rabbitmq_peer_discovery_consul/rabbitmq-components.mk359
-rw-r--r--deps/rabbitmq_peer_discovery_consul/src/rabbit_peer_discovery_consul.erl777
-rw-r--r--deps/rabbitmq_peer_discovery_consul/src/rabbitmq_peer_discovery_consul.erl59
-rw-r--r--deps/rabbitmq_peer_discovery_consul/src/rabbitmq_peer_discovery_consul_app.erl21
-rw-r--r--deps/rabbitmq_peer_discovery_consul/src/rabbitmq_peer_discovery_consul_health_check_helper.erl95
-rw-r--r--deps/rabbitmq_peer_discovery_consul/src/rabbitmq_peer_discovery_consul_sup.erl37
-rw-r--r--deps/rabbitmq_peer_discovery_consul/test/config_schema_SUITE.erl55
-rw-r--r--deps/rabbitmq_peer_discovery_consul/test/config_schema_SUITE_data/rabbitmq_peer_discovery_consul.snippets290
-rw-r--r--deps/rabbitmq_peer_discovery_consul/test/rabbitmq_peer_discovery_consul_SUITE.erl1135
-rw-r--r--deps/rabbitmq_peer_discovery_etcd/.gitignore26
-rw-r--r--deps/rabbitmq_peer_discovery_etcd/.travis.yml62
-rw-r--r--deps/rabbitmq_peer_discovery_etcd/.travis.yml.patch19
-rw-r--r--deps/rabbitmq_peer_discovery_etcd/CODE_OF_CONDUCT.md44
-rw-r--r--deps/rabbitmq_peer_discovery_etcd/CONTRIBUTING.md118
-rw-r--r--deps/rabbitmq_peer_discovery_etcd/LICENSE4
-rw-r--r--deps/rabbitmq_peer_discovery_etcd/LICENSE-MPL-RabbitMQ373
-rw-r--r--deps/rabbitmq_peer_discovery_etcd/Makefile21
-rw-r--r--deps/rabbitmq_peer_discovery_etcd/README.md57
-rw-r--r--deps/rabbitmq_peer_discovery_etcd/RUNNING_TESTS.md33
-rw-r--r--deps/rabbitmq_peer_discovery_etcd/erlang.mk7808
-rw-r--r--deps/rabbitmq_peer_discovery_etcd/include/rabbit_peer_discovery_etcd.hrl26
-rw-r--r--deps/rabbitmq_peer_discovery_etcd/priv/schema/rabbitmq_peer_discovery_etcd.schema279
-rw-r--r--deps/rabbitmq_peer_discovery_etcd/rabbitmq-components.mk359
-rw-r--r--deps/rabbitmq_peer_discovery_etcd/src/rabbit_peer_discovery_etcd.erl108
-rw-r--r--deps/rabbitmq_peer_discovery_etcd/src/rabbitmq_peer_discovery_etcd.erl53
-rw-r--r--deps/rabbitmq_peer_discovery_etcd/src/rabbitmq_peer_discovery_etcd_app.erl29
-rw-r--r--deps/rabbitmq_peer_discovery_etcd/src/rabbitmq_peer_discovery_etcd_sup.erl50
-rw-r--r--deps/rabbitmq_peer_discovery_etcd/src/rabbitmq_peer_discovery_etcd_v3_client.erl441
-rw-r--r--deps/rabbitmq_peer_discovery_etcd/test/config_schema_SUITE.erl54
-rw-r--r--deps/rabbitmq_peer_discovery_etcd/test/config_schema_SUITE_data/rabbitmq_peer_discovery_etcd.snippets137
-rw-r--r--deps/rabbitmq_peer_discovery_etcd/test/system_SUITE.erl141
-rwxr-xr-xdeps/rabbitmq_peer_discovery_etcd/test/system_SUITE_data/init-etcd.sh81
-rw-r--r--deps/rabbitmq_peer_discovery_etcd/test/unit_SUITE.erl102
-rw-r--r--deps/rabbitmq_peer_discovery_k8s/.gitignore25
-rw-r--r--deps/rabbitmq_peer_discovery_k8s/.travis.yml61
-rw-r--r--deps/rabbitmq_peer_discovery_k8s/CODE_OF_CONDUCT.md44
-rw-r--r--deps/rabbitmq_peer_discovery_k8s/CONTRIBUTING.md123
-rw-r--r--deps/rabbitmq_peer_discovery_k8s/LICENSE4
-rw-r--r--deps/rabbitmq_peer_discovery_k8s/LICENSE-MPL-RabbitMQ373
-rw-r--r--deps/rabbitmq_peer_discovery_k8s/Makefile19
-rw-r--r--deps/rabbitmq_peer_discovery_k8s/README.md61
-rw-r--r--deps/rabbitmq_peer_discovery_k8s/erlang.mk7808
-rw-r--r--deps/rabbitmq_peer_discovery_k8s/examples/README.md3
-rw-r--r--deps/rabbitmq_peer_discovery_k8s/examples/gke/README.md3
-rw-r--r--deps/rabbitmq_peer_discovery_k8s/examples/kind/README.md3
-rw-r--r--deps/rabbitmq_peer_discovery_k8s/examples/minikube/README.md3
-rw-r--r--deps/rabbitmq_peer_discovery_k8s/include/rabbit_peer_discovery_k8s.hrl63
-rw-r--r--deps/rabbitmq_peer_discovery_k8s/priv/schema/rabbitmq_peer_discovery_k8s.schema133
-rw-r--r--deps/rabbitmq_peer_discovery_k8s/rabbitmq-components.mk359
-rw-r--r--deps/rabbitmq_peer_discovery_k8s/src/rabbit_peer_discovery_k8s.erl231
-rw-r--r--deps/rabbitmq_peer_discovery_k8s/src/rabbitmq_peer_discovery_k8s.erl68
-rw-r--r--deps/rabbitmq_peer_discovery_k8s/src/rabbitmq_peer_discovery_k8s_app.erl21
-rw-r--r--deps/rabbitmq_peer_discovery_k8s/src/rabbitmq_peer_discovery_k8s_node_monitor.erl49
-rw-r--r--deps/rabbitmq_peer_discovery_k8s/src/rabbitmq_peer_discovery_k8s_sup.erl38
-rw-r--r--deps/rabbitmq_peer_discovery_k8s/test/config_schema_SUITE.erl55
-rw-r--r--deps/rabbitmq_peer_discovery_k8s/test/config_schema_SUITE_data/rabbitmq_peer_discovery_k8s.snippets136
-rw-r--r--deps/rabbitmq_peer_discovery_k8s/test/rabbitmq_peer_discovery_k8s_SUITE.erl132
-rwxr-xr-xdeps/rabbitmq_prometheus/.autocomplete3
-rw-r--r--deps/rabbitmq_prometheus/.dockerignore8
-rw-r--r--deps/rabbitmq_prometheus/.gitignore24
-rw-r--r--deps/rabbitmq_prometheus/CODE_OF_CONDUCT.md44
-rw-r--r--deps/rabbitmq_prometheus/CONTRIBUTING.md99
-rw-r--r--deps/rabbitmq_prometheus/Dockerfile315
-rw-r--r--deps/rabbitmq_prometheus/LICENSE4
-rw-r--r--deps/rabbitmq_prometheus/LICENSE-MPL-RabbitMQ373
-rw-r--r--deps/rabbitmq_prometheus/Makefile245
-rw-r--r--deps/rabbitmq_prometheus/README.md104
-rw-r--r--deps/rabbitmq_prometheus/docker/docker-compose-dist-tls.yml86
-rw-r--r--deps/rabbitmq_prometheus/docker/docker-compose-metrics.yml73
-rw-r--r--deps/rabbitmq_prometheus/docker/docker-compose-overview.yml187
-rw-r--r--deps/rabbitmq_prometheus/docker/docker-compose-qq.yml72
-rwxr-xr-xdeps/rabbitmq_prometheus/docker/docker-entrypoint.sh404
-rw-r--r--deps/rabbitmq_prometheus/docker/erlang.cookie1
-rw-r--r--deps/rabbitmq_prometheus/docker/grafana/__inputs.json12
-rw-r--r--deps/rabbitmq_prometheus/docker/grafana/dashboards.yml10
-rw-r--r--deps/rabbitmq_prometheus/docker/grafana/dashboards/Erlang-Distribution.json2332
-rw-r--r--deps/rabbitmq_prometheus/docker/grafana/dashboards/Erlang-Distributions-Compare.json1807
-rw-r--r--deps/rabbitmq_prometheus/docker/grafana/dashboards/Erlang-Memory-Allocators.json2355
-rw-r--r--deps/rabbitmq_prometheus/docker/grafana/dashboards/RabbitMQ-Overview.json5708
-rw-r--r--deps/rabbitmq_prometheus/docker/grafana/dashboards/RabbitMQ-PerfTest.json1739
-rw-r--r--deps/rabbitmq_prometheus/docker/grafana/dashboards/RabbitMQ-Quorum-Queues-Raft.json755
-rw-r--r--deps/rabbitmq_prometheus/docker/grafana/dashboards/rabbitmq-exporter_vs_rabbitmq-prometheus.json375
-rw-r--r--deps/rabbitmq_prometheus/docker/grafana/datasources.yml44
-rw-r--r--deps/rabbitmq_prometheus/docker/grafana/publish/erlang-distribution-11352.md48
-rw-r--r--deps/rabbitmq_prometheus/docker/grafana/publish/erlang-distribution-2019-12-04-original.pngbin0 -> 1683386 bytes
-rw-r--r--deps/rabbitmq_prometheus/docker/grafana/publish/erlang-distribution-2019-12-04.jpgbin0 -> 475818 bytes
-rw-r--r--deps/rabbitmq_prometheus/docker/grafana/publish/erlang-distribution-info-2019-12-04-original.pngbin0 -> 1713427 bytes
-rw-r--r--deps/rabbitmq_prometheus/docker/grafana/publish/erlang-distribution-info-2019-12-04.jpgbin0 -> 495378 bytes
-rw-r--r--deps/rabbitmq_prometheus/docker/grafana/publish/erlang-distribution-logo-2019-12-04.pngbin0 -> 376155 bytes
-rw-r--r--deps/rabbitmq_prometheus/docker/grafana/publish/erlang-distribution-tls-sender-2019-12-04-original.pngbin0 -> 833685 bytes
-rw-r--r--deps/rabbitmq_prometheus/docker/grafana/publish/erlang-distribution-tls-sender-2019-12-04.jpgbin0 -> 336056 bytes
-rw-r--r--deps/rabbitmq_prometheus/docker/grafana/publish/erlang-distributions-compare-10988.md32
-rw-r--r--deps/rabbitmq_prometheus/docker/grafana/publish/erlang-distributions-compare-cluster-2019-10-14.pngbin0 -> 425838 bytes
-rw-r--r--deps/rabbitmq_prometheus/docker/grafana/publish/erlang-distributions-compare-container-2019-10-14.pngbin0 -> 1073311 bytes
-rw-r--r--deps/rabbitmq_prometheus/docker/grafana/publish/erlang-distributions-compare-instance-2019-10-14.pngbin0 -> 656370 bytes
-rw-r--r--deps/rabbitmq_prometheus/docker/grafana/publish/erlang-distributions-compare-logo-2019-10-14.pngbin0 -> 532752 bytes
-rw-r--r--deps/rabbitmq_prometheus/docker/grafana/publish/erlang-distributions-compare-overview-2019-10-14.pngbin0 -> 1632345 bytes
-rw-r--r--deps/rabbitmq_prometheus/docker/grafana/publish/erlang-memory-allocators-11350.md65
-rw-r--r--deps/rabbitmq_prometheus/docker/grafana/publish/erlang-memory-allocators-2019-12-03-original.pngbin0 -> 1077398 bytes
-rw-r--r--deps/rabbitmq_prometheus/docker/grafana/publish/erlang-memory-allocators-2019-12-03.jpgbin0 -> 490429 bytes
-rw-r--r--deps/rabbitmq_prometheus/docker/grafana/publish/erlang-memory-allocators-binary-2019-12-03-original.pngbin0 -> 1664726 bytes
-rw-r--r--deps/rabbitmq_prometheus/docker/grafana/publish/erlang-memory-allocators-binary-2019-12-03.jpgbin0 -> 499309 bytes
-rw-r--r--deps/rabbitmq_prometheus/docker/grafana/publish/erlang-memory-allocators-info-2019-12-03-original.pngbin0 -> 1187078 bytes
-rw-r--r--deps/rabbitmq_prometheus/docker/grafana/publish/erlang-memory-allocators-info-2019-12-03.jpgbin0 -> 494797 bytes
-rw-r--r--deps/rabbitmq_prometheus/docker/grafana/publish/erlang-memory-allocators-logo-2019-12-03.jpgbin0 -> 481892 bytes
-rw-r--r--deps/rabbitmq_prometheus/docker/grafana/publish/rabbitmq-logo.pngbin0 -> 30319 bytes
-rw-r--r--deps/rabbitmq_prometheus/docker/grafana/publish/rabbitmq-overview-10991.md40
-rw-r--r--deps/rabbitmq_prometheus/docker/grafana/publish/rabbitmq-overview-2019-10-21.pngbin0 -> 1358612 bytes
-rw-r--r--deps/rabbitmq_prometheus/docker/grafana/publish/rabbitmq-overview-collapsed-2019-10-21.pngbin0 -> 346600 bytes
-rw-r--r--deps/rabbitmq_prometheus/docker/grafana/publish/rabbitmq-overview-info-2019-10-21.pngbin0 -> 1095241 bytes
-rw-r--r--deps/rabbitmq_prometheus/docker/grafana/publish/rabbitmq-overview-logo-2019-10-21-original.pngbin0 -> 596822 bytes
-rw-r--r--deps/rabbitmq_prometheus/docker/grafana/publish/rabbitmq-overview-logo-2019-10-21.pngbin0 -> 515537 bytes
-rw-r--r--deps/rabbitmq_prometheus/docker/grafana/publish/rabbitmq-perf-test-instance-2019-10-03.pngbin0 -> 828709 bytes
-rw-r--r--deps/rabbitmq_prometheus/docker/grafana/publish/rabbitmq-perf-test-logo-2019-10-04-original.pngbin0 -> 856743 bytes
-rw-r--r--deps/rabbitmq_prometheus/docker/grafana/publish/rabbitmq-perf-test-logo-2019-10-04.pngbin0 -> 609176 bytes
-rw-r--r--deps/rabbitmq_prometheus/docker/grafana/publish/rabbitmq-perf-test-overview-2019-10-03.pngbin0 -> 2165512 bytes
-rw-r--r--deps/rabbitmq_prometheus/docker/grafana/publish/rabbitmq-perf-test-percentile-2019-10-03.pngbin0 -> 761039 bytes
-rw-r--r--deps/rabbitmq_prometheus/docker/grafana/publish/rabbitmq-perftest-6566.md23
-rw-r--r--deps/rabbitmq_prometheus/docker/grafana/publish/rabbitmq-quorum-queues-raft-11340.md29
-rw-r--r--deps/rabbitmq_prometheus/docker/grafana/publish/rabbitmq-quorum-queues-raft-2019-12-03-original.pngbin0 -> 1275422 bytes
-rw-r--r--deps/rabbitmq_prometheus/docker/grafana/publish/rabbitmq-quorum-queues-raft-2019-12-03.jpgbin0 -> 484839 bytes
-rw-r--r--deps/rabbitmq_prometheus/docker/grafana/publish/rabbitmq-quorum-queues-raft-info-2019-12-03.jpgbin0 -> 493088 bytes
-rw-r--r--deps/rabbitmq_prometheus/docker/grafana/publish/rabbitmq-quorum-queues-raft-info-2019-12-030-original.pngbin0 -> 1285589 bytes
-rw-r--r--deps/rabbitmq_prometheus/docker/grafana/publish/rabbitmq-quorum-queues-raft-logo-2019-12-03.pngbin0 -> 353860 bytes
-rw-r--r--deps/rabbitmq_prometheus/docker/grafana/publish/rabbitmq-quorum-queues-raft-node-2019-12-03-original.pngbin0 -> 693743 bytes
-rw-r--r--deps/rabbitmq_prometheus/docker/grafana/publish/rabbitmq-quorum-queues-raft-node-2019-12-03.jpgbin0 -> 498493 bytes
-rw-r--r--deps/rabbitmq_prometheus/docker/prometheus.yml71
-rw-r--r--deps/rabbitmq_prometheus/docker/rabbitmq-dist-tls-definitions.json49
-rw-r--r--deps/rabbitmq_prometheus/docker/rabbitmq-dist-tls.conf25
-rw-r--r--deps/rabbitmq_prometheus/docker/rabbitmq-env.conf4
-rw-r--r--deps/rabbitmq_prometheus/docker/rabbitmq-overview-definitions.json49
-rw-r--r--deps/rabbitmq_prometheus/docker/rabbitmq-overview.conf32
-rw-r--r--deps/rabbitmq_prometheus/docker/rabbitmq-qq-definitions.json23
-rw-r--r--deps/rabbitmq_prometheus/docker/rabbitmq-qq-env.conf2
-rw-r--r--deps/rabbitmq_prometheus/docker/rabbitmq-qq.conf32
-rw-r--r--deps/rabbitmq_prometheus/docker/rabbitmq-ssl/ca_certificate.pem20
-rw-r--r--deps/rabbitmq_prometheus/docker/rabbitmq-ssl/ca_key.pem28
-rw-r--r--deps/rabbitmq_prometheus/docker/rabbitmq-ssl/client_certificate.pem19
-rw-r--r--deps/rabbitmq_prometheus/docker/rabbitmq-ssl/client_key.p12bin0 -> 2405 bytes
-rw-r--r--deps/rabbitmq_prometheus/docker/rabbitmq-ssl/client_key.pem27
-rw-r--r--deps/rabbitmq_prometheus/docker/rabbitmq-ssl/server_certificate.pem21
-rw-r--r--deps/rabbitmq_prometheus/docker/rabbitmq-ssl/server_key.p12bin0 -> 2477 bytes
-rw-r--r--deps/rabbitmq_prometheus/docker/rabbitmq-ssl/server_key.pem27
-rw-r--r--deps/rabbitmq_prometheus/docker/rabbitmq-ssl_dist.config10
-rw-r--r--deps/rabbitmq_prometheus/erlang.mk7686
-rw-r--r--deps/rabbitmq_prometheus/metrics.md260
-rw-r--r--deps/rabbitmq_prometheus/priv/schema/rabbitmq_prometheus.schema127
-rw-r--r--deps/rabbitmq_prometheus/rabbitmq-components.mk359
-rw-r--r--deps/rabbitmq_prometheus/rabbitmq-disable-metrics-collector.conf1
-rw-r--r--deps/rabbitmq_prometheus/src/collectors/prometheus_rabbitmq_core_metrics_collector.erl531
-rw-r--r--deps/rabbitmq_prometheus/src/rabbit_prometheus_app.erl134
-rw-r--r--deps/rabbitmq_prometheus/src/rabbit_prometheus_dispatcher.erl24
-rw-r--r--deps/rabbitmq_prometheus/src/rabbit_prometheus_handler.erl149
-rw-r--r--deps/rabbitmq_prometheus/test/config_schema_SUITE.erl54
-rw-r--r--deps/rabbitmq_prometheus/test/config_schema_SUITE_data/certs/cacert.pem1
-rw-r--r--deps/rabbitmq_prometheus/test/config_schema_SUITE_data/certs/cert.pem1
-rw-r--r--deps/rabbitmq_prometheus/test/config_schema_SUITE_data/certs/key.pem1
-rw-r--r--deps/rabbitmq_prometheus/test/config_schema_SUITE_data/rabbitmq_prometheus.snippets280
-rw-r--r--deps/rabbitmq_prometheus/test/config_schema_SUITE_data/schema/rabbitmq_management.schema436
-rw-r--r--deps/rabbitmq_prometheus/test/config_schema_SUITE_data/schema/rabbitmq_management_agent.schema4
-rw-r--r--deps/rabbitmq_prometheus/test/config_schema_SUITE_data/schema/rabbitmq_prometheus.schema116
-rw-r--r--deps/rabbitmq_prometheus/test/rabbit_prometheus_http_SUITE.erl282
-rw-r--r--deps/rabbitmq_random_exchange/.gitignore19
-rw-r--r--deps/rabbitmq_random_exchange/.travis.yml61
-rw-r--r--deps/rabbitmq_random_exchange/CODE_OF_CONDUCT.md44
-rw-r--r--deps/rabbitmq_random_exchange/CONTRIBUTING.md38
-rw-r--r--deps/rabbitmq_random_exchange/LICENSE12
-rw-r--r--deps/rabbitmq_random_exchange/LICENSE-APACHE2204
-rw-r--r--deps/rabbitmq_random_exchange/LICENSE-MPL-RabbitMQ373
-rw-r--r--deps/rabbitmq_random_exchange/Makefile17
-rw-r--r--deps/rabbitmq_random_exchange/README.md89
-rw-r--r--deps/rabbitmq_random_exchange/erlang.mk7808
-rw-r--r--deps/rabbitmq_random_exchange/include/.gitignore0
-rw-r--r--deps/rabbitmq_random_exchange/rabbitmq-components.mk359
-rw-r--r--deps/rabbitmq_random_exchange/src/rabbit_exchange_type_random.erl60
-rw-r--r--deps/rabbitmq_random_exchange/test/.gitkeep0
-rw-r--r--deps/rabbitmq_recent_history_exchange/.gitignore19
-rw-r--r--deps/rabbitmq_recent_history_exchange/.travis.yml61
-rw-r--r--deps/rabbitmq_recent_history_exchange/CODE_OF_CONDUCT.md44
-rw-r--r--deps/rabbitmq_recent_history_exchange/CONTRIBUTING.md38
-rw-r--r--deps/rabbitmq_recent_history_exchange/LICENSE4
-rw-r--r--deps/rabbitmq_recent_history_exchange/LICENSE-MPL-RabbitMQ373
-rw-r--r--deps/rabbitmq_recent_history_exchange/Makefile21
-rw-r--r--deps/rabbitmq_recent_history_exchange/README.md83
-rw-r--r--deps/rabbitmq_recent_history_exchange/erlang.mk7808
-rw-r--r--deps/rabbitmq_recent_history_exchange/etc/rabbit-hare.config3
-rw-r--r--deps/rabbitmq_recent_history_exchange/etc/rabbit-test.config3
-rw-r--r--deps/rabbitmq_recent_history_exchange/include/rabbit_recent_history.hrl10
-rw-r--r--deps/rabbitmq_recent_history_exchange/rabbitmq-components.mk359
-rw-r--r--deps/rabbitmq_recent_history_exchange/src/rabbit_exchange_type_recent_history.erl206
-rw-r--r--deps/rabbitmq_recent_history_exchange/test/system_SUITE.erl280
-rw-r--r--deps/rabbitmq_sharding/.gitignore18
-rw-r--r--deps/rabbitmq_sharding/.hgignore7
-rw-r--r--deps/rabbitmq_sharding/.travis.yml61
-rw-r--r--deps/rabbitmq_sharding/CODE_OF_CONDUCT.md44
-rw-r--r--deps/rabbitmq_sharding/CONTRIBUTING.md38
-rw-r--r--deps/rabbitmq_sharding/LICENSE4
-rw-r--r--deps/rabbitmq_sharding/LICENSE-MPL-RabbitMQ373
-rw-r--r--deps/rabbitmq_sharding/LICENSE-MPL2373
-rw-r--r--deps/rabbitmq_sharding/Makefile21
-rw-r--r--deps/rabbitmq_sharding/README.extra.md79
-rw-r--r--deps/rabbitmq_sharding/README.md220
-rw-r--r--deps/rabbitmq_sharding/docs/sharded_queues.pngbin0 -> 41926 bytes
-rw-r--r--deps/rabbitmq_sharding/erlang.mk7808
-rw-r--r--deps/rabbitmq_sharding/etc/rabbit-hare.config3
-rw-r--r--deps/rabbitmq_sharding/etc/rabbit-test.config3
-rwxr-xr-xdeps/rabbitmq_sharding/etc/rkey.sh8
-rw-r--r--deps/rabbitmq_sharding/rabbitmq-components.mk359
-rw-r--r--deps/rabbitmq_sharding/src/rabbit_sharding_exchange_decorator.erl89
-rw-r--r--deps/rabbitmq_sharding/src/rabbit_sharding_exchange_type_modulus_hash.erl60
-rw-r--r--deps/rabbitmq_sharding/src/rabbit_sharding_interceptor.erl170
-rw-r--r--deps/rabbitmq_sharding/src/rabbit_sharding_policy_validator.erl61
-rw-r--r--deps/rabbitmq_sharding/src/rabbit_sharding_shard.erl133
-rw-r--r--deps/rabbitmq_sharding/src/rabbit_sharding_util.erl48
-rw-r--r--deps/rabbitmq_sharding/test/src/rabbit_hash_exchange_SUITE.erl147
-rw-r--r--deps/rabbitmq_sharding/test/src/rabbit_sharding_SUITE.erl339
-rw-r--r--deps/rabbitmq_shovel/.gitignore22
-rw-r--r--deps/rabbitmq_shovel/.travis.yml61
-rw-r--r--deps/rabbitmq_shovel/CODE_OF_CONDUCT.md44
-rw-r--r--deps/rabbitmq_shovel/CONTRIBUTING.md99
-rw-r--r--deps/rabbitmq_shovel/LICENSE4
-rw-r--r--deps/rabbitmq_shovel/LICENSE-MPL-RabbitMQ373
-rw-r--r--deps/rabbitmq_shovel/Makefile40
-rw-r--r--deps/rabbitmq_shovel/README.md23
-rw-r--r--deps/rabbitmq_shovel/erlang.mk7808
-rw-r--r--deps/rabbitmq_shovel/include/rabbit_shovel.hrl31
-rw-r--r--deps/rabbitmq_shovel/rabbitmq-components.mk359
-rw-r--r--deps/rabbitmq_shovel/src/Elixir.RabbitMQ.CLI.Ctl.Commands.DeleteShovelCommand.erl84
-rw-r--r--deps/rabbitmq_shovel/src/Elixir.RabbitMQ.CLI.Ctl.Commands.RestartShovelCommand.erl84
-rw-r--r--deps/rabbitmq_shovel/src/Elixir.RabbitMQ.CLI.Ctl.Commands.ShovelStatusCommand.erl129
-rw-r--r--deps/rabbitmq_shovel/src/rabbit_amqp091_shovel.erl520
-rw-r--r--deps/rabbitmq_shovel/src/rabbit_amqp10_shovel.erl407
-rw-r--r--deps/rabbitmq_shovel/src/rabbit_shovel.erl21
-rw-r--r--deps/rabbitmq_shovel/src/rabbit_shovel_behaviour.erl172
-rw-r--r--deps/rabbitmq_shovel/src/rabbit_shovel_config.erl176
-rw-r--r--deps/rabbitmq_shovel/src/rabbit_shovel_dyn_worker_sup.erl48
-rw-r--r--deps/rabbitmq_shovel/src/rabbit_shovel_dyn_worker_sup_sup.erl71
-rw-r--r--deps/rabbitmq_shovel/src/rabbit_shovel_parameters.erl465
-rw-r--r--deps/rabbitmq_shovel/src/rabbit_shovel_status.erl88
-rw-r--r--deps/rabbitmq_shovel/src/rabbit_shovel_sup.erl77
-rw-r--r--deps/rabbitmq_shovel/src/rabbit_shovel_util.erl54
-rw-r--r--deps/rabbitmq_shovel/src/rabbit_shovel_worker.erl224
-rw-r--r--deps/rabbitmq_shovel/src/rabbit_shovel_worker_sup.erl32
-rw-r--r--deps/rabbitmq_shovel/test/amqp10_SUITE.erl293
-rw-r--r--deps/rabbitmq_shovel/test/amqp10_dynamic_SUITE.erl410
-rw-r--r--deps/rabbitmq_shovel/test/amqp10_shovel_SUITE.erl102
-rw-r--r--deps/rabbitmq_shovel/test/config_SUITE.erl130
-rw-r--r--deps/rabbitmq_shovel/test/configuration_SUITE.erl359
-rw-r--r--deps/rabbitmq_shovel/test/delete_shovel_command_SUITE.erl78
-rw-r--r--deps/rabbitmq_shovel/test/dynamic_SUITE.erl494
-rw-r--r--deps/rabbitmq_shovel/test/parameters_SUITE.erl350
-rw-r--r--deps/rabbitmq_shovel/test/shovel_status_command_SUITE.erl127
-rw-r--r--deps/rabbitmq_shovel/test/shovel_test_utils.erl51
-rw-r--r--deps/rabbitmq_shovel_management/.gitignore17
-rw-r--r--deps/rabbitmq_shovel_management/.travis.yml61
-rw-r--r--deps/rabbitmq_shovel_management/CODE_OF_CONDUCT.md44
-rw-r--r--deps/rabbitmq_shovel_management/CONTRIBUTING.md99
-rw-r--r--deps/rabbitmq_shovel_management/LICENSE4
-rw-r--r--deps/rabbitmq_shovel_management/LICENSE-MPL-RabbitMQ373
-rw-r--r--deps/rabbitmq_shovel_management/Makefile21
-rw-r--r--deps/rabbitmq_shovel_management/README.md96
-rw-r--r--deps/rabbitmq_shovel_management/erlang.mk7808
-rw-r--r--deps/rabbitmq_shovel_management/priv/www/js/shovel.js195
-rw-r--r--deps/rabbitmq_shovel_management/priv/www/js/tmpl/dynamic-shovel.ejs57
-rw-r--r--deps/rabbitmq_shovel_management/priv/www/js/tmpl/dynamic-shovels.ejs365
-rw-r--r--deps/rabbitmq_shovel_management/priv/www/js/tmpl/shovels.ejs78
-rw-r--r--deps/rabbitmq_shovel_management/rabbitmq-components.mk359
-rw-r--r--deps/rabbitmq_shovel_management/src/rabbit_shovel_mgmt.erl158
-rw-r--r--deps/rabbitmq_shovel_management/test/http_SUITE.erl336
-rw-r--r--deps/rabbitmq_stomp/.gitignore29
-rw-r--r--deps/rabbitmq_stomp/.travis.yml61
-rw-r--r--deps/rabbitmq_stomp/CODE_OF_CONDUCT.md44
-rw-r--r--deps/rabbitmq_stomp/CONTRIBUTING.md99
-rw-r--r--deps/rabbitmq_stomp/LICENSE4
-rw-r--r--deps/rabbitmq_stomp/LICENSE-MPL-RabbitMQ373
-rw-r--r--deps/rabbitmq_stomp/Makefile46
-rw-r--r--deps/rabbitmq_stomp/NOTES71
-rw-r--r--deps/rabbitmq_stomp/README.md18
-rw-r--r--deps/rabbitmq_stomp/erlang.mk7808
-rwxr-xr-xdeps/rabbitmq_stomp/examples/perl/rabbitmq_stomp_recv.pl13
-rwxr-xr-xdeps/rabbitmq_stomp/examples/perl/rabbitmq_stomp_rpc_client.pl14
-rwxr-xr-xdeps/rabbitmq_stomp/examples/perl/rabbitmq_stomp_rpc_service.pl21
-rwxr-xr-xdeps/rabbitmq_stomp/examples/perl/rabbitmq_stomp_send.pl9
-rwxr-xr-xdeps/rabbitmq_stomp/examples/perl/rabbitmq_stomp_send_many.pl11
-rwxr-xr-xdeps/rabbitmq_stomp/examples/perl/rabbitmq_stomp_slow_recv.pl14
-rw-r--r--deps/rabbitmq_stomp/examples/ruby/amq-sender.rb10
-rw-r--r--deps/rabbitmq_stomp/examples/ruby/cb-receiver.rb8
-rw-r--r--deps/rabbitmq_stomp/examples/ruby/cb-sender.rb6
-rw-r--r--deps/rabbitmq_stomp/examples/ruby/cb-slow-receiver.rb13
-rw-r--r--deps/rabbitmq_stomp/examples/ruby/exchange-receiver.rb15
-rw-r--r--deps/rabbitmq_stomp/examples/ruby/exchange-sender.rb12
-rw-r--r--deps/rabbitmq_stomp/examples/ruby/persistent-receiver.rb11
-rw-r--r--deps/rabbitmq_stomp/examples/ruby/persistent-sender.rb13
-rw-r--r--deps/rabbitmq_stomp/examples/ruby/temp-queue-client.rb9
-rw-r--r--deps/rabbitmq_stomp/examples/ruby/temp-queue-service.rb15
-rw-r--r--deps/rabbitmq_stomp/examples/ruby/topic-broadcast-receiver.rb11
-rw-r--r--deps/rabbitmq_stomp/examples/ruby/topic-broadcast-with-unsubscribe.rb13
-rw-r--r--deps/rabbitmq_stomp/examples/ruby/topic-sender.rb7
-rw-r--r--deps/rabbitmq_stomp/include/rabbit_stomp.hrl42
-rw-r--r--deps/rabbitmq_stomp/include/rabbit_stomp_frame.hrl8
-rw-r--r--deps/rabbitmq_stomp/include/rabbit_stomp_headers.hrl73
-rw-r--r--deps/rabbitmq_stomp/priv/schema/rabbitmq_stomp.schema237
-rw-r--r--deps/rabbitmq_stomp/rabbitmq-components.mk359
-rw-r--r--deps/rabbitmq_stomp/src/Elixir.RabbitMQ.CLI.Ctl.Commands.ListStompConnectionsCommand.erl95
-rw-r--r--deps/rabbitmq_stomp/src/rabbit_stomp.erl131
-rw-r--r--deps/rabbitmq_stomp/src/rabbit_stomp_client_sup.erl50
-rw-r--r--deps/rabbitmq_stomp/src/rabbit_stomp_connection_info.erl25
-rw-r--r--deps/rabbitmq_stomp/src/rabbit_stomp_frame.erl266
-rw-r--r--deps/rabbitmq_stomp/src/rabbit_stomp_internal_event_handler.erl46
-rw-r--r--deps/rabbitmq_stomp/src/rabbit_stomp_processor.erl1220
-rw-r--r--deps/rabbitmq_stomp/src/rabbit_stomp_reader.erl465
-rw-r--r--deps/rabbitmq_stomp/src/rabbit_stomp_sup.erl83
-rw-r--r--deps/rabbitmq_stomp/src/rabbit_stomp_util.erl418
-rw-r--r--deps/rabbitmq_stomp/test/amqqueue_SUITE.erl319
-rw-r--r--deps/rabbitmq_stomp/test/command_SUITE.erl127
-rw-r--r--deps/rabbitmq_stomp/test/config_schema_SUITE.erl55
-rw-r--r--deps/rabbitmq_stomp/test/config_schema_SUITE_data/certs/cacert.pem1
-rw-r--r--deps/rabbitmq_stomp/test/config_schema_SUITE_data/certs/cert.pem1
-rw-r--r--deps/rabbitmq_stomp/test/config_schema_SUITE_data/certs/key.pem1
-rw-r--r--deps/rabbitmq_stomp/test/config_schema_SUITE_data/rabbitmq_stomp.snippets97
-rw-r--r--deps/rabbitmq_stomp/test/connections_SUITE.erl160
-rw-r--r--deps/rabbitmq_stomp/test/frame_SUITE.erl191
-rw-r--r--deps/rabbitmq_stomp/test/proxy_protocol_SUITE.erl104
-rw-r--r--deps/rabbitmq_stomp/test/python_SUITE.erl72
-rw-r--r--deps/rabbitmq_stomp/test/python_SUITE_data/deps/pika/Makefile27
-rw-r--r--deps/rabbitmq_stomp/test/python_SUITE_data/deps/stomppy/Makefile27
-rw-r--r--deps/rabbitmq_stomp/test/python_SUITE_data/src/ack.py252
-rw-r--r--deps/rabbitmq_stomp/test/python_SUITE_data/src/amqp_headers.py42
-rw-r--r--deps/rabbitmq_stomp/test/python_SUITE_data/src/base.py259
-rw-r--r--deps/rabbitmq_stomp/test/python_SUITE_data/src/connect_options.py51
-rw-r--r--deps/rabbitmq_stomp/test/python_SUITE_data/src/destinations.py536
-rw-r--r--deps/rabbitmq_stomp/test/python_SUITE_data/src/errors.py101
-rw-r--r--deps/rabbitmq_stomp/test/python_SUITE_data/src/lifecycle.py187
-rw-r--r--deps/rabbitmq_stomp/test/python_SUITE_data/src/parsing.py331
-rw-r--r--deps/rabbitmq_stomp/test/python_SUITE_data/src/queue_properties.py87
-rw-r--r--deps/rabbitmq_stomp/test/python_SUITE_data/src/redelivered.py40
-rw-r--r--deps/rabbitmq_stomp/test/python_SUITE_data/src/reliability.py41
-rw-r--r--deps/rabbitmq_stomp/test/python_SUITE_data/src/ssl_lifecycle.py81
-rwxr-xr-xdeps/rabbitmq_stomp/test/python_SUITE_data/src/test.py21
-rwxr-xr-xdeps/rabbitmq_stomp/test/python_SUITE_data/src/test_connect_options.py15
-rw-r--r--deps/rabbitmq_stomp/test/python_SUITE_data/src/test_runner.py26
-rwxr-xr-xdeps/rabbitmq_stomp/test/python_SUITE_data/src/test_ssl.py17
-rw-r--r--deps/rabbitmq_stomp/test/python_SUITE_data/src/test_util.py52
-rw-r--r--deps/rabbitmq_stomp/test/python_SUITE_data/src/topic_permissions.py52
-rw-r--r--deps/rabbitmq_stomp/test/python_SUITE_data/src/transactions.py61
-rw-r--r--deps/rabbitmq_stomp/test/python_SUITE_data/src/x_queue_name.py71
-rw-r--r--deps/rabbitmq_stomp/test/python_SUITE_data/src/x_queue_type_quorum.py62
-rw-r--r--deps/rabbitmq_stomp/test/src/rabbit_stomp_client.erl75
-rw-r--r--deps/rabbitmq_stomp/test/src/rabbit_stomp_publish_test.erl80
-rw-r--r--deps/rabbitmq_stomp/test/src/test.config13
-rw-r--r--deps/rabbitmq_stomp/test/topic_SUITE.erl170
-rw-r--r--deps/rabbitmq_stomp/test/util_SUITE.erl242
-rw-r--r--deps/rabbitmq_stream/.gitignore56
-rw-r--r--deps/rabbitmq_stream/CODE_OF_CONDUCT.md44
-rw-r--r--deps/rabbitmq_stream/CONTRIBUTING.md38
-rw-r--r--deps/rabbitmq_stream/LICENSE5
-rw-r--r--deps/rabbitmq_stream/LICENSE-MPL-RabbitMQ370
-rw-r--r--deps/rabbitmq_stream/Makefile34
-rw-r--r--deps/rabbitmq_stream/README.adoc53
-rw-r--r--deps/rabbitmq_stream/docs/PROTOCOL.adoc499
-rw-r--r--deps/rabbitmq_stream/erlang.mk7712
-rw-r--r--deps/rabbitmq_stream/include/rabbit_stream.hrl70
-rw-r--r--deps/rabbitmq_stream/priv/schema/rabbitmq_stream.schema158
-rw-r--r--deps/rabbitmq_stream/rabbitmq-components.mk359
-rw-r--r--deps/rabbitmq_stream/src/Elixir.RabbitMQ.CLI.Ctl.Commands.ListStreamConnectionsCommand.erl95
-rw-r--r--deps/rabbitmq_stream/src/rabbit_stream.erl103
-rw-r--r--deps/rabbitmq_stream/src/rabbit_stream_connection_sup.erl49
-rw-r--r--deps/rabbitmq_stream/src/rabbit_stream_manager.erl262
-rw-r--r--deps/rabbitmq_stream/src/rabbit_stream_reader.erl1274
-rw-r--r--deps/rabbitmq_stream/src/rabbit_stream_sup.erl61
-rw-r--r--deps/rabbitmq_stream/src/rabbit_stream_utils.erl125
-rw-r--r--deps/rabbitmq_stream/test/command_SUITE.erl136
-rw-r--r--deps/rabbitmq_stream/test/config_schema_SUITE.erl53
-rw-r--r--deps/rabbitmq_stream/test/config_schema_SUITE_data/rabbitmq_stream.snippets73
-rw-r--r--deps/rabbitmq_stream/test/rabbit_stream_SUITE.erl266
-rw-r--r--deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/.gitignore3
-rw-r--r--deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/.mvn/wrapper/MavenWrapperDownloader.java117
-rw-r--r--deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/.mvn/wrapper/maven-wrapper.properties2
-rw-r--r--deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/Makefile18
-rwxr-xr-xdeps/rabbitmq_stream/test/rabbit_stream_SUITE_data/mvnw310
-rw-r--r--deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/mvnw.cmd182
-rw-r--r--deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/pom.xml143
-rw-r--r--deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/src/test/java/com/rabbitmq/stream/ClusterSizeTest.java65
-rw-r--r--deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/src/test/java/com/rabbitmq/stream/FailureTest.java541
-rw-r--r--deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/src/test/java/com/rabbitmq/stream/Host.java117
-rw-r--r--deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/src/test/java/com/rabbitmq/stream/LeaderLocatorTest.java170
-rw-r--r--deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/src/test/java/com/rabbitmq/stream/StreamTest.java173
-rw-r--r--deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/src/test/java/com/rabbitmq/stream/TestUtils.java179
-rw-r--r--deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/src/test/resources/logback-test.xml13
-rw-r--r--deps/rabbitmq_top/.gitignore17
-rw-r--r--deps/rabbitmq_top/.travis.yml61
-rw-r--r--deps/rabbitmq_top/CODE_OF_CONDUCT.md44
-rw-r--r--deps/rabbitmq_top/CONTRIBUTING.md38
-rw-r--r--deps/rabbitmq_top/LICENSE5
-rw-r--r--deps/rabbitmq_top/LICENSE-MPL-RabbitMQ373
-rw-r--r--deps/rabbitmq_top/Makefile21
-rw-r--r--deps/rabbitmq_top/README.md67
-rw-r--r--deps/rabbitmq_top/erlang.mk7808
-rw-r--r--deps/rabbitmq_top/priv/www/js/tmpl/ets_tables.ejs54
-rw-r--r--deps/rabbitmq_top/priv/www/js/tmpl/process.ejs58
-rw-r--r--deps/rabbitmq_top/priv/www/js/tmpl/processes.ejs54
-rw-r--r--deps/rabbitmq_top/priv/www/js/top.js128
-rw-r--r--deps/rabbitmq_top/rabbitmq-components.mk359
-rw-r--r--deps/rabbitmq_top/src/rabbit_top_app.erl17
-rw-r--r--deps/rabbitmq_top/src/rabbit_top_extension.erl18
-rw-r--r--deps/rabbitmq_top/src/rabbit_top_sup.erl25
-rw-r--r--deps/rabbitmq_top/src/rabbit_top_util.erl142
-rw-r--r--deps/rabbitmq_top/src/rabbit_top_wm_ets_tables.erl57
-rw-r--r--deps/rabbitmq_top/src/rabbit_top_wm_process.erl68
-rw-r--r--deps/rabbitmq_top/src/rabbit_top_wm_processes.erl51
-rw-r--r--deps/rabbitmq_top/src/rabbit_top_worker.erl171
-rw-r--r--deps/rabbitmq_tracing/.gitignore17
-rw-r--r--deps/rabbitmq_tracing/.travis.yml61
-rw-r--r--deps/rabbitmq_tracing/CODE_OF_CONDUCT.md44
-rw-r--r--deps/rabbitmq_tracing/CONTRIBUTING.md123
-rw-r--r--deps/rabbitmq_tracing/LICENSE5
-rw-r--r--deps/rabbitmq_tracing/LICENSE-MPL-RabbitMQ373
-rw-r--r--deps/rabbitmq_tracing/Makefile30
-rw-r--r--deps/rabbitmq_tracing/README.md67
-rw-r--r--deps/rabbitmq_tracing/erlang.mk7808
-rw-r--r--deps/rabbitmq_tracing/priv/www/js/tmpl/traces.ejs174
-rw-r--r--deps/rabbitmq_tracing/priv/www/js/tracing.js60
-rw-r--r--deps/rabbitmq_tracing/rabbitmq-components.mk359
-rw-r--r--deps/rabbitmq_tracing/src/rabbit_tracing_app.erl17
-rw-r--r--deps/rabbitmq_tracing/src/rabbit_tracing_consumer.erl247
-rw-r--r--deps/rabbitmq_tracing/src/rabbit_tracing_consumer_sup.erl25
-rw-r--r--deps/rabbitmq_tracing/src/rabbit_tracing_files.erl48
-rw-r--r--deps/rabbitmq_tracing/src/rabbit_tracing_mgmt.erl25
-rw-r--r--deps/rabbitmq_tracing/src/rabbit_tracing_sup.erl50
-rw-r--r--deps/rabbitmq_tracing/src/rabbit_tracing_traces.erl116
-rw-r--r--deps/rabbitmq_tracing/src/rabbit_tracing_util.erl32
-rw-r--r--deps/rabbitmq_tracing/src/rabbit_tracing_wm_file.erl52
-rw-r--r--deps/rabbitmq_tracing/src/rabbit_tracing_wm_files.erl30
-rw-r--r--deps/rabbitmq_tracing/src/rabbit_tracing_wm_trace.erl105
-rw-r--r--deps/rabbitmq_tracing/src/rabbit_tracing_wm_traces.erl39
-rw-r--r--deps/rabbitmq_tracing/test/rabbit_tracing_SUITE.erl235
-rw-r--r--deps/rabbitmq_trust_store/.gitignore21
-rw-r--r--deps/rabbitmq_trust_store/.travis.yml61
-rw-r--r--deps/rabbitmq_trust_store/CODE_OF_CONDUCT.md44
-rw-r--r--deps/rabbitmq_trust_store/CONTRIBUTING.md38
-rw-r--r--deps/rabbitmq_trust_store/LICENSE5
-rw-r--r--deps/rabbitmq_trust_store/LICENSE-MPL-RabbitMQ373
-rw-r--r--deps/rabbitmq_trust_store/Makefile29
-rw-r--r--deps/rabbitmq_trust_store/README.md210
-rw-r--r--deps/rabbitmq_trust_store/erlang.mk7808
-rw-r--r--deps/rabbitmq_trust_store/examples/rabbit_trust_store_opera_com_provider.erl69
-rw-r--r--deps/rabbitmq_trust_store/examples/rabbitmq_trust_store_django/.gitignore4
-rw-r--r--deps/rabbitmq_trust_store/examples/rabbitmq_trust_store_django/README.md49
-rwxr-xr-xdeps/rabbitmq_trust_store/examples/rabbitmq_trust_store_django/manage.py10
-rw-r--r--deps/rabbitmq_trust_store/examples/rabbitmq_trust_store_django/rabbitmq_trust_store_django/__init__.py0
-rw-r--r--deps/rabbitmq_trust_store/examples/rabbitmq_trust_store_django/rabbitmq_trust_store_django/settings.py156
-rw-r--r--deps/rabbitmq_trust_store/examples/rabbitmq_trust_store_django/rabbitmq_trust_store_django/trust_store/__init__.py0
-rw-r--r--deps/rabbitmq_trust_store/examples/rabbitmq_trust_store_django/rabbitmq_trust_store_django/trust_store/apps.py7
-rw-r--r--deps/rabbitmq_trust_store/examples/rabbitmq_trust_store_django/rabbitmq_trust_store_django/trust_store/tests.py3
-rw-r--r--deps/rabbitmq_trust_store/examples/rabbitmq_trust_store_django/rabbitmq_trust_store_django/trust_store/urls.py7
-rw-r--r--deps/rabbitmq_trust_store/examples/rabbitmq_trust_store_django/rabbitmq_trust_store_django/trust_store/views.py46
-rw-r--r--deps/rabbitmq_trust_store/examples/rabbitmq_trust_store_django/rabbitmq_trust_store_django/urls.py21
-rw-r--r--deps/rabbitmq_trust_store/examples/rabbitmq_trust_store_django/rabbitmq_trust_store_django/wsgi.py16
-rw-r--r--deps/rabbitmq_trust_store/priv/schema/rabbitmq_trust_store.schema145
-rw-r--r--deps/rabbitmq_trust_store/rabbitmq-components.mk359
-rw-r--r--deps/rabbitmq_trust_store/src/rabbit_trust_store.erl352
-rw-r--r--deps/rabbitmq_trust_store/src/rabbit_trust_store_app.erl78
-rw-r--r--deps/rabbitmq_trust_store/src/rabbit_trust_store_certificate_provider.erl31
-rw-r--r--deps/rabbitmq_trust_store/src/rabbit_trust_store_file_provider.erl110
-rw-r--r--deps/rabbitmq_trust_store/src/rabbit_trust_store_http_provider.erl111
-rw-r--r--deps/rabbitmq_trust_store/src/rabbit_trust_store_sup.erl39
-rw-r--r--deps/rabbitmq_trust_store/test/config_schema_SUITE.erl55
-rw-r--r--deps/rabbitmq_trust_store/test/config_schema_SUITE_data/certs/cacert.pem1
-rw-r--r--deps/rabbitmq_trust_store/test/config_schema_SUITE_data/certs/cert.pem1
-rw-r--r--deps/rabbitmq_trust_store/test/config_schema_SUITE_data/certs/key.pem1
-rw-r--r--deps/rabbitmq_trust_store/test/config_schema_SUITE_data/rabbitmq_trust_store.snippets28
-rw-r--r--deps/rabbitmq_trust_store/test/system_SUITE.erl956
-rw-r--r--deps/rabbitmq_web_dispatch/.gitignore17
-rw-r--r--deps/rabbitmq_web_dispatch/.travis.yml61
-rw-r--r--deps/rabbitmq_web_dispatch/CODE_OF_CONDUCT.md44
-rw-r--r--deps/rabbitmq_web_dispatch/CONTRIBUTING.md38
-rw-r--r--deps/rabbitmq_web_dispatch/LICENSE5
-rw-r--r--deps/rabbitmq_web_dispatch/LICENSE-MPL-RabbitMQ373
-rw-r--r--deps/rabbitmq_web_dispatch/Makefile26
-rw-r--r--deps/rabbitmq_web_dispatch/README.md14
-rw-r--r--deps/rabbitmq_web_dispatch/erlang.mk7808
-rw-r--r--deps/rabbitmq_web_dispatch/rabbitmq-components.mk359
-rw-r--r--deps/rabbitmq_web_dispatch/src/rabbit_cowboy_middleware.erl24
-rw-r--r--deps/rabbitmq_web_dispatch/src/rabbit_cowboy_redirect.erl15
-rw-r--r--deps/rabbitmq_web_dispatch/src/rabbit_cowboy_stream_h.erl63
-rw-r--r--deps/rabbitmq_web_dispatch/src/rabbit_web_dispatch.erl88
-rw-r--r--deps/rabbitmq_web_dispatch/src/rabbit_web_dispatch_app.erl21
-rw-r--r--deps/rabbitmq_web_dispatch/src/rabbit_web_dispatch_listing_handler.erl27
-rw-r--r--deps/rabbitmq_web_dispatch/src/rabbit_web_dispatch_registry.erl208
-rw-r--r--deps/rabbitmq_web_dispatch/src/rabbit_web_dispatch_sup.erl131
-rw-r--r--deps/rabbitmq_web_dispatch/src/rabbit_web_dispatch_util.erl55
-rw-r--r--deps/rabbitmq_web_dispatch/src/webmachine_log.erl239
-rw-r--r--deps/rabbitmq_web_dispatch/src/webmachine_log_handler.erl128
-rw-r--r--deps/rabbitmq_web_dispatch/src/webmachine_logger.hrl16
-rw-r--r--deps/rabbitmq_web_dispatch/test/priv/www/index.html7
-rw-r--r--deps/rabbitmq_web_dispatch/test/rabbit_web_dispatch_SUITE.erl197
-rw-r--r--deps/rabbitmq_web_dispatch/test/rabbit_web_dispatch_unit_SUITE.erl73
-rw-r--r--deps/rabbitmq_web_mqtt/.gitignore17
-rw-r--r--deps/rabbitmq_web_mqtt/.travis.yml61
-rw-r--r--deps/rabbitmq_web_mqtt/CODE_OF_CONDUCT.md44
-rw-r--r--deps/rabbitmq_web_mqtt/CONTRIBUTING.md99
-rw-r--r--deps/rabbitmq_web_mqtt/LICENSE5
-rw-r--r--deps/rabbitmq_web_mqtt/LICENSE-MPL-RabbitMQ373
-rw-r--r--deps/rabbitmq_web_mqtt/Makefile33
-rw-r--r--deps/rabbitmq_web_mqtt/README.md33
-rw-r--r--deps/rabbitmq_web_mqtt/erlang.mk7808
-rw-r--r--deps/rabbitmq_web_mqtt/priv/schema/rabbitmq_web_mqtt.schema183
-rw-r--r--deps/rabbitmq_web_mqtt/rabbitmq-components.mk359
-rw-r--r--deps/rabbitmq_web_mqtt/src/rabbit_web_mqtt_app.erl192
-rw-r--r--deps/rabbitmq_web_mqtt/src/rabbit_web_mqtt_connection_info.erl26
-rw-r--r--deps/rabbitmq_web_mqtt/src/rabbit_web_mqtt_connection_sup.erl53
-rw-r--r--deps/rabbitmq_web_mqtt/src/rabbit_web_mqtt_handler.erl281
-rw-r--r--deps/rabbitmq_web_mqtt/src/rabbit_web_mqtt_middleware.erl22
-rw-r--r--deps/rabbitmq_web_mqtt/test/config_schema_SUITE.erl55
-rw-r--r--deps/rabbitmq_web_mqtt/test/config_schema_SUITE_data/certs/cacert.pem1
-rw-r--r--deps/rabbitmq_web_mqtt/test/config_schema_SUITE_data/certs/cert.pem1
-rw-r--r--deps/rabbitmq_web_mqtt/test/config_schema_SUITE_data/certs/key.pem1
-rw-r--r--deps/rabbitmq_web_mqtt/test/config_schema_SUITE_data/rabbitmq_web_mqtt.snippets220
-rw-r--r--deps/rabbitmq_web_mqtt/test/proxy_protocol_SUITE.erl105
-rw-r--r--deps/rabbitmq_web_mqtt/test/src/emqttc_packet.hrl240
-rw-r--r--deps/rabbitmq_web_mqtt/test/src/emqttc_parser.erl215
-rw-r--r--deps/rabbitmq_web_mqtt/test/src/emqttc_serialiser.erl157
-rw-r--r--deps/rabbitmq_web_mqtt/test/src/rabbit_ws_test_util.erl30
-rw-r--r--deps/rabbitmq_web_mqtt/test/src/rfc6455_client.erl306
-rw-r--r--deps/rabbitmq_web_mqtt/test/src/system_SUITE.erl244
-rw-r--r--deps/rabbitmq_web_mqtt_examples/.gitignore17
-rw-r--r--deps/rabbitmq_web_mqtt_examples/CODE_OF_CONDUCT.md44
-rw-r--r--deps/rabbitmq_web_mqtt_examples/CONTRIBUTING.md38
-rw-r--r--deps/rabbitmq_web_mqtt_examples/LICENSE9
-rw-r--r--deps/rabbitmq_web_mqtt_examples/LICENSE-MPL-RabbitMQ373
-rw-r--r--deps/rabbitmq_web_mqtt_examples/Makefile23
-rw-r--r--deps/rabbitmq_web_mqtt_examples/README.md15
-rw-r--r--deps/rabbitmq_web_mqtt_examples/erlang.mk7808
-rw-r--r--deps/rabbitmq_web_mqtt_examples/priv/bunny.html169
-rw-r--r--deps/rabbitmq_web_mqtt_examples/priv/bunny.pngbin0 -> 38296 bytes
-rw-r--r--deps/rabbitmq_web_mqtt_examples/priv/echo.html135
-rw-r--r--deps/rabbitmq_web_mqtt_examples/priv/index.html15
-rw-r--r--deps/rabbitmq_web_mqtt_examples/priv/main.css38
-rw-r--r--deps/rabbitmq_web_mqtt_examples/priv/mqttws31.js2143
-rwxr-xr-xdeps/rabbitmq_web_mqtt_examples/priv/pencil.curbin0 -> 2238 bytes
-rw-r--r--deps/rabbitmq_web_mqtt_examples/rabbitmq-components.mk359
-rw-r--r--deps/rabbitmq_web_mqtt_examples/src/rabbit_web_mqtt_examples_app.erl29
-rw-r--r--deps/rabbitmq_web_stomp/.gitignore19
-rw-r--r--deps/rabbitmq_web_stomp/.travis.yml61
-rw-r--r--deps/rabbitmq_web_stomp/CODE_OF_CONDUCT.md44
-rw-r--r--deps/rabbitmq_web_stomp/CONTRIBUTING.md38
-rw-r--r--deps/rabbitmq_web_stomp/LICENSE5
-rw-r--r--deps/rabbitmq_web_stomp/LICENSE-MPL-RabbitMQ373
-rw-r--r--deps/rabbitmq_web_stomp/Makefile39
-rw-r--r--deps/rabbitmq_web_stomp/README.md34
-rw-r--r--deps/rabbitmq_web_stomp/erlang.mk7808
-rw-r--r--deps/rabbitmq_web_stomp/priv/schema/rabbitmq_web_stomp.schema191
-rw-r--r--deps/rabbitmq_web_stomp/rabbitmq-components.mk359
-rw-r--r--deps/rabbitmq_web_stomp/src/rabbit_web_stomp_app.erl27
-rw-r--r--deps/rabbitmq_web_stomp/src/rabbit_web_stomp_connection_sup.erl51
-rw-r--r--deps/rabbitmq_web_stomp/src/rabbit_web_stomp_handler.erl339
-rw-r--r--deps/rabbitmq_web_stomp/src/rabbit_web_stomp_internal_event_handler.erl37
-rw-r--r--deps/rabbitmq_web_stomp/src/rabbit_web_stomp_listener.erl212
-rw-r--r--deps/rabbitmq_web_stomp/src/rabbit_web_stomp_middleware.erl22
-rw-r--r--deps/rabbitmq_web_stomp/src/rabbit_web_stomp_sup.erl22
-rw-r--r--deps/rabbitmq_web_stomp/test/amqp_stomp_SUITE.erl103
-rw-r--r--deps/rabbitmq_web_stomp/test/config_schema_SUITE.erl55
-rw-r--r--deps/rabbitmq_web_stomp/test/config_schema_SUITE_data/certs/cacert.pem1
-rw-r--r--deps/rabbitmq_web_stomp/test/config_schema_SUITE_data/certs/cert.pem1
-rw-r--r--deps/rabbitmq_web_stomp/test/config_schema_SUITE_data/certs/key.pem1
-rw-r--r--deps/rabbitmq_web_stomp/test/config_schema_SUITE_data/rabbitmq_web_stomp.snippets226
-rw-r--r--deps/rabbitmq_web_stomp/test/cowboy_websocket_SUITE.erl269
-rw-r--r--deps/rabbitmq_web_stomp/test/proxy_protocol_SUITE.erl102
-rw-r--r--deps/rabbitmq_web_stomp/test/raw_websocket_SUITE.erl201
-rw-r--r--deps/rabbitmq_web_stomp/test/src/rabbit_ws_test_util.erl30
-rw-r--r--deps/rabbitmq_web_stomp/test/src/rfc6455_client.erl284
-rw-r--r--deps/rabbitmq_web_stomp/test/src/stomp.erl45
-rw-r--r--deps/rabbitmq_web_stomp/test/unit_SUITE.erl58
-rw-r--r--deps/rabbitmq_web_stomp_examples/.gitignore17
-rw-r--r--deps/rabbitmq_web_stomp_examples/CODE_OF_CONDUCT.md44
-rw-r--r--deps/rabbitmq_web_stomp_examples/CONTRIBUTING.md99
-rw-r--r--deps/rabbitmq_web_stomp_examples/LICENSE9
-rw-r--r--deps/rabbitmq_web_stomp_examples/LICENSE-APL2-Stomp-Websocket202
-rw-r--r--deps/rabbitmq_web_stomp_examples/LICENSE-MPL-RabbitMQ373
-rw-r--r--deps/rabbitmq_web_stomp_examples/Makefile23
-rw-r--r--deps/rabbitmq_web_stomp_examples/README.md13
-rw-r--r--deps/rabbitmq_web_stomp_examples/erlang.mk7808
-rw-r--r--deps/rabbitmq_web_stomp_examples/priv/bunny.html136
-rw-r--r--deps/rabbitmq_web_stomp_examples/priv/bunny.pngbin0 -> 38296 bytes
-rw-r--r--deps/rabbitmq_web_stomp_examples/priv/echo.html105
-rw-r--r--deps/rabbitmq_web_stomp_examples/priv/index.html16
-rw-r--r--deps/rabbitmq_web_stomp_examples/priv/main.css38
-rwxr-xr-xdeps/rabbitmq_web_stomp_examples/priv/pencil.curbin0 -> 2238 bytes
-rw-r--r--deps/rabbitmq_web_stomp_examples/priv/stomp.js501
-rw-r--r--deps/rabbitmq_web_stomp_examples/priv/temp-queue.html98
-rw-r--r--deps/rabbitmq_web_stomp_examples/rabbitmq-components.mk359
-rw-r--r--deps/rabbitmq_web_stomp_examples/src/rabbit_web_stomp_examples_app.erl29
2382 files changed, 719280 insertions, 0 deletions
diff --git a/deps/amqp10_client/.gitignore b/deps/amqp10_client/.gitignore
new file mode 100644
index 0000000000..aca89b8519
--- /dev/null
+++ b/deps/amqp10_client/.gitignore
@@ -0,0 +1,28 @@
+.sw?
+.*.sw?
+*.beam
+*.plt
+/.erlang.mk/
+/cover/
+/deps/
+/doc/
+/ebin/
+/escript/
+/escript.lock
+/logs/
+/plugins/
+/plugins.lock
+/sbin/
+/sbin.lock
+/xrefr
+elvis
+
+amqp10_client.d
+/*.coverdata
+
+# Generated source files.
+/include/rabbit_amqp1_0_framing.hrl
+/src/rabbit_amqp1_0_framing0.erl
+
+# Downloaded ActiveMQ.
+/test/system_SUITE_data/apache-activemq-*
diff --git a/deps/amqp10_client/CODE_OF_CONDUCT.md b/deps/amqp10_client/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000000..08697906fd
--- /dev/null
+++ b/deps/amqp10_client/CODE_OF_CONDUCT.md
@@ -0,0 +1,44 @@
+# Contributor Code of Conduct
+
+As contributors and maintainers of this project, and in the interest of fostering an open
+and welcoming community, we pledge to respect all people who contribute through reporting
+issues, posting feature requests, updating documentation, submitting pull requests or
+patches, and other activities.
+
+We are committed to making participation in this project a harassment-free experience for
+everyone, regardless of level of experience, gender, gender identity and expression,
+sexual orientation, disability, personal appearance, body size, race, ethnicity, age,
+religion, or nationality.
+
+Examples of unacceptable behavior by participants include:
+
+ * The use of sexualized language or imagery
+ * Personal attacks
+ * Trolling or insulting/derogatory comments
+ * Public or private harassment
+ * Publishing others' private information, such as physical or electronic addresses,
+ without explicit permission
+ * Other unethical or unprofessional conduct
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments,
+commits, code, wiki edits, issues, and other contributions that are not aligned to this
+Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors
+that they deem inappropriate, threatening, offensive, or harmful.
+
+By adopting this Code of Conduct, project maintainers commit themselves to fairly and
+consistently applying these principles to every aspect of managing this project. Project
+maintainers who do not follow or enforce the Code of Conduct may be permanently removed
+from the project team.
+
+This Code of Conduct applies both within project spaces and in public spaces when an
+individual is representing the project or its community.
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by
+contacting a project maintainer at [info@rabbitmq.com](mailto:info@rabbitmq.com). All complaints will
+be reviewed and investigated and will result in a response that is deemed necessary and
+appropriate to the circumstances. Maintainers are obligated to maintain confidentiality
+with regard to the reporter of an incident.
+
+This Code of Conduct is adapted from the
+[Contributor Covenant](https://contributor-covenant.org), version 1.3.0, available at
+[contributor-covenant.org/version/1/3/0/](https://contributor-covenant.org/version/1/3/0/)
diff --git a/deps/amqp10_client/CONTRIBUTING.md b/deps/amqp10_client/CONTRIBUTING.md
new file mode 100644
index 0000000000..b420963ee6
--- /dev/null
+++ b/deps/amqp10_client/CONTRIBUTING.md
@@ -0,0 +1,105 @@
+Thank you for using RabbitMQ and for taking the time to contribute to the project.
+This document has two main parts:
+
+ * when and how to file GitHub issues for RabbitMQ projects
+ * how to submit pull requests
+
+They are intended to save you and RabbitMQ maintainers some time, so please
+take a moment to read through them.
+
+## Overview
+
+### GitHub issues
+
+The RabbitMQ team uses GitHub issues for _specific actionable items_ that
+engineers can work on. This assumes the following:
+
+* GitHub issues are not used for questions, investigations, root cause
+ analysis, discussions of potential issues, etc. (as defined by this team)
+* Enough information is provided by the reporter for maintainers to work with
+
+The team receives many questions through various venues every single
+day. Frequently, these questions do not include the necessary details
+the team needs to begin useful work. GitHub issues can very quickly
+turn into something impossible to navigate and make sense
+of. Because of this, questions, investigations, root cause analysis,
+and discussions of potential features are all considered to be
+[mailing list][rmq-users] material. If you are unsure where to begin,
+the [RabbitMQ users mailing list][rmq-users] is the right place.
+
+Getting all the details necessary to reproduce an issue, draw a
+conclusion or even form a hypothesis about what's happening can take a
+fair amount of time. Please help others help you by providing a way to
+reproduce the behavior you're observing, or at least sharing as much
+relevant information as possible on the [RabbitMQ users mailing
+list][rmq-users].
+
+Please provide versions of the software used:
+
+ * RabbitMQ server
+ * Erlang
+ * Operating system version (and distribution, if applicable)
+ * All client libraries used
+ * RabbitMQ plugins (if applicable)
+
+The following information greatly helps in investigating and reproducing issues:
+
+ * RabbitMQ server logs
+ * A code example or terminal transcript that can be used to reproduce
+ * Full exception stack traces (a single line message is not enough!)
+ * `rabbitmqctl report` and `rabbitmqctl environment` output
+ * Other relevant details about the environment and workload, e.g. a traffic capture
+ * Feel free to edit out hostnames and other potentially sensitive information.
+
+To make collecting much of this and other environment information easier, use
+the [`rabbitmq-collect-env`][rmq-collect-env] script. It will produce an archive with
+server logs, operating system logs, output of certain diagnostics commands and so on.
+Please note that **no effort is made to scrub any information that may be sensitive**.
+
+### Pull Requests
+
+RabbitMQ projects use pull requests to discuss, collaborate on and accept code contributions.
+Pull requests are the primary place for discussing code changes.
+
+Here's the recommended workflow:
+
+ * [Fork the repository][github-fork] or repositories you plan on contributing to. If multiple
+ repositories are involved in addressing the same issue, please use the same branch name
+ in each repository
+ * Create a branch with a descriptive name in the relevant repositories
+ * Make your changes, run tests (usually with `make tests`), commit with a
+ [descriptive message][git-commit-msgs], push to your fork
+ * Submit pull requests with an explanation of what has been changed and **why**
+ * Submit a filled out and signed [Contributor Agreement][ca-agreement] if needed (see below)
+ * Be patient. We will get to your pull request eventually
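+
+As a sketch, the middle steps of that workflow might look like the following
+(the fork URL, repository directory, and branch name are placeholders):
+
+    git clone <your-fork-url>
+    cd <repository>
+    git checkout -b descriptive-branch-name
+    # ... make your changes ...
+    make tests
+    git commit -am "Describe what changed and why"
+    git push origin descriptive-branch-name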
+
+If what you are going to work on is a substantial change, please first
+ask the core team for their opinion on the [RabbitMQ users mailing list][rmq-users].
+
+## Running Tests
+
+ make tests
+
+will run all suites.
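+
+To run a single suite, use the corresponding per-suite target; for example,
+this client's Makefile references a `ct-system` target for the system suite:
+
+    make ct-system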
+
+## Code of Conduct
+
+See [CODE_OF_CONDUCT.md](./CODE_OF_CONDUCT.md).
+
+## Contributor Agreement
+
+If you want to contribute a non-trivial change, please submit a signed
+copy of our [Contributor Agreement][ca-agreement] around the time you
+submit your pull request. This will make it much easier (in some
+cases, possible) for the RabbitMQ team at Pivotal to merge your
+contribution.
+
+## Where to Ask Questions
+
+If something isn't clear, feel free to ask on our [mailing list][rmq-users].
+
+[rmq-collect-env]: https://github.com/rabbitmq/support-tools/blob/master/scripts/rabbitmq-collect-env
+[git-commit-msgs]: https://chris.beams.io/posts/git-commit/
+[rmq-users]: https://groups.google.com/forum/#!forum/rabbitmq-users
+[ca-agreement]: https://cla.pivotal.io/sign/rabbitmq
+[github-fork]: https://help.github.com/articles/fork-a-repo/
diff --git a/deps/amqp10_client/LICENSE b/deps/amqp10_client/LICENSE
new file mode 100644
index 0000000000..f2da65d175
--- /dev/null
+++ b/deps/amqp10_client/LICENSE
@@ -0,0 +1,4 @@
+This package is licensed under the MPL 2.0. For the MPL 2.0, please see LICENSE-MPL-RabbitMQ.
+
+If you have any questions regarding licensing, please contact us at
+info@rabbitmq.com.
diff --git a/deps/amqp10_client/LICENSE-MPL-RabbitMQ b/deps/amqp10_client/LICENSE-MPL-RabbitMQ
new file mode 100644
index 0000000000..14e2f777f6
--- /dev/null
+++ b/deps/amqp10_client/LICENSE-MPL-RabbitMQ
@@ -0,0 +1,373 @@
+Mozilla Public License Version 2.0
+==================================
+
+1. Definitions
+--------------
+
+1.1. "Contributor"
+ means each individual or legal entity that creates, contributes to
+ the creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+ means the combination of the Contributions of others (if any) used
+ by a Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+ means Source Code Form to which the initial Contributor has attached
+ the notice in Exhibit A, the Executable Form of such Source Code
+ Form, and Modifications of such Source Code Form, in each case
+ including portions thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ (a) that the initial Contributor has attached the notice described
+ in Exhibit B to the Covered Software; or
+
+ (b) that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the
+ terms of a Secondary License.
+
+1.6. "Executable Form"
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+ means a work that combines Covered Software with other material, in
+ a separate file or files, that is not Covered Software.
+
+1.8. "License"
+ means this document.
+
+1.9. "Licensable"
+ means having the right to grant, to the maximum extent possible,
+ whether at the time of the initial grant or subsequently, any and
+ all of the rights conveyed by this License.
+
+1.10. "Modifications"
+ means any of the following:
+
+ (a) any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered
+ Software; or
+
+ (b) any new file in Source Code Form that contains any Covered
+ Software.
+
+1.11. "Patent Claims" of a Contributor
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the
+ License, by the making, using, selling, offering for sale, having
+ made, import, or transfer of either its Contributions or its
+ Contributor Version.
+
+1.12. "Secondary License"
+ means either the GNU General Public License, Version 2.0, the GNU
+ Lesser General Public License, Version 2.1, the GNU Affero General
+ Public License, Version 3.0, or any later versions of those
+ licenses.
+
+1.13. "Source Code Form"
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that
+ controls, is controlled by, or is under common control with You. For
+ purposes of this definition, "control" means (a) the power, direct
+ or indirect, to cause the direction or management of such entity,
+ whether by contract or otherwise, or (b) ownership of more than
+ fifty percent (50%) of the outstanding shares or beneficial
+ ownership of such entity.
+
+2. License Grants and Conditions
+--------------------------------
+
+2.1. Grants
+
+Each Contributor hereby grants You a world-wide, royalty-free,
+non-exclusive license:
+
+(a) under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+(b) under Patent Claims of such Contributor to make, use, sell, offer
+ for sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+The licenses granted in Section 2.1 with respect to any Contribution
+become effective for each Contribution on the date the Contributor first
+distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+The licenses granted in this Section 2 are the only rights granted under
+this License. No additional rights or licenses will be implied from the
+distribution or licensing of Covered Software under this License.
+Notwithstanding Section 2.1(b) above, no patent license is granted by a
+Contributor:
+
+(a) for any code that a Contributor has removed from Covered Software;
+ or
+
+(b) for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+(c) under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+This License does not grant any rights in the trademarks, service marks,
+or logos of any Contributor (except as may be necessary to comply with
+the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+No Contributor makes additional grants as a result of Your choice to
+distribute the Covered Software under a subsequent version of this
+License (see Section 10.2) or under the terms of a Secondary License (if
+permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+Each Contributor represents that the Contributor believes its
+Contributions are its original creation(s) or it has sufficient rights
+to grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+This License is not intended to limit any rights You have under
+applicable copyright doctrines of fair use, fair dealing, or other
+equivalents.
+
+2.7. Conditions
+
+Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
+in Section 2.1.
+
+3. Responsibilities
+-------------------
+
+3.1. Distribution of Source Form
+
+All distribution of Covered Software in Source Code Form, including any
+Modifications that You create or to which You contribute, must be under
+the terms of this License. You must inform recipients that the Source
+Code Form of the Covered Software is governed by the terms of this
+License, and how they can obtain a copy of this License. You may not
+attempt to alter or restrict the recipients' rights in the Source Code
+Form.
+
+3.2. Distribution of Executable Form
+
+If You distribute Covered Software in Executable Form then:
+
+(a) such Covered Software must also be made available in Source Code
+ Form, as described in Section 3.1, and You must inform recipients of
+ the Executable Form how they can obtain a copy of such Source Code
+ Form by reasonable means in a timely manner, at a charge no more
+ than the cost of distribution to the recipient; and
+
+(b) You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter
+ the recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+You may create and distribute a Larger Work under terms of Your choice,
+provided that You also comply with the requirements of this License for
+the Covered Software. If the Larger Work is a combination of Covered
+Software with a work governed by one or more Secondary Licenses, and the
+Covered Software is not Incompatible With Secondary Licenses, this
+License permits You to additionally distribute such Covered Software
+under the terms of such Secondary License(s), so that the recipient of
+the Larger Work may, at their option, further distribute the Covered
+Software under the terms of either this License or such Secondary
+License(s).
+
+3.4. Notices
+
+You may not remove or alter the substance of any license notices
+(including copyright notices, patent notices, disclaimers of warranty,
+or limitations of liability) contained within the Source Code Form of
+the Covered Software, except that You may alter any license notices to
+the extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+You may choose to offer, and to charge a fee for, warranty, support,
+indemnity or liability obligations to one or more recipients of Covered
+Software. However, You may do so only on Your own behalf, and not on
+behalf of any Contributor. You must make it absolutely clear that any
+such warranty, support, indemnity, or liability obligation is offered by
+You alone, and You hereby agree to indemnify every Contributor for any
+liability incurred by such Contributor as a result of warranty, support,
+indemnity or liability terms You offer. You may include additional
+disclaimers of warranty and limitations of liability specific to any
+jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+---------------------------------------------------
+
+If it is impossible for You to comply with any of the terms of this
+License with respect to some or all of the Covered Software due to
+statute, judicial order, or regulation then You must: (a) comply with
+the terms of this License to the maximum extent possible; and (b)
+describe the limitations and the code they affect. Such description must
+be placed in a text file included with all distributions of the Covered
+Software under this License. Except to the extent prohibited by statute
+or regulation, such description must be sufficiently detailed for a
+recipient of ordinary skill to be able to understand it.
+
+5. Termination
+--------------
+
+5.1. The rights granted under this License will terminate automatically
+if You fail to comply with any of its terms. However, if You become
+compliant, then the rights granted under this License from a particular
+Contributor are reinstated (a) provisionally, unless and until such
+Contributor explicitly and finally terminates Your grants, and (b) on an
+ongoing basis, if such Contributor fails to notify You of the
+non-compliance by some reasonable means prior to 60 days after You have
+come back into compliance. Moreover, Your grants from a particular
+Contributor are reinstated on an ongoing basis if such Contributor
+notifies You of the non-compliance by some reasonable means, this is the
+first time You have received notice of non-compliance with this License
+from such Contributor, and You become compliant prior to 30 days after
+Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+infringement claim (excluding declaratory judgment actions,
+counter-claims, and cross-claims) alleging that a Contributor Version
+directly or indirectly infringes any patent, then the rights granted to
+You by any and all Contributors for the Covered Software under Section
+2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all
+end user license agreements (excluding distributors and resellers) which
+have been validly granted by You or Your distributors under this License
+prior to termination shall survive termination.
+
+************************************************************************
+* *
+* 6. Disclaimer of Warranty *
+* ------------------------- *
+* *
+* Covered Software is provided under this License on an "as is" *
+* basis, without warranty of any kind, either expressed, implied, or *
+* statutory, including, without limitation, warranties that the *
+* Covered Software is free of defects, merchantable, fit for a *
+* particular purpose or non-infringing. The entire risk as to the *
+* quality and performance of the Covered Software is with You. *
+* Should any Covered Software prove defective in any respect, You *
+* (not any Contributor) assume the cost of any necessary servicing, *
+* repair, or correction. This disclaimer of warranty constitutes an *
+* essential part of this License. No use of any Covered Software is *
+* authorized under this License except under this disclaimer. *
+* *
+************************************************************************
+
+************************************************************************
+* *
+* 7. Limitation of Liability *
+* -------------------------- *
+* *
+* Under no circumstances and under no legal theory, whether tort *
+* (including negligence), contract, or otherwise, shall any *
+* Contributor, or anyone who distributes Covered Software as *
+* permitted above, be liable to You for any direct, indirect, *
+* special, incidental, or consequential damages of any character *
+* including, without limitation, damages for lost profits, loss of *
+* goodwill, work stoppage, computer failure or malfunction, or any *
+* and all other commercial damages or losses, even if such party *
+* shall have been informed of the possibility of such damages. This *
+* limitation of liability shall not apply to liability for death or *
+* personal injury resulting from such party's negligence to the *
+* extent applicable law prohibits such limitation. Some *
+* jurisdictions do not allow the exclusion or limitation of *
+* incidental or consequential damages, so this exclusion and *
+* limitation may not apply to You. *
+* *
+************************************************************************
+
+8. Litigation
+-------------
+
+Any litigation relating to this License may be brought only in the
+courts of a jurisdiction where the defendant maintains its principal
+place of business and such litigation shall be governed by laws of that
+jurisdiction, without reference to its conflict-of-law provisions.
+Nothing in this Section shall prevent a party's ability to bring
+cross-claims or counter-claims.
+
+9. Miscellaneous
+----------------
+
+This License represents the complete agreement concerning the subject
+matter hereof. If any provision of this License is held to be
+unenforceable, such provision shall be reformed only to the extent
+necessary to make it enforceable. Any law or regulation which provides
+that the language of a contract shall be construed against the drafter
+shall not be used to construe this License against a Contributor.
+
+10. Versions of the License
+---------------------------
+
+10.1. New Versions
+
+Mozilla Foundation is the license steward. Except as provided in Section
+10.3, no one other than the license steward has the right to modify or
+publish new versions of this License. Each version will be given a
+distinguishing version number.
+
+10.2. Effect of New Versions
+
+You may distribute the Covered Software under the terms of the version
+of the License under which You originally received the Covered Software,
+or under the terms of any subsequent version published by the license
+steward.
+
+10.3. Modified Versions
+
+If you create software not governed by this License, and you want to
+create a new license for such software, you may create and use a
+modified version of this License if you rename the license and remove
+any references to the name of the license steward (except to note that
+such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+Licenses
+
+If You choose to distribute Source Code Form that is Incompatible With
+Secondary Licenses under the terms of this version of the License, the
+notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+-------------------------------------------
+
+ This Source Code Form is subject to the terms of the Mozilla Public
+ License, v. 2.0. If a copy of the MPL was not distributed with this
+ file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular
+file, then You may include the notice in a location (such as a LICENSE
+file in a relevant directory) where a recipient would be likely to look
+for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+---------------------------------------------------------
+
+ This Source Code Form is "Incompatible With Secondary Licenses", as
+ defined by the Mozilla Public License, v. 2.0.
diff --git a/deps/amqp10_client/Makefile b/deps/amqp10_client/Makefile
new file mode 100644
index 0000000000..6f84e2cfee
--- /dev/null
+++ b/deps/amqp10_client/Makefile
@@ -0,0 +1,66 @@
+PROJECT = amqp10_client
+PROJECT_DESCRIPTION = AMQP 1.0 client from the RabbitMQ Project
+PROJECT_MOD = amqp10_client_app
+
+BUILD_DEPS = rabbit_common elvis_mk
+DEPS = amqp10_common
+TEST_DEPS = rabbit rabbitmq_amqp1_0 rabbitmq_ct_helpers
+LOCAL_DEPS = ssl inets crypto
+
+DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-test.mk
+DEP_PLUGINS = rabbit_common/mk/rabbitmq-macros.mk \
+ rabbit_common/mk/rabbitmq-hexpm.mk \
+ rabbit_common/mk/rabbitmq-dist.mk \
+ rabbit_common/mk/rabbitmq-run.mk \
+ rabbit_common/mk/rabbitmq-test.mk \
+ rabbit_common/mk/rabbitmq-tools.mk
+
+DEP_PLUGINS += elvis_mk
+dep_elvis_mk = git https://github.com/inaka/elvis.mk.git master
+
+# FIXME: Use erlang.mk patched for RabbitMQ, while waiting for PRs to be
+# reviewed and merged.
+
+ERLANG_MK_REPO = https://github.com/rabbitmq/erlang.mk.git
+ERLANG_MK_COMMIT = rabbitmq-tmp
+
+include rabbitmq-components.mk
+include erlang.mk
+
+# --------------------------------------------------------------------
+# Compiler flags.
+# --------------------------------------------------------------------
+
+# gen_fsm is deprecated starting from Erlang 20, but we want to support
+# Erlang 19 as well.
+
+ERTS_VER := $(shell erl -version 2>&1 | sed -E 's/.* version //')
+ERLANG_20_ERTS_VER := 9.0
+
+ifeq ($(call compare_version,$(ERTS_VER),$(ERLANG_20_ERTS_VER),>=),true)
+ERLC_OPTS += -Dnowarn_deprecated_gen_fsm
+endif
+
+# Dialyze the tests.
+DIALYZER_OPTS += --src -r test
+
+# --------------------------------------------------------------------
+# ActiveMQ for the testsuite.
+# --------------------------------------------------------------------
+
+ACTIVEMQ_VERSION := 5.14.4
+ACTIVEMQ_URL := 'https://archive.apache.org/dist/activemq/$(ACTIVEMQ_VERSION)/apache-activemq-$(ACTIVEMQ_VERSION)-bin.tar.gz'
+
+ACTIVEMQ := $(abspath test/system_SUITE_data/apache-activemq-$(ACTIVEMQ_VERSION)/bin/activemq)
+export ACTIVEMQ
+
+$(ACTIVEMQ): \
+ test/system_SUITE_data/apache-activemq-$(ACTIVEMQ_VERSION)-bin.tar.gz
+ $(gen_verbose) cd "$(dir $<)" && tar zxf "$(notdir $<)"
+
+test/system_SUITE_data/apache-activemq-$(ACTIVEMQ_VERSION)-bin.tar.gz:
+ $(gen_verbose) $(call core_http_get,$@,$(ACTIVEMQ_URL))
+
+tests:: $(ACTIVEMQ)
+
+ct ct-system: $(ACTIVEMQ)
diff --git a/deps/amqp10_client/README.md b/deps/amqp10_client/README.md
new file mode 100644
index 0000000000..fb4e0c574b
--- /dev/null
+++ b/deps/amqp10_client/README.md
@@ -0,0 +1,159 @@
+# Erlang AMQP 1.0 client
+
+This is an [Erlang client for the AMQP 1.0](https://www.amqp.org/resources/specifications) protocol.
+
+Its primary purpose is to be used in RabbitMQ-related projects, but it is a
+generic client that has been tested against at least 4 implementations of AMQP 1.0.
+
+If you are looking for an Erlang client for [AMQP 0-9-1](https://www.rabbitmq.com/tutorials/amqp-concepts.html) — a completely different
+protocol despite the name — [consider this one](https://github.com/rabbitmq/rabbitmq-erlang-client).
+
+## Project Maturity and Status
+
+This client is used in the cross-protocol version of the RabbitMQ Shovel plugin. It is not 100%
+feature complete, but it is moderately mature and has been tested against at least three AMQP 1.0
+servers: RabbitMQ, Azure ServiceBus, and ActiveMQ.
+
+This client library is not officially supported by VMware at this time.
+
+## Usage
+
+### Connection Settings
+
+The `connection_config` map contains various configuration properties.
+
+```
+-type address() :: inet:socket_address() | inet:hostname().
+
+-type connection_config() ::
+ #{container_id => binary(), % mandatory
+ %% must provide a list of addresses or a single address
+ addresses => [address()],
+ address => address(),
+ %% defaults to 5672, mandatory for TLS
+ port => inet:port_number(),
+ % the dns name of the target host
+ % required by some vendors such as Azure ServiceBus
+ hostname => binary(),
+ tls_opts => {secure_port, [ssl:ssl_option()]}, % optional
+ notify => pid(), % Pid to receive protocol notifications. Set to self() if not provided
+ max_frame_size => non_neg_integer(), % incoming max frame size
+ idle_time_out => non_neg_integer(), % heartbeat
+ sasl => none | anon | {plain, User :: binary(), Password :: binary()},
+ % set this to a negative value to allow a sender to "overshoot" the flow
+ % control by this margin
+ transfer_limit_margin => 0 | neg_integer()
+ }.
+
+```
+
+### TLS
+
+TLS is enabled by setting the `tls_opts` connection configuration property.
+Currently the only valid value is `{secure_port, [ssl_option]}`, where the port
+specified accepts only TLS connections. TLS negotiation as described in the
+AMQP 1.0 protocol may be supported in the future. If no value is provided
+for `tls_opts` then a plain socket will be used.
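+
+For illustration, a sketch of a TLS-enabled configuration is shown below; the
+address, port, and certificate paths are placeholders (5671 is assumed to be a
+TLS-only listener):
+
+```
+OpnConf = #{address => "broker.example.com", % placeholder host
+            port => 5671,                    % assumed TLS-only listener
+            container_id => <<"tls-container">>,
+            sasl => anon,
+            tls_opts => {secure_port, [{cacertfile, "/path/to/ca.pem"},
+                                       {certfile, "/path/to/cert.pem"},
+                                       {keyfile, "/path/to/key.pem"}]}},
+{ok, Connection} = amqp10_client:open_connection(OpnConf),
+```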
+
+
+### Basic Example
+
+```
+%% this will connect to a localhost node
+{ok, Hostname} = inet:gethostname(),
+User = <<"guest">>,
+Password = <<"guest">>,
+%% 5672 is the default port; adjust if your node listens elsewhere
+Port = 5672,
+%% create a configuration map
+OpnConf = #{address => Hostname,
+ port => Port,
+ container_id => <<"test-container">>,
+ sasl => {plain, User, Password}},
+{ok, Connection} = amqp10_client:open_connection(OpnConf),
+{ok, Session} = amqp10_client:begin_session(Connection),
+SenderLinkName = <<"test-sender">>,
+{ok, Sender} = amqp10_client:attach_sender_link(Session, SenderLinkName, <<"a-queue-maybe">>),
+
+%% wait for credit to be received
+receive
+ {amqp10_event, {link, Sender, credited}} -> ok
+after 2000 ->
+ exit(credited_timeout)
+end.
+
+%% create a new message using a delivery-tag, body and indicate
+%% its settlement status (true meaning no disposition confirmation
+%% will be sent by the receiver).
+OutMsg = amqp10_msg:new(<<"my-tag">>, <<"my-body">>, true),
+ok = amqp10_client:send_msg(Sender, OutMsg),
+ok = amqp10_client:detach_link(Sender),
+
+%% create a receiver link
+{ok, Receiver} = amqp10_client:attach_receiver_link(Session, <<"test-receiver">>, <<"a-queue-maybe">>),
+
+%% grant some credit to the remote sender but don't auto-renew it
+ok = amqp10_client:flow_link_credit(Receiver, 5, never),
+
+%% wait for a delivery
+receive
+ {amqp10_msg, Receiver, InMsg} -> ok
+after 2000 ->
+ exit(delivery_timeout)
+end.
+
+ok = amqp10_client:close_connection(Connection).
+```
+
+
+### Events
+
+The `amqp10_client` API is mostly asynchronous with respect to the AMQP 1.0
+protocol. Functions such as `amqp10_client:open_connection` typically return
+after the `Open` frame has been successfully written to the socket rather than
+waiting until the remote end returns with its `Open` frame. The client will
+notify the caller of various internal/async events using `amqp10_event`
+messages. In the example above, when the remote replies with its `Open` frame,
+a message of the following form is sent:
+
+```
+{amqp10_event, {connection, ConnectionPid, opened}}
+```
+
+When the connection is closed, an event is issued as follows:
+
+```
+{amqp10_event, {connection, ConnectionPid, {closed, Why}}}
+```
+
+`Why` could be `normal` or contain a description of an error that occurred
+and resulted in the closure of the connection.
+
+Likewise, sessions and links emit similar events using the same format.
+
+```
+%% success events
+{amqp10_event, {connection, ConnectionPid, opened}}
+{amqp10_event, {session, SessionPid, begun}}
+{amqp10_event, {link, LinkRef, attached}}
+```
+
+```
+%% error events
+{amqp10_event, {connection, ConnectionPid, {closed, Why}}}
+{amqp10_event, {session, SessionPid, {ended, Why}}}
+{amqp10_event, {link, LinkRef, {detached, Why}}}
+```
+
+In addition, the client may notify the initiator of certain protocol
+events such as a receiver running out of credit or credit being available
+to a sender.
+
+```
+%% no more credit available to sender
+{amqp10_event, {link, Sender, credit_exhausted}}
+%% sender credit received
+{amqp10_event, {link, Sender, credited}}
+```
+
+Other events may be declared as necessary, so it makes sense for a user
+of the client to handle all `{amqp10_event, _}` events to ensure unexpected
+messages aren't kept around in the mailbox.
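+
+As a sketch, a catch-all receive loop for these notifications might look like
+the following (`handle_msg/1` and the stop condition are hypothetical, not
+part of the client API):
+
+```
+event_loop(Receiver) ->
+    receive
+        {amqp10_msg, Receiver, Msg} ->
+            %% process the delivery, then keep draining
+            handle_msg(Msg),
+            event_loop(Receiver);
+        {amqp10_event, {connection, _Pid, {closed, _Why}}} ->
+            %% the connection is gone, stop looping
+            ok;
+        {amqp10_event, _Other} ->
+            %% swallow other notifications so they don't
+            %% accumulate in the mailbox
+            event_loop(Receiver)
+    end.
+```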
diff --git a/deps/amqp10_client/elvis.config b/deps/amqp10_client/elvis.config
new file mode 100644
index 0000000000..74c730c603
--- /dev/null
+++ b/deps/amqp10_client/elvis.config
@@ -0,0 +1,26 @@
+[
+ {
+ elvis,
+ [
+ {config,
+ [#{dirs => ["src"],
+ filter => "*.erl",
+ ruleset => erl_files
+ },
+ #{dirs => ["."],
+ filter => "Makefile",
+ ruleset => makefiles
+ },
+ #{dirs => ["."],
+ filter => "rebar.config",
+ ruleset => rebar_config
+ },
+ #{dirs => ["."],
+ filter => "elvis.config",
+ ruleset => elvis_config
+ }
+ ]
+ }
+ ]
+ }
+].
diff --git a/deps/amqp10_client/erlang.mk b/deps/amqp10_client/erlang.mk
new file mode 100644
index 0000000000..fce4be0b0a
--- /dev/null
+++ b/deps/amqp10_client/erlang.mk
@@ -0,0 +1,7808 @@
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+.PHONY: all app apps deps search rel relup docs install-docs check tests clean distclean help erlang-mk
+
+ERLANG_MK_FILENAME := $(realpath $(lastword $(MAKEFILE_LIST)))
+export ERLANG_MK_FILENAME
+
+ERLANG_MK_VERSION = 2019.07.01-40-geb3e4b0
+ERLANG_MK_WITHOUT =
+
+# Make 3.81 and 3.82 are deprecated.
+
+ifeq ($(MAKELEVEL)$(MAKE_VERSION),03.81)
+$(warning Please upgrade to GNU Make 4 or later: https://erlang.mk/guide/installation.html)
+endif
+
+ifeq ($(MAKELEVEL)$(MAKE_VERSION),03.82)
+$(warning Please upgrade to GNU Make 4 or later: https://erlang.mk/guide/installation.html)
+endif
+
+# Core configuration.
+
+PROJECT ?= $(notdir $(CURDIR))
+PROJECT := $(strip $(PROJECT))
+
+PROJECT_VERSION ?= rolling
+PROJECT_MOD ?= $(PROJECT)_app
+PROJECT_ENV ?= []
+
+# Verbosity.
+
+V ?= 0
+
+verbose_0 = @
+verbose_2 = set -x;
+verbose = $(verbose_$(V))
+
+ifeq ($(V),3)
+SHELL := $(SHELL) -x
+endif
+
+gen_verbose_0 = @echo " GEN " $@;
+gen_verbose_2 = set -x;
+gen_verbose = $(gen_verbose_$(V))
+
+gen_verbose_esc_0 = @echo " GEN " $$@;
+gen_verbose_esc_2 = set -x;
+gen_verbose_esc = $(gen_verbose_esc_$(V))
+
+# Temporary files directory.
+
+ERLANG_MK_TMP ?= $(CURDIR)/.erlang.mk
+export ERLANG_MK_TMP
+
+# "erl" command.
+
+ERL = erl +A1 -noinput -boot no_dot_erlang
+
+# Platform detection.
+
+ifeq ($(PLATFORM),)
+UNAME_S := $(shell uname -s)
+
+ifeq ($(UNAME_S),Linux)
+PLATFORM = linux
+else ifeq ($(UNAME_S),Darwin)
+PLATFORM = darwin
+else ifeq ($(UNAME_S),SunOS)
+PLATFORM = solaris
+else ifeq ($(UNAME_S),GNU)
+PLATFORM = gnu
+else ifeq ($(UNAME_S),FreeBSD)
+PLATFORM = freebsd
+else ifeq ($(UNAME_S),NetBSD)
+PLATFORM = netbsd
+else ifeq ($(UNAME_S),OpenBSD)
+PLATFORM = openbsd
+else ifeq ($(UNAME_S),DragonFly)
+PLATFORM = dragonfly
+else ifeq ($(shell uname -o),Msys)
+PLATFORM = msys2
+else
+$(error Unable to detect platform. Please open a ticket with the output of uname -a.)
+endif
+
+export PLATFORM
+endif
+
+# Core targets.
+
+all:: deps app rel
+
+# Noop to avoid a Make warning when there's nothing to do.
+rel::
+ $(verbose) :
+
+relup:: deps app
+
+check:: tests
+
+clean:: clean-crashdump
+
+clean-crashdump:
+ifneq ($(wildcard erl_crash.dump),)
+ $(gen_verbose) rm -f erl_crash.dump
+endif
+
+distclean:: clean distclean-tmp
+
+$(ERLANG_MK_TMP):
+ $(verbose) mkdir -p $(ERLANG_MK_TMP)
+
+distclean-tmp:
+ $(gen_verbose) rm -rf $(ERLANG_MK_TMP)
+
+help::
+ $(verbose) printf "%s\n" \
+ "erlang.mk (version $(ERLANG_MK_VERSION)) is distributed under the terms of the ISC License." \
+ "Copyright (c) 2013-2016 Loïc Hoguin <essen@ninenines.eu>" \
+ "" \
+ "Usage: [V=1] $(MAKE) [target]..." \
+ "" \
+ "Core targets:" \
+ " all Run deps, app and rel targets in that order" \
+ " app Compile the project" \
+ " deps Fetch dependencies (if needed) and compile them" \
+ " fetch-deps Fetch dependencies recursively (if needed) without compiling them" \
+ " list-deps List dependencies recursively on stdout" \
+ " search q=... Search for a package in the built-in index" \
+ " rel Build a release for this project, if applicable" \
+ " docs Build the documentation for this project" \
+ " install-docs Install the man pages for this project" \
+ " check Compile and run all tests and analysis for this project" \
+ " tests Run the tests for this project" \
+ " clean Delete temporary and output files from most targets" \
+ " distclean Delete all temporary and output files" \
+ " help Display this help and exit" \
+ " erlang-mk Update erlang.mk to the latest version"
+
+# Core functions.
+
+empty :=
+space := $(empty) $(empty)
+tab := $(empty) $(empty)
+comma := ,
+
+define newline
+
+
+endef
+
+define comma_list
+$(subst $(space),$(comma),$(strip $(1)))
+endef
+
+define escape_dquotes
+$(subst ",\",$1)
+endef
+
+# Adding erlang.mk to make Erlang scripts that call init:get_plain_arguments() happy.
+define erlang
+$(ERL) $2 -pz $(ERLANG_MK_TMP)/rebar/ebin -eval "$(subst $(newline),,$(call escape_dquotes,$1))" -- erlang.mk
+endef
+
+ifeq ($(PLATFORM),msys2)
+core_native_path = $(shell cygpath -m $1)
+else
+core_native_path = $1
+endif
+
+core_http_get = curl -Lf$(if $(filter-out 0,$(V)),,s)o $(call core_native_path,$1) $2
+
+core_eq = $(and $(findstring $(1),$(2)),$(findstring $(2),$(1)))
+
+# We skip files that contain spaces because they end up causing issues.
+core_find = $(if $(wildcard $1),$(shell find $(1:%/=%) \( -type l -o -type f \) -name $(subst *,\*,$2) | grep -v " "))
+
+core_lc = $(subst A,a,$(subst B,b,$(subst C,c,$(subst D,d,$(subst E,e,$(subst F,f,$(subst G,g,$(subst H,h,$(subst I,i,$(subst J,j,$(subst K,k,$(subst L,l,$(subst M,m,$(subst N,n,$(subst O,o,$(subst P,p,$(subst Q,q,$(subst R,r,$(subst S,s,$(subst T,t,$(subst U,u,$(subst V,v,$(subst W,w,$(subst X,x,$(subst Y,y,$(subst Z,z,$(1)))))))))))))))))))))))))))
+
+core_ls = $(filter-out $(1),$(shell echo $(1)))
+
+# @todo Use a solution that does not require using perl.
+core_relpath = $(shell perl -e 'use File::Spec; print File::Spec->abs2rel(@ARGV) . "\n"' $1 $2)
+
+define core_render
+ printf -- '$(subst $(newline),\n,$(subst %,%%,$(subst ','\'',$(subst $(tab),$(WS),$(call $(1))))))\n' > $(2)
+endef
+
+# Automated update.
+
+ERLANG_MK_REPO ?= https://github.com/ninenines/erlang.mk
+ERLANG_MK_COMMIT ?=
+ERLANG_MK_BUILD_CONFIG ?= build.config
+ERLANG_MK_BUILD_DIR ?= .erlang.mk.build
+
+erlang-mk: WITHOUT ?= $(ERLANG_MK_WITHOUT)
+erlang-mk:
+ifdef ERLANG_MK_COMMIT
+ $(verbose) git clone $(ERLANG_MK_REPO) $(ERLANG_MK_BUILD_DIR)
+ $(verbose) cd $(ERLANG_MK_BUILD_DIR) && git checkout $(ERLANG_MK_COMMIT)
+else
+ $(verbose) git clone --depth 1 $(ERLANG_MK_REPO) $(ERLANG_MK_BUILD_DIR)
+endif
+ $(verbose) if [ -f $(ERLANG_MK_BUILD_CONFIG) ]; then cp $(ERLANG_MK_BUILD_CONFIG) $(ERLANG_MK_BUILD_DIR)/build.config; fi
+ $(gen_verbose) $(MAKE) --no-print-directory -C $(ERLANG_MK_BUILD_DIR) WITHOUT='$(strip $(WITHOUT))' UPGRADE=1
+ $(verbose) cp $(ERLANG_MK_BUILD_DIR)/erlang.mk ./erlang.mk
+ $(verbose) rm -rf $(ERLANG_MK_BUILD_DIR)
+ $(verbose) rm -rf $(ERLANG_MK_TMP)
+
+# The erlang.mk package index is bundled in the default erlang.mk build.
+# Search for the string "copyright" to skip to the rest of the code.
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-kerl
+
+KERL_INSTALL_DIR ?= $(HOME)/erlang
+
+ifeq ($(strip $(KERL)),)
+KERL := $(ERLANG_MK_TMP)/kerl/kerl
+endif
+
+KERL_DIR = $(ERLANG_MK_TMP)/kerl
+
+export KERL
+
+KERL_GIT ?= https://github.com/kerl/kerl
+KERL_COMMIT ?= master
+
+KERL_MAKEFLAGS ?=
+
+OTP_GIT ?= https://github.com/erlang/otp
+
+define kerl_otp_target
+$(KERL_INSTALL_DIR)/$(1): $(KERL)
+ $(verbose) if [ ! -d $$@ ]; then \
+ MAKEFLAGS="$(KERL_MAKEFLAGS)" $(KERL) build git $(OTP_GIT) $(1) $(1); \
+ $(KERL) install $(1) $(KERL_INSTALL_DIR)/$(1); \
+ fi
+endef
+
+define kerl_hipe_target
+$(KERL_INSTALL_DIR)/$1-native: $(KERL)
+ $(verbose) if [ ! -d $$@ ]; then \
+ KERL_CONFIGURE_OPTIONS=--enable-native-libs \
+ MAKEFLAGS="$(KERL_MAKEFLAGS)" $(KERL) build git $(OTP_GIT) $1 $1-native; \
+ $(KERL) install $1-native $(KERL_INSTALL_DIR)/$1-native; \
+ fi
+endef
+
+$(KERL): $(KERL_DIR)
+
+$(KERL_DIR): | $(ERLANG_MK_TMP)
+ $(gen_verbose) git clone --depth 1 $(KERL_GIT) $(ERLANG_MK_TMP)/kerl
+ $(verbose) cd $(ERLANG_MK_TMP)/kerl && git checkout $(KERL_COMMIT)
+ $(verbose) chmod +x $(KERL)
+
+distclean:: distclean-kerl
+
+distclean-kerl:
+ $(gen_verbose) rm -rf $(KERL_DIR)
+
+# Allow users to select which version of Erlang/OTP to use for a project.
+
+ifneq ($(strip $(LATEST_ERLANG_OTP)),)
+# In some environments it is necessary to filter out master.
+ERLANG_OTP := $(notdir $(lastword $(sort\
+ $(filter-out $(KERL_INSTALL_DIR)/master $(KERL_INSTALL_DIR)/OTP_R%,\
+ $(filter-out %-rc1 %-rc2 %-rc3,$(wildcard $(KERL_INSTALL_DIR)/*[^-native]))))))
+endif
+
+ERLANG_OTP ?=
+ERLANG_HIPE ?=
+
+# Use kerl to enforce a specific Erlang/OTP version for a project.
+ifneq ($(strip $(ERLANG_OTP)),)
+export PATH := $(KERL_INSTALL_DIR)/$(ERLANG_OTP)/bin:$(PATH)
+SHELL := env PATH=$(PATH) $(SHELL)
+$(eval $(call kerl_otp_target,$(ERLANG_OTP)))
+
+# Build Erlang/OTP only if it doesn't already exist.
+ifeq ($(wildcard $(KERL_INSTALL_DIR)/$(ERLANG_OTP))$(BUILD_ERLANG_OTP),)
+$(info Building Erlang/OTP $(ERLANG_OTP)... Please wait...)
+$(shell $(MAKE) $(KERL_INSTALL_DIR)/$(ERLANG_OTP) ERLANG_OTP=$(ERLANG_OTP) BUILD_ERLANG_OTP=1 >&2)
+endif
+
+else
+# Same for a HiPE enabled VM.
+ifneq ($(strip $(ERLANG_HIPE)),)
+export PATH := $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native/bin:$(PATH)
+SHELL := env PATH=$(PATH) $(SHELL)
+$(eval $(call kerl_hipe_target,$(ERLANG_HIPE)))
+
+# Build Erlang/OTP only if it doesn't already exist.
+ifeq ($(wildcard $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native)$(BUILD_ERLANG_OTP),)
+$(info Building HiPE-enabled Erlang/OTP $(ERLANG_OTP)... Please wait...)
+$(shell $(MAKE) $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native ERLANG_HIPE=$(ERLANG_HIPE) BUILD_ERLANG_OTP=1 >&2)
+endif
+
+endif
+endif
+
+PACKAGES += aberth
+pkg_aberth_name = aberth
+pkg_aberth_description = Generic BERT-RPC server in Erlang
+pkg_aberth_homepage = https://github.com/a13x/aberth
+pkg_aberth_fetch = git
+pkg_aberth_repo = https://github.com/a13x/aberth
+pkg_aberth_commit = master
+
+PACKAGES += active
+pkg_active_name = active
+pkg_active_description = Active development for Erlang: rebuild and reload source/binary files while the VM is running
+pkg_active_homepage = https://github.com/proger/active
+pkg_active_fetch = git
+pkg_active_repo = https://github.com/proger/active
+pkg_active_commit = master
+
+PACKAGES += actordb_core
+pkg_actordb_core_name = actordb_core
+pkg_actordb_core_description = ActorDB main source
+pkg_actordb_core_homepage = http://www.actordb.com/
+pkg_actordb_core_fetch = git
+pkg_actordb_core_repo = https://github.com/biokoda/actordb_core
+pkg_actordb_core_commit = master
+
+PACKAGES += actordb_thrift
+pkg_actordb_thrift_name = actordb_thrift
+pkg_actordb_thrift_description = Thrift API for ActorDB
+pkg_actordb_thrift_homepage = http://www.actordb.com/
+pkg_actordb_thrift_fetch = git
+pkg_actordb_thrift_repo = https://github.com/biokoda/actordb_thrift
+pkg_actordb_thrift_commit = master
+
+PACKAGES += aleppo
+pkg_aleppo_name = aleppo
+pkg_aleppo_description = Alternative Erlang Pre-Processor
+pkg_aleppo_homepage = https://github.com/ErlyORM/aleppo
+pkg_aleppo_fetch = git
+pkg_aleppo_repo = https://github.com/ErlyORM/aleppo
+pkg_aleppo_commit = master
+
+PACKAGES += alog
+pkg_alog_name = alog
+pkg_alog_description = Simply the best logging framework for Erlang
+pkg_alog_homepage = https://github.com/siberian-fast-food/alogger
+pkg_alog_fetch = git
+pkg_alog_repo = https://github.com/siberian-fast-food/alogger
+pkg_alog_commit = master
+
+PACKAGES += amqp_client
+pkg_amqp_client_name = amqp_client
+pkg_amqp_client_description = RabbitMQ Erlang AMQP client
+pkg_amqp_client_homepage = https://www.rabbitmq.com/erlang-client-user-guide.html
+pkg_amqp_client_fetch = git
+pkg_amqp_client_repo = https://github.com/rabbitmq/rabbitmq-erlang-client.git
+pkg_amqp_client_commit = master
+
+PACKAGES += annotations
+pkg_annotations_name = annotations
+pkg_annotations_description = Simple code instrumentation utilities
+pkg_annotations_homepage = https://github.com/hyperthunk/annotations
+pkg_annotations_fetch = git
+pkg_annotations_repo = https://github.com/hyperthunk/annotations
+pkg_annotations_commit = master
+
+PACKAGES += antidote
+pkg_antidote_name = antidote
+pkg_antidote_description = Large-scale computation without synchronisation
+pkg_antidote_homepage = https://syncfree.lip6.fr/
+pkg_antidote_fetch = git
+pkg_antidote_repo = https://github.com/SyncFree/antidote
+pkg_antidote_commit = master
+
+PACKAGES += apns
+pkg_apns_name = apns
+pkg_apns_description = Apple Push Notification Server for Erlang
+pkg_apns_homepage = http://inaka.github.com/apns4erl
+pkg_apns_fetch = git
+pkg_apns_repo = https://github.com/inaka/apns4erl
+pkg_apns_commit = master
+
+PACKAGES += asciideck
+pkg_asciideck_name = asciideck
+pkg_asciideck_description = Asciidoc for Erlang.
+pkg_asciideck_homepage = https://ninenines.eu
+pkg_asciideck_fetch = git
+pkg_asciideck_repo = https://github.com/ninenines/asciideck
+pkg_asciideck_commit = master
+
+PACKAGES += azdht
+pkg_azdht_name = azdht
+pkg_azdht_description = Azureus Distributed Hash Table (DHT) in Erlang
+pkg_azdht_homepage = https://github.com/arcusfelis/azdht
+pkg_azdht_fetch = git
+pkg_azdht_repo = https://github.com/arcusfelis/azdht
+pkg_azdht_commit = master
+
+PACKAGES += backoff
+pkg_backoff_name = backoff
+pkg_backoff_description = Simple exponential backoffs in Erlang
+pkg_backoff_homepage = https://github.com/ferd/backoff
+pkg_backoff_fetch = git
+pkg_backoff_repo = https://github.com/ferd/backoff
+pkg_backoff_commit = master
+
+PACKAGES += barrel_tcp
+pkg_barrel_tcp_name = barrel_tcp
+pkg_barrel_tcp_description = barrel is a generic TCP acceptor pool with low latency in Erlang.
+pkg_barrel_tcp_homepage = https://github.com/benoitc-attic/barrel_tcp
+pkg_barrel_tcp_fetch = git
+pkg_barrel_tcp_repo = https://github.com/benoitc-attic/barrel_tcp
+pkg_barrel_tcp_commit = master
+
+PACKAGES += basho_bench
+pkg_basho_bench_name = basho_bench
+pkg_basho_bench_description = A load-generation and testing tool for basically whatever you can write a returning Erlang function for.
+pkg_basho_bench_homepage = https://github.com/basho/basho_bench
+pkg_basho_bench_fetch = git
+pkg_basho_bench_repo = https://github.com/basho/basho_bench
+pkg_basho_bench_commit = master
+
+PACKAGES += bcrypt
+pkg_bcrypt_name = bcrypt
+pkg_bcrypt_description = Bcrypt Erlang / C library
+pkg_bcrypt_homepage = https://github.com/erlangpack/bcrypt
+pkg_bcrypt_fetch = git
+pkg_bcrypt_repo = https://github.com/erlangpack/bcrypt.git
+pkg_bcrypt_commit = master
+
+PACKAGES += beam
+pkg_beam_name = beam
+pkg_beam_description = BEAM emulator written in Erlang
+pkg_beam_homepage = https://github.com/tonyrog/beam
+pkg_beam_fetch = git
+pkg_beam_repo = https://github.com/tonyrog/beam
+pkg_beam_commit = master
+
+PACKAGES += beanstalk
+pkg_beanstalk_name = beanstalk
+pkg_beanstalk_description = An Erlang client for beanstalkd
+pkg_beanstalk_homepage = https://github.com/tim/erlang-beanstalk
+pkg_beanstalk_fetch = git
+pkg_beanstalk_repo = https://github.com/tim/erlang-beanstalk
+pkg_beanstalk_commit = master
+
+PACKAGES += bear
+pkg_bear_name = bear
+pkg_bear_description = a set of statistics functions for erlang
+pkg_bear_homepage = https://github.com/boundary/bear
+pkg_bear_fetch = git
+pkg_bear_repo = https://github.com/boundary/bear
+pkg_bear_commit = master
+
+PACKAGES += bertconf
+pkg_bertconf_name = bertconf
+pkg_bertconf_description = Make ETS tables out of static BERT files that are auto-reloaded
+pkg_bertconf_homepage = https://github.com/ferd/bertconf
+pkg_bertconf_fetch = git
+pkg_bertconf_repo = https://github.com/ferd/bertconf
+pkg_bertconf_commit = master
+
+PACKAGES += bifrost
+pkg_bifrost_name = bifrost
+pkg_bifrost_description = Erlang FTP Server Framework
+pkg_bifrost_homepage = https://github.com/thorstadt/bifrost
+pkg_bifrost_fetch = git
+pkg_bifrost_repo = https://github.com/thorstadt/bifrost
+pkg_bifrost_commit = master
+
+PACKAGES += binpp
+pkg_binpp_name = binpp
+pkg_binpp_description = Erlang Binary Pretty Printer
+pkg_binpp_homepage = https://github.com/jtendo/binpp
+pkg_binpp_fetch = git
+pkg_binpp_repo = https://github.com/jtendo/binpp
+pkg_binpp_commit = master
+
+PACKAGES += bisect
+pkg_bisect_name = bisect
+pkg_bisect_description = Ordered fixed-size binary dictionary in Erlang
+pkg_bisect_homepage = https://github.com/knutin/bisect
+pkg_bisect_fetch = git
+pkg_bisect_repo = https://github.com/knutin/bisect
+pkg_bisect_commit = master
+
+PACKAGES += bitcask
+pkg_bitcask_name = bitcask
+pkg_bitcask_description = because you need another key/value storage engine
+pkg_bitcask_homepage = https://github.com/basho/bitcask
+pkg_bitcask_fetch = git
+pkg_bitcask_repo = https://github.com/basho/bitcask
+pkg_bitcask_commit = develop
+
+PACKAGES += bitstore
+pkg_bitstore_name = bitstore
+pkg_bitstore_description = A document based ontology development environment
+pkg_bitstore_homepage = https://github.com/bdionne/bitstore
+pkg_bitstore_fetch = git
+pkg_bitstore_repo = https://github.com/bdionne/bitstore
+pkg_bitstore_commit = master
+
+PACKAGES += bootstrap
+pkg_bootstrap_name = bootstrap
+pkg_bootstrap_description = A simple, yet powerful Erlang cluster bootstrapping application.
+pkg_bootstrap_homepage = https://github.com/schlagert/bootstrap
+pkg_bootstrap_fetch = git
+pkg_bootstrap_repo = https://github.com/schlagert/bootstrap
+pkg_bootstrap_commit = master
+
+PACKAGES += boss
+pkg_boss_name = boss
+pkg_boss_description = Erlang web MVC, now featuring Comet
+pkg_boss_homepage = https://github.com/ChicagoBoss/ChicagoBoss
+pkg_boss_fetch = git
+pkg_boss_repo = https://github.com/ChicagoBoss/ChicagoBoss
+pkg_boss_commit = master
+
+PACKAGES += boss_db
+pkg_boss_db_name = boss_db
+pkg_boss_db_description = BossDB: a sharded, caching, pooling, evented ORM for Erlang
+pkg_boss_db_homepage = https://github.com/ErlyORM/boss_db
+pkg_boss_db_fetch = git
+pkg_boss_db_repo = https://github.com/ErlyORM/boss_db
+pkg_boss_db_commit = master
+
+PACKAGES += brod
+pkg_brod_name = brod
+pkg_brod_description = Kafka client in Erlang
+pkg_brod_homepage = https://github.com/klarna/brod
+pkg_brod_fetch = git
+pkg_brod_repo = https://github.com/klarna/brod.git
+pkg_brod_commit = master
+
+PACKAGES += bson
+pkg_bson_name = bson
+pkg_bson_description = BSON documents in Erlang, see bsonspec.org
+pkg_bson_homepage = https://github.com/comtihon/bson-erlang
+pkg_bson_fetch = git
+pkg_bson_repo = https://github.com/comtihon/bson-erlang
+pkg_bson_commit = master
+
+PACKAGES += bullet
+pkg_bullet_name = bullet
+pkg_bullet_description = Simple, reliable, efficient streaming for Cowboy.
+pkg_bullet_homepage = http://ninenines.eu
+pkg_bullet_fetch = git
+pkg_bullet_repo = https://github.com/ninenines/bullet
+pkg_bullet_commit = master
+
+PACKAGES += cache
+pkg_cache_name = cache
+pkg_cache_description = Erlang in-memory cache
+pkg_cache_homepage = https://github.com/fogfish/cache
+pkg_cache_fetch = git
+pkg_cache_repo = https://github.com/fogfish/cache
+pkg_cache_commit = master
+
+PACKAGES += cake
+pkg_cake_name = cake
+pkg_cake_description = Really simple terminal colorization
+pkg_cake_homepage = https://github.com/darach/cake-erl
+pkg_cake_fetch = git
+pkg_cake_repo = https://github.com/darach/cake-erl
+pkg_cake_commit = master
+
+PACKAGES += carotene
+pkg_carotene_name = carotene
+pkg_carotene_description = Real-time server
+pkg_carotene_homepage = https://github.com/carotene/carotene
+pkg_carotene_fetch = git
+pkg_carotene_repo = https://github.com/carotene/carotene
+pkg_carotene_commit = master
+
+PACKAGES += cberl
+pkg_cberl_name = cberl
+pkg_cberl_description = NIF based Erlang bindings for Couchbase
+pkg_cberl_homepage = https://github.com/chitika/cberl
+pkg_cberl_fetch = git
+pkg_cberl_repo = https://github.com/chitika/cberl
+pkg_cberl_commit = master
+
+PACKAGES += cecho
+pkg_cecho_name = cecho
+pkg_cecho_description = An ncurses library for Erlang
+pkg_cecho_homepage = https://github.com/mazenharake/cecho
+pkg_cecho_fetch = git
+pkg_cecho_repo = https://github.com/mazenharake/cecho
+pkg_cecho_commit = master
+
+PACKAGES += cferl
+pkg_cferl_name = cferl
+pkg_cferl_description = Rackspace / Open Stack Cloud Files Erlang Client
+pkg_cferl_homepage = https://github.com/ddossot/cferl
+pkg_cferl_fetch = git
+pkg_cferl_repo = https://github.com/ddossot/cferl
+pkg_cferl_commit = master
+
+PACKAGES += chaos_monkey
+pkg_chaos_monkey_name = chaos_monkey
+pkg_chaos_monkey_description = This is The CHAOS MONKEY. It will kill your processes.
+pkg_chaos_monkey_homepage = https://github.com/dLuna/chaos_monkey
+pkg_chaos_monkey_fetch = git
+pkg_chaos_monkey_repo = https://github.com/dLuna/chaos_monkey
+pkg_chaos_monkey_commit = master
+
+PACKAGES += check_node
+pkg_check_node_name = check_node
+pkg_check_node_description = Nagios Scripts for monitoring Riak
+pkg_check_node_homepage = https://github.com/basho-labs/riak_nagios
+pkg_check_node_fetch = git
+pkg_check_node_repo = https://github.com/basho-labs/riak_nagios
+pkg_check_node_commit = master
+
+PACKAGES += chronos
+pkg_chronos_name = chronos
+pkg_chronos_description = Timer module for Erlang that makes it easy to abstract time out of the tests.
+pkg_chronos_homepage = https://github.com/lehoff/chronos
+pkg_chronos_fetch = git
+pkg_chronos_repo = https://github.com/lehoff/chronos
+pkg_chronos_commit = master
+
+PACKAGES += chumak
+pkg_chumak_name = chumak
+pkg_chumak_description = Pure Erlang implementation of ZeroMQ Message Transport Protocol.
+pkg_chumak_homepage = http://choven.ca
+pkg_chumak_fetch = git
+pkg_chumak_repo = https://github.com/chovencorp/chumak
+pkg_chumak_commit = master
+
+PACKAGES += cl
+pkg_cl_name = cl
+pkg_cl_description = OpenCL binding for Erlang
+pkg_cl_homepage = https://github.com/tonyrog/cl
+pkg_cl_fetch = git
+pkg_cl_repo = https://github.com/tonyrog/cl
+pkg_cl_commit = master
+
+PACKAGES += clique
+pkg_clique_name = clique
+pkg_clique_description = CLI Framework for Erlang
+pkg_clique_homepage = https://github.com/basho/clique
+pkg_clique_fetch = git
+pkg_clique_repo = https://github.com/basho/clique
+pkg_clique_commit = develop
+
+PACKAGES += cloudi_core
+pkg_cloudi_core_name = cloudi_core
+pkg_cloudi_core_description = CloudI internal service runtime
+pkg_cloudi_core_homepage = http://cloudi.org/
+pkg_cloudi_core_fetch = git
+pkg_cloudi_core_repo = https://github.com/CloudI/cloudi_core
+pkg_cloudi_core_commit = master
+
+PACKAGES += cloudi_service_api_requests
+pkg_cloudi_service_api_requests_name = cloudi_service_api_requests
+pkg_cloudi_service_api_requests_description = CloudI Service API requests (JSON-RPC/Erlang-term support)
+pkg_cloudi_service_api_requests_homepage = http://cloudi.org/
+pkg_cloudi_service_api_requests_fetch = git
+pkg_cloudi_service_api_requests_repo = https://github.com/CloudI/cloudi_service_api_requests
+pkg_cloudi_service_api_requests_commit = master
+
+PACKAGES += cloudi_service_db
+pkg_cloudi_service_db_name = cloudi_service_db
+pkg_cloudi_service_db_description = CloudI Database (in-memory/testing/generic)
+pkg_cloudi_service_db_homepage = http://cloudi.org/
+pkg_cloudi_service_db_fetch = git
+pkg_cloudi_service_db_repo = https://github.com/CloudI/cloudi_service_db
+pkg_cloudi_service_db_commit = master
+
+PACKAGES += cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_name = cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_description = Cassandra CloudI Service
+pkg_cloudi_service_db_cassandra_homepage = http://cloudi.org/
+pkg_cloudi_service_db_cassandra_fetch = git
+pkg_cloudi_service_db_cassandra_repo = https://github.com/CloudI/cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_commit = master
+
+PACKAGES += cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_name = cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_description = Cassandra CQL CloudI Service
+pkg_cloudi_service_db_cassandra_cql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_cassandra_cql_fetch = git
+pkg_cloudi_service_db_cassandra_cql_repo = https://github.com/CloudI/cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_commit = master
+
+PACKAGES += cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_name = cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_description = CouchDB CloudI Service
+pkg_cloudi_service_db_couchdb_homepage = http://cloudi.org/
+pkg_cloudi_service_db_couchdb_fetch = git
+pkg_cloudi_service_db_couchdb_repo = https://github.com/CloudI/cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_commit = master
+
+PACKAGES += cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_name = cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_description = elasticsearch CloudI Service
+pkg_cloudi_service_db_elasticsearch_homepage = http://cloudi.org/
+pkg_cloudi_service_db_elasticsearch_fetch = git
+pkg_cloudi_service_db_elasticsearch_repo = https://github.com/CloudI/cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_commit = master
+
+PACKAGES += cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_name = cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_description = memcached CloudI Service
+pkg_cloudi_service_db_memcached_homepage = http://cloudi.org/
+pkg_cloudi_service_db_memcached_fetch = git
+pkg_cloudi_service_db_memcached_repo = https://github.com/CloudI/cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_commit = master
+
+PACKAGES += cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_name = cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_description = MySQL CloudI Service
+pkg_cloudi_service_db_mysql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_mysql_fetch = git
+pkg_cloudi_service_db_mysql_repo = https://github.com/CloudI/cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_commit = master
+
+PACKAGES += cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_name = cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_description = PostgreSQL CloudI Service
+pkg_cloudi_service_db_pgsql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_pgsql_fetch = git
+pkg_cloudi_service_db_pgsql_repo = https://github.com/CloudI/cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_commit = master
+
+PACKAGES += cloudi_service_db_riak
+pkg_cloudi_service_db_riak_name = cloudi_service_db_riak
+pkg_cloudi_service_db_riak_description = Riak CloudI Service
+pkg_cloudi_service_db_riak_homepage = http://cloudi.org/
+pkg_cloudi_service_db_riak_fetch = git
+pkg_cloudi_service_db_riak_repo = https://github.com/CloudI/cloudi_service_db_riak
+pkg_cloudi_service_db_riak_commit = master
+
+PACKAGES += cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_name = cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_description = Tokyo Tyrant CloudI Service
+pkg_cloudi_service_db_tokyotyrant_homepage = http://cloudi.org/
+pkg_cloudi_service_db_tokyotyrant_fetch = git
+pkg_cloudi_service_db_tokyotyrant_repo = https://github.com/CloudI/cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_commit = master
+
+PACKAGES += cloudi_service_filesystem
+pkg_cloudi_service_filesystem_name = cloudi_service_filesystem
+pkg_cloudi_service_filesystem_description = Filesystem CloudI Service
+pkg_cloudi_service_filesystem_homepage = http://cloudi.org/
+pkg_cloudi_service_filesystem_fetch = git
+pkg_cloudi_service_filesystem_repo = https://github.com/CloudI/cloudi_service_filesystem
+pkg_cloudi_service_filesystem_commit = master
+
+PACKAGES += cloudi_service_http_client
+pkg_cloudi_service_http_client_name = cloudi_service_http_client
+pkg_cloudi_service_http_client_description = HTTP client CloudI Service
+pkg_cloudi_service_http_client_homepage = http://cloudi.org/
+pkg_cloudi_service_http_client_fetch = git
+pkg_cloudi_service_http_client_repo = https://github.com/CloudI/cloudi_service_http_client
+pkg_cloudi_service_http_client_commit = master
+
+PACKAGES += cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_name = cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_description = cowboy HTTP/HTTPS CloudI Service
+pkg_cloudi_service_http_cowboy_homepage = http://cloudi.org/
+pkg_cloudi_service_http_cowboy_fetch = git
+pkg_cloudi_service_http_cowboy_repo = https://github.com/CloudI/cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_commit = master
+
+PACKAGES += cloudi_service_http_elli
+pkg_cloudi_service_http_elli_name = cloudi_service_http_elli
+pkg_cloudi_service_http_elli_description = elli HTTP CloudI Service
+pkg_cloudi_service_http_elli_homepage = http://cloudi.org/
+pkg_cloudi_service_http_elli_fetch = git
+pkg_cloudi_service_http_elli_repo = https://github.com/CloudI/cloudi_service_http_elli
+pkg_cloudi_service_http_elli_commit = master
+
+PACKAGES += cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_name = cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_description = Map/Reduce CloudI Service
+pkg_cloudi_service_map_reduce_homepage = http://cloudi.org/
+pkg_cloudi_service_map_reduce_fetch = git
+pkg_cloudi_service_map_reduce_repo = https://github.com/CloudI/cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_commit = master
+
+PACKAGES += cloudi_service_oauth1
+pkg_cloudi_service_oauth1_name = cloudi_service_oauth1
+pkg_cloudi_service_oauth1_description = OAuth v1.0 CloudI Service
+pkg_cloudi_service_oauth1_homepage = http://cloudi.org/
+pkg_cloudi_service_oauth1_fetch = git
+pkg_cloudi_service_oauth1_repo = https://github.com/CloudI/cloudi_service_oauth1
+pkg_cloudi_service_oauth1_commit = master
+
+PACKAGES += cloudi_service_queue
+pkg_cloudi_service_queue_name = cloudi_service_queue
+pkg_cloudi_service_queue_description = Persistent Queue Service
+pkg_cloudi_service_queue_homepage = http://cloudi.org/
+pkg_cloudi_service_queue_fetch = git
+pkg_cloudi_service_queue_repo = https://github.com/CloudI/cloudi_service_queue
+pkg_cloudi_service_queue_commit = master
+
+PACKAGES += cloudi_service_quorum
+pkg_cloudi_service_quorum_name = cloudi_service_quorum
+pkg_cloudi_service_quorum_description = CloudI Quorum Service
+pkg_cloudi_service_quorum_homepage = http://cloudi.org/
+pkg_cloudi_service_quorum_fetch = git
+pkg_cloudi_service_quorum_repo = https://github.com/CloudI/cloudi_service_quorum
+pkg_cloudi_service_quorum_commit = master
+
+PACKAGES += cloudi_service_router
+pkg_cloudi_service_router_name = cloudi_service_router
+pkg_cloudi_service_router_description = CloudI Router Service
+pkg_cloudi_service_router_homepage = http://cloudi.org/
+pkg_cloudi_service_router_fetch = git
+pkg_cloudi_service_router_repo = https://github.com/CloudI/cloudi_service_router
+pkg_cloudi_service_router_commit = master
+
+PACKAGES += cloudi_service_tcp
+pkg_cloudi_service_tcp_name = cloudi_service_tcp
+pkg_cloudi_service_tcp_description = TCP CloudI Service
+pkg_cloudi_service_tcp_homepage = http://cloudi.org/
+pkg_cloudi_service_tcp_fetch = git
+pkg_cloudi_service_tcp_repo = https://github.com/CloudI/cloudi_service_tcp
+pkg_cloudi_service_tcp_commit = master
+
+PACKAGES += cloudi_service_timers
+pkg_cloudi_service_timers_name = cloudi_service_timers
+pkg_cloudi_service_timers_description = Timers CloudI Service
+pkg_cloudi_service_timers_homepage = http://cloudi.org/
+pkg_cloudi_service_timers_fetch = git
+pkg_cloudi_service_timers_repo = https://github.com/CloudI/cloudi_service_timers
+pkg_cloudi_service_timers_commit = master
+
+PACKAGES += cloudi_service_udp
+pkg_cloudi_service_udp_name = cloudi_service_udp
+pkg_cloudi_service_udp_description = UDP CloudI Service
+pkg_cloudi_service_udp_homepage = http://cloudi.org/
+pkg_cloudi_service_udp_fetch = git
+pkg_cloudi_service_udp_repo = https://github.com/CloudI/cloudi_service_udp
+pkg_cloudi_service_udp_commit = master
+
+PACKAGES += cloudi_service_validate
+pkg_cloudi_service_validate_name = cloudi_service_validate
+pkg_cloudi_service_validate_description = CloudI Validate Service
+pkg_cloudi_service_validate_homepage = http://cloudi.org/
+pkg_cloudi_service_validate_fetch = git
+pkg_cloudi_service_validate_repo = https://github.com/CloudI/cloudi_service_validate
+pkg_cloudi_service_validate_commit = master
+
+PACKAGES += cloudi_service_zeromq
+pkg_cloudi_service_zeromq_name = cloudi_service_zeromq
+pkg_cloudi_service_zeromq_description = ZeroMQ CloudI Service
+pkg_cloudi_service_zeromq_homepage = http://cloudi.org/
+pkg_cloudi_service_zeromq_fetch = git
+pkg_cloudi_service_zeromq_repo = https://github.com/CloudI/cloudi_service_zeromq
+pkg_cloudi_service_zeromq_commit = master
+
+PACKAGES += cluster_info
+pkg_cluster_info_name = cluster_info
+pkg_cluster_info_description = Fork of Hibari's nifty cluster_info OTP app
+pkg_cluster_info_homepage = https://github.com/basho/cluster_info
+pkg_cluster_info_fetch = git
+pkg_cluster_info_repo = https://github.com/basho/cluster_info
+pkg_cluster_info_commit = master
+
+PACKAGES += color
+pkg_color_name = color
+pkg_color_description = ANSI colors for your Erlang
+pkg_color_homepage = https://github.com/julianduque/erlang-color
+pkg_color_fetch = git
+pkg_color_repo = https://github.com/julianduque/erlang-color
+pkg_color_commit = master
+
+PACKAGES += confetti
+pkg_confetti_name = confetti
+pkg_confetti_description = Erlang configuration provider / application:get_env/2 on steroids
+pkg_confetti_homepage = https://github.com/jtendo/confetti
+pkg_confetti_fetch = git
+pkg_confetti_repo = https://github.com/jtendo/confetti
+pkg_confetti_commit = master
+
+PACKAGES += couchbeam
+pkg_couchbeam_name = couchbeam
+pkg_couchbeam_description = Apache CouchDB client in Erlang
+pkg_couchbeam_homepage = https://github.com/benoitc/couchbeam
+pkg_couchbeam_fetch = git
+pkg_couchbeam_repo = https://github.com/benoitc/couchbeam
+pkg_couchbeam_commit = master
+
+PACKAGES += covertool
+pkg_covertool_name = covertool
+pkg_covertool_description = Tool to convert Erlang cover data files into Cobertura XML reports
+pkg_covertool_homepage = https://github.com/idubrov/covertool
+pkg_covertool_fetch = git
+pkg_covertool_repo = https://github.com/idubrov/covertool
+pkg_covertool_commit = master
+
+PACKAGES += cowboy
+pkg_cowboy_name = cowboy
+pkg_cowboy_description = Small, fast and modular HTTP server.
+pkg_cowboy_homepage = http://ninenines.eu
+pkg_cowboy_fetch = git
+pkg_cowboy_repo = https://github.com/ninenines/cowboy
+pkg_cowboy_commit = 1.0.4
+
+PACKAGES += cowdb
+pkg_cowdb_name = cowdb
+pkg_cowdb_description = Pure Key/Value database library for Erlang Applications
+pkg_cowdb_homepage = https://github.com/refuge/cowdb
+pkg_cowdb_fetch = git
+pkg_cowdb_repo = https://github.com/refuge/cowdb
+pkg_cowdb_commit = master
+
+PACKAGES += cowlib
+pkg_cowlib_name = cowlib
+pkg_cowlib_description = Support library for manipulating Web protocols.
+pkg_cowlib_homepage = http://ninenines.eu
+pkg_cowlib_fetch = git
+pkg_cowlib_repo = https://github.com/ninenines/cowlib
+pkg_cowlib_commit = 1.0.2
+
+PACKAGES += cpg
+pkg_cpg_name = cpg
+pkg_cpg_description = CloudI Process Groups
+pkg_cpg_homepage = https://github.com/okeuday/cpg
+pkg_cpg_fetch = git
+pkg_cpg_repo = https://github.com/okeuday/cpg
+pkg_cpg_commit = master
+
+PACKAGES += cqerl
+pkg_cqerl_name = cqerl
+pkg_cqerl_description = Native Erlang CQL client for Cassandra
+pkg_cqerl_homepage = https://matehat.github.io/cqerl/
+pkg_cqerl_fetch = git
+pkg_cqerl_repo = https://github.com/matehat/cqerl
+pkg_cqerl_commit = master
+
+PACKAGES += cr
+pkg_cr_name = cr
+pkg_cr_description = Chain Replication
+pkg_cr_homepage = https://synrc.com/apps/cr/doc/cr.htm
+pkg_cr_fetch = git
+pkg_cr_repo = https://github.com/spawnproc/cr
+pkg_cr_commit = master
+
+PACKAGES += cuttlefish
+pkg_cuttlefish_name = cuttlefish
+pkg_cuttlefish_description = never lose your childlike sense of wonder baby cuttlefish, promise me?
+pkg_cuttlefish_homepage = https://github.com/basho/cuttlefish
+pkg_cuttlefish_fetch = git
+pkg_cuttlefish_repo = https://github.com/basho/cuttlefish
+pkg_cuttlefish_commit = master
+
+PACKAGES += damocles
+pkg_damocles_name = damocles
+pkg_damocles_description = Erlang library for generating adversarial network conditions for QAing distributed applications/systems on a single Linux box.
+pkg_damocles_homepage = https://github.com/lostcolony/damocles
+pkg_damocles_fetch = git
+pkg_damocles_repo = https://github.com/lostcolony/damocles
+pkg_damocles_commit = master
+
+PACKAGES += debbie
+pkg_debbie_name = debbie
+pkg_debbie_description = .DEB Built In Erlang
+pkg_debbie_homepage = https://github.com/crownedgrouse/debbie
+pkg_debbie_fetch = git
+pkg_debbie_repo = https://github.com/crownedgrouse/debbie
+pkg_debbie_commit = master
+
+PACKAGES += decimal
+pkg_decimal_name = decimal
+pkg_decimal_description = An Erlang decimal arithmetic library
+pkg_decimal_homepage = https://github.com/tim/erlang-decimal
+pkg_decimal_fetch = git
+pkg_decimal_repo = https://github.com/tim/erlang-decimal
+pkg_decimal_commit = master
+
+PACKAGES += detergent
+pkg_detergent_name = detergent
+pkg_detergent_description = An emulsifying Erlang SOAP library
+pkg_detergent_homepage = https://github.com/devinus/detergent
+pkg_detergent_fetch = git
+pkg_detergent_repo = https://github.com/devinus/detergent
+pkg_detergent_commit = master
+
+PACKAGES += detest
+pkg_detest_name = detest
+pkg_detest_description = Tool for running tests on a cluster of Erlang nodes
+pkg_detest_homepage = https://github.com/biokoda/detest
+pkg_detest_fetch = git
+pkg_detest_repo = https://github.com/biokoda/detest
+pkg_detest_commit = master
+
+PACKAGES += dh_date
+pkg_dh_date_name = dh_date
+pkg_dh_date_description = Date formatting / parsing library for Erlang
+pkg_dh_date_homepage = https://github.com/daleharvey/dh_date
+pkg_dh_date_fetch = git
+pkg_dh_date_repo = https://github.com/daleharvey/dh_date
+pkg_dh_date_commit = master
+
+PACKAGES += dirbusterl
+pkg_dirbusterl_name = dirbusterl
+pkg_dirbusterl_description = DirBuster successor in Erlang
+pkg_dirbusterl_homepage = https://github.com/silentsignal/DirBustErl
+pkg_dirbusterl_fetch = git
+pkg_dirbusterl_repo = https://github.com/silentsignal/DirBustErl
+pkg_dirbusterl_commit = master
+
+PACKAGES += dispcount
+pkg_dispcount_name = dispcount
+pkg_dispcount_description = Erlang task dispatcher based on ETS counters.
+pkg_dispcount_homepage = https://github.com/ferd/dispcount
+pkg_dispcount_fetch = git
+pkg_dispcount_repo = https://github.com/ferd/dispcount
+pkg_dispcount_commit = master
+
+PACKAGES += dlhttpc
+pkg_dlhttpc_name = dlhttpc
+pkg_dlhttpc_description = dispcount-based lhttpc fork for massive amounts of requests to limited endpoints
+pkg_dlhttpc_homepage = https://github.com/ferd/dlhttpc
+pkg_dlhttpc_fetch = git
+pkg_dlhttpc_repo = https://github.com/ferd/dlhttpc
+pkg_dlhttpc_commit = master
+
+PACKAGES += dns
+pkg_dns_name = dns
+pkg_dns_description = Erlang DNS library
+pkg_dns_homepage = https://github.com/aetrion/dns_erlang
+pkg_dns_fetch = git
+pkg_dns_repo = https://github.com/aetrion/dns_erlang
+pkg_dns_commit = master
+
+PACKAGES += dnssd
+pkg_dnssd_name = dnssd
+pkg_dnssd_description = Erlang interface to Apple's Bonjour DNS Service Discovery implementation
+pkg_dnssd_homepage = https://github.com/benoitc/dnssd_erlang
+pkg_dnssd_fetch = git
+pkg_dnssd_repo = https://github.com/benoitc/dnssd_erlang
+pkg_dnssd_commit = master
+
+PACKAGES += dynamic_compile
+pkg_dynamic_compile_name = dynamic_compile
+pkg_dynamic_compile_description = compile and load Erlang modules from string input
+pkg_dynamic_compile_homepage = https://github.com/jkvor/dynamic_compile
+pkg_dynamic_compile_fetch = git
+pkg_dynamic_compile_repo = https://github.com/jkvor/dynamic_compile
+pkg_dynamic_compile_commit = master
+
+PACKAGES += e2
+pkg_e2_name = e2
+pkg_e2_description = Library to simplify writing correct OTP applications.
+pkg_e2_homepage = http://e2project.org
+pkg_e2_fetch = git
+pkg_e2_repo = https://github.com/gar1t/e2
+pkg_e2_commit = master
+
+PACKAGES += eamf
+pkg_eamf_name = eamf
+pkg_eamf_description = eAMF provides Action Message Format (AMF) support for Erlang
+pkg_eamf_homepage = https://github.com/mrinalwadhwa/eamf
+pkg_eamf_fetch = git
+pkg_eamf_repo = https://github.com/mrinalwadhwa/eamf
+pkg_eamf_commit = master
+
+PACKAGES += eavro
+pkg_eavro_name = eavro
+pkg_eavro_description = Apache Avro encoder/decoder
+pkg_eavro_homepage = https://github.com/SIfoxDevTeam/eavro
+pkg_eavro_fetch = git
+pkg_eavro_repo = https://github.com/SIfoxDevTeam/eavro
+pkg_eavro_commit = master
+
+PACKAGES += ecapnp
+pkg_ecapnp_name = ecapnp
+pkg_ecapnp_description = Cap'n Proto library for Erlang
+pkg_ecapnp_homepage = https://github.com/kaos/ecapnp
+pkg_ecapnp_fetch = git
+pkg_ecapnp_repo = https://github.com/kaos/ecapnp
+pkg_ecapnp_commit = master
+
+PACKAGES += econfig
+pkg_econfig_name = econfig
+pkg_econfig_description = simple Erlang config handler using INI files
+pkg_econfig_homepage = https://github.com/benoitc/econfig
+pkg_econfig_fetch = git
+pkg_econfig_repo = https://github.com/benoitc/econfig
+pkg_econfig_commit = master
+
+PACKAGES += edate
+pkg_edate_name = edate
+pkg_edate_description = date manipulation library for Erlang
+pkg_edate_homepage = https://github.com/dweldon/edate
+pkg_edate_fetch = git
+pkg_edate_repo = https://github.com/dweldon/edate
+pkg_edate_commit = master
+
+PACKAGES += edgar
+pkg_edgar_name = edgar
+pkg_edgar_description = Erlang Does GNU AR
+pkg_edgar_homepage = https://github.com/crownedgrouse/edgar
+pkg_edgar_fetch = git
+pkg_edgar_repo = https://github.com/crownedgrouse/edgar
+pkg_edgar_commit = master
+
+PACKAGES += edis
+pkg_edis_name = edis
+pkg_edis_description = An Erlang implementation of Redis KV Store
+pkg_edis_homepage = http://inaka.github.com/edis/
+pkg_edis_fetch = git
+pkg_edis_repo = https://github.com/inaka/edis
+pkg_edis_commit = master
+
+PACKAGES += edns
+pkg_edns_name = edns
+pkg_edns_description = Erlang/OTP DNS server
+pkg_edns_homepage = https://github.com/hcvst/erlang-dns
+pkg_edns_fetch = git
+pkg_edns_repo = https://github.com/hcvst/erlang-dns
+pkg_edns_commit = master
+
+PACKAGES += edown
+pkg_edown_name = edown
+pkg_edown_description = EDoc extension for generating Github-flavored Markdown
+pkg_edown_homepage = https://github.com/uwiger/edown
+pkg_edown_fetch = git
+pkg_edown_repo = https://github.com/uwiger/edown
+pkg_edown_commit = master
+
+PACKAGES += eep
+pkg_eep_name = eep
+pkg_eep_description = Erlang Easy Profiling (eep) application provides a way to analyze application performance and call hierarchy
+pkg_eep_homepage = https://github.com/virtan/eep
+pkg_eep_fetch = git
+pkg_eep_repo = https://github.com/virtan/eep
+pkg_eep_commit = master
+
+PACKAGES += eep_app
+pkg_eep_app_name = eep_app
+pkg_eep_app_description = Embedded Event Processing
+pkg_eep_app_homepage = https://github.com/darach/eep-erl
+pkg_eep_app_fetch = git
+pkg_eep_app_repo = https://github.com/darach/eep-erl
+pkg_eep_app_commit = master
+
+PACKAGES += efene
+pkg_efene_name = efene
+pkg_efene_description = Alternative syntax for the Erlang Programming Language focusing on simplicity, ease of use and programmer UX
+pkg_efene_homepage = https://github.com/efene/efene
+pkg_efene_fetch = git
+pkg_efene_repo = https://github.com/efene/efene
+pkg_efene_commit = master
+
+PACKAGES += egeoip
+pkg_egeoip_name = egeoip
+pkg_egeoip_description = Erlang IP Geolocation module, currently supporting the MaxMind GeoLite City Database.
+pkg_egeoip_homepage = https://github.com/mochi/egeoip
+pkg_egeoip_fetch = git
+pkg_egeoip_repo = https://github.com/mochi/egeoip
+pkg_egeoip_commit = master
+
+PACKAGES += ehsa
+pkg_ehsa_name = ehsa
+pkg_ehsa_description = Erlang HTTP server basic and digest authentication modules
+pkg_ehsa_homepage = https://bitbucket.org/a12n/ehsa
+pkg_ehsa_fetch = hg
+pkg_ehsa_repo = https://bitbucket.org/a12n/ehsa
+pkg_ehsa_commit = default
+
+PACKAGES += ej
+pkg_ej_name = ej
+pkg_ej_description = Helper module for working with Erlang terms representing JSON
+pkg_ej_homepage = https://github.com/seth/ej
+pkg_ej_fetch = git
+pkg_ej_repo = https://github.com/seth/ej
+pkg_ej_commit = master
+
+PACKAGES += ejabberd
+pkg_ejabberd_name = ejabberd
+pkg_ejabberd_description = Robust, ubiquitous and massively scalable Jabber / XMPP Instant Messaging platform
+pkg_ejabberd_homepage = https://github.com/processone/ejabberd
+pkg_ejabberd_fetch = git
+pkg_ejabberd_repo = https://github.com/processone/ejabberd
+pkg_ejabberd_commit = master
+
+PACKAGES += ejwt
+pkg_ejwt_name = ejwt
+pkg_ejwt_description = Erlang library for JSON Web Tokens
+pkg_ejwt_homepage = https://github.com/artefactop/ejwt
+pkg_ejwt_fetch = git
+pkg_ejwt_repo = https://github.com/artefactop/ejwt
+pkg_ejwt_commit = master
+
+PACKAGES += ekaf
+pkg_ekaf_name = ekaf
+pkg_ekaf_description = A minimal, high-performance Kafka client in Erlang.
+pkg_ekaf_homepage = https://github.com/helpshift/ekaf
+pkg_ekaf_fetch = git
+pkg_ekaf_repo = https://github.com/helpshift/ekaf
+pkg_ekaf_commit = master
+
+PACKAGES += elarm
+pkg_elarm_name = elarm
+pkg_elarm_description = Alarm Manager for Erlang.
+pkg_elarm_homepage = https://github.com/esl/elarm
+pkg_elarm_fetch = git
+pkg_elarm_repo = https://github.com/esl/elarm
+pkg_elarm_commit = master
+
+PACKAGES += eleveldb
+pkg_eleveldb_name = eleveldb
+pkg_eleveldb_description = Erlang LevelDB API
+pkg_eleveldb_homepage = https://github.com/basho/eleveldb
+pkg_eleveldb_fetch = git
+pkg_eleveldb_repo = https://github.com/basho/eleveldb
+pkg_eleveldb_commit = master
+
+PACKAGES += elixir
+pkg_elixir_name = elixir
+pkg_elixir_description = Elixir is a dynamic, functional language designed for building scalable and maintainable applications
+pkg_elixir_homepage = https://elixir-lang.org/
+pkg_elixir_fetch = git
+pkg_elixir_repo = https://github.com/elixir-lang/elixir
+pkg_elixir_commit = master
+
+PACKAGES += elli
+pkg_elli_name = elli
+pkg_elli_description = Simple, robust and performant Erlang web server
+pkg_elli_homepage = https://github.com/elli-lib/elli
+pkg_elli_fetch = git
+pkg_elli_repo = https://github.com/elli-lib/elli
+pkg_elli_commit = master
+
+PACKAGES += elvis
+pkg_elvis_name = elvis
+pkg_elvis_description = Erlang Style Reviewer
+pkg_elvis_homepage = https://github.com/inaka/elvis
+pkg_elvis_fetch = git
+pkg_elvis_repo = https://github.com/inaka/elvis
+pkg_elvis_commit = master
+
+PACKAGES += emagick
+pkg_emagick_name = emagick
+pkg_emagick_description = Wrapper for the GraphicsMagick/ImageMagick command line tool.
+pkg_emagick_homepage = https://github.com/kivra/emagick
+pkg_emagick_fetch = git
+pkg_emagick_repo = https://github.com/kivra/emagick
+pkg_emagick_commit = master
+
+PACKAGES += emysql
+pkg_emysql_name = emysql
+pkg_emysql_description = Stable, pure Erlang MySQL driver.
+pkg_emysql_homepage = https://github.com/Eonblast/Emysql
+pkg_emysql_fetch = git
+pkg_emysql_repo = https://github.com/Eonblast/Emysql
+pkg_emysql_commit = master
+
+PACKAGES += enm
+pkg_enm_name = enm
+pkg_enm_description = Erlang driver for nanomsg
+pkg_enm_homepage = https://github.com/basho/enm
+pkg_enm_fetch = git
+pkg_enm_repo = https://github.com/basho/enm
+pkg_enm_commit = master
+
+PACKAGES += entop
+pkg_entop_name = entop
+pkg_entop_description = A top-like tool for monitoring an Erlang node
+pkg_entop_homepage = https://github.com/mazenharake/entop
+pkg_entop_fetch = git
+pkg_entop_repo = https://github.com/mazenharake/entop
+pkg_entop_commit = master
+
+PACKAGES += epcap
+pkg_epcap_name = epcap
+pkg_epcap_description = Erlang packet capture interface using pcap
+pkg_epcap_homepage = https://github.com/msantos/epcap
+pkg_epcap_fetch = git
+pkg_epcap_repo = https://github.com/msantos/epcap
+pkg_epcap_commit = master
+
+PACKAGES += eper
+pkg_eper_name = eper
+pkg_eper_description = Erlang performance and debugging tools.
+pkg_eper_homepage = https://github.com/massemanet/eper
+pkg_eper_fetch = git
+pkg_eper_repo = https://github.com/massemanet/eper
+pkg_eper_commit = master
+
+PACKAGES += epgsql
+pkg_epgsql_name = epgsql
+pkg_epgsql_description = Erlang PostgreSQL client library.
+pkg_epgsql_homepage = https://github.com/epgsql/epgsql
+pkg_epgsql_fetch = git
+pkg_epgsql_repo = https://github.com/epgsql/epgsql
+pkg_epgsql_commit = master
+
+PACKAGES += episcina
+pkg_episcina_name = episcina
+pkg_episcina_description = A simple, non-intrusive resource pool for connections
+pkg_episcina_homepage = https://github.com/erlware/episcina
+pkg_episcina_fetch = git
+pkg_episcina_repo = https://github.com/erlware/episcina
+pkg_episcina_commit = master
+
+PACKAGES += eplot
+pkg_eplot_name = eplot
+pkg_eplot_description = A plot engine written in Erlang.
+pkg_eplot_homepage = https://github.com/psyeugenic/eplot
+pkg_eplot_fetch = git
+pkg_eplot_repo = https://github.com/psyeugenic/eplot
+pkg_eplot_commit = master
+
+PACKAGES += epocxy
+pkg_epocxy_name = epocxy
+pkg_epocxy_description = Erlang Patterns of Concurrency
+pkg_epocxy_homepage = https://github.com/duomark/epocxy
+pkg_epocxy_fetch = git
+pkg_epocxy_repo = https://github.com/duomark/epocxy
+pkg_epocxy_commit = master
+
+PACKAGES += epubnub
+pkg_epubnub_name = epubnub
+pkg_epubnub_description = Erlang PubNub API
+pkg_epubnub_homepage = https://github.com/tsloughter/epubnub
+pkg_epubnub_fetch = git
+pkg_epubnub_repo = https://github.com/tsloughter/epubnub
+pkg_epubnub_commit = master
+
+PACKAGES += eqm
+pkg_eqm_name = eqm
+pkg_eqm_description = Erlang pub sub with supply-demand channels
+pkg_eqm_homepage = https://github.com/loucash/eqm
+pkg_eqm_fetch = git
+pkg_eqm_repo = https://github.com/loucash/eqm
+pkg_eqm_commit = master
+
+PACKAGES += eredis
+pkg_eredis_name = eredis
+pkg_eredis_description = Erlang Redis client
+pkg_eredis_homepage = https://github.com/wooga/eredis
+pkg_eredis_fetch = git
+pkg_eredis_repo = https://github.com/wooga/eredis
+pkg_eredis_commit = master
+
+PACKAGES += eredis_pool
+pkg_eredis_pool_name = eredis_pool
+pkg_eredis_pool_description = eredis_pool is a pool of Redis clients, using eredis and poolboy.
+pkg_eredis_pool_homepage = https://github.com/hiroeorz/eredis_pool
+pkg_eredis_pool_fetch = git
+pkg_eredis_pool_repo = https://github.com/hiroeorz/eredis_pool
+pkg_eredis_pool_commit = master
+
+PACKAGES += erl_streams
+pkg_erl_streams_name = erl_streams
+pkg_erl_streams_description = Streams in Erlang
+pkg_erl_streams_homepage = https://github.com/epappas/erl_streams
+pkg_erl_streams_fetch = git
+pkg_erl_streams_repo = https://github.com/epappas/erl_streams
+pkg_erl_streams_commit = master
+
+PACKAGES += erlang_cep
+pkg_erlang_cep_name = erlang_cep
+pkg_erlang_cep_description = A basic CEP package written in Erlang
+pkg_erlang_cep_homepage = https://github.com/danmacklin/erlang_cep
+pkg_erlang_cep_fetch = git
+pkg_erlang_cep_repo = https://github.com/danmacklin/erlang_cep
+pkg_erlang_cep_commit = master
+
+PACKAGES += erlang_js
+pkg_erlang_js_name = erlang_js
+pkg_erlang_js_description = A linked-in driver for Erlang to Mozilla's SpiderMonkey JavaScript runtime.
+pkg_erlang_js_homepage = https://github.com/basho/erlang_js
+pkg_erlang_js_fetch = git
+pkg_erlang_js_repo = https://github.com/basho/erlang_js
+pkg_erlang_js_commit = master
+
+PACKAGES += erlang_localtime
+pkg_erlang_localtime_name = erlang_localtime
+pkg_erlang_localtime_description = Erlang library for conversion from one local time to another
+pkg_erlang_localtime_homepage = https://github.com/dmitryme/erlang_localtime
+pkg_erlang_localtime_fetch = git
+pkg_erlang_localtime_repo = https://github.com/dmitryme/erlang_localtime
+pkg_erlang_localtime_commit = master
+
+PACKAGES += erlang_smtp
+pkg_erlang_smtp_name = erlang_smtp
+pkg_erlang_smtp_description = Erlang SMTP and POP3 server code.
+pkg_erlang_smtp_homepage = https://github.com/tonyg/erlang-smtp
+pkg_erlang_smtp_fetch = git
+pkg_erlang_smtp_repo = https://github.com/tonyg/erlang-smtp
+pkg_erlang_smtp_commit = master
+
+PACKAGES += erlang_term
+pkg_erlang_term_name = erlang_term
+pkg_erlang_term_description = Erlang Term Info
+pkg_erlang_term_homepage = https://github.com/okeuday/erlang_term
+pkg_erlang_term_fetch = git
+pkg_erlang_term_repo = https://github.com/okeuday/erlang_term
+pkg_erlang_term_commit = master
+
+PACKAGES += erlastic_search
+pkg_erlastic_search_name = erlastic_search
+pkg_erlastic_search_description = An Erlang app for communicating with Elasticsearch's REST interface.
+pkg_erlastic_search_homepage = https://github.com/tsloughter/erlastic_search
+pkg_erlastic_search_fetch = git
+pkg_erlastic_search_repo = https://github.com/tsloughter/erlastic_search
+pkg_erlastic_search_commit = master
+
+PACKAGES += erlasticsearch
+pkg_erlasticsearch_name = erlasticsearch
+pkg_erlasticsearch_description = Erlang Thrift interface to Elasticsearch
+pkg_erlasticsearch_homepage = https://github.com/dieswaytoofast/erlasticsearch
+pkg_erlasticsearch_fetch = git
+pkg_erlasticsearch_repo = https://github.com/dieswaytoofast/erlasticsearch
+pkg_erlasticsearch_commit = master
+
+PACKAGES += erlbrake
+pkg_erlbrake_name = erlbrake
+pkg_erlbrake_description = Erlang Airbrake notification client
+pkg_erlbrake_homepage = https://github.com/kenpratt/erlbrake
+pkg_erlbrake_fetch = git
+pkg_erlbrake_repo = https://github.com/kenpratt/erlbrake
+pkg_erlbrake_commit = master
+
+PACKAGES += erlcloud
+pkg_erlcloud_name = erlcloud
+pkg_erlcloud_description = Cloud Computing library for erlang (Amazon EC2, S3, SQS, SimpleDB, Mechanical Turk, ELB)
+pkg_erlcloud_homepage = https://github.com/gleber/erlcloud
+pkg_erlcloud_fetch = git
+pkg_erlcloud_repo = https://github.com/gleber/erlcloud
+pkg_erlcloud_commit = master
+
+PACKAGES += erlcron
+pkg_erlcron_name = erlcron
+pkg_erlcron_description = Erlang cronish system
+pkg_erlcron_homepage = https://github.com/erlware/erlcron
+pkg_erlcron_fetch = git
+pkg_erlcron_repo = https://github.com/erlware/erlcron
+pkg_erlcron_commit = master
+
+PACKAGES += erldb
+pkg_erldb_name = erldb
+pkg_erldb_description = ORM (Object-relational mapping) application implemented in Erlang
+pkg_erldb_homepage = http://erldb.org
+pkg_erldb_fetch = git
+pkg_erldb_repo = https://github.com/erldb/erldb
+pkg_erldb_commit = master
+
+PACKAGES += erldis
+pkg_erldis_name = erldis
+pkg_erldis_description = Redis client library for Erlang
+pkg_erldis_homepage = https://github.com/cstar/erldis
+pkg_erldis_fetch = git
+pkg_erldis_repo = https://github.com/cstar/erldis
+pkg_erldis_commit = master
+
+PACKAGES += erldns
+pkg_erldns_name = erldns
+pkg_erldns_description = DNS server, in Erlang.
+pkg_erldns_homepage = https://github.com/aetrion/erl-dns
+pkg_erldns_fetch = git
+pkg_erldns_repo = https://github.com/aetrion/erl-dns
+pkg_erldns_commit = master
+
+PACKAGES += erldocker
+pkg_erldocker_name = erldocker
+pkg_erldocker_description = Docker Remote API client for Erlang
+pkg_erldocker_homepage = https://github.com/proger/erldocker
+pkg_erldocker_fetch = git
+pkg_erldocker_repo = https://github.com/proger/erldocker
+pkg_erldocker_commit = master
+
+PACKAGES += erlfsmon
+pkg_erlfsmon_name = erlfsmon
+pkg_erlfsmon_description = Erlang filesystem event watcher for Linux and OSX
+pkg_erlfsmon_homepage = https://github.com/proger/erlfsmon
+pkg_erlfsmon_fetch = git
+pkg_erlfsmon_repo = https://github.com/proger/erlfsmon
+pkg_erlfsmon_commit = master
+
+PACKAGES += erlgit
+pkg_erlgit_name = erlgit
+pkg_erlgit_description = Erlang convenience wrapper around git executable
+pkg_erlgit_homepage = https://github.com/gleber/erlgit
+pkg_erlgit_fetch = git
+pkg_erlgit_repo = https://github.com/gleber/erlgit
+pkg_erlgit_commit = master
+
+PACKAGES += erlguten
+pkg_erlguten_name = erlguten
+pkg_erlguten_description = ErlGuten is a system for high-quality typesetting, written purely in Erlang.
+pkg_erlguten_homepage = https://github.com/richcarl/erlguten
+pkg_erlguten_fetch = git
+pkg_erlguten_repo = https://github.com/richcarl/erlguten
+pkg_erlguten_commit = master
+
+PACKAGES += erlmc
+pkg_erlmc_name = erlmc
+pkg_erlmc_description = Erlang memcached binary protocol client
+pkg_erlmc_homepage = https://github.com/jkvor/erlmc
+pkg_erlmc_fetch = git
+pkg_erlmc_repo = https://github.com/jkvor/erlmc
+pkg_erlmc_commit = master
+
+PACKAGES += erlmongo
+pkg_erlmongo_name = erlmongo
+pkg_erlmongo_description = Record-based Erlang driver for MongoDB with GridFS support
+pkg_erlmongo_homepage = https://github.com/SergejJurecko/erlmongo
+pkg_erlmongo_fetch = git
+pkg_erlmongo_repo = https://github.com/SergejJurecko/erlmongo
+pkg_erlmongo_commit = master
+
+PACKAGES += erlog
+pkg_erlog_name = erlog
+pkg_erlog_description = Prolog interpreter in and for Erlang
+pkg_erlog_homepage = https://github.com/rvirding/erlog
+pkg_erlog_fetch = git
+pkg_erlog_repo = https://github.com/rvirding/erlog
+pkg_erlog_commit = master
+
+PACKAGES += erlpass
+pkg_erlpass_name = erlpass
+pkg_erlpass_description = A library to handle password hashing and changing in a safe manner, independent of any kind of storage.
+pkg_erlpass_homepage = https://github.com/ferd/erlpass
+pkg_erlpass_fetch = git
+pkg_erlpass_repo = https://github.com/ferd/erlpass
+pkg_erlpass_commit = master
+
+PACKAGES += erlport
+pkg_erlport_name = erlport
+pkg_erlport_description = ErlPort - connect Erlang to other languages
+pkg_erlport_homepage = https://github.com/hdima/erlport
+pkg_erlport_fetch = git
+pkg_erlport_repo = https://github.com/hdima/erlport
+pkg_erlport_commit = master
+
+PACKAGES += erlsh
+pkg_erlsh_name = erlsh
+pkg_erlsh_description = Erlang shell tools
+pkg_erlsh_homepage = https://github.com/proger/erlsh
+pkg_erlsh_fetch = git
+pkg_erlsh_repo = https://github.com/proger/erlsh
+pkg_erlsh_commit = master
+
+PACKAGES += erlsha2
+pkg_erlsha2_name = erlsha2
+pkg_erlsha2_description = SHA-224, SHA-256, SHA-384, SHA-512 implemented in Erlang NIFs.
+pkg_erlsha2_homepage = https://github.com/vinoski/erlsha2
+pkg_erlsha2_fetch = git
+pkg_erlsha2_repo = https://github.com/vinoski/erlsha2
+pkg_erlsha2_commit = master
+
+PACKAGES += erlsom
+pkg_erlsom_name = erlsom
+pkg_erlsom_description = XML parser for Erlang
+pkg_erlsom_homepage = https://github.com/willemdj/erlsom
+pkg_erlsom_fetch = git
+pkg_erlsom_repo = https://github.com/willemdj/erlsom
+pkg_erlsom_commit = master
+
+PACKAGES += erlubi
+pkg_erlubi_name = erlubi
+pkg_erlubi_description = Ubigraph Erlang Client (and Process Visualizer)
+pkg_erlubi_homepage = https://github.com/krestenkrab/erlubi
+pkg_erlubi_fetch = git
+pkg_erlubi_repo = https://github.com/krestenkrab/erlubi
+pkg_erlubi_commit = master
+
+PACKAGES += erlvolt
+pkg_erlvolt_name = erlvolt
+pkg_erlvolt_description = VoltDB Erlang Client Driver
+pkg_erlvolt_homepage = https://github.com/VoltDB/voltdb-client-erlang
+pkg_erlvolt_fetch = git
+pkg_erlvolt_repo = https://github.com/VoltDB/voltdb-client-erlang
+pkg_erlvolt_commit = master
+
+PACKAGES += erlware_commons
+pkg_erlware_commons_name = erlware_commons
+pkg_erlware_commons_description = Erlware Commons is an Erlware project focused on all aspects of reusable Erlang components.
+pkg_erlware_commons_homepage = https://github.com/erlware/erlware_commons
+pkg_erlware_commons_fetch = git
+pkg_erlware_commons_repo = https://github.com/erlware/erlware_commons
+pkg_erlware_commons_commit = master
+
+PACKAGES += erlydtl
+pkg_erlydtl_name = erlydtl
+pkg_erlydtl_description = Django Template Language for Erlang.
+pkg_erlydtl_homepage = https://github.com/erlydtl/erlydtl
+pkg_erlydtl_fetch = git
+pkg_erlydtl_repo = https://github.com/erlydtl/erlydtl
+pkg_erlydtl_commit = master
+
+PACKAGES += errd
+pkg_errd_name = errd
+pkg_errd_description = Erlang RRDTool library
+pkg_errd_homepage = https://github.com/archaelus/errd
+pkg_errd_fetch = git
+pkg_errd_repo = https://github.com/archaelus/errd
+pkg_errd_commit = master
+
+PACKAGES += erserve
+pkg_erserve_name = erserve
+pkg_erserve_description = Erlang/Rserve communication interface
+pkg_erserve_homepage = https://github.com/del/erserve
+pkg_erserve_fetch = git
+pkg_erserve_repo = https://github.com/del/erserve
+pkg_erserve_commit = master
+
+PACKAGES += erwa
+pkg_erwa_name = erwa
+pkg_erwa_description = A WAMP router and client written in Erlang.
+pkg_erwa_homepage = https://github.com/bwegh/erwa
+pkg_erwa_fetch = git
+pkg_erwa_repo = https://github.com/bwegh/erwa
+pkg_erwa_commit = master
+
+PACKAGES += escalus
+pkg_escalus_name = escalus
+pkg_escalus_description = An XMPP client library in Erlang for conveniently testing XMPP servers
+pkg_escalus_homepage = https://github.com/esl/escalus
+pkg_escalus_fetch = git
+pkg_escalus_repo = https://github.com/esl/escalus
+pkg_escalus_commit = master
+
+PACKAGES += esh_mk
+pkg_esh_mk_name = esh_mk
+pkg_esh_mk_description = esh template engine plugin for erlang.mk
+pkg_esh_mk_homepage = https://github.com/crownedgrouse/esh.mk
+pkg_esh_mk_fetch = git
+pkg_esh_mk_repo = https://github.com/crownedgrouse/esh.mk.git
+pkg_esh_mk_commit = master
+
+PACKAGES += espec
+pkg_espec_name = espec
+pkg_espec_description = ESpec: Behaviour driven development framework for Erlang
+pkg_espec_homepage = https://github.com/lucaspiller/espec
+pkg_espec_fetch = git
+pkg_espec_repo = https://github.com/lucaspiller/espec
+pkg_espec_commit = master
+
+PACKAGES += estatsd
+pkg_estatsd_name = estatsd
+pkg_estatsd_description = Erlang stats aggregation app that periodically flushes data to graphite
+pkg_estatsd_homepage = https://github.com/RJ/estatsd
+pkg_estatsd_fetch = git
+pkg_estatsd_repo = https://github.com/RJ/estatsd
+pkg_estatsd_commit = master
+
+PACKAGES += etap
+pkg_etap_name = etap
+pkg_etap_description = etap is a simple Erlang testing library that provides TAP-compliant output.
+pkg_etap_homepage = https://github.com/ngerakines/etap
+pkg_etap_fetch = git
+pkg_etap_repo = https://github.com/ngerakines/etap
+pkg_etap_commit = master
+
+PACKAGES += etest
+pkg_etest_name = etest
+pkg_etest_description = A lightweight, convention-over-configuration test framework for Erlang
+pkg_etest_homepage = https://github.com/wooga/etest
+pkg_etest_fetch = git
+pkg_etest_repo = https://github.com/wooga/etest
+pkg_etest_commit = master
+
+PACKAGES += etest_http
+pkg_etest_http_name = etest_http
+pkg_etest_http_description = etest Assertions around HTTP (client-side)
+pkg_etest_http_homepage = https://github.com/wooga/etest_http
+pkg_etest_http_fetch = git
+pkg_etest_http_repo = https://github.com/wooga/etest_http
+pkg_etest_http_commit = master
+
+PACKAGES += etoml
+pkg_etoml_name = etoml
+pkg_etoml_description = Erlang parser for the TOML language
+pkg_etoml_homepage = https://github.com/kalta/etoml
+pkg_etoml_fetch = git
+pkg_etoml_repo = https://github.com/kalta/etoml
+pkg_etoml_commit = master
+
+PACKAGES += eunit
+pkg_eunit_name = eunit
+pkg_eunit_description = The EUnit lightweight unit testing framework for Erlang - this is the canonical development repository.
+pkg_eunit_homepage = https://github.com/richcarl/eunit
+pkg_eunit_fetch = git
+pkg_eunit_repo = https://github.com/richcarl/eunit
+pkg_eunit_commit = master
+
+PACKAGES += eunit_formatters
+pkg_eunit_formatters_name = eunit_formatters
+pkg_eunit_formatters_description = Because eunit's output sucks. Let's make it better.
+pkg_eunit_formatters_homepage = https://github.com/seancribbs/eunit_formatters
+pkg_eunit_formatters_fetch = git
+pkg_eunit_formatters_repo = https://github.com/seancribbs/eunit_formatters
+pkg_eunit_formatters_commit = master
+
+PACKAGES += euthanasia
+pkg_euthanasia_name = euthanasia
+pkg_euthanasia_description = Merciful killer for your Erlang processes
+pkg_euthanasia_homepage = https://github.com/doubleyou/euthanasia
+pkg_euthanasia_fetch = git
+pkg_euthanasia_repo = https://github.com/doubleyou/euthanasia
+pkg_euthanasia_commit = master
+
+PACKAGES += evum
+pkg_evum_name = evum
+pkg_evum_description = Spawn Linux VMs as Erlang processes in the Erlang VM
+pkg_evum_homepage = https://github.com/msantos/evum
+pkg_evum_fetch = git
+pkg_evum_repo = https://github.com/msantos/evum
+pkg_evum_commit = master
+
+PACKAGES += exec
+pkg_exec_name = erlexec
+pkg_exec_description = Execute and control OS processes from Erlang/OTP.
+pkg_exec_homepage = http://saleyn.github.com/erlexec
+pkg_exec_fetch = git
+pkg_exec_repo = https://github.com/saleyn/erlexec
+pkg_exec_commit = master
+
+PACKAGES += exml
+pkg_exml_name = exml
+pkg_exml_description = XML parsing library in Erlang
+pkg_exml_homepage = https://github.com/paulgray/exml
+pkg_exml_fetch = git
+pkg_exml_repo = https://github.com/paulgray/exml
+pkg_exml_commit = master
+
+PACKAGES += exometer
+pkg_exometer_name = exometer
+pkg_exometer_description = Basic measurement objects and probe behavior
+pkg_exometer_homepage = https://github.com/Feuerlabs/exometer
+pkg_exometer_fetch = git
+pkg_exometer_repo = https://github.com/Feuerlabs/exometer
+pkg_exometer_commit = master
+
+PACKAGES += exs1024
+pkg_exs1024_name = exs1024
+pkg_exs1024_description = Xorshift1024star pseudo random number generator for Erlang.
+pkg_exs1024_homepage = https://github.com/jj1bdx/exs1024
+pkg_exs1024_fetch = git
+pkg_exs1024_repo = https://github.com/jj1bdx/exs1024
+pkg_exs1024_commit = master
+
+PACKAGES += exs64
+pkg_exs64_name = exs64
+pkg_exs64_description = Xorshift64star pseudo random number generator for Erlang.
+pkg_exs64_homepage = https://github.com/jj1bdx/exs64
+pkg_exs64_fetch = git
+pkg_exs64_repo = https://github.com/jj1bdx/exs64
+pkg_exs64_commit = master
+
+PACKAGES += exsplus116
+pkg_exsplus116_name = exsplus116
+pkg_exsplus116_description = Xorshift116plus for Erlang
+pkg_exsplus116_homepage = https://github.com/jj1bdx/exsplus116
+pkg_exsplus116_fetch = git
+pkg_exsplus116_repo = https://github.com/jj1bdx/exsplus116
+pkg_exsplus116_commit = master
+
+PACKAGES += exsplus128
+pkg_exsplus128_name = exsplus128
+pkg_exsplus128_description = Xorshift128plus pseudo random number generator for Erlang.
+pkg_exsplus128_homepage = https://github.com/jj1bdx/exsplus128
+pkg_exsplus128_fetch = git
+pkg_exsplus128_repo = https://github.com/jj1bdx/exsplus128
+pkg_exsplus128_commit = master
+
+PACKAGES += ezmq
+pkg_ezmq_name = ezmq
+pkg_ezmq_description = zMQ implemented in Erlang
+pkg_ezmq_homepage = https://github.com/RoadRunnr/ezmq
+pkg_ezmq_fetch = git
+pkg_ezmq_repo = https://github.com/RoadRunnr/ezmq
+pkg_ezmq_commit = master
+
+PACKAGES += ezmtp
+pkg_ezmtp_name = ezmtp
+pkg_ezmtp_description = ZMTP protocol in pure Erlang.
+pkg_ezmtp_homepage = https://github.com/a13x/ezmtp
+pkg_ezmtp_fetch = git
+pkg_ezmtp_repo = https://github.com/a13x/ezmtp
+pkg_ezmtp_commit = master
+
+PACKAGES += fast_disk_log
+pkg_fast_disk_log_name = fast_disk_log
+pkg_fast_disk_log_description = Pool-based asynchronous Erlang disk logger
+pkg_fast_disk_log_homepage = https://github.com/lpgauth/fast_disk_log
+pkg_fast_disk_log_fetch = git
+pkg_fast_disk_log_repo = https://github.com/lpgauth/fast_disk_log
+pkg_fast_disk_log_commit = master
+
+PACKAGES += feeder
+pkg_feeder_name = feeder
+pkg_feeder_description = Stream parse RSS and Atom formatted XML feeds.
+pkg_feeder_homepage = https://github.com/michaelnisi/feeder
+pkg_feeder_fetch = git
+pkg_feeder_repo = https://github.com/michaelnisi/feeder
+pkg_feeder_commit = master
+
+PACKAGES += find_crate
+pkg_find_crate_name = find_crate
+pkg_find_crate_description = Find Rust libs and exes in Erlang application priv directory
+pkg_find_crate_homepage = https://github.com/goertzenator/find_crate
+pkg_find_crate_fetch = git
+pkg_find_crate_repo = https://github.com/goertzenator/find_crate
+pkg_find_crate_commit = master
+
+PACKAGES += fix
+pkg_fix_name = fix
+pkg_fix_description = FIX protocol (http://fixprotocol.org/) implementation.
+pkg_fix_homepage = https://github.com/maxlapshin/fix
+pkg_fix_fetch = git
+pkg_fix_repo = https://github.com/maxlapshin/fix
+pkg_fix_commit = master
+
+PACKAGES += flower
+pkg_flower_name = flower
+pkg_flower_description = FlowER - an Erlang OpenFlow development platform
+pkg_flower_homepage = https://github.com/travelping/flower
+pkg_flower_fetch = git
+pkg_flower_repo = https://github.com/travelping/flower
+pkg_flower_commit = master
+
+PACKAGES += fn
+pkg_fn_name = fn
+pkg_fn_description = Function utilities for Erlang
+pkg_fn_homepage = https://github.com/reiddraper/fn
+pkg_fn_fetch = git
+pkg_fn_repo = https://github.com/reiddraper/fn
+pkg_fn_commit = master
+
+PACKAGES += folsom
+pkg_folsom_name = folsom
+pkg_folsom_description = Expose Erlang Events and Metrics
+pkg_folsom_homepage = https://github.com/boundary/folsom
+pkg_folsom_fetch = git
+pkg_folsom_repo = https://github.com/boundary/folsom
+pkg_folsom_commit = master
+
+PACKAGES += folsom_cowboy
+pkg_folsom_cowboy_name = folsom_cowboy
+pkg_folsom_cowboy_description = A Cowboy based Folsom HTTP Wrapper.
+pkg_folsom_cowboy_homepage = https://github.com/boundary/folsom_cowboy
+pkg_folsom_cowboy_fetch = git
+pkg_folsom_cowboy_repo = https://github.com/boundary/folsom_cowboy
+pkg_folsom_cowboy_commit = master
+
+PACKAGES += folsomite
+pkg_folsomite_name = folsomite
+pkg_folsomite_description = blow up your graphite / riemann server with folsom metrics
+pkg_folsomite_homepage = https://github.com/campanja/folsomite
+pkg_folsomite_fetch = git
+pkg_folsomite_repo = https://github.com/campanja/folsomite
+pkg_folsomite_commit = master
+
+PACKAGES += fs
+pkg_fs_name = fs
+pkg_fs_description = Erlang FileSystem Listener
+pkg_fs_homepage = https://github.com/synrc/fs
+pkg_fs_fetch = git
+pkg_fs_repo = https://github.com/synrc/fs
+pkg_fs_commit = master
+
+PACKAGES += fuse
+pkg_fuse_name = fuse
+pkg_fuse_description = A Circuit Breaker for Erlang
+pkg_fuse_homepage = https://github.com/jlouis/fuse
+pkg_fuse_fetch = git
+pkg_fuse_repo = https://github.com/jlouis/fuse
+pkg_fuse_commit = master
+
+PACKAGES += gcm
+pkg_gcm_name = gcm
+pkg_gcm_description = An Erlang application for Google Cloud Messaging
+pkg_gcm_homepage = https://github.com/pdincau/gcm-erlang
+pkg_gcm_fetch = git
+pkg_gcm_repo = https://github.com/pdincau/gcm-erlang
+pkg_gcm_commit = master
+
+PACKAGES += gcprof
+pkg_gcprof_name = gcprof
+pkg_gcprof_description = Garbage Collection profiler for Erlang
+pkg_gcprof_homepage = https://github.com/knutin/gcprof
+pkg_gcprof_fetch = git
+pkg_gcprof_repo = https://github.com/knutin/gcprof
+pkg_gcprof_commit = master
+
+PACKAGES += geas
+pkg_geas_name = geas
+pkg_geas_description = Guess Erlang Application Scattering
+pkg_geas_homepage = https://github.com/crownedgrouse/geas
+pkg_geas_fetch = git
+pkg_geas_repo = https://github.com/crownedgrouse/geas
+pkg_geas_commit = master
+
+PACKAGES += geef
+pkg_geef_name = geef
+pkg_geef_description = Git NEEEEF (Erlang NIF)
+pkg_geef_homepage = https://github.com/carlosmn/geef
+pkg_geef_fetch = git
+pkg_geef_repo = https://github.com/carlosmn/geef
+pkg_geef_commit = master
+
+PACKAGES += gen_coap
+pkg_gen_coap_name = gen_coap
+pkg_gen_coap_description = Generic Erlang CoAP Client/Server
+pkg_gen_coap_homepage = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_fetch = git
+pkg_gen_coap_repo = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_commit = master
+
+PACKAGES += gen_cycle
+pkg_gen_cycle_name = gen_cycle
+pkg_gen_cycle_description = Simple, generic OTP behaviour for recurring tasks
+pkg_gen_cycle_homepage = https://github.com/aerosol/gen_cycle
+pkg_gen_cycle_fetch = git
+pkg_gen_cycle_repo = https://github.com/aerosol/gen_cycle
+pkg_gen_cycle_commit = develop
+
+PACKAGES += gen_icmp
+pkg_gen_icmp_name = gen_icmp
+pkg_gen_icmp_description = Erlang interface to ICMP sockets
+pkg_gen_icmp_homepage = https://github.com/msantos/gen_icmp
+pkg_gen_icmp_fetch = git
+pkg_gen_icmp_repo = https://github.com/msantos/gen_icmp
+pkg_gen_icmp_commit = master
+
+PACKAGES += gen_leader
+pkg_gen_leader_name = gen_leader
+pkg_gen_leader_description = leader election behavior
+pkg_gen_leader_homepage = https://github.com/garret-smith/gen_leader_revival
+pkg_gen_leader_fetch = git
+pkg_gen_leader_repo = https://github.com/garret-smith/gen_leader_revival
+pkg_gen_leader_commit = master
+
+PACKAGES += gen_nb_server
+pkg_gen_nb_server_name = gen_nb_server
+pkg_gen_nb_server_description = OTP behavior for writing non-blocking servers
+pkg_gen_nb_server_homepage = https://github.com/kevsmith/gen_nb_server
+pkg_gen_nb_server_fetch = git
+pkg_gen_nb_server_repo = https://github.com/kevsmith/gen_nb_server
+pkg_gen_nb_server_commit = master
+
+PACKAGES += gen_paxos
+pkg_gen_paxos_name = gen_paxos
+pkg_gen_paxos_description = An Erlang/OTP-style implementation of the PAXOS distributed consensus protocol
+pkg_gen_paxos_homepage = https://github.com/gburd/gen_paxos
+pkg_gen_paxos_fetch = git
+pkg_gen_paxos_repo = https://github.com/gburd/gen_paxos
+pkg_gen_paxos_commit = master
+
+PACKAGES += gen_rpc
+pkg_gen_rpc_name = gen_rpc
+pkg_gen_rpc_description = A scalable RPC library for Erlang-VM based languages
+pkg_gen_rpc_homepage = https://github.com/priestjim/gen_rpc
+pkg_gen_rpc_fetch = git
+pkg_gen_rpc_repo = https://github.com/priestjim/gen_rpc.git
+pkg_gen_rpc_commit = master
+
+PACKAGES += gen_smtp
+pkg_gen_smtp_name = gen_smtp
+pkg_gen_smtp_description = A generic Erlang SMTP server and client that can be extended via callback modules
+pkg_gen_smtp_homepage = https://github.com/Vagabond/gen_smtp
+pkg_gen_smtp_fetch = git
+pkg_gen_smtp_repo = https://github.com/Vagabond/gen_smtp
+pkg_gen_smtp_commit = master
+
+PACKAGES += gen_tracker
+pkg_gen_tracker_name = gen_tracker
+pkg_gen_tracker_description = supervisor with ets handling of children and their metadata
+pkg_gen_tracker_homepage = https://github.com/erlyvideo/gen_tracker
+pkg_gen_tracker_fetch = git
+pkg_gen_tracker_repo = https://github.com/erlyvideo/gen_tracker
+pkg_gen_tracker_commit = master
+
+PACKAGES += gen_unix
+pkg_gen_unix_name = gen_unix
+pkg_gen_unix_description = Erlang Unix socket interface
+pkg_gen_unix_homepage = https://github.com/msantos/gen_unix
+pkg_gen_unix_fetch = git
+pkg_gen_unix_repo = https://github.com/msantos/gen_unix
+pkg_gen_unix_commit = master
+
+PACKAGES += geode
+pkg_geode_name = geode
+pkg_geode_description = geohash/proximity lookup in pure, uncut erlang.
+pkg_geode_homepage = https://github.com/bradfordw/geode
+pkg_geode_fetch = git
+pkg_geode_repo = https://github.com/bradfordw/geode
+pkg_geode_commit = master
+
+PACKAGES += getopt
+pkg_getopt_name = getopt
+pkg_getopt_description = Module to parse command line arguments using the GNU getopt syntax
+pkg_getopt_homepage = https://github.com/jcomellas/getopt
+pkg_getopt_fetch = git
+pkg_getopt_repo = https://github.com/jcomellas/getopt
+pkg_getopt_commit = master
+
+PACKAGES += gettext
+pkg_gettext_name = gettext
+pkg_gettext_description = Erlang internationalization library.
+pkg_gettext_homepage = https://github.com/etnt/gettext
+pkg_gettext_fetch = git
+pkg_gettext_repo = https://github.com/etnt/gettext
+pkg_gettext_commit = master
+
+PACKAGES += giallo
+pkg_giallo_name = giallo
+pkg_giallo_description = Small and flexible web framework on top of Cowboy
+pkg_giallo_homepage = https://github.com/kivra/giallo
+pkg_giallo_fetch = git
+pkg_giallo_repo = https://github.com/kivra/giallo
+pkg_giallo_commit = master
+
+PACKAGES += gin
+pkg_gin_name = gin
+pkg_gin_description = Guard expression helpers for Erlang, implemented as a parse_transform
+pkg_gin_homepage = https://github.com/mad-cocktail/gin
+pkg_gin_fetch = git
+pkg_gin_repo = https://github.com/mad-cocktail/gin
+pkg_gin_commit = master
+
+PACKAGES += gitty
+pkg_gitty_name = gitty
+pkg_gitty_description = Git access in Erlang
+pkg_gitty_homepage = https://github.com/maxlapshin/gitty
+pkg_gitty_fetch = git
+pkg_gitty_repo = https://github.com/maxlapshin/gitty
+pkg_gitty_commit = master
+
+PACKAGES += gold_fever
+pkg_gold_fever_name = gold_fever
+pkg_gold_fever_description = A Treasure Hunt for Erlangers
+pkg_gold_fever_homepage = https://github.com/inaka/gold_fever
+pkg_gold_fever_fetch = git
+pkg_gold_fever_repo = https://github.com/inaka/gold_fever
+pkg_gold_fever_commit = master
+
+PACKAGES += gpb
+pkg_gpb_name = gpb
+pkg_gpb_description = A Google Protobuf implementation for Erlang
+pkg_gpb_homepage = https://github.com/tomas-abrahamsson/gpb
+pkg_gpb_fetch = git
+pkg_gpb_repo = https://github.com/tomas-abrahamsson/gpb
+pkg_gpb_commit = master
+
+PACKAGES += gproc
+pkg_gproc_name = gproc
+pkg_gproc_description = Extended process registry for Erlang
+pkg_gproc_homepage = https://github.com/uwiger/gproc
+pkg_gproc_fetch = git
+pkg_gproc_repo = https://github.com/uwiger/gproc
+pkg_gproc_commit = master
+
+PACKAGES += grapherl
+pkg_grapherl_name = grapherl
+pkg_grapherl_description = Create graphs of Erlang systems and programs
+pkg_grapherl_homepage = https://github.com/eproxus/grapherl
+pkg_grapherl_fetch = git
+pkg_grapherl_repo = https://github.com/eproxus/grapherl
+pkg_grapherl_commit = master
+
+PACKAGES += grpc
+pkg_grpc_name = grpc
+pkg_grpc_description = gRPC server in Erlang
+pkg_grpc_homepage = https://github.com/Bluehouse-Technology/grpc
+pkg_grpc_fetch = git
+pkg_grpc_repo = https://github.com/Bluehouse-Technology/grpc
+pkg_grpc_commit = master
+
+PACKAGES += grpc_client
+pkg_grpc_client_name = grpc_client
+pkg_grpc_client_description = gRPC client in Erlang
+pkg_grpc_client_homepage = https://github.com/Bluehouse-Technology/grpc_client
+pkg_grpc_client_fetch = git
+pkg_grpc_client_repo = https://github.com/Bluehouse-Technology/grpc_client
+pkg_grpc_client_commit = master
+
+PACKAGES += gun
+pkg_gun_name = gun
+pkg_gun_description = Asynchronous SPDY, HTTP and Websocket client written in Erlang.
+pkg_gun_homepage = http://ninenines.eu
+pkg_gun_fetch = git
+pkg_gun_repo = https://github.com/ninenines/gun
+pkg_gun_commit = master
+
+PACKAGES += gut
+pkg_gut_name = gut
+pkg_gut_description = gut is a template printing, aka scaffolding, tool for Erlang. Like rails generate or yeoman
+pkg_gut_homepage = https://github.com/unbalancedparentheses/gut
+pkg_gut_fetch = git
+pkg_gut_repo = https://github.com/unbalancedparentheses/gut
+pkg_gut_commit = master
+
+PACKAGES += hackney
+pkg_hackney_name = hackney
+pkg_hackney_description = simple HTTP client in Erlang
+pkg_hackney_homepage = https://github.com/benoitc/hackney
+pkg_hackney_fetch = git
+pkg_hackney_repo = https://github.com/benoitc/hackney
+pkg_hackney_commit = master
+
+PACKAGES += hamcrest
+pkg_hamcrest_name = hamcrest
+pkg_hamcrest_description = Erlang port of Hamcrest
+pkg_hamcrest_homepage = https://github.com/hyperthunk/hamcrest-erlang
+pkg_hamcrest_fetch = git
+pkg_hamcrest_repo = https://github.com/hyperthunk/hamcrest-erlang
+pkg_hamcrest_commit = master
+
+PACKAGES += hanoidb
+pkg_hanoidb_name = hanoidb
+pkg_hanoidb_description = Erlang LSM BTree Storage
+pkg_hanoidb_homepage = https://github.com/krestenkrab/hanoidb
+pkg_hanoidb_fetch = git
+pkg_hanoidb_repo = https://github.com/krestenkrab/hanoidb
+pkg_hanoidb_commit = master
+
+PACKAGES += hottub
+pkg_hottub_name = hottub
+pkg_hottub_description = Permanent Erlang Worker Pool
+pkg_hottub_homepage = https://github.com/bfrog/hottub
+pkg_hottub_fetch = git
+pkg_hottub_repo = https://github.com/bfrog/hottub
+pkg_hottub_commit = master
+
+PACKAGES += hpack
+pkg_hpack_name = hpack
+pkg_hpack_description = HPACK Implementation for Erlang
+pkg_hpack_homepage = https://github.com/joedevivo/hpack
+pkg_hpack_fetch = git
+pkg_hpack_repo = https://github.com/joedevivo/hpack
+pkg_hpack_commit = master
+
+PACKAGES += hyper
+pkg_hyper_name = hyper
+pkg_hyper_description = Erlang implementation of HyperLogLog
+pkg_hyper_homepage = https://github.com/GameAnalytics/hyper
+pkg_hyper_fetch = git
+pkg_hyper_repo = https://github.com/GameAnalytics/hyper
+pkg_hyper_commit = master
+
+PACKAGES += i18n
+pkg_i18n_name = i18n
+pkg_i18n_description = International components for Unicode from Erlang (unicode, date, string, number, format, locale, localization, transliteration, icu4e)
+pkg_i18n_homepage = https://github.com/erlang-unicode/i18n
+pkg_i18n_fetch = git
+pkg_i18n_repo = https://github.com/erlang-unicode/i18n
+pkg_i18n_commit = master
+
+PACKAGES += ibrowse
+pkg_ibrowse_name = ibrowse
+pkg_ibrowse_description = Erlang HTTP client
+pkg_ibrowse_homepage = https://github.com/cmullaparthi/ibrowse
+pkg_ibrowse_fetch = git
+pkg_ibrowse_repo = https://github.com/cmullaparthi/ibrowse
+pkg_ibrowse_commit = master
+
+PACKAGES += idna
+pkg_idna_name = idna
+pkg_idna_description = Erlang IDNA lib
+pkg_idna_homepage = https://github.com/benoitc/erlang-idna
+pkg_idna_fetch = git
+pkg_idna_repo = https://github.com/benoitc/erlang-idna
+pkg_idna_commit = master
+
+PACKAGES += ierlang
+pkg_ierlang_name = ierlang
+pkg_ierlang_description = An Erlang language kernel for IPython.
+pkg_ierlang_homepage = https://github.com/robbielynch/ierlang
+pkg_ierlang_fetch = git
+pkg_ierlang_repo = https://github.com/robbielynch/ierlang
+pkg_ierlang_commit = master
+
+PACKAGES += iota
+pkg_iota_name = iota
+pkg_iota_description = iota (Inter-dependency Objective Testing Apparatus) - a tool to enforce clean separation of responsibilities in Erlang code
+pkg_iota_homepage = https://github.com/jpgneves/iota
+pkg_iota_fetch = git
+pkg_iota_repo = https://github.com/jpgneves/iota
+pkg_iota_commit = master
+
+PACKAGES += irc_lib
+pkg_irc_lib_name = irc_lib
+pkg_irc_lib_description = Erlang IRC client library
+pkg_irc_lib_homepage = https://github.com/OtpChatBot/irc_lib
+pkg_irc_lib_fetch = git
+pkg_irc_lib_repo = https://github.com/OtpChatBot/irc_lib
+pkg_irc_lib_commit = master
+
+PACKAGES += ircd
+pkg_ircd_name = ircd
+pkg_ircd_description = A pluggable IRC daemon application/library for Erlang.
+pkg_ircd_homepage = https://github.com/tonyg/erlang-ircd
+pkg_ircd_fetch = git
+pkg_ircd_repo = https://github.com/tonyg/erlang-ircd
+pkg_ircd_commit = master
+
+PACKAGES += iris
+pkg_iris_name = iris
+pkg_iris_description = Iris Erlang binding
+pkg_iris_homepage = https://github.com/project-iris/iris-erl
+pkg_iris_fetch = git
+pkg_iris_repo = https://github.com/project-iris/iris-erl
+pkg_iris_commit = master
+
+PACKAGES += iso8601
+pkg_iso8601_name = iso8601
+pkg_iso8601_description = Erlang ISO 8601 date formatter/parser
+pkg_iso8601_homepage = https://github.com/seansawyer/erlang_iso8601
+pkg_iso8601_fetch = git
+pkg_iso8601_repo = https://github.com/seansawyer/erlang_iso8601
+pkg_iso8601_commit = master
+
+PACKAGES += jamdb_sybase
+pkg_jamdb_sybase_name = jamdb_sybase
+pkg_jamdb_sybase_description = Erlang driver for SAP Sybase ASE
+pkg_jamdb_sybase_homepage = https://github.com/erlangbureau/jamdb_sybase
+pkg_jamdb_sybase_fetch = git
+pkg_jamdb_sybase_repo = https://github.com/erlangbureau/jamdb_sybase
+pkg_jamdb_sybase_commit = master
+
+PACKAGES += jerg
+pkg_jerg_name = jerg
+pkg_jerg_description = JSON Schema to Erlang Records Generator
+pkg_jerg_homepage = https://github.com/ddossot/jerg
+pkg_jerg_fetch = git
+pkg_jerg_repo = https://github.com/ddossot/jerg
+pkg_jerg_commit = master
+
+PACKAGES += jesse
+pkg_jesse_name = jesse
+pkg_jesse_description = jesse (JSon Schema Erlang) is an implementation of a JSON Schema validator for Erlang.
+pkg_jesse_homepage = https://github.com/for-GET/jesse
+pkg_jesse_fetch = git
+pkg_jesse_repo = https://github.com/for-GET/jesse
+pkg_jesse_commit = master
+
+PACKAGES += jiffy
+pkg_jiffy_name = jiffy
+pkg_jiffy_description = JSON NIFs for Erlang.
+pkg_jiffy_homepage = https://github.com/davisp/jiffy
+pkg_jiffy_fetch = git
+pkg_jiffy_repo = https://github.com/davisp/jiffy
+pkg_jiffy_commit = master
+
+PACKAGES += jiffy_v
+pkg_jiffy_v_name = jiffy_v
+pkg_jiffy_v_description = JSON validation utility
+pkg_jiffy_v_homepage = https://github.com/shizzard/jiffy-v
+pkg_jiffy_v_fetch = git
+pkg_jiffy_v_repo = https://github.com/shizzard/jiffy-v
+pkg_jiffy_v_commit = master
+
+PACKAGES += jobs
+pkg_jobs_name = jobs
+pkg_jobs_description = a job scheduler for load regulation
+pkg_jobs_homepage = https://github.com/esl/jobs
+pkg_jobs_fetch = git
+pkg_jobs_repo = https://github.com/esl/jobs
+pkg_jobs_commit = master
+
+PACKAGES += joxa
+pkg_joxa_name = joxa
+pkg_joxa_description = A Modern Lisp for the Erlang VM
+pkg_joxa_homepage = https://github.com/joxa/joxa
+pkg_joxa_fetch = git
+pkg_joxa_repo = https://github.com/joxa/joxa
+pkg_joxa_commit = master
+
+PACKAGES += json
+pkg_json_name = json
+pkg_json_description = a high-level JSON library for Erlang (17.0+)
+pkg_json_homepage = https://github.com/talentdeficit/json
+pkg_json_fetch = git
+pkg_json_repo = https://github.com/talentdeficit/json
+pkg_json_commit = master
+
+PACKAGES += json_rec
+pkg_json_rec_name = json_rec
+pkg_json_rec_description = JSON to Erlang record
+pkg_json_rec_homepage = https://github.com/justinkirby/json_rec
+pkg_json_rec_fetch = git
+pkg_json_rec_repo = https://github.com/justinkirby/json_rec
+pkg_json_rec_commit = master
+
+PACKAGES += jsone
+pkg_jsone_name = jsone
+pkg_jsone_description = An Erlang library for encoding and decoding JSON data.
+pkg_jsone_homepage = https://github.com/sile/jsone.git
+pkg_jsone_fetch = git
+pkg_jsone_repo = https://github.com/sile/jsone.git
+pkg_jsone_commit = master
+
+PACKAGES += jsonerl
+pkg_jsonerl_name = jsonerl
+pkg_jsonerl_description = yet another but slightly different erlang <-> json encoder/decoder
+pkg_jsonerl_homepage = https://github.com/lambder/jsonerl
+pkg_jsonerl_fetch = git
+pkg_jsonerl_repo = https://github.com/lambder/jsonerl
+pkg_jsonerl_commit = master
+
+PACKAGES += jsonpath
+pkg_jsonpath_name = jsonpath
+pkg_jsonpath_description = Fast Erlang JSON data retrieval and updates via javascript-like notation
+pkg_jsonpath_homepage = https://github.com/GeneStevens/jsonpath
+pkg_jsonpath_fetch = git
+pkg_jsonpath_repo = https://github.com/GeneStevens/jsonpath
+pkg_jsonpath_commit = master
+
+PACKAGES += jsonx
+pkg_jsonx_name = jsonx
+pkg_jsonx_description = JSONX is an Erlang library, written in C, for efficiently decoding and encoding JSON.
+pkg_jsonx_homepage = https://github.com/iskra/jsonx
+pkg_jsonx_fetch = git
+pkg_jsonx_repo = https://github.com/iskra/jsonx
+pkg_jsonx_commit = master
+
+PACKAGES += jsx
+pkg_jsx_name = jsx
+pkg_jsx_description = An Erlang application for consuming, producing and manipulating JSON.
+pkg_jsx_homepage = https://github.com/talentdeficit/jsx
+pkg_jsx_fetch = git
+pkg_jsx_repo = https://github.com/talentdeficit/jsx
+pkg_jsx_commit = master
+
+PACKAGES += kafka
+pkg_kafka_name = kafka
+pkg_kafka_description = Kafka consumer and producer in Erlang
+pkg_kafka_homepage = https://github.com/wooga/kafka-erlang
+pkg_kafka_fetch = git
+pkg_kafka_repo = https://github.com/wooga/kafka-erlang
+pkg_kafka_commit = master
+
+PACKAGES += kafka_protocol
+pkg_kafka_protocol_name = kafka_protocol
+pkg_kafka_protocol_description = Kafka protocol Erlang library
+pkg_kafka_protocol_homepage = https://github.com/klarna/kafka_protocol
+pkg_kafka_protocol_fetch = git
+pkg_kafka_protocol_repo = https://github.com/klarna/kafka_protocol.git
+pkg_kafka_protocol_commit = master
+
+PACKAGES += kai
+pkg_kai_name = kai
+pkg_kai_description = DHT storage by Takeshi Inoue
+pkg_kai_homepage = https://github.com/synrc/kai
+pkg_kai_fetch = git
+pkg_kai_repo = https://github.com/synrc/kai
+pkg_kai_commit = master
+
+PACKAGES += katja
+pkg_katja_name = katja
+pkg_katja_description = A simple Riemann client written in Erlang.
+pkg_katja_homepage = https://github.com/nifoc/katja
+pkg_katja_fetch = git
+pkg_katja_repo = https://github.com/nifoc/katja
+pkg_katja_commit = master
+
+PACKAGES += kdht
+pkg_kdht_name = kdht
+pkg_kdht_description = kdht is an erlang DHT implementation
+pkg_kdht_homepage = https://github.com/kevinlynx/kdht
+pkg_kdht_fetch = git
+pkg_kdht_repo = https://github.com/kevinlynx/kdht
+pkg_kdht_commit = master
+
+PACKAGES += key2value
+pkg_key2value_name = key2value
+pkg_key2value_description = Erlang 2-way map
+pkg_key2value_homepage = https://github.com/okeuday/key2value
+pkg_key2value_fetch = git
+pkg_key2value_repo = https://github.com/okeuday/key2value
+pkg_key2value_commit = master
+
+PACKAGES += keys1value
+pkg_keys1value_name = keys1value
+pkg_keys1value_description = Erlang set associative map for key lists
+pkg_keys1value_homepage = https://github.com/okeuday/keys1value
+pkg_keys1value_fetch = git
+pkg_keys1value_repo = https://github.com/okeuday/keys1value
+pkg_keys1value_commit = master
+
+PACKAGES += kinetic
+pkg_kinetic_name = kinetic
+pkg_kinetic_description = Erlang Kinesis Client
+pkg_kinetic_homepage = https://github.com/AdRoll/kinetic
+pkg_kinetic_fetch = git
+pkg_kinetic_repo = https://github.com/AdRoll/kinetic
+pkg_kinetic_commit = master
+
+PACKAGES += kjell
+pkg_kjell_name = kjell
+pkg_kjell_description = Erlang Shell
+pkg_kjell_homepage = https://github.com/karlll/kjell
+pkg_kjell_fetch = git
+pkg_kjell_repo = https://github.com/karlll/kjell
+pkg_kjell_commit = master
+
+PACKAGES += kraken
+pkg_kraken_name = kraken
+pkg_kraken_description = Distributed Pubsub Server for Realtime Apps
+pkg_kraken_homepage = https://github.com/Asana/kraken
+pkg_kraken_fetch = git
+pkg_kraken_repo = https://github.com/Asana/kraken
+pkg_kraken_commit = master
+
+PACKAGES += kucumberl
+pkg_kucumberl_name = kucumberl
+pkg_kucumberl_description = A pure-Erlang, open-source implementation of Cucumber
+pkg_kucumberl_homepage = https://github.com/openshine/kucumberl
+pkg_kucumberl_fetch = git
+pkg_kucumberl_repo = https://github.com/openshine/kucumberl
+pkg_kucumberl_commit = master
+
+PACKAGES += kvc
+pkg_kvc_name = kvc
+pkg_kvc_description = KVC - Key Value Coding for Erlang data structures
+pkg_kvc_homepage = https://github.com/etrepum/kvc
+pkg_kvc_fetch = git
+pkg_kvc_repo = https://github.com/etrepum/kvc
+pkg_kvc_commit = master
+
+PACKAGES += kvlists
+pkg_kvlists_name = kvlists
+pkg_kvlists_description = Lists of key-value pairs (decoded JSON) in Erlang
+pkg_kvlists_homepage = https://github.com/jcomellas/kvlists
+pkg_kvlists_fetch = git
+pkg_kvlists_repo = https://github.com/jcomellas/kvlists
+pkg_kvlists_commit = master
+
+PACKAGES += kvs
+pkg_kvs_name = kvs
+pkg_kvs_description = Container and Iterator
+pkg_kvs_homepage = https://github.com/synrc/kvs
+pkg_kvs_fetch = git
+pkg_kvs_repo = https://github.com/synrc/kvs
+pkg_kvs_commit = master
+
+PACKAGES += lager
+pkg_lager_name = lager
+pkg_lager_description = A logging framework for Erlang/OTP.
+pkg_lager_homepage = https://github.com/erlang-lager/lager
+pkg_lager_fetch = git
+pkg_lager_repo = https://github.com/erlang-lager/lager
+pkg_lager_commit = master
+
+PACKAGES += lager_amqp_backend
+pkg_lager_amqp_backend_name = lager_amqp_backend
+pkg_lager_amqp_backend_description = AMQP RabbitMQ Lager backend
+pkg_lager_amqp_backend_homepage = https://github.com/jbrisbin/lager_amqp_backend
+pkg_lager_amqp_backend_fetch = git
+pkg_lager_amqp_backend_repo = https://github.com/jbrisbin/lager_amqp_backend
+pkg_lager_amqp_backend_commit = master
+
+PACKAGES += lager_syslog
+pkg_lager_syslog_name = lager_syslog
+pkg_lager_syslog_description = Syslog backend for lager
+pkg_lager_syslog_homepage = https://github.com/erlang-lager/lager_syslog
+pkg_lager_syslog_fetch = git
+pkg_lager_syslog_repo = https://github.com/erlang-lager/lager_syslog
+pkg_lager_syslog_commit = master
+
+PACKAGES += lambdapad
+pkg_lambdapad_name = lambdapad
+pkg_lambdapad_description = Static site generator using Erlang. Yes, Erlang.
+pkg_lambdapad_homepage = https://github.com/gar1t/lambdapad
+pkg_lambdapad_fetch = git
+pkg_lambdapad_repo = https://github.com/gar1t/lambdapad
+pkg_lambdapad_commit = master
+
+PACKAGES += lasp
+pkg_lasp_name = lasp
+pkg_lasp_description = A Language for Distributed, Eventually Consistent Computations
+pkg_lasp_homepage = http://lasp-lang.org/
+pkg_lasp_fetch = git
+pkg_lasp_repo = https://github.com/lasp-lang/lasp
+pkg_lasp_commit = master
+
+PACKAGES += lasse
+pkg_lasse_name = lasse
+pkg_lasse_description = SSE handler for Cowboy
+pkg_lasse_homepage = https://github.com/inaka/lasse
+pkg_lasse_fetch = git
+pkg_lasse_repo = https://github.com/inaka/lasse
+pkg_lasse_commit = master
+
+PACKAGES += ldap
+pkg_ldap_name = ldap
+pkg_ldap_description = LDAP server written in Erlang
+pkg_ldap_homepage = https://github.com/spawnproc/ldap
+pkg_ldap_fetch = git
+pkg_ldap_repo = https://github.com/spawnproc/ldap
+pkg_ldap_commit = master
+
+PACKAGES += lethink
+pkg_lethink_name = lethink
+pkg_lethink_description = erlang driver for rethinkdb
+pkg_lethink_homepage = https://github.com/taybin/lethink
+pkg_lethink_fetch = git
+pkg_lethink_repo = https://github.com/taybin/lethink
+pkg_lethink_commit = master
+
+PACKAGES += lfe
+pkg_lfe_name = lfe
+pkg_lfe_description = Lisp Flavoured Erlang (LFE)
+pkg_lfe_homepage = https://github.com/rvirding/lfe
+pkg_lfe_fetch = git
+pkg_lfe_repo = https://github.com/rvirding/lfe
+pkg_lfe_commit = master
+
+PACKAGES += ling
+pkg_ling_name = ling
+pkg_ling_description = Erlang on Xen
+pkg_ling_homepage = https://github.com/cloudozer/ling
+pkg_ling_fetch = git
+pkg_ling_repo = https://github.com/cloudozer/ling
+pkg_ling_commit = master
+
+PACKAGES += live
+pkg_live_name = live
+pkg_live_description = Automated module and configuration reloader.
+pkg_live_homepage = http://ninenines.eu
+pkg_live_fetch = git
+pkg_live_repo = https://github.com/ninenines/live
+pkg_live_commit = master
+
+PACKAGES += lmq
+pkg_lmq_name = lmq
+pkg_lmq_description = Lightweight Message Queue
+pkg_lmq_homepage = https://github.com/iij/lmq
+pkg_lmq_fetch = git
+pkg_lmq_repo = https://github.com/iij/lmq
+pkg_lmq_commit = master
+
+PACKAGES += locker
+pkg_locker_name = locker
+pkg_locker_description = Atomic distributed 'check and set' for short-lived keys
+pkg_locker_homepage = https://github.com/wooga/locker
+pkg_locker_fetch = git
+pkg_locker_repo = https://github.com/wooga/locker
+pkg_locker_commit = master
+
+PACKAGES += locks
+pkg_locks_name = locks
+pkg_locks_description = A scalable, deadlock-resolving resource locker
+pkg_locks_homepage = https://github.com/uwiger/locks
+pkg_locks_fetch = git
+pkg_locks_repo = https://github.com/uwiger/locks
+pkg_locks_commit = master
+
+PACKAGES += log4erl
+pkg_log4erl_name = log4erl
+pkg_log4erl_description = A logger for erlang in the spirit of Log4J.
+pkg_log4erl_homepage = https://github.com/ahmednawras/log4erl
+pkg_log4erl_fetch = git
+pkg_log4erl_repo = https://github.com/ahmednawras/log4erl
+pkg_log4erl_commit = master
+
+PACKAGES += lol
+pkg_lol_name = lol
+pkg_lol_description = Lisp on erLang, and programming is fun again
+pkg_lol_homepage = https://github.com/b0oh/lol
+pkg_lol_fetch = git
+pkg_lol_repo = https://github.com/b0oh/lol
+pkg_lol_commit = master
+
+PACKAGES += lucid
+pkg_lucid_name = lucid
+pkg_lucid_description = HTTP/2 server written in Erlang
+pkg_lucid_homepage = https://github.com/tatsuhiro-t/lucid
+pkg_lucid_fetch = git
+pkg_lucid_repo = https://github.com/tatsuhiro-t/lucid
+pkg_lucid_commit = master
+
+PACKAGES += luerl
+pkg_luerl_name = luerl
+pkg_luerl_description = Lua in Erlang
+pkg_luerl_homepage = https://github.com/rvirding/luerl
+pkg_luerl_fetch = git
+pkg_luerl_repo = https://github.com/rvirding/luerl
+pkg_luerl_commit = develop
+
+PACKAGES += luwak
+pkg_luwak_name = luwak
+pkg_luwak_description = Large-object storage interface for Riak
+pkg_luwak_homepage = https://github.com/basho/luwak
+pkg_luwak_fetch = git
+pkg_luwak_repo = https://github.com/basho/luwak
+pkg_luwak_commit = master
+
+PACKAGES += lux
+pkg_lux_name = lux
+pkg_lux_description = Lux (LUcid eXpect scripting) simplifies test automation and provides an Expect-style execution of commands
+pkg_lux_homepage = https://github.com/hawk/lux
+pkg_lux_fetch = git
+pkg_lux_repo = https://github.com/hawk/lux
+pkg_lux_commit = master
+
+PACKAGES += machi
+pkg_machi_name = machi
+pkg_machi_description = Machi file store
+pkg_machi_homepage = https://github.com/basho/machi
+pkg_machi_fetch = git
+pkg_machi_repo = https://github.com/basho/machi
+pkg_machi_commit = master
+
+PACKAGES += mad
+pkg_mad_name = mad
+pkg_mad_description = Small and Fast Rebar Replacement
+pkg_mad_homepage = https://github.com/synrc/mad
+pkg_mad_fetch = git
+pkg_mad_repo = https://github.com/synrc/mad
+pkg_mad_commit = master
+
+PACKAGES += marina
+pkg_marina_name = marina
+pkg_marina_description = Non-blocking Erlang Cassandra CQL3 client
+pkg_marina_homepage = https://github.com/lpgauth/marina
+pkg_marina_fetch = git
+pkg_marina_repo = https://github.com/lpgauth/marina
+pkg_marina_commit = master
+
+PACKAGES += mavg
+pkg_mavg_name = mavg
+pkg_mavg_description = Erlang :: Exponential moving average library
+pkg_mavg_homepage = https://github.com/EchoTeam/mavg
+pkg_mavg_fetch = git
+pkg_mavg_repo = https://github.com/EchoTeam/mavg
+pkg_mavg_commit = master
+
+PACKAGES += mc_erl
+pkg_mc_erl_name = mc_erl
+pkg_mc_erl_description = mc-erl is a server for Minecraft 1.4.7 written in Erlang.
+pkg_mc_erl_homepage = https://github.com/clonejo/mc-erl
+pkg_mc_erl_fetch = git
+pkg_mc_erl_repo = https://github.com/clonejo/mc-erl
+pkg_mc_erl_commit = master
+
+PACKAGES += mcd
+pkg_mcd_name = mcd
+pkg_mcd_description = Fast memcached protocol client in pure Erlang
+pkg_mcd_homepage = https://github.com/EchoTeam/mcd
+pkg_mcd_fetch = git
+pkg_mcd_repo = https://github.com/EchoTeam/mcd
+pkg_mcd_commit = master
+
+PACKAGES += mcerlang
+pkg_mcerlang_name = mcerlang
+pkg_mcerlang_description = The McErlang model checker for Erlang
+pkg_mcerlang_homepage = https://github.com/fredlund/McErlang
+pkg_mcerlang_fetch = git
+pkg_mcerlang_repo = https://github.com/fredlund/McErlang
+pkg_mcerlang_commit = master
+
+PACKAGES += meck
+pkg_meck_name = meck
+pkg_meck_description = A mocking library for Erlang
+pkg_meck_homepage = https://github.com/eproxus/meck
+pkg_meck_fetch = git
+pkg_meck_repo = https://github.com/eproxus/meck
+pkg_meck_commit = master
+
+PACKAGES += mekao
+pkg_mekao_name = mekao
+pkg_mekao_description = SQL constructor
+pkg_mekao_homepage = https://github.com/ddosia/mekao
+pkg_mekao_fetch = git
+pkg_mekao_repo = https://github.com/ddosia/mekao
+pkg_mekao_commit = master
+
+PACKAGES += memo
+pkg_memo_name = memo
+pkg_memo_description = Erlang memoization server
+pkg_memo_homepage = https://github.com/tuncer/memo
+pkg_memo_fetch = git
+pkg_memo_repo = https://github.com/tuncer/memo
+pkg_memo_commit = master
+
+PACKAGES += merge_index
+pkg_merge_index_name = merge_index
+pkg_merge_index_description = MergeIndex is an Erlang library for storing ordered sets on disk. It is very similar to an SSTable (in Google's Bigtable) or an HFile (in Hadoop).
+pkg_merge_index_homepage = https://github.com/basho/merge_index
+pkg_merge_index_fetch = git
+pkg_merge_index_repo = https://github.com/basho/merge_index
+pkg_merge_index_commit = master
+
+PACKAGES += merl
+pkg_merl_name = merl
+pkg_merl_description = Metaprogramming in Erlang
+pkg_merl_homepage = https://github.com/richcarl/merl
+pkg_merl_fetch = git
+pkg_merl_repo = https://github.com/richcarl/merl
+pkg_merl_commit = master
+
+PACKAGES += mimerl
+pkg_mimerl_name = mimerl
+pkg_mimerl_description = library to handle mimetypes
+pkg_mimerl_homepage = https://github.com/benoitc/mimerl
+pkg_mimerl_fetch = git
+pkg_mimerl_repo = https://github.com/benoitc/mimerl
+pkg_mimerl_commit = master
+
+PACKAGES += mimetypes
+pkg_mimetypes_name = mimetypes
+pkg_mimetypes_description = Erlang MIME types library
+pkg_mimetypes_homepage = https://github.com/spawngrid/mimetypes
+pkg_mimetypes_fetch = git
+pkg_mimetypes_repo = https://github.com/spawngrid/mimetypes
+pkg_mimetypes_commit = master
+
+PACKAGES += mixer
+pkg_mixer_name = mixer
+pkg_mixer_description = Mix in functions from other modules
+pkg_mixer_homepage = https://github.com/chef/mixer
+pkg_mixer_fetch = git
+pkg_mixer_repo = https://github.com/chef/mixer
+pkg_mixer_commit = master
+
+PACKAGES += mochiweb
+pkg_mochiweb_name = mochiweb
+pkg_mochiweb_description = MochiWeb is an Erlang library for building lightweight HTTP servers.
+pkg_mochiweb_homepage = https://github.com/mochi/mochiweb
+pkg_mochiweb_fetch = git
+pkg_mochiweb_repo = https://github.com/mochi/mochiweb
+pkg_mochiweb_commit = master
+
+PACKAGES += mochiweb_xpath
+pkg_mochiweb_xpath_name = mochiweb_xpath
+pkg_mochiweb_xpath_description = XPath support for mochiweb's html parser
+pkg_mochiweb_xpath_homepage = https://github.com/retnuh/mochiweb_xpath
+pkg_mochiweb_xpath_fetch = git
+pkg_mochiweb_xpath_repo = https://github.com/retnuh/mochiweb_xpath
+pkg_mochiweb_xpath_commit = master
+
+PACKAGES += mockgyver
+pkg_mockgyver_name = mockgyver
+pkg_mockgyver_description = A mocking library for Erlang
+pkg_mockgyver_homepage = https://github.com/klajo/mockgyver
+pkg_mockgyver_fetch = git
+pkg_mockgyver_repo = https://github.com/klajo/mockgyver
+pkg_mockgyver_commit = master
+
+PACKAGES += modlib
+pkg_modlib_name = modlib
+pkg_modlib_description = Web framework based on Erlang's inets httpd
+pkg_modlib_homepage = https://github.com/gar1t/modlib
+pkg_modlib_fetch = git
+pkg_modlib_repo = https://github.com/gar1t/modlib
+pkg_modlib_commit = master
+
+PACKAGES += mongodb
+pkg_mongodb_name = mongodb
+pkg_mongodb_description = MongoDB driver for Erlang
+pkg_mongodb_homepage = https://github.com/comtihon/mongodb-erlang
+pkg_mongodb_fetch = git
+pkg_mongodb_repo = https://github.com/comtihon/mongodb-erlang
+pkg_mongodb_commit = master
+
+PACKAGES += mongooseim
+pkg_mongooseim_name = mongooseim
+pkg_mongooseim_description = Jabber / XMPP server with focus on performance and scalability, by Erlang Solutions
+pkg_mongooseim_homepage = https://www.erlang-solutions.com/products/mongooseim-massively-scalable-ejabberd-platform
+pkg_mongooseim_fetch = git
+pkg_mongooseim_repo = https://github.com/esl/MongooseIM
+pkg_mongooseim_commit = master
+
+PACKAGES += moyo
+pkg_moyo_name = moyo
+pkg_moyo_description = Erlang utility functions library
+pkg_moyo_homepage = https://github.com/dwango/moyo
+pkg_moyo_fetch = git
+pkg_moyo_repo = https://github.com/dwango/moyo
+pkg_moyo_commit = master
+
+PACKAGES += msgpack
+pkg_msgpack_name = msgpack
+pkg_msgpack_description = MessagePack (de)serializer implementation for Erlang
+pkg_msgpack_homepage = https://github.com/msgpack/msgpack-erlang
+pkg_msgpack_fetch = git
+pkg_msgpack_repo = https://github.com/msgpack/msgpack-erlang
+pkg_msgpack_commit = master
+
+PACKAGES += mu2
+pkg_mu2_name = mu2
+pkg_mu2_description = Erlang mutation testing tool
+pkg_mu2_homepage = https://github.com/ramsay-t/mu2
+pkg_mu2_fetch = git
+pkg_mu2_repo = https://github.com/ramsay-t/mu2
+pkg_mu2_commit = master
+
+PACKAGES += mustache
+pkg_mustache_name = mustache
+pkg_mustache_description = Mustache template engine for Erlang.
+pkg_mustache_homepage = https://github.com/mojombo/mustache.erl
+pkg_mustache_fetch = git
+pkg_mustache_repo = https://github.com/mojombo/mustache.erl
+pkg_mustache_commit = master
+
+PACKAGES += myproto
+pkg_myproto_name = myproto
+pkg_myproto_description = MySQL Server Protocol in Erlang
+pkg_myproto_homepage = https://github.com/altenwald/myproto
+pkg_myproto_fetch = git
+pkg_myproto_repo = https://github.com/altenwald/myproto
+pkg_myproto_commit = master
+
+PACKAGES += mysql
+pkg_mysql_name = mysql
+pkg_mysql_description = MySQL client library for Erlang/OTP
+pkg_mysql_homepage = https://github.com/mysql-otp/mysql-otp
+pkg_mysql_fetch = git
+pkg_mysql_repo = https://github.com/mysql-otp/mysql-otp
+pkg_mysql_commit = 1.5.1
+
+PACKAGES += n2o
+pkg_n2o_name = n2o
+pkg_n2o_description = WebSocket Application Server
+pkg_n2o_homepage = https://github.com/5HT/n2o
+pkg_n2o_fetch = git
+pkg_n2o_repo = https://github.com/5HT/n2o
+pkg_n2o_commit = master
+
+PACKAGES += nat_upnp
+pkg_nat_upnp_name = nat_upnp
+pkg_nat_upnp_description = Erlang library to map your internal port to an external one using UPnP IGD
+pkg_nat_upnp_homepage = https://github.com/benoitc/nat_upnp
+pkg_nat_upnp_fetch = git
+pkg_nat_upnp_repo = https://github.com/benoitc/nat_upnp
+pkg_nat_upnp_commit = master
+
+PACKAGES += neo4j
+pkg_neo4j_name = neo4j
+pkg_neo4j_description = Erlang client library for Neo4J.
+pkg_neo4j_homepage = https://github.com/dmitriid/neo4j-erlang
+pkg_neo4j_fetch = git
+pkg_neo4j_repo = https://github.com/dmitriid/neo4j-erlang
+pkg_neo4j_commit = master
+
+PACKAGES += neotoma
+pkg_neotoma_name = neotoma
+pkg_neotoma_description = Erlang library and packrat parser-generator for parsing expression grammars.
+pkg_neotoma_homepage = https://github.com/seancribbs/neotoma
+pkg_neotoma_fetch = git
+pkg_neotoma_repo = https://github.com/seancribbs/neotoma
+pkg_neotoma_commit = master
+
+PACKAGES += newrelic
+pkg_newrelic_name = newrelic
+pkg_newrelic_description = Erlang library for sending metrics to New Relic
+pkg_newrelic_homepage = https://github.com/wooga/newrelic-erlang
+pkg_newrelic_fetch = git
+pkg_newrelic_repo = https://github.com/wooga/newrelic-erlang
+pkg_newrelic_commit = master
+
+PACKAGES += nifty
+pkg_nifty_name = nifty
+pkg_nifty_description = Erlang NIF wrapper generator
+pkg_nifty_homepage = https://github.com/parapluu/nifty
+pkg_nifty_fetch = git
+pkg_nifty_repo = https://github.com/parapluu/nifty
+pkg_nifty_commit = master
+
+PACKAGES += nitrogen_core
+pkg_nitrogen_core_name = nitrogen_core
+pkg_nitrogen_core_description = The core Nitrogen library.
+pkg_nitrogen_core_homepage = http://nitrogenproject.com/
+pkg_nitrogen_core_fetch = git
+pkg_nitrogen_core_repo = https://github.com/nitrogen/nitrogen_core
+pkg_nitrogen_core_commit = master
+
+PACKAGES += nkbase
+pkg_nkbase_name = nkbase
+pkg_nkbase_description = NkBASE distributed database
+pkg_nkbase_homepage = https://github.com/Nekso/nkbase
+pkg_nkbase_fetch = git
+pkg_nkbase_repo = https://github.com/Nekso/nkbase
+pkg_nkbase_commit = develop
+
+PACKAGES += nkdocker
+pkg_nkdocker_name = nkdocker
+pkg_nkdocker_description = Erlang Docker client
+pkg_nkdocker_homepage = https://github.com/Nekso/nkdocker
+pkg_nkdocker_fetch = git
+pkg_nkdocker_repo = https://github.com/Nekso/nkdocker
+pkg_nkdocker_commit = master
+
+PACKAGES += nkpacket
+pkg_nkpacket_name = nkpacket
+pkg_nkpacket_description = Generic Erlang transport layer
+pkg_nkpacket_homepage = https://github.com/Nekso/nkpacket
+pkg_nkpacket_fetch = git
+pkg_nkpacket_repo = https://github.com/Nekso/nkpacket
+pkg_nkpacket_commit = master
+
+PACKAGES += nksip
+pkg_nksip_name = nksip
+pkg_nksip_description = Erlang SIP application server
+pkg_nksip_homepage = https://github.com/kalta/nksip
+pkg_nksip_fetch = git
+pkg_nksip_repo = https://github.com/kalta/nksip
+pkg_nksip_commit = master
+
+PACKAGES += nodefinder
+pkg_nodefinder_name = nodefinder
+pkg_nodefinder_description = automatic node discovery via UDP multicast
+pkg_nodefinder_homepage = https://github.com/erlanger/nodefinder
+pkg_nodefinder_fetch = git
+pkg_nodefinder_repo = https://github.com/okeuday/nodefinder
+pkg_nodefinder_commit = master
+
+PACKAGES += nprocreg
+pkg_nprocreg_name = nprocreg
+pkg_nprocreg_description = Minimal Distributed Erlang Process Registry
+pkg_nprocreg_homepage = http://nitrogenproject.com/
+pkg_nprocreg_fetch = git
+pkg_nprocreg_repo = https://github.com/nitrogen/nprocreg
+pkg_nprocreg_commit = master
+
+PACKAGES += oauth
+pkg_oauth_name = oauth
+pkg_oauth_description = An Erlang OAuth 1.0 implementation
+pkg_oauth_homepage = https://github.com/tim/erlang-oauth
+pkg_oauth_fetch = git
+pkg_oauth_repo = https://github.com/tim/erlang-oauth
+pkg_oauth_commit = master
+
+PACKAGES += oauth2
+pkg_oauth2_name = oauth2
+pkg_oauth2_description = Erlang OAuth2 implementation
+pkg_oauth2_homepage = https://github.com/kivra/oauth2
+pkg_oauth2_fetch = git
+pkg_oauth2_repo = https://github.com/kivra/oauth2
+pkg_oauth2_commit = master
+
+PACKAGES += observer_cli
+pkg_observer_cli_name = observer_cli
+pkg_observer_cli_description = Visualize Erlang/Elixir Nodes On The Command Line
+pkg_observer_cli_homepage = http://zhongwencool.github.io/observer_cli
+pkg_observer_cli_fetch = git
+pkg_observer_cli_repo = https://github.com/zhongwencool/observer_cli
+pkg_observer_cli_commit = master
+
+PACKAGES += octopus
+pkg_octopus_name = octopus
+pkg_octopus_description = Small and flexible pool manager written in Erlang
+pkg_octopus_homepage = https://github.com/erlangbureau/octopus
+pkg_octopus_fetch = git
+pkg_octopus_repo = https://github.com/erlangbureau/octopus
+pkg_octopus_commit = master
+
+PACKAGES += of_protocol
+pkg_of_protocol_name = of_protocol
+pkg_of_protocol_description = OpenFlow Protocol Library for Erlang
+pkg_of_protocol_homepage = https://github.com/FlowForwarding/of_protocol
+pkg_of_protocol_fetch = git
+pkg_of_protocol_repo = https://github.com/FlowForwarding/of_protocol
+pkg_of_protocol_commit = master
+
+PACKAGES += opencouch
+pkg_opencouch_name = couch
+pkg_opencouch_description = An embeddable document-oriented database compatible with Apache CouchDB
+pkg_opencouch_homepage = https://github.com/benoitc/opencouch
+pkg_opencouch_fetch = git
+pkg_opencouch_repo = https://github.com/benoitc/opencouch
+pkg_opencouch_commit = master
+
+PACKAGES += openflow
+pkg_openflow_name = openflow
+pkg_openflow_description = An OpenFlow controller written in pure erlang
+pkg_openflow_homepage = https://github.com/renatoaguiar/erlang-openflow
+pkg_openflow_fetch = git
+pkg_openflow_repo = https://github.com/renatoaguiar/erlang-openflow
+pkg_openflow_commit = master
+
+PACKAGES += openid
+pkg_openid_name = openid
+pkg_openid_description = Erlang OpenID
+pkg_openid_homepage = https://github.com/brendonh/erl_openid
+pkg_openid_fetch = git
+pkg_openid_repo = https://github.com/brendonh/erl_openid
+pkg_openid_commit = master
+
+PACKAGES += openpoker
+pkg_openpoker_name = openpoker
+pkg_openpoker_description = Genesis Texas hold'em Game Server
+pkg_openpoker_homepage = https://github.com/hpyhacking/openpoker
+pkg_openpoker_fetch = git
+pkg_openpoker_repo = https://github.com/hpyhacking/openpoker
+pkg_openpoker_commit = master
+
+PACKAGES += otpbp
+pkg_otpbp_name = otpbp
+pkg_otpbp_description = Parse transformer for using new OTP functions in old Erlang/OTP releases (R15, R16, 17, 18, 19)
+pkg_otpbp_homepage = https://github.com/Ledest/otpbp
+pkg_otpbp_fetch = git
+pkg_otpbp_repo = https://github.com/Ledest/otpbp
+pkg_otpbp_commit = master
+
+PACKAGES += pal
+pkg_pal_name = pal
+pkg_pal_description = Pragmatic Authentication Library
+pkg_pal_homepage = https://github.com/manifest/pal
+pkg_pal_fetch = git
+pkg_pal_repo = https://github.com/manifest/pal
+pkg_pal_commit = master
+
+PACKAGES += parse_trans
+pkg_parse_trans_name = parse_trans
+pkg_parse_trans_description = Parse transform utilities for Erlang
+pkg_parse_trans_homepage = https://github.com/uwiger/parse_trans
+pkg_parse_trans_fetch = git
+pkg_parse_trans_repo = https://github.com/uwiger/parse_trans
+pkg_parse_trans_commit = master
+
+PACKAGES += parsexml
+pkg_parsexml_name = parsexml
+pkg_parsexml_description = Simple DOM XML parser with convenient and very simple API
+pkg_parsexml_homepage = https://github.com/maxlapshin/parsexml
+pkg_parsexml_fetch = git
+pkg_parsexml_repo = https://github.com/maxlapshin/parsexml
+pkg_parsexml_commit = master
+
+PACKAGES += partisan
+pkg_partisan_name = partisan
+pkg_partisan_description = High-performance, high-scalability distributed computing with Erlang and Elixir.
+pkg_partisan_homepage = http://partisan.cloud
+pkg_partisan_fetch = git
+pkg_partisan_repo = https://github.com/lasp-lang/partisan
+pkg_partisan_commit = master
+
+PACKAGES += pegjs
+pkg_pegjs_name = pegjs
+pkg_pegjs_description = An implementation of PEG.js grammar for Erlang.
+pkg_pegjs_homepage = https://github.com/dmitriid/pegjs
+pkg_pegjs_fetch = git
+pkg_pegjs_repo = https://github.com/dmitriid/pegjs
+pkg_pegjs_commit = master
+
+PACKAGES += percept2
+pkg_percept2_name = percept2
+pkg_percept2_description = Concurrent profiling tool for Erlang
+pkg_percept2_homepage = https://github.com/huiqing/percept2
+pkg_percept2_fetch = git
+pkg_percept2_repo = https://github.com/huiqing/percept2
+pkg_percept2_commit = master
+
+PACKAGES += pgo
+pkg_pgo_name = pgo
+pkg_pgo_description = Erlang Postgres client and connection pool
+pkg_pgo_homepage = https://github.com/erleans/pgo.git
+pkg_pgo_fetch = git
+pkg_pgo_repo = https://github.com/erleans/pgo.git
+pkg_pgo_commit = master
+
+PACKAGES += pgsql
+pkg_pgsql_name = pgsql
+pkg_pgsql_description = Erlang PostgreSQL driver
+pkg_pgsql_homepage = https://github.com/semiocast/pgsql
+pkg_pgsql_fetch = git
+pkg_pgsql_repo = https://github.com/semiocast/pgsql
+pkg_pgsql_commit = master
+
+PACKAGES += pkgx
+pkg_pkgx_name = pkgx
+pkg_pkgx_description = Build .deb packages from Erlang releases
+pkg_pkgx_homepage = https://github.com/arjan/pkgx
+pkg_pkgx_fetch = git
+pkg_pkgx_repo = https://github.com/arjan/pkgx
+pkg_pkgx_commit = master
+
+PACKAGES += pkt
+pkg_pkt_name = pkt
+pkg_pkt_description = Erlang network protocol library
+pkg_pkt_homepage = https://github.com/msantos/pkt
+pkg_pkt_fetch = git
+pkg_pkt_repo = https://github.com/msantos/pkt
+pkg_pkt_commit = master
+
+PACKAGES += plain_fsm
+pkg_plain_fsm_name = plain_fsm
+pkg_plain_fsm_description = A behaviour/support library for writing plain Erlang FSMs.
+pkg_plain_fsm_homepage = https://github.com/uwiger/plain_fsm
+pkg_plain_fsm_fetch = git
+pkg_plain_fsm_repo = https://github.com/uwiger/plain_fsm
+pkg_plain_fsm_commit = master
+
+PACKAGES += plumtree
+pkg_plumtree_name = plumtree
+pkg_plumtree_description = Epidemic Broadcast Trees
+pkg_plumtree_homepage = https://github.com/helium/plumtree
+pkg_plumtree_fetch = git
+pkg_plumtree_repo = https://github.com/helium/plumtree
+pkg_plumtree_commit = master
+
+PACKAGES += pmod_transform
+pkg_pmod_transform_name = pmod_transform
+pkg_pmod_transform_description = Parse transform for parameterized modules
+pkg_pmod_transform_homepage = https://github.com/erlang/pmod_transform
+pkg_pmod_transform_fetch = git
+pkg_pmod_transform_repo = https://github.com/erlang/pmod_transform
+pkg_pmod_transform_commit = master
+
+PACKAGES += pobox
+pkg_pobox_name = pobox
+pkg_pobox_description = External buffer processes to protect against mailbox overflow in Erlang
+pkg_pobox_homepage = https://github.com/ferd/pobox
+pkg_pobox_fetch = git
+pkg_pobox_repo = https://github.com/ferd/pobox
+pkg_pobox_commit = master
+
+PACKAGES += ponos
+pkg_ponos_name = ponos
+pkg_ponos_description = ponos is a simple yet powerful load generator written in erlang
+pkg_ponos_homepage = https://github.com/klarna/ponos
+pkg_ponos_fetch = git
+pkg_ponos_repo = https://github.com/klarna/ponos
+pkg_ponos_commit = master
+
+PACKAGES += poolboy
+pkg_poolboy_name = poolboy
+pkg_poolboy_description = A hunky Erlang worker pool factory
+pkg_poolboy_homepage = https://github.com/devinus/poolboy
+pkg_poolboy_fetch = git
+pkg_poolboy_repo = https://github.com/devinus/poolboy
+pkg_poolboy_commit = master
+
+PACKAGES += pooler
+pkg_pooler_name = pooler
+pkg_pooler_description = An OTP Process Pool Application
+pkg_pooler_homepage = https://github.com/seth/pooler
+pkg_pooler_fetch = git
+pkg_pooler_repo = https://github.com/seth/pooler
+pkg_pooler_commit = master
+
+PACKAGES += pqueue
+pkg_pqueue_name = pqueue
+pkg_pqueue_description = Erlang Priority Queues
+pkg_pqueue_homepage = https://github.com/okeuday/pqueue
+pkg_pqueue_fetch = git
+pkg_pqueue_repo = https://github.com/okeuday/pqueue
+pkg_pqueue_commit = master
+
+PACKAGES += procket
+pkg_procket_name = procket
+pkg_procket_description = Erlang interface to low level socket operations
+pkg_procket_homepage = http://blog.listincomprehension.com/search/label/procket
+pkg_procket_fetch = git
+pkg_procket_repo = https://github.com/msantos/procket
+pkg_procket_commit = master
+
+PACKAGES += prometheus
+pkg_prometheus_name = prometheus
+pkg_prometheus_description = Prometheus.io client in Erlang
+pkg_prometheus_homepage = https://github.com/deadtrickster/prometheus.erl
+pkg_prometheus_fetch = git
+pkg_prometheus_repo = https://github.com/deadtrickster/prometheus.erl
+pkg_prometheus_commit = master
+
+PACKAGES += prop
+pkg_prop_name = prop
+pkg_prop_description = An Erlang code scaffolding and generator system.
+pkg_prop_homepage = https://github.com/nuex/prop
+pkg_prop_fetch = git
+pkg_prop_repo = https://github.com/nuex/prop
+pkg_prop_commit = master
+
+PACKAGES += proper
+pkg_proper_name = proper
+pkg_proper_description = PropEr: a QuickCheck-inspired property-based testing tool for Erlang.
+pkg_proper_homepage = http://proper.softlab.ntua.gr
+pkg_proper_fetch = git
+pkg_proper_repo = https://github.com/manopapad/proper
+pkg_proper_commit = master
+
+PACKAGES += props
+pkg_props_name = props
+pkg_props_description = Property structure library
+pkg_props_homepage = https://github.com/greyarea/props
+pkg_props_fetch = git
+pkg_props_repo = https://github.com/greyarea/props
+pkg_props_commit = master
+
+PACKAGES += protobuffs
+pkg_protobuffs_name = protobuffs
+pkg_protobuffs_description = An implementation of Google's Protocol Buffers for Erlang, based on ngerakines/erlang_protobuffs.
+pkg_protobuffs_homepage = https://github.com/basho/erlang_protobuffs
+pkg_protobuffs_fetch = git
+pkg_protobuffs_repo = https://github.com/basho/erlang_protobuffs
+pkg_protobuffs_commit = master
+
+PACKAGES += psycho
+pkg_psycho_name = psycho
+pkg_psycho_description = HTTP server that provides a WSGI-like interface for applications and middleware.
+pkg_psycho_homepage = https://github.com/gar1t/psycho
+pkg_psycho_fetch = git
+pkg_psycho_repo = https://github.com/gar1t/psycho
+pkg_psycho_commit = master
+
+PACKAGES += purity
+pkg_purity_name = purity
+pkg_purity_description = A side-effect analyzer for Erlang
+pkg_purity_homepage = https://github.com/mpitid/purity
+pkg_purity_fetch = git
+pkg_purity_repo = https://github.com/mpitid/purity
+pkg_purity_commit = master
+
+PACKAGES += push_service
+pkg_push_service_name = push_service
+pkg_push_service_description = Push service
+pkg_push_service_homepage = https://github.com/hairyhum/push_service
+pkg_push_service_fetch = git
+pkg_push_service_repo = https://github.com/hairyhum/push_service
+pkg_push_service_commit = master
+
+PACKAGES += qdate
+pkg_qdate_name = qdate
+pkg_qdate_description = Date, time, and timezone parsing, formatting, and conversion for Erlang.
+pkg_qdate_homepage = https://github.com/choptastic/qdate
+pkg_qdate_fetch = git
+pkg_qdate_repo = https://github.com/choptastic/qdate
+pkg_qdate_commit = master
+
+PACKAGES += qrcode
+pkg_qrcode_name = qrcode
+pkg_qrcode_description = QR Code encoder in Erlang
+pkg_qrcode_homepage = https://github.com/komone/qrcode
+pkg_qrcode_fetch = git
+pkg_qrcode_repo = https://github.com/komone/qrcode
+pkg_qrcode_commit = master
+
+PACKAGES += quest
+pkg_quest_name = quest
+pkg_quest_description = Learn Erlang through this set of challenges. An interactive system for getting to know Erlang.
+pkg_quest_homepage = https://github.com/eriksoe/ErlangQuest
+pkg_quest_fetch = git
+pkg_quest_repo = https://github.com/eriksoe/ErlangQuest
+pkg_quest_commit = master
+
+PACKAGES += quickrand
+pkg_quickrand_name = quickrand
+pkg_quickrand_description = Quick Erlang Random Number Generation
+pkg_quickrand_homepage = https://github.com/okeuday/quickrand
+pkg_quickrand_fetch = git
+pkg_quickrand_repo = https://github.com/okeuday/quickrand
+pkg_quickrand_commit = master
+
+PACKAGES += rabbit
+pkg_rabbit_name = rabbit
+pkg_rabbit_description = RabbitMQ Server
+pkg_rabbit_homepage = https://www.rabbitmq.com/
+pkg_rabbit_fetch = git
+pkg_rabbit_repo = https://github.com/rabbitmq/rabbitmq-server.git
+pkg_rabbit_commit = master
+
+PACKAGES += rabbit_exchange_type_riak
+pkg_rabbit_exchange_type_riak_name = rabbit_exchange_type_riak
+pkg_rabbit_exchange_type_riak_description = Custom RabbitMQ exchange type for sticking messages in Riak
+pkg_rabbit_exchange_type_riak_homepage = https://github.com/jbrisbin/riak-exchange
+pkg_rabbit_exchange_type_riak_fetch = git
+pkg_rabbit_exchange_type_riak_repo = https://github.com/jbrisbin/riak-exchange
+pkg_rabbit_exchange_type_riak_commit = master
+
+PACKAGES += rack
+pkg_rack_name = rack
+pkg_rack_description = Rack handler for erlang
+pkg_rack_homepage = https://github.com/erlyvideo/rack
+pkg_rack_fetch = git
+pkg_rack_repo = https://github.com/erlyvideo/rack
+pkg_rack_commit = master
+
+PACKAGES += radierl
+pkg_radierl_name = radierl
+pkg_radierl_description = RADIUS protocol stack implemented in Erlang.
+pkg_radierl_homepage = https://github.com/vances/radierl
+pkg_radierl_fetch = git
+pkg_radierl_repo = https://github.com/vances/radierl
+pkg_radierl_commit = master
+
+PACKAGES += rafter
+pkg_rafter_name = rafter
+pkg_rafter_description = An Erlang library application which implements the Raft consensus protocol
+pkg_rafter_homepage = https://github.com/andrewjstone/rafter
+pkg_rafter_fetch = git
+pkg_rafter_repo = https://github.com/andrewjstone/rafter
+pkg_rafter_commit = master
+
+PACKAGES += ranch
+pkg_ranch_name = ranch
+pkg_ranch_description = Socket acceptor pool for TCP protocols.
+pkg_ranch_homepage = http://ninenines.eu
+pkg_ranch_fetch = git
+pkg_ranch_repo = https://github.com/ninenines/ranch
+pkg_ranch_commit = 1.2.1
+
+PACKAGES += rbeacon
+pkg_rbeacon_name = rbeacon
+pkg_rbeacon_description = LAN discovery and presence in Erlang.
+pkg_rbeacon_homepage = https://github.com/refuge/rbeacon
+pkg_rbeacon_fetch = git
+pkg_rbeacon_repo = https://github.com/refuge/rbeacon
+pkg_rbeacon_commit = master
+
+PACKAGES += rebar
+pkg_rebar_name = rebar
+pkg_rebar_description = Erlang build tool that makes it easy to compile and test Erlang applications, port drivers and releases.
+pkg_rebar_homepage = http://www.rebar3.org
+pkg_rebar_fetch = git
+pkg_rebar_repo = https://github.com/rebar/rebar3
+pkg_rebar_commit = master
+
+PACKAGES += rebus
+pkg_rebus_name = rebus
+pkg_rebus_description = A stupid simple, internal, pub/sub event bus written in- and for Erlang.
+pkg_rebus_homepage = https://github.com/olle/rebus
+pkg_rebus_fetch = git
+pkg_rebus_repo = https://github.com/olle/rebus
+pkg_rebus_commit = master
+
+PACKAGES += rec2json
+pkg_rec2json_name = rec2json
+pkg_rec2json_description = Compile erlang record definitions into modules to convert them to/from json easily.
+pkg_rec2json_homepage = https://github.com/lordnull/rec2json
+pkg_rec2json_fetch = git
+pkg_rec2json_repo = https://github.com/lordnull/rec2json
+pkg_rec2json_commit = master
+
+PACKAGES += recon
+pkg_recon_name = recon
+pkg_recon_description = Collection of functions and scripts to debug Erlang in production.
+pkg_recon_homepage = https://github.com/ferd/recon
+pkg_recon_fetch = git
+pkg_recon_repo = https://github.com/ferd/recon
+pkg_recon_commit = master
+
+PACKAGES += record_info
+pkg_record_info_name = record_info
+pkg_record_info_description = Convert between record and proplist
+pkg_record_info_homepage = https://github.com/bipthelin/erlang-record_info
+pkg_record_info_fetch = git
+pkg_record_info_repo = https://github.com/bipthelin/erlang-record_info
+pkg_record_info_commit = master
+
+PACKAGES += redgrid
+pkg_redgrid_name = redgrid
+pkg_redgrid_description = automatic Erlang node discovery via redis
+pkg_redgrid_homepage = https://github.com/jkvor/redgrid
+pkg_redgrid_fetch = git
+pkg_redgrid_repo = https://github.com/jkvor/redgrid
+pkg_redgrid_commit = master
+
+PACKAGES += redo
+pkg_redo_name = redo
+pkg_redo_description = pipelined erlang redis client
+pkg_redo_homepage = https://github.com/jkvor/redo
+pkg_redo_fetch = git
+pkg_redo_repo = https://github.com/jkvor/redo
+pkg_redo_commit = master
+
+PACKAGES += reload_mk
+pkg_reload_mk_name = reload_mk
+pkg_reload_mk_description = Live reload plugin for erlang.mk.
+pkg_reload_mk_homepage = https://github.com/bullno1/reload.mk
+pkg_reload_mk_fetch = git
+pkg_reload_mk_repo = https://github.com/bullno1/reload.mk
+pkg_reload_mk_commit = master
+
+PACKAGES += reltool_util
+pkg_reltool_util_name = reltool_util
+pkg_reltool_util_description = Erlang reltool utility functionality application
+pkg_reltool_util_homepage = https://github.com/okeuday/reltool_util
+pkg_reltool_util_fetch = git
+pkg_reltool_util_repo = https://github.com/okeuday/reltool_util
+pkg_reltool_util_commit = master
+
+PACKAGES += relx
+pkg_relx_name = relx
+pkg_relx_description = Sane, simple release creation for Erlang
+pkg_relx_homepage = https://github.com/erlware/relx
+pkg_relx_fetch = git
+pkg_relx_repo = https://github.com/erlware/relx
+pkg_relx_commit = master
+
+PACKAGES += resource_discovery
+pkg_resource_discovery_name = resource_discovery
+pkg_resource_discovery_description = An application used to dynamically discover resources present in an Erlang node cluster.
+pkg_resource_discovery_homepage = http://erlware.org/
+pkg_resource_discovery_fetch = git
+pkg_resource_discovery_repo = https://github.com/erlware/resource_discovery
+pkg_resource_discovery_commit = master
+
+PACKAGES += restc
+pkg_restc_name = restc
+pkg_restc_description = Erlang Rest Client
+pkg_restc_homepage = https://github.com/kivra/restclient
+pkg_restc_fetch = git
+pkg_restc_repo = https://github.com/kivra/restclient
+pkg_restc_commit = master
+
+PACKAGES += rfc4627_jsonrpc
+pkg_rfc4627_jsonrpc_name = rfc4627_jsonrpc
+pkg_rfc4627_jsonrpc_description = Erlang RFC4627 (JSON) codec and JSON-RPC server implementation.
+pkg_rfc4627_jsonrpc_homepage = https://github.com/tonyg/erlang-rfc4627
+pkg_rfc4627_jsonrpc_fetch = git
+pkg_rfc4627_jsonrpc_repo = https://github.com/tonyg/erlang-rfc4627
+pkg_rfc4627_jsonrpc_commit = master
+
+PACKAGES += riak_control
+pkg_riak_control_name = riak_control
+pkg_riak_control_description = Webmachine-based administration interface for Riak.
+pkg_riak_control_homepage = https://github.com/basho/riak_control
+pkg_riak_control_fetch = git
+pkg_riak_control_repo = https://github.com/basho/riak_control
+pkg_riak_control_commit = master
+
+PACKAGES += riak_core
+pkg_riak_core_name = riak_core
+pkg_riak_core_description = Distributed systems infrastructure used by Riak.
+pkg_riak_core_homepage = https://github.com/basho/riak_core
+pkg_riak_core_fetch = git
+pkg_riak_core_repo = https://github.com/basho/riak_core
+pkg_riak_core_commit = master
+
+PACKAGES += riak_dt
+pkg_riak_dt_name = riak_dt
+pkg_riak_dt_description = Convergent replicated datatypes in Erlang
+pkg_riak_dt_homepage = https://github.com/basho/riak_dt
+pkg_riak_dt_fetch = git
+pkg_riak_dt_repo = https://github.com/basho/riak_dt
+pkg_riak_dt_commit = master
+
+PACKAGES += riak_ensemble
+pkg_riak_ensemble_name = riak_ensemble
+pkg_riak_ensemble_description = Multi-Paxos framework in Erlang
+pkg_riak_ensemble_homepage = https://github.com/basho/riak_ensemble
+pkg_riak_ensemble_fetch = git
+pkg_riak_ensemble_repo = https://github.com/basho/riak_ensemble
+pkg_riak_ensemble_commit = master
+
+PACKAGES += riak_kv
+pkg_riak_kv_name = riak_kv
+pkg_riak_kv_description = Riak Key/Value Store
+pkg_riak_kv_homepage = https://github.com/basho/riak_kv
+pkg_riak_kv_fetch = git
+pkg_riak_kv_repo = https://github.com/basho/riak_kv
+pkg_riak_kv_commit = master
+
+PACKAGES += riak_pg
+pkg_riak_pg_name = riak_pg
+pkg_riak_pg_description = Distributed process groups with riak_core.
+pkg_riak_pg_homepage = https://github.com/cmeiklejohn/riak_pg
+pkg_riak_pg_fetch = git
+pkg_riak_pg_repo = https://github.com/cmeiklejohn/riak_pg
+pkg_riak_pg_commit = master
+
+PACKAGES += riak_pipe
+pkg_riak_pipe_name = riak_pipe
+pkg_riak_pipe_description = Riak Pipelines
+pkg_riak_pipe_homepage = https://github.com/basho/riak_pipe
+pkg_riak_pipe_fetch = git
+pkg_riak_pipe_repo = https://github.com/basho/riak_pipe
+pkg_riak_pipe_commit = master
+
+PACKAGES += riak_sysmon
+pkg_riak_sysmon_name = riak_sysmon
+pkg_riak_sysmon_description = Simple OTP app for managing Erlang VM system_monitor event messages
+pkg_riak_sysmon_homepage = https://github.com/basho/riak_sysmon
+pkg_riak_sysmon_fetch = git
+pkg_riak_sysmon_repo = https://github.com/basho/riak_sysmon
+pkg_riak_sysmon_commit = master
+
+PACKAGES += riak_test
+pkg_riak_test_name = riak_test
+pkg_riak_test_description = I'm in your cluster, testing your riaks
+pkg_riak_test_homepage = https://github.com/basho/riak_test
+pkg_riak_test_fetch = git
+pkg_riak_test_repo = https://github.com/basho/riak_test
+pkg_riak_test_commit = master
+
+PACKAGES += riakc
+pkg_riakc_name = riakc
+pkg_riakc_description = Erlang clients for Riak.
+pkg_riakc_homepage = https://github.com/basho/riak-erlang-client
+pkg_riakc_fetch = git
+pkg_riakc_repo = https://github.com/basho/riak-erlang-client
+pkg_riakc_commit = master
+
+PACKAGES += riakhttpc
+pkg_riakhttpc_name = riakhttpc
+pkg_riakhttpc_description = Riak Erlang client using the HTTP interface
+pkg_riakhttpc_homepage = https://github.com/basho/riak-erlang-http-client
+pkg_riakhttpc_fetch = git
+pkg_riakhttpc_repo = https://github.com/basho/riak-erlang-http-client
+pkg_riakhttpc_commit = master
+
+PACKAGES += riaknostic
+pkg_riaknostic_name = riaknostic
+pkg_riaknostic_description = A diagnostic tool for Riak installations, to find common errors asap
+pkg_riaknostic_homepage = https://github.com/basho/riaknostic
+pkg_riaknostic_fetch = git
+pkg_riaknostic_repo = https://github.com/basho/riaknostic
+pkg_riaknostic_commit = master
+
+PACKAGES += riakpool
+pkg_riakpool_name = riakpool
+pkg_riakpool_description = erlang riak client pool
+pkg_riakpool_homepage = https://github.com/dweldon/riakpool
+pkg_riakpool_fetch = git
+pkg_riakpool_repo = https://github.com/dweldon/riakpool
+pkg_riakpool_commit = master
+
+PACKAGES += rivus_cep
+pkg_rivus_cep_name = rivus_cep
+pkg_rivus_cep_description = Complex event processing in Erlang
+pkg_rivus_cep_homepage = https://github.com/vascokk/rivus_cep
+pkg_rivus_cep_fetch = git
+pkg_rivus_cep_repo = https://github.com/vascokk/rivus_cep
+pkg_rivus_cep_commit = master
+
+PACKAGES += rlimit
+pkg_rlimit_name = rlimit
+pkg_rlimit_description = Magnus Klaar's rate limiter code from etorrent
+pkg_rlimit_homepage = https://github.com/jlouis/rlimit
+pkg_rlimit_fetch = git
+pkg_rlimit_repo = https://github.com/jlouis/rlimit
+pkg_rlimit_commit = master
+
+PACKAGES += rust_mk
+pkg_rust_mk_name = rust_mk
+pkg_rust_mk_description = Build Rust crates in an Erlang application
+pkg_rust_mk_homepage = https://github.com/goertzenator/rust.mk
+pkg_rust_mk_fetch = git
+pkg_rust_mk_repo = https://github.com/goertzenator/rust.mk
+pkg_rust_mk_commit = master
+
+PACKAGES += safetyvalve
+pkg_safetyvalve_name = safetyvalve
+pkg_safetyvalve_description = A safety valve for your erlang node
+pkg_safetyvalve_homepage = https://github.com/jlouis/safetyvalve
+pkg_safetyvalve_fetch = git
+pkg_safetyvalve_repo = https://github.com/jlouis/safetyvalve
+pkg_safetyvalve_commit = master
+
+PACKAGES += seestar
+pkg_seestar_name = seestar
+pkg_seestar_description = The Erlang client for the Cassandra 1.2+ binary protocol
+pkg_seestar_homepage = https://github.com/iamaleksey/seestar
+pkg_seestar_fetch = git
+pkg_seestar_repo = https://github.com/iamaleksey/seestar
+pkg_seestar_commit = master
+
+PACKAGES += service
+pkg_service_name = service
+pkg_service_description = A minimal Erlang behavior for creating CloudI internal services
+pkg_service_homepage = http://cloudi.org/
+pkg_service_fetch = git
+pkg_service_repo = https://github.com/CloudI/service
+pkg_service_commit = master
+
+PACKAGES += setup
+pkg_setup_name = setup
+pkg_setup_description = Generic setup utility for Erlang-based systems
+pkg_setup_homepage = https://github.com/uwiger/setup
+pkg_setup_fetch = git
+pkg_setup_repo = https://github.com/uwiger/setup
+pkg_setup_commit = master
+
+PACKAGES += sext
+pkg_sext_name = sext
+pkg_sext_description = Sortable Erlang Term Serialization
+pkg_sext_homepage = https://github.com/uwiger/sext
+pkg_sext_fetch = git
+pkg_sext_repo = https://github.com/uwiger/sext
+pkg_sext_commit = master
+
+PACKAGES += sfmt
+pkg_sfmt_name = sfmt
+pkg_sfmt_description = SFMT pseudo random number generator for Erlang.
+pkg_sfmt_homepage = https://github.com/jj1bdx/sfmt-erlang
+pkg_sfmt_fetch = git
+pkg_sfmt_repo = https://github.com/jj1bdx/sfmt-erlang
+pkg_sfmt_commit = master
+
+PACKAGES += sgte
+pkg_sgte_name = sgte
+pkg_sgte_description = A simple Erlang Template Engine
+pkg_sgte_homepage = https://github.com/filippo/sgte
+pkg_sgte_fetch = git
+pkg_sgte_repo = https://github.com/filippo/sgte
+pkg_sgte_commit = master
+
+PACKAGES += sheriff
+pkg_sheriff_name = sheriff
+pkg_sheriff_description = Parse transform for type based validation.
+pkg_sheriff_homepage = http://ninenines.eu
+pkg_sheriff_fetch = git
+pkg_sheriff_repo = https://github.com/extend/sheriff
+pkg_sheriff_commit = master
+
+PACKAGES += shotgun
+pkg_shotgun_name = shotgun
+pkg_shotgun_description = better than just a gun
+pkg_shotgun_homepage = https://github.com/inaka/shotgun
+pkg_shotgun_fetch = git
+pkg_shotgun_repo = https://github.com/inaka/shotgun
+pkg_shotgun_commit = master
+
+PACKAGES += sidejob
+pkg_sidejob_name = sidejob
+pkg_sidejob_description = Parallel worker and capacity limiting library for Erlang
+pkg_sidejob_homepage = https://github.com/basho/sidejob
+pkg_sidejob_fetch = git
+pkg_sidejob_repo = https://github.com/basho/sidejob
+pkg_sidejob_commit = master
+
+PACKAGES += sieve
+pkg_sieve_name = sieve
+pkg_sieve_description = sieve is a simple TCP routing proxy (layer 7) in erlang
+pkg_sieve_homepage = https://github.com/benoitc/sieve
+pkg_sieve_fetch = git
+pkg_sieve_repo = https://github.com/benoitc/sieve
+pkg_sieve_commit = master
+
+PACKAGES += sighandler
+pkg_sighandler_name = sighandler
+pkg_sighandler_description = Handle UNIX signals in Erlang
+pkg_sighandler_homepage = https://github.com/jkingsbery/sighandler
+pkg_sighandler_fetch = git
+pkg_sighandler_repo = https://github.com/jkingsbery/sighandler
+pkg_sighandler_commit = master
+
+PACKAGES += simhash
+pkg_simhash_name = simhash
+pkg_simhash_description = Simhashing for Erlang -- hashing algorithm to find near-duplicates in binary data.
+pkg_simhash_homepage = https://github.com/ferd/simhash
+pkg_simhash_fetch = git
+pkg_simhash_repo = https://github.com/ferd/simhash
+pkg_simhash_commit = master
+
+PACKAGES += simple_bridge
+pkg_simple_bridge_name = simple_bridge
+pkg_simple_bridge_description = A simple, standardized interface library to Erlang HTTP Servers.
+pkg_simple_bridge_homepage = https://github.com/nitrogen/simple_bridge
+pkg_simple_bridge_fetch = git
+pkg_simple_bridge_repo = https://github.com/nitrogen/simple_bridge
+pkg_simple_bridge_commit = master
+
+PACKAGES += simple_oauth2
+pkg_simple_oauth2_name = simple_oauth2
+pkg_simple_oauth2_description = Simple erlang OAuth2 client module for any http server framework (Google, Facebook, Yandex, Vkontakte are preconfigured)
+pkg_simple_oauth2_homepage = https://github.com/virtan/simple_oauth2
+pkg_simple_oauth2_fetch = git
+pkg_simple_oauth2_repo = https://github.com/virtan/simple_oauth2
+pkg_simple_oauth2_commit = master
+
+PACKAGES += skel
+pkg_skel_name = skel
+pkg_skel_description = A Streaming Process-based Skeleton Library for Erlang
+pkg_skel_homepage = https://github.com/ParaPhrase/skel
+pkg_skel_fetch = git
+pkg_skel_repo = https://github.com/ParaPhrase/skel
+pkg_skel_commit = master
+
+PACKAGES += slack
+pkg_slack_name = slack
+pkg_slack_description = Minimal slack notification OTP library.
+pkg_slack_homepage = https://github.com/DonBranson/slack
+pkg_slack_fetch = git
+pkg_slack_repo = https://github.com/DonBranson/slack.git
+pkg_slack_commit = master
+
+PACKAGES += smother
+pkg_smother_name = smother
+pkg_smother_description = Extended code coverage metrics for Erlang.
+pkg_smother_homepage = https://ramsay-t.github.io/Smother/
+pkg_smother_fetch = git
+pkg_smother_repo = https://github.com/ramsay-t/Smother
+pkg_smother_commit = master
+
+PACKAGES += snappyer
+pkg_snappyer_name = snappyer
+pkg_snappyer_description = Snappy as a NIF for Erlang
+pkg_snappyer_homepage = https://github.com/zmstone/snappyer
+pkg_snappyer_fetch = git
+pkg_snappyer_repo = https://github.com/zmstone/snappyer.git
+pkg_snappyer_commit = master
+
+PACKAGES += social
+pkg_social_name = social
+pkg_social_description = Cowboy handler for social login via OAuth2 providers
+pkg_social_homepage = https://github.com/dvv/social
+pkg_social_fetch = git
+pkg_social_repo = https://github.com/dvv/social
+pkg_social_commit = master
+
+PACKAGES += spapi_router
+pkg_spapi_router_name = spapi_router
+pkg_spapi_router_description = Partially-connected Erlang clustering
+pkg_spapi_router_homepage = https://github.com/spilgames/spapi-router
+pkg_spapi_router_fetch = git
+pkg_spapi_router_repo = https://github.com/spilgames/spapi-router
+pkg_spapi_router_commit = master
+
+PACKAGES += sqerl
+pkg_sqerl_name = sqerl
+pkg_sqerl_description = An Erlang-flavoured SQL DSL
+pkg_sqerl_homepage = https://github.com/hairyhum/sqerl
+pkg_sqerl_fetch = git
+pkg_sqerl_repo = https://github.com/hairyhum/sqerl
+pkg_sqerl_commit = master
+
+PACKAGES += srly
+pkg_srly_name = srly
+pkg_srly_description = Native Erlang Unix serial interface
+pkg_srly_homepage = https://github.com/msantos/srly
+pkg_srly_fetch = git
+pkg_srly_repo = https://github.com/msantos/srly
+pkg_srly_commit = master
+
+PACKAGES += sshrpc
+pkg_sshrpc_name = sshrpc
+pkg_sshrpc_description = Erlang SSH RPC module (experimental)
+pkg_sshrpc_homepage = https://github.com/jj1bdx/sshrpc
+pkg_sshrpc_fetch = git
+pkg_sshrpc_repo = https://github.com/jj1bdx/sshrpc
+pkg_sshrpc_commit = master
+
+PACKAGES += stable
+pkg_stable_name = stable
+pkg_stable_description = Library of assorted helpers for Cowboy web server.
+pkg_stable_homepage = https://github.com/dvv/stable
+pkg_stable_fetch = git
+pkg_stable_repo = https://github.com/dvv/stable
+pkg_stable_commit = master
+
+PACKAGES += statebox
+pkg_statebox_name = statebox
+pkg_statebox_description = Erlang state monad with merge/conflict-resolution capabilities. Useful for Riak.
+pkg_statebox_homepage = https://github.com/mochi/statebox
+pkg_statebox_fetch = git
+pkg_statebox_repo = https://github.com/mochi/statebox
+pkg_statebox_commit = master
+
+PACKAGES += statebox_riak
+pkg_statebox_riak_name = statebox_riak
+pkg_statebox_riak_description = Convenience library that makes it easier to use statebox with riak, extracted from best practices in our production code at Mochi Media.
+pkg_statebox_riak_homepage = https://github.com/mochi/statebox_riak
+pkg_statebox_riak_fetch = git
+pkg_statebox_riak_repo = https://github.com/mochi/statebox_riak
+pkg_statebox_riak_commit = master
+
+PACKAGES += statman
+pkg_statman_name = statman
+pkg_statman_description = Efficiently collect massive volumes of metrics inside the Erlang VM
+pkg_statman_homepage = https://github.com/knutin/statman
+pkg_statman_fetch = git
+pkg_statman_repo = https://github.com/knutin/statman
+pkg_statman_commit = master
+
+PACKAGES += statsderl
+pkg_statsderl_name = statsderl
+pkg_statsderl_description = StatsD client (erlang)
+pkg_statsderl_homepage = https://github.com/lpgauth/statsderl
+pkg_statsderl_fetch = git
+pkg_statsderl_repo = https://github.com/lpgauth/statsderl
+pkg_statsderl_commit = master
+
+PACKAGES += stdinout_pool
+pkg_stdinout_pool_name = stdinout_pool
+pkg_stdinout_pool_description = stdinout_pool : stuff goes in, stuff goes out. there's never any miscommunication.
+pkg_stdinout_pool_homepage = https://github.com/mattsta/erlang-stdinout-pool
+pkg_stdinout_pool_fetch = git
+pkg_stdinout_pool_repo = https://github.com/mattsta/erlang-stdinout-pool
+pkg_stdinout_pool_commit = master
+
+PACKAGES += stockdb
+pkg_stockdb_name = stockdb
+pkg_stockdb_description = Database for storing Stock Exchange quotes in erlang
+pkg_stockdb_homepage = https://github.com/maxlapshin/stockdb
+pkg_stockdb_fetch = git
+pkg_stockdb_repo = https://github.com/maxlapshin/stockdb
+pkg_stockdb_commit = master
+
+PACKAGES += stripe
+pkg_stripe_name = stripe
+pkg_stripe_description = Erlang interface to the stripe.com API
+pkg_stripe_homepage = https://github.com/mattsta/stripe-erlang
+pkg_stripe_fetch = git
+pkg_stripe_repo = https://github.com/mattsta/stripe-erlang
+pkg_stripe_commit = v1
+
+PACKAGES += subproc
+pkg_subproc_name = subproc
+pkg_subproc_description = unix subprocess manager with {active,once|false} modes
+pkg_subproc_homepage = http://dozzie.jarowit.net/trac/wiki/subproc
+pkg_subproc_fetch = git
+pkg_subproc_repo = https://github.com/dozzie/subproc
+pkg_subproc_commit = v0.1.0
+
+PACKAGES += supervisor3
+pkg_supervisor3_name = supervisor3
+pkg_supervisor3_description = OTP supervisor with additional strategies
+pkg_supervisor3_homepage = https://github.com/klarna/supervisor3
+pkg_supervisor3_fetch = git
+pkg_supervisor3_repo = https://github.com/klarna/supervisor3.git
+pkg_supervisor3_commit = master
+
+PACKAGES += surrogate
+pkg_surrogate_name = surrogate
+pkg_surrogate_description = Proxy server written in erlang. Supports reverse proxy load balancing and forward proxy with http (including CONNECT), socks4, socks5, and transparent proxy modes.
+pkg_surrogate_homepage = https://github.com/skruger/Surrogate
+pkg_surrogate_fetch = git
+pkg_surrogate_repo = https://github.com/skruger/Surrogate
+pkg_surrogate_commit = master
+
+PACKAGES += swab
+pkg_swab_name = swab
+pkg_swab_description = General purpose buffer handling module
+pkg_swab_homepage = https://github.com/crownedgrouse/swab
+pkg_swab_fetch = git
+pkg_swab_repo = https://github.com/crownedgrouse/swab
+pkg_swab_commit = master
+
+PACKAGES += swarm
+pkg_swarm_name = swarm
+pkg_swarm_description = Fast and simple acceptor pool for Erlang
+pkg_swarm_homepage = https://github.com/jeremey/swarm
+pkg_swarm_fetch = git
+pkg_swarm_repo = https://github.com/jeremey/swarm
+pkg_swarm_commit = master
+
+PACKAGES += switchboard
+pkg_switchboard_name = switchboard
+pkg_switchboard_description = A framework for processing email using worker plugins.
+pkg_switchboard_homepage = https://github.com/thusfresh/switchboard
+pkg_switchboard_fetch = git
+pkg_switchboard_repo = https://github.com/thusfresh/switchboard
+pkg_switchboard_commit = master
+
+PACKAGES += syn
+pkg_syn_name = syn
+pkg_syn_description = A global Process Registry and Process Group manager for Erlang.
+pkg_syn_homepage = https://github.com/ostinelli/syn
+pkg_syn_fetch = git
+pkg_syn_repo = https://github.com/ostinelli/syn
+pkg_syn_commit = master
+
+PACKAGES += sync
+pkg_sync_name = sync
+pkg_sync_description = On-the-fly recompiling and reloading in Erlang.
+pkg_sync_homepage = https://github.com/rustyio/sync
+pkg_sync_fetch = git
+pkg_sync_repo = https://github.com/rustyio/sync
+pkg_sync_commit = master
+
+PACKAGES += syntaxerl
+pkg_syntaxerl_name = syntaxerl
+pkg_syntaxerl_description = Syntax checker for Erlang
+pkg_syntaxerl_homepage = https://github.com/ten0s/syntaxerl
+pkg_syntaxerl_fetch = git
+pkg_syntaxerl_repo = https://github.com/ten0s/syntaxerl
+pkg_syntaxerl_commit = master
+
+PACKAGES += syslog
+pkg_syslog_name = syslog
+pkg_syslog_description = Erlang port driver for interacting with syslog via syslog(3)
+pkg_syslog_homepage = https://github.com/Vagabond/erlang-syslog
+pkg_syslog_fetch = git
+pkg_syslog_repo = https://github.com/Vagabond/erlang-syslog
+pkg_syslog_commit = master
+
+PACKAGES += taskforce
+pkg_taskforce_name = taskforce
+pkg_taskforce_description = Erlang worker pools for controlled parallelisation of arbitrary tasks.
+pkg_taskforce_homepage = https://github.com/g-andrade/taskforce
+pkg_taskforce_fetch = git
+pkg_taskforce_repo = https://github.com/g-andrade/taskforce
+pkg_taskforce_commit = master
+
+PACKAGES += tddreloader
+pkg_tddreloader_name = tddreloader
+pkg_tddreloader_description = Shell utility for recompiling, reloading, and testing code as it changes
+pkg_tddreloader_homepage = https://github.com/version2beta/tddreloader
+pkg_tddreloader_fetch = git
+pkg_tddreloader_repo = https://github.com/version2beta/tddreloader
+pkg_tddreloader_commit = master
+
+PACKAGES += tempo
+pkg_tempo_name = tempo
+pkg_tempo_description = NIF-based date and time parsing and formatting for Erlang.
+pkg_tempo_homepage = https://github.com/selectel/tempo
+pkg_tempo_fetch = git
+pkg_tempo_repo = https://github.com/selectel/tempo
+pkg_tempo_commit = master
+
+PACKAGES += ticktick
+pkg_ticktick_name = ticktick
+pkg_ticktick_description = Ticktick is an ID generator for message services.
+pkg_ticktick_homepage = https://github.com/ericliang/ticktick
+pkg_ticktick_fetch = git
+pkg_ticktick_repo = https://github.com/ericliang/ticktick
+pkg_ticktick_commit = master
+
+PACKAGES += tinymq
+pkg_tinymq_name = tinymq
+pkg_tinymq_description = TinyMQ - a diminutive, in-memory message queue
+pkg_tinymq_homepage = https://github.com/ChicagoBoss/tinymq
+pkg_tinymq_fetch = git
+pkg_tinymq_repo = https://github.com/ChicagoBoss/tinymq
+pkg_tinymq_commit = master
+
+PACKAGES += tinymt
+pkg_tinymt_name = tinymt
+pkg_tinymt_description = TinyMT pseudo random number generator for Erlang.
+pkg_tinymt_homepage = https://github.com/jj1bdx/tinymt-erlang
+pkg_tinymt_fetch = git
+pkg_tinymt_repo = https://github.com/jj1bdx/tinymt-erlang
+pkg_tinymt_commit = master
+
+PACKAGES += tirerl
+pkg_tirerl_name = tirerl
+pkg_tirerl_description = Erlang interface to Elastic Search
+pkg_tirerl_homepage = https://github.com/inaka/tirerl
+pkg_tirerl_fetch = git
+pkg_tirerl_repo = https://github.com/inaka/tirerl
+pkg_tirerl_commit = master
+
+PACKAGES += toml
+pkg_toml_name = toml
+pkg_toml_description = TOML (0.4.0) config parser
+pkg_toml_homepage = http://dozzie.jarowit.net/trac/wiki/TOML
+pkg_toml_fetch = git
+pkg_toml_repo = https://github.com/dozzie/toml
+pkg_toml_commit = v0.2.0
+
+PACKAGES += traffic_tools
+pkg_traffic_tools_name = traffic_tools
+pkg_traffic_tools_description = Simple traffic limiting library
+pkg_traffic_tools_homepage = https://github.com/systra/traffic_tools
+pkg_traffic_tools_fetch = git
+pkg_traffic_tools_repo = https://github.com/systra/traffic_tools
+pkg_traffic_tools_commit = master
+
+PACKAGES += trails
+pkg_trails_name = trails
+pkg_trails_description = A couple of improvements over Cowboy Routes
+pkg_trails_homepage = http://inaka.github.io/cowboy-trails/
+pkg_trails_fetch = git
+pkg_trails_repo = https://github.com/inaka/cowboy-trails
+pkg_trails_commit = master
+
+PACKAGES += trane
+pkg_trane_name = trane
+pkg_trane_description = SAX style broken HTML parser in Erlang
+pkg_trane_homepage = https://github.com/massemanet/trane
+pkg_trane_fetch = git
+pkg_trane_repo = https://github.com/massemanet/trane
+pkg_trane_commit = master
+
+PACKAGES += transit
+pkg_transit_name = transit
+pkg_transit_description = transit format for erlang
+pkg_transit_homepage = https://github.com/isaiah/transit-erlang
+pkg_transit_fetch = git
+pkg_transit_repo = https://github.com/isaiah/transit-erlang
+pkg_transit_commit = master
+
+PACKAGES += trie
+pkg_trie_name = trie
+pkg_trie_description = Erlang Trie Implementation
+pkg_trie_homepage = https://github.com/okeuday/trie
+pkg_trie_fetch = git
+pkg_trie_repo = https://github.com/okeuday/trie
+pkg_trie_commit = master
+
+PACKAGES += triq
+pkg_triq_name = triq
+pkg_triq_description = Trifork QuickCheck
+pkg_triq_homepage = https://triq.gitlab.io
+pkg_triq_fetch = git
+pkg_triq_repo = https://gitlab.com/triq/triq.git
+pkg_triq_commit = master
+
+PACKAGES += tunctl
+pkg_tunctl_name = tunctl
+pkg_tunctl_description = Erlang TUN/TAP interface
+pkg_tunctl_homepage = https://github.com/msantos/tunctl
+pkg_tunctl_fetch = git
+pkg_tunctl_repo = https://github.com/msantos/tunctl
+pkg_tunctl_commit = master
+
+PACKAGES += twerl
+pkg_twerl_name = twerl
+pkg_twerl_description = Erlang client for the Twitter Streaming API
+pkg_twerl_homepage = https://github.com/lucaspiller/twerl
+pkg_twerl_fetch = git
+pkg_twerl_repo = https://github.com/lucaspiller/twerl
+pkg_twerl_commit = oauth
+
+PACKAGES += twitter_erlang
+pkg_twitter_erlang_name = twitter_erlang
+pkg_twitter_erlang_description = An Erlang twitter client
+pkg_twitter_erlang_homepage = https://github.com/ngerakines/erlang_twitter
+pkg_twitter_erlang_fetch = git
+pkg_twitter_erlang_repo = https://github.com/ngerakines/erlang_twitter
+pkg_twitter_erlang_commit = master
+
+PACKAGES += ucol_nif
+pkg_ucol_nif_name = ucol_nif
+pkg_ucol_nif_description = ICU based collation Erlang module
+pkg_ucol_nif_homepage = https://github.com/refuge/ucol_nif
+pkg_ucol_nif_fetch = git
+pkg_ucol_nif_repo = https://github.com/refuge/ucol_nif
+pkg_ucol_nif_commit = master
+
+PACKAGES += unicorn
+pkg_unicorn_name = unicorn
+pkg_unicorn_description = Generic configuration server
+pkg_unicorn_homepage = https://github.com/shizzard/unicorn
+pkg_unicorn_fetch = git
+pkg_unicorn_repo = https://github.com/shizzard/unicorn
+pkg_unicorn_commit = master
+
+PACKAGES += unsplit
+pkg_unsplit_name = unsplit
+pkg_unsplit_description = Resolves conflicts in Mnesia after network splits
+pkg_unsplit_homepage = https://github.com/uwiger/unsplit
+pkg_unsplit_fetch = git
+pkg_unsplit_repo = https://github.com/uwiger/unsplit
+pkg_unsplit_commit = master
+
+PACKAGES += uuid
+pkg_uuid_name = uuid
+pkg_uuid_description = Erlang UUID Implementation
+pkg_uuid_homepage = https://github.com/okeuday/uuid
+pkg_uuid_fetch = git
+pkg_uuid_repo = https://github.com/okeuday/uuid
+pkg_uuid_commit = master
+
+PACKAGES += ux
+pkg_ux_name = ux
+pkg_ux_description = Unicode eXtention for Erlang (Strings, Collation)
+pkg_ux_homepage = https://github.com/erlang-unicode/ux
+pkg_ux_fetch = git
+pkg_ux_repo = https://github.com/erlang-unicode/ux
+pkg_ux_commit = master
+
+PACKAGES += vert
+pkg_vert_name = vert
+pkg_vert_description = Erlang binding to libvirt virtualization API
+pkg_vert_homepage = https://github.com/msantos/erlang-libvirt
+pkg_vert_fetch = git
+pkg_vert_repo = https://github.com/msantos/erlang-libvirt
+pkg_vert_commit = master
+
+PACKAGES += verx
+pkg_verx_name = verx
+pkg_verx_description = Erlang implementation of the libvirtd remote protocol
+pkg_verx_homepage = https://github.com/msantos/verx
+pkg_verx_fetch = git
+pkg_verx_repo = https://github.com/msantos/verx
+pkg_verx_commit = master
+
+PACKAGES += vmq_acl
+pkg_vmq_acl_name = vmq_acl
+pkg_vmq_acl_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_acl_homepage = https://verne.mq/
+pkg_vmq_acl_fetch = git
+pkg_vmq_acl_repo = https://github.com/erlio/vmq_acl
+pkg_vmq_acl_commit = master
+
+PACKAGES += vmq_bridge
+pkg_vmq_bridge_name = vmq_bridge
+pkg_vmq_bridge_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_bridge_homepage = https://verne.mq/
+pkg_vmq_bridge_fetch = git
+pkg_vmq_bridge_repo = https://github.com/erlio/vmq_bridge
+pkg_vmq_bridge_commit = master
+
+PACKAGES += vmq_graphite
+pkg_vmq_graphite_name = vmq_graphite
+pkg_vmq_graphite_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_graphite_homepage = https://verne.mq/
+pkg_vmq_graphite_fetch = git
+pkg_vmq_graphite_repo = https://github.com/erlio/vmq_graphite
+pkg_vmq_graphite_commit = master
+
+PACKAGES += vmq_passwd
+pkg_vmq_passwd_name = vmq_passwd
+pkg_vmq_passwd_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_passwd_homepage = https://verne.mq/
+pkg_vmq_passwd_fetch = git
+pkg_vmq_passwd_repo = https://github.com/erlio/vmq_passwd
+pkg_vmq_passwd_commit = master
+
+PACKAGES += vmq_server
+pkg_vmq_server_name = vmq_server
+pkg_vmq_server_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_server_homepage = https://verne.mq/
+pkg_vmq_server_fetch = git
+pkg_vmq_server_repo = https://github.com/erlio/vmq_server
+pkg_vmq_server_commit = master
+
+PACKAGES += vmq_snmp
+pkg_vmq_snmp_name = vmq_snmp
+pkg_vmq_snmp_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_snmp_homepage = https://verne.mq/
+pkg_vmq_snmp_fetch = git
+pkg_vmq_snmp_repo = https://github.com/erlio/vmq_snmp
+pkg_vmq_snmp_commit = master
+
+PACKAGES += vmq_systree
+pkg_vmq_systree_name = vmq_systree
+pkg_vmq_systree_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_systree_homepage = https://verne.mq/
+pkg_vmq_systree_fetch = git
+pkg_vmq_systree_repo = https://github.com/erlio/vmq_systree
+pkg_vmq_systree_commit = master
+
+PACKAGES += vmstats
+pkg_vmstats_name = vmstats
+pkg_vmstats_description = tiny Erlang app that works in conjunction with statsderl in order to generate information on the Erlang VM for graphite logs.
+pkg_vmstats_homepage = https://github.com/ferd/vmstats
+pkg_vmstats_fetch = git
+pkg_vmstats_repo = https://github.com/ferd/vmstats
+pkg_vmstats_commit = master
+
+PACKAGES += walrus
+pkg_walrus_name = walrus
+pkg_walrus_description = Walrus - Mustache-like Templating
+pkg_walrus_homepage = https://github.com/devinus/walrus
+pkg_walrus_fetch = git
+pkg_walrus_repo = https://github.com/devinus/walrus
+pkg_walrus_commit = master
+
+PACKAGES += webmachine
+pkg_webmachine_name = webmachine
+pkg_webmachine_description = A REST-based system for building web applications.
+pkg_webmachine_homepage = https://github.com/basho/webmachine
+pkg_webmachine_fetch = git
+pkg_webmachine_repo = https://github.com/basho/webmachine
+pkg_webmachine_commit = master
+
+PACKAGES += websocket_client
+pkg_websocket_client_name = websocket_client
+pkg_websocket_client_description = Erlang websocket client (ws and wss supported)
+pkg_websocket_client_homepage = https://github.com/jeremyong/websocket_client
+pkg_websocket_client_fetch = git
+pkg_websocket_client_repo = https://github.com/jeremyong/websocket_client
+pkg_websocket_client_commit = master
+
+PACKAGES += worker_pool
+pkg_worker_pool_name = worker_pool
+pkg_worker_pool_description = a simple erlang worker pool
+pkg_worker_pool_homepage = https://github.com/inaka/worker_pool
+pkg_worker_pool_fetch = git
+pkg_worker_pool_repo = https://github.com/inaka/worker_pool
+pkg_worker_pool_commit = master
+
+PACKAGES += wrangler
+pkg_wrangler_name = wrangler
+pkg_wrangler_description = Import of the Wrangler svn repository.
+pkg_wrangler_homepage = http://www.cs.kent.ac.uk/projects/wrangler/Home.html
+pkg_wrangler_fetch = git
+pkg_wrangler_repo = https://github.com/RefactoringTools/wrangler
+pkg_wrangler_commit = master
+
+PACKAGES += wsock
+pkg_wsock_name = wsock
+pkg_wsock_description = Erlang library to build WebSocket clients and servers
+pkg_wsock_homepage = https://github.com/madtrick/wsock
+pkg_wsock_fetch = git
+pkg_wsock_repo = https://github.com/madtrick/wsock
+pkg_wsock_commit = master
+
+PACKAGES += xhttpc
+pkg_xhttpc_name = xhttpc
+pkg_xhttpc_description = Extensible HTTP Client for Erlang
+pkg_xhttpc_homepage = https://github.com/seriyps/xhttpc
+pkg_xhttpc_fetch = git
+pkg_xhttpc_repo = https://github.com/seriyps/xhttpc
+pkg_xhttpc_commit = master
+
+PACKAGES += xref_runner
+pkg_xref_runner_name = xref_runner
+pkg_xref_runner_description = Erlang Xref Runner (inspired by rebar xref)
+pkg_xref_runner_homepage = https://github.com/inaka/xref_runner
+pkg_xref_runner_fetch = git
+pkg_xref_runner_repo = https://github.com/inaka/xref_runner
+pkg_xref_runner_commit = master
+
+PACKAGES += yamerl
+pkg_yamerl_name = yamerl
+pkg_yamerl_description = YAML 1.2 parser in pure Erlang
+pkg_yamerl_homepage = https://github.com/yakaz/yamerl
+pkg_yamerl_fetch = git
+pkg_yamerl_repo = https://github.com/yakaz/yamerl
+pkg_yamerl_commit = master
+
+PACKAGES += yamler
+pkg_yamler_name = yamler
+pkg_yamler_description = libyaml-based yaml loader for Erlang
+pkg_yamler_homepage = https://github.com/goertzenator/yamler
+pkg_yamler_fetch = git
+pkg_yamler_repo = https://github.com/goertzenator/yamler
+pkg_yamler_commit = master
+
+PACKAGES += yaws
+pkg_yaws_name = yaws
+pkg_yaws_description = Yaws webserver
+pkg_yaws_homepage = http://yaws.hyber.org
+pkg_yaws_fetch = git
+pkg_yaws_repo = https://github.com/klacke/yaws
+pkg_yaws_commit = master
+
+PACKAGES += zab_engine
+pkg_zab_engine_name = zab_engine
+pkg_zab_engine_description = ZAB protocol implementation in Erlang
+pkg_zab_engine_homepage = https://github.com/xinmingyao/zab_engine
+pkg_zab_engine_fetch = git
+pkg_zab_engine_repo = https://github.com/xinmingyao/zab_engine
+pkg_zab_engine_commit = master
+
+PACKAGES += zabbix_sender
+pkg_zabbix_sender_name = zabbix_sender
+pkg_zabbix_sender_description = Zabbix trapper for sending data to Zabbix in pure Erlang
+pkg_zabbix_sender_homepage = https://github.com/stalkermn/zabbix_sender
+pkg_zabbix_sender_fetch = git
+pkg_zabbix_sender_repo = https://github.com/stalkermn/zabbix_sender.git
+pkg_zabbix_sender_commit = master
+
+PACKAGES += zeta
+pkg_zeta_name = zeta
+pkg_zeta_description = HTTP access log parser in Erlang
+pkg_zeta_homepage = https://github.com/s1n4/zeta
+pkg_zeta_fetch = git
+pkg_zeta_repo = https://github.com/s1n4/zeta
+pkg_zeta_commit = master
+
+PACKAGES += zippers
+pkg_zippers_name = zippers
+pkg_zippers_description = A library for functional zipper data structures in Erlang. Read more on zippers
+pkg_zippers_homepage = https://github.com/ferd/zippers
+pkg_zippers_fetch = git
+pkg_zippers_repo = https://github.com/ferd/zippers
+pkg_zippers_commit = master
+
+PACKAGES += zlists
+pkg_zlists_name = zlists
+pkg_zlists_description = Erlang lazy lists library.
+pkg_zlists_homepage = https://github.com/vjache/erlang-zlists
+pkg_zlists_fetch = git
+pkg_zlists_repo = https://github.com/vjache/erlang-zlists
+pkg_zlists_commit = master
+
+PACKAGES += zraft_lib
+pkg_zraft_lib_name = zraft_lib
+pkg_zraft_lib_description = Erlang raft consensus protocol implementation
+pkg_zraft_lib_homepage = https://github.com/dreyk/zraft_lib
+pkg_zraft_lib_fetch = git
+pkg_zraft_lib_repo = https://github.com/dreyk/zraft_lib
+pkg_zraft_lib_commit = master
+
+PACKAGES += zucchini
+pkg_zucchini_name = zucchini
+pkg_zucchini_description = An Erlang INI parser
+pkg_zucchini_homepage = https://github.com/devinus/zucchini
+pkg_zucchini_fetch = git
+pkg_zucchini_repo = https://github.com/devinus/zucchini
+pkg_zucchini_commit = master
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: search
+
+define pkg_print
+ $(verbose) printf "%s\n" \
+ $(if $(call core_eq,$(1),$(pkg_$(1)_name)),,"Pkg name: $(1)") \
+ "App name: $(pkg_$(1)_name)" \
+ "Description: $(pkg_$(1)_description)" \
+ "Home page: $(pkg_$(1)_homepage)" \
+ "Fetch with: $(pkg_$(1)_fetch)" \
+ "Repository: $(pkg_$(1)_repo)" \
+ "Commit: $(pkg_$(1)_commit)" \
+ ""
+
+endef
+
+search:
+ifdef q
+ $(foreach p,$(PACKAGES), \
+ $(if $(findstring $(call core_lc,$(q)),$(call core_lc,$(pkg_$(p)_name) $(pkg_$(p)_description))), \
+ $(call pkg_print,$(p))))
+else
+ $(foreach p,$(PACKAGES),$(call pkg_print,$(p)))
+endif
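+
+# Usage sketch: the search target above lowercases the q variable and
+# matches it against every package name and description in the index, e.g.:
+#
+#   make search q=mqtt    # list packages mentioning mqtt
+#   make search           # print the whole index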
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-deps clean-tmp-deps.log
+
+# Configuration.
+
+ifdef OTP_DEPS
+$(warning The variable OTP_DEPS is deprecated in favor of LOCAL_DEPS.)
+endif
+
+IGNORE_DEPS ?=
+export IGNORE_DEPS
+
+APPS_DIR ?= $(CURDIR)/apps
+export APPS_DIR
+
+DEPS_DIR ?= $(CURDIR)/deps
+export DEPS_DIR
+
+REBAR_DEPS_DIR = $(DEPS_DIR)
+export REBAR_DEPS_DIR
+
+REBAR_GIT ?= https://github.com/rebar/rebar
+REBAR_COMMIT ?= 576e12171ab8d69b048b827b92aa65d067deea01
+
+# External "early" plugins (see core/plugins.mk for regular plugins).
+# Both early and regular plugins use the core_dep_plugin macro.
+
+define core_dep_plugin
+ifeq ($(2),$(PROJECT))
+-include $$(patsubst $(PROJECT)/%,%,$(1))
+else
+-include $(DEPS_DIR)/$(1)
+
+$(DEPS_DIR)/$(1): $(DEPS_DIR)/$(2) ;
+endif
+endef
+
+DEP_EARLY_PLUGINS ?=
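+
+# A minimal configuration sketch (the dependency name "mydep" is
+# hypothetical): either name a dependency to include its early-plugins.mk,
+# or point at a specific .mk file inside it, as handled by the foreach below.
+#
+#   DEP_EARLY_PLUGINS = mydep              # includes $(DEPS_DIR)/mydep/early-plugins.mk
+#   DEP_EARLY_PLUGINS = mydep/mk/early.mk  # includes $(DEPS_DIR)/mydep/mk/early.mk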
+
+$(foreach p,$(DEP_EARLY_PLUGINS),\
+ $(eval $(if $(findstring /,$p),\
+ $(call core_dep_plugin,$p,$(firstword $(subst /, ,$p))),\
+ $(call core_dep_plugin,$p/early-plugins.mk,$p))))
+
+# Query functions.
+
+query_fetch_method = $(if $(dep_$(1)),$(call _qfm_dep,$(word 1,$(dep_$(1)))),$(call _qfm_pkg,$(1)))
+_qfm_dep = $(if $(dep_fetch_$(1)),$(1),$(if $(IS_DEP),legacy,fail))
+_qfm_pkg = $(if $(pkg_$(1)_fetch),$(pkg_$(1)_fetch),fail)
+
+query_name = $(if $(dep_$(1)),$(1),$(if $(pkg_$(1)_name),$(pkg_$(1)_name),$(1)))
+
+query_repo = $(call _qr,$(1),$(call query_fetch_method,$(1)))
+_qr = $(if $(query_repo_$(2)),$(call query_repo_$(2),$(1)),$(call dep_repo,$(1)))
+
+query_repo_default = $(if $(dep_$(1)),$(word 2,$(dep_$(1))),$(pkg_$(1)_repo))
+query_repo_git = $(patsubst git://github.com/%,https://github.com/%,$(call query_repo_default,$(1)))
+query_repo_git-subfolder = $(call query_repo_git,$(1))
+query_repo_git-submodule = -
+query_repo_hg = $(call query_repo_default,$(1))
+query_repo_svn = $(call query_repo_default,$(1))
+query_repo_cp = $(call query_repo_default,$(1))
+query_repo_ln = $(call query_repo_default,$(1))
+query_repo_hex = https://hex.pm/packages/$(if $(word 3,$(dep_$(1))),$(word 3,$(dep_$(1))),$(1))
+query_repo_fail = -
+query_repo_legacy = -
+
+query_version = $(call _qv,$(1),$(call query_fetch_method,$(1)))
+_qv = $(if $(query_version_$(2)),$(call query_version_$(2),$(1)),$(call dep_commit,$(1)))
+
+query_version_default = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(word 3,$(dep_$(1))),$(pkg_$(1)_commit)))
+query_version_git = $(call query_version_default,$(1))
+query_version_git-subfolder = $(call query_version_git,$(1))
+query_version_git-submodule = -
+query_version_hg = $(call query_version_default,$(1))
+query_version_svn = -
+query_version_cp = -
+query_version_ln = -
+query_version_hex = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(word 2,$(dep_$(1))),$(pkg_$(1)_commit)))
+query_version_fail = -
+query_version_legacy = -
+
+query_extra = $(call _qe,$(1),$(call query_fetch_method,$(1)))
+_qe = $(if $(query_extra_$(2)),$(call query_extra_$(2),$(1)),-)
+
+query_extra_git = -
+query_extra_git-subfolder = $(if $(dep_$(1)),subfolder=$(word 4,$(dep_$(1))),-)
+query_extra_git-submodule = -
+query_extra_hg = -
+query_extra_svn = -
+query_extra_cp = -
+query_extra_ln = -
+query_extra_hex = $(if $(dep_$(1)),package-name=$(word 3,$(dep_$(1))),-)
+query_extra_fail = -
+query_extra_legacy = -
+
+query_absolute_path = $(addprefix $(DEPS_DIR)/,$(call query_name,$(1)))
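+
+# Worked example, assuming yamerl (from the index above) is not overridden
+# by a dep_yamerl line in the project Makefile:
+#
+#   $(call query_fetch_method,yamerl) -> git
+#   $(call query_name,yamerl)         -> yamerl
+#   $(call query_repo,yamerl)         -> https://github.com/yakaz/yamerl
+#   $(call query_version,yamerl)      -> master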
+
+# Deprecated legacy query functions.
+dep_fetch = $(call query_fetch_method,$(1))
+dep_name = $(call query_name,$(1))
+dep_repo = $(call query_repo_git,$(1))
+dep_commit = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(if $(filter hex,$(word 1,$(dep_$(1)))),$(word 2,$(dep_$(1))),$(word 3,$(dep_$(1)))),$(pkg_$(1)_commit)))
+
+LOCAL_DEPS_DIRS = $(foreach a,$(LOCAL_DEPS),$(if $(wildcard $(APPS_DIR)/$(a)),$(APPS_DIR)/$(a)))
+ALL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(foreach dep,$(filter-out $(IGNORE_DEPS),$(BUILD_DEPS) $(DEPS)),$(call dep_name,$(dep))))
+
+# When we are calling an app directly we don't want to include it here,
+# otherwise it would be treated both as an app and as the top-level project.
+ALL_APPS_DIRS = $(if $(wildcard $(APPS_DIR)/),$(filter-out $(APPS_DIR),$(shell find $(APPS_DIR) -maxdepth 1 -type d)))
+ifdef ROOT_DIR
+ifndef IS_APP
+ALL_APPS_DIRS := $(filter-out $(APPS_DIR)/$(notdir $(CURDIR)),$(ALL_APPS_DIRS))
+endif
+endif
+
+ifeq ($(filter $(APPS_DIR) $(DEPS_DIR),$(subst :, ,$(ERL_LIBS))),)
+ifeq ($(ERL_LIBS),)
+ ERL_LIBS = $(APPS_DIR):$(DEPS_DIR)
+else
+ ERL_LIBS := $(ERL_LIBS):$(APPS_DIR):$(DEPS_DIR)
+endif
+endif
+export ERL_LIBS
+
+export NO_AUTOPATCH
+
+# Verbosity.
+
+dep_verbose_0 = @echo " DEP $1 ($(call dep_commit,$1))";
+dep_verbose_2 = set -x;
+dep_verbose = $(dep_verbose_$(V))
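+
+# For example, with V=0 (the default) fetching a dependency prints only the
+# terse " DEP" status line above, while make V=2 also traces the fetch
+# commands with set -x.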
+
+# Optimization: don't recompile deps unless truly necessary.
+
+ifndef IS_DEP
+ifneq ($(MAKELEVEL),0)
+$(shell rm -f ebin/dep_built)
+endif
+endif
+
+# Core targets.
+
+ALL_APPS_DIRS_TO_BUILD = $(if $(LOCAL_DEPS_DIRS)$(IS_APP),$(LOCAL_DEPS_DIRS),$(ALL_APPS_DIRS))
+
+apps:: $(ALL_APPS_DIRS) clean-tmp-deps.log | $(ERLANG_MK_TMP)
+# Create an ebin directory for every app so that Erlang recognizes them
+# as proper OTP applications when using -include_lib. This is a temporary
+# fix; a proper fix would be to compile apps/* in the right order.
+ifndef IS_APP
+ifneq ($(ALL_APPS_DIRS),)
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ mkdir -p $$dep/ebin; \
+ done
+endif
+endif
+# At the toplevel: if LOCAL_DEPS is defined with at least one local app, only
+# compile that list of apps. Otherwise, compile everything.
+# Within an app: compile all LOCAL_DEPS that are (uncompiled) local apps.
+ifneq ($(ALL_APPS_DIRS_TO_BUILD),)
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS_TO_BUILD); do \
+ if grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/apps.log; then \
+ :; \
+ else \
+ echo $$dep >> $(ERLANG_MK_TMP)/apps.log; \
+ $(MAKE) -C $$dep $(if $(IS_TEST),test-build-app) IS_APP=1; \
+ fi \
+ done
+endif
+
+clean-tmp-deps.log:
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_TMP)/apps.log $(ERLANG_MK_TMP)/deps.log
+endif
+
+# Erlang.mk does not rebuild dependencies after they have been compiled
+# once. Developers working on the top-level project and on some of its
+# dependencies at the same time may want to change this behavior.
+# There are two solutions (see the examples below):
+#     1. Set `FULL=1` so that all dependencies are visited and
+#        recursively recompiled if necessary.
+#     2. Set `FORCE_REBUILD=` to the specific list of dependencies that
+#        should be recompiled (instead of the whole set).
+
+FORCE_REBUILD ?=
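+
+# Examples (the dependency names are illustrative):
+#
+#   make deps FULL=1                          # visit and recompile everything
+#   make deps FORCE_REBUILD="cowlib ranch"    # recompile only these two deps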
+
+ifeq ($(origin FULL),undefined)
+ifneq ($(strip $(force_rebuild_dep)$(FORCE_REBUILD)),)
+define force_rebuild_dep
+echo "$(FORCE_REBUILD)" | grep -qw "$$(basename "$1")"
+endef
+endif
+endif
+
+ifneq ($(SKIP_DEPS),)
+deps::
+else
+deps:: $(ALL_DEPS_DIRS) apps clean-tmp-deps.log | $(ERLANG_MK_TMP)
+ifneq ($(ALL_DEPS_DIRS),)
+ $(verbose) set -e; for dep in $(ALL_DEPS_DIRS); do \
+ if grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/deps.log; then \
+ :; \
+ else \
+ echo $$dep >> $(ERLANG_MK_TMP)/deps.log; \
+ if [ -z "$(strip $(FULL))" ] $(if $(force_rebuild_dep),&& ! ($(call force_rebuild_dep,$$dep)),) && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ elif [ -f $$dep/GNUmakefile ] || [ -f $$dep/makefile ] || [ -f $$dep/Makefile ]; then \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ else \
+ echo "Error: No Makefile to build dependency $$dep." >&2; \
+ exit 2; \
+ fi \
+ fi \
+ done
+endif
+endif
+
+# Deps related targets.
+
+# @todo Rename GNUmakefile and makefile into Makefile first, if they exist.
+# While the Makefile could also be named GNUmakefile or makefile,
+# in practice only Makefile has been needed so far.
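+#
+# The branches below, in order: deps that ship their own erlang.mk are
+# patched to reuse the parent Erlang.mk; deps that have a Makefile are only
+# autopatched (via dep_autopatch2) when that Makefile shows traces of rebar
+# (a rebar.lock, an "include ../*.mk" line, or rebar mentions in any .mk
+# file) and are left alone otherwise; deps without a Makefile get a noop
+# Makefile when they have no src/ directory, and dep_autopatch2 otherwise.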
+define dep_autopatch
+ if [ -f $(DEPS_DIR)/$(1)/erlang.mk ]; then \
+ rm -rf $(DEPS_DIR)/$1/ebin/; \
+ $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+ $(call dep_autopatch_erlang_mk,$(1)); \
+ elif [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+ if [ -f $(DEPS_DIR)/$1/rebar.lock ]; then \
+ $(call dep_autopatch2,$1); \
+ elif [ 0 != `grep -c "include ../\w*\.mk" $(DEPS_DIR)/$(1)/Makefile` ]; then \
+ $(call dep_autopatch2,$(1)); \
+ elif [ 0 != `grep -ci "^[^#].*rebar" $(DEPS_DIR)/$(1)/Makefile` ]; then \
+ $(call dep_autopatch2,$(1)); \
+ elif [ -n "`find $(DEPS_DIR)/$(1)/ -type f -name \*.mk -not -name erlang.mk -exec grep -i "^[^#].*rebar" '{}' \;`" ]; then \
+ $(call dep_autopatch2,$(1)); \
+ fi \
+ else \
+ if [ ! -d $(DEPS_DIR)/$(1)/src/ ]; then \
+ $(call dep_autopatch_noop,$(1)); \
+ else \
+ $(call dep_autopatch2,$(1)); \
+ fi \
+ fi
+endef
+
+define dep_autopatch2
+ ! test -f $(DEPS_DIR)/$1/ebin/$1.app || \
+ mv -n $(DEPS_DIR)/$1/ebin/$1.app $(DEPS_DIR)/$1/src/$1.app.src; \
+ rm -f $(DEPS_DIR)/$1/ebin/$1.app; \
+ if [ -f $(DEPS_DIR)/$1/src/$1.app.src.script ]; then \
+ $(call erlang,$(call dep_autopatch_appsrc_script.erl,$(1))); \
+ fi; \
+ $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+ if [ -f $(DEPS_DIR)/$(1)/rebar -o -f $(DEPS_DIR)/$(1)/rebar.config -o -f $(DEPS_DIR)/$(1)/rebar.config.script -o -f $(DEPS_DIR)/$1/rebar.lock ]; then \
+ $(call dep_autopatch_fetch_rebar); \
+ $(call dep_autopatch_rebar,$(1)); \
+ else \
+ $(call dep_autopatch_gen,$(1)); \
+ fi
+endef
+
+define dep_autopatch_noop
+ printf "noop:\n" > $(DEPS_DIR)/$(1)/Makefile
+endef
+
+# Replace "include erlang.mk" with a line that will load the parent Erlang.mk
+# if given. Do it for all 3 possible Makefile file names.
+ifeq ($(NO_AUTOPATCH_ERLANG_MK),)
+define dep_autopatch_erlang_mk
+ for f in Makefile makefile GNUmakefile; do \
+ if [ -f $(DEPS_DIR)/$1/$$f ]; then \
+ sed -i.bak s/'include *erlang.mk'/'include $$(if $$(ERLANG_MK_FILENAME),$$(ERLANG_MK_FILENAME),erlang.mk)'/ $(DEPS_DIR)/$1/$$f; \
+ fi \
+ done
+endef
+else
+define dep_autopatch_erlang_mk
+ :
+endef
+endif
+
+define dep_autopatch_gen
+ printf "%s\n" \
+ "ERLC_OPTS = +debug_info" \
+ "include ../../erlang.mk" > $(DEPS_DIR)/$(1)/Makefile
+endef
+
+# We use flock/lockf when available to avoid concurrency issues.
+define dep_autopatch_fetch_rebar
+ if command -v flock >/dev/null; then \
+ flock $(ERLANG_MK_TMP)/rebar.lock sh -c "$(call dep_autopatch_fetch_rebar2)"; \
+ elif command -v lockf >/dev/null; then \
+ lockf $(ERLANG_MK_TMP)/rebar.lock sh -c "$(call dep_autopatch_fetch_rebar2)"; \
+ else \
+ $(call dep_autopatch_fetch_rebar2); \
+ fi
+endef
+
+define dep_autopatch_fetch_rebar2
+ if [ ! -d $(ERLANG_MK_TMP)/rebar ]; then \
+ git clone -q -n -- $(REBAR_GIT) $(ERLANG_MK_TMP)/rebar; \
+ cd $(ERLANG_MK_TMP)/rebar; \
+ git checkout -q $(REBAR_COMMIT); \
+ ./bootstrap; \
+ cd -; \
+ fi
+endef
+
+define dep_autopatch_rebar
+ if [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+ mv $(DEPS_DIR)/$(1)/Makefile $(DEPS_DIR)/$(1)/Makefile.orig.mk; \
+ fi; \
+ $(call erlang,$(call dep_autopatch_rebar.erl,$(1))); \
+ rm -f $(DEPS_DIR)/$(1)/ebin/$(1).app
+endef
+
+define dep_autopatch_rebar.erl
+ application:load(rebar),
+ application:set_env(rebar, log_level, debug),
+ rmemo:start(),
+ Conf1 = case file:consult("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config)") of
+ {ok, Conf0} -> Conf0;
+ _ -> []
+ end,
+ {Conf, OsEnv} = fun() ->
+ case filelib:is_file("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)") of
+ false -> {Conf1, []};
+ true ->
+ Bindings0 = erl_eval:new_bindings(),
+ Bindings1 = erl_eval:add_binding('CONFIG', Conf1, Bindings0),
+ Bindings = erl_eval:add_binding('SCRIPT', "$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings1),
+ Before = os:getenv(),
+ {ok, Conf2} = file:script("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings),
+ {Conf2, lists:foldl(fun(E, Acc) -> lists:delete(E, Acc) end, os:getenv(), Before)}
+ end
+ end(),
+ Write = fun (Text) ->
+ file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/Makefile)", Text, [append])
+ end,
+ Escape = fun (Text) ->
+ re:replace(Text, "\\\\$$", "\$$$$", [global, {return, list}])
+ end,
+ Write("IGNORE_DEPS += edown eper eunit_formatters meck node_package "
+ "rebar_lock_deps_plugin rebar_vsn_plugin reltool_util\n"),
+ Write("C_SRC_DIR = /path/do/not/exist\n"),
+ Write("C_SRC_TYPE = rebar\n"),
+ Write("DRV_CFLAGS = -fPIC\nexport DRV_CFLAGS\n"),
+ Write(["ERLANG_ARCH = ", rebar_utils:wordsize(), "\nexport ERLANG_ARCH\n"]),
+ ToList = fun
+ (V) when is_atom(V) -> atom_to_list(V);
+ (V) when is_list(V) -> "'\\"" ++ V ++ "\\"'"
+ end,
+ fun() ->
+ Write("ERLC_OPTS = +debug_info\nexport ERLC_OPTS\n"),
+ case lists:keyfind(erl_opts, 1, Conf) of
+ false -> ok;
+ {_, ErlOpts} ->
+ lists:foreach(fun
+ ({d, D}) ->
+ Write("ERLC_OPTS += -D" ++ ToList(D) ++ "=1\n");
+ ({d, DKey, DVal}) ->
+ Write("ERLC_OPTS += -D" ++ ToList(DKey) ++ "=" ++ ToList(DVal) ++ "\n");
+ ({i, I}) ->
+ Write(["ERLC_OPTS += -I ", I, "\n"]);
+ ({platform_define, Regex, D}) ->
+ case rebar_utils:is_arch(Regex) of
+ true -> Write("ERLC_OPTS += -D" ++ ToList(D) ++ "=1\n");
+ false -> ok
+ end;
+ ({parse_transform, PT}) ->
+ Write("ERLC_OPTS += +'{parse_transform, " ++ ToList(PT) ++ "}'\n");
+ (_) -> ok
+ end, ErlOpts)
+ end,
+ Write("\n")
+ end(),
+ GetHexVsn = fun(N, NP) ->
+ case file:consult("$(call core_native_path,$(DEPS_DIR)/$1/rebar.lock)") of
+ {ok, Lock} ->
+ io:format("~p~n", [Lock]),
+ case lists:keyfind("1.1.0", 1, Lock) of
+ {_, LockPkgs} ->
+ io:format("~p~n", [LockPkgs]),
+ case lists:keyfind(atom_to_binary(N, latin1), 1, LockPkgs) of
+ {_, {pkg, _, Vsn}, _} ->
+ io:format("~p~n", [Vsn]),
+ {N, {hex, NP, binary_to_list(Vsn)}};
+ _ ->
+ false
+ end;
+ _ ->
+ false
+ end;
+ _ ->
+ false
+ end
+ end,
+ SemVsn = fun
+ ("~>" ++ S0) ->
+ S = case S0 of
+ " " ++ S1 -> S1;
+ _ -> S0
+ end,
+ case length([ok || $$. <- S]) of
+ 0 -> S ++ ".0.0";
+ 1 -> S ++ ".0";
+ _ -> S
+ end;
+ (S) -> S
+ end,
+ fun() ->
+ File = case lists:keyfind(deps, 1, Conf) of
+ false -> [];
+ {_, Deps} ->
+ [begin case case Dep of
+ N when is_atom(N) -> GetHexVsn(N, N);
+ {N, S} when is_atom(N), is_list(S) -> {N, {hex, N, SemVsn(S)}};
+ {N, {pkg, NP}} when is_atom(N) -> GetHexVsn(N, NP);
+ {N, S, {pkg, NP}} -> {N, {hex, NP, S}};
+ {N, S} when is_tuple(S) -> {N, S};
+ {N, _, S} -> {N, S};
+ {N, _, S, _} -> {N, S};
+ _ -> false
+ end of
+ false -> ok;
+ {Name, Source} ->
+ {Method, Repo, Commit} = case Source of
+ {hex, NPV, V} -> {hex, V, NPV};
+ {git, R} -> {git, R, master};
+ {M, R, {branch, C}} -> {M, R, C};
+ {M, R, {ref, C}} -> {M, R, C};
+ {M, R, {tag, C}} -> {M, R, C};
+ {M, R, C} -> {M, R, C}
+ end,
+ Write(io_lib:format("DEPS += ~s\ndep_~s = ~s ~s ~s~n", [Name, Name, Method, Repo, Commit]))
+ end end || Dep <- Deps]
+ end
+ end(),
+ fun() ->
+ case lists:keyfind(erl_first_files, 1, Conf) of
+ false -> ok;
+ {_, Files} ->
+ Names = [[" ", case lists:reverse(F) of
+ "lre." ++ Elif -> lists:reverse(Elif);
+ "lrx." ++ Elif -> lists:reverse(Elif);
+ "lry." ++ Elif -> lists:reverse(Elif);
+ Elif -> lists:reverse(Elif)
+ end] || "src/" ++ F <- Files],
+ Write(io_lib:format("COMPILE_FIRST +=~s\n", [Names]))
+ end
+ end(),
+ Write("\n\nrebar_dep: preprocess pre-deps deps pre-app app\n"),
+ Write("\npreprocess::\n"),
+ Write("\npre-deps::\n"),
+ Write("\npre-app::\n"),
+ PatchHook = fun(Cmd) ->
+ Cmd2 = re:replace(Cmd, "^([g]?make)(.*)( -C.*)", "\\\\1\\\\3\\\\2", [{return, list}]),
+ case Cmd2 of
+ "make -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1);
+ "gmake -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1);
+ "make " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1);
+ "gmake " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1);
+ _ -> Escape(Cmd)
+ end
+ end,
+ fun() ->
+ case lists:keyfind(pre_hooks, 1, Conf) of
+ false -> ok;
+ {_, Hooks} ->
+ [case H of
+ {'get-deps', Cmd} ->
+ Write("\npre-deps::\n\t" ++ PatchHook(Cmd) ++ "\n");
+ {compile, Cmd} ->
+ Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n");
+ {Regex, compile, Cmd} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n");
+ false -> ok
+ end;
+ _ -> ok
+ end || H <- Hooks]
+ end
+ end(),
+ ShellToMk = fun(V0) ->
+ V1 = re:replace(V0, "[$$][(]", "$$\(shell ", [global]),
+ V = re:replace(V1, "([$$])(?![(])(\\\\w*)", "\\\\1(\\\\2)", [global]),
+ re:replace(V, "-Werror\\\\b", "", [{return, list}, global])
+ end,
+ PortSpecs = fun() ->
+ case lists:keyfind(port_specs, 1, Conf) of
+ false ->
+ case filelib:is_dir("$(call core_native_path,$(DEPS_DIR)/$1/c_src)") of
+ false -> [];
+ true ->
+ [{"priv/" ++ proplists:get_value(so_name, Conf, "$(1)_drv.so"),
+ proplists:get_value(port_sources, Conf, ["c_src/*.c"]), []}]
+ end;
+ {_, Specs} ->
+ lists:flatten([case S of
+ {Output, Input} -> {ShellToMk(Output), Input, []};
+ {Regex, Output, Input} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {ShellToMk(Output), Input, []};
+ false -> []
+ end;
+ {Regex, Output, Input, [{env, Env}]} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {ShellToMk(Output), Input, Env};
+ false -> []
+ end
+ end || S <- Specs])
+ end
+ end(),
+ PortSpecWrite = fun (Text) ->
+ file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/c_src/Makefile.erlang.mk)", Text, [append])
+ end,
+ case PortSpecs of
+ [] -> ok;
+ _ ->
+ Write("\npre-app::\n\t@$$\(MAKE) --no-print-directory -f c_src/Makefile.erlang.mk\n"),
+ PortSpecWrite(io_lib:format("ERL_CFLAGS ?= -finline-functions -Wall -fPIC -I \\"~s/erts-~s/include\\" -I \\"~s\\"\n",
+ [code:root_dir(), erlang:system_info(version), code:lib_dir(erl_interface, include)])),
+ PortSpecWrite(io_lib:format("ERL_LDFLAGS ?= -L \\"~s\\" -lei\n",
+ [code:lib_dir(erl_interface, lib)])),
+ [PortSpecWrite(["\n", E, "\n"]) || E <- OsEnv],
+ FilterEnv = fun(Env) ->
+ lists:flatten([case E of
+ {_, _} -> E;
+ {Regex, K, V} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {K, V};
+ false -> []
+ end
+ end || E <- Env])
+ end,
+ MergeEnv = fun(Env) ->
+ lists:foldl(fun ({K, V}, Acc) ->
+ case lists:keyfind(K, 1, Acc) of
+ false -> [{K, rebar_utils:expand_env_variable(V, K, "")}|Acc];
+ {_, V0} -> [{K, rebar_utils:expand_env_variable(V, K, V0)}|Acc]
+ end
+ end, [], Env)
+ end,
+ PortEnv = case lists:keyfind(port_env, 1, Conf) of
+ false -> [];
+ {_, PortEnv0} -> FilterEnv(PortEnv0)
+ end,
+ PortSpec = fun ({Output, Input0, Env}) ->
+ filelib:ensure_dir("$(call core_native_path,$(DEPS_DIR)/$1/)" ++ Output),
+ Input = [[" ", I] || I <- Input0],
+ PortSpecWrite([
+ [["\n", K, " = ", ShellToMk(V)] || {K, V} <- lists:reverse(MergeEnv(PortEnv))],
+ case $(PLATFORM) of
+ darwin -> "\n\nLDFLAGS += -flat_namespace -undefined suppress";
+ _ -> ""
+ end,
+ "\n\nall:: ", Output, "\n\t@:\n\n",
+ "%.o: %.c\n\t$$\(CC) -c -o $$\@ $$\< $$\(CFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.C\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.cc\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.cpp\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ [[Output, ": ", K, " += ", ShellToMk(V), "\n"] || {K, V} <- lists:reverse(MergeEnv(FilterEnv(Env)))],
+ Output, ": $$\(foreach ext,.c .C .cc .cpp,",
+ "$$\(patsubst %$$\(ext),%.o,$$\(filter %$$\(ext),$$\(wildcard", Input, "))))\n",
+ "\t$$\(CC) -o $$\@ $$\? $$\(LDFLAGS) $$\(ERL_LDFLAGS) $$\(DRV_LDFLAGS) $$\(EXE_LDFLAGS)",
+ case {filename:extension(Output), $(PLATFORM)} of
+ {[], _} -> "\n";
+ {_, darwin} -> "\n";
+ _ -> " -shared\n"
+ end])
+ end,
+ [PortSpec(S) || S <- PortSpecs]
+ end,
+ fun() ->
+ case lists:keyfind(plugins, 1, Conf) of
+ false -> ok;
+ {_, Plugins0} ->
+ Plugins = [P || P <- Plugins0, is_tuple(P)],
+ case lists:keyfind('lfe-compile', 1, Plugins) of
+ false -> ok;
+ _ -> Write("\nBUILD_DEPS = lfe lfe.mk\ndep_lfe.mk = git https://github.com/ninenines/lfe.mk master\nDEP_PLUGINS = lfe.mk\n")
+ end
+ end
+ end(),
+ Write("\ninclude $$\(if $$\(ERLANG_MK_FILENAME),$$\(ERLANG_MK_FILENAME),erlang.mk)"),
+ RunPlugin = fun(Plugin, Step) ->
+ case erlang:function_exported(Plugin, Step, 2) of
+ false -> ok;
+ true ->
+ c:cd("$(call core_native_path,$(DEPS_DIR)/$1/)"),
+ Ret = Plugin:Step({config, "", Conf, dict:new(), dict:new(), dict:new(),
+ dict:store(base_dir, "", dict:new())}, undefined),
+ io:format("rebar plugin ~p step ~p ret ~p~n", [Plugin, Step, Ret])
+ end
+ end,
+ fun() ->
+ case lists:keyfind(plugins, 1, Conf) of
+ false -> ok;
+ {_, Plugins0} ->
+ Plugins = [P || P <- Plugins0, is_atom(P)],
+ [begin
+ case lists:keyfind(deps, 1, Conf) of
+ false -> ok;
+ {_, Deps} ->
+ case lists:keyfind(P, 1, Deps) of
+ false -> ok;
+ _ ->
+ Path = "$(call core_native_path,$(DEPS_DIR)/)" ++ atom_to_list(P),
+ io:format("~s", [os:cmd("$(MAKE) -C $(call core_native_path,$(DEPS_DIR)/$1) " ++ Path)]),
+ io:format("~s", [os:cmd("$(MAKE) -C " ++ Path ++ " IS_DEP=1")]),
+ code:add_patha(Path ++ "/ebin")
+ end
+ end
+ end || P <- Plugins],
+ [case code:load_file(P) of
+ {module, P} -> ok;
+ _ ->
+ case lists:keyfind(plugin_dir, 1, Conf) of
+ false -> ok;
+ {_, PluginsDir} ->
+ ErlFile = "$(call core_native_path,$(DEPS_DIR)/$1/)" ++ PluginsDir ++ "/" ++ atom_to_list(P) ++ ".erl",
+ {ok, P, Bin} = compile:file(ErlFile, [binary]),
+ {module, P} = code:load_binary(P, ErlFile, Bin)
+ end
+ end || P <- Plugins],
+ [RunPlugin(P, preprocess) || P <- Plugins],
+ [RunPlugin(P, pre_compile) || P <- Plugins],
+ [RunPlugin(P, compile) || P <- Plugins]
+ end
+ end(),
+ halt()
+endef
+
+define dep_autopatch_appsrc_script.erl
+ AppSrc = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+ AppSrcScript = AppSrc ++ ".script",
+ {ok, Conf0} = file:consult(AppSrc),
+ Bindings0 = erl_eval:new_bindings(),
+ Bindings1 = erl_eval:add_binding('CONFIG', Conf0, Bindings0),
+ Bindings = erl_eval:add_binding('SCRIPT', AppSrcScript, Bindings1),
+ Conf = case file:script(AppSrcScript, Bindings) of
+ {ok, [C]} -> C;
+ {ok, C} -> C
+ end,
+ ok = file:write_file(AppSrc, io_lib:format("~p.~n", [Conf])),
+ halt()
+endef
+
+define dep_autopatch_appsrc.erl
+ AppSrcOut = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+ AppSrcIn = case filelib:is_regular(AppSrcOut) of false -> "$(call core_native_path,$(DEPS_DIR)/$1/ebin/$1.app)"; true -> AppSrcOut end,
+ case filelib:is_regular(AppSrcIn) of
+ false -> ok;
+ true ->
+ {ok, [{application, $(1), L0}]} = file:consult(AppSrcIn),
+ L1 = lists:keystore(modules, 1, L0, {modules, []}),
+ L2 = case lists:keyfind(vsn, 1, L1) of
+ {_, git} -> lists:keyreplace(vsn, 1, L1, {vsn, lists:droplast(os:cmd("git -C $(DEPS_DIR)/$1 describe --dirty --tags --always"))});
+ {_, {cmd, _}} -> lists:keyreplace(vsn, 1, L1, {vsn, "cmd"});
+ _ -> L1
+ end,
+ L3 = case lists:keyfind(registered, 1, L2) of false -> [{registered, []}|L2]; _ -> L2 end,
+ ok = file:write_file(AppSrcOut, io_lib:format("~p.~n", [{application, $(1), L3}])),
+ case AppSrcOut of AppSrcIn -> ok; _ -> ok = file:delete(AppSrcIn) end
+ end,
+ halt()
+endef
+
+define dep_fetch_git
+ git clone -q -n -- $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && git checkout -q $(call dep_commit,$(1));
+endef
+
+define dep_fetch_git-subfolder
+ mkdir -p $(ERLANG_MK_TMP)/git-subfolder; \
+ git clone -q -n -- $(call dep_repo,$1) \
+ $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1); \
+ cd $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1) \
+ && git checkout -q $(call dep_commit,$1); \
+ ln -s $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1)/$(word 4,$(dep_$(1))) \
+ $(DEPS_DIR)/$(call dep_name,$1);
+endef
+
+define dep_fetch_git-submodule
+ git submodule update --init -- $(DEPS_DIR)/$1;
+endef
+
+define dep_fetch_hg
+ hg clone -q -U $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && hg update -q $(call dep_commit,$(1));
+endef
+
+define dep_fetch_svn
+ svn checkout -q $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+define dep_fetch_cp
+ cp -R $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+define dep_fetch_ln
+ ln -s $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+# Hex deps carry only a package version, so there is no need to look them
+# up in the Erlang.mk package index.
+define dep_fetch_hex
+ mkdir -p $(ERLANG_MK_TMP)/hex $(DEPS_DIR)/$1; \
+ $(call core_http_get,$(ERLANG_MK_TMP)/hex/$1.tar,\
+ https://repo.hex.pm/tarballs/$(if $(word 3,$(dep_$1)),$(word 3,$(dep_$1)),$1)-$(strip $(word 2,$(dep_$1))).tar); \
+ tar -xOf $(ERLANG_MK_TMP)/hex/$1.tar contents.tar.gz | tar -C $(DEPS_DIR)/$1 -xzf -;
+endef
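+
+# Illustrative hex dependency specifications (names and versions are
+# examples only):
+#
+#   DEPS += jsx
+#   dep_jsx = hex 2.9.0
+#
+#   # When the Hex package name differs from the application name:
+#   dep_myapp = hex 1.0.0 my_hex_package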
+
+define dep_fetch_fail
+ echo "Error: Unknown or invalid dependency: $(1)." >&2; \
+ exit 78;
+endef
+
+# Kept for compatibility with older Erlang.mk configurations.
+define dep_fetch_legacy
+ $(warning WARNING: '$(1)' dependency configuration uses deprecated format.) \
+ git clone -q -n -- $(word 1,$(dep_$(1))) $(DEPS_DIR)/$(1); \
+ cd $(DEPS_DIR)/$(1) && git checkout -q $(if $(word 2,$(dep_$(1))),$(word 2,$(dep_$(1))),master);
+endef
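+
+# Illustrative comparison, using a hypothetical dependency "mydep":
+#
+#   dep_mydep = https://github.com/user/mydep 1.0.0      # deprecated format
+#   dep_mydep = git https://github.com/user/mydep 1.0.0  # current equivalent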
+
+define dep_target
+$(DEPS_DIR)/$(call dep_name,$1): | $(ERLANG_MK_TMP)
+ $(eval DEP_NAME := $(call dep_name,$1))
+ $(eval DEP_STR := $(if $(filter $1,$(DEP_NAME)),$1,"$1 ($(DEP_NAME))"))
+ $(verbose) if test -d $(APPS_DIR)/$(DEP_NAME); then \
+ echo "Error: Dependency" $(DEP_STR) "conflicts with application found in $(APPS_DIR)/$(DEP_NAME)." >&2; \
+ exit 17; \
+ fi
+ $(verbose) mkdir -p $(DEPS_DIR)
+ $(dep_verbose) $(call dep_fetch_$(strip $(call dep_fetch,$(1))),$(1))
+ $(verbose) if [ -f $(DEPS_DIR)/$(1)/configure.ac -o -f $(DEPS_DIR)/$(1)/configure.in ] \
+ && [ ! -f $(DEPS_DIR)/$(1)/configure ]; then \
+ echo " AUTO " $(DEP_STR); \
+ cd $(DEPS_DIR)/$(1) && autoreconf -Wall -vif -I m4; \
+ fi
+ - $(verbose) if [ -f $(DEPS_DIR)/$(DEP_NAME)/configure ]; then \
+ echo " CONF " $(DEP_STR); \
+ cd $(DEPS_DIR)/$(DEP_NAME) && ./configure; \
+ fi
+ifeq ($(filter $(1),$(NO_AUTOPATCH)),)
+ $(verbose) $$(MAKE) --no-print-directory autopatch-$(DEP_NAME)
+endif
+
+.PHONY: autopatch-$(call dep_name,$1)
+
+autopatch-$(call dep_name,$1)::
+ $(verbose) if [ "$(1)" = "amqp_client" -a "$(RABBITMQ_CLIENT_PATCH)" ]; then \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \
+ echo " PATCH Downloading rabbitmq-codegen"; \
+ git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \
+ fi; \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-server ]; then \
+ echo " PATCH Downloading rabbitmq-server"; \
+ git clone https://github.com/rabbitmq/rabbitmq-server.git $(DEPS_DIR)/rabbitmq-server; \
+ fi; \
+ ln -s $(DEPS_DIR)/amqp_client/deps/rabbit_common-0.0.0 $(DEPS_DIR)/rabbit_common; \
+ elif [ "$(1)" = "rabbit" -a "$(RABBITMQ_SERVER_PATCH)" ]; then \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \
+ echo " PATCH Downloading rabbitmq-codegen"; \
+ git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \
+ fi \
+ elif [ "$1" = "elixir" -a "$(ELIXIR_PATCH)" ]; then \
+ ln -s lib/elixir/ebin $(DEPS_DIR)/elixir/; \
+ else \
+ $$(call dep_autopatch,$(call dep_name,$1)) \
+ fi
+endef
+
+$(foreach dep,$(BUILD_DEPS) $(DEPS),$(eval $(call dep_target,$(dep))))
+
+ifndef IS_APP
+clean:: clean-apps
+
+clean-apps:
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ $(MAKE) -C $$dep clean IS_APP=1; \
+ done
+
+distclean:: distclean-apps
+
+distclean-apps:
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ $(MAKE) -C $$dep distclean IS_APP=1; \
+ done
+endif
+
+ifndef SKIP_DEPS
+distclean:: distclean-deps
+
+distclean-deps:
+ $(gen_verbose) rm -rf $(DEPS_DIR)
+endif
+
+# Forward-declare variables used in core/deps-tools.mk. This is required
+# in case plugins use them.
+
+ERLANG_MK_RECURSIVE_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-deps-list.log
+ERLANG_MK_RECURSIVE_DOC_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-doc-deps-list.log
+ERLANG_MK_RECURSIVE_REL_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-rel-deps-list.log
+ERLANG_MK_RECURSIVE_TEST_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-test-deps-list.log
+ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-shell-deps-list.log
+
+ERLANG_MK_QUERY_DEPS_FILE = $(ERLANG_MK_TMP)/query-deps.log
+ERLANG_MK_QUERY_DOC_DEPS_FILE = $(ERLANG_MK_TMP)/query-doc-deps.log
+ERLANG_MK_QUERY_REL_DEPS_FILE = $(ERLANG_MK_TMP)/query-rel-deps.log
+ERLANG_MK_QUERY_TEST_DEPS_FILE = $(ERLANG_MK_TMP)/query-test-deps.log
+ERLANG_MK_QUERY_SHELL_DEPS_FILE = $(ERLANG_MK_TMP)/query-shell-deps.log
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: clean-app
+
+# Configuration.
+
+ERLC_OPTS ?= -Werror +debug_info +warn_export_vars +warn_shadow_vars \
+ +warn_obsolete_guard # +bin_opt_info +warn_export_all +warn_missing_spec
+COMPILE_FIRST ?=
+COMPILE_FIRST_PATHS = $(addprefix src/,$(addsuffix .erl,$(COMPILE_FIRST)))
+ERLC_EXCLUDE ?=
+ERLC_EXCLUDE_PATHS = $(addprefix src/,$(addsuffix .erl,$(ERLC_EXCLUDE)))
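+
+# These are typically tuned from the project Makefile, e.g. (module names
+# below are illustrative):
+#
+#   ERLC_OPTS += +warn_missing_spec
+#   COMPILE_FIRST = my_behaviour
+#   ERLC_EXCLUDE = my_generated_module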
+
+ERLC_ASN1_OPTS ?=
+
+ERLC_MIB_OPTS ?=
+COMPILE_MIB_FIRST ?=
+COMPILE_MIB_FIRST_PATHS = $(addprefix mibs/,$(addsuffix .mib,$(COMPILE_MIB_FIRST)))
+
+# Verbosity.
+
+app_verbose_0 = @echo " APP " $(PROJECT);
+app_verbose_2 = set -x;
+app_verbose = $(app_verbose_$(V))
+
+appsrc_verbose_0 = @echo " APP " $(PROJECT).app.src;
+appsrc_verbose_2 = set -x;
+appsrc_verbose = $(appsrc_verbose_$(V))
+
+makedep_verbose_0 = @echo " DEPEND" $(PROJECT).d;
+makedep_verbose_2 = set -x;
+makedep_verbose = $(makedep_verbose_$(V))
+
+erlc_verbose_0 = @echo " ERLC " $(filter-out $(patsubst %,%.erl,$(ERLC_EXCLUDE)),\
+ $(filter %.erl %.core,$(?F)));
+erlc_verbose_2 = set -x;
+erlc_verbose = $(erlc_verbose_$(V))
+
+xyrl_verbose_0 = @echo " XYRL " $(filter %.xrl %.yrl,$(?F));
+xyrl_verbose_2 = set -x;
+xyrl_verbose = $(xyrl_verbose_$(V))
+
+asn1_verbose_0 = @echo " ASN1 " $(filter %.asn1,$(?F));
+asn1_verbose_2 = set -x;
+asn1_verbose = $(asn1_verbose_$(V))
+
+mib_verbose_0 = @echo " MIB " $(filter %.bin %.mib,$(?F));
+mib_verbose_2 = set -x;
+mib_verbose = $(mib_verbose_$(V))
+
+ifneq ($(wildcard src/),)
+
+# Targets.
+
+app:: $(if $(wildcard ebin/test),clean) deps
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d
+ $(verbose) $(MAKE) --no-print-directory app-build
+
+ifeq ($(wildcard src/$(PROJECT_MOD).erl),)
+define app_file
+{application, '$(PROJECT)', [
+ {description, "$(PROJECT_DESCRIPTION)"},
+ {vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP),
+ {id$(comma)$(space)"$(1)"}$(comma))
+ {modules, [$(call comma_list,$(2))]},
+ {registered, []},
+ {applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(foreach dep,$(DEPS),$(call dep_name,$(dep))))]},
+ {env, $(subst \,\\,$(PROJECT_ENV))}$(if $(findstring {,$(PROJECT_APP_EXTRA_KEYS)),$(comma)$(newline)$(tab)$(subst \,\\,$(PROJECT_APP_EXTRA_KEYS)),)
+]}.
+endef
+else
+define app_file
+{application, '$(PROJECT)', [
+ {description, "$(PROJECT_DESCRIPTION)"},
+ {vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP),
+ {id$(comma)$(space)"$(1)"}$(comma))
+ {modules, [$(call comma_list,$(2))]},
+ {registered, [$(call comma_list,$(PROJECT)_sup $(PROJECT_REGISTERED))]},
+ {applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(foreach dep,$(DEPS),$(call dep_name,$(dep))))]},
+ {mod, {$(PROJECT_MOD), []}},
+ {env, $(subst \,\\,$(PROJECT_ENV))}$(if $(findstring {,$(PROJECT_APP_EXTRA_KEYS)),$(comma)$(newline)$(tab)$(subst \,\\,$(PROJECT_APP_EXTRA_KEYS)),)
+]}.
+endef
+endif
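+
+# Illustrative project settings consumed by the app_file templates above
+# (all values are examples):
+#
+#   PROJECT = myapp
+#   PROJECT_DESCRIPTION = An example application
+#   PROJECT_VERSION = 0.1.0
+#   PROJECT_MOD = myapp_app
+#   PROJECT_ENV = [{listen_port, 8080}]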
+
+app-build: ebin/$(PROJECT).app
+ $(verbose) :
+
+# Source files.
+
+ALL_SRC_FILES := $(sort $(call core_find,src/,*))
+
+ERL_FILES := $(filter %.erl,$(ALL_SRC_FILES))
+CORE_FILES := $(filter %.core,$(ALL_SRC_FILES))
+
+# ASN.1 files.
+
+ifneq ($(wildcard asn1/),)
+ASN1_FILES = $(sort $(call core_find,asn1/,*.asn1))
+ERL_FILES += $(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES))))
+
+define compile_asn1
+ $(verbose) mkdir -p include/
+ $(asn1_verbose) erlc -v -I include/ -o asn1/ +noobj $(ERLC_ASN1_OPTS) $(1)
+ $(verbose) mv asn1/*.erl src/
+ -$(verbose) mv asn1/*.hrl include/
+ $(verbose) mv asn1/*.asn1db include/
+endef
+
+$(PROJECT).d:: $(ASN1_FILES)
+ $(if $(strip $?),$(call compile_asn1,$?))
+endif
+
+# SNMP MIB files.
+
+ifneq ($(wildcard mibs/),)
+MIB_FILES = $(sort $(call core_find,mibs/,*.mib))
+
+$(PROJECT).d:: $(COMPILE_MIB_FIRST_PATHS) $(MIB_FILES)
+ $(verbose) mkdir -p include/ priv/mibs/
+ $(mib_verbose) erlc -v $(ERLC_MIB_OPTS) -o priv/mibs/ -I priv/mibs/ $?
+ $(mib_verbose) erlc -o include/ -- $(addprefix priv/mibs/,$(patsubst %.mib,%.bin,$(notdir $?)))
+endif
+
+# Leex and Yecc files.
+
+XRL_FILES := $(filter %.xrl,$(ALL_SRC_FILES))
+XRL_ERL_FILES = $(addprefix src/,$(patsubst %.xrl,%.erl,$(notdir $(XRL_FILES))))
+ERL_FILES += $(XRL_ERL_FILES)
+
+YRL_FILES := $(filter %.yrl,$(ALL_SRC_FILES))
+YRL_ERL_FILES = $(addprefix src/,$(patsubst %.yrl,%.erl,$(notdir $(YRL_FILES))))
+ERL_FILES += $(YRL_ERL_FILES)
+
+$(PROJECT).d:: $(XRL_FILES) $(YRL_FILES)
+ $(if $(strip $?),$(xyrl_verbose) erlc -v -o src/ $(YRL_ERLC_OPTS) $?)
+
+# Erlang and Core Erlang files.
+
+define makedep.erl
+ E = ets:new(makedep, [bag]),
+ G = digraph:new([acyclic]),
+ ErlFiles = lists:usort(string:tokens("$(ERL_FILES)", " ")),
+ DepsDir = "$(call core_native_path,$(DEPS_DIR))",
+ AppsDir = "$(call core_native_path,$(APPS_DIR))",
+ DepsDirsSrc = "$(if $(wildcard $(DEPS_DIR)/*/src), $(call core_native_path,$(wildcard $(DEPS_DIR)/*/src)))",
+ DepsDirsInc = "$(if $(wildcard $(DEPS_DIR)/*/include), $(call core_native_path,$(wildcard $(DEPS_DIR)/*/include)))",
+ AppsDirsSrc = "$(if $(wildcard $(APPS_DIR)/*/src), $(call core_native_path,$(wildcard $(APPS_DIR)/*/src)))",
+ AppsDirsInc = "$(if $(wildcard $(APPS_DIR)/*/include), $(call core_native_path,$(wildcard $(APPS_DIR)/*/include)))",
+ DepsDirs = lists:usort(string:tokens(DepsDirsSrc++DepsDirsInc, " ")),
+ AppsDirs = lists:usort(string:tokens(AppsDirsSrc++AppsDirsInc, " ")),
+ Modules = [{list_to_atom(filename:basename(F, ".erl")), F} || F <- ErlFiles],
+ Add = fun (Mod, Dep) ->
+ case lists:keyfind(Dep, 1, Modules) of
+ false -> ok;
+ {_, DepFile} ->
+ {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+ ets:insert(E, {ModFile, DepFile}),
+ digraph:add_vertex(G, Mod),
+ digraph:add_vertex(G, Dep),
+ digraph:add_edge(G, Mod, Dep)
+ end
+ end,
+ AddHd = fun (F, Mod, DepFile) ->
+ case file:open(DepFile, [read]) of
+ {error, enoent} ->
+ ok;
+ {ok, Fd} ->
+ {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+ case ets:match(E, {ModFile, DepFile}) of
+ [] ->
+ ets:insert(E, {ModFile, DepFile}),
+ F(F, Fd, Mod,0);
+ _ -> ok
+ end
+ end
+ end,
+ SearchHrl = fun
+ F(_Hrl, []) -> {error,enoent};
+ F(Hrl, [Dir|Dirs]) ->
+ HrlF = filename:join([Dir,Hrl]),
+ case filelib:is_file(HrlF) of
+ true ->
+ {ok, HrlF};
+ false -> F(Hrl,Dirs)
+ end
+ end,
+ Attr = fun
+ (_F, Mod, behavior, Dep) ->
+ Add(Mod, Dep);
+ (_F, Mod, behaviour, Dep) ->
+ Add(Mod, Dep);
+ (_F, Mod, compile, {parse_transform, Dep}) ->
+ Add(Mod, Dep);
+ (_F, Mod, compile, Opts) when is_list(Opts) ->
+ case proplists:get_value(parse_transform, Opts) of
+ undefined -> ok;
+ Dep -> Add(Mod, Dep)
+ end;
+ (F, Mod, include, Hrl) ->
+ case SearchHrl(Hrl, ["src", "include",AppsDir,DepsDir]++AppsDirs++DepsDirs) of
+ {ok, FoundHrl} -> AddHd(F, Mod, FoundHrl);
+ {error, _} -> false
+ end;
+ (F, Mod, include_lib, Hrl) ->
+ case SearchHrl(Hrl, ["src", "include",AppsDir,DepsDir]++AppsDirs++DepsDirs) of
+ {ok, FoundHrl} -> AddHd(F, Mod, FoundHrl);
+ {error, _} -> false
+ end;
+ (F, Mod, import, {Imp, _}) ->
+ IsFile =
+ case lists:keyfind(Imp, 1, Modules) of
+ false -> false;
+ {_, FilePath} -> filelib:is_file(FilePath)
+ end,
+ case IsFile of
+ false -> ok;
+ true -> Add(Mod, Imp)
+ end;
+ (_, _, _, _) -> ok
+ end,
+ MakeDepend = fun
+ (F, Fd, Mod, StartLocation) ->
+ {ok, Filename} = file:pid2name(Fd),
+ case io:parse_erl_form(Fd, undefined, StartLocation) of
+ {ok, AbsData, EndLocation} ->
+ case AbsData of
+ {attribute, _, Key, Value} ->
+ Attr(F, Mod, Key, Value),
+ F(F, Fd, Mod, EndLocation);
+ _ -> F(F, Fd, Mod, EndLocation)
+ end;
+ {eof, _ } -> file:close(Fd);
+ {error, ErrorDescription } ->
+ file:close(Fd);
+ {error, ErrorInfo, ErrorLocation} ->
+ F(F, Fd, Mod, ErrorLocation)
+ end,
+ ok
+ end,
+ [begin
+ Mod = list_to_atom(filename:basename(F, ".erl")),
+ case file:open(F, [read]) of
+ {ok, Fd} -> MakeDepend(MakeDepend, Fd, Mod,0);
+ {error, enoent} -> ok
+ end
+ end || F <- ErlFiles],
+ Depend = sofs:to_external(sofs:relation_to_family(sofs:relation(ets:tab2list(E)))),
+ CompileFirst = [X || X <- lists:reverse(digraph_utils:topsort(G)), [] =/= digraph:in_neighbours(G, X)],
+ TargetPath = fun(Target) ->
+ case lists:keyfind(Target, 1, Modules) of
+ false -> "";
+ {_, DepFile} ->
+ DirSubname = tl(string:tokens(filename:dirname(DepFile), "/")),
+ string:join(DirSubname ++ [atom_to_list(Target)], "/")
+ end
+ end,
+ Output0 = [
+ "# Generated by Erlang.mk. Edit at your own risk!\n\n",
+ [[F, "::", [[" ", D] || D <- Deps], "; @touch \$$@\n"] || {F, Deps} <- Depend],
+ "\nCOMPILE_FIRST +=", [[" ", TargetPath(CF)] || CF <- CompileFirst], "\n"
+ ],
+ Output = case "é" of
+ [233] -> unicode:characters_to_binary(Output0);
+ _ -> Output0
+ end,
+ ok = file:write_file("$(1)", Output),
+ halt()
+endef
+
+ifeq ($(if $(NO_MAKEDEP),$(wildcard $(PROJECT).d),),)
+$(PROJECT).d:: $(ERL_FILES) $(call core_find,include/,*.hrl) $(MAKEFILE_LIST)
+ $(makedep_verbose) $(call erlang,$(call makedep.erl,$@))
+endif
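+
+# Setting NO_MAKEDEP skips regenerating an existing $(PROJECT).d, which can
+# speed up repeated builds, e.g.: make NO_MAKEDEP=1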
+
+ifeq ($(IS_APP)$(IS_DEP),)
+ifneq ($(words $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES)),0)
+# Rebuild everything when the Makefile changes.
+$(ERLANG_MK_TMP)/last-makefile-change: $(MAKEFILE_LIST) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES); \
+ touch -c $(PROJECT).d; \
+ fi
+ $(verbose) touch $@
+
+$(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES):: $(ERLANG_MK_TMP)/last-makefile-change
+ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change
+endif
+endif
+
+$(PROJECT).d::
+ $(verbose) :
+
+include $(wildcard $(PROJECT).d)
+
+ebin/$(PROJECT).app:: ebin/
+
+ebin/:
+ $(verbose) mkdir -p ebin/
+
+define compile_erl
+ $(erlc_verbose) erlc -v $(if $(IS_DEP),$(filter-out -Werror,$(ERLC_OPTS)),$(ERLC_OPTS)) -o ebin/ \
+ -pa ebin/ -I include/ $(filter-out $(ERLC_EXCLUDE_PATHS),$(COMPILE_FIRST_PATHS) $(1))
+endef
+
+define validate_app_file
+ case file:consult("ebin/$(PROJECT).app") of
+ {ok, _} -> halt();
+ _ -> halt(1)
+ end
+endef
+
+ebin/$(PROJECT).app:: $(ERL_FILES) $(CORE_FILES) $(wildcard src/$(PROJECT).app.src)
+ $(eval FILES_TO_COMPILE := $(filter-out src/$(PROJECT).app.src,$?))
+ $(if $(strip $(FILES_TO_COMPILE)),$(call compile_erl,$(FILES_TO_COMPILE)))
+# Older git versions do not have the --first-parent flag. Do without in that case.
+ $(eval GITDESCRIBE := $(shell git describe --dirty --abbrev=7 --tags --always --first-parent 2>/dev/null \
+ || git describe --dirty --abbrev=7 --tags --always 2>/dev/null || true))
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(filter-out $(ERLC_EXCLUDE_PATHS),$(ERL_FILES) $(CORE_FILES) $(BEAM_FILES)))))))
+ifeq ($(wildcard src/$(PROJECT).app.src),)
+ $(app_verbose) printf '$(subst %,%%,$(subst $(newline),\n,$(subst ','\'',$(call app_file,$(GITDESCRIBE),$(MODULES)))))' \
+ > ebin/$(PROJECT).app
+ $(verbose) if ! $(call erlang,$(call validate_app_file)); then \
+ echo "The .app file produced is invalid. Please verify the value of PROJECT_ENV." >&2; \
+ exit 1; \
+ fi
+else
+ $(verbose) if [ -z "$$(grep -e '^[^%]*{\s*modules\s*,' src/$(PROJECT).app.src)" ]; then \
+ echo "Empty modules entry not found in $(PROJECT).app.src. Please consult the erlang.mk documentation for instructions." >&2; \
+ exit 1; \
+ fi
+ $(appsrc_verbose) cat src/$(PROJECT).app.src \
+ | sed "s/{[[:space:]]*modules[[:space:]]*,[[:space:]]*\[\]}/{modules, \[$(call comma_list,$(MODULES))\]}/" \
+ | sed "s/{id,[[:space:]]*\"git\"}/{id, \"$(subst /,\/,$(GITDESCRIBE))\"}/" \
+ > ebin/$(PROJECT).app
+endif
+ifneq ($(wildcard src/$(PROJECT).appup),)
+ $(verbose) cp src/$(PROJECT).appup ebin/
+endif
+
+clean:: clean-app
+
+clean-app:
+ $(gen_verbose) rm -rf $(PROJECT).d ebin/ priv/mibs/ $(XRL_ERL_FILES) $(YRL_ERL_FILES) \
+ $(addprefix include/,$(patsubst %.mib,%.hrl,$(notdir $(MIB_FILES)))) \
+ $(addprefix include/,$(patsubst %.asn1,%.hrl,$(notdir $(ASN1_FILES)))) \
+ $(addprefix include/,$(patsubst %.asn1,%.asn1db,$(notdir $(ASN1_FILES)))) \
+ $(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES))))
+
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Viktor Söderqvist <viktor@zuiderkwast.se>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: docs-deps
+
+# Configuration.
+
+ALL_DOC_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(DOC_DEPS))
+
+# Targets.
+
+$(foreach dep,$(DOC_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+doc-deps:
+else
+doc-deps: $(ALL_DOC_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_DOC_DEPS_DIRS) ; do $(MAKE) -C $$dep IS_DEP=1; done
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: rel-deps
+
+# Configuration.
+
+ALL_REL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(REL_DEPS))
+
+# Targets.
+
+$(foreach dep,$(REL_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+rel-deps:
+else
+rel-deps: $(ALL_REL_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_REL_DEPS_DIRS) ; do $(MAKE) -C $$dep; done
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: test-deps test-dir test-build clean-test-dir
+
+# Configuration.
+
+TEST_DIR ?= $(CURDIR)/test
+
+ALL_TEST_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(TEST_DEPS))
+
+TEST_ERLC_OPTS ?= +debug_info +warn_export_vars +warn_shadow_vars +warn_obsolete_guard
+TEST_ERLC_OPTS += -DTEST=1
+
+# Targets.
+
+$(foreach dep,$(TEST_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+test-deps:
+else
+test-deps: $(ALL_TEST_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_TEST_DEPS_DIRS) ; do \
+ if [ -z "$(strip $(FULL))" ] && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ else \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ fi \
+ done
+endif
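+
+# Built test dependencies are skipped on subsequent runs thanks to the
+# ebin/dep_built marker above; for example, pass FULL=1 to force a rebuild:
+#   $ make test-deps FULL=1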
+
+ifneq ($(wildcard $(TEST_DIR)),)
+test-dir: $(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build
+ @:
+
+test_erlc_verbose_0 = @echo " ERLC " $(filter-out $(patsubst %,%.erl,$(ERLC_EXCLUDE)),\
+ $(filter %.erl %.core,$(notdir $(FILES_TO_COMPILE))));
+test_erlc_verbose_2 = set -x;
+test_erlc_verbose = $(test_erlc_verbose_$(V))
+
+define compile_test_erl
+ $(test_erlc_verbose) erlc -v $(TEST_ERLC_OPTS) -o $(TEST_DIR) \
+ -pa ebin/ -I include/ $(1)
+endef
+
+ERL_TEST_FILES = $(call core_find,$(TEST_DIR)/,*.erl)
+$(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build: $(ERL_TEST_FILES) $(MAKEFILE_LIST)
+ $(eval FILES_TO_COMPILE := $(if $(filter $(MAKEFILE_LIST),$?),$(filter $(ERL_TEST_FILES),$^),$?))
+ $(if $(strip $(FILES_TO_COMPILE)),$(call compile_test_erl,$(FILES_TO_COMPILE)) && touch $@)
+endif
+
+test-build:: IS_TEST=1
+test-build:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build:: $(if $(wildcard src),$(if $(wildcard ebin/test),,clean)) $(if $(IS_APP),,deps test-deps)
+# We already compiled everything when IS_APP=1.
+ifndef IS_APP
+ifneq ($(wildcard src),)
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(verbose) $(MAKE) --no-print-directory app-build ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(gen_verbose) touch ebin/test
+endif
+ifneq ($(wildcard $(TEST_DIR)),)
+ $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+endif
+endif
+
+# Roughly the same as test-build, but when IS_APP=1.
+# We only care about compiling the current application.
+ifdef IS_APP
+test-build-app:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build-app:: deps test-deps
+ifneq ($(wildcard src),)
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(verbose) $(MAKE) --no-print-directory app-build ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(gen_verbose) touch ebin/test
+endif
+ifneq ($(wildcard $(TEST_DIR)),)
+ $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+endif
+endif
+
+clean:: clean-test-dir
+
+clean-test-dir:
+ifneq ($(wildcard $(TEST_DIR)/*.beam),)
+ $(gen_verbose) rm -f $(TEST_DIR)/*.beam $(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: rebar.config
+
+# We strip out -Werror because we don't want to fail due to
+# warnings when used as a dependency.
+
+compat_prepare_erlc_opts = $(shell echo "$1" | sed 's/, */,/g')
+
+define compat_convert_erlc_opts
+$(if $(filter-out -Werror,$1),\
+ $(if $(findstring +,$1),\
+ $(shell echo $1 | cut -b 2-)))
+endef
+
+define compat_erlc_opts_to_list
+[$(call comma_list,$(foreach o,$(call compat_prepare_erlc_opts,$1),$(call compat_convert_erlc_opts,$o)))]
+endef
+
+define compat_rebar_config
+{deps, [
+$(call comma_list,$(foreach d,$(DEPS),\
+ $(if $(filter hex,$(call dep_fetch,$d)),\
+ {$(call dep_name,$d)$(comma)"$(call dep_repo,$d)"},\
+ {$(call dep_name,$d)$(comma)".*"$(comma){git,"$(call dep_repo,$d)"$(comma)"$(call dep_commit,$d)"}})))
+]}.
+{erl_opts, $(call compat_erlc_opts_to_list,$(ERLC_OPTS))}.
+endef
+
+rebar.config:
+ $(gen_verbose) $(call core_render,compat_rebar_config,rebar.config)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter asciideck,$(DEPS) $(DOC_DEPS)),asciideck)
+
+.PHONY: asciidoc asciidoc-guide asciidoc-manual install-asciidoc distclean-asciidoc-guide distclean-asciidoc-manual
+
+# Core targets.
+
+docs:: asciidoc
+
+distclean:: distclean-asciidoc-guide distclean-asciidoc-manual
+
+# Plugin-specific targets.
+
+asciidoc: asciidoc-guide asciidoc-manual
+
+# User guide.
+
+ifeq ($(wildcard doc/src/guide/book.asciidoc),)
+asciidoc-guide:
+else
+asciidoc-guide: distclean-asciidoc-guide doc-deps
+ a2x -v -f pdf doc/src/guide/book.asciidoc && mv doc/src/guide/book.pdf doc/guide.pdf
+ a2x -v -f chunked doc/src/guide/book.asciidoc && mv doc/src/guide/book.chunked/ doc/html/
+
+distclean-asciidoc-guide:
+ $(gen_verbose) rm -rf doc/html/ doc/guide.pdf
+endif
+
+# Man pages.
+
+ASCIIDOC_MANUAL_FILES := $(wildcard doc/src/manual/*.asciidoc)
+
+ifeq ($(ASCIIDOC_MANUAL_FILES),)
+asciidoc-manual:
+else
+
+# Configuration.
+
+MAN_INSTALL_PATH ?= /usr/local/share/man
+MAN_SECTIONS ?= 3 7
+MAN_PROJECT ?= $(shell echo $(PROJECT) | sed 's/^./\U&\E/')
+MAN_VERSION ?= $(PROJECT_VERSION)
+
+# Plugin-specific targets.
+
+define asciidoc2man.erl
+try
+ [begin
+ io:format(" ADOC ~s~n", [F]),
+ ok = asciideck:to_manpage(asciideck:parse_file(F), #{
+ compress => gzip,
+ outdir => filename:dirname(F),
+ extra2 => "$(MAN_PROJECT) $(MAN_VERSION)",
+ extra3 => "$(MAN_PROJECT) Function Reference"
+ })
+ end || F <- [$(shell echo $(addprefix $(comma)\",$(addsuffix \",$1)) | sed 's/^.//')]],
+ halt(0)
+catch C:E ->
+ io:format("Exception ~p:~p~nStacktrace: ~p~n", [C, E, erlang:get_stacktrace()]),
+ halt(1)
+end.
+endef
+
+asciidoc-manual:: doc-deps
+
+asciidoc-manual:: $(ASCIIDOC_MANUAL_FILES)
+ $(gen_verbose) $(call erlang,$(call asciidoc2man.erl,$?))
+ $(verbose) $(foreach s,$(MAN_SECTIONS),mkdir -p doc/man$s/ && mv doc/src/manual/*.$s.gz doc/man$s/;)
+
+install-docs:: install-asciidoc
+
+install-asciidoc: asciidoc-manual
+ $(foreach s,$(MAN_SECTIONS),\
+ mkdir -p $(MAN_INSTALL_PATH)/man$s/ && \
+ install -g `id -g` -o `id -u` -m 0644 doc/man$s/*.gz $(MAN_INSTALL_PATH)/man$s/;)
+
+distclean-asciidoc-manual:
+ $(gen_verbose) rm -rf $(addprefix doc/man,$(MAN_SECTIONS))
+endif
+endif
+
+# Copyright (c) 2014-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: bootstrap bootstrap-lib bootstrap-rel new list-templates
+
+# Core targets.
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Bootstrap targets:" \
+ " bootstrap Generate a skeleton of an OTP application" \
+ " bootstrap-lib Generate a skeleton of an OTP library" \
+ " bootstrap-rel Generate the files needed to build a release" \
+ " new-app in=NAME Create a new local OTP application NAME" \
+ " new-lib in=NAME Create a new local OTP library NAME" \
+ " new t=TPL n=NAME Generate a module NAME based on the template TPL" \
+		" new t=T n=N in=APP Generate a module N based on the template T in APP" \
+ " list-templates List available templates"
+
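+# Example invocations of the targets above ("webfrontend" and "my_server"
+# are placeholder names):
+#   $ make bootstrap bootstrap-rel
+#   $ make new-app in=webfrontend
+#   $ make new t=gen_server n=my_server in=webfrontend
+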
+# Bootstrap templates.
+
+define bs_appsrc
+{application, $p, [
+ {description, ""},
+ {vsn, "0.1.0"},
+ {id, "git"},
+ {modules, []},
+ {registered, []},
+ {applications, [
+ kernel,
+ stdlib
+ ]},
+ {mod, {$p_app, []}},
+ {env, []}
+]}.
+endef
+
+define bs_appsrc_lib
+{application, $p, [
+ {description, ""},
+ {vsn, "0.1.0"},
+ {id, "git"},
+ {modules, []},
+ {registered, []},
+ {applications, [
+ kernel,
+ stdlib
+ ]}
+]}.
+endef
+
+# To prevent autocompletion issues with ZSH, we add "include erlang.mk"
+# separately during the actual bootstrap.
+define bs_Makefile
+PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.1.0
+$(if $(SP),
+# Whitespace to be used when creating files from templates.
+SP = $(SP)
+)
+endef
+
+define bs_apps_Makefile
+PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.1.0
+$(if $(SP),
+# Whitespace to be used when creating files from templates.
+SP = $(SP)
+)
+# Make sure we know where the applications are located.
+ROOT_DIR ?= $(call core_relpath,$(dir $(ERLANG_MK_FILENAME)),$(APPS_DIR)/app)
+APPS_DIR ?= ..
+DEPS_DIR ?= $(call core_relpath,$(DEPS_DIR),$(APPS_DIR)/app)
+
+include $$(ROOT_DIR)/erlang.mk
+endef
+
+define bs_app
+-module($p_app).
+-behaviour(application).
+
+-export([start/2]).
+-export([stop/1]).
+
+start(_Type, _Args) ->
+ $p_sup:start_link().
+
+stop(_State) ->
+ ok.
+endef
+
+define bs_relx_config
+{release, {$p_release, "1"}, [$p, sasl, runtime_tools]}.
+{extended_start_script, true}.
+{sys_config, "config/sys.config"}.
+{vm_args, "config/vm.args"}.
+endef
+
+define bs_sys_config
+[
+].
+endef
+
+define bs_vm_args
+-name $p@127.0.0.1
+-setcookie $p
+-heart
+endef
+
+# Normal templates.
+
+define tpl_supervisor
+-module($(n)).
+-behaviour(supervisor).
+
+-export([start_link/0]).
+-export([init/1]).
+
+start_link() ->
+ supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+init([]) ->
+ Procs = [],
+ {ok, {{one_for_one, 1, 5}, Procs}}.
+endef
+
+define tpl_gen_server
+-module($(n)).
+-behaviour(gen_server).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_server.
+-export([init/1]).
+-export([handle_call/3]).
+-export([handle_cast/2]).
+-export([handle_info/2]).
+-export([terminate/2]).
+-export([code_change/3]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_server:start_link(?MODULE, [], []).
+
+%% gen_server.
+
+init([]) ->
+ {ok, #state{}}.
+
+handle_call(_Request, _From, State) ->
+ {reply, ignored, State}.
+
+handle_cast(_Msg, State) ->
+ {noreply, State}.
+
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+endef
+
+define tpl_module
+-module($(n)).
+-export([]).
+endef
+
+define tpl_cowboy_http
+-module($(n)).
+-behaviour(cowboy_http_handler).
+
+-export([init/3]).
+-export([handle/2]).
+-export([terminate/3]).
+
+-record(state, {
+}).
+
+init(_, Req, _Opts) ->
+ {ok, Req, #state{}}.
+
+handle(Req, State=#state{}) ->
+ {ok, Req2} = cowboy_req:reply(200, Req),
+ {ok, Req2, State}.
+
+terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_gen_fsm
+-module($(n)).
+-behaviour(gen_fsm).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_fsm.
+-export([init/1]).
+-export([state_name/2]).
+-export([handle_event/3]).
+-export([state_name/3]).
+-export([handle_sync_event/4]).
+-export([handle_info/3]).
+-export([terminate/3]).
+-export([code_change/4]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_fsm:start_link(?MODULE, [], []).
+
+%% gen_fsm.
+
+init([]) ->
+ {ok, state_name, #state{}}.
+
+state_name(_Event, StateData) ->
+ {next_state, state_name, StateData}.
+
+handle_event(_Event, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+state_name(_Event, _From, StateData) ->
+ {reply, ignored, state_name, StateData}.
+
+handle_sync_event(_Event, _From, StateName, StateData) ->
+ {reply, ignored, StateName, StateData}.
+
+handle_info(_Info, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+terminate(_Reason, _StateName, _StateData) ->
+ ok.
+
+code_change(_OldVsn, StateName, StateData, _Extra) ->
+ {ok, StateName, StateData}.
+endef
+
+define tpl_gen_statem
+-module($(n)).
+-behaviour(gen_statem).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_statem.
+-export([callback_mode/0]).
+-export([init/1]).
+-export([state_name/3]).
+-export([handle_event/4]).
+-export([terminate/3]).
+-export([code_change/4]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_statem:start_link(?MODULE, [], []).
+
+%% gen_statem.
+
+callback_mode() ->
+ state_functions.
+
+init([]) ->
+ {ok, state_name, #state{}}.
+
+state_name(_EventType, _EventData, StateData) ->
+ {next_state, state_name, StateData}.
+
+handle_event(_EventType, _EventData, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+terminate(_Reason, _StateName, _StateData) ->
+ ok.
+
+code_change(_OldVsn, StateName, StateData, _Extra) ->
+ {ok, StateName, StateData}.
+endef
+
+define tpl_cowboy_loop
+-module($(n)).
+-behaviour(cowboy_loop_handler).
+
+-export([init/3]).
+-export([info/3]).
+-export([terminate/3]).
+
+-record(state, {
+}).
+
+init(_, Req, _Opts) ->
+ {loop, Req, #state{}, 5000, hibernate}.
+
+info(_Info, Req, State) ->
+ {loop, Req, State, hibernate}.
+
+terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_cowboy_rest
+-module($(n)).
+
+-export([init/3]).
+-export([content_types_provided/2]).
+-export([get_html/2]).
+
+init(_, _Req, _Opts) ->
+ {upgrade, protocol, cowboy_rest}.
+
+content_types_provided(Req, State) ->
+ {[{{<<"text">>, <<"html">>, '*'}, get_html}], Req, State}.
+
+get_html(Req, State) ->
+ {<<"<html><body>This is REST!</body></html>">>, Req, State}.
+endef
+
+define tpl_cowboy_ws
+-module($(n)).
+-behaviour(cowboy_websocket_handler).
+
+-export([init/3]).
+-export([websocket_init/3]).
+-export([websocket_handle/3]).
+-export([websocket_info/3]).
+-export([websocket_terminate/3]).
+
+-record(state, {
+}).
+
+init(_, _, _) ->
+ {upgrade, protocol, cowboy_websocket}.
+
+websocket_init(_, Req, _Opts) ->
+ Req2 = cowboy_req:compact(Req),
+ {ok, Req2, #state{}}.
+
+websocket_handle({text, Data}, Req, State) ->
+ {reply, {text, Data}, Req, State};
+websocket_handle({binary, Data}, Req, State) ->
+ {reply, {binary, Data}, Req, State};
+websocket_handle(_Frame, Req, State) ->
+ {ok, Req, State}.
+
+websocket_info(_Info, Req, State) ->
+ {ok, Req, State}.
+
+websocket_terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_ranch_protocol
+-module($(n)).
+-behaviour(ranch_protocol).
+
+-export([start_link/4]).
+-export([init/4]).
+
+-type opts() :: [].
+-export_type([opts/0]).
+
+-record(state, {
+ socket :: inet:socket(),
+ transport :: module()
+}).
+
+start_link(Ref, Socket, Transport, Opts) ->
+ Pid = spawn_link(?MODULE, init, [Ref, Socket, Transport, Opts]),
+ {ok, Pid}.
+
+-spec init(ranch:ref(), inet:socket(), module(), opts()) -> ok.
+init(Ref, Socket, Transport, _Opts) ->
+ ok = ranch:accept_ack(Ref),
+ loop(#state{socket=Socket, transport=Transport}).
+
+loop(State) ->
+ loop(State).
+endef
+
+# Plugin-specific targets.
+
+ifndef WS
+ifdef SP
+WS = $(subst a,,a $(wordlist 1,$(SP),a a a a a a a a a a a a a a a a a a a a))
+else
+WS = $(tab)
+endif
+endif
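+
+# For example, to generate files indented with two spaces instead of tabs:
+#   $ make bootstrap SP=2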
+
+bootstrap:
+ifneq ($(wildcard src/),)
+ $(error Error: src/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(eval n := $(PROJECT)_sup)
+ $(verbose) $(call core_render,bs_Makefile,Makefile)
+ $(verbose) echo "include erlang.mk" >> Makefile
+ $(verbose) mkdir src/
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc,src/$(PROJECT).app.src)
+endif
+ $(verbose) $(call core_render,bs_app,src/$(PROJECT)_app.erl)
+ $(verbose) $(call core_render,tpl_supervisor,src/$(PROJECT)_sup.erl)
+
+bootstrap-lib:
+ifneq ($(wildcard src/),)
+ $(error Error: src/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(verbose) $(call core_render,bs_Makefile,Makefile)
+ $(verbose) echo "include erlang.mk" >> Makefile
+ $(verbose) mkdir src/
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc_lib,src/$(PROJECT).app.src)
+endif
+
+bootstrap-rel:
+ifneq ($(wildcard relx.config),)
+ $(error Error: relx.config already exists)
+endif
+ifneq ($(wildcard config/),)
+ $(error Error: config/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(verbose) $(call core_render,bs_relx_config,relx.config)
+ $(verbose) mkdir config/
+ $(verbose) $(call core_render,bs_sys_config,config/sys.config)
+ $(verbose) $(call core_render,bs_vm_args,config/vm.args)
+
+new-app:
+ifndef in
+ $(error Usage: $(MAKE) new-app in=APP)
+endif
+ifneq ($(wildcard $(APPS_DIR)/$in),)
+ $(error Error: Application $in already exists)
+endif
+ $(eval p := $(in))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(eval n := $(in)_sup)
+ $(verbose) mkdir -p $(APPS_DIR)/$p/src/
+ $(verbose) $(call core_render,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile)
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc,$(APPS_DIR)/$p/src/$p.app.src)
+endif
+ $(verbose) $(call core_render,bs_app,$(APPS_DIR)/$p/src/$p_app.erl)
+ $(verbose) $(call core_render,tpl_supervisor,$(APPS_DIR)/$p/src/$p_sup.erl)
+
+new-lib:
+ifndef in
+ $(error Usage: $(MAKE) new-lib in=APP)
+endif
+ifneq ($(wildcard $(APPS_DIR)/$in),)
+ $(error Error: Application $in already exists)
+endif
+ $(eval p := $(in))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(verbose) mkdir -p $(APPS_DIR)/$p/src/
+ $(verbose) $(call core_render,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile)
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc_lib,$(APPS_DIR)/$p/src/$p.app.src)
+endif
+
+new:
+ifeq ($(wildcard src/)$(in),)
+ $(error Error: src/ directory does not exist)
+endif
+ifndef t
+ $(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP])
+endif
+ifndef n
+ $(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP])
+endif
+ifdef in
+ $(verbose) $(call core_render,tpl_$(t),$(APPS_DIR)/$(in)/src/$(n).erl)
+else
+ $(verbose) $(call core_render,tpl_$(t),src/$(n).erl)
+endif
+
+list-templates:
+	$(verbose) echo Available templates:
+ $(verbose) printf " %s\n" $(sort $(patsubst tpl_%,%,$(filter tpl_%,$(.VARIABLES))))
+
+# Copyright (c) 2014-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: clean-c_src distclean-c_src-env
+
+# Configuration.
+
+C_SRC_DIR ?= $(CURDIR)/c_src
+C_SRC_ENV ?= $(C_SRC_DIR)/env.mk
+C_SRC_OUTPUT ?= $(CURDIR)/priv/$(PROJECT)
+C_SRC_TYPE ?= shared
+
+# System type and C compiler/flags.
+
+ifeq ($(PLATFORM),msys2)
+ C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?= .exe
+ C_SRC_OUTPUT_SHARED_EXTENSION ?= .dll
+else
+ C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?=
+ C_SRC_OUTPUT_SHARED_EXTENSION ?= .so
+endif
+
+ifeq ($(C_SRC_TYPE),shared)
+ C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_SHARED_EXTENSION)
+else
+ C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_EXECUTABLE_EXTENSION)
+endif
+
+ifeq ($(PLATFORM),msys2)
+# We hardcode the compiler used on MSYS2. The default CC=cc does
+# not produce working code. The "gcc" MSYS2 package also doesn't.
+ CC = /mingw64/bin/gcc
+ export CC
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),darwin)
+ CC ?= cc
+ CFLAGS ?= -O3 -std=c99 -arch x86_64 -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -arch x86_64 -Wall
+ LDFLAGS ?= -arch x86_64 -flat_namespace -undefined suppress
+else ifeq ($(PLATFORM),freebsd)
+ CC ?= cc
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),linux)
+ CC ?= gcc
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+endif
+
+ifneq ($(PLATFORM),msys2)
+ CFLAGS += -fPIC
+ CXXFLAGS += -fPIC
+endif
+
+CFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
+CXXFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
+
+LDLIBS += -L"$(ERL_INTERFACE_LIB_DIR)" -lei
+
+# Verbosity.
+
+c_verbose_0 = @echo " C " $(filter-out $(notdir $(MAKEFILE_LIST) $(C_SRC_ENV)),$(^F));
+c_verbose = $(c_verbose_$(V))
+
+cpp_verbose_0 = @echo " CPP " $(filter-out $(notdir $(MAKEFILE_LIST) $(C_SRC_ENV)),$(^F));
+cpp_verbose = $(cpp_verbose_$(V))
+
+link_verbose_0 = @echo " LD " $(@F);
+link_verbose = $(link_verbose_$(V))
+
+# Targets.
+
+ifeq ($(wildcard $(C_SRC_DIR)),)
+else ifneq ($(wildcard $(C_SRC_DIR)/Makefile),)
+app:: app-c_src
+
+test-build:: app-c_src
+
+app-c_src:
+ $(MAKE) -C $(C_SRC_DIR)
+
+clean::
+ $(MAKE) -C $(C_SRC_DIR) clean
+
+else
+
+ifeq ($(SOURCES),)
+SOURCES := $(sort $(foreach pat,*.c *.C *.cc *.cpp,$(call core_find,$(C_SRC_DIR)/,$(pat))))
+endif
+OBJECTS = $(addsuffix .o, $(basename $(SOURCES)))
+
+COMPILE_C = $(c_verbose) $(CC) $(CFLAGS) $(CPPFLAGS) -c
+COMPILE_CPP = $(cpp_verbose) $(CXX) $(CXXFLAGS) $(CPPFLAGS) -c
+
+app:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
+
+test-build:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
+
+$(C_SRC_OUTPUT_FILE): $(OBJECTS)
+ $(verbose) mkdir -p $(dir $@)
+ $(link_verbose) $(CC) $(OBJECTS) \
+ $(LDFLAGS) $(if $(filter $(C_SRC_TYPE),shared),-shared) $(LDLIBS) \
+ -o $(C_SRC_OUTPUT_FILE)
+
+$(OBJECTS): $(MAKEFILE_LIST) $(C_SRC_ENV)
+
+%.o: %.c
+ $(COMPILE_C) $(OUTPUT_OPTION) $<
+
+%.o: %.cc
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+%.o: %.C
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+%.o: %.cpp
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+clean:: clean-c_src
+
+clean-c_src:
+ $(gen_verbose) rm -f $(C_SRC_OUTPUT_FILE) $(OBJECTS)
+
+endif
+
+ifneq ($(wildcard $(C_SRC_DIR)),)
+ERL_ERTS_DIR = $(shell $(ERL) -eval 'io:format("~s~n", [code:lib_dir(erts)]), halt().')
+
+$(C_SRC_ENV):
+ $(verbose) $(ERL) -eval "file:write_file(\"$(call core_native_path,$(C_SRC_ENV))\", \
+ io_lib:format( \
+ \"# Generated by Erlang.mk. Edit at your own risk!~n~n\" \
+ \"ERTS_INCLUDE_DIR ?= ~s/erts-~s/include/~n\" \
+ \"ERL_INTERFACE_INCLUDE_DIR ?= ~s~n\" \
+ \"ERL_INTERFACE_LIB_DIR ?= ~s~n\" \
+ \"ERTS_DIR ?= $(ERL_ERTS_DIR)~n\", \
+ [code:root_dir(), erlang:system_info(version), \
+ code:lib_dir(erl_interface, include), \
+ code:lib_dir(erl_interface, lib)])), \
+ halt()."
+
+distclean:: distclean-c_src-env
+
+distclean-c_src-env:
+ $(gen_verbose) rm -f $(C_SRC_ENV)
+
+-include $(C_SRC_ENV)
+
+ifneq ($(ERL_ERTS_DIR),$(ERTS_DIR))
+$(shell rm -f $(C_SRC_ENV))
+endif
+endif
+
+# Templates.
+
+define bs_c_nif
+#include "erl_nif.h"
+
+static int loads = 0;
+
+static int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
+{
+ /* Initialize private data. */
+ *priv_data = NULL;
+
+ loads++;
+
+ return 0;
+}
+
+static int upgrade(ErlNifEnv* env, void** priv_data, void** old_priv_data, ERL_NIF_TERM load_info)
+{
+ /* Convert the private data to the new version. */
+ *priv_data = *old_priv_data;
+
+ loads++;
+
+ return 0;
+}
+
+static void unload(ErlNifEnv* env, void* priv_data)
+{
+ if (loads == 1) {
+ /* Destroy the private data. */
+ }
+
+ loads--;
+}
+
+static ERL_NIF_TERM hello(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ if (enif_is_atom(env, argv[0])) {
+ return enif_make_tuple2(env,
+ enif_make_atom(env, "hello"),
+ argv[0]);
+ }
+
+ return enif_make_tuple2(env,
+ enif_make_atom(env, "error"),
+ enif_make_atom(env, "badarg"));
+}
+
+static ErlNifFunc nif_funcs[] = {
+ {"hello", 1, hello}
+};
+
+ERL_NIF_INIT($n, nif_funcs, load, NULL, upgrade, unload)
+endef
+
+define bs_erl_nif
+-module($n).
+
+-export([hello/1]).
+
+-on_load(on_load/0).
+on_load() ->
+ PrivDir = case code:priv_dir(?MODULE) of
+ {error, _} ->
+ AppPath = filename:dirname(filename:dirname(code:which(?MODULE))),
+ filename:join(AppPath, "priv");
+ Path ->
+ Path
+ end,
+ erlang:load_nif(filename:join(PrivDir, atom_to_list(?MODULE)), 0).
+
+hello(_) ->
+ erlang:nif_error({not_loaded, ?MODULE}).
+endef
+
+new-nif:
+ifneq ($(wildcard $(C_SRC_DIR)/$n.c),)
+ $(error Error: $(C_SRC_DIR)/$n.c already exists)
+endif
+ifneq ($(wildcard src/$n.erl),)
+ $(error Error: src/$n.erl already exists)
+endif
+ifndef n
+ $(error Usage: $(MAKE) new-nif n=NAME [in=APP])
+endif
+ifdef in
+ $(verbose) $(MAKE) -C $(APPS_DIR)/$(in)/ new-nif n=$n in=
+else
+ $(verbose) mkdir -p $(C_SRC_DIR) src/
+ $(verbose) $(call core_render,bs_c_nif,$(C_SRC_DIR)/$n.c)
+ $(verbose) $(call core_render,bs_erl_nif,src/$n.erl)
+endif
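+
+# Example session ("my_nif" is a placeholder name): generate the skeleton,
+# build it, then call the generated function from an Erlang shell:
+#   $ make new-nif n=my_nif
+#   $ make
+#   1> my_nif:hello(world).
+#   {hello,world}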
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: ci ci-prepare ci-setup
+
+CI_OTP ?=
+CI_HIPE ?=
+CI_ERLLVM ?=
+
+ifeq ($(CI_VM),native)
+ERLC_OPTS += +native
+TEST_ERLC_OPTS += +native
+else ifeq ($(CI_VM),erllvm)
+ERLC_OPTS += +native +'{hipe, [to_llvm]}'
+TEST_ERLC_OPTS += +native +'{hipe, [to_llvm]}'
+endif
+
+ifeq ($(strip $(CI_OTP) $(CI_HIPE) $(CI_ERLLVM)),)
+ci::
+else
+
+ci:: $(addprefix ci-,$(CI_OTP) $(addsuffix -native,$(CI_HIPE)) $(addsuffix -erllvm,$(CI_ERLLVM)))
+
+ci-prepare: $(addprefix $(KERL_INSTALL_DIR)/,$(CI_OTP) $(addsuffix -native,$(CI_HIPE)))
+
+ci-setup::
+ $(verbose) :
+
+ci-extra::
+ $(verbose) :
+
+ci_verbose_0 = @echo " CI " $(1);
+ci_verbose = $(ci_verbose_$(V))
+
+define ci_target
+ci-$1: $(KERL_INSTALL_DIR)/$2
+ $(verbose) $(MAKE) --no-print-directory clean
+ $(ci_verbose) \
+ PATH="$(KERL_INSTALL_DIR)/$2/bin:$(PATH)" \
+ CI_OTP_RELEASE="$1" \
+ CT_OPTS="-label $1" \
+ CI_VM="$3" \
+ $(MAKE) ci-setup tests
+ $(verbose) $(MAKE) --no-print-directory ci-extra
+endef
+
+$(foreach otp,$(CI_OTP),$(eval $(call ci_target,$(otp),$(otp),otp)))
+$(foreach otp,$(CI_HIPE),$(eval $(call ci_target,$(otp)-native,$(otp)-native,native)))
+$(foreach otp,$(CI_ERLLVM),$(eval $(call ci_target,$(otp)-erllvm,$(otp)-native,erllvm)))
+
+$(foreach otp,$(filter-out $(ERLANG_OTP),$(CI_OTP)),$(eval $(call kerl_otp_target,$(otp))))
+$(foreach otp,$(filter-out $(ERLANG_HIPE),$(sort $(CI_HIPE) $(CI_ERLLVM))),$(eval $(call kerl_hipe_target,$(otp))))
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Continuous Integration targets:" \
+ " ci Run '$(MAKE) tests' on all configured Erlang versions." \
+ "" \
+ "The CI_OTP variable must be defined with the Erlang versions" \
+ "that must be tested. For example: CI_OTP = OTP-17.3.4 OTP-17.5.3"
+
+endif
+
+# Copyright (c) 2020, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifdef CONCUERROR_TESTS
+
+.PHONY: concuerror distclean-concuerror
+
+# Configuration
+
+CONCUERROR_LOGS_DIR ?= $(CURDIR)/logs
+CONCUERROR_OPTS ?=
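+
+# Tests are given as module:function pairs, each producing its own
+# concuerror-module-function target; for example ("my_mod" and "my_test"
+# are placeholders):
+#   CONCUERROR_TESTS = my_mod:my_test other_mod:other_test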
+
+# Core targets.
+
+check:: concuerror
+
+ifndef KEEP_LOGS
+distclean:: distclean-concuerror
+endif
+
+# Plugin-specific targets.
+
+$(ERLANG_MK_TMP)/Concuerror/bin/concuerror: | $(ERLANG_MK_TMP)
+ $(verbose) git clone https://github.com/parapluu/Concuerror $(ERLANG_MK_TMP)/Concuerror
+ $(verbose) $(MAKE) -C $(ERLANG_MK_TMP)/Concuerror
+
+$(CONCUERROR_LOGS_DIR):
+ $(verbose) mkdir -p $(CONCUERROR_LOGS_DIR)
+
+define concuerror_html_report
+<!DOCTYPE html>
+<html lang="en">
+<head>
+<meta charset="utf-8">
+<title>Concuerror HTML report</title>
+</head>
+<body>
+<h1>Concuerror HTML report</h1>
+<p>Generated on $(concuerror_date)</p>
+<ul>
+$(foreach t,$(concuerror_targets),<li><a href="$(t).txt">$(t)</a></li>)
+</ul>
+</body>
+</html>
+endef
+
+concuerror: $(addprefix concuerror-,$(subst :,-,$(CONCUERROR_TESTS)))
+ $(eval concuerror_date := $(shell date))
+ $(eval concuerror_targets := $^)
+ $(verbose) $(call core_render,concuerror_html_report,$(CONCUERROR_LOGS_DIR)/concuerror.html)
+
+define concuerror_target
+.PHONY: concuerror-$1-$2
+
+concuerror-$1-$2: test-build | $(ERLANG_MK_TMP)/Concuerror/bin/concuerror $(CONCUERROR_LOGS_DIR)
+ $(ERLANG_MK_TMP)/Concuerror/bin/concuerror \
+ --pa $(CURDIR)/ebin --pa $(TEST_DIR) \
+ -o $(CONCUERROR_LOGS_DIR)/concuerror-$1-$2.txt \
+ $$(CONCUERROR_OPTS) -m $1 -t $2
+endef
+
+$(foreach test,$(CONCUERROR_TESTS),$(eval $(call concuerror_target,$(firstword $(subst :, ,$(test))),$(lastword $(subst :, ,$(test))))))
+
+distclean-concuerror:
+ $(gen_verbose) rm -rf $(CONCUERROR_LOGS_DIR)
+
+endif
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: ct apps-ct distclean-ct
+
+# Configuration.
+
+CT_OPTS ?=
+
+ifneq ($(wildcard $(TEST_DIR)),)
+ifndef CT_SUITES
+CT_SUITES := $(sort $(subst _SUITE.erl,,$(notdir $(call core_find,$(TEST_DIR)/,*_SUITE.erl))))
+endif
+endif
+CT_SUITES ?=
+CT_LOGS_DIR ?= $(CURDIR)/logs
+
+# Core targets.
+
+tests:: ct
+
+ifndef KEEP_LOGS
+distclean:: distclean-ct
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Common_test targets:" \
+ " ct Run all the common_test suites for this project" \
+ "" \
+ "All your common_test suites have their associated targets." \
+		"A suite named http_SUITE can be run using the ct-http target."
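+
+# Example invocations ("http", "my_group" and "my_case" are placeholders);
+# t narrows the run to a group (or group:case) and c to a case, see below:
+#   $ make ct-http
+#   $ make ct-http t=my_group:my_case
+#   $ make ct-http c=my_case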
+
+# Plugin-specific targets.
+
+CT_RUN = ct_run \
+ -no_auto_compile \
+ -noinput \
+ -pa $(CURDIR)/ebin $(TEST_DIR) \
+ -dir $(TEST_DIR) \
+ -logdir $(CT_LOGS_DIR)
+
+ifeq ($(CT_SUITES),)
+ct: $(if $(IS_APP)$(ROOT_DIR),,apps-ct)
+else
+# We do not run tests if we are in an apps/* directory that has no test directory.
+ifneq ($(IS_APP)$(wildcard $(TEST_DIR)),1)
+ct: test-build $(if $(IS_APP)$(ROOT_DIR),,apps-ct)
+ $(verbose) mkdir -p $(CT_LOGS_DIR)
+ $(gen_verbose) $(CT_RUN) -sname ct_$(PROJECT) -suite $(addsuffix _SUITE,$(CT_SUITES)) $(CT_OPTS)
+endif
+endif
+
+ifneq ($(ALL_APPS_DIRS),)
+define ct_app_target
+apps-ct-$1: test-build
+ $$(MAKE) -C $1 ct IS_APP=1
+endef
+
+$(foreach app,$(ALL_APPS_DIRS),$(eval $(call ct_app_target,$(app))))
+
+apps-ct: $(addprefix apps-ct-,$(ALL_APPS_DIRS))
+endif
+
+ifdef t
+ifeq (,$(findstring :,$t))
+CT_EXTRA = -group $t
+else
+t_words = $(subst :, ,$t)
+CT_EXTRA = -group $(firstword $(t_words)) -case $(lastword $(t_words))
+endif
+else
+ifdef c
+CT_EXTRA = -case $c
+else
+CT_EXTRA =
+endif
+endif
+
+define ct_suite_target
+ct-$(1): test-build
+ $(verbose) mkdir -p $(CT_LOGS_DIR)
+ $(gen_verbose_esc) $(CT_RUN) -sname ct_$(PROJECT) -suite $(addsuffix _SUITE,$(1)) $(CT_EXTRA) $(CT_OPTS)
+endef
+
+$(foreach test,$(CT_SUITES),$(eval $(call ct_suite_target,$(test))))
+
+distclean-ct:
+ $(gen_verbose) rm -rf $(CT_LOGS_DIR)
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: plt distclean-plt dialyze
+
+# Configuration.
+
+DIALYZER_PLT ?= $(CURDIR)/.$(PROJECT).plt
+export DIALYZER_PLT
+
+PLT_APPS ?=
+DIALYZER_DIRS ?= --src -r $(wildcard src) $(ALL_APPS_DIRS)
+DIALYZER_OPTS ?= -Werror_handling -Wrace_conditions -Wunmatched_returns # -Wunderspecs
+DIALYZER_PLT_OPTS ?=
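+
+# Example overrides ("crypto" and "ssl" stand in for real applications):
+#   PLT_APPS = crypto ssl
+#   DIALYZER_OPTS = -Werror_handling -Wunmatched_returns -Wunderspecs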
+
+# Core targets.
+
+check:: dialyze
+
+distclean:: distclean-plt
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Dialyzer targets:" \
+ " plt Build a PLT file for this project" \
+ " dialyze Analyze the project using Dialyzer"
+
+# Plugin-specific targets.
+
+define filter_opts.erl
+ Opts = init:get_plain_arguments(),
+ {Filtered, _} = lists:foldl(fun
+ (O, {Os, true}) -> {[O|Os], false};
+ (O = "-D", {Os, _}) -> {[O|Os], true};
+ (O = [\\$$-, \\$$D, _ | _], {Os, _}) -> {[O|Os], false};
+ (O = "-I", {Os, _}) -> {[O|Os], true};
+ (O = [\\$$-, \\$$I, _ | _], {Os, _}) -> {[O|Os], false};
+ (O = "-pa", {Os, _}) -> {[O|Os], true};
+ (_, Acc) -> Acc
+ end, {[], false}, Opts),
+ io:format("~s~n", [string:join(lists:reverse(Filtered), " ")]),
+ halt().
+endef
+
+# DIALYZER_PLT is a variable understood directly by Dialyzer.
+#
+# We append the path to erts at the end of the PLT. This works
+# because the PLT file is in the external term format and the
+# function binary_to_term/1 ignores any trailing data.
+$(DIALYZER_PLT): deps app
+ $(eval DEPS_LOG := $(shell test -f $(ERLANG_MK_TMP)/deps.log && \
+ while read p; do test -d $$p/ebin && echo $$p/ebin; done <$(ERLANG_MK_TMP)/deps.log))
+ $(verbose) dialyzer --build_plt $(DIALYZER_PLT_OPTS) --apps \
+ erts kernel stdlib $(PLT_APPS) $(OTP_DEPS) $(LOCAL_DEPS) $(DEPS_LOG) || test $$? -eq 2
+ $(verbose) $(ERL) -eval 'io:format("~n~s~n", [code:lib_dir(erts)]), halt().' >> $@
+
+plt: $(DIALYZER_PLT)
+
+distclean-plt:
+ $(gen_verbose) rm -f $(DIALYZER_PLT)
+
+ifneq ($(wildcard $(DIALYZER_PLT)),)
+dialyze: $(if $(filter --src,$(DIALYZER_DIRS)),,deps app)
+ $(verbose) if ! tail -n1 $(DIALYZER_PLT) | \
+ grep -q "^`$(ERL) -eval 'io:format("~s", [code:lib_dir(erts)]), halt().'`$$"; then \
+ rm $(DIALYZER_PLT); \
+ $(MAKE) plt; \
+ fi
+else
+dialyze: $(DIALYZER_PLT)
+endif
+ $(verbose) dialyzer --no_native `$(ERL) \
+ -eval "$(subst $(newline),,$(call escape_dquotes,$(call filter_opts.erl)))" \
+ -extra $(ERLC_OPTS)` $(DIALYZER_DIRS) $(DIALYZER_OPTS) $(if $(wildcard ebin/),-pa ebin/)
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-edoc edoc
+
+# Configuration.
+
+EDOC_OPTS ?=
+EDOC_SRC_DIRS ?=
+EDOC_OUTPUT ?= doc
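+
+# EDOC_OPTS takes EDoc options as Erlang terms, spliced into the options
+# list below; for example:
+#   EDOC_OPTS = {preprocess, true}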
+
+define edoc.erl
+ SrcPaths = lists:foldl(fun(P, Acc) ->
+ filelib:wildcard(atom_to_list(P) ++ "/{src,c_src}") ++ Acc
+ end, [], [$(call comma_list,$(patsubst %,'%',$(call core_native_path,$(EDOC_SRC_DIRS))))]),
+ DefaultOpts = [{dir, "$(EDOC_OUTPUT)"}, {source_path, SrcPaths}, {subpackages, false}],
+ edoc:application($(1), ".", [$(2)] ++ DefaultOpts),
+ halt(0).
+endef
+
+# Core targets.
+
+ifneq ($(strip $(EDOC_SRC_DIRS)$(wildcard doc/overview.edoc)),)
+docs:: edoc
+endif
+
+distclean:: distclean-edoc
+
+# Plugin-specific targets.
+
+edoc: distclean-edoc doc-deps
+ $(gen_verbose) $(call erlang,$(call edoc.erl,$(PROJECT),$(EDOC_OPTS)))
+
+distclean-edoc:
+ $(gen_verbose) rm -f $(EDOC_OUTPUT)/*.css $(EDOC_OUTPUT)/*.html $(EDOC_OUTPUT)/*.png $(EDOC_OUTPUT)/edoc-info
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Configuration.
+
+DTL_FULL_PATH ?=
+DTL_PATH ?= templates/
+DTL_PREFIX ?=
+DTL_SUFFIX ?= _dtl
+DTL_OPTS ?=
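+
+# Naming example ("templates/users/show.dtl" is a placeholder): by default
+# that template compiles to the module show_dtl; with DTL_FULL_PATH set it
+# becomes users_show_dtl, and DTL_PREFIX/DTL_SUFFIX adjust both forms.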
+
+# Verbosity.
+
+dtl_verbose_0 = @echo " DTL " $(filter %.dtl,$(?F));
+dtl_verbose = $(dtl_verbose_$(V))
+
+# Core targets.
+
+DTL_PATH := $(abspath $(DTL_PATH))
+DTL_FILES := $(sort $(call core_find,$(DTL_PATH),*.dtl))
+
+ifneq ($(DTL_FILES),)
+
+DTL_NAMES = $(addprefix $(DTL_PREFIX),$(addsuffix $(DTL_SUFFIX),$(DTL_FILES:$(DTL_PATH)/%.dtl=%)))
+DTL_MODULES = $(if $(DTL_FULL_PATH),$(subst /,_,$(DTL_NAMES)),$(notdir $(DTL_NAMES)))
+BEAM_FILES += $(addsuffix .beam,$(addprefix ebin/,$(DTL_MODULES)))
+
+ifneq ($(words $(DTL_FILES)),0)
+# Rebuild templates when the Makefile changes.
+$(ERLANG_MK_TMP)/last-makefile-change-erlydtl: $(MAKEFILE_LIST) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(DTL_FILES); \
+ fi
+ $(verbose) touch $@
+
+ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change-erlydtl
+endif
+
+define erlydtl_compile.erl
+ [begin
+ Module0 = case "$(strip $(DTL_FULL_PATH))" of
+ "" ->
+ filename:basename(F, ".dtl");
+ _ ->
+ "$(call core_native_path,$(DTL_PATH))/" ++ F2 = filename:rootname(F, ".dtl"),
+ re:replace(F2, "/", "_", [{return, list}, global])
+ end,
+ Module = list_to_atom("$(DTL_PREFIX)" ++ string:to_lower(Module0) ++ "$(DTL_SUFFIX)"),
+ case erlydtl:compile(F, Module, [$(DTL_OPTS)] ++ [{out_dir, "ebin/"}, return_errors]) of
+ ok -> ok;
+ {ok, _} -> ok
+ end
+ end || F <- string:tokens("$(1)", " ")],
+ halt().
+endef
+
+ebin/$(PROJECT).app:: $(DTL_FILES) | ebin/
+ $(if $(strip $?),\
+ $(dtl_verbose) $(call erlang,$(call erlydtl_compile.erl,$(call core_native_path,$?)),\
+ -pa ebin/))
+
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, Dave Cottlehuber <dch@skunkwerks.at>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-escript escript escript-zip
+
+# Configuration.
+
+ESCRIPT_NAME ?= $(PROJECT)
+ESCRIPT_FILE ?= $(ESCRIPT_NAME)
+
+ESCRIPT_SHEBANG ?= /usr/bin/env escript
+ESCRIPT_COMMENT ?= This is an -*- erlang -*- file
+ESCRIPT_EMU_ARGS ?= -escript main $(ESCRIPT_NAME)
+
+ESCRIPT_ZIP ?= 7z a -tzip -mx=9 -mtc=off $(if $(filter-out 0,$(V)),,> /dev/null)
+ESCRIPT_ZIP_FILE ?= $(ERLANG_MK_TMP)/escript.zip
+
+# Core targets.
+
+distclean:: distclean-escript
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Escript targets:" \
+		" escript Build an executable escript archive"
+
+# Plugin-specific targets.
+
+escript-zip:: FULL=1
+escript-zip:: deps app
+	$(verbose) mkdir -p $(dir $(ESCRIPT_ZIP_FILE))
+ $(verbose) rm -f $(ESCRIPT_ZIP_FILE)
+ $(gen_verbose) cd .. && $(ESCRIPT_ZIP) $(ESCRIPT_ZIP_FILE) $(PROJECT)/ebin/*
+ifneq ($(DEPS),)
+ $(verbose) cd $(DEPS_DIR) && $(ESCRIPT_ZIP) $(ESCRIPT_ZIP_FILE) \
+ $(subst $(DEPS_DIR)/,,$(addsuffix /*,$(wildcard \
+ $(addsuffix /ebin,$(shell cat $(ERLANG_MK_TMP)/deps.log)))))
+endif
+
+escript:: escript-zip
+ $(gen_verbose) printf "%s\n" \
+ "#!$(ESCRIPT_SHEBANG)" \
+ "%% $(ESCRIPT_COMMENT)" \
+ "%%! $(ESCRIPT_EMU_ARGS)" > $(ESCRIPT_FILE)
+ $(verbose) cat $(ESCRIPT_ZIP_FILE) >> $(ESCRIPT_FILE)
+ $(verbose) chmod +x $(ESCRIPT_FILE)
+
+distclean-escript:
+ $(gen_verbose) rm -f $(ESCRIPT_FILE)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, Enrique Fernandez <enrique.fernandez@erlang-solutions.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: eunit apps-eunit
+
+# Configuration
+
+EUNIT_OPTS ?=
+EUNIT_ERL_OPTS ?=
+
+# Core targets.
+
+tests:: eunit
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "EUnit targets:" \
+ " eunit Run all the EUnit tests for this project"
+
+# Plugin-specific targets.
+
+define eunit.erl
+ $(call cover.erl)
+ CoverSetup(),
+ case eunit:test($1, [$(EUNIT_OPTS)]) of
+ ok -> ok;
+ error -> halt(2)
+ end,
+ CoverExport("$(call core_native_path,$(COVER_DATA_DIR))/eunit.coverdata"),
+ halt()
+endef
+
+EUNIT_ERL_OPTS += -pa $(TEST_DIR) $(CURDIR)/ebin
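+
+# Example invocations ("my_module" and "my_test" are placeholders): t selects
+# a whole module, or a module:function pair run as a single test:
+#   $ make eunit t=my_module
+#   $ make eunit t=my_module:my_test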
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+eunit: test-build cover-data-dir
+ $(gen_verbose) $(call erlang,$(call eunit.erl,['$(t)']),$(EUNIT_ERL_OPTS))
+else
+eunit: test-build cover-data-dir
+ $(gen_verbose) $(call erlang,$(call eunit.erl,fun $(t)/0),$(EUNIT_ERL_OPTS))
+endif
+else
+EUNIT_EBIN_MODS = $(notdir $(basename $(ERL_FILES) $(BEAM_FILES)))
+EUNIT_TEST_MODS = $(notdir $(basename $(call core_find,$(TEST_DIR)/,*.erl)))
+
+EUNIT_MODS = $(foreach mod,$(EUNIT_EBIN_MODS) $(filter-out \
+ $(patsubst %,%_tests,$(EUNIT_EBIN_MODS)),$(EUNIT_TEST_MODS)),'$(mod)')
+
+eunit: test-build $(if $(IS_APP)$(ROOT_DIR),,apps-eunit) cover-data-dir
+ifneq ($(wildcard src/ $(TEST_DIR)),)
+ $(gen_verbose) $(call erlang,$(call eunit.erl,[$(call comma_list,$(EUNIT_MODS))]),$(EUNIT_ERL_OPTS))
+endif
+
+ifneq ($(ALL_APPS_DIRS),)
+apps-eunit: test-build
+ $(verbose) eunit_retcode=0 ; for app in $(ALL_APPS_DIRS); do $(MAKE) -C $$app eunit IS_APP=1; \
+ [ $$? -ne 0 ] && eunit_retcode=1 ; done ; \
+ exit $$eunit_retcode
+endif
+endif
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter proper,$(DEPS) $(TEST_DEPS)),proper)
+.PHONY: proper
+
+# Targets.
+
+tests:: proper
+
+define proper_check.erl
+ $(call cover.erl)
+ code:add_pathsa([
+ "$(call core_native_path,$(CURDIR)/ebin)",
+ "$(call core_native_path,$(DEPS_DIR)/*/ebin)",
+ "$(call core_native_path,$(TEST_DIR))"]),
+ Module = fun(M) ->
+ [true] =:= lists:usort([
+ case atom_to_list(F) of
+ "prop_" ++ _ ->
+ io:format("Testing ~p:~p/0~n", [M, F]),
+ proper:quickcheck(M:F(), nocolors);
+ _ ->
+ true
+ end
+ || {F, 0} <- M:module_info(exports)])
+ end,
+ try begin
+ CoverSetup(),
+ Res = case $(1) of
+ all -> [true] =:= lists:usort([Module(M) || M <- [$(call comma_list,$(3))]]);
+ module -> Module($(2));
+ function -> proper:quickcheck($(2), nocolors)
+ end,
+ CoverExport("$(COVER_DATA_DIR)/proper.coverdata"),
+ Res
+ end of
+ true -> halt(0);
+ _ -> halt(1)
+ catch error:undef ->
+ io:format("Undefined property or module?~n~p~n", [erlang:get_stacktrace()]),
+ halt(0)
+ end.
+endef
+
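+# Example invocations ("prop_parser" and "prop_roundtrip" are placeholders):
+# check every prop_* property of a module, or a single property:
+#   $ make proper t=prop_parser
+#   $ make proper t=prop_parser:prop_roundtrip
+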
+ifdef t
+ifeq (,$(findstring :,$(t)))
+proper: test-build cover-data-dir
+ $(verbose) $(call erlang,$(call proper_check.erl,module,$(t)))
+else
+proper: test-build cover-data-dir
+ $(verbose) echo Testing $(t)/0
+ $(verbose) $(call erlang,$(call proper_check.erl,function,$(t)()))
+endif
+else
+proper: test-build cover-data-dir
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(wildcard ebin/*.beam) $(call core_find,$(TEST_DIR)/,*.beam))))))
+ $(gen_verbose) $(call erlang,$(call proper_check.erl,all,undefined,$(MODULES)))
+endif
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Verbosity.
+
+proto_verbose_0 = @echo " PROTO " $(filter %.proto,$(?F));
+proto_verbose = $(proto_verbose_$(V))
+
+# Core targets.
+
+ifneq ($(wildcard src/),)
+ifneq ($(filter gpb protobuffs,$(BUILD_DEPS) $(DEPS)),)
+PROTO_FILES := $(filter %.proto,$(ALL_SRC_FILES))
+ERL_FILES += $(addprefix src/,$(patsubst %.proto,%_pb.erl,$(notdir $(PROTO_FILES))))
+
+ifeq ($(PROTO_FILES),)
+$(ERLANG_MK_TMP)/last-makefile-change-protobuffs:
+ $(verbose) :
+else
+# Rebuild proto files when the Makefile changes.
+# We exclude $(PROJECT).d to avoid a circular dependency.
+$(ERLANG_MK_TMP)/last-makefile-change-protobuffs: $(filter-out $(PROJECT).d,$(MAKEFILE_LIST)) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(PROTO_FILES); \
+ fi
+ $(verbose) touch $@
+
+$(PROJECT).d:: $(ERLANG_MK_TMP)/last-makefile-change-protobuffs
+endif
+
+ifeq ($(filter gpb,$(BUILD_DEPS) $(DEPS)),)
+define compile_proto.erl
+ [begin
+ protobuffs_compile:generate_source(F, [
+ {output_include_dir, "./include"},
+ {output_src_dir, "./src"}])
+ end || F <- string:tokens("$1", " ")],
+ halt().
+endef
+else
+define compile_proto.erl
+ [begin
+ gpb_compile:file(F, [
+ {include_as_lib, true},
+ {module_name_suffix, "_pb"},
+ {o_hrl, "./include"},
+ {o_erl, "./src"}])
+ end || F <- string:tokens("$1", " ")],
+ halt().
+endef
+endif
+
+ifneq ($(PROTO_FILES),)
+$(PROJECT).d:: $(PROTO_FILES)
+ $(verbose) mkdir -p ebin/ include/
+ $(if $(strip $?),$(proto_verbose) $(call erlang,$(call compile_proto.erl,$?)))
+endif
+endif
+endif
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: relx-rel relx-relup distclean-relx-rel run
+
+# Configuration.
+
+RELX ?= $(ERLANG_MK_TMP)/relx
+RELX_CONFIG ?= $(CURDIR)/relx.config
+
+RELX_URL ?= https://erlang.mk/res/relx-v3.27.0
+RELX_OPTS ?=
+RELX_OUTPUT_DIR ?= _rel
+RELX_REL_EXT ?=
+RELX_TAR ?= 1
+
+ifdef SFX
+ RELX_TAR = 1
+endif
+
+ifeq ($(firstword $(RELX_OPTS)),-o)
+ RELX_OUTPUT_DIR = $(word 2,$(RELX_OPTS))
+else
+ RELX_OPTS += -o $(RELX_OUTPUT_DIR)
+endif
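+
+# For example, -o in RELX_OPTS (or RELX_OUTPUT_DIR) relocates the release
+# output away from the default _rel directory:
+#   RELX_OPTS = -o /tmp/myrel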
+
+# Core targets.
+
+ifeq ($(IS_DEP),)
+ifneq ($(wildcard $(RELX_CONFIG)),)
+rel:: relx-rel
+
+relup:: relx-relup
+endif
+endif
+
+distclean:: distclean-relx-rel
+
+# Plugin-specific targets.
+
+$(RELX): | $(ERLANG_MK_TMP)
+ $(gen_verbose) $(call core_http_get,$(RELX),$(RELX_URL))
+ $(verbose) chmod +x $(RELX)
+
+relx-rel: $(RELX) rel-deps app
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) release
+ $(verbose) $(MAKE) relx-post-rel
+ifeq ($(RELX_TAR),1)
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) tar
+endif
+
+relx-relup: $(RELX) rel-deps app
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) release
+ $(MAKE) relx-post-rel
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) relup $(if $(filter 1,$(RELX_TAR)),tar)
+
+distclean-relx-rel:
+ $(gen_verbose) rm -rf $(RELX_OUTPUT_DIR)
+
+# Default hooks.
+relx-post-rel::
+ $(verbose) :
+
+# Run target.
+
+ifeq ($(wildcard $(RELX_CONFIG)),)
+run::
+else
+
+define get_relx_release.erl
+ {ok, Config} = file:consult("$(call core_native_path,$(RELX_CONFIG))"),
+ {release, {Name, Vsn0}, _} = lists:keyfind(release, 1, Config),
+ Vsn = case Vsn0 of
+ {cmd, Cmd} -> os:cmd(Cmd);
+ semver -> "";
+ {semver, _} -> "";
+		_ -> Vsn0
+ end,
+ Extended = case lists:keyfind(extended_start_script, 1, Config) of
+ {_, true} -> "1";
+ _ -> ""
+ end,
+ io:format("~s ~s ~s", [Name, Vsn, Extended]),
+ halt(0).
+endef
+
+RELX_REL := $(shell $(call erlang,$(get_relx_release.erl)))
+RELX_REL_NAME := $(word 1,$(RELX_REL))
+RELX_REL_VSN := $(word 2,$(RELX_REL))
+RELX_REL_CMD := $(if $(word 3,$(RELX_REL)),console)
+
+ifeq ($(PLATFORM),msys2)
+RELX_REL_EXT := .cmd
+endif
+
+run:: all
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) $(RELX_REL_CMD)
+
+ifdef RELOAD
+rel::
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) ping
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) \
+ eval "io:format(\"~p~n\", [c:lm()])"
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Relx targets:" \
+ " run Compile the project, build the release and run it"
+
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, M Robert Martin <rob@version2beta.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: shell
+
+# Configuration.
+
+SHELL_ERL ?= erl
+SHELL_PATHS ?= $(CURDIR)/ebin $(TEST_DIR)
+SHELL_OPTS ?=
+
+ALL_SHELL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(SHELL_DEPS))
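+
+# Example configuration ("my_app" is a placeholder): name the shell node and
+# start an application on boot using standard erl flags:
+#   SHELL_OPTS = -sname dev -eval 'application:ensure_all_started(my_app)'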
+
+# Core targets
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Shell targets:" \
+		" shell Run an Erlang shell with SHELL_OPTS or reasonable defaults"
+
+# Plugin-specific targets.
+
+$(foreach dep,$(SHELL_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+build-shell-deps:
+else
+build-shell-deps: $(ALL_SHELL_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_SHELL_DEPS_DIRS) ; do \
+ if [ -z "$(strip $(FULL))" ] && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ else \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ fi \
+ done
+endif
+
+shell:: build-shell-deps
+ $(gen_verbose) $(SHELL_ERL) -pa $(SHELL_PATHS) $(SHELL_OPTS)
+
+# Copyright 2017, Stanislaw Klekot <dozzie@jarowit.net>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-sphinx sphinx
+
+# Configuration.
+
+SPHINX_BUILD ?= sphinx-build
+SPHINX_SOURCE ?= doc
+SPHINX_CONFDIR ?=
+SPHINX_FORMATS ?= html
+SPHINX_DOCTREES ?= $(ERLANG_MK_TMP)/sphinx.doctrees
+SPHINX_OPTS ?=
+
+#sphinx_html_opts =
+#sphinx_html_output = html
+#sphinx_man_opts =
+#sphinx_man_output = man
+#sphinx_latex_opts =
+#sphinx_latex_output = latex
+
+# Helpers.
+
+sphinx_build_0 = @echo " SPHINX" $1; $(SPHINX_BUILD) -N -q
+sphinx_build_1 = $(SPHINX_BUILD) -N
+sphinx_build_2 = set -x; $(SPHINX_BUILD)
+sphinx_build = $(sphinx_build_$(V))
+
+define sphinx.build
+$(call sphinx_build,$1) -b $1 -d $(SPHINX_DOCTREES) $(if $(SPHINX_CONFDIR),-c $(SPHINX_CONFDIR)) $(SPHINX_OPTS) $(sphinx_$1_opts) -- $(SPHINX_SOURCE) $(call sphinx.output,$1)
+
+endef
+
+define sphinx.output
+$(if $(sphinx_$1_output),$(sphinx_$1_output),$1)
+endef
+
+# Targets.
+
+ifneq ($(wildcard $(if $(SPHINX_CONFDIR),$(SPHINX_CONFDIR),$(SPHINX_SOURCE))/conf.py),)
+docs:: sphinx
+distclean:: distclean-sphinx
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Sphinx targets:" \
+ " sphinx Generate Sphinx documentation." \
+ "" \
+		"ReST sources and the 'conf.py' file are expected in the directory pointed" \
+		"to by SPHINX_SOURCE ('doc' by default). SPHINX_FORMATS lists the formats to" \
+		"build (only the 'html' format is generated by default); the target directory" \
+		'can be specified by setting sphinx_$${format}_output, for example:' \
+		"sphinx_html_output = output/html. Additional Sphinx options can be set in" \
+		"SPHINX_OPTS."
+
+# Plugin-specific targets.
+
+sphinx:
+ $(foreach F,$(SPHINX_FORMATS),$(call sphinx.build,$F))
+
+distclean-sphinx:
+ $(gen_verbose) rm -rf $(filter-out $(SPHINX_SOURCE),$(foreach F,$(SPHINX_FORMATS),$(call sphinx.output,$F)))
+
+# Copyright (c) 2017, Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: show-ERL_LIBS show-ERLC_OPTS show-TEST_ERLC_OPTS
+
+show-ERL_LIBS:
+ @echo $(ERL_LIBS)
+
+show-ERLC_OPTS:
+ @$(foreach opt,$(ERLC_OPTS) -pa ebin -I include,echo "$(opt)";)
+
+show-TEST_ERLC_OPTS:
+ @$(foreach opt,$(TEST_ERLC_OPTS) -pa ebin -I include,echo "$(opt)";)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter triq,$(DEPS) $(TEST_DEPS)),triq)
+.PHONY: triq
+
+# Targets.
+
+tests:: triq
+
+define triq_check.erl
+ $(call cover.erl)
+ code:add_pathsa([
+ "$(call core_native_path,$(CURDIR)/ebin)",
+ "$(call core_native_path,$(DEPS_DIR)/*/ebin)",
+ "$(call core_native_path,$(TEST_DIR))"]),
+ try begin
+ CoverSetup(),
+ Res = case $(1) of
+ all -> [true] =:= lists:usort([triq:check(M) || M <- [$(call comma_list,$(3))]]);
+ module -> triq:check($(2));
+ function -> triq:check($(2))
+ end,
+ CoverExport("$(COVER_DATA_DIR)/triq.coverdata"),
+ Res
+ end of
+ true -> halt(0);
+ _ -> halt(1)
+ catch error:undef ->
+ io:format("Undefined property or module?~n~p~n", [erlang:get_stacktrace()]),
+ halt(0)
+ end.
+endef
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+triq: test-build cover-data-dir
+ $(verbose) $(call erlang,$(call triq_check.erl,module,$(t)))
+else
+triq: test-build cover-data-dir
+ $(verbose) echo Testing $(t)/0
+ $(verbose) $(call erlang,$(call triq_check.erl,function,$(t)()))
+endif
+else
+triq: test-build cover-data-dir
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(wildcard ebin/*.beam) $(call core_find,$(TEST_DIR)/,*.beam))))))
+ $(gen_verbose) $(call erlang,$(call triq_check.erl,all,undefined,$(MODULES)))
+endif
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Erlang Solutions Ltd.
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: xref distclean-xref
+
+# Configuration.
+
+ifeq ($(XREF_CONFIG),)
+ XREFR_ARGS :=
+else
+ XREFR_ARGS := -c $(XREF_CONFIG)
+endif
+
+XREFR ?= $(CURDIR)/xrefr
+export XREFR
+
+XREFR_URL ?= https://github.com/inaka/xref_runner/releases/download/1.1.0/xrefr
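+
+# For example ("xref.config" is a placeholder path):
+#   $ make xref XREF_CONFIG=xref.config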
+
+# Core targets.
+
+help::
+ $(verbose) printf '%s\n' '' \
+ 'Xref targets:' \
+		' xref Run xrefr using $$XREF_CONFIG as its config file if defined'
+
+distclean:: distclean-xref
+
+# Plugin-specific targets.
+
+$(XREFR):
+ $(gen_verbose) $(call core_http_get,$(XREFR),$(XREFR_URL))
+ $(verbose) chmod +x $(XREFR)
+
+xref: deps app $(XREFR)
+ $(gen_verbose) $(XREFR) $(XREFR_ARGS)
+
+distclean-xref:
+ $(gen_verbose) rm -rf $(XREFR)
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Viktor Söderqvist <viktor@zuiderkwast.se>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+COVER_REPORT_DIR ?= cover
+COVER_DATA_DIR ?= $(COVER_REPORT_DIR)
+
+ifdef COVER
+COVER_APPS ?= $(notdir $(ALL_APPS_DIRS))
+COVER_DEPS ?=
+endif
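+
+# Example invocation ("cowlib" and "ranch" stand in for real dependencies):
+# collect coverage for the project plus selected dependencies:
+#   $ make tests COVER=1 COVER_DEPS="cowlib ranch"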
+
+# Code coverage for Common Test.
+
+ifdef COVER
+ifdef CT_RUN
+ifneq ($(wildcard $(TEST_DIR)),)
+test-build:: $(TEST_DIR)/ct.cover.spec
+
+$(TEST_DIR)/ct.cover.spec: cover-data-dir
+ $(gen_verbose) printf "%s\n" \
+ "{incl_app, '$(PROJECT)', details}." \
+ "{incl_dirs, '$(PROJECT)', [\"$(call core_native_path,$(CURDIR)/ebin)\" \
+ $(foreach a,$(COVER_APPS),$(comma) \"$(call core_native_path,$(APPS_DIR)/$a/ebin)\") \
+ $(foreach d,$(COVER_DEPS),$(comma) \"$(call core_native_path,$(DEPS_DIR)/$d/ebin)\")]}." \
+ '{export,"$(call core_native_path,$(abspath $(COVER_DATA_DIR))/ct.coverdata)"}.' > $@
+
+CT_RUN += -cover $(TEST_DIR)/ct.cover.spec
+endif
+endif
+endif
+
+# Code coverage for other tools.
+
+ifdef COVER
+define cover.erl
+ CoverSetup = fun() ->
+ Dirs = ["$(call core_native_path,$(CURDIR)/ebin)"
+ $(foreach a,$(COVER_APPS),$(comma) "$(call core_native_path,$(APPS_DIR)/$a/ebin)")
+ $(foreach d,$(COVER_DEPS),$(comma) "$(call core_native_path,$(DEPS_DIR)/$d/ebin)")],
+ [begin
+ case filelib:is_dir(Dir) of
+ false -> false;
+ true ->
+ case cover:compile_beam_directory(Dir) of
+ {error, _} -> halt(1);
+ _ -> true
+ end
+ end
+ end || Dir <- Dirs]
+ end,
+ CoverExport = fun(Filename) -> cover:export(Filename) end,
+endef
+else
+define cover.erl
+ CoverSetup = fun() -> ok end,
+ CoverExport = fun(_) -> ok end,
+endef
+endif
+
+# Core targets
+
+ifdef COVER
+ifneq ($(COVER_REPORT_DIR),)
+tests::
+ $(verbose) $(MAKE) --no-print-directory cover-report
+endif
+
+cover-data-dir: | $(COVER_DATA_DIR)
+
+$(COVER_DATA_DIR):
+ $(verbose) mkdir -p $(COVER_DATA_DIR)
+else
+cover-data-dir:
+endif
+
+clean:: coverdata-clean
+
+ifneq ($(COVER_REPORT_DIR),)
+distclean:: cover-report-clean
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Cover targets:" \
+		" cover-report Generate an HTML coverage report from previously collected" \
+ " cover data." \
+ " all.coverdata Merge all coverdata files into all.coverdata." \
+ "" \
+ "If COVER=1 is set, coverage data is generated by the targets eunit and ct. The" \
+		"target tests additionally generates an HTML coverage report from the combined" \
+ "coverdata files from each of these testing tools. HTML reports can be disabled" \
+ "by setting COVER_REPORT_DIR to empty."
+
+# Plugin specific targets
+
+COVERDATA = $(filter-out $(COVER_DATA_DIR)/all.coverdata,$(wildcard $(COVER_DATA_DIR)/*.coverdata))
+
+.PHONY: coverdata-clean
+coverdata-clean:
+ $(gen_verbose) rm -f $(COVER_DATA_DIR)/*.coverdata $(TEST_DIR)/ct.cover.spec
+
+# Merge all coverdata files into one.
+define cover_export.erl
+ $(foreach f,$(COVERDATA),cover:import("$(f)") == ok orelse halt(1),)
+ cover:export("$(COVER_DATA_DIR)/$@"), halt(0).
+endef
+
+all.coverdata: $(COVERDATA) cover-data-dir
+ $(gen_verbose) $(call erlang,$(cover_export.erl))
+
+# These are only defined if COVER_REPORT_DIR is non-empty. Set COVER_REPORT_DIR to
+# empty if you want the coverdata files but not the HTML report.
+ifneq ($(COVER_REPORT_DIR),)
+
+.PHONY: cover-report-clean cover-report
+
+cover-report-clean:
+ $(gen_verbose) rm -rf $(COVER_REPORT_DIR)
+ifneq ($(COVER_REPORT_DIR),$(COVER_DATA_DIR))
+ $(if $(shell ls -A $(COVER_DATA_DIR)/),,$(verbose) rmdir $(COVER_DATA_DIR))
+endif
+
+ifeq ($(COVERDATA),)
+cover-report:
+else
+
+# Modules which include eunit.hrl always contain one line without coverage
+# because eunit defines test/0 which is never called. We compensate for this.
+EUNIT_HRL_MODS = $(subst $(space),$(comma),$(shell \
+ grep -H -e '^\s*-include.*include/eunit\.hrl"' src/*.erl \
+ | sed "s/^src\/\(.*\)\.erl:.*/'\1'/" | uniq))
+
+define cover_report.erl
+ $(foreach f,$(COVERDATA),cover:import("$(f)") == ok orelse halt(1),)
+ Ms = cover:imported_modules(),
+ [cover:analyse_to_file(M, "$(COVER_REPORT_DIR)/" ++ atom_to_list(M)
+ ++ ".COVER.html", [html]) || M <- Ms],
+ Report = [begin {ok, R} = cover:analyse(M, module), R end || M <- Ms],
+ EunitHrlMods = [$(EUNIT_HRL_MODS)],
+ Report1 = [{M, {Y, case lists:member(M, EunitHrlMods) of
+ true -> N - 1; false -> N end}} || {M, {Y, N}} <- Report],
+ TotalY = lists:sum([Y || {_, {Y, _}} <- Report1]),
+ TotalN = lists:sum([N || {_, {_, N}} <- Report1]),
+ Perc = fun(Y, N) -> case Y + N of 0 -> 100; S -> round(100 * Y / S) end end,
+ TotalPerc = Perc(TotalY, TotalN),
+ {ok, F} = file:open("$(COVER_REPORT_DIR)/index.html", [write]),
+ io:format(F, "<!DOCTYPE html><html>~n"
+ "<head><meta charset=\"UTF-8\">~n"
+ "<title>Coverage report</title></head>~n"
+ "<body>~n", []),
+ io:format(F, "<h1>Coverage</h1>~n<p>Total: ~p%</p>~n", [TotalPerc]),
+ io:format(F, "<table><tr><th>Module</th><th>Coverage</th></tr>~n", []),
+ [io:format(F, "<tr><td><a href=\"~p.COVER.html\">~p</a></td>"
+ "<td>~p%</td></tr>~n",
+ [M, M, Perc(Y, N)]) || {M, {Y, N}} <- Report1],
+ How = "$(subst $(space),$(comma)$(space),$(basename $(COVERDATA)))",
+ Date = "$(shell date -u "+%Y-%m-%dT%H:%M:%SZ")",
+ io:format(F, "</table>~n"
+ "<p>Generated using ~s and erlang.mk on ~s.</p>~n"
+ "</body></html>", [How, Date]),
+ halt().
+endef
+
+cover-report:
+ $(verbose) mkdir -p $(COVER_REPORT_DIR)
+ $(gen_verbose) $(call erlang,$(cover_report.erl))
+
+endif
+endif # ifneq ($(COVER_REPORT_DIR),)
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: sfx
+
+ifdef RELX_REL
+ifdef SFX
+
+# Configuration.
+
+SFX_ARCHIVE ?= $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/$(RELX_REL_NAME)-$(RELX_REL_VSN).tar.gz
+SFX_OUTPUT_FILE ?= $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME).run
+
+# Core targets.
+
+rel:: sfx
+
+# Plugin-specific targets.
+
+define sfx_stub
+#!/bin/sh
+
+TMPDIR=`mktemp -d`
+ARCHIVE=`awk '/^__ARCHIVE_BELOW__$$/ {print NR + 1; exit 0;}' $$0`
+FILENAME=$$(basename $$0)
+REL=$${FILENAME%.*}
+
+tail -n+$$ARCHIVE $$0 | tar -xzf - -C $$TMPDIR
+
+$$TMPDIR/bin/$$REL console
+RET=$$?
+
+rm -rf $$TMPDIR
+
+exit $$RET
+
+__ARCHIVE_BELOW__
+endef
+
+sfx:
+ $(verbose) $(call core_render,sfx_stub,$(SFX_OUTPUT_FILE))
+ $(gen_verbose) cat $(SFX_ARCHIVE) >> $(SFX_OUTPUT_FILE)
+ $(verbose) chmod +x $(SFX_OUTPUT_FILE)
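+
+# For example, with a relx release already configured, the self-extracting
+# archive can be built and started directly (RELX_REL_NAME is
+# project-specific):
+#
+#   make rel SFX=1
+#   $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME).run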
+
+endif
+endif
+
+# Copyright (c) 2013-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# External plugins.
+
+DEP_PLUGINS ?=
+
+$(foreach p,$(DEP_PLUGINS),\
+ $(eval $(if $(findstring /,$p),\
+ $(call core_dep_plugin,$p,$(firstword $(subst /, ,$p))),\
+ $(call core_dep_plugin,$p/plugins.mk,$p))))
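+
+# For example (hypothetical dependency name), both forms are accepted:
+#
+#   DEP_PLUGINS = cowboy            # loads the dependency's plugins.mk
+#   DEP_PLUGINS = cowboy/mk/dist.mk # loads that specific file instead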
+
+help:: help-plugins
+
+help-plugins::
+ $(verbose) :
+
+# Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015-2016, Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Fetch dependencies recursively (without building them).
+
+.PHONY: fetch-deps fetch-doc-deps fetch-rel-deps fetch-test-deps \
+ fetch-shell-deps
+
+.PHONY: $(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+fetch-deps: $(ERLANG_MK_RECURSIVE_DEPS_LIST)
+fetch-doc-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST)
+fetch-rel-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST)
+fetch-test-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST)
+fetch-shell-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+ifneq ($(SKIP_DEPS),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST):
+ $(verbose) :> $@
+else
+# By default, we fetch "normal" dependencies. They are also included
+# regardless of the type of dependencies requested.
+#
+# $(ALL_DEPS_DIRS) includes $(BUILD_DEPS).
+
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_DOC_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_REL_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_TEST_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_SHELL_DEPS_DIRS)
+
+# Allow the use of fetch-deps and $(DEP_TYPES) to fetch multiple types of
+# dependencies with a single target.
+ifneq ($(filter doc,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_DOC_DEPS_DIRS)
+endif
+ifneq ($(filter rel,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_REL_DEPS_DIRS)
+endif
+ifneq ($(filter test,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_TEST_DEPS_DIRS)
+endif
+ifneq ($(filter shell,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_SHELL_DEPS_DIRS)
+endif
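+
+# For example, to fetch test and doc dependencies along with the normal
+# ones in a single pass:
+#
+#   make fetch-deps DEP_TYPES='test doc'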
+
+ERLANG_MK_RECURSIVE_TMP_LIST := $(abspath $(ERLANG_MK_TMP)/recursive-tmp-deps-$(shell echo $$PPID).log)
+
+$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): | $(ERLANG_MK_TMP)
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_RECURSIVE_TMP_LIST)
+endif
+ $(verbose) touch $(ERLANG_MK_RECURSIVE_TMP_LIST)
+ $(verbose) set -e; for dep in $^ ; do \
+ if ! grep -qs ^$$dep$$ $(ERLANG_MK_RECURSIVE_TMP_LIST); then \
+ echo $$dep >> $(ERLANG_MK_RECURSIVE_TMP_LIST); \
+ if grep -qs -E "^[[:blank:]]*include[[:blank:]]+(erlang\.mk|.*/erlang\.mk|.*ERLANG_MK_FILENAME.*)$$" \
+ $$dep/GNUmakefile $$dep/makefile $$dep/Makefile; then \
+ $(MAKE) -C $$dep fetch-deps \
+ IS_DEP=1 \
+ ERLANG_MK_RECURSIVE_TMP_LIST=$(ERLANG_MK_RECURSIVE_TMP_LIST); \
+ fi \
+ fi \
+ done
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) sort < $(ERLANG_MK_RECURSIVE_TMP_LIST) | \
+ uniq > $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted
+ $(verbose) cmp -s $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted $@ \
+ || mv $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted $@
+ $(verbose) rm -f $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted
+ $(verbose) rm $(ERLANG_MK_RECURSIVE_TMP_LIST)
+endif
+endif # ifneq ($(SKIP_DEPS),)
+
+# List dependencies recursively.
+
+.PHONY: list-deps list-doc-deps list-rel-deps list-test-deps \
+ list-shell-deps
+
+list-deps: $(ERLANG_MK_RECURSIVE_DEPS_LIST)
+list-doc-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST)
+list-rel-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST)
+list-test-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST)
+list-shell-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+list-deps list-doc-deps list-rel-deps list-test-deps list-shell-deps:
+ $(verbose) cat $^
+
+# Query dependencies recursively.
+
+.PHONY: query-deps query-doc-deps query-rel-deps query-test-deps \
+ query-shell-deps
+
+QUERY ?= name fetch_method repo version
+
+define query_target
+$(1): $(2) clean-tmp-query.log
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(4)
+endif
+ $(verbose) $(foreach dep,$(3),\
+ echo $(PROJECT): $(foreach q,$(QUERY),$(call query_$(q),$(dep))) >> $(4) ;)
+ $(if $(filter-out query-deps,$(1)),,\
+ $(verbose) set -e; for dep in $(3) ; do \
+ if grep -qs ^$$$$dep$$$$ $(ERLANG_MK_TMP)/query.log; then \
+ :; \
+ else \
+ echo $$$$dep >> $(ERLANG_MK_TMP)/query.log; \
+ $(MAKE) -C $(DEPS_DIR)/$$$$dep $$@ QUERY="$(QUERY)" IS_DEP=1 || true; \
+ fi \
+ done)
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) touch $(4)
+ $(verbose) cat $(4)
+endif
+endef
+
+clean-tmp-query.log:
+ifeq ($(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_TMP)/query.log
+endif
+
+$(eval $(call query_target,query-deps,$(ERLANG_MK_RECURSIVE_DEPS_LIST),$(BUILD_DEPS) $(DEPS),$(ERLANG_MK_QUERY_DEPS_FILE)))
+$(eval $(call query_target,query-doc-deps,$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST),$(DOC_DEPS),$(ERLANG_MK_QUERY_DOC_DEPS_FILE)))
+$(eval $(call query_target,query-rel-deps,$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST),$(REL_DEPS),$(ERLANG_MK_QUERY_REL_DEPS_FILE)))
+$(eval $(call query_target,query-test-deps,$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST),$(TEST_DEPS),$(ERLANG_MK_QUERY_TEST_DEPS_FILE)))
+$(eval $(call query_target,query-shell-deps,$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST),$(SHELL_DEPS),$(ERLANG_MK_QUERY_SHELL_DEPS_FILE)))
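+
+# For example, to list only the name and repository of every dependency,
+# recursively:
+#
+#   make query-deps QUERY='name repo'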
diff --git a/deps/amqp10_client/rabbitmq-components.mk b/deps/amqp10_client/rabbitmq-components.mk
new file mode 100644
index 0000000000..b2a3be8b35
--- /dev/null
+++ b/deps/amqp10_client/rabbitmq-components.mk
@@ -0,0 +1,359 @@
+ifeq ($(.DEFAULT_GOAL),)
+# Define the default goal as `all` because this file defines some targets
+# before the inclusion of erlang.mk, which would otherwise cause the wrong
+# target to become the default.
+.DEFAULT_GOAL = all
+endif
+
+# PROJECT_VERSION defaults to:
+# 1. the version exported by rabbitmq-server-release;
+# 2. the version stored in `git-revisions.txt`, if it exists;
+# 3. a version based on git-describe(1), if it is a Git clone;
+# 4. 0.0.0
+
+PROJECT_VERSION := $(RABBITMQ_VERSION)
+
+ifeq ($(PROJECT_VERSION),)
+PROJECT_VERSION := $(shell \
+if test -f git-revisions.txt; then \
+ head -n1 git-revisions.txt | \
+ awk '{print $$$(words $(PROJECT_DESCRIPTION) version);}'; \
+else \
+ (git describe --dirty --abbrev=7 --tags --always --first-parent \
+ 2>/dev/null || echo rabbitmq_v0_0_0) | \
+ sed -e 's/^rabbitmq_v//' -e 's/^v//' -e 's/_/./g' -e 's/-/+/' \
+ -e 's/-/./g'; \
+fi)
+endif
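+
+# For example, a git-describe(1) output of "v3.8.9-10-gabc1234" becomes
+# the version "3.8.9+10.gabc1234" after the sed expressions above.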
+
+# --------------------------------------------------------------------
+# RabbitMQ components.
+# --------------------------------------------------------------------
+
+# For RabbitMQ repositories, we want to check out branches which match
+# the parent project. For instance, if the parent project is on a
+# release tag, dependencies must be on the same release tag. If the
+# parent project is on a topic branch, dependencies must be on the same
+# topic branch, or fall back to `stable` or `master`, whichever was the
+# base of the topic branch.
+
+dep_amqp_client = git_rmq rabbitmq-erlang-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_amqp10_client = git_rmq rabbitmq-amqp1.0-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_amqp10_common = git_rmq rabbitmq-amqp1.0-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbit = git_rmq rabbitmq-server $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbit_common = git_rmq rabbitmq-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_amqp1_0 = git_rmq rabbitmq-amqp1.0 $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_amqp = git_rmq rabbitmq-auth-backend-amqp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_cache = git_rmq rabbitmq-auth-backend-cache $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_http = git_rmq rabbitmq-auth-backend-http $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_ldap = git_rmq rabbitmq-auth-backend-ldap $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_oauth2 = git_rmq rabbitmq-auth-backend-oauth2 $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_mechanism_ssl = git_rmq rabbitmq-auth-mechanism-ssl $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_aws = git_rmq rabbitmq-aws $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_boot_steps_visualiser = git_rmq rabbitmq-boot-steps-visualiser $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_cli = git_rmq rabbitmq-cli $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_codegen = git_rmq rabbitmq-codegen $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_consistent_hash_exchange = git_rmq rabbitmq-consistent-hash-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_ct_client_helpers = git_rmq rabbitmq-ct-client-helpers $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_ct_helpers = git_rmq rabbitmq-ct-helpers $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_delayed_message_exchange = git_rmq rabbitmq-delayed-message-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_dotnet_client = git_rmq rabbitmq-dotnet-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_event_exchange = git_rmq rabbitmq-event-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_federation = git_rmq rabbitmq-federation $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_federation_management = git_rmq rabbitmq-federation-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_java_client = git_rmq rabbitmq-java-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_client = git_rmq rabbitmq-jms-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_cts = git_rmq rabbitmq-jms-cts $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_topic_exchange = git_rmq rabbitmq-jms-topic-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_lvc_exchange = git_rmq rabbitmq-lvc-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management = git_rmq rabbitmq-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_agent = git_rmq rabbitmq-management-agent $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_exchange = git_rmq rabbitmq-management-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_themes = git_rmq rabbitmq-management-themes $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_message_timestamp = git_rmq rabbitmq-message-timestamp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_metronome = git_rmq rabbitmq-metronome $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_mqtt = git_rmq rabbitmq-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_objc_client = git_rmq rabbitmq-objc-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_aws = git_rmq rabbitmq-peer-discovery-aws $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_common = git_rmq rabbitmq-peer-discovery-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_consul = git_rmq rabbitmq-peer-discovery-consul $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_etcd = git_rmq rabbitmq-peer-discovery-etcd $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_k8s = git_rmq rabbitmq-peer-discovery-k8s $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_prometheus = git_rmq rabbitmq-prometheus $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_random_exchange = git_rmq rabbitmq-random-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_recent_history_exchange = git_rmq rabbitmq-recent-history-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_routing_node_stamp = git_rmq rabbitmq-routing-node-stamp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_rtopic_exchange = git_rmq rabbitmq-rtopic-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_server_release = git_rmq rabbitmq-server-release $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_sharding = git_rmq rabbitmq-sharding $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_shovel = git_rmq rabbitmq-shovel $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_shovel_management = git_rmq rabbitmq-shovel-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_stomp = git_rmq rabbitmq-stomp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_stream = git_rmq rabbitmq-stream $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_toke = git_rmq rabbitmq-toke $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_top = git_rmq rabbitmq-top $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_tracing = git_rmq rabbitmq-tracing $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_trust_store = git_rmq rabbitmq-trust-store $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_test = git_rmq rabbitmq-test $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_dispatch = git_rmq rabbitmq-web-dispatch $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_stomp = git_rmq rabbitmq-web-stomp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_stomp_examples = git_rmq rabbitmq-web-stomp-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt = git_rmq rabbitmq-web-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt_examples = git_rmq rabbitmq-web-mqtt-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_website = git_rmq rabbitmq-website $(current_rmq_ref) $(base_rmq_ref) live master
+dep_toke = git_rmq toke $(current_rmq_ref) $(base_rmq_ref) master
+
+dep_rabbitmq_public_umbrella = git_rmq rabbitmq-public-umbrella $(current_rmq_ref) $(base_rmq_ref) master
+
+# Third-party dependencies version pinning.
+#
+# We do this in this file, which is copied into all projects, to ensure
+# all projects use the same versions. It avoids conflicts and makes it
+# possible to work with rabbitmq-public-umbrella.
+
+dep_accept = hex 0.3.5
+dep_cowboy = hex 2.8.0
+dep_cowlib = hex 2.9.1
+dep_jsx = hex 2.11.0
+dep_lager = hex 3.8.0
+dep_prometheus = git https://github.com/deadtrickster/prometheus.erl.git master
+dep_ra = git https://github.com/rabbitmq/ra.git master
+dep_ranch = hex 1.7.1
+dep_recon = hex 2.5.1
+dep_observer_cli = hex 1.5.4
+dep_stdout_formatter = hex 0.2.4
+dep_sysmon_handler = hex 1.3.0
+
+RABBITMQ_COMPONENTS = amqp_client \
+ amqp10_common \
+ amqp10_client \
+ rabbit \
+ rabbit_common \
+ rabbitmq_amqp1_0 \
+ rabbitmq_auth_backend_amqp \
+ rabbitmq_auth_backend_cache \
+ rabbitmq_auth_backend_http \
+ rabbitmq_auth_backend_ldap \
+ rabbitmq_auth_backend_oauth2 \
+ rabbitmq_auth_mechanism_ssl \
+ rabbitmq_aws \
+ rabbitmq_boot_steps_visualiser \
+ rabbitmq_cli \
+ rabbitmq_codegen \
+ rabbitmq_consistent_hash_exchange \
+ rabbitmq_ct_client_helpers \
+ rabbitmq_ct_helpers \
+ rabbitmq_delayed_message_exchange \
+ rabbitmq_dotnet_client \
+ rabbitmq_event_exchange \
+ rabbitmq_federation \
+ rabbitmq_federation_management \
+ rabbitmq_java_client \
+ rabbitmq_jms_client \
+ rabbitmq_jms_cts \
+ rabbitmq_jms_topic_exchange \
+ rabbitmq_lvc_exchange \
+ rabbitmq_management \
+ rabbitmq_management_agent \
+ rabbitmq_management_exchange \
+ rabbitmq_management_themes \
+ rabbitmq_message_timestamp \
+ rabbitmq_metronome \
+ rabbitmq_mqtt \
+ rabbitmq_objc_client \
+ rabbitmq_peer_discovery_aws \
+ rabbitmq_peer_discovery_common \
+ rabbitmq_peer_discovery_consul \
+ rabbitmq_peer_discovery_etcd \
+ rabbitmq_peer_discovery_k8s \
+ rabbitmq_prometheus \
+ rabbitmq_random_exchange \
+ rabbitmq_recent_history_exchange \
+ rabbitmq_routing_node_stamp \
+ rabbitmq_rtopic_exchange \
+ rabbitmq_server_release \
+ rabbitmq_sharding \
+ rabbitmq_shovel \
+ rabbitmq_shovel_management \
+ rabbitmq_stomp \
+ rabbitmq_stream \
+ rabbitmq_toke \
+ rabbitmq_top \
+ rabbitmq_tracing \
+ rabbitmq_trust_store \
+ rabbitmq_web_dispatch \
+ rabbitmq_web_mqtt \
+ rabbitmq_web_mqtt_examples \
+ rabbitmq_web_stomp \
+ rabbitmq_web_stomp_examples \
+ rabbitmq_website
+
+# Erlang.mk does not rebuild dependencies by default once they have been
+# compiled, except for those listed in the `$(FORCE_REBUILD)` variable.
+#
+# We want all RabbitMQ components to always be rebuilt: this eases
+# working on several components at the same time.
+
+FORCE_REBUILD = $(RABBITMQ_COMPONENTS)
+
+# Several components have a custom erlang.mk/build.config, mainly
+# to disable eunit. Therefore, we can't use the top-level project's
+# erlang.mk copy.
+NO_AUTOPATCH += $(RABBITMQ_COMPONENTS)
+
+ifeq ($(origin current_rmq_ref),undefined)
+ifneq ($(wildcard .git),)
+current_rmq_ref := $(shell (\
+ ref=$$(LANG=C git branch --list | awk '/^\* \(.*detached / {ref=$$0; sub(/.*detached [^ ]+ /, "", ref); sub(/\)$$/, "", ref); print ref; exit;} /^\* / {ref=$$0; sub(/^\* /, "", ref); print ref; exit}');\
+ if test "$$(git rev-parse --short HEAD)" != "$$ref"; then echo "$$ref"; fi))
+else
+current_rmq_ref := master
+endif
+endif
+export current_rmq_ref
+
+ifeq ($(origin base_rmq_ref),undefined)
+ifneq ($(wildcard .git),)
+possible_base_rmq_ref := master
+ifeq ($(possible_base_rmq_ref),$(current_rmq_ref))
+base_rmq_ref := $(current_rmq_ref)
+else
+base_rmq_ref := $(shell \
+ (git rev-parse --verify -q master >/dev/null && \
+ git rev-parse --verify -q $(possible_base_rmq_ref) >/dev/null && \
+ git merge-base --is-ancestor $$(git merge-base master HEAD) $(possible_base_rmq_ref) && \
+ echo $(possible_base_rmq_ref)) || \
+ echo master)
+endif
+else
+base_rmq_ref := master
+endif
+endif
+export base_rmq_ref
+
+# Repository URL selection.
+#
+# First, we infer other components' location from the current project
+# repository URL, if it's a Git repository:
+# - We take the "origin" remote URL as the base
+# - The current project name and repository name are replaced by the
+#   target's properties:
+# eg. rabbitmq-common is replaced by rabbitmq-codegen
+# eg. rabbit_common is replaced by rabbitmq_codegen
+#
+# If cloning from this computed location fails, we fall back to the
+# RabbitMQ upstream, which is GitHub.
+
+# Macro to transform eg. "rabbit_common" to "rabbitmq-common".
+rmq_cmp_repo_name = $(word 2,$(dep_$(1)))
+
+# Upstream URL for the current project.
+RABBITMQ_COMPONENT_REPO_NAME := $(call rmq_cmp_repo_name,$(PROJECT))
+RABBITMQ_UPSTREAM_FETCH_URL ?= https://github.com/rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git
+RABBITMQ_UPSTREAM_PUSH_URL ?= git@github.com:rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git
+
+# Current URL for the current project. If this is not a Git clone,
+# default to the upstream Git repository.
+ifneq ($(wildcard .git),)
+git_origin_fetch_url := $(shell git config remote.origin.url)
+git_origin_push_url := $(shell git config remote.origin.pushurl || git config remote.origin.url)
+RABBITMQ_CURRENT_FETCH_URL ?= $(git_origin_fetch_url)
+RABBITMQ_CURRENT_PUSH_URL ?= $(git_origin_push_url)
+else
+RABBITMQ_CURRENT_FETCH_URL ?= $(RABBITMQ_UPSTREAM_FETCH_URL)
+RABBITMQ_CURRENT_PUSH_URL ?= $(RABBITMQ_UPSTREAM_PUSH_URL)
+endif
+
+# Macro to replace the following patterns:
+# 1. /foo.git -> /bar.git
+# 2. /foo -> /bar
+# 3. /foo/ -> /bar/
+subst_repo_name = $(patsubst %/$(1)/%,%/$(2)/%,$(patsubst %/$(1),%/$(2),$(patsubst %/$(1).git,%/$(2).git,$(3))))
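+
+# For example (hypothetical URL):
+#   $(call subst_repo_name,rabbitmq-common,rabbitmq-codegen,git@host:team/rabbitmq-common.git)
+# yields "git@host:team/rabbitmq-codegen.git".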
+
+# Macro to replace both the project's name (eg. "rabbit_common") and
+# repository name (eg. "rabbitmq-common") by the target's equivalent.
+#
+# This macro is kept on one line because we don't want whitespace in
+# the returned value, as it's used in $(dep_fetch_git_rmq) in a shell
+# single-quoted string.
+dep_rmq_repo = $(if $(dep_$(2)),$(call subst_repo_name,$(PROJECT),$(2),$(call subst_repo_name,$(RABBITMQ_COMPONENT_REPO_NAME),$(call rmq_cmp_repo_name,$(2)),$(1))),$(pkg_$(1)_repo))
+
+dep_rmq_commits = $(if $(dep_$(1)), \
+ $(wordlist 3,$(words $(dep_$(1))),$(dep_$(1))), \
+ $(pkg_$(1)_commit))
+
+define dep_fetch_git_rmq
+ fetch_url1='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_FETCH_URL),$(1))'; \
+ fetch_url2='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_FETCH_URL),$(1))'; \
+ if test "$$$$fetch_url1" != '$(RABBITMQ_CURRENT_FETCH_URL)' && \
+ git clone -q -n -- "$$$$fetch_url1" $(DEPS_DIR)/$(call dep_name,$(1)); then \
+ fetch_url="$$$$fetch_url1"; \
+ push_url='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_PUSH_URL),$(1))'; \
+ elif git clone -q -n -- "$$$$fetch_url2" $(DEPS_DIR)/$(call dep_name,$(1)); then \
+ fetch_url="$$$$fetch_url2"; \
+ push_url='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_PUSH_URL),$(1))'; \
+ fi; \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && ( \
+ $(foreach ref,$(call dep_rmq_commits,$(1)), \
+ git checkout -q $(ref) >/dev/null 2>&1 || \
+ ) \
+ (echo "error: no valid pathspec among: $(call dep_rmq_commits,$(1))" \
+ 1>&2 && false) ) && \
+ (test "$$$$fetch_url" = "$$$$push_url" || \
+ git remote set-url --push origin "$$$$push_url")
+endef
+
+# --------------------------------------------------------------------
+# Component distribution.
+# --------------------------------------------------------------------
+
+list-dist-deps::
+ @:
+
+prepare-dist::
+ @:
+
+# --------------------------------------------------------------------
+# Umbrella-specific settings.
+# --------------------------------------------------------------------
+
+# If the top-level project is a RabbitMQ component, we override
+# $(DEPS_DIR) for this project to point to the top-level's one.
+#
+# We also verify that the guessed DEPS_DIR is actually named `deps`, to
+# rule out any situation where finding a `rabbitmq-components.mk` in an
+# upper directory is just a coincidence.
+
+possible_deps_dir_1 = $(abspath ..)
+possible_deps_dir_2 = $(abspath ../../..)
+
+ifeq ($(notdir $(possible_deps_dir_1)),deps)
+ifneq ($(wildcard $(possible_deps_dir_1)/../rabbitmq-components.mk),)
+deps_dir_overriden = 1
+DEPS_DIR ?= $(possible_deps_dir_1)
+DISABLE_DISTCLEAN = 1
+endif
+endif
+
+ifeq ($(deps_dir_overriden),)
+ifeq ($(notdir $(possible_deps_dir_2)),deps)
+ifneq ($(wildcard $(possible_deps_dir_2)/../rabbitmq-components.mk),)
+deps_dir_overriden = 1
+DEPS_DIR ?= $(possible_deps_dir_2)
+DISABLE_DISTCLEAN = 1
+endif
+endif
+endif
+
+ifneq ($(wildcard UMBRELLA.md),)
+DISABLE_DISTCLEAN = 1
+endif
+
+# We disable `make distclean` so $(DEPS_DIR) is not accidentally removed.
+
+ifeq ($(DISABLE_DISTCLEAN),1)
+ifneq ($(filter distclean distclean-deps,$(MAKECMDGOALS)),)
+SKIP_DEPS = 1
+endif
+endif
diff --git a/deps/amqp10_client/src/amqp10_client.erl b/deps/amqp10_client/src/amqp10_client.erl
new file mode 100644
index 0000000000..4420cbd2b3
--- /dev/null
+++ b/deps/amqp10_client/src/amqp10_client.erl
@@ -0,0 +1,560 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(amqp10_client).
+
+-include("amqp10_client.hrl").
+-include_lib("amqp10_common/include/amqp10_framing.hrl").
+
+-export([open_connection/1,
+ open_connection/2,
+ close_connection/1,
+ begin_session/1,
+ begin_session_sync/1,
+ begin_session_sync/2,
+ end_session/1,
+ attach_sender_link/3,
+ attach_sender_link/4,
+ attach_sender_link/5,
+ attach_sender_link_sync/3,
+ attach_sender_link_sync/4,
+ attach_sender_link_sync/5,
+ attach_receiver_link/3,
+ attach_receiver_link/4,
+ attach_receiver_link/5,
+ attach_receiver_link/6,
+ attach_receiver_link/7,
+ attach_link/2,
+ detach_link/1,
+ send_msg/2,
+ accept_msg/2,
+ flow_link_credit/3,
+ flow_link_credit/4,
+ echo/1,
+ link_handle/1,
+ get_msg/1,
+ get_msg/2,
+ parse_uri/1
+ ]).
+
+-define(DEFAULT_TIMEOUT, 5000).
+
+-type snd_settle_mode() :: amqp10_client_session:snd_settle_mode().
+-type rcv_settle_mode() :: amqp10_client_session:rcv_settle_mode().
+
+-type terminus_durability() :: amqp10_client_session:terminus_durability().
+
+-type target_def() :: amqp10_client_session:target_def().
+-type source_def() :: amqp10_client_session:source_def().
+
+-type attach_role() :: amqp10_client_session:attach_role().
+-type attach_args() :: amqp10_client_session:attach_args().
+-type filter() :: amqp10_client_session:filter().
+-type properties() :: amqp10_client_session:properties().
+
+-type connection_config() :: amqp10_client_connection:connection_config().
+
+-opaque link_ref() :: #link_ref{}.
+
+-export_type([
+ link_ref/0,
+ snd_settle_mode/0,
+ rcv_settle_mode/0,
+ terminus_durability/0,
+ target_def/0,
+ source_def/0,
+ attach_role/0,
+ attach_args/0,
+ connection_config/0
+ ]).
+
+-ifdef (OTP_RELEASE).
+ -if(?OTP_RELEASE >= 23).
+ -compile({nowarn_deprecated_function, [{http_uri, decode, 1}]}).
+ -endif.
+-endif.
+
+%% @doc Convenience function for opening a connection providing only an
+%% address and port. This uses anonymous SASL authentication.
+%% This is asynchronous and will notify success/closure to the caller using
+%% an amqp10_event of the following format:
+%% {amqp10_event, {connection, ConnectionPid, opened | {closed, Why}}}
+-spec open_connection(inet:socket_address() | inet:hostname(),
+ inet:port_number()) -> supervisor:startchild_ret().
+open_connection(Addr, Port) ->
+ open_connection(#{address => Addr, port => Port, notify => self(),
+ sasl => anon}).
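+
+%% For example (hypothetical broker address), awaiting the opened event:
+%%   {ok, Conn} = amqp10_client:open_connection("localhost", 5672),
+%%   receive
+%%       {amqp10_event, {connection, Conn, opened}} -> ok
+%%   after 5000 -> exit(connection_timeout)
+%%   end.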
+
+%% @doc Opens a connection using a connection_config map
+%% This is asynchronous and will notify success/closure to the caller using
+%% an amqp10_event of the following format:
+%% {amqp10_event, {connection, ConnectionPid, opened | {closed, Why}}}
+-spec open_connection(connection_config()) ->
+ supervisor:startchild_ret().
+open_connection(ConnectionConfig0) ->
+ Notify = maps:get(notify, ConnectionConfig0, self()),
+ NotifyWhenOpened = maps:get(notify_when_opened, ConnectionConfig0, self()),
+ NotifyWhenClosed = maps:get(notify_when_closed, ConnectionConfig0, self()),
+ amqp10_client_connection:open(ConnectionConfig0#{
+ notify => Notify,
+ notify_when_opened => NotifyWhenOpened,
+ notify_when_closed => NotifyWhenClosed
+ }).
+
+%% @doc Closes a connection.
+%% This is asynchronous and will notify completion to the caller using
+%% an amqp10_event of the following format:
+%% {amqp10_event, {connection, ConnectionPid, {closed, Why}}}
+-spec close_connection(pid()) -> ok.
+close_connection(Pid) ->
+ amqp10_client_connection:close(Pid, none).
+
+%% @doc Begins an amqp10 session using 'Connection'.
+%% This is asynchronous and will notify success/closure to the caller using
+%% an amqp10_event of the following format:
+%% {amqp10_event, {session, SessionPid, begun | {ended, Why}}}
+-spec begin_session(pid()) -> supervisor:startchild_ret().
+begin_session(Connection) when is_pid(Connection) ->
+ amqp10_client_connection:begin_session(Connection).
+
+%% @doc Synchronously begins an amqp10 session using 'Connection'.
+%% This is a convenience function that awaits the 'begun' event
+%% for the newly created session before returning.
+-spec begin_session_sync(pid()) ->
+ supervisor:startchild_ret() | session_timeout.
+begin_session_sync(Connection) when is_pid(Connection) ->
+ begin_session_sync(Connection, ?DEFAULT_TIMEOUT).
+
+%% @doc Synchronously begins an amqp10 session using 'Connection'.
+%% This is a convenience function that awaits the 'begun' event
+%% for the newly created session before returning.
+-spec begin_session_sync(pid(), non_neg_integer()) ->
+ supervisor:startchild_ret() | session_timeout.
+begin_session_sync(Connection, Timeout) when is_pid(Connection) ->
+ case begin_session(Connection) of
+ {ok, Session} ->
+ receive
+ {amqp10_event, {session, Session, begun}} ->
+ {ok, Session};
+ {amqp10_event, {session, Session, {ended, Err}}} ->
+ {error, Err}
+ after Timeout -> session_timeout
+ end;
+ Ret -> Ret
+ end.
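+
+%% For example, once a connection (Conn) is opened:
+%%   {ok, Session} = amqp10_client:begin_session_sync(Conn).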
+
+%% @doc End an amqp10 session.
+%% This is asynchronous and will notify completion of the end request to the
+%% caller using an amqp10_event of the following format:
+%% {amqp10_event, {session, SessionPid, {ended, Why}}}
+-spec end_session(pid()) -> ok.
+end_session(Pid) ->
+ amqp10_client_session:'end'(Pid).
+
+%% @doc Synchronously attach a link on 'Session'.
+%% This is a convenience function that awaits the 'attached' event
+%% for the link before returning.
+attach_sender_link_sync(Session, Name, Target) ->
+ attach_sender_link_sync(Session, Name, Target, mixed).
+
+%% @doc Synchronously attach a link on 'Session'.
+%% This is a convenience function that awaits the 'attached' event
+%% for the link before returning.
+-spec attach_sender_link_sync(pid(), binary(), binary(),
+ snd_settle_mode()) ->
+ {ok, link_ref()} | link_timeout.
+attach_sender_link_sync(Session, Name, Target, SettleMode) ->
+ attach_sender_link_sync(Session, Name, Target, SettleMode, none).
+
+%% @doc Synchronously attach a link on 'Session'.
+%% This is a convenience function that awaits the 'attached' event
+%% for the link before returning.
+-spec attach_sender_link_sync(pid(), binary(), binary(),
+ snd_settle_mode(), terminus_durability()) ->
+ {ok, link_ref()} | link_timeout.
+attach_sender_link_sync(Session, Name, Target, SettleMode, Durability) ->
+ {ok, Ref} = attach_sender_link(Session, Name, Target, SettleMode,
+ Durability),
+ receive
+ {amqp10_event, {link, Ref, attached}} ->
+ {ok, Ref};
+ {amqp10_event, {link, Ref, {detached, Err}}} ->
+ {error, Err}
+ after ?DEFAULT_TIMEOUT -> link_timeout
+ end.
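+
+%% For example (hypothetical link name and target address; amqp10_msg:new/3
+%% is assumed to take a delivery tag, a body and a settled flag):
+%%   {ok, Sender} = amqp10_client:attach_sender_link_sync(
+%%                      Session, <<"my-sender">>, <<"a-queue">>),
+%%   ok = amqp10_client:send_msg(Sender,
+%%                               amqp10_msg:new(<<"tag-1">>, <<"hello">>, true)).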
+
+%% @doc Attaches a sender link to a target.
+%% This is asynchronous and will notify completion of the attach request to the
+%% caller using an amqp10_event of the following format:
+%% {amqp10_event, {link, LinkRef, attached | {detached, Why}}}
+-spec attach_sender_link(pid(), binary(), binary()) -> {ok, link_ref()}.
+attach_sender_link(Session, Name, Target) ->
+ % mixed should work with any type of msg
+ attach_sender_link(Session, Name, Target, mixed).
+
+%% @doc Attaches a sender link to a target.
+%% This is asynchronous and will notify completion of the attach request to the
+%% caller using an amqp10_event of the following format:
+%% {amqp10_event, {link, LinkRef, attached | {detached, Why}}}
+-spec attach_sender_link(pid(), binary(), binary(),
+ snd_settle_mode()) ->
+ {ok, link_ref()}.
+attach_sender_link(Session, Name, Target, SettleMode) ->
+ attach_sender_link(Session, Name, Target, SettleMode, none).
+
+%% @doc Attaches a sender link to a target.
+%% This is asynchronous and will notify completion of the attach request to the
+%% caller using an amqp10_event of the following format:
+%% {amqp10_event, {link, LinkRef, attached | {detached, Why}}}
+-spec attach_sender_link(pid(), binary(), binary(),
+ snd_settle_mode(), terminus_durability()) ->
+ {ok, link_ref()}.
+attach_sender_link(Session, Name, Target, SettleMode, Durability) ->
+ AttachArgs = #{name => Name,
+ role => {sender, #{address => Target,
+ durable => Durability}},
+ snd_settle_mode => SettleMode,
+ rcv_settle_mode => first},
+ amqp10_client_session:attach(Session, AttachArgs).
+
+%% @doc Attaches a receiver link to a source.
+%% This is asynchronous and will notify completion of the attach request to the
+%% caller using an amqp10_event of the following format:
+%% {amqp10_event, {link, LinkRef, attached | {detached, Why}}}
+-spec attach_receiver_link(pid(), binary(), binary()) ->
+ {ok, link_ref()}.
+attach_receiver_link(Session, Name, Source) ->
+ attach_receiver_link(Session, Name, Source, settled).
+
+%% @doc Attaches a receiver link to a source.
+%% This is asynchronous and will notify completion of the attach request to the
+%% caller using an amqp10_event of the following format:
+%% {amqp10_event, {link, LinkRef, attached | {detached, Why}}}
+-spec attach_receiver_link(pid(), binary(), binary(),
+ snd_settle_mode()) ->
+ {ok, link_ref()}.
+attach_receiver_link(Session, Name, Source, SettleMode) ->
+ attach_receiver_link(Session, Name, Source, SettleMode, none).
+
+%% @doc Attaches a receiver link to a source.
+%% This is asynchronous and will notify completion of the attach request to the
+%% caller using an amqp10_event of the following format:
+%% {amqp10_event, {link, LinkRef, attached | {detached, Why}}}
+-spec attach_receiver_link(pid(), binary(), binary(),
+ snd_settle_mode(), terminus_durability()) ->
+ {ok, link_ref()}.
+attach_receiver_link(Session, Name, Source, SettleMode, Durability) ->
+ attach_receiver_link(Session, Name, Source, SettleMode, Durability, #{}).
+
+%% @doc Attaches a receiver link to a source.
+%% This is asynchronous and will notify completion of the attach request to the
+%% caller using an amqp10_event of the following format:
+%% {amqp10_event, {link, LinkRef, attached | {detached, Why}}}
+-spec attach_receiver_link(pid(), binary(), binary(),
+ snd_settle_mode(), terminus_durability(), filter()) ->
+ {ok, link_ref()}.
+attach_receiver_link(Session, Name, Source, SettleMode, Durability, Filter) ->
+ attach_receiver_link(Session, Name, Source, SettleMode, Durability, Filter, #{}).
+
+%% @doc Attaches a receiver link to a source.
+%% This is asynchronous and will notify completion of the attach request to the
+%% caller using an amqp10_event of the following format:
+%% {amqp10_event, {link, LinkRef, attached | {detached, Why}}}
+-spec attach_receiver_link(pid(), binary(), binary(),
+ snd_settle_mode(), terminus_durability(), filter(),
+ properties()) ->
+ {ok, link_ref()}.
+attach_receiver_link(Session, Name, Source, SettleMode, Durability, Filter, Properties) ->
+ AttachArgs = #{name => Name,
+ role => {receiver, #{address => Source,
+ durable => Durability}, self()},
+ snd_settle_mode => SettleMode,
+ rcv_settle_mode => first,
+ filter => Filter,
+ properties => Properties},
+ amqp10_client_session:attach(Session, AttachArgs).
+
+-spec attach_link(pid(), attach_args()) -> {ok, link_ref()}.
+attach_link(Session, AttachArgs) ->
+ amqp10_client_session:attach(Session, AttachArgs).
+
+%% @doc Detaches a link.
+%% This is asynchronous and will notify completion of the detach request to the
+%% caller using an amqp10_event of the following format:
+%% {amqp10_event, {link, LinkRef, {detached, Why}}}
+-spec detach_link(link_ref()) -> _.
+detach_link(#link_ref{link_handle = Handle, session = Session}) ->
+ amqp10_client_session:detach(Session, Handle).
+
+%% @doc Grant credit to a sender.
+%% The amqp10_client will automatically grant more credit to the sender when
+%% the remaining link credit falls below the value of RenewWhenBelow.
+%% If RenewWhenBelow is 'never' the client will never grant new credit. Instead
+%% the caller will be notified when the link_credit reaches 0 with an
+%% amqp10_event of the following format:
+%% {amqp10_event, {link, LinkRef, credit_exhausted}}
+-spec flow_link_credit(link_ref(), Credit :: non_neg_integer(),
+ RenewWhenBelow :: never | non_neg_integer()) -> ok.
+flow_link_credit(Ref, Credit, RenewWhenBelow) ->
+ flow_link_credit(Ref, Credit, RenewWhenBelow, false).
+
+-spec flow_link_credit(link_ref(), Credit :: non_neg_integer(),
+ RenewWhenBelow :: never | non_neg_integer(),
+ Drain :: boolean()) -> ok.
+flow_link_credit(#link_ref{role = receiver, session = Session,
+ link_handle = Handle},
+ Credit, RenewWhenBelow, Drain) ->
+ Flow = #'v1_0.flow'{link_credit = {uint, Credit},
+ drain = Drain},
+ ok = amqp10_client_session:flow(Session, Handle, Flow, RenewWhenBelow).
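+
+%% For example, given a receiver link ref, granting 10 credits to the
+%% sender and renewing them whenever fewer than 3 remain:
+%%   ok = amqp10_client:flow_link_credit(Receiver, 10, 3).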
+
+%% @doc Request that the sender's flow state is echoed back.
+%% This may be used to determine when the link has finally quiesced.
+%% See §2.6.10 of the AMQP 1.0 specification.
+echo(#link_ref{role = receiver, session = Session,
+ link_handle = Handle}) ->
+ Flow = #'v1_0.flow'{link_credit = {uint, 0},
+ echo = true},
+ ok = amqp10_client_session:flow(Session, Handle, Flow, 0).
+
+%%% messages
+
+%% @doc Send a message on the link referred to by the 'LinkRef'.
+%% Returns ok for "async" transfers when messages are sent with settled=true,
+%% else it returns the delivery state from the disposition.
+-spec send_msg(link_ref(), amqp10_msg:amqp10_msg()) ->
+ ok | {error, insufficient_credit | link_not_found | half_attached}.
+send_msg(#link_ref{role = sender, session = Session,
+ link_handle = Handle}, Msg0) ->
+ Msg = amqp10_msg:set_handle(Handle, Msg0),
+ amqp10_client_session:transfer(Session, Msg, ?DEFAULT_TIMEOUT).
+
+%% @doc Accept a message on the link referred to by the 'LinkRef'.
+-spec accept_msg(link_ref(), amqp10_msg:amqp10_msg()) -> ok.
+accept_msg(#link_ref{role = receiver, session = Session}, Msg) ->
+ DeliveryId = amqp10_msg:delivery_id(Msg),
+ amqp10_client_session:disposition(Session, receiver, DeliveryId,
+ DeliveryId, true, accepted).
+
+%% @doc Get a single message from a link.
+%% Flows a single link credit then awaits delivery or timeout.
+-spec get_msg(link_ref()) -> {ok, amqp10_msg:amqp10_msg()} | {error, timeout}.
+get_msg(LinkRef) ->
+ get_msg(LinkRef, ?DEFAULT_TIMEOUT).
+
+%% @doc Get a single message from a link.
+%% Flows a single link credit then awaits delivery or timeout.
+-spec get_msg(link_ref(), non_neg_integer()) ->
+ {ok, amqp10_msg:amqp10_msg()} | {error, timeout}.
+get_msg(#link_ref{role = receiver} = Ref, Timeout) ->
+    % flow a single credit
+    ok = flow_link_credit(Ref, 1, never),
+    % then wait for the transfer
+ receive
+ {amqp10_msg, Ref, Message} -> {ok, Message}
+ after Timeout ->
+ {error, timeout}
+ end.
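+
+%% For example (assuming amqp10_msg:body/1 to read the payload):
+%%   {ok, Msg} = amqp10_client:get_msg(Receiver),
+%%   Body = amqp10_msg:body(Msg).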
+
+%% @doc Get the link handle from a LinkRef
+-spec link_handle(link_ref()) -> non_neg_integer().
+link_handle(#link_ref{link_handle = Handle}) -> Handle.
+
+
+-spec parse_uri(string()) ->
+ {ok, connection_config()} | {error, term()}.
+parse_uri(Uri) ->
+ case uri_string:parse(Uri) of
+ Map when is_map(Map) ->
+ try
+ {ok, parse_result(Map)}
+ catch
+ throw:Err -> {error, Err}
+ end;
+ {error, _, _} = Err -> Err
+ end.
+
+parse_result(Map) ->
+ _ = case maps:get(path, Map, "/") of
+ "/" -> ok;
+ "" -> ok;
+ _ -> throw(path_segment_not_supported)
+ end,
+ Scheme = maps:get(scheme, Map, "amqp"),
+ UserInfo = maps:get(userinfo, Map, undefined),
+ Host = maps:get(host, Map),
+ DefaultPort = case Scheme of
+ "amqp" -> 5672;
+ "amqps" -> 5671
+ end,
+ Port = maps:get(port, Map, DefaultPort),
+ Query0 = maps:get(query, Map, ""),
+ Query = maps:from_list(uri_string:dissect_query(Query0)),
+ Sasl = case Query of
+ #{"sasl" := "anon"} -> anon;
+ #{"sasl" := "plain"} when UserInfo =:= undefined orelse length(UserInfo) =:= 0 ->
+ throw(plain_sasl_missing_userinfo);
+ _ ->
+ case UserInfo of
+ [] -> none;
+ undefined -> none;
+ U -> parse_usertoken(U)
+ end
+ end,
+ Ret0 = maps:fold(fun("idle_time_out", V, Acc) ->
+ Acc#{idle_time_out => list_to_integer(V)};
+ ("max_frame_size", V, Acc) ->
+ Acc#{max_frame_size => list_to_integer(V)};
+ ("hostname", V, Acc) ->
+ Acc#{hostname => list_to_binary(V)};
+ ("container_id", V, Acc) ->
+ Acc#{container_id => list_to_binary(V)};
+ ("transfer_limit_margin", V, Acc) ->
+ Acc#{transfer_limit_margin => list_to_integer(V)};
+ (_, _, Acc) -> Acc
+ end, #{address => Host,
+ hostname => to_binary(Host),
+ port => Port,
+ sasl => Sasl}, Query),
+ case Scheme of
+ "amqp" -> Ret0;
+ "amqps" ->
+ TlsOpts = parse_tls_opts(Query),
+ Ret0#{tls_opts => {secure_port, TlsOpts}}
+ end.
+
+
+parse_usertoken(undefined) ->
+ none;
+parse_usertoken("") ->
+ none;
+parse_usertoken(U) ->
+ [User, Pass] = string:tokens(U, ":"),
+ {plain, to_binary(http_uri:decode(User)), to_binary(http_uri:decode(Pass))}.
+
+parse_tls_opts(M) ->
+ lists:sort(maps:fold(fun parse_tls_opt/3, [], M)).
+
+parse_tls_opt(K, V, Acc)
+ when K =:= "cacertfile" orelse
+ K =:= "certfile" orelse
+ K =:= "keyfile" ->
+ [{to_atom(K), V} | Acc];
+parse_tls_opt("verify", V, Acc)
+ when V =:= "verify_none" orelse
+ V =:= "verify_peer" ->
+ [{verify, to_atom(V)} | Acc];
+parse_tls_opt("verify", _V, _Acc) ->
+ throw({invalid_option, verify});
+parse_tls_opt("versions", V, Acc) ->
+ Parts = string:tokens(V, ","),
+ Versions = [try_to_existing_atom(P) || P <- Parts],
+ [{versions, Versions} | Acc];
+parse_tls_opt("fail_if_no_peer_cert", V, Acc)
+ when V =:= "true" orelse
+ V =:= "false" ->
+ [{fail_if_no_peer_cert, to_atom(V)} | Acc];
+parse_tls_opt("fail_if_no_peer_cert", _V, _Acc) ->
+ throw({invalid_option, fail_if_no_peer_cert});
+parse_tls_opt("server_name_indication", "disable", Acc) ->
+ [{server_name_indication, disable} | Acc];
+parse_tls_opt("server_name_indication", V, Acc) ->
+ [{server_name_indication, V} | Acc];
+parse_tls_opt(_K, _V, Acc) ->
+ Acc.
+
+to_atom(X) when is_list(X) -> list_to_atom(X).
+to_binary(X) when is_list(X) -> list_to_binary(X).
+
+try_to_existing_atom(L) when is_list(L) ->
+ try list_to_existing_atom(L) of
+ A -> A
+ catch
+ _:badarg ->
+ throw({non_existent_atom, L})
+ end.
+
+
+-ifdef(TEST).
+-include_lib("eunit/include/eunit.hrl").
+
+parse_uri_test_() ->
+ [?_assertEqual({ok, #{address => "my_host",
+ port => 9876,
+ hostname => <<"my_host">>,
+ sasl => none}}, parse_uri("amqp://my_host:9876")),
+ %% port defaults
+ ?_assertMatch({ok, #{port := 5671}}, parse_uri("amqps://my_host")),
+ ?_assertMatch({ok, #{port := 5672}}, parse_uri("amqp://my_host")),
+ ?_assertEqual({ok, #{address => "my_proxy",
+ port => 9876,
+ hostname => <<"my_host">>,
+ container_id => <<"my_container">>,
+ idle_time_out => 60000,
+ max_frame_size => 512,
+ tls_opts => {secure_port, []},
+ sasl => {plain, <<"fred">>, <<"passw">>}}},
+ parse_uri("amqps://fred:passw@my_proxy:9876?sasl=plain&"
+ "hostname=my_host&container_id=my_container&"
+ "max_frame_size=512&idle_time_out=60000")),
+     %% ensure URI-encoded usernames and passwords are decoded
+ ?_assertEqual({ok, #{address => "my_proxy",
+ port => 9876,
+ hostname => <<"my_proxy">>,
+ sasl => {plain, <<"fr/ed">>, <<"pa/ssw">>}}},
+ parse_uri("amqp://fr%2Fed:pa%2Fssw@my_proxy:9876")),
+ %% make sasl plain implicit when username and password is present
+ ?_assertEqual({ok, #{address => "my_proxy",
+ port => 9876,
+ hostname => <<"my_proxy">>,
+ sasl => {plain, <<"fred">>, <<"passw">>}}},
+ parse_uri("amqp://fred:passw@my_proxy:9876")),
+ ?_assertEqual(
+ {ok, #{address => "my_proxy", port => 9876,
+ hostname => <<"my_proxy">>,
+ tls_opts => {secure_port, [{cacertfile, "/etc/cacertfile.pem"},
+ {certfile, "/etc/certfile.pem"},
+ {fail_if_no_peer_cert, true},
+ {keyfile, "/etc/keyfile.key"},
+ {server_name_indication, "frazzle"},
+ {verify, verify_none},
+ {versions, ['tlsv1.1', 'tlsv1.2']}
+ ]},
+ sasl => {plain, <<"fred">>, <<"passw">>}}},
+ parse_uri("amqps://fred:passw@my_proxy:9876?sasl=plain"
+ "&cacertfile=/etc/cacertfile.pem&certfile=/etc/certfile.pem"
+ "&keyfile=/etc/keyfile.key&verify=verify_none"
+ "&fail_if_no_peer_cert=true"
+ "&server_name_indication=frazzle"
+ "&versions=tlsv1.1,tlsv1.2")),
+ %% invalid tls version
+ ?_assertEqual({error, {non_existent_atom, "tlsv1.9999999"}},
+ parse_uri("amqps://fred:passw@my_proxy:9876?sasl=plain&" ++
+ "versions=tlsv1.1,tlsv1.9999999")),
+ ?_assertEqual(
+ {ok, #{address => "my_proxy",
+ port => 9876,
+ hostname => <<"my_proxy">>,
+ tls_opts => {secure_port, [{server_name_indication, disable}]},
+ sasl => {plain, <<"fred">>, <<"passw">>}}},
+ parse_uri("amqps://fred:passw@my_proxy:9876?sasl=plain"
+ "&server_name_indication=disable")),
+ ?_assertEqual({error, {invalid_option, verify}},
+ parse_uri("amqps://fred:passw@my_proxy:9876?sasl=plain&" ++
+ "cacertfile=/etc/cacertfile.pem&certfile=/etc/certfile.pem&" ++
+ "keyfile=/etc/keyfile.key&verify=verify_bananas")),
+ ?_assertEqual({error, {invalid_option, fail_if_no_peer_cert}},
+ parse_uri("amqps://fred:passw@my_proxy:9876?sasl=plain&" ++
+ "cacertfile=/etc/cacertfile.pem&certfile=/etc/certfile.pem&" ++
+ "keyfile=/etc/keyfile.key&fail_if_no_peer_cert=banana")),
+ ?_assertEqual({error, plain_sasl_missing_userinfo},
+ parse_uri("amqp://my_host:9876?sasl=plain")),
+ ?_assertEqual({error, path_segment_not_supported},
+ parse_uri("amqp://my_host/my_path_segment:9876"))
+ ].
+
+-endif.
diff --git a/deps/amqp10_client/src/amqp10_client.hrl b/deps/amqp10_client/src/amqp10_client.hrl
new file mode 100644
index 0000000000..0f2b6917cb
--- /dev/null
+++ b/deps/amqp10_client/src/amqp10_client.hrl
@@ -0,0 +1,24 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-define(AMQP_PROTOCOL_HEADER, <<"AMQP", 0, 1, 0, 0>>).
+-define(SASL_PROTOCOL_HEADER, <<"AMQP", 3, 1, 0, 0>>).
+-define(MIN_MAX_FRAME_SIZE, 512).
+-define(MAX_MAX_FRAME_SIZE, 1024 * 1024).
+-define(FRAME_HEADER_SIZE, 8).
+
+-define(TIMEOUT, 5000).
+
+% -define(debug, true).
+-ifdef(debug).
+-define(DBG(F, A), error_logger:info_msg(F, A)).
+-else.
+-define(DBG(F, A), ok).
+-endif.
+
+-record(link_ref, {role :: sender | receiver, session :: pid(),
+ link_handle :: non_neg_integer()}).
diff --git a/deps/amqp10_client/src/amqp10_client_app.erl b/deps/amqp10_client/src/amqp10_client_app.erl
new file mode 100644
index 0000000000..7b5271335b
--- /dev/null
+++ b/deps/amqp10_client/src/amqp10_client_app.erl
@@ -0,0 +1,38 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(amqp10_client_app).
+
+-behaviour(application).
+
+%% Application callbacks
+-export([start/2,
+ stop/1]).
+
+-type start_type() :: (
+ normal |
+ {takeover, Node :: node()} |
+ {failover, Node :: node()}
+ ).
+-type state() :: term().
+
+%%====================================================================
+%% API
+%%====================================================================
+
+-spec start(StartType :: start_type(), StartArgs :: term()) ->
+ {ok, Pid :: pid()} | {ok, Pid :: pid(), State :: state()} | {error, Reason :: term()}.
+start(_Type, _Args) ->
+ amqp10_client_sup:start_link().
+
+-spec stop(State :: state()) -> ok.
+stop(_State) ->
+ ok.
+
+%%====================================================================
+%% Internal functions
+%%====================================================================
diff --git a/deps/amqp10_client/src/amqp10_client_connection.erl b/deps/amqp10_client/src/amqp10_client_connection.erl
new file mode 100644
index 0000000000..5858e36c5a
--- /dev/null
+++ b/deps/amqp10_client/src/amqp10_client_connection.erl
@@ -0,0 +1,489 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(amqp10_client_connection).
+
+-behaviour(gen_statem).
+
+-include("amqp10_client.hrl").
+-include_lib("amqp10_common/include/amqp10_framing.hrl").
+
+-ifdef(nowarn_deprecated_gen_fsm).
+-compile({nowarn_deprecated_function,
+ [{gen_fsm, reply, 2},
+ {gen_fsm, send_all_state_event, 2},
+ {gen_fsm, send_event, 2},
+ {gen_fsm, start_link, 3},
+ {gen_fsm, sync_send_all_state_event, 2}]}).
+-endif.
+
+%% Public API.
+-export([open/1,
+ close/2]).
+
+%% Private API.
+-export([start_link/2,
+ socket_ready/2,
+ protocol_header_received/5,
+ begin_session/1,
+ heartbeat/1]).
+
+%% gen_statem callbacks.
+-export([init/1,
+ callback_mode/0,
+ terminate/3,
+ code_change/4]).
+
+%% gen_statem state callbacks.
+-export([expecting_socket/3,
+ sasl_hdr_sent/3,
+ sasl_hdr_rcvds/3,
+ sasl_init_sent/3,
+ hdr_sent/3,
+ open_sent/3,
+ opened/3,
+ close_sent/3]).
+
+-type amqp10_socket() :: {tcp, gen_tcp:socket()} | {ssl, ssl:sslsocket()}.
+
+-type milliseconds() :: non_neg_integer().
+
+-type address() :: inet:socket_address() | inet:hostname().
+
+-type connection_config() ::
+ #{container_id => binary(), % AMQP container id
+ hostname => binary(), % the dns name of the target host
+ addresses => [address()],
+ address => address(),
+ port => inet:port_number(),
+ tls_opts => {secure_port, [ssl:ssl_option()]},
+ notify => pid() | none, % the pid to send connection events to
+ notify_when_opened => pid() | none,
+ notify_when_closed => pid() | none,
+      max_frame_size => non_neg_integer(), % TODO: constrain to larger than 512
+ outgoing_max_frame_size => non_neg_integer() | undefined,
+ idle_time_out => milliseconds(),
+ % set to a negative value to allow a sender to "overshoot" the flow
+ % control by this margin
+ transfer_limit_margin => 0 | neg_integer(),
+      sasl => none | anon | {plain, User :: binary(), Pwd :: binary()}
+ }.
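+
+%% For example, a minimal configuration using SASL PLAIN
+%% (hypothetical credentials):
+%%   #{address => "localhost",
+%%     port => 5672,
+%%     notify => self(),
+%%     sasl => {plain, <<"guest">>, <<"guest">>}}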
+
+-record(state,
+ {next_channel = 1 :: pos_integer(),
+ connection_sup :: pid(),
+ reader_m_ref :: reference() | undefined,
+ sessions_sup :: pid() | undefined,
+ pending_session_reqs = [] :: [term()],
+ reader :: pid() | undefined,
+ socket :: amqp10_socket() | undefined,
+ idle_time_out :: non_neg_integer() | undefined,
+ heartbeat_timer :: timer:tref() | undefined,
+ config :: connection_config()
+ }).
+
+-export_type([connection_config/0,
+ amqp10_socket/0]).
+
+-define(DEFAULT_TIMEOUT, 5000).
+
+%% -------------------------------------------------------------------
+%% Public API.
+%% -------------------------------------------------------------------
+
+-spec open(connection_config()) -> supervisor:startchild_ret().
+open(Config) ->
+ %% Start the supervision tree dedicated to that connection. It
+ %% starts at least a connection process (the PID we want to return)
+ %% and a reader process (responsible for opening and reading the
+ %% socket).
+ case supervisor:start_child(amqp10_client_sup, [Config]) of
+ {ok, ConnSup} ->
+ %% We query the PIDs of the connection and reader processes. The
+ %% reader process needs to know the connection PID to send it the
+ %% socket.
+ Children = supervisor:which_children(ConnSup),
+ {_, Reader, _, _} = lists:keyfind(reader, 1, Children),
+ {_, Connection, _, _} = lists:keyfind(connection, 1, Children),
+ {_, SessionsSup, _, _} = lists:keyfind(sessions, 1, Children),
+ set_other_procs(Connection, #{sessions_sup => SessionsSup,
+ reader => Reader}),
+ {ok, Connection};
+ Error ->
+ Error
+ end.
+
+-spec close(pid(), {amqp10_client_types:amqp_error()
+ | amqp10_client_types:connection_error(), binary()} | none) -> ok.
+close(Pid, Reason) ->
+ gen_statem:cast(Pid, {close, Reason}).
+
+%% -------------------------------------------------------------------
+%% Private API.
+%% -------------------------------------------------------------------
+
+start_link(Sup, Config) ->
+ gen_statem:start_link(?MODULE, [Sup, Config], []).
+
+set_other_procs(Pid, OtherProcs) ->
+ gen_statem:cast(Pid, {set_other_procs, OtherProcs}).
+
+-spec socket_ready(pid(), amqp10_socket()) -> ok.
+socket_ready(Pid, Socket) ->
+ gen_statem:cast(Pid, {socket_ready, Socket}).
+
+-spec protocol_header_received(pid(), 0 | 3, non_neg_integer(),
+ non_neg_integer(), non_neg_integer()) -> ok.
+protocol_header_received(Pid, Protocol, Maj, Min, Rev) ->
+ gen_statem:cast(Pid, {protocol_header_received, Protocol, Maj, Min, Rev}).
+
+-spec begin_session(pid()) -> supervisor:startchild_ret().
+begin_session(Pid) ->
+ gen_statem:call(Pid, begin_session, {dirty_timeout, ?TIMEOUT}).
+
+heartbeat(Pid) ->
+ gen_statem:cast(Pid, heartbeat).
+
+%% -------------------------------------------------------------------
+%% gen_statem callbacks.
+%% -------------------------------------------------------------------
+
+callback_mode() -> [state_functions].
+
+init([Sup, Config0]) ->
+ process_flag(trap_exit, true),
+ Config = maps:merge(config_defaults(), Config0),
+ {ok, expecting_socket, #state{connection_sup = Sup,
+ config = Config}}.
+
+expecting_socket(_EvtType, {socket_ready, Socket},
+ State = #state{config = Cfg}) ->
+ State1 = State#state{socket = Socket},
+ case Cfg of
+ #{sasl := none} ->
+ ok = socket_send(Socket, ?AMQP_PROTOCOL_HEADER),
+ {next_state, hdr_sent, State1};
+ _ ->
+ ok = socket_send(Socket, ?SASL_PROTOCOL_HEADER),
+ {next_state, sasl_hdr_sent, State1}
+ end;
+expecting_socket(_EvtType, {set_other_procs, OtherProcs}, State) ->
+ {keep_state, set_other_procs0(OtherProcs, State)};
+expecting_socket({call, From}, begin_session,
+ #state{pending_session_reqs = PendingSessionReqs} = State) ->
+ %% The caller already asked for a new session but the connection
+ %% isn't fully opened. Let's queue this request until the connection
+ %% is ready.
+ State1 = State#state{pending_session_reqs = [From | PendingSessionReqs]},
+ {keep_state, State1}.
+
+sasl_hdr_sent(_EvtType, {protocol_header_received, 3, 1, 0, 0}, State) ->
+ {next_state, sasl_hdr_rcvds, State};
+sasl_hdr_sent({call, From}, begin_session,
+ #state{pending_session_reqs = PendingSessionReqs} = State) ->
+ State1 = State#state{pending_session_reqs = [From | PendingSessionReqs]},
+ {keep_state, State1}.
+
+sasl_hdr_rcvds(_EvtType, #'v1_0.sasl_mechanisms'{
+ sasl_server_mechanisms = {array, symbol, Mechs}},
+ State = #state{config = #{sasl := Sasl}}) ->
+ SaslBin = {symbol, sasl_to_bin(Sasl)},
+ case lists:any(fun(S) when S =:= SaslBin -> true;
+ (_) -> false
+ end, Mechs) of
+ true ->
+ ok = send_sasl_init(State, Sasl),
+ {next_state, sasl_init_sent, State};
+ false ->
+ {stop, {sasl_not_supported, Sasl}, State}
+ end;
+sasl_hdr_rcvds({call, From}, begin_session,
+ #state{pending_session_reqs = PendingSessionReqs} = State) ->
+ State1 = State#state{pending_session_reqs = [From | PendingSessionReqs]},
+ {keep_state, State1}.
+
+sasl_init_sent(_EvtType, #'v1_0.sasl_outcome'{code = {ubyte, 0}},
+ #state{socket = Socket} = State) ->
+ ok = socket_send(Socket, ?AMQP_PROTOCOL_HEADER),
+ {next_state, hdr_sent, State};
+sasl_init_sent(_EvtType, #'v1_0.sasl_outcome'{code = {ubyte, C}},
+ #state{} = State) when C==1;C==2;C==3;C==4 ->
+ {stop, sasl_auth_failure, State};
+sasl_init_sent({call, From}, begin_session,
+ #state{pending_session_reqs = PendingSessionReqs} = State) ->
+ State1 = State#state{pending_session_reqs = [From | PendingSessionReqs]},
+ {keep_state, State1}.
+
+hdr_sent(_EvtType, {protocol_header_received, 0, 1, 0, 0}, State) ->
+ case send_open(State) of
+ ok -> {next_state, open_sent, State};
+ Error -> {stop, Error, State}
+ end;
+hdr_sent(_EvtType, {protocol_header_received, Protocol, Maj, Min,
+ Rev}, State) ->
+ error_logger:warning_msg("Unsupported protocol version: ~b ~b.~b.~b~n",
+ [Protocol, Maj, Min, Rev]),
+ {stop, normal, State};
+hdr_sent({call, From}, begin_session,
+ #state{pending_session_reqs = PendingSessionReqs} = State) ->
+ State1 = State#state{pending_session_reqs = [From | PendingSessionReqs]},
+ {keep_state, State1}.
+
+open_sent(_EvtType, #'v1_0.open'{max_frame_size = MFSz,
+ idle_time_out = Timeout},
+ #state{pending_session_reqs = PendingSessionReqs,
+ config = Config} = State0) ->
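+    %% If the peer advertised an idle-time-out, send heartbeats at half
+    %% that interval so the peer never considers the connection idle.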
+ State = case Timeout of
+ undefined -> State0;
+ {uint, T} when T > 0 ->
+ {ok, Tmr} = start_heartbeat_timer(T div 2),
+ State0#state{idle_time_out = T div 2,
+ heartbeat_timer = Tmr};
+ _ -> State0
+ end,
+ State1 = State#state{config =
+ Config#{outgoing_max_frame_size => unpack(MFSz)}},
+ State2 = lists:foldr(
+ fun(From, S0) ->
+ {Ret, S2} = handle_begin_session(From, S0),
+ _ = gen_statem:reply(From, Ret),
+ S2
+ end, State1, PendingSessionReqs),
+ ok = notify_opened(Config),
+ {next_state, opened, State2};
+open_sent({call, From}, begin_session,
+ #state{pending_session_reqs = PendingSessionReqs} = State) ->
+ State1 = State#state{pending_session_reqs = [From | PendingSessionReqs]},
+ {keep_state, State1}.
+
+opened(_EvtType, heartbeat, State = #state{idle_time_out = T}) ->
+ ok = send_heartbeat(State),
+ {ok, Tmr} = start_heartbeat_timer(T),
+ {keep_state, State#state{heartbeat_timer = Tmr}};
+opened(_EvtType, {close, Reason}, State = #state{config = Config}) ->
+ %% We send the first close frame and wait for the reply.
+    %% TODO: stop all sessions from writing
+    %% We could still accept incoming frames (see spec section 2.4.6)
+ ok = notify_closed(Config, Reason),
+ case send_close(State, Reason) of
+ ok -> {next_state, close_sent, State};
+ {error, closed} -> {stop, normal, State};
+ Error -> {stop, Error, State}
+ end;
+opened(_EvtType, #'v1_0.close'{error = Error}, State = #state{config = Config}) ->
+ %% We receive the first close frame, reply and terminate.
+ ok = notify_closed(Config, translate_err(Error)),
+ _ = send_close(State, none),
+ {stop, normal, State};
+opened({call, From}, begin_session, State) ->
+ {Ret, State1} = handle_begin_session(From, State),
+ {keep_state, State1, [{reply, From, Ret}]};
+opened(info, {'DOWN', MRef, _, _, _Info},
+ State = #state{reader_m_ref = MRef, config = Config}) ->
+ %% reader has gone down and we are not already shutting down
+ ok = notify_closed(Config, shutdown),
+ {stop, normal, State};
+opened(_EvtType, Frame, State) ->
+ error_logger:warning_msg("Unexpected connection frame ~p when in state ~p ~n",
+ [Frame, State]),
+ {keep_state, State}.
+
+close_sent(_EvtType, heartbeat, State) ->
+ {next_state, close_sent, State};
+close_sent(_EvtType, #'v1_0.close'{}, State) ->
+    %% TODO: we should probably set up a timer before this to ensure
+    %% we close down even if no reply is received
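+    %% A possible sketch, using a gen_statem state timeout when entering
+    %% this state (?TIMEOUT is this module's call timeout; any sensible
+    %% duration would do):
+    %%   {next_state, close_sent, State, [{state_timeout, ?TIMEOUT, close}]}
+    %% paired with a clause such as:
+    %%   close_sent(state_timeout, close, State) -> {stop, normal, State};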
+ {stop, normal, State}.
+
+set_other_procs0(OtherProcs, State) ->
+ #{sessions_sup := SessionsSup,
+ reader := Reader} = OtherProcs,
+ ReaderMRef = monitor(process, Reader),
+ amqp10_client_frame_reader:set_connection(Reader, self()),
+ State#state{sessions_sup = SessionsSup,
+ reader_m_ref = ReaderMRef,
+ reader = Reader}.
+
+terminate(Reason, _StateName, #state{connection_sup = Sup,
+ config = Config}) ->
+ ok = notify_closed(Config, Reason),
+ case Reason of
+ normal -> sys:terminate(Sup, normal);
+ _ -> ok
+ end,
+ ok.
+
+code_change(_OldVsn, StateName, State, _Extra) ->
+ {ok, StateName, State}.
+
+%% -------------------------------------------------------------------
+%% Internal functions.
+%% -------------------------------------------------------------------
+
+handle_begin_session({FromPid, _Ref},
+ #state{sessions_sup = Sup, reader = Reader,
+ next_channel = Channel,
+ config = Config} = State) ->
+ Ret = supervisor:start_child(Sup, [FromPid, Channel, Reader, Config]),
+ State1 = case Ret of
+ {ok, _} -> State#state{next_channel = Channel + 1};
+ _ -> State
+ end,
+ {Ret, State1}.
+
+send_open(#state{socket = Socket, config = Config}) ->
+ {ok, Product} = application:get_key(description),
+ {ok, Version} = application:get_key(vsn),
+ Platform = "Erlang/OTP " ++ erlang:system_info(otp_release),
+ Props = {map, [{{symbol, <<"product">>},
+ {utf8, list_to_binary(Product)}},
+ {{symbol, <<"version">>},
+ {utf8, list_to_binary(Version)}},
+ {{symbol, <<"platform">>},
+ {utf8, list_to_binary(Platform)}}
+ ]},
+ ContainerId = maps:get(container_id, Config, generate_container_id()),
+ IdleTimeOut = maps:get(idle_time_out, Config, 0),
+ Open0 = #'v1_0.open'{container_id = {utf8, ContainerId},
+ channel_max = {ushort, 100},
+ idle_time_out = {uint, IdleTimeOut},
+ properties = Props},
+ Open1 = case Config of
+ #{max_frame_size := MFSz} ->
+ Open0#'v1_0.open'{max_frame_size = {uint, MFSz}};
+ _ -> Open0
+ end,
+ Open = case Config of
+ #{hostname := Hostname} ->
+ Open1#'v1_0.open'{hostname = {utf8, Hostname}};
+ _ -> Open1
+ end,
+ Encoded = amqp10_framing:encode_bin(Open),
+ Frame = amqp10_binary_generator:build_frame(0, Encoded),
+ ?DBG("CONN <- ~p~n", [Open]),
+ socket_send(Socket, Frame).
+
+send_close(#state{socket = Socket}, _Reason) ->
+ Close = #'v1_0.close'{},
+ Encoded = amqp10_framing:encode_bin(Close),
+ Frame = amqp10_binary_generator:build_frame(0, Encoded),
+ ?DBG("CONN <- ~p~n", [Close]),
+ Ret = socket_send(Socket, Frame),
+ case Ret of
+        ok ->
+            _ = socket_shutdown(Socket, write),
+            ok;
+ _ -> ok
+ end,
+ Ret.
+
+send_sasl_init(State, anon) ->
+ Frame = #'v1_0.sasl_init'{mechanism = {symbol, <<"ANONYMOUS">>}},
+ send(Frame, 1, State);
+send_sasl_init(State, {plain, User, Pass}) ->
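+    %% SASL PLAIN initial response (RFC 4616): an empty authorization
+    %% identity, then the user and password, each preceded by a NUL byte.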
+ Response = <<0:8, User/binary, 0:8, Pass/binary>>,
+ Frame = #'v1_0.sasl_init'{mechanism = {symbol, <<"PLAIN">>},
+ initial_response = {binary, Response}},
+ send(Frame, 1, State).
+
+send(Record, FrameType, #state{socket = Socket}) ->
+ Encoded = amqp10_framing:encode_bin(Record),
+ Frame = amqp10_binary_generator:build_frame(0, FrameType, Encoded),
+ ?DBG("CONN <- ~p~n", [Record]),
+ socket_send(Socket, Frame).
+
+send_heartbeat(#state{socket = Socket}) ->
+ Frame = amqp10_binary_generator:build_heartbeat_frame(),
+ socket_send(Socket, Frame).
+
+socket_send({tcp, Socket}, Data) ->
+ gen_tcp:send(Socket, Data);
+socket_send({ssl, Socket}, Data) ->
+ ssl:send(Socket, Data).
+
+socket_shutdown({tcp, Socket}, How) ->
+ gen_tcp:shutdown(Socket, How);
+socket_shutdown({ssl, Socket}, How) ->
+ ssl:shutdown(Socket, How).
+
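+%% Owners registered via the `notify'/`notify_when_opened' config keys
+%% receive messages built by amqp10_event/1 below, e.g.:
+%%   receive {amqp10_event, {connection, Conn, opened}} -> ok end.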
+notify_opened(#{notify_when_opened := none}) ->
+ ok;
+notify_opened(#{notify_when_opened := Pid}) when is_pid(Pid) ->
+ Pid ! amqp10_event(opened),
+ ok;
+notify_opened(#{notify := Pid}) when is_pid(Pid) ->
+ Pid ! amqp10_event(opened),
+ ok;
+notify_opened(_) ->
+ ok.
+
+notify_closed(#{notify_when_closed := none}, _Reason) ->
+ ok;
+notify_closed(#{notify := none}, _Reason) ->
+ ok;
+notify_closed(#{notify_when_closed := Pid}, Reason) when is_pid(Pid) ->
+ Pid ! amqp10_event({closed, Reason}),
+ ok;
+notify_closed(#{notify := Pid}, Reason) when is_pid(Pid) ->
+ Pid ! amqp10_event({closed, Reason}),
+ ok.
+
+start_heartbeat_timer(Timeout) ->
+ timer:apply_after(Timeout, ?MODULE, heartbeat, [self()]).
+
+unpack(V) -> amqp10_client_types:unpack(V).
+
+-spec generate_container_id() -> binary().
+generate_container_id() ->
+ Pre = list_to_binary(atom_to_list(node())),
+ Id = bin_to_hex(crypto:strong_rand_bytes(8)),
+ <<Pre/binary, <<"_">>/binary, Id/binary>>.
+
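+%% Render a binary as lowercase hex,
+%% e.g. bin_to_hex(<<255,0>>) =:= <<"ff00">>.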
+bin_to_hex(Bin) ->
+    <<<<if N >= 10 -> N - 10 + $a;
+           true -> N + $0 end>>
+ || <<N:4>> <= Bin>>.
+
+translate_err(undefined) ->
+ none;
+translate_err(#'v1_0.error'{condition = Cond, description = Desc}) ->
+ Err =
+ case Cond of
+ ?V_1_0_AMQP_ERROR_INTERNAL_ERROR -> internal_error;
+ ?V_1_0_AMQP_ERROR_NOT_FOUND -> not_found;
+ ?V_1_0_AMQP_ERROR_UNAUTHORIZED_ACCESS -> unauthorized_access;
+ ?V_1_0_AMQP_ERROR_DECODE_ERROR -> decode_error;
+ ?V_1_0_AMQP_ERROR_RESOURCE_LIMIT_EXCEEDED -> resource_limit_exceeded;
+ ?V_1_0_AMQP_ERROR_NOT_ALLOWED -> not_allowed;
+ ?V_1_0_AMQP_ERROR_INVALID_FIELD -> invalid_field;
+ ?V_1_0_AMQP_ERROR_NOT_IMPLEMENTED -> not_implemented;
+ ?V_1_0_AMQP_ERROR_RESOURCE_LOCKED -> resource_locked;
+ ?V_1_0_AMQP_ERROR_PRECONDITION_FAILED -> precondition_failed;
+ ?V_1_0_AMQP_ERROR_RESOURCE_DELETED -> resource_deleted;
+ ?V_1_0_AMQP_ERROR_ILLEGAL_STATE -> illegal_state;
+ ?V_1_0_AMQP_ERROR_FRAME_SIZE_TOO_SMALL -> frame_size_too_small;
+ ?V_1_0_CONNECTION_ERROR_CONNECTION_FORCED -> forced;
+ ?V_1_0_CONNECTION_ERROR_FRAMING_ERROR -> framing_error;
+ ?V_1_0_CONNECTION_ERROR_REDIRECT -> redirect;
+ _ -> Cond
+ end,
+ {Err, unpack(Desc)}.
+
+amqp10_event(Evt) ->
+ {amqp10_event, {connection, self(), Evt}}.
+
+sasl_to_bin({plain, _, _}) -> <<"PLAIN">>;
+sasl_to_bin(anon) -> <<"ANONYMOUS">>.
+
+config_defaults() ->
+ #{sasl => none,
+ transfer_limit_margin => 0,
+ max_frame_size => ?MAX_MAX_FRAME_SIZE}.
diff --git a/deps/amqp10_client/src/amqp10_client_connection_sup.erl b/deps/amqp10_client/src/amqp10_client_connection_sup.erl
new file mode 100644
index 0000000000..c20a7b86ce
--- /dev/null
+++ b/deps/amqp10_client/src/amqp10_client_connection_sup.erl
@@ -0,0 +1,42 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+-module(amqp10_client_connection_sup).
+
+-behaviour(supervisor).
+
+%% Private API.
+-export([start_link/1]).
+
+%% Supervisor callbacks.
+-export([init/1]).
+
+-define(CHILD(Id, Mod, Type, Args), {Id, {Mod, start_link, Args},
+ transient, 5000, Type, [Mod]}).
+
+%% -------------------------------------------------------------------
+%% Private API.
+%% -------------------------------------------------------------------
+
+-spec start_link(amqp10_client_connection:connection_config()) ->
+ {ok, pid()} | ignore | {error, any()}.
+start_link(Config) ->
+ supervisor:start_link(?MODULE, [Config]).
+
+%% -------------------------------------------------------------------
+%% Supervisor callbacks.
+%% -------------------------------------------------------------------
+
+init(Args) ->
+ ReaderSpec = ?CHILD(reader, amqp10_client_frame_reader,
+ worker, [self() | Args]),
+ ConnectionSpec = ?CHILD(connection, amqp10_client_connection,
+ worker, [self() | Args]),
+ SessionsSupSpec = ?CHILD(sessions, amqp10_client_sessions_sup,
+ supervisor, []),
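+    %% With restart intensity 0, the first abnormal child exit tears
+    %% down the whole connection tree (one_for_all).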
+ {ok, {{one_for_all, 0, 1}, [ConnectionSpec,
+ ReaderSpec,
+ SessionsSupSpec]}}.
diff --git a/deps/amqp10_client/src/amqp10_client_connections_sup.erl b/deps/amqp10_client/src/amqp10_client_connections_sup.erl
new file mode 100644
index 0000000000..4200c64551
--- /dev/null
+++ b/deps/amqp10_client/src/amqp10_client_connections_sup.erl
@@ -0,0 +1,38 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+-module(amqp10_client_connections_sup).
+
+-behaviour(supervisor).
+
+%% Private API.
+-export([start_link/0,
+ stop_child/1]).
+
+%% Supervisor callbacks.
+-export([init/1]).
+
+-define(CHILD(Id, Mod, Type, Args), {Id, {Mod, start_link, Args},
+ temporary, infinity, Type, [Mod]}).
+
+%% -------------------------------------------------------------------
+%% Private API.
+%% -------------------------------------------------------------------
+
+stop_child(Pid) ->
+    supervisor:terminate_child(?MODULE, Pid).
+
+start_link() ->
+ supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+%% -------------------------------------------------------------------
+%% Supervisor callbacks.
+%% -------------------------------------------------------------------
+
+init([]) ->
+ Template = ?CHILD(connection_sup, amqp10_client_connection_sup,
+ supervisor, []),
+ {ok, {{simple_one_for_one, 0, 1}, [Template]}}.
diff --git a/deps/amqp10_client/src/amqp10_client_frame_reader.erl b/deps/amqp10_client/src/amqp10_client_frame_reader.erl
new file mode 100644
index 0000000000..524ead07ee
--- /dev/null
+++ b/deps/amqp10_client/src/amqp10_client_frame_reader.erl
@@ -0,0 +1,338 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+-module(amqp10_client_frame_reader).
+
+-behaviour(gen_statem).
+
+-include("amqp10_client.hrl").
+-include_lib("amqp10_common/include/amqp10_framing.hrl").
+
+-ifdef(TEST).
+-include_lib("eunit/include/eunit.hrl").
+-endif.
+
+%% API
+-export([start_link/2,
+ set_connection/2,
+ close/1,
+ register_session/3,
+ unregister_session/4]).
+
+%% gen_statem callbacks
+-export([init/1,
+ callback_mode/0,
+ handle_event/4,
+ code_change/4,
+ terminate/3]).
+
+-define(RABBIT_TCP_OPTS, [binary,
+ {packet, 0},
+ {active, false},
+ {nodelay, true}]).
+
+-type frame_type() :: amqp | sasl.
+
+-record(frame_state,
+ {data_offset :: 2..255,
+ type :: frame_type(),
+ channel :: non_neg_integer(),
+ frame_length :: pos_integer()}).
+
+-record(state,
+ {connection_sup :: pid(),
+ socket :: amqp10_client_connection:amqp10_socket() | undefined,
+ buffer = <<>> :: binary(),
+ frame_state :: #frame_state{} | undefined,
+ connection :: pid() | undefined,
+ heartbeat_timer_ref :: reference() | undefined,
+ connection_config = #{} :: amqp10_client_connection:connection_config(),
+ outgoing_channels = #{},
+ incoming_channels = #{}}).
+
+%%%===================================================================
+%%% API
+%%%===================================================================
+
+-spec start_link(pid(), amqp10_client_connection:connection_config()) ->
+ {ok, pid()} | ignore | {error, any()}.
+start_link(Sup, Config) ->
+ gen_statem:start_link(?MODULE, [Sup, Config], []).
+
+%% @private
+%% @doc
+%% Passes the connection process PID to the reader process.
+%%
+%% This function is called when a connection supervision tree is
+%% started.
+-spec set_connection(Reader :: pid(), ConnectionPid :: pid()) -> ok.
+set_connection(Reader, Connection) ->
+ gen_statem:cast(Reader, {set_connection, Connection}).
+
+close(Reader) ->
+ gen_statem:cast(Reader, close).
+
+register_session(Reader, Session, OutgoingChannel) ->
+ gen_statem:cast(Reader, {register_session, Session, OutgoingChannel}).
+
+unregister_session(Reader, Session, OutgoingChannel, IncomingChannel) ->
+ gen_statem:cast(Reader, {unregister_session, Session, OutgoingChannel, IncomingChannel}).
+
+%%%===================================================================
+%%% gen_statem callbacks
+%%%===================================================================
+
+callback_mode() ->
+ [handle_event_function].
+
+init([Sup, ConnConfig]) when is_map(ConnConfig) ->
+ Port = maps:get(port, ConnConfig, 5672),
+    %% Combine the list of `addresses' with the original `address' option, if provided
+ Addresses0 = maps:get(addresses, ConnConfig, []),
+ Addresses = case maps:get(address, ConnConfig, undefined) of
+ undefined -> Addresses0;
+ Address -> Addresses0 ++ [Address]
+ end,
+ Result = lists:foldl(fun (Address, {error, _}) ->
+ gen_tcp:connect(Address, Port, ?RABBIT_TCP_OPTS);
+ (_Address, {ok, Socket}) ->
+ {ok, Socket}
+ end,
+ {error, undefined}, Addresses),
+ case Result of
+ {ok, Socket0} ->
+ Socket = case ConnConfig of
+ #{tls_opts := {secure_port, Opts}} ->
+ {ok, SslSock} = ssl:connect(Socket0, Opts),
+ {ssl, SslSock};
+ _ -> {tcp, Socket0}
+ end,
+ State = #state{connection_sup = Sup, socket = Socket,
+ connection_config = ConnConfig},
+ {ok, expecting_connection_pid, State};
+ {error, Reason} ->
+ {stop, Reason}
+ end.
+
+handle_event(cast, {set_connection, ConnectionPid}, expecting_connection_pid,
+ State=#state{socket = Socket}) ->
+ ok = amqp10_client_connection:socket_ready(ConnectionPid, Socket),
+ set_active_once(State),
+ State1 = State#state{connection = ConnectionPid},
+ {next_state, expecting_frame_header, State1};
+handle_event(cast, {register_session, Session, OutgoingChannel}, _StateName,
+ #state{socket = Socket, outgoing_channels = OutgoingChannels} = State) ->
+ ok = amqp10_client_session:socket_ready(Session, Socket),
+ OutgoingChannels1 = OutgoingChannels#{OutgoingChannel => Session},
+ State1 = State#state{outgoing_channels = OutgoingChannels1},
+ {keep_state, State1};
+handle_event(cast, {unregister_session, _Session, OutgoingChannel, IncomingChannel}, _StateName,
+ State=#state{outgoing_channels = OutgoingChannels,
+ incoming_channels = IncomingChannels}) ->
+ OutgoingChannels1 = maps:remove(OutgoingChannel, OutgoingChannels),
+ IncomingChannels1 = maps:remove(IncomingChannel, IncomingChannels),
+ State1 = State#state{outgoing_channels = OutgoingChannels1,
+ incoming_channels = IncomingChannels1},
+ {keep_state, State1};
+handle_event(cast, close, _StateName, State = #state{socket = Socket}) ->
+ close_socket(Socket),
+ {stop, normal, State#state{socket = undefined}};
+
+handle_event({call, From}, _Action, _State, _Data) ->
+ {keep_state_and_data, [{reply, From, ok}]};
+
+handle_event(info, {Tcp, _, Packet}, StateName, #state{buffer = Buffer} = State)
+ when Tcp == tcp orelse Tcp == ssl ->
+ Data = <<Buffer/binary, Packet/binary>>,
+ case handle_input(StateName, Data, State) of
+ {ok, NextState, Remaining, NewState0} ->
+ NewState = defer_heartbeat_timer(NewState0),
+ set_active_once(NewState),
+ {next_state, NextState, NewState#state{buffer = Remaining}};
+ {error, Reason, NewState} ->
+ {stop, Reason, NewState}
+ end;
+
+handle_event(info, {TcpError, _, Reason}, StateName, State)
+ when TcpError == tcp_error orelse TcpError == ssl_error ->
+ error_logger:warning_msg("AMQP 1.0 connection socket errored, connection state: '~s', reason: '~p'~n",
+ [StateName, Reason]),
+ State1 = State#state{socket = undefined,
+ buffer = <<>>,
+ frame_state = undefined},
+ {stop, {error, Reason}, State1};
+handle_event(info, {TcpClosed, _}, StateName, State)
+ when TcpClosed == tcp_closed orelse TcpClosed == ssl_closed ->
+ error_logger:warning_msg("AMQP 1.0 connection socket was closed, connection state: '~s'~n",
+ [StateName]),
+ State1 = State#state{socket = undefined,
+ buffer = <<>>,
+ frame_state = undefined},
+ {stop, normal, State1};
+
+handle_event(info, heartbeat, _StateName, #state{connection = Connection}) ->
+ amqp10_client_connection:close(Connection,
+ {resource_limit_exceeded, <<"remote idle-time-out">>}),
+    % do not stop, as we may want to read the peer's close frame
+ keep_state_and_data.
+
+terminate(normal, _StateName, #state{connection_sup = _Sup, socket = Socket}) ->
+ maybe_close_socket(Socket);
+terminate(_Reason, _StateName, #state{connection_sup = _Sup, socket = Socket}) ->
+ maybe_close_socket(Socket).
+
+code_change(_Vsn, State, Data, _Extra) ->
+ {ok, State, Data}.
+
+%%%===================================================================
+%%% Internal functions
+%%%===================================================================
+
+maybe_close_socket(undefined) ->
+ ok;
+maybe_close_socket(Socket) ->
+ close_socket(Socket).
+
+close_socket({tcp, Socket}) ->
+ gen_tcp:close(Socket);
+close_socket({ssl, Socket}) ->
+ ssl:close(Socket).
+
+set_active_once(#state{socket = {tcp, Socket}}) ->
+ ok = inet:setopts(Socket, [{active, once}]);
+set_active_once(#state{socket = {ssl, Socket}}) ->
+ ok = ssl:setopts(Socket, [{active, once}]).
+
+handle_input(expecting_frame_header,
+ <<"AMQP", Protocol/unsigned, Maj/unsigned, Min/unsigned,
+ Rev/unsigned, Rest/binary>>,
+ #state{connection = ConnectionPid} = State)
+ when Protocol =:= 0 orelse Protocol =:= 3 ->
+ ok = amqp10_client_connection:protocol_header_received(
+ ConnectionPid, Protocol, Maj, Min, Rev),
+ handle_input(expecting_frame_header, Rest, State);
+
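+%% A frame header is 8 bytes: a 32-bit total frame size, the data offset
+%% in 4-byte words, a type byte (0 = amqp, 1 = sasl) and a 16-bit
+%% channel number (spec section 2.3.1).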
+handle_input(expecting_frame_header,
+ <<Length:32/unsigned, DOff:8/unsigned, Type/unsigned,
+ Channel:16/unsigned, Rest/binary>>, State)
+ when DOff >= 2 andalso (Type =:= 0 orelse Type =:= 1) ->
+ AFS = #frame_state{frame_length = Length, channel = Channel,
+ type = frame_type(Type), data_offset = DOff},
+ handle_input(expecting_extended_frame_header, Rest,
+ State#state{frame_state = AFS});
+
+handle_input(expecting_frame_header, <<_:8/binary, _/binary>>, State) ->
+ {error, invalid_protocol_header, State};
+
+handle_input(expecting_extended_frame_header, Data,
+ #state{frame_state =
+ #frame_state{data_offset = DOff}} = State) ->
+ Skip = DOff * 4 - 8,
+ case Data of
+ <<_:Skip/binary, Rest/binary>> ->
+ handle_input(expecting_frame_body, Rest, State);
+ _ ->
+ {ok, expecting_extended_frame_header, Data, State}
+ end;
+
+handle_input(expecting_frame_body, Data,
+ #state{frame_state = #frame_state{frame_length = Length,
+ type = FrameType,
+ data_offset = DOff,
+ channel = Channel}} = State) ->
+ Skip = DOff * 4 - 8,
+ BodyLength = Length - Skip - 8,
+ case {Data, BodyLength} of
+ {<<_:BodyLength/binary, Rest/binary>>, 0} ->
+            % zero-length body: an empty (heartbeat) frame
+ handle_input(expecting_frame_header, Rest, State);
+ {<<FrameBody:BodyLength/binary, Rest/binary>>, _} ->
+ State1 = State#state{frame_state = undefined},
+ {PerfDesc, Payload} = amqp10_binary_parser:parse(FrameBody),
+ Perf = amqp10_framing:decode(PerfDesc),
+ State2 = route_frame(Channel, FrameType, {Perf, Payload}, State1),
+ handle_input(expecting_frame_header, Rest, State2);
+ _ ->
+ {ok, expecting_frame_body, Data, State}
+ end;
+
+handle_input(StateName, Data, State) ->
+ {ok, StateName, Data, State}.
+
+%%% LOCAL
+
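+%% (Re)arm the local idle-timeout watchdog: any received data defers it.
+%% If nothing arrives for twice the negotiated idle-time-out, the
+%% 'heartbeat' info message handled above closes the connection.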
+defer_heartbeat_timer(State =
+ #state{heartbeat_timer_ref = TRef,
+ connection_config = #{idle_time_out := T}})
+ when is_number(T) andalso T > 0 ->
+ _ = case TRef of
+ undefined -> ok;
+ _ -> _ = erlang:cancel_timer(TRef)
+ end,
+ NewTRef = erlang:send_after(T * 2, self(), heartbeat),
+ State#state{heartbeat_timer_ref = NewTRef};
+defer_heartbeat_timer(State) -> State.
+
+route_frame(Channel, FrameType, {Performative, Payload} = Frame, State0) ->
+ {DestinationPid, State} = find_destination(Channel, FrameType, Performative,
+ State0),
+ ?DBG("FRAME -> ~p ~p~n ~p~n", [Channel, DestinationPid, Performative]),
+ case Payload of
+ <<>> -> ok = gen_statem:cast(DestinationPid, Performative);
+ _ -> ok = gen_statem:cast(DestinationPid, Frame)
+ end,
+ State.
+
+-spec find_destination(amqp10_client_types:channel(), frame_type(),
+ amqp10_client_types:amqp10_performative(), #state{}) ->
+ {pid(), #state{}}.
+find_destination(0, amqp, Frame, #state{connection = ConnPid} = State)
+ when is_record(Frame, 'v1_0.open') orelse
+ is_record(Frame, 'v1_0.close') ->
+ {ConnPid, State};
+find_destination(_Channel, sasl, _Frame,
+ #state{connection = ConnPid} = State) ->
+ {ConnPid, State};
+find_destination(Channel, amqp,
+ #'v1_0.begin'{remote_channel = {ushort, OutgoingChannel}},
+ #state{outgoing_channels = OutgoingChannels,
+ incoming_channels = IncomingChannels} = State) ->
+ #{OutgoingChannel := Session} = OutgoingChannels,
+ IncomingChannels1 = IncomingChannels#{Channel => Session},
+ State1 = State#state{incoming_channels = IncomingChannels1},
+ {Session, State1};
+find_destination(Channel, amqp, _Frame,
+ #state{incoming_channels = IncomingChannels} = State) ->
+ #{Channel := Session} = IncomingChannels,
+ {Session, State}.
+
+frame_type(0) -> amqp;
+frame_type(1) -> sasl.
+
+-ifdef(TEST).
+
+find_destination_test_() ->
+ Pid = self(),
+ State = #state{connection = Pid, outgoing_channels = #{3 => Pid}},
+ StateConn = #state{connection = Pid},
+ StateWithIncoming = State#state{incoming_channels = #{7 => Pid}},
+ StateWithIncoming0 = State#state{incoming_channels = #{0 => Pid}},
+ Tests = [{0, #'v1_0.open'{}, State, State, amqp},
+ {0, #'v1_0.close'{}, State, State, amqp},
+ {7, #'v1_0.begin'{remote_channel = {ushort, 3}}, State,
+ StateWithIncoming, amqp},
+ {0, #'v1_0.begin'{remote_channel = {ushort, 3}}, State,
+ StateWithIncoming0, amqp},
+ {7, #'v1_0.end'{}, StateWithIncoming, StateWithIncoming, amqp},
+ {7, #'v1_0.attach'{}, StateWithIncoming, StateWithIncoming, amqp},
+ {7, #'v1_0.flow'{}, StateWithIncoming, StateWithIncoming, amqp},
+ {0, #'v1_0.sasl_init'{}, StateConn, StateConn, sasl}
+ ],
+ [?_assertMatch({Pid, NewState},
+ find_destination(Channel, Type, Frame, InputState))
+ || {Channel, Frame, InputState, NewState, Type} <- Tests].
+
+-endif.
diff --git a/deps/amqp10_client/src/amqp10_client_session.erl b/deps/amqp10_client/src/amqp10_client_session.erl
new file mode 100644
index 0000000000..e758a8acbb
--- /dev/null
+++ b/deps/amqp10_client/src/amqp10_client_session.erl
@@ -0,0 +1,1121 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+-module(amqp10_client_session).
+
+-behaviour(gen_statem).
+
+-include("amqp10_client.hrl").
+-include_lib("amqp10_common/include/amqp10_framing.hrl").
+
+%% Public API.
+-export(['begin'/1,
+ begin_sync/1,
+ begin_sync/2,
+ 'end'/1,
+ attach/2,
+ detach/2,
+ transfer/3,
+ flow/4,
+ disposition/6
+ ]).
+
+%% Private API.
+-export([start_link/4,
+ socket_ready/2
+ ]).
+
+%% gen_statem callbacks
+-export([
+ init/1,
+ terminate/3,
+ code_change/4,
+ callback_mode/0
+ ]).
+
+%% gen_statem state callbacks.
+-export([
+ unmapped/3,
+ begin_sent/3,
+ mapped/3,
+ end_sent/3
+ ]).
+
+-define(MAX_SESSION_WINDOW_SIZE, 65535).
+-define(DEFAULT_MAX_HANDLE, 16#ffffffff).
+-define(DEFAULT_TIMEOUT, 5000).
+-define(INITIAL_OUTGOING_ID, 0).
+-define(INITIAL_DELIVERY_COUNT, 0).
+
+% -type from() :: {pid(), term()}.
+
+-type transfer_id() :: non_neg_integer().
+-type link_handle() :: non_neg_integer().
+-type link_name() :: binary().
+-type link_address() :: binary().
+-type link_role() :: sender | receiver.
+-type link_source() :: link_address() | undefined.
+-type link_target() :: {pid, pid()} | binary() | undefined.
+
+-type snd_settle_mode() :: unsettled | settled | mixed.
+-type rcv_settle_mode() :: first | second.
+
+-type terminus_durability() :: none | configuration | unsettled_state.
+
+-type target_def() :: #{address => link_address(),
+ durable => terminus_durability()}.
+-type source_def() :: #{address => link_address(),
+ durable => terminus_durability()}.
+
+-type attach_role() :: {sender, target_def()} | {receiver, source_def(), pid()}.
+
+% http://www.amqp.org/specification/1.0/filters
+-type filter() :: #{binary() => binary() | map() | list(binary())}.
+-type properties() :: #{binary() => tuple()}.
+
+-type attach_args() :: #{name => binary(),
+ role => attach_role(),
+ snd_settle_mode => snd_settle_mode(),
+ rcv_settle_mode => rcv_settle_mode(),
+ filter => filter(),
+ properties => properties()
+ }.
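+%% e.g. (illustrative only; addresses are broker-specific):
+%%   #{name => <<"my-receiver">>,
+%%     role => {receiver, #{address => <<"some-queue">>}, self()}}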
+
+-type link_ref() :: #link_ref{}.
+
+-export_type([snd_settle_mode/0,
+ rcv_settle_mode/0,
+ terminus_durability/0,
+ attach_args/0,
+ attach_role/0,
+ target_def/0,
+ source_def/0,
+ properties/0,
+ filter/0]).
+
+-record(link,
+ {name :: link_name(),
+ ref :: link_ref(),
+ state = detached :: detached | attach_sent | attached | detach_sent,
+ notify :: pid(),
+ output_handle :: link_handle(),
+ input_handle :: link_handle() | undefined,
+ role :: link_role(),
+ source :: link_source(),
+ target :: link_target(),
+ delivery_count = 0 :: non_neg_integer(),
+ link_credit = 0 :: non_neg_integer(),
+ link_credit_unsettled = 0 :: non_neg_integer(),
+ available = 0 :: non_neg_integer(),
+ drain = false :: boolean(),
+ partial_transfers :: undefined | {#'v1_0.transfer'{}, [binary()]},
+ auto_flow :: never | {auto, non_neg_integer(), non_neg_integer()}
+ }).
+
+-record(state,
+ {channel :: pos_integer(),
+ remote_channel :: pos_integer() | undefined,
+ next_incoming_id = 0 :: transfer_id(),
+ incoming_window = ?MAX_SESSION_WINDOW_SIZE :: non_neg_integer(),
+ next_outgoing_id = ?INITIAL_OUTGOING_ID + 1 :: transfer_id(),
+ outgoing_window = ?MAX_SESSION_WINDOW_SIZE :: non_neg_integer(),
+ remote_incoming_window = 0 :: non_neg_integer(),
+ remote_outgoing_window = 0 :: non_neg_integer(),
+ reader :: pid(),
+ socket :: amqp10_client_connection:amqp10_socket() | undefined,
+ links = #{} :: #{link_handle() => #link{}},
+ % maps link name to outgoing link handle
+ link_index = #{} :: #{link_name() => link_handle()},
+ % maps incoming handle to outgoing
+ link_handle_index = #{} :: #{link_handle() => link_handle()},
+ next_link_handle = 0 :: link_handle(),
+ early_attach_requests = [] :: [term()],
+ connection_config = #{} :: amqp10_client_connection:connection_config(),
+ % the unsettled map needs to go in the session state as a disposition
+ % can reference transfers for many different links
+ unsettled = #{} :: #{transfer_id() => {amqp10_msg:delivery_tag(),
+ any()}}, %TODO: refine as FsmRef
+ incoming_unsettled = #{} :: #{transfer_id() => link_handle()},
+ notify :: pid()
+ }).
+
+
+%% -------------------------------------------------------------------
+%% Public API.
+%% -------------------------------------------------------------------
+
+-spec 'begin'(pid()) -> supervisor:startchild_ret().
+'begin'(Connection) ->
+    %% The connection process is responsible for allocating a channel
+    %% number and for contacting the sessions supervisor to start a new
+    %% session process.
+ amqp10_client_connection:begin_session(Connection).
+
+-spec begin_sync(pid()) -> supervisor:startchild_ret().
+begin_sync(Connection) ->
+ begin_sync(Connection, ?DEFAULT_TIMEOUT).
+
+-spec begin_sync(pid(), non_neg_integer()) ->
+ supervisor:startchild_ret() | session_timeout.
+begin_sync(Connection, Timeout) ->
+ {ok, Session} = amqp10_client_connection:begin_session(Connection),
+ receive
+ {session_begin, Session} -> {ok, Session}
+ after Timeout -> session_timeout
+ end.
+
+-spec 'end'(pid()) -> ok.
+'end'(Pid) ->
+ gen_statem:cast(Pid, 'end').
+
+-spec attach(pid(), attach_args()) -> {ok, link_ref()}.
+attach(Session, Args) ->
+ gen_statem:call(Session, {attach, Args}, {dirty_timeout, ?TIMEOUT}).
+
+-spec detach(pid(), link_handle()) -> ok | {error, link_not_found | half_attached}.
+detach(Session, Handle) ->
+ gen_statem:call(Session, {detach, Handle}, {dirty_timeout, ?TIMEOUT}).
+
+-spec transfer(pid(), amqp10_msg:amqp10_msg(), timeout()) ->
+ ok | {error, insufficient_credit | link_not_found | half_attached}.
+transfer(Session, Amqp10Msg, Timeout) ->
+ [Transfer | Records] = amqp10_msg:to_amqp_records(Amqp10Msg),
+ gen_statem:call(Session, {transfer, Transfer, Records},
+ {dirty_timeout, Timeout}).
+
+flow(Session, Handle, Flow, RenewAfter) ->
+ gen_statem:cast(Session, {flow, Handle, Flow, RenewAfter}).
+
+-spec disposition(pid(), link_role(), transfer_id(), transfer_id(), boolean(),
+ amqp10_client_types:delivery_state()) -> ok.
+disposition(Session, Role, First, Last, Settled, DeliveryState) ->
+ gen_statem:call(Session, {disposition, Role, First, Last, Settled,
+ DeliveryState}, {dirty_timeout, ?TIMEOUT}).
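+
+%% An illustrative sketch of a session lifecycle, assuming `Connection'
+%% was opened elsewhere via the amqp10_client API:
+%%   {ok, Session} = amqp10_client_session:begin_sync(Connection),
+%%   {ok, LinkRef} = amqp10_client_session:attach(Session, AttachArgs),
+%%   ok = amqp10_client_session:'end'(Session).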
+
+
+
+%% -------------------------------------------------------------------
+%% Private API.
+%% -------------------------------------------------------------------
+
+start_link(From, Channel, Reader, ConnConfig) ->
+ gen_statem:start_link(?MODULE, [From, Channel, Reader, ConnConfig], []).
+
+-spec socket_ready(pid(), amqp10_client_connection:amqp10_socket()) -> ok.
+socket_ready(Pid, Socket) ->
+ gen_statem:cast(Pid, {socket_ready, Socket}).
+
+%% -------------------------------------------------------------------
+%% gen_statem callbacks.
+%% -------------------------------------------------------------------
+
+callback_mode() -> [state_functions].
+
+init([FromPid, Channel, Reader, ConnConfig]) ->
+ process_flag(trap_exit, true),
+ amqp10_client_frame_reader:register_session(Reader, self(), Channel),
+ State = #state{notify = FromPid, channel = Channel, reader = Reader,
+ connection_config = ConnConfig},
+ {ok, unmapped, State}.
+
+unmapped(cast, {socket_ready, Socket}, State) ->
+ State1 = State#state{socket = Socket},
+ ok = send_begin(State1),
+ {next_state, begin_sent, State1};
+unmapped({call, From}, {attach, Attach},
+ #state{early_attach_requests = EARs} = State) ->
+ {keep_state,
+ State#state{early_attach_requests = [{From, Attach} | EARs]}}.
+
+begin_sent(cast, #'v1_0.begin'{remote_channel = {ushort, RemoteChannel},
+ next_outgoing_id = {uint, NOI},
+ incoming_window = {uint, InWindow},
+ outgoing_window = {uint, OutWindow}},
+ #state{early_attach_requests = EARs} = State) ->
+
+ State1 = State#state{remote_channel = RemoteChannel},
+ State2 = lists:foldr(fun({From, Attach}, S) ->
+ {S2, H} = send_attach(fun send/2, Attach, From, S),
+ gen_statem:reply(From, {ok, H}),
+ S2
+ end, State1, EARs),
+
+ ok = notify_session_begun(State2),
+
+ {next_state, mapped, State2#state{early_attach_requests = [],
+ next_incoming_id = NOI,
+ remote_incoming_window = InWindow,
+ remote_outgoing_window = OutWindow}};
+begin_sent({call, From}, {attach, Attach},
+ #state{early_attach_requests = EARs} = State) ->
+ {keep_state,
+ State#state{early_attach_requests = [{From, Attach} | EARs]}}.
+
+mapped(cast, 'end', State) ->
+ %% We send the first end frame and wait for the reply.
+ send_end(State),
+ {next_state, end_sent, State};
+mapped(cast, {flow, OutHandle, Flow0, RenewAfter}, State0) ->
+ State = send_flow(fun send/2, OutHandle, Flow0, RenewAfter, State0),
+ {next_state, mapped, State};
+mapped(cast, #'v1_0.end'{error = Err}, State) ->
+ %% We receive the first end frame, reply and terminate.
+ _ = send_end(State),
+ % TODO: send notifications for links?
+ Reason = case Err of
+ undefined -> normal;
+ _ -> Err
+ end,
+ ok = notify_session_ended(State, Reason),
+ {stop, normal, State};
+mapped(cast, #'v1_0.attach'{name = {utf8, Name},
+ initial_delivery_count = IDC,
+ handle = {uint, InHandle}},
+ #state{links = Links, link_index = LinkIndex,
+ link_handle_index = LHI} = State0) ->
+
+ #{Name := OutHandle} = LinkIndex,
+ #{OutHandle := Link0} = Links,
+ ok = notify_link_attached(Link0),
+
+ DeliveryCount = case Link0 of
+ #link{role = sender, delivery_count = DC} -> DC;
+ _ -> unpack(IDC)
+ end,
+ Link = Link0#link{input_handle = InHandle, state = attached,
+ delivery_count = DeliveryCount},
+ State = State0#state{links = Links#{OutHandle => Link},
+ link_index = maps:remove(Name, LinkIndex),
+ link_handle_index = LHI#{InHandle => OutHandle}},
+ {next_state, mapped, State};
+mapped(cast, #'v1_0.detach'{handle = {uint, InHandle},
+ error = Err},
+ #state{links = Links, link_handle_index = LHI} = State0) ->
+ with_link(InHandle, State0,
+ fun (#link{output_handle = OutHandle} = Link, State) ->
+ Reason = case Err of
+ undefined -> normal;
+                               _ -> Err
+ end,
+ ok = notify_link_detached(Link, Reason),
+ {next_state, mapped,
+ State#state{links = maps:remove(OutHandle, Links),
+ link_handle_index = maps:remove(InHandle, LHI)}}
+ end);
+mapped(cast, #'v1_0.flow'{handle = undefined} = Flow, State0) ->
+ State = handle_session_flow(Flow, State0),
+ {next_state, mapped, State};
+mapped(cast, #'v1_0.flow'{handle = {uint, InHandle}} = Flow,
+ #state{links = Links} = State0) ->
+
+ State = handle_session_flow(Flow, State0),
+
+ {ok, #link{output_handle = OutHandle} = Link0} =
+ find_link_by_input_handle(InHandle, State),
+
+ % TODO: handle `send_flow` return tag
+ {ok, Link} = handle_link_flow(Flow, Link0),
+ ok = maybe_notify_link_credit(Link0, Link),
+ Links1 = Links#{OutHandle => Link},
+ State1 = State#state{links = Links1},
+ {next_state, mapped, State1};
+mapped(cast, {#'v1_0.transfer'{handle = {uint, InHandle},
+ more = true} = Transfer, Payload},
+ #state{links = Links} = State0) ->
+
+ {ok, #link{output_handle = OutHandle} = Link} =
+ find_link_by_input_handle(InHandle, State0),
+
+ Link1 = append_partial_transfer(Transfer, Payload, Link),
+
+ State = book_partial_transfer_received(
+ State0#state{links = Links#{OutHandle => Link1}}),
+ {next_state, mapped, State};
+mapped(cast, {#'v1_0.transfer'{handle = {uint, InHandle},
+ delivery_id = MaybeDeliveryId,
+ settled = Settled} = Transfer0, Payload0},
+ #state{incoming_unsettled = Unsettled0} = State0) ->
+
+ {ok, #link{target = {pid, TargetPid},
+ output_handle = OutHandle,
+ ref = LinkRef} = Link0} =
+ find_link_by_input_handle(InHandle, State0),
+
+ {Transfer, Payload, Link} = complete_partial_transfer(Transfer0, Payload0, Link0),
+ Msg = decode_as_msg(Transfer, Payload),
+
+ % stash the DeliveryId - not sure for what yet
+ Unsettled = case MaybeDeliveryId of
+ {uint, DeliveryId} when Settled =/= true ->
+ Unsettled0#{DeliveryId => OutHandle};
+ _ ->
+ Unsettled0
+ end,
+
+ % link bookkeeping
+ % notify when credit is exhausted (link_credit = 0)
+ % detach the Link with a transfer-limit-exceeded error code if further
+ % transfers are received
+ case book_transfer_received(Settled,
+ State0#state{incoming_unsettled = Unsettled},
+ Link) of
+ {ok, State} ->
+ % deliver
+ TargetPid ! {amqp10_msg, LinkRef, Msg},
+ State1 = auto_flow(Link, State),
+ {next_state, mapped, State1};
+ {credit_exhausted, State} ->
+ TargetPid ! {amqp10_msg, LinkRef, Msg},
+ ok = notify_link(Link, credit_exhausted),
+ {next_state, mapped, State};
+ {transfer_limit_exceeded, State} ->
+ error_logger:info_msg("transfer_limit_exceeded for link ~p~n", [Link]),
+ Link1 = detach_with_error_cond(Link, State,
+ ?V_1_0_LINK_ERROR_TRANSFER_LIMIT_EXCEEDED),
+ {next_state, mapped, update_link(Link1, State)}
+ end;
+
+
+% role=true indicates the disposition is from a `receiver', i.e. from the
+% client's point of view these are dispositions relating to `sender' links
+mapped(cast, #'v1_0.disposition'{role = true, settled = true, first = {uint, First},
+ last = Last0, state = DeliveryState},
+ #state{unsettled = Unsettled0} = State) ->
+ Last = case Last0 of
+ undefined -> First;
+ {uint, L} -> L
+ end,
+ % TODO: no good if the range becomes very large!! refactor
+ Unsettled =
+ lists:foldl(fun(Id, Acc) ->
+ case Acc of
+ #{Id := {DeliveryTag, Receiver}} ->
+ S = translate_delivery_state(DeliveryState),
+ ok = notify_disposition(Receiver,
+ {S, DeliveryTag}),
+ maps:remove(Id, Acc);
+ _ -> Acc
+ end
+ end, Unsettled0, lists:seq(First, Last)),
+
+ {next_state, mapped, State#state{unsettled = Unsettled}};
+mapped(cast, Frame, State) ->
+ error_logger:warning_msg("Unhandled session frame ~p in state ~p~n",
+ [Frame, State]),
+ {next_state, mapped, State};
+mapped({call, From},
+ {transfer, #'v1_0.transfer'{handle = {uint, OutHandle},
+ delivery_tag = {binary, DeliveryTag},
+ settled = false} = Transfer0, Parts},
+ #state{next_outgoing_id = NOI, links = Links,
+ unsettled = Unsettled} = State) ->
+ case Links of
+ #{OutHandle := #link{input_handle = undefined}} ->
+ {keep_state, State, [{reply, From, {error, half_attached}}]};
+ #{OutHandle := #link{link_credit = LC}} when LC =< 0 ->
+ {keep_state, State, [{reply, From, {error, insufficient_credit}}]};
+ #{OutHandle := Link} ->
+ Transfer = Transfer0#'v1_0.transfer'{delivery_id = uint(NOI),
+ resume = false},
+ {ok, NumFrames} = send_transfer(Transfer, Parts, State),
+ State1 = State#state{unsettled = Unsettled#{NOI => {DeliveryTag, From}}},
+ {keep_state, book_transfer_send(NumFrames, Link, State1),
+ [{reply, From, ok}]};
+ _ ->
+            {keep_state, State, [{reply, From, {error, link_not_found}}]}
+    end;
+mapped({call, From},
+ {transfer, #'v1_0.transfer'{handle = {uint, OutHandle}} = Transfer0,
+ Parts}, #state{next_outgoing_id = NOI,
+ links = Links} = State) ->
+ case Links of
+ #{OutHandle := #link{input_handle = undefined}} ->
+ {keep_state_and_data, [{reply, From, {error, half_attached}}]};
+ #{OutHandle := #link{link_credit = LC}} when LC =< 0 ->
+ {keep_state_and_data, [{reply, From, {error, insufficient_credit}}]};
+ #{OutHandle := Link} ->
+ Transfer = Transfer0#'v1_0.transfer'{delivery_id = uint(NOI)},
+ {ok, NumFrames} = send_transfer(Transfer, Parts, State),
+            % TODO: look into whether Erlang will correctly wrap integers
+            % during binary conversion.
+ {keep_state, book_transfer_send(NumFrames, Link, State),
+ [{reply, From, ok}]};
+ _ ->
+            {keep_state_and_data, [{reply, From, {error, link_not_found}}]}
+ end;
+
+mapped({call, From},
+ {disposition, Role, First, Last, Settled0, DeliveryState},
+ #state{incoming_unsettled = Unsettled0,
+ links = Links0} = State0) ->
+ Disposition =
+ begin
+ DS = translate_delivery_state(DeliveryState),
+ #'v1_0.disposition'{role = translate_role(Role),
+ first = {uint, First},
+ last = {uint, Last},
+ settled = Settled0,
+ state = DS}
+ end,
+
+ Ks = lists:seq(First, Last),
+ Settled = maps:values(maps:with(Ks, Unsettled0)),
+ Links = lists:foldl(fun (H, Acc) ->
+ #{H := #link{link_credit_unsettled = LCU} = L} = Acc,
+ Acc#{H => L#link{link_credit_unsettled = LCU-1}}
+ end, Links0, Settled),
+ Unsettled = maps:without(Ks, Unsettled0),
+ State = lists:foldl(fun(H, S) ->
+ #{H := L} = Links,
+ auto_flow(L, S)
+ end,
+ State0#state{incoming_unsettled = Unsettled,
+ links = Links},
+ lists:usort(Settled)),
+
+ Res = send(Disposition, State),
+
+ {keep_state, State, [{reply, From, Res}]};
+
+mapped({call, From}, {attach, Attach}, State) ->
+ {State1, LinkRef} = send_attach(fun send/2, Attach, From, State),
+ {keep_state, State1, [{reply, From, {ok, LinkRef}}]};
+
+mapped({call, From}, Msg, State) ->
+ {Reply, State1} = send_detach(fun send/2, Msg, From, State),
+ {keep_state, State1, [{reply, From, Reply}]};
+
+mapped(_EvtType, Msg, _State) ->
+ error_logger:info_msg("amqp10_session: unhandled msg in mapped state ~W",
+ [Msg, 10]),
+ keep_state_and_data.
+
+end_sent(_EvtType, #'v1_0.end'{}, State) ->
+ {stop, normal, State};
+end_sent(_EvtType, _Frame, State) ->
+ % just drop frames here
+ {next_state, end_sent, State}.
+
+terminate(Reason, _StateName, #state{channel = Channel,
+ remote_channel = RemoteChannel,
+ reader = Reader}) ->
+ case Reason of
+ normal -> amqp10_client_frame_reader:unregister_session(
+ Reader, self(), Channel, RemoteChannel);
+ _ -> ok
+ end,
+ ok.
+
+code_change(_OldVsn, StateName, State, _Extra) ->
+ {ok, StateName, State}.
+
+%% -------------------------------------------------------------------
+%% Internal functions.
+%% -------------------------------------------------------------------
+
+send_begin(#state{socket = Socket,
+ next_outgoing_id = NextOutId,
+ incoming_window = InWin,
+ outgoing_window = OutWin} = State) ->
+ Begin = #'v1_0.begin'{next_outgoing_id = uint(NextOutId),
+ incoming_window = uint(InWin),
+ outgoing_window = uint(OutWin) },
+ Frame = encode_frame(Begin, State),
+ socket_send(Socket, Frame).
+
+send_end(State) ->
+ send_end(State, undefined).
+
+send_end(#state{socket = Socket} = State, Cond) ->
+ Err = #'v1_0.error'{condition = Cond},
+ End = #'v1_0.end'{error = Err},
+ Frame = encode_frame(End, State),
+ socket_send(Socket, Frame).
+
+encode_frame(Record, #state{channel = Channel}) ->
+ Encoded = amqp10_framing:encode_bin(Record),
+ amqp10_binary_generator:build_frame(Channel, Encoded).
+
+send(Record, #state{socket = Socket} = State) ->
+ Frame = encode_frame(Record, State),
+ socket_send(Socket, Frame).
+
+send_transfer(Transfer0, Parts0, #state{socket = Socket, channel = Channel,
+ connection_config = Config}) ->
+ OutMaxFrameSize = case Config of
+ #{outgoing_max_frame_size := undefined} ->
+ ?MAX_MAX_FRAME_SIZE;
+ #{outgoing_max_frame_size := Sz} -> Sz;
+ _ -> ?MAX_MAX_FRAME_SIZE
+ end,
+ Transfer = amqp10_framing:encode_bin(Transfer0),
+ TSize = iolist_size(Transfer),
+ Parts = [amqp10_framing:encode_bin(P) || P <- Parts0],
+ PartsBin = iolist_to_binary(Parts),
+
+ % TODO: this does not take the extended header into account
+ % see: 2.3
+ MaxPayloadSize = OutMaxFrameSize - TSize - ?FRAME_HEADER_SIZE,
+
+ Frames = build_frames(Channel, Transfer0, PartsBin, MaxPayloadSize, []),
+ ok = socket_send(Socket, Frames),
+ {ok, length(Frames)}.
+
+send_flow(Send, OutHandle,
+ #'v1_0.flow'{link_credit = {uint, Credit}} = Flow0, RenewAfter,
+ #state{links = Links,
+ next_incoming_id = NII,
+ next_outgoing_id = NOI,
+ outgoing_window = OutWin,
+ incoming_window = InWin} = State) ->
+ AutoFlow = case RenewAfter of
+ never -> never;
+ Limit -> {auto, Limit, Credit}
+ end,
+ #{OutHandle := #link{output_handle = H,
+ role = receiver,
+ delivery_count = DeliveryCount,
+ available = Available,
+ link_credit_unsettled = LCU} = Link} = Links,
+ Flow = Flow0#'v1_0.flow'{handle = uint(H),
+ link_credit = uint(Credit),
+ next_incoming_id = uint(NII),
+ next_outgoing_id = uint(NOI),
+ outgoing_window = uint(OutWin),
+ incoming_window = uint(InWin),
+ delivery_count = uint(DeliveryCount),
+ available = uint(Available)},
+ ok = Send(Flow, State),
+ State#state{links = Links#{OutHandle =>
+ Link#link{link_credit = Credit,
+ % need to add on the current LCU
+ % to ensure we don't overcredit
+ link_credit_unsettled = LCU + Credit,
+ auto_flow = AutoFlow}}}.
+
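+%% Split the payload into as many frames as needed so that no frame body
+%% exceeds MaxPayloadSize; e.g. a 10000-byte payload with a 4096-byte
+%% limit yields three frames, all but the last with more = true.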
+build_frames(Channel, Trf, Bin, MaxPayloadSize, Acc)
+ when byte_size(Bin) =< MaxPayloadSize ->
+ T = amqp10_framing:encode_bin(Trf#'v1_0.transfer'{more = false}),
+ Frame = amqp10_binary_generator:build_frame(Channel, [T, Bin]),
+ lists:reverse([Frame | Acc]);
+build_frames(Channel, Trf, Payload, MaxPayloadSize, Acc) ->
+ <<Bin:MaxPayloadSize/binary, Rest/binary>> = Payload,
+ T = amqp10_framing:encode_bin(Trf#'v1_0.transfer'{more = true}),
+ Frame = amqp10_binary_generator:build_frame(Channel, [T, Bin]),
+ build_frames(Channel, Trf, Rest, MaxPayloadSize, [Frame | Acc]).
+
+make_source(#{role := {sender, _}}) ->
+ #'v1_0.source'{};
+make_source(#{role := {receiver, #{address := Address} = Target, _Pid}, filter := Filter}) ->
+ Durable = translate_terminus_durability(maps:get(durable, Target, none)),
+ TranslatedFilter = translate_filters(Filter),
+ #'v1_0.source'{address = {utf8, Address},
+ durable = {uint, Durable},
+ filter = TranslatedFilter}.
+
+make_target(#{role := {receiver, _Source, _Pid}}) ->
+ #'v1_0.target'{};
+make_target(#{role := {sender, #{address := Address} = Target}}) ->
+ Durable = translate_terminus_durability(maps:get(durable, Target, none)),
+ #'v1_0.target'{address = {utf8, Address},
+ durable = {uint, Durable}}.
+
+make_properties(#{properties := Properties}) ->
+ translate_properties(Properties);
+make_properties(_) ->
+ undefined.
+
+translate_properties(Properties) when is_map(Properties) andalso map_size(Properties) =:= 0 ->
+ undefined;
+translate_properties(Properties) when is_map(Properties) ->
+ {map, maps:fold(fun translate_property/3, [], Properties)}.
+
+translate_property(K, V, Acc) when is_tuple(V) ->
+ [{{symbol, K}, V} | Acc].
+
+translate_terminus_durability(none) -> 0;
+translate_terminus_durability(configuration) -> 1;
+translate_terminus_durability(unsettled_state) -> 2.
+
+translate_filters(Filters) when is_map(Filters) andalso map_size(Filters) =:= 0 -> undefined;
+translate_filters(Filters) when is_map(Filters) -> {
+ map,
+ maps:fold(
+ fun(<<"apache.org:legacy-amqp-direct-binding:string">> = K, V, Acc) when is_binary(V) ->
+ [{{symbol, K}, {described, {symbol, K}, {utf8, V}}} | Acc];
+ (<<"apache.org:legacy-amqp-topic-binding:string">> = K, V, Acc) when is_binary(V) ->
+ [{{symbol, K}, {described, {symbol, K}, {utf8, V}}} | Acc];
+ (<<"apache.org:legacy-amqp-headers-binding:map">> = K, V, Acc) when is_map(V) ->
+ [{{symbol, K}, {described, {symbol, K}, translate_legacy_amqp_headers_binding(V)}} | Acc];
+ (<<"apache.org:no-local-filter:list">> = K, V, Acc) when is_list(V) ->
+ [{{symbol, K}, {described, {symbol, K}, lists:map(fun(Id) -> {utf8, Id} end, V)}} | Acc];
+ (<<"apache.org:selector-filter:string">> = K, V, Acc) when is_binary(V) ->
+ [{{symbol, K}, {described, {symbol, K}, {utf8, V}}} | Acc]
+ end,
+ [],
+ Filters)
+}.
+
+% https://people.apache.org/~rgodfrey/amqp-1.0/apache-filters.html
+translate_legacy_amqp_headers_binding(LegacyHeaders) -> {
+ map,
+ maps:fold(
+ fun(<<"x-match">> = K, <<"any">> = V, Acc) ->
+ [{{utf8, K}, {utf8, V}} | Acc];
+ (<<"x-match">> = K, <<"all">> = V, Acc) ->
+ [{{utf8, K}, {utf8, V}} | Acc];
+ (<<"x-",_/binary>>, _, Acc) ->
+ Acc;
+ (K, V, Acc) ->
+ [{{utf8, K}, {utf8, V}} | Acc]
+ end,
+ [],
+ LegacyHeaders)
+}.
+
+send_detach(Send, {detach, OutHandle}, _From, State = #state{links = Links}) ->
+ case Links of
+ #{OutHandle := #link{input_handle = undefined}} ->
+ % Link = stash_link_request(Link0, From, Msg),
+ % not fully attached yet - stash the request for later processing
+ {{error, half_attached}, State};
+ #{OutHandle := Link} ->
+ Detach = #'v1_0.detach'{handle = uint(OutHandle),
+ closed = true},
+ ok = Send(Detach, State),
+ Links1 = Links#{OutHandle => Link#link{state = detach_sent}},
+ {ok, State#state{links = Links1}};
+ _ ->
+ {{error, link_not_found}, State}
+ end.
+
+detach_with_error_cond(Link = #link{output_handle = OutHandle}, State, Cond) ->
+ Err = #'v1_0.error'{condition = Cond},
+ Detach = #'v1_0.detach'{handle = uint(OutHandle),
+ closed = true,
+ error = Err},
+ ok = send(Detach, State),
+ Link#link{state = detach_sent}.
+
+send_attach(Send, #{name := Name, role := Role} = Args, {FromPid, _},
+ #state{next_link_handle = OutHandle, links = Links,
+ link_index = LinkIndex} = State) ->
+
+ Source = make_source(Args),
+ Target = make_target(Args),
+ Properties = make_properties(Args),
+
+ {LinkTarget, RoleAsBool} = case Role of
+ {receiver, _, Pid} ->
+ {{pid, Pid}, true};
+ {sender, #{address := TargetAddr}} ->
+ {TargetAddr, false}
+ end,
+
+ % create attach performative
+ Attach = #'v1_0.attach'{name = {utf8, Name},
+ role = RoleAsBool,
+ handle = {uint, OutHandle},
+ source = Source,
+ properties = Properties,
+ initial_delivery_count =
+ {uint, ?INITIAL_DELIVERY_COUNT},
+ snd_settle_mode = snd_settle_mode(Args),
+ rcv_settle_mode = rcv_settle_mode(Args),
+ target = Target},
+ ok = Send(Attach, State),
+
+ Link = #link{name = Name,
+ ref = make_link_ref(element(1, Role), self(), OutHandle),
+ output_handle = OutHandle,
+ state = attach_sent,
+ role = element(1, Role),
+ notify = FromPid,
+ auto_flow = never,
+ target = LinkTarget},
+
+ {State#state{links = Links#{OutHandle => Link},
+ next_link_handle = OutHandle + 1,
+ link_index = LinkIndex#{Name => OutHandle}}, Link#link.ref}.
+
+-spec handle_session_flow(#'v1_0.flow'{}, #state{}) -> #state{}.
+handle_session_flow(#'v1_0.flow'{next_incoming_id = MaybeNII,
+ next_outgoing_id = {uint, NOI},
+ incoming_window = {uint, InWin},
+ outgoing_window = {uint, OutWin}},
+ #state{next_outgoing_id = OurNOI} = State) ->
+ NII = case MaybeNII of
+ {uint, N} -> N;
+ undefined -> ?INITIAL_OUTGOING_ID + 1
+ end,
+ State#state{next_incoming_id = NOI,
+ remote_incoming_window = NII + InWin - OurNOI, % see: 2.5.6
+ remote_outgoing_window = OutWin}.
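+%% Worked example (cf. handle_session_flow_test/0 below): with remote
+%% next_incoming_id = 50, incoming_window = 1000 and our
+%% next_outgoing_id = 51, remote_incoming_window becomes
+%% 50 + 1000 - 51 = 999.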
+
+
+-spec handle_link_flow(#'v1_0.flow'{}, #link{}) -> {ok | send_flow, #link{}}.
+handle_link_flow(#'v1_0.flow'{drain = true, link_credit = {uint, TheirCredit}},
+ Link = #link{role = sender,
+ delivery_count = OurDC,
+ available = 0}) ->
+ {send_flow, Link#link{link_credit = 0,
+ delivery_count = OurDC + TheirCredit}};
+handle_link_flow(#'v1_0.flow'{delivery_count = MaybeTheirDC,
+ link_credit = {uint, TheirCredit}},
+ Link = #link{role = sender,
+ delivery_count = OurDC}) ->
+ TheirDC = case MaybeTheirDC of
+ undefined -> ?INITIAL_DELIVERY_COUNT;
+ {uint, DC} -> DC
+ end,
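+    %% spec section 2.6.7: the sender's link-credit is the receiver's
+    %% delivery-count plus its link-credit, minus the sender's own
+    %% delivery-count.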
+ LinkCredit = TheirDC + TheirCredit - OurDC,
+
+ {ok, Link#link{link_credit = LinkCredit}};
+handle_link_flow(#'v1_0.flow'{delivery_count = TheirDC,
+ available = Available,
+ drain = Drain},
+ Link = #link{role = receiver}) ->
+
+ {ok, Link#link{delivery_count = unpack(TheirDC),
+ available = unpack(Available),
+ drain = Drain}}.
+
+-spec find_link_by_input_handle(link_handle(), #state{}) ->
+ {ok, #link{}} | not_found.
+find_link_by_input_handle(InHandle, #state{link_handle_index = LHI,
+ links = Links}) ->
+ case LHI of
+ #{InHandle := OutHandle} ->
+ case Links of
+ #{OutHandle := Link} ->
+ {ok, Link};
+ _ -> not_found
+ end;
+ _ -> not_found
+ end.
+
+with_link(InHandle, State, Fun) ->
+ case find_link_by_input_handle(InHandle, State) of
+ {ok, Link} ->
+ Fun(Link, State);
+ not_found ->
+ % end session with errant-link
+ ok = send_end(State, ?V_1_0_SESSION_ERROR_ERRANT_LINK),
+ {next_state, end_sent, State}
+ end.
+
+
+uint(Int) -> {uint, Int}.
+unpack(X) -> amqp10_client_types:unpack(X).
+
+snd_settle_mode(#{snd_settle_mode := unsettled}) -> {ubyte, 0};
+snd_settle_mode(#{snd_settle_mode := settled}) -> {ubyte, 1};
+snd_settle_mode(#{snd_settle_mode := mixed}) -> {ubyte, 2};
+snd_settle_mode(_) -> undefined.
+
+rcv_settle_mode(#{rcv_settle_mode := first}) -> {ubyte, 0};
+rcv_settle_mode(#{rcv_settle_mode := second}) -> {ubyte, 1};
+rcv_settle_mode(_) -> undefined.
+
+% certain amqp10 brokers (IBM MQ) return an undefined delivery state
+% when the link is detached before settlement is sent
+% TODO: work out if we can assume accepted
+translate_delivery_state(undefined) -> undefined;
+translate_delivery_state(#'v1_0.accepted'{}) -> accepted;
+translate_delivery_state(#'v1_0.rejected'{}) -> rejected;
+translate_delivery_state(#'v1_0.modified'{}) -> modified;
+translate_delivery_state(#'v1_0.released'{}) -> released;
+translate_delivery_state(#'v1_0.received'{}) -> received;
+translate_delivery_state(accepted) -> #'v1_0.accepted'{};
+translate_delivery_state(rejected) -> #'v1_0.rejected'{};
+translate_delivery_state(modified) -> #'v1_0.modified'{};
+translate_delivery_state(released) -> #'v1_0.released'{};
+translate_delivery_state(received) -> #'v1_0.received'{}.
+
+translate_role(sender) -> false;
+translate_role(receiver) -> true.
+
+maybe_notify_link_credit(#link{link_credit = 0, role = sender},
+ #link{link_credit = Credit} = Link)
+ when Credit > 0 ->
+ notify_link(Link, credited);
+maybe_notify_link_credit(_Old, _New) ->
+ ok.
+
+notify_link_attached(Link) ->
+ notify_link(Link, attached).
+
+notify_link_detached(Link, Reason) ->
+ notify_link(Link, {detached, Reason}).
+
+notify_link(#link{notify = Pid, ref = Ref}, What) ->
+ Evt = {amqp10_event, {link, Ref, What}},
+ Pid ! Evt,
+ ok.
+
+notify_session_begun(#state{notify = Pid}) ->
+ Pid ! amqp10_session_event(begun),
+ ok.
+
+notify_session_ended(#state{notify = Pid}, Reason) ->
+ Pid ! amqp10_session_event({ended, Reason}),
+ ok.
+
+notify_disposition({Pid, _}, SessionDeliveryTag) ->
+ Pid ! {amqp10_disposition, SessionDeliveryTag},
+ ok.
+
+book_transfer_send(Num, #link{output_handle = Handle} = Link,
+ #state{next_outgoing_id = NOI,
+ remote_incoming_window = RIW,
+ links = Links} = State) ->
+ State#state{next_outgoing_id = NOI+Num,
+ remote_incoming_window = RIW-Num,
+ links = Links#{Handle => incr_link_counters(Link)}}.
+
+book_partial_transfer_received(#state{next_incoming_id = NID,
+ remote_outgoing_window = ROW} = State) ->
+ State#state{next_incoming_id = NID+1,
+ remote_outgoing_window = ROW-1}.
+
+book_transfer_received(_Settled,
+ State = #state{connection_config =
+ #{transfer_limit_margin := Margin}},
+ #link{link_credit = Margin}) ->
+ {transfer_limit_exceeded, State};
+book_transfer_received(Settled,
+ #state{next_incoming_id = NID,
+ remote_outgoing_window = ROW,
+ links = Links} = State,
+ #link{output_handle = OutHandle,
+ delivery_count = DC,
+ link_credit = LC,
+ link_credit_unsettled = LCU0} = Link) ->
+ LCU = case Settled of
+ true -> LCU0-1;
+ _ -> LCU0
+ end,
+
+ Link1 = Link#link{delivery_count = DC+1,
+ link_credit = LC-1,
+ link_credit_unsettled = LCU},
+ State1 = State#state{links = Links#{OutHandle => Link1},
+ next_incoming_id = NID+1,
+ remote_outgoing_window = ROW-1},
+ case Link1 of
+ #link{link_credit = 0,
+ % only notify of credit exhaustion when
+ % not using auto flow.
+ auto_flow = never} ->
+ {credit_exhausted, State1};
+ _ -> {ok, State1}
+ end.
+
+auto_flow(#link{link_credit_unsettled = LCU,
+ auto_flow = {auto, Limit, Credit},
+ output_handle = OutHandle}, State)
+ when LCU =< Limit ->
+ send_flow(fun send/2, OutHandle,
+ #'v1_0.flow'{link_credit = {uint, Credit}},
+ Limit, State);
+auto_flow(_Link, State) ->
+ State.
+
+update_link(Link = #link{output_handle = OutHandle},
+ State = #state{links = Links}) ->
+ State#state{links = Links#{OutHandle => Link}}.
+
+incr_link_counters(#link{link_credit = LC, delivery_count = DC} = Link) ->
+ Link#link{delivery_count = DC+1, link_credit = LC+1}.
+
+append_partial_transfer(Transfer, Payload,
+ #link{partial_transfers = undefined} = Link) ->
+ Link#link{partial_transfers = {Transfer, [Payload]}};
+append_partial_transfer(_Transfer, Payload,
+ #link{partial_transfers = {T, Payloads}} = Link) ->
+ Link#link{partial_transfers = {T, [Payload | Payloads]}}.
+
+complete_partial_transfer(Transfer, Payload,
+ #link{partial_transfers = undefined} = Link) ->
+ {Transfer, Payload, Link};
+complete_partial_transfer(_Transfer, Payload,
+ #link{partial_transfers = {T, Payloads}} = Link) ->
+ {T, iolist_to_binary(lists:reverse([Payload | Payloads])),
+ Link#link{partial_transfers = undefined}}.
+
+decode_as_msg(Transfer, Payload) ->
+ Records = amqp10_framing:decode_bin(Payload),
+ amqp10_msg:from_amqp_records([Transfer | Records]).
+
+amqp10_session_event(Evt) ->
+ {amqp10_event, {session, self(), Evt}}.
+
+socket_send(Sock, Data) ->
+ case socket_send0(Sock, Data) of
+ ok -> ok;
+ {error, Reason} -> exit({socket_closed, Reason})
+ end.
+
+-dialyzer({no_fail_call, socket_send0/2}).
+socket_send0({tcp, Socket}, Data) ->
+ gen_tcp:send(Socket, Data);
+socket_send0({ssl, Socket}, Data) ->
+ ssl:send(Socket, Data).
+
+-spec make_link_ref(_, _, _) -> link_ref().
+make_link_ref(Role, Session, Handle) ->
+ #link_ref{role = Role, session = Session, link_handle = Handle}.
+
+-ifdef(TEST).
+-include_lib("eunit/include/eunit.hrl").
+
+handle_session_flow_test() ->
+    % see spec section 2.5.6 for the logic
+ Flow = #'v1_0.flow'{next_incoming_id = {uint, 50},
+ next_outgoing_id = {uint, 42},
+ incoming_window = {uint, 1000},
+ outgoing_window = {uint, 2000}},
+ State0 = #state{next_outgoing_id = 51},
+ State = handle_session_flow(Flow, State0),
+ 42 = State#state.next_incoming_id,
+ 2000 = State#state.remote_outgoing_window,
+ 50 + 1000 - 51 = State#state.remote_incoming_window.
+
+handle_session_flow_pre_begin_test() ->
+    % see spec section 2.5.6 for the logic
+ Flow = #'v1_0.flow'{next_incoming_id = undefined,
+ next_outgoing_id = {uint, 42},
+ incoming_window = {uint, 1000},
+ outgoing_window = {uint, 2000}},
+ State0 = #state{next_outgoing_id = 51},
+ State = handle_session_flow(Flow, State0),
+ 42 = State#state.next_incoming_id,
+ 2000 = State#state.remote_outgoing_window,
+ ?INITIAL_OUTGOING_ID + 1 + 1000 - 51 = State#state.remote_incoming_window.
+
+handle_link_flow_sender_test() ->
+ Handle = 45,
+ DeliveryCount = 55,
+ Link = #link{role = sender, output_handle = 99,
+ link_credit = 0, delivery_count = DeliveryCount + 2},
+ Flow = #'v1_0.flow'{handle = {uint, Handle},
+ link_credit = {uint, 42},
+ delivery_count = {uint, DeliveryCount}
+ },
+ {ok, Outcome} = handle_link_flow(Flow, Link),
+ % see section 2.6.7
+ Expected = DeliveryCount + 42 - (DeliveryCount + 2),
+ Expected = Outcome#link.link_credit,
+
+ % receiver does not yet know the delivery_count
+ {ok, Outcome2} = handle_link_flow(Flow#'v1_0.flow'{delivery_count = undefined},
+ Link),
+ Expected2 = ?INITIAL_DELIVERY_COUNT + 42 - (DeliveryCount + 2),
+ Expected2 = Outcome2#link.link_credit.
+
+handle_link_flow_sender_drain_test() ->
+ Handle = 45,
+ SndDeliveryCount = 55,
+ RcvLinkCredit = 42,
+ Link = #link{role = sender, output_handle = 99,
+ available = 0, link_credit = 20,
+ delivery_count = SndDeliveryCount},
+ Flow = #'v1_0.flow'{handle = {uint, Handle},
+ link_credit = {uint, RcvLinkCredit},
+ drain = true},
+ {send_flow, Outcome} = handle_link_flow(Flow, Link),
+ 0 = Outcome#link.link_credit,
+ ExpectedDC = SndDeliveryCount + RcvLinkCredit,
+ ExpectedDC = Outcome#link.delivery_count.
+
+handle_link_flow_receiver_test() ->
+ Handle = 45,
+ DeliveryCount = 55,
+ SenderDC = 57,
+ Link = #link{role = receiver, output_handle = 99,
+ link_credit = 0, delivery_count = DeliveryCount},
+ Flow = #'v1_0.flow'{handle = {uint, Handle},
+ delivery_count = {uint, SenderDC},
+ available = 99,
+ drain = true % what to do?
+ },
+ {ok, Outcome} = handle_link_flow(Flow, Link),
+ % see section 2.6.7
+ 99 = Outcome#link.available,
+ true = Outcome#link.drain,
+ SenderDC = Outcome#link.delivery_count. % maintain delivery count
+
+translate_filters_empty_map_test() ->
+ undefined = translate_filters(#{}).
+
+translate_filters_legacy_amqp_direct_binding_filter_test() ->
+ {map,
+ [{
+ {symbol,<<"apache.org:legacy-amqp-direct-binding:string">>},
+ {described, {symbol, <<"apache.org:legacy-amqp-direct-binding:string">>}, {utf8,<<"my topic">>}}
+ }]
+ } = translate_filters(#{<<"apache.org:legacy-amqp-direct-binding:string">> => <<"my topic">>}).
+
+translate_filters_legacy_amqp_topic_binding_filter_test() ->
+ {map,
+ [{
+ {symbol, <<"apache.org:legacy-amqp-topic-binding:string">>},
+ {described, {symbol, <<"apache.org:legacy-amqp-topic-binding:string">>}, {utf8, <<"*.stock.#">>}}
+ }]
+ } = translate_filters(#{<<"apache.org:legacy-amqp-topic-binding:string">> => <<"*.stock.#">>}).
+
+translate_filters_legacy_amqp_headers_binding_filter_test() ->
+ {map,
+ [{
+ {symbol, <<"apache.org:legacy-amqp-headers-binding:map">>},
+ {described, {symbol, <<"apache.org:legacy-amqp-headers-binding:map">>},
+ {map, [
+ {{utf8, <<"x-match">>}, {utf8, <<"all">>}},
+ {{utf8, <<"foo">>}, {utf8, <<"bar">>}},
+ {{utf8, <<"bar">>}, {utf8, <<"baz">>}}
+ ]
+ }}
+ }]
+ } = translate_filters(#{<<"apache.org:legacy-amqp-headers-binding:map">> => #{
+ <<"x-match">> => <<"all">>,
+ <<"x-something">> => <<"ignored">>,
+ <<"bar">> => <<"baz">>,
+ <<"foo">> => <<"bar">>
+ }}).
+
+translate_filters_legacy_amqp_no_local_filter_test() ->
+ {map,
+ [{
+ {symbol, <<"apache.org:no-local-filter:list">>},
+ {described, {symbol, <<"apache.org:no-local-filter:list">>}, [{utf8, <<"foo">>}, {utf8, <<"bar">>}]}
+ }]
+ } = translate_filters(#{<<"apache.org:no-local-filter:list">> => [<<"foo">>, <<"bar">>]}).
+
+translate_filters_selector_filter_test() ->
+ {map,
+ [{
+ {symbol, <<"apache.org:selector-filter:string">>},
+ {described, {symbol, <<"apache.org:selector-filter:string">>},
+ {utf8, <<"amqp.annotation.x-opt-enqueuedtimeutc > 123456789">>}}
+ }]
+ } = translate_filters(#{<<"apache.org:selector-filter:string">> => <<"amqp.annotation.x-opt-enqueuedtimeutc > 123456789">>}).
+
+translate_filters_multiple_filters_test() ->
+ {map,
+ [
+ {{symbol, <<"apache.org:selector-filter:string">>},
+ {described, {symbol, <<"apache.org:selector-filter:string">>},
+ {utf8, <<"amqp.annotation.x-opt-enqueuedtimeutc > 123456789">>}}},
+ {{symbol, <<"apache.org:legacy-amqp-direct-binding:string">>},
+ {described, {symbol, <<"apache.org:legacy-amqp-direct-binding:string">>}, {utf8,<<"my topic">>}}}
+ ]
+ } = translate_filters(#{
+ <<"apache.org:legacy-amqp-direct-binding:string">> => <<"my topic">>,
+ <<"apache.org:selector-filter:string">> => <<"amqp.annotation.x-opt-enqueuedtimeutc > 123456789">>
+ }).
+-endif.
diff --git a/deps/amqp10_client/src/amqp10_client_sessions_sup.erl b/deps/amqp10_client/src/amqp10_client_sessions_sup.erl
new file mode 100644
index 0000000000..cbe8499819
--- /dev/null
+++ b/deps/amqp10_client/src/amqp10_client_sessions_sup.erl
@@ -0,0 +1,36 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+-module(amqp10_client_sessions_sup).
+
+-behaviour(supervisor).
+
+%% Private API.
+-export([start_link/0]).
+
+%% Supervisor callbacks.
+-export([init/1]).
+
+-define(CHILD(Id, Mod, Type, Args), {Id, {Mod, start_link, Args},
+ transient, 5000, Type, [Mod]}).
+
+%% -------------------------------------------------------------------
+%% Private API.
+%% -------------------------------------------------------------------
+
+-spec start_link() ->
+ {ok, pid()} | ignore | {error, any()}.
+
+start_link() ->
+ supervisor:start_link(?MODULE, []).
+
+%% -------------------------------------------------------------------
+%% Supervisor callbacks.
+%% -------------------------------------------------------------------
+
+init(Args) ->
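+    %% simple_one_for_one: one session process is started per
+    %% supervisor:start_child/2 call, each from this single template.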
+ Template = ?CHILD(session, amqp10_client_session, worker, Args),
+ {ok, {{simple_one_for_one, 0, 1}, [Template]}}.
diff --git a/deps/amqp10_client/src/amqp10_client_sup.erl b/deps/amqp10_client/src/amqp10_client_sup.erl
new file mode 100644
index 0000000000..345a51b141
--- /dev/null
+++ b/deps/amqp10_client/src/amqp10_client_sup.erl
@@ -0,0 +1,34 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+-module(amqp10_client_sup).
+
+-behaviour(supervisor).
+
+%% Private API.
+-export([start_link/0]).
+
+%% Supervisor callbacks.
+-export([init/1]).
+
+-define(CHILD(Id, Mod, Type, Args), {Id, {Mod, start_link, Args},
+ temporary, infinity, Type, [Mod]}).
+
+%% -------------------------------------------------------------------
+%% Private API.
+%% -------------------------------------------------------------------
+
+start_link() ->
+ supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+%% -------------------------------------------------------------------
+%% Supervisor callbacks.
+%% -------------------------------------------------------------------
+
+init([]) ->
+ Template = ?CHILD(connection_sup, amqp10_client_connection_sup,
+ supervisor, []),
+ {ok, {{simple_one_for_one, 0, 1}, [Template]}}.
diff --git a/deps/amqp10_client/src/amqp10_client_types.erl b/deps/amqp10_client/src/amqp10_client_types.erl
new file mode 100644
index 0000000000..2f0ab2a413
--- /dev/null
+++ b/deps/amqp10_client/src/amqp10_client_types.erl
@@ -0,0 +1,69 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+-module(amqp10_client_types).
+
+-include_lib("amqp10_common/include/amqp10_framing.hrl").
+
+-export([unpack/1,
+ utf8/1,
+ uint/1]).
+
+-type amqp10_performative() :: #'v1_0.open'{} | #'v1_0.begin'{} | #'v1_0.attach'{} |
+ #'v1_0.flow'{} | #'v1_0.transfer'{} |
+ #'v1_0.disposition'{} | #'v1_0.detach'{} |
+ #'v1_0.end'{} | #'v1_0.close'{}.
+
+-type amqp10_msg_record() :: #'v1_0.transfer'{} | #'v1_0.header'{} |
+ #'v1_0.delivery_annotations'{} |
+ #'v1_0.message_annotations'{} |
+ #'v1_0.properties'{} |
+ #'v1_0.application_properties'{} |
+ #'v1_0.data'{} | #'v1_0.amqp_sequence'{} |
+ #'v1_0.amqp_value'{} | #'v1_0.footer'{}.
+
+-type channel() :: non_neg_integer().
+
+-type source() :: #'v1_0.source'{}.
+-type target() :: #'v1_0.target'{}.
+
+-type delivery_state() :: accepted | rejected | modified | received | released.
+
+-type amqp_error() :: internal_error | not_found | unauthorized_access |
+ decode_error | resource_limit_exceeded |
+ not_allowed | invalid_field | not_implemented |
+ resource_locked | precondition_failed | resource_deleted |
+ illegal_state | frame_size_too_small.
+
+-type connection_error() :: connection_forced | framing_error | redirect.
+-type session_error() :: atom(). % TODO
+-type link_error() :: atom(). % TODO
+
+-type connection_event_detail() :: opened |
+ {closed, Reason::any()} |
+ {error, {connection_error(), any()}}.
+-type session_event_detail() :: begun | ended | {error, {session_error(), any()}}.
+-type link_event_detail() :: attached | detached | {error, {link_error(), any()}}.
+-type amqp10_event_detail() :: {connection, pid(), connection_event_detail()} |
+ {session, pid(), session_event_detail()} |
+ {link, {sender | receiver, Name :: binary()},
+ link_event_detail()}.
+-type amqp10_event() :: {amqp10_event, amqp10_event_detail()}.
+
+-export_type([amqp10_performative/0, channel/0,
+ source/0, target/0, amqp10_msg_record/0,
+ delivery_state/0, amqp_error/0, connection_error/0,
+ amqp10_event_detail/0, amqp10_event/0]).
+
+
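+%% Strips the type tag from a decoded AMQP value,
+%% e.g. unpack({uint, 5}) -> 5 and unpack(undefined) -> undefined.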
+unpack(undefined) -> undefined;
+unpack({_, Value}) -> Value;
+unpack(Value) -> Value.
+
+utf8(S) when is_list(S) -> {utf8, list_to_binary(S)};
+utf8(B) when is_binary(B) -> {utf8, B}.
+
+uint(N) -> {uint, N}.
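+
+%% Receiving these events in practice (an illustrative sketch; the
+%% patterns mirror amqp10_event_detail() above):
+%%   receive
+%%       {amqp10_event, {connection, Conn, opened}} -> ok;
+%%       {amqp10_event, {session, Session, begun}} -> ok;
+%%       {amqp10_event, {link, Ref, attached}} -> ok
+%%   end.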
diff --git a/deps/amqp10_client/src/amqp10_msg.erl b/deps/amqp10_client/src/amqp10_msg.erl
new file mode 100644
index 0000000000..fdd198e125
--- /dev/null
+++ b/deps/amqp10_client/src/amqp10_msg.erl
@@ -0,0 +1,453 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+-module(amqp10_msg).
+
+-export([from_amqp_records/1,
+ to_amqp_records/1,
+ % "read" api
+ delivery_id/1,
+ delivery_tag/1,
+ handle/1,
+ settled/1,
+ message_format/1,
+ headers/1,
+ header/2,
+ delivery_annotations/1,
+ message_annotations/1,
+ properties/1,
+ application_properties/1,
+ body/1,
+ body_bin/1,
+ footer/1,
+ % "write" api
+ new/2,
+ new/3,
+ set_handle/2,
+ set_settled/2,
+ set_message_format/2,
+ set_headers/2,
+ set_properties/2,
+ set_application_properties/2,
+ set_delivery_annotations/2,
+ set_message_annotations/2
+ ]).
+
+-include_lib("amqp10_common/include/amqp10_framing.hrl").
+
+-type maybe(T) :: T | undefined.
+
+-type delivery_tag() :: binary().
+-type content_type() :: term(). % TODO: refine
+-type content_encoding() :: term(). % TODO: refine
+
+% annotation keys are restricted to be of type symbol or of type ulong
+-type annotations_key() :: binary() | non_neg_integer().
+
+-type header_key() :: durable | priority | ttl | first_acquirer |
+ delivery_count.
+
+-type amqp10_header() :: #{durable => boolean(), % false
+ priority => byte(), % 4
+ ttl => maybe(non_neg_integer()),
+ first_acquirer => boolean(), % false
+ delivery_count => non_neg_integer()}. % 0
+
+-type amqp10_properties() :: #{message_id => maybe(any()),
+ user_id => maybe(binary()),
+ to => maybe(any()),
+ subject => maybe(binary()),
+ reply_to => maybe(any()),
+ correlation_id => maybe(any()),
+ content_type => maybe(content_type()),
+ content_encoding => maybe(content_encoding()),
+ absolute_expiry_time => maybe(non_neg_integer()),
+ creation_time => maybe(non_neg_integer()),
+ group_id => maybe(binary()),
+ group_sequence => maybe(non_neg_integer()),
+ reply_to_group_id => maybe(binary())}.
+
+-type amqp10_body() :: [#'v1_0.data'{}] |
+ [#'v1_0.amqp_sequence'{}] |
+ #'v1_0.amqp_value'{}.
+
+
+
+-record(amqp10_msg,
+ {transfer :: #'v1_0.transfer'{},
+ header :: maybe(#'v1_0.header'{}),
+ delivery_annotations :: maybe(#'v1_0.delivery_annotations'{}),
+ message_annotations :: maybe(#'v1_0.message_annotations'{}),
+ properties :: maybe(#'v1_0.properties'{}),
+ application_properties :: maybe(#'v1_0.application_properties'{}),
+ body :: amqp10_body() | unset,
+ footer :: maybe(#'v1_0.footer'{})
+ }).
+
+-opaque amqp10_msg() :: #amqp10_msg{}.
+
+-export_type([amqp10_msg/0,
+ amqp10_header/0,
+ amqp10_properties/0,
+ amqp10_body/0,
+ delivery_tag/0
+ ]).
+
+-define(record_to_tuplelist(Rec, Ref),
+ lists:zip(record_info(fields, Rec), tl(tuple_to_list(Ref)))).
+
+
+%% API functions
+
+-spec from_amqp_records([amqp10_client_types:amqp10_msg_record()]) ->
+ amqp10_msg().
+from_amqp_records([#'v1_0.transfer'{} = Transfer | Records]) ->
+ lists:foldl(fun parse_from_amqp/2, #amqp10_msg{transfer = Transfer,
+ body = unset}, Records).
+
+-spec to_amqp_records(amqp10_msg()) -> [amqp10_client_types:amqp10_msg_record()].
+to_amqp_records(#amqp10_msg{transfer = T,
+ header = H,
+ delivery_annotations = DAs,
+ message_annotations = MAs,
+ properties = Ps,
+ application_properties = APs,
+ body = B,
+ footer = F
+ }) ->
+ L = lists:flatten([T, H, DAs, MAs, Ps, APs, B, F]),
+ lists:filter(fun has_value/1, L).
+
+-spec delivery_tag(amqp10_msg()) -> delivery_tag().
+delivery_tag(#amqp10_msg{transfer = #'v1_0.transfer'{delivery_tag = Tag}}) ->
+ unpack(Tag).
+
+-spec delivery_id(amqp10_msg()) -> non_neg_integer().
+delivery_id(#amqp10_msg{transfer = #'v1_0.transfer'{delivery_id = Id}}) ->
+ unpack(Id).
+
+-spec handle(amqp10_msg()) -> non_neg_integer().
+handle(#amqp10_msg{transfer = #'v1_0.transfer'{handle = Handle}}) ->
+ unpack(Handle).
+
+-spec settled(amqp10_msg()) -> boolean().
+settled(#amqp10_msg{transfer = #'v1_0.transfer'{settled = Settled}}) ->
+ Settled.
+
+% The first 3 octets are the format;
+% the last octet is the version.
+% See section 2.8.11 of the spec.
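+% Worked example: Format = 101 and Version = 2 pack to
+% (101 bsl 8) + 2 = 25858, i.e.
+% <<101:24/unsigned, 2:8/unsigned>> = <<25858:32/unsigned>>.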
+-spec message_format(amqp10_msg()) ->
+ maybe({non_neg_integer(), non_neg_integer()}).
+message_format(#amqp10_msg{transfer =
+ #'v1_0.transfer'{message_format = undefined}}) ->
+ undefined;
+message_format(#amqp10_msg{transfer =
+ #'v1_0.transfer'{message_format = {uint, MF}}}) ->
+ <<Format:24/unsigned, Version:8/unsigned>> = <<MF:32/unsigned>>,
+ {Format, Version}.
+
+
+-spec headers(amqp10_msg()) -> amqp10_header().
+headers(#amqp10_msg{header = undefined}) -> #{};
+headers(#amqp10_msg{header = #'v1_0.header'{durable = Durable,
+ priority = Priority,
+ ttl = Ttl,
+ first_acquirer = FA,
+ delivery_count = DC}}) ->
+ Fields = [{durable, header_value(durable, Durable)},
+ {priority, header_value(priority, Priority)},
+ {ttl, header_value(ttl, Ttl)},
+ {first_acquirer, header_value(first_acquirer, FA)},
+ {delivery_count, header_value(delivery_count, DC)}],
+
+ lists:foldl(fun ({_Key, undefined}, Acc) -> Acc;
+ ({Key, Value}, Acc) -> Acc#{Key => Value}
+ end, #{}, Fields).
+
+-spec header(header_key(), amqp10_msg()) -> term().
+header(durable = K, #amqp10_msg{header = #'v1_0.header'{durable = D}}) ->
+ header_value(K, D);
+header(priority = K,
+ #amqp10_msg{header = #'v1_0.header'{priority = D}}) ->
+ header_value(K, D);
+header(ttl = K, #amqp10_msg{header = #'v1_0.header'{ttl = D}}) ->
+ header_value(K, D);
+header(first_acquirer = K,
+ #amqp10_msg{header = #'v1_0.header'{first_acquirer = D}}) ->
+ header_value(K, D);
+header(delivery_count = K,
+ #amqp10_msg{header = #'v1_0.header'{delivery_count = D}}) ->
+ header_value(K, D);
+header(K, #amqp10_msg{header = undefined}) -> header_value(K, undefined).
+
+-spec delivery_annotations(amqp10_msg()) -> #{annotations_key() => any()}.
+delivery_annotations(#amqp10_msg{delivery_annotations = undefined}) ->
+ #{};
+delivery_annotations(#amqp10_msg{delivery_annotations =
+ #'v1_0.delivery_annotations'{content = DAs}}) ->
+ lists:foldl(fun({K, V}, Acc) -> Acc#{unpack(K) => unpack(V)} end,
+ #{}, DAs).
+
+-spec message_annotations(amqp10_msg()) -> #{annotations_key() => any()}.
+message_annotations(#amqp10_msg{message_annotations = undefined}) ->
+ #{};
+message_annotations(#amqp10_msg{message_annotations =
+ #'v1_0.message_annotations'{content = MAs}}) ->
+ lists:foldl(fun({K, V}, Acc) -> Acc#{unpack(K) => unpack(V)} end,
+ #{}, MAs).
+
+-spec properties(amqp10_msg()) -> amqp10_properties().
+properties(#amqp10_msg{properties = undefined}) -> #{};
+properties(#amqp10_msg{properties = Props}) ->
+ Fields = ?record_to_tuplelist('v1_0.properties', Props),
+ lists:foldl(fun ({_Key, undefined}, Acc) -> Acc;
+ ({Key, Value}, Acc) -> Acc#{Key => unpack(Value)}
+ end, #{}, Fields).
+
+% application property values can be simple types - no maps or lists
+-spec application_properties(amqp10_msg()) ->
+ #{binary() => binary() | integer() | string()}.
+application_properties(#amqp10_msg{application_properties = undefined}) ->
+ #{};
+application_properties(
+ #amqp10_msg{application_properties =
+ #'v1_0.application_properties'{content = MAs}}) ->
+ lists:foldl(fun({K, V}, Acc) -> Acc#{unpack(K) => unpack(V)} end,
+ #{}, MAs).
+
+-spec footer(amqp10_msg()) -> #{annotations_key() => any()}.
+footer(#amqp10_msg{footer = undefined}) -> #{};
+footer(#amqp10_msg{footer = #'v1_0.footer'{content = Footer}}) ->
+ lists:foldl(fun({K, V}, Acc) -> Acc#{unpack(K) => unpack(V)} end, #{},
+ Footer).
+
+-spec body(amqp10_msg()) ->
+ [binary()] | [#'v1_0.amqp_sequence'{}] | #'v1_0.amqp_value'{}.
+body(#amqp10_msg{body = [#'v1_0.data'{} | _] = Data}) ->
+ [Content || #'v1_0.data'{content = Content} <- Data];
+body(#amqp10_msg{body = Body}) -> Body.
+
+%% @doc Returns the binary representation of the message body.
+-spec body_bin(amqp10_msg()) -> binary().
+body_bin(#amqp10_msg{body = [#'v1_0.data'{content = Bin}]})
+ when is_binary(Bin) ->
+ Bin;
+body_bin(#amqp10_msg{body = Data}) when is_list(Data) ->
+ iolist_to_binary([amqp10_framing:encode_bin(D) || D <- Data]);
+body_bin(#amqp10_msg{body = #'v1_0.amqp_value'{} = Body}) ->
+ %% TODO: to avoid unnecessary decoding and re-encoding we could amend
+ %% the parse to provide the body in a lazy fashion, only decoding when
+ %% reading. For now we just re-encode it.
+ iolist_to_binary(amqp10_framing:encode_bin(Body)).
+
+%% @doc Create a new amqp10 message using the specified delivery tag, body
+%% and settlement state. Settled=true means the message is considered settled
+%% as soon as it is sent and no disposition will be issued by the receiver.
+%% Settled=false will delay settlement until a disposition has been received.
+%% The disposition is notified to the sender by a message of the
+%% following structure:
+%% {amqp10_disposition, {accepted | rejected, DeliveryTag}}
+-spec new(delivery_tag(), amqp10_body() | binary(), boolean()) -> amqp10_msg().
+new(DeliveryTag, Body, Settled) when is_binary(Body) ->
+ #amqp10_msg{transfer = #'v1_0.transfer'{delivery_tag = {binary, DeliveryTag},
+ settled = Settled,
+ message_format = {uint, 0}},
+ body = [#'v1_0.data'{content = Body}]};
+new(DeliveryTag, Body, Settled) -> % TODO: constrain to amqp types
+ #amqp10_msg{transfer = #'v1_0.transfer'{delivery_tag = {binary, DeliveryTag},
+ settled = Settled,
+ message_format = {uint, 0}},
+ body = Body}.
+
+%% @doc Create a new unsettled amqp10 message using the specified delivery
+%% tag and body.
+-spec new(delivery_tag(), amqp10_body() | binary()) -> amqp10_msg().
+new(DeliveryTag, Body) ->
+ new(DeliveryTag, Body, false).
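+%%
+%% Usage sketch (tags and payloads are illustrative):
+%%   Settled   = amqp10_msg:new(<<"tag-1">>, <<"payload">>, true),
+%%   Unsettled = amqp10_msg:new(<<"tag-2">>, <<"payload">>),
+%% the sender of Unsettled is later notified with
+%% {amqp10_disposition, {accepted, <<"tag-2">>}} (or rejected).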
+
+
+% The first 3 octets are the format;
+% the last octet is the version.
+% See section 2.8.11 of the spec.
+%% @doc Set the message format.
+-spec set_message_format({non_neg_integer(), non_neg_integer()},
+ amqp10_msg()) -> amqp10_msg().
+set_message_format({Format, Version}, #amqp10_msg{transfer = T} = Msg) ->
+ <<MsgFormat:32/unsigned>> = <<Format:24/unsigned, Version:8/unsigned>>,
+ Msg#amqp10_msg{transfer = T#'v1_0.transfer'{message_format =
+ {uint, MsgFormat}}}.
+
+%% @doc Set the link handle used for the message transfer.
+-spec set_handle(non_neg_integer(), amqp10_msg()) -> amqp10_msg().
+set_handle(Handle, #amqp10_msg{transfer = T} = Msg) ->
+ Msg#amqp10_msg{transfer = T#'v1_0.transfer'{handle = {uint, Handle}}}.
+
+%% @doc Set the settlement mode.
+%% Settled=true means the message is considered settled
+%% as soon as it is sent and no disposition will be issued by the receiver.
+%% Settled=false will delay settlement until a disposition has been received.
+%% The disposition is notified to the sender by a message of the
+%% following structure:
+%% {amqp10_disposition, {accepted | rejected, DeliveryTag}}
+-spec set_settled(boolean(), amqp10_msg()) -> amqp10_msg().
+set_settled(Settled, #amqp10_msg{transfer = T} = Msg) ->
+ Msg#amqp10_msg{transfer = T#'v1_0.transfer'{settled = Settled}}.
+
+%% @doc Set amqp message headers.
+-spec set_headers(#{atom() => any()}, amqp10_msg()) -> amqp10_msg().
+set_headers(Headers, #amqp10_msg{header = undefined} = Msg) ->
+ set_headers(Headers, Msg#amqp10_msg{header = #'v1_0.header'{}});
+set_headers(Headers, #amqp10_msg{header = Current} = Msg) ->
+ H = maps:fold(fun(durable, V, Acc) ->
+ Acc#'v1_0.header'{durable = V};
+ (priority, V, Acc) ->
+ Acc#'v1_0.header'{priority = {uint, V}};
+ (first_acquirer, V, Acc) ->
+ Acc#'v1_0.header'{first_acquirer = V};
+ (ttl, V, Acc) ->
+ Acc#'v1_0.header'{ttl = {uint, V}};
+ (delivery_count, V, Acc) ->
+ Acc#'v1_0.header'{delivery_count = {uint, V}}
+ end, Current, Headers),
+ Msg#amqp10_msg{header = H}.
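+%% e.g. amqp10_msg:set_headers(#{durable => true, priority => 9}, Msg)
+%% merges the given keys into any header values already set (exercised by
+%% the set_headers and update_headers cases in msg_SUITE).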
+
+%% @doc Set amqp message properties.
+-spec set_properties(amqp10_properties(), amqp10_msg()) -> amqp10_msg().
+set_properties(Props, #amqp10_msg{properties = undefined} = Msg) ->
+ set_properties(Props, Msg#amqp10_msg{properties = #'v1_0.properties'{}});
+set_properties(Props, #amqp10_msg{properties = Current} = Msg) ->
+ % TODO many fields are `any` types and we need to try to type tag them
+ P = maps:fold(fun(message_id, V, Acc) when is_binary(V) ->
+ % message_id can be any type but we restrict it here
+ Acc#'v1_0.properties'{message_id = utf8(V)};
+ (user_id, V, Acc) ->
+ Acc#'v1_0.properties'{user_id = utf8(V)};
+ (to, V, Acc) ->
+ Acc#'v1_0.properties'{to = utf8(V)};
+ (subject, V, Acc) ->
+ Acc#'v1_0.properties'{subject = utf8(V)};
+ (reply_to, V, Acc) ->
+ Acc#'v1_0.properties'{reply_to = utf8(V)};
+ (correlation_id, V, Acc) ->
+ Acc#'v1_0.properties'{correlation_id = utf8(V)};
+ (content_type, V, Acc) ->
+ Acc#'v1_0.properties'{content_type = sym(V)};
+ (content_encoding, V, Acc) ->
+ Acc#'v1_0.properties'{content_encoding = sym(V)};
+ (absolute_expiry_time, V, Acc) ->
+ Acc#'v1_0.properties'{absolute_expiry_time = {timestamp, V}};
+ (creation_time, V, Acc) ->
+ Acc#'v1_0.properties'{creation_time = {timestamp, V}};
+ (group_id, V, Acc) ->
+ Acc#'v1_0.properties'{group_id = utf8(V)};
+ (group_sequence, V, Acc) ->
+ Acc#'v1_0.properties'{group_sequence = uint(V)};
+ (reply_to_group_id, V, Acc) ->
+ Acc#'v1_0.properties'{reply_to_group_id = utf8(V)}
+ end, Current, Props),
+ Msg#amqp10_msg{properties = P}.
+
+-spec set_application_properties(#{binary() | string() => binary() | integer() | string()},
+ amqp10_msg()) -> amqp10_msg().
+set_application_properties(Props,
+ #amqp10_msg{application_properties = undefined} =
+ Msg) ->
+ APs = #'v1_0.application_properties'{content = []},
+ set_application_properties(Props,
+ Msg#amqp10_msg{application_properties = APs});
+set_application_properties(
+ Props0, #amqp10_msg{application_properties =
+ #'v1_0.application_properties'{content = APs0}} = Msg) ->
+ Props = maps:fold(fun (K, V, S) ->
+ S#{utf8(K) => wrap_ap_value(V)}
+ end, maps:from_list(APs0), Props0),
+ APs = #'v1_0.application_properties'{content = maps:to_list(Props)},
+ Msg#amqp10_msg{application_properties = APs}.
+
+-spec set_delivery_annotations(#{binary() => binary() | integer() | string()},
+ amqp10_msg()) -> amqp10_msg().
+set_delivery_annotations(Props,
+ #amqp10_msg{delivery_annotations = undefined} =
+ Msg) ->
+ Anns = #'v1_0.delivery_annotations'{content = []},
+ set_delivery_annotations(Props,
+ Msg#amqp10_msg{delivery_annotations = Anns});
+set_delivery_annotations(
+ Props0, #amqp10_msg{delivery_annotations =
+ #'v1_0.delivery_annotations'{content = Anns0}} = Msg) ->
+ Anns = maps:fold(fun (K, V, S) ->
+ S#{sym(K) => wrap_ap_value(V)}
+ end, maps:from_list(Anns0), Props0),
+ Anns1 = #'v1_0.delivery_annotations'{content = maps:to_list(Anns)},
+ Msg#amqp10_msg{delivery_annotations = Anns1}.
+
+-spec set_message_annotations(#{binary() => binary() | integer() | string()},
+ amqp10_msg()) -> amqp10_msg().
+set_message_annotations(Props,
+ #amqp10_msg{message_annotations = undefined} =
+ Msg) ->
+ Anns = #'v1_0.message_annotations'{content = []},
+ set_message_annotations(Props,
+ Msg#amqp10_msg{message_annotations = Anns});
+set_message_annotations(
+ Props0, #amqp10_msg{message_annotations =
+ #'v1_0.message_annotations'{content = Anns0}} = Msg) ->
+ Anns = maps:fold(fun (K, V, S) ->
+ S#{sym(K) => wrap_ap_value(V)}
+ end, maps:from_list(Anns0), Props0),
+ Anns1 = #'v1_0.message_annotations'{content = maps:to_list(Anns)},
+ Msg#amqp10_msg{message_annotations = Anns1}.
+
+wrap_ap_value(true) ->
+ {boolean, true};
+wrap_ap_value(false) ->
+ {boolean, false};
+wrap_ap_value(V) when is_integer(V) ->
+ {uint, V};
+wrap_ap_value(V) when is_binary(V) ->
+ utf8(V);
+wrap_ap_value(V) when is_list(V) ->
+ utf8(list_to_binary(V));
+wrap_ap_value(V) when is_atom(V) ->
+ utf8(atom_to_list(V)).
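+
+%% Note: only simple types are mapped above - booleans keep a boolean
+%% tag, integers become uint and binaries, lists and atoms become utf8;
+%% maps and nested structures are not handled by wrap_ap_value/1.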
+
+
+%% LOCAL
+header_value(durable, undefined) -> false;
+header_value(priority, undefined) -> 4;
+header_value(first_acquirer, undefined) -> false;
+header_value(delivery_count, undefined) -> 0;
+header_value(Key, {_Type, Value}) -> header_value(Key, Value);
+header_value(_Key, Value) -> Value.
+
+parse_from_amqp(#'v1_0.header'{} = Header, AmqpMsg) ->
+ AmqpMsg#amqp10_msg{header = Header};
+parse_from_amqp(#'v1_0.delivery_annotations'{} = DAS, AmqpMsg) ->
+ AmqpMsg#amqp10_msg{delivery_annotations = DAS};
+parse_from_amqp(#'v1_0.message_annotations'{} = DAS, AmqpMsg) ->
+ AmqpMsg#amqp10_msg{message_annotations = DAS};
+parse_from_amqp(#'v1_0.properties'{} = Header, AmqpMsg) ->
+ AmqpMsg#amqp10_msg{properties = Header};
+parse_from_amqp(#'v1_0.application_properties'{} = APs, AmqpMsg) ->
+ AmqpMsg#amqp10_msg{application_properties = APs};
+parse_from_amqp(#'v1_0.amqp_value'{} = Value, AmqpMsg) ->
+ AmqpMsg#amqp10_msg{body = Value};
+parse_from_amqp(#'v1_0.amqp_sequence'{} = Seq, AmqpMsg) ->
+ AmqpMsg#amqp10_msg{body = [Seq]};
+parse_from_amqp(#'v1_0.data'{} = Data, AmqpMsg) ->
+ AmqpMsg#amqp10_msg{body = [Data]};
+parse_from_amqp(#'v1_0.footer'{} = Header, AmqpMsg) ->
+ AmqpMsg#amqp10_msg{footer = Header}.
+
+unpack(V) -> amqp10_client_types:unpack(V).
+utf8(V) -> amqp10_client_types:utf8(V).
+sym(B) when is_list(B) -> {symbol, list_to_binary(B)};
+sym(B) when is_binary(B) -> {symbol, B}.
+uint(B) -> {uint, B}.
+
+has_value(undefined) -> false;
+has_value(_) -> true.
diff --git a/deps/amqp10_client/test/activemq_ct_helpers.erl b/deps/amqp10_client/test/activemq_ct_helpers.erl
new file mode 100644
index 0000000000..89dbd5fc8e
--- /dev/null
+++ b/deps/amqp10_client/test/activemq_ct_helpers.erl
@@ -0,0 +1,119 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2017-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(activemq_ct_helpers).
+
+-include_lib("common_test/include/ct.hrl").
+
+-export([setup_steps/1,
+ teardown_steps/0,
+ ensure_activemq_cmd/1,
+ init_config_filename/2,
+ init_tcp_port_numbers/1,
+ start_activemq_nodes/1,
+ stop_activemq_nodes/1]).
+
+setup_steps(ConfigFileName) ->
+ [fun ensure_activemq_cmd/1,
+ fun(Config) -> init_config_filename(Config, ConfigFileName) end,
+ fun init_tcp_port_numbers/1,
+ fun start_activemq_nodes/1].
+
+teardown_steps() ->
+ [fun stop_activemq_nodes/1].
+
+ensure_activemq_cmd(Config) ->
+    ActivemqCmd = case rabbit_ct_helpers:get_config(Config, activemq_cmd) of
+ undefined ->
+ case os:getenv("ACTIVEMQ") of
+ false -> "activemq";
+ M -> M
+ end;
+ M ->
+ M
+ end,
+ Cmd = [ActivemqCmd, "--version"],
+ case rabbit_ct_helpers:exec(Cmd, [{match_stdout, "ActiveMQ"}]) of
+ {ok, _} -> rabbit_ct_helpers:set_config(Config,
+ {activemq_cmd, ActivemqCmd});
+ _ -> {skip,
+ "ActiveMQ CLI required, " ++
+ "please set ACTIVEMQ or 'activemq_cmd' in ct config"}
+ end.
+
+init_config_filename(Config, FileName) ->
+ ConfigFile = filename:join([?config(data_dir, Config),
+ "conf", FileName]),
+ rabbit_ct_helpers:set_config(Config, {activemq_config_filename, ConfigFile}).
+
+init_tcp_port_numbers(Config) ->
+ TCPPort = 21000,
+ NodeConfig = [{nodename, activemq},
+ {initial_nodename, activemq},
+ {tcp_port_amqp, TCPPort}],
+ rabbit_ct_helpers:set_config(Config, {rmq_nodes, [NodeConfig]}).
+
+start_activemq_nodes(Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config,
+ [{rmq_hostname, "localhost"}]),
+ ActivemqCmd = ?config(activemq_cmd, Config1),
+ TCPPort = rabbit_ct_broker_helpers:get_node_config(Config1, 0, tcp_port_amqp),
+ ConfigFile = ?config(activemq_config_filename, Config1),
+ Cmd = [ActivemqCmd,
+ "start",
+ {"-Dtestsuite.tcp_port_amqp=~b", [TCPPort]},
+ {"xbean:file:~s", [ConfigFile]}],
+ case rabbit_ct_helpers:exec(Cmd, []) of
+ {ok, _} -> wait_for_activemq_nodes(Config1);
+ Error -> ct:pal("Error: ~p", [Error]),
+ {skip, "Failed to start ActiveMQ"}
+ end.
+
+wait_for_activemq_nodes(Config) ->
+ Hostname = ?config(rmq_hostname, Config),
+ Ports = rabbit_ct_broker_helpers:get_node_configs(Config, tcp_port_amqp),
+ wait_for_activemq_ports(Config, Hostname, Ports).
+
+wait_for_activemq_ports(Config, Hostname, [Port | Rest]) ->
+ ct:pal(?LOW_IMPORTANCE, "Waiting for ActiveMQ on port ~b", [Port]),
+ case wait_for_activemq_port(Hostname, Port, 60) of
+ ok ->
+ ct:pal(?LOW_IMPORTANCE, "ActiveMQ ready on port ~b", [Port]),
+ wait_for_activemq_ports(Config, Hostname, Rest);
+ {error, _} ->
+ Msg = lists:flatten(
+ io_lib:format(
+ "Failed to start ActiveMQ on port ~b; see ActiveMQ logs",
+ [Port])),
+ ct:pal(?LOW_IMPORTANCE, Msg, []),
+ {skip, Msg}
+ end;
+wait_for_activemq_ports(Config, _, []) ->
+ Config.
+
+wait_for_activemq_port(_, _, 0) ->
+ {error, econnrefused};
+wait_for_activemq_port(Hostname, Port, Retries) ->
+ case gen_tcp:connect(Hostname, Port, []) of
+ {ok, Connection} ->
+ gen_tcp:close(Connection),
+ ok;
+ {error, econnrefused} ->
+ timer:sleep(1000),
+ wait_for_activemq_port(Hostname, Port, Retries - 1);
+ Error ->
+ Error
+ end.
+
+stop_activemq_nodes(Config) ->
+ ActivemqCmd = ?config(activemq_cmd, Config),
+ Cmd = [ActivemqCmd, "stop"],
+ case rabbit_ct_helpers:exec(Cmd, []) of
+ {ok, _} -> Config;
+ Error -> ct:pal("Error: ~p", [Error]),
+ {skip, "Failed to stop ActiveMQ"}
+ end.
diff --git a/deps/amqp10_client/test/mock_server.erl b/deps/amqp10_client/test/mock_server.erl
new file mode 100644
index 0000000000..c2098efb7c
--- /dev/null
+++ b/deps/amqp10_client/test/mock_server.erl
@@ -0,0 +1,84 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+-module(mock_server).
+
+%% API functions
+-export([start/1,
+ set_steps/2,
+ stop/1,
+ run/1,
+ amqp_step/1,
+ send_amqp_header_step/1,
+ recv_amqp_header_step/1
+ ]).
+
+-include_lib("src/amqp10_client.hrl").
+
+start(Port) ->
+ {ok, LSock} = gen_tcp:listen(Port, [binary, {packet, 0}, {active, false}]),
+ Pid = spawn(?MODULE, run, [LSock]),
+ {LSock, Pid}.
+
+set_steps({_Sock, Pid}, Steps) ->
+ Pid ! {set_steps, Steps},
+ ok.
+
+stop({S, P}) ->
+ P ! close,
+ gen_tcp:close(S),
+ exit(P, stop).
+
+run(Listener) ->
+ receive
+ {set_steps, Steps} ->
+ {ok, Sock} = gen_tcp:accept(Listener),
+ lists:foreach(fun(S) -> S(Sock) end, Steps),
+ receive
+ close -> ok
+ end
+ end.
+
+
+send(Socket, Ch, Records) ->
+ Encoded = [amqp10_framing:encode_bin(R) || R <- Records],
+ Frame = amqp10_binary_generator:build_frame(Ch, Encoded),
+ ok = gen_tcp:send(Socket, Frame).
+
+recv(Sock) ->
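+    %% An AMQP 1.0 frame starts with an 8-octet header: size (4 octets),
+    %% DOFF (1 octet, matched as 2 for a plain 8-byte header), frame type
+    %% (1 octet, ignored) and channel (2 octets).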
+ {ok, <<Length:32/unsigned, 2:8/unsigned,
+ _/unsigned, Ch:16/unsigned>>} = gen_tcp:recv(Sock, 8),
+ {ok, Data} = gen_tcp:recv(Sock, Length - 8),
+ {PerfDesc, Payload} = amqp10_binary_parser:parse(Data),
+ Perf = amqp10_framing:decode(PerfDesc),
+ {Ch, Perf, Payload}.
+
+amqp_step(Fun) ->
+ fun (Sock) ->
+ Recv = recv(Sock),
+ ct:pal("AMQP Step receieved ~p~n", [Recv]),
+ case Fun(Recv) of
+ {_Ch, []} -> ok;
+ {Ch, {multi, Records}} ->
+ [begin
+ ct:pal("AMQP multi Step send ~p~n", [R]),
+ send(Sock, Ch, R)
+ end || R <- Records];
+ {Ch, Records} ->
+ ct:pal("AMQP Step send ~p~n", [Records]),
+ send(Sock, Ch, Records)
+ end
+ end.
+
+
+send_amqp_header_step(Sock) ->
+ ct:pal("Sending AMQP protocol header"),
+ ok = gen_tcp:send(Sock, ?AMQP_PROTOCOL_HEADER).
+
+recv_amqp_header_step(Sock) ->
+ ct:pal("Receiving AMQP protocol header"),
+ {ok, R} = gen_tcp:recv(Sock, 8),
+ ct:pal("handshake Step receieved ~p~n", [R]).
diff --git a/deps/amqp10_client/test/msg_SUITE.erl b/deps/amqp10_client/test/msg_SUITE.erl
new file mode 100644
index 0000000000..c7c200c664
--- /dev/null
+++ b/deps/amqp10_client/test/msg_SUITE.erl
@@ -0,0 +1,314 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2017-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(msg_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-include_lib("amqp10_common/include/amqp10_framing.hrl").
+
+-compile(export_all).
+
+
+all() ->
+ [{group, tests}].
+
+groups() ->
+ [
+ {tests, [parallel], [
+ minimal_input,
+ amqp_bodies,
+ full_input,
+ new,
+ new_with_options,
+ new_amqp_value,
+ new_amqp_sequence,
+ set_message_format,
+ set_headers,
+ update_headers,
+ set_properties,
+ set_application_properties,
+ set_delivery_annotations,
+ set_message_annotations,
+ to_amqp_records,
+ set_handle,
+ body_bin_data,
+ body_bin_amqp_value,
+ body_bin_amqp_sequence
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) -> Config.
+
+end_per_suite(Config) -> Config.
+
+%% -------------------------------------------------------------------
+%% Groups.
+%% -------------------------------------------------------------------
+
+init_per_group(_, Config) -> Config.
+
+end_per_group(_, Config) -> Config.
+
+minimal_input(_Config) ->
+ Tag = <<"tag">>,
+ Content = <<"content">>,
+ Input = [#'v1_0.transfer'{delivery_tag = {utf8, Tag},
+ delivery_id = {uint, 5672}},
+ #'v1_0.data'{content = Content}],
+ Res = amqp10_msg:from_amqp_records(Input),
+ Tag = amqp10_msg:delivery_tag(Res),
+ 5672 = amqp10_msg:delivery_id(Res),
+ undefined = amqp10_msg:message_format(Res),
+ #{} = amqp10_msg:headers(Res),
+ #{} = amqp10_msg:delivery_annotations(Res),
+ #{} = amqp10_msg:message_annotations(Res),
+ #{} = amqp10_msg:properties(Res),
+ #{} = amqp10_msg:application_properties(Res),
+ false = amqp10_msg:header(durable, Res),
+ 4 = amqp10_msg:header(priority, Res),
+ false = amqp10_msg:header(first_acquirer, Res),
+ 0 = amqp10_msg:header(delivery_count, Res),
+ undefined = amqp10_msg:header(ttl, Res).
+
+amqp_bodies(_Config) ->
+ Tag = <<"tag">>,
+ Content = <<"hi">>,
+ Data = #'v1_0.data'{content = Content},
+ Value = #'v1_0.amqp_value'{content = utf8("hi")},
+ Seq = #'v1_0.amqp_sequence'{content = {list, [utf8("hi"), utf8("there")]}},
+ Transfer = #'v1_0.transfer'{delivery_tag = {utf8, Tag}},
+
+ Res1 = amqp10_msg:from_amqp_records([Transfer, Data]),
+ [<<"hi">>] = amqp10_msg:body(Res1),
+
+ Res2 = amqp10_msg:from_amqp_records([Transfer, Value]),
+ #'v1_0.amqp_value'{content = {utf8, <<"hi">>}} = amqp10_msg:body(Res2),
+
+ Res3 = amqp10_msg:from_amqp_records([Transfer, Seq]),
+ [#'v1_0.amqp_sequence'{content = {list, [{utf8, <<"hi">>},
+ {utf8, <<"there">>}
+ ]}}] = amqp10_msg:body(Res3).
+
+full_input(_Config) ->
+ Tag = <<"tag">>,
+ Content = <<"content">>,
+ %% Format / Version
+ <<MessageFormat:32/unsigned>> = <<101:24/unsigned, 2:8/unsigned>>,
+ Input = [#'v1_0.transfer'{delivery_tag = utf8("tag"),
+ message_format = {uint, MessageFormat}
+ },
+ #'v1_0.header'{durable = true, priority = 9, ttl = 1004,
+ first_acquirer = true, delivery_count = 101},
+ #'v1_0.delivery_annotations'{content =
+ [{utf8("key"), utf8("value")}
+ ]},
+ #'v1_0.message_annotations'{content =
+ [{utf8("key"), utf8("value")}
+ ]},
+ #'v1_0.properties'{message_id = utf8("msg-id"),
+ user_id = utf8("zen"),
+ to = utf8("to"),
+ subject = utf8("subject"),
+ reply_to = utf8("reply-to"),
+ correlation_id = utf8("correlation_id"),
+ content_type = {symbol, <<"utf8">>},
+ content_encoding = {symbol, <<"gzip">>},
+ absolute_expiry_time = {timestamp, 1000},
+ creation_time = {timestamp, 10},
+ group_id = utf8("group-id"),
+ group_sequence = {uint, 33},
+ reply_to_group_id = utf8("reply-to-group-id")
+ },
+ #'v1_0.application_properties'{content =
+ [{utf8("key"), utf8("value")}]},
+ #'v1_0.data'{content = Content},
+ #'v1_0.footer'{content = [{utf8("key"), utf8("value")}]}
+ ],
+ Res = amqp10_msg:from_amqp_records(Input),
+ Tag = amqp10_msg:delivery_tag(Res),
+ {101, 2} = amqp10_msg:message_format(Res),
+ Headers = amqp10_msg:headers(Res),
+ #{durable := true,
+ priority := 9,
+ first_acquirer := true,
+ delivery_count := 101,
+ ttl := 1004} = Headers,
+
+ % header/2
+ true = amqp10_msg:header(durable, Res),
+ 9 = amqp10_msg:header(priority, Res),
+ true = amqp10_msg:header(first_acquirer, Res),
+ 101 = amqp10_msg:header(delivery_count, Res),
+ 1004 = amqp10_msg:header(ttl, Res), % no default
+
+ #{<<"key">> := <<"value">>} = amqp10_msg:delivery_annotations(Res),
+ #{<<"key">> := <<"value">>} = amqp10_msg:message_annotations(Res),
+ #{message_id := <<"msg-id">>,
+ user_id := <<"zen">>,
+ to := <<"to">>,
+ subject := <<"subject">>,
+ reply_to := <<"reply-to">>,
+ correlation_id := <<"correlation_id">>,
+ content_type := <<"utf8">>,
+ content_encoding := <<"gzip">>,
+ absolute_expiry_time := 1000,
+ creation_time := 10,
+ group_id := <<"group-id">>,
+ group_sequence := 33,
+ reply_to_group_id := <<"reply-to-group-id">>} = amqp10_msg:properties(Res),
+
+ #{<<"key">> := <<"value">>} = amqp10_msg:application_properties(Res),
+
+ ?assertEqual([Content], amqp10_msg:body(Res)),
+
+ #{<<"key">> := <<"value">>} = amqp10_msg:footer(Res).
+
+new(_Config) ->
+ Tag = <<"tag">>,
+ Body = <<"hi">>,
+ Msg = amqp10_msg:new(Tag, Body),
+ Tag = amqp10_msg:delivery_tag(Msg),
+ [<<"hi">>] = amqp10_msg:body(Msg).
+
+new_with_options(_Config) ->
+ Tag = <<"tag">>,
+ Body = <<"hi">>,
+ Msg = amqp10_msg:new(Tag, Body, true),
+ Tag = amqp10_msg:delivery_tag(Msg),
+ true = amqp10_msg:settled(Msg).
+
+new_amqp_value(_Config) ->
+ Tag = <<"tag">>,
+ Body = #'v1_0.amqp_value'{content = {utf8, <<"hi">>}},
+ Msg = amqp10_msg:new(Tag, Body),
+ Tag = amqp10_msg:delivery_tag(Msg),
+ Body = amqp10_msg:body(Msg).
+
+new_amqp_sequence(_Config) ->
+ Tag = <<"tag">>,
+ Body = #'v1_0.amqp_sequence'{content = {list, [utf8("hi"), utf8("there")]}},
+ Msg = amqp10_msg:new(Tag, [Body]),
+ Tag = amqp10_msg:delivery_tag(Msg),
+ [Body] = amqp10_msg:body(Msg).
+
+set_message_format(_Config) ->
+ MsgFormat = {103, 3},
+ Msg0 = amqp10_msg:new(<<"tag">>, <<"hi">>),
+ Msg1 = amqp10_msg:set_message_format(MsgFormat, Msg0),
+ MsgFormat = amqp10_msg:message_format(Msg1).
+
+set_headers(_Config) ->
+ Headers = #{durable => true},
+ Msg0 = amqp10_msg:new(<<"tag">>, <<"hi">>),
+ Msg1 = amqp10_msg:set_headers(Headers, Msg0),
+ #{durable := true} = amqp10_msg:headers(Msg1).
+
+update_headers(_Config) ->
+ Headers = #{priority => 5},
+ Msg0 = amqp10_msg:new(<<"tag">>, <<"hi">>),
+ Msg1 = amqp10_msg:set_headers(Headers, Msg0),
+ #{priority := 5} = amqp10_msg:headers(Msg1),
+ Msg2 = amqp10_msg:set_headers(Headers#{priority => 9}, Msg1),
+ #{priority := 9} = amqp10_msg:headers(Msg2).
+
+set_handle(_Config) ->
+ Msg0 = amqp10_msg:new(<<"tag">>, <<"hi">>),
+ Msg1 = amqp10_msg:set_handle(42, Msg0),
+ 42 = amqp10_msg:handle(Msg1).
+
+set_properties(_Config) ->
+ Props = #{message_id => <<"msg-id">>,
+ user_id => <<"zen">>,
+ to => <<"to">>,
+ subject => <<"subject">>,
+ reply_to => <<"reply-to">>,
+ correlation_id => <<"correlation_id">>,
+ content_type => <<"utf8">>,
+ content_encoding => <<"gzip">>,
+ absolute_expiry_time => 1000,
+ creation_time => 10,
+ group_id => <<"group-id">>,
+ group_sequence => 33,
+ reply_to_group_id => <<"reply-to-group-id">>},
+
+ Msg0 = amqp10_msg:new(<<"tag">>, <<"hi">>),
+ Msg1 = amqp10_msg:set_properties(Props, Msg0),
+ Props = amqp10_msg:properties(Msg1).
+
+set_application_properties(_Config) ->
+ Props = #{"key" => "value",
+ <<"key2">> => <<"value2">>},
+ Msg0 = amqp10_msg:new(<<"tag">>, <<"hi">>),
+ Msg1 = amqp10_msg:set_application_properties(Props, Msg0),
+ #{<<"key">> := <<"value">>,
+ <<"key2">> := <<"value2">>} = amqp10_msg:application_properties(Msg1),
+ ok.
+
+set_delivery_annotations(_Config) ->
+ Props = #{<<"x-key">> => "value",
+ <<"x-key2">> => 9},
+ Msg0 = amqp10_msg:new(<<"tag">>, <<"hi">>),
+ Msg1 = amqp10_msg:set_delivery_annotations(Props, Msg0),
+ #{<<"x-key">> := <<"value">>,
+ <<"x-key2">> := 9} = amqp10_msg:delivery_annotations(Msg1).
+
+set_message_annotations(_Config) ->
+ Props = #{<<"x-key">> => "value",
+ <<"x-key2">> => 9},
+ Msg0 = amqp10_msg:new(<<"tag">>, <<"hi">>),
+ Msg1 = amqp10_msg:set_message_annotations(Props, Msg0),
+ #{<<"x-key">> := <<"value">>,
+ <<"x-key2">> := 9} = amqp10_msg:message_annotations(Msg1).
+
+to_amqp_records(_Config) ->
+ Msg0 = amqp10_msg:new(<<"tag">>, <<"data">>),
+ Msg = amqp10_msg:set_headers(#{durable => true}, Msg0),
+ [#'v1_0.transfer'{},
+ #'v1_0.header'{durable = true},
+ #'v1_0.data'{content = <<"data">>}] =
+ amqp10_msg:to_amqp_records(Msg).
+
+body_bin_data(_) ->
+ Body = [
+ #'v1_0.data'{content = <<"one">>},
+ #'v1_0.data'{content = <<"two">>}
+ ],
+ Msg = amqp10_msg:new(<<55>>, Body),
+ Bin = amqp10_msg:body_bin(Msg),
+ Body = amqp10_framing:decode_bin(Bin),
+ ok.
+
+body_bin_amqp_value(_) ->
+ Body = #'v1_0.amqp_value'{content = {utf8, <<"one">>}},
+ Msg = amqp10_msg:new(<<55>>, Body),
+ Bin = amqp10_msg:body_bin(Msg),
+ [Body] = amqp10_framing:decode_bin(Bin),
+ ok.
+
+body_bin_amqp_sequence(_) ->
+ Body = [
+ #'v1_0.amqp_sequence'{content = [{utf8, <<"one">>},
+ {utf8, <<"blah">>}]},
+ #'v1_0.amqp_sequence'{content = [{utf8, <<"two">>}]}
+ ],
+ Msg = amqp10_msg:new(<<55>>, Body),
+ Bin = amqp10_msg:body_bin(Msg),
+ Body = amqp10_framing:decode_bin(Bin),
+ ok.
+
+%% -------------------------------------------------------------------
+%% Utilities
+%% -------------------------------------------------------------------
+
+utf8(S) -> amqp10_client_types:utf8(S).
diff --git a/deps/amqp10_client/test/system_SUITE.erl b/deps/amqp10_client/test/system_SUITE.erl
new file mode 100644
index 0000000000..d81d48c4e3
--- /dev/null
+++ b/deps/amqp10_client/test/system_SUITE.erl
@@ -0,0 +1,740 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2017-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(system_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-include_lib("amqp10_common/include/amqp10_framing.hrl").
+
+-include_lib("src/amqp10_client.hrl").
+
+-compile(export_all).
+
+-define(UNAUTHORIZED_USER, <<"test_user_no_perm">>).
+
+%% The latch constant defines how many processes are spawned in order
+%% to run certain functionality in parallel. It follows the standard
+%% countdown latch pattern.
+-define(LATCH, 100).
+
+%% The wait constant defines how long a consumer waits before it
+%% unsubscribes
+-define(WAIT, 200).
+
+%% How long to wait for a process to die after an expected failure
+-define(PROCESS_EXIT_TIMEOUT, 5000).
+
+all() ->
+ [
+ {group, rabbitmq},
+ {group, rabbitmq_strict},
+ {group, activemq},
+ {group, activemq_no_anon},
+ {group, mock}
+ ].
+
+groups() ->
+ [
+ {rabbitmq, [], shared()},
+ {activemq, [], shared()},
+ {rabbitmq_strict, [], [
+ basic_roundtrip_tls,
+ open_connection_plain_sasl,
+ open_connection_plain_sasl_failure,
+ open_connection_plain_sasl_parse_uri
+ ]},
+ {activemq_no_anon, [],
+ [
+ open_connection_plain_sasl,
+ open_connection_plain_sasl_parse_uri,
+ open_connection_plain_sasl_failure
+ ]},
+ {azure, [],
+ [
+ basic_roundtrip_service_bus,
+ filtered_roundtrip_service_bus
+ ]},
+ {mock, [], [
+ insufficient_credit,
+ incoming_heartbeat,
+ multi_transfer_without_delivery_id
+ ]}
+ ].
+
+shared() ->
+ [
+ open_close_connection,
+ basic_roundtrip,
+ early_transfer,
+ split_transfer,
+ transfer_unsettled,
+ subscribe,
+ subscribe_with_auto_flow,
+ outgoing_heartbeat,
+ roundtrip_large_messages
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config,
+ [
+ fun start_amqp10_client_app/1
+ ]).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config,
+ [
+ fun stop_amqp10_client_app/1
+ ]).
+
+start_amqp10_client_app(Config) ->
+ ?assertMatch({ok, _}, application:ensure_all_started(amqp10_client)),
+ Config.
+
+stop_amqp10_client_app(Config) ->
+ ok = application:stop(amqp10_client),
+ Config.
+
+%% -------------------------------------------------------------------
+%% Groups.
+%% -------------------------------------------------------------------
+
+init_per_group(rabbitmq, Config0) ->
+ Config = rabbit_ct_helpers:set_config(Config0,
+ {sasl, {plain, <<"guest">>, <<"guest">>}}),
+ Config1 = rabbit_ct_helpers:merge_app_env(Config,
+ [{rabbitmq_amqp1_0,
+ [{protocol_strict_mode, true}]}]),
+ rabbit_ct_helpers:run_steps(Config1, rabbit_ct_broker_helpers:setup_steps());
+init_per_group(rabbitmq_strict, Config0) ->
+ Config = rabbit_ct_helpers:set_config(Config0,
+ {sasl, {plain, <<"guest">>, <<"guest">>}}),
+ Config1 = rabbit_ct_helpers:merge_app_env(Config,
+ [{rabbitmq_amqp1_0,
+ [{default_user, none},
+ {protocol_strict_mode, true}]}]),
+ rabbit_ct_helpers:run_steps(Config1, rabbit_ct_broker_helpers:setup_steps());
+init_per_group(activemq, Config0) ->
+ Config = rabbit_ct_helpers:set_config(Config0, {sasl, anon}),
+ rabbit_ct_helpers:run_steps(Config,
+ activemq_ct_helpers:setup_steps("activemq.xml"));
+init_per_group(activemq_no_anon, Config0) ->
+ Config = rabbit_ct_helpers:set_config(
+ Config0, {sasl, {plain, <<"user">>, <<"password">>}}),
+ rabbit_ct_helpers:run_steps(Config,
+ activemq_ct_helpers:setup_steps("activemq_no_anon.xml"));
+init_per_group(azure, Config) ->
+ rabbit_ct_helpers:set_config(Config,
+ [
+ {sb_endpoint, os:getenv("SB_ENDPOINT")},
+ {sb_keyname, to_bin(os:getenv("SB_KEYNAME"))},
+ {sb_key, to_bin(os:getenv("SB_KEY"))},
+ {sb_port, 5671}
+ ]);
+init_per_group(mock, Config) ->
+ rabbit_ct_helpers:set_config(Config, [{mock_port, 25000},
+ {mock_host, "localhost"},
+ {sasl, none}
+ ]).
+end_per_group(rabbitmq, Config) ->
+ rabbit_ct_helpers:run_steps(Config, rabbit_ct_broker_helpers:teardown_steps());
+end_per_group(rabbitmq_strict, Config) ->
+ rabbit_ct_helpers:run_steps(Config, rabbit_ct_broker_helpers:teardown_steps());
+end_per_group(activemq, Config) ->
+ rabbit_ct_helpers:run_steps(Config, activemq_ct_helpers:teardown_steps());
+end_per_group(activemq_no_anon, Config) ->
+ rabbit_ct_helpers:run_steps(Config, activemq_ct_helpers:teardown_steps());
+end_per_group(_, Config) ->
+ Config.
+
+%% -------------------------------------------------------------------
+%% Test cases.
+%% -------------------------------------------------------------------
+
+init_per_testcase(_Test, Config) ->
+ case lists:keyfind(mock_port, 1, Config) of
+ {_, Port} ->
+ M = mock_server:start(Port),
+ rabbit_ct_helpers:set_config(Config, {mock_server, M});
+ _ -> Config
+ end.
+
+end_per_testcase(_Test, Config) ->
+ case lists:keyfind(mock_server, 1, Config) of
+ {_, M} -> mock_server:stop(M);
+ _ -> Config
+ end.
+
+%% -------------------------------------------------------------------
+
+open_close_connection(Config) ->
+ Hostname = ?config(rmq_hostname, Config),
+ Port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp),
+ %% an address list
+ OpnConf = #{addresses => [Hostname],
+ port => Port,
+ notify => self(),
+ container_id => <<"open_close_connection_container">>,
+ sasl => ?config(sasl, Config)},
+ {ok, Connection} = amqp10_client:open_connection(Hostname, Port),
+ {ok, Connection2} = amqp10_client:open_connection(OpnConf),
+ receive
+ {amqp10_event, {connection, Connection2, opened}} -> ok
+ after 5000 -> exit(connection_timeout)
+ end,
+ ok = amqp10_client:close_connection(Connection2),
+ ok = amqp10_client:close_connection(Connection).
+
+open_connection_plain_sasl(Config) ->
+ Hostname = ?config(rmq_hostname, Config),
+ Port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp),
+ %% single address
+ OpnConf = #{address => Hostname,
+ port => Port,
+ notify => self(),
+ container_id => <<"open_connection_plain_sasl_container">>,
+ sasl => ?config(sasl, Config)},
+ {ok, Connection} = amqp10_client:open_connection(OpnConf),
+ receive
+ {amqp10_event, {connection, Connection, opened}} -> ok
+ after 5000 -> exit(connection_timeout)
+ end,
+ ok = amqp10_client:close_connection(Connection).
+
+open_connection_plain_sasl_parse_uri(Config) ->
+ Hostname = ?config(rmq_hostname, Config),
+ Port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp),
+ Uri = case ?config(sasl, Config) of
+ anon ->
+ lists:flatten(
+ io_lib:format("amqp://~s:~b", [Hostname, Port]));
+ {plain, Usr, Pwd} ->
+ lists:flatten(
+ io_lib:format("amqp://~s:~s@~s:~b?sasl=plain",
+ [Usr, Pwd, Hostname, Port]))
+ end,
+
+ {ok, ParsedConf} = amqp10_client:parse_uri(Uri),
+ OpnConf = maps:merge(#{notify => self(),
+ container_id => <<"open_connection_plain_sasl_parse_uri_container">>},
+ ParsedConf),
+ {ok, Connection} = amqp10_client:open_connection(OpnConf),
+ receive
+ {amqp10_event, {connection, Connection, opened}} -> ok
+ after 5000 -> exit(connection_timeout)
+ end,
+ ok = amqp10_client:close_connection(Connection).
+
+open_connection_plain_sasl_failure(Config) ->
+ Hostname = ?config(rmq_hostname, Config),
+ Port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp),
+ OpnConf = #{address => Hostname,
+ port => Port,
+ notify => self(),
+ container_id => <<"open_connection_plain_sasl_container">>,
+                % deliberately invalid credentials - the server should
+                % reject the SASL handshake
+                sasl => {plain, <<"WORNG">>, <<"WARBLE">>}},
+ {ok, Connection} = amqp10_client:open_connection(OpnConf),
+ receive
+ {amqp10_event, {connection, Connection,
+ {closed, sasl_auth_failure}}} -> ok;
+        % some implementations may simply close the TCP connection
+ {amqp10_event, {connection, Connection, {closed, shutdown}}} -> ok
+
+ after 5000 ->
+ ct:pal("Connection process is alive? = ~p~n",
+ [erlang:is_process_alive(Connection)]),
+ exit(connection_timeout)
+ end,
+ ok = amqp10_client:close_connection(Connection).
+
+basic_roundtrip(Config) ->
+ application:start(sasl),
+ Hostname = ?config(rmq_hostname, Config),
+ Port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp),
+ OpenConf = #{address => Hostname, port => Port, sasl => anon},
+ roundtrip(OpenConf).
+
+basic_roundtrip_tls(Config) ->
+ Hostname = ?config(rmq_hostname, Config),
+ Port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp_tls),
+ CACertFile = ?config(rmq_certsdir, Config) ++ "/testca/cacert.pem",
+ CertFile = ?config(rmq_certsdir, Config) ++ "/client/cert.pem",
+ KeyFile = ?config(rmq_certsdir, Config) ++ "/client/key.pem",
+ OpnConf = #{address => Hostname,
+ port => Port,
+ tls_opts => {secure_port, [{cacertfile, CACertFile},
+ {certfile, CertFile},
+ {keyfile, KeyFile}]},
+ notify => self(),
+ container_id => <<"open_connection_tls_container">>,
+ sasl => ?config(sasl, Config)},
+ roundtrip(OpnConf).
+
+service_bus_config(Config, ContainerId) ->
+ Hostname = ?config(sb_endpoint, Config),
+ Port = ?config(sb_port, Config),
+ User = ?config(sb_keyname, Config),
+ Password = ?config(sb_key, Config),
+ #{address => Hostname,
+ hostname => to_bin(Hostname),
+ port => Port,
+ tls_opts => {secure_port, [{versions, ['tlsv1.1']}]},
+ notify => self(),
+ container_id => ContainerId,
+ sasl => {plain, User, Password}}.
+
+basic_roundtrip_service_bus(Config) ->
+ roundtrip(service_bus_config(Config, <<"basic_roundtrip_service_bus">>)).
+
+filtered_roundtrip_service_bus(Config) ->
+ filtered_roundtrip(service_bus_config(Config, <<"filtered_roundtrip_service_bus">>)).
+
+roundtrip_large_messages(Config) ->
+ Hostname = ?config(rmq_hostname, Config),
+ Port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp),
+ OpenConf = #{address => Hostname, port => Port, sasl => anon},
+ DataKb = crypto:strong_rand_bytes(1024),
+ roundtrip(OpenConf, DataKb),
+ Data1Mb = binary:copy(DataKb, 1024),
+ roundtrip(OpenConf, Data1Mb),
+ roundtrip(OpenConf, binary:copy(Data1Mb, 8)),
+ roundtrip(OpenConf, binary:copy(Data1Mb, 64)),
+ ok.
+
+
+roundtrip(OpenConf) ->
+ roundtrip(OpenConf, <<"banana">>).
+
+roundtrip(OpenConf, Body) ->
+ {ok, Connection} = amqp10_client:open_connection(OpenConf),
+ {ok, Session} = amqp10_client:begin_session(Connection),
+ {ok, Sender} = amqp10_client:attach_sender_link(Session,
+ <<"banana-sender">>,
+ <<"test1">>,
+ settled,
+ unsettled_state),
+ await_link(Sender, credited, link_credit_timeout),
+
+ Now = os:system_time(millisecond),
+ Props = #{creation_time => Now},
+ Msg0 = amqp10_msg:set_properties(Props,
+ amqp10_msg:new(<<"my-tag">>, Body, true)),
+ Msg1 = amqp10_msg:set_application_properties(#{"a_key" => "a_value"}, Msg0),
+ Msg = amqp10_msg:set_message_annotations(#{<<"x_key">> => "x_value"}, Msg1),
+ % RabbitMQ AMQP 1.0 does not yet support delivery annotations
+ % Msg = amqp10_msg:set_delivery_annotations(#{<<"x_key">> => "x_value"}, Msg2),
+ ok = amqp10_client:send_msg(Sender, Msg),
+ ok = amqp10_client:detach_link(Sender),
+ await_link(Sender, {detached, normal}, link_detach_timeout),
+
+ {error, link_not_found} = amqp10_client:detach_link(Sender),
+ {ok, Receiver} = amqp10_client:attach_receiver_link(Session,
+ <<"banana-receiver">>,
+ <<"test1">>,
+ settled,
+ unsettled_state),
+ {ok, OutMsg} = amqp10_client:get_msg(Receiver, 60000 * 5),
+ ok = amqp10_client:end_session(Session),
+ ok = amqp10_client:close_connection(Connection),
+ % ct:pal(?LOW_IMPORTANCE, "roundtrip message Out: ~p~nIn: ~p~n", [OutMsg, Msg]),
+ #{creation_time := Now} = amqp10_msg:properties(OutMsg),
+ #{<<"a_key">> := <<"a_value">>} = amqp10_msg:application_properties(OutMsg),
+ #{<<"x_key">> := <<"x_value">>} = amqp10_msg:message_annotations(OutMsg),
+ % #{<<"x_key">> := <<"x_value">>} = amqp10_msg:delivery_annotations(OutMsg),
+ ?assertEqual([Body], amqp10_msg:body(OutMsg)),
+ ok.
+
+filtered_roundtrip(OpenConf) ->
+ filtered_roundtrip(OpenConf, <<"banana">>).
+
+filtered_roundtrip(OpenConf, Body) ->
+ {ok, Connection} = amqp10_client:open_connection(OpenConf),
+ {ok, Session} = amqp10_client:begin_session(Connection),
+ {ok, Sender} = amqp10_client:attach_sender_link(Session,
+ <<"default-sender">>,
+ <<"test1">>,
+ settled,
+ unsettled_state),
+ await_link(Sender, credited, link_credit_timeout),
+
+ Now = os:system_time(millisecond),
+ Props1 = #{creation_time => Now},
+ Msg1 = amqp10_msg:set_properties(Props1, amqp10_msg:new(<<"msg-1-tag">>, Body, true)),
+ ok = amqp10_client:send_msg(Sender, Msg1),
+
+ {ok, DefaultReceiver} = amqp10_client:attach_receiver_link(Session,
+ <<"default-receiver">>,
+ <<"test1">>,
+ settled,
+ unsettled_state),
+ ok = amqp10_client:send_msg(Sender, Msg1),
+ {ok, OutMsg1} = amqp10_client:get_msg(DefaultReceiver, 60000 * 5),
+ ?assertEqual(<<"msg-1-tag">>, amqp10_msg:delivery_tag(OutMsg1)),
+
+ timer:sleep(5 * 1000),
+
+ Now2 = os:system_time(millisecond),
+ Props2 = #{creation_time => Now2 - 1000},
+ Msg2 = amqp10_msg:set_properties(Props2, amqp10_msg:new(<<"msg-2-tag">>, Body, true)),
+ Now2Binary = integer_to_binary(Now2),
+
+ ok = amqp10_client:send_msg(Sender, Msg2),
+
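+    % attach a receiver with a broker-side JMS-style selector filter (understood by
+    % ActiveMQ and Azure Service Bus) so it only sees messages enqueued after Now2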
+ {ok, FilteredReceiver} = amqp10_client:attach_receiver_link(Session,
+ <<"filtered-receiver">>,
+ <<"test1">>,
+ settled,
+ unsettled_state,
+ #{<<"apache.org:selector-filter:string">> => <<"amqp.annotation.x-opt-enqueuedtimeutc > ", Now2Binary/binary>>}),
+
+ {ok, OutMsg2} = amqp10_client:get_msg(DefaultReceiver, 60000 * 5),
+ ?assertEqual(<<"msg-2-tag">>, amqp10_msg:delivery_tag(OutMsg2)),
+
+ {ok, OutMsgFiltered} = amqp10_client:get_msg(FilteredReceiver, 60000 * 5),
+ ?assertEqual(<<"msg-2-tag">>, amqp10_msg:delivery_tag(OutMsgFiltered)),
+
+ ok = amqp10_client:end_session(Session),
+ ok = amqp10_client:close_connection(Connection),
+ ok.
+
+% a message is sent before the link attach is guaranteed to
+% have completed and before link credit has been granted;
+% also queues a link detach immediately after a transfer
+early_transfer(Config) ->
+ Hostname = ?config(rmq_hostname, Config),
+ Port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp),
+ {ok, Connection} = amqp10_client:open_connection(Hostname, Port),
+ {ok, Session} = amqp10_client:begin_session(Connection),
+ {ok, Sender} = amqp10_client:attach_sender_link(Session,
+ <<"early-transfer">>,
+ <<"test">>),
+
+ Msg = amqp10_msg:new(<<"my-tag">>, <<"banana">>, true),
+ % TODO: this is a timing issue - should use mock here really
+ {error, half_attached} = amqp10_client:send_msg(Sender, Msg),
+ % wait for credit
+ await_link(Sender, credited, credited_timeout),
+ ok = amqp10_client:detach_link(Sender),
+ % attach then immediately detach
+ LinkName = <<"early-transfer2">>,
+ {ok, Sender2} = amqp10_client:attach_sender_link(Session, LinkName,
+ <<"test">>),
+ {error, half_attached} = amqp10_client:detach_link(Sender2),
+ await_link(Sender2, credited, credited_timeout),
+ ok = amqp10_client:end_session(Session),
+ ok = amqp10_client:close_connection(Connection),
+ ok.
+
+split_transfer(Config) ->
+ Hostname = ?config(rmq_hostname, Config),
+ Port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp),
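+    % a deliberately small max frame size forces the 1000-byte payload below to be
+    % split across multiple transfer frames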
+ Conf = #{address => Hostname,
+ port => Port,
+ max_frame_size => 512,
+ sasl => ?config(sasl, Config)},
+ {ok, Connection} = amqp10_client:open_connection(Conf),
+ {ok, Session} = amqp10_client:begin_session(Connection),
+ Data = list_to_binary(string:chars(64, 1000)),
+ {ok, Sender} = amqp10_client:attach_sender_link_sync(Session,
+ <<"data-sender">>,
+ <<"test">>),
+ Msg = amqp10_msg:new(<<"my-tag">>, Data, true),
+ ok = amqp10_client:send_msg(Sender, Msg),
+ {ok, Receiver} = amqp10_client:attach_receiver_link(Session,
+ <<"data-receiver">>,
+ <<"test">>),
+ {ok, OutMsg} = amqp10_client:get_msg(Receiver),
+ ok = amqp10_client:end_session(Session),
+ ok = amqp10_client:close_connection(Connection),
+ ?assertEqual([Data], amqp10_msg:body(OutMsg)).
+
+transfer_unsettled(Config) ->
+ Hostname = ?config(rmq_hostname, Config),
+ Port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp),
+ Conf = #{address => Hostname, port => Port,
+ sasl => ?config(sasl, Config)},
+ {ok, Connection} = amqp10_client:open_connection(Conf),
+ {ok, Session} = amqp10_client:begin_session(Connection),
+ Data = list_to_binary(string:chars(64, 1000)),
+ {ok, Sender} = amqp10_client:attach_sender_link_sync(Session,
+ <<"data-sender">>,
+ <<"test">>, unsettled),
+ await_link(Sender, credited, credited_timeout),
+ DeliveryTag = <<"my-tag">>,
+ Msg = amqp10_msg:new(DeliveryTag, Data, false),
+ ok = amqp10_client:send_msg(Sender, Msg),
+ ct:pal("test pid ~w", [self()]),
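+    % the message was sent unsettled, so wait for the broker to settle it
+    % with an accepted disposition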
+ ok = await_disposition(DeliveryTag),
+ {ok, Receiver} = amqp10_client:attach_receiver_link(Session,
+ <<"data-receiver">>,
+ <<"test">>, unsettled),
+ {ok, OutMsg} = amqp10_client:get_msg(Receiver),
+ ok = amqp10_client:accept_msg(Receiver, OutMsg),
+ {error, timeout} = amqp10_client:get_msg(Receiver, 1000),
+ ok = amqp10_client:end_session(Session),
+ ok = amqp10_client:close_connection(Connection),
+ ?assertEqual([Data], amqp10_msg:body(OutMsg)).
+
+subscribe(Config) ->
+ Hostname = ?config(rmq_hostname, Config),
+ Port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp),
+ QueueName = <<"test-sub">>,
+ {ok, Connection} = amqp10_client:open_connection(Hostname, Port),
+ {ok, Session} = amqp10_client:begin_session(Connection),
+ {ok, Sender} = amqp10_client:attach_sender_link_sync(Session,
+ <<"sub-sender">>,
+ QueueName),
+ _ = publish_messages(Sender, <<"banana">>, 10),
+ {ok, Receiver} = amqp10_client:attach_receiver_link(Session,
+ <<"sub-receiver">>,
+ QueueName, unsettled),
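+    % grant 10 credits in a single flow; 'never' disables automatic renewal, so
+    % delivery stops (and credit_exhausted fires) once the credit is used up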
+ ok = amqp10_client:flow_link_credit(Receiver, 10, never),
+
+ _ = receive_messages(Receiver, 10),
+ % assert no further messages are delivered
+ timeout = receive_one(Receiver),
+ receive
+ {amqp10_event, {link, Receiver, credit_exhausted}} ->
+ ok
+ after 5000 ->
+ flush(),
+ exit(credit_exhausted_assert)
+ end,
+
+ ok = amqp10_client:end_session(Session),
+ ok = amqp10_client:close_connection(Connection).
+
+subscribe_with_auto_flow(Config) ->
+ Hostname = ?config(rmq_hostname, Config),
+ Port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp),
+ QueueName = <<"test-sub">>,
+ {ok, Connection} = amqp10_client:open_connection(Hostname, Port),
+ {ok, Session} = amqp10_client:begin_session(Connection),
+ {ok, Sender} = amqp10_client:attach_sender_link_sync(Session,
+ <<"sub-sender">>,
+ QueueName),
+ _ = publish_messages(Sender, <<"banana">>, 10),
+ {ok, Receiver} = amqp10_client:attach_receiver_link(Session,
+ <<"sub-receiver">>,
+ QueueName, unsettled),
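+    % grant 5 credits and auto-renew whenever the remaining credit drops to 2,
+    % so all 10 messages arrive without a manual top-up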
+ ok = amqp10_client:flow_link_credit(Receiver, 5, 2),
+
+ _ = receive_messages(Receiver, 10),
+
+ % assert no further messages are delivered
+ timeout = receive_one(Receiver),
+ ok = amqp10_client:end_session(Session),
+ ok = amqp10_client:close_connection(Connection).
+
+insufficient_credit(Config) ->
+ Hostname = ?config(mock_host, Config),
+ Port = ?config(mock_port, Config),
+ OpenStep = fun({0 = Ch, #'v1_0.open'{}, _Pay}) ->
+ {Ch, [#'v1_0.open'{container_id = {utf8, <<"mock">>}}]}
+ end,
+ BeginStep = fun({1 = Ch, #'v1_0.begin'{}, _Pay}) ->
+ {Ch, [#'v1_0.begin'{remote_channel = {ushort, 1},
+ next_outgoing_id = {uint, 1},
+ incoming_window = {uint, 1000},
+ outgoing_window = {uint, 1000}}
+ ]}
+ end,
+ AttachStep = fun({1 = Ch, #'v1_0.attach'{role = false,
+ name = Name}, <<>>}) ->
+ {Ch, [#'v1_0.attach'{name = Name,
+ handle = {uint, 99},
+ role = true}]}
+ end,
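+    % NB: the mock never sends a flow frame, so the sender link attaches but is
+    % never granted any credit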
+ Steps = [fun mock_server:recv_amqp_header_step/1,
+ fun mock_server:send_amqp_header_step/1,
+ mock_server:amqp_step(OpenStep),
+ mock_server:amqp_step(BeginStep),
+ mock_server:amqp_step(AttachStep)],
+
+ ok = mock_server:set_steps(?config(mock_server, Config), Steps),
+
+ Cfg = #{address => Hostname, port => Port, sasl => none, notify => self()},
+ {ok, Connection} = amqp10_client:open_connection(Cfg),
+ {ok, Session} = amqp10_client:begin_session_sync(Connection),
+ {ok, Sender} = amqp10_client:attach_sender_link(Session, <<"mock1-sender">>,
+ <<"test">>),
+ await_link(Sender, attached, attached_timeout),
+ Msg = amqp10_msg:new(<<"mock-tag">>, <<"banana">>, true),
+ {error, insufficient_credit} = amqp10_client:send_msg(Sender, Msg),
+
+ ok = amqp10_client:end_session(Session),
+ ok = amqp10_client:close_connection(Connection),
+ ok.
+
+multi_transfer_without_delivery_id(Config) ->
+ Hostname = ?config(mock_host, Config),
+ Port = ?config(mock_port, Config),
+ OpenStep = fun({0 = Ch, #'v1_0.open'{}, _Pay}) ->
+ {Ch, [#'v1_0.open'{container_id = {utf8, <<"mock">>}}]}
+ end,
+ BeginStep = fun({1 = Ch, #'v1_0.begin'{}, _Pay}) ->
+ {Ch, [#'v1_0.begin'{remote_channel = {ushort, 1},
+ next_outgoing_id = {uint, 1},
+ incoming_window = {uint, 1000},
+ outgoing_window = {uint, 1000}}
+ ]}
+ end,
+ AttachStep = fun({1 = Ch, #'v1_0.attach'{role = true,
+ name = Name}, <<>>}) ->
+ {Ch, [#'v1_0.attach'{name = Name,
+ handle = {uint, 99},
+ initial_delivery_count = {uint, 1},
+ role = false}
+ ]}
+ end,
+
+ LinkCreditStep = fun({1 = Ch, #'v1_0.flow'{}, <<>>}) ->
+ {Ch, {multi, [[#'v1_0.transfer'{handle = {uint, 99},
+ delivery_id = {uint, 12},
+ more = true},
+ #'v1_0.data'{content = <<"hello ">>}],
+ [#'v1_0.transfer'{handle = {uint, 99},
+ % delivery_id can be omitted
+ % for continuation frames
+ delivery_id = undefined,
+ settled = undefined,
+ more = false},
+ #'v1_0.data'{content = <<"world">>}]
+ ]}}
+ end,
+ Steps = [fun mock_server:recv_amqp_header_step/1,
+ fun mock_server:send_amqp_header_step/1,
+ mock_server:amqp_step(OpenStep),
+ mock_server:amqp_step(BeginStep),
+ mock_server:amqp_step(AttachStep),
+ mock_server:amqp_step(LinkCreditStep)
+ ],
+
+ ok = mock_server:set_steps(?config(mock_server, Config), Steps),
+
+ Cfg = #{address => Hostname, port => Port, sasl => none, notify => self()},
+ {ok, Connection} = amqp10_client:open_connection(Cfg),
+ {ok, Session} = amqp10_client:begin_session_sync(Connection),
+    {ok, Receiver} = amqp10_client:attach_receiver_link(Session, <<"mock1-receiver">>,
+ <<"test">>),
+ amqp10_client:flow_link_credit(Receiver, 100, 50),
+ receive
+ {amqp10_msg, Receiver, _InMsg} ->
+ ok
+ after 2000 ->
+ exit(delivery_timeout)
+ end,
+
+ ok = amqp10_client:end_session(Session),
+ ok = amqp10_client:close_connection(Connection),
+ ok.
+
+outgoing_heartbeat(Config) ->
+ Hostname = ?config(rmq_hostname, Config),
+ Port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp),
+ CConf = #{address => Hostname, port => Port,
+ idle_time_out => 5000, sasl => ?config(sasl, Config)},
+ {ok, Connection} = amqp10_client:open_connection(CConf),
+    timer:sleep(35 * 1000), % longer than the broker idle timeout (ActiveMQ defaults to 15s, I believe)
+ % check we can still establish a session
+ {ok, Session} = amqp10_client:begin_session_sync(Connection),
+ ok = amqp10_client:end_session(Session),
+ ok = amqp10_client:close_connection(Connection).
+
+incoming_heartbeat(Config) ->
+ Hostname = ?config(mock_host, Config),
+ Port = ?config(mock_port, Config),
+ OpenStep = fun({0 = Ch, #'v1_0.open'{}, _Pay}) ->
+ {Ch, [#'v1_0.open'{container_id = {utf8, <<"mock">>},
+ idle_time_out = {uint, 0}}]}
+ end,
+
+ CloseStep = fun({0 = Ch, #'v1_0.close'{error = _TODO}, _Pay}) ->
+ {Ch, [#'v1_0.close'{}]}
+ end,
+ Steps = [fun mock_server:recv_amqp_header_step/1,
+ fun mock_server:send_amqp_header_step/1,
+ mock_server:amqp_step(OpenStep),
+ mock_server:amqp_step(CloseStep)],
+ Mock = {_, MockPid} = ?config(mock_server, Config),
+ MockRef = monitor(process, MockPid),
+ ok = mock_server:set_steps(Mock, Steps),
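+    % the mock sends no further traffic after open, so the connection is expected
+    % to close with the remote idle-time-out error asserted below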
+ CConf = #{address => Hostname, port => Port, sasl => ?config(sasl, Config),
+ idle_time_out => 1000, notify => self()},
+ {ok, Connection} = amqp10_client:open_connection(CConf),
+ receive
+ {amqp10_event, {connection, Connection,
+ {closed, {resource_limit_exceeded, <<"remote idle-time-out">>}}}} ->
+ ok
+ after 5000 ->
+ exit(incoming_heartbeat_assert)
+ end,
+ demonitor(MockRef).
+
+
+%%% HELPERS
+%%%
+
+receive_messages(Receiver, Num) ->
+ [begin
+ ct:pal("receive_messages ~p", [T]),
+ ok = receive_one(Receiver)
+ end || T <- lists:seq(1, Num)].
+
+publish_messages(Sender, Data, Num) ->
+ [begin
+ Tag = integer_to_binary(T),
+ Msg = amqp10_msg:new(Tag, Data, false),
+ ok = amqp10_client:send_msg(Sender, Msg),
+ ok = await_disposition(Tag)
+ end || T <- lists:seq(1, Num)].
+
+receive_one(Receiver) ->
+ receive
+ {amqp10_msg, Receiver, Msg} ->
+ amqp10_client:accept_msg(Receiver, Msg)
+ after 2000 ->
+ timeout
+ end.
+
+await_disposition(DeliveryTag) ->
+ receive
+ {amqp10_disposition, {accepted, DeliveryTag}} -> ok
+ after 3000 ->
+ flush(),
+        exit(disposition_timeout)
+ end.
+
+await_link(Who, What, Err) ->
+ receive
+ {amqp10_event, {link, Who, What}} ->
+ ok;
+ {amqp10_event, {link, Who, {detached, Why}}} ->
+ exit(Why)
+ after 5000 ->
+ flush(),
+ exit(Err)
+ end.
+
+to_bin(X) when is_list(X) ->
+ list_to_binary(X).
+
+flush() ->
+ receive
+ Any ->
+ ct:pal("flush ~p", [Any]),
+ flush()
+ after 0 ->
+ ok
+ end.
diff --git a/deps/amqp10_client/test/system_SUITE_data/conf/activemq.xml b/deps/amqp10_client/test/system_SUITE_data/conf/activemq.xml
new file mode 100644
index 0000000000..2b9d37ed18
--- /dev/null
+++ b/deps/amqp10_client/test/system_SUITE_data/conf/activemq.xml
@@ -0,0 +1,135 @@
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ https://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<!-- START SNIPPET: example -->
+<beans
+ xmlns="http://www.springframework.org/schema/beans"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://www.springframework.org/schema/beans https://www.springframework.org/schema/beans/spring-beans.xsd
+ http://activemq.apache.org/schema/core https://activemq.apache.org/schema/core/activemq-core.xsd">
+
+ <!-- Allows us to use system properties as variables in this configuration file -->
+ <bean class="org.springframework.beans.factory.config.PropertyPlaceholderConfigurer">
+ <property name="locations">
+ <value>file:${activemq.conf}/credentials.properties</value>
+ </property>
+ </bean>
+
+ <!-- Allows accessing the server log -->
+ <bean id="logQuery" class="io.fabric8.insight.log.log4j.Log4jLogQuery"
+ lazy-init="false" scope="singleton"
+ init-method="start" destroy-method="stop">
+ </bean>
+
+ <!--
+ The <broker> element is used to configure the ActiveMQ broker.
+ -->
+ <broker xmlns="http://activemq.apache.org/schema/core" brokerName="localhost" dataDirectory="${activemq.data}">
+
+ <destinationPolicy>
+ <policyMap>
+ <policyEntries>
+ <policyEntry topic=">" >
+          <!-- The constantPendingMessageLimitStrategy is used to prevent
+               slow topic consumers from blocking producers and affecting other consumers
+               by limiting the number of messages that are retained.
+ For more information, see:
+
+ https://activemq.apache.org/slow-consumer-handling.html
+
+ -->
+ <pendingMessageLimitStrategy>
+ <constantPendingMessageLimitStrategy limit="1000"/>
+ </pendingMessageLimitStrategy>
+ </policyEntry>
+ </policyEntries>
+ </policyMap>
+ </destinationPolicy>
+
+
+ <!--
+ The managementContext is used to configure how ActiveMQ is exposed in
+ JMX. By default, ActiveMQ uses the MBean server that is started by
+ the JVM. For more information, see:
+
+ https://activemq.apache.org/jmx.html
+ -->
+ <managementContext>
+ <managementContext createConnector="false"/>
+ </managementContext>
+
+ <!--
+ Configure message persistence for the broker. The default persistence
+ mechanism is the KahaDB store (identified by the kahaDB tag).
+ For more information, see:
+
+ https://activemq.apache.org/persistence.html
+ -->
+ <persistenceAdapter>
+ <kahaDB directory="${activemq.data}/kahadb"/>
+ </persistenceAdapter>
+
+
+ <!--
+ The systemUsage controls the maximum amount of space the broker will
+ use before disabling caching and/or slowing down producers. For more information, see:
+ https://activemq.apache.org/producer-flow-control.html
+ -->
+ <systemUsage>
+ <systemUsage>
+ <memoryUsage>
+ <memoryUsage percentOfJvmHeap="70" />
+ </memoryUsage>
+ <storeUsage>
+ <storeUsage limit="100 gb"/>
+ </storeUsage>
+ <tempUsage>
+ <tempUsage limit="50 gb"/>
+ </tempUsage>
+ </systemUsage>
+ </systemUsage>
+
+ <!--
+ The transport connectors expose ActiveMQ over a given protocol to
+ clients and other brokers. For more information, see:
+
+ https://activemq.apache.org/configuring-transports.html
+ -->
+ <transportConnectors>
+ <!-- DOS protection, limit concurrent connections to 1000 and frame size to 100MB -->
+ <transportConnector name="amqp" uri="amqp://0.0.0.0:${testsuite.tcp_port_amqp}?maximumConnections=1000&amp;wireFormat.maxFrameSize=104857600"/>
+ </transportConnectors>
+
+ <!-- destroy the spring context on shutdown to stop jetty -->
+ <shutdownHooks>
+ <bean xmlns="http://www.springframework.org/schema/beans" class="org.apache.activemq.hooks.SpringContextHook" />
+ </shutdownHooks>
+
+ <plugins>
+ <simpleAuthenticationPlugin anonymousAccessAllowed="true">
+ <users>
+ <authenticationUser username="system" password="manager"
+ groups="users,admins"/>
+ <authenticationUser username="user" password="password"
+ groups="users"/>
+ <authenticationUser username="guest" password="password" groups="guests"/>
+ </users>
+ </simpleAuthenticationPlugin>
+ </plugins>
+ </broker>
+
+</beans>
+<!-- END SNIPPET: example -->
diff --git a/deps/amqp10_client/test/system_SUITE_data/conf/activemq_no_anon.xml b/deps/amqp10_client/test/system_SUITE_data/conf/activemq_no_anon.xml
new file mode 100644
index 0000000000..2e489fa2b9
--- /dev/null
+++ b/deps/amqp10_client/test/system_SUITE_data/conf/activemq_no_anon.xml
@@ -0,0 +1,135 @@
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ https://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<!-- START SNIPPET: example -->
+<beans
+ xmlns="http://www.springframework.org/schema/beans"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://www.springframework.org/schema/beans https://www.springframework.org/schema/beans/spring-beans.xsd
+ http://activemq.apache.org/schema/core https://activemq.apache.org/schema/core/activemq-core.xsd">
+
+ <!-- Allows us to use system properties as variables in this configuration file -->
+ <bean class="org.springframework.beans.factory.config.PropertyPlaceholderConfigurer">
+ <property name="locations">
+ <value>file:${activemq.conf}/credentials.properties</value>
+ </property>
+ </bean>
+
+ <!-- Allows accessing the server log -->
+ <bean id="logQuery" class="io.fabric8.insight.log.log4j.Log4jLogQuery"
+ lazy-init="false" scope="singleton"
+ init-method="start" destroy-method="stop">
+ </bean>
+
+ <!--
+ The <broker> element is used to configure the ActiveMQ broker.
+ -->
+ <broker xmlns="http://activemq.apache.org/schema/core" brokerName="localhost" dataDirectory="${activemq.data}">
+
+ <destinationPolicy>
+ <policyMap>
+ <policyEntries>
+ <policyEntry topic=">" >
+          <!-- The constantPendingMessageLimitStrategy is used to prevent
+               slow topic consumers from blocking producers and affecting other consumers
+               by limiting the number of messages that are retained.
+ For more information, see:
+
+ https://activemq.apache.org/slow-consumer-handling.html
+
+ -->
+ <pendingMessageLimitStrategy>
+ <constantPendingMessageLimitStrategy limit="1000"/>
+ </pendingMessageLimitStrategy>
+ </policyEntry>
+ </policyEntries>
+ </policyMap>
+ </destinationPolicy>
+
+
+ <!--
+ The managementContext is used to configure how ActiveMQ is exposed in
+ JMX. By default, ActiveMQ uses the MBean server that is started by
+ the JVM. For more information, see:
+
+ https://activemq.apache.org/jmx.html
+ -->
+ <managementContext>
+ <managementContext createConnector="false"/>
+ </managementContext>
+
+ <!--
+ Configure message persistence for the broker. The default persistence
+ mechanism is the KahaDB store (identified by the kahaDB tag).
+ For more information, see:
+
+ https://activemq.apache.org/persistence.html
+ -->
+ <persistenceAdapter>
+ <kahaDB directory="${activemq.data}/kahadb"/>
+ </persistenceAdapter>
+
+
+ <!--
+ The systemUsage controls the maximum amount of space the broker will
+ use before disabling caching and/or slowing down producers. For more information, see:
+ https://activemq.apache.org/producer-flow-control.html
+ -->
+ <systemUsage>
+ <systemUsage>
+ <memoryUsage>
+ <memoryUsage percentOfJvmHeap="70" />
+ </memoryUsage>
+ <storeUsage>
+ <storeUsage limit="100 gb"/>
+ </storeUsage>
+ <tempUsage>
+ <tempUsage limit="50 gb"/>
+ </tempUsage>
+ </systemUsage>
+ </systemUsage>
+
+ <!--
+ The transport connectors expose ActiveMQ over a given protocol to
+ clients and other brokers. For more information, see:
+
+ https://activemq.apache.org/configuring-transports.html
+ -->
+ <transportConnectors>
+ <!-- DOS protection, limit concurrent connections to 1000 and frame size to 100MB -->
+ <transportConnector name="amqp" uri="amqp://0.0.0.0:${testsuite.tcp_port_amqp}?maximumConnections=1000&amp;wireFormat.maxFrameSize=104857600"/>
+ </transportConnectors>
+
+ <!-- destroy the spring context on shutdown to stop jetty -->
+ <shutdownHooks>
+ <bean xmlns="http://www.springframework.org/schema/beans" class="org.apache.activemq.hooks.SpringContextHook" />
+ </shutdownHooks>
+
+ <plugins>
+ <simpleAuthenticationPlugin>
+ <users>
+ <authenticationUser username="system" password="manager"
+ groups="users,admins"/>
+ <authenticationUser username="user" password="password"
+ groups="users"/>
+ <authenticationUser username="guest" password="password" groups="guests"/>
+ </users>
+ </simpleAuthenticationPlugin>
+ </plugins>
+ </broker>
+
+</beans>
+<!-- END SNIPPET: example -->
diff --git a/deps/amqp10_common/.gitignore b/deps/amqp10_common/.gitignore
new file mode 100644
index 0000000000..22c3cc3980
--- /dev/null
+++ b/deps/amqp10_common/.gitignore
@@ -0,0 +1,29 @@
+*~
+.sw?
+.*.sw?
+*.beam
+*.coverdata
+/.erlang.mk/
+/cover/
+/deps/
+/doc/
+/ebin/
+/escript/
+/escript.lock
+/git-revisions.txt
+/logs/
+/plugins/
+/plugins.lock
+/rebar.config
+/rebar.lock
+/sbin/
+/sbin.lock
+/test/ct.cover.spec
+/xrefr
+
+/amqp10_common.d
+/*.plt
+
+# Generated source files.
+/include/amqp10_framing.hrl
+/src/amqp10_framing0.erl
diff --git a/deps/amqp10_common/.travis.yml b/deps/amqp10_common/.travis.yml
new file mode 100644
index 0000000000..cd3781fa57
--- /dev/null
+++ b/deps/amqp10_common/.travis.yml
@@ -0,0 +1,61 @@
+# vim:sw=2:et:
+
+os: linux
+dist: xenial
+language: elixir
+notifications:
+ email:
+ recipients:
+ - alerts@rabbitmq.com
+ on_success: never
+ on_failure: always
+addons:
+ apt:
+ packages:
+ - awscli
+cache:
+ apt: true
+env:
+ global:
+ - secure: mBGK2Yw6xJtIRD0V3JU9iwA8fkbkwcRO9i5ocKMi6g2/ub3cWylvJJCX1ZnLR4EEB3xtP60+XCl40g6V+qM4GEDqoT4nE8neEyajbhmKIFVuYnU29sC0czFuj9TpQ2gJyW//KhNX+sz8IpL78iQiMy+d1mK7sNWE8l1lCbcFeddu+Zr5BQ2YXJYP+NGLjkkpL59gYiXP9y5pfHbkoAAN+tS1/boCf8l29y6p83KMgvT/PL7vJXSwDgo48mfymipZOBEgsCFmoC6/QkrpsCrLAMiRGQB2l8uT2YOwF5BsioxSOggYkr3yJ7cOtoYPzB+AJQwxuiOYtDyga6qxFJKghW4wunXuGXHT+/sYKsJl/rMdbFpszAPNCIeXXfrxOAPNTq9IhyIKrqQRHKXGS95fySWTZ2AahpUKDcrw1zmWiS5Y35y87nNYYDQYQUFcuLMbADI5H+WAyZOidIoJwNY35R5CKMPG1zayxalgwBlbAbq/uLOhznoJ9KgoFzNwG7Ms7ND1Bpvb11L342N84vBqt7uvW9LPZ+2Mm82PeypBCAQOzPchk4XPjJxEHaSbcnZvexFOirOD9Xe9gyQFTVl2U7dnNG95Rlm2UP/r0MfuvIstw/WZ3d1sQNRlgesUaWGbWARmH1GmRelxLLz71MUqEz6VIHphjjycPwooc3q1pyU=
+ - secure: RJ5MM2YJB3tidhtbJAQ0EPOfiGzCBMcxwm7aVYPig5bQHzgLLM323GgMYG0i/UTDqdxAilUSMSCq115Bq9e0xdrWzn3ogn+Tg9BCZ1aWB8nq+HOoGKTGsFW2ZAyNNsgMoZG2444TBF5oAFKx+GpeN10lE/mThxuDckwePjWK/8jcwWiw1G6u9dwYqKkRiW0wVqGhQtB9V7UQXVY1ENp/EPxarssjcJt5DH+30WtVsQNrHi3qIBG8D9EHfVcZSK2gaaurz3ef7iaXg6UqMQZSnz2p+r7xahy2bYGiEcbWZZIGMyIRzdH6eTe3tpkkN3rZBBKnR+UM3Vd0p59D+CSWnicO01JLinDQgkfYAxy1DlC2IGzVmtMlOmz1wxZZwVajII9ySG/YvE2SXX1T3nBDNpcPAldGVc2qTyQdJMEA3Hrr/aMqePp352y0aLk44i3xJTKoNMZh3xitZLWGBS1XcNA3fk41THFzcOPuK39Kx86yTltigZvKiQ6HBUS1W5O3rE98IR95S65xDpxRcHm9Rerv4SBkUZkrEuNIW1aTSyX5pV6qdw4vXcfprI0gz2FVXCWmapL4yWbKn9STNH4bLD3JhJpWjzp3eqp78M0rvY5gtLRnwO6JIbxAc1oNTB/UrR2xIcDJgwoslVqJaK4bWOCzmNztkPsZ/7VCyPIiGQg=
+
+ # $base_rmq_ref is used by rabbitmq-components.mk to select the
+ # appropriate branch for dependencies.
+ - base_rmq_ref=master
+
+elixir:
+ - '1.9'
+otp_release:
+ - '21.3'
+ - '22.2'
+
+install:
+ # This project being an Erlang one (we just set language to Elixir
+ # to ensure it is installed), we don't want Travis to run mix(1)
+ # automatically as it will break.
+ skip
+
+script:
+ # $current_rmq_ref is also used by rabbitmq-components.mk to select
+ # the appropriate branch for dependencies.
+ - make check-rabbitmq-components.mk
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+ - make xref
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+ - make tests
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+
+after_failure:
+ - |
+ cd "$TRAVIS_BUILD_DIR"
+ if test -d logs && test "$AWS_ACCESS_KEY_ID" && test "$AWS_SECRET_ACCESS_KEY"; then
+ archive_name="$(basename "$TRAVIS_REPO_SLUG")-$TRAVIS_JOB_NUMBER"
+
+ tar -c --transform "s/^logs/${archive_name}/" -f - logs | \
+ xz > "${archive_name}.tar.xz"
+
+ aws s3 cp "${archive_name}.tar.xz" s3://server-release-pipeline/travis-ci-logs/ \
+ --region eu-west-1 \
+ --acl public-read
+ fi
diff --git a/deps/amqp10_common/CODE_OF_CONDUCT.md b/deps/amqp10_common/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000000..1f6ef1c576
--- /dev/null
+++ b/deps/amqp10_common/CODE_OF_CONDUCT.md
@@ -0,0 +1,44 @@
+# Contributor Code of Conduct
+
+As contributors and maintainers of this project, and in the interest of fostering an open
+and welcoming community, we pledge to respect all people who contribute through reporting
+issues, posting feature requests, updating documentation, submitting pull requests or
+patches, and other activities.
+
+We are committed to making participation in this project a harassment-free experience for
+everyone, regardless of level of experience, gender, gender identity and expression,
+sexual orientation, disability, personal appearance, body size, race, ethnicity, age,
+religion, or nationality.
+
+Examples of unacceptable behavior by participants include:
+
+ * The use of sexualized language or imagery
+ * Personal attacks
+ * Trolling or insulting/derogatory comments
+ * Public or private harassment
+ * Publishing others' private information, such as physical or electronic addresses,
+ without explicit permission
+ * Other unethical or unprofessional conduct
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments,
+commits, code, wiki edits, issues, and other contributions that are not aligned to this
+Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors
+that they deem inappropriate, threatening, offensive, or harmful.
+
+By adopting this Code of Conduct, project maintainers commit themselves to fairly and
+consistently applying these principles to every aspect of managing this project. Project
+maintainers who do not follow or enforce the Code of Conduct may be permanently removed
+from the project team.
+
+This Code of Conduct applies both within project spaces and in public spaces when an
+individual is representing the project or its community.
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by
+contacting a project maintainer at [info@rabbitmq.com](mailto:info@rabbitmq.com). All complaints will
+be reviewed and investigated and will result in a response that is deemed necessary and
+appropriate to the circumstances. Maintainers are obligated to maintain confidentiality
+with regard to the reporter of an incident.
+
+This Code of Conduct is adapted from the
+[Contributor Covenant](http://contributor-covenant.org), version 1.3.0, available at
+[contributor-covenant.org/version/1/3/0/](http://contributor-covenant.org/version/1/3/0/)
diff --git a/deps/amqp10_common/CONTRIBUTING.md b/deps/amqp10_common/CONTRIBUTING.md
new file mode 100644
index 0000000000..45bbcbe62e
--- /dev/null
+++ b/deps/amqp10_common/CONTRIBUTING.md
@@ -0,0 +1,38 @@
+## Overview
+
+RabbitMQ projects use pull requests to discuss, collaborate on, and accept code contributions.
+Pull requests are the primary place for discussing code changes.
+
+## How to Contribute
+
+The process is fairly standard:
+
+ * Fork the repository or repositories you plan on contributing to
+ * Clone [RabbitMQ umbrella repository](https://github.com/rabbitmq/rabbitmq-public-umbrella)
+ * `cd umbrella`, `make co`
+ * Create a branch with a descriptive name in the relevant repositories
+ * Make your changes, run tests, commit with a [descriptive message](http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html), push to your fork
+ * Submit pull requests with an explanation what has been changed and **why**
+ * Submit a filled out and signed [Contributor Agreement](https://github.com/rabbitmq/ca#how-to-submit) if needed (see below)
+ * Be patient. We will get to your pull request eventually
+
+If what you are going to work on is a substantial change, please first ask the core team
+for their opinion on the [RabbitMQ mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
+
+
+## Code of Conduct
+
+See [CODE_OF_CONDUCT.md](./CODE_OF_CONDUCT.md).
+
+
+## Contributor Agreement
+
+If you want to contribute a non-trivial change, please submit a signed copy of our
+[Contributor Agreement](https://github.com/rabbitmq/ca#how-to-submit) around the time
+you submit your pull request. This will make it much easier (in some cases, possible)
+for the RabbitMQ team at Pivotal to merge your contribution.
+
+
+## Where to Ask Questions
+
+If something isn't clear, feel free to ask on our [mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
diff --git a/deps/amqp10_common/LICENSE b/deps/amqp10_common/LICENSE
new file mode 100644
index 0000000000..670ebad1b2
--- /dev/null
+++ b/deps/amqp10_common/LICENSE
@@ -0,0 +1,5 @@
+This package, the RabbitMQ AMQP 1.0 common library, is licensed under the MPL 2.0. For
+the MPL, please see LICENSE-MPL-RabbitMQ.
+
+If you have any questions regarding licensing, please contact us at
+info@rabbitmq.com.
diff --git a/deps/amqp10_common/LICENSE-MPL-RabbitMQ b/deps/amqp10_common/LICENSE-MPL-RabbitMQ
new file mode 100644
index 0000000000..14e2f777f6
--- /dev/null
+++ b/deps/amqp10_common/LICENSE-MPL-RabbitMQ
@@ -0,0 +1,373 @@
+Mozilla Public License Version 2.0
+==================================
+
+1. Definitions
+--------------
+
+1.1. "Contributor"
+ means each individual or legal entity that creates, contributes to
+ the creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+ means the combination of the Contributions of others (if any) used
+ by a Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+ means Source Code Form to which the initial Contributor has attached
+ the notice in Exhibit A, the Executable Form of such Source Code
+ Form, and Modifications of such Source Code Form, in each case
+ including portions thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ (a) that the initial Contributor has attached the notice described
+ in Exhibit B to the Covered Software; or
+
+ (b) that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the
+ terms of a Secondary License.
+
+1.6. "Executable Form"
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+ means a work that combines Covered Software with other material, in
+ a separate file or files, that is not Covered Software.
+
+1.8. "License"
+ means this document.
+
+1.9. "Licensable"
+ means having the right to grant, to the maximum extent possible,
+ whether at the time of the initial grant or subsequently, any and
+ all of the rights conveyed by this License.
+
+1.10. "Modifications"
+ means any of the following:
+
+ (a) any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered
+ Software; or
+
+ (b) any new file in Source Code Form that contains any Covered
+ Software.
+
+1.11. "Patent Claims" of a Contributor
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the
+ License, by the making, using, selling, offering for sale, having
+ made, import, or transfer of either its Contributions or its
+ Contributor Version.
+
+1.12. "Secondary License"
+ means either the GNU General Public License, Version 2.0, the GNU
+ Lesser General Public License, Version 2.1, the GNU Affero General
+ Public License, Version 3.0, or any later versions of those
+ licenses.
+
+1.13. "Source Code Form"
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that
+ controls, is controlled by, or is under common control with You. For
+ purposes of this definition, "control" means (a) the power, direct
+ or indirect, to cause the direction or management of such entity,
+ whether by contract or otherwise, or (b) ownership of more than
+ fifty percent (50%) of the outstanding shares or beneficial
+ ownership of such entity.
+
+2. License Grants and Conditions
+--------------------------------
+
+2.1. Grants
+
+Each Contributor hereby grants You a world-wide, royalty-free,
+non-exclusive license:
+
+(a) under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+(b) under Patent Claims of such Contributor to make, use, sell, offer
+ for sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+The licenses granted in Section 2.1 with respect to any Contribution
+become effective for each Contribution on the date the Contributor first
+distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+The licenses granted in this Section 2 are the only rights granted under
+this License. No additional rights or licenses will be implied from the
+distribution or licensing of Covered Software under this License.
+Notwithstanding Section 2.1(b) above, no patent license is granted by a
+Contributor:
+
+(a) for any code that a Contributor has removed from Covered Software;
+ or
+
+(b) for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+(c) under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+This License does not grant any rights in the trademarks, service marks,
+or logos of any Contributor (except as may be necessary to comply with
+the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+No Contributor makes additional grants as a result of Your choice to
+distribute the Covered Software under a subsequent version of this
+License (see Section 10.2) or under the terms of a Secondary License (if
+permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+Each Contributor represents that the Contributor believes its
+Contributions are its original creation(s) or it has sufficient rights
+to grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+This License is not intended to limit any rights You have under
+applicable copyright doctrines of fair use, fair dealing, or other
+equivalents.
+
+2.7. Conditions
+
+Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
+in Section 2.1.
+
+3. Responsibilities
+-------------------
+
+3.1. Distribution of Source Form
+
+All distribution of Covered Software in Source Code Form, including any
+Modifications that You create or to which You contribute, must be under
+the terms of this License. You must inform recipients that the Source
+Code Form of the Covered Software is governed by the terms of this
+License, and how they can obtain a copy of this License. You may not
+attempt to alter or restrict the recipients' rights in the Source Code
+Form.
+
+3.2. Distribution of Executable Form
+
+If You distribute Covered Software in Executable Form then:
+
+(a) such Covered Software must also be made available in Source Code
+ Form, as described in Section 3.1, and You must inform recipients of
+ the Executable Form how they can obtain a copy of such Source Code
+ Form by reasonable means in a timely manner, at a charge no more
+ than the cost of distribution to the recipient; and
+
+(b) You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter
+ the recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+You may create and distribute a Larger Work under terms of Your choice,
+provided that You also comply with the requirements of this License for
+the Covered Software. If the Larger Work is a combination of Covered
+Software with a work governed by one or more Secondary Licenses, and the
+Covered Software is not Incompatible With Secondary Licenses, this
+License permits You to additionally distribute such Covered Software
+under the terms of such Secondary License(s), so that the recipient of
+the Larger Work may, at their option, further distribute the Covered
+Software under the terms of either this License or such Secondary
+License(s).
+
+3.4. Notices
+
+You may not remove or alter the substance of any license notices
+(including copyright notices, patent notices, disclaimers of warranty,
+or limitations of liability) contained within the Source Code Form of
+the Covered Software, except that You may alter any license notices to
+the extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+You may choose to offer, and to charge a fee for, warranty, support,
+indemnity or liability obligations to one or more recipients of Covered
+Software. However, You may do so only on Your own behalf, and not on
+behalf of any Contributor. You must make it absolutely clear that any
+such warranty, support, indemnity, or liability obligation is offered by
+You alone, and You hereby agree to indemnify every Contributor for any
+liability incurred by such Contributor as a result of warranty, support,
+indemnity or liability terms You offer. You may include additional
+disclaimers of warranty and limitations of liability specific to any
+jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+---------------------------------------------------
+
+If it is impossible for You to comply with any of the terms of this
+License with respect to some or all of the Covered Software due to
+statute, judicial order, or regulation then You must: (a) comply with
+the terms of this License to the maximum extent possible; and (b)
+describe the limitations and the code they affect. Such description must
+be placed in a text file included with all distributions of the Covered
+Software under this License. Except to the extent prohibited by statute
+or regulation, such description must be sufficiently detailed for a
+recipient of ordinary skill to be able to understand it.
+
+5. Termination
+--------------
+
+5.1. The rights granted under this License will terminate automatically
+if You fail to comply with any of its terms. However, if You become
+compliant, then the rights granted under this License from a particular
+Contributor are reinstated (a) provisionally, unless and until such
+Contributor explicitly and finally terminates Your grants, and (b) on an
+ongoing basis, if such Contributor fails to notify You of the
+non-compliance by some reasonable means prior to 60 days after You have
+come back into compliance. Moreover, Your grants from a particular
+Contributor are reinstated on an ongoing basis if such Contributor
+notifies You of the non-compliance by some reasonable means, this is the
+first time You have received notice of non-compliance with this License
+from such Contributor, and You become compliant prior to 30 days after
+Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+infringement claim (excluding declaratory judgment actions,
+counter-claims, and cross-claims) alleging that a Contributor Version
+directly or indirectly infringes any patent, then the rights granted to
+You by any and all Contributors for the Covered Software under Section
+2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all
+end user license agreements (excluding distributors and resellers) which
+have been validly granted by You or Your distributors under this License
+prior to termination shall survive termination.
+
+************************************************************************
+* *
+* 6. Disclaimer of Warranty *
+* ------------------------- *
+* *
+* Covered Software is provided under this License on an "as is" *
+* basis, without warranty of any kind, either expressed, implied, or *
+* statutory, including, without limitation, warranties that the *
+* Covered Software is free of defects, merchantable, fit for a *
+* particular purpose or non-infringing. The entire risk as to the *
+* quality and performance of the Covered Software is with You. *
+* Should any Covered Software prove defective in any respect, You *
+* (not any Contributor) assume the cost of any necessary servicing, *
+* repair, or correction. This disclaimer of warranty constitutes an *
+* essential part of this License. No use of any Covered Software is *
+* authorized under this License except under this disclaimer. *
+* *
+************************************************************************
+
+************************************************************************
+* *
+* 7. Limitation of Liability *
+* -------------------------- *
+* *
+* Under no circumstances and under no legal theory, whether tort *
+* (including negligence), contract, or otherwise, shall any *
+* Contributor, or anyone who distributes Covered Software as *
+* permitted above, be liable to You for any direct, indirect, *
+* special, incidental, or consequential damages of any character *
+* including, without limitation, damages for lost profits, loss of *
+* goodwill, work stoppage, computer failure or malfunction, or any *
+* and all other commercial damages or losses, even if such party *
+* shall have been informed of the possibility of such damages. This *
+* limitation of liability shall not apply to liability for death or *
+* personal injury resulting from such party's negligence to the *
+* extent applicable law prohibits such limitation. Some *
+* jurisdictions do not allow the exclusion or limitation of *
+* incidental or consequential damages, so this exclusion and *
+* limitation may not apply to You. *
+* *
+************************************************************************
+
+8. Litigation
+-------------
+
+Any litigation relating to this License may be brought only in the
+courts of a jurisdiction where the defendant maintains its principal
+place of business and such litigation shall be governed by laws of that
+jurisdiction, without reference to its conflict-of-law provisions.
+Nothing in this Section shall prevent a party's ability to bring
+cross-claims or counter-claims.
+
+9. Miscellaneous
+----------------
+
+This License represents the complete agreement concerning the subject
+matter hereof. If any provision of this License is held to be
+unenforceable, such provision shall be reformed only to the extent
+necessary to make it enforceable. Any law or regulation which provides
+that the language of a contract shall be construed against the drafter
+shall not be used to construe this License against a Contributor.
+
+10. Versions of the License
+---------------------------
+
+10.1. New Versions
+
+Mozilla Foundation is the license steward. Except as provided in Section
+10.3, no one other than the license steward has the right to modify or
+publish new versions of this License. Each version will be given a
+distinguishing version number.
+
+10.2. Effect of New Versions
+
+You may distribute the Covered Software under the terms of the version
+of the License under which You originally received the Covered Software,
+or under the terms of any subsequent version published by the license
+steward.
+
+10.3. Modified Versions
+
+If you create software not governed by this License, and you want to
+create a new license for such software, you may create and use a
+modified version of this License if you rename the license and remove
+any references to the name of the license steward (except to note that
+such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+Licenses
+
+If You choose to distribute Source Code Form that is Incompatible With
+Secondary Licenses under the terms of this version of the License, the
+notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+-------------------------------------------
+
+ This Source Code Form is subject to the terms of the Mozilla Public
+ License, v. 2.0. If a copy of the MPL was not distributed with this
+ file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular
+file, then You may include the notice in a location (such as a LICENSE
+file in a relevant directory) where a recipient would be likely to look
+for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+---------------------------------------------------------
+
+ This Source Code Form is "Incompatible With Secondary Licenses", as
+ defined by the Mozilla Public License, v. 2.0.
diff --git a/deps/amqp10_common/Makefile b/deps/amqp10_common/Makefile
new file mode 100644
index 0000000000..291a129f88
--- /dev/null
+++ b/deps/amqp10_common/Makefile
@@ -0,0 +1,34 @@
+PROJECT = amqp10_common
+PROJECT_DESCRIPTION = Modules shared by rabbitmq-amqp1.0 and rabbitmq-amqp1.0-client
+
+DIALYZER_OPTS += --src -r test
+
+BUILD_DEPS = rabbit_common
+
+DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-test.mk
+DEP_PLUGINS = rabbit_common/mk/rabbitmq-macros.mk \
+ rabbit_common/mk/rabbitmq-hexpm.mk \
+ rabbit_common/mk/rabbitmq-dist.mk \
+ rabbit_common/mk/rabbitmq-test.mk \
+ rabbit_common/mk/rabbitmq-tools.mk
+
+# FIXME: Use erlang.mk patched for RabbitMQ, while waiting for PRs to be
+# reviewed and merged.
+
+ERLANG_MK_REPO = https://github.com/rabbitmq/erlang.mk.git
+ERLANG_MK_COMMIT = rabbitmq-tmp
+
+# Variables and recipes in development.*.mk are meant to be used from
+# any Git clone. They are excluded from the files published to Hex.pm.
+# Generated files are published to Hex.pm however so people using this
+# source won't have to depend on Python and rabbitmq-codegen.
+#
+# That's why those Makefiles are included with `-include`: we ignore any
+# inclusion errors.
+
+-include development.pre.mk
+
+include rabbitmq-components.mk
+include erlang.mk
+
+-include development.post.mk
diff --git a/deps/amqp10_common/codegen.py b/deps/amqp10_common/codegen.py
new file mode 100755
index 0000000000..dc4480a181
--- /dev/null
+++ b/deps/amqp10_common/codegen.py
@@ -0,0 +1,131 @@
+#!/usr/bin/env python
+
+from __future__ import print_function
+
+import sys
+import os
+import re
+from xml.dom.minidom import parse
+
+def safe(name):
+    return name.replace('-', '_')
+
+class AMQPType:
+ def __init__(self, dom):
+ self.name = safe(dom.getAttribute('name'))
+ self.source = dom.getAttribute('source')
+ self.desc = dom.getElementsByTagName('descriptor')[0].getAttribute('name')
+ self.code = dom.getElementsByTagName('descriptor')[0].getAttribute('code')
+ self.number = parse_code(self.code)
+ self.fields = [safe(el.getAttribute('name')) for el in
+ dom.getElementsByTagName('field')]
+ # These are 'restricted' types, rather than composite, so they
+ # do not have defined fields.
+ if self.desc in ['amqp:data:binary', 'amqp:amqp-sequence:list',
+ 'amqp:amqp-value:*', 'amqp:application-properties:map',
+ 'amqp:delivery-annotations:map',
+ 'amqp:message-annotations:map', 'amqp:footer:map']:
+ self.fields = ['content']
+
+ def define(self):
+ return ('SYMBOL_%s' % self.name.upper(), self.desc)
+
+class AMQPDefines:
+ def __init__(self, dom):
+ self.name = safe(dom.getAttribute('name'))
+ self.source = dom.getAttribute('source')
+ self.options = [(self.name.upper() + '_' +
+ (safe(el.getAttribute('name')).upper()),
+ el.getAttribute('value')) for el in
+ dom.getElementsByTagName('choice')]
+
+def print_erl(types):
+ print("""-module(amqp10_framing0).
+-export([record_for/1, fields/1, encode/1, symbol_for/1, number_for/1]).
+-include("amqp10_framing.hrl").""")
+ for t in types:
+ print("""record_for({symbol, <<"%s">>}) ->
+ #'v1_0.%s'{};""" % (t.desc, t.name))
+ if t.code:
+ print("""record_for({_, %d}) ->
+ #'v1_0.%s'{};""" % (t.number, t.name))
+ print("%% %s\n" % t.code)
+
+ print("""record_for(Other) -> exit({unknown, Other}).
+
+""")
+ for t in types:
+ print("""fields(#'v1_0.%s'{}) -> record_info(fields, 'v1_0.%s');""" % (t.name, t.name))
+ print("""fields(_Other) -> unknown.
+
+""")
+ for t in types:
+ print("""encode(Frame = #'v1_0.%s'{}) ->
+ amqp10_framing:encode_described('%s', %s, Frame);""" % (t.name, t.source, t.number))
+ print("""encode(undefined) -> null;
+encode(Other) -> Other.
+
+""")
+ for t in types:
+ print("""symbol_for(#'v1_0.%s'{}) ->
+ {symbol, <<"%s">>};""" % (t.name, t.desc))
+ print("""symbol_for(Other) -> exit({unknown, Other}).
+
+""")
+ for t in types:
+ print("""number_for(#'v1_0.%s'{}) ->
+ {ulong, %s};""" % (t.name, t.number))
+ print("""number_for(Other) -> exit({unknown, Other}).""")
+
+def print_hrl(types, defines):
+ for t in types:
+ print("""-record('v1_0.%s', {%s}).""" % (t.name, ", ".join(t.fields)))
+ print_define(t.define(), 'symbol')
+ for d in defines:
+ if len(d.options) > 0:
+ print(""" %% %s""" % (d.name))
+ for opt in d.options:
+ print_define(opt, d.source)
+ print("""
+-define(DESCRIBED, 0:8).
+-define(DESCRIBED_BIN, <<?DESCRIBED>>).
+""")
+
+
+def print_define(opt, source):
+ (name, value) = opt
+ if source == 'symbol':
+ quoted = '<<"%s">>' % value
+ else:
+ quoted = value
+ print("""-define(V_1_0_%s, {%s, %s}).""" % (name, source, quoted))
+
+def want_type(el):
+ descriptors = el.getElementsByTagName('descriptor')
+ return len(descriptors) > 0
+
+def want_define(el):
+ klass = el.getAttribute('class')
+ return klass == 'restricted'
+
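+# Descriptor codes look like "0x00000000:0x00000023"; the two 32-bit hex halves
+# are joined into the single 64-bit ulong used on the wire.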
+def parse_code(code):
+ res = re.match('0x([0-9a-fA-F]{8,8}):0x([0-9a-fA-F]{8,8})', code)
+ return res and int(res.group(1) + res.group(2), 16)
+
+types = []
+defines = []
+mode = sys.argv[1]
+
+for file in sys.argv[2:]:
+ tree = parse(file)
+ types.extend([AMQPType(el) for el in tree.getElementsByTagName('type')
+ if want_type(el)])
+ defines.extend([AMQPDefines(el) for el in tree.getElementsByTagName('type')
+ if want_define(el)])
+
+if mode == 'erl':
+ print_erl(types)
+elif mode == 'hrl':
+ print_hrl(types, defines)
+else:
+    raise ValueError("mode must be 'erl' or 'hrl'")
diff --git a/deps/amqp10_common/development.post.mk b/deps/amqp10_common/development.post.mk
new file mode 100644
index 0000000000..3f8301acd1
--- /dev/null
+++ b/deps/amqp10_common/development.post.mk
@@ -0,0 +1,28 @@
+# --------------------------------------------------------------------
+# Framing sources generation.
+# --------------------------------------------------------------------
+
+PYTHON ?= python
+CODEGEN = $(CURDIR)/codegen.py
+CODEGEN_DIR ?= $(DEPS_DIR)/rabbitmq_codegen
+CODEGEN_AMQP = $(CODEGEN_DIR)/amqp_codegen.py
+
+AMQP_SPEC_1_0 = $(CODEGEN_DIR)/amqp-1.0/messaging.xml \
+ $(CODEGEN_DIR)/amqp-1.0/security.xml \
+ $(CODEGEN_DIR)/amqp-1.0/transport.xml \
+ $(CODEGEN_DIR)/amqp-1.0/transactions.xml
+
+include/amqp10_framing.hrl:: $(CODEGEN) $(CODEGEN_AMQP) $(AMQP_SPEC_1_0)
+ $(verbose) mkdir -p $(dir $@)
+ $(gen_verbose) env PYTHONPATH=$(CODEGEN_DIR) \
+ $(PYTHON) $(CODEGEN) hrl $(AMQP_SPEC_1_0) > $@
+
+src/amqp10_framing0.erl:: $(CODEGEN) $(CODEGEN_AMQP) $(AMQP_SPEC_1_0)
+ $(verbose) mkdir -p $(dir $@)
+ $(gen_verbose) env PYTHONPATH=$(CODEGEN_DIR) \
+ $(PYTHON) $(CODEGEN) erl $(AMQP_SPEC_1_0) > $@
+
+clean:: clean-extra-sources
+
+clean-extra-sources:
+ $(gen_verbose) rm -f $(EXTRA_SOURCES)
diff --git a/deps/amqp10_common/development.pre.mk b/deps/amqp10_common/development.pre.mk
new file mode 100644
index 0000000000..5012829e7c
--- /dev/null
+++ b/deps/amqp10_common/development.pre.mk
@@ -0,0 +1,12 @@
+# Variables and recipes in development.*.mk are meant to be used from
+# any Git clone. They are excluded from the files published to Hex.pm.
+# Generated files are published to Hex.pm however so people using this
+# source won't have to depend on Python and rabbitmq-codegen.
+
+BUILD_DEPS += rabbitmq_codegen
+
+EXTRA_SOURCES += include/amqp10_framing.hrl \
+ src/amqp10_framing0.erl
+
+.DEFAULT_GOAL = all
+$(PROJECT).d:: $(EXTRA_SOURCES)
diff --git a/deps/amqp10_common/erlang.mk b/deps/amqp10_common/erlang.mk
new file mode 100644
index 0000000000..fce4be0b0a
--- /dev/null
+++ b/deps/amqp10_common/erlang.mk
@@ -0,0 +1,7808 @@
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+.PHONY: all app apps deps search rel relup docs install-docs check tests clean distclean help erlang-mk
+
+ERLANG_MK_FILENAME := $(realpath $(lastword $(MAKEFILE_LIST)))
+export ERLANG_MK_FILENAME
+
+ERLANG_MK_VERSION = 2019.07.01-40-geb3e4b0
+ERLANG_MK_WITHOUT =
+
+# Make 3.81 and 3.82 are deprecated.
+
+ifeq ($(MAKELEVEL)$(MAKE_VERSION),03.81)
+$(warning Please upgrade to GNU Make 4 or later: https://erlang.mk/guide/installation.html)
+endif
+
+ifeq ($(MAKELEVEL)$(MAKE_VERSION),03.82)
+$(warning Please upgrade to GNU Make 4 or later: https://erlang.mk/guide/installation.html)
+endif
+
+# Core configuration.
+
+PROJECT ?= $(notdir $(CURDIR))
+PROJECT := $(strip $(PROJECT))
+
+PROJECT_VERSION ?= rolling
+PROJECT_MOD ?= $(PROJECT)_app
+PROJECT_ENV ?= []
+
+# Verbosity.
+
+V ?= 0
+
+verbose_0 = @
+verbose_2 = set -x;
+verbose = $(verbose_$(V))
+
+ifeq ($(V),3)
+SHELL := $(SHELL) -x
+endif
+
+gen_verbose_0 = @echo " GEN " $@;
+gen_verbose_2 = set -x;
+gen_verbose = $(gen_verbose_$(V))
+
+gen_verbose_esc_0 = @echo " GEN " $$@;
+gen_verbose_esc_2 = set -x;
+gen_verbose_esc = $(gen_verbose_esc_$(V))
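+
+# Usage examples, following the definitions above:
+#
+#   make         # V=0: quiet, recipe commands are prefixed with @
+#   make V=1     # echo each command as it runs
+#   make V=2     # additionally trace recipes with `set -x`
+#   make V=3     # also run the shell itself with -x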
+
+# Temporary files directory.
+
+ERLANG_MK_TMP ?= $(CURDIR)/.erlang.mk
+export ERLANG_MK_TMP
+
+# "erl" command.
+
+ERL = erl +A1 -noinput -boot no_dot_erlang
+
+# Platform detection.
+
+ifeq ($(PLATFORM),)
+UNAME_S := $(shell uname -s)
+
+ifeq ($(UNAME_S),Linux)
+PLATFORM = linux
+else ifeq ($(UNAME_S),Darwin)
+PLATFORM = darwin
+else ifeq ($(UNAME_S),SunOS)
+PLATFORM = solaris
+else ifeq ($(UNAME_S),GNU)
+PLATFORM = gnu
+else ifeq ($(UNAME_S),FreeBSD)
+PLATFORM = freebsd
+else ifeq ($(UNAME_S),NetBSD)
+PLATFORM = netbsd
+else ifeq ($(UNAME_S),OpenBSD)
+PLATFORM = openbsd
+else ifeq ($(UNAME_S),DragonFly)
+PLATFORM = dragonfly
+else ifeq ($(shell uname -o),Msys)
+PLATFORM = msys2
+else
+$(error Unable to detect platform. Please open a ticket with the output of uname -a.)
+endif
+
+export PLATFORM
+endif
+
+# Core targets.
+
+all:: deps app rel
+
+# Noop to avoid a Make warning when there's nothing to do.
+rel::
+ $(verbose) :
+
+relup:: deps app
+
+check:: tests
+
+clean:: clean-crashdump
+
+clean-crashdump:
+ifneq ($(wildcard erl_crash.dump),)
+ $(gen_verbose) rm -f erl_crash.dump
+endif
+
+distclean:: clean distclean-tmp
+
+$(ERLANG_MK_TMP):
+ $(verbose) mkdir -p $(ERLANG_MK_TMP)
+
+distclean-tmp:
+ $(gen_verbose) rm -rf $(ERLANG_MK_TMP)
+
+help::
+ $(verbose) printf "%s\n" \
+ "erlang.mk (version $(ERLANG_MK_VERSION)) is distributed under the terms of the ISC License." \
+ "Copyright (c) 2013-2016 Loïc Hoguin <essen@ninenines.eu>" \
+ "" \
+ "Usage: [V=1] $(MAKE) [target]..." \
+ "" \
+ "Core targets:" \
+ " all Run deps, app and rel targets in that order" \
+ " app Compile the project" \
+ " deps Fetch dependencies (if needed) and compile them" \
+ " fetch-deps Fetch dependencies recursively (if needed) without compiling them" \
+ " list-deps List dependencies recursively on stdout" \
+ " search q=... Search for a package in the built-in index" \
+ " rel Build a release for this project, if applicable" \
+ " docs Build the documentation for this project" \
+ " install-docs Install the man pages for this project" \
+ " check Compile and run all tests and analysis for this project" \
+ " tests Run the tests for this project" \
+ " clean Delete temporary and output files from most targets" \
+ " distclean Delete all temporary and output files" \
+ " help Display this help and exit" \
+ " erlang-mk Update erlang.mk to the latest version"
+
+# Core functions.
+
+empty :=
+space := $(empty) $(empty)
+tab := $(empty)	$(empty)
+comma := ,
+
+define newline
+
+
+endef
+
+define comma_list
+$(subst $(space),$(comma),$(strip $(1)))
+endef
+
+define escape_dquotes
+$(subst ",\",$1)
+endef
+
+# Adding erlang.mk to make Erlang scripts that call init:get_plain_arguments() happy.
+define erlang
+$(ERL) $2 -pz $(ERLANG_MK_TMP)/rebar/ebin -eval "$(subst $(newline),,$(call escape_dquotes,$1))" -- erlang.mk
+endef
+
+ifeq ($(PLATFORM),msys2)
+core_native_path = $(shell cygpath -m $1)
+else
+core_native_path = $1
+endif
+
+core_http_get = curl -Lf$(if $(filter-out 0,$(V)),,s)o $(call core_native_path,$1) $2
+
+core_eq = $(and $(findstring $(1),$(2)),$(findstring $(2),$(1)))
+
+# We skip files that contain spaces because they end up causing issues.
+core_find = $(if $(wildcard $1),$(shell find $(1:%/=%) \( -type l -o -type f \) -name $(subst *,\*,$2) | grep -v " "))
+
+core_lc = $(subst A,a,$(subst B,b,$(subst C,c,$(subst D,d,$(subst E,e,$(subst F,f,$(subst G,g,$(subst H,h,$(subst I,i,$(subst J,j,$(subst K,k,$(subst L,l,$(subst M,m,$(subst N,n,$(subst O,o,$(subst P,p,$(subst Q,q,$(subst R,r,$(subst S,s,$(subst T,t,$(subst U,u,$(subst V,v,$(subst W,w,$(subst X,x,$(subst Y,y,$(subst Z,z,$(1)))))))))))))))))))))))))))
+
+core_ls = $(filter-out $(1),$(shell echo $(1)))
+
+# @todo Use a solution that does not require using perl.
+core_relpath = $(shell perl -e 'use File::Spec; print File::Spec->abs2rel(@ARGV) . "\n"' $1 $2)
+
+define core_render
+ printf -- '$(subst $(newline),\n,$(subst %,%%,$(subst ','\'',$(subst $(tab),$(WS),$(call $(1))))))\n' > $(2)
+endef
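+
+# Illustrative expansions of the helpers above (per their definitions):
+#
+#   $(call comma_list,a b c)  -> a,b,c
+#   $(call core_eq,foo,foo)   -> non-empty (true); empty when the strings differ
+#   $(call core_lc,FooBar)    -> foobar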
+
+# Automated update.
+
+ERLANG_MK_REPO ?= https://github.com/ninenines/erlang.mk
+ERLANG_MK_COMMIT ?=
+ERLANG_MK_BUILD_CONFIG ?= build.config
+ERLANG_MK_BUILD_DIR ?= .erlang.mk.build
+
+erlang-mk: WITHOUT ?= $(ERLANG_MK_WITHOUT)
+erlang-mk:
+ifdef ERLANG_MK_COMMIT
+ $(verbose) git clone $(ERLANG_MK_REPO) $(ERLANG_MK_BUILD_DIR)
+ $(verbose) cd $(ERLANG_MK_BUILD_DIR) && git checkout $(ERLANG_MK_COMMIT)
+else
+ $(verbose) git clone --depth 1 $(ERLANG_MK_REPO) $(ERLANG_MK_BUILD_DIR)
+endif
+ $(verbose) if [ -f $(ERLANG_MK_BUILD_CONFIG) ]; then cp $(ERLANG_MK_BUILD_CONFIG) $(ERLANG_MK_BUILD_DIR)/build.config; fi
+ $(gen_verbose) $(MAKE) --no-print-directory -C $(ERLANG_MK_BUILD_DIR) WITHOUT='$(strip $(WITHOUT))' UPGRADE=1
+ $(verbose) cp $(ERLANG_MK_BUILD_DIR)/erlang.mk ./erlang.mk
+ $(verbose) rm -rf $(ERLANG_MK_BUILD_DIR)
+ $(verbose) rm -rf $(ERLANG_MK_TMP)
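+
+# Typical invocations (variables as defined above; the tag is illustrative):
+#
+#   make erlang-mk                              # upgrade to the latest erlang.mk
+#   make erlang-mk ERLANG_MK_COMMIT=2019.07.01  # pin a specific commit or tag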
+
+# The erlang.mk package index is bundled in the default erlang.mk build.
+# Search for the string "copyright" to skip to the rest of the code.
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-kerl
+
+KERL_INSTALL_DIR ?= $(HOME)/erlang
+
+ifeq ($(strip $(KERL)),)
+KERL := $(ERLANG_MK_TMP)/kerl/kerl
+endif
+
+KERL_DIR = $(ERLANG_MK_TMP)/kerl
+
+export KERL
+
+KERL_GIT ?= https://github.com/kerl/kerl
+KERL_COMMIT ?= master
+
+KERL_MAKEFLAGS ?=
+
+OTP_GIT ?= https://github.com/erlang/otp
+
+define kerl_otp_target
+$(KERL_INSTALL_DIR)/$(1): $(KERL)
+ $(verbose) if [ ! -d $$@ ]; then \
+ MAKEFLAGS="$(KERL_MAKEFLAGS)" $(KERL) build git $(OTP_GIT) $(1) $(1); \
+ $(KERL) install $(1) $(KERL_INSTALL_DIR)/$(1); \
+ fi
+endef
+
+define kerl_hipe_target
+$(KERL_INSTALL_DIR)/$1-native: $(KERL)
+ $(verbose) if [ ! -d $$@ ]; then \
+ KERL_CONFIGURE_OPTIONS=--enable-native-libs \
+ MAKEFLAGS="$(KERL_MAKEFLAGS)" $(KERL) build git $(OTP_GIT) $1 $1-native; \
+ $(KERL) install $1-native $(KERL_INSTALL_DIR)/$1-native; \
+ fi
+endef
+
+$(KERL): $(KERL_DIR)
+
+$(KERL_DIR): | $(ERLANG_MK_TMP)
+ $(gen_verbose) git clone --depth 1 $(KERL_GIT) $(ERLANG_MK_TMP)/kerl
+ $(verbose) cd $(ERLANG_MK_TMP)/kerl && git checkout $(KERL_COMMIT)
+ $(verbose) chmod +x $(KERL)
+
+distclean:: distclean-kerl
+
+distclean-kerl:
+ $(gen_verbose) rm -rf $(KERL_DIR)
+
+# Allow users to select which version of Erlang/OTP to use for a project.
+
+ifneq ($(strip $(LATEST_ERLANG_OTP)),)
+# In some environments it is necessary to filter out master.
+ERLANG_OTP := $(notdir $(lastword $(sort\
+ $(filter-out $(KERL_INSTALL_DIR)/master $(KERL_INSTALL_DIR)/OTP_R%,\
+ $(filter-out %-rc1 %-rc2 %-rc3,$(wildcard $(KERL_INSTALL_DIR)/*[^-native]))))))
+endif
+
+ERLANG_OTP ?=
+ERLANG_HIPE ?=
+
+# Use kerl to enforce a specific Erlang/OTP version for a project.
+ifneq ($(strip $(ERLANG_OTP)),)
+export PATH := $(KERL_INSTALL_DIR)/$(ERLANG_OTP)/bin:$(PATH)
+SHELL := env PATH=$(PATH) $(SHELL)
+$(eval $(call kerl_otp_target,$(ERLANG_OTP)))
+
+# Build Erlang/OTP only if it doesn't already exist.
+ifeq ($(wildcard $(KERL_INSTALL_DIR)/$(ERLANG_OTP))$(BUILD_ERLANG_OTP),)
+$(info Building Erlang/OTP $(ERLANG_OTP)... Please wait...)
+$(shell $(MAKE) $(KERL_INSTALL_DIR)/$(ERLANG_OTP) ERLANG_OTP=$(ERLANG_OTP) BUILD_ERLANG_OTP=1 >&2)
+endif
+
+else
+# Same for a HiPE enabled VM.
+ifneq ($(strip $(ERLANG_HIPE)),)
+export PATH := $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native/bin:$(PATH)
+SHELL := env PATH=$(PATH) $(SHELL)
+$(eval $(call kerl_hipe_target,$(ERLANG_HIPE)))
+
+# Build Erlang/OTP only if it doesn't already exist.
+ifeq ($(wildcard $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native)$(BUILD_ERLANG_OTP),)
+$(info Building HiPE-enabled Erlang/OTP $(ERLANG_HIPE)... Please wait...)
+$(shell $(MAKE) $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native ERLANG_HIPE=$(ERLANG_HIPE) BUILD_ERLANG_OTP=1 >&2)
+endif
+
+endif
+endif
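+
+# Example: pinning the OTP version for a project (a sketch; the version
+# string is illustrative):
+#
+#   ERLANG_OTP = OTP-22.3
+#   include erlang.mk
+#
+# kerl then builds and installs that OTP under $(KERL_INSTALL_DIR) on first
+# use, and PATH is updated above so all recipes run with it.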
+
+PACKAGES += aberth
+pkg_aberth_name = aberth
+pkg_aberth_description = Generic BERT-RPC server in Erlang
+pkg_aberth_homepage = https://github.com/a13x/aberth
+pkg_aberth_fetch = git
+pkg_aberth_repo = https://github.com/a13x/aberth
+pkg_aberth_commit = master
+
+PACKAGES += active
+pkg_active_name = active
+pkg_active_description = Active development for Erlang: rebuild and reload source/binary files while the VM is running
+pkg_active_homepage = https://github.com/proger/active
+pkg_active_fetch = git
+pkg_active_repo = https://github.com/proger/active
+pkg_active_commit = master
+
+PACKAGES += actordb_core
+pkg_actordb_core_name = actordb_core
+pkg_actordb_core_description = ActorDB main source
+pkg_actordb_core_homepage = http://www.actordb.com/
+pkg_actordb_core_fetch = git
+pkg_actordb_core_repo = https://github.com/biokoda/actordb_core
+pkg_actordb_core_commit = master
+
+PACKAGES += actordb_thrift
+pkg_actordb_thrift_name = actordb_thrift
+pkg_actordb_thrift_description = Thrift API for ActorDB
+pkg_actordb_thrift_homepage = http://www.actordb.com/
+pkg_actordb_thrift_fetch = git
+pkg_actordb_thrift_repo = https://github.com/biokoda/actordb_thrift
+pkg_actordb_thrift_commit = master
+
+PACKAGES += aleppo
+pkg_aleppo_name = aleppo
+pkg_aleppo_description = Alternative Erlang Pre-Processor
+pkg_aleppo_homepage = https://github.com/ErlyORM/aleppo
+pkg_aleppo_fetch = git
+pkg_aleppo_repo = https://github.com/ErlyORM/aleppo
+pkg_aleppo_commit = master
+
+PACKAGES += alog
+pkg_alog_name = alog
+pkg_alog_description = Simply the best logging framework for Erlang
+pkg_alog_homepage = https://github.com/siberian-fast-food/alogger
+pkg_alog_fetch = git
+pkg_alog_repo = https://github.com/siberian-fast-food/alogger
+pkg_alog_commit = master
+
+PACKAGES += amqp_client
+pkg_amqp_client_name = amqp_client
+pkg_amqp_client_description = RabbitMQ Erlang AMQP client
+pkg_amqp_client_homepage = https://www.rabbitmq.com/erlang-client-user-guide.html
+pkg_amqp_client_fetch = git
+pkg_amqp_client_repo = https://github.com/rabbitmq/rabbitmq-erlang-client.git
+pkg_amqp_client_commit = master
+
+PACKAGES += annotations
+pkg_annotations_name = annotations
+pkg_annotations_description = Simple code instrumentation utilities
+pkg_annotations_homepage = https://github.com/hyperthunk/annotations
+pkg_annotations_fetch = git
+pkg_annotations_repo = https://github.com/hyperthunk/annotations
+pkg_annotations_commit = master
+
+PACKAGES += antidote
+pkg_antidote_name = antidote
+pkg_antidote_description = Large-scale computation without synchronisation
+pkg_antidote_homepage = https://syncfree.lip6.fr/
+pkg_antidote_fetch = git
+pkg_antidote_repo = https://github.com/SyncFree/antidote
+pkg_antidote_commit = master
+
+PACKAGES += apns
+pkg_apns_name = apns
+pkg_apns_description = Apple Push Notification Server for Erlang
+pkg_apns_homepage = http://inaka.github.com/apns4erl
+pkg_apns_fetch = git
+pkg_apns_repo = https://github.com/inaka/apns4erl
+pkg_apns_commit = master
+
+PACKAGES += asciideck
+pkg_asciideck_name = asciideck
+pkg_asciideck_description = Asciidoc for Erlang.
+pkg_asciideck_homepage = https://ninenines.eu
+pkg_asciideck_fetch = git
+pkg_asciideck_repo = https://github.com/ninenines/asciideck
+pkg_asciideck_commit = master
+
+PACKAGES += azdht
+pkg_azdht_name = azdht
+pkg_azdht_description = Azureus Distributed Hash Table (DHT) in Erlang
+pkg_azdht_homepage = https://github.com/arcusfelis/azdht
+pkg_azdht_fetch = git
+pkg_azdht_repo = https://github.com/arcusfelis/azdht
+pkg_azdht_commit = master
+
+PACKAGES += backoff
+pkg_backoff_name = backoff
+pkg_backoff_description = Simple exponential backoffs in Erlang
+pkg_backoff_homepage = https://github.com/ferd/backoff
+pkg_backoff_fetch = git
+pkg_backoff_repo = https://github.com/ferd/backoff
+pkg_backoff_commit = master
+
+PACKAGES += barrel_tcp
+pkg_barrel_tcp_name = barrel_tcp
+pkg_barrel_tcp_description = barrel is a generic TCP acceptor pool with low latency in Erlang.
+pkg_barrel_tcp_homepage = https://github.com/benoitc-attic/barrel_tcp
+pkg_barrel_tcp_fetch = git
+pkg_barrel_tcp_repo = https://github.com/benoitc-attic/barrel_tcp
+pkg_barrel_tcp_commit = master
+
+PACKAGES += basho_bench
+pkg_basho_bench_name = basho_bench
+pkg_basho_bench_description = A load-generation and testing tool for basically anything you can write a value-returning Erlang function for.
+pkg_basho_bench_homepage = https://github.com/basho/basho_bench
+pkg_basho_bench_fetch = git
+pkg_basho_bench_repo = https://github.com/basho/basho_bench
+pkg_basho_bench_commit = master
+
+PACKAGES += bcrypt
+pkg_bcrypt_name = bcrypt
+pkg_bcrypt_description = Bcrypt Erlang / C library
+pkg_bcrypt_homepage = https://github.com/erlangpack/bcrypt
+pkg_bcrypt_fetch = git
+pkg_bcrypt_repo = https://github.com/erlangpack/bcrypt.git
+pkg_bcrypt_commit = master
+
+PACKAGES += beam
+pkg_beam_name = beam
+pkg_beam_description = BEAM emulator written in Erlang
+pkg_beam_homepage = https://github.com/tonyrog/beam
+pkg_beam_fetch = git
+pkg_beam_repo = https://github.com/tonyrog/beam
+pkg_beam_commit = master
+
+PACKAGES += beanstalk
+pkg_beanstalk_name = beanstalk
+pkg_beanstalk_description = An Erlang client for beanstalkd
+pkg_beanstalk_homepage = https://github.com/tim/erlang-beanstalk
+pkg_beanstalk_fetch = git
+pkg_beanstalk_repo = https://github.com/tim/erlang-beanstalk
+pkg_beanstalk_commit = master
+
+PACKAGES += bear
+pkg_bear_name = bear
+pkg_bear_description = a set of statistics functions for erlang
+pkg_bear_homepage = https://github.com/boundary/bear
+pkg_bear_fetch = git
+pkg_bear_repo = https://github.com/boundary/bear
+pkg_bear_commit = master
+
+PACKAGES += bertconf
+pkg_bertconf_name = bertconf
+pkg_bertconf_description = Make ETS tables out of static BERT files that are auto-reloaded
+pkg_bertconf_homepage = https://github.com/ferd/bertconf
+pkg_bertconf_fetch = git
+pkg_bertconf_repo = https://github.com/ferd/bertconf
+pkg_bertconf_commit = master
+
+PACKAGES += bifrost
+pkg_bifrost_name = bifrost
+pkg_bifrost_description = Erlang FTP Server Framework
+pkg_bifrost_homepage = https://github.com/thorstadt/bifrost
+pkg_bifrost_fetch = git
+pkg_bifrost_repo = https://github.com/thorstadt/bifrost
+pkg_bifrost_commit = master
+
+PACKAGES += binpp
+pkg_binpp_name = binpp
+pkg_binpp_description = Erlang Binary Pretty Printer
+pkg_binpp_homepage = https://github.com/jtendo/binpp
+pkg_binpp_fetch = git
+pkg_binpp_repo = https://github.com/jtendo/binpp
+pkg_binpp_commit = master
+
+PACKAGES += bisect
+pkg_bisect_name = bisect
+pkg_bisect_description = Ordered fixed-size binary dictionary in Erlang
+pkg_bisect_homepage = https://github.com/knutin/bisect
+pkg_bisect_fetch = git
+pkg_bisect_repo = https://github.com/knutin/bisect
+pkg_bisect_commit = master
+
+PACKAGES += bitcask
+pkg_bitcask_name = bitcask
+pkg_bitcask_description = because you need another key/value storage engine
+pkg_bitcask_homepage = https://github.com/basho/bitcask
+pkg_bitcask_fetch = git
+pkg_bitcask_repo = https://github.com/basho/bitcask
+pkg_bitcask_commit = develop
+
+PACKAGES += bitstore
+pkg_bitstore_name = bitstore
+pkg_bitstore_description = A document based ontology development environment
+pkg_bitstore_homepage = https://github.com/bdionne/bitstore
+pkg_bitstore_fetch = git
+pkg_bitstore_repo = https://github.com/bdionne/bitstore
+pkg_bitstore_commit = master
+
+PACKAGES += bootstrap
+pkg_bootstrap_name = bootstrap
+pkg_bootstrap_description = A simple, yet powerful Erlang cluster bootstrapping application.
+pkg_bootstrap_homepage = https://github.com/schlagert/bootstrap
+pkg_bootstrap_fetch = git
+pkg_bootstrap_repo = https://github.com/schlagert/bootstrap
+pkg_bootstrap_commit = master
+
+PACKAGES += boss
+pkg_boss_name = boss
+pkg_boss_description = Erlang web MVC, now featuring Comet
+pkg_boss_homepage = https://github.com/ChicagoBoss/ChicagoBoss
+pkg_boss_fetch = git
+pkg_boss_repo = https://github.com/ChicagoBoss/ChicagoBoss
+pkg_boss_commit = master
+
+PACKAGES += boss_db
+pkg_boss_db_name = boss_db
+pkg_boss_db_description = BossDB: a sharded, caching, pooling, evented ORM for Erlang
+pkg_boss_db_homepage = https://github.com/ErlyORM/boss_db
+pkg_boss_db_fetch = git
+pkg_boss_db_repo = https://github.com/ErlyORM/boss_db
+pkg_boss_db_commit = master
+
+PACKAGES += brod
+pkg_brod_name = brod
+pkg_brod_description = Kafka client in Erlang
+pkg_brod_homepage = https://github.com/klarna/brod
+pkg_brod_fetch = git
+pkg_brod_repo = https://github.com/klarna/brod.git
+pkg_brod_commit = master
+
+PACKAGES += bson
+pkg_bson_name = bson
+pkg_bson_description = BSON documents in Erlang, see bsonspec.org
+pkg_bson_homepage = https://github.com/comtihon/bson-erlang
+pkg_bson_fetch = git
+pkg_bson_repo = https://github.com/comtihon/bson-erlang
+pkg_bson_commit = master
+
+PACKAGES += bullet
+pkg_bullet_name = bullet
+pkg_bullet_description = Simple, reliable, efficient streaming for Cowboy.
+pkg_bullet_homepage = http://ninenines.eu
+pkg_bullet_fetch = git
+pkg_bullet_repo = https://github.com/ninenines/bullet
+pkg_bullet_commit = master
+
+PACKAGES += cache
+pkg_cache_name = cache
+pkg_cache_description = Erlang in-memory cache
+pkg_cache_homepage = https://github.com/fogfish/cache
+pkg_cache_fetch = git
+pkg_cache_repo = https://github.com/fogfish/cache
+pkg_cache_commit = master
+
+PACKAGES += cake
+pkg_cake_name = cake
+pkg_cake_description = Really simple terminal colorization
+pkg_cake_homepage = https://github.com/darach/cake-erl
+pkg_cake_fetch = git
+pkg_cake_repo = https://github.com/darach/cake-erl
+pkg_cake_commit = master
+
+PACKAGES += carotene
+pkg_carotene_name = carotene
+pkg_carotene_description = Real-time server
+pkg_carotene_homepage = https://github.com/carotene/carotene
+pkg_carotene_fetch = git
+pkg_carotene_repo = https://github.com/carotene/carotene
+pkg_carotene_commit = master
+
+PACKAGES += cberl
+pkg_cberl_name = cberl
+pkg_cberl_description = NIF based Erlang bindings for Couchbase
+pkg_cberl_homepage = https://github.com/chitika/cberl
+pkg_cberl_fetch = git
+pkg_cberl_repo = https://github.com/chitika/cberl
+pkg_cberl_commit = master
+
+PACKAGES += cecho
+pkg_cecho_name = cecho
+pkg_cecho_description = An ncurses library for Erlang
+pkg_cecho_homepage = https://github.com/mazenharake/cecho
+pkg_cecho_fetch = git
+pkg_cecho_repo = https://github.com/mazenharake/cecho
+pkg_cecho_commit = master
+
+PACKAGES += cferl
+pkg_cferl_name = cferl
+pkg_cferl_description = Rackspace / Open Stack Cloud Files Erlang Client
+pkg_cferl_homepage = https://github.com/ddossot/cferl
+pkg_cferl_fetch = git
+pkg_cferl_repo = https://github.com/ddossot/cferl
+pkg_cferl_commit = master
+
+PACKAGES += chaos_monkey
+pkg_chaos_monkey_name = chaos_monkey
+pkg_chaos_monkey_description = This is The CHAOS MONKEY. It will kill your processes.
+pkg_chaos_monkey_homepage = https://github.com/dLuna/chaos_monkey
+pkg_chaos_monkey_fetch = git
+pkg_chaos_monkey_repo = https://github.com/dLuna/chaos_monkey
+pkg_chaos_monkey_commit = master
+
+PACKAGES += check_node
+pkg_check_node_name = check_node
+pkg_check_node_description = Nagios Scripts for monitoring Riak
+pkg_check_node_homepage = https://github.com/basho-labs/riak_nagios
+pkg_check_node_fetch = git
+pkg_check_node_repo = https://github.com/basho-labs/riak_nagios
+pkg_check_node_commit = master
+
+PACKAGES += chronos
+pkg_chronos_name = chronos
+pkg_chronos_description = Timer module for Erlang that makes it easy to abstract time out of the tests.
+pkg_chronos_homepage = https://github.com/lehoff/chronos
+pkg_chronos_fetch = git
+pkg_chronos_repo = https://github.com/lehoff/chronos
+pkg_chronos_commit = master
+
+PACKAGES += chumak
+pkg_chumak_name = chumak
+pkg_chumak_description = Pure Erlang implementation of ZeroMQ Message Transport Protocol.
+pkg_chumak_homepage = http://choven.ca
+pkg_chumak_fetch = git
+pkg_chumak_repo = https://github.com/chovencorp/chumak
+pkg_chumak_commit = master
+
+PACKAGES += cl
+pkg_cl_name = cl
+pkg_cl_description = OpenCL binding for Erlang
+pkg_cl_homepage = https://github.com/tonyrog/cl
+pkg_cl_fetch = git
+pkg_cl_repo = https://github.com/tonyrog/cl
+pkg_cl_commit = master
+
+PACKAGES += clique
+pkg_clique_name = clique
+pkg_clique_description = CLI Framework for Erlang
+pkg_clique_homepage = https://github.com/basho/clique
+pkg_clique_fetch = git
+pkg_clique_repo = https://github.com/basho/clique
+pkg_clique_commit = develop
+
+PACKAGES += cloudi_core
+pkg_cloudi_core_name = cloudi_core
+pkg_cloudi_core_description = CloudI internal service runtime
+pkg_cloudi_core_homepage = http://cloudi.org/
+pkg_cloudi_core_fetch = git
+pkg_cloudi_core_repo = https://github.com/CloudI/cloudi_core
+pkg_cloudi_core_commit = master
+
+PACKAGES += cloudi_service_api_requests
+pkg_cloudi_service_api_requests_name = cloudi_service_api_requests
+pkg_cloudi_service_api_requests_description = CloudI Service API requests (JSON-RPC/Erlang-term support)
+pkg_cloudi_service_api_requests_homepage = http://cloudi.org/
+pkg_cloudi_service_api_requests_fetch = git
+pkg_cloudi_service_api_requests_repo = https://github.com/CloudI/cloudi_service_api_requests
+pkg_cloudi_service_api_requests_commit = master
+
+PACKAGES += cloudi_service_db
+pkg_cloudi_service_db_name = cloudi_service_db
+pkg_cloudi_service_db_description = CloudI Database (in-memory/testing/generic)
+pkg_cloudi_service_db_homepage = http://cloudi.org/
+pkg_cloudi_service_db_fetch = git
+pkg_cloudi_service_db_repo = https://github.com/CloudI/cloudi_service_db
+pkg_cloudi_service_db_commit = master
+
+PACKAGES += cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_name = cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_description = Cassandra CloudI Service
+pkg_cloudi_service_db_cassandra_homepage = http://cloudi.org/
+pkg_cloudi_service_db_cassandra_fetch = git
+pkg_cloudi_service_db_cassandra_repo = https://github.com/CloudI/cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_commit = master
+
+PACKAGES += cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_name = cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_description = Cassandra CQL CloudI Service
+pkg_cloudi_service_db_cassandra_cql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_cassandra_cql_fetch = git
+pkg_cloudi_service_db_cassandra_cql_repo = https://github.com/CloudI/cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_commit = master
+
+PACKAGES += cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_name = cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_description = CouchDB CloudI Service
+pkg_cloudi_service_db_couchdb_homepage = http://cloudi.org/
+pkg_cloudi_service_db_couchdb_fetch = git
+pkg_cloudi_service_db_couchdb_repo = https://github.com/CloudI/cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_commit = master
+
+PACKAGES += cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_name = cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_description = elasticsearch CloudI Service
+pkg_cloudi_service_db_elasticsearch_homepage = http://cloudi.org/
+pkg_cloudi_service_db_elasticsearch_fetch = git
+pkg_cloudi_service_db_elasticsearch_repo = https://github.com/CloudI/cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_commit = master
+
+PACKAGES += cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_name = cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_description = memcached CloudI Service
+pkg_cloudi_service_db_memcached_homepage = http://cloudi.org/
+pkg_cloudi_service_db_memcached_fetch = git
+pkg_cloudi_service_db_memcached_repo = https://github.com/CloudI/cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_commit = master
+
+PACKAGES += cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_name = cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_description = MySQL CloudI Service
+pkg_cloudi_service_db_mysql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_mysql_fetch = git
+pkg_cloudi_service_db_mysql_repo = https://github.com/CloudI/cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_commit = master
+
+PACKAGES += cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_name = cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_description = PostgreSQL CloudI Service
+pkg_cloudi_service_db_pgsql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_pgsql_fetch = git
+pkg_cloudi_service_db_pgsql_repo = https://github.com/CloudI/cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_commit = master
+
+PACKAGES += cloudi_service_db_riak
+pkg_cloudi_service_db_riak_name = cloudi_service_db_riak
+pkg_cloudi_service_db_riak_description = Riak CloudI Service
+pkg_cloudi_service_db_riak_homepage = http://cloudi.org/
+pkg_cloudi_service_db_riak_fetch = git
+pkg_cloudi_service_db_riak_repo = https://github.com/CloudI/cloudi_service_db_riak
+pkg_cloudi_service_db_riak_commit = master
+
+PACKAGES += cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_name = cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_description = Tokyo Tyrant CloudI Service
+pkg_cloudi_service_db_tokyotyrant_homepage = http://cloudi.org/
+pkg_cloudi_service_db_tokyotyrant_fetch = git
+pkg_cloudi_service_db_tokyotyrant_repo = https://github.com/CloudI/cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_commit = master
+
+PACKAGES += cloudi_service_filesystem
+pkg_cloudi_service_filesystem_name = cloudi_service_filesystem
+pkg_cloudi_service_filesystem_description = Filesystem CloudI Service
+pkg_cloudi_service_filesystem_homepage = http://cloudi.org/
+pkg_cloudi_service_filesystem_fetch = git
+pkg_cloudi_service_filesystem_repo = https://github.com/CloudI/cloudi_service_filesystem
+pkg_cloudi_service_filesystem_commit = master
+
+PACKAGES += cloudi_service_http_client
+pkg_cloudi_service_http_client_name = cloudi_service_http_client
+pkg_cloudi_service_http_client_description = HTTP client CloudI Service
+pkg_cloudi_service_http_client_homepage = http://cloudi.org/
+pkg_cloudi_service_http_client_fetch = git
+pkg_cloudi_service_http_client_repo = https://github.com/CloudI/cloudi_service_http_client
+pkg_cloudi_service_http_client_commit = master
+
+PACKAGES += cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_name = cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_description = cowboy HTTP/HTTPS CloudI Service
+pkg_cloudi_service_http_cowboy_homepage = http://cloudi.org/
+pkg_cloudi_service_http_cowboy_fetch = git
+pkg_cloudi_service_http_cowboy_repo = https://github.com/CloudI/cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_commit = master
+
+PACKAGES += cloudi_service_http_elli
+pkg_cloudi_service_http_elli_name = cloudi_service_http_elli
+pkg_cloudi_service_http_elli_description = elli HTTP CloudI Service
+pkg_cloudi_service_http_elli_homepage = http://cloudi.org/
+pkg_cloudi_service_http_elli_fetch = git
+pkg_cloudi_service_http_elli_repo = https://github.com/CloudI/cloudi_service_http_elli
+pkg_cloudi_service_http_elli_commit = master
+
+PACKAGES += cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_name = cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_description = Map/Reduce CloudI Service
+pkg_cloudi_service_map_reduce_homepage = http://cloudi.org/
+pkg_cloudi_service_map_reduce_fetch = git
+pkg_cloudi_service_map_reduce_repo = https://github.com/CloudI/cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_commit = master
+
+PACKAGES += cloudi_service_oauth1
+pkg_cloudi_service_oauth1_name = cloudi_service_oauth1
+pkg_cloudi_service_oauth1_description = OAuth v1.0 CloudI Service
+pkg_cloudi_service_oauth1_homepage = http://cloudi.org/
+pkg_cloudi_service_oauth1_fetch = git
+pkg_cloudi_service_oauth1_repo = https://github.com/CloudI/cloudi_service_oauth1
+pkg_cloudi_service_oauth1_commit = master
+
+PACKAGES += cloudi_service_queue
+pkg_cloudi_service_queue_name = cloudi_service_queue
+pkg_cloudi_service_queue_description = Persistent Queue Service
+pkg_cloudi_service_queue_homepage = http://cloudi.org/
+pkg_cloudi_service_queue_fetch = git
+pkg_cloudi_service_queue_repo = https://github.com/CloudI/cloudi_service_queue
+pkg_cloudi_service_queue_commit = master
+
+PACKAGES += cloudi_service_quorum
+pkg_cloudi_service_quorum_name = cloudi_service_quorum
+pkg_cloudi_service_quorum_description = CloudI Quorum Service
+pkg_cloudi_service_quorum_homepage = http://cloudi.org/
+pkg_cloudi_service_quorum_fetch = git
+pkg_cloudi_service_quorum_repo = https://github.com/CloudI/cloudi_service_quorum
+pkg_cloudi_service_quorum_commit = master
+
+PACKAGES += cloudi_service_router
+pkg_cloudi_service_router_name = cloudi_service_router
+pkg_cloudi_service_router_description = CloudI Router Service
+pkg_cloudi_service_router_homepage = http://cloudi.org/
+pkg_cloudi_service_router_fetch = git
+pkg_cloudi_service_router_repo = https://github.com/CloudI/cloudi_service_router
+pkg_cloudi_service_router_commit = master
+
+PACKAGES += cloudi_service_tcp
+pkg_cloudi_service_tcp_name = cloudi_service_tcp
+pkg_cloudi_service_tcp_description = TCP CloudI Service
+pkg_cloudi_service_tcp_homepage = http://cloudi.org/
+pkg_cloudi_service_tcp_fetch = git
+pkg_cloudi_service_tcp_repo = https://github.com/CloudI/cloudi_service_tcp
+pkg_cloudi_service_tcp_commit = master
+
+PACKAGES += cloudi_service_timers
+pkg_cloudi_service_timers_name = cloudi_service_timers
+pkg_cloudi_service_timers_description = Timers CloudI Service
+pkg_cloudi_service_timers_homepage = http://cloudi.org/
+pkg_cloudi_service_timers_fetch = git
+pkg_cloudi_service_timers_repo = https://github.com/CloudI/cloudi_service_timers
+pkg_cloudi_service_timers_commit = master
+
+PACKAGES += cloudi_service_udp
+pkg_cloudi_service_udp_name = cloudi_service_udp
+pkg_cloudi_service_udp_description = UDP CloudI Service
+pkg_cloudi_service_udp_homepage = http://cloudi.org/
+pkg_cloudi_service_udp_fetch = git
+pkg_cloudi_service_udp_repo = https://github.com/CloudI/cloudi_service_udp
+pkg_cloudi_service_udp_commit = master
+
+PACKAGES += cloudi_service_validate
+pkg_cloudi_service_validate_name = cloudi_service_validate
+pkg_cloudi_service_validate_description = CloudI Validate Service
+pkg_cloudi_service_validate_homepage = http://cloudi.org/
+pkg_cloudi_service_validate_fetch = git
+pkg_cloudi_service_validate_repo = https://github.com/CloudI/cloudi_service_validate
+pkg_cloudi_service_validate_commit = master
+
+PACKAGES += cloudi_service_zeromq
+pkg_cloudi_service_zeromq_name = cloudi_service_zeromq
+pkg_cloudi_service_zeromq_description = ZeroMQ CloudI Service
+pkg_cloudi_service_zeromq_homepage = http://cloudi.org/
+pkg_cloudi_service_zeromq_fetch = git
+pkg_cloudi_service_zeromq_repo = https://github.com/CloudI/cloudi_service_zeromq
+pkg_cloudi_service_zeromq_commit = master
+
+PACKAGES += cluster_info
+pkg_cluster_info_name = cluster_info
+pkg_cluster_info_description = Fork of Hibari's nifty cluster_info OTP app
+pkg_cluster_info_homepage = https://github.com/basho/cluster_info
+pkg_cluster_info_fetch = git
+pkg_cluster_info_repo = https://github.com/basho/cluster_info
+pkg_cluster_info_commit = master
+
+PACKAGES += color
+pkg_color_name = color
+pkg_color_description = ANSI colors for your Erlang
+pkg_color_homepage = https://github.com/julianduque/erlang-color
+pkg_color_fetch = git
+pkg_color_repo = https://github.com/julianduque/erlang-color
+pkg_color_commit = master
+
+PACKAGES += confetti
+pkg_confetti_name = confetti
+pkg_confetti_description = Erlang configuration provider / application:get_env/2 on steroids
+pkg_confetti_homepage = https://github.com/jtendo/confetti
+pkg_confetti_fetch = git
+pkg_confetti_repo = https://github.com/jtendo/confetti
+pkg_confetti_commit = master
+
+PACKAGES += couchbeam
+pkg_couchbeam_name = couchbeam
+pkg_couchbeam_description = Apache CouchDB client in Erlang
+pkg_couchbeam_homepage = https://github.com/benoitc/couchbeam
+pkg_couchbeam_fetch = git
+pkg_couchbeam_repo = https://github.com/benoitc/couchbeam
+pkg_couchbeam_commit = master
+
+PACKAGES += covertool
+pkg_covertool_name = covertool
+pkg_covertool_description = Tool to convert Erlang cover data files into Cobertura XML reports
+pkg_covertool_homepage = https://github.com/idubrov/covertool
+pkg_covertool_fetch = git
+pkg_covertool_repo = https://github.com/idubrov/covertool
+pkg_covertool_commit = master
+
+PACKAGES += cowboy
+pkg_cowboy_name = cowboy
+pkg_cowboy_description = Small, fast and modular HTTP server.
+pkg_cowboy_homepage = http://ninenines.eu
+pkg_cowboy_fetch = git
+pkg_cowboy_repo = https://github.com/ninenines/cowboy
+pkg_cowboy_commit = 1.0.4
+
+PACKAGES += cowdb
+pkg_cowdb_name = cowdb
+pkg_cowdb_description = Pure Key/Value database library for Erlang Applications
+pkg_cowdb_homepage = https://github.com/refuge/cowdb
+pkg_cowdb_fetch = git
+pkg_cowdb_repo = https://github.com/refuge/cowdb
+pkg_cowdb_commit = master
+
+PACKAGES += cowlib
+pkg_cowlib_name = cowlib
+pkg_cowlib_description = Support library for manipulating Web protocols.
+pkg_cowlib_homepage = http://ninenines.eu
+pkg_cowlib_fetch = git
+pkg_cowlib_repo = https://github.com/ninenines/cowlib
+pkg_cowlib_commit = 1.0.2
+
+PACKAGES += cpg
+pkg_cpg_name = cpg
+pkg_cpg_description = CloudI Process Groups
+pkg_cpg_homepage = https://github.com/okeuday/cpg
+pkg_cpg_fetch = git
+pkg_cpg_repo = https://github.com/okeuday/cpg
+pkg_cpg_commit = master
+
+PACKAGES += cqerl
+pkg_cqerl_name = cqerl
+pkg_cqerl_description = Native Erlang CQL client for Cassandra
+pkg_cqerl_homepage = https://matehat.github.io/cqerl/
+pkg_cqerl_fetch = git
+pkg_cqerl_repo = https://github.com/matehat/cqerl
+pkg_cqerl_commit = master
+
+PACKAGES += cr
+pkg_cr_name = cr
+pkg_cr_description = Chain Replication
+pkg_cr_homepage = https://synrc.com/apps/cr/doc/cr.htm
+pkg_cr_fetch = git
+pkg_cr_repo = https://github.com/spawnproc/cr
+pkg_cr_commit = master
+
+PACKAGES += cuttlefish
+pkg_cuttlefish_name = cuttlefish
+pkg_cuttlefish_description = never lose your childlike sense of wonder baby cuttlefish, promise me?
+pkg_cuttlefish_homepage = https://github.com/basho/cuttlefish
+pkg_cuttlefish_fetch = git
+pkg_cuttlefish_repo = https://github.com/basho/cuttlefish
+pkg_cuttlefish_commit = master
+
+PACKAGES += damocles
+pkg_damocles_name = damocles
+pkg_damocles_description = Erlang library for generating adversarial network conditions for QAing distributed applications/systems on a single Linux box.
+pkg_damocles_homepage = https://github.com/lostcolony/damocles
+pkg_damocles_fetch = git
+pkg_damocles_repo = https://github.com/lostcolony/damocles
+pkg_damocles_commit = master
+
+PACKAGES += debbie
+pkg_debbie_name = debbie
+pkg_debbie_description = .DEB Built In Erlang
+pkg_debbie_homepage = https://github.com/crownedgrouse/debbie
+pkg_debbie_fetch = git
+pkg_debbie_repo = https://github.com/crownedgrouse/debbie
+pkg_debbie_commit = master
+
+PACKAGES += decimal
+pkg_decimal_name = decimal
+pkg_decimal_description = An Erlang decimal arithmetic library
+pkg_decimal_homepage = https://github.com/tim/erlang-decimal
+pkg_decimal_fetch = git
+pkg_decimal_repo = https://github.com/tim/erlang-decimal
+pkg_decimal_commit = master
+
+PACKAGES += detergent
+pkg_detergent_name = detergent
+pkg_detergent_description = An emulsifying Erlang SOAP library
+pkg_detergent_homepage = https://github.com/devinus/detergent
+pkg_detergent_fetch = git
+pkg_detergent_repo = https://github.com/devinus/detergent
+pkg_detergent_commit = master
+
+PACKAGES += detest
+pkg_detest_name = detest
+pkg_detest_description = Tool for running tests on a cluster of erlang nodes
+pkg_detest_homepage = https://github.com/biokoda/detest
+pkg_detest_fetch = git
+pkg_detest_repo = https://github.com/biokoda/detest
+pkg_detest_commit = master
+
+PACKAGES += dh_date
+pkg_dh_date_name = dh_date
+pkg_dh_date_description = Date formatting / parsing library for erlang
+pkg_dh_date_homepage = https://github.com/daleharvey/dh_date
+pkg_dh_date_fetch = git
+pkg_dh_date_repo = https://github.com/daleharvey/dh_date
+pkg_dh_date_commit = master
+
+PACKAGES += dirbusterl
+pkg_dirbusterl_name = dirbusterl
+pkg_dirbusterl_description = DirBuster successor in Erlang
+pkg_dirbusterl_homepage = https://github.com/silentsignal/DirBustErl
+pkg_dirbusterl_fetch = git
+pkg_dirbusterl_repo = https://github.com/silentsignal/DirBustErl
+pkg_dirbusterl_commit = master
+
+PACKAGES += dispcount
+pkg_dispcount_name = dispcount
+pkg_dispcount_description = Erlang task dispatcher based on ETS counters.
+pkg_dispcount_homepage = https://github.com/ferd/dispcount
+pkg_dispcount_fetch = git
+pkg_dispcount_repo = https://github.com/ferd/dispcount
+pkg_dispcount_commit = master
+
+PACKAGES += dlhttpc
+pkg_dlhttpc_name = dlhttpc
+pkg_dlhttpc_description = dispcount-based lhttpc fork for massive amounts of requests to limited endpoints
+pkg_dlhttpc_homepage = https://github.com/ferd/dlhttpc
+pkg_dlhttpc_fetch = git
+pkg_dlhttpc_repo = https://github.com/ferd/dlhttpc
+pkg_dlhttpc_commit = master
+
+PACKAGES += dns
+pkg_dns_name = dns
+pkg_dns_description = Erlang DNS library
+pkg_dns_homepage = https://github.com/aetrion/dns_erlang
+pkg_dns_fetch = git
+pkg_dns_repo = https://github.com/aetrion/dns_erlang
+pkg_dns_commit = master
+
+PACKAGES += dnssd
+pkg_dnssd_name = dnssd
+pkg_dnssd_description = Erlang interface to Apple's Bonjour DNS Service Discovery implementation
+pkg_dnssd_homepage = https://github.com/benoitc/dnssd_erlang
+pkg_dnssd_fetch = git
+pkg_dnssd_repo = https://github.com/benoitc/dnssd_erlang
+pkg_dnssd_commit = master
+
+PACKAGES += dynamic_compile
+pkg_dynamic_compile_name = dynamic_compile
+pkg_dynamic_compile_description = compile and load erlang modules from string input
+pkg_dynamic_compile_homepage = https://github.com/jkvor/dynamic_compile
+pkg_dynamic_compile_fetch = git
+pkg_dynamic_compile_repo = https://github.com/jkvor/dynamic_compile
+pkg_dynamic_compile_commit = master
+
+PACKAGES += e2
+pkg_e2_name = e2
+pkg_e2_description = Library to simplify writing correct OTP applications.
+pkg_e2_homepage = http://e2project.org
+pkg_e2_fetch = git
+pkg_e2_repo = https://github.com/gar1t/e2
+pkg_e2_commit = master
+
+PACKAGES += eamf
+pkg_eamf_name = eamf
+pkg_eamf_description = eAMF provides Action Message Format (AMF) support for Erlang
+pkg_eamf_homepage = https://github.com/mrinalwadhwa/eamf
+pkg_eamf_fetch = git
+pkg_eamf_repo = https://github.com/mrinalwadhwa/eamf
+pkg_eamf_commit = master
+
+PACKAGES += eavro
+pkg_eavro_name = eavro
+pkg_eavro_description = Apache Avro encoder/decoder
+pkg_eavro_homepage = https://github.com/SIfoxDevTeam/eavro
+pkg_eavro_fetch = git
+pkg_eavro_repo = https://github.com/SIfoxDevTeam/eavro
+pkg_eavro_commit = master
+
+PACKAGES += ecapnp
+pkg_ecapnp_name = ecapnp
+pkg_ecapnp_description = Cap'n Proto library for Erlang
+pkg_ecapnp_homepage = https://github.com/kaos/ecapnp
+pkg_ecapnp_fetch = git
+pkg_ecapnp_repo = https://github.com/kaos/ecapnp
+pkg_ecapnp_commit = master
+
+PACKAGES += econfig
+pkg_econfig_name = econfig
+pkg_econfig_description = simple Erlang config handler using INI files
+pkg_econfig_homepage = https://github.com/benoitc/econfig
+pkg_econfig_fetch = git
+pkg_econfig_repo = https://github.com/benoitc/econfig
+pkg_econfig_commit = master
+
+PACKAGES += edate
+pkg_edate_name = edate
+pkg_edate_description = date manipulation library for erlang
+pkg_edate_homepage = https://github.com/dweldon/edate
+pkg_edate_fetch = git
+pkg_edate_repo = https://github.com/dweldon/edate
+pkg_edate_commit = master
+
+PACKAGES += edgar
+pkg_edgar_name = edgar
+pkg_edgar_description = Erlang Does GNU AR
+pkg_edgar_homepage = https://github.com/crownedgrouse/edgar
+pkg_edgar_fetch = git
+pkg_edgar_repo = https://github.com/crownedgrouse/edgar
+pkg_edgar_commit = master
+
+PACKAGES += edis
+pkg_edis_name = edis
+pkg_edis_description = An Erlang implementation of Redis KV Store
+pkg_edis_homepage = http://inaka.github.com/edis/
+pkg_edis_fetch = git
+pkg_edis_repo = https://github.com/inaka/edis
+pkg_edis_commit = master
+
+PACKAGES += edns
+pkg_edns_name = edns
+pkg_edns_description = Erlang/OTP DNS server
+pkg_edns_homepage = https://github.com/hcvst/erlang-dns
+pkg_edns_fetch = git
+pkg_edns_repo = https://github.com/hcvst/erlang-dns
+pkg_edns_commit = master
+
+PACKAGES += edown
+pkg_edown_name = edown
+pkg_edown_description = EDoc extension for generating Github-flavored Markdown
+pkg_edown_homepage = https://github.com/uwiger/edown
+pkg_edown_fetch = git
+pkg_edown_repo = https://github.com/uwiger/edown
+pkg_edown_commit = master
+
+PACKAGES += eep
+pkg_eep_name = eep
+pkg_eep_description = Erlang Easy Profiling (eep) application provides a way to analyze application performance and call hierarchy
+pkg_eep_homepage = https://github.com/virtan/eep
+pkg_eep_fetch = git
+pkg_eep_repo = https://github.com/virtan/eep
+pkg_eep_commit = master
+
+PACKAGES += eep_app
+pkg_eep_app_name = eep_app
+pkg_eep_app_description = Embedded Event Processing
+pkg_eep_app_homepage = https://github.com/darach/eep-erl
+pkg_eep_app_fetch = git
+pkg_eep_app_repo = https://github.com/darach/eep-erl
+pkg_eep_app_commit = master
+
+PACKAGES += efene
+pkg_efene_name = efene
+pkg_efene_description = Alternative syntax for the Erlang Programming Language focusing on simplicity, ease of use and programmer UX
+pkg_efene_homepage = https://github.com/efene/efene
+pkg_efene_fetch = git
+pkg_efene_repo = https://github.com/efene/efene
+pkg_efene_commit = master
+
+PACKAGES += egeoip
+pkg_egeoip_name = egeoip
+pkg_egeoip_description = Erlang IP Geolocation module, currently supporting the MaxMind GeoLite City Database.
+pkg_egeoip_homepage = https://github.com/mochi/egeoip
+pkg_egeoip_fetch = git
+pkg_egeoip_repo = https://github.com/mochi/egeoip
+pkg_egeoip_commit = master
+
+PACKAGES += ehsa
+pkg_ehsa_name = ehsa
+pkg_ehsa_description = Erlang HTTP server basic and digest authentication modules
+pkg_ehsa_homepage = https://bitbucket.org/a12n/ehsa
+pkg_ehsa_fetch = hg
+pkg_ehsa_repo = https://bitbucket.org/a12n/ehsa
+pkg_ehsa_commit = default
+
+PACKAGES += ej
+pkg_ej_name = ej
+pkg_ej_description = Helper module for working with Erlang terms representing JSON
+pkg_ej_homepage = https://github.com/seth/ej
+pkg_ej_fetch = git
+pkg_ej_repo = https://github.com/seth/ej
+pkg_ej_commit = master
+
+PACKAGES += ejabberd
+pkg_ejabberd_name = ejabberd
+pkg_ejabberd_description = Robust, ubiquitous and massively scalable Jabber / XMPP Instant Messaging platform
+pkg_ejabberd_homepage = https://github.com/processone/ejabberd
+pkg_ejabberd_fetch = git
+pkg_ejabberd_repo = https://github.com/processone/ejabberd
+pkg_ejabberd_commit = master
+
+PACKAGES += ejwt
+pkg_ejwt_name = ejwt
+pkg_ejwt_description = erlang library for JSON Web Token
+pkg_ejwt_homepage = https://github.com/artefactop/ejwt
+pkg_ejwt_fetch = git
+pkg_ejwt_repo = https://github.com/artefactop/ejwt
+pkg_ejwt_commit = master
+
+PACKAGES += ekaf
+pkg_ekaf_name = ekaf
+pkg_ekaf_description = A minimal, high-performance Kafka client in Erlang.
+pkg_ekaf_homepage = https://github.com/helpshift/ekaf
+pkg_ekaf_fetch = git
+pkg_ekaf_repo = https://github.com/helpshift/ekaf
+pkg_ekaf_commit = master
+
+PACKAGES += elarm
+pkg_elarm_name = elarm
+pkg_elarm_description = Alarm Manager for Erlang.
+pkg_elarm_homepage = https://github.com/esl/elarm
+pkg_elarm_fetch = git
+pkg_elarm_repo = https://github.com/esl/elarm
+pkg_elarm_commit = master
+
+PACKAGES += eleveldb
+pkg_eleveldb_name = eleveldb
+pkg_eleveldb_description = Erlang LevelDB API
+pkg_eleveldb_homepage = https://github.com/basho/eleveldb
+pkg_eleveldb_fetch = git
+pkg_eleveldb_repo = https://github.com/basho/eleveldb
+pkg_eleveldb_commit = master
+
+PACKAGES += elixir
+pkg_elixir_name = elixir
+pkg_elixir_description = Elixir is a dynamic, functional language designed for building scalable and maintainable applications
+pkg_elixir_homepage = https://elixir-lang.org/
+pkg_elixir_fetch = git
+pkg_elixir_repo = https://github.com/elixir-lang/elixir
+pkg_elixir_commit = master
+
+PACKAGES += elli
+pkg_elli_name = elli
+pkg_elli_description = Simple, robust and performant Erlang web server
+pkg_elli_homepage = https://github.com/elli-lib/elli
+pkg_elli_fetch = git
+pkg_elli_repo = https://github.com/elli-lib/elli
+pkg_elli_commit = master
+
+PACKAGES += elvis
+pkg_elvis_name = elvis
+pkg_elvis_description = Erlang Style Reviewer
+pkg_elvis_homepage = https://github.com/inaka/elvis
+pkg_elvis_fetch = git
+pkg_elvis_repo = https://github.com/inaka/elvis
+pkg_elvis_commit = master
+
+PACKAGES += emagick
+pkg_emagick_name = emagick
+pkg_emagick_description = Wrapper for Graphics/ImageMagick command line tool.
+pkg_emagick_homepage = https://github.com/kivra/emagick
+pkg_emagick_fetch = git
+pkg_emagick_repo = https://github.com/kivra/emagick
+pkg_emagick_commit = master
+
+PACKAGES += emysql
+pkg_emysql_name = emysql
+pkg_emysql_description = Stable, pure Erlang MySQL driver.
+pkg_emysql_homepage = https://github.com/Eonblast/Emysql
+pkg_emysql_fetch = git
+pkg_emysql_repo = https://github.com/Eonblast/Emysql
+pkg_emysql_commit = master
+
+PACKAGES += enm
+pkg_enm_name = enm
+pkg_enm_description = Erlang driver for nanomsg
+pkg_enm_homepage = https://github.com/basho/enm
+pkg_enm_fetch = git
+pkg_enm_repo = https://github.com/basho/enm
+pkg_enm_commit = master
+
+PACKAGES += entop
+pkg_entop_name = entop
+pkg_entop_description = A top-like tool for monitoring an Erlang node
+pkg_entop_homepage = https://github.com/mazenharake/entop
+pkg_entop_fetch = git
+pkg_entop_repo = https://github.com/mazenharake/entop
+pkg_entop_commit = master
+
+PACKAGES += epcap
+pkg_epcap_name = epcap
+pkg_epcap_description = Erlang packet capture interface using pcap
+pkg_epcap_homepage = https://github.com/msantos/epcap
+pkg_epcap_fetch = git
+pkg_epcap_repo = https://github.com/msantos/epcap
+pkg_epcap_commit = master
+
+PACKAGES += eper
+pkg_eper_name = eper
+pkg_eper_description = Erlang performance and debugging tools.
+pkg_eper_homepage = https://github.com/massemanet/eper
+pkg_eper_fetch = git
+pkg_eper_repo = https://github.com/massemanet/eper
+pkg_eper_commit = master
+
+PACKAGES += epgsql
+pkg_epgsql_name = epgsql
+pkg_epgsql_description = Erlang PostgreSQL client library.
+pkg_epgsql_homepage = https://github.com/epgsql/epgsql
+pkg_epgsql_fetch = git
+pkg_epgsql_repo = https://github.com/epgsql/epgsql
+pkg_epgsql_commit = master
+
+PACKAGES += episcina
+pkg_episcina_name = episcina
+pkg_episcina_description = A simple, non-intrusive resource pool for connections
+pkg_episcina_homepage = https://github.com/erlware/episcina
+pkg_episcina_fetch = git
+pkg_episcina_repo = https://github.com/erlware/episcina
+pkg_episcina_commit = master
+
+PACKAGES += eplot
+pkg_eplot_name = eplot
+pkg_eplot_description = A plot engine written in erlang.
+pkg_eplot_homepage = https://github.com/psyeugenic/eplot
+pkg_eplot_fetch = git
+pkg_eplot_repo = https://github.com/psyeugenic/eplot
+pkg_eplot_commit = master
+
+PACKAGES += epocxy
+pkg_epocxy_name = epocxy
+pkg_epocxy_description = Erlang Patterns of Concurrency
+pkg_epocxy_homepage = https://github.com/duomark/epocxy
+pkg_epocxy_fetch = git
+pkg_epocxy_repo = https://github.com/duomark/epocxy
+pkg_epocxy_commit = master
+
+PACKAGES += epubnub
+pkg_epubnub_name = epubnub
+pkg_epubnub_description = Erlang PubNub API
+pkg_epubnub_homepage = https://github.com/tsloughter/epubnub
+pkg_epubnub_fetch = git
+pkg_epubnub_repo = https://github.com/tsloughter/epubnub
+pkg_epubnub_commit = master
+
+PACKAGES += eqm
+pkg_eqm_name = eqm
+pkg_eqm_description = Erlang pub sub with supply-demand channels
+pkg_eqm_homepage = https://github.com/loucash/eqm
+pkg_eqm_fetch = git
+pkg_eqm_repo = https://github.com/loucash/eqm
+pkg_eqm_commit = master
+
+PACKAGES += eredis
+pkg_eredis_name = eredis
+pkg_eredis_description = Erlang Redis client
+pkg_eredis_homepage = https://github.com/wooga/eredis
+pkg_eredis_fetch = git
+pkg_eredis_repo = https://github.com/wooga/eredis
+pkg_eredis_commit = master
+
+PACKAGES += eredis_pool
+pkg_eredis_pool_name = eredis_pool
+pkg_eredis_pool_description = eredis_pool is a pool of Redis clients, using eredis and poolboy.
+pkg_eredis_pool_homepage = https://github.com/hiroeorz/eredis_pool
+pkg_eredis_pool_fetch = git
+pkg_eredis_pool_repo = https://github.com/hiroeorz/eredis_pool
+pkg_eredis_pool_commit = master
+
+PACKAGES += erl_streams
+pkg_erl_streams_name = erl_streams
+pkg_erl_streams_description = Streams in Erlang
+pkg_erl_streams_homepage = https://github.com/epappas/erl_streams
+pkg_erl_streams_fetch = git
+pkg_erl_streams_repo = https://github.com/epappas/erl_streams
+pkg_erl_streams_commit = master
+
+PACKAGES += erlang_cep
+pkg_erlang_cep_name = erlang_cep
+pkg_erlang_cep_description = A basic CEP package written in erlang
+pkg_erlang_cep_homepage = https://github.com/danmacklin/erlang_cep
+pkg_erlang_cep_fetch = git
+pkg_erlang_cep_repo = https://github.com/danmacklin/erlang_cep
+pkg_erlang_cep_commit = master
+
+PACKAGES += erlang_js
+pkg_erlang_js_name = erlang_js
+pkg_erlang_js_description = A linked-in driver for Erlang to Mozilla's Spidermonkey Javascript runtime.
+pkg_erlang_js_homepage = https://github.com/basho/erlang_js
+pkg_erlang_js_fetch = git
+pkg_erlang_js_repo = https://github.com/basho/erlang_js
+pkg_erlang_js_commit = master
+
+PACKAGES += erlang_localtime
+pkg_erlang_localtime_name = erlang_localtime
+pkg_erlang_localtime_description = Erlang library for conversion from one local time to another
+pkg_erlang_localtime_homepage = https://github.com/dmitryme/erlang_localtime
+pkg_erlang_localtime_fetch = git
+pkg_erlang_localtime_repo = https://github.com/dmitryme/erlang_localtime
+pkg_erlang_localtime_commit = master
+
+PACKAGES += erlang_smtp
+pkg_erlang_smtp_name = erlang_smtp
+pkg_erlang_smtp_description = Erlang SMTP and POP3 server code.
+pkg_erlang_smtp_homepage = https://github.com/tonyg/erlang-smtp
+pkg_erlang_smtp_fetch = git
+pkg_erlang_smtp_repo = https://github.com/tonyg/erlang-smtp
+pkg_erlang_smtp_commit = master
+
+PACKAGES += erlang_term
+pkg_erlang_term_name = erlang_term
+pkg_erlang_term_description = Erlang Term Info
+pkg_erlang_term_homepage = https://github.com/okeuday/erlang_term
+pkg_erlang_term_fetch = git
+pkg_erlang_term_repo = https://github.com/okeuday/erlang_term
+pkg_erlang_term_commit = master
+
+PACKAGES += erlastic_search
+pkg_erlastic_search_name = erlastic_search
+pkg_erlastic_search_description = An Erlang app for communicating with Elastic Search's rest interface.
+pkg_erlastic_search_homepage = https://github.com/tsloughter/erlastic_search
+pkg_erlastic_search_fetch = git
+pkg_erlastic_search_repo = https://github.com/tsloughter/erlastic_search
+pkg_erlastic_search_commit = master
+
+PACKAGES += erlasticsearch
+pkg_erlasticsearch_name = erlasticsearch
+pkg_erlasticsearch_description = Erlang thrift interface to elastic_search
+pkg_erlasticsearch_homepage = https://github.com/dieswaytoofast/erlasticsearch
+pkg_erlasticsearch_fetch = git
+pkg_erlasticsearch_repo = https://github.com/dieswaytoofast/erlasticsearch
+pkg_erlasticsearch_commit = master
+
+PACKAGES += erlbrake
+pkg_erlbrake_name = erlbrake
+pkg_erlbrake_description = Erlang Airbrake notification client
+pkg_erlbrake_homepage = https://github.com/kenpratt/erlbrake
+pkg_erlbrake_fetch = git
+pkg_erlbrake_repo = https://github.com/kenpratt/erlbrake
+pkg_erlbrake_commit = master
+
+PACKAGES += erlcloud
+pkg_erlcloud_name = erlcloud
+pkg_erlcloud_description = Cloud Computing library for erlang (Amazon EC2, S3, SQS, SimpleDB, Mechanical Turk, ELB)
+pkg_erlcloud_homepage = https://github.com/gleber/erlcloud
+pkg_erlcloud_fetch = git
+pkg_erlcloud_repo = https://github.com/gleber/erlcloud
+pkg_erlcloud_commit = master
+
+PACKAGES += erlcron
+pkg_erlcron_name = erlcron
+pkg_erlcron_description = Erlang cronish system
+pkg_erlcron_homepage = https://github.com/erlware/erlcron
+pkg_erlcron_fetch = git
+pkg_erlcron_repo = https://github.com/erlware/erlcron
+pkg_erlcron_commit = master
+
+PACKAGES += erldb
+pkg_erldb_name = erldb
+pkg_erldb_description = ORM (Object-relational mapping) application implemented in Erlang
+pkg_erldb_homepage = http://erldb.org
+pkg_erldb_fetch = git
+pkg_erldb_repo = https://github.com/erldb/erldb
+pkg_erldb_commit = master
+
+PACKAGES += erldis
+pkg_erldis_name = erldis
+pkg_erldis_description = redis erlang client library
+pkg_erldis_homepage = https://github.com/cstar/erldis
+pkg_erldis_fetch = git
+pkg_erldis_repo = https://github.com/cstar/erldis
+pkg_erldis_commit = master
+
+PACKAGES += erldns
+pkg_erldns_name = erldns
+pkg_erldns_description = DNS server, in erlang.
+pkg_erldns_homepage = https://github.com/aetrion/erl-dns
+pkg_erldns_fetch = git
+pkg_erldns_repo = https://github.com/aetrion/erl-dns
+pkg_erldns_commit = master
+
+PACKAGES += erldocker
+pkg_erldocker_name = erldocker
+pkg_erldocker_description = Docker Remote API client for Erlang
+pkg_erldocker_homepage = https://github.com/proger/erldocker
+pkg_erldocker_fetch = git
+pkg_erldocker_repo = https://github.com/proger/erldocker
+pkg_erldocker_commit = master
+
+PACKAGES += erlfsmon
+pkg_erlfsmon_name = erlfsmon
+pkg_erlfsmon_description = Erlang filesystem event watcher for Linux and OSX
+pkg_erlfsmon_homepage = https://github.com/proger/erlfsmon
+pkg_erlfsmon_fetch = git
+pkg_erlfsmon_repo = https://github.com/proger/erlfsmon
+pkg_erlfsmon_commit = master
+
+PACKAGES += erlgit
+pkg_erlgit_name = erlgit
+pkg_erlgit_description = Erlang convenience wrapper around git executable
+pkg_erlgit_homepage = https://github.com/gleber/erlgit
+pkg_erlgit_fetch = git
+pkg_erlgit_repo = https://github.com/gleber/erlgit
+pkg_erlgit_commit = master
+
+PACKAGES += erlguten
+pkg_erlguten_name = erlguten
+pkg_erlguten_description = ErlGuten is a system for high-quality typesetting, written purely in Erlang.
+pkg_erlguten_homepage = https://github.com/richcarl/erlguten
+pkg_erlguten_fetch = git
+pkg_erlguten_repo = https://github.com/richcarl/erlguten
+pkg_erlguten_commit = master
+
+PACKAGES += erlmc
+pkg_erlmc_name = erlmc
+pkg_erlmc_description = Erlang memcached binary protocol client
+pkg_erlmc_homepage = https://github.com/jkvor/erlmc
+pkg_erlmc_fetch = git
+pkg_erlmc_repo = https://github.com/jkvor/erlmc
+pkg_erlmc_commit = master
+
+PACKAGES += erlmongo
+pkg_erlmongo_name = erlmongo
+pkg_erlmongo_description = Record based Erlang driver for MongoDB with gridfs support
+pkg_erlmongo_homepage = https://github.com/SergejJurecko/erlmongo
+pkg_erlmongo_fetch = git
+pkg_erlmongo_repo = https://github.com/SergejJurecko/erlmongo
+pkg_erlmongo_commit = master
+
+PACKAGES += erlog
+pkg_erlog_name = erlog
+pkg_erlog_description = Prolog interpreter in and for Erlang
+pkg_erlog_homepage = https://github.com/rvirding/erlog
+pkg_erlog_fetch = git
+pkg_erlog_repo = https://github.com/rvirding/erlog
+pkg_erlog_commit = master
+
+PACKAGES += erlpass
+pkg_erlpass_name = erlpass
+pkg_erlpass_description = A library to handle password hashing and changing in a safe manner, independent from any kind of storage whatsoever.
+pkg_erlpass_homepage = https://github.com/ferd/erlpass
+pkg_erlpass_fetch = git
+pkg_erlpass_repo = https://github.com/ferd/erlpass
+pkg_erlpass_commit = master
+
+PACKAGES += erlport
+pkg_erlport_name = erlport
+pkg_erlport_description = ErlPort - connect Erlang to other languages
+pkg_erlport_homepage = https://github.com/hdima/erlport
+pkg_erlport_fetch = git
+pkg_erlport_repo = https://github.com/hdima/erlport
+pkg_erlport_commit = master
+
+PACKAGES += erlsh
+pkg_erlsh_name = erlsh
+pkg_erlsh_description = Erlang shell tools
+pkg_erlsh_homepage = https://github.com/proger/erlsh
+pkg_erlsh_fetch = git
+pkg_erlsh_repo = https://github.com/proger/erlsh
+pkg_erlsh_commit = master
+
+PACKAGES += erlsha2
+pkg_erlsha2_name = erlsha2
+pkg_erlsha2_description = SHA-224, SHA-256, SHA-384, SHA-512 implemented in Erlang NIFs.
+pkg_erlsha2_homepage = https://github.com/vinoski/erlsha2
+pkg_erlsha2_fetch = git
+pkg_erlsha2_repo = https://github.com/vinoski/erlsha2
+pkg_erlsha2_commit = master
+
+PACKAGES += erlsom
+pkg_erlsom_name = erlsom
+pkg_erlsom_description = XML parser for Erlang
+pkg_erlsom_homepage = https://github.com/willemdj/erlsom
+pkg_erlsom_fetch = git
+pkg_erlsom_repo = https://github.com/willemdj/erlsom
+pkg_erlsom_commit = master
+
+PACKAGES += erlubi
+pkg_erlubi_name = erlubi
+pkg_erlubi_description = Ubigraph Erlang Client (and Process Visualizer)
+pkg_erlubi_homepage = https://github.com/krestenkrab/erlubi
+pkg_erlubi_fetch = git
+pkg_erlubi_repo = https://github.com/krestenkrab/erlubi
+pkg_erlubi_commit = master
+
+PACKAGES += erlvolt
+pkg_erlvolt_name = erlvolt
+pkg_erlvolt_description = VoltDB Erlang Client Driver
+pkg_erlvolt_homepage = https://github.com/VoltDB/voltdb-client-erlang
+pkg_erlvolt_fetch = git
+pkg_erlvolt_repo = https://github.com/VoltDB/voltdb-client-erlang
+pkg_erlvolt_commit = master
+
+PACKAGES += erlware_commons
+pkg_erlware_commons_name = erlware_commons
+pkg_erlware_commons_description = Erlware Commons is an Erlware project focused on all aspects of reusable Erlang components.
+pkg_erlware_commons_homepage = https://github.com/erlware/erlware_commons
+pkg_erlware_commons_fetch = git
+pkg_erlware_commons_repo = https://github.com/erlware/erlware_commons
+pkg_erlware_commons_commit = master
+
+PACKAGES += erlydtl
+pkg_erlydtl_name = erlydtl
+pkg_erlydtl_description = Django Template Language for Erlang.
+pkg_erlydtl_homepage = https://github.com/erlydtl/erlydtl
+pkg_erlydtl_fetch = git
+pkg_erlydtl_repo = https://github.com/erlydtl/erlydtl
+pkg_erlydtl_commit = master
+
+PACKAGES += errd
+pkg_errd_name = errd
+pkg_errd_description = Erlang RRDTool library
+pkg_errd_homepage = https://github.com/archaelus/errd
+pkg_errd_fetch = git
+pkg_errd_repo = https://github.com/archaelus/errd
+pkg_errd_commit = master
+
+PACKAGES += erserve
+pkg_erserve_name = erserve
+pkg_erserve_description = Erlang/Rserve communication interface
+pkg_erserve_homepage = https://github.com/del/erserve
+pkg_erserve_fetch = git
+pkg_erserve_repo = https://github.com/del/erserve
+pkg_erserve_commit = master
+
+PACKAGES += erwa
+pkg_erwa_name = erwa
+pkg_erwa_description = A WAMP router and client written in Erlang.
+pkg_erwa_homepage = https://github.com/bwegh/erwa
+pkg_erwa_fetch = git
+pkg_erwa_repo = https://github.com/bwegh/erwa
+pkg_erwa_commit = master
+
+PACKAGES += escalus
+pkg_escalus_name = escalus
+pkg_escalus_description = An XMPP client library in Erlang for conveniently testing XMPP servers
+pkg_escalus_homepage = https://github.com/esl/escalus
+pkg_escalus_fetch = git
+pkg_escalus_repo = https://github.com/esl/escalus
+pkg_escalus_commit = master
+
+PACKAGES += esh_mk
+pkg_esh_mk_name = esh_mk
+pkg_esh_mk_description = esh template engine plugin for erlang.mk
+pkg_esh_mk_homepage = https://github.com/crownedgrouse/esh.mk
+pkg_esh_mk_fetch = git
+pkg_esh_mk_repo = https://github.com/crownedgrouse/esh.mk.git
+pkg_esh_mk_commit = master
+
+PACKAGES += espec
+pkg_espec_name = espec
+pkg_espec_description = ESpec: Behaviour driven development framework for Erlang
+pkg_espec_homepage = https://github.com/lucaspiller/espec
+pkg_espec_fetch = git
+pkg_espec_repo = https://github.com/lucaspiller/espec
+pkg_espec_commit = master
+
+PACKAGES += estatsd
+pkg_estatsd_name = estatsd
+pkg_estatsd_description = Erlang stats aggregation app that periodically flushes data to graphite
+pkg_estatsd_homepage = https://github.com/RJ/estatsd
+pkg_estatsd_fetch = git
+pkg_estatsd_repo = https://github.com/RJ/estatsd
+pkg_estatsd_commit = master
+
+PACKAGES += etap
+pkg_etap_name = etap
+pkg_etap_description = etap is a simple erlang testing library that provides TAP compliant output.
+pkg_etap_homepage = https://github.com/ngerakines/etap
+pkg_etap_fetch = git
+pkg_etap_repo = https://github.com/ngerakines/etap
+pkg_etap_commit = master
+
+PACKAGES += etest
+pkg_etest_name = etest
+pkg_etest_description = A lightweight, convention over configuration test framework for Erlang
+pkg_etest_homepage = https://github.com/wooga/etest
+pkg_etest_fetch = git
+pkg_etest_repo = https://github.com/wooga/etest
+pkg_etest_commit = master
+
+PACKAGES += etest_http
+pkg_etest_http_name = etest_http
+pkg_etest_http_description = etest Assertions around HTTP (client-side)
+pkg_etest_http_homepage = https://github.com/wooga/etest_http
+pkg_etest_http_fetch = git
+pkg_etest_http_repo = https://github.com/wooga/etest_http
+pkg_etest_http_commit = master
+
+PACKAGES += etoml
+pkg_etoml_name = etoml
+pkg_etoml_description = Erlang parser for the TOML language
+pkg_etoml_homepage = https://github.com/kalta/etoml
+pkg_etoml_fetch = git
+pkg_etoml_repo = https://github.com/kalta/etoml
+pkg_etoml_commit = master
+
+PACKAGES += eunit
+pkg_eunit_name = eunit
+pkg_eunit_description = The EUnit lightweight unit testing framework for Erlang - this is the canonical development repository.
+pkg_eunit_homepage = https://github.com/richcarl/eunit
+pkg_eunit_fetch = git
+pkg_eunit_repo = https://github.com/richcarl/eunit
+pkg_eunit_commit = master
+
+PACKAGES += eunit_formatters
+pkg_eunit_formatters_name = eunit_formatters
+pkg_eunit_formatters_description = Because eunit's output sucks. Let's make it better.
+pkg_eunit_formatters_homepage = https://github.com/seancribbs/eunit_formatters
+pkg_eunit_formatters_fetch = git
+pkg_eunit_formatters_repo = https://github.com/seancribbs/eunit_formatters
+pkg_eunit_formatters_commit = master
+
+PACKAGES += euthanasia
+pkg_euthanasia_name = euthanasia
+pkg_euthanasia_description = Merciful killer for your Erlang processes
+pkg_euthanasia_homepage = https://github.com/doubleyou/euthanasia
+pkg_euthanasia_fetch = git
+pkg_euthanasia_repo = https://github.com/doubleyou/euthanasia
+pkg_euthanasia_commit = master
+
+PACKAGES += evum
+pkg_evum_name = evum
+pkg_evum_description = Spawn Linux VMs as Erlang processes in the Erlang VM
+pkg_evum_homepage = https://github.com/msantos/evum
+pkg_evum_fetch = git
+pkg_evum_repo = https://github.com/msantos/evum
+pkg_evum_commit = master
+
+PACKAGES += exec
+pkg_exec_name = erlexec
+pkg_exec_description = Execute and control OS processes from Erlang/OTP.
+pkg_exec_homepage = http://saleyn.github.io/erlexec
+pkg_exec_fetch = git
+pkg_exec_repo = https://github.com/saleyn/erlexec
+pkg_exec_commit = master
+
+PACKAGES += exml
+pkg_exml_name = exml
+pkg_exml_description = XML parsing library in Erlang
+pkg_exml_homepage = https://github.com/paulgray/exml
+pkg_exml_fetch = git
+pkg_exml_repo = https://github.com/paulgray/exml
+pkg_exml_commit = master
+
+PACKAGES += exometer
+pkg_exometer_name = exometer
+pkg_exometer_description = Basic measurement objects and probe behavior
+pkg_exometer_homepage = https://github.com/Feuerlabs/exometer
+pkg_exometer_fetch = git
+pkg_exometer_repo = https://github.com/Feuerlabs/exometer
+pkg_exometer_commit = master
+
+PACKAGES += exs1024
+pkg_exs1024_name = exs1024
+pkg_exs1024_description = Xorshift1024star pseudo random number generator for Erlang.
+pkg_exs1024_homepage = https://github.com/jj1bdx/exs1024
+pkg_exs1024_fetch = git
+pkg_exs1024_repo = https://github.com/jj1bdx/exs1024
+pkg_exs1024_commit = master
+
+PACKAGES += exs64
+pkg_exs64_name = exs64
+pkg_exs64_description = Xorshift64star pseudo random number generator for Erlang.
+pkg_exs64_homepage = https://github.com/jj1bdx/exs64
+pkg_exs64_fetch = git
+pkg_exs64_repo = https://github.com/jj1bdx/exs64
+pkg_exs64_commit = master
+
+PACKAGES += exsplus116
+pkg_exsplus116_name = exsplus116
+pkg_exsplus116_description = Xorshift116plus for Erlang
+pkg_exsplus116_homepage = https://github.com/jj1bdx/exsplus116
+pkg_exsplus116_fetch = git
+pkg_exsplus116_repo = https://github.com/jj1bdx/exsplus116
+pkg_exsplus116_commit = master
+
+PACKAGES += exsplus128
+pkg_exsplus128_name = exsplus128
+pkg_exsplus128_description = Xorshift128plus pseudo random number generator for Erlang.
+pkg_exsplus128_homepage = https://github.com/jj1bdx/exsplus128
+pkg_exsplus128_fetch = git
+pkg_exsplus128_repo = https://github.com/jj1bdx/exsplus128
+pkg_exsplus128_commit = master
+
+PACKAGES += ezmq
+pkg_ezmq_name = ezmq
+pkg_ezmq_description = ZeroMQ implemented in Erlang
+pkg_ezmq_homepage = https://github.com/RoadRunnr/ezmq
+pkg_ezmq_fetch = git
+pkg_ezmq_repo = https://github.com/RoadRunnr/ezmq
+pkg_ezmq_commit = master
+
+PACKAGES += ezmtp
+pkg_ezmtp_name = ezmtp
+pkg_ezmtp_description = ZMTP protocol in pure Erlang.
+pkg_ezmtp_homepage = https://github.com/a13x/ezmtp
+pkg_ezmtp_fetch = git
+pkg_ezmtp_repo = https://github.com/a13x/ezmtp
+pkg_ezmtp_commit = master
+
+PACKAGES += fast_disk_log
+pkg_fast_disk_log_name = fast_disk_log
+pkg_fast_disk_log_description = Pool-based asynchronous Erlang disk logger
+pkg_fast_disk_log_homepage = https://github.com/lpgauth/fast_disk_log
+pkg_fast_disk_log_fetch = git
+pkg_fast_disk_log_repo = https://github.com/lpgauth/fast_disk_log
+pkg_fast_disk_log_commit = master
+
+PACKAGES += feeder
+pkg_feeder_name = feeder
+pkg_feeder_description = Stream parse RSS and Atom formatted XML feeds.
+pkg_feeder_homepage = https://github.com/michaelnisi/feeder
+pkg_feeder_fetch = git
+pkg_feeder_repo = https://github.com/michaelnisi/feeder
+pkg_feeder_commit = master
+
+PACKAGES += find_crate
+pkg_find_crate_name = find_crate
+pkg_find_crate_description = Find Rust libs and exes in Erlang application priv directory
+pkg_find_crate_homepage = https://github.com/goertzenator/find_crate
+pkg_find_crate_fetch = git
+pkg_find_crate_repo = https://github.com/goertzenator/find_crate
+pkg_find_crate_commit = master
+
+PACKAGES += fix
+pkg_fix_name = fix
+pkg_fix_description = http://fixprotocol.org/ implementation.
+pkg_fix_homepage = https://github.com/maxlapshin/fix
+pkg_fix_fetch = git
+pkg_fix_repo = https://github.com/maxlapshin/fix
+pkg_fix_commit = master
+
+PACKAGES += flower
+pkg_flower_name = flower
+pkg_flower_description = FlowER - an Erlang OpenFlow development platform
+pkg_flower_homepage = https://github.com/travelping/flower
+pkg_flower_fetch = git
+pkg_flower_repo = https://github.com/travelping/flower
+pkg_flower_commit = master
+
+PACKAGES += fn
+pkg_fn_name = fn
+pkg_fn_description = Function utilities for Erlang
+pkg_fn_homepage = https://github.com/reiddraper/fn
+pkg_fn_fetch = git
+pkg_fn_repo = https://github.com/reiddraper/fn
+pkg_fn_commit = master
+
+PACKAGES += folsom
+pkg_folsom_name = folsom
+pkg_folsom_description = Expose Erlang Events and Metrics
+pkg_folsom_homepage = https://github.com/boundary/folsom
+pkg_folsom_fetch = git
+pkg_folsom_repo = https://github.com/boundary/folsom
+pkg_folsom_commit = master
+
+PACKAGES += folsom_cowboy
+pkg_folsom_cowboy_name = folsom_cowboy
+pkg_folsom_cowboy_description = A Cowboy based Folsom HTTP Wrapper.
+pkg_folsom_cowboy_homepage = https://github.com/boundary/folsom_cowboy
+pkg_folsom_cowboy_fetch = git
+pkg_folsom_cowboy_repo = https://github.com/boundary/folsom_cowboy
+pkg_folsom_cowboy_commit = master
+
+PACKAGES += folsomite
+pkg_folsomite_name = folsomite
+pkg_folsomite_description = blow up your graphite / riemann server with folsom metrics
+pkg_folsomite_homepage = https://github.com/campanja/folsomite
+pkg_folsomite_fetch = git
+pkg_folsomite_repo = https://github.com/campanja/folsomite
+pkg_folsomite_commit = master
+
+PACKAGES += fs
+pkg_fs_name = fs
+pkg_fs_description = Erlang FileSystem Listener
+pkg_fs_homepage = https://github.com/synrc/fs
+pkg_fs_fetch = git
+pkg_fs_repo = https://github.com/synrc/fs
+pkg_fs_commit = master
+
+PACKAGES += fuse
+pkg_fuse_name = fuse
+pkg_fuse_description = A Circuit Breaker for Erlang
+pkg_fuse_homepage = https://github.com/jlouis/fuse
+pkg_fuse_fetch = git
+pkg_fuse_repo = https://github.com/jlouis/fuse
+pkg_fuse_commit = master
+
+PACKAGES += gcm
+pkg_gcm_name = gcm
+pkg_gcm_description = An Erlang application for Google Cloud Messaging
+pkg_gcm_homepage = https://github.com/pdincau/gcm-erlang
+pkg_gcm_fetch = git
+pkg_gcm_repo = https://github.com/pdincau/gcm-erlang
+pkg_gcm_commit = master
+
+PACKAGES += gcprof
+pkg_gcprof_name = gcprof
+pkg_gcprof_description = Garbage Collection profiler for Erlang
+pkg_gcprof_homepage = https://github.com/knutin/gcprof
+pkg_gcprof_fetch = git
+pkg_gcprof_repo = https://github.com/knutin/gcprof
+pkg_gcprof_commit = master
+
+PACKAGES += geas
+pkg_geas_name = geas
+pkg_geas_description = Guess Erlang Application Scattering
+pkg_geas_homepage = https://github.com/crownedgrouse/geas
+pkg_geas_fetch = git
+pkg_geas_repo = https://github.com/crownedgrouse/geas
+pkg_geas_commit = master
+
+PACKAGES += geef
+pkg_geef_name = geef
+pkg_geef_description = Git NEEEEF (Erlang NIF)
+pkg_geef_homepage = https://github.com/carlosmn/geef
+pkg_geef_fetch = git
+pkg_geef_repo = https://github.com/carlosmn/geef
+pkg_geef_commit = master
+
+PACKAGES += gen_coap
+pkg_gen_coap_name = gen_coap
+pkg_gen_coap_description = Generic Erlang CoAP Client/Server
+pkg_gen_coap_homepage = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_fetch = git
+pkg_gen_coap_repo = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_commit = master
+
+PACKAGES += gen_cycle
+pkg_gen_cycle_name = gen_cycle
+pkg_gen_cycle_description = Simple, generic OTP behaviour for recurring tasks
+pkg_gen_cycle_homepage = https://github.com/aerosol/gen_cycle
+pkg_gen_cycle_fetch = git
+pkg_gen_cycle_repo = https://github.com/aerosol/gen_cycle
+pkg_gen_cycle_commit = develop
+
+PACKAGES += gen_icmp
+pkg_gen_icmp_name = gen_icmp
+pkg_gen_icmp_description = Erlang interface to ICMP sockets
+pkg_gen_icmp_homepage = https://github.com/msantos/gen_icmp
+pkg_gen_icmp_fetch = git
+pkg_gen_icmp_repo = https://github.com/msantos/gen_icmp
+pkg_gen_icmp_commit = master
+
+PACKAGES += gen_leader
+pkg_gen_leader_name = gen_leader
+pkg_gen_leader_description = leader election behavior
+pkg_gen_leader_homepage = https://github.com/garret-smith/gen_leader_revival
+pkg_gen_leader_fetch = git
+pkg_gen_leader_repo = https://github.com/garret-smith/gen_leader_revival
+pkg_gen_leader_commit = master
+
+PACKAGES += gen_nb_server
+pkg_gen_nb_server_name = gen_nb_server
+pkg_gen_nb_server_description = OTP behavior for writing non-blocking servers
+pkg_gen_nb_server_homepage = https://github.com/kevsmith/gen_nb_server
+pkg_gen_nb_server_fetch = git
+pkg_gen_nb_server_repo = https://github.com/kevsmith/gen_nb_server
+pkg_gen_nb_server_commit = master
+
+PACKAGES += gen_paxos
+pkg_gen_paxos_name = gen_paxos
+pkg_gen_paxos_description = An Erlang/OTP-style implementation of the PAXOS distributed consensus protocol
+pkg_gen_paxos_homepage = https://github.com/gburd/gen_paxos
+pkg_gen_paxos_fetch = git
+pkg_gen_paxos_repo = https://github.com/gburd/gen_paxos
+pkg_gen_paxos_commit = master
+
+PACKAGES += gen_rpc
+pkg_gen_rpc_name = gen_rpc
+pkg_gen_rpc_description = A scalable RPC library for Erlang-VM based languages
+pkg_gen_rpc_homepage = https://github.com/priestjim/gen_rpc
+pkg_gen_rpc_fetch = git
+pkg_gen_rpc_repo = https://github.com/priestjim/gen_rpc.git
+pkg_gen_rpc_commit = master
+
+PACKAGES += gen_smtp
+pkg_gen_smtp_name = gen_smtp
+pkg_gen_smtp_description = A generic Erlang SMTP server and client that can be extended via callback modules
+pkg_gen_smtp_homepage = https://github.com/Vagabond/gen_smtp
+pkg_gen_smtp_fetch = git
+pkg_gen_smtp_repo = https://github.com/Vagabond/gen_smtp
+pkg_gen_smtp_commit = master
+
+PACKAGES += gen_tracker
+pkg_gen_tracker_name = gen_tracker
+pkg_gen_tracker_description = supervisor with ets handling of children and their metadata
+pkg_gen_tracker_homepage = https://github.com/erlyvideo/gen_tracker
+pkg_gen_tracker_fetch = git
+pkg_gen_tracker_repo = https://github.com/erlyvideo/gen_tracker
+pkg_gen_tracker_commit = master
+
+PACKAGES += gen_unix
+pkg_gen_unix_name = gen_unix
+pkg_gen_unix_description = Erlang Unix socket interface
+pkg_gen_unix_homepage = https://github.com/msantos/gen_unix
+pkg_gen_unix_fetch = git
+pkg_gen_unix_repo = https://github.com/msantos/gen_unix
+pkg_gen_unix_commit = master
+
+PACKAGES += geode
+pkg_geode_name = geode
+pkg_geode_description = geohash/proximity lookup in pure, uncut erlang.
+pkg_geode_homepage = https://github.com/bradfordw/geode
+pkg_geode_fetch = git
+pkg_geode_repo = https://github.com/bradfordw/geode
+pkg_geode_commit = master
+
+PACKAGES += getopt
+pkg_getopt_name = getopt
+pkg_getopt_description = Module to parse command line arguments using the GNU getopt syntax
+pkg_getopt_homepage = https://github.com/jcomellas/getopt
+pkg_getopt_fetch = git
+pkg_getopt_repo = https://github.com/jcomellas/getopt
+pkg_getopt_commit = master
+
+PACKAGES += gettext
+pkg_gettext_name = gettext
+pkg_gettext_description = Erlang internationalization library.
+pkg_gettext_homepage = https://github.com/etnt/gettext
+pkg_gettext_fetch = git
+pkg_gettext_repo = https://github.com/etnt/gettext
+pkg_gettext_commit = master
+
+PACKAGES += giallo
+pkg_giallo_name = giallo
+pkg_giallo_description = Small and flexible web framework on top of Cowboy
+pkg_giallo_homepage = https://github.com/kivra/giallo
+pkg_giallo_fetch = git
+pkg_giallo_repo = https://github.com/kivra/giallo
+pkg_giallo_commit = master
+
+PACKAGES += gin
+pkg_gin_name = gin
+pkg_gin_description = Guard helpers for Erlang, implemented as a parse_transform
+pkg_gin_homepage = https://github.com/mad-cocktail/gin
+pkg_gin_fetch = git
+pkg_gin_repo = https://github.com/mad-cocktail/gin
+pkg_gin_commit = master
+
+PACKAGES += gitty
+pkg_gitty_name = gitty
+pkg_gitty_description = Git access in erlang
+pkg_gitty_homepage = https://github.com/maxlapshin/gitty
+pkg_gitty_fetch = git
+pkg_gitty_repo = https://github.com/maxlapshin/gitty
+pkg_gitty_commit = master
+
+PACKAGES += gold_fever
+pkg_gold_fever_name = gold_fever
+pkg_gold_fever_description = A Treasure Hunt for Erlangers
+pkg_gold_fever_homepage = https://github.com/inaka/gold_fever
+pkg_gold_fever_fetch = git
+pkg_gold_fever_repo = https://github.com/inaka/gold_fever
+pkg_gold_fever_commit = master
+
+PACKAGES += gpb
+pkg_gpb_name = gpb
+pkg_gpb_description = A Google Protobuf implementation for Erlang
+pkg_gpb_homepage = https://github.com/tomas-abrahamsson/gpb
+pkg_gpb_fetch = git
+pkg_gpb_repo = https://github.com/tomas-abrahamsson/gpb
+pkg_gpb_commit = master
+
+PACKAGES += gproc
+pkg_gproc_name = gproc
+pkg_gproc_description = Extended process registry for Erlang
+pkg_gproc_homepage = https://github.com/uwiger/gproc
+pkg_gproc_fetch = git
+pkg_gproc_repo = https://github.com/uwiger/gproc
+pkg_gproc_commit = master
+
+PACKAGES += grapherl
+pkg_grapherl_name = grapherl
+pkg_grapherl_description = Create graphs of Erlang systems and programs
+pkg_grapherl_homepage = https://github.com/eproxus/grapherl
+pkg_grapherl_fetch = git
+pkg_grapherl_repo = https://github.com/eproxus/grapherl
+pkg_grapherl_commit = master
+
+PACKAGES += grpc
+pkg_grpc_name = grpc
+pkg_grpc_description = gRPC server in Erlang
+pkg_grpc_homepage = https://github.com/Bluehouse-Technology/grpc
+pkg_grpc_fetch = git
+pkg_grpc_repo = https://github.com/Bluehouse-Technology/grpc
+pkg_grpc_commit = master
+
+PACKAGES += grpc_client
+pkg_grpc_client_name = grpc_client
+pkg_grpc_client_description = gRPC client in Erlang
+pkg_grpc_client_homepage = https://github.com/Bluehouse-Technology/grpc_client
+pkg_grpc_client_fetch = git
+pkg_grpc_client_repo = https://github.com/Bluehouse-Technology/grpc_client
+pkg_grpc_client_commit = master
+
+PACKAGES += gun
+pkg_gun_name = gun
+pkg_gun_description = Asynchronous SPDY, HTTP and Websocket client written in Erlang.
+pkg_gun_homepage = http://ninenines.eu
+pkg_gun_fetch = git
+pkg_gun_repo = https://github.com/ninenines/gun
+pkg_gun_commit = master
+
+PACKAGES += gut
+pkg_gut_name = gut
+pkg_gut_description = gut is a template printing, aka scaffolding, tool for Erlang. Like rails generate or yeoman
+pkg_gut_homepage = https://github.com/unbalancedparentheses/gut
+pkg_gut_fetch = git
+pkg_gut_repo = https://github.com/unbalancedparentheses/gut
+pkg_gut_commit = master
+
+PACKAGES += hackney
+pkg_hackney_name = hackney
+pkg_hackney_description = simple HTTP client in Erlang
+pkg_hackney_homepage = https://github.com/benoitc/hackney
+pkg_hackney_fetch = git
+pkg_hackney_repo = https://github.com/benoitc/hackney
+pkg_hackney_commit = master
+
+PACKAGES += hamcrest
+pkg_hamcrest_name = hamcrest
+pkg_hamcrest_description = Erlang port of Hamcrest
+pkg_hamcrest_homepage = https://github.com/hyperthunk/hamcrest-erlang
+pkg_hamcrest_fetch = git
+pkg_hamcrest_repo = https://github.com/hyperthunk/hamcrest-erlang
+pkg_hamcrest_commit = master
+
+PACKAGES += hanoidb
+pkg_hanoidb_name = hanoidb
+pkg_hanoidb_description = Erlang LSM BTree Storage
+pkg_hanoidb_homepage = https://github.com/krestenkrab/hanoidb
+pkg_hanoidb_fetch = git
+pkg_hanoidb_repo = https://github.com/krestenkrab/hanoidb
+pkg_hanoidb_commit = master
+
+PACKAGES += hottub
+pkg_hottub_name = hottub
+pkg_hottub_description = Permanent Erlang Worker Pool
+pkg_hottub_homepage = https://github.com/bfrog/hottub
+pkg_hottub_fetch = git
+pkg_hottub_repo = https://github.com/bfrog/hottub
+pkg_hottub_commit = master
+
+PACKAGES += hpack
+pkg_hpack_name = hpack
+pkg_hpack_description = HPACK Implementation for Erlang
+pkg_hpack_homepage = https://github.com/joedevivo/hpack
+pkg_hpack_fetch = git
+pkg_hpack_repo = https://github.com/joedevivo/hpack
+pkg_hpack_commit = master
+
+PACKAGES += hyper
+pkg_hyper_name = hyper
+pkg_hyper_description = Erlang implementation of HyperLogLog
+pkg_hyper_homepage = https://github.com/GameAnalytics/hyper
+pkg_hyper_fetch = git
+pkg_hyper_repo = https://github.com/GameAnalytics/hyper
+pkg_hyper_commit = master
+
+PACKAGES += i18n
+pkg_i18n_name = i18n
+pkg_i18n_description = International components for unicode from Erlang (unicode, date, string, number, format, locale, localization, transliteration, icu4e)
+pkg_i18n_homepage = https://github.com/erlang-unicode/i18n
+pkg_i18n_fetch = git
+pkg_i18n_repo = https://github.com/erlang-unicode/i18n
+pkg_i18n_commit = master
+
+PACKAGES += ibrowse
+pkg_ibrowse_name = ibrowse
+pkg_ibrowse_description = Erlang HTTP client
+pkg_ibrowse_homepage = https://github.com/cmullaparthi/ibrowse
+pkg_ibrowse_fetch = git
+pkg_ibrowse_repo = https://github.com/cmullaparthi/ibrowse
+pkg_ibrowse_commit = master
+
+PACKAGES += idna
+pkg_idna_name = idna
+pkg_idna_description = Erlang IDNA lib
+pkg_idna_homepage = https://github.com/benoitc/erlang-idna
+pkg_idna_fetch = git
+pkg_idna_repo = https://github.com/benoitc/erlang-idna
+pkg_idna_commit = master
+
+PACKAGES += ierlang
+pkg_ierlang_name = ierlang
+pkg_ierlang_description = An Erlang language kernel for IPython.
+pkg_ierlang_homepage = https://github.com/robbielynch/ierlang
+pkg_ierlang_fetch = git
+pkg_ierlang_repo = https://github.com/robbielynch/ierlang
+pkg_ierlang_commit = master
+
+PACKAGES += iota
+pkg_iota_name = iota
+pkg_iota_description = iota (Inter-dependency Objective Testing Apparatus) - a tool to enforce clean separation of responsibilities in Erlang code
+pkg_iota_homepage = https://github.com/jpgneves/iota
+pkg_iota_fetch = git
+pkg_iota_repo = https://github.com/jpgneves/iota
+pkg_iota_commit = master
+
+PACKAGES += irc_lib
+pkg_irc_lib_name = irc_lib
+pkg_irc_lib_description = Erlang irc client library
+pkg_irc_lib_homepage = https://github.com/OtpChatBot/irc_lib
+pkg_irc_lib_fetch = git
+pkg_irc_lib_repo = https://github.com/OtpChatBot/irc_lib
+pkg_irc_lib_commit = master
+
+PACKAGES += ircd
+pkg_ircd_name = ircd
+pkg_ircd_description = A pluggable IRC daemon application/library for Erlang.
+pkg_ircd_homepage = https://github.com/tonyg/erlang-ircd
+pkg_ircd_fetch = git
+pkg_ircd_repo = https://github.com/tonyg/erlang-ircd
+pkg_ircd_commit = master
+
+PACKAGES += iris
+pkg_iris_name = iris
+pkg_iris_description = Iris Erlang binding
+pkg_iris_homepage = https://github.com/project-iris/iris-erl
+pkg_iris_fetch = git
+pkg_iris_repo = https://github.com/project-iris/iris-erl
+pkg_iris_commit = master
+
+PACKAGES += iso8601
+pkg_iso8601_name = iso8601
+pkg_iso8601_description = Erlang ISO 8601 date formatter/parser
+pkg_iso8601_homepage = https://github.com/seansawyer/erlang_iso8601
+pkg_iso8601_fetch = git
+pkg_iso8601_repo = https://github.com/seansawyer/erlang_iso8601
+pkg_iso8601_commit = master
+
+PACKAGES += jamdb_sybase
+pkg_jamdb_sybase_name = jamdb_sybase
+pkg_jamdb_sybase_description = Erlang driver for SAP Sybase ASE
+pkg_jamdb_sybase_homepage = https://github.com/erlangbureau/jamdb_sybase
+pkg_jamdb_sybase_fetch = git
+pkg_jamdb_sybase_repo = https://github.com/erlangbureau/jamdb_sybase
+pkg_jamdb_sybase_commit = master
+
+PACKAGES += jerg
+pkg_jerg_name = jerg
+pkg_jerg_description = JSON Schema to Erlang Records Generator
+pkg_jerg_homepage = https://github.com/ddossot/jerg
+pkg_jerg_fetch = git
+pkg_jerg_repo = https://github.com/ddossot/jerg
+pkg_jerg_commit = master
+
+PACKAGES += jesse
+pkg_jesse_name = jesse
+pkg_jesse_description = jesse (JSon Schema Erlang) is an implementation of a json schema validator for Erlang.
+pkg_jesse_homepage = https://github.com/for-GET/jesse
+pkg_jesse_fetch = git
+pkg_jesse_repo = https://github.com/for-GET/jesse
+pkg_jesse_commit = master
+
+PACKAGES += jiffy
+pkg_jiffy_name = jiffy
+pkg_jiffy_description = JSON NIFs for Erlang.
+pkg_jiffy_homepage = https://github.com/davisp/jiffy
+pkg_jiffy_fetch = git
+pkg_jiffy_repo = https://github.com/davisp/jiffy
+pkg_jiffy_commit = master
+
+PACKAGES += jiffy_v
+pkg_jiffy_v_name = jiffy_v
+pkg_jiffy_v_description = JSON validation utility
+pkg_jiffy_v_homepage = https://github.com/shizzard/jiffy-v
+pkg_jiffy_v_fetch = git
+pkg_jiffy_v_repo = https://github.com/shizzard/jiffy-v
+pkg_jiffy_v_commit = master
+
+PACKAGES += jobs
+pkg_jobs_name = jobs
+pkg_jobs_description = a Job scheduler for load regulation
+pkg_jobs_homepage = https://github.com/esl/jobs
+pkg_jobs_fetch = git
+pkg_jobs_repo = https://github.com/esl/jobs
+pkg_jobs_commit = master
+
+PACKAGES += joxa
+pkg_joxa_name = joxa
+pkg_joxa_description = A Modern Lisp for the Erlang VM
+pkg_joxa_homepage = https://github.com/joxa/joxa
+pkg_joxa_fetch = git
+pkg_joxa_repo = https://github.com/joxa/joxa
+pkg_joxa_commit = master
+
+PACKAGES += json
+pkg_json_name = json
+pkg_json_description = a high level json library for erlang (17.0+)
+pkg_json_homepage = https://github.com/talentdeficit/json
+pkg_json_fetch = git
+pkg_json_repo = https://github.com/talentdeficit/json
+pkg_json_commit = master
+
+PACKAGES += json_rec
+pkg_json_rec_name = json_rec
+pkg_json_rec_description = JSON to erlang record
+pkg_json_rec_homepage = https://github.com/justinkirby/json_rec
+pkg_json_rec_fetch = git
+pkg_json_rec_repo = https://github.com/justinkirby/json_rec
+pkg_json_rec_commit = master
+
+PACKAGES += jsone
+pkg_jsone_name = jsone
+pkg_jsone_description = An Erlang library for encoding, decoding JSON data.
+pkg_jsone_homepage = https://github.com/sile/jsone
+pkg_jsone_fetch = git
+pkg_jsone_repo = https://github.com/sile/jsone.git
+pkg_jsone_commit = master
+
+PACKAGES += jsonerl
+pkg_jsonerl_name = jsonerl
+pkg_jsonerl_description = yet another but slightly different erlang <-> json encoder/decoder
+pkg_jsonerl_homepage = https://github.com/lambder/jsonerl
+pkg_jsonerl_fetch = git
+pkg_jsonerl_repo = https://github.com/lambder/jsonerl
+pkg_jsonerl_commit = master
+
+PACKAGES += jsonpath
+pkg_jsonpath_name = jsonpath
+pkg_jsonpath_description = Fast Erlang JSON data retrieval and updates via javascript-like notation
+pkg_jsonpath_homepage = https://github.com/GeneStevens/jsonpath
+pkg_jsonpath_fetch = git
+pkg_jsonpath_repo = https://github.com/GeneStevens/jsonpath
+pkg_jsonpath_commit = master
+
+PACKAGES += jsonx
+pkg_jsonx_name = jsonx
+pkg_jsonx_description = JSONX is an Erlang library for efficiently decoding and encoding JSON, written in C.
+pkg_jsonx_homepage = https://github.com/iskra/jsonx
+pkg_jsonx_fetch = git
+pkg_jsonx_repo = https://github.com/iskra/jsonx
+pkg_jsonx_commit = master
+
+PACKAGES += jsx
+pkg_jsx_name = jsx
+pkg_jsx_description = An Erlang application for consuming, producing and manipulating JSON.
+pkg_jsx_homepage = https://github.com/talentdeficit/jsx
+pkg_jsx_fetch = git
+pkg_jsx_repo = https://github.com/talentdeficit/jsx
+pkg_jsx_commit = master
+
+PACKAGES += kafka
+pkg_kafka_name = kafka
+pkg_kafka_description = Kafka consumer and producer in Erlang
+pkg_kafka_homepage = https://github.com/wooga/kafka-erlang
+pkg_kafka_fetch = git
+pkg_kafka_repo = https://github.com/wooga/kafka-erlang
+pkg_kafka_commit = master
+
+PACKAGES += kafka_protocol
+pkg_kafka_protocol_name = kafka_protocol
+pkg_kafka_protocol_description = Kafka protocol Erlang library
+pkg_kafka_protocol_homepage = https://github.com/klarna/kafka_protocol
+pkg_kafka_protocol_fetch = git
+pkg_kafka_protocol_repo = https://github.com/klarna/kafka_protocol.git
+pkg_kafka_protocol_commit = master
+
+PACKAGES += kai
+pkg_kai_name = kai
+pkg_kai_description = DHT storage by Takeshi Inoue
+pkg_kai_homepage = https://github.com/synrc/kai
+pkg_kai_fetch = git
+pkg_kai_repo = https://github.com/synrc/kai
+pkg_kai_commit = master
+
+PACKAGES += katja
+pkg_katja_name = katja
+pkg_katja_description = A simple Riemann client written in Erlang.
+pkg_katja_homepage = https://github.com/nifoc/katja
+pkg_katja_fetch = git
+pkg_katja_repo = https://github.com/nifoc/katja
+pkg_katja_commit = master
+
+PACKAGES += kdht
+pkg_kdht_name = kdht
+pkg_kdht_description = kdht is an erlang DHT implementation
+pkg_kdht_homepage = https://github.com/kevinlynx/kdht
+pkg_kdht_fetch = git
+pkg_kdht_repo = https://github.com/kevinlynx/kdht
+pkg_kdht_commit = master
+
+PACKAGES += key2value
+pkg_key2value_name = key2value
+pkg_key2value_description = Erlang 2-way map
+pkg_key2value_homepage = https://github.com/okeuday/key2value
+pkg_key2value_fetch = git
+pkg_key2value_repo = https://github.com/okeuday/key2value
+pkg_key2value_commit = master
+
+PACKAGES += keys1value
+pkg_keys1value_name = keys1value
+pkg_keys1value_description = Erlang set associative map for key lists
+pkg_keys1value_homepage = https://github.com/okeuday/keys1value
+pkg_keys1value_fetch = git
+pkg_keys1value_repo = https://github.com/okeuday/keys1value
+pkg_keys1value_commit = master
+
+PACKAGES += kinetic
+pkg_kinetic_name = kinetic
+pkg_kinetic_description = Erlang Kinesis Client
+pkg_kinetic_homepage = https://github.com/AdRoll/kinetic
+pkg_kinetic_fetch = git
+pkg_kinetic_repo = https://github.com/AdRoll/kinetic
+pkg_kinetic_commit = master
+
+PACKAGES += kjell
+pkg_kjell_name = kjell
+pkg_kjell_description = Erlang Shell
+pkg_kjell_homepage = https://github.com/karlll/kjell
+pkg_kjell_fetch = git
+pkg_kjell_repo = https://github.com/karlll/kjell
+pkg_kjell_commit = master
+
+PACKAGES += kraken
+pkg_kraken_name = kraken
+pkg_kraken_description = Distributed Pubsub Server for Realtime Apps
+pkg_kraken_homepage = https://github.com/Asana/kraken
+pkg_kraken_fetch = git
+pkg_kraken_repo = https://github.com/Asana/kraken
+pkg_kraken_commit = master
+
+PACKAGES += kucumberl
+pkg_kucumberl_name = kucumberl
+pkg_kucumberl_description = A pure-Erlang, open-source implementation of Cucumber
+pkg_kucumberl_homepage = https://github.com/openshine/kucumberl
+pkg_kucumberl_fetch = git
+pkg_kucumberl_repo = https://github.com/openshine/kucumberl
+pkg_kucumberl_commit = master
+
+PACKAGES += kvc
+pkg_kvc_name = kvc
+pkg_kvc_description = KVC - Key Value Coding for Erlang data structures
+pkg_kvc_homepage = https://github.com/etrepum/kvc
+pkg_kvc_fetch = git
+pkg_kvc_repo = https://github.com/etrepum/kvc
+pkg_kvc_commit = master
+
+PACKAGES += kvlists
+pkg_kvlists_name = kvlists
+pkg_kvlists_description = Lists of key-value pairs (decoded JSON) in Erlang
+pkg_kvlists_homepage = https://github.com/jcomellas/kvlists
+pkg_kvlists_fetch = git
+pkg_kvlists_repo = https://github.com/jcomellas/kvlists
+pkg_kvlists_commit = master
+
+PACKAGES += kvs
+pkg_kvs_name = kvs
+pkg_kvs_description = Container and Iterator
+pkg_kvs_homepage = https://github.com/synrc/kvs
+pkg_kvs_fetch = git
+pkg_kvs_repo = https://github.com/synrc/kvs
+pkg_kvs_commit = master
+
+PACKAGES += lager
+pkg_lager_name = lager
+pkg_lager_description = A logging framework for Erlang/OTP.
+pkg_lager_homepage = https://github.com/erlang-lager/lager
+pkg_lager_fetch = git
+pkg_lager_repo = https://github.com/erlang-lager/lager
+pkg_lager_commit = master
+
+PACKAGES += lager_amqp_backend
+pkg_lager_amqp_backend_name = lager_amqp_backend
+pkg_lager_amqp_backend_description = AMQP RabbitMQ Lager backend
+pkg_lager_amqp_backend_homepage = https://github.com/jbrisbin/lager_amqp_backend
+pkg_lager_amqp_backend_fetch = git
+pkg_lager_amqp_backend_repo = https://github.com/jbrisbin/lager_amqp_backend
+pkg_lager_amqp_backend_commit = master
+
+PACKAGES += lager_syslog
+pkg_lager_syslog_name = lager_syslog
+pkg_lager_syslog_description = Syslog backend for lager
+pkg_lager_syslog_homepage = https://github.com/erlang-lager/lager_syslog
+pkg_lager_syslog_fetch = git
+pkg_lager_syslog_repo = https://github.com/erlang-lager/lager_syslog
+pkg_lager_syslog_commit = master
+
+PACKAGES += lambdapad
+pkg_lambdapad_name = lambdapad
+pkg_lambdapad_description = Static site generator using Erlang. Yes, Erlang.
+pkg_lambdapad_homepage = https://github.com/gar1t/lambdapad
+pkg_lambdapad_fetch = git
+pkg_lambdapad_repo = https://github.com/gar1t/lambdapad
+pkg_lambdapad_commit = master
+
+PACKAGES += lasp
+pkg_lasp_name = lasp
+pkg_lasp_description = A Language for Distributed, Eventually Consistent Computations
+pkg_lasp_homepage = http://lasp-lang.org/
+pkg_lasp_fetch = git
+pkg_lasp_repo = https://github.com/lasp-lang/lasp
+pkg_lasp_commit = master
+
+PACKAGES += lasse
+pkg_lasse_name = lasse
+pkg_lasse_description = SSE handler for Cowboy
+pkg_lasse_homepage = https://github.com/inaka/lasse
+pkg_lasse_fetch = git
+pkg_lasse_repo = https://github.com/inaka/lasse
+pkg_lasse_commit = master
+
+PACKAGES += ldap
+pkg_ldap_name = ldap
+pkg_ldap_description = LDAP server written in Erlang
+pkg_ldap_homepage = https://github.com/spawnproc/ldap
+pkg_ldap_fetch = git
+pkg_ldap_repo = https://github.com/spawnproc/ldap
+pkg_ldap_commit = master
+
+PACKAGES += lethink
+pkg_lethink_name = lethink
+pkg_lethink_description = erlang driver for rethinkdb
+pkg_lethink_homepage = https://github.com/taybin/lethink
+pkg_lethink_fetch = git
+pkg_lethink_repo = https://github.com/taybin/lethink
+pkg_lethink_commit = master
+
+PACKAGES += lfe
+pkg_lfe_name = lfe
+pkg_lfe_description = Lisp Flavoured Erlang (LFE)
+pkg_lfe_homepage = https://github.com/rvirding/lfe
+pkg_lfe_fetch = git
+pkg_lfe_repo = https://github.com/rvirding/lfe
+pkg_lfe_commit = master
+
+PACKAGES += ling
+pkg_ling_name = ling
+pkg_ling_description = Erlang on Xen
+pkg_ling_homepage = https://github.com/cloudozer/ling
+pkg_ling_fetch = git
+pkg_ling_repo = https://github.com/cloudozer/ling
+pkg_ling_commit = master
+
+PACKAGES += live
+pkg_live_name = live
+pkg_live_description = Automated module and configuration reloader.
+pkg_live_homepage = http://ninenines.eu
+pkg_live_fetch = git
+pkg_live_repo = https://github.com/ninenines/live
+pkg_live_commit = master
+
+PACKAGES += lmq
+pkg_lmq_name = lmq
+pkg_lmq_description = Lightweight Message Queue
+pkg_lmq_homepage = https://github.com/iij/lmq
+pkg_lmq_fetch = git
+pkg_lmq_repo = https://github.com/iij/lmq
+pkg_lmq_commit = master
+
+PACKAGES += locker
+pkg_locker_name = locker
+pkg_locker_description = Atomic distributed 'check and set' for short-lived keys
+pkg_locker_homepage = https://github.com/wooga/locker
+pkg_locker_fetch = git
+pkg_locker_repo = https://github.com/wooga/locker
+pkg_locker_commit = master
+
+PACKAGES += locks
+pkg_locks_name = locks
+pkg_locks_description = A scalable, deadlock-resolving resource locker
+pkg_locks_homepage = https://github.com/uwiger/locks
+pkg_locks_fetch = git
+pkg_locks_repo = https://github.com/uwiger/locks
+pkg_locks_commit = master
+
+PACKAGES += log4erl
+pkg_log4erl_name = log4erl
+pkg_log4erl_description = A logger for erlang in the spirit of Log4J.
+pkg_log4erl_homepage = https://github.com/ahmednawras/log4erl
+pkg_log4erl_fetch = git
+pkg_log4erl_repo = https://github.com/ahmednawras/log4erl
+pkg_log4erl_commit = master
+
+PACKAGES += lol
+pkg_lol_name = lol
+pkg_lol_description = Lisp on erLang, and programming is fun again
+pkg_lol_homepage = https://github.com/b0oh/lol
+pkg_lol_fetch = git
+pkg_lol_repo = https://github.com/b0oh/lol
+pkg_lol_commit = master
+
+PACKAGES += lucid
+pkg_lucid_name = lucid
+pkg_lucid_description = HTTP/2 server written in Erlang
+pkg_lucid_homepage = https://github.com/tatsuhiro-t/lucid
+pkg_lucid_fetch = git
+pkg_lucid_repo = https://github.com/tatsuhiro-t/lucid
+pkg_lucid_commit = master
+
+PACKAGES += luerl
+pkg_luerl_name = luerl
+pkg_luerl_description = Lua in Erlang
+pkg_luerl_homepage = https://github.com/rvirding/luerl
+pkg_luerl_fetch = git
+pkg_luerl_repo = https://github.com/rvirding/luerl
+pkg_luerl_commit = develop
+
+PACKAGES += luwak
+pkg_luwak_name = luwak
+pkg_luwak_description = Large-object storage interface for Riak
+pkg_luwak_homepage = https://github.com/basho/luwak
+pkg_luwak_fetch = git
+pkg_luwak_repo = https://github.com/basho/luwak
+pkg_luwak_commit = master
+
+PACKAGES += lux
+pkg_lux_name = lux
+pkg_lux_description = Lux (LUcid eXpect scripting) simplifies test automation and provides an Expect-style execution of commands
+pkg_lux_homepage = https://github.com/hawk/lux
+pkg_lux_fetch = git
+pkg_lux_repo = https://github.com/hawk/lux
+pkg_lux_commit = master
+
+PACKAGES += machi
+pkg_machi_name = machi
+pkg_machi_description = Machi file store
+pkg_machi_homepage = https://github.com/basho/machi
+pkg_machi_fetch = git
+pkg_machi_repo = https://github.com/basho/machi
+pkg_machi_commit = master
+
+PACKAGES += mad
+pkg_mad_name = mad
+pkg_mad_description = Small and Fast Rebar Replacement
+pkg_mad_homepage = https://github.com/synrc/mad
+pkg_mad_fetch = git
+pkg_mad_repo = https://github.com/synrc/mad
+pkg_mad_commit = master
+
+PACKAGES += marina
+pkg_marina_name = marina
+pkg_marina_description = Non-blocking Erlang Cassandra CQL3 client
+pkg_marina_homepage = https://github.com/lpgauth/marina
+pkg_marina_fetch = git
+pkg_marina_repo = https://github.com/lpgauth/marina
+pkg_marina_commit = master
+
+PACKAGES += mavg
+pkg_mavg_name = mavg
+pkg_mavg_description = Erlang :: Exponential moving average library
+pkg_mavg_homepage = https://github.com/EchoTeam/mavg
+pkg_mavg_fetch = git
+pkg_mavg_repo = https://github.com/EchoTeam/mavg
+pkg_mavg_commit = master
+
+PACKAGES += mc_erl
+pkg_mc_erl_name = mc_erl
+pkg_mc_erl_description = mc-erl is a server for Minecraft 1.4.7 written in Erlang.
+pkg_mc_erl_homepage = https://github.com/clonejo/mc-erl
+pkg_mc_erl_fetch = git
+pkg_mc_erl_repo = https://github.com/clonejo/mc-erl
+pkg_mc_erl_commit = master
+
+PACKAGES += mcd
+pkg_mcd_name = mcd
+pkg_mcd_description = Fast memcached protocol client in pure Erlang
+pkg_mcd_homepage = https://github.com/EchoTeam/mcd
+pkg_mcd_fetch = git
+pkg_mcd_repo = https://github.com/EchoTeam/mcd
+pkg_mcd_commit = master
+
+PACKAGES += mcerlang
+pkg_mcerlang_name = mcerlang
+pkg_mcerlang_description = The McErlang model checker for Erlang
+pkg_mcerlang_homepage = https://github.com/fredlund/McErlang
+pkg_mcerlang_fetch = git
+pkg_mcerlang_repo = https://github.com/fredlund/McErlang
+pkg_mcerlang_commit = master
+
+PACKAGES += meck
+pkg_meck_name = meck
+pkg_meck_description = A mocking library for Erlang
+pkg_meck_homepage = https://github.com/eproxus/meck
+pkg_meck_fetch = git
+pkg_meck_repo = https://github.com/eproxus/meck
+pkg_meck_commit = master
+
+PACKAGES += mekao
+pkg_mekao_name = mekao
+pkg_mekao_description = SQL constructor
+pkg_mekao_homepage = https://github.com/ddosia/mekao
+pkg_mekao_fetch = git
+pkg_mekao_repo = https://github.com/ddosia/mekao
+pkg_mekao_commit = master
+
+PACKAGES += memo
+pkg_memo_name = memo
+pkg_memo_description = Erlang memoization server
+pkg_memo_homepage = https://github.com/tuncer/memo
+pkg_memo_fetch = git
+pkg_memo_repo = https://github.com/tuncer/memo
+pkg_memo_commit = master
+
+PACKAGES += merge_index
+pkg_merge_index_name = merge_index
+pkg_merge_index_description = MergeIndex is an Erlang library for storing ordered sets on disk. It is very similar to an SSTable (in Google's Bigtable) or an HFile (in Hadoop).
+pkg_merge_index_homepage = https://github.com/basho/merge_index
+pkg_merge_index_fetch = git
+pkg_merge_index_repo = https://github.com/basho/merge_index
+pkg_merge_index_commit = master
+
+PACKAGES += merl
+pkg_merl_name = merl
+pkg_merl_description = Metaprogramming in Erlang
+pkg_merl_homepage = https://github.com/richcarl/merl
+pkg_merl_fetch = git
+pkg_merl_repo = https://github.com/richcarl/merl
+pkg_merl_commit = master
+
+PACKAGES += mimerl
+pkg_mimerl_name = mimerl
+pkg_mimerl_description = library to handle mimetypes
+pkg_mimerl_homepage = https://github.com/benoitc/mimerl
+pkg_mimerl_fetch = git
+pkg_mimerl_repo = https://github.com/benoitc/mimerl
+pkg_mimerl_commit = master
+
+PACKAGES += mimetypes
+pkg_mimetypes_name = mimetypes
+pkg_mimetypes_description = Erlang MIME types library
+pkg_mimetypes_homepage = https://github.com/spawngrid/mimetypes
+pkg_mimetypes_fetch = git
+pkg_mimetypes_repo = https://github.com/spawngrid/mimetypes
+pkg_mimetypes_commit = master
+
+PACKAGES += mixer
+pkg_mixer_name = mixer
+pkg_mixer_description = Mix in functions from other modules
+pkg_mixer_homepage = https://github.com/chef/mixer
+pkg_mixer_fetch = git
+pkg_mixer_repo = https://github.com/chef/mixer
+pkg_mixer_commit = master
+
+PACKAGES += mochiweb
+pkg_mochiweb_name = mochiweb
+pkg_mochiweb_description = MochiWeb is an Erlang library for building lightweight HTTP servers.
+pkg_mochiweb_homepage = https://github.com/mochi/mochiweb
+pkg_mochiweb_fetch = git
+pkg_mochiweb_repo = https://github.com/mochi/mochiweb
+pkg_mochiweb_commit = master
+
+PACKAGES += mochiweb_xpath
+pkg_mochiweb_xpath_name = mochiweb_xpath
+pkg_mochiweb_xpath_description = XPath support for mochiweb's html parser
+pkg_mochiweb_xpath_homepage = https://github.com/retnuh/mochiweb_xpath
+pkg_mochiweb_xpath_fetch = git
+pkg_mochiweb_xpath_repo = https://github.com/retnuh/mochiweb_xpath
+pkg_mochiweb_xpath_commit = master
+
+PACKAGES += mockgyver
+pkg_mockgyver_name = mockgyver
+pkg_mockgyver_description = A mocking library for Erlang
+pkg_mockgyver_homepage = https://github.com/klajo/mockgyver
+pkg_mockgyver_fetch = git
+pkg_mockgyver_repo = https://github.com/klajo/mockgyver
+pkg_mockgyver_commit = master
+
+PACKAGES += modlib
+pkg_modlib_name = modlib
+pkg_modlib_description = Web framework based on Erlang's inets httpd
+pkg_modlib_homepage = https://github.com/gar1t/modlib
+pkg_modlib_fetch = git
+pkg_modlib_repo = https://github.com/gar1t/modlib
+pkg_modlib_commit = master
+
+PACKAGES += mongodb
+pkg_mongodb_name = mongodb
+pkg_mongodb_description = MongoDB driver for Erlang
+pkg_mongodb_homepage = https://github.com/comtihon/mongodb-erlang
+pkg_mongodb_fetch = git
+pkg_mongodb_repo = https://github.com/comtihon/mongodb-erlang
+pkg_mongodb_commit = master
+
+PACKAGES += mongooseim
+pkg_mongooseim_name = mongooseim
+pkg_mongooseim_description = Jabber / XMPP server with focus on performance and scalability, by Erlang Solutions
+pkg_mongooseim_homepage = https://www.erlang-solutions.com/products/mongooseim-massively-scalable-ejabberd-platform
+pkg_mongooseim_fetch = git
+pkg_mongooseim_repo = https://github.com/esl/MongooseIM
+pkg_mongooseim_commit = master
+
+PACKAGES += moyo
+pkg_moyo_name = moyo
+pkg_moyo_description = Erlang utility functions library
+pkg_moyo_homepage = https://github.com/dwango/moyo
+pkg_moyo_fetch = git
+pkg_moyo_repo = https://github.com/dwango/moyo
+pkg_moyo_commit = master
+
+PACKAGES += msgpack
+pkg_msgpack_name = msgpack
+pkg_msgpack_description = MessagePack (de)serializer implementation for Erlang
+pkg_msgpack_homepage = https://github.com/msgpack/msgpack-erlang
+pkg_msgpack_fetch = git
+pkg_msgpack_repo = https://github.com/msgpack/msgpack-erlang
+pkg_msgpack_commit = master
+
+PACKAGES += mu2
+pkg_mu2_name = mu2
+pkg_mu2_description = Erlang mutation testing tool
+pkg_mu2_homepage = https://github.com/ramsay-t/mu2
+pkg_mu2_fetch = git
+pkg_mu2_repo = https://github.com/ramsay-t/mu2
+pkg_mu2_commit = master
+
+PACKAGES += mustache
+pkg_mustache_name = mustache
+pkg_mustache_description = Mustache template engine for Erlang.
+pkg_mustache_homepage = https://github.com/mojombo/mustache.erl
+pkg_mustache_fetch = git
+pkg_mustache_repo = https://github.com/mojombo/mustache.erl
+pkg_mustache_commit = master
+
+PACKAGES += myproto
+pkg_myproto_name = myproto
+pkg_myproto_description = MySQL Server Protocol in Erlang
+pkg_myproto_homepage = https://github.com/altenwald/myproto
+pkg_myproto_fetch = git
+pkg_myproto_repo = https://github.com/altenwald/myproto
+pkg_myproto_commit = master
+
+PACKAGES += mysql
+pkg_mysql_name = mysql
+pkg_mysql_description = MySQL client library for Erlang/OTP
+pkg_mysql_homepage = https://github.com/mysql-otp/mysql-otp
+pkg_mysql_fetch = git
+pkg_mysql_repo = https://github.com/mysql-otp/mysql-otp
+pkg_mysql_commit = 1.5.1
+
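Most entries track master, but the commit field is handed straight to git, so tags work as well, as the mysql entry above (pinned to 1.5.1) shows. A sketch of bypassing the index defaults entirely with erlang.mk's dep_NAME form (the fork URL and branch below are illustrative):

    # "dep_NAME = fetch-method repository ref" replaces the pkg_mysql_*
    # defaults for this one dependency.
    DEPS = mysql
    dep_mysql = git https://github.com/example/mysql-otp-fork my-branch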
+PACKAGES += n2o
+pkg_n2o_name = n2o
+pkg_n2o_description = WebSocket Application Server
+pkg_n2o_homepage = https://github.com/5HT/n2o
+pkg_n2o_fetch = git
+pkg_n2o_repo = https://github.com/5HT/n2o
+pkg_n2o_commit = master
+
+PACKAGES += nat_upnp
+pkg_nat_upnp_name = nat_upnp
+pkg_nat_upnp_description = Erlang library to map an internal port to an external one using UPnP IGD
+pkg_nat_upnp_homepage = https://github.com/benoitc/nat_upnp
+pkg_nat_upnp_fetch = git
+pkg_nat_upnp_repo = https://github.com/benoitc/nat_upnp
+pkg_nat_upnp_commit = master
+
+PACKAGES += neo4j
+pkg_neo4j_name = neo4j
+pkg_neo4j_description = Erlang client library for Neo4J.
+pkg_neo4j_homepage = https://github.com/dmitriid/neo4j-erlang
+pkg_neo4j_fetch = git
+pkg_neo4j_repo = https://github.com/dmitriid/neo4j-erlang
+pkg_neo4j_commit = master
+
+PACKAGES += neotoma
+pkg_neotoma_name = neotoma
+pkg_neotoma_description = Erlang library and packrat parser-generator for parsing expression grammars.
+pkg_neotoma_homepage = https://github.com/seancribbs/neotoma
+pkg_neotoma_fetch = git
+pkg_neotoma_repo = https://github.com/seancribbs/neotoma
+pkg_neotoma_commit = master
+
+PACKAGES += newrelic
+pkg_newrelic_name = newrelic
+pkg_newrelic_description = Erlang library for sending metrics to New Relic
+pkg_newrelic_homepage = https://github.com/wooga/newrelic-erlang
+pkg_newrelic_fetch = git
+pkg_newrelic_repo = https://github.com/wooga/newrelic-erlang
+pkg_newrelic_commit = master
+
+PACKAGES += nifty
+pkg_nifty_name = nifty
+pkg_nifty_description = Erlang NIF wrapper generator
+pkg_nifty_homepage = https://github.com/parapluu/nifty
+pkg_nifty_fetch = git
+pkg_nifty_repo = https://github.com/parapluu/nifty
+pkg_nifty_commit = master
+
+PACKAGES += nitrogen_core
+pkg_nitrogen_core_name = nitrogen_core
+pkg_nitrogen_core_description = The core Nitrogen library.
+pkg_nitrogen_core_homepage = http://nitrogenproject.com/
+pkg_nitrogen_core_fetch = git
+pkg_nitrogen_core_repo = https://github.com/nitrogen/nitrogen_core
+pkg_nitrogen_core_commit = master
+
+PACKAGES += nkbase
+pkg_nkbase_name = nkbase
+pkg_nkbase_description = NkBASE distributed database
+pkg_nkbase_homepage = https://github.com/Nekso/nkbase
+pkg_nkbase_fetch = git
+pkg_nkbase_repo = https://github.com/Nekso/nkbase
+pkg_nkbase_commit = develop
+
+PACKAGES += nkdocker
+pkg_nkdocker_name = nkdocker
+pkg_nkdocker_description = Erlang Docker client
+pkg_nkdocker_homepage = https://github.com/Nekso/nkdocker
+pkg_nkdocker_fetch = git
+pkg_nkdocker_repo = https://github.com/Nekso/nkdocker
+pkg_nkdocker_commit = master
+
+PACKAGES += nkpacket
+pkg_nkpacket_name = nkpacket
+pkg_nkpacket_description = Generic Erlang transport layer
+pkg_nkpacket_homepage = https://github.com/Nekso/nkpacket
+pkg_nkpacket_fetch = git
+pkg_nkpacket_repo = https://github.com/Nekso/nkpacket
+pkg_nkpacket_commit = master
+
+PACKAGES += nksip
+pkg_nksip_name = nksip
+pkg_nksip_description = Erlang SIP application server
+pkg_nksip_homepage = https://github.com/kalta/nksip
+pkg_nksip_fetch = git
+pkg_nksip_repo = https://github.com/kalta/nksip
+pkg_nksip_commit = master
+
+PACKAGES += nodefinder
+pkg_nodefinder_name = nodefinder
+pkg_nodefinder_description = automatic node discovery via UDP multicast
+pkg_nodefinder_homepage = https://github.com/erlanger/nodefinder
+pkg_nodefinder_fetch = git
+pkg_nodefinder_repo = https://github.com/okeuday/nodefinder
+pkg_nodefinder_commit = master
+
+PACKAGES += nprocreg
+pkg_nprocreg_name = nprocreg
+pkg_nprocreg_description = Minimal Distributed Erlang Process Registry
+pkg_nprocreg_homepage = http://nitrogenproject.com/
+pkg_nprocreg_fetch = git
+pkg_nprocreg_repo = https://github.com/nitrogen/nprocreg
+pkg_nprocreg_commit = master
+
+PACKAGES += oauth
+pkg_oauth_name = oauth
+pkg_oauth_description = An Erlang OAuth 1.0 implementation
+pkg_oauth_homepage = https://github.com/tim/erlang-oauth
+pkg_oauth_fetch = git
+pkg_oauth_repo = https://github.com/tim/erlang-oauth
+pkg_oauth_commit = master
+
+PACKAGES += oauth2
+pkg_oauth2_name = oauth2
+pkg_oauth2_description = Erlang OAuth2 implementation
+pkg_oauth2_homepage = https://github.com/kivra/oauth2
+pkg_oauth2_fetch = git
+pkg_oauth2_repo = https://github.com/kivra/oauth2
+pkg_oauth2_commit = master
+
+PACKAGES += observer_cli
+pkg_observer_cli_name = observer_cli
+pkg_observer_cli_description = Visualize Erlang/Elixir Nodes On The Command Line
+pkg_observer_cli_homepage = http://zhongwencool.github.io/observer_cli
+pkg_observer_cli_fetch = git
+pkg_observer_cli_repo = https://github.com/zhongwencool/observer_cli
+pkg_observer_cli_commit = master
+
+PACKAGES += octopus
+pkg_octopus_name = octopus
+pkg_octopus_description = Small and flexible pool manager written in Erlang
+pkg_octopus_homepage = https://github.com/erlangbureau/octopus
+pkg_octopus_fetch = git
+pkg_octopus_repo = https://github.com/erlangbureau/octopus
+pkg_octopus_commit = master
+
+PACKAGES += of_protocol
+pkg_of_protocol_name = of_protocol
+pkg_of_protocol_description = OpenFlow Protocol Library for Erlang
+pkg_of_protocol_homepage = https://github.com/FlowForwarding/of_protocol
+pkg_of_protocol_fetch = git
+pkg_of_protocol_repo = https://github.com/FlowForwarding/of_protocol
+pkg_of_protocol_commit = master
+
+PACKAGES += opencouch
+pkg_opencouch_name = couch
+pkg_opencouch_description = An embeddable document-oriented database compatible with Apache CouchDB
+pkg_opencouch_homepage = https://github.com/benoitc/opencouch
+pkg_opencouch_fetch = git
+pkg_opencouch_repo = https://github.com/benoitc/opencouch
+pkg_opencouch_commit = master
+
+PACKAGES += openflow
+pkg_openflow_name = openflow
+pkg_openflow_description = An OpenFlow controller written in pure erlang
+pkg_openflow_homepage = https://github.com/renatoaguiar/erlang-openflow
+pkg_openflow_fetch = git
+pkg_openflow_repo = https://github.com/renatoaguiar/erlang-openflow
+pkg_openflow_commit = master
+
+PACKAGES += openid
+pkg_openid_name = openid
+pkg_openid_description = Erlang OpenID
+pkg_openid_homepage = https://github.com/brendonh/erl_openid
+pkg_openid_fetch = git
+pkg_openid_repo = https://github.com/brendonh/erl_openid
+pkg_openid_commit = master
+
+PACKAGES += openpoker
+pkg_openpoker_name = openpoker
+pkg_openpoker_description = Genesis Texas hold'em Game Server
+pkg_openpoker_homepage = https://github.com/hpyhacking/openpoker
+pkg_openpoker_fetch = git
+pkg_openpoker_repo = https://github.com/hpyhacking/openpoker
+pkg_openpoker_commit = master
+
+PACKAGES += otpbp
+pkg_otpbp_name = otpbp
+pkg_otpbp_description = Parse transformer for using new OTP functions in old Erlang/OTP releases (R15, R16, 17, 18, 19)
+pkg_otpbp_homepage = https://github.com/Ledest/otpbp
+pkg_otpbp_fetch = git
+pkg_otpbp_repo = https://github.com/Ledest/otpbp
+pkg_otpbp_commit = master
+
+PACKAGES += pal
+pkg_pal_name = pal
+pkg_pal_description = Pragmatic Authentication Library
+pkg_pal_homepage = https://github.com/manifest/pal
+pkg_pal_fetch = git
+pkg_pal_repo = https://github.com/manifest/pal
+pkg_pal_commit = master
+
+PACKAGES += parse_trans
+pkg_parse_trans_name = parse_trans
+pkg_parse_trans_description = Parse transform utilities for Erlang
+pkg_parse_trans_homepage = https://github.com/uwiger/parse_trans
+pkg_parse_trans_fetch = git
+pkg_parse_trans_repo = https://github.com/uwiger/parse_trans
+pkg_parse_trans_commit = master
+
+PACKAGES += parsexml
+pkg_parsexml_name = parsexml
+pkg_parsexml_description = Simple DOM XML parser with convenient and very simple API
+pkg_parsexml_homepage = https://github.com/maxlapshin/parsexml
+pkg_parsexml_fetch = git
+pkg_parsexml_repo = https://github.com/maxlapshin/parsexml
+pkg_parsexml_commit = master
+
+PACKAGES += partisan
+pkg_partisan_name = partisan
+pkg_partisan_description = High-performance, high-scalability distributed computing with Erlang and Elixir.
+pkg_partisan_homepage = http://partisan.cloud
+pkg_partisan_fetch = git
+pkg_partisan_repo = https://github.com/lasp-lang/partisan
+pkg_partisan_commit = master
+
+PACKAGES += pegjs
+pkg_pegjs_name = pegjs
+pkg_pegjs_description = An implementation of PEG.js grammar for Erlang.
+pkg_pegjs_homepage = https://github.com/dmitriid/pegjs
+pkg_pegjs_fetch = git
+pkg_pegjs_repo = https://github.com/dmitriid/pegjs
+pkg_pegjs_commit = master
+
+PACKAGES += percept2
+pkg_percept2_name = percept2
+pkg_percept2_description = Concurrent profiling tool for Erlang
+pkg_percept2_homepage = https://github.com/huiqing/percept2
+pkg_percept2_fetch = git
+pkg_percept2_repo = https://github.com/huiqing/percept2
+pkg_percept2_commit = master
+
+PACKAGES += pgo
+pkg_pgo_name = pgo
+pkg_pgo_description = Erlang Postgres client and connection pool
+pkg_pgo_homepage = https://github.com/erleans/pgo
+pkg_pgo_fetch = git
+pkg_pgo_repo = https://github.com/erleans/pgo.git
+pkg_pgo_commit = master
+
+PACKAGES += pgsql
+pkg_pgsql_name = pgsql
+pkg_pgsql_description = Erlang PostgreSQL driver
+pkg_pgsql_homepage = https://github.com/semiocast/pgsql
+pkg_pgsql_fetch = git
+pkg_pgsql_repo = https://github.com/semiocast/pgsql
+pkg_pgsql_commit = master
+
+PACKAGES += pkgx
+pkg_pkgx_name = pkgx
+pkg_pkgx_description = Build .deb packages from Erlang releases
+pkg_pkgx_homepage = https://github.com/arjan/pkgx
+pkg_pkgx_fetch = git
+pkg_pkgx_repo = https://github.com/arjan/pkgx
+pkg_pkgx_commit = master
+
+PACKAGES += pkt
+pkg_pkt_name = pkt
+pkg_pkt_description = Erlang network protocol library
+pkg_pkt_homepage = https://github.com/msantos/pkt
+pkg_pkt_fetch = git
+pkg_pkt_repo = https://github.com/msantos/pkt
+pkg_pkt_commit = master
+
+PACKAGES += plain_fsm
+pkg_plain_fsm_name = plain_fsm
+pkg_plain_fsm_description = A behaviour/support library for writing plain Erlang FSMs.
+pkg_plain_fsm_homepage = https://github.com/uwiger/plain_fsm
+pkg_plain_fsm_fetch = git
+pkg_plain_fsm_repo = https://github.com/uwiger/plain_fsm
+pkg_plain_fsm_commit = master
+
+PACKAGES += plumtree
+pkg_plumtree_name = plumtree
+pkg_plumtree_description = Epidemic Broadcast Trees
+pkg_plumtree_homepage = https://github.com/helium/plumtree
+pkg_plumtree_fetch = git
+pkg_plumtree_repo = https://github.com/helium/plumtree
+pkg_plumtree_commit = master
+
+PACKAGES += pmod_transform
+pkg_pmod_transform_name = pmod_transform
+pkg_pmod_transform_description = Parse transform for parameterized modules
+pkg_pmod_transform_homepage = https://github.com/erlang/pmod_transform
+pkg_pmod_transform_fetch = git
+pkg_pmod_transform_repo = https://github.com/erlang/pmod_transform
+pkg_pmod_transform_commit = master
+
+PACKAGES += pobox
+pkg_pobox_name = pobox
+pkg_pobox_description = External buffer processes to protect against mailbox overflow in Erlang
+pkg_pobox_homepage = https://github.com/ferd/pobox
+pkg_pobox_fetch = git
+pkg_pobox_repo = https://github.com/ferd/pobox
+pkg_pobox_commit = master
+
+PACKAGES += ponos
+pkg_ponos_name = ponos
+pkg_ponos_description = ponos is a simple yet powerful load generator written in erlang
+pkg_ponos_homepage = https://github.com/klarna/ponos
+pkg_ponos_fetch = git
+pkg_ponos_repo = https://github.com/klarna/ponos
+pkg_ponos_commit = master
+
+PACKAGES += poolboy
+pkg_poolboy_name = poolboy
+pkg_poolboy_description = A hunky Erlang worker pool factory
+pkg_poolboy_homepage = https://github.com/devinus/poolboy
+pkg_poolboy_fetch = git
+pkg_poolboy_repo = https://github.com/devinus/poolboy
+pkg_poolboy_commit = master
+
+PACKAGES += pooler
+pkg_pooler_name = pooler
+pkg_pooler_description = An OTP Process Pool Application
+pkg_pooler_homepage = https://github.com/seth/pooler
+pkg_pooler_fetch = git
+pkg_pooler_repo = https://github.com/seth/pooler
+pkg_pooler_commit = master
+
+PACKAGES += pqueue
+pkg_pqueue_name = pqueue
+pkg_pqueue_description = Erlang Priority Queues
+pkg_pqueue_homepage = https://github.com/okeuday/pqueue
+pkg_pqueue_fetch = git
+pkg_pqueue_repo = https://github.com/okeuday/pqueue
+pkg_pqueue_commit = master
+
+PACKAGES += procket
+pkg_procket_name = procket
+pkg_procket_description = Erlang interface to low level socket operations
+pkg_procket_homepage = http://blog.listincomprehension.com/search/label/procket
+pkg_procket_fetch = git
+pkg_procket_repo = https://github.com/msantos/procket
+pkg_procket_commit = master
+
+PACKAGES += prometheus
+pkg_prometheus_name = prometheus
+pkg_prometheus_description = Prometheus.io client in Erlang
+pkg_prometheus_homepage = https://github.com/deadtrickster/prometheus.erl
+pkg_prometheus_fetch = git
+pkg_prometheus_repo = https://github.com/deadtrickster/prometheus.erl
+pkg_prometheus_commit = master
+
+PACKAGES += prop
+pkg_prop_name = prop
+pkg_prop_description = An Erlang code scaffolding and generator system.
+pkg_prop_homepage = https://github.com/nuex/prop
+pkg_prop_fetch = git
+pkg_prop_repo = https://github.com/nuex/prop
+pkg_prop_commit = master
+
+PACKAGES += proper
+pkg_proper_name = proper
+pkg_proper_description = PropEr: a QuickCheck-inspired property-based testing tool for Erlang.
+pkg_proper_homepage = http://proper.softlab.ntua.gr
+pkg_proper_fetch = git
+pkg_proper_repo = https://github.com/manopapad/proper
+pkg_proper_commit = master
+
+PACKAGES += props
+pkg_props_name = props
+pkg_props_description = Property structure library
+pkg_props_homepage = https://github.com/greyarea/props
+pkg_props_fetch = git
+pkg_props_repo = https://github.com/greyarea/props
+pkg_props_commit = master
+
+PACKAGES += protobuffs
+pkg_protobuffs_name = protobuffs
+pkg_protobuffs_description = An implementation of Google's Protocol Buffers for Erlang, based on ngerakines/erlang_protobuffs.
+pkg_protobuffs_homepage = https://github.com/basho/erlang_protobuffs
+pkg_protobuffs_fetch = git
+pkg_protobuffs_repo = https://github.com/basho/erlang_protobuffs
+pkg_protobuffs_commit = master
+
+PACKAGES += psycho
+pkg_psycho_name = psycho
+pkg_psycho_description = HTTP server that provides a WSGI-like interface for applications and middleware.
+pkg_psycho_homepage = https://github.com/gar1t/psycho
+pkg_psycho_fetch = git
+pkg_psycho_repo = https://github.com/gar1t/psycho
+pkg_psycho_commit = master
+
+PACKAGES += purity
+pkg_purity_name = purity
+pkg_purity_description = A side-effect analyzer for Erlang
+pkg_purity_homepage = https://github.com/mpitid/purity
+pkg_purity_fetch = git
+pkg_purity_repo = https://github.com/mpitid/purity
+pkg_purity_commit = master
+
+PACKAGES += push_service
+pkg_push_service_name = push_service
+pkg_push_service_description = Push service
+pkg_push_service_homepage = https://github.com/hairyhum/push_service
+pkg_push_service_fetch = git
+pkg_push_service_repo = https://github.com/hairyhum/push_service
+pkg_push_service_commit = master
+
+PACKAGES += qdate
+pkg_qdate_name = qdate
+pkg_qdate_description = Date, time, and timezone parsing, formatting, and conversion for Erlang.
+pkg_qdate_homepage = https://github.com/choptastic/qdate
+pkg_qdate_fetch = git
+pkg_qdate_repo = https://github.com/choptastic/qdate
+pkg_qdate_commit = master
+
+PACKAGES += qrcode
+pkg_qrcode_name = qrcode
+pkg_qrcode_description = QR Code encoder in Erlang
+pkg_qrcode_homepage = https://github.com/komone/qrcode
+pkg_qrcode_fetch = git
+pkg_qrcode_repo = https://github.com/komone/qrcode
+pkg_qrcode_commit = master
+
+PACKAGES += quest
+pkg_quest_name = quest
+pkg_quest_description = Learn Erlang through this set of challenges. An interactive system for getting to know Erlang.
+pkg_quest_homepage = https://github.com/eriksoe/ErlangQuest
+pkg_quest_fetch = git
+pkg_quest_repo = https://github.com/eriksoe/ErlangQuest
+pkg_quest_commit = master
+
+PACKAGES += quickrand
+pkg_quickrand_name = quickrand
+pkg_quickrand_description = Quick Erlang Random Number Generation
+pkg_quickrand_homepage = https://github.com/okeuday/quickrand
+pkg_quickrand_fetch = git
+pkg_quickrand_repo = https://github.com/okeuday/quickrand
+pkg_quickrand_commit = master
+
+PACKAGES += rabbit
+pkg_rabbit_name = rabbit
+pkg_rabbit_description = RabbitMQ Server
+pkg_rabbit_homepage = https://www.rabbitmq.com/
+pkg_rabbit_fetch = git
+pkg_rabbit_repo = https://github.com/rabbitmq/rabbitmq-server.git
+pkg_rabbit_commit = master
+
+PACKAGES += rabbit_exchange_type_riak
+pkg_rabbit_exchange_type_riak_name = rabbit_exchange_type_riak
+pkg_rabbit_exchange_type_riak_description = Custom RabbitMQ exchange type for sticking messages in Riak
+pkg_rabbit_exchange_type_riak_homepage = https://github.com/jbrisbin/riak-exchange
+pkg_rabbit_exchange_type_riak_fetch = git
+pkg_rabbit_exchange_type_riak_repo = https://github.com/jbrisbin/riak-exchange
+pkg_rabbit_exchange_type_riak_commit = master
+
+PACKAGES += rack
+pkg_rack_name = rack
+pkg_rack_description = Rack handler for erlang
+pkg_rack_homepage = https://github.com/erlyvideo/rack
+pkg_rack_fetch = git
+pkg_rack_repo = https://github.com/erlyvideo/rack
+pkg_rack_commit = master
+
+PACKAGES += radierl
+pkg_radierl_name = radierl
+pkg_radierl_description = RADIUS protocol stack implemented in Erlang.
+pkg_radierl_homepage = https://github.com/vances/radierl
+pkg_radierl_fetch = git
+pkg_radierl_repo = https://github.com/vances/radierl
+pkg_radierl_commit = master
+
+PACKAGES += rafter
+pkg_rafter_name = rafter
+pkg_rafter_description = An Erlang library application which implements the Raft consensus protocol
+pkg_rafter_homepage = https://github.com/andrewjstone/rafter
+pkg_rafter_fetch = git
+pkg_rafter_repo = https://github.com/andrewjstone/rafter
+pkg_rafter_commit = master
+
+PACKAGES += ranch
+pkg_ranch_name = ranch
+pkg_ranch_description = Socket acceptor pool for TCP protocols.
+pkg_ranch_homepage = http://ninenines.eu
+pkg_ranch_fetch = git
+pkg_ranch_repo = https://github.com/ninenines/ranch
+pkg_ranch_commit = 1.2.1
+
+PACKAGES += rbeacon
+pkg_rbeacon_name = rbeacon
+pkg_rbeacon_description = LAN discovery and presence in Erlang.
+pkg_rbeacon_homepage = https://github.com/refuge/rbeacon
+pkg_rbeacon_fetch = git
+pkg_rbeacon_repo = https://github.com/refuge/rbeacon
+pkg_rbeacon_commit = master
+
+PACKAGES += rebar
+pkg_rebar_name = rebar
+pkg_rebar_description = Erlang build tool that makes it easy to compile and test Erlang applications, port drivers and releases.
+pkg_rebar_homepage = http://www.rebar3.org
+pkg_rebar_fetch = git
+pkg_rebar_repo = https://github.com/rebar/rebar3
+pkg_rebar_commit = master
+
+PACKAGES += rebus
+pkg_rebus_name = rebus
+pkg_rebus_description = A stupid simple, internal, pub/sub event bus written in and for Erlang.
+pkg_rebus_homepage = https://github.com/olle/rebus
+pkg_rebus_fetch = git
+pkg_rebus_repo = https://github.com/olle/rebus
+pkg_rebus_commit = master
+
+PACKAGES += rec2json
+pkg_rec2json_name = rec2json
+pkg_rec2json_description = Compile erlang record definitions into modules to convert them to/from json easily.
+pkg_rec2json_homepage = https://github.com/lordnull/rec2json
+pkg_rec2json_fetch = git
+pkg_rec2json_repo = https://github.com/lordnull/rec2json
+pkg_rec2json_commit = master
+
+PACKAGES += recon
+pkg_recon_name = recon
+pkg_recon_description = Collection of functions and scripts to debug Erlang in production.
+pkg_recon_homepage = https://github.com/ferd/recon
+pkg_recon_fetch = git
+pkg_recon_repo = https://github.com/ferd/recon
+pkg_recon_commit = master
+
+PACKAGES += record_info
+pkg_record_info_name = record_info
+pkg_record_info_description = Convert between record and proplist
+pkg_record_info_homepage = https://github.com/bipthelin/erlang-record_info
+pkg_record_info_fetch = git
+pkg_record_info_repo = https://github.com/bipthelin/erlang-record_info
+pkg_record_info_commit = master
+
+PACKAGES += redgrid
+pkg_redgrid_name = redgrid
+pkg_redgrid_description = automatic Erlang node discovery via redis
+pkg_redgrid_homepage = https://github.com/jkvor/redgrid
+pkg_redgrid_fetch = git
+pkg_redgrid_repo = https://github.com/jkvor/redgrid
+pkg_redgrid_commit = master
+
+PACKAGES += redo
+pkg_redo_name = redo
+pkg_redo_description = pipelined erlang redis client
+pkg_redo_homepage = https://github.com/jkvor/redo
+pkg_redo_fetch = git
+pkg_redo_repo = https://github.com/jkvor/redo
+pkg_redo_commit = master
+
+PACKAGES += reload_mk
+pkg_reload_mk_name = reload_mk
+pkg_reload_mk_description = Live reload plugin for erlang.mk.
+pkg_reload_mk_homepage = https://github.com/bullno1/reload.mk
+pkg_reload_mk_fetch = git
+pkg_reload_mk_repo = https://github.com/bullno1/reload.mk
+pkg_reload_mk_commit = master
+
+PACKAGES += reltool_util
+pkg_reltool_util_name = reltool_util
+pkg_reltool_util_description = Erlang reltool utility functionality application
+pkg_reltool_util_homepage = https://github.com/okeuday/reltool_util
+pkg_reltool_util_fetch = git
+pkg_reltool_util_repo = https://github.com/okeuday/reltool_util
+pkg_reltool_util_commit = master
+
+PACKAGES += relx
+pkg_relx_name = relx
+pkg_relx_description = Sane, simple release creation for Erlang
+pkg_relx_homepage = https://github.com/erlware/relx
+pkg_relx_fetch = git
+pkg_relx_repo = https://github.com/erlware/relx
+pkg_relx_commit = master
+
+PACKAGES += resource_discovery
+pkg_resource_discovery_name = resource_discovery
+pkg_resource_discovery_description = An application used to dynamically discover resources present in an Erlang node cluster.
+pkg_resource_discovery_homepage = http://erlware.org/
+pkg_resource_discovery_fetch = git
+pkg_resource_discovery_repo = https://github.com/erlware/resource_discovery
+pkg_resource_discovery_commit = master
+
+PACKAGES += restc
+pkg_restc_name = restc
+pkg_restc_description = Erlang REST client
+pkg_restc_homepage = https://github.com/kivra/restclient
+pkg_restc_fetch = git
+pkg_restc_repo = https://github.com/kivra/restclient
+pkg_restc_commit = master
+
+PACKAGES += rfc4627_jsonrpc
+pkg_rfc4627_jsonrpc_name = rfc4627_jsonrpc
+pkg_rfc4627_jsonrpc_description = Erlang RFC4627 (JSON) codec and JSON-RPC server implementation.
+pkg_rfc4627_jsonrpc_homepage = https://github.com/tonyg/erlang-rfc4627
+pkg_rfc4627_jsonrpc_fetch = git
+pkg_rfc4627_jsonrpc_repo = https://github.com/tonyg/erlang-rfc4627
+pkg_rfc4627_jsonrpc_commit = master
+
+PACKAGES += riak_control
+pkg_riak_control_name = riak_control
+pkg_riak_control_description = Webmachine-based administration interface for Riak.
+pkg_riak_control_homepage = https://github.com/basho/riak_control
+pkg_riak_control_fetch = git
+pkg_riak_control_repo = https://github.com/basho/riak_control
+pkg_riak_control_commit = master
+
+PACKAGES += riak_core
+pkg_riak_core_name = riak_core
+pkg_riak_core_description = Distributed systems infrastructure used by Riak.
+pkg_riak_core_homepage = https://github.com/basho/riak_core
+pkg_riak_core_fetch = git
+pkg_riak_core_repo = https://github.com/basho/riak_core
+pkg_riak_core_commit = master
+
+PACKAGES += riak_dt
+pkg_riak_dt_name = riak_dt
+pkg_riak_dt_description = Convergent replicated datatypes in Erlang
+pkg_riak_dt_homepage = https://github.com/basho/riak_dt
+pkg_riak_dt_fetch = git
+pkg_riak_dt_repo = https://github.com/basho/riak_dt
+pkg_riak_dt_commit = master
+
+PACKAGES += riak_ensemble
+pkg_riak_ensemble_name = riak_ensemble
+pkg_riak_ensemble_description = Multi-Paxos framework in Erlang
+pkg_riak_ensemble_homepage = https://github.com/basho/riak_ensemble
+pkg_riak_ensemble_fetch = git
+pkg_riak_ensemble_repo = https://github.com/basho/riak_ensemble
+pkg_riak_ensemble_commit = master
+
+PACKAGES += riak_kv
+pkg_riak_kv_name = riak_kv
+pkg_riak_kv_description = Riak Key/Value Store
+pkg_riak_kv_homepage = https://github.com/basho/riak_kv
+pkg_riak_kv_fetch = git
+pkg_riak_kv_repo = https://github.com/basho/riak_kv
+pkg_riak_kv_commit = master
+
+PACKAGES += riak_pg
+pkg_riak_pg_name = riak_pg
+pkg_riak_pg_description = Distributed process groups with riak_core.
+pkg_riak_pg_homepage = https://github.com/cmeiklejohn/riak_pg
+pkg_riak_pg_fetch = git
+pkg_riak_pg_repo = https://github.com/cmeiklejohn/riak_pg
+pkg_riak_pg_commit = master
+
+PACKAGES += riak_pipe
+pkg_riak_pipe_name = riak_pipe
+pkg_riak_pipe_description = Riak Pipelines
+pkg_riak_pipe_homepage = https://github.com/basho/riak_pipe
+pkg_riak_pipe_fetch = git
+pkg_riak_pipe_repo = https://github.com/basho/riak_pipe
+pkg_riak_pipe_commit = master
+
+PACKAGES += riak_sysmon
+pkg_riak_sysmon_name = riak_sysmon
+pkg_riak_sysmon_description = Simple OTP app for managing Erlang VM system_monitor event messages
+pkg_riak_sysmon_homepage = https://github.com/basho/riak_sysmon
+pkg_riak_sysmon_fetch = git
+pkg_riak_sysmon_repo = https://github.com/basho/riak_sysmon
+pkg_riak_sysmon_commit = master
+
+PACKAGES += riak_test
+pkg_riak_test_name = riak_test
+pkg_riak_test_description = I'm in your cluster, testing your riaks
+pkg_riak_test_homepage = https://github.com/basho/riak_test
+pkg_riak_test_fetch = git
+pkg_riak_test_repo = https://github.com/basho/riak_test
+pkg_riak_test_commit = master
+
+PACKAGES += riakc
+pkg_riakc_name = riakc
+pkg_riakc_description = Erlang clients for Riak.
+pkg_riakc_homepage = https://github.com/basho/riak-erlang-client
+pkg_riakc_fetch = git
+pkg_riakc_repo = https://github.com/basho/riak-erlang-client
+pkg_riakc_commit = master
+
+PACKAGES += riakhttpc
+pkg_riakhttpc_name = riakhttpc
+pkg_riakhttpc_description = Riak Erlang client using the HTTP interface
+pkg_riakhttpc_homepage = https://github.com/basho/riak-erlang-http-client
+pkg_riakhttpc_fetch = git
+pkg_riakhttpc_repo = https://github.com/basho/riak-erlang-http-client
+pkg_riakhttpc_commit = master
+
+PACKAGES += riaknostic
+pkg_riaknostic_name = riaknostic
+pkg_riaknostic_description = A diagnostic tool for Riak installations, to find common errors asap
+pkg_riaknostic_homepage = https://github.com/basho/riaknostic
+pkg_riaknostic_fetch = git
+pkg_riaknostic_repo = https://github.com/basho/riaknostic
+pkg_riaknostic_commit = master
+
+PACKAGES += riakpool
+pkg_riakpool_name = riakpool
+pkg_riakpool_description = erlang riak client pool
+pkg_riakpool_homepage = https://github.com/dweldon/riakpool
+pkg_riakpool_fetch = git
+pkg_riakpool_repo = https://github.com/dweldon/riakpool
+pkg_riakpool_commit = master
+
+PACKAGES += rivus_cep
+pkg_rivus_cep_name = rivus_cep
+pkg_rivus_cep_description = Complex event processing in Erlang
+pkg_rivus_cep_homepage = https://github.com/vascokk/rivus_cep
+pkg_rivus_cep_fetch = git
+pkg_rivus_cep_repo = https://github.com/vascokk/rivus_cep
+pkg_rivus_cep_commit = master
+
+PACKAGES += rlimit
+pkg_rlimit_name = rlimit
+pkg_rlimit_description = Magnus Klaar's rate limiter code from etorrent
+pkg_rlimit_homepage = https://github.com/jlouis/rlimit
+pkg_rlimit_fetch = git
+pkg_rlimit_repo = https://github.com/jlouis/rlimit
+pkg_rlimit_commit = master
+
+PACKAGES += rust_mk
+pkg_rust_mk_name = rust_mk
+pkg_rust_mk_description = Build Rust crates in an Erlang application
+pkg_rust_mk_homepage = https://github.com/goertzenator/rust.mk
+pkg_rust_mk_fetch = git
+pkg_rust_mk_repo = https://github.com/goertzenator/rust.mk
+pkg_rust_mk_commit = master
+
+PACKAGES += safetyvalve
+pkg_safetyvalve_name = safetyvalve
+pkg_safetyvalve_description = A safety valve for your erlang node
+pkg_safetyvalve_homepage = https://github.com/jlouis/safetyvalve
+pkg_safetyvalve_fetch = git
+pkg_safetyvalve_repo = https://github.com/jlouis/safetyvalve
+pkg_safetyvalve_commit = master
+
+PACKAGES += seestar
+pkg_seestar_name = seestar
+pkg_seestar_description = The Erlang client for Cassandra 1.2+ binary protocol
+pkg_seestar_homepage = https://github.com/iamaleksey/seestar
+pkg_seestar_fetch = git
+pkg_seestar_repo = https://github.com/iamaleksey/seestar
+pkg_seestar_commit = master
+
+PACKAGES += service
+pkg_service_name = service
+pkg_service_description = A minimal Erlang behavior for creating CloudI internal services
+pkg_service_homepage = http://cloudi.org/
+pkg_service_fetch = git
+pkg_service_repo = https://github.com/CloudI/service
+pkg_service_commit = master
+
+PACKAGES += setup
+pkg_setup_name = setup
+pkg_setup_description = Generic setup utility for Erlang-based systems
+pkg_setup_homepage = https://github.com/uwiger/setup
+pkg_setup_fetch = git
+pkg_setup_repo = https://github.com/uwiger/setup
+pkg_setup_commit = master
+
+PACKAGES += sext
+pkg_sext_name = sext
+pkg_sext_description = Sortable Erlang Term Serialization
+pkg_sext_homepage = https://github.com/uwiger/sext
+pkg_sext_fetch = git
+pkg_sext_repo = https://github.com/uwiger/sext
+pkg_sext_commit = master
+
+PACKAGES += sfmt
+pkg_sfmt_name = sfmt
+pkg_sfmt_description = SFMT pseudo random number generator for Erlang.
+pkg_sfmt_homepage = https://github.com/jj1bdx/sfmt-erlang
+pkg_sfmt_fetch = git
+pkg_sfmt_repo = https://github.com/jj1bdx/sfmt-erlang
+pkg_sfmt_commit = master
+
+PACKAGES += sgte
+pkg_sgte_name = sgte
+pkg_sgte_description = A simple Erlang Template Engine
+pkg_sgte_homepage = https://github.com/filippo/sgte
+pkg_sgte_fetch = git
+pkg_sgte_repo = https://github.com/filippo/sgte
+pkg_sgte_commit = master
+
+PACKAGES += sheriff
+pkg_sheriff_name = sheriff
+pkg_sheriff_description = Parse transform for type based validation.
+pkg_sheriff_homepage = http://ninenines.eu
+pkg_sheriff_fetch = git
+pkg_sheriff_repo = https://github.com/extend/sheriff
+pkg_sheriff_commit = master
+
+PACKAGES += shotgun
+pkg_shotgun_name = shotgun
+pkg_shotgun_description = better than just a gun
+pkg_shotgun_homepage = https://github.com/inaka/shotgun
+pkg_shotgun_fetch = git
+pkg_shotgun_repo = https://github.com/inaka/shotgun
+pkg_shotgun_commit = master
+
+PACKAGES += sidejob
+pkg_sidejob_name = sidejob
+pkg_sidejob_description = Parallel worker and capacity limiting library for Erlang
+pkg_sidejob_homepage = https://github.com/basho/sidejob
+pkg_sidejob_fetch = git
+pkg_sidejob_repo = https://github.com/basho/sidejob
+pkg_sidejob_commit = master
+
+PACKAGES += sieve
+pkg_sieve_name = sieve
+pkg_sieve_description = sieve is a simple TCP routing proxy (layer 7) in erlang
+pkg_sieve_homepage = https://github.com/benoitc/sieve
+pkg_sieve_fetch = git
+pkg_sieve_repo = https://github.com/benoitc/sieve
+pkg_sieve_commit = master
+
+PACKAGES += sighandler
+pkg_sighandler_name = sighandler
+pkg_sighandler_description = Handle UNIX signals in Erlang
+pkg_sighandler_homepage = https://github.com/jkingsbery/sighandler
+pkg_sighandler_fetch = git
+pkg_sighandler_repo = https://github.com/jkingsbery/sighandler
+pkg_sighandler_commit = master
+
+PACKAGES += simhash
+pkg_simhash_name = simhash
+pkg_simhash_description = Simhashing for Erlang -- hashing algorithm to find near-duplicates in binary data.
+pkg_simhash_homepage = https://github.com/ferd/simhash
+pkg_simhash_fetch = git
+pkg_simhash_repo = https://github.com/ferd/simhash
+pkg_simhash_commit = master
+
+PACKAGES += simple_bridge
+pkg_simple_bridge_name = simple_bridge
+pkg_simple_bridge_description = A simple, standardized interface library to Erlang HTTP Servers.
+pkg_simple_bridge_homepage = https://github.com/nitrogen/simple_bridge
+pkg_simple_bridge_fetch = git
+pkg_simple_bridge_repo = https://github.com/nitrogen/simple_bridge
+pkg_simple_bridge_commit = master
+
+PACKAGES += simple_oauth2
+pkg_simple_oauth2_name = simple_oauth2
+pkg_simple_oauth2_description = Simple erlang OAuth2 client module for any http server framework (Google, Facebook, Yandex, Vkontakte are preconfigured)
+pkg_simple_oauth2_homepage = https://github.com/virtan/simple_oauth2
+pkg_simple_oauth2_fetch = git
+pkg_simple_oauth2_repo = https://github.com/virtan/simple_oauth2
+pkg_simple_oauth2_commit = master
+
+PACKAGES += skel
+pkg_skel_name = skel
+pkg_skel_description = A Streaming Process-based Skeleton Library for Erlang
+pkg_skel_homepage = https://github.com/ParaPhrase/skel
+pkg_skel_fetch = git
+pkg_skel_repo = https://github.com/ParaPhrase/skel
+pkg_skel_commit = master
+
+PACKAGES += slack
+pkg_slack_name = slack
+pkg_slack_description = Minimal slack notification OTP library.
+pkg_slack_homepage = https://github.com/DonBranson/slack
+pkg_slack_fetch = git
+pkg_slack_repo = https://github.com/DonBranson/slack.git
+pkg_slack_commit = master
+
+PACKAGES += smother
+pkg_smother_name = smother
+pkg_smother_description = Extended code coverage metrics for Erlang.
+pkg_smother_homepage = https://ramsay-t.github.io/Smother/
+pkg_smother_fetch = git
+pkg_smother_repo = https://github.com/ramsay-t/Smother
+pkg_smother_commit = master
+
+PACKAGES += snappyer
+pkg_snappyer_name = snappyer
+pkg_snappyer_description = Snappy as nif for Erlang
+pkg_snappyer_homepage = https://github.com/zmstone/snappyer
+pkg_snappyer_fetch = git
+pkg_snappyer_repo = https://github.com/zmstone/snappyer.git
+pkg_snappyer_commit = master
+
+PACKAGES += social
+pkg_social_name = social
+pkg_social_description = Cowboy handler for social login via OAuth2 providers
+pkg_social_homepage = https://github.com/dvv/social
+pkg_social_fetch = git
+pkg_social_repo = https://github.com/dvv/social
+pkg_social_commit = master
+
+PACKAGES += spapi_router
+pkg_spapi_router_name = spapi_router
+pkg_spapi_router_description = Partially-connected Erlang clustering
+pkg_spapi_router_homepage = https://github.com/spilgames/spapi-router
+pkg_spapi_router_fetch = git
+pkg_spapi_router_repo = https://github.com/spilgames/spapi-router
+pkg_spapi_router_commit = master
+
+PACKAGES += sqerl
+pkg_sqerl_name = sqerl
+pkg_sqerl_description = An Erlang-flavoured SQL DSL
+pkg_sqerl_homepage = https://github.com/hairyhum/sqerl
+pkg_sqerl_fetch = git
+pkg_sqerl_repo = https://github.com/hairyhum/sqerl
+pkg_sqerl_commit = master
+
+PACKAGES += srly
+pkg_srly_name = srly
+pkg_srly_description = Native Erlang Unix serial interface
+pkg_srly_homepage = https://github.com/msantos/srly
+pkg_srly_fetch = git
+pkg_srly_repo = https://github.com/msantos/srly
+pkg_srly_commit = master
+
+PACKAGES += sshrpc
+pkg_sshrpc_name = sshrpc
+pkg_sshrpc_description = Erlang SSH RPC module (experimental)
+pkg_sshrpc_homepage = https://github.com/jj1bdx/sshrpc
+pkg_sshrpc_fetch = git
+pkg_sshrpc_repo = https://github.com/jj1bdx/sshrpc
+pkg_sshrpc_commit = master
+
+PACKAGES += stable
+pkg_stable_name = stable
+pkg_stable_description = Library of assorted helpers for Cowboy web server.
+pkg_stable_homepage = https://github.com/dvv/stable
+pkg_stable_fetch = git
+pkg_stable_repo = https://github.com/dvv/stable
+pkg_stable_commit = master
+
+PACKAGES += statebox
+pkg_statebox_name = statebox
+pkg_statebox_description = Erlang state monad with merge/conflict-resolution capabilities. Useful for Riak.
+pkg_statebox_homepage = https://github.com/mochi/statebox
+pkg_statebox_fetch = git
+pkg_statebox_repo = https://github.com/mochi/statebox
+pkg_statebox_commit = master
+
+PACKAGES += statebox_riak
+pkg_statebox_riak_name = statebox_riak
+pkg_statebox_riak_description = Convenience library that makes it easier to use statebox with riak, extracted from best practices in our production code at Mochi Media.
+pkg_statebox_riak_homepage = https://github.com/mochi/statebox_riak
+pkg_statebox_riak_fetch = git
+pkg_statebox_riak_repo = https://github.com/mochi/statebox_riak
+pkg_statebox_riak_commit = master
+
+PACKAGES += statman
+pkg_statman_name = statman
+pkg_statman_description = Efficiently collect massive volumes of metrics inside the Erlang VM
+pkg_statman_homepage = https://github.com/knutin/statman
+pkg_statman_fetch = git
+pkg_statman_repo = https://github.com/knutin/statman
+pkg_statman_commit = master
+
+PACKAGES += statsderl
+pkg_statsderl_name = statsderl
+pkg_statsderl_description = StatsD client (erlang)
+pkg_statsderl_homepage = https://github.com/lpgauth/statsderl
+pkg_statsderl_fetch = git
+pkg_statsderl_repo = https://github.com/lpgauth/statsderl
+pkg_statsderl_commit = master
+
+PACKAGES += stdinout_pool
+pkg_stdinout_pool_name = stdinout_pool
+pkg_stdinout_pool_description = stdinout_pool : stuff goes in, stuff goes out. there's never any miscommunication.
+pkg_stdinout_pool_homepage = https://github.com/mattsta/erlang-stdinout-pool
+pkg_stdinout_pool_fetch = git
+pkg_stdinout_pool_repo = https://github.com/mattsta/erlang-stdinout-pool
+pkg_stdinout_pool_commit = master
+
+PACKAGES += stockdb
+pkg_stockdb_name = stockdb
+pkg_stockdb_description = Database for storing Stock Exchange quotes in erlang
+pkg_stockdb_homepage = https://github.com/maxlapshin/stockdb
+pkg_stockdb_fetch = git
+pkg_stockdb_repo = https://github.com/maxlapshin/stockdb
+pkg_stockdb_commit = master
+
+PACKAGES += stripe
+pkg_stripe_name = stripe
+pkg_stripe_description = Erlang interface to the stripe.com API
+pkg_stripe_homepage = https://github.com/mattsta/stripe-erlang
+pkg_stripe_fetch = git
+pkg_stripe_repo = https://github.com/mattsta/stripe-erlang
+pkg_stripe_commit = v1
+
+PACKAGES += subproc
+pkg_subproc_name = subproc
+pkg_subproc_description = unix subprocess manager with {active,once|false} modes
+pkg_subproc_homepage = http://dozzie.jarowit.net/trac/wiki/subproc
+pkg_subproc_fetch = git
+pkg_subproc_repo = https://github.com/dozzie/subproc
+pkg_subproc_commit = v0.1.0
+
+PACKAGES += supervisor3
+pkg_supervisor3_name = supervisor3
+pkg_supervisor3_description = OTP supervisor with additional strategies
+pkg_supervisor3_homepage = https://github.com/klarna/supervisor3
+pkg_supervisor3_fetch = git
+pkg_supervisor3_repo = https://github.com/klarna/supervisor3.git
+pkg_supervisor3_commit = master
+
+PACKAGES += surrogate
+pkg_surrogate_name = surrogate
+pkg_surrogate_description = Proxy server written in Erlang. Supports reverse proxy load balancing and forward proxy with HTTP (including CONNECT), SOCKS4, SOCKS5, and transparent proxy modes.
+pkg_surrogate_homepage = https://github.com/skruger/Surrogate
+pkg_surrogate_fetch = git
+pkg_surrogate_repo = https://github.com/skruger/Surrogate
+pkg_surrogate_commit = master
+
+PACKAGES += swab
+pkg_swab_name = swab
+pkg_swab_description = General purpose buffer handling module
+pkg_swab_homepage = https://github.com/crownedgrouse/swab
+pkg_swab_fetch = git
+pkg_swab_repo = https://github.com/crownedgrouse/swab
+pkg_swab_commit = master
+
+PACKAGES += swarm
+pkg_swarm_name = swarm
+pkg_swarm_description = Fast and simple acceptor pool for Erlang
+pkg_swarm_homepage = https://github.com/jeremey/swarm
+pkg_swarm_fetch = git
+pkg_swarm_repo = https://github.com/jeremey/swarm
+pkg_swarm_commit = master
+
+PACKAGES += switchboard
+pkg_switchboard_name = switchboard
+pkg_switchboard_description = A framework for processing email using worker plugins.
+pkg_switchboard_homepage = https://github.com/thusfresh/switchboard
+pkg_switchboard_fetch = git
+pkg_switchboard_repo = https://github.com/thusfresh/switchboard
+pkg_switchboard_commit = master
+
+PACKAGES += syn
+pkg_syn_name = syn
+pkg_syn_description = A global Process Registry and Process Group manager for Erlang.
+pkg_syn_homepage = https://github.com/ostinelli/syn
+pkg_syn_fetch = git
+pkg_syn_repo = https://github.com/ostinelli/syn
+pkg_syn_commit = master
+
+PACKAGES += sync
+pkg_sync_name = sync
+pkg_sync_description = On-the-fly recompiling and reloading in Erlang.
+pkg_sync_homepage = https://github.com/rustyio/sync
+pkg_sync_fetch = git
+pkg_sync_repo = https://github.com/rustyio/sync
+pkg_sync_commit = master
+
+PACKAGES += syntaxerl
+pkg_syntaxerl_name = syntaxerl
+pkg_syntaxerl_description = Syntax checker for Erlang
+pkg_syntaxerl_homepage = https://github.com/ten0s/syntaxerl
+pkg_syntaxerl_fetch = git
+pkg_syntaxerl_repo = https://github.com/ten0s/syntaxerl
+pkg_syntaxerl_commit = master
+
+PACKAGES += syslog
+pkg_syslog_name = syslog
+pkg_syslog_description = Erlang port driver for interacting with syslog via syslog(3)
+pkg_syslog_homepage = https://github.com/Vagabond/erlang-syslog
+pkg_syslog_fetch = git
+pkg_syslog_repo = https://github.com/Vagabond/erlang-syslog
+pkg_syslog_commit = master
+
+PACKAGES += taskforce
+pkg_taskforce_name = taskforce
+pkg_taskforce_description = Erlang worker pools for controlled parallelisation of arbitrary tasks.
+pkg_taskforce_homepage = https://github.com/g-andrade/taskforce
+pkg_taskforce_fetch = git
+pkg_taskforce_repo = https://github.com/g-andrade/taskforce
+pkg_taskforce_commit = master
+
+PACKAGES += tddreloader
+pkg_tddreloader_name = tddreloader
+pkg_tddreloader_description = Shell utility for recompiling, reloading, and testing code as it changes
+pkg_tddreloader_homepage = https://github.com/version2beta/tddreloader
+pkg_tddreloader_fetch = git
+pkg_tddreloader_repo = https://github.com/version2beta/tddreloader
+pkg_tddreloader_commit = master
+
+PACKAGES += tempo
+pkg_tempo_name = tempo
+pkg_tempo_description = NIF-based date and time parsing and formatting for Erlang.
+pkg_tempo_homepage = https://github.com/selectel/tempo
+pkg_tempo_fetch = git
+pkg_tempo_repo = https://github.com/selectel/tempo
+pkg_tempo_commit = master
+
+PACKAGES += ticktick
+pkg_ticktick_name = ticktick
+pkg_ticktick_description = Ticktick is an ID generator for messaging services.
+pkg_ticktick_homepage = https://github.com/ericliang/ticktick
+pkg_ticktick_fetch = git
+pkg_ticktick_repo = https://github.com/ericliang/ticktick
+pkg_ticktick_commit = master
+
+PACKAGES += tinymq
+pkg_tinymq_name = tinymq
+pkg_tinymq_description = TinyMQ - a diminutive, in-memory message queue
+pkg_tinymq_homepage = https://github.com/ChicagoBoss/tinymq
+pkg_tinymq_fetch = git
+pkg_tinymq_repo = https://github.com/ChicagoBoss/tinymq
+pkg_tinymq_commit = master
+
+PACKAGES += tinymt
+pkg_tinymt_name = tinymt
+pkg_tinymt_description = TinyMT pseudo random number generator for Erlang.
+pkg_tinymt_homepage = https://github.com/jj1bdx/tinymt-erlang
+pkg_tinymt_fetch = git
+pkg_tinymt_repo = https://github.com/jj1bdx/tinymt-erlang
+pkg_tinymt_commit = master
+
+PACKAGES += tirerl
+pkg_tirerl_name = tirerl
+pkg_tirerl_description = Erlang interface to Elastic Search
+pkg_tirerl_homepage = https://github.com/inaka/tirerl
+pkg_tirerl_fetch = git
+pkg_tirerl_repo = https://github.com/inaka/tirerl
+pkg_tirerl_commit = master
+
+PACKAGES += toml
+pkg_toml_name = toml
+pkg_toml_description = TOML (0.4.0) config parser
+pkg_toml_homepage = http://dozzie.jarowit.net/trac/wiki/TOML
+pkg_toml_fetch = git
+pkg_toml_repo = https://github.com/dozzie/toml
+pkg_toml_commit = v0.2.0
+
+PACKAGES += traffic_tools
+pkg_traffic_tools_name = traffic_tools
+pkg_traffic_tools_description = Simple traffic limiting library
+pkg_traffic_tools_homepage = https://github.com/systra/traffic_tools
+pkg_traffic_tools_fetch = git
+pkg_traffic_tools_repo = https://github.com/systra/traffic_tools
+pkg_traffic_tools_commit = master
+
+PACKAGES += trails
+pkg_trails_name = trails
+pkg_trails_description = A couple of improvements over Cowboy Routes
+pkg_trails_homepage = http://inaka.github.io/cowboy-trails/
+pkg_trails_fetch = git
+pkg_trails_repo = https://github.com/inaka/cowboy-trails
+pkg_trails_commit = master
+
+PACKAGES += trane
+pkg_trane_name = trane
+pkg_trane_description = SAX-style parser for broken HTML, in Erlang
+pkg_trane_homepage = https://github.com/massemanet/trane
+pkg_trane_fetch = git
+pkg_trane_repo = https://github.com/massemanet/trane
+pkg_trane_commit = master
+
+PACKAGES += transit
+pkg_transit_name = transit
+pkg_transit_description = transit format for erlang
+pkg_transit_homepage = https://github.com/isaiah/transit-erlang
+pkg_transit_fetch = git
+pkg_transit_repo = https://github.com/isaiah/transit-erlang
+pkg_transit_commit = master
+
+PACKAGES += trie
+pkg_trie_name = trie
+pkg_trie_description = Erlang Trie Implementation
+pkg_trie_homepage = https://github.com/okeuday/trie
+pkg_trie_fetch = git
+pkg_trie_repo = https://github.com/okeuday/trie
+pkg_trie_commit = master
+
+PACKAGES += triq
+pkg_triq_name = triq
+pkg_triq_description = Trifork QuickCheck
+pkg_triq_homepage = https://triq.gitlab.io
+pkg_triq_fetch = git
+pkg_triq_repo = https://gitlab.com/triq/triq.git
+pkg_triq_commit = master
+
+PACKAGES += tunctl
+pkg_tunctl_name = tunctl
+pkg_tunctl_description = Erlang TUN/TAP interface
+pkg_tunctl_homepage = https://github.com/msantos/tunctl
+pkg_tunctl_fetch = git
+pkg_tunctl_repo = https://github.com/msantos/tunctl
+pkg_tunctl_commit = master
+
+PACKAGES += twerl
+pkg_twerl_name = twerl
+pkg_twerl_description = Erlang client for the Twitter Streaming API
+pkg_twerl_homepage = https://github.com/lucaspiller/twerl
+pkg_twerl_fetch = git
+pkg_twerl_repo = https://github.com/lucaspiller/twerl
+pkg_twerl_commit = oauth
+
+PACKAGES += twitter_erlang
+pkg_twitter_erlang_name = twitter_erlang
+pkg_twitter_erlang_description = An Erlang twitter client
+pkg_twitter_erlang_homepage = https://github.com/ngerakines/erlang_twitter
+pkg_twitter_erlang_fetch = git
+pkg_twitter_erlang_repo = https://github.com/ngerakines/erlang_twitter
+pkg_twitter_erlang_commit = master
+
+PACKAGES += ucol_nif
+pkg_ucol_nif_name = ucol_nif
+pkg_ucol_nif_description = ICU based collation Erlang module
+pkg_ucol_nif_homepage = https://github.com/refuge/ucol_nif
+pkg_ucol_nif_fetch = git
+pkg_ucol_nif_repo = https://github.com/refuge/ucol_nif
+pkg_ucol_nif_commit = master
+
+PACKAGES += unicorn
+pkg_unicorn_name = unicorn
+pkg_unicorn_description = Generic configuration server
+pkg_unicorn_homepage = https://github.com/shizzard/unicorn
+pkg_unicorn_fetch = git
+pkg_unicorn_repo = https://github.com/shizzard/unicorn
+pkg_unicorn_commit = master
+
+PACKAGES += unsplit
+pkg_unsplit_name = unsplit
+pkg_unsplit_description = Resolves conflicts in Mnesia after network splits
+pkg_unsplit_homepage = https://github.com/uwiger/unsplit
+pkg_unsplit_fetch = git
+pkg_unsplit_repo = https://github.com/uwiger/unsplit
+pkg_unsplit_commit = master
+
+PACKAGES += uuid
+pkg_uuid_name = uuid
+pkg_uuid_description = Erlang UUID Implementation
+pkg_uuid_homepage = https://github.com/okeuday/uuid
+pkg_uuid_fetch = git
+pkg_uuid_repo = https://github.com/okeuday/uuid
+pkg_uuid_commit = master
+
+PACKAGES += ux
+pkg_ux_name = ux
+pkg_ux_description = Unicode eXtention for Erlang (Strings, Collation)
+pkg_ux_homepage = https://github.com/erlang-unicode/ux
+pkg_ux_fetch = git
+pkg_ux_repo = https://github.com/erlang-unicode/ux
+pkg_ux_commit = master
+
+PACKAGES += vert
+pkg_vert_name = vert
+pkg_vert_description = Erlang binding to libvirt virtualization API
+pkg_vert_homepage = https://github.com/msantos/erlang-libvirt
+pkg_vert_fetch = git
+pkg_vert_repo = https://github.com/msantos/erlang-libvirt
+pkg_vert_commit = master
+
+PACKAGES += verx
+pkg_verx_name = verx
+pkg_verx_description = Erlang implementation of the libvirtd remote protocol
+pkg_verx_homepage = https://github.com/msantos/verx
+pkg_verx_fetch = git
+pkg_verx_repo = https://github.com/msantos/verx
+pkg_verx_commit = master
+
+PACKAGES += vmq_acl
+pkg_vmq_acl_name = vmq_acl
+pkg_vmq_acl_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_acl_homepage = https://verne.mq/
+pkg_vmq_acl_fetch = git
+pkg_vmq_acl_repo = https://github.com/erlio/vmq_acl
+pkg_vmq_acl_commit = master
+
+PACKAGES += vmq_bridge
+pkg_vmq_bridge_name = vmq_bridge
+pkg_vmq_bridge_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_bridge_homepage = https://verne.mq/
+pkg_vmq_bridge_fetch = git
+pkg_vmq_bridge_repo = https://github.com/erlio/vmq_bridge
+pkg_vmq_bridge_commit = master
+
+PACKAGES += vmq_graphite
+pkg_vmq_graphite_name = vmq_graphite
+pkg_vmq_graphite_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_graphite_homepage = https://verne.mq/
+pkg_vmq_graphite_fetch = git
+pkg_vmq_graphite_repo = https://github.com/erlio/vmq_graphite
+pkg_vmq_graphite_commit = master
+
+PACKAGES += vmq_passwd
+pkg_vmq_passwd_name = vmq_passwd
+pkg_vmq_passwd_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_passwd_homepage = https://verne.mq/
+pkg_vmq_passwd_fetch = git
+pkg_vmq_passwd_repo = https://github.com/erlio/vmq_passwd
+pkg_vmq_passwd_commit = master
+
+PACKAGES += vmq_server
+pkg_vmq_server_name = vmq_server
+pkg_vmq_server_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_server_homepage = https://verne.mq/
+pkg_vmq_server_fetch = git
+pkg_vmq_server_repo = https://github.com/erlio/vmq_server
+pkg_vmq_server_commit = master
+
+PACKAGES += vmq_snmp
+pkg_vmq_snmp_name = vmq_snmp
+pkg_vmq_snmp_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_snmp_homepage = https://verne.mq/
+pkg_vmq_snmp_fetch = git
+pkg_vmq_snmp_repo = https://github.com/erlio/vmq_snmp
+pkg_vmq_snmp_commit = master
+
+PACKAGES += vmq_systree
+pkg_vmq_systree_name = vmq_systree
+pkg_vmq_systree_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_systree_homepage = https://verne.mq/
+pkg_vmq_systree_fetch = git
+pkg_vmq_systree_repo = https://github.com/erlio/vmq_systree
+pkg_vmq_systree_commit = master
+
+PACKAGES += vmstats
+pkg_vmstats_name = vmstats
+pkg_vmstats_description = Tiny Erlang app that works with statsderl to report Erlang VM metrics to Graphite.
+pkg_vmstats_homepage = https://github.com/ferd/vmstats
+pkg_vmstats_fetch = git
+pkg_vmstats_repo = https://github.com/ferd/vmstats
+pkg_vmstats_commit = master
+
+PACKAGES += walrus
+pkg_walrus_name = walrus
+pkg_walrus_description = Walrus - Mustache-like Templating
+pkg_walrus_homepage = https://github.com/devinus/walrus
+pkg_walrus_fetch = git
+pkg_walrus_repo = https://github.com/devinus/walrus
+pkg_walrus_commit = master
+
+PACKAGES += webmachine
+pkg_webmachine_name = webmachine
+pkg_webmachine_description = A REST-based system for building web applications.
+pkg_webmachine_homepage = https://github.com/basho/webmachine
+pkg_webmachine_fetch = git
+pkg_webmachine_repo = https://github.com/basho/webmachine
+pkg_webmachine_commit = master
+
+PACKAGES += websocket_client
+pkg_websocket_client_name = websocket_client
+pkg_websocket_client_description = Erlang websocket client (ws and wss supported)
+pkg_websocket_client_homepage = https://github.com/jeremyong/websocket_client
+pkg_websocket_client_fetch = git
+pkg_websocket_client_repo = https://github.com/jeremyong/websocket_client
+pkg_websocket_client_commit = master
+
+PACKAGES += worker_pool
+pkg_worker_pool_name = worker_pool
+pkg_worker_pool_description = a simple erlang worker pool
+pkg_worker_pool_homepage = https://github.com/inaka/worker_pool
+pkg_worker_pool_fetch = git
+pkg_worker_pool_repo = https://github.com/inaka/worker_pool
+pkg_worker_pool_commit = master
+
+PACKAGES += wrangler
+pkg_wrangler_name = wrangler
+pkg_wrangler_description = Import of the Wrangler svn repository.
+pkg_wrangler_homepage = http://www.cs.kent.ac.uk/projects/wrangler/Home.html
+pkg_wrangler_fetch = git
+pkg_wrangler_repo = https://github.com/RefactoringTools/wrangler
+pkg_wrangler_commit = master
+
+PACKAGES += wsock
+pkg_wsock_name = wsock
+pkg_wsock_description = Erlang library to build WebSocket clients and servers
+pkg_wsock_homepage = https://github.com/madtrick/wsock
+pkg_wsock_fetch = git
+pkg_wsock_repo = https://github.com/madtrick/wsock
+pkg_wsock_commit = master
+
+PACKAGES += xhttpc
+pkg_xhttpc_name = xhttpc
+pkg_xhttpc_description = Extensible HTTP Client for Erlang
+pkg_xhttpc_homepage = https://github.com/seriyps/xhttpc
+pkg_xhttpc_fetch = git
+pkg_xhttpc_repo = https://github.com/seriyps/xhttpc
+pkg_xhttpc_commit = master
+
+PACKAGES += xref_runner
+pkg_xref_runner_name = xref_runner
+pkg_xref_runner_description = Erlang Xref Runner (inspired by rebar xref)
+pkg_xref_runner_homepage = https://github.com/inaka/xref_runner
+pkg_xref_runner_fetch = git
+pkg_xref_runner_repo = https://github.com/inaka/xref_runner
+pkg_xref_runner_commit = master
+
+PACKAGES += yamerl
+pkg_yamerl_name = yamerl
+pkg_yamerl_description = YAML 1.2 parser in pure Erlang
+pkg_yamerl_homepage = https://github.com/yakaz/yamerl
+pkg_yamerl_fetch = git
+pkg_yamerl_repo = https://github.com/yakaz/yamerl
+pkg_yamerl_commit = master
+
+PACKAGES += yamler
+pkg_yamler_name = yamler
+pkg_yamler_description = libyaml-based yaml loader for Erlang
+pkg_yamler_homepage = https://github.com/goertzenator/yamler
+pkg_yamler_fetch = git
+pkg_yamler_repo = https://github.com/goertzenator/yamler
+pkg_yamler_commit = master
+
+PACKAGES += yaws
+pkg_yaws_name = yaws
+pkg_yaws_description = Yaws webserver
+pkg_yaws_homepage = http://yaws.hyber.org
+pkg_yaws_fetch = git
+pkg_yaws_repo = https://github.com/klacke/yaws
+pkg_yaws_commit = master
+
+PACKAGES += zab_engine
+pkg_zab_engine_name = zab_engine
+pkg_zab_engine_description = ZAB protocol implemented in Erlang
+pkg_zab_engine_homepage = https://github.com/xinmingyao/zab_engine
+pkg_zab_engine_fetch = git
+pkg_zab_engine_repo = https://github.com/xinmingyao/zab_engine
+pkg_zab_engine_commit = master
+
+PACKAGES += zabbix_sender
+pkg_zabbix_sender_name = zabbix_sender
+pkg_zabbix_sender_description = Zabbix trapper for sending data to Zabbix in pure Erlang
+pkg_zabbix_sender_homepage = https://github.com/stalkermn/zabbix_sender
+pkg_zabbix_sender_fetch = git
+pkg_zabbix_sender_repo = https://github.com/stalkermn/zabbix_sender.git
+pkg_zabbix_sender_commit = master
+
+PACKAGES += zeta
+pkg_zeta_name = zeta
+pkg_zeta_description = HTTP access log parser in Erlang
+pkg_zeta_homepage = https://github.com/s1n4/zeta
+pkg_zeta_fetch = git
+pkg_zeta_repo = https://github.com/s1n4/zeta
+pkg_zeta_commit = master
+
+PACKAGES += zippers
+pkg_zippers_name = zippers
+pkg_zippers_description = A library for functional zipper data structures in Erlang.
+pkg_zippers_homepage = https://github.com/ferd/zippers
+pkg_zippers_fetch = git
+pkg_zippers_repo = https://github.com/ferd/zippers
+pkg_zippers_commit = master
+
+PACKAGES += zlists
+pkg_zlists_name = zlists
+pkg_zlists_description = Erlang lazy lists library.
+pkg_zlists_homepage = https://github.com/vjache/erlang-zlists
+pkg_zlists_fetch = git
+pkg_zlists_repo = https://github.com/vjache/erlang-zlists
+pkg_zlists_commit = master
+
+PACKAGES += zraft_lib
+pkg_zraft_lib_name = zraft_lib
+pkg_zraft_lib_description = Erlang raft consensus protocol implementation
+pkg_zraft_lib_homepage = https://github.com/dreyk/zraft_lib
+pkg_zraft_lib_fetch = git
+pkg_zraft_lib_repo = https://github.com/dreyk/zraft_lib
+pkg_zraft_lib_commit = master
+
+PACKAGES += zucchini
+pkg_zucchini_name = zucchini
+pkg_zucchini_description = An Erlang INI parser
+pkg_zucchini_homepage = https://github.com/devinus/zucchini
+pkg_zucchini_fetch = git
+pkg_zucchini_repo = https://github.com/devinus/zucchini
+pkg_zucchini_commit = master
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: search
+
+define pkg_print
+ $(verbose) printf "%s\n" \
+ $(if $(call core_eq,$(1),$(pkg_$(1)_name)),,"Pkg name: $(1)") \
+ "App name: $(pkg_$(1)_name)" \
+ "Description: $(pkg_$(1)_description)" \
+ "Home page: $(pkg_$(1)_homepage)" \
+ "Fetch with: $(pkg_$(1)_fetch)" \
+ "Repository: $(pkg_$(1)_repo)" \
+ "Commit: $(pkg_$(1)_commit)" \
+ ""
+
+endef
+
+search:
+ifdef q
+ $(foreach p,$(PACKAGES), \
+ $(if $(findstring $(call core_lc,$(q)),$(call core_lc,$(pkg_$(p)_name) $(pkg_$(p)_description))), \
+ $(call pkg_print,$(p))))
+else
+ $(foreach p,$(PACKAGES),$(call pkg_print,$(p)))
+endif
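+
+# Example (sketch): `make search q=pool` prints the entries whose name or
+# description contains "pool" (matched case-insensitively); without `q`,
+# every known package is printed.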
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-deps clean-tmp-deps.log
+
+# Configuration.
+
+ifdef OTP_DEPS
+$(warning The variable OTP_DEPS is deprecated in favor of LOCAL_DEPS.)
+endif
+
+IGNORE_DEPS ?=
+export IGNORE_DEPS
+
+APPS_DIR ?= $(CURDIR)/apps
+export APPS_DIR
+
+DEPS_DIR ?= $(CURDIR)/deps
+export DEPS_DIR
+
+REBAR_DEPS_DIR = $(DEPS_DIR)
+export REBAR_DEPS_DIR
+
+REBAR_GIT ?= https://github.com/rebar/rebar
+REBAR_COMMIT ?= 576e12171ab8d69b048b827b92aa65d067deea01
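+
+# These defaults can be overridden from the project Makefile before erlang.mk
+# is included; for example (hypothetical values):
+#   DEPS_DIR = $(CURDIR)/vendor
+#   IGNORE_DEPS += edown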
+
+# External "early" plugins (see core/plugins.mk for regular plugins).
+# They both use the core_dep_plugin macro.
+
+define core_dep_plugin
+ifeq ($(2),$(PROJECT))
+-include $$(patsubst $(PROJECT)/%,%,$(1))
+else
+-include $(DEPS_DIR)/$(1)
+
+$(DEPS_DIR)/$(1): $(DEPS_DIR)/$(2) ;
+endif
+endef
+
+DEP_EARLY_PLUGINS ?=
+
+$(foreach p,$(DEP_EARLY_PLUGINS),\
+ $(eval $(if $(findstring /,$p),\
+ $(call core_dep_plugin,$p,$(firstword $(subst /, ,$p))),\
+ $(call core_dep_plugin,$p/early-plugins.mk,$p))))
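+
+# Example (sketch): DEP_EARLY_PLUGINS = mydep loads
+# $(DEPS_DIR)/mydep/early-plugins.mk, while a value containing a slash, such
+# as mydep/mk/early.mk, is included as-is relative to $(DEPS_DIR).
+# "mydep" is a hypothetical dependency name.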
+
+# Query functions.
+
+query_fetch_method = $(if $(dep_$(1)),$(call _qfm_dep,$(word 1,$(dep_$(1)))),$(call _qfm_pkg,$(1)))
+_qfm_dep = $(if $(dep_fetch_$(1)),$(1),$(if $(IS_DEP),legacy,fail))
+_qfm_pkg = $(if $(pkg_$(1)_fetch),$(pkg_$(1)_fetch),fail)
+
+query_name = $(if $(dep_$(1)),$(1),$(if $(pkg_$(1)_name),$(pkg_$(1)_name),$(1)))
+
+query_repo = $(call _qr,$(1),$(call query_fetch_method,$(1)))
+_qr = $(if $(query_repo_$(2)),$(call query_repo_$(2),$(1)),$(call dep_repo,$(1)))
+
+query_repo_default = $(if $(dep_$(1)),$(word 2,$(dep_$(1))),$(pkg_$(1)_repo))
+query_repo_git = $(patsubst git://github.com/%,https://github.com/%,$(call query_repo_default,$(1)))
+query_repo_git-subfolder = $(call query_repo_git,$(1))
+query_repo_git-submodule = -
+query_repo_hg = $(call query_repo_default,$(1))
+query_repo_svn = $(call query_repo_default,$(1))
+query_repo_cp = $(call query_repo_default,$(1))
+query_repo_ln = $(call query_repo_default,$(1))
+query_repo_hex = https://hex.pm/packages/$(if $(word 3,$(dep_$(1))),$(word 3,$(dep_$(1))),$(1))
+query_repo_fail = -
+query_repo_legacy = -
+
+query_version = $(call _qv,$(1),$(call query_fetch_method,$(1)))
+_qv = $(if $(query_version_$(2)),$(call query_version_$(2),$(1)),$(call dep_commit,$(1)))
+
+query_version_default = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(word 3,$(dep_$(1))),$(pkg_$(1)_commit)))
+query_version_git = $(call query_version_default,$(1))
+query_version_git-subfolder = $(call query_version_git,$(1))
+query_version_git-submodule = -
+query_version_hg = $(call query_version_default,$(1))
+query_version_svn = -
+query_version_cp = -
+query_version_ln = -
+query_version_hex = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(word 2,$(dep_$(1))),$(pkg_$(1)_commit)))
+query_version_fail = -
+query_version_legacy = -
+
+query_extra = $(call _qe,$(1),$(call query_fetch_method,$(1)))
+_qe = $(if $(query_extra_$(2)),$(call query_extra_$(2),$(1)),-)
+
+query_extra_git = -
+query_extra_git-subfolder = $(if $(dep_$(1)),subfolder=$(word 4,$(dep_$(1))),-)
+query_extra_git-submodule = -
+query_extra_hg = -
+query_extra_svn = -
+query_extra_cp = -
+query_extra_ln = -
+query_extra_hex = $(if $(dep_$(1)),package-name=$(word 3,$(dep_$(1))),-)
+query_extra_fail = -
+query_extra_legacy = -
+
+query_absolute_path = $(addprefix $(DEPS_DIR)/,$(call query_name,$(1)))
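+
+# Example (sketch): for the index entry "ranch" above, with no dep_ranch
+# override, $(call query_repo,ranch) resolves via pkg_ranch_fetch (git) to
+# pkg_ranch_repo, and $(call query_version,ranch) to pkg_ranch_commit.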
+
+# Deprecated legacy query functions.
+dep_fetch = $(call query_fetch_method,$(1))
+dep_name = $(call query_name,$(1))
+dep_repo = $(call query_repo_git,$(1))
+dep_commit = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(if $(filter hex,$(word 1,$(dep_$(1)))),$(word 2,$(dep_$(1))),$(word 3,$(dep_$(1)))),$(pkg_$(1)_commit)))
+
+LOCAL_DEPS_DIRS = $(foreach a,$(LOCAL_DEPS),$(if $(wildcard $(APPS_DIR)/$(a)),$(APPS_DIR)/$(a)))
+ALL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(foreach dep,$(filter-out $(IGNORE_DEPS),$(BUILD_DEPS) $(DEPS)),$(call dep_name,$(dep))))
+
+# When we are calling an app directly we don't want to include it here;
+# otherwise it would be treated both as an app and as the top-level project.
+ALL_APPS_DIRS = $(if $(wildcard $(APPS_DIR)/),$(filter-out $(APPS_DIR),$(shell find $(APPS_DIR) -maxdepth 1 -type d)))
+ifdef ROOT_DIR
+ifndef IS_APP
+ALL_APPS_DIRS := $(filter-out $(APPS_DIR)/$(notdir $(CURDIR)),$(ALL_APPS_DIRS))
+endif
+endif
+
+ifeq ($(filter $(APPS_DIR) $(DEPS_DIR),$(subst :, ,$(ERL_LIBS))),)
+ifeq ($(ERL_LIBS),)
+ ERL_LIBS = $(APPS_DIR):$(DEPS_DIR)
+else
+ ERL_LIBS := $(ERL_LIBS):$(APPS_DIR):$(DEPS_DIR)
+endif
+endif
+export ERL_LIBS
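+
+# With the defaults above this exports ERL_LIBS as $(APPS_DIR):$(DEPS_DIR),
+# so -include_lib and the code server can resolve apps and deps alike.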
+
+export NO_AUTOPATCH
+
+# Verbosity.
+
+dep_verbose_0 = @echo " DEP $1 ($(call dep_commit,$1))";
+dep_verbose_2 = set -x;
+dep_verbose = $(dep_verbose_$(V))
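+# V=0 prints a terse " DEP name (commit)" line per dependency; V=2 traces the
+# commands with `set -x`.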
+
+# Optimization: don't recompile deps unless truly necessary.
+
+ifndef IS_DEP
+ifneq ($(MAKELEVEL),0)
+$(shell rm -f ebin/dep_built)
+endif
+endif
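+
+# (A dep records a successful build by touching ebin/dep_built; see the deps
+# target below. The marker is cleared here on recursive, non-dep invocations
+# so a stale marker cannot suppress a needed rebuild.)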
+
+# Core targets.
+
+ALL_APPS_DIRS_TO_BUILD = $(if $(LOCAL_DEPS_DIRS)$(IS_APP),$(LOCAL_DEPS_DIRS),$(ALL_APPS_DIRS))
+
+apps:: $(ALL_APPS_DIRS) clean-tmp-deps.log | $(ERLANG_MK_TMP)
+# Create ebin directory for all apps to make sure Erlang recognizes them
+# as proper OTP applications when using -include_lib. This is a temporary
+# fix; a proper fix would be to compile apps/* in the right order.
+ifndef IS_APP
+ifneq ($(ALL_APPS_DIRS),)
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ mkdir -p $$dep/ebin; \
+ done
+endif
+endif
+# At the toplevel: if LOCAL_DEPS is defined with at least one local app, only
+# compile that list of apps. Otherwise, compile everything.
+# Within an app: compile all LOCAL_DEPS that are (uncompiled) local apps.
+ifneq ($(ALL_APPS_DIRS_TO_BUILD),)
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS_TO_BUILD); do \
+ if grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/apps.log; then \
+ :; \
+ else \
+ echo $$dep >> $(ERLANG_MK_TMP)/apps.log; \
+ $(MAKE) -C $$dep $(if $(IS_TEST),test-build-app) IS_APP=1; \
+ fi \
+ done
+endif
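+
+# Example (sketch): at the top level, LOCAL_DEPS = myapp1 myapp2 builds only
+# apps/myapp1 and apps/myapp2; leaving LOCAL_DEPS unset builds all of apps/*.
+# "myapp1" and "myapp2" are hypothetical app names.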
+
+clean-tmp-deps.log:
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_TMP)/apps.log $(ERLANG_MK_TMP)/deps.log
+endif
+
+# Erlang.mk does not rebuild dependencies after they were compiled
+# once. If a developer is working on the top-level project and some
+# dependencies at the same time, they may want to change this behavior.
+# There are two solutions:
+# 1. Set `FULL=1` so that all dependencies are visited and
+# recursively recompiled if necessary.
+# 2. Set `FORCE_REBUILD=` to the specific list of dependencies that
+# should be recompiled (instead of the whole set).
+
+FORCE_REBUILD ?=
+
+ifeq ($(origin FULL),undefined)
+ifneq ($(strip $(force_rebuild_dep)$(FORCE_REBUILD)),)
+define force_rebuild_dep
+echo "$(FORCE_REBUILD)" | grep -qw "$$(basename "$1")"
+endef
+endif
+endif
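+
+# Example (sketch):
+#   $ make FULL=1                  # visit all deps, recompiling as necessary
+#   $ make FORCE_REBUILD="ranch"   # force only the listed deps to rebuild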
+
+ifneq ($(SKIP_DEPS),)
+deps::
+else
+deps:: $(ALL_DEPS_DIRS) apps clean-tmp-deps.log | $(ERLANG_MK_TMP)
+ifneq ($(ALL_DEPS_DIRS),)
+ $(verbose) set -e; for dep in $(ALL_DEPS_DIRS); do \
+ if grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/deps.log; then \
+ :; \
+ else \
+ echo $$dep >> $(ERLANG_MK_TMP)/deps.log; \
+ if [ -z "$(strip $(FULL))" ] $(if $(force_rebuild_dep),&& ! ($(call force_rebuild_dep,$$dep)),) && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ elif [ -f $$dep/GNUmakefile ] || [ -f $$dep/makefile ] || [ -f $$dep/Makefile ]; then \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ else \
+ echo "Error: No Makefile to build dependency $$dep." >&2; \
+ exit 2; \
+ fi \
+ fi \
+ done
+endif
+endif
+
+# Deps related targets.
+
+# @todo Rename GNUmakefile and makefile to Makefile first, if they exist.
+# While the Makefile could be named GNUmakefile or makefile,
+# in practice only Makefile has been needed so far.
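+# Decision tree (sketch): a dep shipping its own erlang.mk is patched to load
+# the parent Erlang.mk; a dep whose Makefile appears rebar-based (rebar.lock,
+# rebar-style includes or mentions) goes through dep_autopatch2; a dep with
+# neither a Makefile nor a src/ directory gets a no-op Makefile.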
+define dep_autopatch
+ if [ -f $(DEPS_DIR)/$(1)/erlang.mk ]; then \
+ rm -rf $(DEPS_DIR)/$1/ebin/; \
+ $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+ $(call dep_autopatch_erlang_mk,$(1)); \
+ elif [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+ if [ -f $(DEPS_DIR)/$1/rebar.lock ]; then \
+ $(call dep_autopatch2,$1); \
+ elif [ 0 != `grep -c "include ../\w*\.mk" $(DEPS_DIR)/$(1)/Makefile` ]; then \
+ $(call dep_autopatch2,$(1)); \
+ elif [ 0 != `grep -ci "^[^#].*rebar" $(DEPS_DIR)/$(1)/Makefile` ]; then \
+ $(call dep_autopatch2,$(1)); \
+ elif [ -n "`find $(DEPS_DIR)/$(1)/ -type f -name \*.mk -not -name erlang.mk -exec grep -i "^[^#].*rebar" '{}' \;`" ]; then \
+ $(call dep_autopatch2,$(1)); \
+ fi \
+ else \
+ if [ ! -d $(DEPS_DIR)/$(1)/src/ ]; then \
+ $(call dep_autopatch_noop,$(1)); \
+ else \
+ $(call dep_autopatch2,$(1)); \
+ fi \
+ fi
+endef
+
+define dep_autopatch2
+ ! test -f $(DEPS_DIR)/$1/ebin/$1.app || \
+ mv -n $(DEPS_DIR)/$1/ebin/$1.app $(DEPS_DIR)/$1/src/$1.app.src; \
+ rm -f $(DEPS_DIR)/$1/ebin/$1.app; \
+ if [ -f $(DEPS_DIR)/$1/src/$1.app.src.script ]; then \
+ $(call erlang,$(call dep_autopatch_appsrc_script.erl,$(1))); \
+ fi; \
+ $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+ if [ -f $(DEPS_DIR)/$(1)/rebar -o -f $(DEPS_DIR)/$(1)/rebar.config -o -f $(DEPS_DIR)/$(1)/rebar.config.script -o -f $(DEPS_DIR)/$1/rebar.lock ]; then \
+ $(call dep_autopatch_fetch_rebar); \
+ $(call dep_autopatch_rebar,$(1)); \
+ else \
+ $(call dep_autopatch_gen,$(1)); \
+ fi
+endef
+
+define dep_autopatch_noop
+ printf "noop:\n" > $(DEPS_DIR)/$(1)/Makefile
+endef
+
+# Replace "include erlang.mk" with a line that will load the parent Erlang.mk
+# if given. Do it for all 3 possible Makefile file names.
+ifeq ($(NO_AUTOPATCH_ERLANG_MK),)
+define dep_autopatch_erlang_mk
+ for f in Makefile makefile GNUmakefile; do \
+ if [ -f $(DEPS_DIR)/$1/$$f ]; then \
+ sed -i.bak s/'include *erlang.mk'/'include $$(if $$(ERLANG_MK_FILENAME),$$(ERLANG_MK_FILENAME),erlang.mk)'/ $(DEPS_DIR)/$1/$$f; \
+ fi \
+ done
+endef
+else
+define dep_autopatch_erlang_mk
+ :
+endef
+endif
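+
+# Example (sketch): a dep's "include erlang.mk" line becomes
+#   include $(if $(ERLANG_MK_FILENAME),$(ERLANG_MK_FILENAME),erlang.mk)
+# so the parent project's copy of erlang.mk is reused when its path is known.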
+
+define dep_autopatch_gen
+ printf "%s\n" \
+ "ERLC_OPTS = +debug_info" \
+ "include ../../erlang.mk" > $(DEPS_DIR)/$(1)/Makefile
+endef
+
+# We use flock/lockf when available to avoid concurrency issues.
+define dep_autopatch_fetch_rebar
+ if command -v flock >/dev/null; then \
+ flock $(ERLANG_MK_TMP)/rebar.lock sh -c "$(call dep_autopatch_fetch_rebar2)"; \
+ elif command -v lockf >/dev/null; then \
+ lockf $(ERLANG_MK_TMP)/rebar.lock sh -c "$(call dep_autopatch_fetch_rebar2)"; \
+ else \
+ $(call dep_autopatch_fetch_rebar2); \
+ fi
+endef
+
+define dep_autopatch_fetch_rebar2
+ if [ ! -d $(ERLANG_MK_TMP)/rebar ]; then \
+ git clone -q -n -- $(REBAR_GIT) $(ERLANG_MK_TMP)/rebar; \
+ cd $(ERLANG_MK_TMP)/rebar; \
+ git checkout -q $(REBAR_COMMIT); \
+ ./bootstrap; \
+ cd -; \
+ fi
+endef
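+
+# (rebar is cloned and bootstrapped once into $(ERLANG_MK_TMP)/rebar, then
+# reused for every autopatched rebar-based dependency.)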
+
+define dep_autopatch_rebar
+ if [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+ mv $(DEPS_DIR)/$(1)/Makefile $(DEPS_DIR)/$(1)/Makefile.orig.mk; \
+ fi; \
+ $(call erlang,$(call dep_autopatch_rebar.erl,$(1))); \
+ rm -f $(DEPS_DIR)/$(1)/ebin/$(1).app
+endef
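+
+# dep_autopatch_rebar.erl (below) reads the dep's rebar.config and optional
+# rebar.config.script, then appends equivalent erlang.mk statements
+# (ERLC_OPTS, DEPS, pre-hooks) to the dep's Makefile; port specs are written
+# to c_src/Makefile.erlang.mk.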
+
+define dep_autopatch_rebar.erl
+ application:load(rebar),
+ application:set_env(rebar, log_level, debug),
+ rmemo:start(),
+ Conf1 = case file:consult("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config)") of
+ {ok, Conf0} -> Conf0;
+ _ -> []
+ end,
+ {Conf, OsEnv} = fun() ->
+ case filelib:is_file("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)") of
+ false -> {Conf1, []};
+ true ->
+ Bindings0 = erl_eval:new_bindings(),
+ Bindings1 = erl_eval:add_binding('CONFIG', Conf1, Bindings0),
+ Bindings = erl_eval:add_binding('SCRIPT', "$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings1),
+ Before = os:getenv(),
+ {ok, Conf2} = file:script("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings),
+ {Conf2, lists:foldl(fun(E, Acc) -> lists:delete(E, Acc) end, os:getenv(), Before)}
+ end
+ end(),
+ Write = fun (Text) ->
+ file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/Makefile)", Text, [append])
+ end,
+ Escape = fun (Text) ->
+ re:replace(Text, "\\\\$$", "\$$$$", [global, {return, list}])
+ end,
+ Write("IGNORE_DEPS += edown eper eunit_formatters meck node_package "
+ "rebar_lock_deps_plugin rebar_vsn_plugin reltool_util\n"),
+ Write("C_SRC_DIR = /path/do/not/exist\n"),
+ Write("C_SRC_TYPE = rebar\n"),
+ Write("DRV_CFLAGS = -fPIC\nexport DRV_CFLAGS\n"),
+ Write(["ERLANG_ARCH = ", rebar_utils:wordsize(), "\nexport ERLANG_ARCH\n"]),
+ ToList = fun
+ (V) when is_atom(V) -> atom_to_list(V);
+ (V) when is_list(V) -> "'\\"" ++ V ++ "\\"'"
+ end,
+ fun() ->
+ Write("ERLC_OPTS = +debug_info\nexport ERLC_OPTS\n"),
+ case lists:keyfind(erl_opts, 1, Conf) of
+ false -> ok;
+ {_, ErlOpts} ->
+ lists:foreach(fun
+ ({d, D}) ->
+ Write("ERLC_OPTS += -D" ++ ToList(D) ++ "=1\n");
+ ({d, DKey, DVal}) ->
+ Write("ERLC_OPTS += -D" ++ ToList(DKey) ++ "=" ++ ToList(DVal) ++ "\n");
+ ({i, I}) ->
+ Write(["ERLC_OPTS += -I ", I, "\n"]);
+ ({platform_define, Regex, D}) ->
+ case rebar_utils:is_arch(Regex) of
+ true -> Write("ERLC_OPTS += -D" ++ ToList(D) ++ "=1\n");
+ false -> ok
+ end;
+ ({parse_transform, PT}) ->
+ Write("ERLC_OPTS += +'{parse_transform, " ++ ToList(PT) ++ "}'\n");
+ (_) -> ok
+ end, ErlOpts)
+ end,
+ Write("\n")
+ end(),
+ GetHexVsn = fun(N, NP) ->
+ case file:consult("$(call core_native_path,$(DEPS_DIR)/$1/rebar.lock)") of
+ {ok, Lock} ->
+ io:format("~p~n", [Lock]),
+ case lists:keyfind("1.1.0", 1, Lock) of
+ {_, LockPkgs} ->
+ io:format("~p~n", [LockPkgs]),
+ case lists:keyfind(atom_to_binary(N, latin1), 1, LockPkgs) of
+ {_, {pkg, _, Vsn}, _} ->
+ io:format("~p~n", [Vsn]),
+ {N, {hex, NP, binary_to_list(Vsn)}};
+ _ ->
+ false
+ end;
+ _ ->
+ false
+ end;
+ _ ->
+ false
+ end
+ end,
+ SemVsn = fun
+ ("~>" ++ S0) ->
+ S = case S0 of
+ " " ++ S1 -> S1;
+ _ -> S0
+ end,
+ case length([ok || $$. <- S]) of
+ 0 -> S ++ ".0.0";
+ 1 -> S ++ ".0";
+ _ -> S
+ end;
+ (S) -> S
+ end,
+ fun() ->
+ File = case lists:keyfind(deps, 1, Conf) of
+ false -> [];
+ {_, Deps} ->
+ [begin case case Dep of
+ N when is_atom(N) -> GetHexVsn(N, N);
+ {N, S} when is_atom(N), is_list(S) -> {N, {hex, N, SemVsn(S)}};
+ {N, {pkg, NP}} when is_atom(N) -> GetHexVsn(N, NP);
+ {N, S, {pkg, NP}} -> {N, {hex, NP, S}};
+ {N, S} when is_tuple(S) -> {N, S};
+ {N, _, S} -> {N, S};
+ {N, _, S, _} -> {N, S};
+ _ -> false
+ end of
+ false -> ok;
+ {Name, Source} ->
+ {Method, Repo, Commit} = case Source of
+ {hex, NPV, V} -> {hex, V, NPV};
+ {git, R} -> {git, R, master};
+ {M, R, {branch, C}} -> {M, R, C};
+ {M, R, {ref, C}} -> {M, R, C};
+ {M, R, {tag, C}} -> {M, R, C};
+ {M, R, C} -> {M, R, C}
+ end,
+ Write(io_lib:format("DEPS += ~s\ndep_~s = ~s ~s ~s~n", [Name, Name, Method, Repo, Commit]))
+ end end || Dep <- Deps]
+ end
+ end(),
+ fun() ->
+ case lists:keyfind(erl_first_files, 1, Conf) of
+ false -> ok;
+ {_, Files} ->
+ Names = [[" ", case lists:reverse(F) of
+ "lre." ++ Elif -> lists:reverse(Elif);
+ "lrx." ++ Elif -> lists:reverse(Elif);
+ "lry." ++ Elif -> lists:reverse(Elif);
+ Elif -> lists:reverse(Elif)
+ end] || "src/" ++ F <- Files],
+ Write(io_lib:format("COMPILE_FIRST +=~s\n", [Names]))
+ end
+ end(),
+ Write("\n\nrebar_dep: preprocess pre-deps deps pre-app app\n"),
+ Write("\npreprocess::\n"),
+ Write("\npre-deps::\n"),
+ Write("\npre-app::\n"),
+ PatchHook = fun(Cmd) ->
+ Cmd2 = re:replace(Cmd, "^([g]?make)(.*)( -C.*)", "\\\\1\\\\3\\\\2", [{return, list}]),
+ case Cmd2 of
+ "make -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1);
+ "gmake -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1);
+ "make " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1);
+ "gmake " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1);
+ _ -> Escape(Cmd)
+ end
+ end,
+ fun() ->
+ case lists:keyfind(pre_hooks, 1, Conf) of
+ false -> ok;
+ {_, Hooks} ->
+ [case H of
+ {'get-deps', Cmd} ->
+ Write("\npre-deps::\n\t" ++ PatchHook(Cmd) ++ "\n");
+ {compile, Cmd} ->
+ Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n");
+ {Regex, compile, Cmd} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n");
+ false -> ok
+ end;
+ _ -> ok
+ end || H <- Hooks]
+ end
+ end(),
+ ShellToMk = fun(V0) ->
+ V1 = re:replace(V0, "[$$][(]", "$$\(shell ", [global]),
+ V = re:replace(V1, "([$$])(?![(])(\\\\w*)", "\\\\1(\\\\2)", [global]),
+ re:replace(V, "-Werror\\\\b", "", [{return, list}, global])
+ end,
+ PortSpecs = fun() ->
+ case lists:keyfind(port_specs, 1, Conf) of
+ false ->
+ case filelib:is_dir("$(call core_native_path,$(DEPS_DIR)/$1/c_src)") of
+ false -> [];
+ true ->
+ [{"priv/" ++ proplists:get_value(so_name, Conf, "$(1)_drv.so"),
+ proplists:get_value(port_sources, Conf, ["c_src/*.c"]), []}]
+ end;
+ {_, Specs} ->
+ lists:flatten([case S of
+ {Output, Input} -> {ShellToMk(Output), Input, []};
+ {Regex, Output, Input} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {ShellToMk(Output), Input, []};
+ false -> []
+ end;
+ {Regex, Output, Input, [{env, Env}]} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {ShellToMk(Output), Input, Env};
+ false -> []
+ end
+ end || S <- Specs])
+ end
+ end(),
+ PortSpecWrite = fun (Text) ->
+ file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/c_src/Makefile.erlang.mk)", Text, [append])
+ end,
+ case PortSpecs of
+ [] -> ok;
+ _ ->
+ Write("\npre-app::\n\t@$$\(MAKE) --no-print-directory -f c_src/Makefile.erlang.mk\n"),
+ PortSpecWrite(io_lib:format("ERL_CFLAGS ?= -finline-functions -Wall -fPIC -I \\"~s/erts-~s/include\\" -I \\"~s\\"\n",
+ [code:root_dir(), erlang:system_info(version), code:lib_dir(erl_interface, include)])),
+ PortSpecWrite(io_lib:format("ERL_LDFLAGS ?= -L \\"~s\\" -lei\n",
+ [code:lib_dir(erl_interface, lib)])),
+ [PortSpecWrite(["\n", E, "\n"]) || E <- OsEnv],
+ FilterEnv = fun(Env) ->
+ lists:flatten([case E of
+ {_, _} -> E;
+ {Regex, K, V} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {K, V};
+ false -> []
+ end
+ end || E <- Env])
+ end,
+ MergeEnv = fun(Env) ->
+ lists:foldl(fun ({K, V}, Acc) ->
+ case lists:keyfind(K, 1, Acc) of
+ false -> [{K, rebar_utils:expand_env_variable(V, K, "")}|Acc];
+ {_, V0} -> [{K, rebar_utils:expand_env_variable(V, K, V0)}|Acc]
+ end
+ end, [], Env)
+ end,
+ PortEnv = case lists:keyfind(port_env, 1, Conf) of
+ false -> [];
+ {_, PortEnv0} -> FilterEnv(PortEnv0)
+ end,
+ PortSpec = fun ({Output, Input0, Env}) ->
+ filelib:ensure_dir("$(call core_native_path,$(DEPS_DIR)/$1/)" ++ Output),
+ Input = [[" ", I] || I <- Input0],
+ PortSpecWrite([
+ [["\n", K, " = ", ShellToMk(V)] || {K, V} <- lists:reverse(MergeEnv(PortEnv))],
+ case $(PLATFORM) of
+ darwin -> "\n\nLDFLAGS += -flat_namespace -undefined suppress";
+ _ -> ""
+ end,
+ "\n\nall:: ", Output, "\n\t@:\n\n",
+ "%.o: %.c\n\t$$\(CC) -c -o $$\@ $$\< $$\(CFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.C\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.cc\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.cpp\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ [[Output, ": ", K, " += ", ShellToMk(V), "\n"] || {K, V} <- lists:reverse(MergeEnv(FilterEnv(Env)))],
+ Output, ": $$\(foreach ext,.c .C .cc .cpp,",
+ "$$\(patsubst %$$\(ext),%.o,$$\(filter %$$\(ext),$$\(wildcard", Input, "))))\n",
+ "\t$$\(CC) -o $$\@ $$\? $$\(LDFLAGS) $$\(ERL_LDFLAGS) $$\(DRV_LDFLAGS) $$\(EXE_LDFLAGS)",
+ case {filename:extension(Output), $(PLATFORM)} of
+ {[], _} -> "\n";
+ {_, darwin} -> "\n";
+ _ -> " -shared\n"
+ end])
+ end,
+ [PortSpec(S) || S <- PortSpecs]
+ end,
+ fun() ->
+ case lists:keyfind(plugins, 1, Conf) of
+ false -> ok;
+ {_, Plugins0} ->
+ Plugins = [P || P <- Plugins0, is_tuple(P)],
+ case lists:keyfind('lfe-compile', 1, Plugins) of
+ false -> ok;
+ _ -> Write("\nBUILD_DEPS = lfe lfe.mk\ndep_lfe.mk = git https://github.com/ninenines/lfe.mk master\nDEP_PLUGINS = lfe.mk\n")
+ end
+ end
+ end(),
+ Write("\ninclude $$\(if $$\(ERLANG_MK_FILENAME),$$\(ERLANG_MK_FILENAME),erlang.mk)"),
+ RunPlugin = fun(Plugin, Step) ->
+ case erlang:function_exported(Plugin, Step, 2) of
+ false -> ok;
+ true ->
+ c:cd("$(call core_native_path,$(DEPS_DIR)/$1/)"),
+ Ret = Plugin:Step({config, "", Conf, dict:new(), dict:new(), dict:new(),
+ dict:store(base_dir, "", dict:new())}, undefined),
+ io:format("rebar plugin ~p step ~p ret ~p~n", [Plugin, Step, Ret])
+ end
+ end,
+ fun() ->
+ case lists:keyfind(plugins, 1, Conf) of
+ false -> ok;
+ {_, Plugins0} ->
+ Plugins = [P || P <- Plugins0, is_atom(P)],
+ [begin
+ case lists:keyfind(deps, 1, Conf) of
+ false -> ok;
+ {_, Deps} ->
+ case lists:keyfind(P, 1, Deps) of
+ false -> ok;
+ _ ->
+ Path = "$(call core_native_path,$(DEPS_DIR)/)" ++ atom_to_list(P),
+ io:format("~s", [os:cmd("$(MAKE) -C $(call core_native_path,$(DEPS_DIR)/$1) " ++ Path)]),
+ io:format("~s", [os:cmd("$(MAKE) -C " ++ Path ++ " IS_DEP=1")]),
+ code:add_patha(Path ++ "/ebin")
+ end
+ end
+ end || P <- Plugins],
+ [case code:load_file(P) of
+ {module, P} -> ok;
+ _ ->
+ case lists:keyfind(plugin_dir, 1, Conf) of
+ false -> ok;
+ {_, PluginsDir} ->
+ ErlFile = "$(call core_native_path,$(DEPS_DIR)/$1/)" ++ PluginsDir ++ "/" ++ atom_to_list(P) ++ ".erl",
+ {ok, P, Bin} = compile:file(ErlFile, [binary]),
+ {module, P} = code:load_binary(P, ErlFile, Bin)
+ end
+ end || P <- Plugins],
+ [RunPlugin(P, preprocess) || P <- Plugins],
+ [RunPlugin(P, pre_compile) || P <- Plugins],
+ [RunPlugin(P, compile) || P <- Plugins]
+ end
+ end(),
+ halt()
+endef
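+
+# A sketch of what dep_autopatch_rebar.erl generates, assuming a hypothetical
+# dependency whose rebar.config contains
+#   {erl_opts, [{d, 'NO_DEBUG'}]}.
+#   {deps, [{cowlib, "~> 2.9"}]}.
+# The Makefile written for that dependency then contains, among the fixed
+# boilerplate above:
+#   ERLC_OPTS = +debug_info
+#   ERLC_OPTS += -DNO_DEBUG=1
+#   DEPS += cowlib
+#   dep_cowlib = hex 2.9.0 cowlib
+#   rebar_dep: preprocess pre-deps deps pre-app app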
+
+define dep_autopatch_appsrc_script.erl
+ AppSrc = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+ AppSrcScript = AppSrc ++ ".script",
+ {ok, Conf0} = file:consult(AppSrc),
+ Bindings0 = erl_eval:new_bindings(),
+ Bindings1 = erl_eval:add_binding('CONFIG', Conf0, Bindings0),
+ Bindings = erl_eval:add_binding('SCRIPT', AppSrcScript, Bindings1),
+ Conf = case file:script(AppSrcScript, Bindings) of
+ {ok, [C]} -> C;
+ {ok, C} -> C
+ end,
+ ok = file:write_file(AppSrc, io_lib:format("~p.~n", [Conf])),
+ halt()
+endef
+
+define dep_autopatch_appsrc.erl
+ AppSrcOut = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+ AppSrcIn = case filelib:is_regular(AppSrcOut) of false -> "$(call core_native_path,$(DEPS_DIR)/$1/ebin/$1.app)"; true -> AppSrcOut end,
+ case filelib:is_regular(AppSrcIn) of
+ false -> ok;
+ true ->
+ {ok, [{application, $(1), L0}]} = file:consult(AppSrcIn),
+ L1 = lists:keystore(modules, 1, L0, {modules, []}),
+ L2 = case lists:keyfind(vsn, 1, L1) of
+ {_, git} -> lists:keyreplace(vsn, 1, L1, {vsn, lists:droplast(os:cmd("git -C $(DEPS_DIR)/$1 describe --dirty --tags --always"))});
+ {_, {cmd, _}} -> lists:keyreplace(vsn, 1, L1, {vsn, "cmd"});
+ _ -> L1
+ end,
+ L3 = case lists:keyfind(registered, 1, L2) of false -> [{registered, []}|L2]; _ -> L2 end,
+ ok = file:write_file(AppSrcOut, io_lib:format("~p.~n", [{application, $(1), L3}])),
+ case AppSrcOut of AppSrcIn -> ok; _ -> ok = file:delete(AppSrcIn) end
+ end,
+ halt()
+endef
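+
+# For reference: with {vsn, git} in a dependency's .app.src, the script above
+# replaces the entry with the output of "git describe", giving for example
+# {vsn, "2.1.0-12-gabc1234"} (hypothetical value).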
+
+define dep_fetch_git
+ git clone -q -n -- $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && git checkout -q $(call dep_commit,$(1));
+endef
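+
+# Example declaration in a user Makefile (hypothetical choice of dependency;
+# the second word is the repository and the third the commit, tag or branch):
+#   DEPS = cowlib
+#   dep_cowlib = git https://github.com/ninenines/cowlib 2.9.1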
+
+define dep_fetch_git-subfolder
+ mkdir -p $(ERLANG_MK_TMP)/git-subfolder; \
+ git clone -q -n -- $(call dep_repo,$1) \
+ $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1); \
+ cd $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1) \
+ && git checkout -q $(call dep_commit,$1); \
+ ln -s $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1)/$(word 4,$(dep_$(1))) \
+ $(DEPS_DIR)/$(call dep_name,$1);
+endef
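+
+# Example (hypothetical monorepo): the fourth word is the subfolder that gets
+# symlinked into $(DEPS_DIR) by the rule above:
+#   dep_myapp = git-subfolder https://example.com/monorepo.git master apps/myapp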
+
+define dep_fetch_git-submodule
+ git submodule update --init -- $(DEPS_DIR)/$1;
+endef
+
+define dep_fetch_hg
+ hg clone -q -U $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && hg update -q $(call dep_commit,$(1));
+endef
+
+define dep_fetch_svn
+ svn checkout -q $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+define dep_fetch_cp
+ cp -R $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+define dep_fetch_ln
+ ln -s $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
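+
+# Corresponding example declarations (hypothetical repositories and paths):
+#   dep_foo = hg https://example.com/hg/foo default
+#   dep_bar = svn https://example.com/svn/bar/trunk
+#   dep_baz = cp /path/to/local/baz
+#   dep_quux = ln /path/to/local/quux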
+
+# Hex dependencies are identified by a package version only. No need to look
+# them up in the Erlang.mk package index.
+define dep_fetch_hex
+ mkdir -p $(ERLANG_MK_TMP)/hex $(DEPS_DIR)/$1; \
+ $(call core_http_get,$(ERLANG_MK_TMP)/hex/$1.tar,\
+ https://repo.hex.pm/tarballs/$(if $(word 3,$(dep_$1)),$(word 3,$(dep_$1)),$1)-$(strip $(word 2,$(dep_$1))).tar); \
+ tar -xOf $(ERLANG_MK_TMP)/hex/$1.tar contents.tar.gz | tar -C $(DEPS_DIR)/$1 -xzf -;
+endef
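+
+# Example declarations (hypothetical versions): the second word is the Hex
+# version; the optional third word is the Hex package name, for when it
+# differs from the application name:
+#   dep_cowlib = hex 2.9.1
+#   dep_myapp = hex 1.0.0 myapp_pkg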
+
+define dep_fetch_fail
+ echo "Error: Unknown or invalid dependency: $(1)." >&2; \
+ exit 78;
+endef
+
+# Kept for compatibility with older Erlang.mk dependency configuration formats.
+define dep_fetch_legacy
+ $(warning WARNING: '$(1)' dependency configuration uses deprecated format.) \
+ git clone -q -n -- $(word 1,$(dep_$(1))) $(DEPS_DIR)/$(1); \
+ cd $(DEPS_DIR)/$(1) && git checkout -q $(if $(word 2,$(dep_$(1))),$(word 2,$(dep_$(1))),master);
+endef
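+
+# The deprecated format warned about above looked like this (hypothetical):
+#   dep_foo = https://example.com/foo.git v1.0.0
+# that is, repository and commit only, with no leading fetch method.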
+
+define dep_target
+$(DEPS_DIR)/$(call dep_name,$1): | $(ERLANG_MK_TMP)
+ $(eval DEP_NAME := $(call dep_name,$1))
+ $(eval DEP_STR := $(if $(filter $1,$(DEP_NAME)),$1,"$1 ($(DEP_NAME))"))
+ $(verbose) if test -d $(APPS_DIR)/$(DEP_NAME); then \
+ echo "Error: Dependency" $(DEP_STR) "conflicts with application found in $(APPS_DIR)/$(DEP_NAME)." >&2; \
+ exit 17; \
+ fi
+ $(verbose) mkdir -p $(DEPS_DIR)
+ $(dep_verbose) $(call dep_fetch_$(strip $(call dep_fetch,$(1))),$(1))
+ $(verbose) if [ -f $(DEPS_DIR)/$(1)/configure.ac -o -f $(DEPS_DIR)/$(1)/configure.in ] \
+ && [ ! -f $(DEPS_DIR)/$(1)/configure ]; then \
+ echo " AUTO " $(DEP_STR); \
+ cd $(DEPS_DIR)/$(1) && autoreconf -Wall -vif -I m4; \
+ fi
+ - $(verbose) if [ -f $(DEPS_DIR)/$(DEP_NAME)/configure ]; then \
+ echo " CONF " $(DEP_STR); \
+ cd $(DEPS_DIR)/$(DEP_NAME) && ./configure; \
+ fi
+ifeq ($(filter $(1),$(NO_AUTOPATCH)),)
+ $(verbose) $$(MAKE) --no-print-directory autopatch-$(DEP_NAME)
+endif
+
+.PHONY: autopatch-$(call dep_name,$1)
+
+autopatch-$(call dep_name,$1)::
+ $(verbose) if [ "$(1)" = "amqp_client" -a "$(RABBITMQ_CLIENT_PATCH)" ]; then \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \
+ echo " PATCH Downloading rabbitmq-codegen"; \
+ git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \
+ fi; \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-server ]; then \
+ echo " PATCH Downloading rabbitmq-server"; \
+ git clone https://github.com/rabbitmq/rabbitmq-server.git $(DEPS_DIR)/rabbitmq-server; \
+ fi; \
+ ln -s $(DEPS_DIR)/amqp_client/deps/rabbit_common-0.0.0 $(DEPS_DIR)/rabbit_common; \
+ elif [ "$(1)" = "rabbit" -a "$(RABBITMQ_SERVER_PATCH)" ]; then \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \
+ echo " PATCH Downloading rabbitmq-codegen"; \
+ git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \
+		fi; \
+ elif [ "$1" = "elixir" -a "$(ELIXIR_PATCH)" ]; then \
+ ln -s lib/elixir/ebin $(DEPS_DIR)/elixir/; \
+ else \
+ $$(call dep_autopatch,$(call dep_name,$1)) \
+ fi
+endef
+
+$(foreach dep,$(BUILD_DEPS) $(DEPS),$(eval $(call dep_target,$(dep))))
+
+ifndef IS_APP
+clean:: clean-apps
+
+clean-apps:
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ $(MAKE) -C $$dep clean IS_APP=1; \
+ done
+
+distclean:: distclean-apps
+
+distclean-apps:
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ $(MAKE) -C $$dep distclean IS_APP=1; \
+ done
+endif
+
+ifndef SKIP_DEPS
+distclean:: distclean-deps
+
+distclean-deps:
+ $(gen_verbose) rm -rf $(DEPS_DIR)
+endif
+
+# Forward-declare variables used in core/deps-tools.mk. This is required
+# in case plugins use them.
+
+ERLANG_MK_RECURSIVE_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-deps-list.log
+ERLANG_MK_RECURSIVE_DOC_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-doc-deps-list.log
+ERLANG_MK_RECURSIVE_REL_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-rel-deps-list.log
+ERLANG_MK_RECURSIVE_TEST_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-test-deps-list.log
+ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-shell-deps-list.log
+
+ERLANG_MK_QUERY_DEPS_FILE = $(ERLANG_MK_TMP)/query-deps.log
+ERLANG_MK_QUERY_DOC_DEPS_FILE = $(ERLANG_MK_TMP)/query-doc-deps.log
+ERLANG_MK_QUERY_REL_DEPS_FILE = $(ERLANG_MK_TMP)/query-rel-deps.log
+ERLANG_MK_QUERY_TEST_DEPS_FILE = $(ERLANG_MK_TMP)/query-test-deps.log
+ERLANG_MK_QUERY_SHELL_DEPS_FILE = $(ERLANG_MK_TMP)/query-shell-deps.log
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: clean-app
+
+# Configuration.
+
+ERLC_OPTS ?= -Werror +debug_info +warn_export_vars +warn_shadow_vars \
+ +warn_obsolete_guard # +bin_opt_info +warn_export_all +warn_missing_spec
+COMPILE_FIRST ?=
+COMPILE_FIRST_PATHS = $(addprefix src/,$(addsuffix .erl,$(COMPILE_FIRST)))
+ERLC_EXCLUDE ?=
+ERLC_EXCLUDE_PATHS = $(addprefix src/,$(addsuffix .erl,$(ERLC_EXCLUDE)))
+
+ERLC_ASN1_OPTS ?=
+
+ERLC_MIB_OPTS ?=
+COMPILE_MIB_FIRST ?=
+COMPILE_MIB_FIRST_PATHS = $(addprefix mibs/,$(addsuffix .mib,$(COMPILE_MIB_FIRST)))
+
+# Verbosity.
+
+app_verbose_0 = @echo " APP " $(PROJECT);
+app_verbose_2 = set -x;
+app_verbose = $(app_verbose_$(V))
+
+appsrc_verbose_0 = @echo " APP " $(PROJECT).app.src;
+appsrc_verbose_2 = set -x;
+appsrc_verbose = $(appsrc_verbose_$(V))
+
+makedep_verbose_0 = @echo " DEPEND" $(PROJECT).d;
+makedep_verbose_2 = set -x;
+makedep_verbose = $(makedep_verbose_$(V))
+
+erlc_verbose_0 = @echo " ERLC " $(filter-out $(patsubst %,%.erl,$(ERLC_EXCLUDE)),\
+ $(filter %.erl %.core,$(?F)));
+erlc_verbose_2 = set -x;
+erlc_verbose = $(erlc_verbose_$(V))
+
+xyrl_verbose_0 = @echo " XYRL " $(filter %.xrl %.yrl,$(?F));
+xyrl_verbose_2 = set -x;
+xyrl_verbose = $(xyrl_verbose_$(V))
+
+asn1_verbose_0 = @echo " ASN1 " $(filter %.asn1,$(?F));
+asn1_verbose_2 = set -x;
+asn1_verbose = $(asn1_verbose_$(V))
+
+mib_verbose_0 = @echo " MIB " $(filter %.bin %.mib,$(?F));
+mib_verbose_2 = set -x;
+mib_verbose = $(mib_verbose_$(V))
+
+ifneq ($(wildcard src/),)
+
+# Targets.
+
+app:: $(if $(wildcard ebin/test),clean) deps
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d
+ $(verbose) $(MAKE) --no-print-directory app-build
+
+ifeq ($(wildcard src/$(PROJECT_MOD).erl),)
+define app_file
+{application, '$(PROJECT)', [
+ {description, "$(PROJECT_DESCRIPTION)"},
+ {vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP),
+ {id$(comma)$(space)"$(1)"}$(comma))
+ {modules, [$(call comma_list,$(2))]},
+ {registered, []},
+ {applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(foreach dep,$(DEPS),$(call dep_name,$(dep))))]},
+ {env, $(subst \,\\,$(PROJECT_ENV))}$(if $(findstring {,$(PROJECT_APP_EXTRA_KEYS)),$(comma)$(newline)$(tab)$(subst \,\\,$(PROJECT_APP_EXTRA_KEYS)),)
+]}.
+endef
+else
+define app_file
+{application, '$(PROJECT)', [
+ {description, "$(PROJECT_DESCRIPTION)"},
+ {vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP),
+ {id$(comma)$(space)"$(1)"}$(comma))
+ {modules, [$(call comma_list,$(2))]},
+ {registered, [$(call comma_list,$(PROJECT)_sup $(PROJECT_REGISTERED))]},
+ {applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(foreach dep,$(DEPS),$(call dep_name,$(dep))))]},
+ {mod, {$(PROJECT_MOD), []}},
+ {env, $(subst \,\\,$(PROJECT_ENV))}$(if $(findstring {,$(PROJECT_APP_EXTRA_KEYS)),$(comma)$(newline)$(tab)$(subst \,\\,$(PROJECT_APP_EXTRA_KEYS)),)
+]}.
+endef
+endif
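+
+# Sample of a generated ebin/$(PROJECT).app for a project without a
+# $(PROJECT_MOD) module (hypothetical names and values):
+#   {application, 'foo', [
+#       {description, "New project"},
+#       {vsn, "0.1.0"},
+#       {modules, ['foo_a','foo_b']},
+#       {registered, []},
+#       {applications, [kernel,stdlib]},
+#       {env, []}
+#   ]}.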
+
+app-build: ebin/$(PROJECT).app
+ $(verbose) :
+
+# Source files.
+
+ALL_SRC_FILES := $(sort $(call core_find,src/,*))
+
+ERL_FILES := $(filter %.erl,$(ALL_SRC_FILES))
+CORE_FILES := $(filter %.core,$(ALL_SRC_FILES))
+
+# ASN.1 files.
+
+ifneq ($(wildcard asn1/),)
+ASN1_FILES = $(sort $(call core_find,asn1/,*.asn1))
+ERL_FILES += $(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES))))
+
+define compile_asn1
+ $(verbose) mkdir -p include/
+ $(asn1_verbose) erlc -v -I include/ -o asn1/ +noobj $(ERLC_ASN1_OPTS) $(1)
+ $(verbose) mv asn1/*.erl src/
+ -$(verbose) mv asn1/*.hrl include/
+ $(verbose) mv asn1/*.asn1db include/
+endef
+
+$(PROJECT).d:: $(ASN1_FILES)
+ $(if $(strip $?),$(call compile_asn1,$?))
+endif
+
+# SNMP MIB files.
+
+ifneq ($(wildcard mibs/),)
+MIB_FILES = $(sort $(call core_find,mibs/,*.mib))
+
+$(PROJECT).d:: $(COMPILE_MIB_FIRST_PATHS) $(MIB_FILES)
+ $(verbose) mkdir -p include/ priv/mibs/
+ $(mib_verbose) erlc -v $(ERLC_MIB_OPTS) -o priv/mibs/ -I priv/mibs/ $?
+ $(mib_verbose) erlc -o include/ -- $(addprefix priv/mibs/,$(patsubst %.mib,%.bin,$(notdir $?)))
+endif
+
+# Leex and Yecc files.
+
+XRL_FILES := $(filter %.xrl,$(ALL_SRC_FILES))
+XRL_ERL_FILES = $(addprefix src/,$(patsubst %.xrl,%.erl,$(notdir $(XRL_FILES))))
+ERL_FILES += $(XRL_ERL_FILES)
+
+YRL_FILES := $(filter %.yrl,$(ALL_SRC_FILES))
+YRL_ERL_FILES = $(addprefix src/,$(patsubst %.yrl,%.erl,$(notdir $(YRL_FILES))))
+ERL_FILES += $(YRL_ERL_FILES)
+
+$(PROJECT).d:: $(XRL_FILES) $(YRL_FILES)
+ $(if $(strip $?),$(xyrl_verbose) erlc -v -o src/ $(YRL_ERLC_OPTS) $?)
+
+# Erlang and Core Erlang files.
+
+define makedep.erl
+ E = ets:new(makedep, [bag]),
+ G = digraph:new([acyclic]),
+ ErlFiles = lists:usort(string:tokens("$(ERL_FILES)", " ")),
+ DepsDir = "$(call core_native_path,$(DEPS_DIR))",
+ AppsDir = "$(call core_native_path,$(APPS_DIR))",
+ DepsDirsSrc = "$(if $(wildcard $(DEPS_DIR)/*/src), $(call core_native_path,$(wildcard $(DEPS_DIR)/*/src)))",
+ DepsDirsInc = "$(if $(wildcard $(DEPS_DIR)/*/include), $(call core_native_path,$(wildcard $(DEPS_DIR)/*/include)))",
+ AppsDirsSrc = "$(if $(wildcard $(APPS_DIR)/*/src), $(call core_native_path,$(wildcard $(APPS_DIR)/*/src)))",
+ AppsDirsInc = "$(if $(wildcard $(APPS_DIR)/*/include), $(call core_native_path,$(wildcard $(APPS_DIR)/*/include)))",
+ DepsDirs = lists:usort(string:tokens(DepsDirsSrc++DepsDirsInc, " ")),
+ AppsDirs = lists:usort(string:tokens(AppsDirsSrc++AppsDirsInc, " ")),
+ Modules = [{list_to_atom(filename:basename(F, ".erl")), F} || F <- ErlFiles],
+ Add = fun (Mod, Dep) ->
+ case lists:keyfind(Dep, 1, Modules) of
+ false -> ok;
+ {_, DepFile} ->
+ {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+ ets:insert(E, {ModFile, DepFile}),
+ digraph:add_vertex(G, Mod),
+ digraph:add_vertex(G, Dep),
+ digraph:add_edge(G, Mod, Dep)
+ end
+ end,
+ AddHd = fun (F, Mod, DepFile) ->
+ case file:open(DepFile, [read]) of
+ {error, enoent} ->
+ ok;
+ {ok, Fd} ->
+ {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+ case ets:match(E, {ModFile, DepFile}) of
+ [] ->
+ ets:insert(E, {ModFile, DepFile}),
+						F(F, Fd, Mod, 0);
+ _ -> ok
+ end
+ end
+ end,
+ SearchHrl = fun
+ F(_Hrl, []) -> {error,enoent};
+ F(Hrl, [Dir|Dirs]) ->
+ HrlF = filename:join([Dir,Hrl]),
+ case filelib:is_file(HrlF) of
+ true ->
+ {ok, HrlF};
+ false -> F(Hrl,Dirs)
+ end
+ end,
+ Attr = fun
+ (_F, Mod, behavior, Dep) ->
+ Add(Mod, Dep);
+ (_F, Mod, behaviour, Dep) ->
+ Add(Mod, Dep);
+ (_F, Mod, compile, {parse_transform, Dep}) ->
+ Add(Mod, Dep);
+ (_F, Mod, compile, Opts) when is_list(Opts) ->
+ case proplists:get_value(parse_transform, Opts) of
+ undefined -> ok;
+ Dep -> Add(Mod, Dep)
+ end;
+ (F, Mod, include, Hrl) ->
+ case SearchHrl(Hrl, ["src", "include",AppsDir,DepsDir]++AppsDirs++DepsDirs) of
+ {ok, FoundHrl} -> AddHd(F, Mod, FoundHrl);
+ {error, _} -> false
+ end;
+ (F, Mod, include_lib, Hrl) ->
+ case SearchHrl(Hrl, ["src", "include",AppsDir,DepsDir]++AppsDirs++DepsDirs) of
+ {ok, FoundHrl} -> AddHd(F, Mod, FoundHrl);
+ {error, _} -> false
+ end;
+ (F, Mod, import, {Imp, _}) ->
+ IsFile =
+ case lists:keyfind(Imp, 1, Modules) of
+ false -> false;
+ {_, FilePath} -> filelib:is_file(FilePath)
+ end,
+ case IsFile of
+ false -> ok;
+ true -> Add(Mod, Imp)
+ end;
+ (_, _, _, _) -> ok
+ end,
+ MakeDepend = fun
+ (F, Fd, Mod, StartLocation) ->
+			{ok, _Filename} = file:pid2name(Fd),
+ case io:parse_erl_form(Fd, undefined, StartLocation) of
+ {ok, AbsData, EndLocation} ->
+ case AbsData of
+ {attribute, _, Key, Value} ->
+ Attr(F, Mod, Key, Value),
+ F(F, Fd, Mod, EndLocation);
+ _ -> F(F, Fd, Mod, EndLocation)
+ end;
+				{eof, _} -> file:close(Fd);
+				{error, _ErrorDescription} ->
+					file:close(Fd);
+				{error, _ErrorInfo, ErrorLocation} ->
+ F(F, Fd, Mod, ErrorLocation)
+ end,
+ ok
+ end,
+ [begin
+ Mod = list_to_atom(filename:basename(F, ".erl")),
+ case file:open(F, [read]) of
+			{ok, Fd} -> MakeDepend(MakeDepend, Fd, Mod, 0);
+ {error, enoent} -> ok
+ end
+ end || F <- ErlFiles],
+ Depend = sofs:to_external(sofs:relation_to_family(sofs:relation(ets:tab2list(E)))),
+ CompileFirst = [X || X <- lists:reverse(digraph_utils:topsort(G)), [] =/= digraph:in_neighbours(G, X)],
+ TargetPath = fun(Target) ->
+ case lists:keyfind(Target, 1, Modules) of
+ false -> "";
+ {_, DepFile} ->
+ DirSubname = tl(string:tokens(filename:dirname(DepFile), "/")),
+ string:join(DirSubname ++ [atom_to_list(Target)], "/")
+ end
+ end,
+ Output0 = [
+ "# Generated by Erlang.mk. Edit at your own risk!\n\n",
+ [[F, "::", [[" ", D] || D <- Deps], "; @touch \$$@\n"] || {F, Deps} <- Depend],
+ "\nCOMPILE_FIRST +=", [[" ", TargetPath(CF)] || CF <- CompileFirst], "\n"
+ ],
+ Output = case "é" of
+ [233] -> unicode:characters_to_binary(Output0);
+ _ -> Output0
+ end,
+ ok = file:write_file("$(1)", Output),
+ halt()
+endef
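+
+# The $(PROJECT).d produced by makedep.erl contains one rule per module with
+# local dependencies, plus an ordering hint, e.g. (hypothetical modules):
+#   # Generated by Erlang.mk. Edit at your own risk!
+#   src/foo.erl:: src/foo_transform.erl include/foo.hrl; @touch $@
+#   COMPILE_FIRST += foo_transform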
+
+ifeq ($(if $(NO_MAKEDEP),$(wildcard $(PROJECT).d),),)
+$(PROJECT).d:: $(ERL_FILES) $(call core_find,include/,*.hrl) $(MAKEFILE_LIST)
+ $(makedep_verbose) $(call erlang,$(call makedep.erl,$@))
+endif
+
+ifeq ($(IS_APP)$(IS_DEP),)
+ifneq ($(words $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES)),0)
+# Rebuild everything when the Makefile changes.
+$(ERLANG_MK_TMP)/last-makefile-change: $(MAKEFILE_LIST) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES); \
+ touch -c $(PROJECT).d; \
+ fi
+ $(verbose) touch $@
+
+$(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES):: $(ERLANG_MK_TMP)/last-makefile-change
+ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change
+endif
+endif
+
+$(PROJECT).d::
+ $(verbose) :
+
+include $(wildcard $(PROJECT).d)
+
+ebin/$(PROJECT).app:: ebin/
+
+ebin/:
+ $(verbose) mkdir -p ebin/
+
+define compile_erl
+ $(erlc_verbose) erlc -v $(if $(IS_DEP),$(filter-out -Werror,$(ERLC_OPTS)),$(ERLC_OPTS)) -o ebin/ \
+ -pa ebin/ -I include/ $(filter-out $(ERLC_EXCLUDE_PATHS),$(COMPILE_FIRST_PATHS) $(1))
+endef
+
+define validate_app_file
+ case file:consult("ebin/$(PROJECT).app") of
+ {ok, _} -> halt();
+ _ -> halt(1)
+ end
+endef
+
+ebin/$(PROJECT).app:: $(ERL_FILES) $(CORE_FILES) $(wildcard src/$(PROJECT).app.src)
+ $(eval FILES_TO_COMPILE := $(filter-out src/$(PROJECT).app.src,$?))
+ $(if $(strip $(FILES_TO_COMPILE)),$(call compile_erl,$(FILES_TO_COMPILE)))
+# Older git versions do not have the --first-parent flag. Do without in that case.
+ $(eval GITDESCRIBE := $(shell git describe --dirty --abbrev=7 --tags --always --first-parent 2>/dev/null \
+ || git describe --dirty --abbrev=7 --tags --always 2>/dev/null || true))
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(filter-out $(ERLC_EXCLUDE_PATHS),$(ERL_FILES) $(CORE_FILES) $(BEAM_FILES)))))))
+ifeq ($(wildcard src/$(PROJECT).app.src),)
+ $(app_verbose) printf '$(subst %,%%,$(subst $(newline),\n,$(subst ','\'',$(call app_file,$(GITDESCRIBE),$(MODULES)))))' \
+ > ebin/$(PROJECT).app
+ $(verbose) if ! $(call erlang,$(call validate_app_file)); then \
+ echo "The .app file produced is invalid. Please verify the value of PROJECT_ENV." >&2; \
+ exit 1; \
+ fi
+else
+ $(verbose) if [ -z "$$(grep -e '^[^%]*{\s*modules\s*,' src/$(PROJECT).app.src)" ]; then \
+ echo "Empty modules entry not found in $(PROJECT).app.src. Please consult the erlang.mk documentation for instructions." >&2; \
+ exit 1; \
+ fi
+ $(appsrc_verbose) cat src/$(PROJECT).app.src \
+ | sed "s/{[[:space:]]*modules[[:space:]]*,[[:space:]]*\[\]}/{modules, \[$(call comma_list,$(MODULES))\]}/" \
+ | sed "s/{id,[[:space:]]*\"git\"}/{id, \"$(subst /,\/,$(GITDESCRIBE))\"}/" \
+ > ebin/$(PROJECT).app
+endif
+ifneq ($(wildcard src/$(PROJECT).appup),)
+ $(verbose) cp src/$(PROJECT).appup ebin/
+endif
+
+clean:: clean-app
+
+clean-app:
+ $(gen_verbose) rm -rf $(PROJECT).d ebin/ priv/mibs/ $(XRL_ERL_FILES) $(YRL_ERL_FILES) \
+ $(addprefix include/,$(patsubst %.mib,%.hrl,$(notdir $(MIB_FILES)))) \
+ $(addprefix include/,$(patsubst %.asn1,%.hrl,$(notdir $(ASN1_FILES)))) \
+ $(addprefix include/,$(patsubst %.asn1,%.asn1db,$(notdir $(ASN1_FILES)))) \
+ $(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES))))
+
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Viktor Söderqvist <viktor@zuiderkwast.se>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: docs-deps
+
+# Configuration.
+
+ALL_DOC_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(DOC_DEPS))
+
+# Targets.
+
+$(foreach dep,$(DOC_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+doc-deps:
+else
+doc-deps: $(ALL_DOC_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_DOC_DEPS_DIRS) ; do $(MAKE) -C $$dep IS_DEP=1; done
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: rel-deps
+
+# Configuration.
+
+ALL_REL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(REL_DEPS))
+
+# Targets.
+
+$(foreach dep,$(REL_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+rel-deps:
+else
+rel-deps: $(ALL_REL_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_REL_DEPS_DIRS) ; do $(MAKE) -C $$dep; done
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: test-deps test-dir test-build clean-test-dir
+
+# Configuration.
+
+TEST_DIR ?= $(CURDIR)/test
+
+ALL_TEST_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(TEST_DEPS))
+
+TEST_ERLC_OPTS ?= +debug_info +warn_export_vars +warn_shadow_vars +warn_obsolete_guard
+TEST_ERLC_OPTS += -DTEST=1
+
+# Targets.
+
+$(foreach dep,$(TEST_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+test-deps:
+else
+test-deps: $(ALL_TEST_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_TEST_DEPS_DIRS) ; do \
+ if [ -z "$(strip $(FULL))" ] && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ else \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ fi \
+ done
+endif
+
+ifneq ($(wildcard $(TEST_DIR)),)
+test-dir: $(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build
+ @:
+
+test_erlc_verbose_0 = @echo " ERLC " $(filter-out $(patsubst %,%.erl,$(ERLC_EXCLUDE)),\
+ $(filter %.erl %.core,$(notdir $(FILES_TO_COMPILE))));
+test_erlc_verbose_2 = set -x;
+test_erlc_verbose = $(test_erlc_verbose_$(V))
+
+define compile_test_erl
+ $(test_erlc_verbose) erlc -v $(TEST_ERLC_OPTS) -o $(TEST_DIR) \
+ -pa ebin/ -I include/ $(1)
+endef
+
+ERL_TEST_FILES = $(call core_find,$(TEST_DIR)/,*.erl)
+$(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build: $(ERL_TEST_FILES) $(MAKEFILE_LIST)
+ $(eval FILES_TO_COMPILE := $(if $(filter $(MAKEFILE_LIST),$?),$(filter $(ERL_TEST_FILES),$^),$?))
+ $(if $(strip $(FILES_TO_COMPILE)),$(call compile_test_erl,$(FILES_TO_COMPILE)) && touch $@)
+endif
+
+test-build:: IS_TEST=1
+test-build:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build:: $(if $(wildcard src),$(if $(wildcard ebin/test),,clean)) $(if $(IS_APP),,deps test-deps)
+# We already compiled everything when IS_APP=1.
+ifndef IS_APP
+ifneq ($(wildcard src),)
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(verbose) $(MAKE) --no-print-directory app-build ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(gen_verbose) touch ebin/test
+endif
+ifneq ($(wildcard $(TEST_DIR)),)
+ $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+endif
+endif
+
+# Roughly the same as test-build, but when IS_APP=1.
+# We only care about compiling the current application.
+ifdef IS_APP
+test-build-app:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build-app:: deps test-deps
+ifneq ($(wildcard src),)
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(verbose) $(MAKE) --no-print-directory app-build ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(gen_verbose) touch ebin/test
+endif
+ifneq ($(wildcard $(TEST_DIR)),)
+ $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+endif
+endif
+
+clean:: clean-test-dir
+
+clean-test-dir:
+ifneq ($(wildcard $(TEST_DIR)/*.beam),)
+ $(gen_verbose) rm -f $(TEST_DIR)/*.beam $(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: rebar.config
+
+# We strip out -Werror because we don't want to fail due to
+# warnings when used as a dependency.
+
+compat_prepare_erlc_opts = $(shell echo "$1" | sed 's/, */,/g')
+
+define compat_convert_erlc_opts
+$(if $(filter-out -Werror,$1),\
+ $(if $(findstring +,$1),\
+ $(shell echo $1 | cut -b 2-)))
+endef
+
+define compat_erlc_opts_to_list
+[$(call comma_list,$(foreach o,$(call compat_prepare_erlc_opts,$1),$(call compat_convert_erlc_opts,$o)))]
+endef
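+
+# Example conversion (hypothetical options): with
+#   ERLC_OPTS = -Werror +debug_info +'{parse_transform, lager_transform}'
+# the list passed to {erl_opts, ...} in the generated rebar.config is
+#   [debug_info,{parse_transform,lager_transform}]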
+
+define compat_rebar_config
+{deps, [
+$(call comma_list,$(foreach d,$(DEPS),\
+ $(if $(filter hex,$(call dep_fetch,$d)),\
+ {$(call dep_name,$d)$(comma)"$(call dep_repo,$d)"},\
+ {$(call dep_name,$d)$(comma)".*"$(comma){git,"$(call dep_repo,$d)"$(comma)"$(call dep_commit,$d)"}})))
+]}.
+{erl_opts, $(call compat_erlc_opts_to_list,$(ERLC_OPTS))}.
+endef
+
+rebar.config:
+ $(gen_verbose) $(call core_render,compat_rebar_config,rebar.config)
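+
+# With the hypothetical declarations
+#   DEPS = cowlib
+#   dep_cowlib = git https://github.com/ninenines/cowlib 2.9.1
+# and the default ERLC_OPTS, the generated rebar.config reads:
+#   {deps, [
+#   {cowlib,".*",{git,"https://github.com/ninenines/cowlib","2.9.1"}}
+#   ]}.
+#   {erl_opts, [debug_info,warn_export_vars,warn_shadow_vars,warn_obsolete_guard]}.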
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter asciideck,$(DEPS) $(DOC_DEPS)),asciideck)
+
+.PHONY: asciidoc asciidoc-guide asciidoc-manual install-asciidoc distclean-asciidoc-guide distclean-asciidoc-manual
+
+# Core targets.
+
+docs:: asciidoc
+
+distclean:: distclean-asciidoc-guide distclean-asciidoc-manual
+
+# Plugin-specific targets.
+
+asciidoc: asciidoc-guide asciidoc-manual
+
+# User guide.
+
+ifeq ($(wildcard doc/src/guide/book.asciidoc),)
+asciidoc-guide:
+else
+asciidoc-guide: distclean-asciidoc-guide doc-deps
+ a2x -v -f pdf doc/src/guide/book.asciidoc && mv doc/src/guide/book.pdf doc/guide.pdf
+ a2x -v -f chunked doc/src/guide/book.asciidoc && mv doc/src/guide/book.chunked/ doc/html/
+
+distclean-asciidoc-guide:
+ $(gen_verbose) rm -rf doc/html/ doc/guide.pdf
+endif
+
+# Man pages.
+
+ASCIIDOC_MANUAL_FILES := $(wildcard doc/src/manual/*.asciidoc)
+
+ifeq ($(ASCIIDOC_MANUAL_FILES),)
+asciidoc-manual:
+else
+
+# Configuration.
+
+MAN_INSTALL_PATH ?= /usr/local/share/man
+MAN_SECTIONS ?= 3 7
+MAN_PROJECT ?= $(shell echo $(PROJECT) | sed 's/^./\U&\E/')
+MAN_VERSION ?= $(PROJECT_VERSION)
+
+# Plugin-specific targets.
+
+define asciidoc2man.erl
+try
+ [begin
+ io:format(" ADOC ~s~n", [F]),
+ ok = asciideck:to_manpage(asciideck:parse_file(F), #{
+ compress => gzip,
+ outdir => filename:dirname(F),
+ extra2 => "$(MAN_PROJECT) $(MAN_VERSION)",
+ extra3 => "$(MAN_PROJECT) Function Reference"
+ })
+ end || F <- [$(shell echo $(addprefix $(comma)\",$(addsuffix \",$1)) | sed 's/^.//')]],
+ halt(0)
+catch C:E ->
+ io:format("Exception ~p:~p~nStacktrace: ~p~n", [C, E, erlang:get_stacktrace()]),
+ halt(1)
+end.
+endef
+
+asciidoc-manual:: doc-deps
+
+asciidoc-manual:: $(ASCIIDOC_MANUAL_FILES)
+ $(gen_verbose) $(call erlang,$(call asciidoc2man.erl,$?))
+ $(verbose) $(foreach s,$(MAN_SECTIONS),mkdir -p doc/man$s/ && mv doc/src/manual/*.$s.gz doc/man$s/;)
+
+install-docs:: install-asciidoc
+
+install-asciidoc: asciidoc-manual
+ $(foreach s,$(MAN_SECTIONS),\
+ mkdir -p $(MAN_INSTALL_PATH)/man$s/ && \
+ install -g `id -g` -o `id -u` -m 0644 doc/man$s/*.gz $(MAN_INSTALL_PATH)/man$s/;)
+
+distclean-asciidoc-manual:
+ $(gen_verbose) rm -rf $(addprefix doc/man,$(MAN_SECTIONS))
+endif
+endif
+
+# Copyright (c) 2014-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: bootstrap bootstrap-lib bootstrap-rel new list-templates
+
+# Core targets.
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Bootstrap targets:" \
+ " bootstrap Generate a skeleton of an OTP application" \
+ " bootstrap-lib Generate a skeleton of an OTP library" \
+ " bootstrap-rel Generate the files needed to build a release" \
+ " new-app in=NAME Create a new local OTP application NAME" \
+ " new-lib in=NAME Create a new local OTP library NAME" \
+ " new t=TPL n=NAME Generate a module NAME based on the template TPL" \
+ " new t=T n=N in=APP Generate a module NAME based on the template TPL in APP" \
+ " list-templates List available templates"
+
+# Bootstrap templates.
+
+define bs_appsrc
+{application, $p, [
+ {description, ""},
+ {vsn, "0.1.0"},
+ {id, "git"},
+ {modules, []},
+ {registered, []},
+ {applications, [
+ kernel,
+ stdlib
+ ]},
+ {mod, {$p_app, []}},
+ {env, []}
+]}.
+endef
+
+define bs_appsrc_lib
+{application, $p, [
+ {description, ""},
+ {vsn, "0.1.0"},
+ {id, "git"},
+ {modules, []},
+ {registered, []},
+ {applications, [
+ kernel,
+ stdlib
+ ]}
+]}.
+endef
+
+# To prevent autocompletion issues with ZSH, we add "include erlang.mk"
+# separately during the actual bootstrap.
+define bs_Makefile
+PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.1.0
+$(if $(SP),
+# Whitespace to be used when creating files from templates.
+SP = $(SP)
+)
+endef
+
+define bs_apps_Makefile
+PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.1.0
+$(if $(SP),
+# Whitespace to be used when creating files from templates.
+SP = $(SP)
+)
+# Make sure we know where the applications are located.
+ROOT_DIR ?= $(call core_relpath,$(dir $(ERLANG_MK_FILENAME)),$(APPS_DIR)/app)
+APPS_DIR ?= ..
+DEPS_DIR ?= $(call core_relpath,$(DEPS_DIR),$(APPS_DIR)/app)
+
+include $$(ROOT_DIR)/erlang.mk
+endef
+
+define bs_app
+-module($p_app).
+-behaviour(application).
+
+-export([start/2]).
+-export([stop/1]).
+
+start(_Type, _Args) ->
+ $p_sup:start_link().
+
+stop(_State) ->
+ ok.
+endef
+
+define bs_relx_config
+{release, {$p_release, "1"}, [$p, sasl, runtime_tools]}.
+{extended_start_script, true}.
+{sys_config, "config/sys.config"}.
+{vm_args, "config/vm.args"}.
+endef
+
+define bs_sys_config
+[
+].
+endef
+
+define bs_vm_args
+-name $p@127.0.0.1
+-setcookie $p
+-heart
+endef
+
+# Normal templates.
+
+define tpl_supervisor
+-module($(n)).
+-behaviour(supervisor).
+
+-export([start_link/0]).
+-export([init/1]).
+
+start_link() ->
+ supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+init([]) ->
+ Procs = [],
+ {ok, {{one_for_one, 1, 5}, Procs}}.
+endef
+
+define tpl_gen_server
+-module($(n)).
+-behaviour(gen_server).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_server.
+-export([init/1]).
+-export([handle_call/3]).
+-export([handle_cast/2]).
+-export([handle_info/2]).
+-export([terminate/2]).
+-export([code_change/3]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_server:start_link(?MODULE, [], []).
+
+%% gen_server.
+
+init([]) ->
+ {ok, #state{}}.
+
+handle_call(_Request, _From, State) ->
+ {reply, ignored, State}.
+
+handle_cast(_Msg, State) ->
+ {noreply, State}.
+
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+endef
+
+define tpl_module
+-module($(n)).
+-export([]).
+endef
+
+define tpl_cowboy_http
+-module($(n)).
+-behaviour(cowboy_http_handler).
+
+-export([init/3]).
+-export([handle/2]).
+-export([terminate/3]).
+
+-record(state, {
+}).
+
+init(_, Req, _Opts) ->
+ {ok, Req, #state{}}.
+
+handle(Req, State=#state{}) ->
+ {ok, Req2} = cowboy_req:reply(200, Req),
+ {ok, Req2, State}.
+
+terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_gen_fsm
+-module($(n)).
+-behaviour(gen_fsm).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_fsm.
+-export([init/1]).
+-export([state_name/2]).
+-export([handle_event/3]).
+-export([state_name/3]).
+-export([handle_sync_event/4]).
+-export([handle_info/3]).
+-export([terminate/3]).
+-export([code_change/4]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_fsm:start_link(?MODULE, [], []).
+
+%% gen_fsm.
+
+init([]) ->
+ {ok, state_name, #state{}}.
+
+state_name(_Event, StateData) ->
+ {next_state, state_name, StateData}.
+
+handle_event(_Event, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+state_name(_Event, _From, StateData) ->
+ {reply, ignored, state_name, StateData}.
+
+handle_sync_event(_Event, _From, StateName, StateData) ->
+ {reply, ignored, StateName, StateData}.
+
+handle_info(_Info, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+terminate(_Reason, _StateName, _StateData) ->
+ ok.
+
+code_change(_OldVsn, StateName, StateData, _Extra) ->
+ {ok, StateName, StateData}.
+endef
+
+define tpl_gen_statem
+-module($(n)).
+-behaviour(gen_statem).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_statem.
+-export([callback_mode/0]).
+-export([init/1]).
+-export([state_name/3]).
+-export([handle_event/4]).
+-export([terminate/3]).
+-export([code_change/4]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_statem:start_link(?MODULE, [], []).
+
+%% gen_statem.
+
+callback_mode() ->
+ state_functions.
+
+init([]) ->
+ {ok, state_name, #state{}}.
+
+state_name(_EventType, _EventData, StateData) ->
+ {next_state, state_name, StateData}.
+
+handle_event(_EventType, _EventData, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+terminate(_Reason, _StateName, _StateData) ->
+ ok.
+
+code_change(_OldVsn, StateName, StateData, _Extra) ->
+ {ok, StateName, StateData}.
+endef
+
+define tpl_cowboy_loop
+-module($(n)).
+-behaviour(cowboy_loop_handler).
+
+-export([init/3]).
+-export([info/3]).
+-export([terminate/3]).
+
+-record(state, {
+}).
+
+init(_, Req, _Opts) ->
+ {loop, Req, #state{}, 5000, hibernate}.
+
+info(_Info, Req, State) ->
+ {loop, Req, State, hibernate}.
+
+terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_cowboy_rest
+-module($(n)).
+
+-export([init/3]).
+-export([content_types_provided/2]).
+-export([get_html/2]).
+
+init(_, _Req, _Opts) ->
+ {upgrade, protocol, cowboy_rest}.
+
+content_types_provided(Req, State) ->
+ {[{{<<"text">>, <<"html">>, '*'}, get_html}], Req, State}.
+
+get_html(Req, State) ->
+ {<<"<html><body>This is REST!</body></html>">>, Req, State}.
+endef
+
+define tpl_cowboy_ws
+-module($(n)).
+-behaviour(cowboy_websocket_handler).
+
+-export([init/3]).
+-export([websocket_init/3]).
+-export([websocket_handle/3]).
+-export([websocket_info/3]).
+-export([websocket_terminate/3]).
+
+-record(state, {
+}).
+
+init(_, _, _) ->
+ {upgrade, protocol, cowboy_websocket}.
+
+websocket_init(_, Req, _Opts) ->
+ Req2 = cowboy_req:compact(Req),
+ {ok, Req2, #state{}}.
+
+websocket_handle({text, Data}, Req, State) ->
+ {reply, {text, Data}, Req, State};
+websocket_handle({binary, Data}, Req, State) ->
+ {reply, {binary, Data}, Req, State};
+websocket_handle(_Frame, Req, State) ->
+ {ok, Req, State}.
+
+websocket_info(_Info, Req, State) ->
+ {ok, Req, State}.
+
+websocket_terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_ranch_protocol
+-module($(n)).
+-behaviour(ranch_protocol).
+
+-export([start_link/4]).
+-export([init/4]).
+
+-type opts() :: [].
+-export_type([opts/0]).
+
+-record(state, {
+ socket :: inet:socket(),
+ transport :: module()
+}).
+
+start_link(Ref, Socket, Transport, Opts) ->
+ Pid = spawn_link(?MODULE, init, [Ref, Socket, Transport, Opts]),
+ {ok, Pid}.
+
+-spec init(ranch:ref(), inet:socket(), module(), opts()) -> ok.
+init(Ref, Socket, Transport, _Opts) ->
+ ok = ranch:accept_ack(Ref),
+ loop(#state{socket=Socket, transport=Transport}).
+
+loop(State) ->
+ loop(State).
+endef
+
+# Plugin-specific targets.
+
+ifndef WS
+ifdef SP
+WS = $(subst a,,a $(wordlist 1,$(SP),a a a a a a a a a a a a a a a a a a a a))
+else
+WS = $(tab)
+endif
+endif
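+
+# How WS is computed from SP: $(wordlist 1,$(SP),a a ...) yields $(SP) "a"
+# words; together with the extra leading "a" that makes $(SP) separating
+# spaces, and $(subst a,,...) then deletes the letters, leaving exactly $(SP)
+# spaces. E.g. with SP=2: $(subst a,,a a a) -> "  " (two spaces).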
+
+bootstrap:
+ifneq ($(wildcard src/),)
+ $(error Error: src/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(eval n := $(PROJECT)_sup)
+ $(verbose) $(call core_render,bs_Makefile,Makefile)
+ $(verbose) echo "include erlang.mk" >> Makefile
+ $(verbose) mkdir src/
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc,src/$(PROJECT).app.src)
+endif
+ $(verbose) $(call core_render,bs_app,src/$(PROJECT)_app.erl)
+ $(verbose) $(call core_render,tpl_supervisor,src/$(PROJECT)_sup.erl)
+
+bootstrap-lib:
+ifneq ($(wildcard src/),)
+ $(error Error: src/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(verbose) $(call core_render,bs_Makefile,Makefile)
+ $(verbose) echo "include erlang.mk" >> Makefile
+ $(verbose) mkdir src/
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc_lib,src/$(PROJECT).app.src)
+endif
+
+bootstrap-rel:
+ifneq ($(wildcard relx.config),)
+ $(error Error: relx.config already exists)
+endif
+ifneq ($(wildcard config/),)
+ $(error Error: config/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(verbose) $(call core_render,bs_relx_config,relx.config)
+ $(verbose) mkdir config/
+ $(verbose) $(call core_render,bs_sys_config,config/sys.config)
+ $(verbose) $(call core_render,bs_vm_args,config/vm.args)
+
+new-app:
+ifndef in
+ $(error Usage: $(MAKE) new-app in=APP)
+endif
+ifneq ($(wildcard $(APPS_DIR)/$in),)
+ $(error Error: Application $in already exists)
+endif
+ $(eval p := $(in))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(eval n := $(in)_sup)
+ $(verbose) mkdir -p $(APPS_DIR)/$p/src/
+ $(verbose) $(call core_render,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile)
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc,$(APPS_DIR)/$p/src/$p.app.src)
+endif
+ $(verbose) $(call core_render,bs_app,$(APPS_DIR)/$p/src/$p_app.erl)
+ $(verbose) $(call core_render,tpl_supervisor,$(APPS_DIR)/$p/src/$p_sup.erl)
+
+new-lib:
+ifndef in
+ $(error Usage: $(MAKE) new-lib in=APP)
+endif
+ifneq ($(wildcard $(APPS_DIR)/$in),)
+ $(error Error: Application $in already exists)
+endif
+ $(eval p := $(in))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(verbose) mkdir -p $(APPS_DIR)/$p/src/
+ $(verbose) $(call core_render,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile)
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc_lib,$(APPS_DIR)/$p/src/$p.app.src)
+endif
+
+new:
+ifeq ($(wildcard src/)$(in),)
+ $(error Error: src/ directory does not exist)
+endif
+ifndef t
+ $(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP])
+endif
+ifndef n
+ $(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP])
+endif
+ifdef in
+ $(verbose) $(call core_render,tpl_$(t),$(APPS_DIR)/$(in)/src/$(n).erl)
+else
+ $(verbose) $(call core_render,tpl_$(t),src/$(n).erl)
+endif
+
+list-templates:
+	$(verbose) echo Available templates:
+ $(verbose) printf " %s\n" $(sort $(patsubst tpl_%,%,$(filter tpl_%,$(.VARIABLES))))
+
+# Copyright (c) 2014-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: clean-c_src distclean-c_src-env
+
+# Configuration.
+
+C_SRC_DIR ?= $(CURDIR)/c_src
+C_SRC_ENV ?= $(C_SRC_DIR)/env.mk
+C_SRC_OUTPUT ?= $(CURDIR)/priv/$(PROJECT)
+C_SRC_TYPE ?= shared
+
+# System type and C compiler/flags.
+
+ifeq ($(PLATFORM),msys2)
+ C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?= .exe
+ C_SRC_OUTPUT_SHARED_EXTENSION ?= .dll
+else
+ C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?=
+ C_SRC_OUTPUT_SHARED_EXTENSION ?= .so
+endif
+
+ifeq ($(C_SRC_TYPE),shared)
+ C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_SHARED_EXTENSION)
+else
+ C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_EXECUTABLE_EXTENSION)
+endif
+
+ifeq ($(PLATFORM),msys2)
+# We hardcode the compiler used on MSYS2. The default CC=cc does
+# not produce working code. The "gcc" MSYS2 package also doesn't.
+ CC = /mingw64/bin/gcc
+ export CC
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),darwin)
+ CC ?= cc
+ CFLAGS ?= -O3 -std=c99 -arch x86_64 -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -arch x86_64 -Wall
+ LDFLAGS ?= -arch x86_64 -flat_namespace -undefined suppress
+else ifeq ($(PLATFORM),freebsd)
+ CC ?= cc
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),linux)
+ CC ?= gcc
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+endif
+
+ifneq ($(PLATFORM),msys2)
+ CFLAGS += -fPIC
+ CXXFLAGS += -fPIC
+endif
+
+CFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
+CXXFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
+
+LDLIBS += -L"$(ERL_INTERFACE_LIB_DIR)" -lei
+
+# Verbosity.
+
+c_verbose_0 = @echo " C " $(filter-out $(notdir $(MAKEFILE_LIST) $(C_SRC_ENV)),$(^F));
+c_verbose = $(c_verbose_$(V))
+
+cpp_verbose_0 = @echo " CPP " $(filter-out $(notdir $(MAKEFILE_LIST) $(C_SRC_ENV)),$(^F));
+cpp_verbose = $(cpp_verbose_$(V))
+
+link_verbose_0 = @echo " LD " $(@F);
+link_verbose = $(link_verbose_$(V))
+
+# Targets.
+
+ifeq ($(wildcard $(C_SRC_DIR)),)
+else ifneq ($(wildcard $(C_SRC_DIR)/Makefile),)
+app:: app-c_src
+
+test-build:: app-c_src
+
+app-c_src:
+ $(MAKE) -C $(C_SRC_DIR)
+
+clean::
+ $(MAKE) -C $(C_SRC_DIR) clean
+
+else
+
+ifeq ($(SOURCES),)
+SOURCES := $(sort $(foreach pat,*.c *.C *.cc *.cpp,$(call core_find,$(C_SRC_DIR)/,$(pat))))
+endif
+OBJECTS = $(addsuffix .o, $(basename $(SOURCES)))
+
+COMPILE_C = $(c_verbose) $(CC) $(CFLAGS) $(CPPFLAGS) -c
+COMPILE_CPP = $(cpp_verbose) $(CXX) $(CXXFLAGS) $(CPPFLAGS) -c
+
+app:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
+
+test-build:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
+
+$(C_SRC_OUTPUT_FILE): $(OBJECTS)
+ $(verbose) mkdir -p $(dir $@)
+ $(link_verbose) $(CC) $(OBJECTS) \
+ $(LDFLAGS) $(if $(filter $(C_SRC_TYPE),shared),-shared) $(LDLIBS) \
+ -o $(C_SRC_OUTPUT_FILE)
+
+$(OBJECTS): $(MAKEFILE_LIST) $(C_SRC_ENV)
+
+%.o: %.c
+ $(COMPILE_C) $(OUTPUT_OPTION) $<
+
+%.o: %.cc
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+%.o: %.C
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+%.o: %.cpp
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+clean:: clean-c_src
+
+clean-c_src:
+ $(gen_verbose) rm -f $(C_SRC_OUTPUT_FILE) $(OBJECTS)
+
+endif
+
+ifneq ($(wildcard $(C_SRC_DIR)),)
+ERL_ERTS_DIR = $(shell $(ERL) -eval 'io:format("~s~n", [code:lib_dir(erts)]), halt().')
+
+$(C_SRC_ENV):
+ $(verbose) $(ERL) -eval "file:write_file(\"$(call core_native_path,$(C_SRC_ENV))\", \
+ io_lib:format( \
+ \"# Generated by Erlang.mk. Edit at your own risk!~n~n\" \
+ \"ERTS_INCLUDE_DIR ?= ~s/erts-~s/include/~n\" \
+ \"ERL_INTERFACE_INCLUDE_DIR ?= ~s~n\" \
+ \"ERL_INTERFACE_LIB_DIR ?= ~s~n\" \
+ \"ERTS_DIR ?= $(ERL_ERTS_DIR)~n\", \
+ [code:root_dir(), erlang:system_info(version), \
+ code:lib_dir(erl_interface, include), \
+ code:lib_dir(erl_interface, lib)])), \
+ halt()."
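+
+# A sample generated env.mk (hypothetical paths and versions):
+#   ERTS_INCLUDE_DIR ?= /usr/lib/erlang/erts-11.1/include/
+#   ERL_INTERFACE_INCLUDE_DIR ?= /usr/lib/erlang/lib/erl_interface-4.0.1/include
+#   ERL_INTERFACE_LIB_DIR ?= /usr/lib/erlang/lib/erl_interface-4.0.1/lib
+#   ERTS_DIR ?= /usr/lib/erlang/erts-11.1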
+
+distclean:: distclean-c_src-env
+
+distclean-c_src-env:
+ $(gen_verbose) rm -f $(C_SRC_ENV)
+
+-include $(C_SRC_ENV)
+
+ifneq ($(ERL_ERTS_DIR),$(ERTS_DIR))
+$(shell rm -f $(C_SRC_ENV))
+endif
+endif
+
+# Templates.
+
+define bs_c_nif
+#include "erl_nif.h"
+
+static int loads = 0;
+
+static int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
+{
+ /* Initialize private data. */
+ *priv_data = NULL;
+
+ loads++;
+
+ return 0;
+}
+
+static int upgrade(ErlNifEnv* env, void** priv_data, void** old_priv_data, ERL_NIF_TERM load_info)
+{
+ /* Convert the private data to the new version. */
+ *priv_data = *old_priv_data;
+
+ loads++;
+
+ return 0;
+}
+
+static void unload(ErlNifEnv* env, void* priv_data)
+{
+ if (loads == 1) {
+ /* Destroy the private data. */
+ }
+
+ loads--;
+}
+
+static ERL_NIF_TERM hello(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ if (enif_is_atom(env, argv[0])) {
+ return enif_make_tuple2(env,
+ enif_make_atom(env, "hello"),
+ argv[0]);
+ }
+
+ return enif_make_tuple2(env,
+ enif_make_atom(env, "error"),
+ enif_make_atom(env, "badarg"));
+}
+
+static ErlNifFunc nif_funcs[] = {
+ {"hello", 1, hello}
+};
+
+ERL_NIF_INIT($n, nif_funcs, load, NULL, upgrade, unload)
+endef
+
+define bs_erl_nif
+-module($n).
+
+-export([hello/1]).
+
+-on_load(on_load/0).
+on_load() ->
+ PrivDir = case code:priv_dir(?MODULE) of
+ {error, _} ->
+ AppPath = filename:dirname(filename:dirname(code:which(?MODULE))),
+ filename:join(AppPath, "priv");
+ Path ->
+ Path
+ end,
+ erlang:load_nif(filename:join(PrivDir, atom_to_list(?MODULE)), 0).
+
+hello(_) ->
+ erlang:nif_error({not_loaded, ?MODULE}).
+endef
+
+new-nif:
+ifneq ($(wildcard $(C_SRC_DIR)/$n.c),)
+ $(error Error: $(C_SRC_DIR)/$n.c already exists)
+endif
+ifneq ($(wildcard src/$n.erl),)
+ $(error Error: src/$n.erl already exists)
+endif
+ifndef n
+ $(error Usage: $(MAKE) new-nif n=NAME [in=APP])
+endif
+ifdef in
+ $(verbose) $(MAKE) -C $(APPS_DIR)/$(in)/ new-nif n=$n in=
+else
+ $(verbose) mkdir -p $(C_SRC_DIR) src/
+ $(verbose) $(call core_render,bs_c_nif,$(C_SRC_DIR)/$n.c)
+ $(verbose) $(call core_render,bs_erl_nif,src/$n.erl)
+endif
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: ci ci-prepare ci-setup
+
+CI_OTP ?=
+CI_HIPE ?=
+CI_ERLLVM ?=
+
+ifeq ($(CI_VM),native)
+ERLC_OPTS += +native
+TEST_ERLC_OPTS += +native
+else ifeq ($(CI_VM),erllvm)
+ERLC_OPTS += +native +'{hipe, [to_llvm]}'
+TEST_ERLC_OPTS += +native +'{hipe, [to_llvm]}'
+endif
+
+ifeq ($(strip $(CI_OTP) $(CI_HIPE) $(CI_ERLLVM)),)
+ci::
+else
+
+ci:: $(addprefix ci-,$(CI_OTP) $(addsuffix -native,$(CI_HIPE)) $(addsuffix -erllvm,$(CI_ERLLVM)))
+
+ci-prepare: $(addprefix $(KERL_INSTALL_DIR)/,$(CI_OTP) $(addsuffix -native,$(CI_HIPE)))
+
+ci-setup::
+ $(verbose) :
+
+ci-extra::
+ $(verbose) :
+
+ci_verbose_0 = @echo " CI " $(1);
+ci_verbose = $(ci_verbose_$(V))
+
+define ci_target
+ci-$1: $(KERL_INSTALL_DIR)/$2
+ $(verbose) $(MAKE) --no-print-directory clean
+ $(ci_verbose) \
+ PATH="$(KERL_INSTALL_DIR)/$2/bin:$(PATH)" \
+ CI_OTP_RELEASE="$1" \
+ CT_OPTS="-label $1" \
+ CI_VM="$3" \
+ $(MAKE) ci-setup tests
+ $(verbose) $(MAKE) --no-print-directory ci-extra
+endef
+
+$(foreach otp,$(CI_OTP),$(eval $(call ci_target,$(otp),$(otp),otp)))
+$(foreach otp,$(CI_HIPE),$(eval $(call ci_target,$(otp)-native,$(otp)-native,native)))
+$(foreach otp,$(CI_ERLLVM),$(eval $(call ci_target,$(otp)-erllvm,$(otp)-native,erllvm)))
+
+$(foreach otp,$(filter-out $(ERLANG_OTP),$(CI_OTP)),$(eval $(call kerl_otp_target,$(otp))))
+$(foreach otp,$(filter-out $(ERLANG_HIPE),$(sort $(CI_HIPE) $(CI_ERLLVM))),$(eval $(call kerl_hipe_target,$(otp))))
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Continuous Integration targets:" \
+ " ci Run '$(MAKE) tests' on all configured Erlang versions." \
+ "" \
+ "The CI_OTP variable must be defined with the Erlang versions" \
+		"that should be tested. For example: CI_OTP = OTP-17.3.4 OTP-17.5.3"
+
+endif
+
+# Copyright (c) 2020, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifdef CONCUERROR_TESTS
+
+.PHONY: concuerror distclean-concuerror
+
+# Configuration
+
+CONCUERROR_LOGS_DIR ?= $(CURDIR)/logs
+CONCUERROR_OPTS ?=
+
+# Core targets.
+
+check:: concuerror
+
+ifndef KEEP_LOGS
+distclean:: distclean-concuerror
+endif
+
+# Plugin-specific targets.
+
+$(ERLANG_MK_TMP)/Concuerror/bin/concuerror: | $(ERLANG_MK_TMP)
+ $(verbose) git clone https://github.com/parapluu/Concuerror $(ERLANG_MK_TMP)/Concuerror
+ $(verbose) $(MAKE) -C $(ERLANG_MK_TMP)/Concuerror
+
+$(CONCUERROR_LOGS_DIR):
+ $(verbose) mkdir -p $(CONCUERROR_LOGS_DIR)
+
+define concuerror_html_report
+<!DOCTYPE html>
+<html lang="en">
+<head>
+<meta charset="utf-8">
+<title>Concuerror HTML report</title>
+</head>
+<body>
+<h1>Concuerror HTML report</h1>
+<p>Generated on $(concuerror_date)</p>
+<ul>
+$(foreach t,$(concuerror_targets),<li><a href="$(t).txt">$(t)</a></li>)
+</ul>
+</body>
+</html>
+endef
+
+concuerror: $(addprefix concuerror-,$(subst :,-,$(CONCUERROR_TESTS)))
+ $(eval concuerror_date := $(shell date))
+ $(eval concuerror_targets := $^)
+ $(verbose) $(call core_render,concuerror_html_report,$(CONCUERROR_LOGS_DIR)/concuerror.html)
+
+define concuerror_target
+.PHONY: concuerror-$1-$2
+
+concuerror-$1-$2: test-build | $(ERLANG_MK_TMP)/Concuerror/bin/concuerror $(CONCUERROR_LOGS_DIR)
+ $(ERLANG_MK_TMP)/Concuerror/bin/concuerror \
+ --pa $(CURDIR)/ebin --pa $(TEST_DIR) \
+ -o $(CONCUERROR_LOGS_DIR)/concuerror-$1-$2.txt \
+ $$(CONCUERROR_OPTS) -m $1 -t $2
+endef
+
+$(foreach test,$(CONCUERROR_TESTS),$(eval $(call concuerror_target,$(firstword $(subst :, ,$(test))),$(lastword $(subst :, ,$(test))))))
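+
+# Example (hypothetical module and test function): setting
+#   CONCUERROR_TESTS = my_db_tests:no_deadlock
+# creates a concuerror-my_db_tests-no_deadlock target that runs
+# my_db_tests:no_deadlock() under Concuerror and logs to $(CONCUERROR_LOGS_DIR).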
+
+distclean-concuerror:
+ $(gen_verbose) rm -rf $(CONCUERROR_LOGS_DIR)
+
+endif
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: ct apps-ct distclean-ct
+
+# Configuration.
+
+CT_OPTS ?=
+
+ifneq ($(wildcard $(TEST_DIR)),)
+ifndef CT_SUITES
+CT_SUITES := $(sort $(subst _SUITE.erl,,$(notdir $(call core_find,$(TEST_DIR)/,*_SUITE.erl))))
+endif
+endif
+CT_SUITES ?=
+CT_LOGS_DIR ?= $(CURDIR)/logs
+
+# Core targets.
+
+tests:: ct
+
+ifndef KEEP_LOGS
+distclean:: distclean-ct
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Common_test targets:" \
+ " ct Run all the common_test suites for this project" \
+ "" \
+		"Each of your common_test suites has an associated target." \
+		"A suite named http_SUITE can be run using the ct-http target."
+
+# Plugin-specific targets.
+
+CT_RUN = ct_run \
+ -no_auto_compile \
+ -noinput \
+ -pa $(CURDIR)/ebin $(TEST_DIR) \
+ -dir $(TEST_DIR) \
+ -logdir $(CT_LOGS_DIR)
+
+ifeq ($(CT_SUITES),)
+ct: $(if $(IS_APP)$(ROOT_DIR),,apps-ct)
+else
+# We do not run tests if we are in an apps/* application with no test directory.
+ifneq ($(IS_APP)$(wildcard $(TEST_DIR)),1)
+ct: test-build $(if $(IS_APP)$(ROOT_DIR),,apps-ct)
+ $(verbose) mkdir -p $(CT_LOGS_DIR)
+ $(gen_verbose) $(CT_RUN) -sname ct_$(PROJECT) -suite $(addsuffix _SUITE,$(CT_SUITES)) $(CT_OPTS)
+endif
+endif
+
+ifneq ($(ALL_APPS_DIRS),)
+define ct_app_target
+apps-ct-$1: test-build
+ $$(MAKE) -C $1 ct IS_APP=1
+endef
+
+$(foreach app,$(ALL_APPS_DIRS),$(eval $(call ct_app_target,$(app))))
+
+apps-ct: $(addprefix apps-ct-,$(ALL_APPS_DIRS))
+endif
+
+ifdef t
+ifeq (,$(findstring :,$t))
+CT_EXTRA = -group $t
+else
+t_words = $(subst :, ,$t)
+CT_EXTRA = -group $(firstword $(t_words)) -case $(lastword $(t_words))
+endif
+else
+ifdef c
+CT_EXTRA = -case $c
+else
+CT_EXTRA =
+endif
+endif
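+
+# For example (hypothetical suite, group and case names):
+#   make ct-http t=admin         # run only the admin group
+#   make ct-http t=admin:login   # run the login case of the admin group
+#   make ct-http c=login         # run only the login case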
+
+define ct_suite_target
+ct-$(1): test-build
+ $(verbose) mkdir -p $(CT_LOGS_DIR)
+ $(gen_verbose_esc) $(CT_RUN) -sname ct_$(PROJECT) -suite $(addsuffix _SUITE,$(1)) $(CT_EXTRA) $(CT_OPTS)
+endef
+
+$(foreach test,$(CT_SUITES),$(eval $(call ct_suite_target,$(test))))
+
+distclean-ct:
+ $(gen_verbose) rm -rf $(CT_LOGS_DIR)
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: plt distclean-plt dialyze
+
+# Configuration.
+
+DIALYZER_PLT ?= $(CURDIR)/.$(PROJECT).plt
+export DIALYZER_PLT
+
+PLT_APPS ?=
+DIALYZER_DIRS ?= --src -r $(wildcard src) $(ALL_APPS_DIRS)
+DIALYZER_OPTS ?= -Werror_handling -Wrace_conditions -Wunmatched_returns # -Wunderspecs
+DIALYZER_PLT_OPTS ?=
+
+# Core targets.
+
+check:: dialyze
+
+distclean:: distclean-plt
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Dialyzer targets:" \
+ " plt Build a PLT file for this project" \
+ " dialyze Analyze the project using Dialyzer"
+
+# Plugin-specific targets.
+
+define filter_opts.erl
+ Opts = init:get_plain_arguments(),
+ {Filtered, _} = lists:foldl(fun
+ (O, {Os, true}) -> {[O|Os], false};
+ (O = "-D", {Os, _}) -> {[O|Os], true};
+ (O = [\\$$-, \\$$D, _ | _], {Os, _}) -> {[O|Os], false};
+ (O = "-I", {Os, _}) -> {[O|Os], true};
+ (O = [\\$$-, \\$$I, _ | _], {Os, _}) -> {[O|Os], false};
+ (O = "-pa", {Os, _}) -> {[O|Os], true};
+ (_, Acc) -> Acc
+ end, {[], false}, Opts),
+ io:format("~s~n", [string:join(lists:reverse(Filtered), " ")]),
+ halt().
+endef
+
+# DIALYZER_PLT is a variable understood directly by Dialyzer.
+#
+# We append the path to erts at the end of the PLT. This works
+# because the PLT file is in the external term format and the
+# function binary_to_term/1 ignores any trailing data.
+$(DIALYZER_PLT): deps app
+ $(eval DEPS_LOG := $(shell test -f $(ERLANG_MK_TMP)/deps.log && \
+ while read p; do test -d $$p/ebin && echo $$p/ebin; done <$(ERLANG_MK_TMP)/deps.log))
+ $(verbose) dialyzer --build_plt $(DIALYZER_PLT_OPTS) --apps \
+ erts kernel stdlib $(PLT_APPS) $(OTP_DEPS) $(LOCAL_DEPS) $(DEPS_LOG) || test $$? -eq 2
+ $(verbose) $(ERL) -eval 'io:format("~n~s~n", [code:lib_dir(erts)]), halt().' >> $@
+
+plt: $(DIALYZER_PLT)
+
+distclean-plt:
+ $(gen_verbose) rm -f $(DIALYZER_PLT)
+
+ifneq ($(wildcard $(DIALYZER_PLT)),)
+dialyze: $(if $(filter --src,$(DIALYZER_DIRS)),,deps app)
+ $(verbose) if ! tail -n1 $(DIALYZER_PLT) | \
+ grep -q "^`$(ERL) -eval 'io:format("~s", [code:lib_dir(erts)]), halt().'`$$"; then \
+ rm $(DIALYZER_PLT); \
+ $(MAKE) plt; \
+ fi
+else
+dialyze: $(DIALYZER_PLT)
+endif
+ $(verbose) dialyzer --no_native `$(ERL) \
+ -eval "$(subst $(newline),,$(call escape_dquotes,$(call filter_opts.erl)))" \
+ -extra $(ERLC_OPTS)` $(DIALYZER_DIRS) $(DIALYZER_OPTS) $(if $(wildcard ebin/),-pa ebin/)
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-edoc edoc
+
+# Configuration.
+
+EDOC_OPTS ?=
+EDOC_SRC_DIRS ?=
+EDOC_OUTPUT ?= doc
+
+define edoc.erl
+ SrcPaths = lists:foldl(fun(P, Acc) ->
+ filelib:wildcard(atom_to_list(P) ++ "/{src,c_src}") ++ Acc
+ end, [], [$(call comma_list,$(patsubst %,'%',$(call core_native_path,$(EDOC_SRC_DIRS))))]),
+ DefaultOpts = [{dir, "$(EDOC_OUTPUT)"}, {source_path, SrcPaths}, {subpackages, false}],
+ edoc:application($(1), ".", [$(2)] ++ DefaultOpts),
+ halt(0).
+endef
+
+# Core targets.
+
+ifneq ($(strip $(EDOC_SRC_DIRS)$(wildcard doc/overview.edoc)),)
+docs:: edoc
+endif
+
+distclean:: distclean-edoc
+
+# Plugin-specific targets.
+
+edoc: distclean-edoc doc-deps
+ $(gen_verbose) $(call erlang,$(call edoc.erl,$(PROJECT),$(EDOC_OPTS)))
+
+distclean-edoc:
+ $(gen_verbose) rm -f $(EDOC_OUTPUT)/*.css $(EDOC_OUTPUT)/*.html $(EDOC_OUTPUT)/*.png $(EDOC_OUTPUT)/edoc-info
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Configuration.
+
+DTL_FULL_PATH ?=
+DTL_PATH ?= templates/
+DTL_PREFIX ?=
+DTL_SUFFIX ?= _dtl
+DTL_OPTS ?=
+
+# Verbosity.
+
+dtl_verbose_0 = @echo " DTL " $(filter %.dtl,$(?F));
+dtl_verbose = $(dtl_verbose_$(V))
+
+# Core targets.
+
+DTL_PATH := $(abspath $(DTL_PATH))
+DTL_FILES := $(sort $(call core_find,$(DTL_PATH),*.dtl))
+
+ifneq ($(DTL_FILES),)
+
+DTL_NAMES = $(addprefix $(DTL_PREFIX),$(addsuffix $(DTL_SUFFIX),$(DTL_FILES:$(DTL_PATH)/%.dtl=%)))
+DTL_MODULES = $(if $(DTL_FULL_PATH),$(subst /,_,$(DTL_NAMES)),$(notdir $(DTL_NAMES)))
+BEAM_FILES += $(addsuffix .beam,$(addprefix ebin/,$(DTL_MODULES)))
+
+ifneq ($(words $(DTL_FILES)),0)
+# Rebuild templates when the Makefile changes.
+$(ERLANG_MK_TMP)/last-makefile-change-erlydtl: $(MAKEFILE_LIST) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(DTL_FILES); \
+ fi
+ $(verbose) touch $@
+
+ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change-erlydtl
+endif
+
+define erlydtl_compile.erl
+ [begin
+ Module0 = case "$(strip $(DTL_FULL_PATH))" of
+ "" ->
+ filename:basename(F, ".dtl");
+ _ ->
+ "$(call core_native_path,$(DTL_PATH))/" ++ F2 = filename:rootname(F, ".dtl"),
+ re:replace(F2, "/", "_", [{return, list}, global])
+ end,
+ Module = list_to_atom("$(DTL_PREFIX)" ++ string:to_lower(Module0) ++ "$(DTL_SUFFIX)"),
+ case erlydtl:compile(F, Module, [$(DTL_OPTS)] ++ [{out_dir, "ebin/"}, return_errors]) of
+ ok -> ok;
+ {ok, _} -> ok
+ end
+ end || F <- string:tokens("$(1)", " ")],
+ halt().
+endef
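+
+# For example (hypothetical template): with the defaults above,
+# templates/user_profile.dtl compiles to the module user_profile_dtl;
+# with DTL_FULL_PATH=1, templates/user/profile.dtl becomes
+# user_profile_dtl as well, since slashes are replaced by underscores.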
+
+ebin/$(PROJECT).app:: $(DTL_FILES) | ebin/
+ $(if $(strip $?),\
+ $(dtl_verbose) $(call erlang,$(call erlydtl_compile.erl,$(call core_native_path,$?)),\
+ -pa ebin/))
+
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, Dave Cottlehuber <dch@skunkwerks.at>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-escript escript escript-zip
+
+# Configuration.
+
+ESCRIPT_NAME ?= $(PROJECT)
+ESCRIPT_FILE ?= $(ESCRIPT_NAME)
+
+ESCRIPT_SHEBANG ?= /usr/bin/env escript
+ESCRIPT_COMMENT ?= This is an -*- erlang -*- file
+ESCRIPT_EMU_ARGS ?= -escript main $(ESCRIPT_NAME)
+
+ESCRIPT_ZIP ?= 7z a -tzip -mx=9 -mtc=off $(if $(filter-out 0,$(V)),,> /dev/null)
+ESCRIPT_ZIP_FILE ?= $(ERLANG_MK_TMP)/escript.zip
+
+# Core targets.
+
+distclean:: distclean-escript
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Escript targets:" \
+		" escript Build an executable escript archive"
+
+# Plugin-specific targets.
+
+escript-zip:: FULL=1
+escript-zip:: deps app
+ $(verbose) mkdir -p $(dir $(ESCRIPT_ZIP))
+ $(verbose) rm -f $(ESCRIPT_ZIP_FILE)
+ $(gen_verbose) cd .. && $(ESCRIPT_ZIP) $(ESCRIPT_ZIP_FILE) $(PROJECT)/ebin/*
+ifneq ($(DEPS),)
+ $(verbose) cd $(DEPS_DIR) && $(ESCRIPT_ZIP) $(ESCRIPT_ZIP_FILE) \
+ $(subst $(DEPS_DIR)/,,$(addsuffix /*,$(wildcard \
+ $(addsuffix /ebin,$(shell cat $(ERLANG_MK_TMP)/deps.log)))))
+endif
+
+escript:: escript-zip
+ $(gen_verbose) printf "%s\n" \
+ "#!$(ESCRIPT_SHEBANG)" \
+ "%% $(ESCRIPT_COMMENT)" \
+ "%%! $(ESCRIPT_EMU_ARGS)" > $(ESCRIPT_FILE)
+ $(verbose) cat $(ESCRIPT_ZIP_FILE) >> $(ESCRIPT_FILE)
+ $(verbose) chmod +x $(ESCRIPT_FILE)
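+
+# The resulting file is the three-line header above followed by the zip
+# archive. For example (hypothetical project name): running "./myapp"
+# starts escript, which reads the emulator args and calls myapp:main/1
+# from the embedded archive.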
+
+distclean-escript:
+ $(gen_verbose) rm -f $(ESCRIPT_FILE)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, Enrique Fernandez <enrique.fernandez@erlang-solutions.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: eunit apps-eunit
+
+# Configuration
+
+EUNIT_OPTS ?=
+EUNIT_ERL_OPTS ?=
+
+# Core targets.
+
+tests:: eunit
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "EUnit targets:" \
+ " eunit Run all the EUnit tests for this project"
+
+# Plugin-specific targets.
+
+define eunit.erl
+ $(call cover.erl)
+ CoverSetup(),
+ case eunit:test($1, [$(EUNIT_OPTS)]) of
+ ok -> ok;
+ error -> halt(2)
+ end,
+ CoverExport("$(call core_native_path,$(COVER_DATA_DIR))/eunit.coverdata"),
+ halt()
+endef
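+
+# For example (hypothetical names): "make eunit t=my_mod" runs the EUnit
+# tests for my_mod (and its my_mod_tests module), while
+# "make eunit t=my_mod:my_test" calls fun my_mod:my_test/0 directly.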
+
+EUNIT_ERL_OPTS += -pa $(TEST_DIR) $(CURDIR)/ebin
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+eunit: test-build cover-data-dir
+ $(gen_verbose) $(call erlang,$(call eunit.erl,['$(t)']),$(EUNIT_ERL_OPTS))
+else
+eunit: test-build cover-data-dir
+ $(gen_verbose) $(call erlang,$(call eunit.erl,fun $(t)/0),$(EUNIT_ERL_OPTS))
+endif
+else
+EUNIT_EBIN_MODS = $(notdir $(basename $(ERL_FILES) $(BEAM_FILES)))
+EUNIT_TEST_MODS = $(notdir $(basename $(call core_find,$(TEST_DIR)/,*.erl)))
+
+EUNIT_MODS = $(foreach mod,$(EUNIT_EBIN_MODS) $(filter-out \
+ $(patsubst %,%_tests,$(EUNIT_EBIN_MODS)),$(EUNIT_TEST_MODS)),'$(mod)')
+
+eunit: test-build $(if $(IS_APP)$(ROOT_DIR),,apps-eunit) cover-data-dir
+ifneq ($(wildcard src/ $(TEST_DIR)),)
+ $(gen_verbose) $(call erlang,$(call eunit.erl,[$(call comma_list,$(EUNIT_MODS))]),$(EUNIT_ERL_OPTS))
+endif
+
+ifneq ($(ALL_APPS_DIRS),)
+apps-eunit: test-build
+ $(verbose) eunit_retcode=0 ; for app in $(ALL_APPS_DIRS); do $(MAKE) -C $$app eunit IS_APP=1; \
+ [ $$? -ne 0 ] && eunit_retcode=1 ; done ; \
+ exit $$eunit_retcode
+endif
+endif
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter proper,$(DEPS) $(TEST_DEPS)),proper)
+.PHONY: proper
+
+# Targets.
+
+tests:: proper
+
+define proper_check.erl
+ $(call cover.erl)
+ code:add_pathsa([
+ "$(call core_native_path,$(CURDIR)/ebin)",
+ "$(call core_native_path,$(DEPS_DIR)/*/ebin)",
+ "$(call core_native_path,$(TEST_DIR))"]),
+ Module = fun(M) ->
+ [true] =:= lists:usort([
+ case atom_to_list(F) of
+ "prop_" ++ _ ->
+ io:format("Testing ~p:~p/0~n", [M, F]),
+ proper:quickcheck(M:F(), nocolors);
+ _ ->
+ true
+ end
+ || {F, 0} <- M:module_info(exports)])
+ end,
+ try begin
+ CoverSetup(),
+ Res = case $(1) of
+ all -> [true] =:= lists:usort([Module(M) || M <- [$(call comma_list,$(3))]]);
+ module -> Module($(2));
+ function -> proper:quickcheck($(2), nocolors)
+ end,
+ CoverExport("$(COVER_DATA_DIR)/proper.coverdata"),
+ Res
+ end of
+ true -> halt(0);
+ _ -> halt(1)
+ catch error:undef ->
+ io:format("Undefined property or module?~n~p~n", [erlang:get_stacktrace()]),
+ halt(0)
+ end.
+endef
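+
+# A property module picked up by the targets below exports zero-arity
+# "prop_" functions, e.g. (hypothetical module):
+#
+#   -module(my_props).
+#   -include_lib("proper/include/proper.hrl").
+#   -export([prop_reverse/0]).
+#
+#   prop_reverse() ->
+#       ?FORALL(L, list(integer()), lists:reverse(lists:reverse(L)) =:= L).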
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+proper: test-build cover-data-dir
+ $(verbose) $(call erlang,$(call proper_check.erl,module,$(t)))
+else
+proper: test-build cover-data-dir
+ $(verbose) echo Testing $(t)/0
+ $(verbose) $(call erlang,$(call proper_check.erl,function,$(t)()))
+endif
+else
+proper: test-build cover-data-dir
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(wildcard ebin/*.beam) $(call core_find,$(TEST_DIR)/,*.beam))))))
+ $(gen_verbose) $(call erlang,$(call proper_check.erl,all,undefined,$(MODULES)))
+endif
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Verbosity.
+
+proto_verbose_0 = @echo " PROTO " $(filter %.proto,$(?F));
+proto_verbose = $(proto_verbose_$(V))
+
+# Core targets.
+
+ifneq ($(wildcard src/),)
+ifneq ($(filter gpb protobuffs,$(BUILD_DEPS) $(DEPS)),)
+PROTO_FILES := $(filter %.proto,$(ALL_SRC_FILES))
+ERL_FILES += $(addprefix src/,$(patsubst %.proto,%_pb.erl,$(notdir $(PROTO_FILES))))
+
+ifeq ($(PROTO_FILES),)
+$(ERLANG_MK_TMP)/last-makefile-change-protobuffs:
+ $(verbose) :
+else
+# Rebuild proto files when the Makefile changes.
+# We exclude $(PROJECT).d to avoid a circular dependency.
+$(ERLANG_MK_TMP)/last-makefile-change-protobuffs: $(filter-out $(PROJECT).d,$(MAKEFILE_LIST)) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(PROTO_FILES); \
+ fi
+ $(verbose) touch $@
+
+$(PROJECT).d:: $(ERLANG_MK_TMP)/last-makefile-change-protobuffs
+endif
+
+ifeq ($(filter gpb,$(BUILD_DEPS) $(DEPS)),)
+define compile_proto.erl
+ [begin
+ protobuffs_compile:generate_source(F, [
+ {output_include_dir, "./include"},
+ {output_src_dir, "./src"}])
+ end || F <- string:tokens("$1", " ")],
+ halt().
+endef
+else
+define compile_proto.erl
+ [begin
+ gpb_compile:file(F, [
+ {include_as_lib, true},
+ {module_name_suffix, "_pb"},
+ {o_hrl, "./include"},
+ {o_erl, "./src"}])
+ end || F <- string:tokens("$1", " ")],
+ halt().
+endef
+endif
+
+ifneq ($(PROTO_FILES),)
+$(PROJECT).d:: $(PROTO_FILES)
+ $(verbose) mkdir -p ebin/ include/
+ $(if $(strip $?),$(proto_verbose) $(call erlang,$(call compile_proto.erl,$?)))
+endif
+endif
+endif
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: relx-rel relx-relup distclean-relx-rel run
+
+# Configuration.
+
+RELX ?= $(ERLANG_MK_TMP)/relx
+RELX_CONFIG ?= $(CURDIR)/relx.config
+
+RELX_URL ?= https://erlang.mk/res/relx-v3.27.0
+RELX_OPTS ?=
+RELX_OUTPUT_DIR ?= _rel
+RELX_REL_EXT ?=
+RELX_TAR ?= 1
+
+ifdef SFX
+ RELX_TAR = 1
+endif
+
+ifeq ($(firstword $(RELX_OPTS)),-o)
+ RELX_OUTPUT_DIR = $(word 2,$(RELX_OPTS))
+else
+ RELX_OPTS += -o $(RELX_OUTPUT_DIR)
+endif
+
+# Core targets.
+
+ifeq ($(IS_DEP),)
+ifneq ($(wildcard $(RELX_CONFIG)),)
+rel:: relx-rel
+
+relup:: relx-relup
+endif
+endif
+
+distclean:: distclean-relx-rel
+
+# Plugin-specific targets.
+
+$(RELX): | $(ERLANG_MK_TMP)
+ $(gen_verbose) $(call core_http_get,$(RELX),$(RELX_URL))
+ $(verbose) chmod +x $(RELX)
+
+relx-rel: $(RELX) rel-deps app
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) release
+ $(verbose) $(MAKE) relx-post-rel
+ifeq ($(RELX_TAR),1)
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) tar
+endif
+
+relx-relup: $(RELX) rel-deps app
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) release
+ $(MAKE) relx-post-rel
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) relup $(if $(filter 1,$(RELX_TAR)),tar)
+
+distclean-relx-rel:
+ $(gen_verbose) rm -rf $(RELX_OUTPUT_DIR)
+
+# Default hooks.
+relx-post-rel::
+ $(verbose) :
+
+# Run target.
+
+ifeq ($(wildcard $(RELX_CONFIG)),)
+run::
+else
+
+define get_relx_release.erl
+ {ok, Config} = file:consult("$(call core_native_path,$(RELX_CONFIG))"),
+ {release, {Name, Vsn0}, _} = lists:keyfind(release, 1, Config),
+ Vsn = case Vsn0 of
+ {cmd, Cmd} -> os:cmd(Cmd);
+ semver -> "";
+ {semver, _} -> "";
+ VsnStr -> Vsn0
+ end,
+ Extended = case lists:keyfind(extended_start_script, 1, Config) of
+ {_, true} -> "1";
+ _ -> ""
+ end,
+ io:format("~s ~s ~s", [Name, Vsn, Extended]),
+ halt(0).
+endef
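+
+# For reference, a minimal relx.config matched by the snippet above
+# (hypothetical release and app names):
+#   {release, {my_rel, "1.0.0"}, [my_app]}.
+#   {extended_start_script, true}.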
+
+RELX_REL := $(shell $(call erlang,$(get_relx_release.erl)))
+RELX_REL_NAME := $(word 1,$(RELX_REL))
+RELX_REL_VSN := $(word 2,$(RELX_REL))
+RELX_REL_CMD := $(if $(word 3,$(RELX_REL)),console)
+
+ifeq ($(PLATFORM),msys2)
+RELX_REL_EXT := .cmd
+endif
+
+run:: all
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) $(RELX_REL_CMD)
+
+ifdef RELOAD
+rel::
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) ping
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) \
+ eval "io:format(\"~p~n\", [c:lm()])"
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Relx targets:" \
+ " run Compile the project, build the release and run it"
+
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, M Robert Martin <rob@version2beta.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: shell
+
+# Configuration.
+
+SHELL_ERL ?= erl
+SHELL_PATHS ?= $(CURDIR)/ebin $(TEST_DIR)
+SHELL_OPTS ?=
+
+ALL_SHELL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(SHELL_DEPS))
+
+# Core targets
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Shell targets:" \
+		" shell Run an Erlang shell with SHELL_OPTS or a reasonable default"
+
+# Plugin-specific targets.
+
+$(foreach dep,$(SHELL_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+build-shell-deps:
+else
+build-shell-deps: $(ALL_SHELL_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_SHELL_DEPS_DIRS) ; do \
+ if [ -z "$(strip $(FULL))" ] && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ else \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ fi \
+ done
+endif
+
+shell:: build-shell-deps
+ $(gen_verbose) $(SHELL_ERL) -pa $(SHELL_PATHS) $(SHELL_OPTS)
+
+# Copyright 2017, Stanislaw Klekot <dozzie@jarowit.net>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-sphinx sphinx
+
+# Configuration.
+
+SPHINX_BUILD ?= sphinx-build
+SPHINX_SOURCE ?= doc
+SPHINX_CONFDIR ?=
+SPHINX_FORMATS ?= html
+SPHINX_DOCTREES ?= $(ERLANG_MK_TMP)/sphinx.doctrees
+SPHINX_OPTS ?=
+
+#sphinx_html_opts =
+#sphinx_html_output = html
+#sphinx_man_opts =
+#sphinx_man_output = man
+#sphinx_latex_opts =
+#sphinx_latex_output = latex
+
+# Helpers.
+
+sphinx_build_0 = @echo " SPHINX" $1; $(SPHINX_BUILD) -N -q
+sphinx_build_1 = $(SPHINX_BUILD) -N
+sphinx_build_2 = set -x; $(SPHINX_BUILD)
+sphinx_build = $(sphinx_build_$(V))
+
+define sphinx.build
+$(call sphinx_build,$1) -b $1 -d $(SPHINX_DOCTREES) $(if $(SPHINX_CONFDIR),-c $(SPHINX_CONFDIR)) $(SPHINX_OPTS) $(sphinx_$1_opts) -- $(SPHINX_SOURCE) $(call sphinx.output,$1)
+
+endef
+
+define sphinx.output
+$(if $(sphinx_$1_output),$(sphinx_$1_output),$1)
+endef
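+
+# For example, with SPHINX_FORMATS = html man, "make sphinx" runs
+# sphinx-build once per format, writing to ./html and ./man unless
+# sphinx_html_output or sphinx_man_output say otherwise.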
+
+# Targets.
+
+ifneq ($(wildcard $(if $(SPHINX_CONFDIR),$(SPHINX_CONFDIR),$(SPHINX_SOURCE))/conf.py),)
+docs:: sphinx
+distclean:: distclean-sphinx
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Sphinx targets:" \
+ " sphinx Generate Sphinx documentation." \
+ "" \
+		"ReST sources and the 'conf.py' file are expected in the directory pointed" \
+		"to by SPHINX_SOURCE ('doc' by default). SPHINX_FORMATS lists the formats" \
+		"to build (only the 'html' format is generated by default); the target" \
+		'directory can be specified by setting sphinx_$${format}_output, for' \
+		'example: sphinx_html_output = output/html' \
+		"Additional Sphinx options can be set in SPHINX_OPTS."
+
+# Plugin-specific targets.
+
+sphinx:
+ $(foreach F,$(SPHINX_FORMATS),$(call sphinx.build,$F))
+
+distclean-sphinx:
+ $(gen_verbose) rm -rf $(filter-out $(SPHINX_SOURCE),$(foreach F,$(SPHINX_FORMATS),$(call sphinx.output,$F)))
+
+# Copyright (c) 2017, Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: show-ERL_LIBS show-ERLC_OPTS show-TEST_ERLC_OPTS
+
+show-ERL_LIBS:
+ @echo $(ERL_LIBS)
+
+show-ERLC_OPTS:
+ @$(foreach opt,$(ERLC_OPTS) -pa ebin -I include,echo "$(opt)";)
+
+show-TEST_ERLC_OPTS:
+ @$(foreach opt,$(TEST_ERLC_OPTS) -pa ebin -I include,echo "$(opt)";)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter triq,$(DEPS) $(TEST_DEPS)),triq)
+.PHONY: triq
+
+# Targets.
+
+tests:: triq
+
+define triq_check.erl
+ $(call cover.erl)
+ code:add_pathsa([
+ "$(call core_native_path,$(CURDIR)/ebin)",
+ "$(call core_native_path,$(DEPS_DIR)/*/ebin)",
+ "$(call core_native_path,$(TEST_DIR))"]),
+ try begin
+ CoverSetup(),
+ Res = case $(1) of
+ all -> [true] =:= lists:usort([triq:check(M) || M <- [$(call comma_list,$(3))]]);
+ module -> triq:check($(2));
+ function -> triq:check($(2))
+ end,
+ CoverExport("$(COVER_DATA_DIR)/triq.coverdata"),
+ Res
+ end of
+ true -> halt(0);
+ _ -> halt(1)
+ catch error:undef ->
+ io:format("Undefined property or module?~n~p~n", [erlang:get_stacktrace()]),
+ halt(0)
+ end.
+endef
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+triq: test-build cover-data-dir
+ $(verbose) $(call erlang,$(call triq_check.erl,module,$(t)))
+else
+triq: test-build cover-data-dir
+ $(verbose) echo Testing $(t)/0
+ $(verbose) $(call erlang,$(call triq_check.erl,function,$(t)()))
+endif
+else
+triq: test-build cover-data-dir
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(wildcard ebin/*.beam) $(call core_find,$(TEST_DIR)/,*.beam))))))
+ $(gen_verbose) $(call erlang,$(call triq_check.erl,all,undefined,$(MODULES)))
+endif
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Erlang Solutions Ltd.
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: xref distclean-xref
+
+# Configuration.
+
+ifeq ($(XREF_CONFIG),)
+ XREFR_ARGS :=
+else
+ XREFR_ARGS := -c $(XREF_CONFIG)
+endif
+
+XREFR ?= $(CURDIR)/xrefr
+export XREFR
+
+XREFR_URL ?= https://github.com/inaka/xref_runner/releases/download/1.1.0/xrefr
+
+# Core targets.
+
+help::
+ $(verbose) printf '%s\n' '' \
+ 'Xref targets:' \
+		' xref Run Xrefr using $$XREF_CONFIG as the config file if defined'
+
+distclean:: distclean-xref
+
+# Plugin-specific targets.
+
+$(XREFR):
+ $(gen_verbose) $(call core_http_get,$(XREFR),$(XREFR_URL))
+ $(verbose) chmod +x $(XREFR)
+
+xref: deps app $(XREFR)
+ $(gen_verbose) $(XREFR) $(XREFR_ARGS)
+
+distclean-xref:
+ $(gen_verbose) rm -rf $(XREFR)
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Viktor Söderqvist <viktor@zuiderkwast.se>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+COVER_REPORT_DIR ?= cover
+COVER_DATA_DIR ?= $(COVER_REPORT_DIR)
+
+ifdef COVER
+COVER_APPS ?= $(notdir $(ALL_APPS_DIRS))
+COVER_DEPS ?=
+endif
+
+# Code coverage for Common Test.
+
+ifdef COVER
+ifdef CT_RUN
+ifneq ($(wildcard $(TEST_DIR)),)
+test-build:: $(TEST_DIR)/ct.cover.spec
+
+$(TEST_DIR)/ct.cover.spec: cover-data-dir
+ $(gen_verbose) printf "%s\n" \
+ "{incl_app, '$(PROJECT)', details}." \
+ "{incl_dirs, '$(PROJECT)', [\"$(call core_native_path,$(CURDIR)/ebin)\" \
+ $(foreach a,$(COVER_APPS),$(comma) \"$(call core_native_path,$(APPS_DIR)/$a/ebin)\") \
+ $(foreach d,$(COVER_DEPS),$(comma) \"$(call core_native_path,$(DEPS_DIR)/$d/ebin)\")]}." \
+ '{export,"$(call core_native_path,$(abspath $(COVER_DATA_DIR))/ct.coverdata)"}.' > $@
+
+CT_RUN += -cover $(TEST_DIR)/ct.cover.spec
+endif
+endif
+endif
+
+# Code coverage for other tools.
+
+ifdef COVER
+define cover.erl
+ CoverSetup = fun() ->
+ Dirs = ["$(call core_native_path,$(CURDIR)/ebin)"
+ $(foreach a,$(COVER_APPS),$(comma) "$(call core_native_path,$(APPS_DIR)/$a/ebin)")
+ $(foreach d,$(COVER_DEPS),$(comma) "$(call core_native_path,$(DEPS_DIR)/$d/ebin)")],
+ [begin
+ case filelib:is_dir(Dir) of
+ false -> false;
+ true ->
+ case cover:compile_beam_directory(Dir) of
+ {error, _} -> halt(1);
+ _ -> true
+ end
+ end
+ end || Dir <- Dirs]
+ end,
+ CoverExport = fun(Filename) -> cover:export(Filename) end,
+endef
+else
+define cover.erl
+ CoverSetup = fun() -> ok end,
+ CoverExport = fun(_) -> ok end,
+endef
+endif
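+
+# For example (hypothetical dependency name): running
+#   make tests COVER=1 COVER_DEPS=cowlib
+# also cover-compiles $(DEPS_DIR)/cowlib/ebin, so its modules appear in
+# the generated coverage data.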
+
+# Core targets
+
+ifdef COVER
+ifneq ($(COVER_REPORT_DIR),)
+tests::
+ $(verbose) $(MAKE) --no-print-directory cover-report
+endif
+
+cover-data-dir: | $(COVER_DATA_DIR)
+
+$(COVER_DATA_DIR):
+ $(verbose) mkdir -p $(COVER_DATA_DIR)
+else
+cover-data-dir:
+endif
+
+clean:: coverdata-clean
+
+ifneq ($(COVER_REPORT_DIR),)
+distclean:: cover-report-clean
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Cover targets:" \
+		" cover-report Generate an HTML coverage report from previously collected" \
+ " cover data." \
+ " all.coverdata Merge all coverdata files into all.coverdata." \
+ "" \
+		"If COVER=1 is set, coverage data is generated by the eunit and ct targets." \
+		"The tests target additionally generates an HTML coverage report from the" \
+		"combined coverdata files of these testing tools. HTML reports can be" \
+		"disabled by setting COVER_REPORT_DIR to empty."
+
+# Plugin specific targets
+
+COVERDATA = $(filter-out $(COVER_DATA_DIR)/all.coverdata,$(wildcard $(COVER_DATA_DIR)/*.coverdata))
+
+.PHONY: coverdata-clean
+coverdata-clean:
+ $(gen_verbose) rm -f $(COVER_DATA_DIR)/*.coverdata $(TEST_DIR)/ct.cover.spec
+
+# Merge all coverdata files into one.
+define cover_export.erl
+ $(foreach f,$(COVERDATA),cover:import("$(f)") == ok orelse halt(1),)
+ cover:export("$(COVER_DATA_DIR)/$@"), halt(0).
+endef
+
+all.coverdata: $(COVERDATA) cover-data-dir
+ $(gen_verbose) $(call erlang,$(cover_export.erl))
+
+# These are only defined if COVER_REPORT_DIR is non-empty. Set COVER_REPORT_DIR to
+# empty if you want the coverdata files but not the HTML report.
+ifneq ($(COVER_REPORT_DIR),)
+
+.PHONY: cover-report-clean cover-report
+
+cover-report-clean:
+ $(gen_verbose) rm -rf $(COVER_REPORT_DIR)
+ifneq ($(COVER_REPORT_DIR),$(COVER_DATA_DIR))
+ $(if $(shell ls -A $(COVER_DATA_DIR)/),,$(verbose) rmdir $(COVER_DATA_DIR))
+endif
+
+ifeq ($(COVERDATA),)
+cover-report:
+else
+
+# Modules which include eunit.hrl always contain one line without coverage
+# because eunit defines test/0 which is never called. We compensate for this.
+EUNIT_HRL_MODS = $(subst $(space),$(comma),$(shell \
+ grep -H -e '^\s*-include.*include/eunit\.hrl"' src/*.erl \
+ | sed "s/^src\/\(.*\)\.erl:.*/'\1'/" | uniq))
+
+define cover_report.erl
+ $(foreach f,$(COVERDATA),cover:import("$(f)") == ok orelse halt(1),)
+ Ms = cover:imported_modules(),
+ [cover:analyse_to_file(M, "$(COVER_REPORT_DIR)/" ++ atom_to_list(M)
+ ++ ".COVER.html", [html]) || M <- Ms],
+ Report = [begin {ok, R} = cover:analyse(M, module), R end || M <- Ms],
+ EunitHrlMods = [$(EUNIT_HRL_MODS)],
+ Report1 = [{M, {Y, case lists:member(M, EunitHrlMods) of
+ true -> N - 1; false -> N end}} || {M, {Y, N}} <- Report],
+ TotalY = lists:sum([Y || {_, {Y, _}} <- Report1]),
+ TotalN = lists:sum([N || {_, {_, N}} <- Report1]),
+ Perc = fun(Y, N) -> case Y + N of 0 -> 100; S -> round(100 * Y / S) end end,
+ TotalPerc = Perc(TotalY, TotalN),
+ {ok, F} = file:open("$(COVER_REPORT_DIR)/index.html", [write]),
+ io:format(F, "<!DOCTYPE html><html>~n"
+ "<head><meta charset=\"UTF-8\">~n"
+ "<title>Coverage report</title></head>~n"
+ "<body>~n", []),
+ io:format(F, "<h1>Coverage</h1>~n<p>Total: ~p%</p>~n", [TotalPerc]),
+ io:format(F, "<table><tr><th>Module</th><th>Coverage</th></tr>~n", []),
+ [io:format(F, "<tr><td><a href=\"~p.COVER.html\">~p</a></td>"
+ "<td>~p%</td></tr>~n",
+ [M, M, Perc(Y, N)]) || {M, {Y, N}} <- Report1],
+ How = "$(subst $(space),$(comma)$(space),$(basename $(COVERDATA)))",
+ Date = "$(shell date -u "+%Y-%m-%dT%H:%M:%SZ")",
+ io:format(F, "</table>~n"
+ "<p>Generated using ~s and erlang.mk on ~s.</p>~n"
+ "</body></html>", [How, Date]),
+ halt().
+endef
+
+cover-report:
+ $(verbose) mkdir -p $(COVER_REPORT_DIR)
+ $(gen_verbose) $(call erlang,$(cover_report.erl))
+
+endif
+endif # ifneq ($(COVER_REPORT_DIR),)
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: sfx
+
+ifdef RELX_REL
+ifdef SFX
+
+# Configuration.
+
+SFX_ARCHIVE ?= $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/$(RELX_REL_NAME)-$(RELX_REL_VSN).tar.gz
+SFX_OUTPUT_FILE ?= $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME).run
+
+# Core targets.
+
+rel:: sfx
+
+# Plugin-specific targets.
+
+define sfx_stub
+#!/bin/sh
+
+TMPDIR=`mktemp -d`
+ARCHIVE=`awk '/^__ARCHIVE_BELOW__$$/ {print NR + 1; exit 0;}' $$0`
+FILENAME=$$(basename $$0)
+REL=$${FILENAME%.*}
+
+tail -n+$$ARCHIVE $$0 | tar -xzf - -C $$TMPDIR
+
+$$TMPDIR/bin/$$REL console
+RET=$$?
+
+rm -rf $$TMPDIR
+
+exit $$RET
+
+__ARCHIVE_BELOW__
+endef
+
+sfx:
+ $(verbose) $(call core_render,sfx_stub,$(SFX_OUTPUT_FILE))
+ $(gen_verbose) cat $(SFX_ARCHIVE) >> $(SFX_OUTPUT_FILE)
+ $(verbose) chmod +x $(SFX_OUTPUT_FILE)
+
+endif
+endif
+
+# Copyright (c) 2013-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# External plugins.
+
+DEP_PLUGINS ?=
+
+$(foreach p,$(DEP_PLUGINS),\
+ $(eval $(if $(findstring /,$p),\
+ $(call core_dep_plugin,$p,$(firstword $(subst /, ,$p))),\
+ $(call core_dep_plugin,$p/plugins.mk,$p))))
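+
+# For example (hypothetical plugin): DEP_PLUGINS = cowboy loads
+# $(DEPS_DIR)/cowboy/plugins.mk, while DEP_PLUGINS = cowboy/mk/dist.mk
+# loads that specific file from the cowboy dependency.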
+
+help:: help-plugins
+
+help-plugins::
+ $(verbose) :
+
+# Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015-2016, Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Fetch dependencies recursively (without building them).
+
+.PHONY: fetch-deps fetch-doc-deps fetch-rel-deps fetch-test-deps \
+ fetch-shell-deps
+
+.PHONY: $(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+fetch-deps: $(ERLANG_MK_RECURSIVE_DEPS_LIST)
+fetch-doc-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST)
+fetch-rel-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST)
+fetch-test-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST)
+fetch-shell-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+ifneq ($(SKIP_DEPS),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST):
+ $(verbose) :> $@
+else
+# By default, we fetch "normal" dependencies. They are also included no
+# matter the type of requested dependencies.
+#
+# $(ALL_DEPS_DIRS) includes $(BUILD_DEPS).
+
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_DOC_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_REL_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_TEST_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_SHELL_DEPS_DIRS)
+
+# Allow using fetch-deps with $(DEP_TYPES) to fetch multiple types of
+# dependencies with a single target.
+ifneq ($(filter doc,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_DOC_DEPS_DIRS)
+endif
+ifneq ($(filter rel,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_REL_DEPS_DIRS)
+endif
+ifneq ($(filter test,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_TEST_DEPS_DIRS)
+endif
+ifneq ($(filter shell,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_SHELL_DEPS_DIRS)
+endif
+
+ERLANG_MK_RECURSIVE_TMP_LIST := $(abspath $(ERLANG_MK_TMP)/recursive-tmp-deps-$(shell echo $$PPID).log)
+
+$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): | $(ERLANG_MK_TMP)
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_RECURSIVE_TMP_LIST)
+endif
+ $(verbose) touch $(ERLANG_MK_RECURSIVE_TMP_LIST)
+ $(verbose) set -e; for dep in $^ ; do \
+ if ! grep -qs ^$$dep$$ $(ERLANG_MK_RECURSIVE_TMP_LIST); then \
+ echo $$dep >> $(ERLANG_MK_RECURSIVE_TMP_LIST); \
+ if grep -qs -E "^[[:blank:]]*include[[:blank:]]+(erlang\.mk|.*/erlang\.mk|.*ERLANG_MK_FILENAME.*)$$" \
+ $$dep/GNUmakefile $$dep/makefile $$dep/Makefile; then \
+ $(MAKE) -C $$dep fetch-deps \
+ IS_DEP=1 \
+ ERLANG_MK_RECURSIVE_TMP_LIST=$(ERLANG_MK_RECURSIVE_TMP_LIST); \
+ fi \
+ fi \
+ done
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) sort < $(ERLANG_MK_RECURSIVE_TMP_LIST) | \
+ uniq > $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted
+ $(verbose) cmp -s $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted $@ \
+ || mv $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted $@
+ $(verbose) rm -f $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted
+ $(verbose) rm $(ERLANG_MK_RECURSIVE_TMP_LIST)
+endif
+endif # ifneq ($(SKIP_DEPS),)
+
+# List dependencies recursively.
+
+.PHONY: list-deps list-doc-deps list-rel-deps list-test-deps \
+ list-shell-deps
+
+list-deps: $(ERLANG_MK_RECURSIVE_DEPS_LIST)
+list-doc-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST)
+list-rel-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST)
+list-test-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST)
+list-shell-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+list-deps list-doc-deps list-rel-deps list-test-deps list-shell-deps:
+ $(verbose) cat $^
+
+# Query dependencies recursively.
+
+.PHONY: query-deps query-doc-deps query-rel-deps query-test-deps \
+ query-shell-deps
+
+QUERY ?= name fetch_method repo version
+
+define query_target
+$(1): $(2) clean-tmp-query.log
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(4)
+endif
+ $(verbose) $(foreach dep,$(3),\
+ echo $(PROJECT): $(foreach q,$(QUERY),$(call query_$(q),$(dep))) >> $(4) ;)
+ $(if $(filter-out query-deps,$(1)),,\
+ $(verbose) set -e; for dep in $(3) ; do \
+ if grep -qs ^$$$$dep$$$$ $(ERLANG_MK_TMP)/query.log; then \
+ :; \
+ else \
+ echo $$$$dep >> $(ERLANG_MK_TMP)/query.log; \
+ $(MAKE) -C $(DEPS_DIR)/$$$$dep $$@ QUERY="$(QUERY)" IS_DEP=1 || true; \
+ fi \
+ done)
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) touch $(4)
+ $(verbose) cat $(4)
+endif
+endef
+
+clean-tmp-query.log:
+ifeq ($(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_TMP)/query.log
+endif
+
+$(eval $(call query_target,query-deps,$(ERLANG_MK_RECURSIVE_DEPS_LIST),$(BUILD_DEPS) $(DEPS),$(ERLANG_MK_QUERY_DEPS_FILE)))
+$(eval $(call query_target,query-doc-deps,$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST),$(DOC_DEPS),$(ERLANG_MK_QUERY_DOC_DEPS_FILE)))
+$(eval $(call query_target,query-rel-deps,$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST),$(REL_DEPS),$(ERLANG_MK_QUERY_REL_DEPS_FILE)))
+$(eval $(call query_target,query-test-deps,$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST),$(TEST_DEPS),$(ERLANG_MK_QUERY_TEST_DEPS_FILE)))
+$(eval $(call query_target,query-shell-deps,$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST),$(SHELL_DEPS),$(ERLANG_MK_QUERY_SHELL_DEPS_FILE)))
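+
+# For example: "make query-deps QUERY='name version'" prints one
+# "<project>: <name> <version>" line per dependency, recursing into
+# dependencies' own Makefiles.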
diff --git a/deps/amqp10_common/rabbitmq-components.mk b/deps/amqp10_common/rabbitmq-components.mk
new file mode 100644
index 0000000000..b2a3be8b35
--- /dev/null
+++ b/deps/amqp10_common/rabbitmq-components.mk
@@ -0,0 +1,359 @@
+ifeq ($(.DEFAULT_GOAL),)
+# Set the default goal to `all` because this file defines some targets
+# before erlang.mk is included, which would otherwise make the wrong
+# target the default.
+.DEFAULT_GOAL = all
+endif
+
+# PROJECT_VERSION defaults to:
+# 1. the version exported by rabbitmq-server-release;
+# 2. the version stored in `git-revisions.txt`, if it exists;
+# 3. a version based on git-describe(1), if it is a Git clone;
+# 4. 0.0.0
+
+PROJECT_VERSION := $(RABBITMQ_VERSION)
+
+ifeq ($(PROJECT_VERSION),)
+PROJECT_VERSION := $(shell \
+if test -f git-revisions.txt; then \
+ head -n1 git-revisions.txt | \
+ awk '{print $$$(words $(PROJECT_DESCRIPTION) version);}'; \
+else \
+ (git describe --dirty --abbrev=7 --tags --always --first-parent \
+ 2>/dev/null || echo rabbitmq_v0_0_0) | \
+ sed -e 's/^rabbitmq_v//' -e 's/^v//' -e 's/_/./g' -e 's/-/+/' \
+ -e 's/-/./g'; \
+fi)
+endif
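+
+# For example (assumption: a clone checked out at the tag "v3.8.9"):
+# git-describe prints "v3.8.9", which the sed above rewrites to "3.8.9";
+# a commit past the tag, such as "v3.8.9-2-gabc1234", becomes
+# "3.8.9+2.gabc1234".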
+
+# --------------------------------------------------------------------
+# RabbitMQ components.
+# --------------------------------------------------------------------
+
+# For RabbitMQ repositories, we want to check out branches which match
+# the parent project. For instance, if the parent project is on a
+# release tag, dependencies must be on the same release tag. If the
+# parent project is on a topic branch, dependencies must be on the same
+# topic branch, or fall back to `stable` or `master`, whichever was the
+# base of the topic branch.
+
+dep_amqp_client = git_rmq rabbitmq-erlang-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_amqp10_client = git_rmq rabbitmq-amqp1.0-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_amqp10_common = git_rmq rabbitmq-amqp1.0-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbit = git_rmq rabbitmq-server $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbit_common = git_rmq rabbitmq-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_amqp1_0 = git_rmq rabbitmq-amqp1.0 $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_amqp = git_rmq rabbitmq-auth-backend-amqp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_cache = git_rmq rabbitmq-auth-backend-cache $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_http = git_rmq rabbitmq-auth-backend-http $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_ldap = git_rmq rabbitmq-auth-backend-ldap $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_oauth2 = git_rmq rabbitmq-auth-backend-oauth2 $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_mechanism_ssl = git_rmq rabbitmq-auth-mechanism-ssl $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_aws = git_rmq rabbitmq-aws $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_boot_steps_visualiser = git_rmq rabbitmq-boot-steps-visualiser $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_cli = git_rmq rabbitmq-cli $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_codegen = git_rmq rabbitmq-codegen $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_consistent_hash_exchange = git_rmq rabbitmq-consistent-hash-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_ct_client_helpers = git_rmq rabbitmq-ct-client-helpers $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_ct_helpers = git_rmq rabbitmq-ct-helpers $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_delayed_message_exchange = git_rmq rabbitmq-delayed-message-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_dotnet_client = git_rmq rabbitmq-dotnet-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_event_exchange = git_rmq rabbitmq-event-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_federation = git_rmq rabbitmq-federation $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_federation_management = git_rmq rabbitmq-federation-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_java_client = git_rmq rabbitmq-java-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_client = git_rmq rabbitmq-jms-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_cts = git_rmq rabbitmq-jms-cts $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_topic_exchange = git_rmq rabbitmq-jms-topic-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_lvc_exchange = git_rmq rabbitmq-lvc-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management = git_rmq rabbitmq-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_agent = git_rmq rabbitmq-management-agent $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_exchange = git_rmq rabbitmq-management-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_themes = git_rmq rabbitmq-management-themes $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_message_timestamp = git_rmq rabbitmq-message-timestamp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_metronome = git_rmq rabbitmq-metronome $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_mqtt = git_rmq rabbitmq-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_objc_client = git_rmq rabbitmq-objc-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_aws = git_rmq rabbitmq-peer-discovery-aws $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_common = git_rmq rabbitmq-peer-discovery-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_consul = git_rmq rabbitmq-peer-discovery-consul $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_etcd = git_rmq rabbitmq-peer-discovery-etcd $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_k8s = git_rmq rabbitmq-peer-discovery-k8s $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_prometheus = git_rmq rabbitmq-prometheus $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_random_exchange = git_rmq rabbitmq-random-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_recent_history_exchange = git_rmq rabbitmq-recent-history-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_routing_node_stamp = git_rmq rabbitmq-routing-node-stamp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_rtopic_exchange = git_rmq rabbitmq-rtopic-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_server_release = git_rmq rabbitmq-server-release $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_sharding = git_rmq rabbitmq-sharding $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_shovel = git_rmq rabbitmq-shovel $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_shovel_management = git_rmq rabbitmq-shovel-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_stomp = git_rmq rabbitmq-stomp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_stream = git_rmq rabbitmq-stream $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_toke = git_rmq rabbitmq-toke $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_top = git_rmq rabbitmq-top $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_tracing = git_rmq rabbitmq-tracing $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_trust_store = git_rmq rabbitmq-trust-store $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_test = git_rmq rabbitmq-test $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_dispatch = git_rmq rabbitmq-web-dispatch $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_stomp = git_rmq rabbitmq-web-stomp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_stomp_examples = git_rmq rabbitmq-web-stomp-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt = git_rmq rabbitmq-web-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt_examples = git_rmq rabbitmq-web-mqtt-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_website = git_rmq rabbitmq-website $(current_rmq_ref) $(base_rmq_ref) live master
+dep_toke = git_rmq toke $(current_rmq_ref) $(base_rmq_ref) master
+
+dep_rabbitmq_public_umbrella = git_rmq rabbitmq-public-umbrella $(current_rmq_ref) $(base_rmq_ref) master
+
+# Third-party dependencies version pinning.
+#
+# We pin them in this file, which is copied into all projects, to ensure
+# all projects use the same versions. It avoids conflicts and makes it
+# possible to work with rabbitmq-public-umbrella.
+
+dep_accept = hex 0.3.5
+dep_cowboy = hex 2.8.0
+dep_cowlib = hex 2.9.1
+dep_jsx = hex 2.11.0
+dep_lager = hex 3.8.0
+dep_prometheus = git https://github.com/deadtrickster/prometheus.erl.git master
+dep_ra = git https://github.com/rabbitmq/ra.git master
+dep_ranch = hex 1.7.1
+dep_recon = hex 2.5.1
+dep_observer_cli = hex 1.5.4
+dep_stdout_formatter = hex 0.2.4
+dep_sysmon_handler = hex 1.3.0
+
+RABBITMQ_COMPONENTS = amqp_client \
+ amqp10_common \
+ amqp10_client \
+ rabbit \
+ rabbit_common \
+ rabbitmq_amqp1_0 \
+ rabbitmq_auth_backend_amqp \
+ rabbitmq_auth_backend_cache \
+ rabbitmq_auth_backend_http \
+ rabbitmq_auth_backend_ldap \
+ rabbitmq_auth_backend_oauth2 \
+ rabbitmq_auth_mechanism_ssl \
+ rabbitmq_aws \
+ rabbitmq_boot_steps_visualiser \
+ rabbitmq_cli \
+ rabbitmq_codegen \
+ rabbitmq_consistent_hash_exchange \
+ rabbitmq_ct_client_helpers \
+ rabbitmq_ct_helpers \
+ rabbitmq_delayed_message_exchange \
+ rabbitmq_dotnet_client \
+ rabbitmq_event_exchange \
+ rabbitmq_federation \
+ rabbitmq_federation_management \
+ rabbitmq_java_client \
+ rabbitmq_jms_client \
+ rabbitmq_jms_cts \
+ rabbitmq_jms_topic_exchange \
+ rabbitmq_lvc_exchange \
+ rabbitmq_management \
+ rabbitmq_management_agent \
+ rabbitmq_management_exchange \
+ rabbitmq_management_themes \
+ rabbitmq_message_timestamp \
+ rabbitmq_metronome \
+ rabbitmq_mqtt \
+ rabbitmq_objc_client \
+ rabbitmq_peer_discovery_aws \
+ rabbitmq_peer_discovery_common \
+ rabbitmq_peer_discovery_consul \
+ rabbitmq_peer_discovery_etcd \
+ rabbitmq_peer_discovery_k8s \
+ rabbitmq_prometheus \
+ rabbitmq_random_exchange \
+ rabbitmq_recent_history_exchange \
+ rabbitmq_routing_node_stamp \
+ rabbitmq_rtopic_exchange \
+ rabbitmq_server_release \
+ rabbitmq_sharding \
+ rabbitmq_shovel \
+ rabbitmq_shovel_management \
+ rabbitmq_stomp \
+ rabbitmq_stream \
+ rabbitmq_toke \
+ rabbitmq_top \
+ rabbitmq_tracing \
+ rabbitmq_trust_store \
+ rabbitmq_web_dispatch \
+ rabbitmq_web_mqtt \
+ rabbitmq_web_mqtt_examples \
+ rabbitmq_web_stomp \
+ rabbitmq_web_stomp_examples \
+ rabbitmq_website
+
+# Erlang.mk does not rebuild dependencies by default once they have been
+# compiled, except for those listed in the `$(FORCE_REBUILD)` variable.
+#
+# We want all RabbitMQ components to always be rebuilt: this eases
+# the work on several components at the same time.
+
+FORCE_REBUILD = $(RABBITMQ_COMPONENTS)
+
+# Several components have a custom erlang.mk/build.config, mainly
+# to disable eunit. Therefore, we can't use the top-level project's
+# erlang.mk copy.
+NO_AUTOPATCH += $(RABBITMQ_COMPONENTS)
+
+ifeq ($(origin current_rmq_ref),undefined)
+ifneq ($(wildcard .git),)
+current_rmq_ref := $(shell (\
+ ref=$$(LANG=C git branch --list | awk '/^\* \(.*detached / {ref=$$0; sub(/.*detached [^ ]+ /, "", ref); sub(/\)$$/, "", ref); print ref; exit;} /^\* / {ref=$$0; sub(/^\* /, "", ref); print ref; exit}');\
+ if test "$$(git rev-parse --short HEAD)" != "$$ref"; then echo "$$ref"; fi))
+else
+current_rmq_ref := master
+endif
+endif
+export current_rmq_ref
+
+ifeq ($(origin base_rmq_ref),undefined)
+ifneq ($(wildcard .git),)
+possible_base_rmq_ref := master
+ifeq ($(possible_base_rmq_ref),$(current_rmq_ref))
+base_rmq_ref := $(current_rmq_ref)
+else
+base_rmq_ref := $(shell \
+ (git rev-parse --verify -q master >/dev/null && \
+ git rev-parse --verify -q $(possible_base_rmq_ref) >/dev/null && \
+ git merge-base --is-ancestor $$(git merge-base master HEAD) $(possible_base_rmq_ref) && \
+ echo $(possible_base_rmq_ref)) || \
+ echo master)
+endif
+else
+base_rmq_ref := master
+endif
+endif
+export base_rmq_ref
+
+# Repository URL selection.
+#
+# First, we infer other components' locations from the current project
+# repository URL, if it's a Git repository:
+# - We take the "origin" remote URL as the base
+# - The current project name and repository name are replaced by the
+#   target's properties:
+#   eg. rabbitmq-common is replaced by rabbitmq-codegen
+#   eg. rabbit_common is replaced by rabbitmq_codegen
+#
+# If cloning from this computed location fails, we fall back to the
+# RabbitMQ upstream on GitHub.
+
+# Macro to transform eg. "rabbit_common" to "rabbitmq-common".
+rmq_cmp_repo_name = $(word 2,$(dep_$(1)))
+
+# Upstream URL for the current project.
+RABBITMQ_COMPONENT_REPO_NAME := $(call rmq_cmp_repo_name,$(PROJECT))
+RABBITMQ_UPSTREAM_FETCH_URL ?= https://github.com/rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git
+RABBITMQ_UPSTREAM_PUSH_URL ?= git@github.com:rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git
+
+# Current URL for the current project. If this is not a Git clone,
+# default to the upstream Git repository.
+ifneq ($(wildcard .git),)
+git_origin_fetch_url := $(shell git config remote.origin.url)
+git_origin_push_url := $(shell git config remote.origin.pushurl || git config remote.origin.url)
+RABBITMQ_CURRENT_FETCH_URL ?= $(git_origin_fetch_url)
+RABBITMQ_CURRENT_PUSH_URL ?= $(git_origin_push_url)
+else
+RABBITMQ_CURRENT_FETCH_URL ?= $(RABBITMQ_UPSTREAM_FETCH_URL)
+RABBITMQ_CURRENT_PUSH_URL ?= $(RABBITMQ_UPSTREAM_PUSH_URL)
+endif
+
+# Macro to replace the following pattern:
+# 1. /foo.git -> /bar.git
+# 2. /foo -> /bar
+# 3. /foo/ -> /bar/
+subst_repo_name = $(patsubst %/$(1)/%,%/$(2)/%,$(patsubst %/$(1),%/$(2),$(patsubst %/$(1).git,%/$(2).git,$(3))))
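+
+# For example (hypothetical URL):
+#   $(call subst_repo_name,rabbitmq-common,rabbitmq-codegen,https://github.com/rabbitmq/rabbitmq-common.git)
+# yields https://github.com/rabbitmq/rabbitmq-codegen.git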
+
+# Macro to replace both the project's name (eg. "rabbit_common") and
+# repository name (eg. "rabbitmq-common") by the target's equivalent.
+#
+# This macro is kept on one line because we don't want whitespace in
+# the returned value, as it's used in $(dep_fetch_git_rmq) in a shell
+# single-quoted string.
+dep_rmq_repo = $(if $(dep_$(2)),$(call subst_repo_name,$(PROJECT),$(2),$(call subst_repo_name,$(RABBITMQ_COMPONENT_REPO_NAME),$(call rmq_cmp_repo_name,$(2)),$(1))),$(pkg_$(1)_repo))
+
+dep_rmq_commits = $(if $(dep_$(1)), \
+ $(wordlist 3,$(words $(dep_$(1))),$(dep_$(1))), \
+ $(pkg_$(1)_commit))
+
+define dep_fetch_git_rmq
+ fetch_url1='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_FETCH_URL),$(1))'; \
+ fetch_url2='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_FETCH_URL),$(1))'; \
+ if test "$$$$fetch_url1" != '$(RABBITMQ_CURRENT_FETCH_URL)' && \
+ git clone -q -n -- "$$$$fetch_url1" $(DEPS_DIR)/$(call dep_name,$(1)); then \
+ fetch_url="$$$$fetch_url1"; \
+ push_url='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_PUSH_URL),$(1))'; \
+ elif git clone -q -n -- "$$$$fetch_url2" $(DEPS_DIR)/$(call dep_name,$(1)); then \
+ fetch_url="$$$$fetch_url2"; \
+ push_url='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_PUSH_URL),$(1))'; \
+ fi; \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && ( \
+ $(foreach ref,$(call dep_rmq_commits,$(1)), \
+ git checkout -q $(ref) >/dev/null 2>&1 || \
+ ) \
+ (echo "error: no valid pathspec among: $(call dep_rmq_commits,$(1))" \
+ 1>&2 && false) ) && \
+ (test "$$$$fetch_url" = "$$$$push_url" || \
+ git remote set-url --push origin "$$$$push_url")
+endef
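+
+# In short: we first try cloning the dependency from a URL derived from
+# this project's "origin" remote; if that fails we fall back to the
+# GitHub upstream, then check out the first ref among
+# $(dep_rmq_commits) that exists in the clone.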
+
+# --------------------------------------------------------------------
+# Component distribution.
+# --------------------------------------------------------------------
+
+list-dist-deps::
+ @:
+
+prepare-dist::
+ @:
+
+# --------------------------------------------------------------------
+# Umbrella-specific settings.
+# --------------------------------------------------------------------
+
+# If the top-level project is a RabbitMQ component, we override
+# $(DEPS_DIR) for this project to point to the top-level's one.
+#
+# We also verify that the guessed DEPS_DIR is actually named `deps`,
+# to rule out any situation where it is a coincidence that we found a
+# `rabbitmq-components.mk` in an upper directory.
+
+possible_deps_dir_1 = $(abspath ..)
+possible_deps_dir_2 = $(abspath ../../..)
+
+ifeq ($(notdir $(possible_deps_dir_1)),deps)
+ifneq ($(wildcard $(possible_deps_dir_1)/../rabbitmq-components.mk),)
+deps_dir_overriden = 1
+DEPS_DIR ?= $(possible_deps_dir_1)
+DISABLE_DISTCLEAN = 1
+endif
+endif
+
+ifeq ($(deps_dir_overriden),)
+ifeq ($(notdir $(possible_deps_dir_2)),deps)
+ifneq ($(wildcard $(possible_deps_dir_2)/../rabbitmq-components.mk),)
+deps_dir_overriden = 1
+DEPS_DIR ?= $(possible_deps_dir_2)
+DISABLE_DISTCLEAN = 1
+endif
+endif
+endif
+
+ifneq ($(wildcard UMBRELLA.md),)
+DISABLE_DISTCLEAN = 1
+endif
+
+# We disable `make distclean` so $(DEPS_DIR) is not accidentally removed.
+
+ifeq ($(DISABLE_DISTCLEAN),1)
+ifneq ($(filter distclean distclean-deps,$(MAKECMDGOALS)),)
+SKIP_DEPS = 1
+endif
+endif
diff --git a/deps/amqp10_common/src/amqp10_binary_generator.erl b/deps/amqp10_common/src/amqp10_binary_generator.erl
new file mode 100644
index 0000000000..7585c48d12
--- /dev/null
+++ b/deps/amqp10_common/src/amqp10_binary_generator.erl
@@ -0,0 +1,213 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(amqp10_binary_generator).
+
+-export([generate/1, build_frame/2, build_frame/3,
+ build_heartbeat_frame/0]).
+
+-include("amqp10_framing.hrl").
+
+-type signed_byte() :: -128 .. 127.
+
+-type amqp10_ctor() :: ubyte | ushort | byte | short | int | uint | float |
+ double |
+ char | timestamp | uuid | utf8 | symbol | binary |
+ list | map | array |
+ {described, amqp10_type(), amqp10_ctor()}.
+
+-type amqp10_prim() ::
+ null |
+ true |
+ false |
+ {boolean, true | false} |
+ {ubyte, byte()} |
+ {ushort, non_neg_integer()} |
+ {uint, non_neg_integer()} |
+ {ulong, non_neg_integer()} |
+ {byte, signed_byte()} |
+ {short, integer()} |
+ {int, integer()} |
+ {float, float()} |
+ {double, float()} |
+ {char, binary()} |
+ {timestamp, integer()} |
+ {uuid, binary()} |
+ {utf8, binary()} |
+ {symbol, binary()} |
+ {binary, binary()} |
+ {list, [amqp10_type()]} |
+ {map, [{amqp10_prim(), amqp10_prim()}]} | %% TODO: make map a map
+ {array, amqp10_ctor(), [amqp10_type()]}.
+
+-type amqp10_described() ::
+ {described, amqp10_type(), amqp10_prim()}.
+
+-type amqp10_type() ::
+ amqp10_prim() | amqp10_described().
+
+-export_type([
+ amqp10_ctor/0,
+ amqp10_type/0,
+ amqp10_described/0
+ ]).
+
+
+-define(AMQP_FRAME_TYPE, 0).
+-define(DOFF, 2).
+-define(VAR_1_LIMIT, 16#FF).
+
+-spec build_frame(integer(), iolist()) -> iolist().
+build_frame(Channel, Payload) ->
+ build_frame(Channel, ?AMQP_FRAME_TYPE, Payload).
+
+build_frame(Channel, FrameType, Payload) ->
+    Size = iolist_size(Payload) + 8, % 8-byte frame header, no extension
+    [<<Size:32/unsigned, ?DOFF:8, FrameType:8, Channel:16/unsigned>>, Payload].
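+
+%% For illustration: build_frame(0, <<"p">>) yields
+%% [<<0,0,0,9,2,0,0,0>>, <<"p">>], i.e. a size of 9 (payload plus
+%% 8-byte header), DOFF 2, frame type 0 (AMQP) and channel 0.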
+
+build_heartbeat_frame() ->
+ %% length is inclusive
+ <<8:32, ?DOFF:8, ?AMQP_FRAME_TYPE:8, 0:16>>.
+
+-spec generate(amqp10_type()) -> iolist().
+generate({described, Descriptor, Value}) ->
+ DescBin = generate(Descriptor),
+ ValueBin = generate(Value),
+ [ ?DESCRIBED_BIN, DescBin, ValueBin ];
+
+generate(null) -> <<16#40>>;
+generate(true) -> <<16#41>>;
+generate(false) -> <<16#42>>;
+generate({boolean, true}) -> <<16#56, 16#01>>;
+generate({boolean, false}) -> <<16#56, 16#00>>;
+
+%% Some integral types have a compact encoding as a byte; this applies
+%% in particular to the descriptors of AMQP types, which have the domain
+%% bits set to zero and values < 256.
+generate({ubyte, V}) -> <<16#50,V:8/unsigned>>;
+generate({ushort, V}) -> <<16#60,V:16/unsigned>>;
+generate({uint, V}) when V =:= 0 -> <<16#43>>;
+generate({uint, V}) when V < 256 -> <<16#52,V:8/unsigned>>;
+generate({uint, V}) -> <<16#70,V:32/unsigned>>;
+generate({ulong, V}) when V =:= 0 -> <<16#44>>;
+generate({ulong, V}) when V < 256 -> <<16#53,V:8/unsigned>>;
+generate({ulong, V}) -> <<16#80,V:64/unsigned>>;
+generate({byte, V}) -> <<16#51,V:8/signed>>;
+generate({short, V}) -> <<16#61,V:16/signed>>;
+generate({int, V}) when V<128 andalso V>-129 -> <<16#54,V:8/signed>>;
+generate({int, V}) -> <<16#71,V:32/signed>>;
+generate({long, V}) when V<128 andalso V>-129 -> <<16#55,V:8/signed>>;
+generate({long, V}) -> <<16#81,V:64/signed>>;
+generate({float, V}) -> <<16#72,V:32/float>>;
+generate({double, V}) -> <<16#82,V:64/float>>;
+generate({char, V}) -> <<16#73,V:4/binary>>;
+generate({timestamp,V}) -> <<16#83,V:64/signed>>;
+generate({uuid, V}) -> <<16#98,V:16/binary>>;
+
+generate({utf8, V}) when size(V) < ?VAR_1_LIMIT -> [<<16#a1,(size(V)):8>>, V];
+generate({utf8, V}) -> [<<16#b1,(size(V)):32>>, V];
+generate({symbol, V}) -> [<<16#a3,(size(V)):8>>, V];
+generate({binary, V}) ->
+ Size = iolist_size(V),
+ if Size < ?VAR_1_LIMIT -> [<<16#a0,Size:8>>, V];
+ true -> [<<16#b0,Size:32>>, V]
+ end;
+
+generate({list, []}) ->
+ <<16#45>>;
+generate({list, List}) ->
+ Count = length(List),
+ Compound = lists:map(fun generate/1, List),
+ S = iolist_size(Compound),
+    %% If the list contains fewer than (256 - 1) elements and if the
+ %% encoded size (including the encoding of "Count", thus S + 1
+ %% in the test) is less than 256 bytes, we use the short form.
+ %% Otherwise, we use the large form.
+ if Count >= (256 - 1) orelse (S + 1) >= 256 ->
+ [<<16#d0, (S + 4):32/unsigned, Count:32/unsigned>>, Compound];
+ true ->
+ [<<16#c0, (S + 1):8/unsigned, Count:8/unsigned>>, Compound]
+ end;
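+
+%% Worked example (short form): generate({list, [null]}) returns
+%% [<<16#c0,2,1>>, [<<16#40>>]], i.e. bytes C0 02 01 40: size 2
+%% (the count byte plus one element), count 1, then the null encoding.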
+
+generate({map, ListOfPairs}) ->
+ Count = length(ListOfPairs) * 2,
+ Compound = lists:map(fun ({Key, Val}) ->
+ [(generate(Key)),
+ (generate(Val))]
+ end, ListOfPairs),
+ S = iolist_size(Compound),
+ %% See generate({list, ...}) for an explanation of this test.
+ if Count >= (256 - 1) orelse (S + 1) >= 256 ->
+ [<<16#d1, (S + 4):32, Count:32>>, Compound];
+ true ->
+ [<<16#c1, (S + 1):8, Count:8>>, Compound]
+ end;
+
+generate({array, Type, List}) ->
+ Count = length(List),
+ Body = iolist_to_binary([constructor(Type),
+ [generate(Type, I) || I <- List]]),
+ S = size(Body),
+ %% See generate({list, ...}) for an explanation of this test.
+ if Count >= (256 - 1) orelse (S + 1) >= 256 ->
+ [<<16#f0, (S + 4):32/unsigned, Count:32/unsigned>>, Body];
+ true ->
+ [<<16#e0, (S + 1):8/unsigned, Count:8/unsigned>>, Body]
+ end;
+
+generate({as_is, TypeCode, Bin}) ->
+    <<TypeCode, Bin/binary>>.
+
+%% TODO: again, these are stubs to get SASL working. New codec? Will
+%% that ever happen? If not, we really just need to split generate/1
+%% up into things like these...
+%% These constructors map straightforwardly.
+constructor(symbol) -> <<16#b3>>;
+constructor(ubyte) -> <<16#50>>;
+constructor(ushort) -> <<16#60>>;
+constructor(short) -> <<16#61>>;
+constructor(uint) -> <<16#70>>;
+constructor(ulong) -> <<16#80>>;
+constructor(byte) -> <<16#51>>;
+constructor(int) -> <<16#71>>;
+constructor(long) -> <<16#81>>;
+constructor(float) -> <<16#72>>;
+constructor(double) -> <<16#82>>;
+constructor(char) -> <<16#73>>;
+constructor(timestamp) -> <<16#83>>;
+constructor(uuid) -> <<16#98>>;
+constructor(null) -> <<16#40>>;
+constructor(boolean) -> <<16#56>>;
+constructor(array) -> <<16#f0>>; % use large array type for all nested arrays
+constructor(utf8) -> <<16#b1>>;
+constructor({described, Descriptor, Primitive}) ->
+ [<<16#00>>, generate(Descriptor), constructor(Primitive)].
+
+%% Returns an iolist.
+generate(symbol, {symbol, V}) -> [<<(size(V)):32>>, V];
+generate(utf8, {utf8, V}) -> [<<(size(V)):32>>, V];
+generate(boolean, true) -> <<16#01>>;
+generate(boolean, false) -> <<16#00>>;
+generate(boolean, {boolean, true}) -> <<16#01>>;
+generate(boolean, {boolean, false}) -> <<16#00>>;
+generate(ubyte, {ubyte, V}) -> <<V:8/unsigned>>;
+generate(byte, {byte, V}) -> <<V:8/signed>>;
+generate(ushort, {ushort, V}) -> <<V:16/unsigned>>;
+generate(short, {short, V}) -> <<V:16/signed>>;
+generate(uint, {uint, V}) -> <<V:32/unsigned>>;
+generate(int, {int, V}) -> <<V:32/signed>>;
+generate(ulong, {ulong, V}) -> <<V:64/unsigned>>;
+generate(long, {long, V}) -> <<V:64/signed>>;
+generate({described, D, P}, {described, D, V}) ->
+ generate(P, V);
+generate(array, {array, Type, List}) ->
+ Count = length(List),
+ Body = iolist_to_binary([constructor(Type),
+ [generate(Type, I) || I <- List]]),
+ S = size(Body),
+ %% Nested array elements always use the large (32-bit) form; see
+ %% constructor(array) above. Hence no size test is needed here.
+ [<<(S + 4):32/unsigned, Count:32/unsigned>>, Body].
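+
+%% For illustration:
+%% iolist_to_binary(generate({array, boolean,
+%%                            [{boolean, true}, {boolean, false}]}))
+%% yields <<16#e0, 4, 2, 16#56, 1, 0>> -- the small array form: size,
+%% count, the boolean constructor, then one data byte per element.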
diff --git a/deps/amqp10_common/src/amqp10_binary_parser.erl b/deps/amqp10_common/src/amqp10_binary_parser.erl
new file mode 100644
index 0000000000..87f78934a0
--- /dev/null
+++ b/deps/amqp10_common/src/amqp10_binary_parser.erl
@@ -0,0 +1,170 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(amqp10_binary_parser).
+
+-export([parse/1, parse_all/1]).
+
+-include("amqp10_framing.hrl").
+
+% -spec parse(binary()) -> tuple().
+
+parse_all(ValueBin) when is_binary(ValueBin) ->
+ lists:reverse(parse_all([], parse(ValueBin))).
+
+parse_all(Acc, {Value, <<>>}) -> [Value | Acc];
+parse_all(Acc, {Value, Rest}) -> parse_all([Value | Acc], parse(Rest)).
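+
+%% For illustration: parse_all(<<16#40, 16#41>>) yields [null, true].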
+
+-spec parse(binary()) ->
+ {amqp10_binary_generator:amqp10_type(), binary()}.
+parse(<<?DESCRIBED,Rest/binary>>) ->
+ parse_described(Rest);
+parse(Rest) ->
+ parse_primitive0(Rest).
+
+parse_described(Bin) ->
+ {Descriptor, Rest1} = parse(Bin),
+ {Value, Rest2} = parse(Rest1),
+ {{described, Descriptor, Value}, Rest2}.
+
+parse_primitive0(<<Type,Rest/binary>>) ->
+ parse_primitive(Type, Rest).
+
+%% Constants
+parse_primitive(16#40, Rest) -> {null, Rest};
+parse_primitive(16#41, Rest) -> {true, Rest};
+parse_primitive(16#42, Rest) -> {false, Rest};
+parse_primitive(16#43, Rest) -> {{uint, 0}, Rest};
+parse_primitive(16#44, Rest) -> {{ulong, 0}, Rest};
+
+%% Fixed-width encodings, including the compact one-byte forms of the
+%% integral types.
+parse_primitive(16#50, <<V:8/unsigned, R/binary>>) -> {{ubyte, V}, R};
+parse_primitive(16#51, <<V:8/signed, R/binary>>) -> {{byte, V}, R};
+parse_primitive(16#52, <<V:8/unsigned, R/binary>>) -> {{uint, V}, R};
+parse_primitive(16#53, <<V:8/unsigned, R/binary>>) -> {{ulong, V}, R};
+parse_primitive(16#54, <<V:8/signed, R/binary>>) -> {{int, V}, R};
+parse_primitive(16#55, <<V:8/signed, R/binary>>) -> {{long, V}, R};
+parse_primitive(16#56, <<0:8/unsigned, R/binary>>) -> {{boolean, false},R};
+parse_primitive(16#56, <<1:8/unsigned, R/binary>>) -> {{boolean, true}, R};
+parse_primitive(16#60, <<V:16/unsigned, R/binary>>) -> {{ushort, V}, R};
+parse_primitive(16#61, <<V:16/signed, R/binary>>) -> {{short, V}, R};
+parse_primitive(16#70, <<V:32/unsigned, R/binary>>) -> {{uint, V}, R};
+parse_primitive(16#71, <<V:32/signed, R/binary>>) -> {{int, V}, R};
+parse_primitive(16#72, <<V:32/float, R/binary>>) -> {{float, V}, R};
+parse_primitive(16#73, <<Utf32:4/binary,R/binary>>) -> {{char, Utf32}, R};
+parse_primitive(16#80, <<V:64/unsigned, R/binary>>) -> {{ulong, V}, R};
+parse_primitive(16#81, <<V:64/signed, R/binary>>) -> {{long, V}, R};
+parse_primitive(16#82, <<V:64/float, R/binary>>) -> {{double, V}, R};
+parse_primitive(16#83, <<TS:64/signed, R/binary>>) -> {{timestamp, TS}, R};
+parse_primitive(16#98, <<Uuid:16/binary,R/binary>>) -> {{uuid, Uuid}, R};
+
+%% Variable-widths
+parse_primitive(16#a0,<<S:8/unsigned, V:S/binary,R/binary>>)-> {{binary, V}, R};
+parse_primitive(16#a1,<<S:8/unsigned, V:S/binary,R/binary>>)-> {{utf8, V}, R};
+parse_primitive(16#a3,<<S:8/unsigned, V:S/binary,R/binary>>)-> {{symbol, V}, R};
+parse_primitive(16#b3,<<S:32/unsigned,V:S/binary,R/binary>>)-> {{symbol, V}, R};
+parse_primitive(16#b0,<<S:32/unsigned,V:S/binary,R/binary>>)-> {{binary, V}, R};
+parse_primitive(16#b1,<<S:32/unsigned,V:S/binary,R/binary>>)-> {{utf8, V}, R};
+
+%% Compounds
+parse_primitive(16#45, R) ->
+ {{list, []}, R};
+parse_primitive(16#c0,<<S:8/unsigned,CountAndValue:S/binary,R/binary>>) ->
+ {{list, parse_compound(8, CountAndValue)}, R};
+parse_primitive(16#c1,<<S:8/unsigned,CountAndValue:S/binary,R/binary>>) ->
+ List = parse_compound(8, CountAndValue),
+ {{map, mapify(List)}, R};
+parse_primitive(16#d0,<<S:32/unsigned,CountAndValue:S/binary,R/binary>>) ->
+ {{list, parse_compound(32, CountAndValue)}, R};
+parse_primitive(16#d1,<<S:32/unsigned,CountAndValue:S/binary,R/binary>>) ->
+ List = parse_compound(32, CountAndValue),
+ {{map, mapify(List)}, R};
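+
+%% For illustration: parse(<<16#c0, 2, 1, 16#40>>) yields
+%% {{list, [null]}, <<>>}, the inverse of the generator's small-list
+%% encoding.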
+
+%% Arrays
+parse_primitive(16#e0,<<S:8/unsigned,CountAndV:S/binary,R/binary>>) ->
+ {parse_array(8, CountAndV), R};
+parse_primitive(16#f0,<<S:32/unsigned,CountAndV:S/binary,R/binary>>) ->
+ {parse_array(32, CountAndV), R};
+
+%% NaN or +-inf
+parse_primitive(16#72, <<V:32, R/binary>>) ->
+ {{as_is, 16#72, <<V:32>>}, R};
+parse_primitive(16#82, <<V:64, R/binary>>) ->
+ {{as_is, 16#82, <<V:64>>}, R};
+
+%% decimals
+parse_primitive(16#74, <<V:32, R/binary>>) ->
+ {{as_is, 16#74, <<V:32>>}, R};
+parse_primitive(16#84, <<V:64, R/binary>>) ->
+ {{as_is, 16#84, <<V:64>>}, R};
+parse_primitive(16#94, <<V:128, R/binary>>) ->
+ {{as_is, 16#94, <<V:128>>}, R};
+
+parse_primitive(Type, _Bin) ->
+ throw({primitive_type_unsupported, Type, _Bin}).
+
+parse_compound(UnitSize, Bin) ->
+ <<Count:UnitSize, Bin1/binary>> = Bin,
+ parse_compound1(Count, Bin1, []).
+
+parse_compound1(0, <<>>, List) ->
+ lists:reverse(List);
+parse_compound1(_Left, <<>>, List) ->
+ case application:get_env(rabbitmq_amqp1_0, protocol_strict_mode) of
+ {ok, true} -> throw(compound_datatype_miscount);
+ %% ignore the miscount unless strict mode is explicitly enabled;
+ %% this also covers the flag being unset
+ _ -> lists:reverse(List)
+ end;
+parse_compound1(Count, Bin, Acc) ->
+ {Value, Rest} = parse(Bin),
+ parse_compound1(Count - 1, Rest, [Value | Acc]).
+
+%% array structure is {array, Ctor, [Data]}
+%% e.g. {array, symbol, [<<"amqp:accepted:list">>]}
+parse_array(UnitSize, Bin) ->
+ <<Count:UnitSize, Bin1/binary>> = Bin,
+ parse_array1(Count, Bin1).
+
+parse_array1(Count, <<?DESCRIBED, Rest/binary>>) ->
+ {Descriptor, Rest1} = parse(Rest),
+ {array, Type, List} = parse_array1(Count, Rest1),
+ Values = lists:map(fun (Value) ->
+ {described, Descriptor, Value}
+ end, List),
+ % this format cannot represent an empty array of described types
+ {array, {described, Descriptor, Type}, Values};
+parse_array1(Count, <<Type, ArrayBin/binary>>) ->
+ parse_array2(Count, Type, ArrayBin, []).
+
+parse_array2(0, Type, <<>>, Acc) ->
+ {array, parse_constructor(Type), lists:reverse(Acc)};
+parse_array2(Count, Type, Bin, Acc) ->
+ {Value, Rest} = parse_primitive(Type, Bin),
+ parse_array2(Count - 1, Type, Rest, [Value | Acc]).
+
+parse_constructor(16#a3) -> symbol;
+parse_constructor(16#b3) -> symbol;
+parse_constructor(16#a1) -> utf8;
+parse_constructor(16#b1) -> utf8;
+parse_constructor(16#50) -> ubyte;
+parse_constructor(16#51) -> byte;
+parse_constructor(16#60) -> ushort;
+parse_constructor(16#61) -> short;
+parse_constructor(16#70) -> uint;
+parse_constructor(16#71) -> int;
+parse_constructor(16#80) -> ulong;
+parse_constructor(16#81) -> long;
+parse_constructor(16#40) -> null;
+parse_constructor(16#56) -> boolean;
+parse_constructor(16#f0) -> array;
+parse_constructor(0) -> described;
+parse_constructor(X) ->
+ exit({failed_to_parse_constructor, X}).
+
+mapify([]) ->
+ [];
+mapify([Key, Value | Rest]) ->
+ [{Key, Value} | mapify(Rest)].
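+
+%% For illustration: mapify([K1, V1, K2, V2]) yields
+%% [{K1, V1}, {K2, V2}].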
diff --git a/deps/amqp10_common/src/amqp10_framing.erl b/deps/amqp10_common/src/amqp10_framing.erl
new file mode 100644
index 0000000000..f96cf8b602
--- /dev/null
+++ b/deps/amqp10_common/src/amqp10_framing.erl
@@ -0,0 +1,217 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(amqp10_framing).
+
+-export([encode/1, encode_described/3, decode/1, version/0,
+ symbol_for/1, number_for/1, encode_bin/1, decode_bin/1, pprint/1]).
+
+%% debug
+-export([fill_from_list/2, fill_from_map/2]).
+
+-include("amqp10_framing.hrl").
+
+-type amqp10_frame() :: #'v1_0.header'{} |
+#'v1_0.delivery_annotations'{} |
+#'v1_0.message_annotations'{} |
+#'v1_0.properties'{} |
+#'v1_0.application_properties'{} |
+#'v1_0.data'{} |
+#'v1_0.amqp_sequence'{} |
+#'v1_0.amqp_value'{} |
+#'v1_0.footer'{} |
+#'v1_0.received'{} |
+#'v1_0.accepted'{} |
+#'v1_0.rejected'{} |
+#'v1_0.released'{} |
+#'v1_0.modified'{} |
+#'v1_0.source'{} |
+#'v1_0.target'{} |
+#'v1_0.delete_on_close'{} |
+#'v1_0.delete_on_no_links'{} |
+#'v1_0.delete_on_no_messages'{} |
+#'v1_0.delete_on_no_links_or_messages'{} |
+#'v1_0.sasl_mechanisms'{} |
+#'v1_0.sasl_init'{} |
+#'v1_0.sasl_challenge'{} |
+#'v1_0.sasl_response'{} |
+#'v1_0.sasl_outcome'{} |
+#'v1_0.attach'{} |
+#'v1_0.flow'{} |
+#'v1_0.transfer'{} |
+#'v1_0.disposition'{} |
+#'v1_0.detach'{} |
+#'v1_0.end'{} |
+#'v1_0.close'{} |
+#'v1_0.error'{} |
+#'v1_0.coordinator'{} |
+#'v1_0.declare'{} |
+#'v1_0.discharge'{} |
+#'v1_0.declared'{} |
+#'v1_0.transactional_state'{}.
+
+version() ->
+ {1, 0, 0}.
+
+%% These functions are essentially in lieu of code generation...
+
+fill_from_list(Record, Fields) ->
+ {Res, _} = lists:foldl(
+ fun (Field, {Record1, Num}) ->
+ DecodedField = decode(Field),
+ {setelement(Num, Record1, DecodedField),
+ Num + 1}
+ end,
+ {Record, 2}, Fields),
+ Res.
+
+fill_from_map(Record, Fields) ->
+ {Res, _} = lists:foldl(
+ fun (Key, {Record1, Num}) ->
+ case proplists:get_value(Key, Fields) of
+ undefined ->
+ {Record1, Num+1};
+ Value ->
+ {setelement(Num, Record1, decode(Value)), Num+1}
+ end
+ end,
+ {Record, 2}, keys(Record)),
+ Res.
+
+fill_from(F = #'v1_0.data'{}, Field) ->
+ F#'v1_0.data'{content = Field};
+fill_from(F = #'v1_0.amqp_value'{}, Field) ->
+ F#'v1_0.amqp_value'{content = Field}.
+
+keys(Record) ->
+ [{symbol, symbolify(K)} || K <- amqp10_framing0:fields(Record)].
+
+symbolify(FieldName) when is_atom(FieldName) ->
+ re:replace(atom_to_list(FieldName), "_", "-", [{return,binary}, global]).
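+
+%% For illustration: symbolify(message_id) yields <<"message-id">>.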
+
+%% TODO: in fields of composite types with multiple=true, "a null
+%% value and a zero-length array (with a correct type for its
+%% elements) both describe an absence of a value and should be treated
+%% as semantically identical." (see section 1.3)
+
+%% A sequence comes as an arbitrary list of values; it's not a
+%% composite type.
+decode({described, Descriptor, {list, Fields}}) ->
+ case amqp10_framing0:record_for(Descriptor) of
+ #'v1_0.amqp_sequence'{} ->
+ #'v1_0.amqp_sequence'{content = [decode(F) || F <- Fields]};
+ Else ->
+ fill_from_list(Else, Fields)
+ end;
+decode({described, Descriptor, {map, Fields}}) ->
+ case amqp10_framing0:record_for(Descriptor) of
+ #'v1_0.application_properties'{} ->
+ #'v1_0.application_properties'{content = decode_map(Fields)};
+ #'v1_0.delivery_annotations'{} ->
+ #'v1_0.delivery_annotations'{content = decode_map(Fields)};
+ #'v1_0.message_annotations'{} ->
+ #'v1_0.message_annotations'{content = decode_map(Fields)};
+ #'v1_0.footer'{} ->
+ #'v1_0.footer'{content = decode_map(Fields)};
+ Else ->
+ fill_from_map(Else, Fields)
+ end;
+decode({described, Descriptor, {binary, Field}}) ->
+ case amqp10_framing0:record_for(Descriptor) of
+ #'v1_0.amqp_value'{} ->
+ #'v1_0.amqp_value'{content = {binary, Field}};
+ #'v1_0.data'{} ->
+ #'v1_0.data'{content = Field}
+ end;
+decode({described, Descriptor, Field}) ->
+ fill_from(amqp10_framing0:record_for(Descriptor), Field);
+decode(null) ->
+ undefined;
+decode(Other) ->
+ Other.
+
+decode_map(Fields) ->
+ [{decode(K), decode(V)} || {K, V} <- Fields].
+
+-spec encode_described(list | map | binary | annotations | '*',
+ non_neg_integer(),
+ amqp10_frame()) ->
+ amqp10_binary_generator:amqp10_described().
+encode_described(list, CodeNumber,
+ #'v1_0.amqp_sequence'{content = Content}) ->
+ {described, {ulong, CodeNumber},
+ {list, lists:map(fun encode/1, Content)}};
+encode_described(list, CodeNumber, Frame) ->
+ {described, {ulong, CodeNumber},
+ {list, lists:map(fun encode/1, tl(tuple_to_list(Frame)))}};
+encode_described(map, CodeNumber,
+ #'v1_0.application_properties'{content = Content}) ->
+ {described, {ulong, CodeNumber}, {map, Content}};
+encode_described(map, CodeNumber,
+ #'v1_0.delivery_annotations'{content = Content}) ->
+ {described, {ulong, CodeNumber}, {map, Content}};
+encode_described(map, CodeNumber,
+ #'v1_0.message_annotations'{content = Content}) ->
+ {described, {ulong, CodeNumber}, {map, Content}};
+encode_described(map, CodeNumber, #'v1_0.footer'{content = Content}) ->
+ {described, {ulong, CodeNumber}, {map, Content}};
+encode_described(binary, CodeNumber, #'v1_0.data'{content = Content}) ->
+ {described, {ulong, CodeNumber}, {binary, Content}};
+encode_described('*', CodeNumber, #'v1_0.amqp_value'{content = Content}) ->
+ {described, {ulong, CodeNumber}, Content};
+encode_described(annotations, CodeNumber, Frame) ->
+ encode_described(map, CodeNumber, Frame).
+
+encode(X) ->
+ amqp10_framing0:encode(X).
+
+encode_bin(X) ->
+ amqp10_binary_generator:generate(encode(X)).
+
+decode_bin(X) ->
+ [decode(PerfDesc) || PerfDesc <- decode_bin0(X)].
+
+decode_bin0(<<>>) -> [];
+decode_bin0(X) -> {PerfDesc, Rest} = amqp10_binary_parser:parse(X),
+ [PerfDesc | decode_bin0(Rest)].
+
+symbol_for(X) ->
+ amqp10_framing0:symbol_for(X).
+
+number_for(X) ->
+ amqp10_framing0:number_for(X).
+
+pprint(Thing) when is_tuple(Thing) ->
+ case amqp10_framing0:fields(Thing) of
+ unknown -> Thing;
+ Names -> [T|L] = tuple_to_list(Thing),
+ {T, lists:zip(Names, [pprint(I) || I <- L])}
+ end;
+pprint(Other) -> Other.
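+
+%% For illustration, assuming the generated amqp10_framing0:fields/1
+%% returns [error] for the close performative:
+%% pprint(#'v1_0.close'{}) yields {'v1_0.close', [{error, undefined}]}.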
+
+-ifdef(TEST).
+-include_lib("eunit/include/eunit.hrl").
+
+encode_decode_test_() ->
+ Data = [{{utf8, <<"k">>}, {binary, <<"v">>}}],
+ Test = fun(M) -> [M] = decode_bin(iolist_to_binary(encode_bin(M))) end,
+ [
+ fun() -> Test(#'v1_0.application_properties'{content = Data}) end,
+ fun() -> Test(#'v1_0.delivery_annotations'{content = Data}) end,
+ fun() -> Test(#'v1_0.message_annotations'{content = Data}) end,
+ fun() -> Test(#'v1_0.footer'{content = Data}) end
+ ].
+
+encode_decode_amqp_sequence_test() ->
+ L = [{utf8, <<"k">>},
+ {binary, <<"v">>}],
+ F = #'v1_0.amqp_sequence'{content = L},
+ [F] = decode_bin(iolist_to_binary(encode_bin(F))),
+ ok.
+
+-endif.
diff --git a/deps/amqp10_common/test/binary_generator_SUITE.erl b/deps/amqp10_common/test/binary_generator_SUITE.erl
new file mode 100644
index 0000000000..773e66aa07
--- /dev/null
+++ b/deps/amqp10_common/test/binary_generator_SUITE.erl
@@ -0,0 +1,167 @@
+-module(binary_generator_SUITE).
+
+-compile(export_all).
+
+-export([
+ ]).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+%%%===================================================================
+%%% Common Test callbacks
+%%%===================================================================
+
+all() ->
+ [
+ {group, tests}
+ ].
+
+
+all_tests() ->
+ [
+ booleans,
+ symbol,
+ timestamp,
+ numerals,
+ utf8,
+ list,
+ map,
+ described,
+ array
+ ].
+
+groups() ->
+ [
+ {tests, [], all_tests()}
+ ].
+
+init_per_suite(Config) ->
+ Config.
+
+end_per_suite(_Config) ->
+ ok.
+
+init_per_group(_Group, Config) ->
+ Config.
+
+end_per_group(_Group, _Config) ->
+ ok.
+
+init_per_testcase(_TestCase, Config) ->
+ Config.
+
+end_per_testcase(_TestCase, _Config) ->
+ ok.
+
+%%%===================================================================
+%%% Test cases
+%%%===================================================================
+
+booleans(_Config) ->
+ roundtrip(true),
+ roundtrip(false),
+ roundtrip({boolean, false}),
+ roundtrip({boolean, true}),
+ ok.
+
+symbol(_Config) ->
+ roundtrip({symbol, <<"SYMB">>}),
+ ok.
+
+timestamp(_Config) ->
+ roundtrip({timestamp, erlang:system_time(millisecond)}),
+ ok.
+
+numerals(_Config) ->
+ roundtrip({ubyte, 0}),
+ roundtrip({ubyte, 16#FF}),
+ roundtrip({ushort, 0}),
+ roundtrip({ushort, 16#FFFF}),
+ roundtrip({uint, 0}),
+ roundtrip({uint, 16#FFFFFFFF}),
+ roundtrip({ulong, 0}),
+ roundtrip({ulong, 16#FFFFFFFFFFFFFFFF}),
+ roundtrip({byte, 0}),
+ roundtrip({byte, 16#7F}),
+ roundtrip({byte, -16#80}),
+ roundtrip({short, 0}),
+ roundtrip({short, 16#7FFF}),
+ roundtrip({short, -16#8000}),
+ roundtrip({int, 0}),
+ roundtrip({int, 16#7FFFFFFF}),
+ roundtrip({int, -16#80000000}),
+ roundtrip({long, 0}),
+ roundtrip({long, 16#7FFFFFFFFFFFFFFF}),
+ roundtrip({long, -16#8000000000000000}),
+ roundtrip({float, 0.0}),
+ roundtrip({float, 1.0}),
+ roundtrip({float, -1.0}),
+ roundtrip({double, 0.0}),
+ roundtrip({double, 1.0}),
+ roundtrip({double, -1.0}),
+ ok.
+
+utf8(_Config) ->
+ roundtrip({utf8, <<"hi">>}),
+ roundtrip({utf8, binary:copy(<<"asdfghjk">>, 64)}),
+ ok.
+
+list(_Config) ->
+ roundtrip({list, [{utf8, <<"hi">>},
+ {int, 123},
+ {binary, <<"data">>},
+ {array, int, [{int, 1}, {int, 2}, {int, 3}]},
+ {described,
+ {utf8, <<"URL">>},
+ {utf8, <<"http://example.org/hello-world">>}}
+ ]}),
+ ok.
+
+map(_Config) ->
+ roundtrip({map, [
+ {{utf8, <<"key1">>}, {utf8, <<"value1">>}},
+ {{utf8, <<"key2">>}, {int, 33}}
+ ]}),
+ ok.
+
+
+described(_Config) ->
+ roundtrip({described,
+ {utf8, <<"URL">>},
+ {utf8, <<"http://example.org/hello-world">>}}),
+ ok.
+
+array(_Config) ->
+ roundtrip({array, symbol, [{symbol, <<"ANONYMOUS">>}]}),
+ roundtrip({array, symbol, []}),
+ roundtrip({array, ubyte, [{ubyte, 1}, {ubyte, 255}]}),
+ roundtrip({array, byte, [{byte, 1}, {byte, -128}, {byte, 127}]}),
+ roundtrip({array, ushort, [{ushort, 0}, {ushort, 16#FFFF}]}),
+ roundtrip({array, short, [{short, 0}, {short, -16#8000},
+ {short, 16#7FFF}]}),
+ % uint
+ roundtrip({array, uint, [{uint, 0}, {uint, 16#FFFFFFFF}]}),
+ roundtrip({array, int, [{int, 0}, {int, -16#8000000},
+ {int, 16#7FFFFFFF}]}),
+ roundtrip({array, ulong, [{ulong, 0}, {ulong, 16#FFFFFFFFFFFFFFFF}]}),
+ roundtrip({array, long, [{long, 0}, {long, -16#8000000000000},
+ {long, 16#7FFFFFFFFFFFFF}]}),
+ roundtrip({array, boolean, [{boolean, true}, {boolean, false}]}),
+ % array of arrays
+ % TODO: does the inner type need to be consistent across the array?
+ roundtrip({array, array, []}),
+ roundtrip({array, array, [{array, symbol, [{symbol, <<"ANONYMOUS">>}]}]}),
+
+ Desc = {utf8, <<"URL">>},
+ roundtrip({array, {described, Desc, utf8},
+ [{described, Desc, {utf8, <<"http://example.org/hello">>}}]}),
+ roundtrip({array, {described, Desc, utf8}, []}),
+ ok.
+
+%% Utility
+
+roundtrip(Term) ->
+ Bin = iolist_to_binary(amqp10_binary_generator:generate(Term)),
+ % generate returns an iolist but parse expects a binary
+ ?assertMatch({Term, _}, amqp10_binary_parser:parse(Bin)).
diff --git a/deps/amqp_client/.gitignore b/deps/amqp_client/.gitignore
new file mode 100644
index 0000000000..394996242c
--- /dev/null
+++ b/deps/amqp_client/.gitignore
@@ -0,0 +1,22 @@
+.sw?
+.*.sw?
+*.beam
+*.coverdata
+/.erlang.mk/
+/_build/
+/cover/
+/deps/
+/doc/
+/ebin/
+/escript/
+/git-revisions.txt
+/logs/
+/PACKAGES/
+*.plt
+/plugins/
+/rebar.config
+/rebar.lock
+/sbin
+/test/ct.cover.spec
+/xrefr
+/amqp_client.d
diff --git a/deps/amqp_client/.travis.yml b/deps/amqp_client/.travis.yml
new file mode 100644
index 0000000000..c360f21022
--- /dev/null
+++ b/deps/amqp_client/.travis.yml
@@ -0,0 +1,61 @@
+# vim:sw=2:et:
+
+os: linux
+dist: xenial
+language: elixir
+notifications:
+ email:
+ recipients:
+ - alerts@rabbitmq.com
+ on_success: never
+ on_failure: always
+addons:
+ apt:
+ packages:
+ - awscli
+cache:
+ apt: true
+env:
+ global:
+ - secure: YQ5QmZHQC3dBIk0vfHlQxHejaqCLq6GbuinOgLieS/Vwrmt51zbg+Wf/tG5yNTaYHHBev/D2e1u2nidZPQ6Xg4InFuknr2dYyPeJ+6A6194aWvdOkB/O3Ii2+m3dafZxsmYj9SZPYIUcwyzXQHwyaL/zOUJC9MGQzYv1L1FAQnQ=
+ - secure: Q1Du4xVNhvNBSv7vYzcF/9HB7R85nhwIpjZogfbZM2Kr6oTOeT8lyCEoiDjbiXkxZdWQa2YVGsjcvz8Q4rDvt8xu1gUG8j/bV3QWjB+0iWq2xQotnKvXW/toiDKBYyoIj9RUrQR5brwU95R1GAClLqxMdjY+/LijzLKqlQQo1L4=
+
+ # $base_rmq_ref is used by rabbitmq-components.mk to select the
+ # appropriate branch for dependencies.
+ - base_rmq_ref=master
+
+elixir:
+ - '1.10'
+otp_release:
+ - '22.3'
+ - '23.0'
+
+install:
+ # Since this is an Erlang project (we set the language to Elixir
+ # only to ensure Elixir is installed), we don't want Travis to run
+ # mix(1) automatically, as it would break.
+ skip
+
+script:
+ # $current_rmq_ref is also used by rabbitmq-components.mk to select
+ # the appropriate branch for dependencies.
+ - make check-rabbitmq-components.mk
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+ - make xref
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+ - make tests
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+
+after_failure:
+ - |
+ cd "$TRAVIS_BUILD_DIR"
+ if test -d logs && test "$AWS_ACCESS_KEY_ID" && test "$AWS_SECRET_ACCESS_KEY"; then
+ archive_name="$(basename "$TRAVIS_REPO_SLUG")-$TRAVIS_JOB_NUMBER"
+
+ tar -c --transform "s/^logs/${archive_name}/" -f - logs | \
+ xz > "${archive_name}.tar.xz"
+
+ aws s3 cp "${archive_name}.tar.xz" s3://server-release-pipeline/travis-ci-logs/ \
+ --region eu-west-1 \
+ --acl public-read
+ fi
diff --git a/deps/amqp_client/CODE_OF_CONDUCT.md b/deps/amqp_client/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000000..08697906fd
--- /dev/null
+++ b/deps/amqp_client/CODE_OF_CONDUCT.md
@@ -0,0 +1,44 @@
+# Contributor Code of Conduct
+
+As contributors and maintainers of this project, and in the interest of fostering an open
+and welcoming community, we pledge to respect all people who contribute through reporting
+issues, posting feature requests, updating documentation, submitting pull requests or
+patches, and other activities.
+
+We are committed to making participation in this project a harassment-free experience for
+everyone, regardless of level of experience, gender, gender identity and expression,
+sexual orientation, disability, personal appearance, body size, race, ethnicity, age,
+religion, or nationality.
+
+Examples of unacceptable behavior by participants include:
+
+ * The use of sexualized language or imagery
+ * Personal attacks
+ * Trolling or insulting/derogatory comments
+ * Public or private harassment
+ * Publishing others' private information, such as physical or electronic addresses,
+ without explicit permission
+ * Other unethical or unprofessional conduct
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments,
+commits, code, wiki edits, issues, and other contributions that are not aligned to this
+Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors
+that they deem inappropriate, threatening, offensive, or harmful.
+
+By adopting this Code of Conduct, project maintainers commit themselves to fairly and
+consistently applying these principles to every aspect of managing this project. Project
+maintainers who do not follow or enforce the Code of Conduct may be permanently removed
+from the project team.
+
+This Code of Conduct applies both within project spaces and in public spaces when an
+individual is representing the project or its community.
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by
+contacting a project maintainer at [info@rabbitmq.com](mailto:info@rabbitmq.com). All complaints will
+be reviewed and investigated and will result in a response that is deemed necessary and
+appropriate to the circumstances. Maintainers are obligated to maintain confidentiality
+with regard to the reporter of an incident.
+
+This Code of Conduct is adapted from the
+[Contributor Covenant](https://contributor-covenant.org), version 1.3.0, available at
+[contributor-covenant.org/version/1/3/0/](https://contributor-covenant.org/version/1/3/0/)
diff --git a/deps/amqp_client/CONTRIBUTING.md b/deps/amqp_client/CONTRIBUTING.md
new file mode 100644
index 0000000000..23a92fef9c
--- /dev/null
+++ b/deps/amqp_client/CONTRIBUTING.md
@@ -0,0 +1,38 @@
+## Overview
+
+RabbitMQ projects use pull requests to discuss, collaborate on and accept code contributions.
+Pull requests are the primary place for discussing code changes.
+
+## How to Contribute
+
+The process is fairly standard:
+
+ * Fork the repository or repositories you plan on contributing to
+ * Clone [RabbitMQ umbrella repository](https://github.com/rabbitmq/rabbitmq-public-umbrella)
+ * `cd umbrella`, `make co`
+ * Create a branch with a descriptive name in the relevant repositories
+ * Make your changes, run tests, commit with a [descriptive message](https://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html), push to your fork
+ * Submit pull requests with an explanation what has been changed and **why**
+ * Submit a filled out and signed [Contributor Agreement](https://github.com/rabbitmq/ca#how-to-submit) if needed (see below)
+ * Be patient. We will get to your pull request eventually
+
+If what you are going to work on is a substantial change, please first ask the core team
+for their opinion on the [RabbitMQ mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
+
+
+## Code of Conduct
+
+See [CODE_OF_CONDUCT.md](./CODE_OF_CONDUCT.md).
+
+
+## Contributor Agreement
+
+If you want to contribute a non-trivial change, please submit a signed copy of our
+[Contributor Agreement](https://github.com/rabbitmq/ca#how-to-submit) around the time
+you submit your pull request. This will make it much easier (in some cases, possible)
+for the RabbitMQ team at Pivotal to merge your contribution.
+
+
+## Where to Ask Questions
+
+If something isn't clear, feel free to ask on our [mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
diff --git a/deps/amqp_client/LICENSE b/deps/amqp_client/LICENSE
new file mode 100644
index 0000000000..f2da65d175
--- /dev/null
+++ b/deps/amqp_client/LICENSE
@@ -0,0 +1,4 @@
+This package is licensed under the MPL 2.0. For the MPL 2.0, please see LICENSE-MPL-RabbitMQ.
+
+If you have any questions regarding licensing, please contact us at
+info@rabbitmq.com.
diff --git a/deps/amqp_client/LICENSE-MPL-RabbitMQ b/deps/amqp_client/LICENSE-MPL-RabbitMQ
new file mode 100644
index 0000000000..14e2f777f6
--- /dev/null
+++ b/deps/amqp_client/LICENSE-MPL-RabbitMQ
@@ -0,0 +1,373 @@
+Mozilla Public License Version 2.0
+==================================
+
+1. Definitions
+--------------
+
+1.1. "Contributor"
+ means each individual or legal entity that creates, contributes to
+ the creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+ means the combination of the Contributions of others (if any) used
+ by a Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+ means Source Code Form to which the initial Contributor has attached
+ the notice in Exhibit A, the Executable Form of such Source Code
+ Form, and Modifications of such Source Code Form, in each case
+ including portions thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ (a) that the initial Contributor has attached the notice described
+ in Exhibit B to the Covered Software; or
+
+ (b) that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the
+ terms of a Secondary License.
+
+1.6. "Executable Form"
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+ means a work that combines Covered Software with other material, in
+ a separate file or files, that is not Covered Software.
+
+1.8. "License"
+ means this document.
+
+1.9. "Licensable"
+ means having the right to grant, to the maximum extent possible,
+ whether at the time of the initial grant or subsequently, any and
+ all of the rights conveyed by this License.
+
+1.10. "Modifications"
+ means any of the following:
+
+ (a) any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered
+ Software; or
+
+ (b) any new file in Source Code Form that contains any Covered
+ Software.
+
+1.11. "Patent Claims" of a Contributor
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the
+ License, by the making, using, selling, offering for sale, having
+ made, import, or transfer of either its Contributions or its
+ Contributor Version.
+
+1.12. "Secondary License"
+ means either the GNU General Public License, Version 2.0, the GNU
+ Lesser General Public License, Version 2.1, the GNU Affero General
+ Public License, Version 3.0, or any later versions of those
+ licenses.
+
+1.13. "Source Code Form"
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that
+ controls, is controlled by, or is under common control with You. For
+ purposes of this definition, "control" means (a) the power, direct
+ or indirect, to cause the direction or management of such entity,
+ whether by contract or otherwise, or (b) ownership of more than
+ fifty percent (50%) of the outstanding shares or beneficial
+ ownership of such entity.
+
+2. License Grants and Conditions
+--------------------------------
+
+2.1. Grants
+
+Each Contributor hereby grants You a world-wide, royalty-free,
+non-exclusive license:
+
+(a) under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+(b) under Patent Claims of such Contributor to make, use, sell, offer
+ for sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+The licenses granted in Section 2.1 with respect to any Contribution
+become effective for each Contribution on the date the Contributor first
+distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+The licenses granted in this Section 2 are the only rights granted under
+this License. No additional rights or licenses will be implied from the
+distribution or licensing of Covered Software under this License.
+Notwithstanding Section 2.1(b) above, no patent license is granted by a
+Contributor:
+
+(a) for any code that a Contributor has removed from Covered Software;
+ or
+
+(b) for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+(c) under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+This License does not grant any rights in the trademarks, service marks,
+or logos of any Contributor (except as may be necessary to comply with
+the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+No Contributor makes additional grants as a result of Your choice to
+distribute the Covered Software under a subsequent version of this
+License (see Section 10.2) or under the terms of a Secondary License (if
+permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+Each Contributor represents that the Contributor believes its
+Contributions are its original creation(s) or it has sufficient rights
+to grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+This License is not intended to limit any rights You have under
+applicable copyright doctrines of fair use, fair dealing, or other
+equivalents.
+
+2.7. Conditions
+
+Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
+in Section 2.1.
+
+3. Responsibilities
+-------------------
+
+3.1. Distribution of Source Form
+
+All distribution of Covered Software in Source Code Form, including any
+Modifications that You create or to which You contribute, must be under
+the terms of this License. You must inform recipients that the Source
+Code Form of the Covered Software is governed by the terms of this
+License, and how they can obtain a copy of this License. You may not
+attempt to alter or restrict the recipients' rights in the Source Code
+Form.
+
+3.2. Distribution of Executable Form
+
+If You distribute Covered Software in Executable Form then:
+
+(a) such Covered Software must also be made available in Source Code
+ Form, as described in Section 3.1, and You must inform recipients of
+ the Executable Form how they can obtain a copy of such Source Code
+ Form by reasonable means in a timely manner, at a charge no more
+ than the cost of distribution to the recipient; and
+
+(b) You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter
+ the recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+You may create and distribute a Larger Work under terms of Your choice,
+provided that You also comply with the requirements of this License for
+the Covered Software. If the Larger Work is a combination of Covered
+Software with a work governed by one or more Secondary Licenses, and the
+Covered Software is not Incompatible With Secondary Licenses, this
+License permits You to additionally distribute such Covered Software
+under the terms of such Secondary License(s), so that the recipient of
+the Larger Work may, at their option, further distribute the Covered
+Software under the terms of either this License or such Secondary
+License(s).
+
+3.4. Notices
+
+You may not remove or alter the substance of any license notices
+(including copyright notices, patent notices, disclaimers of warranty,
+or limitations of liability) contained within the Source Code Form of
+the Covered Software, except that You may alter any license notices to
+the extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+You may choose to offer, and to charge a fee for, warranty, support,
+indemnity or liability obligations to one or more recipients of Covered
+Software. However, You may do so only on Your own behalf, and not on
+behalf of any Contributor. You must make it absolutely clear that any
+such warranty, support, indemnity, or liability obligation is offered by
+You alone, and You hereby agree to indemnify every Contributor for any
+liability incurred by such Contributor as a result of warranty, support,
+indemnity or liability terms You offer. You may include additional
+disclaimers of warranty and limitations of liability specific to any
+jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+---------------------------------------------------
+
+If it is impossible for You to comply with any of the terms of this
+License with respect to some or all of the Covered Software due to
+statute, judicial order, or regulation then You must: (a) comply with
+the terms of this License to the maximum extent possible; and (b)
+describe the limitations and the code they affect. Such description must
+be placed in a text file included with all distributions of the Covered
+Software under this License. Except to the extent prohibited by statute
+or regulation, such description must be sufficiently detailed for a
+recipient of ordinary skill to be able to understand it.
+
+5. Termination
+--------------
+
+5.1. The rights granted under this License will terminate automatically
+if You fail to comply with any of its terms. However, if You become
+compliant, then the rights granted under this License from a particular
+Contributor are reinstated (a) provisionally, unless and until such
+Contributor explicitly and finally terminates Your grants, and (b) on an
+ongoing basis, if such Contributor fails to notify You of the
+non-compliance by some reasonable means prior to 60 days after You have
+come back into compliance. Moreover, Your grants from a particular
+Contributor are reinstated on an ongoing basis if such Contributor
+notifies You of the non-compliance by some reasonable means, this is the
+first time You have received notice of non-compliance with this License
+from such Contributor, and You become compliant prior to 30 days after
+Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+infringement claim (excluding declaratory judgment actions,
+counter-claims, and cross-claims) alleging that a Contributor Version
+directly or indirectly infringes any patent, then the rights granted to
+You by any and all Contributors for the Covered Software under Section
+2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all
+end user license agreements (excluding distributors and resellers) which
+have been validly granted by You or Your distributors under this License
+prior to termination shall survive termination.
+
+************************************************************************
+* *
+* 6. Disclaimer of Warranty *
+* ------------------------- *
+* *
+* Covered Software is provided under this License on an "as is" *
+* basis, without warranty of any kind, either expressed, implied, or *
+* statutory, including, without limitation, warranties that the *
+* Covered Software is free of defects, merchantable, fit for a *
+* particular purpose or non-infringing. The entire risk as to the *
+* quality and performance of the Covered Software is with You. *
+* Should any Covered Software prove defective in any respect, You *
+* (not any Contributor) assume the cost of any necessary servicing, *
+* repair, or correction. This disclaimer of warranty constitutes an *
+* essential part of this License. No use of any Covered Software is *
+* authorized under this License except under this disclaimer. *
+* *
+************************************************************************
+
+************************************************************************
+* *
+* 7. Limitation of Liability *
+* -------------------------- *
+* *
+* Under no circumstances and under no legal theory, whether tort *
+* (including negligence), contract, or otherwise, shall any *
+* Contributor, or anyone who distributes Covered Software as *
+* permitted above, be liable to You for any direct, indirect, *
+* special, incidental, or consequential damages of any character *
+* including, without limitation, damages for lost profits, loss of *
+* goodwill, work stoppage, computer failure or malfunction, or any *
+* and all other commercial damages or losses, even if such party *
+* shall have been informed of the possibility of such damages. This *
+* limitation of liability shall not apply to liability for death or *
+* personal injury resulting from such party's negligence to the *
+* extent applicable law prohibits such limitation. Some *
+* jurisdictions do not allow the exclusion or limitation of *
+* incidental or consequential damages, so this exclusion and *
+* limitation may not apply to You. *
+* *
+************************************************************************
+
+8. Litigation
+-------------
+
+Any litigation relating to this License may be brought only in the
+courts of a jurisdiction where the defendant maintains its principal
+place of business and such litigation shall be governed by laws of that
+jurisdiction, without reference to its conflict-of-law provisions.
+Nothing in this Section shall prevent a party's ability to bring
+cross-claims or counter-claims.
+
+9. Miscellaneous
+----------------
+
+This License represents the complete agreement concerning the subject
+matter hereof. If any provision of this License is held to be
+unenforceable, such provision shall be reformed only to the extent
+necessary to make it enforceable. Any law or regulation which provides
+that the language of a contract shall be construed against the drafter
+shall not be used to construe this License against a Contributor.
+
+10. Versions of the License
+---------------------------
+
+10.1. New Versions
+
+Mozilla Foundation is the license steward. Except as provided in Section
+10.3, no one other than the license steward has the right to modify or
+publish new versions of this License. Each version will be given a
+distinguishing version number.
+
+10.2. Effect of New Versions
+
+You may distribute the Covered Software under the terms of the version
+of the License under which You originally received the Covered Software,
+or under the terms of any subsequent version published by the license
+steward.
+
+10.3. Modified Versions
+
+If you create software not governed by this License, and you want to
+create a new license for such software, you may create and use a
+modified version of this License if you rename the license and remove
+any references to the name of the license steward (except to note that
+such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+Licenses
+
+If You choose to distribute Source Code Form that is Incompatible With
+Secondary Licenses under the terms of this version of the License, the
+notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+-------------------------------------------
+
+ This Source Code Form is subject to the terms of the Mozilla Public
+ License, v. 2.0. If a copy of the MPL was not distributed with this
+ file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular
+file, then You may include the notice in a location (such as a LICENSE
+file in a relevant directory) where a recipient would be likely to look
+for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+---------------------------------------------------------
+
+ This Source Code Form is "Incompatible With Secondary Licenses", as
+ defined by the Mozilla Public License, v. 2.0.
diff --git a/deps/amqp_client/Makefile b/deps/amqp_client/Makefile
new file mode 100644
index 0000000000..b15fd918f0
--- /dev/null
+++ b/deps/amqp_client/Makefile
@@ -0,0 +1,198 @@
+PROJECT = amqp_client
+PROJECT_DESCRIPTION = RabbitMQ AMQP Client
+PROJECT_MOD = amqp_client
+PROJECT_REGISTERED = amqp_sup
+
+define PROJECT_ENV
+[
+ {prefer_ipv6, false},
+ {ssl_options, []},
+ {writer_gc_threshold, 1000000000}
+ ]
+endef
+
+define PROJECT_APP_EXTRA_KEYS
+%% Hex.pm package information.
+ {licenses, ["MPL 2.0"]},
+ {links, [
+ {"Website", "https://www.rabbitmq.com/"},
+ {"GitHub", "https://github.com/rabbitmq/rabbitmq-erlang-client"},
+ {"User guide", "https://www.rabbitmq.com/erlang-client-user-guide.html"}
+ ]},
+ {build_tools, ["make", "rebar3"]},
+ {files, [
+ $(RABBITMQ_HEXPM_DEFAULT_FILES)
+ ]}
+endef
+
+# Release artifacts are put in $(PACKAGES_DIR).
+PACKAGES_DIR ?= $(abspath PACKAGES)
+
+LOCAL_DEPS = xmerl
+DEPS = rabbit_common
+TEST_DEPS = rabbitmq_ct_helpers rabbit
+
+DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-test.mk
+DEP_PLUGINS = rabbit_common/mk/rabbitmq-build.mk \
+ rabbit_common/mk/rabbitmq-hexpm.mk \
+ rabbit_common/mk/rabbitmq-dist.mk \
+ rabbit_common/mk/rabbitmq-run.mk \
+ rabbit_common/mk/rabbitmq-test.mk \
+ rabbit_common/mk/rabbitmq-tools.mk
+
+PLT_APPS = ssl public_key
+
+# FIXME: Use erlang.mk patched for RabbitMQ, while waiting for PRs to be
+# reviewed and merged.
+
+ERLANG_MK_REPO = https://github.com/rabbitmq/erlang.mk.git
+ERLANG_MK_COMMIT = rabbitmq-tmp
+
+WITHOUT = plugins/proper
+
+include rabbitmq-components.mk
+include erlang.mk
+
+# --------------------------------------------------------------------
+# Distribution.
+# --------------------------------------------------------------------
+
+.PHONY: distribution
+
+distribution: docs source-dist package
+
+docs:: edoc
+edoc: doc/overview.edoc
+
+doc/overview.edoc: src/overview.edoc.in
+ mkdir -p doc
+ sed -e 's:%%VERSION%%:$(PROJECT_VERSION):g' < $< > $@
+
+.PHONY: source-dist clean-source-dist
+
+SOURCE_DIST_BASE ?= $(PROJECT)
+SOURCE_DIST_SUFFIXES ?= tar.xz zip
+SOURCE_DIST ?= $(PACKAGES_DIR)/$(SOURCE_DIST_BASE)-$(PROJECT_VERSION)-src
+
+# The first source distribution file is used by packages: if the archive
+# type changes, you must update all packages' Makefiles.
+SOURCE_DIST_FILES = $(addprefix $(SOURCE_DIST).,$(SOURCE_DIST_SUFFIXES))
+
+.PHONY: $(SOURCE_DIST_FILES)
+
+source-dist: $(SOURCE_DIST_FILES)
+ @:
+
+RSYNC ?= rsync
+RSYNC_V_0 =
+RSYNC_V_1 = -v
+RSYNC_V_2 = -v
+RSYNC_V = $(RSYNC_V_$(V))
+RSYNC_FLAGS += -a $(RSYNC_V) \
+ --exclude '.sw?' --exclude '.*.sw?' \
+ --exclude '*.beam' \
+ --exclude '*.d' \
+ --exclude '*.pyc' \
+ --exclude '.git*' \
+ --exclude '.hg*' \
+ --exclude '.travis.yml' \
+ --exclude '.*.plt' \
+ --exclude '$(notdir $(ERLANG_MK_TMP))' \
+ --exclude 'cover/' \
+ --exclude 'deps/' \
+ --exclude 'ebin/' \
+ --exclude 'erl_crash.dump' \
+ --exclude '$(notdir $(DEPS_DIR))/' \
+ --exclude 'doc/' \
+ --exclude 'hexer*' \
+ --exclude 'logs/' \
+ --exclude 'plugins/' \
+ --exclude '$(notdir $(DIST_DIR))/' \
+ --exclude 'test' \
+ --exclude 'xrefr' \
+ --exclude '/$(notdir $(PACKAGES_DIR))/' \
+ --exclude '/PACKAGES/' \
+ --delete \
+ --delete-excluded
+
+TAR ?= tar
+TAR_V_0 =
+TAR_V_1 = -v
+TAR_V_2 = -v
+TAR_V = $(TAR_V_$(V))
+
+GZIP ?= gzip
+BZIP2 ?= bzip2
+XZ ?= xz
+
+ZIP ?= zip
+ZIP_V_0 = -q
+ZIP_V_1 =
+ZIP_V_2 =
+ZIP_V = $(ZIP_V_$(V))
+
+.PHONY: $(SOURCE_DIST)
+
+$(SOURCE_DIST): $(ERLANG_MK_RECURSIVE_DEPS_LIST)
+ $(verbose) mkdir -p $(dir $@)
+ $(gen_verbose) $(RSYNC) $(RSYNC_FLAGS) ./ $@/
+ $(verbose) echo "$(PROJECT_DESCRIPTION) $(PROJECT_VERSION)" > $@/git-revisions.txt
+ $(verbose) echo "$(PROJECT) $$(git rev-parse HEAD) $$(git describe --tags --exact-match 2>/dev/null || git symbolic-ref -q --short HEAD)" >> $@/git-revisions.txt
+ $(verbose) for dep in $$(cat $(ERLANG_MK_RECURSIVE_DEPS_LIST) | grep -v '/$(PROJECT)$$' | LC_COLLATE=C sort); do \
+ $(RSYNC) $(RSYNC_FLAGS) \
+ $$dep \
+ $@/deps; \
+ if test -f $@/deps/$$(basename $$dep)/erlang.mk && \
+ test "$$(wc -l $@/deps/$$(basename $$dep)/erlang.mk | awk '{print $$1;}')" = "1" && \
+ grep -qs -E "^[[:blank:]]*include[[:blank:]]+(erlang\.mk|.*/erlang\.mk)$$" $@/deps/$$(basename $$dep)/erlang.mk; then \
+ echo "include ../../erlang.mk" > $@/deps/$$(basename $$dep)/erlang.mk; \
+ fi; \
+ sed -E -i.bak "s|^[[:blank:]]*include[[:blank:]]+\.\./.*erlang.mk$$|include ../../erlang.mk|" \
+ $@/deps/$$(basename $$dep)/Makefile && \
+ rm $@/deps/$$(basename $$dep)/Makefile.bak; \
+ (cd $$dep; echo "$$(basename "$$dep") $$(git rev-parse HEAD) $$(git describe --tags --exact-match 2>/dev/null || git symbolic-ref -q --short HEAD)") >> $@/git-revisions.txt; \
+ done
+ $(verbose) for file in $$(find $@ -name '*.app.src'); do \
+ sed -E -i.bak \
+ -e 's/[{]vsn[[:blank:]]*,[[:blank:]]*(""|"0.0.0")[[:blank:]]*}/{vsn, "$(PROJECT_VERSION)"}/' \
+ $$file; \
+ rm $$file.bak; \
+ done
+ $(verbose) rm $@/README.in
+ $(verbose) cp README.in $@/README
+ $(verbose) if test "$(BUILD_DOC)"; then cat "$(BUILD_DOC)" >> $@/README; fi
+
+# TODO: Fix file timestamps to have reproducible source archives.
+# $(verbose) find $@ -not -name 'git-revisions.txt' -print0 | xargs -0 touch -r $@/git-revisions.txt
+
+$(SOURCE_DIST).tar.gz: $(SOURCE_DIST)
+ $(gen_verbose) cd $(dir $(SOURCE_DIST)) && \
+ find $(notdir $(SOURCE_DIST)) -print0 | LC_COLLATE=C sort -z | \
+ xargs -0 $(TAR) $(TAR_V) --no-recursion -cf - | \
+ $(GZIP) --best > $@
+
+$(SOURCE_DIST).tar.bz2: $(SOURCE_DIST)
+ $(gen_verbose) cd $(dir $(SOURCE_DIST)) && \
+ find $(notdir $(SOURCE_DIST)) -print0 | LC_COLLATE=C sort -z | \
+ xargs -0 $(TAR) $(TAR_V) --no-recursion -cf - | \
+ $(BZIP2) > $@
+
+$(SOURCE_DIST).tar.xz: $(SOURCE_DIST)
+ $(gen_verbose) cd $(dir $(SOURCE_DIST)) && \
+ find $(notdir $(SOURCE_DIST)) -print0 | LC_COLLATE=C sort -z | \
+ xargs -0 $(TAR) $(TAR_V) --no-recursion -cf - | \
+ $(XZ) > $@
+
+$(SOURCE_DIST).zip: $(SOURCE_DIST)
+ $(verbose) rm -f $@
+ $(gen_verbose) cd $(dir $(SOURCE_DIST)) && \
+ find $(notdir $(SOURCE_DIST)) -print0 | LC_COLLATE=C sort -z | \
+ xargs -0 $(ZIP) $(ZIP_V) $@
+
+clean:: clean-source-dist
+
+clean-source-dist:
+ $(gen_verbose) rm -rf -- $(SOURCE_DIST_BASE)-*
+
+package: dist
+ cp $(DIST_DIR)/*.ez $(PACKAGES_DIR)
diff --git a/deps/amqp_client/README.in b/deps/amqp_client/README.in
new file mode 100755
index 0000000000..d9d9cdf1b6
--- /dev/null
+++ b/deps/amqp_client/README.in
@@ -0,0 +1,10 @@
+README.in:
+
+Please see https://www.rabbitmq.com/build-erlang-client.html for build
+instructions.
+
+For your convenience, a text copy of these instructions is available
+below. Please be aware that the instructions here may not be as up to
+date as those at the above URL.
+
+===========================================================================
diff --git a/deps/amqp_client/erlang.mk b/deps/amqp_client/erlang.mk
new file mode 100644
index 0000000000..defddc4865
--- /dev/null
+++ b/deps/amqp_client/erlang.mk
@@ -0,0 +1,7746 @@
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+.PHONY: all app apps deps search rel relup docs install-docs check tests clean distclean help erlang-mk
+
+ERLANG_MK_FILENAME := $(realpath $(lastword $(MAKEFILE_LIST)))
+export ERLANG_MK_FILENAME
+
+ERLANG_MK_VERSION = 2019.07.01-40-geb3e4b0
+ERLANG_MK_WITHOUT = plugins/proper
+
+# Make 3.81 and 3.82 are deprecated.
+
+ifeq ($(MAKELEVEL)$(MAKE_VERSION),03.81)
+$(warning Please upgrade to GNU Make 4 or later: https://erlang.mk/guide/installation.html)
+endif
+
+ifeq ($(MAKELEVEL)$(MAKE_VERSION),03.82)
+$(warning Please upgrade to GNU Make 4 or later: https://erlang.mk/guide/installation.html)
+endif
+
+# Core configuration.
+
+PROJECT ?= $(notdir $(CURDIR))
+PROJECT := $(strip $(PROJECT))
+
+PROJECT_VERSION ?= rolling
+PROJECT_MOD ?= $(PROJECT)_app
+PROJECT_ENV ?= []
+
+# Verbosity.
+
+V ?= 0
+
+verbose_0 = @
+verbose_2 = set -x;
+verbose = $(verbose_$(V))
+
+ifeq ($(V),3)
+SHELL := $(SHELL) -x
+endif
+
+gen_verbose_0 = @echo " GEN " $@;
+gen_verbose_2 = set -x;
+gen_verbose = $(gen_verbose_$(V))
+
+gen_verbose_esc_0 = @echo " GEN " $$@;
+gen_verbose_esc_2 = set -x;
+gen_verbose_esc = $(gen_verbose_esc_$(V))
+
+# Temporary files directory.
+
+ERLANG_MK_TMP ?= $(CURDIR)/.erlang.mk
+export ERLANG_MK_TMP
+
+# "erl" command.
+
+ERL = erl +A1 -noinput -boot no_dot_erlang
+
+# Platform detection.
+
+ifeq ($(PLATFORM),)
+UNAME_S := $(shell uname -s)
+
+ifeq ($(UNAME_S),Linux)
+PLATFORM = linux
+else ifeq ($(UNAME_S),Darwin)
+PLATFORM = darwin
+else ifeq ($(UNAME_S),SunOS)
+PLATFORM = solaris
+else ifeq ($(UNAME_S),GNU)
+PLATFORM = gnu
+else ifeq ($(UNAME_S),FreeBSD)
+PLATFORM = freebsd
+else ifeq ($(UNAME_S),NetBSD)
+PLATFORM = netbsd
+else ifeq ($(UNAME_S),OpenBSD)
+PLATFORM = openbsd
+else ifeq ($(UNAME_S),DragonFly)
+PLATFORM = dragonfly
+else ifeq ($(shell uname -o),Msys)
+PLATFORM = msys2
+else
+$(error Unable to detect platform. Please open a ticket with the output of uname -a.)
+endif
+
+export PLATFORM
+endif
+
+# Core targets.
+
+all:: deps app rel
+
+# Noop to avoid a Make warning when there's nothing to do.
+rel::
+ $(verbose) :
+
+relup:: deps app
+
+check:: tests
+
+clean:: clean-crashdump
+
+clean-crashdump:
+ifneq ($(wildcard erl_crash.dump),)
+ $(gen_verbose) rm -f erl_crash.dump
+endif
+
+distclean:: clean distclean-tmp
+
+$(ERLANG_MK_TMP):
+ $(verbose) mkdir -p $(ERLANG_MK_TMP)
+
+distclean-tmp:
+ $(gen_verbose) rm -rf $(ERLANG_MK_TMP)
+
+help::
+ $(verbose) printf "%s\n" \
+ "erlang.mk (version $(ERLANG_MK_VERSION)) is distributed under the terms of the ISC License." \
+ "Copyright (c) 2013-2016 Loïc Hoguin <essen@ninenines.eu>" \
+ "" \
+ "Usage: [V=1] $(MAKE) [target]..." \
+ "" \
+ "Core targets:" \
+ " all Run deps, app and rel targets in that order" \
+ " app Compile the project" \
+ " deps Fetch dependencies (if needed) and compile them" \
+ " fetch-deps Fetch dependencies recursively (if needed) without compiling them" \
+ " list-deps List dependencies recursively on stdout" \
+ " search q=... Search for a package in the built-in index" \
+ " rel Build a release for this project, if applicable" \
+ " docs Build the documentation for this project" \
+ " install-docs Install the man pages for this project" \
+ " check Compile and run all tests and analysis for this project" \
+ " tests Run the tests for this project" \
+ " clean Delete temporary and output files from most targets" \
+ " distclean Delete all temporary and output files" \
+ " help Display this help and exit" \
+ " erlang-mk Update erlang.mk to the latest version"
+
+# Core functions.
+
+empty :=
+space := $(empty) $(empty)
+tab := $(empty) $(empty)
+comma := ,
+
+define newline
+
+
+endef
+
+define comma_list
+$(subst $(space),$(comma),$(strip $(1)))
+endef
+
+define escape_dquotes
+$(subst ",\",$1)
+endef
+
+# Adding erlang.mk to make Erlang scripts that call init:get_plain_arguments() happy.
+define erlang
+$(ERL) $2 -pz $(ERLANG_MK_TMP)/rebar/ebin -eval "$(subst $(newline),,$(call escape_dquotes,$1))" -- erlang.mk
+endef
+
+ifeq ($(PLATFORM),msys2)
+core_native_path = $(shell cygpath -m $1)
+else
+core_native_path = $1
+endif
+
+core_http_get = curl -Lf$(if $(filter-out 0,$(V)),,s)o $(call core_native_path,$1) $2
+
+core_eq = $(and $(findstring $(1),$(2)),$(findstring $(2),$(1)))
+
+# We skip files that contain spaces because they end up causing issues.
+core_find = $(if $(wildcard $1),$(shell find $(1:%/=%) \( -type l -o -type f \) -name $(subst *,\*,$2) | grep -v " "))
+
+core_lc = $(subst A,a,$(subst B,b,$(subst C,c,$(subst D,d,$(subst E,e,$(subst F,f,$(subst G,g,$(subst H,h,$(subst I,i,$(subst J,j,$(subst K,k,$(subst L,l,$(subst M,m,$(subst N,n,$(subst O,o,$(subst P,p,$(subst Q,q,$(subst R,r,$(subst S,s,$(subst T,t,$(subst U,u,$(subst V,v,$(subst W,w,$(subst X,x,$(subst Y,y,$(subst Z,z,$(1)))))))))))))))))))))))))))
+
+core_ls = $(filter-out $(1),$(shell echo $(1)))
+
+# @todo Use a solution that does not require using perl.
+core_relpath = $(shell perl -e 'use File::Spec; print File::Spec->abs2rel(@ARGV) . "\n"' $1 $2)
+
+define core_render
+ printf -- '$(subst $(newline),\n,$(subst %,%%,$(subst ','\'',$(subst $(tab),$(WS),$(call $(1))))))\n' > $(2)
+endef
+
+# Automated update.
+
+ERLANG_MK_REPO ?= https://github.com/ninenines/erlang.mk
+ERLANG_MK_COMMIT ?=
+ERLANG_MK_BUILD_CONFIG ?= build.config
+ERLANG_MK_BUILD_DIR ?= .erlang.mk.build
+
+erlang-mk: WITHOUT ?= $(ERLANG_MK_WITHOUT)
+erlang-mk:
+ifdef ERLANG_MK_COMMIT
+ $(verbose) git clone $(ERLANG_MK_REPO) $(ERLANG_MK_BUILD_DIR)
+ $(verbose) cd $(ERLANG_MK_BUILD_DIR) && git checkout $(ERLANG_MK_COMMIT)
+else
+ $(verbose) git clone --depth 1 $(ERLANG_MK_REPO) $(ERLANG_MK_BUILD_DIR)
+endif
+ $(verbose) if [ -f $(ERLANG_MK_BUILD_CONFIG) ]; then cp $(ERLANG_MK_BUILD_CONFIG) $(ERLANG_MK_BUILD_DIR)/build.config; fi
+ $(gen_verbose) $(MAKE) --no-print-directory -C $(ERLANG_MK_BUILD_DIR) WITHOUT='$(strip $(WITHOUT))' UPGRADE=1
+ $(verbose) cp $(ERLANG_MK_BUILD_DIR)/erlang.mk ./erlang.mk
+ $(verbose) rm -rf $(ERLANG_MK_BUILD_DIR)
+ $(verbose) rm -rf $(ERLANG_MK_TMP)
+
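+# Usage sketch: running "make erlang-mk" clones ERLANG_MK_REPO and rebuilds a
+# fresh erlang.mk in place; a hypothetical pin looks like
+# "make erlang-mk ERLANG_MK_COMMIT=<git-ref>", and WITHOUT="plugin1 plugin2"
+# (illustrative names) excludes plugins from the generated file.
+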
+# The erlang.mk package index is bundled in the default erlang.mk build.
+# Search for the string "copyright" to skip to the rest of the code.
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-kerl
+
+KERL_INSTALL_DIR ?= $(HOME)/erlang
+
+ifeq ($(strip $(KERL)),)
+KERL := $(ERLANG_MK_TMP)/kerl/kerl
+endif
+
+KERL_DIR = $(ERLANG_MK_TMP)/kerl
+
+export KERL
+
+KERL_GIT ?= https://github.com/kerl/kerl
+KERL_COMMIT ?= master
+
+KERL_MAKEFLAGS ?=
+
+OTP_GIT ?= https://github.com/erlang/otp
+
+define kerl_otp_target
+$(KERL_INSTALL_DIR)/$(1): $(KERL)
+ $(verbose) if [ ! -d $$@ ]; then \
+ MAKEFLAGS="$(KERL_MAKEFLAGS)" $(KERL) build git $(OTP_GIT) $(1) $(1); \
+ $(KERL) install $(1) $(KERL_INSTALL_DIR)/$(1); \
+ fi
+endef
+
+define kerl_hipe_target
+$(KERL_INSTALL_DIR)/$1-native: $(KERL)
+ $(verbose) if [ ! -d $$@ ]; then \
+ KERL_CONFIGURE_OPTIONS=--enable-native-libs \
+ MAKEFLAGS="$(KERL_MAKEFLAGS)" $(KERL) build git $(OTP_GIT) $1 $1-native; \
+ $(KERL) install $1-native $(KERL_INSTALL_DIR)/$1-native; \
+ fi
+endef
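+
+# Usage sketch: these templates are instantiated further down via
+# $(eval $(call kerl_otp_target,$(ERLANG_OTP))). For a hypothetical OTP-20.3,
+# that defines the target $(KERL_INSTALL_DIR)/OTP-20.3, which kerl builds from
+# OTP_GIT and installs only when the directory is missing; kerl_hipe_target does
+# the same for a -native build configured with --enable-native-libs.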
+
+$(KERL): $(KERL_DIR)
+
+$(KERL_DIR): | $(ERLANG_MK_TMP)
+ $(gen_verbose) git clone --depth 1 $(KERL_GIT) $(ERLANG_MK_TMP)/kerl
+ $(verbose) cd $(ERLANG_MK_TMP)/kerl && git checkout $(KERL_COMMIT)
+ $(verbose) chmod +x $(KERL)
+
+distclean:: distclean-kerl
+
+distclean-kerl:
+ $(gen_verbose) rm -rf $(KERL_DIR)
+
+# Allow users to select which version of Erlang/OTP to use for a project.
+
+ifneq ($(strip $(LATEST_ERLANG_OTP)),)
+# In some environments it is necessary to filter out master.
+ERLANG_OTP := $(notdir $(lastword $(sort\
+ $(filter-out $(KERL_INSTALL_DIR)/master $(KERL_INSTALL_DIR)/OTP_R%,\
+ $(filter-out %-rc1 %-rc2 %-rc3,$(wildcard $(KERL_INSTALL_DIR)/*[^-native]))))))
+endif
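+
+# Usage sketch: with hypothetical installs OTP-19.3, OTP-20.3, OTP-21.0-rc1 and
+# master under $(KERL_INSTALL_DIR), LATEST_ERLANG_OTP=1 resolves ERLANG_OTP to
+# OTP-20.3: master, OTP_R%-era releases, release candidates and (approximately,
+# via the glob) -native builds are filtered out, then the last entry in sort
+# order wins.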
+
+ERLANG_OTP ?=
+ERLANG_HIPE ?=
+
+# Use kerl to enforce a specific Erlang/OTP version for a project.
+ifneq ($(strip $(ERLANG_OTP)),)
+export PATH := $(KERL_INSTALL_DIR)/$(ERLANG_OTP)/bin:$(PATH)
+SHELL := env PATH=$(PATH) $(SHELL)
+$(eval $(call kerl_otp_target,$(ERLANG_OTP)))
+
+# Build Erlang/OTP only if it doesn't already exist.
+ifeq ($(wildcard $(KERL_INSTALL_DIR)/$(ERLANG_OTP))$(BUILD_ERLANG_OTP),)
+$(info Building Erlang/OTP $(ERLANG_OTP)... Please wait...)
+$(shell $(MAKE) $(KERL_INSTALL_DIR)/$(ERLANG_OTP) ERLANG_OTP=$(ERLANG_OTP) BUILD_ERLANG_OTP=1 >&2)
+endif
+
+else
+# Same for a HiPE enabled VM.
+ifneq ($(strip $(ERLANG_HIPE)),)
+export PATH := $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native/bin:$(PATH)
+SHELL := env PATH=$(PATH) $(SHELL)
+$(eval $(call kerl_hipe_target,$(ERLANG_HIPE)))
+
+# Build Erlang/OTP only if it doesn't already exist.
+ifeq ($(wildcard $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native)$(BUILD_ERLANG_OTP),)
+$(info Building HiPE-enabled Erlang/OTP $(ERLANG_HIPE)... Please wait...)
+$(shell $(MAKE) $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native ERLANG_HIPE=$(ERLANG_HIPE) BUILD_ERLANG_OTP=1 >&2)
+endif
+
+endif
+endif
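+
+# Usage sketch: a project Makefile may set a hypothetical "ERLANG_OTP = OTP-20.3"
+# (or "ERLANG_HIPE = OTP-20.3") before including erlang.mk; PATH and SHELL are
+# then rewritten so every recipe runs the kerl-installed VM, and a missing
+# install is built by the $(shell $(MAKE) ...) call at Makefile-parse time.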
+
+PACKAGES += aberth
+pkg_aberth_name = aberth
+pkg_aberth_description = Generic BERT-RPC server in Erlang
+pkg_aberth_homepage = https://github.com/a13x/aberth
+pkg_aberth_fetch = git
+pkg_aberth_repo = https://github.com/a13x/aberth
+pkg_aberth_commit = master
+
+PACKAGES += active
+pkg_active_name = active
+pkg_active_description = Active development for Erlang: rebuild and reload source/binary files while the VM is running
+pkg_active_homepage = https://github.com/proger/active
+pkg_active_fetch = git
+pkg_active_repo = https://github.com/proger/active
+pkg_active_commit = master
+
+PACKAGES += actordb_core
+pkg_actordb_core_name = actordb_core
+pkg_actordb_core_description = ActorDB main source
+pkg_actordb_core_homepage = http://www.actordb.com/
+pkg_actordb_core_fetch = git
+pkg_actordb_core_repo = https://github.com/biokoda/actordb_core
+pkg_actordb_core_commit = master
+
+PACKAGES += actordb_thrift
+pkg_actordb_thrift_name = actordb_thrift
+pkg_actordb_thrift_description = Thrift API for ActorDB
+pkg_actordb_thrift_homepage = http://www.actordb.com/
+pkg_actordb_thrift_fetch = git
+pkg_actordb_thrift_repo = https://github.com/biokoda/actordb_thrift
+pkg_actordb_thrift_commit = master
+
+PACKAGES += aleppo
+pkg_aleppo_name = aleppo
+pkg_aleppo_description = Alternative Erlang Pre-Processor
+pkg_aleppo_homepage = https://github.com/ErlyORM/aleppo
+pkg_aleppo_fetch = git
+pkg_aleppo_repo = https://github.com/ErlyORM/aleppo
+pkg_aleppo_commit = master
+
+PACKAGES += alog
+pkg_alog_name = alog
+pkg_alog_description = Simply the best logging framework for Erlang
+pkg_alog_homepage = https://github.com/siberian-fast-food/alogger
+pkg_alog_fetch = git
+pkg_alog_repo = https://github.com/siberian-fast-food/alogger
+pkg_alog_commit = master
+
+PACKAGES += amqp_client
+pkg_amqp_client_name = amqp_client
+pkg_amqp_client_description = RabbitMQ Erlang AMQP client
+pkg_amqp_client_homepage = https://www.rabbitmq.com/erlang-client-user-guide.html
+pkg_amqp_client_fetch = git
+pkg_amqp_client_repo = https://github.com/rabbitmq/rabbitmq-erlang-client.git
+pkg_amqp_client_commit = master
+
+PACKAGES += annotations
+pkg_annotations_name = annotations
+pkg_annotations_description = Simple code instrumentation utilities
+pkg_annotations_homepage = https://github.com/hyperthunk/annotations
+pkg_annotations_fetch = git
+pkg_annotations_repo = https://github.com/hyperthunk/annotations
+pkg_annotations_commit = master
+
+PACKAGES += antidote
+pkg_antidote_name = antidote
+pkg_antidote_description = Large-scale computation without synchronisation
+pkg_antidote_homepage = https://syncfree.lip6.fr/
+pkg_antidote_fetch = git
+pkg_antidote_repo = https://github.com/SyncFree/antidote
+pkg_antidote_commit = master
+
+PACKAGES += apns
+pkg_apns_name = apns
+pkg_apns_description = Apple Push Notification Server for Erlang
+pkg_apns_homepage = http://inaka.github.com/apns4erl
+pkg_apns_fetch = git
+pkg_apns_repo = https://github.com/inaka/apns4erl
+pkg_apns_commit = master
+
+PACKAGES += asciideck
+pkg_asciideck_name = asciideck
+pkg_asciideck_description = Asciidoc for Erlang.
+pkg_asciideck_homepage = https://ninenines.eu
+pkg_asciideck_fetch = git
+pkg_asciideck_repo = https://github.com/ninenines/asciideck
+pkg_asciideck_commit = master
+
+PACKAGES += azdht
+pkg_azdht_name = azdht
+pkg_azdht_description = Azureus Distributed Hash Table (DHT) in Erlang
+pkg_azdht_homepage = https://github.com/arcusfelis/azdht
+pkg_azdht_fetch = git
+pkg_azdht_repo = https://github.com/arcusfelis/azdht
+pkg_azdht_commit = master
+
+PACKAGES += backoff
+pkg_backoff_name = backoff
+pkg_backoff_description = Simple exponential backoffs in Erlang
+pkg_backoff_homepage = https://github.com/ferd/backoff
+pkg_backoff_fetch = git
+pkg_backoff_repo = https://github.com/ferd/backoff
+pkg_backoff_commit = master
+
+PACKAGES += barrel_tcp
+pkg_barrel_tcp_name = barrel_tcp
+pkg_barrel_tcp_description = barrel is a generic TCP acceptor pool with low latency in Erlang.
+pkg_barrel_tcp_homepage = https://github.com/benoitc-attic/barrel_tcp
+pkg_barrel_tcp_fetch = git
+pkg_barrel_tcp_repo = https://github.com/benoitc-attic/barrel_tcp
+pkg_barrel_tcp_commit = master
+
+PACKAGES += basho_bench
+pkg_basho_bench_name = basho_bench
+pkg_basho_bench_description = A load-generation and testing tool for basically whatever you can write a returning Erlang function for.
+pkg_basho_bench_homepage = https://github.com/basho/basho_bench
+pkg_basho_bench_fetch = git
+pkg_basho_bench_repo = https://github.com/basho/basho_bench
+pkg_basho_bench_commit = master
+
+PACKAGES += bcrypt
+pkg_bcrypt_name = bcrypt
+pkg_bcrypt_description = Bcrypt Erlang / C library
+pkg_bcrypt_homepage = https://github.com/erlangpack/bcrypt
+pkg_bcrypt_fetch = git
+pkg_bcrypt_repo = https://github.com/erlangpack/bcrypt.git
+pkg_bcrypt_commit = master
+
+PACKAGES += beam
+pkg_beam_name = beam
+pkg_beam_description = BEAM emulator written in Erlang
+pkg_beam_homepage = https://github.com/tonyrog/beam
+pkg_beam_fetch = git
+pkg_beam_repo = https://github.com/tonyrog/beam
+pkg_beam_commit = master
+
+PACKAGES += beanstalk
+pkg_beanstalk_name = beanstalk
+pkg_beanstalk_description = An Erlang client for beanstalkd
+pkg_beanstalk_homepage = https://github.com/tim/erlang-beanstalk
+pkg_beanstalk_fetch = git
+pkg_beanstalk_repo = https://github.com/tim/erlang-beanstalk
+pkg_beanstalk_commit = master
+
+PACKAGES += bear
+pkg_bear_name = bear
+pkg_bear_description = a set of statistics functions for erlang
+pkg_bear_homepage = https://github.com/boundary/bear
+pkg_bear_fetch = git
+pkg_bear_repo = https://github.com/boundary/bear
+pkg_bear_commit = master
+
+PACKAGES += bertconf
+pkg_bertconf_name = bertconf
+pkg_bertconf_description = Make ETS tables out of static BERT files that are auto-reloaded
+pkg_bertconf_homepage = https://github.com/ferd/bertconf
+pkg_bertconf_fetch = git
+pkg_bertconf_repo = https://github.com/ferd/bertconf
+pkg_bertconf_commit = master
+
+PACKAGES += bifrost
+pkg_bifrost_name = bifrost
+pkg_bifrost_description = Erlang FTP Server Framework
+pkg_bifrost_homepage = https://github.com/thorstadt/bifrost
+pkg_bifrost_fetch = git
+pkg_bifrost_repo = https://github.com/thorstadt/bifrost
+pkg_bifrost_commit = master
+
+PACKAGES += binpp
+pkg_binpp_name = binpp
+pkg_binpp_description = Erlang Binary Pretty Printer
+pkg_binpp_homepage = https://github.com/jtendo/binpp
+pkg_binpp_fetch = git
+pkg_binpp_repo = https://github.com/jtendo/binpp
+pkg_binpp_commit = master
+
+PACKAGES += bisect
+pkg_bisect_name = bisect
+pkg_bisect_description = Ordered fixed-size binary dictionary in Erlang
+pkg_bisect_homepage = https://github.com/knutin/bisect
+pkg_bisect_fetch = git
+pkg_bisect_repo = https://github.com/knutin/bisect
+pkg_bisect_commit = master
+
+PACKAGES += bitcask
+pkg_bitcask_name = bitcask
+pkg_bitcask_description = because you need another key/value storage engine
+pkg_bitcask_homepage = https://github.com/basho/bitcask
+pkg_bitcask_fetch = git
+pkg_bitcask_repo = https://github.com/basho/bitcask
+pkg_bitcask_commit = develop
+
+PACKAGES += bitstore
+pkg_bitstore_name = bitstore
+pkg_bitstore_description = A document based ontology development environment
+pkg_bitstore_homepage = https://github.com/bdionne/bitstore
+pkg_bitstore_fetch = git
+pkg_bitstore_repo = https://github.com/bdionne/bitstore
+pkg_bitstore_commit = master
+
+PACKAGES += bootstrap
+pkg_bootstrap_name = bootstrap
+pkg_bootstrap_description = A simple, yet powerful Erlang cluster bootstrapping application.
+pkg_bootstrap_homepage = https://github.com/schlagert/bootstrap
+pkg_bootstrap_fetch = git
+pkg_bootstrap_repo = https://github.com/schlagert/bootstrap
+pkg_bootstrap_commit = master
+
+PACKAGES += boss
+pkg_boss_name = boss
+pkg_boss_description = Erlang web MVC, now featuring Comet
+pkg_boss_homepage = https://github.com/ChicagoBoss/ChicagoBoss
+pkg_boss_fetch = git
+pkg_boss_repo = https://github.com/ChicagoBoss/ChicagoBoss
+pkg_boss_commit = master
+
+PACKAGES += boss_db
+pkg_boss_db_name = boss_db
+pkg_boss_db_description = BossDB: a sharded, caching, pooling, evented ORM for Erlang
+pkg_boss_db_homepage = https://github.com/ErlyORM/boss_db
+pkg_boss_db_fetch = git
+pkg_boss_db_repo = https://github.com/ErlyORM/boss_db
+pkg_boss_db_commit = master
+
+PACKAGES += brod
+pkg_brod_name = brod
+pkg_brod_description = Kafka client in Erlang
+pkg_brod_homepage = https://github.com/klarna/brod
+pkg_brod_fetch = git
+pkg_brod_repo = https://github.com/klarna/brod.git
+pkg_brod_commit = master
+
+PACKAGES += bson
+pkg_bson_name = bson
+pkg_bson_description = BSON documents in Erlang, see bsonspec.org
+pkg_bson_homepage = https://github.com/comtihon/bson-erlang
+pkg_bson_fetch = git
+pkg_bson_repo = https://github.com/comtihon/bson-erlang
+pkg_bson_commit = master
+
+PACKAGES += bullet
+pkg_bullet_name = bullet
+pkg_bullet_description = Simple, reliable, efficient streaming for Cowboy.
+pkg_bullet_homepage = http://ninenines.eu
+pkg_bullet_fetch = git
+pkg_bullet_repo = https://github.com/ninenines/bullet
+pkg_bullet_commit = master
+
+PACKAGES += cache
+pkg_cache_name = cache
+pkg_cache_description = Erlang in-memory cache
+pkg_cache_homepage = https://github.com/fogfish/cache
+pkg_cache_fetch = git
+pkg_cache_repo = https://github.com/fogfish/cache
+pkg_cache_commit = master
+
+PACKAGES += cake
+pkg_cake_name = cake
+pkg_cake_description = Really simple terminal colorization
+pkg_cake_homepage = https://github.com/darach/cake-erl
+pkg_cake_fetch = git
+pkg_cake_repo = https://github.com/darach/cake-erl
+pkg_cake_commit = master
+
+PACKAGES += carotene
+pkg_carotene_name = carotene
+pkg_carotene_description = Real-time server
+pkg_carotene_homepage = https://github.com/carotene/carotene
+pkg_carotene_fetch = git
+pkg_carotene_repo = https://github.com/carotene/carotene
+pkg_carotene_commit = master
+
+PACKAGES += cberl
+pkg_cberl_name = cberl
+pkg_cberl_description = NIF based Erlang bindings for Couchbase
+pkg_cberl_homepage = https://github.com/chitika/cberl
+pkg_cberl_fetch = git
+pkg_cberl_repo = https://github.com/chitika/cberl
+pkg_cberl_commit = master
+
+PACKAGES += cecho
+pkg_cecho_name = cecho
+pkg_cecho_description = An ncurses library for Erlang
+pkg_cecho_homepage = https://github.com/mazenharake/cecho
+pkg_cecho_fetch = git
+pkg_cecho_repo = https://github.com/mazenharake/cecho
+pkg_cecho_commit = master
+
+PACKAGES += cferl
+pkg_cferl_name = cferl
+pkg_cferl_description = Rackspace / OpenStack Cloud Files Erlang Client
+pkg_cferl_homepage = https://github.com/ddossot/cferl
+pkg_cferl_fetch = git
+pkg_cferl_repo = https://github.com/ddossot/cferl
+pkg_cferl_commit = master
+
+PACKAGES += chaos_monkey
+pkg_chaos_monkey_name = chaos_monkey
+pkg_chaos_monkey_description = This is The CHAOS MONKEY. It will kill your processes.
+pkg_chaos_monkey_homepage = https://github.com/dLuna/chaos_monkey
+pkg_chaos_monkey_fetch = git
+pkg_chaos_monkey_repo = https://github.com/dLuna/chaos_monkey
+pkg_chaos_monkey_commit = master
+
+PACKAGES += check_node
+pkg_check_node_name = check_node
+pkg_check_node_description = Nagios Scripts for monitoring Riak
+pkg_check_node_homepage = https://github.com/basho-labs/riak_nagios
+pkg_check_node_fetch = git
+pkg_check_node_repo = https://github.com/basho-labs/riak_nagios
+pkg_check_node_commit = master
+
+PACKAGES += chronos
+pkg_chronos_name = chronos
+pkg_chronos_description = Timer module for Erlang that makes it easy to abstract time out of the tests.
+pkg_chronos_homepage = https://github.com/lehoff/chronos
+pkg_chronos_fetch = git
+pkg_chronos_repo = https://github.com/lehoff/chronos
+pkg_chronos_commit = master
+
+PACKAGES += chumak
+pkg_chumak_name = chumak
+pkg_chumak_description = Pure Erlang implementation of ZeroMQ Message Transport Protocol.
+pkg_chumak_homepage = http://choven.ca
+pkg_chumak_fetch = git
+pkg_chumak_repo = https://github.com/chovencorp/chumak
+pkg_chumak_commit = master
+
+PACKAGES += cl
+pkg_cl_name = cl
+pkg_cl_description = OpenCL binding for Erlang
+pkg_cl_homepage = https://github.com/tonyrog/cl
+pkg_cl_fetch = git
+pkg_cl_repo = https://github.com/tonyrog/cl
+pkg_cl_commit = master
+
+PACKAGES += clique
+pkg_clique_name = clique
+pkg_clique_description = CLI Framework for Erlang
+pkg_clique_homepage = https://github.com/basho/clique
+pkg_clique_fetch = git
+pkg_clique_repo = https://github.com/basho/clique
+pkg_clique_commit = develop
+
+PACKAGES += cloudi_core
+pkg_cloudi_core_name = cloudi_core
+pkg_cloudi_core_description = CloudI internal service runtime
+pkg_cloudi_core_homepage = http://cloudi.org/
+pkg_cloudi_core_fetch = git
+pkg_cloudi_core_repo = https://github.com/CloudI/cloudi_core
+pkg_cloudi_core_commit = master
+
+PACKAGES += cloudi_service_api_requests
+pkg_cloudi_service_api_requests_name = cloudi_service_api_requests
+pkg_cloudi_service_api_requests_description = CloudI Service API requests (JSON-RPC/Erlang-term support)
+pkg_cloudi_service_api_requests_homepage = http://cloudi.org/
+pkg_cloudi_service_api_requests_fetch = git
+pkg_cloudi_service_api_requests_repo = https://github.com/CloudI/cloudi_service_api_requests
+pkg_cloudi_service_api_requests_commit = master
+
+PACKAGES += cloudi_service_db
+pkg_cloudi_service_db_name = cloudi_service_db
+pkg_cloudi_service_db_description = CloudI Database (in-memory/testing/generic)
+pkg_cloudi_service_db_homepage = http://cloudi.org/
+pkg_cloudi_service_db_fetch = git
+pkg_cloudi_service_db_repo = https://github.com/CloudI/cloudi_service_db
+pkg_cloudi_service_db_commit = master
+
+PACKAGES += cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_name = cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_description = Cassandra CloudI Service
+pkg_cloudi_service_db_cassandra_homepage = http://cloudi.org/
+pkg_cloudi_service_db_cassandra_fetch = git
+pkg_cloudi_service_db_cassandra_repo = https://github.com/CloudI/cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_commit = master
+
+PACKAGES += cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_name = cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_description = Cassandra CQL CloudI Service
+pkg_cloudi_service_db_cassandra_cql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_cassandra_cql_fetch = git
+pkg_cloudi_service_db_cassandra_cql_repo = https://github.com/CloudI/cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_commit = master
+
+PACKAGES += cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_name = cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_description = CouchDB CloudI Service
+pkg_cloudi_service_db_couchdb_homepage = http://cloudi.org/
+pkg_cloudi_service_db_couchdb_fetch = git
+pkg_cloudi_service_db_couchdb_repo = https://github.com/CloudI/cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_commit = master
+
+PACKAGES += cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_name = cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_description = elasticsearch CloudI Service
+pkg_cloudi_service_db_elasticsearch_homepage = http://cloudi.org/
+pkg_cloudi_service_db_elasticsearch_fetch = git
+pkg_cloudi_service_db_elasticsearch_repo = https://github.com/CloudI/cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_commit = master
+
+PACKAGES += cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_name = cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_description = memcached CloudI Service
+pkg_cloudi_service_db_memcached_homepage = http://cloudi.org/
+pkg_cloudi_service_db_memcached_fetch = git
+pkg_cloudi_service_db_memcached_repo = https://github.com/CloudI/cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_commit = master
+
+PACKAGES += cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_name = cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_description = MySQL CloudI Service
+pkg_cloudi_service_db_mysql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_mysql_fetch = git
+pkg_cloudi_service_db_mysql_repo = https://github.com/CloudI/cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_commit = master
+
+PACKAGES += cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_name = cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_description = PostgreSQL CloudI Service
+pkg_cloudi_service_db_pgsql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_pgsql_fetch = git
+pkg_cloudi_service_db_pgsql_repo = https://github.com/CloudI/cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_commit = master
+
+PACKAGES += cloudi_service_db_riak
+pkg_cloudi_service_db_riak_name = cloudi_service_db_riak
+pkg_cloudi_service_db_riak_description = Riak CloudI Service
+pkg_cloudi_service_db_riak_homepage = http://cloudi.org/
+pkg_cloudi_service_db_riak_fetch = git
+pkg_cloudi_service_db_riak_repo = https://github.com/CloudI/cloudi_service_db_riak
+pkg_cloudi_service_db_riak_commit = master
+
+PACKAGES += cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_name = cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_description = Tokyo Tyrant CloudI Service
+pkg_cloudi_service_db_tokyotyrant_homepage = http://cloudi.org/
+pkg_cloudi_service_db_tokyotyrant_fetch = git
+pkg_cloudi_service_db_tokyotyrant_repo = https://github.com/CloudI/cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_commit = master
+
+PACKAGES += cloudi_service_filesystem
+pkg_cloudi_service_filesystem_name = cloudi_service_filesystem
+pkg_cloudi_service_filesystem_description = Filesystem CloudI Service
+pkg_cloudi_service_filesystem_homepage = http://cloudi.org/
+pkg_cloudi_service_filesystem_fetch = git
+pkg_cloudi_service_filesystem_repo = https://github.com/CloudI/cloudi_service_filesystem
+pkg_cloudi_service_filesystem_commit = master
+
+PACKAGES += cloudi_service_http_client
+pkg_cloudi_service_http_client_name = cloudi_service_http_client
+pkg_cloudi_service_http_client_description = HTTP client CloudI Service
+pkg_cloudi_service_http_client_homepage = http://cloudi.org/
+pkg_cloudi_service_http_client_fetch = git
+pkg_cloudi_service_http_client_repo = https://github.com/CloudI/cloudi_service_http_client
+pkg_cloudi_service_http_client_commit = master
+
+PACKAGES += cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_name = cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_description = cowboy HTTP/HTTPS CloudI Service
+pkg_cloudi_service_http_cowboy_homepage = http://cloudi.org/
+pkg_cloudi_service_http_cowboy_fetch = git
+pkg_cloudi_service_http_cowboy_repo = https://github.com/CloudI/cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_commit = master
+
+PACKAGES += cloudi_service_http_elli
+pkg_cloudi_service_http_elli_name = cloudi_service_http_elli
+pkg_cloudi_service_http_elli_description = elli HTTP CloudI Service
+pkg_cloudi_service_http_elli_homepage = http://cloudi.org/
+pkg_cloudi_service_http_elli_fetch = git
+pkg_cloudi_service_http_elli_repo = https://github.com/CloudI/cloudi_service_http_elli
+pkg_cloudi_service_http_elli_commit = master
+
+PACKAGES += cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_name = cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_description = Map/Reduce CloudI Service
+pkg_cloudi_service_map_reduce_homepage = http://cloudi.org/
+pkg_cloudi_service_map_reduce_fetch = git
+pkg_cloudi_service_map_reduce_repo = https://github.com/CloudI/cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_commit = master
+
+PACKAGES += cloudi_service_oauth1
+pkg_cloudi_service_oauth1_name = cloudi_service_oauth1
+pkg_cloudi_service_oauth1_description = OAuth v1.0 CloudI Service
+pkg_cloudi_service_oauth1_homepage = http://cloudi.org/
+pkg_cloudi_service_oauth1_fetch = git
+pkg_cloudi_service_oauth1_repo = https://github.com/CloudI/cloudi_service_oauth1
+pkg_cloudi_service_oauth1_commit = master
+
+PACKAGES += cloudi_service_queue
+pkg_cloudi_service_queue_name = cloudi_service_queue
+pkg_cloudi_service_queue_description = Persistent Queue Service
+pkg_cloudi_service_queue_homepage = http://cloudi.org/
+pkg_cloudi_service_queue_fetch = git
+pkg_cloudi_service_queue_repo = https://github.com/CloudI/cloudi_service_queue
+pkg_cloudi_service_queue_commit = master
+
+PACKAGES += cloudi_service_quorum
+pkg_cloudi_service_quorum_name = cloudi_service_quorum
+pkg_cloudi_service_quorum_description = CloudI Quorum Service
+pkg_cloudi_service_quorum_homepage = http://cloudi.org/
+pkg_cloudi_service_quorum_fetch = git
+pkg_cloudi_service_quorum_repo = https://github.com/CloudI/cloudi_service_quorum
+pkg_cloudi_service_quorum_commit = master
+
+PACKAGES += cloudi_service_router
+pkg_cloudi_service_router_name = cloudi_service_router
+pkg_cloudi_service_router_description = CloudI Router Service
+pkg_cloudi_service_router_homepage = http://cloudi.org/
+pkg_cloudi_service_router_fetch = git
+pkg_cloudi_service_router_repo = https://github.com/CloudI/cloudi_service_router
+pkg_cloudi_service_router_commit = master
+
+PACKAGES += cloudi_service_tcp
+pkg_cloudi_service_tcp_name = cloudi_service_tcp
+pkg_cloudi_service_tcp_description = TCP CloudI Service
+pkg_cloudi_service_tcp_homepage = http://cloudi.org/
+pkg_cloudi_service_tcp_fetch = git
+pkg_cloudi_service_tcp_repo = https://github.com/CloudI/cloudi_service_tcp
+pkg_cloudi_service_tcp_commit = master
+
+PACKAGES += cloudi_service_timers
+pkg_cloudi_service_timers_name = cloudi_service_timers
+pkg_cloudi_service_timers_description = Timers CloudI Service
+pkg_cloudi_service_timers_homepage = http://cloudi.org/
+pkg_cloudi_service_timers_fetch = git
+pkg_cloudi_service_timers_repo = https://github.com/CloudI/cloudi_service_timers
+pkg_cloudi_service_timers_commit = master
+
+PACKAGES += cloudi_service_udp
+pkg_cloudi_service_udp_name = cloudi_service_udp
+pkg_cloudi_service_udp_description = UDP CloudI Service
+pkg_cloudi_service_udp_homepage = http://cloudi.org/
+pkg_cloudi_service_udp_fetch = git
+pkg_cloudi_service_udp_repo = https://github.com/CloudI/cloudi_service_udp
+pkg_cloudi_service_udp_commit = master
+
+PACKAGES += cloudi_service_validate
+pkg_cloudi_service_validate_name = cloudi_service_validate
+pkg_cloudi_service_validate_description = CloudI Validate Service
+pkg_cloudi_service_validate_homepage = http://cloudi.org/
+pkg_cloudi_service_validate_fetch = git
+pkg_cloudi_service_validate_repo = https://github.com/CloudI/cloudi_service_validate
+pkg_cloudi_service_validate_commit = master
+
+PACKAGES += cloudi_service_zeromq
+pkg_cloudi_service_zeromq_name = cloudi_service_zeromq
+pkg_cloudi_service_zeromq_description = ZeroMQ CloudI Service
+pkg_cloudi_service_zeromq_homepage = http://cloudi.org/
+pkg_cloudi_service_zeromq_fetch = git
+pkg_cloudi_service_zeromq_repo = https://github.com/CloudI/cloudi_service_zeromq
+pkg_cloudi_service_zeromq_commit = master
+
+PACKAGES += cluster_info
+pkg_cluster_info_name = cluster_info
+pkg_cluster_info_description = Fork of Hibari's nifty cluster_info OTP app
+pkg_cluster_info_homepage = https://github.com/basho/cluster_info
+pkg_cluster_info_fetch = git
+pkg_cluster_info_repo = https://github.com/basho/cluster_info
+pkg_cluster_info_commit = master
+
+PACKAGES += color
+pkg_color_name = color
+pkg_color_description = ANSI colors for your Erlang
+pkg_color_homepage = https://github.com/julianduque/erlang-color
+pkg_color_fetch = git
+pkg_color_repo = https://github.com/julianduque/erlang-color
+pkg_color_commit = master
+
+PACKAGES += confetti
+pkg_confetti_name = confetti
+pkg_confetti_description = Erlang configuration provider / application:get_env/2 on steroids
+pkg_confetti_homepage = https://github.com/jtendo/confetti
+pkg_confetti_fetch = git
+pkg_confetti_repo = https://github.com/jtendo/confetti
+pkg_confetti_commit = master
+
+PACKAGES += couchbeam
+pkg_couchbeam_name = couchbeam
+pkg_couchbeam_description = Apache CouchDB client in Erlang
+pkg_couchbeam_homepage = https://github.com/benoitc/couchbeam
+pkg_couchbeam_fetch = git
+pkg_couchbeam_repo = https://github.com/benoitc/couchbeam
+pkg_couchbeam_commit = master
+
+PACKAGES += covertool
+pkg_covertool_name = covertool
+pkg_covertool_description = Tool to convert Erlang cover data files into Cobertura XML reports
+pkg_covertool_homepage = https://github.com/idubrov/covertool
+pkg_covertool_fetch = git
+pkg_covertool_repo = https://github.com/idubrov/covertool
+pkg_covertool_commit = master
+
+PACKAGES += cowboy
+pkg_cowboy_name = cowboy
+pkg_cowboy_description = Small, fast and modular HTTP server.
+pkg_cowboy_homepage = http://ninenines.eu
+pkg_cowboy_fetch = git
+pkg_cowboy_repo = https://github.com/ninenines/cowboy
+pkg_cowboy_commit = 1.0.4
+
+PACKAGES += cowdb
+pkg_cowdb_name = cowdb
+pkg_cowdb_description = Pure Key/Value database library for Erlang Applications
+pkg_cowdb_homepage = https://github.com/refuge/cowdb
+pkg_cowdb_fetch = git
+pkg_cowdb_repo = https://github.com/refuge/cowdb
+pkg_cowdb_commit = master
+
+PACKAGES += cowlib
+pkg_cowlib_name = cowlib
+pkg_cowlib_description = Support library for manipulating Web protocols.
+pkg_cowlib_homepage = http://ninenines.eu
+pkg_cowlib_fetch = git
+pkg_cowlib_repo = https://github.com/ninenines/cowlib
+pkg_cowlib_commit = 1.0.2
+
+PACKAGES += cpg
+pkg_cpg_name = cpg
+pkg_cpg_description = CloudI Process Groups
+pkg_cpg_homepage = https://github.com/okeuday/cpg
+pkg_cpg_fetch = git
+pkg_cpg_repo = https://github.com/okeuday/cpg
+pkg_cpg_commit = master
+
+PACKAGES += cqerl
+pkg_cqerl_name = cqerl
+pkg_cqerl_description = Native Erlang CQL client for Cassandra
+pkg_cqerl_homepage = https://matehat.github.io/cqerl/
+pkg_cqerl_fetch = git
+pkg_cqerl_repo = https://github.com/matehat/cqerl
+pkg_cqerl_commit = master
+
+PACKAGES += cr
+pkg_cr_name = cr
+pkg_cr_description = Chain Replication
+pkg_cr_homepage = https://synrc.com/apps/cr/doc/cr.htm
+pkg_cr_fetch = git
+pkg_cr_repo = https://github.com/spawnproc/cr
+pkg_cr_commit = master
+
+PACKAGES += cuttlefish
+pkg_cuttlefish_name = cuttlefish
+pkg_cuttlefish_description = never lose your childlike sense of wonder baby cuttlefish, promise me?
+pkg_cuttlefish_homepage = https://github.com/basho/cuttlefish
+pkg_cuttlefish_fetch = git
+pkg_cuttlefish_repo = https://github.com/basho/cuttlefish
+pkg_cuttlefish_commit = master
+
+PACKAGES += damocles
+pkg_damocles_name = damocles
+pkg_damocles_description = Erlang library for generating adversarial network conditions for QAing distributed applications/systems on a single Linux box.
+pkg_damocles_homepage = https://github.com/lostcolony/damocles
+pkg_damocles_fetch = git
+pkg_damocles_repo = https://github.com/lostcolony/damocles
+pkg_damocles_commit = master
+
+PACKAGES += debbie
+pkg_debbie_name = debbie
+pkg_debbie_description = .DEB Built In Erlang
+pkg_debbie_homepage = https://github.com/crownedgrouse/debbie
+pkg_debbie_fetch = git
+pkg_debbie_repo = https://github.com/crownedgrouse/debbie
+pkg_debbie_commit = master
+
+PACKAGES += decimal
+pkg_decimal_name = decimal
+pkg_decimal_description = An Erlang decimal arithmetic library
+pkg_decimal_homepage = https://github.com/tim/erlang-decimal
+pkg_decimal_fetch = git
+pkg_decimal_repo = https://github.com/tim/erlang-decimal
+pkg_decimal_commit = master
+
+PACKAGES += detergent
+pkg_detergent_name = detergent
+pkg_detergent_description = An emulsifying Erlang SOAP library
+pkg_detergent_homepage = https://github.com/devinus/detergent
+pkg_detergent_fetch = git
+pkg_detergent_repo = https://github.com/devinus/detergent
+pkg_detergent_commit = master
+
+PACKAGES += detest
+pkg_detest_name = detest
+pkg_detest_description = Tool for running tests on a cluster of erlang nodes
+pkg_detest_homepage = https://github.com/biokoda/detest
+pkg_detest_fetch = git
+pkg_detest_repo = https://github.com/biokoda/detest
+pkg_detest_commit = master
+
+PACKAGES += dh_date
+pkg_dh_date_name = dh_date
+pkg_dh_date_description = Date formatting / parsing library for erlang
+pkg_dh_date_homepage = https://github.com/daleharvey/dh_date
+pkg_dh_date_fetch = git
+pkg_dh_date_repo = https://github.com/daleharvey/dh_date
+pkg_dh_date_commit = master
+
+PACKAGES += dirbusterl
+pkg_dirbusterl_name = dirbusterl
+pkg_dirbusterl_description = DirBuster successor in Erlang
+pkg_dirbusterl_homepage = https://github.com/silentsignal/DirBustErl
+pkg_dirbusterl_fetch = git
+pkg_dirbusterl_repo = https://github.com/silentsignal/DirBustErl
+pkg_dirbusterl_commit = master
+
+PACKAGES += dispcount
+pkg_dispcount_name = dispcount
+pkg_dispcount_description = Erlang task dispatcher based on ETS counters.
+pkg_dispcount_homepage = https://github.com/ferd/dispcount
+pkg_dispcount_fetch = git
+pkg_dispcount_repo = https://github.com/ferd/dispcount
+pkg_dispcount_commit = master
+
+PACKAGES += dlhttpc
+pkg_dlhttpc_name = dlhttpc
+pkg_dlhttpc_description = dispcount-based lhttpc fork for massive amounts of requests to limited endpoints
+pkg_dlhttpc_homepage = https://github.com/ferd/dlhttpc
+pkg_dlhttpc_fetch = git
+pkg_dlhttpc_repo = https://github.com/ferd/dlhttpc
+pkg_dlhttpc_commit = master
+
+PACKAGES += dns
+pkg_dns_name = dns
+pkg_dns_description = Erlang DNS library
+pkg_dns_homepage = https://github.com/aetrion/dns_erlang
+pkg_dns_fetch = git
+pkg_dns_repo = https://github.com/aetrion/dns_erlang
+pkg_dns_commit = master
+
+PACKAGES += dnssd
+pkg_dnssd_name = dnssd
+pkg_dnssd_description = Erlang interface to Apple's Bonjour DNS Service Discovery implementation
+pkg_dnssd_homepage = https://github.com/benoitc/dnssd_erlang
+pkg_dnssd_fetch = git
+pkg_dnssd_repo = https://github.com/benoitc/dnssd_erlang
+pkg_dnssd_commit = master
+
+PACKAGES += dynamic_compile
+pkg_dynamic_compile_name = dynamic_compile
+pkg_dynamic_compile_description = compile and load erlang modules from string input
+pkg_dynamic_compile_homepage = https://github.com/jkvor/dynamic_compile
+pkg_dynamic_compile_fetch = git
+pkg_dynamic_compile_repo = https://github.com/jkvor/dynamic_compile
+pkg_dynamic_compile_commit = master
+
+PACKAGES += e2
+pkg_e2_name = e2
+pkg_e2_description = Library to simplify writing correct OTP applications.
+pkg_e2_homepage = http://e2project.org
+pkg_e2_fetch = git
+pkg_e2_repo = https://github.com/gar1t/e2
+pkg_e2_commit = master
+
+PACKAGES += eamf
+pkg_eamf_name = eamf
+pkg_eamf_description = eAMF provides Action Message Format (AMF) support for Erlang
+pkg_eamf_homepage = https://github.com/mrinalwadhwa/eamf
+pkg_eamf_fetch = git
+pkg_eamf_repo = https://github.com/mrinalwadhwa/eamf
+pkg_eamf_commit = master
+
+PACKAGES += eavro
+pkg_eavro_name = eavro
+pkg_eavro_description = Apache Avro encoder/decoder
+pkg_eavro_homepage = https://github.com/SIfoxDevTeam/eavro
+pkg_eavro_fetch = git
+pkg_eavro_repo = https://github.com/SIfoxDevTeam/eavro
+pkg_eavro_commit = master
+
+PACKAGES += ecapnp
+pkg_ecapnp_name = ecapnp
+pkg_ecapnp_description = Cap'n Proto library for Erlang
+pkg_ecapnp_homepage = https://github.com/kaos/ecapnp
+pkg_ecapnp_fetch = git
+pkg_ecapnp_repo = https://github.com/kaos/ecapnp
+pkg_ecapnp_commit = master
+
+PACKAGES += econfig
+pkg_econfig_name = econfig
+pkg_econfig_description = simple Erlang config handler using INI files
+pkg_econfig_homepage = https://github.com/benoitc/econfig
+pkg_econfig_fetch = git
+pkg_econfig_repo = https://github.com/benoitc/econfig
+pkg_econfig_commit = master
+
+PACKAGES += edate
+pkg_edate_name = edate
+pkg_edate_description = date manipulation library for erlang
+pkg_edate_homepage = https://github.com/dweldon/edate
+pkg_edate_fetch = git
+pkg_edate_repo = https://github.com/dweldon/edate
+pkg_edate_commit = master
+
+PACKAGES += edgar
+pkg_edgar_name = edgar
+pkg_edgar_description = Erlang Does GNU AR
+pkg_edgar_homepage = https://github.com/crownedgrouse/edgar
+pkg_edgar_fetch = git
+pkg_edgar_repo = https://github.com/crownedgrouse/edgar
+pkg_edgar_commit = master
+
+PACKAGES += edis
+pkg_edis_name = edis
+pkg_edis_description = An Erlang implementation of Redis KV Store
+pkg_edis_homepage = http://inaka.github.com/edis/
+pkg_edis_fetch = git
+pkg_edis_repo = https://github.com/inaka/edis
+pkg_edis_commit = master
+
+PACKAGES += edns
+pkg_edns_name = edns
+pkg_edns_description = Erlang/OTP DNS server
+pkg_edns_homepage = https://github.com/hcvst/erlang-dns
+pkg_edns_fetch = git
+pkg_edns_repo = https://github.com/hcvst/erlang-dns
+pkg_edns_commit = master
+
+PACKAGES += edown
+pkg_edown_name = edown
+pkg_edown_description = EDoc extension for generating Github-flavored Markdown
+pkg_edown_homepage = https://github.com/uwiger/edown
+pkg_edown_fetch = git
+pkg_edown_repo = https://github.com/uwiger/edown
+pkg_edown_commit = master
+
+PACKAGES += eep
+pkg_eep_name = eep
+pkg_eep_description = Erlang Easy Profiling (eep) application provides a way to analyze application performance and call hierarchy
+pkg_eep_homepage = https://github.com/virtan/eep
+pkg_eep_fetch = git
+pkg_eep_repo = https://github.com/virtan/eep
+pkg_eep_commit = master
+
+PACKAGES += eep_app
+pkg_eep_app_name = eep_app
+pkg_eep_app_description = Embedded Event Processing
+pkg_eep_app_homepage = https://github.com/darach/eep-erl
+pkg_eep_app_fetch = git
+pkg_eep_app_repo = https://github.com/darach/eep-erl
+pkg_eep_app_commit = master
+
+PACKAGES += efene
+pkg_efene_name = efene
+pkg_efene_description = Alternative syntax for the Erlang Programming Language focusing on simplicity, ease of use and programmer UX
+pkg_efene_homepage = https://github.com/efene/efene
+pkg_efene_fetch = git
+pkg_efene_repo = https://github.com/efene/efene
+pkg_efene_commit = master
+
+PACKAGES += egeoip
+pkg_egeoip_name = egeoip
+pkg_egeoip_description = Erlang IP Geolocation module, currently supporting the MaxMind GeoLite City Database.
+pkg_egeoip_homepage = https://github.com/mochi/egeoip
+pkg_egeoip_fetch = git
+pkg_egeoip_repo = https://github.com/mochi/egeoip
+pkg_egeoip_commit = master
+
+PACKAGES += ehsa
+pkg_ehsa_name = ehsa
+pkg_ehsa_description = Erlang HTTP server basic and digest authentication modules
+pkg_ehsa_homepage = https://bitbucket.org/a12n/ehsa
+pkg_ehsa_fetch = hg
+pkg_ehsa_repo = https://bitbucket.org/a12n/ehsa
+pkg_ehsa_commit = default
+
+PACKAGES += ej
+pkg_ej_name = ej
+pkg_ej_description = Helper module for working with Erlang terms representing JSON
+pkg_ej_homepage = https://github.com/seth/ej
+pkg_ej_fetch = git
+pkg_ej_repo = https://github.com/seth/ej
+pkg_ej_commit = master
+
+PACKAGES += ejabberd
+pkg_ejabberd_name = ejabberd
+pkg_ejabberd_description = Robust, ubiquitous and massively scalable Jabber / XMPP Instant Messaging platform
+pkg_ejabberd_homepage = https://github.com/processone/ejabberd
+pkg_ejabberd_fetch = git
+pkg_ejabberd_repo = https://github.com/processone/ejabberd
+pkg_ejabberd_commit = master
+
+PACKAGES += ejwt
+pkg_ejwt_name = ejwt
+pkg_ejwt_description = erlang library for JSON Web Token
+pkg_ejwt_homepage = https://github.com/artefactop/ejwt
+pkg_ejwt_fetch = git
+pkg_ejwt_repo = https://github.com/artefactop/ejwt
+pkg_ejwt_commit = master
+
+PACKAGES += ekaf
+pkg_ekaf_name = ekaf
+pkg_ekaf_description = A minimal, high-performance Kafka client in Erlang.
+pkg_ekaf_homepage = https://github.com/helpshift/ekaf
+pkg_ekaf_fetch = git
+pkg_ekaf_repo = https://github.com/helpshift/ekaf
+pkg_ekaf_commit = master
+
+PACKAGES += elarm
+pkg_elarm_name = elarm
+pkg_elarm_description = Alarm Manager for Erlang.
+pkg_elarm_homepage = https://github.com/esl/elarm
+pkg_elarm_fetch = git
+pkg_elarm_repo = https://github.com/esl/elarm
+pkg_elarm_commit = master
+
+PACKAGES += eleveldb
+pkg_eleveldb_name = eleveldb
+pkg_eleveldb_description = Erlang LevelDB API
+pkg_eleveldb_homepage = https://github.com/basho/eleveldb
+pkg_eleveldb_fetch = git
+pkg_eleveldb_repo = https://github.com/basho/eleveldb
+pkg_eleveldb_commit = master
+
+PACKAGES += elixir
+pkg_elixir_name = elixir
+pkg_elixir_description = Elixir is a dynamic, functional language designed for building scalable and maintainable applications
+pkg_elixir_homepage = https://elixir-lang.org/
+pkg_elixir_fetch = git
+pkg_elixir_repo = https://github.com/elixir-lang/elixir
+pkg_elixir_commit = master
+
+PACKAGES += elli
+pkg_elli_name = elli
+pkg_elli_description = Simple, robust and performant Erlang web server
+pkg_elli_homepage = https://github.com/elli-lib/elli
+pkg_elli_fetch = git
+pkg_elli_repo = https://github.com/elli-lib/elli
+pkg_elli_commit = master
+
+PACKAGES += elvis
+pkg_elvis_name = elvis
+pkg_elvis_description = Erlang Style Reviewer
+pkg_elvis_homepage = https://github.com/inaka/elvis
+pkg_elvis_fetch = git
+pkg_elvis_repo = https://github.com/inaka/elvis
+pkg_elvis_commit = master
+
+PACKAGES += emagick
+pkg_emagick_name = emagick
+pkg_emagick_description = Wrapper for Graphics/ImageMagick command line tool.
+pkg_emagick_homepage = https://github.com/kivra/emagick
+pkg_emagick_fetch = git
+pkg_emagick_repo = https://github.com/kivra/emagick
+pkg_emagick_commit = master
+
+PACKAGES += emysql
+pkg_emysql_name = emysql
+pkg_emysql_description = Stable, pure Erlang MySQL driver.
+pkg_emysql_homepage = https://github.com/Eonblast/Emysql
+pkg_emysql_fetch = git
+pkg_emysql_repo = https://github.com/Eonblast/Emysql
+pkg_emysql_commit = master
+
+PACKAGES += enm
+pkg_enm_name = enm
+pkg_enm_description = Erlang driver for nanomsg
+pkg_enm_homepage = https://github.com/basho/enm
+pkg_enm_fetch = git
+pkg_enm_repo = https://github.com/basho/enm
+pkg_enm_commit = master
+
+PACKAGES += entop
+pkg_entop_name = entop
+pkg_entop_description = A top-like tool for monitoring an Erlang node
+pkg_entop_homepage = https://github.com/mazenharake/entop
+pkg_entop_fetch = git
+pkg_entop_repo = https://github.com/mazenharake/entop
+pkg_entop_commit = master
+
+PACKAGES += epcap
+pkg_epcap_name = epcap
+pkg_epcap_description = Erlang packet capture interface using pcap
+pkg_epcap_homepage = https://github.com/msantos/epcap
+pkg_epcap_fetch = git
+pkg_epcap_repo = https://github.com/msantos/epcap
+pkg_epcap_commit = master
+
+PACKAGES += eper
+pkg_eper_name = eper
+pkg_eper_description = Erlang performance and debugging tools.
+pkg_eper_homepage = https://github.com/massemanet/eper
+pkg_eper_fetch = git
+pkg_eper_repo = https://github.com/massemanet/eper
+pkg_eper_commit = master
+
+PACKAGES += epgsql
+pkg_epgsql_name = epgsql
+pkg_epgsql_description = Erlang PostgreSQL client library.
+pkg_epgsql_homepage = https://github.com/epgsql/epgsql
+pkg_epgsql_fetch = git
+pkg_epgsql_repo = https://github.com/epgsql/epgsql
+pkg_epgsql_commit = master
+
+PACKAGES += episcina
+pkg_episcina_name = episcina
+pkg_episcina_description = A simple, non-intrusive resource pool for connections
+pkg_episcina_homepage = https://github.com/erlware/episcina
+pkg_episcina_fetch = git
+pkg_episcina_repo = https://github.com/erlware/episcina
+pkg_episcina_commit = master
+
+PACKAGES += eplot
+pkg_eplot_name = eplot
+pkg_eplot_description = A plot engine written in erlang.
+pkg_eplot_homepage = https://github.com/psyeugenic/eplot
+pkg_eplot_fetch = git
+pkg_eplot_repo = https://github.com/psyeugenic/eplot
+pkg_eplot_commit = master
+
+PACKAGES += epocxy
+pkg_epocxy_name = epocxy
+pkg_epocxy_description = Erlang Patterns of Concurrency
+pkg_epocxy_homepage = https://github.com/duomark/epocxy
+pkg_epocxy_fetch = git
+pkg_epocxy_repo = https://github.com/duomark/epocxy
+pkg_epocxy_commit = master
+
+PACKAGES += epubnub
+pkg_epubnub_name = epubnub
+pkg_epubnub_description = Erlang PubNub API
+pkg_epubnub_homepage = https://github.com/tsloughter/epubnub
+pkg_epubnub_fetch = git
+pkg_epubnub_repo = https://github.com/tsloughter/epubnub
+pkg_epubnub_commit = master
+
+PACKAGES += eqm
+pkg_eqm_name = eqm
+pkg_eqm_description = Erlang pub sub with supply-demand channels
+pkg_eqm_homepage = https://github.com/loucash/eqm
+pkg_eqm_fetch = git
+pkg_eqm_repo = https://github.com/loucash/eqm
+pkg_eqm_commit = master
+
+PACKAGES += eredis
+pkg_eredis_name = eredis
+pkg_eredis_description = Erlang Redis client
+pkg_eredis_homepage = https://github.com/wooga/eredis
+pkg_eredis_fetch = git
+pkg_eredis_repo = https://github.com/wooga/eredis
+pkg_eredis_commit = master
+
+PACKAGES += eredis_pool
+pkg_eredis_pool_name = eredis_pool
+pkg_eredis_pool_description = eredis_pool is a pool of Redis clients, using eredis and poolboy.
+pkg_eredis_pool_homepage = https://github.com/hiroeorz/eredis_pool
+pkg_eredis_pool_fetch = git
+pkg_eredis_pool_repo = https://github.com/hiroeorz/eredis_pool
+pkg_eredis_pool_commit = master
+
+PACKAGES += erl_streams
+pkg_erl_streams_name = erl_streams
+pkg_erl_streams_description = Streams in Erlang
+pkg_erl_streams_homepage = https://github.com/epappas/erl_streams
+pkg_erl_streams_fetch = git
+pkg_erl_streams_repo = https://github.com/epappas/erl_streams
+pkg_erl_streams_commit = master
+
+PACKAGES += erlang_cep
+pkg_erlang_cep_name = erlang_cep
+pkg_erlang_cep_description = A basic CEP package written in erlang
+pkg_erlang_cep_homepage = https://github.com/danmacklin/erlang_cep
+pkg_erlang_cep_fetch = git
+pkg_erlang_cep_repo = https://github.com/danmacklin/erlang_cep
+pkg_erlang_cep_commit = master
+
+PACKAGES += erlang_js
+pkg_erlang_js_name = erlang_js
+pkg_erlang_js_description = A linked-in driver for Erlang to Mozilla's Spidermonkey Javascript runtime.
+pkg_erlang_js_homepage = https://github.com/basho/erlang_js
+pkg_erlang_js_fetch = git
+pkg_erlang_js_repo = https://github.com/basho/erlang_js
+pkg_erlang_js_commit = master
+
+PACKAGES += erlang_localtime
+pkg_erlang_localtime_name = erlang_localtime
+pkg_erlang_localtime_description = Erlang library for conversion from one local time to another
+pkg_erlang_localtime_homepage = https://github.com/dmitryme/erlang_localtime
+pkg_erlang_localtime_fetch = git
+pkg_erlang_localtime_repo = https://github.com/dmitryme/erlang_localtime
+pkg_erlang_localtime_commit = master
+
+PACKAGES += erlang_smtp
+pkg_erlang_smtp_name = erlang_smtp
+pkg_erlang_smtp_description = Erlang SMTP and POP3 server code.
+pkg_erlang_smtp_homepage = https://github.com/tonyg/erlang-smtp
+pkg_erlang_smtp_fetch = git
+pkg_erlang_smtp_repo = https://github.com/tonyg/erlang-smtp
+pkg_erlang_smtp_commit = master
+
+PACKAGES += erlang_term
+pkg_erlang_term_name = erlang_term
+pkg_erlang_term_description = Erlang Term Info
+pkg_erlang_term_homepage = https://github.com/okeuday/erlang_term
+pkg_erlang_term_fetch = git
+pkg_erlang_term_repo = https://github.com/okeuday/erlang_term
+pkg_erlang_term_commit = master
+
+PACKAGES += erlastic_search
+pkg_erlastic_search_name = erlastic_search
+pkg_erlastic_search_description = An Erlang app for communicating with Elasticsearch's REST interface.
+pkg_erlastic_search_homepage = https://github.com/tsloughter/erlastic_search
+pkg_erlastic_search_fetch = git
+pkg_erlastic_search_repo = https://github.com/tsloughter/erlastic_search
+pkg_erlastic_search_commit = master
+
+PACKAGES += erlasticsearch
+pkg_erlasticsearch_name = erlasticsearch
+pkg_erlasticsearch_description = Erlang thrift interface to elastic_search
+pkg_erlasticsearch_homepage = https://github.com/dieswaytoofast/erlasticsearch
+pkg_erlasticsearch_fetch = git
+pkg_erlasticsearch_repo = https://github.com/dieswaytoofast/erlasticsearch
+pkg_erlasticsearch_commit = master
+
+PACKAGES += erlbrake
+pkg_erlbrake_name = erlbrake
+pkg_erlbrake_description = Erlang Airbrake notification client
+pkg_erlbrake_homepage = https://github.com/kenpratt/erlbrake
+pkg_erlbrake_fetch = git
+pkg_erlbrake_repo = https://github.com/kenpratt/erlbrake
+pkg_erlbrake_commit = master
+
+PACKAGES += erlcloud
+pkg_erlcloud_name = erlcloud
+pkg_erlcloud_description = Cloud Computing library for erlang (Amazon EC2, S3, SQS, SimpleDB, Mechanical Turk, ELB)
+pkg_erlcloud_homepage = https://github.com/gleber/erlcloud
+pkg_erlcloud_fetch = git
+pkg_erlcloud_repo = https://github.com/gleber/erlcloud
+pkg_erlcloud_commit = master
+
+PACKAGES += erlcron
+pkg_erlcron_name = erlcron
+pkg_erlcron_description = Erlang cronish system
+pkg_erlcron_homepage = https://github.com/erlware/erlcron
+pkg_erlcron_fetch = git
+pkg_erlcron_repo = https://github.com/erlware/erlcron
+pkg_erlcron_commit = master
+
+PACKAGES += erldb
+pkg_erldb_name = erldb
+pkg_erldb_description = ORM (Object-relational mapping) application implemented in Erlang
+pkg_erldb_homepage = http://erldb.org
+pkg_erldb_fetch = git
+pkg_erldb_repo = https://github.com/erldb/erldb
+pkg_erldb_commit = master
+
+PACKAGES += erldis
+pkg_erldis_name = erldis
+pkg_erldis_description = redis erlang client library
+pkg_erldis_homepage = https://github.com/cstar/erldis
+pkg_erldis_fetch = git
+pkg_erldis_repo = https://github.com/cstar/erldis
+pkg_erldis_commit = master
+
+PACKAGES += erldns
+pkg_erldns_name = erldns
+pkg_erldns_description = DNS server, in erlang.
+pkg_erldns_homepage = https://github.com/aetrion/erl-dns
+pkg_erldns_fetch = git
+pkg_erldns_repo = https://github.com/aetrion/erl-dns
+pkg_erldns_commit = master
+
+PACKAGES += erldocker
+pkg_erldocker_name = erldocker
+pkg_erldocker_description = Docker Remote API client for Erlang
+pkg_erldocker_homepage = https://github.com/proger/erldocker
+pkg_erldocker_fetch = git
+pkg_erldocker_repo = https://github.com/proger/erldocker
+pkg_erldocker_commit = master
+
+PACKAGES += erlfsmon
+pkg_erlfsmon_name = erlfsmon
+pkg_erlfsmon_description = Erlang filesystem event watcher for Linux and OSX
+pkg_erlfsmon_homepage = https://github.com/proger/erlfsmon
+pkg_erlfsmon_fetch = git
+pkg_erlfsmon_repo = https://github.com/proger/erlfsmon
+pkg_erlfsmon_commit = master
+
+PACKAGES += erlgit
+pkg_erlgit_name = erlgit
+pkg_erlgit_description = Erlang convenience wrapper around git executable
+pkg_erlgit_homepage = https://github.com/gleber/erlgit
+pkg_erlgit_fetch = git
+pkg_erlgit_repo = https://github.com/gleber/erlgit
+pkg_erlgit_commit = master
+
+PACKAGES += erlguten
+pkg_erlguten_name = erlguten
+pkg_erlguten_description = ErlGuten is a system for high-quality typesetting, written purely in Erlang.
+pkg_erlguten_homepage = https://github.com/richcarl/erlguten
+pkg_erlguten_fetch = git
+pkg_erlguten_repo = https://github.com/richcarl/erlguten
+pkg_erlguten_commit = master
+
+PACKAGES += erlmc
+pkg_erlmc_name = erlmc
+pkg_erlmc_description = Erlang memcached binary protocol client
+pkg_erlmc_homepage = https://github.com/jkvor/erlmc
+pkg_erlmc_fetch = git
+pkg_erlmc_repo = https://github.com/jkvor/erlmc
+pkg_erlmc_commit = master
+
+PACKAGES += erlmongo
+pkg_erlmongo_name = erlmongo
+pkg_erlmongo_description = Record based Erlang driver for MongoDB with gridfs support
+pkg_erlmongo_homepage = https://github.com/SergejJurecko/erlmongo
+pkg_erlmongo_fetch = git
+pkg_erlmongo_repo = https://github.com/SergejJurecko/erlmongo
+pkg_erlmongo_commit = master
+
+PACKAGES += erlog
+pkg_erlog_name = erlog
+pkg_erlog_description = Prolog interpreter in and for Erlang
+pkg_erlog_homepage = https://github.com/rvirding/erlog
+pkg_erlog_fetch = git
+pkg_erlog_repo = https://github.com/rvirding/erlog
+pkg_erlog_commit = master
+
+PACKAGES += erlpass
+pkg_erlpass_name = erlpass
+pkg_erlpass_description = A library to handle password hashing and changing in a safe manner, independent from any kind of storage whatsoever.
+pkg_erlpass_homepage = https://github.com/ferd/erlpass
+pkg_erlpass_fetch = git
+pkg_erlpass_repo = https://github.com/ferd/erlpass
+pkg_erlpass_commit = master
+
+PACKAGES += erlport
+pkg_erlport_name = erlport
+pkg_erlport_description = ErlPort - connect Erlang to other languages
+pkg_erlport_homepage = https://github.com/hdima/erlport
+pkg_erlport_fetch = git
+pkg_erlport_repo = https://github.com/hdima/erlport
+pkg_erlport_commit = master
+
+PACKAGES += erlsh
+pkg_erlsh_name = erlsh
+pkg_erlsh_description = Erlang shell tools
+pkg_erlsh_homepage = https://github.com/proger/erlsh
+pkg_erlsh_fetch = git
+pkg_erlsh_repo = https://github.com/proger/erlsh
+pkg_erlsh_commit = master
+
+PACKAGES += erlsha2
+pkg_erlsha2_name = erlsha2
+pkg_erlsha2_description = SHA-224, SHA-256, SHA-384, SHA-512 implemented in Erlang NIFs.
+pkg_erlsha2_homepage = https://github.com/vinoski/erlsha2
+pkg_erlsha2_fetch = git
+pkg_erlsha2_repo = https://github.com/vinoski/erlsha2
+pkg_erlsha2_commit = master
+
+PACKAGES += erlsom
+pkg_erlsom_name = erlsom
+pkg_erlsom_description = XML parser for Erlang
+pkg_erlsom_homepage = https://github.com/willemdj/erlsom
+pkg_erlsom_fetch = git
+pkg_erlsom_repo = https://github.com/willemdj/erlsom
+pkg_erlsom_commit = master
+
+PACKAGES += erlubi
+pkg_erlubi_name = erlubi
+pkg_erlubi_description = Ubigraph Erlang Client (and Process Visualizer)
+pkg_erlubi_homepage = https://github.com/krestenkrab/erlubi
+pkg_erlubi_fetch = git
+pkg_erlubi_repo = https://github.com/krestenkrab/erlubi
+pkg_erlubi_commit = master
+
+PACKAGES += erlvolt
+pkg_erlvolt_name = erlvolt
+pkg_erlvolt_description = VoltDB Erlang Client Driver
+pkg_erlvolt_homepage = https://github.com/VoltDB/voltdb-client-erlang
+pkg_erlvolt_fetch = git
+pkg_erlvolt_repo = https://github.com/VoltDB/voltdb-client-erlang
+pkg_erlvolt_commit = master
+
+PACKAGES += erlware_commons
+pkg_erlware_commons_name = erlware_commons
+pkg_erlware_commons_description = Erlware Commons is an Erlware project focused on all aspects of reusable Erlang components.
+pkg_erlware_commons_homepage = https://github.com/erlware/erlware_commons
+pkg_erlware_commons_fetch = git
+pkg_erlware_commons_repo = https://github.com/erlware/erlware_commons
+pkg_erlware_commons_commit = master
+
+PACKAGES += erlydtl
+pkg_erlydtl_name = erlydtl
+pkg_erlydtl_description = Django Template Language for Erlang.
+pkg_erlydtl_homepage = https://github.com/erlydtl/erlydtl
+pkg_erlydtl_fetch = git
+pkg_erlydtl_repo = https://github.com/erlydtl/erlydtl
+pkg_erlydtl_commit = master
+
+PACKAGES += errd
+pkg_errd_name = errd
+pkg_errd_description = Erlang RRDTool library
+pkg_errd_homepage = https://github.com/archaelus/errd
+pkg_errd_fetch = git
+pkg_errd_repo = https://github.com/archaelus/errd
+pkg_errd_commit = master
+
+PACKAGES += erserve
+pkg_erserve_name = erserve
+pkg_erserve_description = Erlang/Rserve communication interface
+pkg_erserve_homepage = https://github.com/del/erserve
+pkg_erserve_fetch = git
+pkg_erserve_repo = https://github.com/del/erserve
+pkg_erserve_commit = master
+
+PACKAGES += erwa
+pkg_erwa_name = erwa
+pkg_erwa_description = A WAMP router and client written in Erlang.
+pkg_erwa_homepage = https://github.com/bwegh/erwa
+pkg_erwa_fetch = git
+pkg_erwa_repo = https://github.com/bwegh/erwa
+pkg_erwa_commit = master
+
+PACKAGES += escalus
+pkg_escalus_name = escalus
+pkg_escalus_description = An XMPP client library in Erlang for conveniently testing XMPP servers
+pkg_escalus_homepage = https://github.com/esl/escalus
+pkg_escalus_fetch = git
+pkg_escalus_repo = https://github.com/esl/escalus
+pkg_escalus_commit = master
+
+PACKAGES += esh_mk
+pkg_esh_mk_name = esh_mk
+pkg_esh_mk_description = esh template engine plugin for erlang.mk
+pkg_esh_mk_homepage = https://github.com/crownedgrouse/esh.mk
+pkg_esh_mk_fetch = git
+pkg_esh_mk_repo = https://github.com/crownedgrouse/esh.mk.git
+pkg_esh_mk_commit = master
+
+PACKAGES += espec
+pkg_espec_name = espec
+pkg_espec_description = ESpec: Behaviour driven development framework for Erlang
+pkg_espec_homepage = https://github.com/lucaspiller/espec
+pkg_espec_fetch = git
+pkg_espec_repo = https://github.com/lucaspiller/espec
+pkg_espec_commit = master
+
+PACKAGES += estatsd
+pkg_estatsd_name = estatsd
+pkg_estatsd_description = Erlang stats aggregation app that periodically flushes data to graphite
+pkg_estatsd_homepage = https://github.com/RJ/estatsd
+pkg_estatsd_fetch = git
+pkg_estatsd_repo = https://github.com/RJ/estatsd
+pkg_estatsd_commit = master
+
+PACKAGES += etap
+pkg_etap_name = etap
+pkg_etap_description = etap is a simple erlang testing library that provides TAP compliant output.
+pkg_etap_homepage = https://github.com/ngerakines/etap
+pkg_etap_fetch = git
+pkg_etap_repo = https://github.com/ngerakines/etap
+pkg_etap_commit = master
+
+PACKAGES += etest
+pkg_etest_name = etest
+pkg_etest_description = A lightweight, convention over configuration test framework for Erlang
+pkg_etest_homepage = https://github.com/wooga/etest
+pkg_etest_fetch = git
+pkg_etest_repo = https://github.com/wooga/etest
+pkg_etest_commit = master
+
+PACKAGES += etest_http
+pkg_etest_http_name = etest_http
+pkg_etest_http_description = etest Assertions around HTTP (client-side)
+pkg_etest_http_homepage = https://github.com/wooga/etest_http
+pkg_etest_http_fetch = git
+pkg_etest_http_repo = https://github.com/wooga/etest_http
+pkg_etest_http_commit = master
+
+PACKAGES += etoml
+pkg_etoml_name = etoml
+pkg_etoml_description = TOML language erlang parser
+pkg_etoml_homepage = https://github.com/kalta/etoml
+pkg_etoml_fetch = git
+pkg_etoml_repo = https://github.com/kalta/etoml
+pkg_etoml_commit = master
+
+PACKAGES += eunit
+pkg_eunit_name = eunit
+pkg_eunit_description = The EUnit lightweight unit testing framework for Erlang - this is the canonical development repository.
+pkg_eunit_homepage = https://github.com/richcarl/eunit
+pkg_eunit_fetch = git
+pkg_eunit_repo = https://github.com/richcarl/eunit
+pkg_eunit_commit = master
+
+PACKAGES += eunit_formatters
+pkg_eunit_formatters_name = eunit_formatters
+pkg_eunit_formatters_description = Because eunit's output sucks. Let's make it better.
+pkg_eunit_formatters_homepage = https://github.com/seancribbs/eunit_formatters
+pkg_eunit_formatters_fetch = git
+pkg_eunit_formatters_repo = https://github.com/seancribbs/eunit_formatters
+pkg_eunit_formatters_commit = master
+
+PACKAGES += euthanasia
+pkg_euthanasia_name = euthanasia
+pkg_euthanasia_description = Merciful killer for your Erlang processes
+pkg_euthanasia_homepage = https://github.com/doubleyou/euthanasia
+pkg_euthanasia_fetch = git
+pkg_euthanasia_repo = https://github.com/doubleyou/euthanasia
+pkg_euthanasia_commit = master
+
+PACKAGES += evum
+pkg_evum_name = evum
+pkg_evum_description = Spawn Linux VMs as Erlang processes in the Erlang VM
+pkg_evum_homepage = https://github.com/msantos/evum
+pkg_evum_fetch = git
+pkg_evum_repo = https://github.com/msantos/evum
+pkg_evum_commit = master
+
+PACKAGES += exec
+pkg_exec_name = erlexec
+pkg_exec_description = Execute and control OS processes from Erlang/OTP.
+pkg_exec_homepage = http://saleyn.github.com/erlexec
+pkg_exec_fetch = git
+pkg_exec_repo = https://github.com/saleyn/erlexec
+pkg_exec_commit = master
+
+PACKAGES += exml
+pkg_exml_name = exml
+pkg_exml_description = XML parsing library in Erlang
+pkg_exml_homepage = https://github.com/paulgray/exml
+pkg_exml_fetch = git
+pkg_exml_repo = https://github.com/paulgray/exml
+pkg_exml_commit = master
+
+PACKAGES += exometer
+pkg_exometer_name = exometer
+pkg_exometer_description = Basic measurement objects and probe behavior
+pkg_exometer_homepage = https://github.com/Feuerlabs/exometer
+pkg_exometer_fetch = git
+pkg_exometer_repo = https://github.com/Feuerlabs/exometer
+pkg_exometer_commit = master
+
+PACKAGES += exs1024
+pkg_exs1024_name = exs1024
+pkg_exs1024_description = Xorshift1024star pseudo random number generator for Erlang.
+pkg_exs1024_homepage = https://github.com/jj1bdx/exs1024
+pkg_exs1024_fetch = git
+pkg_exs1024_repo = https://github.com/jj1bdx/exs1024
+pkg_exs1024_commit = master
+
+PACKAGES += exs64
+pkg_exs64_name = exs64
+pkg_exs64_description = Xorshift64star pseudo random number generator for Erlang.
+pkg_exs64_homepage = https://github.com/jj1bdx/exs64
+pkg_exs64_fetch = git
+pkg_exs64_repo = https://github.com/jj1bdx/exs64
+pkg_exs64_commit = master
+
+PACKAGES += exsplus116
+pkg_exsplus116_name = exsplus116
+pkg_exsplus116_description = Xorshift116plus for Erlang
+pkg_exsplus116_homepage = https://github.com/jj1bdx/exsplus116
+pkg_exsplus116_fetch = git
+pkg_exsplus116_repo = https://github.com/jj1bdx/exsplus116
+pkg_exsplus116_commit = master
+
+PACKAGES += exsplus128
+pkg_exsplus128_name = exsplus128
+pkg_exsplus128_description = Xorshift128plus pseudo random number generator for Erlang.
+pkg_exsplus128_homepage = https://github.com/jj1bdx/exsplus128
+pkg_exsplus128_fetch = git
+pkg_exsplus128_repo = https://github.com/jj1bdx/exsplus128
+pkg_exsplus128_commit = master
+
+PACKAGES += ezmq
+pkg_ezmq_name = ezmq
+pkg_ezmq_description = zMQ implemented in Erlang
+pkg_ezmq_homepage = https://github.com/RoadRunnr/ezmq
+pkg_ezmq_fetch = git
+pkg_ezmq_repo = https://github.com/RoadRunnr/ezmq
+pkg_ezmq_commit = master
+
+PACKAGES += ezmtp
+pkg_ezmtp_name = ezmtp
+pkg_ezmtp_description = ZMTP protocol in pure Erlang.
+pkg_ezmtp_homepage = https://github.com/a13x/ezmtp
+pkg_ezmtp_fetch = git
+pkg_ezmtp_repo = https://github.com/a13x/ezmtp
+pkg_ezmtp_commit = master
+
+PACKAGES += fast_disk_log
+pkg_fast_disk_log_name = fast_disk_log
+pkg_fast_disk_log_description = Pool-based asynchronous Erlang disk logger
+pkg_fast_disk_log_homepage = https://github.com/lpgauth/fast_disk_log
+pkg_fast_disk_log_fetch = git
+pkg_fast_disk_log_repo = https://github.com/lpgauth/fast_disk_log
+pkg_fast_disk_log_commit = master
+
+PACKAGES += feeder
+pkg_feeder_name = feeder
+pkg_feeder_description = Stream parse RSS and Atom formatted XML feeds.
+pkg_feeder_homepage = https://github.com/michaelnisi/feeder
+pkg_feeder_fetch = git
+pkg_feeder_repo = https://github.com/michaelnisi/feeder
+pkg_feeder_commit = master
+
+PACKAGES += find_crate
+pkg_find_crate_name = find_crate
+pkg_find_crate_description = Find Rust libs and exes in Erlang application priv directory
+pkg_find_crate_homepage = https://github.com/goertzenator/find_crate
+pkg_find_crate_fetch = git
+pkg_find_crate_repo = https://github.com/goertzenator/find_crate
+pkg_find_crate_commit = master
+
+PACKAGES += fix
+pkg_fix_name = fix
+pkg_fix_description = http://fixprotocol.org/ implementation.
+pkg_fix_homepage = https://github.com/maxlapshin/fix
+pkg_fix_fetch = git
+pkg_fix_repo = https://github.com/maxlapshin/fix
+pkg_fix_commit = master
+
+PACKAGES += flower
+pkg_flower_name = flower
+pkg_flower_description = FlowER - an Erlang OpenFlow development platform
+pkg_flower_homepage = https://github.com/travelping/flower
+pkg_flower_fetch = git
+pkg_flower_repo = https://github.com/travelping/flower
+pkg_flower_commit = master
+
+PACKAGES += fn
+pkg_fn_name = fn
+pkg_fn_description = Function utilities for Erlang
+pkg_fn_homepage = https://github.com/reiddraper/fn
+pkg_fn_fetch = git
+pkg_fn_repo = https://github.com/reiddraper/fn
+pkg_fn_commit = master
+
+PACKAGES += folsom
+pkg_folsom_name = folsom
+pkg_folsom_description = Expose Erlang Events and Metrics
+pkg_folsom_homepage = https://github.com/boundary/folsom
+pkg_folsom_fetch = git
+pkg_folsom_repo = https://github.com/boundary/folsom
+pkg_folsom_commit = master
+
+PACKAGES += folsom_cowboy
+pkg_folsom_cowboy_name = folsom_cowboy
+pkg_folsom_cowboy_description = A Cowboy based Folsom HTTP Wrapper.
+pkg_folsom_cowboy_homepage = https://github.com/boundary/folsom_cowboy
+pkg_folsom_cowboy_fetch = git
+pkg_folsom_cowboy_repo = https://github.com/boundary/folsom_cowboy
+pkg_folsom_cowboy_commit = master
+
+PACKAGES += folsomite
+pkg_folsomite_name = folsomite
+pkg_folsomite_description = blow up your graphite / riemann server with folsom metrics
+pkg_folsomite_homepage = https://github.com/campanja/folsomite
+pkg_folsomite_fetch = git
+pkg_folsomite_repo = https://github.com/campanja/folsomite
+pkg_folsomite_commit = master
+
+PACKAGES += fs
+pkg_fs_name = fs
+pkg_fs_description = Erlang FileSystem Listener
+pkg_fs_homepage = https://github.com/synrc/fs
+pkg_fs_fetch = git
+pkg_fs_repo = https://github.com/synrc/fs
+pkg_fs_commit = master
+
+PACKAGES += fuse
+pkg_fuse_name = fuse
+pkg_fuse_description = A Circuit Breaker for Erlang
+pkg_fuse_homepage = https://github.com/jlouis/fuse
+pkg_fuse_fetch = git
+pkg_fuse_repo = https://github.com/jlouis/fuse
+pkg_fuse_commit = master
+
+PACKAGES += gcm
+pkg_gcm_name = gcm
+pkg_gcm_description = An Erlang application for Google Cloud Messaging
+pkg_gcm_homepage = https://github.com/pdincau/gcm-erlang
+pkg_gcm_fetch = git
+pkg_gcm_repo = https://github.com/pdincau/gcm-erlang
+pkg_gcm_commit = master
+
+PACKAGES += gcprof
+pkg_gcprof_name = gcprof
+pkg_gcprof_description = Garbage Collection profiler for Erlang
+pkg_gcprof_homepage = https://github.com/knutin/gcprof
+pkg_gcprof_fetch = git
+pkg_gcprof_repo = https://github.com/knutin/gcprof
+pkg_gcprof_commit = master
+
+PACKAGES += geas
+pkg_geas_name = geas
+pkg_geas_description = Guess Erlang Application Scattering
+pkg_geas_homepage = https://github.com/crownedgrouse/geas
+pkg_geas_fetch = git
+pkg_geas_repo = https://github.com/crownedgrouse/geas
+pkg_geas_commit = master
+
+PACKAGES += geef
+pkg_geef_name = geef
+pkg_geef_description = Git NEEEEF (Erlang NIF)
+pkg_geef_homepage = https://github.com/carlosmn/geef
+pkg_geef_fetch = git
+pkg_geef_repo = https://github.com/carlosmn/geef
+pkg_geef_commit = master
+
+PACKAGES += gen_coap
+pkg_gen_coap_name = gen_coap
+pkg_gen_coap_description = Generic Erlang CoAP Client/Server
+pkg_gen_coap_homepage = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_fetch = git
+pkg_gen_coap_repo = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_commit = master
+
+PACKAGES += gen_cycle
+pkg_gen_cycle_name = gen_cycle
+pkg_gen_cycle_description = Simple, generic OTP behaviour for recurring tasks
+pkg_gen_cycle_homepage = https://github.com/aerosol/gen_cycle
+pkg_gen_cycle_fetch = git
+pkg_gen_cycle_repo = https://github.com/aerosol/gen_cycle
+pkg_gen_cycle_commit = develop
+
+PACKAGES += gen_icmp
+pkg_gen_icmp_name = gen_icmp
+pkg_gen_icmp_description = Erlang interface to ICMP sockets
+pkg_gen_icmp_homepage = https://github.com/msantos/gen_icmp
+pkg_gen_icmp_fetch = git
+pkg_gen_icmp_repo = https://github.com/msantos/gen_icmp
+pkg_gen_icmp_commit = master
+
+PACKAGES += gen_leader
+pkg_gen_leader_name = gen_leader
+pkg_gen_leader_description = leader election behavior
+pkg_gen_leader_homepage = https://github.com/garret-smith/gen_leader_revival
+pkg_gen_leader_fetch = git
+pkg_gen_leader_repo = https://github.com/garret-smith/gen_leader_revival
+pkg_gen_leader_commit = master
+
+PACKAGES += gen_nb_server
+pkg_gen_nb_server_name = gen_nb_server
+pkg_gen_nb_server_description = OTP behavior for writing non-blocking servers
+pkg_gen_nb_server_homepage = https://github.com/kevsmith/gen_nb_server
+pkg_gen_nb_server_fetch = git
+pkg_gen_nb_server_repo = https://github.com/kevsmith/gen_nb_server
+pkg_gen_nb_server_commit = master
+
+PACKAGES += gen_paxos
+pkg_gen_paxos_name = gen_paxos
+pkg_gen_paxos_description = An Erlang/OTP-style implementation of the PAXOS distributed consensus protocol
+pkg_gen_paxos_homepage = https://github.com/gburd/gen_paxos
+pkg_gen_paxos_fetch = git
+pkg_gen_paxos_repo = https://github.com/gburd/gen_paxos
+pkg_gen_paxos_commit = master
+
+PACKAGES += gen_rpc
+pkg_gen_rpc_name = gen_rpc
+pkg_gen_rpc_description = A scalable RPC library for Erlang-VM based languages
+pkg_gen_rpc_homepage = https://github.com/priestjim/gen_rpc.git
+pkg_gen_rpc_fetch = git
+pkg_gen_rpc_repo = https://github.com/priestjim/gen_rpc.git
+pkg_gen_rpc_commit = master
+
+PACKAGES += gen_smtp
+pkg_gen_smtp_name = gen_smtp
+pkg_gen_smtp_description = A generic Erlang SMTP server and client that can be extended via callback modules
+pkg_gen_smtp_homepage = https://github.com/Vagabond/gen_smtp
+pkg_gen_smtp_fetch = git
+pkg_gen_smtp_repo = https://github.com/Vagabond/gen_smtp
+pkg_gen_smtp_commit = master
+
+PACKAGES += gen_tracker
+pkg_gen_tracker_name = gen_tracker
+pkg_gen_tracker_description = supervisor with ets handling of children and their metadata
+pkg_gen_tracker_homepage = https://github.com/erlyvideo/gen_tracker
+pkg_gen_tracker_fetch = git
+pkg_gen_tracker_repo = https://github.com/erlyvideo/gen_tracker
+pkg_gen_tracker_commit = master
+
+PACKAGES += gen_unix
+pkg_gen_unix_name = gen_unix
+pkg_gen_unix_description = Erlang Unix socket interface
+pkg_gen_unix_homepage = https://github.com/msantos/gen_unix
+pkg_gen_unix_fetch = git
+pkg_gen_unix_repo = https://github.com/msantos/gen_unix
+pkg_gen_unix_commit = master
+
+PACKAGES += geode
+pkg_geode_name = geode
+pkg_geode_description = geohash/proximity lookup in pure, uncut erlang.
+pkg_geode_homepage = https://github.com/bradfordw/geode
+pkg_geode_fetch = git
+pkg_geode_repo = https://github.com/bradfordw/geode
+pkg_geode_commit = master
+
+PACKAGES += getopt
+pkg_getopt_name = getopt
+pkg_getopt_description = Module to parse command line arguments using the GNU getopt syntax
+pkg_getopt_homepage = https://github.com/jcomellas/getopt
+pkg_getopt_fetch = git
+pkg_getopt_repo = https://github.com/jcomellas/getopt
+pkg_getopt_commit = master
+
+PACKAGES += gettext
+pkg_gettext_name = gettext
+pkg_gettext_description = Erlang internationalization library.
+pkg_gettext_homepage = https://github.com/etnt/gettext
+pkg_gettext_fetch = git
+pkg_gettext_repo = https://github.com/etnt/gettext
+pkg_gettext_commit = master
+
+PACKAGES += giallo
+pkg_giallo_name = giallo
+pkg_giallo_description = Small and flexible web framework on top of Cowboy
+pkg_giallo_homepage = https://github.com/kivra/giallo
+pkg_giallo_fetch = git
+pkg_giallo_repo = https://github.com/kivra/giallo
+pkg_giallo_commit = master
+
+PACKAGES += gin
+pkg_gin_name = gin
+pkg_gin_description = The guards «and» for Erlang parse_transform
+pkg_gin_homepage = https://github.com/mad-cocktail/gin
+pkg_gin_fetch = git
+pkg_gin_repo = https://github.com/mad-cocktail/gin
+pkg_gin_commit = master
+
+PACKAGES += gitty
+pkg_gitty_name = gitty
+pkg_gitty_description = Git access in erlang
+pkg_gitty_homepage = https://github.com/maxlapshin/gitty
+pkg_gitty_fetch = git
+pkg_gitty_repo = https://github.com/maxlapshin/gitty
+pkg_gitty_commit = master
+
+PACKAGES += gold_fever
+pkg_gold_fever_name = gold_fever
+pkg_gold_fever_description = A Treasure Hunt for Erlangers
+pkg_gold_fever_homepage = https://github.com/inaka/gold_fever
+pkg_gold_fever_fetch = git
+pkg_gold_fever_repo = https://github.com/inaka/gold_fever
+pkg_gold_fever_commit = master
+
+PACKAGES += gpb
+pkg_gpb_name = gpb
+pkg_gpb_description = A Google Protobuf implementation for Erlang
+pkg_gpb_homepage = https://github.com/tomas-abrahamsson/gpb
+pkg_gpb_fetch = git
+pkg_gpb_repo = https://github.com/tomas-abrahamsson/gpb
+pkg_gpb_commit = master
+
+PACKAGES += gproc
+pkg_gproc_name = gproc
+pkg_gproc_description = Extended process registry for Erlang
+pkg_gproc_homepage = https://github.com/uwiger/gproc
+pkg_gproc_fetch = git
+pkg_gproc_repo = https://github.com/uwiger/gproc
+pkg_gproc_commit = master
+
+PACKAGES += grapherl
+pkg_grapherl_name = grapherl
+pkg_grapherl_description = Create graphs of Erlang systems and programs
+pkg_grapherl_homepage = https://github.com/eproxus/grapherl
+pkg_grapherl_fetch = git
+pkg_grapherl_repo = https://github.com/eproxus/grapherl
+pkg_grapherl_commit = master
+
+PACKAGES += grpc
+pkg_grpc_name = grpc
+pkg_grpc_description = gRPC server in Erlang
+pkg_grpc_homepage = https://github.com/Bluehouse-Technology/grpc
+pkg_grpc_fetch = git
+pkg_grpc_repo = https://github.com/Bluehouse-Technology/grpc
+pkg_grpc_commit = master
+
+PACKAGES += grpc_client
+pkg_grpc_client_name = grpc_client
+pkg_grpc_client_description = gRPC client in Erlang
+pkg_grpc_client_homepage = https://github.com/Bluehouse-Technology/grpc_client
+pkg_grpc_client_fetch = git
+pkg_grpc_client_repo = https://github.com/Bluehouse-Technology/grpc_client
+pkg_grpc_client_commit = master
+
+PACKAGES += gun
+pkg_gun_name = gun
+pkg_gun_description = Asynchronous SPDY, HTTP and WebSocket client written in Erlang.
+pkg_gun_homepage = http://ninenines.eu
+pkg_gun_fetch = git
+pkg_gun_repo = https://github.com/ninenines/gun
+pkg_gun_commit = master
+
+PACKAGES += gut
+pkg_gut_name = gut
+pkg_gut_description = gut is a template printing, aka scaffolding, tool for Erlang. Like rails generate or yeoman
+pkg_gut_homepage = https://github.com/unbalancedparentheses/gut
+pkg_gut_fetch = git
+pkg_gut_repo = https://github.com/unbalancedparentheses/gut
+pkg_gut_commit = master
+
+PACKAGES += hackney
+pkg_hackney_name = hackney
+pkg_hackney_description = simple HTTP client in Erlang
+pkg_hackney_homepage = https://github.com/benoitc/hackney
+pkg_hackney_fetch = git
+pkg_hackney_repo = https://github.com/benoitc/hackney
+pkg_hackney_commit = master
+
+PACKAGES += hamcrest
+pkg_hamcrest_name = hamcrest
+pkg_hamcrest_description = Erlang port of Hamcrest
+pkg_hamcrest_homepage = https://github.com/hyperthunk/hamcrest-erlang
+pkg_hamcrest_fetch = git
+pkg_hamcrest_repo = https://github.com/hyperthunk/hamcrest-erlang
+pkg_hamcrest_commit = master
+
+PACKAGES += hanoidb
+pkg_hanoidb_name = hanoidb
+pkg_hanoidb_description = Erlang LSM BTree Storage
+pkg_hanoidb_homepage = https://github.com/krestenkrab/hanoidb
+pkg_hanoidb_fetch = git
+pkg_hanoidb_repo = https://github.com/krestenkrab/hanoidb
+pkg_hanoidb_commit = master
+
+PACKAGES += hottub
+pkg_hottub_name = hottub
+pkg_hottub_description = Permanent Erlang Worker Pool
+pkg_hottub_homepage = https://github.com/bfrog/hottub
+pkg_hottub_fetch = git
+pkg_hottub_repo = https://github.com/bfrog/hottub
+pkg_hottub_commit = master
+
+PACKAGES += hpack
+pkg_hpack_name = hpack
+pkg_hpack_description = HPACK Implementation for Erlang
+pkg_hpack_homepage = https://github.com/joedevivo/hpack
+pkg_hpack_fetch = git
+pkg_hpack_repo = https://github.com/joedevivo/hpack
+pkg_hpack_commit = master
+
+PACKAGES += hyper
+pkg_hyper_name = hyper
+pkg_hyper_description = Erlang implementation of HyperLogLog
+pkg_hyper_homepage = https://github.com/GameAnalytics/hyper
+pkg_hyper_fetch = git
+pkg_hyper_repo = https://github.com/GameAnalytics/hyper
+pkg_hyper_commit = master
+
+PACKAGES += i18n
+pkg_i18n_name = i18n
+pkg_i18n_description = International components for unicode from Erlang (unicode, date, string, number, format, locale, localization, transliteration, icu4e)
+pkg_i18n_homepage = https://github.com/erlang-unicode/i18n
+pkg_i18n_fetch = git
+pkg_i18n_repo = https://github.com/erlang-unicode/i18n
+pkg_i18n_commit = master
+
+PACKAGES += ibrowse
+pkg_ibrowse_name = ibrowse
+pkg_ibrowse_description = Erlang HTTP client
+pkg_ibrowse_homepage = https://github.com/cmullaparthi/ibrowse
+pkg_ibrowse_fetch = git
+pkg_ibrowse_repo = https://github.com/cmullaparthi/ibrowse
+pkg_ibrowse_commit = master
+
+PACKAGES += idna
+pkg_idna_name = idna
+pkg_idna_description = Erlang IDNA lib
+pkg_idna_homepage = https://github.com/benoitc/erlang-idna
+pkg_idna_fetch = git
+pkg_idna_repo = https://github.com/benoitc/erlang-idna
+pkg_idna_commit = master
+
+PACKAGES += ierlang
+pkg_ierlang_name = ierlang
+pkg_ierlang_description = An Erlang language kernel for IPython.
+pkg_ierlang_homepage = https://github.com/robbielynch/ierlang
+pkg_ierlang_fetch = git
+pkg_ierlang_repo = https://github.com/robbielynch/ierlang
+pkg_ierlang_commit = master
+
+PACKAGES += iota
+pkg_iota_name = iota
+pkg_iota_description = iota (Inter-dependency Objective Testing Apparatus) - a tool to enforce clean separation of responsibilities in Erlang code
+pkg_iota_homepage = https://github.com/jpgneves/iota
+pkg_iota_fetch = git
+pkg_iota_repo = https://github.com/jpgneves/iota
+pkg_iota_commit = master
+
+PACKAGES += irc_lib
+pkg_irc_lib_name = irc_lib
+pkg_irc_lib_description = Erlang irc client library
+pkg_irc_lib_homepage = https://github.com/OtpChatBot/irc_lib
+pkg_irc_lib_fetch = git
+pkg_irc_lib_repo = https://github.com/OtpChatBot/irc_lib
+pkg_irc_lib_commit = master
+
+PACKAGES += ircd
+pkg_ircd_name = ircd
+pkg_ircd_description = A pluggable IRC daemon application/library for Erlang.
+pkg_ircd_homepage = https://github.com/tonyg/erlang-ircd
+pkg_ircd_fetch = git
+pkg_ircd_repo = https://github.com/tonyg/erlang-ircd
+pkg_ircd_commit = master
+
+PACKAGES += iris
+pkg_iris_name = iris
+pkg_iris_description = Iris Erlang binding
+pkg_iris_homepage = https://github.com/project-iris/iris-erl
+pkg_iris_fetch = git
+pkg_iris_repo = https://github.com/project-iris/iris-erl
+pkg_iris_commit = master
+
+PACKAGES += iso8601
+pkg_iso8601_name = iso8601
+pkg_iso8601_description = Erlang ISO 8601 date formatter/parser
+pkg_iso8601_homepage = https://github.com/seansawyer/erlang_iso8601
+pkg_iso8601_fetch = git
+pkg_iso8601_repo = https://github.com/seansawyer/erlang_iso8601
+pkg_iso8601_commit = master
+
+PACKAGES += jamdb_sybase
+pkg_jamdb_sybase_name = jamdb_sybase
+pkg_jamdb_sybase_description = Erlang driver for SAP Sybase ASE
+pkg_jamdb_sybase_homepage = https://github.com/erlangbureau/jamdb_sybase
+pkg_jamdb_sybase_fetch = git
+pkg_jamdb_sybase_repo = https://github.com/erlangbureau/jamdb_sybase
+pkg_jamdb_sybase_commit = master
+
+PACKAGES += jerg
+pkg_jerg_name = jerg
+pkg_jerg_description = JSON Schema to Erlang Records Generator
+pkg_jerg_homepage = https://github.com/ddossot/jerg
+pkg_jerg_fetch = git
+pkg_jerg_repo = https://github.com/ddossot/jerg
+pkg_jerg_commit = master
+
+PACKAGES += jesse
+pkg_jesse_name = jesse
+pkg_jesse_description = jesse (JSON Schema Erlang) is an implementation of a JSON Schema validator for Erlang.
+pkg_jesse_homepage = https://github.com/for-GET/jesse
+pkg_jesse_fetch = git
+pkg_jesse_repo = https://github.com/for-GET/jesse
+pkg_jesse_commit = master
+
+PACKAGES += jiffy
+pkg_jiffy_name = jiffy
+pkg_jiffy_description = JSON NIFs for Erlang.
+pkg_jiffy_homepage = https://github.com/davisp/jiffy
+pkg_jiffy_fetch = git
+pkg_jiffy_repo = https://github.com/davisp/jiffy
+pkg_jiffy_commit = master
+
+PACKAGES += jiffy_v
+pkg_jiffy_v_name = jiffy_v
+pkg_jiffy_v_description = JSON validation utility
+pkg_jiffy_v_homepage = https://github.com/shizzard/jiffy-v
+pkg_jiffy_v_fetch = git
+pkg_jiffy_v_repo = https://github.com/shizzard/jiffy-v
+pkg_jiffy_v_commit = master
+
+PACKAGES += jobs
+pkg_jobs_name = jobs
+pkg_jobs_description = a Job scheduler for load regulation
+pkg_jobs_homepage = https://github.com/esl/jobs
+pkg_jobs_fetch = git
+pkg_jobs_repo = https://github.com/esl/jobs
+pkg_jobs_commit = master
+
+PACKAGES += joxa
+pkg_joxa_name = joxa
+pkg_joxa_description = A Modern Lisp for the Erlang VM
+pkg_joxa_homepage = https://github.com/joxa/joxa
+pkg_joxa_fetch = git
+pkg_joxa_repo = https://github.com/joxa/joxa
+pkg_joxa_commit = master
+
+PACKAGES += json
+pkg_json_name = json
+pkg_json_description = a high level json library for erlang (17.0+)
+pkg_json_homepage = https://github.com/talentdeficit/json
+pkg_json_fetch = git
+pkg_json_repo = https://github.com/talentdeficit/json
+pkg_json_commit = master
+
+PACKAGES += json_rec
+pkg_json_rec_name = json_rec
+pkg_json_rec_description = JSON to erlang record
+pkg_json_rec_homepage = https://github.com/justinkirby/json_rec
+pkg_json_rec_fetch = git
+pkg_json_rec_repo = https://github.com/justinkirby/json_rec
+pkg_json_rec_commit = master
+
+PACKAGES += jsone
+pkg_jsone_name = jsone
+pkg_jsone_description = An Erlang library for encoding, decoding JSON data.
+pkg_jsone_homepage = https://github.com/sile/jsone.git
+pkg_jsone_fetch = git
+pkg_jsone_repo = https://github.com/sile/jsone.git
+pkg_jsone_commit = master
+
+PACKAGES += jsonerl
+pkg_jsonerl_name = jsonerl
+pkg_jsonerl_description = yet another but slightly different erlang <-> json encoder/decoder
+pkg_jsonerl_homepage = https://github.com/lambder/jsonerl
+pkg_jsonerl_fetch = git
+pkg_jsonerl_repo = https://github.com/lambder/jsonerl
+pkg_jsonerl_commit = master
+
+PACKAGES += jsonpath
+pkg_jsonpath_name = jsonpath
+pkg_jsonpath_description = Fast Erlang JSON data retrieval and updates via javascript-like notation
+pkg_jsonpath_homepage = https://github.com/GeneStevens/jsonpath
+pkg_jsonpath_fetch = git
+pkg_jsonpath_repo = https://github.com/GeneStevens/jsonpath
+pkg_jsonpath_commit = master
+
+PACKAGES += jsonx
+pkg_jsonx_name = jsonx
+pkg_jsonx_description = JSONX is an Erlang library for efficient JSON decoding and encoding, written in C.
+pkg_jsonx_homepage = https://github.com/iskra/jsonx
+pkg_jsonx_fetch = git
+pkg_jsonx_repo = https://github.com/iskra/jsonx
+pkg_jsonx_commit = master
+
+PACKAGES += jsx
+pkg_jsx_name = jsx
+pkg_jsx_description = An Erlang application for consuming, producing and manipulating JSON.
+pkg_jsx_homepage = https://github.com/talentdeficit/jsx
+pkg_jsx_fetch = git
+pkg_jsx_repo = https://github.com/talentdeficit/jsx
+pkg_jsx_commit = master
+
+PACKAGES += kafka
+pkg_kafka_name = kafka
+pkg_kafka_description = Kafka consumer and producer in Erlang
+pkg_kafka_homepage = https://github.com/wooga/kafka-erlang
+pkg_kafka_fetch = git
+pkg_kafka_repo = https://github.com/wooga/kafka-erlang
+pkg_kafka_commit = master
+
+PACKAGES += kafka_protocol
+pkg_kafka_protocol_name = kafka_protocol
+pkg_kafka_protocol_description = Kafka protocol Erlang library
+pkg_kafka_protocol_homepage = https://github.com/klarna/kafka_protocol
+pkg_kafka_protocol_fetch = git
+pkg_kafka_protocol_repo = https://github.com/klarna/kafka_protocol.git
+pkg_kafka_protocol_commit = master
+
+PACKAGES += kai
+pkg_kai_name = kai
+pkg_kai_description = DHT storage by Takeshi Inoue
+pkg_kai_homepage = https://github.com/synrc/kai
+pkg_kai_fetch = git
+pkg_kai_repo = https://github.com/synrc/kai
+pkg_kai_commit = master
+
+PACKAGES += katja
+pkg_katja_name = katja
+pkg_katja_description = A simple Riemann client written in Erlang.
+pkg_katja_homepage = https://github.com/nifoc/katja
+pkg_katja_fetch = git
+pkg_katja_repo = https://github.com/nifoc/katja
+pkg_katja_commit = master
+
+PACKAGES += kdht
+pkg_kdht_name = kdht
+pkg_kdht_description = kdht is an erlang DHT implementation
+pkg_kdht_homepage = https://github.com/kevinlynx/kdht
+pkg_kdht_fetch = git
+pkg_kdht_repo = https://github.com/kevinlynx/kdht
+pkg_kdht_commit = master
+
+PACKAGES += key2value
+pkg_key2value_name = key2value
+pkg_key2value_description = Erlang 2-way map
+pkg_key2value_homepage = https://github.com/okeuday/key2value
+pkg_key2value_fetch = git
+pkg_key2value_repo = https://github.com/okeuday/key2value
+pkg_key2value_commit = master
+
+PACKAGES += keys1value
+pkg_keys1value_name = keys1value
+pkg_keys1value_description = Erlang set associative map for key lists
+pkg_keys1value_homepage = https://github.com/okeuday/keys1value
+pkg_keys1value_fetch = git
+pkg_keys1value_repo = https://github.com/okeuday/keys1value
+pkg_keys1value_commit = master
+
+PACKAGES += kinetic
+pkg_kinetic_name = kinetic
+pkg_kinetic_description = Erlang Kinesis Client
+pkg_kinetic_homepage = https://github.com/AdRoll/kinetic
+pkg_kinetic_fetch = git
+pkg_kinetic_repo = https://github.com/AdRoll/kinetic
+pkg_kinetic_commit = master
+
+PACKAGES += kjell
+pkg_kjell_name = kjell
+pkg_kjell_description = Erlang Shell
+pkg_kjell_homepage = https://github.com/karlll/kjell
+pkg_kjell_fetch = git
+pkg_kjell_repo = https://github.com/karlll/kjell
+pkg_kjell_commit = master
+
+PACKAGES += kraken
+pkg_kraken_name = kraken
+pkg_kraken_description = Distributed Pubsub Server for Realtime Apps
+pkg_kraken_homepage = https://github.com/Asana/kraken
+pkg_kraken_fetch = git
+pkg_kraken_repo = https://github.com/Asana/kraken
+pkg_kraken_commit = master
+
+PACKAGES += kucumberl
+pkg_kucumberl_name = kucumberl
+pkg_kucumberl_description = A pure-Erlang, open-source implementation of Cucumber
+pkg_kucumberl_homepage = https://github.com/openshine/kucumberl
+pkg_kucumberl_fetch = git
+pkg_kucumberl_repo = https://github.com/openshine/kucumberl
+pkg_kucumberl_commit = master
+
+PACKAGES += kvc
+pkg_kvc_name = kvc
+pkg_kvc_description = KVC - Key Value Coding for Erlang data structures
+pkg_kvc_homepage = https://github.com/etrepum/kvc
+pkg_kvc_fetch = git
+pkg_kvc_repo = https://github.com/etrepum/kvc
+pkg_kvc_commit = master
+
+PACKAGES += kvlists
+pkg_kvlists_name = kvlists
+pkg_kvlists_description = Lists of key-value pairs (decoded JSON) in Erlang
+pkg_kvlists_homepage = https://github.com/jcomellas/kvlists
+pkg_kvlists_fetch = git
+pkg_kvlists_repo = https://github.com/jcomellas/kvlists
+pkg_kvlists_commit = master
+
+PACKAGES += kvs
+pkg_kvs_name = kvs
+pkg_kvs_description = Container and Iterator
+pkg_kvs_homepage = https://github.com/synrc/kvs
+pkg_kvs_fetch = git
+pkg_kvs_repo = https://github.com/synrc/kvs
+pkg_kvs_commit = master
+
+PACKAGES += lager
+pkg_lager_name = lager
+pkg_lager_description = A logging framework for Erlang/OTP.
+pkg_lager_homepage = https://github.com/erlang-lager/lager
+pkg_lager_fetch = git
+pkg_lager_repo = https://github.com/erlang-lager/lager
+pkg_lager_commit = master
+
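+# Illustrative note (not part of the upstream index): the pkg_*_description
+# and related fields above are what the erlang.mk `search` target matches
+# against, so a package can be located by keyword from a project directory.
+# A minimal sketch, assuming a standard erlang.mk setup:
+#
+#     $ make search q=lager
+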
+PACKAGES += lager_amqp_backend
+pkg_lager_amqp_backend_name = lager_amqp_backend
+pkg_lager_amqp_backend_description = AMQP RabbitMQ Lager backend
+pkg_lager_amqp_backend_homepage = https://github.com/jbrisbin/lager_amqp_backend
+pkg_lager_amqp_backend_fetch = git
+pkg_lager_amqp_backend_repo = https://github.com/jbrisbin/lager_amqp_backend
+pkg_lager_amqp_backend_commit = master
+
+PACKAGES += lager_syslog
+pkg_lager_syslog_name = lager_syslog
+pkg_lager_syslog_description = Syslog backend for lager
+pkg_lager_syslog_homepage = https://github.com/erlang-lager/lager_syslog
+pkg_lager_syslog_fetch = git
+pkg_lager_syslog_repo = https://github.com/erlang-lager/lager_syslog
+pkg_lager_syslog_commit = master
+
+PACKAGES += lambdapad
+pkg_lambdapad_name = lambdapad
+pkg_lambdapad_description = Static site generator using Erlang. Yes, Erlang.
+pkg_lambdapad_homepage = https://github.com/gar1t/lambdapad
+pkg_lambdapad_fetch = git
+pkg_lambdapad_repo = https://github.com/gar1t/lambdapad
+pkg_lambdapad_commit = master
+
+PACKAGES += lasp
+pkg_lasp_name = lasp
+pkg_lasp_description = A Language for Distributed, Eventually Consistent Computations
+pkg_lasp_homepage = http://lasp-lang.org/
+pkg_lasp_fetch = git
+pkg_lasp_repo = https://github.com/lasp-lang/lasp
+pkg_lasp_commit = master
+
+PACKAGES += lasse
+pkg_lasse_name = lasse
+pkg_lasse_description = SSE handler for Cowboy
+pkg_lasse_homepage = https://github.com/inaka/lasse
+pkg_lasse_fetch = git
+pkg_lasse_repo = https://github.com/inaka/lasse
+pkg_lasse_commit = master
+
+PACKAGES += ldap
+pkg_ldap_name = ldap
+pkg_ldap_description = LDAP server written in Erlang
+pkg_ldap_homepage = https://github.com/spawnproc/ldap
+pkg_ldap_fetch = git
+pkg_ldap_repo = https://github.com/spawnproc/ldap
+pkg_ldap_commit = master
+
+PACKAGES += lethink
+pkg_lethink_name = lethink
+pkg_lethink_description = Erlang driver for RethinkDB
+pkg_lethink_homepage = https://github.com/taybin/lethink
+pkg_lethink_fetch = git
+pkg_lethink_repo = https://github.com/taybin/lethink
+pkg_lethink_commit = master
+
+PACKAGES += lfe
+pkg_lfe_name = lfe
+pkg_lfe_description = Lisp Flavoured Erlang (LFE)
+pkg_lfe_homepage = https://github.com/rvirding/lfe
+pkg_lfe_fetch = git
+pkg_lfe_repo = https://github.com/rvirding/lfe
+pkg_lfe_commit = master
+
+PACKAGES += ling
+pkg_ling_name = ling
+pkg_ling_description = Erlang on Xen
+pkg_ling_homepage = https://github.com/cloudozer/ling
+pkg_ling_fetch = git
+pkg_ling_repo = https://github.com/cloudozer/ling
+pkg_ling_commit = master
+
+PACKAGES += live
+pkg_live_name = live
+pkg_live_description = Automated module and configuration reloader.
+pkg_live_homepage = http://ninenines.eu
+pkg_live_fetch = git
+pkg_live_repo = https://github.com/ninenines/live
+pkg_live_commit = master
+
+PACKAGES += lmq
+pkg_lmq_name = lmq
+pkg_lmq_description = Lightweight Message Queue
+pkg_lmq_homepage = https://github.com/iij/lmq
+pkg_lmq_fetch = git
+pkg_lmq_repo = https://github.com/iij/lmq
+pkg_lmq_commit = master
+
+PACKAGES += locker
+pkg_locker_name = locker
+pkg_locker_description = Atomic distributed 'check and set' for short-lived keys
+pkg_locker_homepage = https://github.com/wooga/locker
+pkg_locker_fetch = git
+pkg_locker_repo = https://github.com/wooga/locker
+pkg_locker_commit = master
+
+PACKAGES += locks
+pkg_locks_name = locks
+pkg_locks_description = A scalable, deadlock-resolving resource locker
+pkg_locks_homepage = https://github.com/uwiger/locks
+pkg_locks_fetch = git
+pkg_locks_repo = https://github.com/uwiger/locks
+pkg_locks_commit = master
+
+PACKAGES += log4erl
+pkg_log4erl_name = log4erl
+pkg_log4erl_description = A logger for erlang in the spirit of Log4J.
+pkg_log4erl_homepage = https://github.com/ahmednawras/log4erl
+pkg_log4erl_fetch = git
+pkg_log4erl_repo = https://github.com/ahmednawras/log4erl
+pkg_log4erl_commit = master
+
+PACKAGES += lol
+pkg_lol_name = lol
+pkg_lol_description = Lisp on erLang, and programming is fun again
+pkg_lol_homepage = https://github.com/b0oh/lol
+pkg_lol_fetch = git
+pkg_lol_repo = https://github.com/b0oh/lol
+pkg_lol_commit = master
+
+PACKAGES += lucid
+pkg_lucid_name = lucid
+pkg_lucid_description = HTTP/2 server written in Erlang
+pkg_lucid_homepage = https://github.com/tatsuhiro-t/lucid
+pkg_lucid_fetch = git
+pkg_lucid_repo = https://github.com/tatsuhiro-t/lucid
+pkg_lucid_commit = master
+
+PACKAGES += luerl
+pkg_luerl_name = luerl
+pkg_luerl_description = Lua in Erlang
+pkg_luerl_homepage = https://github.com/rvirding/luerl
+pkg_luerl_fetch = git
+pkg_luerl_repo = https://github.com/rvirding/luerl
+pkg_luerl_commit = develop
+
+PACKAGES += luwak
+pkg_luwak_name = luwak
+pkg_luwak_description = Large-object storage interface for Riak
+pkg_luwak_homepage = https://github.com/basho/luwak
+pkg_luwak_fetch = git
+pkg_luwak_repo = https://github.com/basho/luwak
+pkg_luwak_commit = master
+
+PACKAGES += lux
+pkg_lux_name = lux
+pkg_lux_description = Lux (LUcid eXpect scripting) simplifies test automation and provides an Expect-style execution of commands
+pkg_lux_homepage = https://github.com/hawk/lux
+pkg_lux_fetch = git
+pkg_lux_repo = https://github.com/hawk/lux
+pkg_lux_commit = master
+
+PACKAGES += machi
+pkg_machi_name = machi
+pkg_machi_description = Machi file store
+pkg_machi_homepage = https://github.com/basho/machi
+pkg_machi_fetch = git
+pkg_machi_repo = https://github.com/basho/machi
+pkg_machi_commit = master
+
+PACKAGES += mad
+pkg_mad_name = mad
+pkg_mad_description = Small and Fast Rebar Replacement
+pkg_mad_homepage = https://github.com/synrc/mad
+pkg_mad_fetch = git
+pkg_mad_repo = https://github.com/synrc/mad
+pkg_mad_commit = master
+
+PACKAGES += marina
+pkg_marina_name = marina
+pkg_marina_description = Non-blocking Erlang Cassandra CQL3 client
+pkg_marina_homepage = https://github.com/lpgauth/marina
+pkg_marina_fetch = git
+pkg_marina_repo = https://github.com/lpgauth/marina
+pkg_marina_commit = master
+
+PACKAGES += mavg
+pkg_mavg_name = mavg
+pkg_mavg_description = Erlang :: Exponential moving average library
+pkg_mavg_homepage = https://github.com/EchoTeam/mavg
+pkg_mavg_fetch = git
+pkg_mavg_repo = https://github.com/EchoTeam/mavg
+pkg_mavg_commit = master
+
+PACKAGES += mc_erl
+pkg_mc_erl_name = mc_erl
+pkg_mc_erl_description = mc-erl is a server for Minecraft 1.4.7 written in Erlang.
+pkg_mc_erl_homepage = https://github.com/clonejo/mc-erl
+pkg_mc_erl_fetch = git
+pkg_mc_erl_repo = https://github.com/clonejo/mc-erl
+pkg_mc_erl_commit = master
+
+PACKAGES += mcd
+pkg_mcd_name = mcd
+pkg_mcd_description = Fast memcached protocol client in pure Erlang
+pkg_mcd_homepage = https://github.com/EchoTeam/mcd
+pkg_mcd_fetch = git
+pkg_mcd_repo = https://github.com/EchoTeam/mcd
+pkg_mcd_commit = master
+
+PACKAGES += mcerlang
+pkg_mcerlang_name = mcerlang
+pkg_mcerlang_description = The McErlang model checker for Erlang
+pkg_mcerlang_homepage = https://github.com/fredlund/McErlang
+pkg_mcerlang_fetch = git
+pkg_mcerlang_repo = https://github.com/fredlund/McErlang
+pkg_mcerlang_commit = master
+
+PACKAGES += meck
+pkg_meck_name = meck
+pkg_meck_description = A mocking library for Erlang
+pkg_meck_homepage = https://github.com/eproxus/meck
+pkg_meck_fetch = git
+pkg_meck_repo = https://github.com/eproxus/meck
+pkg_meck_commit = master
+
+PACKAGES += mekao
+pkg_mekao_name = mekao
+pkg_mekao_description = SQL constructor
+pkg_mekao_homepage = https://github.com/ddosia/mekao
+pkg_mekao_fetch = git
+pkg_mekao_repo = https://github.com/ddosia/mekao
+pkg_mekao_commit = master
+
+PACKAGES += memo
+pkg_memo_name = memo
+pkg_memo_description = Erlang memoization server
+pkg_memo_homepage = https://github.com/tuncer/memo
+pkg_memo_fetch = git
+pkg_memo_repo = https://github.com/tuncer/memo
+pkg_memo_commit = master
+
+PACKAGES += merge_index
+pkg_merge_index_name = merge_index
+pkg_merge_index_description = MergeIndex is an Erlang library for storing ordered sets on disk. It is very similar to an SSTable (in Google's Bigtable) or an HFile (in Hadoop).
+pkg_merge_index_homepage = https://github.com/basho/merge_index
+pkg_merge_index_fetch = git
+pkg_merge_index_repo = https://github.com/basho/merge_index
+pkg_merge_index_commit = master
+
+PACKAGES += merl
+pkg_merl_name = merl
+pkg_merl_description = Metaprogramming in Erlang
+pkg_merl_homepage = https://github.com/richcarl/merl
+pkg_merl_fetch = git
+pkg_merl_repo = https://github.com/richcarl/merl
+pkg_merl_commit = master
+
+PACKAGES += mimerl
+pkg_mimerl_name = mimerl
+pkg_mimerl_description = library to handle mimetypes
+pkg_mimerl_homepage = https://github.com/benoitc/mimerl
+pkg_mimerl_fetch = git
+pkg_mimerl_repo = https://github.com/benoitc/mimerl
+pkg_mimerl_commit = master
+
+PACKAGES += mimetypes
+pkg_mimetypes_name = mimetypes
+pkg_mimetypes_description = Erlang MIME types library
+pkg_mimetypes_homepage = https://github.com/spawngrid/mimetypes
+pkg_mimetypes_fetch = git
+pkg_mimetypes_repo = https://github.com/spawngrid/mimetypes
+pkg_mimetypes_commit = master
+
+PACKAGES += mixer
+pkg_mixer_name = mixer
+pkg_mixer_description = Mix in functions from other modules
+pkg_mixer_homepage = https://github.com/chef/mixer
+pkg_mixer_fetch = git
+pkg_mixer_repo = https://github.com/chef/mixer
+pkg_mixer_commit = master
+
+PACKAGES += mochiweb
+pkg_mochiweb_name = mochiweb
+pkg_mochiweb_description = MochiWeb is an Erlang library for building lightweight HTTP servers.
+pkg_mochiweb_homepage = https://github.com/mochi/mochiweb
+pkg_mochiweb_fetch = git
+pkg_mochiweb_repo = https://github.com/mochi/mochiweb
+pkg_mochiweb_commit = master
+
+PACKAGES += mochiweb_xpath
+pkg_mochiweb_xpath_name = mochiweb_xpath
+pkg_mochiweb_xpath_description = XPath support for mochiweb's html parser
+pkg_mochiweb_xpath_homepage = https://github.com/retnuh/mochiweb_xpath
+pkg_mochiweb_xpath_fetch = git
+pkg_mochiweb_xpath_repo = https://github.com/retnuh/mochiweb_xpath
+pkg_mochiweb_xpath_commit = master
+
+PACKAGES += mockgyver
+pkg_mockgyver_name = mockgyver
+pkg_mockgyver_description = A mocking library for Erlang
+pkg_mockgyver_homepage = https://github.com/klajo/mockgyver
+pkg_mockgyver_fetch = git
+pkg_mockgyver_repo = https://github.com/klajo/mockgyver
+pkg_mockgyver_commit = master
+
+PACKAGES += modlib
+pkg_modlib_name = modlib
+pkg_modlib_description = Web framework based on Erlang's inets httpd
+pkg_modlib_homepage = https://github.com/gar1t/modlib
+pkg_modlib_fetch = git
+pkg_modlib_repo = https://github.com/gar1t/modlib
+pkg_modlib_commit = master
+
+PACKAGES += mongodb
+pkg_mongodb_name = mongodb
+pkg_mongodb_description = MongoDB driver for Erlang
+pkg_mongodb_homepage = https://github.com/comtihon/mongodb-erlang
+pkg_mongodb_fetch = git
+pkg_mongodb_repo = https://github.com/comtihon/mongodb-erlang
+pkg_mongodb_commit = master
+
+PACKAGES += mongooseim
+pkg_mongooseim_name = mongooseim
+pkg_mongooseim_description = Jabber / XMPP server with a focus on performance and scalability, by Erlang Solutions
+pkg_mongooseim_homepage = https://www.erlang-solutions.com/products/mongooseim-massively-scalable-ejabberd-platform
+pkg_mongooseim_fetch = git
+pkg_mongooseim_repo = https://github.com/esl/MongooseIM
+pkg_mongooseim_commit = master
+
+PACKAGES += moyo
+pkg_moyo_name = moyo
+pkg_moyo_description = Erlang utility functions library
+pkg_moyo_homepage = https://github.com/dwango/moyo
+pkg_moyo_fetch = git
+pkg_moyo_repo = https://github.com/dwango/moyo
+pkg_moyo_commit = master
+
+PACKAGES += msgpack
+pkg_msgpack_name = msgpack
+pkg_msgpack_description = MessagePack (de)serializer implementation for Erlang
+pkg_msgpack_homepage = https://github.com/msgpack/msgpack-erlang
+pkg_msgpack_fetch = git
+pkg_msgpack_repo = https://github.com/msgpack/msgpack-erlang
+pkg_msgpack_commit = master
+
+PACKAGES += mu2
+pkg_mu2_name = mu2
+pkg_mu2_description = Erlang mutation testing tool
+pkg_mu2_homepage = https://github.com/ramsay-t/mu2
+pkg_mu2_fetch = git
+pkg_mu2_repo = https://github.com/ramsay-t/mu2
+pkg_mu2_commit = master
+
+PACKAGES += mustache
+pkg_mustache_name = mustache
+pkg_mustache_description = Mustache template engine for Erlang.
+pkg_mustache_homepage = https://github.com/mojombo/mustache.erl
+pkg_mustache_fetch = git
+pkg_mustache_repo = https://github.com/mojombo/mustache.erl
+pkg_mustache_commit = master
+
+PACKAGES += myproto
+pkg_myproto_name = myproto
+pkg_myproto_description = MySQL Server Protocol in Erlang
+pkg_myproto_homepage = https://github.com/altenwald/myproto
+pkg_myproto_fetch = git
+pkg_myproto_repo = https://github.com/altenwald/myproto
+pkg_myproto_commit = master
+
+PACKAGES += mysql
+pkg_mysql_name = mysql
+pkg_mysql_description = MySQL client library for Erlang/OTP
+pkg_mysql_homepage = https://github.com/mysql-otp/mysql-otp
+pkg_mysql_fetch = git
+pkg_mysql_repo = https://github.com/mysql-otp/mysql-otp
+pkg_mysql_commit = 1.5.1
+
+PACKAGES += n2o
+pkg_n2o_name = n2o
+pkg_n2o_description = WebSocket Application Server
+pkg_n2o_homepage = https://github.com/5HT/n2o
+pkg_n2o_fetch = git
+pkg_n2o_repo = https://github.com/5HT/n2o
+pkg_n2o_commit = master
+
+PACKAGES += nat_upnp
+pkg_nat_upnp_name = nat_upnp
+pkg_nat_upnp_description = Erlang library to map your internal port to an external one using UPnP IGD
+pkg_nat_upnp_homepage = https://github.com/benoitc/nat_upnp
+pkg_nat_upnp_fetch = git
+pkg_nat_upnp_repo = https://github.com/benoitc/nat_upnp
+pkg_nat_upnp_commit = master
+
+PACKAGES += neo4j
+pkg_neo4j_name = neo4j
+pkg_neo4j_description = Erlang client library for Neo4J.
+pkg_neo4j_homepage = https://github.com/dmitriid/neo4j-erlang
+pkg_neo4j_fetch = git
+pkg_neo4j_repo = https://github.com/dmitriid/neo4j-erlang
+pkg_neo4j_commit = master
+
+PACKAGES += neotoma
+pkg_neotoma_name = neotoma
+pkg_neotoma_description = Erlang library and packrat parser-generator for parsing expression grammars.
+pkg_neotoma_homepage = https://github.com/seancribbs/neotoma
+pkg_neotoma_fetch = git
+pkg_neotoma_repo = https://github.com/seancribbs/neotoma
+pkg_neotoma_commit = master
+
+PACKAGES += newrelic
+pkg_newrelic_name = newrelic
+pkg_newrelic_description = Erlang library for sending metrics to New Relic
+pkg_newrelic_homepage = https://github.com/wooga/newrelic-erlang
+pkg_newrelic_fetch = git
+pkg_newrelic_repo = https://github.com/wooga/newrelic-erlang
+pkg_newrelic_commit = master
+
+PACKAGES += nifty
+pkg_nifty_name = nifty
+pkg_nifty_description = Erlang NIF wrapper generator
+pkg_nifty_homepage = https://github.com/parapluu/nifty
+pkg_nifty_fetch = git
+pkg_nifty_repo = https://github.com/parapluu/nifty
+pkg_nifty_commit = master
+
+PACKAGES += nitrogen_core
+pkg_nitrogen_core_name = nitrogen_core
+pkg_nitrogen_core_description = The core Nitrogen library.
+pkg_nitrogen_core_homepage = http://nitrogenproject.com/
+pkg_nitrogen_core_fetch = git
+pkg_nitrogen_core_repo = https://github.com/nitrogen/nitrogen_core
+pkg_nitrogen_core_commit = master
+
+PACKAGES += nkbase
+pkg_nkbase_name = nkbase
+pkg_nkbase_description = NkBASE distributed database
+pkg_nkbase_homepage = https://github.com/Nekso/nkbase
+pkg_nkbase_fetch = git
+pkg_nkbase_repo = https://github.com/Nekso/nkbase
+pkg_nkbase_commit = develop
+
+PACKAGES += nkdocker
+pkg_nkdocker_name = nkdocker
+pkg_nkdocker_description = Erlang Docker client
+pkg_nkdocker_homepage = https://github.com/Nekso/nkdocker
+pkg_nkdocker_fetch = git
+pkg_nkdocker_repo = https://github.com/Nekso/nkdocker
+pkg_nkdocker_commit = master
+
+PACKAGES += nkpacket
+pkg_nkpacket_name = nkpacket
+pkg_nkpacket_description = Generic Erlang transport layer
+pkg_nkpacket_homepage = https://github.com/Nekso/nkpacket
+pkg_nkpacket_fetch = git
+pkg_nkpacket_repo = https://github.com/Nekso/nkpacket
+pkg_nkpacket_commit = master
+
+PACKAGES += nksip
+pkg_nksip_name = nksip
+pkg_nksip_description = Erlang SIP application server
+pkg_nksip_homepage = https://github.com/kalta/nksip
+pkg_nksip_fetch = git
+pkg_nksip_repo = https://github.com/kalta/nksip
+pkg_nksip_commit = master
+
+PACKAGES += nodefinder
+pkg_nodefinder_name = nodefinder
+pkg_nodefinder_description = automatic node discovery via UDP multicast
+pkg_nodefinder_homepage = https://github.com/erlanger/nodefinder
+pkg_nodefinder_fetch = git
+pkg_nodefinder_repo = https://github.com/okeuday/nodefinder
+pkg_nodefinder_commit = master
+
+PACKAGES += nprocreg
+pkg_nprocreg_name = nprocreg
+pkg_nprocreg_description = Minimal Distributed Erlang Process Registry
+pkg_nprocreg_homepage = http://nitrogenproject.com/
+pkg_nprocreg_fetch = git
+pkg_nprocreg_repo = https://github.com/nitrogen/nprocreg
+pkg_nprocreg_commit = master
+
+PACKAGES += oauth
+pkg_oauth_name = oauth
+pkg_oauth_description = An Erlang OAuth 1.0 implementation
+pkg_oauth_homepage = https://github.com/tim/erlang-oauth
+pkg_oauth_fetch = git
+pkg_oauth_repo = https://github.com/tim/erlang-oauth
+pkg_oauth_commit = master
+
+PACKAGES += oauth2
+pkg_oauth2_name = oauth2
+pkg_oauth2_description = Erlang OAuth2 implementation
+pkg_oauth2_homepage = https://github.com/kivra/oauth2
+pkg_oauth2_fetch = git
+pkg_oauth2_repo = https://github.com/kivra/oauth2
+pkg_oauth2_commit = master
+
+PACKAGES += observer_cli
+pkg_observer_cli_name = observer_cli
+pkg_observer_cli_description = Visualize Erlang/Elixir Nodes On The Command Line
+pkg_observer_cli_homepage = http://zhongwencool.github.io/observer_cli
+pkg_observer_cli_fetch = git
+pkg_observer_cli_repo = https://github.com/zhongwencool/observer_cli
+pkg_observer_cli_commit = master
+
+PACKAGES += octopus
+pkg_octopus_name = octopus
+pkg_octopus_description = Small and flexible pool manager written in Erlang
+pkg_octopus_homepage = https://github.com/erlangbureau/octopus
+pkg_octopus_fetch = git
+pkg_octopus_repo = https://github.com/erlangbureau/octopus
+pkg_octopus_commit = master
+
+PACKAGES += of_protocol
+pkg_of_protocol_name = of_protocol
+pkg_of_protocol_description = OpenFlow Protocol Library for Erlang
+pkg_of_protocol_homepage = https://github.com/FlowForwarding/of_protocol
+pkg_of_protocol_fetch = git
+pkg_of_protocol_repo = https://github.com/FlowForwarding/of_protocol
+pkg_of_protocol_commit = master
+
+PACKAGES += opencouch
+pkg_opencouch_name = couch
+pkg_opencouch_description = An embeddable document-oriented database compatible with Apache CouchDB
+pkg_opencouch_homepage = https://github.com/benoitc/opencouch
+pkg_opencouch_fetch = git
+pkg_opencouch_repo = https://github.com/benoitc/opencouch
+pkg_opencouch_commit = master
+
+PACKAGES += openflow
+pkg_openflow_name = openflow
+pkg_openflow_description = An OpenFlow controller written in pure erlang
+pkg_openflow_homepage = https://github.com/renatoaguiar/erlang-openflow
+pkg_openflow_fetch = git
+pkg_openflow_repo = https://github.com/renatoaguiar/erlang-openflow
+pkg_openflow_commit = master
+
+PACKAGES += openid
+pkg_openid_name = openid
+pkg_openid_description = Erlang OpenID
+pkg_openid_homepage = https://github.com/brendonh/erl_openid
+pkg_openid_fetch = git
+pkg_openid_repo = https://github.com/brendonh/erl_openid
+pkg_openid_commit = master
+
+PACKAGES += openpoker
+pkg_openpoker_name = openpoker
+pkg_openpoker_description = Genesis Texas hold'em Game Server
+pkg_openpoker_homepage = https://github.com/hpyhacking/openpoker
+pkg_openpoker_fetch = git
+pkg_openpoker_repo = https://github.com/hpyhacking/openpoker
+pkg_openpoker_commit = master
+
+PACKAGES += otpbp
+pkg_otpbp_name = otpbp
+pkg_otpbp_description = Parse transform for using new OTP functions in old Erlang/OTP releases (R15, R16, 17, 18, 19)
+pkg_otpbp_homepage = https://github.com/Ledest/otpbp
+pkg_otpbp_fetch = git
+pkg_otpbp_repo = https://github.com/Ledest/otpbp
+pkg_otpbp_commit = master
+
+PACKAGES += pal
+pkg_pal_name = pal
+pkg_pal_description = Pragmatic Authentication Library
+pkg_pal_homepage = https://github.com/manifest/pal
+pkg_pal_fetch = git
+pkg_pal_repo = https://github.com/manifest/pal
+pkg_pal_commit = master
+
+PACKAGES += parse_trans
+pkg_parse_trans_name = parse_trans
+pkg_parse_trans_description = Parse transform utilities for Erlang
+pkg_parse_trans_homepage = https://github.com/uwiger/parse_trans
+pkg_parse_trans_fetch = git
+pkg_parse_trans_repo = https://github.com/uwiger/parse_trans
+pkg_parse_trans_commit = master
+
+PACKAGES += parsexml
+pkg_parsexml_name = parsexml
+pkg_parsexml_description = Simple DOM XML parser with convenient and very simple API
+pkg_parsexml_homepage = https://github.com/maxlapshin/parsexml
+pkg_parsexml_fetch = git
+pkg_parsexml_repo = https://github.com/maxlapshin/parsexml
+pkg_parsexml_commit = master
+
+PACKAGES += partisan
+pkg_partisan_name = partisan
+pkg_partisan_description = High-performance, high-scalability distributed computing with Erlang and Elixir.
+pkg_partisan_homepage = http://partisan.cloud
+pkg_partisan_fetch = git
+pkg_partisan_repo = https://github.com/lasp-lang/partisan
+pkg_partisan_commit = master
+
+PACKAGES += pegjs
+pkg_pegjs_name = pegjs
+pkg_pegjs_description = An implementation of PEG.js grammar for Erlang.
+pkg_pegjs_homepage = https://github.com/dmitriid/pegjs
+pkg_pegjs_fetch = git
+pkg_pegjs_repo = https://github.com/dmitriid/pegjs
+pkg_pegjs_commit = master
+
+PACKAGES += percept2
+pkg_percept2_name = percept2
+pkg_percept2_description = Concurrent profiling tool for Erlang
+pkg_percept2_homepage = https://github.com/huiqing/percept2
+pkg_percept2_fetch = git
+pkg_percept2_repo = https://github.com/huiqing/percept2
+pkg_percept2_commit = master
+
+PACKAGES += pgo
+pkg_pgo_name = pgo
+pkg_pgo_description = Erlang Postgres client and connection pool
+pkg_pgo_homepage = https://github.com/erleans/pgo.git
+pkg_pgo_fetch = git
+pkg_pgo_repo = https://github.com/erleans/pgo.git
+pkg_pgo_commit = master
+
+PACKAGES += pgsql
+pkg_pgsql_name = pgsql
+pkg_pgsql_description = Erlang PostgreSQL driver
+pkg_pgsql_homepage = https://github.com/semiocast/pgsql
+pkg_pgsql_fetch = git
+pkg_pgsql_repo = https://github.com/semiocast/pgsql
+pkg_pgsql_commit = master
+
+PACKAGES += pkgx
+pkg_pkgx_name = pkgx
+pkg_pkgx_description = Build .deb packages from Erlang releases
+pkg_pkgx_homepage = https://github.com/arjan/pkgx
+pkg_pkgx_fetch = git
+pkg_pkgx_repo = https://github.com/arjan/pkgx
+pkg_pkgx_commit = master
+
+PACKAGES += pkt
+pkg_pkt_name = pkt
+pkg_pkt_description = Erlang network protocol library
+pkg_pkt_homepage = https://github.com/msantos/pkt
+pkg_pkt_fetch = git
+pkg_pkt_repo = https://github.com/msantos/pkt
+pkg_pkt_commit = master
+
+PACKAGES += plain_fsm
+pkg_plain_fsm_name = plain_fsm
+pkg_plain_fsm_description = A behaviour/support library for writing plain Erlang FSMs.
+pkg_plain_fsm_homepage = https://github.com/uwiger/plain_fsm
+pkg_plain_fsm_fetch = git
+pkg_plain_fsm_repo = https://github.com/uwiger/plain_fsm
+pkg_plain_fsm_commit = master
+
+PACKAGES += plumtree
+pkg_plumtree_name = plumtree
+pkg_plumtree_description = Epidemic Broadcast Trees
+pkg_plumtree_homepage = https://github.com/helium/plumtree
+pkg_plumtree_fetch = git
+pkg_plumtree_repo = https://github.com/helium/plumtree
+pkg_plumtree_commit = master
+
+PACKAGES += pmod_transform
+pkg_pmod_transform_name = pmod_transform
+pkg_pmod_transform_description = Parse transform for parameterized modules
+pkg_pmod_transform_homepage = https://github.com/erlang/pmod_transform
+pkg_pmod_transform_fetch = git
+pkg_pmod_transform_repo = https://github.com/erlang/pmod_transform
+pkg_pmod_transform_commit = master
+
+PACKAGES += pobox
+pkg_pobox_name = pobox
+pkg_pobox_description = External buffer processes to protect against mailbox overflow in Erlang
+pkg_pobox_homepage = https://github.com/ferd/pobox
+pkg_pobox_fetch = git
+pkg_pobox_repo = https://github.com/ferd/pobox
+pkg_pobox_commit = master
+
+PACKAGES += ponos
+pkg_ponos_name = ponos
+pkg_ponos_description = ponos is a simple yet powerful load generator written in Erlang
+pkg_ponos_homepage = https://github.com/klarna/ponos
+pkg_ponos_fetch = git
+pkg_ponos_repo = https://github.com/klarna/ponos
+pkg_ponos_commit = master
+
+PACKAGES += poolboy
+pkg_poolboy_name = poolboy
+pkg_poolboy_description = A hunky Erlang worker pool factory
+pkg_poolboy_homepage = https://github.com/devinus/poolboy
+pkg_poolboy_fetch = git
+pkg_poolboy_repo = https://github.com/devinus/poolboy
+pkg_poolboy_commit = master
+
+PACKAGES += pooler
+pkg_pooler_name = pooler
+pkg_pooler_description = An OTP Process Pool Application
+pkg_pooler_homepage = https://github.com/seth/pooler
+pkg_pooler_fetch = git
+pkg_pooler_repo = https://github.com/seth/pooler
+pkg_pooler_commit = master
+
+PACKAGES += pqueue
+pkg_pqueue_name = pqueue
+pkg_pqueue_description = Erlang Priority Queues
+pkg_pqueue_homepage = https://github.com/okeuday/pqueue
+pkg_pqueue_fetch = git
+pkg_pqueue_repo = https://github.com/okeuday/pqueue
+pkg_pqueue_commit = master
+
+PACKAGES += procket
+pkg_procket_name = procket
+pkg_procket_description = Erlang interface to low level socket operations
+pkg_procket_homepage = http://blog.listincomprehension.com/search/label/procket
+pkg_procket_fetch = git
+pkg_procket_repo = https://github.com/msantos/procket
+pkg_procket_commit = master
+
+PACKAGES += prometheus
+pkg_prometheus_name = prometheus
+pkg_prometheus_description = Prometheus.io client in Erlang
+pkg_prometheus_homepage = https://github.com/deadtrickster/prometheus.erl
+pkg_prometheus_fetch = git
+pkg_prometheus_repo = https://github.com/deadtrickster/prometheus.erl
+pkg_prometheus_commit = master
+
+PACKAGES += prop
+pkg_prop_name = prop
+pkg_prop_description = An Erlang code scaffolding and generator system.
+pkg_prop_homepage = https://github.com/nuex/prop
+pkg_prop_fetch = git
+pkg_prop_repo = https://github.com/nuex/prop
+pkg_prop_commit = master
+
+PACKAGES += proper
+pkg_proper_name = proper
+pkg_proper_description = PropEr: a QuickCheck-inspired property-based testing tool for Erlang.
+pkg_proper_homepage = http://proper.softlab.ntua.gr
+pkg_proper_fetch = git
+pkg_proper_repo = https://github.com/manopapad/proper
+pkg_proper_commit = master
+
+PACKAGES += props
+pkg_props_name = props
+pkg_props_description = Property structure library
+pkg_props_homepage = https://github.com/greyarea/props
+pkg_props_fetch = git
+pkg_props_repo = https://github.com/greyarea/props
+pkg_props_commit = master
+
+PACKAGES += protobuffs
+pkg_protobuffs_name = protobuffs
+pkg_protobuffs_description = An implementation of Google's Protocol Buffers for Erlang, based on ngerakines/erlang_protobuffs.
+pkg_protobuffs_homepage = https://github.com/basho/erlang_protobuffs
+pkg_protobuffs_fetch = git
+pkg_protobuffs_repo = https://github.com/basho/erlang_protobuffs
+pkg_protobuffs_commit = master
+
+PACKAGES += psycho
+pkg_psycho_name = psycho
+pkg_psycho_description = HTTP server that provides a WSGI-like interface for applications and middleware.
+pkg_psycho_homepage = https://github.com/gar1t/psycho
+pkg_psycho_fetch = git
+pkg_psycho_repo = https://github.com/gar1t/psycho
+pkg_psycho_commit = master
+
+PACKAGES += purity
+pkg_purity_name = purity
+pkg_purity_description = A side-effect analyzer for Erlang
+pkg_purity_homepage = https://github.com/mpitid/purity
+pkg_purity_fetch = git
+pkg_purity_repo = https://github.com/mpitid/purity
+pkg_purity_commit = master
+
+PACKAGES += push_service
+pkg_push_service_name = push_service
+pkg_push_service_description = Push service
+pkg_push_service_homepage = https://github.com/hairyhum/push_service
+pkg_push_service_fetch = git
+pkg_push_service_repo = https://github.com/hairyhum/push_service
+pkg_push_service_commit = master
+
+PACKAGES += qdate
+pkg_qdate_name = qdate
+pkg_qdate_description = Date, time, and timezone parsing, formatting, and conversion for Erlang.
+pkg_qdate_homepage = https://github.com/choptastic/qdate
+pkg_qdate_fetch = git
+pkg_qdate_repo = https://github.com/choptastic/qdate
+pkg_qdate_commit = master
+
+PACKAGES += qrcode
+pkg_qrcode_name = qrcode
+pkg_qrcode_description = QR Code encoder in Erlang
+pkg_qrcode_homepage = https://github.com/komone/qrcode
+pkg_qrcode_fetch = git
+pkg_qrcode_repo = https://github.com/komone/qrcode
+pkg_qrcode_commit = master
+
+PACKAGES += quest
+pkg_quest_name = quest
+pkg_quest_description = Learn Erlang through this set of challenges. An interactive system for getting to know Erlang.
+pkg_quest_homepage = https://github.com/eriksoe/ErlangQuest
+pkg_quest_fetch = git
+pkg_quest_repo = https://github.com/eriksoe/ErlangQuest
+pkg_quest_commit = master
+
+PACKAGES += quickrand
+pkg_quickrand_name = quickrand
+pkg_quickrand_description = Quick Erlang Random Number Generation
+pkg_quickrand_homepage = https://github.com/okeuday/quickrand
+pkg_quickrand_fetch = git
+pkg_quickrand_repo = https://github.com/okeuday/quickrand
+pkg_quickrand_commit = master
+
+PACKAGES += rabbit
+pkg_rabbit_name = rabbit
+pkg_rabbit_description = RabbitMQ Server
+pkg_rabbit_homepage = https://www.rabbitmq.com/
+pkg_rabbit_fetch = git
+pkg_rabbit_repo = https://github.com/rabbitmq/rabbitmq-server.git
+pkg_rabbit_commit = master
+
+PACKAGES += rabbit_exchange_type_riak
+pkg_rabbit_exchange_type_riak_name = rabbit_exchange_type_riak
+pkg_rabbit_exchange_type_riak_description = Custom RabbitMQ exchange type for sticking messages in Riak
+pkg_rabbit_exchange_type_riak_homepage = https://github.com/jbrisbin/riak-exchange
+pkg_rabbit_exchange_type_riak_fetch = git
+pkg_rabbit_exchange_type_riak_repo = https://github.com/jbrisbin/riak-exchange
+pkg_rabbit_exchange_type_riak_commit = master
+
+PACKAGES += rack
+pkg_rack_name = rack
+pkg_rack_description = Rack handler for erlang
+pkg_rack_homepage = https://github.com/erlyvideo/rack
+pkg_rack_fetch = git
+pkg_rack_repo = https://github.com/erlyvideo/rack
+pkg_rack_commit = master
+
+PACKAGES += radierl
+pkg_radierl_name = radierl
+pkg_radierl_description = RADIUS protocol stack implemented in Erlang.
+pkg_radierl_homepage = https://github.com/vances/radierl
+pkg_radierl_fetch = git
+pkg_radierl_repo = https://github.com/vances/radierl
+pkg_radierl_commit = master
+
+PACKAGES += rafter
+pkg_rafter_name = rafter
+pkg_rafter_description = An Erlang library application which implements the Raft consensus protocol
+pkg_rafter_homepage = https://github.com/andrewjstone/rafter
+pkg_rafter_fetch = git
+pkg_rafter_repo = https://github.com/andrewjstone/rafter
+pkg_rafter_commit = master
+
+PACKAGES += ranch
+pkg_ranch_name = ranch
+pkg_ranch_description = Socket acceptor pool for TCP protocols.
+pkg_ranch_homepage = http://ninenines.eu
+pkg_ranch_fetch = git
+pkg_ranch_repo = https://github.com/ninenines/ranch
+pkg_ranch_commit = 1.2.1
+
+PACKAGES += rbeacon
+pkg_rbeacon_name = rbeacon
+pkg_rbeacon_description = LAN discovery and presence in Erlang.
+pkg_rbeacon_homepage = https://github.com/refuge/rbeacon
+pkg_rbeacon_fetch = git
+pkg_rbeacon_repo = https://github.com/refuge/rbeacon
+pkg_rbeacon_commit = master
+
+PACKAGES += rebar
+pkg_rebar_name = rebar
+pkg_rebar_description = Erlang build tool that makes it easy to compile and test Erlang applications, port drivers and releases.
+pkg_rebar_homepage = http://www.rebar3.org
+pkg_rebar_fetch = git
+pkg_rebar_repo = https://github.com/rebar/rebar3
+pkg_rebar_commit = master
+
+PACKAGES += rebus
+pkg_rebus_name = rebus
+pkg_rebus_description = A stupid simple, internal pub/sub event bus written in and for Erlang.
+pkg_rebus_homepage = https://github.com/olle/rebus
+pkg_rebus_fetch = git
+pkg_rebus_repo = https://github.com/olle/rebus
+pkg_rebus_commit = master
+
+PACKAGES += rec2json
+pkg_rec2json_name = rec2json
+pkg_rec2json_description = Compile erlang record definitions into modules to convert them to/from json easily.
+pkg_rec2json_homepage = https://github.com/lordnull/rec2json
+pkg_rec2json_fetch = git
+pkg_rec2json_repo = https://github.com/lordnull/rec2json
+pkg_rec2json_commit = master
+
+PACKAGES += recon
+pkg_recon_name = recon
+pkg_recon_description = Collection of functions and scripts to debug Erlang in production.
+pkg_recon_homepage = https://github.com/ferd/recon
+pkg_recon_fetch = git
+pkg_recon_repo = https://github.com/ferd/recon
+pkg_recon_commit = master
+
+PACKAGES += record_info
+pkg_record_info_name = record_info
+pkg_record_info_description = Convert between record and proplist
+pkg_record_info_homepage = https://github.com/bipthelin/erlang-record_info
+pkg_record_info_fetch = git
+pkg_record_info_repo = https://github.com/bipthelin/erlang-record_info
+pkg_record_info_commit = master
+
+PACKAGES += redgrid
+pkg_redgrid_name = redgrid
+pkg_redgrid_description = automatic Erlang node discovery via redis
+pkg_redgrid_homepage = https://github.com/jkvor/redgrid
+pkg_redgrid_fetch = git
+pkg_redgrid_repo = https://github.com/jkvor/redgrid
+pkg_redgrid_commit = master
+
+PACKAGES += redo
+pkg_redo_name = redo
+pkg_redo_description = pipelined erlang redis client
+pkg_redo_homepage = https://github.com/jkvor/redo
+pkg_redo_fetch = git
+pkg_redo_repo = https://github.com/jkvor/redo
+pkg_redo_commit = master
+
+PACKAGES += reload_mk
+pkg_reload_mk_name = reload_mk
+pkg_reload_mk_description = Live reload plugin for erlang.mk.
+pkg_reload_mk_homepage = https://github.com/bullno1/reload.mk
+pkg_reload_mk_fetch = git
+pkg_reload_mk_repo = https://github.com/bullno1/reload.mk
+pkg_reload_mk_commit = master
+
+PACKAGES += reltool_util
+pkg_reltool_util_name = reltool_util
+pkg_reltool_util_description = Erlang reltool utility functionality application
+pkg_reltool_util_homepage = https://github.com/okeuday/reltool_util
+pkg_reltool_util_fetch = git
+pkg_reltool_util_repo = https://github.com/okeuday/reltool_util
+pkg_reltool_util_commit = master
+
+PACKAGES += relx
+pkg_relx_name = relx
+pkg_relx_description = Sane, simple release creation for Erlang
+pkg_relx_homepage = https://github.com/erlware/relx
+pkg_relx_fetch = git
+pkg_relx_repo = https://github.com/erlware/relx
+pkg_relx_commit = master
+
+PACKAGES += resource_discovery
+pkg_resource_discovery_name = resource_discovery
+pkg_resource_discovery_description = An application used to dynamically discover resources present in an Erlang node cluster.
+pkg_resource_discovery_homepage = http://erlware.org/
+pkg_resource_discovery_fetch = git
+pkg_resource_discovery_repo = https://github.com/erlware/resource_discovery
+pkg_resource_discovery_commit = master
+
+PACKAGES += restc
+pkg_restc_name = restc
+pkg_restc_description = Erlang Rest Client
+pkg_restc_homepage = https://github.com/kivra/restclient
+pkg_restc_fetch = git
+pkg_restc_repo = https://github.com/kivra/restclient
+pkg_restc_commit = master
+
+PACKAGES += rfc4627_jsonrpc
+pkg_rfc4627_jsonrpc_name = rfc4627_jsonrpc
+pkg_rfc4627_jsonrpc_description = Erlang RFC4627 (JSON) codec and JSON-RPC server implementation.
+pkg_rfc4627_jsonrpc_homepage = https://github.com/tonyg/erlang-rfc4627
+pkg_rfc4627_jsonrpc_fetch = git
+pkg_rfc4627_jsonrpc_repo = https://github.com/tonyg/erlang-rfc4627
+pkg_rfc4627_jsonrpc_commit = master
+
+PACKAGES += riak_control
+pkg_riak_control_name = riak_control
+pkg_riak_control_description = Webmachine-based administration interface for Riak.
+pkg_riak_control_homepage = https://github.com/basho/riak_control
+pkg_riak_control_fetch = git
+pkg_riak_control_repo = https://github.com/basho/riak_control
+pkg_riak_control_commit = master
+
+PACKAGES += riak_core
+pkg_riak_core_name = riak_core
+pkg_riak_core_description = Distributed systems infrastructure used by Riak.
+pkg_riak_core_homepage = https://github.com/basho/riak_core
+pkg_riak_core_fetch = git
+pkg_riak_core_repo = https://github.com/basho/riak_core
+pkg_riak_core_commit = master
+
+PACKAGES += riak_dt
+pkg_riak_dt_name = riak_dt
+pkg_riak_dt_description = Convergent replicated datatypes in Erlang
+pkg_riak_dt_homepage = https://github.com/basho/riak_dt
+pkg_riak_dt_fetch = git
+pkg_riak_dt_repo = https://github.com/basho/riak_dt
+pkg_riak_dt_commit = master
+
+PACKAGES += riak_ensemble
+pkg_riak_ensemble_name = riak_ensemble
+pkg_riak_ensemble_description = Multi-Paxos framework in Erlang
+pkg_riak_ensemble_homepage = https://github.com/basho/riak_ensemble
+pkg_riak_ensemble_fetch = git
+pkg_riak_ensemble_repo = https://github.com/basho/riak_ensemble
+pkg_riak_ensemble_commit = master
+
+PACKAGES += riak_kv
+pkg_riak_kv_name = riak_kv
+pkg_riak_kv_description = Riak Key/Value Store
+pkg_riak_kv_homepage = https://github.com/basho/riak_kv
+pkg_riak_kv_fetch = git
+pkg_riak_kv_repo = https://github.com/basho/riak_kv
+pkg_riak_kv_commit = master
+
+PACKAGES += riak_pg
+pkg_riak_pg_name = riak_pg
+pkg_riak_pg_description = Distributed process groups with riak_core.
+pkg_riak_pg_homepage = https://github.com/cmeiklejohn/riak_pg
+pkg_riak_pg_fetch = git
+pkg_riak_pg_repo = https://github.com/cmeiklejohn/riak_pg
+pkg_riak_pg_commit = master
+
+PACKAGES += riak_pipe
+pkg_riak_pipe_name = riak_pipe
+pkg_riak_pipe_description = Riak Pipelines
+pkg_riak_pipe_homepage = https://github.com/basho/riak_pipe
+pkg_riak_pipe_fetch = git
+pkg_riak_pipe_repo = https://github.com/basho/riak_pipe
+pkg_riak_pipe_commit = master
+
+PACKAGES += riak_sysmon
+pkg_riak_sysmon_name = riak_sysmon
+pkg_riak_sysmon_description = Simple OTP app for managing Erlang VM system_monitor event messages
+pkg_riak_sysmon_homepage = https://github.com/basho/riak_sysmon
+pkg_riak_sysmon_fetch = git
+pkg_riak_sysmon_repo = https://github.com/basho/riak_sysmon
+pkg_riak_sysmon_commit = master
+
+PACKAGES += riak_test
+pkg_riak_test_name = riak_test
+pkg_riak_test_description = I'm in your cluster, testing your riaks
+pkg_riak_test_homepage = https://github.com/basho/riak_test
+pkg_riak_test_fetch = git
+pkg_riak_test_repo = https://github.com/basho/riak_test
+pkg_riak_test_commit = master
+
+PACKAGES += riakc
+pkg_riakc_name = riakc
+pkg_riakc_description = Erlang clients for Riak.
+pkg_riakc_homepage = https://github.com/basho/riak-erlang-client
+pkg_riakc_fetch = git
+pkg_riakc_repo = https://github.com/basho/riak-erlang-client
+pkg_riakc_commit = master
+
+PACKAGES += riakhttpc
+pkg_riakhttpc_name = riakhttpc
+pkg_riakhttpc_description = Riak Erlang client using the HTTP interface
+pkg_riakhttpc_homepage = https://github.com/basho/riak-erlang-http-client
+pkg_riakhttpc_fetch = git
+pkg_riakhttpc_repo = https://github.com/basho/riak-erlang-http-client
+pkg_riakhttpc_commit = master
+
+PACKAGES += riaknostic
+pkg_riaknostic_name = riaknostic
+pkg_riaknostic_description = A diagnostic tool for Riak installations, to find common errors asap
+pkg_riaknostic_homepage = https://github.com/basho/riaknostic
+pkg_riaknostic_fetch = git
+pkg_riaknostic_repo = https://github.com/basho/riaknostic
+pkg_riaknostic_commit = master
+
+PACKAGES += riakpool
+pkg_riakpool_name = riakpool
+pkg_riakpool_description = erlang riak client pool
+pkg_riakpool_homepage = https://github.com/dweldon/riakpool
+pkg_riakpool_fetch = git
+pkg_riakpool_repo = https://github.com/dweldon/riakpool
+pkg_riakpool_commit = master
+
+PACKAGES += rivus_cep
+pkg_rivus_cep_name = rivus_cep
+pkg_rivus_cep_description = Complex event processing in Erlang
+pkg_rivus_cep_homepage = https://github.com/vascokk/rivus_cep
+pkg_rivus_cep_fetch = git
+pkg_rivus_cep_repo = https://github.com/vascokk/rivus_cep
+pkg_rivus_cep_commit = master
+
+PACKAGES += rlimit
+pkg_rlimit_name = rlimit
+pkg_rlimit_description = Magnus Klaar's rate limiter code from etorrent
+pkg_rlimit_homepage = https://github.com/jlouis/rlimit
+pkg_rlimit_fetch = git
+pkg_rlimit_repo = https://github.com/jlouis/rlimit
+pkg_rlimit_commit = master
+
+PACKAGES += rust_mk
+pkg_rust_mk_name = rust_mk
+pkg_rust_mk_description = Build Rust crates in an Erlang application
+pkg_rust_mk_homepage = https://github.com/goertzenator/rust.mk
+pkg_rust_mk_fetch = git
+pkg_rust_mk_repo = https://github.com/goertzenator/rust.mk
+pkg_rust_mk_commit = master
+
+PACKAGES += safetyvalve
+pkg_safetyvalve_name = safetyvalve
+pkg_safetyvalve_description = A safety valve for your erlang node
+pkg_safetyvalve_homepage = https://github.com/jlouis/safetyvalve
+pkg_safetyvalve_fetch = git
+pkg_safetyvalve_repo = https://github.com/jlouis/safetyvalve
+pkg_safetyvalve_commit = master
+
+PACKAGES += seestar
+pkg_seestar_name = seestar
+pkg_seestar_description = The Erlang client for Cassandra 1.2+ binary protocol
+pkg_seestar_homepage = https://github.com/iamaleksey/seestar
+pkg_seestar_fetch = git
+pkg_seestar_repo = https://github.com/iamaleksey/seestar
+pkg_seestar_commit = master
+
+PACKAGES += service
+pkg_service_name = service
+pkg_service_description = A minimal Erlang behavior for creating CloudI internal services
+pkg_service_homepage = http://cloudi.org/
+pkg_service_fetch = git
+pkg_service_repo = https://github.com/CloudI/service
+pkg_service_commit = master
+
+PACKAGES += setup
+pkg_setup_name = setup
+pkg_setup_description = Generic setup utility for Erlang-based systems
+pkg_setup_homepage = https://github.com/uwiger/setup
+pkg_setup_fetch = git
+pkg_setup_repo = https://github.com/uwiger/setup
+pkg_setup_commit = master
+
+PACKAGES += sext
+pkg_sext_name = sext
+pkg_sext_description = Sortable Erlang Term Serialization
+pkg_sext_homepage = https://github.com/uwiger/sext
+pkg_sext_fetch = git
+pkg_sext_repo = https://github.com/uwiger/sext
+pkg_sext_commit = master
+
+PACKAGES += sfmt
+pkg_sfmt_name = sfmt
+pkg_sfmt_description = SFMT pseudo random number generator for Erlang.
+pkg_sfmt_homepage = https://github.com/jj1bdx/sfmt-erlang
+pkg_sfmt_fetch = git
+pkg_sfmt_repo = https://github.com/jj1bdx/sfmt-erlang
+pkg_sfmt_commit = master
+
+PACKAGES += sgte
+pkg_sgte_name = sgte
+pkg_sgte_description = A simple Erlang Template Engine
+pkg_sgte_homepage = https://github.com/filippo/sgte
+pkg_sgte_fetch = git
+pkg_sgte_repo = https://github.com/filippo/sgte
+pkg_sgte_commit = master
+
+PACKAGES += sheriff
+pkg_sheriff_name = sheriff
+pkg_sheriff_description = Parse transform for type based validation.
+pkg_sheriff_homepage = http://ninenines.eu
+pkg_sheriff_fetch = git
+pkg_sheriff_repo = https://github.com/extend/sheriff
+pkg_sheriff_commit = master
+
+PACKAGES += shotgun
+pkg_shotgun_name = shotgun
+pkg_shotgun_description = better than just a gun
+pkg_shotgun_homepage = https://github.com/inaka/shotgun
+pkg_shotgun_fetch = git
+pkg_shotgun_repo = https://github.com/inaka/shotgun
+pkg_shotgun_commit = master
+
+PACKAGES += sidejob
+pkg_sidejob_name = sidejob
+pkg_sidejob_description = Parallel worker and capacity limiting library for Erlang
+pkg_sidejob_homepage = https://github.com/basho/sidejob
+pkg_sidejob_fetch = git
+pkg_sidejob_repo = https://github.com/basho/sidejob
+pkg_sidejob_commit = master
+
+PACKAGES += sieve
+pkg_sieve_name = sieve
+pkg_sieve_description = sieve is a simple TCP routing proxy (layer 7) in erlang
+pkg_sieve_homepage = https://github.com/benoitc/sieve
+pkg_sieve_fetch = git
+pkg_sieve_repo = https://github.com/benoitc/sieve
+pkg_sieve_commit = master
+
+PACKAGES += sighandler
+pkg_sighandler_name = sighandler
+pkg_sighandler_description = Handle UNIX signals in Erlang
+pkg_sighandler_homepage = https://github.com/jkingsbery/sighandler
+pkg_sighandler_fetch = git
+pkg_sighandler_repo = https://github.com/jkingsbery/sighandler
+pkg_sighandler_commit = master
+
+PACKAGES += simhash
+pkg_simhash_name = simhash
+pkg_simhash_description = Simhashing for Erlang -- hashing algorithm to find near-duplicates in binary data.
+pkg_simhash_homepage = https://github.com/ferd/simhash
+pkg_simhash_fetch = git
+pkg_simhash_repo = https://github.com/ferd/simhash
+pkg_simhash_commit = master
+
+PACKAGES += simple_bridge
+pkg_simple_bridge_name = simple_bridge
+pkg_simple_bridge_description = A simple, standardized interface library to Erlang HTTP Servers.
+pkg_simple_bridge_homepage = https://github.com/nitrogen/simple_bridge
+pkg_simple_bridge_fetch = git
+pkg_simple_bridge_repo = https://github.com/nitrogen/simple_bridge
+pkg_simple_bridge_commit = master
+
+PACKAGES += simple_oauth2
+pkg_simple_oauth2_name = simple_oauth2
+pkg_simple_oauth2_description = Simple erlang OAuth2 client module for any http server framework (Google, Facebook, Yandex, Vkontakte are preconfigured)
+pkg_simple_oauth2_homepage = https://github.com/virtan/simple_oauth2
+pkg_simple_oauth2_fetch = git
+pkg_simple_oauth2_repo = https://github.com/virtan/simple_oauth2
+pkg_simple_oauth2_commit = master
+
+PACKAGES += skel
+pkg_skel_name = skel
+pkg_skel_description = A Streaming Process-based Skeleton Library for Erlang
+pkg_skel_homepage = https://github.com/ParaPhrase/skel
+pkg_skel_fetch = git
+pkg_skel_repo = https://github.com/ParaPhrase/skel
+pkg_skel_commit = master
+
+PACKAGES += slack
+pkg_slack_name = slack
+pkg_slack_description = Minimal slack notification OTP library.
+pkg_slack_homepage = https://github.com/DonBranson/slack
+pkg_slack_fetch = git
+pkg_slack_repo = https://github.com/DonBranson/slack.git
+pkg_slack_commit = master
+
+PACKAGES += smother
+pkg_smother_name = smother
+pkg_smother_description = Extended code coverage metrics for Erlang.
+pkg_smother_homepage = https://ramsay-t.github.io/Smother/
+pkg_smother_fetch = git
+pkg_smother_repo = https://github.com/ramsay-t/Smother
+pkg_smother_commit = master
+
+PACKAGES += snappyer
+pkg_snappyer_name = snappyer
+pkg_snappyer_description = Snappy as nif for Erlang
+pkg_snappyer_homepage = https://github.com/zmstone/snappyer
+pkg_snappyer_fetch = git
+pkg_snappyer_repo = https://github.com/zmstone/snappyer.git
+pkg_snappyer_commit = master
+
+PACKAGES += social
+pkg_social_name = social
+pkg_social_description = Cowboy handler for social login via OAuth2 providers
+pkg_social_homepage = https://github.com/dvv/social
+pkg_social_fetch = git
+pkg_social_repo = https://github.com/dvv/social
+pkg_social_commit = master
+
+PACKAGES += spapi_router
+pkg_spapi_router_name = spapi_router
+pkg_spapi_router_description = Partially-connected Erlang clustering
+pkg_spapi_router_homepage = https://github.com/spilgames/spapi-router
+pkg_spapi_router_fetch = git
+pkg_spapi_router_repo = https://github.com/spilgames/spapi-router
+pkg_spapi_router_commit = master
+
+PACKAGES += sqerl
+pkg_sqerl_name = sqerl
+pkg_sqerl_description = An Erlang-flavoured SQL DSL
+pkg_sqerl_homepage = https://github.com/hairyhum/sqerl
+pkg_sqerl_fetch = git
+pkg_sqerl_repo = https://github.com/hairyhum/sqerl
+pkg_sqerl_commit = master
+
+PACKAGES += srly
+pkg_srly_name = srly
+pkg_srly_description = Native Erlang Unix serial interface
+pkg_srly_homepage = https://github.com/msantos/srly
+pkg_srly_fetch = git
+pkg_srly_repo = https://github.com/msantos/srly
+pkg_srly_commit = master
+
+PACKAGES += sshrpc
+pkg_sshrpc_name = sshrpc
+pkg_sshrpc_description = Erlang SSH RPC module (experimental)
+pkg_sshrpc_homepage = https://github.com/jj1bdx/sshrpc
+pkg_sshrpc_fetch = git
+pkg_sshrpc_repo = https://github.com/jj1bdx/sshrpc
+pkg_sshrpc_commit = master
+
+PACKAGES += stable
+pkg_stable_name = stable
+pkg_stable_description = Library of assorted helpers for Cowboy web server.
+pkg_stable_homepage = https://github.com/dvv/stable
+pkg_stable_fetch = git
+pkg_stable_repo = https://github.com/dvv/stable
+pkg_stable_commit = master
+
+PACKAGES += statebox
+pkg_statebox_name = statebox
+pkg_statebox_description = Erlang state monad with merge/conflict-resolution capabilities. Useful for Riak.
+pkg_statebox_homepage = https://github.com/mochi/statebox
+pkg_statebox_fetch = git
+pkg_statebox_repo = https://github.com/mochi/statebox
+pkg_statebox_commit = master
+
+PACKAGES += statebox_riak
+pkg_statebox_riak_name = statebox_riak
+pkg_statebox_riak_description = Convenience library that makes it easier to use statebox with riak, extracted from best practices in our production code at Mochi Media.
+pkg_statebox_riak_homepage = https://github.com/mochi/statebox_riak
+pkg_statebox_riak_fetch = git
+pkg_statebox_riak_repo = https://github.com/mochi/statebox_riak
+pkg_statebox_riak_commit = master
+
+PACKAGES += statman
+pkg_statman_name = statman
+pkg_statman_description = Efficiently collect massive volumes of metrics inside the Erlang VM
+pkg_statman_homepage = https://github.com/knutin/statman
+pkg_statman_fetch = git
+pkg_statman_repo = https://github.com/knutin/statman
+pkg_statman_commit = master
+
+PACKAGES += statsderl
+pkg_statsderl_name = statsderl
+pkg_statsderl_description = StatsD client (erlang)
+pkg_statsderl_homepage = https://github.com/lpgauth/statsderl
+pkg_statsderl_fetch = git
+pkg_statsderl_repo = https://github.com/lpgauth/statsderl
+pkg_statsderl_commit = master
+
+PACKAGES += stdinout_pool
+pkg_stdinout_pool_name = stdinout_pool
+pkg_stdinout_pool_description = stdinout_pool : stuff goes in, stuff goes out. there's never any miscommunication.
+pkg_stdinout_pool_homepage = https://github.com/mattsta/erlang-stdinout-pool
+pkg_stdinout_pool_fetch = git
+pkg_stdinout_pool_repo = https://github.com/mattsta/erlang-stdinout-pool
+pkg_stdinout_pool_commit = master
+
+PACKAGES += stockdb
+pkg_stockdb_name = stockdb
+pkg_stockdb_description = Database for storing Stock Exchange quotes in erlang
+pkg_stockdb_homepage = https://github.com/maxlapshin/stockdb
+pkg_stockdb_fetch = git
+pkg_stockdb_repo = https://github.com/maxlapshin/stockdb
+pkg_stockdb_commit = master
+
+PACKAGES += stripe
+pkg_stripe_name = stripe
+pkg_stripe_description = Erlang interface to the stripe.com API
+pkg_stripe_homepage = https://github.com/mattsta/stripe-erlang
+pkg_stripe_fetch = git
+pkg_stripe_repo = https://github.com/mattsta/stripe-erlang
+pkg_stripe_commit = v1
+
+PACKAGES += subproc
+pkg_subproc_name = subproc
+pkg_subproc_description = unix subprocess manager with {active,once|false} modes
+pkg_subproc_homepage = http://dozzie.jarowit.net/trac/wiki/subproc
+pkg_subproc_fetch = git
+pkg_subproc_repo = https://github.com/dozzie/subproc
+pkg_subproc_commit = v0.1.0
+
+PACKAGES += supervisor3
+pkg_supervisor3_name = supervisor3
+pkg_supervisor3_description = OTP supervisor with additional strategies
+pkg_supervisor3_homepage = https://github.com/klarna/supervisor3
+pkg_supervisor3_fetch = git
+pkg_supervisor3_repo = https://github.com/klarna/supervisor3.git
+pkg_supervisor3_commit = master
+
+PACKAGES += surrogate
+pkg_surrogate_name = surrogate
+pkg_surrogate_description = Proxy server written in erlang. Supports reverse proxy load balancing and forward proxy with http (including CONNECT), socks4, socks5, and transparent proxy modes.
+pkg_surrogate_homepage = https://github.com/skruger/Surrogate
+pkg_surrogate_fetch = git
+pkg_surrogate_repo = https://github.com/skruger/Surrogate
+pkg_surrogate_commit = master
+
+PACKAGES += swab
+pkg_swab_name = swab
+pkg_swab_description = General purpose buffer handling module
+pkg_swab_homepage = https://github.com/crownedgrouse/swab
+pkg_swab_fetch = git
+pkg_swab_repo = https://github.com/crownedgrouse/swab
+pkg_swab_commit = master
+
+PACKAGES += swarm
+pkg_swarm_name = swarm
+pkg_swarm_description = Fast and simple acceptor pool for Erlang
+pkg_swarm_homepage = https://github.com/jeremey/swarm
+pkg_swarm_fetch = git
+pkg_swarm_repo = https://github.com/jeremey/swarm
+pkg_swarm_commit = master
+
+PACKAGES += switchboard
+pkg_switchboard_name = switchboard
+pkg_switchboard_description = A framework for processing email using worker plugins.
+pkg_switchboard_homepage = https://github.com/thusfresh/switchboard
+pkg_switchboard_fetch = git
+pkg_switchboard_repo = https://github.com/thusfresh/switchboard
+pkg_switchboard_commit = master
+
+PACKAGES += syn
+pkg_syn_name = syn
+pkg_syn_description = A global Process Registry and Process Group manager for Erlang.
+pkg_syn_homepage = https://github.com/ostinelli/syn
+pkg_syn_fetch = git
+pkg_syn_repo = https://github.com/ostinelli/syn
+pkg_syn_commit = master
+
+PACKAGES += sync
+pkg_sync_name = sync
+pkg_sync_description = On-the-fly recompiling and reloading in Erlang.
+pkg_sync_homepage = https://github.com/rustyio/sync
+pkg_sync_fetch = git
+pkg_sync_repo = https://github.com/rustyio/sync
+pkg_sync_commit = master
+
+PACKAGES += syntaxerl
+pkg_syntaxerl_name = syntaxerl
+pkg_syntaxerl_description = Syntax checker for Erlang
+pkg_syntaxerl_homepage = https://github.com/ten0s/syntaxerl
+pkg_syntaxerl_fetch = git
+pkg_syntaxerl_repo = https://github.com/ten0s/syntaxerl
+pkg_syntaxerl_commit = master
+
+PACKAGES += syslog
+pkg_syslog_name = syslog
+pkg_syslog_description = Erlang port driver for interacting with syslog via syslog(3)
+pkg_syslog_homepage = https://github.com/Vagabond/erlang-syslog
+pkg_syslog_fetch = git
+pkg_syslog_repo = https://github.com/Vagabond/erlang-syslog
+pkg_syslog_commit = master
+
+PACKAGES += taskforce
+pkg_taskforce_name = taskforce
+pkg_taskforce_description = Erlang worker pools for controlled parallelisation of arbitrary tasks.
+pkg_taskforce_homepage = https://github.com/g-andrade/taskforce
+pkg_taskforce_fetch = git
+pkg_taskforce_repo = https://github.com/g-andrade/taskforce
+pkg_taskforce_commit = master
+
+PACKAGES += tddreloader
+pkg_tddreloader_name = tddreloader
+pkg_tddreloader_description = Shell utility for recompiling, reloading, and testing code as it changes
+pkg_tddreloader_homepage = https://github.com/version2beta/tddreloader
+pkg_tddreloader_fetch = git
+pkg_tddreloader_repo = https://github.com/version2beta/tddreloader
+pkg_tddreloader_commit = master
+
+PACKAGES += tempo
+pkg_tempo_name = tempo
+pkg_tempo_description = NIF-based date and time parsing and formatting for Erlang.
+pkg_tempo_homepage = https://github.com/selectel/tempo
+pkg_tempo_fetch = git
+pkg_tempo_repo = https://github.com/selectel/tempo
+pkg_tempo_commit = master
+
+PACKAGES += ticktick
+pkg_ticktick_name = ticktick
+pkg_ticktick_description = Ticktick is an ID generator for message services.
+pkg_ticktick_homepage = https://github.com/ericliang/ticktick
+pkg_ticktick_fetch = git
+pkg_ticktick_repo = https://github.com/ericliang/ticktick
+pkg_ticktick_commit = master
+
+PACKAGES += tinymq
+pkg_tinymq_name = tinymq
+pkg_tinymq_description = TinyMQ - a diminutive, in-memory message queue
+pkg_tinymq_homepage = https://github.com/ChicagoBoss/tinymq
+pkg_tinymq_fetch = git
+pkg_tinymq_repo = https://github.com/ChicagoBoss/tinymq
+pkg_tinymq_commit = master
+
+PACKAGES += tinymt
+pkg_tinymt_name = tinymt
+pkg_tinymt_description = TinyMT pseudo random number generator for Erlang.
+pkg_tinymt_homepage = https://github.com/jj1bdx/tinymt-erlang
+pkg_tinymt_fetch = git
+pkg_tinymt_repo = https://github.com/jj1bdx/tinymt-erlang
+pkg_tinymt_commit = master
+
+PACKAGES += tirerl
+pkg_tirerl_name = tirerl
+pkg_tirerl_description = Erlang interface to Elastic Search
+pkg_tirerl_homepage = https://github.com/inaka/tirerl
+pkg_tirerl_fetch = git
+pkg_tirerl_repo = https://github.com/inaka/tirerl
+pkg_tirerl_commit = master
+
+PACKAGES += toml
+pkg_toml_name = toml
+pkg_toml_description = TOML (0.4.0) config parser
+pkg_toml_homepage = http://dozzie.jarowit.net/trac/wiki/TOML
+pkg_toml_fetch = git
+pkg_toml_repo = https://github.com/dozzie/toml
+pkg_toml_commit = v0.2.0
+
+PACKAGES += traffic_tools
+pkg_traffic_tools_name = traffic_tools
+pkg_traffic_tools_description = Simple traffic limiting library
+pkg_traffic_tools_homepage = https://github.com/systra/traffic_tools
+pkg_traffic_tools_fetch = git
+pkg_traffic_tools_repo = https://github.com/systra/traffic_tools
+pkg_traffic_tools_commit = master
+
+PACKAGES += trails
+pkg_trails_name = trails
+pkg_trails_description = A couple of improvements over Cowboy Routes
+pkg_trails_homepage = http://inaka.github.io/cowboy-trails/
+pkg_trails_fetch = git
+pkg_trails_repo = https://github.com/inaka/cowboy-trails
+pkg_trails_commit = master
+
+PACKAGES += trane
+pkg_trane_name = trane
+pkg_trane_description = SAX style broken HTML parser in Erlang
+pkg_trane_homepage = https://github.com/massemanet/trane
+pkg_trane_fetch = git
+pkg_trane_repo = https://github.com/massemanet/trane
+pkg_trane_commit = master
+
+PACKAGES += transit
+pkg_transit_name = transit
+pkg_transit_description = transit format for erlang
+pkg_transit_homepage = https://github.com/isaiah/transit-erlang
+pkg_transit_fetch = git
+pkg_transit_repo = https://github.com/isaiah/transit-erlang
+pkg_transit_commit = master
+
+PACKAGES += trie
+pkg_trie_name = trie
+pkg_trie_description = Erlang Trie Implementation
+pkg_trie_homepage = https://github.com/okeuday/trie
+pkg_trie_fetch = git
+pkg_trie_repo = https://github.com/okeuday/trie
+pkg_trie_commit = master
+
+PACKAGES += triq
+pkg_triq_name = triq
+pkg_triq_description = Trifork QuickCheck
+pkg_triq_homepage = https://triq.gitlab.io
+pkg_triq_fetch = git
+pkg_triq_repo = https://gitlab.com/triq/triq.git
+pkg_triq_commit = master
+
+PACKAGES += tunctl
+pkg_tunctl_name = tunctl
+pkg_tunctl_description = Erlang TUN/TAP interface
+pkg_tunctl_homepage = https://github.com/msantos/tunctl
+pkg_tunctl_fetch = git
+pkg_tunctl_repo = https://github.com/msantos/tunctl
+pkg_tunctl_commit = master
+
+PACKAGES += twerl
+pkg_twerl_name = twerl
+pkg_twerl_description = Erlang client for the Twitter Streaming API
+pkg_twerl_homepage = https://github.com/lucaspiller/twerl
+pkg_twerl_fetch = git
+pkg_twerl_repo = https://github.com/lucaspiller/twerl
+pkg_twerl_commit = oauth
+
+PACKAGES += twitter_erlang
+pkg_twitter_erlang_name = twitter_erlang
+pkg_twitter_erlang_description = An Erlang twitter client
+pkg_twitter_erlang_homepage = https://github.com/ngerakines/erlang_twitter
+pkg_twitter_erlang_fetch = git
+pkg_twitter_erlang_repo = https://github.com/ngerakines/erlang_twitter
+pkg_twitter_erlang_commit = master
+
+PACKAGES += ucol_nif
+pkg_ucol_nif_name = ucol_nif
+pkg_ucol_nif_description = ICU based collation Erlang module
+pkg_ucol_nif_homepage = https://github.com/refuge/ucol_nif
+pkg_ucol_nif_fetch = git
+pkg_ucol_nif_repo = https://github.com/refuge/ucol_nif
+pkg_ucol_nif_commit = master
+
+PACKAGES += unicorn
+pkg_unicorn_name = unicorn
+pkg_unicorn_description = Generic configuration server
+pkg_unicorn_homepage = https://github.com/shizzard/unicorn
+pkg_unicorn_fetch = git
+pkg_unicorn_repo = https://github.com/shizzard/unicorn
+pkg_unicorn_commit = master
+
+PACKAGES += unsplit
+pkg_unsplit_name = unsplit
+pkg_unsplit_description = Resolves conflicts in Mnesia after network splits
+pkg_unsplit_homepage = https://github.com/uwiger/unsplit
+pkg_unsplit_fetch = git
+pkg_unsplit_repo = https://github.com/uwiger/unsplit
+pkg_unsplit_commit = master
+
+PACKAGES += uuid
+pkg_uuid_name = uuid
+pkg_uuid_description = Erlang UUID Implementation
+pkg_uuid_homepage = https://github.com/okeuday/uuid
+pkg_uuid_fetch = git
+pkg_uuid_repo = https://github.com/okeuday/uuid
+pkg_uuid_commit = master
+
+PACKAGES += ux
+pkg_ux_name = ux
+pkg_ux_description = Unicode eXtension for Erlang (Strings, Collation)
+pkg_ux_homepage = https://github.com/erlang-unicode/ux
+pkg_ux_fetch = git
+pkg_ux_repo = https://github.com/erlang-unicode/ux
+pkg_ux_commit = master
+
+PACKAGES += vert
+pkg_vert_name = vert
+pkg_vert_description = Erlang binding to libvirt virtualization API
+pkg_vert_homepage = https://github.com/msantos/erlang-libvirt
+pkg_vert_fetch = git
+pkg_vert_repo = https://github.com/msantos/erlang-libvirt
+pkg_vert_commit = master
+
+PACKAGES += verx
+pkg_verx_name = verx
+pkg_verx_description = Erlang implementation of the libvirtd remote protocol
+pkg_verx_homepage = https://github.com/msantos/verx
+pkg_verx_fetch = git
+pkg_verx_repo = https://github.com/msantos/verx
+pkg_verx_commit = master
+
+PACKAGES += vmq_acl
+pkg_vmq_acl_name = vmq_acl
+pkg_vmq_acl_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_acl_homepage = https://verne.mq/
+pkg_vmq_acl_fetch = git
+pkg_vmq_acl_repo = https://github.com/erlio/vmq_acl
+pkg_vmq_acl_commit = master
+
+PACKAGES += vmq_bridge
+pkg_vmq_bridge_name = vmq_bridge
+pkg_vmq_bridge_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_bridge_homepage = https://verne.mq/
+pkg_vmq_bridge_fetch = git
+pkg_vmq_bridge_repo = https://github.com/erlio/vmq_bridge
+pkg_vmq_bridge_commit = master
+
+PACKAGES += vmq_graphite
+pkg_vmq_graphite_name = vmq_graphite
+pkg_vmq_graphite_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_graphite_homepage = https://verne.mq/
+pkg_vmq_graphite_fetch = git
+pkg_vmq_graphite_repo = https://github.com/erlio/vmq_graphite
+pkg_vmq_graphite_commit = master
+
+PACKAGES += vmq_passwd
+pkg_vmq_passwd_name = vmq_passwd
+pkg_vmq_passwd_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_passwd_homepage = https://verne.mq/
+pkg_vmq_passwd_fetch = git
+pkg_vmq_passwd_repo = https://github.com/erlio/vmq_passwd
+pkg_vmq_passwd_commit = master
+
+PACKAGES += vmq_server
+pkg_vmq_server_name = vmq_server
+pkg_vmq_server_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_server_homepage = https://verne.mq/
+pkg_vmq_server_fetch = git
+pkg_vmq_server_repo = https://github.com/erlio/vmq_server
+pkg_vmq_server_commit = master
+
+PACKAGES += vmq_snmp
+pkg_vmq_snmp_name = vmq_snmp
+pkg_vmq_snmp_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_snmp_homepage = https://verne.mq/
+pkg_vmq_snmp_fetch = git
+pkg_vmq_snmp_repo = https://github.com/erlio/vmq_snmp
+pkg_vmq_snmp_commit = master
+
+PACKAGES += vmq_systree
+pkg_vmq_systree_name = vmq_systree
+pkg_vmq_systree_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_systree_homepage = https://verne.mq/
+pkg_vmq_systree_fetch = git
+pkg_vmq_systree_repo = https://github.com/erlio/vmq_systree
+pkg_vmq_systree_commit = master
+
+PACKAGES += vmstats
+pkg_vmstats_name = vmstats
+pkg_vmstats_description = tiny Erlang app that works in conjunction with statsderl in order to generate information on the Erlang VM for graphite logs.
+pkg_vmstats_homepage = https://github.com/ferd/vmstats
+pkg_vmstats_fetch = git
+pkg_vmstats_repo = https://github.com/ferd/vmstats
+pkg_vmstats_commit = master
+
+PACKAGES += walrus
+pkg_walrus_name = walrus
+pkg_walrus_description = Walrus - Mustache-like Templating
+pkg_walrus_homepage = https://github.com/devinus/walrus
+pkg_walrus_fetch = git
+pkg_walrus_repo = https://github.com/devinus/walrus
+pkg_walrus_commit = master
+
+PACKAGES += webmachine
+pkg_webmachine_name = webmachine
+pkg_webmachine_description = A REST-based system for building web applications.
+pkg_webmachine_homepage = https://github.com/basho/webmachine
+pkg_webmachine_fetch = git
+pkg_webmachine_repo = https://github.com/basho/webmachine
+pkg_webmachine_commit = master
+
+PACKAGES += websocket_client
+pkg_websocket_client_name = websocket_client
+pkg_websocket_client_description = Erlang websocket client (ws and wss supported)
+pkg_websocket_client_homepage = https://github.com/jeremyong/websocket_client
+pkg_websocket_client_fetch = git
+pkg_websocket_client_repo = https://github.com/jeremyong/websocket_client
+pkg_websocket_client_commit = master
+
+PACKAGES += worker_pool
+pkg_worker_pool_name = worker_pool
+pkg_worker_pool_description = a simple erlang worker pool
+pkg_worker_pool_homepage = https://github.com/inaka/worker_pool
+pkg_worker_pool_fetch = git
+pkg_worker_pool_repo = https://github.com/inaka/worker_pool
+pkg_worker_pool_commit = master
+
+PACKAGES += wrangler
+pkg_wrangler_name = wrangler
+pkg_wrangler_description = Import of the Wrangler svn repository.
+pkg_wrangler_homepage = http://www.cs.kent.ac.uk/projects/wrangler/Home.html
+pkg_wrangler_fetch = git
+pkg_wrangler_repo = https://github.com/RefactoringTools/wrangler
+pkg_wrangler_commit = master
+
+PACKAGES += wsock
+pkg_wsock_name = wsock
+pkg_wsock_description = Erlang library to build WebSocket clients and servers
+pkg_wsock_homepage = https://github.com/madtrick/wsock
+pkg_wsock_fetch = git
+pkg_wsock_repo = https://github.com/madtrick/wsock
+pkg_wsock_commit = master
+
+PACKAGES += xhttpc
+pkg_xhttpc_name = xhttpc
+pkg_xhttpc_description = Extensible HTTP Client for Erlang
+pkg_xhttpc_homepage = https://github.com/seriyps/xhttpc
+pkg_xhttpc_fetch = git
+pkg_xhttpc_repo = https://github.com/seriyps/xhttpc
+pkg_xhttpc_commit = master
+
+PACKAGES += xref_runner
+pkg_xref_runner_name = xref_runner
+pkg_xref_runner_description = Erlang Xref Runner (inspired by rebar xref)
+pkg_xref_runner_homepage = https://github.com/inaka/xref_runner
+pkg_xref_runner_fetch = git
+pkg_xref_runner_repo = https://github.com/inaka/xref_runner
+pkg_xref_runner_commit = master
+
+PACKAGES += yamerl
+pkg_yamerl_name = yamerl
+pkg_yamerl_description = YAML 1.2 parser in pure Erlang
+pkg_yamerl_homepage = https://github.com/yakaz/yamerl
+pkg_yamerl_fetch = git
+pkg_yamerl_repo = https://github.com/yakaz/yamerl
+pkg_yamerl_commit = master
+
+PACKAGES += yamler
+pkg_yamler_name = yamler
+pkg_yamler_description = libyaml-based yaml loader for Erlang
+pkg_yamler_homepage = https://github.com/goertzenator/yamler
+pkg_yamler_fetch = git
+pkg_yamler_repo = https://github.com/goertzenator/yamler
+pkg_yamler_commit = master
+
+PACKAGES += yaws
+pkg_yaws_name = yaws
+pkg_yaws_description = Yaws webserver
+pkg_yaws_homepage = http://yaws.hyber.org
+pkg_yaws_fetch = git
+pkg_yaws_repo = https://github.com/klacke/yaws
+pkg_yaws_commit = master
+
+PACKAGES += zab_engine
+pkg_zab_engine_name = zab_engine
+pkg_zab_engine_description = Zab protocol implemented in Erlang
+pkg_zab_engine_homepage = https://github.com/xinmingyao/zab_engine
+pkg_zab_engine_fetch = git
+pkg_zab_engine_repo = https://github.com/xinmingyao/zab_engine
+pkg_zab_engine_commit = master
+
+PACKAGES += zabbix_sender
+pkg_zabbix_sender_name = zabbix_sender
+pkg_zabbix_sender_description = Zabbix trapper for sending data to Zabbix in pure Erlang
+pkg_zabbix_sender_homepage = https://github.com/stalkermn/zabbix_sender
+pkg_zabbix_sender_fetch = git
+pkg_zabbix_sender_repo = https://github.com/stalkermn/zabbix_sender.git
+pkg_zabbix_sender_commit = master
+
+PACKAGES += zeta
+pkg_zeta_name = zeta
+pkg_zeta_description = HTTP access log parser in Erlang
+pkg_zeta_homepage = https://github.com/s1n4/zeta
+pkg_zeta_fetch = git
+pkg_zeta_repo = https://github.com/s1n4/zeta
+pkg_zeta_commit = master
+
+PACKAGES += zippers
+pkg_zippers_name = zippers
+pkg_zippers_description = A library for functional zipper data structures in Erlang.
+pkg_zippers_homepage = https://github.com/ferd/zippers
+pkg_zippers_fetch = git
+pkg_zippers_repo = https://github.com/ferd/zippers
+pkg_zippers_commit = master
+
+PACKAGES += zlists
+pkg_zlists_name = zlists
+pkg_zlists_description = Erlang lazy lists library.
+pkg_zlists_homepage = https://github.com/vjache/erlang-zlists
+pkg_zlists_fetch = git
+pkg_zlists_repo = https://github.com/vjache/erlang-zlists
+pkg_zlists_commit = master
+
+PACKAGES += zraft_lib
+pkg_zraft_lib_name = zraft_lib
+pkg_zraft_lib_description = Erlang raft consensus protocol implementation
+pkg_zraft_lib_homepage = https://github.com/dreyk/zraft_lib
+pkg_zraft_lib_fetch = git
+pkg_zraft_lib_repo = https://github.com/dreyk/zraft_lib
+pkg_zraft_lib_commit = master
+
+PACKAGES += zucchini
+pkg_zucchini_name = zucchini
+pkg_zucchini_description = An Erlang INI parser
+pkg_zucchini_homepage = https://github.com/devinus/zucchini
+pkg_zucchini_fetch = git
+pkg_zucchini_repo = https://github.com/devinus/zucchini
+pkg_zucchini_commit = master
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: search
+
+define pkg_print
+ $(verbose) printf "%s\n" \
+ $(if $(call core_eq,$(1),$(pkg_$(1)_name)),,"Pkg name: $(1)") \
+ "App name: $(pkg_$(1)_name)" \
+ "Description: $(pkg_$(1)_description)" \
+ "Home page: $(pkg_$(1)_homepage)" \
+ "Fetch with: $(pkg_$(1)_fetch)" \
+ "Repository: $(pkg_$(1)_repo)" \
+ "Commit: $(pkg_$(1)_commit)" \
+ ""
+
+endef
+
+search:
+ifdef q
+ $(foreach p,$(PACKAGES), \
+ $(if $(findstring $(call core_lc,$(q)),$(call core_lc,$(pkg_$(p)_name) $(pkg_$(p)_description))), \
+ $(call pkg_print,$(p))))
+else
+ $(foreach p,$(PACKAGES),$(call pkg_print,$(p)))
+endif
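+
+# For instance (hypothetical queries; the match is case-insensitive over
+# both package names and descriptions):
+#
+#   $ make search q=riak
+#   $ make search              # no q: print every known package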
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-deps clean-tmp-deps.log
+
+# Configuration.
+
+ifdef OTP_DEPS
+$(warning The variable OTP_DEPS is deprecated in favor of LOCAL_DEPS.)
+endif
+
+IGNORE_DEPS ?=
+export IGNORE_DEPS
+
+APPS_DIR ?= $(CURDIR)/apps
+export APPS_DIR
+
+DEPS_DIR ?= $(CURDIR)/deps
+export DEPS_DIR
+
+REBAR_DEPS_DIR = $(DEPS_DIR)
+export REBAR_DEPS_DIR
+
+REBAR_GIT ?= https://github.com/rebar/rebar
+REBAR_COMMIT ?= 576e12171ab8d69b048b827b92aa65d067deea01
+
+# External "early" plugins (see core/plugins.mk for regular plugins).
+# They both use the core_dep_plugin macro.
+
+define core_dep_plugin
+ifeq ($(2),$(PROJECT))
+-include $$(patsubst $(PROJECT)/%,%,$(1))
+else
+-include $(DEPS_DIR)/$(1)
+
+$(DEPS_DIR)/$(1): $(DEPS_DIR)/$(2) ;
+endif
+endef
+
+DEP_EARLY_PLUGINS ?=
+
+$(foreach p,$(DEP_EARLY_PLUGINS),\
+ $(eval $(if $(findstring /,$p),\
+ $(call core_dep_plugin,$p,$(firstword $(subst /, ,$p))),\
+ $(call core_dep_plugin,$p/early-plugins.mk,$p))))
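+
+# For instance (hypothetical names): a bare dependency name loads that
+# dependency's early-plugins.mk, while a name containing a slash is read
+# as a file path under DEPS_DIR, rooted at its first component:
+#
+#   DEP_EARLY_PLUGINS = mk_dep                 # $(DEPS_DIR)/mk_dep/early-plugins.mk
+#   DEP_EARLY_PLUGINS = mk_dep/mk/plugins.mk   # $(DEPS_DIR)/mk_dep/mk/plugins.mk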
+
+# Query functions.
+
+query_fetch_method = $(if $(dep_$(1)),$(call _qfm_dep,$(word 1,$(dep_$(1)))),$(call _qfm_pkg,$(1)))
+_qfm_dep = $(if $(dep_fetch_$(1)),$(1),$(if $(IS_DEP),legacy,fail))
+_qfm_pkg = $(if $(pkg_$(1)_fetch),$(pkg_$(1)_fetch),fail)
+
+query_name = $(if $(dep_$(1)),$(1),$(if $(pkg_$(1)_name),$(pkg_$(1)_name),$(1)))
+
+query_repo = $(call _qr,$(1),$(call query_fetch_method,$(1)))
+_qr = $(if $(query_repo_$(2)),$(call query_repo_$(2),$(1)),$(call dep_repo,$(1)))
+
+query_repo_default = $(if $(dep_$(1)),$(word 2,$(dep_$(1))),$(pkg_$(1)_repo))
+query_repo_git = $(patsubst git://github.com/%,https://github.com/%,$(call query_repo_default,$(1)))
+query_repo_git-subfolder = $(call query_repo_git,$(1))
+query_repo_git-submodule = -
+query_repo_hg = $(call query_repo_default,$(1))
+query_repo_svn = $(call query_repo_default,$(1))
+query_repo_cp = $(call query_repo_default,$(1))
+query_repo_ln = $(call query_repo_default,$(1))
+query_repo_hex = https://hex.pm/packages/$(if $(word 3,$(dep_$(1))),$(word 3,$(dep_$(1))),$(1))
+query_repo_fail = -
+query_repo_legacy = -
+
+query_version = $(call _qv,$(1),$(call query_fetch_method,$(1)))
+_qv = $(if $(query_version_$(2)),$(call query_version_$(2),$(1)),$(call dep_commit,$(1)))
+
+query_version_default = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(word 3,$(dep_$(1))),$(pkg_$(1)_commit)))
+query_version_git = $(call query_version_default,$(1))
+query_version_git-subfolder = $(call query_version_git,$(1))
+query_version_git-submodule = -
+query_version_hg = $(call query_version_default,$(1))
+query_version_svn = -
+query_version_cp = -
+query_version_ln = -
+query_version_hex = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(word 2,$(dep_$(1))),$(pkg_$(1)_commit)))
+query_version_fail = -
+query_version_legacy = -
+
+query_extra = $(call _qe,$(1),$(call query_fetch_method,$(1)))
+_qe = $(if $(query_extra_$(2)),$(call query_extra_$(2),$(1)),-)
+
+query_extra_git = -
+query_extra_git-subfolder = $(if $(dep_$(1)),subfolder=$(word 4,$(dep_$(1))),-)
+query_extra_git-submodule = -
+query_extra_hg = -
+query_extra_svn = -
+query_extra_cp = -
+query_extra_ln = -
+query_extra_hex = $(if $(dep_$(1)),package-name=$(word 3,$(dep_$(1))),-)
+query_extra_fail = -
+query_extra_legacy = -
+
+query_absolute_path = $(addprefix $(DEPS_DIR)/,$(call query_name,$(1)))
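+
+# For instance, given a (hypothetical) declaration
+#   dep_cowlib = git https://github.com/ninenines/cowlib 2.0.0
+# $(call query_fetch_method,cowlib) expands to git,
+# $(call query_repo,cowlib) to the repository URL, and
+# $(call query_version,cowlib) to 2.0.0; names with no dep_* entry fall
+# back to the pkg_* index above.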
+
+# Deprecated legacy query functions.
+dep_fetch = $(call query_fetch_method,$(1))
+dep_name = $(call query_name,$(1))
+dep_repo = $(call query_repo_git,$(1))
+dep_commit = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(if $(filter hex,$(word 1,$(dep_$(1)))),$(word 2,$(dep_$(1))),$(word 3,$(dep_$(1)))),$(pkg_$(1)_commit)))
+
+LOCAL_DEPS_DIRS = $(foreach a,$(LOCAL_DEPS),$(if $(wildcard $(APPS_DIR)/$(a)),$(APPS_DIR)/$(a)))
+ALL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(foreach dep,$(filter-out $(IGNORE_DEPS),$(BUILD_DEPS) $(DEPS)),$(call dep_name,$(dep))))
+
+# When we are calling an app directly we don't want to include it here,
+# otherwise it would be treated both as an app and as the top-level project.
+ALL_APPS_DIRS = $(if $(wildcard $(APPS_DIR)/),$(filter-out $(APPS_DIR),$(shell find $(APPS_DIR) -maxdepth 1 -type d)))
+ifdef ROOT_DIR
+ifndef IS_APP
+ALL_APPS_DIRS := $(filter-out $(APPS_DIR)/$(notdir $(CURDIR)),$(ALL_APPS_DIRS))
+endif
+endif
+
+ifeq ($(filter $(APPS_DIR) $(DEPS_DIR),$(subst :, ,$(ERL_LIBS))),)
+ifeq ($(ERL_LIBS),)
+ ERL_LIBS = $(APPS_DIR):$(DEPS_DIR)
+else
+ ERL_LIBS := $(ERL_LIBS):$(APPS_DIR):$(DEPS_DIR)
+endif
+endif
+export ERL_LIBS
+
+export NO_AUTOPATCH
+
+# Verbosity.
+
+dep_verbose_0 = @echo " DEP $1 ($(call dep_commit,$1))";
+dep_verbose_2 = set -x;
+dep_verbose = $(dep_verbose_$(V))
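+
+# Following the usual erlang.mk V convention: V=0 (the default) prints a
+# terse "DEP name (commit)" line per dependency, V=1 lets make echo the
+# full commands, and V=2 traces them with set -x as well.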
+
+# Optimization: don't recompile deps unless truly necessary.
+
+ifndef IS_DEP
+ifneq ($(MAKELEVEL),0)
+$(shell rm -f ebin/dep_built)
+endif
+endif
+
+# Core targets.
+
+ALL_APPS_DIRS_TO_BUILD = $(if $(LOCAL_DEPS_DIRS)$(IS_APP),$(LOCAL_DEPS_DIRS),$(ALL_APPS_DIRS))
+
+apps:: $(ALL_APPS_DIRS) clean-tmp-deps.log | $(ERLANG_MK_TMP)
+# Create an ebin directory for all apps to make sure Erlang recognizes them
+# as proper OTP applications when using -include_lib. This is a temporary
+# fix; a proper fix would be to compile apps/* in the right order.
+ifndef IS_APP
+ifneq ($(ALL_APPS_DIRS),)
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ mkdir -p $$dep/ebin; \
+ done
+endif
+endif
+# At the toplevel: if LOCAL_DEPS is defined with at least one local app, only
+# compile that list of apps. Otherwise, compile everything.
+# Within an app: compile all LOCAL_DEPS that are (uncompiled) local apps.
+ifneq ($(ALL_APPS_DIRS_TO_BUILD),)
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS_TO_BUILD); do \
+ if grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/apps.log; then \
+ :; \
+ else \
+ echo $$dep >> $(ERLANG_MK_TMP)/apps.log; \
+ $(MAKE) -C $$dep $(if $(IS_TEST),test-build-app) IS_APP=1; \
+ fi \
+ done
+endif
+
+clean-tmp-deps.log:
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_TMP)/apps.log $(ERLANG_MK_TMP)/deps.log
+endif
+
+# Erlang.mk does not rebuild dependencies once they have been compiled.
+# A developer working on the top-level project and some of its
+# dependencies at the same time may want to change this behavior.
+# There are two solutions:
+# 1. Set `FULL=1` so that all dependencies are visited and
+# recursively recompiled if necessary.
+# 2. Set `FORCE_REBUILD=` to the specific list of dependencies that
+# should be recompiled (instead of the whole set).
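+#
+# For instance:
+#
+#   $ make FULL=1
+#   $ make FORCE_REBUILD="cowboy ranch"   # hypothetical dependency names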
+
+FORCE_REBUILD ?=
+
+ifeq ($(origin FULL),undefined)
+ifneq ($(strip $(force_rebuild_dep)$(FORCE_REBUILD)),)
+define force_rebuild_dep
+echo "$(FORCE_REBUILD)" | grep -qw "$$(basename "$1")"
+endef
+endif
+endif
+
+ifneq ($(SKIP_DEPS),)
+deps::
+else
+deps:: $(ALL_DEPS_DIRS) apps clean-tmp-deps.log | $(ERLANG_MK_TMP)
+ifneq ($(ALL_DEPS_DIRS),)
+ $(verbose) set -e; for dep in $(ALL_DEPS_DIRS); do \
+ if grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/deps.log; then \
+ :; \
+ else \
+ echo $$dep >> $(ERLANG_MK_TMP)/deps.log; \
+ if [ -z "$(strip $(FULL))" ] $(if $(force_rebuild_dep),&& ! ($(call force_rebuild_dep,$$dep)),) && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ elif [ -f $$dep/GNUmakefile ] || [ -f $$dep/makefile ] || [ -f $$dep/Makefile ]; then \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ else \
+ echo "Error: No Makefile to build dependency $$dep." >&2; \
+ exit 2; \
+ fi \
+ fi \
+ done
+endif
+endif
+
+# Deps related targets.
+
+# @todo rename GNUmakefile and makefile to Makefile first, if they exist
+# While the Makefile could also be named GNUmakefile or makefile,
+# in practice only Makefile has been needed so far.
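+#
+# Decision tree: a dep shipping its own erlang.mk is re-pointed at the
+# parent erlang.mk; a dep whose Makefile references rebar (directly, via
+# an included ../*.mk file, via another *.mk file, or via a rebar.lock)
+# goes through dep_autopatch2; a dep with no Makefile at all gets a noop
+# Makefile when it has no src/ directory and dep_autopatch2 otherwise.
+# dep_autopatch2 in turn runs the full rebar autopatch when rebar files
+# are present, and generates a minimal erlang.mk Makefile if not.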
+define dep_autopatch
+ if [ -f $(DEPS_DIR)/$(1)/erlang.mk ]; then \
+ rm -rf $(DEPS_DIR)/$1/ebin/; \
+ $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+ $(call dep_autopatch_erlang_mk,$(1)); \
+ elif [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+ if [ -f $(DEPS_DIR)/$1/rebar.lock ]; then \
+ $(call dep_autopatch2,$1); \
+ elif [ 0 != `grep -c "include ../\w*\.mk" $(DEPS_DIR)/$(1)/Makefile` ]; then \
+ $(call dep_autopatch2,$(1)); \
+ elif [ 0 != `grep -ci "^[^#].*rebar" $(DEPS_DIR)/$(1)/Makefile` ]; then \
+ $(call dep_autopatch2,$(1)); \
+ elif [ -n "`find $(DEPS_DIR)/$(1)/ -type f -name \*.mk -not -name erlang.mk -exec grep -i "^[^#].*rebar" '{}' \;`" ]; then \
+ $(call dep_autopatch2,$(1)); \
+ fi \
+ else \
+ if [ ! -d $(DEPS_DIR)/$(1)/src/ ]; then \
+ $(call dep_autopatch_noop,$(1)); \
+ else \
+ $(call dep_autopatch2,$(1)); \
+ fi \
+ fi
+endef
+
+define dep_autopatch2
+ ! test -f $(DEPS_DIR)/$1/ebin/$1.app || \
+ mv -n $(DEPS_DIR)/$1/ebin/$1.app $(DEPS_DIR)/$1/src/$1.app.src; \
+ rm -f $(DEPS_DIR)/$1/ebin/$1.app; \
+ if [ -f $(DEPS_DIR)/$1/src/$1.app.src.script ]; then \
+ $(call erlang,$(call dep_autopatch_appsrc_script.erl,$(1))); \
+ fi; \
+ $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+ if [ -f $(DEPS_DIR)/$(1)/rebar -o -f $(DEPS_DIR)/$(1)/rebar.config -o -f $(DEPS_DIR)/$(1)/rebar.config.script -o -f $(DEPS_DIR)/$1/rebar.lock ]; then \
+ $(call dep_autopatch_fetch_rebar); \
+ $(call dep_autopatch_rebar,$(1)); \
+ else \
+ $(call dep_autopatch_gen,$(1)); \
+ fi
+endef
+
+define dep_autopatch_noop
+ printf "noop:\n" > $(DEPS_DIR)/$(1)/Makefile
+endef
+
+# Replace "include erlang.mk" with a line that will load the parent Erlang.mk
+# if given. Do it for all 3 possible Makefile file names.
+ifeq ($(NO_AUTOPATCH_ERLANG_MK),)
+define dep_autopatch_erlang_mk
+ for f in Makefile makefile GNUmakefile; do \
+ if [ -f $(DEPS_DIR)/$1/$$f ]; then \
+ sed -i.bak s/'include *erlang.mk'/'include $$(if $$(ERLANG_MK_FILENAME),$$(ERLANG_MK_FILENAME),erlang.mk)'/ $(DEPS_DIR)/$1/$$f; \
+ fi \
+ done
+endef
+else
+define dep_autopatch_erlang_mk
+ :
+endef
+endif
+
+define dep_autopatch_gen
+ printf "%s\n" \
+ "ERLC_OPTS = +debug_info" \
+ "include ../../erlang.mk" > $(DEPS_DIR)/$(1)/Makefile
+endef
+
+# We use flock/lockf when available to avoid concurrency issues.
+define dep_autopatch_fetch_rebar
+ if command -v flock >/dev/null; then \
+ flock $(ERLANG_MK_TMP)/rebar.lock sh -c "$(call dep_autopatch_fetch_rebar2)"; \
+ elif command -v lockf >/dev/null; then \
+ lockf $(ERLANG_MK_TMP)/rebar.lock sh -c "$(call dep_autopatch_fetch_rebar2)"; \
+ else \
+ $(call dep_autopatch_fetch_rebar2); \
+ fi
+endef
+
+define dep_autopatch_fetch_rebar2
+ if [ ! -d $(ERLANG_MK_TMP)/rebar ]; then \
+ git clone -q -n -- $(REBAR_GIT) $(ERLANG_MK_TMP)/rebar; \
+ cd $(ERLANG_MK_TMP)/rebar; \
+ git checkout -q $(REBAR_COMMIT); \
+ ./bootstrap; \
+ cd -; \
+ fi
+endef
+
+define dep_autopatch_rebar
+ if [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+ mv $(DEPS_DIR)/$(1)/Makefile $(DEPS_DIR)/$(1)/Makefile.orig.mk; \
+ fi; \
+ $(call erlang,$(call dep_autopatch_rebar.erl,$(1))); \
+ rm -f $(DEPS_DIR)/$(1)/ebin/$(1).app
+endef
+
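+# The script below translates a dependency's rebar configuration into an
+# appended erlang.mk Makefile: ERLC_OPTS from erl_opts, DEPS entries from
+# deps (resolving hex package versions through rebar.lock when available),
+# pre-deps/pre-app hooks from pre_hooks, COMPILE_FIRST from erl_first_files,
+# and a generated c_src/Makefile.erlang.mk from port_specs/port_env for
+# NIFs and port drivers; it finally loads and runs any compile-related
+# rebar plugins.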
+define dep_autopatch_rebar.erl
+ application:load(rebar),
+ application:set_env(rebar, log_level, debug),
+ rmemo:start(),
+ Conf1 = case file:consult("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config)") of
+ {ok, Conf0} -> Conf0;
+ _ -> []
+ end,
+ {Conf, OsEnv} = fun() ->
+ case filelib:is_file("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)") of
+ false -> {Conf1, []};
+ true ->
+ Bindings0 = erl_eval:new_bindings(),
+ Bindings1 = erl_eval:add_binding('CONFIG', Conf1, Bindings0),
+ Bindings = erl_eval:add_binding('SCRIPT', "$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings1),
+ Before = os:getenv(),
+ {ok, Conf2} = file:script("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings),
+ {Conf2, lists:foldl(fun(E, Acc) -> lists:delete(E, Acc) end, os:getenv(), Before)}
+ end
+ end(),
+ Write = fun (Text) ->
+ file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/Makefile)", Text, [append])
+ end,
+ Escape = fun (Text) ->
+ re:replace(Text, "\\\\$$", "\$$$$", [global, {return, list}])
+ end,
+ Write("IGNORE_DEPS += edown eper eunit_formatters meck node_package "
+ "rebar_lock_deps_plugin rebar_vsn_plugin reltool_util\n"),
+ Write("C_SRC_DIR = /path/do/not/exist\n"),
+ Write("C_SRC_TYPE = rebar\n"),
+ Write("DRV_CFLAGS = -fPIC\nexport DRV_CFLAGS\n"),
+ Write(["ERLANG_ARCH = ", rebar_utils:wordsize(), "\nexport ERLANG_ARCH\n"]),
+ ToList = fun
+ (V) when is_atom(V) -> atom_to_list(V);
+ (V) when is_list(V) -> "'\\"" ++ V ++ "\\"'"
+ end,
+ fun() ->
+ Write("ERLC_OPTS = +debug_info\nexport ERLC_OPTS\n"),
+ case lists:keyfind(erl_opts, 1, Conf) of
+ false -> ok;
+ {_, ErlOpts} ->
+ lists:foreach(fun
+ ({d, D}) ->
+ Write("ERLC_OPTS += -D" ++ ToList(D) ++ "=1\n");
+ ({d, DKey, DVal}) ->
+ Write("ERLC_OPTS += -D" ++ ToList(DKey) ++ "=" ++ ToList(DVal) ++ "\n");
+ ({i, I}) ->
+ Write(["ERLC_OPTS += -I ", I, "\n"]);
+ ({platform_define, Regex, D}) ->
+ case rebar_utils:is_arch(Regex) of
+ true -> Write("ERLC_OPTS += -D" ++ ToList(D) ++ "=1\n");
+ false -> ok
+ end;
+ ({parse_transform, PT}) ->
+ Write("ERLC_OPTS += +'{parse_transform, " ++ ToList(PT) ++ "}'\n");
+ (_) -> ok
+ end, ErlOpts)
+ end,
+ Write("\n")
+ end(),
+ GetHexVsn = fun(N, NP) ->
+ case file:consult("$(call core_native_path,$(DEPS_DIR)/$1/rebar.lock)") of
+ {ok, Lock} ->
+ io:format("~p~n", [Lock]),
+ case lists:keyfind("1.1.0", 1, Lock) of
+ {_, LockPkgs} ->
+ io:format("~p~n", [LockPkgs]),
+ case lists:keyfind(atom_to_binary(N, latin1), 1, LockPkgs) of
+ {_, {pkg, _, Vsn}, _} ->
+ io:format("~p~n", [Vsn]),
+ {N, {hex, NP, binary_to_list(Vsn)}};
+ _ ->
+ false
+ end;
+ _ ->
+ false
+ end;
+ _ ->
+ false
+ end
+ end,
+ SemVsn = fun
+ ("~>" ++ S0) ->
+ S = case S0 of
+ " " ++ S1 -> S1;
+ _ -> S0
+ end,
+ case length([ok || $$. <- S]) of
+ 0 -> S ++ ".0.0";
+ 1 -> S ++ ".0";
+ _ -> S
+ end;
+ (S) -> S
+ end,
+ fun() ->
+ File = case lists:keyfind(deps, 1, Conf) of
+ false -> [];
+ {_, Deps} ->
+ [begin case case Dep of
+ N when is_atom(N) -> GetHexVsn(N, N);
+ {N, S} when is_atom(N), is_list(S) -> {N, {hex, N, SemVsn(S)}};
+ {N, {pkg, NP}} when is_atom(N) -> GetHexVsn(N, NP);
+ {N, S, {pkg, NP}} -> {N, {hex, NP, S}};
+ {N, S} when is_tuple(S) -> {N, S};
+ {N, _, S} -> {N, S};
+ {N, _, S, _} -> {N, S};
+ _ -> false
+ end of
+ false -> ok;
+ {Name, Source} ->
+ {Method, Repo, Commit} = case Source of
+ {hex, NPV, V} -> {hex, V, NPV};
+ {git, R} -> {git, R, master};
+ {M, R, {branch, C}} -> {M, R, C};
+ {M, R, {ref, C}} -> {M, R, C};
+ {M, R, {tag, C}} -> {M, R, C};
+ {M, R, C} -> {M, R, C}
+ end,
+ Write(io_lib:format("DEPS += ~s\ndep_~s = ~s ~s ~s~n", [Name, Name, Method, Repo, Commit]))
+ end end || Dep <- Deps]
+ end
+ end(),
+ fun() ->
+ case lists:keyfind(erl_first_files, 1, Conf) of
+ false -> ok;
+ {_, Files} ->
+ Names = [[" ", case lists:reverse(F) of
+ "lre." ++ Elif -> lists:reverse(Elif);
+ "lrx." ++ Elif -> lists:reverse(Elif);
+ "lry." ++ Elif -> lists:reverse(Elif);
+ Elif -> lists:reverse(Elif)
+ end] || "src/" ++ F <- Files],
+ Write(io_lib:format("COMPILE_FIRST +=~s\n", [Names]))
+ end
+ end(),
+ Write("\n\nrebar_dep: preprocess pre-deps deps pre-app app\n"),
+ Write("\npreprocess::\n"),
+ Write("\npre-deps::\n"),
+ Write("\npre-app::\n"),
+ PatchHook = fun(Cmd) ->
+ Cmd2 = re:replace(Cmd, "^([g]?make)(.*)( -C.*)", "\\\\1\\\\3\\\\2", [{return, list}]),
+ case Cmd2 of
+ "make -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1);
+ "gmake -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1);
+ "make " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1);
+ "gmake " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1);
+ _ -> Escape(Cmd)
+ end
+ end,
+ fun() ->
+ case lists:keyfind(pre_hooks, 1, Conf) of
+ false -> ok;
+ {_, Hooks} ->
+ [case H of
+ {'get-deps', Cmd} ->
+ Write("\npre-deps::\n\t" ++ PatchHook(Cmd) ++ "\n");
+ {compile, Cmd} ->
+ Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n");
+ {Regex, compile, Cmd} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n");
+ false -> ok
+ end;
+ _ -> ok
+ end || H <- Hooks]
+ end
+ end(),
+ ShellToMk = fun(V0) ->
+ V1 = re:replace(V0, "[$$][(]", "$$\(shell ", [global]),
+ V = re:replace(V1, "([$$])(?![(])(\\\\w*)", "\\\\1(\\\\2)", [global]),
+ re:replace(V, "-Werror\\\\b", "", [{return, list}, global])
+ end,
+ PortSpecs = fun() ->
+ case lists:keyfind(port_specs, 1, Conf) of
+ false ->
+ case filelib:is_dir("$(call core_native_path,$(DEPS_DIR)/$1/c_src)") of
+ false -> [];
+ true ->
+ [{"priv/" ++ proplists:get_value(so_name, Conf, "$(1)_drv.so"),
+ proplists:get_value(port_sources, Conf, ["c_src/*.c"]), []}]
+ end;
+ {_, Specs} ->
+ lists:flatten([case S of
+ {Output, Input} -> {ShellToMk(Output), Input, []};
+ {Regex, Output, Input} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {ShellToMk(Output), Input, []};
+ false -> []
+ end;
+ {Regex, Output, Input, [{env, Env}]} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {ShellToMk(Output), Input, Env};
+ false -> []
+ end
+ end || S <- Specs])
+ end
+ end(),
+ PortSpecWrite = fun (Text) ->
+ file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/c_src/Makefile.erlang.mk)", Text, [append])
+ end,
+ case PortSpecs of
+ [] -> ok;
+ _ ->
+ Write("\npre-app::\n\t@$$\(MAKE) --no-print-directory -f c_src/Makefile.erlang.mk\n"),
+ PortSpecWrite(io_lib:format("ERL_CFLAGS ?= -finline-functions -Wall -fPIC -I \\"~s/erts-~s/include\\" -I \\"~s\\"\n",
+ [code:root_dir(), erlang:system_info(version), code:lib_dir(erl_interface, include)])),
+ PortSpecWrite(io_lib:format("ERL_LDFLAGS ?= -L \\"~s\\" -lei\n",
+ [code:lib_dir(erl_interface, lib)])),
+ [PortSpecWrite(["\n", E, "\n"]) || E <- OsEnv],
+ FilterEnv = fun(Env) ->
+ lists:flatten([case E of
+ {_, _} -> E;
+ {Regex, K, V} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {K, V};
+ false -> []
+ end
+ end || E <- Env])
+ end,
+ MergeEnv = fun(Env) ->
+ lists:foldl(fun ({K, V}, Acc) ->
+ case lists:keyfind(K, 1, Acc) of
+ false -> [{K, rebar_utils:expand_env_variable(V, K, "")}|Acc];
+ {_, V0} -> [{K, rebar_utils:expand_env_variable(V, K, V0)}|Acc]
+ end
+ end, [], Env)
+ end,
+ PortEnv = case lists:keyfind(port_env, 1, Conf) of
+ false -> [];
+ {_, PortEnv0} -> FilterEnv(PortEnv0)
+ end,
+ PortSpec = fun ({Output, Input0, Env}) ->
+ filelib:ensure_dir("$(call core_native_path,$(DEPS_DIR)/$1/)" ++ Output),
+ Input = [[" ", I] || I <- Input0],
+ PortSpecWrite([
+ [["\n", K, " = ", ShellToMk(V)] || {K, V} <- lists:reverse(MergeEnv(PortEnv))],
+ case $(PLATFORM) of
+ darwin -> "\n\nLDFLAGS += -flat_namespace -undefined suppress";
+ _ -> ""
+ end,
+ "\n\nall:: ", Output, "\n\t@:\n\n",
+ "%.o: %.c\n\t$$\(CC) -c -o $$\@ $$\< $$\(CFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.C\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.cc\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.cpp\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ [[Output, ": ", K, " += ", ShellToMk(V), "\n"] || {K, V} <- lists:reverse(MergeEnv(FilterEnv(Env)))],
+ Output, ": $$\(foreach ext,.c .C .cc .cpp,",
+ "$$\(patsubst %$$\(ext),%.o,$$\(filter %$$\(ext),$$\(wildcard", Input, "))))\n",
+ "\t$$\(CC) -o $$\@ $$\? $$\(LDFLAGS) $$\(ERL_LDFLAGS) $$\(DRV_LDFLAGS) $$\(EXE_LDFLAGS)",
+ case {filename:extension(Output), $(PLATFORM)} of
+ {[], _} -> "\n";
+ {_, darwin} -> "\n";
+ _ -> " -shared\n"
+ end])
+ end,
+ [PortSpec(S) || S <- PortSpecs]
+ end,
+ fun() ->
+ case lists:keyfind(plugins, 1, Conf) of
+ false -> ok;
+ {_, Plugins0} ->
+ Plugins = [P || P <- Plugins0, is_tuple(P)],
+ case lists:keyfind('lfe-compile', 1, Plugins) of
+ false -> ok;
+ _ -> Write("\nBUILD_DEPS = lfe lfe.mk\ndep_lfe.mk = git https://github.com/ninenines/lfe.mk master\nDEP_PLUGINS = lfe.mk\n")
+ end
+ end
+ end(),
+ Write("\ninclude $$\(if $$\(ERLANG_MK_FILENAME),$$\(ERLANG_MK_FILENAME),erlang.mk)"),
+ RunPlugin = fun(Plugin, Step) ->
+ case erlang:function_exported(Plugin, Step, 2) of
+ false -> ok;
+ true ->
+ c:cd("$(call core_native_path,$(DEPS_DIR)/$1/)"),
+ Ret = Plugin:Step({config, "", Conf, dict:new(), dict:new(), dict:new(),
+ dict:store(base_dir, "", dict:new())}, undefined),
+ io:format("rebar plugin ~p step ~p ret ~p~n", [Plugin, Step, Ret])
+ end
+ end,
+ fun() ->
+ case lists:keyfind(plugins, 1, Conf) of
+ false -> ok;
+ {_, Plugins0} ->
+ Plugins = [P || P <- Plugins0, is_atom(P)],
+ [begin
+ case lists:keyfind(deps, 1, Conf) of
+ false -> ok;
+ {_, Deps} ->
+ case lists:keyfind(P, 1, Deps) of
+ false -> ok;
+ _ ->
+ Path = "$(call core_native_path,$(DEPS_DIR)/)" ++ atom_to_list(P),
+ io:format("~s", [os:cmd("$(MAKE) -C $(call core_native_path,$(DEPS_DIR)/$1) " ++ Path)]),
+ io:format("~s", [os:cmd("$(MAKE) -C " ++ Path ++ " IS_DEP=1")]),
+ code:add_patha(Path ++ "/ebin")
+ end
+ end
+ end || P <- Plugins],
+ [case code:load_file(P) of
+ {module, P} -> ok;
+ _ ->
+ case lists:keyfind(plugin_dir, 1, Conf) of
+ false -> ok;
+ {_, PluginsDir} ->
+ ErlFile = "$(call core_native_path,$(DEPS_DIR)/$1/)" ++ PluginsDir ++ "/" ++ atom_to_list(P) ++ ".erl",
+ {ok, P, Bin} = compile:file(ErlFile, [binary]),
+ {module, P} = code:load_binary(P, ErlFile, Bin)
+ end
+ end || P <- Plugins],
+ [RunPlugin(P, preprocess) || P <- Plugins],
+ [RunPlugin(P, pre_compile) || P <- Plugins],
+ [RunPlugin(P, compile) || P <- Plugins]
+ end
+ end(),
+ halt()
+endef
+
+define dep_autopatch_appsrc_script.erl
+ AppSrc = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+ AppSrcScript = AppSrc ++ ".script",
+ {ok, Conf0} = file:consult(AppSrc),
+ Bindings0 = erl_eval:new_bindings(),
+ Bindings1 = erl_eval:add_binding('CONFIG', Conf0, Bindings0),
+ Bindings = erl_eval:add_binding('SCRIPT', AppSrcScript, Bindings1),
+ Conf = case file:script(AppSrcScript, Bindings) of
+ {ok, [C]} -> C;
+ {ok, C} -> C
+ end,
+ ok = file:write_file(AppSrc, io_lib:format("~p.~n", [Conf])),
+ halt()
+endef
+
+define dep_autopatch_appsrc.erl
+ AppSrcOut = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+ AppSrcIn = case filelib:is_regular(AppSrcOut) of false -> "$(call core_native_path,$(DEPS_DIR)/$1/ebin/$1.app)"; true -> AppSrcOut end,
+ case filelib:is_regular(AppSrcIn) of
+ false -> ok;
+ true ->
+ {ok, [{application, $(1), L0}]} = file:consult(AppSrcIn),
+ L1 = lists:keystore(modules, 1, L0, {modules, []}),
+ L2 = case lists:keyfind(vsn, 1, L1) of
+ {_, git} -> lists:keyreplace(vsn, 1, L1, {vsn, lists:droplast(os:cmd("git -C $(DEPS_DIR)/$1 describe --dirty --tags --always"))});
+ {_, {cmd, _}} -> lists:keyreplace(vsn, 1, L1, {vsn, "cmd"});
+ _ -> L1
+ end,
+ L3 = case lists:keyfind(registered, 1, L2) of false -> [{registered, []}|L2]; _ -> L2 end,
+ ok = file:write_file(AppSrcOut, io_lib:format("~p.~n", [{application, $(1), L3}])),
+ case AppSrcOut of AppSrcIn -> ok; _ -> ok = file:delete(AppSrcIn) end
+ end,
+ halt()
+endef
+
+define dep_fetch_git
+ git clone -q -n -- $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && git checkout -q $(call dep_commit,$(1));
+endef
+
+define dep_fetch_git-subfolder
+ mkdir -p $(ERLANG_MK_TMP)/git-subfolder; \
+ git clone -q -n -- $(call dep_repo,$1) \
+ $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1); \
+ cd $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1) \
+ && git checkout -q $(call dep_commit,$1); \
+ ln -s $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1)/$(word 4,$(dep_$(1))) \
+ $(DEPS_DIR)/$(call dep_name,$1);
+endef
+
+define dep_fetch_git-submodule
+ git submodule update --init -- $(DEPS_DIR)/$1;
+endef
+
+define dep_fetch_hg
+ hg clone -q -U $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && hg update -q $(call dep_commit,$(1));
+endef
+
+define dep_fetch_svn
+ svn checkout -q $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+define dep_fetch_cp
+ cp -R $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+define dep_fetch_ln
+ ln -s $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+# A Hex dependency specifies only a package version, so there is no need to
+# look it up in the Erlang.mk package index.
+define dep_fetch_hex
+ mkdir -p $(ERLANG_MK_TMP)/hex $(DEPS_DIR)/$1; \
+ $(call core_http_get,$(ERLANG_MK_TMP)/hex/$1.tar,\
+ https://repo.hex.pm/tarballs/$(if $(word 3,$(dep_$1)),$(word 3,$(dep_$1)),$1)-$(strip $(word 2,$(dep_$1))).tar); \
+ tar -xOf $(ERLANG_MK_TMP)/hex/$1.tar contents.tar.gz | tar -C $(DEPS_DIR)/$1 -xzf -;
+endef
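+
+# Usage sketch (package name and version here are hypothetical): a Hex
+# dependency is declared as "hex VERSION [PKG_NAME]", where the optional
+# third word overrides the package name fetched from repo.hex.pm:
+#
+#   DEPS = cowlib
+#   dep_cowlib = hex 2.12.1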
+
+define dep_fetch_fail
+ echo "Error: Unknown or invalid dependency: $(1)." >&2; \
+ exit 78;
+endef
+
+# Kept for compatibility with older Erlang.mk dependency configurations.
+define dep_fetch_legacy
+ $(warning WARNING: '$(1)' dependency configuration uses deprecated format.) \
+ git clone -q -n -- $(word 1,$(dep_$(1))) $(DEPS_DIR)/$(1); \
+ cd $(DEPS_DIR)/$(1) && git checkout -q $(if $(word 2,$(dep_$(1))),$(word 2,$(dep_$(1))),master);
+endef
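+
+# Legacy usage sketch (URL and tag are hypothetical): the value is the
+# repository URL optionally followed by a commit, defaulting to master:
+#
+#   dep_cowboy = https://github.com/ninenines/cowboy 1.0.0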
+
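+# dep_target generates the per-dependency rule that fetches the dependency,
+# runs autoreconf/configure when needed, then autopatches it. It is evaluated
+# once for every dependency in $(BUILD_DEPS) and $(DEPS) below.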
+define dep_target
+$(DEPS_DIR)/$(call dep_name,$1): | $(ERLANG_MK_TMP)
+ $(eval DEP_NAME := $(call dep_name,$1))
+ $(eval DEP_STR := $(if $(filter $1,$(DEP_NAME)),$1,"$1 ($(DEP_NAME))"))
+ $(verbose) if test -d $(APPS_DIR)/$(DEP_NAME); then \
+ echo "Error: Dependency" $(DEP_STR) "conflicts with application found in $(APPS_DIR)/$(DEP_NAME)." >&2; \
+ exit 17; \
+ fi
+ $(verbose) mkdir -p $(DEPS_DIR)
+ $(dep_verbose) $(call dep_fetch_$(strip $(call dep_fetch,$(1))),$(1))
+ $(verbose) if [ -f $(DEPS_DIR)/$(1)/configure.ac -o -f $(DEPS_DIR)/$(1)/configure.in ] \
+ && [ ! -f $(DEPS_DIR)/$(1)/configure ]; then \
+ echo " AUTO " $(DEP_STR); \
+ cd $(DEPS_DIR)/$(1) && autoreconf -Wall -vif -I m4; \
+ fi
+ - $(verbose) if [ -f $(DEPS_DIR)/$(DEP_NAME)/configure ]; then \
+ echo " CONF " $(DEP_STR); \
+ cd $(DEPS_DIR)/$(DEP_NAME) && ./configure; \
+ fi
+ifeq ($(filter $(1),$(NO_AUTOPATCH)),)
+ $(verbose) $$(MAKE) --no-print-directory autopatch-$(DEP_NAME)
+endif
+
+.PHONY: autopatch-$(call dep_name,$1)
+
+autopatch-$(call dep_name,$1)::
+ $(verbose) if [ "$(1)" = "amqp_client" -a "$(RABBITMQ_CLIENT_PATCH)" ]; then \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \
+ echo " PATCH Downloading rabbitmq-codegen"; \
+ git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \
+ fi; \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-server ]; then \
+ echo " PATCH Downloading rabbitmq-server"; \
+ git clone https://github.com/rabbitmq/rabbitmq-server.git $(DEPS_DIR)/rabbitmq-server; \
+ fi; \
+ ln -s $(DEPS_DIR)/amqp_client/deps/rabbit_common-0.0.0 $(DEPS_DIR)/rabbit_common; \
+ elif [ "$(1)" = "rabbit" -a "$(RABBITMQ_SERVER_PATCH)" ]; then \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \
+ echo " PATCH Downloading rabbitmq-codegen"; \
+ git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \
+		fi; \
+ elif [ "$1" = "elixir" -a "$(ELIXIR_PATCH)" ]; then \
+ ln -s lib/elixir/ebin $(DEPS_DIR)/elixir/; \
+ else \
+ $$(call dep_autopatch,$(call dep_name,$1)) \
+ fi
+endef
+
+$(foreach dep,$(BUILD_DEPS) $(DEPS),$(eval $(call dep_target,$(dep))))
+
+ifndef IS_APP
+clean:: clean-apps
+
+clean-apps:
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ $(MAKE) -C $$dep clean IS_APP=1; \
+ done
+
+distclean:: distclean-apps
+
+distclean-apps:
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ $(MAKE) -C $$dep distclean IS_APP=1; \
+ done
+endif
+
+ifndef SKIP_DEPS
+distclean:: distclean-deps
+
+distclean-deps:
+ $(gen_verbose) rm -rf $(DEPS_DIR)
+endif
+
+# Forward-declare variables used in core/deps-tools.mk. This is required
+# in case plugins use them.
+
+ERLANG_MK_RECURSIVE_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-deps-list.log
+ERLANG_MK_RECURSIVE_DOC_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-doc-deps-list.log
+ERLANG_MK_RECURSIVE_REL_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-rel-deps-list.log
+ERLANG_MK_RECURSIVE_TEST_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-test-deps-list.log
+ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-shell-deps-list.log
+
+ERLANG_MK_QUERY_DEPS_FILE = $(ERLANG_MK_TMP)/query-deps.log
+ERLANG_MK_QUERY_DOC_DEPS_FILE = $(ERLANG_MK_TMP)/query-doc-deps.log
+ERLANG_MK_QUERY_REL_DEPS_FILE = $(ERLANG_MK_TMP)/query-rel-deps.log
+ERLANG_MK_QUERY_TEST_DEPS_FILE = $(ERLANG_MK_TMP)/query-test-deps.log
+ERLANG_MK_QUERY_SHELL_DEPS_FILE = $(ERLANG_MK_TMP)/query-shell-deps.log
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: clean-app
+
+# Configuration.
+
+ERLC_OPTS ?= -Werror +debug_info +warn_export_vars +warn_shadow_vars \
+ +warn_obsolete_guard # +bin_opt_info +warn_export_all +warn_missing_spec
+COMPILE_FIRST ?=
+COMPILE_FIRST_PATHS = $(addprefix src/,$(addsuffix .erl,$(COMPILE_FIRST)))
+ERLC_EXCLUDE ?=
+ERLC_EXCLUDE_PATHS = $(addprefix src/,$(addsuffix .erl,$(ERLC_EXCLUDE)))
+
+ERLC_ASN1_OPTS ?=
+
+ERLC_MIB_OPTS ?=
+COMPILE_MIB_FIRST ?=
+COMPILE_MIB_FIRST_PATHS = $(addprefix mibs/,$(addsuffix .mib,$(COMPILE_MIB_FIRST)))
+
+# Verbosity.
+
+app_verbose_0 = @echo " APP " $(PROJECT);
+app_verbose_2 = set -x;
+app_verbose = $(app_verbose_$(V))
+
+appsrc_verbose_0 = @echo " APP " $(PROJECT).app.src;
+appsrc_verbose_2 = set -x;
+appsrc_verbose = $(appsrc_verbose_$(V))
+
+makedep_verbose_0 = @echo " DEPEND" $(PROJECT).d;
+makedep_verbose_2 = set -x;
+makedep_verbose = $(makedep_verbose_$(V))
+
+erlc_verbose_0 = @echo " ERLC " $(filter-out $(patsubst %,%.erl,$(ERLC_EXCLUDE)),\
+ $(filter %.erl %.core,$(?F)));
+erlc_verbose_2 = set -x;
+erlc_verbose = $(erlc_verbose_$(V))
+
+xyrl_verbose_0 = @echo " XYRL " $(filter %.xrl %.yrl,$(?F));
+xyrl_verbose_2 = set -x;
+xyrl_verbose = $(xyrl_verbose_$(V))
+
+asn1_verbose_0 = @echo " ASN1 " $(filter %.asn1,$(?F));
+asn1_verbose_2 = set -x;
+asn1_verbose = $(asn1_verbose_$(V))
+
+mib_verbose_0 = @echo " MIB " $(filter %.bin %.mib,$(?F));
+mib_verbose_2 = set -x;
+mib_verbose = $(mib_verbose_$(V))
+
+ifneq ($(wildcard src/),)
+
+# Targets.
+
+app:: $(if $(wildcard ebin/test),clean) deps
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d
+ $(verbose) $(MAKE) --no-print-directory app-build
+
+ifeq ($(wildcard src/$(PROJECT_MOD).erl),)
+define app_file
+{application, '$(PROJECT)', [
+ {description, "$(PROJECT_DESCRIPTION)"},
+ {vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP),
+ {id$(comma)$(space)"$(1)"}$(comma))
+ {modules, [$(call comma_list,$(2))]},
+ {registered, []},
+ {applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(foreach dep,$(DEPS),$(call dep_name,$(dep))))]},
+ {env, $(subst \,\\,$(PROJECT_ENV))}$(if $(findstring {,$(PROJECT_APP_EXTRA_KEYS)),$(comma)$(newline)$(tab)$(subst \,\\,$(PROJECT_APP_EXTRA_KEYS)),)
+]}.
+endef
+else
+define app_file
+{application, '$(PROJECT)', [
+ {description, "$(PROJECT_DESCRIPTION)"},
+ {vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP),
+ {id$(comma)$(space)"$(1)"}$(comma))
+ {modules, [$(call comma_list,$(2))]},
+ {registered, [$(call comma_list,$(PROJECT)_sup $(PROJECT_REGISTERED))]},
+ {applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(foreach dep,$(DEPS),$(call dep_name,$(dep))))]},
+ {mod, {$(PROJECT_MOD), []}},
+ {env, $(subst \,\\,$(PROJECT_ENV))}$(if $(findstring {,$(PROJECT_APP_EXTRA_KEYS)),$(comma)$(newline)$(tab)$(subst \,\\,$(PROJECT_APP_EXTRA_KEYS)),)
+]}.
+endef
+endif
+
+app-build: ebin/$(PROJECT).app
+ $(verbose) :
+
+# Source files.
+
+ALL_SRC_FILES := $(sort $(call core_find,src/,*))
+
+ERL_FILES := $(filter %.erl,$(ALL_SRC_FILES))
+CORE_FILES := $(filter %.core,$(ALL_SRC_FILES))
+
+# ASN.1 files.
+
+ifneq ($(wildcard asn1/),)
+ASN1_FILES = $(sort $(call core_find,asn1/,*.asn1))
+ERL_FILES += $(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES))))
+
+define compile_asn1
+ $(verbose) mkdir -p include/
+ $(asn1_verbose) erlc -v -I include/ -o asn1/ +noobj $(ERLC_ASN1_OPTS) $(1)
+ $(verbose) mv asn1/*.erl src/
+ -$(verbose) mv asn1/*.hrl include/
+ $(verbose) mv asn1/*.asn1db include/
+endef
+
+$(PROJECT).d:: $(ASN1_FILES)
+ $(if $(strip $?),$(call compile_asn1,$?))
+endif
+
+# SNMP MIB files.
+
+ifneq ($(wildcard mibs/),)
+MIB_FILES = $(sort $(call core_find,mibs/,*.mib))
+
+$(PROJECT).d:: $(COMPILE_MIB_FIRST_PATHS) $(MIB_FILES)
+ $(verbose) mkdir -p include/ priv/mibs/
+ $(mib_verbose) erlc -v $(ERLC_MIB_OPTS) -o priv/mibs/ -I priv/mibs/ $?
+ $(mib_verbose) erlc -o include/ -- $(addprefix priv/mibs/,$(patsubst %.mib,%.bin,$(notdir $?)))
+endif
+
+# Leex and Yecc files.
+
+XRL_FILES := $(filter %.xrl,$(ALL_SRC_FILES))
+XRL_ERL_FILES = $(addprefix src/,$(patsubst %.xrl,%.erl,$(notdir $(XRL_FILES))))
+ERL_FILES += $(XRL_ERL_FILES)
+
+YRL_FILES := $(filter %.yrl,$(ALL_SRC_FILES))
+YRL_ERL_FILES = $(addprefix src/,$(patsubst %.yrl,%.erl,$(notdir $(YRL_FILES))))
+ERL_FILES += $(YRL_ERL_FILES)
+
+$(PROJECT).d:: $(XRL_FILES) $(YRL_FILES)
+ $(if $(strip $?),$(xyrl_verbose) erlc -v -o src/ $(YRL_ERLC_OPTS) $?)
+
+# Erlang and Core Erlang files.
+
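+# makedep.erl scans every source module for include, include_lib, behaviour,
+# parse_transform and import attributes, then writes $(PROJECT).d: a Make
+# dependency file plus a COMPILE_FIRST ordering derived from a topological
+# sort of the resulting module graph.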
+define makedep.erl
+ E = ets:new(makedep, [bag]),
+ G = digraph:new([acyclic]),
+ ErlFiles = lists:usort(string:tokens("$(ERL_FILES)", " ")),
+ DepsDir = "$(call core_native_path,$(DEPS_DIR))",
+ AppsDir = "$(call core_native_path,$(APPS_DIR))",
+ DepsDirsSrc = "$(if $(wildcard $(DEPS_DIR)/*/src), $(call core_native_path,$(wildcard $(DEPS_DIR)/*/src)))",
+ DepsDirsInc = "$(if $(wildcard $(DEPS_DIR)/*/include), $(call core_native_path,$(wildcard $(DEPS_DIR)/*/include)))",
+ AppsDirsSrc = "$(if $(wildcard $(APPS_DIR)/*/src), $(call core_native_path,$(wildcard $(APPS_DIR)/*/src)))",
+ AppsDirsInc = "$(if $(wildcard $(APPS_DIR)/*/include), $(call core_native_path,$(wildcard $(APPS_DIR)/*/include)))",
+ DepsDirs = lists:usort(string:tokens(DepsDirsSrc++DepsDirsInc, " ")),
+ AppsDirs = lists:usort(string:tokens(AppsDirsSrc++AppsDirsInc, " ")),
+ Modules = [{list_to_atom(filename:basename(F, ".erl")), F} || F <- ErlFiles],
+ Add = fun (Mod, Dep) ->
+ case lists:keyfind(Dep, 1, Modules) of
+ false -> ok;
+ {_, DepFile} ->
+ {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+ ets:insert(E, {ModFile, DepFile}),
+ digraph:add_vertex(G, Mod),
+ digraph:add_vertex(G, Dep),
+ digraph:add_edge(G, Mod, Dep)
+ end
+ end,
+ AddHd = fun (F, Mod, DepFile) ->
+ case file:open(DepFile, [read]) of
+ {error, enoent} ->
+ ok;
+ {ok, Fd} ->
+ {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+ case ets:match(E, {ModFile, DepFile}) of
+ [] ->
+ ets:insert(E, {ModFile, DepFile}),
+				F(F, Fd, Mod, 0);
+ _ -> ok
+ end
+ end
+ end,
+ SearchHrl = fun
+ F(_Hrl, []) -> {error,enoent};
+ F(Hrl, [Dir|Dirs]) ->
+ HrlF = filename:join([Dir,Hrl]),
+ case filelib:is_file(HrlF) of
+ true ->
+ {ok, HrlF};
+ false -> F(Hrl,Dirs)
+ end
+ end,
+ Attr = fun
+ (_F, Mod, behavior, Dep) ->
+ Add(Mod, Dep);
+ (_F, Mod, behaviour, Dep) ->
+ Add(Mod, Dep);
+ (_F, Mod, compile, {parse_transform, Dep}) ->
+ Add(Mod, Dep);
+ (_F, Mod, compile, Opts) when is_list(Opts) ->
+ case proplists:get_value(parse_transform, Opts) of
+ undefined -> ok;
+ Dep -> Add(Mod, Dep)
+ end;
+ (F, Mod, include, Hrl) ->
+ case SearchHrl(Hrl, ["src", "include",AppsDir,DepsDir]++AppsDirs++DepsDirs) of
+ {ok, FoundHrl} -> AddHd(F, Mod, FoundHrl);
+ {error, _} -> false
+ end;
+ (F, Mod, include_lib, Hrl) ->
+ case SearchHrl(Hrl, ["src", "include",AppsDir,DepsDir]++AppsDirs++DepsDirs) of
+ {ok, FoundHrl} -> AddHd(F, Mod, FoundHrl);
+ {error, _} -> false
+ end;
+ (F, Mod, import, {Imp, _}) ->
+ IsFile =
+ case lists:keyfind(Imp, 1, Modules) of
+ false -> false;
+ {_, FilePath} -> filelib:is_file(FilePath)
+ end,
+ case IsFile of
+ false -> ok;
+ true -> Add(Mod, Imp)
+ end;
+ (_, _, _, _) -> ok
+ end,
+ MakeDepend = fun
+ (F, Fd, Mod, StartLocation) ->
+		{ok, _Filename} = file:pid2name(Fd),
+ case io:parse_erl_form(Fd, undefined, StartLocation) of
+ {ok, AbsData, EndLocation} ->
+ case AbsData of
+ {attribute, _, Key, Value} ->
+ Attr(F, Mod, Key, Value),
+ F(F, Fd, Mod, EndLocation);
+ _ -> F(F, Fd, Mod, EndLocation)
+ end;
+			{eof, _} -> file:close(Fd);
+			{error, _ErrorDescription} ->
+				file:close(Fd);
+			{error, _ErrorInfo, ErrorLocation} ->
+ F(F, Fd, Mod, ErrorLocation)
+ end,
+ ok
+ end,
+ [begin
+ Mod = list_to_atom(filename:basename(F, ".erl")),
+ case file:open(F, [read]) of
+		{ok, Fd} -> MakeDepend(MakeDepend, Fd, Mod, 0);
+ {error, enoent} -> ok
+ end
+ end || F <- ErlFiles],
+ Depend = sofs:to_external(sofs:relation_to_family(sofs:relation(ets:tab2list(E)))),
+ CompileFirst = [X || X <- lists:reverse(digraph_utils:topsort(G)), [] =/= digraph:in_neighbours(G, X)],
+ TargetPath = fun(Target) ->
+ case lists:keyfind(Target, 1, Modules) of
+ false -> "";
+ {_, DepFile} ->
+ DirSubname = tl(string:tokens(filename:dirname(DepFile), "/")),
+ string:join(DirSubname ++ [atom_to_list(Target)], "/")
+ end
+ end,
+ Output0 = [
+ "# Generated by Erlang.mk. Edit at your own risk!\n\n",
+ [[F, "::", [[" ", D] || D <- Deps], "; @touch \$$@\n"] || {F, Deps} <- Depend],
+ "\nCOMPILE_FIRST +=", [[" ", TargetPath(CF)] || CF <- CompileFirst], "\n"
+ ],
+ Output = case "é" of
+ [233] -> unicode:characters_to_binary(Output0);
+ _ -> Output0
+ end,
+ ok = file:write_file("$(1)", Output),
+ halt()
+endef
+
+ifeq ($(if $(NO_MAKEDEP),$(wildcard $(PROJECT).d),),)
+$(PROJECT).d:: $(ERL_FILES) $(call core_find,include/,*.hrl) $(MAKEFILE_LIST)
+ $(makedep_verbose) $(call erlang,$(call makedep.erl,$@))
+endif
+
+ifeq ($(IS_APP)$(IS_DEP),)
+ifneq ($(words $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES)),0)
+# Rebuild everything when the Makefile changes.
+$(ERLANG_MK_TMP)/last-makefile-change: $(MAKEFILE_LIST) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES); \
+ touch -c $(PROJECT).d; \
+ fi
+ $(verbose) touch $@
+
+$(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES):: $(ERLANG_MK_TMP)/last-makefile-change
+ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change
+endif
+endif
+
+$(PROJECT).d::
+ $(verbose) :
+
+include $(wildcard $(PROJECT).d)
+
+ebin/$(PROJECT).app:: ebin/
+
+ebin/:
+ $(verbose) mkdir -p ebin/
+
+define compile_erl
+ $(erlc_verbose) erlc -v $(if $(IS_DEP),$(filter-out -Werror,$(ERLC_OPTS)),$(ERLC_OPTS)) -o ebin/ \
+ -pa ebin/ -I include/ $(filter-out $(ERLC_EXCLUDE_PATHS),$(COMPILE_FIRST_PATHS) $(1))
+endef
+
+define validate_app_file
+ case file:consult("ebin/$(PROJECT).app") of
+ {ok, _} -> halt();
+ _ -> halt(1)
+ end
+endef
+
+ebin/$(PROJECT).app:: $(ERL_FILES) $(CORE_FILES) $(wildcard src/$(PROJECT).app.src)
+ $(eval FILES_TO_COMPILE := $(filter-out src/$(PROJECT).app.src,$?))
+ $(if $(strip $(FILES_TO_COMPILE)),$(call compile_erl,$(FILES_TO_COMPILE)))
+# Older git versions do not have the --first-parent flag. Do without in that case.
+ $(eval GITDESCRIBE := $(shell git describe --dirty --abbrev=7 --tags --always --first-parent 2>/dev/null \
+ || git describe --dirty --abbrev=7 --tags --always 2>/dev/null || true))
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(filter-out $(ERLC_EXCLUDE_PATHS),$(ERL_FILES) $(CORE_FILES) $(BEAM_FILES)))))))
+ifeq ($(wildcard src/$(PROJECT).app.src),)
+ $(app_verbose) printf '$(subst %,%%,$(subst $(newline),\n,$(subst ','\'',$(call app_file,$(GITDESCRIBE),$(MODULES)))))' \
+ > ebin/$(PROJECT).app
+ $(verbose) if ! $(call erlang,$(call validate_app_file)); then \
+ echo "The .app file produced is invalid. Please verify the value of PROJECT_ENV." >&2; \
+ exit 1; \
+ fi
+else
+ $(verbose) if [ -z "$$(grep -e '^[^%]*{\s*modules\s*,' src/$(PROJECT).app.src)" ]; then \
+		echo "No {modules, []} entry found in src/$(PROJECT).app.src. An empty modules entry is required so that Erlang.mk can fill it in. Please consult the erlang.mk documentation for instructions." >&2; \
+ exit 1; \
+ fi
+ $(appsrc_verbose) cat src/$(PROJECT).app.src \
+ | sed "s/{[[:space:]]*modules[[:space:]]*,[[:space:]]*\[\]}/{modules, \[$(call comma_list,$(MODULES))\]}/" \
+ | sed "s/{id,[[:space:]]*\"git\"}/{id, \"$(subst /,\/,$(GITDESCRIBE))\"}/" \
+ > ebin/$(PROJECT).app
+endif
+ifneq ($(wildcard src/$(PROJECT).appup),)
+ $(verbose) cp src/$(PROJECT).appup ebin/
+endif
+
+clean:: clean-app
+
+clean-app:
+ $(gen_verbose) rm -rf $(PROJECT).d ebin/ priv/mibs/ $(XRL_ERL_FILES) $(YRL_ERL_FILES) \
+ $(addprefix include/,$(patsubst %.mib,%.hrl,$(notdir $(MIB_FILES)))) \
+ $(addprefix include/,$(patsubst %.asn1,%.hrl,$(notdir $(ASN1_FILES)))) \
+ $(addprefix include/,$(patsubst %.asn1,%.asn1db,$(notdir $(ASN1_FILES)))) \
+ $(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES))))
+
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Viktor Söderqvist <viktor@zuiderkwast.se>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: docs-deps
+
+# Configuration.
+
+ALL_DOC_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(DOC_DEPS))
+
+# Targets.
+
+$(foreach dep,$(DOC_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+doc-deps:
+else
+doc-deps: $(ALL_DOC_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_DOC_DEPS_DIRS) ; do $(MAKE) -C $$dep IS_DEP=1; done
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: rel-deps
+
+# Configuration.
+
+ALL_REL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(REL_DEPS))
+
+# Targets.
+
+$(foreach dep,$(REL_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+rel-deps:
+else
+rel-deps: $(ALL_REL_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_REL_DEPS_DIRS) ; do $(MAKE) -C $$dep; done
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: test-deps test-dir test-build clean-test-dir
+
+# Configuration.
+
+TEST_DIR ?= $(CURDIR)/test
+
+ALL_TEST_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(TEST_DEPS))
+
+TEST_ERLC_OPTS ?= +debug_info +warn_export_vars +warn_shadow_vars +warn_obsolete_guard
+TEST_ERLC_OPTS += -DTEST=1
+
+# Targets.
+
+$(foreach dep,$(TEST_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+test-deps:
+else
+test-deps: $(ALL_TEST_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_TEST_DEPS_DIRS) ; do \
+ if [ -z "$(strip $(FULL))" ] && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ else \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ fi \
+ done
+endif
+
+ifneq ($(wildcard $(TEST_DIR)),)
+test-dir: $(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build
+ @:
+
+test_erlc_verbose_0 = @echo " ERLC " $(filter-out $(patsubst %,%.erl,$(ERLC_EXCLUDE)),\
+ $(filter %.erl %.core,$(notdir $(FILES_TO_COMPILE))));
+test_erlc_verbose_2 = set -x;
+test_erlc_verbose = $(test_erlc_verbose_$(V))
+
+define compile_test_erl
+ $(test_erlc_verbose) erlc -v $(TEST_ERLC_OPTS) -o $(TEST_DIR) \
+ -pa ebin/ -I include/ $(1)
+endef
+
+ERL_TEST_FILES = $(call core_find,$(TEST_DIR)/,*.erl)
+$(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build: $(ERL_TEST_FILES) $(MAKEFILE_LIST)
+ $(eval FILES_TO_COMPILE := $(if $(filter $(MAKEFILE_LIST),$?),$(filter $(ERL_TEST_FILES),$^),$?))
+ $(if $(strip $(FILES_TO_COMPILE)),$(call compile_test_erl,$(FILES_TO_COMPILE)) && touch $@)
+endif
+
+test-build:: IS_TEST=1
+test-build:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build:: $(if $(wildcard src),$(if $(wildcard ebin/test),,clean)) $(if $(IS_APP),,deps test-deps)
+# We already compiled everything when IS_APP=1.
+ifndef IS_APP
+ifneq ($(wildcard src),)
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(verbose) $(MAKE) --no-print-directory app-build ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(gen_verbose) touch ebin/test
+endif
+ifneq ($(wildcard $(TEST_DIR)),)
+ $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+endif
+endif
+
+# Roughly the same as test-build, but when IS_APP=1.
+# We only care about compiling the current application.
+ifdef IS_APP
+test-build-app:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build-app:: deps test-deps
+ifneq ($(wildcard src),)
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(verbose) $(MAKE) --no-print-directory app-build ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(gen_verbose) touch ebin/test
+endif
+ifneq ($(wildcard $(TEST_DIR)),)
+ $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+endif
+endif
+
+clean:: clean-test-dir
+
+clean-test-dir:
+ifneq ($(wildcard $(TEST_DIR)/*.beam),)
+ $(gen_verbose) rm -f $(TEST_DIR)/*.beam $(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: rebar.config
+
+# We strip out -Werror because we don't want to fail due to
+# warnings when used as a dependency.
+
+compat_prepare_erlc_opts = $(shell echo "$1" | sed 's/, */,/g')
+
+define compat_convert_erlc_opts
+$(if $(filter-out -Werror,$1),\
+ $(if $(findstring +,$1),\
+ $(shell echo $1 | cut -b 2-)))
+endef
+
+define compat_erlc_opts_to_list
+[$(call comma_list,$(foreach o,$(call compat_prepare_erlc_opts,$1),$(call compat_convert_erlc_opts,$o)))]
+endef
+
+define compat_rebar_config
+{deps, [
+$(call comma_list,$(foreach d,$(DEPS),\
+ $(if $(filter hex,$(call dep_fetch,$d)),\
+ {$(call dep_name,$d)$(comma)"$(call dep_repo,$d)"},\
+ {$(call dep_name,$d)$(comma)".*"$(comma){git,"$(call dep_repo,$d)"$(comma)"$(call dep_commit,$d)"}})))
+]}.
+{erl_opts, $(call compat_erlc_opts_to_list,$(ERLC_OPTS))}.
+endef
+
+rebar.config:
+ $(gen_verbose) $(call core_render,compat_rebar_config,rebar.config)
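+
+# For illustration only (cowlib used as a hypothetical dependency): with
+# "dep_cowlib = git https://github.com/ninenines/cowlib master" and the
+# default ERLC_OPTS, the rendered rebar.config looks roughly like:
+#
+#   {deps, [{cowlib,".*",{git,"https://github.com/ninenines/cowlib","master"}}]}.
+#   {erl_opts, [debug_info,warn_export_vars,warn_shadow_vars,warn_obsolete_guard]}.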
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter asciideck,$(DEPS) $(DOC_DEPS)),asciideck)
+
+.PHONY: asciidoc asciidoc-guide asciidoc-manual install-asciidoc distclean-asciidoc-guide distclean-asciidoc-manual
+
+# Core targets.
+
+docs:: asciidoc
+
+distclean:: distclean-asciidoc-guide distclean-asciidoc-manual
+
+# Plugin-specific targets.
+
+asciidoc: asciidoc-guide asciidoc-manual
+
+# User guide.
+
+ifeq ($(wildcard doc/src/guide/book.asciidoc),)
+asciidoc-guide:
+else
+asciidoc-guide: distclean-asciidoc-guide doc-deps
+ a2x -v -f pdf doc/src/guide/book.asciidoc && mv doc/src/guide/book.pdf doc/guide.pdf
+ a2x -v -f chunked doc/src/guide/book.asciidoc && mv doc/src/guide/book.chunked/ doc/html/
+
+distclean-asciidoc-guide:
+ $(gen_verbose) rm -rf doc/html/ doc/guide.pdf
+endif
+
+# Man pages.
+
+ASCIIDOC_MANUAL_FILES := $(wildcard doc/src/manual/*.asciidoc)
+
+ifeq ($(ASCIIDOC_MANUAL_FILES),)
+asciidoc-manual:
+else
+
+# Configuration.
+
+MAN_INSTALL_PATH ?= /usr/local/share/man
+MAN_SECTIONS ?= 3 7
+MAN_PROJECT ?= $(shell echo $(PROJECT) | sed 's/^./\U&\E/')
+MAN_VERSION ?= $(PROJECT_VERSION)
+
+# Plugin-specific targets.
+
+define asciidoc2man.erl
+try
+ [begin
+ io:format(" ADOC ~s~n", [F]),
+ ok = asciideck:to_manpage(asciideck:parse_file(F), #{
+ compress => gzip,
+ outdir => filename:dirname(F),
+ extra2 => "$(MAN_PROJECT) $(MAN_VERSION)",
+ extra3 => "$(MAN_PROJECT) Function Reference"
+ })
+ end || F <- [$(shell echo $(addprefix $(comma)\",$(addsuffix \",$1)) | sed 's/^.//')]],
+ halt(0)
+catch C:E ->
+ io:format("Exception ~p:~p~nStacktrace: ~p~n", [C, E, erlang:get_stacktrace()]),
+ halt(1)
+end.
+endef
+
+asciidoc-manual:: doc-deps
+
+asciidoc-manual:: $(ASCIIDOC_MANUAL_FILES)
+ $(gen_verbose) $(call erlang,$(call asciidoc2man.erl,$?))
+ $(verbose) $(foreach s,$(MAN_SECTIONS),mkdir -p doc/man$s/ && mv doc/src/manual/*.$s.gz doc/man$s/;)
+
+install-docs:: install-asciidoc
+
+install-asciidoc: asciidoc-manual
+ $(foreach s,$(MAN_SECTIONS),\
+ mkdir -p $(MAN_INSTALL_PATH)/man$s/ && \
+ install -g `id -g` -o `id -u` -m 0644 doc/man$s/*.gz $(MAN_INSTALL_PATH)/man$s/;)
+
+distclean-asciidoc-manual:
+ $(gen_verbose) rm -rf $(addprefix doc/man,$(MAN_SECTIONS))
+endif
+endif
+
+# Copyright (c) 2014-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: bootstrap bootstrap-lib bootstrap-rel new list-templates
+
+# Core targets.
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Bootstrap targets:" \
+ " bootstrap Generate a skeleton of an OTP application" \
+ " bootstrap-lib Generate a skeleton of an OTP library" \
+ " bootstrap-rel Generate the files needed to build a release" \
+ " new-app in=NAME Create a new local OTP application NAME" \
+ " new-lib in=NAME Create a new local OTP library NAME" \
+ " new t=TPL n=NAME Generate a module NAME based on the template TPL" \
+		"  new t=T n=N in=APP  Generate a module N based on the template T in APP" \
+ " list-templates List available templates"
+
+# Bootstrap templates.
+
+define bs_appsrc
+{application, $p, [
+ {description, ""},
+ {vsn, "0.1.0"},
+ {id, "git"},
+ {modules, []},
+ {registered, []},
+ {applications, [
+ kernel,
+ stdlib
+ ]},
+ {mod, {$p_app, []}},
+ {env, []}
+]}.
+endef
+
+define bs_appsrc_lib
+{application, $p, [
+ {description, ""},
+ {vsn, "0.1.0"},
+ {id, "git"},
+ {modules, []},
+ {registered, []},
+ {applications, [
+ kernel,
+ stdlib
+ ]}
+]}.
+endef
+
+# To prevent autocompletion issues with ZSH, we add "include erlang.mk"
+# separately during the actual bootstrap.
+define bs_Makefile
+PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.1.0
+$(if $(SP),
+# Whitespace to be used when creating files from templates.
+SP = $(SP)
+)
+endef
+
+define bs_apps_Makefile
+PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.1.0
+$(if $(SP),
+# Whitespace to be used when creating files from templates.
+SP = $(SP)
+)
+# Make sure we know where the applications are located.
+ROOT_DIR ?= $(call core_relpath,$(dir $(ERLANG_MK_FILENAME)),$(APPS_DIR)/app)
+APPS_DIR ?= ..
+DEPS_DIR ?= $(call core_relpath,$(DEPS_DIR),$(APPS_DIR)/app)
+
+include $$(ROOT_DIR)/erlang.mk
+endef
+
+define bs_app
+-module($p_app).
+-behaviour(application).
+
+-export([start/2]).
+-export([stop/1]).
+
+start(_Type, _Args) ->
+ $p_sup:start_link().
+
+stop(_State) ->
+ ok.
+endef
+
+define bs_relx_config
+{release, {$p_release, "1"}, [$p, sasl, runtime_tools]}.
+{extended_start_script, true}.
+{sys_config, "config/sys.config"}.
+{vm_args, "config/vm.args"}.
+endef
+
+define bs_sys_config
+[
+].
+endef
+
+define bs_vm_args
+-name $p@127.0.0.1
+-setcookie $p
+-heart
+endef
+
+# Normal templates.
+
+define tpl_supervisor
+-module($(n)).
+-behaviour(supervisor).
+
+-export([start_link/0]).
+-export([init/1]).
+
+start_link() ->
+ supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+init([]) ->
+ Procs = [],
+ {ok, {{one_for_one, 1, 5}, Procs}}.
+endef
+
+define tpl_gen_server
+-module($(n)).
+-behaviour(gen_server).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_server.
+-export([init/1]).
+-export([handle_call/3]).
+-export([handle_cast/2]).
+-export([handle_info/2]).
+-export([terminate/2]).
+-export([code_change/3]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_server:start_link(?MODULE, [], []).
+
+%% gen_server.
+
+init([]) ->
+ {ok, #state{}}.
+
+handle_call(_Request, _From, State) ->
+ {reply, ignored, State}.
+
+handle_cast(_Msg, State) ->
+ {noreply, State}.
+
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+endef
+
+define tpl_module
+-module($(n)).
+-export([]).
+endef
+
+define tpl_cowboy_http
+-module($(n)).
+-behaviour(cowboy_http_handler).
+
+-export([init/3]).
+-export([handle/2]).
+-export([terminate/3]).
+
+-record(state, {
+}).
+
+init(_, Req, _Opts) ->
+ {ok, Req, #state{}}.
+
+handle(Req, State=#state{}) ->
+ {ok, Req2} = cowboy_req:reply(200, Req),
+ {ok, Req2, State}.
+
+terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_gen_fsm
+-module($(n)).
+-behaviour(gen_fsm).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_fsm.
+-export([init/1]).
+-export([state_name/2]).
+-export([handle_event/3]).
+-export([state_name/3]).
+-export([handle_sync_event/4]).
+-export([handle_info/3]).
+-export([terminate/3]).
+-export([code_change/4]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_fsm:start_link(?MODULE, [], []).
+
+%% gen_fsm.
+
+init([]) ->
+ {ok, state_name, #state{}}.
+
+state_name(_Event, StateData) ->
+ {next_state, state_name, StateData}.
+
+handle_event(_Event, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+state_name(_Event, _From, StateData) ->
+ {reply, ignored, state_name, StateData}.
+
+handle_sync_event(_Event, _From, StateName, StateData) ->
+ {reply, ignored, StateName, StateData}.
+
+handle_info(_Info, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+terminate(_Reason, _StateName, _StateData) ->
+ ok.
+
+code_change(_OldVsn, StateName, StateData, _Extra) ->
+ {ok, StateName, StateData}.
+endef
+
+define tpl_gen_statem
+-module($(n)).
+-behaviour(gen_statem).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_statem.
+-export([callback_mode/0]).
+-export([init/1]).
+-export([state_name/3]).
+-export([handle_event/4]).
+-export([terminate/3]).
+-export([code_change/4]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_statem:start_link(?MODULE, [], []).
+
+%% gen_statem.
+
+callback_mode() ->
+ state_functions.
+
+init([]) ->
+ {ok, state_name, #state{}}.
+
+state_name(_EventType, _EventData, StateData) ->
+ {next_state, state_name, StateData}.
+
+handle_event(_EventType, _EventData, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+terminate(_Reason, _StateName, _StateData) ->
+ ok.
+
+code_change(_OldVsn, StateName, StateData, _Extra) ->
+ {ok, StateName, StateData}.
+endef
+
+define tpl_cowboy_loop
+-module($(n)).
+-behaviour(cowboy_loop_handler).
+
+-export([init/3]).
+-export([info/3]).
+-export([terminate/3]).
+
+-record(state, {
+}).
+
+init(_, Req, _Opts) ->
+ {loop, Req, #state{}, 5000, hibernate}.
+
+info(_Info, Req, State) ->
+ {loop, Req, State, hibernate}.
+
+terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_cowboy_rest
+-module($(n)).
+
+-export([init/3]).
+-export([content_types_provided/2]).
+-export([get_html/2]).
+
+init(_, _Req, _Opts) ->
+ {upgrade, protocol, cowboy_rest}.
+
+content_types_provided(Req, State) ->
+ {[{{<<"text">>, <<"html">>, '*'}, get_html}], Req, State}.
+
+get_html(Req, State) ->
+ {<<"<html><body>This is REST!</body></html>">>, Req, State}.
+endef
+
+define tpl_cowboy_ws
+-module($(n)).
+-behaviour(cowboy_websocket_handler).
+
+-export([init/3]).
+-export([websocket_init/3]).
+-export([websocket_handle/3]).
+-export([websocket_info/3]).
+-export([websocket_terminate/3]).
+
+-record(state, {
+}).
+
+init(_, _, _) ->
+ {upgrade, protocol, cowboy_websocket}.
+
+websocket_init(_, Req, _Opts) ->
+ Req2 = cowboy_req:compact(Req),
+ {ok, Req2, #state{}}.
+
+websocket_handle({text, Data}, Req, State) ->
+ {reply, {text, Data}, Req, State};
+websocket_handle({binary, Data}, Req, State) ->
+ {reply, {binary, Data}, Req, State};
+websocket_handle(_Frame, Req, State) ->
+ {ok, Req, State}.
+
+websocket_info(_Info, Req, State) ->
+ {ok, Req, State}.
+
+websocket_terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_ranch_protocol
+-module($(n)).
+-behaviour(ranch_protocol).
+
+-export([start_link/4]).
+-export([init/4]).
+
+-type opts() :: [].
+-export_type([opts/0]).
+
+-record(state, {
+ socket :: inet:socket(),
+ transport :: module()
+}).
+
+start_link(Ref, Socket, Transport, Opts) ->
+ Pid = spawn_link(?MODULE, init, [Ref, Socket, Transport, Opts]),
+ {ok, Pid}.
+
+-spec init(ranch:ref(), inet:socket(), module(), opts()) -> ok.
+init(Ref, Socket, Transport, _Opts) ->
+ ok = ranch:accept_ack(Ref),
+ loop(#state{socket=Socket, transport=Transport}).
+
+loop(State) ->
+ loop(State).
+endef
+
+# Plugin-specific targets.
+
+ifndef WS
+ifdef SP
+WS = $(subst a,,a $(wordlist 1,$(SP),a a a a a a a a a a a a a a a a a a a a))
+else
+WS = $(tab)
+endif
+endif
+
+bootstrap:
+ifneq ($(wildcard src/),)
+ $(error Error: src/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(eval n := $(PROJECT)_sup)
+ $(verbose) $(call core_render,bs_Makefile,Makefile)
+ $(verbose) echo "include erlang.mk" >> Makefile
+ $(verbose) mkdir src/
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc,src/$(PROJECT).app.src)
+endif
+ $(verbose) $(call core_render,bs_app,src/$(PROJECT)_app.erl)
+ $(verbose) $(call core_render,tpl_supervisor,src/$(PROJECT)_sup.erl)
+
+bootstrap-lib:
+ifneq ($(wildcard src/),)
+ $(error Error: src/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(verbose) $(call core_render,bs_Makefile,Makefile)
+ $(verbose) echo "include erlang.mk" >> Makefile
+ $(verbose) mkdir src/
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc_lib,src/$(PROJECT).app.src)
+endif
+
+bootstrap-rel:
+ifneq ($(wildcard relx.config),)
+ $(error Error: relx.config already exists)
+endif
+ifneq ($(wildcard config/),)
+ $(error Error: config/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(verbose) $(call core_render,bs_relx_config,relx.config)
+ $(verbose) mkdir config/
+ $(verbose) $(call core_render,bs_sys_config,config/sys.config)
+ $(verbose) $(call core_render,bs_vm_args,config/vm.args)
+
+new-app:
+ifndef in
+ $(error Usage: $(MAKE) new-app in=APP)
+endif
+ifneq ($(wildcard $(APPS_DIR)/$in),)
+ $(error Error: Application $in already exists)
+endif
+ $(eval p := $(in))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(eval n := $(in)_sup)
+ $(verbose) mkdir -p $(APPS_DIR)/$p/src/
+ $(verbose) $(call core_render,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile)
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc,$(APPS_DIR)/$p/src/$p.app.src)
+endif
+ $(verbose) $(call core_render,bs_app,$(APPS_DIR)/$p/src/$p_app.erl)
+ $(verbose) $(call core_render,tpl_supervisor,$(APPS_DIR)/$p/src/$p_sup.erl)
+
+new-lib:
+ifndef in
+ $(error Usage: $(MAKE) new-lib in=APP)
+endif
+ifneq ($(wildcard $(APPS_DIR)/$in),)
+ $(error Error: Application $in already exists)
+endif
+ $(eval p := $(in))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(verbose) mkdir -p $(APPS_DIR)/$p/src/
+ $(verbose) $(call core_render,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile)
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc_lib,$(APPS_DIR)/$p/src/$p.app.src)
+endif
+
+new:
+ifeq ($(wildcard src/)$(in),)
+ $(error Error: src/ directory does not exist)
+endif
+ifndef t
+ $(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP])
+endif
+ifndef n
+ $(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP])
+endif
+ifdef in
+ $(verbose) $(call core_render,tpl_$(t),$(APPS_DIR)/$(in)/src/$(n).erl)
+else
+ $(verbose) $(call core_render,tpl_$(t),src/$(n).erl)
+endif
+
+list-templates:
+	$(verbose) echo Available templates:
+ $(verbose) printf " %s\n" $(sort $(patsubst tpl_%,%,$(filter tpl_%,$(.VARIABLES))))
+
+# Copyright (c) 2014-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: clean-c_src distclean-c_src-env
+
+# Configuration.
+
+C_SRC_DIR ?= $(CURDIR)/c_src
+C_SRC_ENV ?= $(C_SRC_DIR)/env.mk
+C_SRC_OUTPUT ?= $(CURDIR)/priv/$(PROJECT)
+C_SRC_TYPE ?= shared
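+# With C_SRC_TYPE = shared the output is $(C_SRC_OUTPUT) plus the shared
+# library extension (typically a NIF or port driver); any other value
+# produces an executable, e.g. a port program.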
+
+# System type and C compiler/flags.
+
+ifeq ($(PLATFORM),msys2)
+ C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?= .exe
+ C_SRC_OUTPUT_SHARED_EXTENSION ?= .dll
+else
+ C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?=
+ C_SRC_OUTPUT_SHARED_EXTENSION ?= .so
+endif
+
+ifeq ($(C_SRC_TYPE),shared)
+ C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_SHARED_EXTENSION)
+else
+ C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_EXECUTABLE_EXTENSION)
+endif
+
+ifeq ($(PLATFORM),msys2)
+# We hardcode the compiler used on MSYS2. The default CC=cc does
+# not produce working code. The "gcc" MSYS2 package also doesn't.
+ CC = /mingw64/bin/gcc
+ export CC
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),darwin)
+ CC ?= cc
+ CFLAGS ?= -O3 -std=c99 -arch x86_64 -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -arch x86_64 -Wall
+ LDFLAGS ?= -arch x86_64 -flat_namespace -undefined suppress
+else ifeq ($(PLATFORM),freebsd)
+ CC ?= cc
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),linux)
+ CC ?= gcc
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+endif
+
+ifneq ($(PLATFORM),msys2)
+ CFLAGS += -fPIC
+ CXXFLAGS += -fPIC
+endif
+
+CFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
+CXXFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
+
+LDLIBS += -L"$(ERL_INTERFACE_LIB_DIR)" -lei
+
+# Verbosity.
+
+c_verbose_0 = @echo " C " $(filter-out $(notdir $(MAKEFILE_LIST) $(C_SRC_ENV)),$(^F));
+c_verbose = $(c_verbose_$(V))
+
+cpp_verbose_0 = @echo " CPP " $(filter-out $(notdir $(MAKEFILE_LIST) $(C_SRC_ENV)),$(^F));
+cpp_verbose = $(cpp_verbose_$(V))
+
+link_verbose_0 = @echo " LD " $(@F);
+link_verbose = $(link_verbose_$(V))
+
+# Targets.
+
+ifeq ($(wildcard $(C_SRC_DIR)),)
+else ifneq ($(wildcard $(C_SRC_DIR)/Makefile),)
+app:: app-c_src
+
+test-build:: app-c_src
+
+app-c_src:
+ $(MAKE) -C $(C_SRC_DIR)
+
+clean::
+ $(MAKE) -C $(C_SRC_DIR) clean
+
+else
+
+ifeq ($(SOURCES),)
+SOURCES := $(sort $(foreach pat,*.c *.C *.cc *.cpp,$(call core_find,$(C_SRC_DIR)/,$(pat))))
+endif
+OBJECTS = $(addsuffix .o, $(basename $(SOURCES)))
+
+COMPILE_C = $(c_verbose) $(CC) $(CFLAGS) $(CPPFLAGS) -c
+COMPILE_CPP = $(cpp_verbose) $(CXX) $(CXXFLAGS) $(CPPFLAGS) -c
+
+app:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
+
+test-build:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
+
+$(C_SRC_OUTPUT_FILE): $(OBJECTS)
+ $(verbose) mkdir -p $(dir $@)
+ $(link_verbose) $(CC) $(OBJECTS) \
+ $(LDFLAGS) $(if $(filter $(C_SRC_TYPE),shared),-shared) $(LDLIBS) \
+ -o $(C_SRC_OUTPUT_FILE)
+
+$(OBJECTS): $(MAKEFILE_LIST) $(C_SRC_ENV)
+
+%.o: %.c
+ $(COMPILE_C) $(OUTPUT_OPTION) $<
+
+%.o: %.cc
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+%.o: %.C
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+%.o: %.cpp
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+clean:: clean-c_src
+
+clean-c_src:
+ $(gen_verbose) rm -f $(C_SRC_OUTPUT_FILE) $(OBJECTS)
+
+endif
+
+ifneq ($(wildcard $(C_SRC_DIR)),)
+ERL_ERTS_DIR = $(shell $(ERL) -eval 'io:format("~s~n", [code:lib_dir(erts)]), halt().')
+
+$(C_SRC_ENV):
+ $(verbose) $(ERL) -eval "file:write_file(\"$(call core_native_path,$(C_SRC_ENV))\", \
+ io_lib:format( \
+ \"# Generated by Erlang.mk. Edit at your own risk!~n~n\" \
+ \"ERTS_INCLUDE_DIR ?= ~s/erts-~s/include/~n\" \
+ \"ERL_INTERFACE_INCLUDE_DIR ?= ~s~n\" \
+ \"ERL_INTERFACE_LIB_DIR ?= ~s~n\" \
+ \"ERTS_DIR ?= $(ERL_ERTS_DIR)~n\", \
+ [code:root_dir(), erlang:system_info(version), \
+ code:lib_dir(erl_interface, include), \
+ code:lib_dir(erl_interface, lib)])), \
+ halt()."
+
+distclean:: distclean-c_src-env
+
+distclean-c_src-env:
+ $(gen_verbose) rm -f $(C_SRC_ENV)
+
+-include $(C_SRC_ENV)
+
+ifneq ($(ERL_ERTS_DIR),$(ERTS_DIR))
+$(shell rm -f $(C_SRC_ENV))
+endif
+endif
+
+# Templates.
+
+define bs_c_nif
+#include "erl_nif.h"
+
+static int loads = 0;
+
+static int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
+{
+ /* Initialize private data. */
+ *priv_data = NULL;
+
+ loads++;
+
+ return 0;
+}
+
+static int upgrade(ErlNifEnv* env, void** priv_data, void** old_priv_data, ERL_NIF_TERM load_info)
+{
+ /* Convert the private data to the new version. */
+ *priv_data = *old_priv_data;
+
+ loads++;
+
+ return 0;
+}
+
+static void unload(ErlNifEnv* env, void* priv_data)
+{
+ if (loads == 1) {
+ /* Destroy the private data. */
+ }
+
+ loads--;
+}
+
+static ERL_NIF_TERM hello(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ if (enif_is_atom(env, argv[0])) {
+ return enif_make_tuple2(env,
+ enif_make_atom(env, "hello"),
+ argv[0]);
+ }
+
+ return enif_make_tuple2(env,
+ enif_make_atom(env, "error"),
+ enif_make_atom(env, "badarg"));
+}
+
+static ErlNifFunc nif_funcs[] = {
+ {"hello", 1, hello}
+};
+
+ERL_NIF_INIT($n, nif_funcs, load, NULL, upgrade, unload)
+endef
+
+define bs_erl_nif
+-module($n).
+
+-export([hello/1]).
+
+-on_load(on_load/0).
+on_load() ->
+ PrivDir = case code:priv_dir(?MODULE) of
+ {error, _} ->
+ AppPath = filename:dirname(filename:dirname(code:which(?MODULE))),
+ filename:join(AppPath, "priv");
+ Path ->
+ Path
+ end,
+ erlang:load_nif(filename:join(PrivDir, atom_to_list(?MODULE)), 0).
+
+hello(_) ->
+ erlang:nif_error({not_loaded, ?MODULE}).
+endef
+
+new-nif:
+ifneq ($(wildcard $(C_SRC_DIR)/$n.c),)
+ $(error Error: $(C_SRC_DIR)/$n.c already exists)
+endif
+ifneq ($(wildcard src/$n.erl),)
+ $(error Error: src/$n.erl already exists)
+endif
+ifndef n
+ $(error Usage: $(MAKE) new-nif n=NAME [in=APP])
+endif
+ifdef in
+ $(verbose) $(MAKE) -C $(APPS_DIR)/$(in)/ new-nif n=$n in=
+else
+ $(verbose) mkdir -p $(C_SRC_DIR) src/
+ $(verbose) $(call core_render,bs_c_nif,$(C_SRC_DIR)/$n.c)
+ $(verbose) $(call core_render,bs_erl_nif,src/$n.erl)
+endif
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: ci ci-prepare ci-setup
+
+CI_OTP ?=
+CI_HIPE ?=
+CI_ERLLVM ?=
+
+ifeq ($(CI_VM),native)
+ERLC_OPTS += +native
+TEST_ERLC_OPTS += +native
+else ifeq ($(CI_VM),erllvm)
+ERLC_OPTS += +native +'{hipe, [to_llvm]}'
+TEST_ERLC_OPTS += +native +'{hipe, [to_llvm]}'
+endif
+
+ifeq ($(strip $(CI_OTP) $(CI_HIPE) $(CI_ERLLVM)),)
+ci::
+else
+
+ci:: $(addprefix ci-,$(CI_OTP) $(addsuffix -native,$(CI_HIPE)) $(addsuffix -erllvm,$(CI_ERLLVM)))
+
+ci-prepare: $(addprefix $(KERL_INSTALL_DIR)/,$(CI_OTP) $(addsuffix -native,$(CI_HIPE)))
+
+ci-setup::
+ $(verbose) :
+
+ci-extra::
+ $(verbose) :
+
+ci_verbose_0 = @echo " CI " $(1);
+ci_verbose = $(ci_verbose_$(V))
+
+define ci_target
+ci-$1: $(KERL_INSTALL_DIR)/$2
+ $(verbose) $(MAKE) --no-print-directory clean
+ $(ci_verbose) \
+ PATH="$(KERL_INSTALL_DIR)/$2/bin:$(PATH)" \
+ CI_OTP_RELEASE="$1" \
+ CT_OPTS="-label $1" \
+ CI_VM="$3" \
+ $(MAKE) ci-setup tests
+ $(verbose) $(MAKE) --no-print-directory ci-extra
+endef
+
+$(foreach otp,$(CI_OTP),$(eval $(call ci_target,$(otp),$(otp),otp)))
+$(foreach otp,$(CI_HIPE),$(eval $(call ci_target,$(otp)-native,$(otp)-native,native)))
+$(foreach otp,$(CI_ERLLVM),$(eval $(call ci_target,$(otp)-erllvm,$(otp)-native,erllvm)))
+
+$(foreach otp,$(filter-out $(ERLANG_OTP),$(CI_OTP)),$(eval $(call kerl_otp_target,$(otp))))
+$(foreach otp,$(filter-out $(ERLANG_HIPE),$(sort $(CI_HIPE) $(CI_ERLLVM))),$(eval $(call kerl_hipe_target,$(otp))))
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Continuous Integration targets:" \
+ " ci Run '$(MAKE) tests' on all configured Erlang versions." \
+ "" \
+ "The CI_OTP variable must be defined with the Erlang versions" \
+ "that must be tested. For example: CI_OTP = OTP-17.3.4 OTP-17.5.3"
+
+endif
+
+# Copyright (c) 2020, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifdef CONCUERROR_TESTS
+
+.PHONY: concuerror distclean-concuerror
+
+# Configuration
+
+CONCUERROR_LOGS_DIR ?= $(CURDIR)/logs
+CONCUERROR_OPTS ?=
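+# CONCUERROR_TESTS takes module:function pairs, e.g. (hypothetical names):
+#
+#   CONCUERROR_TESTS = my_mod:interleaving_test other_mod:race_test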
+
+# Core targets.
+
+check:: concuerror
+
+ifndef KEEP_LOGS
+distclean:: distclean-concuerror
+endif
+
+# Plugin-specific targets.
+
+$(ERLANG_MK_TMP)/Concuerror/bin/concuerror: | $(ERLANG_MK_TMP)
+ $(verbose) git clone https://github.com/parapluu/Concuerror $(ERLANG_MK_TMP)/Concuerror
+ $(verbose) $(MAKE) -C $(ERLANG_MK_TMP)/Concuerror
+
+$(CONCUERROR_LOGS_DIR):
+ $(verbose) mkdir -p $(CONCUERROR_LOGS_DIR)
+
+define concuerror_html_report
+<!DOCTYPE html>
+<html lang="en">
+<head>
+<meta charset="utf-8">
+<title>Concuerror HTML report</title>
+</head>
+<body>
+<h1>Concuerror HTML report</h1>
+<p>Generated on $(concuerror_date)</p>
+<ul>
+$(foreach t,$(concuerror_targets),<li><a href="$(t).txt">$(t)</a></li>)
+</ul>
+</body>
+</html>
+endef
+
+concuerror: $(addprefix concuerror-,$(subst :,-,$(CONCUERROR_TESTS)))
+ $(eval concuerror_date := $(shell date))
+ $(eval concuerror_targets := $^)
+ $(verbose) $(call core_render,concuerror_html_report,$(CONCUERROR_LOGS_DIR)/concuerror.html)
+
+define concuerror_target
+.PHONY: concuerror-$1-$2
+
+concuerror-$1-$2: test-build | $(ERLANG_MK_TMP)/Concuerror/bin/concuerror $(CONCUERROR_LOGS_DIR)
+ $(ERLANG_MK_TMP)/Concuerror/bin/concuerror \
+ --pa $(CURDIR)/ebin --pa $(TEST_DIR) \
+ -o $(CONCUERROR_LOGS_DIR)/concuerror-$1-$2.txt \
+ $$(CONCUERROR_OPTS) -m $1 -t $2
+endef
+
+$(foreach test,$(CONCUERROR_TESTS),$(eval $(call concuerror_target,$(firstword $(subst :, ,$(test))),$(lastword $(subst :, ,$(test))))))
+
+distclean-concuerror:
+ $(gen_verbose) rm -rf $(CONCUERROR_LOGS_DIR)
+
+endif
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: ct apps-ct distclean-ct
+
+# Configuration.
+
+CT_OPTS ?=
+
+ifneq ($(wildcard $(TEST_DIR)),)
+ifndef CT_SUITES
+CT_SUITES := $(sort $(subst _SUITE.erl,,$(notdir $(call core_find,$(TEST_DIR)/,*_SUITE.erl))))
+endif
+endif
+CT_SUITES ?=
+CT_LOGS_DIR ?= $(CURDIR)/logs
+
+# Core targets.
+
+tests:: ct
+
+ifndef KEEP_LOGS
+distclean:: distclean-ct
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Common_test targets:" \
+ " ct Run all the common_test suites for this project" \
+ "" \
+ "All your common_test suites have their associated targets." \
+		"A suite named http_SUITE can be run using the ct-http target."
+
+# Plugin-specific targets.
+
+CT_RUN = ct_run \
+ -no_auto_compile \
+ -noinput \
+ -pa $(CURDIR)/ebin $(TEST_DIR) \
+ -dir $(TEST_DIR) \
+ -logdir $(CT_LOGS_DIR)
+
+ifeq ($(CT_SUITES),)
+ct: $(if $(IS_APP)$(ROOT_DIR),,apps-ct)
+else
+# We do not run tests if we are in an apps/* with no test directory.
+ifneq ($(IS_APP)$(wildcard $(TEST_DIR)),1)
+ct: test-build $(if $(IS_APP)$(ROOT_DIR),,apps-ct)
+ $(verbose) mkdir -p $(CT_LOGS_DIR)
+ $(gen_verbose) $(CT_RUN) -sname ct_$(PROJECT) -suite $(addsuffix _SUITE,$(CT_SUITES)) $(CT_OPTS)
+endif
+endif
+
+ifneq ($(ALL_APPS_DIRS),)
+define ct_app_target
+apps-ct-$1: test-build
+ $$(MAKE) -C $1 ct IS_APP=1
+endef
+
+$(foreach app,$(ALL_APPS_DIRS),$(eval $(call ct_app_target,$(app))))
+
+apps-ct: $(addprefix apps-ct-,$(ALL_APPS_DIRS))
+endif
+
+ifdef t
+ifeq (,$(findstring :,$t))
+CT_EXTRA = -group $t
+else
+t_words = $(subst :, ,$t)
+CT_EXTRA = -group $(firstword $(t_words)) -case $(lastword $(t_words))
+endif
+else
+ifdef c
+CT_EXTRA = -case $c
+else
+CT_EXTRA =
+endif
+endif
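+# Usage sketches (suite, group and case names are hypothetical):
+#
+#   make ct-http                  # run http_SUITE in full
+#   make ct-http t=chunked        # run only the chunked group
+#   make ct-http t=chunked:crash  # run one case within that group
+#   make ct-http c=crash          # run one case regardless of group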
+
+define ct_suite_target
+ct-$(1): test-build
+ $(verbose) mkdir -p $(CT_LOGS_DIR)
+ $(gen_verbose_esc) $(CT_RUN) -sname ct_$(PROJECT) -suite $(addsuffix _SUITE,$(1)) $(CT_EXTRA) $(CT_OPTS)
+endef
+
+$(foreach test,$(CT_SUITES),$(eval $(call ct_suite_target,$(test))))
+
+distclean-ct:
+ $(gen_verbose) rm -rf $(CT_LOGS_DIR)
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: plt distclean-plt dialyze
+
+# Configuration.
+
+DIALYZER_PLT ?= $(CURDIR)/.$(PROJECT).plt
+export DIALYZER_PLT
+
+PLT_APPS ?=
+DIALYZER_DIRS ?= --src -r $(wildcard src) $(ALL_APPS_DIRS)
+DIALYZER_OPTS ?= -Werror_handling -Wrace_conditions -Wunmatched_returns # -Wunderspecs
+DIALYZER_PLT_OPTS ?=
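+# PLT_APPS lists additional applications to include in the PLT,
+# e.g. (hypothetical) PLT_APPS = crypto ssl inets.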
+
+# Core targets.
+
+check:: dialyze
+
+distclean:: distclean-plt
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Dialyzer targets:" \
+ " plt Build a PLT file for this project" \
+ " dialyze Analyze the project using Dialyzer"
+
+# Plugin-specific targets.
+
+define filter_opts.erl
+ Opts = init:get_plain_arguments(),
+ {Filtered, _} = lists:foldl(fun
+ (O, {Os, true}) -> {[O|Os], false};
+ (O = "-D", {Os, _}) -> {[O|Os], true};
+ (O = [\\$$-, \\$$D, _ | _], {Os, _}) -> {[O|Os], false};
+ (O = "-I", {Os, _}) -> {[O|Os], true};
+ (O = [\\$$-, \\$$I, _ | _], {Os, _}) -> {[O|Os], false};
+ (O = "-pa", {Os, _}) -> {[O|Os], true};
+ (_, Acc) -> Acc
+ end, {[], false}, Opts),
+ io:format("~s~n", [string:join(lists:reverse(Filtered), " ")]),
+ halt().
+endef
+
+# DIALYZER_PLT is a variable understood directly by Dialyzer.
+#
+# We append the path to erts at the end of the PLT. This works
+# because the PLT file is in the external term format and the
+# function binary_to_term/1 ignores any trailing data.
+$(DIALYZER_PLT): deps app
+ $(eval DEPS_LOG := $(shell test -f $(ERLANG_MK_TMP)/deps.log && \
+ while read p; do test -d $$p/ebin && echo $$p/ebin; done <$(ERLANG_MK_TMP)/deps.log))
+ $(verbose) dialyzer --build_plt $(DIALYZER_PLT_OPTS) --apps \
+ erts kernel stdlib $(PLT_APPS) $(OTP_DEPS) $(LOCAL_DEPS) $(DEPS_LOG) || test $$? -eq 2
+ $(verbose) $(ERL) -eval 'io:format("~n~s~n", [code:lib_dir(erts)]), halt().' >> $@
+
+plt: $(DIALYZER_PLT)
+
+distclean-plt:
+ $(gen_verbose) rm -f $(DIALYZER_PLT)
+
+ifneq ($(wildcard $(DIALYZER_PLT)),)
+dialyze: $(if $(filter --src,$(DIALYZER_DIRS)),,deps app)
+ $(verbose) if ! tail -n1 $(DIALYZER_PLT) | \
+ grep -q "^`$(ERL) -eval 'io:format("~s", [code:lib_dir(erts)]), halt().'`$$"; then \
+ rm $(DIALYZER_PLT); \
+ $(MAKE) plt; \
+ fi
+else
+dialyze: $(DIALYZER_PLT)
+endif
+ $(verbose) dialyzer --no_native `$(ERL) \
+ -eval "$(subst $(newline),,$(call escape_dquotes,$(call filter_opts.erl)))" \
+ -extra $(ERLC_OPTS)` $(DIALYZER_DIRS) $(DIALYZER_OPTS) $(if $(wildcard ebin/),-pa ebin/)
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-edoc edoc
+
+# Configuration.
+
+EDOC_OPTS ?=
+EDOC_SRC_DIRS ?=
+EDOC_OUTPUT ?= doc
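+# EDOC_OPTS takes standard EDoc options, e.g. (hypothetical)
+# EDOC_OPTS = {preprocess, true}, {todo, true}.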
+
+define edoc.erl
+ SrcPaths = lists:foldl(fun(P, Acc) ->
+ filelib:wildcard(atom_to_list(P) ++ "/{src,c_src}") ++ Acc
+ end, [], [$(call comma_list,$(patsubst %,'%',$(call core_native_path,$(EDOC_SRC_DIRS))))]),
+ DefaultOpts = [{dir, "$(EDOC_OUTPUT)"}, {source_path, SrcPaths}, {subpackages, false}],
+ edoc:application($(1), ".", [$(2)] ++ DefaultOpts),
+ halt(0).
+endef
+
+# Core targets.
+
+ifneq ($(strip $(EDOC_SRC_DIRS)$(wildcard doc/overview.edoc)),)
+docs:: edoc
+endif
+
+distclean:: distclean-edoc
+
+# Plugin-specific targets.
+
+edoc: distclean-edoc doc-deps
+ $(gen_verbose) $(call erlang,$(call edoc.erl,$(PROJECT),$(EDOC_OPTS)))
+
+distclean-edoc:
+ $(gen_verbose) rm -f $(EDOC_OUTPUT)/*.css $(EDOC_OUTPUT)/*.html $(EDOC_OUTPUT)/*.png $(EDOC_OUTPUT)/edoc-info
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Configuration.
+
+DTL_FULL_PATH ?=
+DTL_PATH ?= templates/
+DTL_PREFIX ?=
+DTL_SUFFIX ?= _dtl
+DTL_OPTS ?=
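+# A template at $(DTL_PATH)/foo.dtl compiles to module foo_dtl: the module
+# name is DTL_PREFIX ++ lowercased basename ++ DTL_SUFFIX. With DTL_FULL_PATH
+# set, path separators become underscores, e.g. users/list.dtl -> users_list_dtl.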
+
+# Verbosity.
+
+dtl_verbose_0 = @echo " DTL " $(filter %.dtl,$(?F));
+dtl_verbose = $(dtl_verbose_$(V))
+
+# Core targets.
+
+DTL_PATH := $(abspath $(DTL_PATH))
+DTL_FILES := $(sort $(call core_find,$(DTL_PATH),*.dtl))
+
+ifneq ($(DTL_FILES),)
+
+DTL_NAMES = $(addprefix $(DTL_PREFIX),$(addsuffix $(DTL_SUFFIX),$(DTL_FILES:$(DTL_PATH)/%.dtl=%)))
+DTL_MODULES = $(if $(DTL_FULL_PATH),$(subst /,_,$(DTL_NAMES)),$(notdir $(DTL_NAMES)))
+BEAM_FILES += $(addsuffix .beam,$(addprefix ebin/,$(DTL_MODULES)))
+
+ifneq ($(words $(DTL_FILES)),0)
+# Rebuild templates when the Makefile changes.
+$(ERLANG_MK_TMP)/last-makefile-change-erlydtl: $(MAKEFILE_LIST) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(DTL_FILES); \
+ fi
+ $(verbose) touch $@
+
+ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change-erlydtl
+endif
+
+define erlydtl_compile.erl
+ [begin
+ Module0 = case "$(strip $(DTL_FULL_PATH))" of
+ "" ->
+ filename:basename(F, ".dtl");
+ _ ->
+ "$(call core_native_path,$(DTL_PATH))/" ++ F2 = filename:rootname(F, ".dtl"),
+ re:replace(F2, "/", "_", [{return, list}, global])
+ end,
+ Module = list_to_atom("$(DTL_PREFIX)" ++ string:to_lower(Module0) ++ "$(DTL_SUFFIX)"),
+ case erlydtl:compile(F, Module, [$(DTL_OPTS)] ++ [{out_dir, "ebin/"}, return_errors]) of
+ ok -> ok;
+ {ok, _} -> ok
+ end
+ end || F <- string:tokens("$(1)", " ")],
+ halt().
+endef
+
+ebin/$(PROJECT).app:: $(DTL_FILES) | ebin/
+ $(if $(strip $?),\
+ $(dtl_verbose) $(call erlang,$(call erlydtl_compile.erl,$(call core_native_path,$?)),\
+ -pa ebin/))
+
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, Dave Cottlehuber <dch@skunkwerks.at>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-escript escript escript-zip
+
+# Configuration.
+
+ESCRIPT_NAME ?= $(PROJECT)
+ESCRIPT_FILE ?= $(ESCRIPT_NAME)
+
+ESCRIPT_SHEBANG ?= /usr/bin/env escript
+ESCRIPT_COMMENT ?= This is an -*- erlang -*- file
+ESCRIPT_EMU_ARGS ?= -escript main $(ESCRIPT_NAME)
+
+ESCRIPT_ZIP ?= 7z a -tzip -mx=9 -mtc=off $(if $(filter-out 0,$(V)),,> /dev/null)
+ESCRIPT_ZIP_FILE ?= $(ERLANG_MK_TMP)/escript.zip
+
+# Core targets.
+
+distclean:: distclean-escript
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Escript targets:" \
+ " escript Build an executable escript archive" \
+
+# Plugin-specific targets.
+
+escript-zip:: FULL=1
+escript-zip:: deps app
+ $(verbose) mkdir -p $(dir $(ESCRIPT_ZIP))
+ $(verbose) rm -f $(ESCRIPT_ZIP_FILE)
+ $(gen_verbose) cd .. && $(ESCRIPT_ZIP) $(ESCRIPT_ZIP_FILE) $(PROJECT)/ebin/*
+ifneq ($(DEPS),)
+ $(verbose) cd $(DEPS_DIR) && $(ESCRIPT_ZIP) $(ESCRIPT_ZIP_FILE) \
+ $(subst $(DEPS_DIR)/,,$(addsuffix /*,$(wildcard \
+ $(addsuffix /ebin,$(shell cat $(ERLANG_MK_TMP)/deps.log)))))
+endif
+
+escript:: escript-zip
+ $(gen_verbose) printf "%s\n" \
+ "#!$(ESCRIPT_SHEBANG)" \
+ "%% $(ESCRIPT_COMMENT)" \
+ "%%! $(ESCRIPT_EMU_ARGS)" > $(ESCRIPT_FILE)
+ $(verbose) cat $(ESCRIPT_ZIP_FILE) >> $(ESCRIPT_FILE)
+ $(verbose) chmod +x $(ESCRIPT_FILE)
+
+distclean-escript:
+ $(gen_verbose) rm -f $(ESCRIPT_FILE)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, Enrique Fernandez <enrique.fernandez@erlang-solutions.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: eunit apps-eunit
+
+# Configuration
+
+EUNIT_OPTS ?=
+EUNIT_ERL_OPTS ?=
+
+# Core targets.
+
+tests:: eunit
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "EUnit targets:" \
+ " eunit Run all the EUnit tests for this project"
+
+# Plugin-specific targets.
+
+define eunit.erl
+ $(call cover.erl)
+ CoverSetup(),
+ case eunit:test($1, [$(EUNIT_OPTS)]) of
+ ok -> ok;
+ error -> halt(2)
+ end,
+ CoverExport("$(call core_native_path,$(COVER_DATA_DIR))/eunit.coverdata"),
+ halt()
+endef
+
+EUNIT_ERL_OPTS += -pa $(TEST_DIR) $(CURDIR)/ebin
+
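+# The optional `t` variable narrows what runs; the module and function
+# names below are hypothetical:
+#   make eunit t=my_mod          # run one module's tests
+#   make eunit t=my_mod:my_test  # run a single exported 0-arity test
+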
+ifdef t
+ifeq (,$(findstring :,$(t)))
+eunit: test-build cover-data-dir
+ $(gen_verbose) $(call erlang,$(call eunit.erl,['$(t)']),$(EUNIT_ERL_OPTS))
+else
+eunit: test-build cover-data-dir
+ $(gen_verbose) $(call erlang,$(call eunit.erl,fun $(t)/0),$(EUNIT_ERL_OPTS))
+endif
+else
+EUNIT_EBIN_MODS = $(notdir $(basename $(ERL_FILES) $(BEAM_FILES)))
+EUNIT_TEST_MODS = $(notdir $(basename $(call core_find,$(TEST_DIR)/,*.erl)))
+
+EUNIT_MODS = $(foreach mod,$(EUNIT_EBIN_MODS) $(filter-out \
+ $(patsubst %,%_tests,$(EUNIT_EBIN_MODS)),$(EUNIT_TEST_MODS)),'$(mod)')
+
+eunit: test-build $(if $(IS_APP)$(ROOT_DIR),,apps-eunit) cover-data-dir
+ifneq ($(wildcard src/ $(TEST_DIR)),)
+ $(gen_verbose) $(call erlang,$(call eunit.erl,[$(call comma_list,$(EUNIT_MODS))]),$(EUNIT_ERL_OPTS))
+endif
+
+ifneq ($(ALL_APPS_DIRS),)
+apps-eunit: test-build
+ $(verbose) eunit_retcode=0 ; for app in $(ALL_APPS_DIRS); do $(MAKE) -C $$app eunit IS_APP=1; \
+ [ $$? -ne 0 ] && eunit_retcode=1 ; done ; \
+ exit $$eunit_retcode
+endif
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Verbosity.
+
+proto_verbose_0 = @echo " PROTO " $(filter %.proto,$(?F));
+proto_verbose = $(proto_verbose_$(V))
+
+# Core targets.
+
+ifneq ($(wildcard src/),)
+ifneq ($(filter gpb protobuffs,$(BUILD_DEPS) $(DEPS)),)
+PROTO_FILES := $(filter %.proto,$(ALL_SRC_FILES))
+ERL_FILES += $(addprefix src/,$(patsubst %.proto,%_pb.erl,$(notdir $(PROTO_FILES))))
+
+ifeq ($(PROTO_FILES),)
+$(ERLANG_MK_TMP)/last-makefile-change-protobuffs:
+ $(verbose) :
+else
+# Rebuild proto files when the Makefile changes.
+# We exclude $(PROJECT).d to avoid a circular dependency.
+$(ERLANG_MK_TMP)/last-makefile-change-protobuffs: $(filter-out $(PROJECT).d,$(MAKEFILE_LIST)) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(PROTO_FILES); \
+ fi
+ $(verbose) touch $@
+
+$(PROJECT).d:: $(ERLANG_MK_TMP)/last-makefile-change-protobuffs
+endif
+
+ifeq ($(filter gpb,$(BUILD_DEPS) $(DEPS)),)
+define compile_proto.erl
+ [begin
+ protobuffs_compile:generate_source(F, [
+ {output_include_dir, "./include"},
+ {output_src_dir, "./src"}])
+ end || F <- string:tokens("$1", " ")],
+ halt().
+endef
+else
+define compile_proto.erl
+ [begin
+ gpb_compile:file(F, [
+ {include_as_lib, true},
+ {module_name_suffix, "_pb"},
+ {o_hrl, "./include"},
+ {o_erl, "./src"}])
+ end || F <- string:tokens("$1", " ")],
+ halt().
+endef
+endif
+
+ifneq ($(PROTO_FILES),)
+$(PROJECT).d:: $(PROTO_FILES)
+ $(verbose) mkdir -p ebin/ include/
+ $(if $(strip $?),$(proto_verbose) $(call erlang,$(call compile_proto.erl,$?)))
+endif
+endif
+endif
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: relx-rel relx-relup distclean-relx-rel run
+
+# Configuration.
+
+RELX ?= $(ERLANG_MK_TMP)/relx
+RELX_CONFIG ?= $(CURDIR)/relx.config
+
+RELX_URL ?= https://erlang.mk/res/relx-v3.27.0
+RELX_OPTS ?=
+RELX_OUTPUT_DIR ?= _rel
+RELX_REL_EXT ?=
+RELX_TAR ?= 1
+
+ifdef SFX
+ RELX_TAR = 1
+endif
+
+ifeq ($(firstword $(RELX_OPTS)),-o)
+ RELX_OUTPUT_DIR = $(word 2,$(RELX_OPTS))
+else
+ RELX_OPTS += -o $(RELX_OUTPUT_DIR)
+endif
+
+# Core targets.
+
+ifeq ($(IS_DEP),)
+ifneq ($(wildcard $(RELX_CONFIG)),)
+rel:: relx-rel
+
+relup:: relx-relup
+endif
+endif
+
+distclean:: distclean-relx-rel
+
+# Plugin-specific targets.
+
+$(RELX): | $(ERLANG_MK_TMP)
+ $(gen_verbose) $(call core_http_get,$(RELX),$(RELX_URL))
+ $(verbose) chmod +x $(RELX)
+
+relx-rel: $(RELX) rel-deps app
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) release
+ $(verbose) $(MAKE) relx-post-rel
+ifeq ($(RELX_TAR),1)
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) tar
+endif
+
+relx-relup: $(RELX) rel-deps app
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) release
+ $(MAKE) relx-post-rel
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) relup $(if $(filter 1,$(RELX_TAR)),tar)
+
+distclean-relx-rel:
+ $(gen_verbose) rm -rf $(RELX_OUTPUT_DIR)
+
+# Default hooks.
+relx-post-rel::
+ $(verbose) :
+
+# Run target.
+
+ifeq ($(wildcard $(RELX_CONFIG)),)
+run::
+else
+
+define get_relx_release.erl
+ {ok, Config} = file:consult("$(call core_native_path,$(RELX_CONFIG))"),
+ {release, {Name, Vsn0}, _} = lists:keyfind(release, 1, Config),
+ Vsn = case Vsn0 of
+ {cmd, Cmd} -> os:cmd(Cmd);
+ semver -> "";
+ {semver, _} -> "";
+ VsnStr -> Vsn0
+ end,
+ Extended = case lists:keyfind(extended_start_script, 1, Config) of
+ {_, true} -> "1";
+ _ -> ""
+ end,
+ io:format("~s ~s ~s", [Name, Vsn, Extended]),
+ halt(0).
+endef
+
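+# For illustration, a minimal (hypothetical) relx.config the snippet above
+# understands:
+#   {release, {my_rel, "1.2.3"}, [my_app]}.
+#   {extended_start_script, true}.
+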
+RELX_REL := $(shell $(call erlang,$(get_relx_release.erl)))
+RELX_REL_NAME := $(word 1,$(RELX_REL))
+RELX_REL_VSN := $(word 2,$(RELX_REL))
+RELX_REL_CMD := $(if $(word 3,$(RELX_REL)),console)
+
+ifeq ($(PLATFORM),msys2)
+RELX_REL_EXT := .cmd
+endif
+
+run:: all
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) $(RELX_REL_CMD)
+
+ifdef RELOAD
+rel::
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) ping
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) \
+ eval "io:format(\"~p~n\", [c:lm()])"
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Relx targets:" \
+ " run Compile the project, build the release and run it"
+
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, M Robert Martin <rob@version2beta.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: shell
+
+# Configuration.
+
+SHELL_ERL ?= erl
+SHELL_PATHS ?= $(CURDIR)/ebin $(TEST_DIR)
+SHELL_OPTS ?=
+
+ALL_SHELL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(SHELL_DEPS))
+
+# Core targets
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Shell targets:" \
+ " shell Run an erlang shell with SHELL_OPTS or reasonable default"
+
+# Plugin-specific targets.
+
+$(foreach dep,$(SHELL_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+build-shell-deps:
+else
+build-shell-deps: $(ALL_SHELL_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_SHELL_DEPS_DIRS) ; do \
+ if [ -z "$(strip $(FULL))" ] && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ else \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ fi \
+ done
+endif
+
+shell:: build-shell-deps
+ $(gen_verbose) $(SHELL_ERL) -pa $(SHELL_PATHS) $(SHELL_OPTS)
+
+# Copyright 2017, Stanislaw Klekot <dozzie@jarowit.net>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-sphinx sphinx
+
+# Configuration.
+
+SPHINX_BUILD ?= sphinx-build
+SPHINX_SOURCE ?= doc
+SPHINX_CONFDIR ?=
+SPHINX_FORMATS ?= html
+SPHINX_DOCTREES ?= $(ERLANG_MK_TMP)/sphinx.doctrees
+SPHINX_OPTS ?=
+
+#sphinx_html_opts =
+#sphinx_html_output = html
+#sphinx_man_opts =
+#sphinx_man_output = man
+#sphinx_latex_opts =
+#sphinx_latex_output = latex
+
+# Helpers.
+
+sphinx_build_0 = @echo " SPHINX" $1; $(SPHINX_BUILD) -N -q
+sphinx_build_1 = $(SPHINX_BUILD) -N
+sphinx_build_2 = set -x; $(SPHINX_BUILD)
+sphinx_build = $(sphinx_build_$(V))
+
+define sphinx.build
+$(call sphinx_build,$1) -b $1 -d $(SPHINX_DOCTREES) $(if $(SPHINX_CONFDIR),-c $(SPHINX_CONFDIR)) $(SPHINX_OPTS) $(sphinx_$1_opts) -- $(SPHINX_SOURCE) $(call sphinx.output,$1)
+
+endef
+
+define sphinx.output
+$(if $(sphinx_$1_output),$(sphinx_$1_output),$1)
+endef
+
+# Targets.
+
+ifneq ($(wildcard $(if $(SPHINX_CONFDIR),$(SPHINX_CONFDIR),$(SPHINX_SOURCE))/conf.py),)
+docs:: sphinx
+distclean:: distclean-sphinx
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Sphinx targets:" \
+ " sphinx Generate Sphinx documentation." \
+ "" \
+ "ReST sources and 'conf.py' file are expected in directory pointed by" \
+ "SPHINX_SOURCE ('doc' by default). SPHINX_FORMATS lists formats to build (only" \
+ "'html' format is generated by default); target directory can be specified by" \
+ 'setting sphinx_$${format}_output, for example: sphinx_html_output = output/html' \
+ "Additional Sphinx options can be set in SPHINX_OPTS."
+
+# Plugin-specific targets.
+
+sphinx:
+ $(foreach F,$(SPHINX_FORMATS),$(call sphinx.build,$F))
+
+distclean-sphinx:
+ $(gen_verbose) rm -rf $(filter-out $(SPHINX_SOURCE),$(foreach F,$(SPHINX_FORMATS),$(call sphinx.output,$F)))
+
+# Copyright (c) 2017, Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: show-ERL_LIBS show-ERLC_OPTS show-TEST_ERLC_OPTS
+
+show-ERL_LIBS:
+ @echo $(ERL_LIBS)
+
+show-ERLC_OPTS:
+ @$(foreach opt,$(ERLC_OPTS) -pa ebin -I include,echo "$(opt)";)
+
+show-TEST_ERLC_OPTS:
+ @$(foreach opt,$(TEST_ERLC_OPTS) -pa ebin -I include,echo "$(opt)";)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter triq,$(DEPS) $(TEST_DEPS)),triq)
+.PHONY: triq
+
+# Targets.
+
+tests:: triq
+
+define triq_check.erl
+ $(call cover.erl)
+ code:add_pathsa([
+ "$(call core_native_path,$(CURDIR)/ebin)",
+ "$(call core_native_path,$(DEPS_DIR)/*/ebin)",
+ "$(call core_native_path,$(TEST_DIR))"]),
+ try begin
+ CoverSetup(),
+ Res = case $(1) of
+ all -> [true] =:= lists:usort([triq:check(M) || M <- [$(call comma_list,$(3))]]);
+ module -> triq:check($(2));
+ function -> triq:check($(2))
+ end,
+ CoverExport("$(COVER_DATA_DIR)/triq.coverdata"),
+ Res
+ end of
+ true -> halt(0);
+ _ -> halt(1)
+ catch error:undef ->
+ io:format("Undefined property or module?~n~p~n", [erlang:get_stacktrace()]),
+ halt(0)
+ end.
+endef
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+triq: test-build cover-data-dir
+ $(verbose) $(call erlang,$(call triq_check.erl,module,$(t)))
+else
+triq: test-build cover-data-dir
+ $(verbose) echo Testing $(t)/0
+ $(verbose) $(call erlang,$(call triq_check.erl,function,$(t)()))
+endif
+else
+triq: test-build cover-data-dir
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(wildcard ebin/*.beam) $(call core_find,$(TEST_DIR)/,*.beam))))))
+ $(gen_verbose) $(call erlang,$(call triq_check.erl,all,undefined,$(MODULES)))
+endif
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Erlang Solutions Ltd.
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: xref distclean-xref
+
+# Configuration.
+
+ifeq ($(XREF_CONFIG),)
+ XREFR_ARGS :=
+else
+ XREFR_ARGS := -c $(XREF_CONFIG)
+endif
+
+XREFR ?= $(CURDIR)/xrefr
+export XREFR
+
+XREFR_URL ?= https://github.com/inaka/xref_runner/releases/download/1.1.0/xrefr
+
+# Core targets.
+
+help::
+ $(verbose) printf '%s\n' '' \
+ 'Xref targets:' \
+ ' xref Run Xrefr using $$XREF_CONFIG as config file if defined'
+
+distclean:: distclean-xref
+
+# Plugin-specific targets.
+
+$(XREFR):
+ $(gen_verbose) $(call core_http_get,$(XREFR),$(XREFR_URL))
+ $(verbose) chmod +x $(XREFR)
+
+xref: deps app $(XREFR)
+ $(gen_verbose) $(XREFR) $(XREFR_ARGS)
+
+distclean-xref:
+ $(gen_verbose) rm -rf $(XREFR)
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Viktor Söderqvist <viktor@zuiderkwast.se>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+COVER_REPORT_DIR ?= cover
+COVER_DATA_DIR ?= $(COVER_REPORT_DIR)
+
+ifdef COVER
+COVER_APPS ?= $(notdir $(ALL_APPS_DIRS))
+COVER_DEPS ?=
+endif
+
+# Code coverage for Common Test.
+
+ifdef COVER
+ifdef CT_RUN
+ifneq ($(wildcard $(TEST_DIR)),)
+test-build:: $(TEST_DIR)/ct.cover.spec
+
+$(TEST_DIR)/ct.cover.spec: cover-data-dir
+ $(gen_verbose) printf "%s\n" \
+ "{incl_app, '$(PROJECT)', details}." \
+ "{incl_dirs, '$(PROJECT)', [\"$(call core_native_path,$(CURDIR)/ebin)\" \
+ $(foreach a,$(COVER_APPS),$(comma) \"$(call core_native_path,$(APPS_DIR)/$a/ebin)\") \
+ $(foreach d,$(COVER_DEPS),$(comma) \"$(call core_native_path,$(DEPS_DIR)/$d/ebin)\")]}." \
+ '{export,"$(call core_native_path,$(abspath $(COVER_DATA_DIR))/ct.coverdata)"}.' > $@
+
+CT_RUN += -cover $(TEST_DIR)/ct.cover.spec
+endif
+endif
+endif
+
+# Code coverage for other tools.
+
+ifdef COVER
+define cover.erl
+ CoverSetup = fun() ->
+ Dirs = ["$(call core_native_path,$(CURDIR)/ebin)"
+ $(foreach a,$(COVER_APPS),$(comma) "$(call core_native_path,$(APPS_DIR)/$a/ebin)")
+ $(foreach d,$(COVER_DEPS),$(comma) "$(call core_native_path,$(DEPS_DIR)/$d/ebin)")],
+ [begin
+ case filelib:is_dir(Dir) of
+ false -> false;
+ true ->
+ case cover:compile_beam_directory(Dir) of
+ {error, _} -> halt(1);
+ _ -> true
+ end
+ end
+ end || Dir <- Dirs]
+ end,
+ CoverExport = fun(Filename) -> cover:export(Filename) end,
+endef
+else
+define cover.erl
+ CoverSetup = fun() -> ok end,
+ CoverExport = fun(_) -> ok end,
+endef
+endif
+
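+# cover.erl is spliced into the eunit/triq snippets above: CoverSetup()
+# cover-compiles every listed ebin directory and CoverExport(File) writes
+# out the collected data; both become no-ops when COVER is unset.
+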
+# Core targets
+
+ifdef COVER
+ifneq ($(COVER_REPORT_DIR),)
+tests::
+ $(verbose) $(MAKE) --no-print-directory cover-report
+endif
+
+cover-data-dir: | $(COVER_DATA_DIR)
+
+$(COVER_DATA_DIR):
+ $(verbose) mkdir -p $(COVER_DATA_DIR)
+else
+cover-data-dir:
+endif
+
+clean:: coverdata-clean
+
+ifneq ($(COVER_REPORT_DIR),)
+distclean:: cover-report-clean
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Cover targets:" \
+ " cover-report Generate a HTML coverage report from previously collected" \
+ " cover data." \
+ " all.coverdata Merge all coverdata files into all.coverdata." \
+ "" \
+ "If COVER=1 is set, coverage data is generated by the targets eunit and ct. The" \
+ "target tests additionally generates a HTML coverage report from the combined" \
+ "coverdata files from each of these testing tools. HTML reports can be disabled" \
+ "by setting COVER_REPORT_DIR to empty."
+
+# Plugin specific targets
+
+COVERDATA = $(filter-out $(COVER_DATA_DIR)/all.coverdata,$(wildcard $(COVER_DATA_DIR)/*.coverdata))
+
+.PHONY: coverdata-clean
+coverdata-clean:
+ $(gen_verbose) rm -f $(COVER_DATA_DIR)/*.coverdata $(TEST_DIR)/ct.cover.spec
+
+# Merge all coverdata files into one.
+define cover_export.erl
+ $(foreach f,$(COVERDATA),cover:import("$(f)") == ok orelse halt(1),)
+ cover:export("$(COVER_DATA_DIR)/$@"), halt(0).
+endef
+
+all.coverdata: $(COVERDATA) cover-data-dir
+ $(gen_verbose) $(call erlang,$(cover_export.erl))
+
+# These are only defined if COVER_REPORT_DIR is non-empty. Set COVER_REPORT_DIR to
+# empty if you want the coverdata files but not the HTML report.
+ifneq ($(COVER_REPORT_DIR),)
+
+.PHONY: cover-report-clean cover-report
+
+cover-report-clean:
+ $(gen_verbose) rm -rf $(COVER_REPORT_DIR)
+ifneq ($(COVER_REPORT_DIR),$(COVER_DATA_DIR))
+ $(if $(shell ls -A $(COVER_DATA_DIR)/),,$(verbose) rmdir $(COVER_DATA_DIR))
+endif
+
+ifeq ($(COVERDATA),)
+cover-report:
+else
+
+# Modules which include eunit.hrl always contain one line without coverage
+# because eunit defines test/0 which is never called. We compensate for this.
+EUNIT_HRL_MODS = $(subst $(space),$(comma),$(shell \
+ grep -H -e '^\s*-include.*include/eunit\.hrl"' src/*.erl \
+ | sed "s/^src\/\(.*\)\.erl:.*/'\1'/" | uniq))
+
+define cover_report.erl
+ $(foreach f,$(COVERDATA),cover:import("$(f)") == ok orelse halt(1),)
+ Ms = cover:imported_modules(),
+ [cover:analyse_to_file(M, "$(COVER_REPORT_DIR)/" ++ atom_to_list(M)
+ ++ ".COVER.html", [html]) || M <- Ms],
+ Report = [begin {ok, R} = cover:analyse(M, module), R end || M <- Ms],
+ EunitHrlMods = [$(EUNIT_HRL_MODS)],
+ Report1 = [{M, {Y, case lists:member(M, EunitHrlMods) of
+ true -> N - 1; false -> N end}} || {M, {Y, N}} <- Report],
+ TotalY = lists:sum([Y || {_, {Y, _}} <- Report1]),
+ TotalN = lists:sum([N || {_, {_, N}} <- Report1]),
+ Perc = fun(Y, N) -> case Y + N of 0 -> 100; S -> round(100 * Y / S) end end,
+ TotalPerc = Perc(TotalY, TotalN),
+ {ok, F} = file:open("$(COVER_REPORT_DIR)/index.html", [write]),
+ io:format(F, "<!DOCTYPE html><html>~n"
+ "<head><meta charset=\"UTF-8\">~n"
+ "<title>Coverage report</title></head>~n"
+ "<body>~n", []),
+ io:format(F, "<h1>Coverage</h1>~n<p>Total: ~p%</p>~n", [TotalPerc]),
+ io:format(F, "<table><tr><th>Module</th><th>Coverage</th></tr>~n", []),
+ [io:format(F, "<tr><td><a href=\"~p.COVER.html\">~p</a></td>"
+ "<td>~p%</td></tr>~n",
+ [M, M, Perc(Y, N)]) || {M, {Y, N}} <- Report1],
+ How = "$(subst $(space),$(comma)$(space),$(basename $(COVERDATA)))",
+ Date = "$(shell date -u "+%Y-%m-%dT%H:%M:%SZ")",
+ io:format(F, "</table>~n"
+ "<p>Generated using ~s and erlang.mk on ~s.</p>~n"
+ "</body></html>", [How, Date]),
+ halt().
+endef
+
+cover-report:
+ $(verbose) mkdir -p $(COVER_REPORT_DIR)
+ $(gen_verbose) $(call erlang,$(cover_report.erl))
+
+endif
+endif # ifneq ($(COVER_REPORT_DIR),)
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: sfx
+
+ifdef RELX_REL
+ifdef SFX
+
+# Configuration.
+
+SFX_ARCHIVE ?= $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/$(RELX_REL_NAME)-$(RELX_REL_VSN).tar.gz
+SFX_OUTPUT_FILE ?= $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME).run
+
+# Core targets.
+
+rel:: sfx
+
+# Plugin-specific targets.
+
+define sfx_stub
+#!/bin/sh
+
+TMPDIR=`mktemp -d`
+ARCHIVE=`awk '/^__ARCHIVE_BELOW__$$/ {print NR + 1; exit 0;}' $$0`
+FILENAME=$$(basename $$0)
+REL=$${FILENAME%.*}
+
+tail -n+$$ARCHIVE $$0 | tar -xzf - -C $$TMPDIR
+
+$$TMPDIR/bin/$$REL console
+RET=$$?
+
+rm -rf $$TMPDIR
+
+exit $$RET
+
+__ARCHIVE_BELOW__
+endef
+
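+# The stub above finds the line following the __ARCHIVE_BELOW__ marker,
+# untars everything from that point into a temporary directory, runs the
+# release's console and cleans up; the recipe below appends the actual
+# release tarball after the marker.
+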
+sfx:
+ $(verbose) $(call core_render,sfx_stub,$(SFX_OUTPUT_FILE))
+ $(gen_verbose) cat $(SFX_ARCHIVE) >> $(SFX_OUTPUT_FILE)
+ $(verbose) chmod +x $(SFX_OUTPUT_FILE)
+
+endif
+endif
+
+# Copyright (c) 2013-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# External plugins.
+
+DEP_PLUGINS ?=
+
+$(foreach p,$(DEP_PLUGINS),\
+ $(eval $(if $(findstring /,$p),\
+ $(call core_dep_plugin,$p,$(firstword $(subst /, ,$p))),\
+ $(call core_dep_plugin,$p/plugins.mk,$p))))
+
+help:: help-plugins
+
+help-plugins::
+ $(verbose) :
+
+# Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015-2016, Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Fetch dependencies recursively (without building them).
+
+.PHONY: fetch-deps fetch-doc-deps fetch-rel-deps fetch-test-deps \
+ fetch-shell-deps
+
+.PHONY: $(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+fetch-deps: $(ERLANG_MK_RECURSIVE_DEPS_LIST)
+fetch-doc-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST)
+fetch-rel-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST)
+fetch-test-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST)
+fetch-shell-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+ifneq ($(SKIP_DEPS),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST):
+ $(verbose) :> $@
+else
+# By default, we fetch "normal" dependencies. They are also included no
+# matter which type of dependencies is requested.
+#
+# $(ALL_DEPS_DIRS) includes $(BUILD_DEPS).
+
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_DOC_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_REL_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_TEST_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_SHELL_DEPS_DIRS)
+
+# Allow the use of fetch-deps and $(DEP_TYPES) to fetch multiple types of
+# dependencies with a single target.
+ifneq ($(filter doc,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_DOC_DEPS_DIRS)
+endif
+ifneq ($(filter rel,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_REL_DEPS_DIRS)
+endif
+ifneq ($(filter test,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_TEST_DEPS_DIRS)
+endif
+ifneq ($(filter shell,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_SHELL_DEPS_DIRS)
+endif
+
+ERLANG_MK_RECURSIVE_TMP_LIST := $(abspath $(ERLANG_MK_TMP)/recursive-tmp-deps-$(shell echo $$PPID).log)
+
+$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): | $(ERLANG_MK_TMP)
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_RECURSIVE_TMP_LIST)
+endif
+ $(verbose) touch $(ERLANG_MK_RECURSIVE_TMP_LIST)
+ $(verbose) set -e; for dep in $^ ; do \
+ if ! grep -qs ^$$dep$$ $(ERLANG_MK_RECURSIVE_TMP_LIST); then \
+ echo $$dep >> $(ERLANG_MK_RECURSIVE_TMP_LIST); \
+ if grep -qs -E "^[[:blank:]]*include[[:blank:]]+(erlang\.mk|.*/erlang\.mk|.*ERLANG_MK_FILENAME.*)$$" \
+ $$dep/GNUmakefile $$dep/makefile $$dep/Makefile; then \
+ $(MAKE) -C $$dep fetch-deps \
+ IS_DEP=1 \
+ ERLANG_MK_RECURSIVE_TMP_LIST=$(ERLANG_MK_RECURSIVE_TMP_LIST); \
+ fi \
+ fi \
+ done
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) sort < $(ERLANG_MK_RECURSIVE_TMP_LIST) | \
+ uniq > $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted
+ $(verbose) cmp -s $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted $@ \
+ || mv $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted $@
+ $(verbose) rm -f $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted
+ $(verbose) rm $(ERLANG_MK_RECURSIVE_TMP_LIST)
+endif
+endif # ifneq ($(SKIP_DEPS),)
+
+# List dependencies recursively.
+
+.PHONY: list-deps list-doc-deps list-rel-deps list-test-deps \
+ list-shell-deps
+
+list-deps: $(ERLANG_MK_RECURSIVE_DEPS_LIST)
+list-doc-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST)
+list-rel-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST)
+list-test-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST)
+list-shell-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+list-deps list-doc-deps list-rel-deps list-test-deps list-shell-deps:
+ $(verbose) cat $^
+
+# Query dependencies recursively.
+
+.PHONY: query-deps query-doc-deps query-rel-deps query-test-deps \
+ query-shell-deps
+
+QUERY ?= name fetch_method repo version
+
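+# For example, `make query-deps QUERY="name repo"` would print only those
+# two fields for every dependency.
+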
+define query_target
+$(1): $(2) clean-tmp-query.log
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(4)
+endif
+ $(verbose) $(foreach dep,$(3),\
+ echo $(PROJECT): $(foreach q,$(QUERY),$(call query_$(q),$(dep))) >> $(4) ;)
+ $(if $(filter-out query-deps,$(1)),,\
+ $(verbose) set -e; for dep in $(3) ; do \
+ if grep -qs ^$$$$dep$$$$ $(ERLANG_MK_TMP)/query.log; then \
+ :; \
+ else \
+ echo $$$$dep >> $(ERLANG_MK_TMP)/query.log; \
+ $(MAKE) -C $(DEPS_DIR)/$$$$dep $$@ QUERY="$(QUERY)" IS_DEP=1 || true; \
+ fi \
+ done)
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) touch $(4)
+ $(verbose) cat $(4)
+endif
+endef
+
+clean-tmp-query.log:
+ifeq ($(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_TMP)/query.log
+endif
+
+$(eval $(call query_target,query-deps,$(ERLANG_MK_RECURSIVE_DEPS_LIST),$(BUILD_DEPS) $(DEPS),$(ERLANG_MK_QUERY_DEPS_FILE)))
+$(eval $(call query_target,query-doc-deps,$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST),$(DOC_DEPS),$(ERLANG_MK_QUERY_DOC_DEPS_FILE)))
+$(eval $(call query_target,query-rel-deps,$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST),$(REL_DEPS),$(ERLANG_MK_QUERY_REL_DEPS_FILE)))
+$(eval $(call query_target,query-test-deps,$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST),$(TEST_DEPS),$(ERLANG_MK_QUERY_TEST_DEPS_FILE)))
+$(eval $(call query_target,query-shell-deps,$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST),$(SHELL_DEPS),$(ERLANG_MK_QUERY_SHELL_DEPS_FILE)))
diff --git a/deps/amqp_client/include/amqp_client.hrl b/deps/amqp_client/include/amqp_client.hrl
new file mode 100644
index 0000000000..382525177d
--- /dev/null
+++ b/deps/amqp_client/include/amqp_client.hrl
@@ -0,0 +1,47 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-ifndef(AMQP_CLIENT_HRL).
+-define(AMQP_CLIENT_HRL, true).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+-include_lib("rabbit_common/include/rabbit_framing.hrl").
+
+-record(amqp_msg, {props = #'P_basic'{}, payload = <<>>}).
+
+-record(amqp_params_network, {username = <<"guest">>,
+ password = <<"guest">>,
+ virtual_host = <<"/">>,
+ host = "localhost",
+ port = undefined,
+ channel_max = 2047,
+ frame_max = 0,
+ heartbeat = 10,
+ connection_timeout = 60000,
+ ssl_options = none,
+ auth_mechanisms =
+ [fun amqp_auth_mechanisms:plain/3,
+ fun amqp_auth_mechanisms:amqplain/3],
+ client_properties = [],
+ socket_options = []}).
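+
+%% For illustration only (a sketch; the host value is an assumption):
+%%   {ok, Connection} =
+%%       amqp_connection:start(#amqp_params_network{host = "localhost"}),
+%%   {ok, Channel} = amqp_connection:open_channel(Connection).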
+
+-record(amqp_params_direct, {username = none,
+ password = none,
+ virtual_host = <<"/">>,
+ node = node(),
+ adapter_info = none,
+ client_properties = []}).
+
+-record(amqp_adapter_info, {host = unknown,
+ port = unknown,
+ peer_host = unknown,
+ peer_port = unknown,
+ name = unknown,
+ protocol = unknown,
+ additional_info = []}).
+
+-endif.
diff --git a/deps/amqp_client/include/amqp_client_internal.hrl b/deps/amqp_client/include/amqp_client_internal.hrl
new file mode 100644
index 0000000000..01e099097e
--- /dev/null
+++ b/deps/amqp_client/include/amqp_client_internal.hrl
@@ -0,0 +1,30 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-include("amqp_client.hrl").
+
+-define(PROTOCOL_VERSION_MAJOR, 0).
+-define(PROTOCOL_VERSION_MINOR, 9).
+-define(PROTOCOL_HEADER, <<"AMQP", 0, 0, 9, 1>>).
+-define(PROTOCOL, rabbit_framing_amqp_0_9_1).
+
+-define(MAX_CHANNEL_NUMBER, 65535).
+
+-define(LOG_DEBUG(Format), error_logger:info_msg(Format)).
+-define(LOG_INFO(Format, Args), error_logger:info_msg(Format, Args)).
+-define(LOG_WARN(Format, Args), error_logger:warning_msg(Format, Args)).
+-define(LOG_ERR(Format, Args), error_logger:error_msg(Format, Args)).
+
+-define(CLIENT_CAPABILITIES,
+ [{<<"publisher_confirms">>, bool, true},
+ {<<"exchange_exchange_bindings">>, bool, true},
+ {<<"basic.nack">>, bool, true},
+ {<<"consumer_cancel_notify">>, bool, true},
+ {<<"connection.blocked">>, bool, true},
+ {<<"authentication_failure_close">>, bool, true}]).
+
+-define(WAIT_FOR_CONFIRMS_TIMEOUT, {60000, millisecond}).
diff --git a/deps/amqp_client/include/amqp_gen_consumer_spec.hrl b/deps/amqp_client/include/amqp_gen_consumer_spec.hrl
new file mode 100644
index 0000000000..29a305352a
--- /dev/null
+++ b/deps/amqp_client/include/amqp_gen_consumer_spec.hrl
@@ -0,0 +1,32 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-include("amqp_client.hrl").
+
+-type state() :: any().
+-type consume() :: #'basic.consume'{}.
+-type consume_ok() :: #'basic.consume_ok'{}.
+-type cancel() :: #'basic.cancel'{}.
+-type cancel_ok() :: #'basic.cancel_ok'{}.
+-type deliver() :: #'basic.deliver'{}.
+-type from() :: any().
+-type reason() :: any().
+-type ok_error() :: {ok, state()} | {error, reason(), state()}.
+
+-spec init([any()]) -> {ok, state()}.
+-spec handle_consume(consume(), pid(), state()) -> ok_error().
+-spec handle_consume_ok(consume_ok(), consume(), state()) ->
+ ok_error().
+-spec handle_cancel(cancel(), state()) -> ok_error().
+-spec handle_server_cancel(cancel(), state()) -> ok_error().
+-spec handle_cancel_ok(cancel_ok(), cancel(), state()) -> ok_error().
+-spec handle_deliver(deliver(), #amqp_msg{}, state()) -> ok_error().
+-spec handle_info(any(), state()) -> ok_error().
+-spec handle_call(any(), from(), state()) ->
+ {reply, any(), state()} | {noreply, state()} |
+ {error, reason(), state()}.
+-spec terminate(any(), state()) -> state().
diff --git a/deps/amqp_client/include/rabbit_routing_prefixes.hrl b/deps/amqp_client/include/rabbit_routing_prefixes.hrl
new file mode 100644
index 0000000000..49c680deec
--- /dev/null
+++ b/deps/amqp_client/include/rabbit_routing_prefixes.hrl
@@ -0,0 +1,15 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-define(QUEUE_PREFIX, "/queue").
+-define(TOPIC_PREFIX, "/topic").
+-define(EXCHANGE_PREFIX, "/exchange").
+-define(AMQQUEUE_PREFIX, "/amq/queue").
+-define(TEMP_QUEUE_PREFIX, "/temp-queue").
+%% Reply queue names can contain slashes, so no further parsing happens.
+-define(REPLY_QUEUE_PREFIX, "/reply-queue/").
diff --git a/deps/amqp_client/rabbitmq-components.mk b/deps/amqp_client/rabbitmq-components.mk
new file mode 100644
index 0000000000..b2a3be8b35
--- /dev/null
+++ b/deps/amqp_client/rabbitmq-components.mk
@@ -0,0 +1,359 @@
+ifeq ($(.DEFAULT_GOAL),)
+# Define default goal to `all` because this file defines some targets
+# before the inclusion of erlang.mk leading to the wrong target becoming
+# the default.
+.DEFAULT_GOAL = all
+endif
+
+# PROJECT_VERSION defaults to:
+# 1. the version exported by rabbitmq-server-release;
+# 2. the version stored in `git-revisions.txt`, if it exists;
+# 3. a version based on git-describe(1), if it is a Git clone;
+# 4. 0.0.0
+
+PROJECT_VERSION := $(RABBITMQ_VERSION)
+
+ifeq ($(PROJECT_VERSION),)
+PROJECT_VERSION := $(shell \
+if test -f git-revisions.txt; then \
+ head -n1 git-revisions.txt | \
+ awk '{print $$$(words $(PROJECT_DESCRIPTION) version);}'; \
+else \
+ (git describe --dirty --abbrev=7 --tags --always --first-parent \
+ 2>/dev/null || echo rabbitmq_v0_0_0) | \
+ sed -e 's/^rabbitmq_v//' -e 's/^v//' -e 's/_/./g' -e 's/-/+/' \
+ -e 's/-/./g'; \
+fi)
+endif
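+
+# Note: the awk call above prints the field just past the words of
+# $(PROJECT_DESCRIPTION) on the first line of git-revisions.txt, which is
+# assumed to hold the version.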
+
+# --------------------------------------------------------------------
+# RabbitMQ components.
+# --------------------------------------------------------------------
+
+# For RabbitMQ repositories, we want to check out branches that match
+# the parent project. For instance, if the parent project is on a
+# release tag, dependencies must be on the same release tag. If the
+# parent project is on a topic branch, dependencies must be on the same
+# topic branch, or fall back to `stable` or `master`, whichever was the
+# base of the topic branch.
+
+dep_amqp_client = git_rmq rabbitmq-erlang-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_amqp10_client = git_rmq rabbitmq-amqp1.0-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_amqp10_common = git_rmq rabbitmq-amqp1.0-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbit = git_rmq rabbitmq-server $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbit_common = git_rmq rabbitmq-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_amqp1_0 = git_rmq rabbitmq-amqp1.0 $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_amqp = git_rmq rabbitmq-auth-backend-amqp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_cache = git_rmq rabbitmq-auth-backend-cache $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_http = git_rmq rabbitmq-auth-backend-http $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_ldap = git_rmq rabbitmq-auth-backend-ldap $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_oauth2 = git_rmq rabbitmq-auth-backend-oauth2 $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_mechanism_ssl = git_rmq rabbitmq-auth-mechanism-ssl $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_aws = git_rmq rabbitmq-aws $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_boot_steps_visualiser = git_rmq rabbitmq-boot-steps-visualiser $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_cli = git_rmq rabbitmq-cli $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_codegen = git_rmq rabbitmq-codegen $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_consistent_hash_exchange = git_rmq rabbitmq-consistent-hash-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_ct_client_helpers = git_rmq rabbitmq-ct-client-helpers $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_ct_helpers = git_rmq rabbitmq-ct-helpers $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_delayed_message_exchange = git_rmq rabbitmq-delayed-message-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_dotnet_client = git_rmq rabbitmq-dotnet-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_event_exchange = git_rmq rabbitmq-event-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_federation = git_rmq rabbitmq-federation $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_federation_management = git_rmq rabbitmq-federation-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_java_client = git_rmq rabbitmq-java-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_client = git_rmq rabbitmq-jms-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_cts = git_rmq rabbitmq-jms-cts $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_topic_exchange = git_rmq rabbitmq-jms-topic-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_lvc_exchange = git_rmq rabbitmq-lvc-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management = git_rmq rabbitmq-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_agent = git_rmq rabbitmq-management-agent $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_exchange = git_rmq rabbitmq-management-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_themes = git_rmq rabbitmq-management-themes $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_message_timestamp = git_rmq rabbitmq-message-timestamp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_metronome = git_rmq rabbitmq-metronome $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_mqtt = git_rmq rabbitmq-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_objc_client = git_rmq rabbitmq-objc-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_aws = git_rmq rabbitmq-peer-discovery-aws $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_common = git_rmq rabbitmq-peer-discovery-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_consul = git_rmq rabbitmq-peer-discovery-consul $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_etcd = git_rmq rabbitmq-peer-discovery-etcd $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_k8s = git_rmq rabbitmq-peer-discovery-k8s $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_prometheus = git_rmq rabbitmq-prometheus $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_random_exchange = git_rmq rabbitmq-random-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_recent_history_exchange = git_rmq rabbitmq-recent-history-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_routing_node_stamp = git_rmq rabbitmq-routing-node-stamp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_rtopic_exchange = git_rmq rabbitmq-rtopic-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_server_release = git_rmq rabbitmq-server-release $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_sharding = git_rmq rabbitmq-sharding $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_shovel = git_rmq rabbitmq-shovel $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_shovel_management = git_rmq rabbitmq-shovel-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_stomp = git_rmq rabbitmq-stomp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_stream = git_rmq rabbitmq-stream $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_toke = git_rmq rabbitmq-toke $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_top = git_rmq rabbitmq-top $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_tracing = git_rmq rabbitmq-tracing $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_trust_store = git_rmq rabbitmq-trust-store $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_test = git_rmq rabbitmq-test $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_dispatch = git_rmq rabbitmq-web-dispatch $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_stomp = git_rmq rabbitmq-web-stomp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_stomp_examples = git_rmq rabbitmq-web-stomp-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt = git_rmq rabbitmq-web-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt_examples = git_rmq rabbitmq-web-mqtt-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_website = git_rmq rabbitmq-website $(current_rmq_ref) $(base_rmq_ref) live master
+dep_toke = git_rmq toke $(current_rmq_ref) $(base_rmq_ref) master
+
+dep_rabbitmq_public_umbrella = git_rmq rabbitmq-public-umbrella $(current_rmq_ref) $(base_rmq_ref) master
+
+# Third-party dependencies version pinning.
+#
+# We do this in this file, which is copied into all projects, to ensure
+# they all use the same versions. It avoids conflicts and makes it
+# possible to work with rabbitmq-public-umbrella.
+
+dep_accept = hex 0.3.5
+dep_cowboy = hex 2.8.0
+dep_cowlib = hex 2.9.1
+dep_jsx = hex 2.11.0
+dep_lager = hex 3.8.0
+dep_prometheus = git https://github.com/deadtrickster/prometheus.erl.git master
+dep_ra = git https://github.com/rabbitmq/ra.git master
+dep_ranch = hex 1.7.1
+dep_recon = hex 2.5.1
+dep_observer_cli = hex 1.5.4
+dep_stdout_formatter = hex 0.2.4
+dep_sysmon_handler = hex 1.3.0
+
+RABBITMQ_COMPONENTS = amqp_client \
+ amqp10_common \
+ amqp10_client \
+ rabbit \
+ rabbit_common \
+ rabbitmq_amqp1_0 \
+ rabbitmq_auth_backend_amqp \
+ rabbitmq_auth_backend_cache \
+ rabbitmq_auth_backend_http \
+ rabbitmq_auth_backend_ldap \
+ rabbitmq_auth_backend_oauth2 \
+ rabbitmq_auth_mechanism_ssl \
+ rabbitmq_aws \
+ rabbitmq_boot_steps_visualiser \
+ rabbitmq_cli \
+ rabbitmq_codegen \
+ rabbitmq_consistent_hash_exchange \
+ rabbitmq_ct_client_helpers \
+ rabbitmq_ct_helpers \
+ rabbitmq_delayed_message_exchange \
+ rabbitmq_dotnet_client \
+ rabbitmq_event_exchange \
+ rabbitmq_federation \
+ rabbitmq_federation_management \
+ rabbitmq_java_client \
+ rabbitmq_jms_client \
+ rabbitmq_jms_cts \
+ rabbitmq_jms_topic_exchange \
+ rabbitmq_lvc_exchange \
+ rabbitmq_management \
+ rabbitmq_management_agent \
+ rabbitmq_management_exchange \
+ rabbitmq_management_themes \
+ rabbitmq_message_timestamp \
+ rabbitmq_metronome \
+ rabbitmq_mqtt \
+ rabbitmq_objc_client \
+ rabbitmq_peer_discovery_aws \
+ rabbitmq_peer_discovery_common \
+ rabbitmq_peer_discovery_consul \
+ rabbitmq_peer_discovery_etcd \
+ rabbitmq_peer_discovery_k8s \
+ rabbitmq_prometheus \
+ rabbitmq_random_exchange \
+ rabbitmq_recent_history_exchange \
+ rabbitmq_routing_node_stamp \
+ rabbitmq_rtopic_exchange \
+ rabbitmq_server_release \
+ rabbitmq_sharding \
+ rabbitmq_shovel \
+ rabbitmq_shovel_management \
+ rabbitmq_stomp \
+ rabbitmq_stream \
+ rabbitmq_toke \
+ rabbitmq_top \
+ rabbitmq_tracing \
+ rabbitmq_trust_store \
+ rabbitmq_web_dispatch \
+ rabbitmq_web_mqtt \
+ rabbitmq_web_mqtt_examples \
+ rabbitmq_web_stomp \
+ rabbitmq_web_stomp_examples \
+ rabbitmq_website
+
+# Erlang.mk does not rebuild dependencies after they have been compiled
+# once, except for those listed in the `$(FORCE_REBUILD)` variable.
+#
+# We want all RabbitMQ components to always be rebuilt: this eases
+# working on several components at the same time.
+
+FORCE_REBUILD = $(RABBITMQ_COMPONENTS)
+
+# Several components have a custom erlang.mk/build.config, mainly
+# to disable eunit. Therefore, we can't use the top-level project's
+# erlang.mk copy.
+NO_AUTOPATCH += $(RABBITMQ_COMPONENTS)
+
+ifeq ($(origin current_rmq_ref),undefined)
+ifneq ($(wildcard .git),)
+current_rmq_ref := $(shell (\
+ ref=$$(LANG=C git branch --list | awk '/^\* \(.*detached / {ref=$$0; sub(/.*detached [^ ]+ /, "", ref); sub(/\)$$/, "", ref); print ref; exit;} /^\* / {ref=$$0; sub(/^\* /, "", ref); print ref; exit}');\
+ if test "$$(git rev-parse --short HEAD)" != "$$ref"; then echo "$$ref"; fi))
+else
+current_rmq_ref := master
+endif
+endif
+export current_rmq_ref
+
+ifeq ($(origin base_rmq_ref),undefined)
+ifneq ($(wildcard .git),)
+possible_base_rmq_ref := master
+ifeq ($(possible_base_rmq_ref),$(current_rmq_ref))
+base_rmq_ref := $(current_rmq_ref)
+else
+base_rmq_ref := $(shell \
+ (git rev-parse --verify -q master >/dev/null && \
+ git rev-parse --verify -q $(possible_base_rmq_ref) >/dev/null && \
+ git merge-base --is-ancestor $$(git merge-base master HEAD) $(possible_base_rmq_ref) && \
+ echo $(possible_base_rmq_ref)) || \
+ echo master)
+endif
+else
+base_rmq_ref := master
+endif
+endif
+export base_rmq_ref
+
+# Repository URL selection.
+#
+# First, we infer other components' location from the current project
+# repository URL, if it's a Git repository:
+# - We take the "origin" remote URL as the base
+# - The current project name and repository name are replaced by the
+#   target's properties:
+#   e.g. rabbitmq-common is replaced by rabbitmq-codegen
+#   e.g. rabbit_common is replaced by rabbitmq_codegen
+#
+# If cloning from this computed location fails, we fall back to the
+# RabbitMQ upstream, which is on GitHub.
+
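+# For example (hypothetical remote): with origin set to
+#   https://github.com/me/rabbitmq-common.git
+# the rabbitmq_codegen dependency is first tried at
+#   https://github.com/me/rabbitmq-codegen.git
+# before falling back to the rabbitmq/rabbitmq-codegen upstream.
+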
+# Macro to transform eg. "rabbit_common" to "rabbitmq-common".
+rmq_cmp_repo_name = $(word 2,$(dep_$(1)))
+
+# Upstream URL for the current project.
+RABBITMQ_COMPONENT_REPO_NAME := $(call rmq_cmp_repo_name,$(PROJECT))
+RABBITMQ_UPSTREAM_FETCH_URL ?= https://github.com/rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git
+RABBITMQ_UPSTREAM_PUSH_URL ?= git@github.com:rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git
+
+# Current URL for the current project. If this is not a Git clone,
+# default to the upstream Git repository.
+ifneq ($(wildcard .git),)
+git_origin_fetch_url := $(shell git config remote.origin.url)
+git_origin_push_url := $(shell git config remote.origin.pushurl || git config remote.origin.url)
+RABBITMQ_CURRENT_FETCH_URL ?= $(git_origin_fetch_url)
+RABBITMQ_CURRENT_PUSH_URL ?= $(git_origin_push_url)
+else
+RABBITMQ_CURRENT_FETCH_URL ?= $(RABBITMQ_UPSTREAM_FETCH_URL)
+RABBITMQ_CURRENT_PUSH_URL ?= $(RABBITMQ_UPSTREAM_PUSH_URL)
+endif
+
+# Macro to replace the following pattern:
+# 1. /foo.git -> /bar.git
+# 2. /foo -> /bar
+# 3. /foo/ -> /bar/
+subst_repo_name = $(patsubst %/$(1)/%,%/$(2)/%,$(patsubst %/$(1),%/$(2),$(patsubst %/$(1).git,%/$(2).git,$(3))))
+
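+# For example, $(call subst_repo_name,rabbitmq-common,rabbitmq-codegen,$(URL))
+# rewrites .../rabbitmq-common.git into .../rabbitmq-codegen.git.
+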
+# Macro to replace both the project's name (eg. "rabbit_common") and
+# repository name (eg. "rabbitmq-common") by the target's equivalent.
+#
+# This macro is kept on one line because we don't want whitespace in
+# the returned value, as it's used in $(dep_fetch_git_rmq) in a shell
+# single-quoted string.
+dep_rmq_repo = $(if $(dep_$(2)),$(call subst_repo_name,$(PROJECT),$(2),$(call subst_repo_name,$(RABBITMQ_COMPONENT_REPO_NAME),$(call rmq_cmp_repo_name,$(2)),$(1))),$(pkg_$(1)_repo))
+
+dep_rmq_commits = $(if $(dep_$(1)), \
+ $(wordlist 3,$(words $(dep_$(1))),$(dep_$(1))), \
+ $(pkg_$(1)_commit))
+
+define dep_fetch_git_rmq
+ fetch_url1='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_FETCH_URL),$(1))'; \
+ fetch_url2='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_FETCH_URL),$(1))'; \
+ if test "$$$$fetch_url1" != '$(RABBITMQ_CURRENT_FETCH_URL)' && \
+ git clone -q -n -- "$$$$fetch_url1" $(DEPS_DIR)/$(call dep_name,$(1)); then \
+ fetch_url="$$$$fetch_url1"; \
+ push_url='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_PUSH_URL),$(1))'; \
+ elif git clone -q -n -- "$$$$fetch_url2" $(DEPS_DIR)/$(call dep_name,$(1)); then \
+ fetch_url="$$$$fetch_url2"; \
+ push_url='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_PUSH_URL),$(1))'; \
+ fi; \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && ( \
+ $(foreach ref,$(call dep_rmq_commits,$(1)), \
+ git checkout -q $(ref) >/dev/null 2>&1 || \
+ ) \
+ (echo "error: no valid pathspec among: $(call dep_rmq_commits,$(1))" \
+ 1>&2 && false) ) && \
+ (test "$$$$fetch_url" = "$$$$push_url" || \
+ git remote set-url --push origin "$$$$push_url")
+endef
+
+# --------------------------------------------------------------------
+# Component distribution.
+# --------------------------------------------------------------------
+
+list-dist-deps::
+ @:
+
+prepare-dist::
+ @:
+
+# --------------------------------------------------------------------
+# Umbrella-specific settings.
+# --------------------------------------------------------------------
+
+# If the top-level project is a RabbitMQ component, we override
+# $(DEPS_DIR) for this project to point to the top-level's one.
+#
+# We also verify that the guessed DEPS_DIR is actually named `deps`,
+# to rule out any situation where finding a `rabbitmq-components.mk` in
+# upper directories is a coincidence.
+
+possible_deps_dir_1 = $(abspath ..)
+possible_deps_dir_2 = $(abspath ../../..)
+
+ifeq ($(notdir $(possible_deps_dir_1)),deps)
+ifneq ($(wildcard $(possible_deps_dir_1)/../rabbitmq-components.mk),)
+deps_dir_overriden = 1
+DEPS_DIR ?= $(possible_deps_dir_1)
+DISABLE_DISTCLEAN = 1
+endif
+endif
+
+ifeq ($(deps_dir_overriden),)
+ifeq ($(notdir $(possible_deps_dir_2)),deps)
+ifneq ($(wildcard $(possible_deps_dir_2)/../rabbitmq-components.mk),)
+deps_dir_overriden = 1
+DEPS_DIR ?= $(possible_deps_dir_2)
+DISABLE_DISTCLEAN = 1
+endif
+endif
+endif
+
+ifneq ($(wildcard UMBRELLA.md),)
+DISABLE_DISTCLEAN = 1
+endif
+
+# We disable `make distclean` so $(DEPS_DIR) is not accidentally removed.
+
+ifeq ($(DISABLE_DISTCLEAN),1)
+ifneq ($(filter distclean distclean-deps,$(MAKECMDGOALS)),)
+SKIP_DEPS = 1
+endif
+endif
diff --git a/deps/amqp_client/src/amqp_auth_mechanisms.erl b/deps/amqp_client/src/amqp_auth_mechanisms.erl
new file mode 100644
index 0000000000..549cd17376
--- /dev/null
+++ b/deps/amqp_client/src/amqp_auth_mechanisms.erl
@@ -0,0 +1,44 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+%% @private
+-module(amqp_auth_mechanisms).
+
+-include("amqp_client.hrl").
+
+-export([plain/3, amqplain/3, external/3, crdemo/3]).
+
+%%---------------------------------------------------------------------------
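+%% Each mechanism is called as Fun(Challenge, Params, State) and returns
+%% {Response, NewState}; a State of `init` asks the fun for the mechanism
+%% name and its initial state instead.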
+
+plain(none, _, init) ->
+ {<<"PLAIN">>, []};
+plain(none, #amqp_params_network{username = Username,
+ password = Password}, _State) ->
+ DecryptedPassword = credentials_obfuscation:decrypt(Password),
+ {<<0, Username/binary, 0, DecryptedPassword/binary>>, _State}.
+
+amqplain(none, _, init) ->
+ {<<"AMQPLAIN">>, []};
+amqplain(none, #amqp_params_network{username = Username,
+ password = Password}, _State) ->
+ LoginTable = [{<<"LOGIN">>, longstr, Username},
+ {<<"PASSWORD">>, longstr, credentials_obfuscation:decrypt(Password)}],
+ {rabbit_binary_generator:generate_table(LoginTable), _State}.
+
+external(none, _, init) ->
+ {<<"EXTERNAL">>, []};
+external(none, _, _State) ->
+ {<<"">>, _State}.
+
+crdemo(none, _, init) ->
+ {<<"RABBIT-CR-DEMO">>, 0};
+crdemo(none, #amqp_params_network{username = Username}, 0) ->
+ {Username, 1};
+crdemo(<<"Please tell me your password">>,
+ #amqp_params_network{password = Password}, 1) ->
+ DecryptedPassword = credentials_obfuscation:decrypt(Password),
+ {<<"My password is ", DecryptedPassword/binary>>, 2}.
diff --git a/deps/amqp_client/src/amqp_channel.erl b/deps/amqp_client/src/amqp_channel.erl
new file mode 100644
index 0000000000..9e95df4fe3
--- /dev/null
+++ b/deps/amqp_client/src/amqp_channel.erl
@@ -0,0 +1,1010 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+%% @type close_reason(Type) = {shutdown, amqp_reason(Type)}.
+%% @type amqp_reason(Type) = {Type, Code, Text}
+%% Code = non_neg_integer()
+%% Text = binary().
+%% @doc This module encapsulates the client's view of an AMQP
+%% channel. Each server side channel is represented by an amqp_channel
+%% process on the client side. Channel processes are created using the
+%% {@link amqp_connection} module. Channel processes are supervised
+%% under amqp_client's supervision tree.<br/>
+%% <br/>
+%% In case of a failure or an AMQP error, the channel process exits with a
+%% meaningful exit reason:<br/>
+%% <br/>
+%% <table>
+%% <tr>
+%% <td><strong>Cause</strong></td>
+%% <td><strong>Exit reason</strong></td>
+%% </tr>
+%% <tr>
+%% <td>Any reason, where Code would have been 200 otherwise</td>
+%% <td>```normal'''</td>
+%% </tr>
+%% <tr>
+%% <td>User application calls amqp_channel:close/3</td>
+%% <td>```close_reason(app_initiated_close)'''</td>
+%% </tr>
+%% <tr>
+%% <td>Server closes channel (soft error)</td>
+%% <td>```close_reason(server_initiated_close)'''</td>
+%% </tr>
+%% <tr>
+%% <td>Server misbehaved (did not follow protocol)</td>
+%% <td>```close_reason(server_misbehaved)'''</td>
+%% </tr>
+%% <tr>
+%% <td>Connection is closing (causing all channels to clean up and
+%% close)</td>
+%% <td>```{shutdown, {connection_closing, amqp_reason(atom())}}'''</td>
+%% </tr>
+%% <tr>
+%% <td>Other error</td>
+%% <td>(various error reasons, causing more detailed logging)</td>
+%% </tr>
+%% </table>
+%% <br/>
+%% See type definitions below.
+-module(amqp_channel).
+
+-include("amqp_client_internal.hrl").
+
+-behaviour(gen_server).
+
+-export([call/2, call/3, cast/2, cast/3, cast_flow/3]).
+-export([close/1, close/3]).
+-export([register_return_handler/2, unregister_return_handler/1,
+ register_flow_handler/2, unregister_flow_handler/1,
+ register_confirm_handler/2, unregister_confirm_handler/1]).
+-export([call_consumer/2, subscribe/3]).
+-export([next_publish_seqno/1, wait_for_confirms/1, wait_for_confirms/2,
+ wait_for_confirms_or_die/1, wait_for_confirms_or_die/2]).
+-export([start_link/5, set_writer/2, connection_closing/3, open/1,
+ enable_delivery_flow_control/1, notify_received/1]).
+
+-export([init/1, terminate/2, code_change/3, handle_call/3, handle_cast/2,
+ handle_info/2]).
+
+-define(TIMEOUT_FLUSH, 60000).
+
+-record(state, {number,
+ connection,
+ consumer,
+ driver,
+ rpc_requests = queue:new(),
+ closing = false, %% false |
+ %% {just_channel, Reason} |
+ %% {connection, Reason}
+ writer,
+ return_handler = none,
+ confirm_handler = none,
+ next_pub_seqno = 0,
+ flow_active = true,
+ flow_handler = none,
+ unconfirmed_set = gb_sets:new(),
+ waiting_set = gb_trees:empty(),
+ only_acks_received = true,
+
+ %% true | false, only relevant in the direct
+ %% client case.
+ %% when true, consumers will manually notify
+ %% queue pids using rabbit_amqqueue_common:notify_sent/2
+ %% to prevent the queue from overwhelming slow
+ %% consumers that use automatic acknowledgement
+ %% mode.
+ delivery_flow_control = false
+ }).
+
+%%---------------------------------------------------------------------------
+%% Type Definitions
+%%---------------------------------------------------------------------------
+
+%% @type amqp_method().
+%% This abstract datatype represents the set of methods that comprise
+%% the AMQP execution model. As indicated in the overview, the
+%% attributes of each method in the execution model are described in
+%% the protocol documentation. The Erlang record definitions are
+%% autogenerated from a parseable version of the specification. Most
+%% fields in the generated records have sensible default values that
+%% you need not worry about for simple uses of the client
+%% library.
+
+%% @type amqp_msg() = #amqp_msg{}.
+%% This is the content encapsulated in content-bearing AMQP methods. It
+%% contains the following fields:
+%% <ul>
+%% <li>props :: class_property() - A class property record, defaults to
+%% #'P_basic'{}</li>
+%% <li>payload :: binary() - The arbitrary data payload</li>
+%% </ul>
+
+%%---------------------------------------------------------------------------
+%% AMQP Channel API methods
+%%---------------------------------------------------------------------------
+
+%% @spec (Channel, Method) -> Result
+%% @doc This is equivalent to amqp_channel:call(Channel, Method, none).
+call(Channel, Method) ->
+ gen_server:call(Channel, {call, Method, none, self()}, amqp_util:call_timeout()).
+
+%% @spec (Channel, Method, Content) -> Result
+%% where
+%% Channel = pid()
+%% Method = amqp_method()
+%% Content = amqp_msg() | none
+%% Result = amqp_method() | ok | blocked | closing
+%% @doc This sends an AMQP method on the channel.
+%% For content-bearing methods, Content has to be an amqp_msg(), whereas
+%% for non-content-bearing methods, it needs to be the atom 'none'.<br/>
+%% In the case of synchronous methods, this function blocks until the
+%% corresponding reply comes back from the server and returns it.
+%% In the case of asynchronous methods, the function blocks until the method
+%% gets sent on the wire and returns the atom 'ok' on success.<br/>
+%% This will return the atom 'blocked' if the server has
+%% throttled the client for flow control reasons. This will return the
+%% atom 'closing' if the channel is in the process of shutting down.<br/>
+%% Note that for asynchronous methods, the synchronicity implied by
+%% 'call' only means that the client has transmitted the method to
+%% the broker. It does not necessarily imply that the broker has
+%% accepted responsibility for the message.
+call(Channel, Method, Content) ->
+ gen_server:call(Channel, {call, Method, Content, self()}, amqp_util:call_timeout()).
+
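+%% A minimal usage sketch (illustrative, not part of this module;
+%% assumes Channel is an open channel pid, and the queue name is
+%% hypothetical):
+%%
+%%   Declare = #'queue.declare'{queue = <<"example_queue">>},
+%%   #'queue.declare_ok'{} = amqp_channel:call(Channel, Declare),
+%%   Publish = #'basic.publish'{exchange = <<>>,
+%%                              routing_key = <<"example_queue">>},
+%%   ok = amqp_channel:call(Channel, Publish,
+%%                          #amqp_msg{payload = <<"hello">>}),
+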
+%% @spec (Channel, Method) -> ok
+%% @doc This is equivalent to amqp_channel:cast(Channel, Method, none).
+cast(Channel, Method) ->
+ gen_server:cast(Channel, {cast, Method, none, self(), noflow}).
+
+%% @spec (Channel, Method, Content) -> ok
+%% where
+%% Channel = pid()
+%% Method = amqp_method()
+%% Content = amqp_msg() | none
+%% @doc This function is the same as {@link call/3}, except that it returns
+%% immediately with the atom 'ok', without blocking the caller process.
+%% This function is not recommended with synchronous methods, since there is no
+%% way to verify that the server has received the method.
+cast(Channel, Method, Content) ->
+ gen_server:cast(Channel, {cast, Method, Content, self(), noflow}).
+
+%% @spec (Channel, Method, Content) -> ok
+%% where
+%% Channel = pid()
+%% Method = amqp_method()
+%% Content = amqp_msg() | none
+%% @doc Like cast/3, with flow control.
+cast_flow(Channel, Method, Content) ->
+ credit_flow:send(Channel),
+ gen_server:cast(Channel, {cast, Method, Content, self(), flow}).
+
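+%% A sketch (illustrative; Publish/Msg as in the call/3 sketch above):
+%% cast_flow/3 is used exactly like cast/3, but the caller participates
+%% in credit flow and may be throttled by credit_flow when the channel
+%% cannot keep up:
+%%
+%%   ok = amqp_channel:cast_flow(Channel, Publish, Msg),
+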
+%% @spec (Channel) -> ok | closing
+%% where
+%% Channel = pid()
+%% @doc Closes the channel, invokes
+%% close(Channel, 200, &lt;&lt;"Goodbye"&gt;&gt;).
+close(Channel) ->
+ close(Channel, 200, <<"Goodbye">>).
+
+%% @spec (Channel, Code, Text) -> ok | closing
+%% where
+%% Channel = pid()
+%% Code = integer()
+%% Text = binary()
+%% @doc Closes the channel, allowing the caller to supply a reply code and
+%% text. If the channel is already closing, the atom 'closing' is returned.
+close(Channel, Code, Text) ->
+ gen_server:call(Channel, {close, Code, Text}, amqp_util:call_timeout()).
+
+%% @spec (Channel) -> integer()
+%% where
+%% Channel = pid()
+%% @doc When in confirm mode, returns the sequence number of the next
+%% message to be published.
+next_publish_seqno(Channel) ->
+ gen_server:call(Channel, next_publish_seqno, amqp_util:call_timeout()).
+
+%% @spec (Channel) -> boolean() | 'timeout'
+%% where
+%% Channel = pid()
+%% @doc Wait until all messages published since the last call have
+%% been either ack'd or nack'd by the broker. Note that when called on
+%% a channel that is not in confirm mode, wait_for_confirms throws an
+%% exception.
+%% @param Channel: the channel on which to wait.
+%% @end
+wait_for_confirms(Channel) ->
+ wait_for_confirms(Channel, ?WAIT_FOR_CONFIRMS_TIMEOUT).
+
+%% @spec (Channel, Timeout) -> boolean() | 'timeout'
+%% where
+%% Channel = pid()
+%% Timeout = non_neg_integer() | {non_neg_integer(), second | millisecond} | 'infinity'
+%% @doc Wait until all messages published since the last call have
+%% been either ack'd or nack'd by the broker or the timeout expires.
+%% Note that when called on a channel that is not in confirm mode,
+%% wait_for_confirms throws an exception.
+%% @param Channel: the channel on which to wait.
+%% @param Timeout: the wait timeout; a bare integer is interpreted
+%% as seconds.
+%% @end
+wait_for_confirms(Channel, {Timeout, second}) ->
+ do_wait_for_confirms(Channel, second_to_millisecond(Timeout));
+wait_for_confirms(Channel, {Timeout, millisecond}) ->
+ do_wait_for_confirms(Channel, Timeout);
+%% 'infinity' must not go through the seconds conversion below
+wait_for_confirms(Channel, infinity) ->
+    do_wait_for_confirms(Channel, infinity);
+wait_for_confirms(Channel, Timeout) ->
+    do_wait_for_confirms(Channel, second_to_millisecond(Timeout)).
+
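+%% A sketch of waiting for publisher confirms (illustrative; assumes
+%% Channel is open and Publish/Msg are bound as in the call/3 sketch
+%% above):
+%%
+%%   #'confirm.select_ok'{} =
+%%       amqp_channel:call(Channel, #'confirm.select'{}),
+%%   ok = amqp_channel:cast(Channel, Publish, Msg),
+%%   case amqp_channel:wait_for_confirms(Channel, {5, second}) of
+%%       true    -> all_confirmed;
+%%       false   -> nacks_received;
+%%       timeout -> gave_up
+%%   end,
+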
+%% @spec (Channel) -> true
+%% where
+%% Channel = pid()
+%% @doc Behaves the same as wait_for_confirms/1, but if a nack is
+%% received, the calling process is immediately sent an
+%% exit(nacks_received).
+%% @param Channel: the channel on which to wait.
+%% @end
+wait_for_confirms_or_die(Channel) ->
+ wait_for_confirms_or_die(Channel, ?WAIT_FOR_CONFIRMS_TIMEOUT).
+
+%% @spec (Channel, Timeout) -> true
+%% where
+%% Channel = pid()
+%% Timeout = non_neg_integer() | {non_neg_integer(), second | millisecond} | 'infinity'
+%% @doc Behaves the same as wait_for_confirms/2, but if a nack is
+%% received, the calling process is immediately sent an
+%% exit(nacks_received). If the timeout expires, the calling process is
+%% sent an exit(timeout).
+%% @param Channel: the channel on which to wait.
+%% @param Timeout: the wait timeout; a bare integer is interpreted
+%% as seconds.
+%% @end
+wait_for_confirms_or_die(Channel, Timeout) ->
+ case wait_for_confirms(Channel, Timeout) of
+ timeout -> close(Channel, 200, <<"Confirm Timeout">>),
+ exit(timeout);
+ false -> close(Channel, 200, <<"Nacks Received">>),
+ exit(nacks_received);
+ true -> true
+ end.
+
+%% @spec (Channel, ReturnHandler) -> ok
+%% where
+%% Channel = pid()
+%% ReturnHandler = pid()
+%% @doc This registers a handler to deal with returned messages. The
+%% registered process will receive {#'basic.return'{}, #amqp_msg{}}
+%% tuples.
+register_return_handler(Channel, ReturnHandler) ->
+ gen_server:cast(Channel, {register_return_handler, ReturnHandler} ).
+
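+%% A sketch of handling returns (illustrative; the routing key is
+%% hypothetical). A message published with mandatory = true that cannot
+%% be routed comes back to the registered handler as a
+%% {#'basic.return'{}, #amqp_msg{}} tuple:
+%%
+%%   amqp_channel:register_return_handler(Channel, self()),
+%%   Publish = #'basic.publish'{routing_key = <<"no.such.queue">>,
+%%                              mandatory = true},
+%%   ok = amqp_channel:cast(Channel, Publish,
+%%                          #amqp_msg{payload = <<"x">>}),
+%%   receive
+%%       {#'basic.return'{reply_text = Text}, _ReturnedMsg} -> Text
+%%   end,
+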
+%% @spec (Channel) -> ok
+%% where
+%% Channel = pid()
+%% @doc Removes the return handler, if it exists. Does nothing if there is no
+%% such handler.
+unregister_return_handler(Channel) ->
+ gen_server:cast(Channel, unregister_return_handler).
+
+%% @spec (Channel, ConfirmHandler) -> ok
+%% where
+%% Channel = pid()
+%%      ConfirmHandler = pid()
+%% @doc This registers a handler to deal with confirm-related
+%% messages. The registered process will receive #basic.ack{} and
+%% #basic.nack{} commands.
+register_confirm_handler(Channel, ConfirmHandler) ->
+ gen_server:cast(Channel, {register_confirm_handler, ConfirmHandler} ).
+
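+%% A sketch of a confirm handler (illustrative): the registered process
+%% receives the ack/nack records directly:
+%%
+%%   amqp_channel:register_confirm_handler(Channel, self()),
+%%   receive
+%%       #'basic.ack'{delivery_tag = Tag}  -> {confirmed, Tag};
+%%       #'basic.nack'{delivery_tag = Tag} -> {rejected, Tag}
+%%   end,
+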
+%% @spec (Channel) -> ok
+%% where
+%% Channel = pid()
+%% @doc Removes the confirm handler, if it exists. Does nothing if there is no
+%% such handler.
+unregister_confirm_handler(Channel) ->
+ gen_server:cast(Channel, unregister_confirm_handler).
+
+%% @spec (Channel, FlowHandler) -> ok
+%% where
+%% Channel = pid()
+%% FlowHandler = pid()
+%% @doc This registers a handler to deal with channel flow notifications.
+%% The registered process will receive #channel.flow{} records.
+register_flow_handler(Channel, FlowHandler) ->
+ gen_server:cast(Channel, {register_flow_handler, FlowHandler} ).
+
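+%% A sketch of a flow handler (illustrative): the registered process
+%% receives the raw #'channel.flow'{} record when the broker toggles
+%% flow control:
+%%
+%%   amqp_channel:register_flow_handler(Channel, self()),
+%%   receive
+%%       #'channel.flow'{active = false} -> publishing_blocked;
+%%       #'channel.flow'{active = true}  -> publishing_resumed
+%%   end,
+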
+%% @spec (Channel) -> ok
+%% where
+%% Channel = pid()
+%% @doc Removes the flow handler, if it exists. Does nothing if there is no
+%% such handler.
+unregister_flow_handler(Channel) ->
+ gen_server:cast(Channel, unregister_flow_handler).
+
+%% @spec (Channel, Msg) -> ok
+%% where
+%% Channel = pid()
+%% Msg = any()
+%% @doc This causes the channel to invoke Consumer:handle_call/2,
+%% where Consumer is the amqp_gen_consumer implementation registered with
+%% the channel.
+call_consumer(Channel, Msg) ->
+ gen_server:call(Channel, {call_consumer, Msg}, amqp_util:call_timeout()).
+
+%% @spec (Channel, BasicConsume, Subscriber) -> Result
+%% where
+%%      Channel = pid()
+%%      BasicConsume = amqp_method()
+%%      Subscriber = pid()
+%%      Result = amqp_method() | ok | blocked | closing
+%% @doc Subscribe the given pid to a queue using the specified
+%% basic.consume method. Returns the server's #'basic.consume_ok'{}
+%% reply ('ok' when the nowait flag is set).
+subscribe(Channel, BasicConsume = #'basic.consume'{}, Subscriber) ->
+ gen_server:call(Channel, {subscribe, BasicConsume, Subscriber}, amqp_util:call_timeout()).
+
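+%% A consume sketch (illustrative; queue name hypothetical). With the
+%% default amqp_selective_consumer, the subscriber process receives
+%% deliveries as {#'basic.deliver'{}, #amqp_msg{}} tuples:
+%%
+%%   Consume = #'basic.consume'{queue = <<"example_queue">>},
+%%   #'basic.consume_ok'{} = amqp_channel:subscribe(Channel, Consume,
+%%                                                  self()),
+%%   receive
+%%       {#'basic.deliver'{delivery_tag = DTag}, #amqp_msg{}} ->
+%%           amqp_channel:cast(Channel,
+%%                             #'basic.ack'{delivery_tag = DTag})
+%%   end,
+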
+%%---------------------------------------------------------------------------
+%% Internal interface
+%%---------------------------------------------------------------------------
+
+%% @private
+start_link(Driver, Connection, ChannelNumber, Consumer, Identity) ->
+ gen_server:start_link(
+ ?MODULE, [Driver, Connection, ChannelNumber, Consumer, Identity], []).
+
+set_writer(Pid, Writer) ->
+ gen_server:cast(Pid, {set_writer, Writer}).
+
+enable_delivery_flow_control(Pid) ->
+ gen_server:cast(Pid, enable_delivery_flow_control).
+
+notify_received({Pid, QPid, ServerChPid}) ->
+ gen_server:cast(Pid, {send_notify, {QPid, ServerChPid}}).
+
+%% @private
+connection_closing(Pid, ChannelCloseType, Reason) ->
+ gen_server:cast(Pid, {connection_closing, ChannelCloseType, Reason}).
+
+%% @private
+open(Pid) ->
+ gen_server:call(Pid, open, amqp_util:call_timeout()).
+
+%%---------------------------------------------------------------------------
+%% gen_server callbacks
+%%---------------------------------------------------------------------------
+
+%% @private
+init([Driver, Connection, ChannelNumber, Consumer, Identity]) ->
+ ?store_proc_name(Identity),
+ {ok, #state{connection = Connection,
+ driver = Driver,
+ number = ChannelNumber,
+ consumer = Consumer}}.
+
+%% @private
+handle_call(open, From, State) ->
+ {noreply, rpc_top_half(#'channel.open'{}, none, From, none, noflow, State)};
+%% @private
+handle_call({close, Code, Text}, From, State) ->
+ handle_close(Code, Text, From, State);
+%% @private
+handle_call({call, Method, AmqpMsg, Sender}, From, State) ->
+ handle_method_to_server(Method, AmqpMsg, From, Sender, noflow, State);
+%% Handles the delivery of messages from a direct channel
+%% @private
+handle_call({send_command_sync, Method, Content}, From, State) ->
+ Ret = handle_method_from_server(Method, Content, State),
+ gen_server:reply(From, ok),
+ Ret;
+%% Handles the delivery of messages from a direct channel
+%% @private
+handle_call({send_command_sync, Method}, From, State) ->
+ Ret = handle_method_from_server(Method, none, State),
+ gen_server:reply(From, ok),
+ Ret;
+%% @private
+handle_call(next_publish_seqno, _From,
+ State = #state{next_pub_seqno = SeqNo}) ->
+ {reply, SeqNo, State};
+handle_call({wait_for_confirms, Timeout}, From, State) ->
+ handle_wait_for_confirms(From, Timeout, State);
+%% @private
+handle_call({call_consumer, Msg}, _From,
+ State = #state{consumer = Consumer}) ->
+ {reply, amqp_gen_consumer:call_consumer(Consumer, Msg), State};
+%% @private
+handle_call({subscribe, BasicConsume, Subscriber}, From, State) ->
+ handle_method_to_server(BasicConsume, none, From, Subscriber, noflow,
+ State).
+
+%% @private
+handle_cast({set_writer, Writer}, State = #state{driver = direct}) ->
+ link(Writer),
+ {noreply, State#state{writer = Writer}};
+handle_cast({set_writer, Writer}, State) ->
+ {noreply, State#state{writer = Writer}};
+%% @private
+handle_cast(enable_delivery_flow_control, State) ->
+ {noreply, State#state{delivery_flow_control = true}};
+%% @private
+handle_cast({send_notify, {QPid, ChPid}}, State) ->
+ rabbit_amqqueue_common:notify_sent(QPid, ChPid),
+ {noreply, State};
+%% @private
+handle_cast({cast, Method, AmqpMsg, Sender, noflow}, State) ->
+ handle_method_to_server(Method, AmqpMsg, none, Sender, noflow, State);
+handle_cast({cast, Method, AmqpMsg, Sender, flow}, State) ->
+ credit_flow:ack(Sender),
+ handle_method_to_server(Method, AmqpMsg, none, Sender, flow, State);
+%% @private
+handle_cast({register_return_handler, ReturnHandler}, State) ->
+ Ref = erlang:monitor(process, ReturnHandler),
+ {noreply, State#state{return_handler = {ReturnHandler, Ref}}};
+%% @private
+handle_cast(unregister_return_handler,
+ State = #state{return_handler = {_ReturnHandler, Ref}}) ->
+ erlang:demonitor(Ref),
+ {noreply, State#state{return_handler = none}};
+%% @private
+handle_cast({register_confirm_handler, ConfirmHandler}, State) ->
+ Ref = erlang:monitor(process, ConfirmHandler),
+ {noreply, State#state{confirm_handler = {ConfirmHandler, Ref}}};
+%% @private
+handle_cast(unregister_confirm_handler,
+ State = #state{confirm_handler = {_ConfirmHandler, Ref}}) ->
+ erlang:demonitor(Ref),
+ {noreply, State#state{confirm_handler = none}};
+%% @private
+handle_cast({register_flow_handler, FlowHandler}, State) ->
+ Ref = erlang:monitor(process, FlowHandler),
+ {noreply, State#state{flow_handler = {FlowHandler, Ref}}};
+%% @private
+handle_cast(unregister_flow_handler,
+ State = #state{flow_handler = {_FlowHandler, Ref}}) ->
+ erlang:demonitor(Ref),
+ {noreply, State#state{flow_handler = none}};
+%% Received from channels manager
+%% @private
+handle_cast({method, Method, Content, noflow}, State) ->
+ handle_method_from_server(Method, Content, State);
+%% Handles the situation when the connection closes without closing the channel
+%% beforehand. The channel must block all further RPCs,
+%% flush the RPC queue (optional), and terminate
+%% @private
+handle_cast({connection_closing, CloseType, Reason}, State) ->
+ handle_connection_closing(CloseType, Reason, State);
+%% @private
+handle_cast({shutdown, Shutdown}, State) ->
+ handle_shutdown(Shutdown, State).
+
+%% Received from rabbit_channel in the direct case
+%% @private
+handle_info({send_command, Method}, State) ->
+ handle_method_from_server(Method, none, State);
+%% Received from rabbit_channel in the direct case
+%% @private
+handle_info({send_command, Method, Content}, State) ->
+ handle_method_from_server(Method, Content, State);
+%% Received from rabbit_channel in the direct case
+%% @private
+handle_info({send_command_and_notify, QPid, ChPid,
+ Method = #'basic.deliver'{}, Content},
+ State = #state{delivery_flow_control = MFC}) ->
+ case MFC of
+ false -> handle_method_from_server(Method, Content, State),
+ rabbit_amqqueue_common:notify_sent(QPid, ChPid);
+ true -> handle_method_from_server(Method, Content,
+ {self(), QPid, ChPid}, State)
+ end,
+ {noreply, State};
+%% This comes from the writer or rabbit_channel
+%% @private
+handle_info({channel_exit, _ChNumber, Reason}, State) ->
+ handle_channel_exit(Reason, State);
+%% This comes from rabbit_channel in the direct case
+handle_info({channel_closing, ChPid}, State) ->
+ ok = rabbit_channel_common:ready_for_close(ChPid),
+ {noreply, State};
+%% @private
+handle_info({bump_credit, Msg}, State) ->
+ credit_flow:handle_bump_msg(Msg),
+ {noreply, State};
+%% @private
+handle_info(timed_out_flushing_channel, State) ->
+ ?LOG_WARN("Channel (~p) closing: timed out flushing while "
+ "connection closing~n", [self()]),
+ {stop, timed_out_flushing_channel, State};
+%% @private
+handle_info({'DOWN', _, process, ReturnHandler, shutdown},
+ State = #state{return_handler = {ReturnHandler, _Ref}}) ->
+ {noreply, State#state{return_handler = none}};
+handle_info({'DOWN', _, process, ReturnHandler, Reason},
+ State = #state{return_handler = {ReturnHandler, _Ref}}) ->
+ ?LOG_WARN("Channel (~p): Unregistering return handler ~p because it died. "
+ "Reason: ~p~n", [self(), ReturnHandler, Reason]),
+ {noreply, State#state{return_handler = none}};
+%% @private
+handle_info({'DOWN', _, process, ConfirmHandler, shutdown},
+ State = #state{confirm_handler = {ConfirmHandler, _Ref}}) ->
+ {noreply, State#state{confirm_handler = none}};
+handle_info({'DOWN', _, process, ConfirmHandler, Reason},
+ State = #state{confirm_handler = {ConfirmHandler, _Ref}}) ->
+ ?LOG_WARN("Channel (~p): Unregistering confirm handler ~p because it died. "
+ "Reason: ~p~n", [self(), ConfirmHandler, Reason]),
+ {noreply, State#state{confirm_handler = none}};
+%% @private
+handle_info({'DOWN', _, process, FlowHandler, shutdown},
+ State = #state{flow_handler = {FlowHandler, _Ref}}) ->
+ {noreply, State#state{flow_handler = none}};
+handle_info({'DOWN', _, process, FlowHandler, Reason},
+ State = #state{flow_handler = {FlowHandler, _Ref}}) ->
+ ?LOG_WARN("Channel (~p): Unregistering flow handler ~p because it died. "
+ "Reason: ~p~n", [self(), FlowHandler, Reason]),
+ {noreply, State#state{flow_handler = none}};
+handle_info({'DOWN', _, process, QPid, _Reason}, State) ->
+ rabbit_amqqueue_common:notify_sent_queue_down(QPid),
+ {noreply, State};
+handle_info({confirm_timeout, From}, State = #state{waiting_set = WSet}) ->
+ case gb_trees:lookup(From, WSet) of
+ none ->
+ {noreply, State};
+ {value, _} ->
+ gen_server:reply(From, timeout),
+ {noreply, State#state{waiting_set = gb_trees:delete(From, WSet)}}
+ end.
+
+%% @private
+terminate(_Reason, State) ->
+ flush_writer(State),
+ State.
+
+%% @private
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+%%---------------------------------------------------------------------------
+%% RPC mechanism
+%%---------------------------------------------------------------------------
+
+handle_method_to_server(Method, AmqpMsg, From, Sender, Flow,
+ State = #state{unconfirmed_set = USet}) ->
+ case {check_invalid_method(Method), From,
+ check_block(Method, AmqpMsg, State)} of
+ {ok, _, ok} ->
+ State1 = case {Method, State#state.next_pub_seqno} of
+ {#'confirm.select'{}, 0} ->
+ %% The confirm seqno is set to 1 on the
+ %% first confirm.select only.
+ State#state{next_pub_seqno = 1};
+ {#'basic.publish'{}, 0} ->
+ State;
+ {#'basic.publish'{}, SeqNo} ->
+ State#state{unconfirmed_set =
+ gb_sets:add(SeqNo, USet),
+ next_pub_seqno = SeqNo + 1};
+ _ ->
+ State
+ end,
+ {noreply, rpc_top_half(Method, build_content(AmqpMsg),
+ From, Sender, Flow, State1)};
+ {ok, none, BlockReply} ->
+ ?LOG_WARN("Channel (~p): discarding method ~p in cast.~n"
+ "Reason: ~p~n", [self(), Method, BlockReply]),
+ {noreply, State};
+ {ok, _, BlockReply} ->
+ {reply, BlockReply, State};
+ {{_, InvalidMethodMessage}, none, _} ->
+ ?LOG_WARN("Channel (~p): ignoring cast of ~p method. " ++
+ InvalidMethodMessage ++ "~n", [self(), Method]),
+ {noreply, State};
+ {{InvalidMethodReply, _}, _, _} ->
+ {reply, {error, InvalidMethodReply}, State}
+ end.
+
+handle_close(Code, Text, From, State) ->
+ Close = #'channel.close'{reply_code = Code,
+ reply_text = Text,
+ class_id = 0,
+ method_id = 0},
+ case check_block(Close, none, State) of
+ ok -> {noreply, rpc_top_half(Close, none, From, none, noflow,
+ State)};
+ BlockReply -> {reply, BlockReply, State}
+ end.
+
+rpc_top_half(Method, Content, From, Sender, Flow,
+ State0 = #state{rpc_requests = RequestQueue}) ->
+ State1 = State0#state{
+ rpc_requests = queue:in({From, Sender, Method, Content, Flow},
+ RequestQueue)},
+ IsFirstElement = queue:is_empty(RequestQueue),
+ if IsFirstElement -> do_rpc(State1);
+ true -> State1
+ end.
+
+rpc_bottom_half(Reply, State = #state{rpc_requests = RequestQueue}) ->
+ {{value, {From, _Sender, _Method, _Content, _Flow}}, RequestQueue1} =
+ queue:out(RequestQueue),
+ case From of
+ none -> ok;
+ _ -> gen_server:reply(From, Reply)
+ end,
+ do_rpc(State#state{rpc_requests = RequestQueue1}).
+
+do_rpc(State = #state{rpc_requests = Q,
+ closing = Closing}) ->
+ case queue:out(Q) of
+ {{value, {From, Sender, Method, Content, Flow}}, NewQ} ->
+ State1 = pre_do(Method, Content, Sender, State),
+ DoRet = do(Method, Content, Flow, State1),
+ case ?PROTOCOL:is_method_synchronous(Method) of
+ true -> State1;
+ false -> case {From, DoRet} of
+ {none, _} -> ok;
+ {_, ok} -> gen_server:reply(From, ok);
+ _ -> ok
+ %% Do not reply if error in do. Expecting
+ %% {channel_exit, _, _}
+ end,
+ do_rpc(State1#state{rpc_requests = NewQ})
+ end;
+ {empty, NewQ} ->
+ case Closing of
+ {connection, Reason} ->
+ gen_server:cast(self(),
+ {shutdown, {connection_closing, Reason}});
+ _ ->
+ ok
+ end,
+ State#state{rpc_requests = NewQ}
+ end.
+
+pending_rpc_method(#state{rpc_requests = Q}) ->
+ {value, {_From, _Sender, Method, _Content, _Flow}} = queue:peek(Q),
+ Method.
+
+pre_do(#'channel.close'{reply_code = Code, reply_text = Text}, none,
+ _Sender, State) ->
+ State#state{closing = {just_channel, {app_initiated_close, Code, Text}}};
+pre_do(#'basic.consume'{} = Method, none, Sender, State) ->
+ ok = call_to_consumer(Method, Sender, State),
+ State;
+pre_do(#'basic.cancel'{} = Method, none, Sender, State) ->
+ ok = call_to_consumer(Method, Sender, State),
+ State;
+pre_do(_, _, _, State) ->
+ State.
+
+%%---------------------------------------------------------------------------
+%% Handling of methods from the server
+%%---------------------------------------------------------------------------
+
+safely_handle_method_from_server(Method, Content,
+ Continuation,
+ State = #state{closing = Closing}) ->
+ case is_connection_method(Method) of
+ true -> server_misbehaved(
+ #amqp_error{name = command_invalid,
+ explanation = "connection method on "
+ "non-zero channel",
+ method = element(1, Method)},
+ State);
+ false -> Drop = case {Closing, Method} of
+ {{just_channel, _}, #'channel.close'{}} -> false;
+ {{just_channel, _}, #'channel.close_ok'{}} -> false;
+ {{just_channel, _}, _} -> true;
+ _ -> false
+ end,
+ if Drop -> ?LOG_INFO("Channel (~p): dropping method ~p from "
+ "server because channel is closing~n",
+ [self(), {Method, Content}]),
+ {noreply, State};
+ true ->
+ Continuation()
+ end
+ end.
+
+handle_method_from_server(Method, Content, State) ->
+ Fun = fun () ->
+ handle_method_from_server1(Method,
+ amqp_msg(Content), State)
+ end,
+ safely_handle_method_from_server(Method, Content, Fun, State).
+
+handle_method_from_server(Method = #'basic.deliver'{},
+ Content, DeliveryCtx, State) ->
+ Fun = fun () ->
+ handle_method_from_server1(Method,
+ amqp_msg(Content),
+ DeliveryCtx,
+ State)
+ end,
+ safely_handle_method_from_server(Method, Content, Fun, State).
+
+handle_method_from_server1(#'channel.open_ok'{}, none, State) ->
+ {noreply, rpc_bottom_half(ok, State)};
+handle_method_from_server1(#'channel.close'{reply_code = Code,
+ reply_text = Text},
+ none,
+ State = #state{closing = {just_channel, _}}) ->
+ %% Both client and server sent close at the same time. Don't shutdown yet,
+ %% wait for close_ok.
+ do(#'channel.close_ok'{}, none, noflow, State),
+ {noreply,
+ State#state{
+ closing = {just_channel, {server_initiated_close, Code, Text}}}};
+handle_method_from_server1(#'channel.close'{reply_code = Code,
+ reply_text = Text}, none, State) ->
+ do(#'channel.close_ok'{}, none, noflow, State),
+ handle_shutdown({server_initiated_close, Code, Text}, State);
+handle_method_from_server1(#'channel.close_ok'{}, none,
+ State = #state{closing = Closing}) ->
+ case Closing of
+ {just_channel, {app_initiated_close, _, _} = Reason} ->
+ handle_shutdown(Reason, rpc_bottom_half(ok, State));
+ {just_channel, {server_initiated_close, _, _} = Reason} ->
+ handle_shutdown(Reason,
+ rpc_bottom_half(closing, State));
+ {connection, Reason} ->
+ handle_shutdown({connection_closing, Reason}, State)
+ end;
+handle_method_from_server1(#'basic.consume_ok'{} = ConsumeOk, none, State) ->
+ Consume = #'basic.consume'{} = pending_rpc_method(State),
+ ok = call_to_consumer(ConsumeOk, Consume, State),
+ {noreply, rpc_bottom_half(ConsumeOk, State)};
+handle_method_from_server1(#'basic.cancel_ok'{} = CancelOk, none, State) ->
+ Cancel = #'basic.cancel'{} = pending_rpc_method(State),
+ ok = call_to_consumer(CancelOk, Cancel, State),
+ {noreply, rpc_bottom_half(CancelOk, State)};
+handle_method_from_server1(#'basic.cancel'{} = Cancel, none, State) ->
+ ok = call_to_consumer(Cancel, none, State),
+ {noreply, State};
+handle_method_from_server1(#'basic.deliver'{} = Deliver, AmqpMsg, State) ->
+ ok = call_to_consumer(Deliver, AmqpMsg, State),
+ {noreply, State};
+handle_method_from_server1(#'channel.flow'{active = Active} = Flow, none,
+ State = #state{flow_handler = FlowHandler}) ->
+ case FlowHandler of none -> ok;
+ {Pid, _Ref} -> Pid ! Flow
+ end,
+ %% Putting the flow_ok in the queue so that the RPC queue can be
+ %% flushed beforehand. Methods that made it to the queue are not
+ %% blocked in any circumstance.
+ {noreply, rpc_top_half(#'channel.flow_ok'{active = Active}, none, none,
+ none, noflow, State#state{flow_active = Active})};
+handle_method_from_server1(
+ #'basic.return'{} = BasicReturn, AmqpMsg,
+ State = #state{return_handler = ReturnHandler}) ->
+ case ReturnHandler of
+ none -> ?LOG_WARN("Channel (~p): received {~p, ~p} but there is "
+ "no return handler registered~n",
+ [self(), BasicReturn, AmqpMsg]);
+ {Pid, _Ref} -> Pid ! {BasicReturn, AmqpMsg}
+ end,
+ {noreply, State};
+handle_method_from_server1(#'basic.ack'{} = BasicAck, none,
+ #state{confirm_handler = none} = State) ->
+ {noreply, update_confirm_set(BasicAck, State)};
+handle_method_from_server1(#'basic.ack'{} = BasicAck, none,
+ #state{confirm_handler = {CH, _Ref}} = State) ->
+ CH ! BasicAck,
+ {noreply, update_confirm_set(BasicAck, State)};
+handle_method_from_server1(#'basic.nack'{} = BasicNack, none,
+ #state{confirm_handler = none} = State) ->
+ ?LOG_WARN("Channel (~p): received ~p but there is no "
+ "confirm handler registered~n", [self(), BasicNack]),
+ {noreply, update_confirm_set(BasicNack, State)};
+handle_method_from_server1(#'basic.nack'{} = BasicNack, none,
+ #state{confirm_handler = {CH, _Ref}} = State) ->
+ CH ! BasicNack,
+ {noreply, update_confirm_set(BasicNack, State)};
+
+handle_method_from_server1(#'basic.credit_drained'{} = CreditDrained, none,
+ #state{consumer = Consumer} = State) ->
+ Consumer ! CreditDrained,
+ {noreply, State};
+handle_method_from_server1(Method, none, State) ->
+ {noreply, rpc_bottom_half(Method, State)};
+handle_method_from_server1(Method, Content, State) ->
+ {noreply, rpc_bottom_half({Method, Content}, State)}.
+
+%% only used with manual consumer-to-queue flow control
+handle_method_from_server1(#'basic.deliver'{} = Deliver, AmqpMsg,
+ DeliveryCtx, State) ->
+ ok = call_to_consumer(Deliver, AmqpMsg, DeliveryCtx, State),
+ {noreply, State}.
+
+%%---------------------------------------------------------------------------
+%% Other handle_* functions
+%%---------------------------------------------------------------------------
+
+handle_connection_closing(CloseType, Reason,
+ State = #state{rpc_requests = RpcQueue,
+ closing = Closing}) ->
+ NewState = State#state{closing = {connection, Reason}},
+ case {CloseType, Closing, queue:is_empty(RpcQueue)} of
+ {flush, false, false} ->
+ erlang:send_after(?TIMEOUT_FLUSH, self(),
+ timed_out_flushing_channel),
+ {noreply, NewState};
+ {flush, {just_channel, _}, false} ->
+ {noreply, NewState};
+ _ ->
+ handle_shutdown({connection_closing, Reason}, NewState)
+ end.
+
+handle_channel_exit(Reason = #amqp_error{name = ErrorName, explanation = Expl},
+ State = #state{connection = Connection, number = Number}) ->
+ %% Sent by rabbit_channel for hard errors in the direct case
+ ?LOG_ERR("connection ~p, channel ~p - error:~n~p~n",
+ [Connection, Number, Reason]),
+ {true, Code, _} = ?PROTOCOL:lookup_amqp_exception(ErrorName),
+ ReportedReason = {server_initiated_close, Code, Expl},
+ amqp_gen_connection:hard_error_in_channel(
+ Connection, self(), ReportedReason),
+ handle_shutdown({connection_closing, ReportedReason}, State);
+handle_channel_exit(Reason, State) ->
+ %% Unexpected death of a channel infrastructure process
+ {stop, {infrastructure_died, Reason}, State}.
+
+handle_shutdown({_, 200, _}, State) ->
+ {stop, normal, State};
+handle_shutdown({connection_closing, {_, 200, _}}, State) ->
+ {stop, normal, State};
+handle_shutdown({connection_closing, normal}, State) ->
+ {stop, normal, State};
+handle_shutdown(Reason, State) ->
+ {stop, {shutdown, Reason}, State}.
+
+%%---------------------------------------------------------------------------
+%% Internal plumbing
+%%---------------------------------------------------------------------------
+
+do(Method, Content, Flow, #state{driver = network, writer = W}) ->
+ %% Catching because it expects the {channel_exit, _, _} message on error
+ catch case {Content, Flow} of
+ {none, _} -> rabbit_writer:send_command(W, Method);
+ {_, flow} -> rabbit_writer:send_command_flow(W, Method,
+ Content);
+ {_, noflow} -> rabbit_writer:send_command(W, Method, Content)
+ end;
+do(Method, Content, Flow, #state{driver = direct, writer = W}) ->
+    %% Catching for the same reason as in the network case above
+ catch case {Content, Flow} of
+ {none, _} -> rabbit_channel_common:do(W, Method);
+ {_, flow} -> rabbit_channel_common:do_flow(W, Method, Content);
+ {_, noflow} -> rabbit_channel_common:do(W, Method, Content)
+ end.
+
+flush_writer(#state{driver = network, writer = Writer}) ->
+ try
+ rabbit_writer:flush(Writer)
+ catch
+ exit:noproc -> ok
+ end;
+flush_writer(#state{driver = direct}) ->
+    ok.
+
+amqp_msg(none) ->
+ none;
+amqp_msg(Content) ->
+ {Props, Payload} = rabbit_basic_common:from_content(Content),
+ #amqp_msg{props = Props, payload = Payload}.
+
+build_content(none) ->
+ none;
+build_content(#amqp_msg{props = Props, payload = Payload}) ->
+ rabbit_basic_common:build_content(Props, Payload).
+
+check_block(_Method, _AmqpMsg, #state{closing = {just_channel, _}}) ->
+ closing;
+check_block(_Method, _AmqpMsg, #state{closing = {connection, _}}) ->
+ closing;
+check_block(_Method, none, #state{}) ->
+ ok;
+check_block(_Method, #amqp_msg{}, #state{flow_active = false}) ->
+ blocked;
+check_block(_Method, _AmqpMsg, #state{}) ->
+ ok.
+
+check_invalid_method(#'channel.open'{}) ->
+ {use_amqp_connection_module,
+ "Use amqp_connection:open_channel/{1,2} instead"};
+check_invalid_method(#'channel.close'{}) ->
+ {use_close_function, "Use close/{1,3} instead"};
+check_invalid_method(Method) ->
+ case is_connection_method(Method) of
+ true -> {connection_methods_not_allowed,
+ "Sending connection methods is not allowed"};
+ false -> ok
+ end.
+
+is_connection_method(Method) ->
+ {ClassId, _} = ?PROTOCOL:method_id(element(1, Method)),
+ ?PROTOCOL:lookup_class_name(ClassId) == connection.
+
+server_misbehaved(#amqp_error{} = AmqpError, State = #state{number = Number}) ->
+ case rabbit_binary_generator:map_exception(Number, AmqpError, ?PROTOCOL) of
+ {0, _} ->
+ handle_shutdown({server_misbehaved, AmqpError}, State);
+ {_, Close} ->
+ ?LOG_WARN("Channel (~p) flushing and closing due to soft "
+ "error caused by the server ~p~n", [self(), AmqpError]),
+ Self = self(),
+ spawn(fun () -> call(Self, Close) end),
+ {noreply, State}
+ end.
+
+update_confirm_set(#'basic.ack'{delivery_tag = SeqNo,
+ multiple = Multiple},
+ State = #state{unconfirmed_set = USet}) ->
+ maybe_notify_waiters(
+ State#state{unconfirmed_set =
+ update_unconfirmed(SeqNo, Multiple, USet)});
+update_confirm_set(#'basic.nack'{delivery_tag = SeqNo,
+ multiple = Multiple},
+ State = #state{unconfirmed_set = USet}) ->
+ maybe_notify_waiters(
+ State#state{unconfirmed_set = update_unconfirmed(SeqNo, Multiple, USet),
+ only_acks_received = false}).
+
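+%% Remove SeqNo from the unconfirmed set; when Multiple is true, remove
+%% everything up to and including SeqNo. For example, an ack with
+%% delivery_tag = 3 and multiple = true turns the set {1, 2, 3, 5}
+%% into {5}.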
+update_unconfirmed(SeqNo, false, USet) ->
+ gb_sets:del_element(SeqNo, USet);
+update_unconfirmed(SeqNo, true, USet) ->
+ case gb_sets:is_empty(USet) of
+ true -> USet;
+ false -> {S, USet1} = gb_sets:take_smallest(USet),
+ case S > SeqNo of
+ true -> USet;
+ false -> update_unconfirmed(SeqNo, true, USet1)
+ end
+ end.
+
+maybe_notify_waiters(State = #state{unconfirmed_set = USet}) ->
+ case gb_sets:is_empty(USet) of
+ false -> State;
+ true -> notify_confirm_waiters(State)
+ end.
+
+notify_confirm_waiters(State = #state{waiting_set = WSet,
+ only_acks_received = OAR}) ->
+ [begin
+ safe_cancel_timer(TRef),
+ gen_server:reply(From, OAR)
+ end || {From, TRef} <- gb_trees:to_list(WSet)],
+ State#state{waiting_set = gb_trees:empty(),
+ only_acks_received = true}.
+
+do_wait_for_confirms(Channel, Timeout)
+  when is_integer(Timeout); Timeout =:= infinity ->
+ case gen_server:call(Channel, {wait_for_confirms, Timeout}, amqp_util:call_timeout()) of
+ {error, Reason} -> throw(Reason);
+ Other -> Other
+ end.
+
+handle_wait_for_confirms(_From, _Timeout, State = #state{next_pub_seqno = 0}) ->
+ {reply, {error, not_in_confirm_mode}, State};
+handle_wait_for_confirms(From, Timeout,
+ State = #state{unconfirmed_set = USet,
+ waiting_set = WSet}) ->
+ case gb_sets:is_empty(USet) of
+ true -> {reply, true, State};
+ false -> TRef = case Timeout of
+ infinity -> undefined;
+ _ -> erlang:send_after(
+ Timeout, self(),
+ {confirm_timeout, From})
+ end,
+ {noreply,
+ State#state{waiting_set = gb_trees:insert(From, TRef, WSet)}}
+ end.
+
+call_to_consumer(Method, Args, #state{consumer = Consumer}) ->
+ amqp_gen_consumer:call_consumer(Consumer, Method, Args).
+
+call_to_consumer(Method, Args, DeliveryCtx, #state{consumer = Consumer}) ->
+ amqp_gen_consumer:call_consumer(Consumer, Method, Args, DeliveryCtx).
+
+safe_cancel_timer(undefined) -> ok;
+safe_cancel_timer(TRef) -> erlang:cancel_timer(TRef).
+
+second_to_millisecond(Timeout) ->
+ Timeout * 1000.
diff --git a/deps/amqp_client/src/amqp_channel_sup.erl b/deps/amqp_client/src/amqp_channel_sup.erl
new file mode 100644
index 0000000000..9bd85ce946
--- /dev/null
+++ b/deps/amqp_client/src/amqp_channel_sup.erl
@@ -0,0 +1,73 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+%% @private
+-module(amqp_channel_sup).
+
+-include("amqp_client_internal.hrl").
+
+-behaviour(supervisor2).
+
+-export([start_link/6]).
+-export([init/1]).
+
+%%---------------------------------------------------------------------------
+%% Interface
+%%---------------------------------------------------------------------------
+
+start_link(Type, Connection, ConnName, InfraArgs, ChNumber,
+ Consumer = {_, _}) ->
+ Identity = {ConnName, ChNumber},
+ {ok, Sup} = supervisor2:start_link(?MODULE, [Consumer, Identity]),
+ [{gen_consumer, ConsumerPid, _, _}] = supervisor2:which_children(Sup),
+ {ok, ChPid} = supervisor2:start_child(
+ Sup, {channel,
+ {amqp_channel, start_link,
+ [Type, Connection, ChNumber, ConsumerPid, Identity]},
+ intrinsic, ?WORKER_WAIT, worker, [amqp_channel]}),
+ case start_writer(Sup, Type, InfraArgs, ConnName, ChNumber, ChPid) of
+ {ok, Writer} ->
+ amqp_channel:set_writer(ChPid, Writer),
+ {ok, AState} = init_command_assembler(Type),
+ {ok, Sup, {ChPid, AState}};
+ {error, _}=Error ->
+ Error
+ end.
+
+%%---------------------------------------------------------------------------
+%% Internal plumbing
+%%---------------------------------------------------------------------------
+
+%% 1GB
+-define(DEFAULT_GC_THRESHOLD, 1000000000).
+
+start_writer(_Sup, direct, [ConnPid, Node, User, VHost, Collector, AmqpParams],
+ ConnName, ChNumber, ChPid) ->
+ rpc:call(Node, rabbit_direct, start_channel,
+ [ChNumber, ChPid, ConnPid, ConnName, ?PROTOCOL, User,
+ VHost, ?CLIENT_CAPABILITIES, Collector, AmqpParams]);
+start_writer(Sup, network, [Sock, FrameMax], ConnName, ChNumber, ChPid) ->
+ GCThreshold = application:get_env(amqp_client, writer_gc_threshold, ?DEFAULT_GC_THRESHOLD),
+ supervisor2:start_child(
+ Sup,
+ {writer, {rabbit_writer, start_link,
+ [Sock, ChNumber, FrameMax, ?PROTOCOL, ChPid,
+ {ConnName, ChNumber}, false, GCThreshold]},
+ transient, ?WORKER_WAIT, worker, [rabbit_writer]}).
+
+init_command_assembler(direct) -> {ok, none};
+init_command_assembler(network) -> rabbit_command_assembler:init(?PROTOCOL).
+
+%%---------------------------------------------------------------------------
+%% supervisor2 callbacks
+%%---------------------------------------------------------------------------
+
+init([{ConsumerModule, ConsumerArgs}, Identity]) ->
+ {ok, {{one_for_all, 0, 1},
+ [{gen_consumer, {amqp_gen_consumer, start_link,
+ [ConsumerModule, ConsumerArgs, Identity]},
+ intrinsic, ?WORKER_WAIT, worker, [amqp_gen_consumer]}]}}.
diff --git a/deps/amqp_client/src/amqp_channel_sup_sup.erl b/deps/amqp_client/src/amqp_channel_sup_sup.erl
new file mode 100644
index 0000000000..720b0e5726
--- /dev/null
+++ b/deps/amqp_client/src/amqp_channel_sup_sup.erl
@@ -0,0 +1,36 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+%% @private
+-module(amqp_channel_sup_sup).
+
+-include("amqp_client.hrl").
+
+-behaviour(supervisor2).
+
+-export([start_link/3, start_channel_sup/4]).
+-export([init/1]).
+
+%%---------------------------------------------------------------------------
+%% Interface
+%%---------------------------------------------------------------------------
+
+start_link(Type, Connection, ConnName) ->
+ supervisor2:start_link(?MODULE, [Type, Connection, ConnName]).
+
+start_channel_sup(Sup, InfraArgs, ChannelNumber, Consumer) ->
+ supervisor2:start_child(Sup, [InfraArgs, ChannelNumber, Consumer]).
+
+%%---------------------------------------------------------------------------
+%% supervisor2 callbacks
+%%---------------------------------------------------------------------------
+
+init([Type, Connection, ConnName]) ->
+ {ok, {{simple_one_for_one, 0, 1},
+ [{channel_sup,
+ {amqp_channel_sup, start_link, [Type, Connection, ConnName]},
+ temporary, infinity, supervisor, [amqp_channel_sup]}]}}.
diff --git a/deps/amqp_client/src/amqp_channels_manager.erl b/deps/amqp_client/src/amqp_channels_manager.erl
new file mode 100644
index 0000000000..2a8d427dc4
--- /dev/null
+++ b/deps/amqp_client/src/amqp_channels_manager.erl
@@ -0,0 +1,249 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+%% @private
+-module(amqp_channels_manager).
+
+-include("amqp_client_internal.hrl").
+
+-behaviour(gen_server).
+
+-export([start_link/3, open_channel/4, set_channel_max/2, is_empty/1,
+ num_channels/1, pass_frame/3, signal_connection_closing/3,
+ process_channel_frame/4]).
+-export([init/1, terminate/2, code_change/3, handle_call/3, handle_cast/2,
+ handle_info/2]).
+
+-record(state, {connection,
+ channel_sup_sup,
+ map_num_pa = gb_trees:empty(), %% Number -> {Pid, AState}
+ map_pid_num = #{}, %% Pid -> Number
+ channel_max = ?MAX_CHANNEL_NUMBER,
+ closing = false}).
+
+%%---------------------------------------------------------------------------
+%% Interface
+%%---------------------------------------------------------------------------
+
+start_link(Connection, ConnName, ChSupSup) ->
+ gen_server:start_link(?MODULE, [Connection, ConnName, ChSupSup], []).
+
+open_channel(ChMgr, ProposedNumber, Consumer, InfraArgs) ->
+ gen_server:call(ChMgr, {open_channel, ProposedNumber, Consumer, InfraArgs},
+ amqp_util:call_timeout()).
+
+set_channel_max(ChMgr, ChannelMax) ->
+ gen_server:cast(ChMgr, {set_channel_max, ChannelMax}).
+
+is_empty(ChMgr) ->
+ gen_server:call(ChMgr, is_empty, amqp_util:call_timeout()).
+
+num_channels(ChMgr) ->
+ gen_server:call(ChMgr, num_channels, amqp_util:call_timeout()).
+
+pass_frame(ChMgr, ChNumber, Frame) ->
+ gen_server:cast(ChMgr, {pass_frame, ChNumber, Frame}).
+
+signal_connection_closing(ChMgr, ChannelCloseType, Reason) ->
+ gen_server:cast(ChMgr, {connection_closing, ChannelCloseType, Reason}).
+
+process_channel_frame(Frame, Channel, ChPid, AState) ->
+ case rabbit_command_assembler:process(Frame, AState) of
+ {ok, NewAState} -> NewAState;
+ {ok, Method, NewAState} -> rabbit_channel_common:do(ChPid, Method),
+ NewAState;
+ {ok, Method, Content, NewAState} -> rabbit_channel_common:do(ChPid, Method,
+ Content),
+ NewAState;
+ {error, Reason} -> ChPid ! {channel_exit, Channel,
+ Reason},
+ AState
+ end.
+
+%%---------------------------------------------------------------------------
+%% gen_server callbacks
+%%---------------------------------------------------------------------------
+
+init([Connection, ConnName, ChSupSup]) ->
+ ?store_proc_name(ConnName),
+ {ok, #state{connection = Connection, channel_sup_sup = ChSupSup}}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+handle_call({open_channel, ProposedNumber, Consumer, InfraArgs}, _,
+ State = #state{closing = false}) ->
+ handle_open_channel(ProposedNumber, Consumer, InfraArgs, State);
+handle_call(is_empty, _, State) ->
+ {reply, internal_is_empty(State), State};
+handle_call(num_channels, _, State) ->
+ {reply, internal_num_channels(State), State}.
+
+handle_cast({set_channel_max, ChannelMax}, State) ->
+ {noreply, State#state{channel_max = ChannelMax}};
+handle_cast({pass_frame, ChNumber, Frame}, State) ->
+ {noreply, internal_pass_frame(ChNumber, Frame, State)};
+handle_cast({connection_closing, ChannelCloseType, Reason}, State) ->
+ handle_connection_closing(ChannelCloseType, Reason, State).
+
+handle_info({'DOWN', _, process, Pid, Reason}, State) ->
+ handle_down(Pid, Reason, State).
+
+%%---------------------------------------------------------------------------
+%% Internal plumbing
+%%---------------------------------------------------------------------------
+
+handle_open_channel(ProposedNumber, Consumer, InfraArgs,
+ State = #state{channel_sup_sup = ChSupSup}) ->
+ case new_number(ProposedNumber, State) of
+ {ok, Number} ->
+ {ok, _ChSup, {Ch, AState}} =
+ amqp_channel_sup_sup:start_channel_sup(ChSupSup, InfraArgs,
+ Number, Consumer),
+ NewState = internal_register(Number, Ch, AState, State),
+ erlang:monitor(process, Ch),
+ {reply, {ok, Ch}, NewState};
+ {error, _} = Error ->
+ {reply, Error, State}
+ end.
+
+new_number(none, #state{channel_max = ChannelMax, map_num_pa = MapNPA}) ->
+ case gb_trees:is_empty(MapNPA) of
+ true -> {ok, 1};
+ false -> {Smallest, _} = gb_trees:smallest(MapNPA),
+ if Smallest > 1 ->
+ {ok, Smallest - 1};
+ true ->
+ {Largest, _} = gb_trees:largest(MapNPA),
+ if Largest < ChannelMax -> {ok, Largest + 1};
+ true -> find_free(MapNPA)
+ end
+ end
+ end;
+new_number(Proposed, State = #state{channel_max = ChannelMax,
+ map_num_pa = MapNPA}) ->
+ IsValid = Proposed > 0 andalso Proposed =< ChannelMax andalso
+ not gb_trees:is_defined(Proposed, MapNPA),
+ case IsValid of true -> {ok, Proposed};
+ false -> new_number(none, State)
+ end.
+
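+%% Scan the allocated numbers in ascending order for the first gap.
+%% For example, with channels 1, 2 and 4 registered, the candidate
+%% advances past 1 and 2, then 4 > 3 yields {ok, 3}.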
+find_free(MapNPA) ->
+ find_free(gb_trees:iterator(MapNPA), 1).
+
+find_free(It, Candidate) ->
+ case gb_trees:next(It) of
+ {Number, _, It1} -> if Number > Candidate ->
+ {ok, Number - 1};
+ Number =:= Candidate ->
+ find_free(It1, Candidate + 1)
+ end;
+ none -> {error, out_of_channel_numbers}
+ end.
+
+handle_down(Pid, Reason, State) ->
+ case internal_lookup_pn(Pid, State) of
+ undefined -> {stop, {error, unexpected_down}, State};
+ Number -> handle_channel_down(Pid, Number, Reason, State)
+ end.
+
+handle_channel_down(Pid, Number, Reason, State) ->
+ maybe_report_down(Pid, case Reason of {shutdown, R} -> R;
+ _ -> Reason
+ end,
+ State),
+ NewState = internal_unregister(Number, Pid, State),
+ check_all_channels_terminated(NewState),
+ {noreply, NewState}.
+
+maybe_report_down(_Pid, normal, _State) ->
+ ok;
+maybe_report_down(_Pid, shutdown, _State) ->
+ ok;
+maybe_report_down(_Pid, {app_initiated_close, _, _}, _State) ->
+ ok;
+maybe_report_down(_Pid, {server_initiated_close, _, _}, _State) ->
+ ok;
+maybe_report_down(_Pid, {connection_closing, _}, _State) ->
+ ok;
+maybe_report_down(_Pid, {server_misbehaved, AmqpError},
+ #state{connection = Connection}) ->
+ amqp_gen_connection:server_misbehaved(Connection, AmqpError);
+maybe_report_down(Pid, Other, #state{connection = Connection}) ->
+ amqp_gen_connection:channel_internal_error(Connection, Pid, Other).
+
+check_all_channels_terminated(#state{closing = false}) ->
+ ok;
+check_all_channels_terminated(State = #state{closing = true,
+ connection = Connection}) ->
+ case internal_is_empty(State) of
+ true -> amqp_gen_connection:channels_terminated(Connection);
+ false -> ok
+ end.
+
+handle_connection_closing(ChannelCloseType, Reason,
+ State = #state{connection = Connection}) ->
+ case internal_is_empty(State) of
+ true -> amqp_gen_connection:channels_terminated(Connection);
+ false -> signal_channels_connection_closing(ChannelCloseType, Reason,
+ State)
+ end,
+ {noreply, State#state{closing = true}}.
+
+%%---------------------------------------------------------------------------
+
+internal_pass_frame(Number, Frame, State) ->
+ case internal_lookup_npa(Number, State) of
+ undefined ->
+ ?LOG_INFO("Dropping frame ~p for invalid or closed "
+ "channel number ~p~n", [Frame, Number]),
+ State;
+ {ChPid, AState} ->
+ NewAState = process_channel_frame(Frame, Number, ChPid, AState),
+ internal_update_npa(Number, ChPid, NewAState, State)
+ end.
+
+internal_register(Number, Pid, AState,
+ State = #state{map_num_pa = MapNPA, map_pid_num = MapPN}) ->
+ MapNPA1 = gb_trees:enter(Number, {Pid, AState}, MapNPA),
+ MapPN1 = maps:put(Pid, Number, MapPN),
+ State#state{map_num_pa = MapNPA1,
+ map_pid_num = MapPN1}.
+
+internal_unregister(Number, Pid,
+ State = #state{map_num_pa = MapNPA, map_pid_num = MapPN}) ->
+ MapNPA1 = gb_trees:delete(Number, MapNPA),
+ MapPN1 = maps:remove(Pid, MapPN),
+ State#state{map_num_pa = MapNPA1,
+ map_pid_num = MapPN1}.
+
+internal_is_empty(#state{map_num_pa = MapNPA}) ->
+ gb_trees:is_empty(MapNPA).
+
+internal_num_channels(#state{map_num_pa = MapNPA}) ->
+ gb_trees:size(MapNPA).
+
+internal_lookup_npa(Number, #state{map_num_pa = MapNPA}) ->
+ case gb_trees:lookup(Number, MapNPA) of {value, PA} -> PA;
+ none -> undefined
+ end.
+
+internal_lookup_pn(Pid, #state{map_pid_num = MapPN}) ->
+ case maps:find(Pid, MapPN) of {ok, Number} -> Number;
+ error -> undefined
+ end.
+
+internal_update_npa(Number, Pid, AState, State = #state{map_num_pa = MapNPA}) ->
+ State#state{map_num_pa = gb_trees:update(Number, {Pid, AState}, MapNPA)}.
+
+signal_channels_connection_closing(ChannelCloseType, Reason,
+ #state{map_pid_num = MapPN}) ->
+ [amqp_channel:connection_closing(Pid, ChannelCloseType, Reason)
+ || Pid <- maps:keys(MapPN)].
diff --git a/deps/amqp_client/src/amqp_client.erl b/deps/amqp_client/src/amqp_client.erl
new file mode 100644
index 0000000000..cf85c1b04c
--- /dev/null
+++ b/deps/amqp_client/src/amqp_client.erl
@@ -0,0 +1,37 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+%% @private
+-module(amqp_client).
+
+-behaviour(application).
+
+-export([start/0]).
+-export([start/2, stop/1]).
+
+%%---------------------------------------------------------------------------
+%% Interface
+%%---------------------------------------------------------------------------
+
+start() ->
+ %% rabbit_common needs compiler and syntax_tools, see
+ %%
+ %% * https://github.com/rabbitmq/rabbitmq-erlang-client/issues/72
+ %% * https://github.com/rabbitmq/rabbitmq-common/pull/149
+ application:ensure_all_started(rabbit_common),
+ {ok, _} = application:ensure_all_started(amqp_client),
+ ok.
+
+%%---------------------------------------------------------------------------
+%% application callbacks
+%%---------------------------------------------------------------------------
+
+start(_StartType, _StartArgs) ->
+ amqp_sup:start_link().
+
+stop(_State) ->
+ ok.
diff --git a/deps/amqp_client/src/amqp_connection.erl b/deps/amqp_client/src/amqp_connection.erl
new file mode 100644
index 0000000000..6800a44a3e
--- /dev/null
+++ b/deps/amqp_client/src/amqp_connection.erl
@@ -0,0 +1,395 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+%% @type close_reason(Type) = {shutdown, amqp_reason(Type)}.
+%% @type amqp_reason(Type) = {Type, Code, Text}
+%% Code = non_neg_integer()
+%% Text = binary().
+%% @doc This module is responsible for maintaining a connection to an AMQP
+%% broker and manages channels within the connection. This module is used to
+%% open and close connections to the broker as well as creating new channels
+%% within a connection.<br/>
+%% The connections and channels created by this module are supervised under
+%% amqp_client's supervision tree. Please note that connections and channels
+%% do not get restarted automatically by the supervision tree in the case of a
+%% failure. If you need robust connections and channels, we recommend you use
+%% Erlang monitors on the returned connection and channel PIDs.<br/>
+%% <br/>
+%% In case of a failure or an AMQP error, the connection process exits with a
+%% meaningful exit reason:<br/>
+%% <br/>
+%% <table>
+%% <tr>
+%% <td><strong>Cause</strong></td>
+%% <td><strong>Exit reason</strong></td>
+%% </tr>
+%% <tr>
+%% <td>Any reason, where Code would have been 200 otherwise</td>
+%% <td>```normal'''</td>
+%% </tr>
+%% <tr>
+%% <td>User application calls amqp_connection:close/3</td>
+%% <td>```close_reason(app_initiated_close)'''</td>
+%% </tr>
+%% <tr>
+%% <td>Server closes connection (hard error)</td>
+%% <td>```close_reason(server_initiated_close)'''</td>
+%% </tr>
+%% <tr>
+%% <td>Server misbehaved (did not follow protocol)</td>
+%% <td>```close_reason(server_misbehaved)'''</td>
+%% </tr>
+%% <tr>
+%% <td>AMQP client internal error - usually caused by a channel exiting
+%% with an unusual reason. This is usually accompanied by a more
+%% detailed error log from the channel</td>
+%% <td>```close_reason(internal_error)'''</td>
+%% </tr>
+%% <tr>
+%% <td>Other error</td>
+%% <td>(various error reasons, causing more detailed logging)</td>
+%% </tr>
+%% </table>
+%% <br/>
+%% See type definitions below.
+-module(amqp_connection).
+
+-include("amqp_client_internal.hrl").
+
+-export([open_channel/1, open_channel/2, open_channel/3, register_blocked_handler/2]).
+-export([start/1, start/2, close/1, close/2, close/3, close/4]).
+-export([error_atom/1]).
+-export([info/2, info_keys/1, info_keys/0]).
+-export([connection_name/1, update_secret/3]).
+-export([socket_adapter_info/2]).
+
+-define(DEFAULT_CONSUMER, {amqp_selective_consumer, []}).
+
+-define(PROTOCOL_SSL_PORT, (?PROTOCOL_PORT - 1)).
+
+%%---------------------------------------------------------------------------
+%% Type Definitions
+%%---------------------------------------------------------------------------
+
+%% @type amqp_adapter_info() = #amqp_adapter_info{}.
+%% @type amqp_params_direct() = #amqp_params_direct{}.
+%% As defined in amqp_client.hrl. It contains the following fields:
+%% <ul>
+%% <li>username :: binary() - The name of a user registered with the broker,
+%% defaults to &lt;&lt;"guest"&gt;&gt;</li>
+%% <li>password :: binary() - The user's password, defaults to 'none'</li>
+%% <li>virtual_host :: binary() - The name of a virtual host in the broker,
+%% defaults to &lt;&lt;"/"&gt;&gt;</li>
+%% <li>node :: atom() - The node the broker runs on (direct only)</li>
+%% <li>adapter_info :: amqp_adapter_info() - Extra management information used
+%% when this connection represents a non-AMQP network connection.</li>
+%% <li>client_properties :: [{binary(), atom(), binary()}] - A list of extra
+%% client properties to be sent to the server, defaults to []</li>
+%% </ul>
+%%
+%% @type amqp_params_network() = #amqp_params_network{}.
+%% As defined in amqp_client.hrl. It contains the following fields:
+%% <ul>
+%% <li>username :: binary() - The name of a user registered with the broker,
+%% defaults to &lt;&lt;"guest"&gt;&gt;</li>
+%% <li>password :: binary() - The user's password, defaults to
+%% &lt;&lt;"guest"&gt;&gt;</li>
+%% <li>virtual_host :: binary() - The name of a virtual host in the broker,
+%% defaults to &lt;&lt;"/"&gt;&gt;</li>
+%% <li>host :: string() - The hostname of the broker,
+%% defaults to "localhost" (network only)</li>
+%% <li>port :: integer() - The port the broker is listening on,
+%% defaults to 5672 (network only)</li>
+%% <li>channel_max :: non_neg_integer() - The channel_max handshake parameter,
+%% defaults to 0</li>
+%% <li>frame_max :: non_neg_integer() - The frame_max handshake parameter,
+%% defaults to 0 (network only)</li>
+%% <li>heartbeat :: non_neg_integer() - The heartbeat interval in seconds,
+%% defaults to 0 (turned off) (network only)</li>
+%% <li>connection_timeout :: non_neg_integer() | 'infinity'
+%% - The connection timeout in milliseconds,
+%% defaults to 30000 (network only)</li>
+%% <li>ssl_options :: term() - The second parameter to be used with the
+%% ssl:connect/2 function, defaults to 'none' (network only)</li>
+%% <li>client_properties :: [{binary(), atom(), binary()}] - A list of extra
+%% client properties to be sent to the server, defaults to []</li>
+%% <li>socket_options :: [any()] - Extra socket options. These are
+%% appended to the default options. See
+%% <a href="https://www.erlang.org/doc/man/inet.html#setopts-2">inet:setopts/2</a>
+%% and <a href="https://www.erlang.org/doc/man/gen_tcp.html#connect-4">
+%% gen_tcp:connect/4</a> for descriptions of the available options.</li>
+%% </ul>
+
+
+%%---------------------------------------------------------------------------
+%% Starting a connection
+%%---------------------------------------------------------------------------
+
+%% @spec (Params) -> {ok, Connection} | {error, Error}
+%% where
+%% Params = amqp_params_network() | amqp_params_direct()
+%% Connection = pid()
+%% @doc same as {@link amqp_connection:start/2. start(Params, undefined)}
+start(AmqpParams) ->
+ start(AmqpParams, undefined).
+
+%% @spec (Params, ConnectionName) -> {ok, Connection} | {error, Error}
+%% where
+%% Params = amqp_params_network() | amqp_params_direct()
+%% ConnectionName = undefined | binary()
+%% Connection = pid()
+%% @doc Starts a connection to an AMQP server. Use network params to
+%% connect to a remote AMQP server or direct params for a direct
+%% connection to a RabbitMQ server, assuming that the server is
+%% running in the same process space. If the port is set to 'undefined',
+%% the default ports will be selected depending on whether this is a
+%% normal or an SSL connection.
+%% If ConnectionName is a binary, it is added to client_properties as the
+%% user-specified connection name.
+start(AmqpParams, ConnName) when ConnName == undefined; is_binary(ConnName) ->
+ ensure_started(),
+ AmqpParams0 =
+ case AmqpParams of
+ #amqp_params_direct{password = Password} ->
+ AmqpParams#amqp_params_direct{password = credentials_obfuscation:encrypt(Password)};
+ #amqp_params_network{password = Password} ->
+ AmqpParams#amqp_params_network{password = credentials_obfuscation:encrypt(Password)}
+ end,
+ AmqpParams1 =
+ case AmqpParams0 of
+ #amqp_params_network{port = undefined, ssl_options = none} ->
+ AmqpParams0#amqp_params_network{port = ?PROTOCOL_PORT};
+ #amqp_params_network{port = undefined, ssl_options = _} ->
+ AmqpParams0#amqp_params_network{port = ?PROTOCOL_SSL_PORT};
+ _ ->
+ AmqpParams0
+ end,
+ AmqpParams2 = set_connection_name(ConnName, AmqpParams1),
+ AmqpParams3 = amqp_ssl:maybe_enhance_ssl_options(AmqpParams2),
+ {ok, _Sup, Connection} = amqp_sup:start_connection_sup(AmqpParams3),
+ amqp_gen_connection:connect(Connection).
+
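+%% A connection sketch (illustrative; host is hypothetical):
+%%
+%%   Params = #amqp_params_network{host = "localhost"},
+%%   {ok, Conn} = amqp_connection:start(Params, <<"my-app">>),
+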
+set_connection_name(undefined, Params) -> Params;
+set_connection_name(ConnName,
+ #amqp_params_network{client_properties = Props} = Params) ->
+ Params#amqp_params_network{
+ client_properties = [
+ {<<"connection_name">>, longstr, ConnName} | Props
+ ]};
+set_connection_name(ConnName,
+ #amqp_params_direct{client_properties = Props} = Params) ->
+ Params#amqp_params_direct{
+ client_properties = [
+ {<<"connection_name">>, longstr, ConnName} | Props
+ ]}.
+
+%% Usually the amqp_client application will already be running. We
+%% check whether that is the case by invoking an undocumented function
+%% which does not require a synchronous call to the application
+%% controller. That way we don't risk a deadlock if, say, the
+%% application controller is in the process of shutting down the very
+%% application which is making this call.
+ensure_started() ->
+ [ensure_started(App) || App <- [syntax_tools, compiler, xmerl,
+ rabbit_common, amqp_client, credentials_obfuscation]].
+
+ensure_started(App) ->
+ case is_pid(application_controller:get_master(App)) andalso amqp_sup:is_ready() of
+ true -> ok;
+ false -> case application:ensure_all_started(App) of
+ {ok, _} -> ok;
+ {error, _} = E -> throw(E)
+ end
+ end.
+
+%%---------------------------------------------------------------------------
+%% Commands
+%%---------------------------------------------------------------------------
+
+%% @doc Invokes open_channel(ConnectionPid, none,
+%% {amqp_selective_consumer, []}). Opens a channel without having to
+%% specify a channel number. This uses the default consumer
+%% implementation.
+open_channel(ConnectionPid) ->
+ open_channel(ConnectionPid, none, ?DEFAULT_CONSUMER).
+
+%% @doc Invokes open_channel(ConnectionPid, none, Consumer).
+%% Opens a channel without having to specify a channel number.
+open_channel(ConnectionPid, {_, _} = Consumer) ->
+ open_channel(ConnectionPid, none, Consumer);
+
+%% @doc Invokes open_channel(ConnectionPid, ChannelNumber,
+%% {amqp_selective_consumer, []}). Opens a channel, using the default
+%% consumer implementation.
+open_channel(ConnectionPid, ChannelNumber)
+ when is_number(ChannelNumber) orelse ChannelNumber =:= none ->
+ open_channel(ConnectionPid, ChannelNumber, ?DEFAULT_CONSUMER).
+
+%% @spec (ConnectionPid, ChannelNumber, Consumer) -> Result
+%% where
+%% ConnectionPid = pid()
+%% ChannelNumber = pos_integer() | 'none'
+%% Consumer = {ConsumerModule, ConsumerArgs}
+%% ConsumerModule = atom()
+%% ConsumerArgs = [any()]
+%% Result = {ok, ChannelPid} | {error, Error}
+%% ChannelPid = pid()
+%% @doc Opens an AMQP channel.<br/>
+%% Opens a channel, using a proposed channel number and a specific consumer
+%% implementation.<br/>
+%% ConsumerModule must implement the amqp_gen_consumer behaviour. ConsumerArgs
+%% is passed as parameter to ConsumerModule:init/1.<br/>
+%% This function assumes that an AMQP connection (networked or direct)
+%% has already been successfully established.<br/>
+%% ChannelNumber must be less than or equal to the negotiated
+%% max_channel value, or less than or equal to ?MAX_CHANNEL_NUMBER
+%% (65535) if the negotiated max_channel value is 0.<br/>
+%% In the direct connection, max_channel is always 0.
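+%%
+%% Illustrative example (Connection is a pid returned by start/1,2):
+%%   {ok, Channel} = amqp_connection:open_channel(
+%%                     Connection, none, {amqp_selective_consumer, []}).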
+open_channel(ConnectionPid, ChannelNumber,
+ {_ConsumerModule, _ConsumerArgs} = Consumer) ->
+ amqp_gen_connection:open_channel(ConnectionPid, ChannelNumber, Consumer).
+
+%% @spec (ConnectionPid) -> ok | Error
+%% where
+%% ConnectionPid = pid()
+%% @doc Closes the connection; invokes
+%% close(ConnectionPid, 200, &lt;&lt;"Goodbye"&gt;&gt;).
+close(ConnectionPid) ->
+ close(ConnectionPid, 200, <<"Goodbye">>).
+
+%% @spec (ConnectionPid, Timeout) -> ok | Error
+%% where
+%% ConnectionPid = pid()
+%% Timeout = integer()
+%% @doc Closes the connection, using the supplied Timeout value.
+close(ConnectionPid, Timeout) ->
+ close(ConnectionPid, 200, <<"Goodbye">>, Timeout).
+
+%% @spec (ConnectionPid, Code, Text) -> ok | closing
+%% where
+%% ConnectionPid = pid()
+%% Code = integer()
+%% Text = binary()
+%% @doc Closes the AMQP connection, allowing the caller to set the reply
+%% code and text.
+close(ConnectionPid, Code, Text) ->
+ close(ConnectionPid, Code, Text, amqp_util:call_timeout()).
+
+%% @spec (ConnectionPid, Code, Text, Timeout) -> ok | closing
+%% where
+%% ConnectionPid = pid()
+%% Code = integer()
+%% Text = binary()
+%% Timeout = integer()
+%% @doc Closes the AMQP connection, allowing the caller to set the reply
+%% code and text, as well as a timeout for the operation, after which the
+%% connection will be abruptly terminated.
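+%%
+%% Illustrative example (the 5000 ms timeout is an arbitrary value):
+%%   ok = amqp_connection:close(Connection, 200, <<"Goodbye">>, 5000).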
+close(ConnectionPid, Code, Text, Timeout) ->
+ Close = #'connection.close'{reply_text = Text,
+ reply_code = Code,
+ class_id = 0,
+ method_id = 0},
+ amqp_gen_connection:close(ConnectionPid, Close, Timeout).
+
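+%% @doc Registers a handler process to receive the raw
+%% #'connection.blocked'{} and #'connection.unblocked'{} method records
+%% sent by the server when it blocks or unblocks the connection.
+%% Illustrative sketch (what to do with the records is up to the caller):
+%%   amqp_connection:register_blocked_handler(Connection, self()),
+%%   receive #'connection.blocked'{reason = Reason} -> Reason end.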
+register_blocked_handler(ConnectionPid, BlockHandler) ->
+ amqp_gen_connection:register_blocked_handler(ConnectionPid, BlockHandler).
+
+-spec update_secret(pid(), term(), binary()) ->
+ {'ok', rabbit_types:auth_user()} |
+ {'refused', string(), [any()]} |
+ {'error', any()}.
+
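+%% Illustrative example (placeholder values; typically used to refresh
+%% an expiring credential such as an OAuth 2 token):
+%%   amqp_connection:update_secret(Connection, <<"new-token">>,
+%%                                 <<"token refresh">>).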
+update_secret(ConnectionPid, NewSecret, Reason) ->
+ Update = #'connection.update_secret'{new_secret = NewSecret,
+ reason = Reason},
+ amqp_gen_connection:update_secret(ConnectionPid, Update).
+
+%%---------------------------------------------------------------------------
+%% Other functions
+%%---------------------------------------------------------------------------
+
+%% @spec (Code) -> atom()
+%% where
+%% Code = integer()
+%% @doc Returns a descriptive atom corresponding to the given AMQP
+%% error code.
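+%% For example, with the default protocol error_atom(404) should return
+%% 'not_found'.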
+error_atom(Code) -> ?PROTOCOL:amqp_exception(Code).
+
+%% @spec (ConnectionPid, Items) -> ResultList
+%% where
+%% ConnectionPid = pid()
+%% Items = [Item]
+%% ResultList = [{Item, Result}]
+%% Item = atom()
+%% Result = term()
+%% @doc Returns information about the connection, as specified by the Items
+%% list. Item may be any atom returned by info_keys/1:
+%%<ul>
+%%<li>type - returns the type of the connection (network or direct)</li>
+%%<li>server_properties - returns the server_properties fields sent by the
+%% server while establishing the connection</li>
+%%<li>is_closing - returns true if the connection is in the process of closing
+%% and false otherwise</li>
+%%<li>amqp_params - returns the #amqp_params{} structure used to start the
+%% connection</li>
+%%<li>num_channels - returns the number of channels currently open under the
+%% connection (excluding channel 0)</li>
+%%<li>channel_max - returns the channel_max value negotiated with the
+%% server</li>
+%%<li>heartbeat - returns the heartbeat value negotiated with the server
+%% (only for the network connection)</li>
+%%<li>frame_max - returns the frame_max value negotiated with the
+%% server (only for the network connection)</li>
+%%<li>sock - returns the socket (for use with e.g. inet:sockname/1)
+%%  (only for the network connection)</li>
+%%<li>any other value - throws an exception</li>
+%%</ul>
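+%%
+%% Illustrative example; for a network connection with one open channel
+%% this might return [{type, network}, {num_channels, 1}]:
+%%   amqp_connection:info(Connection, [type, num_channels]).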
+info(ConnectionPid, Items) ->
+ amqp_gen_connection:info(ConnectionPid, Items).
+
+%% @spec (ConnectionPid) -> Items
+%% where
+%% ConnectionPid = pid()
+%% Items = [Item]
+%% Item = atom()
+%% @doc Returns a list of atoms that can be used in conjunction with info/2.
+%% Note that the list differs from one type of connection to another (network vs.
+%% direct). Use info_keys/0 to get a list of info keys that can be used for
+%% any connection.
+info_keys(ConnectionPid) ->
+ amqp_gen_connection:info_keys(ConnectionPid).
+
+%% @spec () -> Items
+%% where
+%% Items = [Item]
+%% Item = atom()
+%% @doc Returns a list of atoms that can be used in conjunction with info/2.
+%% These are general info keys, which can be used in any type of connection.
+%% Other info keys may exist for a specific type. To get the full list of
+%% atoms that can be used for a certain connection, use info_keys/1.
+info_keys() ->
+ amqp_gen_connection:info_keys().
+
+%% @doc Takes a socket and a protocol and returns an #amqp_adapter_info{}
+%% record built from the socket, for the given protocol.
+socket_adapter_info(Sock, Protocol) ->
+ amqp_direct_connection:socket_adapter_info(Sock, Protocol).
+
+%% @spec (ConnectionPid) -> ConnectionName
+%% where
+%% ConnectionPid = pid()
+%%      ConnectionName = binary() | 'undefined'
+%% @doc Returns the user-specified connection name from client_properties,
+%% or 'undefined' if none was set.
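+%%
+%% Illustrative example (assumes the connection was started with this
+%% name via start/2):
+%%   <<"my-app-connection">> = amqp_connection:connection_name(Connection).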
+connection_name(ConnectionPid) ->
+ ClientProperties = case info(ConnectionPid, [amqp_params]) of
+ [{_, #amqp_params_network{client_properties = Props}}] -> Props;
+ [{_, #amqp_params_direct{client_properties = Props}}] -> Props
+ end,
+ case lists:keyfind(<<"connection_name">>, 1, ClientProperties) of
+ {<<"connection_name">>, _, ConnName} -> ConnName;
+ false -> undefined
+ end.
diff --git a/deps/amqp_client/src/amqp_connection_sup.erl b/deps/amqp_client/src/amqp_connection_sup.erl
new file mode 100644
index 0000000000..b71fb54fd4
--- /dev/null
+++ b/deps/amqp_client/src/amqp_connection_sup.erl
@@ -0,0 +1,41 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+%% @private
+-module(amqp_connection_sup).
+
+-include("amqp_client.hrl").
+
+-behaviour(supervisor2).
+
+-export([start_link/1]).
+-export([init/1]).
+
+%%---------------------------------------------------------------------------
+%% Interface
+%%---------------------------------------------------------------------------
+
+start_link(AMQPParams) ->
+ {ok, Sup} = supervisor2:start_link(?MODULE, []),
+ {ok, TypeSup} = supervisor2:start_child(
+ Sup, {connection_type_sup,
+ {amqp_connection_type_sup, start_link, []},
+ transient, ?SUPERVISOR_WAIT, supervisor,
+ [amqp_connection_type_sup]}),
+ {ok, Connection} = supervisor2:start_child(
+ Sup, {connection, {amqp_gen_connection, start_link,
+ [TypeSup, AMQPParams]},
+ intrinsic, brutal_kill, worker,
+ [amqp_gen_connection]}),
+ {ok, Sup, Connection}.
+
+%%---------------------------------------------------------------------------
+%% supervisor2 callbacks
+%%---------------------------------------------------------------------------
+
+init([]) ->
+ {ok, {{one_for_all, 0, 1}, []}}.
diff --git a/deps/amqp_client/src/amqp_connection_type_sup.erl b/deps/amqp_client/src/amqp_connection_type_sup.erl
new file mode 100644
index 0000000000..f67dc56836
--- /dev/null
+++ b/deps/amqp_client/src/amqp_connection_type_sup.erl
@@ -0,0 +1,91 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+%% @private
+-module(amqp_connection_type_sup).
+
+-include("amqp_client_internal.hrl").
+
+-behaviour(supervisor2).
+
+-export([start_link/0, start_infrastructure_fun/3, type_module/1]).
+
+-export([init/1]).
+
+%%---------------------------------------------------------------------------
+%% Interface
+%%---------------------------------------------------------------------------
+
+start_link() ->
+ supervisor2:start_link(?MODULE, []).
+
+type_module(#amqp_params_direct{}) -> {direct, amqp_direct_connection};
+type_module(#amqp_params_network{}) -> {network, amqp_network_connection}.
+
+%%---------------------------------------------------------------------------
+
+start_channels_manager(Sup, Conn, ConnName, Type) ->
+ {ok, ChSupSup} = supervisor2:start_child(
+ Sup,
+ {channel_sup_sup, {amqp_channel_sup_sup, start_link,
+ [Type, Conn, ConnName]},
+ intrinsic, ?SUPERVISOR_WAIT, supervisor,
+ [amqp_channel_sup_sup]}),
+ {ok, _} = supervisor2:start_child(
+ Sup,
+ {channels_manager, {amqp_channels_manager, start_link,
+ [Conn, ConnName, ChSupSup]},
+ transient, ?WORKER_WAIT, worker, [amqp_channels_manager]}).
+
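+%% Returns a closure that the connection process invokes once the socket
+%% (network) or connection name (direct) is known; it starts the channels
+%% manager plus a writer/reader pair for network connections, or a queue
+%% collector for direct connections.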
+start_infrastructure_fun(Sup, Conn, network) ->
+ fun (Sock, ConnName) ->
+ {ok, ChMgr} = start_channels_manager(Sup, Conn, ConnName, network),
+ {ok, AState} = rabbit_command_assembler:init(?PROTOCOL),
+ {ok, GCThreshold} = application:get_env(amqp_client, writer_gc_threshold),
+ {ok, Writer} =
+ supervisor2:start_child(
+ Sup,
+ {writer,
+ {rabbit_writer, start_link,
+ [Sock, 0, ?FRAME_MIN_SIZE, ?PROTOCOL, Conn, ConnName,
+ false, GCThreshold]},
+ transient, ?WORKER_WAIT, worker, [rabbit_writer]}),
+ {ok, Reader} =
+ supervisor2:start_child(
+ Sup,
+ {main_reader, {amqp_main_reader, start_link,
+ [Sock, Conn, ChMgr, AState, ConnName]},
+ transient, ?WORKER_WAIT, worker, [amqp_main_reader]}),
+ case rabbit_net:controlling_process(Sock, Reader) of
+ ok ->
+ case amqp_main_reader:post_init(Reader) of
+ ok ->
+ {ok, ChMgr, Writer};
+ {error, Reason} ->
+ {error, Reason}
+ end;
+ {error, Reason} ->
+ {error, Reason}
+ end
+ end;
+start_infrastructure_fun(Sup, Conn, direct) ->
+ fun (ConnName) ->
+ {ok, ChMgr} = start_channels_manager(Sup, Conn, ConnName, direct),
+ {ok, Collector} =
+ supervisor2:start_child(
+ Sup,
+ {collector, {rabbit_queue_collector, start_link, [ConnName]},
+ transient, ?WORKER_WAIT, worker, [rabbit_queue_collector]}),
+ {ok, ChMgr, Collector}
+ end.
+
+%%---------------------------------------------------------------------------
+%% supervisor2 callbacks
+%%---------------------------------------------------------------------------
+
+init([]) ->
+ {ok, {{one_for_all, 0, 1}, []}}.
diff --git a/deps/amqp_client/src/amqp_direct_connection.erl b/deps/amqp_client/src/amqp_direct_connection.erl
new file mode 100644
index 0000000000..a07c67074e
--- /dev/null
+++ b/deps/amqp_client/src/amqp_direct_connection.erl
@@ -0,0 +1,232 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+%% @private
+-module(amqp_direct_connection).
+
+-include("amqp_client_internal.hrl").
+
+-behaviour(amqp_gen_connection).
+
+-export([server_close/3]).
+
+-export([init/0, terminate/2, connect/4, do/2, open_channel_args/1, i/2,
+ info_keys/0, handle_message/2, closing/3, channels_terminated/1]).
+
+-export([socket_adapter_info/2]).
+
+-record(state, {node,
+ user,
+ vhost,
+ params,
+ adapter_info,
+ collector,
+ closing_reason, %% undefined | Reason
+ connected_at
+ }).
+
+-define(INFO_KEYS, [type]).
+
+-define(CREATION_EVENT_KEYS, [pid, protocol, host, port, name,
+ peer_host, peer_port,
+ user, vhost, client_properties, type,
+ connected_at, node, user_who_performed_action]).
+
+%%---------------------------------------------------------------------------
+
+%% amqp_connection:close() logically closes from the client end. We may
+%% want to close from the server end.
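+%% Illustrative sketch (320 is the AMQP 0-9-1 connection-forced reply
+%% code; the reason text is arbitrary):
+%%   amqp_direct_connection:server_close(ConnectionPid, 320,
+%%                                       <<"Closed via management plugin">>).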
+server_close(ConnectionPid, Code, Text) ->
+ Close = #'connection.close'{reply_text = Text,
+ reply_code = Code,
+ class_id = 0,
+ method_id = 0},
+ amqp_gen_connection:server_close(ConnectionPid, Close).
+
+init() ->
+ {ok, #state{}}.
+
+open_channel_args(#state{node = Node,
+ user = User,
+ vhost = VHost,
+ collector = Collector,
+ params = Params}) ->
+ [self(), Node, User, VHost, Collector, Params].
+
+do(_Method, _State) ->
+ ok.
+
+handle_message({force_event_refresh, Ref}, State = #state{node = Node}) ->
+ rpc:call(Node, rabbit_event, notify,
+ [connection_created, connection_info(State), Ref]),
+ {ok, State};
+handle_message(closing_timeout, State = #state{closing_reason = Reason}) ->
+ {stop, {closing_timeout, Reason}, State};
+handle_message({'DOWN', _MRef, process, _ConnSup, shutdown}, State) ->
+ {stop, {shutdown, node_down}, State};
+handle_message({'DOWN', _MRef, process, _ConnSup, Reason}, State) ->
+ {stop, {remote_node_down, Reason}, State};
+handle_message({'EXIT', Pid, Reason}, State) ->
+ {stop, rabbit_misc:format("stopping because dependent process ~p died: ~p", [Pid, Reason]), State};
+handle_message(Msg, State) ->
+ {stop, {unexpected_msg, Msg}, State}.
+
+closing(_ChannelCloseType, Reason, State) ->
+ {ok, State#state{closing_reason = Reason}}.
+
+channels_terminated(State = #state{closing_reason = Reason,
+ collector = Collector}) ->
+ rabbit_queue_collector:delete_all(Collector),
+ {stop, {shutdown, Reason}, State}.
+
+terminate(_Reason, #state{node = Node} = State) ->
+ rpc:call(Node, rabbit_direct, disconnect,
+ [self(), [{pid, self()},
+ {node, Node},
+ {name, i(name, State)}]]),
+ ok.
+
+i(type, _State) -> direct;
+i(pid, _State) -> self();
+
+%% Mandatory connection parameters
+
+i(node, #state{node = N}) -> N;
+i(user, #state{params = P}) -> P#amqp_params_direct.username;
+i(user_who_performed_action, St) -> i(user, St);
+i(vhost, #state{params = P}) -> P#amqp_params_direct.virtual_host;
+i(client_properties, #state{params = P}) ->
+ P#amqp_params_direct.client_properties;
+i(connected_at, #state{connected_at = T}) -> T;
+
+%%
+%% Optional adapter info
+%%
+
+%% adapter_info can be undefined e.g. when we were
+%% not granted access to a vhost
+i(_Key, #state{adapter_info = undefined}) -> unknown;
+i(protocol, #state{adapter_info = I}) -> I#amqp_adapter_info.protocol;
+i(host, #state{adapter_info = I}) -> I#amqp_adapter_info.host;
+i(port, #state{adapter_info = I}) -> I#amqp_adapter_info.port;
+i(peer_host, #state{adapter_info = I}) -> I#amqp_adapter_info.peer_host;
+i(peer_port, #state{adapter_info = I}) -> I#amqp_adapter_info.peer_port;
+i(name, #state{adapter_info = I}) -> I#amqp_adapter_info.name;
+i(internal_user, #state{user = U}) -> U;
+i(Item, _State) -> throw({bad_argument, Item}).
+
+info_keys() ->
+ ?INFO_KEYS.
+
+infos(Items, State) ->
+ [{Item, i(Item, State)} || Item <- Items].
+
+connection_info(State = #state{adapter_info = I}) ->
+ infos(?CREATION_EVENT_KEYS, State) ++ I#amqp_adapter_info.additional_info.
+
+connect(Params = #amqp_params_direct{username = Username,
+ password = Password,
+ node = Node,
+ adapter_info = Info,
+ virtual_host = VHost},
+ SIF, _TypeSup, State) ->
+ State1 = State#state{node = Node,
+ vhost = VHost,
+ params = Params,
+ adapter_info = ensure_adapter_info(Info),
+ connected_at =
+ os:system_time(milli_seconds)},
+ DecryptedPassword = credentials_obfuscation:decrypt(Password),
+ case rpc:call(Node, rabbit_direct, connect,
+ [{Username, DecryptedPassword}, VHost, ?PROTOCOL, self(),
+ connection_info(State1)]) of
+ {ok, {User, ServerProperties}} ->
+ {ok, ChMgr, Collector} = SIF(i(name, State1)),
+ State2 = State1#state{user = User,
+ collector = Collector},
+ %% There's no real connection-level process on the remote
+ %% node for us to monitor or link to, but we want to
+ %% detect connection death if the remote node goes down
+ %% when there are no channels. So we monitor the
+ %% supervisor; that way we find out if the node goes down
+ %% or the rabbit app stops.
+ erlang:monitor(process, {rabbit_direct_client_sup, Node}),
+ {ok, {ServerProperties, 0, ChMgr, State2}};
+ {error, _} = E ->
+ E;
+ {badrpc, nodedown} ->
+ {error, {nodedown, Node}}
+ end.
+
+ensure_adapter_info(none) ->
+ ensure_adapter_info(#amqp_adapter_info{});
+
+ensure_adapter_info(A = #amqp_adapter_info{protocol = unknown}) ->
+ ensure_adapter_info(A#amqp_adapter_info{
+ protocol = {'Direct', ?PROTOCOL:version()}});
+
+ensure_adapter_info(A = #amqp_adapter_info{name = unknown}) ->
+ Name = list_to_binary(rabbit_misc:pid_to_string(self())),
+ ensure_adapter_info(A#amqp_adapter_info{name = Name});
+
+ensure_adapter_info(Info) -> Info.
+
+socket_adapter_info(Sock, Protocol) ->
+ {PeerHost, PeerPort, Host, Port} =
+ case rabbit_net:socket_ends(Sock, inbound) of
+ {ok, Res} -> Res;
+ _ -> {unknown, unknown, unknown, unknown}
+ end,
+ Name = case rabbit_net:connection_string(Sock, inbound) of
+ {ok, Res1} -> Res1;
+ _Error -> "(unknown)"
+ end,
+ #amqp_adapter_info{protocol = Protocol,
+ name = list_to_binary(Name),
+ host = Host,
+ port = Port,
+ peer_host = PeerHost,
+ peer_port = PeerPort,
+ additional_info = maybe_ssl_info(Sock)}.
+
+maybe_ssl_info(Sock) ->
+ RealSocket = rabbit_net:unwrap_socket(Sock),
+ case rabbit_net:is_ssl(RealSocket) of
+ true -> [{ssl, true}] ++ ssl_info(RealSocket) ++ ssl_cert_info(RealSocket);
+ false -> [{ssl, false}]
+ end.
+
+ssl_info(Sock) ->
+ {Protocol, KeyExchange, Cipher, Hash} =
+ case rabbit_net:ssl_info(Sock) of
+ {ok, Infos} ->
+ {_, P} = lists:keyfind(protocol, 1, Infos),
+ #{cipher := C,
+ key_exchange := K,
+ mac := H} = proplists:get_value(
+ selected_cipher_suite, Infos),
+ {P, K, C, H};
+ _ ->
+ {unknown, unknown, unknown, unknown}
+ end,
+ [{ssl_protocol, Protocol},
+ {ssl_key_exchange, KeyExchange},
+ {ssl_cipher, Cipher},
+ {ssl_hash, Hash}].
+
+ssl_cert_info(Sock) ->
+ case rabbit_net:peercert(Sock) of
+ {ok, Cert} ->
+ [{peer_cert_issuer, list_to_binary(
+ rabbit_cert_info:issuer(Cert))},
+ {peer_cert_subject, list_to_binary(
+ rabbit_cert_info:subject(Cert))},
+ {peer_cert_validity, list_to_binary(
+ rabbit_cert_info:validity(Cert))}];
+ _ ->
+ []
+ end.
diff --git a/deps/amqp_client/src/amqp_direct_consumer.erl b/deps/amqp_client/src/amqp_direct_consumer.erl
new file mode 100644
index 0000000000..74517a03c8
--- /dev/null
+++ b/deps/amqp_client/src/amqp_direct_consumer.erl
@@ -0,0 +1,103 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+%% @doc This module is an implementation of the amqp_gen_consumer
+%% behaviour and can be used as part of the Consumer parameter when
+%% opening AMQP channels.
+%% <br/>
+%% <br/>
+%% The Consumer parameter for this implementation is {{@module},
+%% [ConsumerPid]@}, where ConsumerPid is a process that will receive
+%% queue subscription-related messages.<br/>
+%% <br/>
+%% This consumer implementation causes the channel to send to the
+%% ConsumerPid all basic.consume, basic.consume_ok, basic.cancel,
+%% basic.cancel_ok and basic.deliver messages received from the
+%% server.
+%% <br/>
+%% <br/>
+%% In addition, this consumer implementation monitors the ConsumerPid
+%% and exits with the same shutdown reason when it dies. 'DOWN'
+%% messages from other sources are passed to ConsumerPid.
+%% <br/>
+%% Warning! It is not recommended to rely on a consumer to kill off the
+%% channel (through the exit signal), as that may cause messages to be lost.
+%% Always use amqp_channel:close/{1,3} for a clean shutdown.<br/>
+%% <br/>
+%% This module has no public functions.
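+%% <br/>
+%% Illustrative usage sketch (consumer events are forwarded to the
+%% calling process):
+%%   {ok, Channel} = amqp_connection:open_channel(
+%%                     Connection, {amqp_direct_consumer, [self()]}).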
+-module(amqp_direct_consumer).
+
+-include("amqp_gen_consumer_spec.hrl").
+
+-behaviour(amqp_gen_consumer).
+
+-export([init/1, handle_consume_ok/3, handle_consume/3, handle_cancel_ok/3,
+ handle_cancel/2, handle_server_cancel/2,
+ handle_deliver/3, handle_deliver/4,
+ handle_info/2, handle_call/3, terminate/2]).
+
+%%---------------------------------------------------------------------------
+%% amqp_gen_consumer callbacks
+%%---------------------------------------------------------------------------
+
+%% @private
+init([ConsumerPid]) ->
+ erlang:monitor(process, ConsumerPid),
+ {ok, ConsumerPid}.
+
+%% @private
+handle_consume(M, A, C) ->
+ C ! {M, A},
+ {ok, C}.
+
+%% @private
+handle_consume_ok(M, _, C) ->
+ C ! M,
+ {ok, C}.
+
+%% @private
+handle_cancel(M, C) ->
+ C ! M,
+ {ok, C}.
+
+%% @private
+handle_cancel_ok(M, _, C) ->
+ C ! M,
+ {ok, C}.
+
+%% @private
+handle_server_cancel(M, C) ->
+ C ! {server_cancel, M},
+ {ok, C}.
+
+%% @private
+handle_deliver(M, A, C) ->
+ C ! {M, A},
+ {ok, C}.
+
+%% @private
+handle_deliver(M, A, DeliveryCtx, C) ->
+ C ! {M, A, DeliveryCtx},
+ {ok, C}.
+
+
+%% @private
+handle_info({'DOWN', _MRef, process, C, normal}, C) ->
+    %% The consumer process exited normally; nothing to do.
+ {ok, C};
+handle_info({'DOWN', _MRef, process, C, Info}, C) ->
+ {error, {consumer_died, Info}, C};
+handle_info({'DOWN', MRef, process, Pid, Info}, C) ->
+ C ! {'DOWN', MRef, process, Pid, Info},
+ {ok, C}.
+
+%% @private
+handle_call(M, A, C) ->
+ C ! {M, A},
+ {reply, ok, C}.
+
+%% @private
+terminate(_Reason, C) ->
+ C.
diff --git a/deps/amqp_client/src/amqp_gen_connection.erl b/deps/amqp_client/src/amqp_gen_connection.erl
new file mode 100644
index 0000000000..5c826a5b5f
--- /dev/null
+++ b/deps/amqp_client/src/amqp_gen_connection.erl
@@ -0,0 +1,387 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+%% @private
+-module(amqp_gen_connection).
+
+-include("amqp_client_internal.hrl").
+
+-behaviour(gen_server).
+
+-export([start_link/2, connect/1, open_channel/3, hard_error_in_channel/3,
+ channel_internal_error/3, server_misbehaved/2, channels_terminated/1,
+ close/3, server_close/2, info/2, info_keys/0, info_keys/1,
+ register_blocked_handler/2, update_secret/2]).
+-export([behaviour_info/1]).
+-export([init/1, terminate/2, code_change/3, handle_call/3, handle_cast/2,
+ handle_info/2]).
+
+-define(INFO_KEYS, [server_properties, is_closing, amqp_params, num_channels,
+ channel_max]).
+
+-record(state, {module,
+ module_state,
+ channels_manager,
+ amqp_params,
+ channel_max,
+ server_properties,
+ %% connection.block, connection.unblock handler
+ block_handler,
+ closing = false %% #closing{} | false
+ }).
+
+-record(closing, {reason,
+ close,
+ from = none}).
+
+%%---------------------------------------------------------------------------
+%% Interface
+%%---------------------------------------------------------------------------
+
+start_link(TypeSup, AMQPParams) ->
+ gen_server:start_link(?MODULE, {TypeSup, AMQPParams}, []).
+
+connect(Pid) ->
+ gen_server:call(Pid, connect, amqp_util:call_timeout()).
+
+open_channel(Pid, ProposedNumber, Consumer) ->
+ case gen_server:call(Pid,
+ {command, {open_channel, ProposedNumber, Consumer}},
+ amqp_util:call_timeout()) of
+ {ok, ChannelPid} -> ok = amqp_channel:open(ChannelPid),
+ {ok, ChannelPid};
+ Error -> Error
+ end.
+
+hard_error_in_channel(Pid, ChannelPid, Reason) ->
+ gen_server:cast(Pid, {hard_error_in_channel, ChannelPid, Reason}).
+
+channel_internal_error(Pid, ChannelPid, Reason) ->
+ gen_server:cast(Pid, {channel_internal_error, ChannelPid, Reason}).
+
+server_misbehaved(Pid, AmqpError) ->
+ gen_server:cast(Pid, {server_misbehaved, AmqpError}).
+
+channels_terminated(Pid) ->
+ gen_server:cast(Pid, channels_terminated).
+
+close(Pid, Close, Timeout) ->
+ gen_server:call(Pid, {command, {close, Close, Timeout}}, amqp_util:call_timeout()).
+
+server_close(Pid, Close) ->
+ gen_server:cast(Pid, {server_close, Close}).
+
+update_secret(Pid, Method) ->
+ gen_server:call(Pid, {command, {update_secret, Method}}, amqp_util:call_timeout()).
+
+info(Pid, Items) ->
+ gen_server:call(Pid, {info, Items}, amqp_util:call_timeout()).
+
+info_keys() ->
+ ?INFO_KEYS.
+
+info_keys(Pid) ->
+ gen_server:call(Pid, info_keys, amqp_util:call_timeout()).
+
+%%---------------------------------------------------------------------------
+%% Behaviour
+%%---------------------------------------------------------------------------
+
+behaviour_info(callbacks) ->
+ [
+ %% init() -> {ok, InitialState}
+ {init, 0},
+
+ %% terminate(Reason, FinalState) -> Ignored
+ {terminate, 2},
+
+ %% connect(AmqpParams, SIF, TypeSup, State) ->
+ %% {ok, ConnectParams} | {closing, ConnectParams, AmqpError, Reply} |
+ %% {error, Error}
+ %% where
+ %% ConnectParams = {ServerProperties, ChannelMax, ChMgr, NewState}
+ {connect, 4},
+
+ %% do(Method, State) -> Ignored
+ {do, 2},
+
+ %% open_channel_args(State) -> OpenChannelArgs
+ {open_channel_args, 1},
+
+ %% i(InfoItem, State) -> Info
+ {i, 2},
+
+ %% info_keys() -> [InfoItem]
+ {info_keys, 0},
+
+ %% CallbackReply = {ok, NewState} | {stop, Reason, FinalState}
+
+ %% handle_message(Message, State) -> CallbackReply
+ {handle_message, 2},
+
+ %% closing(flush|abrupt, Reason, State) -> CallbackReply
+ {closing, 3},
+
+ %% channels_terminated(State) -> CallbackReply
+ {channels_terminated, 1}
+ ];
+behaviour_info(_Other) ->
+ undefined.
+
+callback(Function, Params, State = #state{module = Mod,
+ module_state = MState}) ->
+ case erlang:apply(Mod, Function, Params ++ [MState]) of
+ {ok, NewMState} -> {noreply,
+ State#state{module_state = NewMState}};
+ {stop, Reason, NewMState} -> {stop, Reason,
+ State#state{module_state = NewMState}}
+ end.
+
+%%---------------------------------------------------------------------------
+%% gen_server callbacks
+%%---------------------------------------------------------------------------
+
+init({TypeSup, AMQPParams}) ->
+ %% Trapping exits since we need to make sure that the `terminate/2' is
+ %% called in the case of direct connection (it does not matter for a network
+ %% connection). See bug25116.
+ process_flag(trap_exit, true),
+ %% connect() has to be called first, so we can use a special state here
+ {ok, {TypeSup, AMQPParams}}.
+
+handle_call(connect, _From, {TypeSup, AMQPParams}) ->
+ {Type, Mod} = amqp_connection_type_sup:type_module(AMQPParams),
+ {ok, MState} = Mod:init(),
+ SIF = amqp_connection_type_sup:start_infrastructure_fun(
+ TypeSup, self(), Type),
+ State = #state{module = Mod,
+ module_state = MState,
+ amqp_params = AMQPParams,
+ block_handler = none},
+ case Mod:connect(AMQPParams, SIF, TypeSup, MState) of
+ {ok, Params} ->
+ {reply, {ok, self()}, after_connect(Params, State)};
+ {closing, #amqp_error{name = access_refused} = AmqpError, Error} ->
+ {stop, {shutdown, AmqpError}, Error, State};
+ {closing, Params, #amqp_error{} = AmqpError, Error} ->
+ server_misbehaved(self(), AmqpError),
+ {reply, Error, after_connect(Params, State)};
+ {error, _} = Error ->
+ {stop, {shutdown, Error}, Error, State}
+ end;
+handle_call({command, Command}, From, State = #state{closing = false}) ->
+ handle_command(Command, From, State);
+handle_call({command, _Command}, _From, State) ->
+ {reply, closing, State};
+handle_call({info, Items}, _From, State) ->
+ {reply, [{Item, i(Item, State)} || Item <- Items], State};
+handle_call(info_keys, _From, State = #state{module = Mod}) ->
+ {reply, ?INFO_KEYS ++ Mod:info_keys(), State}.
+
+after_connect({ServerProperties, ChannelMax, ChMgr, NewMState}, State) ->
+ case ChannelMax of
+ 0 -> ok;
+ _ -> amqp_channels_manager:set_channel_max(ChMgr, ChannelMax)
+ end,
+ State1 = State#state{server_properties = ServerProperties,
+ channel_max = ChannelMax,
+ channels_manager = ChMgr,
+ module_state = NewMState},
+ rabbit_misc:store_proc_name(?MODULE, i(name, State1)),
+ State1.
+
+handle_cast({method, Method, none, noflow}, State) ->
+ handle_method(Method, State);
+handle_cast(channels_terminated, State) ->
+ handle_channels_terminated(State);
+handle_cast({hard_error_in_channel, _Pid, Reason}, State) ->
+ server_initiated_close(Reason, State);
+handle_cast({channel_internal_error, Pid, Reason}, State) ->
+ ?LOG_WARN("Connection (~p) closing: internal error in channel (~p): ~p~n",
+ [self(), Pid, Reason]),
+ internal_error(Pid, Reason, State);
+handle_cast({server_misbehaved, AmqpError}, State) ->
+ server_misbehaved_close(AmqpError, State);
+handle_cast({server_close, #'connection.close'{} = Close}, State) ->
+ server_initiated_close(Close, State);
+handle_cast({register_blocked_handler, HandlerPid}, State) ->
+ Ref = erlang:monitor(process, HandlerPid),
+ {noreply, State#state{block_handler = {HandlerPid, Ref}}}.
+
+%% @private
+handle_info({'DOWN', _, process, BlockHandler, Reason},
+ State = #state{block_handler = {BlockHandler, _Ref}}) ->
+ ?LOG_WARN("Connection (~p): Unregistering connection.{blocked,unblocked} handler ~p because it died. "
+ "Reason: ~p~n", [self(), BlockHandler, Reason]),
+ {noreply, State#state{block_handler = none}};
+handle_info({'EXIT', BlockHandler, Reason},
+ State = #state{block_handler = {BlockHandler, Ref}}) ->
+ ?LOG_WARN("Connection (~p): Unregistering connection.{blocked,unblocked} handler ~p because it died. "
+ "Reason: ~p~n", [self(), BlockHandler, Reason]),
+ erlang:demonitor(Ref, [flush]),
+ {noreply, State#state{block_handler = none}};
+%% propagate the exit to the module that will stop with a sensible reason logged
+handle_info({'EXIT', _Pid, _Reason} = Info, State) ->
+ callback(handle_message, [Info], State);
+handle_info(Info, State) ->
+ callback(handle_message, [Info], State).
+
+terminate(Reason, #state{module = Mod, module_state = MState}) ->
+ Mod:terminate(Reason, MState).
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+%%---------------------------------------------------------------------------
+%% Infos
+%%---------------------------------------------------------------------------
+
+i(server_properties, State) -> State#state.server_properties;
+i(is_closing, State) -> State#state.closing =/= false;
+i(amqp_params, State) -> State#state.amqp_params;
+i(channel_max, State) -> State#state.channel_max;
+i(num_channels, State) -> amqp_channels_manager:num_channels(
+ State#state.channels_manager);
+i(Item, #state{module = Mod, module_state = MState}) -> Mod:i(Item, MState).
+
+%%---------------------------------------------------------------------------
+%% connection.blocked, connection.unblocked
+%%---------------------------------------------------------------------------
+
+register_blocked_handler(Pid, HandlerPid) ->
+ gen_server:cast(Pid, {register_blocked_handler, HandlerPid}).
+
+%%---------------------------------------------------------------------------
+%% Command handling
+%%---------------------------------------------------------------------------
+
+handle_command({open_channel, ProposedNumber, Consumer}, _From,
+ State = #state{channels_manager = ChMgr,
+ module = Mod,
+ module_state = MState}) ->
+ {reply, amqp_channels_manager:open_channel(ChMgr, ProposedNumber, Consumer,
+ Mod:open_channel_args(MState)),
+ State};
+handle_command({close, #'connection.close'{} = Close, Timeout}, From, State) ->
+ app_initiated_close(Close, From, Timeout, State);
+handle_command({update_secret, #'connection.update_secret'{} = Method}, _From,
+ State = #state{module = Mod,
+ module_state = MState}) ->
+ {reply, Mod:do(Method, MState), State}.
+
+%%---------------------------------------------------------------------------
+%% Handling methods from broker
+%%---------------------------------------------------------------------------
+
+handle_method(#'connection.close'{} = Close, State) ->
+ server_initiated_close(Close, State);
+handle_method(#'connection.close_ok'{}, State = #state{closing = Closing}) ->
+ case Closing of #closing{from = none} -> ok;
+ #closing{from = From} -> gen_server:reply(From, ok)
+ end,
+ {stop, {shutdown, closing_to_reason(Closing)}, State};
+handle_method(#'connection.blocked'{} = Blocked, State = #state{block_handler = BlockHandler}) ->
+ case BlockHandler of none -> ok;
+ {Pid, _Ref} -> Pid ! Blocked
+ end,
+ {noreply, State};
+handle_method(#'connection.unblocked'{} = Unblocked, State = #state{block_handler = BlockHandler}) ->
+ case BlockHandler of none -> ok;
+ {Pid, _Ref} -> Pid ! Unblocked
+ end,
+ {noreply, State};
+handle_method(#'connection.update_secret_ok'{} = _Method, State) ->
+ {noreply, State};
+handle_method(Other, State) ->
+ server_misbehaved_close(#amqp_error{name = command_invalid,
+ explanation = "unexpected method on "
+ "channel 0",
+ method = element(1, Other)},
+ State).
+
+%%---------------------------------------------------------------------------
+%% Closing
+%%---------------------------------------------------------------------------
+
+app_initiated_close(Close, From, Timeout, State) ->
+ case Timeout of
+ infinity -> ok;
+ _ -> erlang:send_after(Timeout, self(), closing_timeout)
+ end,
+ set_closing_state(flush, #closing{reason = app_initiated_close,
+ close = Close,
+ from = From}, State).
+
+internal_error(Pid, Reason, State) ->
+ Str = list_to_binary(rabbit_misc:format("~p:~p", [Pid, Reason])),
+ Close = #'connection.close'{reply_text = Str,
+ reply_code = ?INTERNAL_ERROR,
+ class_id = 0,
+ method_id = 0},
+ set_closing_state(abrupt, #closing{reason = internal_error, close = Close},
+ State).
+
+server_initiated_close(Close, State) ->
+ ?LOG_WARN("Connection (~p) closing: received hard error ~p "
+ "from server~n", [self(), Close]),
+ set_closing_state(abrupt, #closing{reason = server_initiated_close,
+ close = Close}, State).
+
+server_misbehaved_close(AmqpError, State) ->
+ ?LOG_WARN("Connection (~p) closing: server misbehaved: ~p~n",
+ [self(), AmqpError]),
+ {0, Close} = rabbit_binary_generator:map_exception(0, AmqpError, ?PROTOCOL),
+ set_closing_state(abrupt, #closing{reason = server_misbehaved,
+ close = Close}, State).
+
+set_closing_state(ChannelCloseType, NewClosing,
+ State = #state{channels_manager = ChMgr,
+ closing = CurClosing}) ->
+ ResClosing =
+ case closing_priority(NewClosing) =< closing_priority(CurClosing) of
+ true -> NewClosing;
+ false -> CurClosing
+ end,
+ ClosingReason = closing_to_reason(ResClosing),
+ amqp_channels_manager:signal_connection_closing(ChMgr, ChannelCloseType,
+ ClosingReason),
+ callback(closing, [ChannelCloseType, ClosingReason],
+ State#state{closing = ResClosing}).
+
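+%% A lower number means a higher priority; set_closing_state/3 keeps the
+%% closing record with the smaller value (note the =< comparison above).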
+closing_priority(false) -> 99;
+closing_priority(#closing{reason = app_initiated_close}) -> 4;
+closing_priority(#closing{reason = internal_error}) -> 3;
+closing_priority(#closing{reason = server_misbehaved}) -> 2;
+closing_priority(#closing{reason = server_initiated_close}) -> 1.
+
+closing_to_reason(#closing{close = #'connection.close'{reply_code = 200}}) ->
+ normal;
+closing_to_reason(#closing{reason = Reason,
+ close = #'connection.close'{reply_code = Code,
+ reply_text = Text}}) ->
+ {Reason, Code, Text};
+closing_to_reason(#closing{reason = Reason,
+ close = {Reason, _Code, _Text} = Close}) ->
+ Close.
+
+handle_channels_terminated(State = #state{closing = Closing,
+ module = Mod,
+ module_state = MState}) ->
+ #closing{reason = Reason, close = Close, from = From} = Closing,
+ case Reason of
+ server_initiated_close ->
+ Mod:do(#'connection.close_ok'{}, MState);
+ _ ->
+ Mod:do(Close, MState)
+ end,
+ case callback(channels_terminated, [], State) of
+ {stop, _, _} = Stop -> case From of none -> ok;
+ _ -> gen_server:reply(From, ok)
+ end,
+ Stop;
+ Other -> Other
+ end.
diff --git a/deps/amqp_client/src/amqp_gen_consumer.erl b/deps/amqp_client/src/amqp_gen_consumer.erl
new file mode 100644
index 0000000000..9caea78d8a
--- /dev/null
+++ b/deps/amqp_client/src/amqp_gen_consumer.erl
@@ -0,0 +1,284 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+%% @doc A behaviour module for implementing consumers for
+%% amqp_channel. To specify a consumer implementation for a channel,
+%% use amqp_connection:open_channel/{2,3}.
+%% <br/>
+%% All callbacks are called within the gen_consumer process. <br/>
+%% <br/>
+%% See comments in amqp_gen_consumer.erl source file for documentation
+%% on the callback functions.
+%% <br/>
+%% Note that making calls to the channel from the callback module will
+%% result in deadlock.
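+%% <br/>
+%% A condensed, illustrative sketch of a callback module (a real
+%% implementation must define every callback listed in this file):
+%%   -module(my_consumer).
+%%   -behaviour(amqp_gen_consumer).
+%%   init([Parent]) -> {ok, Parent}.
+%%   handle_deliver(Deliver, Msg, Parent) ->
+%%       Parent ! {Deliver, Msg}, {ok, Parent}.
+%%   handle_info(_Info, Parent) -> {ok, Parent}.
+%%   terminate(_Reason, Parent) -> Parent.
+%%   (remaining callbacks omitted; they follow the same ok_error() shape)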
+-module(amqp_gen_consumer).
+
+-include("amqp_client.hrl").
+
+-behaviour(gen_server2).
+
+-export([start_link/3, call_consumer/2, call_consumer/3, call_consumer/4]).
+-export([behaviour_info/1]).
+-export([init/1, terminate/2, code_change/3, handle_call/3, handle_cast/2,
+ handle_info/2, prioritise_info/3]).
+
+-record(state, {module,
+ module_state}).
+
+%%---------------------------------------------------------------------------
+%% Interface
+%%---------------------------------------------------------------------------
+
+%% @type ok_error() = {ok, state()} | {error, reason(), state()}.
+%% Denotes a successful or an error return from a consumer module call.
+
+start_link(ConsumerModule, ExtraParams, Identity) ->
+ gen_server2:start_link(
+ ?MODULE, [ConsumerModule, ExtraParams, Identity], []).
+
+%% @spec (Consumer, Msg) -> ok
+%% where
+%% Consumer = pid()
+%% Msg = any()
+%%
+%% @doc This function is used to perform arbitrary calls into the
+%% consumer module.
+call_consumer(Pid, Msg) ->
+ gen_server2:call(Pid, {consumer_call, Msg}, amqp_util:call_timeout()).
+
+%% @spec (Consumer, Method, Args) -> ok
+%% where
+%% Consumer = pid()
+%% Method = amqp_method()
+%% Args = any()
+%%
+%% @doc This function is used by amqp_channel to forward received
+%% methods and deliveries to the consumer module.
+call_consumer(Pid, Method, Args) ->
+ gen_server2:call(Pid, {consumer_call, Method, Args}, amqp_util:call_timeout()).
+
+call_consumer(Pid, Method, Args, DeliveryCtx) ->
+ gen_server2:call(Pid, {consumer_call, Method, Args, DeliveryCtx}, amqp_util:call_timeout()).
+
+%%---------------------------------------------------------------------------
+%% Behaviour
+%%---------------------------------------------------------------------------
+
+%% @private
+behaviour_info(callbacks) ->
+ [
+ %% init(Args) -> {ok, InitialState} | {stop, Reason} | ignore
+ %% where
+ %% Args = [any()]
+ %% InitialState = state()
+ %% Reason = term()
+ %%
+ %% This callback is invoked by the channel, when it starts
+ %% up. Use it to initialize the state of the consumer. In case of
+ %% an error, return {stop, Reason} or ignore.
+ {init, 1},
+
+ %% handle_consume(Consume, Sender, State) -> ok_error()
+ %% where
+ %% Consume = #'basic.consume'{}
+ %% Sender = pid()
+ %% State = state()
+ %%
+ %% This callback is invoked by the channel before a basic.consume
+ %% is sent to the server.
+ {handle_consume, 3},
+
+ %% handle_consume_ok(ConsumeOk, Consume, State) -> ok_error()
+ %% where
+ %% ConsumeOk = #'basic.consume_ok'{}
+ %% Consume = #'basic.consume'{}
+ %% State = state()
+ %%
+ %% This callback is invoked by the channel every time a
+ %% basic.consume_ok is received from the server. Consume is the original
+ %% method sent out to the server - it can be used to associate the
+ %% call with the response.
+ {handle_consume_ok, 3},
+
+ %% handle_cancel(Cancel, State) -> ok_error()
+ %% where
+ %% Cancel = #'basic.cancel'{}
+ %% State = state()
+ %%
+ %% This callback is invoked by the channel every time a basic.cancel
+ %% is sent to the server.
+ {handle_cancel, 2},
+
+ %% handle_cancel_ok(CancelOk, Cancel, State) -> ok_error()
+ %% where
+ %% CancelOk = #'basic.cancel_ok'{}
+ %% Cancel = #'basic.cancel'{}
+ %% State = state()
+ %%
+ %% This callback is invoked by the channel every time a basic.cancel_ok
+ %% is received from the server.
+ {handle_cancel_ok, 3},
+
+ %% handle_server_cancel(Cancel, State) -> ok_error()
+ %% where
+ %% Cancel = #'basic.cancel'{}
+ %% State = state()
+ %%
+ %% This callback is invoked by the channel every time a basic.cancel
+ %% is received from the server.
+ {handle_server_cancel, 2},
+
+ %% handle_deliver(Deliver, Message, State) -> ok_error()
+ %% where
+ %% Deliver = #'basic.deliver'{}
+ %% Message = #amqp_msg{}
+ %% State = state()
+ %%
+ %% This callback is invoked by the channel every time a basic.deliver
+ %% is received from the server.
+ {handle_deliver, 3},
+
+ %% handle_deliver(Deliver, Message,
+ %% DeliveryCtx, State) -> ok_error()
+ %% where
+ %% Deliver = #'basic.deliver'{}
+ %% Message = #amqp_msg{}
+ %% DeliveryCtx = {pid(), pid(), pid()}
+ %% State = state()
+ %%
+ %% This callback is invoked by the channel every time a basic.deliver
+ %% is received from the server. Only relevant for channels that use
+ %% direct client connection and manual flow control.
+ {handle_deliver, 4},
+
+ %% handle_info(Info, State) -> ok_error()
+ %% where
+ %% Info = any()
+ %% State = state()
+ %%
+    %% This callback is invoked when the consumer process receives a
+    %% message.
+ {handle_info, 2},
+
+ %% handle_call(Msg, From, State) -> {reply, Reply, NewState} |
+ %% {noreply, NewState} |
+ %% {error, Reason, NewState}
+ %% where
+ %% Msg = any()
+ %% From = any()
+ %% Reply = any()
+ %% State = state()
+ %% NewState = state()
+ %%
+ %% This callback is invoked by the channel when calling
+ %% amqp_channel:call_consumer/2. Reply is the term that
+ %% amqp_channel:call_consumer/2 will return. If the callback
+    %% returns {noreply, _}, then the caller of
+    %% amqp_channel:call_consumer/2 and the channel remain blocked
+ %% until gen_server2:reply/2 is used with the provided From as
+ %% the first argument.
+ {handle_call, 3},
+
+ %% terminate(Reason, State) -> any()
+ %% where
+ %% Reason = any()
+ %% State = state()
+ %%
+ %% This callback is invoked by the channel after it has shut down and
+ %% just before its process exits.
+ {terminate, 2}
+ ];
+behaviour_info(_Other) ->
+ undefined.
+
+%%---------------------------------------------------------------------------
+%% gen_server2 callbacks
+%%---------------------------------------------------------------------------
+
+init([ConsumerModule, ExtraParams, Identity]) ->
+ ?store_proc_name(Identity),
+ case ConsumerModule:init(ExtraParams) of
+ {ok, MState} ->
+ {ok, #state{module = ConsumerModule, module_state = MState}};
+ {stop, Reason} ->
+ {stop, Reason};
+ ignore ->
+ ignore
+ end.
+
+prioritise_info({'DOWN', _MRef, process, _Pid, _Info}, _Len, _State) -> 1;
+prioritise_info(_, _Len, _State) -> 0.
+
+consumer_call_reply(Return, State) ->
+ case Return of
+ {ok, NewMState} ->
+ {reply, ok, State#state{module_state = NewMState}};
+ {error, Reason, NewMState} ->
+ {stop, {error, Reason}, {error, Reason},
+ State#state{module_state = NewMState}}
+ end.
+
+handle_call({consumer_call, Msg}, From,
+ State = #state{module = ConsumerModule,
+ module_state = MState}) ->
+ case ConsumerModule:handle_call(Msg, From, MState) of
+ {noreply, NewMState} ->
+ {noreply, State#state{module_state = NewMState}};
+ {reply, Reply, NewMState} ->
+ {reply, Reply, State#state{module_state = NewMState}};
+ {error, Reason, NewMState} ->
+ {stop, {error, Reason}, {error, Reason},
+ State#state{module_state = NewMState}}
+ end;
+handle_call({consumer_call, Method, Args}, _From,
+ State = #state{module = ConsumerModule,
+ module_state = MState}) ->
+ Return =
+ case Method of
+ #'basic.consume'{} ->
+ ConsumerModule:handle_consume(Method, Args, MState);
+ #'basic.consume_ok'{} ->
+ ConsumerModule:handle_consume_ok(Method, Args, MState);
+ #'basic.cancel'{} ->
+ case Args of
+ none -> %% server-sent
+ ConsumerModule:handle_server_cancel(Method, MState);
+ Pid when is_pid(Pid) -> %% client-sent
+ ConsumerModule:handle_cancel(Method, MState)
+ end;
+ #'basic.cancel_ok'{} ->
+ ConsumerModule:handle_cancel_ok(Method, Args, MState);
+ #'basic.deliver'{} ->
+ ConsumerModule:handle_deliver(Method, Args, MState)
+ end,
+ consumer_call_reply(Return, State);
+
+%% only supposed to be used with basic.deliver
+handle_call({consumer_call, Method = #'basic.deliver'{}, Args, DeliveryCtx}, _From,
+ State = #state{module = ConsumerModule,
+ module_state = MState}) ->
+ Return = ConsumerModule:handle_deliver(Method, Args, DeliveryCtx, MState),
+ consumer_call_reply(Return, State).
+
+handle_cast(_What, State) ->
+ {noreply, State}.
+
+handle_info(Info, State = #state{module_state = MState,
+ module = ConsumerModule}) ->
+ case ConsumerModule:handle_info(Info, MState) of
+ {ok, NewMState} ->
+ {noreply, State#state{module_state = NewMState}};
+ {error, Reason, NewMState} ->
+ {stop, {error, Reason}, State#state{module_state = NewMState}}
+ end.
+
+terminate(Reason, #state{module = ConsumerModule, module_state = MState}) ->
+ ConsumerModule:terminate(Reason, MState).
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
diff --git a/deps/amqp_client/src/amqp_main_reader.erl b/deps/amqp_client/src/amqp_main_reader.erl
new file mode 100644
index 0000000000..60cd93d03b
--- /dev/null
+++ b/deps/amqp_client/src/amqp_main_reader.erl
@@ -0,0 +1,179 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+%% @private
+-module(amqp_main_reader).
+
+-include("amqp_client_internal.hrl").
+
+-behaviour(gen_server).
+
+-export([start_link/5, post_init/1]).
+-export([init/1, terminate/2, code_change/3, handle_call/3, handle_cast/2,
+ handle_info/2]).
+
+-record(state, {sock,
+ timer,
+ connection,
+ channels_manager,
+ astate,
+ message = none %% none | {Type, Channel, Length}
+ }).
+
+%%---------------------------------------------------------------------------
+%% Interface
+%%---------------------------------------------------------------------------
+
+start_link(Sock, Connection, ChMgr, AState, ConnName) ->
+ gen_server:start_link(
+ ?MODULE, [Sock, Connection, ConnName, ChMgr, AState], []).
+
+post_init(Reader) ->
+ try
+ gen_server:call(Reader, post_init)
+ catch
+ exit:{timeout, Timeout} ->
+ {error, {timeout, Timeout}}
+ end.
+
+%%---------------------------------------------------------------------------
+%% gen_server callbacks
+%%---------------------------------------------------------------------------
+
+init([Sock, Connection, ConnName, ChMgr, AState]) ->
+ ?store_proc_name(ConnName),
+ State = #state{sock = Sock,
+ connection = Connection,
+ channels_manager = ChMgr,
+ astate = AState,
+ message = none},
+ {ok, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+%% We need to use a call because we are not controlling the socket yet.
+handle_call(post_init, _From, State = #state{sock = Sock}) ->
+ case rabbit_net:setopts(Sock, [{active, once}]) of
+ ok -> {reply, ok, set_timeout(State)};
+ {error, Reason} -> handle_error(Reason, State)
+ end;
+handle_call(Call, From, State) ->
+ {stop, {unexpected_call, Call, From}, State}.
+
+handle_cast(Cast, State) ->
+ {stop, {unexpected_cast, Cast}, State}.
+
+handle_info({Tag, Sock, Data}, State = #state{sock = Sock})
+ when Tag =:= tcp; Tag =:= ssl ->
+ %% Latency hiding: Request next packet first, then process data
+ case rabbit_net:setopts(Sock, [{active, once}]) of
+ ok -> handle_data(Data, set_timeout(State));
+ {error, Reason} -> handle_error(Reason, State)
+ end;
+handle_info({Tag, Sock}, State = #state{sock = Sock})
+ when Tag =:= tcp_closed; Tag =:= ssl_closed ->
+ handle_error(closed, State);
+handle_info({Tag, Sock, Reason}, State = #state{sock = Sock})
+ when Tag =:= tcp_error; Tag =:= ssl_error ->
+ handle_error(Reason, State);
+handle_info({timeout, _TimerRef, idle_timeout}, State) ->
+ handle_error(timeout, State).
+
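+%% AMQP 0-9-1 frames on the wire have the shape
+%% <<Type:8, Channel:16, Length:32, Payload:Length/binary, ?FRAME_END>>,
+%% which is what the clauses below pattern-match on, buffering partial
+%% frames in #state.message until the rest of the data arrives.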
+handle_data(<<Type:8, Channel:16, Length:32, Payload:Length/binary, ?FRAME_END,
+ More/binary>>,
+ #state{message = none} = State) when
+ Type =:= ?FRAME_METHOD; Type =:= ?FRAME_HEADER;
+ Type =:= ?FRAME_BODY; Type =:= ?FRAME_HEARTBEAT ->
+ %% Optimisation for the direct match
+ handle_data(
+ More, process_frame(Type, Channel, Payload, State#state{message = none}));
+handle_data(<<Type:8, Channel:16, Length:32, Data/binary>>,
+ #state{message = none} = State) when
+ Type =:= ?FRAME_METHOD; Type =:= ?FRAME_HEADER;
+ Type =:= ?FRAME_BODY; Type =:= ?FRAME_HEARTBEAT ->
+ {noreply, State#state{message = {Type, Channel, Length, Data}}};
+handle_data(<<"AMQP", A, B, C>>, #state{sock = Sock, message = none} = State) ->
+ {ok, <<D>>} = rabbit_net:sync_recv(Sock, 1),
+ handle_error({refused, {A, B, C, D}}, State);
+handle_data(<<Malformed:7/binary, _Rest/binary>>,
+ #state{message = none} = State) ->
+ handle_error({malformed_header, Malformed}, State);
+handle_data(<<Data/binary>>, #state{message = none} = State) ->
+ {noreply, State#state{message = {expecting_header, Data}}};
+handle_data(Data, #state{message = {Type, Channel, L, OldData}} = State) ->
+ case <<OldData/binary, Data/binary>> of
+ <<Payload:L/binary, ?FRAME_END, More/binary>> ->
+ handle_data(More,
+ process_frame(Type, Channel, Payload,
+ State#state{message = none}));
+ NotEnough ->
+ %% Read in more data from the socket
+ {noreply, State#state{message = {Type, Channel, L, NotEnough}}}
+ end;
+handle_data(Data,
+ #state{message = {expecting_header, Old}} = State) ->
+ handle_data(<<Old/binary, Data/binary>>, State#state{message = none});
+handle_data(<<>>, State) ->
+ {noreply, State}.
+
+%%---------------------------------------------------------------------------
+%% Internal plumbing
+%%---------------------------------------------------------------------------
+
+set_timeout(State0) ->
+ State = cancel_timeout(State0),
+ TimerRef = case amqp_util:call_timeout() of
+ infinity -> undefined;
+ Timeout -> erlang:start_timer(Timeout, self(), idle_timeout)
+ end,
+ State#state{timer=TimerRef}.
+
+cancel_timeout(State=#state{timer=TimerRef}) ->
+ ok = case TimerRef of
+ undefined -> ok;
+ _ -> erlang:cancel_timer(TimerRef, [{async, true}, {info, false}])
+ end,
+ State#state{timer=undefined}.
+
+process_frame(Type, ChNumber, Payload,
+ State = #state{connection = Connection,
+ channels_manager = ChMgr,
+ astate = AState}) ->
+ case rabbit_command_assembler:analyze_frame(Type, Payload, ?PROTOCOL) of
+ heartbeat when ChNumber /= 0 ->
+ amqp_gen_connection:server_misbehaved(
+ Connection,
+ #amqp_error{name = command_invalid,
+ explanation = "heartbeat on non-zero channel"}),
+ State;
+ %% Match heartbeats but don't do anything with them
+ heartbeat ->
+ State;
+ AnalyzedFrame when ChNumber /= 0 ->
+ amqp_channels_manager:pass_frame(ChMgr, ChNumber, AnalyzedFrame),
+ State;
+ AnalyzedFrame ->
+ State#state{astate = amqp_channels_manager:process_channel_frame(
+ AnalyzedFrame, 0, Connection, AState)}
+ end.
+
+handle_error(closed, State = #state{connection = Conn}) ->
+ Conn ! socket_closed,
+ {noreply, State};
+handle_error({refused, Version}, State = #state{connection = Conn}) ->
+ Conn ! {refused, Version},
+ {noreply, State};
+handle_error({malformed_header, Version}, State = #state{connection = Conn}) ->
+ Conn ! {malformed_header, Version},
+ {noreply, State};
+handle_error(Reason, State = #state{connection = Conn}) ->
+ Conn ! {socket_error, Reason},
+ {stop, {socket_error, Reason}, State}.
diff --git a/deps/amqp_client/src/amqp_network_connection.erl b/deps/amqp_client/src/amqp_network_connection.erl
new file mode 100644
index 0000000000..975ea591da
--- /dev/null
+++ b/deps/amqp_client/src/amqp_network_connection.erl
@@ -0,0 +1,380 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+%% @private
+-module(amqp_network_connection).
+
+-include("amqp_client_internal.hrl").
+
+-behaviour(amqp_gen_connection).
+-export([init/0, terminate/2, connect/4, do/2, open_channel_args/1, i/2,
+ info_keys/0, handle_message/2, closing/3, channels_terminated/1]).
+
+-define(RABBIT_TCP_OPTS, [binary, {packet, 0}, {active,false}, {nodelay, true}]).
+-define(SOCKET_CLOSING_TIMEOUT, 1000).
+-define(HANDSHAKE_RECEIVE_TIMEOUT, 60000).
+-define(TCP_MAX_PACKET_SIZE, (16#4000000 + ?EMPTY_FRAME_SIZE - 1)).
+
+-record(state, {sock,
+ name,
+ heartbeat,
+ writer0,
+ frame_max,
+ type_sup,
+ closing_reason, %% undefined | Reason
+ waiting_socket_close = false}).
+
+-define(INFO_KEYS, [type, heartbeat, frame_max, sock, name]).
+
+%%---------------------------------------------------------------------------
+
+init() ->
+ {ok, #state{}}.
+
+open_channel_args(#state{sock = Sock, frame_max = FrameMax}) ->
+ [Sock, FrameMax].
+
+do(#'connection.close_ok'{} = CloseOk, State) ->
+ erlang:send_after(?SOCKET_CLOSING_TIMEOUT, self(), socket_closing_timeout),
+ do2(CloseOk, State);
+do(Method, State) ->
+ do2(Method, State).
+
+do2(Method, #state{writer0 = Writer}) ->
+ %% Catching because it expects the {channel_exit, _, _} message on error
+ catch rabbit_writer:send_command_sync(Writer, Method).
+
+handle_message(socket_closing_timeout,
+ State = #state{closing_reason = Reason}) ->
+ {stop, {socket_closing_timeout, Reason}, State};
+handle_message(socket_closed, State = #state{waiting_socket_close = true,
+ closing_reason = Reason}) ->
+ {stop, {shutdown, Reason}, State};
+handle_message(socket_closed, State = #state{waiting_socket_close = false}) ->
+ {stop, socket_closed_unexpectedly, State};
+handle_message({socket_error, _} = SocketError, State) ->
+ {stop, SocketError, State};
+handle_message({channel_exit, 0, Reason}, State) ->
+ {stop, {channel0_died, Reason}, State};
+handle_message(heartbeat_timeout, State) ->
+ {stop, heartbeat_timeout, State};
+handle_message(closing_timeout, State = #state{closing_reason = Reason}) ->
+ {stop, Reason, State};
+handle_message({'EXIT', Pid, Reason}, State) ->
+ {stop, rabbit_misc:format("stopping because dependent process ~p died: ~p", [Pid, Reason]), State};
+%% see http://erlang.org/pipermail/erlang-bugs/2012-June/002933.html
+handle_message({Ref, {error, Reason}},
+ State = #state{waiting_socket_close = Waiting,
+ closing_reason = CloseReason})
+ when is_reference(Ref) ->
+ {stop, case {Reason, Waiting} of
+ {closed, true} -> {shutdown, CloseReason};
+ {closed, false} -> socket_closed_unexpectedly;
+ {_, _} -> {socket_error, Reason}
+ end, State}.
+
+closing(_ChannelCloseType, {server_initiated_close, _, _} = Reason, State) ->
+ {ok, State#state{waiting_socket_close = true,
+ closing_reason = Reason}};
+closing(_ChannelCloseType, Reason, State) ->
+ {ok, State#state{closing_reason = Reason}}.
+
+channels_terminated(State = #state{closing_reason =
+ {server_initiated_close, _, _}}) ->
+ {ok, State#state{waiting_socket_close = true}};
+channels_terminated(State) ->
+ {ok, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+i(type, _State) -> network;
+i(heartbeat, State) -> State#state.heartbeat;
+i(frame_max, State) -> State#state.frame_max;
+i(sock, State) -> State#state.sock;
+i(name, State) -> State#state.name;
+i(Item, _State) -> throw({bad_argument, Item}).
+
+info_keys() ->
+ ?INFO_KEYS.
+
+%%---------------------------------------------------------------------------
+%% Handshake
+%%---------------------------------------------------------------------------
+
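+%% The functions below implement the AMQP 0-9-1 handshake: open a TCP
+%% (optionally TLS) socket, send the protocol header, then exchange
+%% connection.start/start_ok (authentication), connection.tune/tune_ok
+%% and connection.open/open_ok with the server.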
+connect(AmqpParams = #amqp_params_network{host = Host}, SIF, TypeSup, State) ->
+ case gethostaddr(Host) of
+ [] -> {error, unknown_host};
+ [AF|_] -> do_connect(
+ AF, AmqpParams, SIF, State#state{type_sup = TypeSup})
+ end.
+
+do_connect({Addr, Family},
+ AmqpParams = #amqp_params_network{ssl_options = none,
+ port = Port,
+ connection_timeout = Timeout,
+ socket_options = ExtraOpts},
+ SIF, State) ->
+ ok = obtain(),
+ case gen_tcp:connect(Addr, Port,
+ [Family | ?RABBIT_TCP_OPTS] ++ ExtraOpts,
+ Timeout) of
+ {ok, Sock} -> try_handshake(AmqpParams, SIF,
+ State#state{sock = Sock});
+ {error, _} = E -> E
+ end;
+do_connect({Addr, Family},
+ AmqpParams = #amqp_params_network{ssl_options = SslOpts0,
+ port = Port,
+ connection_timeout = Timeout,
+ socket_options = ExtraOpts},
+ SIF, State) ->
+ {ok, GlobalSslOpts} = application:get_env(amqp_client, ssl_options),
+ app_utils:start_applications([asn1, crypto, public_key, ssl]),
+ ok = obtain(),
+ case gen_tcp:connect(Addr, Port,
+ [Family | ?RABBIT_TCP_OPTS] ++ ExtraOpts,
+ Timeout) of
+ {ok, Sock} ->
+ SslOpts = rabbit_ssl_options:fix(
+ orddict:to_list(
+ orddict:merge(fun (_, _A, B) -> B end,
+ orddict:from_list(GlobalSslOpts),
+ orddict:from_list(SslOpts0)))),
+ case ssl:connect(Sock, SslOpts, Timeout) of
+ {ok, SslSock} ->
+ try_handshake(AmqpParams, SIF,
+ State#state{sock = SslSock});
+ {error, _} = E ->
+ E
+ end;
+ {error, _} = E ->
+ E
+ end.
+
+inet_address_preference() ->
+ case application:get_env(amqp_client, prefer_ipv6) of
+ {ok, true} -> [inet6, inet];
+ {ok, false} -> [inet, inet6]
+ end.
+
+gethostaddr(Host) ->
+ Lookups = [{Family, inet:getaddr(Host, Family)}
+ || Family <- inet_address_preference()],
+ [{IP, Family} || {Family, {ok, IP}} <- Lookups].
+
+try_handshake(AmqpParams, SIF, State = #state{sock = Sock}) ->
+ Name = case rabbit_net:connection_string(Sock, outbound) of
+ {ok, Str} -> list_to_binary(Str);
+ {error, _} -> <<"unknown">>
+ end,
+ try handshake(AmqpParams, SIF,
+ State#state{name = <<"client ", Name/binary>>}) of
+ Return -> Return
+ catch exit:Reason -> {error, Reason}
+ end.
+
+handshake(AmqpParams, SIF, State0 = #state{sock = Sock}) ->
+ ok = rabbit_net:send(Sock, ?PROTOCOL_HEADER),
+ case start_infrastructure(SIF, State0) of
+ {ok, ChMgr, State1} ->
+ network_handshake(AmqpParams, {ChMgr, State1});
+ {error, Reason} ->
+ {error, Reason}
+ end.
+
+start_infrastructure(SIF, State = #state{sock = Sock, name = Name}) ->
+ case SIF(Sock, Name) of
+ {ok, ChMgr, Writer} ->
+ {ok, ChMgr, State#state{writer0 = Writer}};
+ {error, Reason} ->
+ {error, Reason}
+ end.
+
+network_handshake(AmqpParams = #amqp_params_network{virtual_host = VHost},
+ {ChMgr, State0}) ->
+ Start = #'connection.start'{server_properties = ServerProperties,
+ mechanisms = Mechanisms} =
+ handshake_recv('connection.start'),
+ ok = check_version(Start),
+ case login(AmqpParams, Mechanisms, State0) of
+ {closing, #amqp_error{}, _Error} = Err ->
+ do(#'connection.close_ok'{}, State0),
+ Err;
+ Tune ->
+ {TuneOk, ChannelMax, State1} = tune(Tune, AmqpParams, State0),
+ do2(TuneOk, State1),
+ do2(#'connection.open'{virtual_host = VHost}, State1),
+ Params = {ServerProperties, ChannelMax, ChMgr, State1},
+ case handshake_recv('connection.open_ok') of
+ #'connection.open_ok'{} -> {ok, Params};
+ {closing, #amqp_error{} = AmqpError, Error} -> {closing, Params,
+ AmqpError, Error}
+ end
+ end.
+
+check_version(#'connection.start'{version_major = ?PROTOCOL_VERSION_MAJOR,
+ version_minor = ?PROTOCOL_VERSION_MINOR}) ->
+ ok;
+check_version(#'connection.start'{version_major = 8,
+ version_minor = 0}) ->
+ exit({protocol_version_mismatch, 0, 8});
+check_version(#'connection.start'{version_major = Major,
+ version_minor = Minor}) ->
+ exit({protocol_version_mismatch, Major, Minor}).
+
+tune(#'connection.tune'{channel_max = ServerChannelMax,
+ frame_max = ServerFrameMax,
+ heartbeat = ServerHeartbeat},
+ #amqp_params_network{channel_max = ClientChannelMax,
+ frame_max = ClientFrameMax,
+ heartbeat = ClientHeartbeat}, State) ->
+ [ChannelMax, Heartbeat, FrameMax] =
+ lists:zipwith(fun (Client, Server) when Client =:= 0; Server =:= 0 ->
+ lists:max([Client, Server]);
+ (Client, Server) ->
+ lists:min([Client, Server])
+ end,
+ [ClientChannelMax, ClientHeartbeat, ClientFrameMax],
+ [ServerChannelMax, ServerHeartbeat, ServerFrameMax]),
+ %% If we attempt to recv > 64Mb, inet_drv will return enomem, so
+ %% we cap the max negotiated frame size accordingly. Note that
+ %% since we receive the frame header separately, we can actually
+ %% cope with frame sizes of 64M + ?EMPTY_FRAME_SIZE - 1.
+ CappedFrameMax = case FrameMax of
+ 0 -> ?TCP_MAX_PACKET_SIZE;
+ _ -> lists:min([FrameMax, ?TCP_MAX_PACKET_SIZE])
+ end,
+ NewState = State#state{heartbeat = Heartbeat, frame_max = CappedFrameMax},
+ start_heartbeat(NewState),
+ {#'connection.tune_ok'{channel_max = ChannelMax,
+ frame_max = CappedFrameMax,
+ heartbeat = Heartbeat}, ChannelMax, NewState}.
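+
+%% Worked example of the negotiation above (illustrative values): a
+%% client frame_max of 0 ("no limit") and a server frame_max of 131072
+%% negotiate to max(0, 131072) = 131072, while a client frame_max of
+%% 4096 against the same server negotiates to min(4096, 131072) = 4096;
+%% the result is then capped at ?TCP_MAX_PACKET_SIZE.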
+
+start_heartbeat(#state{sock = Sock,
+ name = Name,
+ heartbeat = Heartbeat,
+ type_sup = Sup}) ->
+ Frame = rabbit_binary_generator:build_heartbeat_frame(),
+ SendFun = fun () -> catch rabbit_net:send(Sock, Frame) end,
+ Connection = self(),
+ ReceiveFun = fun () -> Connection ! heartbeat_timeout end,
+ rabbit_heartbeat:start(
+ Sup, Sock, Name, Heartbeat, SendFun, Heartbeat, ReceiveFun).
+
+login(Params = #amqp_params_network{auth_mechanisms = ClientMechanisms,
+ client_properties = UserProps},
+ ServerMechanismsStr, State) ->
+ ServerMechanisms = string:tokens(binary_to_list(ServerMechanismsStr), " "),
+ case [{N, S, F} || F <- ClientMechanisms,
+ {N, S} <- [F(none, Params, init)],
+ lists:member(binary_to_list(N), ServerMechanisms)] of
+ [{Name, MState0, Mech}|_] ->
+ {Resp, MState1} = Mech(none, Params, MState0),
+ StartOk = #'connection.start_ok'{
+ client_properties = client_properties(UserProps),
+ mechanism = Name,
+ response = Resp},
+ do2(StartOk, State),
+ login_loop(Mech, MState1, Params, State);
+ [] ->
+ exit({no_suitable_auth_mechanism, ServerMechanisms})
+ end.
+
+login_loop(Mech, MState0, Params, State) ->
+ case handshake_recv('connection.tune') of
+ Tune = #'connection.tune'{} ->
+ Tune;
+ #'connection.secure'{challenge = Challenge} ->
+ {Resp, MState1} = Mech(Challenge, Params, MState0),
+ do2(#'connection.secure_ok'{response = Resp}, State),
+ login_loop(Mech, MState1, Params, State);
+ #'connection.close'{reply_code = ?ACCESS_REFUSED,
+ reply_text = ExplanationBin} ->
+ Explanation = binary_to_list(ExplanationBin),
+ {closing,
+ #amqp_error{name = access_refused,
+ explanation = Explanation},
+ {error, {auth_failure, Explanation}}}
+ end.
+
+client_properties(UserProperties) ->
+ {ok, Vsn} = application:get_key(amqp_client, vsn),
+ Default = [{<<"product">>, longstr, <<"RabbitMQ">>},
+ {<<"version">>, longstr, list_to_binary(Vsn)},
+ {<<"platform">>, longstr, <<"Erlang">>},
+ {<<"copyright">>, longstr,
+ <<"Copyright (c) 2007-2020 VMware, Inc. or its affiliates.">>},
+ {<<"information">>, longstr,
+ <<"Licensed under the MPL. "
+ "See https://www.rabbitmq.com/">>},
+ {<<"capabilities">>, table, ?CLIENT_CAPABILITIES}],
+ lists:foldl(fun({K, _, _} = Tuple, Acc) ->
+ lists:keystore(K, 1, Acc, Tuple)
+ end, Default, UserProperties).
+
+handshake_recv(Expecting) ->
+ receive
+ {'$gen_cast', {method, Method, none, noflow}} ->
+ case {Expecting, element(1, Method)} of
+ {E, M} when E =:= M ->
+ Method;
+ {'connection.tune', 'connection.secure'} ->
+ Method;
+ {'connection.tune', 'connection.close'} ->
+ Method;
+ {'connection.open_ok', 'connection.close'} ->
+ exit(get_reason(Method));
+ {'connection.open_ok', _} ->
+ {closing,
+ #amqp_error{name = command_invalid,
+ explanation = "was expecting "
+ "connection.open_ok"},
+ {error, {unexpected_method, Method,
+ {expecting, Expecting}}}};
+ _ ->
+ throw({unexpected_method, Method,
+ {expecting, Expecting}})
+ end;
+ socket_closed ->
+ case Expecting of
+ 'connection.tune' -> exit({auth_failure, "Disconnected"});
+ 'connection.open_ok' -> exit(access_refused);
+ _ -> exit({socket_closed_unexpectedly,
+ Expecting})
+ end;
+ {socket_error, _} = SocketError ->
+ exit({SocketError, {expecting, Expecting}});
+ {refused, Version} ->
+ exit({server_refused_connection, Version});
+ {malformed_header, All} ->
+ exit({server_sent_malformed_header, All});
+ heartbeat_timeout ->
+ exit(heartbeat_timeout);
+ Other ->
+ throw({handshake_recv_unexpected_message, Other})
+ after ?HANDSHAKE_RECEIVE_TIMEOUT ->
+ case Expecting of
+ 'connection.open_ok' ->
+ {closing,
+ #amqp_error{name = internal_error,
+ explanation = "handshake timed out waiting "
+ "connection.open_ok"},
+ {error, handshake_receive_timed_out}};
+ _ ->
+ exit(handshake_receive_timed_out)
+ end
+ end.
+
+obtain() ->
+ case code:is_loaded(file_handle_cache) of
+ false -> ok;
+ _ -> file_handle_cache:obtain()
+ end.
+
+get_reason(#'connection.close'{reply_code = ErrCode}) ->
+ ?PROTOCOL:amqp_exception(ErrCode).
diff --git a/deps/amqp_client/src/amqp_rpc_client.erl b/deps/amqp_client/src/amqp_rpc_client.erl
new file mode 100644
index 0000000000..3fd9a34650
--- /dev/null
+++ b/deps/amqp_client/src/amqp_rpc_client.erl
@@ -0,0 +1,176 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+%% @doc This module allows the simple execution of an asynchronous RPC over
+%% AMQP. It frees a client programmer from having to do the necessary
+%% AMQP plumbing. Note that this module does not handle any data encoding,
+%% so it is up to the caller to marshall and unmarshall message payloads
+%% accordingly.
+-module(amqp_rpc_client).
+
+-include("amqp_client.hrl").
+
+-behaviour(gen_server).
+
+-export([start/2, start_link/2, stop/1]).
+-export([call/2]).
+-export([init/1, terminate/2, code_change/3, handle_call/3,
+ handle_cast/2, handle_info/2]).
+
+-record(state, {channel,
+ reply_queue,
+ exchange,
+ routing_key,
+ continuations = #{},
+ correlation_id = 0}).
+
+%%--------------------------------------------------------------------------
+%% API
+%%--------------------------------------------------------------------------
+
+%% @spec (Connection, Queue) -> RpcClient
+%% where
+%% Connection = pid()
+%% Queue = binary()
+%% RpcClient = pid()
+%% @doc Starts a new RPC client instance that sends requests to a
+%% specified queue. This function returns the pid of the RPC client process
+%% that can be used to invoke RPCs and stop the client.
+start(Connection, Queue) ->
+ {ok, Pid} = gen_server:start(?MODULE, [Connection, Queue], []),
+ Pid.
+
+%% @spec (Connection, Queue) -> RpcClient
+%% where
+%% Connection = pid()
+%% Queue = binary()
+%% RpcClient = pid()
+%% @doc Starts, and links to, a new RPC client instance that sends requests
+%% to a specified queue. This function returns the pid of the RPC client
+%% process that can be used to invoke RPCs and stop the client.
+start_link(Connection, Queue) ->
+ {ok, Pid} = gen_server:start_link(?MODULE, [Connection, Queue], []),
+ Pid.
+
+%% @spec (RpcClient) -> ok
+%% where
+%% RpcClient = pid()
+%% @doc Stops an existing RPC client.
+stop(Pid) ->
+ gen_server:call(Pid, stop, amqp_util:call_timeout()).
+
+%% @spec (RpcClient, Payload) -> ok
+%% where
+%% RpcClient = pid()
+%% Payload = binary()
+%% @doc Invokes an RPC. Note the caller of this function is responsible for
+%% encoding the request and decoding the response.
+call(RpcClient, Payload) ->
+ gen_server:call(RpcClient, {call, Payload}, amqp_util:call_timeout()).
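+
+%% Illustrative usage sketch (connection setup and queue name are
+%% hypothetical, not part of this module's API contract):
+%%   {ok, Conn} = amqp_connection:start(#amqp_params_network{}),
+%%   Client = amqp_rpc_client:start(Conn, <<"rpc_queue">>),
+%%   Reply = amqp_rpc_client:call(Client, <<"request-payload">>),
+%%   ok = amqp_rpc_client:stop(Client).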
+
+%%--------------------------------------------------------------------------
+%% Plumbing
+%%--------------------------------------------------------------------------
+
+%% Sets up a reply queue for this client to listen on
+setup_reply_queue(State = #state{channel = Channel}) ->
+ #'queue.declare_ok'{queue = Q} =
+ amqp_channel:call(Channel, #'queue.declare'{exclusive = true,
+ auto_delete = true}),
+ State#state{reply_queue = Q}.
+
+%% Registers this RPC client instance as a consumer to handle rpc responses
+setup_consumer(#state{channel = Channel, reply_queue = Q}) ->
+ amqp_channel:call(Channel, #'basic.consume'{queue = Q}).
+
+%% Publishes to the broker, stores the From address against
+%% the correlation id and increments the correlation id for
+%% the next request
+publish(Payload, From,
+ State = #state{channel = Channel,
+ reply_queue = Q,
+ exchange = X,
+ routing_key = RoutingKey,
+ correlation_id = CorrelationId,
+ continuations = Continuations}) ->
+ EncodedCorrelationId = base64:encode(<<CorrelationId:64>>),
+ Props = #'P_basic'{correlation_id = EncodedCorrelationId,
+ content_type = <<"application/octet-stream">>,
+ reply_to = Q},
+ Publish = #'basic.publish'{exchange = X,
+ routing_key = RoutingKey,
+ mandatory = true},
+ amqp_channel:call(Channel, Publish, #amqp_msg{props = Props,
+ payload = Payload}),
+ State#state{correlation_id = CorrelationId + 1,
+ continuations = maps:put(EncodedCorrelationId, From, Continuations)}.
+
+%%--------------------------------------------------------------------------
+%% gen_server callbacks
+%%--------------------------------------------------------------------------
+
+%% Sets up a reply queue and consumer within an existing channel
+%% @private
+init([Connection, RoutingKey]) ->
+ {ok, Channel} = amqp_connection:open_channel(
+ Connection, {amqp_direct_consumer, [self()]}),
+ InitialState = #state{channel = Channel,
+ exchange = <<>>,
+ routing_key = RoutingKey},
+ State = setup_reply_queue(InitialState),
+ setup_consumer(State),
+ {ok, State}.
+
+%% Closes the channel this gen_server instance started
+%% @private
+terminate(_Reason, #state{channel = Channel}) ->
+ amqp_channel:close(Channel),
+ ok.
+
+%% Handle the application initiated stop by just stopping this gen server
+%% @private
+handle_call(stop, _From, State) ->
+ {stop, normal, ok, State};
+
+%% @private
+handle_call({call, Payload}, From, State) ->
+ NewState = publish(Payload, From, State),
+ {noreply, NewState}.
+
+%% @private
+handle_cast(_Msg, State) ->
+ {noreply, State}.
+
+%% @private
+handle_info({#'basic.consume'{}, _Pid}, State) ->
+ {noreply, State};
+
+%% @private
+handle_info(#'basic.consume_ok'{}, State) ->
+ {noreply, State};
+
+%% @private
+handle_info(#'basic.cancel'{}, State) ->
+ {noreply, State};
+
+%% @private
+handle_info(#'basic.cancel_ok'{}, State) ->
+ {stop, normal, State};
+
+%% @private
+handle_info({#'basic.deliver'{delivery_tag = DeliveryTag},
+ #amqp_msg{props = #'P_basic'{correlation_id = Id},
+ payload = Payload}},
+ State = #state{continuations = Conts, channel = Channel}) ->
+ From = maps:get(Id, Conts),
+ gen_server:reply(From, Payload),
+ amqp_channel:call(Channel, #'basic.ack'{delivery_tag = DeliveryTag}),
+ {noreply, State#state{continuations = maps:remove(Id, Conts) }}.
+
+%% @private
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
diff --git a/deps/amqp_client/src/amqp_rpc_server.erl b/deps/amqp_client/src/amqp_rpc_server.erl
new file mode 100644
index 0000000000..44e5113a94
--- /dev/null
+++ b/deps/amqp_client/src/amqp_rpc_server.erl
@@ -0,0 +1,138 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+%% @doc This is a utility module that is used to expose an arbitrary function
+%% via an asynchronous RPC over AMQP mechanism. It frees the implementor of
+%% a simple function from having to plumb this into AMQP. Note that the
+%% RPC server does not handle any data encoding, so it is up to the callback
+%% function to marshall and unmarshall message payloads accordingly.
+-module(amqp_rpc_server).
+
+-behaviour(gen_server).
+
+-include("amqp_client.hrl").
+
+-export([init/1, terminate/2, code_change/3, handle_call/3,
+ handle_cast/2, handle_info/2]).
+-export([start/3, start_link/3]).
+-export([stop/1]).
+
+-record(state, {channel,
+ handler}).
+
+%%--------------------------------------------------------------------------
+%% API
+%%--------------------------------------------------------------------------
+
+%% @spec (Connection, Queue, RpcHandler) -> RpcServer
+%% where
+%% Connection = pid()
+%% Queue = binary()
+%% RpcHandler = function()
+%% RpcServer = pid()
+%% @doc Starts a new RPC server instance that receives requests via a
+%% specified queue and dispatches them to a specified handler function. This
+%% function returns the pid of the RPC server that can be used to stop the
+%% server.
+start(Connection, Queue, Fun) ->
+ {ok, Pid} = gen_server:start(?MODULE, [Connection, Queue, Fun], []),
+ Pid.
+
+%% @spec (Connection, Queue, RpcHandler) -> RpcServer
+%% where
+%% Connection = pid()
+%% Queue = binary()
+%% RpcHandler = function()
+%% RpcServer = pid()
+%% @doc Starts, and links to, a new RPC server instance that receives
+%% requests via a specified queue and dispatches them to a specified
+%% handler function. This function returns the pid of the RPC server that
+%% can be used to stop the server.
+start_link(Connection, Queue, Fun) ->
+ {ok, Pid} = gen_server:start_link(?MODULE, [Connection, Queue, Fun], []),
+ Pid.
+
+%% @spec (RpcServer) -> ok
+%% where
+%% RpcServer = pid()
+%% @doc Stops an existing RPC server.
+stop(Pid) ->
+ gen_server:call(Pid, stop, amqp_util:call_timeout()).
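+
+%% Illustrative usage sketch (handler and queue name are hypothetical):
+%%   Handler = fun(Payload) -> <<"echo: ", Payload/binary>> end,
+%%   Server = amqp_rpc_server:start(Conn, <<"rpc_queue">>, Handler),
+%%   %% ... requests are served until ...
+%%   ok = amqp_rpc_server:stop(Server).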
+
+%%--------------------------------------------------------------------------
+%% gen_server callbacks
+%%--------------------------------------------------------------------------
+
+%% @private
+init([Connection, Q, Fun]) ->
+ {ok, Channel} = amqp_connection:open_channel(
+ Connection, {amqp_direct_consumer, [self()]}),
+ amqp_channel:call(Channel, #'queue.declare'{queue = Q}),
+ amqp_channel:call(Channel, #'basic.consume'{queue = Q}),
+ {ok, #state{channel = Channel, handler = Fun} }.
+
+%% @private
+handle_info(shutdown, State) ->
+ {stop, normal, State};
+
+%% @private
+handle_info({#'basic.consume'{}, _}, State) ->
+ {noreply, State};
+
+%% @private
+handle_info(#'basic.consume_ok'{}, State) ->
+ {noreply, State};
+
+%% @private
+handle_info(#'basic.cancel'{}, State) ->
+ {noreply, State};
+
+%% @private
+handle_info(#'basic.cancel_ok'{}, State) ->
+ {stop, normal, State};
+
+%% @private
+handle_info({#'basic.deliver'{delivery_tag = DeliveryTag},
+ #amqp_msg{props = Props, payload = Payload}},
+ State = #state{handler = Fun, channel = Channel}) ->
+ #'P_basic'{correlation_id = CorrelationId,
+ reply_to = Q} = Props,
+ Response = Fun(Payload),
+ Properties = #'P_basic'{correlation_id = CorrelationId},
+ Publish = #'basic.publish'{exchange = <<>>,
+ routing_key = Q,
+ mandatory = true},
+ amqp_channel:call(Channel, Publish, #amqp_msg{props = Properties,
+ payload = Response}),
+ amqp_channel:call(Channel, #'basic.ack'{delivery_tag = DeliveryTag}),
+ {noreply, State};
+
+%% @private
+handle_info({'DOWN', _MRef, process, _Pid, _Info}, State) ->
+ {noreply, State}.
+
+%% @private
+handle_call(stop, _From, State) ->
+ {stop, normal, ok, State}.
+
+%%--------------------------------------------------------------------------
+%% Rest of the gen_server callbacks
+%%--------------------------------------------------------------------------
+
+%% @private
+handle_cast(_Message, State) ->
+ {noreply, State}.
+
+%% Closes the channel this gen_server instance started
+%% @private
+terminate(_Reason, #state{channel = Channel}) ->
+ amqp_channel:close(Channel),
+ ok.
+
+%% @private
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
diff --git a/deps/amqp_client/src/amqp_selective_consumer.erl b/deps/amqp_client/src/amqp_selective_consumer.erl
new file mode 100644
index 0000000000..da1138654e
--- /dev/null
+++ b/deps/amqp_client/src/amqp_selective_consumer.erl
@@ -0,0 +1,265 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+%% @doc This module is an implementation of the amqp_gen_consumer
+%% behaviour and can be used as part of the Consumer parameter when
+%% opening AMQP channels. This is the default implementation selected
+%% by the channel. <br/>
+%% <br/>
+%% The Consumer parameter for this implementation is {{@module}, []@}<br/>
+%% This consumer implementation keeps track of consumer tags and sends
+%% the subscription-relevant messages to the registered consumers, according
+%% to an internal tag dictionary.<br/>
+%% <br/>
+%% Send a #basic.consume{} message to the channel to subscribe a
+%% consumer to a queue and send a #basic.cancel{} message to cancel a
+%% subscription.<br/>
+%% <br/>
+%% The channel will send to the relevant registered consumers the
+%% basic.consume_ok, basic.cancel_ok, basic.cancel and basic.deliver messages
+%% received from the server.<br/>
+%% <br/>
+%% If a consumer is not registered for a given consumer tag, the message
+%% is sent to the default consumer registered with
+%% {@module}:register_default_consumer. If there is no default consumer
+%% registered in this case, an exception occurs and the channel is abruptly
+%% terminated.<br/>
+-module(amqp_selective_consumer).
+
+-include("amqp_gen_consumer_spec.hrl").
+
+-behaviour(amqp_gen_consumer).
+
+-export([register_default_consumer/2]).
+-export([init/1, handle_consume_ok/3, handle_consume/3, handle_cancel_ok/3,
+ handle_cancel/2, handle_server_cancel/2,
+ handle_deliver/3, handle_deliver/4,
+ handle_info/2, handle_call/3, terminate/2]).
+
+-record(state, {consumers = #{}, %% Tag -> ConsumerPid
+ unassigned = undefined, %% Pid
+ monitors = #{}, %% Pid -> {Count, MRef}
+ default_consumer = none}).
+
+%%---------------------------------------------------------------------------
+%% Interface
+%%---------------------------------------------------------------------------
+
+%% @spec (ChannelPid, ConsumerPid) -> ok
+%% where
+%% ChannelPid = pid()
+%% ConsumerPid = pid()
+%% @doc This function registers a default consumer with the channel. A
+%% default consumer is used when a subscription is made via
+%% amqp_channel:call(ChannelPid, #'basic.consume'{}) (rather than
+%% {@module}:subscribe/3) and hence there is no consumer pid
+%% registered with the consumer tag. In this case, the relevant
+%% deliveries will be sent to the default consumer.
+register_default_consumer(ChannelPid, ConsumerPid) ->
+ amqp_channel:call_consumer(ChannelPid,
+ {register_default_consumer, ConsumerPid}).
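+
+%% Illustrative usage sketch (assumes an existing connection Conn):
+%%   {ok, Ch} = amqp_connection:open_channel(
+%%                Conn, {amqp_selective_consumer, []}),
+%%   ok = amqp_selective_consumer:register_default_consumer(Ch, self()),
+%%   #'basic.consume_ok'{} =
+%%       amqp_channel:call(Ch, #'basic.consume'{queue = <<"a-queue">>}).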
+
+%%---------------------------------------------------------------------------
+%% amqp_gen_consumer callbacks
+%%---------------------------------------------------------------------------
+
+%% @private
+init([]) ->
+ {ok, #state{}}.
+
+%% @private
+handle_consume(#'basic.consume'{consumer_tag = Tag,
+ nowait = NoWait},
+ Pid, State = #state{consumers = Consumers,
+ monitors = Monitors}) ->
+ Result = case NoWait of
+ true when Tag =:= undefined orelse size(Tag) == 0 ->
+ no_consumer_tag_specified;
+ _ when is_binary(Tag) andalso size(Tag) >= 0 ->
+ case resolve_consumer(Tag, State) of
+ {consumer, _} -> consumer_tag_in_use;
+ _ -> ok
+ end;
+ _ ->
+ ok
+ end,
+ case {Result, NoWait} of
+ {ok, true} ->
+ {ok, State#state
+ {consumers = maps:put(Tag, Pid, Consumers),
+ monitors = add_to_monitor_dict(Pid, Monitors)}};
+ {ok, false} ->
+ {ok, State#state{unassigned = Pid}};
+ {Err, true} ->
+ {error, Err, State};
+ {_Err, false} ->
+ %% Don't do anything (don't override existing
+ %% consumers), the server will close the channel with an error.
+ {ok, State}
+ end.
+
+%% @private
+handle_consume_ok(BasicConsumeOk, _BasicConsume,
+ State = #state{unassigned = Pid,
+ consumers = Consumers,
+ monitors = Monitors})
+ when is_pid(Pid) ->
+ State1 =
+ State#state{
+ consumers = maps:put(tag(BasicConsumeOk), Pid, Consumers),
+ monitors = add_to_monitor_dict(Pid, Monitors),
+ unassigned = undefined},
+ deliver(BasicConsumeOk, State1),
+ {ok, State1}.
+
+%% @private
+%% We sent a basic.cancel.
+handle_cancel(#'basic.cancel'{nowait = true},
+ #state{default_consumer = none}) ->
+ exit(cancel_nowait_requires_default_consumer);
+
+handle_cancel(Cancel = #'basic.cancel'{nowait = NoWait}, State) ->
+ State1 = case NoWait of
+ true -> do_cancel(Cancel, State);
+ false -> State
+ end,
+ {ok, State1}.
+
+%% @private
+%% We sent a basic.cancel and now receive the ok.
+handle_cancel_ok(CancelOk, _Cancel, State) ->
+ State1 = do_cancel(CancelOk, State),
+ %% Use old state
+ deliver(CancelOk, State),
+ {ok, State1}.
+
+%% @private
+%% The server sent a basic.cancel.
+handle_server_cancel(Cancel = #'basic.cancel'{nowait = true}, State) ->
+ State1 = do_cancel(Cancel, State),
+ %% Use old state
+ deliver(Cancel, State),
+ {ok, State1}.
+
+%% @private
+handle_deliver(Method, Message, State) ->
+ deliver(Method, Message, State),
+ {ok, State}.
+
+%% @private
+handle_deliver(Method, Message, DeliveryCtx, State) ->
+ deliver(Method, Message, DeliveryCtx, State),
+ {ok, State}.
+
+%% @private
+handle_info({'DOWN', _MRef, process, Pid, _Info},
+ State = #state{monitors = Monitors,
+ consumers = Consumers,
+ default_consumer = DConsumer }) ->
+ case maps:find(Pid, Monitors) of
+ {ok, _CountMRef} ->
+ {ok, State#state{monitors = maps:remove(Pid, Monitors),
+ consumers =
+ maps:filter(
+ fun (_, Pid1) when Pid1 =:= Pid -> false;
+ (_, _) -> true
+ end, Consumers)}};
+ error ->
+ case Pid of
+ DConsumer -> {ok, State#state{
+ monitors = maps:remove(Pid, Monitors),
+ default_consumer = none}};
+ _ -> {ok, State} %% unnamed consumer went down
+ %% before receiving consume_ok
+ end
+ end;
+handle_info(#'basic.credit_drained'{} = Method, State) ->
+ deliver_to_consumer_or_die(Method, Method, State),
+ {ok, State}.
+
+%% @private
+handle_call({register_default_consumer, Pid}, _From,
+ State = #state{default_consumer = PrevPid,
+ monitors = Monitors}) ->
+ Monitors1 = case PrevPid of
+ none -> Monitors;
+ _ -> remove_from_monitor_dict(PrevPid, Monitors)
+ end,
+ {reply, ok,
+ State#state{default_consumer = Pid,
+ monitors = add_to_monitor_dict(Pid, Monitors1)}}.
+
+%% @private
+terminate(_Reason, State) ->
+ State.
+
+%%---------------------------------------------------------------------------
+%% Internal plumbing
+%%---------------------------------------------------------------------------
+
+deliver_to_consumer_or_die(Method, Msg, State) ->
+ case resolve_consumer(tag(Method), State) of
+ {consumer, Pid} -> Pid ! Msg;
+ {default, Pid} -> Pid ! Msg;
+ error -> exit(unexpected_delivery_and_no_default_consumer)
+ end.
+
+deliver(Method, State) ->
+ deliver(Method, undefined, State).
+deliver(Method, Message, State) ->
+ Combined = if Message =:= undefined -> Method;
+ true -> {Method, Message}
+ end,
+ deliver_to_consumer_or_die(Method, Combined, State).
+deliver(Method, Message, DeliveryCtx, State) ->
+ Combined = if Message =:= undefined -> Method;
+ true -> {Method, Message, DeliveryCtx}
+ end,
+ deliver_to_consumer_or_die(Method, Combined, State).
+
+do_cancel(Cancel, State = #state{consumers = Consumers,
+ monitors = Monitors}) ->
+ Tag = tag(Cancel),
+ case maps:find(Tag, Consumers) of
+ {ok, Pid} -> State#state{
+ consumers = maps:remove(Tag, Consumers),
+ monitors = remove_from_monitor_dict(Pid, Monitors)};
+ error -> %% Untracked consumer. Do nothing.
+ State
+ end.
+
+resolve_consumer(Tag, #state{consumers = Consumers,
+ default_consumer = DefaultConsumer}) ->
+ case maps:find(Tag, Consumers) of
+ {ok, ConsumerPid} -> {consumer, ConsumerPid};
+ error -> case DefaultConsumer of
+ none -> error;
+ _ -> {default, DefaultConsumer}
+ end
+ end.
+
+tag(#'basic.consume'{consumer_tag = Tag}) -> Tag;
+tag(#'basic.consume_ok'{consumer_tag = Tag}) -> Tag;
+tag(#'basic.cancel'{consumer_tag = Tag}) -> Tag;
+tag(#'basic.cancel_ok'{consumer_tag = Tag}) -> Tag;
+tag(#'basic.deliver'{consumer_tag = Tag}) -> Tag;
+tag(#'basic.credit_drained'{consumer_tag = Tag}) -> Tag.
+
+add_to_monitor_dict(Pid, Monitors) ->
+ case maps:find(Pid, Monitors) of
+ error -> maps:put(Pid,
+ {1, erlang:monitor(process, Pid)},
+ Monitors);
+ {ok, {Count, MRef}} -> maps:put(Pid, {Count + 1, MRef}, Monitors)
+ end.
+
+remove_from_monitor_dict(Pid, Monitors) ->
+ case maps:get(Pid, Monitors) of
+ {1, MRef} -> erlang:demonitor(MRef),
+ maps:remove(Pid, Monitors);
+ {Count, MRef} -> maps:put(Pid, {Count - 1, MRef}, Monitors)
+ end.
diff --git a/deps/amqp_client/src/amqp_ssl.erl b/deps/amqp_client/src/amqp_ssl.erl
new file mode 100644
index 0000000000..ff2bddd55a
--- /dev/null
+++ b/deps/amqp_client/src/amqp_ssl.erl
@@ -0,0 +1,113 @@
+-module(amqp_ssl).
+
+-include("amqp_client_internal.hrl").
+
+-include_lib("public_key/include/public_key.hrl").
+
+-export([maybe_enhance_ssl_options/1,
+ add_verify_fun_to_opts/2,
+ verify_fun/3]).
+
+maybe_enhance_ssl_options(Params = #amqp_params_network{ssl_options = none}) ->
+ Params;
+maybe_enhance_ssl_options(Params = #amqp_params_network{host = Host, ssl_options = Opts0}) ->
+ Opts1 = maybe_add_sni(Host, Opts0),
+ Opts2 = maybe_add_verify(Opts1),
+ Params#amqp_params_network{ssl_options = Opts2};
+maybe_enhance_ssl_options(Params) ->
+ Params.
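+
+%% Illustrative example (hypothetical values): for a record with
+%% host = "remote.host" and ssl_options = [{verify, verify_peer}],
+%% maybe_enhance_ssl_options/1 adds
+%% {server_name_indication, "remote.host"} (plus, on OTP 19 only, a
+%% verify_fun) to the TLS options.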
+
+% https://github.com/erlang/otp/blob/master/lib/inets/src/http_client/httpc_handler.erl
+maybe_add_sni(Host, Options) ->
+ maybe_add_sni_0(lists:keyfind(server_name_indication, 1, Options), Host, Options).
+
+maybe_add_sni_0(false, Host, Options) ->
+ % NB: this is the case where the user did not specify
+ % server_name_indication at all. If Host is a DNS host name,
+    % we specify server_name_indication automatically below
+ maybe_add_sni_1(inet_parse:domain(Host), Host, Options);
+maybe_add_sni_0({server_name_indication, disable}, _Host, Options) ->
+ % NB: this is the case where the user explicitly disabled
+ % server_name_indication
+ Options;
+maybe_add_sni_0({server_name_indication, SniHost}, _Host, Options) ->
+ % NB: this is the case where the user explicitly specified
+ % an SNI host name. We may need to add verify_fun for OTP 19
+ maybe_add_verify_fun(lists:keymember(verify_fun, 1, Options), SniHost, Options).
+
+maybe_add_sni_1(false, _Host, Options) ->
+ % NB: host is not a DNS host name, so nothing to add
+ Options;
+maybe_add_sni_1(true, Host, Options) ->
+ Opts1 = [{server_name_indication, Host} | Options],
+ maybe_add_verify_fun(lists:keymember(verify_fun, 1, Opts1), Host, Opts1).
+
+maybe_add_verify_fun(true, _Host, Options) ->
+ % NB: verify_fun already present, don't add twice
+ Options;
+maybe_add_verify_fun(false, Host, Options) ->
+ add_verify_fun_to_opts(lists:keyfind(verify, 1, Options), Host, Options).
+
+maybe_add_verify(Options) ->
+ case lists:keymember(verify, 1, Options) of
+ true ->
+ % NB: user has explicitly set 'verify'
+ Options;
+ _ ->
+ ?LOG_WARN("Connection (~p): Certificate chain verification is not enabled for this TLS connection. "
+ "Please see https://rabbitmq.com/ssl.html for more information.~n", [self()]),
+ Options
+ end.
+ % TODO FUTURE 3.8.x
+ % verify_peer will become the default in RabbitMQ 3.8.0
+ % false ->
+ % [{verify, verify_peer} | Options]
+ % end.
+
+add_verify_fun_to_opts(Host, Options) ->
+ add_verify_fun_to_opts(false, Host, Options).
+
+add_verify_fun_to_opts({verify, verify_none}, _Host, Options) ->
+ % NB: this is the case where the user explicitly disabled
+ % certificate chain verification so there's not much sense
+ % in adding verify_fun
+ Options;
+add_verify_fun_to_opts(_, Host, Options) ->
+ % NB: this is the case where the user either did not
+ % set the verify option or set it to verify_peer
+ case erlang:system_info(otp_release) of
+ "19" ->
+ F = fun ?MODULE:verify_fun/3,
+ [{verify_fun, {F, Host}} | Options];
+ _ -> Options
+ end.
+
+-type hostname() :: nonempty_string() | binary().
+
+-spec verify_fun(Cert :: #'OTPCertificate'{},
+ Event :: {bad_cert, Reason :: atom() | {revoked, atom()}} |
+ {extension, #'Extension'{}}, InitialUserState :: term()) ->
+ {valid, UserState :: term()} | {valid_peer, UserState :: hostname()} |
+ {fail, Reason :: term()} | {unknown, UserState :: term()}.
+verify_fun(_, {bad_cert, _} = Reason, _) ->
+ {fail, Reason};
+verify_fun(_, {extension, _}, UserState) ->
+ {unknown, UserState};
+verify_fun(_, valid, UserState) ->
+ {valid, UserState};
+% NOTE:
+% The user state is the hostname to verify as configured in
+% amqp_ssl:add_verify_fun_to_opts/3
+verify_fun(Cert, valid_peer, Hostname) when Hostname =/= disable ->
+ verify_hostname(Cert, Hostname);
+verify_fun(_, valid_peer, UserState) ->
+ {valid, UserState}.
+
+% https://github.com/erlang/otp/blob/master/lib/ssl/src/ssl_certificate.erl
+verify_hostname(Cert, Hostname) ->
+ case public_key:pkix_verify_hostname(Cert, [{dns_id, Hostname}]) of
+ true ->
+ {valid, Hostname};
+ false ->
+ {fail, {bad_cert, hostname_check_failed}}
+ end.
diff --git a/deps/amqp_client/src/amqp_sup.erl b/deps/amqp_client/src/amqp_sup.erl
new file mode 100644
index 0000000000..05bc8e4185
--- /dev/null
+++ b/deps/amqp_client/src/amqp_sup.erl
@@ -0,0 +1,38 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+%% @private
+-module(amqp_sup).
+
+-include("amqp_client.hrl").
+
+-behaviour(supervisor2).
+
+-export([start_link/0, is_ready/0, start_connection_sup/1]).
+-export([init/1]).
+
+%%---------------------------------------------------------------------------
+%% Interface
+%%---------------------------------------------------------------------------
+
+start_link() ->
+ supervisor2:start_link({local, amqp_sup}, ?MODULE, []).
+
+is_ready() ->
+ whereis(amqp_sup) =/= undefined.
+
+start_connection_sup(AmqpParams) ->
+ supervisor2:start_child(amqp_sup, [AmqpParams]).
+
+%%---------------------------------------------------------------------------
+%% supervisor2 callbacks
+%%---------------------------------------------------------------------------
+
+init([]) ->
+ {ok, {{simple_one_for_one, 0, 1},
+ [{connection_sup, {amqp_connection_sup, start_link, []},
+ temporary, ?SUPERVISOR_WAIT, supervisor, [amqp_connection_sup]}]}}.
diff --git a/deps/amqp_client/src/amqp_uri.erl b/deps/amqp_client/src/amqp_uri.erl
new file mode 100644
index 0000000000..ff545a48b2
--- /dev/null
+++ b/deps/amqp_client/src/amqp_uri.erl
@@ -0,0 +1,273 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(amqp_uri).
+
+-include("amqp_client.hrl").
+
+-export([parse/1, parse/2, remove_credentials/1]).
+
+%%---------------------------------------------------------------------------
+%% AMQP URI Parsing
+%%---------------------------------------------------------------------------
+
+%% Reformat a URI to remove authentication secrets from it (before we
+%% log it or display it anywhere).
+-spec remove_credentials(URI :: string() | binary()) -> string().
+remove_credentials(URI) ->
+ UriString = rabbit_data_coercion:to_list(URI),
+ Props = uri_parser:parse(UriString,
+ [{host, undefined}, {path, undefined},
+ {port, undefined}, {'query', []}]),
+ PortPart = case proplists:get_value(port, Props) of
+ undefined -> "";
+ Port -> rabbit_misc:format(":~B", [Port])
+ end,
+ PGet = fun(K, P) -> case proplists:get_value(K, P) of
+ undefined -> "";
+ R -> R
+ end
+ end,
+ rabbit_misc:format(
+ "~s://~s~s~s", [proplists:get_value(scheme, Props), PGet(host, Props),
+ PortPart, PGet(path, Props)]).
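+
+%% Illustrative example (hypothetical URI):
+%%   remove_credentials("amqp://user:secret@host.example:5672/vhost")
+%% returns "amqp://host.example:5672/vhost".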
+
+%% @spec (Uri) -> {ok, #amqp_params_network{} | #amqp_params_direct{}} |
+%% {error, {Info, Uri}}
+%% where
+%% Uri = string()
+%% Info = any()
+%%
+%% @doc Parses an AMQP URI. If any of the URI parts are missing, the
+%% default values are used. If the hostname is zero-length, an
+%% #amqp_params_direct{} record is returned; otherwise, an
+%% #amqp_params_network{} record is returned. Extra parameters may be
+%% specified via the query string
+%% (e.g. "?heartbeat=5&amp;auth_mechanism=external"). In case of failure,
+%% an {error, {Info, Uri}} tuple is returned.
+%%
+%% The extra parameters that may be specified are channel_max,
+%% frame_max, heartbeat and auth_mechanism (the latter can appear more
+%% than once). The extra parameters that may be specified for an SSL
+%% connection are cacertfile, certfile, keyfile, verify,
+%% fail_if_no_peer_cert, password, and depth.
+-type parse_result() :: {ok, #amqp_params_network{}} |
+ {ok, #amqp_params_direct{}} |
+ {error, {any(), string()}}.
+
+-spec parse(Uri :: string() | binary()) -> parse_result().
+parse(Uri) -> parse(Uri, <<"/">>).
+
+-spec parse(Uri :: string() | binary(), DefaultVHost :: binary()) -> parse_result().
+parse(Uri, DefaultVHost) ->
+ try return(parse1(Uri, DefaultVHost))
+ catch throw:Err -> {error, {Err, Uri}};
+ error:Err -> {error, {Err, Uri}}
+ end.
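+
+%% Illustrative example (hypothetical URI):
+%%   {ok, #amqp_params_network{host = "remote.host", heartbeat = 5}} =
+%%       amqp_uri:parse("amqp://guest:guest@remote.host?heartbeat=5").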
+
+parse1(Uri, DefaultVHost) when is_list(Uri); is_binary(Uri) ->
+ UriString = rabbit_data_coercion:to_list(Uri),
+ case uri_parser:parse(UriString, [{host, undefined}, {path, undefined},
+ {port, undefined}, {'query', []}]) of
+ {error, Err} ->
+ throw({unable_to_parse_uri, Err});
+ Parsed ->
+ Endpoint =
+ case string:to_lower(proplists:get_value(scheme, Parsed)) of
+ "amqp" -> build_broker(Parsed, DefaultVHost);
+ "amqps" -> build_ssl_broker(Parsed, DefaultVHost);
+ Scheme -> fail({unexpected_uri_scheme, Scheme})
+ end,
+ return({ok, broker_add_query(Endpoint, Parsed)})
+ end;
+parse1(_, _DefaultVHost) ->
+ fail(expected_string_uri).
+
+unescape_string(Atom) when is_atom(Atom) ->
+ Atom;
+unescape_string(Integer) when is_integer(Integer) ->
+ Integer;
+unescape_string([]) ->
+ [];
+unescape_string([$%, N1, N2 | Rest]) ->
+ try
+ [erlang:list_to_integer([N1, N2], 16) | unescape_string(Rest)]
+ catch
+ error:badarg -> throw({invalid_entitiy, ['%', N1, N2]})
+ end;
+unescape_string([$% | Rest]) ->
+ fail({unterminated_entity, ['%' | Rest]});
+unescape_string([C | Rest]) ->
+ [C | unescape_string(Rest)].
+
+build_broker(ParsedUri, DefaultVHost) ->
+ [Host, Port, Path] =
+ [proplists:get_value(F, ParsedUri) || F <- [host, port, path]],
+ case Port =:= undefined orelse (0 < Port andalso Port =< 65535) of
+ true -> ok;
+ false -> fail({port_out_of_range, Port})
+ end,
+ VHost = case Path of
+ undefined -> DefaultVHost;
+ [$/|Rest] -> case string:chr(Rest, $/) of
+ 0 -> list_to_binary(unescape_string(Rest));
+ _ -> fail({invalid_vhost, Rest})
+ end
+ end,
+ UserInfo = proplists:get_value(userinfo, ParsedUri),
+ Record = case {unescape_string(Host), Port} of
+ {undefined, undefined} ->
+ #amqp_params_direct{virtual_host = VHost};
+ {undefined, _Port} ->
+ fail(port_requires_host);
+ {Host1, Port1} ->
+ Mech = mechanisms(ParsedUri),
+ #amqp_params_network{host = Host1,
+ port = Port1,
+ virtual_host = VHost,
+ auth_mechanisms = Mech}
+ end,
+ set_user_info(Record, UserInfo).
+
+set_user_info(Ps, UserInfo) ->
+ case UserInfo of
+ [U, P | _] -> set([{username, list_to_binary(unescape_string(U))},
+ {password, list_to_binary(unescape_string(P))}], Ps);
+
+ [U] -> set([{username, list_to_binary(unescape_string(U))}], Ps);
+ [] -> Ps
+ end.
+
+set(KVs, Ps = #amqp_params_direct{}) ->
+ set(KVs, Ps, record_info(fields, amqp_params_direct));
+set(KVs, Ps = #amqp_params_network{}) ->
+ set(KVs, Ps, record_info(fields, amqp_params_network)).
+
+set(KVs, Ps, Fields) ->
+ {Ps1, _Ix} = lists:foldl(fun (Field, {PsN, Ix}) ->
+ {case lists:keyfind(Field, 1, KVs) of
+ false -> PsN;
+ {_, V} -> setelement(Ix, PsN, V)
+ end, Ix + 1}
+ end, {Ps, 2}, Fields),
+ Ps1.
+
+build_ssl_broker(ParsedUri, DefaultVHost) ->
+ Params0 = build_broker(ParsedUri, DefaultVHost),
+ Query = proplists:get_value('query', ParsedUri),
+ SSLOptions =
+ run_state_monad(
+ [fun (L) -> KeyString = atom_to_list(Key),
+ case lists:keysearch(KeyString, 1, Query) of
+ {value, {_, Value}} ->
+ try return([{Key, unescape_string(Fun(Value))} | L])
+ catch throw:Reason ->
+ fail({invalid_ssl_parameter,
+ Key, Value, Query, Reason})
+ end;
+ false ->
+ L
+ end
+ end || {Fun, Key} <-
+ [{fun find_path_parameter/1, cacertfile},
+ {fun find_path_parameter/1, certfile},
+ {fun find_path_parameter/1, keyfile},
+ {fun find_atom_parameter/1, verify},
+ {fun find_boolean_parameter/1, fail_if_no_peer_cert},
+ {fun find_identity_parameter/1, password},
+ {fun find_sni_parameter/1, server_name_indication},
+ {fun find_integer_parameter/1, depth}]],
+ []),
+ Params1 = Params0#amqp_params_network{ssl_options = SSLOptions},
+ amqp_ssl:maybe_enhance_ssl_options(Params1).
+
+broker_add_query(Params = #amqp_params_direct{}, Uri) ->
+ broker_add_query(Params, Uri, record_info(fields, amqp_params_direct));
+broker_add_query(Params = #amqp_params_network{}, Uri) ->
+ broker_add_query(Params, Uri, record_info(fields, amqp_params_network)).
+
+broker_add_query(Params, ParsedUri, Fields) ->
+ Query = proplists:get_value('query', ParsedUri),
+ {Params1, _Pos} =
+ run_state_monad(
+ [fun ({ParamsN, Pos}) ->
+ Pos1 = Pos + 1,
+ KeyString = atom_to_list(Field),
+ case proplists:get_value(KeyString, Query) of
+ undefined ->
+ return({ParamsN, Pos1});
+ true -> %% proplists short form, not permitted
+ return({ParamsN, Pos1});
+ Value ->
+ try
+ ValueParsed = parse_amqp_param(Field, Value),
+ return(
+ {setelement(Pos, ParamsN, ValueParsed), Pos1})
+ catch throw:Reason ->
+ fail({invalid_amqp_params_parameter,
+ Field, Value, Query, Reason})
+ end
+ end
+ end || Field <- Fields], {Params, 2}),
+ Params1.
+
+parse_amqp_param(Field, String) when Field =:= channel_max orelse
+ Field =:= frame_max orelse
+ Field =:= heartbeat orelse
+ Field =:= connection_timeout orelse
+ Field =:= depth ->
+ find_integer_parameter(String);
+parse_amqp_param(Field, String) when Field =:= password ->
+ find_identity_parameter(String);
+parse_amqp_param(Field, String) ->
+ fail({parameter_unconfigurable_in_query, Field, String}).
+
+find_path_parameter(Value) ->
+ find_identity_parameter(Value).
+
+find_sni_parameter("disable") ->
+ disable;
+find_sni_parameter(Value) ->
+ find_identity_parameter(Value).
+
+find_identity_parameter(Value) -> return(Value).
+
+find_integer_parameter(Value) ->
+ try return(list_to_integer(Value))
+ catch error:badarg -> fail({not_an_integer, Value})
+ end.
+
+find_boolean_parameter(Value) ->
+ Bool = list_to_atom(Value),
+ case is_boolean(Bool) of
+ true -> return(Bool);
+ false -> fail({require_boolean, Bool})
+ end.
+
+find_atom_parameter(Value) -> return(list_to_atom(Value)).
+
+mechanisms(ParsedUri) ->
+ Query = proplists:get_value('query', ParsedUri),
+ Mechanisms = case proplists:get_all_values("auth_mechanism", Query) of
+ [] -> ["plain", "amqplain"];
+ Mechs -> Mechs
+ end,
+ [case [list_to_atom(T) || T <- string:tokens(Mech, ":")] of
+ [F] -> fun (R, P, S) -> amqp_auth_mechanisms:F(R, P, S) end;
+ [M, F] -> fun (R, P, S) -> M:F(R, P, S) end;
+ L -> throw({not_mechanism, L})
+ end || Mech <- Mechanisms].
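+
+%% Illustrative example (hypothetical query values): a query of
+%% "?auth_mechanism=external" selects amqp_auth_mechanisms:external/3,
+%% while "?auth_mechanism=my_mod:my_mech" (a hypothetical module)
+%% selects my_mod:my_mech/3.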
+
+%% --=: Plain state monad implementation start :=--
+run_state_monad(FunList, State) ->
+ lists:foldl(fun (Fun, StateN) -> Fun(StateN) end, State, FunList).
+
+return(V) -> V.
+
+-spec fail(_) -> no_return().
+fail(Reason) -> throw(Reason).
+%% --=: end :=--
diff --git a/deps/amqp_client/src/amqp_util.erl b/deps/amqp_client/src/amqp_util.erl
new file mode 100644
index 0000000000..df7ce30662
--- /dev/null
+++ b/deps/amqp_client/src/amqp_util.erl
@@ -0,0 +1,17 @@
+-module(amqp_util).
+
+-include("amqp_client_internal.hrl").
+
+-export([call_timeout/0]).
+
+call_timeout() ->
+ case get(gen_server_call_timeout) of
+ undefined ->
+ Timeout = rabbit_misc:get_env(amqp_client,
+ gen_server_call_timeout,
+ 60000),
+ put(gen_server_call_timeout, Timeout),
+ Timeout;
+ Timeout ->
+ Timeout
+ end.
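+
+%% Illustrative example: the timeout used above can be changed for
+%% processes that have not yet cached it, e.g.
+%%   application:set_env(amqp_client, gen_server_call_timeout, 120000).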
diff --git a/deps/amqp_client/src/overview.edoc.in b/deps/amqp_client/src/overview.edoc.in
new file mode 100644
index 0000000000..799b293239
--- /dev/null
+++ b/deps/amqp_client/src/overview.edoc.in
@@ -0,0 +1,27 @@
+@title AMQP Client for Erlang
+@author GoPivotal Inc. <support@rabbitmq.com>
+@copyright 2007-2020 VMware, Inc. or its affiliates.
+
+@version %%VERSION%%
+
+@reference <a href="https://www.rabbitmq.com/protocol.html" target="_top">AMQP documentation</a> on the RabbitMQ website.
+
+@doc
+
+== Overview ==
+
+This application provides an Erlang library to interact with an AMQP 0-9-1 compliant message broker. The module documentation assumes that the programmer has some basic familiarity with the execution model defined in the AMQP specification.
+
+The main components are {@link amqp_connection} and {@link amqp_channel}. The {@link amqp_connection} module is used to open and close connections to an AMQP broker as well as creating channels. The {@link amqp_channel} module is used to send and receive commands and messages to and from a broker within the context of a channel.
+
+== AMQP Record Definitions ==
+
+Many of the API functions take structured records as arguments. These records represent the commands defined in the AMQP execution model. The definitions for these records are automatically generated by the rabbitmq-codegen project. rabbitmq-codegen parses a machine-readable view of the specification and generates a header file that includes the entire command set of AMQP. Each command in AMQP has an identically named record. The protocol documentation serves as a reference for the attributes of each command.
+
+== Programming Model ==
+
+For more information, refer to the Erlang AMQP client <a href="https://www.rabbitmq.com/erlang-client-user-guide.html">developer's guide</a> on the RabbitMQ website.
+
+== RPC Components ==
+
+The {@link amqp_rpc_server} module provides a generic building block to expose Erlang functions via an RPC over AMQP mechanism. The {@link amqp_rpc_client} module provides a simple client utility for submitting RPC requests to a server via AMQP.
diff --git a/deps/amqp_client/src/rabbit_routing_util.erl b/deps/amqp_client/src/rabbit_routing_util.erl
new file mode 100644
index 0000000000..9d64a1468e
--- /dev/null
+++ b/deps/amqp_client/src/rabbit_routing_util.erl
@@ -0,0 +1,222 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2013-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_routing_util).
+
+-export([init_state/0, dest_prefixes/0, all_dest_prefixes/0]).
+-export([ensure_endpoint/4, ensure_endpoint/5, ensure_binding/3]).
+-export([parse_endpoint/1, parse_endpoint/2]).
+-export([parse_routing/1, dest_temp_queue/1]).
+
+-include("amqp_client.hrl").
+-include("rabbit_routing_prefixes.hrl").
+
+%%----------------------------------------------------------------------------
+
+init_state() -> sets:new().
+
+dest_prefixes() -> [?EXCHANGE_PREFIX, ?TOPIC_PREFIX, ?QUEUE_PREFIX,
+ ?AMQQUEUE_PREFIX, ?REPLY_QUEUE_PREFIX].
+
+all_dest_prefixes() -> [?TEMP_QUEUE_PREFIX | dest_prefixes()].
+
+%% --------------------------------------------------------------------------
+
+parse_endpoint(Destination) ->
+ parse_endpoint(Destination, false).
+
+parse_endpoint(undefined, AllowAnonymousQueue) ->
+ parse_endpoint("/queue", AllowAnonymousQueue);
+
+parse_endpoint(Destination, AllowAnonymousQueue) when is_binary(Destination) ->
+ parse_endpoint(unicode:characters_to_list(Destination),
+ AllowAnonymousQueue);
+parse_endpoint(Destination, AllowAnonymousQueue) when is_list(Destination) ->
+ case re:split(Destination, "/", [{return, list}]) of
+ [Name] ->
+ {ok, {queue, unescape(Name)}};
+ ["", Type | Rest]
+ when Type =:= "exchange" orelse Type =:= "queue" orelse
+ Type =:= "topic" orelse Type =:= "temp-queue" ->
+ parse_endpoint0(atomise(Type), Rest, AllowAnonymousQueue);
+ ["", "amq", "queue" | Rest] ->
+ parse_endpoint0(amqqueue, Rest, AllowAnonymousQueue);
+ ["", "reply-queue" = Prefix | [_|_]] ->
+ parse_endpoint0(reply_queue,
+ [lists:nthtail(2 + length(Prefix), Destination)],
+ AllowAnonymousQueue);
+ _ ->
+ {error, {unknown_destination, Destination}}
+ end.
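+
+%% Illustrative examples (hypothetical destinations):
+%%   parse_endpoint("/queue/foo") -> {ok, {queue, "foo"}}
+%%   parse_endpoint("/exchange/amq.fanout/key") ->
+%%       {ok, {exchange, {"amq.fanout", "key"}}}
+%%   parse_endpoint("/temp-queue/bar") -> {ok, {temp_queue, "bar"}}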
+
+parse_endpoint0(exchange, ["" | _] = Rest, _) ->
+ {error, {invalid_destination, exchange, to_url(Rest)}};
+parse_endpoint0(exchange, [Name], _) ->
+ {ok, {exchange, {unescape(Name), undefined}}};
+parse_endpoint0(exchange, [Name, Pattern], _) ->
+ {ok, {exchange, {unescape(Name), unescape(Pattern)}}};
+parse_endpoint0(queue, [], false) ->
+ {error, {invalid_destination, queue, []}};
+parse_endpoint0(queue, [], true) ->
+ {ok, {queue, undefined}};
+parse_endpoint0(Type, [[_|_]] = [Name], _) ->
+ {ok, {Type, unescape(Name)}};
+parse_endpoint0(Type, Rest, _) ->
+ {error, {invalid_destination, Type, to_url(Rest)}}.
+
+%% --------------------------------------------------------------------------
+
+ensure_endpoint(Dir, Channel, Endpoint, State) ->
+ ensure_endpoint(Dir, Channel, Endpoint, [], State).
+
+ensure_endpoint(source, Channel, {exchange, {Name, _}}, Params, State) ->
+ check_exchange(Name, Channel,
+ proplists:get_value(check_exchange, Params, false)),
+ Method = queue_declare_method(#'queue.declare'{}, exchange, Params),
+ #'queue.declare_ok'{queue = Queue} = amqp_channel:call(Channel, Method),
+ {ok, Queue, State};
+
+ensure_endpoint(source, Channel, {topic, _}, Params, State) ->
+ Method = queue_declare_method(#'queue.declare'{}, topic, Params),
+ #'queue.declare_ok'{queue = Queue} = amqp_channel:call(Channel, Method),
+ {ok, Queue, State};
+
+ensure_endpoint(_Dir, _Channel, {queue, undefined}, _Params, State) ->
+ {ok, undefined, State};
+
+ensure_endpoint(_, Channel, {queue, Name}, Params, State) ->
+ Params1 = rabbit_misc:pmerge(durable, true, Params),
+ Queue = list_to_binary(Name),
+ State1 = case sets:is_element(Queue, State) of
+ true -> State;
+ _ -> Method = queue_declare_method(
+ #'queue.declare'{queue = Queue,
+ nowait = true},
+ queue, Params1),
+ case Method#'queue.declare'.nowait of
+ true -> amqp_channel:cast(Channel, Method);
+ false -> amqp_channel:call(Channel, Method)
+ end,
+ sets:add_element(Queue, State)
+ end,
+ {ok, Queue, State1};
+
+ensure_endpoint(dest, Channel, {exchange, {Name, _}}, Params, State) ->
+ check_exchange(Name, Channel,
+ proplists:get_value(check_exchange, Params, false)),
+ {ok, undefined, State};
+
+ensure_endpoint(dest, _Ch, {topic, _}, _Params, State) ->
+ {ok, undefined, State};
+
+ensure_endpoint(_, _Ch, {amqqueue, Name}, _Params, State) ->
+ {ok, list_to_binary(Name), State};
+
+ensure_endpoint(_, _Ch, {reply_queue, Name}, _Params, State) ->
+ {ok, list_to_binary(Name), State};
+
+ensure_endpoint(_Direction, _Ch, _Endpoint, _Params, _State) ->
+ {error, invalid_endpoint}.
+
+%% --------------------------------------------------------------------------
+
+ensure_binding(QueueBin, {"", Queue}, _Channel) ->
+    %% i.e., we should only be asked to bind a queue to the default
+    %% exchange under its own name
+ QueueBin = list_to_binary(Queue),
+ ok;
+ensure_binding(Queue, {Exchange, RoutingKey}, Channel) ->
+ #'queue.bind_ok'{} =
+ amqp_channel:call(Channel,
+ #'queue.bind'{
+ queue = Queue,
+ exchange = list_to_binary(Exchange),
+ routing_key = list_to_binary(RoutingKey)}),
+ ok.
+
+%% --------------------------------------------------------------------------
+
+parse_routing({exchange, {Name, undefined}}) ->
+ {Name, ""};
+parse_routing({exchange, {Name, Pattern}}) ->
+ {Name, Pattern};
+parse_routing({topic, Name}) ->
+ {"amq.topic", Name};
+parse_routing({Type, Name})
+ when Type =:= queue orelse Type =:= reply_queue orelse Type =:= amqqueue ->
+ {"", Name}.
+
+dest_temp_queue({temp_queue, Name}) -> Name;
+dest_temp_queue(_) -> none.
+
+%% --------------------------------------------------------------------------
+
+check_exchange(_, _, false) ->
+ ok;
+check_exchange(ExchangeName, Channel, true) ->
+ XDecl = #'exchange.declare'{ exchange = list_to_binary(ExchangeName),
+ passive = true },
+ #'exchange.declare_ok'{} = amqp_channel:call(Channel, XDecl),
+ ok.
+
+update_queue_declare_arguments(Method, Params) ->
+ Method#'queue.declare'{arguments =
+ proplists:get_value(arguments, Params, [])}.
+
+update_queue_declare_exclusive(Method, Params) ->
+ case proplists:get_value(exclusive, Params) of
+ undefined -> Method;
+ Val -> Method#'queue.declare'{exclusive = Val}
+ end.
+
+update_queue_declare_auto_delete(Method, Params) ->
+ case proplists:get_value(auto_delete, Params) of
+ undefined -> Method;
+ Val -> Method#'queue.declare'{auto_delete = Val}
+ end.
+
+update_queue_declare_nowait(Method, Params) ->
+ case proplists:get_value(nowait, Params) of
+ undefined -> Method;
+ Val -> Method#'queue.declare'{nowait = Val}
+ end.
+
+queue_declare_method(#'queue.declare'{} = Method, Type, Params) ->
+ %% defaults
+ Method1 = case proplists:get_value(durable, Params, false) of
+ true -> Method#'queue.declare'{durable = true};
+ false -> Method#'queue.declare'{auto_delete = true,
+ exclusive = true}
+ end,
+ %% set the rest of queue.declare fields from Params
+ Method2 = lists:foldl(fun (F, Acc) -> F(Acc, Params) end,
+ Method1, [fun update_queue_declare_arguments/2,
+ fun update_queue_declare_exclusive/2,
+ fun update_queue_declare_auto_delete/2,
+ fun update_queue_declare_nowait/2]),
+ case {Type, proplists:get_value(subscription_queue_name_gen, Params)} of
+ {topic, SQNG} when is_function(SQNG) ->
+ Method2#'queue.declare'{queue = SQNG()};
+ {exchange, SQNG} when is_function(SQNG) ->
+ Method2#'queue.declare'{queue = SQNG()};
+ _ ->
+ Method2
+ end.
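+
+%% Illustrative example (hypothetical Params): for Type queue and
+%% Params = [{durable, true}, {exclusive, false}], the result is the
+%% input method with durable = true, exclusive = false and
+%% arguments = [].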
+
+%% --------------------------------------------------------------------------
+
+to_url([]) -> [];
+to_url(Lol) -> "/" ++ string:join(Lol, "/").
+
+atomise(Name) when is_list(Name) ->
+ list_to_atom(re:replace(Name, "-", "_", [{return,list}, global])).
+
+unescape(Str) -> unescape(Str, []).
+
+unescape("%2F" ++ Str, Acc) -> unescape(Str, [$/ | Acc]);
+unescape([C | Str], Acc) -> unescape(Str, [C | Acc]);
+unescape([], Acc) -> lists:reverse(Acc).
diff --git a/deps/amqp_client/src/uri_parser.erl b/deps/amqp_client/src/uri_parser.erl
new file mode 100644
index 0000000000..a2bb98dad7
--- /dev/null
+++ b/deps/amqp_client/src/uri_parser.erl
@@ -0,0 +1,125 @@
+%% This file is a copy of http_uri.erl from the R13B-1 Erlang/OTP
+%% distribution with several modifications.
+
+%% All modifications are Copyright (c) 2009-2020 VMware, Inc. or its affiliates.
+
+%% ``The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved via the world wide web at https://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Initial Developer of the Original Code is Ericsson Utvecklings AB.
+%% Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings
+%% AB. All Rights Reserved.''
+
+%% See https://tools.ietf.org/html/rfc3986
+
+-module(uri_parser).
+
+-export([parse/2]).
+
+%%%=========================================================================
+%%% API
+%%%=========================================================================
+
+%% Returns a key list of elements extracted from the URI. Note that
+%% only 'scheme' is guaranteed to exist. Key-Value pairs from the
+%% Defaults list will be used in the absence of a non-empty value
+%% extracted from the URI. The values extracted are strings, except for
+%% 'port' which is an integer, 'userinfo' which is a list of strings
+%% (split on $:), and 'query' which is a list of strings where no $=
+%% char is found, or a {key,value} pair where a $= char is found (initial
+%% split on $& and subsequent optional split on $=). Possible keys
+%% are: 'scheme', 'userinfo', 'host', 'port', 'path', 'query',
+%% 'fragment'.
+
+-spec parse(AbsURI, Defaults :: list())
+ -> [{atom(), string()}] | {error, no_scheme | {malformed_uri, AbsURI, any()}}
+ when AbsURI :: string() | binary().
+parse(AbsURI, Defaults) ->
+ AbsUriString = rabbit_data_coercion:to_list(AbsURI),
+ case parse_scheme(AbsUriString) of
+ {error, Reason} ->
+ {error, Reason};
+ {Scheme, Rest} ->
+ case (catch parse_uri_rest(Rest, true)) of
+ [_|_] = List ->
+ merge_keylists([{scheme, Scheme} | List], Defaults);
+ E ->
+ {error, {malformed_uri, AbsURI, E}}
+ end
+ end.
+
+%%%========================================================================
+%%% Internal functions
+%%%========================================================================
+parse_scheme(AbsURI) ->
+ split_uri(AbsURI, ":", {error, no_scheme}).
+
+parse_uri_rest("//" ++ URIPart, true) ->
+ %% we have an authority
+ {Authority, PathQueryFrag} =
+ split_uri(URIPart, "/|\\?|#", {URIPart, ""}, 1, 0),
+ AuthorityParts = parse_authority(Authority),
+ parse_uri_rest(PathQueryFrag, false) ++ AuthorityParts;
+parse_uri_rest(PathQueryFrag, _Bool) ->
+ %% no authority, just a path and maybe query
+ {PathQuery, Frag} = split_uri(PathQueryFrag, "#", {PathQueryFrag, ""}),
+ {Path, QueryString} = split_uri(PathQuery, "\\?", {PathQuery, ""}),
+ QueryPropList = split_query(QueryString),
+ [{path, Path}, {'query', QueryPropList}, {fragment, Frag}].
+
+parse_authority(Authority) ->
+ {UserInfo, HostPort} = split_uri(Authority, "@", {"", Authority}),
+ UserInfoSplit = case re:split(UserInfo, ":", [{return, list}]) of
+ [""] -> [];
+ UIS -> UIS
+ end,
+ [{userinfo, UserInfoSplit} | parse_host_port(HostPort)].
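+
+%% e.g. parse_authority("guest:secret@localhost:5672") yields
+%% [{userinfo,["guest","secret"]}, {host,"localhost"}, {port,5672}].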
+
+parse_host_port("[" ++ HostPort) -> % IPv6 address in brackets
+ {Host, ColonPort} = split_uri(HostPort, "\\]", {HostPort, ""}),
+ [{host, Host} | case split_uri(ColonPort, ":", not_found, 0, 1) of
+ not_found -> case ColonPort of
+ [] -> [];
+ _ -> throw({invalid_port, ColonPort})
+ end;
+ {_, Port} -> [{port, list_to_integer(Port)}]
+ end];
+
+parse_host_port(HostPort) ->
+ {Host, Port} = split_uri(HostPort, ":", {HostPort, not_found}),
+ [{host, Host} | case Port of
+ not_found -> [];
+ _ -> [{port, list_to_integer(Port)}]
+ end].
+
+split_query(Query) ->
+ case re:split(Query, "&", [{return, list}]) of
+ [""] -> [];
+ QParams -> [split_uri(Param, "=", Param) || Param <- QParams]
+ end.
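+
+%% For example, split_query("x=1&y") yields [{"x","1"}, "y"]: parameters
+%% containing $= become {Key, Value} pairs, bare words stay strings.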
+
+split_uri(UriPart, SplitChar, NoMatchResult) ->
+ split_uri(UriPart, SplitChar, NoMatchResult, 1, 1).
+
+split_uri(UriPart, SplitChar, NoMatchResult, SkipLeft, SkipRight) ->
+ case re:run(UriPart, SplitChar) of
+ {match, [{Match, _}]} ->
+ {string:substr(UriPart, 1, Match + 1 - SkipLeft),
+ string:substr(UriPart, Match + 1 + SkipRight, length(UriPart))};
+ nomatch ->
+ NoMatchResult
+ end.
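+
+%% e.g. split_uri("host:5672", ":", not_found) => {"host", "5672"},
+%% while split_uri("host", ":", not_found) => not_found.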
+
+merge_keylists(A, B) ->
+ {AEmpty, ANonEmpty} = lists:partition(fun ({_Key, V}) -> V =:= [] end, A),
+ [AEmptyS, ANonEmptyS, BS] =
+ [lists:ukeysort(1, X) || X <- [AEmpty, ANonEmpty, B]],
+ lists:ukeymerge(1, lists:ukeymerge(1, ANonEmptyS, BS), AEmptyS).
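+
+%% merge_keylists/2 prefers non-empty values extracted from the URI over
+%% the defaults, while defaults fill in keys whose extracted value was
+%% empty or missing. An illustrative trace of the code above:
+%%   merge_keylists([{port,[]}, {host,"h"}], [{port,5672}, {heartbeat,0}])
+%%   => [{heartbeat,0}, {host,"h"}, {port,5672}]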
diff --git a/deps/amqp_client/test/system_SUITE.erl b/deps/amqp_client/test/system_SUITE.erl
new file mode 100644
index 0000000000..9e39e468b7
--- /dev/null
+++ b/deps/amqp_client/test/system_SUITE.erl
@@ -0,0 +1,1485 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2016-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(system_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("public_key/include/public_key.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-include("amqp_client.hrl").
+-include("amqp_client_internal.hrl").
+
+-compile(export_all).
+
+-define(UNAUTHORIZED_USER, <<"test_user_no_perm">>).
+
+%% The latch constant defines how many processes are spawned in order
+%% to run certain functionality in parallel. It follows the standard
+%% countdown latch pattern.
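+%%
+%% A typical use, as in the test cases below:
+%%   Parent = self(),
+%%   [spawn(fun () -> do_work(), Parent ! finished end)
+%%    || _ <- lists:seq(1, ?LATCH)],
+%%   latch_loop(?LATCH)  %% blocks until every worker has reported
+%% (do_work/0 is a placeholder for whatever the test runs in parallel.)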
+-define(LATCH, 100).
+
+%% The wait constant defines how long a consumer waits before it
+%% unsubscribes.
+-define(WAIT, 10000).
+
+%% How long to wait for a process to die after an expected failure.
+-define(PROCESS_EXIT_TIMEOUT, 5000).
+
+all() ->
+ [
+ {group, direct_connection_tests},
+ {group, network_connection_tests}
+ ].
+
+-define(COMMON_PARALLEL_TEST_CASES, [
+ simultaneous_close,
+ basic_recover,
+ basic_consume,
+ consume_notification,
+ basic_nack,
+ large_content,
+ lifecycle,
+ no_vhost,
+ nowait_exchange_declare,
+ channel_repeat_open_close,
+ channel_multi_open_close,
+ basic_ack,
+ basic_ack_call,
+ channel_lifecycle,
+ queue_unbind,
+ sync_method_serialization,
+ async_sync_method_serialization,
+ sync_async_method_serialization,
+ rpc,
+ rpc_client,
+ confirm,
+ confirm_barrier,
+ confirm_select_before_wait,
+ confirm_barrier_timeout,
+ confirm_barrier_die_timeout,
+ default_consumer,
+ subscribe_nowait,
+ non_existent_exchange,
+ non_existent_user,
+ invalid_password,
+ non_existent_vhost,
+ no_permission,
+ channel_writer_death,
+ command_invalid_over_channel,
+ named_connection,
+ {teardown_loop, [{repeat, 100}, parallel], [teardown]},
+ {bogus_rpc_loop, [{repeat, 100}, parallel], [bogus_rpc]},
+ {hard_error_loop, [{repeat, 100}, parallel], [hard_error]}
+ ]).
+-define(COMMON_NON_PARALLEL_TEST_CASES, [
+ basic_qos, %% Not parallel because it's time-based.
+ connection_failure,
+ channel_death
+ ]).
+
+groups() ->
+ [
+ {direct_connection_tests, [], [
+ {parallel_tests, [parallel], [
+ basic_get_direct,
+ no_user,
+ no_password
+ | ?COMMON_PARALLEL_TEST_CASES]},
+ {non_parallel_tests, [], ?COMMON_NON_PARALLEL_TEST_CASES}
+ ]},
+ {network_connection_tests, [], [
+ {parallel_tests, [parallel], [
+ basic_get_ipv4,
+ basic_get_ipv6,
+ basic_get_ipv4_ssl,
+ basic_get_ipv6_ssl,
+ pub_and_close,
+ channel_tune_negotiation,
+ shortstr_overflow_property,
+ shortstr_overflow_field,
+ command_invalid_over_channel0
+ | ?COMMON_PARALLEL_TEST_CASES]},
+ {non_parallel_tests, [], [
+ connection_blocked_network
+ | ?COMMON_NON_PARALLEL_TEST_CASES]}
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config,
+ rabbit_ct_broker_helpers:setup_steps() ++ [
+ fun ensure_amqp_client_srcdir/1,
+ fun create_unauthorized_user/1
+ ]).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config, [
+ fun delete_unauthorized_user/1
+ ] ++ rabbit_ct_broker_helpers:teardown_steps()).
+
+ensure_amqp_client_srcdir(Config) ->
+ rabbit_ct_helpers:ensure_application_srcdir(Config,
+ amqp_client, amqp_client).
+
+create_unauthorized_user(Config) ->
+ Cmd = ["add_user", ?UNAUTHORIZED_USER, ?UNAUTHORIZED_USER],
+ case rabbit_ct_broker_helpers:rabbitmqctl(Config, 0, Cmd) of
+ {ok, _} -> rabbit_ct_helpers:set_config(
+ Config,
+ [{rmq_unauthorized_username, ?UNAUTHORIZED_USER},
+ {rmq_unauthorized_password, ?UNAUTHORIZED_USER}]);
+ _ -> {skip, "Failed to create unauthorized user"}
+ end.
+
+delete_unauthorized_user(Config) ->
+ Cmd = ["delete_user", ?UNAUTHORIZED_USER],
+ rabbit_ct_broker_helpers:rabbitmqctl(Config, 0, Cmd),
+ Config.
+
+%% -------------------------------------------------------------------
+%% Groups.
+%% -------------------------------------------------------------------
+
+init_per_group(direct_connection_tests, Config) ->
+ rabbit_ct_helpers:set_config(Config, {amqp_client_conn_type, direct});
+init_per_group(network_connection_tests, Config) ->
+ rabbit_ct_helpers:set_config(Config, {amqp_client_conn_type, network});
+init_per_group(Group, Config)
+ when Group =:= parallel_tests
+ orelse Group =:= non_parallel_tests
+ orelse Group =:= teardown_loop
+ orelse Group =:= bogus_rpc_loop
+ orelse Group =:= hard_error_loop ->
+ case ?config(amqp_client_conn_type, Config) of
+ undefined -> rabbit_ct_helpers:set_config(
+ Config, {amqp_client_conn_type, network});
+ _ -> Config
+ end.
+
+end_per_group(_, Config) ->
+ Config.
+
+%% -------------------------------------------------------------------
+%% Test cases.
+%% -------------------------------------------------------------------
+
+init_per_testcase(Test, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Test),
+ {Username, Password} = case Test of
+ no_user -> {none,
+ none};
+ no_password -> {?config(rmq_username, Config),
+ none};
+ non_existent_user -> {<<"no-user">>,
+ <<"no-user">>};
+ invalid_password -> {?config(rmq_username, Config),
+ <<"bad">>};
+ no_permission -> {?config(rmq_unauthorized_username, Config),
+ ?config(rmq_unauthorized_password, Config)};
+ _ -> {?config(rmq_username, Config),
+ ?config(rmq_password, Config)}
+ end,
+ VHost = case Test of
+ no_vhost -> <<"/noexist">>;
+ non_existent_vhost -> <<"oops">>;
+ _ -> ?config(rmq_vhost, Config)
+ end,
+ Host = case Test of
+ basic_get_ipv4 -> "127.0.0.1";
+ basic_get_ipv6 -> "::1";
+ basic_get_ipv4_ssl -> "127.0.0.1";
+ basic_get_ipv6_ssl -> "::1";
+ _ -> ?config(rmq_hostname, Config)
+ end,
+ {Port, SSLOpts} = if
+ Test =:= basic_get_ipv4_ssl orelse
+ Test =:= basic_get_ipv6_ssl ->
+ CertsDir = ?config(rmq_certsdir, Config),
+             % Compute the SNI directly from the client certificate's CN.
+ {ok, PemBin} = file:read_file(filename:join([CertsDir,
+ "client",
+ "cert.pem"])),
+ [{_, DerCert, _}] = public_key:pem_decode(PemBin),
+ [CN] = rabbit_cert_info:subject_items(DerCert,
+ ?'id-at-commonName'),
+ {
+ rabbit_ct_broker_helpers:get_node_config(Config, 0,
+ tcp_port_amqp_tls),
+ [
+ {cacertfile, filename:join([CertsDir, "testca", "cacert.pem"])},
+ {certfile, filename:join([CertsDir, "client", "cert.pem"])},
+ {keyfile, filename:join([CertsDir, "client", "key.pem"])},
+ {verify, verify_peer},
+ {server_name_indication, CN}
+ ]
+ };
+ true ->
+ {
+ rabbit_ct_broker_helpers:get_node_config(Config, 0,
+ tcp_port_amqp),
+ none
+ }
+ end,
+ ChannelMax = case Test of
+ channel_tune_negotiation -> 10;
+ _ -> ?config(rmq_channel_max, Config)
+ end,
+ ConnParams = case ?config(amqp_client_conn_type, Config) of
+ direct ->
+ #amqp_params_direct{
+ username = Username,
+ password = Password,
+ node = rabbit_ct_broker_helpers:get_node_config(Config,
+ 0, nodename),
+ virtual_host = VHost};
+ network ->
+ #amqp_params_network{
+ username = Username,
+ password = Password,
+ host = Host,
+ port = Port,
+ virtual_host = VHost,
+ channel_max = ChannelMax,
+ ssl_options = SSLOpts}
+ end,
+ rabbit_ct_helpers:set_config(Config,
+ {amqp_client_conn_params, ConnParams}).
+
+end_per_testcase(Test, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Test).
+
+%% -------------------------------------------------------------------
+
+basic_get_direct(Config) -> basic_get(Config).
+basic_get_ipv4(Config) -> basic_get(Config).
+basic_get_ipv6(Config) -> basic_get(Config).
+basic_get_ipv4_ssl(Config) -> basic_get(Config).
+basic_get_ipv6_ssl(Config) -> basic_get(Config).
+
+basic_get(Config) ->
+ case new_connection(Config) of
+ {ok, Connection} ->
+ {ok, Channel} = amqp_connection:open_channel(Connection),
+ Payload = <<"foobar">>,
+ {ok, Q} = setup_publish(Channel, Payload),
+ get_and_assert_equals(Channel, Q, Payload),
+ get_and_assert_empty(Channel, Q),
+ teardown(Connection, Channel);
+ {error, Posix}
+ when Posix =:= eaddrnotavail orelse Posix =:= enetunreach ->
+ {skip, inet:format_error(Posix)}
+ end.
+
+named_connection(Config) ->
+ ConnName = <<"Custom Name">>,
+ Params = ?config(amqp_client_conn_params, Config),
+ {ok, Connection} = amqp_connection:start(Params, ConnName),
+ ConnName = amqp_connection:connection_name(Connection),
+ {ok, Channel} = amqp_connection:open_channel(Connection),
+ Payload = <<"foobar">>,
+ {ok, Q} = setup_publish(Channel, Payload),
+ get_and_assert_equals(Channel, Q, Payload),
+ get_and_assert_empty(Channel, Q),
+ teardown(Connection, Channel).
+
+%% -------------------------------------------------------------------
+
+simultaneous_close(Config) ->
+ {ok, Connection} = new_connection(Config),
+ %% We pick a high channel number, to avoid any conflict with other
+ %% tests running in parallel.
+ ChannelNumber = case ?config(rmq_channel_max, Config) of
+ 0 -> ?MAX_CHANNEL_NUMBER;
+ N -> N
+ end,
+ {ok, Channel1} = amqp_connection:open_channel(Connection, ChannelNumber),
+
+ %% Publish to non-existent exchange and immediately close channel
+ amqp_channel:cast(Channel1, #'basic.publish'{exchange = <<"does-not-exist">>,
+ routing_key = <<"a">>},
+ #amqp_msg{payload = <<"foobar">>}),
+ try amqp_channel:close(Channel1) of
+ ok -> wait_for_death(Channel1);
+ closing -> wait_for_death(Channel1)
+ catch
+ exit:{noproc, _} -> ok;
+ exit:{{shutdown, {server_initiated_close, ?NOT_FOUND, _}}, _} -> ok
+ end,
+
+ %% Channel2 (opened with the exact same number as Channel1)
+ %% should not receive a close_ok (which is intended for Channel1)
+ {ok, Channel2} = amqp_connection:open_channel(Connection, ChannelNumber),
+
+ %% Make sure Channel2 functions normally
+ #'exchange.declare_ok'{} =
+ amqp_channel:call(Channel2,
+ #'exchange.declare'{exchange = <<"simultaneous_close">>}),
+ #'exchange.delete_ok'{} =
+ amqp_channel:call(Channel2,
+ #'exchange.delete'{exchange = <<"simultaneous_close">>}),
+
+ teardown(Connection, Channel2).
+
+%% -------------------------------------------------------------------
+
+basic_qos(Config) ->
+ [NoQos, Qos] = [basic_qos_test(Config, Prefetch) || Prefetch <- [0,1]],
+ ExpectedRatio = (1+1) / (1+50/5),
+ FudgeFactor = 2, %% account for timing variations
+ ct:pal(?LOW_IMPORTANCE,
+ "QOS=0 -> ~p (noqos)~n"
+ "QOS=1 -> ~p (qos)~n"
+ "qos / noqos < ~p * ~p = ~p < ~p = ~p~n",
+           [NoQos, Qos, ExpectedRatio, FudgeFactor,
+            Qos / NoQos, ExpectedRatio * FudgeFactor,
+            Qos / NoQos < ExpectedRatio * FudgeFactor]),
+ true = Qos / NoQos < ExpectedRatio * FudgeFactor.
+
+basic_qos_test(Config, Prefetch) ->
+ {ok, Connection} = new_connection(Config),
+ Messages = 100,
+ Workers = [5, 50],
+ Parent = self(),
+ {ok, Chan} = amqp_connection:open_channel(Connection),
+ #'queue.declare_ok'{queue = Q} =
+ amqp_channel:call(Chan, #'queue.declare'{}),
+ Kids = [spawn(
+ fun() ->
+ {ok, Channel} = amqp_connection:open_channel(Connection),
+ amqp_channel:call(Channel,
+ #'basic.qos'{prefetch_count = Prefetch}),
+ amqp_channel:call(Channel,
+ #'basic.consume'{queue = Q}),
+ Parent ! finished,
+ sleeping_consumer(Channel, Sleep, Parent)
+ end) || Sleep <- Workers],
+ latch_loop(length(Kids)),
+ spawn(fun() -> {ok, Channel} = amqp_connection:open_channel(Connection),
+ producer_loop(Channel, Q, Messages)
+ end),
+ {Res, _} = timer:tc(erlang, apply, [fun latch_loop/1, [Messages]]),
+ [Kid ! stop || Kid <- Kids],
+ latch_loop(length(Kids)),
+ teardown(Connection, Chan),
+ Res.
+
+sleeping_consumer(Channel, Sleep, Parent) ->
+ receive
+ stop ->
+ do_stop(Channel, Parent);
+ #'basic.consume_ok'{} ->
+ sleeping_consumer(Channel, Sleep, Parent);
+ #'basic.cancel_ok'{} ->
+ exit(unexpected_cancel_ok);
+ {#'basic.deliver'{delivery_tag = DeliveryTag}, _Content} ->
+ Parent ! finished,
+ receive stop -> do_stop(Channel, Parent)
+ after Sleep -> ok
+ end,
+ amqp_channel:cast(Channel,
+ #'basic.ack'{delivery_tag = DeliveryTag}),
+ sleeping_consumer(Channel, Sleep, Parent)
+ end.
+
+do_stop(Channel, Parent) ->
+ Parent ! finished,
+ amqp_channel:close(Channel),
+ wait_for_death(Channel),
+ exit(normal).
+
+producer_loop(Channel, _RoutingKey, 0) ->
+ amqp_channel:close(Channel),
+ wait_for_death(Channel),
+ ok;
+
+producer_loop(Channel, RoutingKey, N) ->
+ Publish = #'basic.publish'{exchange = <<>>, routing_key = RoutingKey},
+ amqp_channel:call(Channel, Publish, #amqp_msg{payload = <<>>}),
+ producer_loop(Channel, RoutingKey, N - 1).
+
+%% -------------------------------------------------------------------
+
+basic_recover(Config) ->
+ {ok, Connection} = new_connection(Config),
+ {ok, Channel} = amqp_connection:open_channel(
+ Connection, {amqp_direct_consumer, [self()]}),
+ #'queue.declare_ok'{queue = Q} =
+ amqp_channel:call(Channel, #'queue.declare'{}),
+ #'basic.consume_ok'{consumer_tag = Tag} =
+ amqp_channel:call(Channel, #'basic.consume'{queue = Q}),
+ receive #'basic.consume_ok'{consumer_tag = Tag} -> ok end,
+ Publish = #'basic.publish'{exchange = <<>>, routing_key = Q},
+ amqp_channel:call(Channel, Publish, #amqp_msg{payload = <<"foobar">>}),
+ receive
+ {#'basic.deliver'{consumer_tag = Tag}, _} ->
+ %% no_ack set to false, but don't send ack
+ ok
+ end,
+ BasicRecover = #'basic.recover'{requeue = true},
+ amqp_channel:cast(Channel, BasicRecover),
+ receive
+ {#'basic.deliver'{consumer_tag = Tag,
+ delivery_tag = DeliveryTag2}, _} ->
+ amqp_channel:cast(Channel,
+ #'basic.ack'{delivery_tag = DeliveryTag2})
+ end,
+ teardown(Connection, Channel).
+
+%% -------------------------------------------------------------------
+
+basic_consume(Config) ->
+ {ok, Connection} = new_connection(Config),
+ {ok, Channel} = amqp_connection:open_channel(Connection),
+ X = <<"basic_consume">>,
+ amqp_channel:call(Channel, #'exchange.declare'{exchange = X}),
+ RoutingKey = <<"key">>,
+ Parent = self(),
+ [spawn_link(fun () ->
+ consume_loop(Channel, X, RoutingKey, Parent, <<Tag:32>>)
+ end) || Tag <- lists:seq(1, ?LATCH)],
+ latch_loop(?LATCH),
+ Publish = #'basic.publish'{exchange = X, routing_key = RoutingKey},
+ amqp_channel:call(Channel, Publish, #amqp_msg{payload = <<"foobar">>}),
+ latch_loop(?LATCH),
+ amqp_channel:call(Channel, #'exchange.delete'{exchange = X}),
+ teardown(Connection, Channel).
+
+consume_loop(Channel, X, RoutingKey, Parent, Tag) ->
+ #'queue.declare_ok'{queue = Q} =
+ amqp_channel:call(Channel, #'queue.declare'{}),
+ #'queue.bind_ok'{} =
+ amqp_channel:call(Channel, #'queue.bind'{queue = Q,
+ exchange = X,
+ routing_key = RoutingKey}),
+ #'basic.consume_ok'{} =
+ amqp_channel:call(Channel,
+ #'basic.consume'{queue = Q, consumer_tag = Tag}),
+ receive #'basic.consume_ok'{consumer_tag = Tag} -> ok end,
+ Parent ! finished,
+ receive {#'basic.deliver'{}, _} -> ok end,
+ #'basic.cancel_ok'{} =
+ amqp_channel:call(Channel, #'basic.cancel'{consumer_tag = Tag}),
+ receive #'basic.cancel_ok'{consumer_tag = Tag} -> ok end,
+ Parent ! finished.
+
+%% -------------------------------------------------------------------
+
+consume_notification(Config) ->
+ {ok, Connection} = new_connection(Config),
+ {ok, Channel} = amqp_connection:open_channel(Connection),
+ #'queue.declare_ok'{queue = Q} =
+ amqp_channel:call(Channel, #'queue.declare'{}),
+ #'basic.consume_ok'{consumer_tag = CTag} = ConsumeOk =
+ amqp_channel:call(Channel, #'basic.consume'{queue = Q}),
+ receive ConsumeOk -> ok end,
+ #'queue.delete_ok'{} =
+ amqp_channel:call(Channel, #'queue.delete'{queue = Q}),
+ receive #'basic.cancel'{consumer_tag = CTag} -> ok end,
+ teardown(Connection, Channel).
+
+%% -------------------------------------------------------------------
+
+basic_nack(Config) ->
+ {ok, Connection} = new_connection(Config),
+ {ok, Channel} = amqp_connection:open_channel(Connection),
+ #'queue.declare_ok'{queue = Q}
+ = amqp_channel:call(Channel, #'queue.declare'{}),
+
+ Payload = <<"m1">>,
+
+ amqp_channel:call(Channel,
+ #'basic.publish'{exchange = <<>>, routing_key = Q},
+ #amqp_msg{payload = Payload}),
+
+ #'basic.get_ok'{delivery_tag = Tag} =
+ get_and_assert_equals(Channel, Q, Payload, false),
+
+ amqp_channel:call(Channel, #'basic.nack'{delivery_tag = Tag,
+ multiple = false,
+ requeue = false}),
+
+ get_and_assert_empty(Channel, Q),
+ teardown(Connection, Channel).
+
+%% -------------------------------------------------------------------
+
+large_content(Config) ->
+ {ok, Connection} = new_connection(Config),
+ {ok, Channel} = amqp_connection:open_channel(Connection),
+ #'queue.declare_ok'{queue = Q}
+ = amqp_channel:call(Channel, #'queue.declare'{}),
+ F = list_to_binary([rand:uniform(256)-1 || _ <- lists:seq(1, 1000)]),
+ Payload = list_to_binary([F || _ <- lists:seq(1, 1000)]),
+ Publish = #'basic.publish'{exchange = <<>>, routing_key = Q},
+ amqp_channel:call(Channel, Publish, #amqp_msg{payload = Payload}),
+ get_and_assert_equals(Channel, Q, Payload),
+ teardown(Connection, Channel).
+
+%% -------------------------------------------------------------------
+
+lifecycle(Config) ->
+ {ok, Connection} = new_connection(Config),
+ X = <<"lifecycle">>,
+ {ok, Channel} = amqp_connection:open_channel(Connection),
+ amqp_channel:call(Channel,
+ #'exchange.declare'{exchange = X,
+ type = <<"topic">>}),
+ Parent = self(),
+ [spawn(fun () -> queue_exchange_binding(Channel, X, Parent, Tag) end)
+ || Tag <- lists:seq(1, ?LATCH)],
+ latch_loop(?LATCH),
+ amqp_channel:call(Channel, #'exchange.delete'{exchange = X}),
+ teardown(Connection, Channel).
+
+queue_exchange_binding(Channel, X, Parent, Tag) ->
+ receive
+ nothing -> ok
+ after (?LATCH - Tag rem 7) * 10 ->
+ ok
+ end,
+ Q = list_to_binary(rabbit_misc:format("lifecycle.a.b.c.~b", [Tag])),
+ Binding = <<"lifecycle.a.b.c.*">>,
+ #'queue.declare_ok'{queue = Q1}
+ = amqp_channel:call(Channel, #'queue.declare'{queue = Q}),
+ Q = Q1,
+ Route = #'queue.bind'{queue = Q,
+ exchange = X,
+ routing_key = Binding},
+ amqp_channel:call(Channel, Route),
+ amqp_channel:call(Channel, #'queue.delete'{queue = Q}),
+ Parent ! finished.
+
+%% -------------------------------------------------------------------
+
+no_user(Config) -> no_something(Config).
+no_password(Config) -> no_something(Config).
+
+no_something(Config) ->
+ {ok, Connection} = new_connection(Config),
+ amqp_connection:close(Connection),
+ wait_for_death(Connection).
+
+%% -------------------------------------------------------------------
+
+no_vhost(Config) ->
+ {error, not_allowed} = new_connection(Config),
+ ok.
+
+%% -------------------------------------------------------------------
+
+nowait_exchange_declare(Config) ->
+ {ok, Connection} = new_connection(Config),
+ X = <<"nowait_exchange_declare">>,
+ {ok, Channel} = amqp_connection:open_channel(Connection),
+ ok = amqp_channel:call(Channel, #'exchange.declare'{exchange = X,
+ type = <<"topic">>,
+ nowait = true}),
+ teardown(Connection, Channel).
+
+%% -------------------------------------------------------------------
+
+channel_repeat_open_close(Config) ->
+ {ok, Connection} = new_connection(Config),
+ lists:foreach(
+ fun(_) ->
+ {ok, Ch} = amqp_connection:open_channel(Connection),
+ ok = amqp_channel:close(Ch)
+ end, lists:seq(1, 50)),
+ amqp_connection:close(Connection),
+ wait_for_death(Connection).
+
+%% -------------------------------------------------------------------
+
+channel_multi_open_close(Config) ->
+ {ok, Connection} = new_connection(Config),
+ [spawn_link(
+ fun() ->
+ try amqp_connection:open_channel(Connection) of
+ {ok, Ch} -> try amqp_channel:close(Ch) of
+ ok -> ok;
+ closing -> ok
+ catch
+ exit:{noproc, _} -> ok;
+ exit:{normal, _} -> ok
+ end;
+ closing -> ok
+ catch
+ exit:{noproc, _} -> ok;
+ exit:{normal, _} -> ok
+ end
+ end) || _ <- lists:seq(1, 50)],
+ erlang:yield(),
+ amqp_connection:close(Connection),
+ wait_for_death(Connection).
+
+%% -------------------------------------------------------------------
+
+basic_ack(Config) ->
+ {ok, Connection} = new_connection(Config),
+ {ok, Channel} = amqp_connection:open_channel(Connection),
+ {ok, Q} = setup_publish(Channel),
+ {#'basic.get_ok'{delivery_tag = Tag}, _}
+ = amqp_channel:call(Channel, #'basic.get'{queue = Q, no_ack = false}),
+ amqp_channel:cast(Channel, #'basic.ack'{delivery_tag = Tag}),
+ teardown(Connection, Channel).
+
+%% -------------------------------------------------------------------
+
+basic_ack_call(Config) ->
+ {ok, Connection} = new_connection(Config),
+ {ok, Channel} = amqp_connection:open_channel(Connection),
+ {ok, Q} = setup_publish(Channel),
+ {#'basic.get_ok'{delivery_tag = Tag}, _}
+ = amqp_channel:call(Channel, #'basic.get'{queue = Q, no_ack = false}),
+ amqp_channel:call(Channel, #'basic.ack'{delivery_tag = Tag}),
+ teardown(Connection, Channel).
+
+%% -------------------------------------------------------------------
+
+channel_lifecycle(Config) ->
+ {ok, Connection} = new_connection(Config),
+ {ok, Channel} = amqp_connection:open_channel(Connection),
+ amqp_channel:close(Channel),
+ {ok, Channel2} = amqp_connection:open_channel(Connection),
+ teardown(Connection, Channel2).
+
+%% -------------------------------------------------------------------
+
+queue_unbind(Config) ->
+ {ok, Connection} = new_connection(Config),
+ X = <<"queue_unbind-eggs">>,
+ Q = <<"queue_unbind-foobar">>,
+ Key = <<"quay">>,
+ Payload = <<"foobar">>,
+ {ok, Channel} = amqp_connection:open_channel(Connection),
+ amqp_channel:call(Channel, #'exchange.declare'{exchange = X}),
+ amqp_channel:call(Channel, #'queue.declare'{queue = Q}),
+ Bind = #'queue.bind'{queue = Q,
+ exchange = X,
+ routing_key = Key},
+ amqp_channel:call(Channel, Bind),
+ Publish = #'basic.publish'{exchange = X, routing_key = Key},
+ amqp_channel:call(Channel, Publish, Msg = #amqp_msg{payload = Payload}),
+ get_and_assert_equals(Channel, Q, Payload),
+ Unbind = #'queue.unbind'{queue = Q,
+ exchange = X,
+ routing_key = Key},
+ amqp_channel:call(Channel, Unbind),
+ amqp_channel:call(Channel, Publish, Msg),
+ get_and_assert_empty(Channel, Q),
+ teardown(Connection, Channel).
+
+%% -------------------------------------------------------------------
+
+%% This is designed to exercise the internal queuing mechanism
+%% to ensure that sync methods are properly serialized
+sync_method_serialization(Config) ->
+ abstract_method_serialization_test(
+ "sync_method_serialization", Config,
+ fun (_, _) -> ok end,
+ fun (Channel, _, _, _, Count) ->
+ Q = fmt("sync_method_serialization-~p", [Count]),
+ #'queue.declare_ok'{queue = Q1} =
+ amqp_channel:call(Channel,
+ #'queue.declare'{queue = Q,
+ exclusive = true}),
+ Q = Q1
+ end,
+ fun (_, _, _, _, _) -> ok end).
+
+%% This is designed to exercise the internal queuing mechanism
+%% to ensure that sending async methods and then a sync method is serialized
+%% properly
+async_sync_method_serialization(Config) ->
+ abstract_method_serialization_test(
+ "async_sync_method_serialization", Config,
+ fun (Channel, _X) ->
+ #'queue.declare_ok'{queue = Q} =
+ amqp_channel:call(Channel, #'queue.declare'{}),
+ Q
+ end,
+ fun (Channel, X, Payload, _, _) ->
+ %% The async methods
+ ok = amqp_channel:call(Channel,
+ #'basic.publish'{exchange = X,
+ routing_key = <<"a">>},
+ #amqp_msg{payload = Payload})
+ end,
+ fun (Channel, X, _, Q, _) ->
+ %% The sync method
+ #'queue.bind_ok'{} =
+ amqp_channel:call(Channel,
+ #'queue.bind'{exchange = X,
+ queue = Q,
+ routing_key = <<"a">>}),
+ %% No message should have been routed
+ #'queue.declare_ok'{message_count = 0} =
+ amqp_channel:call(Channel,
+ #'queue.declare'{queue = Q,
+ passive = true})
+ end).
+
+%% This is designed to exercise the internal queuing mechanism
+%% to ensure that sending sync methods and then an async method is serialized
+%% properly
+sync_async_method_serialization(Config) ->
+ abstract_method_serialization_test(
+ "sync_async_method_serialization", Config,
+ fun (_, _) -> ok end,
+ fun (Channel, X, _Payload, _, _) ->
+ %% The sync methods (called with cast to resume immediately;
+ %% the order should still be preserved)
+ #'queue.declare_ok'{queue = Q} =
+ amqp_channel:call(Channel,
+ #'queue.declare'{exclusive = true}),
+ amqp_channel:cast(Channel, #'queue.bind'{exchange = X,
+ queue = Q,
+ routing_key= <<"a">>}),
+ Q
+ end,
+ fun (Channel, X, Payload, _, MultiOpRet) ->
+ #'confirm.select_ok'{} = amqp_channel:call(
+ Channel, #'confirm.select'{}),
+ ok = amqp_channel:call(Channel,
+ #'basic.publish'{exchange = X,
+ routing_key = <<"a">>},
+ #amqp_msg{payload = Payload}),
+ %% All queues must have gotten this message
+ true = amqp_channel:wait_for_confirms(Channel),
+ lists:foreach(
+ fun (Q) ->
+ #'queue.declare_ok'{message_count = 1} =
+ amqp_channel:call(
+ Channel, #'queue.declare'{queue = Q,
+ passive = true})
+ end, lists:flatten(MultiOpRet))
+ end).
+
+abstract_method_serialization_test(Test, Config,
+ BeforeFun, MultiOpFun, AfterFun) ->
+ {ok, Connection} = new_connection(Config),
+ {ok, Channel} = amqp_connection:open_channel(Connection),
+ X = list_to_binary(Test),
+ Payload = list_to_binary(["x" || _ <- lists:seq(1, 1000)]),
+ OpsPerProcess = 20,
+ #'exchange.declare_ok'{} =
+ amqp_channel:call(Channel, #'exchange.declare'{exchange = X,
+ type = <<"topic">>}),
+ BeforeRet = BeforeFun(Channel, X),
+ Parent = self(),
+ [spawn(fun () -> Ret = [MultiOpFun(Channel, X, Payload, BeforeRet, I)
+ || _ <- lists:seq(1, OpsPerProcess)],
+ Parent ! {finished, Ret}
+ end) || I <- lists:seq(1, ?LATCH)],
+ MultiOpRet = latch_loop(?LATCH),
+ AfterFun(Channel, X, Payload, BeforeRet, MultiOpRet),
+ amqp_channel:call(Channel, #'exchange.delete'{exchange = X}),
+ teardown(Connection, Channel).
+
+%% -------------------------------------------------------------------
+
+teardown(Config) ->
+ {ok, Connection} = new_connection(Config),
+ {ok, Channel} = amqp_connection:open_channel(Connection),
+ true = is_process_alive(Channel),
+ true = is_process_alive(Connection),
+ teardown(Connection, Channel),
+ false = is_process_alive(Channel),
+ false = is_process_alive(Connection).
+
+%% -------------------------------------------------------------------
+
+%% This tests whether an RPC over AMQP produces the same result as
+%% applying the same function to the same argument locally.
+rpc(Config) ->
+ {ok, Connection} = new_connection(Config),
+ Fun = fun(X) -> X + 1 end,
+ RPCHandler = fun(X) -> term_to_binary(Fun(binary_to_term(X))) end,
+ Q = <<"rpc-test">>,
+ Server = amqp_rpc_server:start(Connection, Q, RPCHandler),
+ Client = amqp_rpc_client:start(Connection, Q),
+ Input = 1,
+ %% give both server and client a moment to initialise,
+ %% set up their topology and so on
+ timer:sleep(2000),
+ Reply = amqp_rpc_client:call(Client, term_to_binary(Input)),
+ Expected = Fun(Input),
+ DecodedReply = binary_to_term(Reply),
+ Expected = DecodedReply,
+ amqp_rpc_client:stop(Client),
+ amqp_rpc_server:stop(Server),
+ {ok, Channel} = amqp_connection:open_channel(Connection),
+ amqp_channel:call(Channel, #'queue.delete'{queue = Q}),
+ teardown(Connection, Channel).
+
+%% This tests whether the RPC client keeps generating valid correlation
+%% ids over a series of requests.
+rpc_client(Config) ->
+ {ok, Connection} = new_connection(Config),
+ {ok, Channel} = amqp_connection:open_channel(Connection),
+ Q = <<"rpc-client-test">>,
+ Latch = 255, % enough requests to tickle bad correlation ids
+ %% Start a server to return correlation ids to the client.
+ Server = spawn_link(fun() ->
+ rpc_correlation_server(Channel, Q)
+ end),
+ %% Generate a series of RPC requests on the same client.
+ Client = amqp_rpc_client:start(Connection, Q),
+ Parent = self(),
+ [spawn(fun() ->
+ Reply = amqp_rpc_client:call(Client, <<>>),
+ Parent ! {finished, Reply}
+ end) || _ <- lists:seq(1, Latch)],
+ %% Verify that the correlation ids are valid UTF-8 strings.
+ CorrelationIds = latch_loop(Latch),
+ [<<_/binary>> = DecodedId
+ || DecodedId <- [unicode:characters_to_binary(Id, utf8)
+ || Id <- CorrelationIds]],
+ %% Cleanup.
+ Server ! stop,
+ amqp_rpc_client:stop(Client),
+ amqp_channel:call(Channel, #'queue.delete'{queue = Q}),
+ teardown(Connection, Channel).
+
+%% Consumer of RPC requests that replies with the CorrelationId.
+rpc_correlation_server(Channel, Q) ->
+ ok = amqp_channel:register_return_handler(Channel, self()),
+ #'queue.declare_ok'{queue = Q} =
+ amqp_channel:call(Channel, #'queue.declare'{queue = Q}),
+ #'basic.consume_ok'{} =
+ amqp_channel:call(Channel,
+ #'basic.consume'{queue = Q,
+ consumer_tag = <<"server">>}),
+ ok = rpc_client_consume_loop(Channel),
+ #'basic.cancel_ok'{} =
+ amqp_channel:call(Channel,
+ #'basic.cancel'{consumer_tag = <<"server">>}),
+ ok = amqp_channel:unregister_return_handler(Channel).
+
+rpc_client_consume_loop(Channel) ->
+ receive
+ stop ->
+ ok;
+ {#'basic.deliver'{delivery_tag = DeliveryTag},
+ #amqp_msg{props = Props}} ->
+ #'P_basic'{correlation_id = CorrelationId,
+ reply_to = Q} = Props,
+ Properties = #'P_basic'{correlation_id = CorrelationId},
+ Publish = #'basic.publish'{exchange = <<>>,
+ routing_key = Q,
+ mandatory = true},
+ amqp_channel:call(
+ Channel, Publish, #amqp_msg{props = Properties,
+ payload = CorrelationId}),
+ amqp_channel:call(
+ Channel, #'basic.ack'{delivery_tag = DeliveryTag}),
+ rpc_client_consume_loop(Channel);
+ _ ->
+ rpc_client_consume_loop(Channel)
+ after 5000 ->
+ exit(no_request_received)
+ end.
+
+%% -------------------------------------------------------------------
+
+%% Test for the network client.
+%% Sends a bunch of messages and immediately closes the connection without
+%% closing the channel. Then gets the messages back from the queue and
+%% expects all of them to have arrived.
+pub_and_close(Config) ->
+ {ok, Connection1} = new_connection(Config),
+ Payload = <<"eggs">>,
+ NMessages = 50000,
+ {ok, Channel1} = amqp_connection:open_channel(Connection1),
+ #'queue.declare_ok'{queue = Q} =
+ amqp_channel:call(Channel1, #'queue.declare'{}),
+ %% Send messages
+ pc_producer_loop(Channel1, <<>>, Q, Payload, NMessages),
+ %% Close connection without closing channels
+ amqp_connection:close(Connection1),
+ %% Get sent messages back and count them
+ {ok, Connection2} = new_connection(Config),
+ {ok, Channel2} = amqp_connection:open_channel(
+ Connection2, {amqp_direct_consumer, [self()]}),
+ amqp_channel:call(Channel2, #'basic.consume'{queue = Q, no_ack = true}),
+ receive #'basic.consume_ok'{} -> ok end,
+ true = (pc_consumer_loop(Channel2, Payload, 0) == NMessages),
+ %% Make sure queue is empty
+ #'queue.declare_ok'{queue = Q, message_count = NRemaining} =
+ amqp_channel:call(Channel2, #'queue.declare'{queue = Q,
+ passive = true}),
+ true = (NRemaining == 0),
+ amqp_channel:call(Channel2, #'queue.delete'{queue = Q}),
+ teardown(Connection2, Channel2).
+
+pc_producer_loop(_, _, _, _, 0) -> ok;
+pc_producer_loop(Channel, X, Key, Payload, NRemaining) ->
+ Publish = #'basic.publish'{exchange = X, routing_key = Key},
+ ok = amqp_channel:call(Channel, Publish, #amqp_msg{payload = Payload}),
+ pc_producer_loop(Channel, X, Key, Payload, NRemaining - 1).
+
+pc_consumer_loop(Channel, Payload, NReceived) ->
+ receive
+ {#'basic.deliver'{},
+ #amqp_msg{payload = DeliveredPayload}} ->
+ case DeliveredPayload of
+ Payload ->
+ pc_consumer_loop(Channel, Payload, NReceived + 1);
+ _ ->
+ exit(received_unexpected_content)
+ end
+ after 1000 ->
+ NReceived
+ end.
+
+%% -------------------------------------------------------------------
+
+channel_tune_negotiation(Config) ->
+ {ok, Connection} = new_connection(Config),
+ amqp_connection:close(Connection),
+ wait_for_death(Connection).
+
+%% -------------------------------------------------------------------
+
+confirm(Config) ->
+ {ok, Connection} = new_connection(Config),
+ {ok, Channel} = amqp_connection:open_channel(Connection),
+ #'confirm.select_ok'{} = amqp_channel:call(Channel, #'confirm.select'{}),
+ amqp_channel:register_confirm_handler(Channel, self()),
+ {ok, Q} = setup_publish(Channel),
+ {#'basic.get_ok'{}, _}
+ = amqp_channel:call(Channel, #'basic.get'{queue = Q, no_ack = false}),
+ ok = receive
+ #'basic.ack'{} -> ok;
+ #'basic.nack'{} -> fail
+ after 2000 ->
+ exit(did_not_receive_pub_ack)
+ end,
+ teardown(Connection, Channel).
+
+%% -------------------------------------------------------------------
+
+confirm_barrier(Config) ->
+ {ok, Connection} = new_connection(Config),
+ {ok, Channel} = amqp_connection:open_channel(Connection),
+ #'confirm.select_ok'{} = amqp_channel:call(Channel, #'confirm.select'{}),
+ [amqp_channel:call(
+ Channel,
+ #'basic.publish'{routing_key = <<"whoosh-confirm_barrier">>},
+ #amqp_msg{payload = <<"foo">>})
+ || _ <- lists:seq(1, 1000)], %% Hopefully enough to get a multi-ack
+ true = amqp_channel:wait_for_confirms(Channel),
+ teardown(Connection, Channel).
+
+%% -------------------------------------------------------------------
+
+confirm_select_before_wait(Config) ->
+ {ok, Connection} = new_connection(Config),
+ {ok, Channel} = amqp_connection:open_channel(Connection),
+ try amqp_channel:wait_for_confirms(Channel) of
+ _ -> exit(success_despite_lack_of_confirm_mode)
+ catch
+ not_in_confirm_mode -> ok
+ end,
+ teardown(Connection, Channel).
+
+%% -------------------------------------------------------------------
+
+confirm_barrier_timeout(Config) ->
+ {ok, Connection} = new_connection(Config),
+ {ok, Channel} = amqp_connection:open_channel(Connection),
+ #'confirm.select_ok'{} = amqp_channel:call(Channel, #'confirm.select'{}),
+ [amqp_channel:call(
+ Channel,
+ #'basic.publish'{routing_key = <<"whoosh-confirm_barrier_timeout">>},
+ #amqp_msg{payload = <<"foo">>})
+ || _ <- lists:seq(1, 1000)],
+ case amqp_channel:wait_for_confirms(Channel, 0) of
+ true -> ok;
+ timeout -> ok
+ end,
+ true = amqp_channel:wait_for_confirms(Channel),
+ teardown(Connection, Channel).
+
+%% -------------------------------------------------------------------
+
+confirm_barrier_die_timeout(Config) ->
+ {ok, Connection} = new_connection(Config),
+ {ok, Channel} = amqp_connection:open_channel(Connection),
+ #'confirm.select_ok'{} = amqp_channel:call(Channel, #'confirm.select'{}),
+ [amqp_channel:call(
+ Channel,
+ #'basic.publish'{routing_key = <<"whoosh-confirm_barrier_die_timeout">>},
+ #amqp_msg{payload = <<"foo">>})
+ || _ <- lists:seq(1, 1000)],
+ try amqp_channel:wait_for_confirms_or_die(Channel, 0) of
+ true -> ok
+ catch
+ exit:timeout -> ok
+ end,
+ amqp_connection:close(Connection),
+ wait_for_death(Connection).
+
+%% -------------------------------------------------------------------
+
+default_consumer(Config) ->
+ {ok, Connection} = new_connection(Config),
+ {ok, Channel} = amqp_connection:open_channel(Connection),
+ amqp_selective_consumer:register_default_consumer(Channel, self()),
+
+ #'queue.declare_ok'{queue = Q}
+ = amqp_channel:call(Channel, #'queue.declare'{}),
+ Pid = spawn(fun () -> receive
+ after 10000 -> ok
+ end
+ end),
+ #'basic.consume_ok'{} =
+ amqp_channel:subscribe(Channel, #'basic.consume'{queue = Q}, Pid),
+ erlang:monitor(process, Pid),
+ exit(Pid, shutdown),
+ receive
+ {'DOWN', _, process, _, _} ->
+ io:format("little consumer died out~n")
+ end,
+ Payload = <<"for the default consumer">>,
+ amqp_channel:call(Channel,
+ #'basic.publish'{exchange = <<>>, routing_key = Q},
+ #amqp_msg{payload = Payload}),
+
+ receive
+        {#'basic.deliver'{}, #amqp_msg{payload = Payload}} ->
+ ok
+ after 1000 ->
+ exit('default_consumer_didnt_work')
+ end,
+ teardown(Connection, Channel).
+
+%% -------------------------------------------------------------------
+
+subscribe_nowait(Config) ->
+ {ok, Conn} = new_connection(Config),
+ {ok, Ch} = amqp_connection:open_channel(Conn),
+ {ok, Q} = setup_publish(Ch),
+ CTag = <<"ctag">>,
+ amqp_selective_consumer:register_default_consumer(Ch, self()),
+ ok = amqp_channel:call(Ch, #'basic.consume'{queue = Q,
+ consumer_tag = CTag,
+ nowait = true}),
+ ok = amqp_channel:call(Ch, #'basic.cancel' {consumer_tag = CTag,
+ nowait = true}),
+ ok = amqp_channel:call(Ch, #'basic.consume'{queue = Q,
+ consumer_tag = CTag,
+ nowait = true}),
+ receive
+ #'basic.consume_ok'{} ->
+ exit(unexpected_consume_ok);
+ {#'basic.deliver'{delivery_tag = DTag}, _Content} ->
+ amqp_channel:cast(Ch, #'basic.ack'{delivery_tag = DTag})
+ end,
+ teardown(Conn, Ch).
+
+%% -------------------------------------------------------------------
+
+%% connection.blocked, connection.unblocked
+
+connection_blocked_network(Config) ->
+ {ok, Connection} = new_connection(Config),
+ X = <<"amq.direct">>,
+ K = Payload = <<"x">>,
+ clear_resource_alarm(memory, Config),
+ timer:sleep(1000),
+ {ok, Channel} = amqp_connection:open_channel(Connection),
+ Parent = self(),
+ Child = spawn_link(
+ fun() ->
+ receive
+ #'connection.blocked'{} -> ok
+ end,
+ clear_resource_alarm(memory, Config),
+ receive
+ #'connection.unblocked'{} -> ok
+ end,
+ Parent ! ok
+ end),
+ amqp_connection:register_blocked_handler(Connection, Child),
+ set_resource_alarm(memory, Config),
+ Publish = #'basic.publish'{exchange = X,
+ routing_key = K},
+ amqp_channel:call(Channel, Publish,
+ #amqp_msg{payload = Payload}),
+ timer:sleep(1000),
+ receive
+ ok ->
+ clear_resource_alarm(memory, Config),
+ clear_resource_alarm(disk, Config),
+ ok
+ after 10000 ->
+ clear_resource_alarm(memory, Config),
+ clear_resource_alarm(disk, Config),
+ exit(did_not_receive_connection_blocked)
+ end,
+ amqp_connection:close(Connection),
+ wait_for_death(Connection).
+
+%% -------------------------------------------------------------------
+%% Negative test cases.
+%% -------------------------------------------------------------------
+
+non_existent_exchange(Config) ->
+ {ok, Connection} = new_connection(Config),
+ X = <<"test-non_existent_exchange">>,
+ RoutingKey = <<"a-non_existent_exchange">>,
+ Payload = <<"foobar">>,
+ {ok, Channel} = amqp_connection:open_channel(Connection),
+ {ok, OtherChannel} = amqp_connection:open_channel(Connection),
+ amqp_channel:call(Channel, #'exchange.declare'{exchange = X}),
+
+    %% Deliberately mix up the routing key and exchange arguments
+ Publish = #'basic.publish'{exchange = RoutingKey, routing_key = X},
+ amqp_channel:call(Channel, Publish, #amqp_msg{payload = Payload}),
+ wait_for_death(Channel),
+
+ %% Make sure Connection and OtherChannel still serve us and are not dead
+ {ok, _} = amqp_connection:open_channel(Connection),
+ amqp_channel:call(OtherChannel, #'exchange.delete'{exchange = X}),
+ amqp_connection:close(Connection).
+
+%% -------------------------------------------------------------------
+
+bogus_rpc(Config) ->
+ {ok, Connection} = new_connection(Config),
+ {ok, Channel} = amqp_connection:open_channel(Connection),
+ %% Deliberately bind to a non-existent queue
+ Bind = #'queue.bind'{exchange = <<"amq.topic">>,
+ queue = <<"does-not-exist">>,
+ routing_key = <<>>},
+ try amqp_channel:call(Channel, Bind) of
+ _ -> exit(expected_to_exit)
+ catch
+ exit:{{shutdown, {server_initiated_close, Code, _}},_} ->
+ ?NOT_FOUND = Code
+ end,
+ wait_for_death(Channel),
+ true = is_process_alive(Connection),
+ amqp_connection:close(Connection).
+
+%% -------------------------------------------------------------------
+
+hard_error(Config) ->
+ {ok, Connection} = new_connection(Config),
+ {ok, Channel} = amqp_connection:open_channel(Connection),
+ {ok, OtherChannel} = amqp_connection:open_channel(Connection),
+ OtherChannelMonitor = erlang:monitor(process, OtherChannel),
+ Qos = #'basic.qos'{prefetch_size = 10000000},
+ try amqp_channel:call(Channel, Qos) of
+ _ -> exit(expected_to_exit)
+ catch
+ exit:{{shutdown, {connection_closing,
+ {server_initiated_close, ?NOT_IMPLEMENTED, _}}}, _} ->
+ ok
+ end,
+ receive
+ {'DOWN', OtherChannelMonitor, process, OtherChannel, OtherExit} ->
+ {shutdown,
+ {connection_closing,
+ {server_initiated_close, ?NOT_IMPLEMENTED, _}}} = OtherExit
+ end,
+ wait_for_death(Channel),
+ wait_for_death(Connection).
+
+%% -------------------------------------------------------------------
+
+non_existent_user(Config) ->
+ {error, {auth_failure, _}} = new_connection(Config).
+
+%% -------------------------------------------------------------------
+
+invalid_password(Config) ->
+ {error, {auth_failure, _}} = new_connection(Config).
+
+%% -------------------------------------------------------------------
+
+non_existent_vhost(Config) ->
+ {error, not_allowed} = new_connection(Config).
+
+%% -------------------------------------------------------------------
+
+no_permission(Config) ->
+ {error, not_allowed} = new_connection(Config).
+
+%% -------------------------------------------------------------------
+
+%% An error in a channel should result in the death of the entire connection.
+%% The death of the channel is caused by an error in generating the frames
+%% (writer dies)
+channel_writer_death(Config) ->
+ ConnType = ?config(amqp_client_conn_type, Config),
+ {ok, Connection} = new_connection(Config),
+ {ok, Channel} = amqp_connection:open_channel(Connection),
+ Publish = #'basic.publish'{routing_key = <<>>, exchange = <<>>},
+ QoS = #'basic.qos'{prefetch_count = 0},
+ Message = #amqp_msg{props = <<>>, payload = <<>>},
+ amqp_channel:cast(Channel, Publish, Message),
+ try
+ Ret = amqp_channel:call(Channel, QoS),
+ throw({unexpected_success, Ret})
+ catch
+ exit:{{function_clause,
+ [{rabbit_channel, check_user_id_header, _, _} | _]}, _}
+ when ConnType =:= direct -> ok;
+
+ exit:{{infrastructure_died, {unknown_properties_record, <<>>}}, _}
+ when ConnType =:= network -> ok
+ end,
+ wait_for_death(Channel),
+ wait_for_death(Connection).
+
+%% -------------------------------------------------------------------
+
+%% The connection should die if the underlying connection is prematurely
+%% closed. For a network connection, this means that the TCP socket is
+%% closed. For a direct connection (remotely only, of course), this means that
+%% the RabbitMQ node appears as down.
+connection_failure(Config) ->
+ {ok, Connection} = new_connection(Config),
+ case amqp_connection:info(Connection, [type, amqp_params]) of
+ [{type, direct}, {amqp_params, Params}] ->
+ case Params#amqp_params_direct.node of
+ N when N == node() ->
+ amqp_connection:close(Connection);
+ N ->
+ true = erlang:disconnect_node(N),
+ net_adm:ping(N)
+ end;
+ [{type, network}, {amqp_params, _}] ->
+ [{sock, Sock}] = amqp_connection:info(Connection, [sock]),
+ close_remote_socket(Config, Sock)
+ end,
+ wait_for_death(Connection).
+
+%% We obtain the socket for the remote end of the connection by
+%% looking through open ports and comparing sockname/peername values.
+%% This is necessary because we cannot simply close our own socket to
+%% simulate a failure: a locally initiated close is expected and would
+%% not be treated as one.
+%%
+%% We convert the IPv4 address to its IPv6-mapped form because the
+%% server side uses the IPv6 format, IPv6 being enabled for other
+%% tests.
+close_remote_socket(Config, Socket) when is_port(Socket) ->
+ {ok, {IPv4, Port}} = inet:sockname(Socket),
+ IPv6 = inet:ipv4_mapped_ipv6_address(IPv4),
+ rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, close_remote_socket1, [{ok, {IPv4, Port}}, {ok, {IPv6, Port}}]).
+
+close_remote_socket1(SockNameIPv4, SockNameIPv6) ->
+ AllPorts = [{P, erlang:port_info(P)} || P <- erlang:ports()],
+ [RemoteSocket] = [
+ P
+ || {P, I} <- AllPorts,
+ I =/= undefined,
+ proplists:get_value(name, I) =:= "tcp_inet",
+ inet:peername(P) =:= SockNameIPv4 orelse
+ inet:peername(P) =:= SockNameIPv6],
+ ok = gen_tcp:close(RemoteSocket).
+
+%% -------------------------------------------------------------------
+
+%% An error in the channel process should result in the death of the entire
+%% connection. The death of the channel is caused by making a call with an
+%% invalid message to the channel process
+channel_death(Config) ->
+ {ok, Connection} = new_connection(Config),
+ {ok, Channel} = amqp_connection:open_channel(Connection),
+ try
+ Ret = amqp_channel:call(Channel, bogus_message),
+ throw({unexpected_success, Ret})
+ catch
+ exit:{{badarg,
+ [{amqp_channel, is_connection_method, 1, _} | _]}, _} -> ok;
+ exit:{{badarg,
+ [{erlang, element, [1, bogus_message], []},
+ {amqp_channel, is_connection_method, 1, _} | _]}, _} -> ok
+ end,
+ wait_for_death(Channel),
+ wait_for_death(Connection).
+
+%% -------------------------------------------------------------------
+
+%% Attempting to send a shortstr longer than 255 bytes in a property field
+%% should fail - this only applies to the network case
+shortstr_overflow_property(Config) ->
+ {ok, Connection} = new_connection(Config),
+ {ok, Channel} = amqp_connection:open_channel(Connection),
+ SentString = << <<"k">> || _ <- lists:seq(1, 340)>>,
+ #'queue.declare_ok'{queue = Q}
+ = amqp_channel:call(Channel, #'queue.declare'{exclusive = true}),
+ Publish = #'basic.publish'{exchange = <<>>, routing_key = Q},
+ PBasic = #'P_basic'{content_type = SentString},
+ AmqpMsg = #amqp_msg{payload = <<"foobar">>, props = PBasic},
+ QoS = #'basic.qos'{prefetch_count = 0},
+ amqp_channel:cast(Channel, Publish, AmqpMsg),
+ try
+ Ret = amqp_channel:call(Channel, QoS),
+ throw({unexpected_success, Ret})
+ catch
+ exit:{{infrastructure_died, content_properties_shortstr_overflow}, _} -> ok
+ end,
+ wait_for_death(Channel),
+ wait_for_death(Connection).
+
+%% -------------------------------------------------------------------
+
+%% Attempting to send a shortstr longer than 255 bytes in a method's field
+%% should fail - this only applies to the network case
+shortstr_overflow_field(Config) ->
+ {ok, Connection} = new_connection(Config),
+ {ok, Channel} = amqp_connection:open_channel(Connection),
+ SentString = << <<"k">> || _ <- lists:seq(1, 340)>>,
+ #'queue.declare_ok'{queue = Q}
+ = amqp_channel:call(Channel, #'queue.declare'{exclusive = true}),
+ try
+ Ret = amqp_channel:call(
+ Channel, #'basic.consume'{queue = Q,
+ no_ack = true,
+ consumer_tag = SentString}),
+ throw({unexpected_success, Ret})
+ catch
+ exit:{{infrastructure_died, method_field_shortstr_overflow}, _} -> ok
+ end,
+ wait_for_death(Channel),
+ wait_for_death(Connection).
+
+%% -------------------------------------------------------------------
+
+%% Simulates a #'connection.open'{} method received on a non-zero channel.
+%% The connection is expected to send a #'connection.close'{} to the server
+%% with reply code command_invalid.
+command_invalid_over_channel(Config) ->
+ {ok, Connection} = new_connection(Config),
+ {ok, Channel} = amqp_connection:open_channel(Connection),
+ MonitorRef = erlang:monitor(process, Connection),
+ case amqp_connection:info(Connection, [type]) of
+ [{type, direct}] -> Channel ! {send_command, #'connection.open'{}};
+ [{type, network}] -> gen_server:cast(Channel,
+ {method, #'connection.open'{}, none, noflow})
+ end,
+ assert_down_with_error(MonitorRef, command_invalid),
+ false = is_process_alive(Channel).
+
+%% -------------------------------------------------------------------
+
+%% Simulates a #'basic.ack'{} method received on channel zero. The connection
+%% is expected to send a #'connection.close'{} to the server with reply code
+%% command_invalid - this only applies to the network case.
+command_invalid_over_channel0(Config) ->
+ {ok, Connection} = new_connection(Config),
+ gen_server:cast(Connection, {method, #'basic.ack'{}, none, noflow}),
+ MonitorRef = erlang:monitor(process, Connection),
+ assert_down_with_error(MonitorRef, command_invalid).
+
+%% -------------------------------------------------------------------
+%% Helpers.
+%% -------------------------------------------------------------------
+
+new_connection(Config) ->
+ Params = ?config(amqp_client_conn_params, Config),
+ amqp_connection:start(Params).
+
+setup_publish(Channel) ->
+ setup_publish(Channel, <<"foobar">>).
+
+setup_publish(Channel, Payload) ->
+ #'queue.declare_ok'{queue = Q} =
+ amqp_channel:call(Channel, #'queue.declare'{exclusive = true}),
+ ok = amqp_channel:call(Channel, #'basic.publish'{exchange = <<>>,
+ routing_key = Q},
+ #amqp_msg{payload = Payload}),
+ {ok, Q}.
+
+teardown(Connection, Channel) ->
+ amqp_channel:close(Channel),
+ wait_for_death(Channel),
+ ?assertEqual(ok, amqp_connection:close(Connection)),
+ wait_for_death(Connection).
+
+wait_for_death(Pid) ->
+ Ref = erlang:monitor(process, Pid),
+ receive
+ {'DOWN', Ref, process, Pid, _Reason} ->
+ ok
+ after ?PROCESS_EXIT_TIMEOUT ->
+ exit({timed_out_waiting_for_process_death, Pid})
+ end.
+
+latch_loop() ->
+ latch_loop(?LATCH, []).
+
+latch_loop(Latch) ->
+ latch_loop(Latch, []).
+
+latch_loop(0, Acc) ->
+ Acc;
+latch_loop(Latch, Acc) ->
+ receive
+ finished -> latch_loop(Latch - 1, Acc);
+ {finished, Ret} -> latch_loop(Latch - 1, [Ret | Acc])
+ after ?LATCH * ?WAIT -> exit(waited_too_long)
+ end.
+
+get_and_assert_empty(Channel, Q) ->
+ #'basic.get_empty'{}
+ = amqp_channel:call(Channel, #'basic.get'{queue = Q, no_ack = true}).
+
+get_and_assert_equals(Channel, Q, Payload) ->
+ get_and_assert_equals(Channel, Q, Payload, true).
+
+get_and_assert_equals(Channel, Q, Payload, NoAck) ->
+ {GetOk = #'basic.get_ok'{}, Content}
+ = amqp_channel:call(Channel, #'basic.get'{queue = Q, no_ack = NoAck}),
+ #amqp_msg{payload = Payload2} = Content,
+ Payload = Payload2,
+ GetOk.
+
+assert_down_with_error(MonitorRef, CodeAtom) ->
+ receive
+ {'DOWN', MonitorRef, process, _, Reason} ->
+ {shutdown, {server_misbehaved, Code, _}} = Reason,
+ CodeAtom = ?PROTOCOL:amqp_exception(Code)
+ after 2000 ->
+ exit(did_not_die)
+ end.
+
+set_resource_alarm(memory, Config) ->
+ SrcDir = ?config(amqp_client_srcdir, Config),
+ Nodename = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ {ok, _} = rabbit_ct_helpers:make(Config, SrcDir, [
+ {"RABBITMQ_NODENAME=~s", [Nodename]},
+ "set-resource-alarm", "SOURCE=memory"]);
+set_resource_alarm(disk, Config) ->
+ SrcDir = ?config(amqp_client_srcdir, Config),
+ Nodename = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ {ok, _} = rabbit_ct_helpers:make(Config, SrcDir, [
+ {"RABBITMQ_NODENAME=~s", [Nodename]},
+ "set-resource-alarm", "SOURCE=disk"]).
+
+clear_resource_alarm(memory, Config) ->
+ SrcDir = ?config(amqp_client_srcdir, Config),
+ Nodename = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ {ok, _}= rabbit_ct_helpers:make(Config, SrcDir, [
+ {"RABBITMQ_NODENAME=~s", [Nodename]},
+ "clear-resource-alarm", "SOURCE=memory"]);
+clear_resource_alarm(disk, Config) ->
+ SrcDir = ?config(amqp_client_srcdir, Config),
+ Nodename = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ {ok, _}= rabbit_ct_helpers:make(Config, SrcDir, [
+ {"RABBITMQ_NODENAME=~s", [Nodename]},
+ "clear-resource-alarm", "SOURCE=disk"]).
+
+fmt(Fmt, Args) -> list_to_binary(rabbit_misc:format(Fmt, Args)).
diff --git a/deps/amqp_client/test/unit_SUITE.erl b/deps/amqp_client/test/unit_SUITE.erl
new file mode 100644
index 0000000000..48b4b9de13
--- /dev/null
+++ b/deps/amqp_client/test/unit_SUITE.erl
@@ -0,0 +1,399 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2016-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(unit_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-include("amqp_client.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ amqp_uri_parsing,
+ amqp_uri_accepts_string_and_binaries,
+ amqp_uri_remove_credentials,
+ uri_parser_accepts_string_and_binaries,
+ route_destination_parsing,
+ rabbit_channel_build_topic_variable_map
+ ].
+
+%% -------------------------------------------------------------------
+%% AMQP URI parsing.
+%% -------------------------------------------------------------------
+
+amqp_uri_parsing(_Config) ->
+ %% From the spec (adapted)
+ ?assertMatch({ok, #amqp_params_network{username = <<"user">>,
+ password = <<"pass">>,
+ host = "host",
+ port = 10000,
+ virtual_host = <<"vhost">>,
+ heartbeat = 5}},
+ amqp_uri:parse(
+ "amqp://user:pass@host:10000/vhost?heartbeat=5")),
+ ?assertMatch({ok, #amqp_params_network{username = <<"usera">>,
+ password = <<"apass">>,
+ host = "hoast",
+ port = 10000,
+ virtual_host = <<"v/host">>}},
+ amqp_uri:parse(
+ "aMQp://user%61:%61pass@ho%61st:10000/v%2fhost")),
+ ?assertMatch({ok, #amqp_params_direct{}}, amqp_uri:parse("amqp://")),
+ ?assertMatch({ok, #amqp_params_direct{username = <<"">>,
+ virtual_host = <<"">>}},
+ amqp_uri:parse("amqp://:@/")),
+
+ % https://github.com/rabbitmq/rabbitmq-server/issues/1663
+ ?assertEqual({error,{port_requires_host,"amqp://:1234"}},
+ amqp_uri:parse("amqp://:1234")),
+ ?assertMatch({ok, #amqp_params_network{host = "localhost",
+ port = 1234}},
+ amqp_uri:parse("amqp://localhost:1234")),
+
+ ?assertMatch({ok, #amqp_params_network{username = <<"">>,
+ password = <<"">>,
+ virtual_host = <<"">>,
+ host = "host"}},
+ amqp_uri:parse("amqp://:@host/")),
+ ?assertMatch({ok, #amqp_params_direct{username = <<"user">>}},
+ amqp_uri:parse("amqp://user@")),
+ ?assertMatch({ok, #amqp_params_network{username = <<"user">>,
+ password = <<"pass">>,
+ host = "localhost"}},
+ amqp_uri:parse("amqp://user:pass@localhost")),
+ ?assertMatch({ok, #amqp_params_network{host = "host",
+ virtual_host = <<"/">>}},
+ amqp_uri:parse("amqp://host")),
+ ?assertMatch({ok, #amqp_params_network{port = 10000,
+ host = "localhost"}},
+ amqp_uri:parse("amqp://localhost:10000")),
+ ?assertMatch({ok, #amqp_params_direct{virtual_host = <<"vhost">>}},
+ amqp_uri:parse("amqp:///vhost")),
+ ?assertMatch({ok, #amqp_params_network{host = "host",
+ virtual_host = <<"">>}},
+ amqp_uri:parse("amqp://host/")),
+ ?assertMatch({ok, #amqp_params_network{host = "host",
+ virtual_host = <<"/">>}},
+ amqp_uri:parse("amqp://host/%2f")),
+ ?assertMatch({ok, #amqp_params_network{host = "::1"}},
+ amqp_uri:parse("amqp://[::1]")),
+
+ %% Various other cases
+ ?assertMatch({ok, #amqp_params_network{host = "host", port = 100}},
+ amqp_uri:parse("amqp://host:100")),
+ ?assertMatch({ok, #amqp_params_network{host = "::1", port = 100}},
+ amqp_uri:parse("amqp://[::1]:100")),
+
+ ?assertMatch({ok, #amqp_params_network{host = "host",
+ virtual_host = <<"blah">>}},
+ amqp_uri:parse("amqp://host/blah")),
+ ?assertMatch({ok, #amqp_params_network{host = "host",
+ port = 100,
+ virtual_host = <<"blah">>}},
+ amqp_uri:parse("amqp://host:100/blah")),
+ ?assertMatch({ok, #amqp_params_network{host = "::1",
+ virtual_host = <<"blah">>}},
+ amqp_uri:parse("amqp://[::1]/blah")),
+ ?assertMatch({ok, #amqp_params_network{host = "::1",
+ port = 100,
+ virtual_host = <<"blah">>}},
+ amqp_uri:parse("amqp://[::1]:100/blah")),
+
+ ?assertMatch({ok, #amqp_params_network{username = <<"user">>,
+ password = <<"pass">>,
+ host = "host"}},
+ amqp_uri:parse("amqp://user:pass@host")),
+ ?assertMatch({ok, #amqp_params_network{username = <<"user">>,
+ password = <<"pass">>,
+ port = 100}},
+ amqp_uri:parse("amqp://user:pass@host:100")),
+ ?assertMatch({ok, #amqp_params_network{username = <<"user">>,
+ password = <<"pass">>,
+ host = "::1"}},
+ amqp_uri:parse("amqp://user:pass@[::1]")),
+ ?assertMatch({ok, #amqp_params_network{username = <<"user">>,
+ password = <<"pass">>,
+ host = "::1",
+ port = 100}},
+ amqp_uri:parse("amqp://user:pass@[::1]:100")),
+
+ %% TLS options
+ ?assertEqual({error,{port_requires_host,"amqps://:5671"}},
+ amqp_uri:parse("amqps://:5671")),
+ ?assertMatch({ok, #amqp_params_network{host = "localhost",
+ port = 5671}},
+ amqp_uri:parse("amqps://localhost:5671")),
+
+ {ok, #amqp_params_network{host = "host1", ssl_options = TLSOpts1}} =
+ amqp_uri:parse("amqps://host1/%2f?cacertfile=/path/to/cacertfile.pem"),
+ Exp1 = [
+ {cacertfile,"/path/to/cacertfile.pem"},
+ {server_name_indication,"host1"}
+ ],
+ ?assertEqual(lists:usort(Exp1), lists:usort(TLSOpts1)),
+
+ {ok, #amqp_params_network{host = "host2", ssl_options = TLSOpts2}} =
+ amqp_uri:parse("amqps://host2/%2F?verify=verify_peer"
+ "&server_name_indication=host2"
+ "&cacertfile=/path/to/cacertfile.pem"
+ "&certfile=/path/to/certfile.pem"),
+ Opts2 = [{certfile, "/path/to/certfile.pem"},
+ {cacertfile,"/path/to/cacertfile.pem"},
+ {server_name_indication, "host2"},
+ {verify, verify_peer}],
+ Exp2 = amqp_ssl:add_verify_fun_to_opts("host2", Opts2),
+ ?assertEqual(lists:usort(Exp2), lists:usort(TLSOpts2)),
+
+ {ok, #amqp_params_network{host = "host3", ssl_options = TLSOpts3}} =
+ amqp_uri:parse("amqps://host3/%2f?verify=verify_peer"
+ "&fail_if_no_peer_cert=true"),
+ Exp3 = [{fail_if_no_peer_cert, true},
+ {verify, verify_peer},
+ {server_name_indication,"host3"}],
+ ?assertEqual(lists:usort(Exp3), lists:usort(TLSOpts3)),
+
+ {ok, #amqp_params_network{host = "host4", ssl_options = TLSOpts4}} =
+ amqp_uri:parse("amqps://host4/%2f?cacertfile=/path/to/cacertfile.pem"
+ "&certfile=/path/to/certfile.pem"
+ "&password=topsecret"
+ "&depth=5"),
+ Exp4 = [{certfile, "/path/to/certfile.pem"},
+ {cacertfile,"/path/to/cacertfile.pem"},
+ {password, "topsecret"},
+ {depth, 5},
+ {server_name_indication,"host4"}],
+ ?assertEqual(lists:usort(Exp4), lists:usort(TLSOpts4)),
+
+ {ok, #amqp_params_network{host = "host5", ssl_options = TLSOpts5}} =
+ amqp_uri:parse("amqps://host5/%2f?server_name_indication=foobar"
+ "&verify=verify_peer"),
+ Opts5 = [{server_name_indication, "foobar"},
+ {verify, verify_peer}],
+ Exp5 = amqp_ssl:add_verify_fun_to_opts("foobar", Opts5),
+ ?assertEqual(lists:usort(Exp5), lists:usort(TLSOpts5)),
+
+ {ok, #amqp_params_network{host = "127.0.0.1", ssl_options = TLSOpts6}} =
+ amqp_uri:parse("amqps://127.0.0.1/%2f?server_name_indication=barbaz"
+ "&verify=verify_peer"),
+ Opts6 = [{server_name_indication, "barbaz"},
+ {verify, verify_peer}],
+ Exp6 = amqp_ssl:add_verify_fun_to_opts("barbaz", Opts6),
+ ?assertEqual(lists:usort(Exp6), lists:usort(TLSOpts6)),
+
+ {ok, #amqp_params_network{host = "host7", ssl_options = TLSOpts7}} =
+ amqp_uri:parse("amqps://host7/%2f?server_name_indication=disable"),
+ ?assertEqual(lists:usort([{server_name_indication, disable}]),
+ lists:usort(TLSOpts7)),
+
+ {ok, #amqp_params_network{host = "127.0.0.1", ssl_options = TLSOpts8}} =
+ amqp_uri:parse("amqps://127.0.0.1/%2f?server_name_indication=disable"
+ "&verify=verify_none"),
+ ?assertEqual(lists:usort([{server_name_indication, disable},
+ {verify, verify_none}]),
+ lists:usort(TLSOpts8)),
+
+ {ok, #amqp_params_network{host = "127.0.0.1", ssl_options = TLSOpts9}} =
+ amqp_uri:parse("amqps://127.0.0.1/%2f?cacertfile=/path/to/cacertfile.pem"
+ "&certfile=/path/to/certfile.pem"
+ "&password=topsecret"
+ "&depth=5"),
+ ?assertEqual(lists:usort([{certfile, "/path/to/certfile.pem"},
+ {cacertfile,"/path/to/cacertfile.pem"},
+ {password, "topsecret"},
+ {depth, 5}]),
+ lists:usort(TLSOpts9)),
+
+ {ok, #amqp_params_network{host = "host10", ssl_options = TLSOpts10}} =
+ amqp_uri:parse("amqps://host10/%2f?server_name_indication=host10"
+ "&verify=verify_none"),
+ Exp10 = [{server_name_indication, "host10"},
+ {verify, verify_none}],
+ ?assertEqual(lists:usort(Exp10), lists:usort(TLSOpts10)),
+
+ %% Various failure cases
+ ?assertMatch({error, _}, amqp_uri:parse("https://www.rabbitmq.com")),
+ ?assertMatch({error, _}, amqp_uri:parse("amqp://foo:bar:baz")),
+ ?assertMatch({error, _}, amqp_uri:parse("amqp://foo[::1]")),
+ ?assertMatch({error, _}, amqp_uri:parse("amqp://foo:[::1]")),
+ ?assertMatch({error, _}, amqp_uri:parse("amqp://[::1]foo")),
+ ?assertMatch({error, _}, amqp_uri:parse("amqp://foo:1000xyz")),
+ ?assertMatch({error, _}, amqp_uri:parse("amqp://foo:1000000")),
+ ?assertMatch({error, _}, amqp_uri:parse("amqp://foo/bar/baz")),
+
+ ?assertMatch({error, _}, amqp_uri:parse("amqp://foo%1")),
+ ?assertMatch({error, _}, amqp_uri:parse("amqp://foo%1x")),
+ ?assertMatch({error, _}, amqp_uri:parse("amqp://foo%xy")),
+
+ ok.
+
+amqp_uri_accepts_string_and_binaries(_Config) ->
+ [?assertMatch({ok, #amqp_params_network{username = <<"user">>,
+ password = <<"pass">>,
+ host = "host",
+ port = 10000,
+ virtual_host = <<"vhost">>,
+ heartbeat = 5}},
+ amqp_uri:parse(Uri))
+ || Uri <- string_binaries_uris()],
+ ok.
+
+amqp_uri_remove_credentials(_Config) ->
+ [?assertMatch("amqp://host:10000/vhost",
+ amqp_uri:remove_credentials(Uri))
+ || Uri <- string_binaries_uris()],
+ ok.
+
+uri_parser_accepts_string_and_binaries(_Config) ->
+ [?assertMatch([{fragment,[]},
+ {host,"host"},
+ {path,"/vhost"},
+ {port,10000},
+ {query,[{"heartbeat","5"}]},
+ {scheme,"amqp"},
+ {userinfo,["user","pass"]}],
+ uri_parser:parse(Uri, []))
+ || Uri <- string_binaries_uris()],
+ ok.
+
+string_binaries_uris() ->
+ ["amqp://user:pass@host:10000/vhost?heartbeat=5", <<"amqp://user:pass@host:10000/vhost?heartbeat=5">>].
+
+%% -------------------------------------------------------------------
+%% Route destination parsing.
+%% -------------------------------------------------------------------
+
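+%% parse_dest/1,2 below delegate to rabbit_routing_util:parse_endpoint/1,2,
+%% which maps STOMP-style destination strings to tagged tuples; a sketch
+%% based on the assertions that follow:
+%%
+%%   {ok, {queue, "test"}} = rabbit_routing_util:parse_endpoint("/queue/test"),
+%%   {error, {unknown_destination, "/blah/boo"}} =
+%%       rabbit_routing_util:parse_endpoint("/blah/boo").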
+route_destination_parsing(_Config) ->
+ %% valid queue
+ ?assertMatch({ok, {queue, "test"}}, parse_dest("/queue/test")),
+
+ %% valid topic
+ ?assertMatch({ok, {topic, "test"}}, parse_dest("/topic/test")),
+
+ %% valid exchange
+ ?assertMatch({ok, {exchange, {"test", undefined}}}, parse_dest("/exchange/test")),
+
+ %% valid temp queue
+ ?assertMatch({ok, {temp_queue, "test"}}, parse_dest("/temp-queue/test")),
+
+ %% valid reply queue
+ ?assertMatch({ok, {reply_queue, "test"}}, parse_dest("/reply-queue/test")),
+ ?assertMatch({ok, {reply_queue, "test/2"}}, parse_dest("/reply-queue/test/2")),
+
+ %% valid exchange with pattern
+ ?assertMatch({ok, {exchange, {"test", "pattern"}}},
+ parse_dest("/exchange/test/pattern")),
+
+ %% valid pre-declared queue
+ ?assertMatch({ok, {amqqueue, "test"}}, parse_dest("/amq/queue/test")),
+
+ %% queue without name
+ ?assertMatch({error, {invalid_destination, queue, ""}}, parse_dest("/queue")),
+ ?assertMatch({ok, {queue, undefined}}, parse_dest("/queue", true)),
+
+ %% topic without name
+ ?assertMatch({error, {invalid_destination, topic, ""}}, parse_dest("/topic")),
+
+ %% exchange without name
+ ?assertMatch({error, {invalid_destination, exchange, ""}},
+ parse_dest("/exchange")),
+
+ %% exchange default name
+ ?assertMatch({error, {invalid_destination, exchange, "//foo"}},
+ parse_dest("/exchange//foo")),
+
+ %% amqqueue without name
+ ?assertMatch({error, {invalid_destination, amqqueue, ""}},
+ parse_dest("/amq/queue")),
+
+ %% queue without name with trailing slash
+ ?assertMatch({error, {invalid_destination, queue, "/"}}, parse_dest("/queue/")),
+
+ %% topic without name with trailing slash
+ ?assertMatch({error, {invalid_destination, topic, "/"}}, parse_dest("/topic/")),
+
+ %% exchange without name with trailing slash
+ ?assertMatch({error, {invalid_destination, exchange, "/"}},
+ parse_dest("/exchange/")),
+
+ %% queue with invalid name
+ ?assertMatch({error, {invalid_destination, queue, "/foo/bar"}},
+ parse_dest("/queue/foo/bar")),
+
+ %% topic with invalid name
+ ?assertMatch({error, {invalid_destination, topic, "/foo/bar"}},
+ parse_dest("/topic/foo/bar")),
+
+ %% exchange with invalid name
+ ?assertMatch({error, {invalid_destination, exchange, "/foo/bar/baz"}},
+ parse_dest("/exchange/foo/bar/baz")),
+
+ %% unknown destination
+ ?assertMatch({error, {unknown_destination, "/blah/boo"}},
+ parse_dest("/blah/boo")),
+
+ %% queue with escaped name
+ ?assertMatch({ok, {queue, "te/st"}}, parse_dest("/queue/te%2Fst")),
+
+ %% valid exchange with escaped name and pattern
+ ?assertMatch({ok, {exchange, {"te/st", "pa/tt/ern"}}},
+ parse_dest("/exchange/te%2Fst/pa%2Ftt%2Fern")),
+
+ ok.
+
+parse_dest(Destination, Params) ->
+ rabbit_routing_util:parse_endpoint(Destination, Params).
+parse_dest(Destination) ->
+ rabbit_routing_util:parse_endpoint(Destination).
+
+%% -------------------------------------------------------------------
+%% Topic variable map
+%% -------------------------------------------------------------------
+
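+%% A sketch of the behaviour being verified: build_topic_variable_map/3
+%% merges any adapter-supplied variable_map with the channel's vhost and
+%% username, and the channel-provided <<"username">>/<<"vhost">> entries
+%% win on conflict, e.g.
+%%
+%%   #{<<"username">> := <<"guest">>, <<"vhost">> := <<"default">>} =
+%%       rabbit_channel:build_topic_variable_map(
+%%         [{amqp_params, #amqp_params_network{}}], <<"default">>, <<"guest">>).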
+rabbit_channel_build_topic_variable_map(_Config) ->
+ AmqpParams = #amqp_params_direct{
+ adapter_info = #amqp_adapter_info{
+ additional_info = [
+ {variable_map, #{<<"client_id">> => <<"client99">>}}]}
+ },
+ %% simple case
+ ?assertMatch(
+ #{<<"client_id">> := <<"client99">>,
+ <<"username">> := <<"guest">>,
+ <<"vhost">> := <<"default">>}, rabbit_channel:build_topic_variable_map(
+ [{amqp_params, AmqpParams}], <<"default">>, <<"guest">>
+ )),
+ %% nothing to add
+ AmqpParams1 = #amqp_params_direct{adapter_info = #amqp_adapter_info{}},
+ ?assertMatch(
+ #{<<"username">> := <<"guest">>,
+ <<"vhost">> := <<"default">>}, rabbit_channel:build_topic_variable_map(
+ [{amqp_params, AmqpParams1}], <<"default">>, <<"guest">>
+ )),
+ %% nothing to add with amqp_params_network
+ AmqpParams2 = #amqp_params_network{},
+ ?assertMatch(
+ #{<<"username">> := <<"guest">>,
+ <<"vhost">> := <<"default">>}, rabbit_channel:build_topic_variable_map(
+ [{amqp_params, AmqpParams2}], <<"default">>, <<"guest">>
+ )),
+ %% trying to override channel variables, but those
+ %% take precedence
+ AmqpParams3 = #amqp_params_direct{
+ adapter_info = #amqp_adapter_info{
+ additional_info = [
+ {variable_map, #{<<"client_id">> => <<"client99">>,
+ <<"username">> => <<"admin">>}}]}
+ },
+ ?assertMatch(#{<<"client_id">> := <<"client99">>,
+ <<"username">> := <<"guest">>,
+ <<"vhost">> := <<"default">>}, rabbit_channel:build_topic_variable_map(
+ [{amqp_params, AmqpParams3}], <<"default">>, <<"guest">>
+ )),
+ ok.
diff --git a/deps/rabbit/.gitignore b/deps/rabbit/.gitignore
new file mode 100644
index 0000000000..dc870136e8
--- /dev/null
+++ b/deps/rabbit/.gitignore
@@ -0,0 +1,42 @@
+*~
+.sw?
+.*.sw?
+*.beam
+*.coverdata
+MnesiaCore.*
+/.erlang.mk/
+/cover/
+/debug/
+/deps/
+/debug/
+/doc/
+/ebin/
+/escript/
+/escript.lock
+/etc/
+/logs/
+/plugins/
+/plugins.lock
+/test/ct.cover.spec
+/test/config_schema_SUITE_data/schema/**
+/xrefr
+/sbin/
+/sbin.lock
+rabbit.d
+
+# Generated documentation.
+docs/*.html
+
+# Dialyzer
+*.plt
+
+# Tracing tools
+*-ttb
+*.ti
+*.lz4*
+callgrind.out*
+callgraph.dot*
+
+PACKAGES/*
+
+rabbit-rabbitmq-deps.mk
diff --git a/deps/rabbit/.travis.yml b/deps/rabbit/.travis.yml
new file mode 100644
index 0000000000..a502fe1922
--- /dev/null
+++ b/deps/rabbit/.travis.yml
@@ -0,0 +1,61 @@
+# vim:sw=2:et:
+
+os: linux
+dist: xenial
+language: elixir
+notifications:
+ email:
+ recipients:
+ - alerts@rabbitmq.com
+ on_success: never
+ on_failure: always
+addons:
+ apt:
+ packages:
+ - awscli
+cache:
+ apt: true
+env:
+ global:
+ - secure: E4FIneR14YxnWbNNgFY48+Z8YpnwWcvIR0nD1Mo06WKXaq05UfQNQ7SZCjI3kKCNJGMhac12DFRhln+mQZ+T92MQ7IeU3ugpV5RSm+JqIwwIKzVM3+bjCQnFoL24OD4E+GjhJQWYQmPyM7l4OPluMr2N8BtANItgzX3AvljvlSc=
+ - secure: L1t0CHGR4RzOXwtkpM6feRKax95rszScBLqzjstEiMPkhjTsYTlAecnNxx6lTrGMnk5hQoi4PtbhmyZOX0siHTngTogoA/Nyn8etYzicU5ZO+qmBQOYpegz51lEu70ewXgkhEHzk9DtEPxfYviH9WiILrdUVRXXgZpoXq13p1QA=
+
+ # $base_rmq_ref is used by rabbitmq-components.mk to select the
+ # appropriate branch for dependencies.
+ - base_rmq_ref=master
+
+elixir:
+ - '1.10'
+otp_release:
+ - '22.3'
+ - '23.0'
+
+install:
+ # This project is an Erlang one (we only set the language to Elixir
+ # to ensure Elixir is installed), so we don't want Travis to run mix(1)
+ # automatically, as it would break.
+ skip
+
+script:
+ # $current_rmq_ref is also used by rabbitmq-components.mk to select
+ # the appropriate branch for dependencies.
+ - make check-rabbitmq-components.mk
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+ - make xref
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+ - make ct-fast
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+
+after_failure:
+ - |
+ cd "$TRAVIS_BUILD_DIR"
+ if test -d logs && test "$AWS_ACCESS_KEY_ID" && test "$AWS_SECRET_ACCESS_KEY"; then
+ archive_name="$(basename "$TRAVIS_REPO_SLUG")-$TRAVIS_JOB_NUMBER"
+
+ tar -c --transform "s/^logs/${archive_name}/" -f - logs | \
+ xz > "${archive_name}.tar.xz"
+
+ aws s3 cp "${archive_name}.tar.xz" s3://server-release-pipeline/travis-ci-logs/ \
+ --region eu-west-1 \
+ --acl public-read
+ fi
diff --git a/deps/rabbit/.travis.yml.patch b/deps/rabbit/.travis.yml.patch
new file mode 100644
index 0000000000..ca4041f5c0
--- /dev/null
+++ b/deps/rabbit/.travis.yml.patch
@@ -0,0 +1,11 @@
+--- ../rabbit_common/.travis.yml 2020-03-04 13:38:36.985065000 +0100
++++ .travis.yml 2020-03-04 14:27:50.983504000 +0100
+@@ -43,7 +43,7 @@
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+ - make xref
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+- - make tests
++ - make ct-fast
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+
+ after_failure:
diff --git a/deps/rabbit/CODE_OF_CONDUCT.md b/deps/rabbit/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000000..08697906fd
--- /dev/null
+++ b/deps/rabbit/CODE_OF_CONDUCT.md
@@ -0,0 +1,44 @@
+# Contributor Code of Conduct
+
+As contributors and maintainers of this project, and in the interest of fostering an open
+and welcoming community, we pledge to respect all people who contribute through reporting
+issues, posting feature requests, updating documentation, submitting pull requests or
+patches, and other activities.
+
+We are committed to making participation in this project a harassment-free experience for
+everyone, regardless of level of experience, gender, gender identity and expression,
+sexual orientation, disability, personal appearance, body size, race, ethnicity, age,
+religion, or nationality.
+
+Examples of unacceptable behavior by participants include:
+
+ * The use of sexualized language or imagery
+ * Personal attacks
+ * Trolling or insulting/derogatory comments
+ * Public or private harassment
+ * Publishing others' private information, such as physical or electronic addresses,
+ without explicit permission
+ * Other unethical or unprofessional conduct
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments,
+commits, code, wiki edits, issues, and other contributions that are not aligned to this
+Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors
+that they deem inappropriate, threatening, offensive, or harmful.
+
+By adopting this Code of Conduct, project maintainers commit themselves to fairly and
+consistently applying these principles to every aspect of managing this project. Project
+maintainers who do not follow or enforce the Code of Conduct may be permanently removed
+from the project team.
+
+This Code of Conduct applies both within project spaces and in public spaces when an
+individual is representing the project or its community.
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by
+contacting a project maintainer at [info@rabbitmq.com](mailto:info@rabbitmq.com). All complaints will
+be reviewed and investigated and will result in a response that is deemed necessary and
+appropriate to the circumstances. Maintainers are obligated to maintain confidentiality
+with regard to the reporter of an incident.
+
+This Code of Conduct is adapted from the
+[Contributor Covenant](https://contributor-covenant.org), version 1.3.0, available at
+[contributor-covenant.org/version/1/3/0/](https://contributor-covenant.org/version/1/3/0/)
diff --git a/deps/rabbit/CONTRIBUTING.md b/deps/rabbit/CONTRIBUTING.md
new file mode 100644
index 0000000000..42af1f7517
--- /dev/null
+++ b/deps/rabbit/CONTRIBUTING.md
@@ -0,0 +1,123 @@
+Thank you for using RabbitMQ and for taking the time to contribute to the project.
+This document has two main parts:
+
+ * when and how to file GitHub issues for RabbitMQ projects
+ * how to submit pull requests
+
+They are intended to save you and RabbitMQ maintainers some time, so please
+take a moment to read through them.
+
+## Overview
+
+### GitHub issues
+
+The RabbitMQ team uses GitHub issues for _specific actionable items_ that
+engineers can work on. This assumes the following:
+
+* GitHub issues are not used for questions, investigations, root cause
+ analysis, discussions of potential issues, etc. (as defined by this team)
+* Enough information is provided by the reporter for maintainers to work with
+
+The team receives many questions through various venues every single
+day. Frequently, these questions do not include the necessary details
+the team needs to begin useful work. GitHub issues can very quickly
+turn into something impossible to navigate and make sense
+of. Because of this, questions, investigations, root cause analysis,
+and discussions of potential features are all considered to be
+[mailing list][rmq-users] material. If you are unsure where to begin,
+the [RabbitMQ users mailing list][rmq-users] is the right place.
+
+Getting all the details necessary to reproduce an issue, draw a
+conclusion, or even form a hypothesis about what's happening can take a
+fair amount of time. Please help others help you by providing a way to
+reproduce the behavior you're observing, or at least sharing as much
+relevant information as possible on the [RabbitMQ users mailing
+list][rmq-users].
+
+Please provide versions of the software used:
+
+ * RabbitMQ server
+ * Erlang
+ * Operating system version (and distribution, if applicable)
+ * All client libraries used
+ * RabbitMQ plugins (if applicable)
+
+The following information greatly helps in investigating and reproducing issues:
+
+ * RabbitMQ server logs
+ * A code example or terminal transcript that can be used to reproduce
+ * Full exception stack traces (a single line message is not enough!)
+ * `rabbitmqctl report` and `rabbitmqctl environment` output
+ * Other relevant details about the environment and workload, e.g. a traffic capture
+ * Feel free to edit out hostnames and other potentially sensitive information.
+
+To make collecting much of this and other environment information easier, use
+the [`rabbitmq-collect-env`][rmq-collect-env] script. It will produce an archive with
+server logs, operating system logs, output of certain diagnostic commands and so on.
+Please note that **no effort is made to scrub any information that may be sensitive**.
+
+### Pull Requests
+
+RabbitMQ projects use pull requests to discuss, collaborate on and accept code contributions.
+Pull requests are the primary place for discussing code changes.
+
+Here's the recommended workflow:
+
+ * [Fork the repository][github-fork] or repositories you plan on contributing to. If multiple
+ repositories are involved in addressing the same issue, please use the same branch name
+ in each repository
+ * Create a branch with a descriptive name in the relevant repositories
+ * Make your changes, run tests (usually with `make tests`), commit with a
+ [descriptive message][git-commit-msgs], push to your fork
+ * Submit pull requests with an explanation of what has been changed and **why**
+ * Submit a filled out and signed [Contributor Agreement][ca-agreement] if needed (see below)
+ * Be patient. We will get to your pull request eventually
+
+If what you are going to work on is a substantial change, please first
+ask the core team for their opinion on the [RabbitMQ users mailing list][rmq-users].
+
+## Running Tests
+
+To run a "fast suite" (a subset of tests):
+
+ make ct-fast
+
+To run a "slow suite" (a subset of tests that take much longer to run):
+
+ make ct-slow
+
+To run a particular suite:
+
+ make ct-$suite_name
+
+for example, to run the `backing_queue` suite:
+
+ make ct-backing_queue
+
+Finally,
+
+ make tests
+
+will run all suites.
+
+## Code of Conduct
+
+See [CODE_OF_CONDUCT.md](./CODE_OF_CONDUCT.md).
+
+## Contributor Agreement
+
+If you want to contribute a non-trivial change, please submit a signed
+copy of our [Contributor Agreement][ca-agreement] around the time you
+submit your pull request. This will make it much easier (and in some
+cases, possible at all) for the RabbitMQ team at Pivotal to merge your
+contribution.
+
+## Where to Ask Questions
+
+If something isn't clear, feel free to ask on our [mailing list][rmq-users].
+
+[rmq-collect-env]: https://github.com/rabbitmq/support-tools/blob/master/scripts/rabbitmq-collect-env
+[git-commit-msgs]: https://chris.beams.io/posts/git-commit/
+[rmq-users]: https://groups.google.com/forum/#!forum/rabbitmq-users
+[ca-agreement]: https://cla.pivotal.io/sign/rabbitmq
+[github-fork]: https://help.github.com/articles/fork-a-repo/
diff --git a/deps/rabbit/INSTALL b/deps/rabbit/INSTALL
new file mode 100644
index 0000000000..d105eb5498
--- /dev/null
+++ b/deps/rabbit/INSTALL
@@ -0,0 +1,2 @@
+Please see https://www.rabbitmq.com/download.html for installation
+guides.
diff --git a/deps/rabbit/LICENSE b/deps/rabbit/LICENSE
new file mode 100644
index 0000000000..626a19fef0
--- /dev/null
+++ b/deps/rabbit/LICENSE
@@ -0,0 +1,5 @@
+This package, the RabbitMQ server, is licensed under the MPL 2.0. For
+the MPL, please see LICENSE-MPL-RabbitMQ.
+
+If you have any questions regarding licensing, please contact us at
+info@rabbitmq.com.
diff --git a/deps/rabbit/LICENSE-MPL-RabbitMQ b/deps/rabbit/LICENSE-MPL-RabbitMQ
new file mode 100644
index 0000000000..14e2f777f6
--- /dev/null
+++ b/deps/rabbit/LICENSE-MPL-RabbitMQ
@@ -0,0 +1,373 @@
+Mozilla Public License Version 2.0
+==================================
+
+1. Definitions
+--------------
+
+1.1. "Contributor"
+ means each individual or legal entity that creates, contributes to
+ the creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+ means the combination of the Contributions of others (if any) used
+ by a Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+ means Source Code Form to which the initial Contributor has attached
+ the notice in Exhibit A, the Executable Form of such Source Code
+ Form, and Modifications of such Source Code Form, in each case
+ including portions thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ (a) that the initial Contributor has attached the notice described
+ in Exhibit B to the Covered Software; or
+
+ (b) that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the
+ terms of a Secondary License.
+
+1.6. "Executable Form"
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+ means a work that combines Covered Software with other material, in
+ a separate file or files, that is not Covered Software.
+
+1.8. "License"
+ means this document.
+
+1.9. "Licensable"
+ means having the right to grant, to the maximum extent possible,
+ whether at the time of the initial grant or subsequently, any and
+ all of the rights conveyed by this License.
+
+1.10. "Modifications"
+ means any of the following:
+
+ (a) any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered
+ Software; or
+
+ (b) any new file in Source Code Form that contains any Covered
+ Software.
+
+1.11. "Patent Claims" of a Contributor
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the
+ License, by the making, using, selling, offering for sale, having
+ made, import, or transfer of either its Contributions or its
+ Contributor Version.
+
+1.12. "Secondary License"
+ means either the GNU General Public License, Version 2.0, the GNU
+ Lesser General Public License, Version 2.1, the GNU Affero General
+ Public License, Version 3.0, or any later versions of those
+ licenses.
+
+1.13. "Source Code Form"
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that
+ controls, is controlled by, or is under common control with You. For
+ purposes of this definition, "control" means (a) the power, direct
+ or indirect, to cause the direction or management of such entity,
+ whether by contract or otherwise, or (b) ownership of more than
+ fifty percent (50%) of the outstanding shares or beneficial
+ ownership of such entity.
+
+2. License Grants and Conditions
+--------------------------------
+
+2.1. Grants
+
+Each Contributor hereby grants You a world-wide, royalty-free,
+non-exclusive license:
+
+(a) under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+(b) under Patent Claims of such Contributor to make, use, sell, offer
+ for sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+The licenses granted in Section 2.1 with respect to any Contribution
+become effective for each Contribution on the date the Contributor first
+distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+The licenses granted in this Section 2 are the only rights granted under
+this License. No additional rights or licenses will be implied from the
+distribution or licensing of Covered Software under this License.
+Notwithstanding Section 2.1(b) above, no patent license is granted by a
+Contributor:
+
+(a) for any code that a Contributor has removed from Covered Software;
+ or
+
+(b) for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+(c) under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+This License does not grant any rights in the trademarks, service marks,
+or logos of any Contributor (except as may be necessary to comply with
+the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+No Contributor makes additional grants as a result of Your choice to
+distribute the Covered Software under a subsequent version of this
+License (see Section 10.2) or under the terms of a Secondary License (if
+permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+Each Contributor represents that the Contributor believes its
+Contributions are its original creation(s) or it has sufficient rights
+to grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+This License is not intended to limit any rights You have under
+applicable copyright doctrines of fair use, fair dealing, or other
+equivalents.
+
+2.7. Conditions
+
+Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
+in Section 2.1.
+
+3. Responsibilities
+-------------------
+
+3.1. Distribution of Source Form
+
+All distribution of Covered Software in Source Code Form, including any
+Modifications that You create or to which You contribute, must be under
+the terms of this License. You must inform recipients that the Source
+Code Form of the Covered Software is governed by the terms of this
+License, and how they can obtain a copy of this License. You may not
+attempt to alter or restrict the recipients' rights in the Source Code
+Form.
+
+3.2. Distribution of Executable Form
+
+If You distribute Covered Software in Executable Form then:
+
+(a) such Covered Software must also be made available in Source Code
+ Form, as described in Section 3.1, and You must inform recipients of
+ the Executable Form how they can obtain a copy of such Source Code
+ Form by reasonable means in a timely manner, at a charge no more
+ than the cost of distribution to the recipient; and
+
+(b) You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter
+ the recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+You may create and distribute a Larger Work under terms of Your choice,
+provided that You also comply with the requirements of this License for
+the Covered Software. If the Larger Work is a combination of Covered
+Software with a work governed by one or more Secondary Licenses, and the
+Covered Software is not Incompatible With Secondary Licenses, this
+License permits You to additionally distribute such Covered Software
+under the terms of such Secondary License(s), so that the recipient of
+the Larger Work may, at their option, further distribute the Covered
+Software under the terms of either this License or such Secondary
+License(s).
+
+3.4. Notices
+
+You may not remove or alter the substance of any license notices
+(including copyright notices, patent notices, disclaimers of warranty,
+or limitations of liability) contained within the Source Code Form of
+the Covered Software, except that You may alter any license notices to
+the extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+You may choose to offer, and to charge a fee for, warranty, support,
+indemnity or liability obligations to one or more recipients of Covered
+Software. However, You may do so only on Your own behalf, and not on
+behalf of any Contributor. You must make it absolutely clear that any
+such warranty, support, indemnity, or liability obligation is offered by
+You alone, and You hereby agree to indemnify every Contributor for any
+liability incurred by such Contributor as a result of warranty, support,
+indemnity or liability terms You offer. You may include additional
+disclaimers of warranty and limitations of liability specific to any
+jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+---------------------------------------------------
+
+If it is impossible for You to comply with any of the terms of this
+License with respect to some or all of the Covered Software due to
+statute, judicial order, or regulation then You must: (a) comply with
+the terms of this License to the maximum extent possible; and (b)
+describe the limitations and the code they affect. Such description must
+be placed in a text file included with all distributions of the Covered
+Software under this License. Except to the extent prohibited by statute
+or regulation, such description must be sufficiently detailed for a
+recipient of ordinary skill to be able to understand it.
+
+5. Termination
+--------------
+
+5.1. The rights granted under this License will terminate automatically
+if You fail to comply with any of its terms. However, if You become
+compliant, then the rights granted under this License from a particular
+Contributor are reinstated (a) provisionally, unless and until such
+Contributor explicitly and finally terminates Your grants, and (b) on an
+ongoing basis, if such Contributor fails to notify You of the
+non-compliance by some reasonable means prior to 60 days after You have
+come back into compliance. Moreover, Your grants from a particular
+Contributor are reinstated on an ongoing basis if such Contributor
+notifies You of the non-compliance by some reasonable means, this is the
+first time You have received notice of non-compliance with this License
+from such Contributor, and You become compliant prior to 30 days after
+Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+infringement claim (excluding declaratory judgment actions,
+counter-claims, and cross-claims) alleging that a Contributor Version
+directly or indirectly infringes any patent, then the rights granted to
+You by any and all Contributors for the Covered Software under Section
+2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all
+end user license agreements (excluding distributors and resellers) which
+have been validly granted by You or Your distributors under this License
+prior to termination shall survive termination.
+
+************************************************************************
+* *
+* 6. Disclaimer of Warranty *
+* ------------------------- *
+* *
+* Covered Software is provided under this License on an "as is" *
+* basis, without warranty of any kind, either expressed, implied, or *
+* statutory, including, without limitation, warranties that the *
+* Covered Software is free of defects, merchantable, fit for a *
+* particular purpose or non-infringing. The entire risk as to the *
+* quality and performance of the Covered Software is with You. *
+* Should any Covered Software prove defective in any respect, You *
+* (not any Contributor) assume the cost of any necessary servicing, *
+* repair, or correction. This disclaimer of warranty constitutes an *
+* essential part of this License. No use of any Covered Software is *
+* authorized under this License except under this disclaimer. *
+* *
+************************************************************************
+
+************************************************************************
+* *
+* 7. Limitation of Liability *
+* -------------------------- *
+* *
+* Under no circumstances and under no legal theory, whether tort *
+* (including negligence), contract, or otherwise, shall any *
+* Contributor, or anyone who distributes Covered Software as *
+* permitted above, be liable to You for any direct, indirect, *
+* special, incidental, or consequential damages of any character *
+* including, without limitation, damages for lost profits, loss of *
+* goodwill, work stoppage, computer failure or malfunction, or any *
+* and all other commercial damages or losses, even if such party *
+* shall have been informed of the possibility of such damages. This *
+* limitation of liability shall not apply to liability for death or *
+* personal injury resulting from such party's negligence to the *
+* extent applicable law prohibits such limitation. Some *
+* jurisdictions do not allow the exclusion or limitation of *
+* incidental or consequential damages, so this exclusion and *
+* limitation may not apply to You. *
+* *
+************************************************************************
+
+8. Litigation
+-------------
+
+Any litigation relating to this License may be brought only in the
+courts of a jurisdiction where the defendant maintains its principal
+place of business and such litigation shall be governed by laws of that
+jurisdiction, without reference to its conflict-of-law provisions.
+Nothing in this Section shall prevent a party's ability to bring
+cross-claims or counter-claims.
+
+9. Miscellaneous
+----------------
+
+This License represents the complete agreement concerning the subject
+matter hereof. If any provision of this License is held to be
+unenforceable, such provision shall be reformed only to the extent
+necessary to make it enforceable. Any law or regulation which provides
+that the language of a contract shall be construed against the drafter
+shall not be used to construe this License against a Contributor.
+
+10. Versions of the License
+---------------------------
+
+10.1. New Versions
+
+Mozilla Foundation is the license steward. Except as provided in Section
+10.3, no one other than the license steward has the right to modify or
+publish new versions of this License. Each version will be given a
+distinguishing version number.
+
+10.2. Effect of New Versions
+
+You may distribute the Covered Software under the terms of the version
+of the License under which You originally received the Covered Software,
+or under the terms of any subsequent version published by the license
+steward.
+
+10.3. Modified Versions
+
+If you create software not governed by this License, and you want to
+create a new license for such software, you may create and use a
+modified version of this License if you rename the license and remove
+any references to the name of the license steward (except to note that
+such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+Licenses
+
+If You choose to distribute Source Code Form that is Incompatible With
+Secondary Licenses under the terms of this version of the License, the
+notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+-------------------------------------------
+
+ This Source Code Form is subject to the terms of the Mozilla Public
+ License, v. 2.0. If a copy of the MPL was not distributed with this
+ file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular
+file, then You may include the notice in a location (such as a LICENSE
+file in a relevant directory) where a recipient would be likely to look
+for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+---------------------------------------------------------
+
+ This Source Code Form is "Incompatible With Secondary Licenses", as
+ defined by the Mozilla Public License, v. 2.0.
diff --git a/deps/rabbit/Makefile b/deps/rabbit/Makefile
new file mode 100644
index 0000000000..7d2fae2ea5
--- /dev/null
+++ b/deps/rabbit/Makefile
@@ -0,0 +1,303 @@
+PROJECT = rabbit
+PROJECT_DESCRIPTION = RabbitMQ
+PROJECT_MOD = rabbit
+PROJECT_REGISTERED = rabbit_amqqueue_sup \
+ rabbit_direct_client_sup \
+ rabbit_log \
+ rabbit_node_monitor \
+ rabbit_router
+
+define PROJECT_ENV
+[
+ {tcp_listeners, [5672]},
+ {num_tcp_acceptors, 10},
+ {ssl_listeners, []},
+ {num_ssl_acceptors, 10},
+ {ssl_options, []},
+ {vm_memory_high_watermark, 0.4},
+ {vm_memory_high_watermark_paging_ratio, 0.5},
+ {vm_memory_calculation_strategy, rss},
+ {memory_monitor_interval, 2500},
+ {disk_free_limit, 50000000}, %% 50MB
+ {msg_store_index_module, rabbit_msg_store_ets_index},
+ {backing_queue_module, rabbit_variable_queue},
+ %% 0 ("no limit") would make a better default, but that
+ %% breaks the QPid Java client
+ {frame_max, 131072},
+ %% see rabbitmq-server#1593
+ {channel_max, 2047},
+ {connection_max, infinity},
+ {heartbeat, 60},
+ {msg_store_file_size_limit, 16777216},
+ {msg_store_shutdown_timeout, 600000},
+ {fhc_write_buffering, true},
+ {fhc_read_buffering, false},
+ {queue_index_max_journal_entries, 32768},
+ {queue_index_embed_msgs_below, 4096},
+ {default_user, <<"guest">>},
+ {default_pass, <<"guest">>},
+ {default_user_tags, [administrator]},
+ {default_vhost, <<"/">>},
+ {default_permissions, [<<".*">>, <<".*">>, <<".*">>]},
+ {loopback_users, [<<"guest">>]},
+ {password_hashing_module, rabbit_password_hashing_sha256},
+ {server_properties, []},
+ {collect_statistics, none},
+ {collect_statistics_interval, 5000},
+ {mnesia_table_loading_retry_timeout, 30000},
+ {mnesia_table_loading_retry_limit, 10},
+ {auth_mechanisms, ['PLAIN', 'AMQPLAIN']},
+ {auth_backends, [rabbit_auth_backend_internal]},
+ {delegate_count, 16},
+ {trace_vhosts, []},
+ {ssl_cert_login_from, distinguished_name},
+ {ssl_handshake_timeout, 5000},
+ {ssl_allow_poodle_attack, false},
+ {handshake_timeout, 10000},
+ {reverse_dns_lookups, false},
+ {cluster_partition_handling, ignore},
+ {cluster_keepalive_interval, 10000},
+ {autoheal_state_transition_timeout, 60000},
+ {tcp_listen_options, [{backlog, 128},
+ {nodelay, true},
+ {linger, {true, 0}},
+ {exit_on_close, false}
+ ]},
+ {halt_on_upgrade_failure, true},
+ {ssl_apps, [asn1, crypto, public_key, ssl]},
+ %% see rabbitmq-server#114
+ {mirroring_flow_control, true},
+ {mirroring_sync_batch_size, 4096},
+ %% see rabbitmq-server#227 and related tickets.
+ %% msg_store_credit_disc_bound only takes effect when
+ %% messages are persisted to the message store. If messages
+ %% are embedded on the queue index, then modifying this
+ %% setting has no effect because credit_flow is not used when
+ %% writing to the queue index. See the setting
+ %% queue_index_embed_msgs_below above.
+ {msg_store_credit_disc_bound, {4000, 800}},
+ {msg_store_io_batch_size, 4096},
+ %% see rabbitmq-server#143,
+ %% rabbitmq-server#949, rabbitmq-server#1098
+ {credit_flow_default_credit, {400, 200}},
+ {quorum_commands_soft_limit, 32},
+ {quorum_cluster_size, 5},
+ %% see rabbitmq-server#248
+ %% and rabbitmq-server#667
+ {channel_operation_timeout, 15000},
+
+ %% see rabbitmq-server#486
+ {autocluster,
+ [{peer_discovery_backend, rabbit_peer_discovery_classic_config}]
+ },
+ %% used by rabbit_peer_discovery_classic_config
+ {cluster_nodes, {[], disc}},
+
+ {config_entry_decoder, [{passphrase, undefined}]},
+
+ %% rabbitmq-server#973
+ {queue_explicit_gc_run_operation_threshold, 1000},
+ {lazy_queue_explicit_gc_run_operation_threshold, 1000},
+ {background_gc_enabled, false},
+ {background_gc_target_interval, 60000},
+ %% rabbitmq-server#589
+ {proxy_protocol, false},
+ {disk_monitor_failure_retries, 10},
+ {disk_monitor_failure_retry_interval, 120000},
+ %% either "stop_node" or "continue".
+ %% by default we choose to not terminate the entire node if one
+ %% vhost had to shut down, see server#1158 and server#1280
+ {vhost_restart_strategy, continue},
+ %% {global, prefetch count}
+ {default_consumer_prefetch, {false, 0}},
+ %% interval at which the channel can perform periodic actions
+ {channel_tick_interval, 60000},
+ %% Default max message size is 128 MB
+ {max_message_size, 134217728},
+ %% Socket writer will run GC every 1 GB of outgoing data
+ {writer_gc_threshold, 1000000000},
+ %% interval at which connection/channel tracking executes post operations
+ {tracking_execution_timeout, 15000},
+ {stream_messages_soft_limit, 256},
+ {track_auth_attempt_source, false}
+ ]
+endef
+
+# With Erlang.mk default behavior, the value of `$(APPS_DIR)` is always
+# relative to the top-level executed Makefile. In our case, it could be
+# a plugin for instance. However, the rabbitmq_prelaunch application is
+# in this repository, not the plugin's. That's why we need to override
+# this value here.
+APPS_DIR := $(CURDIR)/apps
+
+LOCAL_DEPS = sasl rabbitmq_prelaunch os_mon inets compiler public_key crypto ssl syntax_tools xmerl
+BUILD_DEPS = rabbitmq_cli syslog
+DEPS = cuttlefish ranch lager rabbit_common ra sysmon_handler stdout_formatter recon observer_cli osiris amqp10_common
+TEST_DEPS = rabbitmq_ct_helpers rabbitmq_ct_client_helpers amqp_client meck proper
+
+PLT_APPS += mnesia
+
+dep_cuttlefish = hex 2.4.1
+dep_syslog = git https://github.com/schlagert/syslog 3.4.5
+dep_osiris = git https://github.com/rabbitmq/osiris master
+
+define usage_xml_to_erl
+$(subst __,_,$(patsubst $(DOCS_DIR)/rabbitmq%.1.xml, src/rabbit_%_usage.erl, $(subst -,_,$(1))))
+endef
+
+DOCS_DIR = docs
+MANPAGES = $(wildcard $(DOCS_DIR)/*.[0-9])
+WEB_MANPAGES = $(patsubst %,%.html,$(MANPAGES))
+
+DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-test.mk
+DEP_PLUGINS = rabbit_common/mk/rabbitmq-build.mk \
+ rabbit_common/mk/rabbitmq-dist.mk \
+ rabbit_common/mk/rabbitmq-run.mk \
+ rabbit_common/mk/rabbitmq-test.mk \
+ rabbit_common/mk/rabbitmq-tools.mk
+
+# FIXME: Use erlang.mk patched for RabbitMQ, while waiting for PRs to be
+# reviewed and merged.
+
+ERLANG_MK_REPO = https://github.com/rabbitmq/erlang.mk.git
+ERLANG_MK_COMMIT = rabbitmq-tmp
+
+include rabbitmq-components.mk
+include erlang.mk
+
+# See above why we mess with `$(APPS_DIR)`.
+unexport APPS_DIR
+
+ifeq ($(strip $(BATS)),)
+BATS := $(ERLANG_MK_TMP)/bats/bin/bats
+endif
+
+BATS_GIT ?= https://github.com/sstephenson/bats
+BATS_COMMIT ?= v0.4.0
+
+$(BATS):
+ $(verbose) mkdir -p $(ERLANG_MK_TMP)
+ $(gen_verbose) git clone --depth 1 --branch=$(BATS_COMMIT) $(BATS_GIT) $(ERLANG_MK_TMP)/bats
+
+.PHONY: bats
+
+bats: $(BATS)
+ $(verbose) $(BATS) $(TEST_DIR)
+
+tests:: bats
+
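+# Common Test suites that take much longer to run. `make ct-fast` runs
+# every suite except these; `make ct-slow` runs only these (see the
+# CT_SUITES overrides right after the list).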
+SLOW_CT_SUITES := backing_queue \
+ channel_interceptor \
+ cluster \
+ cluster_rename \
+ clustering_management \
+ config_schema \
+ confirms_rejects \
+ consumer_timeout \
+ crashing_queues \
+ dynamic_ha \
+ dynamic_qq \
+ eager_sync \
+ feature_flags \
+ health_check \
+ lazy_queue \
+ many_node_ha \
+ metrics \
+ msg_store \
+ partitions \
+ per_user_connection_tracking \
+ per_vhost_connection_limit \
+ per_vhost_connection_limit_partitions \
+ per_vhost_msg_store \
+ per_vhost_queue_limit \
+ policy \
+ priority_queue \
+ priority_queue_recovery \
+ publisher_confirms_parallel \
+ queue_master_location \
+ queue_parallel \
+ quorum_queue \
+ rabbit_core_metrics_gc \
+ rabbit_fifo_prop \
+ rabbitmq_queues_cli_integration \
+ rabbitmqctl_integration \
+ simple_ha \
+ sync_detection \
+ unit_inbroker_non_parallel \
+ unit_inbroker_parallel \
+ vhost
+FAST_CT_SUITES := $(filter-out $(sort $(SLOW_CT_SUITES)),$(CT_SUITES))
+
+ct-fast: CT_SUITES = $(FAST_CT_SUITES)
+ct-slow: CT_SUITES = $(SLOW_CT_SUITES)
+
+# --------------------------------------------------------------------
+# Compilation.
+# --------------------------------------------------------------------
+
+RMQ_ERLC_OPTS += -I $(DEPS_DIR)/rabbit_common/include
+
+ifdef INSTRUMENT_FOR_QC
+RMQ_ERLC_OPTS += -DINSTR_MOD=gm_qc
+else
+RMQ_ERLC_OPTS += -DINSTR_MOD=gm
+endif
+
+ifdef CREDIT_FLOW_TRACING
+RMQ_ERLC_OPTS += -DCREDIT_FLOW_TRACING=true
+endif
+
+ifdef DEBUG_FF
+RMQ_ERLC_OPTS += -DDEBUG_QUORUM_QUEUE_FF=true
+endif
+
+ifndef USE_PROPER_QC
+# PropEr needs to be installed for property checking
+# http://proper.softlab.ntua.gr/
+USE_PROPER_QC := $(shell $(ERL) -eval 'io:format({module, proper} =:= code:ensure_loaded(proper)), halt().')
+RMQ_ERLC_OPTS += $(if $(filter true,$(USE_PROPER_QC)),-Duse_proper_qc)
+endif
+
+# --------------------------------------------------------------------
+# Documentation.
+# --------------------------------------------------------------------
+
+.PHONY: manpages web-manpages distclean-manpages
+
+docs:: manpages web-manpages
+
+manpages: $(MANPAGES)
+ @:
+
+web-manpages: $(WEB_MANPAGES)
+ @:
+
+# We use mandoc(1) to convert manpages to HTML plus an awk script which
+# does:
+# 1. remove tables at the top and the bottom (they recall the
+# manpage name, section and date)
+# 2. "downgrade" headers by one level (eg. h1 -> h2)
+# 3. annotate .Dl lines with more CSS classes
+%.html: %
+ $(gen_verbose) mandoc -T html -O 'fragment,man=%N.%S.html' "$<" | \
+ awk '\
+ /^<table class="head">$$/ { remove_table=1; next; } \
+ /^<table class="foot">$$/ { remove_table=1; next; } \
+ /^<\/table>$$/ { if (remove_table) { remove_table=0; next; } } \
+ { if (!remove_table) { \
+ line=$$0; \
+ gsub(/<h2/, "<h3", line); \
+ gsub(/<\/h2>/, "</h3>", line); \
+ gsub(/<h1/, "<h2", line); \
+ gsub(/<\/h1>/, "</h2>", line); \
+ gsub(/class="D1"/, "class=\"D1 lang-bash\"", line); \
+ gsub(/class="Bd Bd-indent"/, "class=\"Bd Bd-indent lang-bash\"", line); \
+ gsub(/&#[xX]201[cCdD];/, "\\&quot;", line); \
+ print line; \
+ } } \
+ ' > "$@"
+
+distclean:: distclean-manpages
+
+distclean-manpages::
+ $(gen_verbose) rm -f $(WEB_MANPAGES)
diff --git a/deps/rabbit/README.md b/deps/rabbit/README.md
new file mode 100644
index 0000000000..28bb2699fd
--- /dev/null
+++ b/deps/rabbit/README.md
@@ -0,0 +1,65 @@
+[![OTP v22.3](https://img.shields.io/github/workflow/status/rabbitmq/rabbitmq-server/Test%20-%20Erlang%2022.3/master?label=Erlang%2022.3)](https://github.com/rabbitmq/rabbitmq-server/actions?query=workflow%3A%22Test+-+Erlang+22.3%22+branch%3A%22master%22)
+[![OTP v23](https://img.shields.io/github/workflow/status/rabbitmq/rabbitmq-server/Test%20-%20Erlang%2023.1/master?label=Erlang%2023.1)](https://github.com/rabbitmq/rabbitmq-server/actions?query=workflow%3A%22Test+-+Erlang+23.1%22+branch%3Amaster)
+
+# RabbitMQ Server
+
+[RabbitMQ](https://rabbitmq.com) is a [feature-rich](https://rabbitmq.com/documentation.html), multi-protocol messaging broker. It supports:
+
+ * AMQP 0-9-1
+ * AMQP 1.0
+ * MQTT 3.1.1
+ * STOMP 1.0 through 1.2
+
+
+## Installation
+
+ * [Installation guides](https://rabbitmq.com/download.html) for various platforms
+ * [Kubernetes Cluster Operator](https://www.rabbitmq.com/kubernetes/operator/operator-overview.html)
+ * [Changelog](https://www.rabbitmq.com/changelog.html)
+ * [Releases](https://github.com/rabbitmq/rabbitmq-server/releases) on GitHub
+ * [Supported and unsupported series](https://www.rabbitmq.com/versions.html)
+ * [Supported Erlang versions](https://www.rabbitmq.com/which-erlang.html)
+
+
+## Tutorials & Documentation
+
+ * [RabbitMQ tutorials](https://rabbitmq.com/getstarted.html)
+ * [All documentation guides](https://rabbitmq.com/documentation.html)
+ * [CLI tools guide](https://rabbitmq.com/cli.html)
+ * [Configuration guide](https://rabbitmq.com/configure.html)
+ * [Client libraries and tools](https://rabbitmq.com/devtools.html)
+ * [Monitoring guide](https://rabbitmq.com/monitoring.html)
+ * [Production checklist](https://rabbitmq.com/production-checklist.html)
+ * [Runnable tutorials](https://github.com/rabbitmq/rabbitmq-tutorials/)
+ * [Documentation source](https://github.com/rabbitmq/rabbitmq-website/)
+
+
+## Getting Help
+
+ * [RabbitMQ mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users)
+ * [Commercial support](https://rabbitmq.com/services.html) from [Pivotal](https://pivotal.io) for open source RabbitMQ
+ * [Community Slack](https://rabbitmq-slack.herokuapp.com/)
+ * `#rabbitmq` on Freenode
+
+
+## Contributing
+
+See [CONTRIBUTING.md](./CONTRIBUTING.md) and our [development process overview](https://rabbitmq.com/github.html).
+
+Questions about contributing, internals and so on are very welcome on the [mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
+
+
+## Licensing
+
+RabbitMQ server is [licensed under the MPL 2.0](LICENSE-MPL-RabbitMQ).
+
+
+## Building From Source and Packaging
+
+ * [Building RabbitMQ from Source](https://rabbitmq.com/build-server.html)
+ * [Building RabbitMQ Distribution Packages](https://rabbitmq.com/build-server.html)
+
+
+## Copyright
+
+(c) 2007-2020 VMware, Inc. or its affiliates.
diff --git a/deps/rabbit/SECURITY.md b/deps/rabbit/SECURITY.md
new file mode 100644
index 0000000000..762149554f
--- /dev/null
+++ b/deps/rabbit/SECURITY.md
@@ -0,0 +1,24 @@
+# Security Policy
+
+## Supported Versions
+
+See [RabbitMQ Release Series](https://www.rabbitmq.com/versions.html) for a list of currently supported
+versions.
+
+Vulnerabilities reported for versions out of support will not be investigated.
+
+
+## Reporting a Vulnerability
+
+Please responsibly disclose vulnerabilities to `security@rabbitmq.com` and include the following information:
+
+ * RabbitMQ and Erlang versions used
+ * Operating system used
+ * A set of steps to reproduce the observed behavior
+ * An archive produced by [rabbitmq-collect-env](https://github.com/rabbitmq/support-tools/blob/master/scripts/rabbitmq-collect-env)
+
+The RabbitMQ core team will get back to you after we have triaged the issue. If sufficient reproduction
+information is not available, we won't be able to act on the report.
+
+The RabbitMQ core team does not have a security vulnerability bounty programme at this time.
+
diff --git a/deps/rabbit/apps/rabbitmq_prelaunch/.gitignore b/deps/rabbit/apps/rabbitmq_prelaunch/.gitignore
new file mode 100644
index 0000000000..adca0d7655
--- /dev/null
+++ b/deps/rabbit/apps/rabbitmq_prelaunch/.gitignore
@@ -0,0 +1,12 @@
+*~
+.sw?
+.*.sw?
+*.beam
+*.coverdata
+/ebin/
+/.erlang.mk/
+/rabbitmq_prelaunch.d
+/xrefr
+
+# Dialyzer
+*.plt
diff --git a/deps/rabbit/apps/rabbitmq_prelaunch/Makefile b/deps/rabbit/apps/rabbitmq_prelaunch/Makefile
new file mode 100644
index 0000000000..572f7703d4
--- /dev/null
+++ b/deps/rabbit/apps/rabbitmq_prelaunch/Makefile
@@ -0,0 +1,11 @@
+PROJECT = rabbitmq_prelaunch
+PROJECT_DESCRIPTION = RabbitMQ prelaunch setup
+PROJECT_VERSION = 1.0.0
+PROJECT_MOD = rabbit_prelaunch_app
+
+DEPS = rabbit_common lager
+
+DEP_PLUGINS = rabbit_common/mk/rabbitmq-build.mk
+
+include ../../rabbitmq-components.mk
+include ../../erlang.mk
diff --git a/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_boot_state.erl b/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_boot_state.erl
new file mode 100644
index 0000000000..c76824e7be
--- /dev/null
+++ b/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_boot_state.erl
@@ -0,0 +1,76 @@
+%%%-------------------------------------------------------------------
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2019-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_boot_state).
+
+-include_lib("eunit/include/eunit.hrl").
+
+-export([get/0,
+ set/1,
+ wait_for/2]).
+
+-define(PT_KEY_BOOT_STATE, {?MODULE, boot_state}).
+
+-type boot_state() :: 'stopped' | 'booting' | 'ready' | 'stopping'.
+
+-export_type([boot_state/0]).
+
+-spec get() -> boot_state().
+get() ->
+ persistent_term:get(?PT_KEY_BOOT_STATE, stopped).
+
+-spec set(boot_state()) -> ok.
+set(BootState) ->
+ rabbit_log_prelaunch:debug("Change boot state to `~s`", [BootState]),
+ ?assert(is_valid(BootState)),
+ case BootState of
+ stopped -> persistent_term:erase(?PT_KEY_BOOT_STATE);
+ _ -> persistent_term:put(?PT_KEY_BOOT_STATE, BootState)
+ end,
+ rabbit_boot_state_sup:notify_boot_state_listeners(BootState).
+
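+%% Polls every 200 ms until the target boot state is reached. With an
+%% integer timeout, the remaining budget shrinks by the poll interval on
+%% each iteration; once it goes negative, the final clause returns
+%% {error, timeout}.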
+-spec wait_for(boot_state(), timeout()) -> ok | {error, timeout}.
+wait_for(BootState, infinity) ->
+ case is_reached(BootState) of
+ true -> ok;
+ false -> Wait = 200,
+ timer:sleep(Wait),
+ wait_for(BootState, infinity)
+ end;
+wait_for(BootState, Timeout)
+ when is_integer(Timeout) andalso Timeout >= 0 ->
+ case is_reached(BootState) of
+ true -> ok;
+ false -> Wait = 200,
+ timer:sleep(Wait),
+ wait_for(BootState, Timeout - Wait)
+ end;
+wait_for(_, _) ->
+ {error, timeout}.
+
+boot_state_idx(stopped) -> 0;
+boot_state_idx(booting) -> 1;
+boot_state_idx(ready) -> 2;
+boot_state_idx(stopping) -> 3.
+
+is_valid(BootState) ->
+ is_integer(boot_state_idx(BootState)).
+
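+%% The boot sequence is cyclic (stopped -> booting -> ready -> stopping ->
+%% stopped), so a plain index comparison is not enough: a target of
+%% `stopped` counts as reached from every state except an in-progress
+%% `stopping`, and from `stopped` any target is treated as already reached.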
+is_reached(TargetBootState) ->
+ is_reached(?MODULE:get(), TargetBootState).
+
+is_reached(CurrentBootState, CurrentBootState) ->
+ true;
+is_reached(stopping, stopped) ->
+ false;
+is_reached(_CurrentBootState, stopped) ->
+ true;
+is_reached(stopped, _TargetBootState) ->
+ true;
+is_reached(CurrentBootState, TargetBootState) ->
+ boot_state_idx(TargetBootState) =< boot_state_idx(CurrentBootState).
diff --git a/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_boot_state_sup.erl b/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_boot_state_sup.erl
new file mode 100644
index 0000000000..fbdc5781fc
--- /dev/null
+++ b/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_boot_state_sup.erl
@@ -0,0 +1,38 @@
+%%%-------------------------------------------------------------------
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_boot_state_sup).
+-behaviour(supervisor).
+
+-export([start_link/0,
+ init/1]).
+
+-export([notify_boot_state_listeners/1]).
+
+start_link() ->
+ supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+init([]) ->
+ SystemdSpec = #{id => rabbit_boot_state_systemd,
+ start => {rabbit_boot_state_systemd, start_link, []},
+ restart => transient},
+ {ok, {#{strategy => one_for_one,
+ intensity => 1,
+ period => 5},
+ [SystemdSpec]}}.
+
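+%% Boot state listeners are the children of this supervisor: each one is
+%% a gen_server expected to handle a `{notify_boot_state, BootState}`
+%% cast, as rabbit_boot_state_systemd does.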
+-spec notify_boot_state_listeners(rabbit_boot_state:boot_state()) -> ok.
+notify_boot_state_listeners(BootState) ->
+ lists:foreach(
+ fun
+ ({_, Child, _, _}) when is_pid(Child) ->
+ gen_server:cast(Child, {notify_boot_state, BootState});
+ (_) ->
+ ok
+ end,
+ supervisor:which_children(?MODULE)).
diff --git a/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_boot_state_systemd.erl b/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_boot_state_systemd.erl
new file mode 100644
index 0000000000..f838535b6a
--- /dev/null
+++ b/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_boot_state_systemd.erl
@@ -0,0 +1,174 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2015-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_boot_state_systemd).
+
+-behaviour(gen_server).
+
+-export([start_link/0]).
+
+-export([init/1,
+ handle_call/3,
+ handle_cast/2,
+ terminate/2,
+ code_change/3]).
+
+-record(state, {mechanism,
+ sd_notify_module,
+ socket}).
+
+-define(LOG_PREFIX, "Boot state/systemd: ").
+
+start_link() ->
+ gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
+
+init([]) ->
+ case os:type() of
+ {unix, _} ->
+ case code:load_file(sd_notify) of
+ {module, sd_notify} ->
+ {ok, #state{mechanism = legacy,
+ sd_notify_module = sd_notify}};
+ {error, _} ->
+ case os:getenv("NOTIFY_SOCKET") of
+ false ->
+ ignore;
+ "" ->
+ ignore;
+ Socket ->
+ {ok, #state{mechanism = socat,
+ socket = Socket}}
+ end
+ end;
+ _ ->
+ ignore
+ end.
+
+handle_call(_Request, _From, State) ->
+ {noreply, State}.
+
+handle_cast({notify_boot_state, BootState}, State) ->
+ notify_boot_state(BootState, State),
+ {noreply, State}.
+
+terminate(normal, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+%%% Private
+
+notify_boot_state(ready = BootState,
+ #state{mechanism = legacy, sd_notify_module = SDNotify}) ->
+ rabbit_log_prelaunch:debug(
+ ?LOG_PREFIX "notifying of state `~s` (via native module)",
+ [BootState]),
+ sd_notify_legacy(SDNotify);
+notify_boot_state(ready = BootState,
+ #state{mechanism = socat, socket = Socket}) ->
+ rabbit_log_prelaunch:debug(
+ ?LOG_PREFIX "notifying of state `~s` (via socat(1))",
+ [BootState]),
+ sd_notify_socat(Socket);
+notify_boot_state(BootState, _) ->
+ rabbit_log_prelaunch:debug(
+ ?LOG_PREFIX "ignoring state `~s`",
+ [BootState]),
+ ok.
+
+sd_notify_message() ->
+ "READY=1\nSTATUS=Initialized\nMAINPID=" ++ os:getpid() ++ "\n".
+
+sd_notify_legacy(SDNotify) ->
+ SDNotify:sd_notify(0, sd_notify_message()).
+
+%% socat(1) is the most portable way to implement sd_notify in Erlang
+%% without introducing a NIF. Currently, the following issues prevent
+%% us from implementing it in a more reasonable way:
+%% - systemd-notify(1) is unstable for non-root users
+%% - Erlang does not support Unix domain sockets.
+%%
+%% Some details on how we ended with such a solution:
+%% https://github.com/rabbitmq/rabbitmq-server/issues/664
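+%%
+%% Roughly, this amounts to piping the notification message into
+%%
+%%     socat unix-sendto:"$NOTIFY_SOCKET" STDIO
+%%
+%% (or abstract-sendto: when the socket path starts with "@").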
+sd_notify_socat(Socket) ->
+ case sd_current_unit() of
+ {ok, Unit} ->
+ rabbit_log_prelaunch:debug(
+ ?LOG_PREFIX "systemd unit for activation check: \"~s\"~n",
+ [Unit]),
+ sd_notify_socat(Socket, Unit);
+ _ ->
+ ok
+ end.
+
+sd_notify_socat(Socket, Unit) ->
+ try sd_open_port(Socket) of
+ Port ->
+ Port ! {self(), {command, sd_notify_message()}},
+ Result = sd_wait_activation(Port, Unit),
+ port_close(Port),
+ Result
+ catch
+ Class:Reason ->
+ rabbit_log_prelaunch:debug(
+ ?LOG_PREFIX "Failed to start socat(1): ~p:~p~n",
+ [Class, Reason]),
+ false
+ end.
+
+sd_current_unit() ->
+ CmdOut = os:cmd("ps -o unit= -p " ++ os:getpid()),
+ Ret = (catch re:run(CmdOut,
+ "([-.@0-9a-zA-Z]+)",
+ [unicode, {capture, all_but_first, list}])),
+ case Ret of
+ {'EXIT', _} -> error;
+ {match, [Unit]} -> {ok, Unit};
+ _ -> error
+ end.
+
+socat_socket_arg("@" ++ AbstractUnixSocket) ->
+ "abstract-sendto:" ++ AbstractUnixSocket;
+socat_socket_arg(UnixSocket) ->
+ "unix-sendto:" ++ UnixSocket.
+
+sd_open_port(Socket) ->
+ open_port(
+ {spawn_executable, os:find_executable("socat")},
+ [{args, [socat_socket_arg(Socket), "STDIO"]},
+ use_stdio, out]).
+
+sd_wait_activation(Port, Unit) ->
+ case os:find_executable("systemctl") of
+ false ->
+ rabbit_log_prelaunch:debug(
+ ?LOG_PREFIX "systemctl(1) unavailable, falling back to sleep~n"),
+ timer:sleep(5000),
+ ok;
+ _ ->
+ sd_wait_activation(Port, Unit, 10)
+ end.
+
+sd_wait_activation(_, _, 0) ->
+ rabbit_log_prelaunch:debug(
+ ?LOG_PREFIX "service still in 'activating' state, bailing out~n"),
+ ok;
+sd_wait_activation(Port, Unit, AttemptsLeft) ->
+ Ret = os:cmd("systemctl show --property=ActiveState -- '" ++ Unit ++ "'"),
+ case Ret of
+ "ActiveState=activating\n" ->
+ timer:sleep(1000),
+ sd_wait_activation(Port, Unit, AttemptsLeft - 1);
+ "ActiveState=" ++ _ ->
+ ok;
+ _ = Err ->
+ rabbit_log_prelaunch:debug(
+ ?LOG_PREFIX "unexpected status from systemd: ~p~n", [Err]),
+ ok
+ end.
diff --git a/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_prelaunch.erl b/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_prelaunch.erl
new file mode 100644
index 0000000000..b6b29481c7
--- /dev/null
+++ b/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_prelaunch.erl
@@ -0,0 +1,228 @@
+-module(rabbit_prelaunch).
+
+-include_lib("eunit/include/eunit.hrl").
+
+-export([run_prelaunch_first_phase/0,
+ assert_mnesia_is_stopped/0,
+ get_context/0,
+ get_stop_reason/0,
+ set_stop_reason/1,
+ clear_stop_reason/0,
+ is_initial_pass/0,
+ initial_pass_finished/0,
+ shutdown_func/1]).
+
+-ifdef(TEST).
+-export([store_context/1,
+ clear_context_cache/0]).
+-endif.
+
+-define(PT_KEY_CONTEXT, {?MODULE, context}).
+-define(PT_KEY_INITIAL_PASS, {?MODULE, initial_pass_finished}).
+-define(PT_KEY_SHUTDOWN_FUNC, {?MODULE, chained_shutdown_func}).
+-define(PT_KEY_STOP_REASON, {?MODULE, stop_reason}).
+
+run_prelaunch_first_phase() ->
+ try
+ do_run()
+ catch
+ throw:{error, _} = Error ->
+ rabbit_prelaunch_errors:log_error(Error),
+ set_stop_reason(Error),
+ rabbit_boot_state:set(stopped),
+ Error;
+ Class:Exception:Stacktrace ->
+ rabbit_prelaunch_errors:log_exception(
+ Class, Exception, Stacktrace),
+ Error = {error, Exception},
+ set_stop_reason(Error),
+ rabbit_boot_state:set(stopped),
+ Error
+ end.
+
+do_run() ->
+ %% Indicate RabbitMQ is booting.
+ clear_stop_reason(),
+ rabbit_boot_state:set(booting),
+
+ %% Configure dbg if requested.
+ rabbit_prelaunch_early_logging:enable_quick_dbg(rabbit_env:dbg_config()),
+
+ %% Setup signal handler.
+ ok = rabbit_prelaunch_sighandler:setup(),
+
+ %% We assert Mnesia is stopped before we run the prelaunch
+ %% phases.
+ %%
+ %% We need this because our cluster consistency check (in the second
+ %% phase) depends on Mnesia not being started before it has a chance
+ %% to run.
+ %%
+ %% Also, in the initial pass, we don't want Mnesia to run before
+ %% Erlang distribution is configured.
+ assert_mnesia_is_stopped(),
+
+    %% Get information to set up logging.
+ Context0 = rabbit_env:get_context_before_logging_init(),
+ ?assertMatch(#{}, Context0),
+
+ %% Setup logging for the prelaunch phase.
+ ok = rabbit_prelaunch_early_logging:setup_early_logging(Context0, true),
+
+ IsInitialPass = is_initial_pass(),
+ case IsInitialPass of
+ true ->
+ rabbit_log_prelaunch:debug(""),
+ rabbit_log_prelaunch:debug(
+ "== Prelaunch phase [1/2] (initial pass) =="),
+ rabbit_log_prelaunch:debug("");
+ false ->
+ rabbit_log_prelaunch:debug(""),
+ rabbit_log_prelaunch:debug("== Prelaunch phase [1/2] =="),
+ rabbit_log_prelaunch:debug("")
+ end,
+ rabbit_env:log_process_env(),
+
+ %% Load rabbitmq-env.conf, redo logging setup and continue.
+ Context1 = rabbit_env:get_context_after_logging_init(Context0),
+ ?assertMatch(#{}, Context1),
+ ok = rabbit_prelaunch_early_logging:setup_early_logging(Context1, true),
+ rabbit_env:log_process_env(),
+
+ %% Complete context now that we have the final environment loaded.
+ Context2 = rabbit_env:get_context_after_reloading_env(Context1),
+ ?assertMatch(#{}, Context2),
+ store_context(Context2),
+ rabbit_env:log_context(Context2),
+ ok = setup_shutdown_func(),
+
+ Context = Context2#{initial_pass => IsInitialPass},
+
+ rabbit_env:context_to_code_path(Context),
+ rabbit_env:context_to_app_env_vars(Context),
+
+ %% 1. Erlang/OTP compatibility check.
+ ok = rabbit_prelaunch_erlang_compat:check(Context),
+
+ %% 2. Configuration check + loading.
+ ok = rabbit_prelaunch_conf:setup(Context),
+
+ %% 3. Erlang distribution check + start.
+ ok = rabbit_prelaunch_dist:setup(Context),
+
+ %% 4. Write PID file.
+ rabbit_log_prelaunch:debug(""),
+ _ = write_pid_file(Context),
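+    %% Returning `ignore` tells rabbit_prelaunch_sup that prelaunch does
+    %% not start a child process; it only configures the node.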
+ ignore.
+
+assert_mnesia_is_stopped() ->
+ ?assertNot(lists:keymember(mnesia, 1, application:which_applications())).
+
+store_context(Context) when is_map(Context) ->
+ persistent_term:put(?PT_KEY_CONTEXT, Context).
+
+get_context() ->
+ case persistent_term:get(?PT_KEY_CONTEXT, undefined) of
+ undefined -> undefined;
+ Context -> Context#{initial_pass => is_initial_pass()}
+ end.
+
+-ifdef(TEST).
+clear_context_cache() ->
+ persistent_term:erase(?PT_KEY_CONTEXT).
+-endif.
+
+get_stop_reason() ->
+ persistent_term:get(?PT_KEY_STOP_REASON, undefined).
+
+set_stop_reason(Reason) ->
+ case get_stop_reason() of
+ undefined ->
+ rabbit_log_prelaunch:debug("Set stop reason to: ~p", [Reason]),
+ persistent_term:put(?PT_KEY_STOP_REASON, Reason);
+ _ ->
+ ok
+ end.
+
+clear_stop_reason() ->
+ persistent_term:erase(?PT_KEY_STOP_REASON).
+
+is_initial_pass() ->
+ not persistent_term:get(?PT_KEY_INITIAL_PASS, false).
+
+initial_pass_finished() ->
+ persistent_term:put(?PT_KEY_INITIAL_PASS, true).
+
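+%% The `kernel` application calls the configured `shutdown_func` as
+%% `Mod:Func(Reason)` when it stops. We register ourselves there and
+%% remember any previously configured function so we can chain it.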
+setup_shutdown_func() ->
+ ThisMod = ?MODULE,
+ ThisFunc = shutdown_func,
+ ExistingShutdownFunc = application:get_env(kernel, shutdown_func),
+ case ExistingShutdownFunc of
+ {ok, {ThisMod, ThisFunc}} ->
+ ok;
+ {ok, {ExistingMod, ExistingFunc}} ->
+ rabbit_log_prelaunch:debug(
+ "Setting up kernel shutdown function: ~s:~s/1 "
+ "(chained with ~s:~s/1)",
+ [ThisMod, ThisFunc, ExistingMod, ExistingFunc]),
+ ok = persistent_term:put(
+ ?PT_KEY_SHUTDOWN_FUNC,
+ ExistingShutdownFunc),
+ ok = record_kernel_shutdown_func(ThisMod, ThisFunc);
+ _ ->
+ rabbit_log_prelaunch:debug(
+ "Setting up kernel shutdown function: ~s:~s/1",
+ [ThisMod, ThisFunc]),
+ ok = record_kernel_shutdown_func(ThisMod, ThisFunc)
+ end.
+
+record_kernel_shutdown_func(Mod, Func) ->
+ application:set_env(
+ kernel, shutdown_func, {Mod, Func},
+ [{persistent, true}]).
+
+shutdown_func(Reason) ->
+ rabbit_log_prelaunch:debug(
+ "Running ~s:shutdown_func() as part of `kernel` shutdown", [?MODULE]),
+ Context = get_context(),
+ remove_pid_file(Context),
+ ChainedShutdownFunc = persistent_term:get(
+ ?PT_KEY_SHUTDOWN_FUNC,
+ undefined),
+ case ChainedShutdownFunc of
+ {ChainedMod, ChainedFunc} -> ChainedMod:ChainedFunc(Reason);
+ _ -> ok
+ end.
+
+write_pid_file(#{pid_file := PidFile}) ->
+ rabbit_log_prelaunch:debug("Writing PID file: ~s", [PidFile]),
+ case filelib:ensure_dir(PidFile) of
+ ok ->
+ OSPid = os:getpid(),
+ case file:write_file(PidFile, OSPid) of
+ ok ->
+ ok;
+ {error, Reason} = Error ->
+ rabbit_log_prelaunch:warning(
+ "Failed to write PID file \"~s\": ~s",
+ [PidFile, file:format_error(Reason)]),
+ Error
+ end;
+ {error, Reason} = Error ->
+ rabbit_log_prelaunch:warning(
+ "Failed to create PID file \"~s\" directory: ~s",
+ [PidFile, file:format_error(Reason)]),
+ Error
+ end;
+write_pid_file(_) ->
+ ok.
+
+remove_pid_file(#{pid_file := PidFile, keep_pid_file_on_exit := true}) ->
+ rabbit_log_prelaunch:debug("Keeping PID file: ~s", [PidFile]),
+ ok;
+remove_pid_file(#{pid_file := PidFile}) ->
+ rabbit_log_prelaunch:debug("Deleting PID file: ~s", [PidFile]),
+ _ = file:delete(PidFile),
+ ok;
+remove_pid_file(_) ->
+ ok.
diff --git a/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_prelaunch_app.erl b/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_prelaunch_app.erl
new file mode 100644
index 0000000000..cef7f05e77
--- /dev/null
+++ b/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_prelaunch_app.erl
@@ -0,0 +1,11 @@
+-module(rabbit_prelaunch_app).
+-behaviour(application).
+
+-export([start/2]).
+-export([stop/1]).
+
+start(_Type, _Args) ->
+ rabbit_prelaunch_sup:start_link().
+
+stop(_State) ->
+ ok.
diff --git a/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_prelaunch_conf.erl b/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_prelaunch_conf.erl
new file mode 100644
index 0000000000..fbbae7a185
--- /dev/null
+++ b/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_prelaunch_conf.erl
@@ -0,0 +1,520 @@
+-module(rabbit_prelaunch_conf).
+
+-include_lib("kernel/include/file.hrl").
+-include_lib("stdlib/include/zip.hrl").
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-export([setup/1,
+ get_config_state/0,
+ generate_config_from_cuttlefish_files/3,
+ decrypt_config/1]).
+
+-ifdef(TEST).
+-export([decrypt_config/2]).
+-endif.
+
+setup(Context) ->
+ rabbit_log_prelaunch:debug(""),
+ rabbit_log_prelaunch:debug("== Configuration =="),
+
+ %% TODO: Check if directories/files are inside Mnesia dir.
+
+ ok = set_default_config(),
+
+ AdditionalConfigFiles = find_additional_config_files(Context),
+ AdvancedConfigFile = find_actual_advanced_config_file(Context),
+ State = case find_actual_main_config_file(Context) of
+ {MainConfigFile, erlang} ->
+ Config = load_cuttlefish_config_file(Context,
+ AdditionalConfigFiles,
+ MainConfigFile),
+ Apps = [App || {App, _} <- Config],
+ decrypt_config(Apps),
+ #{config_files => AdditionalConfigFiles,
+ config_advanced_file => MainConfigFile};
+ {MainConfigFile, cuttlefish} ->
+ ConfigFiles = [MainConfigFile | AdditionalConfigFiles],
+ Config = load_cuttlefish_config_file(Context,
+ ConfigFiles,
+ AdvancedConfigFile),
+ Apps = [App || {App, _} <- Config],
+ decrypt_config(Apps),
+ #{config_files => ConfigFiles,
+ config_advanced_file => AdvancedConfigFile};
+ undefined when AdditionalConfigFiles =/= [] ->
+ ConfigFiles = AdditionalConfigFiles,
+ Config = load_cuttlefish_config_file(Context,
+ ConfigFiles,
+ AdvancedConfigFile),
+ Apps = [App || {App, _} <- Config],
+ decrypt_config(Apps),
+ #{config_files => ConfigFiles,
+ config_advanced_file => AdvancedConfigFile};
+ undefined when AdvancedConfigFile =/= undefined ->
+ rabbit_log_prelaunch:warning(
+ "Using RABBITMQ_ADVANCED_CONFIG_FILE: ~s",
+ [AdvancedConfigFile]),
+ Config = load_cuttlefish_config_file(Context,
+ AdditionalConfigFiles,
+ AdvancedConfigFile),
+ Apps = [App || {App, _} <- Config],
+ decrypt_config(Apps),
+ #{config_files => AdditionalConfigFiles,
+ config_advanced_file => AdvancedConfigFile};
+ undefined ->
+ #{config_files => [],
+ config_advanced_file => undefined}
+ end,
+ ok = override_with_hard_coded_critical_config(),
+ ok = set_credentials_obfuscation_secret(),
+    rabbit_log_prelaunch:debug(
+        "Saving config state to persistent term: ~p", [State]),
+ store_config_state(State).
+
+store_config_state(ConfigState) ->
+ persistent_term:put({rabbitmq_prelaunch, config_state}, ConfigState).
+
+get_config_state() ->
+ persistent_term:get({rabbitmq_prelaunch, config_state}, undefined).
+
+%% -------------------------------------------------------------------
+%% Configuration loading.
+%% -------------------------------------------------------------------
+
+set_default_config() ->
+ rabbit_log_prelaunch:debug("Setting default config"),
+ Config = [
+ {ra,
+ [
+                {wal_max_size_bytes, 536870912}, %% 512 MiB (512 * 2 ^ 20)
+ {wal_max_batch_size, 4096}
+ ]},
+ {aten,
+ [
+                %% A greater poll interval has been shown to trigger
+                %% fewer false positive leader elections in quorum
+                %% queues. The cost is slightly longer detection time
+                %% when a genuine network issue occurs. Ra still uses
+                %% Erlang monitors, so whenever a connection goes down
+                %% it is still detected immediately.
+ {poll_interval, 5000}
+ ]},
+ {sysmon_handler,
+ [{process_limit, 100},
+ {port_limit, 100},
+ {gc_ms_limit, 0},
+ {schedule_ms_limit, 0},
+ {heap_word_limit, 0},
+ {busy_port, false},
+ {busy_dist_port, true}]}
+ ],
+ apply_erlang_term_based_config(Config).
+
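+%% When the configured file does not exist as-is, e.g. with a
+%% main_config_file of "/etc/rabbitmq/rabbitmq" (path illustrative),
+%% this tries both "rabbitmq.config" (Erlang terms) and "rabbitmq.conf"
+%% (Cuttlefish), preferring the old .config format when both exist.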
+find_actual_main_config_file(#{main_config_file := File}) ->
+ case filelib:is_regular(File) of
+ true ->
+ Format = case filename:extension(File) of
+ ".conf" -> cuttlefish;
+ ".config" -> erlang;
+ _ -> determine_config_format(File)
+ end,
+ {File, Format};
+ false ->
+ OldFormatFile = File ++ ".config",
+ NewFormatFile = File ++ ".conf",
+ case filelib:is_regular(OldFormatFile) of
+ true ->
+ case filelib:is_regular(NewFormatFile) of
+ true ->
+ rabbit_log_prelaunch:warning(
+ "Both old (.config) and new (.conf) format "
+ "config files exist."),
+ rabbit_log_prelaunch:warning(
+ "Using the old format config file: ~s",
+ [OldFormatFile]),
+ rabbit_log_prelaunch:warning(
+ "Please update your config files to the new "
+ "format and remove the old file."),
+ ok;
+ false ->
+ ok
+ end,
+ {OldFormatFile, erlang};
+ false ->
+ case filelib:is_regular(NewFormatFile) of
+ true -> {NewFormatFile, cuttlefish};
+ false -> undefined
+ end
+ end
+ end.
+
+find_additional_config_files(#{additional_config_files := Pattern})
+ when Pattern =/= undefined ->
+ Pattern1 = case filelib:is_dir(Pattern) of
+ true -> filename:join(Pattern, "*");
+ false -> Pattern
+ end,
+ OnlyFiles = [File ||
+ File <- filelib:wildcard(Pattern1),
+ filelib:is_regular(File)],
+ lists:sort(OnlyFiles);
+find_additional_config_files(_) ->
+ [].
+
+find_actual_advanced_config_file(#{advanced_config_file := File}) ->
+ case filelib:is_regular(File) of
+ true -> File;
+ false -> undefined
+ end.
+
+determine_config_format(File) ->
+ case filelib:file_size(File) of
+ 0 ->
+ cuttlefish;
+ _ ->
+ case file:consult(File) of
+ {ok, _} -> erlang;
+ _ -> cuttlefish
+ end
+ end.
+
+load_cuttlefish_config_file(Context,
+ ConfigFiles,
+ AdvancedConfigFile) ->
+ Config = generate_config_from_cuttlefish_files(
+ Context, ConfigFiles, AdvancedConfigFile),
+ apply_erlang_term_based_config(Config),
+ Config.
+
+generate_config_from_cuttlefish_files(Context,
+ ConfigFiles,
+ AdvancedConfigFile) ->
+ %% Load schemas.
+ SchemaFiles = find_cuttlefish_schemas(Context),
+ case SchemaFiles of
+ [] ->
+ rabbit_log_prelaunch:error(
+ "No configuration schema found~n", []),
+ throw({error, no_configuration_schema_found});
+ _ ->
+ rabbit_log_prelaunch:debug(
+ "Configuration schemas found:~n", []),
+ lists:foreach(
+ fun(SchemaFile) ->
+ rabbit_log_prelaunch:debug(" - ~ts", [SchemaFile])
+ end,
+ SchemaFiles),
+ ok
+ end,
+ Schema = cuttlefish_schema:files(SchemaFiles),
+
+ %% Load configuration.
+ rabbit_log_prelaunch:debug(
+ "Loading configuration files (Cuttlefish based):"),
+ lists:foreach(
+ fun(ConfigFile) ->
+ rabbit_log_prelaunch:debug(" - ~ts", [ConfigFile])
+ end, ConfigFiles),
+ case cuttlefish_conf:files(ConfigFiles) of
+ {errorlist, Errors} ->
+ rabbit_log_prelaunch:error("Error parsing configuration:"),
+ lists:foreach(
+ fun(Error) ->
+ rabbit_log_prelaunch:error(
+ " - ~ts",
+ [cuttlefish_error:xlate(Error)])
+ end, Errors),
+ rabbit_log_prelaunch:error(
+ "Are these files using the Cuttlefish format?"),
+ throw({error, failed_to_parse_configuration_file});
+ Config0 ->
+ %% Finalize configuration, based on the schema.
+ Config = case cuttlefish_generator:map(Schema, Config0) of
+ {error, Phase, {errorlist, Errors}} ->
+ %% TODO
+ rabbit_log_prelaunch:error(
+ "Error preparing configuration in phase ~ts:",
+ [Phase]),
+ lists:foreach(
+ fun(Error) ->
+ rabbit_log_prelaunch:error(
+ " - ~ts",
+ [cuttlefish_error:xlate(Error)])
+ end, Errors),
+ throw(
+ {error, failed_to_prepare_configuration});
+ ValidConfig ->
+ proplists:delete(vm_args, ValidConfig)
+ end,
+
+ %% Apply advanced configuration overrides, if any.
+ override_with_advanced_config(Config, AdvancedConfigFile)
+ end.
+
+find_cuttlefish_schemas(Context) ->
+ Apps = list_apps(Context),
+ rabbit_log_prelaunch:debug(
+ "Looking up configuration schemas in the following applications:"),
+ find_cuttlefish_schemas(Apps, []).
+
+find_cuttlefish_schemas([App | Rest], AllSchemas) ->
+ Schemas = list_schemas_in_app(App),
+ find_cuttlefish_schemas(Rest, AllSchemas ++ Schemas);
+find_cuttlefish_schemas([], AllSchemas) ->
+ lists:sort(fun(A,B) -> A < B end, AllSchemas).
+
+list_apps(#{os_type := {win32, _}, plugins_path := PluginsPath}) ->
+ PluginsDirs = string:lexemes(PluginsPath, ";"),
+ list_apps1(PluginsDirs, []);
+list_apps(#{plugins_path := PluginsPath}) ->
+ PluginsDirs = string:lexemes(PluginsPath, ":"),
+ list_apps1(PluginsDirs, []).
+
+
+list_apps1([Dir | Rest], Apps) ->
+ case file:list_dir(Dir) of
+ {ok, Filenames} ->
+            NewApps = [list_to_atom(
+                         hd(
+                           string:split(filename:basename(F, ".ez"), "-")))
+ || F <- Filenames],
+ Apps1 = lists:umerge(Apps, lists:sort(NewApps)),
+ list_apps1(Rest, Apps1);
+ {error, Reason} ->
+ rabbit_log_prelaunch:debug(
+ "Failed to list directory \"~ts\" content: ~ts",
+ [Dir, file:format_error(Reason)]),
+ list_apps1(Rest, Apps)
+ end;
+list_apps1([], AppInfos) ->
+ AppInfos.
+
+list_schemas_in_app(App) ->
+ {Loaded, Unload} = case application:load(App) of
+ ok -> {true, true};
+ {error, {already_loaded, _}} -> {true, false};
+ {error, Reason} -> {Reason, false}
+ end,
+ List = case Loaded of
+ true ->
+ case code:priv_dir(App) of
+ {error, bad_name} ->
+ rabbit_log_prelaunch:debug(
+ " [ ] ~s (no readable priv dir)", [App]),
+ [];
+ PrivDir ->
+ SchemaDir = filename:join([PrivDir, "schema"]),
+ do_list_schemas_in_app(App, SchemaDir)
+ end;
+ Reason1 ->
+ rabbit_log_prelaunch:debug(
+ " [ ] ~s (failed to load application: ~p)",
+ [App, Reason1]),
+ []
+ end,
+ case Unload of
+ true -> _ = application:unload(App),
+ ok;
+ false -> ok
+ end,
+ List.
+
+do_list_schemas_in_app(App, SchemaDir) ->
+ case erl_prim_loader:list_dir(SchemaDir) of
+ {ok, Files} ->
+ rabbit_log_prelaunch:debug(" [x] ~s", [App]),
+ [filename:join(SchemaDir, File)
+ || [C | _] = File <- Files,
+ C =/= $.];
+ error ->
+ rabbit_log_prelaunch:debug(
+ " [ ] ~s (no readable schema dir)", [App]),
+ []
+ end.
+
+override_with_advanced_config(Config, undefined) ->
+ Config;
+override_with_advanced_config(Config, AdvancedConfigFile) ->
+ rabbit_log_prelaunch:debug(
+ "Override with advanced configuration file \"~ts\"",
+ [AdvancedConfigFile]),
+ case file:consult(AdvancedConfigFile) of
+ {ok, [AdvancedConfig]} ->
+ cuttlefish_advanced:overlay(Config, AdvancedConfig);
+ {ok, OtherTerms} ->
+ rabbit_log_prelaunch:error(
+ "Failed to load advanced configuration file \"~ts\", "
+ "incorrect format: ~p",
+ [AdvancedConfigFile, OtherTerms]),
+ throw({error, failed_to_parse_advanced_configuration_file});
+ {error, Reason} ->
+ rabbit_log_prelaunch:error(
+ "Failed to load advanced configuration file \"~ts\": ~ts",
+ [AdvancedConfigFile, file:format_error(Reason)]),
+ throw({error, failed_to_read_advanced_configuration_file})
+ end.
+
+override_with_hard_coded_critical_config() ->
+ rabbit_log_prelaunch:debug("Override with hard-coded critical config"),
+ Config = [
+ {ra,
+ %% Make Ra use a custom logger that dispatches to lager
+ %% instead of the default OTP logger
+ [{logger_module, rabbit_log_ra_shim}]},
+ {osiris,
+ [{logger_module, rabbit_log_osiris_shim}]}
+ ],
+ apply_erlang_term_based_config(Config).
+
+apply_erlang_term_based_config([{_, []} | Rest]) ->
+ apply_erlang_term_based_config(Rest);
+apply_erlang_term_based_config([{App, Vars} | Rest]) ->
+ rabbit_log_prelaunch:debug(" Applying configuration for '~s':", [App]),
+ ok = apply_app_env_vars(App, Vars),
+ apply_erlang_term_based_config(Rest);
+apply_erlang_term_based_config([]) ->
+ ok.
+
+apply_app_env_vars(App, [{Var, Value} | Rest]) ->
+ rabbit_log_prelaunch:debug(" - ~s = ~p", [Var, Value]),
+ ok = application:set_env(App, Var, Value, [{persistent, true}]),
+ apply_app_env_vars(App, Rest);
+apply_app_env_vars(_, []) ->
+ ok.
+
+set_credentials_obfuscation_secret() ->
+ rabbit_log_prelaunch:debug(
+ "Refreshing credentials obfuscation configuration from env: ~p",
+ [application:get_all_env(credentials_obfuscation)]),
+ ok = credentials_obfuscation:refresh_config(),
+ CookieBin = rabbit_data_coercion:to_binary(erlang:get_cookie()),
+ rabbit_log_prelaunch:debug(
+ "Setting credentials obfuscation secret to '~s'", [CookieBin]),
+ ok = credentials_obfuscation:set_secret(CookieBin).
+
+%% -------------------------------------------------------------------
+%% Config decryption.
+%% -------------------------------------------------------------------
+
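+%% The decoder is configured under the `rabbit` application; for
+%% example (the passphrase file path below is illustrative):
+%%
+%%     {rabbit, [{config_entry_decoder,
+%%                [{passphrase, {file, "/path/to/passphrase.txt"}}]}]}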
+decrypt_config(Apps) ->
+ rabbit_log_prelaunch:debug("Decoding encrypted config values (if any)"),
+ ConfigEntryDecoder = application:get_env(rabbit, config_entry_decoder, []),
+ decrypt_config(Apps, ConfigEntryDecoder).
+
+decrypt_config([], _) ->
+ ok;
+decrypt_config([App | Apps], Algo) ->
+ Algo1 = decrypt_app(App, application:get_all_env(App), Algo),
+ decrypt_config(Apps, Algo1).
+
+decrypt_app(_, [], Algo) ->
+ Algo;
+decrypt_app(App, [{Key, Value} | Tail], Algo) ->
+ Algo2 = try
+ case decrypt(Value, Algo) of
+ {Value, Algo1} ->
+ Algo1;
+ {NewValue, Algo1} ->
+ rabbit_log_prelaunch:debug(
+ "Value of `~s` decrypted", [Key]),
+ ok = application:set_env(App, Key, NewValue,
+ [{persistent, true}]),
+ Algo1
+ end
+ catch
+ throw:{bad_config_entry_decoder, _} = Error ->
+ throw(Error);
+ _:Msg ->
+ throw({config_decryption_error, {key, Key}, Msg})
+ end,
+ decrypt_app(App, Tail, Algo2).
+
+decrypt({encrypted, _} = EncValue,
+ {Cipher, Hash, Iterations, PassPhrase} = Algo) ->
+ {rabbit_pbe:decrypt_term(Cipher, Hash, Iterations, PassPhrase, EncValue),
+ Algo};
+decrypt({encrypted, _} = EncValue,
+ ConfigEntryDecoder)
+ when is_list(ConfigEntryDecoder) ->
+ Algo = config_entry_decoder_to_algo(ConfigEntryDecoder),
+ decrypt(EncValue, Algo);
+decrypt(List, Algo) when is_list(List) ->
+ decrypt_list(List, Algo, []);
+decrypt(Value, Algo) ->
+ {Value, Algo}.
+
+%% We make no distinction between strings and other lists.
+%% When we receive a string, we loop through each element
+%% and ultimately return the string unmodified, as intended.
+decrypt_list([], Algo, Acc) ->
+ {lists:reverse(Acc), Algo};
+decrypt_list([{Key, Value} | Tail], Algo, Acc)
+ when Key =/= encrypted ->
+ {Value1, Algo1} = decrypt(Value, Algo),
+ decrypt_list(Tail, Algo1, [{Key, Value1} | Acc]);
+decrypt_list([Value | Tail], Algo, Acc) ->
+ {Value1, Algo1} = decrypt(Value, Algo),
+ decrypt_list(Tail, Algo1, [Value1 | Acc]).
+
+config_entry_decoder_to_algo(ConfigEntryDecoder) ->
+ case get_passphrase(ConfigEntryDecoder) of
+ undefined ->
+ throw({bad_config_entry_decoder, missing_passphrase});
+ PassPhrase ->
+ {
+ proplists:get_value(
+ cipher, ConfigEntryDecoder, rabbit_pbe:default_cipher()),
+ proplists:get_value(
+ hash, ConfigEntryDecoder, rabbit_pbe:default_hash()),
+ proplists:get_value(
+ iterations, ConfigEntryDecoder,
+ rabbit_pbe:default_iterations()),
+ PassPhrase
+ }
+ end.
+
+get_passphrase(ConfigEntryDecoder) ->
+ rabbit_log_prelaunch:debug("Getting encrypted config passphrase"),
+ case proplists:get_value(passphrase, ConfigEntryDecoder) of
+ prompt ->
+ IoDevice = get_input_iodevice(),
+ ok = io:setopts(IoDevice, [{echo, false}]),
+ PP = lists:droplast(io:get_line(IoDevice,
+ "\nPlease enter the passphrase to unlock encrypted "
+ "configuration entries.\n\nPassphrase: ")),
+ ok = io:setopts(IoDevice, [{echo, true}]),
+ io:format(IoDevice, "~n", []),
+ PP;
+ {file, Filename} ->
+ {ok, File} = file:read_file(Filename),
+ [PP|_] = binary:split(File, [<<"\r\n">>, <<"\n">>]),
+ PP;
+ PP ->
+ PP
+ end.
+
+%% This function retrieves the correct IoDevice for requesting
+%% input. The problem with using the default IoDevice is that
+%% the Erlang shell prevents us from getting the input.
+%%
+%% We therefore look for the I/O process used by the shell and,
+%% if it cannot be found (because the shell is not started, e.g.
+%% with -noshell), we use the 'user' process.
+%%
+%% This function will not work when either -oldshell or -noinput
+%% options are passed to erl.
+get_input_iodevice() ->
+ case whereis(user) of
+ undefined ->
+ user;
+ User ->
+ case group:interfaces(User) of
+ [] ->
+ user;
+ [{user_drv, Drv}] ->
+ case user_drv:interfaces(Drv) of
+ [] -> user;
+ [{current_group, IoDevice}] -> IoDevice
+ end
+ end
+ end.
diff --git a/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_prelaunch_dist.erl b/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_prelaunch_dist.erl
new file mode 100644
index 0000000000..3d718438a7
--- /dev/null
+++ b/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_prelaunch_dist.erl
@@ -0,0 +1,104 @@
+-module(rabbit_prelaunch_dist).
+
+-export([setup/1]).
+
+setup(#{nodename := Node, nodename_type := NameType} = Context) ->
+ rabbit_log_prelaunch:debug(""),
+ rabbit_log_prelaunch:debug("== Erlang distribution =="),
+ rabbit_log_prelaunch:debug("Rqeuested node name: ~s (type: ~s)",
+ [Node, NameType]),
+ case node() of
+ nonode@nohost ->
+ ok = rabbit_nodes_common:ensure_epmd(),
+ ok = dist_port_range_check(Context),
+ ok = dist_port_use_check(Context),
+ ok = duplicate_node_check(Context),
+
+ ok = do_setup(Context);
+ Node ->
+ rabbit_log_prelaunch:debug(
+ "Erlang distribution already running", []),
+ ok;
+ Unexpected ->
+ throw({error, {erlang_dist_running_with_unexpected_nodename,
+ Unexpected, Node}})
+ end,
+ ok.
+
+do_setup(#{nodename := Node, nodename_type := NameType}) ->
+ rabbit_log_prelaunch:debug("Starting Erlang distribution", []),
+ case application:get_env(kernel, net_ticktime) of
+ {ok, Ticktime} when is_integer(Ticktime) andalso Ticktime >= 1 ->
+ %% The value passed to net_kernel:start/1 is the
+ %% "minimum transition traffic interval" as defined in
+ %% net_kernel:set_net_ticktime/1.
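+            %% For example, the default net_ticktime of 60 seconds
+            %% yields an MTTI of 15000 ms.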
+ MTTI = Ticktime * 1000 div 4,
+ {ok, _} = net_kernel:start([Node, NameType, MTTI]),
+ ok;
+ _ ->
+ {ok, _} = net_kernel:start([Node, NameType]),
+ ok
+ end,
+ ok.
+
+%% Check whether a node with the same name is already running
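+%% Querying epmd requires a running distribution, so we first start it
+%% with a throwaway name ("<name>_prelaunch_<ospid>@localhost") and stop
+%% it again if the requested name turns out to be free.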
+duplicate_node_check(#{split_nodename := {NodeName, NodeHost}}) ->
+ rabbit_log_prelaunch:debug(
+ "Checking if node name ~s is already used", [NodeName]),
+ PrelaunchName = rabbit_nodes_common:make(
+ {NodeName ++ "_prelaunch_" ++ os:getpid(),
+ "localhost"}),
+ {ok, _} = net_kernel:start([PrelaunchName, shortnames]),
+ case rabbit_nodes_common:names(NodeHost) of
+ {ok, NamePorts} ->
+ case proplists:is_defined(NodeName, NamePorts) of
+ true ->
+ throw({error, {duplicate_node_name, NodeName, NodeHost}});
+ false ->
+ ok = net_kernel:stop(),
+ ok
+ end;
+ {error, EpmdReason} ->
+ throw({error, {epmd_error, NodeHost, EpmdReason}})
+ end.
+
+dist_port_range_check(#{erlang_dist_tcp_port := DistTcpPort}) ->
+ rabbit_log_prelaunch:debug(
+ "Checking if TCP port ~b is valid", [DistTcpPort]),
+ case DistTcpPort of
+ _ when DistTcpPort < 1 orelse DistTcpPort > 65535 ->
+ throw({error, {invalid_dist_port_range, DistTcpPort}});
+ _ ->
+ ok
+ end.
+
+dist_port_use_check(#{split_nodename := {_, NodeHost},
+ erlang_dist_tcp_port := DistTcpPort}) ->
+ rabbit_log_prelaunch:debug(
+ "Checking if TCP port ~b is available", [DistTcpPort]),
+ dist_port_use_check_ipv4(NodeHost, DistTcpPort).
+
+dist_port_use_check_ipv4(NodeHost, Port) ->
+ case gen_tcp:listen(Port, [inet, {reuseaddr, true}]) of
+ {ok, Sock} -> gen_tcp:close(Sock);
+ {error, einval} -> dist_port_use_check_ipv6(NodeHost, Port);
+ {error, _} -> dist_port_use_check_fail(Port, NodeHost)
+ end.
+
+dist_port_use_check_ipv6(NodeHost, Port) ->
+ case gen_tcp:listen(Port, [inet6, {reuseaddr, true}]) of
+ {ok, Sock} -> gen_tcp:close(Sock);
+ {error, _} -> dist_port_use_check_fail(Port, NodeHost)
+ end.
+
+-spec dist_port_use_check_fail(non_neg_integer(), string()) ->
+ no_return().
+
+dist_port_use_check_fail(Port, Host) ->
+ {ok, Names} = rabbit_nodes_common:names(Host),
+ case [N || {N, P} <- Names, P =:= Port] of
+ [] ->
+ throw({error, {dist_port_already_used, Port, not_erlang, Host}});
+ [Name] ->
+ throw({error, {dist_port_already_used, Port, Name, Host}})
+ end.
diff --git a/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_prelaunch_early_logging.erl b/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_prelaunch_early_logging.erl
new file mode 100644
index 0000000000..4e371c76ae
--- /dev/null
+++ b/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_prelaunch_early_logging.erl
@@ -0,0 +1,115 @@
+-module(rabbit_prelaunch_early_logging).
+
+-include_lib("rabbit_common/include/rabbit_log.hrl").
+
+-export([setup_early_logging/2,
+ enable_quick_dbg/1,
+ use_colored_logging/0,
+ use_colored_logging/1,
+ list_expected_sinks/0]).
+
+setup_early_logging(#{log_levels := undefined} = Context,
+ LagerEventToStdout) ->
+ setup_early_logging(Context#{log_levels => get_default_log_level()},
+ LagerEventToStdout);
+setup_early_logging(Context, LagerEventToStdout) ->
+ Configured = lists:member(
+ lager_util:make_internal_sink_name(rabbit_log_prelaunch),
+ lager:list_all_sinks()),
+ case Configured of
+ true -> ok;
+ false -> do_setup_early_logging(Context, LagerEventToStdout)
+ end.
+
+get_default_log_level() ->
+ #{"prelaunch" => warning}.
+
+do_setup_early_logging(#{log_levels := LogLevels} = Context,
+ LagerEventToStdout) ->
+ redirect_logger_messages_to_lager(),
+ Colored = use_colored_logging(Context),
+ application:set_env(lager, colored, Colored),
+ ConsoleBackend = lager_console_backend,
+ case LagerEventToStdout of
+ true ->
+ GLogLevel = case LogLevels of
+ #{global := Level} -> Level;
+ _ -> warning
+ end,
+ _ = lager_app:start_handler(
+ lager_event, ConsoleBackend, [{level, GLogLevel}]),
+ ok;
+ false ->
+ ok
+ end,
+ lists:foreach(
+ fun(Sink) ->
+ CLogLevel = get_log_level(LogLevels, Sink),
+ lager_app:configure_sink(
+ Sink,
+ [{handlers, [{ConsoleBackend, [{level, CLogLevel}]}]}])
+ end, list_expected_sinks()),
+ ok.
+
+redirect_logger_messages_to_lager() ->
+ io:format(standard_error, "Configuring logger redirection~n", []),
+ ok = logger:add_handler(rabbit_log, rabbit_log, #{}),
+ ok = logger:set_primary_config(level, all).
+
+use_colored_logging() ->
+ use_colored_logging(rabbit_prelaunch:get_context()).
+
+use_colored_logging(#{log_levels := #{color := true},
+ output_supports_colors := true}) ->
+ true;
+use_colored_logging(_) ->
+ false.
+
+list_expected_sinks() ->
+ Key = {?MODULE, lager_extra_sinks},
+ case persistent_term:get(Key, undefined) of
+ undefined ->
+ CompileOptions = proplists:get_value(options,
+ module_info(compile),
+ []),
+ AutoList = [lager_util:make_internal_sink_name(M)
+ || M <- proplists:get_value(lager_extra_sinks,
+ CompileOptions, [])],
+ List = case lists:member(?LAGER_SINK, AutoList) of
+ true -> AutoList;
+ false -> [?LAGER_SINK | AutoList]
+ end,
+            %% Store the list in a persistent term. If this module is
+            %% later cover-compiled, the compile option will be lost,
+            %% so we will still be able to retrieve the list from the
+            %% persistent term.
+ persistent_term:put(Key, List),
+ List;
+ List ->
+ List
+ end.
+
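+%% For example, the sink `rabbit_log_prelaunch_lager_event` maps to the
+%% category "prelaunch".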
+sink_to_category(Sink) when is_atom(Sink) ->
+ re:replace(
+ atom_to_list(Sink),
+ "^rabbit_log_(.+)_lager_event$",
+ "\\1",
+ [{return, list}]).
+
+get_log_level(LogLevels, Sink) ->
+ Category = sink_to_category(Sink),
+ case LogLevels of
+ #{Category := Level} -> Level;
+ #{global := Level} -> Level;
+ _ -> warning
+ end.
+
+enable_quick_dbg(#{dbg_output := Output, dbg_mods := Mods}) ->
+ case Output of
+ stdout -> {ok, _} = dbg:tracer(),
+ ok;
+ _ -> {ok, _} = dbg:tracer(port, dbg:trace_port(file, Output)),
+ ok
+ end,
+ {ok, _} = dbg:p(all, c),
+ lists:foreach(fun(M) -> {ok, _} = dbg:tp(M, cx) end, Mods).
diff --git a/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_prelaunch_erlang_compat.erl b/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_prelaunch_erlang_compat.erl
new file mode 100644
index 0000000000..1e8fe2690d
--- /dev/null
+++ b/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_prelaunch_erlang_compat.erl
@@ -0,0 +1,47 @@
+-module(rabbit_prelaunch_erlang_compat).
+
+-export([check/1]).
+
+-define(OTP_MINIMUM, "21.3").
+-define(ERTS_MINIMUM, "10.3").
+
+check(_Context) ->
+ rabbit_log_prelaunch:debug(""),
+ rabbit_log_prelaunch:debug("== Erlang/OTP compatibility check =="),
+
+ ERTSVer = erlang:system_info(version),
+ OTPRel = rabbit_misc:otp_release(),
+ rabbit_log_prelaunch:debug(
+ "Requiring: Erlang/OTP ~s (ERTS ~s)", [?OTP_MINIMUM, ?ERTS_MINIMUM]),
+ rabbit_log_prelaunch:debug(
+ "Running: Erlang/OTP ~s (ERTS ~s)", [OTPRel, ERTSVer]),
+
+ case rabbit_misc:version_compare(?ERTS_MINIMUM, ERTSVer, lte) of
+ true when ?ERTS_MINIMUM =/= ERTSVer ->
+ rabbit_log_prelaunch:debug(
+ "Erlang/OTP version requirement satisfied"),
+ ok;
+ true when ?ERTS_MINIMUM =:= ERTSVer andalso ?OTP_MINIMUM =< OTPRel ->
+ %% When a critical regression or bug is found, a new OTP
+ %% release can be published without changing the ERTS
+ %% version. For instance, this is the case with R16B03 and
+ %% R16B03-1.
+ %%
+ %% In this case, we compare the release versions
+ %% alphabetically.
+ ok;
+ _ ->
+ Msg =
+ "This RabbitMQ version cannot run on Erlang ~s (erts ~s): "
+ "minimum required version is ~s (erts ~s)",
+ Args = [OTPRel, ERTSVer, ?OTP_MINIMUM, ?ERTS_MINIMUM],
+ rabbit_log_prelaunch:error(Msg, Args),
+
+ %% Also print to stderr to make this more visible
+ io:format(standard_error, "Error: " ++ Msg ++ "~n", Args),
+
+ Msg2 = rabbit_misc:format(
+ "Erlang ~s or later is required, started on ~s",
+ [?OTP_MINIMUM, OTPRel]),
+ throw({error, {erlang_version_too_old, Msg2}})
+ end.
diff --git a/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_prelaunch_errors.erl b/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_prelaunch_errors.erl
new file mode 100644
index 0000000000..b2cc03d069
--- /dev/null
+++ b/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_prelaunch_errors.erl
@@ -0,0 +1,114 @@
+-module(rabbit_prelaunch_errors).
+
+-export([format_error/1,
+ format_exception/3,
+ log_error/1,
+ log_exception/3]).
+
+-define(BOOT_FAILED_HEADER,
+ "\n"
+ "BOOT FAILED\n"
+ "===========\n").
+
+-define(BOOT_FAILED_FOOTER,
+ "\n").
+
+log_error(Error) ->
+ Message = format_error(Error),
+ log_message(Message).
+
+format_error({error, {duplicate_node_name, NodeName, NodeHost}}) ->
+ rabbit_misc:format(
+ "ERROR: node with name ~p is already running on host ~p",
+ [NodeName, NodeHost]);
+format_error({error, {epmd_error, NodeHost, EpmdReason}}) ->
+ rabbit_misc:format(
+ "ERROR: epmd error for host ~s: ~s",
+ [NodeHost, rabbit_misc:format_inet_error(EpmdReason)]);
+format_error({error, {invalid_dist_port_range, DistTcpPort}}) ->
+ rabbit_misc:format(
+ "Invalid Erlang distribution TCP port: ~b", [DistTcpPort]);
+format_error({error, {dist_port_already_used, Port, not_erlang, Host}}) ->
+ rabbit_misc:format(
+ "ERROR: could not bind to distribution port ~b on host ~s. It could "
+ "be in use by another process or cannot be bound to (e.g. due to a "
+ "security policy)", [Port, Host]);
+format_error({error, {dist_port_already_used, Port, Name, Host}}) ->
+ rabbit_misc:format(
+ "ERROR: could not bind to distribution port ~b, it is in use by "
+ "another node: ~s@~s", [Port, Name, Host]);
+format_error({error, {erlang_dist_running_with_unexpected_nodename,
+ Unexpected, Node}}) ->
+ rabbit_misc:format(
+ "Erlang distribution running with another node name (~s) "
+ "than the configured one (~s)",
+ [Unexpected, Node]);
+format_error({bad_config_entry_decoder, missing_passphrase}) ->
+ rabbit_misc:format(
+ "Missing passphrase or missing passphrase read method in "
+ "`config_entry_decoder`", []);
+format_error({config_decryption_error, {key, Key}, _Msg}) ->
+ rabbit_misc:format(
+ "Error while decrypting key '~p'. Please check encrypted value, "
+ "passphrase, and encryption configuration~n",
+ [Key]);
+format_error({error, {timeout_waiting_for_tables, AllNodes, _}}) ->
+ Suffix =
+ "~nBACKGROUND~n==========~n~n"
+ "This cluster node was shut down while other nodes were still running.~n"
+ "To avoid losing data, you should start the other nodes first, then~n"
+ "start this one. To force this node to start, first invoke~n"
+ "\"rabbitmqctl force_boot\". If you do so, any changes made on other~n"
+ "cluster nodes after this one was shut down may be lost.",
+ {Message, Nodes} =
+ case AllNodes -- [node()] of
+ [] -> {rabbit_misc:format(
+ "Timeout contacting cluster nodes. Since RabbitMQ was"
+ " shut down forcefully~nit cannot determine which nodes"
+ " are timing out.~n" ++ Suffix, []),
+ []};
+ Ns -> {rabbit_misc:format(
+ "Timeout contacting cluster nodes: ~p.~n" ++ Suffix,
+ [Ns]),
+ Ns}
+ end,
+ Message ++ "\n" ++ rabbit_nodes_common:diagnostics(Nodes);
+format_error({error, {cannot_log_to_file, unknown, Reason}}) ->
+ rabbit_misc:format(
+ "failed to initialised logger: ~p~n",
+ [Reason]);
+format_error({error, {cannot_log_to_file, LogFile,
+ {cannot_create_parent_dirs, _, Reason}}}) ->
+ rabbit_misc:format(
+ "failed to create parent directory for log file at '~s', reason: ~s~n",
+ [LogFile, file:format_error(Reason)]);
+format_error({error, {cannot_log_to_file, LogFile, Reason}}) ->
+ rabbit_misc:format(
+ "failed to open log file at '~s', reason: ~s",
+ [LogFile, file:format_error(Reason)]);
+format_error(Error) ->
+ rabbit_misc:format("Error during startup: ~p", [Error]).
+
+log_exception(Class, Exception, Stacktrace) ->
+ Message = format_exception(Class, Exception, Stacktrace),
+ log_message(Message).
+
+format_exception(Class, Exception, Stacktrace) ->
+ rabbit_misc:format(
+ "Exception during startup:~n~s",
+ [lager:pr_stacktrace(Stacktrace, {Class, Exception})]).
+
+log_message(Message) ->
+ Lines = string:split(
+ ?BOOT_FAILED_HEADER ++
+ Message ++
+ ?BOOT_FAILED_FOOTER,
+ [$\n],
+ all),
+ lists:foreach(
+ fun(Line) ->
+ rabbit_log_prelaunch:error("~s", [Line]),
+ io:format(standard_error, "~s~n", [Line])
+ end, Lines),
+ timer:sleep(1000),
+ ok.
diff --git a/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_prelaunch_sighandler.erl b/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_prelaunch_sighandler.erl
new file mode 100644
index 0000000000..f9a60effda
--- /dev/null
+++ b/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_prelaunch_sighandler.erl
@@ -0,0 +1,93 @@
+-module(rabbit_prelaunch_sighandler).
+-behaviour(gen_event).
+
+-export([setup/0,
+ init/1,
+ handle_event/2,
+ handle_call/2,
+ handle_info/2,
+ terminate/2,
+ code_change/3]).
+
+%% CAUTION: Signal handling in this module must be kept consistent
+%% with the same handling in rabbitmq-server(8).
+
+%% #{signal => default | ignore | stop}.
+-define(SIGNALS_HANDLED_BY_US,
+ #{
+ %% SIGHUP is often used to reload the configuration or reopen
+ %% log files after they were rotated. We don't support any
+ %% of those two cases, so ignore it for now, until we can do
+ %% something about it.
+ sighup => ignore,
+
+ %% SIGTSTP is triggered by Ctrl+Z to pause a program. However
+ %% we can't handle SIGCONT, the signal used to resume the
+ %% program. Unfortunately, it makes a SIGTSTP handler less
+ %% useful here.
+ sigtstp => ignore
+ }).
+
+-define(SIGNAL_HANDLED_BY_ERLANG(Signal),
+ Signal =:= sigusr1 orelse
+ Signal =:= sigquit orelse
+ Signal =:= sigterm).
+
+-define(SERVER, erl_signal_server).
+
+setup() ->
+ case os:type() of
+ {unix, _} ->
+ case whereis(?SERVER) of
+ undefined ->
+ ok;
+ _ ->
+ case lists:member(?MODULE, gen_event:which_handlers(?SERVER)) of
+ true -> ok;
+ false -> gen_event:add_handler(?SERVER, ?MODULE, [])
+ end
+ end;
+ _ ->
+ ok
+ end.
+
+init(_Args) ->
+ maps:fold(
+ fun
+ (Signal, _, Ret) when ?SIGNAL_HANDLED_BY_ERLANG(Signal) -> Ret;
+ (Signal, default, ok) -> os:set_signal(Signal, default);
+ (Signal, ignore, ok) -> os:set_signal(Signal, ignore);
+ (Signal, _, ok) -> os:set_signal(Signal, handle)
+ end, ok, ?SIGNALS_HANDLED_BY_US),
+ {ok, #{}}.
+
+handle_event(Signal, State) when ?SIGNAL_HANDLED_BY_ERLANG(Signal) ->
+ {ok, State};
+handle_event(Signal, State) ->
+ case ?SIGNALS_HANDLED_BY_US of
+ %% The code below can be uncommented if we introduce a signal
+ %% which should stop RabbitMQ.
+ %
+ %#{Signal := stop} ->
+ % error_logger:info_msg(
+ % "~s received - shutting down~n",
+ % [string:uppercase(atom_to_list(Signal))]),
+ % ok = init:stop();
+ _ ->
+ error_logger:info_msg(
+ "~s received - unhandled signal~n",
+ [string:uppercase(atom_to_list(Signal))])
+ end,
+ {ok, State}.
+
+handle_info(_, State) ->
+ {ok, State}.
+
+handle_call(_, State) ->
+ {ok, ok, State}.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+terminate(_Args, _State) ->
+ ok.
diff --git a/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_prelaunch_sup.erl b/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_prelaunch_sup.erl
new file mode 100644
index 0000000000..9fd117d9f3
--- /dev/null
+++ b/deps/rabbit/apps/rabbitmq_prelaunch/src/rabbit_prelaunch_sup.erl
@@ -0,0 +1,22 @@
+-module(rabbit_prelaunch_sup).
+-behaviour(supervisor).
+
+-export([start_link/0]).
+-export([init/1]).
+
+start_link() ->
+ supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+init([]) ->
+ BootStateSup = #{id => bootstate,
+ start => {rabbit_boot_state_sup, start_link, []},
+ type => supervisor},
+ %% `rabbit_prelaunch` does not start a process, it only configures
+ %% the node.
+ Prelaunch = #{id => prelaunch,
+ start => {rabbit_prelaunch, run_prelaunch_first_phase, []},
+ restart => transient},
+ Procs = [BootStateSup, Prelaunch],
+ {ok, {#{strategy => one_for_one,
+ intensity => 1,
+ period => 5}, Procs}}.
diff --git a/deps/rabbit/docs/README-for-packages b/deps/rabbit/docs/README-for-packages
new file mode 100644
index 0000000000..f507a74054
--- /dev/null
+++ b/deps/rabbit/docs/README-for-packages
@@ -0,0 +1,30 @@
+This is rabbitmq-server, a message broker implementing AMQP 0-9-1, AMQP 1.0,
+STOMP and MQTT.
+
+Most of the documentation for RabbitMQ is provided on the RabbitMQ web
+site. You can see documentation for the current version at
+
+https://www.rabbitmq.com/documentation.html
+
+and for previous versions at
+
+https://www.rabbitmq.com/previous.html
+
+Man pages are installed with this package. Of particular interest are
+rabbitmqctl(8), rabbitmq-diagnostics(8), rabbitmq-queues(8).
+They interact with a running node. rabbitmq-plugins(8) is used to manage plugins.
+All of these should be run as the superuser. Learn more about
+CLI tools at
+
+https://www.rabbitmq.com/cli.html
+
+An example configuration file is provided in the same directory as
+this README. Copy it to /etc/rabbitmq/rabbitmq.conf to use it. The
+RabbitMQ server must be restarted after changing the configuration
+file. Learn more about configuration at
+
+https://www.rabbitmq.com/configure.html
+
+An example policy file for HA queues is provided in the same directory
+as this README. To use it with the Pacemaker OCF RA, copy it to
+/usr/local/sbin/set_rabbitmq_policy and make it executable (chmod +x).
diff --git a/deps/rabbit/docs/README.md b/deps/rabbit/docs/README.md
new file mode 100644
index 0000000000..dba1983378
--- /dev/null
+++ b/deps/rabbit/docs/README.md
@@ -0,0 +1,35 @@
+# Manual Pages and Documentation Extras
+
+This directory contains [CLI tool](https://rabbitmq.com/cli.html) man page sources as well as a few documentation extras:
+
+ * An [annotated rabbitmq.conf example](./rabbitmq.conf.example) (see [new style configuration format](https://www.rabbitmq.com/configure.html#config-file-formats))
+ * An [annotated advanced.config example](./advanced.config.example) (see [The advanced.config file](https://www.rabbitmq.com/configure.html#advanced-config-file))
+ * A [systemd unit file example](./rabbitmq-server.service.example)
+
+Please [see rabbitmq.com](https://rabbitmq.com/documentation.html) for documentation guides.
+
+
+## Classic Config File Format Example
+
+Feeling nostalgic and looking for the [classic configuration file example](https://github.com/rabbitmq/rabbitmq-server/blob/v3.7.x/docs/rabbitmq.config.example)?
+Now that's old school! Keep in mind that the classic configuration file format **should be considered deprecated**.
+Prefer `rabbitmq.conf` (see [new style configuration format](https://www.rabbitmq.com/configure.html#config-file-formats))
+with an `advanced.config` to complement it as needed.
+
+
+## man Pages
+
+### Source Files
+
+This directory contains man pages that are converted to HTML using `mandoc`:
+
+ gmake web-manpages
+
+The result is then copied to the [website repository](https://github.com/rabbitmq/rabbitmq-website/tree/live/site/man)
+
+### Contributions
+
+Since deployed man pages are generated, it is important to keep them in sync with the source.
+Accepting community contributions, which will always come as website pull
+requests, is fine, but the person who merges them is responsible for
+backporting all changes to the source pages in this repo.
diff --git a/deps/rabbit/docs/advanced.config.example b/deps/rabbit/docs/advanced.config.example
new file mode 100644
index 0000000000..dc5ab8fc0c
--- /dev/null
+++ b/deps/rabbit/docs/advanced.config.example
@@ -0,0 +1,109 @@
+[
+
+
+ %% ----------------------------------------------------------------------------
+ %% Advanced Erlang Networking/Clustering Options.
+ %%
+ %% See https://www.rabbitmq.com/clustering.html for details
+ %% ----------------------------------------------------------------------------
+ %% Sets the net_kernel tick time.
+ %% Please see http://erlang.org/doc/man/kernel_app.html and
+ %% https://www.rabbitmq.com/nettick.html for further details.
+ %%
+ %% {kernel, [{net_ticktime, 60}]},
+ %% ----------------------------------------------------------------------------
+ %% RabbitMQ Shovel Plugin
+ %%
+ %% See https://www.rabbitmq.com/shovel.html for details
+ %% ----------------------------------------------------------------------------
+
+ {rabbitmq_shovel,
+ [{shovels,
+ [%% A named shovel worker.
+ %% {my_first_shovel,
+ %% [
+
+ %% List the source broker(s) from which to consume.
+ %%
+ %% {sources,
+ %% [%% URI(s) and pre-declarations for all source broker(s).
+ %% {brokers, ["amqp://user:password@host.domain/my_vhost"]},
+ %% {declarations, []}
+ %% ]},
+
+ %% List the destination broker(s) to publish to.
+ %% {destinations,
+ %% [%% A singular version of the 'brokers' element.
+ %% {broker, "amqp://"},
+ %% {declarations, []}
+ %% ]},
+
+ %% Name of the queue to shovel messages from.
+ %%
+ %% {queue, <<"your-queue-name-goes-here">>},
+
+ %% Optional prefetch count.
+ %%
+ %% {prefetch_count, 10},
+
+ %% when to acknowledge messages:
+ %% - no_ack: never (auto)
+ %% - on_publish: after each message is republished
+ %% - on_confirm: when the destination broker confirms receipt
+ %%
+ %% {ack_mode, on_confirm},
+
+ %% Overwrite fields of the outbound basic.publish.
+ %%
+ %% {publish_fields, [{exchange, <<"my_exchange">>},
+ %% {routing_key, <<"from_shovel">>}]},
+
+ %% Static list of basic.properties to set on re-publication.
+ %%
+ %% {publish_properties, [{delivery_mode, 2}]},
+
+ %% The number of seconds to wait before attempting to
+ %% reconnect in the event of a connection failure.
+ %%
+ %% {reconnect_delay, 2.5}
+
+ %% ]} %% End of my_first_shovel
+ ]}
+ %% Rather than specifying some values per-shovel, you can specify
+ %% them for all shovels here.
+ %%
+ %% {defaults, [{prefetch_count, 0},
+ %% {ack_mode, on_confirm},
+ %% {publish_fields, []},
+ %% {publish_properties, [{delivery_mode, 2}]},
+ %% {reconnect_delay, 2.5}]}
+ ]},
+
+ {rabbitmq_auth_backend_ldap, [
+ %%
+ %% Authorisation
+ %% =============
+ %%
+
+ %% The LDAP plugin can perform a variety of queries against your
+ %% LDAP server to determine questions of authorisation. See
+ %% https://www.rabbitmq.com/ldap.html#authorisation for more
+ %% information.
+
+ %% Set the query to use when determining vhost access
+ %%
+ %% {vhost_access_query, {in_group,
+ %% "ou=${vhost}-users,ou=vhosts,dc=example,dc=com"}},
+
+ %% Set the query to use when determining resource (e.g., queue) access
+ %%
+ %% {resource_access_query, {constant, true}},
+
+ %% Set queries to determine which tags a user has
+ %%
+ %% {tag_queries, []}
+ ]}
+].
+
+
+
diff --git a/deps/rabbit/docs/rabbitmq-diagnostics.8 b/deps/rabbit/docs/rabbitmq-diagnostics.8
new file mode 100644
index 0000000000..7a6d53d097
--- /dev/null
+++ b/deps/rabbit/docs/rabbitmq-diagnostics.8
@@ -0,0 +1,725 @@
+.\" vim:ft=nroff:
+.\" This Source Code Form is subject to the terms of the Mozilla Public
+.\" License, v. 2.0. If a copy of the MPL was not distributed with this
+.\" file, You can obtain one at https://mozilla.org/MPL/2.0/.
+.\"
+.\" Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+.\"
+.Dd September 28, 2019
+.Dt RABBITMQ-DIAGNOSTICS 8
+.Os "RabbitMQ Server"
+.Sh NAME
+.Nm rabbitmq-diagnostics
+.Nd RabbitMQ diagnostics, monitoring and health checks tools
+.\" ------------------------------------------------------------------
+.Sh SYNOPSIS
+.\" ------------------------------------------------------------------
+.Nm
+.Op Fl q
+.Op Fl s
+.Op Fl l
+.Op Fl n Ar node
+.Op Fl t Ar timeout
+.Ar command
+.Op Ar command_options
+.\" ------------------------------------------------------------------
+.Sh DESCRIPTION
+.\" ------------------------------------------------------------------
+.Nm
+is a command line tool that provides commands used for diagnostics, monitoring
+and health checks of RabbitMQ nodes.
+See the
+.Lk https://rabbitmq.com/documentation.html "RabbitMQ documentation guides"
+to learn more about RabbitMQ diagnostics, monitoring and health checks.
+
+.Nm
+allows the operator to inspect node and cluster state. A number of
+health checks are available to be used interactively and by monitoring tools.
+
+.Pp
+By default, if it is not possible to connect to and authenticate with the
+target node (for example, if it is stopped), the operation will fail.
+To learn more, see the
+.Lk https://rabbitmq.com/monitoring.html "RabbitMQ Monitoring guide"
+.
+.\" ------------------------------------------------------------------
+.Sh OPTIONS
+.\" ------------------------------------------------------------------
+.Bl -tag -width Ds
+.It Fl n Ar node
+Default node is
+.Qq Pf rabbit@ Ar target-hostname ,
+where
+.Ar target-hostname
+is the local host.
+On a host named
+.Qq myserver.example.com ,
+the node name will usually be
+.Qq rabbit@myserver
+(unless
+.Ev RABBITMQ_NODENAME
+has been overridden).
+The output of
+.Qq hostname -s
+is usually the correct suffix to use after the
+.Qq @
+sign.
+See
+.Xr rabbitmq-server 8
+for details of configuring a RabbitMQ node.
+.It Fl q , -quiet
+Quiet output mode is selected.
+Informational messages are reduced when quiet mode is in effect.
+.It Fl s , -silent
+Silent output mode is selected.
+Informational messages are reduced and table headers are suppressed when silent mode is in effect.
+.It Fl t Ar timeout , Fl -timeout Ar timeout
+Operation timeout in seconds.
+Not all commands support timeouts.
+Default is
+.Cm infinity .
+.It Fl l , Fl -longnames
+Must be specified when the cluster is configured to use long (FQDN) node names.
+To learn more, see the
+.Lk https://rabbitmq.com/clustering.html "RabbitMQ Clustering guide"
+.It Fl -erlang-cookie Ar cookie
+Shared secret to use to authenticate to the target node.
+Prefer using a local file or the
+.Ev RABBITMQ_ERLANG_COOKIE
+environment variable instead of specifying this option on the command line.
+To learn more, see the
+.Lk https://rabbitmq.com/cli.html "RabbitMQ CLI Tools guide"
+.El
+.\" ------------------------------------------------------------------
+.Sh COMMANDS
+.\" ------------------------------------
+.Pp
+Most commands provided by
+.Nm
+inspect node and cluster state or perform health checks.
+.Pp
+Commands that list topology entities (e.g. queues) use tab as column delimiter.
+These commands and their arguments are delegated to rabbitmqctl(8).
+.Pp
+Some commands (
+.Cm list_queues ,
+.Cm list_exchanges ,
+.Cm list_bindings
+and
+.Cm list_consumers )
+accept an optional
+.Ar vhost
+parameter.
+.Pp
+The
+.Cm list_queues ,
+.Cm list_exchanges
+and
+.Cm list_bindings
+commands accept an optional virtual host parameter for which to display
+results.
+The default value is
+.Qq / .
+.Ss Help
+.Bl -tag -width Ds
+.\" ------------------------------------
+.It Cm help Oo Fl l Oc Op Ar command_name
+.Pp
+Prints usage for all available commands.
+.Bl -tag -width Ds
+.It Fl l , Fl -list-commands
+List command usages only, without parameter explanation.
+.It Ar command_name
+Prints usage for the specified command.
+.El
+.\" ------------------------------------
+.It Cm version
+.Pp
+Displays CLI tools version.
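+.Pp
+Example:
+.Sp
+.Dl rabbitmq-diagnostics version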
+.El
+.Ss Nodes
+.Bl -tag -width Ds
+.\" ------------------------------------
+.It Cm wait
+.Pp
+See
+.Cm wait
+in
+.Xr rabbitmqctl 8
+.El
+.Ss Cluster
+.Bl -tag -width Ds
+.\" ------------------------------------
+.It Cm cluster_status
+.Pp
+See
+.Cm cluster_status
+in
+.Xr rabbitmqctl 8
+.El
+.Ss Users
+.Bl -tag -width Ds
+.\" ------------------------------------
+.It Cm list_users
+.Pp
+See
+.Cm list_users
+in
+.Xr rabbitmqctl 8
+.El
+.Ss Access Control
+.Bl -tag -width Ds
+.\" ------------------------------------
+.It Cm list_permissions Op Fl p Ar vhost
+.Pp
+See
+.Cm list_permissions
+in
+.Xr rabbitmqctl 8
+.\" ------------------------------------
+.It Cm list_topic_permissions Op Fl p Ar vhost
+.Pp
+See
+.Cm list_topic_permissions
+in
+.Xr rabbitmqctl 8
+.\" ------------------------------------
+.It Cm list_user_permissions Ar username
+.Pp
+See
+.Cm list_user_permissions
+in
+.Xr rabbitmqctl 8
+.\" ------------------------------------
+.It Cm list_user_topic_permissions Ar username
+.Pp
+See
+.Cm list_user_topic_permissions
+in
+.Xr rabbitmqctl 8
+.\" ------------------------------------
+.It Cm list_vhosts Op Ar vhostinfoitem ...
+.Pp
+See
+.Cm list_vhosts
+in
+.Xr rabbitmqctl 8
+.El
+.Ss Monitoring, observability and health checks
+.Bl -tag -width Ds
+.\" ------------------------------------
+.It Cm alarms
+.Pp
+Lists resource alarms, if any, in the cluster.
+.Pp
+See
+.Lk https://rabbitmq.com/alarms.html "RabbitMQ Resource Alarms guide"
+to learn more.
+.Pp
+Example:
+.Sp
+.Dl rabbitmq-diagnostics alarms
+.\" ------------------------------------
+.It Cm certificates
+.Pp
+Displays the node certificates for every listener on the target node that is configured to use TLS.
+.Pp
+Example:
+.Sp
+.Dl rabbitmq-diagnostics certificates
+.\" ------------------------------------
+.It Cm check_alarms
+.Pp
+Health check that fails (returns with a non-zero code) if there are alarms
+in effect on any of the cluster nodes.
+.Pp
+See
+.Lk https://rabbitmq.com/alarms.html "RabbitMQ Resource Alarms guide"
+to learn more.
+.Pp
+Example:
+.Sp
+.Dl rabbitmq-diagnostics check_alarms
+.\" ------------------------------------
+.It Cm check_certificate_expiration Oo Fl -unit Ar time_unit Oc Op Fl -within Ar seconds
+.Pp
+Checks the expiration date on the certificates for every listener on the target node that is configured to use TLS.
+Supported time units are:
+.Bl -bullet
+.It
+days
+.It
+weeks
+.It
+months
+.It
+years
+.El
+.Pp
+Example:
+.Sp
+.Dl rabbitmq-diagnostics check_certificate_expiration --unit weeks --within 6
+.\" ------------------------------------
+.It Cm check_local_alarms
+.Pp
+Health check that fails (returns with a non-zero code) if there are alarms
+in effect on the target node.
+.Pp
+See
+.Lk https://rabbitmq.com/alarms.html "RabbitMQ Resource Alarms guide"
+to learn more.
+.Pp
+Example:
+.Sp
+.Dl rabbitmq-diagnostics check_local_alarms
+.\" ------------------------------------
+.It Cm check_port_connectivity
+.Pp
+Health check that fails (returns with a non-zero code) if any listener ports
+on the target node cannot accept a new TCP connection opened by
+.Nm .
+.Pp
+The check only validates that a new TCP connection can be accepted; it does not
+perform a messaging protocol handshake or authenticate.
+.Pp
+See
+.Lk https://rabbitmq.com/networking.html "RabbitMQ Networking guide"
+to learn more.
+.Pp
+Example:
+.Sp
+.Dl rabbitmq-diagnostics check_port_connectivity
+.\" ------------------------------------
+.It Cm check_port_listener Ar port
+.Pp
+Health check that fails (returns with a non-zero code) if the target node
+is not listening on the specified port (there is no listener that
+uses that port).
+.Pp
+See
+.Lk https://rabbitmq.com/networking.html "RabbitMQ Networking guide"
+to learn more.
+.Pp
+Example:
+.Sp
+.Dl rabbitmq-diagnostics check_port_listener 5672
+.\" ------------------------------------
+.It Cm check_protocol_listener Ar protocol
+.Pp
+Health check that fails (returns with a non-zero code) if the target node
+does not have a listener for the specified protocol.
+.Pp
+See
+.Lk https://rabbitmq.com/networking.html "RabbitMQ Networking guide"
+to learn more.
+.Pp
+Example:
+.Sp
+.Dl rabbitmq-diagnostics check_protocol_listener mqtt
+.\" ------------------------------------
+.It Cm check_running
+.Pp
+Health check that fails (returns with a non-zero code) if the RabbitMQ
+application is not running on the target node.
+.Pp
+If
+.Xr rabbitmqctl 8
+was used to stop the application, this check will fail.
+.Pp
+Example:
+.Sp
+.Dl rabbitmq-diagnostics check_running
+.\" ------------------------------------
+.It Cm check_virtual_hosts
+.Pp
+Health check that verifies that all virtual hosts are running on the target node.
+.Pp
+Example:
+.Sp
+.Dl rabbitmq-diagnostics check_virtual_hosts --timeout 60
+.\" ------------------------------------
+.It Cm cipher_suites
+.Pp
+Lists cipher suites enabled by default.
+To list all available cipher suites, add the
+.Fl -all
+argument.
+.Pp
+Example:
+.Sp
+.Dl rabbitmq-diagnostics cipher_suites --format openssl --all
+.\" ------------------------------------
+.It Cm command_line_arguments
+.Pp
+Displays target node's command-line arguments and flags as reported by the runtime.
+.Pp
+Example:
+.Sp
+.Dl rabbitmq-diagnostics command_line_arguments -n rabbit@hostname
+.\" ------------------------------------
+.It Cm consume_event_stream Oo Fl -duration Ar seconds | Fl d Ar seconds Oc Oo Fl -pattern Ar pattern Oc Op Fl -timeout Ar milliseconds
+.Pp
+Streams internal events from a running node. Output is jq-compatible.
+.Pp
+Example:
+.Sp
+.Dl rabbitmq-diagnostics consume_event_stream -n rabbit@hostname --duration 20 --pattern "queue_.*"
+.\" ------------------------------------
+.It Cm discover_peers
+.Pp
+Runs a peer discovery on the target node and prints the discovered nodes, if any.
+.Pp
+See
+.Lk https://rabbitmq.com/cluster-formation.html "RabbitMQ Cluster Formation guide"
+to learn more.
+.Pp
+Example:
+.Sp
+.Dl rabbitmq-diagnostics discover_peers --timeout 60
+.\" ------------------------------------
+.It Cm environment
+See
+.Cm environment
+in
+.Xr rabbitmqctl 8
+.\" ------------------------------------
+.It Cm erlang_cookie_hash
+.Pp
+Outputs a hashed value of the shared secret used by the target node
+to authenticate CLI tools and peers. The value can be compared with the hash
+found in error messages of CLI tools.
+.Pp
+See
+.Lk https://rabbitmq.com/clustering.html#erlang-cookie "RabbitMQ Clustering guide"
+to learn more.
+.Pp
+Example:
+.Sp
+.Dl rabbitmq-diagnostics erlang_cookie_hash -q
+.\" ------------------------------------
+.It Cm erlang_version
+.Pp
+Reports target node's Erlang/OTP version.
+.Pp
+Example:
+.Sp
+.Dl rabbitmq-diagnostics erlang_version -q
+.\" ------------------------------------
+.It Cm is_booting
+.Pp
+Reports whether the RabbitMQ application is currently booting (as opposed to
+being fully booted and running, or stopped) on the target node.
+.Pp
+Example:
+.Sp
+.Dl rabbitmq-diagnostics is_booting
+.\" ------------------------------------
+.It Cm is_running
+.Pp
+Reports whether the RabbitMQ application is fully booted and running (that is,
+not stopped) on the target node.
+.Pp
+Example:
+.Sp
+.Dl rabbitmq-diagnostics is_running
+.\" ------------------------------------
+.It Cm list_bindings Oo Fl p Ar vhost Oc Op Ar bindinginfoitem ...
+.Pp
+See
+.Cm list_bindings
+in
+.Xr rabbitmqctl 8
+.\" ------------------------------------
+.It Cm list_channels Op Ar channelinfoitem ...
+.Pp
+See
+.Cm list_channels
+in
+.Xr rabbitmqctl 8
+.\" ------------------------------------
+.It Cm list_ciphers
+.Pp
+See
+.Cm list_ciphers
+in
+.Xr rabbitmqctl 8
+.\" ------------------------------------
+.It Cm list_connections Op Ar connectioninfoitem ...
+.Pp
+See
+.Cm list_connections
+in
+.Xr rabbitmqctl 8
+.\" ------------------------------------
+.It Cm list_consumers Op Fl p Ar vhost
+.Pp
+See
+.Cm list_consumers
+in
+.Xr rabbitmqctl 8
+.\" ------------------------------------
+.It Cm list_exchanges Oo Fl p Ar vhost Oc Op Ar exchangeinfoitem ...
+.Pp
+See
+.Cm list_exchanges
+in
+.Xr rabbitmqctl 8
+.\" ------------------------------------
+.It Cm list_hashes
+.Pp
+See
+.Cm list_hashes
+in
+.Xr rabbitmqctl 8
+.\" ------------------------------------
+.It Cm list_queues Oo Fl p Ar vhost Oc Oo Fl -offline | Fl -online | Fl -local Oc Op Ar queueinfoitem ...
+.Pp
+See
+.Cm list_queues
+in
+.Xr rabbitmqctl 8
+.\" ------------------------------------
+.It Cm list_unresponsive_queues Oo Fl -local Oc Oo Fl -queue-timeout Ar milliseconds Oc Oo Ar column ... Oc Op Fl -no-table-headers
+.Pp
+See
+.Cm list_unresponsive_queues
+in
+.Xr rabbitmqctl 8
+.\" ------------------------------------
+.It Cm listeners
+.Pp
+Lists listeners (bound sockets) on this node. Use this to inspect
+what protocols and ports the node is listening on for client, CLI tool
+and peer connections.
+.Pp
+See
+.Lk https://rabbitmq.com/networking.html "RabbitMQ Networking guide"
+to learn more.
+.Pp
+Example:
+.Sp
+.Dl rabbitmq-diagnostics listeners
+.\" ------------------------------------
+.It Cm log_tail Fl -number Ar number | Fl N Ar number Op Fl -timeout Ar milliseconds
+.Pp
+Prints the last N lines of the log on the target node.
+.Pp
+Example:
+.Sp
+.Dl rabbitmq-diagnostics log_tail --number 100
+.\" ------------------------------------
+.It Cm log_tail_stream Oo Fl -duration Ar seconds | Fl d Ar seconds Oc Op Fl -timeout Ar milliseconds
+.Pp
+Streams logs from a running node for a period of time.
+.Pp
+Example:
+.Sp
+.Dl rabbitmq-diagnostics log_tail_stream --duration 60
+.\" ------------------------------------
+.It Cm maybe_stuck
+.Pp
+Periodically samples stack traces of all Erlang processes
+("lightweight threads") on the node. Reports the processes for which
+stack trace samples are identical.
+.Pp
+Identical samples may indicate that a process is not making any progress,
+but are not necessarily an indication of a problem.
+.Pp
+Example:
+.Sp
+.Dl rabbitmq-diagnostics maybe_stuck -q
+.\" ------------------------------------
+.It Cm memory_breakdown Op Fl -unit Ar memory_unit
+.Pp
+Displays node's memory usage by category.
+Supported memory units are:
+.Bl -bullet
+.It
+bytes
+.It
+megabytes
+.It
+gigabytes
+.It
+terabytes
+.El
+.Pp
+See
+.Lk https://rabbitmq.com/memory-use.html "RabbitMQ Memory Use guide"
+to learn more.
+.Pp
+Example:
+.Sp
+.Dl rabbitmq-diagnostics memory_breakdown --unit gigabytes
+.\" ------------------------------------
+.It Cm observer Op Fl -interval Ar seconds
+.Pp
+Starts a CLI observer interface on the target node.
+.Pp
+Example:
+.Sp
+.Dl rabbitmq-diagnostics observer --interval 10
+.\" ------------------------------------
+.It Cm ping
+.Pp
+The most basic health check.
+Succeeds if the target node (runtime) is running and
+.Nm
+can authenticate with it successfully.
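+.Pp
+Example:
+.Sp
+.Dl rabbitmq-diagnostics ping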
+.\" ------------------------------------
+.It Cm report
+.Pp
+See
+.Cm report
+in
+.Xr rabbitmqctl 8
+.\" ------------------------------------
+.It Cm runtime_thread_stats Op Fl -sample-interval Ar interval
+.Pp
+Performs sampling of runtime (kernel) threads' activity for
+.Ar interval
+seconds and reports it.
+.Pp
+For this command to work, Erlang/OTP on the target node must be compiled with
+microstate accounting support and have the runtime_tools package available.
+.Pp
+Example:
+.Sp
+.Dl rabbitmq-diagnostics runtime_thread_stats --sample-interval 15
+.\" ------------------------------------
+.It Cm schema_info Oo Fl -no_table_headers Oc Oo Ar column ... Oc Op Fl -timeout Ar milliseconds
+.Pp
+See
+.Cm schema_info
+in
+.Xr rabbitmqctl 8
+.\" ------------------------------------
+.It Cm server_version
+.Pp
+Reports target node's RabbitMQ version.
+.Pp
+Example:
+.Sp
+.Dl rabbitmq-diagnostics server_version -q
+.\" ------------------------------------
+.It Cm status
+.Pp
+See
+.Cm status
+in
+.Xr rabbitmqctl 8
+.\" ------------------------------------
+.It Cm tls_versions
+.Pp
+Lists all TLS versions supported by the runtime on the target node.
+Note that RabbitMQ can be configured to accept only a subset of those
+versions; for example, SSLv3 is disabled by default.
+.Pp
+See
+.Lk https://rabbitmq.com/ssl.html "RabbitMQ TLS guide"
+to learn more.
+.Pp
+Example:
+.Sp
+.Dl rabbitmq-diagnostics tls_versions -q
+.El
+.Ss Parameters
+.Bl -tag -width Ds
+.\" ------------------------------------
+.It Cm list_global_parameters
+.Pp
+See
+.Cm list_global_parameters
+in
+.Xr rabbitmqctl 8
+.\" ------------------------------------
+.It Cm list_parameters Op Fl p Ar vhost
+.Pp
+See
+.Cm list_parameters
+in
+.Xr rabbitmqctl 8
+.El
+.Ss Policies
+.Bl -tag -width Ds
+.\" ------------------------------------
+.It Cm list_operator_policies Op Fl p Ar vhost
+.Pp
+See
+.Cm list_operator_policies
+in
+.Xr rabbitmqctl 8
+.\" ------------------------------------
+.It Cm list_policies Op Fl p Ar vhost
+.Pp
+See
+.Cm list_policies
+in
+.Xr rabbitmqctl 8
+.\" ------------------------------------
+.El
+.Ss Virtual hosts
+.Bl -tag -width Ds
+.\" ------------------------------------
+.It Cm list_vhost_limits Oo Fl -vhost Ar vhost Oc Oo Fl -global Oc Op Fl -no-table-headers
+.Pp
+See
+.Cm list_vhost_limits
+in
+.Xr rabbitmqctl 8
+.El
+.Ss Node configuration
+.Bl -tag -width Ds
+.\" ------------------------------------
+.It Cm log_location Oo Fl -all | Fl a Oc Op Fl -timeout Ar milliseconds
+.Pp
+Shows log file location(s) on the target node.
+.Pp
+Example:
+.Sp
+.Dl rabbitmq-diagnostics log_location -a
+.El
+.Ss Feature flags
+.Bl -tag -width Ds
+.\" ------------------------------------
+.It Cm list_feature_flags Oo Ar column ... Oc Op Fl -timeout Ar milliseconds
+.Pp
+See
+.Cm list_feature_flags
+in
+.Xr rabbitmqctl 8
+.El
+.Ss Queues
+.Bl -tag -width Ds
+.\" ------------------------------------
+.It Cm quorum_status Ar queue Op Fl -vhost Ar vhost
+.Pp
+See
+.Cm quorum_status
+in
+.Xr rabbitmq-queues 8
+.It Cm check_if_node_is_mirror_sync_critical
+.Pp
+See
+.Cm check_if_node_is_mirror_sync_critical
+in
+.Xr rabbitmq-queues 8
+.It Cm check_if_node_is_quorum_critical
+.Pp
+See
+.Cm check_if_node_is_quorum_critical
+in
+.Xr rabbitmq-queues 8
+.El
+.\" ------------------------------------------------------------------
+.Sh SEE ALSO
+.\" ------------------------------------------------------------------
+.Xr rabbitmqctl 8 ,
+.Xr rabbitmq-server 8 ,
+.Xr rabbitmq-queues 8 ,
+.Xr rabbitmq-upgrade 8 ,
+.Xr rabbitmq-service 8 ,
+.Xr rabbitmq-env.conf 5 ,
+.Xr rabbitmq-echopid 8
+.\" ------------------------------------------------------------------
+.Sh AUTHOR
+.\" ------------------------------------------------------------------
+.An The RabbitMQ Team Aq Mt info@rabbitmq.com
diff --git a/deps/rabbit/docs/rabbitmq-echopid.8 b/deps/rabbit/docs/rabbitmq-echopid.8
new file mode 100644
index 0000000000..f51dab854c
--- /dev/null
+++ b/deps/rabbit/docs/rabbitmq-echopid.8
@@ -0,0 +1,70 @@
+.\" vim:ft=nroff:
+.\" This Source Code Form is subject to the terms of the Mozilla Public
+.\" License, v. 2.0. If a copy of the MPL was not distributed with this
+.\" file, You can obtain one at https://mozilla.org/MPL/2.0/.
+.\"
+.\" Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+.\"
+.Dd September 28, 2019
+.Dt RABBITMQ-ECHOPID.BAT 8
+.Os "RabbitMQ Server"
+.Sh NAME
+.Nm rabbitmq-echopid.bat
+.Nd returns the Windows process id of the Erlang runtime running RabbitMQ
+.\" ------------------------------------------------------------------
+.Sh SYNOPSIS
+.\" ------------------------------------------------------------------
+.Nm
+.Ar sname
+.\" ------------------------------------------------------------------
+.Sh DESCRIPTION
+.\" ------------------------------------------------------------------
+RabbitMQ is an open source multi-protocol messaging broker.
+.Pp
+Running
+.Nm
+will attempt to discover and echo the process id (PID) of the Erlang
+runtime process
+.Pq Pa erl.exe
+that is hosting RabbitMQ.
+To allow
+.Pa erl.exe
+time to start up and load RabbitMQ, the script will wait for ten seconds
+before timing out if a suitable PID cannot be found.
+.Pp
+If a PID is discovered, the script will echo it to stdout
+before exiting with a
+.Ev ERRORLEVEL
+of 0.
+If no PID is discovered before the timeout, nothing is written to stdout
+and the script exits setting
+.Ev ERRORLEVEL
+to 1.
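+.Pp
+For example, for a node with the default short name of
+.Qq rabbit :
+.sp
+.Dl rabbitmq-echopid.bat rabbit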
+.Pp
+Note that this script only exists on Windows due to the need to wait for
+.Pa erl.exe
+and possibly time out.
+To obtain the PID on Unix set
+.Ev RABBITMQ_PID_FILE
+before starting
+.Xr rabbitmq-server 8
+and do not use
+.Fl detached .
+.\" ------------------------------------------------------------------
+.Sh OPTIONS
+.\" ------------------------------------------------------------------
+.Bl -tag -width Ds
+.It Ar sname
+The short-name form of the RabbitMQ node name.
+.El
+.\" ------------------------------------------------------------------
+.Sh SEE ALSO
+.\" ------------------------------------------------------------------
+.Xr rabbitmq-plugins 8 ,
+.Xr rabbitmq-server 8 ,
+.Xr rabbitmq-service 8 ,
+.Xr rabbitmqctl 8
+.\" ------------------------------------------------------------------
+.Sh AUTHOR
+.\" ------------------------------------------------------------------
+.An The RabbitMQ Team Aq Mt info@rabbitmq.com
diff --git a/deps/rabbit/docs/rabbitmq-env.conf.5 b/deps/rabbit/docs/rabbitmq-env.conf.5
new file mode 100644
index 0000000000..b1bb26281b
--- /dev/null
+++ b/deps/rabbit/docs/rabbitmq-env.conf.5
@@ -0,0 +1,86 @@
+.\" vim:ft=nroff:
+.\" This Source Code Form is subject to the terms of the Mozilla Public
+.\" License, v. 2.0. If a copy of the MPL was not distributed with this
+.\" file, You can obtain one at https://mozilla.org/MPL/2.0/.
+.\"
+.\" Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+.\"
+.Dd September 28, 2019
+.Dt RABBITMQ-ENV.CONF 5
+.Os "RabbitMQ Server"
+.Sh NAME
+.Nm rabbitmq-env.conf
+.Nd environment variables used by RabbitMQ server
+.\" ------------------------------------------------------------------
+.Sh DESCRIPTION
+.\" ------------------------------------------------------------------
+.Nm
+contains environment variables that override the defaults built in to the
+RabbitMQ scripts and CLI tools.
+.Pp
+The file is interpreted by the system shell, and so should consist of a
+sequence of shell environment variable definitions.
+Normal shell syntax is permitted (since the file is sourced using the
+shell "." operator), including line comments starting with "#".
+.Pp
+In order of preference, the startup scripts get their values from the
+environment, from
+.Nm
+and finally from the built-in default values.
+For example, for the
+.Ev RABBITMQ_NODENAME
+setting,
+.Ev RABBITMQ_NODENAME
+from the environment is checked first.
+If it is absent or equal to the empty string, then
+.Ev NODENAME
+from
+.Nm
+is checked.
+If it is also absent or set equal to the empty string then the default
+value from the startup script is used.
+.Pp
+The variable names in
+.Nm
+are always equal to the environment variable names, with the
+.Qq RABBITMQ_
+prefix removed:
+.Ev RABBITMQ_NODE_PORT
+from the environment becomes
+.Ev NODE_PORT
+in
+.Nm .
+.\" ------------------------------------------------------------------
+.Sh EXAMPLES
+.\" ------------------------------------------------------------------
+Below is an example of a minimal
+.Nm
+file that overrides the default node name prefix from
+.Qq rabbit
+to
+.Qq hare .
+.sp
+.Dl # I am a complete rabbitmq-env.conf file.
+.Dl # Comment lines start with a hash character.
+.Dl # This is a /bin/sh script file - use ordinary environment variable syntax
+.Dl NODENAME=hare
+.Pp
+In the
+.Nm
+file below, the RabbitMQ configuration file location is changed to
+.Qq /data/services/rabbitmq/rabbitmq.conf .
+.sp
+.Dl # I am a complete rabbitmq-env.conf file.
+.Dl # Comment lines start with a hash character.
+.Dl # This is a /bin/sh script file - use ordinary environment variable syntax
+.Dl CONFIG_FILE=/data/services/rabbitmq/rabbitmq.conf
+.\" ------------------------------------------------------------------
+.Sh SEE ALSO
+.\" ------------------------------------------------------------------
+.Xr rabbitmq-echopid 8 ,
+.Xr rabbitmq-plugins 8 ,
+.Xr rabbitmq-server 8 ,
+.Xr rabbitmq-queues 8 ,
+.Xr rabbitmq-upgrade 8 ,
+.Xr rabbitmqctl 8
+.\" ------------------------------------------------------------------
+.Sh AUTHOR
+.\" ------------------------------------------------------------------
+.An The RabbitMQ Team Aq Mt info@rabbitmq.com
diff --git a/deps/rabbit/docs/rabbitmq-plugins.8 b/deps/rabbit/docs/rabbitmq-plugins.8
new file mode 100644
index 0000000000..4cec8cfded
--- /dev/null
+++ b/deps/rabbit/docs/rabbitmq-plugins.8
@@ -0,0 +1,254 @@
+.\" vim:ft=nroff:
+.\" This Source Code Form is subject to the terms of the Mozilla Public
+.\" License, v. 2.0. If a copy of the MPL was not distributed with this
+.\" file, You can obtain one at https://mozilla.org/MPL/2.0/.
+.\"
+.\" Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+.\"
+.Dd September 28, 2019
+.Dt RABBITMQ-PLUGINS 8
+.Os "RabbitMQ Server"
+.Sh NAME
+.Nm rabbitmq-plugins
+.Nd command line tool for managing RabbitMQ plugins
+.\" ------------------------------------------------------------------
+.Sh SYNOPSIS
+.\" ------------------------------------------------------------------
+.Nm
+.Op Fl q
+.Op Fl s
+.Op Fl l
+.Op Fl n Ar node
+.Op Fl t Ar timeout
+.Ar command
+.Op Ar command_options
+.\" ------------------------------------------------------------------
+.Sh DESCRIPTION
+.\" ------------------------------------------------------------------
+.Nm
+is a command line tool for managing RabbitMQ plugins.
+See the
+.Lk https://www.rabbitmq.com/plugins.html "RabbitMQ Plugins guide"
+for an overview of RabbitMQ plugins and how they are used.
+
+.Nm
+allows the operator to enable, disable and inspect plugins.
+It must be run by a user with write permissions to the RabbitMQ
+configuration directory.
+.Pp
+Plugins can depend on other plugins.
+.Nm
+resolves the dependencies and enables or disables all dependencies
+so that the user doesn't have to manage them explicitly.
+Plugins listed on the
+.Nm
+command line are marked as explicitly enabled; dependent plugins are
+marked as implicitly enabled.
+Implicitly enabled plugins are automatically disabled again when they
+are no longer required.
+.Pp
+The
+.Cm enable ,
+.Cm disable ,
+and
+.Cm set
+commands will update the plugins file and then attempt to connect to the
+broker and ensure it is running all enabled plugins.
+By default if it is not possible to connect to and authenticate with the target node
+(for example if it is stopped), the operation will fail.
+If
+.Nm
+is used on the same host as the target node,
+.Fl -offline
+can be specified to make
+.Nm
+resolve and update plugin state directly (without contacting the node).
+Such changes will only have an effect on next node start.
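+.Pp
+For example, this command enables a plugin on a stopped node; the change takes
+effect on next node start:
+.sp
+.Dl rabbitmq-plugins enable --offline rabbitmq_shovel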
+To learn more, see the
+.Lk https://www.rabbitmq.com/plugins.html "RabbitMQ Plugins guide"
+.
+.\" ------------------------------------------------------------------
+.Sh OPTIONS
+.\" ------------------------------------------------------------------
+.Bl -tag -width Ds
+.It Fl n Ar node
+Default node is
+.Qq Pf rabbit@ Ar target-hostname ,
+where
+.Ar target-hostname
+is the local host.
+On a host named
+.Qq myserver.example.com ,
+the node name will usually be
+.Qq rabbit@myserver
+(unless
+.Ev RABBITMQ_NODENAME
+has been overridden).
+The output of
+.Qq hostname -s
+is usually the correct suffix to use after the
+.Qq @
+sign.
+See
+.Xr rabbitmq-server 8
+for details of configuring a RabbitMQ node.
+.It Fl q , -quiet
+Quiet output mode is selected.
+Informational messages are reduced when quiet mode is in effect.
+.It Fl s , -silent
+Silent output mode is selected.
+Informational messages are reduced and table headers are suppressed when silent mode is in effect.
+.It Fl t Ar timeout , Fl -timeout Ar timeout
+Operation timeout in seconds.
+Not all commands support timeouts.
+Default is
+.Cm infinity .
+.It Fl l , Fl -longnames
+Must be specified when the cluster is configured to use long (FQDN) node names.
+To learn more, see the
+.Lk https://www.rabbitmq.com/clustering.html "RabbitMQ Clustering guide"
+.It Fl -erlang-cookie Ar cookie
+Shared secret to use to authenticate to the target node.
+Prefer using a local file or the
+.Ev RABBITMQ_ERLANG_COOKIE
+environment variable instead of specifying this option on the command line.
+To learn more, see the
+.Lk https://www.rabbitmq.com/cli.html "RabbitMQ CLI Tools guide"
+.El
+.\" ------------------------------------------------------------------
+.Sh COMMANDS
+.\" ------------------------------------------------------------------
+.Bl -tag -width Ds
+.\" ------------------------------------
+.It Cm list Oo Fl Eemv Oc Op Ar pattern
+.Bl -tag -width Ds
+.It Fl E
+Show only explicitly enabled plugins.
+.It Fl e
+Show only explicitly or implicitly enabled plugins.
+.It Fl m
+Show only plugin names (minimal).
+.It Fl v
+Show all plugin details (verbose).
+.It Ar pattern
+Pattern to filter the plugin names by.
+.El
+.Pp
+Lists all plugins, their versions, dependencies and descriptions.
+Each plugin is prefixed with two status indicator characters inside [ ].
+The first indicator can be:
+.Bl -tag -width <space> -compact
+.It Sy <space>
+to indicate that the plugin is not enabled
+.It Sy E
+to indicate that it is explicitly enabled
+.It Sy e
+to indicate that it is implicitly enabled
+.It Sy \!
+to indicate that it is enabled but missing and thus not operational
+.El
+.Pp
+The second indicator can be:
+.Bl -tag -width <space> -compact
+.It Sy <space>
+to show that the plugin is not running
+.It Sy *
+to show that it is
+.El
+.Pp
+If the optional pattern is given, only plugins whose name matches
+.Ar pattern
+are shown.
+.Pp
+For example, this command lists all plugins, one per line:
+.sp
+.Dl rabbitmq-plugins list
+.Pp
+This command lists all plugins with verbose output:
+.sp
+.Dl rabbitmq-plugins list -v
+.Pp
+This command lists, with verbose output, all plugins whose name contains "management":
+.sp
+.Dl rabbitmq-plugins list -v management
+.Pp
+This command lists all implicitly or explicitly enabled plugins whose name matches the pattern "rabbit":
+.sp
+.Dl rabbitmq-plugins list -e rabbit
+.\" ------------------------------------
+.It Cm enable Oo Fl -offline Oc Oo Fl -online Oc Ar plugin ...
+.Bl -tag -width Ds
+.It Fl -offline
+Modify node's enabled plugin state directly without contacting the node.
+.It Fl -online
+Treat a failure to connect to the running broker as fatal.
+.It Ar plugin
+One or more plugins to enable.
+.El
+.Pp
+Enables the specified plugins and all their dependencies.
+.Pp
+For example, this command enables the
+.Qq shovel
+and
+.Qq management
+plugins and all their dependencies:
+.sp
+.Dl rabbitmq\-plugins enable rabbitmq_shovel rabbitmq_management
+.\" ------------------------------------
+.It Cm disable Oo Fl -offline Oc Oo Fl -online Oc Ar plugin ...
+.Bl -tag -width Ds
+.It Fl -offline
+Modify node's enabled plugin state directly without contacting the node.
+.It Fl -online
+Treat a failure to connect to the running broker as fatal.
+.It Ar plugin
+One or more plugins to disable.
+.El
+.Pp
+Disables the specified plugins and all their dependencies.
+.Pp
+For example, this command disables
+.Qq rabbitmq_management
+and all plugins that depend on it:
+.sp
+.Dl rabbitmq-plugins disable rabbitmq_management
+.\" ------------------------------------
+.It Cm set Oo Fl -offline Oc Oo Fl -online Oc Op Ar plugin ...
+.Bl -tag -width Ds
+.It Fl -offline
+Modify node's enabled plugin state directly without contacting the node.
+.It Fl -online
+Treat a failure to connect to the running broker as fatal.
+.It Ar plugin
+Zero or more plugins to enable; all other plugins will be disabled.
+.El
+.Pp
+Enables the specified plugins and all their dependencies.
+Unlike
+.Cm enable ,
+this command ignores and overwrites the existing list of enabled plugins.
+.Cm set
+with no plugin arguments is a legal command meaning "disable all plugins".
+.Pp
+For example, this command enables the
+.Qq management
+plugin and its dependencies and disables everything else:
+.sp
+.Dl rabbitmq-plugins set rabbitmq_management
+.El
+.\" ------------------------------------------------------------------
+.Sh SEE ALSO
+.\" ------------------------------------------------------------------
+.Xr rabbitmqctl 8 ,
+.Xr rabbitmq-diagnostics 8 ,
+.Xr rabbitmq-server 8 ,
+.Xr rabbitmq-queues 8 ,
+.Xr rabbitmq-upgrade 8 ,
+.Xr rabbitmq-service 8 ,
+.Xr rabbitmq-env.conf 5 ,
+.Xr rabbitmq-echopid 8
+.\" ------------------------------------------------------------------
+.Sh AUTHOR
+.\" ------------------------------------------------------------------
+.An The RabbitMQ Team Aq Mt info@rabbitmq.com
diff --git a/deps/rabbit/docs/rabbitmq-queues.8 b/deps/rabbit/docs/rabbitmq-queues.8
new file mode 100644
index 0000000000..a0bc41a19c
--- /dev/null
+++ b/deps/rabbit/docs/rabbitmq-queues.8
@@ -0,0 +1,202 @@
+.\" vim:ft=nroff:
+.\" This Source Code Form is subject to the terms of the Mozilla Public
+.\" License, v. 2.0. If a copy of the MPL was not distributed with this
+.\" file, You can obtain one at https://mozilla.org/MPL/2.0/.
+.\"
+.\" Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+.\"
+.Dd September 28, 2019
+.Dt RABBITMQ-QUEUES 8
+.Os "RabbitMQ Server"
+.Sh NAME
+.Nm rabbitmq-queues
+.Nd RabbitMQ queue management tools
+.\" ------------------------------------------------------------------
+.Sh SYNOPSIS
+.\" ------------------------------------------------------------------
+.Nm
+.Op Fl q
+.Op Fl s
+.Op Fl l
+.Op Fl n Ar node
+.Op Fl t Ar timeout
+.Ar command
+.Op Ar command_options
+.\" ------------------------------------------------------------------
+.Sh DESCRIPTION
+.\" ------------------------------------------------------------------
+.Nm
+is a command line tool that provides commands used to manage queues,
+mainly member handling for quorum queues.
+See the
+.Lk https://www.rabbitmq.com/quorum-queues.html "RabbitMQ quorum queues guide"
+and
+.Lk https://www.rabbitmq.com/ha.html "RabbitMQ classic mirrored queues guide"
+to learn more about queue types in RabbitMQ.
+.
+.\" ------------------------------------------------------------------
+.Sh OPTIONS
+.\" ------------------------------------------------------------------
+.Bl -tag -width Ds
+.It Fl n Ar node
+Default node is
+.Qq Pf rabbit@ Ar target-hostname ,
+where
+.Ar target-hostname
+is the local host.
+On a host named
+.Qq myserver.example.com ,
+the node name will usually be
+.Qq rabbit@myserver
+(unless
+.Ev RABBITMQ_NODENAME
+has been overridden).
+The output of
+.Qq hostname -s
+is usually the correct suffix to use after the
+.Qq @
+sign.
+See
+.Xr rabbitmq-server 8
+for details of configuring a RabbitMQ node.
+.It Fl q , -quiet
+Quiet output mode is selected.
+Informational messages are reduced when quiet mode is in effect.
+.It Fl s , -silent
+Silent output mode is selected.
+Informational messages are reduced and table headers are suppressed when silent mode is in effect.
+.It Fl t Ar timeout , Fl -timeout Ar timeout
+Operation timeout in seconds.
+Not all commands support timeouts.
+Default is
+.Cm infinity .
+.It Fl l , Fl -longnames
+Must be specified when the cluster is configured to use long (FQDN) node names.
+To learn more, see the
+.Lk https://www.rabbitmq.com/clustering.html "RabbitMQ Clustering guide"
+.It Fl -erlang-cookie Ar cookie
+Shared secret to use to authenticate to the target node.
+Prefer using a local file or the
+.Ev RABBITMQ_ERLANG_COOKIE
+environment variable instead of specifying this option on the command line.
+To learn more, see the
+.Lk https://www.rabbitmq.com/cli.html "RabbitMQ CLI Tools guide"
+.El
+.\" ------------------------------------------------------------------
+.Sh COMMANDS
+.\" ------------------------------------
+.Bl -tag -width Ds
+.\" ------------------------------------
+.It Cm help
+.Pp
+Displays general help and commands supported by
+.Nm .
+.El
+.Ss Cluster
+.Bl -tag -width Ds
+.\" ------------------------------------
+.It Cm grow Ar node Ar selector Fl -vhost-pattern Ar pattern Fl -queue-pattern Ar pattern Fl -errors-only
+.Pp
+Adds a new replica on the given node for all or half of the matching quorum queues.
+.Pp
+Supported
+.Ar selector
+values are:
+.Bl -tag -width Ds
+.It Dv Sy all
+Selects all quorum queues
+.It Dv Sy even
+Selects quorum queues with an even number of replicas
+.El
+.Pp
+Example:
+.Sp
+.Dl rabbitmq-queues grow Qo rabbit@newhost Qc Qo all Qc --vhost-pattern Qo a-vhost Qc --queue-pattern Qo .* Qc
+.\" ------------------------------------
+.It Cm rebalance Ar type Fl -vhost-pattern Ar pattern Fl -queue-pattern Ar pattern
+.Pp
+Rebalances queue master replicas across cluster nodes.
+.Pp
+Supported
+.Ar type
+values are:
+.Bl -tag -width Ds
+.It Dv Sy all
+All queue types
+.It Dv Sy quorum
+Only quorum queues
+.It Dv Sy classic
+Only classic queues
+.El
+.Pp
+Example:
+.Sp
+.Dl rabbitmq-queues rebalance Qo all Qc --vhost-pattern Qo a-vhost Qc --queue-pattern Qo .* Qc
+.\" ------------------------------------
+.It Cm shrink Ar node
+.Pp
+Shrinks quorum queue clusters by removing any members (replicas) on the given node.
+.Pp
+Example:
+.Sp
+.Dl rabbitmq-queues shrink Qo rabbit@decommissioned-node Qc
+.\" ------------------------------------
+.El
+.Ss Replication
+.Bl -tag -width Ds
+.\" ------------------------------------
+.It Cm add_member Ar queue Ar node Fl -vhost Ar virtual-host
+.Pp
+Adds a quorum queue member (replica) on the given node.
+.Pp
+Example:
+.Sp
+.Dl rabbitmq-queues add_member --vhost Qo a-vhost Qc Qo a-queue Qc Qo rabbit@new-node Qc
+.\" ------------------------------------
+.It Cm delete_member Ar queue Ar node Fl -vhost Ar virtual-host
+.Pp
+Removes a quorum queue member (replica) on the given node.
+.Pp
+Example:
+.Sp
+.Dl rabbitmq-queues delete_member --vhost Qo a-vhost Qc Qo a-queue Qc Qo rabbit@decommissioned-node Qc
+.\" ------------------------------------
+.El
+.Ss Queues
+.Bl -tag -width Ds
+.\" ------------------------------------
+.It Cm quorum_status Ar queue Fl -vhost Ar virtual-host
+.Pp
+Displays quorum status of a quorum queue.
+.Pp
+Example:
+.Sp
+.Dl rabbitmq-queues quorum_status --vhost Qo a-vhost Qc Qo a-queue Qc
+.It Cm check_if_node_is_mirror_sync_critical
+.Pp
+Health check that exits with a non-zero code if there are classic mirrored queues without online synchronised mirrors (queues that would potentially lose data if the target node is shut down).
+.Pp
+Example:
+.Sp
+.Dl rabbitmq-queues check_if_node_is_mirror_sync_critical
+.It Cm check_if_node_is_quorum_critical
+.Pp
+Health check that exits with a non-zero code if there are quorum queues with a minimum online quorum (queues that would lose their quorum if the target node is shut down).
+.Pp
+Example:
+.Sp
+.Dl rabbitmq-queues check_if_node_is_quorum_critical
+.El
+.\" ------------------------------------------------------------------
+.Sh SEE ALSO
+.\" ------------------------------------------------------------------
+.Xr rabbitmqctl 8 ,
+.Xr rabbitmq-diagnostics 8 ,
+.Xr rabbitmq-server 8 ,
+.Xr rabbitmq-upgrade 8 ,
+.Xr rabbitmq-service 8 ,
+.Xr rabbitmq-env.conf 5 ,
+.Xr rabbitmq-echopid 8
+.\" ------------------------------------------------------------------
+.Sh AUTHOR
+.\" ------------------------------------------------------------------
+.An The RabbitMQ Team Aq Mt info@rabbitmq.com
diff --git a/deps/rabbit/docs/rabbitmq-server.8 b/deps/rabbit/docs/rabbitmq-server.8
new file mode 100644
index 0000000000..6a5e411cb3
--- /dev/null
+++ b/deps/rabbit/docs/rabbitmq-server.8
@@ -0,0 +1,98 @@
+.\" vim:ft=nroff:
+.\" This Source Code Form is subject to the terms of the Mozilla Public
+.\" License, v. 2.0. If a copy of the MPL was not distributed with this
+.\" file, You can obtain one at https://mozilla.org/MPL/2.0/.
+.\"
+.\" Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+.\"
+.Dd September 28, 2019
+.Dt RABBITMQ-SERVER 8
+.Os "RabbitMQ Server"
+.Sh NAME
+.Nm rabbitmq-server
+.Nd starts a RabbitMQ node
+.\" ------------------------------------------------------------------
+.Sh SYNOPSIS
+.\" ------------------------------------------------------------------
+.Nm
+.Op Fl detached
+.\" ------------------------------------------------------------------
+.Sh DESCRIPTION
+.\" ------------------------------------------------------------------
+RabbitMQ is an open source multi-protocol messaging broker.
+.Pp
+Running
+.Nm
+starts a RabbitMQ node in the foreground. The node will display a startup
+banner and report when startup is complete.
+To shut down the server, use service management tools or
+.Xr rabbitmqctl 8 .
+.\" ------------------------------------------------------------------
+.Sh ENVIRONMENT
+.\" ------------------------------------------------------------------
+.Bl -tag -width Ds
+.It Ev RABBITMQ_CONFIG_FILE
+Defaults to
+.Pa /etc/rabbitmq/rabbitmq.conf .
+Node configuration file path.
+To learn more, see the
+.Lk https://www.rabbitmq.com/configure.html "RabbitMQ Configuration guide"
+.It Ev RABBITMQ_MNESIA_BASE
+Defaults to
+.Pa /var/lib/rabbitmq/mnesia .
+Node data directory will be located (or created) in this directory.
+To learn more, see the
+.Lk https://www.rabbitmq.com/relocate.html "RabbitMQ File and Directory guide"
+.It Ev RABBITMQ_LOG_BASE
+Defaults to
+.Pa /var/log/rabbitmq .
+Log files generated by the server will be placed in this directory.
+To learn more, see the
+.Lk https://www.rabbitmq.com/logging.html "RabbitMQ Logging guide"
+.It Ev RABBITMQ_NODENAME
+Defaults to
+.Qq rabbit@
+followed by the computed hostname.
+Can be used to run multiple nodes on the same host.
+Every node in a cluster must have a unique
+.Ev RABBITMQ_NODENAME .
+To learn more, see the
+.Lk https://www.rabbitmq.com/clustering.html "RabbitMQ Clustering guide"
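+.Pp
+For example, to run a second node on the same host, override both the node
+name and the port:
+.sp
+.Dl RABBITMQ_NODENAME=hare RABBITMQ_NODE_PORT=5673 rabbitmq-server -detached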
+.It Ev RABBITMQ_NODE_IP_ADDRESS
+By default RabbitMQ will bind to all IPv6 and IPv4 interfaces available.
+This variable limits the node to one network interface or address
+family.
+To learn more, see the
+.Lk https://www.rabbitmq.com/networking.html "RabbitMQ Networking guide"
+.It Ev RABBITMQ_NODE_PORT
+AMQP 0-9-1 and AMQP 1.0 port. Defaults to 5672.
+To learn more, see the
+.Lk https://www.rabbitmq.com/networking.html "RabbitMQ Networking guide"
+.El
+.\" ------------------------------------------------------------------
+.Sh OPTIONS
+.\" ------------------------------------------------------------------
+.Bl -tag -width Ds
+.It Fl detached
+Start the server process in the background.
+Note that this will cause the pid not to be written to the pid file.
+.Pp
+For example, this command runs a RabbitMQ node in the background:
+.sp
+.Dl rabbitmq-server -detached
+.El
+.\" ------------------------------------------------------------------
+.Sh SEE ALSO
+.\" ------------------------------------------------------------------
+.Xr rabbitmqctl 8 ,
+.Xr rabbitmq-diagnostics 8 ,
+.Xr rabbitmq-plugins 8 ,
+.Xr rabbitmq-queues 8 ,
+.Xr rabbitmq-upgrade 8 ,
+.Xr rabbitmq-service 8 ,
+.Xr rabbitmq-env.conf 5 ,
+.Xr rabbitmq-echopid 8
+.\" ------------------------------------------------------------------
+.Sh AUTHOR
+.\" ------------------------------------------------------------------
+.An The RabbitMQ Team Aq Mt info@rabbitmq.com
diff --git a/deps/rabbit/docs/rabbitmq-server.service.example b/deps/rabbit/docs/rabbitmq-server.service.example
new file mode 100644
index 0000000000..dec70eb635
--- /dev/null
+++ b/deps/rabbit/docs/rabbitmq-server.service.example
@@ -0,0 +1,27 @@
+# systemd unit example
+[Unit]
+Description=RabbitMQ broker
+After=network.target epmd@0.0.0.0.socket
+Wants=network.target epmd@0.0.0.0.socket
+
+[Service]
+Type=notify
+User=rabbitmq
+Group=rabbitmq
+NotifyAccess=all
+TimeoutStartSec=3600
+# Note:
+# You *may* wish to add the following to automatically restart RabbitMQ
+# in the event of a failure. systemd service restarts are not a
+# replacement for service monitoring. Please see
+# https://www.rabbitmq.com/monitoring.html
+#
+# Restart=on-failure
+# RestartSec=10
+WorkingDirectory=/var/lib/rabbitmq
+ExecStart=/usr/lib/rabbitmq/bin/rabbitmq-server
+ExecStop=/usr/lib/rabbitmq/bin/rabbitmqctl stop
+ExecStop=/bin/sh -c "while ps -p $MAINPID >/dev/null 2>&1; do sleep 1; done"
+
+[Install]
+WantedBy=multi-user.target
diff --git a/deps/rabbit/docs/rabbitmq-service.8 b/deps/rabbit/docs/rabbitmq-service.8
new file mode 100644
index 0000000000..154388fcfc
--- /dev/null
+++ b/deps/rabbit/docs/rabbitmq-service.8
@@ -0,0 +1,152 @@
+.\" vim:ft=nroff:
+.\" This Source Code Form is subject to the terms of the Mozilla Public
+.\" License, v. 2.0. If a copy of the MPL was not distributed with this
+.\" file, You can obtain one at https://mozilla.org/MPL/2.0/.
+.\"
+.\" Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+.\"
+.Dd September 28, 2019
+.Dt RABBITMQ-SERVICE.BAT 8
+.Os "RabbitMQ Server"
+.Sh NAME
+.Nm rabbitmq-service.bat
+.Nd tool for managing RabbitMQ Windows service
+.\" ------------------------------------------------------------------
+.Sh SYNOPSIS
+.\" ------------------------------------------------------------------
+.Nm
+.Op Ar command
+.\" ------------------------------------------------------------------
+.Sh DESCRIPTION
+.\" ------------------------------------------------------------------
+RabbitMQ is an open source multi-protocol messaging broker.
+.Pp
+Running
+.Nm
+allows the RabbitMQ broker to be run as a service in
+Windows® environments.
+The RabbitMQ broker service can be started and stopped using the
+Windows® services panel.
+.Pp
+By default the service will run in the authentication context of the
+local system account.
+It is therefore necessary to synchronise Erlang cookies between the
+local system account (typically
+.Pa C:\(rsWindows\(rs.erlang.cookie )
+and the account that will be used to run
+.Xr rabbitmqctl 8 .
+.\" ------------------------------------------------------------------
+.Sh COMMANDS
+.\" ------------------------------------------------------------------
+.Bl -tag -width Ds
+.It Cm help
+Display usage information.
+.It Cm install
+Install the service.
+The service will not be started.
+Subsequent invocations will update the service parameters if relevant
+environment variables were modified.
+.It Cm remove
+Remove the service.
+If the service is running then it will automatically be stopped before
+being removed.
+No files will be deleted as a consequence and
+.Xr rabbitmq-server 8
+will remain operable.
+.It Cm start
+Start the service.
+The service must have been correctly installed beforehand.
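+.Pp
+For example:
+.sp
+.Dl rabbitmq-service.bat start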
+.It Cm stop
+Stop the service.
+The service must be running for this command to have any effect.
+.It Cm disable
+Disable the service.
+This is the equivalent of setting the startup type to
+.Sy Disabled
+using the service control panel.
+.It Cm enable
+Enable the service.
+This is the equivalent of setting the startup type to
+.Sy Automatic
+using the service control panel.
+.El
+.\" ------------------------------------------------------------------
+.Sh ENVIRONMENT
+.\" ------------------------------------------------------------------
+.Bl -tag -width Ds
+.It Ev RABBITMQ_SERVICENAME
+Defaults to RabbitMQ.
+.It Ev RABBITMQ_BASE
+Note: Windows only. Defaults to the application data directory of the
+current user. This is the location of log and database directories.
+.It Ev RABBITMQ_NODENAME
+Defaults to
+.Qq rabbit@
+followed by the computed hostname.
+Can be used to run multiple nodes on the same host.
+Every node in a cluster must have a unique
+.Ev RABBITMQ_NODENAME .
+To learn more, see the
+.Lk https://www.rabbitmq.com/clustering.html "RabbitMQ Clustering guide"
+.It Ev RABBITMQ_NODE_IP_ADDRESS
+By default RabbitMQ will bind to all IPv6 and IPv4 interfaces available.
+This variable limits the node to one network interface or address
+family.
+To learn more, see the
+.Lk https://www.rabbitmq.com/networking.html "RabbitMQ Networking guide"
+.It Ev RABBITMQ_NODE_PORT
+AMQP 0-9-1 and AMQP 1.0 port. Defaults to 5672.
+To learn more, see the
+.Lk https://www.rabbitmq.com/networking.html "RabbitMQ Networking guide"
+.It Ev ERLANG_SERVICE_MANAGER_PATH
+Defaults to
+.Pa C:\(rsProgram\ Files\(rserl{version}\(rserts-{version}\(rsbin
+(or
+.Pa C:\(rsProgram\ Files\ (x86)\(rserl{version}\(rserts-{version}\(rsbin
+for 32-bit Erlang installations on 64-bit Windows).
+This is the installation location of the Erlang service manager.
+.It Ev RABBITMQ_CONSOLE_LOG
+Set this variable to
+.Sy new
+or
+.Sy reuse
+to have the console output from the server redirected to a file named
+.Pa SERVICENAME.debug
+in the application data directory of the user that installed the
+service.
+Under Vista this will be
+.Pa C:\(rsUsers\(rsusername\(rsAppData\(rsSERVICENAME .
+Under previous versions of Windows this will be
+.Pa C:\(rsDocuments and Settings\(rsusername\(rsApplication Data\(rsSERVICENAME .
+If
+.Ev RABBITMQ_CONSOLE_LOG
+is set to
+.Sy new
+then a new file will be created each time the service starts.
+If
+.Ev RABBITMQ_CONSOLE_LOG
+is set to
+.Sy reuse
+then the file will be overwritten each time the service starts.
+The default behaviour when
+.Ev RABBITMQ_CONSOLE_LOG
+is not set or set to a value other than
+.Sy new
+or
+.Sy reuse
+is to discard the server output.
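+.Pp
+For example, to have a new console log file created on every service start
+(using cmd.exe syntax):
+.sp
+.Dl set RABBITMQ_CONSOLE_LOG=new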
+.El
+.\" ------------------------------------------------------------------
+.Sh SEE ALSO
+.\" ------------------------------------------------------------------
+.Xr rabbitmqctl 8 ,
+.Xr rabbitmq-diagnostics 8 ,
+.Xr rabbitmq-plugins 8 ,
+.Xr rabbitmq-server 8 ,
+.Xr rabbitmq-queues 8 ,
+.Xr rabbitmq-upgrade 8 ,
+.Xr rabbitmq-env.conf 5 ,
+.Xr rabbitmq-echopid 8
+.\" ------------------------------------------------------------------
+.Sh AUTHOR
+.\" ------------------------------------------------------------------
+.An The RabbitMQ Team Aq Mt info@rabbitmq.com
diff --git a/deps/rabbit/docs/rabbitmq-upgrade.8 b/deps/rabbit/docs/rabbitmq-upgrade.8
new file mode 100644
index 0000000000..4fe7283f13
--- /dev/null
+++ b/deps/rabbit/docs/rabbitmq-upgrade.8
@@ -0,0 +1,108 @@
+.\" vim:ft=nroff:
+.\" This Source Code Form is subject to the terms of the Mozilla Public
+.\" License, v. 2.0. If a copy of the MPL was not distributed with this
+.\" file, You can obtain one at https://mozilla.org/MPL/2.0/.
+.\"
+.\" Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+.\"
+.Dd September 28, 2019
+.Dt RABBITMQ-UPGRADE 8
+.Os "RabbitMQ Server"
+.Sh NAME
+.Nm rabbitmq-upgrade
+.Nd RabbitMQ installation upgrade tools
+.\" ------------------------------------------------------------------
+.Sh SYNOPSIS
+.\" ------------------------------------------------------------------
+.Nm
+.Op Fl q
+.Op Fl s
+.Op Fl l
+.Op Fl n Ar node
+.Op Fl t Ar timeout
+.Ar command
+.Op Ar command_options
+.\" ------------------------------------------------------------------
+.Sh DESCRIPTION
+.\" ------------------------------------------------------------------
+.Nm
+is a command line tool that provides commands used during the upgrade of RabbitMQ nodes.
+See the
+.Lk https://www.rabbitmq.com/upgrade.html "RabbitMQ upgrade guide"
+to learn more about RabbitMQ installation upgrades.
+.
+.\" ------------------------------------------------------------------
+.Sh OPTIONS
+.\" ------------------------------------------------------------------
+.Bl -tag -width Ds
+.It Fl n Ar node
+Default node is
+.Qq Pf rabbit@ Ar target-hostname ,
+where
+.Ar target-hostname
+is the local host.
+On a host named
+.Qq myserver.example.com ,
+the node name will usually be
+.Qq rabbit@myserver
+(unless
+.Ev RABBITMQ_NODENAME
+has been overridden).
+The output of
+.Qq hostname -s
+is usually the correct suffix to use after the
+.Qq @
+sign.
+See
+.Xr rabbitmq-server 8
+for details of configuring a RabbitMQ node.
+.It Fl q , -quiet
+Quiet output mode is selected.
+Informational messages are reduced when quiet mode is in effect.
+.It Fl s , -silent
+Silent output mode is selected.
+Informational messages are reduced and table headers are suppressed when silent mode is in effect.
+.It Fl t Ar timeout , Fl -timeout Ar timeout
+Operation timeout in seconds.
+Not all commands support timeouts.
+Default is
+.Cm infinity .
+.It Fl l , Fl -longnames
+Must be specified when the cluster is configured to use long (FQDN) node names.
+To learn more, see the
+.Lk https://www.rabbitmq.com/clustering.html "RabbitMQ Clustering guide"
+.It Fl -erlang-cookie Ar cookie
+Shared secret to use to authenticate to the target node.
+Prefer using a local file or the
+.Ev RABBITMQ_ERLANG_COOKIE
+environment variable instead of specifying this option on the command line.
+To learn more, see the
+.Lk https://www.rabbitmq.com/cli.html "RabbitMQ CLI Tools guide"
+.El
+.\" ------------------------------------------------------------------
+.Sh COMMANDS
+.\" ------------------------------------
+.Bl -tag -width Ds
+.\" ------------------------------------
+.It Cm help
+.Pp
+Displays general help and commands supported by
+.Nm .
+.\" ------------------------------------
+.It Cm post_upgrade
+.Pp
+Runs post-upgrade tasks. In the current version, it rebalances mirrored and quorum queues across all nodes in the cluster.
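+.Pp
+Example:
+.Sp
+.Dl rabbitmq-upgrade post_upgrade
+.El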
+.\" ------------------------------------------------------------------
+.Sh SEE ALSO
+.\" ------------------------------------------------------------------
+.Xr rabbitmqctl 8 ,
+.Xr rabbitmq-diagnostics 8 ,
+.Xr rabbitmq-server 8 ,
+.Xr rabbitmq-queues 8 ,
+.Xr rabbitmq-service 8 ,
+.Xr rabbitmq-env.conf 5 ,
+.Xr rabbitmq-echopid 8
+.\" ------------------------------------------------------------------
+.Sh AUTHOR
+.\" ------------------------------------------------------------------
+.An The RabbitMQ Team Aq Mt info@rabbitmq.com
diff --git a/deps/rabbit/docs/rabbitmq.conf.example b/deps/rabbit/docs/rabbitmq.conf.example
new file mode 100644
index 0000000000..17e023e62c
--- /dev/null
+++ b/deps/rabbit/docs/rabbitmq.conf.example
@@ -0,0 +1,1002 @@
+# ======================================
+# RabbitMQ broker section
+# ======================================
+
+## Related doc guide: https://rabbitmq.com/configure.html. See
+## https://rabbitmq.com/documentation.html for documentation ToC.
+
+## Networking
+## ====================
+##
+## Related doc guide: https://rabbitmq.com/networking.html.
+##
+## By default, RabbitMQ will listen on all interfaces, using
+## the standard (reserved) AMQP 0-9-1 and 1.0 port.
+##
+# listeners.tcp.default = 5672
+
+
+## To listen on a specific interface, provide an IP address with port.
+## For example, to listen only on localhost for both IPv4 and IPv6:
+##
+# IPv4
+# listeners.tcp.local = 127.0.0.1:5672
+# IPv6
+# listeners.tcp.local_v6 = ::1:5672
+
+## You can define multiple listeners using listener names
+# listeners.tcp.other_port = 5673
+# listeners.tcp.other_ip = 10.10.10.10:5672
+
+
+## TLS listeners are configured in the same fashion as TCP listeners,
+## including the option to control the choice of interface.
+##
+# listeners.ssl.default = 5671
+
+## It is possible to disable regular TCP (non-TLS) listeners. Clients
+## not configured to use TLS and the correct TLS-enabled port won't be able
+## to connect to this node.
+# listeners.tcp = none
+
+## Number of Erlang processes that will accept connections for the TCP
+## and TLS listeners.
+##
+# num_acceptors.tcp = 10
+# num_acceptors.ssl = 10
+
+## Socket writer will force GC every so many bytes transferred.
+## Default is 1 GB (1000000000 bytes). Set to 'off' to disable.
+##
+# socket_writer.gc_threshold = 1000000000
+#
+## To disable:
+# socket_writer.gc_threshold = off
+
+## Maximum amount of time allowed for the AMQP 0-9-1 and AMQP 1.0 handshake
+## (performed after socket connection and TLS handshake) to complete, in milliseconds.
+##
+# handshake_timeout = 10000
+
+## Set to 'true' to perform reverse DNS lookups when accepting a
+## connection. rabbitmqctl and management UI will then display hostnames
+## instead of IP addresses. Default value is `false`.
+##
+# reverse_dns_lookups = false
+
+##
+## Security, Access Control
+## ==============
+##
+
+## Related doc guide: https://rabbitmq.com/access-control.html.
+
+## The default "guest" user is only permitted to access the server
+## via a loopback interface (e.g. localhost).
+##
+# loopback_users.guest = true
+
+## Uncomment the following line if you want to allow access to the
+## guest user from anywhere on the network.
+# loopback_users.guest = false
+
+## TLS configuration.
+##
+## Related doc guide: https://rabbitmq.com/ssl.html.
+##
+# ssl_options.verify = verify_peer
+# ssl_options.fail_if_no_peer_cert = false
+# ssl_options.cacertfile = /path/to/cacert.pem
+# ssl_options.certfile = /path/to/cert.pem
+# ssl_options.keyfile = /path/to/key.pem
+#
+# ssl_options.honor_cipher_order = true
+# ssl_options.honor_ecc_order = true
+
+# ssl_options.ciphers.1 = ECDHE-ECDSA-AES256-GCM-SHA384
+# ssl_options.ciphers.2 = ECDHE-RSA-AES256-GCM-SHA384
+# ssl_options.ciphers.3 = ECDHE-ECDSA-AES256-SHA384
+# ssl_options.ciphers.4 = ECDHE-RSA-AES256-SHA384
+# ssl_options.ciphers.5 = ECDH-ECDSA-AES256-GCM-SHA384
+# ssl_options.ciphers.6 = ECDH-RSA-AES256-GCM-SHA384
+# ssl_options.ciphers.7 = ECDH-ECDSA-AES256-SHA384
+# ssl_options.ciphers.8 = ECDH-RSA-AES256-SHA384
+# ssl_options.ciphers.9 = DHE-RSA-AES256-GCM-SHA384
+# ssl_options.ciphers.10 = DHE-DSS-AES256-GCM-SHA384
+# ssl_options.ciphers.11 = DHE-RSA-AES256-SHA256
+# ssl_options.ciphers.12 = DHE-DSS-AES256-SHA256
+# ssl_options.ciphers.13 = ECDHE-ECDSA-AES128-GCM-SHA256
+# ssl_options.ciphers.14 = ECDHE-RSA-AES128-GCM-SHA256
+# ssl_options.ciphers.15 = ECDHE-ECDSA-AES128-SHA256
+# ssl_options.ciphers.16 = ECDHE-RSA-AES128-SHA256
+# ssl_options.ciphers.17 = ECDH-ECDSA-AES128-GCM-SHA256
+# ssl_options.ciphers.18 = ECDH-RSA-AES128-GCM-SHA256
+# ssl_options.ciphers.19 = ECDH-ECDSA-AES128-SHA256
+# ssl_options.ciphers.20 = ECDH-RSA-AES128-SHA256
+# ssl_options.ciphers.21 = DHE-RSA-AES128-GCM-SHA256
+# ssl_options.ciphers.22 = DHE-DSS-AES128-GCM-SHA256
+# ssl_options.ciphers.23 = DHE-RSA-AES128-SHA256
+# ssl_options.ciphers.24 = DHE-DSS-AES128-SHA256
+# ssl_options.ciphers.25 = ECDHE-ECDSA-AES256-SHA
+# ssl_options.ciphers.26 = ECDHE-RSA-AES256-SHA
+# ssl_options.ciphers.27 = DHE-RSA-AES256-SHA
+# ssl_options.ciphers.28 = DHE-DSS-AES256-SHA
+# ssl_options.ciphers.29 = ECDH-ECDSA-AES256-SHA
+# ssl_options.ciphers.30 = ECDH-RSA-AES256-SHA
+# ssl_options.ciphers.31 = ECDHE-ECDSA-AES128-SHA
+# ssl_options.ciphers.32 = ECDHE-RSA-AES128-SHA
+# ssl_options.ciphers.33 = DHE-RSA-AES128-SHA
+# ssl_options.ciphers.34 = DHE-DSS-AES128-SHA
+# ssl_options.ciphers.35 = ECDH-ECDSA-AES128-SHA
+# ssl_options.ciphers.36 = ECDH-RSA-AES128-SHA
+
+## Select an authentication/authorisation backend to use.
+##
+## Alternative backends are provided by plugins, such as rabbitmq-auth-backend-ldap.
+##
+## NB: These settings require certain plugins to be enabled.
+##
+## Related doc guides:
+##
+## * https://rabbitmq.com/plugins.html
+## * https://rabbitmq.com/access-control.html
+##
+
+# auth_backends.1 = rabbit_auth_backend_internal
+
+## uses separate backends for authentication and authorisation,
+## see below.
+# auth_backends.1.authn = rabbit_auth_backend_ldap
+# auth_backends.1.authz = rabbit_auth_backend_internal
+
+## The rabbitmq_auth_backend_ldap plugin allows the broker to
+## perform authentication and authorisation by deferring to an
+## external LDAP server.
+##
+## Relevant doc guides:
+##
+## * https://rabbitmq.com/ldap.html
+## * https://rabbitmq.com/access-control.html
+##
+## uses LDAP for both authentication and authorisation
+# auth_backends.1 = rabbit_auth_backend_ldap
+
+## uses HTTP service for both authentication and
+## authorisation
+# auth_backends.1 = rabbit_auth_backend_http
+
+## uses two backends in a chain: HTTP first, then internal
+# auth_backends.1 = rabbit_auth_backend_http
+# auth_backends.2 = rabbit_auth_backend_internal
+
+## Authentication
+## The built-in mechanisms are 'PLAIN',
+## 'AMQPLAIN', and 'EXTERNAL'. Additional mechanisms can be added via
+## plugins.
+##
+## Related doc guide: https://rabbitmq.com/authentication.html.
+##
+# auth_mechanisms.1 = PLAIN
+# auth_mechanisms.2 = AMQPLAIN
+
+## The rabbitmq-auth-mechanism-ssl plugin makes it possible to
+## authenticate a user based on the client's x509 (TLS) certificate.
+## Related doc guide: https://rabbitmq.com/authentication.html.
+##
+## To use auth-mechanism-ssl, the EXTERNAL mechanism should
+## be enabled:
+##
+# auth_mechanisms.1 = PLAIN
+# auth_mechanisms.2 = AMQPLAIN
+# auth_mechanisms.3 = EXTERNAL
+
+## To force x509 certificate-based authentication on all clients,
+## exclude all other mechanisms (note: this will disable password-based
+## authentication even for the management UI!):
+##
+# auth_mechanisms.1 = EXTERNAL
+
+## This pertains to both the rabbitmq-auth-mechanism-ssl plugin and
+## STOMP ssl_cert_login configurations. See the RabbitMQ STOMP plugin
+## configuration section later in this file and the README in
+## https://github.com/rabbitmq/rabbitmq-auth-mechanism-ssl for further
+## details.
+##
+## To use the TLS cert's CN instead of its DN as the username
+##
+# ssl_cert_login_from = common_name
+
+## TLS handshake timeout, in milliseconds.
+##
+# ssl_handshake_timeout = 5000
+
+
+## Cluster name
+##
+# cluster_name = dev3.eng.megacorp.local
+
+## Password hashing implementation. Will only affect newly
+## created users. To recalculate the hash for an existing user,
+## it is necessary to update their password.
+##
+## To use SHA-512, set to rabbit_password_hashing_sha512.
+##
+# password_hashing_module = rabbit_password_hashing_sha256
+
+## When importing definitions exported from versions earlier
+## than 3.6.0, it is possible to go back to MD5 (only do this
+## as a temporary measure!) by setting this to rabbit_password_hashing_md5.
+##
+# password_hashing_module = rabbit_password_hashing_md5
+
+##
+## Default User / VHost
+## ====================
+##
+
+## On first start RabbitMQ will create a vhost and a user. These
+## config items control what gets created.
+## Relevant doc guide: https://rabbitmq.com/access-control.html
+##
+# default_vhost = /
+# default_user = guest
+# default_pass = guest
+
+# default_permissions.configure = .*
+# default_permissions.read = .*
+# default_permissions.write = .*
+
+## Tags for default user
+##
+## For more details about tags, see the documentation for the
+## Management Plugin at https://rabbitmq.com/management.html.
+##
+# default_user_tags.administrator = true
+
+## Define other tags like this:
+# default_user_tags.management = true
+# default_user_tags.custom_tag = true
+
+##
+## Additional network and protocol related configuration
+## =====================================================
+##
+
+## Set the default AMQP 0-9-1 heartbeat interval (in seconds).
+## Related doc guides:
+##
+## * https://rabbitmq.com/heartbeats.html
+## * https://rabbitmq.com/networking.html
+##
+# heartbeat = 60
+
+## Set the max permissible size of an AMQP frame (in bytes).
+##
+# frame_max = 131072
+
+## Set the max frame size the server will accept before connection
+## tuning occurs
+##
+# initial_frame_max = 4096
+
+## Set the max permissible number of channels per connection.
+## 0 means "no limit".
+##
+# channel_max = 128
+
+## Customising TCP Listener (Socket) Configuration.
+##
+## Related doc guides:
+##
+## * https://rabbitmq.com/networking.html
+## * https://www.erlang.org/doc/man/inet.html#setopts-2
+##
+
+# tcp_listen_options.backlog = 128
+# tcp_listen_options.nodelay = true
+# tcp_listen_options.exit_on_close = false
+#
+# tcp_listen_options.keepalive = true
+# tcp_listen_options.send_timeout = 15000
+#
+# tcp_listen_options.buffer = 196608
+# tcp_listen_options.sndbuf = 196608
+# tcp_listen_options.recbuf = 196608
+
+##
+## Resource Limits & Flow Control
+## ==============================
+##
+## Related doc guide: https://rabbitmq.com/memory.html.
+
+## Memory-based Flow Control threshold.
+##
+# vm_memory_high_watermark.relative = 0.4
+
+## Alternatively, we can set a limit (in bytes) of RAM used by the node.
+##
+# vm_memory_high_watermark.absolute = 1073741824
+
+## Or you can set an absolute value using memory units (with RabbitMQ 3.6.0+).
+## The absolute watermark will be ignored if a relative one is defined!
+##
+# vm_memory_high_watermark.absolute = 2GB
+##
+## Supported unit symbols:
+##
+## k, kiB: kibibytes (2^10 - 1,024 bytes)
+## M, MiB: mebibytes (2^20 - 1,048,576 bytes)
+## G, GiB: gibibytes (2^30 - 1,073,741,824 bytes)
+## kB: kilobytes (10^3 - 1,000 bytes)
+## MB: megabytes (10^6 - 1,000,000 bytes)
+## GB: gigabytes (10^9 - 1,000,000,000 bytes)
+
+
+
+## Fraction of the high watermark limit at which queues start to
+## page messages out to disc in order to free up memory.
+## For example, when vm_memory_high_watermark is set to 0.4 and this value is set to 0.5,
+## paging can begin as early as when 20% of total available RAM is used by the node.
+##
+## Values greater than 1.0 can be dangerous and should be used carefully.
+##
+## One alternative to this is to use durable queues and publish messages
+## as persistent (delivery mode = 2). With this combination queues will
+## move messages to disk much more rapidly.
+##
+## Another alternative is to configure queues to page all messages (both
+## persistent and transient) to disk as quickly
+## as possible, see https://rabbitmq.com/lazy-queues.html.
+##
+# vm_memory_high_watermark_paging_ratio = 0.5
+
+## Selects the Erlang VM memory consumption calculation strategy. Can be `allocated`, `rss` or `legacy` (aliased as `erlang`).
+## Introduced in 3.6.11. `rss` is the default as of 3.6.12.
+## See https://github.com/rabbitmq/rabbitmq-server/issues/1223 and rabbitmq/rabbitmq-common#224 for background.
+# vm_memory_calculation_strategy = rss
+
+## Interval (in milliseconds) at which we perform the check of the memory
+## levels against the watermarks.
+##
+# memory_monitor_interval = 2500
+
+## The total memory available can be calculated from OS resources
+## (the default) or provided as a configuration parameter.
+# total_memory_available_override_value = 2GB
+
+## Set disk free limit (in bytes). Once free disk space reaches this
+## lower bound, a disk alarm will be set - see the documentation
+## listed above for more details.
+##
+## Absolute watermark will be ignored if relative is defined!
+# disk_free_limit.absolute = 50000
+
+## Or you can set it using memory units (same as in vm_memory_high_watermark)
+## with RabbitMQ 3.6.0+.
+# disk_free_limit.absolute = 500KB
+# disk_free_limit.absolute = 50mb
+# disk_free_limit.absolute = 5GB
+
+## Alternatively, we can set a limit relative to total available RAM.
+##
+## Values lower than 1.0 can be dangerous and should be used carefully.
+# disk_free_limit.relative = 2.0
+
+##
+## Clustering
+## =====================
+##
+# cluster_partition_handling = ignore
+
+## The pause_if_all_down strategy requires additional configuration
+# cluster_partition_handling = pause_if_all_down
+
+## Recovery strategy. Can be either 'autoheal' or 'ignore'.
+# cluster_partition_handling.pause_if_all_down.recover = ignore
+
+## Node names to check
+# cluster_partition_handling.pause_if_all_down.nodes.1 = rabbit@localhost
+# cluster_partition_handling.pause_if_all_down.nodes.2 = hare@localhost
+
+## Mirror sync batch size, in messages. Increasing this will speed
+## up syncing, but the total batch size in bytes must not exceed 2 GiB.
+## Available in RabbitMQ 3.6.0 or later.
+##
+# mirroring_sync_batch_size = 4096
+
+## Make clustering happen *automatically* at startup. Only applied
+## to nodes that have just been reset or started for the first time.
+##
+## Relevant doc guide: https://rabbitmq.com/cluster-formation.html
+##
+
+# cluster_formation.peer_discovery_backend = rabbit_peer_discovery_classic_config
+#
+# cluster_formation.classic_config.nodes.1 = rabbit1@hostname
+# cluster_formation.classic_config.nodes.2 = rabbit2@hostname
+# cluster_formation.classic_config.nodes.3 = rabbit3@hostname
+# cluster_formation.classic_config.nodes.4 = rabbit4@hostname
+
+## DNS-based peer discovery. This backend will list A records
+## of the configured hostname and perform reverse lookups for
+## the addresses returned.
+
+# cluster_formation.peer_discovery_backend = rabbit_peer_discovery_dns
+# cluster_formation.dns.hostname = discovery.eng.example.local
+
+## This node's type can be configured. If you are not sure
+## what node type to use, always use 'disc'.
+# cluster_formation.node_type = disc
+
+## Interval (in milliseconds) at which we send keepalive messages
+## to other cluster members. Note that this is not the same thing
+## as net_ticktime; missed keepalive messages will not cause nodes
+## to be considered down.
+##
+# cluster_keepalive_interval = 10000
+
+##
+## Statistics Collection
+## =====================
+##
+
+## Statistics collection interval (in milliseconds). Increasing
+## this will reduce the load on the management database.
+##
+# collect_statistics_interval = 5000
+
+## Fine vs. coarse statistics
+##
+## This value is no longer meant to be configured directly.
+##
+## See https://www.rabbitmq.com/management.html#fine-stats.
+
+##
+## Ra Settings
+## =====================
+##
+# raft.segment_max_entries = 65536
+# raft.wal_max_size_bytes = 1048576
+# raft.wal_max_batch_size = 4096
+# raft.snapshot_chunk_size = 1000000
+
+##
+## Misc/Advanced Options
+## =====================
+##
+## NB: Change these only if you understand what you are doing!
+##
+
+## Timeout used when waiting for Mnesia tables in a cluster to
+## become available.
+##
+# mnesia_table_loading_retry_timeout = 30000
+
+## Retries when waiting for Mnesia tables during cluster startup. Note that
+## this setting is not applied to Mnesia upgrades or node deletions.
+##
+# mnesia_table_loading_retry_limit = 10
+
+## Size in bytes below which to embed messages in the queue index.
+## Related doc guide: https://rabbitmq.com/persistence-conf.html
+##
+# queue_index_embed_msgs_below = 4096
+
+## You can also set this size in memory units
+##
+# queue_index_embed_msgs_below = 4kb
+
+## Whether or not to enable background periodic forced GC runs for all
+## Erlang processes on the node in "waiting" state.
+##
+## Disabling background GC may reduce latency for client operations;
+## keeping it enabled may reduce median RAM usage by the binary heap
+## (see https://www.erlang-solutions.com/blog/erlang-garbage-collector.html).
+##
+## Before trying this option, please take a look at the memory
+## breakdown (https://www.rabbitmq.com/memory-use.html).
+##
+# background_gc_enabled = false
+
+## Target (desired) interval (in milliseconds) at which we run background GC.
+## The actual interval will vary depending on how long it takes to execute
+## the operation (can be higher than this interval). Values less than
+## 30000 milliseconds are not recommended.
+##
+# background_gc_target_interval = 60000
+
+## Whether or not to enable proxy protocol support.
+## Once enabled, clients cannot directly connect to the broker
+## anymore. They must connect through a load balancer that sends the
+## proxy protocol header to the broker at connection time.
+## This setting applies only to AMQP clients, other protocols
+## like MQTT or STOMP have their own setting to enable proxy protocol.
+## See the plugins documentation for more information.
+##
+# proxy_protocol = false
+
+## Overridden product name and version.
+## They are set to "RabbitMQ" and the release version by default.
+# product.name = RabbitMQ
+# product.version = 1.2.3
+
+## "Message of the day" file.
+## Its content is used to expand the logged and printed banners.
+## Defaults to /etc/rabbitmq/motd on Unix and %APPDATA%\RabbitMQ\motd.txt
+## on Windows.
+# motd_file = /etc/rabbitmq/motd
+
+## ----------------------------------------------------------------------------
+## Advanced Erlang Networking/Clustering Options.
+##
+## Related doc guide: https://rabbitmq.com/clustering.html
+## ----------------------------------------------------------------------------
+
+# ======================================
+# Kernel section
+# ======================================
+
+## Timeout used to detect peer unavailability, including CLI tools.
+## Related doc guide: https://www.rabbitmq.com/nettick.html.
+##
+# net_ticktime = 60
+
+## Inter-node communication port range.
+## The parameters inet_dist_listen_min and inet_dist_listen_max
+## can be configured in the classic config format only.
+## Related doc guide: https://www.rabbitmq.com/networking.html#epmd-inet-dist-port-range.
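+
+## For example, to pin inter-node communication to a single port, a
+## minimal advanced.config sketch (classic Erlang terms format) could
+## look like the following. The port value is illustrative; pick one
+## that is open between all cluster nodes.
+##
+## [
+##   {kernel, [
+##     {inet_dist_listen_min, 25672},
+##     {inet_dist_listen_max, 25672}
+##   ]}
+## ].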
+
+
+## ----------------------------------------------------------------------------
+## RabbitMQ Management Plugin
+##
+## Related doc guide: https://rabbitmq.com/management.html.
+## ----------------------------------------------------------------------------
+
+# =======================================
+# Management section
+# =======================================
+
+## Preload schema definitions from the following JSON file.
+## Related doc guide: https://rabbitmq.com/management.html#load-definitions.
+##
+# management.load_definitions = /path/to/exported/definitions.json
+
+## Log all requests to the management HTTP API to a file.
+##
+# management.http_log_dir = /path/to/access.log
+
+## HTTP listener and embedded Web server settings.
+## See https://rabbitmq.com/management.html for details.
+#
+# management.tcp.port = 15672
+# management.tcp.ip = 0.0.0.0
+#
+# management.tcp.shutdown_timeout = 7000
+# management.tcp.max_keepalive = 120
+# management.tcp.idle_timeout = 120
+# management.tcp.inactivity_timeout = 120
+# management.tcp.request_timeout = 120
+# management.tcp.compress = true
+
+## HTTPS listener settings.
+## See https://rabbitmq.com/management.html and https://rabbitmq.com/ssl.html for details.
+##
+# management.ssl.port = 15671
+# management.ssl.cacertfile = /path/to/ca_certificate.pem
+# management.ssl.certfile = /path/to/server_certificate.pem
+# management.ssl.keyfile = /path/to/server_key.pem
+
+## More TLS options
+# management.ssl.honor_cipher_order = true
+# management.ssl.honor_ecc_order = true
+# management.ssl.client_renegotiation = false
+# management.ssl.secure_renegotiate = true
+
+## Supported TLS versions
+# management.ssl.versions.1 = tlsv1.2
+# management.ssl.versions.2 = tlsv1.1
+
+## Cipher suites the server is allowed to use
+# management.ssl.ciphers.1 = ECDHE-ECDSA-AES256-GCM-SHA384
+# management.ssl.ciphers.2 = ECDHE-RSA-AES256-GCM-SHA384
+# management.ssl.ciphers.3 = ECDHE-ECDSA-AES256-SHA384
+# management.ssl.ciphers.4 = ECDHE-RSA-AES256-SHA384
+# management.ssl.ciphers.5 = ECDH-ECDSA-AES256-GCM-SHA384
+# management.ssl.ciphers.6 = ECDH-RSA-AES256-GCM-SHA384
+# management.ssl.ciphers.7 = ECDH-ECDSA-AES256-SHA384
+# management.ssl.ciphers.8 = ECDH-RSA-AES256-SHA384
+# management.ssl.ciphers.9 = DHE-RSA-AES256-GCM-SHA384
+
+## URL path prefix for HTTP API and management UI
+# management.path_prefix = /a-prefix
+
+## One of 'basic', 'detailed' or 'none'. See
+## https://rabbitmq.com/management.html#fine-stats for more details.
+# management.rates_mode = basic
+
+## Configure how long aggregated data (such as message rates and queue
+## lengths) is retained. Please read the plugin's documentation in
+## https://rabbitmq.com/management.html#configuration for more
+## details.
+## You can use the 'minute', 'hour' and 'day' keys, or an integer key (in seconds).
+# management.sample_retention_policies.global.minute = 5
+# management.sample_retention_policies.global.hour = 60
+# management.sample_retention_policies.global.day = 1200
+
+# management.sample_retention_policies.basic.minute = 5
+# management.sample_retention_policies.basic.hour = 60
+
+# management.sample_retention_policies.detailed.10 = 5
+
+## ----------------------------------------------------------------------------
+## RabbitMQ Shovel Plugin
+##
+## Related doc guide: https://rabbitmq.com/shovel.html
+## ----------------------------------------------------------------------------
+
+## See advanced.config.example for a Shovel plugin example
+
+
+## ----------------------------------------------------------------------------
+## RabbitMQ STOMP Plugin
+##
+## Related doc guide: https://rabbitmq.com/stomp.html
+## ----------------------------------------------------------------------------
+
+# =======================================
+# STOMP section
+# =======================================
+
+## See https://rabbitmq.com/stomp.html for details.
+
+## TCP listeners.
+##
+# stomp.listeners.tcp.1 = 127.0.0.1:61613
+# stomp.listeners.tcp.2 = ::1:61613
+
+## TCP listener settings
+##
+# stomp.tcp_listen_options.backlog = 2048
+# stomp.tcp_listen_options.recbuf = 131072
+# stomp.tcp_listen_options.sndbuf = 131072
+#
+# stomp.tcp_listen_options.keepalive = true
+# stomp.tcp_listen_options.nodelay = true
+#
+# stomp.tcp_listen_options.exit_on_close = true
+# stomp.tcp_listen_options.send_timeout = 120
+
+## Proxy protocol support
+##
+# stomp.proxy_protocol = false
+
+## TLS listeners
+## See https://rabbitmq.com/stomp.html and https://rabbitmq.com/ssl.html for details.
+# stomp.listeners.ssl.default = 61614
+#
+# ssl_options.cacertfile = path/to/cacert.pem
+# ssl_options.certfile = path/to/cert.pem
+# ssl_options.keyfile = path/to/key.pem
+# ssl_options.verify = verify_peer
+# ssl_options.fail_if_no_peer_cert = true
+
+
+## Number of Erlang processes that will accept connections for the TCP
+## and TLS listeners.
+##
+# stomp.num_acceptors.tcp = 10
+# stomp.num_acceptors.ssl = 1
+
+## Additional TLS options
+
+## Extract a name from the client's certificate when using TLS.
+##
+# stomp.ssl_cert_login = true
+
+## Set a default user name and password. This is used as the default login
+## whenever a CONNECT frame omits the login and passcode headers.
+##
+## Please note that setting this will allow clients to connect without
+## authenticating!
+##
+# stomp.default_user = guest
+# stomp.default_pass = guest
+
+## If a default user is configured, or you have configured TLS client
+## certificate-based authentication, you can choose to allow clients to
+## omit the CONNECT frame entirely. If set to true, the client is
+## automatically connected as the default user or the user supplied in the
+## TLS certificate whenever the first frame sent on a session is not a
+## CONNECT frame.
+##
+# stomp.implicit_connect = true
+
+## Whether or not to enable proxy protocol support.
+## Once enabled, clients cannot directly connect to the broker
+## anymore. They must connect through a load balancer that sends the
+## proxy protocol header to the broker at connection time.
+## This setting applies only to STOMP clients, other protocols
+## like MQTT or AMQP have their own setting to enable proxy protocol.
+## See the plugins or broker documentation for more information.
+##
+# stomp.proxy_protocol = false
+
+## ----------------------------------------------------------------------------
+## RabbitMQ MQTT Adapter
+##
+## See https://github.com/rabbitmq/rabbitmq-mqtt/blob/stable/README.md
+## for details
+## ----------------------------------------------------------------------------
+
+# =======================================
+# MQTT section
+# =======================================
+
+## TCP listener settings.
+##
+# mqtt.listeners.tcp.1 = 127.0.0.1:1883
+# mqtt.listeners.tcp.2 = ::1:1883
+
+## TCP listener options (as per the broker configuration).
+##
+# mqtt.tcp_listen_options.backlog = 4096
+# mqtt.tcp_listen_options.recbuf = 131072
+# mqtt.tcp_listen_options.sndbuf = 131072
+#
+# mqtt.tcp_listen_options.keepalive = true
+# mqtt.tcp_listen_options.nodelay = true
+#
+# mqtt.tcp_listen_options.exit_on_close = true
+# mqtt.tcp_listen_options.send_timeout = 120
+
+## TLS listener settings
+## See https://rabbitmq.com/mqtt.html and https://rabbitmq.com/ssl.html for details.
+#
+# mqtt.listeners.ssl.default = 8883
+#
+# ssl_options.cacertfile = /path/to/tls/ca_certificate_bundle.pem
+# ssl_options.certfile = /path/to/tls/server_certificate.pem
+# ssl_options.keyfile = /path/to/tls/server_key.pem
+# ssl_options.verify = verify_peer
+# ssl_options.fail_if_no_peer_cert = true
+#
+
+
+## Number of Erlang processes that will accept connections for the TCP
+## and TLS listeners.
+##
+# mqtt.num_acceptors.tcp = 10
+# mqtt.num_acceptors.ssl = 10
+
+## Whether or not to enable proxy protocol support.
+## Once enabled, clients cannot directly connect to the broker
+## anymore. They must connect through a load balancer that sends the
+## proxy protocol header to the broker at connection time.
+## This setting applies only to MQTT clients; other protocols
+## like STOMP or AMQP have their own setting to enable proxy protocol.
+## See the plugins or broker documentation for more information.
+##
+# mqtt.proxy_protocol = false
+
+## Set the default user name and password used for anonymous connections (when a client
+## provides no credentials). Anonymous connections are highly discouraged!
+##
+# mqtt.default_user = guest
+# mqtt.default_pass = guest
+
+## Enable anonymous connections. If this is set to false, clients MUST provide
+## credentials in order to connect. See also the mqtt.default_user/mqtt.default_pass
+## keys. Anonymous connections are highly discouraged!
+##
+# mqtt.allow_anonymous = true
+
+## If you have multiple vhosts, specify the one to which the
+## adapter connects.
+##
+# mqtt.vhost = /
+
+## Specify the exchange to which messages from MQTT clients are published.
+##
+# mqtt.exchange = amq.topic
+
+## Specify TTL (time to live) to control the lifetime of non-clean sessions.
+##
+# mqtt.subscription_ttl = 1800000
+
+## Set the prefetch count (governing the maximum number of unacknowledged
+## messages that will be delivered).
+##
+# mqtt.prefetch = 10
+
+
+## ----------------------------------------------------------------------------
+## RabbitMQ AMQP 1.0 Support
+##
+## See https://github.com/rabbitmq/rabbitmq-amqp1.0/blob/stable/README.md.
+## ----------------------------------------------------------------------------
+
+# =======================================
+# AMQP 1.0 section
+# =======================================
+
+
+## Connections that are not authenticated with SASL will connect as this
+## account. See the README for more information.
+##
+## Please note that setting this will allow clients to connect without
+## authenticating!
+##
+# amqp1_0.default_user = guest
+
+## Enable protocol strict mode. See the README for more information.
+##
+# amqp1_0.protocol_strict_mode = false
+
+## Logging settings.
+##
+## See https://rabbitmq.com/logging.html and https://github.com/erlang-lager/lager for details.
+##
+
+## Log directory, taken from the RABBITMQ_LOG_BASE env variable by default.
+##
+# log.dir = /var/log/rabbitmq
+
+## Logging to file. Can be false or a filename.
+## Default:
+# log.file = rabbit.log
+
+## To disable logging to a file
+# log.file = false
+
+## Log level for file logging
+##
+# log.file.level = info
+
+## File rotation config. No rotation by default.
+## DO NOT SET the rotation date to ''. If '' is the desired value, leave it unset instead.
+# log.file.rotation.date = $D0
+# log.file.rotation.size = 0
+
+## Logging to console (can be true or false)
+##
+# log.console = false
+
+## Log level for console logging
+##
+# log.console.level = info
+
+## Logging to the amq.rabbitmq.log exchange (can be true or false)
+##
+# log.exchange = false
+
+## Log level to use when logging to the amq.rabbitmq.log exchange
+##
+# log.exchange.level = info
+
+
+
+## ----------------------------------------------------------------------------
+## RabbitMQ LDAP Plugin
+##
+## Related doc guide: https://rabbitmq.com/ldap.html.
+##
+## ----------------------------------------------------------------------------
+
+# =======================================
+# LDAP section
+# =======================================
+
+##
+## Connecting to the LDAP server(s)
+## ================================
+##
+
+## Specify servers to bind to. You *must* set this in order for the plugin
+## to work properly.
+##
+# auth_ldap.servers.1 = your-server-name-goes-here
+
+## You can define multiple servers
+# auth_ldap.servers.2 = your-other-server
+
+## Connect to the LDAP server using TLS
+##
+# auth_ldap.use_ssl = false
+
+## Specify the LDAP port to connect to
+##
+# auth_ldap.port = 389
+
+## LDAP connection timeout, in milliseconds or 'infinity'
+##
+# auth_ldap.timeout = infinity
+
+## Or a number of milliseconds:
+# auth_ldap.timeout = 500
+
+## Enable logging of LDAP queries.
+## One of
+## - false (no logging is performed)
+## - true (verbose logging of the logic used by the plugin)
+## - network (as true, but additionally logs LDAP network traffic)
+##
+## Defaults to false.
+##
+# auth_ldap.log = false
+
+## It can also be set to true or network:
+# auth_ldap.log = true
+# auth_ldap.log = network
+
+##
+## Authentication
+## ==============
+##
+
+## Pattern to convert the username given through AMQP to a DN before
+## binding
+##
+# auth_ldap.user_dn_pattern = cn=${username},ou=People,dc=example,dc=com
+
+## Alternatively, you can convert a username to a Distinguished
+## Name via an LDAP lookup after binding. See the documentation for
+## full details.
+
+## When converting a username to a DN via a lookup, set these to
+## the name of the attribute that represents the user name, and the
+## base DN for the lookup query.
+##
+# auth_ldap.dn_lookup_attribute = userPrincipalName
+# auth_ldap.dn_lookup_base = DC=gopivotal,DC=com
+
+## Controls how to bind for authorisation queries and also to
+## retrieve the details of users logging in without presenting a
+## password (e.g., SASL EXTERNAL).
+## One of
+## - as_user (to bind as the authenticated user - requires a password)
+## - anon (to bind anonymously)
+## - {UserDN, Password} (to bind with a specified user name and password)
+##
+## Defaults to 'as_user'.
+##
+# auth_ldap.other_bind = as_user
+
+## Or it can be more complex:
+# auth_ldap.other_bind.user_dn = User
+# auth_ldap.other_bind.password = Password
+
+## If user_dn and password are defined, the other options are ignored.
+
+# -----------------------------
+# Advanced LDAP configuration
+# -----------------------------
+
+##
+## Authorisation
+## =============
+##
+
+## The LDAP plugin can perform a variety of queries against your
+## LDAP server to determine questions of authorisation.
+##
+## Related doc guide: https://rabbitmq.com/ldap.html#authorisation.
+
+## The following configuration should be defined in the advanced.config file.
+## DO NOT UNCOMMENT THESE LINES!
+
+## Set the query to use when determining vhost access
+##
+## {vhost_access_query, {in_group,
+## "ou=${vhost}-users,ou=vhosts,dc=example,dc=com"}},
+
+## Set the query to use when determining resource (e.g., queue) access
+##
+## {resource_access_query, {constant, true}},
+
+## Set queries to determine which tags a user has
+##
+## {tag_queries, []}
+# ]},
+# -----------------------------
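+
+## For reference, a minimal advanced.config sketch showing where the
+## queries above would live (under the rabbitmq_auth_backend_ldap
+## application; the query values are illustrative and taken from the
+## comments above):
+##
+## [
+##   {rabbitmq_auth_backend_ldap, [
+##     {vhost_access_query,
+##       {in_group, "ou=${vhost}-users,ou=vhosts,dc=example,dc=com"}},
+##     {resource_access_query, {constant, true}},
+##     {tag_queries, []}
+##   ]}
+## ].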
diff --git a/deps/rabbit/docs/rabbitmqctl.8 b/deps/rabbit/docs/rabbitmqctl.8
new file mode 100644
index 0000000000..3e041ad2c8
--- /dev/null
+++ b/deps/rabbit/docs/rabbitmqctl.8
@@ -0,0 +1,2424 @@
+.\" vim:ft=nroff:
+.\" This Source Code Form is subject to the terms of the Mozilla Public
+.\" License, v. 2.0. If a copy of the MPL was not distributed with this
+.\" file, You can obtain one at https://mozilla.org/MPL/2.0/.
+.\"
+.\" Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+.\"
+.Dd September 28, 2019
+.Dt RABBITMQCTL 8
+.Os "RabbitMQ Server"
+.Sh NAME
+.Nm rabbitmqctl
+.Nd tool for managing RabbitMQ nodes
+.\" ------------------------------------------------------------------------------------------------
+.Sh SYNOPSIS
+.\" ------------------------------------------------------------------------------------------------
+.Nm
+.Op Fl q
+.Op Fl s
+.Op Fl l
+.Op Fl n Ar node
+.Op Fl t Ar timeout
+.Ar command
+.Op Ar command_options
+.\" ------------------------------------------------------------------------------------------------
+.Sh DESCRIPTION
+.\" ------------------------------------------------------------------------------------------------
+RabbitMQ is an open source multi-protocol messaging broker.
+.Pp
+.Nm
+is a command line tool for managing a RabbitMQ server node.
+It performs all actions by connecting to the target RabbitMQ node
+on a dedicated CLI tool communication port and authenticating
+using a shared secret (known as the cookie file).
+.Pp
+Diagnostic information is displayed if the connection failed,
+the target node was not running, or
+.Nm
+could not authenticate to
+the target node successfully.
+To learn more, see the
+.Lk https://www.rabbitmq.com/cli.html "RabbitMQ CLI Tools guide"
+and
+.Lk https://www.rabbitmq.com/networking.html "RabbitMQ Networking guide"
+.\" ------------------------------------------------------------------------------------------------
+.Sh OPTIONS
+.\" ------------------------------------------------------------------------------------------------
+.Bl -tag -width Ds
+.It Fl n Ar node
+Default node is
+.Qq Ar rabbit@target-hostname ,
+where
+.Ar target-hostname
+is the local host.
+On a host named
+.Qq myserver.example.com ,
+the node name will usually be
+.Qq rabbit@myserver
+(unless
+.Ev RABBITMQ_NODENAME
+has been overridden).
+The output of
+.Qq hostname -s
+is usually the correct suffix to use after the
+.Qq @
+sign.
+See
+.Xr rabbitmq-server 8
+for details of configuring a RabbitMQ node.
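+.Pp
+For example, to run a command against a node named
+.Qq rabbit@myserver :
+.sp
+.Dl rabbitmqctl -n rabbit@myserver cluster_status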
+.It Fl q , -quiet
+Quiet output mode is selected.
+Informational messages are reduced when quiet mode is in effect.
+.It Fl s , -silent
+Silent output mode is selected.
+Informational messages are reduced and table headers are suppressed when silent mode is in effect.
+.It Fl -no-table-headers
+Do not output headers for tabular data.
+.It Fl -dry-run
+Do not run the command.
+Only print an informational message.
+.It Fl t Ar timeout , Fl -timeout Ar timeout
+Operation timeout in seconds.
+Not all commands support timeouts.
+Default is
+.Cm infinity .
+.It Fl l , Fl -longnames
+Must be specified when the cluster is configured to use long (FQDN) node names.
+To learn more, see the
+.Lk https://www.rabbitmq.com/clustering.html "RabbitMQ Clustering guide"
+.It Fl -erlang-cookie Ar cookie
+Shared secret to use to authenticate to the target node.
+Prefer using a local file or the
+.Ev RABBITMQ_ERLANG_COOKIE
+environment variable instead of specifying this option on the command line.
+To learn more, see the
+.Lk https://www.rabbitmq.com/cli.html "RabbitMQ CLI Tools guide"
+.El
+.\" ------------------------------------------------------------------------------------------------
+.Sh COMMANDS
+.\" ------------------------------------------------------------------------------------------------
+.Bl -tag -width Ds
+.It Cm help Oo Fl l Oc Op Ar command_name
+.Pp
+Prints usage for all available commands.
+.Bl -tag -width Ds
+.It Fl l , Fl -list-commands
+List command usages only, without parameter explanation.
+.It Ar command_name
+Prints usage for the specified command.
+.El
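+.Pp
+For example, to display usage for the
+.Cm stop_app
+command:
+.sp
+.Dl rabbitmqctl help stop_app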
+.\" ------------------------------------------------------------------
+.It Cm version
+.Pp
+Displays the version of the CLI tools.
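+.Pp
+For example:
+.sp
+.Dl rabbitmqctl version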
+.El
+.\" ------------------------------------------------------------------
+.\" ## Nodes
+.\" ------------------------------------------------------------------
+.Ss Nodes
+.Bl -tag -width Ds
+.\" ------------------------------------------------------------------
+.It Cm await_startup
+.Pp
+Waits for the RabbitMQ application to start on the target node
+.Pp
+For example, to wait for the RabbitMQ application to start:
+.sp
+.Dl rabbitmqctl await_startup
+.\" ------------------------------------------------------------------
+.It Cm reset
+.Pp
+Returns a RabbitMQ node to its virgin state.
+.Pp
+Removes the node from any cluster it belongs to, removes all data from
+the management database, such as configured users and vhosts, and
+deletes all persistent messages.
+.Pp
+For
+.Cm reset
+and
+.Cm force_reset
+to succeed the RabbitMQ application must have been stopped, e.g. with
+.Cm stop_app .
+.Pp
+For example, to reset the RabbitMQ node:
+.sp
+.Dl rabbitmqctl reset
+.\" ------------------------------------------------------------------
+.It Cm rotate_logs
+.Pp
+Instructs the RabbitMQ node to perform internal log rotation.
+.Pp
+Log rotation is performed according to lager settings specified in
+the configuration file.
+.Pp
+Note that there is no need to call this command in case of external log
+rotation (e.g. from logrotate(8)), because lager detects renames and
+automatically reopens log files.
+.Pp
+For example, this command starts internal log rotation
+process:
+.sp
+.Dl rabbitmqctl rotate_logs
+.Pp
+Rotation is performed asynchronously, so there is no guarantee that it
+will be completed when this command returns.
+.\" ------------------------------------------------------------------
+.It Cm shutdown
+.Pp
+Shuts down the node, both RabbitMQ and its runtime.
+The command is blocking and will return after the runtime process exits.
+If RabbitMQ fails to stop, it will return a non-zero exit code.
+This command infers the OS PID of the target node and
+therefore can only be used to shut down nodes running on the same
+host (or broadly speaking, in the same operating system,
+e.g. in the same VM or container)
+.Pp
+Unlike the stop command, the shutdown command:
+.Bl -bullet
+.It
+does not require a
+.Ar pid_file
+to wait for the runtime process to exit
+.It
+returns a non-zero exit code if the RabbitMQ node is not running
+.El
+.Pp
+For example, this will shut down a locally running RabbitMQ node
+with default node name:
+.sp
+.Dl rabbitmqctl shutdown
+.\" ------------------------------------------------------------------
+.It Cm start_app
+.Pp
+Starts the RabbitMQ application.
+.Pp
+This command is typically run after performing other management actions
+that required the RabbitMQ application to be stopped, e.g.\&
+.Cm reset .
+.Pp
+For example, to instruct the RabbitMQ node to start the RabbitMQ
+application:
+.sp
+.Dl rabbitmqctl start_app
+.\" ------------------------------------------------------------------
+.It Cm stop Op Ar pid_file
+.Pp
+Stops the Erlang node on which RabbitMQ is running.
+To restart the node follow the instructions for
+.Qq Running the Server
+in the
+.Lk https://rabbitmq.com/download.html installation guide .
+.Pp
+If a
+.Ar pid_file
+is specified, also waits for the process specified there to terminate.
+See the description of the
+.Cm wait
+command for details on this file.
+.Pp
+For example, to instruct the RabbitMQ node to terminate:
+.sp
+.Dl rabbitmqctl stop
+.\" ------------------------------------------------------------------
+.It Cm stop_app
+.Pp
+Stops the RabbitMQ application, leaving the runtime (Erlang VM) running.
+.Pp
+This command is typically run prior to performing other management
+actions that require the RabbitMQ application to be stopped, e.g.\&
+.Cm reset .
+.Pp
+For example, to instruct the RabbitMQ node to stop the RabbitMQ
+application:
+.sp
+.Dl rabbitmqctl stop_app
+.\" ------------------------------------------------------------------
+.It Cm wait Ar pid_file , Cm wait Fl -pid Ar pid
+.Pp
+Waits for the RabbitMQ application to start.
+.Pp
+This command will wait for the RabbitMQ application to start at the
+node.
+It will wait for the pid file to be created if
+.Ar pid_file
+is specified, then for a process with a pid specified in the pid file or
+the
+.Fl -pid
+argument, and then for the RabbitMQ application to start in that process.
+It will fail if the process terminates without starting the RabbitMQ
+application.
+.Pp
+If the specified pid file is not created or the Erlang node is not started within
+.Fl -timeout
+the command will fail.
+Default timeout is 10 seconds.
+.Pp
+A suitable pid file is created by the
+.Xr rabbitmq-server 8
+script.
+By default this is located in the Mnesia directory.
+Modify the
+.Ev RABBITMQ_PID_FILE
+environment variable to change the location.
+.Pp
+For example, this command will return when the RabbitMQ node has started
+up:
+.sp
+.Dl rabbitmqctl wait /var/run/rabbitmq/pid
+.El
+.\" ------------------------------------------------------------------
+.\" ## Cluster Operations
+.\" ------------------------------------------------------------------
+.Ss Cluster management
+.Bl -tag -width Ds
+.\" ------------------------------------------------------------------
+.It Cm await_online_nodes Ar count
+.Pp
+Waits for
+.Ar count
+nodes to join the cluster
+.Pp
+For example, to wait for two RabbitMQ nodes to start:
+.sp
+.Dl rabbitmqctl await_online_nodes 2
+.\" ------------------------------------------------------------------
+.It Cm change_cluster_node_type Ar type
+.Pp
+Changes the type of the cluster node.
+.Pp
+The
+.Ar type
+must be one of the following:
+.Bl -bullet -compact
+.It
+.Cm disc
+.It
+.Cm ram
+.El
+.Pp
+The node must be stopped for this operation to succeed, and when turning
+a node into a RAM node the node must not be the only disc node in the
+cluster.
+.Pp
+For example, this command will turn a RAM node into a disc node:
+.sp
+.Dl rabbitmqctl change_cluster_node_type disc
+.\" ------------------------------------------------------------------
+.It Cm cluster_status
+.Pp
+Displays all the nodes in the cluster grouped by node type, together
+with the currently running nodes.
+.Pp
+For example, this command displays the nodes in the cluster:
+.sp
+.Dl rabbitmqctl cluster_status
+.\" ------------------------------------------------------------------
+.It Cm force_boot
+.Pp
+Ensures that the node will start next time, even if it was not the last
+to shut down.
+.Pp
+Normally when you shut down a RabbitMQ cluster altogether, the first
+node you restart should be the last one to go down, since it may have
+seen things happen that other nodes did not.
+But sometimes that's not possible: for instance if the entire cluster
+loses power then all nodes may think they were not the last to shut
+down.
+.Pp
+In such a case you can invoke
+.Cm force_boot
+while the node is down.
+This will tell the node to unconditionally start next time you ask it
+to.
+If any changes happened to the cluster after this node shut down, they
+will be lost.
+.Pp
+If the last node to go down is permanently lost then you should use
+.Cm forget_cluster_node Fl -offline
+in preference to this command, as it will ensure that mirrored queues
+which were mastered on the lost node get promoted.
+.Pp
+For example, this will force the node not to wait for other nodes next
+time it is started:
+.sp
+.Dl rabbitmqctl force_boot
+.\" ------------------------------------------------------------------
+.It Cm force_reset
+.Pp
+Forcefully returns a RabbitMQ node to its virgin state.
+.Pp
+The
+.Cm force_reset
+command differs from
+.Cm reset
+in that it resets the node unconditionally, regardless of the current
+management database state and cluster configuration.
+It should only be used as a last resort if the database or cluster
+configuration has been corrupted.
+.Pp
+For
+.Cm reset
+and
+.Cm force_reset
+to succeed the RabbitMQ application must have been stopped, e.g. with
+.Cm stop_app .
+.Pp
+For example, to reset the RabbitMQ node:
+.sp
+.Dl rabbitmqctl force_reset
+.\" ------------------------------------------------------------------
+.It Cm forget_cluster_node Op Fl -offline
+.Bl -tag -width Ds
+.It Fl -offline
+Enables node removal from an offline node.
+This is only useful in the situation where all the nodes are offline and
+the last node to go down cannot be brought online, thus preventing the
+whole cluster from starting.
+It should not be used in any other circumstances since it can lead to
+inconsistencies.
+.El
+.Pp
+Removes a cluster node remotely.
+The node that is being removed must be offline, while the node we are
+removing from must be online, except when using the
+.Fl -offline
+flag.
+.Pp
+When using the
+.Fl -offline
+flag,
+.Nm
+will not attempt to connect to a node as normal; instead it will
+temporarily become the node in order to make the change.
+This is useful if the node cannot be started normally.
+In this case the node will become the canonical source for cluster
+metadata (e.g. which queues exist), even if it was not before.
+Therefore you should use this command on the last node to shut down if
+at all possible.
+.Pp
+For example, this command will remove the node
+.Qq rabbit@stringer
+from the node
+.Qq hare@mcnulty :
+.sp
+.Dl rabbitmqctl -n hare@mcnulty forget_cluster_node rabbit@stringer
+.\" ------------------------------------------------------------------
+.It Cm join_cluster Ar seed-node Op Fl -ram
+.Bl -tag -width Ds
+.It Ar seed-node
+Existing cluster member (seed node) to cluster with.
+.It Fl -ram
+If provided, the node will join the cluster as a RAM node.
+RAM node use is discouraged. Use them only if you understand
+exactly why you need them.
+.El
+.Pp
+Instructs the node to become a member of the cluster that the specified
+node is in.
+Before clustering, the node is reset, so be careful when using this
+command.
+For this command to succeed the RabbitMQ application must have been
+stopped, e.g. with
+.Cm stop_app .
+.Pp
+Cluster nodes can be of two types: disc or RAM.
+Disc nodes replicate data in RAM and on disc, thus providing redundancy
+in the event of node failure and recovery from global events such as
+power failure across all nodes.
+RAM nodes replicate data in RAM only (with the exception of queue
+contents, which can reside on disc if the queue is persistent or too big
+to fit in memory) and are mainly used for scalability.
+RAM nodes are more performant only when managing resources (e.g.\&
+adding/removing queues, exchanges, or bindings).
+A cluster must always have at least one disc node, and usually should
+have more than one.
+.Pp
+The node will be a disc node by default.
+If you wish to create a RAM node, provide the
+.Fl -ram
+flag.
+.Pp
+After executing the
+.Cm join_cluster
+command, whenever the RabbitMQ application is started on the current
+node it will attempt to connect to the nodes that were in the cluster
+when the node went down.
+.Pp
+To leave a cluster,
+.Cm reset
+the node.
+You can also remove nodes remotely with the
+.Cm forget_cluster_node
+command.
+.Pp
+For example, this command instructs the RabbitMQ node to join the cluster that
+.Qq hare@elena
+is part of, as a RAM node:
+.sp
+.Dl rabbitmqctl join_cluster hare@elena --ram
+.Pp
+To learn more, see the
+.Lk https://www.rabbitmq.com/clustering.html "RabbitMQ Clustering guide".
+.\" ------------------------------------------------------------------
+.It Cm rename_cluster_node Ar oldnode1 Ar newnode1 Op Ar oldnode2 Ar newnode2 ...
+.Pp
+Supports renaming of cluster nodes in the local database.
+.Pp
+This subcommand causes
+.Nm
+to temporarily become the node in order to make the change.
+The local cluster node must therefore be completely stopped; other nodes
+can be online or offline.
+.Pp
+This subcommand takes an even number of arguments, in pairs representing
+the old and new names for nodes.
+You must specify the old and new names for this node and for any other
+nodes that are stopped and being renamed at the same time.
+.Pp
+It is possible to stop all nodes and rename them all simultaneously (in
+which case old and new names for all nodes must be given to every node)
+or stop and rename nodes one at a time (in which case each node only
+needs to be told how its own name is changing).
+.Pp
+For example, this command will rename the node
+.Qq rabbit@misshelpful
+to the node
+.Qq rabbit@cordelia
+.sp
+.Dl rabbitmqctl rename_cluster_node rabbit@misshelpful rabbit@cordelia
+.Pp
+Note that this command only changes the local database.
+It may also be necessary to rename the local database directories,
+and to configure the new node name.
+For example:
+.sp
+.Bl -enum -compact
+.It
+Stop the node:
+.sp
+.Dl rabbitmqctl stop rabbit@misshelpful
+.sp
+.It
+Rename the node in the local database:
+.sp
+.Dl rabbitmqctl rename_cluster_node rabbit@misshelpful rabbit@cordelia
+.sp
+.It
+Rename the local database directories (note, you do not need to do this
+if you have set the RABBITMQ_MNESIA_DIR environment variable):
+.sp
+.Bd -literal -offset indent -compact
+mv \\
+ /var/lib/rabbitmq/mnesia/rabbit\\@misshelpful \\
+ /var/lib/rabbitmq/mnesia/rabbit\\@cordelia
+mv \\
+ /var/lib/rabbitmq/mnesia/rabbit\\@misshelpful-rename \\
+ /var/lib/rabbitmq/mnesia/rabbit\\@cordelia-rename
+mv \\
+ /var/lib/rabbitmq/mnesia/rabbit\\@misshelpful-plugins-expand \\
+ /var/lib/rabbitmq/mnesia/rabbit\\@cordelia-plugins-expand
+.Ed
+.sp
+.It
+If the node name is configured, e.g. using
+.Ar /etc/rabbitmq/rabbitmq-env.conf ,
+it must also be updated there.
+.sp
+.It
+Start the node when ready
+.El
+.\" ------------------------------------------------------------------
+.It Cm update_cluster_nodes Ar clusternode
+.Bl -tag -width Ds
+.It Ar clusternode
+The node to consult for up-to-date information.
+.El
+.Pp
+Instructs an already clustered node to contact
+.Ar clusternode
+to cluster when booting up.
+This is different from
+.Cm join_cluster
+since it does not join any cluster - it checks that the node is already
+in a cluster with
+.Ar clusternode .
+.Pp
+The need for this command is motivated by the fact that clusters can
+change while a node is offline.
+Consider a situation where node
+.Va rabbit@A
+and
+.Va rabbit@B
+are clustered.
+.Va rabbit@A
+goes down,
+.Va rabbit@C
+clusters with
+.Va rabbit@B ,
+and then
+.Va rabbit@B
+leaves the cluster.
+When
+.Va rabbit@A
+starts back up, it'll try to contact
+.Va rabbit@B ,
+but this will fail since
+.Va rabbit@B
+is not in the cluster anymore.
+The following command instructs node
+.Va rabbit@A
+to consult
+.Va rabbit@C
+for up-to-date cluster information when it boots:
+.sp
+.Dl rabbitmqctl update_cluster_nodes -n rabbit@A rabbit@C
+.Pp
+To learn more, see the
+.Lk https://www.rabbitmq.com/clustering.html "RabbitMQ Clustering guide"
+.El
+.\" ------------------------------------------------------------------
+.\" ## Classic Mirrored Queues
+.\" ------------------------------------------------------------------
+.Ss Replication
+.Bl -tag -width Ds
+.\" ------------------------------------------------------------------
+.It Cm sync_queue Oo Fl p Ar vhost Oc Ar queue
+.Bl -tag -width Ds
+.It Ar queue
+The name of the queue to synchronise.
+.El
+.Pp
+Instructs a mirrored queue with unsynchronised mirrors (follower replicas)
+to synchronise them.
+The queue will block while synchronisation takes place (all publishers
+to and consumers using the queue will block or temporarily see no activity).
+This command can only be used with mirrored queues.
+To learn more, see the
+.Lk https://www.rabbitmq.com/ha.html "RabbitMQ Mirroring guide"
+.Pp
+Note that queues with unsynchronised replicas and active consumers
+will become synchronised eventually (assuming that consumers make progress).
+This command is primarily useful for queues which do not have active consumers.
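+.Pp
+For example, this command instructs the RabbitMQ broker to synchronise
+a queue named
+.Qq my-queue
+in the virtual host
+.Qq my-vhost
+(placeholder names):
+.sp
+.Dl rabbitmqctl sync_queue -p my-vhost my-queue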
+.\" ------------------------------------------------------------------
+.It Cm cancel_sync_queue Oo Fl p Ar vhost Oc Ar queue
+.Bl -tag -width Ds
+.It Ar queue
+The name of the queue to cancel synchronisation for.
+.El
+.Pp
+Instructs a synchronising mirrored queue to stop synchronising itself.
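+.Pp
+For example, using the same placeholder names:
+.sp
+.Dl rabbitmqctl cancel_sync_queue -p my-vhost my-queue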
+.El
+.\" ------------------------------------------------------------------
+.\" ## User management
+.\" ------------------------------------------------------------------
+.Ss User Management
+Note that
+.Nm
+can only manage users in the internal RabbitMQ database.
+Users from any alternative authentication backends such as LDAP cannot be inspected
+or managed with these commands.
+.Bl -tag -width Ds
+.\" ------------------------------------------------------------------
+.It Cm add_user Ar username Ar password
+.Bl -tag -width Ds
+.It Ar username
+The name of the user to create.
+.It Ar password
+The password the created user will use to log in to the broker.
+.El
+.Pp
+For example, this command instructs the RabbitMQ broker to create a (non-administrative) user named
+.Qq janeway
+with (initial) password
+.Qq changeit :
+.sp
+.Dl rabbitmqctl add_user janeway changeit
+.\" ------------------------------------------------------------------
+.It Cm authenticate_user Ar username Ar password
+.Bl -tag -width Ds
+.It Ar username
+The name of the user.
+.It Ar password
+The password of the user.
+.El
+.Pp
+For example, this command instructs the RabbitMQ broker to authenticate the user named
+.Qq janeway
+with password
+.Qq verifyit :
+.sp
+.Dl rabbitmqctl authenticate_user janeway verifyit
+.\" ------------------------------------------------------------------
+.It Cm change_password Ar username Ar newpassword
+.Bl -tag -width Ds
+.It Ar username
+The name of the user whose password is to be changed.
+.It Ar newpassword
+The new password for the user.
+.El
+.Pp
+For example, this command instructs the RabbitMQ broker to change the
+password for the user named
+.Qq janeway
+to
+.Qq newpass :
+.sp
+.Dl rabbitmqctl change_password janeway newpass
+.\" ------------------------------------------------------------------
+.It Cm clear_password Ar username
+.Bl -tag -width Ds
+.It Ar username
+The name of the user whose password is to be cleared.
+.El
+.Pp
+For example, this command instructs the RabbitMQ broker to clear the
+password for the user named
+.Qq janeway :
+.sp
+.Dl rabbitmqctl clear_password janeway
+.Pp
+This user now cannot log in with a password (but may still be able to
+log in via e.g. SASL EXTERNAL, if configured).
+.\" ------------------------------------------------------------------
+.It Cm delete_user Ar username
+.Bl -tag -width Ds
+.It Ar username
+The name of the user to delete.
+.El
+.Pp
+For example, this command instructs the RabbitMQ broker to delete the user named
+.Qq janeway :
+.sp
+.Dl rabbitmqctl delete_user janeway
+.\" ------------------------------------------------------------------
+.It Cm list_users
+.Pp
+Lists users.
+Each result row will contain the user name followed by a list of the
+tags set for that user.
+.Pp
+For example, this command instructs the RabbitMQ broker to list all users:
+.sp
+.Dl rabbitmqctl list_users
+.\" ------------------------------------------------------------------
+.It Cm set_user_tags Ar username Op Ar tag ...
+.Bl -tag -width Ds
+.It Ar username
+The name of the user whose tags are to be set.
+.It Ar tag
+Zero, one or more tags to set.
+Any existing tags will be removed.
+.El
+.Pp
+For example, this command instructs the RabbitMQ broker to ensure the user named
+.Qq janeway
+is an administrator:
+.sp
+.Dl rabbitmqctl set_user_tags janeway administrator
+.Pp
+This has no effect when the user logs in via AMQP, but can be used to
+permit the user to manage users, virtual hosts and permissions when
+the user logs in via some other means (for example with the management
+plugin).
+.Pp
+This command instructs the RabbitMQ broker to remove any tags from the user named
+.Qq janeway :
+.sp
+.Dl rabbitmqctl set_user_tags janeway
+.El
+.\" ------------------------------------------------------------------
+.\" ## Access Control
+.\" ------------------------------------------------------------------
+.Ss Access control
+.Bl -tag -width Ds
+.\" ------------------------------------------------------------------
+.It Cm clear_permissions Oo Fl p Ar vhost Oc Ar username
+.Bl -tag -width Ds
+.It Ar vhost
+The name of the virtual host to which to deny the user access,
+defaulting to
+.Qq / .
+.It Ar username
+The name of the user to deny access to the specified virtual host.
+.El
+.Pp
+Clears user permissions.
+.Pp
+For example, this command instructs the RabbitMQ broker to deny the user
+named
+.Qq janeway
+access to the virtual host called
+.Qq my-vhost :
+.sp
+.Dl rabbitmqctl clear_permissions -p my-vhost janeway
+.\" ------------------------------------------------------------------
+.It Cm clear_topic_permissions Oo Fl p Ar vhost Oc Ar username Oo Ar exchange Oc
+.Bl -tag -width Ds
+.It Ar vhost
+The name of the virtual host in which to clear the topic permissions,
+defaulting to
+.Qq / .
+.It Ar username
+The name of the user whose topic permissions will be cleared in the specified virtual host.
+.It Ar exchange
+The name of the topic exchange to clear topic permissions for, defaulting to all the
+topic exchanges the given user has topic permissions for.
+.El
+.Pp
+Clears user topic permissions.
+.Pp
+For example, this command instructs the RabbitMQ broker to remove topic permissions for user
+named
+.Qq janeway
+for the topic exchange
+.Qq amq.topic
+in the virtual host called
+.Qq my-vhost :
+.sp
+.Dl rabbitmqctl clear_topic_permissions -p my-vhost janeway amq.topic
+.\" ------------------------------------------------------------------
+.It Cm list_permissions Op Fl p Ar vhost
+.Bl -tag -width Ds
+.It Ar vhost
+The name of the virtual host for which to list the users that have been
+granted access to it, and their permissions.
+Defaults to
+.Qq / .
+.El
+.Pp
+Lists permissions in a virtual host.
+.Pp
+For example, this command instructs the RabbitMQ broker to list all the
+users which have been granted access to the virtual host called
+.Qq my-vhost ,
+and the permissions they have for operations on resources in that
+virtual host.
+Note that an empty string means no permissions granted:
+.sp
+.Dl rabbitmqctl list_permissions -p my-vhost
+.\" ------------------------------------------------------------------
+.It Cm list_topic_permissions Op Fl p Ar vhost
+.Bl -tag -width Ds
+.It Ar vhost
+The name of the virtual host for which to list the users' topic permissions.
+Defaults to
+.Qq / .
+.El
+.Pp
+Lists topic permissions in a virtual host.
+.Pp
+For example, this command instructs the RabbitMQ broker to list all the
+users which have been granted topic permissions in the virtual host called
+.Qq my-vhost :
+.sp
+.Dl rabbitmqctl list_topic_permissions -p my-vhost
+.\" ------------------------------------------------------------------
+.It Cm list_user_permissions Ar username
+.Bl -tag -width Ds
+.It Ar username
+The name of the user for which to list the permissions.
+.El
+.Pp
+Lists user permissions.
+.Pp
+For example, this command instructs the RabbitMQ broker to list all the
+virtual hosts to which the user named
+.Qq janeway
+has been granted access, and the permissions the user has for operations
+on resources in these virtual hosts:
+.sp
+.Dl rabbitmqctl list_user_permissions janeway
+.\" ------------------------------------------------------------------
+.It Cm list_user_topic_permissions Ar username
+.Bl -tag -width Ds
+.It Ar username
+The name of the user for which to list the topic permissions.
+.El
+.Pp
+Lists user topic permissions.
+.Pp
+For example, this command instructs the RabbitMQ broker to list all the
+virtual hosts to which the user named
+.Qq janeway
+has been granted access, and the topic permissions the user has in these virtual hosts:
+.sp
+.Dl rabbitmqctl list_user_topic_permissions janeway
+.\" ------------------------------------------------------------------
+.It Cm list_vhosts Op Ar vhostinfoitem ...
+.Pp
+Lists virtual hosts.
+.Pp
+The
+.Ar vhostinfoitem
+parameter is used to indicate which virtual host information items to
+include in the results.
+The column order in the results will match the order of the parameters.
+.Ar vhostinfoitem
+can take any value from the list that follows:
+.Bl -tag -width Ds
+.It Cm name
+The name of the virtual host with non-ASCII characters escaped as in C.
+.It Cm tracing
+Whether tracing is enabled for this virtual host.
+.El
+.Pp
+If no
+.Ar vhostinfoitem
+are specified then the vhost name is displayed.
+.Pp
+For example, this command instructs the RabbitMQ broker to list all
+virtual hosts:
+.sp
+.Dl rabbitmqctl list_vhosts name tracing
+.\" ------------------------------------------------------------------
+.It Cm set_permissions Oo Fl p Ar vhost Oc Ar user Ar conf Ar write Ar read
+.Bl -tag -width Ds
+.It Ar vhost
+The name of the virtual host to which to grant the user access,
+defaulting to
+.Qq / .
+.It Ar user
+The name of the user to grant access to the specified virtual host.
+.It Ar conf
+A regular expression matching resource names for which the user is
+granted configure permissions.
+.It Ar write
+A regular expression matching resource names for which the user is
+granted write permissions.
+.It Ar read
+A regular expression matching resource names for which the user is
+granted read permissions.
+.El
+.Pp
+Sets user permissions.
+.Pp
+For example, this command instructs the RabbitMQ broker to grant the
+user named
+.Qq janeway
+access to the virtual host called
+.Qq my-vhost ,
+with configure permissions on all resources whose names start with
+.Qq janeway- ,
+and write and read permissions on all resources:
+.sp
+.Dl rabbitmqctl set_permissions -p my-vhost janeway Qo ^janeway-.* Qc Qo .* Qc Qq .*
+.\" ------------------------------------------------------------------
+.It Cm set_topic_permissions Oo Fl p Ar vhost Oc Ar user Ar exchange Ar write Ar read
+.Bl -tag -width Ds
+.It Ar vhost
+The name of the virtual host to which to grant the user access,
+defaulting to
+.Qq / .
+.It Ar user
+The name of the user the permissions apply to in the target virtual host.
+.It Ar exchange
+The name of the topic exchange the authorisation check will be applied to.
+.It Ar write
+A regular expression matching the routing key of the published message.
+.It Ar read
+A regular expression matching the routing key of the consumed message.
+.El
+.Pp
+Sets user topic permissions.
+.Pp
+For example, this command instructs the RabbitMQ broker to let the
+user named
+.Qq janeway
+publish and consume messages going through the
+.Qq amq.topic
+exchange of the
+.Qq my-vhost
+virtual host with a routing key starting with
+.Qq janeway- :
+.sp
+.Dl rabbitmqctl set_topic_permissions -p my-vhost janeway amq.topic Qo ^janeway-.* Qc Qo ^janeway-.* Qc
+.Pp
+Topic permissions support variable expansion for the following variables:
+username, vhost, and client_id. Note that client_id is expanded only when using MQTT.
+The previous example could be made more generic by using
+.Qq ^{username}-.* :
+.sp
+.Dl rabbitmqctl set_topic_permissions -p my-vhost janeway amq.topic Qo ^{username}-.* Qc Qo ^{username}-.* Qc
+.El
+.\" ------------------------------------------------------------------
+.\" ## Monitoring and Observability
+.\" ------------------------------------------------------------------
+.Ss Monitoring, observability and health checks
+.Bl -tag -width Ds
+.\" ------------------------------------------------------------------
+.It Cm environment
+.Pp
+Displays the name and value of each variable in the application
+environment for each running application.
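+.Pp
+For example, this command displays the application environment on the
+target node:
+.sp
+.Dl rabbitmqctl environment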
+.\" ------------------------------------------------------------------
+.It Cm list_bindings Oo Fl p Ar vhost Oc Op Ar bindinginfoitem ...
+.Pp
+Returns binding details.
+By default the bindings for the
+.Qq /
+virtual host are returned.
+The
+.Fl p
+flag can be used to override this default.
+.Pp
+The
+.Ar bindinginfoitem
+parameter is used to indicate which binding information items to include
+in the results.
+The column order in the results will match the order of the parameters.
+.Ar bindinginfoitem
+can take any value from the list that follows:
+.Bl -tag -width Ds
+.It Cm source_name
+The name of the source of messages to which the binding is attached.
+With non-ASCII characters escaped as in C.
+.It Cm source_kind
+The kind of the source of messages to which the binding is attached.
+Currently always exchange.
+With non-ASCII characters escaped as in C.
+.It Cm destination_name
+The name of the destination of messages to which the binding is
+attached.
+With non-ASCII characters escaped as in C.
+.It Cm destination_kind
+The kind of the destination of messages to which the binding is
+attached.
+With non-ASCII characters escaped as in C.
+.It Cm routing_key
+The binding's routing key, with non-ASCII characters escaped as in C.
+.It Cm arguments
+The binding's arguments.
+.El
+.Pp
+If no
+.Ar bindinginfoitem
+are specified then all the above items are displayed.
+.Pp
+For example, this command displays the binding source name and
+destination name of the bindings in the virtual host named
+.Qq my-vhost :
+.sp
+.Dl rabbitmqctl list_bindings -p my-vhost source_name destination_name
+.\" ------------------------------------------------------------------
+.It Cm list_channels Op Ar channelinfoitem ...
+.Pp
+Returns information on all current channels, the logical containers
+executing most AMQP commands.
+This includes channels that are part of ordinary AMQP connections, and
+channels created by various plug-ins and other extensions.
+.Pp
+The
+.Ar channelinfoitem
+parameter is used to indicate which channel information items to include
+in the results.
+The column order in the results will match the order of the parameters.
+.Ar channelinfoitem
+can take any value from the list that follows:
+.Bl -tag -width Ds
+.It Cm pid
+Id of the Erlang process associated with the connection.
+.It Cm connection
+Id of the Erlang process associated with the connection to which the
+channel belongs.
+.It Cm name
+Readable name for the channel.
+.It Cm number
+The number of the channel, which uniquely identifies it within a
+connection.
+.It Cm user
+Username associated with the channel.
+.It Cm vhost
+Virtual host in which the channel operates.
+.It Cm transactional
+True if the channel is in transactional mode, false otherwise.
+.It Cm confirm
+True if the channel is in confirm mode, false otherwise.
+.It Cm consumer_count
+Number of logical AMQP consumers retrieving messages via the channel.
+.It Cm messages_unacknowledged
+Number of messages delivered via this channel but not yet acknowledged.
+.It Cm messages_uncommitted
+Number of messages received in an as yet uncommitted transaction.
+.It Cm acks_uncommitted
+Number of acknowledgements received in an as yet uncommitted transaction.
+.It Cm messages_unconfirmed
+Number of published messages not yet confirmed.
+On channels not in confirm mode, this remains 0.
+.It Cm prefetch_count
+QoS prefetch limit for new consumers, 0 if unlimited.
+.It Cm global_prefetch_count
+QoS prefetch limit for the entire channel, 0 if unlimited.
+.El
+.Pp
+If no
+.Ar channelinfoitem
+are specified then pid, user, consumer_count, and
+messages_unacknowledged are assumed.
+.Pp
+For example, this command displays the connection process and count of
+unacknowledged messages for each channel:
+.sp
+.Dl rabbitmqctl list_channels connection messages_unacknowledged
+.\" ------------------------------------------------------------------
+.It Cm list_ciphers
+.Pp
+Lists cipher suites supported by encoding commands.
+.Pp
+For example, this command instructs the RabbitMQ broker to list all
+cipher suites supported by encoding commands:
+.sp
+.Dl rabbitmqctl list_ciphers
+.\" ------------------------------------------------------------------
+.It Cm list_connections Op Ar connectioninfoitem ...
+.Pp
+Returns TCP/IP connection statistics.
+.Pp
+The
+.Ar connectioninfoitem
+parameter is used to indicate which connection information items to
+include in the results.
+The column order in the results will match the order of the parameters.
+.Ar connectioninfoitem
+can take any value from the list that follows:
+.Bl -tag -width Ds
+.It Cm pid
+Id of the Erlang process associated with the connection.
+.It Cm name
+Readable name for the connection.
+.It Cm port
+Server port.
+.It Cm host
+Server hostname obtained via reverse DNS, or its IP address if reverse
+DNS failed or was disabled.
+.It Cm peer_port
+Peer port.
+.It Cm peer_host
+Peer hostname obtained via reverse DNS, or its IP address if reverse DNS
+failed or was not enabled.
+.It Cm ssl
+Boolean indicating whether the connection is secured with SSL.
+.It Cm ssl_protocol
+SSL protocol (e.g.\&
+.Qq tlsv1 ) .
+.It Cm ssl_key_exchange
+SSL key exchange algorithm (e.g.\&
+.Qq rsa ) .
+.It Cm ssl_cipher
+SSL cipher algorithm (e.g.\&
+.Qq aes_256_cbc ) .
+.It Cm ssl_hash
+SSL hash function (e.g.\&
+.Qq sha ) .
+.It Cm peer_cert_subject
+The subject of the peer's SSL certificate, in RFC4514 form.
+.It Cm peer_cert_issuer
+The issuer of the peer's SSL certificate, in RFC4514 form.
+.It Cm peer_cert_validity
+The period for which the peer's SSL certificate is valid.
+.It Cm state
+Connection state; one of:
+.Bl -bullet -compact
+.It
+starting
+.It
+tuning
+.It
+opening
+.It
+running
+.It
+flow
+.It
+blocking
+.It
+blocked
+.It
+closing
+.It
+closed
+.El
+.It Cm channels
+Number of channels using the connection.
+.It Cm protocol
+Version of the AMQP protocol in use; currently one of:
+.Bl -bullet -compact
+.It
+{0,9,1}
+.It
+{0,8,0}
+.El
+.Pp
+Note that if a client requests an AMQP 0-9 connection, we treat it as
+AMQP 0-9-1.
+.It Cm auth_mechanism
+SASL authentication mechanism used, such as
+.Qq PLAIN .
+.It Cm user
+Username associated with the connection.
+.It Cm vhost
+Virtual host name with non-ASCII characters escaped as in C.
+.It Cm timeout
+Connection timeout / negotiated heartbeat interval, in seconds.
+.It Cm frame_max
+Maximum frame size (bytes).
+.It Cm channel_max
+Maximum number of channels on this connection.
+.It Cm client_properties
+Informational properties transmitted by the client during connection
+establishment.
+.It Cm recv_oct
+Octets received.
+.It Cm recv_cnt
+Packets received.
+.It Cm send_oct
+Octets sent.
+.It Cm send_cnt
+Packets sent.
+.It Cm send_pend
+Send queue size.
+.It Cm connected_at
+Date and time this connection was established, as timestamp.
+.El
+.Pp
+If no
+.Ar connectioninfoitem
+are specified then user, peer host, peer port, time since flow control
+and memory block state are displayed.
+.Pp
+For example, this command displays the send queue size and server port
+for each connection:
+.sp
+.Dl rabbitmqctl list_connections send_pend port
+.\" ------------------------------------------------------------------
+.It Cm list_consumers Op Fl p Ar vhost
+.Pp
+Lists consumers, i.e. subscriptions to a queue's message stream.
+Each line printed shows, separated by tab characters, the name of
+the queue subscribed to, the id of the channel process via which the
+subscription was created and is managed, the consumer tag which uniquely
+identifies the subscription within a channel, a boolean indicating
+whether acknowledgements are expected for messages delivered to this
+consumer, an integer indicating the prefetch limit (with 0 meaning
+.Qq none ) ,
+and any arguments for this consumer.
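+.Pp
+For example, this command lists the consumers in the virtual host named
+.Qq my-vhost :
+.sp
+.Dl rabbitmqctl list_consumers -p my-vhost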
+.\" ------------------------------------------------------------------
+.It Cm list_exchanges Oo Fl p Ar vhost Oc Op Ar exchangeinfoitem ...
+.Pp
+Returns exchange details.
+By default, exchange details for the
+.Qq /
+virtual host are returned.
+The
+.Fl p
+flag can be used to override this default.
+.Pp
+The
+.Ar exchangeinfoitem
+parameter is used to indicate which exchange information items to
+include in the results.
+The column order in the results will match the order of the parameters.
+.Ar exchangeinfoitem
+can take any value from the list that follows:
+.Bl -tag -width Ds
+.It Cm name
+The name of the exchange with non-ASCII characters escaped as in C.
+.It Cm type
+The exchange type, such as:
+.Bl -bullet -compact
+.It
+direct
+.It
+topic
+.It
+headers
+.It
+fanout
+.El
+.It Cm durable
+Whether or not the exchange survives server restarts.
+.It Cm auto_delete
+Whether the exchange will be deleted automatically when no longer used.
+.It Cm internal
+Whether the exchange is internal, i.e. cannot be directly published to
+by a client.
+.It Cm arguments
+Exchange arguments.
+.It Cm policy
+The name of the policy applied to the exchange, if any.
+.El
+.Pp
+If no
+.Ar exchangeinfoitem
+are specified then exchange name and type are displayed.
+.Pp
+For example, this command displays the name and type for each exchange
+of the virtual host named
+.Qq my-vhost :
+.sp
+.Dl rabbitmqctl list_exchanges -p my-vhost name type
+.\" ------------------------------------------------------------------
+.It Cm list_hashes
+.Pp
+Lists hash functions supported by encoding commands.
+.Pp
+For example, this command instructs the RabbitMQ broker to list all hash
+functions supported by encoding commands:
+.sp
+.Dl rabbitmqctl list_hashes
+.\" ------------------------------------------------------------------
+.It Cm list_queues Oo Fl p Ar vhost Oc Oo Fl -offline | Fl -online | Fl -local Oc Op Ar queueinfoitem ...
+.Pp
+Returns queue details.
+By default, queue details for the
+.Qq /
+virtual host are returned.
+The
+.Fl p
+flag can be used to override this default.
+.Pp
+Displayed queues can be filtered by their status or location using one
+of the following mutually exclusive options:
+.Bl -tag -width Ds
+.It Fl -offline
+List only those durable queues that are not currently available (more
+specifically, their master node isn't).
+.It Fl -online
+List queues that are currently available (their master node is).
+.It Fl -local
+List only those queues whose master process is located on the current
+node.
+.El
+.Pp
+The
+.Ar queueinfoitem
+parameter is used to indicate which queue information items to include
+in the results.
+The column order in the results will match the order of the parameters.
+.Ar queueinfoitem
+can take any value from the list that follows:
+.Bl -tag -width Ds
+.It Cm name
+The name of the queue with non\-ASCII characters escaped as in C.
+.It Cm durable
+Whether or not the queue survives server restarts.
+.It Cm auto_delete
+Whether the queue will be deleted automatically when no longer used.
+.It Cm arguments
+Queue arguments.
+.It Cm policy
+Effective policy name for the queue.
+.It Cm pid
+Erlang process identifier of the queue.
+.It Cm owner_pid
+Id of the Erlang process of the connection which is the
+exclusive owner of the queue.
+Empty if the queue is non-exclusive.
+.It Cm exclusive
+True if queue is exclusive (i.e. has owner_pid), false otherwise.
+.It Cm exclusive_consumer_pid
+Id of the Erlang process representing the channel of the exclusive
+consumer subscribed to this queue.
+Empty if there is no exclusive consumer.
+.It Cm exclusive_consumer_tag
+Consumer tag of the exclusive consumer subscribed to this queue.
+Empty if there is no exclusive consumer.
+.It Cm messages_ready
+Number of messages ready to be delivered to clients.
+.It Cm messages_unacknowledged
+Number of messages delivered to clients but not yet acknowledged.
+.It Cm messages
+Sum of ready and unacknowledged messages (queue depth).
+.It Cm messages_ready_ram
+Number of messages from messages_ready which are resident in ram.
+.It Cm messages_unacknowledged_ram
+Number of messages from messages_unacknowledged which are resident in
+ram.
+.It Cm messages_ram
+Total number of messages which are resident in ram.
+.It Cm messages_persistent
+Total number of persistent messages in the queue (will always be 0 for
+transient queues).
+.It Cm message_bytes
+Sum of the size of all message bodies in the queue.
+This does not include the message properties (including headers) or any
+overhead.
+.It Cm message_bytes_ready
+Like
+.Cm message_bytes
+but counting only those messages ready to be delivered to clients.
+.It Cm message_bytes_unacknowledged
+Like
+.Cm message_bytes
+but counting only those messages delivered to clients but not yet
+acknowledged.
+.It Cm message_bytes_ram
+Like
+.Cm message_bytes
+but counting only those messages which are currently held in RAM.
+.It Cm message_bytes_persistent
+Like
+.Cm message_bytes
+but counting only those messages which are persistent.
+.It Cm head_message_timestamp
+The timestamp property of the first message in the queue, if present.
+Timestamps of messages only appear when they are in the paged-in state.
+.It Cm disk_reads
+Total number of times messages have been read from disk by this queue
+since it started.
+.It Cm disk_writes
+Total number of times messages have been written to disk by this queue
+since it started.
+.It Cm consumers
+Number of consumers.
+.It Cm consumer_utilisation
+Fraction of the time (between 0.0 and 1.0) that the queue is able to
+immediately deliver messages to consumers.
+This can be less than 1.0 if consumers are limited by network congestion
+or prefetch count.
+.It Cm memory
+Bytes of memory allocated by the runtime for the
+queue, including stack, heap and internal structures.
+.It Cm slave_pids
+If the queue is mirrored, this lists the IDs of the mirrors (follower replicas).
+To learn more, see the
+.Lk https://www.rabbitmq.com/ha.html "RabbitMQ Mirroring guide"
+.It Cm synchronised_slave_pids
+If the queue is mirrored, this gives the IDs of the mirrors (follower replicas) which
+are synchronised with the master (leader). To learn more, see the
+.Lk https://www.rabbitmq.com/ha.html "RabbitMQ Mirroring guide"
+.It Cm state
+The state of the queue.
+Normally
+.Qq running ,
+but may be
+.Qq Bro syncing, Ar message_count Brc
+if the queue is synchronising.
+.Pp
+Queues which are located on cluster nodes that are currently down will
+be shown with a status of
+.Qq down
+(and most other
+.Ar queueinfoitem
+will be unavailable).
+.El
+.Pp
+If no
+.Ar queueinfoitem
+are specified then queue name and depth are displayed.
+.Pp
+For example, this command displays the depth and number of consumers for
+each queue of the virtual host named
+.Qq my-vhost :
+.sp
+.Dl rabbitmqctl list_queues -p my-vhost messages consumers
+.\" ------------------------------------------------------------------
+.It Cm list_unresponsive_queues Oo Fl -local Oc Oo Fl -queue-timeout Ar milliseconds Oc Oo Ar column ... Oc Op Fl -no-table-headers
+.Pp
+Tests whether queues respond within the given timeout, and lists those
+which did not respond in time.
+.Pp
+For example, this command lists only those unresponsive queues whose master process
+is located on the current node.
+.sp
+.Dl rabbitmqctl list_unresponsive_queues --local name
+.\" ------------------------------------------------------------------
+.It Cm ping
+.Pp
+Checks that the node's OS process is up, is registered with EPMD, and
+that CLI tools can authenticate with it.
+.Pp
+Example:
+.Dl rabbitmqctl ping -n rabbit@hostname
+.\" ------------------------------------------------------------------
+.It Cm report
+.Pp
+Generate a server status report containing a concatenation of all server
+status information for support purposes.
+The output should be redirected to a file when accompanying a support
+request.
+.Pp
+For example, this command creates a server report which may be attached
+to a support request email:
+.sp
+.Dl rabbitmqctl report > server_report.txt
+.\" ------------------------------------------------------------------
+.It Cm schema_info Oo Fl -no-table-headers Oc Op Ar column ...
+.Pp
+Lists schema database tables and their properties.
+.Pp
+For example, this command lists the table names and their active replicas:
+.sp
+.Dl rabbitmqctl schema_info name active_replicas
+.\" ------------------------------------------------------------------
+.It Cm status
+.Pp
+Displays broker status information such as the running applications on
+the current Erlang node, RabbitMQ and Erlang versions, OS name, memory
+and file descriptor statistics.
+(See the
+.Cm cluster_status
+command to find out which nodes are clustered and running.)
+.Pp
+For example, this command displays information about the RabbitMQ
+broker:
+.sp
+.Dl rabbitmqctl status
+.El
+.\" ------------------------------------------------------------------
+.\" ## Runtime Parameters and Policies
+.\" ------------------------------------------------------------------
+.Ss Runtime Parameters and Policies
+Certain features of RabbitMQ (such as the Federation plugin) are
+controlled by dynamic, cluster-wide
+.Em parameters.
+There are two kinds of parameters: parameters scoped to a virtual host and
+global parameters.
+Each vhost-scoped parameter consists of a component name, a name and a
+value.
+The component name and name are strings, and the value is a valid JSON document.
+A global parameter consists of a name and value.
+The name is a string and the value is an arbitrary Erlang data structure.
+Parameters can be set, cleared and listed.
+In general you should refer to the documentation for the feature in
+question to see how to set parameters.
+.Pp
+Policies are a feature built on top of runtime parameters.
+Policies are used to control and modify the behaviour of queues and
+exchanges on a cluster-wide basis.
+Policies apply within a given vhost, and consist of a name, pattern,
+definition and an optional priority.
+Policies can be set, cleared and listed.
+.Bl -tag -width Ds
+.\" ------------------------------------------------------------------
+.It Cm clear_global_parameter Ar name
+.Pp
+Clears a global runtime parameter.
+This is similar to
+.Cm clear_parameter
+but the key-value pair isn't tied to a virtual host.
+.Bl -tag -width Ds
+.It Ar name
+The name of the global runtime parameter being cleared.
+.El
+.Pp
+For example, this command clears the global runtime parameter
+.Qq mqtt_default_vhosts :
+.sp
+.Dl rabbitmqctl clear_global_parameter mqtt_default_vhosts
+.\" ------------------------------------------------------------------
+.It Cm clear_parameter Oo Fl p Ar vhost Oc Ar component_name Ar name
+.Pp
+Clears a parameter.
+.Bl -tag -width Ds
+.It Ar component_name
+The name of the component for which the parameter is being cleared.
+.It Ar name
+The name of the parameter being cleared.
+.El
+.Pp
+For example, this command clears the parameter
+.Qq node01
+for the
+.Qq federation-upstream
+component in the default virtual host:
+.sp
+.Dl rabbitmqctl clear_parameter federation-upstream node01
+.\" ------------------------------------------------------------------
+.It Cm list_global_parameters
+.Pp
+Lists all global runtime parameters.
+This is similar to
+.Cm list_parameters
+but the global runtime parameters are not tied to any virtual host.
+.Pp
+For example, this command lists all global parameters:
+.sp
+.Dl rabbitmqctl list_global_parameters
+.\" ------------------------------------------------------------------
+.It Cm list_parameters Op Fl p Ar vhost
+.Pp
+Lists all parameters for a virtual host.
+.Pp
+For example, this command lists all parameters in the default virtual
+host:
+.sp
+.Dl rabbitmqctl list_parameters
+.\" ------------------------------------------------------------------
+.It Cm set_global_parameter Ar name Ar value
+.Pp
+Sets a global runtime parameter.
+This is similar to
+.Cm set_parameter
+but the key-value pair isn't tied to a virtual host.
+.Bl -tag -width Ds
+.It Ar name
+The name of the global runtime parameter being set.
+.It Ar value
+The value for the global runtime parameter, as a JSON term.
+In most shells you are very likely to need to quote this.
+.El
+.Pp
+For example, this command sets the global runtime parameter
+.Qq mqtt_default_vhosts
+to the JSON term {"O=client,CN=guest":"/"}:
+.sp
+.Dl rabbitmqctl set_global_parameter mqtt_default_vhosts '{"O=client,CN=guest":"/"}'
+.\" ------------------------------------------------------------------
+.It Cm set_parameter Oo Fl p Ar vhost Oc Ar component_name Ar name Ar value
+.Pp
+Sets a parameter.
+.Bl -tag -width Ds
+.It Ar component_name
+The name of the component for which the parameter is being set.
+.It Ar name
+The name of the parameter being set.
+.It Ar value
+The value for the parameter, as a JSON term.
+In most shells you are very likely to need to quote this.
+.El
+.Pp
+For example, this command sets the parameter
+.Qq node01
+for the
+.Qq federation-upstream
+component in the default virtual host to the following JSON term:
+.sp
+.Dl rabbitmqctl set_parameter federation-upstream node01 '{"uri":"amqp://user:password@server/%2F","ack-mode":"on-publish"}'
+.\" ------------------------------------------------------------------
+.It Cm list_policies Op Fl p Ar vhost
+.Pp
+Lists all policies for a virtual host.
+.Pp
+For example, this command lists all policies in the default virtual
+host:
+.sp
+.Dl rabbitmqctl list_policies
+.\" ------------------------------------------------------------------
+.It Cm set_operator_policy Oo Fl p Ar vhost Oc Oo Fl -priority Ar priority Oc Oo Fl -apply-to Ar apply-to Oc Ar name Ar pattern Ar definition
+.Pp
+Sets an operator policy that overrides a subset of arguments in user
+policies.
+Arguments are identical to those of
+.Cm set_policy .
+.Pp
+Supported arguments are:
+.Bl -bullet -compact
+.It
+expires
+.It
+message-ttl
+.It
+max-length
+.It
+max-length-bytes
+.El
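+.Pp
+For example, this command sets an operator policy named
+.Qq queue-expiry
+(an illustrative name) in the virtual host
+.Qq my-vhost
+so that all queues expire after 30 minutes of not being used:
+.sp
+.Dl rabbitmqctl set_operator_policy -p my-vhost queue-expiry ".*" '{"expires": 1800000}' --apply-to queues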
+.\" ------------------------------------------------------------------
+.It Cm set_policy Oo Fl p Ar vhost Oc Oo Fl -priority Ar priority Oc Oo Fl -apply-to Ar apply-to Oc Ar name Ar pattern Ar definition
+.Pp
+Sets a policy.
+.Bl -tag -width Ds
+.It Ar name
+The name of the policy.
+.It Ar pattern
+The regular expression which, when it matches the name of a given
+resource, causes the policy to apply.
+.It Ar definition
+The definition of the policy, as a JSON term.
+In most shells you are very likely to need to quote this.
+.It Ar priority
+The priority of the policy as an integer.
+Higher numbers indicate greater precedence.
+The default is 0.
+.It Ar apply-to
+Which types of object this policy should apply to.
+Possible values are:
+.Bl -bullet -compact
+.It
+queues
+.It
+exchanges
+.It
+all
+.El
+The default is
+.Cm all .
+.El
+.Pp
+For example, this command sets the policy
+.Qq federate-me
+in the default virtual host so that built-in exchanges are federated:
+.sp
+.Dl rabbitmqctl set_policy federate-me "^amq." '{"federation-upstream-set":"all"}'
+.\" ------------------------------------------------------------------
+.It Cm clear_policy Oo Fl p Ar vhost Oc Ar name
+.Pp
+Clears a policy.
+.Bl -tag -width Ds
+.It Ar name
+The name of the policy being cleared.
+.El
+.Pp
+For example, this command clears the
+.Qq federate-me
+policy in the default virtual host:
+.sp
+.Dl rabbitmqctl clear_policy federate-me
+.\" ------------------------------------------------------------------
+.It Cm clear_operator_policy Oo Fl p Ar vhost Oc Ar name
+.Pp
+Clears an operator policy.
+Arguments are identical to those of
+.Cm clear_policy .
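+.Pp
+For example, this command clears the operator policy named
+.Qq queue-expiry
+(the illustrative name used above) in the virtual host
+.Qq my-vhost :
+.sp
+.Dl rabbitmqctl clear_operator_policy -p my-vhost queue-expiry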
+.\" ------------------------------------------------------------------
+.It Cm list_operator_policies Op Fl p Ar vhost
+.Pp
+Lists operator policy overrides for a virtual host.
+Arguments are identical to those of
+.Cm list_policies .
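+.Pp
+For example, this command lists the operator policies in the virtual
+host named
+.Qq my-vhost :
+.sp
+.Dl rabbitmqctl list_operator_policies -p my-vhost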
+.El
+.\" ------------------------------------------------------------------
+.\" ## Virtual Host Management
+.\" ------------------------------------------------------------------
+.Ss Virtual hosts
+Note that
+.Nm
+manages the RabbitMQ internal user database.
+Permissions for users from any alternative authorisation backend will
+not be visible to
+.Nm .
+.Bl -tag -width Ds
+.\" ------------------------------------------------------------------
+.It Cm add_vhost Ar vhost
+.Bl -tag -width Ds
+.It Ar vhost
+The name of the virtual host entry to create.
+.El
+.Pp
+Creates a virtual host.
+.Pp
+For example, this command instructs the RabbitMQ broker to create a new
+virtual host called
+.Qq test :
+.Pp
+.Dl rabbitmqctl add_vhost test
+.\" ------------------------------------------------------------------
+.It Cm clear_vhost_limits Op Fl p Ar vhost
+.Pp
+Clears virtual host limits.
+.Pp
+For example, this command clears vhost limits in vhost
+.Qq qa_env :
+.sp
+.Dl rabbitmqctl clear_vhost_limits -p qa_env
+.\" ------------------------------------------------------------------
+.It Cm delete_vhost Ar vhost
+.Bl -tag -width Ds
+.It Ar vhost
+The name of the virtual host entry to delete.
+.El
+.Pp
+Deletes a virtual host.
+.Pp
+Deleting a virtual host deletes all its exchanges, queues, bindings,
+user permissions, parameters and policies.
+.Pp
+For example, this command instructs the RabbitMQ broker to delete the
+virtual host called
+.Qq test :
+.sp
+.Dl rabbitmqctl delete_vhost test
+.\" ------------------------------------------------------------------
+.It Cm list_vhost_limits Oo Fl p Ar vhost Oc Oo Fl -global Oc Op Fl -no-table-headers
+.Pp
+Displays configured virtual host limits.
+.Bl -tag -width Ds
+.It Fl -global
+Show limits for all vhosts.
+Suppresses the
+.Fl p
+parameter.
+.El
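+.Pp
+For example, this command displays the limits configured for all
+virtual hosts:
+.sp
+.Dl rabbitmqctl list_vhost_limits --global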
+.\" ------------------------------------------------------------------
+.It Cm restart_vhost Ar vhost
+.Bl -tag -width Ds
+.It Ar vhost
+The name of the virtual host entry to restart.
+.El
+.Pp
+Restarts a failed virtual host's data stores and queues.
+.Pp
+For example, this command instructs the RabbitMQ broker to restart a
+virtual host called
+.Qq test :
+.Pp
+.Dl rabbitmqctl restart_vhost test
+.\" ------------------------------------------------------------------
+.It Cm set_vhost_limits Oo Fl p Ar vhost Oc Ar definition
+.Pp
+Sets virtual host limits.
+.Bl -tag -width Ds
+.It Ar definition
+The definition of the limits, as a JSON term.
+In most shells you are very likely to need to quote this.
+.Pp
+Recognised limits are:
+.Bl -bullet -compact
+.It
+max-connections
+.It
+max-queues
+.El
+.Pp
+Use a negative value to specify "no limit".
+.El
+.Pp
+For example, this command limits the max number of concurrent
+connections in vhost
+.Qq qa_env
+to 64:
+.sp
+.Dl rabbitmqctl set_vhost_limits -p qa_env '{"max-connections": 64}'
+.Pp
+This command limits the max number of queues in vhost
+.Qq qa_env
+to 256:
+.sp
+.Dl rabbitmqctl set_vhost_limits -p qa_env '{"max-queues": 256}'
+.Pp
+This command clears the max number of connections limit in vhost
+.Qq qa_env :
+.sp
+.Dl rabbitmqctl set_vhost_limits -p qa_env '{"max\-connections": \-1}'
+.Pp
+This command disables client connections in vhost
+.Qq qa_env :
+.sp
+.Dl rabbitmqctl set_vhost_limits -p qa_env '{"max-connections": 0}'
+.\" ------------------------------------------------------------------
+.It Cm trace_off Op Fl p Ar vhost
+.Bl -tag -width Ds
+.It Ar vhost
+The name of the virtual host for which to stop tracing.
+.El
+.Pp
+Stops tracing.
+.\" ------------------------------------------------------------------
+.It Cm trace_on Op Fl p Ar vhost
+.Bl -tag -width Ds
+.It Ar vhost
+The name of the virtual host for which to start tracing.
+.El
+.Pp
+Starts tracing.
+Note that the trace state is not persistent; it will revert to being off
+if the node is restarted.
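+.Pp
+For example, this command starts tracing in the virtual host named
+.Qq my-vhost :
+.sp
+.Dl rabbitmqctl trace_on -p my-vhost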
+.El
+.\" ------------------------------------------------------------------
+.\" ## Configuration
+.\" ------------------------------------------------------------------
+.Ss Configuration
+.Bl -tag -width Ds
+.\" ------------------------------------------------------------------
+.It Cm decode Ar value Ar passphrase Oo Fl -cipher Ar cipher Oc Oo Fl -hash Ar hash Oc Op Fl -iterations Ar iterations
+.Bl -tag -width Ds
+.It Ar value Ar passphrase
+Value to decrypt (as produced by the encode command) and passphrase.
+.Pp
+For example:
+.sp
+.Dl rabbitmqctl decode '{encrypted, <<"...">>}' mypassphrase
+.It Fl -cipher Ar cipher Fl -hash Ar hash Fl -iterations Ar iterations
+Options to specify the decryption settings.
+They can be used independently.
+.Pp
+For example:
+.sp
+.Dl rabbitmqctl decode --cipher blowfish_cfb64 --hash sha256 --iterations 10000 '{encrypted,<<"...">>}' mypassphrase
+.El
+.\" ------------------------------------------------------------------
+.It Cm encode Ar value Ar passphrase Oo Fl -cipher Ar cipher Oc Oo Fl -hash Ar hash Oc Op Fl -iterations Ar iterations
+.Bl -tag -width Ds
+.It Ar value Ar passphrase
+Value to encrypt and passphrase.
+.Pp
+For example:
+.sp
+.Dl rabbitmqctl encode '<<"guest">>' mypassphrase
+.It Fl -cipher Ar cipher Fl -hash Ar hash Fl -iterations Ar iterations
+Options to specify the encryption settings.
+They can be used independently.
+.Pp
+For example:
+.sp
+.Dl rabbitmqctl encode --cipher blowfish_cfb64 --hash sha256 --iterations 10000 '<<"guest">>' mypassphrase
+.El
+.\" ------------------------------------------------------------------
+.It Cm set_cluster_name Ar name
+.Pp
+Sets the cluster name to
+.Ar name .
+The cluster name is announced to clients on connection, and used by the
+federation and shovel plugins to record where a message has been.
+The cluster name is by default derived from the hostname of the first
+node in the cluster, but can be changed.
+.Pp
+For example, this sets the cluster name to
+.Qq london :
+.sp
+.Dl rabbitmqctl set_cluster_name london
+.\" ------------------------------------------------------------------
+.It Cm set_disk_free_limit Ar disk_limit
+.Bl -tag -width Ds
+.It Ar disk_limit
+Lower bound limit as an integer in bytes or a string with memory unit symbols
+(see vm_memory_high_watermark), e.g. 512M or 1G.
+Once free disk space reaches the limit, a disk alarm will be set.
+.El
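+.Pp
+For example, this command sets the free disk space limit to 1 gigabyte:
+.sp
+.Dl rabbitmqctl set_disk_free_limit 1G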
+.\" ------------------------------------------------------------------
+.It Cm set_disk_free_limit mem_relative Ar fraction
+.Bl -tag -width Ds
+.It Ar fraction
+Limit relative to the total amount of available RAM, as a non-negative
+floating point number.
+Values lower than 1.0 can be dangerous and should be used carefully.
+.El
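+.Pp
+For example, this command sets the free disk space limit to 1.5 times
+the total amount of available RAM:
+.sp
+.Dl rabbitmqctl set_disk_free_limit mem_relative 1.5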
+.\" ------------------------------------------------------------------
+.It Cm set_log_level Op Ar log_level
+.Pp
+Sets the log level on the running node.
+.Pp
+Supported
+.Ar log_level
+values are:
+.Bl -bullet -compact
+.It
+debug
+.It
+info
+.It
+warning
+.It
+error
+.It
+none
+.El
+.Pp
+Example:
+.sp
+.Dl rabbitmqctl set_log_level debug
+.\" ------------------------------------------------------------------
+.It Cm set_vm_memory_high_watermark Ar fraction
+.Bl -tag -width Ds
+.It Ar fraction
+The new memory threshold fraction at which flow control is triggered, as
+a floating point number greater than or equal to 0.
+.El
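+.Pp
+For example, this command sets the memory high watermark to 40% of
+installed RAM:
+.sp
+.Dl rabbitmqctl set_vm_memory_high_watermark 0.4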
+.\" ------------------------------------------------------------------
+.It Cm set_vm_memory_high_watermark Oo absolute Oc Ar memory_limit
+.Bl -tag -width Ds
+.It Ar memory_limit
+The new memory limit at which flow control is triggered, expressed in
+bytes as an integer number greater than or equal to 0, or as a string
+with a memory unit symbol (e.g. 512M or 1G).
+Available unit symbols are:
+.Bl -tag -width Ds
+.It Cm k , Cm kiB
+kibibytes (2^10 bytes)
+.It Cm M , Cm MiB
+mebibytes (2^20 bytes)
+.It Cm G , Cm GiB
+gibibytes (2^30 bytes)
+.It Cm kB
+kilobytes (10^3 bytes)
+.It Cm MB
+megabytes (10^6 bytes)
+.It Cm GB
+gigabytes (10^9 bytes)
+.El
+.El
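+.Pp
+For example, this command sets the memory high watermark to 2 gibibytes:
+.sp
+.Dl rabbitmqctl set_vm_memory_high_watermark absolute 2G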
+.El
+.\" ------------------------------------------------------------------
+.\" ## Feature Flags
+.\" ------------------------------------------------------------------
+.Ss Feature flags
+.Bl -tag -width Ds
+.\" ------------------------------------------------------------------
+.It Cm enable_feature_flag Ar feature_flag
+.Pp
+Enables a feature flag on the target node.
+.Pp
+Example:
+.sp
+.Dl rabbitmqctl enable_feature_flag quorum_queue
+.\" ------------------------------------------------------------------
+.It Cm list_feature_flags Op Ar column ...
+.Pp
+Lists feature flags.
+.Pp
+Supported
+.Ar column
+values are:
+.Bl -bullet -compact
+.It
+name
+.It
+state
+.It
+stability
+.It
+provided_by
+.It
+desc
+.It
+doc_url
+.El
+.Pp
+Example:
+.sp
+.Dl rabbitmqctl list_feature_flags name state
+.El
+.\" ------------------------------------------------------------------
+.\" ## Misc Operations
+.\" ------------------------------------------------------------------
+.Ss Connection Operations
+.Bl -tag -width Ds
+.\" ------------------------------------------------------------------
+.It Cm close_all_connections Oo Fl p Ar vhost Oc Oo Fl -global Oc Oo Fl -per-connection-delay Ar delay Oc Oo Fl -limit Ar limit Oc Ar explanation
+.Bl -tag -width Ds
+.It Fl p Ar vhost
+The name of the virtual host for which connections should be closed.
+Ignored when
+.Fl -global
+is specified.
+.It Fl -global
+Whether connections should be closed for all vhosts.
+Overrides
+.Fl p .
+.It Fl -per-connection-delay Ar delay
+Time in milliseconds to wait after closing each connection.
+.It Fl -limit Ar limit
+Number of connections to close.
+Only works per vhost.
+Ignored when
+.Fl -global
+is specified.
+.It Ar explanation
+Explanation string.
+.El
+.Pp
+Instructs the broker to close all connections for the specified vhost or entire RabbitMQ node.
+.Pp
+For example, this command instructs the RabbitMQ broker to close 10 connections in the
+.Qq qa_env
+vhost, passing the explanation
+.Qq Please close :
+.sp
+.Dl rabbitmqctl close_all_connections -p qa_env --limit 10 'Please close'
+.Pp
+This command instructs the broker to close all connections to the node:
+.sp
+.Dl rabbitmqctl close_all_connections --global
+.\" ------------------------------------------------------------------
+.It Cm close_connection Ar connectionpid Ar explanation
+.Bl -tag -width Ds
+.It Ar connectionpid
+Id of the Erlang process associated with the connection to close.
+.It Ar explanation
+Explanation string.
+.El
+.Pp
+Instructs the broker to close the connection associated with the Erlang
+process id
+.Ar connectionpid
+(see also the
+.Cm list_connections
+command), passing the
+.Ar explanation
+string to the connected client as part of the AMQP connection shutdown
+protocol.
+.Pp
+For example, this command instructs the RabbitMQ broker to close the connection associated with the Erlang process id
+.Qq <rabbit@tanto.4262.0> ,
+passing the explanation
+.Qq go away
+to the connected client:
+.sp
+.Dl rabbitmqctl close_connection Qo <rabbit@tanto.4262.0> Qc Qq go away
+.El
+.\" ------------------------------------------------------------------
+.\" ## Misc
+.\" ------------------------------------------------------------------
+.Ss Misc
+.Bl -tag -width Ds
+.\" ------------------------------------------------------------------
+.It Cm eval Ar expression
+.Pp
+Evaluates an Erlang expression on the target node.
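+.Pp
+For example, this command returns the name of the target node by
+evaluating a trivial Erlang expression:
+.sp
+.Dl rabbitmqctl eval 'node().'
+.El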
+.\" ------------------------------------------------------------------
+.\" ## Queue Operations
+.\" ------------------------------------------------------------------
+.Ss Queue Operations
+.Bl -tag -width Ds
+.\" ------------------------------------------------------------------
+.It Cm delete_queue Ar queue_name Oo Fl -if-empty | Fl e Oc Op Fl -if-unused | Fl u
+.Bl -tag -width Ds
+.It Ar queue_name
+The name of the queue to delete.
+.It Fl -if-empty
+Delete the queue only if it is empty (has no messages ready for delivery).
+.It Fl -if-unused
+Delete the queue only if it has no consumers.
+.El
+.Pp
+Deletes a queue.
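+.Pp
+For example, assuming a queue named
+.Qq unused-queue
+exists, this command deletes it only if it is empty:
+.sp
+.Dl rabbitmqctl delete_queue unused-queue --if-empty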
+.\" ------------------------------------------------------------------
+.It Cm purge_queue Oo Fl p Ar vhost Oc Ar queue
+.Bl -tag -width Ds
+.It Ar queue
+The name of the queue to purge.
+.El
+.Pp
+Purges a queue (removes all messages in it).
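+.Pp
+For example, assuming a queue named
+.Qq my-queue
+exists in the virtual host
+.Qq my-vhost ,
+this command purges it:
+.sp
+.Dl rabbitmqctl purge_queue -p my-vhost my-queue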
+.El
+.\" ------------------------------------------------------------------------------------------------
+.Sh PLUGIN COMMANDS
+.\" ------------------------------------------------------------------------------------------------
+RabbitMQ plugins can extend the rabbitmqctl tool with new commands when
+they are enabled.
+The currently available commands can be found in the
+.Cm rabbitmqctl help
+output.
+The following commands are added by RabbitMQ plugins available in the
+default distribution:
+.\" ------------------------------------------------------------------
+.\" ## Shovel
+.\" ------------------------------------------------------------------
+.Ss Shovel plugin
+.Bl -tag -width Ds
+.It Cm shovel_status
+Prints a list of configured Shovels.
+.It Cm delete_shovel Oo Fl p Ar vhost Oc Ar name
+Instructs the RabbitMQ node to delete the configured shovel by
+.Ar name .
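+.Pp
+For example, assuming a shovel named
+.Qq my-shovel
+is configured in the virtual host
+.Qq my-vhost ,
+this command deletes it:
+.sp
+.Dl rabbitmqctl delete_shovel -p my-vhost my-shovel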
+.El
+.\" ------------------------------------------------------------------
+.\" ## Federation
+.\" ------------------------------------------------------------------
+.Ss Federation plugin
+.Bl -tag -width Ds
+.It Cm federation_status Op Fl -only-down
+Prints a list of federation links.
+.Bl -tag -width Ds
+.It Fl -only-down
+Only list federation links which are not running.
+.El
+.It Cm restart_federation_link Ar link_id
+Instructs the RabbitMQ node to restart the federation link with specified
+.Ar link_id .
+.El
+.\" ------------------------------------------------------------------
+.\" ## AMQP 1.0
+.\" ------------------------------------------------------------------
+.Ss AMQP 1.0 plugin
+.Bl -tag -width Ds
+.It Cm list_amqp10_connections Op Ar amqp10_connectioninfoitem ...
+Similar to the
+.Cm list_connections
+command, but returns fields which make sense for AMQP 1.0 connections.
+The
+.Ar amqp10_connectioninfoitem
+parameter is used to indicate which connection information items to
+include in the results.
+The column order in the results will match the order of the parameters.
+.Ar amqp10_connectioninfoitem
+can take any value from the list that follows:
+.Bl -tag -width Ds
+.It Cm pid
+Id of the Erlang process associated with the connection.
+.It Cm auth_mechanism
+SASL authentication mechanism used, such as
+.Qq PLAIN .
+.It Cm host
+Server hostname obtained via reverse DNS, or its IP address if reverse
+DNS failed or was disabled.
+.It Cm frame_max
+Maximum frame size (bytes).
+.It Cm timeout
+Connection timeout / negotiated heartbeat interval, in seconds.
+.It Cm user
+Username associated with the connection.
+.It Cm state
+Connection state; one of:
+.Bl -bullet -compact
+.It
+starting
+.It
+waiting_amqp0100
+.It
+securing
+.It
+running
+.It
+blocking
+.It
+blocked
+.It
+closing
+.It
+closed
+.El
+.It Cm recv_oct
+Octets received.
+.It Cm recv_cnt
+Packets received.
+.It Cm send_oct
+Octets sent.
+.It Cm send_cnt
+Packets sent.
+.It Cm ssl
+Boolean indicating whether the connection is secured with SSL.
+.It Cm ssl_protocol
+SSL protocol (e.g.\&
+.Qq tlsv1 ) .
+.It Cm ssl_key_exchange
+SSL key exchange algorithm (e.g.\&
+.Qq rsa ) .
+.It Cm ssl_cipher
+SSL cipher algorithm (e.g.\&
+.Qq aes_256_cbc ) .
+.It Cm ssl_hash
+SSL hash function (e.g.\&
+.Qq sha ) .
+.It Cm peer_cert_subject
+The subject of the peer's SSL certificate, in RFC4514 form.
+.It Cm peer_cert_issuer
+The issuer of the peer's SSL certificate, in RFC4514 form.
+.It Cm peer_cert_validity
+The period for which the peer's SSL certificate is valid.
+.It Cm node
+The node name of the RabbitMQ node to which connection is established.
+.El
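+.Pp
+For example, this command displays the user and connection state for
+each AMQP 1.0 connection:
+.sp
+.Dl rabbitmqctl list_amqp10_connections user state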
+.El
+.\" ------------------------------------------------------------------
+.\" ## MQTT
+.\" ------------------------------------------------------------------
+.Ss MQTT plugin
+.Bl -tag -width Ds
+.It Cm list_mqtt_connections Op Ar mqtt_connectioninfoitem
+Similar to the
+.Cm list_connections
+command, but returns fields which make sense for MQTT connections.
+The
+.Ar mqtt_connectioninfoitem
+parameter is used to indicate which connection information items to
+include in the results.
+The column order in the results will match the order of the parameters.
+.Ar mqtt_connectioninfoitem
+can take any value from the list that follows:
+.Bl -tag -width Ds
+.It Cm host
+Server hostname obtained via reverse DNS, or its IP address if reverse
+DNS failed or was disabled.
+.It Cm port
+Server port.
+.It Cm peer_host
+Peer hostname obtained via reverse DNS, or its IP address if reverse DNS
+failed or was not enabled.
+.It Cm peer_port
+Peer port.
+.It Cm protocol
+MQTT protocol version, which can be one of the following:
+.Bl -bullet -compact
+.It
+{'MQTT', N/A}
+.It
+{'MQTT', 3.1.0}
+.It
+{'MQTT', 3.1.1}
+.El
+.It Cm channels
+Number of channels using the connection.
+.It Cm channel_max
+Maximum number of channels on this connection.
+.It Cm frame_max
+Maximum frame size (bytes).
+.It Cm client_properties
+Informational properties transmitted by the client during connection
+establishment.
+.It Cm ssl
+Boolean indicating whether the connection is secured with SSL.
+.It Cm ssl_protocol
+SSL protocol (e.g.\&
+.Qq tlsv1 ) .
+.It Cm ssl_key_exchange
+SSL key exchange algorithm (e.g.\&
+.Qq rsa ) .
+.It Cm ssl_cipher
+SSL cipher algorithm (e.g.\&
+.Qq aes_256_cbc ) .
+.It Cm ssl_hash
+SSL hash function (e.g.\&
+.Qq sha ) .
+.It Cm conn_name
+Readable name for the connection.
+.It Cm connection_state
+Connection state; one of:
+.Bl -bullet -compact
+.It
+starting
+.It
+running
+.It
+blocked
+.El
+.It Cm connection
+Id of the Erlang process associated with the internal AMQP direct connection.
+.It Cm consumer_tags
+A tuple of consumer tags for QoS 0 and QoS 1.
+.It Cm message_id
+The last Packet ID sent in a control message.
+.It Cm client_id
+MQTT client identifier for the connection.
+.It Cm clean_sess
+MQTT clean session flag.
+.It Cm will_msg
+MQTT Will message sent in CONNECT frame.
+.It Cm exchange
+Exchange to route MQTT messages configured in rabbitmq_mqtt application environment.
+.It Cm ssl_login_name
+SSL peer certificate authentication name.
+.It Cm retainer_pid
+Id of the Erlang process associated with retain storage for the connection.
+.It Cm user
+Username associated with the connection.
+.It Cm vhost
+Virtual host name with non-ASCII characters escaped as in C.
+.El
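+.Pp
+For example, this command displays the client identifier and connection
+state for each MQTT connection:
+.sp
+.Dl rabbitmqctl list_mqtt_connections client_id connection_state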
+.El
+.\" ------------------------------------------------------------------
+.\" ## STOMP
+.\" ------------------------------------------------------------------
+.Ss STOMP plugin
+.Bl -tag -width Ds
+.It Cm list_stomp_connections Op Ar stomp_connectioninfoitem
+Similar to the
+.Cm list_connections
+command, but returns fields which make sense for STOMP connections.
+The
+.Ar stomp_connectioninfoitem
+parameter is used to indicate which connection information items to
+include in the results.
+The column order in the results will match the order of the parameters.
+.Ar stomp_connectioninfoitem
+can take any value from the list that follows:
+.Bl -tag -width Ds
+.It Cm conn_name
+Readable name for the connection.
+.It Cm connection
+Id of the Erlang process associated with the internal AMQP direct connection.
+.It Cm connection_state
+Connection state; one of:
+.Bl -bullet -compact
+.It
+running
+.It
+blocking
+.It
+blocked
+.El
+.It Cm session_id
+STOMP protocol session identifier.
+.It Cm channel
+AMQP channel associated with the connection.
+.It Cm version
+Negotiated STOMP protocol version for the connection.
+.It Cm implicit_connect
+Indicates whether the connection was established using implicit connect (without a CONNECT frame).
+.It Cm auth_login
+Effective username for the connection.
+.It Cm auth_mechanism
+STOMP authorization mechanism.
+Can be one of:
+.Bl -bullet -compact
+.It
+config
+.It
+ssl
+.It
+stomp_headers
+.El
+.It Cm port
+Server port.
+.It Cm host
+Server hostname obtained via reverse DNS, or its IP address if reverse
+DNS failed or was not enabled.
+.It Cm peer_port
+Peer port.
+.It Cm peer_host
+Peer hostname obtained via reverse DNS, or its IP address if reverse DNS
+failed or was not enabled.
+.It Cm protocol
+STOMP protocol version, which can be one of the following:
+.Bl -bullet -compact
+.It
+{'STOMP', 0}
+.It
+{'STOMP', 1}
+.It
+{'STOMP', 2}
+.El
+.It Cm channels
+Number of channels using the connection.
+.It Cm channel_max
+Maximum number of channels on this connection.
+.It Cm frame_max
+Maximum frame size (bytes).
+.It Cm client_properties
+Informational properties transmitted by the client during connection
+establishment.
+.It Cm ssl
+Boolean indicating whether the connection is secured with SSL.
+.It Cm ssl_protocol
+TLS protocol (e.g.\&
+.Qq tlsv1 ) .
+.It Cm ssl_key_exchange
+TLS key exchange algorithm (e.g.\&
+.Qq rsa ) .
+.It Cm ssl_cipher
+TLS cipher algorithm (e.g.\&
+.Qq aes_256_cbc ) .
+.It Cm ssl_hash
+TLS hash function (e.g.\&
+.Qq sha ) .
+.El
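+.Pp
+For example, this command displays the session identifier and
+negotiated protocol version for each STOMP connection:
+.sp
+.Dl rabbitmqctl list_stomp_connections session_id version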
+.El
+.\" ------------------------------------------------------------------
+.\" ## Management Agent
+.\" ------------------------------------------------------------------
+.Ss Management agent plugin
+.Bl -tag -width Ds
+.It Cm reset_stats_db Op Fl -all
+Resets the management stats database for the RabbitMQ node.
+.Bl -tag -width Ds
+.It Fl -all
+Resets the stats database for all nodes in the cluster.
+.El
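+.Pp
+For example, this command resets the stats database on every node in
+the cluster:
+.sp
+.Dl rabbitmqctl reset_stats_db --all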
+.El
+.\" ------------------------------------------------------------------------------------------------
+.Sh SEE ALSO
+.\" ------------------------------------------------------------------------------------------------
+.Xr rabbitmq-diagnostics 8 ,
+.Xr rabbitmq-plugins 8 ,
+.Xr rabbitmq-server 8 ,
+.Xr rabbitmq-queues 8 ,
+.Xr rabbitmq-upgrade 8 ,
+.Xr rabbitmq-service 8 ,
+.Xr rabbitmq-env.conf 5 ,
+.Xr rabbitmq-echopid 8
+.\" ------------------------------------------------------------------------------------------------
+.Sh AUTHOR
+.\" ------------------------------------------------------------------------------------------------
+.An The RabbitMQ Team Aq Mt info@rabbitmq.com
diff --git a/deps/rabbit/docs/set_rabbitmq_policy.sh.example b/deps/rabbit/docs/set_rabbitmq_policy.sh.example
new file mode 100644
index 0000000000..f46e901ad5
--- /dev/null
+++ b/deps/rabbit/docs/set_rabbitmq_policy.sh.example
@@ -0,0 +1,4 @@
+# This script is called by rabbitmq-server-ha.ocf during RabbitMQ
+# cluster start up. It is a convenient place to set your cluster
+# policy, for example:
+# ${OCF_RESKEY_ctl} set_policy ha-all "." '{"ha-mode":"all", "ha-sync-mode":"automatic"}' --apply-to all --priority 0
diff --git a/deps/rabbit/erlang.mk b/deps/rabbit/erlang.mk
new file mode 100644
index 0000000000..fce4be0b0a
--- /dev/null
+++ b/deps/rabbit/erlang.mk
@@ -0,0 +1,7808 @@
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+.PHONY: all app apps deps search rel relup docs install-docs check tests clean distclean help erlang-mk
+
+ERLANG_MK_FILENAME := $(realpath $(lastword $(MAKEFILE_LIST)))
+export ERLANG_MK_FILENAME
+
+ERLANG_MK_VERSION = 2019.07.01-40-geb3e4b0
+ERLANG_MK_WITHOUT =
+
+# Make 3.81 and 3.82 are deprecated.
+
+ifeq ($(MAKELEVEL)$(MAKE_VERSION),03.81)
+$(warning Please upgrade to GNU Make 4 or later: https://erlang.mk/guide/installation.html)
+endif
+
+ifeq ($(MAKELEVEL)$(MAKE_VERSION),03.82)
+$(warning Please upgrade to GNU Make 4 or later: https://erlang.mk/guide/installation.html)
+endif
+
+# Core configuration.
+
+PROJECT ?= $(notdir $(CURDIR))
+PROJECT := $(strip $(PROJECT))
+
+PROJECT_VERSION ?= rolling
+PROJECT_MOD ?= $(PROJECT)_app
+PROJECT_ENV ?= []
+
+# Verbosity.
+
+V ?= 0
+
+verbose_0 = @
+verbose_2 = set -x;
+verbose = $(verbose_$(V))
+
+ifeq ($(V),3)
+SHELL := $(SHELL) -x
+endif
+
+gen_verbose_0 = @echo " GEN " $@;
+gen_verbose_2 = set -x;
+gen_verbose = $(gen_verbose_$(V))
+
+gen_verbose_esc_0 = @echo " GEN " $$@;
+gen_verbose_esc_2 = set -x;
+gen_verbose_esc = $(gen_verbose_esc_$(V))
+
+# Temporary files directory.
+
+ERLANG_MK_TMP ?= $(CURDIR)/.erlang.mk
+export ERLANG_MK_TMP
+
+# "erl" command.
+
+ERL = erl +A1 -noinput -boot no_dot_erlang
+
+# Platform detection.
+
+ifeq ($(PLATFORM),)
+UNAME_S := $(shell uname -s)
+
+ifeq ($(UNAME_S),Linux)
+PLATFORM = linux
+else ifeq ($(UNAME_S),Darwin)
+PLATFORM = darwin
+else ifeq ($(UNAME_S),SunOS)
+PLATFORM = solaris
+else ifeq ($(UNAME_S),GNU)
+PLATFORM = gnu
+else ifeq ($(UNAME_S),FreeBSD)
+PLATFORM = freebsd
+else ifeq ($(UNAME_S),NetBSD)
+PLATFORM = netbsd
+else ifeq ($(UNAME_S),OpenBSD)
+PLATFORM = openbsd
+else ifeq ($(UNAME_S),DragonFly)
+PLATFORM = dragonfly
+else ifeq ($(shell uname -o),Msys)
+PLATFORM = msys2
+else
+$(error Unable to detect platform. Please open a ticket with the output of uname -a.)
+endif
+
+export PLATFORM
+endif
+
+# Core targets.
+
+all:: deps app rel
+
+# Noop to avoid a Make warning when there's nothing to do.
+rel::
+ $(verbose) :
+
+relup:: deps app
+
+check:: tests
+
+clean:: clean-crashdump
+
+clean-crashdump:
+ifneq ($(wildcard erl_crash.dump),)
+ $(gen_verbose) rm -f erl_crash.dump
+endif
+
+distclean:: clean distclean-tmp
+
+$(ERLANG_MK_TMP):
+ $(verbose) mkdir -p $(ERLANG_MK_TMP)
+
+distclean-tmp:
+ $(gen_verbose) rm -rf $(ERLANG_MK_TMP)
+
+help::
+ $(verbose) printf "%s\n" \
+ "erlang.mk (version $(ERLANG_MK_VERSION)) is distributed under the terms of the ISC License." \
+ "Copyright (c) 2013-2016 Loïc Hoguin <essen@ninenines.eu>" \
+ "" \
+ "Usage: [V=1] $(MAKE) [target]..." \
+ "" \
+ "Core targets:" \
+ " all Run deps, app and rel targets in that order" \
+ " app Compile the project" \
+ " deps Fetch dependencies (if needed) and compile them" \
+ " fetch-deps Fetch dependencies recursively (if needed) without compiling them" \
+ " list-deps List dependencies recursively on stdout" \
+ " search q=... Search for a package in the built-in index" \
+ " rel Build a release for this project, if applicable" \
+ " docs Build the documentation for this project" \
+ " install-docs Install the man pages for this project" \
+ " check Compile and run all tests and analysis for this project" \
+ " tests Run the tests for this project" \
+ " clean Delete temporary and output files from most targets" \
+ " distclean Delete all temporary and output files" \
+ " help Display this help and exit" \
+ " erlang-mk Update erlang.mk to the latest version"
+
+# Core functions.
+
+empty :=
+space := $(empty) $(empty)
+tab := $(empty) $(empty)
+comma := ,
+
+define newline
+
+
+endef
+
+define comma_list
+$(subst $(space),$(comma),$(strip $(1)))
+endef
+
+define escape_dquotes
+$(subst ",\",$1)
+endef
+
+# Adding erlang.mk to make Erlang scripts who call init:get_plain_arguments() happy.
+define erlang
+$(ERL) $2 -pz $(ERLANG_MK_TMP)/rebar/ebin -eval "$(subst $(newline),,$(call escape_dquotes,$1))" -- erlang.mk
+endef
+
+ifeq ($(PLATFORM),msys2)
+core_native_path = $(shell cygpath -m $1)
+else
+core_native_path = $1
+endif
+
+core_http_get = curl -Lf$(if $(filter-out 0,$(V)),,s)o $(call core_native_path,$1) $2
+
+core_eq = $(and $(findstring $(1),$(2)),$(findstring $(2),$(1)))
+
+# We skip files that contain spaces because they end up causing issues.
+core_find = $(if $(wildcard $1),$(shell find $(1:%/=%) \( -type l -o -type f \) -name $(subst *,\*,$2) | grep -v " "))
+
+core_lc = $(subst A,a,$(subst B,b,$(subst C,c,$(subst D,d,$(subst E,e,$(subst F,f,$(subst G,g,$(subst H,h,$(subst I,i,$(subst J,j,$(subst K,k,$(subst L,l,$(subst M,m,$(subst N,n,$(subst O,o,$(subst P,p,$(subst Q,q,$(subst R,r,$(subst S,s,$(subst T,t,$(subst U,u,$(subst V,v,$(subst W,w,$(subst X,x,$(subst Y,y,$(subst Z,z,$(1)))))))))))))))))))))))))))
+
+core_ls = $(filter-out $(1),$(shell echo $(1)))
+
+# @todo Use a solution that does not require using perl.
+core_relpath = $(shell perl -e 'use File::Spec; print File::Spec->abs2rel(@ARGV) . "\n"' $1 $2)
+
+define core_render
+ printf -- '$(subst $(newline),\n,$(subst %,%%,$(subst ','\'',$(subst $(tab),$(WS),$(call $(1))))))\n' > $(2)
+endef
+
+# Automated update.
+
+ERLANG_MK_REPO ?= https://github.com/ninenines/erlang.mk
+ERLANG_MK_COMMIT ?=
+ERLANG_MK_BUILD_CONFIG ?= build.config
+ERLANG_MK_BUILD_DIR ?= .erlang.mk.build
+
+erlang-mk: WITHOUT ?= $(ERLANG_MK_WITHOUT)
+erlang-mk:
+ifdef ERLANG_MK_COMMIT
+ $(verbose) git clone $(ERLANG_MK_REPO) $(ERLANG_MK_BUILD_DIR)
+ $(verbose) cd $(ERLANG_MK_BUILD_DIR) && git checkout $(ERLANG_MK_COMMIT)
+else
+ $(verbose) git clone --depth 1 $(ERLANG_MK_REPO) $(ERLANG_MK_BUILD_DIR)
+endif
+ $(verbose) if [ -f $(ERLANG_MK_BUILD_CONFIG) ]; then cp $(ERLANG_MK_BUILD_CONFIG) $(ERLANG_MK_BUILD_DIR)/build.config; fi
+ $(gen_verbose) $(MAKE) --no-print-directory -C $(ERLANG_MK_BUILD_DIR) WITHOUT='$(strip $(WITHOUT))' UPGRADE=1
+ $(verbose) cp $(ERLANG_MK_BUILD_DIR)/erlang.mk ./erlang.mk
+ $(verbose) rm -rf $(ERLANG_MK_BUILD_DIR)
+ $(verbose) rm -rf $(ERLANG_MK_TMP)
+
+# The erlang.mk package index is bundled in the default erlang.mk build.
+# Search for the string "copyright" to skip to the rest of the code.
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-kerl
+
+KERL_INSTALL_DIR ?= $(HOME)/erlang
+
+ifeq ($(strip $(KERL)),)
+KERL := $(ERLANG_MK_TMP)/kerl/kerl
+endif
+
+KERL_DIR = $(ERLANG_MK_TMP)/kerl
+
+export KERL
+
+KERL_GIT ?= https://github.com/kerl/kerl
+KERL_COMMIT ?= master
+
+KERL_MAKEFLAGS ?=
+
+OTP_GIT ?= https://github.com/erlang/otp
+
+define kerl_otp_target
+$(KERL_INSTALL_DIR)/$(1): $(KERL)
+ $(verbose) if [ ! -d $$@ ]; then \
+ MAKEFLAGS="$(KERL_MAKEFLAGS)" $(KERL) build git $(OTP_GIT) $(1) $(1); \
+ $(KERL) install $(1) $(KERL_INSTALL_DIR)/$(1); \
+ fi
+endef
+
+define kerl_hipe_target
+$(KERL_INSTALL_DIR)/$1-native: $(KERL)
+ $(verbose) if [ ! -d $$@ ]; then \
+ KERL_CONFIGURE_OPTIONS=--enable-native-libs \
+ MAKEFLAGS="$(KERL_MAKEFLAGS)" $(KERL) build git $(OTP_GIT) $1 $1-native; \
+ $(KERL) install $1-native $(KERL_INSTALL_DIR)/$1-native; \
+ fi
+endef
+
+$(KERL): $(KERL_DIR)
+
+$(KERL_DIR): | $(ERLANG_MK_TMP)
+ $(gen_verbose) git clone --depth 1 $(KERL_GIT) $(ERLANG_MK_TMP)/kerl
+ $(verbose) cd $(ERLANG_MK_TMP)/kerl && git checkout $(KERL_COMMIT)
+ $(verbose) chmod +x $(KERL)
+
+distclean:: distclean-kerl
+
+distclean-kerl:
+ $(gen_verbose) rm -rf $(KERL_DIR)
+
+# Allow users to select which version of Erlang/OTP to use for a project.
+
+ifneq ($(strip $(LATEST_ERLANG_OTP)),)
+# In some environments it is necessary to filter out master.
+ERLANG_OTP := $(notdir $(lastword $(sort\
+ $(filter-out $(KERL_INSTALL_DIR)/master $(KERL_INSTALL_DIR)/OTP_R%,\
+ $(filter-out %-rc1 %-rc2 %-rc3,$(wildcard $(KERL_INSTALL_DIR)/*[^-native]))))))
+endif
+
+ERLANG_OTP ?=
+ERLANG_HIPE ?=
+
+# Use kerl to enforce a specific Erlang/OTP version for a project.
+ifneq ($(strip $(ERLANG_OTP)),)
+export PATH := $(KERL_INSTALL_DIR)/$(ERLANG_OTP)/bin:$(PATH)
+SHELL := env PATH=$(PATH) $(SHELL)
+$(eval $(call kerl_otp_target,$(ERLANG_OTP)))
+
+# Build Erlang/OTP only if it doesn't already exist.
+ifeq ($(wildcard $(KERL_INSTALL_DIR)/$(ERLANG_OTP))$(BUILD_ERLANG_OTP),)
+$(info Building Erlang/OTP $(ERLANG_OTP)... Please wait...)
+$(shell $(MAKE) $(KERL_INSTALL_DIR)/$(ERLANG_OTP) ERLANG_OTP=$(ERLANG_OTP) BUILD_ERLANG_OTP=1 >&2)
+endif
+
+else
+# Same for a HiPE enabled VM.
+ifneq ($(strip $(ERLANG_HIPE)),)
+export PATH := $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native/bin:$(PATH)
+SHELL := env PATH=$(PATH) $(SHELL)
+$(eval $(call kerl_hipe_target,$(ERLANG_HIPE)))
+
+# Build Erlang/OTP only if it doesn't already exist.
+ifeq ($(wildcard $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native)$(BUILD_ERLANG_OTP),)
+$(info Building HiPE-enabled Erlang/OTP $(ERLANG_HIPE)... Please wait...)
+$(shell $(MAKE) $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native ERLANG_HIPE=$(ERLANG_HIPE) BUILD_ERLANG_OTP=1 >&2)
+endif
+
+endif
+endif
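+
+# Usage sketch (version strings are hypothetical): a project Makefile can
+# pin its Erlang/OTP build before including erlang.mk, e.g.
+#
+#     ERLANG_OTP = 22.3
+#     include erlang.mk
+#
+# or request a HiPE-enabled VM instead by setting ERLANG_HIPE = 22.3.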
+
+PACKAGES += aberth
+pkg_aberth_name = aberth
+pkg_aberth_description = Generic BERT-RPC server in Erlang
+pkg_aberth_homepage = https://github.com/a13x/aberth
+pkg_aberth_fetch = git
+pkg_aberth_repo = https://github.com/a13x/aberth
+pkg_aberth_commit = master
+
+PACKAGES += active
+pkg_active_name = active
+pkg_active_description = Active development for Erlang: rebuild and reload source/binary files while the VM is running
+pkg_active_homepage = https://github.com/proger/active
+pkg_active_fetch = git
+pkg_active_repo = https://github.com/proger/active
+pkg_active_commit = master
+
+PACKAGES += actordb_core
+pkg_actordb_core_name = actordb_core
+pkg_actordb_core_description = ActorDB main source
+pkg_actordb_core_homepage = http://www.actordb.com/
+pkg_actordb_core_fetch = git
+pkg_actordb_core_repo = https://github.com/biokoda/actordb_core
+pkg_actordb_core_commit = master
+
+PACKAGES += actordb_thrift
+pkg_actordb_thrift_name = actordb_thrift
+pkg_actordb_thrift_description = Thrift API for ActorDB
+pkg_actordb_thrift_homepage = http://www.actordb.com/
+pkg_actordb_thrift_fetch = git
+pkg_actordb_thrift_repo = https://github.com/biokoda/actordb_thrift
+pkg_actordb_thrift_commit = master
+
+PACKAGES += aleppo
+pkg_aleppo_name = aleppo
+pkg_aleppo_description = Alternative Erlang Pre-Processor
+pkg_aleppo_homepage = https://github.com/ErlyORM/aleppo
+pkg_aleppo_fetch = git
+pkg_aleppo_repo = https://github.com/ErlyORM/aleppo
+pkg_aleppo_commit = master
+
+PACKAGES += alog
+pkg_alog_name = alog
+pkg_alog_description = Simply the best logging framework for Erlang
+pkg_alog_homepage = https://github.com/siberian-fast-food/alogger
+pkg_alog_fetch = git
+pkg_alog_repo = https://github.com/siberian-fast-food/alogger
+pkg_alog_commit = master
+
+PACKAGES += amqp_client
+pkg_amqp_client_name = amqp_client
+pkg_amqp_client_description = RabbitMQ Erlang AMQP client
+pkg_amqp_client_homepage = https://www.rabbitmq.com/erlang-client-user-guide.html
+pkg_amqp_client_fetch = git
+pkg_amqp_client_repo = https://github.com/rabbitmq/rabbitmq-erlang-client.git
+pkg_amqp_client_commit = master
+
+PACKAGES += annotations
+pkg_annotations_name = annotations
+pkg_annotations_description = Simple code instrumentation utilities
+pkg_annotations_homepage = https://github.com/hyperthunk/annotations
+pkg_annotations_fetch = git
+pkg_annotations_repo = https://github.com/hyperthunk/annotations
+pkg_annotations_commit = master
+
+PACKAGES += antidote
+pkg_antidote_name = antidote
+pkg_antidote_description = Large-scale computation without synchronisation
+pkg_antidote_homepage = https://syncfree.lip6.fr/
+pkg_antidote_fetch = git
+pkg_antidote_repo = https://github.com/SyncFree/antidote
+pkg_antidote_commit = master
+
+PACKAGES += apns
+pkg_apns_name = apns
+pkg_apns_description = Apple Push Notification Server for Erlang
+pkg_apns_homepage = http://inaka.github.com/apns4erl
+pkg_apns_fetch = git
+pkg_apns_repo = https://github.com/inaka/apns4erl
+pkg_apns_commit = master
+
+PACKAGES += asciideck
+pkg_asciideck_name = asciideck
+pkg_asciideck_description = Asciidoc for Erlang.
+pkg_asciideck_homepage = https://ninenines.eu
+pkg_asciideck_fetch = git
+pkg_asciideck_repo = https://github.com/ninenines/asciideck
+pkg_asciideck_commit = master
+
+PACKAGES += azdht
+pkg_azdht_name = azdht
+pkg_azdht_description = Azureus Distributed Hash Table (DHT) in Erlang
+pkg_azdht_homepage = https://github.com/arcusfelis/azdht
+pkg_azdht_fetch = git
+pkg_azdht_repo = https://github.com/arcusfelis/azdht
+pkg_azdht_commit = master
+
+PACKAGES += backoff
+pkg_backoff_name = backoff
+pkg_backoff_description = Simple exponential backoffs in Erlang
+pkg_backoff_homepage = https://github.com/ferd/backoff
+pkg_backoff_fetch = git
+pkg_backoff_repo = https://github.com/ferd/backoff
+pkg_backoff_commit = master
+
+PACKAGES += barrel_tcp
+pkg_barrel_tcp_name = barrel_tcp
+pkg_barrel_tcp_description = barrel is a generic TCP acceptor pool with low latency in Erlang.
+pkg_barrel_tcp_homepage = https://github.com/benoitc-attic/barrel_tcp
+pkg_barrel_tcp_fetch = git
+pkg_barrel_tcp_repo = https://github.com/benoitc-attic/barrel_tcp
+pkg_barrel_tcp_commit = master
+
+PACKAGES += basho_bench
+pkg_basho_bench_name = basho_bench
+pkg_basho_bench_description = A load-generation and testing tool for basically whatever you can write a returning Erlang function for.
+pkg_basho_bench_homepage = https://github.com/basho/basho_bench
+pkg_basho_bench_fetch = git
+pkg_basho_bench_repo = https://github.com/basho/basho_bench
+pkg_basho_bench_commit = master
+
+PACKAGES += bcrypt
+pkg_bcrypt_name = bcrypt
+pkg_bcrypt_description = Bcrypt Erlang / C library
+pkg_bcrypt_homepage = https://github.com/erlangpack/bcrypt
+pkg_bcrypt_fetch = git
+pkg_bcrypt_repo = https://github.com/erlangpack/bcrypt.git
+pkg_bcrypt_commit = master
+
+PACKAGES += beam
+pkg_beam_name = beam
+pkg_beam_description = BEAM emulator written in Erlang
+pkg_beam_homepage = https://github.com/tonyrog/beam
+pkg_beam_fetch = git
+pkg_beam_repo = https://github.com/tonyrog/beam
+pkg_beam_commit = master
+
+PACKAGES += beanstalk
+pkg_beanstalk_name = beanstalk
+pkg_beanstalk_description = An Erlang client for beanstalkd
+pkg_beanstalk_homepage = https://github.com/tim/erlang-beanstalk
+pkg_beanstalk_fetch = git
+pkg_beanstalk_repo = https://github.com/tim/erlang-beanstalk
+pkg_beanstalk_commit = master
+
+PACKAGES += bear
+pkg_bear_name = bear
+pkg_bear_description = a set of statistics functions for erlang
+pkg_bear_homepage = https://github.com/boundary/bear
+pkg_bear_fetch = git
+pkg_bear_repo = https://github.com/boundary/bear
+pkg_bear_commit = master
+
+PACKAGES += bertconf
+pkg_bertconf_name = bertconf
+pkg_bertconf_description = Make ETS tables out of static BERT files that are auto-reloaded
+pkg_bertconf_homepage = https://github.com/ferd/bertconf
+pkg_bertconf_fetch = git
+pkg_bertconf_repo = https://github.com/ferd/bertconf
+pkg_bertconf_commit = master
+
+PACKAGES += bifrost
+pkg_bifrost_name = bifrost
+pkg_bifrost_description = Erlang FTP Server Framework
+pkg_bifrost_homepage = https://github.com/thorstadt/bifrost
+pkg_bifrost_fetch = git
+pkg_bifrost_repo = https://github.com/thorstadt/bifrost
+pkg_bifrost_commit = master
+
+PACKAGES += binpp
+pkg_binpp_name = binpp
+pkg_binpp_description = Erlang Binary Pretty Printer
+pkg_binpp_homepage = https://github.com/jtendo/binpp
+pkg_binpp_fetch = git
+pkg_binpp_repo = https://github.com/jtendo/binpp
+pkg_binpp_commit = master
+
+PACKAGES += bisect
+pkg_bisect_name = bisect
+pkg_bisect_description = Ordered fixed-size binary dictionary in Erlang
+pkg_bisect_homepage = https://github.com/knutin/bisect
+pkg_bisect_fetch = git
+pkg_bisect_repo = https://github.com/knutin/bisect
+pkg_bisect_commit = master
+
+PACKAGES += bitcask
+pkg_bitcask_name = bitcask
+pkg_bitcask_description = because you need another key/value storage engine
+pkg_bitcask_homepage = https://github.com/basho/bitcask
+pkg_bitcask_fetch = git
+pkg_bitcask_repo = https://github.com/basho/bitcask
+pkg_bitcask_commit = develop
+
+PACKAGES += bitstore
+pkg_bitstore_name = bitstore
+pkg_bitstore_description = A document based ontology development environment
+pkg_bitstore_homepage = https://github.com/bdionne/bitstore
+pkg_bitstore_fetch = git
+pkg_bitstore_repo = https://github.com/bdionne/bitstore
+pkg_bitstore_commit = master
+
+PACKAGES += bootstrap
+pkg_bootstrap_name = bootstrap
+pkg_bootstrap_description = A simple, yet powerful Erlang cluster bootstrapping application.
+pkg_bootstrap_homepage = https://github.com/schlagert/bootstrap
+pkg_bootstrap_fetch = git
+pkg_bootstrap_repo = https://github.com/schlagert/bootstrap
+pkg_bootstrap_commit = master
+
+PACKAGES += boss
+pkg_boss_name = boss
+pkg_boss_description = Erlang web MVC, now featuring Comet
+pkg_boss_homepage = https://github.com/ChicagoBoss/ChicagoBoss
+pkg_boss_fetch = git
+pkg_boss_repo = https://github.com/ChicagoBoss/ChicagoBoss
+pkg_boss_commit = master
+
+PACKAGES += boss_db
+pkg_boss_db_name = boss_db
+pkg_boss_db_description = BossDB: a sharded, caching, pooling, evented ORM for Erlang
+pkg_boss_db_homepage = https://github.com/ErlyORM/boss_db
+pkg_boss_db_fetch = git
+pkg_boss_db_repo = https://github.com/ErlyORM/boss_db
+pkg_boss_db_commit = master
+
+PACKAGES += brod
+pkg_brod_name = brod
+pkg_brod_description = Kafka client in Erlang
+pkg_brod_homepage = https://github.com/klarna/brod
+pkg_brod_fetch = git
+pkg_brod_repo = https://github.com/klarna/brod.git
+pkg_brod_commit = master
+
+PACKAGES += bson
+pkg_bson_name = bson
+pkg_bson_description = BSON documents in Erlang, see bsonspec.org
+pkg_bson_homepage = https://github.com/comtihon/bson-erlang
+pkg_bson_fetch = git
+pkg_bson_repo = https://github.com/comtihon/bson-erlang
+pkg_bson_commit = master
+
+PACKAGES += bullet
+pkg_bullet_name = bullet
+pkg_bullet_description = Simple, reliable, efficient streaming for Cowboy.
+pkg_bullet_homepage = http://ninenines.eu
+pkg_bullet_fetch = git
+pkg_bullet_repo = https://github.com/ninenines/bullet
+pkg_bullet_commit = master
+
+PACKAGES += cache
+pkg_cache_name = cache
+pkg_cache_description = Erlang in-memory cache
+pkg_cache_homepage = https://github.com/fogfish/cache
+pkg_cache_fetch = git
+pkg_cache_repo = https://github.com/fogfish/cache
+pkg_cache_commit = master
+
+PACKAGES += cake
+pkg_cake_name = cake
+pkg_cake_description = Really simple terminal colorization
+pkg_cake_homepage = https://github.com/darach/cake-erl
+pkg_cake_fetch = git
+pkg_cake_repo = https://github.com/darach/cake-erl
+pkg_cake_commit = master
+
+PACKAGES += carotene
+pkg_carotene_name = carotene
+pkg_carotene_description = Real-time server
+pkg_carotene_homepage = https://github.com/carotene/carotene
+pkg_carotene_fetch = git
+pkg_carotene_repo = https://github.com/carotene/carotene
+pkg_carotene_commit = master
+
+PACKAGES += cberl
+pkg_cberl_name = cberl
+pkg_cberl_description = NIF based Erlang bindings for Couchbase
+pkg_cberl_homepage = https://github.com/chitika/cberl
+pkg_cberl_fetch = git
+pkg_cberl_repo = https://github.com/chitika/cberl
+pkg_cberl_commit = master
+
+PACKAGES += cecho
+pkg_cecho_name = cecho
+pkg_cecho_description = An ncurses library for Erlang
+pkg_cecho_homepage = https://github.com/mazenharake/cecho
+pkg_cecho_fetch = git
+pkg_cecho_repo = https://github.com/mazenharake/cecho
+pkg_cecho_commit = master
+
+PACKAGES += cferl
+pkg_cferl_name = cferl
+pkg_cferl_description = Rackspace / OpenStack Cloud Files Erlang Client
+pkg_cferl_homepage = https://github.com/ddossot/cferl
+pkg_cferl_fetch = git
+pkg_cferl_repo = https://github.com/ddossot/cferl
+pkg_cferl_commit = master
+
+PACKAGES += chaos_monkey
+pkg_chaos_monkey_name = chaos_monkey
+pkg_chaos_monkey_description = This is The CHAOS MONKEY. It will kill your processes.
+pkg_chaos_monkey_homepage = https://github.com/dLuna/chaos_monkey
+pkg_chaos_monkey_fetch = git
+pkg_chaos_monkey_repo = https://github.com/dLuna/chaos_monkey
+pkg_chaos_monkey_commit = master
+
+PACKAGES += check_node
+pkg_check_node_name = check_node
+pkg_check_node_description = Nagios Scripts for monitoring Riak
+pkg_check_node_homepage = https://github.com/basho-labs/riak_nagios
+pkg_check_node_fetch = git
+pkg_check_node_repo = https://github.com/basho-labs/riak_nagios
+pkg_check_node_commit = master
+
+PACKAGES += chronos
+pkg_chronos_name = chronos
+pkg_chronos_description = Timer module for Erlang that makes it easy to abstract time out of the tests.
+pkg_chronos_homepage = https://github.com/lehoff/chronos
+pkg_chronos_fetch = git
+pkg_chronos_repo = https://github.com/lehoff/chronos
+pkg_chronos_commit = master
+
+PACKAGES += chumak
+pkg_chumak_name = chumak
+pkg_chumak_description = Pure Erlang implementation of ZeroMQ Message Transport Protocol.
+pkg_chumak_homepage = http://choven.ca
+pkg_chumak_fetch = git
+pkg_chumak_repo = https://github.com/chovencorp/chumak
+pkg_chumak_commit = master
+
+PACKAGES += cl
+pkg_cl_name = cl
+pkg_cl_description = OpenCL binding for Erlang
+pkg_cl_homepage = https://github.com/tonyrog/cl
+pkg_cl_fetch = git
+pkg_cl_repo = https://github.com/tonyrog/cl
+pkg_cl_commit = master
+
+PACKAGES += clique
+pkg_clique_name = clique
+pkg_clique_description = CLI Framework for Erlang
+pkg_clique_homepage = https://github.com/basho/clique
+pkg_clique_fetch = git
+pkg_clique_repo = https://github.com/basho/clique
+pkg_clique_commit = develop
+
+PACKAGES += cloudi_core
+pkg_cloudi_core_name = cloudi_core
+pkg_cloudi_core_description = CloudI internal service runtime
+pkg_cloudi_core_homepage = http://cloudi.org/
+pkg_cloudi_core_fetch = git
+pkg_cloudi_core_repo = https://github.com/CloudI/cloudi_core
+pkg_cloudi_core_commit = master
+
+PACKAGES += cloudi_service_api_requests
+pkg_cloudi_service_api_requests_name = cloudi_service_api_requests
+pkg_cloudi_service_api_requests_description = CloudI Service API requests (JSON-RPC/Erlang-term support)
+pkg_cloudi_service_api_requests_homepage = http://cloudi.org/
+pkg_cloudi_service_api_requests_fetch = git
+pkg_cloudi_service_api_requests_repo = https://github.com/CloudI/cloudi_service_api_requests
+pkg_cloudi_service_api_requests_commit = master
+
+PACKAGES += cloudi_service_db
+pkg_cloudi_service_db_name = cloudi_service_db
+pkg_cloudi_service_db_description = CloudI Database (in-memory/testing/generic)
+pkg_cloudi_service_db_homepage = http://cloudi.org/
+pkg_cloudi_service_db_fetch = git
+pkg_cloudi_service_db_repo = https://github.com/CloudI/cloudi_service_db
+pkg_cloudi_service_db_commit = master
+
+PACKAGES += cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_name = cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_description = Cassandra CloudI Service
+pkg_cloudi_service_db_cassandra_homepage = http://cloudi.org/
+pkg_cloudi_service_db_cassandra_fetch = git
+pkg_cloudi_service_db_cassandra_repo = https://github.com/CloudI/cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_commit = master
+
+PACKAGES += cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_name = cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_description = Cassandra CQL CloudI Service
+pkg_cloudi_service_db_cassandra_cql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_cassandra_cql_fetch = git
+pkg_cloudi_service_db_cassandra_cql_repo = https://github.com/CloudI/cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_commit = master
+
+PACKAGES += cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_name = cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_description = CouchDB CloudI Service
+pkg_cloudi_service_db_couchdb_homepage = http://cloudi.org/
+pkg_cloudi_service_db_couchdb_fetch = git
+pkg_cloudi_service_db_couchdb_repo = https://github.com/CloudI/cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_commit = master
+
+PACKAGES += cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_name = cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_description = elasticsearch CloudI Service
+pkg_cloudi_service_db_elasticsearch_homepage = http://cloudi.org/
+pkg_cloudi_service_db_elasticsearch_fetch = git
+pkg_cloudi_service_db_elasticsearch_repo = https://github.com/CloudI/cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_commit = master
+
+PACKAGES += cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_name = cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_description = memcached CloudI Service
+pkg_cloudi_service_db_memcached_homepage = http://cloudi.org/
+pkg_cloudi_service_db_memcached_fetch = git
+pkg_cloudi_service_db_memcached_repo = https://github.com/CloudI/cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_commit = master
+
+PACKAGES += cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_name = cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_description = MySQL CloudI Service
+pkg_cloudi_service_db_mysql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_mysql_fetch = git
+pkg_cloudi_service_db_mysql_repo = https://github.com/CloudI/cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_commit = master
+
+PACKAGES += cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_name = cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_description = PostgreSQL CloudI Service
+pkg_cloudi_service_db_pgsql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_pgsql_fetch = git
+pkg_cloudi_service_db_pgsql_repo = https://github.com/CloudI/cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_commit = master
+
+PACKAGES += cloudi_service_db_riak
+pkg_cloudi_service_db_riak_name = cloudi_service_db_riak
+pkg_cloudi_service_db_riak_description = Riak CloudI Service
+pkg_cloudi_service_db_riak_homepage = http://cloudi.org/
+pkg_cloudi_service_db_riak_fetch = git
+pkg_cloudi_service_db_riak_repo = https://github.com/CloudI/cloudi_service_db_riak
+pkg_cloudi_service_db_riak_commit = master
+
+PACKAGES += cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_name = cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_description = Tokyo Tyrant CloudI Service
+pkg_cloudi_service_db_tokyotyrant_homepage = http://cloudi.org/
+pkg_cloudi_service_db_tokyotyrant_fetch = git
+pkg_cloudi_service_db_tokyotyrant_repo = https://github.com/CloudI/cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_commit = master
+
+PACKAGES += cloudi_service_filesystem
+pkg_cloudi_service_filesystem_name = cloudi_service_filesystem
+pkg_cloudi_service_filesystem_description = Filesystem CloudI Service
+pkg_cloudi_service_filesystem_homepage = http://cloudi.org/
+pkg_cloudi_service_filesystem_fetch = git
+pkg_cloudi_service_filesystem_repo = https://github.com/CloudI/cloudi_service_filesystem
+pkg_cloudi_service_filesystem_commit = master
+
+PACKAGES += cloudi_service_http_client
+pkg_cloudi_service_http_client_name = cloudi_service_http_client
+pkg_cloudi_service_http_client_description = HTTP client CloudI Service
+pkg_cloudi_service_http_client_homepage = http://cloudi.org/
+pkg_cloudi_service_http_client_fetch = git
+pkg_cloudi_service_http_client_repo = https://github.com/CloudI/cloudi_service_http_client
+pkg_cloudi_service_http_client_commit = master
+
+PACKAGES += cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_name = cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_description = cowboy HTTP/HTTPS CloudI Service
+pkg_cloudi_service_http_cowboy_homepage = http://cloudi.org/
+pkg_cloudi_service_http_cowboy_fetch = git
+pkg_cloudi_service_http_cowboy_repo = https://github.com/CloudI/cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_commit = master
+
+PACKAGES += cloudi_service_http_elli
+pkg_cloudi_service_http_elli_name = cloudi_service_http_elli
+pkg_cloudi_service_http_elli_description = elli HTTP CloudI Service
+pkg_cloudi_service_http_elli_homepage = http://cloudi.org/
+pkg_cloudi_service_http_elli_fetch = git
+pkg_cloudi_service_http_elli_repo = https://github.com/CloudI/cloudi_service_http_elli
+pkg_cloudi_service_http_elli_commit = master
+
+PACKAGES += cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_name = cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_description = Map/Reduce CloudI Service
+pkg_cloudi_service_map_reduce_homepage = http://cloudi.org/
+pkg_cloudi_service_map_reduce_fetch = git
+pkg_cloudi_service_map_reduce_repo = https://github.com/CloudI/cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_commit = master
+
+PACKAGES += cloudi_service_oauth1
+pkg_cloudi_service_oauth1_name = cloudi_service_oauth1
+pkg_cloudi_service_oauth1_description = OAuth v1.0 CloudI Service
+pkg_cloudi_service_oauth1_homepage = http://cloudi.org/
+pkg_cloudi_service_oauth1_fetch = git
+pkg_cloudi_service_oauth1_repo = https://github.com/CloudI/cloudi_service_oauth1
+pkg_cloudi_service_oauth1_commit = master
+
+PACKAGES += cloudi_service_queue
+pkg_cloudi_service_queue_name = cloudi_service_queue
+pkg_cloudi_service_queue_description = Persistent Queue Service
+pkg_cloudi_service_queue_homepage = http://cloudi.org/
+pkg_cloudi_service_queue_fetch = git
+pkg_cloudi_service_queue_repo = https://github.com/CloudI/cloudi_service_queue
+pkg_cloudi_service_queue_commit = master
+
+PACKAGES += cloudi_service_quorum
+pkg_cloudi_service_quorum_name = cloudi_service_quorum
+pkg_cloudi_service_quorum_description = CloudI Quorum Service
+pkg_cloudi_service_quorum_homepage = http://cloudi.org/
+pkg_cloudi_service_quorum_fetch = git
+pkg_cloudi_service_quorum_repo = https://github.com/CloudI/cloudi_service_quorum
+pkg_cloudi_service_quorum_commit = master
+
+PACKAGES += cloudi_service_router
+pkg_cloudi_service_router_name = cloudi_service_router
+pkg_cloudi_service_router_description = CloudI Router Service
+pkg_cloudi_service_router_homepage = http://cloudi.org/
+pkg_cloudi_service_router_fetch = git
+pkg_cloudi_service_router_repo = https://github.com/CloudI/cloudi_service_router
+pkg_cloudi_service_router_commit = master
+
+PACKAGES += cloudi_service_tcp
+pkg_cloudi_service_tcp_name = cloudi_service_tcp
+pkg_cloudi_service_tcp_description = TCP CloudI Service
+pkg_cloudi_service_tcp_homepage = http://cloudi.org/
+pkg_cloudi_service_tcp_fetch = git
+pkg_cloudi_service_tcp_repo = https://github.com/CloudI/cloudi_service_tcp
+pkg_cloudi_service_tcp_commit = master
+
+PACKAGES += cloudi_service_timers
+pkg_cloudi_service_timers_name = cloudi_service_timers
+pkg_cloudi_service_timers_description = Timers CloudI Service
+pkg_cloudi_service_timers_homepage = http://cloudi.org/
+pkg_cloudi_service_timers_fetch = git
+pkg_cloudi_service_timers_repo = https://github.com/CloudI/cloudi_service_timers
+pkg_cloudi_service_timers_commit = master
+
+PACKAGES += cloudi_service_udp
+pkg_cloudi_service_udp_name = cloudi_service_udp
+pkg_cloudi_service_udp_description = UDP CloudI Service
+pkg_cloudi_service_udp_homepage = http://cloudi.org/
+pkg_cloudi_service_udp_fetch = git
+pkg_cloudi_service_udp_repo = https://github.com/CloudI/cloudi_service_udp
+pkg_cloudi_service_udp_commit = master
+
+PACKAGES += cloudi_service_validate
+pkg_cloudi_service_validate_name = cloudi_service_validate
+pkg_cloudi_service_validate_description = CloudI Validate Service
+pkg_cloudi_service_validate_homepage = http://cloudi.org/
+pkg_cloudi_service_validate_fetch = git
+pkg_cloudi_service_validate_repo = https://github.com/CloudI/cloudi_service_validate
+pkg_cloudi_service_validate_commit = master
+
+PACKAGES += cloudi_service_zeromq
+pkg_cloudi_service_zeromq_name = cloudi_service_zeromq
+pkg_cloudi_service_zeromq_description = ZeroMQ CloudI Service
+pkg_cloudi_service_zeromq_homepage = http://cloudi.org/
+pkg_cloudi_service_zeromq_fetch = git
+pkg_cloudi_service_zeromq_repo = https://github.com/CloudI/cloudi_service_zeromq
+pkg_cloudi_service_zeromq_commit = master
+
+PACKAGES += cluster_info
+pkg_cluster_info_name = cluster_info
+pkg_cluster_info_description = Fork of Hibari's nifty cluster_info OTP app
+pkg_cluster_info_homepage = https://github.com/basho/cluster_info
+pkg_cluster_info_fetch = git
+pkg_cluster_info_repo = https://github.com/basho/cluster_info
+pkg_cluster_info_commit = master
+
+PACKAGES += color
+pkg_color_name = color
+pkg_color_description = ANSI colors for your Erlang
+pkg_color_homepage = https://github.com/julianduque/erlang-color
+pkg_color_fetch = git
+pkg_color_repo = https://github.com/julianduque/erlang-color
+pkg_color_commit = master
+
+PACKAGES += confetti
+pkg_confetti_name = confetti
+pkg_confetti_description = Erlang configuration provider / application:get_env/2 on steroids
+pkg_confetti_homepage = https://github.com/jtendo/confetti
+pkg_confetti_fetch = git
+pkg_confetti_repo = https://github.com/jtendo/confetti
+pkg_confetti_commit = master
+
+PACKAGES += couchbeam
+pkg_couchbeam_name = couchbeam
+pkg_couchbeam_description = Apache CouchDB client in Erlang
+pkg_couchbeam_homepage = https://github.com/benoitc/couchbeam
+pkg_couchbeam_fetch = git
+pkg_couchbeam_repo = https://github.com/benoitc/couchbeam
+pkg_couchbeam_commit = master
+
+PACKAGES += covertool
+pkg_covertool_name = covertool
+pkg_covertool_description = Tool to convert Erlang cover data files into Cobertura XML reports
+pkg_covertool_homepage = https://github.com/idubrov/covertool
+pkg_covertool_fetch = git
+pkg_covertool_repo = https://github.com/idubrov/covertool
+pkg_covertool_commit = master
+
+PACKAGES += cowboy
+pkg_cowboy_name = cowboy
+pkg_cowboy_description = Small, fast and modular HTTP server.
+pkg_cowboy_homepage = http://ninenines.eu
+pkg_cowboy_fetch = git
+pkg_cowboy_repo = https://github.com/ninenines/cowboy
+pkg_cowboy_commit = 1.0.4
+
+PACKAGES += cowdb
+pkg_cowdb_name = cowdb
+pkg_cowdb_description = Pure Key/Value database library for Erlang Applications
+pkg_cowdb_homepage = https://github.com/refuge/cowdb
+pkg_cowdb_fetch = git
+pkg_cowdb_repo = https://github.com/refuge/cowdb
+pkg_cowdb_commit = master
+
+PACKAGES += cowlib
+pkg_cowlib_name = cowlib
+pkg_cowlib_description = Support library for manipulating Web protocols.
+pkg_cowlib_homepage = http://ninenines.eu
+pkg_cowlib_fetch = git
+pkg_cowlib_repo = https://github.com/ninenines/cowlib
+pkg_cowlib_commit = 1.0.2
+
+PACKAGES += cpg
+pkg_cpg_name = cpg
+pkg_cpg_description = CloudI Process Groups
+pkg_cpg_homepage = https://github.com/okeuday/cpg
+pkg_cpg_fetch = git
+pkg_cpg_repo = https://github.com/okeuday/cpg
+pkg_cpg_commit = master
+
+PACKAGES += cqerl
+pkg_cqerl_name = cqerl
+pkg_cqerl_description = Native Erlang CQL client for Cassandra
+pkg_cqerl_homepage = https://matehat.github.io/cqerl/
+pkg_cqerl_fetch = git
+pkg_cqerl_repo = https://github.com/matehat/cqerl
+pkg_cqerl_commit = master
+
+PACKAGES += cr
+pkg_cr_name = cr
+pkg_cr_description = Chain Replication
+pkg_cr_homepage = https://synrc.com/apps/cr/doc/cr.htm
+pkg_cr_fetch = git
+pkg_cr_repo = https://github.com/spawnproc/cr
+pkg_cr_commit = master
+
+PACKAGES += cuttlefish
+pkg_cuttlefish_name = cuttlefish
+pkg_cuttlefish_description = never lose your childlike sense of wonder baby cuttlefish, promise me?
+pkg_cuttlefish_homepage = https://github.com/basho/cuttlefish
+pkg_cuttlefish_fetch = git
+pkg_cuttlefish_repo = https://github.com/basho/cuttlefish
+pkg_cuttlefish_commit = master
+
+PACKAGES += damocles
+pkg_damocles_name = damocles
+pkg_damocles_description = Erlang library for generating adversarial network conditions for QAing distributed applications/systems on a single Linux box.
+pkg_damocles_homepage = https://github.com/lostcolony/damocles
+pkg_damocles_fetch = git
+pkg_damocles_repo = https://github.com/lostcolony/damocles
+pkg_damocles_commit = master
+
+PACKAGES += debbie
+pkg_debbie_name = debbie
+pkg_debbie_description = .DEB Built In Erlang
+pkg_debbie_homepage = https://github.com/crownedgrouse/debbie
+pkg_debbie_fetch = git
+pkg_debbie_repo = https://github.com/crownedgrouse/debbie
+pkg_debbie_commit = master
+
+PACKAGES += decimal
+pkg_decimal_name = decimal
+pkg_decimal_description = An Erlang decimal arithmetic library
+pkg_decimal_homepage = https://github.com/tim/erlang-decimal
+pkg_decimal_fetch = git
+pkg_decimal_repo = https://github.com/tim/erlang-decimal
+pkg_decimal_commit = master
+
+PACKAGES += detergent
+pkg_detergent_name = detergent
+pkg_detergent_description = An emulsifying Erlang SOAP library
+pkg_detergent_homepage = https://github.com/devinus/detergent
+pkg_detergent_fetch = git
+pkg_detergent_repo = https://github.com/devinus/detergent
+pkg_detergent_commit = master
+
+PACKAGES += detest
+pkg_detest_name = detest
+pkg_detest_description = Tool for running tests on a cluster of erlang nodes
+pkg_detest_homepage = https://github.com/biokoda/detest
+pkg_detest_fetch = git
+pkg_detest_repo = https://github.com/biokoda/detest
+pkg_detest_commit = master
+
+PACKAGES += dh_date
+pkg_dh_date_name = dh_date
+pkg_dh_date_description = Date formatting / parsing library for erlang
+pkg_dh_date_homepage = https://github.com/daleharvey/dh_date
+pkg_dh_date_fetch = git
+pkg_dh_date_repo = https://github.com/daleharvey/dh_date
+pkg_dh_date_commit = master
+
+PACKAGES += dirbusterl
+pkg_dirbusterl_name = dirbusterl
+pkg_dirbusterl_description = DirBuster successor in Erlang
+pkg_dirbusterl_homepage = https://github.com/silentsignal/DirBustErl
+pkg_dirbusterl_fetch = git
+pkg_dirbusterl_repo = https://github.com/silentsignal/DirBustErl
+pkg_dirbusterl_commit = master
+
+PACKAGES += dispcount
+pkg_dispcount_name = dispcount
+pkg_dispcount_description = Erlang task dispatcher based on ETS counters.
+pkg_dispcount_homepage = https://github.com/ferd/dispcount
+pkg_dispcount_fetch = git
+pkg_dispcount_repo = https://github.com/ferd/dispcount
+pkg_dispcount_commit = master
+
+PACKAGES += dlhttpc
+pkg_dlhttpc_name = dlhttpc
+pkg_dlhttpc_description = dispcount-based lhttpc fork for massive amounts of requests to limited endpoints
+pkg_dlhttpc_homepage = https://github.com/ferd/dlhttpc
+pkg_dlhttpc_fetch = git
+pkg_dlhttpc_repo = https://github.com/ferd/dlhttpc
+pkg_dlhttpc_commit = master
+
+PACKAGES += dns
+pkg_dns_name = dns
+pkg_dns_description = Erlang DNS library
+pkg_dns_homepage = https://github.com/aetrion/dns_erlang
+pkg_dns_fetch = git
+pkg_dns_repo = https://github.com/aetrion/dns_erlang
+pkg_dns_commit = master
+
+PACKAGES += dnssd
+pkg_dnssd_name = dnssd
+pkg_dnssd_description = Erlang interface to Apple's Bonjour DNS Service Discovery implementation
+pkg_dnssd_homepage = https://github.com/benoitc/dnssd_erlang
+pkg_dnssd_fetch = git
+pkg_dnssd_repo = https://github.com/benoitc/dnssd_erlang
+pkg_dnssd_commit = master
+
+PACKAGES += dynamic_compile
+pkg_dynamic_compile_name = dynamic_compile
+pkg_dynamic_compile_description = compile and load erlang modules from string input
+pkg_dynamic_compile_homepage = https://github.com/jkvor/dynamic_compile
+pkg_dynamic_compile_fetch = git
+pkg_dynamic_compile_repo = https://github.com/jkvor/dynamic_compile
+pkg_dynamic_compile_commit = master
+
+PACKAGES += e2
+pkg_e2_name = e2
+pkg_e2_description = Library to simplify writing correct OTP applications.
+pkg_e2_homepage = http://e2project.org
+pkg_e2_fetch = git
+pkg_e2_repo = https://github.com/gar1t/e2
+pkg_e2_commit = master
+
+PACKAGES += eamf
+pkg_eamf_name = eamf
+pkg_eamf_description = eAMF provides Action Message Format (AMF) support for Erlang
+pkg_eamf_homepage = https://github.com/mrinalwadhwa/eamf
+pkg_eamf_fetch = git
+pkg_eamf_repo = https://github.com/mrinalwadhwa/eamf
+pkg_eamf_commit = master
+
+PACKAGES += eavro
+pkg_eavro_name = eavro
+pkg_eavro_description = Apache Avro encoder/decoder
+pkg_eavro_homepage = https://github.com/SIfoxDevTeam/eavro
+pkg_eavro_fetch = git
+pkg_eavro_repo = https://github.com/SIfoxDevTeam/eavro
+pkg_eavro_commit = master
+
+PACKAGES += ecapnp
+pkg_ecapnp_name = ecapnp
+pkg_ecapnp_description = Cap'n Proto library for Erlang
+pkg_ecapnp_homepage = https://github.com/kaos/ecapnp
+pkg_ecapnp_fetch = git
+pkg_ecapnp_repo = https://github.com/kaos/ecapnp
+pkg_ecapnp_commit = master
+
+PACKAGES += econfig
+pkg_econfig_name = econfig
+pkg_econfig_description = simple Erlang config handler using INI files
+pkg_econfig_homepage = https://github.com/benoitc/econfig
+pkg_econfig_fetch = git
+pkg_econfig_repo = https://github.com/benoitc/econfig
+pkg_econfig_commit = master
+
+PACKAGES += edate
+pkg_edate_name = edate
+pkg_edate_description = date manipulation library for erlang
+pkg_edate_homepage = https://github.com/dweldon/edate
+pkg_edate_fetch = git
+pkg_edate_repo = https://github.com/dweldon/edate
+pkg_edate_commit = master
+
+PACKAGES += edgar
+pkg_edgar_name = edgar
+pkg_edgar_description = Erlang Does GNU AR
+pkg_edgar_homepage = https://github.com/crownedgrouse/edgar
+pkg_edgar_fetch = git
+pkg_edgar_repo = https://github.com/crownedgrouse/edgar
+pkg_edgar_commit = master
+
+PACKAGES += edis
+pkg_edis_name = edis
+pkg_edis_description = An Erlang implementation of Redis KV Store
+pkg_edis_homepage = http://inaka.github.com/edis/
+pkg_edis_fetch = git
+pkg_edis_repo = https://github.com/inaka/edis
+pkg_edis_commit = master
+
+PACKAGES += edns
+pkg_edns_name = edns
+pkg_edns_description = Erlang/OTP DNS server
+pkg_edns_homepage = https://github.com/hcvst/erlang-dns
+pkg_edns_fetch = git
+pkg_edns_repo = https://github.com/hcvst/erlang-dns
+pkg_edns_commit = master
+
+PACKAGES += edown
+pkg_edown_name = edown
+pkg_edown_description = EDoc extension for generating Github-flavored Markdown
+pkg_edown_homepage = https://github.com/uwiger/edown
+pkg_edown_fetch = git
+pkg_edown_repo = https://github.com/uwiger/edown
+pkg_edown_commit = master
+
+PACKAGES += eep
+pkg_eep_name = eep
+pkg_eep_description = Erlang Easy Profiling (eep) application provides a way to analyze application performance and call hierarchy
+pkg_eep_homepage = https://github.com/virtan/eep
+pkg_eep_fetch = git
+pkg_eep_repo = https://github.com/virtan/eep
+pkg_eep_commit = master
+
+PACKAGES += eep_app
+pkg_eep_app_name = eep_app
+pkg_eep_app_description = Embedded Event Processing
+pkg_eep_app_homepage = https://github.com/darach/eep-erl
+pkg_eep_app_fetch = git
+pkg_eep_app_repo = https://github.com/darach/eep-erl
+pkg_eep_app_commit = master
+
+PACKAGES += efene
+pkg_efene_name = efene
+pkg_efene_description = Alternative syntax for the Erlang Programming Language focusing on simplicity, ease of use and programmer UX
+pkg_efene_homepage = https://github.com/efene/efene
+pkg_efene_fetch = git
+pkg_efene_repo = https://github.com/efene/efene
+pkg_efene_commit = master
+
+PACKAGES += egeoip
+pkg_egeoip_name = egeoip
+pkg_egeoip_description = Erlang IP Geolocation module, currently supporting the MaxMind GeoLite City Database.
+pkg_egeoip_homepage = https://github.com/mochi/egeoip
+pkg_egeoip_fetch = git
+pkg_egeoip_repo = https://github.com/mochi/egeoip
+pkg_egeoip_commit = master
+
+PACKAGES += ehsa
+pkg_ehsa_name = ehsa
+pkg_ehsa_description = Erlang HTTP server basic and digest authentication modules
+pkg_ehsa_homepage = https://bitbucket.org/a12n/ehsa
+pkg_ehsa_fetch = hg
+pkg_ehsa_repo = https://bitbucket.org/a12n/ehsa
+pkg_ehsa_commit = default
+
+PACKAGES += ej
+pkg_ej_name = ej
+pkg_ej_description = Helper module for working with Erlang terms representing JSON
+pkg_ej_homepage = https://github.com/seth/ej
+pkg_ej_fetch = git
+pkg_ej_repo = https://github.com/seth/ej
+pkg_ej_commit = master
+
+PACKAGES += ejabberd
+pkg_ejabberd_name = ejabberd
+pkg_ejabberd_description = Robust, ubiquitous and massively scalable Jabber / XMPP Instant Messaging platform
+pkg_ejabberd_homepage = https://github.com/processone/ejabberd
+pkg_ejabberd_fetch = git
+pkg_ejabberd_repo = https://github.com/processone/ejabberd
+pkg_ejabberd_commit = master
+
+PACKAGES += ejwt
+pkg_ejwt_name = ejwt
+pkg_ejwt_description = erlang library for JSON Web Token
+pkg_ejwt_homepage = https://github.com/artefactop/ejwt
+pkg_ejwt_fetch = git
+pkg_ejwt_repo = https://github.com/artefactop/ejwt
+pkg_ejwt_commit = master
+
+PACKAGES += ekaf
+pkg_ekaf_name = ekaf
+pkg_ekaf_description = A minimal, high-performance Kafka client in Erlang.
+pkg_ekaf_homepage = https://github.com/helpshift/ekaf
+pkg_ekaf_fetch = git
+pkg_ekaf_repo = https://github.com/helpshift/ekaf
+pkg_ekaf_commit = master
+
+PACKAGES += elarm
+pkg_elarm_name = elarm
+pkg_elarm_description = Alarm Manager for Erlang.
+pkg_elarm_homepage = https://github.com/esl/elarm
+pkg_elarm_fetch = git
+pkg_elarm_repo = https://github.com/esl/elarm
+pkg_elarm_commit = master
+
+PACKAGES += eleveldb
+pkg_eleveldb_name = eleveldb
+pkg_eleveldb_description = Erlang LevelDB API
+pkg_eleveldb_homepage = https://github.com/basho/eleveldb
+pkg_eleveldb_fetch = git
+pkg_eleveldb_repo = https://github.com/basho/eleveldb
+pkg_eleveldb_commit = master
+
+PACKAGES += elixir
+pkg_elixir_name = elixir
+pkg_elixir_description = Elixir is a dynamic, functional language designed for building scalable and maintainable applications
+pkg_elixir_homepage = https://elixir-lang.org/
+pkg_elixir_fetch = git
+pkg_elixir_repo = https://github.com/elixir-lang/elixir
+pkg_elixir_commit = master
+
+PACKAGES += elli
+pkg_elli_name = elli
+pkg_elli_description = Simple, robust and performant Erlang web server
+pkg_elli_homepage = https://github.com/elli-lib/elli
+pkg_elli_fetch = git
+pkg_elli_repo = https://github.com/elli-lib/elli
+pkg_elli_commit = master
+
+PACKAGES += elvis
+pkg_elvis_name = elvis
+pkg_elvis_description = Erlang Style Reviewer
+pkg_elvis_homepage = https://github.com/inaka/elvis
+pkg_elvis_fetch = git
+pkg_elvis_repo = https://github.com/inaka/elvis
+pkg_elvis_commit = master
+
+PACKAGES += emagick
+pkg_emagick_name = emagick
+pkg_emagick_description = Wrapper for Graphics/ImageMagick command line tool.
+pkg_emagick_homepage = https://github.com/kivra/emagick
+pkg_emagick_fetch = git
+pkg_emagick_repo = https://github.com/kivra/emagick
+pkg_emagick_commit = master
+
+PACKAGES += emysql
+pkg_emysql_name = emysql
+pkg_emysql_description = Stable, pure Erlang MySQL driver.
+pkg_emysql_homepage = https://github.com/Eonblast/Emysql
+pkg_emysql_fetch = git
+pkg_emysql_repo = https://github.com/Eonblast/Emysql
+pkg_emysql_commit = master
+
+PACKAGES += enm
+pkg_enm_name = enm
+pkg_enm_description = Erlang driver for nanomsg
+pkg_enm_homepage = https://github.com/basho/enm
+pkg_enm_fetch = git
+pkg_enm_repo = https://github.com/basho/enm
+pkg_enm_commit = master
+
+PACKAGES += entop
+pkg_entop_name = entop
+pkg_entop_description = A top-like tool for monitoring an Erlang node
+pkg_entop_homepage = https://github.com/mazenharake/entop
+pkg_entop_fetch = git
+pkg_entop_repo = https://github.com/mazenharake/entop
+pkg_entop_commit = master
+
+PACKAGES += epcap
+pkg_epcap_name = epcap
+pkg_epcap_description = Erlang packet capture interface using pcap
+pkg_epcap_homepage = https://github.com/msantos/epcap
+pkg_epcap_fetch = git
+pkg_epcap_repo = https://github.com/msantos/epcap
+pkg_epcap_commit = master
+
+PACKAGES += eper
+pkg_eper_name = eper
+pkg_eper_description = Erlang performance and debugging tools.
+pkg_eper_homepage = https://github.com/massemanet/eper
+pkg_eper_fetch = git
+pkg_eper_repo = https://github.com/massemanet/eper
+pkg_eper_commit = master
+
+PACKAGES += epgsql
+pkg_epgsql_name = epgsql
+pkg_epgsql_description = Erlang PostgreSQL client library.
+pkg_epgsql_homepage = https://github.com/epgsql/epgsql
+pkg_epgsql_fetch = git
+pkg_epgsql_repo = https://github.com/epgsql/epgsql
+pkg_epgsql_commit = master
+
+PACKAGES += episcina
+pkg_episcina_name = episcina
+pkg_episcina_description = A simple non intrusive resource pool for connections
+pkg_episcina_homepage = https://github.com/erlware/episcina
+pkg_episcina_fetch = git
+pkg_episcina_repo = https://github.com/erlware/episcina
+pkg_episcina_commit = master
+
+PACKAGES += eplot
+pkg_eplot_name = eplot
+pkg_eplot_description = A plot engine written in erlang.
+pkg_eplot_homepage = https://github.com/psyeugenic/eplot
+pkg_eplot_fetch = git
+pkg_eplot_repo = https://github.com/psyeugenic/eplot
+pkg_eplot_commit = master
+
+PACKAGES += epocxy
+pkg_epocxy_name = epocxy
+pkg_epocxy_description = Erlang Patterns of Concurrency
+pkg_epocxy_homepage = https://github.com/duomark/epocxy
+pkg_epocxy_fetch = git
+pkg_epocxy_repo = https://github.com/duomark/epocxy
+pkg_epocxy_commit = master
+
+PACKAGES += epubnub
+pkg_epubnub_name = epubnub
+pkg_epubnub_description = Erlang PubNub API
+pkg_epubnub_homepage = https://github.com/tsloughter/epubnub
+pkg_epubnub_fetch = git
+pkg_epubnub_repo = https://github.com/tsloughter/epubnub
+pkg_epubnub_commit = master
+
+PACKAGES += eqm
+pkg_eqm_name = eqm
+pkg_eqm_description = Erlang pub sub with supply-demand channels
+pkg_eqm_homepage = https://github.com/loucash/eqm
+pkg_eqm_fetch = git
+pkg_eqm_repo = https://github.com/loucash/eqm
+pkg_eqm_commit = master
+
+PACKAGES += eredis
+pkg_eredis_name = eredis
+pkg_eredis_description = Erlang Redis client
+pkg_eredis_homepage = https://github.com/wooga/eredis
+pkg_eredis_fetch = git
+pkg_eredis_repo = https://github.com/wooga/eredis
+pkg_eredis_commit = master
+
+PACKAGES += eredis_pool
+pkg_eredis_pool_name = eredis_pool
+pkg_eredis_pool_description = eredis_pool is a pool of Redis clients, using eredis and poolboy.
+pkg_eredis_pool_homepage = https://github.com/hiroeorz/eredis_pool
+pkg_eredis_pool_fetch = git
+pkg_eredis_pool_repo = https://github.com/hiroeorz/eredis_pool
+pkg_eredis_pool_commit = master
+
+PACKAGES += erl_streams
+pkg_erl_streams_name = erl_streams
+pkg_erl_streams_description = Streams in Erlang
+pkg_erl_streams_homepage = https://github.com/epappas/erl_streams
+pkg_erl_streams_fetch = git
+pkg_erl_streams_repo = https://github.com/epappas/erl_streams
+pkg_erl_streams_commit = master
+
+PACKAGES += erlang_cep
+pkg_erlang_cep_name = erlang_cep
+pkg_erlang_cep_description = A basic CEP package written in erlang
+pkg_erlang_cep_homepage = https://github.com/danmacklin/erlang_cep
+pkg_erlang_cep_fetch = git
+pkg_erlang_cep_repo = https://github.com/danmacklin/erlang_cep
+pkg_erlang_cep_commit = master
+
+PACKAGES += erlang_js
+pkg_erlang_js_name = erlang_js
+pkg_erlang_js_description = A linked-in driver for Erlang to Mozilla's Spidermonkey Javascript runtime.
+pkg_erlang_js_homepage = https://github.com/basho/erlang_js
+pkg_erlang_js_fetch = git
+pkg_erlang_js_repo = https://github.com/basho/erlang_js
+pkg_erlang_js_commit = master
+
+PACKAGES += erlang_localtime
+pkg_erlang_localtime_name = erlang_localtime
+pkg_erlang_localtime_description = Erlang library for conversion from one local time to another
+pkg_erlang_localtime_homepage = https://github.com/dmitryme/erlang_localtime
+pkg_erlang_localtime_fetch = git
+pkg_erlang_localtime_repo = https://github.com/dmitryme/erlang_localtime
+pkg_erlang_localtime_commit = master
+
+PACKAGES += erlang_smtp
+pkg_erlang_smtp_name = erlang_smtp
+pkg_erlang_smtp_description = Erlang SMTP and POP3 server code.
+pkg_erlang_smtp_homepage = https://github.com/tonyg/erlang-smtp
+pkg_erlang_smtp_fetch = git
+pkg_erlang_smtp_repo = https://github.com/tonyg/erlang-smtp
+pkg_erlang_smtp_commit = master
+
+PACKAGES += erlang_term
+pkg_erlang_term_name = erlang_term
+pkg_erlang_term_description = Erlang Term Info
+pkg_erlang_term_homepage = https://github.com/okeuday/erlang_term
+pkg_erlang_term_fetch = git
+pkg_erlang_term_repo = https://github.com/okeuday/erlang_term
+pkg_erlang_term_commit = master
+
+PACKAGES += erlastic_search
+pkg_erlastic_search_name = erlastic_search
+pkg_erlastic_search_description = An Erlang app for communicating with Elasticsearch's REST interface.
+pkg_erlastic_search_homepage = https://github.com/tsloughter/erlastic_search
+pkg_erlastic_search_fetch = git
+pkg_erlastic_search_repo = https://github.com/tsloughter/erlastic_search
+pkg_erlastic_search_commit = master
+
+PACKAGES += erlasticsearch
+pkg_erlasticsearch_name = erlasticsearch
+pkg_erlasticsearch_description = Erlang thrift interface to elastic_search
+pkg_erlasticsearch_homepage = https://github.com/dieswaytoofast/erlasticsearch
+pkg_erlasticsearch_fetch = git
+pkg_erlasticsearch_repo = https://github.com/dieswaytoofast/erlasticsearch
+pkg_erlasticsearch_commit = master
+
+PACKAGES += erlbrake
+pkg_erlbrake_name = erlbrake
+pkg_erlbrake_description = Erlang Airbrake notification client
+pkg_erlbrake_homepage = https://github.com/kenpratt/erlbrake
+pkg_erlbrake_fetch = git
+pkg_erlbrake_repo = https://github.com/kenpratt/erlbrake
+pkg_erlbrake_commit = master
+
+PACKAGES += erlcloud
+pkg_erlcloud_name = erlcloud
+pkg_erlcloud_description = Cloud Computing library for erlang (Amazon EC2, S3, SQS, SimpleDB, Mechanical Turk, ELB)
+pkg_erlcloud_homepage = https://github.com/gleber/erlcloud
+pkg_erlcloud_fetch = git
+pkg_erlcloud_repo = https://github.com/gleber/erlcloud
+pkg_erlcloud_commit = master
+
+PACKAGES += erlcron
+pkg_erlcron_name = erlcron
+pkg_erlcron_description = Erlang cronish system
+pkg_erlcron_homepage = https://github.com/erlware/erlcron
+pkg_erlcron_fetch = git
+pkg_erlcron_repo = https://github.com/erlware/erlcron
+pkg_erlcron_commit = master
+
+PACKAGES += erldb
+pkg_erldb_name = erldb
+pkg_erldb_description = ORM (Object-relational mapping) application implemented in Erlang
+pkg_erldb_homepage = http://erldb.org
+pkg_erldb_fetch = git
+pkg_erldb_repo = https://github.com/erldb/erldb
+pkg_erldb_commit = master
+
+PACKAGES += erldis
+pkg_erldis_name = erldis
+pkg_erldis_description = Redis Erlang client library
+pkg_erldis_homepage = https://github.com/cstar/erldis
+pkg_erldis_fetch = git
+pkg_erldis_repo = https://github.com/cstar/erldis
+pkg_erldis_commit = master
+
+PACKAGES += erldns
+pkg_erldns_name = erldns
+pkg_erldns_description = DNS server, in erlang.
+pkg_erldns_homepage = https://github.com/aetrion/erl-dns
+pkg_erldns_fetch = git
+pkg_erldns_repo = https://github.com/aetrion/erl-dns
+pkg_erldns_commit = master
+
+PACKAGES += erldocker
+pkg_erldocker_name = erldocker
+pkg_erldocker_description = Docker Remote API client for Erlang
+pkg_erldocker_homepage = https://github.com/proger/erldocker
+pkg_erldocker_fetch = git
+pkg_erldocker_repo = https://github.com/proger/erldocker
+pkg_erldocker_commit = master
+
+PACKAGES += erlfsmon
+pkg_erlfsmon_name = erlfsmon
+pkg_erlfsmon_description = Erlang filesystem event watcher for Linux and OSX
+pkg_erlfsmon_homepage = https://github.com/proger/erlfsmon
+pkg_erlfsmon_fetch = git
+pkg_erlfsmon_repo = https://github.com/proger/erlfsmon
+pkg_erlfsmon_commit = master
+
+PACKAGES += erlgit
+pkg_erlgit_name = erlgit
+pkg_erlgit_description = Erlang convenience wrapper around git executable
+pkg_erlgit_homepage = https://github.com/gleber/erlgit
+pkg_erlgit_fetch = git
+pkg_erlgit_repo = https://github.com/gleber/erlgit
+pkg_erlgit_commit = master
+
+PACKAGES += erlguten
+pkg_erlguten_name = erlguten
+pkg_erlguten_description = ErlGuten is a system for high-quality typesetting, written purely in Erlang.
+pkg_erlguten_homepage = https://github.com/richcarl/erlguten
+pkg_erlguten_fetch = git
+pkg_erlguten_repo = https://github.com/richcarl/erlguten
+pkg_erlguten_commit = master
+
+PACKAGES += erlmc
+pkg_erlmc_name = erlmc
+pkg_erlmc_description = Erlang memcached binary protocol client
+pkg_erlmc_homepage = https://github.com/jkvor/erlmc
+pkg_erlmc_fetch = git
+pkg_erlmc_repo = https://github.com/jkvor/erlmc
+pkg_erlmc_commit = master
+
+PACKAGES += erlmongo
+pkg_erlmongo_name = erlmongo
+pkg_erlmongo_description = Record based Erlang driver for MongoDB with gridfs support
+pkg_erlmongo_homepage = https://github.com/SergejJurecko/erlmongo
+pkg_erlmongo_fetch = git
+pkg_erlmongo_repo = https://github.com/SergejJurecko/erlmongo
+pkg_erlmongo_commit = master
+
+PACKAGES += erlog
+pkg_erlog_name = erlog
+pkg_erlog_description = Prolog interpreter in and for Erlang
+pkg_erlog_homepage = https://github.com/rvirding/erlog
+pkg_erlog_fetch = git
+pkg_erlog_repo = https://github.com/rvirding/erlog
+pkg_erlog_commit = master
+
+PACKAGES += erlpass
+pkg_erlpass_name = erlpass
+pkg_erlpass_description = A library to handle password hashing and changing in a safe manner, independent from any kind of storage whatsoever.
+pkg_erlpass_homepage = https://github.com/ferd/erlpass
+pkg_erlpass_fetch = git
+pkg_erlpass_repo = https://github.com/ferd/erlpass
+pkg_erlpass_commit = master
+
+PACKAGES += erlport
+pkg_erlport_name = erlport
+pkg_erlport_description = ErlPort - connect Erlang to other languages
+pkg_erlport_homepage = https://github.com/hdima/erlport
+pkg_erlport_fetch = git
+pkg_erlport_repo = https://github.com/hdima/erlport
+pkg_erlport_commit = master
+
+PACKAGES += erlsh
+pkg_erlsh_name = erlsh
+pkg_erlsh_description = Erlang shell tools
+pkg_erlsh_homepage = https://github.com/proger/erlsh
+pkg_erlsh_fetch = git
+pkg_erlsh_repo = https://github.com/proger/erlsh
+pkg_erlsh_commit = master
+
+PACKAGES += erlsha2
+pkg_erlsha2_name = erlsha2
+pkg_erlsha2_description = SHA-224, SHA-256, SHA-384, SHA-512 implemented in Erlang NIFs.
+pkg_erlsha2_homepage = https://github.com/vinoski/erlsha2
+pkg_erlsha2_fetch = git
+pkg_erlsha2_repo = https://github.com/vinoski/erlsha2
+pkg_erlsha2_commit = master
+
+PACKAGES += erlsom
+pkg_erlsom_name = erlsom
+pkg_erlsom_description = XML parser for Erlang
+pkg_erlsom_homepage = https://github.com/willemdj/erlsom
+pkg_erlsom_fetch = git
+pkg_erlsom_repo = https://github.com/willemdj/erlsom
+pkg_erlsom_commit = master
+
+PACKAGES += erlubi
+pkg_erlubi_name = erlubi
+pkg_erlubi_description = Ubigraph Erlang Client (and Process Visualizer)
+pkg_erlubi_homepage = https://github.com/krestenkrab/erlubi
+pkg_erlubi_fetch = git
+pkg_erlubi_repo = https://github.com/krestenkrab/erlubi
+pkg_erlubi_commit = master
+
+PACKAGES += erlvolt
+pkg_erlvolt_name = erlvolt
+pkg_erlvolt_description = VoltDB Erlang Client Driver
+pkg_erlvolt_homepage = https://github.com/VoltDB/voltdb-client-erlang
+pkg_erlvolt_fetch = git
+pkg_erlvolt_repo = https://github.com/VoltDB/voltdb-client-erlang
+pkg_erlvolt_commit = master
+
+PACKAGES += erlware_commons
+pkg_erlware_commons_name = erlware_commons
+pkg_erlware_commons_description = Erlware Commons is an Erlware project focused on all aspects of reusable Erlang components.
+pkg_erlware_commons_homepage = https://github.com/erlware/erlware_commons
+pkg_erlware_commons_fetch = git
+pkg_erlware_commons_repo = https://github.com/erlware/erlware_commons
+pkg_erlware_commons_commit = master
+
+PACKAGES += erlydtl
+pkg_erlydtl_name = erlydtl
+pkg_erlydtl_description = Django Template Language for Erlang.
+pkg_erlydtl_homepage = https://github.com/erlydtl/erlydtl
+pkg_erlydtl_fetch = git
+pkg_erlydtl_repo = https://github.com/erlydtl/erlydtl
+pkg_erlydtl_commit = master
+
+PACKAGES += errd
+pkg_errd_name = errd
+pkg_errd_description = Erlang RRDTool library
+pkg_errd_homepage = https://github.com/archaelus/errd
+pkg_errd_fetch = git
+pkg_errd_repo = https://github.com/archaelus/errd
+pkg_errd_commit = master
+
+PACKAGES += erserve
+pkg_erserve_name = erserve
+pkg_erserve_description = Erlang/Rserve communication interface
+pkg_erserve_homepage = https://github.com/del/erserve
+pkg_erserve_fetch = git
+pkg_erserve_repo = https://github.com/del/erserve
+pkg_erserve_commit = master
+
+PACKAGES += erwa
+pkg_erwa_name = erwa
+pkg_erwa_description = A WAMP router and client written in Erlang.
+pkg_erwa_homepage = https://github.com/bwegh/erwa
+pkg_erwa_fetch = git
+pkg_erwa_repo = https://github.com/bwegh/erwa
+pkg_erwa_commit = master
+
+PACKAGES += escalus
+pkg_escalus_name = escalus
+pkg_escalus_description = An XMPP client library in Erlang for conveniently testing XMPP servers
+pkg_escalus_homepage = https://github.com/esl/escalus
+pkg_escalus_fetch = git
+pkg_escalus_repo = https://github.com/esl/escalus
+pkg_escalus_commit = master
+
+PACKAGES += esh_mk
+pkg_esh_mk_name = esh_mk
+pkg_esh_mk_description = esh template engine plugin for erlang.mk
+pkg_esh_mk_homepage = https://github.com/crownedgrouse/esh.mk
+pkg_esh_mk_fetch = git
+pkg_esh_mk_repo = https://github.com/crownedgrouse/esh.mk.git
+pkg_esh_mk_commit = master
+
+PACKAGES += espec
+pkg_espec_name = espec
+pkg_espec_description = ESpec: Behaviour driven development framework for Erlang
+pkg_espec_homepage = https://github.com/lucaspiller/espec
+pkg_espec_fetch = git
+pkg_espec_repo = https://github.com/lucaspiller/espec
+pkg_espec_commit = master
+
+PACKAGES += estatsd
+pkg_estatsd_name = estatsd
+pkg_estatsd_description = Erlang stats aggregation app that periodically flushes data to graphite
+pkg_estatsd_homepage = https://github.com/RJ/estatsd
+pkg_estatsd_fetch = git
+pkg_estatsd_repo = https://github.com/RJ/estatsd
+pkg_estatsd_commit = master
+
+PACKAGES += etap
+pkg_etap_name = etap
+pkg_etap_description = etap is a simple erlang testing library that provides TAP compliant output.
+pkg_etap_homepage = https://github.com/ngerakines/etap
+pkg_etap_fetch = git
+pkg_etap_repo = https://github.com/ngerakines/etap
+pkg_etap_commit = master
+
+PACKAGES += etest
+pkg_etest_name = etest
+pkg_etest_description = A lightweight, convention over configuration test framework for Erlang
+pkg_etest_homepage = https://github.com/wooga/etest
+pkg_etest_fetch = git
+pkg_etest_repo = https://github.com/wooga/etest
+pkg_etest_commit = master
+
+PACKAGES += etest_http
+pkg_etest_http_name = etest_http
+pkg_etest_http_description = etest Assertions around HTTP (client-side)
+pkg_etest_http_homepage = https://github.com/wooga/etest_http
+pkg_etest_http_fetch = git
+pkg_etest_http_repo = https://github.com/wooga/etest_http
+pkg_etest_http_commit = master
+
+PACKAGES += etoml
+pkg_etoml_name = etoml
+pkg_etoml_description = Erlang parser for the TOML language
+pkg_etoml_homepage = https://github.com/kalta/etoml
+pkg_etoml_fetch = git
+pkg_etoml_repo = https://github.com/kalta/etoml
+pkg_etoml_commit = master
+
+PACKAGES += eunit
+pkg_eunit_name = eunit
+pkg_eunit_description = The EUnit lightweight unit testing framework for Erlang - this is the canonical development repository.
+pkg_eunit_homepage = https://github.com/richcarl/eunit
+pkg_eunit_fetch = git
+pkg_eunit_repo = https://github.com/richcarl/eunit
+pkg_eunit_commit = master
+
+PACKAGES += eunit_formatters
+pkg_eunit_formatters_name = eunit_formatters
+pkg_eunit_formatters_description = Because eunit's output sucks. Let's make it better.
+pkg_eunit_formatters_homepage = https://github.com/seancribbs/eunit_formatters
+pkg_eunit_formatters_fetch = git
+pkg_eunit_formatters_repo = https://github.com/seancribbs/eunit_formatters
+pkg_eunit_formatters_commit = master
+
+PACKAGES += euthanasia
+pkg_euthanasia_name = euthanasia
+pkg_euthanasia_description = Merciful killer for your Erlang processes
+pkg_euthanasia_homepage = https://github.com/doubleyou/euthanasia
+pkg_euthanasia_fetch = git
+pkg_euthanasia_repo = https://github.com/doubleyou/euthanasia
+pkg_euthanasia_commit = master
+
+PACKAGES += evum
+pkg_evum_name = evum
+pkg_evum_description = Spawn Linux VMs as Erlang processes in the Erlang VM
+pkg_evum_homepage = https://github.com/msantos/evum
+pkg_evum_fetch = git
+pkg_evum_repo = https://github.com/msantos/evum
+pkg_evum_commit = master
+
+PACKAGES += exec
+pkg_exec_name = erlexec
+pkg_exec_description = Execute and control OS processes from Erlang/OTP.
+pkg_exec_homepage = http://saleyn.github.com/erlexec
+pkg_exec_fetch = git
+pkg_exec_repo = https://github.com/saleyn/erlexec
+pkg_exec_commit = master
+
+PACKAGES += exml
+pkg_exml_name = exml
+pkg_exml_description = XML parsing library in Erlang
+pkg_exml_homepage = https://github.com/paulgray/exml
+pkg_exml_fetch = git
+pkg_exml_repo = https://github.com/paulgray/exml
+pkg_exml_commit = master
+
+PACKAGES += exometer
+pkg_exometer_name = exometer
+pkg_exometer_description = Basic measurement objects and probe behavior
+pkg_exometer_homepage = https://github.com/Feuerlabs/exometer
+pkg_exometer_fetch = git
+pkg_exometer_repo = https://github.com/Feuerlabs/exometer
+pkg_exometer_commit = master
+
+PACKAGES += exs1024
+pkg_exs1024_name = exs1024
+pkg_exs1024_description = Xorshift1024star pseudo random number generator for Erlang.
+pkg_exs1024_homepage = https://github.com/jj1bdx/exs1024
+pkg_exs1024_fetch = git
+pkg_exs1024_repo = https://github.com/jj1bdx/exs1024
+pkg_exs1024_commit = master
+
+PACKAGES += exs64
+pkg_exs64_name = exs64
+pkg_exs64_description = Xorshift64star pseudo random number generator for Erlang.
+pkg_exs64_homepage = https://github.com/jj1bdx/exs64
+pkg_exs64_fetch = git
+pkg_exs64_repo = https://github.com/jj1bdx/exs64
+pkg_exs64_commit = master
+
+PACKAGES += exsplus116
+pkg_exsplus116_name = exsplus116
+pkg_exsplus116_description = Xorshift116plus for Erlang
+pkg_exsplus116_homepage = https://github.com/jj1bdx/exsplus116
+pkg_exsplus116_fetch = git
+pkg_exsplus116_repo = https://github.com/jj1bdx/exsplus116
+pkg_exsplus116_commit = master
+
+PACKAGES += exsplus128
+pkg_exsplus128_name = exsplus128
+pkg_exsplus128_description = Xorshift128plus pseudo random number generator for Erlang.
+pkg_exsplus128_homepage = https://github.com/jj1bdx/exsplus128
+pkg_exsplus128_fetch = git
+pkg_exsplus128_repo = https://github.com/jj1bdx/exsplus128
+pkg_exsplus128_commit = master
+
+PACKAGES += ezmq
+pkg_ezmq_name = ezmq
+pkg_ezmq_description = zMQ implemented in Erlang
+pkg_ezmq_homepage = https://github.com/RoadRunnr/ezmq
+pkg_ezmq_fetch = git
+pkg_ezmq_repo = https://github.com/RoadRunnr/ezmq
+pkg_ezmq_commit = master
+
+PACKAGES += ezmtp
+pkg_ezmtp_name = ezmtp
+pkg_ezmtp_description = ZMTP protocol in pure Erlang.
+pkg_ezmtp_homepage = https://github.com/a13x/ezmtp
+pkg_ezmtp_fetch = git
+pkg_ezmtp_repo = https://github.com/a13x/ezmtp
+pkg_ezmtp_commit = master
+
+PACKAGES += fast_disk_log
+pkg_fast_disk_log_name = fast_disk_log
+pkg_fast_disk_log_description = Pool-based asynchronous Erlang disk logger
+pkg_fast_disk_log_homepage = https://github.com/lpgauth/fast_disk_log
+pkg_fast_disk_log_fetch = git
+pkg_fast_disk_log_repo = https://github.com/lpgauth/fast_disk_log
+pkg_fast_disk_log_commit = master
+
+PACKAGES += feeder
+pkg_feeder_name = feeder
+pkg_feeder_description = Stream parse RSS and Atom formatted XML feeds.
+pkg_feeder_homepage = https://github.com/michaelnisi/feeder
+pkg_feeder_fetch = git
+pkg_feeder_repo = https://github.com/michaelnisi/feeder
+pkg_feeder_commit = master
+
+PACKAGES += find_crate
+pkg_find_crate_name = find_crate
+pkg_find_crate_description = Find Rust libs and exes in Erlang application priv directory
+pkg_find_crate_homepage = https://github.com/goertzenator/find_crate
+pkg_find_crate_fetch = git
+pkg_find_crate_repo = https://github.com/goertzenator/find_crate
+pkg_find_crate_commit = master
+
+PACKAGES += fix
+pkg_fix_name = fix
+pkg_fix_description = http://fixprotocol.org/ implementation.
+pkg_fix_homepage = https://github.com/maxlapshin/fix
+pkg_fix_fetch = git
+pkg_fix_repo = https://github.com/maxlapshin/fix
+pkg_fix_commit = master
+
+PACKAGES += flower
+pkg_flower_name = flower
+pkg_flower_description = FlowER - an Erlang OpenFlow development platform
+pkg_flower_homepage = https://github.com/travelping/flower
+pkg_flower_fetch = git
+pkg_flower_repo = https://github.com/travelping/flower
+pkg_flower_commit = master
+
+PACKAGES += fn
+pkg_fn_name = fn
+pkg_fn_description = Function utilities for Erlang
+pkg_fn_homepage = https://github.com/reiddraper/fn
+pkg_fn_fetch = git
+pkg_fn_repo = https://github.com/reiddraper/fn
+pkg_fn_commit = master
+
+PACKAGES += folsom
+pkg_folsom_name = folsom
+pkg_folsom_description = Expose Erlang Events and Metrics
+pkg_folsom_homepage = https://github.com/boundary/folsom
+pkg_folsom_fetch = git
+pkg_folsom_repo = https://github.com/boundary/folsom
+pkg_folsom_commit = master
+
+PACKAGES += folsom_cowboy
+pkg_folsom_cowboy_name = folsom_cowboy
+pkg_folsom_cowboy_description = A Cowboy based Folsom HTTP Wrapper.
+pkg_folsom_cowboy_homepage = https://github.com/boundary/folsom_cowboy
+pkg_folsom_cowboy_fetch = git
+pkg_folsom_cowboy_repo = https://github.com/boundary/folsom_cowboy
+pkg_folsom_cowboy_commit = master
+
+PACKAGES += folsomite
+pkg_folsomite_name = folsomite
+pkg_folsomite_description = blow up your graphite / riemann server with folsom metrics
+pkg_folsomite_homepage = https://github.com/campanja/folsomite
+pkg_folsomite_fetch = git
+pkg_folsomite_repo = https://github.com/campanja/folsomite
+pkg_folsomite_commit = master
+
+PACKAGES += fs
+pkg_fs_name = fs
+pkg_fs_description = Erlang FileSystem Listener
+pkg_fs_homepage = https://github.com/synrc/fs
+pkg_fs_fetch = git
+pkg_fs_repo = https://github.com/synrc/fs
+pkg_fs_commit = master
+
+PACKAGES += fuse
+pkg_fuse_name = fuse
+pkg_fuse_description = A Circuit Breaker for Erlang
+pkg_fuse_homepage = https://github.com/jlouis/fuse
+pkg_fuse_fetch = git
+pkg_fuse_repo = https://github.com/jlouis/fuse
+pkg_fuse_commit = master
+
+PACKAGES += gcm
+pkg_gcm_name = gcm
+pkg_gcm_description = An Erlang application for Google Cloud Messaging
+pkg_gcm_homepage = https://github.com/pdincau/gcm-erlang
+pkg_gcm_fetch = git
+pkg_gcm_repo = https://github.com/pdincau/gcm-erlang
+pkg_gcm_commit = master
+
+PACKAGES += gcprof
+pkg_gcprof_name = gcprof
+pkg_gcprof_description = Garbage Collection profiler for Erlang
+pkg_gcprof_homepage = https://github.com/knutin/gcprof
+pkg_gcprof_fetch = git
+pkg_gcprof_repo = https://github.com/knutin/gcprof
+pkg_gcprof_commit = master
+
+PACKAGES += geas
+pkg_geas_name = geas
+pkg_geas_description = Guess Erlang Application Scattering
+pkg_geas_homepage = https://github.com/crownedgrouse/geas
+pkg_geas_fetch = git
+pkg_geas_repo = https://github.com/crownedgrouse/geas
+pkg_geas_commit = master
+
+PACKAGES += geef
+pkg_geef_name = geef
+pkg_geef_description = Git NEEEEF (Erlang NIF)
+pkg_geef_homepage = https://github.com/carlosmn/geef
+pkg_geef_fetch = git
+pkg_geef_repo = https://github.com/carlosmn/geef
+pkg_geef_commit = master
+
+PACKAGES += gen_coap
+pkg_gen_coap_name = gen_coap
+pkg_gen_coap_description = Generic Erlang CoAP Client/Server
+pkg_gen_coap_homepage = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_fetch = git
+pkg_gen_coap_repo = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_commit = master
+
+PACKAGES += gen_cycle
+pkg_gen_cycle_name = gen_cycle
+pkg_gen_cycle_description = Simple, generic OTP behaviour for recurring tasks
+pkg_gen_cycle_homepage = https://github.com/aerosol/gen_cycle
+pkg_gen_cycle_fetch = git
+pkg_gen_cycle_repo = https://github.com/aerosol/gen_cycle
+pkg_gen_cycle_commit = develop
+
+PACKAGES += gen_icmp
+pkg_gen_icmp_name = gen_icmp
+pkg_gen_icmp_description = Erlang interface to ICMP sockets
+pkg_gen_icmp_homepage = https://github.com/msantos/gen_icmp
+pkg_gen_icmp_fetch = git
+pkg_gen_icmp_repo = https://github.com/msantos/gen_icmp
+pkg_gen_icmp_commit = master
+
+PACKAGES += gen_leader
+pkg_gen_leader_name = gen_leader
+pkg_gen_leader_description = leader election behavior
+pkg_gen_leader_homepage = https://github.com/garret-smith/gen_leader_revival
+pkg_gen_leader_fetch = git
+pkg_gen_leader_repo = https://github.com/garret-smith/gen_leader_revival
+pkg_gen_leader_commit = master
+
+PACKAGES += gen_nb_server
+pkg_gen_nb_server_name = gen_nb_server
+pkg_gen_nb_server_description = OTP behavior for writing non-blocking servers
+pkg_gen_nb_server_homepage = https://github.com/kevsmith/gen_nb_server
+pkg_gen_nb_server_fetch = git
+pkg_gen_nb_server_repo = https://github.com/kevsmith/gen_nb_server
+pkg_gen_nb_server_commit = master
+
+PACKAGES += gen_paxos
+pkg_gen_paxos_name = gen_paxos
+pkg_gen_paxos_description = An Erlang/OTP-style implementation of the PAXOS distributed consensus protocol
+pkg_gen_paxos_homepage = https://github.com/gburd/gen_paxos
+pkg_gen_paxos_fetch = git
+pkg_gen_paxos_repo = https://github.com/gburd/gen_paxos
+pkg_gen_paxos_commit = master
+
+PACKAGES += gen_rpc
+pkg_gen_rpc_name = gen_rpc
+pkg_gen_rpc_description = A scalable RPC library for Erlang-VM based languages
+pkg_gen_rpc_homepage = https://github.com/priestjim/gen_rpc
+pkg_gen_rpc_fetch = git
+pkg_gen_rpc_repo = https://github.com/priestjim/gen_rpc.git
+pkg_gen_rpc_commit = master
+
+PACKAGES += gen_smtp
+pkg_gen_smtp_name = gen_smtp
+pkg_gen_smtp_description = A generic Erlang SMTP server and client that can be extended via callback modules
+pkg_gen_smtp_homepage = https://github.com/Vagabond/gen_smtp
+pkg_gen_smtp_fetch = git
+pkg_gen_smtp_repo = https://github.com/Vagabond/gen_smtp
+pkg_gen_smtp_commit = master
+
+PACKAGES += gen_tracker
+pkg_gen_tracker_name = gen_tracker
+pkg_gen_tracker_description = supervisor with ets handling of children and their metadata
+pkg_gen_tracker_homepage = https://github.com/erlyvideo/gen_tracker
+pkg_gen_tracker_fetch = git
+pkg_gen_tracker_repo = https://github.com/erlyvideo/gen_tracker
+pkg_gen_tracker_commit = master
+
+PACKAGES += gen_unix
+pkg_gen_unix_name = gen_unix
+pkg_gen_unix_description = Erlang Unix socket interface
+pkg_gen_unix_homepage = https://github.com/msantos/gen_unix
+pkg_gen_unix_fetch = git
+pkg_gen_unix_repo = https://github.com/msantos/gen_unix
+pkg_gen_unix_commit = master
+
+PACKAGES += geode
+pkg_geode_name = geode
+pkg_geode_description = geohash/proximity lookup in pure, uncut erlang.
+pkg_geode_homepage = https://github.com/bradfordw/geode
+pkg_geode_fetch = git
+pkg_geode_repo = https://github.com/bradfordw/geode
+pkg_geode_commit = master
+
+PACKAGES += getopt
+pkg_getopt_name = getopt
+pkg_getopt_description = Module to parse command line arguments using the GNU getopt syntax
+pkg_getopt_homepage = https://github.com/jcomellas/getopt
+pkg_getopt_fetch = git
+pkg_getopt_repo = https://github.com/jcomellas/getopt
+pkg_getopt_commit = master
+
+PACKAGES += gettext
+pkg_gettext_name = gettext
+pkg_gettext_description = Erlang internationalization library.
+pkg_gettext_homepage = https://github.com/etnt/gettext
+pkg_gettext_fetch = git
+pkg_gettext_repo = https://github.com/etnt/gettext
+pkg_gettext_commit = master
+
+PACKAGES += giallo
+pkg_giallo_name = giallo
+pkg_giallo_description = Small and flexible web framework on top of Cowboy
+pkg_giallo_homepage = https://github.com/kivra/giallo
+pkg_giallo_fetch = git
+pkg_giallo_repo = https://github.com/kivra/giallo
+pkg_giallo_commit = master
+
+PACKAGES += gin
+pkg_gin_name = gin
+pkg_gin_description = The guards and for Erlang parse_transform
+pkg_gin_homepage = https://github.com/mad-cocktail/gin
+pkg_gin_fetch = git
+pkg_gin_repo = https://github.com/mad-cocktail/gin
+pkg_gin_commit = master
+
+PACKAGES += gitty
+pkg_gitty_name = gitty
+pkg_gitty_description = Git access in erlang
+pkg_gitty_homepage = https://github.com/maxlapshin/gitty
+pkg_gitty_fetch = git
+pkg_gitty_repo = https://github.com/maxlapshin/gitty
+pkg_gitty_commit = master
+
+PACKAGES += gold_fever
+pkg_gold_fever_name = gold_fever
+pkg_gold_fever_description = A Treasure Hunt for Erlangers
+pkg_gold_fever_homepage = https://github.com/inaka/gold_fever
+pkg_gold_fever_fetch = git
+pkg_gold_fever_repo = https://github.com/inaka/gold_fever
+pkg_gold_fever_commit = master
+
+PACKAGES += gpb
+pkg_gpb_name = gpb
+pkg_gpb_description = A Google Protobuf implementation for Erlang
+pkg_gpb_homepage = https://github.com/tomas-abrahamsson/gpb
+pkg_gpb_fetch = git
+pkg_gpb_repo = https://github.com/tomas-abrahamsson/gpb
+pkg_gpb_commit = master
+
+PACKAGES += gproc
+pkg_gproc_name = gproc
+pkg_gproc_description = Extended process registry for Erlang
+pkg_gproc_homepage = https://github.com/uwiger/gproc
+pkg_gproc_fetch = git
+pkg_gproc_repo = https://github.com/uwiger/gproc
+pkg_gproc_commit = master
+
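+# Every entry in this index follows the same schema: for a package NAME,
+# erlang.mk reads pkg_NAME_name, _description, _homepage, _fetch, _repo and
+# _commit. A minimal sketch of a project Makefile that pulls in the gproc
+# package defined above; the pinned tag is a hypothetical override of the
+# indexed "master" commit, not a recommendation:
+#
+#   PROJECT = my_app
+#   DEPS = gproc
+#   # optional: pin the dependency instead of tracking master (illustrative)
+#   dep_gproc_commit = 0.8.0
+#   include erlang.mk
+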
+PACKAGES += grapherl
+pkg_grapherl_name = grapherl
+pkg_grapherl_description = Create graphs of Erlang systems and programs
+pkg_grapherl_homepage = https://github.com/eproxus/grapherl
+pkg_grapherl_fetch = git
+pkg_grapherl_repo = https://github.com/eproxus/grapherl
+pkg_grapherl_commit = master
+
+PACKAGES += grpc
+pkg_grpc_name = grpc
+pkg_grpc_description = gRPC server in Erlang
+pkg_grpc_homepage = https://github.com/Bluehouse-Technology/grpc
+pkg_grpc_fetch = git
+pkg_grpc_repo = https://github.com/Bluehouse-Technology/grpc
+pkg_grpc_commit = master
+
+PACKAGES += grpc_client
+pkg_grpc_client_name = grpc_client
+pkg_grpc_client_description = gRPC client in Erlang
+pkg_grpc_client_homepage = https://github.com/Bluehouse-Technology/grpc_client
+pkg_grpc_client_fetch = git
+pkg_grpc_client_repo = https://github.com/Bluehouse-Technology/grpc_client
+pkg_grpc_client_commit = master
+
+PACKAGES += gun
+pkg_gun_name = gun
+pkg_gun_description = Asynchronous SPDY, HTTP and Websocket client written in Erlang.
+pkg_gun_homepage = http://ninenines.eu
+pkg_gun_fetch = git
+pkg_gun_repo = https://github.com/ninenines/gun
+pkg_gun_commit = master
+
+PACKAGES += gut
+pkg_gut_name = gut
+pkg_gut_description = gut is a template printing, aka scaffolding, tool for Erlang. Like rails generate or yeoman
+pkg_gut_homepage = https://github.com/unbalancedparentheses/gut
+pkg_gut_fetch = git
+pkg_gut_repo = https://github.com/unbalancedparentheses/gut
+pkg_gut_commit = master
+
+PACKAGES += hackney
+pkg_hackney_name = hackney
+pkg_hackney_description = simple HTTP client in Erlang
+pkg_hackney_homepage = https://github.com/benoitc/hackney
+pkg_hackney_fetch = git
+pkg_hackney_repo = https://github.com/benoitc/hackney
+pkg_hackney_commit = master
+
+PACKAGES += hamcrest
+pkg_hamcrest_name = hamcrest
+pkg_hamcrest_description = Erlang port of Hamcrest
+pkg_hamcrest_homepage = https://github.com/hyperthunk/hamcrest-erlang
+pkg_hamcrest_fetch = git
+pkg_hamcrest_repo = https://github.com/hyperthunk/hamcrest-erlang
+pkg_hamcrest_commit = master
+
+PACKAGES += hanoidb
+pkg_hanoidb_name = hanoidb
+pkg_hanoidb_description = Erlang LSM BTree Storage
+pkg_hanoidb_homepage = https://github.com/krestenkrab/hanoidb
+pkg_hanoidb_fetch = git
+pkg_hanoidb_repo = https://github.com/krestenkrab/hanoidb
+pkg_hanoidb_commit = master
+
+PACKAGES += hottub
+pkg_hottub_name = hottub
+pkg_hottub_description = Permanent Erlang Worker Pool
+pkg_hottub_homepage = https://github.com/bfrog/hottub
+pkg_hottub_fetch = git
+pkg_hottub_repo = https://github.com/bfrog/hottub
+pkg_hottub_commit = master
+
+PACKAGES += hpack
+pkg_hpack_name = hpack
+pkg_hpack_description = HPACK Implementation for Erlang
+pkg_hpack_homepage = https://github.com/joedevivo/hpack
+pkg_hpack_fetch = git
+pkg_hpack_repo = https://github.com/joedevivo/hpack
+pkg_hpack_commit = master
+
+PACKAGES += hyper
+pkg_hyper_name = hyper
+pkg_hyper_description = Erlang implementation of HyperLogLog
+pkg_hyper_homepage = https://github.com/GameAnalytics/hyper
+pkg_hyper_fetch = git
+pkg_hyper_repo = https://github.com/GameAnalytics/hyper
+pkg_hyper_commit = master
+
+PACKAGES += i18n
+pkg_i18n_name = i18n
+pkg_i18n_description = International components for unicode from Erlang (unicode, date, string, number, format, locale, localization, transliteration, icu4e)
+pkg_i18n_homepage = https://github.com/erlang-unicode/i18n
+pkg_i18n_fetch = git
+pkg_i18n_repo = https://github.com/erlang-unicode/i18n
+pkg_i18n_commit = master
+
+PACKAGES += ibrowse
+pkg_ibrowse_name = ibrowse
+pkg_ibrowse_description = Erlang HTTP client
+pkg_ibrowse_homepage = https://github.com/cmullaparthi/ibrowse
+pkg_ibrowse_fetch = git
+pkg_ibrowse_repo = https://github.com/cmullaparthi/ibrowse
+pkg_ibrowse_commit = master
+
+PACKAGES += idna
+pkg_idna_name = idna
+pkg_idna_description = Erlang IDNA lib
+pkg_idna_homepage = https://github.com/benoitc/erlang-idna
+pkg_idna_fetch = git
+pkg_idna_repo = https://github.com/benoitc/erlang-idna
+pkg_idna_commit = master
+
+PACKAGES += ierlang
+pkg_ierlang_name = ierlang
+pkg_ierlang_description = An Erlang language kernel for IPython.
+pkg_ierlang_homepage = https://github.com/robbielynch/ierlang
+pkg_ierlang_fetch = git
+pkg_ierlang_repo = https://github.com/robbielynch/ierlang
+pkg_ierlang_commit = master
+
+PACKAGES += iota
+pkg_iota_name = iota
+pkg_iota_description = iota (Inter-dependency Objective Testing Apparatus) - a tool to enforce clean separation of responsibilities in Erlang code
+pkg_iota_homepage = https://github.com/jpgneves/iota
+pkg_iota_fetch = git
+pkg_iota_repo = https://github.com/jpgneves/iota
+pkg_iota_commit = master
+
+PACKAGES += irc_lib
+pkg_irc_lib_name = irc_lib
+pkg_irc_lib_description = Erlang irc client library
+pkg_irc_lib_homepage = https://github.com/OtpChatBot/irc_lib
+pkg_irc_lib_fetch = git
+pkg_irc_lib_repo = https://github.com/OtpChatBot/irc_lib
+pkg_irc_lib_commit = master
+
+PACKAGES += ircd
+pkg_ircd_name = ircd
+pkg_ircd_description = A pluggable IRC daemon application/library for Erlang.
+pkg_ircd_homepage = https://github.com/tonyg/erlang-ircd
+pkg_ircd_fetch = git
+pkg_ircd_repo = https://github.com/tonyg/erlang-ircd
+pkg_ircd_commit = master
+
+PACKAGES += iris
+pkg_iris_name = iris
+pkg_iris_description = Iris Erlang binding
+pkg_iris_homepage = https://github.com/project-iris/iris-erl
+pkg_iris_fetch = git
+pkg_iris_repo = https://github.com/project-iris/iris-erl
+pkg_iris_commit = master
+
+PACKAGES += iso8601
+pkg_iso8601_name = iso8601
+pkg_iso8601_description = Erlang ISO 8601 date formatter/parser
+pkg_iso8601_homepage = https://github.com/seansawyer/erlang_iso8601
+pkg_iso8601_fetch = git
+pkg_iso8601_repo = https://github.com/seansawyer/erlang_iso8601
+pkg_iso8601_commit = master
+
+PACKAGES += jamdb_sybase
+pkg_jamdb_sybase_name = jamdb_sybase
+pkg_jamdb_sybase_description = Erlang driver for SAP Sybase ASE
+pkg_jamdb_sybase_homepage = https://github.com/erlangbureau/jamdb_sybase
+pkg_jamdb_sybase_fetch = git
+pkg_jamdb_sybase_repo = https://github.com/erlangbureau/jamdb_sybase
+pkg_jamdb_sybase_commit = master
+
+PACKAGES += jerg
+pkg_jerg_name = jerg
+pkg_jerg_description = JSON Schema to Erlang Records Generator
+pkg_jerg_homepage = https://github.com/ddossot/jerg
+pkg_jerg_fetch = git
+pkg_jerg_repo = https://github.com/ddossot/jerg
+pkg_jerg_commit = master
+
+PACKAGES += jesse
+pkg_jesse_name = jesse
+pkg_jesse_description = jesse (JSon Schema Erlang) is an implementation of a json schema validator for Erlang.
+pkg_jesse_homepage = https://github.com/for-GET/jesse
+pkg_jesse_fetch = git
+pkg_jesse_repo = https://github.com/for-GET/jesse
+pkg_jesse_commit = master
+
+PACKAGES += jiffy
+pkg_jiffy_name = jiffy
+pkg_jiffy_description = JSON NIFs for Erlang.
+pkg_jiffy_homepage = https://github.com/davisp/jiffy
+pkg_jiffy_fetch = git
+pkg_jiffy_repo = https://github.com/davisp/jiffy
+pkg_jiffy_commit = master
+
+PACKAGES += jiffy_v
+pkg_jiffy_v_name = jiffy_v
+pkg_jiffy_v_description = JSON validation utility
+pkg_jiffy_v_homepage = https://github.com/shizzard/jiffy-v
+pkg_jiffy_v_fetch = git
+pkg_jiffy_v_repo = https://github.com/shizzard/jiffy-v
+pkg_jiffy_v_commit = master
+
+PACKAGES += jobs
+pkg_jobs_name = jobs
+pkg_jobs_description = a Job scheduler for load regulation
+pkg_jobs_homepage = https://github.com/esl/jobs
+pkg_jobs_fetch = git
+pkg_jobs_repo = https://github.com/esl/jobs
+pkg_jobs_commit = master
+
+PACKAGES += joxa
+pkg_joxa_name = joxa
+pkg_joxa_description = A Modern Lisp for the Erlang VM
+pkg_joxa_homepage = https://github.com/joxa/joxa
+pkg_joxa_fetch = git
+pkg_joxa_repo = https://github.com/joxa/joxa
+pkg_joxa_commit = master
+
+PACKAGES += json
+pkg_json_name = json
+pkg_json_description = a high level json library for erlang (17.0+)
+pkg_json_homepage = https://github.com/talentdeficit/json
+pkg_json_fetch = git
+pkg_json_repo = https://github.com/talentdeficit/json
+pkg_json_commit = master
+
+PACKAGES += json_rec
+pkg_json_rec_name = json_rec
+pkg_json_rec_description = JSON to erlang record
+pkg_json_rec_homepage = https://github.com/justinkirby/json_rec
+pkg_json_rec_fetch = git
+pkg_json_rec_repo = https://github.com/justinkirby/json_rec
+pkg_json_rec_commit = master
+
+PACKAGES += jsone
+pkg_jsone_name = jsone
+pkg_jsone_description = An Erlang library for encoding, decoding JSON data.
+pkg_jsone_homepage = https://github.com/sile/jsone
+pkg_jsone_fetch = git
+pkg_jsone_repo = https://github.com/sile/jsone.git
+pkg_jsone_commit = master
+
+PACKAGES += jsonerl
+pkg_jsonerl_name = jsonerl
+pkg_jsonerl_description = yet another but slightly different erlang <-> json encoder/decoder
+pkg_jsonerl_homepage = https://github.com/lambder/jsonerl
+pkg_jsonerl_fetch = git
+pkg_jsonerl_repo = https://github.com/lambder/jsonerl
+pkg_jsonerl_commit = master
+
+PACKAGES += jsonpath
+pkg_jsonpath_name = jsonpath
+pkg_jsonpath_description = Fast Erlang JSON data retrieval and updates via javascript-like notation
+pkg_jsonpath_homepage = https://github.com/GeneStevens/jsonpath
+pkg_jsonpath_fetch = git
+pkg_jsonpath_repo = https://github.com/GeneStevens/jsonpath
+pkg_jsonpath_commit = master
+
+PACKAGES += jsonx
+pkg_jsonx_name = jsonx
+pkg_jsonx_description = JSONX is an Erlang library for efficiently decoding and encoding JSON, written in C.
+pkg_jsonx_homepage = https://github.com/iskra/jsonx
+pkg_jsonx_fetch = git
+pkg_jsonx_repo = https://github.com/iskra/jsonx
+pkg_jsonx_commit = master
+
+PACKAGES += jsx
+pkg_jsx_name = jsx
+pkg_jsx_description = An Erlang application for consuming, producing and manipulating JSON.
+pkg_jsx_homepage = https://github.com/talentdeficit/jsx
+pkg_jsx_fetch = git
+pkg_jsx_repo = https://github.com/talentdeficit/jsx
+pkg_jsx_commit = master
+
+PACKAGES += kafka
+pkg_kafka_name = kafka
+pkg_kafka_description = Kafka consumer and producer in Erlang
+pkg_kafka_homepage = https://github.com/wooga/kafka-erlang
+pkg_kafka_fetch = git
+pkg_kafka_repo = https://github.com/wooga/kafka-erlang
+pkg_kafka_commit = master
+
+PACKAGES += kafka_protocol
+pkg_kafka_protocol_name = kafka_protocol
+pkg_kafka_protocol_description = Kafka protocol Erlang library
+pkg_kafka_protocol_homepage = https://github.com/klarna/kafka_protocol
+pkg_kafka_protocol_fetch = git
+pkg_kafka_protocol_repo = https://github.com/klarna/kafka_protocol.git
+pkg_kafka_protocol_commit = master
+
+PACKAGES += kai
+pkg_kai_name = kai
+pkg_kai_description = DHT storage by Takeshi Inoue
+pkg_kai_homepage = https://github.com/synrc/kai
+pkg_kai_fetch = git
+pkg_kai_repo = https://github.com/synrc/kai
+pkg_kai_commit = master
+
+PACKAGES += katja
+pkg_katja_name = katja
+pkg_katja_description = A simple Riemann client written in Erlang.
+pkg_katja_homepage = https://github.com/nifoc/katja
+pkg_katja_fetch = git
+pkg_katja_repo = https://github.com/nifoc/katja
+pkg_katja_commit = master
+
+PACKAGES += kdht
+pkg_kdht_name = kdht
+pkg_kdht_description = kdht is an erlang DHT implementation
+pkg_kdht_homepage = https://github.com/kevinlynx/kdht
+pkg_kdht_fetch = git
+pkg_kdht_repo = https://github.com/kevinlynx/kdht
+pkg_kdht_commit = master
+
+PACKAGES += key2value
+pkg_key2value_name = key2value
+pkg_key2value_description = Erlang 2-way map
+pkg_key2value_homepage = https://github.com/okeuday/key2value
+pkg_key2value_fetch = git
+pkg_key2value_repo = https://github.com/okeuday/key2value
+pkg_key2value_commit = master
+
+PACKAGES += keys1value
+pkg_keys1value_name = keys1value
+pkg_keys1value_description = Erlang set associative map for key lists
+pkg_keys1value_homepage = https://github.com/okeuday/keys1value
+pkg_keys1value_fetch = git
+pkg_keys1value_repo = https://github.com/okeuday/keys1value
+pkg_keys1value_commit = master
+
+PACKAGES += kinetic
+pkg_kinetic_name = kinetic
+pkg_kinetic_description = Erlang Kinesis Client
+pkg_kinetic_homepage = https://github.com/AdRoll/kinetic
+pkg_kinetic_fetch = git
+pkg_kinetic_repo = https://github.com/AdRoll/kinetic
+pkg_kinetic_commit = master
+
+PACKAGES += kjell
+pkg_kjell_name = kjell
+pkg_kjell_description = Erlang Shell
+pkg_kjell_homepage = https://github.com/karlll/kjell
+pkg_kjell_fetch = git
+pkg_kjell_repo = https://github.com/karlll/kjell
+pkg_kjell_commit = master
+
+PACKAGES += kraken
+pkg_kraken_name = kraken
+pkg_kraken_description = Distributed Pubsub Server for Realtime Apps
+pkg_kraken_homepage = https://github.com/Asana/kraken
+pkg_kraken_fetch = git
+pkg_kraken_repo = https://github.com/Asana/kraken
+pkg_kraken_commit = master
+
+PACKAGES += kucumberl
+pkg_kucumberl_name = kucumberl
+pkg_kucumberl_description = A pure-erlang, open-source, implementation of Cucumber
+pkg_kucumberl_homepage = https://github.com/openshine/kucumberl
+pkg_kucumberl_fetch = git
+pkg_kucumberl_repo = https://github.com/openshine/kucumberl
+pkg_kucumberl_commit = master
+
+PACKAGES += kvc
+pkg_kvc_name = kvc
+pkg_kvc_description = KVC - Key Value Coding for Erlang data structures
+pkg_kvc_homepage = https://github.com/etrepum/kvc
+pkg_kvc_fetch = git
+pkg_kvc_repo = https://github.com/etrepum/kvc
+pkg_kvc_commit = master
+
+PACKAGES += kvlists
+pkg_kvlists_name = kvlists
+pkg_kvlists_description = Lists of key-value pairs (decoded JSON) in Erlang
+pkg_kvlists_homepage = https://github.com/jcomellas/kvlists
+pkg_kvlists_fetch = git
+pkg_kvlists_repo = https://github.com/jcomellas/kvlists
+pkg_kvlists_commit = master
+
+PACKAGES += kvs
+pkg_kvs_name = kvs
+pkg_kvs_description = Container and Iterator
+pkg_kvs_homepage = https://github.com/synrc/kvs
+pkg_kvs_fetch = git
+pkg_kvs_repo = https://github.com/synrc/kvs
+pkg_kvs_commit = master
+
+PACKAGES += lager
+pkg_lager_name = lager
+pkg_lager_description = A logging framework for Erlang/OTP.
+pkg_lager_homepage = https://github.com/erlang-lager/lager
+pkg_lager_fetch = git
+pkg_lager_repo = https://github.com/erlang-lager/lager
+pkg_lager_commit = master
+
+PACKAGES += lager_amqp_backend
+pkg_lager_amqp_backend_name = lager_amqp_backend
+pkg_lager_amqp_backend_description = AMQP RabbitMQ Lager backend
+pkg_lager_amqp_backend_homepage = https://github.com/jbrisbin/lager_amqp_backend
+pkg_lager_amqp_backend_fetch = git
+pkg_lager_amqp_backend_repo = https://github.com/jbrisbin/lager_amqp_backend
+pkg_lager_amqp_backend_commit = master
+
+PACKAGES += lager_syslog
+pkg_lager_syslog_name = lager_syslog
+pkg_lager_syslog_description = Syslog backend for lager
+pkg_lager_syslog_homepage = https://github.com/erlang-lager/lager_syslog
+pkg_lager_syslog_fetch = git
+pkg_lager_syslog_repo = https://github.com/erlang-lager/lager_syslog
+pkg_lager_syslog_commit = master
+
+PACKAGES += lambdapad
+pkg_lambdapad_name = lambdapad
+pkg_lambdapad_description = Static site generator using Erlang. Yes, Erlang.
+pkg_lambdapad_homepage = https://github.com/gar1t/lambdapad
+pkg_lambdapad_fetch = git
+pkg_lambdapad_repo = https://github.com/gar1t/lambdapad
+pkg_lambdapad_commit = master
+
+PACKAGES += lasp
+pkg_lasp_name = lasp
+pkg_lasp_description = A Language for Distributed, Eventually Consistent Computations
+pkg_lasp_homepage = http://lasp-lang.org/
+pkg_lasp_fetch = git
+pkg_lasp_repo = https://github.com/lasp-lang/lasp
+pkg_lasp_commit = master
+
+PACKAGES += lasse
+pkg_lasse_name = lasse
+pkg_lasse_description = SSE handler for Cowboy
+pkg_lasse_homepage = https://github.com/inaka/lasse
+pkg_lasse_fetch = git
+pkg_lasse_repo = https://github.com/inaka/lasse
+pkg_lasse_commit = master
+
+PACKAGES += ldap
+pkg_ldap_name = ldap
+pkg_ldap_description = LDAP server written in Erlang
+pkg_ldap_homepage = https://github.com/spawnproc/ldap
+pkg_ldap_fetch = git
+pkg_ldap_repo = https://github.com/spawnproc/ldap
+pkg_ldap_commit = master
+
+PACKAGES += lethink
+pkg_lethink_name = lethink
+pkg_lethink_description = erlang driver for rethinkdb
+pkg_lethink_homepage = https://github.com/taybin/lethink
+pkg_lethink_fetch = git
+pkg_lethink_repo = https://github.com/taybin/lethink
+pkg_lethink_commit = master
+
+PACKAGES += lfe
+pkg_lfe_name = lfe
+pkg_lfe_description = Lisp Flavoured Erlang (LFE)
+pkg_lfe_homepage = https://github.com/rvirding/lfe
+pkg_lfe_fetch = git
+pkg_lfe_repo = https://github.com/rvirding/lfe
+pkg_lfe_commit = master
+
+PACKAGES += ling
+pkg_ling_name = ling
+pkg_ling_description = Erlang on Xen
+pkg_ling_homepage = https://github.com/cloudozer/ling
+pkg_ling_fetch = git
+pkg_ling_repo = https://github.com/cloudozer/ling
+pkg_ling_commit = master
+
+PACKAGES += live
+pkg_live_name = live
+pkg_live_description = Automated module and configuration reloader.
+pkg_live_homepage = http://ninenines.eu
+pkg_live_fetch = git
+pkg_live_repo = https://github.com/ninenines/live
+pkg_live_commit = master
+
+PACKAGES += lmq
+pkg_lmq_name = lmq
+pkg_lmq_description = Lightweight Message Queue
+pkg_lmq_homepage = https://github.com/iij/lmq
+pkg_lmq_fetch = git
+pkg_lmq_repo = https://github.com/iij/lmq
+pkg_lmq_commit = master
+
+PACKAGES += locker
+pkg_locker_name = locker
+pkg_locker_description = Atomic distributed 'check and set' for short-lived keys
+pkg_locker_homepage = https://github.com/wooga/locker
+pkg_locker_fetch = git
+pkg_locker_repo = https://github.com/wooga/locker
+pkg_locker_commit = master
+
+PACKAGES += locks
+pkg_locks_name = locks
+pkg_locks_description = A scalable, deadlock-resolving resource locker
+pkg_locks_homepage = https://github.com/uwiger/locks
+pkg_locks_fetch = git
+pkg_locks_repo = https://github.com/uwiger/locks
+pkg_locks_commit = master
+
+PACKAGES += log4erl
+pkg_log4erl_name = log4erl
+pkg_log4erl_description = A logger for erlang in the spirit of Log4J.
+pkg_log4erl_homepage = https://github.com/ahmednawras/log4erl
+pkg_log4erl_fetch = git
+pkg_log4erl_repo = https://github.com/ahmednawras/log4erl
+pkg_log4erl_commit = master
+
+PACKAGES += lol
+pkg_lol_name = lol
+pkg_lol_description = Lisp on erLang, and programming is fun again
+pkg_lol_homepage = https://github.com/b0oh/lol
+pkg_lol_fetch = git
+pkg_lol_repo = https://github.com/b0oh/lol
+pkg_lol_commit = master
+
+PACKAGES += lucid
+pkg_lucid_name = lucid
+pkg_lucid_description = HTTP/2 server written in Erlang
+pkg_lucid_homepage = https://github.com/tatsuhiro-t/lucid
+pkg_lucid_fetch = git
+pkg_lucid_repo = https://github.com/tatsuhiro-t/lucid
+pkg_lucid_commit = master
+
+PACKAGES += luerl
+pkg_luerl_name = luerl
+pkg_luerl_description = Lua in Erlang
+pkg_luerl_homepage = https://github.com/rvirding/luerl
+pkg_luerl_fetch = git
+pkg_luerl_repo = https://github.com/rvirding/luerl
+pkg_luerl_commit = develop
+
+PACKAGES += luwak
+pkg_luwak_name = luwak
+pkg_luwak_description = Large-object storage interface for Riak
+pkg_luwak_homepage = https://github.com/basho/luwak
+pkg_luwak_fetch = git
+pkg_luwak_repo = https://github.com/basho/luwak
+pkg_luwak_commit = master
+
+PACKAGES += lux
+pkg_lux_name = lux
+pkg_lux_description = Lux (LUcid eXpect scripting) simplifies test automation and provides an Expect-style execution of commands
+pkg_lux_homepage = https://github.com/hawk/lux
+pkg_lux_fetch = git
+pkg_lux_repo = https://github.com/hawk/lux
+pkg_lux_commit = master
+
+PACKAGES += machi
+pkg_machi_name = machi
+pkg_machi_description = Machi file store
+pkg_machi_homepage = https://github.com/basho/machi
+pkg_machi_fetch = git
+pkg_machi_repo = https://github.com/basho/machi
+pkg_machi_commit = master
+
+PACKAGES += mad
+pkg_mad_name = mad
+pkg_mad_description = Small and Fast Rebar Replacement
+pkg_mad_homepage = https://github.com/synrc/mad
+pkg_mad_fetch = git
+pkg_mad_repo = https://github.com/synrc/mad
+pkg_mad_commit = master
+
+PACKAGES += marina
+pkg_marina_name = marina
+pkg_marina_description = Non-blocking Erlang Cassandra CQL3 client
+pkg_marina_homepage = https://github.com/lpgauth/marina
+pkg_marina_fetch = git
+pkg_marina_repo = https://github.com/lpgauth/marina
+pkg_marina_commit = master
+
+PACKAGES += mavg
+pkg_mavg_name = mavg
+pkg_mavg_description = Erlang :: Exponential moving average library
+pkg_mavg_homepage = https://github.com/EchoTeam/mavg
+pkg_mavg_fetch = git
+pkg_mavg_repo = https://github.com/EchoTeam/mavg
+pkg_mavg_commit = master
+
+PACKAGES += mc_erl
+pkg_mc_erl_name = mc_erl
+pkg_mc_erl_description = mc-erl is a server for Minecraft 1.4.7 written in Erlang.
+pkg_mc_erl_homepage = https://github.com/clonejo/mc-erl
+pkg_mc_erl_fetch = git
+pkg_mc_erl_repo = https://github.com/clonejo/mc-erl
+pkg_mc_erl_commit = master
+
+PACKAGES += mcd
+pkg_mcd_name = mcd
+pkg_mcd_description = Fast memcached protocol client in pure Erlang
+pkg_mcd_homepage = https://github.com/EchoTeam/mcd
+pkg_mcd_fetch = git
+pkg_mcd_repo = https://github.com/EchoTeam/mcd
+pkg_mcd_commit = master
+
+PACKAGES += mcerlang
+pkg_mcerlang_name = mcerlang
+pkg_mcerlang_description = The McErlang model checker for Erlang
+pkg_mcerlang_homepage = https://github.com/fredlund/McErlang
+pkg_mcerlang_fetch = git
+pkg_mcerlang_repo = https://github.com/fredlund/McErlang
+pkg_mcerlang_commit = master
+
+PACKAGES += meck
+pkg_meck_name = meck
+pkg_meck_description = A mocking library for Erlang
+pkg_meck_homepage = https://github.com/eproxus/meck
+pkg_meck_fetch = git
+pkg_meck_repo = https://github.com/eproxus/meck
+pkg_meck_commit = master
+
+PACKAGES += mekao
+pkg_mekao_name = mekao
+pkg_mekao_description = SQL constructor
+pkg_mekao_homepage = https://github.com/ddosia/mekao
+pkg_mekao_fetch = git
+pkg_mekao_repo = https://github.com/ddosia/mekao
+pkg_mekao_commit = master
+
+PACKAGES += memo
+pkg_memo_name = memo
+pkg_memo_description = Erlang memoization server
+pkg_memo_homepage = https://github.com/tuncer/memo
+pkg_memo_fetch = git
+pkg_memo_repo = https://github.com/tuncer/memo
+pkg_memo_commit = master
+
+PACKAGES += merge_index
+pkg_merge_index_name = merge_index
+pkg_merge_index_description = MergeIndex is an Erlang library for storing ordered sets on disk. It is very similar to an SSTable (in Google's Bigtable) or an HFile (in Hadoop).
+pkg_merge_index_homepage = https://github.com/basho/merge_index
+pkg_merge_index_fetch = git
+pkg_merge_index_repo = https://github.com/basho/merge_index
+pkg_merge_index_commit = master
+
+PACKAGES += merl
+pkg_merl_name = merl
+pkg_merl_description = Metaprogramming in Erlang
+pkg_merl_homepage = https://github.com/richcarl/merl
+pkg_merl_fetch = git
+pkg_merl_repo = https://github.com/richcarl/merl
+pkg_merl_commit = master
+
+PACKAGES += mimerl
+pkg_mimerl_name = mimerl
+pkg_mimerl_description = library to handle mimetypes
+pkg_mimerl_homepage = https://github.com/benoitc/mimerl
+pkg_mimerl_fetch = git
+pkg_mimerl_repo = https://github.com/benoitc/mimerl
+pkg_mimerl_commit = master
+
+PACKAGES += mimetypes
+pkg_mimetypes_name = mimetypes
+pkg_mimetypes_description = Erlang MIME types library
+pkg_mimetypes_homepage = https://github.com/spawngrid/mimetypes
+pkg_mimetypes_fetch = git
+pkg_mimetypes_repo = https://github.com/spawngrid/mimetypes
+pkg_mimetypes_commit = master
+
+PACKAGES += mixer
+pkg_mixer_name = mixer
+pkg_mixer_description = Mix in functions from other modules
+pkg_mixer_homepage = https://github.com/chef/mixer
+pkg_mixer_fetch = git
+pkg_mixer_repo = https://github.com/chef/mixer
+pkg_mixer_commit = master
+
+PACKAGES += mochiweb
+pkg_mochiweb_name = mochiweb
+pkg_mochiweb_description = MochiWeb is an Erlang library for building lightweight HTTP servers.
+pkg_mochiweb_homepage = https://github.com/mochi/mochiweb
+pkg_mochiweb_fetch = git
+pkg_mochiweb_repo = https://github.com/mochi/mochiweb
+pkg_mochiweb_commit = master
+
+PACKAGES += mochiweb_xpath
+pkg_mochiweb_xpath_name = mochiweb_xpath
+pkg_mochiweb_xpath_description = XPath support for mochiweb's html parser
+pkg_mochiweb_xpath_homepage = https://github.com/retnuh/mochiweb_xpath
+pkg_mochiweb_xpath_fetch = git
+pkg_mochiweb_xpath_repo = https://github.com/retnuh/mochiweb_xpath
+pkg_mochiweb_xpath_commit = master
+
+PACKAGES += mockgyver
+pkg_mockgyver_name = mockgyver
+pkg_mockgyver_description = A mocking library for Erlang
+pkg_mockgyver_homepage = https://github.com/klajo/mockgyver
+pkg_mockgyver_fetch = git
+pkg_mockgyver_repo = https://github.com/klajo/mockgyver
+pkg_mockgyver_commit = master
+
+PACKAGES += modlib
+pkg_modlib_name = modlib
+pkg_modlib_description = Web framework based on Erlang's inets httpd
+pkg_modlib_homepage = https://github.com/gar1t/modlib
+pkg_modlib_fetch = git
+pkg_modlib_repo = https://github.com/gar1t/modlib
+pkg_modlib_commit = master
+
+PACKAGES += mongodb
+pkg_mongodb_name = mongodb
+pkg_mongodb_description = MongoDB driver for Erlang
+pkg_mongodb_homepage = https://github.com/comtihon/mongodb-erlang
+pkg_mongodb_fetch = git
+pkg_mongodb_repo = https://github.com/comtihon/mongodb-erlang
+pkg_mongodb_commit = master
+
+PACKAGES += mongooseim
+pkg_mongooseim_name = mongooseim
+pkg_mongooseim_description = Jabber / XMPP server with focus on performance and scalability, by Erlang Solutions
+pkg_mongooseim_homepage = https://www.erlang-solutions.com/products/mongooseim-massively-scalable-ejabberd-platform
+pkg_mongooseim_fetch = git
+pkg_mongooseim_repo = https://github.com/esl/MongooseIM
+pkg_mongooseim_commit = master
+
+PACKAGES += moyo
+pkg_moyo_name = moyo
+pkg_moyo_description = Erlang utility functions library
+pkg_moyo_homepage = https://github.com/dwango/moyo
+pkg_moyo_fetch = git
+pkg_moyo_repo = https://github.com/dwango/moyo
+pkg_moyo_commit = master
+
+PACKAGES += msgpack
+pkg_msgpack_name = msgpack
+pkg_msgpack_description = MessagePack (de)serializer implementation for Erlang
+pkg_msgpack_homepage = https://github.com/msgpack/msgpack-erlang
+pkg_msgpack_fetch = git
+pkg_msgpack_repo = https://github.com/msgpack/msgpack-erlang
+pkg_msgpack_commit = master
+
+PACKAGES += mu2
+pkg_mu2_name = mu2
+pkg_mu2_description = Erlang mutation testing tool
+pkg_mu2_homepage = https://github.com/ramsay-t/mu2
+pkg_mu2_fetch = git
+pkg_mu2_repo = https://github.com/ramsay-t/mu2
+pkg_mu2_commit = master
+
+PACKAGES += mustache
+pkg_mustache_name = mustache
+pkg_mustache_description = Mustache template engine for Erlang.
+pkg_mustache_homepage = https://github.com/mojombo/mustache.erl
+pkg_mustache_fetch = git
+pkg_mustache_repo = https://github.com/mojombo/mustache.erl
+pkg_mustache_commit = master
+
+PACKAGES += myproto
+pkg_myproto_name = myproto
+pkg_myproto_description = MySQL Server Protocol in Erlang
+pkg_myproto_homepage = https://github.com/altenwald/myproto
+pkg_myproto_fetch = git
+pkg_myproto_repo = https://github.com/altenwald/myproto
+pkg_myproto_commit = master
+
+PACKAGES += mysql
+pkg_mysql_name = mysql
+pkg_mysql_description = MySQL client library for Erlang/OTP
+pkg_mysql_homepage = https://github.com/mysql-otp/mysql-otp
+pkg_mysql_fetch = git
+pkg_mysql_repo = https://github.com/mysql-otp/mysql-otp
+pkg_mysql_commit = 1.5.1
+
+PACKAGES += n2o
+pkg_n2o_name = n2o
+pkg_n2o_description = WebSocket Application Server
+pkg_n2o_homepage = https://github.com/5HT/n2o
+pkg_n2o_fetch = git
+pkg_n2o_repo = https://github.com/5HT/n2o
+pkg_n2o_commit = master
+
+PACKAGES += nat_upnp
+pkg_nat_upnp_name = nat_upnp
+pkg_nat_upnp_description = Erlang library to map your internal port to an external one using UPnP IGD
+pkg_nat_upnp_homepage = https://github.com/benoitc/nat_upnp
+pkg_nat_upnp_fetch = git
+pkg_nat_upnp_repo = https://github.com/benoitc/nat_upnp
+pkg_nat_upnp_commit = master
+
+PACKAGES += neo4j
+pkg_neo4j_name = neo4j
+pkg_neo4j_description = Erlang client library for Neo4J.
+pkg_neo4j_homepage = https://github.com/dmitriid/neo4j-erlang
+pkg_neo4j_fetch = git
+pkg_neo4j_repo = https://github.com/dmitriid/neo4j-erlang
+pkg_neo4j_commit = master
+
+PACKAGES += neotoma
+pkg_neotoma_name = neotoma
+pkg_neotoma_description = Erlang library and packrat parser-generator for parsing expression grammars.
+pkg_neotoma_homepage = https://github.com/seancribbs/neotoma
+pkg_neotoma_fetch = git
+pkg_neotoma_repo = https://github.com/seancribbs/neotoma
+pkg_neotoma_commit = master
+
+PACKAGES += newrelic
+pkg_newrelic_name = newrelic
+pkg_newrelic_description = Erlang library for sending metrics to New Relic
+pkg_newrelic_homepage = https://github.com/wooga/newrelic-erlang
+pkg_newrelic_fetch = git
+pkg_newrelic_repo = https://github.com/wooga/newrelic-erlang
+pkg_newrelic_commit = master
+
+PACKAGES += nifty
+pkg_nifty_name = nifty
+pkg_nifty_description = Erlang NIF wrapper generator
+pkg_nifty_homepage = https://github.com/parapluu/nifty
+pkg_nifty_fetch = git
+pkg_nifty_repo = https://github.com/parapluu/nifty
+pkg_nifty_commit = master
+
+PACKAGES += nitrogen_core
+pkg_nitrogen_core_name = nitrogen_core
+pkg_nitrogen_core_description = The core Nitrogen library.
+pkg_nitrogen_core_homepage = http://nitrogenproject.com/
+pkg_nitrogen_core_fetch = git
+pkg_nitrogen_core_repo = https://github.com/nitrogen/nitrogen_core
+pkg_nitrogen_core_commit = master
+
+PACKAGES += nkbase
+pkg_nkbase_name = nkbase
+pkg_nkbase_description = NkBASE distributed database
+pkg_nkbase_homepage = https://github.com/Nekso/nkbase
+pkg_nkbase_fetch = git
+pkg_nkbase_repo = https://github.com/Nekso/nkbase
+pkg_nkbase_commit = develop
+
+PACKAGES += nkdocker
+pkg_nkdocker_name = nkdocker
+pkg_nkdocker_description = Erlang Docker client
+pkg_nkdocker_homepage = https://github.com/Nekso/nkdocker
+pkg_nkdocker_fetch = git
+pkg_nkdocker_repo = https://github.com/Nekso/nkdocker
+pkg_nkdocker_commit = master
+
+PACKAGES += nkpacket
+pkg_nkpacket_name = nkpacket
+pkg_nkpacket_description = Generic Erlang transport layer
+pkg_nkpacket_homepage = https://github.com/Nekso/nkpacket
+pkg_nkpacket_fetch = git
+pkg_nkpacket_repo = https://github.com/Nekso/nkpacket
+pkg_nkpacket_commit = master
+
+PACKAGES += nksip
+pkg_nksip_name = nksip
+pkg_nksip_description = Erlang SIP application server
+pkg_nksip_homepage = https://github.com/kalta/nksip
+pkg_nksip_fetch = git
+pkg_nksip_repo = https://github.com/kalta/nksip
+pkg_nksip_commit = master
+
+PACKAGES += nodefinder
+pkg_nodefinder_name = nodefinder
+pkg_nodefinder_description = automatic node discovery via UDP multicast
+pkg_nodefinder_homepage = https://github.com/erlanger/nodefinder
+pkg_nodefinder_fetch = git
+pkg_nodefinder_repo = https://github.com/okeuday/nodefinder
+pkg_nodefinder_commit = master
+
+PACKAGES += nprocreg
+pkg_nprocreg_name = nprocreg
+pkg_nprocreg_description = Minimal Distributed Erlang Process Registry
+pkg_nprocreg_homepage = http://nitrogenproject.com/
+pkg_nprocreg_fetch = git
+pkg_nprocreg_repo = https://github.com/nitrogen/nprocreg
+pkg_nprocreg_commit = master
+
+PACKAGES += oauth
+pkg_oauth_name = oauth
+pkg_oauth_description = An Erlang OAuth 1.0 implementation
+pkg_oauth_homepage = https://github.com/tim/erlang-oauth
+pkg_oauth_fetch = git
+pkg_oauth_repo = https://github.com/tim/erlang-oauth
+pkg_oauth_commit = master
+
+PACKAGES += oauth2
+pkg_oauth2_name = oauth2
+pkg_oauth2_description = Erlang Oauth2 implementation
+pkg_oauth2_homepage = https://github.com/kivra/oauth2
+pkg_oauth2_fetch = git
+pkg_oauth2_repo = https://github.com/kivra/oauth2
+pkg_oauth2_commit = master
+
+PACKAGES += observer_cli
+pkg_observer_cli_name = observer_cli
+pkg_observer_cli_description = Visualize Erlang/Elixir Nodes On The Command Line
+pkg_observer_cli_homepage = http://zhongwencool.github.io/observer_cli
+pkg_observer_cli_fetch = git
+pkg_observer_cli_repo = https://github.com/zhongwencool/observer_cli
+pkg_observer_cli_commit = master
+
+PACKAGES += octopus
+pkg_octopus_name = octopus
+pkg_octopus_description = Small and flexible pool manager written in Erlang
+pkg_octopus_homepage = https://github.com/erlangbureau/octopus
+pkg_octopus_fetch = git
+pkg_octopus_repo = https://github.com/erlangbureau/octopus
+pkg_octopus_commit = master
+
+PACKAGES += of_protocol
+pkg_of_protocol_name = of_protocol
+pkg_of_protocol_description = OpenFlow Protocol Library for Erlang
+pkg_of_protocol_homepage = https://github.com/FlowForwarding/of_protocol
+pkg_of_protocol_fetch = git
+pkg_of_protocol_repo = https://github.com/FlowForwarding/of_protocol
+pkg_of_protocol_commit = master
+
+PACKAGES += opencouch
+pkg_opencouch_name = couch
+pkg_opencouch_description = An embeddable document-oriented database compatible with Apache CouchDB
+pkg_opencouch_homepage = https://github.com/benoitc/opencouch
+pkg_opencouch_fetch = git
+pkg_opencouch_repo = https://github.com/benoitc/opencouch
+pkg_opencouch_commit = master
+
+PACKAGES += openflow
+pkg_openflow_name = openflow
+pkg_openflow_description = An OpenFlow controller written in pure erlang
+pkg_openflow_homepage = https://github.com/renatoaguiar/erlang-openflow
+pkg_openflow_fetch = git
+pkg_openflow_repo = https://github.com/renatoaguiar/erlang-openflow
+pkg_openflow_commit = master
+
+PACKAGES += openid
+pkg_openid_name = openid
+pkg_openid_description = Erlang OpenID
+pkg_openid_homepage = https://github.com/brendonh/erl_openid
+pkg_openid_fetch = git
+pkg_openid_repo = https://github.com/brendonh/erl_openid
+pkg_openid_commit = master
+
+PACKAGES += openpoker
+pkg_openpoker_name = openpoker
+pkg_openpoker_description = Genesis Texas hold'em Game Server
+pkg_openpoker_homepage = https://github.com/hpyhacking/openpoker
+pkg_openpoker_fetch = git
+pkg_openpoker_repo = https://github.com/hpyhacking/openpoker
+pkg_openpoker_commit = master
+
+PACKAGES += otpbp
+pkg_otpbp_name = otpbp
+pkg_otpbp_description = Parse transformer for use new OTP functions in old Erlang/OTP releases (R15, R16, 17, 18, 19)
+pkg_otpbp_homepage = https://github.com/Ledest/otpbp
+pkg_otpbp_fetch = git
+pkg_otpbp_repo = https://github.com/Ledest/otpbp
+pkg_otpbp_commit = master
+
+PACKAGES += pal
+pkg_pal_name = pal
+pkg_pal_description = Pragmatic Authentication Library
+pkg_pal_homepage = https://github.com/manifest/pal
+pkg_pal_fetch = git
+pkg_pal_repo = https://github.com/manifest/pal
+pkg_pal_commit = master
+
+PACKAGES += parse_trans
+pkg_parse_trans_name = parse_trans
+pkg_parse_trans_description = Parse transform utilities for Erlang
+pkg_parse_trans_homepage = https://github.com/uwiger/parse_trans
+pkg_parse_trans_fetch = git
+pkg_parse_trans_repo = https://github.com/uwiger/parse_trans
+pkg_parse_trans_commit = master
+
+PACKAGES += parsexml
+pkg_parsexml_name = parsexml
+pkg_parsexml_description = Simple DOM XML parser with convenient and very simple API
+pkg_parsexml_homepage = https://github.com/maxlapshin/parsexml
+pkg_parsexml_fetch = git
+pkg_parsexml_repo = https://github.com/maxlapshin/parsexml
+pkg_parsexml_commit = master
+
+PACKAGES += partisan
+pkg_partisan_name = partisan
+pkg_partisan_description = High-performance, high-scalability distributed computing with Erlang and Elixir.
+pkg_partisan_homepage = http://partisan.cloud
+pkg_partisan_fetch = git
+pkg_partisan_repo = https://github.com/lasp-lang/partisan
+pkg_partisan_commit = master
+
+PACKAGES += pegjs
+pkg_pegjs_name = pegjs
+pkg_pegjs_description = An implementation of PEG.js grammar for Erlang.
+pkg_pegjs_homepage = https://github.com/dmitriid/pegjs
+pkg_pegjs_fetch = git
+pkg_pegjs_repo = https://github.com/dmitriid/pegjs
+pkg_pegjs_commit = master
+
+PACKAGES += percept2
+pkg_percept2_name = percept2
+pkg_percept2_description = Concurrent profiling tool for Erlang
+pkg_percept2_homepage = https://github.com/huiqing/percept2
+pkg_percept2_fetch = git
+pkg_percept2_repo = https://github.com/huiqing/percept2
+pkg_percept2_commit = master
+
+PACKAGES += pgo
+pkg_pgo_name = pgo
+pkg_pgo_description = Erlang Postgres client and connection pool
+pkg_pgo_homepage = https://github.com/erleans/pgo
+pkg_pgo_fetch = git
+pkg_pgo_repo = https://github.com/erleans/pgo.git
+pkg_pgo_commit = master
+
+PACKAGES += pgsql
+pkg_pgsql_name = pgsql
+pkg_pgsql_description = Erlang PostgreSQL driver
+pkg_pgsql_homepage = https://github.com/semiocast/pgsql
+pkg_pgsql_fetch = git
+pkg_pgsql_repo = https://github.com/semiocast/pgsql
+pkg_pgsql_commit = master
+
+PACKAGES += pkgx
+pkg_pkgx_name = pkgx
+pkg_pkgx_description = Build .deb packages from Erlang releases
+pkg_pkgx_homepage = https://github.com/arjan/pkgx
+pkg_pkgx_fetch = git
+pkg_pkgx_repo = https://github.com/arjan/pkgx
+pkg_pkgx_commit = master
+
+PACKAGES += pkt
+pkg_pkt_name = pkt
+pkg_pkt_description = Erlang network protocol library
+pkg_pkt_homepage = https://github.com/msantos/pkt
+pkg_pkt_fetch = git
+pkg_pkt_repo = https://github.com/msantos/pkt
+pkg_pkt_commit = master
+
+PACKAGES += plain_fsm
+pkg_plain_fsm_name = plain_fsm
+pkg_plain_fsm_description = A behaviour/support library for writing plain Erlang FSMs.
+pkg_plain_fsm_homepage = https://github.com/uwiger/plain_fsm
+pkg_plain_fsm_fetch = git
+pkg_plain_fsm_repo = https://github.com/uwiger/plain_fsm
+pkg_plain_fsm_commit = master
+
+PACKAGES += plumtree
+pkg_plumtree_name = plumtree
+pkg_plumtree_description = Epidemic Broadcast Trees
+pkg_plumtree_homepage = https://github.com/helium/plumtree
+pkg_plumtree_fetch = git
+pkg_plumtree_repo = https://github.com/helium/plumtree
+pkg_plumtree_commit = master
+
+PACKAGES += pmod_transform
+pkg_pmod_transform_name = pmod_transform
+pkg_pmod_transform_description = Parse transform for parameterized modules
+pkg_pmod_transform_homepage = https://github.com/erlang/pmod_transform
+pkg_pmod_transform_fetch = git
+pkg_pmod_transform_repo = https://github.com/erlang/pmod_transform
+pkg_pmod_transform_commit = master
+
+PACKAGES += pobox
+pkg_pobox_name = pobox
+pkg_pobox_description = External buffer processes to protect against mailbox overflow in Erlang
+pkg_pobox_homepage = https://github.com/ferd/pobox
+pkg_pobox_fetch = git
+pkg_pobox_repo = https://github.com/ferd/pobox
+pkg_pobox_commit = master
+
+PACKAGES += ponos
+pkg_ponos_name = ponos
+pkg_ponos_description = ponos is a simple yet powerful load generator written in erlang
+pkg_ponos_homepage = https://github.com/klarna/ponos
+pkg_ponos_fetch = git
+pkg_ponos_repo = https://github.com/klarna/ponos
+pkg_ponos_commit = master
+
+PACKAGES += poolboy
+pkg_poolboy_name = poolboy
+pkg_poolboy_description = A hunky Erlang worker pool factory
+pkg_poolboy_homepage = https://github.com/devinus/poolboy
+pkg_poolboy_fetch = git
+pkg_poolboy_repo = https://github.com/devinus/poolboy
+pkg_poolboy_commit = master
+
+PACKAGES += pooler
+pkg_pooler_name = pooler
+pkg_pooler_description = An OTP Process Pool Application
+pkg_pooler_homepage = https://github.com/seth/pooler
+pkg_pooler_fetch = git
+pkg_pooler_repo = https://github.com/seth/pooler
+pkg_pooler_commit = master
+
+PACKAGES += pqueue
+pkg_pqueue_name = pqueue
+pkg_pqueue_description = Erlang Priority Queues
+pkg_pqueue_homepage = https://github.com/okeuday/pqueue
+pkg_pqueue_fetch = git
+pkg_pqueue_repo = https://github.com/okeuday/pqueue
+pkg_pqueue_commit = master
+
+PACKAGES += procket
+pkg_procket_name = procket
+pkg_procket_description = Erlang interface to low level socket operations
+pkg_procket_homepage = http://blog.listincomprehension.com/search/label/procket
+pkg_procket_fetch = git
+pkg_procket_repo = https://github.com/msantos/procket
+pkg_procket_commit = master
+
+PACKAGES += prometheus
+pkg_prometheus_name = prometheus
+pkg_prometheus_description = Prometheus.io client in Erlang
+pkg_prometheus_homepage = https://github.com/deadtrickster/prometheus.erl
+pkg_prometheus_fetch = git
+pkg_prometheus_repo = https://github.com/deadtrickster/prometheus.erl
+pkg_prometheus_commit = master
+
+PACKAGES += prop
+pkg_prop_name = prop
+pkg_prop_description = An Erlang code scaffolding and generator system.
+pkg_prop_homepage = https://github.com/nuex/prop
+pkg_prop_fetch = git
+pkg_prop_repo = https://github.com/nuex/prop
+pkg_prop_commit = master
+
+PACKAGES += proper
+pkg_proper_name = proper
+pkg_proper_description = PropEr: a QuickCheck-inspired property-based testing tool for Erlang.
+pkg_proper_homepage = http://proper.softlab.ntua.gr
+pkg_proper_fetch = git
+pkg_proper_repo = https://github.com/manopapad/proper
+pkg_proper_commit = master
+
+PACKAGES += props
+pkg_props_name = props
+pkg_props_description = Property structure library
+pkg_props_homepage = https://github.com/greyarea/props
+pkg_props_fetch = git
+pkg_props_repo = https://github.com/greyarea/props
+pkg_props_commit = master
+
+PACKAGES += protobuffs
+pkg_protobuffs_name = protobuffs
+pkg_protobuffs_description = An implementation of Google's Protocol Buffers for Erlang, based on ngerakines/erlang_protobuffs.
+pkg_protobuffs_homepage = https://github.com/basho/erlang_protobuffs
+pkg_protobuffs_fetch = git
+pkg_protobuffs_repo = https://github.com/basho/erlang_protobuffs
+pkg_protobuffs_commit = master
+
+PACKAGES += psycho
+pkg_psycho_name = psycho
+pkg_psycho_description = HTTP server that provides a WSGI-like interface for applications and middleware.
+pkg_psycho_homepage = https://github.com/gar1t/psycho
+pkg_psycho_fetch = git
+pkg_psycho_repo = https://github.com/gar1t/psycho
+pkg_psycho_commit = master
+
+PACKAGES += purity
+pkg_purity_name = purity
+pkg_purity_description = A side-effect analyzer for Erlang
+pkg_purity_homepage = https://github.com/mpitid/purity
+pkg_purity_fetch = git
+pkg_purity_repo = https://github.com/mpitid/purity
+pkg_purity_commit = master
+
+PACKAGES += push_service
+pkg_push_service_name = push_service
+pkg_push_service_description = Push service
+pkg_push_service_homepage = https://github.com/hairyhum/push_service
+pkg_push_service_fetch = git
+pkg_push_service_repo = https://github.com/hairyhum/push_service
+pkg_push_service_commit = master
+
+PACKAGES += qdate
+pkg_qdate_name = qdate
+pkg_qdate_description = Date, time, and timezone parsing, formatting, and conversion for Erlang.
+pkg_qdate_homepage = https://github.com/choptastic/qdate
+pkg_qdate_fetch = git
+pkg_qdate_repo = https://github.com/choptastic/qdate
+pkg_qdate_commit = master
+
+PACKAGES += qrcode
+pkg_qrcode_name = qrcode
+pkg_qrcode_description = QR Code encoder in Erlang
+pkg_qrcode_homepage = https://github.com/komone/qrcode
+pkg_qrcode_fetch = git
+pkg_qrcode_repo = https://github.com/komone/qrcode
+pkg_qrcode_commit = master
+
+PACKAGES += quest
+pkg_quest_name = quest
+pkg_quest_description = Learn Erlang through this set of challenges. An interactive system for getting to know Erlang.
+pkg_quest_homepage = https://github.com/eriksoe/ErlangQuest
+pkg_quest_fetch = git
+pkg_quest_repo = https://github.com/eriksoe/ErlangQuest
+pkg_quest_commit = master
+
+PACKAGES += quickrand
+pkg_quickrand_name = quickrand
+pkg_quickrand_description = Quick Erlang Random Number Generation
+pkg_quickrand_homepage = https://github.com/okeuday/quickrand
+pkg_quickrand_fetch = git
+pkg_quickrand_repo = https://github.com/okeuday/quickrand
+pkg_quickrand_commit = master
+
+PACKAGES += rabbit
+pkg_rabbit_name = rabbit
+pkg_rabbit_description = RabbitMQ Server
+pkg_rabbit_homepage = https://www.rabbitmq.com/
+pkg_rabbit_fetch = git
+pkg_rabbit_repo = https://github.com/rabbitmq/rabbitmq-server.git
+pkg_rabbit_commit = master
+
+PACKAGES += rabbit_exchange_type_riak
+pkg_rabbit_exchange_type_riak_name = rabbit_exchange_type_riak
+pkg_rabbit_exchange_type_riak_description = Custom RabbitMQ exchange type for sticking messages in Riak
+pkg_rabbit_exchange_type_riak_homepage = https://github.com/jbrisbin/riak-exchange
+pkg_rabbit_exchange_type_riak_fetch = git
+pkg_rabbit_exchange_type_riak_repo = https://github.com/jbrisbin/riak-exchange
+pkg_rabbit_exchange_type_riak_commit = master
+
+PACKAGES += rack
+pkg_rack_name = rack
+pkg_rack_description = Rack handler for erlang
+pkg_rack_homepage = https://github.com/erlyvideo/rack
+pkg_rack_fetch = git
+pkg_rack_repo = https://github.com/erlyvideo/rack
+pkg_rack_commit = master
+
+PACKAGES += radierl
+pkg_radierl_name = radierl
+pkg_radierl_description = RADIUS protocol stack implemented in Erlang.
+pkg_radierl_homepage = https://github.com/vances/radierl
+pkg_radierl_fetch = git
+pkg_radierl_repo = https://github.com/vances/radierl
+pkg_radierl_commit = master
+
+PACKAGES += rafter
+pkg_rafter_name = rafter
+pkg_rafter_description = An Erlang library application which implements the Raft consensus protocol
+pkg_rafter_homepage = https://github.com/andrewjstone/rafter
+pkg_rafter_fetch = git
+pkg_rafter_repo = https://github.com/andrewjstone/rafter
+pkg_rafter_commit = master
+
+PACKAGES += ranch
+pkg_ranch_name = ranch
+pkg_ranch_description = Socket acceptor pool for TCP protocols.
+pkg_ranch_homepage = http://ninenines.eu
+pkg_ranch_fetch = git
+pkg_ranch_repo = https://github.com/ninenines/ranch
+pkg_ranch_commit = 1.2.1
+
+PACKAGES += rbeacon
+pkg_rbeacon_name = rbeacon
+pkg_rbeacon_description = LAN discovery and presence in Erlang.
+pkg_rbeacon_homepage = https://github.com/refuge/rbeacon
+pkg_rbeacon_fetch = git
+pkg_rbeacon_repo = https://github.com/refuge/rbeacon
+pkg_rbeacon_commit = master
+
+PACKAGES += rebar
+pkg_rebar_name = rebar
+pkg_rebar_description = Erlang build tool that makes it easy to compile and test Erlang applications, port drivers and releases.
+pkg_rebar_homepage = http://www.rebar3.org
+pkg_rebar_fetch = git
+pkg_rebar_repo = https://github.com/rebar/rebar3
+pkg_rebar_commit = master
+
+PACKAGES += rebus
+pkg_rebus_name = rebus
+pkg_rebus_description = A stupid simple, internal, pub/sub event bus written in- and for Erlang.
+pkg_rebus_homepage = https://github.com/olle/rebus
+pkg_rebus_fetch = git
+pkg_rebus_repo = https://github.com/olle/rebus
+pkg_rebus_commit = master
+
+PACKAGES += rec2json
+pkg_rec2json_name = rec2json
+pkg_rec2json_description = Compile erlang record definitions into modules to convert them to/from json easily.
+pkg_rec2json_homepage = https://github.com/lordnull/rec2json
+pkg_rec2json_fetch = git
+pkg_rec2json_repo = https://github.com/lordnull/rec2json
+pkg_rec2json_commit = master
+
+PACKAGES += recon
+pkg_recon_name = recon
+pkg_recon_description = Collection of functions and scripts to debug Erlang in production.
+pkg_recon_homepage = https://github.com/ferd/recon
+pkg_recon_fetch = git
+pkg_recon_repo = https://github.com/ferd/recon
+pkg_recon_commit = master
+
+PACKAGES += record_info
+pkg_record_info_name = record_info
+pkg_record_info_description = Convert between record and proplist
+pkg_record_info_homepage = https://github.com/bipthelin/erlang-record_info
+pkg_record_info_fetch = git
+pkg_record_info_repo = https://github.com/bipthelin/erlang-record_info
+pkg_record_info_commit = master
+
+PACKAGES += redgrid
+pkg_redgrid_name = redgrid
+pkg_redgrid_description = automatic Erlang node discovery via redis
+pkg_redgrid_homepage = https://github.com/jkvor/redgrid
+pkg_redgrid_fetch = git
+pkg_redgrid_repo = https://github.com/jkvor/redgrid
+pkg_redgrid_commit = master
+
+PACKAGES += redo
+pkg_redo_name = redo
+pkg_redo_description = pipelined erlang redis client
+pkg_redo_homepage = https://github.com/jkvor/redo
+pkg_redo_fetch = git
+pkg_redo_repo = https://github.com/jkvor/redo
+pkg_redo_commit = master
+
+PACKAGES += reload_mk
+pkg_reload_mk_name = reload_mk
+pkg_reload_mk_description = Live reload plugin for erlang.mk.
+pkg_reload_mk_homepage = https://github.com/bullno1/reload.mk
+pkg_reload_mk_fetch = git
+pkg_reload_mk_repo = https://github.com/bullno1/reload.mk
+pkg_reload_mk_commit = master
+
+PACKAGES += reltool_util
+pkg_reltool_util_name = reltool_util
+pkg_reltool_util_description = Erlang reltool utility functionality application
+pkg_reltool_util_homepage = https://github.com/okeuday/reltool_util
+pkg_reltool_util_fetch = git
+pkg_reltool_util_repo = https://github.com/okeuday/reltool_util
+pkg_reltool_util_commit = master
+
+PACKAGES += relx
+pkg_relx_name = relx
+pkg_relx_description = Sane, simple release creation for Erlang
+pkg_relx_homepage = https://github.com/erlware/relx
+pkg_relx_fetch = git
+pkg_relx_repo = https://github.com/erlware/relx
+pkg_relx_commit = master
+
+PACKAGES += resource_discovery
+pkg_resource_discovery_name = resource_discovery
+pkg_resource_discovery_description = An application used to dynamically discover resources present in an Erlang node cluster.
+pkg_resource_discovery_homepage = http://erlware.org/
+pkg_resource_discovery_fetch = git
+pkg_resource_discovery_repo = https://github.com/erlware/resource_discovery
+pkg_resource_discovery_commit = master
+
+PACKAGES += restc
+pkg_restc_name = restc
+pkg_restc_description = Erlang REST client
+pkg_restc_homepage = https://github.com/kivra/restclient
+pkg_restc_fetch = git
+pkg_restc_repo = https://github.com/kivra/restclient
+pkg_restc_commit = master
+
+PACKAGES += rfc4627_jsonrpc
+pkg_rfc4627_jsonrpc_name = rfc4627_jsonrpc
+pkg_rfc4627_jsonrpc_description = Erlang RFC4627 (JSON) codec and JSON-RPC server implementation.
+pkg_rfc4627_jsonrpc_homepage = https://github.com/tonyg/erlang-rfc4627
+pkg_rfc4627_jsonrpc_fetch = git
+pkg_rfc4627_jsonrpc_repo = https://github.com/tonyg/erlang-rfc4627
+pkg_rfc4627_jsonrpc_commit = master
+
+PACKAGES += riak_control
+pkg_riak_control_name = riak_control
+pkg_riak_control_description = Webmachine-based administration interface for Riak.
+pkg_riak_control_homepage = https://github.com/basho/riak_control
+pkg_riak_control_fetch = git
+pkg_riak_control_repo = https://github.com/basho/riak_control
+pkg_riak_control_commit = master
+
+PACKAGES += riak_core
+pkg_riak_core_name = riak_core
+pkg_riak_core_description = Distributed systems infrastructure used by Riak.
+pkg_riak_core_homepage = https://github.com/basho/riak_core
+pkg_riak_core_fetch = git
+pkg_riak_core_repo = https://github.com/basho/riak_core
+pkg_riak_core_commit = master
+
+PACKAGES += riak_dt
+pkg_riak_dt_name = riak_dt
+pkg_riak_dt_description = Convergent replicated datatypes in Erlang
+pkg_riak_dt_homepage = https://github.com/basho/riak_dt
+pkg_riak_dt_fetch = git
+pkg_riak_dt_repo = https://github.com/basho/riak_dt
+pkg_riak_dt_commit = master
+
+PACKAGES += riak_ensemble
+pkg_riak_ensemble_name = riak_ensemble
+pkg_riak_ensemble_description = Multi-Paxos framework in Erlang
+pkg_riak_ensemble_homepage = https://github.com/basho/riak_ensemble
+pkg_riak_ensemble_fetch = git
+pkg_riak_ensemble_repo = https://github.com/basho/riak_ensemble
+pkg_riak_ensemble_commit = master
+
+PACKAGES += riak_kv
+pkg_riak_kv_name = riak_kv
+pkg_riak_kv_description = Riak Key/Value Store
+pkg_riak_kv_homepage = https://github.com/basho/riak_kv
+pkg_riak_kv_fetch = git
+pkg_riak_kv_repo = https://github.com/basho/riak_kv
+pkg_riak_kv_commit = master
+
+PACKAGES += riak_pg
+pkg_riak_pg_name = riak_pg
+pkg_riak_pg_description = Distributed process groups with riak_core.
+pkg_riak_pg_homepage = https://github.com/cmeiklejohn/riak_pg
+pkg_riak_pg_fetch = git
+pkg_riak_pg_repo = https://github.com/cmeiklejohn/riak_pg
+pkg_riak_pg_commit = master
+
+PACKAGES += riak_pipe
+pkg_riak_pipe_name = riak_pipe
+pkg_riak_pipe_description = Riak Pipelines
+pkg_riak_pipe_homepage = https://github.com/basho/riak_pipe
+pkg_riak_pipe_fetch = git
+pkg_riak_pipe_repo = https://github.com/basho/riak_pipe
+pkg_riak_pipe_commit = master
+
+PACKAGES += riak_sysmon
+pkg_riak_sysmon_name = riak_sysmon
+pkg_riak_sysmon_description = Simple OTP app for managing Erlang VM system_monitor event messages
+pkg_riak_sysmon_homepage = https://github.com/basho/riak_sysmon
+pkg_riak_sysmon_fetch = git
+pkg_riak_sysmon_repo = https://github.com/basho/riak_sysmon
+pkg_riak_sysmon_commit = master
+
+PACKAGES += riak_test
+pkg_riak_test_name = riak_test
+pkg_riak_test_description = I'm in your cluster, testing your riaks
+pkg_riak_test_homepage = https://github.com/basho/riak_test
+pkg_riak_test_fetch = git
+pkg_riak_test_repo = https://github.com/basho/riak_test
+pkg_riak_test_commit = master
+
+PACKAGES += riakc
+pkg_riakc_name = riakc
+pkg_riakc_description = Erlang clients for Riak.
+pkg_riakc_homepage = https://github.com/basho/riak-erlang-client
+pkg_riakc_fetch = git
+pkg_riakc_repo = https://github.com/basho/riak-erlang-client
+pkg_riakc_commit = master
+
+PACKAGES += riakhttpc
+pkg_riakhttpc_name = riakhttpc
+pkg_riakhttpc_description = Riak Erlang client using the HTTP interface
+pkg_riakhttpc_homepage = https://github.com/basho/riak-erlang-http-client
+pkg_riakhttpc_fetch = git
+pkg_riakhttpc_repo = https://github.com/basho/riak-erlang-http-client
+pkg_riakhttpc_commit = master
+
+PACKAGES += riaknostic
+pkg_riaknostic_name = riaknostic
+pkg_riaknostic_description = A diagnostic tool for Riak installations, to find common errors asap
+pkg_riaknostic_homepage = https://github.com/basho/riaknostic
+pkg_riaknostic_fetch = git
+pkg_riaknostic_repo = https://github.com/basho/riaknostic
+pkg_riaknostic_commit = master
+
+PACKAGES += riakpool
+pkg_riakpool_name = riakpool
+pkg_riakpool_description = erlang riak client pool
+pkg_riakpool_homepage = https://github.com/dweldon/riakpool
+pkg_riakpool_fetch = git
+pkg_riakpool_repo = https://github.com/dweldon/riakpool
+pkg_riakpool_commit = master
+
+PACKAGES += rivus_cep
+pkg_rivus_cep_name = rivus_cep
+pkg_rivus_cep_description = Complex event processing in Erlang
+pkg_rivus_cep_homepage = https://github.com/vascokk/rivus_cep
+pkg_rivus_cep_fetch = git
+pkg_rivus_cep_repo = https://github.com/vascokk/rivus_cep
+pkg_rivus_cep_commit = master
+
+PACKAGES += rlimit
+pkg_rlimit_name = rlimit
+pkg_rlimit_description = Magnus Klaar's rate limiter code from etorrent
+pkg_rlimit_homepage = https://github.com/jlouis/rlimit
+pkg_rlimit_fetch = git
+pkg_rlimit_repo = https://github.com/jlouis/rlimit
+pkg_rlimit_commit = master
+
+PACKAGES += rust_mk
+pkg_rust_mk_name = rust_mk
+pkg_rust_mk_description = Build Rust crates in an Erlang application
+pkg_rust_mk_homepage = https://github.com/goertzenator/rust.mk
+pkg_rust_mk_fetch = git
+pkg_rust_mk_repo = https://github.com/goertzenator/rust.mk
+pkg_rust_mk_commit = master
+
+PACKAGES += safetyvalve
+pkg_safetyvalve_name = safetyvalve
+pkg_safetyvalve_description = A safety valve for your erlang node
+pkg_safetyvalve_homepage = https://github.com/jlouis/safetyvalve
+pkg_safetyvalve_fetch = git
+pkg_safetyvalve_repo = https://github.com/jlouis/safetyvalve
+pkg_safetyvalve_commit = master
+
+PACKAGES += seestar
+pkg_seestar_name = seestar
+pkg_seestar_description = The Erlang client for Cassandra 1.2+ binary protocol
+pkg_seestar_homepage = https://github.com/iamaleksey/seestar
+pkg_seestar_fetch = git
+pkg_seestar_repo = https://github.com/iamaleksey/seestar
+pkg_seestar_commit = master
+
+PACKAGES += service
+pkg_service_name = service
+pkg_service_description = A minimal Erlang behavior for creating CloudI internal services
+pkg_service_homepage = http://cloudi.org/
+pkg_service_fetch = git
+pkg_service_repo = https://github.com/CloudI/service
+pkg_service_commit = master
+
+PACKAGES += setup
+pkg_setup_name = setup
+pkg_setup_description = Generic setup utility for Erlang-based systems
+pkg_setup_homepage = https://github.com/uwiger/setup
+pkg_setup_fetch = git
+pkg_setup_repo = https://github.com/uwiger/setup
+pkg_setup_commit = master
+
+PACKAGES += sext
+pkg_sext_name = sext
+pkg_sext_description = Sortable Erlang Term Serialization
+pkg_sext_homepage = https://github.com/uwiger/sext
+pkg_sext_fetch = git
+pkg_sext_repo = https://github.com/uwiger/sext
+pkg_sext_commit = master
+
+PACKAGES += sfmt
+pkg_sfmt_name = sfmt
+pkg_sfmt_description = SFMT pseudo random number generator for Erlang.
+pkg_sfmt_homepage = https://github.com/jj1bdx/sfmt-erlang
+pkg_sfmt_fetch = git
+pkg_sfmt_repo = https://github.com/jj1bdx/sfmt-erlang
+pkg_sfmt_commit = master
+
+PACKAGES += sgte
+pkg_sgte_name = sgte
+pkg_sgte_description = A simple Erlang Template Engine
+pkg_sgte_homepage = https://github.com/filippo/sgte
+pkg_sgte_fetch = git
+pkg_sgte_repo = https://github.com/filippo/sgte
+pkg_sgte_commit = master
+
+PACKAGES += sheriff
+pkg_sheriff_name = sheriff
+pkg_sheriff_description = Parse transform for type based validation.
+pkg_sheriff_homepage = http://ninenines.eu
+pkg_sheriff_fetch = git
+pkg_sheriff_repo = https://github.com/extend/sheriff
+pkg_sheriff_commit = master
+
+PACKAGES += shotgun
+pkg_shotgun_name = shotgun
+pkg_shotgun_description = better than just a gun
+pkg_shotgun_homepage = https://github.com/inaka/shotgun
+pkg_shotgun_fetch = git
+pkg_shotgun_repo = https://github.com/inaka/shotgun
+pkg_shotgun_commit = master
+
+PACKAGES += sidejob
+pkg_sidejob_name = sidejob
+pkg_sidejob_description = Parallel worker and capacity limiting library for Erlang
+pkg_sidejob_homepage = https://github.com/basho/sidejob
+pkg_sidejob_fetch = git
+pkg_sidejob_repo = https://github.com/basho/sidejob
+pkg_sidejob_commit = master
+
+PACKAGES += sieve
+pkg_sieve_name = sieve
+pkg_sieve_description = sieve is a simple TCP routing proxy (layer 7) in erlang
+pkg_sieve_homepage = https://github.com/benoitc/sieve
+pkg_sieve_fetch = git
+pkg_sieve_repo = https://github.com/benoitc/sieve
+pkg_sieve_commit = master
+
+PACKAGES += sighandler
+pkg_sighandler_name = sighandler
+pkg_sighandler_description = Handle UNIX signals in Erlang
+pkg_sighandler_homepage = https://github.com/jkingsbery/sighandler
+pkg_sighandler_fetch = git
+pkg_sighandler_repo = https://github.com/jkingsbery/sighandler
+pkg_sighandler_commit = master
+
+PACKAGES += simhash
+pkg_simhash_name = simhash
+pkg_simhash_description = Simhashing for Erlang -- hashing algorithm to find near-duplicates in binary data.
+pkg_simhash_homepage = https://github.com/ferd/simhash
+pkg_simhash_fetch = git
+pkg_simhash_repo = https://github.com/ferd/simhash
+pkg_simhash_commit = master
+
+PACKAGES += simple_bridge
+pkg_simple_bridge_name = simple_bridge
+pkg_simple_bridge_description = A simple, standardized interface library to Erlang HTTP Servers.
+pkg_simple_bridge_homepage = https://github.com/nitrogen/simple_bridge
+pkg_simple_bridge_fetch = git
+pkg_simple_bridge_repo = https://github.com/nitrogen/simple_bridge
+pkg_simple_bridge_commit = master
+
+PACKAGES += simple_oauth2
+pkg_simple_oauth2_name = simple_oauth2
+pkg_simple_oauth2_description = Simple erlang OAuth2 client module for any http server framework (Google, Facebook, Yandex, Vkontakte are preconfigured)
+pkg_simple_oauth2_homepage = https://github.com/virtan/simple_oauth2
+pkg_simple_oauth2_fetch = git
+pkg_simple_oauth2_repo = https://github.com/virtan/simple_oauth2
+pkg_simple_oauth2_commit = master
+
+PACKAGES += skel
+pkg_skel_name = skel
+pkg_skel_description = A Streaming Process-based Skeleton Library for Erlang
+pkg_skel_homepage = https://github.com/ParaPhrase/skel
+pkg_skel_fetch = git
+pkg_skel_repo = https://github.com/ParaPhrase/skel
+pkg_skel_commit = master
+
+PACKAGES += slack
+pkg_slack_name = slack
+pkg_slack_description = Minimal slack notification OTP library.
+pkg_slack_homepage = https://github.com/DonBranson/slack
+pkg_slack_fetch = git
+pkg_slack_repo = https://github.com/DonBranson/slack.git
+pkg_slack_commit = master
+
+PACKAGES += smother
+pkg_smother_name = smother
+pkg_smother_description = Extended code coverage metrics for Erlang.
+pkg_smother_homepage = https://ramsay-t.github.io/Smother/
+pkg_smother_fetch = git
+pkg_smother_repo = https://github.com/ramsay-t/Smother
+pkg_smother_commit = master
+
+PACKAGES += snappyer
+pkg_snappyer_name = snappyer
+pkg_snappyer_description = Snappy as nif for Erlang
+pkg_snappyer_homepage = https://github.com/zmstone/snappyer
+pkg_snappyer_fetch = git
+pkg_snappyer_repo = https://github.com/zmstone/snappyer.git
+pkg_snappyer_commit = master
+
+PACKAGES += social
+pkg_social_name = social
+pkg_social_description = Cowboy handler for social login via OAuth2 providers
+pkg_social_homepage = https://github.com/dvv/social
+pkg_social_fetch = git
+pkg_social_repo = https://github.com/dvv/social
+pkg_social_commit = master
+
+PACKAGES += spapi_router
+pkg_spapi_router_name = spapi_router
+pkg_spapi_router_description = Partially-connected Erlang clustering
+pkg_spapi_router_homepage = https://github.com/spilgames/spapi-router
+pkg_spapi_router_fetch = git
+pkg_spapi_router_repo = https://github.com/spilgames/spapi-router
+pkg_spapi_router_commit = master
+
+PACKAGES += sqerl
+pkg_sqerl_name = sqerl
+pkg_sqerl_description = An Erlang-flavoured SQL DSL
+pkg_sqerl_homepage = https://github.com/hairyhum/sqerl
+pkg_sqerl_fetch = git
+pkg_sqerl_repo = https://github.com/hairyhum/sqerl
+pkg_sqerl_commit = master
+
+PACKAGES += srly
+pkg_srly_name = srly
+pkg_srly_description = Native Erlang Unix serial interface
+pkg_srly_homepage = https://github.com/msantos/srly
+pkg_srly_fetch = git
+pkg_srly_repo = https://github.com/msantos/srly
+pkg_srly_commit = master
+
+PACKAGES += sshrpc
+pkg_sshrpc_name = sshrpc
+pkg_sshrpc_description = Erlang SSH RPC module (experimental)
+pkg_sshrpc_homepage = https://github.com/jj1bdx/sshrpc
+pkg_sshrpc_fetch = git
+pkg_sshrpc_repo = https://github.com/jj1bdx/sshrpc
+pkg_sshrpc_commit = master
+
+PACKAGES += stable
+pkg_stable_name = stable
+pkg_stable_description = Library of assorted helpers for Cowboy web server.
+pkg_stable_homepage = https://github.com/dvv/stable
+pkg_stable_fetch = git
+pkg_stable_repo = https://github.com/dvv/stable
+pkg_stable_commit = master
+
+PACKAGES += statebox
+pkg_statebox_name = statebox
+pkg_statebox_description = Erlang state monad with merge/conflict-resolution capabilities. Useful for Riak.
+pkg_statebox_homepage = https://github.com/mochi/statebox
+pkg_statebox_fetch = git
+pkg_statebox_repo = https://github.com/mochi/statebox
+pkg_statebox_commit = master
+
+PACKAGES += statebox_riak
+pkg_statebox_riak_name = statebox_riak
+pkg_statebox_riak_description = Convenience library that makes it easier to use statebox with riak, extracted from best practices in our production code at Mochi Media.
+pkg_statebox_riak_homepage = https://github.com/mochi/statebox_riak
+pkg_statebox_riak_fetch = git
+pkg_statebox_riak_repo = https://github.com/mochi/statebox_riak
+pkg_statebox_riak_commit = master
+
+PACKAGES += statman
+pkg_statman_name = statman
+pkg_statman_description = Efficiently collect massive volumes of metrics inside the Erlang VM
+pkg_statman_homepage = https://github.com/knutin/statman
+pkg_statman_fetch = git
+pkg_statman_repo = https://github.com/knutin/statman
+pkg_statman_commit = master
+
+PACKAGES += statsderl
+pkg_statsderl_name = statsderl
+pkg_statsderl_description = StatsD client (erlang)
+pkg_statsderl_homepage = https://github.com/lpgauth/statsderl
+pkg_statsderl_fetch = git
+pkg_statsderl_repo = https://github.com/lpgauth/statsderl
+pkg_statsderl_commit = master
+
+PACKAGES += stdinout_pool
+pkg_stdinout_pool_name = stdinout_pool
+pkg_stdinout_pool_description = stdinout_pool: stuff goes in, stuff goes out. there's never any miscommunication.
+pkg_stdinout_pool_homepage = https://github.com/mattsta/erlang-stdinout-pool
+pkg_stdinout_pool_fetch = git
+pkg_stdinout_pool_repo = https://github.com/mattsta/erlang-stdinout-pool
+pkg_stdinout_pool_commit = master
+
+PACKAGES += stockdb
+pkg_stockdb_name = stockdb
+pkg_stockdb_description = Database for storing Stock Exchange quotes in erlang
+pkg_stockdb_homepage = https://github.com/maxlapshin/stockdb
+pkg_stockdb_fetch = git
+pkg_stockdb_repo = https://github.com/maxlapshin/stockdb
+pkg_stockdb_commit = master
+
+PACKAGES += stripe
+pkg_stripe_name = stripe
+pkg_stripe_description = Erlang interface to the stripe.com API
+pkg_stripe_homepage = https://github.com/mattsta/stripe-erlang
+pkg_stripe_fetch = git
+pkg_stripe_repo = https://github.com/mattsta/stripe-erlang
+pkg_stripe_commit = v1
+
+PACKAGES += subproc
+pkg_subproc_name = subproc
+pkg_subproc_description = unix subprocess manager with {active,once|false} modes
+pkg_subproc_homepage = http://dozzie.jarowit.net/trac/wiki/subproc
+pkg_subproc_fetch = git
+pkg_subproc_repo = https://github.com/dozzie/subproc
+pkg_subproc_commit = v0.1.0
+
+PACKAGES += supervisor3
+pkg_supervisor3_name = supervisor3
+pkg_supervisor3_description = OTP supervisor with additional strategies
+pkg_supervisor3_homepage = https://github.com/klarna/supervisor3
+pkg_supervisor3_fetch = git
+pkg_supervisor3_repo = https://github.com/klarna/supervisor3.git
+pkg_supervisor3_commit = master
+
+PACKAGES += surrogate
+pkg_surrogate_name = surrogate
+pkg_surrogate_description = Proxy server written in erlang. Supports reverse proxy load balancing and forward proxy with http (including CONNECT), socks4, socks5, and transparent proxy modes.
+pkg_surrogate_homepage = https://github.com/skruger/Surrogate
+pkg_surrogate_fetch = git
+pkg_surrogate_repo = https://github.com/skruger/Surrogate
+pkg_surrogate_commit = master
+
+PACKAGES += swab
+pkg_swab_name = swab
+pkg_swab_description = General purpose buffer handling module
+pkg_swab_homepage = https://github.com/crownedgrouse/swab
+pkg_swab_fetch = git
+pkg_swab_repo = https://github.com/crownedgrouse/swab
+pkg_swab_commit = master
+
+PACKAGES += swarm
+pkg_swarm_name = swarm
+pkg_swarm_description = Fast and simple acceptor pool for Erlang
+pkg_swarm_homepage = https://github.com/jeremey/swarm
+pkg_swarm_fetch = git
+pkg_swarm_repo = https://github.com/jeremey/swarm
+pkg_swarm_commit = master
+
+PACKAGES += switchboard
+pkg_switchboard_name = switchboard
+pkg_switchboard_description = A framework for processing email using worker plugins.
+pkg_switchboard_homepage = https://github.com/thusfresh/switchboard
+pkg_switchboard_fetch = git
+pkg_switchboard_repo = https://github.com/thusfresh/switchboard
+pkg_switchboard_commit = master
+
+PACKAGES += syn
+pkg_syn_name = syn
+pkg_syn_description = A global Process Registry and Process Group manager for Erlang.
+pkg_syn_homepage = https://github.com/ostinelli/syn
+pkg_syn_fetch = git
+pkg_syn_repo = https://github.com/ostinelli/syn
+pkg_syn_commit = master
+
+PACKAGES += sync
+pkg_sync_name = sync
+pkg_sync_description = On-the-fly recompiling and reloading in Erlang.
+pkg_sync_homepage = https://github.com/rustyio/sync
+pkg_sync_fetch = git
+pkg_sync_repo = https://github.com/rustyio/sync
+pkg_sync_commit = master
+
+PACKAGES += syntaxerl
+pkg_syntaxerl_name = syntaxerl
+pkg_syntaxerl_description = Syntax checker for Erlang
+pkg_syntaxerl_homepage = https://github.com/ten0s/syntaxerl
+pkg_syntaxerl_fetch = git
+pkg_syntaxerl_repo = https://github.com/ten0s/syntaxerl
+pkg_syntaxerl_commit = master
+
+PACKAGES += syslog
+pkg_syslog_name = syslog
+pkg_syslog_description = Erlang port driver for interacting with syslog via syslog(3)
+pkg_syslog_homepage = https://github.com/Vagabond/erlang-syslog
+pkg_syslog_fetch = git
+pkg_syslog_repo = https://github.com/Vagabond/erlang-syslog
+pkg_syslog_commit = master
+
+PACKAGES += taskforce
+pkg_taskforce_name = taskforce
+pkg_taskforce_description = Erlang worker pools for controlled parallelisation of arbitrary tasks.
+pkg_taskforce_homepage = https://github.com/g-andrade/taskforce
+pkg_taskforce_fetch = git
+pkg_taskforce_repo = https://github.com/g-andrade/taskforce
+pkg_taskforce_commit = master
+
+PACKAGES += tddreloader
+pkg_tddreloader_name = tddreloader
+pkg_tddreloader_description = Shell utility for recompiling, reloading, and testing code as it changes
+pkg_tddreloader_homepage = https://github.com/version2beta/tddreloader
+pkg_tddreloader_fetch = git
+pkg_tddreloader_repo = https://github.com/version2beta/tddreloader
+pkg_tddreloader_commit = master
+
+PACKAGES += tempo
+pkg_tempo_name = tempo
+pkg_tempo_description = NIF-based date and time parsing and formatting for Erlang.
+pkg_tempo_homepage = https://github.com/selectel/tempo
+pkg_tempo_fetch = git
+pkg_tempo_repo = https://github.com/selectel/tempo
+pkg_tempo_commit = master
+
+PACKAGES += ticktick
+pkg_ticktick_name = ticktick
+pkg_ticktick_description = Ticktick is an id generator for message service.
+pkg_ticktick_homepage = https://github.com/ericliang/ticktick
+pkg_ticktick_fetch = git
+pkg_ticktick_repo = https://github.com/ericliang/ticktick
+pkg_ticktick_commit = master
+
+PACKAGES += tinymq
+pkg_tinymq_name = tinymq
+pkg_tinymq_description = TinyMQ - a diminutive, in-memory message queue
+pkg_tinymq_homepage = https://github.com/ChicagoBoss/tinymq
+pkg_tinymq_fetch = git
+pkg_tinymq_repo = https://github.com/ChicagoBoss/tinymq
+pkg_tinymq_commit = master
+
+PACKAGES += tinymt
+pkg_tinymt_name = tinymt
+pkg_tinymt_description = TinyMT pseudo random number generator for Erlang.
+pkg_tinymt_homepage = https://github.com/jj1bdx/tinymt-erlang
+pkg_tinymt_fetch = git
+pkg_tinymt_repo = https://github.com/jj1bdx/tinymt-erlang
+pkg_tinymt_commit = master
+
+PACKAGES += tirerl
+pkg_tirerl_name = tirerl
+pkg_tirerl_description = Erlang interface to Elastic Search
+pkg_tirerl_homepage = https://github.com/inaka/tirerl
+pkg_tirerl_fetch = git
+pkg_tirerl_repo = https://github.com/inaka/tirerl
+pkg_tirerl_commit = master
+
+PACKAGES += toml
+pkg_toml_name = toml
+pkg_toml_description = TOML (0.4.0) config parser
+pkg_toml_homepage = http://dozzie.jarowit.net/trac/wiki/TOML
+pkg_toml_fetch = git
+pkg_toml_repo = https://github.com/dozzie/toml
+pkg_toml_commit = v0.2.0
+
+PACKAGES += traffic_tools
+pkg_traffic_tools_name = traffic_tools
+pkg_traffic_tools_description = Simple traffic limiting library
+pkg_traffic_tools_homepage = https://github.com/systra/traffic_tools
+pkg_traffic_tools_fetch = git
+pkg_traffic_tools_repo = https://github.com/systra/traffic_tools
+pkg_traffic_tools_commit = master
+
+PACKAGES += trails
+pkg_trails_name = trails
+pkg_trails_description = A couple of improvements over Cowboy Routes
+pkg_trails_homepage = http://inaka.github.io/cowboy-trails/
+pkg_trails_fetch = git
+pkg_trails_repo = https://github.com/inaka/cowboy-trails
+pkg_trails_commit = master
+
+PACKAGES += trane
+pkg_trane_name = trane
+pkg_trane_description = SAX-style parser for broken HTML, in Erlang
+pkg_trane_homepage = https://github.com/massemanet/trane
+pkg_trane_fetch = git
+pkg_trane_repo = https://github.com/massemanet/trane
+pkg_trane_commit = master
+
+PACKAGES += transit
+pkg_transit_name = transit
+pkg_transit_description = transit format for erlang
+pkg_transit_homepage = https://github.com/isaiah/transit-erlang
+pkg_transit_fetch = git
+pkg_transit_repo = https://github.com/isaiah/transit-erlang
+pkg_transit_commit = master
+
+PACKAGES += trie
+pkg_trie_name = trie
+pkg_trie_description = Erlang Trie Implementation
+pkg_trie_homepage = https://github.com/okeuday/trie
+pkg_trie_fetch = git
+pkg_trie_repo = https://github.com/okeuday/trie
+pkg_trie_commit = master
+
+PACKAGES += triq
+pkg_triq_name = triq
+pkg_triq_description = Trifork QuickCheck
+pkg_triq_homepage = https://triq.gitlab.io
+pkg_triq_fetch = git
+pkg_triq_repo = https://gitlab.com/triq/triq.git
+pkg_triq_commit = master
+
+PACKAGES += tunctl
+pkg_tunctl_name = tunctl
+pkg_tunctl_description = Erlang TUN/TAP interface
+pkg_tunctl_homepage = https://github.com/msantos/tunctl
+pkg_tunctl_fetch = git
+pkg_tunctl_repo = https://github.com/msantos/tunctl
+pkg_tunctl_commit = master
+
+PACKAGES += twerl
+pkg_twerl_name = twerl
+pkg_twerl_description = Erlang client for the Twitter Streaming API
+pkg_twerl_homepage = https://github.com/lucaspiller/twerl
+pkg_twerl_fetch = git
+pkg_twerl_repo = https://github.com/lucaspiller/twerl
+pkg_twerl_commit = oauth
+
+PACKAGES += twitter_erlang
+pkg_twitter_erlang_name = twitter_erlang
+pkg_twitter_erlang_description = An Erlang twitter client
+pkg_twitter_erlang_homepage = https://github.com/ngerakines/erlang_twitter
+pkg_twitter_erlang_fetch = git
+pkg_twitter_erlang_repo = https://github.com/ngerakines/erlang_twitter
+pkg_twitter_erlang_commit = master
+
+PACKAGES += ucol_nif
+pkg_ucol_nif_name = ucol_nif
+pkg_ucol_nif_description = ICU based collation Erlang module
+pkg_ucol_nif_homepage = https://github.com/refuge/ucol_nif
+pkg_ucol_nif_fetch = git
+pkg_ucol_nif_repo = https://github.com/refuge/ucol_nif
+pkg_ucol_nif_commit = master
+
+PACKAGES += unicorn
+pkg_unicorn_name = unicorn
+pkg_unicorn_description = Generic configuration server
+pkg_unicorn_homepage = https://github.com/shizzard/unicorn
+pkg_unicorn_fetch = git
+pkg_unicorn_repo = https://github.com/shizzard/unicorn
+pkg_unicorn_commit = master
+
+PACKAGES += unsplit
+pkg_unsplit_name = unsplit
+pkg_unsplit_description = Resolves conflicts in Mnesia after network splits
+pkg_unsplit_homepage = https://github.com/uwiger/unsplit
+pkg_unsplit_fetch = git
+pkg_unsplit_repo = https://github.com/uwiger/unsplit
+pkg_unsplit_commit = master
+
+PACKAGES += uuid
+pkg_uuid_name = uuid
+pkg_uuid_description = Erlang UUID Implementation
+pkg_uuid_homepage = https://github.com/okeuday/uuid
+pkg_uuid_fetch = git
+pkg_uuid_repo = https://github.com/okeuday/uuid
+pkg_uuid_commit = master
+
+PACKAGES += ux
+pkg_ux_name = ux
+pkg_ux_description = Unicode eXtension for Erlang (Strings, Collation)
+pkg_ux_homepage = https://github.com/erlang-unicode/ux
+pkg_ux_fetch = git
+pkg_ux_repo = https://github.com/erlang-unicode/ux
+pkg_ux_commit = master
+
+PACKAGES += vert
+pkg_vert_name = vert
+pkg_vert_description = Erlang binding to libvirt virtualization API
+pkg_vert_homepage = https://github.com/msantos/erlang-libvirt
+pkg_vert_fetch = git
+pkg_vert_repo = https://github.com/msantos/erlang-libvirt
+pkg_vert_commit = master
+
+PACKAGES += verx
+pkg_verx_name = verx
+pkg_verx_description = Erlang implementation of the libvirtd remote protocol
+pkg_verx_homepage = https://github.com/msantos/verx
+pkg_verx_fetch = git
+pkg_verx_repo = https://github.com/msantos/verx
+pkg_verx_commit = master
+
+PACKAGES += vmq_acl
+pkg_vmq_acl_name = vmq_acl
+pkg_vmq_acl_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_acl_homepage = https://verne.mq/
+pkg_vmq_acl_fetch = git
+pkg_vmq_acl_repo = https://github.com/erlio/vmq_acl
+pkg_vmq_acl_commit = master
+
+PACKAGES += vmq_bridge
+pkg_vmq_bridge_name = vmq_bridge
+pkg_vmq_bridge_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_bridge_homepage = https://verne.mq/
+pkg_vmq_bridge_fetch = git
+pkg_vmq_bridge_repo = https://github.com/erlio/vmq_bridge
+pkg_vmq_bridge_commit = master
+
+PACKAGES += vmq_graphite
+pkg_vmq_graphite_name = vmq_graphite
+pkg_vmq_graphite_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_graphite_homepage = https://verne.mq/
+pkg_vmq_graphite_fetch = git
+pkg_vmq_graphite_repo = https://github.com/erlio/vmq_graphite
+pkg_vmq_graphite_commit = master
+
+PACKAGES += vmq_passwd
+pkg_vmq_passwd_name = vmq_passwd
+pkg_vmq_passwd_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_passwd_homepage = https://verne.mq/
+pkg_vmq_passwd_fetch = git
+pkg_vmq_passwd_repo = https://github.com/erlio/vmq_passwd
+pkg_vmq_passwd_commit = master
+
+PACKAGES += vmq_server
+pkg_vmq_server_name = vmq_server
+pkg_vmq_server_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_server_homepage = https://verne.mq/
+pkg_vmq_server_fetch = git
+pkg_vmq_server_repo = https://github.com/erlio/vmq_server
+pkg_vmq_server_commit = master
+
+PACKAGES += vmq_snmp
+pkg_vmq_snmp_name = vmq_snmp
+pkg_vmq_snmp_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_snmp_homepage = https://verne.mq/
+pkg_vmq_snmp_fetch = git
+pkg_vmq_snmp_repo = https://github.com/erlio/vmq_snmp
+pkg_vmq_snmp_commit = master
+
+PACKAGES += vmq_systree
+pkg_vmq_systree_name = vmq_systree
+pkg_vmq_systree_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_systree_homepage = https://verne.mq/
+pkg_vmq_systree_fetch = git
+pkg_vmq_systree_repo = https://github.com/erlio/vmq_systree
+pkg_vmq_systree_commit = master
+
+PACKAGES += vmstats
+pkg_vmstats_name = vmstats
+pkg_vmstats_description = tiny Erlang app that works in conjunction with statsderl in order to generate information on the Erlang VM for graphite logs.
+pkg_vmstats_homepage = https://github.com/ferd/vmstats
+pkg_vmstats_fetch = git
+pkg_vmstats_repo = https://github.com/ferd/vmstats
+pkg_vmstats_commit = master
+
+PACKAGES += walrus
+pkg_walrus_name = walrus
+pkg_walrus_description = Walrus - Mustache-like Templating
+pkg_walrus_homepage = https://github.com/devinus/walrus
+pkg_walrus_fetch = git
+pkg_walrus_repo = https://github.com/devinus/walrus
+pkg_walrus_commit = master
+
+PACKAGES += webmachine
+pkg_webmachine_name = webmachine
+pkg_webmachine_description = A REST-based system for building web applications.
+pkg_webmachine_homepage = https://github.com/basho/webmachine
+pkg_webmachine_fetch = git
+pkg_webmachine_repo = https://github.com/basho/webmachine
+pkg_webmachine_commit = master
+
+PACKAGES += websocket_client
+pkg_websocket_client_name = websocket_client
+pkg_websocket_client_description = Erlang websocket client (ws and wss supported)
+pkg_websocket_client_homepage = https://github.com/jeremyong/websocket_client
+pkg_websocket_client_fetch = git
+pkg_websocket_client_repo = https://github.com/jeremyong/websocket_client
+pkg_websocket_client_commit = master
+
+PACKAGES += worker_pool
+pkg_worker_pool_name = worker_pool
+pkg_worker_pool_description = a simple erlang worker pool
+pkg_worker_pool_homepage = https://github.com/inaka/worker_pool
+pkg_worker_pool_fetch = git
+pkg_worker_pool_repo = https://github.com/inaka/worker_pool
+pkg_worker_pool_commit = master
+
+PACKAGES += wrangler
+pkg_wrangler_name = wrangler
+pkg_wrangler_description = Import of the Wrangler svn repository.
+pkg_wrangler_homepage = http://www.cs.kent.ac.uk/projects/wrangler/Home.html
+pkg_wrangler_fetch = git
+pkg_wrangler_repo = https://github.com/RefactoringTools/wrangler
+pkg_wrangler_commit = master
+
+PACKAGES += wsock
+pkg_wsock_name = wsock
+pkg_wsock_description = Erlang library to build WebSocket clients and servers
+pkg_wsock_homepage = https://github.com/madtrick/wsock
+pkg_wsock_fetch = git
+pkg_wsock_repo = https://github.com/madtrick/wsock
+pkg_wsock_commit = master
+
+PACKAGES += xhttpc
+pkg_xhttpc_name = xhttpc
+pkg_xhttpc_description = Extensible HTTP Client for Erlang
+pkg_xhttpc_homepage = https://github.com/seriyps/xhttpc
+pkg_xhttpc_fetch = git
+pkg_xhttpc_repo = https://github.com/seriyps/xhttpc
+pkg_xhttpc_commit = master
+
+PACKAGES += xref_runner
+pkg_xref_runner_name = xref_runner
+pkg_xref_runner_description = Erlang Xref Runner (inspired by rebar xref)
+pkg_xref_runner_homepage = https://github.com/inaka/xref_runner
+pkg_xref_runner_fetch = git
+pkg_xref_runner_repo = https://github.com/inaka/xref_runner
+pkg_xref_runner_commit = master
+
+PACKAGES += yamerl
+pkg_yamerl_name = yamerl
+pkg_yamerl_description = YAML 1.2 parser in pure Erlang
+pkg_yamerl_homepage = https://github.com/yakaz/yamerl
+pkg_yamerl_fetch = git
+pkg_yamerl_repo = https://github.com/yakaz/yamerl
+pkg_yamerl_commit = master
+
+PACKAGES += yamler
+pkg_yamler_name = yamler
+pkg_yamler_description = libyaml-based yaml loader for Erlang
+pkg_yamler_homepage = https://github.com/goertzenator/yamler
+pkg_yamler_fetch = git
+pkg_yamler_repo = https://github.com/goertzenator/yamler
+pkg_yamler_commit = master
+
+PACKAGES += yaws
+pkg_yaws_name = yaws
+pkg_yaws_description = Yaws webserver
+pkg_yaws_homepage = http://yaws.hyber.org
+pkg_yaws_fetch = git
+pkg_yaws_repo = https://github.com/klacke/yaws
+pkg_yaws_commit = master
+
+PACKAGES += zab_engine
+pkg_zab_engine_name = zab_engine
+pkg_zab_engine_description = Zab protocol implemented in Erlang
+pkg_zab_engine_homepage = https://github.com/xinmingyao/zab_engine
+pkg_zab_engine_fetch = git
+pkg_zab_engine_repo = https://github.com/xinmingyao/zab_engine
+pkg_zab_engine_commit = master
+
+PACKAGES += zabbix_sender
+pkg_zabbix_sender_name = zabbix_sender
+pkg_zabbix_sender_description = Zabbix trapper for sending data to Zabbix in pure Erlang
+pkg_zabbix_sender_homepage = https://github.com/stalkermn/zabbix_sender
+pkg_zabbix_sender_fetch = git
+pkg_zabbix_sender_repo = https://github.com/stalkermn/zabbix_sender.git
+pkg_zabbix_sender_commit = master
+
+PACKAGES += zeta
+pkg_zeta_name = zeta
+pkg_zeta_description = HTTP access log parser in Erlang
+pkg_zeta_homepage = https://github.com/s1n4/zeta
+pkg_zeta_fetch = git
+pkg_zeta_repo = https://github.com/s1n4/zeta
+pkg_zeta_commit = master
+
+PACKAGES += zippers
+pkg_zippers_name = zippers
+pkg_zippers_description = A library for functional zipper data structures in Erlang.
+pkg_zippers_homepage = https://github.com/ferd/zippers
+pkg_zippers_fetch = git
+pkg_zippers_repo = https://github.com/ferd/zippers
+pkg_zippers_commit = master
+
+PACKAGES += zlists
+pkg_zlists_name = zlists
+pkg_zlists_description = Erlang lazy lists library.
+pkg_zlists_homepage = https://github.com/vjache/erlang-zlists
+pkg_zlists_fetch = git
+pkg_zlists_repo = https://github.com/vjache/erlang-zlists
+pkg_zlists_commit = master
+
+PACKAGES += zraft_lib
+pkg_zraft_lib_name = zraft_lib
+pkg_zraft_lib_description = Erlang raft consensus protocol implementation
+pkg_zraft_lib_homepage = https://github.com/dreyk/zraft_lib
+pkg_zraft_lib_fetch = git
+pkg_zraft_lib_repo = https://github.com/dreyk/zraft_lib
+pkg_zraft_lib_commit = master
+
+PACKAGES += zucchini
+pkg_zucchini_name = zucchini
+pkg_zucchini_description = An Erlang INI parser
+pkg_zucchini_homepage = https://github.com/devinus/zucchini
+pkg_zucchini_fetch = git
+pkg_zucchini_repo = https://github.com/devinus/zucchini
+pkg_zucchini_commit = master
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: search
+
+define pkg_print
+ $(verbose) printf "%s\n" \
+ $(if $(call core_eq,$(1),$(pkg_$(1)_name)),,"Pkg name: $(1)") \
+ "App name: $(pkg_$(1)_name)" \
+ "Description: $(pkg_$(1)_description)" \
+ "Home page: $(pkg_$(1)_homepage)" \
+ "Fetch with: $(pkg_$(1)_fetch)" \
+ "Repository: $(pkg_$(1)_repo)" \
+ "Commit: $(pkg_$(1)_commit)" \
+ ""
+
+endef
+
+search:
+ifdef q
+ $(foreach p,$(PACKAGES), \
+ $(if $(findstring $(call core_lc,$(q)),$(call core_lc,$(pkg_$(p)_name) $(pkg_$(p)_description))), \
+ $(call pkg_print,$(p))))
+else
+ $(foreach p,$(PACKAGES),$(call pkg_print,$(p)))
+endif
+
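+# Usage example (illustrative; "pool" is an arbitrary query string):
+#   make search q=pool
+# lists every package whose name or description contains "pool";
+# a plain "make search" prints the whole index.
+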
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-deps clean-tmp-deps.log
+
+# Configuration.
+
+ifdef OTP_DEPS
+$(warning The variable OTP_DEPS is deprecated in favor of LOCAL_DEPS.)
+endif
+
+IGNORE_DEPS ?=
+export IGNORE_DEPS
+
+APPS_DIR ?= $(CURDIR)/apps
+export APPS_DIR
+
+DEPS_DIR ?= $(CURDIR)/deps
+export DEPS_DIR
+
+REBAR_DEPS_DIR = $(DEPS_DIR)
+export REBAR_DEPS_DIR
+
+REBAR_GIT ?= https://github.com/rebar/rebar
+REBAR_COMMIT ?= 576e12171ab8d69b048b827b92aa65d067deea01
+
+# External "early" plugins (see core/plugins.mk for regular plugins).
+# Both early and regular plugins use the core_dep_plugin macro.
+
+define core_dep_plugin
+ifeq ($(2),$(PROJECT))
+-include $$(patsubst $(PROJECT)/%,%,$(1))
+else
+-include $(DEPS_DIR)/$(1)
+
+$(DEPS_DIR)/$(1): $(DEPS_DIR)/$(2) ;
+endif
+endef
+
+DEP_EARLY_PLUGINS ?=
+
+$(foreach p,$(DEP_EARLY_PLUGINS),\
+ $(eval $(if $(findstring /,$p),\
+ $(call core_dep_plugin,$p,$(firstword $(subst /, ,$p))),\
+ $(call core_dep_plugin,$p/early-plugins.mk,$p))))
+
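+# Usage sketch (hypothetical names): a bare dependency name loads that
+# dependency's early-plugins.mk, while a path picks a specific file:
+#   DEP_EARLY_PLUGINS = my_dep
+#   DEP_EARLY_PLUGINS = my_dep/mk/early.mk
+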
+# Query functions.
+
+query_fetch_method = $(if $(dep_$(1)),$(call _qfm_dep,$(word 1,$(dep_$(1)))),$(call _qfm_pkg,$(1)))
+_qfm_dep = $(if $(dep_fetch_$(1)),$(1),$(if $(IS_DEP),legacy,fail))
+_qfm_pkg = $(if $(pkg_$(1)_fetch),$(pkg_$(1)_fetch),fail)
+
+query_name = $(if $(dep_$(1)),$(1),$(if $(pkg_$(1)_name),$(pkg_$(1)_name),$(1)))
+
+query_repo = $(call _qr,$(1),$(call query_fetch_method,$(1)))
+_qr = $(if $(query_repo_$(2)),$(call query_repo_$(2),$(1)),$(call dep_repo,$(1)))
+
+query_repo_default = $(if $(dep_$(1)),$(word 2,$(dep_$(1))),$(pkg_$(1)_repo))
+query_repo_git = $(patsubst git://github.com/%,https://github.com/%,$(call query_repo_default,$(1)))
+query_repo_git-subfolder = $(call query_repo_git,$(1))
+query_repo_git-submodule = -
+query_repo_hg = $(call query_repo_default,$(1))
+query_repo_svn = $(call query_repo_default,$(1))
+query_repo_cp = $(call query_repo_default,$(1))
+query_repo_ln = $(call query_repo_default,$(1))
+query_repo_hex = https://hex.pm/packages/$(if $(word 3,$(dep_$(1))),$(word 3,$(dep_$(1))),$(1))
+query_repo_fail = -
+query_repo_legacy = -
+
+query_version = $(call _qv,$(1),$(call query_fetch_method,$(1)))
+_qv = $(if $(query_version_$(2)),$(call query_version_$(2),$(1)),$(call dep_commit,$(1)))
+
+query_version_default = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(word 3,$(dep_$(1))),$(pkg_$(1)_commit)))
+query_version_git = $(call query_version_default,$(1))
+query_version_git-subfolder = $(call query_version_git,$(1))
+query_version_git-submodule = -
+query_version_hg = $(call query_version_default,$(1))
+query_version_svn = -
+query_version_cp = -
+query_version_ln = -
+query_version_hex = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(word 2,$(dep_$(1))),$(pkg_$(1)_commit)))
+query_version_fail = -
+query_version_legacy = -
+
+query_extra = $(call _qe,$(1),$(call query_fetch_method,$(1)))
+_qe = $(if $(query_extra_$(2)),$(call query_extra_$(2),$(1)),-)
+
+query_extra_git = -
+query_extra_git-subfolder = $(if $(dep_$(1)),subfolder=$(word 4,$(dep_$(1))),-)
+query_extra_git-submodule = -
+query_extra_hg = -
+query_extra_svn = -
+query_extra_cp = -
+query_extra_ln = -
+query_extra_hex = $(if $(dep_$(1)),package-name=$(word 3,$(dep_$(1))),-)
+query_extra_fail = -
+query_extra_legacy = -
+
+query_absolute_path = $(addprefix $(DEPS_DIR)/,$(call query_name,$(1)))
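+
+# Worked example, assuming a hypothetical "dep_foo = git https://github.com/u/foo 1.0.0":
+#   $(call query_fetch_method,foo)  -> git
+#   $(call query_repo,foo)          -> https://github.com/u/foo
+#   $(call query_version,foo)       -> 1.0.0
+#   $(call query_absolute_path,foo) -> $(DEPS_DIR)/foo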
+
+# Deprecated legacy query functions.
+dep_fetch = $(call query_fetch_method,$(1))
+dep_name = $(call query_name,$(1))
+dep_repo = $(call query_repo_git,$(1))
+dep_commit = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(if $(filter hex,$(word 1,$(dep_$(1)))),$(word 2,$(dep_$(1))),$(word 3,$(dep_$(1)))),$(pkg_$(1)_commit)))
+
+LOCAL_DEPS_DIRS = $(foreach a,$(LOCAL_DEPS),$(if $(wildcard $(APPS_DIR)/$(a)),$(APPS_DIR)/$(a)))
+ALL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(foreach dep,$(filter-out $(IGNORE_DEPS),$(BUILD_DEPS) $(DEPS)),$(call dep_name,$(dep))))
+
+# When we are calling an app directly we don't want to include it here,
+# otherwise it'll be treated both as an app and as a top-level project.
+ALL_APPS_DIRS = $(if $(wildcard $(APPS_DIR)/),$(filter-out $(APPS_DIR),$(shell find $(APPS_DIR) -maxdepth 1 -type d)))
+ifdef ROOT_DIR
+ifndef IS_APP
+ALL_APPS_DIRS := $(filter-out $(APPS_DIR)/$(notdir $(CURDIR)),$(ALL_APPS_DIRS))
+endif
+endif
+
+ifeq ($(filter $(APPS_DIR) $(DEPS_DIR),$(subst :, ,$(ERL_LIBS))),)
+ifeq ($(ERL_LIBS),)
+ ERL_LIBS = $(APPS_DIR):$(DEPS_DIR)
+else
+ ERL_LIBS := $(ERL_LIBS):$(APPS_DIR):$(DEPS_DIR)
+endif
+endif
+export ERL_LIBS
+
+export NO_AUTOPATCH
+
+# Verbosity.
+
+dep_verbose_0 = @echo " DEP $1 ($(call dep_commit,$1))";
+dep_verbose_2 = set -x;
+dep_verbose = $(dep_verbose_$(V))
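+
+# For example, the default "make" prints one " DEP foo (commit)" line per
+# fetch, while "make V=2" traces the underlying commands via "set -x".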
+
+# Optimization: don't recompile deps unless truly necessary.
+
+ifndef IS_DEP
+ifneq ($(MAKELEVEL),0)
+$(shell rm -f ebin/dep_built)
+endif
+endif
+
+# Core targets.
+
+ALL_APPS_DIRS_TO_BUILD = $(if $(LOCAL_DEPS_DIRS)$(IS_APP),$(LOCAL_DEPS_DIRS),$(ALL_APPS_DIRS))
+
+apps:: $(ALL_APPS_DIRS) clean-tmp-deps.log | $(ERLANG_MK_TMP)
+# Create ebin directory for all apps to make sure Erlang recognizes them
+# as proper OTP applications when using -include_lib. This is a temporary
+# fix; a proper fix would be to compile apps/* in the right order.
+ifndef IS_APP
+ifneq ($(ALL_APPS_DIRS),)
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ mkdir -p $$dep/ebin; \
+ done
+endif
+endif
+# At the toplevel: if LOCAL_DEPS is defined with at least one local app, only
+# compile that list of apps. Otherwise, compile everything.
+# Within an app: compile all LOCAL_DEPS that are (uncompiled) local apps.
+ifneq ($(ALL_APPS_DIRS_TO_BUILD),)
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS_TO_BUILD); do \
+ if grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/apps.log; then \
+ :; \
+ else \
+ echo $$dep >> $(ERLANG_MK_TMP)/apps.log; \
+ $(MAKE) -C $$dep $(if $(IS_TEST),test-build-app) IS_APP=1; \
+ fi \
+ done
+endif
+
+clean-tmp-deps.log:
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_TMP)/apps.log $(ERLANG_MK_TMP)/deps.log
+endif
+
+# Erlang.mk does not rebuild dependencies after they were compiled
+# once. If a developer is working on the top-level project and on some
+# dependencies at the same time, they may want to change this behavior.
+# There are two solutions:
+# 1. Set `FULL=1` so that all dependencies are visited and
+# recursively recompiled if necessary.
+# 2. Set `FORCE_REBUILD=` to the specific list of dependencies that
+# should be recompiled (instead of the whole set).
+
+FORCE_REBUILD ?=
+
+ifeq ($(origin FULL),undefined)
+ifneq ($(strip $(force_rebuild_dep)$(FORCE_REBUILD)),)
+define force_rebuild_dep
+echo "$(FORCE_REBUILD)" | grep -qw "$$(basename "$1")"
+endef
+endif
+endif
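+
+# Usage examples (the dependency names are placeholders):
+#   make FULL=1                        # visit and rebuild all deps as needed
+#   make FORCE_REBUILD="cowboy ranch"  # force only these deps to rebuild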
+
+ifneq ($(SKIP_DEPS),)
+deps::
+else
+deps:: $(ALL_DEPS_DIRS) apps clean-tmp-deps.log | $(ERLANG_MK_TMP)
+ifneq ($(ALL_DEPS_DIRS),)
+ $(verbose) set -e; for dep in $(ALL_DEPS_DIRS); do \
+ if grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/deps.log; then \
+ :; \
+ else \
+ echo $$dep >> $(ERLANG_MK_TMP)/deps.log; \
+ if [ -z "$(strip $(FULL))" ] $(if $(force_rebuild_dep),&& ! ($(call force_rebuild_dep,$$dep)),) && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ elif [ -f $$dep/GNUmakefile ] || [ -f $$dep/makefile ] || [ -f $$dep/Makefile ]; then \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ else \
+ echo "Error: No Makefile to build dependency $$dep." >&2; \
+ exit 2; \
+ fi \
+ fi \
+ done
+endif
+endif
+
+# Deps related targets.
+
+# @todo rename GNUmakefile and makefile into Makefile first, if they exist
+# While the Makefile could also be named GNUmakefile or makefile,
+# in practice only Makefile has been needed so far.
+define dep_autopatch
+ if [ -f $(DEPS_DIR)/$(1)/erlang.mk ]; then \
+ rm -rf $(DEPS_DIR)/$1/ebin/; \
+ $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+ $(call dep_autopatch_erlang_mk,$(1)); \
+ elif [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+ if [ -f $(DEPS_DIR)/$1/rebar.lock ]; then \
+ $(call dep_autopatch2,$1); \
+ elif [ 0 != `grep -c "include ../\w*\.mk" $(DEPS_DIR)/$(1)/Makefile` ]; then \
+ $(call dep_autopatch2,$(1)); \
+ elif [ 0 != `grep -ci "^[^#].*rebar" $(DEPS_DIR)/$(1)/Makefile` ]; then \
+ $(call dep_autopatch2,$(1)); \
+ elif [ -n "`find $(DEPS_DIR)/$(1)/ -type f -name \*.mk -not -name erlang.mk -exec grep -i "^[^#].*rebar" '{}' \;`" ]; then \
+ $(call dep_autopatch2,$(1)); \
+ fi \
+ else \
+ if [ ! -d $(DEPS_DIR)/$(1)/src/ ]; then \
+ $(call dep_autopatch_noop,$(1)); \
+ else \
+ $(call dep_autopatch2,$(1)); \
+ fi \
+ fi
+endef
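+
+# Summary of the decision tree above: deps shipping their own erlang.mk keep
+# it (ebin/ is wiped, the .app.src regenerated and the include line patched);
+# Makefiles that pull in rebar in any form go through dep_autopatch2, other
+# Makefiles are used as-is; deps with no Makefile get dep_autopatch2, or a
+# no-op Makefile when they have no src/ directory.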
+
+define dep_autopatch2
+ ! test -f $(DEPS_DIR)/$1/ebin/$1.app || \
+ mv -n $(DEPS_DIR)/$1/ebin/$1.app $(DEPS_DIR)/$1/src/$1.app.src; \
+ rm -f $(DEPS_DIR)/$1/ebin/$1.app; \
+ if [ -f $(DEPS_DIR)/$1/src/$1.app.src.script ]; then \
+ $(call erlang,$(call dep_autopatch_appsrc_script.erl,$(1))); \
+ fi; \
+ $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+ if [ -f $(DEPS_DIR)/$(1)/rebar -o -f $(DEPS_DIR)/$(1)/rebar.config -o -f $(DEPS_DIR)/$(1)/rebar.config.script -o -f $(DEPS_DIR)/$1/rebar.lock ]; then \
+ $(call dep_autopatch_fetch_rebar); \
+ $(call dep_autopatch_rebar,$(1)); \
+ else \
+ $(call dep_autopatch_gen,$(1)); \
+ fi
+endef
+
+define dep_autopatch_noop
+ printf "noop:\n" > $(DEPS_DIR)/$(1)/Makefile
+endef
+
+# Replace "include erlang.mk" with a line that will load the parent Erlang.mk
+# if one is given. Do this for all 3 possible Makefile file names.
+ifeq ($(NO_AUTOPATCH_ERLANG_MK),)
+define dep_autopatch_erlang_mk
+ for f in Makefile makefile GNUmakefile; do \
+ if [ -f $(DEPS_DIR)/$1/$$f ]; then \
+ sed -i.bak s/'include *erlang.mk'/'include $$(if $$(ERLANG_MK_FILENAME),$$(ERLANG_MK_FILENAME),erlang.mk)'/ $(DEPS_DIR)/$1/$$f; \
+ fi \
+ done
+endef
+else
+define dep_autopatch_erlang_mk
+ :
+endef
+endif
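+
+# Effect of the sed call above (illustrative):
+#   before: include erlang.mk
+#   after:  include $(if $(ERLANG_MK_FILENAME),$(ERLANG_MK_FILENAME),erlang.mk)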
+
+define dep_autopatch_gen
+ printf "%s\n" \
+ "ERLC_OPTS = +debug_info" \
+ "include ../../erlang.mk" > $(DEPS_DIR)/$(1)/Makefile
+endef
+
+# We use flock/lockf when available to avoid concurrency issues.
+define dep_autopatch_fetch_rebar
+ if command -v flock >/dev/null; then \
+ flock $(ERLANG_MK_TMP)/rebar.lock sh -c "$(call dep_autopatch_fetch_rebar2)"; \
+ elif command -v lockf >/dev/null; then \
+ lockf $(ERLANG_MK_TMP)/rebar.lock sh -c "$(call dep_autopatch_fetch_rebar2)"; \
+ else \
+ $(call dep_autopatch_fetch_rebar2); \
+ fi
+endef
+
+define dep_autopatch_fetch_rebar2
+ if [ ! -d $(ERLANG_MK_TMP)/rebar ]; then \
+ git clone -q -n -- $(REBAR_GIT) $(ERLANG_MK_TMP)/rebar; \
+ cd $(ERLANG_MK_TMP)/rebar; \
+ git checkout -q $(REBAR_COMMIT); \
+ ./bootstrap; \
+ cd -; \
+ fi
+endef
+
+define dep_autopatch_rebar
+ if [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+ mv $(DEPS_DIR)/$(1)/Makefile $(DEPS_DIR)/$(1)/Makefile.orig.mk; \
+ fi; \
+ $(call erlang,$(call dep_autopatch_rebar.erl,$(1))); \
+ rm -f $(DEPS_DIR)/$(1)/ebin/$(1).app
+endef
+
+define dep_autopatch_rebar.erl
+ application:load(rebar),
+ application:set_env(rebar, log_level, debug),
+ rmemo:start(),
+ Conf1 = case file:consult("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config)") of
+ {ok, Conf0} -> Conf0;
+ _ -> []
+ end,
+ {Conf, OsEnv} = fun() ->
+ case filelib:is_file("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)") of
+ false -> {Conf1, []};
+ true ->
+ Bindings0 = erl_eval:new_bindings(),
+ Bindings1 = erl_eval:add_binding('CONFIG', Conf1, Bindings0),
+ Bindings = erl_eval:add_binding('SCRIPT', "$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings1),
+ Before = os:getenv(),
+ {ok, Conf2} = file:script("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings),
+ {Conf2, lists:foldl(fun(E, Acc) -> lists:delete(E, Acc) end, os:getenv(), Before)}
+ end
+ end(),
+ Write = fun (Text) ->
+ file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/Makefile)", Text, [append])
+ end,
+ Escape = fun (Text) ->
+ re:replace(Text, "\\\\$$", "\$$$$", [global, {return, list}])
+ end,
+ Write("IGNORE_DEPS += edown eper eunit_formatters meck node_package "
+ "rebar_lock_deps_plugin rebar_vsn_plugin reltool_util\n"),
+ Write("C_SRC_DIR = /path/do/not/exist\n"),
+ Write("C_SRC_TYPE = rebar\n"),
+ Write("DRV_CFLAGS = -fPIC\nexport DRV_CFLAGS\n"),
+ Write(["ERLANG_ARCH = ", rebar_utils:wordsize(), "\nexport ERLANG_ARCH\n"]),
+ ToList = fun
+ (V) when is_atom(V) -> atom_to_list(V);
+ (V) when is_list(V) -> "'\\"" ++ V ++ "\\"'"
+ end,
+ fun() ->
+ Write("ERLC_OPTS = +debug_info\nexport ERLC_OPTS\n"),
+ case lists:keyfind(erl_opts, 1, Conf) of
+ false -> ok;
+ {_, ErlOpts} ->
+ lists:foreach(fun
+ ({d, D}) ->
+ Write("ERLC_OPTS += -D" ++ ToList(D) ++ "=1\n");
+ ({d, DKey, DVal}) ->
+ Write("ERLC_OPTS += -D" ++ ToList(DKey) ++ "=" ++ ToList(DVal) ++ "\n");
+ ({i, I}) ->
+ Write(["ERLC_OPTS += -I ", I, "\n"]);
+ ({platform_define, Regex, D}) ->
+ case rebar_utils:is_arch(Regex) of
+ true -> Write("ERLC_OPTS += -D" ++ ToList(D) ++ "=1\n");
+ false -> ok
+ end;
+ ({parse_transform, PT}) ->
+ Write("ERLC_OPTS += +'{parse_transform, " ++ ToList(PT) ++ "}'\n");
+ (_) -> ok
+ end, ErlOpts)
+ end,
+ Write("\n")
+ end(),
+ GetHexVsn = fun(N, NP) ->
+ case file:consult("$(call core_native_path,$(DEPS_DIR)/$1/rebar.lock)") of
+ {ok, Lock} ->
+ io:format("~p~n", [Lock]),
+ case lists:keyfind("1.1.0", 1, Lock) of
+ {_, LockPkgs} ->
+ io:format("~p~n", [LockPkgs]),
+ case lists:keyfind(atom_to_binary(N, latin1), 1, LockPkgs) of
+ {_, {pkg, _, Vsn}, _} ->
+ io:format("~p~n", [Vsn]),
+ {N, {hex, NP, binary_to_list(Vsn)}};
+ _ ->
+ false
+ end;
+ _ ->
+ false
+ end;
+ _ ->
+ false
+ end
+ end,
+ SemVsn = fun
+ ("~>" ++ S0) ->
+ S = case S0 of
+ " " ++ S1 -> S1;
+ _ -> S0
+ end,
+ case length([ok || $$. <- S]) of
+ 0 -> S ++ ".0.0";
+ 1 -> S ++ ".0";
+ _ -> S
+ end;
+ (S) -> S
+ end,
+ fun() ->
+ File = case lists:keyfind(deps, 1, Conf) of
+ false -> [];
+ {_, Deps} ->
+ [begin case case Dep of
+ N when is_atom(N) -> GetHexVsn(N, N);
+ {N, S} when is_atom(N), is_list(S) -> {N, {hex, N, SemVsn(S)}};
+ {N, {pkg, NP}} when is_atom(N) -> GetHexVsn(N, NP);
+ {N, S, {pkg, NP}} -> {N, {hex, NP, S}};
+ {N, S} when is_tuple(S) -> {N, S};
+ {N, _, S} -> {N, S};
+ {N, _, S, _} -> {N, S};
+ _ -> false
+ end of
+ false -> ok;
+ {Name, Source} ->
+ {Method, Repo, Commit} = case Source of
+ {hex, NPV, V} -> {hex, V, NPV};
+ {git, R} -> {git, R, master};
+ {M, R, {branch, C}} -> {M, R, C};
+ {M, R, {ref, C}} -> {M, R, C};
+ {M, R, {tag, C}} -> {M, R, C};
+ {M, R, C} -> {M, R, C}
+ end,
+ Write(io_lib:format("DEPS += ~s\ndep_~s = ~s ~s ~s~n", [Name, Name, Method, Repo, Commit]))
+ end end || Dep <- Deps]
+ end
+ end(),
+ fun() ->
+ case lists:keyfind(erl_first_files, 1, Conf) of
+ false -> ok;
+ {_, Files} ->
+ Names = [[" ", case lists:reverse(F) of
+ "lre." ++ Elif -> lists:reverse(Elif);
+ "lrx." ++ Elif -> lists:reverse(Elif);
+ "lry." ++ Elif -> lists:reverse(Elif);
+ Elif -> lists:reverse(Elif)
+ end] || "src/" ++ F <- Files],
+ Write(io_lib:format("COMPILE_FIRST +=~s\n", [Names]))
+ end
+ end(),
+ Write("\n\nrebar_dep: preprocess pre-deps deps pre-app app\n"),
+ Write("\npreprocess::\n"),
+ Write("\npre-deps::\n"),
+ Write("\npre-app::\n"),
+ PatchHook = fun(Cmd) ->
+ Cmd2 = re:replace(Cmd, "^([g]?make)(.*)( -C.*)", "\\\\1\\\\3\\\\2", [{return, list}]),
+ case Cmd2 of
+ "make -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1);
+ "gmake -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1);
+ "make " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1);
+ "gmake " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1);
+ _ -> Escape(Cmd)
+ end
+ end,
+ fun() ->
+ case lists:keyfind(pre_hooks, 1, Conf) of
+ false -> ok;
+ {_, Hooks} ->
+ [case H of
+ {'get-deps', Cmd} ->
+ Write("\npre-deps::\n\t" ++ PatchHook(Cmd) ++ "\n");
+ {compile, Cmd} ->
+ Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n");
+ {Regex, compile, Cmd} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n");
+ false -> ok
+ end;
+ _ -> ok
+ end || H <- Hooks]
+ end
+ end(),
+ ShellToMk = fun(V0) ->
+ V1 = re:replace(V0, "[$$][(]", "$$\(shell ", [global]),
+ V = re:replace(V1, "([$$])(?![(])(\\\\w*)", "\\\\1(\\\\2)", [global]),
+ re:replace(V, "-Werror\\\\b", "", [{return, list}, global])
+ end,
+ PortSpecs = fun() ->
+ case lists:keyfind(port_specs, 1, Conf) of
+ false ->
+ case filelib:is_dir("$(call core_native_path,$(DEPS_DIR)/$1/c_src)") of
+ false -> [];
+ true ->
+ [{"priv/" ++ proplists:get_value(so_name, Conf, "$(1)_drv.so"),
+ proplists:get_value(port_sources, Conf, ["c_src/*.c"]), []}]
+ end;
+ {_, Specs} ->
+ lists:flatten([case S of
+ {Output, Input} -> {ShellToMk(Output), Input, []};
+ {Regex, Output, Input} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {ShellToMk(Output), Input, []};
+ false -> []
+ end;
+ {Regex, Output, Input, [{env, Env}]} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {ShellToMk(Output), Input, Env};
+ false -> []
+ end
+ end || S <- Specs])
+ end
+ end(),
+ PortSpecWrite = fun (Text) ->
+ file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/c_src/Makefile.erlang.mk)", Text, [append])
+ end,
+ case PortSpecs of
+ [] -> ok;
+ _ ->
+ Write("\npre-app::\n\t@$$\(MAKE) --no-print-directory -f c_src/Makefile.erlang.mk\n"),
+ PortSpecWrite(io_lib:format("ERL_CFLAGS ?= -finline-functions -Wall -fPIC -I \\"~s/erts-~s/include\\" -I \\"~s\\"\n",
+ [code:root_dir(), erlang:system_info(version), code:lib_dir(erl_interface, include)])),
+ PortSpecWrite(io_lib:format("ERL_LDFLAGS ?= -L \\"~s\\" -lei\n",
+ [code:lib_dir(erl_interface, lib)])),
+ [PortSpecWrite(["\n", E, "\n"]) || E <- OsEnv],
+ FilterEnv = fun(Env) ->
+ lists:flatten([case E of
+ {_, _} -> E;
+ {Regex, K, V} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {K, V};
+ false -> []
+ end
+ end || E <- Env])
+ end,
+ MergeEnv = fun(Env) ->
+ lists:foldl(fun ({K, V}, Acc) ->
+ case lists:keyfind(K, 1, Acc) of
+ false -> [{K, rebar_utils:expand_env_variable(V, K, "")}|Acc];
+ {_, V0} -> [{K, rebar_utils:expand_env_variable(V, K, V0)}|Acc]
+ end
+ end, [], Env)
+ end,
+ PortEnv = case lists:keyfind(port_env, 1, Conf) of
+ false -> [];
+ {_, PortEnv0} -> FilterEnv(PortEnv0)
+ end,
+ PortSpec = fun ({Output, Input0, Env}) ->
+ filelib:ensure_dir("$(call core_native_path,$(DEPS_DIR)/$1/)" ++ Output),
+ Input = [[" ", I] || I <- Input0],
+ PortSpecWrite([
+ [["\n", K, " = ", ShellToMk(V)] || {K, V} <- lists:reverse(MergeEnv(PortEnv))],
+ case $(PLATFORM) of
+ darwin -> "\n\nLDFLAGS += -flat_namespace -undefined suppress";
+ _ -> ""
+ end,
+ "\n\nall:: ", Output, "\n\t@:\n\n",
+ "%.o: %.c\n\t$$\(CC) -c -o $$\@ $$\< $$\(CFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.C\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.cc\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.cpp\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ [[Output, ": ", K, " += ", ShellToMk(V), "\n"] || {K, V} <- lists:reverse(MergeEnv(FilterEnv(Env)))],
+ Output, ": $$\(foreach ext,.c .C .cc .cpp,",
+ "$$\(patsubst %$$\(ext),%.o,$$\(filter %$$\(ext),$$\(wildcard", Input, "))))\n",
+ "\t$$\(CC) -o $$\@ $$\? $$\(LDFLAGS) $$\(ERL_LDFLAGS) $$\(DRV_LDFLAGS) $$\(EXE_LDFLAGS)",
+ case {filename:extension(Output), $(PLATFORM)} of
+ {[], _} -> "\n";
+ {_, darwin} -> "\n";
+ _ -> " -shared\n"
+ end])
+ end,
+ [PortSpec(S) || S <- PortSpecs]
+ end,
+ fun() ->
+ case lists:keyfind(plugins, 1, Conf) of
+ false -> ok;
+ {_, Plugins0} ->
+ Plugins = [P || P <- Plugins0, is_tuple(P)],
+ case lists:keyfind('lfe-compile', 1, Plugins) of
+ false -> ok;
+ _ -> Write("\nBUILD_DEPS = lfe lfe.mk\ndep_lfe.mk = git https://github.com/ninenines/lfe.mk master\nDEP_PLUGINS = lfe.mk\n")
+ end
+ end
+ end(),
+ Write("\ninclude $$\(if $$\(ERLANG_MK_FILENAME),$$\(ERLANG_MK_FILENAME),erlang.mk)"),
+ RunPlugin = fun(Plugin, Step) ->
+ case erlang:function_exported(Plugin, Step, 2) of
+ false -> ok;
+ true ->
+ c:cd("$(call core_native_path,$(DEPS_DIR)/$1/)"),
+ Ret = Plugin:Step({config, "", Conf, dict:new(), dict:new(), dict:new(),
+ dict:store(base_dir, "", dict:new())}, undefined),
+ io:format("rebar plugin ~p step ~p ret ~p~n", [Plugin, Step, Ret])
+ end
+ end,
+ fun() ->
+ case lists:keyfind(plugins, 1, Conf) of
+ false -> ok;
+ {_, Plugins0} ->
+ Plugins = [P || P <- Plugins0, is_atom(P)],
+ [begin
+ case lists:keyfind(deps, 1, Conf) of
+ false -> ok;
+ {_, Deps} ->
+ case lists:keyfind(P, 1, Deps) of
+ false -> ok;
+ _ ->
+ Path = "$(call core_native_path,$(DEPS_DIR)/)" ++ atom_to_list(P),
+ io:format("~s", [os:cmd("$(MAKE) -C $(call core_native_path,$(DEPS_DIR)/$1) " ++ Path)]),
+ io:format("~s", [os:cmd("$(MAKE) -C " ++ Path ++ " IS_DEP=1")]),
+ code:add_patha(Path ++ "/ebin")
+ end
+ end
+ end || P <- Plugins],
+ [case code:load_file(P) of
+ {module, P} -> ok;
+ _ ->
+ case lists:keyfind(plugin_dir, 1, Conf) of
+ false -> ok;
+ {_, PluginsDir} ->
+ ErlFile = "$(call core_native_path,$(DEPS_DIR)/$1/)" ++ PluginsDir ++ "/" ++ atom_to_list(P) ++ ".erl",
+ {ok, P, Bin} = compile:file(ErlFile, [binary]),
+ {module, P} = code:load_binary(P, ErlFile, Bin)
+ end
+ end || P <- Plugins],
+ [RunPlugin(P, preprocess) || P <- Plugins],
+ [RunPlugin(P, pre_compile) || P <- Plugins],
+ [RunPlugin(P, compile) || P <- Plugins]
+ end
+ end(),
+ halt()
+endef
+
+define dep_autopatch_appsrc_script.erl
+ AppSrc = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+ AppSrcScript = AppSrc ++ ".script",
+ {ok, Conf0} = file:consult(AppSrc),
+ Bindings0 = erl_eval:new_bindings(),
+ Bindings1 = erl_eval:add_binding('CONFIG', Conf0, Bindings0),
+ Bindings = erl_eval:add_binding('SCRIPT', AppSrcScript, Bindings1),
+ Conf = case file:script(AppSrcScript, Bindings) of
+ {ok, [C]} -> C;
+ {ok, C} -> C
+ end,
+ ok = file:write_file(AppSrc, io_lib:format("~p.~n", [Conf])),
+ halt()
+endef
+
+define dep_autopatch_appsrc.erl
+ AppSrcOut = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+ AppSrcIn = case filelib:is_regular(AppSrcOut) of false -> "$(call core_native_path,$(DEPS_DIR)/$1/ebin/$1.app)"; true -> AppSrcOut end,
+ case filelib:is_regular(AppSrcIn) of
+ false -> ok;
+ true ->
+ {ok, [{application, $(1), L0}]} = file:consult(AppSrcIn),
+ L1 = lists:keystore(modules, 1, L0, {modules, []}),
+ L2 = case lists:keyfind(vsn, 1, L1) of
+ {_, git} -> lists:keyreplace(vsn, 1, L1, {vsn, lists:droplast(os:cmd("git -C $(DEPS_DIR)/$1 describe --dirty --tags --always"))});
+ {_, {cmd, _}} -> lists:keyreplace(vsn, 1, L1, {vsn, "cmd"});
+ _ -> L1
+ end,
+ L3 = case lists:keyfind(registered, 1, L2) of false -> [{registered, []}|L2]; _ -> L2 end,
+ ok = file:write_file(AppSrcOut, io_lib:format("~p.~n", [{application, $(1), L3}])),
+ case AppSrcOut of AppSrcIn -> ok; _ -> ok = file:delete(AppSrcIn) end
+ end,
+ halt()
+endef
+
+define dep_fetch_git
+ git clone -q -n -- $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && git checkout -q $(call dep_commit,$(1));
+endef
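+
+# Illustrative usage (hypothetical dependency): the fetch method is the
+# first word of the dep_* line, e.g. in a project Makefile:
+#
+#   DEPS = cowboy
+#   dep_cowboy = git https://github.com/ninenines/cowboy 2.8.0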
+
+define dep_fetch_git-subfolder
+ mkdir -p $(ERLANG_MK_TMP)/git-subfolder; \
+ git clone -q -n -- $(call dep_repo,$1) \
+ $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1); \
+ cd $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1) \
+ && git checkout -q $(call dep_commit,$1); \
+ ln -s $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1)/$(word 4,$(dep_$(1))) \
+ $(DEPS_DIR)/$(call dep_name,$1);
+endef
+
+define dep_fetch_git-submodule
+ git submodule update --init -- $(DEPS_DIR)/$1;
+endef
+
+define dep_fetch_hg
+ hg clone -q -U $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && hg update -q $(call dep_commit,$(1));
+endef
+
+define dep_fetch_svn
+ svn checkout -q $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+define dep_fetch_cp
+ cp -R $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+define dep_fetch_ln
+ ln -s $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+# Hex only has a package version. No need to look in the Erlang.mk packages.
+define dep_fetch_hex
+ mkdir -p $(ERLANG_MK_TMP)/hex $(DEPS_DIR)/$1; \
+ $(call core_http_get,$(ERLANG_MK_TMP)/hex/$1.tar,\
+ https://repo.hex.pm/tarballs/$(if $(word 3,$(dep_$1)),$(word 3,$(dep_$1)),$1)-$(strip $(word 2,$(dep_$1))).tar); \
+ tar -xOf $(ERLANG_MK_TMP)/hex/$1.tar contents.tar.gz | tar -C $(DEPS_DIR)/$1 -xzf -;
+endef
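+
+# Illustrative (hypothetical package/version): "dep_NAME = hex VERSION [PACKAGE]",
+# where the optional third word overrides the Hex package name when it
+# differs from the application name, e.g.:
+#
+#   DEPS = jsx
+#   dep_jsx = hex 2.9.0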
+
+define dep_fetch_fail
+ echo "Error: Unknown or invalid dependency: $(1)." >&2; \
+ exit 78;
+endef
+
+# Kept for compatibility with older Erlang.mk configurations.
+define dep_fetch_legacy
+ $(warning WARNING: '$(1)' dependency configuration uses deprecated format.) \
+ git clone -q -n -- $(word 1,$(dep_$(1))) $(DEPS_DIR)/$(1); \
+ cd $(DEPS_DIR)/$(1) && git checkout -q $(if $(word 2,$(dep_$(1))),$(word 2,$(dep_$(1))),master);
+endef
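+
+# Illustrative deprecated form handled above: a bare repository URL with an
+# optional ref (defaults to master), e.g.:
+#
+#   dep_cowboy = https://github.com/ninenines/cowboy 1.0.0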
+
+define dep_target
+$(DEPS_DIR)/$(call dep_name,$1): | $(ERLANG_MK_TMP)
+ $(eval DEP_NAME := $(call dep_name,$1))
+ $(eval DEP_STR := $(if $(filter $1,$(DEP_NAME)),$1,"$1 ($(DEP_NAME))"))
+ $(verbose) if test -d $(APPS_DIR)/$(DEP_NAME); then \
+ echo "Error: Dependency" $(DEP_STR) "conflicts with application found in $(APPS_DIR)/$(DEP_NAME)." >&2; \
+ exit 17; \
+ fi
+ $(verbose) mkdir -p $(DEPS_DIR)
+ $(dep_verbose) $(call dep_fetch_$(strip $(call dep_fetch,$(1))),$(1))
+ $(verbose) if [ -f $(DEPS_DIR)/$(1)/configure.ac -o -f $(DEPS_DIR)/$(1)/configure.in ] \
+ && [ ! -f $(DEPS_DIR)/$(1)/configure ]; then \
+ echo " AUTO " $(DEP_STR); \
+ cd $(DEPS_DIR)/$(1) && autoreconf -Wall -vif -I m4; \
+ fi
+ - $(verbose) if [ -f $(DEPS_DIR)/$(DEP_NAME)/configure ]; then \
+ echo " CONF " $(DEP_STR); \
+ cd $(DEPS_DIR)/$(DEP_NAME) && ./configure; \
+ fi
+ifeq ($(filter $(1),$(NO_AUTOPATCH)),)
+ $(verbose) $$(MAKE) --no-print-directory autopatch-$(DEP_NAME)
+endif
+
+.PHONY: autopatch-$(call dep_name,$1)
+
+autopatch-$(call dep_name,$1)::
+ $(verbose) if [ "$(1)" = "amqp_client" -a "$(RABBITMQ_CLIENT_PATCH)" ]; then \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \
+ echo " PATCH Downloading rabbitmq-codegen"; \
+ git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \
+ fi; \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-server ]; then \
+ echo " PATCH Downloading rabbitmq-server"; \
+ git clone https://github.com/rabbitmq/rabbitmq-server.git $(DEPS_DIR)/rabbitmq-server; \
+ fi; \
+ ln -s $(DEPS_DIR)/amqp_client/deps/rabbit_common-0.0.0 $(DEPS_DIR)/rabbit_common; \
+ elif [ "$(1)" = "rabbit" -a "$(RABBITMQ_SERVER_PATCH)" ]; then \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \
+ echo " PATCH Downloading rabbitmq-codegen"; \
+ git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \
+ fi \
+ elif [ "$1" = "elixir" -a "$(ELIXIR_PATCH)" ]; then \
+ ln -s lib/elixir/ebin $(DEPS_DIR)/elixir/; \
+ else \
+ $$(call dep_autopatch,$(call dep_name,$1)) \
+ fi
+endef
+
+$(foreach dep,$(BUILD_DEPS) $(DEPS),$(eval $(call dep_target,$(dep))))
+
+ifndef IS_APP
+clean:: clean-apps
+
+clean-apps:
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ $(MAKE) -C $$dep clean IS_APP=1; \
+ done
+
+distclean:: distclean-apps
+
+distclean-apps:
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ $(MAKE) -C $$dep distclean IS_APP=1; \
+ done
+endif
+
+ifndef SKIP_DEPS
+distclean:: distclean-deps
+
+distclean-deps:
+ $(gen_verbose) rm -rf $(DEPS_DIR)
+endif
+
+# Forward-declare variables used in core/deps-tools.mk. This is required
+# in case plugins use them.
+
+ERLANG_MK_RECURSIVE_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-deps-list.log
+ERLANG_MK_RECURSIVE_DOC_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-doc-deps-list.log
+ERLANG_MK_RECURSIVE_REL_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-rel-deps-list.log
+ERLANG_MK_RECURSIVE_TEST_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-test-deps-list.log
+ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-shell-deps-list.log
+
+ERLANG_MK_QUERY_DEPS_FILE = $(ERLANG_MK_TMP)/query-deps.log
+ERLANG_MK_QUERY_DOC_DEPS_FILE = $(ERLANG_MK_TMP)/query-doc-deps.log
+ERLANG_MK_QUERY_REL_DEPS_FILE = $(ERLANG_MK_TMP)/query-rel-deps.log
+ERLANG_MK_QUERY_TEST_DEPS_FILE = $(ERLANG_MK_TMP)/query-test-deps.log
+ERLANG_MK_QUERY_SHELL_DEPS_FILE = $(ERLANG_MK_TMP)/query-shell-deps.log
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: clean-app
+
+# Configuration.
+
+ERLC_OPTS ?= -Werror +debug_info +warn_export_vars +warn_shadow_vars \
+ +warn_obsolete_guard # +bin_opt_info +warn_export_all +warn_missing_spec
+COMPILE_FIRST ?=
+COMPILE_FIRST_PATHS = $(addprefix src/,$(addsuffix .erl,$(COMPILE_FIRST)))
+ERLC_EXCLUDE ?=
+ERLC_EXCLUDE_PATHS = $(addprefix src/,$(addsuffix .erl,$(ERLC_EXCLUDE)))
+
+ERLC_ASN1_OPTS ?=
+
+ERLC_MIB_OPTS ?=
+COMPILE_MIB_FIRST ?=
+COMPILE_MIB_FIRST_PATHS = $(addprefix mibs/,$(addsuffix .mib,$(COMPILE_MIB_FIRST)))
+
+# Verbosity.
+
+app_verbose_0 = @echo " APP " $(PROJECT);
+app_verbose_2 = set -x;
+app_verbose = $(app_verbose_$(V))
+
+appsrc_verbose_0 = @echo " APP " $(PROJECT).app.src;
+appsrc_verbose_2 = set -x;
+appsrc_verbose = $(appsrc_verbose_$(V))
+
+makedep_verbose_0 = @echo " DEPEND" $(PROJECT).d;
+makedep_verbose_2 = set -x;
+makedep_verbose = $(makedep_verbose_$(V))
+
+erlc_verbose_0 = @echo " ERLC " $(filter-out $(patsubst %,%.erl,$(ERLC_EXCLUDE)),\
+ $(filter %.erl %.core,$(?F)));
+erlc_verbose_2 = set -x;
+erlc_verbose = $(erlc_verbose_$(V))
+
+xyrl_verbose_0 = @echo " XYRL " $(filter %.xrl %.yrl,$(?F));
+xyrl_verbose_2 = set -x;
+xyrl_verbose = $(xyrl_verbose_$(V))
+
+asn1_verbose_0 = @echo " ASN1 " $(filter %.asn1,$(?F));
+asn1_verbose_2 = set -x;
+asn1_verbose = $(asn1_verbose_$(V))
+
+mib_verbose_0 = @echo " MIB " $(filter %.bin %.mib,$(?F));
+mib_verbose_2 = set -x;
+mib_verbose = $(mib_verbose_$(V))
+
+ifneq ($(wildcard src/),)
+
+# Targets.
+
+app:: $(if $(wildcard ebin/test),clean) deps
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d
+ $(verbose) $(MAKE) --no-print-directory app-build
+
+ifeq ($(wildcard src/$(PROJECT_MOD).erl),)
+define app_file
+{application, '$(PROJECT)', [
+ {description, "$(PROJECT_DESCRIPTION)"},
+ {vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP),
+ {id$(comma)$(space)"$(1)"}$(comma))
+ {modules, [$(call comma_list,$(2))]},
+ {registered, []},
+ {applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(foreach dep,$(DEPS),$(call dep_name,$(dep))))]},
+ {env, $(subst \,\\,$(PROJECT_ENV))}$(if $(findstring {,$(PROJECT_APP_EXTRA_KEYS)),$(comma)$(newline)$(tab)$(subst \,\\,$(PROJECT_APP_EXTRA_KEYS)),)
+]}.
+endef
+else
+define app_file
+{application, '$(PROJECT)', [
+ {description, "$(PROJECT_DESCRIPTION)"},
+ {vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP),
+ {id$(comma)$(space)"$(1)"}$(comma))
+ {modules, [$(call comma_list,$(2))]},
+ {registered, [$(call comma_list,$(PROJECT)_sup $(PROJECT_REGISTERED))]},
+ {applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(foreach dep,$(DEPS),$(call dep_name,$(dep))))]},
+ {mod, {$(PROJECT_MOD), []}},
+ {env, $(subst \,\\,$(PROJECT_ENV))}$(if $(findstring {,$(PROJECT_APP_EXTRA_KEYS)),$(comma)$(newline)$(tab)$(subst \,\\,$(PROJECT_APP_EXTRA_KEYS)),)
+]}.
+endef
+endif
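+
+# Note: PROJECT_APP_EXTRA_KEYS above appends extra tuples to the generated
+# .app file when the value contains a "{". Illustrative (hypothetical value):
+#
+#   PROJECT_APP_EXTRA_KEYS = {maxT, infinity}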
+
+app-build: ebin/$(PROJECT).app
+ $(verbose) :
+
+# Source files.
+
+ALL_SRC_FILES := $(sort $(call core_find,src/,*))
+
+ERL_FILES := $(filter %.erl,$(ALL_SRC_FILES))
+CORE_FILES := $(filter %.core,$(ALL_SRC_FILES))
+
+# ASN.1 files.
+
+ifneq ($(wildcard asn1/),)
+ASN1_FILES = $(sort $(call core_find,asn1/,*.asn1))
+ERL_FILES += $(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES))))
+
+define compile_asn1
+ $(verbose) mkdir -p include/
+ $(asn1_verbose) erlc -v -I include/ -o asn1/ +noobj $(ERLC_ASN1_OPTS) $(1)
+ $(verbose) mv asn1/*.erl src/
+ -$(verbose) mv asn1/*.hrl include/
+ $(verbose) mv asn1/*.asn1db include/
+endef
+
+$(PROJECT).d:: $(ASN1_FILES)
+ $(if $(strip $?),$(call compile_asn1,$?))
+endif
+
+# SNMP MIB files.
+
+ifneq ($(wildcard mibs/),)
+MIB_FILES = $(sort $(call core_find,mibs/,*.mib))
+
+$(PROJECT).d:: $(COMPILE_MIB_FIRST_PATHS) $(MIB_FILES)
+ $(verbose) mkdir -p include/ priv/mibs/
+ $(mib_verbose) erlc -v $(ERLC_MIB_OPTS) -o priv/mibs/ -I priv/mibs/ $?
+ $(mib_verbose) erlc -o include/ -- $(addprefix priv/mibs/,$(patsubst %.mib,%.bin,$(notdir $?)))
+endif
+
+# Leex and Yecc files.
+
+XRL_FILES := $(filter %.xrl,$(ALL_SRC_FILES))
+XRL_ERL_FILES = $(addprefix src/,$(patsubst %.xrl,%.erl,$(notdir $(XRL_FILES))))
+ERL_FILES += $(XRL_ERL_FILES)
+
+YRL_FILES := $(filter %.yrl,$(ALL_SRC_FILES))
+YRL_ERL_FILES = $(addprefix src/,$(patsubst %.yrl,%.erl,$(notdir $(YRL_FILES))))
+ERL_FILES += $(YRL_ERL_FILES)
+
+$(PROJECT).d:: $(XRL_FILES) $(YRL_FILES)
+ $(if $(strip $?),$(xyrl_verbose) erlc -v -o src/ $(YRL_ERLC_OPTS) $?)
+
+# Erlang and Core Erlang files.
+
+define makedep.erl
+ E = ets:new(makedep, [bag]),
+ G = digraph:new([acyclic]),
+ ErlFiles = lists:usort(string:tokens("$(ERL_FILES)", " ")),
+ DepsDir = "$(call core_native_path,$(DEPS_DIR))",
+ AppsDir = "$(call core_native_path,$(APPS_DIR))",
+ DepsDirsSrc = "$(if $(wildcard $(DEPS_DIR)/*/src), $(call core_native_path,$(wildcard $(DEPS_DIR)/*/src)))",
+ DepsDirsInc = "$(if $(wildcard $(DEPS_DIR)/*/include), $(call core_native_path,$(wildcard $(DEPS_DIR)/*/include)))",
+ AppsDirsSrc = "$(if $(wildcard $(APPS_DIR)/*/src), $(call core_native_path,$(wildcard $(APPS_DIR)/*/src)))",
+ AppsDirsInc = "$(if $(wildcard $(APPS_DIR)/*/include), $(call core_native_path,$(wildcard $(APPS_DIR)/*/include)))",
+ DepsDirs = lists:usort(string:tokens(DepsDirsSrc++DepsDirsInc, " ")),
+ AppsDirs = lists:usort(string:tokens(AppsDirsSrc++AppsDirsInc, " ")),
+ Modules = [{list_to_atom(filename:basename(F, ".erl")), F} || F <- ErlFiles],
+ Add = fun (Mod, Dep) ->
+ case lists:keyfind(Dep, 1, Modules) of
+ false -> ok;
+ {_, DepFile} ->
+ {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+ ets:insert(E, {ModFile, DepFile}),
+ digraph:add_vertex(G, Mod),
+ digraph:add_vertex(G, Dep),
+ digraph:add_edge(G, Mod, Dep)
+ end
+ end,
+ AddHd = fun (F, Mod, DepFile) ->
+ case file:open(DepFile, [read]) of
+ {error, enoent} ->
+ ok;
+ {ok, Fd} ->
+ {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+ case ets:match(E, {ModFile, DepFile}) of
+ [] ->
+ ets:insert(E, {ModFile, DepFile}),
+ F(F, Fd, Mod, 0);
+ _ -> ok
+ end
+ end
+ end,
+ SearchHrl = fun
+ F(_Hrl, []) -> {error,enoent};
+ F(Hrl, [Dir|Dirs]) ->
+ HrlF = filename:join([Dir,Hrl]),
+ case filelib:is_file(HrlF) of
+ true ->
+ {ok, HrlF};
+ false -> F(Hrl,Dirs)
+ end
+ end,
+ Attr = fun
+ (_F, Mod, behavior, Dep) ->
+ Add(Mod, Dep);
+ (_F, Mod, behaviour, Dep) ->
+ Add(Mod, Dep);
+ (_F, Mod, compile, {parse_transform, Dep}) ->
+ Add(Mod, Dep);
+ (_F, Mod, compile, Opts) when is_list(Opts) ->
+ case proplists:get_value(parse_transform, Opts) of
+ undefined -> ok;
+ Dep -> Add(Mod, Dep)
+ end;
+ (F, Mod, include, Hrl) ->
+ case SearchHrl(Hrl, ["src", "include",AppsDir,DepsDir]++AppsDirs++DepsDirs) of
+ {ok, FoundHrl} -> AddHd(F, Mod, FoundHrl);
+ {error, _} -> false
+ end;
+ (F, Mod, include_lib, Hrl) ->
+ case SearchHrl(Hrl, ["src", "include",AppsDir,DepsDir]++AppsDirs++DepsDirs) of
+ {ok, FoundHrl} -> AddHd(F, Mod, FoundHrl);
+ {error, _} -> false
+ end;
+ (F, Mod, import, {Imp, _}) ->
+ IsFile =
+ case lists:keyfind(Imp, 1, Modules) of
+ false -> false;
+ {_, FilePath} -> filelib:is_file(FilePath)
+ end,
+ case IsFile of
+ false -> ok;
+ true -> Add(Mod, Imp)
+ end;
+ (_, _, _, _) -> ok
+ end,
+ MakeDepend = fun
+ (F, Fd, Mod, StartLocation) ->
+ {ok, _Filename} = file:pid2name(Fd),
+ case io:parse_erl_form(Fd, undefined, StartLocation) of
+ {ok, AbsData, EndLocation} ->
+ case AbsData of
+ {attribute, _, Key, Value} ->
+ Attr(F, Mod, Key, Value),
+ F(F, Fd, Mod, EndLocation);
+ _ -> F(F, Fd, Mod, EndLocation)
+ end;
+ {eof, _} -> file:close(Fd);
+ {error, _ErrorDescription} ->
+ file:close(Fd);
+ {error, _ErrorInfo, ErrorLocation} ->
+ F(F, Fd, Mod, ErrorLocation)
+ end,
+ ok
+ end,
+ [begin
+ Mod = list_to_atom(filename:basename(F, ".erl")),
+ case file:open(F, [read]) of
+ {ok, Fd} -> MakeDepend(MakeDepend, Fd, Mod, 0);
+ {error, enoent} -> ok
+ end
+ end || F <- ErlFiles],
+ Depend = sofs:to_external(sofs:relation_to_family(sofs:relation(ets:tab2list(E)))),
+ CompileFirst = [X || X <- lists:reverse(digraph_utils:topsort(G)), [] =/= digraph:in_neighbours(G, X)],
+ TargetPath = fun(Target) ->
+ case lists:keyfind(Target, 1, Modules) of
+ false -> "";
+ {_, DepFile} ->
+ DirSubname = tl(string:tokens(filename:dirname(DepFile), "/")),
+ string:join(DirSubname ++ [atom_to_list(Target)], "/")
+ end
+ end,
+ Output0 = [
+ "# Generated by Erlang.mk. Edit at your own risk!\n\n",
+ [[F, "::", [[" ", D] || D <- Deps], "; @touch \$$@\n"] || {F, Deps} <- Depend],
+ "\nCOMPILE_FIRST +=", [[" ", TargetPath(CF)] || CF <- CompileFirst], "\n"
+ ],
+ Output = case "é" of
+ [233] -> unicode:characters_to_binary(Output0);
+ _ -> Output0
+ end,
+ ok = file:write_file("$(1)", Output),
+ halt()
+endef
+
+ifeq ($(if $(NO_MAKEDEP),$(wildcard $(PROJECT).d),),)
+$(PROJECT).d:: $(ERL_FILES) $(call core_find,include/,*.hrl) $(MAKEFILE_LIST)
+ $(makedep_verbose) $(call erlang,$(call makedep.erl,$@))
+endif
+
+ifeq ($(IS_APP)$(IS_DEP),)
+ifneq ($(words $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES)),0)
+# Rebuild everything when the Makefile changes.
+$(ERLANG_MK_TMP)/last-makefile-change: $(MAKEFILE_LIST) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES); \
+ touch -c $(PROJECT).d; \
+ fi
+ $(verbose) touch $@
+
+$(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES):: $(ERLANG_MK_TMP)/last-makefile-change
+ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change
+endif
+endif
+
+$(PROJECT).d::
+ $(verbose) :
+
+include $(wildcard $(PROJECT).d)
+
+ebin/$(PROJECT).app:: ebin/
+
+ebin/:
+ $(verbose) mkdir -p ebin/
+
+define compile_erl
+ $(erlc_verbose) erlc -v $(if $(IS_DEP),$(filter-out -Werror,$(ERLC_OPTS)),$(ERLC_OPTS)) -o ebin/ \
+ -pa ebin/ -I include/ $(filter-out $(ERLC_EXCLUDE_PATHS),$(COMPILE_FIRST_PATHS) $(1))
+endef
+
+define validate_app_file
+ case file:consult("ebin/$(PROJECT).app") of
+ {ok, _} -> halt();
+ _ -> halt(1)
+ end
+endef
+
+ebin/$(PROJECT).app:: $(ERL_FILES) $(CORE_FILES) $(wildcard src/$(PROJECT).app.src)
+ $(eval FILES_TO_COMPILE := $(filter-out src/$(PROJECT).app.src,$?))
+ $(if $(strip $(FILES_TO_COMPILE)),$(call compile_erl,$(FILES_TO_COMPILE)))
+# Older git versions do not have the --first-parent flag. Do without in that case.
+ $(eval GITDESCRIBE := $(shell git describe --dirty --abbrev=7 --tags --always --first-parent 2>/dev/null \
+ || git describe --dirty --abbrev=7 --tags --always 2>/dev/null || true))
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(filter-out $(ERLC_EXCLUDE_PATHS),$(ERL_FILES) $(CORE_FILES) $(BEAM_FILES)))))))
+ifeq ($(wildcard src/$(PROJECT).app.src),)
+ $(app_verbose) printf '$(subst %,%%,$(subst $(newline),\n,$(subst ','\'',$(call app_file,$(GITDESCRIBE),$(MODULES)))))' \
+ > ebin/$(PROJECT).app
+ $(verbose) if ! $(call erlang,$(call validate_app_file)); then \
+ echo "The .app file produced is invalid. Please verify the value of PROJECT_ENV." >&2; \
+ exit 1; \
+ fi
+else
+ $(verbose) if [ -z "$$(grep -e '^[^%]*{\s*modules\s*,' src/$(PROJECT).app.src)" ]; then \
+ echo "Empty modules entry not found in $(PROJECT).app.src. Please consult the erlang.mk documentation for instructions." >&2; \
+ exit 1; \
+ fi
+ $(appsrc_verbose) cat src/$(PROJECT).app.src \
+ | sed "s/{[[:space:]]*modules[[:space:]]*,[[:space:]]*\[\]}/{modules, \[$(call comma_list,$(MODULES))\]}/" \
+ | sed "s/{id,[[:space:]]*\"git\"}/{id, \"$(subst /,\/,$(GITDESCRIBE))\"}/" \
+ > ebin/$(PROJECT).app
+endif
+ifneq ($(wildcard src/$(PROJECT).appup),)
+ $(verbose) cp src/$(PROJECT).appup ebin/
+endif
+
+clean:: clean-app
+
+clean-app:
+ $(gen_verbose) rm -rf $(PROJECT).d ebin/ priv/mibs/ $(XRL_ERL_FILES) $(YRL_ERL_FILES) \
+ $(addprefix include/,$(patsubst %.mib,%.hrl,$(notdir $(MIB_FILES)))) \
+ $(addprefix include/,$(patsubst %.asn1,%.hrl,$(notdir $(ASN1_FILES)))) \
+ $(addprefix include/,$(patsubst %.asn1,%.asn1db,$(notdir $(ASN1_FILES)))) \
+ $(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES))))
+
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Viktor Söderqvist <viktor@zuiderkwast.se>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: docs-deps
+
+# Configuration.
+
+ALL_DOC_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(DOC_DEPS))
+
+# Targets.
+
+$(foreach dep,$(DOC_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+doc-deps:
+else
+doc-deps: $(ALL_DOC_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_DOC_DEPS_DIRS) ; do $(MAKE) -C $$dep IS_DEP=1; done
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: rel-deps
+
+# Configuration.
+
+ALL_REL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(REL_DEPS))
+
+# Targets.
+
+$(foreach dep,$(REL_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+rel-deps:
+else
+rel-deps: $(ALL_REL_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_REL_DEPS_DIRS) ; do $(MAKE) -C $$dep; done
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: test-deps test-dir test-build clean-test-dir
+
+# Configuration.
+
+TEST_DIR ?= $(CURDIR)/test
+
+ALL_TEST_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(TEST_DEPS))
+
+TEST_ERLC_OPTS ?= +debug_info +warn_export_vars +warn_shadow_vars +warn_obsolete_guard
+TEST_ERLC_OPTS += -DTEST=1
+
+# Targets.
+
+$(foreach dep,$(TEST_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+test-deps:
+else
+test-deps: $(ALL_TEST_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_TEST_DEPS_DIRS) ; do \
+ if [ -z "$(strip $(FULL))" ] && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ else \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ fi \
+ done
+endif
+
+ifneq ($(wildcard $(TEST_DIR)),)
+test-dir: $(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build
+ @:
+
+test_erlc_verbose_0 = @echo " ERLC " $(filter-out $(patsubst %,%.erl,$(ERLC_EXCLUDE)),\
+ $(filter %.erl %.core,$(notdir $(FILES_TO_COMPILE))));
+test_erlc_verbose_2 = set -x;
+test_erlc_verbose = $(test_erlc_verbose_$(V))
+
+define compile_test_erl
+ $(test_erlc_verbose) erlc -v $(TEST_ERLC_OPTS) -o $(TEST_DIR) \
+ -pa ebin/ -I include/ $(1)
+endef
+
+ERL_TEST_FILES = $(call core_find,$(TEST_DIR)/,*.erl)
+$(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build: $(ERL_TEST_FILES) $(MAKEFILE_LIST)
+ $(eval FILES_TO_COMPILE := $(if $(filter $(MAKEFILE_LIST),$?),$(filter $(ERL_TEST_FILES),$^),$?))
+ $(if $(strip $(FILES_TO_COMPILE)),$(call compile_test_erl,$(FILES_TO_COMPILE)) && touch $@)
+endif
+
+test-build:: IS_TEST=1
+test-build:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build:: $(if $(wildcard src),$(if $(wildcard ebin/test),,clean)) $(if $(IS_APP),,deps test-deps)
+# We already compiled everything when IS_APP=1.
+ifndef IS_APP
+ifneq ($(wildcard src),)
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(verbose) $(MAKE) --no-print-directory app-build ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(gen_verbose) touch ebin/test
+endif
+ifneq ($(wildcard $(TEST_DIR)),)
+ $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+endif
+endif
+
+# Roughly the same as test-build, but when IS_APP=1.
+# We only care about compiling the current application.
+ifdef IS_APP
+test-build-app:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build-app:: deps test-deps
+ifneq ($(wildcard src),)
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(verbose) $(MAKE) --no-print-directory app-build ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(gen_verbose) touch ebin/test
+endif
+ifneq ($(wildcard $(TEST_DIR)),)
+ $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+endif
+endif
+
+clean:: clean-test-dir
+
+clean-test-dir:
+ifneq ($(wildcard $(TEST_DIR)/*.beam),)
+ $(gen_verbose) rm -f $(TEST_DIR)/*.beam $(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: rebar.config
+
+# We strip out -Werror because we don't want to fail due to
+# warnings when used as a dependency.
+
+compat_prepare_erlc_opts = $(shell echo "$1" | sed 's/, */,/g')
+
+define compat_convert_erlc_opts
+$(if $(filter-out -Werror,$1),\
+ $(if $(findstring +,$1),\
+ $(shell echo $1 | cut -b 2-)))
+endef
+
+define compat_erlc_opts_to_list
+[$(call comma_list,$(foreach o,$(call compat_prepare_erlc_opts,$1),$(call compat_convert_erlc_opts,$o)))]
+endef
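+
+# Worked example (illustrative): ERLC_OPTS = +debug_info +warn_export_vars -Werror
+# converts to [debug_info,warn_export_vars]: -Werror is dropped and the
+# leading + is stripped from each remaining option.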
+
+define compat_rebar_config
+{deps, [
+$(call comma_list,$(foreach d,$(DEPS),\
+ $(if $(filter hex,$(call dep_fetch,$d)),\
+ {$(call dep_name,$d)$(comma)"$(call dep_repo,$d)"},\
+ {$(call dep_name,$d)$(comma)".*"$(comma){git,"$(call dep_repo,$d)"$(comma)"$(call dep_commit,$d)"}})))
+]}.
+{erl_opts, $(call compat_erlc_opts_to_list,$(ERLC_OPTS))}.
+endef
+
+rebar.config:
+ $(gen_verbose) $(call core_render,compat_rebar_config,rebar.config)
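+
+# Illustrative output (hypothetical dependency): with DEPS = cowboy fetched
+# over git, the rendered rebar.config looks roughly like:
+#
+#   {deps, [
+#   {cowboy,".*",{git,"https://github.com/ninenines/cowboy","2.8.0"}}
+#   ]}.
+#   {erl_opts, [debug_info]}.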
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter asciideck,$(DEPS) $(DOC_DEPS)),asciideck)
+
+.PHONY: asciidoc asciidoc-guide asciidoc-manual install-asciidoc distclean-asciidoc-guide distclean-asciidoc-manual
+
+# Core targets.
+
+docs:: asciidoc
+
+distclean:: distclean-asciidoc-guide distclean-asciidoc-manual
+
+# Plugin-specific targets.
+
+asciidoc: asciidoc-guide asciidoc-manual
+
+# User guide.
+
+ifeq ($(wildcard doc/src/guide/book.asciidoc),)
+asciidoc-guide:
+else
+asciidoc-guide: distclean-asciidoc-guide doc-deps
+ a2x -v -f pdf doc/src/guide/book.asciidoc && mv doc/src/guide/book.pdf doc/guide.pdf
+ a2x -v -f chunked doc/src/guide/book.asciidoc && mv doc/src/guide/book.chunked/ doc/html/
+
+distclean-asciidoc-guide:
+ $(gen_verbose) rm -rf doc/html/ doc/guide.pdf
+endif
+
+# Man pages.
+
+ASCIIDOC_MANUAL_FILES := $(wildcard doc/src/manual/*.asciidoc)
+
+ifeq ($(ASCIIDOC_MANUAL_FILES),)
+asciidoc-manual:
+else
+
+# Configuration.
+
+MAN_INSTALL_PATH ?= /usr/local/share/man
+MAN_SECTIONS ?= 3 7
+MAN_PROJECT ?= $(shell echo $(PROJECT) | sed 's/^./\U&\E/')
+MAN_VERSION ?= $(PROJECT_VERSION)
+
+# Plugin-specific targets.
+
+define asciidoc2man.erl
+try
+ [begin
+ io:format(" ADOC ~s~n", [F]),
+ ok = asciideck:to_manpage(asciideck:parse_file(F), #{
+ compress => gzip,
+ outdir => filename:dirname(F),
+ extra2 => "$(MAN_PROJECT) $(MAN_VERSION)",
+ extra3 => "$(MAN_PROJECT) Function Reference"
+ })
+ end || F <- [$(shell echo $(addprefix $(comma)\",$(addsuffix \",$1)) | sed 's/^.//')]],
+ halt(0)
+catch C:E ->
+ io:format("Exception ~p:~p~nStacktrace: ~p~n", [C, E, erlang:get_stacktrace()]),
+ halt(1)
+end.
+endef
+
+asciidoc-manual:: doc-deps
+
+asciidoc-manual:: $(ASCIIDOC_MANUAL_FILES)
+ $(gen_verbose) $(call erlang,$(call asciidoc2man.erl,$?))
+ $(verbose) $(foreach s,$(MAN_SECTIONS),mkdir -p doc/man$s/ && mv doc/src/manual/*.$s.gz doc/man$s/;)
+
+install-docs:: install-asciidoc
+
+install-asciidoc: asciidoc-manual
+ $(foreach s,$(MAN_SECTIONS),\
+ mkdir -p $(MAN_INSTALL_PATH)/man$s/ && \
+ install -g `id -g` -o `id -u` -m 0644 doc/man$s/*.gz $(MAN_INSTALL_PATH)/man$s/;)
+
+distclean-asciidoc-manual:
+ $(gen_verbose) rm -rf $(addprefix doc/man,$(MAN_SECTIONS))
+endif
+endif
+
+# Copyright (c) 2014-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: bootstrap bootstrap-lib bootstrap-rel new list-templates
+
+# Core targets.
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Bootstrap targets:" \
+ " bootstrap Generate a skeleton of an OTP application" \
+ " bootstrap-lib Generate a skeleton of an OTP library" \
+ " bootstrap-rel Generate the files needed to build a release" \
+ " new-app in=NAME Create a new local OTP application NAME" \
+ " new-lib in=NAME Create a new local OTP library NAME" \
+ " new t=TPL n=NAME Generate a module NAME based on the template TPL" \
+ " new t=T n=N in=APP Generate a module NAME based on the template TPL in APP" \
+ " list-templates List available templates"
+
+# Bootstrap templates.
+
+define bs_appsrc
+{application, $p, [
+ {description, ""},
+ {vsn, "0.1.0"},
+ {id, "git"},
+ {modules, []},
+ {registered, []},
+ {applications, [
+ kernel,
+ stdlib
+ ]},
+ {mod, {$p_app, []}},
+ {env, []}
+]}.
+endef
+
+define bs_appsrc_lib
+{application, $p, [
+ {description, ""},
+ {vsn, "0.1.0"},
+ {id, "git"},
+ {modules, []},
+ {registered, []},
+ {applications, [
+ kernel,
+ stdlib
+ ]}
+]}.
+endef
+
+# To prevent autocompletion issues with ZSH, we add "include erlang.mk"
+# separately during the actual bootstrap.
+define bs_Makefile
+PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.1.0
+$(if $(SP),
+# Whitespace to be used when creating files from templates.
+SP = $(SP)
+)
+endef
+
+define bs_apps_Makefile
+PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.1.0
+$(if $(SP),
+# Whitespace to be used when creating files from templates.
+SP = $(SP)
+)
+# Make sure we know where the applications are located.
+ROOT_DIR ?= $(call core_relpath,$(dir $(ERLANG_MK_FILENAME)),$(APPS_DIR)/app)
+APPS_DIR ?= ..
+DEPS_DIR ?= $(call core_relpath,$(DEPS_DIR),$(APPS_DIR)/app)
+
+include $$(ROOT_DIR)/erlang.mk
+endef
+
+define bs_app
+-module($p_app).
+-behaviour(application).
+
+-export([start/2]).
+-export([stop/1]).
+
+start(_Type, _Args) ->
+ $p_sup:start_link().
+
+stop(_State) ->
+ ok.
+endef
+
+define bs_relx_config
+{release, {$p_release, "1"}, [$p, sasl, runtime_tools]}.
+{extended_start_script, true}.
+{sys_config, "config/sys.config"}.
+{vm_args, "config/vm.args"}.
+endef
+
+define bs_sys_config
+[
+].
+endef
+
+define bs_vm_args
+-name $p@127.0.0.1
+-setcookie $p
+-heart
+endef
+
+# Normal templates.
+
+define tpl_supervisor
+-module($(n)).
+-behaviour(supervisor).
+
+-export([start_link/0]).
+-export([init/1]).
+
+start_link() ->
+ supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+init([]) ->
+ Procs = [],
+ {ok, {{one_for_one, 1, 5}, Procs}}.
+endef
+
+define tpl_gen_server
+-module($(n)).
+-behaviour(gen_server).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_server.
+-export([init/1]).
+-export([handle_call/3]).
+-export([handle_cast/2]).
+-export([handle_info/2]).
+-export([terminate/2]).
+-export([code_change/3]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_server:start_link(?MODULE, [], []).
+
+%% gen_server.
+
+init([]) ->
+ {ok, #state{}}.
+
+handle_call(_Request, _From, State) ->
+ {reply, ignored, State}.
+
+handle_cast(_Msg, State) ->
+ {noreply, State}.
+
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+endef
+
+define tpl_module
+-module($(n)).
+-export([]).
+endef
+
+define tpl_cowboy_http
+-module($(n)).
+-behaviour(cowboy_http_handler).
+
+-export([init/3]).
+-export([handle/2]).
+-export([terminate/3]).
+
+-record(state, {
+}).
+
+init(_, Req, _Opts) ->
+ {ok, Req, #state{}}.
+
+handle(Req, State=#state{}) ->
+ {ok, Req2} = cowboy_req:reply(200, Req),
+ {ok, Req2, State}.
+
+terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_gen_fsm
+-module($(n)).
+-behaviour(gen_fsm).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_fsm.
+-export([init/1]).
+-export([state_name/2]).
+-export([handle_event/3]).
+-export([state_name/3]).
+-export([handle_sync_event/4]).
+-export([handle_info/3]).
+-export([terminate/3]).
+-export([code_change/4]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_fsm:start_link(?MODULE, [], []).
+
+%% gen_fsm.
+
+init([]) ->
+ {ok, state_name, #state{}}.
+
+state_name(_Event, StateData) ->
+ {next_state, state_name, StateData}.
+
+handle_event(_Event, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+state_name(_Event, _From, StateData) ->
+ {reply, ignored, state_name, StateData}.
+
+handle_sync_event(_Event, _From, StateName, StateData) ->
+ {reply, ignored, StateName, StateData}.
+
+handle_info(_Info, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+terminate(_Reason, _StateName, _StateData) ->
+ ok.
+
+code_change(_OldVsn, StateName, StateData, _Extra) ->
+ {ok, StateName, StateData}.
+endef
+
+define tpl_gen_statem
+-module($(n)).
+-behaviour(gen_statem).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_statem.
+-export([callback_mode/0]).
+-export([init/1]).
+-export([state_name/3]).
+-export([handle_event/4]).
+-export([terminate/3]).
+-export([code_change/4]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_statem:start_link(?MODULE, [], []).
+
+%% gen_statem.
+
+callback_mode() ->
+ state_functions.
+
+init([]) ->
+ {ok, state_name, #state{}}.
+
+state_name(_EventType, _EventData, StateData) ->
+ {next_state, state_name, StateData}.
+
+handle_event(_EventType, _EventData, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+terminate(_Reason, _StateName, _StateData) ->
+ ok.
+
+code_change(_OldVsn, StateName, StateData, _Extra) ->
+ {ok, StateName, StateData}.
+endef
+
+define tpl_cowboy_loop
+-module($(n)).
+-behaviour(cowboy_loop_handler).
+
+-export([init/3]).
+-export([info/3]).
+-export([terminate/3]).
+
+-record(state, {
+}).
+
+init(_, Req, _Opts) ->
+ {loop, Req, #state{}, 5000, hibernate}.
+
+info(_Info, Req, State) ->
+ {loop, Req, State, hibernate}.
+
+terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_cowboy_rest
+-module($(n)).
+
+-export([init/3]).
+-export([content_types_provided/2]).
+-export([get_html/2]).
+
+init(_, _Req, _Opts) ->
+ {upgrade, protocol, cowboy_rest}.
+
+content_types_provided(Req, State) ->
+ {[{{<<"text">>, <<"html">>, '*'}, get_html}], Req, State}.
+
+get_html(Req, State) ->
+ {<<"<html><body>This is REST!</body></html>">>, Req, State}.
+endef
+
+define tpl_cowboy_ws
+-module($(n)).
+-behaviour(cowboy_websocket_handler).
+
+-export([init/3]).
+-export([websocket_init/3]).
+-export([websocket_handle/3]).
+-export([websocket_info/3]).
+-export([websocket_terminate/3]).
+
+-record(state, {
+}).
+
+init(_, _, _) ->
+ {upgrade, protocol, cowboy_websocket}.
+
+websocket_init(_, Req, _Opts) ->
+ Req2 = cowboy_req:compact(Req),
+ {ok, Req2, #state{}}.
+
+websocket_handle({text, Data}, Req, State) ->
+ {reply, {text, Data}, Req, State};
+websocket_handle({binary, Data}, Req, State) ->
+ {reply, {binary, Data}, Req, State};
+websocket_handle(_Frame, Req, State) ->
+ {ok, Req, State}.
+
+websocket_info(_Info, Req, State) ->
+ {ok, Req, State}.
+
+websocket_terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_ranch_protocol
+-module($(n)).
+-behaviour(ranch_protocol).
+
+-export([start_link/4]).
+-export([init/4]).
+
+-type opts() :: [].
+-export_type([opts/0]).
+
+-record(state, {
+ socket :: inet:socket(),
+ transport :: module()
+}).
+
+start_link(Ref, Socket, Transport, Opts) ->
+ Pid = spawn_link(?MODULE, init, [Ref, Socket, Transport, Opts]),
+ {ok, Pid}.
+
+-spec init(ranch:ref(), inet:socket(), module(), opts()) -> ok.
+init(Ref, Socket, Transport, _Opts) ->
+ ok = ranch:accept_ack(Ref),
+ loop(#state{socket=Socket, transport=Transport}).
+
+loop(State) ->
+ loop(State).
+endef
+
+# Plugin-specific targets.
+
+ifndef WS
+ifdef SP
+WS = $(subst a,,a $(wordlist 1,$(SP),a a a a a a a a a a a a a a a a a a a a))
+else
+WS = $(tab)
+endif
+endif
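+
+# Illustrative: running "make bootstrap SP=2" sets WS to two spaces, so the
+# generated files are indented with spaces rather than the default tab.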
+
+bootstrap:
+ifneq ($(wildcard src/),)
+ $(error Error: src/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(eval n := $(PROJECT)_sup)
+ $(verbose) $(call core_render,bs_Makefile,Makefile)
+ $(verbose) echo "include erlang.mk" >> Makefile
+ $(verbose) mkdir src/
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc,src/$(PROJECT).app.src)
+endif
+ $(verbose) $(call core_render,bs_app,src/$(PROJECT)_app.erl)
+ $(verbose) $(call core_render,tpl_supervisor,src/$(PROJECT)_sup.erl)
+
+bootstrap-lib:
+ifneq ($(wildcard src/),)
+ $(error Error: src/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(verbose) $(call core_render,bs_Makefile,Makefile)
+ $(verbose) echo "include erlang.mk" >> Makefile
+ $(verbose) mkdir src/
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc_lib,src/$(PROJECT).app.src)
+endif
+
+bootstrap-rel:
+ifneq ($(wildcard relx.config),)
+ $(error Error: relx.config already exists)
+endif
+ifneq ($(wildcard config/),)
+ $(error Error: config/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(verbose) $(call core_render,bs_relx_config,relx.config)
+ $(verbose) mkdir config/
+ $(verbose) $(call core_render,bs_sys_config,config/sys.config)
+ $(verbose) $(call core_render,bs_vm_args,config/vm.args)
+
+new-app:
+ifndef in
+ $(error Usage: $(MAKE) new-app in=APP)
+endif
+ifneq ($(wildcard $(APPS_DIR)/$in),)
+ $(error Error: Application $in already exists)
+endif
+ $(eval p := $(in))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(eval n := $(in)_sup)
+ $(verbose) mkdir -p $(APPS_DIR)/$p/src/
+ $(verbose) $(call core_render,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile)
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc,$(APPS_DIR)/$p/src/$p.app.src)
+endif
+ $(verbose) $(call core_render,bs_app,$(APPS_DIR)/$p/src/$p_app.erl)
+ $(verbose) $(call core_render,tpl_supervisor,$(APPS_DIR)/$p/src/$p_sup.erl)
+
+new-lib:
+ifndef in
+ $(error Usage: $(MAKE) new-lib in=APP)
+endif
+ifneq ($(wildcard $(APPS_DIR)/$in),)
+ $(error Error: Application $in already exists)
+endif
+ $(eval p := $(in))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(verbose) mkdir -p $(APPS_DIR)/$p/src/
+ $(verbose) $(call core_render,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile)
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc_lib,$(APPS_DIR)/$p/src/$p.app.src)
+endif
+
+new:
+ifeq ($(wildcard src/)$(in),)
+ $(error Error: src/ directory does not exist)
+endif
+ifndef t
+ $(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP])
+endif
+ifndef n
+ $(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP])
+endif
+ifdef in
+ $(verbose) $(call core_render,tpl_$(t),$(APPS_DIR)/$(in)/src/$(n).erl)
+else
+ $(verbose) $(call core_render,tpl_$(t),src/$(n).erl)
+endif
+
+list-templates:
+	$(verbose) echo Available templates:
+ $(verbose) printf " %s\n" $(sort $(patsubst tpl_%,%,$(filter tpl_%,$(.VARIABLES))))
+
+# Copyright (c) 2014-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: clean-c_src distclean-c_src-env
+
+# Configuration.
+
+C_SRC_DIR ?= $(CURDIR)/c_src
+C_SRC_ENV ?= $(C_SRC_DIR)/env.mk
+C_SRC_OUTPUT ?= $(CURDIR)/priv/$(PROJECT)
+C_SRC_TYPE ?= shared
+
+# System type and C compiler/flags.
+
+ifeq ($(PLATFORM),msys2)
+ C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?= .exe
+ C_SRC_OUTPUT_SHARED_EXTENSION ?= .dll
+else
+ C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?=
+ C_SRC_OUTPUT_SHARED_EXTENSION ?= .so
+endif
+
+ifeq ($(C_SRC_TYPE),shared)
+ C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_SHARED_EXTENSION)
+else
+ C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_EXECUTABLE_EXTENSION)
+endif
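+
+# Illustrative: setting C_SRC_TYPE = executable in the project Makefile
+# links a standalone binary (no -shared) instead of the default shared
+# library used for NIFs and port drivers.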
+
+ifeq ($(PLATFORM),msys2)
+# We hardcode the compiler used on MSYS2. The default CC=cc does
+# not produce working code. The "gcc" MSYS2 package also doesn't.
+ CC = /mingw64/bin/gcc
+ export CC
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),darwin)
+ CC ?= cc
+ CFLAGS ?= -O3 -std=c99 -arch x86_64 -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -arch x86_64 -Wall
+ LDFLAGS ?= -arch x86_64 -flat_namespace -undefined suppress
+else ifeq ($(PLATFORM),freebsd)
+ CC ?= cc
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),linux)
+ CC ?= gcc
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+endif
+
+ifneq ($(PLATFORM),msys2)
+ CFLAGS += -fPIC
+ CXXFLAGS += -fPIC
+endif
+
+CFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
+CXXFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
+
+LDLIBS += -L"$(ERL_INTERFACE_LIB_DIR)" -lei
+
+# Verbosity.
+
+c_verbose_0 = @echo " C " $(filter-out $(notdir $(MAKEFILE_LIST) $(C_SRC_ENV)),$(^F));
+c_verbose = $(c_verbose_$(V))
+
+cpp_verbose_0 = @echo " CPP " $(filter-out $(notdir $(MAKEFILE_LIST) $(C_SRC_ENV)),$(^F));
+cpp_verbose = $(cpp_verbose_$(V))
+
+link_verbose_0 = @echo " LD " $(@F);
+link_verbose = $(link_verbose_$(V))
+
+# Targets.
+
+ifeq ($(wildcard $(C_SRC_DIR)),)
+else ifneq ($(wildcard $(C_SRC_DIR)/Makefile),)
+app:: app-c_src
+
+test-build:: app-c_src
+
+app-c_src:
+ $(MAKE) -C $(C_SRC_DIR)
+
+clean::
+ $(MAKE) -C $(C_SRC_DIR) clean
+
+else
+
+ifeq ($(SOURCES),)
+SOURCES := $(sort $(foreach pat,*.c *.C *.cc *.cpp,$(call core_find,$(C_SRC_DIR)/,$(pat))))
+endif
+OBJECTS = $(addsuffix .o, $(basename $(SOURCES)))
+
+COMPILE_C = $(c_verbose) $(CC) $(CFLAGS) $(CPPFLAGS) -c
+COMPILE_CPP = $(cpp_verbose) $(CXX) $(CXXFLAGS) $(CPPFLAGS) -c
+
+app:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
+
+test-build:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
+
+$(C_SRC_OUTPUT_FILE): $(OBJECTS)
+ $(verbose) mkdir -p $(dir $@)
+ $(link_verbose) $(CC) $(OBJECTS) \
+ $(LDFLAGS) $(if $(filter $(C_SRC_TYPE),shared),-shared) $(LDLIBS) \
+ -o $(C_SRC_OUTPUT_FILE)
+
+$(OBJECTS): $(MAKEFILE_LIST) $(C_SRC_ENV)
+
+%.o: %.c
+ $(COMPILE_C) $(OUTPUT_OPTION) $<
+
+%.o: %.cc
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+%.o: %.C
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+%.o: %.cpp
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+clean:: clean-c_src
+
+clean-c_src:
+ $(gen_verbose) rm -f $(C_SRC_OUTPUT_FILE) $(OBJECTS)
+
+endif
+
+ifneq ($(wildcard $(C_SRC_DIR)),)
+ERL_ERTS_DIR = $(shell $(ERL) -eval 'io:format("~s~n", [code:lib_dir(erts)]), halt().')
+
+$(C_SRC_ENV):
+ $(verbose) $(ERL) -eval "file:write_file(\"$(call core_native_path,$(C_SRC_ENV))\", \
+ io_lib:format( \
+ \"# Generated by Erlang.mk. Edit at your own risk!~n~n\" \
+ \"ERTS_INCLUDE_DIR ?= ~s/erts-~s/include/~n\" \
+ \"ERL_INTERFACE_INCLUDE_DIR ?= ~s~n\" \
+ \"ERL_INTERFACE_LIB_DIR ?= ~s~n\" \
+ \"ERTS_DIR ?= $(ERL_ERTS_DIR)~n\", \
+ [code:root_dir(), erlang:system_info(version), \
+ code:lib_dir(erl_interface, include), \
+ code:lib_dir(erl_interface, lib)])), \
+ halt()."
+
+distclean:: distclean-c_src-env
+
+distclean-c_src-env:
+ $(gen_verbose) rm -f $(C_SRC_ENV)
+
+-include $(C_SRC_ENV)
+
+ifneq ($(ERL_ERTS_DIR),$(ERTS_DIR))
+$(shell rm -f $(C_SRC_ENV))
+endif
+endif
+
+# Templates.
+
+define bs_c_nif
+#include "erl_nif.h"
+
+static int loads = 0;
+
+static int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
+{
+ /* Initialize private data. */
+ *priv_data = NULL;
+
+ loads++;
+
+ return 0;
+}
+
+static int upgrade(ErlNifEnv* env, void** priv_data, void** old_priv_data, ERL_NIF_TERM load_info)
+{
+ /* Convert the private data to the new version. */
+ *priv_data = *old_priv_data;
+
+ loads++;
+
+ return 0;
+}
+
+static void unload(ErlNifEnv* env, void* priv_data)
+{
+ if (loads == 1) {
+ /* Destroy the private data. */
+ }
+
+ loads--;
+}
+
+static ERL_NIF_TERM hello(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ if (enif_is_atom(env, argv[0])) {
+ return enif_make_tuple2(env,
+ enif_make_atom(env, "hello"),
+ argv[0]);
+ }
+
+ return enif_make_tuple2(env,
+ enif_make_atom(env, "error"),
+ enif_make_atom(env, "badarg"));
+}
+
+static ErlNifFunc nif_funcs[] = {
+ {"hello", 1, hello}
+};
+
+ERL_NIF_INIT($n, nif_funcs, load, NULL, upgrade, unload)
+endef
+
+define bs_erl_nif
+-module($n).
+
+-export([hello/1]).
+
+-on_load(on_load/0).
+on_load() ->
+ PrivDir = case code:priv_dir(?MODULE) of
+ {error, _} ->
+ AppPath = filename:dirname(filename:dirname(code:which(?MODULE))),
+ filename:join(AppPath, "priv");
+ Path ->
+ Path
+ end,
+ erlang:load_nif(filename:join(PrivDir, atom_to_list(?MODULE)), 0).
+
+hello(_) ->
+ erlang:nif_error({not_loaded, ?MODULE}).
+endef
+
+new-nif:
+ifneq ($(wildcard $(C_SRC_DIR)/$n.c),)
+ $(error Error: $(C_SRC_DIR)/$n.c already exists)
+endif
+ifneq ($(wildcard src/$n.erl),)
+ $(error Error: src/$n.erl already exists)
+endif
+ifndef n
+ $(error Usage: $(MAKE) new-nif n=NAME [in=APP])
+endif
+ifdef in
+ $(verbose) $(MAKE) -C $(APPS_DIR)/$(in)/ new-nif n=$n in=
+else
+ $(verbose) mkdir -p $(C_SRC_DIR) src/
+ $(verbose) $(call core_render,bs_c_nif,$(C_SRC_DIR)/$n.c)
+ $(verbose) $(call core_render,bs_erl_nif,src/$n.erl)
+endif
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: ci ci-prepare ci-setup
+
+CI_OTP ?=
+CI_HIPE ?=
+CI_ERLLVM ?=
+
+ifeq ($(CI_VM),native)
+ERLC_OPTS += +native
+TEST_ERLC_OPTS += +native
+else ifeq ($(CI_VM),erllvm)
+ERLC_OPTS += +native +'{hipe, [to_llvm]}'
+TEST_ERLC_OPTS += +native +'{hipe, [to_llvm]}'
+endif
+
+ifeq ($(strip $(CI_OTP) $(CI_HIPE) $(CI_ERLLVM)),)
+ci::
+else
+
+ci:: $(addprefix ci-,$(CI_OTP) $(addsuffix -native,$(CI_HIPE)) $(addsuffix -erllvm,$(CI_ERLLVM)))
+
+ci-prepare: $(addprefix $(KERL_INSTALL_DIR)/,$(CI_OTP) $(addsuffix -native,$(CI_HIPE)))
+
+ci-setup::
+ $(verbose) :
+
+ci-extra::
+ $(verbose) :
+
+ci_verbose_0 = @echo " CI " $(1);
+ci_verbose = $(ci_verbose_$(V))
+
+define ci_target
+ci-$1: $(KERL_INSTALL_DIR)/$2
+ $(verbose) $(MAKE) --no-print-directory clean
+ $(ci_verbose) \
+ PATH="$(KERL_INSTALL_DIR)/$2/bin:$(PATH)" \
+ CI_OTP_RELEASE="$1" \
+ CT_OPTS="-label $1" \
+ CI_VM="$3" \
+ $(MAKE) ci-setup tests
+ $(verbose) $(MAKE) --no-print-directory ci-extra
+endef
+
+$(foreach otp,$(CI_OTP),$(eval $(call ci_target,$(otp),$(otp),otp)))
+$(foreach otp,$(CI_HIPE),$(eval $(call ci_target,$(otp)-native,$(otp)-native,native)))
+$(foreach otp,$(CI_ERLLVM),$(eval $(call ci_target,$(otp)-erllvm,$(otp)-native,erllvm)))
+
+$(foreach otp,$(filter-out $(ERLANG_OTP),$(CI_OTP)),$(eval $(call kerl_otp_target,$(otp))))
+$(foreach otp,$(filter-out $(ERLANG_HIPE),$(sort $(CI_HIPE) $(CI_ERLLVM))),$(eval $(call kerl_hipe_target,$(otp))))
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Continuous Integration targets:" \
+ " ci Run '$(MAKE) tests' on all configured Erlang versions." \
+ "" \
+ "The CI_OTP variable must be defined with the Erlang versions" \
+ "that must be tested. For example: CI_OTP = OTP-17.3.4 OTP-17.5.3"
+
+endif
+
+# Copyright (c) 2020, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifdef CONCUERROR_TESTS
+
+.PHONY: concuerror distclean-concuerror
+
+# Configuration
+
+CONCUERROR_LOGS_DIR ?= $(CURDIR)/logs
+CONCUERROR_OPTS ?=
+
+# Core targets.
+
+check:: concuerror
+
+ifndef KEEP_LOGS
+distclean:: distclean-concuerror
+endif
+
+# Plugin-specific targets.
+
+$(ERLANG_MK_TMP)/Concuerror/bin/concuerror: | $(ERLANG_MK_TMP)
+ $(verbose) git clone https://github.com/parapluu/Concuerror $(ERLANG_MK_TMP)/Concuerror
+ $(verbose) $(MAKE) -C $(ERLANG_MK_TMP)/Concuerror
+
+$(CONCUERROR_LOGS_DIR):
+ $(verbose) mkdir -p $(CONCUERROR_LOGS_DIR)
+
+define concuerror_html_report
+<!DOCTYPE html>
+<html lang="en">
+<head>
+<meta charset="utf-8">
+<title>Concuerror HTML report</title>
+</head>
+<body>
+<h1>Concuerror HTML report</h1>
+<p>Generated on $(concuerror_date)</p>
+<ul>
+$(foreach t,$(concuerror_targets),<li><a href="$(t).txt">$(t)</a></li>)
+</ul>
+</body>
+</html>
+endef
+
+concuerror: $(addprefix concuerror-,$(subst :,-,$(CONCUERROR_TESTS)))
+ $(eval concuerror_date := $(shell date))
+ $(eval concuerror_targets := $^)
+ $(verbose) $(call core_render,concuerror_html_report,$(CONCUERROR_LOGS_DIR)/concuerror.html)
+
+define concuerror_target
+.PHONY: concuerror-$1-$2
+
+concuerror-$1-$2: test-build | $(ERLANG_MK_TMP)/Concuerror/bin/concuerror $(CONCUERROR_LOGS_DIR)
+ $(ERLANG_MK_TMP)/Concuerror/bin/concuerror \
+ --pa $(CURDIR)/ebin --pa $(TEST_DIR) \
+ -o $(CONCUERROR_LOGS_DIR)/concuerror-$1-$2.txt \
+ $$(CONCUERROR_OPTS) -m $1 -t $2
+endef
+
+$(foreach test,$(CONCUERROR_TESTS),$(eval $(call concuerror_target,$(firstword $(subst :, ,$(test))),$(lastword $(subst :, ,$(test))))))
+
+distclean-concuerror:
+ $(gen_verbose) rm -rf $(CONCUERROR_LOGS_DIR)
+
+endif
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: ct apps-ct distclean-ct
+
+# Configuration.
+
+CT_OPTS ?=
+
+ifneq ($(wildcard $(TEST_DIR)),)
+ifndef CT_SUITES
+CT_SUITES := $(sort $(subst _SUITE.erl,,$(notdir $(call core_find,$(TEST_DIR)/,*_SUITE.erl))))
+endif
+endif
+CT_SUITES ?=
+CT_LOGS_DIR ?= $(CURDIR)/logs
+
+# Core targets.
+
+tests:: ct
+
+ifndef KEEP_LOGS
+distclean:: distclean-ct
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Common_test targets:" \
+ " ct Run all the common_test suites for this project" \
+ "" \
+ "All your common_test suites have their associated targets." \
+ "A suite named http_SUITE can be ran using the ct-http target."
+
+# Plugin-specific targets.
+
+CT_RUN = ct_run \
+ -no_auto_compile \
+ -noinput \
+ -pa $(CURDIR)/ebin $(TEST_DIR) \
+ -dir $(TEST_DIR) \
+ -logdir $(CT_LOGS_DIR)
+
+ifeq ($(CT_SUITES),)
+ct: $(if $(IS_APP)$(ROOT_DIR),,apps-ct)
+else
+# We do not run tests if we are in an apps/* with no test directory.
+ifneq ($(IS_APP)$(wildcard $(TEST_DIR)),1)
+ct: test-build $(if $(IS_APP)$(ROOT_DIR),,apps-ct)
+ $(verbose) mkdir -p $(CT_LOGS_DIR)
+ $(gen_verbose) $(CT_RUN) -sname ct_$(PROJECT) -suite $(addsuffix _SUITE,$(CT_SUITES)) $(CT_OPTS)
+endif
+endif
+
+ifneq ($(ALL_APPS_DIRS),)
+define ct_app_target
+apps-ct-$1: test-build
+ $$(MAKE) -C $1 ct IS_APP=1
+endef
+
+$(foreach app,$(ALL_APPS_DIRS),$(eval $(call ct_app_target,$(app))))
+
+apps-ct: $(addprefix apps-ct-,$(ALL_APPS_DIRS))
+endif
+
+ifdef t
+ifeq (,$(findstring :,$t))
+CT_EXTRA = -group $t
+else
+t_words = $(subst :, ,$t)
+CT_EXTRA = -group $(firstword $(t_words)) -case $(lastword $(t_words))
+endif
+else
+ifdef c
+CT_EXTRA = -case $c
+else
+CT_EXTRA =
+endif
+endif
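+
+# Illustrative usage of t= and c= above (hypothetical suite/group/case):
+#
+#   make ct-http t=my_group            # one group
+#   make ct-http t=my_group:my_case    # one case within a group
+#   make ct-http c=my_case             # one case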
+
+define ct_suite_target
+ct-$(1): test-build
+ $(verbose) mkdir -p $(CT_LOGS_DIR)
+ $(gen_verbose_esc) $(CT_RUN) -sname ct_$(PROJECT) -suite $(addsuffix _SUITE,$(1)) $(CT_EXTRA) $(CT_OPTS)
+endef
+
+$(foreach test,$(CT_SUITES),$(eval $(call ct_suite_target,$(test))))
+
+distclean-ct:
+ $(gen_verbose) rm -rf $(CT_LOGS_DIR)
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: plt distclean-plt dialyze
+
+# Configuration.
+
+DIALYZER_PLT ?= $(CURDIR)/.$(PROJECT).plt
+export DIALYZER_PLT
+
+PLT_APPS ?=
+DIALYZER_DIRS ?= --src -r $(wildcard src) $(ALL_APPS_DIRS)
+DIALYZER_OPTS ?= -Werror_handling -Wrace_conditions -Wunmatched_returns # -Wunderspecs
+DIALYZER_PLT_OPTS ?=
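+
+# Illustrative: PLT_APPS adds applications to the PLT on top of
+# erts/kernel/stdlib and the project dependencies, e.g.:
+#
+#   PLT_APPS = crypto public_key ssl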
+
+# Core targets.
+
+check:: dialyze
+
+distclean:: distclean-plt
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Dialyzer targets:" \
+ " plt Build a PLT file for this project" \
+ " dialyze Analyze the project using Dialyzer"
+
+# Plugin-specific targets.
+
+define filter_opts.erl
+ Opts = init:get_plain_arguments(),
+ {Filtered, _} = lists:foldl(fun
+ (O, {Os, true}) -> {[O|Os], false};
+ (O = "-D", {Os, _}) -> {[O|Os], true};
+ (O = [\\$$-, \\$$D, _ | _], {Os, _}) -> {[O|Os], false};
+ (O = "-I", {Os, _}) -> {[O|Os], true};
+ (O = [\\$$-, \\$$I, _ | _], {Os, _}) -> {[O|Os], false};
+ (O = "-pa", {Os, _}) -> {[O|Os], true};
+ (_, Acc) -> Acc
+ end, {[], false}, Opts),
+ io:format("~s~n", [string:join(lists:reverse(Filtered), " ")]),
+ halt().
+endef
+
+# DIALYZER_PLT is a variable understood directly by Dialyzer.
+#
+# We append the path to erts at the end of the PLT. This works
+# because the PLT file is in the external term format and the
+# function binary_to_term/1 ignores any trailing data.
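+# (Illustrative: binary_to_term(<<131,97,1,"junk">>) returns 1 and ignores
+# the trailing bytes.)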
+$(DIALYZER_PLT): deps app
+ $(eval DEPS_LOG := $(shell test -f $(ERLANG_MK_TMP)/deps.log && \
+ while read p; do test -d $$p/ebin && echo $$p/ebin; done <$(ERLANG_MK_TMP)/deps.log))
+ $(verbose) dialyzer --build_plt $(DIALYZER_PLT_OPTS) --apps \
+ erts kernel stdlib $(PLT_APPS) $(OTP_DEPS) $(LOCAL_DEPS) $(DEPS_LOG) || test $$? -eq 2
+ $(verbose) $(ERL) -eval 'io:format("~n~s~n", [code:lib_dir(erts)]), halt().' >> $@
+
+plt: $(DIALYZER_PLT)
+
+distclean-plt:
+ $(gen_verbose) rm -f $(DIALYZER_PLT)
+
+ifneq ($(wildcard $(DIALYZER_PLT)),)
+dialyze: $(if $(filter --src,$(DIALYZER_DIRS)),,deps app)
+ $(verbose) if ! tail -n1 $(DIALYZER_PLT) | \
+ grep -q "^`$(ERL) -eval 'io:format("~s", [code:lib_dir(erts)]), halt().'`$$"; then \
+ rm $(DIALYZER_PLT); \
+ $(MAKE) plt; \
+ fi
+else
+dialyze: $(DIALYZER_PLT)
+endif
+ $(verbose) dialyzer --no_native `$(ERL) \
+ -eval "$(subst $(newline),,$(call escape_dquotes,$(call filter_opts.erl)))" \
+ -extra $(ERLC_OPTS)` $(DIALYZER_DIRS) $(DIALYZER_OPTS) $(if $(wildcard ebin/),-pa ebin/)
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-edoc edoc
+
+# Configuration.
+
+EDOC_OPTS ?=
+EDOC_SRC_DIRS ?=
+EDOC_OUTPUT ?= doc
+
+define edoc.erl
+ SrcPaths = lists:foldl(fun(P, Acc) ->
+ filelib:wildcard(atom_to_list(P) ++ "/{src,c_src}") ++ Acc
+ end, [], [$(call comma_list,$(patsubst %,'%',$(call core_native_path,$(EDOC_SRC_DIRS))))]),
+ DefaultOpts = [{dir, "$(EDOC_OUTPUT)"}, {source_path, SrcPaths}, {subpackages, false}],
+ edoc:application($(1), ".", [$(2)] ++ DefaultOpts),
+ halt(0).
+endef
+
+# Core targets.
+
+ifneq ($(strip $(EDOC_SRC_DIRS)$(wildcard doc/overview.edoc)),)
+docs:: edoc
+endif
+
+distclean:: distclean-edoc
+
+# Plugin-specific targets.
+
+edoc: distclean-edoc doc-deps
+ $(gen_verbose) $(call erlang,$(call edoc.erl,$(PROJECT),$(EDOC_OPTS)))
+
+distclean-edoc:
+ $(gen_verbose) rm -f $(EDOC_OUTPUT)/*.css $(EDOC_OUTPUT)/*.html $(EDOC_OUTPUT)/*.png $(EDOC_OUTPUT)/edoc-info
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Configuration.
+
+DTL_FULL_PATH ?=
+DTL_PATH ?= templates/
+DTL_PREFIX ?=
+DTL_SUFFIX ?= _dtl
+DTL_OPTS ?=
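+
+# Illustrative: with DTL_FULL_PATH set, templates/users/show.dtl compiles
+# to module users_show_dtl; unset, only the basename is used, giving
+# show_dtl (paths hypothetical, given the defaults above).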
+
+# Verbosity.
+
+dtl_verbose_0 = @echo " DTL " $(filter %.dtl,$(?F));
+dtl_verbose = $(dtl_verbose_$(V))
+
+# Core targets.
+
+DTL_PATH := $(abspath $(DTL_PATH))
+DTL_FILES := $(sort $(call core_find,$(DTL_PATH),*.dtl))
+
+ifneq ($(DTL_FILES),)
+
+DTL_NAMES = $(addprefix $(DTL_PREFIX),$(addsuffix $(DTL_SUFFIX),$(DTL_FILES:$(DTL_PATH)/%.dtl=%)))
+DTL_MODULES = $(if $(DTL_FULL_PATH),$(subst /,_,$(DTL_NAMES)),$(notdir $(DTL_NAMES)))
+BEAM_FILES += $(addsuffix .beam,$(addprefix ebin/,$(DTL_MODULES)))
+
+ifneq ($(words $(DTL_FILES)),0)
+# Rebuild templates when the Makefile changes.
+$(ERLANG_MK_TMP)/last-makefile-change-erlydtl: $(MAKEFILE_LIST) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(DTL_FILES); \
+ fi
+ $(verbose) touch $@
+
+ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change-erlydtl
+endif
+
+define erlydtl_compile.erl
+ [begin
+ Module0 = case "$(strip $(DTL_FULL_PATH))" of
+ "" ->
+ filename:basename(F, ".dtl");
+ _ ->
+ "$(call core_native_path,$(DTL_PATH))/" ++ F2 = filename:rootname(F, ".dtl"),
+ re:replace(F2, "/", "_", [{return, list}, global])
+ end,
+ Module = list_to_atom("$(DTL_PREFIX)" ++ string:to_lower(Module0) ++ "$(DTL_SUFFIX)"),
+ case erlydtl:compile(F, Module, [$(DTL_OPTS)] ++ [{out_dir, "ebin/"}, return_errors]) of
+ ok -> ok;
+ {ok, _} -> ok
+ end
+ end || F <- string:tokens("$(1)", " ")],
+ halt().
+endef
+
+ebin/$(PROJECT).app:: $(DTL_FILES) | ebin/
+ $(if $(strip $?),\
+ $(dtl_verbose) $(call erlang,$(call erlydtl_compile.erl,$(call core_native_path,$?)),\
+ -pa ebin/))
+
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, Dave Cottlehuber <dch@skunkwerks.at>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-escript escript escript-zip
+
+# Configuration.
+
+ESCRIPT_NAME ?= $(PROJECT)
+ESCRIPT_FILE ?= $(ESCRIPT_NAME)
+
+ESCRIPT_SHEBANG ?= /usr/bin/env escript
+ESCRIPT_COMMENT ?= This is an -*- erlang -*- file
+ESCRIPT_EMU_ARGS ?= -escript main $(ESCRIPT_NAME)
+
+ESCRIPT_ZIP ?= 7z a -tzip -mx=9 -mtc=off $(if $(filter-out 0,$(V)),,> /dev/null)
+ESCRIPT_ZIP_FILE ?= $(ERLANG_MK_TMP)/escript.zip
+
+# Core targets.
+
+distclean:: distclean-escript
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Escript targets:" \
+ " escript Build an executable escript archive" \
+
+# Plugin-specific targets.
+
+escript-zip:: FULL=1
+escript-zip:: deps app
+ $(verbose) mkdir -p $(dir $(ESCRIPT_ZIP))
+ $(verbose) rm -f $(ESCRIPT_ZIP_FILE)
+ $(gen_verbose) cd .. && $(ESCRIPT_ZIP) $(ESCRIPT_ZIP_FILE) $(PROJECT)/ebin/*
+ifneq ($(DEPS),)
+ $(verbose) cd $(DEPS_DIR) && $(ESCRIPT_ZIP) $(ESCRIPT_ZIP_FILE) \
+ $(subst $(DEPS_DIR)/,,$(addsuffix /*,$(wildcard \
+ $(addsuffix /ebin,$(shell cat $(ERLANG_MK_TMP)/deps.log)))))
+endif
+
+escript:: escript-zip
+ $(gen_verbose) printf "%s\n" \
+ "#!$(ESCRIPT_SHEBANG)" \
+ "%% $(ESCRIPT_COMMENT)" \
+ "%%! $(ESCRIPT_EMU_ARGS)" > $(ESCRIPT_FILE)
+ $(verbose) cat $(ESCRIPT_ZIP_FILE) >> $(ESCRIPT_FILE)
+ $(verbose) chmod +x $(ESCRIPT_FILE)
+
+distclean-escript:
+ $(gen_verbose) rm -f $(ESCRIPT_FILE)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, Enrique Fernandez <enrique.fernandez@erlang-solutions.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: eunit apps-eunit
+
+# Configuration
+
+EUNIT_OPTS ?=
+EUNIT_ERL_OPTS ?=
+
+# Core targets.
+
+tests:: eunit
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "EUnit targets:" \
+ " eunit Run all the EUnit tests for this project"
+
+# Plugin-specific targets.
+
+define eunit.erl
+ $(call cover.erl)
+ CoverSetup(),
+ case eunit:test($1, [$(EUNIT_OPTS)]) of
+ ok -> ok;
+ error -> halt(2)
+ end,
+ CoverExport("$(call core_native_path,$(COVER_DATA_DIR))/eunit.coverdata"),
+ halt()
+endef
+
+EUNIT_ERL_OPTS += -pa $(TEST_DIR) $(CURDIR)/ebin
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+eunit: test-build cover-data-dir
+ $(gen_verbose) $(call erlang,$(call eunit.erl,['$(t)']),$(EUNIT_ERL_OPTS))
+else
+eunit: test-build cover-data-dir
+ $(gen_verbose) $(call erlang,$(call eunit.erl,fun $(t)/0),$(EUNIT_ERL_OPTS))
+endif
+else
+EUNIT_EBIN_MODS = $(notdir $(basename $(ERL_FILES) $(BEAM_FILES)))
+EUNIT_TEST_MODS = $(notdir $(basename $(call core_find,$(TEST_DIR)/,*.erl)))
+
+EUNIT_MODS = $(foreach mod,$(EUNIT_EBIN_MODS) $(filter-out \
+ $(patsubst %,%_tests,$(EUNIT_EBIN_MODS)),$(EUNIT_TEST_MODS)),'$(mod)')
+
+eunit: test-build $(if $(IS_APP)$(ROOT_DIR),,apps-eunit) cover-data-dir
+ifneq ($(wildcard src/ $(TEST_DIR)),)
+ $(gen_verbose) $(call erlang,$(call eunit.erl,[$(call comma_list,$(EUNIT_MODS))]),$(EUNIT_ERL_OPTS))
+endif
+
+ifneq ($(ALL_APPS_DIRS),)
+apps-eunit: test-build
+ $(verbose) eunit_retcode=0 ; for app in $(ALL_APPS_DIRS); do $(MAKE) -C $$app eunit IS_APP=1; \
+ [ $$? -ne 0 ] && eunit_retcode=1 ; done ; \
+ exit $$eunit_retcode
+endif
+endif
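+
+# Illustrative invocations, derived from the t= handling above (module and
+# test names are hypothetical):
+#
+#   make eunit                      # all modules in ebin/ and $(TEST_DIR)
+#   make eunit t=my_module          # a single module (no colon in t)
+#   make eunit t=my_module:a_test   # a single 0-arity test function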
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter proper,$(DEPS) $(TEST_DEPS)),proper)
+.PHONY: proper
+
+# Targets.
+
+tests:: proper
+
+define proper_check.erl
+ $(call cover.erl)
+ code:add_pathsa([
+ "$(call core_native_path,$(CURDIR)/ebin)",
+ "$(call core_native_path,$(DEPS_DIR)/*/ebin)",
+ "$(call core_native_path,$(TEST_DIR))"]),
+ Module = fun(M) ->
+ [true] =:= lists:usort([
+ case atom_to_list(F) of
+ "prop_" ++ _ ->
+ io:format("Testing ~p:~p/0~n", [M, F]),
+ proper:quickcheck(M:F(), nocolors);
+ _ ->
+ true
+ end
+ || {F, 0} <- M:module_info(exports)])
+ end,
+ try begin
+ CoverSetup(),
+ Res = case $(1) of
+ all -> [true] =:= lists:usort([Module(M) || M <- [$(call comma_list,$(3))]]);
+ module -> Module($(2));
+ function -> proper:quickcheck($(2), nocolors)
+ end,
+ CoverExport("$(COVER_DATA_DIR)/proper.coverdata"),
+ Res
+ end of
+ true -> halt(0);
+ _ -> halt(1)
+ catch error:undef ->
+ io:format("Undefined property or module?~n~p~n", [erlang:get_stacktrace()]),
+ halt(0)
+ end.
+endef
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+proper: test-build cover-data-dir
+ $(verbose) $(call erlang,$(call proper_check.erl,module,$(t)))
+else
+proper: test-build cover-data-dir
+ $(verbose) echo Testing $(t)/0
+ $(verbose) $(call erlang,$(call proper_check.erl,function,$(t)()))
+endif
+else
+proper: test-build cover-data-dir
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(wildcard ebin/*.beam) $(call core_find,$(TEST_DIR)/,*.beam))))))
+ $(gen_verbose) $(call erlang,$(call proper_check.erl,all,undefined,$(MODULES)))
+endif
+endif
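+
+# Illustrative invocations, mirroring the t= handling above (names are
+# hypothetical); properties must be exported 0-arity prop_* functions:
+#
+#   make proper                             # check every module found
+#   make proper t=prop_parser               # check one module
+#   make proper t=prop_parser:prop_decode   # check a single property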
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Verbosity.
+
+proto_verbose_0 = @echo " PROTO " $(filter %.proto,$(?F));
+proto_verbose = $(proto_verbose_$(V))
+
+# Core targets.
+
+ifneq ($(wildcard src/),)
+ifneq ($(filter gpb protobuffs,$(BUILD_DEPS) $(DEPS)),)
+PROTO_FILES := $(filter %.proto,$(ALL_SRC_FILES))
+ERL_FILES += $(addprefix src/,$(patsubst %.proto,%_pb.erl,$(notdir $(PROTO_FILES))))
+
+ifeq ($(PROTO_FILES),)
+$(ERLANG_MK_TMP)/last-makefile-change-protobuffs:
+ $(verbose) :
+else
+# Rebuild proto files when the Makefile changes.
+# We exclude $(PROJECT).d to avoid a circular dependency.
+$(ERLANG_MK_TMP)/last-makefile-change-protobuffs: $(filter-out $(PROJECT).d,$(MAKEFILE_LIST)) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(PROTO_FILES); \
+ fi
+ $(verbose) touch $@
+
+$(PROJECT).d:: $(ERLANG_MK_TMP)/last-makefile-change-protobuffs
+endif
+
+ifeq ($(filter gpb,$(BUILD_DEPS) $(DEPS)),)
+define compile_proto.erl
+ [begin
+ protobuffs_compile:generate_source(F, [
+ {output_include_dir, "./include"},
+ {output_src_dir, "./src"}])
+ end || F <- string:tokens("$1", " ")],
+ halt().
+endef
+else
+define compile_proto.erl
+ [begin
+ gpb_compile:file(F, [
+ {include_as_lib, true},
+ {module_name_suffix, "_pb"},
+ {o_hrl, "./include"},
+ {o_erl, "./src"}])
+ end || F <- string:tokens("$1", " ")],
+ halt().
+endef
+endif
+
+ifneq ($(PROTO_FILES),)
+$(PROJECT).d:: $(PROTO_FILES)
+ $(verbose) mkdir -p ebin/ include/
+ $(if $(strip $?),$(proto_verbose) $(call erlang,$(call compile_proto.erl,$?)))
+endif
+endif
+endif
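+
+# Illustrative usage: listing gpb (or protobuffs) as a dependency enables
+# this plugin; .proto files found among the source files are then compiled
+# automatically, e.g. with:
+#
+#   DEPS += gpb
+#
+# src/foo.proto (a hypothetical file) would yield src/foo_pb.erl plus a
+# header under include/.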
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: relx-rel relx-relup distclean-relx-rel run
+
+# Configuration.
+
+RELX ?= $(ERLANG_MK_TMP)/relx
+RELX_CONFIG ?= $(CURDIR)/relx.config
+
+RELX_URL ?= https://erlang.mk/res/relx-v3.27.0
+RELX_OPTS ?=
+RELX_OUTPUT_DIR ?= _rel
+RELX_REL_EXT ?=
+RELX_TAR ?= 1
+
+ifdef SFX
+ RELX_TAR = 1
+endif
+
+ifeq ($(firstword $(RELX_OPTS)),-o)
+ RELX_OUTPUT_DIR = $(word 2,$(RELX_OPTS))
+else
+ RELX_OPTS += -o $(RELX_OUTPUT_DIR)
+endif
+
+# Core targets.
+
+ifeq ($(IS_DEP),)
+ifneq ($(wildcard $(RELX_CONFIG)),)
+rel:: relx-rel
+
+relup:: relx-relup
+endif
+endif
+
+distclean:: distclean-relx-rel
+
+# Plugin-specific targets.
+
+$(RELX): | $(ERLANG_MK_TMP)
+ $(gen_verbose) $(call core_http_get,$(RELX),$(RELX_URL))
+ $(verbose) chmod +x $(RELX)
+
+relx-rel: $(RELX) rel-deps app
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) release
+ $(verbose) $(MAKE) relx-post-rel
+ifeq ($(RELX_TAR),1)
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) tar
+endif
+
+relx-relup: $(RELX) rel-deps app
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) release
+ $(verbose) $(MAKE) relx-post-rel
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) relup $(if $(filter 1,$(RELX_TAR)),tar)
+
+distclean-relx-rel:
+ $(gen_verbose) rm -rf $(RELX_OUTPUT_DIR)
+
+# Default hooks.
+relx-post-rel::
+ $(verbose) :
+
+# Run target.
+
+ifeq ($(wildcard $(RELX_CONFIG)),)
+run::
+else
+
+define get_relx_release.erl
+ {ok, Config} = file:consult("$(call core_native_path,$(RELX_CONFIG))"),
+ {release, {Name, Vsn0}, _} = lists:keyfind(release, 1, Config),
+ Vsn = case Vsn0 of
+ {cmd, Cmd} -> os:cmd(Cmd);
+ semver -> "";
+ {semver, _} -> "";
+ _ -> Vsn0
+ end,
+ Extended = case lists:keyfind(extended_start_script, 1, Config) of
+ {_, true} -> "1";
+ _ -> ""
+ end,
+ io:format("~s ~s ~s", [Name, Vsn, Extended]),
+ halt(0).
+endef
+
+RELX_REL := $(shell $(call erlang,$(get_relx_release.erl)))
+RELX_REL_NAME := $(word 1,$(RELX_REL))
+RELX_REL_VSN := $(word 2,$(RELX_REL))
+RELX_REL_CMD := $(if $(word 3,$(RELX_REL)),console)
+
+ifeq ($(PLATFORM),msys2)
+RELX_REL_EXT := .cmd
+endif
+
+run:: all
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) $(RELX_REL_CMD)
+
+ifdef RELOAD
+rel::
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) ping
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) \
+ eval "io:format(\"~p~n\", [c:lm()])"
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Relx targets:" \
+ " run Compile the project, build the release and run it"
+
+endif
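+
+# Illustrative minimal relx.config consumed by the targets above (release
+# and application names are hypothetical):
+#
+#   {release, {my_rel, "1.0.0"}, [my_app]}.
+#   {extended_start_script, true}.
+#
+# `make rel` then builds the release under $(RELX_OUTPUT_DIR), and `make run`
+# boots it; the console argument is only passed for extended start scripts.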
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, M Robert Martin <rob@version2beta.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: shell
+
+# Configuration.
+
+SHELL_ERL ?= erl
+SHELL_PATHS ?= $(CURDIR)/ebin $(TEST_DIR)
+SHELL_OPTS ?=
+
+ALL_SHELL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(SHELL_DEPS))
+
+# Core targets
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Shell targets:" \
+ " shell Run an erlang shell with SHELL_OPTS or reasonable default"
+
+# Plugin-specific targets.
+
+$(foreach dep,$(SHELL_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+build-shell-deps:
+else
+build-shell-deps: $(ALL_SHELL_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_SHELL_DEPS_DIRS) ; do \
+ if [ -z "$(strip $(FULL))" ] && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ else \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ fi \
+ done
+endif
+
+shell:: build-shell-deps
+ $(gen_verbose) $(SHELL_ERL) -pa $(SHELL_PATHS) $(SHELL_OPTS)
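+
+# Illustrative usage (sync is an example SHELL_DEPS entry, not a requirement):
+#
+#   SHELL_DEPS = sync
+#   SHELL_OPTS = -s sync
+#
+# `make shell` then builds sync if needed and starts erl with ebin/ and
+# $(TEST_DIR) in the code path.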
+
+# Copyright 2017, Stanislaw Klekot <dozzie@jarowit.net>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-sphinx sphinx
+
+# Configuration.
+
+SPHINX_BUILD ?= sphinx-build
+SPHINX_SOURCE ?= doc
+SPHINX_CONFDIR ?=
+SPHINX_FORMATS ?= html
+SPHINX_DOCTREES ?= $(ERLANG_MK_TMP)/sphinx.doctrees
+SPHINX_OPTS ?=
+
+#sphinx_html_opts =
+#sphinx_html_output = html
+#sphinx_man_opts =
+#sphinx_man_output = man
+#sphinx_latex_opts =
+#sphinx_latex_output = latex
+
+# Helpers.
+
+sphinx_build_0 = @echo " SPHINX" $1; $(SPHINX_BUILD) -N -q
+sphinx_build_1 = $(SPHINX_BUILD) -N
+sphinx_build_2 = set -x; $(SPHINX_BUILD)
+sphinx_build = $(sphinx_build_$(V))
+
+define sphinx.build
+$(call sphinx_build,$1) -b $1 -d $(SPHINX_DOCTREES) $(if $(SPHINX_CONFDIR),-c $(SPHINX_CONFDIR)) $(SPHINX_OPTS) $(sphinx_$1_opts) -- $(SPHINX_SOURCE) $(call sphinx.output,$1)
+
+endef
+
+define sphinx.output
+$(if $(sphinx_$1_output),$(sphinx_$1_output),$1)
+endef
+
+# Targets.
+
+ifneq ($(wildcard $(if $(SPHINX_CONFDIR),$(SPHINX_CONFDIR),$(SPHINX_SOURCE))/conf.py),)
+docs:: sphinx
+distclean:: distclean-sphinx
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Sphinx targets:" \
+ " sphinx Generate Sphinx documentation." \
+ "" \
+ "ReST sources and 'conf.py' file are expected in directory pointed by" \
+ "SPHINX_SOURCE ('doc' by default). SPHINX_FORMATS lists formats to build (only" \
+ "'html' format is generated by default); target directory can be specified by" \
+ 'setting sphinx_$${format}_output, for example: sphinx_html_output = output/html' \
+ "Additional Sphinx options can be set in SPHINX_OPTS."
+
+# Plugin-specific targets.
+
+sphinx:
+ $(foreach F,$(SPHINX_FORMATS),$(call sphinx.build,$F))
+
+distclean-sphinx:
+ $(gen_verbose) rm -rf $(filter-out $(SPHINX_SOURCE),$(foreach F,$(SPHINX_FORMATS),$(call sphinx.output,$F)))
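+
+# Illustrative usage: build HTML and man pages, redirecting the man output
+# (values are examples):
+#
+#   SPHINX_FORMATS = html man
+#   sphinx_man_output = doc/man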
+
+# Copyright (c) 2017, Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: show-ERL_LIBS show-ERLC_OPTS show-TEST_ERLC_OPTS
+
+show-ERL_LIBS:
+ @echo $(ERL_LIBS)
+
+show-ERLC_OPTS:
+ @$(foreach opt,$(ERLC_OPTS) -pa ebin -I include,echo "$(opt)";)
+
+show-TEST_ERLC_OPTS:
+ @$(foreach opt,$(TEST_ERLC_OPTS) -pa ebin -I include,echo "$(opt)";)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter triq,$(DEPS) $(TEST_DEPS)),triq)
+.PHONY: triq
+
+# Targets.
+
+tests:: triq
+
+define triq_check.erl
+ $(call cover.erl)
+ code:add_pathsa([
+ "$(call core_native_path,$(CURDIR)/ebin)",
+ "$(call core_native_path,$(DEPS_DIR)/*/ebin)",
+ "$(call core_native_path,$(TEST_DIR))"]),
+ try begin
+ CoverSetup(),
+ Res = case $(1) of
+ all -> [true] =:= lists:usort([triq:check(M) || M <- [$(call comma_list,$(3))]]);
+ module -> triq:check($(2));
+ function -> triq:check($(2))
+ end,
+ CoverExport("$(COVER_DATA_DIR)/triq.coverdata"),
+ Res
+ end of
+ true -> halt(0);
+ _ -> halt(1)
+ catch error:undef ->
+ io:format("Undefined property or module?~n~p~n", [erlang:get_stacktrace()]),
+ halt(0)
+ end.
+endef
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+triq: test-build cover-data-dir
+ $(verbose) $(call erlang,$(call triq_check.erl,module,$(t)))
+else
+triq: test-build cover-data-dir
+ $(verbose) echo Testing $(t)/0
+ $(verbose) $(call erlang,$(call triq_check.erl,function,$(t)()))
+endif
+else
+triq: test-build cover-data-dir
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(wildcard ebin/*.beam) $(call core_find,$(TEST_DIR)/,*.beam))))))
+ $(gen_verbose) $(call erlang,$(call triq_check.erl,all,undefined,$(MODULES)))
+endif
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Erlang Solutions Ltd.
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: xref distclean-xref
+
+# Configuration.
+
+ifeq ($(XREF_CONFIG),)
+ XREFR_ARGS :=
+else
+ XREFR_ARGS := -c $(XREF_CONFIG)
+endif
+
+XREFR ?= $(CURDIR)/xrefr
+export XREFR
+
+XREFR_URL ?= https://github.com/inaka/xref_runner/releases/download/1.1.0/xrefr
+
+# Core targets.
+
+help::
+ $(verbose) printf '%s\n' '' \
+ 'Xref targets:' \
+ ' xref Run Xrefr using $$XREF_CONFIG as the config file, if defined'
+
+distclean:: distclean-xref
+
+# Plugin-specific targets.
+
+$(XREFR):
+ $(gen_verbose) $(call core_http_get,$(XREFR),$(XREFR_URL))
+ $(verbose) chmod +x $(XREFR)
+
+xref: deps app $(XREFR)
+ $(gen_verbose) $(XREFR) $(XREFR_ARGS)
+
+distclean-xref:
+ $(gen_verbose) rm -rf $(XREFR)
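+
+# Illustrative usage: pointing the runner at a project-local configuration
+# file (the file name is an example):
+#
+#   XREF_CONFIG = xref.config
+#
+# `make xref` downloads xrefr on first use and passes the file via -c.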
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Viktor Söderqvist <viktor@zuiderkwast.se>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+COVER_REPORT_DIR ?= cover
+COVER_DATA_DIR ?= $(COVER_REPORT_DIR)
+
+ifdef COVER
+COVER_APPS ?= $(notdir $(ALL_APPS_DIRS))
+COVER_DEPS ?=
+endif
+
+# Code coverage for Common Test.
+
+ifdef COVER
+ifdef CT_RUN
+ifneq ($(wildcard $(TEST_DIR)),)
+test-build:: $(TEST_DIR)/ct.cover.spec
+
+$(TEST_DIR)/ct.cover.spec: cover-data-dir
+ $(gen_verbose) printf "%s\n" \
+ "{incl_app, '$(PROJECT)', details}." \
+ "{incl_dirs, '$(PROJECT)', [\"$(call core_native_path,$(CURDIR)/ebin)\" \
+ $(foreach a,$(COVER_APPS),$(comma) \"$(call core_native_path,$(APPS_DIR)/$a/ebin)\") \
+ $(foreach d,$(COVER_DEPS),$(comma) \"$(call core_native_path,$(DEPS_DIR)/$d/ebin)\")]}." \
+ '{export,"$(call core_native_path,$(abspath $(COVER_DATA_DIR))/ct.coverdata)"}.' > $@
+
+CT_RUN += -cover $(TEST_DIR)/ct.cover.spec
+endif
+endif
+endif
+
+# Code coverage for other tools.
+
+ifdef COVER
+define cover.erl
+ CoverSetup = fun() ->
+ Dirs = ["$(call core_native_path,$(CURDIR)/ebin)"
+ $(foreach a,$(COVER_APPS),$(comma) "$(call core_native_path,$(APPS_DIR)/$a/ebin)")
+ $(foreach d,$(COVER_DEPS),$(comma) "$(call core_native_path,$(DEPS_DIR)/$d/ebin)")],
+ [begin
+ case filelib:is_dir(Dir) of
+ false -> false;
+ true ->
+ case cover:compile_beam_directory(Dir) of
+ {error, _} -> halt(1);
+ _ -> true
+ end
+ end
+ end || Dir <- Dirs]
+ end,
+ CoverExport = fun(Filename) -> cover:export(Filename) end,
+endef
+else
+define cover.erl
+ CoverSetup = fun() -> ok end,
+ CoverExport = fun(_) -> ok end,
+endef
+endif
+
+# Core targets
+
+ifdef COVER
+ifneq ($(COVER_REPORT_DIR),)
+tests::
+ $(verbose) $(MAKE) --no-print-directory cover-report
+endif
+
+cover-data-dir: | $(COVER_DATA_DIR)
+
+$(COVER_DATA_DIR):
+ $(verbose) mkdir -p $(COVER_DATA_DIR)
+else
+cover-data-dir:
+endif
+
+clean:: coverdata-clean
+
+ifneq ($(COVER_REPORT_DIR),)
+distclean:: cover-report-clean
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Cover targets:" \
+ " cover-report Generate a HTML coverage report from previously collected" \
+ " cover data." \
+ " all.coverdata Merge all coverdata files into all.coverdata." \
+ "" \
+ "If COVER=1 is set, coverage data is generated by the targets eunit and ct. The" \
+ "target tests additionally generates a HTML coverage report from the combined" \
+ "coverdata files from each of these testing tools. HTML reports can be disabled" \
+ "by setting COVER_REPORT_DIR to empty."
+
+# Plugin specific targets
+
+COVERDATA = $(filter-out $(COVER_DATA_DIR)/all.coverdata,$(wildcard $(COVER_DATA_DIR)/*.coverdata))
+
+.PHONY: coverdata-clean
+coverdata-clean:
+ $(gen_verbose) rm -f $(COVER_DATA_DIR)/*.coverdata $(TEST_DIR)/ct.cover.spec
+
+# Merge all coverdata files into one.
+define cover_export.erl
+ $(foreach f,$(COVERDATA),cover:import("$(f)") == ok orelse halt(1),)
+ cover:export("$(COVER_DATA_DIR)/$@"), halt(0).
+endef
+
+all.coverdata: $(COVERDATA) cover-data-dir
+ $(gen_verbose) $(call erlang,$(cover_export.erl))
+
+# These are only defined if COVER_REPORT_DIR is non-empty. Set COVER_REPORT_DIR to
+# empty if you want the coverdata files but not the HTML report.
+ifneq ($(COVER_REPORT_DIR),)
+
+.PHONY: cover-report-clean cover-report
+
+cover-report-clean:
+ $(gen_verbose) rm -rf $(COVER_REPORT_DIR)
+ifneq ($(COVER_REPORT_DIR),$(COVER_DATA_DIR))
+ $(if $(shell ls -A $(COVER_DATA_DIR)/),,$(verbose) rmdir $(COVER_DATA_DIR))
+endif
+
+ifeq ($(COVERDATA),)
+cover-report:
+else
+
+# Modules which include eunit.hrl always contain one line without coverage
+# because eunit defines test/0 which is never called. We compensate for this.
+EUNIT_HRL_MODS = $(subst $(space),$(comma),$(shell \
+ grep -H -e '^\s*-include.*include/eunit\.hrl"' src/*.erl \
+ | sed "s/^src\/\(.*\)\.erl:.*/'\1'/" | uniq))
+
+define cover_report.erl
+ $(foreach f,$(COVERDATA),cover:import("$(f)") == ok orelse halt(1),)
+ Ms = cover:imported_modules(),
+ [cover:analyse_to_file(M, "$(COVER_REPORT_DIR)/" ++ atom_to_list(M)
+ ++ ".COVER.html", [html]) || M <- Ms],
+ Report = [begin {ok, R} = cover:analyse(M, module), R end || M <- Ms],
+ EunitHrlMods = [$(EUNIT_HRL_MODS)],
+ Report1 = [{M, {Y, case lists:member(M, EunitHrlMods) of
+ true -> N - 1; false -> N end}} || {M, {Y, N}} <- Report],
+ TotalY = lists:sum([Y || {_, {Y, _}} <- Report1]),
+ TotalN = lists:sum([N || {_, {_, N}} <- Report1]),
+ Perc = fun(Y, N) -> case Y + N of 0 -> 100; S -> round(100 * Y / S) end end,
+ TotalPerc = Perc(TotalY, TotalN),
+ {ok, F} = file:open("$(COVER_REPORT_DIR)/index.html", [write]),
+ io:format(F, "<!DOCTYPE html><html>~n"
+ "<head><meta charset=\"UTF-8\">~n"
+ "<title>Coverage report</title></head>~n"
+ "<body>~n", []),
+ io:format(F, "<h1>Coverage</h1>~n<p>Total: ~p%</p>~n", [TotalPerc]),
+ io:format(F, "<table><tr><th>Module</th><th>Coverage</th></tr>~n", []),
+ [io:format(F, "<tr><td><a href=\"~p.COVER.html\">~p</a></td>"
+ "<td>~p%</td></tr>~n",
+ [M, M, Perc(Y, N)]) || {M, {Y, N}} <- Report1],
+ How = "$(subst $(space),$(comma)$(space),$(basename $(COVERDATA)))",
+ Date = "$(shell date -u "+%Y-%m-%dT%H:%M:%SZ")",
+ io:format(F, "</table>~n"
+ "<p>Generated using ~s and erlang.mk on ~s.</p>~n"
+ "</body></html>", [How, Date]),
+ halt().
+endef
+
+cover-report:
+ $(verbose) mkdir -p $(COVER_REPORT_DIR)
+ $(gen_verbose) $(call erlang,$(cover_report.erl))
+
+endif
+endif # ifneq ($(COVER_REPORT_DIR),)
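+
+# Illustrative workflow, assuming the eunit plugin above is in use:
+#
+#   make eunit COVER=1    # collect eunit.coverdata in $(COVER_DATA_DIR)
+#   make cover-report     # render $(COVER_REPORT_DIR)/index.html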
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: sfx
+
+ifdef RELX_REL
+ifdef SFX
+
+# Configuration.
+
+SFX_ARCHIVE ?= $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/$(RELX_REL_NAME)-$(RELX_REL_VSN).tar.gz
+SFX_OUTPUT_FILE ?= $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME).run
+
+# Core targets.
+
+rel:: sfx
+
+# Plugin-specific targets.
+
+define sfx_stub
+#!/bin/sh
+
+TMPDIR=`mktemp -d`
+ARCHIVE=`awk '/^__ARCHIVE_BELOW__$$/ {print NR + 1; exit 0;}' $$0`
+FILENAME=$$(basename $$0)
+REL=$${FILENAME%.*}
+
+tail -n+$$ARCHIVE $$0 | tar -xzf - -C $$TMPDIR
+
+$$TMPDIR/bin/$$REL console
+RET=$$?
+
+rm -rf $$TMPDIR
+
+exit $$RET
+
+__ARCHIVE_BELOW__
+endef
+
+sfx:
+ $(verbose) $(call core_render,sfx_stub,$(SFX_OUTPUT_FILE))
+ $(gen_verbose) cat $(SFX_ARCHIVE) >> $(SFX_OUTPUT_FILE)
+ $(verbose) chmod +x $(SFX_OUTPUT_FILE)
+
+endif
+endif
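+
+# Illustrative usage: SFX implies RELX_TAR=1, so a single invocation builds
+# the release tarball and wraps it in the stub above:
+#
+#   make rel SFX=1
+#
+# producing the self-extracting $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME).run.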
+
+# Copyright (c) 2013-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# External plugins.
+
+DEP_PLUGINS ?=
+
+$(foreach p,$(DEP_PLUGINS),\
+ $(eval $(if $(findstring /,$p),\
+ $(call core_dep_plugin,$p,$(firstword $(subst /, ,$p))),\
+ $(call core_dep_plugin,$p/plugins.mk,$p))))
+
+help:: help-plugins
+
+help-plugins::
+ $(verbose) :
+
+# Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015-2016, Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Fetch dependencies recursively (without building them).
+
+.PHONY: fetch-deps fetch-doc-deps fetch-rel-deps fetch-test-deps \
+ fetch-shell-deps
+
+.PHONY: $(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+fetch-deps: $(ERLANG_MK_RECURSIVE_DEPS_LIST)
+fetch-doc-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST)
+fetch-rel-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST)
+fetch-test-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST)
+fetch-shell-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+ifneq ($(SKIP_DEPS),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST):
+ $(verbose) :> $@
+else
+# By default, we fetch "normal" dependencies. They are also included no
+# matter the type of requested dependencies.
+#
+# $(ALL_DEPS_DIRS) includes $(BUILD_DEPS).
+
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_DOC_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_REL_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_TEST_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_SHELL_DEPS_DIRS)
+
+# Allow using fetch-deps together with $(DEP_TYPES) to fetch multiple types
+# of dependencies with a single target.
+ifneq ($(filter doc,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_DOC_DEPS_DIRS)
+endif
+ifneq ($(filter rel,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_REL_DEPS_DIRS)
+endif
+ifneq ($(filter test,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_TEST_DEPS_DIRS)
+endif
+ifneq ($(filter shell,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_SHELL_DEPS_DIRS)
+endif
+
+ERLANG_MK_RECURSIVE_TMP_LIST := $(abspath $(ERLANG_MK_TMP)/recursive-tmp-deps-$(shell echo $$PPID).log)
+
+$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): | $(ERLANG_MK_TMP)
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_RECURSIVE_TMP_LIST)
+endif
+ $(verbose) touch $(ERLANG_MK_RECURSIVE_TMP_LIST)
+ $(verbose) set -e; for dep in $^ ; do \
+ if ! grep -qs ^$$dep$$ $(ERLANG_MK_RECURSIVE_TMP_LIST); then \
+ echo $$dep >> $(ERLANG_MK_RECURSIVE_TMP_LIST); \
+ if grep -qs -E "^[[:blank:]]*include[[:blank:]]+(erlang\.mk|.*/erlang\.mk|.*ERLANG_MK_FILENAME.*)$$" \
+ $$dep/GNUmakefile $$dep/makefile $$dep/Makefile; then \
+ $(MAKE) -C $$dep fetch-deps \
+ IS_DEP=1 \
+ ERLANG_MK_RECURSIVE_TMP_LIST=$(ERLANG_MK_RECURSIVE_TMP_LIST); \
+ fi \
+ fi \
+ done
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) sort < $(ERLANG_MK_RECURSIVE_TMP_LIST) | \
+ uniq > $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted
+ $(verbose) cmp -s $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted $@ \
+ || mv $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted $@
+ $(verbose) rm -f $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted
+ $(verbose) rm $(ERLANG_MK_RECURSIVE_TMP_LIST)
+endif
+endif # ifneq ($(SKIP_DEPS),)
+
+# List dependencies recursively.
+
+.PHONY: list-deps list-doc-deps list-rel-deps list-test-deps \
+ list-shell-deps
+
+list-deps: $(ERLANG_MK_RECURSIVE_DEPS_LIST)
+list-doc-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST)
+list-rel-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST)
+list-test-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST)
+list-shell-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+list-deps list-doc-deps list-rel-deps list-test-deps list-shell-deps:
+ $(verbose) cat $^
+
+# Query dependencies recursively.
+
+.PHONY: query-deps query-doc-deps query-rel-deps query-test-deps \
+ query-shell-deps
+
+QUERY ?= name fetch_method repo version
+
+define query_target
+$(1): $(2) clean-tmp-query.log
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(4)
+endif
+ $(verbose) $(foreach dep,$(3),\
+ echo $(PROJECT): $(foreach q,$(QUERY),$(call query_$(q),$(dep))) >> $(4) ;)
+ $(if $(filter-out query-deps,$(1)),,\
+ $(verbose) set -e; for dep in $(3) ; do \
+ if grep -qs ^$$$$dep$$$$ $(ERLANG_MK_TMP)/query.log; then \
+ :; \
+ else \
+ echo $$$$dep >> $(ERLANG_MK_TMP)/query.log; \
+ $(MAKE) -C $(DEPS_DIR)/$$$$dep $$@ QUERY="$(QUERY)" IS_DEP=1 || true; \
+ fi \
+ done)
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) touch $(4)
+ $(verbose) cat $(4)
+endif
+endef
+
+clean-tmp-query.log:
+ifeq ($(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_TMP)/query.log
+endif
+
+$(eval $(call query_target,query-deps,$(ERLANG_MK_RECURSIVE_DEPS_LIST),$(BUILD_DEPS) $(DEPS),$(ERLANG_MK_QUERY_DEPS_FILE)))
+$(eval $(call query_target,query-doc-deps,$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST),$(DOC_DEPS),$(ERLANG_MK_QUERY_DOC_DEPS_FILE)))
+$(eval $(call query_target,query-rel-deps,$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST),$(REL_DEPS),$(ERLANG_MK_QUERY_REL_DEPS_FILE)))
+$(eval $(call query_target,query-test-deps,$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST),$(TEST_DEPS),$(ERLANG_MK_QUERY_TEST_DEPS_FILE)))
+$(eval $(call query_target,query-shell-deps,$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST),$(SHELL_DEPS),$(ERLANG_MK_QUERY_SHELL_DEPS_FILE)))
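+
+# Illustrative usage: QUERY selects which query_* fields are printed per
+# dependency, e.g.:
+#
+#   make query-deps QUERY="name version"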
diff --git a/deps/rabbit/include/amqqueue.hrl b/deps/rabbit/include/amqqueue.hrl
new file mode 100644
index 0000000000..097f1dfa0c
--- /dev/null
+++ b/deps/rabbit/include/amqqueue.hrl
@@ -0,0 +1,132 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2018-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-include("amqqueue_v1.hrl").
+-include("amqqueue_v2.hrl").
+
+-define(is_amqqueue(Q),
+ (?is_amqqueue_v2(Q) orelse
+ ?is_amqqueue_v1(Q))).
+
+-define(amqqueue_is_auto_delete(Q),
+ ((?is_amqqueue_v2(Q) andalso
+ ?amqqueue_v2_field_auto_delete(Q) =:= true) orelse
+ (?is_amqqueue_v1(Q) andalso
+ ?amqqueue_v1_field_auto_delete(Q) =:= true))).
+
+-define(amqqueue_is_durable(Q),
+ ((?is_amqqueue_v2(Q) andalso
+ ?amqqueue_v2_field_durable(Q) =:= true) orelse
+ (?is_amqqueue_v1(Q) andalso
+ ?amqqueue_v1_field_durable(Q) =:= true))).
+
+-define(amqqueue_exclusive_owner_is(Q, Owner),
+ ((?is_amqqueue_v2(Q) andalso
+ ?amqqueue_v2_field_exclusive_owner(Q) =:= Owner) orelse
+ (?is_amqqueue_v1(Q) andalso
+ ?amqqueue_v1_field_exclusive_owner(Q) =:= Owner))).
+
+-define(amqqueue_exclusive_owner_is_pid(Q),
+ ((?is_amqqueue_v2(Q) andalso
+ is_pid(?amqqueue_v2_field_exclusive_owner(Q))) orelse
+ (?is_amqqueue_v1(Q) andalso
+ is_pid(?amqqueue_v1_field_exclusive_owner(Q))))).
+
+-define(amqqueue_state_is(Q, State),
+ ((?is_amqqueue_v2(Q) andalso
+ ?amqqueue_v2_field_state(Q) =:= State) orelse
+ (?is_amqqueue_v1(Q) andalso
+ ?amqqueue_v1_field_state(Q) =:= State))).
+
+-define(amqqueue_v1_type, rabbit_classic_queue).
+
+-define(amqqueue_is_classic(Q),
+ ((?is_amqqueue_v2(Q) andalso
+ ?amqqueue_v2_field_type(Q) =:= rabbit_classic_queue) orelse
+ ?is_amqqueue_v1(Q))).
+
+-define(amqqueue_is_quorum(Q),
+ (?is_amqqueue_v2(Q) andalso
+ ?amqqueue_v2_field_type(Q) =:= rabbit_quorum_queue) orelse
+ false).
+
+-define(amqqueue_is_stream(Q),
+ (?is_amqqueue_v2(Q) andalso
+ ?amqqueue_v2_field_type(Q) =:= rabbit_stream_queue) orelse
+ false).
+
+-define(amqqueue_has_valid_pid(Q),
+ ((?is_amqqueue_v2(Q) andalso
+ is_pid(?amqqueue_v2_field_pid(Q))) orelse
+ (?is_amqqueue_v1(Q) andalso
+ is_pid(?amqqueue_v1_field_pid(Q))))).
+
+-define(amqqueue_pid_runs_on_local_node(Q),
+ ((?is_amqqueue_v2(Q) andalso
+ node(?amqqueue_v2_field_pid(Q)) =:= node()) orelse
+ (?is_amqqueue_v1(Q) andalso
+ node(?amqqueue_v1_field_pid(Q)) =:= node()))).
+
+-define(amqqueue_pid_equals(Q, Pid),
+ ((?is_amqqueue_v2(Q) andalso
+ ?amqqueue_v2_field_pid(Q) =:= Pid) orelse
+ (?is_amqqueue_v1(Q) andalso
+ ?amqqueue_v1_field_pid(Q) =:= Pid))).
+
+-define(amqqueue_pids_are_equal(Q0, Q1),
+ ((?is_amqqueue_v2(Q0) andalso ?is_amqqueue_v2(Q1) andalso
+ ?amqqueue_v2_field_pid(Q0) =:= ?amqqueue_v2_field_pid(Q1)) orelse
+ (?is_amqqueue_v1(Q0) andalso ?is_amqqueue_v1(Q1) andalso
+ ?amqqueue_v1_field_pid(Q0) =:= ?amqqueue_v1_field_pid(Q1)))).
+
+-define(amqqueue_field_name(Q),
+ case ?is_amqqueue_v2(Q) of
+ true -> ?amqqueue_v2_field_name(Q);
+ false -> case ?is_amqqueue_v1(Q) of
+ true -> ?amqqueue_v1_field_name(Q)
+ end
+ end).
+
+-define(amqqueue_field_pid(Q),
+ case ?is_amqqueue_v2(Q) of
+ true -> ?amqqueue_v2_field_pid(Q);
+ false -> case ?is_amqqueue_v1(Q) of
+ true -> ?amqqueue_v1_field_pid(Q)
+ end
+ end).
+
+-define(amqqueue_v1_vhost(Q), element(2, ?amqqueue_v1_field_name(Q))).
+-define(amqqueue_v2_vhost(Q), element(2, ?amqqueue_v2_field_name(Q))).
+
+-define(amqqueue_vhost_equals(Q, VHost),
+ ((?is_amqqueue_v2(Q) andalso
+ ?amqqueue_v2_vhost(Q) =:= VHost) orelse
+ (?is_amqqueue_v1(Q) andalso
+ ?amqqueue_v1_vhost(Q) =:= VHost))).
+
+-ifdef(DEBUG_QUORUM_QUEUE_FF).
+-define(enable_quorum_queue_if_debug,
+ begin
+ rabbit_log:info(
+ "---- ENABLING quorum_queue as part of "
+ "?try_mnesia_tx_or_upgrade_amqqueue_and_retry() ----"),
+ ok = rabbit_feature_flags:enable(quorum_queue)
+ end).
+-else.
+-define(enable_quorum_queue_if_debug, noop).
+-endif.
+
+-define(try_mnesia_tx_or_upgrade_amqqueue_and_retry(Expr1, Expr2),
+ try
+ ?enable_quorum_queue_if_debug,
+ Expr1
+ catch
+ throw:{error, {bad_type, T}} when ?is_amqqueue(T) ->
+ Expr2;
+ throw:{aborted, {bad_type, T}} when ?is_amqqueue(T) ->
+ Expr2
+ end).
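+
+%% Illustrative use of the macros above (the function below is hypothetical,
+%% not part of this header). Because each macro expands to an expression over
+%% both record versions, callers stay version-agnostic:
+%%
+%%   same_owner(Q, Pid) when ?is_amqqueue(Q) ->
+%%       ?amqqueue_pid_equals(Q, Pid) andalso
+%%           ?amqqueue_exclusive_owner_is(Q, Pid).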
diff --git a/deps/rabbit/include/amqqueue_v1.hrl b/deps/rabbit/include/amqqueue_v1.hrl
new file mode 100644
index 0000000000..04b2d72850
--- /dev/null
+++ b/deps/rabbit/include/amqqueue_v1.hrl
@@ -0,0 +1,20 @@
+-define(is_amqqueue_v1(Q), is_record(Q, amqqueue, 19)).
+
+-define(amqqueue_v1_field_name(Q), element(2, Q)).
+-define(amqqueue_v1_field_durable(Q), element(3, Q)).
+-define(amqqueue_v1_field_auto_delete(Q), element(4, Q)).
+-define(amqqueue_v1_field_exclusive_owner(Q), element(5, Q)).
+-define(amqqueue_v1_field_arguments(Q), element(6, Q)).
+-define(amqqueue_v1_field_pid(Q), element(7, Q)).
+-define(amqqueue_v1_field_slave_pids(Q), element(8, Q)).
+-define(amqqueue_v1_field_sync_slave_pids(Q), element(9, Q)).
+-define(amqqueue_v1_field_recoverable_slaves(Q), element(10, Q)).
+-define(amqqueue_v1_field_policy(Q), element(11, Q)).
+-define(amqqueue_v1_field_operator_policy(Q), element(12, Q)).
+-define(amqqueue_v1_field_gm_pids(Q), element(13, Q)).
+-define(amqqueue_v1_field_decorators(Q), element(14, Q)).
+-define(amqqueue_v1_field_state(Q), element(15, Q)).
+-define(amqqueue_v1_field_policy_version(Q), element(16, Q)).
+-define(amqqueue_v1_field_slave_pids_pending_shutdown(Q), element(17, Q)).
+-define(amqqueue_v1_field_vhost(Q), element(18, Q)).
+-define(amqqueue_v1_field_options(Q), element(19, Q)).
diff --git a/deps/rabbit/include/amqqueue_v2.hrl b/deps/rabbit/include/amqqueue_v2.hrl
new file mode 100644
index 0000000000..c79a3b7366
--- /dev/null
+++ b/deps/rabbit/include/amqqueue_v2.hrl
@@ -0,0 +1,22 @@
+-define(is_amqqueue_v2(Q), is_record(Q, amqqueue, 21)).
+
+-define(amqqueue_v2_field_name(Q), element(2, Q)).
+-define(amqqueue_v2_field_durable(Q), element(3, Q)).
+-define(amqqueue_v2_field_auto_delete(Q), element(4, Q)).
+-define(amqqueue_v2_field_exclusive_owner(Q), element(5, Q)).
+-define(amqqueue_v2_field_arguments(Q), element(6, Q)).
+-define(amqqueue_v2_field_pid(Q), element(7, Q)).
+-define(amqqueue_v2_field_slave_pids(Q), element(8, Q)).
+-define(amqqueue_v2_field_sync_slave_pids(Q), element(9, Q)).
+-define(amqqueue_v2_field_recoverable_slaves(Q), element(10, Q)).
+-define(amqqueue_v2_field_policy(Q), element(11, Q)).
+-define(amqqueue_v2_field_operator_policy(Q), element(12, Q)).
+-define(amqqueue_v2_field_gm_pids(Q), element(13, Q)).
+-define(amqqueue_v2_field_decorators(Q), element(14, Q)).
+-define(amqqueue_v2_field_state(Q), element(15, Q)).
+-define(amqqueue_v2_field_policy_version(Q), element(16, Q)).
+-define(amqqueue_v2_field_slave_pids_pending_shutdown(Q), element(17, Q)).
+-define(amqqueue_v2_field_vhost(Q), element(18, Q)).
+-define(amqqueue_v2_field_options(Q), element(19, Q)).
+-define(amqqueue_v2_field_type(Q), element(20, Q)).
+-define(amqqueue_v2_field_type_state(Q), element(21, Q)).
diff --git a/deps/rabbit/include/gm_specs.hrl b/deps/rabbit/include/gm_specs.hrl
new file mode 100644
index 0000000000..2a16c862c4
--- /dev/null
+++ b/deps/rabbit/include/gm_specs.hrl
@@ -0,0 +1,15 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-type callback_result() :: 'ok' | {'stop', any()} | {'become', atom(), args()}.
+-type args() :: any().
+-type members() :: [pid()].
+
+-spec joined(args(), members()) -> callback_result().
+-spec members_changed(args(), members(), members()) -> callback_result().
+-spec handle_msg(args(), pid(), any()) -> callback_result().
+-spec handle_terminate(args(), term()) -> any().
diff --git a/deps/rabbit/include/vhost.hrl b/deps/rabbit/include/vhost.hrl
new file mode 100644
index 0000000000..d3abc0dd2a
--- /dev/null
+++ b/deps/rabbit/include/vhost.hrl
@@ -0,0 +1,6 @@
+-include("vhost_v1.hrl").
+-include("vhost_v2.hrl").
+
+-define(is_vhost(V),
+ (?is_vhost_v2(V) orelse
+ ?is_vhost_v1(V))).
diff --git a/deps/rabbit/include/vhost_v1.hrl b/deps/rabbit/include/vhost_v1.hrl
new file mode 100644
index 0000000000..185739c6be
--- /dev/null
+++ b/deps/rabbit/include/vhost_v1.hrl
@@ -0,0 +1,4 @@
+-define(is_vhost_v1(V), is_record(V, vhost, 3)).
+
+-define(vhost_v1_field_name(V), element(2, V)).
+-define(vhost_v1_field_limits(V), element(3, V)).
diff --git a/deps/rabbit/include/vhost_v2.hrl b/deps/rabbit/include/vhost_v2.hrl
new file mode 100644
index 0000000000..9345e8b206
--- /dev/null
+++ b/deps/rabbit/include/vhost_v2.hrl
@@ -0,0 +1,5 @@
+-define(is_vhost_v2(V), is_record(V, vhost, 4)).
+
+-define(vhost_v2_field_name(Q), element(2, Q)).
+-define(vhost_v2_field_limits(Q), element(3, Q)).
+-define(vhost_v2_field_metadata(Q), element(4, Q)).
diff --git a/deps/rabbit/priv/schema/.gitignore b/deps/rabbit/priv/schema/.gitignore
new file mode 100644
index 0000000000..68e5b59a44
--- /dev/null
+++ b/deps/rabbit/priv/schema/.gitignore
@@ -0,0 +1,4 @@
+# plugin schemas are extracted
+# into this directory: this is a Cuttlefish
+# requirement. So we ignore them.
+rabbitmq_*.schema
diff --git a/deps/rabbit/priv/schema/rabbit.schema b/deps/rabbit/priv/schema/rabbit.schema
new file mode 100644
index 0000000000..518403c20d
--- /dev/null
+++ b/deps/rabbit/priv/schema/rabbit.schema
@@ -0,0 +1,1791 @@
+% vim:ft=erlang:
+% ==============================
+% Rabbit app section
+% ==============================
+
+%%
+%% Network Connectivity
+%% ====================
+%%
+
+%% By default, RabbitMQ will listen on all interfaces, using
+%% the standard (reserved) AMQP port.
+%%
+%% {tcp_listeners, [5672]},
+%% To listen on a specific interface, provide a tuple of {IpAddress, Port}.
+%% For example, to listen only on localhost for both IPv4 and IPv6:
+%%
+%% {tcp_listeners, [{"127.0.0.1", 5672},
+%% {"[::1]", 5672}]},
+
+{mapping, "listeners.tcp", "rabbit.tcp_listeners",[
+ {datatype, {enum, [none]}}
+]}.
+
+{mapping, "listeners.tcp.$name", "rabbit.tcp_listeners",[
+ {datatype, [integer, ip]}
+]}.
+
+{translation, "rabbit.tcp_listeners",
+fun(Conf) ->
+ case cuttlefish:conf_get("listeners.tcp", Conf, undefined) of
+ none -> [];
+ _ ->
+ Settings = cuttlefish_variable:filter_by_prefix("listeners.tcp", Conf),
+ [ V || {_, V} <- Settings ]
+ end
+end}.
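+
+%% In the new-style configuration format the same settings read, for
+%% example (interface and port values are illustrative):
+%%
+%% listeners.tcp.default = 5672
+%% listeners.tcp.local = 127.0.0.1:5672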
+
+%% TLS listeners are configured in the same fashion as TCP listeners,
+%% including the option to control the choice of interface.
+%%
+%% {ssl_listeners, [5671]},
+
+{mapping, "listeners.ssl", "rabbit.ssl_listeners",[
+ {datatype, {enum, [none]}}
+]}.
+
+{mapping, "listeners.ssl.$name", "rabbit.ssl_listeners",[
+ {datatype, [integer, ip]}
+]}.
+
+{translation, "rabbit.ssl_listeners",
+fun(Conf) ->
+ case cuttlefish:conf_get("listeners.ssl", Conf, undefined) of
+ none -> [];
+ _ ->
+ Settings = cuttlefish_variable:filter_by_prefix("listeners.ssl", Conf),
+ [ V || {_, V} <- Settings ]
+ end
+end}.
+
+%% Number of Erlang processes that will accept connections for the TCP
+%% and TLS listeners.
+%%
+%% {num_tcp_acceptors, 10},
+%% {num_ssl_acceptors, 1},
+
+{mapping, "num_acceptors.ssl", "rabbit.num_ssl_acceptors", [
+ {datatype, integer}
+]}.
+
+{mapping, "num_acceptors.tcp", "rabbit.num_tcp_acceptors", [
+ {datatype, integer}
+]}.
+
+
+{mapping, "socket_writer.gc_threshold", "rabbit.writer_gc_threshold", [
+ {datatype, [{atom, off}, integer]}
+]}.
+
+{translation, "rabbit.writer_gc_threshold",
+ fun(Conf) ->
+ case cuttlefish:conf_get("socket_writer.gc_threshold", Conf, undefined) of
+ %% missing from the config
+ undefined -> cuttlefish:unset();
+ %% explicitly disabled
+ off -> undefined;
+ Int when is_integer(Int) andalso Int > 0 ->
+ Int;
+ _ ->
+ cuttlefish:invalid("should be a non-negative integer")
+ end
+ end
+}.
+
+%% Maximum time for the AMQP 0-9-1 handshake (after socket connection
+%% and TLS handshake), in milliseconds.
+%%
+%% {handshake_timeout, 10000},
+
+{mapping, "handshake_timeout", "rabbit.handshake_timeout", [
+ {datatype, [{atom, infinity}, integer]}
+]}.
+
+%% Set to 'true' to perform reverse DNS lookups when accepting a
+%% connection. Hostnames will then be shown instead of IP addresses
+%% in rabbitmqctl and the management plugin.
+%%
+%% {reverse_dns_lookups, true},
+
+{mapping, "reverse_dns_lookups", "rabbit.reverse_dns_lookups", [
+ {datatype, {enum, [true, false]}}
+]}.
+
+{mapping, "erlang.K", "vm_args.+K", [
+ {default, "true"},
+ {level, advanced}
+]}.
+
+%%
+%% Definition import
+%%
+
+%% Load definitions from a JSON file or directory of files. See
+%% https://www.rabbitmq.com/management.html#load-definitions
+%%
+%% {load_definitions, "/path/to/schema.json"},
+%% {load_definitions, "/path/to/schemas"},
+{mapping, "load_definitions", "rabbit.load_definitions",
+ [{datatype, string},
+ {validators, ["file_accessible"]}]}.
+
+%%
+%% Security / AAA
+%% ==============
+%%
+
+%% The default "guest" user is only permitted to access the server
+%% via a loopback interface (e.g. localhost).
+%% {loopback_users, [<<"guest">>]},
+%%
+%% Uncomment the following line if you want to allow access to the
+%% guest user from anywhere on the network.
+%% {loopback_users, []},
+
+{mapping, "loopback_users", "rabbit.loopback_users", [
+ {datatype, {enum, [none]}}
+]}.
+
+{mapping, "loopback_users.$user", "rabbit.loopback_users", [
+ {datatype, atom}
+]}.
+
+{translation, "rabbit.loopback_users",
+fun(Conf) ->
+ None = cuttlefish:conf_get("loopback_users", Conf, undefined),
+ case None of
+ none -> [];
+ _ ->
+ Settings = cuttlefish_variable:filter_by_prefix("loopback_users", Conf),
+ [ list_to_binary(U) || {["loopback_users", U], V} <- Settings, V == true ]
+ end
+end}.
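+
+%% New-style equivalents (illustrative): either clear the list entirely or
+%% flag individual users:
+%%
+%% loopback_users = none
+%% loopback_users.guest = true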
+
+%% TLS options.
+%% See https://www.rabbitmq.com/ssl.html for full documentation.
+%%
+%% {ssl_options, [{cacertfile, "/path/to/testca/cacert.pem"},
+%% {certfile, "/path/to/server/cert.pem"},
+%% {keyfile, "/path/to/server/key.pem"},
+%% {verify, verify_peer},
+%% {fail_if_no_peer_cert, false}]},
+
+{mapping, "ssl_allow_poodle_attack", "rabbit.ssl_allow_poodle_attack",
+[{datatype, {enum, [true, false]}}]}.
+
+{mapping, "ssl_options", "rabbit.ssl_options", [
+ {datatype, {enum, [none]}}
+]}.
+
+{translation, "rabbit.ssl_options",
+fun(Conf) ->
+ case cuttlefish:conf_get("ssl_options", Conf, undefined) of
+ none -> [];
+ _ -> cuttlefish:invalid("Invalid ssl_options")
+ end
+end}.
+
+{mapping, "ssl_options.verify", "rabbit.ssl_options.verify", [
+ {datatype, {enum, [verify_peer, verify_none]}}]}.
+
+{mapping, "ssl_options.fail_if_no_peer_cert", "rabbit.ssl_options.fail_if_no_peer_cert", [
+ {datatype, {enum, [true, false]}}]}.
+
+{mapping, "ssl_options.cacertfile", "rabbit.ssl_options.cacertfile",
+ [{datatype, string}, {validators, ["file_accessible"]}]}.
+
+{mapping, "ssl_options.certfile", "rabbit.ssl_options.certfile",
+ [{datatype, string}, {validators, ["file_accessible"]}]}.
+
+{mapping, "ssl_options.cacerts.$name", "rabbit.ssl_options.cacerts",
+ [{datatype, string}]}.
+
+{translation, "rabbit.ssl_options.cacerts",
+fun(Conf) ->
+ Settings = cuttlefish_variable:filter_by_prefix("ssl_options.cacerts", Conf),
+ [ list_to_binary(V) || {_, V} <- Settings ]
+end}.
+
+{mapping, "ssl_options.cert", "rabbit.ssl_options.cert",
+ [{datatype, string}]}.
+
+{translation, "rabbit.ssl_options.cert",
+fun(Conf) ->
+ list_to_binary(cuttlefish:conf_get("ssl_options.cert", Conf))
+end}.
+
+{mapping, "ssl_options.client_renegotiation", "rabbit.ssl_options.client_renegotiation",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "ssl_options.crl_check", "rabbit.ssl_options.crl_check",
+ [{datatype, [{enum, [true, false, peer, best_effort]}]}]}.
+
+{mapping, "ssl_options.depth", "rabbit.ssl_options.depth",
+ [{datatype, integer}, {validators, ["byte"]}]}.
+
+{mapping, "ssl_options.dh", "rabbit.ssl_options.dh",
+ [{datatype, string}]}.
+
+{translation, "rabbit.ssl_options.dh",
+fun(Conf) ->
+ list_to_binary(cuttlefish:conf_get("ssl_options.dh", Conf))
+end}.
+
+{mapping, "ssl_options.dhfile", "rabbit.ssl_options.dhfile",
+ [{datatype, string}, {validators, ["file_accessible"]}]}.
+
+{mapping, "ssl_options.honor_cipher_order", "rabbit.ssl_options.honor_cipher_order",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "ssl_options.honor_ecc_order", "rabbit.ssl_options.honor_ecc_order",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "ssl_options.key.RSAPrivateKey", "rabbit.ssl_options.key",
+ [{datatype, string}]}.
+
+{mapping, "ssl_options.key.DSAPrivateKey", "rabbit.ssl_options.key",
+ [{datatype, string}]}.
+
+{mapping, "ssl_options.key.PrivateKeyInfo", "rabbit.ssl_options.key",
+ [{datatype, string}]}.
+
+{translation, "rabbit.ssl_options.key",
+fun(Conf) ->
+ case cuttlefish_variable:filter_by_prefix("ssl_options.key", Conf) of
+ [{[_,_,Key], Val}|_] -> {list_to_atom(Key), list_to_binary(Val)};
+ _ -> undefined
+ end
+end}.
+
+{mapping, "ssl_options.keyfile", "rabbit.ssl_options.keyfile",
+ [{datatype, string}, {validators, ["file_accessible"]}]}.
+
+{mapping, "ssl_options.log_alert", "rabbit.ssl_options.log_alert",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "ssl_options.password", "rabbit.ssl_options.password",
+ [{datatype, string}]}.
+
+{mapping, "ssl_options.psk_identity", "rabbit.ssl_options.psk_identity",
+ [{datatype, string}]}.
+
+{mapping, "ssl_options.reuse_sessions", "rabbit.ssl_options.reuse_sessions",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "ssl_options.secure_renegotiate", "rabbit.ssl_options.secure_renegotiate",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "ssl_options.versions.$version", "rabbit.ssl_options.versions",
+ [{datatype, atom}]}.
+
+{translation, "rabbit.ssl_options.versions",
+fun(Conf) ->
+ Settings = cuttlefish_variable:filter_by_prefix("ssl_options.versions", Conf),
+ [V || {_, V} <- Settings]
+end}.
+
+{mapping, "ssl_options.ciphers.$cipher", "rabbit.ssl_options.ciphers",
+ [{datatype, string}]}.
+
+{translation, "rabbit.ssl_options.ciphers",
+fun(Conf) ->
+ Settings = cuttlefish_variable:filter_by_prefix("ssl_options.ciphers", Conf),
+ lists:reverse([V || {_, V} <- Settings])
+end}.
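+
+%% New-style equivalent of the classic ssl_options example above (paths are
+%% placeholders):
+%%
+%% ssl_options.cacertfile = /path/to/testca/cacert.pem
+%% ssl_options.certfile = /path/to/server/cert.pem
+%% ssl_options.keyfile = /path/to/server/key.pem
+%% ssl_options.verify = verify_peer
+%% ssl_options.fail_if_no_peer_cert = false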
+
+%% ===========================================================================
+
+%% Choose the available SASL mechanism(s) to expose.
+%% The two default (built-in) mechanisms are 'PLAIN' and
+%% 'AMQPLAIN'. Additional mechanisms can be added via
+%% plugins.
+%%
+%% See https://www.rabbitmq.com/authentication.html for more details.
+%%
+%% {auth_mechanisms, ['PLAIN', 'AMQPLAIN']},
+
+{mapping, "auth_mechanisms.$name", "rabbit.auth_mechanisms", [
+ {datatype, atom}]}.
+
+{translation, "rabbit.auth_mechanisms",
+fun(Conf) ->
+ Settings = cuttlefish_variable:filter_by_prefix("auth_mechanisms", Conf),
+ [ V || {_, V} <- Settings ]
+end}.
+
+
+%% Select an authentication backend to use. RabbitMQ provides an
+%% internal backend in the core.
+%%
+%% {auth_backends, [rabbit_auth_backend_internal]},
+
+{translation, "rabbit.auth_backends",
+fun(Conf) ->
+ Settings = cuttlefish_variable:filter_by_prefix("auth_backends", Conf),
+ BackendModule = fun
+ (internal) -> rabbit_auth_backend_internal;
+ (ldap) -> rabbit_auth_backend_ldap;
+ (http) -> rabbit_auth_backend_http;
+ (cache) -> rabbit_auth_backend_cache;
+ (amqp) -> rabbit_auth_backend_amqp;
+ (dummy) -> rabbit_auth_backend_dummy;
+ (Other) when is_atom(Other) -> Other;
+ (_) -> cuttlefish:invalid("Unknown/unsupported auth backend")
+ end,
+ AuthBackends = [{Num, {default, BackendModule(V)}} || {["auth_backends", Num], V} <- Settings],
+ AuthNBackends = [{Num, {authn, BackendModule(V)}} || {["auth_backends", Num, "authn"], V} <- Settings],
+ AuthZBackends = [{Num, {authz, BackendModule(V)}} || {["auth_backends", Num, "authz"], V} <- Settings],
+ Backends = lists:foldl(
+ fun({NumStr, {Type, V}}, Acc) ->
+ Num = case catch list_to_integer(NumStr) of
+ N when is_integer(N) -> N;
+ Err ->
+ cuttlefish:invalid(
+ iolist_to_binary(io_lib:format(
+ "Auth backend position in the chain should be an integer ~p", [Err])))
+ end,
+ NewVal = case dict:find(Num, Acc) of
+ {ok, {AuthN, AuthZ}} ->
+ case {Type, AuthN, AuthZ} of
+ {authn, undefined, _} ->
+ {V, AuthZ};
+ {authz, _, undefined} ->
+ {AuthN, V};
+ _ ->
+ cuttlefish:invalid(
+ iolist_to_binary(
+ io_lib:format(
+ "Auth backend already defined for the ~pth ~p backend",
+ [Num, Type])))
+ end;
+ error ->
+ case Type of
+ authn -> {V, undefined};
+ authz -> {undefined, V};
+ default -> {V, V}
+ end
+ end,
+ dict:store(Num, NewVal, Acc)
+ end,
+ dict:new(),
+ AuthBackends ++ AuthNBackends ++ AuthZBackends),
+ lists:map(
+ fun
+ ({Num, {undefined, AuthZ}}) ->
+ cuttlefish:warn(
+ io_lib:format(
+ "Auth backend undefined for the ~pth authz backend. Using ~p",
+ [Num, AuthZ])),
+ {AuthZ, AuthZ};
+ ({Num, {AuthN, undefined}}) ->
+ cuttlefish:warn(
+ io_lib:format(
+ "Authz backend undefined for the ~pth authn backend. Using ~p",
+ [Num, AuthN])),
+ {AuthN, AuthN};
+ ({_Num, {Auth, Auth}}) -> Auth;
+ ({_Num, {AuthN, AuthZ}}) -> {AuthN, AuthZ}
+ end,
+ lists:keysort(1, dict:to_list(Backends)))
+end}.
+
+{mapping, "auth_backends.$num", "rabbit.auth_backends", [
+ {datatype, atom}
+]}.
+
+{mapping, "auth_backends.$num.authn", "rabbit.auth_backends",[
+ {datatype, atom}
+]}.
+
+{mapping, "auth_backends.$num.authz", "rabbit.auth_backends",[
+ {datatype, atom}
+]}.
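+
+%% Illustrative new-style chains accepted by the translation above:
+%%
+%% auth_backends.1 = internal
+%% auth_backends.2 = ldap
+%%
+%% or, splitting authentication and authorisation at one position:
+%%
+%% auth_backends.1.authn = ldap
+%% auth_backends.1.authz = internal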
+
+%% This pertains to both the rabbitmq_auth_mechanism_ssl plugin and
+%% STOMP ssl_cert_login configurations. See the rabbitmq_stomp
+%% configuration section later in this file and the README in
+%% https://github.com/rabbitmq/rabbitmq-auth-mechanism-ssl for further
+%% details.
+%%
+%% To use the peer certificate's Common Name (CN) field
+%% instead of its Distinguished Name (DN) for username extraction.
+%%
+%% {ssl_cert_login_from, common_name},
+%%
+%% To use the first SAN value of type DNS:
+%%
+%% {ssl_cert_login_from, subject_alternative_name},
+%% {ssl_cert_login_san_type, dns},
+%% {ssl_cert_login_san_index, 0}
+
+{mapping, "ssl_cert_login_from", "rabbit.ssl_cert_login_from", [
+ {datatype, {enum, [distinguished_name, common_name, subject_alternative_name, subject_alt_name]}}
+]}.
+
+{mapping, "ssl_cert_login_san_type", "rabbit.ssl_cert_login_san_type", [
+ {datatype, {enum, [dns, ip, email, uri, other_name]}}
+]}.
+
+{mapping, "ssl_cert_login_san_index", "rabbit.ssl_cert_login_san_index", [
+ {datatype, integer}, {validators, ["non_negative_integer"]}
+]}.
+
+%% TLS handshake timeout, in milliseconds.
+%%
+%% {ssl_handshake_timeout, 5000},
+
+{mapping, "ssl_handshake_timeout", "rabbit.ssl_handshake_timeout", [
+ {datatype, integer}
+]}.
+
+%% Cluster name
+
+{mapping, "cluster_name", "rabbit.cluster_name", [
+ {datatype, string}
+]}.
+
+%% Default worker process pool size. Used to limit the maximum concurrency
+%% of certain operations, e.g. queue initialisation and recovery on node boot.
+
+{mapping, "default_worker_pool_size", "rabbit.default_worker_pool_size", [
+ {datatype, integer}, {validators, ["non_negative_integer"]}
+]}.
+
+%% Password hashing implementation. Will only affect newly
+%% created users. To recalculate the hash for an existing user,
+%% it is necessary to update that user's password.
+%%
+%% When importing definitions exported from versions earlier
+%% than 3.6.0, it is possible to go back to MD5 (only do this
+%% as a temporary measure!) by setting this to rabbit_password_hashing_md5.
+%%
+%% To use SHA-512, set to rabbit_password_hashing_sha512.
+%%
+%% {password_hashing_module, rabbit_password_hashing_sha256},
+
+{mapping, "password_hashing_module", "rabbit.password_hashing_module", [
+ {datatype, atom}
+]}.
+
+%% Credential validation.
+%%
+
+{mapping, "credential_validator.validation_backend", "rabbit.credential_validator.validation_backend", [
+ {datatype, atom}
+]}.
+
+{mapping, "credential_validator.min_length", "rabbit.credential_validator.min_length", [
+ {datatype, integer}, {validators, ["non_negative_integer"]}
+]}.
+
+{mapping, "credential_validator.regexp", "rabbit.credential_validator.regexp", [
+ {datatype, string}
+]}.
+
+
+
+%%
+%% Default User / VHost
+%% ====================
+%%
+
+%% On first start RabbitMQ will create a vhost and a user. These
+%% config items control what gets created. See
+%% https://www.rabbitmq.com/access-control.html for further
+%% information about vhosts and access control.
+%%
+%% {default_vhost, <<"/">>},
+%% {default_user, <<"guest">>},
+%% {default_pass, <<"guest">>},
+%% {default_permissions, [<<".*">>, <<".*">>, <<".*">>]},
+
+{mapping, "default_vhost", "rabbit.default_vhost", [
+ {datatype, string}
+]}.
+
+{translation, "rabbit.default_vhost",
+fun(Conf) ->
+ list_to_binary(cuttlefish:conf_get("default_vhost", Conf))
+end}.
+
+{mapping, "default_user", "rabbit.default_user", [
+ {datatype, string}
+]}.
+
+{translation, "rabbit.default_user",
+fun(Conf) ->
+ list_to_binary(cuttlefish:conf_get("default_user", Conf))
+end}.
+
+{mapping, "default_pass", "rabbit.default_pass", [
+ {datatype, string}
+]}.
+
+{translation, "rabbit.default_pass",
+fun(Conf) ->
+ list_to_binary(cuttlefish:conf_get("default_pass", Conf))
+end}.
+
+{mapping, "default_permissions.configure", "rabbit.default_permissions", [
+ {datatype, string}
+]}.
+
+{mapping, "default_permissions.read", "rabbit.default_permissions", [
+ {datatype, string}
+]}.
+
+{mapping, "default_permissions.write", "rabbit.default_permissions", [
+ {datatype, string}
+]}.
+
+{translation, "rabbit.default_permissions",
+fun(Conf) ->
+ Settings = cuttlefish_variable:filter_by_prefix("default_permissions", Conf),
+ Configure = proplists:get_value(["default_permissions", "configure"], Settings),
+ Read = proplists:get_value(["default_permissions", "read"], Settings),
+ Write = proplists:get_value(["default_permissions", "write"], Settings),
+ [list_to_binary(Configure), list_to_binary(Read), list_to_binary(Write)]
+end}.
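+
+%% New-style equivalents (the values shown are the usual defaults, for
+%% illustration only):
+%%
+%% default_vhost = /
+%% default_user = guest
+%% default_pass = guest
+%% default_permissions.configure = .*
+%% default_permissions.read = .*
+%% default_permissions.write = .*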
+
+%% Tags for default user
+%%
+%% For more details about tags, see the documentation for the
+%% Management Plugin at https://www.rabbitmq.com/management.html.
+%%
+%% {default_user_tags, [administrator]},
+
+{mapping, "default_user_tags.$tag", "rabbit.default_user_tags",
+ [{datatype, {enum, [true, false]}}]}.
+
+{translation, "rabbit.default_user_tags",
+fun(Conf) ->
+ Settings = cuttlefish_variable:filter_by_prefix("default_user_tags", Conf),
+ [ list_to_atom(Key) || {[_,Key], Val} <- Settings, Val == true ]
+end}.
+
+%%
+%% Additional network and protocol related configuration
+%% =====================================================
+%%
+
+%% Set the default connection heartbeat timeout (in seconds).
+%%
+%% {heartbeat, 600},
+
+{mapping, "heartbeat", "rabbit.heartbeat", [{datatype, integer}]}.
+
+%% Set the max permissible size of an AMQP 0-9-1 frame (in bytes).
+%%
+%% {frame_max, 131072},
+
+{mapping, "frame_max", "rabbit.frame_max", [{datatype, bytesize}]}.
+
+%% Set the max frame size the server will accept before connection
+%% tuning starts.
+%%
+%% {initial_frame_max, 4096},
+
+{mapping, "initial_frame_max", "rabbit.initial_frame_max", [{datatype, bytesize}]}.
+
+%% Set the max permissible number of channels per connection.
+%% 0 means "no limit".
+%%
+%% {channel_max, 0},
+
+{mapping, "channel_max", "rabbit.channel_max", [{datatype, integer}]}.
+
+%% Set the max permissible number of client connections per node.
+%% `infinity` means "no limit".
+%%
+%% {connection_max, infinity},
+
+{mapping, "connection_max", "rabbit.connection_max",
+ [{datatype, [{atom, infinity}, integer]}]}.
+
+{translation, "rabbit.connection_max",
+ fun(Conf) ->
+ case cuttlefish:conf_get("connection_max", Conf, undefined) of
+ undefined -> cuttlefish:unset();
+ infinity -> infinity;
+ Val when is_integer(Val) andalso Val >= 0 -> Val;
+ _ -> cuttlefish:invalid("should be a non-negative integer")
+ end
+ end
+}.
+
+
+{mapping, "max_message_size", "rabbit.max_message_size",
+ [{datatype, integer}, {validators, ["less_then_512MB"]}]}.
+
+%% Customising Socket Options.
+%%
+%% See (https://www.erlang.org/doc/man/inet.html#setopts-2) for
+%% further documentation.
+%%
+%% {tcp_listen_options, [{backlog, 128},
+%% {nodelay, true},
+%% {exit_on_close, false}]},
+
+%% TCP listener section ======================================================
+
+{mapping, "tcp_listen_options", "rabbit.tcp_listen_options", [
+ {datatype, {enum, [none]}}]}.
+
+{translation, "rabbit.tcp_listen_options",
+fun(Conf) ->
+ case cuttlefish:conf_get("tcp_listen_options", Conf, undefined) of
+ none -> [];
+ _ -> cuttlefish:invalid("Invalid tcp_listen_options")
+ end
+end}.
+
+{mapping, "tcp_listen_options.backlog", "rabbit.tcp_listen_options.backlog", [
+ {datatype, integer}
+]}.
+
+{mapping, "tcp_listen_options.nodelay", "rabbit.tcp_listen_options.nodelay", [
+ {datatype, {enum, [true, false]}}
+]}.
+
+{mapping, "tcp_listen_options.buffer", "rabbit.tcp_listen_options.buffer",
+ [{datatype, integer}]}.
+
+{mapping, "tcp_listen_options.delay_send", "rabbit.tcp_listen_options.delay_send",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "tcp_listen_options.dontroute", "rabbit.tcp_listen_options.dontroute",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "tcp_listen_options.exit_on_close", "rabbit.tcp_listen_options.exit_on_close",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "tcp_listen_options.fd", "rabbit.tcp_listen_options.fd",
+ [{datatype, integer}]}.
+
+{mapping, "tcp_listen_options.high_msgq_watermark", "rabbit.tcp_listen_options.high_msgq_watermark",
+ [{datatype, integer}]}.
+
+{mapping, "tcp_listen_options.high_watermark", "rabbit.tcp_listen_options.high_watermark",
+ [{datatype, integer}]}.
+
+{mapping, "tcp_listen_options.keepalive", "rabbit.tcp_listen_options.keepalive",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "tcp_listen_options.low_msgq_watermark", "rabbit.tcp_listen_options.low_msgq_watermark",
+ [{datatype, integer}]}.
+
+{mapping, "tcp_listen_options.low_watermark", "rabbit.tcp_listen_options.low_watermark",
+ [{datatype, integer}]}.
+
+{mapping, "tcp_listen_options.port", "rabbit.tcp_listen_options.port",
+ [{datatype, integer}, {validators, ["port"]}]}.
+
+{mapping, "tcp_listen_options.priority", "rabbit.tcp_listen_options.priority",
+ [{datatype, integer}]}.
+
+{mapping, "tcp_listen_options.recbuf", "rabbit.tcp_listen_options.recbuf",
+ [{datatype, integer}]}.
+
+{mapping, "tcp_listen_options.send_timeout", "rabbit.tcp_listen_options.send_timeout",
+ [{datatype, integer}]}.
+
+{mapping, "tcp_listen_options.send_timeout_close", "rabbit.tcp_listen_options.send_timeout_close",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "tcp_listen_options.sndbuf", "rabbit.tcp_listen_options.sndbuf",
+ [{datatype, integer}]}.
+
+{mapping, "tcp_listen_options.tos", "rabbit.tcp_listen_options.tos",
+ [{datatype, integer}]}.
+
+{mapping, "tcp_listen_options.linger.on", "rabbit.tcp_listen_options.linger",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "tcp_listen_options.linger.timeout", "rabbit.tcp_listen_options.linger",
+ [{datatype, integer}, {validators, ["non_negative_integer"]}]}.
+
+{translation, "rabbit.tcp_listen_options.linger",
+fun(Conf) ->
+ LingerOn = cuttlefish:conf_get("tcp_listen_options.linger.on", Conf, false),
+ LingerTimeout = cuttlefish:conf_get("tcp_listen_options.linger.timeout", Conf, 0),
+ {LingerOn, LingerTimeout}
+end}.
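+
+%% A minimal sketch of the two linger keys (illustrative values; the
+%% translation above folds them into a single {on, timeout} pair):
+%%
+%% tcp_listen_options.linger.on = true
+%% tcp_listen_options.linger.timeout = 10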
+
+
+%% ==========================================================================
+
+%%
+%% Resource Limits & Flow Control
+%% ==============================
+%%
+%% See https://www.rabbitmq.com/memory.html for full details.
+
+%% Memory-based Flow Control threshold.
+%%
+%% {vm_memory_high_watermark, 0.4},
+
+%% Alternatively, we can set a limit (in bytes) of RAM used by the node.
+%%
+%% {vm_memory_high_watermark, {absolute, 1073741824}},
+%%
+%% Or you can set an absolute value using memory unit symbols (with RabbitMQ 3.6.0+).
+%%
+%% {vm_memory_high_watermark, {absolute, "1024M"}},
+%%
+%% Supported unit symbols:
+%%
+%% k, kiB: kibibytes (2^10 = 1,024 bytes)
+%% M, MiB: mebibytes (2^20 = 1,048,576 bytes)
+%% G, GiB: gibibytes (2^30 = 1,073,741,824 bytes)
+%% kB: kilobytes (10^3 = 1,000 bytes)
+%% MB: megabytes (10^6 = 1,000,000 bytes)
+%% GB: gigabytes (10^9 = 1,000,000,000 bytes)
+
+{mapping, "vm_memory_high_watermark.relative", "rabbit.vm_memory_high_watermark", [
+ {datatype, float}]}.
+
+{mapping, "vm_memory_high_watermark.absolute", "rabbit.vm_memory_high_watermark", [
+ {datatype, [integer, string]}]}.
+
+
+{translation, "rabbit.vm_memory_high_watermark",
+fun(Conf) ->
+ Settings = cuttlefish_variable:filter_by_prefix("vm_memory_high_watermark", Conf),
+ Absolute = proplists:get_value(["vm_memory_high_watermark", "absolute"], Settings),
+ Relative = proplists:get_value(["vm_memory_high_watermark", "relative"], Settings),
+ case {Absolute, Relative} of
+ {undefined, undefined} -> cuttlefish:invalid("No vm watermark defined");
+ {_, undefined} -> {absolute, Absolute};
+ _ -> Relative
+ end
+end}.
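+
+%% For example, one of the following (illustrative values; when both
+%% are set, the relative form takes precedence per the translation above):
+%%
+%% vm_memory_high_watermark.relative = 0.4
+%% vm_memory_high_watermark.absolute = 1GB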
+
+%% Fraction of the high watermark limit at which queues start to
+%% page messages out to disc in order to free up memory.
+%%
+%% Values greater than 0.9 can be dangerous and should be used carefully.
+%%
+%% {vm_memory_high_watermark_paging_ratio, 0.5},
+
+{mapping, "vm_memory_high_watermark_paging_ratio",
+ "rabbit.vm_memory_high_watermark_paging_ratio",
+ [{datatype, float}, {validators, ["less_than_1"]}]}.
+
+%% Interval (in milliseconds) at which we perform the check of the memory
+%% levels against the watermarks.
+%%
+%% {memory_monitor_interval, 2500},
+
+{mapping, "memory_monitor_interval", "rabbit.memory_monitor_interval",
+ [{datatype, integer}]}.
+
+%% Selects Erlang VM memory consumption calculation strategy.
+%% Can be `allocated`, `rss` or `legacy` (aliased as `erlang`).
+%%
+%% {vm_memory_calculation_strategy, rss},
+
+{mapping, "vm_memory_calculation_strategy", "rabbit.vm_memory_calculation_strategy",
+ [{datatype, {enum, [rss, erlang, allocated, legacy]}}]}.
+
+%% The total memory available can be calculated from the OS resources
+%% (the default) or provided explicitly as a configuration parameter.
+{mapping, "total_memory_available_override_value", "rabbit.total_memory_available_override_value", [
+ {datatype, [integer, string]}]}.
+
+%% Set disk free limit (in bytes). Once free disk space reaches this
+%% lower bound, a disk alarm will be set - see the documentation
+%% listed above for more details.
+%%
+%% {disk_free_limit, 50000000},
+%%
+%% Or you can set it using memory units (same as in vm_memory_high_watermark)
+%% with RabbitMQ 3.6.0+.
+%% {disk_free_limit, "50MB"},
+%% {disk_free_limit, "50000kB"},
+%% {disk_free_limit, "2GB"},
+
+%% Alternatively, we can set a limit relative to total available RAM.
+%%
+%% Values lower than 1.0 can be dangerous and should be used carefully.
+%% {disk_free_limit, {mem_relative, 2.0}},
+
+{mapping, "disk_free_limit.relative", "rabbit.disk_free_limit", [
+ {datatype, float}]}.
+
+{mapping, "disk_free_limit.absolute", "rabbit.disk_free_limit", [
+ {datatype, [integer, string]}]}.
+
+
+{translation, "rabbit.disk_free_limit",
+fun(Conf) ->
+ Settings = cuttlefish_variable:filter_by_prefix("disk_free_limit", Conf),
+ Absolute = proplists:get_value(["disk_free_limit", "absolute"], Settings),
+ Relative = proplists:get_value(["disk_free_limit", "relative"], Settings),
+ case {Absolute, Relative} of
+ {undefined, undefined} -> cuttlefish:invalid("No disk limit defined");
+ {_, undefined} -> Absolute;
+ _ -> {mem_relative, Relative}
+ end
+end}.
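+
+%% For example, one of the following (illustrative values; when both
+%% are set, the relative form takes precedence per the translation above):
+%%
+%% disk_free_limit.absolute = 50MB
+%% disk_free_limit.relative = 2.0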
+
+%%
+%% Clustering
+%% =====================
+%%
+
+%% How to respond to cluster partitions.
+%% See https://www.rabbitmq.com/partitions.html for further details.
+%%
+%% {cluster_partition_handling, ignore},
+
+{mapping, "cluster_partition_handling", "rabbit.cluster_partition_handling",
+ [{datatype, {enum, [ignore, pause_minority, autoheal, pause_if_all_down]}}]}.
+
+{mapping, "cluster_partition_handling.pause_if_all_down.recover",
+ "rabbit.cluster_partition_handling",
+ [{datatype, {enum, [ignore, autoheal]}}]}.
+
+{mapping, "cluster_partition_handling.pause_if_all_down.nodes.$name",
+ "rabbit.cluster_partition_handling",
+ [{datatype, atom}]}.
+
+{translation, "rabbit.cluster_partition_handling",
+fun(Conf) ->
+ case cuttlefish:conf_get("cluster_partition_handling", Conf) of
+ pause_if_all_down ->
+ PauseIfAllDownNodes = cuttlefish_variable:filter_by_prefix(
+ "cluster_partition_handling.pause_if_all_down.nodes",
+ Conf),
+ case PauseIfAllDownNodes of
+ [] ->
+ cuttlefish:invalid("Nodes required for pause_if_all_down");
+ _ ->
+                    Nodes = [ V || {_K, V} <- PauseIfAllDownNodes ],
+ PauseIfAllDownRecover = cuttlefish:conf_get(
+ "cluster_partition_handling.pause_if_all_down.recover",
+ Conf),
+ case PauseIfAllDownRecover of
+ Recover when Recover == ignore; Recover == autoheal ->
+ {pause_if_all_down, Nodes, Recover};
+                        _Invalid ->
+ cuttlefish:invalid("Recover strategy required for pause_if_all_down")
+ end
+ end;
+ Other -> Other
+ end
+end}.
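+
+%% A hypothetical pause_if_all_down setup (node names are placeholders):
+%%
+%% cluster_partition_handling = pause_if_all_down
+%% cluster_partition_handling.pause_if_all_down.recover = autoheal
+%% cluster_partition_handling.pause_if_all_down.nodes.1 = rabbit@node1
+%% cluster_partition_handling.pause_if_all_down.nodes.2 = rabbit@node2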
+
+%% Number of delegate processes to use for intra-cluster
+%% communication. On a machine which has a very large number of cores
+%% and is also part of a cluster, you may wish to increase this value.
+%%
+
+{mapping, "delegate_count", "rabbit.delegate_count", [
+ {datatype, integer}, {validators, ["non_negative_integer"]}
+]}.
+
+%% Mirror sync batch size, in messages. Increasing this will speed
+%% up syncing but total batch size in bytes must not exceed 2 GiB.
+%% Available in RabbitMQ 3.6.0 or later.
+%%
+%% {mirroring_sync_batch_size, 4096},
+
+{mapping, "mirroring_sync_batch_size", "rabbit.mirroring_sync_batch_size",
+ [{datatype, bytesize}, {validators, ["size_less_than_2G"]}]}.
+
+%% Peer discovery backend used by cluster formation.
+%%
+
+{mapping, "cluster_formation.peer_discovery_backend", "rabbit.cluster_formation.peer_discovery_backend", [
+ {datatype, atom}
+]}.
+
+{translation, "rabbit.cluster_formation.peer_discovery_backend",
+fun(Conf) ->
+ case cuttlefish:conf_get("cluster_formation.peer_discovery_backend", Conf, rabbit_peer_discovery_classic_config) of
+ classic_config -> rabbit_peer_discovery_classic_config;
+ classic -> rabbit_peer_discovery_classic_config;
+ config -> rabbit_peer_discovery_classic_config;
+ dns -> rabbit_peer_discovery_dns;
+ aws -> rabbit_peer_discovery_aws;
+ consul -> rabbit_peer_discovery_consul;
+ etcd -> rabbit_peer_discovery_etcd;
+ kubernetes -> rabbit_peer_discovery_k8s;
+ k8s -> rabbit_peer_discovery_k8s;
+ Module -> Module
+ end
+end}.
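+
+%% For example (any alias accepted above, or a module name, will work):
+%%
+%% cluster_formation.peer_discovery_backend = classic_config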
+
+%% Own node type, used by cluster formation.
+%%
+
+{mapping, "cluster_formation.node_type", "rabbit.cluster_formation.node_type", [
+ {datatype, {enum, [disc, disk, ram]}}
+]}.
+
+{translation, "rabbit.cluster_formation.node_type",
+fun(Conf) ->
+ %% if peer discovery backend isn't configured, don't generate
+ %% node type
+ case cuttlefish:conf_get("cluster_formation.peer_discovery_backend", Conf, undefined) of
+ undefined -> cuttlefish:unset();
+ _Backend ->
+ case cuttlefish:conf_get("cluster_formation.node_type", Conf) of
+ disc -> disc;
+ %% always cast to `disc`
+ disk -> disc;
+ ram -> ram;
+ _Other -> disc
+ end
+ end
+end}.
+
+%% Cluster formation: Randomized startup delay
+
+{mapping, "cluster_formation.randomized_startup_delay_range.min", "rabbit.cluster_formation.randomized_startup_delay_range",
+ [{datatype, integer}]}.
+{mapping, "cluster_formation.randomized_startup_delay_range.max", "rabbit.cluster_formation.randomized_startup_delay_range",
+ [{datatype, integer}]}.
+
+{translation, "rabbit.cluster_formation.randomized_startup_delay_range",
+fun(Conf) ->
+ Min = cuttlefish:conf_get("cluster_formation.randomized_startup_delay_range.min", Conf, undefined),
+ Max = cuttlefish:conf_get("cluster_formation.randomized_startup_delay_range.max", Conf, undefined),
+
+ case {Min, Max} of
+ {undefined, undefined} ->
+ cuttlefish:unset();
+ {undefined, Max} ->
+ %% fallback default
+ {5, Max};
+ {Min, undefined} ->
+ %% fallback default
+ {Min, 60};
+ {Min, Max} ->
+ {Min, Max}
+ end
+end}.
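+
+%% For example (illustrative values matching the fallback defaults above):
+%%
+%% cluster_formation.randomized_startup_delay_range.min = 5
+%% cluster_formation.randomized_startup_delay_range.max = 60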
+
+%% Cluster formation: discovery failure retries
+
+{mapping, "cluster_formation.lock_retry_limit", "rabbit.cluster_formation.lock_retry_limit",
+ [
+ {datatype, integer},
+ {validators, ["non_zero_positive_integer"]}
+ ]}.
+{mapping, "cluster_formation.lock_retry_timeout", "rabbit.cluster_formation.lock_retry_timeout",
+ [
+ {datatype, integer},
+ {validators, ["non_zero_positive_integer"]}
+ ]}.
+
+{mapping, "cluster_formation.discovery_retry_limit", "rabbit.cluster_formation.discovery_retry_limit",
+ [
+ {datatype, integer},
+ {validators, ["non_zero_positive_integer"]}
+ ]}.
+{mapping, "cluster_formation.discovery_retry_interval", "rabbit.cluster_formation.discovery_retry_interval",
+ [
+ {datatype, integer},
+ {validators, ["non_zero_positive_integer"]}
+ ]}.
+
+%% Classic config-driven peer discovery backend.
+%%
+%% Make clustering happen *automatically* at startup - only applied
+%% to nodes that have just been reset or started for the first time.
+%% See https://www.rabbitmq.com/clustering.html#auto-config for
+%% further details.
+%%
+%% {cluster_nodes, {['rabbit@my.host.com'], disc}},
+
+{mapping, "cluster_formation.classic_config.nodes.$node", "rabbit.cluster_nodes",
+ [{datatype, atom}]}.
+
+{translation, "rabbit.cluster_nodes",
+fun(Conf) ->
+ Nodes = [V || {_, V} <- cuttlefish_variable:filter_by_prefix("cluster_formation.classic_config.nodes", Conf)],
+
+ case Nodes of
+ [] -> cuttlefish:unset();
+ Other ->
+ case cuttlefish:conf_get("cluster_formation.node_type", Conf, disc) of
+ disc -> {Other, disc};
+ %% Always cast to `disc`
+ disk -> {Other, disc};
+ ram -> {Other, ram}
+ end
+ end
+end}.
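+
+%% A hypothetical two-node classic config (host names are placeholders):
+%%
+%% cluster_formation.classic_config.nodes.1 = rabbit@hostname1
+%% cluster_formation.classic_config.nodes.2 = rabbit@hostname2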
+
+%% DNS (A records and reverse lookups)-based peer discovery.
+%%
+
+{mapping, "cluster_formation.dns.hostname", "rabbit.cluster_formation.peer_discovery_dns.hostname",
+ [{datatype, string}]}.
+
+{translation, "rabbit.cluster_formation.peer_discovery_dns.hostname",
+fun(Conf) ->
+ case cuttlefish:conf_get("cluster_formation.dns.hostname", Conf, undefined) of
+ undefined -> cuttlefish:unset();
+ Value -> list_to_binary(Value)
+ end
+end}.
+
+
+%% Interval (in milliseconds) at which we send keepalive messages
+%% to other cluster members. Note that this is not the same thing
+%% as net_ticktime; missed keepalive messages will not cause nodes
+%% to be considered down.
+%%
+%% {cluster_keepalive_interval, 10000},
+
+{mapping, "cluster_keepalive_interval", "rabbit.cluster_keepalive_interval",
+ [{datatype, integer}]}.
+
+%% Queue master locator
+%%
+
+{mapping, "queue_master_locator", "rabbit.queue_master_locator",
+ [{datatype, string}]}.
+
+{translation, "rabbit.queue_master_locator",
+fun(Conf) ->
+ list_to_binary(cuttlefish:conf_get("queue_master_locator", Conf))
+end}.
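+
+%% For example (one of the documented strategies):
+%%
+%% queue_master_locator = min-masters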
+
+%%
+%% Statistics Collection
+%% =====================
+%%
+
+%% Set (internal) statistics collection granularity.
+%%
+%% {collect_statistics, none},
+
+{mapping, "collect_statistics", "rabbit.collect_statistics",
+ [{datatype, {enum, [none, coarse, fine]}}]}.
+
+%% Statistics collection interval (in milliseconds). Increasing
+%% this will reduce the load on the management database.
+%%
+%% {collect_statistics_interval, 5000},
+
+{mapping, "collect_statistics_interval", "rabbit.collect_statistics_interval",
+ [{datatype, integer}]}.
+
+%%
+%% Misc/Advanced Options
+%% =====================
+%%
+%% NB: Change these only if you understand what you are doing!
+%%
+
+%% Explicitly enable/disable hipe compilation.
+%%
+%% {hipe_compile, true},
+%%
+%% DEPRECATED: this is a no-op and is kept only to allow old configs.
+
+{mapping, "hipe_compile", "rabbit.hipe_compile",
+ [{datatype, {enum, [true, false]}}]}.
+
+%% Timeout used when waiting for Mnesia tables in a cluster to
+%% become available.
+%%
+%% {mnesia_table_loading_retry_timeout, 30000},
+
+{mapping, "mnesia_table_loading_retry_timeout", "rabbit.mnesia_table_loading_retry_timeout",
+ [{datatype, integer}]}.
+
+%% Number of retries when waiting for Mnesia tables during cluster
+%% startup. Note that this setting is not applied to Mnesia upgrades
+%% or node deletions.
+%%
+%% {mnesia_table_loading_retry_limit, 10},
+
+{mapping, "mnesia_table_loading_retry_limit", "rabbit.mnesia_table_loading_retry_limit",
+ [{datatype, integer}]}.
+
+{mapping, "message_store_shutdown_timeout", "rabbit.msg_store_shutdown_timeout",
+ [
+ {datatype, integer},
+ {validators, ["non_zero_positive_integer"]}
+ ]}.
+
+%% Size in bytes below which to embed messages in the queue index. See
+%% https://www.rabbitmq.com/persistence-conf.html
+%%
+%% {queue_index_embed_msgs_below, 4096}
+
+{mapping, "queue_index_embed_msgs_below", "rabbit.queue_index_embed_msgs_below",
+ [{datatype, bytesize}]}.
+
+%% Whether or not to enable background GC.
+%%
+%% {background_gc_enabled, true}
+
+{mapping, "background_gc_enabled", "rabbit.background_gc_enabled",
+ [{datatype, {enum, [true, false]}}]}.
+
+%% Interval (in milliseconds) at which we run background GC.
+%%
+%% {background_gc_target_interval, 60000}
+
+{mapping, "background_gc_target_interval", "rabbit.background_gc_target_interval",
+ [{datatype, integer}]}.
+
+%% Whether or not to enable proxy protocol support.
+%%
+%% {proxy_protocol, false}
+
+{mapping, "proxy_protocol", "rabbit.proxy_protocol",
+ [{datatype, {enum, [true, false]}}]}.
+
+%% Whether to stop the rabbit application if a vhost has
+%% to terminate for any reason.
+
+{mapping, "vhost_restart_strategy", "rabbit.vhost_restart_strategy",
+ [{datatype, {enum, [stop_node, continue, transient, persistent]}}]}.
+
+%% Approximate maximum time a consumer can spend processing a message before
+%% the channel is terminated, in milliseconds. Default is no timeout.
+%%
+%% {consumer_timeout, 10000},
+
+{mapping, "consumer_timeout", "rabbit.consumer_timeout", [
+ {datatype, integer},
+ {validators, ["non_zero_positive_integer"]}
+]}.
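+
+%% For example, a 30-minute timeout (an illustrative value):
+%%
+%% consumer_timeout = 1800000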
+
+%% Product name & version overrides.
+
+{mapping, "product.name", "rabbit.product_name", [
+ {datatype, string}
+]}.
+{mapping, "product.version", "rabbit.product_version", [
+ {datatype, string}
+]}.
+
+%% Message of the day file.
+%% The content of that file is added to the banners, both logged and
+%% printed.
+
+{mapping, "motd_file", "rabbit.motd_file", [
+ {datatype, string}
+]}.
+
+% ==========================
+% Lager section
+% ==========================
+
+{mapping, "log.dir", "lager.log_root", [
+ {datatype, string},
+ {validators, ["dir_writable"]}]}.
+
+{mapping, "log.console", "rabbit.log.console.enabled", [
+ {datatype, {enum, [true, false]}}
+]}.
+{mapping, "log.console.level", "rabbit.log.console.level", [
+ {datatype, {enum, [debug, info, notice, warning, error, critical, alert, emergency, none]}}
+]}.
+
+{mapping, "log.exchange", "rabbit.log.exchange.enabled", [
+ {datatype, {enum, [true, false]}}
+]}.
+{mapping, "log.exchange.level", "rabbit.log.exchange.level", [
+ {datatype, {enum, [debug, info, notice, warning, error, critical, alert, emergency, none]}}
+]}.
+
+{mapping, "log.syslog", "rabbit.log.syslog.enabled", [
+ {datatype, {enum, [true, false]}}
+]}.
+
+{mapping, "log.syslog.level", "rabbit.log.syslog.level", [
+ {datatype, {enum, [debug, info, notice, warning, error, critical, alert, emergency, none]}}
+]}.
+
+{mapping, "log.syslog.identity", "syslog.app_name", [
+ {datatype, string}
+]}.
+
+{mapping, "log.syslog.facility", "syslog.facility", [
+ {datatype, {enum, [kern, kernel, user, mail, daemon, auth, syslog, lpr,
+ news, uucp, cron, authpriv, ftp, ntp, audit, alert,
+ clock, local0, local1, local2, local3, local4,
+ local5, local6, local7]}}
+]}.
+
+{mapping, "log.syslog.multiline_mode", "syslog.multiline_mode", [
+ {datatype, {enum, [true, false]}}
+]}.
+
+{mapping, "log.syslog.ip", "syslog.dest_host", [
+ {datatype, string},
+ {validators, ["is_ip"]}
+]}.
+
+{mapping, "log.syslog.host", "syslog.dest_host", [
+ {datatype, string}
+]}.
+
+{translation, "syslog.dest_host",
+fun(Conf) ->
+ case cuttlefish:conf_get("log.syslog", Conf) of
+ true ->
+ case cuttlefish:conf_get("log.syslog.ip", Conf, undefined) of
+ undefined ->
+ % If log.syslog.ip is not set, then this must be set
+ cuttlefish:conf_get("log.syslog.host", Conf);
+ IpAddr ->
+ IpAddr
+ end;
+ _ ->
+ cuttlefish:invalid("log.syslog must be set to true to set log.syslog.host or log.syslog.ip")
+ end
+end}.
+
+{mapping, "log.syslog.port", "syslog.dest_port", [
+ {datatype, integer}
+]}.
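+
+%% A hypothetical syslog endpoint (host and port are placeholders; per
+%% the translation above, log.syslog must be true for these to apply):
+%%
+%% log.syslog = true
+%% log.syslog.host = syslog.example.com
+%% log.syslog.port = 514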
+
+{mapping, "log.syslog.transport", "syslog.protocol", [
+ {datatype, {enum, [udp, tcp, tls, ssl]}}
+]}.
+{mapping, "log.syslog.protocol", "syslog.protocol", [
+ {datatype, {enum, [rfc3164, rfc5424]}}
+]}.
+{mapping, "log.syslog.ssl_options.verify", "syslog.protocol", [
+ {datatype, {enum, [verify_peer, verify_none]}}]}.
+
+{mapping, "log.syslog.ssl_options.fail_if_no_peer_cert", "syslog.protocol", [
+ {datatype, {enum, [true, false]}}]}.
+
+{mapping, "log.syslog.ssl_options.cacertfile", "syslog.protocol",
+ [{datatype, string}, {validators, ["file_accessible"]}]}.
+
+{mapping, "log.syslog.ssl_options.certfile", "syslog.protocol",
+ [{datatype, string}, {validators, ["file_accessible"]}]}.
+
+{mapping, "log.syslog.ssl_options.cacerts.$name", "syslog.protocol",
+ [{datatype, string}]}.
+
+{mapping, "log.syslog.ssl_options.cert", "syslog.protocol",
+ [{datatype, string}]}.
+
+{mapping, "log.syslog.ssl_options.client_renegotiation", "syslog.protocol",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "log.syslog.ssl_options.crl_check", "syslog.protocol",
+ [{datatype, [{enum, [true, false, peer, best_effort]}]}]}.
+
+{mapping, "log.syslog.ssl_options.depth", "syslog.protocol",
+ [{datatype, integer}, {validators, ["byte"]}]}.
+
+{mapping, "log.syslog.ssl_options.dh", "syslog.protocol",
+ [{datatype, string}]}.
+
+{mapping, "log.syslog.ssl_options.dhfile", "syslog.protocol",
+ [{datatype, string}, {validators, ["file_accessible"]}]}.
+
+{mapping, "log.syslog.ssl_options.honor_cipher_order", "syslog.protocol",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "log.syslog.ssl_options.honor_ecc_order", "syslog.protocol",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "log.syslog.ssl_options.key.RSAPrivateKey", "syslog.protocol",
+ [{datatype, string}]}.
+
+{mapping, "log.syslog.ssl_options.key.DSAPrivateKey", "syslog.protocol",
+ [{datatype, string}]}.
+
+{mapping, "log.syslog.ssl_options.key.PrivateKeyInfo", "syslog.protocol",
+ [{datatype, string}]}.
+
+{mapping, "log.syslog.ssl_options.keyfile", "syslog.protocol",
+ [{datatype, string}, {validators, ["file_accessible"]}]}.
+
+{mapping, "log.syslog.ssl_options.log_alert", "syslog.protocol",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "log.syslog.ssl_options.password", "syslog.protocol",
+ [{datatype, string}]}.
+
+{mapping, "log.syslog.ssl_options.psk_identity", "syslog.protocol",
+ [{datatype, string}]}.
+
+{mapping, "log.syslog.ssl_options.reuse_sessions", "syslog.protocol",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "log.syslog.ssl_options.secure_renegotiate", "syslog.protocol",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "log.syslog.ssl_options.versions.$version", "syslog.protocol",
+ [{datatype, atom}]}.
+
+{translation, "syslog.protocol",
+fun(Conf) ->
+ ParseSslOptions = fun() ->
+ RawSettings = [
+ {verify, cuttlefish:conf_get("log.syslog.ssl_options.verify", Conf, undefined)},
+ {fail_if_no_peer_cert, cuttlefish:conf_get("log.syslog.ssl_options.fail_if_no_peer_cert", Conf, undefined)},
+ {cacertfile, cuttlefish:conf_get("log.syslog.ssl_options.cacertfile", Conf, undefined)},
+ {certfile, cuttlefish:conf_get("log.syslog.ssl_options.certfile", Conf, undefined)},
+ {cert, cuttlefish:conf_get("log.syslog.ssl_options.cert", Conf, undefined)},
+ {client_renegotiation, cuttlefish:conf_get("log.syslog.ssl_options.client_renegotiation", Conf, undefined)},
+ {crl_check, cuttlefish:conf_get("log.syslog.ssl_options.crl_check", Conf, undefined)},
+ {depth, cuttlefish:conf_get("log.syslog.ssl_options.depth", Conf, undefined)},
+ {dh, cuttlefish:conf_get("log.syslog.ssl_options.dh", Conf, undefined)},
+ {dhfile, cuttlefish:conf_get("log.syslog.ssl_options.dhfile", Conf, undefined)},
+ {honor_cipher_order, cuttlefish:conf_get("log.syslog.ssl_options.honor_cipher_order", Conf, undefined)},
+ {honor_ecc_order, cuttlefish:conf_get("log.syslog.ssl_options.honor_ecc_order", Conf, undefined)},
+
+ {keyfile, cuttlefish:conf_get("log.syslog.ssl_options.keyfile", Conf, undefined)},
+ {log_alert, cuttlefish:conf_get("log.syslog.ssl_options.log_alert", Conf, undefined)},
+ {password, cuttlefish:conf_get("log.syslog.ssl_options.password", Conf, undefined)},
+ {psk_identity, cuttlefish:conf_get("log.syslog.ssl_options.psk_identity", Conf, undefined)},
+ {reuse_sessions, cuttlefish:conf_get("log.syslog.ssl_options.reuse_sessions", Conf, undefined)},
+ {secure_renegotiate, cuttlefish:conf_get("log.syslog.ssl_options.secure_renegotiate", Conf, undefined)}
+ ],
+ DefinedSettings = [{K, V} || {K, V} <- RawSettings, V =/= undefined],
+
+ lists:map(
+ fun({K, Val}) when K == dh; K == cert -> {K, list_to_binary(Val)};
+ ({K, Val}) -> {K, Val}
+ end,
+ DefinedSettings) ++
+ [ {K, V}
+ || {K, V} <-
+ [{cacerts, [ list_to_binary(V) || {_, V} <- cuttlefish_variable:filter_by_prefix("log.syslog.ssl_options.cacerts", Conf)]},
+ {versions, [ V || {_, V} <- cuttlefish_variable:filter_by_prefix("log.syslog.ssl_options.versions", Conf) ]},
+ {key, case cuttlefish_variable:filter_by_prefix("log.syslog.ssl_options.key", Conf) of
+ [{[_,_,Key], Val}|_] -> {list_to_atom(Key), list_to_binary(Val)};
+ _ -> undefined
+ end}],
+ V =/= undefined,
+ V =/= []]
+ end,
+
+ Proto = cuttlefish:conf_get("log.syslog.protocol", Conf, undefined),
+ Transport = cuttlefish:conf_get("log.syslog.transport", Conf, udp),
+ case Transport of
+ TLS when TLS == tls; TLS == ssl ->
+ case Proto of
+ rfc3164 ->
+ cuttlefish:invalid("Syslog protocol rfc3164 is not compatible with TLS");
+ _ ->
+ {rfc5424, tls, ParseSslOptions()}
+ end;
+ _ when Transport == udp; Transport == tcp ->
+ case Proto of
+ undefined -> {rfc3164, Transport};
+ _ -> {Proto, Transport}
+ end;
+ _ -> cuttlefish:invalid("Invalid syslog transport ~p~n", [Transport])
+ end
+end}.
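+
+%% For example, syslog over TLS (illustrative; the translation above
+%% implies rfc5424, since rfc3164 is rejected over TLS):
+%%
+%% log.syslog.transport = tls
+%% log.syslog.ssl_options.cacertfile = /path/to/ca_certificate.pem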
+
+{mapping, "log.file", "rabbit.log.file.file", [
+ {datatype, [{enum, [false]}, string]}
+]}.
+{mapping, "log.file.level", "rabbit.log.file.level", [
+ {datatype,
+ {enum, ['=debug', debug,
+ info, '!=info',
+ notice, '<=notice',
+ '<warning', warning,
+ error,
+ critical,
+ alert,
+ emergency,
+ none]}}
+]}.
+{mapping, "log.file.rotation.date", "rabbit.log.file.date", [
+ {datatype, string}
+]}.
+{mapping, "log.file.rotation.size", "rabbit.log.file.size", [
+ {datatype, integer}
+]}.
+{mapping, "log.file.rotation.count", "rabbit.log.file.count", [
+ {datatype, integer}
+]}.
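+
+%% A minimal file logging sketch (illustrative values; "$D0" is the
+%% midnight-rotation form of lager's date syntax):
+%%
+%% log.file = rabbit.log
+%% log.file.level = info
+%% log.file.rotation.date = $D0
+%% log.file.rotation.count = 5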
+
+%% Log categories
+
+{mapping, "log.connection.level", "rabbit.log.categories.connection.level", [
+ {datatype, {enum, [debug, info, notice, warning, error, critical, alert, emergency, none]}}
+]}.
+{mapping, "log.connection.file", "rabbit.log.categories.connection.file", [
+ {datatype, string}
+]}.
+
+{mapping, "log.channel.level", "rabbit.log.categories.channel.level", [
+ {datatype, {enum, [debug, info, notice, warning, error, critical, alert, emergency, none]}}
+]}.
+{mapping, "log.channel.file", "rabbit.log.categories.channel.file", [
+ {datatype, string}
+]}.
+
+{mapping, "log.mirroring.level", "rabbit.log.categories.mirroring.level", [
+ {datatype, {enum, [debug, info, notice, warning, error, critical, alert, emergency, none]}}
+]}.
+{mapping, "log.mirroring.file", "rabbit.log.categories.mirroring.file", [
+ {datatype, string}
+]}.
+
+{mapping, "log.queue.level", "rabbit.log.categories.queue.level", [
+ {datatype, {enum, [debug, info, notice, warning, error, critical, alert, emergency, none]}}
+]}.
+{mapping, "log.queue.file", "rabbit.log.categories.queue.file", [
+ {datatype, string}
+]}.
+
+{mapping, "log.federation.level", "rabbit.log.categories.federation.level", [
+ {datatype, {enum, [debug, info, notice, warning, error, critical, alert, emergency, none]}}
+]}.
+{mapping, "log.federation.file", "rabbit.log.categories.federation.file", [
+ {datatype, string}
+]}.
+
+{mapping, "log.upgrade.level", "rabbit.log.categories.upgrade.level", [
+ {datatype, {enum, [debug, info, notice, warning, error, critical, alert, emergency, none]}}
+]}.
+{mapping, "log.upgrade.file", "rabbit.log.categories.upgrade.file", [
+ {datatype, string}
+]}.
+
+{mapping, "log.ra.level", "rabbit.log.categories.ra.level", [
+ {datatype, {enum, [debug, info, notice, warning, error, critical, alert, emergency, none]}}
+]}.
+{mapping, "log.ra.file", "rabbit.log.categories.ra.file", [
+ {datatype, string}
+]}.
+
+{mapping, "log.default.level", "rabbit.log.categories.default.level", [
+ {datatype, {enum, [debug, info, notice, warning, error, critical, alert, emergency, none]}}
+]}.
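+
+%% For example, to log connection events to a dedicated file
+%% (illustrative names):
+%%
+%% log.connection.level = info
+%% log.connection.file = connection.log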
+
+% ==========================
+% Kernel section
+% ==========================
+
+{mapping, "net_ticktime", "kernel.net_ticktime",[
+ {datatype, [integer]},
+ {validators, ["non_zero_positive_integer"]}
+]}.
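+
+%% For example, the Erlang VM's default tick time, in seconds (illustrative):
+%%
+%% net_ticktime = 60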
+
+% ==========================
+% sysmon_handler section
+% ==========================
+
+%% @doc The threshold at which to warn about the number of processes
+%% that are overly busy. Processes with large heaps or that take a
+%% long time to garbage collect will count toward this threshold.
+{mapping, "sysmon_handler.thresholds.busy_processes", "sysmon_handler.process_limit", [
+ {datatype, integer},
+ hidden
+]}.
+
+{translation, "sysmon_handler.process_limit",
+ fun(Conf) ->
+ case cuttlefish:conf_get("sysmon_handler.thresholds.busy_processes", Conf, undefined) of
+ undefined ->
+ cuttlefish:unset();
+ Int when is_integer(Int) ->
+ Int;
+ _ ->
+ cuttlefish:invalid("should be a non-negative integer")
+ end
+ end
+}.
+
+%% @doc The threshold at which to warn about the number of ports that
+%% are overly busy. Ports with full input buffers count toward this
+%% threshold.
+{mapping, "sysmon_handler.thresholds.busy_ports", "sysmon_handler.port_limit", [
+ {datatype, integer},
+ hidden
+]}.
+
+{translation, "sysmon_handler.port_limit",
+ fun(Conf) ->
+ case cuttlefish:conf_get("sysmon_handler.thresholds.busy_ports", Conf, undefined) of
+ undefined ->
+ cuttlefish:unset();
+ Int when is_integer(Int) ->
+ Int;
+ _ ->
+ cuttlefish:invalid("should be a non-negative integer")
+ end
+ end
+}.
+
+%% @doc A process will become busy when it exceeds this amount of time
+%% doing garbage collection.
+%% @see sysmon_handler.thresholds.busy_processes
+{mapping, "sysmon_handler.triggers.process.garbage_collection", "sysmon_handler.gc_ms_limit", [
+ {datatype, [{atom, off},
+ {duration, ms}]},
+ hidden
+]}.
+
+{translation, "sysmon_handler.gc_ms_limit",
+ fun(Conf) ->
+ case cuttlefish:conf_get("sysmon_handler.triggers.process.garbage_collection", Conf, undefined) of
+ undefined ->
+ cuttlefish:unset();
+ off ->
+ 0;
+ Int when is_integer(Int) ->
+ Int;
+ _ ->
+ cuttlefish:invalid("should be a non-negative integer")
+ end
+ end
+}.
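+
+%% For example, either a duration or `off` (illustrative values):
+%%
+%% sysmon_handler.triggers.process.garbage_collection = 100ms
+%% sysmon_handler.triggers.process.garbage_collection = off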
+
+%% @doc A process will become busy when it exceeds this amount of time
+%% during a single process scheduling & execution cycle.
+{mapping, "sysmon_handler.triggers.process.long_scheduled_execution", "sysmon_handler.schedule_ms_limit", [
+ {datatype, [{atom, off},
+ {duration, ms}]},
+ hidden
+]}.
+
+{translation, "sysmon_handler.schedule_ms_limit",
+ fun(Conf) ->
+ case cuttlefish:conf_get("sysmon_handler.triggers.process.long_scheduled_execution", Conf, undefined) of
+ undefined ->
+ cuttlefish:unset();
+ off ->
+ 0;
+ Int when is_integer(Int) ->
+ Int;
+ _ ->
+ cuttlefish:invalid("should be a non-negative integer")
+ end
+ end
+}.
+
+%% @doc A process will become busy when its heap exceeds this size.
+%% @see sysmon_handler.thresholds.busy_processes
+{mapping, "sysmon_handler.triggers.process.heap_size", "sysmon_handler.heap_word_limit", [
+ {datatype, [{atom, off},
+ bytesize]},
+ hidden
+]}.
+
+{translation, "sysmon_handler.heap_word_limit",
+ fun(Conf) ->
+ case cuttlefish:conf_get("sysmon_handler.triggers.process.heap_size", Conf, undefined) of
+ undefined ->
+ cuttlefish:unset();
+ off ->
+ 0;
+ Bytes when is_integer(Bytes) ->
+ WordSize = erlang:system_info(wordsize),
+ Bytes div WordSize;
+ _ ->
+ cuttlefish:invalid("should be a non-negative integer")
+ end
+ end
+}.
+
+%% @doc Whether ports with full input buffers will be counted as
+%% busy. Ports can represent open files or network sockets.
+%% @see sysmon_handler.thresholds.busy_ports
+{mapping, "sysmon_handler.triggers.port", "sysmon_handler.busy_port", [
+ {datatype, flag},
+ hidden
+]}.
+
+{translation, "sysmon_handler.busy_port",
+ fun(Conf) ->
+ case cuttlefish:conf_get("sysmon_handler.triggers.port", Conf, undefined) of
+ undefined ->
+ cuttlefish:unset();
+ Val -> Val
+ end
+ end
+}.
+
+%% @doc Whether distribution ports with full input buffers will be
+%% counted as busy. Distribution ports connect Erlang nodes within a
+%% single cluster.
+%% @see sysmon_handler.thresholds.busy_ports
+{mapping, "sysmon_handler.triggers.distribution_port", "sysmon_handler.busy_dist_port", [
+ {datatype, flag},
+ hidden
+]}.
+
+{translation, "sysmon_handler.busy_dist_port",
+ fun(Conf) ->
+ case cuttlefish:conf_get("sysmon_handler.triggers.distribution_port", Conf, undefined) of
+ undefined ->
+ cuttlefish:unset();
+ Val -> Val
+ end
+ end
+}.
+
+%%
+%% Ra
+%%
+
+{mapping, "raft.segment_max_entries", "ra.segment_max_entries", [
+ {datatype, integer},
+ {validators, ["non_zero_positive_integer"]}
+]}.
+
+{translation, "ra.segment_max_entries",
+ fun(Conf) ->
+ case cuttlefish:conf_get("raft.segment_max_entries", Conf, undefined) of
+ undefined -> cuttlefish:unset();
+ Val -> Val
+ end
+ end
+}.
+
+{mapping, "raft.wal_max_size_bytes", "ra.wal_max_size_bytes", [
+ {datatype, integer},
+ {validators, ["non_zero_positive_integer"]}
+]}.
+
+{translation, "ra.wal_max_size_bytes",
+ fun(Conf) ->
+ case cuttlefish:conf_get("raft.wal_max_size_bytes", Conf, undefined) of
+ undefined -> cuttlefish:unset();
+ Val -> Val
+ end
+ end
+}.
+
+{mapping, "raft.wal_max_entries", "ra.wal_max_entries", [
+ {datatype, integer},
+ {validators, ["non_zero_positive_integer"]}
+]}.
+
+{translation, "ra.wal_max_entries",
+ fun(Conf) ->
+ case cuttlefish:conf_get("raft.wal_max_entries", Conf, undefined) of
+ undefined -> cuttlefish:unset();
+ Val -> Val
+ end
+ end
+}.
+
+{mapping, "raft.wal_hibernate_after", "ra.wal_hibernate_after", [
+ {datatype, integer},
+ {validators, ["non_zero_positive_integer"]}
+]}.
+
+{translation, "ra.wal_hibernate_after",
+ fun(Conf) ->
+ case cuttlefish:conf_get("raft.wal_hibernate_after", Conf, undefined) of
+ undefined -> cuttlefish:unset();
+ Val -> Val
+ end
+ end
+}.
+
+{mapping, "raft.wal_max_batch_size", "ra.wal_max_batch_size", [
+ {datatype, integer},
+ {validators, ["non_zero_positive_integer"]}
+]}.
+
+{translation, "ra.wal_max_batch_size",
+ fun(Conf) ->
+ case cuttlefish:conf_get("raft.wal_max_batch_size", Conf, undefined) of
+ undefined -> cuttlefish:unset();
+ Val -> Val
+ end
+ end
+}.
+
+{mapping, "raft.snapshot_chunk_size", "ra.snapshot_chunk_size", [
+ {datatype, integer},
+ {validators, ["non_zero_positive_integer"]}
+]}.
+
+{translation, "ra.snapshot_chunk_size",
+ fun(Conf) ->
+ case cuttlefish:conf_get("raft.snapshot_chunk_size", Conf, undefined) of
+ undefined -> cuttlefish:unset();
+ Val -> Val
+ end
+ end
+}.
+
+{mapping, "raft.data_dir", "ra.data_dir", [
+ {datatype, string}
+]}.
+
+{translation, "ra.data_dir",
+ fun(Conf) ->
+ case cuttlefish:conf_get("raft.data_dir", Conf, undefined) of
+ undefined -> cuttlefish:unset();
+ Val -> Val
+ end
+ end
+}.
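+
+%% For example (illustrative values; when unset, Ra's own defaults apply):
+%%
+%% raft.wal_max_size_bytes = 64000000
+%% raft.segment_max_entries = 65536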
+
+% ===============================
+% Validators
+% ===============================
+
+{validator, "size_less_than_2G", "Byte size should be less than 2G and greater than 0",
+fun(Size) when is_integer(Size) ->
+ Size > 0 andalso Size < 2147483648
+end}.
+
+{validator, "less_then_512MB", "Max message size should be less than 512MB and gre than 0",
+fun(Size) when is_integer(Size) ->
+ Size > 0 andalso Size < 536870912
+end}.
+
+{validator, "less_than_1", "Float is not between 0 and 1",
+fun(Float) when is_float(Float) ->
+ Float > 0 andalso Float < 1
+end}.
+
+{validator, "port", "Invalid port number",
+fun(Port) when is_integer(Port) ->
+    Port > 0 andalso Port =< 65535
+end}.
+
+{validator, "byte", "Integer must be in the range [0, 255]",
+fun(Int) when is_integer(Int) ->
+ Int >= 0 andalso Int =< 255
+end}.
+
+{validator, "dir_writable", "Cannot create file in dir",
+fun(Dir) ->
+ TestFile = filename:join(Dir, "test_file"),
+ file:delete(TestFile),
+ Res = ok == file:write_file(TestFile, <<"test">>),
+ file:delete(TestFile),
+ Res
+end}.
+
+{validator, "file_accessible", "file doesn't exist or isn't readable",
+fun(File) ->
+ ReadFile = file:read_file_info(File),
+ element(1, ReadFile) == ok
+end}.
+
+{validator, "is_ip", "string is a valid IP address",
+fun(IpStr) ->
+ Res = inet:parse_address(IpStr),
+ element(1, Res) == ok
+end}.
+
+{validator, "non_negative_integer", "number should be greater or equal to zero",
+fun(Int) when is_integer(Int) ->
+ Int >= 0
+end}.
+
+{validator, "non_zero_positive_integer", "number should be greater or equal to one",
+fun(Int) when is_integer(Int) ->
+ Int >= 1
+end}.
diff --git a/deps/rabbit/rabbitmq-components.mk b/deps/rabbit/rabbitmq-components.mk
new file mode 100644
index 0000000000..b2a3be8b35
--- /dev/null
+++ b/deps/rabbit/rabbitmq-components.mk
@@ -0,0 +1,359 @@
+ifeq ($(.DEFAULT_GOAL),)
+# Define default goal to `all` because this file defines some targets
+# before the inclusion of erlang.mk leading to the wrong target becoming
+# the default.
+.DEFAULT_GOAL = all
+endif
+
+# PROJECT_VERSION defaults to:
+# 1. the version exported by rabbitmq-server-release;
+# 2. the version stored in `git-revisions.txt`, if it exists;
+# 3. a version based on git-describe(1), if it is a Git clone;
+# 4. 0.0.0
+
+PROJECT_VERSION := $(RABBITMQ_VERSION)
+
+ifeq ($(PROJECT_VERSION),)
+PROJECT_VERSION := $(shell \
+if test -f git-revisions.txt; then \
+ head -n1 git-revisions.txt | \
+ awk '{print $$$(words $(PROJECT_DESCRIPTION) version);}'; \
+else \
+ (git describe --dirty --abbrev=7 --tags --always --first-parent \
+ 2>/dev/null || echo rabbitmq_v0_0_0) | \
+ sed -e 's/^rabbitmq_v//' -e 's/^v//' -e 's/_/./g' -e 's/-/+/' \
+ -e 's/-/./g'; \
+fi)
+endif
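+
+# For example (illustrative): a tag such as `rabbitmq_v3_8_9` yields the
+# PROJECT_VERSION `3.8.9`, and a git-describe output such as
+# `v3.8.9-10-gabcdef0` yields `3.8.9+10.gabcdef0`.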
+
+# --------------------------------------------------------------------
+# RabbitMQ components.
+# --------------------------------------------------------------------
+
+# For RabbitMQ repositories, we want to check out branches which match
+# the parent project. For instance, if the parent project is on a
+# release tag, dependencies must be on the same release tag. If the
+# parent project is on a topic branch, dependencies must be on the same
+# topic branch or fall back to `stable` or `master`, whichever was the
+# base of the topic branch.
+
+dep_amqp_client = git_rmq rabbitmq-erlang-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_amqp10_client = git_rmq rabbitmq-amqp1.0-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_amqp10_common = git_rmq rabbitmq-amqp1.0-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbit = git_rmq rabbitmq-server $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbit_common = git_rmq rabbitmq-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_amqp1_0 = git_rmq rabbitmq-amqp1.0 $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_amqp = git_rmq rabbitmq-auth-backend-amqp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_cache = git_rmq rabbitmq-auth-backend-cache $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_http = git_rmq rabbitmq-auth-backend-http $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_ldap = git_rmq rabbitmq-auth-backend-ldap $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_oauth2 = git_rmq rabbitmq-auth-backend-oauth2 $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_mechanism_ssl = git_rmq rabbitmq-auth-mechanism-ssl $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_aws = git_rmq rabbitmq-aws $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_boot_steps_visualiser = git_rmq rabbitmq-boot-steps-visualiser $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_cli = git_rmq rabbitmq-cli $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_codegen = git_rmq rabbitmq-codegen $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_consistent_hash_exchange = git_rmq rabbitmq-consistent-hash-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_ct_client_helpers = git_rmq rabbitmq-ct-client-helpers $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_ct_helpers = git_rmq rabbitmq-ct-helpers $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_delayed_message_exchange = git_rmq rabbitmq-delayed-message-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_dotnet_client = git_rmq rabbitmq-dotnet-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_event_exchange = git_rmq rabbitmq-event-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_federation = git_rmq rabbitmq-federation $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_federation_management = git_rmq rabbitmq-federation-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_java_client = git_rmq rabbitmq-java-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_client = git_rmq rabbitmq-jms-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_cts = git_rmq rabbitmq-jms-cts $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_topic_exchange = git_rmq rabbitmq-jms-topic-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_lvc_exchange = git_rmq rabbitmq-lvc-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management = git_rmq rabbitmq-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_agent = git_rmq rabbitmq-management-agent $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_exchange = git_rmq rabbitmq-management-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_themes = git_rmq rabbitmq-management-themes $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_message_timestamp = git_rmq rabbitmq-message-timestamp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_metronome = git_rmq rabbitmq-metronome $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_mqtt = git_rmq rabbitmq-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_objc_client = git_rmq rabbitmq-objc-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_aws = git_rmq rabbitmq-peer-discovery-aws $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_common = git_rmq rabbitmq-peer-discovery-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_consul = git_rmq rabbitmq-peer-discovery-consul $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_etcd = git_rmq rabbitmq-peer-discovery-etcd $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_k8s = git_rmq rabbitmq-peer-discovery-k8s $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_prometheus = git_rmq rabbitmq-prometheus $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_random_exchange = git_rmq rabbitmq-random-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_recent_history_exchange = git_rmq rabbitmq-recent-history-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_routing_node_stamp = git_rmq rabbitmq-routing-node-stamp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_rtopic_exchange = git_rmq rabbitmq-rtopic-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_server_release = git_rmq rabbitmq-server-release $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_sharding = git_rmq rabbitmq-sharding $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_shovel = git_rmq rabbitmq-shovel $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_shovel_management = git_rmq rabbitmq-shovel-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_stomp = git_rmq rabbitmq-stomp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_stream = git_rmq rabbitmq-stream $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_toke = git_rmq rabbitmq-toke $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_top = git_rmq rabbitmq-top $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_tracing = git_rmq rabbitmq-tracing $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_trust_store = git_rmq rabbitmq-trust-store $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_test = git_rmq rabbitmq-test $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_dispatch = git_rmq rabbitmq-web-dispatch $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_stomp = git_rmq rabbitmq-web-stomp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_stomp_examples = git_rmq rabbitmq-web-stomp-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt = git_rmq rabbitmq-web-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt_examples = git_rmq rabbitmq-web-mqtt-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_website = git_rmq rabbitmq-website $(current_rmq_ref) $(base_rmq_ref) live master
+dep_toke = git_rmq toke $(current_rmq_ref) $(base_rmq_ref) master
+
+dep_rabbitmq_public_umbrella = git_rmq rabbitmq-public-umbrella $(current_rmq_ref) $(base_rmq_ref) master
+
+# Third-party dependencies version pinning.
+#
+# We do that in this file, which is copied in all projects, to ensure
+# all projects use the same versions. It avoids conflicts and makes it
+# possible to work with rabbitmq-public-umbrella.
+
+dep_accept = hex 0.3.5
+dep_cowboy = hex 2.8.0
+dep_cowlib = hex 2.9.1
+dep_jsx = hex 2.11.0
+dep_lager = hex 3.8.0
+dep_prometheus = git https://github.com/deadtrickster/prometheus.erl.git master
+dep_ra = git https://github.com/rabbitmq/ra.git master
+dep_ranch = hex 1.7.1
+dep_recon = hex 2.5.1
+dep_observer_cli = hex 1.5.4
+dep_stdout_formatter = hex 0.2.4
+dep_sysmon_handler = hex 1.3.0
+
+RABBITMQ_COMPONENTS = amqp_client \
+ amqp10_common \
+ amqp10_client \
+ rabbit \
+ rabbit_common \
+ rabbitmq_amqp1_0 \
+ rabbitmq_auth_backend_amqp \
+ rabbitmq_auth_backend_cache \
+ rabbitmq_auth_backend_http \
+ rabbitmq_auth_backend_ldap \
+ rabbitmq_auth_backend_oauth2 \
+ rabbitmq_auth_mechanism_ssl \
+ rabbitmq_aws \
+ rabbitmq_boot_steps_visualiser \
+ rabbitmq_cli \
+ rabbitmq_codegen \
+ rabbitmq_consistent_hash_exchange \
+ rabbitmq_ct_client_helpers \
+ rabbitmq_ct_helpers \
+ rabbitmq_delayed_message_exchange \
+ rabbitmq_dotnet_client \
+ rabbitmq_event_exchange \
+ rabbitmq_federation \
+ rabbitmq_federation_management \
+ rabbitmq_java_client \
+ rabbitmq_jms_client \
+ rabbitmq_jms_cts \
+ rabbitmq_jms_topic_exchange \
+ rabbitmq_lvc_exchange \
+ rabbitmq_management \
+ rabbitmq_management_agent \
+ rabbitmq_management_exchange \
+ rabbitmq_management_themes \
+ rabbitmq_message_timestamp \
+ rabbitmq_metronome \
+ rabbitmq_mqtt \
+ rabbitmq_objc_client \
+ rabbitmq_peer_discovery_aws \
+ rabbitmq_peer_discovery_common \
+ rabbitmq_peer_discovery_consul \
+ rabbitmq_peer_discovery_etcd \
+ rabbitmq_peer_discovery_k8s \
+ rabbitmq_prometheus \
+ rabbitmq_random_exchange \
+ rabbitmq_recent_history_exchange \
+ rabbitmq_routing_node_stamp \
+ rabbitmq_rtopic_exchange \
+ rabbitmq_server_release \
+ rabbitmq_sharding \
+ rabbitmq_shovel \
+ rabbitmq_shovel_management \
+ rabbitmq_stomp \
+ rabbitmq_stream \
+ rabbitmq_toke \
+ rabbitmq_top \
+ rabbitmq_tracing \
+ rabbitmq_trust_store \
+ rabbitmq_web_dispatch \
+ rabbitmq_web_mqtt \
+ rabbitmq_web_mqtt_examples \
+ rabbitmq_web_stomp \
+ rabbitmq_web_stomp_examples \
+ rabbitmq_website
+
+# By default, Erlang.mk does not rebuild dependencies once they have
+# been compiled, except for those listed in the `$(FORCE_REBUILD)`
+# variable.
+#
+# We want all RabbitMQ components to always be rebuilt: this eases
+# the work on several components at the same time.
+
+FORCE_REBUILD = $(RABBITMQ_COMPONENTS)
+
+# Several components have a custom erlang.mk/build.config, mainly
+# to disable eunit. Therefore, we can't use the top-level project's
+# erlang.mk copy.
+NO_AUTOPATCH += $(RABBITMQ_COMPONENTS)
+
+ifeq ($(origin current_rmq_ref),undefined)
+ifneq ($(wildcard .git),)
+current_rmq_ref := $(shell (\
+ ref=$$(LANG=C git branch --list | awk '/^\* \(.*detached / {ref=$$0; sub(/.*detached [^ ]+ /, "", ref); sub(/\)$$/, "", ref); print ref; exit;} /^\* / {ref=$$0; sub(/^\* /, "", ref); print ref; exit}');\
+ if test "$$(git rev-parse --short HEAD)" != "$$ref"; then echo "$$ref"; fi))
+else
+current_rmq_ref := master
+endif
+endif
+export current_rmq_ref
+
+ifeq ($(origin base_rmq_ref),undefined)
+ifneq ($(wildcard .git),)
+possible_base_rmq_ref := master
+ifeq ($(possible_base_rmq_ref),$(current_rmq_ref))
+base_rmq_ref := $(current_rmq_ref)
+else
+base_rmq_ref := $(shell \
+ (git rev-parse --verify -q master >/dev/null && \
+ git rev-parse --verify -q $(possible_base_rmq_ref) >/dev/null && \
+ git merge-base --is-ancestor $$(git merge-base master HEAD) $(possible_base_rmq_ref) && \
+ echo $(possible_base_rmq_ref)) || \
+ echo master)
+endif
+else
+base_rmq_ref := master
+endif
+endif
+export base_rmq_ref
+
+# Repository URL selection.
+#
+# First, we infer other components' location from the current project
+# repository URL, if it's a Git repository:
+# - We take the "origin" remote URL as the base
+#   - The current project name and repository name are replaced by the
+# target's properties:
+# eg. rabbitmq-common is replaced by rabbitmq-codegen
+# eg. rabbit_common is replaced by rabbitmq_codegen
+#
+# If cloning from this computed location fails, we fall back to the
+# RabbitMQ upstream on GitHub.
+
+# Macro to transform eg. "rabbit_common" to "rabbitmq-common".
+rmq_cmp_repo_name = $(word 2,$(dep_$(1)))
+
+# Upstream URL for the current project.
+RABBITMQ_COMPONENT_REPO_NAME := $(call rmq_cmp_repo_name,$(PROJECT))
+RABBITMQ_UPSTREAM_FETCH_URL ?= https://github.com/rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git
+RABBITMQ_UPSTREAM_PUSH_URL ?= git@github.com:rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git
+
+# Current URL for the current project. If this is not a Git clone,
+# default to the upstream Git repository.
+ifneq ($(wildcard .git),)
+git_origin_fetch_url := $(shell git config remote.origin.url)
+git_origin_push_url := $(shell git config remote.origin.pushurl || git config remote.origin.url)
+RABBITMQ_CURRENT_FETCH_URL ?= $(git_origin_fetch_url)
+RABBITMQ_CURRENT_PUSH_URL ?= $(git_origin_push_url)
+else
+RABBITMQ_CURRENT_FETCH_URL ?= $(RABBITMQ_UPSTREAM_FETCH_URL)
+RABBITMQ_CURRENT_PUSH_URL ?= $(RABBITMQ_UPSTREAM_PUSH_URL)
+endif
+
+# Macro to replace the following pattern:
+# 1. /foo.git -> /bar.git
+# 2. /foo -> /bar
+# 3. /foo/ -> /bar/
+subst_repo_name = $(patsubst %/$(1)/%,%/$(2)/%,$(patsubst %/$(1),%/$(2),$(patsubst %/$(1).git,%/$(2).git,$(3))))
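+
+# For example (hypothetical URL):
+#   $(call subst_repo_name,rabbitmq-server,rabbitmq-codegen,https://example.com/rabbitmq-server.git)
+#   yields https://example.com/rabbitmq-codegen.git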
+
+# Macro to replace both the project's name (eg. "rabbit_common") and
+# repository name (eg. "rabbitmq-common") by the target's equivalent.
+#
+# This macro is kept on one line because we don't want whitespaces in
+# the returned value, as it's used in $(dep_fetch_git_rmq) in a shell
+# single-quoted string.
+dep_rmq_repo = $(if $(dep_$(2)),$(call subst_repo_name,$(PROJECT),$(2),$(call subst_repo_name,$(RABBITMQ_COMPONENT_REPO_NAME),$(call rmq_cmp_repo_name,$(2)),$(1))),$(pkg_$(1)_repo))
+
+dep_rmq_commits = $(if $(dep_$(1)), \
+ $(wordlist 3,$(words $(dep_$(1))),$(dep_$(1))), \
+ $(pkg_$(1)_commit))
+
+define dep_fetch_git_rmq
+ fetch_url1='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_FETCH_URL),$(1))'; \
+ fetch_url2='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_FETCH_URL),$(1))'; \
+ if test "$$$$fetch_url1" != '$(RABBITMQ_CURRENT_FETCH_URL)' && \
+ git clone -q -n -- "$$$$fetch_url1" $(DEPS_DIR)/$(call dep_name,$(1)); then \
+ fetch_url="$$$$fetch_url1"; \
+ push_url='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_PUSH_URL),$(1))'; \
+ elif git clone -q -n -- "$$$$fetch_url2" $(DEPS_DIR)/$(call dep_name,$(1)); then \
+ fetch_url="$$$$fetch_url2"; \
+ push_url='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_PUSH_URL),$(1))'; \
+ fi; \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && ( \
+ $(foreach ref,$(call dep_rmq_commits,$(1)), \
+ git checkout -q $(ref) >/dev/null 2>&1 || \
+ ) \
+ (echo "error: no valid pathspec among: $(call dep_rmq_commits,$(1))" \
+ 1>&2 && false) ) && \
+ (test "$$$$fetch_url" = "$$$$push_url" || \
+ git remote set-url --push origin "$$$$push_url")
+endef
+
+# --------------------------------------------------------------------
+# Component distribution.
+# --------------------------------------------------------------------
+
+list-dist-deps::
+ @:
+
+prepare-dist::
+ @:
+
+# --------------------------------------------------------------------
+# Umbrella-specific settings.
+# --------------------------------------------------------------------
+
+# If the top-level project is a RabbitMQ component, we override
+# $(DEPS_DIR) for this project to point to the top-level's one.
+#
+# We also verify that the guessed DEPS_DIR is actually named `deps`,
+# to rule out any situation where it is a coincidence that we found a
+# `rabbitmq-components.mk` in upper directories.
+
+possible_deps_dir_1 = $(abspath ..)
+possible_deps_dir_2 = $(abspath ../../..)
+
+ifeq ($(notdir $(possible_deps_dir_1)),deps)
+ifneq ($(wildcard $(possible_deps_dir_1)/../rabbitmq-components.mk),)
+deps_dir_overriden = 1
+DEPS_DIR ?= $(possible_deps_dir_1)
+DISABLE_DISTCLEAN = 1
+endif
+endif
+
+ifeq ($(deps_dir_overriden),)
+ifeq ($(notdir $(possible_deps_dir_2)),deps)
+ifneq ($(wildcard $(possible_deps_dir_2)/../rabbitmq-components.mk),)
+deps_dir_overriden = 1
+DEPS_DIR ?= $(possible_deps_dir_2)
+DISABLE_DISTCLEAN = 1
+endif
+endif
+endif
+
+ifneq ($(wildcard UMBRELLA.md),)
+DISABLE_DISTCLEAN = 1
+endif
+
+# We disable `make distclean` so $(DEPS_DIR) is not accidentally removed.
+
+ifeq ($(DISABLE_DISTCLEAN),1)
+ifneq ($(filter distclean distclean-deps,$(MAKECMDGOALS)),)
+SKIP_DEPS = 1
+endif
+endif
diff --git a/deps/rabbit/scripts/rabbitmq-defaults b/deps/rabbit/scripts/rabbitmq-defaults
new file mode 100755
index 0000000000..41d72c7da4
--- /dev/null
+++ b/deps/rabbit/scripts/rabbitmq-defaults
@@ -0,0 +1,18 @@
+#!/bin/sh -e
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2012-2020 VMware, Inc. or its affiliates. All rights reserved.
+##
+
+### next line potentially updated in package install steps
+SYS_PREFIX=
+
+CLEAN_BOOT_FILE=start_clean
+SASL_BOOT_FILE=start_sasl
+BOOT_MODULE="rabbit"
+
+if test -z "$CONF_ENV_FILE" && test -z "$RABBITMQ_CONF_ENV_FILE"; then
+ CONF_ENV_FILE=${SYS_PREFIX}/etc/rabbitmq/rabbitmq-env.conf
+fi
diff --git a/deps/rabbit/scripts/rabbitmq-defaults.bat b/deps/rabbit/scripts/rabbitmq-defaults.bat
new file mode 100644
index 0000000000..41b3d2b47c
--- /dev/null
+++ b/deps/rabbit/scripts/rabbitmq-defaults.bat
@@ -0,0 +1,21 @@
+@echo off
+
+set SASL_BOOT_FILE=start_sasl
+set CLEAN_BOOT_FILE=start_clean
+set BOOT_MODULE=rabbit
+
+if "!RABBITMQ_BASE!"=="" (
+ set RABBITMQ_BASE=!APPDATA!\RabbitMQ
+) else (
+ set RABBITMQ_BASE=!RABBITMQ_BASE:"=!
+)
+
+if not exist "!RABBITMQ_BASE!" (
+ mkdir "!RABBITMQ_BASE!"
+)
+
+if "!RABBITMQ_CONF_ENV_FILE!"=="" (
+ if "!CONF_ENV_FILE!"=="" (
+ set CONF_ENV_FILE=!RABBITMQ_BASE!\rabbitmq-env-conf.bat
+ )
+)
diff --git a/deps/rabbit/scripts/rabbitmq-diagnostics b/deps/rabbit/scripts/rabbitmq-diagnostics
new file mode 100755
index 0000000000..7101f3cc9b
--- /dev/null
+++ b/deps/rabbit/scripts/rabbitmq-diagnostics
@@ -0,0 +1,23 @@
+#!/bin/sh
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+##
+
+# Exit immediately if a pipeline, which may consist of a single simple command,
+# a list, or a compound command, returns a non-zero status
+set -e
+
+# Each variable or function that is created or modified is given the export
+# attribute and marked for export to the environment of subsequent commands.
+set -a
+
+# shellcheck source=/dev/null
+#
+# TODO: when shellcheck adds support for relative paths, change to
+# shellcheck source=./rabbitmq-env
+. "${0%/*}"/rabbitmq-env
+
+run_escript rabbitmqctl_escript "${ESCRIPT_DIR:?must be defined}"/rabbitmq-diagnostics "$@"
diff --git a/deps/rabbit/scripts/rabbitmq-diagnostics.bat b/deps/rabbit/scripts/rabbitmq-diagnostics.bat
new file mode 100644
index 0000000000..af2982559c
--- /dev/null
+++ b/deps/rabbit/scripts/rabbitmq-diagnostics.bat
@@ -0,0 +1,55 @@
+@echo off
+REM This Source Code Form is subject to the terms of the Mozilla Public
+REM License, v. 2.0. If a copy of the MPL was not distributed with this
+REM file, You can obtain one at https://mozilla.org/MPL/2.0/.
+REM
+REM Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+REM
+
+REM Scopes the variables to the current batch file
+setlocal
+
+rem Preserve values that might contain exclamation marks before
+rem enabling delayed expansion
+set TDP0=%~dp0
+set STAR=%*
+setlocal enabledelayedexpansion
+
+REM Get default settings with user overrides for (RABBITMQ_)<var_name>
+REM Non-empty defaults should be set in rabbitmq-env
+call "%TDP0%\rabbitmq-env.bat" %~n0
+
+if not exist "!ERLANG_HOME!\bin\erl.exe" (
+ echo.
+ echo ******************************
+ echo ERLANG_HOME not set correctly.
+ echo ******************************
+ echo.
+ echo Please either set ERLANG_HOME to point to your Erlang installation or place the
+ echo RabbitMQ server distribution in the Erlang lib folder.
+ echo.
+ exit /B 1
+)
+
+REM Disable erl_crash.dump by default for control scripts.
+if not defined ERL_CRASH_DUMP_SECONDS (
+ set ERL_CRASH_DUMP_SECONDS=0
+)
+
+"!ERLANG_HOME!\bin\erl.exe" +B ^
+-boot !CLEAN_BOOT_FILE! ^
+-noinput -noshell -hidden -smp enable ^
+!RABBITMQ_CTL_ERL_ARGS! ^
+-kernel inet_dist_listen_min !RABBITMQ_CTL_DIST_PORT_MIN! ^
+-kernel inet_dist_listen_max !RABBITMQ_CTL_DIST_PORT_MAX! ^
+-run escript start ^
+-escript main rabbitmqctl_escript ^
+-extra "%RABBITMQ_HOME%\escript\rabbitmq-diagnostics" !STAR!
+
+if ERRORLEVEL 1 (
+ exit /B %ERRORLEVEL%
+)
+
+EXIT /B 0
+
+endlocal
diff --git a/deps/rabbit/scripts/rabbitmq-echopid.bat b/deps/rabbit/scripts/rabbitmq-echopid.bat
new file mode 100644
index 0000000000..98080afd1f
--- /dev/null
+++ b/deps/rabbit/scripts/rabbitmq-echopid.bat
@@ -0,0 +1,57 @@
+@echo off
+
+REM Usage: rabbitmq-echopid.bat <rabbitmq_nodename>
+REM
+REM <rabbitmq_nodename> (s)name of the erlang node to connect to (required)
+
+setlocal
+
+set TDP0=%~dp0
+
+REM Get default settings with user overrides for (RABBITMQ_)<var_name>
+REM Non-empty defaults should be set in rabbitmq-env
+call "%TDP0%\rabbitmq-env.bat" %~n0
+
+if "%1"=="" goto argfail
+
+:: set timeout vars ::
+set TIMEOUT=10
+set TIMER=1
+
+:: check that wmic exists ::
+set WMIC_PATH=%SYSTEMROOT%\System32\Wbem\wmic.exe
+if not exist "%WMIC_PATH%" (
+ goto fail
+)
+
+:getpid
+for /f "usebackq tokens=* skip=1" %%P IN (`%%WMIC_PATH%% process where "name='erl.exe' and commandline like '%%%RABBITMQ_NAME_TYPE% %1%%'" get processid 2^>nul`) do (
+ set PID=%%P
+ goto echopid
+)
+
+:echopid
+:: check for pid not found ::
+if "%PID%" == "" (
+ PING 127.0.0.1 -n 2 > nul
+ set /a TIMER+=1
+ if %TIMEOUT%==%TIMER% goto fail
+ goto getpid
+)
+
+:: show pid ::
+echo %PID%
+
+:: all done ::
+:ok
+endlocal
+EXIT /B 0
+
+:: argument is required ::
+:argfail
+echo Please provide your RabbitMQ node name as the argument to this script.
+
+:: something went wrong ::
+:fail
+endlocal
+EXIT /B 1
diff --git a/deps/rabbit/scripts/rabbitmq-env b/deps/rabbit/scripts/rabbitmq-env
new file mode 100755
index 0000000000..90702c43bb
--- /dev/null
+++ b/deps/rabbit/scripts/rabbitmq-env
@@ -0,0 +1,190 @@
+#!/bin/sh -e
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+##
+
+if [ "$RABBITMQ_ENV_LOADED" = 1 ]; then
+ return 0;
+fi
+
+if [ -z "$RABBITMQ_SCRIPTS_DIR" ]; then
+    # We set +e here since our test for "readlink -f" below needs to
+ # be able to fail.
+ set +e
+ # Determine where this script is really located (if this script is
+ # invoked from another script, this is the location of the caller)
+ SCRIPT_PATH="$0"
+ while [ -h "$SCRIPT_PATH" ] ; do
+ # Determine if readlink -f is supported at all. TODO clean this up.
+        FULL_PATH=`readlink -f "$SCRIPT_PATH" 2>/dev/null`
+ if [ "$?" != "0" ]; then
+            REL_PATH=`readlink "$SCRIPT_PATH"`
+ if expr "$REL_PATH" : '/.*' > /dev/null; then
+ SCRIPT_PATH="$REL_PATH"
+ else
+ SCRIPT_PATH="`dirname "$SCRIPT_PATH"`/$REL_PATH"
+ fi
+ else
+ SCRIPT_PATH=$FULL_PATH
+ fi
+ done
+ set -e
+
+    RABBITMQ_SCRIPTS_DIR=`dirname "$SCRIPT_PATH"`
+fi
+
+_rmq_env_now()
+{
+ date '+%Y-%m-%d %H:%M:%S'
+}
+
+_rmq_env_print()
+{
+ _rmq_env_tmp="$1"
+ _rmq_env_tmp_len="${#_rmq_env_tmp}"
+ shift
+ printf '%s %s %s\n' "$(_rmq_env_now)" "$_rmq_env_tmp" "$1" 1>&2
+ shift
+ _rmq_env_print_line=''
+ _rmq_env_indent="$((_rmq_env_tmp_len + 21))"
+ for _rmq_env_print_line in "$@"
+ do
+ printf "%${_rmq_env_indent}s%s\n" ' ' "$_rmq_env_print_line" 1>&2
+ done
+ unset _rmq_env_print_line
+ unset _rmq_env_indent
+ unset _rmq_env_tmp_len
+ unset _rmq_env_tmp
+}
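+
+# A sketch of the resulting output format (timestamp illustrative;
+# continuation lines are indented to align with the first message):
+#   2020-11-18 14:27:41 [warning] first message line
+#                                 second message line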
+
+_rmq_env_perr()
+{
+ _rmq_env_print '[error]' "$@"
+}
+
+_rmq_env_pwarn()
+{
+ _rmq_env_print '[warning]' "$@"
+}
+
+rmq_realpath() {
+ local path=$1
+
+ if [ -d "$path" ]; then
+ cd "$path" && pwd
+ elif [ -f "$path" ]; then
+        cd "$(dirname "$path")" && echo "$(pwd)/$(basename "$path")"
+ else
+ echo "$path"
+ fi
+}
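+
+# For example (hypothetical paths, run from /opt/rabbitmq/sbin): a
+# directory argument resolves to its absolute path, a file argument to
+# its absolute dirname plus basename:
+#   rmq_realpath ..              -> /opt/rabbitmq
+#   rmq_realpath ./rabbitmq-env  -> /opt/rabbitmq/sbin/rabbitmq-env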
+
+RABBITMQ_HOME="$(rmq_realpath "${RABBITMQ_SCRIPTS_DIR}/..")"
+ESCRIPT_DIR="${RABBITMQ_HOME}/escript"
+
+## Set defaults
+. "${RABBITMQ_SCRIPTS_DIR}/rabbitmq-defaults"
+
+# We save the current value of $RABBITMQ_PID_FILE in case it was set by
+# an init script. If $CONF_ENV_FILE overrides it again, we must ignore
+# it and warn the user.
+saved_RABBITMQ_PID_FILE="$RABBITMQ_PID_FILE"
+
+## Get configuration variables from the configure environment file
+[ "x" = "x$RABBITMQ_CONF_ENV_FILE" ] && RABBITMQ_CONF_ENV_FILE=${CONF_ENV_FILE}
+if [ -f "${RABBITMQ_CONF_ENV_FILE}" ]; then
+ CONF_ENV_FILE_PHASE=rabbitmq-env
+    . "${RABBITMQ_CONF_ENV_FILE}" || true
+fi
+
+[ -n "$ERL_EPMD_PORT" ] && export ERL_EPMD_PORT
+[ -n "$ERL_EPMD_ADDRESS" ] && export ERL_EPMD_ADDRESS
+
+DEFAULT_SCHEDULER_BIND_TYPE="db"
+[ -n "$SCHEDULER_BIND_TYPE" ] || SCHEDULER_BIND_TYPE="$DEFAULT_SCHEDULER_BIND_TYPE"
+[ -n "$RABBITMQ_SCHEDULER_BIND_TYPE" ] || RABBITMQ_SCHEDULER_BIND_TYPE="$SCHEDULER_BIND_TYPE"
+
+DEFAULT_DISTRIBUTION_BUFFER_SIZE=128000
+[ -n "$DISTRIBUTION_BUFFER_SIZE" ] || DISTRIBUTION_BUFFER_SIZE="$DEFAULT_DISTRIBUTION_BUFFER_SIZE"
+[ -n "$RABBITMQ_DISTRIBUTION_BUFFER_SIZE" ] || RABBITMQ_DISTRIBUTION_BUFFER_SIZE="$DISTRIBUTION_BUFFER_SIZE"
+
+DEFAULT_MAX_NUMBER_OF_PROCESSES=1048576
+[ -n "$MAX_NUMBER_OF_PROCESSES" ] || MAX_NUMBER_OF_PROCESSES="$DEFAULT_MAX_NUMBER_OF_PROCESSES"
+[ -n "$RABBITMQ_MAX_NUMBER_OF_PROCESSES" ] || RABBITMQ_MAX_NUMBER_OF_PROCESSES="$MAX_NUMBER_OF_PROCESSES"
+
+DEFAULT_MAX_NUMBER_OF_ATOMS=5000000
+[ -n "$MAX_NUMBER_OF_ATOMS" ] || MAX_NUMBER_OF_ATOMS="$DEFAULT_MAX_NUMBER_OF_ATOMS"
+[ -n "$RABBITMQ_MAX_NUMBER_OF_ATOMS" ] || RABBITMQ_MAX_NUMBER_OF_ATOMS="$MAX_NUMBER_OF_ATOMS"
+
+## Common server defaults
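+# +P caps the number of Erlang processes, +t caps the number of atoms,
+# +stbt sets the scheduler bind type, and +zdbbl sets the distribution
+# buffer busy limit (in kilobytes).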
+SERVER_ERL_ARGS=" +P $RABBITMQ_MAX_NUMBER_OF_PROCESSES +t $RABBITMQ_MAX_NUMBER_OF_ATOMS +stbt $RABBITMQ_SCHEDULER_BIND_TYPE +zdbbl $RABBITMQ_DISTRIBUTION_BUFFER_SIZE "
+
+##--- Set environment vars RABBITMQ_<var_name> to defaults if not set
+
+[ "x" = "x$RABBITMQ_CTL_ERL_ARGS" ] && RABBITMQ_CTL_ERL_ARGS="$CTL_ERL_ARGS"
+[ "x" = "x$RABBITMQ_CTL_DIST_PORT_MIN" ] && RABBITMQ_CTL_DIST_PORT_MIN="$CTL_DIST_PORT_MIN"
+[ "x" = "x$RABBITMQ_CTL_DIST_PORT_MAX" ] && RABBITMQ_CTL_DIST_PORT_MAX="$CTL_DIST_PORT_MAX"
+[ "x" = "x$RABBITMQ_CTL_DIST_PORT_MIN" ] && RABBITMQ_CTL_DIST_PORT_MIN='35672'
+[ "x" = "x$RABBITMQ_CTL_DIST_PORT_MAX" ] && RABBITMQ_CTL_DIST_PORT_MAX="$(($RABBITMQ_CTL_DIST_PORT_MIN + 10))"
+
+[ "x" = "x$RABBITMQ_SERVER_ERL_ARGS" ] && RABBITMQ_SERVER_ERL_ARGS=${SERVER_ERL_ARGS}
+[ "x" = "x$RABBITMQ_SERVER_START_ARGS" ] && RABBITMQ_SERVER_START_ARGS=${SERVER_START_ARGS}
+[ "x" = "x$RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS" ] && RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS=${SERVER_ADDITIONAL_ERL_ARGS}
+[ "x" = "x$RABBITMQ_SERVER_CODE_PATH" ] && RABBITMQ_SERVER_CODE_PATH=${SERVER_CODE_PATH}
+[ "x" = "x$RABBITMQ_IGNORE_SIGINT" ] && RABBITMQ_IGNORE_SIGINT="true"
+[ "xtrue" = "x$RABBITMQ_IGNORE_SIGINT" ] && RABBITMQ_IGNORE_SIGINT_FLAG="+B i"
+
+if [ -n "$saved_RABBITMQ_PID_FILE" ] && \
+ [ "$saved_RABBITMQ_PID_FILE" != "$RABBITMQ_PID_FILE" ]
+then
+ _rmq_env_pwarn 'RABBITMQ_PID_FILE was already set by the init script to:' \
+ "$saved_RABBITMQ_PID_FILE" \
+ 'The value set in rabbitmq-env.conf is ignored because it would break the init script.'
+
+ RABBITMQ_PID_FILE="$saved_RABBITMQ_PID_FILE"
+fi
+
+[ "x" = "x$RABBITMQ_BOOT_MODULE" ] && RABBITMQ_BOOT_MODULE=${BOOT_MODULE}
+
+##--- End of overridden <var_name> variables
+
+_rmq_env_set_erl_libs()
+{
+ if [ -n "$ERL_LIBS" ]
+ then
+ export ERL_LIBS="$RABBITMQ_HOME/plugins:$ERL_LIBS"
+ else
+ export ERL_LIBS="$RABBITMQ_HOME/plugins"
+ fi
+}
+
+run_escript()
+{
+ escript_main="${1:?escript_main must be defined}"
+ shift
+ escript="${1:?escript must be defined}"
+ shift
+
+ _rmq_env_set_erl_libs
+
+ # Important: do not quote RABBITMQ_CTL_ERL_ARGS as they must be
+ # word-split
+ # shellcheck disable=SC2086
+ exec erl +B \
+ -boot "$CLEAN_BOOT_FILE" \
+ -noinput -noshell -hidden -smp enable \
+ $RABBITMQ_CTL_ERL_ARGS \
+ -kernel inet_dist_listen_min "$RABBITMQ_CTL_DIST_PORT_MIN" \
+ -kernel inet_dist_listen_max "$RABBITMQ_CTL_DIST_PORT_MAX" \
+ -run escript start \
+ -escript main "$escript_main" \
+ -extra "$escript" "$@"
+}
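+
+# Usage sketch (this mirrors the CLI wrapper scripts; the "status"
+# argument is purely illustrative):
+#   run_escript rabbitmqctl_escript "${ESCRIPT_DIR}/rabbitmqctl" status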
+
+RABBITMQ_ENV_LOADED=1
+
+# Since we source this elsewhere, don't accidentally stop execution
+true
diff --git a/deps/rabbit/scripts/rabbitmq-env.bat b/deps/rabbit/scripts/rabbitmq-env.bat
new file mode 100644
index 0000000000..1db57b33c5
--- /dev/null
+++ b/deps/rabbit/scripts/rabbitmq-env.bat
@@ -0,0 +1,173 @@
+@echo off
+
+REM Scopes the variables to the current batch file
+REM setlocal
+
+rem Preserve values that might contain exclamation marks before
+rem enabling delayed expansion
+set TDP0=%~dp0
+REM setlocal enabledelayedexpansion
+
+REM SCRIPT_DIR=`dirname $SCRIPT_PATH`
+REM RABBITMQ_HOME="${SCRIPT_DIR}/.."
+set SCRIPT_DIR=%TDP0%
+set SCRIPT_NAME=%1
+for /f "delims=" %%F in ("%SCRIPT_DIR%..") do set RABBITMQ_HOME=%%~dpF%%~nF%%~xF
+
+if defined ERL_LIBS (
+ set "ERL_LIBS=%RABBITMQ_HOME%\plugins;%ERL_LIBS%"
+) else (
+ set "ERL_LIBS=%RABBITMQ_HOME%\plugins"
+)
+
+REM If ERLANG_HOME is not defined, check if "erl.exe" is available in
+REM the path and use that.
+if not defined ERLANG_HOME (
+ for /f "delims=" %%F in ('powershell.exe -NoLogo -NoProfile -NonInteractive -Command "(Get-Command erl.exe).Definition"') do @set ERL_PATH=%%F
+ if exist "!ERL_PATH!" (
+ for /f "delims=" %%F in ("!ERL_PATH!") do set ERL_DIRNAME=%%~dpF
+ for /f "delims=" %%F in ("!ERL_DIRNAME!\..") do @set ERLANG_HOME=%%~dpF%%~nF%%~xF
+ )
+ set ERL_PATH=
+ set ERL_DIRNAME=
+)
+
+REM ## Set defaults
+call "%SCRIPT_DIR%\rabbitmq-defaults.bat"
+
+if "!RABBITMQ_CONF_ENV_FILE!"=="" (
+ set RABBITMQ_CONF_ENV_FILE=!CONF_ENV_FILE:"=!
+) else (
+ set RABBITMQ_CONF_ENV_FILE=!RABBITMQ_CONF_ENV_FILE:"=!
+)
+
+if exist "!RABBITMQ_CONF_ENV_FILE!" (
+ call "!RABBITMQ_CONF_ENV_FILE!"
+)
+
+rem Bump ETS table limit to 50000
+if "!ERL_MAX_ETS_TABLES!"=="" (
+ set ERL_MAX_ETS_TABLES=50000
+)
+
+rem Default is defined here:
+rem https://github.com/erlang/otp/blob/master/erts/emulator/beam/erl_port.h
+if "!ERL_MAX_PORTS!"=="" (
+ set ERL_MAX_PORTS=65536
+)
+
+set DEFAULT_SCHEDULER_BIND_TYPE=db
+if "!RABBITMQ_SCHEDULER_BIND_TYPE!"=="" (
+ set RABBITMQ_SCHEDULER_BIND_TYPE=!SCHEDULER_BIND_TYPE!
+)
+if "!RABBITMQ_SCHEDULER_BIND_TYPE!"=="" (
+ set RABBITMQ_SCHEDULER_BIND_TYPE=!DEFAULT_SCHEDULER_BIND_TYPE!
+)
+
+set DEFAULT_DISTRIBUTION_BUFFER_SIZE=128000
+if "!RABBITMQ_DISTRIBUTION_BUFFER_SIZE!"=="" (
+ set RABBITMQ_DISTRIBUTION_BUFFER_SIZE=!DISTRIBUTION_BUFFER_SIZE!
+)
+if "!RABBITMQ_DISTRIBUTION_BUFFER_SIZE!"=="" (
+ set RABBITMQ_DISTRIBUTION_BUFFER_SIZE=!DEFAULT_DISTRIBUTION_BUFFER_SIZE!
+)
+
+set DEFAULT_MAX_NUMBER_OF_PROCESSES=1048576
+if "!RABBITMQ_MAX_NUMBER_OF_PROCESSES!"=="" (
+ set RABBITMQ_MAX_NUMBER_OF_PROCESSES=!MAX_NUMBER_OF_PROCESSES!
+)
+if "!RABBITMQ_MAX_NUMBER_OF_PROCESSES!"=="" (
+ set RABBITMQ_MAX_NUMBER_OF_PROCESSES=!DEFAULT_MAX_NUMBER_OF_PROCESSES!
+)
+
+set DEFAULT_MAX_NUMBER_OF_ATOMS=5000000
+if "!RABBITMQ_MAX_NUMBER_OF_ATOMS!"=="" (
+ set RABBITMQ_MAX_NUMBER_OF_ATOMS=!MAX_NUMBER_OF_ATOMS!
+)
+if "!RABBITMQ_MAX_NUMBER_OF_ATOMS!"=="" (
+ set RABBITMQ_MAX_NUMBER_OF_ATOMS=!DEFAULT_MAX_NUMBER_OF_ATOMS!
+)
+
+REM Common server defaults
+set SERVER_ERL_ARGS=+P !RABBITMQ_MAX_NUMBER_OF_PROCESSES! +t !RABBITMQ_MAX_NUMBER_OF_ATOMS! +stbt !RABBITMQ_SCHEDULER_BIND_TYPE! +zdbbl !RABBITMQ_DISTRIBUTION_BUFFER_SIZE!
+
+REM ##--- Set environment vars RABBITMQ_<var_name> to defaults if not set
+
+REM [ "x" = "x$RABBITMQ_SERVER_ERL_ARGS" ] && RABBITMQ_SERVER_ERL_ARGS=${SERVER_ERL_ARGS}
+if "!RABBITMQ_SERVER_ERL_ARGS!"=="" (
+ set RABBITMQ_SERVER_ERL_ARGS=!SERVER_ERL_ARGS!
+)
+
+REM [ "x" = "x$RABBITMQ_SERVER_START_ARGS" ] && RABBITMQ_SERVER_START_ARGS=${SERVER_START_ARGS}
+if "!RABBITMQ_SERVER_START_ARGS!"=="" (
+ if not "!SERVER_START_ARGS!"=="" (
+ set RABBITMQ_SERVER_START_ARGS=!SERVER_START_ARGS!
+ )
+)
+
+REM [ "x" = "x$RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS" ] && RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS=${SERVER_ADDITIONAL_ERL_ARGS}
+if "!RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS!"=="" (
+ if not "!SERVER_ADDITIONAL_ERL_ARGS!"=="" (
+ set RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS=!SERVER_ADDITIONAL_ERL_ARGS!
+ )
+)
+
+REM [ "x" = "x$RABBITMQ_BOOT_MODULE" ] && RABBITMQ_BOOT_MODULE=${BOOT_MODULE}
+if "!RABBITMQ_BOOT_MODULE!"=="" (
+ if "!BOOT_MODULE!"=="" (
+ set RABBITMQ_BOOT_MODULE=rabbit
+ ) else (
+ set RABBITMQ_BOOT_MODULE=!BOOT_MODULE!
+ )
+)
+
+REM [ "x" = "x$RABBITMQ_CTL_ERL_ARGS" ] && RABBITMQ_CTL_ERL_ARGS=${CTL_ERL_ARGS}
+if "!RABBITMQ_CTL_ERL_ARGS!"=="" (
+ if not "!CTL_ERL_ARGS!"=="" (
+ set RABBITMQ_CTL_ERL_ARGS=!CTL_ERL_ARGS!
+ )
+)
+
+if "!RABBITMQ_CTL_DIST_PORT_MIN!"=="" (
+ if not "!CTL_DIST_PORT_MIN!"=="" (
+ set RABBITMQ_CTL_DIST_PORT_MIN=!CTL_DIST_PORT_MIN!
+ )
+)
+if "!RABBITMQ_CTL_DIST_PORT_MAX!"=="" (
+ if not "!CTL_DIST_PORT_MAX!"=="" (
+ set RABBITMQ_CTL_DIST_PORT_MAX=!CTL_DIST_PORT_MAX!
+ )
+)
+if "!RABBITMQ_CTL_DIST_PORT_MIN!"=="" (
+ set RABBITMQ_CTL_DIST_PORT_MIN=35672
+)
+if "!RABBITMQ_CTL_DIST_PORT_MAX!"=="" (
+ set /a RABBITMQ_CTL_DIST_PORT_MAX=10+!RABBITMQ_CTL_DIST_PORT_MIN!
+)
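+
+REM With no overrides, CLI tools therefore use the Erlang distribution
+REM port range 35672-35682, matching the Unix rabbitmq-env defaults.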
+
+REM ADDITIONAL WINDOWS ONLY CONFIG ITEMS
+
+if "!RABBITMQ_SERVICENAME!"=="" (
+ if "!SERVICENAME!"=="" (
+ set RABBITMQ_SERVICENAME=RabbitMQ
+ ) else (
+ set RABBITMQ_SERVICENAME=!SERVICENAME!
+ )
+)
+
+REM Environment cleanup
+set BOOT_MODULE=
+set CONFIG_FILE=
+set FEATURE_FLAGS_FILE=
+set ENABLED_PLUGINS_FILE=
+set LOG_BASE=
+set MNESIA_BASE=
+set PLUGINS_DIR=
+set SCRIPT_DIR=
+set SCRIPT_NAME=
+set TDP0=
+
+REM ##--- End of overridden <var_name> variables
+
+REM # Since we source this elsewhere, don't accidentally stop execution
+REM true
diff --git a/deps/rabbit/scripts/rabbitmq-plugins b/deps/rabbit/scripts/rabbitmq-plugins
new file mode 100755
index 0000000000..1ec15b2ee9
--- /dev/null
+++ b/deps/rabbit/scripts/rabbitmq-plugins
@@ -0,0 +1,23 @@
+#!/bin/sh
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+##
+
+# Exit immediately if a pipeline, which may consist of a single simple command,
+# a list, or a compound command, returns a non-zero status
+set -e
+
+# Each variable or function that is created or modified is given the export
+# attribute and marked for export to the environment of subsequent commands.
+set -a
+
+# shellcheck source=/dev/null
+#
+# TODO: when shellcheck adds support for relative paths, change to
+# shellcheck source=./rabbitmq-env
+. "${0%/*}"/rabbitmq-env
+
+run_escript rabbitmqctl_escript "${ESCRIPT_DIR:?must be defined}"/rabbitmq-plugins "$@"
diff --git a/deps/rabbit/scripts/rabbitmq-plugins.bat b/deps/rabbit/scripts/rabbitmq-plugins.bat
new file mode 100644
index 0000000000..e1f13b7073
--- /dev/null
+++ b/deps/rabbit/scripts/rabbitmq-plugins.bat
@@ -0,0 +1,56 @@
+@echo off
+
+REM This Source Code Form is subject to the terms of the Mozilla Public
+REM License, v. 2.0. If a copy of the MPL was not distributed with this
+REM file, You can obtain one at https://mozilla.org/MPL/2.0/.
+REM
+REM Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+REM
+
+setlocal
+
+rem Preserve values that might contain exclamation marks before
+rem enabling delayed expansion
+set TDP0=%~dp0
+set STAR=%*
+setlocal enabledelayedexpansion
+
+REM Get default settings with user overrides for (RABBITMQ_)<var_name>
+REM Non-empty defaults should be set in rabbitmq-env
+call "!TDP0!\rabbitmq-env.bat" %~n0
+
+if not exist "!ERLANG_HOME!\bin\erl.exe" (
+ echo.
+ echo ******************************
+ echo ERLANG_HOME not set correctly.
+ echo ******************************
+ echo.
+ echo Please either set ERLANG_HOME to point to your Erlang installation or place the
+ echo RabbitMQ server distribution in the Erlang lib folder.
+ echo.
+ exit /B 1
+)
+
+REM Disable erl_crash.dump by default for control scripts.
+if not defined ERL_CRASH_DUMP_SECONDS (
+ set ERL_CRASH_DUMP_SECONDS=0
+)
+
+"!ERLANG_HOME!\bin\erl.exe" +B ^
+-boot !CLEAN_BOOT_FILE! ^
+-noinput -noshell -hidden -smp enable ^
+!RABBITMQ_CTL_ERL_ARGS! ^
+-kernel inet_dist_listen_min !RABBITMQ_CTL_DIST_PORT_MIN! ^
+-kernel inet_dist_listen_max !RABBITMQ_CTL_DIST_PORT_MAX! ^
+-run escript start ^
+-escript main rabbitmqctl_escript ^
+-extra "%RABBITMQ_HOME%\escript\rabbitmq-plugins" !STAR!
+
+if ERRORLEVEL 1 (
+ exit /B %ERRORLEVEL%
+)
+
+EXIT /B 0
+
+endlocal
+endlocal
diff --git a/deps/rabbit/scripts/rabbitmq-queues b/deps/rabbit/scripts/rabbitmq-queues
new file mode 100755
index 0000000000..680076f962
--- /dev/null
+++ b/deps/rabbit/scripts/rabbitmq-queues
@@ -0,0 +1,23 @@
+#!/bin/sh
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+##
+
+# Exit immediately if a pipeline, which may consist of a single simple command,
+# a list, or a compound command, returns a non-zero status
+set -e
+
+# Each variable or function that is created or modified is given the export
+# attribute and marked for export to the environment of subsequent commands.
+set -a
+
+# shellcheck source=/dev/null
+#
+# TODO: when shellcheck adds support for relative paths, change to
+# shellcheck source=./rabbitmq-env
+. "${0%/*}"/rabbitmq-env
+
+run_escript rabbitmqctl_escript "${ESCRIPT_DIR:?must be defined}"/rabbitmq-queues "$@"
diff --git a/deps/rabbit/scripts/rabbitmq-queues.bat b/deps/rabbit/scripts/rabbitmq-queues.bat
new file mode 100644
index 0000000000..99fce6479f
--- /dev/null
+++ b/deps/rabbit/scripts/rabbitmq-queues.bat
@@ -0,0 +1,56 @@
+@echo off
+REM This Source Code Form is subject to the terms of the Mozilla Public
+REM License, v. 2.0. If a copy of the MPL was not distributed with this
+REM file, You can obtain one at https://mozilla.org/MPL/2.0/.
+REM
+REM Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+REM
+
+REM Scopes the variables to the current batch file
+setlocal
+
+rem Preserve values that might contain exclamation marks before
+rem enabling delayed expansion
+set TDP0=%~dp0
+set STAR=%*
+setlocal enabledelayedexpansion
+
+REM Get default settings with user overrides for (RABBITMQ_)<var_name>
+REM Non-empty defaults should be set in rabbitmq-env
+call "%TDP0%\rabbitmq-env.bat" %~n0
+
+if not exist "!ERLANG_HOME!\bin\erl.exe" (
+ echo.
+ echo ******************************
+ echo ERLANG_HOME not set correctly.
+ echo ******************************
+ echo.
+ echo Please either set ERLANG_HOME to point to your Erlang installation or place the
+ echo RabbitMQ server distribution in the Erlang lib folder.
+ echo.
+ exit /B 1
+)
+
+REM Disable erl_crash.dump by default for control scripts.
+if not defined ERL_CRASH_DUMP_SECONDS (
+ set ERL_CRASH_DUMP_SECONDS=0
+)
+
+"!ERLANG_HOME!\bin\erl.exe" +B ^
+-boot !CLEAN_BOOT_FILE! ^
+-noinput -noshell -hidden -smp enable ^
+!RABBITMQ_CTL_ERL_ARGS! ^
+-kernel inet_dist_listen_min !RABBITMQ_CTL_DIST_PORT_MIN! ^
+-kernel inet_dist_listen_max !RABBITMQ_CTL_DIST_PORT_MAX! ^
+-run escript start ^
+-escript main rabbitmqctl_escript ^
+-extra "%RABBITMQ_HOME%\escript\rabbitmq-queues" !STAR!
+
+if ERRORLEVEL 1 (
+ exit /B %ERRORLEVEL%
+)
+
+EXIT /B 0
+
+endlocal
+endlocal
diff --git a/deps/rabbit/scripts/rabbitmq-rel b/deps/rabbit/scripts/rabbitmq-rel
new file mode 100755
index 0000000000..a96ec78764
--- /dev/null
+++ b/deps/rabbit/scripts/rabbitmq-rel
@@ -0,0 +1,58 @@
+#!/usr/bin/env escript
+%% vim:ft=erlang:sw=2:et:
+
+main(["show-rel"]) ->
+ Rel = get_rel(),
+ io:format("~p.~n", [Rel]);
+main(["gen-boot"]) ->
+ generate_rel(),
+ generate_boot().
+
+get_rel() ->
+ ok = application:load(rabbit),
+ Apps0 = get_apps(rabbit),
+ Apps1 = lists:sort(
+ fun
+ (_, rabbitmq_prelaunch) -> false;
+ (rabbitmq_prelaunch, _) -> true;
+ (_, mnesia) -> true;
+ (mnesia, _) -> false;
+ (A, B) -> A =< B
+ end, Apps0),
+ Apps = [{App, get_vsn(App)} || App <- Apps1],
+
+ ERTSVersion = erlang:system_info(version),
+ RabbitVersion = get_vsn(rabbit),
+
+ {release,
+ {"RabbitMQ", RabbitVersion},
+ {erts, ERTSVersion},
+ Apps}.
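+
+%% Illustrative shape of the returned release tuple (version strings
+%% are hypothetical):
+%%   {release, {"RabbitMQ", "3.8.9"}, {erts, "11.1"},
+%%    [{kernel, "7.1"}, {stdlib, "3.13"}, ...]}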
+
+get_apps(App) ->
+ ok = load_app(App),
+ {ok, DirectDeps} = application:get_key(App, applications),
+ lists:umerge(
+ [lists:usort(get_apps(Dep)) || Dep <- DirectDeps] ++
+ [lists:usort([kernel, stdlib, sasl, App, mnesia])]).
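+
+%% In other words, get_apps/1 merges the sorted transitive dependencies
+%% of App with the baseline set [kernel, stdlib, sasl, App, mnesia],
+%% yielding a single sorted, duplicate-free list.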
+
+load_app(App) ->
+ case application:load(App) of
+ ok -> ok;
+ {error, {already_loaded, App}} -> ok
+ end.
+
+generate_rel() ->
+ Rel = get_rel(),
+ io:format("~p.~n", [Rel]),
+ Output = io_lib:format("~p.~n", [Rel]),
+ ok = file:write_file("rabbit.rel", Output).
+
+generate_boot() ->
+ Options = [local, {path, code:get_path()}],
+ ok = systools:make_script("rabbit", Options).
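+
+%% systools:make_script/2 reads the "rabbit.rel" file written by
+%% generate_rel/0 and emits "rabbit.script" and "rabbit.boot" in the
+%% current directory; the `local` option makes the boot script refer to
+%% code paths by absolute directory rather than $ROOT.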
+
+get_vsn(App) ->
+ load_app(App),
+ {ok, Vsn} = application:get_key(App, vsn),
+ Vsn.
diff --git a/deps/rabbit/scripts/rabbitmq-server b/deps/rabbit/scripts/rabbitmq-server
new file mode 100755
index 0000000000..82058dcb26
--- /dev/null
+++ b/deps/rabbit/scripts/rabbitmq-server
@@ -0,0 +1,155 @@
+#!/bin/sh
+# vim:sw=4:et:
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+##
+
+set -e
+
+# Get default settings with user overrides for (RABBITMQ_)<var_name>
+# Non-empty defaults should be set in rabbitmq-env
+SCRIPTS_DIR=$(dirname "$0")
+. "$SCRIPTS_DIR/rabbitmq-env"
+
+[ "$NOTIFY_SOCKET" ] && RUNNING_UNDER_SYSTEMD=true
+
+RABBITMQ_DEFAULT_ALLOC_ARGS="+MBas ageffcbf +MHas ageffcbf +MBlmbcs 512 +MHlmbcs 512 +MMmcs 30"
+
+# Bump ETS table limit to 50000
+if [ "x" = "x$ERL_MAX_ETS_TABLES" ]; then
+ ERL_MAX_ETS_TABLES=50000
+fi
+
+check_start_params() {
+ check_not_empty RABBITMQ_BOOT_MODULE
+ check_not_empty SASL_BOOT_FILE
+}
+
+check_not_empty() {
+ local name="${1:?}"
+ local value
+ eval value=\$$name
+ if [ -z "$value" ]; then
+ echo "Error: ENV variable should be defined: $1.
+ Please check rabbitmq-env, rabbitmq-defaults, and ${RABBITMQ_CONF_ENV_FILE} script files"
+ exit 78
+ fi
+}
+
+start_rabbitmq_server() {
+ set -e
+
+ _rmq_env_set_erl_libs
+
+ RABBITMQ_START_RABBIT=
+ [ "x" = "x$RABBITMQ_ALLOW_INPUT" ] && RABBITMQ_START_RABBIT=" -noinput"
+ if test -z "$RABBITMQ_NODE_ONLY"; then
+ if test "$USE_RABBIT_BOOT_SCRIPT"; then
+ # TODO: This is experimental and undocumented at this point.
+ # It is here just to do simple checks while playing with how
+ # RabbitMQ is started.
+ "$SCRIPTS_DIR/rabbitmq-rel" gen-boot
+ SASL_BOOT_FILE=rabbit
+ test -f "$SASL_BOOT_FILE.boot"
+ RABBITMQ_START_RABBIT="$RABBITMQ_START_RABBIT -init_debug"
+ else
+ RABBITMQ_START_RABBIT="$RABBITMQ_START_RABBIT -s $RABBITMQ_BOOT_MODULE boot"
+ fi
+ fi
+
+ # We need to turn off path expansion because some of the vars,
+ # notably RABBITMQ_SERVER_ERL_ARGS, contain terms that look like
+ # globs and there is no other way of preventing their expansion.
+ set -f
+
+ export ERL_MAX_ETS_TABLES \
+ SYS_PREFIX
+
+ check_start_params
+
+ exec erl \
+ -pa "$RABBITMQ_SERVER_CODE_PATH" \
+ ${RABBITMQ_START_RABBIT} \
+ -boot "${SASL_BOOT_FILE}" \
+ +W w \
+ ${RABBITMQ_DEFAULT_ALLOC_ARGS} \
+ ${RABBITMQ_SERVER_ERL_ARGS} \
+ ${RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS} \
+ ${RABBITMQ_SERVER_START_ARGS} \
+ -lager crash_log false \
+ -lager handlers '[]' \
+ "$@"
+}
+
+stop_rabbitmq_server() {
+ if test "$rabbitmq_server_pid"; then
+ kill -TERM "$rabbitmq_server_pid"
+ wait "$rabbitmq_server_pid" || true
+ fi
+}
+
+if [ "$RABBITMQ_ALLOW_INPUT" -o "$RUNNING_UNDER_SYSTEMD" -o "$detached" ]; then
+ # Run erlang VM directly, completely replacing current shell
+ # process - so the pid file written in the code above will be
+ # valid (unless detached, which is also handled in the code
+ # above).
+ #
+    # This is also the correct mode to run the broker under systemd -
+    # there is no need for a proxy process that converts signals into a
+    # graceful shutdown command; the unit file should already contain
+    # instructions for graceful shutdown. Removing this additional
+    # process also lets us simply use the value returned by
+    # `os:getpid/0` for the systemd ready notification.
+ start_rabbitmq_server "$@"
+else
+    # When RabbitMQ runs in the foreground but the Erlang shell is
+    # disabled, we set up signal handlers to stop RabbitMQ properly.
+    # This is useful at least in the case of Docker.
+ # The Erlang VM should ignore SIGINT.
+ RABBITMQ_SERVER_START_ARGS="${RABBITMQ_SERVER_START_ARGS} ${RABBITMQ_IGNORE_SIGINT_FLAG}"
+
+ # Signal handlers. They all stop RabbitMQ properly, using
+ # rabbitmqctl stop. This script will exit with different exit codes:
+ # SIGHUP, SIGTSTP + SIGCONT
+ # Ignored until we implement a useful behavior.
+ # SIGTERM
+ # Exits 0 since this is considered a normal process termination.
+ # SIGINT
+ # Exits 128 + $signal_number where $signal_number is 2 for SIGINT (see
+ # https://pubs.opengroup.org/onlinepubs/009695399/utilities/kill.html).
+ # This is considered an abnormal process termination. Normally, we
+ # don't need to specify this exit code because the shell propagates it.
+ # Unfortunately, the signal handler doesn't work as expected in Dash,
+ # thus we need to explicitly restate the exit code.
+ #
+ # The behaviors below should remain consistent with the
+ # equivalent signal handlers in the Erlang code
+ # (see apps/rabbitmq_prelaunch/src/rabbit_prelaunch_sighandler.erl).
+ trap '' HUP TSTP CONT
+ trap "stop_rabbitmq_server; exit 0" TERM
+ trap "stop_rabbitmq_server; exit 130" INT
+
+ start_rabbitmq_server "$@" &
+ export rabbitmq_server_pid=$!
+
+ # Block until RabbitMQ exits or a signal is caught.
+ # Waits for last command (which is start_rabbitmq_server)
+ #
+ # The "|| true" is here to work around an issue with Dash. Normally
+ # in a Bourne shell, if `wait` is interrupted by a signal, the
+ # signal handlers defined above are executed and the script
+ # terminates with the exit code of `wait` (unless the signal handler
+ # overrides that).
+ # In the case of Dash, it looks like `set -e` (set at the beginning
+    # of this script) takes precedence over signal handling. Therefore,
+ # when `wait` is interrupted, its exit code is non-zero and because
+ # of `set -e`, the script terminates immediately without running the
+ # signal handler. To work around this issue, we use "|| true" to
+ # force that statement to succeed and the signal handler to properly
+ # execute. Because the statement below has an exit code of 0, the
+ # signal handler has to restate the expected exit code.
+ wait "$rabbitmq_server_pid" || true
+fi
diff --git a/deps/rabbit/scripts/rabbitmq-server.bat b/deps/rabbit/scripts/rabbitmq-server.bat
new file mode 100644
index 0000000000..3a386b63c4
--- /dev/null
+++ b/deps/rabbit/scripts/rabbitmq-server.bat
@@ -0,0 +1,91 @@
+@echo off
+REM This Source Code Form is subject to the terms of the Mozilla Public
+REM License, v. 2.0. If a copy of the MPL was not distributed with this
+REM file, You can obtain one at https://mozilla.org/MPL/2.0/.
+REM
+REM Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+REM
+
+setlocal
+
+rem Preserve values that might contain exclamation marks before
+rem enabling delayed expansion
+set TDP0=%~dp0
+set STAR=%*
+set CONF_SCRIPT_DIR=%~dp0
+setlocal enabledelayedexpansion
+setlocal enableextensions
+
+if ERRORLEVEL 1 (
+ echo "Failed to enable command extensions!"
+ exit /B 1
+)
+
+REM Get default settings with user overrides for (RABBITMQ_)<var_name>
+REM Non-empty defaults should be set in rabbitmq-env
+call "%TDP0%\rabbitmq-env.bat" %~n0
+
+if not exist "!ERLANG_HOME!\bin\erl.exe" (
+ echo.
+ echo ******************************
+ echo ERLANG_HOME not set correctly.
+ echo ******************************
+ echo.
+ echo Please either set ERLANG_HOME to point to your Erlang installation or place the
+ echo RabbitMQ server distribution in the Erlang lib folder.
+ echo.
+ exit /B 1
+)
+
+set RABBITMQ_DEFAULT_ALLOC_ARGS=+MBas ageffcbf +MHas ageffcbf +MBlmbcs 512 +MHlmbcs 512 +MMmcs 30
+
+set RABBITMQ_START_RABBIT=
+if "!RABBITMQ_ALLOW_INPUT!"=="" (
+ set RABBITMQ_START_RABBIT=!RABBITMQ_START_RABBIT! -noinput
+)
+if "!RABBITMQ_NODE_ONLY!"=="" (
+ set RABBITMQ_START_RABBIT=!RABBITMQ_START_RABBIT! -s "!RABBITMQ_BOOT_MODULE!" boot
+)
+
+set ENV_OK=true
+CALL :check_not_empty "RABBITMQ_BOOT_MODULE" !RABBITMQ_BOOT_MODULE!
+
+if "!ENV_OK!"=="false" (
+ EXIT /b 78
+)
+
+if "!RABBITMQ_ALLOW_INPUT!"=="" (
+ set ERL_CMD=erl.exe
+) else (
+ set ERL_CMD=werl.exe
+)
+
+"!ERLANG_HOME!\bin\!ERL_CMD!" ^
+!RABBITMQ_START_RABBIT! ^
+-boot "!SASL_BOOT_FILE!" ^
++W w ^
+!RABBITMQ_DEFAULT_ALLOC_ARGS! ^
+!RABBITMQ_SERVER_ERL_ARGS! ^
+!RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS! ^
+!RABBITMQ_SERVER_START_ARGS! ^
+-lager crash_log false ^
+-lager handlers "[]" ^
+!STAR!
+
+if ERRORLEVEL 1 (
+ exit /B %ERRORLEVEL%
+)
+
+EXIT /B 0
+
+:check_not_empty
+if "%~2"=="" (
+    ECHO "Error: ENV variable should be defined: %1. Please check rabbitmq-env, rabbitmq-defaults, and !RABBITMQ_CONF_ENV_FILE! script files. Also check your Environment Variables settings"
+ set ENV_OK=false
+ EXIT /B 78
+ )
+EXIT /B 0
+
+endlocal
+endlocal
+endlocal
diff --git a/deps/rabbit/scripts/rabbitmq-service.bat b/deps/rabbit/scripts/rabbitmq-service.bat
new file mode 100644
index 0000000000..0b7906d4bf
--- /dev/null
+++ b/deps/rabbit/scripts/rabbitmq-service.bat
@@ -0,0 +1,271 @@
+@echo off
+REM This Source Code Form is subject to the terms of the Mozilla Public
+REM License, v. 2.0. If a copy of the MPL was not distributed with this
+REM file, You can obtain one at https://mozilla.org/MPL/2.0/.
+REM
+REM Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+REM
+
+setlocal
+
+rem Preserve values that might contain exclamation marks before
+rem enabling delayed expansion
+set TN0=%~n0
+set TDP0=%~dp0
+set CONF_SCRIPT_DIR=%~dp0
+set P1=%1
+setlocal enabledelayedexpansion
+setlocal enableextensions
+
+if ERRORLEVEL 1 (
+ echo "Failed to enable command extensions!"
+ exit /B 1
+)
+
+REM Get default settings with user overrides for (RABBITMQ_)<var_name>
+REM Non-empty defaults should be set in rabbitmq-env
+call "%TDP0%\rabbitmq-env.bat" %~n0
+
+REM Check for the short names here too
+if "!RABBITMQ_USE_LONGNAME!"=="true" (
+ set RABBITMQ_NAME_TYPE=-name
+ set NAMETYPE=longnames
+) else (
+ if "!USE_LONGNAME!"=="true" (
+ set RABBITMQ_USE_LONGNAME=true
+ set RABBITMQ_NAME_TYPE=-name
+ set NAMETYPE=longnames
+ ) else (
+ set RABBITMQ_USE_LONGNAME=false
+ set RABBITMQ_NAME_TYPE=-sname
+ set NAMETYPE=shortnames
+ )
+)
+
+REM [ "x" = "x$RABBITMQ_NODENAME" ] && RABBITMQ_NODENAME=${NODENAME}
+if "!RABBITMQ_NODENAME!"=="" (
+ if "!NODENAME!"=="" (
+ REM We use Erlang to query the local hostname because
+ REM !COMPUTERNAME! and Erlang may return different results.
+ REM Start erl with -sname to make sure epmd is started.
+ call "%ERLANG_HOME%\bin\erl.exe" -A0 -noinput -boot start_clean -sname rabbit-prelaunch-epmd -eval "init:stop()." >nul 2>&1
+ for /f "delims=" %%F in ('call "%ERLANG_HOME%\bin\erl.exe" -A0 -noinput -boot start_clean -eval "net_kernel:start([list_to_atom(""rabbit-gethostname-"" ++ os:getpid()), %NAMETYPE%]), [_, H] = string:tokens(atom_to_list(node()), ""@""), io:format(""~s~n"", [H]), init:stop()."') do @set HOSTNAME=%%F
+ set RABBITMQ_NODENAME=rabbit@!HOSTNAME!
+ set HOSTNAME=
+ ) else (
+ set RABBITMQ_NODENAME=!NODENAME!
+ )
+)
+set NAMETYPE=
+
+REM Set Erlang distribution port, based on the AMQP TCP port.
+REM
+REM We do this only for the Windows service because in this case, the node has
+REM to start with the distribution enabled on the command line. For all other
+REM cases, distribution is configured at runtime.
+if "!RABBITMQ_NODE_PORT!"=="" (
+ if not "!NODE_PORT!"=="" (
+ set RABBITMQ_NODE_PORT=!NODE_PORT!
+ ) else (
+ set RABBITMQ_NODE_PORT=5672
+ )
+)
+
+if "!RABBITMQ_DIST_PORT!"=="" (
+ if "!DIST_PORT!"=="" (
+ if "!RABBITMQ_NODE_PORT!"=="" (
+ set RABBITMQ_DIST_PORT=25672
+ ) else (
+ set /a RABBITMQ_DIST_PORT=20000+!RABBITMQ_NODE_PORT!
+ )
+ ) else (
+ set RABBITMQ_DIST_PORT=!DIST_PORT!
+ )
+)
+
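+REM Using the same value for inet_dist_listen_min and _max pins the node
+REM to a single, predictable distribution listener port.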
+set RABBITMQ_DIST_ARG=-kernel inet_dist_listen_min !RABBITMQ_DIST_PORT! -kernel inet_dist_listen_max !RABBITMQ_DIST_PORT!
+
+set STARVAR=
+shift
+:loop1
+if "%1"=="" goto after_loop
+ set STARVAR=%STARVAR% %1
+ shift
+goto loop1
+:after_loop
+
+if "!ERLANG_SERVICE_MANAGER_PATH!"=="" (
+ if not exist "!ERLANG_HOME!\bin\erl.exe" (
+ echo.
+ echo ******************************
+ echo ERLANG_HOME not set correctly.
+ echo ******************************
+ echo.
+ echo Please either set ERLANG_HOME to point to your Erlang installation or place the
+ echo RabbitMQ server distribution in the Erlang lib folder.
+ echo.
+ exit /B
+ )
+ for /f "delims=" %%i in ('dir /ad/b "!ERLANG_HOME!"') do if exist "!ERLANG_HOME!\%%i\bin\erlsrv.exe" (
+ set ERLANG_SERVICE_MANAGER_PATH=!ERLANG_HOME!\%%i\bin
+ )
+)
+
+set CONSOLE_FLAG=
+set CONSOLE_LOG_VALID=
+for %%i in (new reuse) do if "%%i" == "!RABBITMQ_CONSOLE_LOG!" set CONSOLE_LOG_VALID=TRUE
+if "!CONSOLE_LOG_VALID!" == "TRUE" (
+ set CONSOLE_FLAG=-debugtype !RABBITMQ_CONSOLE_LOG!
+)
+
+rem *** End of configuration ***
+
+if not exist "!ERLANG_SERVICE_MANAGER_PATH!\erlsrv.exe" (
+ echo.
+ echo **********************************************
+ echo ERLANG_SERVICE_MANAGER_PATH not set correctly.
+ echo **********************************************
+ echo.
+ echo "!ERLANG_SERVICE_MANAGER_PATH!\erlsrv.exe" not found
+ echo Please set ERLANG_SERVICE_MANAGER_PATH to the folder containing "erlsrv.exe".
+ echo.
+ exit /B 1
+)
+
+if "!P1!" == "install" goto INSTALL_SERVICE
+for %%i in (start stop) do if "%%i" == "!P1!" goto START_STOP_SERVICE
+for %%i in (disable enable list remove) do if "%%i" == "!P1!" goto MODIFY_SERVICE
+
+echo.
+echo *********************
+echo Service control usage
+echo *********************
+echo.
+echo !TN0! help - Display this help
+echo !TN0! install - Install the !RABBITMQ_SERVICENAME! service
+echo !TN0! remove - Remove the !RABBITMQ_SERVICENAME! service
+echo.
+echo The following actions can also be accomplished by using
+echo Windows Services Management Console (services.msc):
+echo.
+echo !TN0! start - Start the !RABBITMQ_SERVICENAME! service
+echo !TN0! stop - Stop the !RABBITMQ_SERVICENAME! service
+echo !TN0! disable - Disable the !RABBITMQ_SERVICENAME! service
+echo !TN0! enable - Enable the !RABBITMQ_SERVICENAME! service
+echo.
+exit /B
+
+
+:INSTALL_SERVICE
+
+if not exist "!RABBITMQ_BASE!" (
+ echo Creating base directory !RABBITMQ_BASE! & mkdir "!RABBITMQ_BASE!"
+)
+
+"!ERLANG_SERVICE_MANAGER_PATH!\erlsrv" list !RABBITMQ_SERVICENAME! 2>NUL 1>NUL
+if errorlevel 1 (
+ "!ERLANG_SERVICE_MANAGER_PATH!\erlsrv" add !RABBITMQ_SERVICENAME! -internalservicename !RABBITMQ_SERVICENAME!
+) else (
+ echo !RABBITMQ_SERVICENAME! service is already present - only updating service parameters
+)
+
+set RABBITMQ_DEFAULT_ALLOC_ARGS=+MBas ageffcbf +MHas ageffcbf +MBlmbcs 512 +MHlmbcs 512 +MMmcs 30
+
+set RABBITMQ_START_RABBIT=
+if "!RABBITMQ_NODE_ONLY!"=="" (
+ set RABBITMQ_START_RABBIT=-s "!RABBITMQ_BOOT_MODULE!" boot
+)
+
+if "!RABBITMQ_SERVICE_RESTART!"=="" (
+ set RABBITMQ_SERVICE_RESTART=restart
+)
+
+set ENV_OK=true
+CALL :check_not_empty "RABBITMQ_BOOT_MODULE" !RABBITMQ_BOOT_MODULE!
+CALL :check_not_empty "RABBITMQ_NAME_TYPE" !RABBITMQ_NAME_TYPE!
+CALL :check_not_empty "RABBITMQ_NODENAME" !RABBITMQ_NODENAME!
+
+if "!ENV_OK!"=="false" (
+ EXIT /b 78
+)
+
+set ERLANG_SERVICE_ARGUMENTS= ^
+!RABBITMQ_START_RABBIT! ^
+-boot "!SASL_BOOT_FILE!" ^
++W w ^
+!RABBITMQ_DEFAULT_ALLOC_ARGS! ^
+!RABBITMQ_SERVER_ERL_ARGS! ^
+!RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS! ^
+!RABBITMQ_SERVER_START_ARGS! ^
+!RABBITMQ_DIST_ARG! ^
+-lager crash_log false ^
+-lager handlers "[]" ^
+!STARVAR!
+
+set ERLANG_SERVICE_ARGUMENTS=!ERLANG_SERVICE_ARGUMENTS:\=\\!
+set ERLANG_SERVICE_ARGUMENTS=!ERLANG_SERVICE_ARGUMENTS:"=\"!
+
+rem We resolve %APPDATA% at install time so that the user's %APPDATA%
+rem is passed to `rabbit_env` at runtime (instead of the service's
+rem %APPDATA%).
+rem
+rem The goal is to keep the same behavior as when RabbitMQ data
+rem locations were decided in `rabbitmq-env.bat` (sourced by this
+rem script), even if now, we compute everything in `rabbit_env` at
+rem runtime.
+rem
+rem We may revisit this in the future so that no data is stored in a
+rem user-specific directory.
+"!ERLANG_SERVICE_MANAGER_PATH!\erlsrv" set !RABBITMQ_SERVICENAME! ^
+-onfail !RABBITMQ_SERVICE_RESTART! ^
+-machine "!ERLANG_SERVICE_MANAGER_PATH!\erl.exe" ^
+-env APPDATA="!APPDATA!" ^
+-env ERL_LIBS="!ERL_LIBS!" ^
+-env ERL_MAX_ETS_TABLES="!ERL_MAX_ETS_TABLES!" ^
+-env ERL_MAX_PORTS="!ERL_MAX_PORTS!" ^
+-workdir "!RABBITMQ_BASE!" ^
+-stopaction "rabbit:stop_and_halt()." ^
+!RABBITMQ_NAME_TYPE! !RABBITMQ_NODENAME! ^
+!CONSOLE_FLAG! ^
+-comment "Multi-protocol open source messaging broker" ^
+-args "!ERLANG_SERVICE_ARGUMENTS!" > NUL
+
+if ERRORLEVEL 1 (
+ EXIT /B 1
+)
+goto END
+
+
+:MODIFY_SERVICE
+
+"!ERLANG_SERVICE_MANAGER_PATH!\erlsrv" !P1! !RABBITMQ_SERVICENAME!
+if ERRORLEVEL 1 (
+ EXIT /B 1
+)
+goto END
+
+
+:START_STOP_SERVICE
+
+REM Starting and stopping via erlsrv reports no error message; use net instead.
+net !P1! !RABBITMQ_SERVICENAME!
+if ERRORLEVEL 1 (
+ EXIT /B 1
+)
+goto END
+
+:END
+
+EXIT /B 0
+
+:check_not_empty
+if "%~2"=="" (
+    ECHO "Error: ENV variable should be defined: %1. Please check rabbitmq-env, rabbitmq-defaults, and !RABBITMQ_CONF_ENV_FILE! script files. Also check your Environment Variables settings"
+ set ENV_OK=false
+ EXIT /B 78
+ )
+EXIT /B 0
+
+endlocal
+endlocal
+endlocal
diff --git a/deps/rabbit/scripts/rabbitmq-streams b/deps/rabbit/scripts/rabbitmq-streams
new file mode 100755
index 0000000000..376cc497df
--- /dev/null
+++ b/deps/rabbit/scripts/rabbitmq-streams
@@ -0,0 +1,32 @@
+#!/bin/sh
+## The contents of this file are subject to the Mozilla Public License
+## Version 1.1 (the "License"); you may not use this file except in
+## compliance with the License. You may obtain a copy of the License
+## at https://www.mozilla.org/MPL/
+##
+## Software distributed under the License is distributed on an "AS IS"
+## basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+## the License for the specific language governing rights and
+## limitations under the License.
+##
+## The Original Code is RabbitMQ.
+##
+## The Initial Developer of the Original Code is GoPivotal, Inc.
+## Copyright (c) 2007-2020 Pivotal Software, Inc. All rights reserved.
+##
+
+# Exit immediately if a pipeline, which may consist of a single simple command,
+# a list, or a compound command, returns a non-zero status
+set -e
+
+# Each variable or function that is created or modified is given the export
+# attribute and marked for export to the environment of subsequent commands.
+set -a
+
+# shellcheck source=/dev/null
+#
+# TODO: when shellcheck adds support for relative paths, change to
+# shellcheck source=./rabbitmq-env
+. "${0%/*}"/rabbitmq-env
+
+run_escript rabbitmqctl_escript "${ESCRIPT_DIR:?must be defined}"/rabbitmq-streams "$@"
diff --git a/deps/rabbit/scripts/rabbitmq-streams.bat b/deps/rabbit/scripts/rabbitmq-streams.bat
new file mode 100644
index 0000000000..83572a8d62
--- /dev/null
+++ b/deps/rabbit/scripts/rabbitmq-streams.bat
@@ -0,0 +1,63 @@
+@echo off
+REM The contents of this file are subject to the Mozilla Public License
+REM Version 1.1 (the "License"); you may not use this file except in
+REM compliance with the License. You may obtain a copy of the License
+REM at https://www.mozilla.org/MPL/
+REM
+REM Software distributed under the License is distributed on an "AS IS"
+REM basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+REM the License for the specific language governing rights and
+REM limitations under the License.
+REM
+REM The Original Code is RabbitMQ.
+REM
+REM The Initial Developer of the Original Code is GoPivotal, Inc.
+REM Copyright (c) 2007-2020 Pivotal Software, Inc. All rights reserved.
+REM
+
+REM Scopes the variables to the current batch file
+setlocal
+
+rem Preserve values that might contain exclamation marks before
+rem enabling delayed expansion
+set TDP0=%~dp0
+set STAR=%*
+setlocal enabledelayedexpansion
+
+REM Get default settings with user overrides for (RABBITMQ_)<var_name>
+REM Non-empty defaults should be set in rabbitmq-env
+call "%TDP0%\rabbitmq-env.bat" %~n0
+
+if not exist "!ERLANG_HOME!\bin\erl.exe" (
+ echo.
+ echo ******************************
+ echo ERLANG_HOME not set correctly.
+ echo ******************************
+ echo.
+ echo Please either set ERLANG_HOME to point to your Erlang installation or place the
+ echo RabbitMQ server distribution in the Erlang lib folder.
+ echo.
+ exit /B 1
+)
+
+REM Disable erl_crash.dump by default for control scripts.
+if not defined ERL_CRASH_DUMP_SECONDS (
+ set ERL_CRASH_DUMP_SECONDS=0
+)
+
+"!ERLANG_HOME!\bin\erl.exe" +B ^
+-boot !CLEAN_BOOT_FILE! ^
+-noinput -noshell -hidden -smp enable ^
+!RABBITMQ_CTL_ERL_ARGS! ^
+-run escript start ^
+-escript main rabbitmqctl_escript ^
+-extra "%RABBITMQ_HOME%\escript\rabbitmq-streams" !STAR!
+
+if ERRORLEVEL 1 (
+ exit /B %ERRORLEVEL%
+)
+
+EXIT /B 0
+
+endlocal
+endlocal
diff --git a/deps/rabbit/scripts/rabbitmq-upgrade b/deps/rabbit/scripts/rabbitmq-upgrade
new file mode 100755
index 0000000000..6d2bc3f948
--- /dev/null
+++ b/deps/rabbit/scripts/rabbitmq-upgrade
@@ -0,0 +1,23 @@
+#!/bin/sh
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+##
+
+# Exit immediately if a pipeline, which may consist of a single simple command,
+# a list, or a compound command, returns a non-zero status
+set -e
+
+# Each variable or function that is created or modified is given the export
+# attribute and marked for export to the environment of subsequent commands.
+set -a
+
+# shellcheck source=/dev/null
+#
+# TODO: when shellcheck adds support for relative paths, change to
+# shellcheck source=./rabbitmq-env
+. "${0%/*}"/rabbitmq-env
+
+run_escript rabbitmqctl_escript "${ESCRIPT_DIR:?must be defined}"/rabbitmq-upgrade "$@"
diff --git a/deps/rabbit/scripts/rabbitmq-upgrade.bat b/deps/rabbit/scripts/rabbitmq-upgrade.bat
new file mode 100644
index 0000000000..70b0eeee62
--- /dev/null
+++ b/deps/rabbit/scripts/rabbitmq-upgrade.bat
@@ -0,0 +1,55 @@
+@echo off
+REM This Source Code Form is subject to the terms of the Mozilla Public
+REM License, v. 2.0. If a copy of the MPL was not distributed with this
+REM file, You can obtain one at https://mozilla.org/MPL/2.0/.
+REM
+REM Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+REM
+
+REM Scopes the variables to the current batch file
+setlocal
+
+rem Preserve values that might contain exclamation marks before
+rem enabling delayed expansion
+set TDP0=%~dp0
+set STAR=%*
+setlocal enabledelayedexpansion
+
+REM Get default settings with user overrides for (RABBITMQ_)<var_name>
+REM Non-empty defaults should be set in rabbitmq-env
+call "%TDP0%\rabbitmq-env.bat" %~n0
+
+if not exist "!ERLANG_HOME!\bin\erl.exe" (
+ echo.
+ echo ******************************
+ echo ERLANG_HOME not set correctly.
+ echo ******************************
+ echo.
+ echo Please either set ERLANG_HOME to point to your Erlang installation or place the
+ echo RabbitMQ server distribution in the Erlang lib folder.
+ echo.
+ exit /B 1
+)
+
+REM Disable erl_crash.dump by default for control scripts.
+if not defined ERL_CRASH_DUMP_SECONDS (
+ set ERL_CRASH_DUMP_SECONDS=0
+)
+
+"!ERLANG_HOME!\bin\erl.exe" +B ^
+-boot !CLEAN_BOOT_FILE! ^
+-noinput -noshell -hidden -smp enable ^
+!RABBITMQ_CTL_ERL_ARGS! ^
+-kernel inet_dist_listen_min !RABBITMQ_CTL_DIST_PORT_MIN! ^
+-kernel inet_dist_listen_max !RABBITMQ_CTL_DIST_PORT_MAX! ^
+-run escript start ^
+-escript main rabbitmqctl_escript ^
+-extra "%RABBITMQ_HOME%\escript\rabbitmq-upgrade" !STAR!
+
+if ERRORLEVEL 1 (
+ exit /B %ERRORLEVEL%
+)
+
+EXIT /B 0
+
+endlocal
diff --git a/deps/rabbit/scripts/rabbitmqctl b/deps/rabbit/scripts/rabbitmqctl
new file mode 100755
index 0000000000..8016dbe282
--- /dev/null
+++ b/deps/rabbit/scripts/rabbitmqctl
@@ -0,0 +1,23 @@
+#!/bin/sh
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+##
+
+# Exit immediately if a pipeline, which may consist of a single simple command,
+# a list, or a compound command, returns a non-zero status
+set -e
+
+# Each variable or function that is created or modified is given the export
+# attribute and marked for export to the environment of subsequent commands.
+set -a
+
+# shellcheck source=/dev/null
+#
+# TODO: when shellcheck adds support for relative paths, change to
+# shellcheck source=./rabbitmq-env
+. "${0%/*}"/rabbitmq-env
+
+run_escript rabbitmqctl_escript "${ESCRIPT_DIR:?must be defined}"/rabbitmqctl "$@"
diff --git a/deps/rabbit/scripts/rabbitmqctl.bat b/deps/rabbit/scripts/rabbitmqctl.bat
new file mode 100644
index 0000000000..711ec6e990
--- /dev/null
+++ b/deps/rabbit/scripts/rabbitmqctl.bat
@@ -0,0 +1,56 @@
+@echo off
+REM This Source Code Form is subject to the terms of the Mozilla Public
+REM License, v. 2.0. If a copy of the MPL was not distributed with this
+REM file, You can obtain one at https://mozilla.org/MPL/2.0/.
+REM
+REM Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+REM
+
+REM Scopes the variables to the current batch file
+setlocal
+
+rem Preserve values that might contain exclamation marks before
+rem enabling delayed expansion
+set TDP0=%~dp0
+set STAR=%*
+setlocal enabledelayedexpansion
+
+REM Get default settings with user overrides for (RABBITMQ_)<var_name>
+REM Non-empty defaults should be set in rabbitmq-env
+call "%TDP0%\rabbitmq-env.bat" %~n0
+
+if not exist "!ERLANG_HOME!\bin\erl.exe" (
+ echo.
+ echo ******************************
+ echo ERLANG_HOME not set correctly.
+ echo ******************************
+ echo.
+ echo Please either set ERLANG_HOME to point to your Erlang installation or place the
+ echo RabbitMQ server distribution in the Erlang lib folder.
+ echo.
+ exit /B 1
+)
+
+REM Disable erl_crash.dump by default for control scripts.
+if not defined ERL_CRASH_DUMP_SECONDS (
+ set ERL_CRASH_DUMP_SECONDS=0
+)
+
+"!ERLANG_HOME!\bin\erl.exe" +B ^
+-boot !CLEAN_BOOT_FILE! ^
+-noinput -noshell -hidden -smp enable ^
+!RABBITMQ_CTL_ERL_ARGS! ^
+-kernel inet_dist_listen_min !RABBITMQ_CTL_DIST_PORT_MIN! ^
+-kernel inet_dist_listen_max !RABBITMQ_CTL_DIST_PORT_MAX! ^
+-run escript start ^
+-escript main rabbitmqctl_escript ^
+-extra "%RABBITMQ_HOME%\escript\rabbitmqctl" !STAR!
+
+if ERRORLEVEL 1 (
+ exit /B %ERRORLEVEL%
+)
+
+EXIT /B 0
+
+endlocal
+endlocal
diff --git a/deps/rabbit/src/amqqueue.erl b/deps/rabbit/src/amqqueue.erl
new file mode 100644
index 0000000000..3415ebd073
--- /dev/null
+++ b/deps/rabbit/src/amqqueue.erl
@@ -0,0 +1,762 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2018-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(amqqueue). %% Could become amqqueue_v2 in the future.
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+-include("amqqueue.hrl").
+
+-export([new/8,
+ new/9,
+ new_with_version/9,
+ new_with_version/10,
+ fields/0,
+ fields/1,
+ field_vhost/0,
+ record_version_to_use/0,
+ upgrade/1,
+ upgrade_to/2,
+ % arguments
+ get_arguments/1,
+ set_arguments/2,
+ % decorators
+ get_decorators/1,
+ set_decorators/2,
+ % exclusive_owner
+ get_exclusive_owner/1,
+ % gm_pids
+ get_gm_pids/1,
+ set_gm_pids/2,
+ get_leader/1,
+ % name (#resource)
+ get_name/1,
+ set_name/2,
+ % operator_policy
+ get_operator_policy/1,
+ set_operator_policy/2,
+ get_options/1,
+ % pid
+ get_pid/1,
+ set_pid/2,
+ % policy
+ get_policy/1,
+ set_policy/2,
+ % policy_version
+ get_policy_version/1,
+ set_policy_version/2,
+ % type_state
+ get_type_state/1,
+ set_type_state/2,
+ % recoverable_slaves
+ get_recoverable_slaves/1,
+ set_recoverable_slaves/2,
+ % slave_pids
+ get_slave_pids/1,
+ set_slave_pids/2,
+ % slave_pids_pending_shutdown
+ get_slave_pids_pending_shutdown/1,
+ set_slave_pids_pending_shutdown/2,
+ % state
+ get_state/1,
+ set_state/2,
+ % sync_slave_pids
+ get_sync_slave_pids/1,
+ set_sync_slave_pids/2,
+ get_type/1,
+ get_vhost/1,
+ is_amqqueue/1,
+ is_auto_delete/1,
+ is_durable/1,
+ is_classic/1,
+ is_quorum/1,
+ pattern_match_all/0,
+ pattern_match_on_name/1,
+ pattern_match_on_type/1,
+ reset_mirroring_and_decorators/1,
+ set_immutable/1,
+ qnode/1,
+ macros/0]).
+
+-define(record_version, amqqueue_v2).
+-define(is_backwards_compat_classic(T),
+ (T =:= classic orelse T =:= ?amqqueue_v1_type)).
+
+-record(amqqueue, {
+ name :: rabbit_amqqueue:name() | '_', %% immutable
+ durable :: boolean() | '_', %% immutable
+ auto_delete :: boolean() | '_', %% immutable
+ exclusive_owner = none :: pid() | none | '_', %% immutable
+ arguments = [] :: rabbit_framing:amqp_table() | '_', %% immutable
+ pid :: pid() | ra_server_id() | none | '_', %% durable (just so we
+ %% know home node)
+ slave_pids = [] :: [pid()] | none | '_', %% transient
+ sync_slave_pids = [] :: [pid()] | none| '_',%% transient
+ recoverable_slaves = [] :: [atom()] | none | '_', %% durable
+ policy :: binary() | none | undefined | '_', %% durable, implicit
+ %% update as above
+ operator_policy :: binary() | none | undefined | '_', %% durable,
+ %% implicit
+ %% update
+ %% as above
+ gm_pids = [] :: [{pid(), pid()}] | none | '_', %% transient
+ decorators :: [atom()] | none | undefined | '_', %% transient,
+ %% recalculated
+ %% as above
+ state = live :: atom() | none | '_', %% durable (have we crashed?)
+ policy_version = 0 :: non_neg_integer() | '_',
+ slave_pids_pending_shutdown = [] :: [pid()] | '_',
+ vhost :: rabbit_types:vhost() | undefined | '_', %% secondary index
+ options = #{} :: map() | '_',
+ type = ?amqqueue_v1_type :: module() | '_',
+ type_state = #{} :: map() | '_'
+ }).
+
+-type amqqueue() :: amqqueue_v1:amqqueue_v1() | amqqueue_v2().
+-type amqqueue_v2() :: #amqqueue{
+ name :: rabbit_amqqueue:name(),
+ durable :: boolean(),
+ auto_delete :: boolean(),
+ exclusive_owner :: pid() | none,
+ arguments :: rabbit_framing:amqp_table(),
+ pid :: pid() | ra_server_id() | none,
+ slave_pids :: [pid()] | none,
+ sync_slave_pids :: [pid()] | none,
+ recoverable_slaves :: [atom()] | none,
+ policy :: binary() | none | undefined,
+ operator_policy :: binary() | none | undefined,
+ gm_pids :: [{pid(), pid()}] | none,
+ decorators :: [atom()] | none | undefined,
+ state :: atom() | none,
+ policy_version :: non_neg_integer(),
+ slave_pids_pending_shutdown :: [pid()],
+ vhost :: rabbit_types:vhost() | undefined,
+ options :: map(),
+ type :: atom(),
+ type_state :: #{}
+ }.
+
+-type ra_server_id() :: {Name :: atom(), Node :: node()}.
+
+-type amqqueue_pattern() :: amqqueue_v1:amqqueue_v1_pattern() |
+ amqqueue_v2_pattern().
+-type amqqueue_v2_pattern() :: #amqqueue{
+ name :: rabbit_amqqueue:name() | '_',
+ durable :: '_',
+ auto_delete :: '_',
+ exclusive_owner :: '_',
+ arguments :: '_',
+ pid :: '_',
+ slave_pids :: '_',
+ sync_slave_pids :: '_',
+ recoverable_slaves :: '_',
+ policy :: '_',
+ operator_policy :: '_',
+ gm_pids :: '_',
+ decorators :: '_',
+ state :: '_',
+ policy_version :: '_',
+ slave_pids_pending_shutdown :: '_',
+ vhost :: '_',
+ options :: '_',
+ type :: atom() | '_',
+ type_state :: '_'
+ }.
+
+-export_type([amqqueue/0,
+ amqqueue_v2/0,
+ amqqueue_pattern/0,
+ amqqueue_v2_pattern/0,
+ ra_server_id/0]).
+
+-spec new(rabbit_amqqueue:name(),
+ pid() | ra_server_id() | none,
+ boolean(),
+ boolean(),
+ pid() | none,
+ rabbit_framing:amqp_table(),
+ rabbit_types:vhost() | undefined,
+ map()) -> amqqueue().
+
+new(#resource{kind = queue} = Name,
+ Pid,
+ Durable,
+ AutoDelete,
+ Owner,
+ Args,
+ VHost,
+ Options)
+ when (is_pid(Pid) orelse is_tuple(Pid) orelse Pid =:= none) andalso
+ is_boolean(Durable) andalso
+ is_boolean(AutoDelete) andalso
+ (is_pid(Owner) orelse Owner =:= none) andalso
+ is_list(Args) andalso
+ (is_binary(VHost) orelse VHost =:= undefined) andalso
+ is_map(Options) ->
+ new(Name,
+ Pid,
+ Durable,
+ AutoDelete,
+ Owner,
+ Args,
+ VHost,
+ Options,
+ ?amqqueue_v1_type).
+
+-spec new(rabbit_amqqueue:name(),
+ pid() | ra_server_id() | none,
+ boolean(),
+ boolean(),
+ pid() | none,
+ rabbit_framing:amqp_table(),
+ rabbit_types:vhost() | undefined,
+ map(),
+ atom()) -> amqqueue().
+
+new(#resource{kind = queue} = Name,
+ Pid,
+ Durable,
+ AutoDelete,
+ Owner,
+ Args,
+ VHost,
+ Options,
+ Type)
+ when (is_pid(Pid) orelse is_tuple(Pid) orelse Pid =:= none) andalso
+ is_boolean(Durable) andalso
+ is_boolean(AutoDelete) andalso
+ (is_pid(Owner) orelse Owner =:= none) andalso
+ is_list(Args) andalso
+ (is_binary(VHost) orelse VHost =:= undefined) andalso
+ is_map(Options) andalso
+ is_atom(Type) ->
+ case record_version_to_use() of
+ ?record_version ->
+ new_with_version(
+ ?record_version,
+ Name,
+ Pid,
+ Durable,
+ AutoDelete,
+ Owner,
+ Args,
+ VHost,
+ Options,
+ Type);
+ _ ->
+ amqqueue_v1:new(
+ Name,
+ Pid,
+ Durable,
+ AutoDelete,
+ Owner,
+ Args,
+ VHost,
+ Options,
+ Type)
+ end.
+
+-spec new_with_version
+(amqqueue_v1 | amqqueue_v2,
+ rabbit_amqqueue:name(),
+ pid() | ra_server_id() | none,
+ boolean(),
+ boolean(),
+ pid() | none,
+ rabbit_framing:amqp_table(),
+ rabbit_types:vhost() | undefined,
+ map()) -> amqqueue().
+
+new_with_version(RecordVersion,
+ #resource{kind = queue} = Name,
+ Pid,
+ Durable,
+ AutoDelete,
+ Owner,
+ Args,
+ VHost,
+ Options)
+ when (is_pid(Pid) orelse is_tuple(Pid) orelse Pid =:= none) andalso
+ is_boolean(Durable) andalso
+ is_boolean(AutoDelete) andalso
+ (is_pid(Owner) orelse Owner =:= none) andalso
+ is_list(Args) andalso
+ (is_binary(VHost) orelse VHost =:= undefined) andalso
+ is_map(Options) ->
+ new_with_version(RecordVersion,
+ Name,
+ Pid,
+ Durable,
+ AutoDelete,
+ Owner,
+ Args,
+ VHost,
+ Options,
+ ?amqqueue_v1_type).
+
+-spec new_with_version
+(amqqueue_v1 | amqqueue_v2,
+ rabbit_amqqueue:name(),
+ pid() | ra_server_id() | none,
+ boolean(),
+ boolean(),
+ pid() | none,
+ rabbit_framing:amqp_table(),
+ rabbit_types:vhost() | undefined,
+ map(),
+ atom()) -> amqqueue().
+
+new_with_version(?record_version,
+ #resource{kind = queue} = Name,
+ Pid,
+ Durable,
+ AutoDelete,
+ Owner,
+ Args,
+ VHost,
+ Options,
+ Type)
+ when (is_pid(Pid) orelse is_tuple(Pid) orelse Pid =:= none) andalso
+ is_boolean(Durable) andalso
+ is_boolean(AutoDelete) andalso
+ (is_pid(Owner) orelse Owner =:= none) andalso
+ is_list(Args) andalso
+ (is_binary(VHost) orelse VHost =:= undefined) andalso
+ is_map(Options) andalso
+ is_atom(Type) ->
+ #amqqueue{name = Name,
+ durable = Durable,
+ auto_delete = AutoDelete,
+ arguments = Args,
+ exclusive_owner = Owner,
+ pid = Pid,
+ vhost = VHost,
+ options = Options,
+ type = ensure_type_compat(Type)};
+new_with_version(Version,
+ Name,
+ Pid,
+ Durable,
+ AutoDelete,
+ Owner,
+ Args,
+ VHost,
+ Options,
+ Type)
+ when ?is_backwards_compat_classic(Type) ->
+ amqqueue_v1:new_with_version(
+ Version,
+ Name,
+ Pid,
+ Durable,
+ AutoDelete,
+ Owner,
+ Args,
+ VHost,
+ Options).
+
+-spec is_amqqueue(any()) -> boolean().
+
+is_amqqueue(#amqqueue{}) -> true;
+is_amqqueue(Queue) -> amqqueue_v1:is_amqqueue(Queue).
+
+-spec record_version_to_use() -> amqqueue_v1 | amqqueue_v2.
+
+record_version_to_use() ->
+ case rabbit_feature_flags:is_enabled(quorum_queue) of
+ true -> ?record_version;
+ false -> amqqueue_v1:record_version_to_use()
+ end.
+
+-spec upgrade(amqqueue()) -> amqqueue().
+
+upgrade(#amqqueue{} = Queue) -> Queue;
+upgrade(OldQueue) -> upgrade_to(record_version_to_use(), OldQueue).
+
+-spec upgrade_to
+(amqqueue_v2, amqqueue()) -> amqqueue_v2();
+(amqqueue_v1, amqqueue_v1:amqqueue_v1()) -> amqqueue_v1:amqqueue_v1().
+
+upgrade_to(?record_version, #amqqueue{} = Queue) ->
+ Queue;
+upgrade_to(?record_version, OldQueue) ->
+ Fields = erlang:tuple_to_list(OldQueue) ++ [?amqqueue_v1_type,
+ undefined],
+ #amqqueue{} = erlang:list_to_tuple(Fields);
+upgrade_to(Version, OldQueue) ->
+ amqqueue_v1:upgrade_to(Version, OldQueue).
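+
+%% Records are tuples, so the v1-to-v2 upgrade above is a plain tuple
+%% rewrite: it appends the two fields introduced by amqqueue_v2
+%% (type = ?amqqueue_v1_type and type_state = undefined) to the v1 tuple.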
+
+% arguments
+
+-spec get_arguments(amqqueue()) -> rabbit_framing:amqp_table().
+
+get_arguments(#amqqueue{arguments = Args}) ->
+ Args;
+get_arguments(Queue) ->
+ amqqueue_v1:get_arguments(Queue).
+
+-spec set_arguments(amqqueue(), rabbit_framing:amqp_table()) -> amqqueue().
+
+set_arguments(#amqqueue{} = Queue, Args) ->
+ Queue#amqqueue{arguments = Args};
+set_arguments(Queue, Args) ->
+ amqqueue_v1:set_arguments(Queue, Args).
+
+% decorators
+
+-spec get_decorators(amqqueue()) -> [atom()] | none | undefined.
+
+get_decorators(#amqqueue{decorators = Decorators}) ->
+ Decorators;
+get_decorators(Queue) ->
+ amqqueue_v1:get_decorators(Queue).
+
+-spec set_decorators(amqqueue(), [atom()] | none | undefined) -> amqqueue().
+
+set_decorators(#amqqueue{} = Queue, Decorators) ->
+ Queue#amqqueue{decorators = Decorators};
+set_decorators(Queue, Decorators) ->
+ amqqueue_v1:set_decorators(Queue, Decorators).
+
+-spec get_exclusive_owner(amqqueue()) -> pid() | none.
+
+get_exclusive_owner(#amqqueue{exclusive_owner = Owner}) ->
+ Owner;
+get_exclusive_owner(Queue) ->
+ amqqueue_v1:get_exclusive_owner(Queue).
+
+% gm_pids
+
+-spec get_gm_pids(amqqueue()) -> [{pid(), pid()}] | none.
+
+get_gm_pids(#amqqueue{gm_pids = GMPids}) ->
+ GMPids;
+get_gm_pids(Queue) ->
+ amqqueue_v1:get_gm_pids(Queue).
+
+-spec set_gm_pids(amqqueue(), [{pid(), pid()}] | none) -> amqqueue().
+
+set_gm_pids(#amqqueue{} = Queue, GMPids) ->
+ Queue#amqqueue{gm_pids = GMPids};
+set_gm_pids(Queue, GMPids) ->
+ amqqueue_v1:set_gm_pids(Queue, GMPids).
+
+-spec get_leader(amqqueue_v2()) -> node().
+
+get_leader(#amqqueue{type = rabbit_quorum_queue, pid = {_, Leader}}) -> Leader.
+
+% operator_policy
+
+-spec get_operator_policy(amqqueue()) -> binary() | none | undefined.
+
+get_operator_policy(#amqqueue{operator_policy = OpPolicy}) -> OpPolicy;
+get_operator_policy(Queue) -> amqqueue_v1:get_operator_policy(Queue).
+
+-spec set_operator_policy(amqqueue(), binary() | none | undefined) ->
+ amqqueue().
+
+set_operator_policy(#amqqueue{} = Queue, Policy) ->
+ Queue#amqqueue{operator_policy = Policy};
+set_operator_policy(Queue, Policy) ->
+ amqqueue_v1:set_operator_policy(Queue, Policy).
+
+% name
+
+-spec get_name(amqqueue()) -> rabbit_amqqueue:name().
+
+get_name(#amqqueue{name = Name}) -> Name;
+get_name(Queue) -> amqqueue_v1:get_name(Queue).
+
+-spec set_name(amqqueue(), rabbit_amqqueue:name()) -> amqqueue().
+
+set_name(#amqqueue{} = Queue, Name) ->
+ Queue#amqqueue{name = Name};
+set_name(Queue, Name) ->
+ amqqueue_v1:set_name(Queue, Name).
+
+-spec get_options(amqqueue()) -> map().
+
+get_options(#amqqueue{options = Options}) -> Options;
+get_options(Queue) -> amqqueue_v1:get_options(Queue).
+
+% pid
+
+-spec get_pid
+(amqqueue_v2()) -> pid() | ra_server_id() | none;
+(amqqueue_v1:amqqueue_v1()) -> pid() | none.
+
+get_pid(#amqqueue{pid = Pid}) -> Pid;
+get_pid(Queue) -> amqqueue_v1:get_pid(Queue).
+
+-spec set_pid
+(amqqueue_v2(), pid() | ra_server_id() | none) -> amqqueue_v2();
+(amqqueue_v1:amqqueue_v1(), pid() | none) -> amqqueue_v1:amqqueue_v1().
+
+set_pid(#amqqueue{} = Queue, Pid) ->
+ Queue#amqqueue{pid = Pid};
+set_pid(Queue, Pid) ->
+ amqqueue_v1:set_pid(Queue, Pid).
+
+% policy
+
+-spec get_policy(amqqueue()) -> proplists:proplist() | none | undefined.
+
+get_policy(#amqqueue{policy = Policy}) -> Policy;
+get_policy(Queue) -> amqqueue_v1:get_policy(Queue).
+
+-spec set_policy(amqqueue(), binary() | none | undefined) -> amqqueue().
+
+set_policy(#amqqueue{} = Queue, Policy) ->
+ Queue#amqqueue{policy = Policy};
+set_policy(Queue, Policy) ->
+ amqqueue_v1:set_policy(Queue, Policy).
+
+% policy_version
+
+-spec get_policy_version(amqqueue()) -> non_neg_integer().
+
+get_policy_version(#amqqueue{policy_version = PV}) ->
+ PV;
+get_policy_version(Queue) ->
+ amqqueue_v1:get_policy_version(Queue).
+
+-spec set_policy_version(amqqueue(), non_neg_integer()) -> amqqueue().
+
+set_policy_version(#amqqueue{} = Queue, PV) ->
+ Queue#amqqueue{policy_version = PV};
+set_policy_version(Queue, PV) ->
+ amqqueue_v1:set_policy_version(Queue, PV).
+
+% recoverable_slaves
+
+-spec get_recoverable_slaves(amqqueue()) -> [atom()] | none.
+
+get_recoverable_slaves(#amqqueue{recoverable_slaves = Slaves}) ->
+ Slaves;
+get_recoverable_slaves(Queue) ->
+ amqqueue_v1:get_recoverable_slaves(Queue).
+
+-spec set_recoverable_slaves(amqqueue(), [atom()] | none) -> amqqueue().
+
+set_recoverable_slaves(#amqqueue{} = Queue, Slaves) ->
+ Queue#amqqueue{recoverable_slaves = Slaves};
+set_recoverable_slaves(Queue, Slaves) ->
+ amqqueue_v1:set_recoverable_slaves(Queue, Slaves).
+
+% type_state (new in v2)
+
+-spec get_type_state(amqqueue()) -> map().
+get_type_state(#amqqueue{type_state = TState}) ->
+ TState;
+get_type_state(_) ->
+ #{}.
+
+-spec set_type_state(amqqueue(), map()) -> amqqueue().
+set_type_state(#amqqueue{} = Queue, TState) ->
+ Queue#amqqueue{type_state = TState};
+set_type_state(Queue, _TState) ->
+ Queue.
+
+% slave_pids
+
+-spec get_slave_pids(amqqueue()) -> [pid()] | none.
+
+get_slave_pids(#amqqueue{slave_pids = Slaves}) ->
+ Slaves;
+get_slave_pids(Queue) ->
+ amqqueue_v1:get_slave_pids(Queue).
+
+-spec set_slave_pids(amqqueue(), [pid()] | none) -> amqqueue().
+
+set_slave_pids(#amqqueue{} = Queue, SlavePids) ->
+ Queue#amqqueue{slave_pids = SlavePids};
+set_slave_pids(Queue, SlavePids) ->
+ amqqueue_v1:set_slave_pids(Queue, SlavePids).
+
+% slave_pids_pending_shutdown
+
+-spec get_slave_pids_pending_shutdown(amqqueue()) -> [pid()].
+
+get_slave_pids_pending_shutdown(
+ #amqqueue{slave_pids_pending_shutdown = Slaves}) ->
+ Slaves;
+get_slave_pids_pending_shutdown(Queue) ->
+ amqqueue_v1:get_slave_pids_pending_shutdown(Queue).
+
+-spec set_slave_pids_pending_shutdown(amqqueue(), [pid()]) -> amqqueue().
+
+set_slave_pids_pending_shutdown(#amqqueue{} = Queue, SlavePids) ->
+ Queue#amqqueue{slave_pids_pending_shutdown = SlavePids};
+set_slave_pids_pending_shutdown(Queue, SlavePids) ->
+ amqqueue_v1:set_slave_pids_pending_shutdown(Queue, SlavePids).
+
+% state
+
+-spec get_state(amqqueue()) -> atom() | none.
+
+get_state(#amqqueue{state = State}) -> State;
+get_state(Queue) -> amqqueue_v1:get_state(Queue).
+
+-spec set_state(amqqueue(), atom() | none) -> amqqueue().
+
+set_state(#amqqueue{} = Queue, State) ->
+ Queue#amqqueue{state = State};
+set_state(Queue, State) ->
+ amqqueue_v1:set_state(Queue, State).
+
+% sync_slave_pids
+
+-spec get_sync_slave_pids(amqqueue()) -> [pid()] | none.
+
+get_sync_slave_pids(#amqqueue{sync_slave_pids = Pids}) ->
+ Pids;
+get_sync_slave_pids(Queue) ->
+ amqqueue_v1:get_sync_slave_pids(Queue).
+
+-spec set_sync_slave_pids(amqqueue(), [pid()] | none) -> amqqueue().
+
+set_sync_slave_pids(#amqqueue{} = Queue, Pids) ->
+ Queue#amqqueue{sync_slave_pids = Pids};
+set_sync_slave_pids(Queue, Pids) ->
+ amqqueue_v1:set_sync_slave_pids(Queue, Pids).
+
+%% New in v2.
+
+-spec get_type(amqqueue()) -> atom().
+
+get_type(#amqqueue{type = Type}) -> Type;
+get_type(Queue) when ?is_amqqueue(Queue) -> ?amqqueue_v1_type.
+
+-spec get_vhost(amqqueue()) -> rabbit_types:vhost() | undefined.
+
+get_vhost(#amqqueue{vhost = VHost}) -> VHost;
+get_vhost(Queue) -> amqqueue_v1:get_vhost(Queue).
+
+-spec is_auto_delete(amqqueue()) -> boolean().
+
+is_auto_delete(#amqqueue{auto_delete = AutoDelete}) ->
+ AutoDelete;
+is_auto_delete(Queue) ->
+ amqqueue_v1:is_auto_delete(Queue).
+
+-spec is_durable(amqqueue()) -> boolean().
+
+is_durable(#amqqueue{durable = Durable}) -> Durable;
+is_durable(Queue) -> amqqueue_v1:is_durable(Queue).
+
+-spec is_classic(amqqueue()) -> boolean().
+
+is_classic(Queue) ->
+ get_type(Queue) =:= ?amqqueue_v1_type.
+
+-spec is_quorum(amqqueue()) -> boolean().
+
+is_quorum(Queue) ->
+ get_type(Queue) =:= rabbit_quorum_queue.
+
+fields() ->
+ case record_version_to_use() of
+ ?record_version -> fields(?record_version);
+ _ -> amqqueue_v1:fields()
+ end.
+
+fields(?record_version) -> record_info(fields, amqqueue);
+fields(Version) -> amqqueue_v1:fields(Version).
+
+field_vhost() ->
+ case record_version_to_use() of
+ ?record_version -> #amqqueue.vhost;
+ _ -> amqqueue_v1:field_vhost()
+ end.
+
+-spec pattern_match_all() -> amqqueue_pattern().
+
+pattern_match_all() ->
+ case record_version_to_use() of
+ ?record_version -> #amqqueue{_ = '_'};
+ _ -> amqqueue_v1:pattern_match_all()
+ end.
+
+-spec pattern_match_on_name(rabbit_amqqueue:name()) -> amqqueue_pattern().
+
+pattern_match_on_name(Name) ->
+ case record_version_to_use() of
+ ?record_version -> #amqqueue{name = Name, _ = '_'};
+ _ -> amqqueue_v1:pattern_match_on_name(Name)
+ end.
+
+-spec pattern_match_on_type(atom()) -> amqqueue_pattern().
+
+pattern_match_on_type(Type) ->
+ case record_version_to_use() of
+ ?record_version ->
+ #amqqueue{type = Type, _ = '_'};
+ _ when ?is_backwards_compat_classic(Type) ->
+ amqqueue_v1:pattern_match_all();
+ %% FIXME: We try a pattern which should never match when the
+ %% `quorum_queue` feature flag is not enabled yet. Is there
+ %% a better solution?
+ _ ->
+ amqqueue_v1:pattern_match_on_name(
+ rabbit_misc:r(<<0>>, queue, <<0>>))
+ end.
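+
+%% These patterns are intended for mnesia/ets match operations, e.g.
+%% (the table name is illustrative):
+%%
+%%   Pattern = pattern_match_on_type(rabbit_quorum_queue),
+%%   Qs = mnesia:dirty_match_object(rabbit_queue, Pattern).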
+
+-spec reset_mirroring_and_decorators(amqqueue()) -> amqqueue().
+
+reset_mirroring_and_decorators(#amqqueue{} = Queue) ->
+ Queue#amqqueue{slave_pids = [],
+ sync_slave_pids = [],
+ gm_pids = [],
+ decorators = undefined};
+reset_mirroring_and_decorators(Queue) ->
+ amqqueue_v1:reset_mirroring_and_decorators(Queue).
+
+-spec set_immutable(amqqueue()) -> amqqueue().
+
+set_immutable(#amqqueue{} = Queue) ->
+ Queue#amqqueue{pid = none,
+ slave_pids = [],
+ sync_slave_pids = none,
+ recoverable_slaves = none,
+ gm_pids = none,
+ policy = none,
+ decorators = none,
+ state = none};
+set_immutable(Queue) ->
+ amqqueue_v1:set_immutable(Queue).
+
+-spec qnode(amqqueue() | pid() | ra_server_id()) -> node().
+
+qnode(Queue) when ?is_amqqueue(Queue) ->
+ QPid = get_pid(Queue),
+ qnode(QPid);
+qnode(QPid) when is_pid(QPid) ->
+ node(QPid);
+qnode({_, Node}) ->
+ Node.
+
+% private
+
+macros() ->
+ io:format(
+ "-define(is_~s(Q), is_record(Q, amqqueue, ~b)).~n~n",
+ [?record_version, record_info(size, amqqueue)]),
+ %% The field number starts at 2 because the first element is the
+ %% record name.
+ macros(record_info(fields, amqqueue), 2).
+
+macros([Field | Rest], I) ->
+ io:format(
+ "-define(~s_field_~s(Q), element(~b, Q)).~n",
+ [?record_version, Field, I]),
+ macros(Rest, I + 1);
+macros([], _) ->
+ ok.
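+
+%% macros/0 prints definitions of the shape below; the record size is
+%% illustrative:
+%%
+%%   -define(is_amqqueue_v2(Q), is_record(Q, amqqueue, 21)).
+%%   -define(amqqueue_v2_field_name(Q), element(2, Q)).
+%%   -define(amqqueue_v2_field_durable(Q), element(3, Q)).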
+
+ensure_type_compat(classic) ->
+ ?amqqueue_v1_type;
+ensure_type_compat(Type) ->
+ Type.
diff --git a/deps/rabbit/src/amqqueue_v1.erl b/deps/rabbit/src/amqqueue_v1.erl
new file mode 100644
index 0000000000..dd1de74a4e
--- /dev/null
+++ b/deps/rabbit/src/amqqueue_v1.erl
@@ -0,0 +1,584 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2018-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(amqqueue_v1).
+
+-include_lib("rabbit_common/include/resource.hrl").
+-include("amqqueue.hrl").
+
+-export([new/8,
+ new/9,
+ new_with_version/9,
+ new_with_version/10,
+ fields/0,
+ fields/1,
+ field_vhost/0,
+ record_version_to_use/0,
+ upgrade/1,
+ upgrade_to/2,
+ % arguments
+ get_arguments/1,
+ set_arguments/2,
+ % decorators
+ get_decorators/1,
+ set_decorators/2,
+ % exclusive_owner
+ get_exclusive_owner/1,
+ % gm_pids
+ get_gm_pids/1,
+ set_gm_pids/2,
+ get_leader/1,
+ % name (#resource)
+ get_name/1,
+ set_name/2,
+ % operator_policy
+ get_operator_policy/1,
+ set_operator_policy/2,
+ get_options/1,
+ % pid
+ get_pid/1,
+ set_pid/2,
+ % policy
+ get_policy/1,
+ set_policy/2,
+ % policy_version
+ get_policy_version/1,
+ set_policy_version/2,
+ % type_state
+ get_type_state/1,
+ set_type_state/2,
+ % recoverable_slaves
+ get_recoverable_slaves/1,
+ set_recoverable_slaves/2,
+ % slave_pids
+ get_slave_pids/1,
+ set_slave_pids/2,
+ % slave_pids_pending_shutdown
+ get_slave_pids_pending_shutdown/1,
+ set_slave_pids_pending_shutdown/2,
+ % state
+ get_state/1,
+ set_state/2,
+ % sync_slave_pids
+ get_sync_slave_pids/1,
+ set_sync_slave_pids/2,
+ get_type/1,
+ get_vhost/1,
+ is_amqqueue/1,
+ is_auto_delete/1,
+ is_durable/1,
+ is_classic/1,
+ is_quorum/1,
+ pattern_match_all/0,
+ pattern_match_on_name/1,
+ pattern_match_on_type/1,
+ reset_mirroring_and_decorators/1,
+ set_immutable/1,
+ qnode/1,
+ macros/0]).
+
+-define(record_version, ?MODULE).
+-define(is_backwards_compat_classic(T),
+ (T =:= classic orelse T =:= ?amqqueue_v1_type)).
+
+-record(amqqueue, {
+ name :: rabbit_amqqueue:name() | '_', %% immutable
+ durable :: boolean() | '_', %% immutable
+ auto_delete :: boolean() | '_', %% immutable
+ exclusive_owner = none :: pid() | none | '_', %% immutable
+ arguments = [] :: rabbit_framing:amqp_table() | '_', %% immutable
+ pid :: pid() | none | '_', %% durable (just so we
+ %% know home node)
+ slave_pids = [] :: [pid()] | none | '_', %% transient
+ sync_slave_pids = [] :: [pid()] | none | '_', %% transient
+ recoverable_slaves = [] :: [atom()] | none | '_', %% durable
+ policy :: binary() | none | undefined | '_', %% durable, implicit
+ %% update as above
+ operator_policy :: binary() | none | undefined | '_', %% durable,
+ %% implicit
+ %% update
+ %% as above
+ gm_pids = [] :: [{pid(), pid()}] | none | '_', %% transient
+ decorators :: [atom()] | none | undefined | '_', %% transient,
+ %% recalculated
+ %% as above
+ state = live :: atom() | none | '_', %% durable (have we crashed?)
+ policy_version = 0 :: non_neg_integer() | '_',
+ slave_pids_pending_shutdown = [] :: [pid()] | '_',
+ vhost :: rabbit_types:vhost() | undefined | '_', %% secondary index
+ options = #{} :: map() | '_'
+ }).
+
+-type amqqueue() :: amqqueue_v1().
+-type amqqueue_v1() :: #amqqueue{
+ name :: rabbit_amqqueue:name(),
+ durable :: boolean(),
+ auto_delete :: boolean(),
+ exclusive_owner :: pid() | none,
+ arguments :: rabbit_framing:amqp_table(),
+ pid :: pid() | none,
+ slave_pids :: [pid()] | none,
+ sync_slave_pids :: [pid()] | none,
+ recoverable_slaves :: [atom()] | none,
+ policy :: binary() | none | undefined,
+ operator_policy :: binary() | none | undefined,
+ gm_pids :: [{pid(), pid()}] | none,
+ decorators :: [atom()] | none | undefined,
+ state :: atom() | none,
+ policy_version :: non_neg_integer(),
+ slave_pids_pending_shutdown :: [pid()],
+ vhost :: rabbit_types:vhost() | undefined,
+ options :: map()
+ }.
+
+-type amqqueue_pattern() :: amqqueue_v1_pattern().
+-type amqqueue_v1_pattern() :: #amqqueue{
+ name :: rabbit_amqqueue:name() | '_',
+ durable :: '_',
+ auto_delete :: '_',
+ exclusive_owner :: '_',
+ arguments :: '_',
+ pid :: '_',
+ slave_pids :: '_',
+ sync_slave_pids :: '_',
+ recoverable_slaves :: '_',
+ policy :: '_',
+ operator_policy :: '_',
+ gm_pids :: '_',
+ decorators :: '_',
+ state :: '_',
+ policy_version :: '_',
+ slave_pids_pending_shutdown :: '_',
+ vhost :: '_',
+ options :: '_'
+ }.
+
+-export_type([amqqueue/0,
+ amqqueue_v1/0,
+ amqqueue_pattern/0,
+ amqqueue_v1_pattern/0]).
+
+-spec new(rabbit_amqqueue:name(),
+ pid() | none,
+ boolean(),
+ boolean(),
+ pid() | none,
+ rabbit_framing:amqp_table(),
+ rabbit_types:vhost() | undefined,
+ map()) -> amqqueue().
+
+new(#resource{kind = queue} = Name,
+ Pid,
+ Durable,
+ AutoDelete,
+ Owner,
+ Args,
+ VHost,
+ Options)
+ when (is_pid(Pid) orelse Pid =:= none) andalso
+ is_boolean(Durable) andalso
+ is_boolean(AutoDelete) andalso
+ (is_pid(Owner) orelse Owner =:= none) andalso
+ is_list(Args) andalso
+ (is_binary(VHost) orelse VHost =:= undefined) andalso
+ is_map(Options) ->
+ new_with_version(
+ ?record_version,
+ Name,
+ Pid,
+ Durable,
+ AutoDelete,
+ Owner,
+ Args,
+ VHost,
+ Options).
+
+-spec new(rabbit_amqqueue:name(),
+ pid() | none,
+ boolean(),
+ boolean(),
+ pid() | none,
+ rabbit_framing:amqp_table(),
+ rabbit_types:vhost() | undefined,
+ map(),
+ ?amqqueue_v1_type | classic) -> amqqueue().
+
+new(#resource{kind = queue} = Name,
+ Pid,
+ Durable,
+ AutoDelete,
+ Owner,
+ Args,
+ VHost,
+ Options,
+ Type)
+ when (is_pid(Pid) orelse Pid =:= none) andalso
+ is_boolean(Durable) andalso
+ is_boolean(AutoDelete) andalso
+ (is_pid(Owner) orelse Owner =:= none) andalso
+ is_list(Args) andalso
+ (is_binary(VHost) orelse VHost =:= undefined) andalso
+ is_map(Options) andalso
+ ?is_backwards_compat_classic(Type) ->
+ new(
+ Name,
+ Pid,
+ Durable,
+ AutoDelete,
+ Owner,
+ Args,
+ VHost,
+ Options).
+
+-spec new_with_version(amqqueue_v1,
+ rabbit_amqqueue:name(),
+ pid() | none,
+ boolean(),
+ boolean(),
+ pid() | none,
+ rabbit_framing:amqp_table(),
+ rabbit_types:vhost() | undefined,
+ map()) -> amqqueue().
+
+new_with_version(?record_version,
+ #resource{kind = queue} = Name,
+ Pid,
+ Durable,
+ AutoDelete,
+ Owner,
+ Args,
+ VHost,
+ Options)
+ when (is_pid(Pid) orelse Pid =:= none) andalso
+ is_boolean(Durable) andalso
+ is_boolean(AutoDelete) andalso
+ (is_pid(Owner) orelse Owner =:= none) andalso
+ is_list(Args) andalso
+ (is_binary(VHost) orelse VHost =:= undefined) andalso
+ is_map(Options) ->
+ #amqqueue{name = Name,
+ durable = Durable,
+ auto_delete = AutoDelete,
+ arguments = Args,
+ exclusive_owner = Owner,
+ pid = Pid,
+ vhost = VHost,
+ options = Options}.
+
+-spec new_with_version(amqqueue_v1,
+ rabbit_amqqueue:name(),
+ pid() | none,
+ boolean(),
+ boolean(),
+ pid() | none,
+ rabbit_framing:amqp_table(),
+ rabbit_types:vhost() | undefined,
+ map(),
+ ?amqqueue_v1_type | classic) -> amqqueue().
+
+new_with_version(?record_version,
+ #resource{kind = queue} = Name,
+ Pid,
+ Durable,
+ AutoDelete,
+ Owner,
+ Args,
+ VHost,
+ Options,
+ Type)
+ when (is_pid(Pid) orelse Pid =:= none) andalso
+ is_boolean(Durable) andalso
+ is_boolean(AutoDelete) andalso
+ (is_pid(Owner) orelse Owner =:= none) andalso
+ is_list(Args) andalso
+ (is_binary(VHost) orelse VHost =:= undefined) andalso
+ is_map(Options) andalso
+ ?is_backwards_compat_classic(Type) ->
+ new_with_version(
+ ?record_version,
+ Name,
+ Pid,
+ Durable,
+ AutoDelete,
+ Owner,
+ Args,
+ VHost,
+ Options).
+
+-spec is_amqqueue(any()) -> boolean().
+
+is_amqqueue(#amqqueue{}) -> true;
+is_amqqueue(_) -> false.
+
+-spec record_version_to_use() -> amqqueue_v1.
+
+record_version_to_use() ->
+ ?record_version.
+
+-spec upgrade(amqqueue()) -> amqqueue().
+
+upgrade(#amqqueue{} = Queue) -> Queue.
+
+-spec upgrade_to(amqqueue_v1, amqqueue()) -> amqqueue().
+
+upgrade_to(?record_version, #amqqueue{} = Queue) ->
+ Queue.
+
+% arguments
+
+-spec get_arguments(amqqueue()) -> rabbit_framing:amqp_table().
+
+get_arguments(#amqqueue{arguments = Args}) -> Args.
+
+-spec set_arguments(amqqueue(), rabbit_framing:amqp_table()) -> amqqueue().
+
+set_arguments(#amqqueue{} = Queue, Args) ->
+ Queue#amqqueue{arguments = Args}.
+
+% decorators
+
+-spec get_decorators(amqqueue()) -> [atom()] | none | undefined.
+
+get_decorators(#amqqueue{decorators = Decorators}) -> Decorators.
+
+-spec set_decorators(amqqueue(), [atom()] | none | undefined) -> amqqueue().
+
+set_decorators(#amqqueue{} = Queue, Decorators) ->
+ Queue#amqqueue{decorators = Decorators}.
+
+-spec get_exclusive_owner(amqqueue()) -> pid() | none.
+
+get_exclusive_owner(#amqqueue{exclusive_owner = Owner}) -> Owner.
+
+% gm_pids
+
+-spec get_gm_pids(amqqueue()) -> [{pid(), pid()}] | none.
+
+get_gm_pids(#amqqueue{gm_pids = GMPids}) -> GMPids.
+
+-spec set_gm_pids(amqqueue(), [{pid(), pid()}] | none) -> amqqueue().
+
+set_gm_pids(#amqqueue{} = Queue, GMPids) ->
+ Queue#amqqueue{gm_pids = GMPids}.
+
+-spec get_leader(amqqueue_v1()) -> no_return().
+
+get_leader(_) -> throw({unsupported, ?record_version, get_leader}).
+
+% operator_policy
+
+-spec get_operator_policy(amqqueue()) -> binary() | none | undefined.
+
+get_operator_policy(#amqqueue{operator_policy = OpPolicy}) -> OpPolicy.
+
+-spec set_operator_policy(amqqueue(), binary() | none | undefined) ->
+ amqqueue().
+
+set_operator_policy(#amqqueue{} = Queue, OpPolicy) ->
+ Queue#amqqueue{operator_policy = OpPolicy}.
+
+% name
+
+-spec get_name(amqqueue()) -> rabbit_amqqueue:name().
+
+get_name(#amqqueue{name = Name}) -> Name.
+
+-spec set_name(amqqueue(), rabbit_amqqueue:name()) -> amqqueue().
+
+set_name(#amqqueue{} = Queue, Name) ->
+ Queue#amqqueue{name = Name}.
+
+-spec get_options(amqqueue()) -> map().
+
+get_options(#amqqueue{options = Options}) -> Options.
+
+% pid
+
+-spec get_pid
+(amqqueue_v1:amqqueue_v1()) -> pid() | none.
+
+get_pid(#amqqueue{pid = Pid}) -> Pid.
+
+-spec set_pid
+(amqqueue_v1:amqqueue_v1(), pid() | none) -> amqqueue_v1:amqqueue_v1().
+
+set_pid(#amqqueue{} = Queue, Pid) ->
+ Queue#amqqueue{pid = Pid}.
+
+% policy
+
+-spec get_policy(amqqueue()) -> proplists:proplist() | none | undefined.
+
+get_policy(#amqqueue{policy = Policy}) -> Policy.
+
+-spec set_policy(amqqueue(), binary() | none | undefined) -> amqqueue().
+
+set_policy(#amqqueue{} = Queue, Policy) ->
+ Queue#amqqueue{policy = Policy}.
+
+% policy_version
+
+-spec get_policy_version(amqqueue()) -> non_neg_integer().
+
+get_policy_version(#amqqueue{policy_version = PV}) ->
+ PV.
+
+-spec set_policy_version(amqqueue(), non_neg_integer()) -> amqqueue().
+
+set_policy_version(#amqqueue{} = Queue, PV) ->
+ Queue#amqqueue{policy_version = PV}.
+
+% recoverable_slaves
+
+-spec get_recoverable_slaves(amqqueue()) -> [atom()] | none.
+
+get_recoverable_slaves(#amqqueue{recoverable_slaves = Slaves}) ->
+ Slaves.
+
+-spec set_recoverable_slaves(amqqueue(), [atom()] | none) -> amqqueue().
+
+set_recoverable_slaves(#amqqueue{} = Queue, Slaves) ->
+ Queue#amqqueue{recoverable_slaves = Slaves}.
+
+% type_state (new in v2)
+
+-spec get_type_state(amqqueue()) -> no_return().
+
+get_type_state(_) -> throw({unsupported, ?record_version, get_type_state}).
+
+-spec set_type_state(amqqueue(), map()) -> no_return().
+
+set_type_state(_, _) ->
+ throw({unsupported, ?record_version, set_type_state}).
+
+% slave_pids
+
+get_slave_pids(#amqqueue{slave_pids = Slaves}) ->
+ Slaves.
+
+set_slave_pids(#amqqueue{} = Queue, SlavePids) ->
+ Queue#amqqueue{slave_pids = SlavePids}.
+
+% slave_pids_pending_shutdown
+
+get_slave_pids_pending_shutdown(
+ #amqqueue{slave_pids_pending_shutdown = Slaves}) ->
+ Slaves.
+
+set_slave_pids_pending_shutdown(#amqqueue{} = Queue, SlavePids) ->
+ Queue#amqqueue{slave_pids_pending_shutdown = SlavePids}.
+
+% state
+
+-spec get_state(amqqueue()) -> atom() | none.
+
+get_state(#amqqueue{state = State}) -> State.
+
+-spec set_state(amqqueue(), atom() | none) -> amqqueue().
+
+set_state(#amqqueue{} = Queue, State) ->
+ Queue#amqqueue{state = State}.
+
+% sync_slave_pids
+
+-spec get_sync_slave_pids(amqqueue()) -> [pid()] | none.
+
+get_sync_slave_pids(#amqqueue{sync_slave_pids = Pids}) ->
+ Pids.
+
+-spec set_sync_slave_pids(amqqueue(), [pid()] | none) -> amqqueue().
+
+set_sync_slave_pids(#amqqueue{} = Queue, Pids) ->
+ Queue#amqqueue{sync_slave_pids = Pids}.
+
+%% New in v2.
+
+-spec get_type(amqqueue()) -> atom().
+
+get_type(Queue) when ?is_amqqueue(Queue) -> ?amqqueue_v1_type.
+
+-spec get_vhost(amqqueue()) -> rabbit_types:vhost() | undefined.
+
+get_vhost(#amqqueue{vhost = VHost}) -> VHost.
+
+-spec is_auto_delete(amqqueue()) -> boolean().
+
+is_auto_delete(#amqqueue{auto_delete = AutoDelete}) -> AutoDelete.
+
+-spec is_durable(amqqueue()) -> boolean().
+
+is_durable(#amqqueue{durable = Durable}) -> Durable.
+
+-spec is_classic(amqqueue()) -> boolean().
+
+is_classic(Queue) ->
+ get_type(Queue) =:= ?amqqueue_v1_type.
+
+-spec is_quorum(amqqueue()) -> boolean().
+
+is_quorum(Queue) when ?is_amqqueue(Queue) ->
+ false.
+
+fields() -> fields(?record_version).
+
+fields(?record_version) -> record_info(fields, amqqueue).
+
+field_vhost() -> #amqqueue.vhost.
+
+-spec pattern_match_all() -> amqqueue_pattern().
+
+pattern_match_all() -> #amqqueue{_ = '_'}.
+
+-spec pattern_match_on_name(rabbit_amqqueue:name()) ->
+ amqqueue_pattern().
+
+pattern_match_on_name(Name) -> #amqqueue{name = Name, _ = '_'}.
+
+-spec pattern_match_on_type(atom()) -> no_return().
+
+pattern_match_on_type(_) ->
+ throw({unsupported, ?record_version, pattern_match_on_type}).
+
+reset_mirroring_and_decorators(#amqqueue{} = Queue) ->
+ Queue#amqqueue{slave_pids = [],
+ sync_slave_pids = [],
+ gm_pids = [],
+ decorators = undefined}.
+
+set_immutable(#amqqueue{} = Queue) ->
+ Queue#amqqueue{pid = none,
+ slave_pids = none,
+ sync_slave_pids = none,
+ recoverable_slaves = none,
+ gm_pids = none,
+ policy = none,
+ decorators = none,
+ state = none}.
+
+-spec qnode(amqqueue() | pid()) -> node().
+
+qnode(Queue) when ?is_amqqueue(Queue) ->
+ QPid = get_pid(Queue),
+ qnode(QPid);
+qnode(QPid) when is_pid(QPid) ->
+ node(QPid).
+
+macros() ->
+ io:format(
+ "-define(is_~s(Q), is_record(Q, amqqueue, ~b)).~n~n",
+ [?record_version, record_info(size, amqqueue)]),
+ %% The field number starts at 2 because the first element is the
+ %% record name.
+ macros(record_info(fields, amqqueue), 2).
+
+macros([Field | Rest], I) ->
+ io:format(
+ "-define(~s_field_~s(Q), element(~b, Q)).~n",
+ [?record_version, Field, I]),
+ macros(Rest, I + 1);
+macros([], _) ->
+ ok.
diff --git a/deps/rabbit/src/background_gc.erl b/deps/rabbit/src/background_gc.erl
new file mode 100644
index 0000000000..be5bf0c995
--- /dev/null
+++ b/deps/rabbit/src/background_gc.erl
@@ -0,0 +1,78 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(background_gc).
+
+-behaviour(gen_server2).
+
+-export([start_link/0, run/0]).
+-export([gc/0]). %% For run_interval only
+
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
+ terminate/2, code_change/3]).
+
+-define(MAX_RATIO, 0.01).
+-define(MAX_INTERVAL, 240000).
+
+-record(state, {last_interval}).
+
+%%----------------------------------------------------------------------------
+
+-spec start_link() -> {'ok', pid()} | {'error', any()}.
+
+start_link() -> gen_server2:start_link({local, ?MODULE}, ?MODULE, [],
+ [{timeout, infinity}]).
+
+-spec run() -> 'ok'.
+
+run() -> gen_server2:cast(?MODULE, run).
+
+%%----------------------------------------------------------------------------
+
+init([]) ->
+ {ok, IdealInterval} = application:get_env(rabbit, background_gc_target_interval),
+ {ok, interval_gc(#state{last_interval = IdealInterval})}.
+
+handle_call(Msg, _From, State) ->
+ {stop, {unexpected_call, Msg}, {unexpected_call, Msg}, State}.
+
+handle_cast(run, State) -> gc(), {noreply, State};
+
+handle_cast(Msg, State) -> {stop, {unexpected_cast, Msg}, State}.
+
+handle_info(run, State) -> {noreply, interval_gc(State)};
+
+handle_info(Msg, State) -> {stop, {unexpected_info, Msg}, State}.
+
+code_change(_OldVsn, State, _Extra) -> {ok, State}.
+
+terminate(_Reason, State) -> State.
+
+%%----------------------------------------------------------------------------
+
+interval_gc(State = #state{last_interval = LastInterval}) ->
+ {ok, IdealInterval} = application:get_env(rabbit, background_gc_target_interval),
+ {ok, Interval} = rabbit_misc:interval_operation(
+ {?MODULE, gc, []},
+ ?MAX_RATIO, ?MAX_INTERVAL, IdealInterval, LastInterval),
+ erlang:send_after(Interval, self(), run),
+ State#state{last_interval = Interval}.
+
+-spec gc() -> 'ok'.
+
+gc() ->
+ Enabled = rabbit_misc:get_env(rabbit, background_gc_enabled, false),
+ case Enabled of
+ true ->
+ [garbage_collect(P) || P <- processes(),
+ {status, waiting} == process_info(P, status)],
+ %% since we will never be waiting...
+ garbage_collect();
+ false ->
+ ok
+ end,
+ ok.
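+
+%% A minimal configuration sketch (advanced.config style; the values
+%% are illustrative):
+%%
+%%   [{rabbit, [{background_gc_enabled, true},
+%%              {background_gc_target_interval, 60000}]}].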
diff --git a/deps/rabbit/src/code_server_cache.erl b/deps/rabbit/src/code_server_cache.erl
new file mode 100644
index 0000000000..b53f5dcee9
--- /dev/null
+++ b/deps/rabbit/src/code_server_cache.erl
@@ -0,0 +1,81 @@
+%% -*- erlang-indent-level: 4;indent-tabs-mode: nil -*-
+%% ex: ts=4 sw=4 et
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(code_server_cache).
+
+-behaviour(gen_server).
+
+%% API
+-export([start_link/0,
+ maybe_call_mfa/4]).
+
+%% gen_server callbacks
+-export([init/1,
+ handle_call/3,
+ handle_cast/2,
+ handle_info/2,
+ terminate/2,
+ code_change/3]).
+
+-record(state, {
+ modules = #{} :: #{atom() => boolean()}
+}).
+
+%% API
+start_link() ->
+ gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
+
+maybe_call_mfa(Module, Function, Args, Default) ->
+ gen_server:call(?MODULE, {maybe_call_mfa, {Module, Function, Args, Default}}).
+
+%% gen_server callbacks
+
+init([]) ->
+ {ok, #state{}}.
+
+handle_call({maybe_call_mfa, {Mod, _F, _A, _D} = MFA}, _From, #state{modules = ModuleMap} = State0) ->
+ Value = maps:get(Mod, ModuleMap, true),
+ {ok, Reply, State1} = handle_maybe_call_mfa(Value, MFA, State0),
+ {reply, Reply, State1};
+handle_call(_Request, _From, State) ->
+ {reply, ignored, State}.
+
+handle_cast(_Msg, State) ->
+ {noreply, State}.
+
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+%% Internal functions
+
+handle_maybe_call_mfa(false, {_M, _F, _A, Default}, State) ->
+ {ok, Default, State};
+handle_maybe_call_mfa(true, {Module, Function, Args, Default}, State) ->
+ try
+ Reply = erlang:apply(Module, Function, Args),
+ {ok, Reply, State}
+ catch
+ error:undef ->
+ handle_maybe_call_mfa_error(Module, Default, State);
+ Err:Reason ->
+ rabbit_log:error("Calling ~p:~p failed: ~p:~p~n",
+ [Module, Function, Err, Reason]),
+ handle_maybe_call_mfa_error(Module, Default, State)
+ end.
+
+handle_maybe_call_mfa_error(Module, Default, #state{modules = ModuleMap0} = State0) ->
+ ModuleMap1 = maps:put(Module, false, ModuleMap0),
+ State1 = State0#state{modules = ModuleMap1},
+ {ok, Default, State1}.
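+
+%% A minimal usage sketch: route a call through the cache and fall back
+%% to the default once the module is known to be unavailable (the
+%% module, function and default are illustrative):
+%%
+%%   3 = code_server_cache:maybe_call_mfa(string, length, ["abc"], 0).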
diff --git a/deps/rabbit/src/gatherer.erl b/deps/rabbit/src/gatherer.erl
new file mode 100644
index 0000000000..2b46ec02b1
--- /dev/null
+++ b/deps/rabbit/src/gatherer.erl
@@ -0,0 +1,151 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(gatherer).
+
+%% Gatherer is a queue shared between producer and consumer processes. Before
+%% producers push items onto the queue with gatherer:in/2, they must declare
+%% their intent to do so with gatherer:fork/1. When a producer's work is done,
+%% it says so with gatherer:finish/1.
+%%
+%% Consumers pop items off the queue with gatherer:out/1. If the queue is empty
+%% and there are producers that haven't finished working, the caller is blocked
+%% until an item is available. If there are no active producers, gatherer:out/1
+%% immediately returns 'empty'.
+%%
+%% This module is primarily used to collect results from asynchronous tasks
+%% running in a worker pool, e.g. when recovering bindings or rebuilding
+%% message store indices.
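+%%
+%% A minimal usage sketch with a single producer, run from one process
+%% for illustration:
+%%
+%%   {ok, G} = gatherer:start_link(),
+%%   ok = gatherer:fork(G),
+%%   ok = gatherer:in(G, item),
+%%   ok = gatherer:finish(G),
+%%   {value, item} = gatherer:out(G),
+%%   empty = gatherer:out(G),
+%%   ok = gatherer:stop(G).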
+
+-behaviour(gen_server2).
+
+-export([start_link/0, stop/1, fork/1, finish/1, in/2, sync_in/2, out/1]).
+
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
+ terminate/2, code_change/3]).
+
+%%----------------------------------------------------------------------------
+
+-define(HIBERNATE_AFTER_MIN, 1000).
+-define(DESIRED_HIBERNATE, 10000).
+
+%%----------------------------------------------------------------------------
+
+-record(gstate, { forks, values, blocked }).
+
+%%----------------------------------------------------------------------------
+
+-spec start_link() -> rabbit_types:ok_pid_or_error().
+
+start_link() ->
+ gen_server2:start_link(?MODULE, [], [{timeout, infinity}]).
+
+-spec stop(pid()) -> 'ok'.
+
+stop(Pid) ->
+ unlink(Pid),
+ gen_server2:call(Pid, stop, infinity).
+
+-spec fork(pid()) -> 'ok'.
+
+fork(Pid) ->
+ gen_server2:call(Pid, fork, infinity).
+
+-spec finish(pid()) -> 'ok'.
+
+finish(Pid) ->
+ gen_server2:cast(Pid, finish).
+
+-spec in(pid(), any()) -> 'ok'.
+
+in(Pid, Value) ->
+ gen_server2:cast(Pid, {in, Value}).
+
+-spec sync_in(pid(), any()) -> 'ok'.
+
+sync_in(Pid, Value) ->
+ gen_server2:call(Pid, {in, Value}, infinity).
+
+-spec out(pid()) -> {'value', any()} | 'empty'.
+
+out(Pid) ->
+ gen_server2:call(Pid, out, infinity).
+
+%%----------------------------------------------------------------------------
+
+init([]) ->
+ {ok, #gstate { forks = 0, values = queue:new(), blocked = queue:new() },
+ hibernate,
+ {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, ?DESIRED_HIBERNATE}}.
+
+handle_call(stop, _From, State) ->
+ {stop, normal, ok, State};
+
+handle_call(fork, _From, State = #gstate { forks = Forks }) ->
+ {reply, ok, State #gstate { forks = Forks + 1 }, hibernate};
+
+handle_call({in, Value}, From, State) ->
+ {noreply, in(Value, From, State), hibernate};
+
+handle_call(out, From, State = #gstate { forks = Forks,
+ values = Values,
+ blocked = Blocked }) ->
+ case queue:out(Values) of
+ {empty, _} when Forks == 0 ->
+ {reply, empty, State, hibernate};
+ {empty, _} ->
+ {noreply, State #gstate { blocked = queue:in(From, Blocked) },
+ hibernate};
+ {{value, {PendingIn, Value}}, NewValues} ->
+ reply(PendingIn, ok),
+ {reply, {value, Value}, State #gstate { values = NewValues },
+ hibernate}
+ end;
+
+handle_call(Msg, _From, State) ->
+ {stop, {unexpected_call, Msg}, State}.
+
+handle_cast(finish, State = #gstate { forks = Forks, blocked = Blocked }) ->
+ NewForks = Forks - 1,
+ NewBlocked = case NewForks of
+ 0 -> _ = [gen_server2:reply(From, empty) ||
+ From <- queue:to_list(Blocked)],
+ queue:new();
+ _ -> Blocked
+ end,
+ {noreply, State #gstate { forks = NewForks, blocked = NewBlocked },
+ hibernate};
+
+handle_cast({in, Value}, State) ->
+ {noreply, in(Value, undefined, State), hibernate};
+
+handle_cast(Msg, State) ->
+ {stop, {unexpected_cast, Msg}, State}.
+
+handle_info(Msg, State) ->
+ {stop, {unexpected_info, Msg}, State}.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+terminate(_Reason, State) ->
+ State.
+
+%%----------------------------------------------------------------------------
+
+in(Value, From, State = #gstate { values = Values, blocked = Blocked }) ->
+ case queue:out(Blocked) of
+ {empty, _} ->
+ State #gstate { values = queue:in({From, Value}, Values) };
+ {{value, PendingOut}, NewBlocked} ->
+ reply(From, ok),
+ gen_server2:reply(PendingOut, {value, Value}),
+ State #gstate { blocked = NewBlocked }
+ end.
+
+reply(undefined, _Reply) -> ok;
+reply(From, Reply) -> gen_server2:reply(From, Reply).
diff --git a/deps/rabbit/src/gm.erl b/deps/rabbit/src/gm.erl
new file mode 100644
index 0000000000..af24a2958a
--- /dev/null
+++ b/deps/rabbit/src/gm.erl
@@ -0,0 +1,1650 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(gm).
+
+%% Guaranteed Multicast
+%% ====================
+%%
+%% This module provides the ability to create named groups of
+%% processes to which members can be dynamically added and removed,
+%% and for messages to be broadcast within the group that are
+%% guaranteed to reach all members of the group during the lifetime of
+%% the message. The lifetime of a message is defined as being, at a
+%% minimum, the time from which the message is first sent to any
+%% member of the group, up until the time at which it is known by the
+%% member who published the message that the message has reached all
+%% group members.
+%%
+%% The guarantee given is that provided a message, once sent, makes it
+%% to members who do not all leave the group, the message will
+%% continue to propagate to all group members.
+%%
+%% Another way of stating the guarantee is that if member P publishes
+%% messages m and m', then for all members P', if P' is a member of
+%% the group prior to the publication of m, and P' receives m', then
+%% P' will receive m.
+%%
+%% Note that only local ordering is enforced: i.e. if member P sends
+%% message m and then message m', then for all members P', if P'
+%% receives both m and m', it will receive m' after m. Causal
+%% ordering is _not_ enforced: i.e. if member P receives message m
+%% and as a result publishes message m', there is no guarantee that
+%% other members P' will receive m before m'.
+%%
+%%
+%% API Use
+%% -------
+%%
+%% Mnesia must be started. Use the idempotent create_tables/0 function
+%% to create the tables required.
+%%
+%% start_link/3
+%% Provide the group name, the callback module name, and any arguments
+%% you wish to be passed into the callback module's functions. The
+%% joined/2 function will be called when we have joined the group,
+%% with the arguments passed to start_link and a list of the current
+%% members of the group. See the callbacks specs and the comments
+%% below for further details of the callback functions.
+%%
+%% leave/1
+%% Provide the Pid. Removes the Pid from the group. The callback
+%% handle_terminate/2 function will be called.
+%%
+%% broadcast/2
+%% Provide the Pid and a Message. The message will be sent to all
+%% members of the group as per the guarantees given above. This is a
+%% cast and the function call will return immediately. There is no
+%% guarantee that the message will reach any member of the group.
+%%
+%% confirmed_broadcast/2
+%% Provide the Pid and a Message. As per broadcast/2 except that this
+%% is a call, not a cast, and only returns 'ok' once the Message has
+%% reached every member of the group. Do not call
+%% confirmed_broadcast/2 directly from the callback module otherwise
+%% you will deadlock the entire group.
+%%
+%% info/1
+%% Provide the Pid. Returns a proplist with various facts, including
+%% the group name and the current group members.
+%%
+%% validate_members/2
+%% Check whether a given member list agrees with the chosen member's
+%% view. Any differences will be communicated via the members_changed
+%% callback. If there are no differences then there will be no reply.
+%% Note that members will not necessarily share the same view.
+%%
+%% forget_group/1
+%% Provide the group name. Removes its mnesia record. Makes no attempt
+%% to ensure the group is empty.
+%%
+%% Implementation Overview
+%% -----------------------
+%%
+%% One possible means of implementation would be a fan-out from the
+%% sender to every member of the group. This would require that the
+%% group is fully connected, and, in the event that the original
+%% sender of the message disappears from the group before the message
+%% has made it to every member of the group, raises questions as to
+%% who is responsible for sending on the message to new group members.
+%% In particular, the issue is with [ Pid ! Msg || Pid <- Members ] -
+%% if the sender dies part way through, who is responsible for
+%% ensuring that the remaining Members receive the Msg? In the event
+%% that within the group, messages sent are broadcast from a subset of
+%% the members, the fan-out arrangement has the potential to
+%% substantially impact the CPU and network workload of such members,
+%% as such members would have to accommodate the cost of sending each
+%% message to every group member.
+%%
+%% Instead, if the members of the group are arranged in a chain, then
+%% it becomes easier to reason about who within the group has received
+%% each message and who has not. It eases issues of responsibility: in
+%% the event of a group member disappearing, the nearest upstream
+%% member of the chain is responsible for ensuring that messages
+%% continue to propagate down the chain. It also results in equal
+%% distribution of sending and receiving workload, even if all
+%% messages are being sent from just a single group member. This
+%% configuration has the further advantage that it is not necessary
+%% for every group member to know of every other group member, and
+%% even that a group member does not have to be accessible from all
+%% other group members.
+%%
+%% Performance is kept high by permitting pipelining and all
+%% communication between joined group members is asynchronous. In the
+%% chain A -> B -> C -> D, if A sends a message to the group, it will
+%% not directly contact C or D. However, it must know that D receives
+%% the message (in addition to B and C) before it can consider the
+%% message fully sent. A simplistic implementation would require that
+%% D replies to C, C replies to B and B then replies to A. This would
+%% result in a propagation delay of twice the length of the chain. It
+%% would also require, in the event of the failure of C, that D knows
+%% to directly contact B and issue the necessary replies. Instead, the
+%% chain forms a ring: D sends the message on to A: D does not
+%% distinguish A as the sender, merely as the next member (downstream)
+%% within the chain (which has now become a ring). When A receives
+%% from D messages that A sent, it knows that all members have
+%% received the message. However, the message is not dead yet: if C
+%% died as B was sending to C, then B would need to detect the death
+%% of C and forward the message on to D instead: thus every node has
+%% to remember every message published until it is told that it can
+%% forget about the message. This is essential not just for dealing
+%% with failure of members, but also for the addition of new members.
+%%
+%% Thus once A receives the message back again, it then sends to B an
+%% acknowledgement for the message, indicating that B can now forget
+%% about the message. B does so, and forwards the ack to C. C forgets
+%% the message, and forwards the ack to D, which forgets the message
+%% and finally forwards the ack back to A. At this point, A takes no
+%% further action: the message and its acknowledgement have made it to
+%% every member of the group. The message is now dead, and any new
+%% member joining the group at this point will not receive the
+%% message.
+%%
+%% We therefore have two roles:
+%%
+%% 1. The sender, who upon receiving their own messages back, must
+%% then send out acknowledgements, and upon receiving their own
+%% acknowledgements back perform no further action.
+%%
+%% 2. The other group members who upon receiving messages and
+%% acknowledgements must update their own internal state accordingly
+%% (the sending member must also do this in order to be able to
+%% accommodate failures), and forwards messages on to their downstream
+%% neighbours.
+%%
+%%
+%% Implementation: It gets trickier
+%% --------------------------------
+%%
+%% Chain A -> B -> C -> D
+%%
+%% A publishes a message which B receives. A now dies. B and D will
+%% detect the death of A, and will link up, thus the chain is now B ->
+%% C -> D. B forwards A's message on to C, who forwards it to D, who
+%% forwards it to B. Thus B is now responsible for A's messages - both
+%% publications and acknowledgements that were in flight at the point
+%% at which A died. Even worse is that this is transitive: after B
+%% forwards A's message to C, B dies as well. Now C is not only
+%% responsible for B's in-flight messages, but is also responsible for
+%% A's in-flight messages.
+%%
+%% Lemma 1: A member can only determine which dead members they have
+%% inherited responsibility for if there is a total ordering on the
+%% conflicting additions and subtractions of members from the group.
+%%
+%% Consider the simultaneous death of B and addition of B' that
+%% transitions a chain from A -> B -> C to A -> B' -> C. Either B' or
+%% C is responsible for in-flight messages from B. It is easy to
+%% ensure that at least one of them thinks they have inherited B, but
+%% if we do not ensure that exactly one of them inherits B, then we
+%% could have B' converting publishes to acks, which then will crash C
+%% as C does not believe it has issued acks for those messages.
+%%
+%% More complex scenarios are easy to concoct: A -> B -> C -> D -> E
+%% becoming A -> C' -> E. Who has inherited which of B, C and D?
+%%
+%% However, for non-conflicting membership changes, only a partial
+%% ordering is required. For example, A -> B -> C becoming A -> A' ->
+%% B. The addition of A', between A and B, can have no conflicts with
+%% the death of C: it is clear that A has inherited C's messages.
+%%
+%% For ease of implementation, we adopt the simple solution, of
+%% imposing a total order on all membership changes.
+%%
+%% On the death of a member, it is ensured the dead member's
+%% neighbours become aware of the death, and the upstream neighbour
+%% now sends to its new downstream neighbour its state, including the
+%% messages pending acknowledgement. The downstream neighbour can then
+%% use this to calculate which publishes and acknowledgements it has
+%% missed out on, due to the death of its old upstream. Thus the
+%% downstream can catch up, and continues the propagation of messages
+%% through the group.
+%%
+%% Lemma 2: When a member is joining, it must synchronously
+%% communicate with its upstream member in order to receive its
+%% starting state atomically with its addition to the group.
+%%
+%% New members must start with the same state as their nearest
+%% upstream neighbour. This ensures that they are not surprised by
+%% acknowledgements they are sent, and that, should their downstream
+%% neighbour die, they are able to send the correct state to their new
+%% downstream neighbour to ensure it can catch up. Thus in the
+%% transition A -> B -> C becomes A -> A' -> B -> C becomes A -> A' ->
+%% C, A' must start with the state of A, so that it can send C the
+%% correct state when B dies, allowing C to detect any missed
+%% messages.
+%%
+%% If A' starts by adding itself to the group membership, A could then
+%% die, without A' having received the necessary state from A. This
+%% would leave A' responsible for in-flight messages from A, while
+%% knowing the least about those messages of any member. Thus A' must
+%% start by synchronously calling A, which then immediately sends A'
+%% back its state. A then adds A' to the group. If A dies at this
+%% point then A' will be able to see this (as A' will fail to appear
+%% in the group membership), and thus A' will ignore the state it
+%% receives from A, and will simply repeat the process, trying to now
+%% join downstream from some other member. This ensures that should
+%% the upstream die as soon as the new member has been joined, the new
+%% member is guaranteed to receive the correct state, allowing it to
+%% correctly process messages inherited due to the death of its
+%% upstream neighbour.
+%%
+%% The canonical definition of the group membership is held by a
+%% distributed database. Whilst this allows the total ordering of
+%% changes to be achieved, it is nevertheless undesirable to have to
+%% query this database for the current view, upon receiving each
+%% message. Instead, we wish for members to be able to cache a view of
+%% the group membership, which then requires a cache invalidation
+%% mechanism. Each member maintains its own view of the group
+%% membership. Thus when the group's membership changes, members may
+%% need to become aware of such changes in order to be able to
+%% accurately process messages they receive. Because of the
+%% requirement of a total ordering of conflicting membership changes,
+%% it is not possible to use the guaranteed broadcast mechanism to
+%% communicate these changes: to achieve the necessary ordering, it
+%% would be necessary for such messages to be published by exactly one
+%% member, which can not be guaranteed given that such a member could
+%% die.
+%%
+%% The total ordering we enforce on membership changes gives rise to a
+%% view version number: every change to the membership creates a
+%% different view, and the total ordering permits a simple
+%% monotonically increasing view version number.
+%%
+%% Lemma 3: If a message is sent from a member that holds view version
+%% N, it can be correctly processed by any member receiving the
+%% message with a view version >= N.
+%%
+%% Initially, let us suppose that each view contains the ordering of
+%% every member that was ever part of the group. Dead members are
+%% marked as such. Thus we have a ring of members, some of which are
+%% dead, and are thus inherited by the nearest alive downstream
+%% member.
+%%
+%% In the chain A -> B -> C, all three members initially have view
+%% version 1, which reflects reality. B publishes a message, which is
+%% forwarded by C to A. B now dies, which A notices very quickly. Thus A
+%% updates the view, creating version 2. It now forwards B's
+%% publication, sending that message to its new downstream neighbour,
+%% C. This happens before C is aware of the death of B. C must become
+%% aware of the view change before it interprets the message its
+%% received, otherwise it will fail to learn of the death of B, and
+%% thus will not realise it has inherited B's messages (and will
+%% likely crash).
+%%
+%% Thus very simply, we have that each subsequent view contains more
+%% information than the preceding view.
+%%
+%% However, to avoid the views growing indefinitely, we need to be
+%% able to delete members which have died _and_ for which no messages
+%% are in-flight. This requires that upon inheriting a dead member, we
+%% know the last publication sent by the dead member (this is easy: we
+%% inherit a member because we are the nearest downstream member, which
+%% implies that we know at least as much as everyone else about the
+%% publications of the dead member), and we know the earliest message
+%% for which the acknowledgement is still in flight.
+%%
+%% In the chain A -> B -> C, when B dies, A will send to C its state
+%% (as C is the new downstream from A), allowing C to calculate which
+%% messages it has missed out on (described above). At this point, C
+%% also inherits B's messages. If that state from A also includes the
+%% last message published by B for which an acknowledgement has been
+%% seen, then C knows exactly which further acknowledgements it must
+%% receive (also including issuing acknowledgements for publications
+%% still in-flight that it receives), after which it is known there
+%% are no more messages in flight for B, thus all evidence that B was
+%% ever part of the group can be safely removed from the canonical
+%% group membership.
+%%
+%% Thus, for every message that a member sends, it includes with that
+%% message its view version. When a member receives a message it will
+%% update its view from the canonical copy, should its view be older
+%% than the view version included in the message it has received.
+%%
+%% The state held by each member therefore includes the messages from
+%% each publisher pending acknowledgement, the last publication seen
+%% from that publisher, and the last acknowledgement from that
+%% publisher. In the case of the member's own publications or
+%% inherited members, this last acknowledgement seen state indicates
+%% the last acknowledgement retired, rather than sent.
+%%
+%%
+%% Proof sketch
+%% ------------
+%%
+%% We need to prove that with the provided operational semantics, we
+%% can never reach a state that is not well formed from a well-formed
+%% starting state.
+%%
+%% Operational semantics (small step): straight-forward message
+%% sending, process monitoring, state updates.
+%%
+%% Well formed state: dead members inherited by exactly one non-dead
+%% member; for every entry in anyone's pending-acks, either (the
+%% publication of the message is in-flight downstream from the member
+%% and upstream from the publisher) or (the acknowledgement of the
+%% message is in-flight downstream from the publisher and upstream
+%% from the member).
+%%
+%% Proof by induction on the applicable operational semantics.
+%%
+%%
+%% Related work
+%% ------------
+%%
+%% The ring configuration and double traversal of messages around the
+%% ring is similar (though developed independently) to the LCR
+%% protocol by [Levy 2008]. However, LCR differs in several
+%% ways. Firstly, by using vector clocks, it enforces a total order of
+%% message delivery, which is unnecessary for our purposes. More
+%% significantly, it is built on top of a "group communication system"
+%% which performs the group management functions, taking
+%% responsibility away from the protocol as to how to cope with safely
+%% adding and removing members. When membership changes do occur, the
+%% protocol stipulates that every member must perform communication
+%% with every other member of the group, to ensure all outstanding
+%% deliveries complete, before the entire group transitions to the new
+%% view. This, in total, requires two sets of all-to-all synchronous
+%% communications.
+%%
+%% This is not only rather inefficient, but also does not explain what
+%% happens upon the failure of a member during this process. It does
+%% though entirely avoid the need for inheritance of responsibility of
+%% dead members that our protocol incorporates.
+%%
+%% In [Marandi et al 2010], a Paxos-based protocol is described. This
+%% work explicitly focuses on the efficiency of communication. LCR
+%% (and our protocol too) are more efficient, but at the cost of
+%% higher latency. The Ring-Paxos protocol is itself built on top of
+%% IP-multicast, which rules it out for many applications where
+%% point-to-point communication is all that can be relied upon. They also
+%% have an excellent related work section which I really ought to
+%% read...
+%%
+%%
+%% [Levy 2008] The Complexity of Reliable Distributed Storage, 2008.
+%% [Marandi et al 2010] Ring Paxos: A High-Throughput Atomic Broadcast
+%% Protocol
+
+
+-behaviour(gen_server2).
+
+-export([create_tables/0, start_link/4, leave/1, broadcast/2, broadcast/3,
+ confirmed_broadcast/2, info/1, validate_members/2, forget_group/1]).
+
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
+ code_change/3, prioritise_info/3]).
+
+%% For INSTR_MOD callbacks
+-export([call/3, cast/2, monitor/1, demonitor/1]).
+
+-export([table_definitions/0]).
+
+-define(GROUP_TABLE, gm_group).
+-define(MAX_BUFFER_SIZE, 100000000). %% 100MB
+-define(BROADCAST_TIMER, 25).
+-define(FORCE_GC_TIMER, 250).
+-define(VERSION_START, 0).
+-define(SETS, ordsets).
+
+-record(state,
+ { self,
+ left,
+ right,
+ group_name,
+ module,
+ view,
+ pub_count,
+ members_state,
+ callback_args,
+ confirms,
+ broadcast_buffer,
+ broadcast_buffer_sz,
+ broadcast_timer,
+ force_gc_timer,
+ txn_executor,
+ shutting_down
+ }).
+
+-record(gm_group, { name, version, members }).
+
+-record(view_member, { id, aliases, left, right }).
+
+-record(member, { pending_ack, last_pub, last_ack }).
+
+-define(TABLE, {?GROUP_TABLE, [{record_name, gm_group},
+ {attributes, record_info(fields, gm_group)}]}).
+-define(TABLE_MATCH, {match, #gm_group { _ = '_' }}).
+
+-define(TAG, '$gm').
+
+-export_type([group_name/0]).
+
+-type group_name() :: any().
+-type txn_fun() :: fun((fun(() -> any())) -> any()).
+
+%% The joined, members_changed and handle_msg callbacks can all return
+%% any of the following terms:
+%%
+%% 'ok' - the callback function returns normally
+%%
+%% {'stop', Reason} - the callback indicates the member should stop
+%% with reason Reason and should leave the group.
+%%
+%% {'become', Module, Args} - the callback indicates that the callback
+%% module should be changed to Module and that the callback functions
+%% should now be passed the arguments Args. This allows the callback
+%% module to be dynamically changed.
+
+%% Called when we've successfully joined the group. Supplied with Args
+%% provided in start_link, plus current group members.
+-callback joined(Args :: term(), Members :: [pid()]) ->
+ ok | {stop, Reason :: term()} | {become, Module :: atom(), Args :: any()}.
+
+%% Supplied with Args provided in start_link, the list of new members
+%% and the list of members previously known to us that have since
+%% died. Note that if a member joins and dies very quickly, it's
+%% possible that we will never see that member appear in either births
+%% or deaths. However we are guaranteed that (1) we will see a member
+%% joining either in the births here, or in the members passed to
+%% joined/2 before receiving any messages from it; and (2) we will not
+%% see members die that we have not seen born (or supplied in the
+%% members to joined/2).
+-callback members_changed(Args :: term(),
+ Births :: [pid()], Deaths :: [pid()]) ->
+ ok | {stop, Reason :: term()} | {become, Module :: atom(), Args :: any()}.
+
+%% Supplied with Args provided in start_link, the sender, and the
+%% message. This does get called for messages injected by this member,
+%% however, in such cases, there is no special significance of this
+%% invocation: it does not indicate that the message has made it to
+%% any other members, let alone all other members.
+-callback handle_msg(Args :: term(), From :: pid(), Message :: term()) ->
+ ok | {stop, Reason :: term()} | {become, Module :: atom(), Args :: any()}.
+
+%% Called on gm member termination as per rules in gen_server, with
+%% the Args provided in start_link plus the termination Reason.
+-callback handle_terminate(Args :: term(), Reason :: term()) ->
+ ok | term().
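+
+%% A minimal callback-module sketch (the module name is illustrative):
+%%
+%%   -module(gm_cb_example).
+%%   -behaviour(gm).
+%%   -export([joined/2, members_changed/3, handle_msg/3,
+%%            handle_terminate/2]).
+%%
+%%   joined(_Args, _Members) -> ok.
+%%   members_changed(_Args, _Births, _Deaths) -> ok.
+%%   handle_msg(_Args, _From, _Msg) -> ok.
+%%   handle_terminate(_Args, _Reason) -> ok.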
+
+-spec create_tables() -> 'ok' | {'aborted', any()}.
+
+create_tables() ->
+ create_tables([?TABLE]).
+
+create_tables([]) ->
+ ok;
+create_tables([{Table, Attributes} | Tables]) ->
+ case mnesia:create_table(Table, Attributes) of
+ {atomic, ok} -> create_tables(Tables);
+ {aborted, {already_exists, Table}} -> create_tables(Tables);
+ Err -> Err
+ end.
+
+table_definitions() ->
+ {Name, Attributes} = ?TABLE,
+ [{Name, [?TABLE_MATCH | Attributes]}].
+
+-spec start_link(group_name(), atom(), any(), txn_fun()) ->
+ rabbit_types:ok_pid_or_error().
+
+start_link(GroupName, Module, Args, TxnFun) ->
+ gen_server2:start_link(?MODULE, [GroupName, Module, Args, TxnFun],
+ [{spawn_opt, [{fullsweep_after, 0}]}]).
+
+-spec leave(pid()) -> 'ok'.
+
+leave(Server) ->
+ gen_server2:cast(Server, leave).
+
+-spec broadcast(pid(), any()) -> 'ok'.
+
+broadcast(Server, Msg) -> broadcast(Server, Msg, 0).
+
+broadcast(Server, Msg, SizeHint) ->
+ gen_server2:cast(Server, {broadcast, Msg, SizeHint}).
+
+-spec confirmed_broadcast(pid(), any()) -> 'ok'.
+
+confirmed_broadcast(Server, Msg) ->
+ gen_server2:call(Server, {confirmed_broadcast, Msg}, infinity).
+
+-spec info(pid()) -> rabbit_types:infos().
+
+info(Server) ->
+ gen_server2:call(Server, info, infinity).
+
+-spec validate_members(pid(), [pid()]) -> 'ok'.
+
+validate_members(Server, Members) ->
+ gen_server2:cast(Server, {validate_members, Members}).
+
+-spec forget_group(group_name()) -> 'ok'.
+
+forget_group(GroupName) ->
+ {atomic, ok} = mnesia:sync_transaction(
+ fun () ->
+ mnesia:delete({?GROUP_TABLE, GroupName})
+ end),
+ ok.
+
+init([GroupName, Module, Args, TxnFun]) ->
+ put(process_name, {?MODULE, GroupName}),
+ Self = make_member(GroupName),
+ gen_server2:cast(self(), join),
+ {ok, #state { self = Self,
+ left = {Self, undefined},
+ right = {Self, undefined},
+ group_name = GroupName,
+ module = Module,
+ view = undefined,
+ pub_count = -1,
+ members_state = undefined,
+ callback_args = Args,
+ confirms = queue:new(),
+ broadcast_buffer = [],
+ broadcast_buffer_sz = 0,
+ broadcast_timer = undefined,
+ force_gc_timer = undefined,
+ txn_executor = TxnFun,
+ shutting_down = false }}.
+
+
+handle_call({confirmed_broadcast, _Msg}, _From,
+ State = #state { shutting_down = {true, _} }) ->
+ reply(shutting_down, State);
+
+handle_call({confirmed_broadcast, _Msg}, _From,
+ State = #state { members_state = undefined }) ->
+ reply(not_joined, State);
+
+handle_call({confirmed_broadcast, Msg}, _From,
+ State = #state { self = Self,
+ right = {Self, undefined},
+ module = Module,
+ callback_args = Args }) ->
+ handle_callback_result({Module:handle_msg(Args, get_pid(Self), Msg),
+ ok, State});
+
+handle_call({confirmed_broadcast, Msg}, From, State) ->
+ {Result, State1 = #state { pub_count = PubCount, confirms = Confirms }} =
+ internal_broadcast(Msg, 0, State),
+ Confirms1 = queue:in({PubCount, From}, Confirms),
+ handle_callback_result({Result, flush_broadcast_buffer(
+ State1 #state { confirms = Confirms1 })});
+
+handle_call(info, _From,
+ State = #state { members_state = undefined }) ->
+ reply(not_joined, State);
+
+handle_call(info, _From, State = #state { group_name = GroupName,
+ module = Module,
+ view = View }) ->
+ reply([{group_name, GroupName},
+ {module, Module},
+ {group_members, get_pids(alive_view_members(View))}], State);
+
+handle_call({add_on_right, _NewMember}, _From,
+ State = #state { members_state = undefined }) ->
+ reply(not_ready, State);
+
+handle_call({add_on_right, NewMember}, _From,
+ State = #state { self = Self,
+ group_name = GroupName,
+ members_state = MembersState,
+ txn_executor = TxnFun }) ->
+ try
+ Group = record_new_member_in_group(
+ NewMember, Self, GroupName, TxnFun),
+ View1 = group_to_view(check_membership(Self, Group)),
+ MembersState1 = remove_erased_members(MembersState, View1),
+ ok = send_right(NewMember, View1,
+ {catchup, Self, prepare_members_state(MembersState1)}),
+ {Result, State1} = change_view(View1, State #state {
+ members_state = MembersState1 }),
+ handle_callback_result({Result, {ok, Group}, State1})
+ catch
+ lost_membership ->
+ {stop, shutdown, State}
+ end.
+
+%% add_on_right causes a catchup to be sent immediately from the left,
+%% so we can never see this from the left neighbour. However, it's
+%% possible for the right neighbour to send us a check_neighbours
+%% immediately before that. We can't possibly handle it, but if we're
+%% in this state we know a catchup is coming imminently anyway. So
+%% just ignore it.
+handle_cast({?TAG, _ReqVer, check_neighbours},
+ State = #state { members_state = undefined }) ->
+ noreply(State);
+
+handle_cast({?TAG, ReqVer, Msg},
+ State = #state { view = View,
+ self = Self,
+ members_state = MembersState,
+ group_name = GroupName }) ->
+ try
+ {Result, State1} =
+ case needs_view_update(ReqVer, View) of
+ true ->
+ View1 = group_to_view(
+ check_membership(Self,
+ dirty_read_group(GroupName))),
+ MemberState1 = remove_erased_members(MembersState, View1),
+ change_view(View1, State #state {
+ members_state = MemberState1 });
+ false -> {ok, State}
+ end,
+ handle_callback_result(
+ if_callback_success(
+ Result, fun handle_msg_true/3, fun handle_msg_false/3, Msg, State1))
+ catch
+ lost_membership ->
+ {stop, shutdown, State}
+ end;
+
+handle_cast({broadcast, _Msg, _SizeHint},
+ State = #state { shutting_down = {true, _} }) ->
+ noreply(State);
+
+handle_cast({broadcast, _Msg, _SizeHint},
+ State = #state { members_state = undefined }) ->
+ noreply(State);
+
+handle_cast({broadcast, Msg, _SizeHint},
+ State = #state { self = Self,
+ right = {Self, undefined},
+ module = Module,
+ callback_args = Args }) ->
+ handle_callback_result({Module:handle_msg(Args, get_pid(Self), Msg),
+ State});
+
+handle_cast({broadcast, Msg, SizeHint}, State) ->
+ {Result, State1} = internal_broadcast(Msg, SizeHint, State),
+ handle_callback_result({Result, maybe_flush_broadcast_buffer(State1)});
+
+handle_cast(join, State = #state { self = Self,
+ group_name = GroupName,
+ members_state = undefined,
+ module = Module,
+ callback_args = Args,
+ txn_executor = TxnFun }) ->
+ try
+ View = join_group(Self, GroupName, TxnFun),
+ MembersState =
+ case alive_view_members(View) of
+ [Self] -> blank_member_state();
+ _ -> undefined
+ end,
+ State1 = check_neighbours(State #state { view = View,
+ members_state = MembersState }),
+ handle_callback_result(
+ {Module:joined(Args, get_pids(all_known_members(View))), State1})
+ catch
+ lost_membership ->
+ {stop, shutdown, State}
+ end;
+
+handle_cast({validate_members, OldMembers},
+ State = #state { view = View,
+ module = Module,
+ callback_args = Args }) ->
+ NewMembers = get_pids(all_known_members(View)),
+ Births = NewMembers -- OldMembers,
+ Deaths = OldMembers -- NewMembers,
+ case {Births, Deaths} of
+ {[], []} -> noreply(State);
+ _ -> Result = Module:members_changed(Args, Births, Deaths),
+ handle_callback_result({Result, State})
+ end;
+
+handle_cast(leave, State) ->
+ {stop, normal, State}.
+
+
+handle_info(force_gc, State) ->
+ garbage_collect(),
+ noreply(State #state { force_gc_timer = undefined });
+
+handle_info(flush, State) ->
+ noreply(
+ flush_broadcast_buffer(State #state { broadcast_timer = undefined }));
+
+handle_info(timeout, State) ->
+ noreply(flush_broadcast_buffer(State));
+
+handle_info({'DOWN', _MRef, process, _Pid, _Reason},
+ State = #state { shutting_down =
+ {true, {shutdown, ring_shutdown}} }) ->
+ noreply(State);
+handle_info({'DOWN', MRef, process, _Pid, Reason},
+ State = #state { self = Self,
+ left = Left,
+ right = Right,
+ group_name = GroupName,
+ confirms = Confirms,
+ txn_executor = TxnFun }) ->
+ try
+ check_membership(GroupName),
+ Member = case {Left, Right} of
+ {{Member1, MRef}, _} -> Member1;
+ {_, {Member1, MRef}} -> Member1;
+ _ -> undefined
+ end,
+ case {Member, Reason} of
+ {undefined, _} ->
+ noreply(State);
+ {_, {shutdown, ring_shutdown}} ->
+ noreply(State);
+ _ ->
+ %% In the event of a partial partition we could see another member
+ %% go down and then remove them from Mnesia. While they can
+ %% recover from this they'd have to restart the queue - not
+ %% ideal. So let's sleep here briefly just in case this was caused
+ %% by a partial partition; in which case by the time we record the
+ %% member death in Mnesia we will probably be in a full
+ %% partition and will not be assassinating another member.
+ timer:sleep(100),
+ View1 = group_to_view(record_dead_member_in_group(Self,
+ Member, GroupName, TxnFun, true)),
+ handle_callback_result(
+ case alive_view_members(View1) of
+ [Self] -> maybe_erase_aliases(
+ State #state {
+ members_state = blank_member_state(),
+ confirms = purge_confirms(Confirms) },
+ View1);
+ _ -> change_view(View1, State)
+ end)
+ end
+ catch
+ lost_membership ->
+ {stop, shutdown, State}
+ end;
+handle_info(_, State) ->
+ %% Discard any unexpected messages, such as late replies from neighbour_call/2
+ %% TODO: For #gm_group{} related info messages, it could be worthwhile to
+ %% call change_view/2, as this might reflect an alteration in the gm group,
+ %% meaning we now need to update our state. See rabbitmq-server#914.
+ noreply(State).
+
+terminate(Reason, #state { module = Module, callback_args = Args }) ->
+ Module:handle_terminate(Args, Reason).
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+prioritise_info(flush, _Len, _State) ->
+ 1;
+%% DOWN messages should not overtake initial catchups; if they do we
+%% will receive a DOWN we do not know what to do with.
+prioritise_info({'DOWN', _MRef, process, _Pid, _Reason}, _Len,
+ #state { members_state = undefined }) ->
+ 0;
+%% We should not prioritise DOWN messages from our left since
+%% otherwise the DOWN can overtake any last activity from the left,
+%% causing that activity to be lost.
+prioritise_info({'DOWN', _MRef, process, LeftPid, _Reason}, _Len,
+ #state { left = {{_LeftVer, LeftPid}, _MRef2} }) ->
+ 0;
+%% But prioritise all other DOWNs - we want to make sure we are not
+%% sending activity into the void for too long because our right is
+%% down but we don't know it.
+prioritise_info({'DOWN', _MRef, process, _Pid, _Reason}, _Len, _State) ->
+ 1;
+prioritise_info(_, _Len, _State) ->
+ 0.
+
+
+handle_msg(check_neighbours, State) ->
+ %% no-op - it's already been done by the calling handle_cast
+ {ok, State};
+
+handle_msg({catchup, Left, MembersStateLeft},
+ State = #state { self = Self,
+ left = {Left, _MRefL},
+ right = {Right, _MRefR},
+ view = View,
+ members_state = undefined }) ->
+ ok = send_right(Right, View, {catchup, Self, MembersStateLeft}),
+ MembersStateLeft1 = build_members_state(MembersStateLeft),
+ {ok, State #state { members_state = MembersStateLeft1 }};
+
+handle_msg({catchup, Left, MembersStateLeft},
+ State = #state { self = Self,
+ left = {Left, _MRefL},
+ view = View,
+ members_state = MembersState })
+ when MembersState =/= undefined ->
+ MembersStateLeft1 = build_members_state(MembersStateLeft),
+ AllMembers = lists:usort(maps:keys(MembersState) ++
+ maps:keys(MembersStateLeft1)),
+ {MembersState1, Activity} =
+ lists:foldl(
+ fun (Id, MembersStateActivity) ->
+ #member { pending_ack = PALeft, last_ack = LA } =
+ find_member_or_blank(Id, MembersStateLeft1),
+ with_member_acc(
+ fun (#member { pending_ack = PA } = Member, Activity1) ->
+ case is_member_alias(Id, Self, View) of
+ true ->
+ {_AcksInFlight, Pubs, _PA1} =
+ find_prefix_common_suffix(PALeft, PA),
+ {Member #member { last_ack = LA },
+ activity_cons(Id, pubs_from_queue(Pubs),
+ [], Activity1)};
+ false ->
+ {Acks, _Common, Pubs} =
+ find_prefix_common_suffix(PA, PALeft),
+ {Member,
+ activity_cons(Id, pubs_from_queue(Pubs),
+ acks_from_queue(Acks),
+ Activity1)}
+ end
+ end, Id, MembersStateActivity)
+ end, {MembersState, activity_nil()}, AllMembers),
+ handle_msg({activity, Left, activity_finalise(Activity)},
+ State #state { members_state = MembersState1 });
+
+handle_msg({catchup, _NotLeft, _MembersState}, State) ->
+ {ok, State};
+
+handle_msg({activity, Left, Activity},
+ State = #state { self = Self,
+ group_name = GroupName,
+ left = {Left, _MRefL},
+ view = View,
+ members_state = MembersState,
+ confirms = Confirms })
+ when MembersState =/= undefined ->
+ try
+ %% If we have to stop, do it ASAP so that we avoid any ack confirmation.
+ %% Membership must be checked again by erase_members_in_group, as the
+ %% node can be marked as dead in the meantime
+ check_membership(GroupName),
+ {MembersState1, {Confirms1, Activity1}} =
+ calculate_activity(MembersState, Confirms, Activity, Self, View),
+ State1 = State #state { members_state = MembersState1,
+ confirms = Confirms1 },
+ Activity3 = activity_finalise(Activity1),
+ ok = maybe_send_activity(Activity3, State1),
+ {Result, State2} = maybe_erase_aliases(State1, View),
+ if_callback_success(
+ Result, fun activity_true/3, fun activity_false/3, Activity3, State2)
+ catch
+ lost_membership ->
+ {{stop, shutdown}, State}
+ end;
+
+handle_msg({activity, _NotLeft, _Activity}, State) ->
+ {ok, State}.
+
+
+noreply(State) ->
+ {noreply, ensure_timers(State), flush_timeout(State)}.
+
+reply(Reply, State) ->
+ {reply, Reply, ensure_timers(State), flush_timeout(State)}.
+
+ensure_timers(State) ->
+ ensure_force_gc_timer(ensure_broadcast_timer(State)).
+
+flush_timeout(#state{broadcast_buffer = []}) -> infinity;
+flush_timeout(_) -> 0.
+
+ensure_force_gc_timer(State = #state { force_gc_timer = TRef })
+ when is_reference(TRef) ->
+ State;
+ensure_force_gc_timer(State = #state { force_gc_timer = undefined }) ->
+ TRef = erlang:send_after(?FORCE_GC_TIMER, self(), force_gc),
+ State #state { force_gc_timer = TRef }.
+
+ensure_broadcast_timer(State = #state { broadcast_buffer = [],
+ broadcast_timer = undefined }) ->
+ State;
+ensure_broadcast_timer(State = #state { broadcast_buffer = [],
+ broadcast_timer = TRef }) ->
+ _ = erlang:cancel_timer(TRef),
+ State #state { broadcast_timer = undefined };
+ensure_broadcast_timer(State = #state { broadcast_timer = undefined }) ->
+ TRef = erlang:send_after(?BROADCAST_TIMER, self(), flush),
+ State #state { broadcast_timer = TRef };
+ensure_broadcast_timer(State) ->
+ State.
+
+internal_broadcast(Msg, SizeHint,
+ State = #state { self = Self,
+ pub_count = PubCount,
+ module = Module,
+ callback_args = Args,
+ broadcast_buffer = Buffer,
+ broadcast_buffer_sz = BufferSize }) ->
+ PubCount1 = PubCount + 1,
+ {Module:handle_msg(Args, get_pid(Self), Msg),
+ State #state { pub_count = PubCount1,
+ broadcast_buffer = [{PubCount1, Msg} | Buffer],
+ broadcast_buffer_sz = BufferSize + SizeHint}}.
+
+%% The Erlang distribution mechanism has an interesting quirk - it
+%% will kill the VM cold with "Absurdly large distribution output data
+%% buffer" if you attempt to send a message which serialises out to
+%% more than 2^31 bytes in size. It's therefore a very good idea to
+%% make sure that we don't exceed that size!
+%%
+%% Now, we could figure out the size of messages as they come in using
+%% size(term_to_binary(Msg)) or similar. The trouble is, that requires
+%% us to serialise the message only to throw the serialised form
+%% away. Hard to believe that's a sensible thing to do. So instead we
+%% accept a size hint from the application, via broadcast/3. This size
+%% hint can be the size of anything in the message which we expect
+%% could be large, and we just ignore the size of any small bits of
+%% the message term. Therefore MAX_BUFFER_SIZE is set somewhat
+%% conservatively at 100MB - but the buffer is only to allow us to
+%% buffer tiny messages anyway, so 100MB is plenty.
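+%%
+%% For example (illustrative; the names are hypothetical), a caller
+%% whose message is dominated by one large binary can pass that
+%% binary's size as the hint and ignore the rest of the term:
+%%
+%%   Payload = term_to_binary(Event),
+%%   ok = gm:broadcast(GmPid, {event, Payload}, byte_size(Payload)).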
+
+maybe_flush_broadcast_buffer(State = #state{broadcast_buffer_sz = Size}) ->
+ case Size > ?MAX_BUFFER_SIZE of
+ true -> flush_broadcast_buffer(State);
+ false -> State
+ end.
+
+flush_broadcast_buffer(State = #state { broadcast_buffer = [] }) ->
+ State;
+flush_broadcast_buffer(State = #state { self = Self,
+ members_state = MembersState,
+ broadcast_buffer = Buffer,
+ pub_count = PubCount }) ->
+ [{PubCount, _Msg}|_] = Buffer, %% ASSERTION match on PubCount
+ Pubs = lists:reverse(Buffer),
+ Activity = activity_cons(Self, Pubs, [], activity_nil()),
+ ok = maybe_send_activity(activity_finalise(Activity), State),
+ MembersState1 = with_member(
+ fun (Member = #member { pending_ack = PA }) ->
+ PA1 = queue:join(PA, queue:from_list(Pubs)),
+ Member #member { pending_ack = PA1,
+ last_pub = PubCount }
+ end, Self, MembersState),
+ State #state { members_state = MembersState1,
+ broadcast_buffer = [],
+ broadcast_buffer_sz = 0 }.
+
+%% ---------------------------------------------------------------------------
+%% View construction and inspection
+%% ---------------------------------------------------------------------------
+
+needs_view_update(ReqVer, {Ver, _View}) -> Ver < ReqVer.
+
+view_version({Ver, _View}) -> Ver.
+
+is_member_alive({dead, _Member}) -> false;
+is_member_alive(_) -> true.
+
+is_member_alias(Self, Self, _View) ->
+ true;
+is_member_alias(Member, Self, View) ->
+ ?SETS:is_element(Member,
+ ((fetch_view_member(Self, View)) #view_member.aliases)).
+
+dead_member_id({dead, Member}) -> Member.
+
+store_view_member(VMember = #view_member { id = Id }, {Ver, View}) ->
+ {Ver, maps:put(Id, VMember, View)}.
+
+with_view_member(Fun, View, Id) ->
+ store_view_member(Fun(fetch_view_member(Id, View)), View).
+
+fetch_view_member(Id, {_Ver, View}) -> maps:get(Id, View).
+
+find_view_member(Id, {_Ver, View}) -> maps:find(Id, View).
+
+blank_view(Ver) -> {Ver, maps:new()}.
+
+alive_view_members({_Ver, View}) -> maps:keys(View).
+
+all_known_members({_Ver, View}) ->
+ maps:fold(
+ fun (Member, #view_member { aliases = Aliases }, Acc) ->
+ ?SETS:to_list(Aliases) ++ [Member | Acc]
+ end, [], View).
+
+group_to_view(#gm_group { members = Members, version = Ver }) ->
+ Alive = lists:filter(fun is_member_alive/1, Members),
+ [_|_] = Alive, %% ASSERTION - can't have all dead members
+ %% Tripling the alive list makes it effectively circular, so the
+ %% sliding [Left, Middle, Right] window in link_view/2 also wraps
+ %% around the ends, giving every member both neighbours.
+ add_aliases(link_view(Alive ++ Alive ++ Alive, blank_view(Ver)), Members).
+
+link_view([Left, Middle, Right | Rest], View) ->
+ case find_view_member(Middle, View) of
+ error ->
+ link_view(
+ [Middle, Right | Rest],
+ store_view_member(#view_member { id = Middle,
+ aliases = ?SETS:new(),
+ left = Left,
+ right = Right }, View));
+ {ok, _} ->
+ View
+ end;
+link_view(_, View) ->
+ View.
+
+add_aliases(View, Members) ->
+ Members1 = ensure_alive_suffix(Members),
+ {EmptyDeadSet, View1} =
+ lists:foldl(
+ fun (Member, {DeadAcc, ViewAcc}) ->
+ case is_member_alive(Member) of
+ true ->
+ {?SETS:new(),
+ with_view_member(
+ fun (VMember =
+ #view_member { aliases = Aliases }) ->
+ VMember #view_member {
+ aliases = ?SETS:union(Aliases, DeadAcc) }
+ end, ViewAcc, Member)};
+ false ->
+ {?SETS:add_element(dead_member_id(Member), DeadAcc),
+ ViewAcc}
+ end
+ end, {?SETS:new(), View}, Members1),
+ 0 = ?SETS:size(EmptyDeadSet), %% ASSERTION
+ View1.
+
+ensure_alive_suffix(Members) ->
+ queue:to_list(ensure_alive_suffix1(queue:from_list(Members))).
+
+ensure_alive_suffix1(MembersQ) ->
+ {{value, Member}, MembersQ1} = queue:out_r(MembersQ),
+ case is_member_alive(Member) of
+ true -> MembersQ;
+ false -> ensure_alive_suffix1(queue:in_r(Member, MembersQ1))
+ end.
+
+
+%% ---------------------------------------------------------------------------
+%% View modification
+%% ---------------------------------------------------------------------------
+
+join_group(Self, GroupName, TxnFun) ->
+ join_group(Self, GroupName, dirty_read_group(GroupName), TxnFun).
+
+join_group(Self, GroupName, {error, not_found}, TxnFun) ->
+ join_group(Self, GroupName,
+ prune_or_create_group(Self, GroupName, TxnFun), TxnFun);
+join_group(Self, _GroupName, #gm_group { members = [Self] } = Group, _TxnFun) ->
+ group_to_view(Group);
+join_group(Self, GroupName, #gm_group { members = Members } = Group, TxnFun) ->
+ case lists:member(Self, Members) of
+ true ->
+ group_to_view(Group);
+ false ->
+ case lists:filter(fun is_member_alive/1, Members) of
+ [] ->
+ join_group(Self, GroupName,
+ prune_or_create_group(Self, GroupName, TxnFun),
+ TxnFun);
+ Alive ->
+ %% Join on the right of a randomly chosen alive member;
+ %% if that member turns out to be dead, record its death
+ %% and retry.
+ Left = lists:nth(rand:uniform(length(Alive)), Alive),
+ Handler =
+ fun () ->
+ join_group(
+ Self, GroupName,
+ record_dead_member_in_group(Self,
+ Left, GroupName, TxnFun, false),
+ TxnFun)
+ end,
+ try
+ case neighbour_call(Left, {add_on_right, Self}) of
+ {ok, Group1} -> group_to_view(Group1);
+ not_ready -> join_group(Self, GroupName, TxnFun)
+ end
+ catch
+ exit:{R, _}
+ when R =:= noproc; R =:= normal; R =:= shutdown ->
+ Handler();
+ exit:{{R, _}, _}
+ when R =:= nodedown; R =:= shutdown ->
+ Handler()
+ end
+ end
+ end.
+
+dirty_read_group(GroupName) ->
+ case mnesia:dirty_read(?GROUP_TABLE, GroupName) of
+ [] -> {error, not_found};
+ [Group] -> Group
+ end.
+
+read_group(GroupName) ->
+ case mnesia:read({?GROUP_TABLE, GroupName}) of
+ [] -> {error, not_found};
+ [Group] -> Group
+ end.
+
+write_group(Group) -> mnesia:write(?GROUP_TABLE, Group, write), Group.
+
+prune_or_create_group(Self, GroupName, TxnFun) ->
+ TxnFun(
+ fun () ->
+ GroupNew = #gm_group { name = GroupName,
+ members = [Self],
+ version = get_version(Self) },
+ case read_group(GroupName) of
+ {error, not_found} ->
+ write_group(GroupNew);
+ Group = #gm_group { members = Members } ->
+ case lists:any(fun is_member_alive/1, Members) of
+ true -> Group;
+ false -> write_group(GroupNew)
+ end
+ end
+ end).
+
+record_dead_member_in_group(Self, Member, GroupName, TxnFun, Verify) ->
+ Fun =
+ fun () ->
+ try
+ Group = #gm_group { members = Members, version = Ver } =
+ case Verify of
+ true ->
+ check_membership(Self, read_group(GroupName));
+ false ->
+ check_group(read_group(GroupName))
+ end,
+ case lists:splitwith(
+ fun (Member1) -> Member1 =/= Member end, Members) of
+ {_Members1, []} -> %% not found - already recorded dead
+ Group;
+ {Members1, [Member | Members2]} ->
+ Members3 = Members1 ++ [{dead, Member} | Members2],
+ write_group(Group #gm_group { members = Members3,
+ version = Ver + 1 })
+ end
+ catch
+ lost_membership ->
+ %% The transaction must not crash abruptly; instead, let
+ %% the gen_server stop normally
+ {error, lost_membership}
+ end
+ end,
+ handle_lost_membership_in_txn(TxnFun, Fun).
+
+handle_lost_membership_in_txn(TxnFun, Fun) ->
+ case TxnFun(Fun) of
+ {error, lost_membership = T} ->
+ throw(T);
+ Any ->
+ Any
+ end.
+
+record_new_member_in_group(NewMember, Left, GroupName, TxnFun) ->
+ Fun =
+ fun () ->
+ try
+ Group = #gm_group { members = Members, version = Ver } =
+ check_membership(Left, read_group(GroupName)),
+ case lists:member(NewMember, Members) of
+ true ->
+ %% This avoids duplicates during partial partitions,
+ %% as inconsistent views might occur while they last
+ rabbit_log:warning("(~p) GM avoiding duplicate of ~p",
+ [self(), NewMember]),
+ Group;
+ false ->
+ {Prefix, [Left | Suffix]} =
+ lists:splitwith(fun (M) -> M =/= Left end, Members),
+ write_group(Group #gm_group {
+ members = Prefix ++ [Left, NewMember | Suffix],
+ version = Ver + 1 })
+ end
+ catch
+ lost_membership ->
+ %% The transaction must not crash abruptly; instead, let
+ %% the gen_server stop normally
+ {error, lost_membership}
+ end
+ end,
+ handle_lost_membership_in_txn(TxnFun, Fun).
+
+erase_members_in_group(Self, Members, GroupName, TxnFun) ->
+ DeadMembers = [{dead, Id} || Id <- Members],
+ Fun =
+ fun () ->
+ try
+ Group = #gm_group { members = [_|_] = Members1, version = Ver } =
+ check_membership(Self, read_group(GroupName)),
+ case Members1 -- DeadMembers of
+ Members1 -> Group;
+ Members2 -> write_group(
+ Group #gm_group { members = Members2,
+ version = Ver + 1 })
+ end
+ catch
+ lost_membership ->
+ %% The transaction must not crash abruptly; instead, let
+ %% the gen_server stop normally
+ {error, lost_membership}
+ end
+ end,
+ handle_lost_membership_in_txn(TxnFun, Fun).
+
+maybe_erase_aliases(State = #state { self = Self,
+ group_name = GroupName,
+ members_state = MembersState,
+ txn_executor = TxnFun }, View) ->
+ #view_member { aliases = Aliases } = fetch_view_member(Self, View),
+ {Erasable, MembersState1}
+ = ?SETS:fold(
+ fun (Id, {ErasableAcc, MembersStateAcc} = Acc) ->
+ #member { last_pub = LP, last_ack = LA } =
+ find_member_or_blank(Id, MembersState),
+ case can_erase_view_member(Self, Id, LA, LP) of
+ true -> {[Id | ErasableAcc],
+ erase_member(Id, MembersStateAcc)};
+ false -> Acc
+ end
+ end, {[], MembersState}, Aliases),
+ View1 = case Erasable of
+ [] -> View;
+ _ -> group_to_view(
+ erase_members_in_group(Self, Erasable, GroupName, TxnFun))
+ end,
+ change_view(View1, State #state { members_state = MembersState1 }).
+
+can_erase_view_member(Self, Self, _LA, _LP) -> false;
+can_erase_view_member(_Self, _Id, N, N) -> true;
+can_erase_view_member(_Self, _Id, _LA, _LP) -> false.
+
+neighbour_cast(N, Msg) -> ?INSTR_MOD:cast(get_pid(N), Msg).
+neighbour_call(N, Msg) -> ?INSTR_MOD:call(get_pid(N), Msg, infinity).
+
+%% ---------------------------------------------------------------------------
+%% View monitoring and maintenance
+%% ---------------------------------------------------------------------------
+
+ensure_neighbour(_Ver, Self, {Self, undefined}, Self) ->
+ {Self, undefined};
+ensure_neighbour(Ver, Self, {Self, undefined}, RealNeighbour) ->
+ ok = neighbour_cast(RealNeighbour, {?TAG, Ver, check_neighbours}),
+ {RealNeighbour, maybe_monitor(RealNeighbour, Self)};
+ensure_neighbour(_Ver, _Self, {RealNeighbour, MRef}, RealNeighbour) ->
+ {RealNeighbour, MRef};
+ensure_neighbour(Ver, Self, {RealNeighbour, MRef}, Neighbour) ->
+ true = ?INSTR_MOD:demonitor(MRef),
+ Msg = {?TAG, Ver, check_neighbours},
+ ok = neighbour_cast(RealNeighbour, Msg),
+ ok = case Neighbour of
+ Self -> ok;
+ _ -> neighbour_cast(Neighbour, Msg)
+ end,
+ {Neighbour, maybe_monitor(Neighbour, Self)}.
+
+maybe_monitor( Self, Self) -> undefined;
+maybe_monitor(Other, _Self) -> ?INSTR_MOD:monitor(get_pid(Other)).
+
+check_neighbours(State = #state { self = Self,
+ left = Left,
+ right = Right,
+ view = View,
+ broadcast_buffer = Buffer }) ->
+ #view_member { left = VLeft, right = VRight }
+ = fetch_view_member(Self, View),
+ Ver = view_version(View),
+ Left1 = ensure_neighbour(Ver, Self, Left, VLeft),
+ Right1 = ensure_neighbour(Ver, Self, Right, VRight),
+ Buffer1 = case Right1 of
+ {Self, undefined} -> [];
+ _ -> Buffer
+ end,
+ State1 = State #state { left = Left1, right = Right1,
+ broadcast_buffer = Buffer1 },
+ ok = maybe_send_catchup(Right, State1),
+ State1.
+
+maybe_send_catchup(Right, #state { right = Right }) ->
+ ok;
+maybe_send_catchup(_Right, #state { self = Self,
+ right = {Self, undefined} }) ->
+ ok;
+maybe_send_catchup(_Right, #state { members_state = undefined }) ->
+ ok;
+maybe_send_catchup(_Right, #state { self = Self,
+ right = {Right, _MRef},
+ view = View,
+ members_state = MembersState }) ->
+ send_right(Right, View,
+ {catchup, Self, prepare_members_state(MembersState)}).
+
+
+%% ---------------------------------------------------------------------------
+%% Catch_up delta detection
+%% ---------------------------------------------------------------------------
+
+find_prefix_common_suffix(A, B) ->
+ {Prefix, A1} = find_prefix(A, B, queue:new()),
+ {Common, Suffix} = find_common(A1, B, queue:new()),
+ {Prefix, Common, Suffix}.
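+
+%% Worked example (illustrative): with pub-numbered entries, for
+%% A = [{1,a},{2,b},{3,c}] and B = [{2,b},{3,c},{4,d}] (as queues),
+%% find_prefix_common_suffix(A, B) yields
+%% Prefix = [{1,a}], Common = [{2,b},{3,c}], Suffix = [{4,d}].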
+
+%% Returns the elements of A that occur before the first element of B,
+%% plus the remainder of A.
+find_prefix(A, B, Prefix) ->
+ case {queue:out(A), queue:out(B)} of
+ {{{value, Val}, _A1}, {{value, Val}, _B1}} ->
+ {Prefix, A};
+ {{empty, A1}, {{value, _A}, _B1}} ->
+ {Prefix, A1};
+ {{{value, {NumA, _MsgA} = Val}, A1},
+ {{value, {NumB, _MsgB}}, _B1}} when NumA < NumB ->
+ find_prefix(A1, B, queue:in(Val, Prefix));
+ {_, {empty, _B1}} ->
+ {A, Prefix} %% Prefix will be empty here
+ end.
+
+%% A should be a prefix of B. Returns the commonality plus the
+%% remainder of B.
+find_common(A, B, Common) ->
+ case {queue:out(A), queue:out(B)} of
+ {{{value, Val}, A1}, {{value, Val}, B1}} ->
+ find_common(A1, B1, queue:in(Val, Common));
+ {{empty, _A}, _} ->
+ {Common, B};
+ %% Drop value from B.
+ %% Match value to avoid infinite loop, since {empty, B} = queue:out(B).
+ {_, {{value, _}, B1}} ->
+ find_common(A, B1, Common);
+ %% Drop value from A. Empty A should be matched by the second clause.
+ {{{value, _}, A1}, _} ->
+ find_common(A1, B, Common)
+ end.
+
+
+%% ---------------------------------------------------------------------------
+%% Members helpers
+%% ---------------------------------------------------------------------------
+
+with_member(Fun, Id, MembersState) ->
+ store_member(
+ Id, Fun(find_member_or_blank(Id, MembersState)), MembersState).
+
+with_member_acc(Fun, Id, {MembersState, Acc}) ->
+ {MemberState, Acc1} = Fun(find_member_or_blank(Id, MembersState), Acc),
+ {store_member(Id, MemberState, MembersState), Acc1}.
+
+find_member_or_blank(Id, MembersState) ->
+ case maps:find(Id, MembersState) of
+ {ok, Result} -> Result;
+ error -> blank_member()
+ end.
+
+erase_member(Id, MembersState) -> maps:remove(Id, MembersState).
+
+blank_member() ->
+ #member { pending_ack = queue:new(), last_pub = -1, last_ack = -1 }.
+
+blank_member_state() -> maps:new().
+
+store_member(Id, MemberState, MembersState) ->
+ maps:put(Id, MemberState, MembersState).
+
+prepare_members_state(MembersState) -> maps:to_list(MembersState).
+
+build_members_state(MembersStateList) -> maps:from_list(MembersStateList).
+
+make_member(GroupName) ->
+ {case dirty_read_group(GroupName) of
+ #gm_group { version = Version } -> Version;
+ {error, not_found} -> ?VERSION_START
+ end, self()}.
+
+remove_erased_members(MembersState, View) ->
+ lists:foldl(fun (Id, MembersState1) ->
+ store_member(Id, find_member_or_blank(Id, MembersState),
+ MembersState1)
+ end, blank_member_state(), all_known_members(View)).
+
+get_version({Version, _Pid}) -> Version.
+
+get_pid({_Version, Pid}) -> Pid.
+
+get_pids(Ids) -> [Pid || {_Version, Pid} <- Ids].
+
+%% ---------------------------------------------------------------------------
+%% Activity assembly
+%% ---------------------------------------------------------------------------
+
+activity_nil() -> queue:new().
+
+activity_cons( _Id, [], [], Tail) -> Tail;
+activity_cons(Sender, Pubs, Acks, Tail) -> queue:in({Sender, Pubs, Acks}, Tail).
+
+activity_finalise(Activity) -> queue:to_list(Activity).
+
+maybe_send_activity([], _State) ->
+ ok;
+maybe_send_activity(Activity, #state { self = Self,
+ right = {Right, _MRefR},
+ view = View }) ->
+ send_right(Right, View, {activity, Self, Activity}).
+
+send_right(Right, View, Msg) ->
+ ok = neighbour_cast(Right, {?TAG, view_version(View), Msg}).
+
+calculate_activity(MembersState, Confirms, Activity, Self, View) ->
+ lists:foldl(
+ fun ({Id, Pubs, Acks}, MembersStateConfirmsActivity) ->
+ with_member_acc(
+ fun (Member = #member { pending_ack = PA,
+ last_pub = LP,
+ last_ack = LA },
+ {Confirms2, Activity2}) ->
+ case is_member_alias(Id, Self, View) of
+ true ->
+ {ToAck, PA1} =
+ find_common(queue_from_pubs(Pubs), PA,
+ queue:new()),
+ LA1 = last_ack(Acks, LA),
+ AckNums = acks_from_queue(ToAck),
+ Confirms3 = maybe_confirm(
+ Self, Id, Confirms2, AckNums),
+ {Member #member { pending_ack = PA1,
+ last_ack = LA1 },
+ {Confirms3,
+ activity_cons(
+ Id, [], AckNums, Activity2)}};
+ false ->
+ PA1 = apply_acks(Acks, join_pubs(PA, Pubs)),
+ LA1 = last_ack(Acks, LA),
+ LP1 = last_pub(Pubs, LP),
+ {Member #member { pending_ack = PA1,
+ last_pub = LP1,
+ last_ack = LA1 },
+ {Confirms2,
+ activity_cons(Id, Pubs, Acks, Activity2)}}
+ end
+ end, Id, MembersStateConfirmsActivity)
+ end, {MembersState, {Confirms, activity_nil()}}, Activity).
+
+callback(Args, Module, Activity) ->
+ Result =
+ lists:foldl(
+ fun ({Id, Pubs, _Acks}, {Args1, Module1, ok}) ->
+ lists:foldl(fun ({_PubNum, Pub}, Acc = {Args2, Module2, ok}) ->
+ case Module2:handle_msg(
+ Args2, get_pid(Id), Pub) of
+ ok ->
+ Acc;
+ {become, Module3, Args3} ->
+ {Args3, Module3, ok};
+ {stop, _Reason} = Error ->
+ Error
+ end;
+ (_, Error = {stop, _Reason}) ->
+ Error
+ end, {Args1, Module1, ok}, Pubs);
+ (_, Error = {stop, _Reason}) ->
+ Error
+ end, {Args, Module, ok}, Activity),
+ case Result of
+ {Args, Module, ok} -> ok;
+ {Args1, Module1, ok} -> {become, Module1, Args1};
+ {stop, _Reason} = Error -> Error
+ end.
+
+change_view(View, State = #state { view = View0,
+ module = Module,
+ callback_args = Args }) ->
+ OldMembers = all_known_members(View0),
+ NewMembers = all_known_members(View),
+ Births = NewMembers -- OldMembers,
+ Deaths = OldMembers -- NewMembers,
+ Result = case {Births, Deaths} of
+ {[], []} -> ok;
+ _ -> Module:members_changed(
+ Args, get_pids(Births), get_pids(Deaths))
+ end,
+ {Result, check_neighbours(State #state { view = View })}.
+
+handle_callback_result({Result, State}) ->
+ if_callback_success(
+ Result, fun no_reply_true/3, fun no_reply_false/3, undefined, State);
+handle_callback_result({Result, Reply, State}) ->
+ if_callback_success(
+ Result, fun reply_true/3, fun reply_false/3, Reply, State).
+
+no_reply_true (_Result, _Undefined, State) -> noreply(State).
+no_reply_false({stop, Reason}, _Undefined, State) -> {stop, Reason, State}.
+
+reply_true (_Result, Reply, State) -> reply(Reply, State).
+reply_false({stop, Reason}, Reply, State) -> {stop, Reason, Reply, State}.
+
+handle_msg_true (_Result, Msg, State) -> handle_msg(Msg, State).
+handle_msg_false(Result, _Msg, State) -> {Result, State}.
+
+activity_true(_Result, Activity, State = #state { module = Module,
+ callback_args = Args }) ->
+ {callback(Args, Module, Activity), State}.
+activity_false(Result, _Activity, State) ->
+ {Result, State}.
+
+if_callback_success(Result, True, False, Arg, State) ->
+ {NewResult, NewState} = maybe_stop(Result, State),
+ if_callback_success1(NewResult, True, False, Arg, NewState).
+
+if_callback_success1(ok, True, _False, Arg, State) ->
+ True(ok, Arg, State);
+if_callback_success1(
+ {become, Module, Args} = Result, True, _False, Arg, State) ->
+ True(Result, Arg, State #state { module = Module,
+ callback_args = Args });
+if_callback_success1({stop, _Reason} = Result, _True, False, Arg, State) ->
+ False(Result, Arg, State).
+
+maybe_stop({stop, Reason}, #state{ shutting_down = false } = State) ->
+ ShuttingDown = {true, Reason},
+ case has_pending_messages(State) of
+ true -> {ok, State #state{ shutting_down = ShuttingDown }};
+ false -> {{stop, Reason}, State #state{ shutting_down = ShuttingDown }}
+ end;
+maybe_stop(Result, #state{ shutting_down = false } = State) ->
+ {Result, State};
+maybe_stop(Result, #state{ shutting_down = {true, Reason} } = State) ->
+ case has_pending_messages(State) of
+ true -> {Result, State};
+ false -> {{stop, Reason}, State}
+ end.
+
+has_pending_messages(#state{ broadcast_buffer = Buffer })
+ when Buffer =/= [] ->
+ true;
+has_pending_messages(#state{ members_state = MembersState }) ->
+ MembersWithPubAckMismatches = maps:filter(fun(_Id, #member{last_pub = LP, last_ack = LA}) ->
+ LP =/= LA
+ end, MembersState),
+ 0 =/= maps:size(MembersWithPubAckMismatches).
+
+maybe_confirm(_Self, _Id, Confirms, []) ->
+ Confirms;
+maybe_confirm(Self, Self, Confirms, [PubNum | PubNums]) ->
+ case queue:out(Confirms) of
+ {empty, _Confirms} ->
+ Confirms;
+ {{value, {PubNum, From}}, Confirms1} ->
+ gen_server2:reply(From, ok),
+ maybe_confirm(Self, Self, Confirms1, PubNums);
+ {{value, {PubNum1, _From}}, _Confirms} when PubNum1 > PubNum ->
+ maybe_confirm(Self, Self, Confirms, PubNums)
+ end;
+maybe_confirm(_Self, _Id, Confirms, _PubNums) ->
+ Confirms.
+
+purge_confirms(Confirms) ->
+ _ = [gen_server2:reply(From, ok) || {_PubNum, From} <- queue:to_list(Confirms)],
+ queue:new().
+
+
+%% ---------------------------------------------------------------------------
+%% Msg transformation
+%% ---------------------------------------------------------------------------
+
+acks_from_queue(Q) -> [PubNum || {PubNum, _Msg} <- queue:to_list(Q)].
+
+pubs_from_queue(Q) -> queue:to_list(Q).
+
+queue_from_pubs(Pubs) -> queue:from_list(Pubs).
+
+apply_acks( [], Pubs) -> Pubs;
+apply_acks(List, Pubs) -> {_, Pubs1} = queue:split(length(List), Pubs),
+ Pubs1.
+
+join_pubs(Q, []) -> Q;
+join_pubs(Q, Pubs) -> queue:join(Q, queue_from_pubs(Pubs)).
+
+last_ack( [], LA) -> LA;
+last_ack(List, LA) -> LA1 = lists:last(List),
+ true = LA1 > LA, %% ASSERTION
+ LA1.
+
+last_pub( [], LP) -> LP;
+last_pub(List, LP) -> {PubNum, _Msg} = lists:last(List),
+ true = PubNum > LP, %% ASSERTION
+ PubNum.
+
+%% ---------------------------------------------------------------------------
+
+%% Uninstrumented versions
+
+call(Pid, Msg, Timeout) -> gen_server2:call(Pid, Msg, Timeout).
+cast(Pid, Msg) -> gen_server2:cast(Pid, Msg).
+monitor(Pid) -> erlang:monitor(process, Pid).
+demonitor(MRef) -> erlang:demonitor(MRef).
+
+check_membership(Self, #gm_group{members = M} = Group) ->
+ case lists:member(Self, M) of
+ true ->
+ Group;
+ false ->
+ throw(lost_membership)
+ end;
+check_membership(_Self, {error, not_found}) ->
+ throw(lost_membership).
+
+check_membership(GroupName) ->
+ case dirty_read_group(GroupName) of
+ #gm_group{members = M} ->
+ case lists:keymember(self(), 2, M) of
+ true ->
+ ok;
+ false ->
+ throw(lost_membership)
+ end;
+ {error, not_found} ->
+ throw(lost_membership)
+ end.
+
+check_group({error, not_found}) ->
+ throw(lost_membership);
+check_group(Any) ->
+ Any.
diff --git a/deps/rabbit/src/internal_user.erl b/deps/rabbit/src/internal_user.erl
new file mode 100644
index 0000000000..b2bdcb6785
--- /dev/null
+++ b/deps/rabbit/src/internal_user.erl
@@ -0,0 +1,216 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(internal_user).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-export([
+ new/0,
+ new/1,
+ record_version_to_use/0,
+ fields/0,
+ fields/1,
+ upgrade/1,
+ upgrade_to/2,
+ pattern_match_all/0,
+ get_username/1,
+ get_password_hash/1,
+ get_tags/1,
+ get_hashing_algorithm/1,
+ get_limits/1,
+ create_user/3,
+ set_password_hash/3,
+ set_tags/2,
+ update_limits/3,
+ clear_limits/1
+]).
+
+-define(record_version, internal_user_v2).
+
+-type(username() :: binary()).
+
+-type(password_hash() :: binary()).
+
+-type internal_user() :: internal_user_v1:internal_user_v1() | internal_user_v2().
+
+-record(internal_user, {
+ username :: username() | '_',
+ password_hash :: password_hash() | '_',
+ tags :: [atom()] | '_',
+ %% password hashing implementation module,
+ %% typically rabbit_password_hashing_* but can
+ %% come from a plugin
+ hashing_algorithm :: atom() | '_',
+ limits = #{} :: map() | '_'}).
+
+-type(internal_user_v2() ::
+ #internal_user{username :: username() | '_',
+ password_hash :: password_hash() | '_',
+ tags :: [atom()] | '_',
+ hashing_algorithm :: atom() | '_',
+ limits :: map()}).
+
+-type internal_user_pattern() :: internal_user_v1:internal_user_v1_pattern() |
+ internal_user_v2_pattern().
+
+-type internal_user_v2_pattern() :: #internal_user{
+ username :: username() | '_',
+ password_hash :: '_',
+ tags :: '_',
+ hashing_algorithm :: '_',
+ limits :: '_'
+ }.
+
+-export_type([username/0,
+ password_hash/0,
+ internal_user/0,
+ internal_user_v2/0,
+ internal_user_pattern/0,
+ internal_user_v2_pattern/0]).
+
+-spec new() -> internal_user().
+new() ->
+ case record_version_to_use() of
+ ?record_version ->
+ #internal_user{
+ username = <<"">>,
+ password_hash = <<"">>,
+ tags = []
+ };
+ _ ->
+ internal_user_v1:new()
+ end.
+
+-spec new(tuple()) -> internal_user().
+new({hashing_algorithm, HashingAlgorithm}) ->
+ case record_version_to_use() of
+ ?record_version ->
+ #internal_user{
+ username = <<"">>,
+ password_hash = <<"">>,
+ tags = [],
+ hashing_algorithm = HashingAlgorithm
+ };
+ _ ->
+ internal_user_v1:new({hashing_algorithm, HashingAlgorithm})
+ end;
+new({tags, Tags}) ->
+ case record_version_to_use() of
+ ?record_version ->
+ #internal_user{
+ username = <<"">>,
+ password_hash = <<"">>,
+ tags = Tags
+ };
+ _ ->
+ internal_user_v1:new({tags, Tags})
+ end.
+
+-spec record_version_to_use() -> internal_user_v1 | internal_user_v2.
+record_version_to_use() ->
+ case rabbit_feature_flags:is_enabled(user_limits) of
+ true -> ?record_version;
+ false -> internal_user_v1:record_version_to_use()
+ end.
+
+-spec fields() -> list().
+fields() ->
+ case record_version_to_use() of
+ ?record_version -> fields(?record_version);
+ _ -> internal_user_v1:fields()
+ end.
+
+-spec fields(atom()) -> list().
+fields(?record_version) -> record_info(fields, internal_user);
+fields(Version) -> internal_user_v1:fields(Version).
+
+-spec upgrade(internal_user()) -> internal_user().
+upgrade(#internal_user{} = User) -> User;
+upgrade(OldUser) -> upgrade_to(record_version_to_use(), OldUser).
+
+-spec upgrade_to
+(internal_user_v2, internal_user()) -> internal_user_v2();
+(internal_user_v1, internal_user_v1:internal_user_v1()) -> internal_user_v1:internal_user_v1().
+
+upgrade_to(?record_version, #internal_user{} = User) ->
+ User;
+upgrade_to(?record_version, OldUser) ->
+ %% The v2 record only appends the limits field, so a v1 record
+ %% is upgraded by appending an empty limits map to its tuple form.
+ Fields = erlang:tuple_to_list(OldUser) ++ [#{}],
+ #internal_user{} = erlang:list_to_tuple(Fields);
+upgrade_to(Version, OldUser) ->
+ internal_user_v1:upgrade_to(Version, OldUser).
+
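+%% Illustrative upgrade flow (Hash is a placeholder for a real
+%% password hash): a v1 record gains the limits field, which
+%% upgrade_to/2 initialises to an empty map.
+%%
+%%   V1 = internal_user_v1:create_user(<<"guest">>, Hash,
+%%                                     rabbit_password_hashing_sha256),
+%%   V2 = internal_user:upgrade_to(internal_user_v2, V1),
+%%   0 = maps:size(internal_user:get_limits(V2)).
+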
+-spec pattern_match_all() -> internal_user_pattern().
+pattern_match_all() ->
+ case record_version_to_use() of
+ ?record_version -> #internal_user{_ = '_'};
+ _ -> internal_user_v1:pattern_match_all()
+ end.
+
+-spec get_username(internal_user()) -> username().
+get_username(#internal_user{username = Value}) -> Value;
+get_username(User) -> internal_user_v1:get_username(User).
+
+-spec get_password_hash(internal_user()) -> password_hash().
+get_password_hash(#internal_user{password_hash = Value}) -> Value;
+get_password_hash(User) -> internal_user_v1:get_password_hash(User).
+
+-spec get_tags(internal_user()) -> [atom()].
+get_tags(#internal_user{tags = Value}) -> Value;
+get_tags(User) -> internal_user_v1:get_tags(User).
+
+-spec get_hashing_algorithm(internal_user()) -> atom().
+get_hashing_algorithm(#internal_user{hashing_algorithm = Value}) -> Value;
+get_hashing_algorithm(User) -> internal_user_v1:get_hashing_algorithm(User).
+
+-spec get_limits(internal_user()) -> map().
+get_limits(#internal_user{limits = Value}) -> Value;
+get_limits(User) -> internal_user_v1:get_limits(User).
+
+-spec create_user(username(), password_hash(), atom()) -> internal_user().
+create_user(Username, PasswordHash, HashingMod) ->
+ case record_version_to_use() of
+ ?record_version ->
+ #internal_user{username = Username,
+ password_hash = PasswordHash,
+ tags = [],
+ hashing_algorithm = HashingMod,
+ limits = #{}
+ };
+ _ ->
+ internal_user_v1:create_user(Username, PasswordHash, HashingMod)
+ end.
+
+-spec set_password_hash(internal_user(), password_hash(), atom()) -> internal_user().
+set_password_hash(#internal_user{} = User, PasswordHash, HashingAlgorithm) ->
+ User#internal_user{password_hash = PasswordHash,
+ hashing_algorithm = HashingAlgorithm};
+set_password_hash(User, PasswordHash, HashingAlgorithm) ->
+ internal_user_v1:set_password_hash(User, PasswordHash, HashingAlgorithm).
+
+-spec set_tags(internal_user(), [atom()]) -> internal_user().
+set_tags(#internal_user{} = User, Tags) ->
+ User#internal_user{tags = Tags};
+set_tags(User, Tags) ->
+ internal_user_v1:set_tags(User, Tags).
+
+-spec update_limits
+(add, internal_user(), map()) -> internal_user();
+(remove, internal_user(), term()) -> internal_user().
+update_limits(add, #internal_user{limits = Limits} = User, Term) ->
+ User#internal_user{limits = maps:merge(Limits, Term)};
+update_limits(remove, #internal_user{limits = Limits} = User, LimitType) ->
+ User#internal_user{limits = maps:remove(LimitType, Limits)};
+update_limits(Action, User, Term) ->
+ internal_user_v1:update_limits(Action, User, Term).
+
+-spec clear_limits(internal_user()) -> internal_user().
+clear_limits(#internal_user{} = User) ->
+ User#internal_user{limits = #{}};
+clear_limits(User) ->
+ internal_user_v1:clear_limits(User).
diff --git a/deps/rabbit/src/internal_user_v1.erl b/deps/rabbit/src/internal_user_v1.erl
new file mode 100644
index 0000000000..edb956436f
--- /dev/null
+++ b/deps/rabbit/src/internal_user_v1.erl
@@ -0,0 +1,151 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(internal_user_v1).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-export([
+ new/0,
+ new/1,
+ record_version_to_use/0,
+ fields/0,
+ fields/1,
+ upgrade/1,
+ upgrade_to/2,
+ pattern_match_all/0,
+ get_username/1,
+ get_password_hash/1,
+ get_tags/1,
+ get_hashing_algorithm/1,
+ get_limits/1,
+ create_user/3,
+ set_password_hash/3,
+ set_tags/2,
+ update_limits/3,
+ clear_limits/1
+]).
+
+-define(record_version, ?MODULE).
+
+-record(internal_user, {
+ username :: internal_user:username() | '_',
+ password_hash :: internal_user:password_hash() | '_',
+ tags :: [atom()] | '_',
+ %% password hashing implementation module,
+ %% typically rabbit_password_hashing_* but can
+ %% come from a plugin
+ hashing_algorithm :: atom() | '_'}).
+
+-type internal_user() :: internal_user_v1().
+
+-type(internal_user_v1() ::
+ #internal_user{username :: internal_user:username(),
+ password_hash :: internal_user:password_hash(),
+ tags :: [atom()],
+ hashing_algorithm :: atom()}).
+
+-type internal_user_pattern() :: internal_user_v1_pattern().
+
+-type internal_user_v1_pattern() :: #internal_user{
+ username :: internal_user:username() | '_',
+ password_hash :: '_',
+ tags :: '_',
+ hashing_algorithm :: '_'
+ }.
+
+-export_type([internal_user/0,
+ internal_user_v1/0,
+ internal_user_pattern/0,
+ internal_user_v1_pattern/0]).
+
+-spec record_version_to_use() -> internal_user_v1.
+record_version_to_use() ->
+ ?record_version.
+
+-spec new() -> internal_user().
+new() ->
+ #internal_user{
+ username = <<"">>,
+ password_hash = <<"">>,
+ tags = []
+ }.
+
+-spec new(tuple()) -> internal_user().
+new({hashing_algorithm, HashingAlgorithm}) ->
+ #internal_user{
+ username = <<"">>,
+ password_hash = <<"">>,
+ hashing_algorithm = HashingAlgorithm,
+ tags = []
+ };
+new({tags, Tags}) ->
+ #internal_user{
+ username = <<"">>,
+ password_hash = <<"">>,
+ tags = Tags
+ }.
+
+-spec fields() -> list().
+fields() -> fields(?record_version).
+
+-spec fields(atom()) -> list().
+fields(?record_version) -> record_info(fields, internal_user).
+
+-spec upgrade(internal_user()) -> internal_user().
+upgrade(#internal_user{} = User) -> User.
+
+-spec upgrade_to(internal_user_v1, internal_user()) -> internal_user().
+upgrade_to(?record_version, #internal_user{} = User) ->
+ User.
+
+-spec pattern_match_all() -> internal_user_pattern().
+pattern_match_all() -> #internal_user{_ = '_'}.
+
+-spec get_username(internal_user()) -> internal_user:username().
+get_username(#internal_user{username = Value}) -> Value.
+
+-spec get_password_hash(internal_user()) -> internal_user:password_hash().
+get_password_hash(#internal_user{password_hash = Value}) -> Value.
+
+-spec get_tags(internal_user()) -> [atom()].
+get_tags(#internal_user{tags = Value}) -> Value.
+
+-spec get_hashing_algorithm(internal_user()) -> atom().
+get_hashing_algorithm(#internal_user{hashing_algorithm = Value}) -> Value.
+
+-spec get_limits(internal_user()) -> map().
+get_limits(_User) -> #{}.
+
+-spec create_user(internal_user:username(), internal_user:password_hash(),
+ atom()) -> internal_user().
+create_user(Username, PasswordHash, HashingMod) ->
+ #internal_user{username = Username,
+ password_hash = PasswordHash,
+ tags = [],
+ hashing_algorithm = HashingMod
+ }.
+
+-spec set_password_hash(internal_user:internal_user(),
+ internal_user:password_hash(), atom()) -> internal_user().
+set_password_hash(#internal_user{} = User, PasswordHash, HashingAlgorithm) ->
+ User#internal_user{password_hash = PasswordHash,
+ hashing_algorithm = HashingAlgorithm}.
+
+-spec set_tags(internal_user(), [atom()]) -> internal_user().
+set_tags(#internal_user{} = User, Tags) ->
+ User#internal_user{tags = Tags}.
+
+-spec update_limits
+(add, internal_user(), map()) -> internal_user();
+(remove, internal_user(), term()) -> internal_user().
+update_limits(_, User, _) ->
+ User.
+
+-spec clear_limits(internal_user()) -> internal_user().
+clear_limits(User) ->
+ User.
diff --git a/deps/rabbit/src/lager_exchange_backend.erl b/deps/rabbit/src/lager_exchange_backend.erl
new file mode 100644
index 0000000000..cd96f2230e
--- /dev/null
+++ b/deps/rabbit/src/lager_exchange_backend.erl
@@ -0,0 +1,233 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+%% @doc RabbitMQ backend for lager.
+%% Configuration is a proplist with the following keys:
+%% <ul>
+%% <li>`level' - log level to use</li>
+%% <li>`formatter' - the module to use when formatting log messages. Defaults to
+%% `lager_default_formatter'</li>
+%% <li>`formatter_config' - the format configuration string. Defaults to
+%% `time [ severity ] message'</li>
+%% </ul>
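+%%
+%% An illustrative handler entry in lager's configuration:
+%%
+%% `{lager, [{handlers, [{lager_exchange_backend, [{level, info}]}]}]}'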
+
+-module(lager_exchange_backend).
+
+-behaviour(gen_event).
+
+-export([init/1, terminate/2, code_change/3,
+ handle_call/2, handle_event/2, handle_info/2]).
+
+-export([maybe_init_exchange/0]).
+
+-include("rabbit.hrl").
+-include("rabbit_framing.hrl").
+
+-include_lib("lager/include/lager.hrl").
+
+-record(state, {level :: {'mask', integer()},
+ formatter :: atom(),
+ format_config :: any(),
+ init_exchange_ts = undefined :: integer() | undefined,
+ exchange = undefined :: #resource{} | undefined}).
+
+-ifdef(TEST).
+-include_lib("eunit/include/eunit.hrl").
+-compile([{parse_transform, lager_transform}]).
+-endif.
+
+-define(INIT_EXCHANGE_INTERVAL_SECS, 5).
+-define(TERSE_FORMAT, [time, " [", severity, "] ", message]).
+-define(DEFAULT_FORMAT_CONFIG, ?TERSE_FORMAT).
+-define(FORMAT_CONFIG_OFF, []).
+
+-ifdef(TEST).
+-define(DEPRECATED(_Msg), ok).
+-else.
+-define(DEPRECATED(Msg),
+ io:format(user, "WARNING: This is a deprecated lager_exchange_backend configuration. Please use \"~w\" instead.~n", [Msg])).
+-endif.
+
+-define(LOG_EXCH_NAME, <<"amq.rabbitmq.log">>).
+
+init([Level]) when is_atom(Level) ->
+ ?DEPRECATED([{level, Level}]),
+ init([{level, Level}]);
+init([Level, true]) when is_atom(Level) -> % for backwards compatibility
+ ?DEPRECATED([{level, Level}, {formatter_config, [{eol, "\\r\\n\\"}]}]),
+ init([{level, Level}, {formatter_config, ?FORMAT_CONFIG_OFF}]);
+init([Level, false]) when is_atom(Level) -> % for backwards compatibility
+ ?DEPRECATED([{level, Level}]),
+ init([{level, Level}]);
+
+init(Options) when is_list(Options) ->
+ true = validate_options(Options),
+ Level = get_option(level, Options, undefined),
+ try lager_util:config_to_mask(Level) of
+ L ->
+ DefaultOptions = [{formatter, lager_default_formatter},
+ {formatter_config, ?DEFAULT_FORMAT_CONFIG}],
+ [Formatter, Config] = [get_option(K, Options, Default) || {K, Default} <- DefaultOptions],
+ State0 = #state{level=L,
+ formatter=Formatter,
+ format_config=Config},
+ % NB: this will probably always fail since the / vhost isn't available
+ State1 = maybe_init_exchange(State0),
+ {ok, State1}
+ catch
+ _:_ ->
+ {error, {fatal, bad_log_level}}
+ end;
+init(Level) when is_atom(Level) ->
+ ?DEPRECATED([{level, Level}]),
+ init([{level, Level}]);
+init(Other) ->
+ {error, {fatal, {bad_lager_exchange_backend_config, Other}}}.
+
+% rabbitmq/rabbitmq-server#1973
+% This is called immediately after the / vhost is created
+% or recovered
+maybe_init_exchange() ->
+ case lists:member(?MODULE, gen_event:which_handlers(lager_event)) of
+ true ->
+ _ = init_exchange(true),
+ ok;
+ _ ->
+ ok
+ end.
+
+validate_options([]) -> true;
+validate_options([{level, L}|T]) when is_atom(L) ->
+ case lists:member(L, ?LEVELS) of
+ false ->
+ throw({error, {fatal, {bad_level, L}}});
+ true ->
+ validate_options(T)
+ end;
+validate_options([{formatter, M}|T]) when is_atom(M) ->
+ validate_options(T);
+validate_options([{formatter_config, C}|T]) when is_list(C) ->
+ validate_options(T);
+validate_options([H|_]) ->
+ throw({error, {fatal, {bad_lager_exchange_backend_config, H}}}).
+
+get_option(K, Options, Default) ->
+ case lists:keyfind(K, 1, Options) of
+ {K, V} -> V;
+ false -> Default
+ end.
+
+handle_call(get_loglevel, #state{level=Level} = State) ->
+ {ok, Level, State};
+handle_call({set_loglevel, Level}, State) ->
+ try lager_util:config_to_mask(Level) of
+ Levels ->
+ {ok, ok, State#state{level=Levels}}
+ catch
+ _:_ ->
+ {ok, {error, bad_log_level}, State}
+ end;
+handle_call(_Request, State) ->
+ {ok, ok, State}.
+
+handle_event({log, _Message} = Event, State0) ->
+ State1 = maybe_init_exchange(State0),
+ handle_log_event(Event, State1);
+handle_event(_Event, State) ->
+ {ok, State}.
+
+handle_info(_Info, State) ->
+ {ok, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+%% @private
+handle_log_event({log, _Message}, #state{exchange=undefined} = State) ->
+ % NB: tried to define the exchange but still undefined,
+ % so not logging this message. Note: we can't log this dropped
+ % message because it will start an infinite loop
+ {ok, State};
+handle_log_event({log, Message},
+ #state{level=L, exchange=LogExch,
+ formatter=Formatter, format_config=FormatConfig} = State) ->
+ case lager_util:is_loggable(Message, L, ?MODULE) of
+ true ->
+ %% AMQP 0-9-1 says the timestamp is a "64 bit POSIX timestamp". That's
+ %% second resolution, not millisecond.
+ RoutingKey = rabbit_data_coercion:to_binary(lager_msg:severity(Message)),
+ Timestamp = os:system_time(seconds),
+ Node = rabbit_data_coercion:to_binary(node()),
+ Headers = [{<<"node">>, longstr, Node}],
+ AmqpMsg = #'P_basic'{content_type = <<"text/plain">>,
+ timestamp = Timestamp,
+ headers = Headers},
+ Body = rabbit_data_coercion:to_binary(Formatter:format(Message, FormatConfig)),
+ case rabbit_basic:publish(LogExch, RoutingKey, AmqpMsg, Body) of
+ ok -> ok;
+ {error, not_found} -> ok
+ end,
+ {ok, State};
+ false ->
+ {ok, State}
+ end.
+
+%% @private
+maybe_init_exchange(#state{exchange=undefined, init_exchange_ts=undefined} = State) ->
+ Now = erlang:monotonic_time(second),
+ handle_init_exchange(init_exchange(true), Now, State);
+maybe_init_exchange(#state{exchange=undefined, init_exchange_ts=Timestamp} = State) ->
+ Now = erlang:monotonic_time(second),
+ % NB: since we may try to declare the exchange on every log message, this ensures
+ % that we only try once every 5 seconds
+ HasEnoughTimeElapsed = Now - Timestamp > ?INIT_EXCHANGE_INTERVAL_SECS,
+ Result = init_exchange(HasEnoughTimeElapsed),
+ handle_init_exchange(Result, Now, State);
+maybe_init_exchange(State) ->
+ State.
+
+%% @private
+init_exchange(true) ->
+ {ok, DefaultVHost} = application:get_env(rabbit, default_vhost),
+ Exchange = rabbit_misc:r(DefaultVHost, exchange, ?LOG_EXCH_NAME),
+ try
+ %% durable
+ #exchange{} = rabbit_exchange:declare(Exchange, topic, true, false, true, [], ?INTERNAL_USER),
+ rabbit_log:info("Declared exchange '~s' in vhost '~s'", [?LOG_EXCH_NAME, DefaultVHost]),
+ {ok, Exchange}
+ catch
+ ErrType:Err ->
+ rabbit_log:error("Could not declare exchange '~s' in vhost '~s', reason: ~p:~p",
+ [?LOG_EXCH_NAME, DefaultVHost, ErrType, Err]),
+ {ok, undefined}
+ end;
+init_exchange(_) ->
+ {ok, undefined}.
+
+%% @private
+handle_init_exchange({ok, undefined}, Now, State) ->
+ State#state{init_exchange_ts=Now};
+handle_init_exchange({ok, Exchange}, Now, State) ->
+ State#state{exchange=Exchange, init_exchange_ts=Now}.
+
+-ifdef(TEST).
+console_config_validation_test_() ->
+ Good = [{level, info}],
+ Bad1 = [{level, foo}],
+ Bad2 = [{larval, info}],
+ AllGood = [{level, info}, {formatter, my_formatter},
+ {formatter_config, ["blort", "garbage"]}],
+ [
+ ?_assertEqual(true, validate_options(Good)),
+ ?_assertThrow({error, {fatal, {bad_level, foo}}}, validate_options(Bad1)),
+ ?_assertThrow({error, {fatal, {bad_lager_exchange_backend_config, {larval, info}}}}, validate_options(Bad2)),
+ ?_assertEqual(true, validate_options(AllGood))
+ ].
+-endif.
diff --git a/deps/rabbit/src/lqueue.erl b/deps/rabbit/src/lqueue.erl
new file mode 100644
index 0000000000..1e267210d9
--- /dev/null
+++ b/deps/rabbit/src/lqueue.erl
@@ -0,0 +1,102 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(lqueue).
+
+%% lqueue implements a subset of Erlang's queue module. lqueues
+%% maintain their own length, so lqueue:len/1
+%% is an O(1) operation, in contrast with queue:len/1 which is O(n).
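+%%
+%% For example (illustrative):
+%%
+%%   Q0 = lqueue:new(),
+%%   Q1 = lqueue:in(b, lqueue:in(a, Q0)),
+%%   2 = lqueue:len(Q1),                  %% O(1), no traversal
+%%   {{value, a}, _Q2} = lqueue:out(Q1).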
+
+-export([new/0, is_empty/1, len/1, in/2, in_r/2, out/1, out_r/1, join/2,
+ foldl/3, foldr/3, from_list/1, drop/1, to_list/1, peek/1, peek_r/1]).
+
+-define(QUEUE, queue).
+
+-export_type([
+ ?MODULE/0,
+ ?MODULE/1
+ ]).
+
+-opaque ?MODULE() :: ?MODULE(_).
+-opaque ?MODULE(T) :: {non_neg_integer(), queue:queue(T)}.
+-type value() :: any().
+-type result(T) :: 'empty' | {'value', T}.
+
+-spec new() -> ?MODULE(_).
+
+new() -> {0, ?QUEUE:new()}.
+
+-spec drop(?MODULE(T)) -> ?MODULE(T).
+
+drop({L, Q}) -> {L - 1, ?QUEUE:drop(Q)}.
+
+-spec is_empty(?MODULE(_)) -> boolean().
+
+is_empty({0, _Q}) -> true;
+is_empty(_) -> false.
+
+-spec in(T, ?MODULE(T)) -> ?MODULE(T).
+
+in(V, {L, Q}) -> {L+1, ?QUEUE:in(V, Q)}.
+
+-spec in_r(value(), ?MODULE(T)) -> ?MODULE(T).
+
+in_r(V, {L, Q}) -> {L+1, ?QUEUE:in_r(V, Q)}.
+
+-spec out(?MODULE(T)) -> {result(T), ?MODULE(T)}.
+
+out({0, _Q} = Q) -> {empty, Q};
+out({L, Q}) -> {Result, Q1} = ?QUEUE:out(Q),
+ {Result, {L-1, Q1}}.
+
+-spec out_r(?MODULE(T)) -> {result(T), ?MODULE(T)}.
+
+out_r({0, _Q} = Q) -> {empty, Q};
+out_r({L, Q}) -> {Result, Q1} = ?QUEUE:out_r(Q),
+ {Result, {L-1, Q1}}.
+
+-spec join(?MODULE(A), ?MODULE(B)) -> ?MODULE(A | B).
+
+join({L1, Q1}, {L2, Q2}) -> {L1 + L2, ?QUEUE:join(Q1, Q2)}.
+
+-spec to_list(?MODULE(T)) -> [T].
+
+to_list({_L, Q}) -> ?QUEUE:to_list(Q).
+
+-spec from_list([T]) -> ?MODULE(T).
+
+from_list(L) -> {length(L), ?QUEUE:from_list(L)}.
+
+-spec foldl(fun ((T, B) -> B), B, ?MODULE(T)) -> B.
+
+foldl(Fun, Init, Q) ->
+ case out(Q) of
+ {empty, _Q} -> Init;
+ {{value, V}, Q1} -> foldl(Fun, Fun(V, Init), Q1)
+ end.
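+%% e.g. (illustrative):
+%%   6 = lqueue:foldl(fun(X, Acc) -> Acc + X end, 0, lqueue:from_list([1,2,3])).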
+
+-spec foldr(fun ((T, B) -> B), B, ?MODULE(T)) -> B.
+
+foldr(Fun, Init, Q) ->
+ case out_r(Q) of
+ {empty, _Q} -> Init;
+ {{value, V}, Q1} -> foldr(Fun, Fun(V, Init), Q1)
+ end.
+
+-spec len(?MODULE(_)) -> non_neg_integer().
+
+len({L, _}) -> L.
+
+-spec peek(?MODULE(T)) -> result(T).
+
+peek({ 0, _Q}) -> empty;
+peek({_L, Q}) -> ?QUEUE:peek(Q).
+
+-spec peek_r(?MODULE(T)) -> result(T).
+
+peek_r({ 0, _Q}) -> empty;
+peek_r({_L, Q}) -> ?QUEUE:peek_r(Q).
diff --git a/deps/rabbit/src/mirrored_supervisor_sups.erl b/deps/rabbit/src/mirrored_supervisor_sups.erl
new file mode 100644
index 0000000000..b29d4d48e6
--- /dev/null
+++ b/deps/rabbit/src/mirrored_supervisor_sups.erl
@@ -0,0 +1,34 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(mirrored_supervisor_sups).
+
+-define(SUPERVISOR, supervisor2).
+-define(GS_MODULE, mirrored_supervisor).
+
+-behaviour(?SUPERVISOR).
+
+-export([init/1]).
+
+%%----------------------------------------------------------------------------
+
+init({overall, _Group, _TxFun, ignore}) -> ignore;
+init({overall, Group, TxFun, {ok, {Restart, ChildSpecs}}}) ->
+ %% Important: Delegate MUST start before Mirroring so that when we
+ %% shut down from above it shuts down last, so Mirroring does not
+ %% see it die.
+ %%
+ %% See comment in handle_info('DOWN', ...) in mirrored_supervisor
+ {ok, {{one_for_all, 0, 1},
+ [{delegate, {?SUPERVISOR, start_link, [?MODULE, {delegate, Restart}]},
+ temporary, 16#ffffffff, supervisor, [?SUPERVISOR]},
+ {mirroring, {?GS_MODULE, start_internal, [Group, TxFun, ChildSpecs]},
+ permanent, 16#ffffffff, worker, [?MODULE]}]}};
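+
+%% The resulting tree, schematically (a sketch based on the specs above):
+%%
+%%   overall (one_for_all)
+%%     |- delegate  : a supervisor2 that will hold the actual children
+%%     |- mirroring : the mirrored_supervisor gen_server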
+
+init({delegate, Restart}) ->
+ {ok, {Restart, []}}.
diff --git a/deps/rabbit/src/pg_local.erl b/deps/rabbit/src/pg_local.erl
new file mode 100644
index 0000000000..263e743d1f
--- /dev/null
+++ b/deps/rabbit/src/pg_local.erl
@@ -0,0 +1,249 @@
+%% This file is a copy of pg2.erl from the R13B-3 Erlang/OTP
+%% distribution, with the following modifications:
+%%
+%% 1) Process groups are node-local only.
+%%
+%% 2) Groups are created/deleted implicitly.
+%%
+%% 3) 'join' and 'leave' are asynchronous.
+%%
+%% 4) the type specs of the exported non-callback functions have been
+%% extracted into a separate, guarded section, and rewritten in
+%% old-style spec syntax, for better compatibility with older
+%% versions of Erlang/OTP. The remaining type specs have been
+%% removed.
+
+%% All modifications are (C) 2010-2020 VMware, Inc. or its affiliates.
+
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1997-2009. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at https://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(pg_local).
+
+-export([join/2, leave/2, get_members/1, in_group/2]).
+%% intended for testing only; not part of official API
+-export([sync/0, clear/0]).
+-export([start/0, start_link/0, init/1, handle_call/3, handle_cast/2,
+ handle_info/2, terminate/2]).
+
+%%----------------------------------------------------------------------------
+
+-type name() :: term().
+
+%%----------------------------------------------------------------------------
+
+-define(TABLE, pg_local_table).
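+
+%% Illustrative usage (a sketch, not an official API example):
+%%
+%%   ok   = pg_local:join(my_group, self()),
+%%   true = pg_local:in_group(my_group, self()),
+%%   [_P] = pg_local:get_members(my_group),
+%%   ok   = pg_local:leave(my_group, self()).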
+
+%%%
+%%% Exported functions
+%%%
+
+-spec start_link() -> {'ok', pid()} | {'error', any()}.
+
+start_link() ->
+ gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
+
+-spec start() -> {'ok', pid()} | {'error', any()}.
+
+start() ->
+ ensure_started().
+
+-spec join(name(), pid()) -> 'ok'.
+
+join(Name, Pid) when is_pid(Pid) ->
+ _ = ensure_started(),
+ gen_server:cast(?MODULE, {join, Name, Pid}).
+
+-spec leave(name(), pid()) -> 'ok'.
+
+leave(Name, Pid) when is_pid(Pid) ->
+ _ = ensure_started(),
+ gen_server:cast(?MODULE, {leave, Name, Pid}).
+
+-spec get_members(name()) -> [pid()].
+
+get_members(Name) ->
+ _ = ensure_started(),
+ group_members(Name).
+
+-spec in_group(name(), pid()) -> boolean().
+
+in_group(Name, Pid) ->
+ _ = ensure_started(),
+ %% The join message is a cast and thus can race, but we want to
+ %% keep it that way to be fast in the common case.
+ case member_present(Name, Pid) of
+ true -> true;
+ false -> sync(),
+ member_present(Name, Pid)
+ end.
+
+-spec sync() -> 'ok'.
+
+sync() ->
+ _ = ensure_started(),
+ gen_server:call(?MODULE, sync, infinity).
+
+clear() ->
+ _ = ensure_started(),
+ gen_server:call(?MODULE, clear, infinity).
+
+%%%
+%%% Callback functions from gen_server
+%%%
+
+-record(state, {}).
+
+init([]) ->
+ ?TABLE = ets:new(?TABLE, [ordered_set, protected, named_table]),
+ {ok, #state{}}.
+
+handle_call(sync, _From, S) ->
+ {reply, ok, S};
+
+handle_call(clear, _From, S) ->
+ ets:delete_all_objects(?TABLE),
+ {reply, ok, S};
+
+handle_call(Request, From, S) ->
+ error_logger:warning_msg("The pg_local server received an unexpected message:\n"
+ "handle_call(~p, ~p, _)\n",
+ [Request, From]),
+ {noreply, S}.
+
+handle_cast({join, Name, Pid}, S) ->
+ _ = join_group(Name, Pid),
+ {noreply, S};
+handle_cast({leave, Name, Pid}, S) ->
+ leave_group(Name, Pid),
+ {noreply, S};
+handle_cast(_, S) ->
+ {noreply, S}.
+
+handle_info({'DOWN', MonitorRef, process, Pid, _Info}, S) ->
+ member_died(MonitorRef, Pid),
+ {noreply, S};
+handle_info(_, S) ->
+ {noreply, S}.
+
+terminate(_Reason, _S) ->
+ true = ets:delete(?TABLE),
+ ok.
+
+%%%
+%%% Local functions
+%%%
+
+%%% One ETS table, pg_local_table, is used for bookkeeping. The type of the
+%%% table is ordered_set, and the fast matching of partially
+%%% instantiated keys is used extensively.
+%%%
+%%% {{ref, Pid}, MonitorRef, Counter}
+%%% {{ref, MonitorRef}, Pid}
+%%% Each process has one monitor. Counter is incremented when the
+%%% Pid joins some group.
+%%%    {{member, Name, Pid}, GroupCounter}
+%%%       Pid is a member of group Name; GroupCounter is incremented when
+%%%       Pid joins the group Name.
+%%% {{pid, Pid, Name}}
+%%% Pid is a member of group Name.
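+%%%
+%%% For example (a sketch): after Pid joins group g1 twice and g2 once,
+%%% the table holds {{ref, Pid}, MRef, 3}, {{ref, MRef}, Pid},
+%%% {{member, g1, Pid}, 2}, {{member, g2, Pid}, 1},
+%%% {{pid, Pid, g1}} and {{pid, Pid, g2}}.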
+
+member_died(Ref, Pid) ->
+ case ets:lookup(?TABLE, {ref, Ref}) of
+ [{{ref, Ref}, Pid}] ->
+ leave_all_groups(Pid);
+ %% in case the key has already been removed
+ %% we can clean up using the value from the DOWN message
+ _ ->
+ leave_all_groups(Pid)
+ end,
+ ok.
+
+leave_all_groups(Pid) ->
+ Names = member_groups(Pid),
+ _ = [leave_group(Name, P) ||
+ Name <- Names,
+ P <- member_in_group(Pid, Name)].
+
+join_group(Name, Pid) ->
+ Ref_Pid = {ref, Pid},
+ try _ = ets:update_counter(?TABLE, Ref_Pid, {3, +1})
+ catch _:_ ->
+ Ref = erlang:monitor(process, Pid),
+ true = ets:insert(?TABLE, {Ref_Pid, Ref, 1}),
+ true = ets:insert(?TABLE, {{ref, Ref}, Pid})
+ end,
+ Member_Name_Pid = {member, Name, Pid},
+ try _ = ets:update_counter(?TABLE, Member_Name_Pid, {2, +1})
+ catch _:_ ->
+ true = ets:insert(?TABLE, {Member_Name_Pid, 1}),
+ true = ets:insert(?TABLE, {{pid, Pid, Name}})
+ end.
+
+leave_group(Name, Pid) ->
+ Member_Name_Pid = {member, Name, Pid},
+ try ets:update_counter(?TABLE, Member_Name_Pid, {2, -1}) of
+ N ->
+ if
+ N =:= 0 ->
+ true = ets:delete(?TABLE, {pid, Pid, Name}),
+ true = ets:delete(?TABLE, Member_Name_Pid);
+ true ->
+ ok
+ end,
+ Ref_Pid = {ref, Pid},
+ case ets:update_counter(?TABLE, Ref_Pid, {3, -1}) of
+ 0 ->
+ [{Ref_Pid,Ref,0}] = ets:lookup(?TABLE, Ref_Pid),
+ true = ets:delete(?TABLE, {ref, Ref}),
+ true = ets:delete(?TABLE, Ref_Pid),
+ true = erlang:demonitor(Ref, [flush]),
+ ok;
+ _ ->
+ ok
+ end
+ catch _:_ ->
+ ok
+ end.
+
+group_members(Name) ->
+ [P ||
+ [P, N] <- ets:match(?TABLE, {{member, Name, '$1'},'$2'}),
+ _ <- lists:seq(1, N)].
+
+member_in_group(Pid, Name) ->
+ [{{member, Name, Pid}, N}] = ets:lookup(?TABLE, {member, Name, Pid}),
+ lists:duplicate(N, Pid).
+
+member_present(Name, Pid) ->
+ case ets:lookup(?TABLE, {member, Name, Pid}) of
+ [_] -> true;
+ [] -> false
+ end.
+
+member_groups(Pid) ->
+ [Name || [Name] <- ets:match(?TABLE, {{pid, Pid, '$1'}})].
+
+ensure_started() ->
+ case whereis(?MODULE) of
+ undefined ->
+ C = {pg_local, {?MODULE, start_link, []}, permanent,
+ 16#ffffffff, worker, [?MODULE]},
+ supervisor:start_child(kernel_safe_sup, C);
+ PgLocalPid ->
+ {ok, PgLocalPid}
+ end.
diff --git a/deps/rabbit/src/rabbit.erl b/deps/rabbit/src/rabbit.erl
new file mode 100644
index 0000000000..9248c945dc
--- /dev/null
+++ b/deps/rabbit/src/rabbit.erl
@@ -0,0 +1,1511 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit).
+
+%% Transitional step until we can require Erlang/OTP 21 and
+%% use the now recommended try/catch syntax for obtaining the stack trace.
+-compile(nowarn_deprecated_function).
+
+-behaviour(application).
+
+-export([start/0, boot/0, stop/0,
+ stop_and_halt/0, await_startup/0, await_startup/1, await_startup/3,
+ status/0, is_running/0, alarms/0,
+ is_running/1, environment/0, rotate_logs/0, force_event_refresh/1,
+ start_fhc/0]).
+
+-export([start/2, stop/1, prep_stop/1]).
+-export([start_apps/1, start_apps/2, stop_apps/1]).
+-export([product_info/0,
+ product_name/0,
+ product_version/0,
+ base_product_name/0,
+ base_product_version/0,
+ motd_file/0,
+ motd/0]).
+-export([log_locations/0, config_files/0]). %% for testing and mgmt-agent
+-export([is_booted/1, is_booted/0, is_booting/1, is_booting/0]).
+
+%%---------------------------------------------------------------------------
+%% Boot steps.
+-export([maybe_insert_default_data/0, boot_delegate/0, recover/0]).
+
+%% for tests
+-export([validate_msg_store_io_batch_size_and_credit_disc_bound/2]).
+
+-rabbit_boot_step({pre_boot, [{description, "rabbit boot start"}]}).
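+
+%% Boot steps form a DAG ordered by their {requires, ...} and {enables, ...}
+%% edges; each step's optional {mfa, {M, F, A}} is applied when the step runs
+%% (see rabbit_boot_steps). A hypothetical plugin-provided step would be
+%% declared like this (a sketch only; the names are made up):
+%%
+%% -rabbit_boot_step({my_plugin_step,
+%%                    [{description, "example step"},
+%%                     {mfa, {my_plugin, boot, []}},
+%%                     {requires, external_infrastructure},
+%%                     {enables, kernel_ready}]}).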
+
+-rabbit_boot_step({codec_correctness_check,
+ [{description, "codec correctness check"},
+ {mfa, {rabbit_binary_generator,
+ check_empty_frame_size,
+ []}},
+ {requires, pre_boot},
+ {enables, external_infrastructure}]}).
+
+%% rabbit_alarm currently starts memory and disk space monitors
+-rabbit_boot_step({rabbit_alarm,
+ [{description, "alarm handler"},
+ {mfa, {rabbit_alarm, start, []}},
+ {requires, pre_boot},
+ {enables, external_infrastructure}]}).
+
+-rabbit_boot_step({feature_flags,
+ [{description, "feature flags registry and initial state"},
+ {mfa, {rabbit_feature_flags, init, []}},
+ {requires, pre_boot},
+ {enables, external_infrastructure}]}).
+
+-rabbit_boot_step({database,
+ [{mfa, {rabbit_mnesia, init, []}},
+ {requires, file_handle_cache},
+ {enables, external_infrastructure}]}).
+
+-rabbit_boot_step({database_sync,
+ [{description, "database sync"},
+ {mfa, {rabbit_sup, start_child, [mnesia_sync]}},
+ {requires, database},
+ {enables, external_infrastructure}]}).
+
+-rabbit_boot_step({code_server_cache,
+ [{description, "code_server cache server"},
+ {mfa, {rabbit_sup, start_child, [code_server_cache]}},
+ {requires, rabbit_alarm},
+ {enables, file_handle_cache}]}).
+
+-rabbit_boot_step({file_handle_cache,
+ [{description, "file handle cache server"},
+ {mfa, {rabbit, start_fhc, []}},
+ %% FHC needs memory monitor to be running
+ {requires, code_server_cache},
+ {enables, worker_pool}]}).
+
+-rabbit_boot_step({worker_pool,
+ [{description, "default worker pool"},
+ {mfa, {rabbit_sup, start_supervisor_child,
+ [worker_pool_sup]}},
+ {requires, pre_boot},
+ {enables, external_infrastructure}]}).
+
+-rabbit_boot_step({definition_import_worker_pool,
+ [{description, "dedicated worker pool for definition import"},
+ {mfa, {rabbit_definitions, boot, []}},
+ {requires, external_infrastructure}]}).
+
+-rabbit_boot_step({external_infrastructure,
+ [{description, "external infrastructure ready"}]}).
+
+-rabbit_boot_step({rabbit_registry,
+ [{description, "plugin registry"},
+ {mfa, {rabbit_sup, start_child,
+ [rabbit_registry]}},
+ {requires, external_infrastructure},
+ {enables, kernel_ready}]}).
+
+-rabbit_boot_step({rabbit_core_metrics,
+ [{description, "core metrics storage"},
+ {mfa, {rabbit_sup, start_child,
+ [rabbit_metrics]}},
+ {requires, pre_boot},
+ {enables, external_infrastructure}]}).
+
+-rabbit_boot_step({rabbit_osiris_metrics,
+ [{description, "osiris metrics scraper"},
+ {mfa, {rabbit_sup, start_child,
+ [rabbit_osiris_metrics]}},
+ {requires, pre_boot},
+ {enables, external_infrastructure}]}).
+
+%% -rabbit_boot_step({rabbit_stream_coordinator,
+%% [{description, "stream queues coordinator"},
+%% {mfa, {rabbit_stream_coordinator, start,
+%% []}},
+%% {requires, pre_boot},
+%% {enables, external_infrastructure}]}).
+
+-rabbit_boot_step({rabbit_event,
+ [{description, "statistics event manager"},
+ {mfa, {rabbit_sup, start_restartable_child,
+ [rabbit_event]}},
+ {requires, external_infrastructure},
+ {enables, kernel_ready}]}).
+
+-rabbit_boot_step({kernel_ready,
+ [{description, "kernel ready"},
+ {requires, external_infrastructure}]}).
+
+-rabbit_boot_step({rabbit_memory_monitor,
+ [{description, "memory monitor"},
+ {mfa, {rabbit_sup, start_restartable_child,
+ [rabbit_memory_monitor]}},
+ {requires, rabbit_alarm},
+ {enables, core_initialized}]}).
+
+-rabbit_boot_step({guid_generator,
+ [{description, "guid generator"},
+ {mfa, {rabbit_sup, start_restartable_child,
+ [rabbit_guid]}},
+ {requires, kernel_ready},
+ {enables, core_initialized}]}).
+
+-rabbit_boot_step({delegate_sup,
+ [{description, "cluster delegate"},
+ {mfa, {rabbit, boot_delegate, []}},
+ {requires, kernel_ready},
+ {enables, core_initialized}]}).
+
+-rabbit_boot_step({rabbit_node_monitor,
+ [{description, "node monitor"},
+ {mfa, {rabbit_sup, start_restartable_child,
+ [rabbit_node_monitor]}},
+ {requires, [rabbit_alarm, guid_generator]},
+ {enables, core_initialized}]}).
+
+-rabbit_boot_step({rabbit_epmd_monitor,
+ [{description, "epmd monitor"},
+ {mfa, {rabbit_sup, start_restartable_child,
+ [rabbit_epmd_monitor]}},
+ {requires, kernel_ready},
+ {enables, core_initialized}]}).
+
+-rabbit_boot_step({rabbit_sysmon_minder,
+ [{description, "sysmon_handler supervisor"},
+ {mfa, {rabbit_sup, start_restartable_child,
+ [rabbit_sysmon_minder]}},
+ {requires, kernel_ready},
+ {enables, core_initialized}]}).
+
+-rabbit_boot_step({core_initialized,
+ [{description, "core initialized"},
+ {requires, kernel_ready}]}).
+
+-rabbit_boot_step({upgrade_queues,
+ [{description, "per-vhost message store migration"},
+ {mfa, {rabbit_upgrade,
+ maybe_migrate_queues_to_per_vhost_storage,
+ []}},
+ {requires, [core_initialized]},
+ {enables, recovery}]}).
+
+-rabbit_boot_step({recovery,
+ [{description, "exchange, queue and binding recovery"},
+ {mfa, {rabbit, recover, []}},
+ {requires, [core_initialized]},
+ {enables, routing_ready}]}).
+
+-rabbit_boot_step({empty_db_check,
+ [{description, "empty DB check"},
+ {mfa, {?MODULE, maybe_insert_default_data, []}},
+ {requires, recovery},
+ {enables, routing_ready}]}).
+
+-rabbit_boot_step({routing_ready,
+ [{description, "message delivery logic ready"},
+ {requires, [core_initialized, recovery]}]}).
+
+-rabbit_boot_step({connection_tracking,
+ [{description, "connection tracking infrastructure"},
+ {mfa, {rabbit_connection_tracking, boot, []}},
+ {enables, routing_ready}]}).
+
+-rabbit_boot_step({channel_tracking,
+ [{description, "channel tracking infrastructure"},
+ {mfa, {rabbit_channel_tracking, boot, []}},
+ {enables, routing_ready}]}).
+
+-rabbit_boot_step({background_gc,
+ [{description, "background garbage collection"},
+ {mfa, {rabbit_sup, start_restartable_child,
+ [background_gc]}},
+ {requires, [core_initialized, recovery]},
+ {enables, routing_ready}]}).
+
+-rabbit_boot_step({rabbit_core_metrics_gc,
+ [{description, "background core metrics garbage collection"},
+ {mfa, {rabbit_sup, start_restartable_child,
+ [rabbit_core_metrics_gc]}},
+ {requires, [core_initialized, recovery]},
+ {enables, routing_ready}]}).
+
+-rabbit_boot_step({rabbit_looking_glass,
+ [{description, "Looking Glass tracer and profiler"},
+ {mfa, {rabbit_looking_glass, boot, []}},
+ {requires, [core_initialized, recovery]},
+ {enables, routing_ready}]}).
+
+-rabbit_boot_step({pre_flight,
+ [{description, "ready to communicate with peers and clients"},
+ {requires, [core_initialized, recovery, routing_ready]}]}).
+
+-rabbit_boot_step({cluster_name,
+ [{description, "sets cluster name if configured"},
+ {mfa, {rabbit_nodes, boot, []}},
+ {requires, pre_flight}
+ ]}).
+
+-rabbit_boot_step({direct_client,
+ [{description, "direct client"},
+ {mfa, {rabbit_direct, boot, []}},
+ {requires, pre_flight}
+ ]}).
+
+-rabbit_boot_step({notify_cluster,
+ [{description, "notifies cluster peers of our presence"},
+ {mfa, {rabbit_node_monitor, notify_node_up, []}},
+ {requires, pre_flight}]}).
+
+-rabbit_boot_step({networking,
+ [{description, "TCP and TLS listeners (backwards compatibility)"},
+ {mfa, {rabbit_log, debug, ["'networking' boot step skipped and moved to end of startup", []]}},
+ {requires, notify_cluster}]}).
+
+%%---------------------------------------------------------------------------
+
+-include("rabbit_framing.hrl").
+-include("rabbit.hrl").
+
+-define(APPS, [os_mon, mnesia, rabbit_common, rabbitmq_prelaunch, ra, sysmon_handler, rabbit, osiris]).
+
+-define(ASYNC_THREADS_WARNING_THRESHOLD, 8).
+
+%% 1 minute
+-define(BOOT_START_TIMEOUT, 1 * 60 * 1000).
+%% 12 hours
+-define(BOOT_FINISH_TIMEOUT, 12 * 60 * 60 * 1000).
+%% 100 ms
+-define(BOOT_STATUS_CHECK_INTERVAL, 100).
+
+%%----------------------------------------------------------------------------
+
+-type restart_type() :: 'permanent' | 'transient' | 'temporary'.
+
+-type param() :: atom().
+-type app_name() :: atom().
+
+%%----------------------------------------------------------------------------
+
+-spec start() -> 'ok'.
+
+start() ->
+ %% start() vs. boot(): we want to throw an error in start().
+ start_it(temporary).
+
+-spec boot() -> 'ok'.
+
+boot() ->
+ %% start() vs. boot(): we want the node to exit in boot(). Because
+ %% applications are started with `transient`, any error during their
+ %% startup will abort the node.
+ start_it(transient).
+
+run_prelaunch_second_phase() ->
+ %% Finish the prelaunch phase started by the `rabbitmq_prelaunch`
+ %% application.
+ %%
+ %% The first phase was handled by the `rabbitmq_prelaunch`
+%% application. It was started in one of the following ways:
+ %% - from an Erlang release boot script;
+ %% - from the rabbit:boot/0 or rabbit:start/0 functions.
+ %%
+ %% The `rabbitmq_prelaunch` application creates the context map from
+ %% the environment and the configuration files early during Erlang
+ %% VM startup. Once it is done, all application environments are
+ %% configured (in particular `mnesia` and `ra`).
+ %%
+ %% This second phase depends on other modules & facilities of
+ %% RabbitMQ core. That's why we need to run it now, from the
+ %% `rabbit` application start function.
+
+ %% We assert Mnesia is stopped before we run the prelaunch
+ %% phases. See `rabbit_prelaunch` for an explanation.
+ %%
+ %% This is the second assertion, just in case Mnesia is started
+ %% between the two prelaunch phases.
+ rabbit_prelaunch:assert_mnesia_is_stopped(),
+
+ %% Get the context created by `rabbitmq_prelaunch` then proceed
+ %% with all steps in this phase.
+ #{initial_pass := IsInitialPass} =
+ Context = rabbit_prelaunch:get_context(),
+
+ case IsInitialPass of
+ true ->
+ rabbit_log_prelaunch:debug(""),
+ rabbit_log_prelaunch:debug(
+ "== Prelaunch phase [2/2] (initial pass) ==");
+ false ->
+ rabbit_log_prelaunch:debug(""),
+ rabbit_log_prelaunch:debug("== Prelaunch phase [2/2] =="),
+ ok
+ end,
+
+ %% 1. Enabled plugins file.
+ ok = rabbit_prelaunch_enabled_plugins_file:setup(Context),
+
+ %% 2. Feature flags registry.
+ ok = rabbit_prelaunch_feature_flags:setup(Context),
+
+ %% 3. Logging.
+ ok = rabbit_prelaunch_logging:setup(Context),
+
+ %% 4. Clustering.
+ ok = rabbit_prelaunch_cluster:setup(Context),
+
+ %% Start Mnesia now that everything is ready.
+ rabbit_log_prelaunch:debug("Starting Mnesia"),
+ ok = mnesia:start(),
+
+ rabbit_log_prelaunch:debug(""),
+ rabbit_log_prelaunch:debug("== Prelaunch DONE =="),
+
+ case IsInitialPass of
+ true -> rabbit_prelaunch:initial_pass_finished();
+ false -> ok
+ end,
+ ok.
+
+start_it(StartType) ->
+ case spawn_boot_marker() of
+ {ok, Marker} ->
+ T0 = erlang:timestamp(),
+ rabbit_log:info("RabbitMQ is asked to start...", []),
+ try
+ {ok, _} = application:ensure_all_started(rabbitmq_prelaunch,
+ StartType),
+ {ok, _} = application:ensure_all_started(rabbit,
+ StartType),
+ ok = wait_for_ready_or_stopped(),
+
+ T1 = erlang:timestamp(),
+ rabbit_log_prelaunch:debug(
+ "Time to start RabbitMQ: ~p µs",
+ [timer:now_diff(T1, T0)]),
+ stop_boot_marker(Marker),
+ ok
+ catch
+ error:{badmatch, Error}:_ ->
+ stop_boot_marker(Marker),
+ case StartType of
+ temporary -> throw(Error);
+ _ -> exit(Error)
+ end
+ end;
+ {already_booting, Marker} ->
+ stop_boot_marker(Marker),
+ ok
+ end.
+
+wait_for_ready_or_stopped() ->
+ ok = rabbit_boot_state:wait_for(ready, ?BOOT_FINISH_TIMEOUT),
+ case rabbit_boot_state:get() of
+ ready ->
+ ok;
+ _ ->
+ ok = rabbit_boot_state:wait_for(stopped, ?BOOT_FINISH_TIMEOUT),
+ rabbit_prelaunch:get_stop_reason()
+ end.
+
+spawn_boot_marker() ->
+ %% Compatibility with older RabbitMQ versions:
+ %% We register a process doing nothing to indicate that RabbitMQ is
+ %% booting. This is checked by `is_booting(Node)` on a remote node.
+ Marker = spawn_link(fun() -> receive stop -> ok end end),
+ case catch register(rabbit_boot, Marker) of
+ true -> {ok, Marker};
+ _ -> {already_booting, Marker}
+ end.
+
+stop_boot_marker(Marker) ->
+ unlink(Marker),
+ Marker ! stop,
+ ok.
+
+-spec stop() -> 'ok'.
+
+stop() ->
+ case wait_for_ready_or_stopped() of
+ ok ->
+ case rabbit_boot_state:get() of
+ ready ->
+ Product = product_name(),
+ rabbit_log:info("~s is asked to stop...", [Product]),
+ do_stop(),
+ rabbit_log:info(
+ "Successfully stopped ~s and its dependencies",
+ [Product]),
+ ok;
+ stopped ->
+ ok
+ end;
+ _ ->
+ ok
+ end.
+
+do_stop() ->
+ Apps0 = ?APPS ++ rabbit_plugins:active(),
+ %% We ensure that Mnesia is stopped last (or more exactly, after rabbit).
+ Apps1 = app_utils:app_dependency_order(Apps0, true) -- [mnesia],
+ Apps = [mnesia | Apps1],
+ %% this will also perform unregistration with the peer discovery backend
+ %% as needed
+ stop_apps(Apps).
+
+-spec stop_and_halt() -> no_return().
+
+stop_and_halt() ->
+ try
+ stop()
+ catch Type:Reason ->
+ rabbit_log:error(
+ "Error trying to stop ~s: ~p:~p",
+ [product_name(), Type, Reason]),
+ error({Type, Reason})
+ after
+ %% Enclose all the logging in the try block.
+ %% init:stop() will be called regardless of any errors.
+ try
+ AppsLeft = [ A || {A, _, _} <- application:which_applications() ],
+ rabbit_log:info(
+ lists:flatten(["Halting Erlang VM with the following applications:~n",
+ [" ~p~n" || _ <- AppsLeft]]),
+ AppsLeft),
+ %% Also duplicate this information to stderr, so console where
+ %% foreground broker was running (or systemd journal) will
+ %% contain information about graceful termination.
+ io:format(standard_error, "Gracefully halting Erlang VM~n", [])
+ after
+ init:stop()
+ end
+ end,
+ ok.
+
+-spec start_apps([app_name()]) -> 'ok'.
+
+start_apps(Apps) ->
+ start_apps(Apps, #{}).
+
+-spec start_apps([app_name()],
+ #{app_name() => restart_type()}) -> 'ok'.
+
+%% TODO: start_apps/2 is now specific to plugins. This function should
+%% be moved over to `rabbit_plugins`, along with stop_apps/1, once the
+%% latter stops using app_utils as well.
+
+start_apps(Apps, RestartTypes) ->
+ false = lists:member(rabbit, Apps), %% Assertion.
+ %% We need to load all applications involved in order to be able to
+ %% find new feature flags.
+ app_utils:load_applications(Apps),
+ ok = rabbit_feature_flags:refresh_feature_flags_after_app_load(Apps),
+ rabbit_prelaunch_conf:decrypt_config(Apps),
+ lists:foreach(
+ fun(App) ->
+ RestartType = maps:get(App, RestartTypes, temporary),
+ ok = rabbit_boot_steps:run_boot_steps([App]),
+ case application:ensure_all_started(App, RestartType) of
+ {ok, _} -> ok;
+ {error, Reason} -> throw({could_not_start, App, Reason})
+ end
+ end, Apps).
+
+-spec stop_apps([app_name()]) -> 'ok'.
+
+stop_apps([]) ->
+ ok;
+stop_apps(Apps) ->
+ rabbit_log:info(
+ lists:flatten(["Stopping ~s applications and their dependencies in the following order:~n",
+ [" ~p~n" || _ <- Apps]]),
+ [product_name() | lists:reverse(Apps)]),
+ ok = app_utils:stop_applications(
+ Apps, handle_app_error(error_during_shutdown)),
+ case lists:member(rabbit, Apps) of
+ %% plugin deactivation
+ false -> rabbit_boot_steps:run_cleanup_steps(Apps);
+ true -> ok %% it's all going anyway
+ end,
+ ok.
+
+-spec handle_app_error(_) -> fun((_, _) -> no_return()).
+handle_app_error(Term) ->
+ fun(App, {bad_return, {_MFA, {'EXIT', ExitReason}}}) ->
+ throw({Term, App, ExitReason});
+ (App, Reason) ->
+ throw({Term, App, Reason})
+ end.
+
+is_booting() -> is_booting(node()).
+
+is_booting(Node) when Node =:= node() ->
+ case rabbit_boot_state:get() of
+ booting -> true;
+ _ -> false
+ end;
+is_booting(Node) ->
+ case rpc:call(Node, rabbit, is_booting, []) of
+ {badrpc, _} = Err -> Err;
+ Ret -> Ret
+ end.
+
+-spec await_startup() -> 'ok' | {'error', 'timeout'}.
+
+await_startup() ->
+ await_startup(node(), false).
+
+-spec await_startup(node() | non_neg_integer()) -> 'ok' | {'error', 'timeout'}.
+
+await_startup(Node) when is_atom(Node) ->
+    await_startup(Node, false);
+await_startup(Timeout) when is_integer(Timeout) ->
+    await_startup(node(), false, Timeout).
+
+-spec await_startup(node(), boolean()) -> 'ok' | {'error', 'timeout'}.
+
+await_startup(Node, PrintProgressReports) ->
+ case is_booting(Node) of
+ true -> wait_for_boot_to_finish(Node, PrintProgressReports);
+ false ->
+ case is_running(Node) of
+ true -> ok;
+ false -> wait_for_boot_to_start(Node),
+ wait_for_boot_to_finish(Node, PrintProgressReports)
+ end
+ end.
+
+-spec await_startup(node(), boolean(), non_neg_integer()) -> 'ok' | {'error', 'timeout'}.
+
+await_startup(Node, PrintProgressReports, Timeout) ->
+ case is_booting(Node) of
+ true -> wait_for_boot_to_finish(Node, PrintProgressReports, Timeout);
+ false ->
+ case is_running(Node) of
+ true -> ok;
+ false -> wait_for_boot_to_start(Node, Timeout),
+ wait_for_boot_to_finish(Node, PrintProgressReports, Timeout)
+ end
+ end.
+
+wait_for_boot_to_start(Node) ->
+ wait_for_boot_to_start(Node, ?BOOT_START_TIMEOUT).
+
+wait_for_boot_to_start(Node, infinity) ->
+ %% This assumes that 100K iterations is close enough to "infinity".
+ %% Now that's deep.
+ do_wait_for_boot_to_start(Node, 100000);
+wait_for_boot_to_start(Node, Timeout) ->
+ Iterations = Timeout div ?BOOT_STATUS_CHECK_INTERVAL,
+ do_wait_for_boot_to_start(Node, Iterations).
+
+do_wait_for_boot_to_start(_Node, IterationsLeft) when IterationsLeft =< 0 ->
+ {error, timeout};
+do_wait_for_boot_to_start(Node, IterationsLeft) ->
+ case is_booting(Node) of
+ false ->
+ timer:sleep(?BOOT_STATUS_CHECK_INTERVAL),
+ do_wait_for_boot_to_start(Node, IterationsLeft - 1);
+ {badrpc, _} = Err ->
+ Err;
+ true ->
+ ok
+ end.
+
+wait_for_boot_to_finish(Node, PrintProgressReports) ->
+ wait_for_boot_to_finish(Node, PrintProgressReports, ?BOOT_FINISH_TIMEOUT).
+
+wait_for_boot_to_finish(Node, PrintProgressReports, infinity) ->
+ %% This assumes that 100K iterations is close enough to "infinity".
+ %% Now that's deep.
+ do_wait_for_boot_to_finish(Node, PrintProgressReports, 100000);
+wait_for_boot_to_finish(Node, PrintProgressReports, Timeout) ->
+ Iterations = Timeout div ?BOOT_STATUS_CHECK_INTERVAL,
+ do_wait_for_boot_to_finish(Node, PrintProgressReports, Iterations).
+
+do_wait_for_boot_to_finish(_Node, _PrintProgressReports, IterationsLeft) when IterationsLeft =< 0 ->
+ {error, timeout};
+do_wait_for_boot_to_finish(Node, PrintProgressReports, IterationsLeft) ->
+ case is_booting(Node) of
+ false ->
+ %% We don't want badrpc error to be interpreted as false,
+ %% so we don't call rabbit:is_running(Node)
+ case rpc:call(Node, rabbit, is_running, []) of
+ true -> ok;
+ false -> {error, rabbit_is_not_running};
+ {badrpc, _} = Err -> Err
+ end;
+ {badrpc, _} = Err ->
+ Err;
+ true ->
+ maybe_print_boot_progress(PrintProgressReports, IterationsLeft),
+ timer:sleep(?BOOT_STATUS_CHECK_INTERVAL),
+ do_wait_for_boot_to_finish(Node, PrintProgressReports, IterationsLeft - 1)
+ end.
+
+maybe_print_boot_progress(false = _PrintProgressReports, _IterationsLeft) ->
+ ok;
+maybe_print_boot_progress(true, IterationsLeft) ->
+ case IterationsLeft rem 100 of
+ %% This will be printed on the CLI command end to illustrate some
+ %% progress.
+ 0 -> io:format("Still booting, will check again in 10 seconds...~n");
+ _ -> ok
+ end.
+
+-spec status
+ () -> [{pid, integer()} |
+ {running_applications, [{atom(), string(), string()}]} |
+ {os, {atom(), atom()}} |
+ {erlang_version, string()} |
+ {memory, any()}].
+
+status() ->
+ Version = base_product_version(),
+ S1 = [{pid, list_to_integer(os:getpid())},
+ %% The timeout value used is twice that of gen_server:call/2.
+ {running_applications, rabbit_misc:which_applications()},
+ {os, os:type()},
+ {rabbitmq_version, Version},
+ {erlang_version, erlang:system_info(system_version)},
+ {memory, rabbit_vm:memory()},
+ {alarms, alarms()},
+ {is_under_maintenance, rabbit_maintenance:is_being_drained_local_read(node())},
+ {listeners, listeners()},
+ {vm_memory_calculation_strategy, vm_memory_monitor:get_memory_calculation_strategy()}],
+ S2 = rabbit_misc:filter_exit_map(
+ fun ({Key, {M, F, A}}) -> {Key, erlang:apply(M, F, A)} end,
+ [{vm_memory_high_watermark, {vm_memory_monitor,
+ get_vm_memory_high_watermark, []}},
+ {vm_memory_limit, {vm_memory_monitor,
+ get_memory_limit, []}},
+ {disk_free_limit, {rabbit_disk_monitor,
+ get_disk_free_limit, []}},
+ {disk_free, {rabbit_disk_monitor,
+ get_disk_free, []}}]),
+ S3 = rabbit_misc:with_exit_handler(
+ fun () -> [] end,
+ fun () -> [{file_descriptors, file_handle_cache:info()}] end),
+ S4 = [{processes, [{limit, erlang:system_info(process_limit)},
+ {used, erlang:system_info(process_count)}]},
+ {run_queue, erlang:statistics(run_queue)},
+ {uptime, begin
+ {T,_} = erlang:statistics(wall_clock),
+ T div 1000
+ end},
+ {kernel, {net_ticktime, net_kernel:get_net_ticktime()}}],
+ S5 = [{active_plugins, rabbit_plugins:active()},
+ {enabled_plugin_file, rabbit_plugins:enabled_plugins_file()}],
+ S6 = [{config_files, config_files()},
+ {log_files, log_locations()},
+ {data_directory, rabbit_mnesia:dir()},
+ {raft_data_directory, ra_env:data_dir()}],
+ Totals = case is_running() of
+ true ->
+ [{virtual_host_count, rabbit_vhost:count()},
+ {connection_count,
+ length(rabbit_networking:connections_local())},
+ {queue_count, total_queue_count()}];
+ false ->
+ []
+ end,
+ S7 = [{totals, Totals}],
+ S8 = lists:filter(
+ fun
+ ({product_base_name, _}) -> true;
+ ({product_base_version, _}) -> true;
+ ({product_name, _}) -> true;
+ ({product_version, _}) -> true;
+ (_) -> false
+ end,
+ maps:to_list(product_info())),
+ S1 ++ S2 ++ S3 ++ S4 ++ S5 ++ S6 ++ S7 ++ S8.
+
+alarms() ->
+ Alarms = rabbit_misc:with_exit_handler(rabbit_misc:const([]),
+ fun rabbit_alarm:get_alarms/0),
+ N = node(),
+ %% [{{resource_limit,memory,rabbit@mercurio},[]}]
+ [{resource_limit, Limit, Node} || {{resource_limit, Limit, Node}, _} <- Alarms, Node =:= N].
+
+listeners() ->
+ Listeners = try
+ rabbit_networking:active_listeners()
+ catch
+ exit:{aborted, _} -> []
+ end,
+ [L || L = #listener{node = Node} <- Listeners, Node =:= node()].
+
+total_queue_count() ->
+ lists:foldl(fun (VirtualHost, Acc) ->
+ Acc + rabbit_amqqueue:count(VirtualHost)
+ end,
+ 0, rabbit_vhost:list_names()).
+
+-spec is_running() -> boolean().
+
+is_running() -> is_running(node()).
+
+-spec is_running(node()) -> boolean().
+
+is_running(Node) when Node =:= node() ->
+ case rabbit_boot_state:get() of
+ ready -> true;
+ _ -> false
+ end;
+is_running(Node) ->
+ case rpc:call(Node, rabbit, is_running, []) of
+ true -> true;
+ _ -> false
+ end.
+
+is_booted() -> is_booted(node()).
+
+is_booted(Node) ->
+ case is_booting(Node) of
+ false ->
+ is_running(Node);
+ _ -> false
+ end.
+
+-spec environment() -> [{param(), term()}].
+
+environment() ->
+ %% The timeout value is twice that of gen_server:call/2.
+ [{A, environment(A)} ||
+ {A, _, _} <- lists:keysort(1, application:which_applications(10000))].
+
+environment(App) ->
+ Ignore = [default_pass, included_applications],
+ lists:keysort(1, [P || P = {K, _} <- application:get_all_env(App),
+ not lists:member(K, Ignore)]).
+
+-spec rotate_logs() -> rabbit_types:ok_or_error(any()).
+
+rotate_logs() ->
+ rabbit_lager:fold_sinks(
+ fun
+ (_, [], Acc) ->
+ Acc;
+ (SinkName, FileNames, Acc) ->
+ lager:log(SinkName, info, self(),
+ "Log file rotation forced", []),
+ %% FIXME: We use an internal message, understood by
+ %% lager_file_backend. We should use a proper API, when
+ %% it's added to Lager.
+ %%
+ %% FIXME: This call is effectively asynchronous: at the
+            %% end of this function, we can't guarantee the rotation
+ %% is completed.
+ [ok = gen_event:call(SinkName,
+ {lager_file_backend, FileName},
+ rotate,
+ infinity) || FileName <- FileNames],
+ lager:log(SinkName, info, self(),
+ "Log file re-opened after forced rotation", []),
+ Acc
+ end, ok).
+
+%%--------------------------------------------------------------------
+
+-spec start('normal',[]) ->
+ {'error',
+ {'erlang_version_too_old',
+ {'found',string(),string()},
+ {'required',string(),string()}}} |
+ {'ok',pid()}.
+
+start(normal, []) ->
+    %% Reset the boot state and clear the stop reason again (this was
+    %% already done in rabbitmq_prelaunch).
+ %%
+ %% This is important if the previous startup attempt failed after
+ %% rabbitmq_prelaunch was started and the application is still
+ %% running.
+ rabbit_boot_state:set(booting),
+ rabbit_prelaunch:clear_stop_reason(),
+
+ try
+ run_prelaunch_second_phase(),
+
+ ProductInfo = product_info(),
+ case ProductInfo of
+ #{product_overridden := true,
+ product_base_name := BaseName,
+ product_base_version := BaseVersion} ->
+ rabbit_log:info("~n Starting ~s ~s on Erlang ~s~n Based on ~s ~s~n ~s~n ~s~n",
+ [product_name(), product_version(), rabbit_misc:otp_release(),
+ BaseName, BaseVersion,
+ ?COPYRIGHT_MESSAGE, ?INFORMATION_MESSAGE]);
+ _ ->
+ rabbit_log:info("~n Starting ~s ~s on Erlang ~s~n ~s~n ~s~n",
+ [product_name(), product_version(), rabbit_misc:otp_release(),
+ ?COPYRIGHT_MESSAGE, ?INFORMATION_MESSAGE])
+ end,
+ log_motd(),
+ {ok, SupPid} = rabbit_sup:start_link(),
+
+ %% Compatibility with older RabbitMQ versions + required by
+ %% rabbit_node_monitor:notify_node_up/0:
+ %%
+ %% We register the app process under the name `rabbit`. This is
+ %% checked by `is_running(Node)` on a remote node. The process
+        %% is also monitored by rabbit_node_monitor.
+ %%
+ %% The process name must be registered *before* running the boot
+ %% steps: that's when rabbit_node_monitor will set the process
+ %% monitor up.
+ %%
+ %% Note that plugins were not taken care of at this point
+ %% either.
+ rabbit_log_prelaunch:debug(
+ "Register `rabbit` process (~p) for rabbit_node_monitor",
+ [self()]),
+ true = register(rabbit, self()),
+
+ print_banner(),
+ log_banner(),
+ warn_if_kernel_config_dubious(),
+ warn_if_disc_io_options_dubious(),
+ %% We run `rabbit` boot steps only for now. Plugins boot steps
+ %% will be executed as part of the postlaunch phase after they
+ %% are started.
+ rabbit_boot_steps:run_boot_steps([rabbit]),
+ run_postlaunch_phase(),
+ {ok, SupPid}
+ catch
+ throw:{error, _} = Error ->
+ mnesia:stop(),
+ rabbit_prelaunch_errors:log_error(Error),
+ rabbit_prelaunch:set_stop_reason(Error),
+ rabbit_boot_state:set(stopped),
+ Error;
+ Class:Exception:Stacktrace ->
+ mnesia:stop(),
+ rabbit_prelaunch_errors:log_exception(
+ Class, Exception, Stacktrace),
+ Error = {error, Exception},
+ rabbit_prelaunch:set_stop_reason(Error),
+ rabbit_boot_state:set(stopped),
+ Error
+ end.
+
+run_postlaunch_phase() ->
+ spawn(fun() -> do_run_postlaunch_phase() end).
+
+do_run_postlaunch_phase() ->
+ %% Once RabbitMQ itself is started, we need to run a few more steps,
+ %% in particular start plugins.
+ rabbit_log_prelaunch:debug(""),
+ rabbit_log_prelaunch:debug("== Postlaunch phase =="),
+
+ try
+ rabbit_log_prelaunch:debug(""),
+ rabbit_log_prelaunch:debug("== Plugins =="),
+
+ rabbit_log_prelaunch:debug("Setting plugins up"),
+ %% `Plugins` contains all the enabled plugins, plus their
+        %% dependencies. The order is important: dependencies appear
+        %% before the plugins that depend on them.
+ Plugins = rabbit_plugins:setup(),
+ rabbit_log_prelaunch:debug(
+ "Starting the following plugins: ~p", [Plugins]),
+ %% We can load all plugins and refresh their feature flags at
+ %% once, because it does not involve running code from the
+ %% plugins.
+ app_utils:load_applications(Plugins),
+ ok = rabbit_feature_flags:refresh_feature_flags_after_app_load(
+ Plugins),
+ %% However, we want to run their boot steps and actually start
+ %% them one by one, to ensure a dependency is fully started
+ %% before a plugin which depends on it gets a chance to start.
+ lists:foreach(
+ fun(Plugin) ->
+ ok = rabbit_boot_steps:run_boot_steps([Plugin]),
+ case application:ensure_all_started(Plugin) of
+ {ok, _} -> ok;
+ Error -> throw(Error)
+ end
+ end, Plugins),
+
+ %% Successful boot resets node maintenance state.
+ rabbit_log_prelaunch:info("Resetting node maintenance status"),
+ _ = rabbit_maintenance:unmark_as_being_drained(),
+
+        %% Load definitions after all plugins have been enabled,
+ %% see rabbitmq/rabbitmq-server#2384
+ case rabbit_definitions:maybe_load_definitions() of
+ ok -> ok;
+ DefLoadError -> throw(DefLoadError)
+ end,
+
+ %% Start listeners after all plugins have been enabled,
+ %% see rabbitmq/rabbitmq-server#2405.
+ rabbit_log_prelaunch:info(
+ "Ready to start client connection listeners"),
+ ok = rabbit_networking:boot(),
+
+ %% The node is ready: mark it as such and log it.
+ %% NOTE: PLEASE DO NOT ADD CRITICAL NODE STARTUP CODE AFTER THIS.
+ ok = rabbit_lager:broker_is_started(),
+ ok = log_broker_started(
+ rabbit_plugins:strictly_plugins(rabbit_plugins:active())),
+
+ rabbit_log_prelaunch:debug("Marking ~s as running", [product_name()]),
+ rabbit_boot_state:set(ready)
+ catch
+ throw:{error, _} = Error ->
+ rabbit_prelaunch_errors:log_error(Error),
+ rabbit_prelaunch:set_stop_reason(Error),
+ do_stop();
+ Class:Exception:Stacktrace ->
+ rabbit_prelaunch_errors:log_exception(
+ Class, Exception, Stacktrace),
+ Error = {error, Exception},
+ rabbit_prelaunch:set_stop_reason(Error),
+ do_stop()
+ end.
+
+prep_stop(State) ->
+ rabbit_boot_state:set(stopping),
+ rabbit_peer_discovery:maybe_unregister(),
+ State.
+
+-spec stop(_) -> 'ok'.
+
+stop(State) ->
+ ok = rabbit_alarm:stop(),
+ ok = case rabbit_mnesia:is_clustered() of
+ true -> ok;
+ false -> rabbit_table:clear_ram_only_tables()
+ end,
+ case State of
+ [] -> rabbit_prelaunch:set_stop_reason(normal);
+ _ -> rabbit_prelaunch:set_stop_reason(State)
+ end,
+ rabbit_boot_state:set(stopped),
+ ok.
+
+%%---------------------------------------------------------------------------
+%% boot step functions
+
+-spec boot_delegate() -> 'ok'.
+
+boot_delegate() ->
+ {ok, Count} = application:get_env(rabbit, delegate_count),
+ rabbit_sup:start_supervisor_child(delegate_sup, [Count]).
+
+-spec recover() -> 'ok'.
+
+recover() ->
+ ok = rabbit_policy:recover(),
+ ok = rabbit_vhost:recover(),
+ ok = lager_exchange_backend:maybe_init_exchange().
+
+-spec maybe_insert_default_data() -> 'ok'.
+
+maybe_insert_default_data() ->
+ NoDefsToImport = not rabbit_definitions:has_configured_definitions_to_load(),
+ case rabbit_table:needs_default_data() andalso NoDefsToImport of
+ true ->
+ rabbit_log:info("Will seed default virtual host and user..."),
+ insert_default_data();
+ false ->
+ rabbit_log:info("Will not seed default virtual host and user: have definitions to load..."),
+ ok
+ end.
+
+insert_default_data() ->
+ {ok, DefaultUser} = application:get_env(default_user),
+ {ok, DefaultPass} = application:get_env(default_pass),
+ {ok, DefaultTags} = application:get_env(default_user_tags),
+ {ok, DefaultVHost} = application:get_env(default_vhost),
+ {ok, [DefaultConfigurePerm, DefaultWritePerm, DefaultReadPerm]} =
+ application:get_env(default_permissions),
+
+ DefaultUserBin = rabbit_data_coercion:to_binary(DefaultUser),
+ DefaultPassBin = rabbit_data_coercion:to_binary(DefaultPass),
+ DefaultVHostBin = rabbit_data_coercion:to_binary(DefaultVHost),
+ DefaultConfigurePermBin = rabbit_data_coercion:to_binary(DefaultConfigurePerm),
+ DefaultWritePermBin = rabbit_data_coercion:to_binary(DefaultWritePerm),
+ DefaultReadPermBin = rabbit_data_coercion:to_binary(DefaultReadPerm),
+
+ ok = rabbit_vhost:add(DefaultVHostBin, <<"Default virtual host">>, [], ?INTERNAL_USER),
+ ok = lager_exchange_backend:maybe_init_exchange(),
+ ok = rabbit_auth_backend_internal:add_user(
+ DefaultUserBin,
+ DefaultPassBin,
+ ?INTERNAL_USER
+ ),
+ ok = rabbit_auth_backend_internal:set_tags(DefaultUserBin, DefaultTags,
+ ?INTERNAL_USER),
+ ok = rabbit_auth_backend_internal:set_permissions(DefaultUserBin,
+ DefaultVHostBin,
+ DefaultConfigurePermBin,
+ DefaultWritePermBin,
+ DefaultReadPermBin,
+ ?INTERNAL_USER),
+ ok.
+
+%%---------------------------------------------------------------------------
+%% logging
+
+-spec log_locations() -> [rabbit_lager:log_location()].
+log_locations() ->
+ rabbit_lager:log_locations().
+
+-spec config_locations() -> [rabbit_config:config_location()].
+config_locations() ->
+ rabbit_config:config_files().
+
+-spec force_event_refresh(reference()) -> 'ok'.
+
+% Note: https://www.pivotaltracker.com/story/show/166962656
+% This event is necessary for the stats timer to be initialized with
+% the correct values once the management agent has started
+force_event_refresh(Ref) ->
+ % direct connections, e.g. MQTT, STOMP
+ ok = rabbit_direct:force_event_refresh(Ref),
+ % AMQP connections
+ ok = rabbit_networking:force_connection_event_refresh(Ref),
+ % "external" connections, which are not handled by the "AMQP core",
+ % e.g. connections to the stream plugin
+ ok = rabbit_networking:force_non_amqp_connection_event_refresh(Ref),
+ ok = rabbit_channel:force_event_refresh(Ref),
+ ok = rabbit_amqqueue:force_event_refresh(Ref).
+
+%%---------------------------------------------------------------------------
+%% misc
+
+log_broker_started(Plugins) ->
+ PluginList = iolist_to_binary([rabbit_misc:format(" * ~s~n", [P])
+ || P <- Plugins]),
+ Message = string:strip(rabbit_misc:format(
+ "Server startup complete; ~b plugins started.~n~s",
+ [length(Plugins), PluginList]), right, $\n),
+ rabbit_log:info(Message),
+ io:format(" completed with ~p plugins.~n", [length(Plugins)]).
+
+-define(RABBIT_TEXT_LOGO,
+ "~n ## ## ~s ~s"
+ "~n ## ##"
+ "~n ########## ~s"
+ "~n ###### ##"
+ "~n ########## ~s").
+-define(FG8_START, "\033[38;5;202m").
+-define(BG8_START, "\033[48;5;202m").
+-define(FG32_START, "\033[38;2;255;102;0m").
+-define(BG32_START, "\033[48;2;255;102;0m").
+-define(C_END, "\033[0m").
+-define(RABBIT_8BITCOLOR_LOGO,
+ "~n " ?BG8_START " " ?C_END " " ?BG8_START " " ?C_END " \033[1m" ?FG8_START "~s" ?C_END " ~s"
+ "~n " ?BG8_START " " ?C_END " " ?BG8_START " " ?C_END
+ "~n " ?BG8_START " " ?C_END " ~s"
+ "~n " ?BG8_START " " ?C_END " " ?BG8_START " " ?C_END
+ "~n " ?BG8_START " " ?C_END " ~s").
+-define(RABBIT_32BITCOLOR_LOGO,
+ "~n " ?BG32_START " " ?C_END " " ?BG32_START " " ?C_END " \033[1m" ?FG32_START "~s" ?C_END " ~s"
+ "~n " ?BG32_START " " ?C_END " " ?BG32_START " " ?C_END
+ "~n " ?BG32_START " " ?C_END " ~s"
+ "~n " ?BG32_START " " ?C_END " " ?BG32_START " " ?C_END
+ "~n " ?BG32_START " " ?C_END " ~s").
+
+print_banner() ->
+ Product = product_name(),
+ Version = product_version(),
+ LineListFormatter = fun (Placeholder, [_ | Tail] = LL) ->
+ LF = lists:flatten([Placeholder || _ <- lists:seq(1, length(Tail))]),
+ {LF, LL};
+ (_, []) ->
+ {"", ["(none)"]}
+ end,
+ Logo = case rabbit_prelaunch:get_context() of
+ %% We use the colored logo only when running the
+ %% interactive shell and when colors are supported.
+ %%
+ %% Basically it means it will be used on Unix when
+ %% running "make run-broker" and that's about it.
+ #{os_type := {unix, darwin},
+ interactive_shell := true,
+ output_supports_colors := true} -> ?RABBIT_8BITCOLOR_LOGO;
+ #{interactive_shell := true,
+ output_supports_colors := true} -> ?RABBIT_32BITCOLOR_LOGO;
+ _ -> ?RABBIT_TEXT_LOGO
+ end,
+ %% padded list lines
+ {LogFmt, LogLocations} = LineListFormatter("~n ~ts", log_locations()),
+ {CfgFmt, CfgLocations} = LineListFormatter("~n ~ts", config_locations()),
+ {MOTDFormat, MOTDArgs} = case motd() of
+ undefined ->
+ {"", []};
+ MOTD ->
+ Lines = string:split(MOTD, "\n", all),
+ Padded = [case Line of
+ <<>> -> "\n";
+ _ -> [" ", Line, "\n"]
+ end
+ || Line <- Lines],
+ {"~n~ts", [Padded]}
+ end,
+ io:format(Logo ++
+ "~n" ++
+ MOTDFormat ++
+ "~n Doc guides: https://rabbitmq.com/documentation.html"
+ "~n Support: https://rabbitmq.com/contact.html"
+ "~n Tutorials: https://rabbitmq.com/getstarted.html"
+ "~n Monitoring: https://rabbitmq.com/monitoring.html"
+ "~n"
+ "~n Logs: ~ts" ++ LogFmt ++ "~n"
+ "~n Config file(s): ~ts" ++ CfgFmt ++ "~n"
+ "~n Starting broker...",
+ [Product, Version, ?COPYRIGHT_MESSAGE, ?INFORMATION_MESSAGE] ++
+ MOTDArgs ++
+ LogLocations ++
+ CfgLocations).
+
+log_motd() ->
+ case motd() of
+ undefined ->
+ ok;
+ MOTD ->
+ Lines = string:split(MOTD, "\n", all),
+ Padded = [case Line of
+ <<>> -> "\n";
+ _ -> [" ", Line, "\n"]
+ end
+ || Line <- Lines],
+ rabbit_log:info("~n~ts", [string:trim(Padded, trailing, [$\r, $\n])])
+ end.
+
+log_banner() ->
+ {FirstLog, OtherLogs} = case log_locations() of
+ [Head | Tail] ->
+ {Head, [{"", F} || F <- Tail]};
+ [] ->
+ {"(none)", []}
+ end,
+ Settings = [{"node", node()},
+ {"home dir", home_dir()},
+ {"config file(s)", config_files()},
+ {"cookie hash", rabbit_nodes:cookie_hash()},
+ {"log(s)", FirstLog}] ++
+ OtherLogs ++
+ [{"database dir", rabbit_mnesia:dir()}],
+ DescrLen = 1 + lists:max([length(K) || {K, _V} <- Settings]),
+ Format = fun (K, V) ->
+ rabbit_misc:format(
+ " ~-" ++ integer_to_list(DescrLen) ++ "s: ~ts~n", [K, V])
+ end,
+ Banner = string:strip(lists:flatten(
+ [case S of
+ {"config file(s)" = K, []} ->
+ Format(K, "(none)");
+ {"config file(s)" = K, [V0 | Vs]} ->
+ [Format(K, V0) | [Format("", V) || V <- Vs]];
+ {K, V} ->
+ Format(K, V)
+ end || S <- Settings]), right, $\n),
+ rabbit_log:info("~n~ts", [Banner]).
+
+warn_if_kernel_config_dubious() ->
+ case os:type() of
+ {win32, _} ->
+ ok;
+ _ ->
+ case erlang:system_info(kernel_poll) of
+ true -> ok;
+ false -> rabbit_log:warning(
+ "Kernel poll (epoll, kqueue, etc) is disabled. Throughput "
+ "and CPU utilization may worsen.~n")
+ end
+ end,
+ AsyncThreads = erlang:system_info(thread_pool_size),
+ case AsyncThreads < ?ASYNC_THREADS_WARNING_THRESHOLD of
+ true -> rabbit_log:warning(
+ "Erlang VM is running with ~b I/O threads, "
+ "file I/O performance may worsen~n", [AsyncThreads]);
+ false -> ok
+ end,
+ IDCOpts = case application:get_env(kernel, inet_default_connect_options) of
+ undefined -> [];
+ {ok, Val} -> Val
+ end,
+ case proplists:get_value(nodelay, IDCOpts, false) of
+ false -> rabbit_log:warning("Nagle's algorithm is enabled for sockets, "
+ "network I/O latency will be higher~n");
+ true -> ok
+ end.
+
+warn_if_disc_io_options_dubious() ->
+ %% if these values are not set, it doesn't matter since
+ %% rabbit_variable_queue will pick up the values defined in the
+ %% IO_BATCH_SIZE and CREDIT_DISC_BOUND constants.
+ CreditDiscBound = rabbit_misc:get_env(rabbit, msg_store_credit_disc_bound,
+ undefined),
+ IoBatchSize = rabbit_misc:get_env(rabbit, msg_store_io_batch_size,
+ undefined),
+ case catch validate_msg_store_io_batch_size_and_credit_disc_bound(
+ CreditDiscBound, IoBatchSize) of
+ ok -> ok;
+ {error, {Reason, Vars}} ->
+ rabbit_log:warning(Reason, Vars)
+ end.
+
+validate_msg_store_io_batch_size_and_credit_disc_bound(CreditDiscBound,
+ IoBatchSize) ->
+ case IoBatchSize of
+ undefined ->
+ ok;
+ IoBatchSize when is_integer(IoBatchSize) ->
+ if IoBatchSize < ?IO_BATCH_SIZE ->
+ throw({error,
+ {"io_batch_size of ~b lower than recommended value ~b, "
+ "paging performance may worsen~n",
+ [IoBatchSize, ?IO_BATCH_SIZE]}});
+ true ->
+ ok
+ end;
+ IoBatchSize ->
+ throw({error,
+                   {"io_batch_size should be an integer, but ~p given",
+ [IoBatchSize]}})
+ end,
+
+ %% CreditDiscBound = {InitialCredit, MoreCreditAfter}
+ {RIC, RMCA} = ?CREDIT_DISC_BOUND,
+ case CreditDiscBound of
+ undefined ->
+ ok;
+ {IC, MCA} when is_integer(IC), is_integer(MCA) ->
+ if IC < RIC; MCA < RMCA ->
+ throw({error,
+                   {"msg_store_credit_disc_bound {~b, ~b} lower than "
+ "recommended value {~b, ~b},"
+ " paging performance may worsen~n",
+ [IC, MCA, RIC, RMCA]}});
+ true ->
+ ok
+ end;
+ {IC, MCA} ->
+ throw({error,
+ {"both msg_store_credit_disc_bound values should be integers, but ~p given",
+ [{IC, MCA}]}});
+ CreditDiscBound ->
+ throw({error,
+ {"invalid msg_store_credit_disc_bound value given: ~p",
+ [CreditDiscBound]}})
+ end,
+
+ case {CreditDiscBound, IoBatchSize} of
+ {undefined, undefined} ->
+ ok;
+ {_CDB, undefined} ->
+ ok;
+ {undefined, _IBS} ->
+ ok;
+ {{InitialCredit, _MCA}, IoBatchSize} ->
+ if IoBatchSize < InitialCredit ->
+ throw(
+ {error,
+ {"msg_store_io_batch_size ~b should be bigger than the initial "
+ "credit value from msg_store_credit_disc_bound ~b,"
+ " paging performance may worsen~n",
+ [IoBatchSize, InitialCredit]}});
+ true ->
+ ok
+ end
+ end.
+
+-spec product_name() -> string().
+
+product_name() ->
+ case product_info() of
+ #{product_name := ProductName} -> ProductName;
+ #{product_base_name := BaseName} -> BaseName
+ end.
+
+-spec product_version() -> string().
+
+product_version() ->
+ case product_info() of
+ #{product_version := ProductVersion} -> ProductVersion;
+ #{product_base_version := BaseVersion} -> BaseVersion
+ end.
+
+-spec product_info() -> #{product_base_name := string(),
+ product_base_version := string(),
+                         product_overridden => boolean(),
+ product_name => string(),
+ product_version => string(),
+ otp_release := string()}.
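+
+%% Example return value (illustrative; these version strings are made up):
+%%
+%%   #{product_base_name => "RabbitMQ",
+%%     product_base_version => "3.8.9",
+%%     product_name => "MyProduct",
+%%     product_version => "1.0.0",
+%%     product_overridden => true,
+%%     otp_release => "23"}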
+
+product_info() ->
+ PTKey = {?MODULE, product},
+ try
+ %% The value is cached the first time to avoid calling the
+ %% application master many times just for that.
+ persistent_term:get(PTKey)
+ catch
+ error:badarg ->
+ BaseName = base_product_name(),
+ BaseVersion = base_product_version(),
+ Info0 = #{product_base_name => BaseName,
+ product_base_version => BaseVersion,
+ otp_release => rabbit_misc:otp_release()},
+
+ {NameFromEnv, VersionFromEnv} =
+ case rabbit_prelaunch:get_context() of
+ #{product_name := NFE,
+ product_version := VFE} -> {NFE, VFE};
+ _ -> {undefined, undefined}
+ end,
+
+ Info1 = case NameFromEnv of
+ undefined ->
+ NameFromApp = string_from_app_env(
+ product_name,
+ undefined),
+ case NameFromApp of
+ undefined ->
+ Info0;
+ _ ->
+ Info0#{product_name => NameFromApp,
+ product_overridden => true}
+ end;
+ _ ->
+ Info0#{product_name => NameFromEnv,
+ product_overridden => true}
+ end,
+
+ Info2 = case VersionFromEnv of
+ undefined ->
+ VersionFromApp = string_from_app_env(
+ product_version,
+ undefined),
+ case VersionFromApp of
+ undefined ->
+ Info1;
+ _ ->
+ Info1#{product_version => VersionFromApp,
+ product_overridden => true}
+ end;
+ _ ->
+ Info1#{product_version => VersionFromEnv,
+ product_overridden => true}
+ end,
+ persistent_term:put(PTKey, Info2),
+ Info2
+ end.
+
+string_from_app_env(Key, Default) ->
+ case application:get_env(rabbit, Key) of
+ {ok, Val} ->
+ case io_lib:deep_char_list(Val) of
+ true ->
+ case lists:flatten(Val) of
+ "" -> Default;
+ String -> String
+ end;
+ false ->
+ Default
+ end;
+ undefined ->
+ Default
+ end.
+
+base_product_name() ->
+ %% This function assumes the `rabbit` application was loaded in
+ %% product_info().
+ {ok, Product} = application:get_key(rabbit, description),
+ Product.
+
+base_product_version() ->
+ %% This function assumes the `rabbit` application was loaded in
+ %% product_info().
+ rabbit_misc:version().
+
+motd_file() ->
+    %% Precedence is:
+ %% 1. The environment variable;
+ %% 2. The `motd_file` configuration parameter;
+ %% 3. The default value.
+ Context = rabbit_prelaunch:get_context(),
+ case Context of
+ #{motd_file := File,
+ var_origins := #{motd_file := environment}}
+ when File =/= undefined ->
+ File;
+ _ ->
+ Default = case Context of
+ #{motd_file := File} -> File;
+ _ -> undefined
+ end,
+ string_from_app_env(motd_file, Default)
+ end.
+
+motd() ->
+ case motd_file() of
+ undefined ->
+ undefined;
+ File ->
+ case file:read_file(File) of
+ {ok, MOTD} -> string:trim(MOTD, trailing, [$\r,$\n]);
+ {error, _} -> undefined
+ end
+ end.
+
+home_dir() ->
+ case init:get_argument(home) of
+ {ok, [[Home]]} -> Home;
+ Other -> Other
+ end.
+
+config_files() ->
+ rabbit_config:config_files().
+
+%% We don't want this in fhc since it references rabbit stuff. And we can't
+%% put this in the boot step directly.
+start_fhc() ->
+ ok = rabbit_sup:start_restartable_child(
+ file_handle_cache,
+ [fun rabbit_alarm:set_alarm/1, fun rabbit_alarm:clear_alarm/1]),
+ ensure_working_fhc().
+
+ensure_working_fhc() ->
+    %% To test the file handle cache, we simply read a file we know
+    %% exists (the Erlang kernel's .app file).
+ %%
+ %% To avoid any pollution of the application process' dictionary by
+ %% file_handle_cache, we spawn a separate process.
+ Parent = self(),
+ TestFun = fun() ->
+ ReadBuf = case application:get_env(rabbit, fhc_read_buffering) of
+ {ok, true} -> "ON";
+ {ok, false} -> "OFF"
+ end,
+ WriteBuf = case application:get_env(rabbit, fhc_write_buffering) of
+ {ok, true} -> "ON";
+ {ok, false} -> "OFF"
+ end,
+ rabbit_log:info("FHC read buffering: ~s~n", [ReadBuf]),
+ rabbit_log:info("FHC write buffering: ~s~n", [WriteBuf]),
+ Filename = filename:join(code:lib_dir(kernel, ebin), "kernel.app"),
+ {ok, Fd} = file_handle_cache:open(Filename, [raw, binary, read], []),
+ {ok, _} = file_handle_cache:read(Fd, 1),
+ ok = file_handle_cache:close(Fd),
+ Parent ! fhc_ok
+ end,
+ TestPid = spawn_link(TestFun),
+ %% Because we are waiting for the test fun, abuse the
+ %% 'mnesia_table_loading_retry_timeout' parameter to find a sane timeout
+ %% value.
+ Timeout = rabbit_table:retry_timeout(),
+ receive
+ fhc_ok -> ok;
+ {'EXIT', TestPid, Exception} -> throw({ensure_working_fhc, Exception})
+ after Timeout ->
+ throw({ensure_working_fhc, {timeout, TestPid}})
+ end.
diff --git a/deps/rabbit/src/rabbit_access_control.erl b/deps/rabbit/src/rabbit_access_control.erl
new file mode 100644
index 0000000000..72260d5723
--- /dev/null
+++ b/deps/rabbit/src/rabbit_access_control.erl
@@ -0,0 +1,257 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_access_control).
+
+-include("rabbit.hrl").
+
+-export([check_user_pass_login/2, check_user_login/2, check_user_loopback/2,
+ check_vhost_access/4, check_resource_access/4, check_topic_access/4]).
+
+-export([permission_cache_can_expire/1, update_state/2]).
+
+%%----------------------------------------------------------------------------
+
+-export_type([permission_atom/0]).
+
+-type permission_atom() :: 'configure' | 'read' | 'write'.
+
+%%----------------------------------------------------------------------------
+
+-spec check_user_pass_login
+ (rabbit_types:username(), rabbit_types:password()) ->
+ {'ok', rabbit_types:user()} |
+ {'refused', rabbit_types:username(), string(), [any()]}.
+
+check_user_pass_login(Username, Password) ->
+ check_user_login(Username, [{password, Password}]).
+
+-spec check_user_login
+ (rabbit_types:username(), [{atom(), any()}]) ->
+ {'ok', rabbit_types:user()} |
+ {'refused', rabbit_types:username(), string(), [any()]}.
+
+check_user_login(Username, AuthProps) ->
+ %% extra auth properties like MQTT client id are in AuthProps
+ {ok, Modules} = application:get_env(rabbit, auth_backends),
+ R = lists:foldl(
+ fun (rabbit_auth_backend_cache=ModN, {refused, _, _, _}) ->
+ %% It is possible to specify authn/authz within the cache module settings,
+ %% so we have to do both auth steps here
+ %% See this rabbitmq-users discussion:
+ %% https://groups.google.com/d/topic/rabbitmq-users/ObqM7MQdA3I/discussion
+ try_authenticate_and_try_authorize(ModN, ModN, Username, AuthProps);
+ ({ModN, ModZs}, {refused, _, _, _}) ->
+ %% Different modules for authN vs authZ. So authenticate
+ %% with authN module, then if that succeeds do
+                %% passwordless (i.e. pre-authenticated) login with authZ.
+ try_authenticate_and_try_authorize(ModN, ModZs, Username, AuthProps);
+ (Mod, {refused, _, _, _}) ->
+ %% Same module for authN and authZ. Just take the result
+ %% it gives us
+ case try_authenticate(Mod, Username, AuthProps) of
+ {ok, ModNUser = #auth_user{username = Username2, impl = Impl}} ->
+ rabbit_log:debug("User '~s' authenticated successfully by backend ~s", [Username2, Mod]),
+ user(ModNUser, {ok, [{Mod, Impl}], []});
+ Else ->
+                        rabbit_log:debug("User '~s' failed authentication by backend ~s", [Username, Mod]),
+ Else
+ end;
+ (_, {ok, User}) ->
+ %% We've successfully authenticated. Skip to the end...
+ {ok, User}
+ end,
+ {refused, Username, "No modules checked '~s'", [Username]}, Modules),
+ R.
+
+try_authenticate_and_try_authorize(ModN, ModZs0, Username, AuthProps) ->
+ ModZs = case ModZs0 of
+ A when is_atom(A) -> [A];
+ L when is_list(L) -> L
+ end,
+ case try_authenticate(ModN, Username, AuthProps) of
+ {ok, ModNUser = #auth_user{username = Username2}} ->
+ rabbit_log:debug("User '~s' authenticated successfully by backend ~s", [Username2, ModN]),
+ user(ModNUser, try_authorize(ModZs, Username2, AuthProps));
+ Else ->
+ Else
+ end.
+
+try_authenticate(Module, Username, AuthProps) ->
+ case Module:user_login_authentication(Username, AuthProps) of
+ {ok, AuthUser} -> {ok, AuthUser};
+ {error, E} -> {refused, Username,
+ "~s failed authenticating ~s: ~p~n",
+ [Module, Username, E]};
+ {refused, F, A} -> {refused, Username, F, A}
+ end.
+
+try_authorize(Modules, Username, AuthProps) ->
+ lists:foldr(
+ fun (Module, {ok, ModsImpls, ModsTags}) ->
+ case Module:user_login_authorization(Username, AuthProps) of
+ {ok, Impl, Tags}-> {ok, [{Module, Impl} | ModsImpls], ModsTags ++ Tags};
+ {ok, Impl} -> {ok, [{Module, Impl} | ModsImpls], ModsTags};
+ {error, E} -> {refused, Username,
+ "~s failed authorizing ~s: ~p~n",
+ [Module, Username, E]};
+ {refused, F, A} -> {refused, Username, F, A}
+ end;
+ (_, {refused, F, A}) ->
+ {refused, Username, F, A}
+ end, {ok, [], []}, Modules).
+
+user(#auth_user{username = Username, tags = Tags}, {ok, ModZImpls, ModZTags}) ->
+ {ok, #user{username = Username,
+ tags = Tags ++ ModZTags,
+ authz_backends = ModZImpls}};
+user(_AuthUser, Error) ->
+ Error.
+
+auth_user(#user{username = Username, tags = Tags}, Impl) ->
+ #auth_user{username = Username,
+ tags = Tags,
+ impl = Impl}.
+
+-spec check_user_loopback
+ (rabbit_types:username(), rabbit_net:socket() | inet:ip_address()) ->
+ 'ok' | 'not_allowed'.
+
+check_user_loopback(Username, SockOrAddr) ->
+ {ok, Users} = application:get_env(rabbit, loopback_users),
+ case rabbit_net:is_loopback(SockOrAddr)
+ orelse not lists:member(Username, Users) of
+ true -> ok;
+ false -> not_allowed
+ end.
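+
+%% Illustrative example: with the default loopback_users = [<<"guest">>],
+%% check_user_loopback(<<"guest">>, {127,0,0,1}) returns 'ok', while
+%% check_user_loopback(<<"guest">>, {10,0,0,1}) returns 'not_allowed'.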
+
+get_authz_data_from({ip, Address}) ->
+ #{peeraddr => Address};
+get_authz_data_from({socket, Sock}) ->
+ {ok, {Address, _Port}} = rabbit_net:peername(Sock),
+ #{peeraddr => Address};
+get_authz_data_from(undefined) ->
+ undefined.
+
+% Note: ip can be either a tuple or a binary if reverse_dns_lookups
+% is enabled and it's a direct connection.
+-spec check_vhost_access(User :: rabbit_types:user(),
+ VHostPath :: rabbit_types:vhost(),
+ AuthzRawData :: {socket, rabbit_net:socket()} | {ip, inet:ip_address() | binary()} | undefined,
+ AuthzContext :: map()) ->
+ 'ok' | rabbit_types:channel_exit().
+check_vhost_access(User = #user{username = Username,
+ authz_backends = Modules}, VHostPath, AuthzRawData, AuthzContext) ->
+ AuthzData = get_authz_data_from(AuthzRawData),
+ FullAuthzContext = create_vhost_access_authz_data(AuthzData, AuthzContext),
+ lists:foldl(
+ fun({Mod, Impl}, ok) ->
+ check_access(
+ fun() ->
+ rabbit_vhost:exists(VHostPath) andalso
+ Mod:check_vhost_access(
+ auth_user(User, Impl), VHostPath, FullAuthzContext)
+ end,
+ Mod, "access to vhost '~s' refused for user '~s'",
+ [VHostPath, Username], not_allowed);
+ (_, Else) ->
+ Else
+ end, ok, Modules).
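+
+%% The foldl above consults each authz backend in order: check_access/5
+%% either returns 'ok' or raises a protocol error, so a refusal from any
+%% backend aborts the whole check; the second clause defensively passes a
+%% non-'ok' accumulator through untouched.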
+
+create_vhost_access_authz_data(undefined, Context) when map_size(Context) == 0 ->
+ undefined;
+create_vhost_access_authz_data(undefined, Context) ->
+ Context;
+create_vhost_access_authz_data(PeerAddr, Context) when map_size(Context) == 0 ->
+ PeerAddr;
+create_vhost_access_authz_data(PeerAddr, Context) ->
+ maps:merge(PeerAddr, Context).
+
+-spec check_resource_access
+ (rabbit_types:user(), rabbit_types:r(atom()), permission_atom(), rabbit_types:authz_context()) ->
+ 'ok' | rabbit_types:channel_exit().
+
+check_resource_access(User, R = #resource{kind = exchange, name = <<"">>},
+ Permission, Context) ->
+ check_resource_access(User, R#resource{name = <<"amq.default">>},
+ Permission, Context);
+check_resource_access(User = #user{username = Username,
+ authz_backends = Modules},
+ Resource, Permission, Context) ->
+ lists:foldl(
+ fun({Module, Impl}, ok) ->
+ check_access(
+ fun() -> Module:check_resource_access(
+ auth_user(User, Impl), Resource, Permission, Context) end,
+ Module, "access to ~s refused for user '~s'",
+ [rabbit_misc:rs(Resource), Username]);
+ (_, Else) -> Else
+ end, ok, Modules).
+
+check_topic_access(User = #user{username = Username,
+ authz_backends = Modules},
+ Resource, Permission, Context) ->
+ lists:foldl(
+ fun({Module, Impl}, ok) ->
+ check_access(
+ fun() -> Module:check_topic_access(
+ auth_user(User, Impl), Resource, Permission, Context) end,
+ Module, "access to topic '~s' in exchange ~s refused for user '~s'",
+ [maps:get(routing_key, Context), rabbit_misc:rs(Resource), Username]);
+ (_, Else) -> Else
+ end, ok, Modules).
+
+check_access(Fun, Module, ErrStr, ErrArgs) ->
+ check_access(Fun, Module, ErrStr, ErrArgs, access_refused).
+
+check_access(Fun, Module, ErrStr, ErrArgs, ErrName) ->
+ case Fun() of
+ true ->
+ ok;
+ false ->
+ rabbit_misc:protocol_error(ErrName, ErrStr, ErrArgs);
+ {error, E} ->
+ FullErrStr = ErrStr ++ ", backend ~s returned an error: ~p~n",
+ FullErrArgs = ErrArgs ++ [Module, E],
+ rabbit_log:error(FullErrStr, FullErrArgs),
+ rabbit_misc:protocol_error(ErrName, FullErrStr, FullErrArgs)
+ end.
+
+-spec update_state(User :: rabbit_types:user(), NewState :: term()) ->
+ {'ok', rabbit_types:auth_user()} |
+ {'refused', string()} |
+ {'error', any()}.
+
+update_state(User = #user{authz_backends = Backends0}, NewState) ->
+ %% N.B.: we use foldl/3 and prepending, so the final list of
+ %% backends is in reverse order from the original list.
+ Backends = lists:foldl(
+ fun({Module, Impl}, {ok, Acc}) ->
+ case Module:state_can_expire() of
+ true ->
+ case Module:update_state(auth_user(User, Impl), NewState) of
+ {ok, #auth_user{impl = Impl1}} ->
+ {ok, [{Module, Impl1} | Acc]};
+ Else -> Else
+ end;
+ false ->
+ {ok, [{Module, Impl} | Acc]}
+ end;
+ (_, {error, _} = Err) -> Err;
+ (_, {refused, _, _} = Err) -> Err
+ end, {ok, []}, Backends0),
+ case Backends of
+ {ok, Pairs} -> {ok, User#user{authz_backends = lists:reverse(Pairs)}};
+ Else -> Else
+ end.
+
+-spec permission_cache_can_expire(User :: rabbit_types:user()) -> boolean().
+
+%% Returns true if any of the backends support credential expiration,
+%% otherwise returns false.
+permission_cache_can_expire(#user{authz_backends = Backends}) ->
+ lists:any(fun ({Module, _State}) -> Module:state_can_expire() end, Backends).
diff --git a/deps/rabbit/src/rabbit_alarm.erl b/deps/rabbit/src/rabbit_alarm.erl
new file mode 100644
index 0000000000..3f1ab7ae62
--- /dev/null
+++ b/deps/rabbit/src/rabbit_alarm.erl
@@ -0,0 +1,365 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+%% There are two types of alarms handled by this module:
+%%
+%% * per-node resource (disk, memory) alarms for the whole cluster. If any node
+%% has an alarm, then all publishing should be disabled across the
+%% cluster until all alarms clear. When a node sets such an alarm,
+%% this information is automatically propagated throughout the cluster.
+%%   `#alarms.alarmed_nodes' is used to track this type of alarm.
+%% * limits local to this node (file_descriptor_limit). Used for informational
+%%   purposes only: logging and getting node status. This information is not
+%%   propagated throughout the cluster. `#alarms.alarms' is used to track
+%%   this type of alarm.
+%% @end
+
+-module(rabbit_alarm).
+
+-behaviour(gen_event).
+
+-export([start_link/0, start/0, stop/0, register/2, set_alarm/1,
+ clear_alarm/1, get_alarms/0, get_alarms/1, get_local_alarms/0, get_local_alarms/1, on_node_up/1, on_node_down/1,
+ format_as_map/1, format_as_maps/1, is_local/1]).
+
+-export([init/1, handle_call/2, handle_event/2, handle_info/2,
+ terminate/2, code_change/3]).
+
+-export([remote_conserve_resources/3]). %% Internal use only
+
+-define(SERVER, ?MODULE).
+
+-define(FILE_DESCRIPTOR_RESOURCE, <<"file descriptors">>).
+-define(MEMORY_RESOURCE, <<"memory">>).
+-define(DISK_SPACE_RESOURCE, <<"disk space">>).
+
+%%----------------------------------------------------------------------------
+
+-record(alarms, {alertees :: dict:dict(pid(), rabbit_types:mfargs()),
+ alarmed_nodes :: dict:dict(node(), [resource_alarm_source()]),
+ alarms :: [alarm()]}).
+
+-type local_alarm() :: 'file_descriptor_limit'.
+-type resource_alarm_source() :: 'disk' | 'memory'.
+-type resource_alarm() :: {resource_limit, resource_alarm_source(), node()}.
+-type alarm() :: local_alarm() | resource_alarm().
+
+%%----------------------------------------------------------------------------
+
+-spec start_link() -> rabbit_types:ok_pid_or_error().
+
+start_link() ->
+ gen_event:start_link({local, ?SERVER}).
+
+-spec start() -> 'ok'.
+
+start() ->
+ ok = rabbit_sup:start_restartable_child(?MODULE),
+ ok = gen_event:add_handler(?SERVER, ?MODULE, []),
+ {ok, MemoryWatermark} = application:get_env(vm_memory_high_watermark),
+
+ rabbit_sup:start_restartable_child(
+ vm_memory_monitor, [MemoryWatermark,
+ fun (Alarm) ->
+ background_gc:run(),
+ set_alarm(Alarm)
+ end,
+ fun clear_alarm/1]),
+ {ok, DiskLimit} = application:get_env(disk_free_limit),
+ rabbit_sup:start_delayed_restartable_child(
+ rabbit_disk_monitor, [DiskLimit]),
+ ok.
+
+-spec stop() -> 'ok'.
+
+stop() -> ok.
+
+%% Registers a handler that should be called on every resource alarm change.
+%% Given a call rabbit_alarm:register(Pid, {M, F, A}), the handler would be
+%% called like this: `apply(M, F, A ++ [Pid, Source, Alert])', where `Source'
+%% has the type of resource_alarm_source() and `Alert' has the type of resource_alert().
+
+-spec register(pid(), rabbit_types:mfargs()) -> [atom()].
+
+register(Pid, AlertMFA) ->
+ gen_event:call(?SERVER, ?MODULE, {register, Pid, AlertMFA}, infinity).
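+
+%% Illustrative example (my_alarm_handler is a hypothetical module):
+%%
+%%   rabbit_alarm:register(self(), {my_alarm_handler, conserve, []})
+%%
+%% On each subsequent resource alarm change the callback is applied as
+%% my_alarm_handler:conserve(Pid, Source, {WasSet, StillSet, Node}); the
+%% register/2 call itself returns the currently alarmed resource sources.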
+
+-spec set_alarm({alarm(), []}) -> 'ok'.
+
+set_alarm(Alarm) -> gen_event:notify(?SERVER, {set_alarm, Alarm}).
+
+-spec clear_alarm(alarm()) -> 'ok'.
+
+clear_alarm(Alarm) -> gen_event:notify(?SERVER, {clear_alarm, Alarm}).
+
+-spec get_alarms() -> [{alarm(), []}].
+get_alarms() -> gen_event:call(?SERVER, ?MODULE, get_alarms, infinity).
+
+-spec get_alarms(timeout()) -> [{alarm(), []}].
+get_alarms(Timeout) -> gen_event:call(?SERVER, ?MODULE, get_alarms, Timeout).
+
+-spec get_local_alarms() -> [alarm()].
+get_local_alarms() -> gen_event:call(?SERVER, ?MODULE, get_local_alarms, infinity).
+
+-spec get_local_alarms(timeout()) -> [alarm()].
+get_local_alarms(Timeout) -> gen_event:call(?SERVER, ?MODULE, get_local_alarms, Timeout).
+
+-spec filter_local_alarms([alarm()]) -> [alarm()].
+filter_local_alarms(Alarms) ->
+ lists:filter(fun is_local/1, Alarms).
+
+-spec is_local({alarm(), any()}) -> boolean().
+is_local({file_descriptor_limit, _}) -> true;
+is_local({{resource_limit, _Resource, Node}, _}) when Node =:= node() -> true;
+is_local({{resource_limit, _Resource, Node}, _}) when Node =/= node() -> false.
+
+-spec format_as_map(alarm()) -> #{binary() => term()}.
+format_as_map(file_descriptor_limit) ->
+ #{
+ <<"resource">> => ?FILE_DESCRIPTOR_RESOURCE,
+ <<"node">> => node()
+ };
+format_as_map({resource_limit, disk, Node}) ->
+ #{
+ <<"resource">> => ?DISK_SPACE_RESOURCE,
+ <<"node">> => Node
+ };
+format_as_map({resource_limit, memory, Node}) ->
+ #{
+ <<"resource">> => ?MEMORY_RESOURCE,
+ <<"node">> => Node
+ };
+format_as_map({resource_limit, Limit, Node}) ->
+ #{
+ <<"resource">> => rabbit_data_coercion:to_binary(Limit),
+ <<"node">> => Node
+ }.
+
+-spec format_as_maps([{alarm(), []}]) -> [#{any() => term()}].
+format_as_maps(Alarms) when is_list(Alarms) ->
+ %% get_alarms/0 returns
+ %%
+ %% [
+ %% {file_descriptor_limit, []},
+ %% {{resource_limit, disk, rabbit@warp10}, []},
+ %% {{resource_limit, memory, rabbit@warp10}, []}
+ %% ]
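+    %%
+    %% and this function maps that to (node names illustrative):
+    %%
+    %% [
+    %%   #{<<"resource">> => <<"file descriptors">>, <<"node">> => rabbit@warp10},
+    %%   #{<<"resource">> => <<"disk space">>, <<"node">> => rabbit@warp10},
+    %%   #{<<"resource">> => <<"memory">>, <<"node">> => rabbit@warp10}
+    %% ]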
+ lists:map(fun({Resource, _}) -> format_as_map(Resource);
+ (Resource) -> format_as_map(Resource)
+ end, Alarms).
+
+
+-spec on_node_up(node()) -> 'ok'.
+on_node_up(Node) -> gen_event:notify(?SERVER, {node_up, Node}).
+
+-spec on_node_down(node()) -> 'ok'.
+on_node_down(Node) -> gen_event:notify(?SERVER, {node_down, Node}).
+
+remote_conserve_resources(Pid, Source, {true, _, _}) ->
+ gen_event:notify({?SERVER, node(Pid)},
+ {set_alarm, {{resource_limit, Source, node()}, []}});
+remote_conserve_resources(Pid, Source, {false, _, _}) ->
+ gen_event:notify({?SERVER, node(Pid)},
+ {clear_alarm, {resource_limit, Source, node()}}).
+
+
+%%----------------------------------------------------------------------------
+
+init([]) ->
+ {ok, #alarms{alertees = dict:new(),
+ alarmed_nodes = dict:new(),
+ alarms = []}}.
+
+handle_call({register, Pid, AlertMFA}, State = #alarms{alarmed_nodes = AN}) ->
+ {ok, lists:usort(lists:append([V || {_, V} <- dict:to_list(AN)])),
+ internal_register(Pid, AlertMFA, State)};
+
+handle_call(get_alarms, State) ->
+ {ok, compute_alarms(State), State};
+
+handle_call(get_local_alarms, State) ->
+ {ok, filter_local_alarms(compute_alarms(State)), State};
+
+handle_call(_Request, State) ->
+ {ok, not_understood, State}.
+
+handle_event({set_alarm, {{resource_limit, Source, Node}, []}}, State) ->
+ case is_node_alarmed(Source, Node, State) of
+ true ->
+ {ok, State};
+ false ->
+ rabbit_event:notify(alarm_set, [{source, Source},
+ {node, Node}]),
+ handle_set_resource_alarm(Source, Node, State)
+ end;
+handle_event({set_alarm, Alarm}, State = #alarms{alarms = Alarms}) ->
+ case lists:member(Alarm, Alarms) of
+ true -> {ok, State};
+ false -> UpdatedAlarms = lists:usort([Alarm|Alarms]),
+ handle_set_alarm(Alarm, State#alarms{alarms = UpdatedAlarms})
+ end;
+
+handle_event({clear_alarm, {resource_limit, Source, Node}}, State) ->
+ case is_node_alarmed(Source, Node, State) of
+ true ->
+ rabbit_event:notify(alarm_cleared, [{source, Source},
+ {node, Node}]),
+ handle_clear_resource_alarm(Source, Node, State);
+ false ->
+ {ok, State}
+ end;
+handle_event({clear_alarm, Alarm}, State = #alarms{alarms = Alarms}) ->
+ case lists:keymember(Alarm, 1, Alarms) of
+ true -> handle_clear_alarm(
+ Alarm, State#alarms{alarms = lists:keydelete(
+ Alarm, 1, Alarms)});
+ false -> {ok, State}
+
+ end;
+
+handle_event({node_up, Node}, State) ->
+ %% Must do this via notify and not call to avoid possible deadlock.
+ ok = gen_event:notify(
+ {?SERVER, Node},
+ {register, self(), {?MODULE, remote_conserve_resources, []}}),
+ {ok, State};
+
+handle_event({node_down, Node}, #alarms{alarmed_nodes = AN} = State) ->
+ AlarmsForDeadNode = case dict:find(Node, AN) of
+ {ok, V} -> V;
+ error -> []
+ end,
+ {ok, lists:foldr(fun(Source, AccState) ->
+ rabbit_log:warning("~s resource limit alarm cleared for dead node ~p~n",
+ [Source, Node]),
+ maybe_alert(fun dict_unappend/3, Node, Source, false, AccState)
+ end, State, AlarmsForDeadNode)};
+
+handle_event({register, Pid, AlertMFA}, State) ->
+ {ok, internal_register(Pid, AlertMFA, State)};
+
+handle_event(_Event, State) ->
+ {ok, State}.
+
+handle_info({'DOWN', _MRef, process, Pid, _Reason},
+ State = #alarms{alertees = Alertees}) ->
+ {ok, State#alarms{alertees = dict:erase(Pid, Alertees)}};
+
+handle_info(_Info, State) ->
+ {ok, State}.
+
+terminate(_Arg, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+%%----------------------------------------------------------------------------
+
+dict_append(Key, Val, Dict) ->
+ L = case dict:find(Key, Dict) of
+ {ok, V} -> V;
+ error -> []
+ end,
+ dict:store(Key, lists:usort([Val|L]), Dict).
+
+dict_unappend(Key, Val, Dict) ->
+ L = case dict:find(Key, Dict) of
+ {ok, V} -> V;
+ error -> []
+ end,
+
+ case lists:delete(Val, L) of
+ [] -> dict:erase(Key, Dict);
+ X -> dict:store(Key, X, Dict)
+ end.
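+
+%% Illustrative example: dict_append(Node, disk, AN) records a disk alarm
+%% source for Node (keeping sources usorted); a later
+%% dict_unappend(Node, disk, AN1) removes it again and erases the key
+%% entirely once Node has no alarmed sources left.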
+
+maybe_alert(UpdateFun, Node, Source, WasAlertAdded,
+ State = #alarms{alarmed_nodes = AN,
+ alertees = Alertees}) ->
+ AN1 = UpdateFun(Node, Source, AN),
+ %% Is alarm for Source still set on any node?
+ StillHasAlerts = lists:any(fun ({_Node, NodeAlerts}) -> lists:member(Source, NodeAlerts) end, dict:to_list(AN1)),
+ case StillHasAlerts of
+ true -> ok;
+ false -> rabbit_log:warning("~s resource limit alarm cleared across the cluster~n", [Source])
+ end,
+ Alert = {WasAlertAdded, StillHasAlerts, Node},
+ case node() of
+ Node -> ok = alert_remote(Alert, Alertees, Source);
+ _ -> ok
+ end,
+ ok = alert_local(Alert, Alertees, Source),
+ State#alarms{alarmed_nodes = AN1}.
+
+alert_local(Alert, Alertees, Source) ->
+ alert(Alertees, Source, Alert, fun erlang:'=:='/2).
+
+alert_remote(Alert, Alertees, Source) ->
+ alert(Alertees, Source, Alert, fun erlang:'=/='/2).
+
+alert(Alertees, Source, Alert, NodeComparator) ->
+ Node = node(),
+ dict:fold(fun (Pid, {M, F, A}, ok) ->
+ case NodeComparator(Node, node(Pid)) of
+ true -> apply(M, F, A ++ [Pid, Source, Alert]);
+ false -> ok
+ end
+ end, ok, Alertees).
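+
+%% alert_local/3 notifies alertees registered on this node (comparing
+%% with =:=), while alert_remote/3 notifies alertees on all other nodes
+%% (=/=), so maybe_alert/5 can fan a change out to every registrant
+%% without double-notifying local pids.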
+
+internal_register(Pid, {M, F, A} = AlertMFA,
+ State = #alarms{alertees = Alertees}) ->
+ _MRef = erlang:monitor(process, Pid),
+ case dict:find(node(), State#alarms.alarmed_nodes) of
+ {ok, Sources} -> [apply(M, F, A ++ [Pid, R, {true, true, node()}]) || R <- Sources];
+ error -> ok
+ end,
+ NewAlertees = dict:store(Pid, AlertMFA, Alertees),
+ State#alarms{alertees = NewAlertees}.
+
+handle_set_resource_alarm(Source, Node, State) ->
+ rabbit_log:warning(
+ "~s resource limit alarm set on node ~p.~n~n"
+ "**********************************************************~n"
+ "*** Publishers will be blocked until this alarm clears ***~n"
+ "**********************************************************~n",
+ [Source, Node]),
+ {ok, maybe_alert(fun dict_append/3, Node, Source, true, State)}.
+
+handle_set_alarm({file_descriptor_limit, []}, State) ->
+ rabbit_log:warning(
+ "file descriptor limit alarm set.~n~n"
+ "********************************************************************~n"
+ "*** New connections will not be accepted until this alarm clears ***~n"
+ "********************************************************************~n"),
+ {ok, State};
+handle_set_alarm(Alarm, State) ->
+ rabbit_log:warning("alarm '~p' set~n", [Alarm]),
+ {ok, State}.
+
+handle_clear_resource_alarm(Source, Node, State) ->
+ rabbit_log:warning("~s resource limit alarm cleared on node ~p~n",
+ [Source, Node]),
+ {ok, maybe_alert(fun dict_unappend/3, Node, Source, false, State)}.
+
+handle_clear_alarm(file_descriptor_limit, State) ->
+ rabbit_log:warning("file descriptor limit alarm cleared~n"),
+ {ok, State};
+handle_clear_alarm(Alarm, State) ->
+ rabbit_log:warning("alarm '~p' cleared~n", [Alarm]),
+ {ok, State}.
+
+is_node_alarmed(Source, Node, #alarms{alarmed_nodes = AN}) ->
+ case dict:find(Node, AN) of
+ {ok, Sources} ->
+ lists:member(Source, Sources);
+ error ->
+ false
+ end.
+
+compute_alarms(#alarms{alarms = Alarms,
+ alarmed_nodes = AN}) ->
+ Alarms ++ [ {{resource_limit, Source, Node}, []}
+ || {Node, Sources} <- dict:to_list(AN), Source <- Sources ].
diff --git a/deps/rabbit/src/rabbit_amqqueue.erl b/deps/rabbit/src/rabbit_amqqueue.erl
new file mode 100644
index 0000000000..cd5f894680
--- /dev/null
+++ b/deps/rabbit/src/rabbit_amqqueue.erl
@@ -0,0 +1,1889 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_amqqueue).
+
+-export([warn_file_limit/0]).
+-export([recover/1, stop/1, start/1, declare/6, declare/7,
+ delete_immediately/1, delete_exclusive/2, delete/4, purge/1,
+ forget_all_durable/1]).
+-export([pseudo_queue/2, pseudo_queue/3, immutable/1]).
+-export([lookup/1, lookup_many/1, not_found_or_absent/1, not_found_or_absent_dirty/1,
+ with/2, with/3, with_or_die/2,
+ assert_equivalence/5,
+ check_exclusive_access/2, with_exclusive_access_or_die/3,
+ stat/1, deliver/2,
+ requeue/3, ack/3, reject/4]).
+-export([not_found/1, absent/2]).
+-export([list/0, list/1, info_keys/0, info/1, info/2, info_all/1, info_all/2,
+ emit_info_all/5, list_local/1, info_local/1,
+ emit_info_local/4, emit_info_down/4]).
+-export([count/0]).
+-export([list_down/1, count/1, list_names/0, list_names/1, list_local_names/0,
+ list_local_names_down/0, list_with_possible_retry/1]).
+-export([list_by_type/1, sample_local_queues/0, sample_n_by_name/2, sample_n/2]).
+-export([force_event_refresh/1, notify_policy_changed/1]).
+-export([consumers/1, consumers_all/1, emit_consumers_all/4, consumer_info_keys/0]).
+-export([basic_get/5, basic_consume/12, basic_cancel/5, notify_decorators/1]).
+-export([notify_sent/2, notify_sent_queue_down/1, resume/2]).
+-export([notify_down_all/2, notify_down_all/3, activate_limit_all/2, credit/5]).
+-export([on_node_up/1, on_node_down/1]).
+-export([update/2, store_queue/1, update_decorators/1, policy_changed/2]).
+-export([update_mirroring/1, sync_mirrors/1, cancel_sync_mirrors/1]).
+-export([emit_unresponsive/6, emit_unresponsive_local/5, is_unresponsive/2]).
+-export([has_synchronised_mirrors_online/1]).
+-export([is_replicated/1, is_exclusive/1, is_not_exclusive/1, is_dead_exclusive/1]).
+-export([list_local_quorum_queues/0, list_local_quorum_queue_names/0,
+ list_local_mirrored_classic_queues/0, list_local_mirrored_classic_names/0,
+ list_local_leaders/0, list_local_followers/0, get_quorum_nodes/1,
+ list_local_mirrored_classic_without_synchronised_mirrors/0,
+ list_local_mirrored_classic_without_synchronised_mirrors_for_cli/0]).
+-export([ensure_rabbit_queue_record_is_initialized/1]).
+-export([format/1]).
+-export([delete_immediately_by_resource/1]).
+-export([delete_crashed/1,
+ delete_crashed/2,
+ delete_crashed_internal/2]).
+
+-export([pid_of/1, pid_of/2]).
+-export([mark_local_durable_queues_stopped/1]).
+
+-export([rebalance/3]).
+-export([collect_info_all/2]).
+
+-export([is_policy_applicable/2]).
+-export([is_server_named_allowed/1]).
+
+-export([check_max_age/1]).
+-export([get_queue_type/1]).
+
+%% internal
+-export([internal_declare/2, internal_delete/2, run_backing_queue/3,
+ set_ram_duration_target/2, set_maximum_since_use/2,
+ emit_consumers_local/3, internal_delete/3]).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+-include_lib("stdlib/include/qlc.hrl").
+-include("amqqueue.hrl").
+
+-define(INTEGER_ARG_TYPES, [byte, short, signedint, long,
+ unsignedbyte, unsignedshort, unsignedint]).
+
+-define(MORE_CONSUMER_CREDIT_AFTER, 50).
+
+-define(IS_CLASSIC(QPid), is_pid(QPid)).
+-define(IS_QUORUM(QPid), is_tuple(QPid)).
+%%----------------------------------------------------------------------------
+
+-export_type([name/0, qmsg/0, absent_reason/0]).
+
+-type name() :: rabbit_types:r('queue').
+
+-type qpids() :: [pid()].
+-type qlen() :: rabbit_types:ok(non_neg_integer()).
+-type qfun(A) :: fun ((amqqueue:amqqueue()) -> A | no_return()).
+-type qmsg() :: {name(), pid() | {atom(), pid()}, msg_id(),
+ boolean(), rabbit_types:message()}.
+-type msg_id() :: non_neg_integer().
+-type ok_or_errors() ::
+ 'ok' | {'error', [{'error' | 'exit' | 'throw', any()}]}.
+-type absent_reason() :: 'nodedown' | 'crashed' | stopped | timeout.
+-type queue_not_found() :: not_found.
+-type queue_absent() :: {'absent', amqqueue:amqqueue(), absent_reason()}.
+-type not_found_or_absent() :: queue_not_found() | queue_absent().
+
+%%----------------------------------------------------------------------------
+
+-define(CONSUMER_INFO_KEYS,
+ [queue_name, channel_pid, consumer_tag, ack_required, prefetch_count,
+ active, activity_status, arguments]).
+
+warn_file_limit() ->
+ DurableQueues = find_recoverable_queues(),
+ L = length(DurableQueues),
+
+    %% If there are not enough file handles, the server might hang
+    %% when trying to recover queues, so warn the user:
+ case file_handle_cache:get_limit() < L of
+ true ->
+ rabbit_log:warning(
+ "Recovering ~p queues, available file handles: ~p. Please increase max open file handles limit to at least ~p!~n",
+ [L, file_handle_cache:get_limit(), L]);
+ false ->
+ ok
+ end.
+
+-spec recover(rabbit_types:vhost()) ->
+ {Recovered :: [amqqueue:amqqueue()],
+ Failed :: [amqqueue:amqqueue()]}.
+recover(VHost) ->
+ AllDurable = find_local_durable_queues(VHost),
+ rabbit_queue_type:recover(VHost, AllDurable).
+
+filter_pid_per_type(QPids) ->
+ lists:partition(fun(QPid) -> ?IS_CLASSIC(QPid) end, QPids).
+
+filter_resource_per_type(Resources) ->
+ Queues = [begin
+ {ok, Q} = lookup(Resource),
+ QPid = amqqueue:get_pid(Q),
+ {Resource, QPid}
+ end || Resource <- Resources],
+ lists:partition(fun({_Resource, QPid}) -> ?IS_CLASSIC(QPid) end, Queues).
+
+-spec stop(rabbit_types:vhost()) -> 'ok'.
+stop(VHost) ->
+ %% Classic queues
+ ok = rabbit_amqqueue_sup_sup:stop_for_vhost(VHost),
+ {ok, BQ} = application:get_env(rabbit, backing_queue_module),
+ ok = BQ:stop(VHost),
+ rabbit_quorum_queue:stop(VHost).
+
+-spec start([amqqueue:amqqueue()]) -> 'ok'.
+
+start(Qs) ->
+ %% At this point all recovered queues and their bindings are
+ %% visible to routing, so now it is safe for them to complete
+ %% their initialisation (which may involve interacting with other
+ %% queues).
+ _ = [amqqueue:get_pid(Q) ! {self(), go}
+ || Q <- Qs,
+ %% All queues are supposed to be classic here.
+ amqqueue:is_classic(Q)],
+ ok.
+
+mark_local_durable_queues_stopped(VHost) ->
+ ?try_mnesia_tx_or_upgrade_amqqueue_and_retry(
+ do_mark_local_durable_queues_stopped(VHost),
+ do_mark_local_durable_queues_stopped(VHost)).
+
+do_mark_local_durable_queues_stopped(VHost) ->
+ Qs = find_local_durable_queues(VHost),
+ rabbit_misc:execute_mnesia_transaction(
+ fun() ->
+ [ store_queue(amqqueue:set_state(Q, stopped))
+ || Q <- Qs, amqqueue:get_type(Q) =:= rabbit_classic_queue,
+ amqqueue:get_state(Q) =/= stopped ]
+ end).
+
+find_local_durable_queues(VHost) ->
+ mnesia:async_dirty(
+ fun () ->
+ qlc:e(
+ qlc:q(
+ [Q || Q <- mnesia:table(rabbit_durable_queue),
+ amqqueue:get_vhost(Q) =:= VHost andalso
+ rabbit_queue_type:is_recoverable(Q)
+ ]))
+ end).
+
+find_recoverable_queues() ->
+ mnesia:async_dirty(
+ fun () ->
+ qlc:e(qlc:q([Q || Q <- mnesia:table(rabbit_durable_queue),
+ rabbit_queue_type:is_recoverable(Q)]))
+ end).
+
+-spec declare(name(),
+ boolean(),
+ boolean(),
+ rabbit_framing:amqp_table(),
+ rabbit_types:maybe(pid()),
+ rabbit_types:username()) ->
+ {'new' | 'existing' | 'owner_died', amqqueue:amqqueue()} |
+ {'new', amqqueue:amqqueue(), rabbit_fifo_client:state()} |
+ {'absent', amqqueue:amqqueue(), absent_reason()} |
+ {protocol_error, Type :: atom(), Reason :: string(), Args :: term()}.
+declare(QueueName, Durable, AutoDelete, Args, Owner, ActingUser) ->
+ declare(QueueName, Durable, AutoDelete, Args, Owner, ActingUser, node()).
+
+
+%% The Node argument suggests where the queue (master if mirrored)
+%% should be. Note that in some cases (e.g. with "nodes" policy in
+%% effect) this might not be possible to satisfy.
+
+-spec declare(name(),
+ boolean(),
+ boolean(),
+ rabbit_framing:amqp_table(),
+ rabbit_types:maybe(pid()),
+ rabbit_types:username(),
+ node()) ->
+ {'new' | 'existing' | 'owner_died', amqqueue:amqqueue()} |
+ {'absent', amqqueue:amqqueue(), absent_reason()} |
+ {protocol_error, Type :: atom(), Reason :: string(), Args :: term()}.
+declare(QueueName = #resource{virtual_host = VHost}, Durable, AutoDelete, Args,
+ Owner, ActingUser, Node) ->
+ ok = check_declare_arguments(QueueName, Args),
+ Type = get_queue_type(Args),
+ case rabbit_queue_type:is_enabled(Type) of
+ true ->
+ Q0 = amqqueue:new(QueueName,
+ none,
+ Durable,
+ AutoDelete,
+ Owner,
+ Args,
+ VHost,
+ #{user => ActingUser},
+ Type),
+ Q = rabbit_queue_decorator:set(
+ rabbit_policy:set(Q0)),
+ rabbit_queue_type:declare(Q, Node);
+ false ->
+ {protocol_error, internal_error,
+             "Cannot declare ~s of type '~s' on node '~s': "
+ "the corresponding feature flag is disabled",
+ [rabbit_misc:rs(QueueName), Type, Node]}
+ end.
+
+get_queue_type(Args) ->
+ case rabbit_misc:table_lookup(Args, <<"x-queue-type">>) of
+ undefined ->
+ rabbit_queue_type:default();
+ {_, V} ->
+ rabbit_queue_type:discover(V)
+ end.
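+
+%% Illustrative example: declaring with arguments containing
+%% {<<"x-queue-type">>, longstr, <<"quorum">>} selects the quorum queue
+%% type via rabbit_queue_type:discover/1; without the argument the
+%% default queue type (classic) is used.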
+
+-spec internal_declare(amqqueue:amqqueue(), boolean()) ->
+ {created | existing, amqqueue:amqqueue()} | queue_absent().
+
+internal_declare(Q, Recover) ->
+ ?try_mnesia_tx_or_upgrade_amqqueue_and_retry(
+ do_internal_declare(Q, Recover),
+ begin
+ Q1 = amqqueue:upgrade(Q),
+ do_internal_declare(Q1, Recover)
+ end).
+
+do_internal_declare(Q, true) ->
+ rabbit_misc:execute_mnesia_tx_with_tail(
+ fun () ->
+ ok = store_queue(amqqueue:set_state(Q, live)),
+ rabbit_misc:const({created, Q})
+ end);
+do_internal_declare(Q, false) ->
+ QueueName = amqqueue:get_name(Q),
+ rabbit_misc:execute_mnesia_tx_with_tail(
+ fun () ->
+ case mnesia:wread({rabbit_queue, QueueName}) of
+ [] ->
+ case not_found_or_absent(QueueName) of
+ not_found -> Q1 = rabbit_policy:set(Q),
+ Q2 = amqqueue:set_state(Q1, live),
+ ok = store_queue(Q2),
+ fun () -> {created, Q2} end;
+ {absent, _Q, _} = R -> rabbit_misc:const(R)
+ end;
+ [ExistingQ] ->
+ rabbit_misc:const({existing, ExistingQ})
+ end
+ end).
+
+-spec update
+ (name(), fun((amqqueue:amqqueue()) -> amqqueue:amqqueue())) ->
+ 'not_found' | amqqueue:amqqueue().
+
+update(Name, Fun) ->
+ case mnesia:wread({rabbit_queue, Name}) of
+ [Q] ->
+ Durable = amqqueue:is_durable(Q),
+ Q1 = Fun(Q),
+ ok = mnesia:write(rabbit_queue, Q1, write),
+ case Durable of
+ true -> ok = mnesia:write(rabbit_durable_queue, Q1, write);
+ _ -> ok
+ end,
+ Q1;
+ [] ->
+ not_found
+ end.
+
+%% only really used for quorum queues to ensure the rabbit_queue record
+%% is initialised
+ensure_rabbit_queue_record_is_initialized(Q) ->
+ ?try_mnesia_tx_or_upgrade_amqqueue_and_retry(
+ do_ensure_rabbit_queue_record_is_initialized(Q),
+ begin
+ Q1 = amqqueue:upgrade(Q),
+ do_ensure_rabbit_queue_record_is_initialized(Q1)
+ end).
+
+do_ensure_rabbit_queue_record_is_initialized(Q) ->
+ rabbit_misc:execute_mnesia_tx_with_tail(
+ fun () ->
+ ok = store_queue(Q),
+ rabbit_misc:const({ok, Q})
+ end).
+
+-spec store_queue(amqqueue:amqqueue()) -> 'ok'.
+
+store_queue(Q) when ?amqqueue_is_durable(Q) ->
+ Q1 = amqqueue:reset_mirroring_and_decorators(Q),
+ ok = mnesia:write(rabbit_durable_queue, Q1, write),
+ store_queue_ram(Q);
+store_queue(Q) when not ?amqqueue_is_durable(Q) ->
+ store_queue_ram(Q).
+
+store_queue_ram(Q) ->
+ ok = mnesia:write(rabbit_queue, rabbit_queue_decorator:set(Q), write).
+
+-spec update_decorators(name()) -> 'ok'.
+
+update_decorators(Name) ->
+ rabbit_misc:execute_mnesia_transaction(
+ fun() ->
+ case mnesia:wread({rabbit_queue, Name}) of
+ [Q] -> store_queue_ram(Q),
+ ok;
+ [] -> ok
+ end
+ end).
+
+-spec policy_changed(amqqueue:amqqueue(), amqqueue:amqqueue()) ->
+ 'ok'.
+
+policy_changed(Q1, Q2) ->
+ Decorators1 = amqqueue:get_decorators(Q1),
+ Decorators2 = amqqueue:get_decorators(Q2),
+ rabbit_mirror_queue_misc:update_mirrors(Q1, Q2),
+ D1 = rabbit_queue_decorator:select(Decorators1),
+ D2 = rabbit_queue_decorator:select(Decorators2),
+ [ok = M:policy_changed(Q1, Q2) || M <- lists:usort(D1 ++ D2)],
+ %% Make sure we emit a stats event even if nothing
+ %% mirroring-related has changed - the policy may have changed anyway.
+ notify_policy_changed(Q2).
+
+is_policy_applicable(QName, Policy) ->
+ case lookup(QName) of
+ {ok, Q} ->
+ rabbit_queue_type:is_policy_applicable(Q, Policy);
+ _ ->
+ %% Defaults to previous behaviour. Apply always
+ true
+ end.
+
+is_server_named_allowed(Args) ->
+ Type = get_queue_type(Args),
+ rabbit_queue_type:is_server_named_allowed(Type).
+
+-spec lookup
+ (name()) ->
+ rabbit_types:ok(amqqueue:amqqueue()) |
+ rabbit_types:error('not_found');
+ ([name()]) ->
+ [amqqueue:amqqueue()].
+
+lookup([]) -> []; %% optimisation
+lookup([Name]) -> ets:lookup(rabbit_queue, Name); %% optimisation
+lookup(Names) when is_list(Names) ->
+ %% Normally we'd call mnesia:dirty_read/1 here, but that is quite
+ %% expensive for reasons explained in rabbit_misc:dirty_read/1.
+ lists:append([ets:lookup(rabbit_queue, Name) || Name <- Names]);
+lookup(Name) ->
+ rabbit_misc:dirty_read({rabbit_queue, Name}).
+
+-spec lookup_many ([name()]) -> [amqqueue:amqqueue()].
+
+lookup_many(Names) when is_list(Names) ->
+ lookup(Names).
+
+-spec not_found_or_absent(name()) -> not_found_or_absent().
+
+not_found_or_absent(Name) ->
+ %% NB: we assume that the caller has already performed a lookup on
+ %% rabbit_queue and not found anything
+ case mnesia:read({rabbit_durable_queue, Name}) of
+ [] -> not_found;
+ [Q] -> {absent, Q, nodedown} %% Q exists on stopped node
+ end.
+
+-spec not_found_or_absent_dirty(name()) -> not_found_or_absent().
+
+not_found_or_absent_dirty(Name) ->
+ %% We should read from both tables inside a tx, to get a
+ %% consistent view. But the chances of an inconsistency are small,
+ %% and only affect the error kind.
+ case rabbit_misc:dirty_read({rabbit_durable_queue, Name}) of
+ {error, not_found} -> not_found;
+ {ok, Q} -> {absent, Q, nodedown}
+ end.
+
+-spec get_rebalance_lock(pid()) ->
+ {true, {rebalance_queues, pid()}} | false.
+get_rebalance_lock(Pid) when is_pid(Pid) ->
+ Id = {rebalance_queues, Pid},
+ Nodes = [node()|nodes()],
+ %% Note that we're not re-trying. We want to immediately know
+ %% if a re-balance is taking place and stop accordingly.
+ case global:set_lock(Id, Nodes, 0) of
+ true ->
+ {true, Id};
+ false ->
+ false
+ end.
+
+-spec rebalance('all' | 'quorum' | 'classic', binary(), binary()) ->
+ {ok, [{node(), pos_integer()}]} | {error, term()}.
+rebalance(Type, VhostSpec, QueueSpec) ->
+ %% We have not yet acquired the rebalance_queues global lock.
+ maybe_rebalance(get_rebalance_lock(self()), Type, VhostSpec, QueueSpec).
+
+maybe_rebalance({true, Id}, Type, VhostSpec, QueueSpec) ->
+ rabbit_log:info("Starting queue rebalance operation: '~s' for vhosts matching '~s' and queues matching '~s'",
+ [Type, VhostSpec, QueueSpec]),
+ Running = rabbit_nodes:all_running(),
+ NumRunning = length(Running),
+ ToRebalance = [Q || Q <- rabbit_amqqueue:list(),
+ filter_per_type(Type, Q),
+ is_replicated(Q),
+ is_match(amqqueue:get_vhost(Q), VhostSpec) andalso
+ is_match(get_resource_name(amqqueue:get_name(Q)), QueueSpec)],
+ NumToRebalance = length(ToRebalance),
+ ByNode = group_by_node(ToRebalance),
+ Rem = case (NumToRebalance rem NumRunning) of
+ 0 -> 0;
+ _ -> 1
+ end,
+ MaxQueuesDesired = (NumToRebalance div NumRunning) + Rem,
+ Result = iterative_rebalance(ByNode, MaxQueuesDesired),
+ global:del_lock(Id),
+ rabbit_log:info("Finished queue rebalance operation"),
+ Result;
+maybe_rebalance(false, _Type, _VhostSpec, _QueueSpec) ->
+ rabbit_log:warning("Queue rebalance operation is in progress, please wait."),
+ {error, rebalance_in_progress}.
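+
+%% Illustrative arithmetic: with 11 matching queues and 3 running nodes,
+%% 11 div 3 = 3 with a non-zero remainder, so MaxQueuesDesired = 4 and
+%% migration proceeds until no node holds more than 4 of these queues.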
+
+filter_per_type(all, _) ->
+ true;
+filter_per_type(quorum, Q) ->
+ ?amqqueue_is_quorum(Q);
+filter_per_type(classic, Q) ->
+ ?amqqueue_is_classic(Q).
+
+rebalance_module(Q) when ?amqqueue_is_quorum(Q) ->
+ rabbit_quorum_queue;
+rebalance_module(Q) when ?amqqueue_is_classic(Q) ->
+ rabbit_mirror_queue_misc.
+
+get_resource_name(#resource{name = Name}) ->
+ Name.
+
+is_match(Subj, E) ->
+ nomatch /= re:run(Subj, E).
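+
+%% Illustrative example: is_match(<<"qq.1">>, <<"^qq\\.">>) is true, so
+%% the vhost and queue specs passed to rebalance/3 are effectively
+%% regular expressions, matched anywhere in the name unless anchored.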
+
+iterative_rebalance(ByNode, MaxQueuesDesired) ->
+ case maybe_migrate(ByNode, MaxQueuesDesired) of
+ {ok, Summary} ->
+ rabbit_log:info("All queue masters are balanced"),
+ {ok, Summary};
+ {migrated, Other} ->
+ iterative_rebalance(Other, MaxQueuesDesired);
+ {not_migrated, Other} ->
+ iterative_rebalance(Other, MaxQueuesDesired)
+ end.
+
+maybe_migrate(ByNode, MaxQueuesDesired) ->
+ maybe_migrate(ByNode, MaxQueuesDesired, maps:keys(ByNode)).
+
+maybe_migrate(ByNode, _, []) ->
+ {ok, maps:fold(fun(K, V, Acc) ->
+ {CQs, QQs} = lists:partition(fun({_, Q, _}) ->
+ ?amqqueue_is_classic(Q)
+ end, V),
+ [[{<<"Node name">>, K}, {<<"Number of quorum queues">>, length(QQs)},
+ {<<"Number of classic queues">>, length(CQs)}] | Acc]
+ end, [], ByNode)};
+maybe_migrate(ByNode, MaxQueuesDesired, [N | Nodes]) ->
+ case maps:get(N, ByNode, []) of
+ [{_, Q, false} = Queue | Queues] = All when length(All) > MaxQueuesDesired ->
+ Name = amqqueue:get_name(Q),
+ Module = rebalance_module(Q),
+ OtherNodes = Module:get_replicas(Q) -- [N],
+ case OtherNodes of
+ [] ->
+ {not_migrated, update_not_migrated_queue(N, Queue, Queues, ByNode)};
+ _ ->
+ [{Length, Destination} | _] = sort_by_number_of_queues(OtherNodes, ByNode),
+ rabbit_log:warning("Migrating queue ~p from node ~p with ~p queues to node ~p with ~p queues",
+ [Name, N, length(All), Destination, Length]),
+ case Module:transfer_leadership(Q, Destination) of
+ {migrated, NewNode} ->
+ rabbit_log:warning("Queue ~p migrated to ~p", [Name, NewNode]),
+ {migrated, update_migrated_queue(Destination, N, Queue, Queues, ByNode)};
+ {not_migrated, Reason} ->
+ rabbit_log:warning("Error migrating queue ~p: ~p", [Name, Reason]),
+ {not_migrated, update_not_migrated_queue(N, Queue, Queues, ByNode)}
+ end
+ end;
+ [{_, _, true} | _] = All when length(All) > MaxQueuesDesired ->
+ rabbit_log:warning("Node ~p contains ~p queues, but all have already migrated. "
+ "Do nothing", [N, length(All)]),
+ maybe_migrate(ByNode, MaxQueuesDesired, Nodes);
+ All ->
+ rabbit_log:warning("Node ~p only contains ~p queues, do nothing",
+ [N, length(All)]),
+ maybe_migrate(ByNode, MaxQueuesDesired, Nodes)
+ end.
+
+update_not_migrated_queue(N, {Entries, Q, _}, Queues, ByNode) ->
+ maps:update(N, Queues ++ [{Entries, Q, true}], ByNode).
+
+update_migrated_queue(NewNode, OldNode, {Entries, Q, _}, Queues, ByNode) ->
+ maps:update_with(NewNode,
+ fun(L) -> L ++ [{Entries, Q, true}] end,
+ [{Entries, Q, true}], maps:update(OldNode, Queues, ByNode)).
+
+sort_by_number_of_queues(Nodes, ByNode) ->
+ lists:keysort(1,
+ lists:map(fun(Node) ->
+ {num_queues(Node, ByNode), Node}
+ end, Nodes)).
+
+num_queues(Node, ByNode) ->
+ length(maps:get(Node, ByNode, [])).
+
+group_by_node(Queues) ->
+ ByNode = lists:foldl(fun(Q, Acc) ->
+ Module = rebalance_module(Q),
+ Length = Module:queue_length(Q),
+ maps:update_with(amqqueue:qnode(Q),
+ fun(L) -> [{Length, Q, false} | L] end,
+ [{Length, Q, false}], Acc)
+ end, #{}, Queues),
+ maps:map(fun(_K, V) -> lists:keysort(1, V) end, ByNode).
+
+-spec with(name(),
+ qfun(A),
+ fun((not_found_or_absent()) -> rabbit_types:channel_exit())) ->
+ A | rabbit_types:channel_exit().
+
+with(Name, F, E) ->
+ with(Name, F, E, 2000).
+
+with(#resource{} = Name, F, E, RetriesLeft) ->
+ case lookup(Name) of
+ {ok, Q} when ?amqqueue_state_is(Q, live) andalso RetriesLeft =:= 0 ->
+ %% Something bad happened to that queue, we are bailing out
+ %% on processing current request.
+ E({absent, Q, timeout});
+ {ok, Q} when ?amqqueue_state_is(Q, stopped) andalso RetriesLeft =:= 0 ->
+ %% The queue was stopped and not migrated
+ E({absent, Q, stopped});
+ %% The queue process has crashed with unknown error
+ {ok, Q} when ?amqqueue_state_is(Q, crashed) ->
+ E({absent, Q, crashed});
+ %% The queue process has been stopped by a supervisor.
+ %% In that case a synchronised mirror can take over
+ %% so we should retry.
+ {ok, Q} when ?amqqueue_state_is(Q, stopped) ->
+ %% The queue process was stopped by the supervisor
+ rabbit_misc:with_exit_handler(
+ fun () -> retry_wait(Q, F, E, RetriesLeft) end,
+ fun () -> F(Q) end);
+ %% The queue is supposed to be active.
+ %% The master node can go away or queue can be killed
+ %% so we retry, waiting for a mirror to take over.
+ {ok, Q} when ?amqqueue_state_is(Q, live) ->
+ %% We check is_process_alive(QPid) in case we receive a
+ %% nodedown (for example) in F() that has nothing to do
+            %% with the QPid. F() should be written s.t. this
+ %% cannot happen, so we bail if it does since that
+ %% indicates a code bug and we don't want to get stuck in
+ %% the retry loop.
+ rabbit_misc:with_exit_handler(
+ fun () -> retry_wait(Q, F, E, RetriesLeft) end,
+ fun () -> F(Q) end);
+ {error, not_found} ->
+ E(not_found_or_absent_dirty(Name))
+ end.
+
+-spec retry_wait(amqqueue:amqqueue(),
+ qfun(A),
+ fun((not_found_or_absent()) -> rabbit_types:channel_exit()),
+ non_neg_integer()) ->
+ A | rabbit_types:channel_exit().
+
+retry_wait(Q, F, E, RetriesLeft) ->
+ Name = amqqueue:get_name(Q),
+ QPid = amqqueue:get_pid(Q),
+ QState = amqqueue:get_state(Q),
+ case {QState, is_replicated(Q)} of
+ %% We don't want to repeat an operation if
+ %% there are no mirrors to migrate to
+ {stopped, false} ->
+ E({absent, Q, stopped});
+ _ ->
+ case rabbit_mnesia:is_process_alive(QPid) of
+ true ->
+                    % rabbitmq-server#1682
+                    % The old check would have crashed here; instead, log it
+                    % and run the exit fun. 'absent & alive' is weird, but
+                    % better than crashing with a badmatch on 'true'.
+ rabbit_log:debug("Unexpected alive queue process ~p~n", [QPid]),
+ E({absent, Q, alive});
+ false ->
+ ok % Expected result
+ end,
+ timer:sleep(30),
+ with(Name, F, E, RetriesLeft - 1)
+ end.
+
+-spec with(name(), qfun(A)) ->
+ A | rabbit_types:error(not_found_or_absent()).
+
+with(Name, F) -> with(Name, F, fun (E) -> {error, E} end).
+
+-spec with_or_die(name(), qfun(A)) -> A | rabbit_types:channel_exit().
+
+with_or_die(Name, F) ->
+ with(Name, F, die_fun(Name)).
+
+-spec die_fun(name()) ->
+ fun((not_found_or_absent()) -> rabbit_types:channel_exit()).
+
+die_fun(Name) ->
+ fun (not_found) -> not_found(Name);
+ ({absent, Q, Reason}) -> absent(Q, Reason)
+ end.
+
+-spec not_found(name()) -> rabbit_types:channel_exit().
+
+not_found(R) -> rabbit_misc:protocol_error(not_found, "no ~s", [rabbit_misc:rs(R)]).
+
+-spec absent(amqqueue:amqqueue(), absent_reason()) ->
+ rabbit_types:channel_exit().
+
+absent(Q, AbsentReason) ->
+ QueueName = amqqueue:get_name(Q),
+ QPid = amqqueue:get_pid(Q),
+ IsDurable = amqqueue:is_durable(Q),
+ priv_absent(QueueName, QPid, IsDurable, AbsentReason).
+
+-spec priv_absent(name(), pid(), boolean(), absent_reason()) ->
+ rabbit_types:channel_exit().
+
+priv_absent(QueueName, QPid, true, nodedown) ->
+ %% The assertion of durability is mainly there because we mention
+ %% durability in the error message. That way we will hopefully
+ %% notice if at some future point our logic changes s.t. we get
+ %% here with non-durable queues.
+ rabbit_misc:protocol_error(
+ not_found,
+ "home node '~s' of durable ~s is down or inaccessible",
+ [node(QPid), rabbit_misc:rs(QueueName)]);
+
+priv_absent(QueueName, _QPid, _IsDurable, stopped) ->
+ rabbit_misc:protocol_error(
+ not_found,
+ "~s process is stopped by supervisor", [rabbit_misc:rs(QueueName)]);
+
+priv_absent(QueueName, _QPid, _IsDurable, crashed) ->
+ rabbit_misc:protocol_error(
+ not_found,
+ "~s has crashed and failed to restart", [rabbit_misc:rs(QueueName)]);
+
+priv_absent(QueueName, _QPid, _IsDurable, timeout) ->
+ rabbit_misc:protocol_error(
+ not_found,
+ "failed to perform operation on ~s due to timeout", [rabbit_misc:rs(QueueName)]);
+
+priv_absent(QueueName, QPid, _IsDurable, alive) ->
+ rabbit_misc:protocol_error(
+ not_found,
+ "failed to perform operation on ~s: its master replica ~w may be stopping or being demoted",
+ [rabbit_misc:rs(QueueName), QPid]).
+
+-spec assert_equivalence
+ (amqqueue:amqqueue(), boolean(), boolean(),
+ rabbit_framing:amqp_table(), rabbit_types:maybe(pid())) ->
+ 'ok' | rabbit_types:channel_exit() | rabbit_types:connection_exit().
+
+assert_equivalence(Q, DurableDeclare, AutoDeleteDeclare, Args1, Owner) ->
+ QName = amqqueue:get_name(Q),
+ DurableQ = amqqueue:is_durable(Q),
+ AutoDeleteQ = amqqueue:is_auto_delete(Q),
+ ok = check_exclusive_access(Q, Owner, strict),
+ ok = rabbit_misc:assert_field_equivalence(DurableQ, DurableDeclare, QName, durable),
+ ok = rabbit_misc:assert_field_equivalence(AutoDeleteQ, AutoDeleteDeclare, QName, auto_delete),
+ ok = assert_args_equivalence(Q, Args1).
+
+-spec check_exclusive_access(amqqueue:amqqueue(), pid()) ->
+ 'ok' | rabbit_types:channel_exit().
+
+check_exclusive_access(Q, Owner) -> check_exclusive_access(Q, Owner, lax).
+
+check_exclusive_access(Q, Owner, _MatchType)
+ when ?amqqueue_exclusive_owner_is(Q, Owner) ->
+ ok;
+check_exclusive_access(Q, _ReaderPid, lax)
+ when ?amqqueue_exclusive_owner_is(Q, none) ->
+ ok;
+check_exclusive_access(Q, _ReaderPid, _MatchType) ->
+ QueueName = amqqueue:get_name(Q),
+ rabbit_misc:protocol_error(
+ resource_locked,
+ "cannot obtain exclusive access to locked ~s. It could be originally "
+ "declared on another connection or the exclusive property value does not "
+ "match that of the original declaration.",
+ [rabbit_misc:rs(QueueName)]).
+
+-spec with_exclusive_access_or_die(name(), pid(), qfun(A)) ->
+ A | rabbit_types:channel_exit().
+
+with_exclusive_access_or_die(Name, ReaderPid, F) ->
+ with_or_die(Name,
+ fun (Q) -> check_exclusive_access(Q, ReaderPid), F(Q) end).
+
+assert_args_equivalence(Q, RequiredArgs) ->
+ QueueName = amqqueue:get_name(Q),
+ Args = amqqueue:get_arguments(Q),
+ rabbit_misc:assert_args_equivalence(Args, RequiredArgs, QueueName,
+ [Key || {Key, _Fun} <- declare_args()]).
+
+check_declare_arguments(QueueName, Args) ->
+ check_arguments(QueueName, Args, declare_args()).
+
+check_consume_arguments(QueueName, Args) ->
+ check_arguments(QueueName, Args, consume_args()).
+
+check_arguments(QueueName, Args, Validators) ->
+ [case rabbit_misc:table_lookup(Args, Key) of
+ undefined -> ok;
+ TypeVal -> case Fun(TypeVal, Args) of
+ ok -> ok;
+ {error, Error} -> rabbit_misc:protocol_error(
+ precondition_failed,
+ "invalid arg '~s' for ~s: ~255p",
+ [Key, rabbit_misc:rs(QueueName),
+ Error])
+ end
+ end || {Key, Fun} <- Validators],
+ ok.
+
+declare_args() ->
+ [{<<"x-expires">>, fun check_expires_arg/2},
+ {<<"x-message-ttl">>, fun check_message_ttl_arg/2},
+ {<<"x-dead-letter-exchange">>, fun check_dlxname_arg/2},
+ {<<"x-dead-letter-routing-key">>, fun check_dlxrk_arg/2},
+ {<<"x-max-length">>, fun check_non_neg_int_arg/2},
+ {<<"x-max-length-bytes">>, fun check_non_neg_int_arg/2},
+ {<<"x-max-in-memory-length">>, fun check_non_neg_int_arg/2},
+ {<<"x-max-in-memory-bytes">>, fun check_non_neg_int_arg/2},
+ {<<"x-max-priority">>, fun check_max_priority_arg/2},
+ {<<"x-overflow">>, fun check_overflow/2},
+ {<<"x-queue-mode">>, fun check_queue_mode/2},
+ {<<"x-single-active-consumer">>, fun check_single_active_consumer_arg/2},
+ {<<"x-queue-type">>, fun check_queue_type/2},
+ {<<"x-quorum-initial-group-size">>, fun check_initial_cluster_size_arg/2},
+ {<<"x-max-age">>, fun check_max_age_arg/2},
+ {<<"x-max-segment-size">>, fun check_non_neg_int_arg/2},
+ {<<"x-initial-cluster-size">>, fun check_initial_cluster_size_arg/2},
+ {<<"x-queue-leader-locator">>, fun check_queue_leader_locator_arg/2}].
+
+consume_args() -> [{<<"x-priority">>, fun check_int_arg/2},
+ {<<"x-cancel-on-ha-failover">>, fun check_bool_arg/2}].
+
+check_int_arg({Type, _}, _) ->
+ case lists:member(Type, ?INTEGER_ARG_TYPES) of
+ true -> ok;
+ false -> {error, {unacceptable_type, Type}}
+ end.
+
+check_bool_arg({bool, _}, _) -> ok;
+check_bool_arg({Type, _}, _) -> {error, {unacceptable_type, Type}}.
+
+check_non_neg_int_arg({Type, Val}, Args) ->
+ case check_int_arg({Type, Val}, Args) of
+ ok when Val >= 0 -> ok;
+ ok -> {error, {value_negative, Val}};
+ Error -> Error
+ end.
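+
+%% Illustrative example: check_non_neg_int_arg({long, 5}, []) = ok;
+%% check_non_neg_int_arg({long, -1}, []) = {error, {value_negative, -1}};
+%% a {longstr, <<"5">>} value fails with {error, {unacceptable_type, longstr}}.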
+
+check_expires_arg({Type, Val}, Args) ->
+ case check_int_arg({Type, Val}, Args) of
+ ok when Val == 0 -> {error, {value_zero, Val}};
+ ok -> rabbit_misc:check_expiry(Val);
+ Error -> Error
+ end.
+
+check_message_ttl_arg({Type, Val}, Args) ->
+ case check_int_arg({Type, Val}, Args) of
+ ok -> rabbit_misc:check_expiry(Val);
+ Error -> Error
+ end.
+
+check_max_priority_arg({Type, Val}, Args) ->
+ case check_non_neg_int_arg({Type, Val}, Args) of
+ ok when Val =< ?MAX_SUPPORTED_PRIORITY -> ok;
+ ok -> {error, {max_value_exceeded, Val}};
+ Error -> Error
+ end.
+
+check_single_active_consumer_arg({Type, Val}, Args) ->
+ case check_bool_arg({Type, Val}, Args) of
+ ok -> ok;
+ Error -> Error
+ end.
+
+check_initial_cluster_size_arg({Type, Val}, Args) ->
+ case check_non_neg_int_arg({Type, Val}, Args) of
+ ok when Val == 0 -> {error, {value_zero, Val}};
+ ok -> ok;
+ Error -> Error
+ end.
+
+check_max_age_arg({longstr, Val}, _Args) ->
+ case check_max_age(Val) of
+ {error, _} = E ->
+ E;
+ _ ->
+ ok
+ end;
+check_max_age_arg({Type, _}, _Args) ->
+ {error, {unacceptable_type, Type}}.
+
+check_max_age(MaxAge) ->
+ case re:run(MaxAge, "(^[0-9]*)(.*)", [{capture, all_but_first, list}]) of
+ {match, [Value, Unit]} ->
+ case list_to_integer(Value) of
+ I when I > 0 ->
+ case lists:member(Unit, ["Y", "M", "D", "h", "m", "s"]) of
+ true ->
+ Int = list_to_integer(Value),
+ Int * unit_value_in_ms(Unit);
+ false ->
+ {error, invalid_max_age}
+ end;
+ _ ->
+ {error, invalid_max_age}
+ end;
+ _ ->
+ {error, invalid_max_age}
+ end.
+
+unit_value_in_ms("Y") ->
+ 365 * unit_value_in_ms("D");
+unit_value_in_ms("M") ->
+ 30 * unit_value_in_ms("D");
+unit_value_in_ms("D") ->
+ 24 * unit_value_in_ms("h");
+unit_value_in_ms("h") ->
+ 3600 * unit_value_in_ms("s");
+unit_value_in_ms("m") ->
+ 60 * unit_value_in_ms("s");
+unit_value_in_ms("s") ->
+ 1000.
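+
+%% Illustrative example: check_max_age("7D") = 7 * 24 * 3600 * 1000
+%% = 604800000 (ms), while check_max_age("0D") and check_max_age("7w")
+%% both return {error, invalid_max_age}.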
+
+%% Note that the validity of x-dead-letter-exchange is already verified
+%% by rabbit_channel's queue.declare handler.
+check_dlxname_arg({longstr, _}, _) -> ok;
+check_dlxname_arg({Type, _}, _) -> {error, {unacceptable_type, Type}}.
+
+check_dlxrk_arg({longstr, _}, Args) ->
+ case rabbit_misc:table_lookup(Args, <<"x-dead-letter-exchange">>) of
+ undefined -> {error, routing_key_but_no_dlx_defined};
+ _ -> ok
+ end;
+check_dlxrk_arg({Type, _}, _Args) ->
+ {error, {unacceptable_type, Type}}.
+
+check_overflow({longstr, Val}, _Args) ->
+ case lists:member(Val, [<<"drop-head">>,
+ <<"reject-publish">>,
+ <<"reject-publish-dlx">>]) of
+ true -> ok;
+ false -> {error, invalid_overflow}
+ end;
+check_overflow({Type, _}, _Args) ->
+ {error, {unacceptable_type, Type}}.
+
+check_queue_leader_locator_arg({longstr, Val}, _Args) ->
+ case lists:member(Val, [<<"client-local">>,
+ <<"random">>,
+ <<"least-leaders">>]) of
+ true -> ok;
+ false -> {error, invalid_queue_locator_arg}
+ end;
+check_queue_leader_locator_arg({Type, _}, _Args) ->
+ {error, {unacceptable_type, Type}}.
+
+check_queue_mode({longstr, Val}, _Args) ->
+ case lists:member(Val, [<<"default">>, <<"lazy">>]) of
+ true -> ok;
+ false -> {error, invalid_queue_mode}
+ end;
+check_queue_mode({Type, _}, _Args) ->
+ {error, {unacceptable_type, Type}}.
+
+check_queue_type({longstr, Val}, _Args) ->
+ case lists:member(Val, [<<"classic">>, <<"quorum">>, <<"stream">>]) of
+ true -> ok;
+ false -> {error, invalid_queue_type}
+ end;
+check_queue_type({Type, _}, _Args) ->
+ {error, {unacceptable_type, Type}}.
+
+-spec list() -> [amqqueue:amqqueue()].
+
+list() ->
+ list_with_possible_retry(fun do_list/0).
+
+do_list() ->
+ mnesia:dirty_match_object(rabbit_queue, amqqueue:pattern_match_all()).
+
+-spec count() -> non_neg_integer().
+
+count() ->
+ mnesia:table_info(rabbit_queue, size).
+
+-spec list_names() -> [rabbit_amqqueue:name()].
+
+list_names() -> mnesia:dirty_all_keys(rabbit_queue).
+
+list_names(VHost) -> [amqqueue:get_name(Q) || Q <- list(VHost)].
+
+list_local_names() ->
+ [ amqqueue:get_name(Q) || Q <- list(),
+ amqqueue:get_state(Q) =/= crashed, is_local_to_node(amqqueue:get_pid(Q), node())].
+
+list_local_names_down() ->
+ [ amqqueue:get_name(Q) || Q <- list(),
+ is_down(Q),
+ is_local_to_node(amqqueue:get_pid(Q), node())].
+
+is_down(Q) ->
+ try
+ info(Q, [state]) == [{state, down}]
+ catch
+ _:_ ->
+ true
+ end.
+
+
+-spec sample_local_queues() -> [amqqueue:amqqueue()].
+sample_local_queues() -> sample_n_by_name(list_local_names(), 300).
+
+-spec sample_n_by_name([rabbit_amqqueue:name()], pos_integer()) -> [amqqueue:amqqueue()].
+sample_n_by_name([], _N) ->
+ [];
+sample_n_by_name(Names, N) when is_list(Names) andalso is_integer(N) andalso N > 0 ->
+ %% lists:nth/2 throws when position is > list length
+ M = erlang:min(N, length(Names)),
+ Ids = lists:foldl(fun( _, Acc) when length(Acc) >= 100 ->
+ Acc;
+ (_, Acc) ->
+ Pick = lists:nth(rand:uniform(M), Names),
+ [Pick | Acc]
+ end,
+ [], lists:seq(1, M)),
+ lists:map(fun (Id) ->
+ {ok, Q} = rabbit_amqqueue:lookup(Id),
+ Q
+ end,
+ lists:usort(Ids)).
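+
+%% Note: the fold above samples names with replacement before usort/1, so
+%% the result may contain fewer than N queues even when N =< length(Names).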
+
+-spec sample_n([amqqueue:amqqueue()], pos_integer()) -> [amqqueue:amqqueue()].
+sample_n([], _N) ->
+ [];
+sample_n(Queues, N) when is_list(Queues) andalso is_integer(N) andalso N > 0 ->
+ Names = [amqqueue:get_name(Q) || Q <- Queues],
+ sample_n_by_name(Names, N).
+
+
+-spec list_by_type(atom()) -> [amqqueue:amqqueue()].
+
+list_by_type(classic) -> list_by_type(rabbit_classic_queue);
+list_by_type(quorum) -> list_by_type(rabbit_quorum_queue);
+list_by_type(Type) ->
+ {atomic, Qs} =
+ mnesia:sync_transaction(
+ fun () ->
+ mnesia:match_object(rabbit_durable_queue,
+ amqqueue:pattern_match_on_type(Type),
+ read)
+ end),
+ Qs.
+
+-spec list_local_quorum_queue_names() -> [rabbit_amqqueue:name()].
+
+list_local_quorum_queue_names() ->
+ [ amqqueue:get_name(Q) || Q <- list_by_type(quorum),
+ amqqueue:get_state(Q) =/= crashed,
+ lists:member(node(), get_quorum_nodes(Q))].
+
+-spec list_local_quorum_queues() -> [amqqueue:amqqueue()].
+list_local_quorum_queues() ->
+ [ Q || Q <- list_by_type(quorum),
+ amqqueue:get_state(Q) =/= crashed,
+ lists:member(node(), get_quorum_nodes(Q))].
+
+-spec list_local_leaders() -> [amqqueue:amqqueue()].
+list_local_leaders() ->
+ [ Q || Q <- list(),
+ amqqueue:is_quorum(Q),
+ amqqueue:get_state(Q) =/= crashed, amqqueue:get_leader(Q) =:= node()].
+
+-spec list_local_followers() -> [amqqueue:amqqueue()].
+list_local_followers() ->
+ [Q
+ || Q <- list(),
+ amqqueue:is_quorum(Q),
+ amqqueue:get_state(Q) =/= crashed,
+ amqqueue:get_leader(Q) =/= node(),
+ rabbit_quorum_queue:is_recoverable(Q)
+ ].
+
+-spec list_local_mirrored_classic_queues() -> [amqqueue:amqqueue()].
+list_local_mirrored_classic_queues() ->
+ [ Q || Q <- list(),
+ amqqueue:get_state(Q) =/= crashed,
+ amqqueue:is_classic(Q),
+ is_local_to_node(amqqueue:get_pid(Q), node()),
+ is_replicated(Q)].
+
+-spec list_local_mirrored_classic_names() -> [rabbit_amqqueue:name()].
+list_local_mirrored_classic_names() ->
+ [ amqqueue:get_name(Q) || Q <- list(),
+ amqqueue:get_state(Q) =/= crashed,
+ amqqueue:is_classic(Q),
+ is_local_to_node(amqqueue:get_pid(Q), node()),
+ is_replicated(Q)].
+
+-spec list_local_mirrored_classic_without_synchronised_mirrors() ->
+ [amqqueue:amqqueue()].
+list_local_mirrored_classic_without_synchronised_mirrors() ->
+ [ Q || Q <- list(),
+ amqqueue:get_state(Q) =/= crashed,
+ amqqueue:is_classic(Q),
+ %% filter out exclusive queues as they won't actually be mirrored
+ is_not_exclusive(Q),
+ is_local_to_node(amqqueue:get_pid(Q), node()),
+ is_replicated(Q),
+ not has_synchronised_mirrors_online(Q)].
+
+-spec list_local_mirrored_classic_without_synchronised_mirrors_for_cli() ->
+    [#{binary() => any()}].
+list_local_mirrored_classic_without_synchronised_mirrors_for_cli() ->
+ ClassicQs = list_local_mirrored_classic_without_synchronised_mirrors(),
+ [begin
+ #resource{name = Name} = amqqueue:get_name(Q),
+ #{
+ <<"readable_name">> => rabbit_data_coercion:to_binary(rabbit_misc:rs(amqqueue:get_name(Q))),
+ <<"name">> => Name,
+ <<"virtual_host">> => amqqueue:get_vhost(Q),
+ <<"type">> => <<"classic">>
+ }
+ end || Q <- ClassicQs].
+
+is_local_to_node(QPid, Node) when ?IS_CLASSIC(QPid) ->
+ Node =:= node(QPid);
+is_local_to_node({_, Leader} = QPid, Node) when ?IS_QUORUM(QPid) ->
+ Node =:= Leader.
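+%% For example, a classic queue pid running on this node satisfies
+%% is_local_to_node(QPid, node()), whereas a quorum queue ref {Name, Leader}
+%% only counts as local on its leader node.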
+
+-spec list(rabbit_types:vhost()) -> [amqqueue:amqqueue()].
+
+list(VHostPath) ->
+ list(VHostPath, rabbit_queue).
+
+list(VHostPath, TableName) ->
+ list_with_possible_retry(fun() -> do_list(VHostPath, TableName) end).
+
+%% Not dirty_match_object since that would not be transactional when used in a
+%% tx context
+do_list(VHostPath, TableName) ->
+ mnesia:async_dirty(
+ fun () ->
+ mnesia:match_object(
+ TableName,
+ amqqueue:pattern_match_on_name(rabbit_misc:r(VHostPath, queue)),
+ read)
+ end).
+
+list_with_possible_retry(Fun) ->
+ %% amqqueue migration:
+ %% The `rabbit_queue` or `rabbit_durable_queue` tables
+ %% might be migrated between the time we query the pattern
+ %% (with the `amqqueue` module) and the time we call
+ %% `mnesia:dirty_match_object()`. This would lead to an empty list
+ %% (no object matching the now incorrect pattern), not a Mnesia
+ %% error.
+ %%
+ %% So if the result is an empty list and the version of the
+ %% `amqqueue` record changed in between, we retry the operation.
+ %%
+ %% However, we don't do this if inside a Mnesia transaction: we
+ %% could end up with a live lock between this started transaction
+ %% and the Mnesia table migration which is blocked (but the
+ %% rabbit_feature_flags lock is held).
+ AmqqueueRecordVersion = amqqueue:record_version_to_use(),
+ case Fun() of
+ [] ->
+ case mnesia:is_transaction() of
+ true ->
+ [];
+ false ->
+ case amqqueue:record_version_to_use() of
+ AmqqueueRecordVersion -> [];
+ _ -> Fun()
+ end
+ end;
+ Ret ->
+ Ret
+ end.
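+%% Usage sketch (mirroring the callers above): wrap the dirty read in a fun,
+%%   list_with_possible_retry(fun () -> do_list(VHostPath, rabbit_queue) end),
+%% and the read is repeated at most once, only when it returned [] while
+%% amqqueue:record_version_to_use() changed under it outside a transaction.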
+
+-spec list_down(rabbit_types:vhost()) -> [amqqueue:amqqueue()].
+
+list_down(VHostPath) ->
+ case rabbit_vhost:exists(VHostPath) of
+ false -> [];
+ true ->
+ Present = list(VHostPath),
+ Durable = list(VHostPath, rabbit_durable_queue),
+ PresentS = sets:from_list([amqqueue:get_name(Q) || Q <- Present]),
+ sets:to_list(sets:filter(fun (Q) ->
+ N = amqqueue:get_name(Q),
+ not sets:is_element(N, PresentS)
+ end, sets:from_list(Durable)))
+ end.
+
+count(VHost) ->
+ try
+ %% this is certainly suboptimal but there is no way to count
+ %% things using a secondary index in Mnesia. Our counter-table-per-node
+ %% won't work here because with master migration of mirrored queues
+ %% the "ownership" of queues by nodes becomes a non-trivial problem
+ %% that requires a proper consensus algorithm.
+ length(list_for_count(VHost))
+ catch _:Err ->
+ rabbit_log:error("Failed to fetch number of queues in vhost ~p:~n~p~n",
+ [VHost, Err]),
+ 0
+ end.
+
+list_for_count(VHost) ->
+ list_with_possible_retry(
+ fun() ->
+ mnesia:dirty_index_read(rabbit_queue,
+ VHost,
+ amqqueue:field_vhost())
+ end).
+
+-spec info_keys() -> rabbit_types:info_keys().
+
+%% It should not default to classic queue keys, but to a subset of those that
+%% must be shared by all queue types. It is unclear whether this is even being
+%% used, so it is left here for backwards compatibility. Each queue type now
+%% handles info(Q, all_keys) with the keys it supports.
+info_keys() -> rabbit_amqqueue_process:info_keys().
+
+map(Qs, F) -> rabbit_misc:filter_exit_map(F, Qs).
+
+is_unresponsive(Q, _Timeout) when ?amqqueue_state_is(Q, crashed) ->
+ false;
+is_unresponsive(Q, Timeout) when ?amqqueue_is_classic(Q) ->
+ QPid = amqqueue:get_pid(Q),
+ try
+ delegate:invoke(QPid, {gen_server2, call, [{info, [name]}, Timeout]}),
+ false
+ catch
+ %% TODO catch any exit??
+ exit:{timeout, _} ->
+ true
+ end;
+is_unresponsive(Q, Timeout) when ?amqqueue_is_quorum(Q) ->
+ try
+ Leader = amqqueue:get_pid(Q),
+ case rabbit_fifo_client:stat(Leader, Timeout) of
+ {ok, _, _} -> false;
+ {timeout, _} -> true;
+ {error, _} -> true
+ end
+ catch
+ exit:{timeout, _} ->
+ true
+ end.
+
+format(Q) when ?amqqueue_is_quorum(Q) -> rabbit_quorum_queue:format(Q);
+format(Q) -> rabbit_amqqueue_process:format(Q).
+
+-spec info(amqqueue:amqqueue()) -> rabbit_types:infos().
+
+info(Q) when ?is_amqqueue(Q) -> rabbit_queue_type:info(Q, all_keys).
+
+
+-spec info(amqqueue:amqqueue(), rabbit_types:info_keys()) ->
+ rabbit_types:infos().
+
+info(Q, Items) when ?is_amqqueue(Q) ->
+ rabbit_queue_type:info(Q, Items).
+
+info_down(Q, DownReason) ->
+ rabbit_queue_type:info_down(Q, DownReason).
+
+info_down(Q, Items, DownReason) ->
+ rabbit_queue_type:info_down(Q, Items, DownReason).
+
+-spec info_all(rabbit_types:vhost()) -> [rabbit_types:infos()].
+
+info_all(VHostPath) ->
+ map(list(VHostPath), fun (Q) -> info(Q) end) ++
+ map(list_down(VHostPath), fun (Q) -> info_down(Q, down) end).
+
+-spec info_all(rabbit_types:vhost(), rabbit_types:info_keys()) ->
+ [rabbit_types:infos()].
+
+info_all(VHostPath, Items) ->
+ map(list(VHostPath), fun (Q) -> info(Q, Items) end) ++
+ map(list_down(VHostPath), fun (Q) -> info_down(Q, Items, down) end).
+
+emit_info_local(VHostPath, Items, Ref, AggregatorPid) ->
+ rabbit_control_misc:emitting_map_with_exit_handler(
+ AggregatorPid, Ref, fun(Q) -> info(Q, Items) end, list_local(VHostPath)).
+
+emit_info_all(Nodes, VHostPath, Items, Ref, AggregatorPid) ->
+ Pids = [ spawn_link(Node, rabbit_amqqueue, emit_info_local, [VHostPath, Items, Ref, AggregatorPid]) || Node <- Nodes ],
+ rabbit_control_misc:await_emitters_termination(Pids).
+
+collect_info_all(VHostPath, Items) ->
+ Nodes = rabbit_nodes:all_running(),
+ Ref = make_ref(),
+ Pids = [ spawn_link(Node, rabbit_amqqueue, emit_info_local, [VHostPath, Items, Ref, self()]) || Node <- Nodes ],
+ rabbit_control_misc:await_emitters_termination(Pids),
+ wait_for_queues(Ref, length(Pids), []).
+
+wait_for_queues(Ref, N, Acc) ->
+ receive
+ {Ref, finished} when N == 1 ->
+ Acc;
+ {Ref, finished} ->
+ wait_for_queues(Ref, N - 1, Acc);
+ {Ref, Items, continue} ->
+ wait_for_queues(Ref, N, [Items | Acc])
+ after
+ 1000 ->
+ Acc
+ end.
+
+emit_info_down(VHostPath, Items, Ref, AggregatorPid) ->
+ rabbit_control_misc:emitting_map_with_exit_handler(
+ AggregatorPid, Ref, fun(Q) -> info_down(Q, Items, down) end,
+ list_down(VHostPath)).
+
+emit_unresponsive_local(VHostPath, Items, Timeout, Ref, AggregatorPid) ->
+ rabbit_control_misc:emitting_map_with_exit_handler(
+ AggregatorPid, Ref, fun(Q) -> case is_unresponsive(Q, Timeout) of
+ true -> info_down(Q, Items, unresponsive);
+ false -> []
+ end
+ end, list_local(VHostPath)
+ ).
+
+emit_unresponsive(Nodes, VHostPath, Items, Timeout, Ref, AggregatorPid) ->
+ Pids = [ spawn_link(Node, rabbit_amqqueue, emit_unresponsive_local,
+ [VHostPath, Items, Timeout, Ref, AggregatorPid]) || Node <- Nodes ],
+ rabbit_control_misc:await_emitters_termination(Pids).
+
+info_local(VHostPath) ->
+ map(list_local(VHostPath), fun (Q) -> info(Q, [name]) end).
+
+list_local(VHostPath) ->
+ [Q || Q <- list(VHostPath),
+          amqqueue:get_state(Q) =/= crashed,
+          is_local_to_node(amqqueue:get_pid(Q), node())].
+
+-spec force_event_refresh(reference()) -> 'ok'.
+
+% Note: https://www.pivotaltracker.com/story/show/166962656
+% This event is necessary for the stats timer to be initialized with
+% the correct values once the management agent has started
+force_event_refresh(Ref) ->
+    %% note: quorum queues emit stats on periodic ticks that run unconditionally,
+ %% so force_event_refresh is unnecessary (and, in fact, would only produce log noise) for QQs.
+ ClassicQs = list_by_type(rabbit_classic_queue),
+ [gen_server2:cast(amqqueue:get_pid(Q),
+ {force_event_refresh, Ref}) || Q <- ClassicQs],
+ ok.
+
+-spec notify_policy_changed(amqqueue:amqqueue()) -> 'ok'.
+notify_policy_changed(Q) when ?is_amqqueue(Q) ->
+ rabbit_queue_type:policy_changed(Q).
+
+-spec consumers(amqqueue:amqqueue()) ->
+ [{pid(), rabbit_types:ctag(), boolean(), non_neg_integer(),
+ boolean(), atom(),
+ rabbit_framing:amqp_table(), rabbit_types:username()}].
+
+consumers(Q) when ?amqqueue_is_classic(Q) ->
+ QPid = amqqueue:get_pid(Q),
+ delegate:invoke(QPid, {gen_server2, call, [consumers, infinity]});
+consumers(Q) when ?amqqueue_is_quorum(Q) ->
+ QPid = amqqueue:get_pid(Q),
+ case ra:local_query(QPid, fun rabbit_fifo:query_consumers/1) of
+ {ok, {_, Result}, _} -> maps:values(Result);
+ _ -> []
+ end;
+consumers(Q) when ?amqqueue_is_stream(Q) ->
+    %% TODO: how? Stream consumers only exist on the channel.
+    %% We could list the offset listeners on the writer, but we don't even
+    %% have a consumer tag, only a (channel) pid and an offset.
+ [].
+
+-spec consumer_info_keys() -> rabbit_types:info_keys().
+
+consumer_info_keys() -> ?CONSUMER_INFO_KEYS.
+
+-spec consumers_all(rabbit_types:vhost()) ->
+ [{name(), pid(), rabbit_types:ctag(), boolean(),
+ non_neg_integer(), rabbit_framing:amqp_table()}].
+
+consumers_all(VHostPath) ->
+ ConsumerInfoKeys = consumer_info_keys(),
+ lists:append(
+ map(list(VHostPath),
+ fun(Q) -> get_queue_consumer_info(Q, ConsumerInfoKeys) end)).
+
+emit_consumers_all(Nodes, VHostPath, Ref, AggregatorPid) ->
+ Pids = [ spawn_link(Node, rabbit_amqqueue, emit_consumers_local, [VHostPath, Ref, AggregatorPid]) || Node <- Nodes ],
+ rabbit_control_misc:await_emitters_termination(Pids),
+ ok.
+
+emit_consumers_local(VHostPath, Ref, AggregatorPid) ->
+ ConsumerInfoKeys = consumer_info_keys(),
+ rabbit_control_misc:emitting_map(
+ AggregatorPid, Ref,
+ fun(Q) -> get_queue_consumer_info(Q, ConsumerInfoKeys) end,
+ list_local(VHostPath)).
+
+get_queue_consumer_info(Q, ConsumerInfoKeys) ->
+ [lists:zip(ConsumerInfoKeys,
+ [amqqueue:get_name(Q), ChPid, CTag,
+ AckRequired, Prefetch, Active, ActivityStatus, Args]) ||
+ {ChPid, CTag, AckRequired, Prefetch, Active, ActivityStatus, Args, _} <- consumers(Q)].
+
+-spec stat(amqqueue:amqqueue()) ->
+ {'ok', non_neg_integer(), non_neg_integer()}.
+stat(Q) ->
+ rabbit_queue_type:stat(Q).
+
+-spec pid_of(amqqueue:amqqueue()) ->
+ pid().
+
+pid_of(Q) -> amqqueue:get_pid(Q).
+
+-spec pid_of(rabbit_types:vhost(), rabbit_misc:resource_name()) ->
+ pid() | rabbit_types:error('not_found').
+
+pid_of(VHost, QueueName) ->
+ case lookup(rabbit_misc:r(VHost, queue, QueueName)) of
+ {ok, Q} -> pid_of(Q);
+ {error, not_found} = E -> E
+ end.
+
+-spec delete_exclusive(qpids(), pid()) -> 'ok'.
+
+delete_exclusive(QPids, ConnId) ->
+ rabbit_amqqueue_common:delete_exclusive(QPids, ConnId).
+
+-spec delete_immediately(qpids()) -> 'ok'.
+
+delete_immediately(QPids) ->
+ {Classic, Quorum} = filter_pid_per_type(QPids),
+ [gen_server2:cast(QPid, delete_immediately) || QPid <- Classic],
+ case Quorum of
+ [] -> ok;
+ _ -> {error, cannot_delete_quorum_queues, Quorum}
+ end.
+
+delete_immediately_by_resource(Resources) ->
+ {Classic, Quorum} = filter_resource_per_type(Resources),
+ [gen_server2:cast(QPid, delete_immediately) || {_, QPid} <- Classic],
+ [rabbit_quorum_queue:delete_immediately(Resource, QPid)
+ || {Resource, QPid} <- Quorum],
+ ok.
+
+-spec delete
+ (amqqueue:amqqueue(), 'false', 'false', rabbit_types:username()) ->
+ qlen() |
+ {protocol_error, Type :: atom(), Reason :: string(), Args :: term()};
+ (amqqueue:amqqueue(), 'true' , 'false', rabbit_types:username()) ->
+ qlen() | rabbit_types:error('in_use') |
+ {protocol_error, Type :: atom(), Reason :: string(), Args :: term()};
+ (amqqueue:amqqueue(), 'false', 'true', rabbit_types:username()) ->
+ qlen() | rabbit_types:error('not_empty') |
+ {protocol_error, Type :: atom(), Reason :: string(), Args :: term()};
+ (amqqueue:amqqueue(), 'true' , 'true', rabbit_types:username()) ->
+ qlen() |
+ rabbit_types:error('in_use') |
+ rabbit_types:error('not_empty') |
+ {protocol_error, Type :: atom(), Reason :: string(), Args :: term()}.
+delete(Q, IfUnused, IfEmpty, ActingUser) ->
+ rabbit_queue_type:delete(Q, IfUnused, IfEmpty, ActingUser).
+
+%% delete_crashed* INCLUDED FOR BACKWARDS COMPATIBILITY REASONS
+delete_crashed(Q) when ?amqqueue_is_classic(Q) ->
+ rabbit_classic_queue:delete_crashed(Q).
+
+delete_crashed(Q, ActingUser) when ?amqqueue_is_classic(Q) ->
+ rabbit_classic_queue:delete_crashed(Q, ActingUser).
+
+-spec delete_crashed_internal(amqqueue:amqqueue(), rabbit_types:username()) -> 'ok'.
+delete_crashed_internal(Q, ActingUser) when ?amqqueue_is_classic(Q) ->
+ rabbit_classic_queue:delete_crashed_internal(Q, ActingUser).
+
+-spec purge(amqqueue:amqqueue()) -> qlen().
+purge(Q) when ?is_amqqueue(Q) ->
+ rabbit_queue_type:purge(Q).
+
+-spec requeue(name(),
+ {rabbit_fifo:consumer_tag(), [msg_id()]},
+ rabbit_queue_type:state()) ->
+ {ok, rabbit_queue_type:state(), rabbit_queue_type:actions()}.
+requeue(QRef, {CTag, MsgIds}, QStates) ->
+ reject(QRef, true, {CTag, MsgIds}, QStates).
+
+-spec ack(name(),
+ {rabbit_fifo:consumer_tag(), [msg_id()]},
+ rabbit_queue_type:state()) ->
+ {ok, rabbit_queue_type:state(), rabbit_queue_type:actions()}.
+ack(QPid, {CTag, MsgIds}, QueueStates) ->
+ rabbit_queue_type:settle(QPid, complete, CTag, MsgIds, QueueStates).
+
+
+-spec reject(name(),
+ boolean(),
+ {rabbit_fifo:consumer_tag(), [msg_id()]},
+ rabbit_queue_type:state()) ->
+ {ok, rabbit_queue_type:state(), rabbit_queue_type:actions()}.
+reject(QRef, Requeue, {CTag, MsgIds}, QStates) ->
+ Op = case Requeue of
+ true -> requeue;
+ false -> discard
+ end,
+ rabbit_queue_type:settle(QRef, Op, CTag, MsgIds, QStates).
+
+-spec notify_down_all(qpids(), pid()) -> ok_or_errors().
+notify_down_all(QPids, ChPid) ->
+ notify_down_all(QPids, ChPid, ?CHANNEL_OPERATION_TIMEOUT).
+
+-spec notify_down_all(qpids(), pid(), non_neg_integer()) ->
+ ok_or_errors().
+notify_down_all(QPids, ChPid, Timeout) ->
+ case rpc:call(node(), delegate, invoke,
+ [QPids, {gen_server2, call, [{notify_down, ChPid}, infinity]}], Timeout) of
+ {badrpc, timeout} -> {error, {channel_operation_timeout, Timeout}};
+ {badrpc, Reason} -> {error, Reason};
+ {_, Bads} ->
+ case lists:filter(
+ fun ({_Pid, {exit, {R, _}, _}}) ->
+ rabbit_misc:is_abnormal_exit(R);
+ ({_Pid, _}) -> false
+ end, Bads) of
+ [] -> ok;
+ Bads1 -> {error, Bads1}
+ end;
+ Error -> {error, Error}
+ end.
+
+-spec activate_limit_all(qpids(), pid()) -> ok.
+
+activate_limit_all(QRefs, ChPid) ->
+ QPids = [P || P <- QRefs, ?IS_CLASSIC(P)],
+ delegate:invoke_no_result(QPids, {gen_server2, cast,
+ [{activate_limit, ChPid}]}).
+
+-spec credit(amqqueue:amqqueue(),
+ rabbit_types:ctag(),
+ non_neg_integer(),
+ boolean(),
+ rabbit_queue_type:state()) ->
+ {ok, rabbit_queue_type:state(), rabbit_queue_type:actions()}.
+credit(Q, CTag, Credit, Drain, QStates) ->
+ rabbit_queue_type:credit(Q, CTag, Credit, Drain, QStates).
+
+-spec basic_get(amqqueue:amqqueue(), boolean(), pid(), rabbit_types:ctag(),
+ rabbit_queue_type:state()) ->
+ {'ok', non_neg_integer(), qmsg(), rabbit_queue_type:state()} |
+ {'empty', rabbit_queue_type:state()} |
+ {protocol_error, Type :: atom(), Reason :: string(), Args :: term()}.
+basic_get(Q, NoAck, LimiterPid, CTag, QStates0) ->
+ rabbit_queue_type:dequeue(Q, NoAck, LimiterPid, CTag, QStates0).
+
+
+-spec basic_consume(amqqueue:amqqueue(), boolean(), pid(), pid(), boolean(),
+ non_neg_integer(), rabbit_types:ctag(), boolean(),
+ rabbit_framing:amqp_table(), any(), rabbit_types:username(),
+ rabbit_queue_type:state()) ->
+ {ok, rabbit_queue_type:state(), rabbit_queue_type:actions()} |
+ {error, term()} |
+ {protocol_error, Type :: atom(), Reason :: string(), Args :: term()}.
+basic_consume(Q, NoAck, ChPid, LimiterPid,
+ LimiterActive, ConsumerPrefetchCount, ConsumerTag,
+ ExclusiveConsume, Args, OkMsg, ActingUser, Contexts) ->
+
+ QName = amqqueue:get_name(Q),
+ %% first phase argument validation
+ %% each queue type may do further validations
+ ok = check_consume_arguments(QName, Args),
+ Spec = #{no_ack => NoAck,
+ channel_pid => ChPid,
+ limiter_pid => LimiterPid,
+ limiter_active => LimiterActive,
+ prefetch_count => ConsumerPrefetchCount,
+ consumer_tag => ConsumerTag,
+ exclusive_consume => ExclusiveConsume,
+ args => Args,
+ ok_msg => OkMsg,
+ acting_user => ActingUser},
+ rabbit_queue_type:consume(Q, Spec, Contexts).
+
+-spec basic_cancel(amqqueue:amqqueue(), rabbit_types:ctag(), any(),
+ rabbit_types:username(),
+ rabbit_queue_type:state()) ->
+ {ok, rabbit_queue_type:state()} | {error, term()}.
+basic_cancel(Q, ConsumerTag, OkMsg, ActingUser, QStates) ->
+ rabbit_queue_type:cancel(Q, ConsumerTag,
+ OkMsg, ActingUser, QStates).
+
+-spec notify_decorators(amqqueue:amqqueue()) -> 'ok'.
+
+notify_decorators(Q) ->
+ QPid = amqqueue:get_pid(Q),
+ delegate:invoke_no_result(QPid, {gen_server2, cast, [notify_decorators]}).
+
+notify_sent(QPid, ChPid) ->
+ rabbit_amqqueue_common:notify_sent(QPid, ChPid).
+
+notify_sent_queue_down(QPid) ->
+ rabbit_amqqueue_common:notify_sent_queue_down(QPid).
+
+-spec resume(pid(), pid()) -> 'ok'.
+
+resume(QPid, ChPid) -> delegate:invoke_no_result(QPid, {gen_server2, cast,
+ [{resume, ChPid}]}).
+
+internal_delete1(QueueName, OnlyDurable) ->
+ internal_delete1(QueueName, OnlyDurable, normal).
+
+internal_delete1(QueueName, OnlyDurable, Reason) ->
+ ok = mnesia:delete({rabbit_queue, QueueName}),
+ case Reason of
+ auto_delete ->
+ case mnesia:wread({rabbit_durable_queue, QueueName}) of
+ [] -> ok;
+ [_] -> ok = mnesia:delete({rabbit_durable_queue, QueueName})
+ end;
+ _ ->
+ mnesia:delete({rabbit_durable_queue, QueueName})
+ end,
+ %% we want to execute some things, as decided by rabbit_exchange,
+ %% after the transaction.
+ rabbit_binding:remove_for_destination(QueueName, OnlyDurable).
+
+-spec internal_delete(name(), rabbit_types:username()) -> 'ok'.
+
+internal_delete(QueueName, ActingUser) ->
+ internal_delete(QueueName, ActingUser, normal).
+
+internal_delete(QueueName, ActingUser, Reason) ->
+ rabbit_misc:execute_mnesia_tx_with_tail(
+ fun () ->
+ case {mnesia:wread({rabbit_queue, QueueName}),
+ mnesia:wread({rabbit_durable_queue, QueueName})} of
+ {[], []} ->
+ rabbit_misc:const(ok);
+ _ ->
+ Deletions = internal_delete1(QueueName, false, Reason),
+ T = rabbit_binding:process_deletions(Deletions,
+ ?INTERNAL_USER),
+ fun() ->
+ ok = T(),
+ rabbit_core_metrics:queue_deleted(QueueName),
+ ok = rabbit_event:notify(queue_deleted,
+ [{name, QueueName},
+ {user_who_performed_action, ActingUser}])
+ end
+ end
+ end).
+
+-spec forget_all_durable(node()) -> 'ok'.
+
+forget_all_durable(Node) ->
+    %% Note: rabbit is not running, so we avoid e.g. the worker pool. That is
+    %% also why we don't invoke the return value of rabbit_binding:process_deletions/1.
+ {atomic, ok} =
+ mnesia:sync_transaction(
+ fun () ->
+ Qs = mnesia:match_object(rabbit_durable_queue,
+ amqqueue:pattern_match_all(), write),
+ [forget_node_for_queue(Node, Q) ||
+ Q <- Qs,
+ is_local_to_node(amqqueue:get_pid(Q), Node)],
+ ok
+ end),
+ ok.
+
+%% Try to promote a mirror while the node is down - it should recover as a
+%% master. We take the oldest mirror here for the best chance of
+%% recovery.
+forget_node_for_queue(_DeadNode, Q)
+ when ?amqqueue_is_quorum(Q) ->
+ ok;
+forget_node_for_queue(DeadNode, Q) ->
+ RS = amqqueue:get_recoverable_slaves(Q),
+ forget_node_for_queue(DeadNode, RS, Q).
+
+forget_node_for_queue(_DeadNode, [], Q) ->
+ %% No mirrors to recover from, queue is gone.
+ %% Don't process_deletions since that just calls callbacks and we
+ %% are not really up.
+ Name = amqqueue:get_name(Q),
+ internal_delete1(Name, true);
+
+%% Should not happen, but let's be conservative.
+forget_node_for_queue(DeadNode, [DeadNode | T], Q) ->
+ forget_node_for_queue(DeadNode, T, Q);
+
+forget_node_for_queue(DeadNode, [H|T], Q) when ?is_amqqueue(Q) ->
+ Type = amqqueue:get_type(Q),
+ case {node_permits_offline_promotion(H), Type} of
+ {false, _} -> forget_node_for_queue(DeadNode, T, Q);
+ {true, rabbit_classic_queue} ->
+ Q1 = amqqueue:set_pid(Q, rabbit_misc:node_to_fake_pid(H)),
+ ok = mnesia:write(rabbit_durable_queue, Q1, write);
+ {true, rabbit_quorum_queue} ->
+ ok
+ end.
+
+node_permits_offline_promotion(Node) ->
+ case node() of
+ Node -> not rabbit:is_running(); %% [1]
+ _ -> All = rabbit_mnesia:cluster_nodes(all),
+ Running = rabbit_nodes:all_running(),
+ lists:member(Node, All) andalso
+ not lists:member(Node, Running) %% [2]
+ end.
+%% [1] In this case if we are a real running node (i.e. rabbitmqctl
+%% has RPCed into us) then we cannot allow promotion. If on the other
+%% hand we *are* rabbitmqctl impersonating the node for offline
+%% node-forgetting then we can.
+%%
+%% [2] This is simpler; as long as it's down that's OK
+
+-spec run_backing_queue
+ (pid(), atom(), (fun ((atom(), A) -> {[rabbit_types:msg_id()], A}))) ->
+ 'ok'.
+
+run_backing_queue(QPid, Mod, Fun) ->
+ gen_server2:cast(QPid, {run_backing_queue, Mod, Fun}).
+
+-spec set_ram_duration_target(pid(), number() | 'infinity') -> 'ok'.
+
+set_ram_duration_target(QPid, Duration) ->
+ gen_server2:cast(QPid, {set_ram_duration_target, Duration}).
+
+-spec set_maximum_since_use(pid(), non_neg_integer()) -> 'ok'.
+
+set_maximum_since_use(QPid, Age) ->
+ gen_server2:cast(QPid, {set_maximum_since_use, Age}).
+
+-spec update_mirroring(pid()) -> 'ok'.
+
+update_mirroring(QPid) ->
+ ok = delegate:invoke_no_result(QPid, {gen_server2, cast, [update_mirroring]}).
+
+-spec sync_mirrors(amqqueue:amqqueue() | pid()) ->
+ 'ok' | rabbit_types:error('not_mirrored').
+
+sync_mirrors(Q) when ?is_amqqueue(Q) ->
+ QPid = amqqueue:get_pid(Q),
+ delegate:invoke(QPid, {gen_server2, call, [sync_mirrors, infinity]});
+sync_mirrors(QPid) ->
+ delegate:invoke(QPid, {gen_server2, call, [sync_mirrors, infinity]}).
+
+-spec cancel_sync_mirrors(amqqueue:amqqueue() | pid()) ->
+ 'ok' | {'ok', 'not_syncing'}.
+
+cancel_sync_mirrors(Q) when ?is_amqqueue(Q) ->
+ QPid = amqqueue:get_pid(Q),
+ delegate:invoke(QPid, {gen_server2, call, [cancel_sync_mirrors, infinity]});
+cancel_sync_mirrors(QPid) ->
+ delegate:invoke(QPid, {gen_server2, call, [cancel_sync_mirrors, infinity]}).
+
+-spec is_replicated(amqqueue:amqqueue()) -> boolean().
+
+is_replicated(Q) when ?amqqueue_is_quorum(Q) ->
+ true;
+is_replicated(Q) ->
+ rabbit_mirror_queue_misc:is_mirrored(Q).
+
+is_exclusive(Q) when ?amqqueue_exclusive_owner_is(Q, none) ->
+ false;
+is_exclusive(Q) when ?amqqueue_exclusive_owner_is_pid(Q) ->
+ true.
+
+is_not_exclusive(Q) ->
+ not is_exclusive(Q).
+
+is_dead_exclusive(Q) when ?amqqueue_exclusive_owner_is(Q, none) ->
+ false;
+is_dead_exclusive(Q) when ?amqqueue_exclusive_owner_is_pid(Q) ->
+ Pid = amqqueue:get_pid(Q),
+ not rabbit_mnesia:is_process_alive(Pid).
+
+-spec has_synchronised_mirrors_online(amqqueue:amqqueue()) -> boolean().
+has_synchronised_mirrors_online(Q) ->
+    %% a queue with all mirrors down has no synchronised mirror pids,
+    %% and is therefore reported as having no synchronised mirrors online.
+ MirrorPids = amqqueue:get_sync_slave_pids(Q),
+ MirrorPids =/= [] andalso lists:any(fun rabbit_misc:is_process_alive/1, MirrorPids).
+
+-spec on_node_up(node()) -> 'ok'.
+
+on_node_up(Node) ->
+ ok = rabbit_misc:execute_mnesia_transaction(
+ fun () ->
+ Qs = mnesia:match_object(rabbit_queue,
+ amqqueue:pattern_match_all(), write),
+ [maybe_clear_recoverable_node(Node, Q) || Q <- Qs],
+ ok
+ end).
+
+maybe_clear_recoverable_node(Node, Q) ->
+ SPids = amqqueue:get_sync_slave_pids(Q),
+ RSs = amqqueue:get_recoverable_slaves(Q),
+ case lists:member(Node, RSs) of
+ true ->
+ %% There is a race with
+ %% rabbit_mirror_queue_slave:record_synchronised/1 called
+ %% by the incoming mirror node and this function, called
+ %% by the master node. If this function is executed after
+ %% record_synchronised/1, the node is erroneously removed
+ %% from the recoverable mirrors list.
+ %%
+ %% We check if the mirror node's queue PID is alive. If it is
+ %% the case, then this function is executed after. In this
+ %% situation, we don't touch the queue record, it is already
+ %% correct.
+ DoClearNode =
+ case [SP || SP <- SPids, node(SP) =:= Node] of
+ [SPid] -> not rabbit_misc:is_process_alive(SPid);
+ _ -> true
+ end,
+ if
+ DoClearNode -> RSs1 = RSs -- [Node],
+ store_queue(
+ amqqueue:set_recoverable_slaves(Q, RSs1));
+ true -> ok
+ end;
+ false ->
+ ok
+ end.
+
+-spec on_node_down(node()) -> 'ok'.
+
+on_node_down(Node) ->
+ {QueueNames, QueueDeletions} = delete_queues_on_node_down(Node),
+ notify_queue_binding_deletions(QueueDeletions),
+ rabbit_core_metrics:queues_deleted(QueueNames),
+ notify_queues_deleted(QueueNames),
+ ok.
+
+delete_queues_on_node_down(Node) ->
+ lists:unzip(lists:flatten([
+ rabbit_misc:execute_mnesia_transaction(
+ fun () -> [{Queue, delete_queue(Queue)} || Queue <- Queues] end
+ ) || Queues <- partition_queues(queues_to_delete_when_node_down(Node))
+ ])).
+
+delete_queue(QueueName) ->
+ ok = mnesia:delete({rabbit_queue, QueueName}),
+ rabbit_binding:remove_transient_for_destination(QueueName).
+
+% If there are many queues and we delete them all in a single Mnesia transaction,
+% this can block all other Mnesia operations for a really long time.
+% In situations where a node wants to (re-)join a cluster,
+% Mnesia won't be able to sync on the new node until this operation finishes.
+% As a result, we want to have multiple Mnesia transactions so that other
+% operations can make progress in between these queue delete transactions.
+%
+% 10 queues per Mnesia transaction is an arbitrary number, but it seems to work OK with 50k queues per node.
+partition_queues([Q0,Q1,Q2,Q3,Q4,Q5,Q6,Q7,Q8,Q9 | T]) ->
+ [[Q0,Q1,Q2,Q3,Q4,Q5,Q6,Q7,Q8,Q9] | partition_queues(T)];
+partition_queues(T) ->
+ [T].
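+%% For example (hypothetical queue names):
+%%   partition_queues([Q1,Q2,Q3,Q4,Q5,Q6,Q7,Q8,Q9,Q10,Q11,Q12])
+%% evaluates to [[Q1,Q2,Q3,Q4,Q5,Q6,Q7,Q8,Q9,Q10], [Q11,Q12]], i.e. one
+%% Mnesia transaction for the first ten deletions and one for the remainder.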
+
+queues_to_delete_when_node_down(NodeDown) ->
+ rabbit_misc:execute_mnesia_transaction(fun () ->
+ qlc:e(qlc:q([amqqueue:get_name(Q) ||
+ Q <- mnesia:table(rabbit_queue),
+ amqqueue:qnode(Q) == NodeDown andalso
+ not rabbit_mnesia:is_process_alive(amqqueue:get_pid(Q)) andalso
+ (not rabbit_amqqueue:is_replicated(Q) orelse
+ rabbit_amqqueue:is_dead_exclusive(Q))]
+ ))
+ end).
+
+notify_queue_binding_deletions(QueueDeletions) ->
+ rabbit_misc:execute_mnesia_tx_with_tail(
+ fun() ->
+ rabbit_binding:process_deletions(
+ lists:foldl(
+ fun rabbit_binding:combine_deletions/2,
+ rabbit_binding:new_deletions(),
+ QueueDeletions
+ ),
+ ?INTERNAL_USER
+ )
+ end
+ ).
+
+notify_queues_deleted(QueueDeletions) ->
+ lists:foreach(
+ fun(Queue) ->
+ ok = rabbit_event:notify(queue_deleted,
+ [{name, Queue},
+ {user, ?INTERNAL_USER}])
+ end,
+ QueueDeletions).
+
+-spec pseudo_queue(name(), pid()) -> amqqueue:amqqueue().
+
+pseudo_queue(QueueName, Pid) ->
+ pseudo_queue(QueueName, Pid, false).
+
+-spec pseudo_queue(name(), pid(), boolean()) -> amqqueue:amqqueue().
+
+pseudo_queue(#resource{kind = queue} = QueueName, Pid, Durable)
+ when is_pid(Pid) andalso
+ is_boolean(Durable) ->
+ amqqueue:new(QueueName,
+ Pid,
+ Durable,
+ false,
+ none, % Owner,
+ [],
+ undefined, % VHost,
+ #{user => undefined}, % ActingUser
+ rabbit_classic_queue % Type
+ ).
+
+-spec immutable(amqqueue:amqqueue()) -> amqqueue:amqqueue().
+
+immutable(Q) -> amqqueue:set_immutable(Q).
+
+-spec deliver([amqqueue:amqqueue()], rabbit_types:delivery()) -> 'ok'.
+
+deliver(Qs, Delivery) ->
+ _ = rabbit_queue_type:deliver(Qs, Delivery, stateless),
+ ok.
+
+get_quorum_nodes(Q) ->
+ case amqqueue:get_type_state(Q) of
+ #{nodes := Nodes} ->
+ Nodes;
+ _ ->
+ []
+ end.
diff --git a/deps/rabbit/src/rabbit_amqqueue_process.erl b/deps/rabbit/src/rabbit_amqqueue_process.erl
new file mode 100644
index 0000000000..abad3b5ad4
--- /dev/null
+++ b/deps/rabbit/src/rabbit_amqqueue_process.erl
@@ -0,0 +1,1849 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_amqqueue_process).
+-include_lib("rabbit_common/include/rabbit.hrl").
+-include_lib("rabbit_common/include/rabbit_framing.hrl").
+-include("amqqueue.hrl").
+
+-behaviour(gen_server2).
+
+-define(SYNC_INTERVAL, 200). %% milliseconds
+-define(RAM_DURATION_UPDATE_INTERVAL, 5000).
+-define(CONSUMER_BIAS_RATIO, 2.0). %% i.e. consume 100% faster
+
+-export([info_keys/0]).
+
+-export([init_with_backing_queue_state/7]).
+
+-export([init/1, terminate/2, code_change/3, handle_call/3, handle_cast/2,
+ handle_info/2, handle_pre_hibernate/1, prioritise_call/4,
+ prioritise_cast/3, prioritise_info/3, format_message_queue/2]).
+-export([format/1]).
+-export([is_policy_applicable/2]).
+
+%% Queue's state
+-record(q, {
+ %% an #amqqueue record
+ q :: amqqueue:amqqueue(),
+ %% none | {exclusive consumer channel PID, consumer tag} | {single active consumer channel PID, consumer}
+ active_consumer,
+ %% Set to true if a queue has ever had a consumer.
+ %% This is used to determine when to delete auto-delete queues.
+ has_had_consumers,
+ %% backing queue module.
+ %% for mirrored queues, this will be rabbit_mirror_queue_master.
+ %% for non-priority and non-mirrored queues, rabbit_variable_queue.
+ %% see rabbit_backing_queue.
+ backing_queue,
+ %% backing queue state.
+ %% see rabbit_backing_queue, rabbit_variable_queue.
+ backing_queue_state,
+ %% consumers state, see rabbit_queue_consumers
+ consumers,
+ %% queue expiration value
+ expires,
+ %% timer used to periodically sync (flush) queue index
+ sync_timer_ref,
+ %% timer used to update ingress/egress rates and queue RAM duration target
+ rate_timer_ref,
+ %% timer used to clean up this queue due to TTL (on when unused)
+ expiry_timer_ref,
+ %% stats emission timer
+ stats_timer,
+ %% maps message IDs to {channel pid, MsgSeqNo}
+ %% pairs
+ msg_id_to_channel,
+ %% message TTL value
+ ttl,
+ %% timer used to delete expired messages
+ ttl_timer_ref,
+ ttl_timer_expiry,
+ %% Keeps track of channels that publish to this queue.
+ %% When channel process goes down, queues have to perform
+ %% certain cleanup.
+ senders,
+ %% dead letter exchange as a #resource record, if any
+ dlx,
+ dlx_routing_key,
+ %% max length in messages, if configured
+ max_length,
+ %% max length in bytes, if configured
+ max_bytes,
+ %% an action to perform if queue is to be over a limit,
+ %% can be either drop-head (default), reject-publish or reject-publish-dlx
+ overflow,
+ %% when policies change, this version helps queue
+ %% determine what previously scheduled/set up state to ignore,
+ %% e.g. message expiration messages from previously set up timers
+ %% that may or may not be still valid
+ args_policy_version,
+ %% used to discard outdated/superseded policy updates,
+ %% e.g. when policies are applied concurrently. See
+ %% https://github.com/rabbitmq/rabbitmq-server/issues/803 for one
+ %% example.
+ mirroring_policy_version = 0,
+ %% running | flow | idle
+ status,
+ %% true | false
+ single_active_consumer_on
+ }).
+
+%%----------------------------------------------------------------------------
+
+-define(STATISTICS_KEYS,
+ [messages_ready,
+ messages_unacknowledged,
+ messages,
+ reductions,
+ name,
+ policy,
+ operator_policy,
+ effective_policy_definition,
+ exclusive_consumer_pid,
+ exclusive_consumer_tag,
+ single_active_consumer_pid,
+ single_active_consumer_tag,
+ consumers,
+ consumer_utilisation,
+ memory,
+ slave_pids,
+ synchronised_slave_pids,
+ recoverable_slaves,
+ state,
+ garbage_collection
+ ]).
+
+-define(CREATION_EVENT_KEYS,
+ [name,
+ durable,
+ auto_delete,
+ arguments,
+ owner_pid,
+ exclusive,
+ user_who_performed_action
+ ]).
+
+-define(INFO_KEYS, [pid | ?CREATION_EVENT_KEYS ++ ?STATISTICS_KEYS -- [name]]).
+
+%%----------------------------------------------------------------------------
+
+-spec info_keys() -> rabbit_types:info_keys().
+
+info_keys() -> ?INFO_KEYS ++ rabbit_backing_queue:info_keys().
+statistics_keys() -> ?STATISTICS_KEYS ++ rabbit_backing_queue:info_keys().
+
+%%----------------------------------------------------------------------------
+
+init(Q) ->
+ process_flag(trap_exit, true),
+ ?store_proc_name(amqqueue:get_name(Q)),
+ {ok, init_state(amqqueue:set_pid(Q, self())), hibernate,
+ {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, ?DESIRED_HIBERNATE},
+ ?MODULE}.
+
+init_state(Q) ->
+ SingleActiveConsumerOn = case rabbit_misc:table_lookup(amqqueue:get_arguments(Q), <<"x-single-active-consumer">>) of
+ {bool, true} -> true;
+ _ -> false
+ end,
+ State = #q{q = Q,
+ active_consumer = none,
+ has_had_consumers = false,
+ consumers = rabbit_queue_consumers:new(),
+ senders = pmon:new(delegate),
+ msg_id_to_channel = #{},
+ status = running,
+ args_policy_version = 0,
+ overflow = 'drop-head',
+ single_active_consumer_on = SingleActiveConsumerOn},
+ rabbit_event:init_stats_timer(State, #q.stats_timer).
+
+init_it(Recover, From, State = #q{q = Q})
+ when ?amqqueue_exclusive_owner_is(Q, none) ->
+ init_it2(Recover, From, State);
+
+%% You used to be able to declare an exclusive durable queue. Sadly we
+%% still need to tidy up after that case: there could be the remnants
+%% of one left over from an upgrade. That is why we don't enforce
+%% Recover = new here.
+init_it(Recover, From, State = #q{q = Q0}) ->
+ Owner = amqqueue:get_exclusive_owner(Q0),
+ case rabbit_misc:is_process_alive(Owner) of
+ true -> erlang:monitor(process, Owner),
+ init_it2(Recover, From, State);
+ false -> #q{backing_queue = undefined,
+ backing_queue_state = undefined,
+ q = Q} = State,
+ send_reply(From, {owner_died, Q}),
+ BQ = backing_queue_module(Q),
+ {_, Terms} = recovery_status(Recover),
+ BQS = bq_init(BQ, Q, Terms),
+ %% Rely on terminate to delete the queue.
+ log_delete_exclusive(Owner, State),
+ {stop, {shutdown, missing_owner},
+ State#q{backing_queue = BQ, backing_queue_state = BQS}}
+ end.
+
+init_it2(Recover, From, State = #q{q = Q,
+ backing_queue = undefined,
+ backing_queue_state = undefined}) ->
+ {Barrier, TermsOrNew} = recovery_status(Recover),
+ case rabbit_amqqueue:internal_declare(Q, Recover /= new) of
+ {Res, Q1}
+ when ?is_amqqueue(Q1) andalso
+ (Res == created orelse Res == existing) ->
+ case matches(Recover, Q, Q1) of
+ true ->
+ ok = file_handle_cache:register_callback(
+ rabbit_amqqueue, set_maximum_since_use, [self()]),
+ ok = rabbit_memory_monitor:register(
+ self(), {rabbit_amqqueue,
+ set_ram_duration_target, [self()]}),
+ BQ = backing_queue_module(Q1),
+ BQS = bq_init(BQ, Q, TermsOrNew),
+ send_reply(From, {new, Q}),
+ recovery_barrier(Barrier),
+ State1 = process_args_policy(
+ State#q{backing_queue = BQ,
+ backing_queue_state = BQS}),
+ notify_decorators(startup, State),
+ rabbit_event:notify(queue_created,
+ infos(?CREATION_EVENT_KEYS, State1)),
+ rabbit_event:if_enabled(State1, #q.stats_timer,
+ fun() -> emit_stats(State1) end),
+ noreply(State1);
+ false ->
+ {stop, normal, {existing, Q1}, State}
+ end;
+ Err ->
+ {stop, normal, Err, State}
+ end.
+
+recovery_status(new) -> {no_barrier, new};
+recovery_status({Recover, Terms}) -> {Recover, Terms}.
+
+send_reply(none, _Q) -> ok;
+send_reply(From, Q) -> gen_server2:reply(From, Q).
+
+matches(new, Q1, Q2) ->
+ %% i.e. not policy
+ amqqueue:get_name(Q1) =:= amqqueue:get_name(Q2) andalso
+ amqqueue:is_durable(Q1) =:= amqqueue:is_durable(Q2) andalso
+ amqqueue:is_auto_delete(Q1) =:= amqqueue:is_auto_delete(Q2) andalso
+ amqqueue:get_exclusive_owner(Q1) =:= amqqueue:get_exclusive_owner(Q2) andalso
+ amqqueue:get_arguments(Q1) =:= amqqueue:get_arguments(Q2) andalso
+ amqqueue:get_pid(Q1) =:= amqqueue:get_pid(Q2) andalso
+ amqqueue:get_slave_pids(Q1) =:= amqqueue:get_slave_pids(Q2);
+%% FIXME: Should v1 vs. v2 of the same record match?
+matches(_, Q, Q) -> true;
+matches(_, _Q, _Q1) -> false.
+
+recovery_barrier(no_barrier) ->
+ ok;
+recovery_barrier(BarrierPid) ->
+ MRef = erlang:monitor(process, BarrierPid),
+ receive
+ {BarrierPid, go} -> erlang:demonitor(MRef, [flush]);
+ {'DOWN', MRef, process, _, _} -> ok
+ end.
+
+-spec init_with_backing_queue_state
+ (amqqueue:amqqueue(), atom(), tuple(), any(),
+ [rabbit_types:delivery()], pmon:pmon(), map()) ->
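+%% i.e. list_down/1 returns the durable queues of the vhost whose names are
+%% absent from the live rabbit_queue table - the set difference
+%% Durable \ Present computed via sets above.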
+ #q{}.
+
+init_with_backing_queue_state(Q, BQ, BQS,
+ RateTRef, Deliveries, Senders, MTC) ->
+ Owner = amqqueue:get_exclusive_owner(Q),
+ case Owner of
+ none -> ok;
+ _ -> erlang:monitor(process, Owner)
+ end,
+ State = init_state(Q),
+ State1 = State#q{backing_queue = BQ,
+ backing_queue_state = BQS,
+ rate_timer_ref = RateTRef,
+ senders = Senders,
+ msg_id_to_channel = MTC},
+ State2 = process_args_policy(State1),
+ State3 = lists:foldl(fun (Delivery, StateN) ->
+ maybe_deliver_or_enqueue(Delivery, true, StateN)
+ end, State2, Deliveries),
+ notify_decorators(startup, State3),
+ State3.
+
+terminate(shutdown = R, State = #q{backing_queue = BQ, q = Q0}) ->
+ QName = amqqueue:get_name(Q0),
+ rabbit_core_metrics:queue_deleted(qname(State)),
+ terminate_shutdown(
+ fun (BQS) ->
+ rabbit_misc:execute_mnesia_transaction(
+ fun() ->
+ [Q] = mnesia:read({rabbit_queue, QName}),
+ Q2 = amqqueue:set_state(Q, stopped),
+ %% amqqueue migration:
+                %% The amqqueue was read in this transaction, so there is
+                %% no need to handle migration.
+ rabbit_amqqueue:store_queue(Q2)
+ end),
+ BQ:terminate(R, BQS)
+ end, State);
+terminate({shutdown, missing_owner} = Reason, State) ->
+ %% if the owner was missing then there will be no queue, so don't emit stats
+ terminate_shutdown(terminate_delete(false, Reason, State), State);
+terminate({shutdown, _} = R, State = #q{backing_queue = BQ}) ->
+ rabbit_core_metrics:queue_deleted(qname(State)),
+ terminate_shutdown(fun (BQS) -> BQ:terminate(R, BQS) end, State);
+terminate(normal, State = #q{status = {terminated_by, auto_delete}}) ->
+    %% auto_delete case
+    %% To increase performance we want to avoid a mnesia_sync:sync call
+    %% after every transaction, as we could be simultaneously deleting
+    %% thousands of queues. An optimisation introduced by server#1513
+    %% needs to be reverted in this case, by not guarding the delete
+    %% operation on `rabbit_durable_queue`.
+ terminate_shutdown(terminate_delete(true, auto_delete, State), State);
+terminate(normal, State) -> %% delete case
+ terminate_shutdown(terminate_delete(true, normal, State), State);
+%% If we crashed don't try to clean up the BQS, probably best to leave it.
+terminate(_Reason, State = #q{q = Q}) ->
+ terminate_shutdown(fun (BQS) ->
+ Q2 = amqqueue:set_state(Q, crashed),
+ rabbit_misc:execute_mnesia_transaction(
+ fun() ->
+ ?try_mnesia_tx_or_upgrade_amqqueue_and_retry(
+ rabbit_amqqueue:store_queue(Q2),
+ begin
+ Q3 = amqqueue:upgrade(Q2),
+ rabbit_amqqueue:store_queue(Q3)
+ end)
+ end),
+ BQS
+ end, State).
+
+terminate_delete(EmitStats, Reason0,
+ State = #q{q = Q,
+ backing_queue = BQ,
+ status = Status}) ->
+ QName = amqqueue:get_name(Q),
+ ActingUser = terminated_by(Status),
+ fun (BQS) ->
+ Reason = case Reason0 of
+ auto_delete -> normal;
+ Any -> Any
+ end,
+ BQS1 = BQ:delete_and_terminate(Reason, BQS),
+ if EmitStats -> rabbit_event:if_enabled(State, #q.stats_timer,
+ fun() -> emit_stats(State) end);
+ true -> ok
+ end,
+ %% This try-catch block transforms throws to errors since throws are not
+ %% logged.
+ try
+ %% don't care if the internal delete doesn't return 'ok'.
+ rabbit_amqqueue:internal_delete(QName, ActingUser, Reason0)
+ catch
+ {error, ReasonE} -> error(ReasonE)
+ end,
+ BQS1
+ end.
+
+terminated_by({terminated_by, auto_delete}) ->
+ ?INTERNAL_USER;
+terminated_by({terminated_by, ActingUser}) ->
+ ActingUser;
+terminated_by(_) ->
+ ?INTERNAL_USER.
+
+terminate_shutdown(Fun, #q{status = Status} = State) ->
+ ActingUser = terminated_by(Status),
+ State1 = #q{backing_queue_state = BQS, consumers = Consumers} =
+ lists:foldl(fun (F, S) -> F(S) end, State,
+ [fun stop_sync_timer/1,
+ fun stop_rate_timer/1,
+ fun stop_expiry_timer/1,
+ fun stop_ttl_timer/1]),
+ case BQS of
+ undefined -> State1;
+ _ -> ok = rabbit_memory_monitor:deregister(self()),
+ QName = qname(State),
+ notify_decorators(shutdown, State),
+ [emit_consumer_deleted(Ch, CTag, QName, ActingUser) ||
+ {Ch, CTag, _, _, _, _, _, _} <-
+ rabbit_queue_consumers:all(Consumers)],
+ State1#q{backing_queue_state = Fun(BQS)}
+ end.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+%%----------------------------------------------------------------------------
+
+maybe_notify_decorators(false, State) -> State;
+maybe_notify_decorators(true, State) -> notify_decorators(State), State.
+
+notify_decorators(Event, State) -> decorator_callback(qname(State), Event, []).
+
+notify_decorators(State = #q{consumers = Consumers,
+ backing_queue = BQ,
+ backing_queue_state = BQS}) ->
+ P = rabbit_queue_consumers:max_active_priority(Consumers),
+ decorator_callback(qname(State), consumer_state_changed,
+ [P, BQ:is_empty(BQS)]).
+
+decorator_callback(QName, F, A) ->
+ %% Look up again in case policy and hence decorators have changed
+ case rabbit_amqqueue:lookup(QName) of
+ {ok, Q} ->
+ Ds = amqqueue:get_decorators(Q),
+ [ok = apply(M, F, [Q|A]) || M <- rabbit_queue_decorator:select(Ds)];
+ {error, not_found} ->
+ ok
+ end.
+
+bq_init(BQ, Q, Recover) ->
+ Self = self(),
+ BQ:init(Q, Recover,
+ fun (Mod, Fun) ->
+ rabbit_amqqueue:run_backing_queue(Self, Mod, Fun)
+ end).
+
+process_args_policy(State = #q{q = Q,
+ args_policy_version = N}) ->
+ ArgsTable =
+ [{<<"expires">>, fun res_min/2, fun init_exp/2},
+ {<<"dead-letter-exchange">>, fun res_arg/2, fun init_dlx/2},
+ {<<"dead-letter-routing-key">>, fun res_arg/2, fun init_dlx_rkey/2},
+ {<<"message-ttl">>, fun res_min/2, fun init_ttl/2},
+ {<<"max-length">>, fun res_min/2, fun init_max_length/2},
+ {<<"max-length-bytes">>, fun res_min/2, fun init_max_bytes/2},
+ {<<"overflow">>, fun res_arg/2, fun init_overflow/2},
+ {<<"queue-mode">>, fun res_arg/2, fun init_queue_mode/2}],
+ drop_expired_msgs(
+ lists:foldl(fun({Name, Resolve, Fun}, StateN) ->
+ Fun(rabbit_queue_type_util:args_policy_lookup(Name, Resolve, Q), StateN)
+ end, State#q{args_policy_version = N + 1}, ArgsTable)).
+
+res_arg(_PolVal, ArgVal) -> ArgVal.
+res_min(PolVal, ArgVal) -> erlang:min(PolVal, ArgVal).
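+%% For example, if a policy sets message-ttl to 60000 while the queue was
+%% declared with x-message-ttl 30000, res_min/2 resolves the pair to an
+%% effective TTL of 30000; for overflow, res_arg/2 simply lets the queue
+%% argument win when both are set.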
+
+%% In both of these we init with the undefined variant first to stop any
+%% existing timer, then start a new one which may fire after a
+%% different time.
+init_exp(undefined, State) -> stop_expiry_timer(State#q{expires = undefined});
+init_exp(Expires, State) -> State1 = init_exp(undefined, State),
+ ensure_expiry_timer(State1#q{expires = Expires}).
+
+init_ttl(undefined, State) -> stop_ttl_timer(State#q{ttl = undefined});
+init_ttl(TTL, State) -> (init_ttl(undefined, State))#q{ttl = TTL}.
+
+init_dlx(undefined, State) ->
+ State#q{dlx = undefined};
+init_dlx(DLX, State = #q{q = Q}) ->
+ QName = amqqueue:get_name(Q),
+ State#q{dlx = rabbit_misc:r(QName, exchange, DLX)}.
+
+init_dlx_rkey(RoutingKey, State) -> State#q{dlx_routing_key = RoutingKey}.
+
+init_max_length(MaxLen, State) ->
+ {_Dropped, State1} = maybe_drop_head(State#q{max_length = MaxLen}),
+ State1.
+
+init_max_bytes(MaxBytes, State) ->
+ {_Dropped, State1} = maybe_drop_head(State#q{max_bytes = MaxBytes}),
+ State1.
+
+%% Reset overflow to default 'drop-head' value if it's undefined.
+init_overflow(undefined, #q{overflow = 'drop-head'} = State) ->
+ State;
+init_overflow(undefined, State) ->
+ {_Dropped, State1} = maybe_drop_head(State#q{overflow = 'drop-head'}),
+ State1;
+init_overflow(Overflow, State) ->
+ OverflowVal = binary_to_existing_atom(Overflow, utf8),
+ case OverflowVal of
+ 'drop-head' ->
+ {_Dropped, State1} = maybe_drop_head(State#q{overflow = OverflowVal}),
+ State1;
+ _ ->
+ State#q{overflow = OverflowVal}
+ end.
+
+init_queue_mode(undefined, State) ->
+ State;
+init_queue_mode(Mode, State = #q {backing_queue = BQ,
+ backing_queue_state = BQS}) ->
+ BQS1 = BQ:set_queue_mode(binary_to_existing_atom(Mode, utf8), BQS),
+ State#q{backing_queue_state = BQS1}.
+
+reply(Reply, NewState) ->
+ {NewState1, Timeout} = next_state(NewState),
+ {reply, Reply, ensure_stats_timer(ensure_rate_timer(NewState1)), Timeout}.
+
+noreply(NewState) ->
+ {NewState1, Timeout} = next_state(NewState),
+ {noreply, ensure_stats_timer(ensure_rate_timer(NewState1)), Timeout}.
+
+next_state(State = #q{q = Q,
+ backing_queue = BQ,
+ backing_queue_state = BQS,
+ msg_id_to_channel = MTC}) ->
+ assert_invariant(State),
+ {MsgIds, BQS1} = BQ:drain_confirmed(BQS),
+ MTC1 = confirm_messages(MsgIds, MTC, amqqueue:get_name(Q)),
+ State1 = State#q{backing_queue_state = BQS1, msg_id_to_channel = MTC1},
+ case BQ:needs_timeout(BQS1) of
+ false -> {stop_sync_timer(State1), hibernate };
+ idle -> {stop_sync_timer(State1), ?SYNC_INTERVAL};
+ timed -> {ensure_sync_timer(State1), 0 }
+ end.
+
+backing_queue_module(Q) ->
+ case rabbit_mirror_queue_misc:is_mirrored(Q) of
+ false -> {ok, BQM} = application:get_env(backing_queue_module),
+ BQM;
+ true -> rabbit_mirror_queue_master
+ end.
+
+ensure_sync_timer(State) ->
+ rabbit_misc:ensure_timer(State, #q.sync_timer_ref,
+ ?SYNC_INTERVAL, sync_timeout).
+
+stop_sync_timer(State) -> rabbit_misc:stop_timer(State, #q.sync_timer_ref).
+
+ensure_rate_timer(State) ->
+ rabbit_misc:ensure_timer(State, #q.rate_timer_ref,
+ ?RAM_DURATION_UPDATE_INTERVAL,
+ update_ram_duration).
+
+stop_rate_timer(State) -> rabbit_misc:stop_timer(State, #q.rate_timer_ref).
+
+%% We wish to expire only when there are no consumers *and* the expiry
+%% hasn't been refreshed (by queue.declare or basic.get) for the
+%% configured period.
+ensure_expiry_timer(State = #q{expires = undefined}) ->
+ State;
+ensure_expiry_timer(State = #q{expires = Expires,
+ args_policy_version = Version}) ->
+ case is_unused(State) of
+ true -> NewState = stop_expiry_timer(State),
+ rabbit_misc:ensure_timer(NewState, #q.expiry_timer_ref,
+ Expires, {maybe_expire, Version});
+ false -> State
+ end.
+
+stop_expiry_timer(State) -> rabbit_misc:stop_timer(State, #q.expiry_timer_ref).
+
+ensure_ttl_timer(undefined, State) ->
+ State;
+ensure_ttl_timer(Expiry, State = #q{ttl_timer_ref = undefined,
+ args_policy_version = Version}) ->
+ After = (case Expiry - os:system_time(micro_seconds) of
+ V when V > 0 -> V + 999; %% always fire later
+ _ -> 0
+ end) div 1000,
+ TRef = rabbit_misc:send_after(After, self(), {drop_expired, Version}),
+ State#q{ttl_timer_ref = TRef, ttl_timer_expiry = Expiry};
+ensure_ttl_timer(Expiry, State = #q{ttl_timer_ref = TRef,
+ ttl_timer_expiry = TExpiry})
+ when Expiry + 1000 < TExpiry ->
+ rabbit_misc:cancel_timer(TRef),
+ ensure_ttl_timer(Expiry, State#q{ttl_timer_ref = undefined});
+ensure_ttl_timer(_Expiry, State) ->
+ State.
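+%% Worked example (hypothetical numbers): if Expiry lies 1500000 us ahead of
+%% os:system_time(micro_seconds), then After = (1500000 + 999) div 1000 =
+%% 1500 ms; the +999 rounds up so the timer never fires before the message
+%% has actually expired.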
+
+stop_ttl_timer(State) -> rabbit_misc:stop_timer(State, #q.ttl_timer_ref).
+
+ensure_stats_timer(State) ->
+ rabbit_event:ensure_stats_timer(State, #q.stats_timer, emit_stats).
+
+assert_invariant(#q{single_active_consumer_on = true}) ->
+    %% with single active consumer enabled, the queue may contain messages
+    %% while it still has available (but inactive) consumers
+ ok;
+assert_invariant(State = #q{consumers = Consumers, single_active_consumer_on = false}) ->
+ true = (rabbit_queue_consumers:inactive(Consumers) orelse is_empty(State)).
+
+is_empty(#q{backing_queue = BQ, backing_queue_state = BQS}) -> BQ:is_empty(BQS).
+
+maybe_send_drained(WasEmpty, State) ->
+ case (not WasEmpty) andalso is_empty(State) of
+ true -> notify_decorators(State),
+ rabbit_queue_consumers:send_drained();
+ false -> ok
+ end,
+ State.
+
+confirm_messages([], MTC, _QName) ->
+ MTC;
+confirm_messages(MsgIds, MTC, QName) ->
+ {CMs, MTC1} =
+ lists:foldl(
+ fun(MsgId, {CMs, MTC0}) ->
+ case maps:get(MsgId, MTC0, none) of
+ none ->
+ {CMs, MTC0};
+ {SenderPid, MsgSeqNo} ->
+ {maps:update_with(SenderPid,
+ fun(MsgSeqNos) ->
+ [MsgSeqNo | MsgSeqNos]
+ end,
+ [MsgSeqNo],
+ CMs),
+ maps:remove(MsgId, MTC0)}
+
+ end
+ end, {#{}, MTC}, MsgIds),
+ maps:fold(
+ fun(Pid, MsgSeqNos, _) ->
+ confirm_to_sender(Pid, QName, MsgSeqNos)
+ end,
+ ok,
+ CMs),
+ MTC1.
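+%% Sketch of the grouping above (hypothetical pids and sequence numbers):
+%% with MTC mapping MsgIdA => {Ch1, 1}, MsgIdB => {Ch1, 2} and
+%% MsgIdC => {Ch2, 7}, confirming [MsgIdA, MsgIdB, MsgIdC] issues one
+%% confirm_to_sender/3 call per channel - Ch1 with [2, 1], Ch2 with [7] -
+%% and returns an MTC with all three ids removed.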
+
+send_or_record_confirm(#delivery{confirm = false}, State) ->
+ {never, State};
+send_or_record_confirm(#delivery{confirm = true,
+ sender = SenderPid,
+ msg_seq_no = MsgSeqNo,
+ message = #basic_message {
+ is_persistent = true,
+ id = MsgId}},
+ State = #q{q = Q,
+ msg_id_to_channel = MTC})
+ when ?amqqueue_is_durable(Q) ->
+ MTC1 = maps:put(MsgId, {SenderPid, MsgSeqNo}, MTC),
+ {eventually, State#q{msg_id_to_channel = MTC1}};
+send_or_record_confirm(#delivery{confirm = true,
+ sender = SenderPid,
+ msg_seq_no = MsgSeqNo},
+ #q{q = Q} = State) ->
+ confirm_to_sender(SenderPid, amqqueue:get_name(Q), [MsgSeqNo]),
+ {immediately, State}.
+
+%% This feature was used by `rabbit_amqqueue_process` and
+%% `rabbit_mirror_queue_slave` up-to and including RabbitMQ 3.7.x. It is
+%% unused in 3.8.x and thus deprecated. We keep it to support in-place
+%% upgrades to 3.8.x (i.e. mixed-version clusters), but it is a no-op
+%% starting with that version.
+send_mandatory(#delivery{mandatory = false}) ->
+ ok;
+send_mandatory(#delivery{mandatory = true,
+ sender = SenderPid,
+ msg_seq_no = MsgSeqNo}) ->
+ gen_server2:cast(SenderPid, {mandatory_received, MsgSeqNo}).
+
+discard(#delivery{confirm = Confirm,
+ sender = SenderPid,
+ flow = Flow,
+ message = #basic_message{id = MsgId}}, BQ, BQS, MTC, QName) ->
+ MTC1 = case Confirm of
+ true -> confirm_messages([MsgId], MTC, QName);
+ false -> MTC
+ end,
+ BQS1 = BQ:discard(MsgId, SenderPid, Flow, BQS),
+ {BQS1, MTC1}.
+
+run_message_queue(State) -> run_message_queue(false, State).
+
+run_message_queue(ActiveConsumersChanged, State) ->
+ case is_empty(State) of
+ true -> maybe_notify_decorators(ActiveConsumersChanged, State);
+ false -> case rabbit_queue_consumers:deliver(
+ fun(AckRequired) -> fetch(AckRequired, State) end,
+ qname(State), State#q.consumers,
+ State#q.single_active_consumer_on, State#q.active_consumer) of
+ {delivered, ActiveConsumersChanged1, State1, Consumers} ->
+ run_message_queue(
+ ActiveConsumersChanged or ActiveConsumersChanged1,
+ State1#q{consumers = Consumers});
+ {undelivered, ActiveConsumersChanged1, Consumers} ->
+ maybe_notify_decorators(
+ ActiveConsumersChanged or ActiveConsumersChanged1,
+ State#q{consumers = Consumers})
+ end
+ end.
+
+attempt_delivery(Delivery = #delivery{sender = SenderPid,
+ flow = Flow,
+ message = Message},
+ Props, Delivered, State = #q{q = Q,
+ backing_queue = BQ,
+ backing_queue_state = BQS,
+ msg_id_to_channel = MTC}) ->
+ case rabbit_queue_consumers:deliver(
+ fun (true) -> true = BQ:is_empty(BQS),
+ {AckTag, BQS1} =
+ BQ:publish_delivered(
+ Message, Props, SenderPid, Flow, BQS),
+ {{Message, Delivered, AckTag}, {BQS1, MTC}};
+ (false) -> {{Message, Delivered, undefined},
+ discard(Delivery, BQ, BQS, MTC, amqqueue:get_name(Q))}
+ end, qname(State), State#q.consumers, State#q.single_active_consumer_on, State#q.active_consumer) of
+ {delivered, ActiveConsumersChanged, {BQS1, MTC1}, Consumers} ->
+ {delivered, maybe_notify_decorators(
+ ActiveConsumersChanged,
+ State#q{backing_queue_state = BQS1,
+ msg_id_to_channel = MTC1,
+ consumers = Consumers})};
+ {undelivered, ActiveConsumersChanged, Consumers} ->
+ {undelivered, maybe_notify_decorators(
+ ActiveConsumersChanged,
+ State#q{consumers = Consumers})}
+ end.
+
+maybe_deliver_or_enqueue(Delivery = #delivery{message = Message},
+ Delivered,
+ State = #q{overflow = Overflow,
+ backing_queue = BQ,
+ backing_queue_state = BQS,
+ dlx = DLX,
+ dlx_routing_key = RK}) ->
+ send_mandatory(Delivery), %% must do this before confirms
+ case {will_overflow(Delivery, State), Overflow} of
+ {true, 'reject-publish'} ->
+ %% Drop publish and nack to publisher
+ send_reject_publish(Delivery, Delivered, State);
+ {true, 'reject-publish-dlx'} ->
+ %% Publish to DLX
+ with_dlx(
+ DLX,
+ fun (X) ->
+ QName = qname(State),
+ rabbit_dead_letter:publish(Message, maxlen, X, RK, QName)
+ end,
+ fun () -> ok end),
+ %% Drop publish and nack to publisher
+ send_reject_publish(Delivery, Delivered, State);
+ _ ->
+ {IsDuplicate, BQS1} = BQ:is_duplicate(Message, BQS),
+ State1 = State#q{backing_queue_state = BQS1},
+ case IsDuplicate of
+ true -> State1;
+ {true, drop} -> State1;
+ %% Drop publish and nack to publisher
+ {true, reject} ->
+ send_reject_publish(Delivery, Delivered, State1);
+ %% Enqueue and maybe drop head later
+ false ->
+ deliver_or_enqueue(Delivery, Delivered, State1)
+ end
+ end.
+
+deliver_or_enqueue(Delivery = #delivery{message = Message,
+ sender = SenderPid,
+ flow = Flow},
+ Delivered,
+ State = #q{q = Q, backing_queue = BQ}) ->
+ {Confirm, State1} = send_or_record_confirm(Delivery, State),
+ Props = message_properties(Message, Confirm, State1),
+ case attempt_delivery(Delivery, Props, Delivered, State1) of
+ {delivered, State2} ->
+ State2;
+ %% The next one is an optimisation
+ {undelivered, State2 = #q{ttl = 0, dlx = undefined,
+ backing_queue_state = BQS,
+ msg_id_to_channel = MTC}} ->
+ {BQS1, MTC1} = discard(Delivery, BQ, BQS, MTC, amqqueue:get_name(Q)),
+ State2#q{backing_queue_state = BQS1, msg_id_to_channel = MTC1};
+ {undelivered, State2 = #q{backing_queue_state = BQS}} ->
+
+ BQS1 = BQ:publish(Message, Props, Delivered, SenderPid, Flow, BQS),
+ {Dropped, State3 = #q{backing_queue_state = BQS2}} =
+ maybe_drop_head(State2#q{backing_queue_state = BQS1}),
+ QLen = BQ:len(BQS2),
+ %% optimisation: it would be perfectly safe to always
+ %% invoke drop_expired_msgs here, but that is expensive so
+ %% we only do that if a new message that might have an
+ %% expiry ends up at the head of the queue. If the head
+ %% remains unchanged, or if the newly published message
+ %% has no expiry and becomes the head of the queue then
+ %% the call is unnecessary.
+ case {Dropped, QLen =:= 1, Props#message_properties.expiry} of
+ {false, false, _} -> State3;
+ {true, true, undefined} -> State3;
+ {_, _, _} -> drop_expired_msgs(State3)
+ end
+ end.
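+%% For instance, when the head is unchanged (Dropped =:= false) and the queue
+%% already held other messages (QLen > 1), the new message cannot be the next
+%% to expire, so the {false, false, _} branch above skips drop_expired_msgs/1.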
+
+maybe_drop_head(State = #q{max_length = undefined,
+ max_bytes = undefined}) ->
+ {false, State};
+maybe_drop_head(State = #q{overflow = 'reject-publish'}) ->
+ {false, State};
+maybe_drop_head(State = #q{overflow = 'reject-publish-dlx'}) ->
+ {false, State};
+maybe_drop_head(State = #q{overflow = 'drop-head'}) ->
+ maybe_drop_head(false, State).
+
+maybe_drop_head(AlreadyDropped, State = #q{backing_queue = BQ,
+ backing_queue_state = BQS}) ->
+ case over_max_length(State) of
+ true ->
+ maybe_drop_head(true,
+ with_dlx(
+ State#q.dlx,
+ fun (X) -> dead_letter_maxlen_msg(X, State) end,
+ fun () ->
+ {_, BQS1} = BQ:drop(false, BQS),
+ State#q{backing_queue_state = BQS1}
+ end));
+ false ->
+ {AlreadyDropped, State}
+ end.
+
+send_reject_publish(#delivery{confirm = true,
+ sender = SenderPid,
+ flow = Flow,
+ msg_seq_no = MsgSeqNo,
+ message = #basic_message{id = MsgId}},
+ _Delivered,
+ State = #q{ q = Q,
+ backing_queue = BQ,
+ backing_queue_state = BQS,
+ msg_id_to_channel = MTC}) ->
+ ok = rabbit_classic_queue:send_rejection(SenderPid,
+ amqqueue:get_name(Q), MsgSeqNo),
+
+ MTC1 = maps:remove(MsgId, MTC),
+ BQS1 = BQ:discard(MsgId, SenderPid, Flow, BQS),
+ State#q{ backing_queue_state = BQS1, msg_id_to_channel = MTC1 };
+send_reject_publish(#delivery{confirm = false},
+ _Delivered, State) ->
+ State.
+
+will_overflow(_, #q{max_length = undefined,
+ max_bytes = undefined}) -> false;
+will_overflow(#delivery{message = Message},
+ #q{max_length = MaxLen,
+ max_bytes = MaxBytes,
+ backing_queue = BQ,
+ backing_queue_state = BQS}) ->
+ ExpectedQueueLength = BQ:len(BQS) + 1,
+
+ #basic_message{content = #content{payload_fragments_rev = PFR}} = Message,
+ MessageSize = iolist_size(PFR),
+ ExpectedQueueSizeBytes = BQ:info(message_bytes_ready, BQS) + MessageSize,
+
+ ExpectedQueueLength > MaxLen orelse ExpectedQueueSizeBytes > MaxBytes.
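+%% Sketch with hypothetical limits: with max_length = 10 and 10 messages
+%% already ready, the next publish overflows since ExpectedQueueLength =
+%% 10 + 1 > 10; max_bytes works the same way using message_bytes_ready plus
+%% the size of the new payload.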
+
+over_max_length(#q{max_length = MaxLen,
+ max_bytes = MaxBytes,
+ backing_queue = BQ,
+ backing_queue_state = BQS}) ->
+ BQ:len(BQS) > MaxLen orelse BQ:info(message_bytes_ready, BQS) > MaxBytes.
+
+requeue_and_run(AckTags, State = #q{backing_queue = BQ,
+ backing_queue_state = BQS}) ->
+ WasEmpty = BQ:is_empty(BQS),
+ {_MsgIds, BQS1} = BQ:requeue(AckTags, BQS),
+ {_Dropped, State1} = maybe_drop_head(State#q{backing_queue_state = BQS1}),
+ run_message_queue(maybe_send_drained(WasEmpty, drop_expired_msgs(State1))).
+
+fetch(AckRequired, State = #q{backing_queue = BQ,
+ backing_queue_state = BQS}) ->
+ {Result, BQS1} = BQ:fetch(AckRequired, BQS),
+ State1 = drop_expired_msgs(State#q{backing_queue_state = BQS1}),
+ {Result, maybe_send_drained(Result =:= empty, State1)}.
+
+ack(AckTags, ChPid, State) ->
+ subtract_acks(ChPid, AckTags, State,
+ fun (State1 = #q{backing_queue = BQ,
+ backing_queue_state = BQS}) ->
+ {_Guids, BQS1} = BQ:ack(AckTags, BQS),
+ State1#q{backing_queue_state = BQS1}
+ end).
+
+requeue(AckTags, ChPid, State) ->
+ subtract_acks(ChPid, AckTags, State,
+ fun (State1) -> requeue_and_run(AckTags, State1) end).
+
+possibly_unblock(Update, ChPid, State = #q{consumers = Consumers}) ->
+ case rabbit_queue_consumers:possibly_unblock(Update, ChPid, Consumers) of
+ unchanged -> State;
+ {unblocked, Consumers1} -> State1 = State#q{consumers = Consumers1},
+ run_message_queue(true, State1)
+ end.
+
+should_auto_delete(#q{q = Q})
+ when not ?amqqueue_is_auto_delete(Q) -> false;
+should_auto_delete(#q{has_had_consumers = false}) -> false;
+should_auto_delete(State) -> is_unused(State).
+
+handle_ch_down(DownPid, State = #q{consumers = Consumers,
+ active_consumer = Holder,
+ single_active_consumer_on = SingleActiveConsumerOn,
+ senders = Senders}) ->
+ State1 = State#q{senders = case pmon:is_monitored(DownPid, Senders) of
+ false ->
+ Senders;
+ true ->
+ %% A rabbit_channel process died. Here credit_flow takes care
+ %% of cleaning up the rabbit_amqqueue_process process dictionary
+ %% with regard to the credit we were tracking for the channel
+ %% process. See handle_cast({deliver, Deliver}, State) in this
+ %% module. In that cast clause we process deliveries from the
+ %% channel, which means we call credit_flow:ack/1 for said
+ %% messages. Acking messages via credit_flow increases a counter
+ %% that tells us when we need to send MoreCreditAfter. Since
+ %% the process died, the credit_flow module cleans that up
+ %% for us.
+ credit_flow:peer_down(DownPid),
+ pmon:demonitor(DownPid, Senders)
+ end},
+ case rabbit_queue_consumers:erase_ch(DownPid, Consumers) of
+ not_found ->
+ {ok, State1};
+ {ChAckTags, ChCTags, Consumers1} ->
+ QName = qname(State1),
+ [emit_consumer_deleted(DownPid, CTag, QName, ?INTERNAL_USER) || CTag <- ChCTags],
+ Holder1 = new_single_active_consumer_after_channel_down(DownPid, Holder, SingleActiveConsumerOn, Consumers1),
+ State2 = State1#q{consumers = Consumers1,
+ active_consumer = Holder1},
+ maybe_notify_consumer_updated(State2, Holder, Holder1),
+ notify_decorators(State2),
+ case should_auto_delete(State2) of
+ true ->
+ log_auto_delete(
+ io_lib:format(
+ "because all of its consumers (~p) were on a channel that was closed",
+ [length(ChCTags)]),
+ State),
+ {stop, State2};
+ false -> {ok, requeue_and_run(ChAckTags,
+ ensure_expiry_timer(State2))}
+ end
+ end.
+
+new_single_active_consumer_after_channel_down(DownChPid, CurrentSingleActiveConsumer, _SingleActiveConsumerIsOn = true, Consumers) ->
+ case CurrentSingleActiveConsumer of
+ {DownChPid, _} ->
+ % the single active consumer is on the channel that went down; we have to replace it
+ case rabbit_queue_consumers:get_consumer(Consumers) of
+ undefined -> none;
+ Consumer -> Consumer
+ end;
+ _ ->
+ CurrentSingleActiveConsumer
+ end;
+new_single_active_consumer_after_channel_down(DownChPid, CurrentSingleActiveConsumer, _SingleActiveConsumerIsOn = false, _Consumers) ->
+ case CurrentSingleActiveConsumer of
+ {DownChPid, _} -> none;
+ Other -> Other
+ end.
+
+check_exclusive_access({_ChPid, _ConsumerTag}, _ExclusiveConsume, _State) ->
+ in_use;
+check_exclusive_access(none, false, _State) ->
+ ok;
+check_exclusive_access(none, true, State) ->
+ case is_unused(State) of
+ true -> ok;
+ false -> in_use
+ end.
+
+is_unused(_State) -> rabbit_queue_consumers:count() == 0.
+
+maybe_send_reply(_ChPid, undefined) -> ok;
+maybe_send_reply(ChPid, Msg) -> ok = rabbit_channel:send_command(ChPid, Msg).
+
+qname(#q{q = Q}) -> amqqueue:get_name(Q).
+
+backing_queue_timeout(State = #q{backing_queue = BQ,
+ backing_queue_state = BQS}) ->
+ State#q{backing_queue_state = BQ:timeout(BQS)}.
+
+subtract_acks(ChPid, AckTags, State = #q{consumers = Consumers}, Fun) ->
+ case rabbit_queue_consumers:subtract_acks(ChPid, AckTags, Consumers) of
+ not_found -> State;
+ unchanged -> Fun(State);
+ {unblocked, Consumers1} -> State1 = State#q{consumers = Consumers1},
+ run_message_queue(true, Fun(State1))
+ end.
+
+message_properties(Message = #basic_message{content = Content},
+ Confirm, #q{ttl = TTL}) ->
+ #content{payload_fragments_rev = PFR} = Content,
+ #message_properties{expiry = calculate_msg_expiry(Message, TTL),
+ needs_confirming = Confirm == eventually,
+ size = iolist_size(PFR)}.
+
+calculate_msg_expiry(#basic_message{content = Content}, TTL) ->
+ #content{properties = Props} =
+ rabbit_binary_parser:ensure_content_decoded(Content),
+ %% We assert that the expiration must be valid - it is checked in the channel.
+ {ok, MsgTTL} = rabbit_basic:parse_expiration(Props),
+ case lists:min([TTL, MsgTTL]) of
+ undefined -> undefined;
+ T -> os:system_time(micro_seconds) + T * 1000
+ end.
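+
+%% Note that lists:min/1 above relies on Erlang's term ordering:
+%% numbers sort before atoms, so `undefined' is only the minimum when
+%% both the queue TTL and the per-message TTL are undefined. For
+%% example, lists:min([undefined, 30000]) evaluates to 30000.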
+
+%% Logically this function should invoke maybe_send_drained/2.
+%% However, that is expensive. Since some frequent callers of
+%% drop_expired_msgs/1, in particular deliver_or_enqueue/3, cannot
+%% possibly cause the queue to become empty, we push the
+%% responsibility to the callers. So be cautious when adding new callers.
+drop_expired_msgs(State) ->
+ case is_empty(State) of
+ true -> State;
+ false -> drop_expired_msgs(os:system_time(micro_seconds),
+ State)
+ end.
+
+drop_expired_msgs(Now, State = #q{backing_queue_state = BQS,
+ backing_queue = BQ }) ->
+ ExpirePred = fun (#message_properties{expiry = Exp}) -> Now >= Exp end,
+ {Props, State1} =
+ with_dlx(
+ State#q.dlx,
+ fun (X) -> dead_letter_expired_msgs(ExpirePred, X, State) end,
+ fun () -> {Next, BQS1} = BQ:dropwhile(ExpirePred, BQS),
+ {Next, State#q{backing_queue_state = BQS1}} end),
+ ensure_ttl_timer(case Props of
+ undefined -> undefined;
+ #message_properties{expiry = Exp} -> Exp
+ end, State1).
+
+with_dlx(undefined, _With, Without) -> Without();
+with_dlx(DLX, With, Without) -> case rabbit_exchange:lookup(DLX) of
+ {ok, X} -> With(X);
+ {error, not_found} -> Without()
+ end.
+
+dead_letter_expired_msgs(ExpirePred, X, State = #q{backing_queue = BQ}) ->
+ dead_letter_msgs(fun (DLFun, Acc, BQS1) ->
+ BQ:fetchwhile(ExpirePred, DLFun, Acc, BQS1)
+ end, expired, X, State).
+
+dead_letter_rejected_msgs(AckTags, X, State = #q{backing_queue = BQ}) ->
+ {ok, State1} =
+ dead_letter_msgs(
+ fun (DLFun, Acc, BQS) ->
+ {Acc1, BQS1} = BQ:ackfold(DLFun, Acc, BQS, AckTags),
+ {ok, Acc1, BQS1}
+ end, rejected, X, State),
+ State1.
+
+dead_letter_maxlen_msg(X, State = #q{backing_queue = BQ}) ->
+ {ok, State1} =
+ dead_letter_msgs(
+ fun (DLFun, Acc, BQS) ->
+ {{Msg, _, AckTag}, BQS1} = BQ:fetch(true, BQS),
+ {ok, DLFun(Msg, AckTag, Acc), BQS1}
+ end, maxlen, X, State),
+ State1.
+
+dead_letter_msgs(Fun, Reason, X, State = #q{dlx_routing_key = RK,
+ backing_queue_state = BQS,
+ backing_queue = BQ}) ->
+ QName = qname(State),
+ {Res, Acks1, BQS1} =
+ Fun(fun (Msg, AckTag, Acks) ->
+ rabbit_dead_letter:publish(Msg, Reason, X, RK, QName),
+ [AckTag | Acks]
+ end, [], BQS),
+ {_Guids, BQS2} = BQ:ack(Acks1, BQS1),
+ {Res, State#q{backing_queue_state = BQS2}}.
+
+stop(State) -> stop(noreply, State).
+
+stop(noreply, State) -> {stop, normal, State};
+stop(Reply, State) -> {stop, normal, Reply, State}.
+
+infos(Items, #q{q = Q} = State) ->
+ lists:foldr(fun(totals, Acc) ->
+ [{messages_ready, i(messages_ready, State)},
+ {messages, i(messages, State)},
+ {messages_unacknowledged, i(messages_unacknowledged, State)}] ++ Acc;
+ (type_specific, Acc) ->
+ format(Q) ++ Acc;
+ (Item, Acc) ->
+ [{Item, i(Item, State)} | Acc]
+ end, [], Items).
+
+i(name, #q{q = Q}) -> amqqueue:get_name(Q);
+i(durable, #q{q = Q}) -> amqqueue:is_durable(Q);
+i(auto_delete, #q{q = Q}) -> amqqueue:is_auto_delete(Q);
+i(arguments, #q{q = Q}) -> amqqueue:get_arguments(Q);
+i(pid, _) ->
+ self();
+i(owner_pid, #q{q = Q}) when ?amqqueue_exclusive_owner_is(Q, none) ->
+ '';
+i(owner_pid, #q{q = Q}) ->
+ amqqueue:get_exclusive_owner(Q);
+i(exclusive, #q{q = Q}) ->
+ ExclusiveOwner = amqqueue:get_exclusive_owner(Q),
+ is_pid(ExclusiveOwner);
+i(policy, #q{q = Q}) ->
+ case rabbit_policy:name(Q) of
+ none -> '';
+ Policy -> Policy
+ end;
+i(operator_policy, #q{q = Q}) ->
+ case rabbit_policy:name_op(Q) of
+ none -> '';
+ Policy -> Policy
+ end;
+i(effective_policy_definition, #q{q = Q}) ->
+ case rabbit_policy:effective_definition(Q) of
+ undefined -> [];
+ Def -> Def
+ end;
+i(exclusive_consumer_pid, #q{active_consumer = {ChPid, _ConsumerTag}, single_active_consumer_on = false}) ->
+ ChPid;
+i(exclusive_consumer_pid, _) ->
+ '';
+i(exclusive_consumer_tag, #q{active_consumer = {_ChPid, ConsumerTag}, single_active_consumer_on = false}) ->
+ ConsumerTag;
+i(exclusive_consumer_tag, _) ->
+ '';
+i(single_active_consumer_pid, #q{active_consumer = {ChPid, _Consumer}, single_active_consumer_on = true}) ->
+ ChPid;
+i(single_active_consumer_pid, _) ->
+ '';
+i(single_active_consumer_tag, #q{active_consumer = {_ChPid, Consumer}, single_active_consumer_on = true}) ->
+ rabbit_queue_consumers:consumer_tag(Consumer);
+i(single_active_consumer_tag, _) ->
+ '';
+i(messages_ready, #q{backing_queue_state = BQS, backing_queue = BQ}) ->
+ BQ:len(BQS);
+i(messages_unacknowledged, _) ->
+ rabbit_queue_consumers:unacknowledged_message_count();
+i(messages, State) ->
+ lists:sum([i(Item, State) || Item <- [messages_ready,
+ messages_unacknowledged]]);
+i(consumers, _) ->
+ rabbit_queue_consumers:count();
+i(consumer_utilisation, #q{consumers = Consumers}) ->
+ case rabbit_queue_consumers:count() of
+ 0 -> '';
+ _ -> rabbit_queue_consumers:utilisation(Consumers)
+ end;
+i(memory, _) ->
+ {memory, M} = process_info(self(), memory),
+ M;
+i(slave_pids, #q{q = Q0}) ->
+ Name = amqqueue:get_name(Q0),
+ {ok, Q} = rabbit_amqqueue:lookup(Name),
+ case rabbit_mirror_queue_misc:is_mirrored(Q) of
+ false -> '';
+ true -> amqqueue:get_slave_pids(Q)
+ end;
+i(synchronised_slave_pids, #q{q = Q0}) ->
+ Name = amqqueue:get_name(Q0),
+ {ok, Q} = rabbit_amqqueue:lookup(Name),
+ case rabbit_mirror_queue_misc:is_mirrored(Q) of
+ false -> '';
+ true -> amqqueue:get_sync_slave_pids(Q)
+ end;
+i(recoverable_slaves, #q{q = Q0}) ->
+ Name = amqqueue:get_name(Q0),
+ Durable = amqqueue:is_durable(Q0),
+ {ok, Q} = rabbit_amqqueue:lookup(Name),
+ case Durable andalso rabbit_mirror_queue_misc:is_mirrored(Q) of
+ false -> '';
+ true -> amqqueue:get_recoverable_slaves(Q)
+ end;
+i(state, #q{status = running}) -> credit_flow:state();
+i(state, #q{status = State}) -> State;
+i(garbage_collection, _State) ->
+ rabbit_misc:get_gc_info(self());
+i(reductions, _State) ->
+ {reductions, Reductions} = erlang:process_info(self(), reductions),
+ Reductions;
+i(user_who_performed_action, #q{q = Q}) ->
+ Opts = amqqueue:get_options(Q),
+ maps:get(user, Opts, ?UNKNOWN_USER);
+i(type, _) -> classic;
+i(Item, #q{backing_queue_state = BQS, backing_queue = BQ}) ->
+ BQ:info(Item, BQS).
+
+emit_stats(State) ->
+ emit_stats(State, []).
+
+emit_stats(State, Extra) ->
+ ExtraKs = [K || {K, _} <- Extra],
+ [{messages_ready, MR}, {messages_unacknowledged, MU}, {messages, M},
+ {reductions, R}, {name, Name} | Infos] = All
+ = [{K, V} || {K, V} <- infos(statistics_keys(), State),
+ not lists:member(K, ExtraKs)],
+ rabbit_core_metrics:queue_stats(Name, Extra ++ Infos),
+ rabbit_core_metrics:queue_stats(Name, MR, MU, M, R),
+ rabbit_event:notify(queue_stats, Extra ++ All).
+
+emit_consumer_created(ChPid, CTag, Exclusive, AckRequired, QName,
+ PrefetchCount, Args, Ref, ActingUser) ->
+ rabbit_event:notify(consumer_created,
+ [{consumer_tag, CTag},
+ {exclusive, Exclusive},
+ {ack_required, AckRequired},
+ {channel, ChPid},
+ {queue, QName},
+ {prefetch_count, PrefetchCount},
+ {arguments, Args},
+ {user_who_performed_action, ActingUser}],
+ Ref).
+
+emit_consumer_deleted(ChPid, ConsumerTag, QName, ActingUser) ->
+ rabbit_core_metrics:consumer_deleted(ChPid, ConsumerTag, QName),
+ rabbit_event:notify(consumer_deleted,
+ [{consumer_tag, ConsumerTag},
+ {channel, ChPid},
+ {queue, QName},
+ {user_who_performed_action, ActingUser}]).
+
+%%----------------------------------------------------------------------------
+
+prioritise_call(Msg, _From, _Len, State) ->
+ case Msg of
+ info -> 9;
+ {info, _Items} -> 9;
+ consumers -> 9;
+ stat -> 7;
+ {basic_consume, _, _, _, _, _, _, _, _, _} -> consumer_bias(State, 0, 2);
+ {basic_cancel, _, _, _} -> consumer_bias(State, 0, 2);
+ _ -> 0
+ end.
+
+prioritise_cast(Msg, _Len, State) ->
+ case Msg of
+ delete_immediately -> 8;
+ {delete_exclusive, _Pid} -> 8;
+ {set_ram_duration_target, _Duration} -> 8;
+ {set_maximum_since_use, _Age} -> 8;
+ {run_backing_queue, _Mod, _Fun} -> 6;
+ {ack, _AckTags, _ChPid} -> 4; %% [1]
+ {resume, _ChPid} -> 3;
+ {notify_sent, _ChPid, _Credit} -> consumer_bias(State, 0, 2);
+ _ -> 0
+ end.
+
+%% [1] It should be safe to always prioritise ack / resume since they
+%% will be rate limited by how fast consumers receive messages -
+%% i.e. by notify_sent. We prioritise ack and resume to discourage
+%% starvation caused by prioritising notify_sent. We don't vary their
+%% priority since acks should stay in order (some parts of the queue
+%% stack are optimised for that) and to make things easier to reason
+%% about. Finally, we prioritise ack over resume since it should
+%% always reduce memory use.
+%% bump_reduce_memory_use is prioritised over publishes, because sending
+%% credit to self is hard to reason about. Consumers can continue while
+%% reduce_memory_use is in progress.
+
+consumer_bias(#q{backing_queue = BQ, backing_queue_state = BQS}, Low, High) ->
+ case BQ:msg_rates(BQS) of
+ {0.0, _} -> Low;
+ {Ingress, Egress} when Egress / Ingress < ?CONSUMER_BIAS_RATIO -> High;
+ {_, _} -> Low
+ end.
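+
+%% In other words: when messages arrive faster than consumers drain
+%% them (egress/ingress below ?CONSUMER_BIAS_RATIO), consumer-related
+%% messages get the higher priority so consumers can catch up; the
+%% {0.0, _} clause simply avoids dividing by a zero ingress rate.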
+
+prioritise_info(Msg, _Len, #q{q = Q}) ->
+ DownPid = amqqueue:get_exclusive_owner(Q),
+ case Msg of
+ {'DOWN', _, process, DownPid, _} -> 8;
+ update_ram_duration -> 8;
+ {maybe_expire, _Version} -> 8;
+ {drop_expired, _Version} -> 8;
+ emit_stats -> 7;
+ sync_timeout -> 6;
+ bump_reduce_memory_use -> 1;
+ _ -> 0
+ end.
+
+handle_call({init, Recover}, From, State) ->
+ try
+ init_it(Recover, From, State)
+ catch
+ {coordinator_not_started, Reason} ->
+ %% The GM can shut down before the coordinator has started up
+ %% (lost membership or missing group), in which case the start_link
+ %% of the coordinator returns {error, shutdown} as
+ %% rabbit_amqqueue_process is trapping exits. The master captures
+ %% this return value and throws the current exception.
+ {stop, Reason, State}
+ end;
+
+handle_call(info, _From, State) ->
+ reply({ok, infos(info_keys(), State)}, State);
+
+handle_call({info, Items}, _From, State) ->
+ try
+ reply({ok, infos(Items, State)}, State)
+ catch Error -> reply({error, Error}, State)
+ end;
+
+handle_call(consumers, _From, State = #q{consumers = Consumers, single_active_consumer_on = false}) ->
+ reply(rabbit_queue_consumers:all(Consumers), State);
+handle_call(consumers, _From, State = #q{consumers = Consumers, active_consumer = ActiveConsumer}) ->
+ reply(rabbit_queue_consumers:all(Consumers, ActiveConsumer, true), State);
+
+handle_call({notify_down, ChPid}, _From, State) ->
+ %% we want to do this synchronously, so that auto-deleted queues
+ %% are no longer visible by the time we send a response to the
+ %% client. The queue is ultimately deleted in terminate/2; if we
+ %% return stop with a reply, terminate/2 will be called by
+ %% gen_server2 *before* the reply is sent.
+ case handle_ch_down(ChPid, State) of
+ {ok, State1} -> reply(ok, State1);
+ {stop, State1} -> stop(ok, State1#q{status = {terminated_by, auto_delete}})
+ end;
+
+handle_call({basic_get, ChPid, NoAck, LimiterPid}, _From,
+ State = #q{q = Q}) ->
+ QName = amqqueue:get_name(Q),
+ AckRequired = not NoAck,
+ State1 = ensure_expiry_timer(State),
+ case fetch(AckRequired, State1) of
+ {empty, State2} ->
+ reply(empty, State2);
+ {{Message, IsDelivered, AckTag},
+ #q{backing_queue = BQ, backing_queue_state = BQS} = State2} ->
+ case AckRequired of
+ true -> ok = rabbit_queue_consumers:record_ack(
+ ChPid, LimiterPid, AckTag);
+ false -> ok
+ end,
+ Msg = {QName, self(), AckTag, IsDelivered, Message},
+ reply({ok, BQ:len(BQS), Msg}, State2)
+ end;
+
+handle_call({basic_consume, NoAck, ChPid, LimiterPid, LimiterActive,
+ PrefetchCount, ConsumerTag, ExclusiveConsume, Args, OkMsg, ActingUser},
+ _From, State = #q{consumers = Consumers,
+ active_consumer = Holder,
+ single_active_consumer_on = SingleActiveConsumerOn}) ->
+ ConsumerRegistration = case SingleActiveConsumerOn of
+ true ->
+ case ExclusiveConsume of
+ true ->
+ {error, reply({error, exclusive_consume_unavailable}, State)};
+ false ->
+ Consumers1 = rabbit_queue_consumers:add(
+ ChPid, ConsumerTag, NoAck,
+ LimiterPid, LimiterActive,
+ PrefetchCount, Args, is_empty(State),
+ ActingUser, Consumers),
+
+ case Holder of
+ none ->
+ NewConsumer = rabbit_queue_consumers:get(ChPid, ConsumerTag, Consumers1),
+ {state, State#q{consumers = Consumers1,
+ has_had_consumers = true,
+ active_consumer = NewConsumer}};
+ _ ->
+ {state, State#q{consumers = Consumers1,
+ has_had_consumers = true}}
+ end
+ end;
+ false ->
+ case check_exclusive_access(Holder, ExclusiveConsume, State) of
+ in_use -> {error, reply({error, exclusive_consume_unavailable}, State)};
+ ok ->
+ Consumers1 = rabbit_queue_consumers:add(
+ ChPid, ConsumerTag, NoAck,
+ LimiterPid, LimiterActive,
+ PrefetchCount, Args, is_empty(State),
+ ActingUser, Consumers),
+ ExclusiveConsumer =
+ if ExclusiveConsume -> {ChPid, ConsumerTag};
+ true -> Holder
+ end,
+ {state, State#q{consumers = Consumers1,
+ has_had_consumers = true,
+ active_consumer = ExclusiveConsumer}}
+ end
+ end,
+ case ConsumerRegistration of
+ {error, Reply} ->
+ Reply;
+ {state, State1} ->
+ ok = maybe_send_reply(ChPid, OkMsg),
+ QName = qname(State1),
+ AckRequired = not NoAck,
+ TheConsumer = rabbit_queue_consumers:get(ChPid, ConsumerTag, State1#q.consumers),
+ {ConsumerIsActive, ActivityStatus} =
+ case {SingleActiveConsumerOn, State1#q.active_consumer} of
+ {true, TheConsumer} ->
+ {true, single_active};
+ {true, _} ->
+ {false, waiting};
+ {false, _} ->
+ {true, up}
+ end,
+ rabbit_core_metrics:consumer_created(
+ ChPid, ConsumerTag, ExclusiveConsume, AckRequired, QName,
+ PrefetchCount, ConsumerIsActive, ActivityStatus, Args),
+ emit_consumer_created(ChPid, ConsumerTag, ExclusiveConsume,
+ AckRequired, QName, PrefetchCount,
+ Args, none, ActingUser),
+ notify_decorators(State1),
+ reply(ok, run_message_queue(State1))
+ end;
+
+handle_call({basic_cancel, ChPid, ConsumerTag, OkMsg, ActingUser}, _From,
+ State = #q{consumers = Consumers,
+ active_consumer = Holder,
+ single_active_consumer_on = SingleActiveConsumerOn }) ->
+ ok = maybe_send_reply(ChPid, OkMsg),
+ case rabbit_queue_consumers:remove(ChPid, ConsumerTag, Consumers) of
+ not_found ->
+ reply(ok, State);
+ Consumers1 ->
+ Holder1 = new_single_active_consumer_after_basic_cancel(ChPid, ConsumerTag,
+ Holder, SingleActiveConsumerOn, Consumers1
+ ),
+ State1 = State#q{consumers = Consumers1,
+ active_consumer = Holder1},
+ maybe_notify_consumer_updated(State1, Holder, Holder1),
+ emit_consumer_deleted(ChPid, ConsumerTag, qname(State1), ActingUser),
+ notify_decorators(State1),
+ case should_auto_delete(State1) of
+ false -> reply(ok, ensure_expiry_timer(State1));
+ true ->
+ log_auto_delete(
+ io_lib:format(
+ "because its last consumer with tag '~s' was cancelled",
+ [ConsumerTag]),
+ State),
+ stop(ok, State1)
+ end
+ end;
+
+handle_call(stat, _From, State) ->
+ State1 = #q{backing_queue = BQ, backing_queue_state = BQS} =
+ ensure_expiry_timer(State),
+ reply({ok, BQ:len(BQS), rabbit_queue_consumers:count()}, State1);
+
+handle_call({delete, IfUnused, IfEmpty, ActingUser}, _From,
+ State = #q{backing_queue_state = BQS, backing_queue = BQ}) ->
+ IsEmpty = BQ:is_empty(BQS),
+ IsUnused = is_unused(State),
+ if
+ IfEmpty and not(IsEmpty) -> reply({error, not_empty}, State);
+ IfUnused and not(IsUnused) -> reply({error, in_use}, State);
+ true -> stop({ok, BQ:len(BQS)},
+ State#q{status = {terminated_by, ActingUser}})
+ end;
+
+handle_call(purge, _From, State = #q{backing_queue = BQ,
+ backing_queue_state = BQS}) ->
+ {Count, BQS1} = BQ:purge(BQS),
+ State1 = State#q{backing_queue_state = BQS1},
+ reply({ok, Count}, maybe_send_drained(Count =:= 0, State1));
+
+handle_call({requeue, AckTags, ChPid}, From, State) ->
+ gen_server2:reply(From, ok),
+ noreply(requeue(AckTags, ChPid, State));
+
+handle_call(sync_mirrors, _From,
+ State = #q{backing_queue = rabbit_mirror_queue_master,
+ backing_queue_state = BQS}) ->
+ S = fun(BQSN) -> State#q{backing_queue_state = BQSN} end,
+ HandleInfo = fun (Status) ->
+ receive {'$gen_call', From, {info, Items}} ->
+ Infos = infos(Items, State#q{status = Status}),
+ gen_server2:reply(From, {ok, Infos})
+ after 0 ->
+ ok
+ end
+ end,
+ EmitStats = fun (Status) ->
+ rabbit_event:if_enabled(
+ State, #q.stats_timer,
+ fun() -> emit_stats(State#q{status = Status}) end)
+ end,
+ case rabbit_mirror_queue_master:sync_mirrors(HandleInfo, EmitStats, BQS) of
+ {ok, BQS1} -> reply(ok, S(BQS1));
+ {stop, Reason, BQS1} -> {stop, Reason, S(BQS1)}
+ end;
+
+handle_call(sync_mirrors, _From, State) ->
+ reply({error, not_mirrored}, State);
+
+%% By definition if we get this message here we do not have to do anything.
+handle_call(cancel_sync_mirrors, _From, State) ->
+ reply({ok, not_syncing}, State).
+
+new_single_active_consumer_after_basic_cancel(ChPid, ConsumerTag, CurrentSingleActiveConsumer,
+ _SingleActiveConsumerIsOn = true, Consumers) ->
+ case rabbit_queue_consumers:is_same(ChPid, ConsumerTag, CurrentSingleActiveConsumer) of
+ true ->
+ case rabbit_queue_consumers:get_consumer(Consumers) of
+ undefined -> none;
+ Consumer -> Consumer
+ end;
+ false ->
+ CurrentSingleActiveConsumer
+ end;
+new_single_active_consumer_after_basic_cancel(ChPid, ConsumerTag, CurrentSingleActiveConsumer,
+ _SingleActiveConsumerIsOn = false, _Consumers) ->
+ case CurrentSingleActiveConsumer of
+ {ChPid, ConsumerTag} -> none;
+ _ -> CurrentSingleActiveConsumer
+ end.
+
+maybe_notify_consumer_updated(#q{single_active_consumer_on = false}, _, _) ->
+ ok;
+maybe_notify_consumer_updated(#q{single_active_consumer_on = true}, SingleActiveConsumer, SingleActiveConsumer) ->
+ % the single active consumer didn't change; nothing to do
+ ok;
+maybe_notify_consumer_updated(#q{single_active_consumer_on = true} = State, _PreviousConsumer, NewConsumer) ->
+ case NewConsumer of
+ {ChPid, Consumer} ->
+ {Tag, Ack, Prefetch, Args} = rabbit_queue_consumers:get_infos(Consumer),
+ rabbit_core_metrics:consumer_updated(
+ ChPid, Tag, false, Ack, qname(State),
+ Prefetch, true, single_active, Args
+ ),
+ ok;
+ _ ->
+ ok
+ end.
+
+handle_cast(init, State) ->
+ try
+ init_it({no_barrier, non_clean_shutdown}, none, State)
+ catch
+ {coordinator_not_started, Reason} ->
+ %% The GM can shut down before the coordinator has started up
+ %% (lost membership or missing group), in which case the start_link
+ %% of the coordinator returns {error, shutdown} as
+ %% rabbit_amqqueue_process is trapping exits. The master captures
+ %% this return value and throws the current exception.
+ {stop, Reason, State}
+ end;
+
+handle_cast({run_backing_queue, Mod, Fun},
+ State = #q{backing_queue = BQ, backing_queue_state = BQS}) ->
+ noreply(State#q{backing_queue_state = BQ:invoke(Mod, Fun, BQS)});
+
+handle_cast({deliver,
+ Delivery = #delivery{sender = Sender,
+ flow = Flow},
+ SlaveWhenPublished},
+ State = #q{senders = Senders}) ->
+ Senders1 = case Flow of
+ %% In both credit_flow:ack/1 calls below we are acking messages
+ %% to the channel process that sent us the message delivery. See
+ %% handle_ch_down for more info.
+ flow -> credit_flow:ack(Sender),
+ case SlaveWhenPublished of
+ true -> credit_flow:ack(Sender); %% [0]
+ false -> ok
+ end,
+ pmon:monitor(Sender, Senders);
+ noflow -> Senders
+ end,
+ State1 = State#q{senders = Senders1},
+ noreply(maybe_deliver_or_enqueue(Delivery, SlaveWhenPublished, State1));
+%% [0] The second ack is needed because the channel thought we were a
+%% mirror at the time it published this message, so it used two
+%% credits (see rabbit_queue_type:deliver/2).
+
+handle_cast({ack, AckTags, ChPid}, State) ->
+ noreply(ack(AckTags, ChPid, State));
+
+handle_cast({reject, true, AckTags, ChPid}, State) ->
+ noreply(requeue(AckTags, ChPid, State));
+
+handle_cast({reject, false, AckTags, ChPid}, State) ->
+ noreply(with_dlx(
+ State#q.dlx,
+ fun (X) -> subtract_acks(ChPid, AckTags, State,
+ fun (State1) ->
+ dead_letter_rejected_msgs(
+ AckTags, X, State1)
+ end) end,
+ fun () -> ack(AckTags, ChPid, State) end));
+
+handle_cast({delete_exclusive, ConnPid}, State) ->
+ log_delete_exclusive(ConnPid, State),
+ stop(State);
+
+handle_cast(delete_immediately, State) ->
+ stop(State);
+
+handle_cast({resume, ChPid}, State) ->
+ noreply(possibly_unblock(rabbit_queue_consumers:resume_fun(),
+ ChPid, State));
+
+handle_cast({notify_sent, ChPid, Credit}, State) ->
+ noreply(possibly_unblock(rabbit_queue_consumers:notify_sent_fun(Credit),
+ ChPid, State));
+
+handle_cast({activate_limit, ChPid}, State) ->
+ noreply(possibly_unblock(rabbit_queue_consumers:activate_limit_fun(),
+ ChPid, State));
+
+handle_cast({set_ram_duration_target, Duration},
+ State = #q{backing_queue = BQ, backing_queue_state = BQS}) ->
+ BQS1 = BQ:set_ram_duration_target(Duration, BQS),
+ noreply(State#q{backing_queue_state = BQS1});
+
+handle_cast({set_maximum_since_use, Age}, State) ->
+ ok = file_handle_cache:set_maximum_since_use(Age),
+ noreply(State);
+
+handle_cast(update_mirroring, State = #q{q = Q,
+ mirroring_policy_version = Version}) ->
+ case needs_update_mirroring(Q, Version) of
+ false ->
+ noreply(State);
+ {Policy, NewVersion} ->
+ State1 = State#q{mirroring_policy_version = NewVersion},
+ noreply(update_mirroring(Policy, State1))
+ end;
+
+handle_cast({credit, ChPid, CTag, Credit, Drain},
+ State = #q{consumers = Consumers,
+ backing_queue = BQ,
+ backing_queue_state = BQS,
+ q = Q}) ->
+ Len = BQ:len(BQS),
+ rabbit_classic_queue:send_queue_event(ChPid, amqqueue:get_name(Q), {send_credit_reply, Len}),
+ noreply(
+ case rabbit_queue_consumers:credit(Len == 0, Credit, Drain, ChPid, CTag,
+ Consumers) of
+ unchanged -> State;
+ {unblocked, Consumers1} -> State1 = State#q{consumers = Consumers1},
+ run_message_queue(true, State1)
+ end);
+
+% Note: https://www.pivotaltracker.com/story/show/166962656
+% This event is necessary for the stats timer to be initialized with
+% the correct values once the management agent has started
+handle_cast({force_event_refresh, Ref},
+ State = #q{consumers = Consumers,
+ active_consumer = Holder}) ->
+ rabbit_event:notify(queue_created, infos(?CREATION_EVENT_KEYS, State), Ref),
+ QName = qname(State),
+ AllConsumers = rabbit_queue_consumers:all(Consumers),
+ case Holder of
+ none ->
+ [emit_consumer_created(
+ Ch, CTag, false, AckRequired, QName, Prefetch,
+ Args, Ref, ActingUser) ||
+ {Ch, CTag, AckRequired, Prefetch, _, _, Args, ActingUser}
+ <- AllConsumers];
+ {Ch, CTag} ->
+ [{Ch, CTag, AckRequired, Prefetch, _, _, Args, ActingUser}] = AllConsumers,
+ emit_consumer_created(
+ Ch, CTag, true, AckRequired, QName, Prefetch, Args, Ref, ActingUser)
+ end,
+ noreply(rabbit_event:init_stats_timer(State, #q.stats_timer));
+
+handle_cast(notify_decorators, State) ->
+ notify_decorators(State),
+ noreply(State);
+
+handle_cast(policy_changed, State = #q{q = Q0}) ->
+ Name = amqqueue:get_name(Q0),
+ %% We depend on the #q.q field being up to date, at least with
+ %% respect to policy (but not mirror pids), in various places, so
+ %% when it changes we go and read it from Mnesia again.
+ %%
+ %% This also has the side effect of waking us up so we emit a
+ %% stats event - so event consumers see the changed policy.
+ {ok, Q} = rabbit_amqqueue:lookup(Name),
+ noreply(process_args_policy(State#q{q = Q}));
+
+handle_cast({sync_start, _, _}, State = #q{q = Q}) ->
+ Name = amqqueue:get_name(Q),
+ %% Only a mirror should receive this; it means we are a duplicated master
+ rabbit_mirror_queue_misc:log_warning(
+ Name, "Stopping after receiving sync_start from another master", []),
+ stop(State).
+
+handle_info({maybe_expire, Vsn}, State = #q{args_policy_version = Vsn}) ->
+ case is_unused(State) of
+ true -> stop(State);
+ false -> noreply(State#q{expiry_timer_ref = undefined})
+ end;
+
+handle_info({maybe_expire, _Vsn}, State) ->
+ noreply(State);
+
+handle_info({drop_expired, Vsn}, State = #q{args_policy_version = Vsn}) ->
+ WasEmpty = is_empty(State),
+ State1 = drop_expired_msgs(State#q{ttl_timer_ref = undefined}),
+ noreply(maybe_send_drained(WasEmpty, State1));
+
+handle_info({drop_expired, _Vsn}, State) ->
+ noreply(State);
+
+handle_info(emit_stats, State) ->
+ emit_stats(State),
+ %% Don't call noreply/1, we don't want to set timers
+ {State1, Timeout} = next_state(rabbit_event:reset_stats_timer(
+ State, #q.stats_timer)),
+ {noreply, State1, Timeout};
+
+handle_info({'DOWN', _MonitorRef, process, DownPid, _Reason},
+ State = #q{q = Q}) when ?amqqueue_exclusive_owner_is(Q, DownPid) ->
+ %% Exclusively owned queues must disappear with their owner. In
+ %% the case of clean shutdown we delete the queue synchronously in
+ %% the reader - although not required by the spec this seems to
+ %% match what people expect (see bug 21824). However, we need this
+ %% monitor-and-async-delete in case the connection goes away
+ %% unexpectedly.
+ log_delete_exclusive(DownPid, State),
+ stop(State);
+
+handle_info({'DOWN', _MonitorRef, process, DownPid, _Reason}, State) ->
+ case handle_ch_down(DownPid, State) of
+ {ok, State1} -> noreply(State1);
+ {stop, State1} -> stop(State1)
+ end;
+
+handle_info(update_ram_duration, State = #q{backing_queue = BQ,
+ backing_queue_state = BQS}) ->
+ {RamDuration, BQS1} = BQ:ram_duration(BQS),
+ DesiredDuration =
+ rabbit_memory_monitor:report_ram_duration(self(), RamDuration),
+ BQS2 = BQ:set_ram_duration_target(DesiredDuration, BQS1),
+ %% Don't call noreply/1, we don't want to set timers
+ {State1, Timeout} = next_state(State#q{rate_timer_ref = undefined,
+ backing_queue_state = BQS2}),
+ {noreply, State1, Timeout};
+
+handle_info(sync_timeout, State) ->
+ noreply(backing_queue_timeout(State#q{sync_timer_ref = undefined}));
+
+handle_info(timeout, State) ->
+ noreply(backing_queue_timeout(State));
+
+handle_info({'EXIT', _Pid, Reason}, State) ->
+ {stop, Reason, State};
+
+handle_info({bump_credit, Msg}, State = #q{backing_queue = BQ,
+ backing_queue_state = BQS}) ->
+ %% The message_store is granting us more credit. This means the
+ %% backing queue (for the rabbit_variable_queue case) might
+ %% continue paging messages to disk if it still needs to. We
+ %% consume credits from the message_store whenever we need to
+ %% persist a message to disk. See:
+ %% rabbit_variable_queue:msg_store_write/4.
+ credit_flow:handle_bump_msg(Msg),
+ noreply(State#q{backing_queue_state = BQ:resume(BQS)});
+handle_info(bump_reduce_memory_use, State = #q{backing_queue = BQ,
+ backing_queue_state = BQS0}) ->
+ BQS1 = BQ:handle_info(bump_reduce_memory_use, BQS0),
+ noreply(State#q{backing_queue_state = BQ:resume(BQS1)});
+
+handle_info(Info, State) ->
+ {stop, {unhandled_info, Info}, State}.
+
+handle_pre_hibernate(State = #q{backing_queue_state = undefined}) ->
+ {hibernate, State};
+handle_pre_hibernate(State = #q{backing_queue = BQ,
+ backing_queue_state = BQS}) ->
+ {RamDuration, BQS1} = BQ:ram_duration(BQS),
+ DesiredDuration =
+ rabbit_memory_monitor:report_ram_duration(self(), RamDuration),
+ BQS2 = BQ:set_ram_duration_target(DesiredDuration, BQS1),
+ BQS3 = BQ:handle_pre_hibernate(BQS2),
+ rabbit_event:if_enabled(
+ State, #q.stats_timer,
+ fun () -> emit_stats(State,
+ [{idle_since,
+ os:system_time(milli_seconds)},
+ {consumer_utilisation, ''}])
+ end),
+ State1 = rabbit_event:stop_stats_timer(State#q{backing_queue_state = BQS3},
+ #q.stats_timer),
+ {hibernate, stop_rate_timer(State1)}.
+
+format_message_queue(Opt, MQ) -> rabbit_misc:format_message_queue(Opt, MQ).
+
+format(Q) when ?is_amqqueue(Q) ->
+ case rabbit_mirror_queue_misc:is_mirrored(Q) of
+ false ->
+ [{node, node(amqqueue:get_pid(Q))}];
+ true ->
+ Slaves = amqqueue:get_slave_pids(Q),
+ SSlaves = amqqueue:get_sync_slave_pids(Q),
+ [{slave_nodes, [node(S) || S <- Slaves]},
+ {synchronised_slave_nodes, [node(S) || S <- SSlaves]},
+ {node, node(amqqueue:get_pid(Q))}]
+ end.
+
+-spec is_policy_applicable(amqqueue:amqqueue(), any()) -> boolean().
+is_policy_applicable(_Q, _Policy) ->
+ true.
+
+log_delete_exclusive({ConPid, _ConRef}, State) ->
+ log_delete_exclusive(ConPid, State);
+log_delete_exclusive(ConPid, #q{ q = Q }) ->
+ Resource = amqqueue:get_name(Q),
+ #resource{ name = QName, virtual_host = VHost } = Resource,
+ rabbit_log_queue:debug("Deleting exclusive queue '~s' in vhost '~s' " ++
+ "because its declaring connection ~p was closed",
+ [QName, VHost, ConPid]).
+
+log_auto_delete(Reason, #q{ q = Q }) ->
+ Resource = amqqueue:get_name(Q),
+ #resource{ name = QName, virtual_host = VHost } = Resource,
+ rabbit_log_queue:debug("Deleting auto-delete queue '~s' in vhost '~s' " ++
+ Reason,
+ [QName, VHost]).
+
+needs_update_mirroring(Q, Version) ->
+ {ok, UpQ} = rabbit_amqqueue:lookup(amqqueue:get_name(Q)),
+ DBVersion = amqqueue:get_policy_version(UpQ),
+ case DBVersion > Version of
+ true -> {rabbit_policy:get(<<"ha-mode">>, UpQ), DBVersion};
+ false -> false
+ end.
+
+
+update_mirroring(Policy, State = #q{backing_queue = BQ}) ->
+ case update_to(Policy, BQ) of
+ start_mirroring ->
+ start_mirroring(State);
+ stop_mirroring ->
+ stop_mirroring(State);
+ ignore ->
+ State;
+ update_ha_mode ->
+ update_ha_mode(State)
+ end.
+
+update_to(undefined, rabbit_mirror_queue_master) ->
+ stop_mirroring;
+update_to(_, rabbit_mirror_queue_master) ->
+ update_ha_mode;
+update_to(undefined, BQ) when BQ =/= rabbit_mirror_queue_master ->
+ ignore;
+update_to(_, BQ) when BQ =/= rabbit_mirror_queue_master ->
+ start_mirroring.
+
+start_mirroring(State = #q{backing_queue = BQ,
+ backing_queue_state = BQS}) ->
+ %% lookup again to get policy for init_with_existing_bq
+ {ok, Q} = rabbit_amqqueue:lookup(qname(State)),
+ true = BQ =/= rabbit_mirror_queue_master, %% assertion
+ BQ1 = rabbit_mirror_queue_master,
+ BQS1 = BQ1:init_with_existing_bq(Q, BQ, BQS),
+ State#q{backing_queue = BQ1,
+ backing_queue_state = BQS1}.
+
+stop_mirroring(State = #q{backing_queue = BQ,
+ backing_queue_state = BQS}) ->
+ BQ = rabbit_mirror_queue_master, %% assertion
+ {BQ1, BQS1} = BQ:stop_mirroring(BQS),
+ State#q{backing_queue = BQ1,
+ backing_queue_state = BQS1}.
+
+update_ha_mode(State) ->
+ {ok, Q} = rabbit_amqqueue:lookup(qname(State)),
+ ok = rabbit_mirror_queue_misc:update_mirrors(Q),
+ State.
+
+confirm_to_sender(Pid, QName, MsgSeqNos) ->
+ rabbit_classic_queue:confirm_to_sender(Pid, QName, MsgSeqNos).
+
+
diff --git a/deps/rabbit/src/rabbit_amqqueue_sup.erl b/deps/rabbit/src/rabbit_amqqueue_sup.erl
new file mode 100644
index 0000000000..a9eaf4087f
--- /dev/null
+++ b/deps/rabbit/src/rabbit_amqqueue_sup.erl
@@ -0,0 +1,35 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_amqqueue_sup).
+
+-behaviour(supervisor2).
+
+-export([start_link/2]).
+
+-export([init/1]).
+
+-include("rabbit.hrl").
+
+%%----------------------------------------------------------------------------
+
+-spec start_link(amqqueue:amqqueue(), rabbit_prequeue:start_mode()) ->
+ {'ok', pid(), pid()}.
+
+start_link(Q, StartMode) ->
+ Marker = spawn_link(fun() -> receive stop -> ok end end),
+ ChildSpec = {rabbit_amqqueue,
+ {rabbit_prequeue, start_link, [Q, StartMode, Marker]},
+ intrinsic, ?WORKER_WAIT, worker, [rabbit_amqqueue_process,
+ rabbit_mirror_queue_slave]},
+ {ok, SupPid} = supervisor2:start_link(?MODULE, []),
+ {ok, QPid} = supervisor2:start_child(SupPid, ChildSpec),
+ unlink(Marker),
+ Marker ! stop,
+ {ok, SupPid, QPid}.
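+
+%% Note on the Marker process above: it is spawned and linked before
+%% the queue child is started, then unlinked and stopped immediately
+%% afterwards. Its pid is passed to rabbit_prequeue:start_link/3,
+%% presumably so the prequeue can distinguish this original start
+%% (marker still alive) from a later supervisor-initiated restart
+%% (marker dead).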
+
+init([]) -> {ok, {{one_for_one, 5, 10}, []}}.
diff --git a/deps/rabbit/src/rabbit_amqqueue_sup_sup.erl b/deps/rabbit/src/rabbit_amqqueue_sup_sup.erl
new file mode 100644
index 0000000000..732816b79f
--- /dev/null
+++ b/deps/rabbit/src/rabbit_amqqueue_sup_sup.erl
@@ -0,0 +1,84 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_amqqueue_sup_sup).
+
+-behaviour(supervisor2).
+
+-export([start_link/0, start_queue_process/3]).
+-export([start_for_vhost/1, stop_for_vhost/1,
+ find_for_vhost/2, find_for_vhost/1]).
+
+-export([init/1]).
+
+-include("rabbit.hrl").
+
+-define(SERVER, ?MODULE).
+
+%%----------------------------------------------------------------------------
+
+-spec start_link() -> rabbit_types:ok_pid_or_error().
+
+start_link() ->
+ supervisor2:start_link(?MODULE, []).
+
+-spec start_queue_process
+ (node(), amqqueue:amqqueue(), 'declare' | 'recovery' | 'slave') ->
+ pid().
+
+start_queue_process(Node, Q, StartMode) ->
+ #resource{virtual_host = VHost} = amqqueue:get_name(Q),
+ {ok, Sup} = find_for_vhost(VHost, Node),
+ {ok, _SupPid, QPid} = supervisor2:start_child(Sup, [Q, StartMode]),
+ QPid.
+
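+%% The child spec below is simple_one_for_one, so - as with OTP's
+%% supervisor - the [Q, StartMode] list passed to
+%% supervisor2:start_child/2 in start_queue_process/3 above is
+%% appended to the spec's (empty) argument list, resulting in a
+%% rabbit_amqqueue_sup:start_link(Q, StartMode) call.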
+init([]) ->
+ {ok, {{simple_one_for_one, 10, 10},
+ [{rabbit_amqqueue_sup, {rabbit_amqqueue_sup, start_link, []},
+ temporary, ?SUPERVISOR_WAIT, supervisor, [rabbit_amqqueue_sup]}]}}.
+
+-spec find_for_vhost(rabbit_types:vhost()) -> {ok, pid()} | {error, term()}.
+find_for_vhost(VHost) ->
+ find_for_vhost(VHost, node()).
+
+-spec find_for_vhost(rabbit_types:vhost(), atom()) -> {ok, pid()} | {error, term()}.
+find_for_vhost(VHost, Node) ->
+ {ok, VHostSup} = rabbit_vhost_sup_sup:get_vhost_sup(VHost, Node),
+ case supervisor2:find_child(VHostSup, rabbit_amqqueue_sup_sup) of
+ [QSup] -> {ok, QSup};
+ Result -> {error, {queue_supervisor_not_found, Result}}
+ end.
+
+-spec start_for_vhost(rabbit_types:vhost()) -> {ok, pid()} | {error, term()}.
+start_for_vhost(VHost) ->
+ case rabbit_vhost_sup_sup:get_vhost_sup(VHost) of
+ {ok, VHostSup} ->
+ supervisor2:start_child(
+ VHostSup,
+ {rabbit_amqqueue_sup_sup,
+ {rabbit_amqqueue_sup_sup, start_link, []},
+ transient, infinity, supervisor, [rabbit_amqqueue_sup_sup]});
+ %% we can get here if a vhost is added and removed concurrently
+ %% e.g. some integration tests do it
+ {error, {no_such_vhost, VHost}} ->
+ rabbit_log:error("Failed to start a queue process supervisor for vhost ~s: vhost no longer exists!",
+ [VHost]),
+ {error, {no_such_vhost, VHost}}
+ end.
+
+-spec stop_for_vhost(rabbit_types:vhost()) -> ok.
+stop_for_vhost(VHost) ->
+ case rabbit_vhost_sup_sup:get_vhost_sup(VHost) of
+ {ok, VHostSup} ->
+ ok = supervisor2:terminate_child(VHostSup, rabbit_amqqueue_sup_sup),
+ ok = supervisor2:delete_child(VHostSup, rabbit_amqqueue_sup_sup);
+ %% see start/1
+ {error, {no_such_vhost, VHost}} ->
+ rabbit_log:error("Failed to stop a queue process supervisor for vhost ~s: vhost no longer exists!",
+ [VHost]),
+ ok
+ end.
diff --git a/deps/rabbit/src/rabbit_auth_backend_internal.erl b/deps/rabbit/src/rabbit_auth_backend_internal.erl
new file mode 100644
index 0000000000..cb930a1630
--- /dev/null
+++ b/deps/rabbit/src/rabbit_auth_backend_internal.erl
@@ -0,0 +1,1076 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_auth_backend_internal).
+-include("rabbit.hrl").
+
+-behaviour(rabbit_authn_backend).
+-behaviour(rabbit_authz_backend).
+
+-export([user_login_authentication/2, user_login_authorization/2,
+ check_vhost_access/3, check_resource_access/4, check_topic_access/4]).
+
+-export([add_user/3, delete_user/2, lookup_user/1, exists/1,
+ change_password/3, clear_password/2,
+ hash_password/2, change_password_hash/2, change_password_hash/3,
+ set_tags/3, set_permissions/6, clear_permissions/3,
+ set_topic_permissions/6, clear_topic_permissions/3, clear_topic_permissions/4,
+ add_user_sans_validation/3, put_user/2, put_user/3]).
+
+-export([set_user_limits/3, clear_user_limits/3, is_over_connection_limit/1,
+ is_over_channel_limit/1, get_user_limits/0, get_user_limits/1]).
+
+-export([user_info_keys/0, perms_info_keys/0,
+ user_perms_info_keys/0, vhost_perms_info_keys/0,
+ user_vhost_perms_info_keys/0, all_users/0,
+ list_users/0, list_users/2, list_permissions/0,
+ list_user_permissions/1, list_user_permissions/3,
+ list_topic_permissions/0,
+ list_vhost_permissions/1, list_vhost_permissions/3,
+ list_user_vhost_permissions/2,
+ list_user_topic_permissions/1, list_vhost_topic_permissions/1, list_user_vhost_topic_permissions/2]).
+
+-export([state_can_expire/0]).
+
+%% for testing
+-export([hashing_module_for_user/1, expand_topic_permission/2]).
+
+%%----------------------------------------------------------------------------
+
+-type regexp() :: binary().
+
+%%----------------------------------------------------------------------------
+%% Implementation of rabbit_auth_backend
+
+%% Returns a password hashing module for the user record provided. If
+%% there is no information in the record, we consider it to be legacy
+%% (inserted by a version older than 3.6.0) and fall back to MD5, the
+%% now obsolete hashing function.
+hashing_module_for_user(User) ->
+ ModOrUndefined = internal_user:get_hashing_algorithm(User),
+ rabbit_password:hashing_mod(ModOrUndefined).
+
+-define(BLANK_PASSWORD_REJECTION_MESSAGE,
+ "user '~s' attempted to log in with a blank password, which is prohibited by the internal authN backend. "
+ "To use TLS/x509 certificate-based authentication, see the rabbitmq_auth_mechanism_ssl plugin and configure the client to use the EXTERNAL authentication mechanism. "
+ "Alternatively change the password for the user to be non-blank.").
+
+%% For cases when we do not have a set of credentials,
+%% namely when x509 (TLS) certificates are used. This should only be
+%% possible when the EXTERNAL authentication mechanism is used; see
+%% rabbit_auth_mechanism_plain:handle_response/2 and rabbit_reader:auth_phase/2.
+user_login_authentication(Username, []) ->
+ internal_check_user_login(Username, fun(_) -> true end);
+%% For cases when we do have a set of credentials. rabbit_auth_mechanism_plain:handle_response/2
+%% performs initial validation.
+user_login_authentication(Username, AuthProps) ->
+ case lists:keyfind(password, 1, AuthProps) of
+ {password, <<"">>} ->
+ {refused, ?BLANK_PASSWORD_REJECTION_MESSAGE,
+ [Username]};
+ {password, ""} ->
+ {refused, ?BLANK_PASSWORD_REJECTION_MESSAGE,
+ [Username]};
+ {password, Cleartext} ->
+ internal_check_user_login(
+ Username,
+ fun(User) ->
+ case internal_user:get_password_hash(User) of
+ <<Salt:4/binary, Hash/binary>> ->
+ Hash =:= rabbit_password:salted_hash(
+ hashing_module_for_user(User), Salt, Cleartext);
+ _ ->
+ false
+ end
+ end);
+ false -> exit({unknown_auth_props, Username, AuthProps})
+ end.
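+
+%% The stored password hash is a binary of the form
+%% <<Salt:4/binary, Hash/binary>>: the first four bytes are the salt,
+%% the remainder the salted hash. Verification, as in the fun above,
+%% recomputes rabbit_password:salted_hash(Mod, Salt, Cleartext) and
+%% compares the result against the stored Hash.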
+
+state_can_expire() -> false.
+
+user_login_authorization(Username, _AuthProps) ->
+ case user_login_authentication(Username, []) of
+ {ok, #auth_user{impl = Impl, tags = Tags}} -> {ok, Impl, Tags};
+ Else -> Else
+ end.
+
+internal_check_user_login(Username, Fun) ->
+ Refused = {refused, "user '~s' - invalid credentials", [Username]},
+ case lookup_user(Username) of
+ {ok, User} ->
+ Tags = internal_user:get_tags(User),
+ case Fun(User) of
+ true -> {ok, #auth_user{username = Username,
+ tags = Tags,
+ impl = none}};
+ _ -> Refused
+ end;
+ {error, not_found} ->
+ Refused
+ end.
+
+check_vhost_access(#auth_user{username = Username}, VHostPath, _AuthzData) ->
+ case mnesia:dirty_read({rabbit_user_permission,
+ #user_vhost{username = Username,
+ virtual_host = VHostPath}}) of
+ [] -> false;
+ [_R] -> true
+ end.
+
+check_resource_access(#auth_user{username = Username},
+ #resource{virtual_host = VHostPath, name = Name},
+ Permission,
+ _AuthContext) ->
+ case mnesia:dirty_read({rabbit_user_permission,
+ #user_vhost{username = Username,
+ virtual_host = VHostPath}}) of
+ [] ->
+ false;
+ [#user_permission{permission = P}] ->
+ PermRegexp = case element(permission_index(Permission), P) of
+ %% <<"^$">> breaks Emacs' erlang mode
+ <<"">> -> <<$^, $$>>;
+ RE -> RE
+ end,
+ case re:run(Name, PermRegexp, [{capture, none}]) of
+ match -> true;
+ nomatch -> false
+ end
+ end.
+
+check_topic_access(#auth_user{username = Username},
+ #resource{virtual_host = VHostPath, name = Name, kind = topic},
+ Permission,
+ Context) ->
+ case mnesia:dirty_read({rabbit_topic_permission,
+ #topic_permission_key{user_vhost = #user_vhost{username = Username,
+ virtual_host = VHostPath},
+ exchange = Name
+ }}) of
+ [] ->
+ true;
+ [#topic_permission{permission = P}] ->
+ PermRegexp = case element(permission_index(Permission), P) of
+ %% <<"^$">> breaks Emacs' erlang mode
+ <<"">> -> <<$^, $$>>;
+ RE -> RE
+ end,
+ PermRegexpExpanded = expand_topic_permission(
+ PermRegexp,
+ maps:get(variable_map, Context, undefined)
+ ),
+ case re:run(maps:get(routing_key, Context), PermRegexpExpanded, [{capture, none}]) of
+ match -> true;
+ nomatch -> false
+ end
+ end.
+
+expand_topic_permission(Permission, ToExpand) when is_map(ToExpand) ->
+ Opening = <<"{">>,
+ Closing = <<"}">>,
+ ReplaceFun = fun(K, V, Acc) ->
+ Placeholder = <<Opening/binary, K/binary, Closing/binary>>,
+ binary:replace(Acc, Placeholder, V, [global])
+ end,
+ maps:fold(ReplaceFun, Permission, ToExpand);
+expand_topic_permission(Permission, _ToExpand) ->
+ Permission.
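+
+%% For example, with a hypothetical variable map:
+%%   expand_topic_permission(<<"^{username}-.*">>,
+%%                           #{<<"username">> => <<"guest">>})
+%% returns <<"^guest-.*">>; when the second argument is not a map, the
+%% permission is returned unchanged.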
+
+permission_index(configure) -> #permission.configure;
+permission_index(write) -> #permission.write;
+permission_index(read) -> #permission.read.
+
+%%----------------------------------------------------------------------------
+%% Manipulation of the user database
+
+validate_credentials(Username, Password) ->
+ rabbit_credential_validation:validate(Username, Password).
+
+validate_and_alternate_credentials(Username, Password, ActingUser, Fun) ->
+ case validate_credentials(Username, Password) of
+ ok ->
+ Fun(Username, Password, ActingUser);
+ {error, Err} ->
+ rabbit_log:error("Credential validation for '~s' failed!~n", [Username]),
+ {error, Err}
+ end.
+
+-spec add_user(rabbit_types:username(), rabbit_types:password(),
+ rabbit_types:username()) -> 'ok' | {'error', string()}.
+
+add_user(Username, Password, ActingUser) ->
+ validate_and_alternate_credentials(Username, Password, ActingUser,
+ fun add_user_sans_validation/3).
+
+add_user_sans_validation(Username, Password, ActingUser) ->
+ rabbit_log:debug("Asked to create a new user '~s', password length in bytes: ~p", [Username, bit_size(Password)]),
+ %% hash_password will pick the hashing function configured for us
+ %% but we also need to store a hint as part of the record, so we
+ %% retrieve it here one more time
+ HashingMod = rabbit_password:hashing_mod(),
+ PasswordHash = hash_password(HashingMod, Password),
+ User = internal_user:create_user(Username, PasswordHash, HashingMod),
+ try
+ R = rabbit_misc:execute_mnesia_transaction(
+ fun () ->
+ case mnesia:wread({rabbit_user, Username}) of
+ [] ->
+ ok = mnesia:write(rabbit_user, User, write);
+ _ ->
+ mnesia:abort({user_already_exists, Username})
+ end
+ end),
+ rabbit_log:info("Created user '~s'", [Username]),
+ rabbit_event:notify(user_created, [{name, Username},
+ {user_who_performed_action, ActingUser}]),
+ R
+ catch
+ throw:{error, {user_already_exists, _}} = Error ->
+ rabbit_log:warning("Failed to add user '~s': the user already exists", [Username]),
+ throw(Error);
+ throw:Error ->
+ rabbit_log:warning("Failed to add user '~s': ~p", [Username, Error]),
+ throw(Error);
+ exit:Error ->
+ rabbit_log:warning("Failed to add user '~s': ~p", [Username, Error]),
+ exit(Error)
+ end.
+
+-spec delete_user(rabbit_types:username(), rabbit_types:username()) -> 'ok'.
+
+delete_user(Username, ActingUser) ->
+ rabbit_log:debug("Asked to delete user '~s'", [Username]),
+ try
+ R = rabbit_misc:execute_mnesia_transaction(
+ rabbit_misc:with_user(
+ Username,
+ fun () ->
+ ok = mnesia:delete({rabbit_user, Username}),
+ [ok = mnesia:delete_object(
+ rabbit_user_permission, R, write) ||
+ R <- mnesia:match_object(
+ rabbit_user_permission,
+ #user_permission{user_vhost = #user_vhost{
+ username = Username,
+ virtual_host = '_'},
+ permission = '_'},
+ write)],
+ UserTopicPermissionsQuery = match_user_vhost_topic_permission(Username, '_'),
+ UserTopicPermissions = UserTopicPermissionsQuery(),
+ [ok = mnesia:delete_object(rabbit_topic_permission, R, write) || R <- UserTopicPermissions],
+ ok
+ end)),
+ rabbit_log:info("Deleted user '~s'", [Username]),
+ rabbit_event:notify(user_deleted,
+ [{name, Username},
+ {user_who_performed_action, ActingUser}]),
+ R
+ catch
+ throw:{error, {no_such_user, _}} = Error ->
+ rabbit_log:warning("Failed to delete user '~s': the user does not exist", [Username]),
+ throw(Error);
+ throw:Error ->
+ rabbit_log:warning("Failed to delete user '~s': ~p", [Username, Error]),
+ throw(Error);
+ exit:Error ->
+ rabbit_log:warning("Failed to delete user '~s': ~p", [Username, Error]),
+ exit(Error)
+ end.
+
+-spec lookup_user
+ (rabbit_types:username()) ->
+ rabbit_types:ok(internal_user:internal_user()) |
+ rabbit_types:error('not_found').
+
+lookup_user(Username) ->
+ rabbit_misc:dirty_read({rabbit_user, Username}).
+
+-spec exists(rabbit_types:username()) -> boolean().
+
+exists(Username) ->
+ case lookup_user(Username) of
+ {error, not_found} -> false;
+ _ -> true
+ end.
+
+-spec change_password
+ (rabbit_types:username(), rabbit_types:password(), rabbit_types:username()) -> 'ok'.
+
+change_password(Username, Password, ActingUser) ->
+ validate_and_alternate_credentials(Username, Password, ActingUser,
+ fun change_password_sans_validation/3).
+
+change_password_sans_validation(Username, Password, ActingUser) ->
+ try
+ rabbit_log:debug("Asked to change password of user '~s', new password length in bytes: ~p", [Username, bit_size(Password)]),
+ HashingAlgorithm = rabbit_password:hashing_mod(),
+ R = change_password_hash(Username,
+ hash_password(HashingAlgorithm, Password),
+ HashingAlgorithm),
+ rabbit_log:info("Successfully changed password for user '~s'", [Username]),
+ rabbit_event:notify(user_password_changed,
+ [{name, Username},
+ {user_who_performed_action, ActingUser}]),
+ R
+ catch
+ throw:{error, {no_such_user, _}} = Error ->
+ rabbit_log:warning("Failed to change password for user '~s': the user does not exist", [Username]),
+ throw(Error);
+ throw:Error ->
+ rabbit_log:warning("Failed to change password for user '~s': ~p", [Username, Error]),
+ throw(Error);
+ exit:Error ->
+ rabbit_log:warning("Failed to change password for user '~s': ~p", [Username, Error]),
+ exit(Error)
+ end.
+
+-spec clear_password(rabbit_types:username(), rabbit_types:username()) -> 'ok'.
+
+clear_password(Username, ActingUser) ->
+ rabbit_log:info("Clearing password for '~s'~n", [Username]),
+ R = change_password_hash(Username, <<"">>),
+ rabbit_event:notify(user_password_cleared,
+ [{name, Username},
+ {user_who_performed_action, ActingUser}]),
+ R.
+
+-spec hash_password
+ (module(), rabbit_types:password()) -> rabbit_types:password_hash().
+
+hash_password(HashingMod, Cleartext) ->
+ rabbit_password:hash(HashingMod, Cleartext).
+
+-spec change_password_hash
+ (rabbit_types:username(), rabbit_types:password_hash()) -> 'ok'.
+
+change_password_hash(Username, PasswordHash) ->
+ change_password_hash(Username, PasswordHash, rabbit_password:hashing_mod()).
+
+
+change_password_hash(Username, PasswordHash, HashingAlgorithm) ->
+ update_user(Username, fun(User) ->
+ internal_user:set_password_hash(User,
+ PasswordHash, HashingAlgorithm)
+ end).
+
+-spec set_tags(rabbit_types:username(), [atom()], rabbit_types:username()) -> 'ok'.
+
+set_tags(Username, Tags, ActingUser) ->
+ ConvertedTags = [rabbit_data_coercion:to_atom(I) || I <- Tags],
+ rabbit_log:debug("Asked to set user tags for user '~s' to ~p", [Username, ConvertedTags]),
+ try
+ R = update_user(Username, fun(User) ->
+ internal_user:set_tags(User, ConvertedTags)
+ end),
+ rabbit_log:info("Successfully set user tags for user '~s' to ~p", [Username, ConvertedTags]),
+ rabbit_event:notify(user_tags_set, [{name, Username}, {tags, ConvertedTags},
+ {user_who_performed_action, ActingUser}]),
+ R
+ catch
+ throw:{error, {no_such_user, _}} = Error ->
+ rabbit_log:warning("Failed to set tags for user '~s': the user does not exist", [Username]),
+ throw(Error);
+ throw:Error ->
+ rabbit_log:warning("Failed to set tags for user '~s': ~p", [Username, Error]),
+ throw(Error);
+ exit:Error ->
+ rabbit_log:warning("Failed to set tags for user '~s': ~p", [Username, Error]),
+ exit(Error)
+ end.
+
+-spec set_permissions
+ (rabbit_types:username(), rabbit_types:vhost(), regexp(), regexp(),
+ regexp(), rabbit_types:username()) ->
+ 'ok'.
+
+set_permissions(Username, VirtualHost, ConfigurePerm, WritePerm, ReadPerm, ActingUser) ->
+ rabbit_log:debug("Asked to set permissions for "
+ "'~s' in virtual host '~s' to '~s', '~s', '~s'",
+ [Username, VirtualHost, ConfigurePerm, WritePerm, ReadPerm]),
+ lists:foreach(
+ fun (RegexpBin) ->
+ Regexp = binary_to_list(RegexpBin),
+ case re:compile(Regexp) of
+ {ok, _} -> ok;
+ {error, Reason} ->
+ rabbit_log:warning("Failed to set permissions for '~s' in virtual host '~s': "
+ "regular expression '~s' is invalid",
+ [Username, VirtualHost, RegexpBin]),
+ throw({error, {invalid_regexp, Regexp, Reason}})
+ end
+ end, [ConfigurePerm, WritePerm, ReadPerm]),
+ try
+ R = rabbit_misc:execute_mnesia_transaction(
+ rabbit_vhost:with_user_and_vhost(
+ Username, VirtualHost,
+ fun () -> ok = mnesia:write(
+ rabbit_user_permission,
+ #user_permission{user_vhost = #user_vhost{
+ username = Username,
+ virtual_host = VirtualHost},
+ permission = #permission{
+ configure = ConfigurePerm,
+ write = WritePerm,
+ read = ReadPerm}},
+ write)
+ end)),
+ rabbit_log:info("Successfully set permissions for "
+ "'~s' in virtual host '~s' to '~s', '~s', '~s'",
+ [Username, VirtualHost, ConfigurePerm, WritePerm, ReadPerm]),
+ rabbit_event:notify(permission_created, [{user, Username},
+ {vhost, VirtualHost},
+ {configure, ConfigurePerm},
+ {write, WritePerm},
+ {read, ReadPerm},
+ {user_who_performed_action, ActingUser}]),
+ R
+ catch
+ throw:{error, {no_such_vhost, _}} = Error ->
+ rabbit_log:warning("Failed to set permissions for '~s': virtual host '~s' does not exist",
+ [Username, VirtualHost]),
+ throw(Error);
+ throw:{error, {no_such_user, _}} = Error ->
+ rabbit_log:warning("Failed to set permissions for '~s': the user does not exist",
+ [Username]),
+ throw(Error);
+ throw:Error ->
+ rabbit_log:warning("Failed to set permissions for '~s' in virtual host '~s': ~p",
+ [Username, VirtualHost, Error]),
+ throw(Error);
+ exit:Error ->
+ rabbit_log:warning("Failed to set permissions for '~s' in virtual host '~s': ~p",
+ [Username, VirtualHost, Error]),
+ exit(Error)
+ end.
+
+-spec clear_permissions
+ (rabbit_types:username(), rabbit_types:vhost(), rabbit_types:username()) -> 'ok'.
+
+clear_permissions(Username, VirtualHost, ActingUser) ->
+ rabbit_log:debug("Asked to clear permissions for '~s' in virtual host '~s'",
+ [Username, VirtualHost]),
+ try
+ R = rabbit_misc:execute_mnesia_transaction(
+ rabbit_vhost:with_user_and_vhost(
+ Username, VirtualHost,
+ fun () ->
+ ok = mnesia:delete({rabbit_user_permission,
+ #user_vhost{username = Username,
+ virtual_host = VirtualHost}})
+ end)),
+ rabbit_log:info("Successfully cleared permissions for '~s' in virtual host '~s'",
+ [Username, VirtualHost]),
+ rabbit_event:notify(permission_deleted, [{user, Username},
+ {vhost, VirtualHost},
+ {user_who_performed_action, ActingUser}]),
+ R
+ catch
+ throw:{error, {no_such_vhost, _}} = Error ->
+ rabbit_log:warning("Failed to clear permissions for '~s': virtual host '~s' does not exist",
+ [Username, VirtualHost]),
+ throw(Error);
+ throw:{error, {no_such_user, _}} = Error ->
+ rabbit_log:warning("Failed to clear permissions for '~s': the user does not exist",
+ [Username]),
+ throw(Error);
+ throw:Error ->
+ rabbit_log:warning("Failed to clear permissions for '~s' in virtual host '~s': ~p",
+ [Username, VirtualHost, Error]),
+ throw(Error);
+ exit:Error ->
+ rabbit_log:warning("Failed to clear permissions for '~s' in virtual host '~s': ~p",
+ [Username, VirtualHost, Error]),
+ exit(Error)
+ end.
+
+
+update_user(Username, Fun) ->
+ rabbit_misc:execute_mnesia_transaction(
+ rabbit_misc:with_user(
+ Username,
+ fun () ->
+ {ok, User} = lookup_user(Username),
+ ok = mnesia:write(rabbit_user, Fun(User), write)
+ end)).
+
+set_topic_permissions(Username, VirtualHost, Exchange, WritePerm, ReadPerm, ActingUser) ->
+ rabbit_log:debug("Asked to set topic permissions on exchange '~s' for "
+ "user '~s' in virtual host '~s' to '~s', '~s'",
+ [Exchange, Username, VirtualHost, WritePerm, ReadPerm]),
+ WritePermRegex = rabbit_data_coercion:to_binary(WritePerm),
+ ReadPermRegex = rabbit_data_coercion:to_binary(ReadPerm),
+ lists:map(
+ fun (RegexpBin) ->
+ case re:compile(RegexpBin) of
+ {ok, _} -> ok;
+ {error, Reason} ->
+ rabbit_log:warning("Failed to set topic permissions on exchange '~s' for "
+ "'~s' in virtual host '~s': regular expression '~s' is invalid",
+ [Exchange, Username, VirtualHost, RegexpBin]),
+ throw({error, {invalid_regexp, RegexpBin, Reason}})
+ end
+ end, [WritePerm, ReadPerm]),
+ try
+ R = rabbit_misc:execute_mnesia_transaction(
+ rabbit_vhost:with_user_and_vhost(
+ Username, VirtualHost,
+ fun () -> ok = mnesia:write(
+ rabbit_topic_permission,
+ #topic_permission{
+ topic_permission_key = #topic_permission_key{
+ user_vhost = #user_vhost{
+ username = Username,
+ virtual_host = VirtualHost},
+ exchange = Exchange
+ },
+ permission = #permission{
+ write = WritePermRegex,
+ read = ReadPermRegex
+ }
+ },
+ write)
+ end)),
+ rabbit_log:info("Successfully set topic permissions on exchange '~s' for "
+ "'~s' in virtual host '~s' to '~s', '~s'",
+ [Exchange, Username, VirtualHost, WritePerm, ReadPerm]),
+ rabbit_event:notify(topic_permission_created, [
+ {user, Username},
+ {vhost, VirtualHost},
+ {exchange, Exchange},
+ {write, WritePermRegex},
+ {read, ReadPermRegex},
+ {user_who_performed_action, ActingUser}]),
+ R
+ catch
+ throw:{error, {no_such_vhost, _}} = Error ->
+ rabbit_log:warning("Failed to set topic permissions on exchange '~s' for '~s': virtual host '~s' does not exist.",
+ [Exchange, Username, VirtualHost]),
+ throw(Error);
+ throw:{error, {no_such_user, _}} = Error ->
+ rabbit_log:warning("Failed to set topic permissions on exchange '~s' for '~s': the user does not exist.",
+ [Exchange, Username]),
+ throw(Error);
+ throw:Error ->
+ rabbit_log:warning("Failed to set topic permissions on exchange '~s' for '~s' in virtual host '~s': ~p.",
+ [Exchange, Username, VirtualHost, Error]),
+ throw(Error);
+ exit:Error ->
+ rabbit_log:warning("Failed to set topic permissions on exchange '~s' for '~s' in virtual host '~s': ~p.",
+ [Exchange, Username, VirtualHost, Error]),
+ exit(Error)
+    end.
+
+clear_topic_permissions(Username, VirtualHost, ActingUser) ->
+ rabbit_log:debug("Asked to clear topic permissions for '~s' in virtual host '~s'",
+ [Username, VirtualHost]),
+ try
+ R = rabbit_misc:execute_mnesia_transaction(
+ rabbit_vhost:with_user_and_vhost(
+ Username, VirtualHost,
+ fun () ->
+ ListFunction = match_user_vhost_topic_permission(Username, VirtualHost),
+ List = ListFunction(),
+ lists:foreach(fun(X) ->
+ ok = mnesia:delete_object(rabbit_topic_permission, X, write)
+ end, List)
+ end)),
+ rabbit_log:info("Successfully cleared topic permissions for '~s' in virtual host '~s'",
+ [Username, VirtualHost]),
+ rabbit_event:notify(topic_permission_deleted, [{user, Username},
+ {vhost, VirtualHost},
+ {user_who_performed_action, ActingUser}]),
+ R
+ catch
+ throw:{error, {no_such_vhost, _}} = Error ->
+ rabbit_log:warning("Failed to clear topic permissions for '~s': virtual host '~s' does not exist",
+ [Username, VirtualHost]),
+ throw(Error);
+ throw:{error, {no_such_user, _}} = Error ->
+ rabbit_log:warning("Failed to clear topic permissions for '~s': the user does not exist",
+ [Username]),
+ throw(Error);
+ throw:Error ->
+ rabbit_log:warning("Failed to clear topic permissions for '~s' in virtual host '~s': ~p",
+ [Username, VirtualHost, Error]),
+ throw(Error);
+ exit:Error ->
+ rabbit_log:warning("Failed to clear topic permissions for '~s' in virtual host '~s': ~p",
+ [Username, VirtualHost, Error]),
+ exit(Error)
+ end.
+
+clear_topic_permissions(Username, VirtualHost, Exchange, ActingUser) ->
+ rabbit_log:debug("Asked to clear topic permissions on exchange '~s' for '~s' in virtual host '~s'",
+ [Exchange, Username, VirtualHost]),
+ try
+ R = rabbit_misc:execute_mnesia_transaction(
+ rabbit_vhost:with_user_and_vhost(
+ Username, VirtualHost,
+ fun () ->
+ ok = mnesia:delete(rabbit_topic_permission,
+ #topic_permission_key{
+ user_vhost = #user_vhost{
+ username = Username,
+ virtual_host = VirtualHost},
+ exchange = Exchange
+ }, write)
+ end)),
+ rabbit_log:info("Successfully cleared topic permissions on exchange '~s' for '~s' in virtual host '~s'",
+ [Exchange, Username, VirtualHost]),
+ rabbit_event:notify(permission_deleted, [{user, Username},
+ {vhost, VirtualHost},
+ {user_who_performed_action, ActingUser}]),
+ R
+ catch
+ throw:{error, {no_such_vhost, _}} = Error ->
+ rabbit_log:warning("Failed to clear topic permissions on exchange '~s' for '~s': virtual host '~s' does not exist",
+ [Exchange, Username, VirtualHost]),
+ throw(Error);
+ throw:{error, {no_such_user, _}} = Error ->
+ rabbit_log:warning("Failed to clear topic permissions on exchange '~s' for '~s': the user does not exist",
+ [Exchange, Username]),
+ throw(Error);
+ throw:Error ->
+ rabbit_log:warning("Failed to clear topic permissions on exchange '~s' for '~s' in virtual host '~s': ~p",
+ [Exchange, Username, VirtualHost, Error]),
+ throw(Error);
+ exit:Error ->
+ rabbit_log:warning("Failed to clear topic permissions on exchange '~s' for '~s' in virtual host '~s': ~p",
+ [Exchange, Username, VirtualHost, Error]),
+ exit(Error)
+ end.
+
+put_user(User, ActingUser) -> put_user(User, undefined, ActingUser).
+
+put_user(User, Version, ActingUser) ->
+ Username = maps:get(name, User),
+ HasPassword = maps:is_key(password, User),
+ HasPasswordHash = maps:is_key(password_hash, User),
+ Password = maps:get(password, User, undefined),
+ PasswordHash = maps:get(password_hash, User, undefined),
+
+ Tags = case {maps:get(tags, User, undefined), maps:get(administrator, User, undefined)} of
+ {undefined, undefined} ->
+ throw({error, tags_not_present});
+ {undefined, AdminS} ->
+ case rabbit_misc:parse_bool(AdminS) of
+ true -> [administrator];
+ false -> []
+ end;
+ {TagsS, _} ->
+ [list_to_atom(string:strip(T)) ||
+ T <- string:tokens(binary_to_list(TagsS), ",")]
+ end,
+
+ %% pre-configured, only applies to newly created users
+ Permissions = maps:get(permissions, User, undefined),
+
+ PassedCredentialValidation =
+ case {HasPassword, HasPasswordHash} of
+ {true, false} ->
+ rabbit_credential_validation:validate(Username, Password) =:= ok;
+ {false, true} -> true;
+ _ ->
+ rabbit_credential_validation:validate(Username, Password) =:= ok
+ end,
+
+ case exists(Username) of
+ true ->
+ case {HasPassword, HasPasswordHash} of
+ {true, false} ->
+ update_user_password(PassedCredentialValidation, Username, Password, Tags, ActingUser);
+ {false, true} ->
+ update_user_password_hash(Username, PasswordHash, Tags, User, Version, ActingUser);
+ {true, true} ->
+ throw({error, both_password_and_password_hash_are_provided});
+ %% clear password, update tags if needed
+ _ ->
+ rabbit_auth_backend_internal:set_tags(Username, Tags, ActingUser),
+ rabbit_auth_backend_internal:clear_password(Username, ActingUser)
+ end;
+ false ->
+ case {HasPassword, HasPasswordHash} of
+ {true, false} ->
+ create_user_with_password(PassedCredentialValidation, Username, Password, Tags, Permissions, ActingUser);
+ {false, true} ->
+ create_user_with_password_hash(Username, PasswordHash, Tags, User, Version, Permissions, ActingUser);
+ {true, true} ->
+ throw({error, both_password_and_password_hash_are_provided});
+ {false, false} ->
+                    %% this user won't be able to sign in using
+                    %% a username/password pair, but can still authenticate via
+                    %% x509 certificates or authn backends such as HTTP or LDAP.
+ create_user_with_password(PassedCredentialValidation, Username, <<"">>, Tags, Permissions, ActingUser)
+ end
+ end.
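+%% A hypothetical example of the user map handled above (keys inferred
+%% from this function; the values are illustrative):
+%%
+%%   put_user(#{name     => <<"user1">>,
+%%              password => <<"s3kRe7">>,
+%%              tags     => <<"administrator,monitoring">>},
+%%            <<"acting-user">>).
+%%
+%% At most one of password/password_hash may be supplied; providing both
+%% throws {error, both_password_and_password_hash_are_provided}.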
+
+update_user_password(_PassedCredentialValidation = true, Username, Password, Tags, ActingUser) ->
+ rabbit_auth_backend_internal:change_password(Username, Password, ActingUser),
+ rabbit_auth_backend_internal:set_tags(Username, Tags, ActingUser);
+update_user_password(_PassedCredentialValidation = false, _Username, _Password, _Tags, _ActingUser) ->
+ %% we don't log here because
+ %% rabbit_auth_backend_internal will do it
+ throw({error, credential_validation_failed}).
+
+update_user_password_hash(Username, PasswordHash, Tags, User, Version, ActingUser) ->
+    %% when a hash is provided, credential validation
+ %% is not applied
+ HashingAlgorithm = hashing_algorithm(User, Version),
+
+ Hash = rabbit_misc:b64decode_or_throw(PasswordHash),
+ rabbit_auth_backend_internal:change_password_hash(
+ Username, Hash, HashingAlgorithm),
+ rabbit_auth_backend_internal:set_tags(Username, Tags, ActingUser).
+
+create_user_with_password(_PassedCredentialValidation = true, Username, Password, Tags, undefined, ActingUser) ->
+ rabbit_auth_backend_internal:add_user(Username, Password, ActingUser),
+ rabbit_auth_backend_internal:set_tags(Username, Tags, ActingUser);
+create_user_with_password(_PassedCredentialValidation = true, Username, Password, Tags, PreconfiguredPermissions, ActingUser) ->
+ rabbit_auth_backend_internal:add_user(Username, Password, ActingUser),
+ rabbit_auth_backend_internal:set_tags(Username, Tags, ActingUser),
+ preconfigure_permissions(Username, PreconfiguredPermissions, ActingUser);
+create_user_with_password(_PassedCredentialValidation = false, _Username, _Password, _Tags, _, _) ->
+ %% we don't log here because
+ %% rabbit_auth_backend_internal will do it
+ throw({error, credential_validation_failed}).
+
+create_user_with_password_hash(Username, PasswordHash, Tags, User, Version, PreconfiguredPermissions, ActingUser) ->
+    %% when a hash is provided, credential validation
+ %% is not applied
+ HashingAlgorithm = hashing_algorithm(User, Version),
+ Hash = rabbit_misc:b64decode_or_throw(PasswordHash),
+
+ %% first we create a user with dummy credentials and no
+ %% validation applied, then we update password hash
+ TmpPassword = rabbit_guid:binary(rabbit_guid:gen_secure(), "tmp"),
+ rabbit_auth_backend_internal:add_user_sans_validation(Username, TmpPassword, ActingUser),
+
+ rabbit_auth_backend_internal:change_password_hash(
+ Username, Hash, HashingAlgorithm),
+ rabbit_auth_backend_internal:set_tags(Username, Tags, ActingUser),
+ preconfigure_permissions(Username, PreconfiguredPermissions, ActingUser).
+
+preconfigure_permissions(_Username, undefined, _ActingUser) ->
+ ok;
+preconfigure_permissions(Username, Map, ActingUser) when is_map(Map) ->
+ maps:map(fun(VHost, M) ->
+ rabbit_auth_backend_internal:set_permissions(Username, VHost,
+ maps:get(<<"configure">>, M),
+ maps:get(<<"write">>, M),
+ maps:get(<<"read">>, M),
+ ActingUser)
+ end,
+ Map),
+ ok.
+
+set_user_limits(Username, Definition, ActingUser) when is_list(Definition); is_binary(Definition) ->
+ case rabbit_feature_flags:is_enabled(user_limits) of
+ true ->
+ case rabbit_json:try_decode(rabbit_data_coercion:to_binary(Definition)) of
+ {ok, Term} ->
+ validate_parameters_and_update_limit(Username, Term, ActingUser);
+ {error, Reason} ->
+ {error_string, rabbit_misc:format(
+ "JSON decoding error. Reason: ~ts", [Reason])}
+ end;
+ false -> {error_string, "cannot set any user limits: the user_limits feature flag is not enabled"}
+ end;
+set_user_limits(Username, Definition, ActingUser) when is_map(Definition) ->
+ case rabbit_feature_flags:is_enabled(user_limits) of
+ true -> validate_parameters_and_update_limit(Username, Definition, ActingUser);
+ false -> {error_string, "cannot set any user limits: the user_limits feature flag is not enabled"}
+ end.
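+%% A hypothetical call with a JSON definition (the limit keys are the
+%% ones accepted by user_limit_validation/0 below):
+%%
+%%   set_user_limits(<<"user1">>,
+%%                   <<"{\"max-connections\": 64, \"max-channels\": 32}">>,
+%%                   <<"acting-user">>).
+%%
+%% The same limits can also be passed directly as a map instead of JSON.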
+
+validate_parameters_and_update_limit(Username, Term, ActingUser) ->
+ case flatten_errors(rabbit_parameter_validation:proplist(
+ <<"user-limits">>, user_limit_validation(), Term)) of
+ ok ->
+ update_user(Username, fun(User) ->
+ internal_user:update_limits(add, User, Term)
+ end),
+ notify_limit_set(Username, ActingUser, Term);
+ {errors, [{Reason, Arguments}]} ->
+ {error_string, rabbit_misc:format(Reason, Arguments)}
+ end.
+
+user_limit_validation() ->
+ [{<<"max-connections">>, fun rabbit_parameter_validation:integer/2, optional},
+ {<<"max-channels">>, fun rabbit_parameter_validation:integer/2, optional}].
+
+clear_user_limits(Username, <<"all">>, ActingUser) ->
+ update_user(Username, fun(User) ->
+ internal_user:clear_limits(User)
+ end),
+ notify_limit_clear(Username, ActingUser);
+clear_user_limits(Username, LimitType, ActingUser) ->
+ update_user(Username, fun(User) ->
+ internal_user:update_limits(remove, User, LimitType)
+ end),
+ notify_limit_clear(Username, ActingUser).
+
+flatten_errors(L) ->
+ case [{F, A} || I <- lists:flatten([L]), {error, F, A} <- [I]] of
+ [] -> ok;
+ E -> {errors, E}
+ end.
+
+%%----------------------------------------------------------------------------
+%% Listing
+
+-define(PERMS_INFO_KEYS, [configure, write, read]).
+-define(USER_INFO_KEYS, [user, tags]).
+
+-spec user_info_keys() -> rabbit_types:info_keys().
+
+user_info_keys() -> ?USER_INFO_KEYS.
+
+-spec perms_info_keys() -> rabbit_types:info_keys().
+
+perms_info_keys() -> [user, vhost | ?PERMS_INFO_KEYS].
+
+-spec vhost_perms_info_keys() -> rabbit_types:info_keys().
+
+vhost_perms_info_keys() -> [user | ?PERMS_INFO_KEYS].
+
+-spec user_perms_info_keys() -> rabbit_types:info_keys().
+
+user_perms_info_keys() -> [vhost | ?PERMS_INFO_KEYS].
+
+-spec user_vhost_perms_info_keys() -> rabbit_types:info_keys().
+
+user_vhost_perms_info_keys() -> ?PERMS_INFO_KEYS.
+
+topic_perms_info_keys() -> [user, vhost, exchange, write, read].
+user_topic_perms_info_keys() -> [vhost, exchange, write, read].
+vhost_topic_perms_info_keys() -> [user, exchange, write, read].
+user_vhost_topic_perms_info_keys() -> [exchange, write, read].
+
+all_users() -> mnesia:dirty_match_object(rabbit_user, internal_user:pattern_match_all()).
+
+-spec list_users() -> [rabbit_types:infos()].
+
+list_users() ->
+ [extract_internal_user_params(U) ||
+ U <- all_users()].
+
+-spec list_users(reference(), pid()) -> 'ok'.
+
+list_users(Ref, AggregatorPid) ->
+ rabbit_control_misc:emitting_map(
+ AggregatorPid, Ref,
+ fun(U) -> extract_internal_user_params(U) end,
+ all_users()).
+
+-spec list_permissions() -> [rabbit_types:infos()].
+
+list_permissions() ->
+ list_permissions(perms_info_keys(), match_user_vhost('_', '_')).
+
+list_permissions(Keys, QueryThunk) ->
+ [extract_user_permission_params(Keys, U) ||
+ U <- rabbit_misc:execute_mnesia_transaction(QueryThunk)].
+
+list_permissions(Keys, QueryThunk, Ref, AggregatorPid) ->
+ rabbit_control_misc:emitting_map(
+ AggregatorPid, Ref, fun(U) -> extract_user_permission_params(Keys, U) end,
+ rabbit_misc:execute_mnesia_transaction(QueryThunk)).
+
+filter_props(Keys, Props) -> [T || T = {K, _} <- Props, lists:member(K, Keys)].
+
+-spec list_user_permissions
+ (rabbit_types:username()) -> [rabbit_types:infos()].
+
+list_user_permissions(Username) ->
+ list_permissions(
+ user_perms_info_keys(),
+ rabbit_misc:with_user(Username, match_user_vhost(Username, '_'))).
+
+-spec list_user_permissions
+ (rabbit_types:username(), reference(), pid()) -> 'ok'.
+
+list_user_permissions(Username, Ref, AggregatorPid) ->
+ list_permissions(
+ user_perms_info_keys(),
+ rabbit_misc:with_user(Username, match_user_vhost(Username, '_')),
+ Ref, AggregatorPid).
+
+-spec list_vhost_permissions
+ (rabbit_types:vhost()) -> [rabbit_types:infos()].
+
+list_vhost_permissions(VHostPath) ->
+ list_permissions(
+ vhost_perms_info_keys(),
+ rabbit_vhost:with(VHostPath, match_user_vhost('_', VHostPath))).
+
+-spec list_vhost_permissions
+ (rabbit_types:vhost(), reference(), pid()) -> 'ok'.
+
+list_vhost_permissions(VHostPath, Ref, AggregatorPid) ->
+ list_permissions(
+ vhost_perms_info_keys(),
+ rabbit_vhost:with(VHostPath, match_user_vhost('_', VHostPath)),
+ Ref, AggregatorPid).
+
+-spec list_user_vhost_permissions
+ (rabbit_types:username(), rabbit_types:vhost()) -> [rabbit_types:infos()].
+
+list_user_vhost_permissions(Username, VHostPath) ->
+ list_permissions(
+ user_vhost_perms_info_keys(),
+ rabbit_vhost:with_user_and_vhost(
+ Username, VHostPath, match_user_vhost(Username, VHostPath))).
+
+extract_user_permission_params(Keys, #user_permission{
+ user_vhost =
+ #user_vhost{username = Username,
+ virtual_host = VHostPath},
+ permission = #permission{
+ configure = ConfigurePerm,
+ write = WritePerm,
+ read = ReadPerm}}) ->
+ filter_props(Keys, [{user, Username},
+ {vhost, VHostPath},
+ {configure, ConfigurePerm},
+ {write, WritePerm},
+ {read, ReadPerm}]).
+
+extract_internal_user_params(User) ->
+ [{user, internal_user:get_username(User)},
+ {tags, internal_user:get_tags(User)}].
+
+match_user_vhost(Username, VHostPath) ->
+ fun () -> mnesia:match_object(
+ rabbit_user_permission,
+ #user_permission{user_vhost = #user_vhost{
+ username = Username,
+ virtual_host = VHostPath},
+ permission = '_'},
+ read)
+ end.
+
+list_topic_permissions() ->
+ list_topic_permissions(topic_perms_info_keys(), match_user_vhost_topic_permission('_', '_')).
+
+list_user_topic_permissions(Username) ->
+ list_topic_permissions(user_topic_perms_info_keys(),
+ rabbit_misc:with_user(Username, match_user_vhost_topic_permission(Username, '_'))).
+
+list_vhost_topic_permissions(VHost) ->
+ list_topic_permissions(vhost_topic_perms_info_keys(),
+ rabbit_vhost:with(VHost, match_user_vhost_topic_permission('_', VHost))).
+
+list_user_vhost_topic_permissions(Username, VHost) ->
+ list_topic_permissions(user_vhost_topic_perms_info_keys(),
+ rabbit_vhost:with_user_and_vhost(Username, VHost, match_user_vhost_topic_permission(Username, VHost))).
+
+list_topic_permissions(Keys, QueryThunk) ->
+ [extract_topic_permission_params(Keys, U) ||
+ U <- rabbit_misc:execute_mnesia_transaction(QueryThunk)].
+
+match_user_vhost_topic_permission(Username, VHostPath) ->
+ match_user_vhost_topic_permission(Username, VHostPath, '_').
+
+match_user_vhost_topic_permission(Username, VHostPath, Exchange) ->
+ fun () -> mnesia:match_object(
+ rabbit_topic_permission,
+ #topic_permission{topic_permission_key = #topic_permission_key{
+ user_vhost = #user_vhost{
+ username = Username,
+ virtual_host = VHostPath},
+ exchange = Exchange},
+ permission = '_'},
+ read)
+ end.
+
+extract_topic_permission_params(Keys, #topic_permission{
+ topic_permission_key = #topic_permission_key{
+ user_vhost = #user_vhost{username = Username,
+ virtual_host = VHostPath},
+ exchange = Exchange},
+ permission = #permission{
+ write = WritePerm,
+ read = ReadPerm}}) ->
+ filter_props(Keys, [{user, Username},
+ {vhost, VHostPath},
+ {exchange, Exchange},
+ {write, WritePerm},
+ {read, ReadPerm}]).
+
+hashing_algorithm(User, Version) ->
+ case maps:get(hashing_algorithm, User, undefined) of
+ undefined ->
+ case Version of
+ %% 3.6.1 and later versions are supposed to have
+ %% the algorithm exported and thus not need a default
+ <<"3.6.0">> -> rabbit_password_hashing_sha256;
+ <<"3.5.", _/binary>> -> rabbit_password_hashing_md5;
+ <<"3.4.", _/binary>> -> rabbit_password_hashing_md5;
+ <<"3.3.", _/binary>> -> rabbit_password_hashing_md5;
+ <<"3.2.", _/binary>> -> rabbit_password_hashing_md5;
+ <<"3.1.", _/binary>> -> rabbit_password_hashing_md5;
+ <<"3.0.", _/binary>> -> rabbit_password_hashing_md5;
+ _ -> rabbit_password:hashing_mod()
+ end;
+ Alg -> rabbit_data_coercion:to_atom(Alg, utf8)
+ end.
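+%% Hypothetical examples of the version fallback above:
+%%
+%%   hashing_algorithm(#{}, <<"3.5.7">>)
+%%   %% => rabbit_password_hashing_md5 (pre-3.6.0 exports carry no algorithm)
+%%
+%%   hashing_algorithm(#{hashing_algorithm => <<"rabbit_password_hashing_sha256">>},
+%%                     undefined)
+%%   %% => rabbit_password_hashing_sha256 (an explicit value always wins)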
+
+is_over_connection_limit(Username) ->
+ Fun = fun() ->
+ rabbit_connection_tracking:count_tracked_items_in({user, Username})
+ end,
+ is_over_limit(Username, <<"max-connections">>, Fun).
+
+is_over_channel_limit(Username) ->
+ Fun = fun() ->
+ rabbit_channel_tracking:count_tracked_items_in({user, Username})
+ end,
+ is_over_limit(Username, <<"max-channels">>, Fun).
+
+is_over_limit(Username, LimitType, Fun) ->
+ case get_user_limit(Username, LimitType) of
+ undefined -> false;
+ {ok, 0} -> {true, 0};
+ {ok, Limit} ->
+ case Fun() >= Limit of
+ false -> false;
+ true -> {true, Limit}
+ end
+ end.
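+%% For instance (illustrative values): with <<"max-connections">> set to
+%% 10 and Fun() returning 10, is_over_limit/3 returns {true, 10}; a limit
+%% of 0 short-circuits to {true, 0} without calling Fun; no limit (or a
+%% negative one, see get_user_limit/2 below) yields false.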
+
+get_user_limit(Username, LimitType) ->
+ case lookup_user(Username) of
+ {ok, User} ->
+ case rabbit_misc:pget(LimitType, internal_user:get_limits(User)) of
+ undefined -> undefined;
+ N when N < 0 -> undefined;
+ N when N >= 0 -> {ok, N}
+ end;
+ _ ->
+ undefined
+ end.
+
+get_user_limits() ->
+ [{internal_user:get_username(U), internal_user:get_limits(U)} ||
+ U <- all_users(),
+ internal_user:get_limits(U) =/= #{}].
+
+get_user_limits(Username) ->
+ case lookup_user(Username) of
+ {ok, User} -> internal_user:get_limits(User);
+ _ -> undefined
+ end.
+
+notify_limit_set(Username, ActingUser, Term) ->
+ rabbit_event:notify(user_limits_set,
+ [{name, <<"limits">>}, {user_who_performed_action, ActingUser},
+ {username, Username} | maps:to_list(Term)]).
+
+notify_limit_clear(Username, ActingUser) ->
+ rabbit_event:notify(user_limits_cleared,
+ [{name, <<"limits">>}, {user_who_performed_action, ActingUser},
+ {username, Username}]).
diff --git a/deps/rabbit/src/rabbit_auth_mechanism_amqplain.erl b/deps/rabbit/src/rabbit_auth_mechanism_amqplain.erl
new file mode 100644
index 0000000000..c81a337153
--- /dev/null
+++ b/deps/rabbit/src/rabbit_auth_mechanism_amqplain.erl
@@ -0,0 +1,54 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_auth_mechanism_amqplain).
+-include("rabbit.hrl").
+
+-behaviour(rabbit_auth_mechanism).
+
+-export([description/0, should_offer/1, init/1, handle_response/2]).
+
+-rabbit_boot_step({?MODULE,
+ [{description, "auth mechanism amqplain"},
+ {mfa, {rabbit_registry, register,
+ [auth_mechanism, <<"AMQPLAIN">>, ?MODULE]}},
+ {requires, rabbit_registry},
+ {enables, kernel_ready}]}).
+
+%% AMQPLAIN, as used by Qpid Python test suite. The 0-8 spec actually
+%% defines this as PLAIN, but in 0-9 that definition is gone, instead
+%% referring generically to "SASL security mechanism", i.e. the above.
+
+description() ->
+ [{description, <<"QPid AMQPLAIN mechanism">>}].
+
+should_offer(_Sock) ->
+ true.
+
+init(_Sock) ->
+ [].
+
+-define(IS_STRING_TYPE(Type), Type =:= longstr orelse Type =:= shortstr).
+
+handle_response(Response, _State) ->
+ LoginTable = rabbit_binary_parser:parse_table(Response),
+ case {lists:keysearch(<<"LOGIN">>, 1, LoginTable),
+ lists:keysearch(<<"PASSWORD">>, 1, LoginTable)} of
+ {{value, {_, UserType, User}},
+         {value, {_, PassType, Pass}}} when ?IS_STRING_TYPE(UserType),
+                                            ?IS_STRING_TYPE(PassType) ->
+ rabbit_access_control:check_user_pass_login(User, Pass);
+ {{value, {_, _UserType, _User}},
+ {value, {_, _PassType, _Pass}}} ->
+ {protocol_error,
+ "AMQPLAIN auth info ~w uses unsupported type for LOGIN or PASSWORD field",
+ [LoginTable]};
+ _ ->
+ {protocol_error,
+ "AMQPLAIN auth info ~w is missing LOGIN or PASSWORD field",
+ [LoginTable]}
+ end.
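+%% An illustrative decoded response (the raw Response is an AMQP field
+%% table; the values here are hypothetical):
+%%
+%%   rabbit_binary_parser:parse_table(Response) =:=
+%%       [{<<"LOGIN">>,    longstr, <<"guest">>},
+%%        {<<"PASSWORD">>, longstr, <<"guest">>}]
+%%
+%% leads to rabbit_access_control:check_user_pass_login(<<"guest">>,
+%% <<"guest">>).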
diff --git a/deps/rabbit/src/rabbit_auth_mechanism_cr_demo.erl b/deps/rabbit/src/rabbit_auth_mechanism_cr_demo.erl
new file mode 100644
index 0000000000..15439c461f
--- /dev/null
+++ b/deps/rabbit/src/rabbit_auth_mechanism_cr_demo.erl
@@ -0,0 +1,48 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_auth_mechanism_cr_demo).
+-include("rabbit.hrl").
+
+-behaviour(rabbit_auth_mechanism).
+
+-export([description/0, should_offer/1, init/1, handle_response/2]).
+
+-rabbit_boot_step({?MODULE,
+ [{description, "auth mechanism cr-demo"},
+ {mfa, {rabbit_registry, register,
+ [auth_mechanism, <<"RABBIT-CR-DEMO">>,
+ ?MODULE]}},
+ {requires, rabbit_registry},
+ {enables, kernel_ready}]}).
+
+-record(state, {username = undefined}).
+
+%% Provides equivalent security to PLAIN but demonstrates the use of Connection.Secure(Ok)
+%% START-OK: Username
+%% SECURE: "Please tell me your password"
+%% SECURE-OK: "My password is ~s", [Password]
+
+description() ->
+ [{description, <<"RabbitMQ Demo challenge-response authentication "
+ "mechanism">>}].
+
+should_offer(_Sock) ->
+ true.
+
+init(_Sock) ->
+ #state{}.
+
+handle_response(Response, State = #state{username = undefined}) ->
+ {challenge, <<"Please tell me your password">>,
+ State#state{username = Response}};
+
+handle_response(<<"My password is ", Password/binary>>,
+ #state{username = Username}) ->
+ rabbit_access_control:check_user_pass_login(Username, Password);
+handle_response(Response, _State) ->
+ {protocol_error, "Invalid response '~s'", [Response]}.
diff --git a/deps/rabbit/src/rabbit_auth_mechanism_plain.erl b/deps/rabbit/src/rabbit_auth_mechanism_plain.erl
new file mode 100644
index 0000000000..d704c72400
--- /dev/null
+++ b/deps/rabbit/src/rabbit_auth_mechanism_plain.erl
@@ -0,0 +1,60 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_auth_mechanism_plain).
+-include("rabbit.hrl").
+
+-behaviour(rabbit_auth_mechanism).
+
+-export([description/0, should_offer/1, init/1, handle_response/2]).
+
+-rabbit_boot_step({?MODULE,
+ [{description, "auth mechanism plain"},
+ {mfa, {rabbit_registry, register,
+ [auth_mechanism, <<"PLAIN">>, ?MODULE]}},
+ {requires, rabbit_registry},
+ {enables, kernel_ready}]}).
+
+%% SASL PLAIN, as used by the Qpid Java client and our clients. Also,
+%% apparently, by OpenAMQ.
+
+description() ->
+ [{description, <<"SASL PLAIN authentication mechanism">>}].
+
+should_offer(_Sock) ->
+ true.
+
+init(_Sock) ->
+ [].
+
+handle_response(Response, _State) ->
+ case extract_user_pass(Response) of
+ {ok, User, Pass} ->
+ rabbit_access_control:check_user_pass_login(User, Pass);
+ error ->
+ {protocol_error, "response ~p invalid", [Response]}
+ end.
+
+extract_user_pass(Response) ->
+ case extract_elem(Response) of
+ {ok, User, Response1} -> case extract_elem(Response1) of
+ {ok, Pass, <<>>} -> {ok, User, Pass};
+ _ -> error
+ end;
+ error -> error
+ end.
+
+extract_elem(<<0:8, Rest/binary>>) ->
+ Count = next_null_pos(Rest, 0),
+ <<Elem:Count/binary, Rest1/binary>> = Rest,
+ {ok, Elem, Rest1};
+extract_elem(_) ->
+ error.
+
+next_null_pos(<<>>, Count) -> Count;
+next_null_pos(<<0:8, _Rest/binary>>, Count) -> Count;
+next_null_pos(<<_:8, Rest/binary>>, Count) -> next_null_pos(Rest, Count + 1).
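+%% A worked example of the SASL PLAIN wire format parsed above (the
+%% leading NUL terminates an empty authzid, then user and password are
+%% NUL-separated):
+%%
+%%   extract_user_pass(<<0, "guest", 0, "guest">>)
+%%   %% => {ok, <<"guest">>, <<"guest">>}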
diff --git a/deps/rabbit/src/rabbit_autoheal.erl b/deps/rabbit/src/rabbit_autoheal.erl
new file mode 100644
index 0000000000..6380d71895
--- /dev/null
+++ b/deps/rabbit/src/rabbit_autoheal.erl
@@ -0,0 +1,456 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_autoheal).
+
+-export([init/0, enabled/0, maybe_start/1, rabbit_down/2, node_down/2,
+ handle_msg/3, process_down/2]).
+
+%% The named process we are running in.
+-define(SERVER, rabbit_node_monitor).
+
+-define(MNESIA_STOPPED_PING_INTERNAL, 200).
+
+-define(AUTOHEAL_STATE_AFTER_RESTART, rabbit_autoheal_state_after_restart).
+
+%%----------------------------------------------------------------------------
+
+%% In order to autoheal we want to:
+%%
+%% * Find the winning partition
+%% * Stop all nodes in other partitions
+%% * Wait for them all to be stopped
+%% * Start them again
+%%
+%% To keep things simple, we assume all nodes are up. We don't start
+%% unless all nodes are up, and if a node goes down we abandon the
+%% whole process. To further keep things simple we also defer the
+%% decision as to the winning node to the "leader" - arbitrarily
+%% selected as the first node in the cluster.
+%%
+%% To coordinate the restarting nodes we pick a special node from the
+%% winning partition - the "winner". Restarting nodes then stop, and
+%% wait for it to tell them it is safe to start again. The winner
+%% determines that a node has stopped just by seeing if its rabbit app
+%% stops - if a node stops for any other reason it just gets a message
+%% it will ignore, and otherwise we carry on.
+%%
+%% Meanwhile, the leader may continue to receive new autoheal requests:
+%% all of them are ignored. The winner notifies the leader when the
+%% current autoheal process is finished (ie. when all losers stopped and
+%% were asked to start again) or was aborted. When the leader receives
+%% the notification or if it loses contact with the winner, it can
+%% accept new autoheal requests.
+%%
+%% The winner and the leader are not necessarily the same node.
+%%
+%% The leader can be a loser and will restart in this case. It remembers
+%% there is an autoheal in progress by temporarily saving the autoheal
+%% state to the application environment.
+%%
+%% == Possible states ==
+%%
+%% not_healing
+%% - the default
+%%
+%% {winner_waiting, OutstandingStops, Notify}
+%% - we are the winner and are waiting for all losing nodes to stop
+%% before telling them they can restart
+%%
+%% {leader_waiting, Winner, Notify}
+%% - we are the leader, and have already assigned the winner and losers.
+%% We are waiting for a confirmation from the winner that the autoheal
+%% process has ended. Meanwhile we can ignore autoheal requests.
+%% Because we may be a loser too, this state is saved to the application
+%% environment and restored on startup.
+%%
+%% restarting
+%% - we are restarting. Of course the node monitor immediately dies
+%% then so this state does not last long. We therefore send the
+%% autoheal_safe_to_start message to the rabbit_outside_app_process
+%% instead.
+%%
+%% == Message flow ==
+%%
+%% 1. Any node (leader included) >> {request_start, node()} >> Leader
+%% When Mnesia detects it is running partitioned or
+%% when a remote node starts, rabbit_node_monitor calls
+%% rabbit_autoheal:maybe_start/1. The message above is sent to the
+%% leader so the leader can take a decision.
+%%
+%% 2. Leader >> {become_winner, Losers} >> Winner
+%% The leader notifies the winner so the latter can proceed with
+%% the autoheal.
+%%
+%% 3. Winner >> {winner_is, Winner} >> All losers
+%% The winner notifies losers they must stop.
+%%
+%% 4. Winner >> autoheal_safe_to_start >> All losers
+%% When either all losers stopped or the autoheal process was
+%% aborted, the winner notifies losers they can start again.
+%%
+%% 5. Leader >> report_autoheal_status >> Winner
+%% The leader asks the autoheal status to the winner. This only
+%% happens when the leader is a loser too. If this is not the case,
+%% this message is never sent.
+%%
+%% 6. Winner >> {autoheal_finished, Winner} >> Leader
+%% The winner notifies the leader that the autoheal process was
+%% either finished or aborted (ie. autoheal_safe_to_start was sent
+%% to losers).
+
+%%----------------------------------------------------------------------------
+
+init() ->
+ %% We check the application environment for a saved autoheal state
+ %% saved during a restart. If this node is a leader, it is used
+ %% to determine if it needs to ask the winner to report about the
+ %% autoheal progress.
+ State = case application:get_env(rabbit, ?AUTOHEAL_STATE_AFTER_RESTART) of
+ {ok, S} -> S;
+ undefined -> not_healing
+ end,
+ ok = application:unset_env(rabbit, ?AUTOHEAL_STATE_AFTER_RESTART),
+ case State of
+ {leader_waiting, Winner, _} ->
+ rabbit_log:info(
+ "Autoheal: in progress, requesting report from ~p~n", [Winner]),
+ send(Winner, report_autoheal_status);
+ _ ->
+ ok
+ end,
+ State.
+
+maybe_start(not_healing) ->
+ case enabled() of
+ true -> Leader = leader(),
+ send(Leader, {request_start, node()}),
+ rabbit_log:info("Autoheal request sent to ~p~n", [Leader]),
+ not_healing;
+ false -> not_healing
+ end;
+maybe_start(State) ->
+ State.
+
+enabled() ->
+ case application:get_env(rabbit, cluster_partition_handling) of
+ {ok, autoheal} -> true;
+ {ok, {pause_if_all_down, _, autoheal}} -> true;
+ _ -> false
+ end.
+
+leader() ->
+ [Leader | _] = lists:usort(rabbit_mnesia:cluster_nodes(all)),
+ Leader.
+
+%% This is the winner receiving its last notification that a node has
+%% stopped - all nodes can now start again
+rabbit_down(Node, {winner_waiting, [Node], Notify}) ->
+ rabbit_log:info("Autoheal: final node has stopped, starting...~n",[]),
+ winner_finish(Notify);
+
+rabbit_down(Node, {winner_waiting, WaitFor, Notify}) ->
+ {winner_waiting, WaitFor -- [Node], Notify};
+
+rabbit_down(Winner, {leader_waiting, Winner, Losers}) ->
+ abort([Winner], Losers);
+
+rabbit_down(_Node, State) ->
+ %% Ignore. Either:
+ %% o we already cancelled the autoheal process;
+ %% o we are still waiting the winner's report.
+ State.
+
+node_down(_Node, not_healing) ->
+ not_healing;
+
+node_down(Node, {winner_waiting, _, Notify}) ->
+ abort([Node], Notify);
+
+node_down(Node, {leader_waiting, Node, _Notify}) ->
+ %% The winner went down, we don't know what to do so we simply abort.
+ rabbit_log:info("Autoheal: aborting - winner ~p went down~n", [Node]),
+ not_healing;
+
+node_down(Node, {leader_waiting, _, _} = St) ->
+ %% If it is a partial partition, the winner might continue with the
+ %% healing process. If it is a full partition, the winner will also
+ %% see it and abort. Let's wait for it.
+ rabbit_log:info("Autoheal: ~p went down, waiting for winner decision ~n", [Node]),
+ St;
+
+node_down(Node, _State) ->
+ rabbit_log:info("Autoheal: aborting - ~p went down~n", [Node]),
+ not_healing.
+
+%% If the process that has to restart the node crashes for an unexpected reason,
+%% we go back to a not healing state so the node is able to recover.
+process_down({'EXIT', Pid, Reason}, {restarting, Pid}) when Reason =/= normal ->
+ rabbit_log:info("Autoheal: aborting - the process responsible for restarting the "
+ "node terminated with reason: ~p~n", [Reason]),
+ not_healing;
+
+process_down(_, State) ->
+ State.
+
+%% By receiving this message we become the leader
+%% TODO should we try to debounce this?
+handle_msg({request_start, Node},
+ not_healing, Partitions) ->
+ rabbit_log:info("Autoheal request received from ~p~n", [Node]),
+ case check_other_nodes(Partitions) of
+ {error, E} ->
+ rabbit_log:info("Autoheal request denied: ~s~n", [fmt_error(E)]),
+ not_healing;
+ {ok, AllPartitions} ->
+ {Winner, Losers} = make_decision(AllPartitions),
+ rabbit_log:info("Autoheal decision~n"
+ " * Partitions: ~p~n"
+ " * Winner: ~p~n"
+ " * Losers: ~p~n",
+ [AllPartitions, Winner, Losers]),
+ case node() =:= Winner of
+ true -> handle_msg({become_winner, Losers},
+ not_healing, Partitions);
+ false -> send(Winner, {become_winner, Losers}),
+ {leader_waiting, Winner, Losers}
+ end
+ end;
+
+handle_msg({request_start, Node},
+ State, _Partitions) ->
+ rabbit_log:info("Autoheal request received from ~p when healing; "
+ "ignoring~n", [Node]),
+ State;
+
+handle_msg({become_winner, Losers},
+ not_healing, _Partitions) ->
+ rabbit_log:info("Autoheal: I am the winner, waiting for ~p to stop~n",
+ [Losers]),
+ stop_partition(Losers);
+
+handle_msg({become_winner, Losers},
+ {winner_waiting, _, Losers}, _Partitions) ->
+ %% The leader has aborted the healing, might have seen us down but
+ %% we didn't see the same. Let's try again as it is the same partition.
+ rabbit_log:info("Autoheal: I am the winner and received a duplicated "
+ "request, waiting again for ~p to stop~n", [Losers]),
+ stop_partition(Losers);
+
+handle_msg({become_winner, _},
+ {winner_waiting, _, Losers}, _Partitions) ->
+ %% Something has happened to the leader, it might have seen us down but we
+ %% are still alive. Partitions have changed, cannot continue.
+ rabbit_log:info("Autoheal: I am the winner and received another healing "
+ "request, partitions have changed to ~p. Aborting ~n", [Losers]),
+ winner_finish(Losers),
+ not_healing;
+
+handle_msg({winner_is, Winner}, State = not_healing,
+ _Partitions) ->
+ %% This node is a loser, nothing else.
+ Pid = restart_loser(State, Winner),
+ {restarting, Pid};
+handle_msg({winner_is, Winner}, State = {leader_waiting, Winner, _},
+ _Partitions) ->
+ %% This node is the leader and a loser at the same time.
+ Pid = restart_loser(State, Winner),
+ {restarting, Pid};
+
+handle_msg(Request, {restarting, Pid} = St, _Partitions) ->
+ %% ignore, we can contribute no further
+ rabbit_log:info("Autoheal: Received the request ~p while waiting for ~p "
+ "to restart the node. Ignoring it ~n", [Request, Pid]),
+ St;
+
+handle_msg(report_autoheal_status, not_healing, _Partitions) ->
+    %% The leader is asking us (the winner) about the autoheal
+    %% status. This happens when the leader is a loser and it just
+ %% restarted. We are in the "not_healing" state, so the previous
+ %% autoheal process ended: let's tell this to the leader.
+ send(leader(), {autoheal_finished, node()}),
+ not_healing;
+
+handle_msg(report_autoheal_status, State, _Partitions) ->
+ %% Like above, the leader is asking about the autoheal status. We
+ %% are not finished with it. There is no need to send anything yet
+ %% to the leader: we will send the notification when it is over.
+ State;
+
+handle_msg({autoheal_finished, Winner},
+ {leader_waiting, Winner, _}, _Partitions) ->
+ %% The winner is finished with the autoheal process and notified us
+ %% (the leader). We can transition to the "not_healing" state and
+ %% accept new requests.
+ rabbit_log:info("Autoheal finished according to winner ~p~n", [Winner]),
+ not_healing;
+
+handle_msg({autoheal_finished, Winner}, not_healing, _Partitions)
+ when Winner =:= node() ->
+ %% We are the leader and the winner. The state already transitioned
+ %% to "not_healing" at the end of the autoheal process.
+ rabbit_log:info("Autoheal finished according to winner ~p~n", [node()]),
+ not_healing;
+
+handle_msg({autoheal_finished, Winner}, not_healing, _Partitions) ->
+ %% We might have seen the winner down during a partial partition and
+ %% transitioned to not_healing. However, the winner was still able
+ %% to finish. Let it pass.
+ rabbit_log:info("Autoheal finished according to winner ~p."
+ " Unexpected, I might have previously seen the winner down~n", [Winner]),
+ not_healing.
+
+%%----------------------------------------------------------------------------
+
+send(Node, Msg) -> {?SERVER, Node} ! {autoheal_msg, Msg}.
+
+abort(Down, Notify) ->
+ rabbit_log:info("Autoheal: aborting - ~p down~n", [Down]),
+ %% Make sure any nodes waiting for us start - it won't necessarily
+ %% heal the partition but at least they won't get stuck.
+ %% If we are executing this, we are not stopping. Thus, don't wait
+ %% for ourselves!
+ winner_finish(Notify -- [node()]).
+
+winner_finish(Notify) ->
+ %% There is a race in Mnesia causing a starting loser to hang
+ %% forever if another loser stops at the same time: the starting
+ %% node connects to the other node, negotiates the protocol and
+ %% attempts to acquire a write lock on the schema on the other node.
+ %% If the other node stops between the protocol negotiation and lock
+ %% request, the starting node never gets an answer to its lock
+ %% request.
+ %%
+ %% To work around the problem, we make sure Mnesia is stopped on all
+ %% losing nodes before sending the "autoheal_safe_to_start" signal.
+ wait_for_mnesia_shutdown(Notify),
+ [{rabbit_outside_app_process, N} ! autoheal_safe_to_start || N <- Notify],
+ send(leader(), {autoheal_finished, node()}),
+ not_healing.
+
+%% This improves the previous implementation, but could still potentially enter an infinite
+%% loop. It is also possible that, by the time it finishes, some of the nodes have been
+%% manually restarted, but we can't do much more about that (apart from stopping them
+%% again). So let it continue and notify all the losers to restart.
+wait_for_mnesia_shutdown(AllNodes) ->
+ Monitors = lists:foldl(fun(Node, Monitors0) ->
+ pmon:monitor({mnesia_sup, Node}, Monitors0)
+ end, pmon:new(), AllNodes),
+ wait_for_supervisors(Monitors).
+
+wait_for_supervisors(Monitors) ->
+ case pmon:is_empty(Monitors) of
+ true ->
+ ok;
+ false ->
+ receive
+ {'DOWN', _MRef, process, {mnesia_sup, _} = I, _Reason} ->
+ wait_for_supervisors(pmon:erase(I, Monitors))
+ after
+ 60000 ->
+ AliveLosers = [Node || {_, Node} <- pmon:monitored(Monitors)],
+ rabbit_log:info("Autoheal: mnesia in nodes ~p is still up, sending "
+ "winner notification again to these ~n", [AliveLosers]),
+ [send(L, {winner_is, node()}) || L <- AliveLosers],
+ wait_for_mnesia_shutdown(AliveLosers)
+ end
+ end.
+
+restart_loser(State, Winner) ->
+ rabbit_log:warning(
+ "Autoheal: we were selected to restart; winner is ~p~n", [Winner]),
+ NextStateTimeout = application:get_env(rabbit, autoheal_state_transition_timeout, 60000),
+ rabbit_node_monitor:run_outside_applications(
+ fun () ->
+ MRef = erlang:monitor(process, {?SERVER, Winner}),
+ rabbit:stop(),
+ NextState = receive
+ {'DOWN', MRef, process, {?SERVER, Winner}, _Reason} ->
+ not_healing;
+ autoheal_safe_to_start ->
+ State
+ after NextStateTimeout ->
+ rabbit_log:warning(
+ "Autoheal: timed out waiting for a safe-to-start message from the winner (~p); will retry",
+ [Winner]),
+ not_healing
+ end,
+ erlang:demonitor(MRef, [flush]),
+ %% During the restart, the autoheal state is lost so we
+ %% store it in the application environment temporarily so
+ %% init/0 can pick it up.
+ %%
+ %% This is useful to the leader which is a loser at the
+ %% same time: because the leader is restarting, there
+ %% is a great chance it misses the "autoheal finished!"
+ %% notification from the winner. Thanks to the saved
+ %% state, it knows it needs to ask the winner if the
+ %% autoheal process is finished or not.
+ application:set_env(rabbit,
+ ?AUTOHEAL_STATE_AFTER_RESTART, NextState),
+ rabbit:start()
+ end, true).
+
+make_decision(AllPartitions) ->
+ Sorted = lists:sort([{partition_value(P), P} || P <- AllPartitions]),
+ [[Winner | _] | Rest] = lists:reverse([P || {_, P} <- Sorted]),
+ {Winner, lists:append(Rest)}.
+
+partition_value(Partition) ->
+ Connections = [Res || Node <- Partition,
+ Res <- [rpc:call(Node, rabbit_networking,
+ connections_local, [])],
+ is_list(Res)],
+ {length(lists:append(Connections)), length(Partition)}.
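+%% A worked example (hypothetical counts): with AllPartitions = [[a, b], [c]],
+%% where a and b hold 1 and 2 local connections and c holds 10, the values
+%% are {3, 2} and {10, 1}; sorting and reversing picks [c] first, so the
+%% winner is c and the losers are [a, b] - connection count outweighs
+%% partition size.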
+
+%% We have our local understanding of what partitions exist; but we
+%% only know which nodes we have been partitioned from, not which
+%% nodes are partitioned from each other.
+check_other_nodes(LocalPartitions) ->
+ Nodes = rabbit_mnesia:cluster_nodes(all),
+ {Results, Bad} = rabbit_node_monitor:status(Nodes -- [node()]),
+ RemotePartitions = [{Node, proplists:get_value(partitions, Res)}
+ || {Node, Res} <- Results],
+ RemoteDown = [{Node, Down}
+ || {Node, Res} <- Results,
+ Down <- [Nodes -- proplists:get_value(nodes, Res)],
+ Down =/= []],
+ case {Bad, RemoteDown} of
+ {[], []} -> Partitions = [{node(), LocalPartitions} | RemotePartitions],
+ {ok, all_partitions(Partitions, [Nodes])};
+ {[], _} -> {error, {remote_down, RemoteDown}};
+ {_, _} -> {error, {nodes_down, Bad}}
+ end.
+
+all_partitions([], Partitions) ->
+ Partitions;
+all_partitions([{Node, CantSee} | Rest], Partitions) ->
+ {[Containing], Others} =
+ lists:partition(fun (Part) -> lists:member(Node, Part) end, Partitions),
+ A = Containing -- CantSee,
+ B = Containing -- A,
+ Partitions1 = case {A, B} of
+ {[], _} -> Partitions;
+ {_, []} -> Partitions;
+ _ -> [A, B | Others]
+ end,
+ all_partitions(Rest, Partitions1).
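+%% A worked example (hypothetical cluster): with Nodes = [a, b, c] and a
+%% single report {a, [c]} (a cannot see c), the initial [[a, b, c]] is
+%% split into A = [a, b] and B = [c], giving [[a, b], [c]].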
+
+fmt_error({remote_down, RemoteDown}) ->
+ rabbit_misc:format("Remote nodes disconnected:~n ~p", [RemoteDown]);
+fmt_error({nodes_down, NodesDown}) ->
+ rabbit_misc:format("Local nodes down: ~p", [NodesDown]).
+
+stop_partition(Losers) ->
+ %% The leader said everything was ready - do we agree? If not then
+ %% give up.
+ Down = Losers -- rabbit_node_monitor:alive_rabbit_nodes(Losers),
+ case Down of
+ [] -> [send(L, {winner_is, node()}) || L <- Losers],
+ {winner_waiting, Losers, Losers};
+ _ -> abort(Down, Losers)
+ end.
diff --git a/deps/rabbit/src/rabbit_backing_queue.erl b/deps/rabbit/src/rabbit_backing_queue.erl
new file mode 100644
index 0000000000..4d709e14d0
--- /dev/null
+++ b/deps/rabbit/src/rabbit_backing_queue.erl
@@ -0,0 +1,264 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_backing_queue).
+
+-export([info_keys/0]).
+
+-define(INFO_KEYS, [messages_ram, messages_ready_ram,
+ messages_unacknowledged_ram, messages_persistent,
+ message_bytes, message_bytes_ready,
+ message_bytes_unacknowledged, message_bytes_ram,
+ message_bytes_persistent, head_message_timestamp,
+ disk_reads, disk_writes, backing_queue_status,
+ messages_paged_out, message_bytes_paged_out]).
+
+%% We can't specify a per-queue ack/state with callback signatures
+-type ack() :: any().
+-type state() :: any().
+
+-type flow() :: 'flow' | 'noflow'.
+-type msg_ids() :: [rabbit_types:msg_id()].
+-type publish() :: {rabbit_types:basic_message(),
+ rabbit_types:message_properties(), boolean()}.
+-type delivered_publish() :: {rabbit_types:basic_message(),
+ rabbit_types:message_properties()}.
+-type fetch_result(Ack) ::
+ ('empty' | {rabbit_types:basic_message(), boolean(), Ack}).
+-type drop_result(Ack) ::
+ ('empty' | {rabbit_types:msg_id(), Ack}).
+-type recovery_terms() :: [term()] | 'non_clean_shutdown'.
+-type recovery_info() :: 'new' | recovery_terms().
+-type purged_msg_count() :: non_neg_integer().
+-type async_callback() ::
+ fun ((atom(), fun ((atom(), state()) -> state())) -> 'ok').
+-type duration() :: ('undefined' | 'infinity' | number()).
+
+-type msg_fun(A) :: fun ((rabbit_types:basic_message(), ack(), A) -> A).
+-type msg_pred() :: fun ((rabbit_types:message_properties()) -> boolean()).
+
+-type queue_mode() :: atom().
+
+%% Called on startup with a vhost and a list of durable queue names on this vhost.
+%% The queues aren't being started at this point, but this call allows the
+%% backing queue to perform any checking necessary for the consistency
+%% of those queues, or initialise any other shared resources.
+%%
+%% The list of queue recovery terms returned as {ok, Terms} must be given
+%% in the same order as the list of queue names supplied.
+-callback start(rabbit_types:vhost(), [rabbit_amqqueue:name()]) -> rabbit_types:ok(recovery_terms()).
+
+%% Called to tear down any state/resources for vhost. NB: Implementations should
+%% not depend on this function being called on shutdown and instead
+%% should hook into the rabbit supervision hierarchy.
+-callback stop(rabbit_types:vhost()) -> 'ok'.
+
+%% Initialise the backing queue and its state.
+%%
+%% Takes
+%% 1. the amqqueue record
+%% 2. a term indicating whether the queue is an existing queue that
+%% should be recovered or not. When 'new' is given, no recovery is
+%% taking place, otherwise a list of recovery terms is given, or
+%% the atom 'non_clean_shutdown' if no recovery terms are available.
+%% 3. an asynchronous callback which accepts a function of type
+%% backing-queue-state to backing-queue-state. This callback
+%% function can be safely invoked from any process, which makes it
+%% useful for passing messages back into the backing queue,
+%% especially as the backing queue does not have control of its own
+%% mailbox.
+-callback init(amqqueue:amqqueue(), recovery_info(),
+ async_callback()) -> state().
+
+%% Called on queue shutdown when queue isn't being deleted.
+-callback terminate(any(), state()) -> state().
+
+%% Called when the queue is terminating and needs to delete all its
+%% content.
+-callback delete_and_terminate(any(), state()) -> state().
+
+%% Called to clean up after a crashed queue. In this case we don't
+%% have a process and thus a state(), we are just removing on-disk data.
+-callback delete_crashed(amqqueue:amqqueue()) -> 'ok'.
+
+%% Remove all 'fetchable' messages from the queue, i.e. all messages
+%% except those that have been fetched already and are pending acks.
+-callback purge(state()) -> {purged_msg_count(), state()}.
+
+%% Remove all messages in the queue which have been fetched and are
+%% pending acks.
+-callback purge_acks(state()) -> state().
+
+%% Publish a message.
+-callback publish(rabbit_types:basic_message(),
+ rabbit_types:message_properties(), boolean(), pid(), flow(),
+ state()) -> state().
+
+%% Like publish/6 but for batches of publishes.
+-callback batch_publish([publish()], pid(), flow(), state()) -> state().
+
+%% Called for messages which have already been passed straight
+%% out to a client. The queue will be empty for these calls
+%% (i.e. saves the round trip through the backing queue).
+-callback publish_delivered(rabbit_types:basic_message(),
+ rabbit_types:message_properties(), pid(), flow(),
+ state())
+ -> {ack(), state()}.
+
+%% Like publish_delivered/5 but for batches of publishes.
+-callback batch_publish_delivered([delivered_publish()], pid(), flow(),
+ state())
+ -> {[ack()], state()}.
+
+%% Called to inform the BQ about messages which have reached the
+%% queue, but are not going to be further passed to BQ.
+-callback discard(rabbit_types:msg_id(), pid(), flow(), state()) -> state().
+
+%% Return ids of messages which have been confirmed since the last
+%% invocation of this function (or initialisation).
+%%
+%% Message ids should only appear in the result of drain_confirmed
+%% under the following circumstances:
+%%
+%% 1. The message appears in a call to publish_delivered/4 and the
+%% first argument (ack_required) is false; or
+%% 2. The message is fetched from the queue with fetch/2 and the first
+%% argument (ack_required) is false; or
+%% 3. The message is acked (ack/2 is called for the message); or
+%% 4. The message is fully fsync'd to disk in such a way that the
+%% recovery of the message is guaranteed in the event of a crash of
+%% this rabbit node (excluding hardware failure).
+%%
+%% In addition to the above conditions, a message id may only appear
+%% in the result of drain_confirmed if
+%% #message_properties.needs_confirming = true when the msg was
+%% published (through whichever means) to the backing queue.
+%%
+%% It is legal for the same message id to appear in the results of
+%% multiple calls to drain_confirmed, which means that the backing
+%% queue is not required to keep track of which messages it has
+%% already confirmed. The confirm will be issued to the publisher the
+%% first time the message id appears in the result of
+%% drain_confirmed. All subsequent appearances of that message id will
+%% be ignored.
+-callback drain_confirmed(state()) -> {msg_ids(), state()}.
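+%% A hypothetical timeline for a message published with
+%% #message_properties.needs_confirming = true:
+%%
+%%   publish/6 with MsgId                     %% nothing confirmable yet
+%%   ... MsgId is fully fsync'd to disk ...
+%%   drain_confirmed(S0) => {[MsgId], S1}     %% confirm issued to publisher
+%%   drain_confirmed(S1) => {[MsgId], S2}     %% legal repeat, ignored upstream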
+
+%% Drop messages from the head of the queue while the supplied
+%% predicate on message properties returns true. Returns the first
+%% message properties for which the predicate returned false, or
+%% 'undefined' if the whole backing queue was traversed w/o the
+%% predicate ever returning false.
+-callback dropwhile(msg_pred(), state())
+ -> {rabbit_types:message_properties() | undefined, state()}.
+
+%% Like dropwhile, except messages are fetched in "require
+%% acknowledgement" mode and are passed, together with their ack tag,
+%% to the supplied function. The function is also fed an
+%% accumulator. The result of fetchwhile is as for dropwhile plus the
+%% accumulator.
+-callback fetchwhile(msg_pred(), msg_fun(A), A, state())
+ -> {rabbit_types:message_properties() | undefined,
+ A, state()}.
+
+%% Produce the next message.
+-callback fetch(true, state()) -> {fetch_result(ack()), state()};
+ (false, state()) -> {fetch_result(undefined), state()}.
+
+%% Remove the next message.
+-callback drop(true, state()) -> {drop_result(ack()), state()};
+ (false, state()) -> {drop_result(undefined), state()}.
+
+%% Acktags supplied are for messages which can now be forgotten
+%% about. Must return 1 msg_id per Ack, in the same order as Acks.
+-callback ack([ack()], state()) -> {msg_ids(), state()}.
+
+%% Reinsert messages into the queue which have already been delivered
+%% and were pending acknowledgement.
+-callback requeue([ack()], state()) -> {msg_ids(), state()}.
+
+%% Fold over messages by ack tag. The supplied function is called with
+%% each message, its ack tag, and an accumulator.
+-callback ackfold(msg_fun(A), A, state(), [ack()]) -> {A, state()}.
+
+%% Fold over all the messages in a queue and return the accumulated
+%% results, leaving the queue undisturbed.
+-callback fold(fun((rabbit_types:basic_message(),
+ rabbit_types:message_properties(),
+ boolean(), A) -> {('stop' | 'cont'), A}),
+ A, state()) -> {A, state()}.
+
+%% How long is my queue?
+-callback len(state()) -> non_neg_integer().
+
+%% Is my queue empty?
+-callback is_empty(state()) -> boolean().
+
+%% What's the queue depth, where depth = length + number of pending acks
+-callback depth(state()) -> non_neg_integer().
+
+%% For the next three functions, the assumption is that you're
+%% monitoring something like the ingress and egress rates of the
+%% queue. The RAM duration is thus the length of time represented by
+%% the messages held in RAM given the current rates. If you want to
+%% ignore all of this stuff, then do so, and return 0 in
+%% ram_duration/1.
+
+%% The target is to have no more messages in RAM than indicated by the
+%% duration and the current queue rates.
+-callback set_ram_duration_target(duration(), state()) -> state().
+
+%% Optionally recalculate the duration internally (likely to be just
+%% update your internal rates), and report how many seconds the
+%% messages in RAM represent given the current rates of the queue.
+-callback ram_duration(state()) -> {duration(), state()}.
+
+%% Should 'timeout' be called as soon as the queue process can manage
+%% (either on an empty mailbox, or when a timer fires)?
+-callback needs_timeout(state()) -> 'false' | 'timed' | 'idle'.
+
+%% Called (eventually) after needs_timeout returns 'idle' or 'timed'.
+%% Note this may be called more than once for each 'idle' or 'timed'
+%% returned from needs_timeout
+-callback timeout(state()) -> state().
+
+%% Called immediately before the queue hibernates.
+-callback handle_pre_hibernate(state()) -> state().
+
+%% Called when more credit has become available for credit_flow.
+-callback resume(state()) -> state().
+
+%% Used to help prioritisation in rabbit_amqqueue_process. The rate of
+%% inbound messages and outbound messages at the moment.
+-callback msg_rates(state()) -> {float(), float()}.
+
+-callback info(atom(), state()) -> any().
+
+%% Passed a function to be invoked with the relevant backing queue's
+%% state. Useful for when the backing queue or other components need
+%% to pass functions into the backing queue.
+-callback invoke(atom(), fun ((atom(), A) -> A), state()) -> state().
+
+%% Called prior to a publish or publish_delivered call. Allows the BQ
+%% to signal that it's already seen this message (e.g. it was published
+%% or discarded previously), specifying whether to drop the message or reject it.
+-callback is_duplicate(rabbit_types:basic_message(), state())
+ -> {{true, drop} | {true, reject} | boolean(), state()}.
+
+-callback set_queue_mode(queue_mode(), state()) -> state().
+
+-callback zip_msgs_and_acks([delivered_publish()],
+ [ack()], Acc, state())
+ -> Acc.
+
+%% Called when rabbit_amqqueue_process receives a message via
+%% handle_info and it should be processed by the backing
+%% queue
+-callback handle_info(term(), state()) -> state().
+
+-spec info_keys() -> rabbit_types:info_keys().
+
+info_keys() -> ?INFO_KEYS.
diff --git a/deps/rabbit/src/rabbit_basic.erl b/deps/rabbit/src/rabbit_basic.erl
new file mode 100644
index 0000000000..cdc9e082e4
--- /dev/null
+++ b/deps/rabbit/src/rabbit_basic.erl
@@ -0,0 +1,354 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_basic).
+-include("rabbit.hrl").
+-include("rabbit_framing.hrl").
+
+-export([publish/4, publish/5, publish/1,
+ message/3, message/4, properties/1, prepend_table_header/3,
+ extract_headers/1, extract_timestamp/1, map_headers/2, delivery/4,
+ header_routes/1, parse_expiration/1, header/2, header/3]).
+-export([build_content/2, from_content/1, msg_size/1,
+ maybe_gc_large_msg/1, maybe_gc_large_msg/2]).
+-export([add_header/4,
+ peek_fmt_message/1]).
+
+%%----------------------------------------------------------------------------
+
+-type properties_input() ::
+ rabbit_framing:amqp_property_record() | [{atom(), any()}].
+-type publish_result() ::
+ ok | rabbit_types:error('not_found').
+-type header() :: any().
+-type headers() :: rabbit_framing:amqp_table() | 'undefined'.
+
+-type exchange_input() :: rabbit_types:exchange() | rabbit_exchange:name().
+-type body_input() :: binary() | [binary()].
+
+%%----------------------------------------------------------------------------
+
+%% Convenience function for avoiding round-trips in calls across the
+%% distributed Erlang network.
+
+-spec publish
+ (exchange_input(), rabbit_router:routing_key(), properties_input(),
+ body_input()) ->
+ publish_result().
+
+publish(Exchange, RoutingKeyBin, Properties, Body) ->
+ publish(Exchange, RoutingKeyBin, false, Properties, Body).
+
+%% Convenience function for avoiding round-trips in calls across the
+%% distributed Erlang network.
+
+-spec publish
+ (exchange_input(), rabbit_router:routing_key(), boolean(),
+ properties_input(), body_input()) ->
+ publish_result().
+
+publish(X = #exchange{name = XName}, RKey, Mandatory, Props, Body) ->
+ Message = message(XName, RKey, properties(Props), Body),
+ publish(X, delivery(Mandatory, false, Message, undefined));
+publish(XName, RKey, Mandatory, Props, Body) ->
+ Message = message(XName, RKey, properties(Props), Body),
+ publish(delivery(Mandatory, false, Message, undefined)).
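+
+%% Illustrative only (queue and payload names are hypothetical):
+%% publishing a transient message to the default exchange of vhost
+%% <<"/">> so that it is routed to the queue named by the routing key:
+%%
+%%   XName = rabbit_misc:r(<<"/">>, exchange, <<>>),
+%%   ok = rabbit_basic:publish(XName, <<"q1">>, [{delivery_mode, 1}], <<"hello">>).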
+
+-spec publish(rabbit_types:delivery()) -> publish_result().
+
+publish(Delivery = #delivery{
+ message = #basic_message{exchange_name = XName}}) ->
+ case rabbit_exchange:lookup(XName) of
+ {ok, X} -> publish(X, Delivery);
+ Err -> Err
+ end.
+
+publish(X, Delivery) ->
+ Qs = rabbit_amqqueue:lookup(rabbit_exchange:route(X, Delivery)),
+ _ = rabbit_queue_type:deliver(Qs, Delivery, stateless),
+ ok.
+
+-spec delivery
+ (boolean(), boolean(), rabbit_types:message(), undefined | integer()) ->
+ rabbit_types:delivery().
+
+delivery(Mandatory, Confirm, Message, MsgSeqNo) ->
+ #delivery{mandatory = Mandatory, confirm = Confirm, sender = self(),
+ message = Message, msg_seq_no = MsgSeqNo, flow = noflow}.
+
+-spec build_content
+ (rabbit_framing:amqp_property_record(), binary() | [binary()]) ->
+ rabbit_types:content().
+
+build_content(Properties, BodyBin) when is_binary(BodyBin) ->
+ build_content(Properties, [BodyBin]);
+
+build_content(Properties, PFR) ->
+ %% basic.publish hasn't changed so we can just hard-code amqp_0_9_1
+ {ClassId, _MethodId} =
+ rabbit_framing_amqp_0_9_1:method_id('basic.publish'),
+ #content{class_id = ClassId,
+ properties = Properties,
+ properties_bin = none,
+ protocol = none,
+ payload_fragments_rev = PFR}.
+
+-spec from_content
+ (rabbit_types:content()) ->
+ {rabbit_framing:amqp_property_record(), binary()}.
+
+from_content(Content) ->
+ #content{class_id = ClassId,
+ properties = Props,
+ payload_fragments_rev = FragmentsRev} =
+ rabbit_binary_parser:ensure_content_decoded(Content),
+ %% basic.publish hasn't changed so we can just hard-code amqp_0_9_1
+ {ClassId, _MethodId} =
+ rabbit_framing_amqp_0_9_1:method_id('basic.publish'),
+ {Props, list_to_binary(lists:reverse(FragmentsRev))}.
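+
+%% An illustrative round-trip through build_content/2 and
+%% from_content/1 (default properties assumed):
+%%
+%%   Content = rabbit_basic:build_content(#'P_basic'{}, <<"payload">>),
+%%   {#'P_basic'{}, <<"payload">>} = rabbit_basic:from_content(Content).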
+
+%% This breaks the spec rule forbidding message modification
+strip_header(#content{properties = #'P_basic'{headers = undefined}}
+ = DecodedContent, _Key) ->
+ DecodedContent;
+strip_header(#content{properties = Props = #'P_basic'{headers = Headers}}
+ = DecodedContent, Key) ->
+ case lists:keysearch(Key, 1, Headers) of
+ false -> DecodedContent;
+ {value, Found} -> Headers0 = lists:delete(Found, Headers),
+ rabbit_binary_generator:clear_encoded_content(
+ DecodedContent#content{
+ properties = Props#'P_basic'{
+ headers = Headers0}})
+ end.
+
+-spec message
+ (rabbit_exchange:name(), rabbit_router:routing_key(),
+ rabbit_types:decoded_content()) ->
+ rabbit_types:ok_or_error2(rabbit_types:message(), any()).
+
+message(XName, RoutingKey, #content{properties = Props} = DecodedContent) ->
+ try
+ {ok, #basic_message{
+ exchange_name = XName,
+ content = strip_header(DecodedContent, ?DELETED_HEADER),
+ id = rabbit_guid:gen(),
+ is_persistent = is_message_persistent(DecodedContent),
+ routing_keys = [RoutingKey |
+ header_routes(Props#'P_basic'.headers)]}}
+ catch
+ {error, _Reason} = Error -> Error
+ end.
+
+-spec message
+ (rabbit_exchange:name(), rabbit_router:routing_key(), properties_input(),
+ binary()) ->
+ rabbit_types:message().
+
+message(XName, RoutingKey, RawProperties, Body) ->
+ Properties = properties(RawProperties),
+ Content = build_content(Properties, Body),
+ {ok, Msg} = message(XName, RoutingKey, Content),
+ Msg.
+
+-spec properties
+ (properties_input()) -> rabbit_framing:amqp_property_record().
+
+properties(P = #'P_basic'{}) ->
+ P;
+properties(P) when is_list(P) ->
+ %% Yes, this is O(length(P) * record_info(size, 'P_basic') / 2),
+ %% i.e. slow. Use the definition of 'P_basic' directly if
+ %% possible!
+ lists:foldl(fun ({Key, Value}, Acc) ->
+ case indexof(record_info(fields, 'P_basic'), Key) of
+ 0 -> throw({unknown_basic_property, Key});
+ N -> setelement(N + 1, Acc, Value)
+ end
+ end, #'P_basic'{}, P).
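+
+%% For example, properties([{delivery_mode, 2}, {priority, 1}]) builds
+%% #'P_basic'{delivery_mode = 2, priority = 1} with all other fields at
+%% their defaults; an unknown key throws {unknown_basic_property, Key}.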
+
+-spec prepend_table_header
+ (binary(), rabbit_framing:amqp_table(), headers()) -> headers().
+
+prepend_table_header(Name, Info, undefined) ->
+ prepend_table_header(Name, Info, []);
+prepend_table_header(Name, Info, Headers) ->
+ case rabbit_misc:table_lookup(Headers, Name) of
+ {array, Existing} ->
+ prepend_table(Name, Info, Existing, Headers);
+ undefined ->
+ prepend_table(Name, Info, [], Headers);
+ Other ->
+ Headers2 = prepend_table(Name, Info, [], Headers),
+ set_invalid_header(Name, Other, Headers2)
+ end.
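+
+%% For example, starting with no headers at all,
+%% prepend_table_header(<<"x-death">>, Info, undefined) yields
+%% [{<<"x-death">>, array, [{table, Info}]}]; a second call with the
+%% same name prepends another {table, _} entry to that array.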
+
+prepend_table(Name, Info, Prior, Headers) ->
+ rabbit_misc:set_table_value(Headers, Name, array, [{table, Info} | Prior]).
+
+set_invalid_header(Name, {_, _}=Value, Headers) when is_list(Headers) ->
+ case rabbit_misc:table_lookup(Headers, ?INVALID_HEADERS_KEY) of
+ undefined ->
+ set_invalid([{Name, array, [Value]}], Headers);
+ {table, ExistingHdr} ->
+ update_invalid(Name, Value, ExistingHdr, Headers);
+ Other ->
+ %% somehow the x-invalid-headers header is corrupt
+ Invalid = [{?INVALID_HEADERS_KEY, array, [Other]}],
+ set_invalid_header(Name, Value, set_invalid(Invalid, Headers))
+ end.
+
+set_invalid(NewHdr, Headers) ->
+ rabbit_misc:set_table_value(Headers, ?INVALID_HEADERS_KEY, table, NewHdr).
+
+update_invalid(Name, Value, ExistingHdr, Header) ->
+ Values = case rabbit_misc:table_lookup(ExistingHdr, Name) of
+ undefined -> [Value];
+ {array, Prior} -> [Value | Prior]
+ end,
+ NewHdr = rabbit_misc:set_table_value(ExistingHdr, Name, array, Values),
+ set_invalid(NewHdr, Header).
+
+-spec header(header(), headers()) -> 'undefined' | any().
+
+header(_Header, undefined) ->
+ undefined;
+header(_Header, []) ->
+ undefined;
+header(Header, Headers) ->
+ header(Header, Headers, undefined).
+
+-spec header(header(), headers(), any()) -> 'undefined' | any().
+
+header(Header, Headers, Default) ->
+ case lists:keysearch(Header, 1, Headers) of
+ false -> Default;
+ {value, Val} -> Val
+ end.
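+
+%% Note that on a match the whole table entry is returned, not just the
+%% value: header(<<"foo">>, [{<<"foo">>, longstr, <<"bar">>}]) yields
+%% {<<"foo">>, longstr, <<"bar">>}.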
+
+-spec extract_headers(rabbit_types:content()) -> headers().
+
+extract_headers(Content) ->
+ #content{properties = #'P_basic'{headers = Headers}} =
+ rabbit_binary_parser:ensure_content_decoded(Content),
+ Headers.
+
+extract_timestamp(Content) ->
+ #content{properties = #'P_basic'{timestamp = Timestamp}} =
+ rabbit_binary_parser:ensure_content_decoded(Content),
+ Timestamp.
+
+-spec map_headers
+ (fun((headers()) -> headers()), rabbit_types:content()) ->
+ rabbit_types:content().
+
+map_headers(F, Content) ->
+ Content1 = rabbit_binary_parser:ensure_content_decoded(Content),
+ #content{properties = #'P_basic'{headers = Headers} = Props} = Content1,
+ Headers1 = F(Headers),
+ rabbit_binary_generator:clear_encoded_content(
+ Content1#content{properties = Props#'P_basic'{headers = Headers1}}).
+
+indexof(L, Element) -> indexof(L, Element, 1).
+
+indexof([], _Element, _N) -> 0;
+indexof([Element | _Rest], Element, N) -> N;
+indexof([_ | Rest], Element, N) -> indexof(Rest, Element, N + 1).
+
+is_message_persistent(#content{properties = #'P_basic'{
+ delivery_mode = Mode}}) ->
+ case Mode of
+ 1 -> false;
+ 2 -> true;
+ undefined -> false;
+ Other -> throw({error, {delivery_mode_unknown, Other}})
+ end.
+
+%% Extract CC routes from headers
+
+-spec header_routes(undefined | rabbit_framing:amqp_table()) -> [string()].
+
+header_routes(undefined) ->
+ [];
+header_routes(HeadersTable) ->
+ lists:append(
+ [case rabbit_misc:table_lookup(HeadersTable, HeaderKey) of
+ {array, Routes} -> [Route || {longstr, Route} <- Routes];
+ undefined -> [];
+ {Type, _Val} -> throw({error, {unacceptable_type_in_header,
+ binary_to_list(HeaderKey), Type}})
+ end || HeaderKey <- ?ROUTING_HEADERS]).
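+
+%% For example, assuming ?ROUTING_HEADERS includes <<"CC">>, a headers
+%% table [{<<"CC">>, array, [{longstr, <<"q2">>}, {longstr, <<"q3">>}]}]
+%% yields the extra routes [<<"q2">>, <<"q3">>].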
+
+-spec parse_expiration
+ (rabbit_framing:amqp_property_record()) ->
+ rabbit_types:ok_or_error2('undefined' | non_neg_integer(), any()).
+
+parse_expiration(#'P_basic'{expiration = undefined}) ->
+ {ok, undefined};
+parse_expiration(#'P_basic'{expiration = Expiration}) ->
+ case string:to_integer(binary_to_list(Expiration)) of
+ {error, no_integer} = E ->
+ E;
+ {N, ""} ->
+ case rabbit_misc:check_expiry(N) of
+ ok -> {ok, N};
+ E = {error, _} -> E
+ end;
+ {_, S} ->
+ {error, {leftover_string, S}}
+ end.
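+
+%% For example, parse_expiration(#'P_basic'{expiration = <<"60000">>})
+%% yields {ok, 60000}; <<"60s">> yields {error, {leftover_string, "s"}}
+%% and a wholly non-numeric value yields {error, no_integer}.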
+
+maybe_gc_large_msg(Content) ->
+ rabbit_writer:maybe_gc_large_msg(Content).
+
+maybe_gc_large_msg(Content, undefined) ->
+ rabbit_writer:msg_size(Content);
+maybe_gc_large_msg(Content, GCThreshold) ->
+ rabbit_writer:maybe_gc_large_msg(Content, GCThreshold).
+
+msg_size(Content) ->
+ rabbit_writer:msg_size(Content).
+
+add_header(Name, Type, Value, #basic_message{content = Content0} = Msg) ->
+ Content = rabbit_basic:map_headers(
+ fun(undefined) ->
+ rabbit_misc:set_table_value([], Name, Type, Value);
+ (Headers) ->
+ rabbit_misc:set_table_value(Headers, Name, Type, Value)
+ end, Content0),
+ Msg#basic_message{content = Content}.
+
+peek_fmt_message(#basic_message{exchange_name = Ex,
+ routing_keys = RKeys,
+ content =
+ #content{payload_fragments_rev = Payl0,
+ properties = Props}}) ->
+ Fields = [atom_to_binary(F, utf8) || F <- record_info(fields, 'P_basic')],
+ T = lists:zip(Fields, tl(tuple_to_list(Props))),
+ lists:foldl(
+ fun ({<<"headers">>, Hdrs}, Acc) ->
+ case Hdrs of
+ [] ->
+ Acc;
+ _ ->
+ Acc ++ [{header_key(H), V} || {H, _T, V} <- Hdrs]
+ end;
+ ({_, undefined}, Acc) ->
+ Acc;
+ (KV, Acc) ->
+ [KV | Acc]
+ end, [], [{<<"payload (max 64 bytes)">>,
+               %% restrict payload to 64 bytes
+ binary_prefix_64(iolist_to_binary(lists:reverse(Payl0)), 64)},
+ {<<"exchange">>, Ex#resource.name},
+ {<<"routing_keys">>, RKeys} | T]).
+
+header_key(A) ->
+ <<"header.", A/binary>>.
+
+binary_prefix_64(Bin, Len) ->
+ binary:part(Bin, 0, min(byte_size(Bin), Len)).
diff --git a/deps/rabbit/src/rabbit_binding.erl b/deps/rabbit/src/rabbit_binding.erl
new file mode 100644
index 0000000000..6ef25c4e60
--- /dev/null
+++ b/deps/rabbit/src/rabbit_binding.erl
@@ -0,0 +1,691 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_binding).
+-include_lib("rabbit_common/include/rabbit.hrl").
+-include("amqqueue.hrl").
+
+-export([recover/0, recover/2, exists/1, add/2, add/3, remove/1, remove/2, remove/3, remove/4]).
+-export([list/1, list_for_source/1, list_for_destination/1,
+ list_for_source_and_destination/2, list_explicit/0]).
+-export([new_deletions/0, combine_deletions/2, add_deletion/3,
+ process_deletions/2, binding_action/3]).
+-export([info_keys/0, info/1, info/2, info_all/1, info_all/2, info_all/4]).
+%% these must all be run inside a mnesia tx
+-export([has_for_source/1, remove_for_source/1,
+ remove_for_destination/2, remove_transient_for_destination/1,
+ remove_default_exchange_binding_rows_of/1]).
+
+-export([implicit_for_destination/1, reverse_binding/1]).
+-export([new/4]).
+
+-define(DEFAULT_EXCHANGE(VHostPath), #resource{virtual_host = VHostPath,
+ kind = exchange,
+ name = <<>>}).
+
+%%----------------------------------------------------------------------------
+
+-export_type([key/0, deletions/0]).
+
+-type key() :: binary().
+
+-type bind_errors() :: rabbit_types:error(
+ {'resources_missing',
+ [{'not_found', (rabbit_types:binding_source() |
+ rabbit_types:binding_destination())} |
+ {'absent', amqqueue:amqqueue()}]}).
+
+-type bind_ok_or_error() :: 'ok' | bind_errors() |
+ rabbit_types:error(
+ {'binding_invalid', string(), [any()]}).
+-type bind_res() :: bind_ok_or_error() | rabbit_misc:thunk(bind_ok_or_error()).
+-type inner_fun() ::
+ fun((rabbit_types:exchange(),
+ rabbit_types:exchange() | amqqueue:amqqueue()) ->
+ rabbit_types:ok_or_error(rabbit_types:amqp_error())).
+-type bindings() :: [rabbit_types:binding()].
+
+%% TODO this should really be opaque but that seems to confuse 17.1's
+%% dialyzer into objecting to everything that uses it.
+-type deletions() :: dict:dict().
+
+%%----------------------------------------------------------------------------
+
+-spec new(rabbit_types:exchange(),
+ key(),
+ rabbit_types:exchange() | amqqueue:amqqueue(),
+ rabbit_framing:amqp_table()) ->
+ rabbit_types:binding().
+
+new(Src, RoutingKey, Dst, #{}) ->
+ new(Src, RoutingKey, Dst, []);
+new(Src, RoutingKey, Dst, Arguments) when is_map(Arguments) ->
+ new(Src, RoutingKey, Dst, maps:to_list(Arguments));
+new(Src, RoutingKey, Dst, Arguments) ->
+ #binding{source = Src, key = RoutingKey, destination = Dst, args = Arguments}.
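+
+%% Illustrative only (hypothetical names); note that elsewhere in this
+%% module a #binding{}'s source and destination are #resource{} names:
+%%
+%%   Src = rabbit_misc:r(<<"/">>, exchange, <<"x1">>),
+%%   Dst = rabbit_misc:r(<<"/">>, queue, <<"q1">>),
+%%   #binding{} = rabbit_binding:new(Src, <<"rk">>, Dst, #{}).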
+
+
+-define(INFO_KEYS, [source_name, source_kind,
+ destination_name, destination_kind,
+ routing_key, arguments,
+ vhost]).
+
+%% Global table recovery
+
+-spec recover([rabbit_exchange:name()], [rabbit_amqqueue:name()]) ->
+ 'ok'.
+
+recover() ->
+ rabbit_misc:table_filter(
+ fun (Route) ->
+ mnesia:read({rabbit_semi_durable_route, Route}) =:= []
+ end,
+ fun (Route, true) ->
+ ok = mnesia:write(rabbit_semi_durable_route, Route, write);
+ (_Route, false) ->
+ ok
+ end, rabbit_durable_route).
+
+%% Virtual host-specific recovery
+recover(XNames, QNames) ->
+ XNameSet = sets:from_list(XNames),
+ QNameSet = sets:from_list(QNames),
+ SelectSet = fun (#resource{kind = exchange}) -> XNameSet;
+ (#resource{kind = queue}) -> QNameSet
+ end,
+ {ok, Gatherer} = gatherer:start_link(),
+ [recover_semi_durable_route(Gatherer, R, SelectSet(Dst)) ||
+ R = #route{binding = #binding{destination = Dst}} <-
+ rabbit_misc:dirty_read_all(rabbit_semi_durable_route)],
+ empty = gatherer:out(Gatherer),
+ ok = gatherer:stop(Gatherer),
+ ok.
+
+recover_semi_durable_route(Gatherer, R = #route{binding = B}, ToRecover) ->
+ #binding{source = Src, destination = Dst} = B,
+ case sets:is_element(Dst, ToRecover) of
+ true -> {ok, X} = rabbit_exchange:lookup(Src),
+ ok = gatherer:fork(Gatherer),
+ ok = worker_pool:submit_async(
+ fun () ->
+ recover_semi_durable_route_txn(R, X),
+ gatherer:finish(Gatherer)
+ end);
+ false -> ok
+ end.
+
+recover_semi_durable_route_txn(R = #route{binding = B}, X) ->
+ rabbit_misc:execute_mnesia_transaction(
+ fun () ->
+ case mnesia:read(rabbit_semi_durable_route, B, read) of
+ [] -> no_recover;
+ _ -> ok = sync_transient_route(R, fun mnesia:write/3),
+ rabbit_exchange:serial(X)
+ end
+ end,
+ fun (no_recover, _) -> ok;
+ (_Serial, true) -> x_callback(transaction, X, add_binding, B);
+ (Serial, false) -> x_callback(Serial, X, add_binding, B)
+ end).
+
+-spec exists(rabbit_types:binding()) -> boolean() | bind_errors().
+
+exists(#binding{source = ?DEFAULT_EXCHANGE(_),
+ destination = #resource{kind = queue, name = QName} = Queue,
+ key = QName,
+ args = []}) ->
+ case rabbit_amqqueue:lookup(Queue) of
+ {ok, _} -> true;
+ {error, not_found} -> false
+ end;
+exists(Binding) ->
+ binding_action(
+ Binding, fun (_Src, _Dst, B) ->
+ rabbit_misc:const(mnesia:read({rabbit_route, B}) /= [])
+ end, fun not_found_or_absent_errs/1).
+
+-spec add(rabbit_types:binding(), rabbit_types:username()) -> bind_res().
+
+add(Binding, ActingUser) -> add(Binding, fun (_Src, _Dst) -> ok end, ActingUser).
+
+-spec add(rabbit_types:binding(), inner_fun(), rabbit_types:username()) -> bind_res().
+
+add(Binding, InnerFun, ActingUser) ->
+ binding_action(
+ Binding,
+ fun (Src, Dst, B) ->
+ case rabbit_exchange:validate_binding(Src, B) of
+ ok ->
+ lock_resource(Src, read),
+ lock_resource(Dst, read),
+ %% this argument is used to check queue exclusivity;
+ %% in general, we want to fail on that in preference to
+ %% anything else
+ case InnerFun(Src, Dst) of
+ ok ->
+ case mnesia:read({rabbit_route, B}) of
+ [] -> add(Src, Dst, B, ActingUser);
+ [_] -> fun () -> ok end
+ end;
+ {error, _} = Err ->
+ rabbit_misc:const(Err)
+ end;
+ {error, _} = Err ->
+ rabbit_misc:const(Err)
+ end
+ end, fun not_found_or_absent_errs/1).
+
+add(Src, Dst, B, ActingUser) ->
+ [SrcDurable, DstDurable] = [durable(E) || E <- [Src, Dst]],
+ ok = sync_route(#route{binding = B}, SrcDurable, DstDurable,
+ fun mnesia:write/3),
+ x_callback(transaction, Src, add_binding, B),
+ Serial = rabbit_exchange:serial(Src),
+ fun () ->
+ x_callback(Serial, Src, add_binding, B),
+ ok = rabbit_event:notify(
+ binding_created,
+ info(B) ++ [{user_who_performed_action, ActingUser}])
+ end.
+
+-spec remove(rabbit_types:binding()) -> bind_res().
+remove(Binding) -> remove(Binding, fun (_Src, _Dst) -> ok end, ?INTERNAL_USER).
+
+-spec remove(rabbit_types:binding(), rabbit_types:username()) -> bind_res().
+remove(Binding, ActingUser) -> remove(Binding, fun (_Src, _Dst) -> ok end, ActingUser).
+
+
+-spec remove(rabbit_types:binding(), inner_fun(), rabbit_types:username()) -> bind_res().
+remove(Binding, InnerFun, ActingUser) ->
+ binding_action(
+ Binding,
+ fun (Src, Dst, B) ->
+ lock_resource(Src, read),
+ lock_resource(Dst, read),
+ case mnesia:read(rabbit_route, B, write) of
+ [] -> case mnesia:read(rabbit_durable_route, B, write) of
+ [] -> rabbit_misc:const(ok);
+ %% We still delete the binding and run
+ %% all post-delete functions if there is only
+ %% a durable route in the database
+ _ -> remove(Src, Dst, B, ActingUser)
+ end;
+ _ -> case InnerFun(Src, Dst) of
+ ok -> remove(Src, Dst, B, ActingUser);
+ {error, _} = Err -> rabbit_misc:const(Err)
+ end
+ end
+ end, fun absent_errs_only/1).
+
+remove(Src, Dst, B, ActingUser) ->
+ ok = sync_route(#route{binding = B}, durable(Src), durable(Dst),
+ fun delete/3),
+ Deletions = maybe_auto_delete(
+ B#binding.source, [B], new_deletions(), false),
+ process_deletions(Deletions, ActingUser).
+
+%% Implicit bindings are implicit as of rabbitmq/rabbitmq-server#1721.
+remove_default_exchange_binding_rows_of(Dst = #resource{}) ->
+ case implicit_for_destination(Dst) of
+ [Binding] ->
+ mnesia:dirty_delete(rabbit_durable_route, Binding),
+ mnesia:dirty_delete(rabbit_semi_durable_route, Binding),
+ mnesia:dirty_delete(rabbit_reverse_route,
+ reverse_binding(Binding)),
+ mnesia:dirty_delete(rabbit_route, Binding);
+ _ ->
+            %% either there was no binding to remove, or a competing
+            %% tx has beaten us to it
+ ok
+ end,
+ ok.
+
+-spec list_explicit() -> bindings().
+
+list_explicit() ->
+ mnesia:async_dirty(
+ fun () ->
+ AllRoutes = mnesia:dirty_match_object(rabbit_route, #route{_ = '_'}),
+ %% if there are any default exchange bindings left after an upgrade
+ %% of a pre-3.8 database, filter them out
+ AllBindings = [B || #route{binding = B} <- AllRoutes],
+ lists:filter(fun(#binding{source = S}) ->
+ not (S#resource.kind =:= exchange andalso S#resource.name =:= <<>>)
+ end, AllBindings)
+ end).
+
+-spec list(rabbit_types:vhost()) -> bindings().
+
+list(VHostPath) ->
+ VHostResource = rabbit_misc:r(VHostPath, '_'),
+ Route = #route{binding = #binding{source = VHostResource,
+ destination = VHostResource,
+ _ = '_'},
+ _ = '_'},
+ %% if there are any default exchange bindings left after an upgrade
+ %% of a pre-3.8 database, filter them out
+ AllBindings = [B || #route{binding = B} <- mnesia:dirty_match_object(rabbit_route,
+ Route)],
+ Filtered = lists:filter(fun(#binding{source = S}) ->
+ S =/= ?DEFAULT_EXCHANGE(VHostPath)
+ end, AllBindings),
+ implicit_bindings(VHostPath) ++ Filtered.
+
+-spec list_for_source
+ (rabbit_types:binding_source()) -> bindings().
+
+list_for_source(?DEFAULT_EXCHANGE(VHostPath)) ->
+ implicit_bindings(VHostPath);
+list_for_source(SrcName) ->
+ mnesia:async_dirty(
+ fun() ->
+ Route = #route{binding = #binding{source = SrcName, _ = '_'}},
+ [B || #route{binding = B}
+ <- mnesia:match_object(rabbit_route, Route, read)]
+ end).
+
+-spec list_for_destination
+ (rabbit_types:binding_destination()) -> bindings().
+
+list_for_destination(DstName = #resource{virtual_host = VHostPath}) ->
+ AllBindings = mnesia:async_dirty(
+ fun() ->
+ Route = #route{binding = #binding{destination = DstName,
+ _ = '_'}},
+ [reverse_binding(B) ||
+ #reverse_route{reverse_binding = B} <-
+ mnesia:match_object(rabbit_reverse_route,
+ reverse_route(Route), read)]
+ end),
+ Filtered = lists:filter(fun(#binding{source = S}) ->
+ S =/= ?DEFAULT_EXCHANGE(VHostPath)
+ end, AllBindings),
+ implicit_for_destination(DstName) ++ Filtered.
+
+implicit_bindings(VHostPath) ->
+ DstQueues = rabbit_amqqueue:list_names(VHostPath),
+ [ #binding{source = ?DEFAULT_EXCHANGE(VHostPath),
+ destination = DstQueue,
+ key = QName,
+ args = []}
+ || DstQueue = #resource{name = QName} <- DstQueues ].
+
+implicit_for_destination(DstQueue = #resource{kind = queue,
+ virtual_host = VHostPath,
+ name = QName}) ->
+ [#binding{source = ?DEFAULT_EXCHANGE(VHostPath),
+ destination = DstQueue,
+ key = QName,
+ args = []}];
+implicit_for_destination(_) ->
+ [].
+
+-spec list_for_source_and_destination
+ (rabbit_types:binding_source(), rabbit_types:binding_destination()) ->
+ bindings().
+
+list_for_source_and_destination(?DEFAULT_EXCHANGE(VHostPath),
+ #resource{kind = queue,
+ virtual_host = VHostPath,
+ name = QName} = DstQueue) ->
+ [#binding{source = ?DEFAULT_EXCHANGE(VHostPath),
+ destination = DstQueue,
+ key = QName,
+ args = []}];
+list_for_source_and_destination(SrcName, DstName) ->
+ mnesia:async_dirty(
+ fun() ->
+ Route = #route{binding = #binding{source = SrcName,
+ destination = DstName,
+ _ = '_'}},
+ [B || #route{binding = B} <- mnesia:match_object(rabbit_route,
+ Route, read)]
+ end).
+
+-spec info_keys() -> rabbit_types:info_keys().
+
+info_keys() -> ?INFO_KEYS.
+
+map(VHostPath, F) ->
+ %% TODO: there is scope for optimisation here, e.g. using a
+ %% cursor, parallelising the function invocation
+ lists:map(F, list(VHostPath)).
+
+infos(Items, B) -> [{Item, i(Item, B)} || Item <- Items].
+
+i(source_name, #binding{source = SrcName}) -> SrcName#resource.name;
+i(source_kind, #binding{source = SrcName}) -> SrcName#resource.kind;
+i(vhost, #binding{source = SrcName}) -> SrcName#resource.virtual_host;
+i(destination_name, #binding{destination = DstName}) -> DstName#resource.name;
+i(destination_kind, #binding{destination = DstName}) -> DstName#resource.kind;
+i(routing_key, #binding{key = RoutingKey}) -> RoutingKey;
+i(arguments, #binding{args = Arguments}) -> Arguments;
+i(Item, _) -> throw({bad_argument, Item}).
+
+-spec info(rabbit_types:binding()) -> rabbit_types:infos().
+
+info(B = #binding{}) -> infos(?INFO_KEYS, B).
+
+-spec info(rabbit_types:binding(), rabbit_types:info_keys()) ->
+ rabbit_types:infos().
+
+info(B = #binding{}, Items) -> infos(Items, B).
+
+-spec info_all(rabbit_types:vhost()) -> [rabbit_types:infos()].
+
+info_all(VHostPath) -> map(VHostPath, fun (B) -> info(B) end).
+
+-spec info_all(rabbit_types:vhost(), rabbit_types:info_keys()) ->
+ [rabbit_types:infos()].
+
+info_all(VHostPath, Items) -> map(VHostPath, fun (B) -> info(B, Items) end).
+
+-spec info_all(rabbit_types:vhost(), rabbit_types:info_keys(),
+ reference(), pid()) -> 'ok'.
+
+info_all(VHostPath, Items, Ref, AggregatorPid) ->
+ rabbit_control_misc:emitting_map(
+ AggregatorPid, Ref, fun(B) -> info(B, Items) end, list(VHostPath)).
+
+-spec has_for_source(rabbit_types:binding_source()) -> boolean().
+
+has_for_source(SrcName) ->
+ Match = #route{binding = #binding{source = SrcName, _ = '_'}},
+    %% we need to check for semi-durable routes (which subsume
+    %% durable routes) here too in case a bunch of routes to durable
+ %% queues have been removed temporarily as a result of a node
+ %% failure
+ contains(rabbit_route, Match) orelse
+ contains(rabbit_semi_durable_route, Match).
+
+-spec remove_for_source(rabbit_types:binding_source()) -> bindings().
+
+remove_for_source(SrcName) ->
+ lock_resource(SrcName),
+ Match = #route{binding = #binding{source = SrcName, _ = '_'}},
+ remove_routes(
+ lists:usort(
+ mnesia:dirty_match_object(rabbit_route, Match) ++
+ mnesia:dirty_match_object(rabbit_semi_durable_route, Match))).
+
+-spec remove_for_destination
+ (rabbit_types:binding_destination(), boolean()) -> deletions().
+
+remove_for_destination(DstName, OnlyDurable) ->
+ remove_for_destination(DstName, OnlyDurable, fun remove_routes/1).
+
+-spec remove_transient_for_destination
+ (rabbit_types:binding_destination()) -> deletions().
+
+remove_transient_for_destination(DstName) ->
+ remove_for_destination(DstName, false, fun remove_transient_routes/1).
+
+%%----------------------------------------------------------------------------
+
+durable(#exchange{durable = D}) -> D;
+durable(Q) when ?is_amqqueue(Q) ->
+ amqqueue:is_durable(Q).
+
+binding_action(Binding = #binding{source = SrcName,
+ destination = DstName,
+ args = Arguments}, Fun, ErrFun) ->
+ call_with_source_and_destination(
+ SrcName, DstName,
+ fun (Src, Dst) ->
+ SortedArgs = rabbit_misc:sort_field_table(Arguments),
+ Fun(Src, Dst, Binding#binding{args = SortedArgs})
+ end, ErrFun).
+
+sync_route(Route, true, true, Fun) ->
+ ok = Fun(rabbit_durable_route, Route, write),
+ sync_route(Route, false, true, Fun);
+
+sync_route(Route, false, true, Fun) ->
+ ok = Fun(rabbit_semi_durable_route, Route, write),
+ sync_route(Route, false, false, Fun);
+
+sync_route(Route, _SrcDurable, false, Fun) ->
+ sync_transient_route(Route, Fun).
+
+sync_transient_route(Route, Fun) ->
+ ok = Fun(rabbit_route, Route, write),
+ ok = Fun(rabbit_reverse_route, reverse_route(Route), write).
+
+call_with_source_and_destination(SrcName, DstName, Fun, ErrFun) ->
+ SrcTable = table_for_resource(SrcName),
+ DstTable = table_for_resource(DstName),
+ rabbit_misc:execute_mnesia_tx_with_tail(
+ fun () ->
+ case {mnesia:read({SrcTable, SrcName}),
+ mnesia:read({DstTable, DstName})} of
+ {[Src], [Dst]} -> Fun(Src, Dst);
+ {[], [_] } -> ErrFun([SrcName]);
+ {[_], [] } -> ErrFun([DstName]);
+ {[], [] } -> ErrFun([SrcName, DstName])
+ end
+ end).
+
+not_found_or_absent_errs(Names) ->
+ Errs = [not_found_or_absent(Name) || Name <- Names],
+ rabbit_misc:const({error, {resources_missing, Errs}}).
+
+absent_errs_only(Names) ->
+ Errs = [E || Name <- Names,
+ {absent, _Q, _Reason} = E <- [not_found_or_absent(Name)]],
+ rabbit_misc:const(case Errs of
+ [] -> ok;
+ _ -> {error, {resources_missing, Errs}}
+ end).
+
+table_for_resource(#resource{kind = exchange}) -> rabbit_exchange;
+table_for_resource(#resource{kind = queue}) -> rabbit_queue.
+
+not_found_or_absent(#resource{kind = exchange} = Name) ->
+ {not_found, Name};
+not_found_or_absent(#resource{kind = queue} = Name) ->
+ case rabbit_amqqueue:not_found_or_absent(Name) of
+ not_found -> {not_found, Name};
+ {absent, _Q, _Reason} = R -> R
+ end.
+
+contains(Table, MatchHead) ->
+ continue(mnesia:select(Table, [{MatchHead, [], ['$_']}], 1, read)).
+
+continue('$end_of_table') -> false;
+continue({[_|_], _}) -> true;
+continue({[], Continuation}) -> continue(mnesia:select(Continuation)).
+
+remove_routes(Routes) ->
+ %% This partitioning allows us to suppress unnecessary delete
+ %% operations on disk tables, which require an fsync.
+ {RamRoutes, DiskRoutes} =
+ lists:partition(fun (R) -> mnesia:read(
+ rabbit_durable_route, R#route.binding, read) == [] end,
+ Routes),
+ {RamOnlyRoutes, SemiDurableRoutes} =
+ lists:partition(fun (R) -> mnesia:read(
+ rabbit_semi_durable_route, R#route.binding, read) == [] end,
+ RamRoutes),
+    %% Of course the destination might not really be durable but it's
+    %% just as easy to try to delete it from the semi-durable table
+    %% as to check first
+ [ok = sync_route(R, true, true, fun delete/3) ||
+ R <- DiskRoutes],
+ [ok = sync_route(R, false, true, fun delete/3) ||
+ R <- SemiDurableRoutes],
+ [ok = sync_route(R, false, false, fun delete/3) ||
+ R <- RamOnlyRoutes],
+ [R#route.binding || R <- Routes].
+
+
+delete(Tab, #route{binding = B}, LockKind) ->
+ mnesia:delete(Tab, B, LockKind);
+delete(Tab, #reverse_route{reverse_binding = B}, LockKind) ->
+ mnesia:delete(Tab, B, LockKind).
+
+remove_transient_routes(Routes) ->
+ [begin
+ ok = sync_transient_route(R, fun delete/3),
+ R#route.binding
+ end || R <- Routes].
+
+remove_for_destination(DstName, OnlyDurable, Fun) ->
+ lock_resource(DstName),
+ MatchFwd = #route{binding = #binding{destination = DstName, _ = '_'}},
+ MatchRev = reverse_route(MatchFwd),
+ Routes = case OnlyDurable of
+ false ->
+ [reverse_route(R) ||
+ R <- mnesia:dirty_match_object(
+ rabbit_reverse_route, MatchRev)];
+ true -> lists:usort(
+ mnesia:dirty_match_object(
+ rabbit_durable_route, MatchFwd) ++
+ mnesia:dirty_match_object(
+ rabbit_semi_durable_route, MatchFwd))
+ end,
+ Bindings = Fun(Routes),
+ group_bindings_fold(fun maybe_auto_delete/4, new_deletions(),
+ lists:keysort(#binding.source, Bindings), OnlyDurable).
+
+%% Instead of locking the entire table on remove operations, we lock
+%% only the affected resource.
+lock_resource(Name) -> lock_resource(Name, write).
+
+lock_resource(Name, LockKind) ->
+ mnesia:lock({global, Name, mnesia:table_info(rabbit_route, where_to_write)},
+ LockKind).
+
+%% Requires that its input binding list is sorted in exchange-name
+%% order, so that the grouping of bindings (for passing to the
+%% supplied fold fun) works properly.
+group_bindings_fold(_Fun, Acc, [], _OnlyDurable) ->
+ Acc;
+group_bindings_fold(Fun, Acc, [B = #binding{source = SrcName} | Bs],
+ OnlyDurable) ->
+ group_bindings_fold(Fun, SrcName, Acc, Bs, [B], OnlyDurable).
+
+group_bindings_fold(
+ Fun, SrcName, Acc, [B = #binding{source = SrcName} | Bs], Bindings,
+ OnlyDurable) ->
+ group_bindings_fold(Fun, SrcName, Acc, Bs, [B | Bindings], OnlyDurable);
+group_bindings_fold(Fun, SrcName, Acc, Removed, Bindings, OnlyDurable) ->
+ %% Either Removed is [], or its head has a non-matching SrcName.
+ group_bindings_fold(Fun, Fun(SrcName, Bindings, Acc, OnlyDurable), Removed,
+ OnlyDurable).
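+
+%% Illustration: for sorted input [B1, B2, B3] where B1 and B2 share
+%% source X1 and B3 has source X2, Fun is applied twice:
+%% Fun(X1, [B2, B1], Acc, OnlyDurable) and then Fun(X2, [B3], _, OnlyDurable);
+%% bindings accumulate in reverse order within each group.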
+
+maybe_auto_delete(XName, Bindings, Deletions, OnlyDurable) ->
+ {Entry, Deletions1} =
+ case mnesia:read({case OnlyDurable of
+ true -> rabbit_durable_exchange;
+ false -> rabbit_exchange
+ end, XName}) of
+ [] -> {{undefined, not_deleted, Bindings}, Deletions};
+ [X] -> case rabbit_exchange:maybe_auto_delete(X, OnlyDurable) of
+ not_deleted ->
+ {{X, not_deleted, Bindings}, Deletions};
+ {deleted, Deletions2} ->
+ {{X, deleted, Bindings},
+ combine_deletions(Deletions, Deletions2)}
+ end
+ end,
+ add_deletion(XName, Entry, Deletions1).
+
+reverse_route(#route{binding = Binding}) ->
+ #reverse_route{reverse_binding = reverse_binding(Binding)};
+
+reverse_route(#reverse_route{reverse_binding = Binding}) ->
+ #route{binding = reverse_binding(Binding)}.
+
+reverse_binding(#reverse_binding{source = SrcName,
+ destination = DstName,
+ key = Key,
+ args = Args}) ->
+ #binding{source = SrcName,
+ destination = DstName,
+ key = Key,
+ args = Args};
+
+reverse_binding(#binding{source = SrcName,
+ destination = DstName,
+ key = Key,
+ args = Args}) ->
+ #reverse_binding{source = SrcName,
+ destination = DstName,
+ key = Key,
+ args = Args}.
+
+%% ----------------------------------------------------------------------------
+%% Binding / exchange deletion abstraction API
+%% ----------------------------------------------------------------------------
+
+anything_but( NotThis, NotThis, NotThis) -> NotThis;
+anything_but( NotThis, NotThis, This) -> This;
+anything_but( NotThis, This, NotThis) -> This;
+anything_but(_NotThis, This, This) -> This.
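+
+%% e.g. anything_but(undefined, undefined, X) returns X and
+%% anything_but(not_deleted, deleted, not_deleted) returns 'deleted';
+%% merge_entry/2 below relies on this to keep the "interesting" value.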
+
+-spec new_deletions() -> deletions().
+
+new_deletions() -> dict:new().
+
+-spec add_deletion
+ (rabbit_exchange:name(),
+ {'undefined' | rabbit_types:exchange(),
+ 'deleted' | 'not_deleted',
+ bindings()},
+ deletions()) ->
+ deletions().
+
+add_deletion(XName, Entry, Deletions) ->
+ dict:update(XName, fun (Entry1) -> merge_entry(Entry1, Entry) end,
+ Entry, Deletions).
+
+-spec combine_deletions(deletions(), deletions()) -> deletions().
+
+combine_deletions(Deletions1, Deletions2) ->
+ dict:merge(fun (_XName, Entry1, Entry2) -> merge_entry(Entry1, Entry2) end,
+ Deletions1, Deletions2).
+
+merge_entry({X1, Deleted1, Bindings1}, {X2, Deleted2, Bindings2}) ->
+ {anything_but(undefined, X1, X2),
+ anything_but(not_deleted, Deleted1, Deleted2),
+ [Bindings1 | Bindings2]}.
+
+-spec process_deletions(deletions(), rabbit_types:username()) -> rabbit_misc:thunk('ok').
+
+process_deletions(Deletions, ActingUser) ->
+ AugmentedDeletions =
+ dict:map(fun (_XName, {X, deleted, Bindings}) ->
+ Bs = lists:flatten(Bindings),
+ x_callback(transaction, X, delete, Bs),
+ {X, deleted, Bs, none};
+ (_XName, {X, not_deleted, Bindings}) ->
+ Bs = lists:flatten(Bindings),
+ x_callback(transaction, X, remove_bindings, Bs),
+ {X, not_deleted, Bs, rabbit_exchange:serial(X)}
+ end, Deletions),
+ fun() ->
+ dict:fold(fun (XName, {X, deleted, Bs, Serial}, ok) ->
+ ok = rabbit_event:notify(
+ exchange_deleted,
+ [{name, XName},
+ {user_who_performed_action, ActingUser}]),
+ del_notify(Bs, ActingUser),
+ x_callback(Serial, X, delete, Bs);
+ (_XName, {X, not_deleted, Bs, Serial}, ok) ->
+ del_notify(Bs, ActingUser),
+ x_callback(Serial, X, remove_bindings, Bs)
+ end, ok, AugmentedDeletions)
+ end.
+
+del_notify(Bs, ActingUser) -> [rabbit_event:notify(
+ binding_deleted,
+ info(B) ++ [{user_who_performed_action, ActingUser}])
+ || B <- Bs].
+
+x_callback(Serial, X, F, Bs) ->
+ ok = rabbit_exchange:callback(X, F, Serial, [X, Bs]).
diff --git a/deps/rabbit/src/rabbit_boot_steps.erl b/deps/rabbit/src/rabbit_boot_steps.erl
new file mode 100644
index 0000000000..f87448edb7
--- /dev/null
+++ b/deps/rabbit/src/rabbit_boot_steps.erl
@@ -0,0 +1,91 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_boot_steps).
+
+-export([run_boot_steps/0, run_boot_steps/1, run_cleanup_steps/1]).
+-export([find_steps/0, find_steps/1]).
+
+run_boot_steps() ->
+ run_boot_steps(loaded_applications()).
+
+run_boot_steps(Apps) ->
+ [begin
+ rabbit_log:info("Running boot step ~s defined by app ~s", [Step, App]),
+ ok = run_step(Attrs, mfa)
+ end || {App, Step, Attrs} <- find_steps(Apps)],
+ ok.
+
+run_cleanup_steps(Apps) ->
+ [run_step(Attrs, cleanup) || {_, _, Attrs} <- find_steps(Apps)],
+ ok.
+
+loaded_applications() ->
+ [App || {App, _, _} <- application:loaded_applications()].
+
+find_steps() ->
+ find_steps(loaded_applications()).
+
+find_steps(Apps) ->
+ All = sort_boot_steps(rabbit_misc:all_module_attributes(rabbit_boot_step)),
+ [Step || {App, _, _} = Step <- All, lists:member(App, Apps)].
+
+run_step(Attributes, AttributeName) ->
+ [begin
+ rabbit_log:debug("Applying MFA: M = ~s, F = ~s, A = ~p",
+ [M, F, A]),
+ case apply(M,F,A) of
+ ok -> ok;
+ {error, Reason} -> exit({error, Reason})
+ end
+ end
+ || {Key, {M,F,A}} <- Attributes,
+ Key =:= AttributeName],
+ ok.
+
+vertices({AppName, _Module, Steps}) ->
+ [{StepName, {AppName, StepName, Atts}} || {StepName, Atts} <- Steps].
+
+edges({_AppName, _Module, Steps}) ->
+ EnsureList = fun (L) when is_list(L) -> L;
+ (T) -> [T]
+ end,
+ [case Key of
+ requires -> {StepName, OtherStep};
+ enables -> {OtherStep, StepName}
+ end || {StepName, Atts} <- Steps,
+ {Key, OtherStepOrSteps} <- Atts,
+ OtherStep <- EnsureList(OtherStepOrSteps),
+ Key =:= requires orelse Key =:= enables].
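+
+%% Boot steps are declared via module attributes; a hypothetical
+%% example of the shape consumed by vertices/1 and edges/1:
+%%
+%%   -rabbit_boot_step({my_step,
+%%                      [{description, "example step"},
+%%                       {mfa,         {my_mod, start, []}},
+%%                       {requires,    pre_boot},
+%%                       {enables,     external_infrastructure}]}).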
+
+sort_boot_steps(UnsortedSteps) ->
+ case rabbit_misc:build_acyclic_graph(fun vertices/1, fun edges/1,
+ UnsortedSteps) of
+ {ok, G} ->
+ %% Use topological sort to find a consistent ordering (if
+ %% there is one, otherwise fail).
+ SortedSteps = lists:reverse(
+ [begin
+ {StepName, Step} = digraph:vertex(G,
+ StepName),
+ Step
+ end || StepName <- digraph_utils:topsort(G)]),
+ digraph:delete(G),
+ %% Check that all mentioned {M,F,A} triples are exported.
+ case [{StepName, {M,F,A}} ||
+ {_App, StepName, Attributes} <- SortedSteps,
+ {mfa, {M,F,A}} <- Attributes,
+ code:ensure_loaded(M) =/= {module, M} orelse
+ not erlang:function_exported(M, F, length(A))] of
+ [] -> SortedSteps;
+ MissingFns -> exit({boot_functions_not_exported, MissingFns})
+ end;
+ {error, {vertex, duplicate, StepName}} ->
+ exit({duplicate_boot_step, StepName});
+ {error, {edge, Reason, From, To}} ->
+ exit({invalid_boot_step_dependency, From, To, Reason})
+ end.
diff --git a/deps/rabbit/src/rabbit_channel.erl b/deps/rabbit/src/rabbit_channel.erl
new file mode 100644
index 0000000000..8e7828a7c0
--- /dev/null
+++ b/deps/rabbit/src/rabbit_channel.erl
@@ -0,0 +1,2797 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_channel).
+
+%% Transitional step until we can require Erlang/OTP 21 and
+%% use the now recommended try/catch syntax for obtaining the stack trace.
+-compile(nowarn_deprecated_function).
+
+%% rabbit_channel processes represent AMQP 0-9-1 channels.
+%%
+%% Connections parse protocol frames coming from clients and
+%% dispatch them to channel processes.
+%% Channels are responsible for implementing the logic behind
+%% the various protocol methods, involving other processes as
+%% needed:
+%%
+%% * Routing messages (using functions in various exchange type
+%% modules) to queue processes.
+%% * Managing queues, exchanges, and bindings.
+%% * Keeping track of consumers
+%% * Keeping track of unacknowledged deliveries to consumers
+%% * Keeping track of publisher confirms
+%% * Transaction management
+%% * Authorisation (enforcing permissions)
+%% * Publishing trace events if tracing is enabled
+%%
+%% Every channel has a number of dependent processes:
+%%
+%% * A writer which is responsible for sending frames to clients.
+%% * A limiter which controls how many messages can be delivered
+%% to consumers according to active QoS prefetch and internal
+%% flow control logic.
+%%
+%% Channels are also aware of their connection's queue collector.
+%% When a queue is declared as exclusive on a channel, the channel
+%% will notify the queue collector of that queue.
+
+-include_lib("rabbit_common/include/rabbit_framing.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+-include_lib("rabbit_common/include/rabbit_misc.hrl").
+
+-include("amqqueue.hrl").
+
+-behaviour(gen_server2).
+
+-export([start_link/11, start_link/12, do/2, do/3, do_flow/3, flush/1, shutdown/1]).
+-export([send_command/2, deliver/4, deliver_reply/2,
+ send_credit_reply/2, send_drained/2]).
+-export([list/0, info_keys/0, info/1, info/2, info_all/0, info_all/1,
+ emit_info_all/4, info_local/1]).
+-export([refresh_config_local/0, ready_for_close/1]).
+-export([refresh_interceptors/0]).
+-export([force_event_refresh/1]).
+-export([update_user_state/2]).
+
+-export([init/1, terminate/2, code_change/3, handle_call/3, handle_cast/2,
+ handle_info/2, handle_pre_hibernate/1, handle_post_hibernate/1,
+ prioritise_call/4, prioritise_cast/3, prioritise_info/3,
+ format_message_queue/2]).
+
+%% Internal
+-export([list_local/0, emit_info_local/3, deliver_reply_local/3]).
+-export([get_vhost/1, get_user/1]).
+%% For testing
+-export([build_topic_variable_map/3]).
+-export([list_queue_states/1, get_max_message_size/0]).
+
+%% Mgmt HTTP API refactor
+-export([handle_method/6]).
+
+-record(conf, {
+ %% starting | running | flow | closing
+ state,
+ %% same as reader's protocol. Used when instantiating
+ %% (protocol) exceptions.
+ protocol,
+ %% channel number
+ channel,
+ %% reader process
+ reader_pid,
+ %% writer process
+ writer_pid,
+ %%
+ conn_pid,
+ %% same as reader's name, see #v1.name
+ %% in rabbit_reader
+ conn_name,
+           %% the channel's originating source, e.g. rabbit_reader |
+           %% rabbit_direct | undefined, or any other
+           %% channel-creating/spawning entity
+ source,
+ %% same as #v1.user in the reader, used in
+ %% authorisation checks
+ user,
+           %% same as the connection's virtual host in the reader
+ virtual_host,
+ %% when queue.bind's queue field is empty,
+ %% this name will be used instead
+ most_recently_declared_queue,
+           %% when a queue is declared as exclusive, the queue
+           %% collector must be notified;
+           %% see rabbit_queue_collector for more info
+ queue_collector_pid,
+
+ %% same as capabilities in the reader
+ capabilities,
+ %% tracing exchange resource if tracing is enabled,
+ %% 'none' otherwise
+ trace_state,
+ consumer_prefetch,
+ %% Message content size limit
+ max_message_size,
+ consumer_timeout,
+ authz_context,
+           %% defines how often GC will be executed
+ writer_gc_threshold
+ }).
+
+-record(pending_ack, {delivery_tag,
+ tag,
+ delivered_at,
+ queue, %% queue name
+ msg_id}).
+
+-record(ch, {cfg :: #conf{},
+ %% limiter state, see rabbit_limiter
+ limiter,
+             %% none | {Msgs, Acks} | committing | failed
+ tx,
+ %% (consumer) delivery tag sequence
+ next_tag,
+ %% messages pending consumer acknowledgement
+ unacked_message_q,
+ %% queue processes are monitored to update
+ %% queue names
+ queue_monitors,
+ %% a map of consumer tags to
+ %% consumer details: #amqqueue record, acknowledgement mode,
+ %% consumer exclusivity, etc
+ consumer_mapping,
+ %% a map of queue names to consumer tag lists
+ queue_consumers,
+ %% timer used to emit statistics
+ stats_timer,
+ %% are publisher confirms enabled for this channel?
+ confirm_enabled,
+ %% publisher confirm delivery tag sequence
+ publish_seqno,
+ %% an unconfirmed_messages data structure used to track unconfirmed
+ %% (to publishers) messages
+ unconfirmed,
+ %% a list of tags for published messages that were
+ %% delivered but are yet to be confirmed to the client
+ confirmed,
+ %% a list of tags for published messages that were
+ %% rejected but are yet to be sent to the client
+ rejected,
+ %% used by "one shot RPC" (amq.
+ reply_consumer,
+ %% flow | noflow, see rabbitmq-server#114
+ delivery_flow,
+ interceptor_state,
+ queue_states,
+ tick_timer
+ }).
+
+-define(QUEUE, lqueue).
+
+-define(MAX_PERMISSION_CACHE_SIZE, 12).
+
+-define(REFRESH_TIMEOUT, 15000).
+
+-define(STATISTICS_KEYS,
+ [reductions,
+ pid,
+ transactional,
+ confirm,
+ consumer_count,
+ messages_unacknowledged,
+ messages_unconfirmed,
+ messages_uncommitted,
+ acks_uncommitted,
+ pending_raft_commands,
+ prefetch_count,
+ global_prefetch_count,
+ state,
+ garbage_collection]).
+
+
+-define(CREATION_EVENT_KEYS,
+ [pid,
+ name,
+ connection,
+ number,
+ user,
+ vhost,
+ user_who_performed_action]).
+
+-define(INFO_KEYS, ?CREATION_EVENT_KEYS ++ ?STATISTICS_KEYS -- [pid]).
+
+-define(INCR_STATS(Type, Key, Inc, Measure, State),
+ case rabbit_event:stats_level(State, #ch.stats_timer) of
+ fine ->
+ rabbit_core_metrics:channel_stats(Type, Measure, {self(), Key}, Inc),
+ %% Keys in the process dictionary are used to clean up the core metrics
+ put({Type, Key}, none);
+ _ ->
+ ok
+ end).
+
+-define(INCR_STATS(Type, Key, Inc, Measure),
+ begin
+ rabbit_core_metrics:channel_stats(Type, Measure, {self(), Key}, Inc),
+ %% Keys in the process dictionary are used to clean up the core metrics
+ put({Type, Key}, none)
+ end).
+
+%%----------------------------------------------------------------------------
+
+-export_type([channel_number/0]).
+
+-type channel_number() :: non_neg_integer().
+
+-export_type([channel/0]).
+
+-type channel() :: #ch{}.
+
+%%----------------------------------------------------------------------------
+
+-spec start_link
+ (channel_number(), pid(), pid(), pid(), string(), rabbit_types:protocol(),
+ rabbit_types:user(), rabbit_types:vhost(), rabbit_framing:amqp_table(),
+ pid(), pid()) ->
+ rabbit_types:ok_pid_or_error().
+
+start_link(Channel, ReaderPid, WriterPid, ConnPid, ConnName, Protocol, User,
+ VHost, Capabilities, CollectorPid, Limiter) ->
+ start_link(Channel, ReaderPid, WriterPid, ConnPid, ConnName, Protocol, User,
+ VHost, Capabilities, CollectorPid, Limiter, undefined).
+
+-spec start_link
+ (channel_number(), pid(), pid(), pid(), string(), rabbit_types:protocol(),
+ rabbit_types:user(), rabbit_types:vhost(), rabbit_framing:amqp_table(),
+ pid(), pid(), any()) ->
+ rabbit_types:ok_pid_or_error().
+
+start_link(Channel, ReaderPid, WriterPid, ConnPid, ConnName, Protocol, User,
+ VHost, Capabilities, CollectorPid, Limiter, AmqpParams) ->
+ gen_server2:start_link(
+ ?MODULE, [Channel, ReaderPid, WriterPid, ConnPid, ConnName, Protocol,
+ User, VHost, Capabilities, CollectorPid, Limiter, AmqpParams], []).
+
+-spec do(pid(), rabbit_framing:amqp_method_record()) -> 'ok'.
+
+do(Pid, Method) ->
+ rabbit_channel_common:do(Pid, Method).
+
+-spec do
+ (pid(), rabbit_framing:amqp_method_record(),
+ rabbit_types:maybe(rabbit_types:content())) ->
+ 'ok'.
+
+do(Pid, Method, Content) ->
+ rabbit_channel_common:do(Pid, Method, Content).
+
+-spec do_flow
+ (pid(), rabbit_framing:amqp_method_record(),
+ rabbit_types:maybe(rabbit_types:content())) ->
+ 'ok'.
+
+do_flow(Pid, Method, Content) ->
+ rabbit_channel_common:do_flow(Pid, Method, Content).
+
+-spec flush(pid()) -> 'ok'.
+
+flush(Pid) ->
+ gen_server2:call(Pid, flush, infinity).
+
+-spec shutdown(pid()) -> 'ok'.
+
+shutdown(Pid) ->
+ gen_server2:cast(Pid, terminate).
+
+-spec send_command(pid(), rabbit_framing:amqp_method_record()) -> 'ok'.
+
+send_command(Pid, Msg) ->
+ gen_server2:cast(Pid, {command, Msg}).
+
+-spec deliver
+ (pid(), rabbit_types:ctag(), boolean(), rabbit_amqqueue:qmsg()) -> 'ok'.
+
+deliver(Pid, ConsumerTag, AckRequired, Msg) ->
+ gen_server2:cast(Pid, {deliver, ConsumerTag, AckRequired, Msg}).
+
+-spec deliver_reply(binary(), rabbit_types:delivery()) -> 'ok'.
+
+deliver_reply(<<"amq.rabbitmq.reply-to.", Rest/binary>>, Delivery) ->
+ case decode_fast_reply_to(Rest) of
+ {ok, Pid, Key} ->
+ delegate:invoke_no_result(
+ Pid, {?MODULE, deliver_reply_local, [Key, Delivery]});
+ error ->
+ ok
+ end.
+
+%% We want to ensure people can't use this mechanism to send a message
+%% to an arbitrary process and kill it!
+
+-spec deliver_reply_local(pid(), binary(), rabbit_types:delivery()) -> 'ok'.
+
+deliver_reply_local(Pid, Key, Delivery) ->
+ case pg_local:in_group(rabbit_channels, Pid) of
+ true -> gen_server2:cast(Pid, {deliver_reply, Key, Delivery});
+ false -> ok
+ end.
+
+declare_fast_reply_to(<<"amq.rabbitmq.reply-to">>) ->
+ exists;
+declare_fast_reply_to(<<"amq.rabbitmq.reply-to.", Rest/binary>>) ->
+ case decode_fast_reply_to(Rest) of
+ {ok, Pid, Key} ->
+ Msg = {declare_fast_reply_to, Key},
+ rabbit_misc:with_exit_handler(
+ rabbit_misc:const(not_found),
+ fun() -> gen_server2:call(Pid, Msg, infinity) end);
+ error ->
+ not_found
+ end;
+declare_fast_reply_to(_) ->
+ not_found.
+
+decode_fast_reply_to(Rest) ->
+ case string:tokens(binary_to_list(Rest), ".") of
+ [PidEnc, Key] -> Pid = binary_to_term(base64:decode(PidEnc)),
+ {ok, Pid, Key};
+ _ -> error
+ end.
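+
+%% For illustration, Rest is expected to look like
+%% "<base64(term_to_binary(Pid))>.<Key>"; decode_fast_reply_to/1 splits
+%% on the "." and decodes the pid, yielding {ok, Pid, Key}, or 'error'
+%% if the address does not have that form.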
+
+-spec send_credit_reply(pid(), non_neg_integer()) -> 'ok'.
+
+send_credit_reply(Pid, Len) ->
+ gen_server2:cast(Pid, {send_credit_reply, Len}).
+
+-spec send_drained(pid(), [{rabbit_types:ctag(), non_neg_integer()}]) -> 'ok'.
+
+send_drained(Pid, CTagCredit) ->
+ gen_server2:cast(Pid, {send_drained, CTagCredit}).
+
+-spec list() -> [pid()].
+
+list() ->
+ Nodes = rabbit_nodes:all_running(),
+ rabbit_misc:append_rpc_all_nodes(Nodes, rabbit_channel, list_local, [], ?RPC_TIMEOUT).
+
+-spec list_local() -> [pid()].
+
+list_local() ->
+ pg_local:get_members(rabbit_channels).
+
+-spec info_keys() -> rabbit_types:info_keys().
+
+info_keys() -> ?INFO_KEYS.
+
+-spec info(pid()) -> rabbit_types:infos().
+
+info(Pid) ->
+ {Timeout, Deadline} = get_operation_timeout_and_deadline(),
+ try
+ case gen_server2:call(Pid, {info, Deadline}, Timeout) of
+ {ok, Res} -> Res;
+ {error, Error} -> throw(Error)
+ end
+ catch
+ exit:{timeout, _} ->
+ rabbit_log:error("Timed out getting channel ~p info", [Pid]),
+ throw(timeout)
+ end.
+
+-spec info(pid(), rabbit_types:info_keys()) -> rabbit_types:infos().
+
+info(Pid, Items) ->
+ {Timeout, Deadline} = get_operation_timeout_and_deadline(),
+ try
+ case gen_server2:call(Pid, {{info, Items}, Deadline}, Timeout) of
+ {ok, Res} -> Res;
+ {error, Error} -> throw(Error)
+ end
+ catch
+ exit:{timeout, _} ->
+ rabbit_log:error("Timed out getting channel ~p info", [Pid]),
+ throw(timeout)
+ end.
+
+-spec info_all() -> [rabbit_types:infos()].
+
+info_all() ->
+ rabbit_misc:filter_exit_map(fun (C) -> info(C) end, list()).
+
+-spec info_all(rabbit_types:info_keys()) -> [rabbit_types:infos()].
+
+info_all(Items) ->
+ rabbit_misc:filter_exit_map(fun (C) -> info(C, Items) end, list()).
+
+info_local(Items) ->
+ rabbit_misc:filter_exit_map(fun (C) -> info(C, Items) end, list_local()).
+
+emit_info_all(Nodes, Items, Ref, AggregatorPid) ->
+ Pids = [ spawn_link(Node, rabbit_channel, emit_info_local, [Items, Ref, AggregatorPid]) || Node <- Nodes ],
+ rabbit_control_misc:await_emitters_termination(Pids).
+
+emit_info_local(Items, Ref, AggregatorPid) ->
+ emit_info(list_local(), Items, Ref, AggregatorPid).
+
+emit_info(PidList, InfoItems, Ref, AggregatorPid) ->
+ rabbit_control_misc:emitting_map_with_exit_handler(
+ AggregatorPid, Ref, fun(C) -> info(C, InfoItems) end, PidList).
+
+-spec refresh_config_local() -> 'ok'.
+
+refresh_config_local() ->
+ rabbit_misc:upmap(
+ fun (C) ->
+ try
+ gen_server2:call(C, refresh_config, infinity)
+ catch _:Reason ->
+ rabbit_log:error("Failed to refresh channel config "
+ "for channel ~p. Reason ~p",
+ [C, Reason])
+ end
+ end,
+ list_local()),
+ ok.
+
+refresh_interceptors() ->
+ rabbit_misc:upmap(
+ fun (C) ->
+ try
+ gen_server2:call(C, refresh_interceptors, ?REFRESH_TIMEOUT)
+ catch _:Reason ->
+ rabbit_log:error("Failed to refresh channel interceptors "
+ "for channel ~p. Reason ~p",
+ [C, Reason])
+ end
+ end,
+ list_local()),
+ ok.
+
+-spec ready_for_close(pid()) -> 'ok'.
+
+ready_for_close(Pid) ->
+ rabbit_channel_common:ready_for_close(Pid).
+
+-spec force_event_refresh(reference()) -> 'ok'.
+
+% Note: https://www.pivotaltracker.com/story/show/166962656
+% This event is necessary for the stats timer to be initialized with
+% the correct values once the management agent has started
+force_event_refresh(Ref) ->
+ [gen_server2:cast(C, {force_event_refresh, Ref}) || C <- list()],
+ ok.
+
+list_queue_states(Pid) ->
+ gen_server2:call(Pid, list_queue_states).
+
+-spec update_user_state(pid(), rabbit_types:auth_user()) -> 'ok' | {error, channel_terminated}.
+
+update_user_state(Pid, UserState) when is_pid(Pid) ->
+ case erlang:is_process_alive(Pid) of
+ true -> Pid ! {update_user_state, UserState},
+ ok;
+ false -> {error, channel_terminated}
+ end.
+
+%%---------------------------------------------------------------------------
+
+init([Channel, ReaderPid, WriterPid, ConnPid, ConnName, Protocol, User, VHost,
+ Capabilities, CollectorPid, LimiterPid, AmqpParams]) ->
+ process_flag(trap_exit, true),
+ ?LG_PROCESS_TYPE(channel),
+ ?store_proc_name({ConnName, Channel}),
+ ok = pg_local:join(rabbit_channels, self()),
+ Flow = case rabbit_misc:get_env(rabbit, mirroring_flow_control, true) of
+ true -> flow;
+ false -> noflow
+ end,
+ {ok, {Global, Prefetch}} = application:get_env(rabbit, default_consumer_prefetch),
+ Limiter0 = rabbit_limiter:new(LimiterPid),
+ Limiter = case {Global, Prefetch} of
+ {true, 0} ->
+ rabbit_limiter:unlimit_prefetch(Limiter0);
+ {true, _} ->
+ rabbit_limiter:limit_prefetch(Limiter0, Prefetch, 0);
+ _ ->
+ Limiter0
+ end,
+ %% Process dictionary is used here because permission cache already uses it. MK.
+ put(permission_cache_can_expire, rabbit_access_control:permission_cache_can_expire(User)),
+ MaxMessageSize = get_max_message_size(),
+ ConsumerTimeout = get_consumer_timeout(),
+ OptionalVariables = extract_variable_map_from_amqp_params(AmqpParams),
+ {ok, GCThreshold} = application:get_env(rabbit, writer_gc_threshold),
+ State = #ch{cfg = #conf{state = starting,
+ protocol = Protocol,
+ channel = Channel,
+ reader_pid = ReaderPid,
+ writer_pid = WriterPid,
+ conn_pid = ConnPid,
+ conn_name = ConnName,
+ user = User,
+ virtual_host = VHost,
+ most_recently_declared_queue = <<>>,
+ queue_collector_pid = CollectorPid,
+ capabilities = Capabilities,
+ trace_state = rabbit_trace:init(VHost),
+ consumer_prefetch = Prefetch,
+ max_message_size = MaxMessageSize,
+ consumer_timeout = ConsumerTimeout,
+ authz_context = OptionalVariables,
+ writer_gc_threshold = GCThreshold
+ },
+ limiter = Limiter,
+ tx = none,
+ next_tag = 1,
+ unacked_message_q = ?QUEUE:new(),
+ queue_monitors = pmon:new(),
+ consumer_mapping = #{},
+ queue_consumers = #{},
+ confirm_enabled = false,
+ publish_seqno = 1,
+ unconfirmed = rabbit_confirms:init(),
+ rejected = [],
+ confirmed = [],
+ reply_consumer = none,
+ delivery_flow = Flow,
+ interceptor_state = undefined,
+ queue_states = rabbit_queue_type:init()
+ },
+ State1 = State#ch{
+ interceptor_state = rabbit_channel_interceptor:init(State)},
+ State2 = rabbit_event:init_stats_timer(State1, #ch.stats_timer),
+ Infos = infos(?CREATION_EVENT_KEYS, State2),
+ rabbit_core_metrics:channel_created(self(), Infos),
+ rabbit_event:notify(channel_created, Infos),
+ rabbit_event:if_enabled(State2, #ch.stats_timer,
+ fun() -> emit_stats(State2) end),
+ put_operation_timeout(),
+ State3 = init_tick_timer(State2),
+ {ok, State3, hibernate,
+ {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, ?DESIRED_HIBERNATE}}.
+
+prioritise_call(Msg, _From, _Len, _State) ->
+ case Msg of
+ info -> 9;
+ {info, _Items} -> 9;
+ _ -> 0
+ end.
+
+prioritise_cast(Msg, _Len, _State) ->
+ case Msg of
+ {confirm, _MsgSeqNos, _QPid} -> 5;
+ {reject_publish, _MsgSeqNos, _QPid} -> 5;
+ {queue_event, _, {confirm, _MsgSeqNos, _QPid}} -> 5;
+ {queue_event, _, {reject_publish, _MsgSeqNos, _QPid}} -> 5;
+ _ -> 0
+ end.
+
+prioritise_info(Msg, _Len, _State) ->
+ case Msg of
+ emit_stats -> 7;
+ _ -> 0
+ end.
+
+handle_call(flush, _From, State) ->
+ reply(ok, State);
+
+handle_call({info, Deadline}, _From, State) ->
+ try
+ reply({ok, infos(?INFO_KEYS, Deadline, State)}, State)
+ catch
+ Error ->
+ reply({error, Error}, State)
+ end;
+
+handle_call({{info, Items}, Deadline}, _From, State) ->
+ try
+ reply({ok, infos(Items, Deadline, State)}, State)
+ catch
+ Error ->
+ reply({error, Error}, State)
+ end;
+
+handle_call(refresh_config, _From,
+ State = #ch{cfg = #conf{virtual_host = VHost} = Cfg}) ->
+ reply(ok, State#ch{cfg = Cfg#conf{trace_state = rabbit_trace:init(VHost)}});
+
+handle_call(refresh_interceptors, _From, State) ->
+ IState = rabbit_channel_interceptor:init(State),
+ reply(ok, State#ch{interceptor_state = IState});
+
+handle_call({declare_fast_reply_to, Key}, _From,
+ State = #ch{reply_consumer = Consumer}) ->
+ reply(case Consumer of
+ {_, _, Key} -> exists;
+ _ -> not_found
+ end, State);
+
+handle_call(list_queue_states, _From, State = #ch{queue_states = QueueStates}) ->
+ %% For testing of cleanup only
+ %% HACK
+ {reply, maps:keys(element(2, QueueStates)), State};
+
+handle_call(_Request, _From, State) ->
+ noreply(State).
+
+handle_cast({method, Method, Content, Flow},
+ State = #ch{cfg = #conf{reader_pid = Reader},
+ interceptor_state = IState}) ->
+ case Flow of
+ %% We are going to process a message from the rabbit_reader
+ %% process, so here we ack it. In this case we are accessing
+ %% the rabbit_channel process dictionary.
+ flow -> credit_flow:ack(Reader);
+ noflow -> ok
+ end,
+ try handle_method(rabbit_channel_interceptor:intercept_in(
+ expand_shortcuts(Method, State), Content, IState),
+ State) of
+ {reply, Reply, NewState} ->
+ ok = send(Reply, NewState),
+ noreply(NewState);
+ {noreply, NewState} ->
+ noreply(NewState);
+ stop ->
+ {stop, normal, State}
+ catch
+ exit:Reason = #amqp_error{} ->
+ MethodName = rabbit_misc:method_record_type(Method),
+ handle_exception(Reason#amqp_error{method = MethodName}, State);
+ _:Reason:Stacktrace ->
+ {stop, {Reason, Stacktrace}, State}
+ end;
+
+handle_cast(ready_for_close,
+ State = #ch{cfg = #conf{state = closing,
+ writer_pid = WriterPid}}) ->
+ ok = rabbit_writer:send_command_sync(WriterPid, #'channel.close_ok'{}),
+ {stop, normal, State};
+
+handle_cast(terminate, State = #ch{cfg = #conf{writer_pid = WriterPid}}) ->
+ ok = rabbit_writer:flush(WriterPid),
+ {stop, normal, State};
+
+handle_cast({command, #'basic.consume_ok'{consumer_tag = CTag} = Msg}, State) ->
+ ok = send(Msg, State),
+ noreply(consumer_monitor(CTag, State));
+
+handle_cast({command, Msg}, State) ->
+ ok = send(Msg, State),
+ noreply(State);
+
+handle_cast({deliver, _CTag, _AckReq, _Msg},
+ State = #ch{cfg = #conf{state = closing}}) ->
+ noreply(State);
+handle_cast({deliver, ConsumerTag, AckRequired, Msg}, State) ->
+ % TODO: handle as action
+ noreply(handle_deliver(ConsumerTag, AckRequired, Msg, State));
+
+handle_cast({deliver_reply, _K, _Del},
+ State = #ch{cfg = #conf{state = closing}}) ->
+ noreply(State);
+handle_cast({deliver_reply, _K, _Del}, State = #ch{reply_consumer = none}) ->
+ noreply(State);
+handle_cast({deliver_reply, Key, #delivery{message =
+ #basic_message{exchange_name = ExchangeName,
+ routing_keys = [RoutingKey | _CcRoutes],
+ content = Content}}},
+ State = #ch{cfg = #conf{writer_pid = WriterPid},
+ next_tag = DeliveryTag,
+ reply_consumer = {ConsumerTag, _Suffix, Key}}) ->
+ ok = rabbit_writer:send_command(
+ WriterPid,
+ #'basic.deliver'{consumer_tag = ConsumerTag,
+ delivery_tag = DeliveryTag,
+ redelivered = false,
+ exchange = ExchangeName#resource.name,
+ routing_key = RoutingKey},
+ Content),
+ noreply(State);
+handle_cast({deliver_reply, _K1, _}, State=#ch{reply_consumer = {_, _, _K2}}) ->
+ noreply(State);
+
+handle_cast({send_credit_reply, Len},
+ State = #ch{cfg = #conf{writer_pid = WriterPid}}) ->
+ ok = rabbit_writer:send_command(
+ WriterPid, #'basic.credit_ok'{available = Len}),
+ noreply(State);
+
+handle_cast({send_drained, CTagCredit},
+ State = #ch{cfg = #conf{writer_pid = WriterPid}}) ->
+ [ok = rabbit_writer:send_command(
+ WriterPid, #'basic.credit_drained'{consumer_tag = ConsumerTag,
+ credit_drained = CreditDrained})
+ || {ConsumerTag, CreditDrained} <- CTagCredit],
+ noreply(State);
+
+%% Note: https://www.pivotaltracker.com/story/show/166962656
+%% This event is necessary for the stats timer to be initialized with
+%% the correct values once the management agent has started.
+handle_cast({force_event_refresh, Ref}, State) ->
+ rabbit_event:notify(channel_created, infos(?CREATION_EVENT_KEYS, State),
+ Ref),
+ noreply(rabbit_event:init_stats_timer(State, #ch.stats_timer));
+
+handle_cast({mandatory_received, _MsgSeqNo}, State) ->
+ %% This feature was used by `rabbit_amqqueue_process` and
+ %% `rabbit_mirror_queue_slave` up-to and including RabbitMQ 3.7.x.
+ %% It is unused in 3.8.x and thus deprecated. We keep it to support
+ %% in-place upgrades to 3.8.x (i.e. mixed-version clusters), but it
+ %% is a no-op starting with that version.
+ %%
+ %% NB: don't call noreply/1 since we don't want to send confirms.
+ noreply_coalesce(State);
+
+handle_cast({reject_publish, _MsgSeqNo, QPid} = Evt, State) ->
+ %% For backwards compatibility
+ QRef = find_queue_name_from_pid(QPid, State#ch.queue_states),
+ case QRef of
+ undefined ->
+ %% ignore if no queue could be found for the given pid
+ noreply(State);
+ _ ->
+ handle_cast({queue_event, QRef, Evt}, State)
+ end;
+
+handle_cast({confirm, _MsgSeqNo, QPid} = Evt, State) ->
+ %% For backwards compatibility
+ QRef = find_queue_name_from_pid(QPid, State#ch.queue_states),
+ case QRef of
+ undefined ->
+ %% ignore if no queue could be found for the given pid
+ noreply(State);
+ _ ->
+ handle_cast({queue_event, QRef, Evt}, State)
+ end;
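+%% All queue events (confirms, rejections, deliveries, ...) funnel
+%% through this clause; rabbit_queue_type:handle_event/3 dispatches
+%% them to the owning queue-type implementation and returns follow-up
+%% actions for the channel to perform.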
+handle_cast({queue_event, QRef, Evt},
+ #ch{queue_states = QueueStates0} = State0) ->
+ case rabbit_queue_type:handle_event(QRef, Evt, QueueStates0) of
+ {ok, QState1, Actions} ->
+ State1 = State0#ch{queue_states = QState1},
+ State = handle_queue_actions(Actions, State1),
+ noreply_coalesce(State);
+ eol ->
+ State1 = handle_consuming_queue_down_or_eol(QRef, State0),
+ {ConfirmMXs, UC1} =
+ rabbit_confirms:remove_queue(QRef, State1#ch.unconfirmed),
+ %% Deleted queue is a special case.
+ %% Do not nack the "rejected" messages.
+ State2 = record_confirms(ConfirmMXs,
+ State1#ch{unconfirmed = UC1}),
+ erase_queue_stats(QRef),
+ noreply_coalesce(
+ State2#ch{queue_states = rabbit_queue_type:remove(QRef, QueueStates0)});
+ {protocol_error, Type, Reason, ReasonArgs} ->
+ rabbit_misc:protocol_error(Type, Reason, ReasonArgs)
+ end.
+
+handle_info({ra_event, {Name, _} = From, Evt}, State) ->
+ %% For backwards compatibility
+ QRef = find_queue_name_from_quorum_name(Name, State#ch.queue_states),
+ handle_cast({queue_event, QRef, {From, Evt}}, State);
+
+handle_info({bump_credit, Msg}, State) ->
+    %% A rabbit_amqqueue_process is granting credit to our channel. If
+    %% our channel was being blocked by this process, and no other
+    %% process is blocking our channel, then this channel will be
+    %% unblocked. This means that any credit that was deferred will be
+    %% sent to the rabbit_reader process that might be blocked by this
+    %% particular channel.
+ credit_flow:handle_bump_msg(Msg),
+ noreply(State);
+
+handle_info(timeout, State) ->
+ noreply(State);
+
+handle_info(emit_stats, State) ->
+ emit_stats(State),
+ State1 = rabbit_event:reset_stats_timer(State, #ch.stats_timer),
+ %% NB: don't call noreply/1 since we don't want to kick off the
+ %% stats timer.
+ {noreply, send_confirms_and_nacks(State1), hibernate};
+
+handle_info({'DOWN', _MRef, process, QPid, Reason},
+ #ch{queue_states = QStates0,
+ queue_monitors = _QMons} = State0) ->
+ credit_flow:peer_down(QPid),
+ case rabbit_queue_type:handle_down(QPid, Reason, QStates0) of
+ {ok, QState1, Actions} ->
+ State1 = State0#ch{queue_states = QState1},
+ State = handle_queue_actions(Actions, State1),
+ noreply_coalesce(State);
+ {eol, QRef} ->
+ State1 = handle_consuming_queue_down_or_eol(QRef, State0),
+ {ConfirmMXs, UC1} =
+ rabbit_confirms:remove_queue(QRef, State1#ch.unconfirmed),
+ %% Deleted queue is a special case.
+ %% Do not nack the "rejected" messages.
+ State2 = record_confirms(ConfirmMXs,
+ State1#ch{unconfirmed = UC1}),
+ erase_queue_stats(QRef),
+ noreply_coalesce(
+ State2#ch{queue_states = rabbit_queue_type:remove(QRef, QStates0)})
+ end;
+
+handle_info({'EXIT', _Pid, Reason}, State) ->
+ {stop, Reason, State};
+
+handle_info({{Ref, Node}, LateAnswer},
+ State = #ch{cfg = #conf{channel = Channel}})
+ when is_reference(Ref) ->
+ rabbit_log_channel:warning("Channel ~p ignoring late answer ~p from ~p",
+ [Channel, LateAnswer, Node]),
+ noreply(State);
+
+handle_info(tick, State0 = #ch{queue_states = QueueStates0}) ->
+ case get(permission_cache_can_expire) of
+ true -> ok = clear_permission_cache();
+ _ -> ok
+ end,
+ case evaluate_consumer_timeout(State0#ch{queue_states = QueueStates0}) of
+ {noreply, State} ->
+ noreply(init_tick_timer(reset_tick_timer(State)));
+ Return ->
+ Return
+ end;
+handle_info({update_user_state, User}, State = #ch{cfg = Cfg}) ->
+ noreply(State#ch{cfg = Cfg#conf{user = User}}).
+
+
+handle_pre_hibernate(State0) ->
+ ok = clear_permission_cache(),
+ State = maybe_cancel_tick_timer(State0),
+ rabbit_event:if_enabled(
+ State, #ch.stats_timer,
+ fun () -> emit_stats(State,
+ [{idle_since,
+                             os:system_time(millisecond)}])
+ end),
+ {hibernate, rabbit_event:stop_stats_timer(State, #ch.stats_timer)}.
+
+handle_post_hibernate(State0) ->
+ State = init_tick_timer(State0),
+ {noreply, State}.
+
+terminate(_Reason,
+ State = #ch{cfg = #conf{user = #user{username = Username}},
+ queue_states = QueueCtxs}) ->
+ _ = rabbit_queue_type:close(QueueCtxs),
+ {_Res, _State1} = notify_queues(State),
+ pg_local:leave(rabbit_channels, self()),
+ rabbit_event:if_enabled(State, #ch.stats_timer,
+ fun() -> emit_stats(State) end),
+ [delete_stats(Tag) || {Tag, _} <- get()],
+ rabbit_core_metrics:channel_closed(self()),
+ rabbit_event:notify(channel_closed, [{pid, self()},
+ {user_who_performed_action, Username}]).
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+format_message_queue(Opt, MQ) -> rabbit_misc:format_message_queue(Opt, MQ).
+
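+%% The configured limit can only lower the protocol hard limit
+%% ?MAX_MSG_SIZE, never raise it. A minimal sketch of how it might be
+%% set, assuming the standard 'rabbit' application environment key:
+%%
+%%   %% advanced.config
+%%   [{rabbit, [{max_message_size, 134217728}]}].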
+-spec get_max_message_size() -> non_neg_integer().
+
+get_max_message_size() ->
+ case application:get_env(rabbit, max_message_size) of
+ {ok, MS} when is_integer(MS) ->
+ erlang:min(MS, ?MAX_MSG_SIZE);
+ _ ->
+ ?MAX_MSG_SIZE
+ end.
+
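+%% consumer_timeout is read from the application environment in the
+%% same way; returning 'undefined' means no delivery acknowledgement
+%% timeout is enforced (used by the tick handler above).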
+get_consumer_timeout() ->
+ case application:get_env(rabbit, consumer_timeout) of
+ {ok, MS} when is_integer(MS) ->
+ MS;
+ _ ->
+ undefined
+ end.
+%%---------------------------------------------------------------------------
+
+reply(Reply, NewState) -> {reply, Reply, next_state(NewState), hibernate}.
+
+noreply(NewState) -> {noreply, next_state(NewState), hibernate}.
+
+next_state(State) -> ensure_stats_timer(send_confirms_and_nacks(State)).
+
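+%% When confirms or rejects are pending, return a timeout of 0 so the
+%% gen_server receives 'timeout' immediately and flushes them via
+%% noreply/1 (see handle_info(timeout, ...) above); otherwise it is
+%% safe to hibernate.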
+noreply_coalesce(State = #ch{confirmed = C, rejected = R}) ->
+ Timeout = case {C, R} of {[], []} -> hibernate; _ -> 0 end,
+ {noreply, ensure_stats_timer(State), Timeout}.
+
+ensure_stats_timer(State) ->
+ rabbit_event:ensure_stats_timer(State, #ch.stats_timer, emit_stats).
+
+return_ok(State, true, _Msg) -> {noreply, State};
+return_ok(State, false, Msg) -> {reply, Msg, State}.
+
+ok_msg(true, _Msg) -> undefined;
+ok_msg(false, Msg) -> Msg.
+
+send(_Command, #ch{cfg = #conf{state = closing}}) ->
+ ok;
+send(Command, #ch{cfg = #conf{writer_pid = WriterPid}}) ->
+ ok = rabbit_writer:send_command(WriterPid, Command).
+
+format_soft_error(#amqp_error{name = N, explanation = E, method = M}) ->
+ io_lib:format("operation ~s caused a channel exception ~s: ~ts", [M, N, E]).
+
+handle_exception(Reason, State = #ch{cfg = #conf{protocol = Protocol,
+ channel = Channel,
+ writer_pid = WriterPid,
+ reader_pid = ReaderPid,
+ conn_pid = ConnPid,
+ conn_name = ConnName,
+ virtual_host = VHost,
+ user = User
+ }}) ->
+    %% Something bad has happened: notify_queues may not return 'ok'.
+ {_Result, State1} = notify_queues(State),
+ case rabbit_binary_generator:map_exception(Channel, Reason, Protocol) of
+ {Channel, CloseMethod} ->
+ rabbit_log_channel:error(
+ "Channel error on connection ~p (~s, vhost: '~s',"
+ " user: '~s'), channel ~p:~n~s~n",
+ [ConnPid, ConnName, VHost, User#user.username,
+ Channel, format_soft_error(Reason)]),
+ ok = rabbit_writer:send_command(WriterPid, CloseMethod),
+ {noreply, State1};
+ {0, _} ->
+ ReaderPid ! {channel_exit, Channel, Reason},
+ {stop, normal, State1}
+ end.
+
+-spec precondition_failed(string()) -> no_return().
+
+precondition_failed(Format) -> precondition_failed(Format, []).
+
+-spec precondition_failed(string(), [any()]) -> no_return().
+
+precondition_failed(Format, Params) ->
+ rabbit_misc:protocol_error(precondition_failed, Format, Params).
+
+return_queue_declare_ok(#resource{name = ActualName},
+ NoWait, MessageCount, ConsumerCount,
+ #ch{cfg = Cfg} = State) ->
+ return_ok(State#ch{cfg = Cfg#conf{most_recently_declared_queue = ActualName}},
+ NoWait, #'queue.declare_ok'{queue = ActualName,
+ message_count = MessageCount,
+ consumer_count = ConsumerCount}).
+
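+%% Successful authorisation decisions are memoised in the process
+%% dictionary; the cache is capped at ?MAX_PERMISSION_CACHE_SIZE
+%% entries, evicting the oldest entry first.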
+check_resource_access(User, Resource, Perm, Context) ->
+ V = {Resource, Context, Perm},
+
+ Cache = case get(permission_cache) of
+ undefined -> [];
+ Other -> Other
+ end,
+ case lists:member(V, Cache) of
+ true -> ok;
+ false -> ok = rabbit_access_control:check_resource_access(
+ User, Resource, Perm, Context),
+ CacheTail = lists:sublist(Cache, ?MAX_PERMISSION_CACHE_SIZE-1),
+ put(permission_cache, [V | CacheTail])
+ end.
+
+clear_permission_cache() -> erase(permission_cache),
+ erase(topic_permission_cache),
+ ok.
+
+check_configure_permitted(Resource, User, Context) ->
+ check_resource_access(User, Resource, configure, Context).
+
+check_write_permitted(Resource, User, Context) ->
+ check_resource_access(User, Resource, write, Context).
+
+check_read_permitted(Resource, User, Context) ->
+ check_resource_access(User, Resource, read, Context).
+
+check_write_permitted_on_topic(Resource, User, RoutingKey, AuthzContext) ->
+ check_topic_authorisation(Resource, User, RoutingKey, AuthzContext, write).
+
+check_read_permitted_on_topic(Resource, User, RoutingKey, AuthzContext) ->
+ check_topic_authorisation(Resource, User, RoutingKey, AuthzContext, read).
+
+check_user_id_header(#'P_basic'{user_id = undefined}, _) ->
+ ok;
+check_user_id_header(#'P_basic'{user_id = Username},
+ #ch{cfg = #conf{user = #user{username = Username}}}) ->
+ ok;
+check_user_id_header(
+ #'P_basic'{}, #ch{cfg = #conf{user = #user{authz_backends =
+ [{rabbit_auth_backend_dummy, _}]}}}) ->
+ ok;
+check_user_id_header(#'P_basic'{user_id = Claimed},
+ #ch{cfg = #conf{user = #user{username = Actual,
+ tags = Tags}}}) ->
+ case lists:member(impersonator, Tags) of
+ true -> ok;
+ false -> precondition_failed(
+ "user_id property set to '~s' but authenticated user was "
+ "'~s'", [Claimed, Actual])
+ end.
+
+check_expiration_header(Props) ->
+ case rabbit_basic:parse_expiration(Props) of
+ {ok, _} -> ok;
+ {error, E} -> precondition_failed("invalid expiration '~s': ~p",
+ [Props#'P_basic'.expiration, E])
+ end.
+
+check_internal_exchange(#exchange{name = Name, internal = true}) ->
+ rabbit_misc:protocol_error(access_refused,
+ "cannot publish to internal ~s",
+ [rabbit_misc:rs(Name)]);
+check_internal_exchange(_) ->
+ ok.
+
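+%% Topic authorisation decisions are cached in the process dictionary
+%% in the same bounded fashion as resource permissions above.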
+check_topic_authorisation(#exchange{name = Name = #resource{virtual_host = VHost}, type = topic},
+ User = #user{username = Username},
+ RoutingKey, AuthzContext, Permission) ->
+ Resource = Name#resource{kind = topic},
+ VariableMap = build_topic_variable_map(AuthzContext, VHost, Username),
+ Context = #{routing_key => RoutingKey,
+ variable_map => VariableMap},
+ Cache = case get(topic_permission_cache) of
+ undefined -> [];
+ Other -> Other
+ end,
+ case lists:member({Resource, Context, Permission}, Cache) of
+ true -> ok;
+ false -> ok = rabbit_access_control:check_topic_access(
+ User, Resource, Permission, Context),
+ CacheTail = lists:sublist(Cache, ?MAX_PERMISSION_CACHE_SIZE-1),
+ put(topic_permission_cache, [{Resource, Context, Permission} | CacheTail])
+ end;
+check_topic_authorisation(_, _, _, _, _) ->
+ ok.
+
+
+build_topic_variable_map(AuthzContext, VHost, Username) when is_map(AuthzContext) ->
+ maps:merge(AuthzContext, #{<<"vhost">> => VHost, <<"username">> => Username});
+build_topic_variable_map(AuthzContext, VHost, Username) ->
+ maps:merge(extract_variable_map_from_amqp_params(AuthzContext), #{<<"vhost">> => VHost, <<"username">> => Username}).
+
+%% Use tuple representation of amqp_params to avoid a dependency on amqp_client.
+%% Extracts variable map only from amqp_params_direct, not amqp_params_network.
+%% amqp_params_direct records are usually used by plugins (e.g. MQTT, STOMP)
+extract_variable_map_from_amqp_params({amqp_params, {amqp_params_direct, _, _, _, _,
+ {amqp_adapter_info, _,_,_,_,_,_,AdditionalInfo}, _}}) ->
+ proplists:get_value(variable_map, AdditionalInfo, #{});
+extract_variable_map_from_amqp_params({amqp_params_direct, _, _, _, _,
+ {amqp_adapter_info, _,_,_,_,_,_,AdditionalInfo}, _}) ->
+ proplists:get_value(variable_map, AdditionalInfo, #{});
+extract_variable_map_from_amqp_params([Value]) ->
+ extract_variable_map_from_amqp_params(Value);
+extract_variable_map_from_amqp_params(_) ->
+ #{}.
+
+check_msg_size(Content, MaxMessageSize, GCThreshold) ->
+ Size = rabbit_basic:maybe_gc_large_msg(Content, GCThreshold),
+ case Size of
+ S when S > MaxMessageSize ->
+ ErrorMessage = case MaxMessageSize of
+ ?MAX_MSG_SIZE ->
+ "message size ~B is larger than max size ~B";
+ _ ->
+ "message size ~B is larger than configured max size ~B"
+ end,
+ precondition_failed(ErrorMessage,
+ [Size, MaxMessageSize]);
+ _ -> ok
+ end.
+
+check_vhost_queue_limit(#resource{name = QueueName}, VHost) ->
+ case rabbit_vhost_limit:is_over_queue_limit(VHost) of
+ false -> ok;
+ {true, Limit} -> precondition_failed("cannot declare queue '~s': "
+ "queue limit in vhost '~s' (~p) is reached",
+ [QueueName, VHost, Limit])
+
+ end.
+
+qbin_to_resource(QueueNameBin, VHostPath) ->
+ name_to_resource(queue, QueueNameBin, VHostPath).
+
+name_to_resource(Type, NameBin, VHostPath) ->
+ rabbit_misc:r(VHostPath, Type, NameBin).
+
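+%% AMQP 0-9-1 shortcuts: an empty queue name stands for the queue most
+%% recently declared on this channel; an empty routing key combined
+%% with an empty queue name likewise defaults to that queue's name.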
+expand_queue_name_shortcut(<<>>, #ch{cfg = #conf{most_recently_declared_queue = <<>>}}) ->
+ rabbit_misc:protocol_error(not_found, "no previously declared queue", []);
+expand_queue_name_shortcut(<<>>, #ch{cfg = #conf{most_recently_declared_queue = MRDQ}}) ->
+ MRDQ;
+expand_queue_name_shortcut(QueueNameBin, _) ->
+ QueueNameBin.
+
+expand_routing_key_shortcut(<<>>, <<>>,
+ #ch{cfg = #conf{most_recently_declared_queue = <<>>}}) ->
+ rabbit_misc:protocol_error(not_found, "no previously declared queue", []);
+expand_routing_key_shortcut(<<>>, <<>>,
+ #ch{cfg = #conf{most_recently_declared_queue = MRDQ}}) ->
+ MRDQ;
+expand_routing_key_shortcut(_QueueNameBin, RoutingKey, _State) ->
+ RoutingKey.
+
+expand_shortcuts(#'basic.get' {queue = Q} = M, State) ->
+ M#'basic.get' {queue = expand_queue_name_shortcut(Q, State)};
+expand_shortcuts(#'basic.consume'{queue = Q} = M, State) ->
+ M#'basic.consume'{queue = expand_queue_name_shortcut(Q, State)};
+expand_shortcuts(#'queue.delete' {queue = Q} = M, State) ->
+ M#'queue.delete' {queue = expand_queue_name_shortcut(Q, State)};
+expand_shortcuts(#'queue.purge' {queue = Q} = M, State) ->
+ M#'queue.purge' {queue = expand_queue_name_shortcut(Q, State)};
+expand_shortcuts(#'queue.bind' {queue = Q, routing_key = K} = M, State) ->
+ M#'queue.bind' {queue = expand_queue_name_shortcut(Q, State),
+ routing_key = expand_routing_key_shortcut(Q, K, State)};
+expand_shortcuts(#'queue.unbind' {queue = Q, routing_key = K} = M, State) ->
+ M#'queue.unbind' {queue = expand_queue_name_shortcut(Q, State),
+ routing_key = expand_routing_key_shortcut(Q, K, State)};
+expand_shortcuts(M, _State) ->
+ M.
+
+check_not_default_exchange(#resource{kind = exchange, name = <<"">>}) ->
+ rabbit_misc:protocol_error(
+ access_refused, "operation not permitted on the default exchange", []);
+check_not_default_exchange(_) ->
+ ok.
+
+check_exchange_deletion(XName = #resource{name = <<"amq.", _/binary>>,
+ kind = exchange}) ->
+ rabbit_misc:protocol_error(
+ access_refused, "deletion of system ~s not allowed",
+ [rabbit_misc:rs(XName)]);
+check_exchange_deletion(_) ->
+ ok.
+
+%% check that an exchange/queue name does not contain the reserved
+%% "amq." prefix.
+%%
+%% As per the AMQP 0-9-1 spec, the exclusion of "amq." prefixed names
+%% only applies on actual creation, and not in the cases where the
+%% entity already exists or passive=true.
+%%
+%% NB: We deliberately do not enforce the other constraints on names
+%% required by the spec.
+check_name(Kind, NameBin = <<"amq.", _/binary>>) ->
+ rabbit_misc:protocol_error(
+ access_refused,
+ "~s name '~s' contains reserved prefix 'amq.*'",[Kind, NameBin]);
+check_name(_Kind, NameBin) ->
+ NameBin.
+
+strip_cr_lf(NameBin) ->
+ binary:replace(NameBin, [<<"\n">>, <<"\r">>], <<"">>, [global]).
+
+
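+%% Direct reply-to: a publish with reply_to = "amq.rabbitmq.reply-to"
+%% requires an active reply consumer on this channel. The property is
+%% rewritten to a suffixed pseudo queue name encoding this channel's
+%% pid and the consumer key, so the reply can be routed straight back.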
+maybe_set_fast_reply_to(
+ C = #content{properties = P = #'P_basic'{reply_to =
+ <<"amq.rabbitmq.reply-to">>}},
+ #ch{reply_consumer = ReplyConsumer}) ->
+ case ReplyConsumer of
+ none -> rabbit_misc:protocol_error(
+ precondition_failed,
+ "fast reply consumer does not exist", []);
+ {_, Suf, _K} -> Rep = <<"amq.rabbitmq.reply-to.", Suf/binary>>,
+ rabbit_binary_generator:clear_encoded_content(
+ C#content{properties = P#'P_basic'{reply_to = Rep}})
+ end;
+maybe_set_fast_reply_to(C, _State) ->
+ C.
+
+record_rejects([], State) ->
+ State;
+record_rejects(MXs, State = #ch{rejected = R, tx = Tx}) ->
+ Tx1 = case Tx of
+ none -> none;
+ _ -> failed
+ end,
+ State#ch{rejected = [MXs | R], tx = Tx1}.
+
+record_confirms([], State) ->
+ State;
+record_confirms(MXs, State = #ch{confirmed = C}) ->
+ State#ch{confirmed = [MXs | C]}.
+
+handle_method({Method, Content}, State) ->
+ handle_method(Method, Content, State).
+
+handle_method(#'channel.open'{}, _,
+ State = #ch{cfg = #conf{state = starting} = Cfg}) ->
+    %% Don't leave "starting" as the state for 5s. TODO: is this the
+    %% right thing to do?
+ State1 = State#ch{cfg = Cfg#conf{state = running}},
+ rabbit_event:if_enabled(State1, #ch.stats_timer,
+ fun() -> emit_stats(State1) end),
+ {reply, #'channel.open_ok'{}, State1};
+
+handle_method(#'channel.open'{}, _, _State) ->
+ rabbit_misc:protocol_error(
+ channel_error, "second 'channel.open' seen", []);
+
+handle_method(_Method, _, #ch{cfg = #conf{state = starting}}) ->
+ rabbit_misc:protocol_error(channel_error, "expected 'channel.open'", []);
+
+handle_method(#'channel.close_ok'{}, _, #ch{cfg = #conf{state = closing}}) ->
+ stop;
+
+handle_method(#'channel.close'{}, _,
+ State = #ch{cfg = #conf{state = closing,
+ writer_pid = WriterPid}}) ->
+ ok = rabbit_writer:send_command(WriterPid, #'channel.close_ok'{}),
+ {noreply, State};
+
+handle_method(_Method, _, State = #ch{cfg = #conf{state = closing}}) ->
+ {noreply, State};
+
+handle_method(#'channel.close'{}, _,
+ State = #ch{cfg = #conf{reader_pid = ReaderPid}}) ->
+ {_Result, State1} = notify_queues(State),
+ %% We issue the channel.close_ok response after a handshake with
+ %% the reader, the other half of which is ready_for_close. That
+ %% way the reader forgets about the channel before we send the
+ %% response (and this channel process terminates). If we didn't do
+ %% that, a channel.open for the same channel number, which a
+ %% client is entitled to send as soon as it has received the
+ %% close_ok, might be received by the reader before it has seen
+ %% the termination and hence be sent to the old, now dead/dying
+ %% channel process, instead of a new process, and thus lost.
+ ReaderPid ! {channel_closing, self()},
+ {noreply, State1};
+
+%% Even though the spec prohibits the client from sending commands
+%% while waiting for the reply to a synchronous command, we generally
+%% do allow this...except in the case of a pending tx.commit, where
+%% it could wreak havoc.
+handle_method(_Method, _, #ch{tx = Tx})
+ when Tx =:= committing orelse Tx =:= failed ->
+ rabbit_misc:protocol_error(
+ channel_error, "unexpected command while processing 'tx.commit'", []);
+
+handle_method(#'access.request'{},_, State) ->
+ {reply, #'access.request_ok'{ticket = 1}, State};
+
+handle_method(#'basic.publish'{immediate = true}, _Content, _State) ->
+ rabbit_misc:protocol_error(not_implemented, "immediate=true", []);
+
+handle_method(#'basic.publish'{exchange = ExchangeNameBin,
+ routing_key = RoutingKey,
+ mandatory = Mandatory},
+ Content, State = #ch{cfg = #conf{channel = ChannelNum,
+ conn_name = ConnName,
+ virtual_host = VHostPath,
+ user = #user{username = Username} = User,
+ trace_state = TraceState,
+ max_message_size = MaxMessageSize,
+ authz_context = AuthzContext,
+ writer_gc_threshold = GCThreshold
+ },
+ tx = Tx,
+ confirm_enabled = ConfirmEnabled,
+ delivery_flow = Flow
+ }) ->
+ check_msg_size(Content, MaxMessageSize, GCThreshold),
+ ExchangeName = rabbit_misc:r(VHostPath, exchange, ExchangeNameBin),
+ check_write_permitted(ExchangeName, User, AuthzContext),
+ Exchange = rabbit_exchange:lookup_or_die(ExchangeName),
+ check_internal_exchange(Exchange),
+ check_write_permitted_on_topic(Exchange, User, RoutingKey, AuthzContext),
+ %% We decode the content's properties here because we're almost
+ %% certain to want to look at delivery-mode and priority.
+ DecodedContent = #content {properties = Props} =
+ maybe_set_fast_reply_to(
+ rabbit_binary_parser:ensure_content_decoded(Content), State),
+ check_user_id_header(Props, State),
+ check_expiration_header(Props),
+ DoConfirm = Tx =/= none orelse ConfirmEnabled,
+ {MsgSeqNo, State1} =
+ case DoConfirm orelse Mandatory of
+ false -> {undefined, State};
+ true -> SeqNo = State#ch.publish_seqno,
+ {SeqNo, State#ch{publish_seqno = SeqNo + 1}}
+ end,
+ case rabbit_basic:message(ExchangeName, RoutingKey, DecodedContent) of
+ {ok, Message} ->
+ Delivery = rabbit_basic:delivery(
+ Mandatory, DoConfirm, Message, MsgSeqNo),
+ QNames = rabbit_exchange:route(Exchange, Delivery),
+ rabbit_trace:tap_in(Message, QNames, ConnName, ChannelNum,
+ Username, TraceState),
+ DQ = {Delivery#delivery{flow = Flow}, QNames},
+ {noreply, case Tx of
+ none -> deliver_to_queues(DQ, State1);
+ {Msgs, Acks} -> Msgs1 = ?QUEUE:in(DQ, Msgs),
+ State1#ch{tx = {Msgs1, Acks}}
+ end};
+ {error, Reason} ->
+ precondition_failed("invalid message: ~p", [Reason])
+ end;
+
+handle_method(#'basic.nack'{delivery_tag = DeliveryTag,
+ multiple = Multiple,
+ requeue = Requeue}, _, State) ->
+ reject(DeliveryTag, Requeue, Multiple, State);
+
+handle_method(#'basic.ack'{delivery_tag = DeliveryTag,
+ multiple = Multiple},
+ _, State = #ch{unacked_message_q = UAMQ, tx = Tx}) ->
+ {Acked, Remaining} = collect_acks(UAMQ, DeliveryTag, Multiple),
+ State1 = State#ch{unacked_message_q = Remaining},
+ {noreply, case Tx of
+ none -> {State2, Actions} = ack(Acked, State1),
+ handle_queue_actions(Actions, State2);
+ {Msgs, Acks} -> Acks1 = ack_cons(ack, Acked, Acks),
+ State1#ch{tx = {Msgs, Acks1}}
+ end};
+
+handle_method(#'basic.get'{queue = QueueNameBin, no_ack = NoAck},
+ _, State = #ch{cfg = #conf{writer_pid = WriterPid,
+ conn_pid = ConnPid,
+ user = User,
+ virtual_host = VHostPath,
+ authz_context = AuthzContext
+ },
+ limiter = Limiter,
+ next_tag = DeliveryTag,
+ queue_states = QueueStates0}) ->
+ QueueName = qbin_to_resource(QueueNameBin, VHostPath),
+ check_read_permitted(QueueName, User, AuthzContext),
+ case rabbit_amqqueue:with_exclusive_access_or_die(
+ QueueName, ConnPid,
+ %% Use the delivery tag as consumer tag for quorum queues
+ fun (Q) ->
+ rabbit_queue_type:dequeue(
+ Q, NoAck, rabbit_limiter:pid(Limiter),
+ DeliveryTag, QueueStates0)
+ end) of
+ {ok, MessageCount, Msg, QueueStates} ->
+ handle_basic_get(WriterPid, DeliveryTag, NoAck, MessageCount, Msg,
+ State#ch{queue_states = QueueStates});
+ {empty, QueueStates} ->
+ ?INCR_STATS(queue_stats, QueueName, 1, get_empty, State),
+ {reply, #'basic.get_empty'{}, State#ch{queue_states = QueueStates}};
+ empty ->
+ ?INCR_STATS(queue_stats, QueueName, 1, get_empty, State),
+ {reply, #'basic.get_empty'{}, State};
+ {error, {unsupported, single_active_consumer}} ->
+ rabbit_misc:protocol_error(
+ resource_locked,
+ "cannot obtain access to locked ~s. basic.get operations "
+ "are not supported by quorum queues with single active consumer",
+ [rabbit_misc:rs(QueueName)]);
+ {error, Reason} ->
+ %% TODO add queue type to error message
+ rabbit_misc:protocol_error(internal_error,
+ "Cannot get a message from queue '~s': ~p",
+ [rabbit_misc:rs(QueueName), Reason]);
+ {protocol_error, Type, Reason, ReasonArgs} ->
+ rabbit_misc:protocol_error(Type, Reason, ReasonArgs)
+ end;
+
+handle_method(#'basic.consume'{queue = <<"amq.rabbitmq.reply-to">>,
+ consumer_tag = CTag0,
+ no_ack = NoAck,
+ nowait = NoWait},
+ _, State = #ch{reply_consumer = ReplyConsumer,
+ consumer_mapping = ConsumerMapping}) ->
+ case maps:find(CTag0, ConsumerMapping) of
+ error ->
+ case {ReplyConsumer, NoAck} of
+ {none, true} ->
+ CTag = case CTag0 of
+ <<>> -> rabbit_guid:binary(
+ rabbit_guid:gen_secure(), "amq.ctag");
+ Other -> Other
+ end,
+ %% Precalculate both suffix and key; base64 encoding is
+ %% expensive
+ Key = base64:encode(rabbit_guid:gen_secure()),
+ PidEnc = base64:encode(term_to_binary(self())),
+ Suffix = <<PidEnc/binary, ".", Key/binary>>,
+ Consumer = {CTag, Suffix, binary_to_list(Key)},
+ State1 = State#ch{reply_consumer = Consumer},
+ case NoWait of
+ true -> {noreply, State1};
+ false -> Rep = #'basic.consume_ok'{consumer_tag = CTag},
+ {reply, Rep, State1}
+ end;
+ {_, false} ->
+ rabbit_misc:protocol_error(
+ precondition_failed,
+ "reply consumer cannot acknowledge", []);
+ _ ->
+ rabbit_misc:protocol_error(
+ precondition_failed, "reply consumer already set", [])
+ end;
+ {ok, _} ->
+ %% Attempted reuse of consumer tag.
+ rabbit_misc:protocol_error(
+ not_allowed, "attempt to reuse consumer tag '~s'", [CTag0])
+ end;
+
+handle_method(#'basic.cancel'{consumer_tag = ConsumerTag, nowait = NoWait},
+ _, State = #ch{reply_consumer = {ConsumerTag, _, _}}) ->
+ State1 = State#ch{reply_consumer = none},
+ case NoWait of
+ true -> {noreply, State1};
+ false -> Rep = #'basic.cancel_ok'{consumer_tag = ConsumerTag},
+ {reply, Rep, State1}
+ end;
+
+handle_method(#'basic.consume'{queue = QueueNameBin,
+ consumer_tag = ConsumerTag,
+ no_local = _, % FIXME: implement
+ no_ack = NoAck,
+ exclusive = ExclusiveConsume,
+ nowait = NoWait,
+ arguments = Args},
+ _, State = #ch{cfg = #conf{consumer_prefetch = ConsumerPrefetch,
+ user = User,
+ virtual_host = VHostPath,
+ authz_context = AuthzContext},
+ consumer_mapping = ConsumerMapping
+ }) ->
+ case maps:find(ConsumerTag, ConsumerMapping) of
+ error ->
+ QueueName = qbin_to_resource(QueueNameBin, VHostPath),
+ check_read_permitted(QueueName, User, AuthzContext),
+ ActualConsumerTag =
+ case ConsumerTag of
+ <<>> -> rabbit_guid:binary(rabbit_guid:gen_secure(),
+ "amq.ctag");
+ Other -> Other
+ end,
+ case basic_consume(
+ QueueName, NoAck, ConsumerPrefetch, ActualConsumerTag,
+ ExclusiveConsume, Args, NoWait, State) of
+ {ok, State1} ->
+ {noreply, State1};
+ {error, exclusive_consume_unavailable} ->
+ rabbit_misc:protocol_error(
+ access_refused, "~s in exclusive use",
+ [rabbit_misc:rs(QueueName)]);
+ {error, global_qos_not_supported_for_queue_type} ->
+ rabbit_misc:protocol_error(
+ not_implemented, "~s does not support global qos",
+ [rabbit_misc:rs(QueueName)])
+ end;
+ {ok, _} ->
+ %% Attempted reuse of consumer tag.
+ rabbit_misc:protocol_error(
+ not_allowed, "attempt to reuse consumer tag '~s'", [ConsumerTag])
+ end;
+
+handle_method(#'basic.cancel'{consumer_tag = ConsumerTag, nowait = NoWait},
+ _, State = #ch{cfg = #conf{user = #user{username = Username}},
+ consumer_mapping = ConsumerMapping,
+ queue_consumers = QCons,
+ queue_states = QueueStates0}) ->
+ OkMsg = #'basic.cancel_ok'{consumer_tag = ConsumerTag},
+ case maps:find(ConsumerTag, ConsumerMapping) of
+ error ->
+ %% Spec requires we ignore this situation.
+ return_ok(State, NoWait, OkMsg);
+ {ok, {Q, _CParams}} when ?is_amqqueue(Q) ->
+ QName = amqqueue:get_name(Q),
+
+ ConsumerMapping1 = maps:remove(ConsumerTag, ConsumerMapping),
+ QCons1 =
+ case maps:find(QName, QCons) of
+ error -> QCons;
+ {ok, CTags} -> CTags1 = gb_sets:delete(ConsumerTag, CTags),
+ case gb_sets:is_empty(CTags1) of
+ true -> maps:remove(QName, QCons);
+ false -> maps:put(QName, CTags1, QCons)
+ end
+ end,
+ NewState = State#ch{consumer_mapping = ConsumerMapping1,
+ queue_consumers = QCons1},
+ %% In order to ensure that no more messages are sent to
+ %% the consumer after the cancel_ok has been sent, we get
+ %% the queue process to send the cancel_ok on our
+ %% behalf. If we were sending the cancel_ok ourselves it
+ %% might overtake a message sent previously by the queue.
+ case rabbit_misc:with_exit_handler(
+ fun () -> {error, not_found} end,
+ fun () ->
+ rabbit_queue_type:cancel(
+ Q, ConsumerTag, ok_msg(NoWait, OkMsg),
+ Username, QueueStates0)
+ end) of
+ {ok, QueueStates} ->
+ {noreply, NewState#ch{queue_states = QueueStates}};
+ {error, not_found} ->
+ %% Spec requires we ignore this situation.
+ return_ok(NewState, NoWait, OkMsg)
+ end
+ end;
+
+handle_method(#'basic.qos'{prefetch_size = Size}, _, _State) when Size /= 0 ->
+ rabbit_misc:protocol_error(not_implemented,
+ "prefetch_size!=0 (~w)", [Size]);
+
+handle_method(#'basic.qos'{global = false,
+ prefetch_count = PrefetchCount},
+ _, State = #ch{cfg = Cfg,
+ limiter = Limiter}) ->
+ %% Ensures that if default was set, it's overridden
+ Limiter1 = rabbit_limiter:unlimit_prefetch(Limiter),
+ {reply, #'basic.qos_ok'{}, State#ch{cfg = Cfg#conf{consumer_prefetch = PrefetchCount},
+ limiter = Limiter1}};
+
+handle_method(#'basic.qos'{global = true,
+ prefetch_count = 0},
+ _, State = #ch{limiter = Limiter}) ->
+ Limiter1 = rabbit_limiter:unlimit_prefetch(Limiter),
+ {reply, #'basic.qos_ok'{}, State#ch{limiter = Limiter1}};
+
+handle_method(#'basic.qos'{global = true,
+ prefetch_count = PrefetchCount},
+ _, State = #ch{limiter = Limiter, unacked_message_q = UAMQ}) ->
+ %% TODO ?QUEUE:len(UAMQ) is not strictly right since that counts
+ %% unacked messages from basic.get too. Pretty obscure though.
+ Limiter1 = rabbit_limiter:limit_prefetch(Limiter,
+ PrefetchCount, ?QUEUE:len(UAMQ)),
+ case ((not rabbit_limiter:is_active(Limiter)) andalso
+ rabbit_limiter:is_active(Limiter1)) of
+ true -> rabbit_amqqueue:activate_limit_all(
+ classic_consumer_queue_pids(State#ch.consumer_mapping), self());
+ false -> ok
+ end,
+ {reply, #'basic.qos_ok'{}, State#ch{limiter = Limiter1}};
+
+handle_method(#'basic.recover_async'{requeue = true},
+ _, State = #ch{unacked_message_q = UAMQ,
+ limiter = Limiter,
+ queue_states = QueueStates0}) ->
+ OkFun = fun () -> ok end,
+ UAMQL = ?QUEUE:to_list(UAMQ),
+ {QueueStates, Actions} =
+ foreach_per_queue(
+ fun ({QPid, CTag}, MsgIds, {Acc0, Actions0}) ->
+ rabbit_misc:with_exit_handler(
+ OkFun,
+ fun () ->
+ {ok, Acc, Act} = rabbit_amqqueue:requeue(QPid, {CTag, MsgIds}, Acc0),
+ {Acc, Act ++ Actions0}
+ end)
+ end, lists:reverse(UAMQL), {QueueStates0, []}),
+ ok = notify_limiter(Limiter, UAMQL),
+ State1 = handle_queue_actions(Actions, State#ch{unacked_message_q = ?QUEUE:new(),
+ queue_states = QueueStates}),
+ %% No answer required - basic.recover is the newer, synchronous
+ %% variant of this method
+ {noreply, State1};
+
+handle_method(#'basic.recover_async'{requeue = false}, _, _State) ->
+ rabbit_misc:protocol_error(not_implemented, "requeue=false", []);
+
+handle_method(#'basic.recover'{requeue = Requeue}, Content, State) ->
+ {noreply, State1} = handle_method(#'basic.recover_async'{requeue = Requeue},
+ Content, State),
+ {reply, #'basic.recover_ok'{}, State1};
+
+handle_method(#'basic.reject'{delivery_tag = DeliveryTag, requeue = Requeue},
+ _, State) ->
+ reject(DeliveryTag, Requeue, false, State);
+
+handle_method(#'exchange.declare'{nowait = NoWait} = Method,
+ _, State = #ch{cfg = #conf{virtual_host = VHostPath,
+ user = User,
+ queue_collector_pid = CollectorPid,
+ conn_pid = ConnPid,
+ authz_context = AuthzContext}}) ->
+ handle_method(Method, ConnPid, AuthzContext, CollectorPid, VHostPath, User),
+ return_ok(State, NoWait, #'exchange.declare_ok'{});
+
+handle_method(#'exchange.delete'{nowait = NoWait} = Method,
+ _, State = #ch{cfg = #conf{conn_pid = ConnPid,
+ authz_context = AuthzContext,
+ virtual_host = VHostPath,
+ queue_collector_pid = CollectorPid,
+ user = User}}) ->
+ handle_method(Method, ConnPid, AuthzContext, CollectorPid, VHostPath, User),
+ return_ok(State, NoWait, #'exchange.delete_ok'{});
+
+handle_method(#'exchange.bind'{nowait = NoWait} = Method,
+ _, State = #ch{cfg = #conf{virtual_host = VHostPath,
+ conn_pid = ConnPid,
+ authz_context = AuthzContext,
+ queue_collector_pid = CollectorPid,
+ user = User}}) ->
+ handle_method(Method, ConnPid, AuthzContext, CollectorPid, VHostPath, User),
+ return_ok(State, NoWait, #'exchange.bind_ok'{});
+
+handle_method(#'exchange.unbind'{nowait = NoWait} = Method,
+ _, State = #ch{cfg = #conf{virtual_host = VHostPath,
+ conn_pid = ConnPid,
+ authz_context = AuthzContext,
+ queue_collector_pid = CollectorPid,
+ user = User}}) ->
+ handle_method(Method, ConnPid, AuthzContext, CollectorPid, VHostPath, User),
+ return_ok(State, NoWait, #'exchange.unbind_ok'{});
+
+handle_method(#'queue.declare'{nowait = NoWait} = Method,
+ _, State = #ch{cfg = #conf{virtual_host = VHostPath,
+ conn_pid = ConnPid,
+ authz_context = AuthzContext,
+ queue_collector_pid = CollectorPid,
+ user = User}}) ->
+ {ok, QueueName, MessageCount, ConsumerCount} =
+ handle_method(Method, ConnPid, AuthzContext, CollectorPid, VHostPath, User),
+ return_queue_declare_ok(QueueName, NoWait, MessageCount,
+ ConsumerCount, State);
+
+handle_method(#'queue.delete'{nowait = NoWait} = Method, _,
+ State = #ch{cfg = #conf{conn_pid = ConnPid,
+ authz_context = AuthzContext,
+ virtual_host = VHostPath,
+ queue_collector_pid = CollectorPid,
+ user = User}}) ->
+ {ok, PurgedMessageCount} =
+ handle_method(Method, ConnPid, AuthzContext, CollectorPid, VHostPath, User),
+ return_ok(State, NoWait,
+ #'queue.delete_ok'{message_count = PurgedMessageCount});
+
+handle_method(#'queue.bind'{nowait = NoWait} = Method, _,
+ State = #ch{cfg = #conf{conn_pid = ConnPid,
+ authz_context = AuthzContext,
+ user = User,
+ queue_collector_pid = CollectorPid,
+ virtual_host = VHostPath}}) ->
+ handle_method(Method, ConnPid, AuthzContext, CollectorPid, VHostPath, User),
+ return_ok(State, NoWait, #'queue.bind_ok'{});
+
+handle_method(#'queue.unbind'{} = Method, _,
+ State = #ch{cfg = #conf{conn_pid = ConnPid,
+ authz_context = AuthzContext,
+ user = User,
+ queue_collector_pid = CollectorPid,
+ virtual_host = VHostPath}}) ->
+ handle_method(Method, ConnPid, AuthzContext, CollectorPid, VHostPath, User),
+ return_ok(State, false, #'queue.unbind_ok'{});
+
+handle_method(#'queue.purge'{nowait = NoWait} = Method,
+ _, State = #ch{cfg = #conf{conn_pid = ConnPid,
+ authz_context = AuthzContext,
+ user = User,
+ queue_collector_pid = CollectorPid,
+ virtual_host = VHostPath}}) ->
+ case handle_method(Method, ConnPid, AuthzContext, CollectorPid,
+ VHostPath, User) of
+ {ok, PurgedMessageCount} ->
+ return_ok(State, NoWait,
+ #'queue.purge_ok'{message_count = PurgedMessageCount})
+ end;
+
+handle_method(#'tx.select'{}, _, #ch{confirm_enabled = true}) ->
+ precondition_failed("cannot switch from confirm to tx mode");
+
+handle_method(#'tx.select'{}, _, State = #ch{tx = none}) ->
+ {reply, #'tx.select_ok'{}, State#ch{tx = new_tx()}};
+
+handle_method(#'tx.select'{}, _, State) ->
+ {reply, #'tx.select_ok'{}, State};
+
+handle_method(#'tx.commit'{}, _, #ch{tx = none}) ->
+ precondition_failed("channel is not transactional");
+
+handle_method(#'tx.commit'{}, _, State = #ch{tx = {Msgs, Acks},
+ limiter = Limiter}) ->
+ State1 = queue_fold(fun deliver_to_queues/2, State, Msgs),
+ Rev = fun (X) -> lists:reverse(lists:sort(X)) end,
+ {State2, Actions2} =
+ lists:foldl(fun ({ack, A}, {Acc, Actions}) ->
+ {Acc0, Actions0} = ack(Rev(A), Acc),
+ {Acc0, Actions ++ Actions0};
+ ({Requeue, A}, {Acc, Actions}) ->
+ {Acc0, Actions0} = internal_reject(Requeue, Rev(A), Limiter, Acc),
+ {Acc0, Actions ++ Actions0}
+ end, {State1, []}, lists:reverse(Acks)),
+ State3 = handle_queue_actions(Actions2, State2),
+ {noreply, maybe_complete_tx(State3#ch{tx = committing})};
+
+handle_method(#'tx.rollback'{}, _, #ch{tx = none}) ->
+ precondition_failed("channel is not transactional");
+
+handle_method(#'tx.rollback'{}, _, State = #ch{unacked_message_q = UAMQ,
+ tx = {_Msgs, Acks}}) ->
+ AcksL = lists:append(lists:reverse([lists:reverse(L) || {_, L} <- Acks])),
+ UAMQ1 = ?QUEUE:from_list(lists:usort(AcksL ++ ?QUEUE:to_list(UAMQ))),
+ {reply, #'tx.rollback_ok'{}, State#ch{unacked_message_q = UAMQ1,
+ tx = new_tx()}};
+
+handle_method(#'confirm.select'{}, _, #ch{tx = {_, _}}) ->
+ precondition_failed("cannot switch from tx to confirm mode");
+
+handle_method(#'confirm.select'{nowait = NoWait}, _, State) ->
+ return_ok(State#ch{confirm_enabled = true},
+ NoWait, #'confirm.select_ok'{});
+
+handle_method(#'channel.flow'{active = true}, _, State) ->
+ {reply, #'channel.flow_ok'{active = true}, State};
+
+handle_method(#'channel.flow'{active = false}, _, _State) ->
+ rabbit_misc:protocol_error(not_implemented, "active=false", []);
+
+handle_method(#'basic.credit'{consumer_tag = CTag,
+ credit = Credit,
+ drain = Drain},
+ _, State = #ch{consumer_mapping = Consumers,
+ queue_states = QStates0}) ->
+ case maps:find(CTag, Consumers) of
+ {ok, {Q, _CParams}} ->
+ {ok, QStates, Actions} = rabbit_queue_type:credit(Q, CTag, Credit, Drain, QStates0),
+ {noreply, handle_queue_actions(Actions, State#ch{queue_states = QStates})};
+ error -> precondition_failed(
+ "unknown consumer tag '~s'", [CTag])
+ end;
+
+handle_method(_MethodRecord, _Content, _State) ->
+ rabbit_misc:protocol_error(
+ command_invalid, "unimplemented method", []).
+
+%%----------------------------------------------------------------------------
+
+%% We get the queue process to send the consume_ok on our behalf. This
+%% is for symmetry with basic.cancel - see the comment in that method
+%% for why.
+basic_consume(QueueName, NoAck, ConsumerPrefetch, ActualConsumerTag,
+ ExclusiveConsume, Args, NoWait,
+ State = #ch{cfg = #conf{conn_pid = ConnPid,
+ user = #user{username = Username}},
+ limiter = Limiter,
+ consumer_mapping = ConsumerMapping,
+ queue_states = QueueStates0}) ->
+ case rabbit_amqqueue:with_exclusive_access_or_die(
+ QueueName, ConnPid,
+ fun (Q) ->
+ {rabbit_amqqueue:basic_consume(
+ Q, NoAck, self(),
+ rabbit_limiter:pid(Limiter),
+ rabbit_limiter:is_active(Limiter),
+ ConsumerPrefetch, ActualConsumerTag,
+ ExclusiveConsume, Args,
+ ok_msg(NoWait, #'basic.consume_ok'{
+ consumer_tag = ActualConsumerTag}),
+ Username, QueueStates0),
+ Q}
+ end) of
+ {{ok, QueueStates, Actions}, Q} when ?is_amqqueue(Q) ->
+ CM1 = maps:put(
+ ActualConsumerTag,
+ {Q, {NoAck, ConsumerPrefetch, ExclusiveConsume, Args}},
+ ConsumerMapping),
+
+ State1 = State#ch{consumer_mapping = CM1,
+ queue_states = QueueStates},
+ State2 = handle_queue_actions(Actions, State1),
+ {ok, case NoWait of
+ true -> consumer_monitor(ActualConsumerTag, State2);
+ false -> State2
+ end};
+ {{error, exclusive_consume_unavailable} = E, _Q} ->
+ E;
+ {{error, global_qos_not_supported_for_queue_type} = E, _Q} ->
+ E;
+ {{protocol_error, Type, Reason, ReasonArgs}, _Q} ->
+ rabbit_misc:protocol_error(Type, Reason, ReasonArgs)
+ end.
+
+maybe_stat(false, Q) -> rabbit_amqqueue:stat(Q);
+maybe_stat(true, _Q) -> {ok, 0, 0}.
+
+consumer_monitor(ConsumerTag,
+ State = #ch{consumer_mapping = ConsumerMapping,
+ queue_consumers = QCons}) ->
+ {Q, _} = maps:get(ConsumerTag, ConsumerMapping),
+ QRef = amqqueue:get_name(Q),
+ CTags1 = case maps:find(QRef, QCons) of
+ {ok, CTags} -> gb_sets:insert(ConsumerTag, CTags);
+ error -> gb_sets:singleton(ConsumerTag)
+ end,
+ QCons1 = maps:put(QRef, CTags1, QCons),
+ State#ch{queue_consumers = QCons1}.
+
+handle_consuming_queue_down_or_eol(QName,
+ State = #ch{queue_consumers = QCons}) ->
+ ConsumerTags = case maps:find(QName, QCons) of
+ error -> gb_sets:new();
+ {ok, CTags} -> CTags
+ end,
+ gb_sets:fold(
+ fun (CTag, StateN = #ch{consumer_mapping = CMap}) ->
+ case queue_down_consumer_action(CTag, CMap) of
+ remove ->
+ cancel_consumer(CTag, QName, StateN);
+ {recover, {NoAck, ConsumerPrefetch, Exclusive, Args}} ->
+ case catch basic_consume(
+ QName, NoAck, ConsumerPrefetch, CTag,
+ Exclusive, Args, true, StateN) of
+ {ok, StateN1} ->
+ StateN1;
+ _Err ->
+ cancel_consumer(CTag, QName, StateN)
+ end
+ end
+ end, State#ch{queue_consumers = maps:remove(QName, QCons)}, ConsumerTags).
+
+%% [0] There is a slight danger here that if a queue is deleted and
+%% then recreated again the reconsume will succeed even though it was
+%% not an HA failover. But the likelihood is not great and most users
+%% are unlikely to care.
+
+cancel_consumer(CTag, QName,
+ State = #ch{cfg = #conf{capabilities = Capabilities},
+ consumer_mapping = CMap}) ->
+ case rabbit_misc:table_lookup(
+ Capabilities, <<"consumer_cancel_notify">>) of
+ {bool, true} -> ok = send(#'basic.cancel'{consumer_tag = CTag,
+ nowait = true}, State);
+ _ -> ok
+ end,
+ rabbit_event:notify(consumer_deleted, [{consumer_tag, CTag},
+ {channel, self()},
+ {queue, QName}]),
+ State#ch{consumer_mapping = maps:remove(CTag, CMap)}.
+
+queue_down_consumer_action(CTag, CMap) ->
+ {_, {_, _, _, Args} = ConsumeSpec} = maps:get(CTag, CMap),
+ case rabbit_misc:table_lookup(Args, <<"x-cancel-on-ha-failover">>) of
+ {bool, true} -> remove;
+ _ -> {recover, ConsumeSpec}
+ end.
+
+binding_action(Fun, SourceNameBin0, DestinationType, DestinationNameBin0,
+ RoutingKey, Arguments, VHostPath, ConnPid, AuthzContext,
+ #user{username = Username} = User) ->
+ ExchangeNameBin = strip_cr_lf(SourceNameBin0),
+ DestinationNameBin = strip_cr_lf(DestinationNameBin0),
+ DestinationName = name_to_resource(DestinationType, DestinationNameBin, VHostPath),
+ check_write_permitted(DestinationName, User, AuthzContext),
+ ExchangeName = rabbit_misc:r(VHostPath, exchange, ExchangeNameBin),
+ [check_not_default_exchange(N) || N <- [DestinationName, ExchangeName]],
+ check_read_permitted(ExchangeName, User, AuthzContext),
+ case rabbit_exchange:lookup(ExchangeName) of
+ {error, not_found} ->
+ ok;
+ {ok, Exchange} ->
+ check_read_permitted_on_topic(Exchange, User, RoutingKey, AuthzContext)
+ end,
+ case Fun(#binding{source = ExchangeName,
+ destination = DestinationName,
+ key = RoutingKey,
+ args = Arguments},
+ fun (_X, Q) when ?is_amqqueue(Q) ->
+ try rabbit_amqqueue:check_exclusive_access(Q, ConnPid)
+ catch exit:Reason -> {error, Reason}
+ end;
+ (_X, #exchange{}) ->
+ ok
+ end,
+ Username) of
+ {error, {resources_missing, [{not_found, Name} | _]}} ->
+ rabbit_amqqueue:not_found(Name);
+ {error, {resources_missing, [{absent, Q, Reason} | _]}} ->
+ rabbit_amqqueue:absent(Q, Reason);
+ {error, binding_not_found} ->
+ rabbit_misc:protocol_error(
+ not_found, "no binding ~s between ~s and ~s",
+ [RoutingKey, rabbit_misc:rs(ExchangeName),
+ rabbit_misc:rs(DestinationName)]);
+ {error, {binding_invalid, Fmt, Args}} ->
+ rabbit_misc:protocol_error(precondition_failed, Fmt, Args);
+ {error, #amqp_error{} = Error} ->
+ rabbit_misc:protocol_error(Error);
+ ok ->
+ ok
+ end.
+
+basic_return(#basic_message{exchange_name = ExchangeName,
+ routing_keys = [RoutingKey | _CcRoutes],
+ content = Content},
+ State = #ch{cfg = #conf{protocol = Protocol,
+ writer_pid = WriterPid}},
+ Reason) ->
+ ?INCR_STATS(exchange_stats, ExchangeName, 1, return_unroutable, State),
+ {_Close, ReplyCode, ReplyText} = Protocol:lookup_amqp_exception(Reason),
+ ok = rabbit_writer:send_command(
+ WriterPid,
+ #'basic.return'{reply_code = ReplyCode,
+ reply_text = ReplyText,
+ exchange = ExchangeName#resource.name,
+ routing_key = RoutingKey},
+ Content).
+
+reject(DeliveryTag, Requeue, Multiple,
+ State = #ch{unacked_message_q = UAMQ, tx = Tx}) ->
+ {Acked, Remaining} = collect_acks(UAMQ, DeliveryTag, Multiple),
+ State1 = State#ch{unacked_message_q = Remaining},
+ {noreply, case Tx of
+ none ->
+ {State2, Actions} = internal_reject(Requeue, Acked, State1#ch.limiter, State1),
+ handle_queue_actions(Actions, State2);
+ {Msgs, Acks} ->
+ Acks1 = ack_cons(Requeue, Acked, Acks),
+ State1#ch{tx = {Msgs, Acks1}}
+ end}.
+
+%% NB: Acked is in youngest-first order
+internal_reject(Requeue, Acked, Limiter,
+ State = #ch{queue_states = QueueStates0}) ->
+ {QueueStates, Actions} =
+ foreach_per_queue(
+ fun({QRef, CTag}, MsgIds, {Acc0, Actions0}) ->
+ Op = case Requeue of
+ false -> discard;
+ true -> requeue
+ end,
+ case rabbit_queue_type:settle(QRef, Op, CTag, MsgIds, Acc0) of
+ {ok, Acc, Actions} ->
+ {Acc, Actions0 ++ Actions};
+ {protocol_error, ErrorType, Reason, ReasonArgs} ->
+ rabbit_misc:protocol_error(ErrorType, Reason, ReasonArgs)
+ end
+ end, Acked, {QueueStates0, []}),
+ ok = notify_limiter(Limiter, Acked),
+ {State#ch{queue_states = QueueStates}, Actions}.
+
+record_sent(Type, Tag, AckRequired,
+ Msg = {QName, _QPid, MsgId, Redelivered, _Message},
+ State = #ch{cfg = #conf{channel = ChannelNum,
+ trace_state = TraceState,
+ user = #user{username = Username},
+ conn_name = ConnName
+ },
+ unacked_message_q = UAMQ,
+ next_tag = DeliveryTag
+ }) ->
+ ?INCR_STATS(queue_stats, QName, 1, case {Type, AckRequired} of
+ {get, true} -> get;
+ {get, false} -> get_no_ack;
+ {deliver, true} -> deliver;
+ {deliver, false} -> deliver_no_ack
+ end, State),
+ case Redelivered of
+ true -> ?INCR_STATS(queue_stats, QName, 1, redeliver, State);
+ false -> ok
+ end,
+ DeliveredAt = os:system_time(millisecond),
+ rabbit_trace:tap_out(Msg, ConnName, ChannelNum, Username, TraceState),
+ UAMQ1 = case AckRequired of
+ true ->
+ ?QUEUE:in(#pending_ack{delivery_tag = DeliveryTag,
+ tag = Tag,
+ delivered_at = DeliveredAt,
+ queue = QName,
+ msg_id = MsgId}, UAMQ);
+ false ->
+ UAMQ
+ end,
+ State#ch{unacked_message_q = UAMQ1, next_tag = DeliveryTag + 1}.
+
+%% NB: returns acks in youngest-first order
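+%% A delivery tag of 0 with multiple=true acknowledges all outstanding
+%% deliveries on the channel, as per AMQP 0-9-1 semantics.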
+collect_acks(Q, 0, true) ->
+ {lists:reverse(?QUEUE:to_list(Q)), ?QUEUE:new()};
+collect_acks(Q, DeliveryTag, Multiple) ->
+ collect_acks([], [], Q, DeliveryTag, Multiple).
+
+collect_acks(ToAcc, PrefixAcc, Q, DeliveryTag, Multiple) ->
+ case ?QUEUE:out(Q) of
+ {{value, UnackedMsg = #pending_ack{delivery_tag = CurrentDeliveryTag}},
+ QTail} ->
+ if CurrentDeliveryTag == DeliveryTag ->
+ {[UnackedMsg | ToAcc],
+ case PrefixAcc of
+ [] -> QTail;
+ _ -> ?QUEUE:join(
+ ?QUEUE:from_list(lists:reverse(PrefixAcc)),
+ QTail)
+ end};
+ Multiple ->
+ collect_acks([UnackedMsg | ToAcc], PrefixAcc,
+ QTail, DeliveryTag, Multiple);
+ true ->
+ collect_acks(ToAcc, [UnackedMsg | PrefixAcc],
+ QTail, DeliveryTag, Multiple)
+ end;
+ {empty, _} ->
+ precondition_failed("unknown delivery tag ~w", [DeliveryTag])
+ end.
+
+%% NB: Acked is in youngest-first order
+ack(Acked, State = #ch{queue_states = QueueStates0}) ->
+ {QueueStates, Actions} =
+ foreach_per_queue(
+ fun ({QRef, CTag}, MsgIds, {Acc0, ActionsAcc0}) ->
+ case rabbit_queue_type:settle(QRef, complete, CTag,
+ MsgIds, Acc0) of
+ {ok, Acc, ActionsAcc} ->
+ incr_queue_stats(QRef, MsgIds, State),
+ {Acc, ActionsAcc0 ++ ActionsAcc};
+ {protocol_error, ErrorType, Reason, ReasonArgs} ->
+ rabbit_misc:protocol_error(ErrorType, Reason, ReasonArgs)
+ end
+ end, Acked, {QueueStates0, []}),
+ ok = notify_limiter(State#ch.limiter, Acked),
+ {State#ch{queue_states = QueueStates}, Actions}.
+
+incr_queue_stats(QName, MsgIds, State) ->
+ Count = length(MsgIds),
+ ?INCR_STATS(queue_stats, QName, Count, ack, State).
+
+%% {Msgs, Acks}
+%%
+%% Msgs is a queue.
+%%
+%% Acks looks s.t. like this:
+%% [{false,[5,4]},{true,[3]},{ack,[2,1]}, ...]
+%%
+%% Each element is a pair consisting of a tag and a list of
+%% ack'ed/reject'ed msg ids. The tag is one of 'ack' (to ack), 'true'
+%% (reject w requeue), 'false' (reject w/o requeue). The msg ids, as
+%% well as the list overall, are in "most-recent (generally youngest)
+%% ack first" order.
+new_tx() -> {?QUEUE:new(), []}.
+
+notify_queues(State = #ch{cfg = #conf{state = closing}}) ->
+ {ok, State};
+notify_queues(State = #ch{consumer_mapping = Consumers,
+ cfg = Cfg}) ->
+ QPids = classic_consumer_queue_pids(Consumers),
+ Timeout = get_operation_timeout(),
+ {rabbit_amqqueue:notify_down_all(QPids, self(), Timeout),
+ State#ch{cfg = Cfg#conf{state = closing}}}.
+
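+%% Groups pending acks by {queue name, consumer tag} via a gb_tree and
+%% applies F once per group; the single-element clause skips building
+%% the tree altogether.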
+foreach_per_queue(_F, [], Acc) ->
+ Acc;
+foreach_per_queue(F, [#pending_ack{tag = CTag,
+ queue = QName,
+ msg_id = MsgId}], Acc) ->
+ %% quorum queue, needs the consumer tag
+ F({QName, CTag}, [MsgId], Acc);
+foreach_per_queue(F, UAL, Acc) ->
+ T = lists:foldl(fun (#pending_ack{tag = CTag,
+ queue = QName,
+ msg_id = MsgId}, T) ->
+ rabbit_misc:gb_trees_cons({QName, CTag}, MsgId, T)
+ end, gb_trees:empty(), UAL),
+ rabbit_misc:gb_trees_fold(fun (Key, Val, Acc0) -> F(Key, Val, Acc0) end, Acc, T).
+
+%% hack to patch up missing queue type behaviour for classic queue
+classic_consumer_queue_pids(Consumers) ->
+ lists:usort([amqqueue:get_pid(Q)
+ || {Q, _CParams} <- maps:values(Consumers),
+ amqqueue:get_type(Q) == rabbit_classic_queue]).
+
+%% Tell the limiter about the number of acks that have been received
+%% for messages delivered to subscribed consumers, but not about acks
+%% for messages sent in response to a basic.get (identified by their
+%% consumer tag being an integer: the same as the delivery tag, as
+%% required by quorum queues).
+notify_limiter(Limiter, Acked) ->
+ %% optimisation: avoid the potentially expensive 'foldl' in the
+ %% common case.
+ case rabbit_limiter:is_active(Limiter) of
+ false -> ok;
+ true -> case lists:foldl(fun ({_, CTag, _, _}, Acc) when is_integer(CTag) ->
+ %% Quorum queues use integer CTags
+ %% classic queues use binaries
+ %% Quorum queues do not interact
+ %% with limiters
+ Acc;
+ ({_, _, _, _}, Acc) -> Acc + 1
+ end, 0, Acked) of
+ 0 -> ok;
+ Count -> rabbit_limiter:ack(Limiter, Count)
+ end
+ end.
+
+deliver_to_queues({#delivery{message = #basic_message{exchange_name = XName},
+ confirm = false,
+ mandatory = false},
+ _RoutedToQs = []}, State) -> %% optimisation
+ ?INCR_STATS(exchange_stats, XName, 1, publish, State),
+ ?INCR_STATS(exchange_stats, XName, 1, drop_unroutable, State),
+ State;
+deliver_to_queues({Delivery = #delivery{message = Message = #basic_message{
+ exchange_name = XName},
+ mandatory = Mandatory,
+ confirm = Confirm,
+ msg_seq_no = MsgSeqNo},
+ DelQNames}, State0 = #ch{queue_states = QueueStates0}) ->
+ Qs = rabbit_amqqueue:lookup(DelQNames),
+ AllQueueNames = lists:foldl(fun (Q, Acc) ->
+ QRef = amqqueue:get_name(Q),
+ [QRef | Acc]
+ end, [], Qs),
+ {ok, QueueStates, Actions} =
+ rabbit_queue_type:deliver(Qs, Delivery, QueueStates0),
+ %% NB: the order here is important since basic.returns must be
+ %% sent before confirms.
+ ok = process_routing_mandatory(Mandatory, Qs, Message, State0),
+ State1 = process_routing_confirm(Confirm, AllQueueNames,
+ MsgSeqNo, XName, State0),
+ %% Actions must be processed after registering confirms as actions may
+ %% contain rejections of publishes
+ State = handle_queue_actions(Actions,
+ State1#ch{queue_states = QueueStates}),
+ case rabbit_event:stats_level(State, #ch.stats_timer) of
+ fine ->
+ ?INCR_STATS(exchange_stats, XName, 1, publish),
+ [?INCR_STATS(queue_exchange_stats,
+ {amqqueue:get_name(Q), XName}, 1, publish)
+ || Q <- Qs];
+ _ ->
+ ok
+ end,
+ State.
+
+process_routing_mandatory(_Mandatory = true,
+ _RoutedToQs = [],
+ Msg, State) ->
+ ok = basic_return(Msg, State, no_route),
+ ok;
+process_routing_mandatory(_Mandatory = false,
+ _RoutedToQs = [],
+ #basic_message{exchange_name = ExchangeName}, State) ->
+ ?INCR_STATS(exchange_stats, ExchangeName, 1, drop_unroutable, State),
+ ok;
+process_routing_mandatory(_, _, _, _) ->
+ ok.
+
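+%% Publisher confirms: a message routed to no queue is confirmed
+%% immediately; otherwise its sequence number is tracked until every
+%% queue it was routed to has confirmed it.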
+process_routing_confirm(false, _, _, _, State) ->
+ State;
+process_routing_confirm(true, [], MsgSeqNo, XName, State) ->
+ record_confirms([{MsgSeqNo, XName}], State);
+process_routing_confirm(true, QRefs, MsgSeqNo, XName, State) ->
+ State#ch{unconfirmed =
+ rabbit_confirms:insert(MsgSeqNo, QRefs, XName, State#ch.unconfirmed)}.
+
+confirm(MsgSeqNos, QRef, State = #ch{unconfirmed = UC}) ->
+    %% NOTE: if the queue name does not exist here it is likely that
+    %% the ref does not exist in the unconfirmed set either (nor does
+    %% the 'ignore' atom), so confirming against it is a harmless no-op.
+ {ConfirmMXs, UC1} = rabbit_confirms:confirm(MsgSeqNos, QRef, UC),
+ %% NB: don't call noreply/1 since we don't want to send confirms.
+ record_confirms(ConfirmMXs, State#ch{unconfirmed = UC1}).
+
+send_confirms_and_nacks(State = #ch{tx = none, confirmed = [], rejected = []}) ->
+ State;
+send_confirms_and_nacks(State = #ch{tx = none, confirmed = C, rejected = R}) ->
+ case rabbit_node_monitor:pause_partition_guard() of
+ ok ->
+ Confirms = lists:append(C),
+ Rejects = lists:append(R),
+ ConfirmMsgSeqNos =
+ lists:foldl(
+ fun ({MsgSeqNo, XName}, MSNs) ->
+ ?INCR_STATS(exchange_stats, XName, 1, confirm, State),
+ [MsgSeqNo | MSNs]
+ end, [], Confirms),
+ RejectMsgSeqNos = [MsgSeqNo || {MsgSeqNo, _} <- Rejects],
+
+ State1 = send_confirms(ConfirmMsgSeqNos,
+ RejectMsgSeqNos,
+ State#ch{confirmed = []}),
+ %% TODO: msg seq nos, same as for confirms. Need to implement
+ %% nack rates first.
+ send_nacks(RejectMsgSeqNos,
+ ConfirmMsgSeqNos,
+ State1#ch{rejected = []});
+ pausing -> State
+ end;
+send_confirms_and_nacks(State) ->
+ case rabbit_node_monitor:pause_partition_guard() of
+ ok -> maybe_complete_tx(State);
+ pausing -> State
+ end.
+
+send_nacks([], _, State) ->
+ State;
+send_nacks(_Rs, _, State = #ch{cfg = #conf{state = closing}}) -> %% optimisation
+ State;
+send_nacks(Rs, Cs, State) ->
+ coalesce_and_send(Rs, Cs,
+ fun(MsgSeqNo, Multiple) ->
+ #'basic.nack'{delivery_tag = MsgSeqNo,
+ multiple = Multiple}
+ end, State).
+
+send_confirms([], _, State) ->
+ State;
+send_confirms(_Cs, _, State = #ch{cfg = #conf{state = closing}}) -> %% optimisation
+ State;
+send_confirms([MsgSeqNo], _, State) ->
+ ok = send(#'basic.ack'{delivery_tag = MsgSeqNo}, State),
+ State;
+send_confirms(Cs, Rs, State) ->
+ coalesce_and_send(Cs, Rs,
+ fun(MsgSeqNo, Multiple) ->
+ #'basic.ack'{delivery_tag = MsgSeqNo,
+ multiple = Multiple}
+ end, State).
+
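+%% Sends a single multiple=true frame for the longest contiguous run
+%% of sequence numbers below both the smallest outstanding unconfirmed
+%% seqno and the smallest seqno awaiting the opposite outcome, then
+%% one frame per remaining seqno.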
+coalesce_and_send(MsgSeqNos, NegativeMsgSeqNos, MkMsgFun, State = #ch{unconfirmed = UC}) ->
+ SMsgSeqNos = lists:usort(MsgSeqNos),
+ UnconfirmedCutoff = case rabbit_confirms:is_empty(UC) of
+ true -> lists:last(SMsgSeqNos) + 1;
+ false -> rabbit_confirms:smallest(UC)
+ end,
+ Cutoff = lists:min([UnconfirmedCutoff | NegativeMsgSeqNos]),
+ {Ms, Ss} = lists:splitwith(fun(X) -> X < Cutoff end, SMsgSeqNos),
+ case Ms of
+ [] -> ok;
+ _ -> ok = send(MkMsgFun(lists:last(Ms), true), State)
+ end,
+ [ok = send(MkMsgFun(SeqNo, false), State) || SeqNo <- Ss],
+ State.
+
+ack_cons(Tag, Acked, [{Tag, Acks} | L]) -> [{Tag, Acked ++ Acks} | L];
+ack_cons(Tag, Acked, Acks) -> [{Tag, Acked} | Acks].
+
+ack_len(Acks) -> lists:sum([length(L) || {ack, L} <- Acks]).
+
+maybe_complete_tx(State = #ch{tx = {_, _}}) ->
+ State;
+maybe_complete_tx(State = #ch{unconfirmed = UC}) ->
+ case rabbit_confirms:is_empty(UC) of
+ false -> State;
+ true -> complete_tx(State#ch{confirmed = []})
+ end.
+
+complete_tx(State = #ch{tx = committing}) ->
+ ok = send(#'tx.commit_ok'{}, State),
+ State#ch{tx = new_tx()};
+complete_tx(State = #ch{tx = failed}) ->
+ {noreply, State1} = handle_exception(
+ rabbit_misc:amqp_error(
+ precondition_failed, "partial tx completion", [],
+ 'tx.commit'),
+ State),
+ State1#ch{tx = new_tx()}.
+
+infos(Items, State) -> [{Item, i(Item, State)} || Item <- Items].
+
+infos(Items, Deadline, State) ->
+ [begin
+ Now = now_millis(),
+ if
+ Now > Deadline ->
+ throw(timeout);
+ true ->
+ {Item, i(Item, State)}
+ end
+ end || Item <- Items].
+
+i(pid, _) -> self();
+i(connection, #ch{cfg = #conf{conn_pid = ConnPid}}) -> ConnPid;
+i(number, #ch{cfg = #conf{channel = Channel}}) -> Channel;
+i(user, #ch{cfg = #conf{user = User}}) -> User#user.username;
+i(user_who_performed_action, Ch) -> i(user, Ch);
+i(vhost, #ch{cfg = #conf{virtual_host = VHost}}) -> VHost;
+i(transactional, #ch{tx = Tx}) -> Tx =/= none;
+i(confirm, #ch{confirm_enabled = CE}) -> CE;
+i(name, State) -> name(State);
+i(consumer_count, #ch{consumer_mapping = CM}) -> maps:size(CM);
+i(messages_unconfirmed, #ch{unconfirmed = UC}) -> rabbit_confirms:size(UC);
+i(messages_unacknowledged, #ch{unacked_message_q = UAMQ}) -> ?QUEUE:len(UAMQ);
+i(messages_uncommitted, #ch{tx = {Msgs, _Acks}}) -> ?QUEUE:len(Msgs);
+i(messages_uncommitted, #ch{}) -> 0;
+i(acks_uncommitted, #ch{tx = {_Msgs, Acks}}) -> ack_len(Acks);
+i(acks_uncommitted, #ch{}) -> 0;
+i(pending_raft_commands, #ch{queue_states = QS}) ->
+ pending_raft_commands(QS);
+i(state, #ch{cfg = #conf{state = running}}) -> credit_flow:state();
+i(state, #ch{cfg = #conf{state = State}}) -> State;
+i(prefetch_count, #ch{cfg = #conf{consumer_prefetch = C}}) -> C;
+i(global_prefetch_count, #ch{limiter = Limiter}) ->
+ rabbit_limiter:get_prefetch_limit(Limiter);
+i(interceptors, #ch{interceptor_state = IState}) ->
+ IState;
+i(garbage_collection, _State) ->
+ rabbit_misc:get_gc_info(self());
+i(reductions, _State) ->
+ {reductions, Reductions} = erlang:process_info(self(), reductions),
+ Reductions;
+i(Item, _) ->
+ throw({bad_argument, Item}).
+
+pending_raft_commands(QStates) ->
+ Fun = fun(_, V, Acc) ->
+ case rabbit_queue_type:state_info(V) of
+ #{pending_raft_commands := P} ->
+ Acc + P;
+ _ ->
+ Acc
+ end
+ end,
+ rabbit_queue_type:fold_state(Fun, 0, QStates).
+
+name(#ch{cfg = #conf{conn_name = ConnName, channel = Channel}}) ->
+ list_to_binary(rabbit_misc:format("~s (~p)", [ConnName, Channel])).
+
+emit_stats(State) -> emit_stats(State, []).
+
+emit_stats(State, Extra) ->
+ [{reductions, Red} | Coarse0] = infos(?STATISTICS_KEYS, State),
+ %% First metric must be `idle_since` (if available), as expected by
+ %% `rabbit_mgmt_format:format_channel_stats`. This is a performance
+ %% optimisation that avoids traversing the whole list when only
+ %% one element has to be formatted.
+ rabbit_core_metrics:channel_stats(self(), Extra ++ Coarse0),
+ rabbit_core_metrics:channel_stats(reductions, self(), Red).
+
+erase_queue_stats(QName) ->
+ rabbit_core_metrics:channel_queue_down({self(), QName}),
+ erase({queue_stats, QName}),
+ [begin
+ rabbit_core_metrics:channel_queue_exchange_down({self(), QX}),
+ erase({queue_exchange_stats, QX})
+ end || {{queue_exchange_stats, QX = {QName0, _}}, _} <- get(),
+ QName0 =:= QName].
+
+get_vhost(#ch{cfg = #conf{virtual_host = VHost}}) -> VHost.
+
+get_user(#ch{cfg = #conf{user = User}}) -> User.
+
+delete_stats({queue_stats, QName}) ->
+ rabbit_core_metrics:channel_queue_down({self(), QName});
+delete_stats({exchange_stats, XName}) ->
+ rabbit_core_metrics:channel_exchange_down({self(), XName});
+delete_stats({queue_exchange_stats, QX}) ->
+ rabbit_core_metrics:channel_queue_exchange_down({self(), QX});
+delete_stats(_) ->
+ ok.
+
+put_operation_timeout() ->
+ put(channel_operation_timeout, ?CHANNEL_OPERATION_TIMEOUT).
+
+get_operation_timeout() ->
+ get(channel_operation_timeout).
+
+%% Refactored and exported to allow direct calls from the HTTP API,
+%% avoiding the use of AMQP 0-9-1 from the management plugin.
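+%%
+%% For example, a management plugin handler could bind a queue without
+%% an AMQP connection along these lines (argument values illustrative):
+%%
+%%   handle_method(#'queue.bind'{queue       = <<"q1">>,
+%%                               exchange    = <<"x1">>,
+%%                               routing_key = <<"k">>,
+%%                               arguments   = []},
+%%                 none, #{}, none, <<"/">>, User)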
+
+handle_method(#'exchange.bind'{destination = DestinationNameBin,
+ source = SourceNameBin,
+ routing_key = RoutingKey,
+ arguments = Arguments},
+ ConnPid, AuthzContext, _CollectorId, VHostPath, User) ->
+ binding_action(fun rabbit_binding:add/3,
+ SourceNameBin, exchange, DestinationNameBin,
+ RoutingKey, Arguments, VHostPath, ConnPid, AuthzContext, User);
+handle_method(#'exchange.unbind'{destination = DestinationNameBin,
+ source = SourceNameBin,
+ routing_key = RoutingKey,
+ arguments = Arguments},
+ ConnPid, AuthzContext, _CollectorId, VHostPath, User) ->
+ binding_action(fun rabbit_binding:remove/3,
+ SourceNameBin, exchange, DestinationNameBin,
+ RoutingKey, Arguments, VHostPath, ConnPid, AuthzContext, User);
+handle_method(#'queue.unbind'{queue = QueueNameBin,
+ exchange = ExchangeNameBin,
+ routing_key = RoutingKey,
+ arguments = Arguments},
+ ConnPid, AuthzContext, _CollectorId, VHostPath, User) ->
+ binding_action(fun rabbit_binding:remove/3,
+ ExchangeNameBin, queue, QueueNameBin,
+ RoutingKey, Arguments, VHostPath, ConnPid, AuthzContext, User);
+handle_method(#'queue.bind'{queue = QueueNameBin,
+ exchange = ExchangeNameBin,
+ routing_key = RoutingKey,
+ arguments = Arguments},
+ ConnPid, AuthzContext, _CollectorId, VHostPath, User) ->
+ binding_action(fun rabbit_binding:add/3,
+ ExchangeNameBin, queue, QueueNameBin,
+ RoutingKey, Arguments, VHostPath, ConnPid, AuthzContext, User);
+%% Note that declares of these pseudo-queues are effectively passive: if
+%% such a queue exists, it by definition has one consumer.
+handle_method(#'queue.declare'{queue = <<"amq.rabbitmq.reply-to",
+ _/binary>> = QueueNameBin},
+ _ConnPid, _AuthzContext, _CollectorPid, VHost, _User) ->
+ StrippedQueueNameBin = strip_cr_lf(QueueNameBin),
+ QueueName = rabbit_misc:r(VHost, queue, StrippedQueueNameBin),
+ case declare_fast_reply_to(StrippedQueueNameBin) of
+ exists -> {ok, QueueName, 0, 1};
+ not_found -> rabbit_amqqueue:not_found(QueueName)
+ end;
+handle_method(#'queue.declare'{queue = QueueNameBin,
+ passive = false,
+ durable = DurableDeclare,
+ exclusive = ExclusiveDeclare,
+ auto_delete = AutoDelete,
+ nowait = NoWait,
+ arguments = Args} = Declare,
+ ConnPid, AuthzContext, CollectorPid, VHostPath,
+ #user{username = Username} = User) ->
+ Owner = case ExclusiveDeclare of
+ true -> ConnPid;
+ false -> none
+ end,
+ StrippedQueueNameBin = strip_cr_lf(QueueNameBin),
+ Durable = DurableDeclare andalso not ExclusiveDeclare,
+ ActualNameBin = case StrippedQueueNameBin of
+ <<>> ->
+ case rabbit_amqqueue:is_server_named_allowed(Args) of
+ true ->
+ rabbit_guid:binary(rabbit_guid:gen_secure(), "amq.gen");
+ false ->
+ rabbit_misc:protocol_error(
+ precondition_failed,
+ "Cannot declare a server-named queue for type ~p",
+ [rabbit_amqqueue:get_queue_type(Args)])
+ end;
+ Other -> check_name('queue', Other)
+ end,
+ QueueName = rabbit_misc:r(VHostPath, queue, ActualNameBin),
+ check_configure_permitted(QueueName, User, AuthzContext),
+ rabbit_core_metrics:queue_declared(QueueName),
+ case rabbit_amqqueue:with(
+ QueueName,
+ fun (Q) -> ok = rabbit_amqqueue:assert_equivalence(
+ Q, Durable, AutoDelete, Args, Owner),
+ maybe_stat(NoWait, Q)
+ end) of
+ {ok, MessageCount, ConsumerCount} ->
+ {ok, QueueName, MessageCount, ConsumerCount};
+ {error, not_found} ->
+ %% enforce the limit for newly declared queues only
+ check_vhost_queue_limit(QueueName, VHostPath),
+ DlxKey = <<"x-dead-letter-exchange">>,
+ case rabbit_misc:r_arg(VHostPath, exchange, Args, DlxKey) of
+ undefined ->
+ ok;
+ {error, {invalid_type, Type}} ->
+ precondition_failed(
+ "invalid type '~s' for arg '~s' in ~s",
+ [Type, DlxKey, rabbit_misc:rs(QueueName)]);
+ DLX ->
+ check_read_permitted(QueueName, User, AuthzContext),
+ check_write_permitted(DLX, User, AuthzContext),
+ ok
+ end,
+ case rabbit_amqqueue:declare(QueueName, Durable, AutoDelete,
+ Args, Owner, Username) of
+ {new, Q} when ?is_amqqueue(Q) ->
+ %% We need to notify the reader within the channel
+ %% process so that we can be sure there are no
+ %% outstanding exclusive queues being declared as
+ %% the connection shuts down.
+ QPid = amqqueue:get_pid(Q),
+ ok = case {Owner, CollectorPid} of
+ {none, _} -> ok;
+ {_, none} -> ok; %% Supports call from mgmt API
+ _ -> rabbit_queue_collector:register(
+ CollectorPid, QPid)
+ end,
+ rabbit_core_metrics:queue_created(QueueName),
+ {ok, QueueName, 0, 0};
+ {existing, _Q} ->
+ %% must have been created between the stat and the
+ %% declare. Loop around again.
+ handle_method(Declare, ConnPid, AuthzContext, CollectorPid, VHostPath,
+ User);
+ {absent, Q, Reason} ->
+ rabbit_amqqueue:absent(Q, Reason);
+ {owner_died, _Q} ->
+ %% Presumably our own days are numbered since the
+ %% connection has died. Pretend the queue exists though,
+ %% just so nothing fails.
+ {ok, QueueName, 0, 0};
+ {protocol_error, ErrorType, Reason, ReasonArgs} ->
+ rabbit_misc:protocol_error(ErrorType, Reason, ReasonArgs)
+ end;
+ {error, {absent, Q, Reason}} ->
+ rabbit_amqqueue:absent(Q, Reason)
+ end;
+handle_method(#'queue.declare'{queue = QueueNameBin,
+ nowait = NoWait,
+ passive = true},
+ ConnPid, _AuthzContext, _CollectorPid, VHostPath, _User) ->
+ StrippedQueueNameBin = strip_cr_lf(QueueNameBin),
+ QueueName = rabbit_misc:r(VHostPath, queue, StrippedQueueNameBin),
+ Fun = fun (Q0) ->
+ QStat = maybe_stat(NoWait, Q0),
+ {QStat, Q0}
+ end,
+ %% Note: no need to check if Q is an #amqqueue, with_or_die does it
+ {{ok, MessageCount, ConsumerCount}, Q} = rabbit_amqqueue:with_or_die(QueueName, Fun),
+ ok = rabbit_amqqueue:check_exclusive_access(Q, ConnPid),
+ {ok, QueueName, MessageCount, ConsumerCount};
+handle_method(#'queue.delete'{queue = QueueNameBin,
+ if_unused = IfUnused,
+ if_empty = IfEmpty},
+ ConnPid, AuthzContext, _CollectorPid, VHostPath,
+ User = #user{username = Username}) ->
+ StrippedQueueNameBin = strip_cr_lf(QueueNameBin),
+ QueueName = qbin_to_resource(StrippedQueueNameBin, VHostPath),
+
+ check_configure_permitted(QueueName, User, AuthzContext),
+ case rabbit_amqqueue:with(
+ QueueName,
+ fun (Q) ->
+ rabbit_amqqueue:check_exclusive_access(Q, ConnPid),
+ rabbit_queue_type:delete(Q, IfUnused, IfEmpty, Username)
+ end,
+ fun (not_found) ->
+ {ok, 0};
+ ({absent, Q, crashed}) ->
+ _ = rabbit_classic_queue:delete_crashed(Q, Username),
+ {ok, 0};
+ ({absent, Q, stopped}) ->
+ _ = rabbit_classic_queue:delete_crashed(Q, Username),
+ {ok, 0};
+ ({absent, Q, Reason}) ->
+ rabbit_amqqueue:absent(Q, Reason)
+ end) of
+ {error, in_use} ->
+ precondition_failed("~s in use", [rabbit_misc:rs(QueueName)]);
+ {error, not_empty} ->
+ precondition_failed("~s not empty", [rabbit_misc:rs(QueueName)]);
+ {ok, Count} ->
+ {ok, Count};
+ {protocol_error, Type, Reason, ReasonArgs} ->
+ rabbit_misc:protocol_error(Type, Reason, ReasonArgs)
+ end;
+handle_method(#'exchange.delete'{exchange = ExchangeNameBin,
+ if_unused = IfUnused},
+ _ConnPid, AuthzContext, _CollectorPid, VHostPath,
+ User = #user{username = Username}) ->
+ StrippedExchangeNameBin = strip_cr_lf(ExchangeNameBin),
+ ExchangeName = rabbit_misc:r(VHostPath, exchange, StrippedExchangeNameBin),
+ check_not_default_exchange(ExchangeName),
+ check_exchange_deletion(ExchangeName),
+ check_configure_permitted(ExchangeName, User, AuthzContext),
+ case rabbit_exchange:delete(ExchangeName, IfUnused, Username) of
+ {error, not_found} ->
+ ok;
+ {error, in_use} ->
+ precondition_failed("~s in use", [rabbit_misc:rs(ExchangeName)]);
+ ok ->
+ ok
+ end;
+handle_method(#'queue.purge'{queue = QueueNameBin},
+ ConnPid, AuthzContext, _CollectorPid, VHostPath, User) ->
+ QueueName = qbin_to_resource(QueueNameBin, VHostPath),
+ check_read_permitted(QueueName, User, AuthzContext),
+ rabbit_amqqueue:with_exclusive_access_or_die(
+ QueueName, ConnPid,
+ fun (Q) ->
+ case rabbit_queue_type:purge(Q) of
+ {ok, _} = Res ->
+ Res;
+ {error, not_supported} ->
+ rabbit_misc:protocol_error(
+ not_implemented,
+ "queue.purge not supported by stream queues ~s",
+ [rabbit_misc:rs(amqqueue:get_name(Q))])
+ end
+ end);
+handle_method(#'exchange.declare'{exchange = ExchangeNameBin,
+ type = TypeNameBin,
+ passive = false,
+ durable = Durable,
+ auto_delete = AutoDelete,
+ internal = Internal,
+ arguments = Args},
+ _ConnPid, AuthzContext, _CollectorPid, VHostPath,
+ #user{username = Username} = User) ->
+ CheckedType = rabbit_exchange:check_type(TypeNameBin),
+ ExchangeName = rabbit_misc:r(VHostPath, exchange, strip_cr_lf(ExchangeNameBin)),
+ check_not_default_exchange(ExchangeName),
+ check_configure_permitted(ExchangeName, User, AuthzContext),
+ X = case rabbit_exchange:lookup(ExchangeName) of
+ {ok, FoundX} -> FoundX;
+ {error, not_found} ->
+ check_name('exchange', strip_cr_lf(ExchangeNameBin)),
+ AeKey = <<"alternate-exchange">>,
+ case rabbit_misc:r_arg(VHostPath, exchange, Args, AeKey) of
+ undefined -> ok;
+ {error, {invalid_type, Type}} ->
+ precondition_failed(
+ "invalid type '~s' for arg '~s' in ~s",
+ [Type, AeKey, rabbit_misc:rs(ExchangeName)]);
+ AName -> check_read_permitted(ExchangeName, User, AuthzContext),
+ check_write_permitted(AName, User, AuthzContext),
+ ok
+ end,
+ rabbit_exchange:declare(ExchangeName,
+ CheckedType,
+ Durable,
+ AutoDelete,
+ Internal,
+ Args,
+ Username)
+ end,
+ ok = rabbit_exchange:assert_equivalence(X, CheckedType, Durable,
+ AutoDelete, Internal, Args);
+handle_method(#'exchange.declare'{exchange = ExchangeNameBin,
+ passive = true},
+ _ConnPid, _AuthzContext, _CollectorPid, VHostPath, _User) ->
+ ExchangeName = rabbit_misc:r(VHostPath, exchange, strip_cr_lf(ExchangeNameBin)),
+ check_not_default_exchange(ExchangeName),
+ _ = rabbit_exchange:lookup_or_die(ExchangeName).
+
+handle_deliver(CTag, Ack, Msgs, State) when is_list(Msgs) ->
+ lists:foldl(fun(Msg, S) ->
+ handle_deliver0(CTag, Ack, Msg, S)
+ end, State, Msgs);
+handle_deliver(CTag, Ack, Msg, State) ->
+ %% backwards compatibility clause
+ handle_deliver0(CTag, Ack, Msg, State).
+
+handle_deliver0(ConsumerTag, AckRequired,
+ Msg = {QName, QPid, _MsgId, Redelivered,
+ #basic_message{exchange_name = ExchangeName,
+ routing_keys = [RoutingKey | _CcRoutes],
+ content = Content}},
+ State = #ch{cfg = #conf{writer_pid = WriterPid,
+ writer_gc_threshold = GCThreshold},
+ next_tag = DeliveryTag,
+ queue_states = Qs}) ->
+ Deliver = #'basic.deliver'{consumer_tag = ConsumerTag,
+ delivery_tag = DeliveryTag,
+ redelivered = Redelivered,
+ exchange = ExchangeName#resource.name,
+ routing_key = RoutingKey},
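+    %% Classic queues are notified via send_command_and_notify/5 once
+    %% the delivery has been written out, which is how the queue process
+    %% bounds the number of deliveries in flight per channel; other
+    %% queue types only need the frames written.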
+ case rabbit_queue_type:module(QName, Qs) of
+ {ok, rabbit_classic_queue} ->
+ ok = rabbit_writer:send_command_and_notify(
+ WriterPid, QPid, self(), Deliver, Content);
+ _ ->
+ ok = rabbit_writer:send_command(WriterPid, Deliver, Content)
+ end,
+ case GCThreshold of
+ undefined -> ok;
+ _ -> rabbit_basic:maybe_gc_large_msg(Content, GCThreshold)
+ end,
+ record_sent(deliver, ConsumerTag, AckRequired, Msg, State).
+
+handle_basic_get(WriterPid, DeliveryTag, NoAck, MessageCount,
+ Msg = {_QName, _QPid, _MsgId, Redelivered,
+ #basic_message{exchange_name = ExchangeName,
+ routing_keys = [RoutingKey | _CcRoutes],
+ content = Content}}, State) ->
+ ok = rabbit_writer:send_command(
+ WriterPid,
+ #'basic.get_ok'{delivery_tag = DeliveryTag,
+ redelivered = Redelivered,
+ exchange = ExchangeName#resource.name,
+ routing_key = RoutingKey,
+ message_count = MessageCount},
+ Content),
+ {noreply, record_sent(get, DeliveryTag, not(NoAck), Msg, State)}.
+
+init_tick_timer(State = #ch{tick_timer = undefined}) ->
+ {ok, Interval} = application:get_env(rabbit, channel_tick_interval),
+ State#ch{tick_timer = erlang:send_after(Interval, self(), tick)};
+init_tick_timer(State) ->
+ State.
+
+reset_tick_timer(State) ->
+ State#ch{tick_timer = undefined}.
+
+maybe_cancel_tick_timer(#ch{tick_timer = undefined} = State) ->
+ State;
+maybe_cancel_tick_timer(#ch{tick_timer = TRef,
+ unacked_message_q = UMQ} = State) ->
+ case ?QUEUE:len(UMQ) of
+ 0 ->
+ %% we can only cancel the tick timer if the unacked messages
+ %% queue is empty.
+ _ = erlang:cancel_timer(TRef),
+ State#ch{tick_timer = undefined};
+ _ ->
+ %% let the timer continue
+ State
+ end.
+
+now_millis() ->
+ erlang:monotonic_time(millisecond).
+
+get_operation_timeout_and_deadline() ->
+ % NB: can't use get_operation_timeout because
+ % this code may not be running via the channel Pid
+ Timeout = ?CHANNEL_OPERATION_TIMEOUT,
+ Deadline = now_millis() + Timeout,
+ {Timeout, Deadline}.
+
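+%% Fold over a ?QUEUE, visiting entries oldest-first.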
+queue_fold(Fun, Init, Q) ->
+ case ?QUEUE:out(Q) of
+ {empty, _Q} -> Init;
+ {{value, V}, Q1} -> queue_fold(Fun, Fun(V, Init), Q1)
+ end.
+
+evaluate_consumer_timeout(State0 = #ch{cfg = #conf{channel = Channel,
+ consumer_timeout = Timeout},
+ unacked_message_q = UAMQ}) ->
+ Now = os:system_time(millisecond),
+ case ?QUEUE:peek(UAMQ) of
+        {value, #pending_ack{delivery_tag = DeliveryTag,
+                             delivered_at = Time}}
+          when is_integer(Timeout)
+               andalso Time < Now - Timeout ->
+            rabbit_log_channel:warning("Delivery with delivery tag ~s on channel ~w "
+                                       "has timed out waiting for a consumer "
+                                       "acknowledgement. Timeout used: ~p ms",
+                                       [rabbit_data_coercion:to_binary(DeliveryTag),
+                                        Channel, Timeout]),
+ Ex = rabbit_misc:amqp_error(precondition_failed,
+ "consumer ack timed out on channel ~w",
+ [Channel], none),
+ handle_exception(Ex, State0);
+ _ ->
+ {noreply, State0}
+ end.
+
+handle_queue_actions(Actions, #ch{} = State0) ->
+ WriterPid = State0#ch.cfg#conf.writer_pid,
+ lists:foldl(
+ fun ({send_credit_reply, Avail}, S0) ->
+ ok = rabbit_writer:send_command(WriterPid,
+ #'basic.credit_ok'{available = Avail}),
+ S0;
+ ({send_drained, {CTag, Credit}}, S0) ->
+ ok = rabbit_writer:send_command(
+ WriterPid,
+ #'basic.credit_drained'{consumer_tag = CTag,
+ credit_drained = Credit}),
+ S0;
+ ({settled, QRef, MsgSeqNos}, S0) ->
+ confirm(MsgSeqNos, QRef, S0);
+ ({rejected, _QRef, MsgSeqNos}, S0) ->
+ {U, Rej} =
+ lists:foldr(
+ fun(SeqNo, {U1, Acc}) ->
+ case rabbit_confirms:reject(SeqNo, U1) of
+ {ok, MX, U2} ->
+ {U2, [MX | Acc]};
+ {error, not_found} ->
+ {U1, Acc}
+ end
+ end, {S0#ch.unconfirmed, []}, MsgSeqNos),
+ S = S0#ch{unconfirmed = U},
+ record_rejects(Rej, S);
+ ({deliver, CTag, AckRequired, Msgs}, S0) ->
+ handle_deliver(CTag, AckRequired, Msgs, S0);
+ ({queue_down, QRef}, S0) ->
+ handle_consuming_queue_down_or_eol(QRef, S0)
+
+ end, State0, Actions).
+
+find_queue_name_from_pid(Pid, QStates) when is_pid(Pid) ->
+ Fun = fun(K, _V, undefined) ->
+ case rabbit_amqqueue:lookup(K) of
+ {error, not_found} ->
+ undefined;
+ {ok, Q} ->
+ Pids = get_queue_pids(Q),
+ case lists:member(Pid, Pids) of
+ true ->
+ K;
+ false ->
+ undefined
+ end
+ end;
+ (_K, _V, Acc) ->
+ Acc
+ end,
+ rabbit_queue_type:fold_state(Fun, undefined, QStates).
+
+get_queue_pids(Q) when ?amqqueue_is_quorum(Q) ->
+ [amqqueue:get_leader(Q)];
+get_queue_pids(Q) ->
+ [amqqueue:get_pid(Q) | amqqueue:get_slave_pids(Q)].
+
+find_queue_name_from_quorum_name(Name, QStates) ->
+ Fun = fun(K, _V, undefined) ->
+ {ok, Q} = rabbit_amqqueue:lookup(K),
+ case amqqueue:get_pid(Q) of
+ {Name, _} ->
+ amqqueue:get_name(Q);
+ _ ->
+ undefined
+ end
+ end,
+ rabbit_queue_type:fold_state(Fun, undefined, QStates).
diff --git a/deps/rabbit/src/rabbit_channel_interceptor.erl b/deps/rabbit/src/rabbit_channel_interceptor.erl
new file mode 100644
index 0000000000..c40b437f10
--- /dev/null
+++ b/deps/rabbit/src/rabbit_channel_interceptor.erl
@@ -0,0 +1,104 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_channel_interceptor).
+
+-include("rabbit_framing.hrl").
+-include("rabbit.hrl").
+
+-export([init/1, intercept_in/3]).
+
+-behaviour(rabbit_registry_class).
+
+-export([added_to_rabbit_registry/2, removed_from_rabbit_registry/1]).
+
+-type(method_name() :: rabbit_framing:amqp_method_name()).
+-type(original_method() :: rabbit_framing:amqp_method_record()).
+-type(processed_method() :: rabbit_framing:amqp_method_record()).
+-type(original_content() :: rabbit_types:maybe(rabbit_types:content())).
+-type(processed_content() :: rabbit_types:maybe(rabbit_types:content())).
+-type(interceptor_state() :: term()).
+
+-callback description() -> [proplists:property()].
+%% Derive some initial state from the channel. This will be passed back
+%% as the third argument of intercept/3.
+-callback init(rabbit_channel:channel()) -> interceptor_state().
+-callback intercept(original_method(), original_content(),
+ interceptor_state()) ->
+ {processed_method(), processed_content()} |
+ rabbit_misc:channel_or_connection_exit().
+-callback applies_to() -> list(method_name()).
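+
+%% A minimal no-op interceptor implementing this behaviour might look
+%% like the following sketch (module name and description are
+%% illustrative):
+%%
+%%   -module(my_noop_interceptor).
+%%   -behaviour(rabbit_channel_interceptor).
+%%   -export([description/0, init/1, intercept/3, applies_to/0]).
+%%
+%%   description() -> [{description, <<"no-op interceptor">>}].
+%%   init(_Ch) -> undefined.
+%%   intercept(Method, Content, _State) -> {Method, Content}.
+%%   applies_to() -> ['basic.publish'].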
+
+added_to_rabbit_registry(_Type, _ModuleName) ->
+ rabbit_channel:refresh_interceptors().
+removed_from_rabbit_registry(_Type) ->
+ rabbit_channel:refresh_interceptors().
+
+init(Ch) ->
+ Mods = [M || {_, M} <- rabbit_registry:lookup_all(channel_interceptor)],
+ check_no_overlap(Mods),
+ [{Mod, Mod:init(Ch)} || Mod <- Mods].
+
+check_no_overlap(Mods) ->
+ check_no_overlap1([sets:from_list(Mod:applies_to()) || Mod <- Mods]).
+
+%% Check that there is no non-empty pairwise intersection in a list of sets
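+%% (e.g. two registered interceptors that both list 'basic.publish' in
+%% applies_to/0 cause interceptor initialisation to fail).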
+check_no_overlap1(Sets) ->
+ lists:foldl(fun(Set, Union) ->
+ Is = sets:intersection(Set, Union),
+ case sets:size(Is) of
+ 0 -> ok;
+ _ ->
+ internal_error("Interceptor: more than one "
+ "module handles ~p~n", [Is])
+ end,
+ sets:union(Set, Union)
+ end,
+ sets:new(),
+ Sets),
+ ok.
+
+intercept_in(M, C, Mods) ->
+ lists:foldl(fun({Mod, ModState}, {M1, C1}) ->
+ call_module(Mod, ModState, M1, C1)
+ end,
+ {M, C},
+ Mods).
+
+call_module(Mod, St, M, C) ->
+ % this little dance is because Mod might be unloaded at any point
+ case (catch {ok, Mod:intercept(M, C, St)}) of
+ {ok, R} -> validate_response(Mod, M, C, R);
+ {'EXIT', {undef, [{Mod, intercept, _, _} | _]}} -> {M, C}
+ end.
+
+validate_response(Mod, M1, C1, R = {M2, C2}) ->
+ case {validate_method(M1, M2), validate_content(C1, C2)} of
+ {true, true} -> R;
+ {false, _} ->
+ internal_error("Interceptor: ~p expected to return "
+ "method: ~p but returned: ~p",
+ [Mod, rabbit_misc:method_record_type(M1),
+ rabbit_misc:method_record_type(M2)]);
+ {_, false} ->
+ internal_error("Interceptor: ~p expected to return "
+ "content iff content is provided but "
+ "content in = ~p; content out = ~p",
+ [Mod, C1, C2])
+ end.
+
+validate_method(M, M2) ->
+ rabbit_misc:method_record_type(M) =:= rabbit_misc:method_record_type(M2).
+
+validate_content(none, none) -> true;
+validate_content(#content{}, #content{}) -> true;
+validate_content(_, _) -> false.
+
+%% keep dialyzer happy
+-spec internal_error(string(), [any()]) -> no_return().
+internal_error(Format, Args) ->
+ rabbit_misc:protocol_error(internal_error, Format, Args).
diff --git a/deps/rabbit/src/rabbit_channel_sup.erl b/deps/rabbit/src/rabbit_channel_sup.erl
new file mode 100644
index 0000000000..0d405ad3a7
--- /dev/null
+++ b/deps/rabbit/src/rabbit_channel_sup.erl
@@ -0,0 +1,92 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_channel_sup).
+
+%% Supervises processes that implement AMQP 0-9-1 channels:
+%%
+%% * Channel process itself
+%% * Network writer (for network connections)
+%% * Limiter (handles channel QoS and flow control)
+%%
+%% Every rabbit_channel_sup is supervised by rabbit_channel_sup_sup.
+%%
+%% See also rabbit_channel, rabbit_writer, rabbit_limiter.
+
+-behaviour(supervisor2).
+
+-export([start_link/1]).
+
+-export([init/1]).
+
+-include("rabbit.hrl").
+
+%%----------------------------------------------------------------------------
+
+-export_type([start_link_args/0]).
+
+-type start_link_args() ::
+ {'tcp', rabbit_net:socket(), rabbit_channel:channel_number(),
+ non_neg_integer(), pid(), string(), rabbit_types:protocol(),
+ rabbit_types:user(), rabbit_types:vhost(), rabbit_framing:amqp_table(),
+ pid()} |
+ {'direct', rabbit_channel:channel_number(), pid(), string(),
+ rabbit_types:protocol(), rabbit_types:user(), rabbit_types:vhost(),
+ rabbit_framing:amqp_table(), pid()}.
+
+-define(FAIR_WAIT, 70000).
+
+%%----------------------------------------------------------------------------
+
+-spec start_link(start_link_args()) -> {'ok', pid(), {pid(), any()}}.
+
+start_link({tcp, Sock, Channel, FrameMax, ReaderPid, ConnName, Protocol, User,
+ VHost, Capabilities, Collector}) ->
+ {ok, SupPid} = supervisor2:start_link(
+ ?MODULE, {tcp, Sock, Channel, FrameMax,
+ ReaderPid, Protocol, {ConnName, Channel}}),
+ [LimiterPid] = supervisor2:find_child(SupPid, limiter),
+ [WriterPid] = supervisor2:find_child(SupPid, writer),
+ {ok, ChannelPid} =
+ supervisor2:start_child(
+ SupPid,
+ {channel, {rabbit_channel, start_link,
+ [Channel, ReaderPid, WriterPid, ReaderPid, ConnName,
+ Protocol, User, VHost, Capabilities, Collector,
+ LimiterPid]},
+ intrinsic, ?FAIR_WAIT, worker, [rabbit_channel]}),
+ {ok, AState} = rabbit_command_assembler:init(Protocol),
+ {ok, SupPid, {ChannelPid, AState}};
+start_link({direct, Channel, ClientChannelPid, ConnPid, ConnName, Protocol,
+ User, VHost, Capabilities, Collector, AmqpParams}) ->
+ {ok, SupPid} = supervisor2:start_link(
+ ?MODULE, {direct, {ConnName, Channel}}),
+ [LimiterPid] = supervisor2:find_child(SupPid, limiter),
+ {ok, ChannelPid} =
+ supervisor2:start_child(
+ SupPid,
+ {channel, {rabbit_channel, start_link,
+ [Channel, ClientChannelPid, ClientChannelPid, ConnPid,
+ ConnName, Protocol, User, VHost, Capabilities, Collector,
+ LimiterPid, AmqpParams]},
+ intrinsic, ?FAIR_WAIT, worker, [rabbit_channel]}),
+ {ok, SupPid, {ChannelPid, none}}.
+
+%%----------------------------------------------------------------------------
+
+init(Type) ->
+ ?LG_PROCESS_TYPE(channel_sup),
+ {ok, {{one_for_all, 0, 1}, child_specs(Type)}}.
+
+child_specs({tcp, Sock, Channel, FrameMax, ReaderPid, Protocol, Identity}) ->
+ [{writer, {rabbit_writer, start_link,
+ [Sock, Channel, FrameMax, Protocol, ReaderPid, Identity, true]},
+ intrinsic, ?FAIR_WAIT, worker, [rabbit_writer]}
+ | child_specs({direct, Identity})];
+child_specs({direct, Identity}) ->
+ [{limiter, {rabbit_limiter, start_link, [Identity]},
+ transient, ?FAIR_WAIT, worker, [rabbit_limiter]}].
diff --git a/deps/rabbit/src/rabbit_channel_sup_sup.erl b/deps/rabbit/src/rabbit_channel_sup_sup.erl
new file mode 100644
index 0000000000..72cf38d6c8
--- /dev/null
+++ b/deps/rabbit/src/rabbit_channel_sup_sup.erl
@@ -0,0 +1,42 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_channel_sup_sup).
+
+%% Supervisor for AMQP 0-9-1 channels. Every AMQP 0-9-1 connection has
+%% one of these.
+%%
+%% See also rabbit_channel_sup, rabbit_connection_helper_sup, rabbit_reader.
+
+-behaviour(supervisor2).
+
+-export([start_link/0, start_channel/2]).
+
+-export([init/1]).
+
+-include("rabbit.hrl").
+
+%%----------------------------------------------------------------------------
+
+-spec start_link() -> rabbit_types:ok_pid_or_error().
+
+start_link() ->
+ supervisor2:start_link(?MODULE, []).
+
+-spec start_channel(pid(), rabbit_channel_sup:start_link_args()) ->
+ {'ok', pid(), {pid(), any()}}.
+
+start_channel(Pid, Args) ->
+ supervisor2:start_child(Pid, [Args]).
+
+%%----------------------------------------------------------------------------
+
+init([]) ->
+ ?LG_PROCESS_TYPE(channel_sup_sup),
+ {ok, {{simple_one_for_one, 0, 1},
+ [{channel_sup, {rabbit_channel_sup, start_link, []},
+ temporary, infinity, supervisor, [rabbit_channel_sup]}]}}.
diff --git a/deps/rabbit/src/rabbit_channel_tracking.erl b/deps/rabbit/src/rabbit_channel_tracking.erl
new file mode 100644
index 0000000000..42ab664a06
--- /dev/null
+++ b/deps/rabbit/src/rabbit_channel_tracking.erl
@@ -0,0 +1,291 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_channel_tracking).
+
+%% Abstracts away how tracked channel records are stored
+%% and queried.
+%%
+%% See also:
+%%
+%% * rabbit_channel_tracking_handler
+%% * rabbit_reader
+%% * rabbit_event
+-behaviour(rabbit_tracking).
+
+-export([boot/0,
+ update_tracked/1,
+ handle_cast/1,
+ register_tracked/1,
+ unregister_tracked/1,
+ count_tracked_items_in/1,
+ clear_tracking_tables/0,
+ shutdown_tracked_items/2]).
+
+-export([list/0, list_of_user/1, list_on_node/1,
+ tracked_channel_table_name_for/1,
+ tracked_channel_per_user_table_name_for/1,
+ get_all_tracked_channel_table_names_for_node/1,
+ delete_tracked_channel_user_entry/1]).
+
+-include_lib("rabbit.hrl").
+
+-import(rabbit_misc, [pget/2]).
+
+%%
+%% API
+%%
+
+%% Sets up and resets channel tracking tables for this node.
+-spec boot() -> ok.
+
+boot() ->
+ ensure_tracked_channels_table_for_this_node(),
+ rabbit_log:info("Setting up a table for channel tracking on this node: ~p",
+ [tracked_channel_table_name_for(node())]),
+ ensure_per_user_tracked_channels_table_for_node(),
+ rabbit_log:info("Setting up a table for channel tracking on this node: ~p",
+ [tracked_channel_per_user_table_name_for(node())]),
+ clear_tracking_tables(),
+ ok.
+
+-spec update_tracked(term()) -> ok.
+
+update_tracked(Event) ->
+ spawn(?MODULE, handle_cast, [Event]),
+ ok.
+
+%% Asynchronously handle update events
+-spec handle_cast(term()) -> ok.
+
+handle_cast({channel_created, Details}) ->
+ ThisNode = node(),
+ case node(pget(pid, Details)) of
+ ThisNode ->
+ TrackedCh = #tracked_channel{id = TrackedChId} =
+ tracked_channel_from_channel_created_event(Details),
+ try
+ register_tracked(TrackedCh)
+ catch
+ error:{no_exists, _} ->
+ Msg = "Could not register channel ~p for tracking, "
+ "its table is not ready yet or the channel terminated prematurely",
+ rabbit_log_connection:warning(Msg, [TrackedChId]),
+ ok;
+ error:Err ->
+ Msg = "Could not register channel ~p for tracking: ~p",
+ rabbit_log_connection:warning(Msg, [TrackedChId, Err]),
+ ok
+ end;
+ _OtherNode ->
+ %% ignore
+ ok
+ end;
+handle_cast({channel_closed, Details}) ->
+ %% channel has terminated, unregister iff local
+ case get_tracked_channel_by_pid(pget(pid, Details)) of
+ [#tracked_channel{name = Name}] ->
+ unregister_tracked(rabbit_tracking:id(node(), Name));
+ _Other -> ok
+ end;
+handle_cast({connection_closed, ConnDetails}) ->
+    ThisNode = node(),
+ ConnPid = pget(pid, ConnDetails),
+
+ case pget(node, ConnDetails) of
+ ThisNode ->
+ TrackedChs = get_tracked_channels_by_connection_pid(ConnPid),
+ rabbit_log_connection:info(
+ "Closing all channels from connection '~p' "
+ "because it has been closed", [pget(name, ConnDetails)]),
+ %% Shutting down channels will take care of unregistering the
+ %% corresponding tracking.
+ shutdown_tracked_items(TrackedChs, undefined),
+ ok;
+ _DifferentNode ->
+ ok
+ end;
+handle_cast({user_deleted, Details}) ->
+ Username = pget(name, Details),
+ %% Schedule user entry deletion, allowing time for connections to close
+ _ = timer:apply_after(?TRACKING_EXECUTION_TIMEOUT, ?MODULE,
+ delete_tracked_channel_user_entry, [Username]),
+ ok;
+handle_cast({node_deleted, Details}) ->
+ Node = pget(node, Details),
+ rabbit_log_connection:info(
+ "Node '~s' was removed from the cluster, deleting"
+ " its channel tracking tables...", [Node]),
+ delete_tracked_channels_table_for_node(Node),
+ delete_per_user_tracked_channels_table_for_node(Node).
+
+-spec register_tracked(rabbit_types:tracked_channel()) -> ok.
+-dialyzer([{nowarn_function, [register_tracked/1]}, race_conditions]).
+
+register_tracked(TrackedCh =
+ #tracked_channel{node = Node, name = Name, username = Username}) ->
+ ChId = rabbit_tracking:id(Node, Name),
+ TableName = tracked_channel_table_name_for(Node),
+ PerUserChTableName = tracked_channel_per_user_table_name_for(Node),
+ %% upsert
+ case mnesia:dirty_read(TableName, ChId) of
+ [] ->
+ mnesia:dirty_write(TableName, TrackedCh),
+ mnesia:dirty_update_counter(PerUserChTableName, Username, 1);
+ [#tracked_channel{}] ->
+ ok
+ end,
+ ok.
+
+-spec unregister_tracked(rabbit_types:tracked_channel_id()) -> ok.
+
+unregister_tracked(ChId = {Node, _Name}) when Node =:= node() ->
+ TableName = tracked_channel_table_name_for(Node),
+ PerUserChannelTableName = tracked_channel_per_user_table_name_for(Node),
+ case mnesia:dirty_read(TableName, ChId) of
+ [] -> ok;
+ [#tracked_channel{username = Username}] ->
+ mnesia:dirty_update_counter(PerUserChannelTableName, Username, -1),
+ mnesia:dirty_delete(TableName, ChId)
+ end.
+
+-spec count_tracked_items_in({atom(), rabbit_types:username()}) -> non_neg_integer().
+
+count_tracked_items_in({user, Username}) ->
+    rabbit_tracking:count_tracked_items(
+        fun tracked_channel_per_user_table_name_for/1,
+        #tracked_channel_per_user.channel_count, Username,
+        "channels of user").
+
+-spec clear_tracking_tables() -> ok.
+
+clear_tracking_tables() ->
+ clear_tracked_channel_tables_for_this_node(),
+ ok.
+
+-spec shutdown_tracked_items(list(), term()) -> ok.
+
+shutdown_tracked_items(TrackedItems, _Args) ->
+ close_channels(TrackedItems).
+
+%% helper functions
+-spec list() -> [rabbit_types:tracked_channel()].
+
+list() ->
+ lists:foldl(
+ fun (Node, Acc) ->
+ Tab = tracked_channel_table_name_for(Node),
+ Acc ++ mnesia:dirty_match_object(Tab, #tracked_channel{_ = '_'})
+ end, [], rabbit_nodes:all_running()).
+
+-spec list_of_user(rabbit_types:username()) -> [rabbit_types:tracked_channel()].
+
+list_of_user(Username) ->
+ rabbit_tracking:match_tracked_items(
+ fun tracked_channel_table_name_for/1,
+ #tracked_channel{username = Username, _ = '_'}).
+
+-spec list_on_node(node()) -> [rabbit_types:tracked_channel()].
+
+list_on_node(Node) ->
+ try mnesia:dirty_match_object(
+ tracked_channel_table_name_for(Node),
+ #tracked_channel{_ = '_'})
+ catch exit:{aborted, {no_exists, _}} -> []
+ end.
+
+-spec tracked_channel_table_name_for(node()) -> atom().
+
+tracked_channel_table_name_for(Node) ->
+ list_to_atom(rabbit_misc:format("tracked_channel_on_node_~s", [Node])).
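+%% e.g. tracked_channel_table_name_for('rabbit@warp10') returns
+%% 'tracked_channel_on_node_rabbit@warp10' (node name illustrative).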
+
+-spec tracked_channel_per_user_table_name_for(node()) -> atom().
+
+tracked_channel_per_user_table_name_for(Node) ->
+ list_to_atom(rabbit_misc:format(
+ "tracked_channel_table_per_user_on_node_~s", [Node])).
+
+%% internal
+ensure_tracked_channels_table_for_this_node() ->
+ ensure_tracked_channels_table_for_node(node()).
+
+ensure_per_user_tracked_channels_table_for_node() ->
+ ensure_per_user_tracked_channels_table_for_node(node()).
+
+%% Create tables
+ensure_tracked_channels_table_for_node(Node) ->
+ TableName = tracked_channel_table_name_for(Node),
+ case mnesia:create_table(TableName, [{record_name, tracked_channel},
+ {attributes, record_info(fields, tracked_channel)}]) of
+ {atomic, ok} -> ok;
+ {aborted, {already_exists, _}} -> ok;
+ {aborted, Error} ->
+ rabbit_log:error("Failed to create a tracked channel table for node ~p: ~p", [Node, Error]),
+ ok
+ end.
+
+ensure_per_user_tracked_channels_table_for_node(Node) ->
+ TableName = tracked_channel_per_user_table_name_for(Node),
+ case mnesia:create_table(TableName, [{record_name, tracked_channel_per_user},
+ {attributes, record_info(fields, tracked_channel_per_user)}]) of
+ {atomic, ok} -> ok;
+ {aborted, {already_exists, _}} -> ok;
+ {aborted, Error} ->
+ rabbit_log:error("Failed to create a per-user tracked channel table for node ~p: ~p", [Node, Error]),
+ ok
+ end.
+
+clear_tracked_channel_tables_for_this_node() ->
+ [rabbit_tracking:clear_tracking_table(T)
+ || T <- get_all_tracked_channel_table_names_for_node(node())].
+
+delete_tracked_channels_table_for_node(Node) ->
+ TableName = tracked_channel_table_name_for(Node),
+ rabbit_tracking:delete_tracking_table(TableName, Node, "tracked channel").
+
+delete_per_user_tracked_channels_table_for_node(Node) ->
+ TableName = tracked_channel_per_user_table_name_for(Node),
+ rabbit_tracking:delete_tracking_table(TableName, Node,
+ "per-user tracked channels").
+
+get_all_tracked_channel_table_names_for_node(Node) ->
+ [tracked_channel_table_name_for(Node),
+ tracked_channel_per_user_table_name_for(Node)].
+
+get_tracked_channels_by_connection_pid(ConnPid) ->
+ rabbit_tracking:match_tracked_items(
+ fun tracked_channel_table_name_for/1,
+ #tracked_channel{connection = ConnPid, _ = '_'}).
+
+get_tracked_channel_by_pid(ChPid) ->
+ rabbit_tracking:match_tracked_items(
+ fun tracked_channel_table_name_for/1,
+ #tracked_channel{pid = ChPid, _ = '_'}).
+
+delete_tracked_channel_user_entry(Username) ->
+ rabbit_tracking:delete_tracked_entry(
+ {rabbit_auth_backend_internal, exists, [Username]},
+ fun tracked_channel_per_user_table_name_for/1,
+ Username).
+
+tracked_channel_from_channel_created_event(ChannelDetails) ->
+ Node = node(ChPid = pget(pid, ChannelDetails)),
+ Name = pget(name, ChannelDetails),
+ #tracked_channel{
+ id = rabbit_tracking:id(Node, Name),
+ name = Name,
+ node = Node,
+ vhost = pget(vhost, ChannelDetails),
+ pid = ChPid,
+ connection = pget(connection, ChannelDetails),
+ username = pget(user, ChannelDetails)}.
+
+close_channels(TrackedChannels = [#tracked_channel{}|_]) ->
+ [rabbit_channel:shutdown(ChPid)
+ || #tracked_channel{pid = ChPid} <- TrackedChannels],
+ ok;
+close_channels(_TrackedChannels = []) -> ok.
diff --git a/deps/rabbit/src/rabbit_channel_tracking_handler.erl b/deps/rabbit/src/rabbit_channel_tracking_handler.erl
new file mode 100644
index 0000000000..0cbe02f39e
--- /dev/null
+++ b/deps/rabbit/src/rabbit_channel_tracking_handler.erl
@@ -0,0 +1,71 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_channel_tracking_handler).
+
+%% This module keeps track of channel creation and termination events
+%% on its local node. Similar to the rabbit_connection_tracking_handler,
+%% the primary goal here is to decouple channel tracking from rabbit_reader
+%% and isolate channel tracking to its own process to avoid blocking connection
+%% creation events. Additionally, creation events are themselves non-blocking:
+%% they spawn a short-lived process that updates the tracking tables in real time.
+%%
+%% Events from other nodes are ignored.
+
+-behaviour(gen_event).
+
+-export([init/1, handle_call/2, handle_event/2, handle_info/2,
+ terminate/2, code_change/3]).
+
+-include_lib("rabbit.hrl").
+
+-rabbit_boot_step({?MODULE,
+ [{description, "channel tracking event handler"},
+ {mfa, {gen_event, add_handler,
+ [rabbit_event, ?MODULE, []]}},
+ {cleanup, {gen_event, delete_handler,
+ [rabbit_event, ?MODULE, []]}},
+ {requires, [channel_tracking]},
+ {enables, recovery}]}).
+
+%%
+%% API
+%%
+
+init([]) ->
+ {ok, []}.
+
+handle_event(#event{type = channel_created, props = Details}, State) ->
+ ok = rabbit_channel_tracking:update_tracked({channel_created, Details}),
+ {ok, State};
+handle_event(#event{type = channel_closed, props = Details}, State) ->
+ ok = rabbit_channel_tracking:update_tracked({channel_closed, Details}),
+ {ok, State};
+handle_event(#event{type = connection_closed, props = Details}, State) ->
+ ok = rabbit_channel_tracking:update_tracked({connection_closed, Details}),
+ {ok, State};
+handle_event(#event{type = user_deleted, props = Details}, State) ->
+ ok = rabbit_channel_tracking:update_tracked({user_deleted, Details}),
+ {ok, State};
+%% A node has been deleted from the cluster.
+handle_event(#event{type = node_deleted, props = Details}, State) ->
+ ok = rabbit_channel_tracking:update_tracked({node_deleted, Details}),
+ {ok, State};
+handle_event(_Event, State) ->
+ {ok, State}.
+
+handle_call(_Request, State) ->
+ {ok, not_understood, State}.
+
+handle_info(_Info, State) ->
+ {ok, State}.
+
+terminate(_Arg, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
diff --git a/deps/rabbit/src/rabbit_classic_queue.erl b/deps/rabbit/src/rabbit_classic_queue.erl
new file mode 100644
index 0000000000..e53c0aecc2
--- /dev/null
+++ b/deps/rabbit/src/rabbit_classic_queue.erl
@@ -0,0 +1,527 @@
+-module(rabbit_classic_queue).
+-behaviour(rabbit_queue_type).
+
+-include("amqqueue.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
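+%% rabbit_queue_type implementation for classic (and classic mirrored)
+%% queues. Operations are delegated to the gen_server2-based queue
+%% processes; the state below tracks the current master pid and, per
+%% publisher sequence number, which queue processes have yet to confirm
+%% a message.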
+-record(msg_status, {pending :: [pid()],
+ confirmed = [] :: [pid()]}).
+
+-record(?MODULE, {pid :: undefined | pid(), %% the current master pid
+ qref :: term(), %% TODO
+ unconfirmed = #{} ::
+ #{non_neg_integer() => #msg_status{}}}).
+-define(STATE, ?MODULE).
+
+-opaque state() :: #?STATE{}.
+
+-export_type([state/0]).
+
+-export([
+ is_enabled/0,
+ declare/2,
+ delete/4,
+ is_recoverable/1,
+ recover/2,
+ purge/1,
+ policy_changed/1,
+ stat/1,
+ init/1,
+ close/1,
+ update/2,
+ consume/3,
+ cancel/5,
+ handle_event/2,
+ deliver/2,
+ settle/4,
+ credit/4,
+ dequeue/4,
+ info/2,
+ state_info/1,
+ capabilities/0
+ ]).
+
+-export([delete_crashed/1,
+ delete_crashed/2,
+ delete_crashed_internal/2]).
+
+-export([confirm_to_sender/3,
+ send_rejection/3,
+ send_queue_event/3]).
+
+is_enabled() -> true.
+
+declare(Q, Node) when ?amqqueue_is_classic(Q) ->
+ QName = amqqueue:get_name(Q),
+ VHost = amqqueue:get_vhost(Q),
+ Node1 = case Node of
+ {ignore_location, Node0} ->
+ Node0;
+ _ ->
+ case rabbit_queue_master_location_misc:get_location(Q) of
+ {ok, Node0} -> Node0;
+ _ -> Node
+ end
+ end,
+    Node2 = rabbit_mirror_queue_misc:initial_queue_node(Q, Node1),
+    case rabbit_vhost_sup_sup:get_vhost_sup(VHost, Node2) of
+        {ok, _} ->
+            gen_server2:call(
+              rabbit_amqqueue_sup_sup:start_queue_process(Node2, Q, declare),
+              {init, new}, infinity);
+        {error, Error} ->
+            {protocol_error, internal_error, "Cannot declare ~s on node '~s': ~255p",
+             [rabbit_misc:rs(QName), Node2, Error]}
+    end.
+
+delete(Q, IfUnused, IfEmpty, ActingUser) when ?amqqueue_is_classic(Q) ->
+ case wait_for_promoted_or_stopped(Q) of
+ {promoted, Q1} ->
+ QPid = amqqueue:get_pid(Q1),
+ delegate:invoke(QPid, {gen_server2, call,
+ [{delete, IfUnused, IfEmpty, ActingUser},
+ infinity]});
+ {stopped, Q1} ->
+ #resource{name = Name, virtual_host = Vhost} = amqqueue:get_name(Q1),
+ case IfEmpty of
+ true ->
+ rabbit_log:error("Queue ~s in vhost ~s has its master node down and "
+ "no mirrors available or eligible for promotion. "
+ "The queue may be non-empty. "
+ "Refusing to force-delete.",
+ [Name, Vhost]),
+ {error, not_empty};
+ false ->
+ rabbit_log:warning("Queue ~s in vhost ~s has its master node is down and "
+ "no mirrors available or eligible for promotion. "
+ "Forcing queue deletion.",
+ [Name, Vhost]),
+ delete_crashed_internal(Q1, ActingUser),
+ {ok, 0}
+ end;
+ {error, not_found} ->
+ %% Assume the queue was deleted
+ {ok, 0}
+ end.
+
+is_recoverable(Q) when ?is_amqqueue(Q) ->
+ Node = node(),
+ Node =:= node(amqqueue:get_pid(Q)) andalso
+ %% Terminations on node down will not remove the rabbit_queue
+ %% record if it is a mirrored queue (such info is now obtained from
+ %% the policy). Thus, we must check if the local pid is alive
+ %% - if the record is present - in order to restart.
+ (mnesia:read(rabbit_queue, amqqueue:get_name(Q), read) =:= []
+ orelse not rabbit_mnesia:is_process_alive(amqqueue:get_pid(Q))).
+
+recover(VHost, Queues) ->
+ {ok, BQ} = application:get_env(rabbit, backing_queue_module),
+ %% We rely on BQ:start/1 returning the recovery terms in the same
+ %% order as the supplied queue names, so that we can zip them together
+ %% for further processing in recover_durable_queues.
+ {ok, OrderedRecoveryTerms} =
+ BQ:start(VHost, [amqqueue:get_name(Q) || Q <- Queues]),
+ case rabbit_amqqueue_sup_sup:start_for_vhost(VHost) of
+ {ok, _} ->
+ RecoveredQs = recover_durable_queues(lists:zip(Queues,
+ OrderedRecoveryTerms)),
+ RecoveredNames = [amqqueue:get_name(Q) || Q <- RecoveredQs],
+ FailedQueues = [Q || Q <- Queues,
+ not lists:member(amqqueue:get_name(Q), RecoveredNames)],
+ {RecoveredQs, FailedQueues};
+ {error, Reason} ->
+ rabbit_log:error("Failed to start queue supervisor for vhost '~s': ~s", [VHost, Reason]),
+ throw({error, Reason})
+ end.
+
+-spec policy_changed(amqqueue:amqqueue()) -> ok.
+policy_changed(Q) ->
+ QPid = amqqueue:get_pid(Q),
+ gen_server2:cast(QPid, policy_changed).
+
+stat(Q) ->
+ delegate:invoke(amqqueue:get_pid(Q),
+ {gen_server2, call, [stat, infinity]}).
+
+-spec init(amqqueue:amqqueue()) -> state().
+init(Q) when ?amqqueue_is_classic(Q) ->
+ QName = amqqueue:get_name(Q),
+ #?STATE{pid = amqqueue:get_pid(Q),
+ qref = QName}.
+
+-spec close(state()) -> ok.
+close(_State) ->
+ ok.
+
+-spec update(amqqueue:amqqueue(), state()) -> state().
+update(Q, #?STATE{pid = Pid} = State) when ?amqqueue_is_classic(Q) ->
+ case amqqueue:get_pid(Q) of
+ Pid ->
+ State;
+ NewPid ->
+ %% master pid is different, update
+ State#?STATE{pid = NewPid}
+ end.
+
+consume(Q, Spec, State) when ?amqqueue_is_classic(Q) ->
+ QPid = amqqueue:get_pid(Q),
+ QRef = amqqueue:get_name(Q),
+ #{no_ack := NoAck,
+ channel_pid := ChPid,
+ limiter_pid := LimiterPid,
+ limiter_active := LimiterActive,
+ prefetch_count := ConsumerPrefetchCount,
+ consumer_tag := ConsumerTag,
+ exclusive_consume := ExclusiveConsume,
+ args := Args,
+ ok_msg := OkMsg,
+ acting_user := ActingUser} = Spec,
+ case delegate:invoke(QPid,
+ {gen_server2, call,
+ [{basic_consume, NoAck, ChPid, LimiterPid,
+ LimiterActive, ConsumerPrefetchCount, ConsumerTag,
+ ExclusiveConsume, Args, OkMsg, ActingUser},
+ infinity]}) of
+ ok ->
+ %% ask the host process to monitor this pid
+ %% TODO: track pids as they change
+ {ok, State#?STATE{pid = QPid}, [{monitor, QPid, QRef}]};
+ Err ->
+ Err
+ end.
+
+cancel(Q, ConsumerTag, OkMsg, ActingUser, State) ->
+ QPid = amqqueue:get_pid(Q),
+ case delegate:invoke(QPid, {gen_server2, call,
+ [{basic_cancel, self(), ConsumerTag,
+ OkMsg, ActingUser}, infinity]}) of
+ ok ->
+ {ok, State};
+ Err -> Err
+ end.
+
+-spec settle(rabbit_queue_type:settle_op(), rabbit_types:ctag(),
+ [non_neg_integer()], state()) ->
+ {state(), rabbit_queue_type:actions()}.
+settle(complete, _CTag, MsgIds, State) ->
+ Pid = State#?STATE.pid,
+ delegate:invoke_no_result(Pid,
+ {gen_server2, cast, [{ack, MsgIds, self()}]}),
+ {State, []};
+settle(Op, _CTag, MsgIds, State) ->
+ ChPid = self(),
+ ok = delegate:invoke_no_result(State#?STATE.pid,
+ {gen_server2, cast,
+ [{reject, Op == requeue, MsgIds, ChPid}]}),
+ {State, []}.
+
+credit(CTag, Credit, Drain, State) ->
+ ChPid = self(),
+ delegate:invoke_no_result(State#?STATE.pid,
+ {gen_server2, cast,
+ [{credit, ChPid, CTag, Credit, Drain}]}),
+ {State, []}.
+
+handle_event({confirm, MsgSeqNos, Pid}, #?STATE{qref = QRef,
+ unconfirmed = U0} = State) ->
+ %% confirms should never result in rejections
+ {Unconfirmed, ConfirmedSeqNos, []} =
+ settle_seq_nos(MsgSeqNos, Pid, U0, confirm),
+ Actions = [{settled, QRef, ConfirmedSeqNos}],
+ %% handle confirm event from queues
+ %% in this case the classic queue should track each individual publish and
+ %% the processes involved and only emit a settle action once they have all
+ %% been received (or DOWN has been received).
+ %% Hence this part of the confirm logic is queue specific.
+ {ok, State#?STATE{unconfirmed = Unconfirmed}, Actions};
+handle_event({reject_publish, SeqNo, _QPid},
+ #?STATE{qref = QRef,
+ unconfirmed = U0} = State) ->
+ %% It does not matter which queue rejected the message,
+ %% if any queue did, it should not be confirmed.
+ {U, Rejected} = reject_seq_no(SeqNo, U0),
+ Actions = [{rejected, QRef, Rejected}],
+ {ok, State#?STATE{unconfirmed = U}, Actions};
+handle_event({down, Pid, Info}, #?STATE{qref = QRef,
+ pid = MasterPid,
+ unconfirmed = U0} = State0) ->
+ Actions0 = case Pid =:= MasterPid of
+ true ->
+ [{queue_down, QRef}];
+ false ->
+ []
+ end,
+ case rabbit_misc:is_abnormal_exit(Info) of
+ false when Info =:= normal andalso Pid == MasterPid ->
+            %% queue was deleted and the master pid is down
+ eol;
+ false ->
+ %% this assumes the mirror isn't part of the active set
+ MsgSeqNos = maps:keys(
+ maps:filter(fun (_, #msg_status{pending = Pids}) ->
+ lists:member(Pid, Pids)
+ end, U0)),
+ {Unconfirmed, Settled, Rejected} =
+ settle_seq_nos(MsgSeqNos, Pid, U0, down),
+ Actions = settlement_action(
+ settled, QRef, Settled,
+ settlement_action(rejected, QRef, Rejected, Actions0)),
+ {ok, State0#?STATE{unconfirmed = Unconfirmed}, Actions};
+ true ->
+            %% Any abnormal exit should be considered a full reject of the
+            %% outstanding message ids - if the message didn't get to all
+            %% mirrors we have to assume it will never get there.
+ MsgIds = maps:fold(
+ fun (SeqNo, Status, Acc) ->
+ case lists:member(Pid, Status#msg_status.pending) of
+ true ->
+ [SeqNo | Acc];
+ false ->
+ Acc
+ end
+ end, [], U0),
+ U = maps:without(MsgIds, U0),
+ {ok, State0#?STATE{unconfirmed = U},
+ [{rejected, QRef, MsgIds} | Actions0]}
+ end;
+handle_event({send_credit_reply, _} = Action, State) ->
+ {ok, State, [Action]}.
+
+settlement_action(_Type, _QRef, [], Acc) ->
+ Acc;
+settlement_action(Type, QRef, MsgSeqs, Acc) ->
+ [{Type, QRef, MsgSeqs} | Acc].
+
+-spec deliver([{amqqueue:amqqueue(), state()}],
+ Delivery :: term()) ->
+ {[{amqqueue:amqqueue(), state()}], rabbit_queue_type:actions()}.
+deliver(Qs0, #delivery{flow = Flow,
+ msg_seq_no = MsgNo,
+ message = #basic_message{exchange_name = _Ex},
+ confirm = _Confirm} = Delivery) ->
+ %% TODO: record master and slaves for confirm processing
+ {MPids, SPids, Qs, Actions} = qpids(Qs0, MsgNo),
+ QPids = MPids ++ SPids,
+ case Flow of
+ %% Here we are tracking messages sent by the rabbit_channel
+ %% process. We are accessing the rabbit_channel process
+ %% dictionary.
+        %% QPids already includes SPids, so a single send per pid suffices.
+        flow -> [credit_flow:send(QPid) || QPid <- QPids];
+ noflow -> ok
+ end,
+ MMsg = {deliver, Delivery, false},
+ SMsg = {deliver, Delivery, true},
+ delegate:invoke_no_result(MPids, {gen_server2, cast, [MMsg]}),
+ delegate:invoke_no_result(SPids, {gen_server2, cast, [SMsg]}),
+ {Qs, Actions}.
+
+
+-spec dequeue(NoAck :: boolean(), LimiterPid :: pid(),
+ rabbit_types:ctag(), state()) ->
+ {ok, Count :: non_neg_integer(), rabbit_amqqueue:qmsg(), state()} |
+ {empty, state()}.
+dequeue(NoAck, LimiterPid, _CTag, State) ->
+ QPid = State#?STATE.pid,
+ case delegate:invoke(QPid, {gen_server2, call,
+ [{basic_get, self(), NoAck, LimiterPid}, infinity]}) of
+ empty ->
+ {empty, State};
+ {ok, Count, Msg} ->
+ {ok, Count, Msg, State}
+ end.
+
+-spec state_info(state()) -> #{atom() := term()}.
+state_info(_State) ->
+ #{}.
+
+%% general queue info
+-spec info(amqqueue:amqqueue(), all_keys | rabbit_types:info_keys()) ->
+ rabbit_types:infos().
+info(Q, Items) ->
+ QPid = amqqueue:get_pid(Q),
+ Req = case Items of
+ all_keys -> info;
+ _ -> {info, Items}
+ end,
+ case delegate:invoke(QPid, {gen_server2, call, [Req, infinity]}) of
+ {ok, Result} ->
+ Result;
+ {error, _Err} ->
+ [];
+ Result when is_list(Result) ->
+ %% this is a backwards compatibility clause
+ Result
+ end.
+
+-spec purge(amqqueue:amqqueue()) ->
+ {ok, non_neg_integer()}.
+purge(Q) when ?is_amqqueue(Q) ->
+ QPid = amqqueue:get_pid(Q),
+ delegate:invoke(QPid, {gen_server2, call, [purge, infinity]}).
+
+qpids(Qs, MsgNo) ->
+ lists:foldl(
+ fun ({Q, S0}, {MPidAcc, SPidAcc, Qs0, Actions0}) ->
+ QPid = amqqueue:get_pid(Q),
+ SPids = amqqueue:get_slave_pids(Q),
+ QRef = amqqueue:get_name(Q),
+ Actions = [{monitor, QPid, QRef}
+ | [{monitor, P, QRef} || P <- SPids]] ++ Actions0,
+ %% confirm record only if MsgNo isn't undefined
+ S = case S0 of
+ #?STATE{unconfirmed = U0} ->
+ Rec = [QPid | SPids],
+ U = case MsgNo of
+ undefined ->
+ U0;
+ _ ->
+ U0#{MsgNo => #msg_status{pending = Rec}}
+ end,
+ S0#?STATE{pid = QPid,
+ unconfirmed = U};
+ stateless ->
+ S0
+ end,
+ {[QPid | MPidAcc], SPidAcc ++ SPids,
+ [{Q, S} | Qs0], Actions}
+ end, {[], [], [], []}, Qs).
+
+%% internal-ish
+-spec wait_for_promoted_or_stopped(amqqueue:amqqueue()) ->
+ {promoted, amqqueue:amqqueue()} |
+ {stopped, amqqueue:amqqueue()} |
+ {error, not_found}.
+wait_for_promoted_or_stopped(Q0) ->
+ QName = amqqueue:get_name(Q0),
+ case rabbit_amqqueue:lookup(QName) of
+ {ok, Q} ->
+ QPid = amqqueue:get_pid(Q),
+ SPids = amqqueue:get_slave_pids(Q),
+ case rabbit_mnesia:is_process_alive(QPid) of
+ true -> {promoted, Q};
+ false ->
+ case lists:any(fun(Pid) ->
+ rabbit_mnesia:is_process_alive(Pid)
+ end, SPids) of
+ %% There is a live slave. May be promoted
+ true ->
+ timer:sleep(100),
+ wait_for_promoted_or_stopped(Q);
+ %% All slave pids are stopped.
+ %% No process left for the queue
+ false -> {stopped, Q}
+ end
+ end;
+ {error, not_found} ->
+ {error, not_found}
+ end.
+
+-spec delete_crashed(amqqueue:amqqueue()) -> ok.
+delete_crashed(Q) ->
+ delete_crashed(Q, ?INTERNAL_USER).
+
+delete_crashed(Q, ActingUser) ->
+ ok = rpc:call(amqqueue:qnode(Q), ?MODULE, delete_crashed_internal,
+ [Q, ActingUser]).
+
+delete_crashed_internal(Q, ActingUser) ->
+ QName = amqqueue:get_name(Q),
+ {ok, BQ} = application:get_env(rabbit, backing_queue_module),
+ BQ:delete_crashed(Q),
+ ok = rabbit_amqqueue:internal_delete(QName, ActingUser).
+
+recover_durable_queues(QueuesAndRecoveryTerms) ->
+ {Results, Failures} =
+ gen_server2:mcall(
+ [{rabbit_amqqueue_sup_sup:start_queue_process(node(), Q, recovery),
+ {init, {self(), Terms}}} || {Q, Terms} <- QueuesAndRecoveryTerms]),
+ [rabbit_log:error("Queue ~p failed to initialise: ~p~n",
+ [Pid, Error]) || {Pid, Error} <- Failures],
+ [Q || {_, {new, Q}} <- Results].
+
+capabilities() ->
+ #{policies => [<<"expires">>, <<"message-ttl">>, <<"dead-letter-exchange">>,
+ <<"dead-letter-routing-key">>, <<"max-length">>,
+ <<"max-length-bytes">>, <<"max-in-memory-length">>, <<"max-in-memory-bytes">>,
+ <<"max-priority">>, <<"overflow">>, <<"queue-mode">>,
+ <<"single-active-consumer">>, <<"delivery-limit">>,
+ <<"ha-mode">>, <<"ha-params">>, <<"ha-sync-mode">>,
+ <<"ha-promote-on-shutdown">>, <<"ha-promote-on-failure">>,
+ <<"queue-master-locator">>],
+ queue_arguments => [<<"x-expires">>, <<"x-message-ttl">>, <<"x-dead-letter-exchange">>,
+ <<"x-dead-letter-routing-key">>, <<"x-max-length">>,
+ <<"x-max-length-bytes">>, <<"x-max-in-memory-length">>,
+ <<"x-max-in-memory-bytes">>, <<"x-max-priority">>,
+ <<"x-overflow">>, <<"x-queue-mode">>, <<"x-single-active-consumer">>,
+ <<"x-queue-type">>, <<"x-queue-master-locator">>],
+ consumer_arguments => [<<"x-cancel-on-ha-failover">>,
+ <<"x-priority">>, <<"x-credit">>
+ ],
+ server_named => true}.
+
+reject_seq_no(SeqNo, U0) ->
+ reject_seq_no(SeqNo, U0, []).
+
+reject_seq_no(SeqNo, U0, Acc) ->
+ case maps:take(SeqNo, U0) of
+ {_, U} ->
+ {U, [SeqNo | Acc]};
+ error ->
+ {U0, Acc}
+ end.
+
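+%% Remove Pid from the pending set of each of the given sequence
+%% numbers. A message whose pending set becomes empty is confirmed if at
+%% least one process confirmed it and rejected otherwise; e.g. a message
+%% pending on a master and a mirror is still confirmed when the master
+%% confirms and the mirror goes down, but is rejected if both go down
+%% before any confirm arrives.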
+settle_seq_nos(MsgSeqNos, Pid, U0, Reason) ->
+ lists:foldl(
+ fun (SeqNo, {U, C0, R0}) ->
+ case U of
+ #{SeqNo := Status0} ->
+ case update_msg_status(Reason, Pid, Status0) of
+ #msg_status{pending = [],
+ confirmed = []} ->
+ %% no pending left and nothing confirmed
+ %% then we reject it
+ {maps:remove(SeqNo, U), C0, [SeqNo | R0]};
+ #msg_status{pending = [],
+ confirmed = _} ->
+ %% this can be confirmed as there are no pending
+ %% and confirmed isn't empty
+ {maps:remove(SeqNo, U), [SeqNo | C0], R0};
+ MsgStatus ->
+ {U#{SeqNo => MsgStatus}, C0, R0}
+ end;
+ _ ->
+ {U, C0, R0}
+ end
+ end, {U0, [], []}, MsgSeqNos).
+
+update_msg_status(confirm, Pid, #msg_status{pending = P,
+ confirmed = C} = S) ->
+ Rem = lists:delete(Pid, P),
+ S#msg_status{pending = Rem, confirmed = [Pid | C]};
+update_msg_status(down, Pid, #msg_status{pending = P} = S) ->
+ S#msg_status{pending = lists:delete(Pid, P)}.
+
+%% part of channel <-> queue api
+confirm_to_sender(Pid, QName, MsgSeqNos) ->
+ %% the stream queue included the queue type refactoring and thus requires
+ %% a different message format
+ Evt = case rabbit_ff_registry:is_enabled(stream_queue) of
+ true ->
+ {queue_event, QName, {confirm, MsgSeqNos, self()}};
+ false ->
+ {confirm, MsgSeqNos, self()}
+ end,
+ gen_server2:cast(Pid, Evt).
+
+send_rejection(Pid, QName, MsgSeqNo) ->
+ case rabbit_ff_registry:is_enabled(stream_queue) of
+ true ->
+ gen_server2:cast(Pid, {queue_event, QName,
+ {reject_publish, MsgSeqNo, self()}});
+ false ->
+ gen_server2:cast(Pid, {reject_publish, MsgSeqNo, self()})
+ end.
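+
+%% For illustration, the two event shapes produced above, where QPid is
+%% the queue process calling these functions (i.e. self()):
+%%
+%%   with stream_queue enabled:  {queue_event, QName, {confirm, MsgSeqNos, QPid}}
+%%   without it:                 {confirm, MsgSeqNos, QPid}
+%%
+%% and analogously {reject_publish, MsgSeqNo, QPid} for rejections.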
+
+send_queue_event(Pid, QName, Evt) ->
+ gen_server2:cast(Pid, {queue_event, QName, Evt}).
diff --git a/deps/rabbit/src/rabbit_client_sup.erl b/deps/rabbit/src/rabbit_client_sup.erl
new file mode 100644
index 0000000000..a28e4ce39c
--- /dev/null
+++ b/deps/rabbit/src/rabbit_client_sup.erl
@@ -0,0 +1,43 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_client_sup).
+
+-behaviour(supervisor2).
+
+-export([start_link/1, start_link/2, start_link_worker/2]).
+
+-export([init/1]).
+
+-include("rabbit.hrl").
+
+%%----------------------------------------------------------------------------
+
+-spec start_link(rabbit_types:mfargs()) ->
+ rabbit_types:ok_pid_or_error().
+
+start_link(Callback) ->
+ supervisor2:start_link(?MODULE, Callback).
+
+-spec start_link({'local', atom()}, rabbit_types:mfargs()) ->
+ rabbit_types:ok_pid_or_error().
+
+start_link(SupName, Callback) ->
+ supervisor2:start_link(SupName, ?MODULE, Callback).
+
+-spec start_link_worker({'local', atom()}, rabbit_types:mfargs()) ->
+ rabbit_types:ok_pid_or_error().
+
+start_link_worker(SupName, Callback) ->
+ supervisor2:start_link(SupName, ?MODULE, {Callback, worker}).
+
+init({M,F,A}) ->
+ {ok, {{simple_one_for_one, 0, 1},
+ [{client, {M,F,A}, temporary, infinity, supervisor, [M]}]}};
+init({{M,F,A}, worker}) ->
+ {ok, {{simple_one_for_one, 0, 1},
+ [{client, {M,F,A}, temporary, ?WORKER_WAIT, worker, [M]}]}}.
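+
+%% Usage sketch (the my_client module is hypothetical): with a
+%% simple_one_for_one strategy the {M,F,A} above is a template, and each
+%% dynamically started child appends its own arguments to A:
+%%
+%%   {ok, Sup} = rabbit_client_sup:start_link({local, my_client_sup},
+%%                                            {my_client, start_link, []}),
+%%   {ok, _Child} = supervisor2:start_child(Sup, [SomeArg]),
+%%   %% which invokes my_client:start_link(SomeArg)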
diff --git a/deps/rabbit/src/rabbit_config.erl b/deps/rabbit/src/rabbit_config.erl
new file mode 100644
index 0000000000..1198035a7a
--- /dev/null
+++ b/deps/rabbit/src/rabbit_config.erl
@@ -0,0 +1,46 @@
+-module(rabbit_config).
+
+-export([
+ config_files/0,
+ get_advanced_config/0
+ ]).
+
+-export([schema_dir/0]).
+-deprecated([{schema_dir, 0, eventually}]).
+
+-export_type([config_location/0]).
+
+-type config_location() :: string().
+
+get_confs() ->
+ case get_prelaunch_config_state() of
+ #{config_files := Confs} -> Confs;
+ _ -> []
+ end.
+
+schema_dir() ->
+ undefined.
+
+get_advanced_config() ->
+ case get_prelaunch_config_state() of
+ %% There can be only one advanced.config
+ #{config_advanced_file := FileName} when FileName =/= undefined ->
+ case rabbit_file:is_file(FileName) of
+ true -> FileName;
+ false -> none
+ end;
+ _ -> none
+ end.
+
+-spec config_files() -> [config_location()].
+config_files() ->
+ ConfFiles = [filename:absname(File) || File <- get_confs(),
+ filelib:is_regular(File)],
+ AdvancedFiles = case get_advanced_config() of
+ none -> [];
+ FileName -> [filename:absname(FileName)]
+ end,
+ AdvancedFiles ++ ConfFiles.
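+
+%% Example result (paths are hypothetical): with both files present,
+%% config_files/0 returns the advanced config first, e.g.
+%%   ["/etc/rabbitmq/advanced.config", "/etc/rabbitmq/rabbitmq.conf"]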
+
+get_prelaunch_config_state() ->
+ rabbit_prelaunch_conf:get_config_state().
diff --git a/deps/rabbit/src/rabbit_confirms.erl b/deps/rabbit/src/rabbit_confirms.erl
new file mode 100644
index 0000000000..2fe032d1f1
--- /dev/null
+++ b/deps/rabbit/src/rabbit_confirms.erl
@@ -0,0 +1,152 @@
+-module(rabbit_confirms).
+
+-compile({no_auto_import, [size/1]}).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-export([init/0,
+ insert/4,
+ confirm/3,
+ reject/2,
+
+ remove_queue/2,
+
+ smallest/1,
+ size/1,
+ is_empty/1]).
+
+-type seq_no() :: non_neg_integer().
+-type queue_name() :: rabbit_amqqueue:name().
+-type exchange_name() :: rabbit_exchange:name().
+
+-record(?MODULE, {smallest :: undefined | seq_no(),
+ unconfirmed = #{} :: #{seq_no() =>
+ {exchange_name(),
+ #{queue_name() => ok}}}
+ }).
+
+-type mx() :: {seq_no(), exchange_name()}.
+
+-opaque state() :: #?MODULE{}.
+
+-export_type([
+ state/0
+ ]).
+
+-spec init() -> state().
+init() ->
+ #?MODULE{}.
+
+-spec insert(seq_no(), [queue_name()], exchange_name(), state()) ->
+ state().
+insert(SeqNo, QNames, #resource{kind = exchange} = XName,
+ #?MODULE{smallest = S0,
+ unconfirmed = U0} = State)
+ when is_integer(SeqNo)
+ andalso is_list(QNames)
+ andalso is_map_key(SeqNo, U0) == false ->
+ U = U0#{SeqNo => {XName, maps:from_list([{Q, ok} || Q <- QNames])}},
+ S = case S0 of
+ undefined -> SeqNo;
+ _ -> S0
+ end,
+ State#?MODULE{smallest = S,
+ unconfirmed = U}.
+
+-spec confirm([seq_no()], queue_name(), state()) ->
+ {[mx()], state()}.
+confirm(SeqNos, QName, #?MODULE{smallest = Smallest0,
+ unconfirmed = U0} = State)
+ when is_list(SeqNos) ->
+ {Confirmed, U} = lists:foldr(
+ fun (SeqNo, Acc) ->
+ confirm_one(SeqNo, QName, Acc)
+ end, {[], U0}, SeqNos),
+ %% check if smallest is in Confirmed
+    %% TODO: this can be optimised by checking in the preceding foldr
+ Smallest =
+ case lists:any(fun ({S, _}) -> S == Smallest0 end, Confirmed) of
+ true ->
+ %% work out new smallest
+ next_smallest(Smallest0, U);
+ false ->
+ Smallest0
+ end,
+ {Confirmed, State#?MODULE{smallest = Smallest,
+ unconfirmed = U}}.
+
+-spec reject(seq_no(), state()) ->
+ {ok, mx(), state()} | {error, not_found}.
+reject(SeqNo, #?MODULE{smallest = Smallest0,
+ unconfirmed = U0} = State)
+ when is_integer(SeqNo) ->
+ case maps:take(SeqNo, U0) of
+ {{XName, _QS}, U} ->
+ Smallest = case SeqNo of
+ Smallest0 ->
+ %% need to scan as the smallest was removed
+ next_smallest(Smallest0, U);
+ _ ->
+ Smallest0
+ end,
+ {ok, {SeqNo, XName}, State#?MODULE{unconfirmed = U,
+ smallest = Smallest}};
+ error ->
+ {error, not_found}
+ end.
+
+%% idempotent
+-spec remove_queue(queue_name(), state()) ->
+ {[mx()], state()}.
+remove_queue(QName, #?MODULE{unconfirmed = U} = State) ->
+ SeqNos = maps:fold(
+ fun (SeqNo, {_XName, QS0}, Acc) ->
+ case maps:is_key(QName, QS0) of
+ true ->
+ [SeqNo | Acc];
+ false ->
+ Acc
+ end
+ end, [], U),
+    confirm(lists:sort(SeqNos), QName, State).
+
+-spec smallest(state()) -> seq_no() | undefined.
+smallest(#?MODULE{smallest = Smallest}) ->
+ Smallest.
+
+-spec size(state()) -> non_neg_integer().
+size(#?MODULE{unconfirmed = U}) ->
+ maps:size(U).
+
+-spec is_empty(state()) -> boolean().
+is_empty(State) ->
+ size(State) == 0.
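+
+%% Minimal usage sketch of this module (illustrative; QName and XName
+%% stand for arbitrary #resource{} queue and exchange names):
+%%
+%%   S0 = rabbit_confirms:init(),
+%%   S1 = rabbit_confirms:insert(1, [QName], XName, S0),
+%%   S2 = rabbit_confirms:insert(2, [QName], XName, S1),
+%%   {[{1, XName}], S3} = rabbit_confirms:confirm([1], QName, S2),
+%%   2 = rabbit_confirms:smallest(S3),
+%%   false = rabbit_confirms:is_empty(S3).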
+
+%% INTERNAL
+
+confirm_one(SeqNo, QName, {Acc, U0}) ->
+ case maps:take(SeqNo, U0) of
+ {{XName, QS}, U1}
+ when is_map_key(QName, QS)
+ andalso map_size(QS) == 1 ->
+ %% last queue confirm
+ {[{SeqNo, XName} | Acc], U1};
+ {{XName, QS}, U1} ->
+ {Acc, U1#{SeqNo => {XName, maps:remove(QName, QS)}}};
+ error ->
+ {Acc, U0}
+ end.
+
+next_smallest(_S, U) when map_size(U) == 0 ->
+ undefined;
+next_smallest(S, U) when is_map_key(S, U) ->
+ S;
+next_smallest(S, U) ->
+ %% TODO: this is potentially infinitely recursive if called incorrectly
+ next_smallest(S+1, U).
+
+
+
+-ifdef(TEST).
+-include_lib("eunit/include/eunit.hrl").
+-endif.
diff --git a/deps/rabbit/src/rabbit_connection_helper_sup.erl b/deps/rabbit/src/rabbit_connection_helper_sup.erl
new file mode 100644
index 0000000000..d0509029fd
--- /dev/null
+++ b/deps/rabbit/src/rabbit_connection_helper_sup.erl
@@ -0,0 +1,57 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_connection_helper_sup).
+
+%% Supervises auxiliary processes of AMQP 0-9-1 connections:
+%%
+%% * Channel supervisor
+%% * Heartbeat receiver
+%% * Heartbeat sender
+%% * Exclusive queue collector
+%%
+%% See also rabbit_heartbeat, rabbit_channel_sup_sup, rabbit_queue_collector.
+
+-behaviour(supervisor2).
+
+-export([start_link/0]).
+-export([start_channel_sup_sup/1,
+ start_queue_collector/2]).
+
+-export([init/1]).
+
+-include("rabbit.hrl").
+
+%%----------------------------------------------------------------------------
+
+-spec start_link() -> rabbit_types:ok_pid_or_error().
+
+start_link() ->
+ supervisor2:start_link(?MODULE, []).
+
+-spec start_channel_sup_sup(pid()) -> rabbit_types:ok_pid_or_error().
+
+start_channel_sup_sup(SupPid) ->
+ supervisor2:start_child(
+ SupPid,
+ {channel_sup_sup, {rabbit_channel_sup_sup, start_link, []},
+ intrinsic, infinity, supervisor, [rabbit_channel_sup_sup]}).
+
+-spec start_queue_collector(pid(), rabbit_types:proc_name()) ->
+ rabbit_types:ok_pid_or_error().
+
+start_queue_collector(SupPid, Identity) ->
+ supervisor2:start_child(
+ SupPid,
+ {collector, {rabbit_queue_collector, start_link, [Identity]},
+ intrinsic, ?WORKER_WAIT, worker, [rabbit_queue_collector]}).
+
+%%----------------------------------------------------------------------------
+
+init([]) ->
+ ?LG_PROCESS_TYPE(connection_helper_sup),
+ {ok, {{one_for_one, 10, 10}, []}}.
diff --git a/deps/rabbit/src/rabbit_connection_sup.erl b/deps/rabbit/src/rabbit_connection_sup.erl
new file mode 100644
index 0000000000..c1d1bd0d77
--- /dev/null
+++ b/deps/rabbit/src/rabbit_connection_sup.erl
@@ -0,0 +1,66 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_connection_sup).
+
+%% Supervisor for a (network) AMQP 0-9-1 client connection.
+%%
+%% Supervises
+%%
+%% * rabbit_reader
+%% * Auxiliary process supervisor
+%%
+%% See also rabbit_reader, rabbit_connection_helper_sup.
+
+-behaviour(supervisor2).
+-behaviour(ranch_protocol).
+
+-export([start_link/4, reader/1]).
+
+-export([init/1]).
+
+-include("rabbit.hrl").
+
+%%----------------------------------------------------------------------------
+
+-spec start_link(any(), rabbit_net:socket(), module(), any()) ->
+ {'ok', pid(), pid()}.
+
+start_link(Ref, _Sock, _Transport, _Opts) ->
+ {ok, SupPid} = supervisor2:start_link(?MODULE, []),
+ %% We need to get channels in the hierarchy here so they get shut
+ %% down after the reader, so the reader gets a chance to terminate
+ %% them cleanly. But for 1.0 readers we can't start the real
+ %% ch_sup_sup (because we don't know if we will be 0-9-1 or 1.0) -
+ %% so we add another supervisor into the hierarchy.
+ %%
+ %% This supervisor also acts as an intermediary for heartbeaters and
+ %% the queue collector process, since these must not be siblings of the
+ %% reader due to the potential for deadlock if they are added/restarted
+ %% whilst the supervision tree is shutting down.
+ {ok, HelperSup} =
+ supervisor2:start_child(
+ SupPid,
+ {helper_sup, {rabbit_connection_helper_sup, start_link, []},
+ intrinsic, infinity, supervisor, [rabbit_connection_helper_sup]}),
+ {ok, ReaderPid} =
+ supervisor2:start_child(
+ SupPid,
+ {reader, {rabbit_reader, start_link, [HelperSup, Ref]},
+ intrinsic, ?WORKER_WAIT, worker, [rabbit_reader]}),
+ {ok, SupPid, ReaderPid}.
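+
+%% Resulting supervision tree (sketch):
+%%
+%%   rabbit_connection_sup (one_for_all)
+%%   +- helper_sup (rabbit_connection_helper_sup)
+%%   |    channel_sup_sup, heartbeaters, queue collector (added later)
+%%   +- reader (rabbit_reader)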
+
+-spec reader(pid()) -> pid().
+
+reader(Pid) ->
+ hd(supervisor2:find_child(Pid, reader)).
+
+%%--------------------------------------------------------------------------
+
+init([]) ->
+ ?LG_PROCESS_TYPE(connection_sup),
+ {ok, {{one_for_all, 0, 1}, []}}.
diff --git a/deps/rabbit/src/rabbit_connection_tracking.erl b/deps/rabbit/src/rabbit_connection_tracking.erl
new file mode 100644
index 0000000000..c0704e6a7c
--- /dev/null
+++ b/deps/rabbit/src/rabbit_connection_tracking.erl
@@ -0,0 +1,515 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_connection_tracking).
+
+%% Abstracts away how tracked connection records are stored
+%% and queried.
+%%
+%% See also:
+%%
+%% * rabbit_connection_tracking_handler
+%% * rabbit_reader
+%% * rabbit_event
+-behaviour(rabbit_tracking).
+
+-export([boot/0,
+ update_tracked/1,
+ handle_cast/1,
+ register_tracked/1,
+ unregister_tracked/1,
+ count_tracked_items_in/1,
+ clear_tracking_tables/0,
+ shutdown_tracked_items/2]).
+
+-export([ensure_tracked_connections_table_for_node/1,
+ ensure_per_vhost_tracked_connections_table_for_node/1,
+ ensure_per_user_tracked_connections_table_for_node/1,
+
+ ensure_tracked_connections_table_for_this_node/0,
+ ensure_per_vhost_tracked_connections_table_for_this_node/0,
+ ensure_per_user_tracked_connections_table_for_this_node/0,
+
+ tracked_connection_table_name_for/1,
+ tracked_connection_per_vhost_table_name_for/1,
+ tracked_connection_per_user_table_name_for/1,
+ get_all_tracked_connection_table_names_for_node/1,
+
+ delete_tracked_connections_table_for_node/1,
+ delete_per_vhost_tracked_connections_table_for_node/1,
+ delete_per_user_tracked_connections_table_for_node/1,
+ delete_tracked_connection_user_entry/1,
+ delete_tracked_connection_vhost_entry/1,
+
+ clear_tracked_connection_tables_for_this_node/0,
+
+ list/0, list/1, list_on_node/1, list_on_node/2, list_of_user/1,
+ tracked_connection_from_connection_created/1,
+ tracked_connection_from_connection_state/1,
+ lookup/1,
+ count/0]).
+
+-include_lib("rabbit.hrl").
+
+-import(rabbit_misc, [pget/2]).
+
+-export([close_connections/3]).
+
+%%
+%% API
+%%
+
+%% Behaviour callbacks
+
+-spec boot() -> ok.
+
+%% Sets up and resets connection tracking tables for this
+%% node.
+boot() ->
+ ensure_tracked_connections_table_for_this_node(),
+ rabbit_log:info("Setting up a table for connection tracking on this node: ~p",
+ [tracked_connection_table_name_for(node())]),
+ ensure_per_vhost_tracked_connections_table_for_this_node(),
+ rabbit_log:info("Setting up a table for per-vhost connection counting on this node: ~p",
+ [tracked_connection_per_vhost_table_name_for(node())]),
+ ensure_per_user_tracked_connections_table_for_this_node(),
+ rabbit_log:info("Setting up a table for per-user connection counting on this node: ~p",
+ [tracked_connection_per_user_table_name_for(node())]),
+ clear_tracking_tables(),
+ ok.
+
+-spec update_tracked(term()) -> ok.
+
+update_tracked(Event) ->
+ spawn(?MODULE, handle_cast, [Event]),
+ ok.
+
+%% Asynchronously handle update events
+-spec handle_cast(term()) -> ok.
+
+handle_cast({connection_created, Details}) ->
+ ThisNode = node(),
+ case pget(node, Details) of
+ ThisNode ->
+ TConn = tracked_connection_from_connection_created(Details),
+ ConnId = TConn#tracked_connection.id,
+ try
+ register_tracked(TConn)
+ catch
+ error:{no_exists, _} ->
+ Msg = "Could not register connection ~p for tracking, "
+ "its table is not ready yet or the connection terminated prematurely",
+ rabbit_log_connection:warning(Msg, [ConnId]),
+ ok;
+ error:Err ->
+ Msg = "Could not register connection ~p for tracking: ~p",
+ rabbit_log_connection:warning(Msg, [ConnId, Err]),
+ ok
+ end;
+ _OtherNode ->
+ %% ignore
+ ok
+ end;
+handle_cast({connection_closed, Details}) ->
+ ThisNode = node(),
+ case pget(node, Details) of
+ ThisNode ->
+ %% [{name,<<"127.0.0.1:64078 -> 127.0.0.1:5672">>},
+ %% {pid,<0.1774.0>},
+ %% {node, rabbit@hostname}]
+ unregister_tracked(
+ rabbit_tracking:id(ThisNode, pget(name, Details)));
+ _OtherNode ->
+ %% ignore
+ ok
+ end;
+handle_cast({vhost_deleted, Details}) ->
+ VHost = pget(name, Details),
+ %% Schedule vhost entry deletion, allowing time for connections to close
+ _ = timer:apply_after(?TRACKING_EXECUTION_TIMEOUT, ?MODULE,
+ delete_tracked_connection_vhost_entry, [VHost]),
+ rabbit_log_connection:info("Closing all connections in vhost '~s' because it's being deleted", [VHost]),
+ shutdown_tracked_items(
+ rabbit_connection_tracking:list(VHost),
+ rabbit_misc:format("vhost '~s' is deleted", [VHost]));
+%% Note: under normal circumstances this will be called immediately
+%% after the vhost_deleted above. Therefore we should be careful about
+%% what we log and be more defensive.
+handle_cast({vhost_down, Details}) ->
+ VHost = pget(name, Details),
+ Node = pget(node, Details),
+ rabbit_log_connection:info("Closing all connections in vhost '~s' on node '~s'"
+ " because the vhost is stopping",
+ [VHost, Node]),
+ shutdown_tracked_items(
+ rabbit_connection_tracking:list_on_node(Node, VHost),
+ rabbit_misc:format("vhost '~s' is down", [VHost]));
+handle_cast({user_deleted, Details}) ->
+ Username = pget(name, Details),
+ %% Schedule user entry deletion, allowing time for connections to close
+ _ = timer:apply_after(?TRACKING_EXECUTION_TIMEOUT, ?MODULE,
+ delete_tracked_connection_user_entry, [Username]),
+ rabbit_log_connection:info("Closing all connections from user '~s' because it's being deleted", [Username]),
+ shutdown_tracked_items(
+ rabbit_connection_tracking:list_of_user(Username),
+ rabbit_misc:format("user '~s' is deleted", [Username]));
+%% A node has been deleted from the cluster.
+handle_cast({node_deleted, Details}) ->
+ Node = pget(node, Details),
+ rabbit_log_connection:info("Node '~s' was removed from the cluster, deleting its connection tracking tables...", [Node]),
+ delete_tracked_connections_table_for_node(Node),
+ delete_per_vhost_tracked_connections_table_for_node(Node),
+ delete_per_user_tracked_connections_table_for_node(Node).
+
+-spec register_tracked(rabbit_types:tracked_connection()) -> ok.
+-dialyzer([{nowarn_function, [register_tracked/1]}, race_conditions]).
+
+register_tracked(#tracked_connection{username = Username, vhost = VHost, id = ConnId, node = Node} = Conn) when Node =:= node() ->
+ TableName = tracked_connection_table_name_for(Node),
+ PerVhostTableName = tracked_connection_per_vhost_table_name_for(Node),
+ PerUserConnTableName = tracked_connection_per_user_table_name_for(Node),
+ %% upsert
+ case mnesia:dirty_read(TableName, ConnId) of
+ [] ->
+ mnesia:dirty_write(TableName, Conn),
+ mnesia:dirty_update_counter(PerVhostTableName, VHost, 1),
+ mnesia:dirty_update_counter(PerUserConnTableName, Username, 1);
+ [#tracked_connection{}] ->
+ ok
+ end,
+ ok.
+
+-spec unregister_tracked(rabbit_types:tracked_connection_id()) -> ok.
+
+unregister_tracked(ConnId = {Node, _Name}) when Node =:= node() ->
+ TableName = tracked_connection_table_name_for(Node),
+ PerVhostTableName = tracked_connection_per_vhost_table_name_for(Node),
+ PerUserConnTableName = tracked_connection_per_user_table_name_for(Node),
+ case mnesia:dirty_read(TableName, ConnId) of
+ [] -> ok;
+ [#tracked_connection{vhost = VHost, username = Username}] ->
+ mnesia:dirty_update_counter(PerUserConnTableName, Username, -1),
+ mnesia:dirty_update_counter(PerVhostTableName, VHost, -1),
+ mnesia:dirty_delete(TableName, ConnId)
+ end.
+
+-spec count_tracked_items_in({atom(), rabbit_types:vhost()}) -> non_neg_integer().
+
+count_tracked_items_in({vhost, VirtualHost}) ->
+ rabbit_tracking:count_tracked_items(
+ fun tracked_connection_per_vhost_table_name_for/1,
+ #tracked_connection_per_vhost.connection_count, VirtualHost,
+ "connections in vhost");
+count_tracked_items_in({user, Username}) ->
+ rabbit_tracking:count_tracked_items(
+ fun tracked_connection_per_user_table_name_for/1,
+ #tracked_connection_per_user.connection_count, Username,
+ "connections for user").
+
+-spec clear_tracking_tables() -> ok.
+
+clear_tracking_tables() ->
+ clear_tracked_connection_tables_for_this_node().
+
+-spec shutdown_tracked_items(list(), term()) -> ok.
+
+shutdown_tracked_items(TrackedItems, Message) ->
+ close_connections(TrackedItems, Message).
+
+%% Extended API
+
+-spec ensure_tracked_connections_table_for_this_node() -> ok.
+
+ensure_tracked_connections_table_for_this_node() ->
+ ensure_tracked_connections_table_for_node(node()).
+
+
+-spec ensure_per_vhost_tracked_connections_table_for_this_node() -> ok.
+
+ensure_per_vhost_tracked_connections_table_for_this_node() ->
+ ensure_per_vhost_tracked_connections_table_for_node(node()).
+
+
+-spec ensure_per_user_tracked_connections_table_for_this_node() -> ok.
+
+ensure_per_user_tracked_connections_table_for_this_node() ->
+ ensure_per_user_tracked_connections_table_for_node(node()).
+
+
+%% Create tables
+-spec ensure_tracked_connections_table_for_node(node()) -> ok.
+
+ensure_tracked_connections_table_for_node(Node) ->
+ TableName = tracked_connection_table_name_for(Node),
+ case mnesia:create_table(TableName, [{record_name, tracked_connection},
+ {attributes, record_info(fields, tracked_connection)}]) of
+ {atomic, ok} -> ok;
+ {aborted, {already_exists, _}} -> ok;
+ {aborted, Error} ->
+ rabbit_log:error("Failed to create a tracked connection table for node ~p: ~p", [Node, Error]),
+ ok
+ end.
+
+-spec ensure_per_vhost_tracked_connections_table_for_node(node()) -> ok.
+
+ensure_per_vhost_tracked_connections_table_for_node(Node) ->
+ TableName = tracked_connection_per_vhost_table_name_for(Node),
+ case mnesia:create_table(TableName, [{record_name, tracked_connection_per_vhost},
+ {attributes, record_info(fields, tracked_connection_per_vhost)}]) of
+ {atomic, ok} -> ok;
+ {aborted, {already_exists, _}} -> ok;
+ {aborted, Error} ->
+ rabbit_log:error("Failed to create a per-vhost tracked connection table for node ~p: ~p", [Node, Error]),
+ ok
+ end.
+
+-spec ensure_per_user_tracked_connections_table_for_node(node()) -> ok.
+
+ensure_per_user_tracked_connections_table_for_node(Node) ->
+ TableName = tracked_connection_per_user_table_name_for(Node),
+ case mnesia:create_table(TableName, [{record_name, tracked_connection_per_user},
+ {attributes, record_info(fields, tracked_connection_per_user)}]) of
+ {atomic, ok} -> ok;
+ {aborted, {already_exists, _}} -> ok;
+ {aborted, Error} ->
+ rabbit_log:error("Failed to create a per-user tracked connection table for node ~p: ~p", [Node, Error]),
+ ok
+ end.
+
+-spec clear_tracked_connection_tables_for_this_node() -> ok.
+
+clear_tracked_connection_tables_for_this_node() ->
+ [rabbit_tracking:clear_tracking_table(T)
+ || T <- get_all_tracked_connection_table_names_for_node(node())],
+ ok.
+
+-spec delete_tracked_connections_table_for_node(node()) -> ok.
+
+delete_tracked_connections_table_for_node(Node) ->
+ TableName = tracked_connection_table_name_for(Node),
+ rabbit_tracking:delete_tracking_table(TableName, Node, "tracked connection").
+
+-spec delete_per_vhost_tracked_connections_table_for_node(node()) -> ok.
+
+delete_per_vhost_tracked_connections_table_for_node(Node) ->
+ TableName = tracked_connection_per_vhost_table_name_for(Node),
+ rabbit_tracking:delete_tracking_table(TableName, Node,
+ "per-vhost tracked connection").
+
+-spec delete_per_user_tracked_connections_table_for_node(node()) -> ok.
+
+delete_per_user_tracked_connections_table_for_node(Node) ->
+ TableName = tracked_connection_per_user_table_name_for(Node),
+ rabbit_tracking:delete_tracking_table(TableName, Node,
+ "per-user tracked connection").
+
+-spec tracked_connection_table_name_for(node()) -> atom().
+
+tracked_connection_table_name_for(Node) ->
+ list_to_atom(rabbit_misc:format("tracked_connection_on_node_~s", [Node])).
+
+-spec tracked_connection_per_vhost_table_name_for(node()) -> atom().
+
+tracked_connection_per_vhost_table_name_for(Node) ->
+ list_to_atom(rabbit_misc:format("tracked_connection_per_vhost_on_node_~s", [Node])).
+
+-spec tracked_connection_per_user_table_name_for(node()) -> atom().
+
+tracked_connection_per_user_table_name_for(Node) ->
+ list_to_atom(rabbit_misc:format(
+ "tracked_connection_table_per_user_on_node_~s", [Node])).
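+
+%% For example, with Node = 'rabbit@warp10' (node name assumed) the
+%% three functions above return the atoms:
+%%   tracked_connection_on_node_rabbit@warp10
+%%   tracked_connection_per_vhost_on_node_rabbit@warp10
+%%   tracked_connection_table_per_user_on_node_rabbit@warp10
+%% (note the extra "table_" segment in the per-user name, as per the
+%% format string above).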
+
+-spec get_all_tracked_connection_table_names_for_node(node()) -> [atom()].
+
+get_all_tracked_connection_table_names_for_node(Node) ->
+ [tracked_connection_table_name_for(Node),
+ tracked_connection_per_vhost_table_name_for(Node),
+ tracked_connection_per_user_table_name_for(Node)].
+
+-spec lookup(rabbit_types:connection_name()) -> rabbit_types:tracked_connection() | 'not_found'.
+
+lookup(Name) ->
+ Nodes = rabbit_nodes:all_running(),
+ lookup(Name, Nodes).
+
+lookup(_, []) ->
+ not_found;
+lookup(Name, [Node | Nodes]) ->
+ TableName = tracked_connection_table_name_for(Node),
+ case mnesia:dirty_read(TableName, {Node, Name}) of
+ [] -> lookup(Name, Nodes);
+ [Row] -> Row
+ end.
+
+-spec list() -> [rabbit_types:tracked_connection()].
+
+list() ->
+ lists:foldl(
+ fun (Node, Acc) ->
+ Tab = tracked_connection_table_name_for(Node),
+ Acc ++ mnesia:dirty_match_object(Tab, #tracked_connection{_ = '_'})
+ end, [], rabbit_nodes:all_running()).
+
+-spec count() -> non_neg_integer().
+
+count() ->
+ lists:foldl(
+ fun (Node, Acc) ->
+ Tab = tracked_connection_table_name_for(Node),
+ Acc + mnesia:table_info(Tab, size)
+ end, 0, rabbit_nodes:all_running()).
+
+-spec list(rabbit_types:vhost()) -> [rabbit_types:tracked_connection()].
+
+list(VHost) ->
+ rabbit_tracking:match_tracked_items(
+ fun tracked_connection_table_name_for/1,
+ #tracked_connection{vhost = VHost, _ = '_'}).
+
+-spec list_on_node(node()) -> [rabbit_types:tracked_connection()].
+
+list_on_node(Node) ->
+ try mnesia:dirty_match_object(
+ tracked_connection_table_name_for(Node),
+ #tracked_connection{_ = '_'})
+ catch exit:{aborted, {no_exists, _}} -> []
+ end.
+
+-spec list_on_node(node(), rabbit_types:vhost()) -> [rabbit_types:tracked_connection()].
+
+list_on_node(Node, VHost) ->
+ try mnesia:dirty_match_object(
+ tracked_connection_table_name_for(Node),
+ #tracked_connection{vhost = VHost, _ = '_'})
+ catch exit:{aborted, {no_exists, _}} -> []
+ end.
+
+
+-spec list_of_user(rabbit_types:username()) -> [rabbit_types:tracked_connection()].
+
+list_of_user(Username) ->
+ rabbit_tracking:match_tracked_items(
+ fun tracked_connection_table_name_for/1,
+ #tracked_connection{username = Username, _ = '_'}).
+
+%% Internal, delete tracked entries
+
+delete_tracked_connection_vhost_entry(Vhost) ->
+ rabbit_tracking:delete_tracked_entry(
+ {rabbit_vhost, exists, [Vhost]},
+ fun tracked_connection_per_vhost_table_name_for/1,
+ Vhost).
+
+delete_tracked_connection_user_entry(Username) ->
+ rabbit_tracking:delete_tracked_entry(
+ {rabbit_auth_backend_internal, exists, [Username]},
+ fun tracked_connection_per_user_table_name_for/1,
+ Username).
+
+%% Returns a #tracked_connection from connection_created
+%% event details.
+%%
+%% @see rabbit_connection_tracking_handler.
+tracked_connection_from_connection_created(EventDetails) ->
+ %% Example event:
+ %%
+ %% [{type,network},
+ %% {pid,<0.329.0>},
+ %% {name,<<"127.0.0.1:60998 -> 127.0.0.1:5672">>},
+ %% {port,5672},
+ %% {peer_port,60998},
+ %% {host,{0,0,0,0,0,65535,32512,1}},
+ %% {peer_host,{0,0,0,0,0,65535,32512,1}},
+ %% {ssl,false},
+ %% {peer_cert_subject,''},
+ %% {peer_cert_issuer,''},
+ %% {peer_cert_validity,''},
+ %% {auth_mechanism,<<"PLAIN">>},
+ %% {ssl_protocol,''},
+ %% {ssl_key_exchange,''},
+ %% {ssl_cipher,''},
+ %% {ssl_hash,''},
+ %% {protocol,{0,9,1}},
+ %% {user,<<"guest">>},
+ %% {vhost,<<"/">>},
+ %% {timeout,14},
+ %% {frame_max,131072},
+ %% {channel_max,65535},
+ %% {client_properties,
+ %% [{<<"capabilities">>,table,
+ %% [{<<"publisher_confirms">>,bool,true},
+ %% {<<"consumer_cancel_notify">>,bool,true},
+ %% {<<"exchange_exchange_bindings">>,bool,true},
+ %% {<<"basic.nack">>,bool,true},
+ %% {<<"connection.blocked">>,bool,true},
+ %% {<<"authentication_failure_close">>,bool,true}]},
+ %% {<<"product">>,longstr,<<"Bunny">>},
+ %% {<<"platform">>,longstr,
+ %% <<"ruby 2.3.0p0 (2015-12-25 revision 53290) [x86_64-darwin15]">>},
+ %% {<<"version">>,longstr,<<"2.3.0.pre">>},
+ %% {<<"information">>,longstr,
+ %% <<"http://rubybunny.info">>}]},
+ %% {connected_at,1453214290847}]
+ Name = pget(name, EventDetails),
+ Node = pget(node, EventDetails),
+ #tracked_connection{id = rabbit_tracking:id(Node, Name),
+ name = Name,
+ node = Node,
+ vhost = pget(vhost, EventDetails),
+ username = pget(user, EventDetails),
+ connected_at = pget(connected_at, EventDetails),
+ pid = pget(pid, EventDetails),
+ type = pget(type, EventDetails),
+ peer_host = pget(peer_host, EventDetails),
+ peer_port = pget(peer_port, EventDetails)}.
+
+tracked_connection_from_connection_state(#connection{
+ vhost = VHost,
+ connected_at = Ts,
+ peer_host = PeerHost,
+ peer_port = PeerPort,
+ user = Username,
+ name = Name
+ }) ->
+ tracked_connection_from_connection_created(
+ [{name, Name},
+ {node, node()},
+ {vhost, VHost},
+ {user, Username},
+ {user_who_performed_action, Username},
+ {connected_at, Ts},
+ {pid, self()},
+ {type, network},
+ {peer_port, PeerPort},
+ {peer_host, PeerHost}]).
+
+close_connections(Tracked, Message) ->
+ close_connections(Tracked, Message, 0).
+
+close_connections(Tracked, Message, Delay) ->
+ [begin
+ close_connection(Conn, Message),
+ timer:sleep(Delay)
+ end || Conn <- Tracked],
+ ok.
+
+close_connection(#tracked_connection{pid = Pid, type = network}, Message) ->
+ try
+ rabbit_networking:close_connection(Pid, Message)
+ catch error:{not_a_connection, _} ->
+            %% the connection could have been closed concurrently, or the
+            %% input is bogus. In any case, we should not terminate
+ ok;
+ _:Err ->
+ %% ignore, don't terminate
+ rabbit_log:warning("Could not close connection ~p: ~p", [Pid, Err]),
+ ok
+ end;
+close_connection(#tracked_connection{pid = Pid, type = direct}, Message) ->
+ %% Do an RPC call to the node running the direct client.
+ Node = node(Pid),
+ rpc:call(Node, amqp_direct_connection, server_close, [Pid, 320, Message]).
diff --git a/deps/rabbit/src/rabbit_connection_tracking_handler.erl b/deps/rabbit/src/rabbit_connection_tracking_handler.erl
new file mode 100644
index 0000000000..17085d805a
--- /dev/null
+++ b/deps/rabbit/src/rabbit_connection_tracking_handler.erl
@@ -0,0 +1,80 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_connection_tracking_handler).
+
+%% This module keeps track of connection creation and termination events
+%% on its local node. The primary goal here is to decouple connection
+%% tracking from rabbit_reader in rabbit_common.
+%%
+%% Events from other nodes are ignored.
+
+-behaviour(gen_event).
+
+-export([init/1, handle_call/2, handle_event/2, handle_info/2,
+ terminate/2, code_change/3]).
+
+%% for compatibility with previous versions of CLI tools
+-export([close_connections/3]).
+
+-include_lib("rabbit.hrl").
+
+-rabbit_boot_step({?MODULE,
+ [{description, "connection tracking event handler"},
+ {mfa, {gen_event, add_handler,
+ [rabbit_event, ?MODULE, []]}},
+ {cleanup, {gen_event, delete_handler,
+ [rabbit_event, ?MODULE, []]}},
+ {requires, [connection_tracking]},
+ {enables, recovery}]}).
+
+%%
+%% API
+%%
+
+init([]) ->
+ {ok, []}.
+
+handle_event(#event{type = connection_created, props = Details}, State) ->
+ ok = rabbit_connection_tracking:update_tracked({connection_created, Details}),
+ {ok, State};
+handle_event(#event{type = connection_closed, props = Details}, State) ->
+ ok = rabbit_connection_tracking:update_tracked({connection_closed, Details}),
+ {ok, State};
+handle_event(#event{type = vhost_deleted, props = Details}, State) ->
+ ok = rabbit_connection_tracking:update_tracked({vhost_deleted, Details}),
+ {ok, State};
+%% Note: under normal circumstances this will be called immediately
+%% after the vhost_deleted above. Therefore we should be careful about
+%% what we log and be more defensive.
+handle_event(#event{type = vhost_down, props = Details}, State) ->
+ ok = rabbit_connection_tracking:update_tracked({vhost_down, Details}),
+ {ok, State};
+handle_event(#event{type = user_deleted, props = Details}, State) ->
+ ok = rabbit_connection_tracking:update_tracked({user_deleted, Details}),
+ {ok, State};
+%% A node has been deleted from the cluster.
+handle_event(#event{type = node_deleted, props = Details}, State) ->
+ ok = rabbit_connection_tracking:update_tracked({node_deleted, Details}),
+ {ok, State};
+handle_event(_Event, State) ->
+ {ok, State}.
+
+handle_call(_Request, State) ->
+ {ok, not_understood, State}.
+
+handle_info(_Info, State) ->
+ {ok, State}.
+
+terminate(_Arg, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+close_connections(Tracked, Message, Delay) ->
+ rabbit_connection_tracking:close_connections(Tracked, Message, Delay).
diff --git a/deps/rabbit/src/rabbit_control_pbe.erl b/deps/rabbit/src/rabbit_control_pbe.erl
new file mode 100644
index 0000000000..95c4fe41f1
--- /dev/null
+++ b/deps/rabbit/src/rabbit_control_pbe.erl
@@ -0,0 +1,82 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_control_pbe).
+
+-export([decode/4, encode/4, list_ciphers/0, list_hashes/0]).
+
+% for testing purposes
+-export([evaluate_input_as_term/1]).
+
+list_ciphers() ->
+ {ok, io_lib:format("~p", [rabbit_pbe:supported_ciphers()])}.
+
+list_hashes() ->
+ {ok, io_lib:format("~p", [rabbit_pbe:supported_hashes()])}.
+
+validate(_Cipher, _Hash, Iterations, _Args) when Iterations =< 0 ->
+ {error, io_lib:format("The requested number of iterations is incorrect", [])};
+validate(_Cipher, _Hash, _Iterations, Args) when length(Args) < 2 ->
+ {error, io_lib:format("Please provide a value to encode/decode and a passphrase", [])};
+validate(_Cipher, _Hash, _Iterations, Args) when length(Args) > 2 ->
+ {error, io_lib:format("Too many arguments. Please provide a value to encode/decode and a passphrase", [])};
+validate(Cipher, Hash, _Iterations, _Args) ->
+ case lists:member(Cipher, rabbit_pbe:supported_ciphers()) of
+ false ->
+ {error, io_lib:format("The requested cipher is not supported", [])};
+ true ->
+ case lists:member(Hash, rabbit_pbe:supported_hashes()) of
+ false ->
+ {error, io_lib:format("The requested hash is not supported", [])};
+ true -> ok
+ end
+ end.
+
+encode(Cipher, Hash, Iterations, Args) ->
+ case validate(Cipher, Hash, Iterations, Args) of
+ {error, Err} -> {error, Err};
+ ok ->
+ [Value, PassPhrase] = Args,
+ try begin
+ TermValue = evaluate_input_as_term(Value),
+ Result = {encrypted, _} = rabbit_pbe:encrypt_term(Cipher, Hash, Iterations,
+ list_to_binary(PassPhrase), TermValue),
+ {ok, io_lib:format("~p", [Result])}
+ end
+ catch
+ _:Msg -> {error, io_lib:format("Error during cipher operation: ~p", [Msg])}
+ end
+ end.
+
+decode(Cipher, Hash, Iterations, Args) ->
+ case validate(Cipher, Hash, Iterations, Args) of
+ {error, Err} -> {error, Err};
+ ok ->
+ [Value, PassPhrase] = Args,
+ try begin
+ TermValue = evaluate_input_as_term(Value),
+ TermToDecrypt = case TermValue of
+ {encrypted, _}=EncryptedTerm ->
+ EncryptedTerm;
+ _ ->
+ {encrypted, TermValue}
+ end,
+ Result = rabbit_pbe:decrypt_term(Cipher, Hash, Iterations,
+ list_to_binary(PassPhrase),
+ TermToDecrypt),
+ {ok, io_lib:format("~p", [Result])}
+ end
+ catch
+ _:Msg -> {error, io_lib:format("Error during cipher operation: ~p", [Msg])}
+ end
+ end.
+
+evaluate_input_as_term(Input) ->
+ {ok,Tokens,_EndLine} = erl_scan:string(Input ++ "."),
+ {ok,AbsForm} = erl_parse:parse_exprs(Tokens),
+ {value,TermValue,_Bs} = erl_eval:exprs(AbsForm, erl_eval:new_bindings()),
+ TermValue.
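+
+%% Round-trip sketch (illustrative; aes_cbc256/sha512 stand for any pair
+%% returned by list_ciphers/0 and list_hashes/0):
+%%
+%%   {ok, Enc} = rabbit_control_pbe:encode(aes_cbc256, sha512, 1000,
+%%                                         ["<<\"guest\">>", "passphrase"]),
+%%   %% Enc renders {encrypted, <<...>>}; fed back as a string with the
+%%   %% same cipher/hash/iterations, decode/4 recovers <<"guest">>.
+%%
+%% evaluate_input_as_term/1 turns the CLI string into an Erlang term,
+%% e.g. evaluate_input_as_term("<<\"guest\">>") =:= <<"guest">>.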
diff --git a/deps/rabbit/src/rabbit_core_ff.erl b/deps/rabbit/src/rabbit_core_ff.erl
new file mode 100644
index 0000000000..6d30846775
--- /dev/null
+++ b/deps/rabbit/src/rabbit_core_ff.erl
@@ -0,0 +1,179 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2018-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_core_ff).
+
+-export([quorum_queue_migration/3,
+ stream_queue_migration/3,
+ implicit_default_bindings_migration/3,
+ virtual_host_metadata_migration/3,
+ maintenance_mode_status_migration/3,
+ user_limits_migration/3]).
+
+-rabbit_feature_flag(
+ {quorum_queue,
+ #{desc => "Support queues of type `quorum`",
+ doc_url => "https://www.rabbitmq.com/quorum-queues.html",
+ stability => stable,
+ migration_fun => {?MODULE, quorum_queue_migration}
+ }}).
+
+-rabbit_feature_flag(
+ {stream_queue,
+ #{desc => "Support queues of type `stream`",
+ doc_url => "https://www.rabbitmq.com/stream-queues.html",
+ stability => stable,
+ depends_on => [quorum_queue],
+ migration_fun => {?MODULE, stream_queue_migration}
+ }}).
+
+-rabbit_feature_flag(
+ {implicit_default_bindings,
+ #{desc => "Default bindings are now implicit, instead of "
+ "being stored in the database",
+ stability => stable,
+ migration_fun => {?MODULE, implicit_default_bindings_migration}
+ }}).
+
+-rabbit_feature_flag(
+ {virtual_host_metadata,
+ #{desc => "Virtual host metadata (description, tags, etc)",
+ stability => stable,
+ migration_fun => {?MODULE, virtual_host_metadata_migration}
+ }}).
+
+-rabbit_feature_flag(
+ {maintenance_mode_status,
+ #{desc => "Maintenance mode status",
+ stability => stable,
+ migration_fun => {?MODULE, maintenance_mode_status_migration}
+ }}).
+
+-rabbit_feature_flag(
+ {user_limits,
+ #{desc => "Configure connection and channel limits for a user",
+ stability => stable,
+ migration_fun => {?MODULE, user_limits_migration}
+ }}).
+
+%% -------------------------------------------------------------------
+%% Quorum queues.
+%% -------------------------------------------------------------------
+
+-define(quorum_queue_tables, [rabbit_queue,
+ rabbit_durable_queue]).
+
+quorum_queue_migration(FeatureName, _FeatureProps, enable) ->
+ Tables = ?quorum_queue_tables,
+ rabbit_table:wait(Tables, _Retry = true),
+ Fields = amqqueue:fields(amqqueue_v2),
+ migrate_to_amqqueue_with_type(FeatureName, Tables, Fields);
+quorum_queue_migration(_FeatureName, _FeatureProps, is_enabled) ->
+ Tables = ?quorum_queue_tables,
+ rabbit_table:wait(Tables, _Retry = true),
+ Fields = amqqueue:fields(amqqueue_v2),
+ mnesia:table_info(rabbit_queue, attributes) =:= Fields andalso
+ mnesia:table_info(rabbit_durable_queue, attributes) =:= Fields.
+
+stream_queue_migration(_FeatureName, _FeatureProps, _Enable) ->
+ ok.
+
+migrate_to_amqqueue_with_type(FeatureName, [Table | Rest], Fields) ->
+ rabbit_log_feature_flags:info(
+ "Feature flag `~s`: migrating Mnesia table ~s...",
+ [FeatureName, Table]),
+ Fun = fun(Queue) -> amqqueue:upgrade_to(amqqueue_v2, Queue) end,
+ case mnesia:transform_table(Table, Fun, Fields) of
+ {atomic, ok} -> migrate_to_amqqueue_with_type(FeatureName,
+ Rest,
+ Fields);
+ {aborted, Reason} -> {error, Reason}
+ end;
+migrate_to_amqqueue_with_type(FeatureName, [], _) ->
+ rabbit_log_feature_flags:info(
+ "Feature flag `~s`: Mnesia tables migration done",
+ [FeatureName]),
+ ok.
+
+%% -------------------------------------------------------------------
+%% Default bindings.
+%% -------------------------------------------------------------------
+
+implicit_default_bindings_migration(FeatureName, _FeatureProps,
+ enable) ->
+ %% Default exchange bindings are now implicit (not stored in the
+ %% route tables). It should be safe to remove them outside of a
+ %% transaction.
+ rabbit_table:wait([rabbit_queue]),
+ Queues = mnesia:dirty_all_keys(rabbit_queue),
+ remove_explicit_default_bindings(FeatureName, Queues);
+implicit_default_bindings_migration(_FeatureName, _FeatureProps,
+ is_enabled) ->
+ undefined.
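+
+%% The feature flag subsystem calls migration functions as
+%% Fun(FeatureName, FeatureProps, enable | is_enabled); per the clauses
+%% in this module the expected returns are, roughly:
+%%   enable     -> ok | {error, Reason}
+%%   is_enabled -> boolean() | undefined  (undefined: cannot tell)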
+
+remove_explicit_default_bindings(_FeatureName, []) ->
+ ok;
+remove_explicit_default_bindings(FeatureName, Queues) ->
+ rabbit_log_feature_flags:info(
+ "Feature flag `~s`: deleting explicit default bindings "
+ "for ~b queues (it may take some time)...",
+ [FeatureName, length(Queues)]),
+ [rabbit_binding:remove_default_exchange_binding_rows_of(Q)
+ || Q <- Queues],
+ ok.
+
+%% -------------------------------------------------------------------
+%% Virtual host metadata.
+%% -------------------------------------------------------------------
+
+virtual_host_metadata_migration(_FeatureName, _FeatureProps, enable) ->
+ Tab = rabbit_vhost,
+ rabbit_table:wait([Tab], _Retry = true),
+ Fun = fun(Row) -> vhost:upgrade_to(vhost_v2, Row) end,
+ case mnesia:transform_table(Tab, Fun, vhost:fields(vhost_v2)) of
+ {atomic, ok} -> ok;
+ {aborted, Reason} -> {error, Reason}
+ end;
+virtual_host_metadata_migration(_FeatureName, _FeatureProps, is_enabled) ->
+ mnesia:table_info(rabbit_vhost, attributes) =:= vhost:fields(vhost_v2).
+
+%% -------------------------------------------------------------------
+%% Maintenance mode.
+%% -------------------------------------------------------------------
+
+maintenance_mode_status_migration(FeatureName, _FeatureProps, enable) ->
+ TableName = rabbit_maintenance:status_table_name(),
+ rabbit_log:info(
+ "Creating table ~s for feature flag `~s`",
+ [TableName, FeatureName]),
+ try
+ _ = rabbit_table:create(
+ TableName,
+ rabbit_maintenance:status_table_definition()),
+ _ = rabbit_table:ensure_table_copy(TableName, node())
+ catch throw:Reason ->
+ rabbit_log:error(
+ "Failed to create maintenance status table: ~p",
+ [Reason])
+ end;
+maintenance_mode_status_migration(_FeatureName, _FeatureProps, is_enabled) ->
+ rabbit_table:exists(rabbit_maintenance:status_table_name()).
+
+%% -------------------------------------------------------------------
+%% User limits.
+%% -------------------------------------------------------------------
+
+user_limits_migration(_FeatureName, _FeatureProps, enable) ->
+ Tab = rabbit_user,
+ rabbit_table:wait([Tab], _Retry = true),
+ Fun = fun(Row) -> internal_user:upgrade_to(internal_user_v2, Row) end,
+ case mnesia:transform_table(Tab, Fun, internal_user:fields(internal_user_v2)) of
+ {atomic, ok} -> ok;
+ {aborted, Reason} -> {error, Reason}
+ end;
+user_limits_migration(_FeatureName, _FeatureProps, is_enabled) ->
+ mnesia:table_info(rabbit_user, attributes) =:= internal_user:fields(internal_user_v2).
diff --git a/deps/rabbit/src/rabbit_core_metrics_gc.erl b/deps/rabbit/src/rabbit_core_metrics_gc.erl
new file mode 100644
index 0000000000..890c127586
--- /dev/null
+++ b/deps/rabbit/src/rabbit_core_metrics_gc.erl
@@ -0,0 +1,199 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+-module(rabbit_core_metrics_gc).
+
+-record(state, {timer,
+ interval
+ }).
+
+-export([start_link/0]).
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
+ code_change/3]).
+
+-spec start_link() -> rabbit_types:ok_pid_or_error().
+
+start_link() ->
+ gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
+
+init(_) ->
+ Interval = rabbit_misc:get_env(rabbit, core_metrics_gc_interval, 120000),
+ {ok, start_timer(#state{interval = Interval})}.
+
+handle_call(test, _From, State) ->
+ {reply, ok, State}.
+
+handle_cast(_Request, State) ->
+ {noreply, State}.
+
+handle_info(start_gc, State) ->
+ gc_connections(),
+ gc_channels(),
+ gc_queues(),
+ gc_exchanges(),
+ gc_nodes(),
+ gc_gen_server2(),
+ gc_auth_attempts(),
+ {noreply, start_timer(State)}.
+
+terminate(_Reason, #state{timer = TRef}) ->
+ erlang:cancel_timer(TRef),
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+start_timer(#state{interval = Interval} = St) ->
+ TRef = erlang:send_after(Interval, self(), start_gc),
+ St#state{timer = TRef}.
+
+gc_connections() ->
+ gc_process(connection_created),
+ gc_process(connection_metrics),
+ gc_process(connection_coarse_metrics).
+
+gc_channels() ->
+ gc_process(channel_created),
+ gc_process(channel_metrics),
+ gc_process(channel_process_metrics),
+ ok.
+
+gc_queues() ->
+ gc_local_queues(),
+ gc_global_queues().
+
+gc_local_queues() ->
+ Queues = rabbit_amqqueue:list_local_names(),
+ QueuesDown = rabbit_amqqueue:list_local_names_down(),
+ GbSet = gb_sets:from_list(Queues),
+ GbSetDown = gb_sets:from_list(QueuesDown),
+ gc_queue_metrics(GbSet, GbSetDown),
+ gc_entity(queue_coarse_metrics, GbSet),
+ Followers = gb_sets:from_list([amqqueue:get_name(Q) || Q <- rabbit_amqqueue:list_local_followers() ]),
+ gc_leader_data(Followers).
+
+gc_leader_data(Followers) ->
+ ets:foldl(fun({Id, _, _, _, _}, none) ->
+ gc_leader_data(Id, queue_coarse_metrics, Followers)
+ end, none, queue_coarse_metrics).
+
+gc_leader_data(Id, Table, GbSet) ->
+ case gb_sets:is_member(Id, GbSet) of
+ true ->
+ ets:delete(Table, Id),
+ none;
+ false ->
+ none
+ end.
+
+gc_global_queues() ->
+ GbSet = gb_sets:from_list(rabbit_amqqueue:list_names()),
+ gc_process_and_entity(channel_queue_metrics, GbSet),
+ gc_process_and_entity(consumer_created, GbSet),
+ ExchangeGbSet = gb_sets:from_list(rabbit_exchange:list_names()),
+ gc_process_and_entities(channel_queue_exchange_metrics, GbSet, ExchangeGbSet).
+
+gc_exchanges() ->
+ Exchanges = rabbit_exchange:list_names(),
+ GbSet = gb_sets:from_list(Exchanges),
+ gc_process_and_entity(channel_exchange_metrics, GbSet).
+
+gc_nodes() ->
+ Nodes = rabbit_mnesia:cluster_nodes(all),
+ GbSet = gb_sets:from_list(Nodes),
+ gc_entity(node_node_metrics, GbSet).
+
+gc_gen_server2() ->
+ gc_process(gen_server2_metrics).
+
+gc_process(Table) ->
+ ets:foldl(fun({Pid = Key, _}, none) ->
+ gc_process(Pid, Table, Key);
+ ({Pid = Key, _, _, _, _}, none) ->
+ gc_process(Pid, Table, Key);
+ ({Pid = Key, _, _, _}, none) ->
+ gc_process(Pid, Table, Key)
+ end, none, Table).
+
+gc_process(Pid, Table, Key) ->
+ case rabbit_misc:is_process_alive(Pid) of
+ true ->
+ none;
+ false ->
+ ets:delete(Table, Key),
+ none
+ end.
+
+gc_queue_metrics(GbSet, GbSetDown) ->
+ Table = queue_metrics,
+ ets:foldl(fun({Key, Props, Marker}, none) ->
+ case gb_sets:is_member(Key, GbSet) of
+ true ->
+ case gb_sets:is_member(Key, GbSetDown) of
+ true ->
+ ets:insert(Table, {Key, [{state, down} | lists:keydelete(state, 1, Props)], Marker}),
+ none;
+ false ->
+ none
+ end;
+ false ->
+ ets:delete(Table, Key),
+ none
+ end
+ end, none, Table).
+
+gc_entity(Table, GbSet) ->
+ ets:foldl(fun({{_, Id} = Key, _}, none) ->
+ gc_entity(Id, Table, Key, GbSet);
+ ({Id = Key, _}, none) ->
+ gc_entity(Id, Table, Key, GbSet);
+ ({Id = Key, _, _}, none) ->
+ gc_entity(Id, Table, Key, GbSet);
+ ({Id = Key, _, _, _, _}, none) ->
+ gc_entity(Id, Table, Key, GbSet)
+ end, none, Table).
+
+gc_entity(Id, Table, Key, GbSet) ->
+ case gb_sets:is_member(Id, GbSet) of
+ true ->
+ none;
+ false ->
+ ets:delete(Table, Key),
+ none
+ end.
+
+gc_process_and_entity(Table, GbSet) ->
+ ets:foldl(fun({{Pid, Id} = Key, _, _, _, _, _, _, _, _}, none)
+ when Table == channel_queue_metrics ->
+ gc_process_and_entity(Id, Pid, Table, Key, GbSet);
+ ({{Pid, Id} = Key, _, _, _, _, _}, none)
+ when Table == channel_exchange_metrics ->
+ gc_process_and_entity(Id, Pid, Table, Key, GbSet);
+ ({{Id, Pid, _} = Key, _, _, _, _, _, _}, none)
+ when Table == consumer_created ->
+ gc_process_and_entity(Id, Pid, Table, Key, GbSet);
+ ({{{Pid, Id}, _} = Key, _, _, _, _}, none) ->
+ gc_process_and_entity(Id, Pid, Table, Key, GbSet)
+ end, none, Table).
+
+gc_process_and_entity(Id, Pid, Table, Key, GbSet) ->
+ case rabbit_misc:is_process_alive(Pid) andalso gb_sets:is_member(Id, GbSet) of
+ true ->
+ none;
+ false ->
+ ets:delete(Table, Key),
+ none
+ end.
+
+gc_process_and_entities(Table, QueueGbSet, ExchangeGbSet) ->
+ ets:foldl(fun({{Pid, {Q, X}} = Key, _, _}, none) ->
+ gc_process(Pid, Table, Key),
+ gc_entity(Q, Table, Key, QueueGbSet),
+ gc_entity(X, Table, Key, ExchangeGbSet)
+ end, none, Table).
+
+gc_auth_attempts() ->
+ ets:delete_all_objects(auth_attempt_detailed_metrics).
diff --git a/deps/rabbit/src/rabbit_credential_validation.erl b/deps/rabbit/src/rabbit_credential_validation.erl
new file mode 100644
index 0000000000..8712628ade
--- /dev/null
+++ b/deps/rabbit/src/rabbit_credential_validation.erl
@@ -0,0 +1,44 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_credential_validation).
+
+-include("rabbit.hrl").
+
+%% used for backwards compatibility
+-define(DEFAULT_BACKEND, rabbit_credential_validator_accept_everything).
+
+%%
+%% API
+%%
+
+-export([validate/2, backend/0]).
+
+%% Validates a username/password pair by delegating to the effective
+%% `rabbit_credential_validator`. Used by `rabbit_auth_backend_internal`.
+%% Note that some validators may choose to only validate passwords.
+%%
+%% Possible return values:
+%%
+%% * ok: provided credentials passed validation.
+%% * {error, Error}: provided credentials failed validation.
+
+-spec validate(rabbit_types:username(), rabbit_types:password()) -> 'ok' | {'error', string()}.
+
+validate(Username, Password) ->
+ Backend = backend(),
+ Backend:validate(Username, Password).
+
+-spec backend() -> atom().
+
+backend() ->
+ case application:get_env(rabbit, credential_validator) of
+ undefined ->
+ ?DEFAULT_BACKEND;
+ {ok, Proplist} ->
+ proplists:get_value(validation_backend, Proplist, ?DEFAULT_BACKEND)
+ end.
diff --git a/deps/rabbit/src/rabbit_credential_validator.erl b/deps/rabbit/src/rabbit_credential_validator.erl
new file mode 100644
index 0000000000..3b5d0752bf
--- /dev/null
+++ b/deps/rabbit/src/rabbit_credential_validator.erl
@@ -0,0 +1,19 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_credential_validator).
+
+-include("rabbit.hrl").
+
+%% Validates a password. Used by `rabbit_auth_backend_internal`.
+%%
+%% Possible return values:
+%%
+%% * ok: provided password passed validation.
+%% * {error, Error}: provided password failed validation.
+
+-callback validate(rabbit_types:username(), rabbit_types:password()) -> 'ok' | {'error', string()}.
diff --git a/deps/rabbit/src/rabbit_credential_validator_accept_everything.erl b/deps/rabbit/src/rabbit_credential_validator_accept_everything.erl
new file mode 100644
index 0000000000..fea10fd4b6
--- /dev/null
+++ b/deps/rabbit/src/rabbit_credential_validator_accept_everything.erl
@@ -0,0 +1,23 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_credential_validator_accept_everything).
+
+-include("rabbit.hrl").
+
+-behaviour(rabbit_credential_validator).
+
+%%
+%% API
+%%
+
+-export([validate/2]).
+
+-spec validate(rabbit_types:username(), rabbit_types:password()) -> 'ok' | {'error', string()}.
+
+validate(_Username, _Password) ->
+ ok.
diff --git a/deps/rabbit/src/rabbit_credential_validator_min_password_length.erl b/deps/rabbit/src/rabbit_credential_validator_min_password_length.erl
new file mode 100644
index 0000000000..463090127f
--- /dev/null
+++ b/deps/rabbit/src/rabbit_credential_validator_min_password_length.erl
@@ -0,0 +1,50 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_credential_validator_min_password_length).
+
+-include("rabbit.hrl").
+
+-behaviour(rabbit_credential_validator).
+
+%% accommodates default (localhost-only) user credentials,
+%% guest/guest
+-define(DEFAULT_MIN_LENGTH, 5).
+
+%%
+%% API
+%%
+
+-export([validate/2]).
+%% for tests
+-export([validate/3]).
+
+-spec validate(rabbit_types:username(), rabbit_types:password()) -> 'ok' | {'error', string()}.
+
+validate(Username, Password) ->
+ MinLength = case application:get_env(rabbit, credential_validator) of
+ undefined ->
+ ?DEFAULT_MIN_LENGTH;
+ {ok, Proplist} ->
+ case proplists:get_value(min_length, Proplist) of
+ undefined -> ?DEFAULT_MIN_LENGTH;
+ Value -> rabbit_data_coercion:to_integer(Value)
+ end
+ end,
+ validate(Username, Password, MinLength).
+
+
+-spec validate(rabbit_types:username(), rabbit_types:password(), integer()) -> 'ok' | {'error', string()}.
+
+%% passwordless users
+validate(_Username, undefined, MinLength) ->
+ {error, rabbit_misc:format("minimum required password length is ~B", [MinLength])};
+validate(_Username, Password, MinLength) ->
+ case size(Password) >= MinLength of
+ true -> ok;
+ false -> {error, rabbit_misc:format("minimum required password length is ~B", [MinLength])}
+ end.
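+
+%% Examples (illustrative):
+%%   validate(<<"guest">>, <<"guest">>, 5) -> ok
+%%   validate(<<"guest">>, <<"abc">>, 5)   ->
+%%       {error, "minimum required password length is 5"}
+%%   validate(<<"guest">>, undefined, 5)   -> {error, ...}
+%%   (passwordless users always fail validation)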
diff --git a/deps/rabbit/src/rabbit_credential_validator_password_regexp.erl b/deps/rabbit/src/rabbit_credential_validator_password_regexp.erl
new file mode 100644
index 0000000000..dc64cf1d31
--- /dev/null
+++ b/deps/rabbit/src/rabbit_credential_validator_password_regexp.erl
@@ -0,0 +1,42 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+
+%% A `rabbit_credential_validator` implementation that matches
+%% password against a pre-configured regular expression.
+-module(rabbit_credential_validator_password_regexp).
+
+-include("rabbit.hrl").
+
+-behaviour(rabbit_credential_validator).
+
+%%
+%% API
+%%
+
+-export([validate/2]).
+%% for tests
+-export([validate/3]).
+
+-spec validate(rabbit_types:username(), rabbit_types:password()) -> 'ok' | {'error', string()}.
+
+validate(Username, Password) ->
+    {ok, Proplist} = application:get_env(rabbit, credential_validator),
+    case proplists:get_value(regexp, Proplist) of
+        undefined ->
+            %% return the error directly rather than passing the error
+            %% tuple to re:run/2 as a pattern, which would crash
+            {error, "rabbit.credential_validator.regexp config key is undefined"};
+        Value ->
+            validate(Username, Password, rabbit_data_coercion:to_list(Value))
+    end.
+
+
+-spec validate(rabbit_types:username(), rabbit_types:password(), string()) -> 'ok' | {'error', string()}.
+
+validate(_Username, Password, Pattern) ->
+ case re:run(rabbit_data_coercion:to_list(Password), Pattern) of
+ {match, _} -> ok;
+ nomatch -> {error, "provided password does not match the validator regular expression"}
+ end.
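+
+%% Examples (pattern chosen for illustration):
+%%   validate(<<"u">>, <<"abc12345">>, "^[a-z0-9]{8,}$") -> ok
+%%   validate(<<"u">>, <<"short">>, "^[a-z0-9]{8,}$") ->
+%%       {error, "provided password does not match the validator regular expression"}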
diff --git a/deps/rabbit/src/rabbit_dead_letter.erl b/deps/rabbit/src/rabbit_dead_letter.erl
new file mode 100644
index 0000000000..755de5cf53
--- /dev/null
+++ b/deps/rabbit/src/rabbit_dead_letter.erl
@@ -0,0 +1,253 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_dead_letter).
+
+-export([publish/5]).
+
+-include("rabbit.hrl").
+-include("rabbit_framing.hrl").
+
+%%----------------------------------------------------------------------------
+
+-type reason() :: 'expired' | 'rejected' | 'maxlen' | 'delivery_limit'.
+
+%%----------------------------------------------------------------------------
+
+-spec publish(rabbit_types:message(), reason(), rabbit_types:exchange(),
+ 'undefined' | binary(), rabbit_amqqueue:name()) -> 'ok'.
+publish(Msg, Reason, X, RK, QName) ->
+ DLMsg = make_msg(Msg, Reason, X#exchange.name, RK, QName),
+ Delivery = rabbit_basic:delivery(false, false, DLMsg, undefined),
+ {Queues, Cycles} = detect_cycles(Reason, DLMsg,
+ rabbit_exchange:route(X, Delivery)),
+ lists:foreach(fun log_cycle_once/1, Cycles),
+ _ = rabbit_queue_type:deliver(rabbit_amqqueue:lookup(Queues),
+ Delivery, stateless),
+ ok.
+
+make_msg(Msg = #basic_message{content = Content,
+ exchange_name = Exchange,
+ routing_keys = RoutingKeys},
+ Reason, DLX, RK, #resource{name = QName}) ->
+ {DeathRoutingKeys, HeadersFun1} =
+ case RK of
+ undefined -> {RoutingKeys, fun (H) -> H end};
+ _ -> {[RK], fun (H) -> lists:keydelete(<<"CC">>, 1, H) end}
+ end,
+ ReasonBin = list_to_binary(atom_to_list(Reason)),
+ TimeSec = os:system_time(seconds),
+ PerMsgTTL = per_msg_ttl_header(Content#content.properties),
+ HeadersFun2 =
+ fun (Headers) ->
+ %% The first routing key is the one specified in the
+ %% basic.publish; all others are CC or BCC keys.
+ RKs = [hd(RoutingKeys) | rabbit_basic:header_routes(Headers)],
+ RKs1 = [{longstr, Key} || Key <- RKs],
+ Info = [{<<"reason">>, longstr, ReasonBin},
+ {<<"queue">>, longstr, QName},
+ {<<"time">>, timestamp, TimeSec},
+ {<<"exchange">>, longstr, Exchange#resource.name},
+ {<<"routing-keys">>, array, RKs1}] ++ PerMsgTTL,
+ HeadersFun1(update_x_death_header(Info, Headers))
+ end,
+ Content1 = #content{properties = Props} =
+ rabbit_basic:map_headers(HeadersFun2, Content),
+ Content2 = Content1#content{properties =
+ Props#'P_basic'{expiration = undefined}},
+ Msg#basic_message{exchange_name = DLX,
+ id = rabbit_guid:gen(),
+ routing_keys = DeathRoutingKeys,
+ content = Content2}.
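+
+%% For illustration (all values below are hypothetical), a message that
+%% expired on queue <<"q1">> after being published to <<"amq.direct">>
+%% with routing key <<"k1">> ends up with an x-death table entry shaped
+%% like:
+%%
+%%   {table, [{<<"count">>,        long,      1},
+%%            {<<"reason">>,       longstr,   <<"expired">>},
+%%            {<<"queue">>,        longstr,   <<"q1">>},
+%%            {<<"time">>,         timestamp, 1605708461},
+%%            {<<"exchange">>,     longstr,   <<"amq.direct">>},
+%%            {<<"routing-keys">>, array,     [{longstr, <<"k1">>}]}]}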
+
+
+x_death_event_key(Info, Key) ->
+ case lists:keysearch(Key, 1, Info) of
+ false -> undefined;
+ {value, {Key, _KeyType, Val}} -> Val
+ end.
+
+maybe_append_to_event_group(Table, _Key, _SeenKeys, []) ->
+ [Table];
+maybe_append_to_event_group(Table, {_Queue, _Reason} = Key, SeenKeys, Acc) ->
+ case sets:is_element(Key, SeenKeys) of
+ true -> Acc;
+ false -> [Table | Acc]
+ end.
+
+group_by_queue_and_reason([]) ->
+ [];
+group_by_queue_and_reason([Table]) ->
+ [Table];
+group_by_queue_and_reason(Tables) ->
+ {_, Grouped} =
+ lists:foldl(
+ fun ({table, Info}, {SeenKeys, Acc}) ->
+ Q = x_death_event_key(Info, <<"queue">>),
+ R = x_death_event_key(Info, <<"reason">>),
+ Matcher = queue_and_reason_matcher(Q, R),
+ {Matches, _} = lists:partition(Matcher, Tables),
+ {Augmented, N} = case Matches of
+ [X] -> {X, 1};
+ [X|_] = Xs -> {X, length(Xs)}
+ end,
+ Key = {Q, R},
+ Acc1 = maybe_append_to_event_group(
+ ensure_xdeath_event_count(Augmented, N),
+ Key, SeenKeys, Acc),
+ {sets:add_element(Key, SeenKeys), Acc1}
+ end, {sets:new(), []}, Tables),
+ Grouped.
+
+update_x_death_header(Info, undefined) ->
+ update_x_death_header(Info, []);
+update_x_death_header(Info, Headers) ->
+ X = x_death_event_key(Info, <<"exchange">>),
+ Q = x_death_event_key(Info, <<"queue">>),
+ R = x_death_event_key(Info, <<"reason">>),
+ case rabbit_basic:header(<<"x-death">>, Headers) of
+ undefined ->
+ %% First x-death event gets its own top-level headers.
+ %% See rabbitmq/rabbitmq-server#1332.
+ Headers2 = rabbit_misc:set_table_value(Headers, <<"x-first-death-reason">>,
+ longstr, R),
+ Headers3 = rabbit_misc:set_table_value(Headers2, <<"x-first-death-queue">>,
+ longstr, Q),
+ Headers4 = rabbit_misc:set_table_value(Headers3, <<"x-first-death-exchange">>,
+ longstr, X),
+ rabbit_basic:prepend_table_header(
+ <<"x-death">>,
+ [{<<"count">>, long, 1} | Info], Headers4);
+ {<<"x-death">>, array, Tables} ->
+ %% group existing x-death headers in case we have some from
+ %% before rabbitmq-server#78
+ GroupedTables = group_by_queue_and_reason(Tables),
+ {Matches, Others} = lists:partition(
+ queue_and_reason_matcher(Q, R),
+ GroupedTables),
+ Info1 = case Matches of
+ [] ->
+ [{<<"count">>, long, 1} | Info];
+ [{table, M}] ->
+ increment_xdeath_event_count(M)
+ end,
+ rabbit_misc:set_table_value(
+ Headers, <<"x-death">>, array,
+ [{table, rabbit_misc:sort_field_table(Info1)} | Others]);
+ {<<"x-death">>, InvalidType, Header} ->
+ rabbit_log:warning("Message has invalid x-death header (type: ~p)."
+ " Resetting header ~p~n",
+ [InvalidType, Header]),
+ %% if x-death is something other than an array (list)
+ %% then we reset it: this happens when some clients consume
+            %% a message and re-publish it, converting header values
+ %% to strings, intentionally or not.
+ %% See rabbitmq/rabbitmq-server#767 for details.
+ rabbit_misc:set_table_value(
+ Headers, <<"x-death">>, array,
+ [{table, [{<<"count">>, long, 1} | Info]}])
+ end.
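+
+%% E.g. a message that expires on <<"q1">>, is dead-lettered, and later
+%% expires on <<"q1">> again ends up with a single {queue, reason} entry
+%% whose <<"count">> is 2, rather than with two separate entries.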
+
+ensure_xdeath_event_count({table, Info}, InitialVal) when InitialVal >= 1 ->
+ {table, ensure_xdeath_event_count(Info, InitialVal)};
+ensure_xdeath_event_count(Info, InitialVal) when InitialVal >= 1 ->
+ case x_death_event_key(Info, <<"count">>) of
+ undefined ->
+ [{<<"count">>, long, InitialVal} | Info];
+ _ ->
+ Info
+ end.
+
+increment_xdeath_event_count(Info) ->
+ case x_death_event_key(Info, <<"count">>) of
+ undefined ->
+ [{<<"count">>, long, 1} | Info];
+ N ->
+ lists:keyreplace(
+ <<"count">>, 1, Info,
+ {<<"count">>, long, N + 1})
+ end.
+
+queue_and_reason_matcher(Q, R) ->
+ F = fun(Info) ->
+ x_death_event_key(Info, <<"queue">>) =:= Q
+ andalso x_death_event_key(Info, <<"reason">>) =:= R
+ end,
+ fun({table, Info}) ->
+ F(Info);
+ (Info) when is_list(Info) ->
+ F(Info)
+ end.
+
+per_msg_ttl_header(#'P_basic'{expiration = undefined}) ->
+ [];
+per_msg_ttl_header(#'P_basic'{expiration = Expiration}) ->
+ [{<<"original-expiration">>, longstr, Expiration}];
+per_msg_ttl_header(_) ->
+ [].
+
+detect_cycles(rejected, _Msg, Queues) ->
+ {Queues, []};
+
+detect_cycles(_Reason, #basic_message{content = Content}, Queues) ->
+ #content{properties = #'P_basic'{headers = Headers}} =
+ rabbit_binary_parser:ensure_content_decoded(Content),
+ NoCycles = {Queues, []},
+ case Headers of
+ undefined ->
+ NoCycles;
+ _ ->
+ case rabbit_misc:table_lookup(Headers, <<"x-death">>) of
+ {array, Deaths} ->
+ {Cycling, NotCycling} =
+ lists:partition(fun (#resource{name = Queue}) ->
+ is_cycle(Queue, Deaths)
+ end, Queues),
+ OldQueues = [rabbit_misc:table_lookup(D, <<"queue">>) ||
+ {table, D} <- Deaths],
+ OldQueues1 = [QName || {longstr, QName} <- OldQueues],
+ {NotCycling, [[QName | OldQueues1] ||
+ #resource{name = QName} <- Cycling]};
+ _ ->
+ NoCycles
+ end
+ end.
+
+is_cycle(Queue, Deaths) ->
+ {Cycle, Rest} =
+ lists:splitwith(
+ fun ({table, D}) ->
+ {longstr, Queue} =/= rabbit_misc:table_lookup(D, <<"queue">>);
+ (_) ->
+ true
+ end, Deaths),
+ %% Is there a cycle, and if so, is it "fully automatic", i.e. with
+ %% no reject in it?
+ case Rest of
+ [] -> false;
+ [H|_] -> lists:all(
+ fun ({table, D}) ->
+ {longstr, <<"rejected">>} =/=
+ rabbit_misc:table_lookup(D, <<"reason">>);
+ (_) ->
+ %% There was something we didn't expect, therefore
+ %% a client must have put it there, therefore the
+ %% cycle was not "fully automatic".
+ false
+ end, Cycle ++ [H])
+ end.
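+
+%% A worked example: a message already dead-lettered from <<"q2">> and
+%% <<"q1">> (both expired) that is about to be routed back to <<"q1">>
+%% constitutes a fully-automatic cycle:
+%%
+%%   Deaths = [{table, [{<<"queue">>,  longstr, <<"q2">>},
+%%                      {<<"reason">>, longstr, <<"expired">>}]},
+%%             {table, [{<<"queue">>,  longstr, <<"q1">>},
+%%                      {<<"reason">>, longstr, <<"expired">>}]}],
+%%   is_cycle(<<"q1">>, Deaths).  %% => true
+%%
+%% Had either hop been a 'rejected' death, a client was involved and the
+%% function returns false, so the message is allowed to keep flowing.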
+
+log_cycle_once(Queues) ->
+ Key = {queue_cycle, Queues},
+ case get(Key) of
+ true -> ok;
+ undefined -> rabbit_log:warning(
+ "Message dropped. Dead-letter queues cycle detected" ++
+ ": ~p~nThis cycle will NOT be reported again.~n",
+ [Queues]),
+ put(Key, true)
+ end.
diff --git a/deps/rabbit/src/rabbit_definitions.erl b/deps/rabbit/src/rabbit_definitions.erl
new file mode 100644
index 0000000000..0d0212dbae
--- /dev/null
+++ b/deps/rabbit/src/rabbit_definitions.erl
@@ -0,0 +1,767 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_definitions).
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-export([boot/0]).
+%% automatic import on boot
+-export([maybe_load_definitions/0, maybe_load_definitions/2, maybe_load_definitions_from/2,
+ has_configured_definitions_to_load/0]).
+%% import
+-export([import_raw/1, import_raw/2, import_parsed/1, import_parsed/2,
+ apply_defs/2, apply_defs/3, apply_defs/4, apply_defs/5]).
+
+-export([all_definitions/0]).
+-export([
+ list_users/0, list_vhosts/0, list_permissions/0, list_topic_permissions/0,
+ list_runtime_parameters/0, list_global_runtime_parameters/0, list_policies/0,
+ list_exchanges/0, list_queues/0, list_bindings/0,
+ is_internal_parameter/1
+]).
+-export([decode/1, decode/2, args/1]).
+
+-import(rabbit_misc, [pget/2]).
+
+%%
+%% API
+%%
+
+-type definition_category() :: 'users' |
+ 'vhosts' |
+ 'permissions' |
+ 'topic_permissions' |
+ 'parameters' |
+ 'global_parameters' |
+ 'policies' |
+ 'queues' |
+ 'bindings' |
+ 'exchanges'.
+
+-type definition_object() :: #{binary() => any()}.
+-type definition_list() :: [definition_object()].
+
+-type definitions() :: #{
+ definition_category() => definition_list()
+}.
+
+-export_type([definition_object/0, definition_list/0, definition_category/0, definitions/0]).
+
+-define(IMPORT_WORK_POOL, definition_import_pool).
+
+boot() ->
+ PoolSize = application:get_env(rabbit, definition_import_work_pool_size, rabbit_runtime:guess_number_of_cpu_cores()),
+ rabbit_sup:start_supervisor_child(definition_import_pool_sup, worker_pool_sup, [PoolSize, ?IMPORT_WORK_POOL]).
+
+maybe_load_definitions() ->
+ %% Note that management.load_definitions is handled in the plugin for backwards compatibility.
+ %% This executes the "core" version of load_definitions.
+ maybe_load_definitions(rabbit, load_definitions).
+
+-spec import_raw(Body :: binary() | iolist()) -> ok | {error, term()}.
+import_raw(Body) ->
+ rabbit_log:info("Asked to import definitions. Acting user: ~s", [?INTERNAL_USER]),
+ case decode([], Body) of
+ {error, E} -> {error, E};
+ {ok, _, Map} -> apply_defs(Map, ?INTERNAL_USER)
+ end.
+
+-spec import_raw(Body :: binary() | iolist(), VHost :: vhost:name()) -> ok | {error, term()}.
+import_raw(Body, VHost) ->
+ rabbit_log:info("Asked to import definitions. Acting user: ~s", [?INTERNAL_USER]),
+ case decode([], Body) of
+ {error, E} -> {error, E};
+ {ok, _, Map} -> apply_defs(Map, ?INTERNAL_USER, fun() -> ok end, VHost)
+ end.
+
+-spec import_parsed(Defs :: #{any() => any()} | list()) -> ok | {error, term()}.
+import_parsed(Body0) when is_list(Body0) ->
+ import_parsed(maps:from_list(Body0));
+import_parsed(Body0) when is_map(Body0) ->
+ rabbit_log:info("Asked to import definitions. Acting user: ~s", [?INTERNAL_USER]),
+ Body = atomise_map_keys(Body0),
+ apply_defs(Body, ?INTERNAL_USER).
+
+-spec import_parsed(Defs :: #{any() => any() | list()}, VHost :: vhost:name()) -> ok | {error, term()}.
+import_parsed(Body0, VHost) when is_list(Body0) ->
+ import_parsed(maps:from_list(Body0), VHost);
+import_parsed(Body0, VHost) ->
+ rabbit_log:info("Asked to import definitions. Acting user: ~s", [?INTERNAL_USER]),
+ Body = atomise_map_keys(Body0),
+ apply_defs(Body, ?INTERNAL_USER, fun() -> ok end, VHost).
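+
+%% A minimal import sketch (the JSON document below is hypothetical):
+%%
+%%   Defs = <<"{\"queues\": [{\"name\": \"qq.1\", \"vhost\": \"/\","
+%%            " \"durable\": true, \"auto_delete\": false,"
+%%            " \"arguments\": {\"x-queue-type\": \"quorum\"}}]}">>,
+%%   ok = rabbit_definitions:import_raw(Defs).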
+
+-spec all_definitions() -> map().
+all_definitions() ->
+ Xs = list_exchanges(),
+ Qs = list_queues(),
+ Bs = list_bindings(),
+
+ Users = list_users(),
+ VHosts = list_vhosts(),
+ Params = list_runtime_parameters(),
+ GParams = list_global_runtime_parameters(),
+ Pols = list_policies(),
+
+ Perms = list_permissions(),
+ TPerms = list_topic_permissions(),
+
+ {ok, Vsn} = application:get_key(rabbit, vsn),
+ #{
+ rabbit_version => rabbit_data_coercion:to_binary(Vsn),
+ rabbitmq_version => rabbit_data_coercion:to_binary(Vsn),
+ users => Users,
+ vhosts => VHosts,
+ permissions => Perms,
+ topic_permissions => TPerms,
+ parameters => Params,
+ global_parameters => GParams,
+ policies => Pols,
+ queues => Qs,
+ bindings => Bs,
+ exchanges => Xs
+ }.
+
+%%
+%% Implementation
+%%
+
+-spec has_configured_definitions_to_load() -> boolean().
+has_configured_definitions_to_load() ->
+ case application:get_env(rabbit, load_definitions) of
+ undefined -> false;
+ {ok, none} -> false;
+ {ok, _Path} -> true
+ end.
+
+maybe_load_definitions(App, Key) ->
+ case application:get_env(App, Key) of
+ undefined ->
+ rabbit_log:debug("No definition file configured to import via load_definitions"),
+ ok;
+ {ok, none} ->
+ rabbit_log:debug("No definition file configured to import via load_definitions"),
+ ok;
+ {ok, FileOrDir} ->
+ rabbit_log:debug("Will import definitions file from load_definitions"),
+ IsDir = filelib:is_dir(FileOrDir),
+ maybe_load_definitions_from(IsDir, FileOrDir)
+ end.
+
+maybe_load_definitions_from(true, Dir) ->
+ rabbit_log:info("Applying definitions from directory ~s", [Dir]),
+ load_definitions_from_files(file:list_dir(Dir), Dir);
+maybe_load_definitions_from(false, File) ->
+ load_definitions_from_file(File).
+
+load_definitions_from_files({ok, Filenames0}, Dir) ->
+ Filenames1 = lists:sort(Filenames0),
+ Filenames2 = [filename:join(Dir, F) || F <- Filenames1],
+ load_definitions_from_filenames(Filenames2);
+load_definitions_from_files({error, E}, Dir) ->
+ rabbit_log:error("Could not read definitions from directory ~s, Error: ~p", [Dir, E]),
+ {error, {could_not_read_defs, E}}.
+
+load_definitions_from_filenames([]) ->
+ ok;
+load_definitions_from_filenames([File|Rest]) ->
+ case load_definitions_from_file(File) of
+ ok -> load_definitions_from_filenames(Rest);
+ {error, E} -> {error, {failed_to_import_definitions, File, E}}
+ end.
+
+load_definitions_from_file(File) ->
+ case file:read_file(File) of
+ {ok, Body} ->
+ rabbit_log:info("Applying definitions from file at '~s'", [File]),
+ import_raw(Body);
+ {error, E} ->
+ rabbit_log:error("Could not read definitions from file at '~s', error: ~p", [File, E]),
+ {error, {could_not_read_defs, {File, E}}}
+ end.
+
+decode(Keys, Body) ->
+ case decode(Body) of
+ {ok, J0} ->
+            J = atomise_map_keys(J0),
+ Results = [get_or_missing(K, J) || K <- Keys],
+ case [E || E = {key_missing, _} <- Results] of
+ [] -> {ok, Results, J};
+ Errors -> {error, Errors}
+ end;
+ Else -> Else
+ end.
+
+decode(<<"">>) ->
+ {ok, #{}};
+decode(Body) ->
+ try
+ Decoded = rabbit_json:decode(Body),
+ Normalised = atomise_map_keys(Decoded),
+ {ok, Normalised}
+ catch error:_ -> {error, not_json}
+ end.
+
+atomise_map_keys(Decoded) ->
+ maps:fold(fun(K, V, Acc) ->
+ Acc#{rabbit_data_coercion:to_atom(K, utf8) => V}
+ end, Decoded, Decoded).
+
+-spec apply_defs(Map :: #{atom() => any()}, ActingUser :: rabbit_types:username()) -> 'ok' | {error, term()}.
+
+apply_defs(Map, ActingUser) ->
+ apply_defs(Map, ActingUser, fun () -> ok end).
+
+-spec apply_defs(Map :: #{atom() => any()}, ActingUser :: rabbit_types:username(),
+ SuccessFun :: fun(() -> 'ok')) -> 'ok' | {error, term()};
+ (Map :: #{atom() => any()}, ActingUser :: rabbit_types:username(),
+ VHost :: vhost:name()) -> 'ok' | {error, term()}.
+
+apply_defs(Map, ActingUser, VHost) when is_binary(VHost) ->
+ apply_defs(Map, ActingUser, fun () -> ok end, VHost);
+
+apply_defs(Map, ActingUser, SuccessFun) when is_function(SuccessFun) ->
+ Version = maps:get(rabbitmq_version, Map, maps:get(rabbit_version, Map, undefined)),
+ try
+ concurrent_for_all(users, ActingUser, Map,
+ fun(User, _Username) ->
+ rabbit_auth_backend_internal:put_user(User, Version, ActingUser)
+ end),
+ concurrent_for_all(vhosts, ActingUser, Map, fun add_vhost/2),
+ validate_limits(Map),
+ concurrent_for_all(permissions, ActingUser, Map, fun add_permission/2),
+ concurrent_for_all(topic_permissions, ActingUser, Map, fun add_topic_permission/2),
+ sequential_for_all(parameters, ActingUser, Map, fun add_parameter/2),
+ sequential_for_all(global_parameters, ActingUser, Map, fun add_global_parameter/2),
+ %% importing policies concurrently can be unsafe as queues will be getting
+ %% potentially out of order notifications of applicable policy changes
+ sequential_for_all(policies, ActingUser, Map, fun add_policy/2),
+ concurrent_for_all(queues, ActingUser, Map, fun add_queue/2),
+ concurrent_for_all(exchanges, ActingUser, Map, fun add_exchange/2),
+ concurrent_for_all(bindings, ActingUser, Map, fun add_binding/2),
+ SuccessFun(),
+ ok
+ catch {error, E} -> {error, E};
+ exit:E -> {error, E}
+ end.
+
+-spec apply_defs(Map :: #{atom() => any()},
+ ActingUser :: rabbit_types:username(),
+ SuccessFun :: fun(() -> 'ok'),
+ VHost :: vhost:name()) -> 'ok' | {error, term()}.
+
+apply_defs(Map, ActingUser, SuccessFun, VHost) when is_binary(VHost) ->
+ rabbit_log:info("Asked to import definitions for a virtual host. Virtual host: ~p, acting user: ~p",
+ [VHost, ActingUser]),
+ try
+ validate_limits(Map, VHost),
+ sequential_for_all(parameters, ActingUser, Map, VHost, fun add_parameter/3),
+ %% importing policies concurrently can be unsafe as queues will be getting
+ %% potentially out of order notifications of applicable policy changes
+ sequential_for_all(policies, ActingUser, Map, VHost, fun add_policy/3),
+ concurrent_for_all(queues, ActingUser, Map, VHost, fun add_queue/3),
+ concurrent_for_all(exchanges, ActingUser, Map, VHost, fun add_exchange/3),
+ concurrent_for_all(bindings, ActingUser, Map, VHost, fun add_binding/3),
+ SuccessFun()
+ catch {error, E} -> {error, format(E)};
+ exit:E -> {error, format(E)}
+ end.
+
+-spec apply_defs(Map :: #{atom() => any()},
+ ActingUser :: rabbit_types:username(),
+ SuccessFun :: fun(() -> 'ok'),
+ ErrorFun :: fun((any()) -> 'ok'),
+ VHost :: vhost:name()) -> 'ok' | {error, term()}.
+
+apply_defs(Map, ActingUser, SuccessFun, ErrorFun, VHost) ->
+ rabbit_log:info("Asked to import definitions for a virtual host. Virtual host: ~p, acting user: ~p",
+ [VHost, ActingUser]),
+ try
+ validate_limits(Map, VHost),
+ sequential_for_all(parameters, ActingUser, Map, VHost, fun add_parameter/3),
+ %% importing policies concurrently can be unsafe as queues will be getting
+ %% potentially out of order notifications of applicable policy changes
+ sequential_for_all(policies, ActingUser, Map, VHost, fun add_policy/3),
+ concurrent_for_all(queues, ActingUser, Map, VHost, fun add_queue/3),
+ concurrent_for_all(exchanges, ActingUser, Map, VHost, fun add_exchange/3),
+ concurrent_for_all(bindings, ActingUser, Map, VHost, fun add_binding/3),
+ SuccessFun()
+ catch {error, E} -> ErrorFun(format(E));
+ exit:E -> ErrorFun(format(E))
+ end.
+
+sequential_for_all(Category, ActingUser, Definitions, Fun) ->
+ case maps:get(rabbit_data_coercion:to_atom(Category), Definitions, undefined) of
+ undefined -> ok;
+ List ->
+ case length(List) of
+ 0 -> ok;
+ N -> rabbit_log:info("Importing sequentially ~p ~s...", [N, human_readable_category_name(Category)])
+ end,
+ [begin
+ %% keys are expected to be atoms
+ Fun(atomize_keys(M), ActingUser)
+ end || M <- List, is_map(M)]
+ end.
+
+sequential_for_all(Name, ActingUser, Definitions, VHost, Fun) ->
+ case maps:get(rabbit_data_coercion:to_atom(Name), Definitions, undefined) of
+ undefined -> ok;
+ List -> [Fun(VHost, atomize_keys(M), ActingUser) || M <- List, is_map(M)]
+ end.
+
+concurrent_for_all(Category, ActingUser, Definitions, Fun) ->
+ case maps:get(rabbit_data_coercion:to_atom(Category), Definitions, undefined) of
+ undefined -> ok;
+ List ->
+ case length(List) of
+ 0 -> ok;
+ N -> rabbit_log:info("Importing concurrently ~p ~s...", [N, human_readable_category_name(Category)])
+ end,
+ WorkPoolFun = fun(M) ->
+ Fun(atomize_keys(M), ActingUser)
+ end,
+ do_concurrent_for_all(List, WorkPoolFun)
+ end.
+
+concurrent_for_all(Name, ActingUser, Definitions, VHost, Fun) ->
+ case maps:get(rabbit_data_coercion:to_atom(Name), Definitions, undefined) of
+ undefined -> ok;
+ List ->
+ WorkPoolFun = fun(M) ->
+ Fun(VHost, atomize_keys(M), ActingUser)
+ end,
+ do_concurrent_for_all(List, WorkPoolFun)
+ end.
+
+do_concurrent_for_all(List, WorkPoolFun) ->
+ {ok, Gatherer} = gatherer:start_link(),
+ [begin
+ %% keys are expected to be atoms
+ ok = gatherer:fork(Gatherer),
+ worker_pool:submit_async(
+ ?IMPORT_WORK_POOL,
+ fun() ->
+ try
+ WorkPoolFun(M)
+ catch {error, E} -> gatherer:in(Gatherer, {error, E});
+ _:E -> gatherer:in(Gatherer, {error, E})
+ end,
+ gatherer:finish(Gatherer)
+ end)
+ end || M <- List, is_map(M)],
+ case gatherer:out(Gatherer) of
+ empty ->
+ ok = gatherer:stop(Gatherer);
+ {value, {error, E}} ->
+ ok = gatherer:stop(Gatherer),
+ throw({error, E})
+ end.
+
+-spec atomize_keys(#{any() => any()}) -> #{atom() => any()}.
+
+atomize_keys(M) ->
+ maps:fold(fun(K, V, Acc) ->
+ maps:put(rabbit_data_coercion:to_atom(K), V, Acc)
+ end, #{}, M).
+
+-spec human_readable_category_name(definition_category()) -> string().
+
+human_readable_category_name(topic_permissions) -> "topic permissions";
+human_readable_category_name(parameters) -> "runtime parameters";
+human_readable_category_name(global_parameters) -> "global runtime parameters";
+human_readable_category_name(Other) -> rabbit_data_coercion:to_list(Other).
+
+
+format(#amqp_error{name = Name, explanation = Explanation}) ->
+ rabbit_data_coercion:to_binary(rabbit_misc:format("~s: ~s", [Name, Explanation]));
+format({no_such_vhost, undefined}) ->
+ rabbit_data_coercion:to_binary(
+ "Virtual host does not exist and is not specified in definitions file.");
+format({no_such_vhost, VHost}) ->
+ rabbit_data_coercion:to_binary(
+ rabbit_misc:format("Please create virtual host \"~s\" prior to importing definitions.",
+ [VHost]));
+format({vhost_limit_exceeded, ErrMsg}) ->
+ rabbit_data_coercion:to_binary(ErrMsg);
+format(E) ->
+ rabbit_data_coercion:to_binary(rabbit_misc:format("~p", [E])).
+
+add_parameter(Param, Username) ->
+ VHost = maps:get(vhost, Param, undefined),
+ add_parameter(VHost, Param, Username).
+
+add_parameter(VHost, Param, Username) ->
+ Comp = maps:get(component, Param, undefined),
+ Key = maps:get(name, Param, undefined),
+ Term = maps:get(value, Param, undefined),
+ Result = case is_map(Term) of
+ true ->
+ %% coerce maps to proplists for backwards compatibility.
+ %% See rabbitmq-management#528.
+ TermProplist = rabbit_data_coercion:to_proplist(Term),
+ rabbit_runtime_parameters:set(VHost, Comp, Key, TermProplist, Username);
+ _ ->
+ rabbit_runtime_parameters:set(VHost, Comp, Key, Term, Username)
+ end,
+ case Result of
+ ok -> ok;
+ {error_string, E} ->
+ S = rabbit_misc:format(" (~s/~s/~s)", [VHost, Comp, Key]),
+ exit(rabbit_data_coercion:to_binary(rabbit_misc:escape_html_tags(E ++ S)))
+ end.
+
+add_global_parameter(Param, Username) ->
+ Key = maps:get(name, Param, undefined),
+ Term = maps:get(value, Param, undefined),
+ case is_map(Term) of
+ true ->
+ %% coerce maps to proplists for backwards compatibility.
+ %% See rabbitmq-management#528.
+ TermProplist = rabbit_data_coercion:to_proplist(Term),
+ rabbit_runtime_parameters:set_global(Key, TermProplist, Username);
+ _ ->
+ rabbit_runtime_parameters:set_global(Key, Term, Username)
+ end.
+
+add_policy(Param, Username) ->
+ VHost = maps:get(vhost, Param, undefined),
+ add_policy(VHost, Param, Username).
+
+add_policy(VHost, Param, Username) ->
+ Key = maps:get(name, Param, undefined),
+ case rabbit_policy:set(
+ VHost, Key, maps:get(pattern, Param, undefined),
+ case maps:get(definition, Param, undefined) of
+ undefined -> undefined;
+ Def -> rabbit_data_coercion:to_proplist(Def)
+ end,
+ maps:get(priority, Param, undefined),
+ maps:get('apply-to', Param, <<"all">>),
+ Username) of
+ ok -> ok;
+ {error_string, E} -> S = rabbit_misc:format(" (~s/~s)", [VHost, Key]),
+ exit(rabbit_data_coercion:to_binary(rabbit_misc:escape_html_tags(E ++ S)))
+ end.
+
+-spec add_vhost(map(), rabbit_types:username()) -> ok.
+
+add_vhost(VHost, ActingUser) ->
+ VHostName = maps:get(name, VHost, undefined),
+ VHostTrace = maps:get(tracing, VHost, undefined),
+ VHostDefinition = maps:get(definition, VHost, undefined),
+ VHostTags = maps:get(tags, VHost, undefined),
+ rabbit_vhost:put_vhost(VHostName, VHostDefinition, VHostTags, VHostTrace, ActingUser).
+
+add_permission(Permission, ActingUser) ->
+ rabbit_auth_backend_internal:set_permissions(maps:get(user, Permission, undefined),
+ maps:get(vhost, Permission, undefined),
+ maps:get(configure, Permission, undefined),
+ maps:get(write, Permission, undefined),
+ maps:get(read, Permission, undefined),
+ ActingUser).
+
+add_topic_permission(TopicPermission, ActingUser) ->
+ rabbit_auth_backend_internal:set_topic_permissions(
+ maps:get(user, TopicPermission, undefined),
+ maps:get(vhost, TopicPermission, undefined),
+ maps:get(exchange, TopicPermission, undefined),
+ maps:get(write, TopicPermission, undefined),
+ maps:get(read, TopicPermission, undefined),
+ ActingUser).
+
+add_queue(Queue, ActingUser) ->
+ add_queue_int(Queue, r(queue, Queue), ActingUser).
+
+add_queue(VHost, Queue, ActingUser) ->
+ add_queue_int(Queue, rv(VHost, queue, Queue), ActingUser).
+
+add_queue_int(_Queue, R = #resource{kind = queue,
+ name = <<"amq.", _/binary>>}, ActingUser) ->
+ Name = R#resource.name,
+ rabbit_log:warning("Skipping import of a queue whose name begins with 'amq.', "
+ "name: ~s, acting user: ~s", [Name, ActingUser]);
+add_queue_int(Queue, Name, ActingUser) ->
+ rabbit_amqqueue:declare(Name,
+ maps:get(durable, Queue, undefined),
+ maps:get(auto_delete, Queue, undefined),
+ args(maps:get(arguments, Queue, undefined)),
+ none,
+ ActingUser).
+
+add_exchange(Exchange, ActingUser) ->
+ add_exchange_int(Exchange, r(exchange, Exchange), ActingUser).
+
+add_exchange(VHost, Exchange, ActingUser) ->
+ add_exchange_int(Exchange, rv(VHost, exchange, Exchange), ActingUser).
+
+add_exchange_int(_Exchange, #resource{kind = exchange, name = <<"">>}, ActingUser) ->
+ rabbit_log:warning("Not importing the default exchange, acting user: ~s", [ActingUser]);
+add_exchange_int(_Exchange, R = #resource{kind = exchange,
+ name = <<"amq.", _/binary>>}, ActingUser) ->
+ Name = R#resource.name,
+ rabbit_log:warning("Skipping import of an exchange whose name begins with 'amq.', "
+ "name: ~s, acting user: ~s", [Name, ActingUser]);
+add_exchange_int(Exchange, Name, ActingUser) ->
+ Internal = case maps:get(internal, Exchange, undefined) of
+ undefined -> false; %% =< 2.2.0
+ I -> I
+ end,
+ rabbit_exchange:declare(Name,
+ rabbit_exchange:check_type(maps:get(type, Exchange, undefined)),
+ maps:get(durable, Exchange, undefined),
+ maps:get(auto_delete, Exchange, undefined),
+ Internal,
+ args(maps:get(arguments, Exchange, undefined)),
+ ActingUser).
+
+add_binding(Binding, ActingUser) ->
+ DestType = dest_type(Binding),
+ add_binding_int(Binding, r(exchange, source, Binding),
+ r(DestType, destination, Binding), ActingUser).
+
+add_binding(VHost, Binding, ActingUser) ->
+ DestType = dest_type(Binding),
+ add_binding_int(Binding, rv(VHost, exchange, source, Binding),
+ rv(VHost, DestType, destination, Binding), ActingUser).
+
+add_binding_int(Binding, Source, Destination, ActingUser) ->
+ rabbit_binding:add(
+ #binding{source = Source,
+ destination = Destination,
+ key = maps:get(routing_key, Binding, undefined),
+ args = args(maps:get(arguments, Binding, undefined))},
+ ActingUser).
+
+dest_type(Binding) ->
+ rabbit_data_coercion:to_atom(maps:get(destination_type, Binding, undefined)).
+
+r(Type, Props) -> r(Type, name, Props).
+
+r(Type, Name, Props) ->
+ rabbit_misc:r(maps:get(vhost, Props, undefined), Type, maps:get(Name, Props, undefined)).
+
+rv(VHost, Type, Props) -> rv(VHost, Type, name, Props).
+
+rv(VHost, Type, Name, Props) ->
+ rabbit_misc:r(VHost, Type, maps:get(Name, Props, undefined)).
+
+%%--------------------------------------------------------------------
+
+validate_limits(All) ->
+ case maps:get(queues, All, undefined) of
+ undefined -> ok;
+ Queues0 ->
+ {ok, VHostMap} = filter_out_existing_queues(Queues0),
+ maps:fold(fun validate_vhost_limit/3, ok, VHostMap)
+ end.
+
+validate_limits(All, VHost) ->
+ case maps:get(queues, All, undefined) of
+ undefined -> ok;
+ Queues0 ->
+ Queues1 = filter_out_existing_queues(VHost, Queues0),
+ AddCount = length(Queues1),
+ validate_vhost_limit(VHost, AddCount, ok)
+ end.
+
+filter_out_existing_queues(Queues) ->
+ build_filtered_map(Queues, maps:new()).
+
+filter_out_existing_queues(VHost, Queues) ->
+ Pred = fun(Queue) ->
+ Rec = rv(VHost, queue, <<"name">>, Queue),
+ case rabbit_amqqueue:lookup(Rec) of
+ {ok, _} -> false;
+ {error, not_found} -> true
+ end
+ end,
+ lists:filter(Pred, Queues).
+
+build_queue_data(Queue) ->
+ VHost = maps:get(<<"vhost">>, Queue, undefined),
+ Rec = rv(VHost, queue, <<"name">>, Queue),
+ {Rec, VHost}.
+
+build_filtered_map([], AccMap) ->
+ {ok, AccMap};
+build_filtered_map([Queue|Rest], AccMap0) ->
+ {Rec, VHost} = build_queue_data(Queue),
+ case rabbit_amqqueue:lookup(Rec) of
+ {error, not_found} ->
+ AccMap1 = maps:update_with(VHost, fun(V) -> V + 1 end, 1, AccMap0),
+ build_filtered_map(Rest, AccMap1);
+ {ok, _} ->
+ build_filtered_map(Rest, AccMap0)
+ end.
+
+validate_vhost_limit(VHost, AddCount, ok) ->
+ WouldExceed = rabbit_vhost_limit:would_exceed_queue_limit(AddCount, VHost),
+ validate_vhost_queue_limit(VHost, AddCount, WouldExceed).
+
+validate_vhost_queue_limit(_VHost, 0, _) ->
+ % Note: not adding any new queues so the upload
+ % must be update-only
+ ok;
+validate_vhost_queue_limit(_VHost, _AddCount, false) ->
+ % Note: would not exceed queue limit
+ ok;
+validate_vhost_queue_limit(VHost, AddCount, {true, Limit, QueueCount}) ->
+ ErrFmt = "Adding ~B queue(s) to virtual host \"~s\" would exceed the limit of ~B queue(s).~n~nThis virtual host currently has ~B queue(s) defined.~n~nImport aborted!",
+ ErrInfo = [AddCount, VHost, Limit, QueueCount],
+ ErrMsg = rabbit_misc:format(ErrFmt, ErrInfo),
+ exit({vhost_limit_exceeded, ErrMsg}).
+
+get_or_missing(K, L) ->
+ case maps:get(K, L, undefined) of
+ undefined -> {key_missing, K};
+ V -> V
+ end.
+
+%% callers default a missing "arguments" key to undefined, so treat it
+%% like an empty table
+args(undefined) -> args(#{});
+args([])        -> args(#{});
+args(L)         -> rabbit_misc:to_amqp_table(L).
+
+%%
+%% Export
+%%
+
+list_exchanges() ->
+    %% exclude internal exchanges, the default exchange and amq.* exchanges:
+    %% they are not meant to be declared or used by applications
+ [exchange_definition(X) || X <- lists:filter(fun(#exchange{internal = true}) -> false;
+ (#exchange{name = #resource{name = <<>>}}) -> false;
+ (X) -> not rabbit_exchange:is_amq_prefixed(X)
+ end,
+ rabbit_exchange:list())].
+
+exchange_definition(#exchange{name = #resource{virtual_host = VHost, name = Name},
+ type = Type,
+ durable = Durable, auto_delete = AD, arguments = Args}) ->
+ #{<<"vhost">> => VHost,
+ <<"name">> => Name,
+ <<"type">> => Type,
+ <<"durable">> => Durable,
+ <<"auto_delete">> => AD,
+ <<"arguments">> => rabbit_misc:amqp_table(Args)}.
+
+list_queues() ->
+ %% exclude exclusive queues, they cannot be restored
+ [queue_definition(Q) || Q <- lists:filter(fun(Q0) ->
+ amqqueue:get_exclusive_owner(Q0) =:= none
+ end,
+ rabbit_amqqueue:list())].
+
+queue_definition(Q) ->
+ #resource{virtual_host = VHost, name = Name} = amqqueue:get_name(Q),
+ Type = case amqqueue:get_type(Q) of
+ rabbit_classic_queue -> classic;
+ rabbit_quorum_queue -> quorum;
+ rabbit_stream_queue -> stream;
+ T -> T
+ end,
+ #{
+ <<"vhost">> => VHost,
+ <<"name">> => Name,
+ <<"type">> => Type,
+ <<"durable">> => amqqueue:is_durable(Q),
+ <<"auto_delete">> => amqqueue:is_auto_delete(Q),
+ <<"arguments">> => rabbit_misc:amqp_table(amqqueue:get_arguments(Q))
+ }.
+
+list_bindings() ->
+ [binding_definition(B) || B <- rabbit_binding:list_explicit()].
+
+binding_definition(#binding{source = S,
+ key = RoutingKey,
+ destination = D,
+ args = Args}) ->
+ #{
+ <<"source">> => S#resource.name,
+ <<"vhost">> => S#resource.virtual_host,
+ <<"destination">> => D#resource.name,
+ <<"destination_type">> => D#resource.kind,
+ <<"routing_key">> => RoutingKey,
+ <<"arguments">> => rabbit_misc:amqp_table(Args)
+ }.
+
+list_vhosts() ->
+ [vhost_definition(V) || V <- rabbit_vhost:all()].
+
+vhost_definition(VHost) ->
+ #{
+ <<"name">> => vhost:get_name(VHost),
+ <<"limits">> => vhost:get_limits(VHost),
+ <<"metadata">> => vhost:get_metadata(VHost)
+ }.
+
+list_users() ->
+ [user_definition(U) || U <- rabbit_auth_backend_internal:all_users()].
+
+user_definition(User) ->
+ #{<<"name">> => internal_user:get_username(User),
+ <<"password_hash">> => base64:encode(internal_user:get_password_hash(User)),
+ <<"hashing_algorithm">> => rabbit_auth_backend_internal:hashing_module_for_user(User),
+ <<"tags">> => tags_as_binaries(internal_user:get_tags(User)),
+ <<"limits">> => internal_user:get_limits(User)
+ }.
+
+list_runtime_parameters() ->
+ [runtime_parameter_definition(P) || P <- rabbit_runtime_parameters:list(), is_list(P)].
+
+runtime_parameter_definition(Param) ->
+ #{
+ <<"vhost">> => pget(vhost, Param),
+ <<"component">> => pget(component, Param),
+ <<"name">> => pget(name, Param),
+ <<"value">> => maps:from_list(pget(value, Param))
+ }.
+
+list_global_runtime_parameters() ->
+ [global_runtime_parameter_definition(P) || P <- rabbit_runtime_parameters:list_global(), not is_internal_parameter(P)].
+
+global_runtime_parameter_definition(P0) ->
+ P = [{rabbit_data_coercion:to_binary(K), V} || {K, V} <- P0],
+ maps:from_list(P).
+
+-define(INTERNAL_GLOBAL_PARAM_PREFIX, "internal").
+
+is_internal_parameter(Param) ->
+ Name = rabbit_data_coercion:to_list(pget(name, Param)),
+    %% if a global parameter name starts with "internal", consider it internal
+ %% and exclude it from definition export
+ string:left(Name, length(?INTERNAL_GLOBAL_PARAM_PREFIX)) =:= ?INTERNAL_GLOBAL_PARAM_PREFIX.
+
+list_policies() ->
+ [policy_definition(P) || P <- rabbit_policy:list()].
+
+policy_definition(Policy) ->
+ #{
+ <<"vhost">> => pget(vhost, Policy),
+ <<"name">> => pget(name, Policy),
+ <<"pattern">> => pget(pattern, Policy),
+ <<"apply-to">> => pget('apply-to', Policy),
+ <<"priority">> => pget(priority, Policy),
+ <<"definition">> => maps:from_list(pget(definition, Policy))
+ }.
+
+list_permissions() ->
+ [permission_definition(P) || P <- rabbit_auth_backend_internal:list_permissions()].
+
+permission_definition(P0) ->
+ P = [{rabbit_data_coercion:to_binary(K), V} || {K, V} <- P0],
+ maps:from_list(P).
+
+list_topic_permissions() ->
+ [topic_permission_definition(P) || P <- rabbit_auth_backend_internal:list_topic_permissions()].
+
+topic_permission_definition(P0) ->
+ P = [{rabbit_data_coercion:to_binary(K), V} || {K, V} <- P0],
+ maps:from_list(P).
+
+tags_as_binaries(Tags) ->
+ list_to_binary(string:join([atom_to_list(T) || T <- Tags], ",")).
diff --git a/deps/rabbit/src/rabbit_diagnostics.erl b/deps/rabbit/src/rabbit_diagnostics.erl
new file mode 100644
index 0000000000..999596cdc9
--- /dev/null
+++ b/deps/rabbit/src/rabbit_diagnostics.erl
@@ -0,0 +1,119 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_diagnostics).
+
+-define(PROCESS_INFO,
+ [registered_name, current_stacktrace, initial_call, message_queue_len,
+ links, monitors, monitored_by, heap_size]).
+
+-export([maybe_stuck/0, maybe_stuck/1, top_memory_use/0, top_memory_use/1,
+ top_binary_refs/0, top_binary_refs/1]).
+
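+%% These functions are intended to be run from a shell attached to a
+%% running node (e.g. via `erl -remsh`); a usage sketch:
+%%
+%%   rabbit_diagnostics:maybe_stuck(10000).  %% scan for ~10 seconds
+%%   rabbit_diagnostics:top_memory_use(10).  %% 10 largest processes
+%%   rabbit_diagnostics:top_binary_refs(10). %% 10 largest binary holders
+%%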
+maybe_stuck() -> maybe_stuck(5000).
+
+maybe_stuck(Timeout) ->
+ Pids = processes(),
+ io:format("~s There are ~p processes.~n", [get_time(), length(Pids)]),
+ maybe_stuck(Pids, Timeout).
+
+maybe_stuck(Pids, Timeout) when Timeout =< 0 ->
+ io:format("~s Found ~p suspicious processes.~n", [get_time(), length(Pids)]),
+ [io:format("~s ~p~n", [get_time(), info(Pid)]) || Pid <- Pids],
+ ok;
+maybe_stuck(Pids, Timeout) ->
+ Pids2 = [P || P <- Pids, looks_stuck(P)],
+ io:format("~s Investigated ~p processes this round, ~pms to go.~n",
+ [get_time(), length(Pids2), Timeout]),
+ timer:sleep(500),
+ maybe_stuck(Pids2, Timeout - 500).
+
+looks_stuck(Pid) ->
+ case info(Pid, status, gone) of
+ {status, waiting} ->
+ %% It's tempting to just check for message_queue_len > 0
+ %% here rather than mess around with stack traces and
+ %% heuristics. But really, sometimes freshly stuck
+ %% processes can have 0 messages...
+ case info(Pid, current_stacktrace, gone) of
+ {current_stacktrace, [H|_]} ->
+ maybe_stuck_stacktrace(H);
+ _ ->
+ false
+ end;
+ _ ->
+ false
+ end.
+
+maybe_stuck_stacktrace({gen_server2, process_next_msg, _}) -> false;
+maybe_stuck_stacktrace({gen_event, fetch_msg, _}) -> false;
+maybe_stuck_stacktrace({prim_inet, accept0, _}) -> false;
+maybe_stuck_stacktrace({prim_inet, recv0, _}) -> false;
+maybe_stuck_stacktrace({rabbit_heartbeat, heartbeater, _}) -> false;
+maybe_stuck_stacktrace({rabbit_net, recv, _}) -> false;
+maybe_stuck_stacktrace({group, _, _}) -> false;
+maybe_stuck_stacktrace({shell, _, _}) -> false;
+maybe_stuck_stacktrace({io, _, _}) -> false;
+maybe_stuck_stacktrace({M, F, A, _}) ->
+ maybe_stuck_stacktrace({M, F, A});
+maybe_stuck_stacktrace({_M, F, _A}) ->
+ case string:str(atom_to_list(F), "loop") of
+ 0 -> true;
+ _ -> false
+ end.
+
+top_memory_use() -> top_memory_use(30).
+
+top_memory_use(Count) ->
+ Pids = processes(),
+ io:format("~s Memory use: top ~p of ~p processes.~n", [get_time(), Count, length(Pids)]),
+ Procs = [{info(Pid, memory, 0), info(Pid)} || Pid <- Pids],
+ Sorted = lists:sublist(lists:reverse(lists:sort(Procs)), Count),
+ io:format("~s ~p~n", [get_time(), Sorted]).
+
+top_binary_refs() -> top_binary_refs(30).
+
+top_binary_refs(Count) ->
+ Pids = processes(),
+ io:format("~s Binary refs: top ~p of ~p processes.~n", [get_time(), Count, length(Pids)]),
+ Procs = [{{binary_refs, binary_refs(Pid)}, info(Pid)} || Pid <- Pids],
+ Sorted = lists:sublist(lists:reverse(lists:sort(Procs)), Count),
+ io:format("~s ~p~n", [get_time(), Sorted]).
+
+binary_refs(Pid) ->
+ case info(Pid, binary, []) of
+ {binary, Refs} ->
+ lists:sum([Sz || {_Ptr, Sz} <- lists:usort([{Ptr, Sz} ||
+ {Ptr, Sz, _Cnt} <- Refs])]);
+ _ -> 0
+ end.
+
+info(Pid) ->
+ [{pid, Pid} | info(Pid, ?PROCESS_INFO, [])].
+
+info(Pid, Infos, Default) ->
+ try
+ process_info(Pid, Infos)
+ catch
+ _:_ -> case is_atom(Infos) of
+ true -> {Infos, Default};
+ false -> Default
+ end
+ end.
+
+get_time() ->
+ {{Y,M,D}, {H,Min,Sec}} = calendar:local_time(),
+ [ integer_to_list(Y), "-",
+ prefix_zero(integer_to_list(M)), "-",
+ prefix_zero(integer_to_list(D)), " ",
+ prefix_zero(integer_to_list(H)), ":",
+ prefix_zero(integer_to_list(Min)), ":",
+ prefix_zero(integer_to_list(Sec))
+ ].
+
+prefix_zero([C]) -> [$0, C];
+prefix_zero([_,_] = Full) -> Full.
diff --git a/deps/rabbit/src/rabbit_direct.erl b/deps/rabbit/src/rabbit_direct.erl
new file mode 100644
index 0000000000..3fc2d75908
--- /dev/null
+++ b/deps/rabbit/src/rabbit_direct.erl
@@ -0,0 +1,235 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_direct).
+
+-export([boot/0, force_event_refresh/1, list/0, connect/5,
+ start_channel/10, disconnect/2]).
+
+-deprecated([{force_event_refresh, 1, eventually}]).
+
+%% Internal
+-export([list_local/0]).
+
+%% For testing only
+-export([extract_extra_auth_props/4]).
+
+-include("rabbit.hrl").
+-include("rabbit_misc.hrl").
+
+%%----------------------------------------------------------------------------
+
+-spec boot() -> 'ok'.
+
+boot() -> rabbit_sup:start_supervisor_child(
+ rabbit_direct_client_sup, rabbit_client_sup,
+ [{local, rabbit_direct_client_sup},
+ {rabbit_channel_sup, start_link, []}]).
+
+-spec force_event_refresh(reference()) -> 'ok'.
+
+force_event_refresh(Ref) ->
+ [Pid ! {force_event_refresh, Ref} || Pid <- list()],
+ ok.
+
+-spec list_local() -> [pid()].
+
+list_local() ->
+ pg_local:get_members(rabbit_direct).
+
+-spec list() -> [pid()].
+
+list() ->
+ Nodes = rabbit_nodes:all_running(),
+ rabbit_misc:append_rpc_all_nodes(Nodes, rabbit_direct, list_local, [], ?RPC_TIMEOUT).
+
+%%----------------------------------------------------------------------------
+
+auth_fun({none, _}, _VHost, _ExtraAuthProps) ->
+ fun () -> {ok, rabbit_auth_backend_dummy:user()} end;
+
+auth_fun({Username, none}, _VHost, _ExtraAuthProps) ->
+ fun () -> rabbit_access_control:check_user_login(Username, []) end;
+
+auth_fun({Username, Password}, VHost, ExtraAuthProps) ->
+ fun () ->
+ rabbit_access_control:check_user_login(
+ Username,
+ [{password, Password}, {vhost, VHost}] ++ ExtraAuthProps)
+ end.
+
+-spec connect
+ (({'none', 'none'} | {rabbit_types:username(), 'none'} |
+ {rabbit_types:username(), rabbit_types:password()}),
+ rabbit_types:vhost(), rabbit_types:protocol(), pid(),
+ rabbit_event:event_props()) ->
+ rabbit_types:ok_or_error2(
+ {rabbit_types:user(), rabbit_framing:amqp_table()},
+        'broker_not_found_on_node' | 'broker_is_booting' |
+        'not_allowed' | {'internal_error', 'vhost_is_down'} |
+        {'auth_failure', string()} | 'access_refused').
+
+connect(Creds, VHost, Protocol, Pid, Infos) ->
+ ExtraAuthProps = extract_extra_auth_props(Creds, VHost, Pid, Infos),
+ AuthFun = auth_fun(Creds, VHost, ExtraAuthProps),
+ case rabbit:is_running() of
+ true ->
+ case whereis(rabbit_direct_client_sup) of
+ undefined ->
+ {error, broker_is_booting};
+ _ ->
+ case is_over_vhost_connection_limit(VHost, Creds, Pid) of
+ true ->
+ {error, not_allowed};
+ false ->
+ case is_vhost_alive(VHost, Creds, Pid) of
+ false ->
+ {error, {internal_error, vhost_is_down}};
+ true ->
+ case AuthFun() of
+ {ok, User = #user{username = Username}} ->
+ notify_auth_result(Username,
+ user_authentication_success, []),
+ connect1(User, VHost, Protocol, Pid, Infos);
+ {refused, Username, Msg, Args} ->
+ notify_auth_result(Username,
+ user_authentication_failure,
+ [{error, rabbit_misc:format(Msg, Args)}]),
+ {error, {auth_failure, "Refused"}}
+ end %% AuthFun()
+ end %% is_vhost_alive
+ end %% is_over_vhost_connection_limit
+ end;
+ false -> {error, broker_not_found_on_node}
+ end.
+
+extract_extra_auth_props(Creds, VHost, Pid, Infos) ->
+ case extract_protocol(Infos) of
+ undefined ->
+ [];
+ Protocol ->
+ maybe_call_connection_info_module(Protocol, Creds, VHost, Pid, Infos)
+ end.
+
+extract_protocol(Infos) ->
+ case proplists:get_value(protocol, Infos, undefined) of
+ {Protocol, _Version} ->
+ Protocol;
+ _ ->
+ undefined
+ end.
+
+maybe_call_connection_info_module(Protocol, Creds, VHost, Pid, Infos) ->
+ Module = rabbit_data_coercion:to_atom(string:to_lower(
+ "rabbit_" ++
+ lists:flatten(string:replace(rabbit_data_coercion:to_list(Protocol), " ", "_", all)) ++
+ "_connection_info")
+ ),
+ Args = [Creds, VHost, Pid, Infos],
+ code_server_cache:maybe_call_mfa(Module, additional_authn_params, Args, []).
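+
+%% For example, a protocol of {'MQTT', "3.1.1"} resolves to the module
+%% name rabbit_mqtt_connection_info and {'Web MQTT', _} to
+%% rabbit_web_mqtt_connection_info; when no such module is loaded,
+%% code_server_cache:maybe_call_mfa/4 returns the [] default.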
+
+is_vhost_alive(VHost, {Username, _Password}, Pid) ->
+ PrintedUsername = case Username of
+ none -> "";
+ _ -> Username
+ end,
+ case rabbit_vhost_sup_sup:is_vhost_alive(VHost) of
+ true -> true;
+ false ->
+ rabbit_log_connection:error(
+ "Error on Direct connection ~p~n"
+ "access to vhost '~s' refused for user '~s': "
+ "vhost '~s' is down",
+ [Pid, VHost, PrintedUsername, VHost]),
+ false
+ end.
+
+is_over_vhost_connection_limit(VHost, {Username, _Password}, Pid) ->
+ PrintedUsername = case Username of
+ none -> "";
+ _ -> Username
+ end,
+ try rabbit_vhost_limit:is_over_connection_limit(VHost) of
+ false -> false;
+ {true, Limit} ->
+ rabbit_log_connection:error(
+ "Error on Direct connection ~p~n"
+ "access to vhost '~s' refused for user '~s': "
+ "vhost connection limit (~p) is reached",
+ [Pid, VHost, PrintedUsername, Limit]),
+ true
+ catch
+ throw:{error, {no_such_vhost, VHost}} ->
+ rabbit_log_connection:error(
+ "Error on Direct connection ~p~n"
+ "vhost ~s not found", [Pid, VHost]),
+ true
+ end.
+
+notify_auth_result(Username, AuthResult, ExtraProps) ->
+ EventProps = [{connection_type, direct},
+ {name, case Username of none -> ''; _ -> Username end}] ++
+ ExtraProps,
+ rabbit_event:notify(AuthResult, [P || {_, V} = P <- EventProps, V =/= '']).
+
+connect1(User = #user{username = Username}, VHost, Protocol, Pid, Infos) ->
+ case rabbit_auth_backend_internal:is_over_connection_limit(Username) of
+ false ->
+ % Note: peer_host can be either a tuple or
+ % a binary if reverse_dns_lookups is enabled
+ PeerHost = proplists:get_value(peer_host, Infos),
+ AuthzContext = proplists:get_value(variable_map, Infos, #{}),
+ try rabbit_access_control:check_vhost_access(User, VHost,
+ {ip, PeerHost}, AuthzContext) of
+ ok -> ok = pg_local:join(rabbit_direct, Pid),
+ rabbit_core_metrics:connection_created(Pid, Infos),
+ rabbit_event:notify(connection_created, Infos),
+ {ok, {User, rabbit_reader:server_properties(Protocol)}}
+ catch
+ exit:#amqp_error{name = Reason = not_allowed} ->
+ {error, Reason}
+ end;
+ {true, Limit} ->
+ rabbit_log_connection:error(
+ "Error on Direct connection ~p~n"
+ "access refused for user '~s': "
+ "user connection limit (~p) is reached",
+ [Pid, Username, Limit]),
+ {error, not_allowed}
+ end.
+
+-spec start_channel
+ (rabbit_channel:channel_number(), pid(), pid(), string(),
+ rabbit_types:protocol(), rabbit_types:user(), rabbit_types:vhost(),
+ rabbit_framing:amqp_table(), pid(), any()) ->
+ {'ok', pid()}.
+
+start_channel(Number, ClientChannelPid, ConnPid, ConnName, Protocol,
+ User = #user{username = Username}, VHost, Capabilities,
+ Collector, AmqpParams) ->
+ case rabbit_auth_backend_internal:is_over_channel_limit(Username) of
+ false ->
+ {ok, _, {ChannelPid, _}} =
+ supervisor2:start_child(
+ rabbit_direct_client_sup,
+ [{direct, Number, ClientChannelPid, ConnPid, ConnName, Protocol,
+ User, VHost, Capabilities, Collector, AmqpParams}]),
+ {ok, ChannelPid};
+ {true, Limit} ->
+ rabbit_log_connection:error(
+ "Error on direct connection ~p~n"
+ "number of channels opened for user '~s' has reached the "
+ "maximum allowed limit of (~w)",
+ [ConnPid, Username, Limit]),
+ {error, not_allowed}
+ end.
+
+-spec disconnect(pid(), rabbit_event:event_props()) -> 'ok'.
+
+disconnect(Pid, Infos) ->
+ pg_local:leave(rabbit_direct, Pid),
+ rabbit_core_metrics:connection_closed(Pid),
+ rabbit_event:notify(connection_closed, Infos).
diff --git a/deps/rabbit/src/rabbit_disk_monitor.erl b/deps/rabbit/src/rabbit_disk_monitor.erl
new file mode 100644
index 0000000000..8277794098
--- /dev/null
+++ b/deps/rabbit/src/rabbit_disk_monitor.erl
@@ -0,0 +1,317 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_disk_monitor).
+
+%% Disk monitoring server. Monitors free disk space
+%% periodically and sets alarms when it is below a certain
+%% watermark (configurable either as an absolute value or
+%% relative to the memory limit).
+%%
+%% Disk monitoring is done by shelling out to /usr/bin/df
+%% instead of related built-in OTP functions because currently
+%% this is the most reliable way of determining free disk space
+%% for the partition our internal database is on.
+%%
+%% Update interval is dynamically calculated assuming disk
+%% space is being filled at FAST_RATE.
+
+-behaviour(gen_server).
+
+-export([start_link/1]).
+
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
+ terminate/2, code_change/3]).
+
+-export([get_disk_free_limit/0, set_disk_free_limit/1,
+ get_min_check_interval/0, set_min_check_interval/1,
+ get_max_check_interval/0, set_max_check_interval/1,
+ get_disk_free/0, set_enabled/1]).
+
+-define(SERVER, ?MODULE).
+-define(DEFAULT_MIN_DISK_CHECK_INTERVAL, 100).
+-define(DEFAULT_MAX_DISK_CHECK_INTERVAL, 10000).
+-define(DEFAULT_DISK_FREE_LIMIT, 50000000).
+%% 250MB/s i.e. 250kB/ms
+-define(FAST_RATE, (250 * 1000)).
+
+-record(state, {
+ %% monitor partition on which this directory resides
+ dir,
+ %% configured limit in bytes
+ limit,
+ %% last known free disk space amount in bytes
+ actual,
+ %% minimum check interval
+ min_interval,
+ %% maximum check interval
+ max_interval,
+ %% timer that drives periodic checks
+ timer,
+ %% is free disk space alarm currently in effect?
+ alarmed,
+ %% is monitoring enabled? false on unsupported
+ %% platforms
+ enabled,
+ %% number of retries to enable monitoring if it fails
+ %% on start-up
+ retries,
+ %% Interval between retries
+ interval
+}).
+
+%%----------------------------------------------------------------------------
+
+-type disk_free_limit() :: (integer() | string() | {'mem_relative', float() | integer()}).
+
+%%----------------------------------------------------------------------------
+%% Public API
+%%----------------------------------------------------------------------------
+
+-spec get_disk_free_limit() -> integer().
+
+get_disk_free_limit() ->
+ gen_server:call(?MODULE, get_disk_free_limit, infinity).
+
+-spec set_disk_free_limit(disk_free_limit()) -> 'ok'.
+
+set_disk_free_limit(Limit) ->
+ gen_server:call(?MODULE, {set_disk_free_limit, Limit}, infinity).
+
+-spec get_min_check_interval() -> integer().
+
+get_min_check_interval() ->
+ gen_server:call(?MODULE, get_min_check_interval, infinity).
+
+-spec set_min_check_interval(integer()) -> 'ok'.
+
+set_min_check_interval(Interval) ->
+ gen_server:call(?MODULE, {set_min_check_interval, Interval}, infinity).
+
+-spec get_max_check_interval() -> integer().
+
+get_max_check_interval() ->
+ gen_server:call(?MODULE, get_max_check_interval, infinity).
+
+-spec set_max_check_interval(integer()) -> 'ok'.
+
+set_max_check_interval(Interval) ->
+ gen_server:call(?MODULE, {set_max_check_interval, Interval}, infinity).
+
+-spec get_disk_free() -> (integer() | 'unknown').
+
+get_disk_free() ->
+    gen_server:call(?MODULE, get_disk_free, infinity).
+
+-spec set_enabled(boolean()) -> 'ok'.
+
+set_enabled(Enabled) ->
+    gen_server:call(?MODULE, {set_enabled, Enabled}, infinity).
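+
+%% A usage sketch: limits may be absolute (bytes, or an information unit
+%% string parsed by rabbit_resource_monitor_misc) or relative to total
+%% memory, e.g.
+%%
+%%   rabbit_disk_monitor:set_disk_free_limit("2GB").
+%%   rabbit_disk_monitor:set_disk_free_limit({mem_relative, 1.5}).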
+
+%%----------------------------------------------------------------------------
+%% gen_server callbacks
+%%----------------------------------------------------------------------------
+
+-spec start_link(disk_free_limit()) -> rabbit_types:ok_pid_or_error().
+
+start_link(Args) ->
+ gen_server:start_link({local, ?SERVER}, ?MODULE, [Args], []).
+
+init([Limit]) ->
+ Dir = dir(),
+ {ok, Retries} = application:get_env(rabbit, disk_monitor_failure_retries),
+ {ok, Interval} = application:get_env(rabbit, disk_monitor_failure_retry_interval),
+ State = #state{dir = Dir,
+ min_interval = ?DEFAULT_MIN_DISK_CHECK_INTERVAL,
+ max_interval = ?DEFAULT_MAX_DISK_CHECK_INTERVAL,
+ alarmed = false,
+ enabled = true,
+ limit = Limit,
+ retries = Retries,
+ interval = Interval},
+ {ok, enable(State)}.
+
+handle_call(get_disk_free_limit, _From, State = #state{limit = Limit}) ->
+ {reply, Limit, State};
+
+handle_call({set_disk_free_limit, _}, _From, #state{enabled = false} = State) ->
+ rabbit_log:info("Cannot set disk free limit: "
+ "disabled disk free space monitoring", []),
+ {reply, ok, State};
+
+handle_call({set_disk_free_limit, Limit}, _From, State) ->
+ {reply, ok, set_disk_limits(State, Limit)};
+
+handle_call(get_min_check_interval, _From, State) ->
+ {reply, State#state.min_interval, State};
+
+handle_call(get_max_check_interval, _From, State) ->
+ {reply, State#state.max_interval, State};
+
+handle_call({set_min_check_interval, MinInterval}, _From, State) ->
+ {reply, ok, State#state{min_interval = MinInterval}};
+
+handle_call({set_max_check_interval, MaxInterval}, _From, State) ->
+ {reply, ok, State#state{max_interval = MaxInterval}};
+
+handle_call(get_disk_free, _From, State = #state { actual = Actual }) ->
+ {reply, Actual, State};
+
+handle_call({set_enabled, _Enabled = true}, _From, State) ->
+    %% keep the new timer reference so a later disable can cancel it
+    State1 = start_timer(set_disk_limits(State, State#state.limit)),
+    rabbit_log:info("Free disk space monitor was enabled"),
+    {reply, ok, State1#state{enabled = true}};
+handle_call({set_enabled, _Enabled = false}, _From, State) ->
+ erlang:cancel_timer(State#state.timer),
+ rabbit_log:info("Free disk space monitor was manually disabled"),
+ {reply, ok, State#state{enabled = false}};
+
+handle_call(_Request, _From, State) ->
+ {noreply, State}.
+
+handle_cast(_Request, State) ->
+ {noreply, State}.
+
+handle_info(try_enable, #state{retries = Retries} = State) ->
+ {noreply, enable(State#state{retries = Retries - 1})};
+handle_info(update, State) ->
+ {noreply, start_timer(internal_update(State))};
+
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+%%----------------------------------------------------------------------------
+%% Server Internals
+%%----------------------------------------------------------------------------
+
+% the partition / drive containing this directory will be monitored
+dir() -> rabbit_mnesia:dir().
+
+set_disk_limits(State, Limit0) ->
+ Limit = interpret_limit(Limit0),
+ State1 = State#state { limit = Limit },
+ rabbit_log:info("Disk free limit set to ~pMB~n",
+ [trunc(Limit / 1000000)]),
+ internal_update(State1).
+
+internal_update(State = #state { limit = Limit,
+ dir = Dir,
+ alarmed = Alarmed}) ->
+ CurrentFree = get_disk_free(Dir),
+ NewAlarmed = CurrentFree < Limit,
+ case {Alarmed, NewAlarmed} of
+ {false, true} ->
+ emit_update_info("insufficient", CurrentFree, Limit),
+ rabbit_alarm:set_alarm({{resource_limit, disk, node()}, []});
+ {true, false} ->
+ emit_update_info("sufficient", CurrentFree, Limit),
+ rabbit_alarm:clear_alarm({resource_limit, disk, node()});
+ _ ->
+ ok
+ end,
+ State #state {alarmed = NewAlarmed, actual = CurrentFree}.
+
+get_disk_free(Dir) ->
+ get_disk_free(Dir, os:type()).
+
+get_disk_free(Dir, {unix, Sun})
+ when Sun =:= sunos; Sun =:= sunos4; Sun =:= solaris ->
+ Df = os:find_executable("df"),
+ parse_free_unix(rabbit_misc:os_cmd(Df ++ " -k " ++ Dir));
+get_disk_free(Dir, {unix, _}) ->
+ Df = os:find_executable("df"),
+ parse_free_unix(rabbit_misc:os_cmd(Df ++ " -kP " ++ Dir));
+get_disk_free(Dir, {win32, _}) ->
+ %% On Windows, the Win32 API enforces a limit of 260 characters
+ %% (MAX_PATH). If we call `dir` with a path longer than that, it
+ %% fails with "File not found". Starting with Windows 10 version
+ %% 1607, this limit was removed, but the administrator has to
+ %% configure that.
+ %%
+ %% NTFS supports paths up to 32767 characters. Therefore, paths
+ %% longer than 260 characters exist but they are "inaccessible" to
+ %% `dir`.
+ %%
+ %% A workaround is to tell the Win32 API to not parse a path and
+ %% just pass it raw to the underlying filesystem. To do this, the
+ %% path must be prepended with "\\?\". That's what we do here.
+ %%
+ %% However, the underlying filesystem may not support forward
+ %% slashes transparently, as the Win32 API does. Therefore, we
+ %% convert all forward slashes to backslashes.
+ %%
+ %% See the following page to learn more about this:
+ %% https://ss64.com/nt/syntax-filenames.html
+ RawDir = "\\\\?\\" ++ string:replace(Dir, "/", "\\", all),
+ parse_free_win32(rabbit_misc:os_cmd("dir /-C /W \"" ++ RawDir ++ "\"")).
+
+parse_free_unix(Str) ->
+ case string:tokens(Str, "\n") of
+ [_, S | _] -> case string:tokens(S, " \t") of
+ [_, _, _, Free | _] -> list_to_integer(Free) * 1024;
+ _ -> exit({unparseable, Str})
+ end;
+ _ -> exit({unparseable, Str})
+ end.
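+
+%% For reference, typical `df -kP` output looks like:
+%%
+%%   Filesystem     1024-blocks     Used Available Capacity Mounted on
+%%   /dev/sda1        103081248  7858048  90500672       9% /
+%%
+%% The second line is tokenised and the fourth column ("Available", in
+%% KiB) is multiplied by 1024 to yield free bytes.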
+
+parse_free_win32(CommandResult) ->
+ LastLine = lists:last(string:tokens(CommandResult, "\r\n")),
+ {match, [Free]} = re:run(lists:reverse(LastLine), "(\\d+)",
+ [{capture, all_but_first, list}]),
+ list_to_integer(lists:reverse(Free)).
+
+interpret_limit({mem_relative, Relative})
+ when is_number(Relative) ->
+ round(Relative * vm_memory_monitor:get_total_memory());
+interpret_limit(Absolute) ->
+ case rabbit_resource_monitor_misc:parse_information_unit(Absolute) of
+ {ok, ParsedAbsolute} -> ParsedAbsolute;
+ {error, parse_error} ->
+ rabbit_log:error("Unable to parse disk_free_limit value ~p",
+ [Absolute]),
+ ?DEFAULT_DISK_FREE_LIMIT
+ end.
+
+emit_update_info(StateStr, CurrentFree, Limit) ->
+ rabbit_log:info(
+ "Free disk space is ~s. Free bytes: ~p. Limit: ~p~n",
+ [StateStr, CurrentFree, Limit]).
+
+start_timer(State) ->
+ State#state{timer = erlang:send_after(interval(State), self(), update)}.
+
+interval(#state{alarmed = true,
+ max_interval = MaxInterval}) ->
+ MaxInterval;
+interval(#state{limit = Limit,
+ actual = Actual,
+ min_interval = MinInterval,
+ max_interval = MaxInterval}) ->
+ IdealInterval = 2 * (Actual - Limit) / ?FAST_RATE,
+ trunc(erlang:max(MinInterval, erlang:min(MaxInterval, IdealInterval))).
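+
+%% A worked example: at ?FAST_RATE (250kB/ms), a node with 500MB of
+%% headroom (Actual - Limit = 500000000 bytes) checks again in
+%%   2 * 500000000 / 250000 = 4000ms,
+%% while 2GB of headroom would yield 16000ms, clamped down to the
+%% 10000ms ?DEFAULT_MAX_DISK_CHECK_INTERVAL.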
+
+enable(#state{retries = 0} = State) ->
+ State;
+enable(#state{dir = Dir, interval = Interval, limit = Limit, retries = Retries}
+ = State) ->
+ case {catch get_disk_free(Dir),
+ vm_memory_monitor:get_total_memory()} of
+ {N1, N2} when is_integer(N1), is_integer(N2) ->
+ rabbit_log:info("Enabling free disk space monitoring~n", []),
+ start_timer(set_disk_limits(State, Limit));
+ Err ->
+ rabbit_log:info("Free disk space monitor encountered an error "
+ "(e.g. failed to parse output from OS tools): ~p, retries left: ~b~n",
+ [Err, Retries]),
+ erlang:send_after(Interval, self(), try_enable),
+ State#state{enabled = false}
+ end.
diff --git a/deps/rabbit/src/rabbit_epmd_monitor.erl b/deps/rabbit/src/rabbit_epmd_monitor.erl
new file mode 100644
index 0000000000..938826dba6
--- /dev/null
+++ b/deps/rabbit/src/rabbit_epmd_monitor.erl
@@ -0,0 +1,104 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_epmd_monitor).
+
+-behaviour(gen_server).
+
+-export([start_link/0]).
+
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
+ code_change/3]).
+
+-record(state, {timer, mod, me, host, port}).
+
+-define(SERVER, ?MODULE).
+-define(CHECK_FREQUENCY, 60000).
+
+%%----------------------------------------------------------------------------
+%% It's possible for epmd to be killed out from underneath us. If that
+%% happens, then obviously clustering and rabbitmqctl stop
+%% working. This process checks up on epmd and restarts it /
+%% re-registers us with it if it has gone away.
+%%
+%% How could epmd be killed?
+%%
+%% 1) The most popular way for this to happen is when running as a
+%% Windows service. The user starts rabbitmqctl first, and this starts
+%% epmd under the user's account. When they log out, epmd is killed.
+%%
+%% 2) Some packagings of (non-RabbitMQ?) Erlang apps might do "killall
+%% epmd" as a shutdown or uninstall step.
+%% ----------------------------------------------------------------------------
+
+-spec start_link() -> rabbit_types:ok_pid_or_error().
+
+start_link() ->
+ gen_server:start_link({local, ?SERVER}, ?MODULE, [], []).
+
+init([]) ->
+ {Me, Host} = rabbit_nodes:parts(node()),
+ Mod = net_kernel:epmd_module(),
+ {ok, Port} = handle_port_please(init, Mod:port_please(Me, Host), Me, undefined),
+ State = #state{mod = Mod, me = Me, host = Host, port = Port},
+ {ok, ensure_timer(State)}.
+
+handle_call(_Request, _From, State) ->
+ {noreply, State}.
+
+handle_cast(check, State0) ->
+ {ok, State1} = check_epmd(State0),
+ {noreply, ensure_timer(State1#state{timer = undefined})};
+handle_cast(_Msg, State) ->
+ {noreply, State}.
+
+handle_info(check, State0) ->
+ {ok, State1} = check_epmd(State0),
+ {noreply, ensure_timer(State1#state{timer = undefined})};
+
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+%%----------------------------------------------------------------------------
+
+ensure_timer(State) ->
+ rabbit_misc:ensure_timer(State, #state.timer, ?CHECK_FREQUENCY, check).
+
+check_epmd(State = #state{mod = Mod,
+ me = Me,
+ host = Host,
+ port = Port0}) ->
+ rabbit_log:debug("Asked to [re-]register this node (~s@~s) with epmd...", [Me, Host]),
+ {ok, Port1} = handle_port_please(check, Mod:port_please(Me, Host), Me, Port0),
+ rabbit_nodes:ensure_epmd(),
+ Mod:register_node(Me, Port1),
+ rabbit_log:debug("[Re-]registered this node (~s@~s) with epmd at port ~p", [Me, Host, Port1]),
+ {ok, State#state{port = Port1}}.
+
+handle_port_please(init, noport, Me, Port) ->
+ rabbit_log:info("epmd does not know us, re-registering as ~s~n", [Me]),
+ {ok, Port};
+handle_port_please(check, noport, Me, Port) ->
+ rabbit_log:warning("epmd does not know us, re-registering ~s at port ~b~n", [Me, Port]),
+ {ok, Port};
+handle_port_please(_, closed, _Me, Port) ->
+ rabbit_log:error("epmd monitor failed to retrieve our port from epmd: closed"),
+ {ok, Port};
+handle_port_please(init, {port, NewPort, _Version}, _Me, _Port) ->
+ rabbit_log:info("epmd monitor knows us, inter-node communication (distribution) port: ~p", [NewPort]),
+ {ok, NewPort};
+handle_port_please(check, {port, NewPort, _Version}, _Me, _Port) ->
+ {ok, NewPort};
+handle_port_please(_, {error, Error}, _Me, Port) ->
+ rabbit_log:error("epmd monitor failed to retrieve our port from epmd: ~p", [Error]),
+ {ok, Port}.
diff --git a/deps/rabbit/src/rabbit_event_consumer.erl b/deps/rabbit/src/rabbit_event_consumer.erl
new file mode 100644
index 0000000000..489d39312e
--- /dev/null
+++ b/deps/rabbit/src/rabbit_event_consumer.erl
@@ -0,0 +1,197 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_event_consumer).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-export([register/4]).
+-export([init/1, handle_call/2, handle_event/2, handle_info/2,
+ terminate/2, code_change/3]).
+
+-record(state, {pid, ref, monitor, pattern}).
+
+%%----------------------------------------------------------------------------
+
+register(Pid, Ref, Duration, Pattern) ->
+ case gen_event:add_handler(rabbit_event, ?MODULE, [Pid, Ref, Duration, Pattern]) of
+ ok ->
+ {ok, Ref};
+ Error ->
+ Error
+ end.
+
+%%----------------------------------------------------------------------------
+
+init([Pid, Ref, Duration, Pattern]) ->
+ MRef = erlang:monitor(process, Pid),
+ case Duration of
+ infinity -> infinity;
+ _ -> erlang:send_after(Duration * 1000, self(), rabbit_event_consumer_timeout)
+ end,
+ {ok, #state{pid = Pid, ref = Ref, monitor = MRef, pattern = Pattern}}.
+
+handle_call(_Request, State) -> {ok, not_understood, State}.
+
+handle_event(#event{type = Type,
+ props = Props,
+ timestamp = TS,
+ reference = none}, #state{pid = Pid,
+ ref = Ref,
+ pattern = Pattern} = State) ->
+ case key(Type) of
+ ignore -> ok;
+ Key -> case re:run(Key, Pattern, [{capture, none}]) of
+ match ->
+ Data = [{'event', Key}] ++
+ fmt_proplist([{'timestamp_in_ms', TS} | Props]),
+                           Pid ! {Ref, Data, continue};
+ _ ->
+ ok
+ end
+ end,
+ {ok, State};
+handle_event(_Event, State) ->
+ {ok, State}.
+
+handle_info({'DOWN', MRef, _, _, _}, #state{monitor = MRef}) ->
+ remove_handler;
+handle_info(rabbit_event_consumer_timeout, #state{pid = Pid, ref = Ref}) ->
+ Pid ! {Ref, <<>>, finished},
+ remove_handler;
+handle_info(_Info, State) ->
+ {ok, State}.
+
+terminate(_Arg, #state{monitor = MRef}) ->
+ erlang:demonitor(MRef),
+ ok.
+
+code_change(_OldVsn, State, _Extra) -> {ok, State}.
+
+%%----------------------------------------------------------------------------
+
+%% pattern matching is way more efficient than the string operations,
+%% so let's use all the keys we're aware of to speed up the handler.
+%% Any unknown or new key will be processed as before (see the last function clause).
+key(queue_deleted) ->
+ <<"queue.deleted">>;
+key(queue_created) ->
+ <<"queue.created">>;
+key(exchange_created) ->
+ <<"exchange.created">>;
+key(exchange_deleted) ->
+ <<"exchange.deleted">>;
+key(binding_created) ->
+ <<"binding.created">>;
+key(connection_created) ->
+ <<"connection.created">>;
+key(connection_closed) ->
+ <<"connection.closed">>;
+key(channel_created) ->
+ <<"channel.created">>;
+key(channel_closed) ->
+ <<"channel.closed">>;
+key(consumer_created) ->
+ <<"consumer.created">>;
+key(consumer_deleted) ->
+ <<"consumer.deleted">>;
+key(queue_stats) ->
+ ignore;
+key(connection_stats) ->
+ ignore;
+key(policy_set) ->
+ <<"policy.set">>;
+key(policy_cleared) ->
+ <<"policy.cleared">>;
+key(parameter_set) ->
+ <<"parameter.set">>;
+key(parameter_cleared) ->
+ <<"parameter.cleared">>;
+key(vhost_created) ->
+ <<"vhost.created">>;
+key(vhost_deleted) ->
+ <<"vhost.deleted">>;
+key(vhost_limits_set) ->
+ <<"vhost.limits.set">>;
+key(vhost_limits_cleared) ->
+ <<"vhost.limits.cleared">>;
+key(user_authentication_success) ->
+ <<"user.authentication.success">>;
+key(user_authentication_failure) ->
+ <<"user.authentication.failure">>;
+key(user_created) ->
+ <<"user.created">>;
+key(user_deleted) ->
+ <<"user.deleted">>;
+key(user_password_changed) ->
+ <<"user.password.changed">>;
+key(user_password_cleared) ->
+ <<"user.password.cleared">>;
+key(user_tags_set) ->
+ <<"user.tags.set">>;
+key(permission_created) ->
+ <<"permission.created">>;
+key(permission_deleted) ->
+ <<"permission.deleted">>;
+key(topic_permission_created) ->
+ <<"topic.permission.created">>;
+key(topic_permission_deleted) ->
+ <<"topic.permission.deleted">>;
+key(alarm_set) ->
+ <<"alarm.set">>;
+key(alarm_cleared) ->
+ <<"alarm.cleared">>;
+key(shovel_worker_status) ->
+ <<"shovel.worker.status">>;
+key(shovel_worker_removed) ->
+ <<"shovel.worker.removed">>;
+key(federation_link_status) ->
+ <<"federation.link.status">>;
+key(federation_link_removed) ->
+ <<"federation.link.removed">>;
+key(S) ->
+ case string:tokens(atom_to_list(S), "_") of
+ [_, "stats"] -> ignore;
+ Tokens -> list_to_binary(string:join(Tokens, "."))
+ end.
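+
+%% For illustration: a hypothetical event type 'node_added' would fall
+%% through to the clause above and become <<"node.added">>, while an
+%% unlisted stats event such as 'channel_stats' would be ignored.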
+
+fmt_proplist(Props) ->
+ lists:foldl(fun({K, V}, Acc) ->
+ case fmt(K, V) of
+ L when is_list(L) -> lists:append(L, Acc);
+ T -> [T | Acc]
+ end
+ end, [], Props).
+
+fmt(K, #resource{virtual_host = VHost,
+ name = Name}) -> [{K, Name},
+ {'vhost', VHost}];
+fmt(K, true) -> {K, true};
+fmt(K, false) -> {K, false};
+fmt(K, V) when is_atom(V) -> {K, atom_to_binary(V, utf8)};
+fmt(K, V) when is_integer(V) -> {K, V};
+fmt(K, V) when is_number(V) -> {K, V};
+fmt(K, V) when is_binary(V) -> {K, V};
+fmt(K, [{_, _}|_] = Vs) -> {K, fmt_proplist(Vs)};
+fmt(K, Vs) when is_list(Vs) -> {K, [fmt(V) || V <- Vs]};
+fmt(K, V) when is_pid(V) -> {K, list_to_binary(rabbit_misc:pid_to_string(V))};
+fmt(K, V) -> {K,
+ list_to_binary(
+ rabbit_misc:format("~1000000000p", [V]))}.
+
+%% Exactly the same as fmt/2; duplicated only for performance reasons.
+fmt(true) -> true;
+fmt(false) -> false;
+fmt(V) when is_atom(V) -> atom_to_binary(V, utf8);
+fmt(V) when is_integer(V) -> V;
+fmt(V) when is_number(V) -> V;
+fmt(V) when is_binary(V) -> V;
+fmt([{_, _}|_] = Vs) -> fmt_proplist(Vs);
+fmt(Vs) when is_list(Vs) -> [fmt(V) || V <- Vs];
+fmt(V) when is_pid(V) -> list_to_binary(rabbit_misc:pid_to_string(V));
+fmt(V) -> list_to_binary(
+ rabbit_misc:format("~1000000000p", [V])).
diff --git a/deps/rabbit/src/rabbit_exchange.erl b/deps/rabbit/src/rabbit_exchange.erl
new file mode 100644
index 0000000000..129b2b868b
--- /dev/null
+++ b/deps/rabbit/src/rabbit_exchange.erl
@@ -0,0 +1,592 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_exchange).
+-include("rabbit.hrl").
+-include("rabbit_framing.hrl").
+
+-export([recover/1, policy_changed/2, callback/4, declare/7,
+ assert_equivalence/6, assert_args_equivalence/2, check_type/1,
+ lookup/1, lookup_many/1, lookup_or_die/1, list/0, list/1, lookup_scratch/2,
+ update_scratch/3, update_decorators/1, immutable/1,
+ info_keys/0, info/1, info/2, info_all/1, info_all/2, info_all/4,
+ route/2, delete/3, validate_binding/2, count/0]).
+-export([list_names/0, is_amq_prefixed/1]).
+%% these must be run inside a mnesia tx
+-export([maybe_auto_delete/2, serial/1, peek_serial/1, update/2]).
+
+%%----------------------------------------------------------------------------
+
+-export_type([name/0, type/0]).
+
+-type name() :: rabbit_types:r('exchange').
+-type type() :: atom().
+-type fun_name() :: atom().
+
+%%----------------------------------------------------------------------------
+
+-define(INFO_KEYS, [name, type, durable, auto_delete, internal, arguments,
+ policy, user_who_performed_action]).
+
+-spec recover(rabbit_types:vhost()) -> [name()].
+
+recover(VHost) ->
+ Xs = rabbit_misc:table_filter(
+ fun (#exchange{name = XName}) ->
+ XName#resource.virtual_host =:= VHost andalso
+ mnesia:read({rabbit_exchange, XName}) =:= []
+ end,
+ fun (X, Tx) ->
+ X1 = case Tx of
+ true -> store_ram(X);
+ false -> rabbit_exchange_decorator:set(X)
+ end,
+ callback(X1, create, map_create_tx(Tx), [X1])
+ end,
+ rabbit_durable_exchange),
+ [XName || #exchange{name = XName} <- Xs].
+
+-spec callback
+ (rabbit_types:exchange(), fun_name(),
+ fun((boolean()) -> non_neg_integer()) | atom(), [any()]) -> 'ok'.
+
+callback(X = #exchange{type = XType,
+ decorators = Decorators}, Fun, Serial0, Args) ->
+ Serial = if is_function(Serial0) -> Serial0;
+ is_atom(Serial0) -> fun (_Bool) -> Serial0 end
+ end,
+ [ok = apply(M, Fun, [Serial(M:serialise_events(X)) | Args]) ||
+ M <- rabbit_exchange_decorator:select(all, Decorators)],
+ Module = type_to_module(XType),
+ apply(Module, Fun, [Serial(Module:serialise_events()) | Args]).
+
+-spec policy_changed
+ (rabbit_types:exchange(), rabbit_types:exchange()) -> 'ok'.
+
+policy_changed(X = #exchange{type = XType,
+ decorators = Decorators},
+ X1 = #exchange{decorators = Decorators1}) ->
+ D = rabbit_exchange_decorator:select(all, Decorators),
+ D1 = rabbit_exchange_decorator:select(all, Decorators1),
+ DAll = lists:usort(D ++ D1),
+ [ok = M:policy_changed(X, X1) || M <- [type_to_module(XType) | DAll]],
+ ok.
+
+serialise_events(X = #exchange{type = Type, decorators = Decorators}) ->
+ lists:any(fun (M) -> M:serialise_events(X) end,
+ rabbit_exchange_decorator:select(all, Decorators))
+ orelse (type_to_module(Type)):serialise_events().
+
+-spec serial(rabbit_types:exchange()) ->
+ fun((boolean()) -> 'none' | pos_integer()).
+
+serial(#exchange{name = XName} = X) ->
+ Serial = case serialise_events(X) of
+ true -> next_serial(XName);
+ false -> none
+ end,
+ fun (true) -> Serial;
+ (false) -> none
+ end.
+
+-spec is_amq_prefixed(rabbit_types:exchange() | binary()) -> boolean().
+
+is_amq_prefixed(Name) when is_binary(Name) ->
+    case re:run(Name, <<"^amq\\.">>) of
+ nomatch -> false;
+ {match, _} -> true
+ end;
+is_amq_prefixed(#exchange{name = #resource{name = <<>>}}) ->
+ false;
+is_amq_prefixed(#exchange{name = #resource{name = Name}}) ->
+ is_amq_prefixed(Name).
+
+-spec declare
+ (name(), type(), boolean(), boolean(), boolean(),
+ rabbit_framing:amqp_table(), rabbit_types:username())
+ -> rabbit_types:exchange().
+
+declare(XName, Type, Durable, AutoDelete, Internal, Args, Username) ->
+ X = rabbit_exchange_decorator:set(
+ rabbit_policy:set(#exchange{name = XName,
+ type = Type,
+ durable = Durable,
+ auto_delete = AutoDelete,
+ internal = Internal,
+ arguments = Args,
+ options = #{user => Username}})),
+ XT = type_to_module(Type),
+ %% We want to upset things if it isn't ok
+ ok = XT:validate(X),
+ %% Avoid a channel exception if there's a race condition
+ %% with an exchange.delete operation.
+ %%
+ %% See rabbitmq/rabbitmq-federation#7.
+ case rabbit_runtime_parameters:lookup(XName#resource.virtual_host,
+ ?EXCHANGE_DELETE_IN_PROGRESS_COMPONENT,
+ XName#resource.name) of
+ not_found ->
+ rabbit_misc:execute_mnesia_transaction(
+ fun () ->
+ case mnesia:wread({rabbit_exchange, XName}) of
+ [] ->
+ {new, store(X)};
+ [ExistingX] ->
+ {existing, ExistingX}
+ end
+ end,
+ fun ({new, Exchange}, Tx) ->
+ ok = callback(X, create, map_create_tx(Tx), [Exchange]),
+ rabbit_event:notify_if(not Tx, exchange_created, info(Exchange)),
+ Exchange;
+ ({existing, Exchange}, _Tx) ->
+ Exchange;
+ (Err, _Tx) ->
+ Err
+ end);
+ _ ->
+            rabbit_log:warning("ignoring exchange.declare for exchange ~p, "
+                               "exchange.delete in progress~n", [XName]),
+ X
+ end.
+
+map_create_tx(true) -> transaction;
+map_create_tx(false) -> none.
+
+
+store(X = #exchange{durable = true}) ->
+ mnesia:write(rabbit_durable_exchange, X#exchange{decorators = undefined},
+ write),
+ store_ram(X);
+store(X = #exchange{durable = false}) ->
+ store_ram(X).
+
+store_ram(X) ->
+ X1 = rabbit_exchange_decorator:set(X),
+ ok = mnesia:write(rabbit_exchange, rabbit_exchange_decorator:set(X1),
+ write),
+ X1.
+
+%% Used with binaries sent over the wire; the type may not exist.
+
+-spec check_type
+ (binary()) -> atom() | rabbit_types:connection_exit().
+
+check_type(TypeBin) ->
+ case rabbit_registry:binary_to_type(rabbit_data_coercion:to_binary(TypeBin)) of
+ {error, not_found} ->
+ rabbit_misc:protocol_error(
+ command_invalid, "unknown exchange type '~s'", [TypeBin]);
+ T ->
+ case rabbit_registry:lookup_module(exchange, T) of
+ {error, not_found} -> rabbit_misc:protocol_error(
+ command_invalid,
+ "invalid exchange type '~s'", [T]);
+ {ok, _Module} -> T
+ end
+ end.
+
+-spec assert_equivalence
+ (rabbit_types:exchange(), atom(), boolean(), boolean(), boolean(),
+ rabbit_framing:amqp_table())
+ -> 'ok' | rabbit_types:connection_exit().
+
+assert_equivalence(X = #exchange{ name = XName,
+ durable = Durable,
+ auto_delete = AutoDelete,
+ internal = Internal,
+ type = Type},
+ ReqType, ReqDurable, ReqAutoDelete, ReqInternal, ReqArgs) ->
+ AFE = fun rabbit_misc:assert_field_equivalence/4,
+ AFE(Type, ReqType, XName, type),
+ AFE(Durable, ReqDurable, XName, durable),
+ AFE(AutoDelete, ReqAutoDelete, XName, auto_delete),
+ AFE(Internal, ReqInternal, XName, internal),
+ (type_to_module(Type)):assert_args_equivalence(X, ReqArgs).
+
+-spec assert_args_equivalence
+ (rabbit_types:exchange(), rabbit_framing:amqp_table())
+ -> 'ok' | rabbit_types:connection_exit().
+
+assert_args_equivalence(#exchange{ name = Name, arguments = Args },
+ RequiredArgs) ->
+ %% The spec says "Arguments are compared for semantic
+ %% equivalence". The only arg we care about is
+ %% "alternate-exchange".
+ rabbit_misc:assert_args_equivalence(Args, RequiredArgs, Name,
+ [<<"alternate-exchange">>]).
+
+-spec lookup
+ (name()) -> rabbit_types:ok(rabbit_types:exchange()) |
+ rabbit_types:error('not_found').
+
+lookup(Name) ->
+ rabbit_misc:dirty_read({rabbit_exchange, Name}).
+
+
+-spec lookup_many([name()]) -> [rabbit_types:exchange()].
+
+lookup_many([]) -> [];
+lookup_many([Name]) -> ets:lookup(rabbit_exchange, Name);
+lookup_many(Names) when is_list(Names) ->
+ %% Normally we'd call mnesia:dirty_read/1 here, but that is quite
+ %% expensive for reasons explained in rabbit_misc:dirty_read/1.
+ lists:append([ets:lookup(rabbit_exchange, Name) || Name <- Names]).
+
+
+-spec lookup_or_die
+ (name()) -> rabbit_types:exchange() |
+ rabbit_types:channel_exit().
+
+lookup_or_die(Name) ->
+ case lookup(Name) of
+ {ok, X} -> X;
+ {error, not_found} -> rabbit_amqqueue:not_found(Name)
+ end.
+
+-spec list() -> [rabbit_types:exchange()].
+
+list() -> mnesia:dirty_match_object(rabbit_exchange, #exchange{_ = '_'}).
+
+-spec count() -> non_neg_integer().
+
+count() ->
+ mnesia:table_info(rabbit_exchange, size).
+
+-spec list_names() -> [rabbit_exchange:name()].
+
+list_names() -> mnesia:dirty_all_keys(rabbit_exchange).
+
+%% Not dirty_match_object since that would not be transactional when used in a
+%% tx context
+
+-spec list(rabbit_types:vhost()) -> [rabbit_types:exchange()].
+
+list(VHostPath) ->
+ mnesia:async_dirty(
+ fun () ->
+ mnesia:match_object(
+ rabbit_exchange,
+ #exchange{name = rabbit_misc:r(VHostPath, exchange), _ = '_'},
+ read)
+ end).
+
+-spec lookup_scratch(name(), atom()) ->
+ rabbit_types:ok(term()) |
+ rabbit_types:error('not_found').
+
+lookup_scratch(Name, App) ->
+ case lookup(Name) of
+ {ok, #exchange{scratches = undefined}} ->
+ {error, not_found};
+ {ok, #exchange{scratches = Scratches}} ->
+ case orddict:find(App, Scratches) of
+ {ok, Value} -> {ok, Value};
+ error -> {error, not_found}
+ end;
+ {error, not_found} ->
+ {error, not_found}
+ end.
+
+-spec update_scratch(name(), atom(), fun((any()) -> any())) -> 'ok'.
+
+update_scratch(Name, App, Fun) ->
+ rabbit_misc:execute_mnesia_transaction(
+ fun() ->
+ update(Name,
+ fun(X = #exchange{scratches = Scratches0}) ->
+ Scratches1 = case Scratches0 of
+ undefined -> orddict:new();
+ _ -> Scratches0
+ end,
+ Scratch = case orddict:find(App, Scratches1) of
+ {ok, S} -> S;
+ error -> undefined
+ end,
+ Scratches2 = orddict:store(
+ App, Fun(Scratch), Scratches1),
+ X#exchange{scratches = Scratches2}
+ end),
+ ok
+ end).
+
+-spec update_decorators(name()) -> 'ok'.
+
+update_decorators(Name) ->
+ rabbit_misc:execute_mnesia_transaction(
+ fun() ->
+ case mnesia:wread({rabbit_exchange, Name}) of
+ [X] -> store_ram(X),
+ ok;
+ [] -> ok
+ end
+ end).
+
+-spec update
+ (name(),
+ fun((rabbit_types:exchange()) -> rabbit_types:exchange()))
+ -> not_found | rabbit_types:exchange().
+
+update(Name, Fun) ->
+ case mnesia:wread({rabbit_exchange, Name}) of
+ [X] -> X1 = Fun(X),
+ store(X1);
+ [] -> not_found
+ end.
+
+-spec immutable(rabbit_types:exchange()) -> rabbit_types:exchange().
+
+immutable(X) -> X#exchange{scratches = none,
+ policy = none,
+ decorators = none}.
+
+-spec info_keys() -> rabbit_types:info_keys().
+
+info_keys() -> ?INFO_KEYS.
+
+map(VHostPath, F) ->
+ %% TODO: there is scope for optimisation here, e.g. using a
+ %% cursor, parallelising the function invocation
+ lists:map(F, list(VHostPath)).
+
+infos(Items, X) -> [{Item, i(Item, X)} || Item <- Items].
+
+i(name, #exchange{name = Name}) -> Name;
+i(type, #exchange{type = Type}) -> Type;
+i(durable, #exchange{durable = Durable}) -> Durable;
+i(auto_delete, #exchange{auto_delete = AutoDelete}) -> AutoDelete;
+i(internal, #exchange{internal = Internal}) -> Internal;
+i(arguments, #exchange{arguments = Arguments}) -> Arguments;
+i(policy, X) -> case rabbit_policy:name(X) of
+ none -> '';
+ Policy -> Policy
+ end;
+i(user_who_performed_action, #exchange{options = Opts}) ->
+ maps:get(user, Opts, ?UNKNOWN_USER);
+i(Item, #exchange{type = Type} = X) ->
+ case (type_to_module(Type)):info(X, [Item]) of
+ [{Item, I}] -> I;
+ [] -> throw({bad_argument, Item})
+ end.
+
+-spec info(rabbit_types:exchange()) -> rabbit_types:infos().
+
+info(X = #exchange{type = Type}) ->
+ infos(?INFO_KEYS, X) ++ (type_to_module(Type)):info(X).
+
+-spec info
+ (rabbit_types:exchange(), rabbit_types:info_keys())
+ -> rabbit_types:infos().
+
+info(X = #exchange{type = _Type}, Items) ->
+ infos(Items, X).
+
+-spec info_all(rabbit_types:vhost()) -> [rabbit_types:infos()].
+
+info_all(VHostPath) -> map(VHostPath, fun (X) -> info(X) end).
+
+-spec info_all(rabbit_types:vhost(), rabbit_types:info_keys())
+ -> [rabbit_types:infos()].
+
+info_all(VHostPath, Items) -> map(VHostPath, fun (X) -> info(X, Items) end).
+
+-spec info_all(rabbit_types:vhost(), rabbit_types:info_keys(),
+ reference(), pid())
+ -> 'ok'.
+
+info_all(VHostPath, Items, Ref, AggregatorPid) ->
+ rabbit_control_misc:emitting_map(
+ AggregatorPid, Ref, fun(X) -> info(X, Items) end, list(VHostPath)).
+
+-spec route(rabbit_types:exchange(), rabbit_types:delivery())
+ -> [rabbit_amqqueue:name()].
+
+route(#exchange{name = #resource{virtual_host = VHost, name = RName} = XName,
+ decorators = Decorators} = X,
+ #delivery{message = #basic_message{routing_keys = RKs}} = Delivery) ->
+ case RName of
+ <<>> ->
+ RKsSorted = lists:usort(RKs),
+ [rabbit_channel:deliver_reply(RK, Delivery) ||
+ RK <- RKsSorted, virtual_reply_queue(RK)],
+ [rabbit_misc:r(VHost, queue, RK) || RK <- RKsSorted,
+ not virtual_reply_queue(RK)];
+ _ ->
+ Decs = rabbit_exchange_decorator:select(route, Decorators),
+ lists:usort(route1(Delivery, Decs, {[X], XName, []}))
+ end.
+
+virtual_reply_queue(<<"amq.rabbitmq.reply-to.", _/binary>>) -> true;
+virtual_reply_queue(_) -> false.
+
+route1(_, _, {[], _, QNames}) ->
+ QNames;
+route1(Delivery, Decorators,
+ {[X = #exchange{type = Type} | WorkList], SeenXs, QNames}) ->
+ ExchangeDests = (type_to_module(Type)):route(X, Delivery),
+ DecorateDests = process_decorators(X, Decorators, Delivery),
+ AlternateDests = process_alternate(X, ExchangeDests),
+ route1(Delivery, Decorators,
+ lists:foldl(fun process_route/2, {WorkList, SeenXs, QNames},
+ AlternateDests ++ DecorateDests ++ ExchangeDests)).
+
+process_alternate(X = #exchange{name = XName}, []) ->
+ case rabbit_policy:get_arg(
+ <<"alternate-exchange">>, <<"alternate-exchange">>, X) of
+ undefined -> [];
+ AName -> [rabbit_misc:r(XName, exchange, AName)]
+ end;
+process_alternate(_X, _Results) ->
+ [].
+
+process_decorators(_, [], _) -> %% optimisation
+ [];
+process_decorators(X, Decorators, Delivery) ->
+ lists:append([Decorator:route(X, Delivery) || Decorator <- Decorators]).
+
+process_route(#resource{kind = exchange} = XName,
+ {_WorkList, XName, _QNames} = Acc) ->
+ Acc;
+process_route(#resource{kind = exchange} = XName,
+ {WorkList, #resource{kind = exchange} = SeenX, QNames}) ->
+ {cons_if_present(XName, WorkList),
+ gb_sets:from_list([SeenX, XName]), QNames};
+process_route(#resource{kind = exchange} = XName,
+ {WorkList, SeenXs, QNames} = Acc) ->
+ case gb_sets:is_element(XName, SeenXs) of
+ true -> Acc;
+ false -> {cons_if_present(XName, WorkList),
+ gb_sets:add_element(XName, SeenXs), QNames}
+ end;
+process_route(#resource{kind = queue} = QName,
+ {WorkList, SeenXs, QNames}) ->
+ {WorkList, SeenXs, [QName | QNames]}.
+
+cons_if_present(XName, L) ->
+ case lookup(XName) of
+ {ok, X} -> [X | L];
+ {error, not_found} -> L
+ end.
+
+call_with_exchange(XName, Fun) ->
+ rabbit_misc:execute_mnesia_tx_with_tail(
+ fun () -> case mnesia:read({rabbit_exchange, XName}) of
+ [] -> rabbit_misc:const({error, not_found});
+ [X] -> Fun(X)
+ end
+ end).
+
+-spec delete
+ (name(), 'true', rabbit_types:username()) ->
+ 'ok'| rabbit_types:error('not_found' | 'in_use');
+ (name(), 'false', rabbit_types:username()) ->
+ 'ok' | rabbit_types:error('not_found').
+
+delete(XName, IfUnused, Username) ->
+ Fun = case IfUnused of
+ true -> fun conditional_delete/2;
+ false -> fun unconditional_delete/2
+ end,
+ try
+ %% guard exchange.declare operations from failing when there's
+ %% a race condition between it and an exchange.delete.
+ %%
+ %% see rabbitmq/rabbitmq-federation#7
+ rabbit_runtime_parameters:set(XName#resource.virtual_host,
+ ?EXCHANGE_DELETE_IN_PROGRESS_COMPONENT,
+ XName#resource.name, true, Username),
+ call_with_exchange(
+ XName,
+ fun (X) ->
+ case Fun(X, false) of
+ {deleted, X, Bs, Deletions} ->
+ rabbit_binding:process_deletions(
+ rabbit_binding:add_deletion(
+ XName, {X, deleted, Bs}, Deletions), Username);
+ {error, _InUseOrNotFound} = E ->
+ rabbit_misc:const(E)
+ end
+ end)
+ after
+ rabbit_runtime_parameters:clear(XName#resource.virtual_host,
+ ?EXCHANGE_DELETE_IN_PROGRESS_COMPONENT,
+ XName#resource.name, Username)
+ end.
+
+-spec validate_binding
+ (rabbit_types:exchange(), rabbit_types:binding())
+ -> rabbit_types:ok_or_error({'binding_invalid', string(), [any()]}).
+
+validate_binding(X = #exchange{type = XType}, Binding) ->
+ Module = type_to_module(XType),
+ Module:validate_binding(X, Binding).
+
+-spec maybe_auto_delete
+ (rabbit_types:exchange(), boolean())
+ -> 'not_deleted' | {'deleted', rabbit_binding:deletions()}.
+
+maybe_auto_delete(#exchange{auto_delete = false}, _OnlyDurable) ->
+ not_deleted;
+maybe_auto_delete(#exchange{auto_delete = true} = X, OnlyDurable) ->
+ case conditional_delete(X, OnlyDurable) of
+ {error, in_use} -> not_deleted;
+ {deleted, X, [], Deletions} -> {deleted, Deletions}
+ end.
+
+conditional_delete(X = #exchange{name = XName}, OnlyDurable) ->
+ case rabbit_binding:has_for_source(XName) of
+ false -> internal_delete(X, OnlyDurable, false);
+ true -> {error, in_use}
+ end.
+
+unconditional_delete(X, OnlyDurable) ->
+ internal_delete(X, OnlyDurable, true).
+
+internal_delete(X = #exchange{name = XName}, OnlyDurable, RemoveBindingsForSource) ->
+ ok = mnesia:delete({rabbit_exchange, XName}),
+ ok = mnesia:delete({rabbit_exchange_serial, XName}),
+ mnesia:delete({rabbit_durable_exchange, XName}),
+ Bindings = case RemoveBindingsForSource of
+ true -> rabbit_binding:remove_for_source(XName);
+ false -> []
+ end,
+ {deleted, X, Bindings, rabbit_binding:remove_for_destination(
+ XName, OnlyDurable)}.
+
+next_serial(XName) ->
+ Serial = peek_serial(XName, write),
+ ok = mnesia:write(rabbit_exchange_serial,
+ #exchange_serial{name = XName, next = Serial + 1}, write),
+ Serial.
+
+-spec peek_serial(name()) -> pos_integer() | 'undefined'.
+
+peek_serial(XName) -> peek_serial(XName, read).
+
+peek_serial(XName, LockType) ->
+ case mnesia:read(rabbit_exchange_serial, XName, LockType) of
+ [#exchange_serial{next = Serial}] -> Serial;
+ _ -> 1
+ end.
+
+invalid_module(T) ->
+ rabbit_log:warning("Could not find exchange type ~s.~n", [T]),
+ put({xtype_to_module, T}, rabbit_exchange_type_invalid),
+ rabbit_exchange_type_invalid.
+
+%% Used with atoms from records; i.e., the type is expected to exist.
+type_to_module(T) ->
+ case get({xtype_to_module, T}) of
+ undefined ->
+ case rabbit_registry:lookup_module(exchange, T) of
+ {ok, Module} -> put({xtype_to_module, T}, Module),
+ Module;
+ {error, not_found} -> invalid_module(T)
+ end;
+ Module ->
+ Module
+ end.
diff --git a/deps/rabbit/src/rabbit_exchange_decorator.erl b/deps/rabbit/src/rabbit_exchange_decorator.erl
new file mode 100644
index 0000000000..02d0258d3c
--- /dev/null
+++ b/deps/rabbit/src/rabbit_exchange_decorator.erl
@@ -0,0 +1,105 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_exchange_decorator).
+
+-include("rabbit.hrl").
+
+-export([select/2, set/1]).
+
+-behaviour(rabbit_registry_class).
+
+-export([added_to_rabbit_registry/2, removed_from_rabbit_registry/1]).
+
+%% This is like an exchange type except that:
+%%
+%% 1) It applies to all exchanges as soon as it is installed, therefore
+%% 2) It is not allowed to affect validation, so no validate/1 or
+%% assert_args_equivalence/2
+%%
+%% It's possible in the future we might make decorators
+%% able to manipulate messages as they are published.
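+%%
+%% A minimal no-op decorator implementing this behaviour might look like
+%% the following sketch (module name hypothetical; exports omitted):
+%%
+%%   -module(my_noop_decorator).
+%%   -behaviour(rabbit_exchange_decorator).
+%%
+%%   description() -> [{description, <<"no-op decorator">>}].
+%%   serialise_events(_X) -> false.
+%%   create(_Tx, _X) -> ok.
+%%   delete(_Tx, _X, _Bs) -> ok.
+%%   policy_changed(_X1, _X2) -> ok.
+%%   add_binding(_Serial, _X, _B) -> ok.
+%%   remove_bindings(_Serial, _X, _Bs) -> ok.
+%%   route(_X, _Delivery) -> [].
+%%   active_for(_X) -> none.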
+
+-type(tx() :: 'transaction' | 'none').
+-type(serial() :: pos_integer() | tx()).
+
+-callback description() -> [proplists:property()].
+
+%% Should Rabbit ensure that all binding events that are
+%% delivered to an individual exchange can be serialised? (they
+%% might still be delivered out of order, but there'll be a
+%% serial number).
+-callback serialise_events(rabbit_types:exchange()) -> boolean().
+
+%% called after declaration and recovery
+-callback create(tx(), rabbit_types:exchange()) -> 'ok'.
+
+%% called after exchange (auto)deletion.
+-callback delete(tx(), rabbit_types:exchange(), [rabbit_types:binding()]) ->
+ 'ok'.
+
+%% called when the policy attached to this exchange changes.
+-callback policy_changed(rabbit_types:exchange(), rabbit_types:exchange()) ->
+ 'ok'.
+
+%% called after a binding has been added or recovered
+-callback add_binding(serial(), rabbit_types:exchange(),
+ rabbit_types:binding()) -> 'ok'.
+
+%% called after bindings have been deleted.
+-callback remove_bindings(serial(), rabbit_types:exchange(),
+ [rabbit_types:binding()]) -> 'ok'.
+
+%% Allows additional destinations to be added to the routing decision.
+-callback route(rabbit_types:exchange(), rabbit_types:delivery()) ->
+ [rabbit_amqqueue:name() | rabbit_exchange:name()].
+
+%% Whether the decorator wishes to receive callbacks for the exchange:
+%% 'none' - no callbacks; 'noroute' - all callbacks except route;
+%% 'all' - all callbacks.
+-callback active_for(rabbit_types:exchange()) -> 'none' | 'noroute' | 'all'.
+
+%%----------------------------------------------------------------------------
+
+added_to_rabbit_registry(_Type, _ModuleName) ->
+ [maybe_recover(X) || X <- rabbit_exchange:list()],
+ ok.
+removed_from_rabbit_registry(_Type) ->
+ [maybe_recover(X) || X <- rabbit_exchange:list()],
+ ok.
+
+%% select a subset of active decorators
+select(all, {Route, NoRoute}) -> filter(Route ++ NoRoute);
+select(route, {Route, _NoRoute}) -> filter(Route);
+select(raw, {Route, NoRoute}) -> Route ++ NoRoute.
+
+filter(Modules) ->
+ [M || M <- Modules, code:which(M) =/= non_existing].
+
+set(X) ->
+ Decs = lists:foldl(fun (D, {Route, NoRoute}) ->
+ ActiveFor = D:active_for(X),
+ {cons_if_eq(all, ActiveFor, D, Route),
+ cons_if_eq(noroute, ActiveFor, D, NoRoute)}
+ end, {[], []}, list()),
+ X#exchange{decorators = Decs}.
+
+list() -> [M || {_, M} <- rabbit_registry:lookup_all(exchange_decorator)].
+
+cons_if_eq(Select, Select, Item, List) -> [Item | List];
+cons_if_eq(_Select, _Other, _Item, List) -> List.
+
+maybe_recover(X = #exchange{name = Name,
+ decorators = Decs}) ->
+ #exchange{decorators = Decs1} = set(X),
+ Old = lists:sort(select(all, Decs)),
+ New = lists:sort(select(all, Decs1)),
+ case New of
+ Old -> ok;
+ _ -> %% TODO create a tx here for non-federation decorators
+ [M:create(none, X) || M <- New -- Old],
+ rabbit_exchange:update_decorators(Name)
+ end.
diff --git a/deps/rabbit/src/rabbit_exchange_parameters.erl b/deps/rabbit/src/rabbit_exchange_parameters.erl
new file mode 100644
index 0000000000..f9de648cfa
--- /dev/null
+++ b/deps/rabbit/src/rabbit_exchange_parameters.erl
@@ -0,0 +1,39 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_exchange_parameters).
+
+-behaviour(rabbit_runtime_parameter).
+
+-include("rabbit.hrl").
+
+-export([register/0]).
+-export([validate/5, notify/5, notify_clear/4]).
+
+-rabbit_boot_step({?MODULE,
+ [{description, "exchange parameters"},
+ {mfa, {rabbit_exchange_parameters, register, []}},
+ {requires, rabbit_registry},
+ {enables, recovery}]}).
+
+register() ->
+ rabbit_registry:register(runtime_parameter,
+ ?EXCHANGE_DELETE_IN_PROGRESS_COMPONENT, ?MODULE),
+ %% ensure there are no leftovers from before node restart/crash
+ rabbit_runtime_parameters:clear_component(
+ ?EXCHANGE_DELETE_IN_PROGRESS_COMPONENT,
+ ?INTERNAL_USER),
+ ok.
+
+validate(_VHost, ?EXCHANGE_DELETE_IN_PROGRESS_COMPONENT, _Name, _Term, _User) ->
+ ok.
+
+notify(_VHost, ?EXCHANGE_DELETE_IN_PROGRESS_COMPONENT, _Name, _Term, _Username) ->
+ ok.
+
+notify_clear(_VHost, ?EXCHANGE_DELETE_IN_PROGRESS_COMPONENT, _Name, _Username) ->
+ ok.
diff --git a/deps/rabbit/src/rabbit_exchange_type_direct.erl b/deps/rabbit/src/rabbit_exchange_type_direct.erl
new file mode 100644
index 0000000000..3f4350e7b0
--- /dev/null
+++ b/deps/rabbit/src/rabbit_exchange_type_direct.erl
@@ -0,0 +1,46 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_exchange_type_direct).
+-include("rabbit.hrl").
+
+-behaviour(rabbit_exchange_type).
+
+-export([description/0, serialise_events/0, route/2]).
+-export([validate/1, validate_binding/2,
+ create/2, delete/3, policy_changed/2, add_binding/3,
+ remove_bindings/3, assert_args_equivalence/2]).
+-export([info/1, info/2]).
+
+-rabbit_boot_step({?MODULE,
+ [{description, "exchange type direct"},
+ {mfa, {rabbit_registry, register,
+ [exchange, <<"direct">>, ?MODULE]}},
+ {requires, rabbit_registry},
+ {enables, kernel_ready}]}).
+
+info(_X) -> [].
+info(_X, _) -> [].
+
+description() ->
+ [{description, <<"AMQP direct exchange, as per the AMQP specification">>}].
+
+serialise_events() -> false.
+
+route(#exchange{name = Name},
+ #delivery{message = #basic_message{routing_keys = Routes}}) ->
+ rabbit_router:match_routing_key(Name, Routes).
+
+validate(_X) -> ok.
+validate_binding(_X, _B) -> ok.
+create(_Tx, _X) -> ok.
+delete(_Tx, _X, _Bs) -> ok.
+policy_changed(_X1, _X2) -> ok.
+add_binding(_Tx, _X, _B) -> ok.
+remove_bindings(_Tx, _X, _Bs) -> ok.
+assert_args_equivalence(X, Args) ->
+ rabbit_exchange:assert_args_equivalence(X, Args).
diff --git a/deps/rabbit/src/rabbit_exchange_type_fanout.erl b/deps/rabbit/src/rabbit_exchange_type_fanout.erl
new file mode 100644
index 0000000000..a8778cf0c7
--- /dev/null
+++ b/deps/rabbit/src/rabbit_exchange_type_fanout.erl
@@ -0,0 +1,45 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_exchange_type_fanout).
+-include("rabbit.hrl").
+
+-behaviour(rabbit_exchange_type).
+
+-export([description/0, serialise_events/0, route/2]).
+-export([validate/1, validate_binding/2,
+ create/2, delete/3, policy_changed/2, add_binding/3,
+ remove_bindings/3, assert_args_equivalence/2]).
+-export([info/1, info/2]).
+
+-rabbit_boot_step({?MODULE,
+ [{description, "exchange type fanout"},
+ {mfa, {rabbit_registry, register,
+ [exchange, <<"fanout">>, ?MODULE]}},
+ {requires, rabbit_registry},
+ {enables, kernel_ready}]}).
+
+info(_X) -> [].
+info(_X, _) -> [].
+
+description() ->
+ [{description, <<"AMQP fanout exchange, as per the AMQP specification">>}].
+
+serialise_events() -> false.
+
+route(#exchange{name = Name}, _Delivery) ->
+ rabbit_router:match_routing_key(Name, ['_']).
+
+validate(_X) -> ok.
+validate_binding(_X, _B) -> ok.
+create(_Tx, _X) -> ok.
+delete(_Tx, _X, _Bs) -> ok.
+policy_changed(_X1, _X2) -> ok.
+add_binding(_Tx, _X, _B) -> ok.
+remove_bindings(_Tx, _X, _Bs) -> ok.
+assert_args_equivalence(X, Args) ->
+ rabbit_exchange:assert_args_equivalence(X, Args).
diff --git a/deps/rabbit/src/rabbit_exchange_type_headers.erl b/deps/rabbit/src/rabbit_exchange_type_headers.erl
new file mode 100644
index 0000000000..e40195de7a
--- /dev/null
+++ b/deps/rabbit/src/rabbit_exchange_type_headers.erl
@@ -0,0 +1,136 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_exchange_type_headers).
+-include("rabbit.hrl").
+-include("rabbit_framing.hrl").
+
+-behaviour(rabbit_exchange_type).
+
+-export([description/0, serialise_events/0, route/2]).
+-export([validate/1, validate_binding/2,
+ create/2, delete/3, policy_changed/2, add_binding/3,
+ remove_bindings/3, assert_args_equivalence/2]).
+-export([info/1, info/2]).
+
+-rabbit_boot_step({?MODULE,
+ [{description, "exchange type headers"},
+ {mfa, {rabbit_registry, register,
+ [exchange, <<"headers">>, ?MODULE]}},
+ {requires, rabbit_registry},
+ {enables, kernel_ready}]}).
+
+info(_X) -> [].
+info(_X, _) -> [].
+
+description() ->
+ [{description, <<"AMQP headers exchange, as per the AMQP specification">>}].
+
+serialise_events() -> false.
+
+route(#exchange{name = Name},
+ #delivery{message = #basic_message{content = Content}}) ->
+ Headers = case (Content#content.properties)#'P_basic'.headers of
+ undefined -> [];
+ H -> rabbit_misc:sort_field_table(H)
+ end,
+ rabbit_router:match_bindings(
+ Name, fun (#binding{args = Spec}) -> headers_match(Spec, Headers) end).
+
+validate_binding(_X, #binding{args = Args}) ->
+ case rabbit_misc:table_lookup(Args, <<"x-match">>) of
+ {longstr, <<"all">>} -> ok;
+ {longstr, <<"any">>} -> ok;
+ {longstr, Other} -> {error,
+ {binding_invalid,
+ "Invalid x-match field value ~p; "
+ "expected all or any", [Other]}};
+ {Type, Other} -> {error,
+ {binding_invalid,
+ "Invalid x-match field type ~p (value ~p); "
+ "expected longstr", [Type, Other]}};
+ undefined -> ok %% [0]
+ end.
+%% [0] the spec is vague on whether x-match can be omitted, but in
+%% practice it's useful to allow people to do this
+
+parse_x_match({longstr, <<"all">>}) -> all;
+parse_x_match({longstr, <<"any">>}) -> any;
+parse_x_match(_) -> all. %% legacy; we didn't validate
+
+%% Horrendous matching algorithm. Depends for its merge-like
+%% (linear-time) behaviour on the lists:keysort
+%% (rabbit_misc:sort_field_table) that route/1 and
+%% rabbit_binding:{add,remove}/2 do.
+%%
+%% !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+%% In other words: REQUIRES BOTH PATTERN AND DATA TO BE SORTED ASCENDING BY KEY.
+%% !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+%%
+
+-spec headers_match
+ (rabbit_framing:amqp_table(), rabbit_framing:amqp_table()) ->
+ boolean().
+
+headers_match(Args, Data) ->
+ MK = parse_x_match(rabbit_misc:table_lookup(Args, <<"x-match">>)),
+ headers_match(Args, Data, true, false, MK).
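+
+%% Illustrative example (hypothetical tables, already sorted by key):
+%%   headers_match([{<<"format">>,  longstr, <<"pdf">>},
+%%                  {<<"x-match">>, longstr, <<"any">>}],
+%%                 [{<<"format">>,  longstr, <<"pdf">>},
+%%                  {<<"type">>,    longstr, <<"report">>}])
+%%   returns true: with "any" semantics one matching pair suffices, and
+%%   "x-"-prefixed pattern keys are skipped during matching.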
+
+% A bit less horrendous algorithm :)
+headers_match(_, _, false, _, all) -> false;
+headers_match(_, _, _, true, any) -> true;
+
+% No more bindings, return current state
+headers_match([], _Data, AllMatch, _AnyMatch, all) -> AllMatch;
+headers_match([], _Data, _AllMatch, AnyMatch, any) -> AnyMatch;
+
+% Skip pattern fields starting with x-
+headers_match([{<<"x-", _/binary>>, _PT, _PV} | PRest], Data,
+ AllMatch, AnyMatch, MatchKind) ->
+ headers_match(PRest, Data, AllMatch, AnyMatch, MatchKind);
+
+% No more data, but still bindings, false with all
+headers_match(_Pattern, [], _AllMatch, AnyMatch, MatchKind) ->
+ headers_match([], [], false, AnyMatch, MatchKind);
+
+% Data key header not in binding, go next data
+headers_match(Pattern = [{PK, _PT, _PV} | _], [{DK, _DT, _DV} | DRest],
+ AllMatch, AnyMatch, MatchKind) when PK > DK ->
+ headers_match(Pattern, DRest, AllMatch, AnyMatch, MatchKind);
+
+% Binding key header not in data, false with all, go next binding
+headers_match([{PK, _PT, _PV} | PRest], Data = [{DK, _DT, _DV} | _],
+ _AllMatch, AnyMatch, MatchKind) when PK < DK ->
+ headers_match(PRest, Data, false, AnyMatch, MatchKind);
+
+%% It's not properly specified, but a "no value" in a
+%% pattern field is supposed to mean simple presence of
+%% the corresponding data field. I've interpreted that to
+%% mean a type of "void" for the pattern field.
+headers_match([{PK, void, _PV} | PRest], [{DK, _DT, _DV} | DRest],
+ AllMatch, _AnyMatch, MatchKind) when PK == DK ->
+ headers_match(PRest, DRest, AllMatch, true, MatchKind);
+
+% Complete match, true with any, go next
+headers_match([{PK, _PT, PV} | PRest], [{DK, _DT, DV} | DRest],
+ AllMatch, _AnyMatch, MatchKind) when PK == DK andalso PV == DV ->
+ headers_match(PRest, DRest, AllMatch, true, MatchKind);
+
+% Value does not match, false with all, go next
+headers_match([{PK, _PT, _PV} | PRest], [{DK, _DT, _DV} | DRest],
+ _AllMatch, AnyMatch, MatchKind) when PK == DK ->
+ headers_match(PRest, DRest, false, AnyMatch, MatchKind).
+
+
+validate(_X) -> ok.
+create(_Tx, _X) -> ok.
+delete(_Tx, _X, _Bs) -> ok.
+policy_changed(_X1, _X2) -> ok.
+add_binding(_Tx, _X, _B) -> ok.
+remove_bindings(_Tx, _X, _Bs) -> ok.
+assert_args_equivalence(X, Args) ->
+ rabbit_exchange:assert_args_equivalence(X, Args).
diff --git a/deps/rabbit/src/rabbit_exchange_type_invalid.erl b/deps/rabbit/src/rabbit_exchange_type_invalid.erl
new file mode 100644
index 0000000000..3fa27d28e9
--- /dev/null
+++ b/deps/rabbit/src/rabbit_exchange_type_invalid.erl
@@ -0,0 +1,45 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_exchange_type_invalid).
+-include("rabbit.hrl").
+
+-behaviour(rabbit_exchange_type).
+
+-export([description/0, serialise_events/0, route/2]).
+-export([validate/1, validate_binding/2,
+ create/2, delete/3, policy_changed/2, add_binding/3,
+ remove_bindings/3, assert_args_equivalence/2]).
+-export([info/1, info/2]).
+
+info(_X) -> [].
+info(_X, _) -> [].
+
+description() ->
+ [{description,
+ <<"Dummy exchange type, to be used when the intended one is not found.">>
+ }].
+
+serialise_events() -> false.
+
+-spec route(rabbit_types:exchange(), rabbit_types:delivery()) -> no_return().
+
+route(#exchange{name = Name, type = Type}, _) ->
+ rabbit_misc:protocol_error(
+ precondition_failed,
+ "Cannot route message through ~s: exchange type ~s not found",
+ [rabbit_misc:rs(Name), Type]).
+
+validate(_X) -> ok.
+validate_binding(_X, _B) -> ok.
+create(_Tx, _X) -> ok.
+delete(_Tx, _X, _Bs) -> ok.
+policy_changed(_X1, _X2) -> ok.
+add_binding(_Tx, _X, _B) -> ok.
+remove_bindings(_Tx, _X, _Bs) -> ok.
+assert_args_equivalence(X, Args) ->
+ rabbit_exchange:assert_args_equivalence(X, Args).
diff --git a/deps/rabbit/src/rabbit_exchange_type_topic.erl b/deps/rabbit/src/rabbit_exchange_type_topic.erl
new file mode 100644
index 0000000000..38b05895f2
--- /dev/null
+++ b/deps/rabbit/src/rabbit_exchange_type_topic.erl
@@ -0,0 +1,266 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_exchange_type_topic).
+
+-include("rabbit.hrl").
+
+-behaviour(rabbit_exchange_type).
+
+-export([description/0, serialise_events/0, route/2]).
+-export([validate/1, validate_binding/2,
+ create/2, delete/3, policy_changed/2, add_binding/3,
+ remove_bindings/3, assert_args_equivalence/2]).
+-export([info/1, info/2]).
+
+-rabbit_boot_step({?MODULE,
+ [{description, "exchange type topic"},
+ {mfa, {rabbit_registry, register,
+ [exchange, <<"topic">>, ?MODULE]}},
+ {requires, rabbit_registry},
+ {enables, kernel_ready}]}).
+
+%%----------------------------------------------------------------------------
+
+info(_X) -> [].
+info(_X, _) -> [].
+
+description() ->
+ [{description, <<"AMQP topic exchange, as per the AMQP specification">>}].
+
+serialise_events() -> false.
+
+%% NB: This may return duplicate results in some situations (that's ok)
+route(#exchange{name = X},
+ #delivery{message = #basic_message{routing_keys = Routes}}) ->
+ lists:append([begin
+ Words = split_topic_key(RKey),
+ mnesia:async_dirty(fun trie_match/2, [X, Words])
+ end || RKey <- Routes]).
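+
+%% For illustration: with a binding whose key is "a.#", a message
+%% published with routing key "a.b.c" walks the trie from the root to
+%% the "a" node and then into "#", which (via trie_match_skip_any/4
+%% below) absorbs any number of remaining words.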
+
+validate(_X) -> ok.
+validate_binding(_X, _B) -> ok.
+create(_Tx, _X) -> ok.
+
+delete(transaction, #exchange{name = X}, _Bs) ->
+ trie_remove_all_nodes(X),
+ trie_remove_all_edges(X),
+ trie_remove_all_bindings(X),
+ ok;
+delete(none, _Exchange, _Bs) ->
+ ok.
+
+policy_changed(_X1, _X2) -> ok.
+
+add_binding(transaction, _Exchange, Binding) ->
+ internal_add_binding(Binding);
+add_binding(none, _Exchange, _Binding) ->
+ ok.
+
+remove_bindings(transaction, _X, Bs) ->
+ %% See rabbit_binding:lock_route_tables for the rationale for
+ %% taking table locks.
+ case Bs of
+ [_] -> ok;
+ _ -> [mnesia:lock({table, T}, write) ||
+ T <- [rabbit_topic_trie_node,
+ rabbit_topic_trie_edge,
+ rabbit_topic_trie_binding]]
+ end,
+ [case follow_down_get_path(X, split_topic_key(K)) of
+ {ok, Path = [{FinalNode, _} | _]} ->
+ trie_remove_binding(X, FinalNode, D, Args),
+ remove_path_if_empty(X, Path);
+ {error, _Node, _RestW} ->
+ %% We're trying to remove a binding that no longer exists.
+ %% That's unexpected, but shouldn't be a problem.
+ ok
+ end || #binding{source = X, key = K, destination = D, args = Args} <- Bs],
+ ok;
+remove_bindings(none, _X, _Bs) ->
+ ok.
+
+assert_args_equivalence(X, Args) ->
+ rabbit_exchange:assert_args_equivalence(X, Args).
+
+%%----------------------------------------------------------------------------
+
+internal_add_binding(#binding{source = X, key = K, destination = D,
+ args = Args}) ->
+ FinalNode = follow_down_create(X, split_topic_key(K)),
+ trie_add_binding(X, FinalNode, D, Args),
+ ok.
+
+trie_match(X, Words) ->
+ trie_match(X, root, Words, []).
+
+trie_match(X, Node, [], ResAcc) ->
+ trie_match_part(X, Node, "#", fun trie_match_skip_any/4, [],
+ trie_bindings(X, Node) ++ ResAcc);
+trie_match(X, Node, [W | RestW] = Words, ResAcc) ->
+ lists:foldl(fun ({WArg, MatchFun, RestWArg}, Acc) ->
+ trie_match_part(X, Node, WArg, MatchFun, RestWArg, Acc)
+ end, ResAcc, [{W, fun trie_match/4, RestW},
+ {"*", fun trie_match/4, RestW},
+ {"#", fun trie_match_skip_any/4, Words}]).
+
+trie_match_part(X, Node, Search, MatchFun, RestW, ResAcc) ->
+ case trie_child(X, Node, Search) of
+ {ok, NextNode} -> MatchFun(X, NextNode, RestW, ResAcc);
+ error -> ResAcc
+ end.
+
+trie_match_skip_any(X, Node, [], ResAcc) ->
+ trie_match(X, Node, [], ResAcc);
+trie_match_skip_any(X, Node, [_ | RestW] = Words, ResAcc) ->
+ trie_match_skip_any(X, Node, RestW,
+ trie_match(X, Node, Words, ResAcc)).
+
+follow_down_create(X, Words) ->
+ case follow_down_last_node(X, Words) of
+ {ok, FinalNode} -> FinalNode;
+ {error, Node, RestW} -> lists:foldl(
+ fun (W, CurNode) ->
+ NewNode = new_node_id(),
+ trie_add_edge(X, CurNode, NewNode, W),
+ NewNode
+ end, Node, RestW)
+ end.
+
+follow_down_last_node(X, Words) ->
+ follow_down(X, fun (_, Node, _) -> Node end, root, Words).
+
+follow_down_get_path(X, Words) ->
+ follow_down(X, fun (W, Node, PathAcc) -> [{Node, W} | PathAcc] end,
+ [{root, none}], Words).
+
+follow_down(X, AccFun, Acc0, Words) ->
+ follow_down(X, root, AccFun, Acc0, Words).
+
+follow_down(_X, _CurNode, _AccFun, Acc, []) ->
+ {ok, Acc};
+follow_down(X, CurNode, AccFun, Acc, Words = [W | RestW]) ->
+ case trie_child(X, CurNode, W) of
+ {ok, NextNode} -> follow_down(X, NextNode, AccFun,
+ AccFun(W, NextNode, Acc), RestW);
+ error -> {error, Acc, Words}
+ end.
+
+remove_path_if_empty(_, [{root, none}]) ->
+ ok;
+remove_path_if_empty(X, [{Node, W} | [{Parent, _} | _] = RestPath]) ->
+ case mnesia:read(rabbit_topic_trie_node,
+ #trie_node{exchange_name = X, node_id = Node}, write) of
+ [] -> trie_remove_edge(X, Parent, Node, W),
+ remove_path_if_empty(X, RestPath);
+ _ -> ok
+ end.
+
+trie_child(X, Node, Word) ->
+ case mnesia:read({rabbit_topic_trie_edge,
+ #trie_edge{exchange_name = X,
+ node_id = Node,
+ word = Word}}) of
+ [#topic_trie_edge{node_id = NextNode}] -> {ok, NextNode};
+ [] -> error
+ end.
+
+trie_bindings(X, Node) ->
+ MatchHead = #topic_trie_binding{
+ trie_binding = #trie_binding{exchange_name = X,
+ node_id = Node,
+ destination = '$1',
+ arguments = '_'}},
+ mnesia:select(rabbit_topic_trie_binding, [{MatchHead, [], ['$1']}]).
+
+trie_update_node_counts(X, Node, Field, Delta) ->
+ E = case mnesia:read(rabbit_topic_trie_node,
+ #trie_node{exchange_name = X,
+ node_id = Node}, write) of
+ [] -> #topic_trie_node{trie_node = #trie_node{
+ exchange_name = X,
+ node_id = Node},
+ edge_count = 0,
+ binding_count = 0};
+ [E0] -> E0
+ end,
+ case setelement(Field, E, element(Field, E) + Delta) of
+ #topic_trie_node{edge_count = 0, binding_count = 0} ->
+ ok = mnesia:delete_object(rabbit_topic_trie_node, E, write);
+ EN ->
+ ok = mnesia:write(rabbit_topic_trie_node, EN, write)
+ end.
+
+trie_add_edge(X, FromNode, ToNode, W) ->
+ trie_update_node_counts(X, FromNode, #topic_trie_node.edge_count, +1),
+ trie_edge_op(X, FromNode, ToNode, W, fun mnesia:write/3).
+
+trie_remove_edge(X, FromNode, ToNode, W) ->
+ trie_update_node_counts(X, FromNode, #topic_trie_node.edge_count, -1),
+ trie_edge_op(X, FromNode, ToNode, W, fun mnesia:delete_object/3).
+
+trie_edge_op(X, FromNode, ToNode, W, Op) ->
+ ok = Op(rabbit_topic_trie_edge,
+ #topic_trie_edge{trie_edge = #trie_edge{exchange_name = X,
+ node_id = FromNode,
+ word = W},
+ node_id = ToNode},
+ write).
+
+trie_add_binding(X, Node, D, Args) ->
+ trie_update_node_counts(X, Node, #topic_trie_node.binding_count, +1),
+ trie_binding_op(X, Node, D, Args, fun mnesia:write/3).
+
+trie_remove_binding(X, Node, D, Args) ->
+ trie_update_node_counts(X, Node, #topic_trie_node.binding_count, -1),
+ trie_binding_op(X, Node, D, Args, fun mnesia:delete_object/3).
+
+trie_binding_op(X, Node, D, Args, Op) ->
+ ok = Op(rabbit_topic_trie_binding,
+ #topic_trie_binding{
+ trie_binding = #trie_binding{exchange_name = X,
+ node_id = Node,
+ destination = D,
+ arguments = Args}},
+ write).
+
+trie_remove_all_nodes(X) ->
+ remove_all(rabbit_topic_trie_node,
+ #topic_trie_node{trie_node = #trie_node{exchange_name = X,
+ _ = '_'},
+ _ = '_'}).
+
+trie_remove_all_edges(X) ->
+ remove_all(rabbit_topic_trie_edge,
+ #topic_trie_edge{trie_edge = #trie_edge{exchange_name = X,
+ _ = '_'},
+ _ = '_'}).
+
+trie_remove_all_bindings(X) ->
+ remove_all(rabbit_topic_trie_binding,
+ #topic_trie_binding{
+ trie_binding = #trie_binding{exchange_name = X, _ = '_'},
+ _ = '_'}).
+
+remove_all(Table, Pattern) ->
+ lists:foreach(fun (R) -> mnesia:delete_object(Table, R, write) end,
+ mnesia:match_object(Table, Pattern, write)).
+
+new_node_id() ->
+ rabbit_guid:gen().
+
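+%% For illustration: split_topic_key(<<"a.b.c">>) yields ["a", "b", "c"],
+%% and split_topic_key(<<>>) yields [].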
+split_topic_key(Key) ->
+ split_topic_key(Key, [], []).
+
+split_topic_key(<<>>, [], []) ->
+ [];
+split_topic_key(<<>>, RevWordAcc, RevResAcc) ->
+ lists:reverse([lists:reverse(RevWordAcc) | RevResAcc]);
+split_topic_key(<<$., Rest/binary>>, RevWordAcc, RevResAcc) ->
+ split_topic_key(Rest, [], [lists:reverse(RevWordAcc) | RevResAcc]);
+split_topic_key(<<C:8, Rest/binary>>, RevWordAcc, RevResAcc) ->
+ split_topic_key(Rest, [C | RevWordAcc], RevResAcc).
diff --git a/deps/rabbit/src/rabbit_feature_flags.erl b/deps/rabbit/src/rabbit_feature_flags.erl
new file mode 100644
index 0000000000..921ec9ab53
--- /dev/null
+++ b/deps/rabbit/src/rabbit_feature_flags.erl
@@ -0,0 +1,2470 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2018-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+%% @author The RabbitMQ team
+%% @copyright 2018-2020 VMware, Inc. or its affiliates.
+%%
+%% @doc
+%% This module offers a framework to declare capabilities a RabbitMQ node
+%% supports and therefore a way to determine if multiple RabbitMQ nodes in
+%% a cluster are compatible and can work together.
+%%
+%% == What a feature flag is ==
+%%
+%% A <strong>feature flag</strong> is a name and several properties given
+%% to a change in RabbitMQ which impacts its communication with other
+%% RabbitMQ nodes. This kind of change can be:
+%% <ul>
+%% <li>an update to an Erlang record</li>
+%% <li>a modification to a replicated Mnesia table schema</li>
+%% <li>a modification to Erlang messages exchanged between Erlang processes
+%% which might run on remote nodes</li>
+%% </ul>
+%%
+%% A feature flag is qualified by:
+%% <ul>
+%% <li>a <strong>name</strong></li>
+%% <li>a <strong>description</strong> (optional)</li>
+%% <li>a list of other <strong>feature flags this feature flag depends on
+%% </strong> (optional). This can be useful when the change builds up on
+%% top of a previous change. For instance, it expands a record which was
+%% already modified by a previous feature flag.</li>
+%% <li>a <strong>migration function</strong> (optional). If provided, this
+%% function is called when the feature flag is enabled. It is responsible
+%% for doing all the data conversion, if any, and confirming the feature
+%% flag can be enabled.</li>
+%% <li>a level of stability (stable or experimental). For now, this is only
+%% informational. But it might be used for specific purposes in the
+%% future.</li>
+%% </ul>
+%%
+%% == How to declare a feature flag ==
+%%
+%% To define a new feature flag, you need to use the
+%% `rabbit_feature_flag()' module attribute:
+%%
+%% ```
+%% -rabbit_feature_flag(FeatureFlag).
+%% '''
+%%
+%% `FeatureFlag' is a {@type feature_flag_modattr()}.
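+%%
+%% For example, a module could declare (the flag name and migration
+%% function below are purely illustrative):
+%%
+%% ```
+%% -rabbit_feature_flag(
+%%    {my_feature, #{desc          => "An example feature flag",
+%%                   stability     => stable,
+%%                   migration_fun => {my_app, my_migration_fun}}}).
+%% '''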
+%%
+%% == How to enable a feature flag ==
+%%
+%% To enable a supported feature flag, you have the following solutions:
+%%
+%% <ul>
+%% <li>Using this module API:
+%% ```
+%% rabbit_feature_flags:enable(FeatureFlagName).
+%% '''
+%% </li>
+%% <li>Using the `rabbitmqctl' CLI:
+%% ```
+%% rabbitmqctl enable_feature_flag "$feature_flag_name"
+%% '''
+%% </li>
+%% </ul>
+%%
+%% == How to disable a feature flag ==
+%%
+%% Once enabled, there is <strong>currently no way to disable</strong> a
+%% feature flag.
+
+-module(rabbit_feature_flags).
+
+-export([list/0,
+ list/1,
+ list/2,
+ enable/1,
+ enable_all/0,
+ disable/1,
+ disable_all/0,
+ is_supported/1,
+ is_supported/2,
+ is_supported_locally/1,
+ is_supported_remotely/1,
+ is_supported_remotely/2,
+ is_supported_remotely/3,
+ is_enabled/1,
+ is_enabled/2,
+ is_disabled/1,
+ is_disabled/2,
+ info/0,
+ info/1,
+ init/0,
+ get_state/1,
+ get_stability/1,
+ check_node_compatibility/1,
+ check_node_compatibility/2,
+ is_node_compatible/1,
+ is_node_compatible/2,
+ sync_feature_flags_with_cluster/2,
+ sync_feature_flags_with_cluster/3,
+ refresh_feature_flags_after_app_load/1,
+ enabled_feature_flags_list_file/0
+ ]).
+
+%% RabbitMQ internal use only.
+-export([initialize_registry/0,
+ initialize_registry/1,
+ mark_as_enabled_locally/2,
+ remote_nodes/0,
+ running_remote_nodes/0,
+ does_node_support/3,
+ merge_feature_flags_from_unknown_apps/1,
+ do_sync_feature_flags_with_node/1]).
+
+-ifdef(TEST).
+-export([inject_test_feature_flags/1,
+ initialize_registry/3,
+ query_supported_feature_flags/0,
+ mark_as_enabled_remotely/2,
+ mark_as_enabled_remotely/4,
+ registry_loading_lock/0]).
+-endif.
+
+%% Default timeout for operations on remote nodes.
+-define(TIMEOUT, 60000).
+
+-define(FF_REGISTRY_LOADING_LOCK, {feature_flags_registry_loading, self()}).
+-define(FF_STATE_CHANGE_LOCK, {feature_flags_state_change, self()}).
+
+-type feature_flag_modattr() :: {feature_name(),
+ feature_props()}.
+%% The value of a `-rabbit_feature_flag()' module attribute used to
+%% declare a new feature flag.
+
+-type feature_name() :: atom().
+%% The feature flag's name. It is used in many places to identify a
+%% specific feature flag. In particular, this is how an end-user (or
+%% the CLI) can enable a feature flag. This is also the only bit which
+%% is persisted, so a node remembers which feature flags are enabled.
+
+-type feature_props() :: #{desc => string(),
+ doc_url => string(),
+ stability => stability(),
+ depends_on => [feature_name()],
+ migration_fun => migration_fun_name()}.
+%% The feature flag properties.
+%%
+%% All properties are optional.
+%%
+%% The properties are:
+%% <ul>
+%% <li>`desc': a description of the feature flag</li>
+%% <li>`doc_url': a URL pointing to more documentation about the feature
+%% flag</li>
+%% <li>`stability': the level of stability</li>
+%% <li>`depends_on': a list of feature flag names which must be enabled
+%% before this one</li>
+%% <li>`migration_fun': a migration function specified by its module and
+%% function names</li>
+%% </ul>
+%%
+%% Note that the `migration_fun' is a {@type migration_fun_name()},
+%% not a {@type migration_fun()}. However, the function signature
+%% must conform to the {@type migration_fun()} signature. The reason
+%% is that we must be able to represent it as an Erlang term when
+%% we regenerate the registry module source code (using {@link
+%% erl_syntax:abstract/1}).
+
+-type feature_flags() :: #{feature_name() => feature_props_extended()}.
+%% The feature flags map as returned or accepted by several functions in
+%% this module. In particular, this is what the {@link list/0} function
+%% returns.
+
+-type feature_props_extended() :: #{desc => string(),
+ doc_url => string(),
+ stability => stability(),
+ migration_fun => migration_fun_name(),
+ depends_on => [feature_name()],
+ provided_by => atom()}.
+%% The feature flag properties, once expanded by this module when feature
+%% flags are discovered.
+%%
+%% The new properties compared to {@type feature_props()} are:
+%% <ul>
+%% <li>`provided_by': the name of the application providing the feature flag</li>
+%% </ul>
+
+-type feature_state() :: boolean() | state_changing.
+%% The state of the feature flag: enabled if `true', disabled if `false'
+%% or `state_changing'.
+
+-type feature_states() :: #{feature_name() => feature_state()}.
+
+-type stability() :: stable | experimental.
+%% The level of stability of a feature flag. Currently, only informational.
+
+-type migration_fun_name() :: {Module :: atom(), Function :: atom()}.
+%% The name of the module and function to call when changing the state of
+%% the feature flag.
+
+-type migration_fun() :: fun((feature_name(),
+ feature_props_extended(),
+ migration_fun_context())
+ -> ok | {error, any()} | % context = enable
+ boolean() | undefined). % context = is_enabled
+%% The migration function signature.
+%%
+%% It is called with context `enable' when a feature flag is being enabled.
+%% The function is responsible for this feature-flag-specific verification
+%% and data conversion. It returns `ok' if RabbitMQ can mark the feature
+%% flag as enabled and continue with the next one, if any. Otherwise, it
+%% returns `{error, any()}' if there is an error and the feature flag should
+%% remain disabled. The function must be idempotent: if the feature flag is
+%% already enabled on another node and the local node is running this function
+%% again because it is syncing its feature flags state, it should succeed.
+%%
+%% It is called with the context `is_enabled' to check if a feature flag
+%% is actually enabled. It is useful on RabbitMQ startup, just in case
+%% the previous instance failed to write the feature flags list file.
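+%%
+%% A minimal sketch of such a function (all names are hypothetical):
+%%
+%% ```
+%% my_feature_migration(_FeatureName, _FeatureProps, enable) ->
+%%     %% Do the feature-flag-specific data conversion here; return
+%%     %% `ok' on success or `{error, Reason}' to keep the feature
+%%     %% flag disabled.
+%%     ok;
+%% my_feature_migration(_FeatureName, _FeatureProps, is_enabled) ->
+%%     %% Report whether the feature flag is effectively enabled;
+%%     %% return a boolean, or `undefined' if it cannot be determined.
+%%     undefined.
+%% '''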
+
+-type migration_fun_context() :: enable | is_enabled.
+
+-type registry_vsn() :: term().
+
+-export_type([feature_flag_modattr/0,
+ feature_props/0,
+ feature_name/0,
+ feature_flags/0,
+ feature_props_extended/0,
+ feature_state/0,
+ feature_states/0,
+ stability/0,
+ migration_fun_name/0,
+ migration_fun/0,
+ migration_fun_context/0]).
+
+-on_load(on_load/0).
+
+-spec list() -> feature_flags().
+%% @doc
+%% Lists all supported feature flags.
+%%
+%% @returns A map of all supported feature flags.
+
+list() -> list(all).
+
+-spec list(Which :: all | enabled | disabled) -> feature_flags().
+%% @doc
+%% Lists all, enabled or disabled feature flags, depending on the argument.
+%%
+%% @param Which The group of feature flags to return: `all', `enabled' or
+%% `disabled'.
+%% @returns A map of selected feature flags.
+
+list(all) -> rabbit_ff_registry:list(all);
+list(enabled) -> rabbit_ff_registry:list(enabled);
+list(disabled) -> maps:filter(
+ fun(FeatureName, _) -> is_disabled(FeatureName) end,
+ list(all)).
+
+-spec list(all | enabled | disabled, stability()) -> feature_flags().
+%% @doc
+%% Lists all, enabled or disabled feature flags, depending on the first
+%% argument, only keeping those having the specified stability.
+%%
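+%% For example, to list stable feature flags which are still disabled
+%% (an illustrative call):
+%%
+%% ```
+%% DisabledStable = rabbit_feature_flags:list(disabled, stable).
+%% '''
+%%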
+%% @param Which The group of feature flags to return: `all', `enabled' or
+%% `disabled'.
+%% @param Stability The level of stability used to filter the map of feature
+%% flags.
+%% @returns A map of selected feature flags.
+
+list(Which, Stability)
+ when Stability =:= stable orelse Stability =:= experimental ->
+ maps:filter(fun(_, FeatureProps) ->
+ Stability =:= get_stability(FeatureProps)
+ end, list(Which)).
+
+-spec enable(feature_name() | [feature_name()]) -> ok |
+ {error, Reason :: any()}.
+%% @doc
+%% Enables the specified feature flag or set of feature flags.
+%%
+%% @param FeatureName The name or the list of names of feature flags to
+%% enable.
+%% @returns `ok' if the feature flags (and all the feature flags they
+%% depend on) were successfully enabled, or `{error, Reason}' if one
+%% feature flag could not be enabled (subsequent feature flags in the
+%% dependency tree are left unchanged).
+
+enable(FeatureName) when is_atom(FeatureName) ->
+ rabbit_log_feature_flags:debug(
+ "Feature flag `~s`: REQUEST TO ENABLE",
+ [FeatureName]),
+ case is_enabled(FeatureName) of
+ true ->
+ rabbit_log_feature_flags:debug(
+ "Feature flag `~s`: already enabled",
+ [FeatureName]),
+ ok;
+ false ->
+ rabbit_log_feature_flags:debug(
+ "Feature flag `~s`: not enabled, check if supported by cluster",
+ [FeatureName]),
+ %% The feature flag must be supported locally and remotely
+ %% (i.e. by all members of the cluster).
+ case is_supported(FeatureName) of
+ true ->
+ rabbit_log_feature_flags:info(
+ "Feature flag `~s`: supported, attempt to enable...",
+ [FeatureName]),
+ do_enable(FeatureName);
+ false ->
+ rabbit_log_feature_flags:error(
+ "Feature flag `~s`: not supported",
+ [FeatureName]),
+ {error, unsupported}
+ end
+ end;
+enable(FeatureNames) when is_list(FeatureNames) ->
+ with_feature_flags(FeatureNames, fun enable/1).
+
+-spec enable_all() -> ok | {error, any()}.
+%% @doc
+%% Enables all supported feature flags.
+%%
+%% @returns `ok' if the feature flags were successfully enabled,
+%% or `{error, Reason}' if one feature flag could not be enabled
+%% (subsequent feature flags in the dependency tree are left
+%% unchanged).
+
+enable_all() ->
+ with_feature_flags(maps:keys(list(all)), fun enable/1).
+
+-spec disable(feature_name() | [feature_name()]) -> ok | {error, any()}.
+%% @doc
+%% Disables the specified feature flag or set of feature flags.
+%%
+%% Note that, as explained above, there is currently no way to disable
+%% a feature flag once it has been enabled: this function returns
+%% `{error, unsupported}' for any given feature flag.
+%%
+%% @param FeatureName The name or the list of names of feature flags to
+%%   disable.
+%% @returns `ok' if the feature flags were successfully disabled, or
+%%   `{error, Reason}' otherwise (currently always `{error, unsupported}'
+%%   for a non-empty list of feature flags).
+
+disable(FeatureName) when is_atom(FeatureName) ->
+ {error, unsupported};
+disable(FeatureNames) when is_list(FeatureNames) ->
+ with_feature_flags(FeatureNames, fun disable/1).
+
+-spec disable_all() -> ok | {error, any()}.
+%% @doc
+%% Disables all supported feature flags.
+%%
+%% @returns `ok' if the feature flags were successfully disabled,
+%% or `{error, Reason}' if one feature flag could not be disabled
+%% (subsequent feature flags in the dependency tree are left
+%% unchanged).
+
+disable_all() ->
+ with_feature_flags(maps:keys(list(all)), fun disable/1).
+
+-spec with_feature_flags([feature_name()],
+ fun((feature_name()) -> ok | {error, any()})) ->
+ ok | {error, any()}.
+%% @private
+
+with_feature_flags([FeatureName | Rest], Fun) ->
+ case Fun(FeatureName) of
+ ok -> with_feature_flags(Rest, Fun);
+ Error -> Error
+ end;
+with_feature_flags([], _) ->
+ ok.
+
+-spec is_supported(feature_name() | [feature_name()]) -> boolean().
+%% @doc
+%% Returns whether a single feature flag or a set of feature flags is
+%% supported by the entire cluster.
+%%
+%% This is the same as calling both {@link is_supported_locally/1} and
+%% {@link is_supported_remotely/1} with a logical AND.
+%%
+%% @param FeatureNames The name or a list of names of the feature flag(s)
+%% to be checked.
+%% @returns `true' if the set of feature flags is entirely supported, or
+%% `false' if one of them is not or the RPC timed out.
+
+is_supported(FeatureNames) ->
+ is_supported_locally(FeatureNames) andalso
+ is_supported_remotely(FeatureNames).
+
+-spec is_supported(feature_name() | [feature_name()], timeout()) ->
+ boolean().
+%% @doc
+%% Returns whether a single feature flag or a set of feature flags is
+%% supported by the entire cluster.
+%%
+%% This is the same as calling both {@link is_supported_locally/1} and
+%% {@link is_supported_remotely/2} with a logical AND.
+%%
+%% @param FeatureNames The name or a list of names of the feature flag(s)
+%% to be checked.
+%% @param Timeout Time in milliseconds after which the RPC gives up.
+%% @returns `true' if the set of feature flags is entirely supported, or
+%% `false' if one of them is not or the RPC timed out.
+
+is_supported(FeatureNames, Timeout) ->
+ is_supported_locally(FeatureNames) andalso
+ is_supported_remotely(FeatureNames, Timeout).
+
+-spec is_supported_locally(feature_name() | [feature_name()]) -> boolean().
+%% @doc
+%% Returns whether a single feature flag or a set of feature flags is
+%% supported by the local node.
+%%
+%% @param FeatureNames The name or a list of names of the feature flag(s)
+%% to be checked.
+%% @returns `true' if the set of feature flags is entirely supported, or
+%% `false' if one of them is not.
+
+is_supported_locally(FeatureName) when is_atom(FeatureName) ->
+ rabbit_ff_registry:is_supported(FeatureName);
+is_supported_locally(FeatureNames) when is_list(FeatureNames) ->
+ lists:all(fun(F) -> rabbit_ff_registry:is_supported(F) end, FeatureNames).
+
+-spec is_supported_remotely(feature_name() | [feature_name()]) -> boolean().
+%% @doc
+%% Returns whether a single feature flag or a set of feature flags is
+%% supported by all remote nodes.
+%%
+%% @param FeatureNames The name or a list of names of the feature flag(s)
+%% to be checked.
+%% @returns `true' if the set of feature flags is entirely supported, or
+%% `false' if one of them is not or the RPC timed out.
+
+is_supported_remotely(FeatureNames) ->
+ is_supported_remotely(FeatureNames, ?TIMEOUT).
+
+-spec is_supported_remotely(feature_name() | [feature_name()], timeout()) -> boolean().
+%% @doc
+%% Returns whether a single feature flag or a set of feature flags is
+%% supported by all remote nodes.
+%%
+%% @param FeatureNames The name or a list of names of the feature flag(s)
+%% to be checked.
+%% @param Timeout Time in milliseconds after which the RPC gives up.
+%% @returns `true' if the set of feature flags is entirely supported, or
+%% `false' if one of them is not or the RPC timed out.
+
+is_supported_remotely(FeatureName, Timeout) when is_atom(FeatureName) ->
+ is_supported_remotely([FeatureName], Timeout);
+is_supported_remotely([], _) ->
+ rabbit_log_feature_flags:debug(
+ "Feature flags: skipping query for feature flags support as the "
+ "given list is empty"),
+ true;
+is_supported_remotely(FeatureNames, Timeout) when is_list(FeatureNames) ->
+ case running_remote_nodes() of
+ [] ->
+ rabbit_log_feature_flags:debug(
+ "Feature flags: isolated node; skipping remote node query "
+ "=> consider `~p` supported",
+ [FeatureNames]),
+ true;
+ RemoteNodes ->
+ rabbit_log_feature_flags:debug(
+ "Feature flags: about to query these remote nodes about "
+ "support for `~p`: ~p",
+ [FeatureNames, RemoteNodes]),
+ is_supported_remotely(RemoteNodes, FeatureNames, Timeout)
+ end.
+
+-spec is_supported_remotely([node()],
+ feature_name() | [feature_name()],
+ timeout()) -> boolean().
+%% @doc
+%% Returns whether a single feature flag or a set of feature flags is
+%% supported by specified remote nodes.
+%%
+%% @param RemoteNodes The list of remote nodes to query.
+%% @param FeatureNames The name or a list of names of the feature flag(s)
+%% to be checked.
+%% @param Timeout Time in milliseconds after which the RPC gives up.
+%% @returns `true' if the set of feature flags is entirely supported by
+%% all nodes, or `false' if one of them is not or the RPC timed out.
+
+is_supported_remotely(_, [], _) ->
+ rabbit_log_feature_flags:debug(
+ "Feature flags: skipping query for feature flags support as the "
+ "given list is empty"),
+ true;
+is_supported_remotely([Node | Rest], FeatureNames, Timeout) ->
+ case does_node_support(Node, FeatureNames, Timeout) of
+ true ->
+ is_supported_remotely(Rest, FeatureNames, Timeout);
+ false ->
+ rabbit_log_feature_flags:debug(
+ "Feature flags: stopping query for support for `~p` here",
+ [FeatureNames]),
+ false
+ end;
+is_supported_remotely([], FeatureNames, _) ->
+ rabbit_log_feature_flags:debug(
+ "Feature flags: all running remote nodes support `~p`",
+ [FeatureNames]),
+ true.
+
+-spec is_enabled(feature_name() | [feature_name()]) -> boolean().
+%% @doc
+%% Returns whether a single feature flag or a set of feature flags is
+%% enabled.
+%%
+%% This is the same as calling {@link is_enabled/2} as a `blocking'
+%% call.
+%%
+%% @param FeatureNames The name or a list of names of the feature flag(s)
+%% to be checked.
+%% @returns `true' if the set of feature flags is enabled, or
+%% `false' if one of them is not.
+
+is_enabled(FeatureNames) ->
+ is_enabled(FeatureNames, blocking).
+
+-spec is_enabled
+(feature_name() | [feature_name()], blocking) ->
+ boolean();
+(feature_name() | [feature_name()], non_blocking) ->
+ feature_state().
+%% @doc
+%% Returns whether a single feature flag or a set of feature flags is
+%% enabled.
+%%
+%% When `blocking' is passed, the function waits (blocks) until the
+%% state of a feature flag being disabled or enabled stabilizes, then
+%% returns its final state.
+%%
+%% When `non_blocking' is passed, the function returns immediately with
+%% the state of the feature flag (`true' if enabled, `false' otherwise)
+%% or `state_changing' if the state is being changed at the time of the
+%% call.
+%%
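+%% For example, a non-blocking check could be used as follows (the
+%% function names in the case clauses are illustrative):
+%%
+%% ```
+%% case rabbit_feature_flags:is_enabled(FeatureName, non_blocking) of
+%%     true           -> new_code_path();
+%%     false          -> old_code_path();
+%%     state_changing -> old_code_path()
+%% end
+%% '''
+%%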
+%% @param FeatureNames The name or a list of names of the feature flag(s)
+%% to be checked.
+%% @returns `true' if the set of feature flags is enabled,
+%% `false' if one of them is not, or `state_changing' if one of them
+%% is being worked on. Note that `state_changing' has precedence over
+%% `false', so if one is `false' and another one is `state_changing',
+%% `state_changing' is returned.
+
+is_enabled(FeatureNames, non_blocking) ->
+ is_enabled_nb(FeatureNames);
+is_enabled(FeatureNames, blocking) ->
+ case is_enabled_nb(FeatureNames) of
+ state_changing ->
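+            %% Acquiring the state-change lock blocks this process
+            %% until the process changing the feature flag state (which
+            %% holds the lock) releases it; we then release the lock
+            %% immediately and check the state again.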
+ global:set_lock(?FF_STATE_CHANGE_LOCK),
+ global:del_lock(?FF_STATE_CHANGE_LOCK),
+ is_enabled(FeatureNames, blocking);
+ IsEnabled ->
+ IsEnabled
+ end.
+
+is_enabled_nb(FeatureName) when is_atom(FeatureName) ->
+ rabbit_ff_registry:is_enabled(FeatureName);
+is_enabled_nb(FeatureNames) when is_list(FeatureNames) ->
+ lists:foldl(
+ fun
+ (_F, state_changing = Acc) ->
+ Acc;
+ (F, false = Acc) ->
+ case rabbit_ff_registry:is_enabled(F) of
+ state_changing -> state_changing;
+ _ -> Acc
+ end;
+ (F, _) ->
+ rabbit_ff_registry:is_enabled(F)
+ end,
+ true, FeatureNames).
+
+-spec is_disabled(feature_name() | [feature_name()]) -> boolean().
+%% @doc
+%% Returns whether a single feature flag, or at least one feature flag
+%% in a set of feature flags, is disabled.
+%%
+%% This is the same as negating the result of {@link is_enabled/1}.
+%%
+%% @param FeatureNames The name or a list of names of the feature flag(s)
+%% to be checked.
+%% @returns `true' if one of the feature flags is disabled, or
+%% `false' if they are all enabled.
+
+is_disabled(FeatureNames) ->
+ is_disabled(FeatureNames, blocking).
+
+-spec is_disabled
+(feature_name() | [feature_name()], blocking) ->
+ boolean();
+(feature_name() | [feature_name()], non_blocking) ->
+ feature_state().
+%% @doc
+%% Returns whether a single feature flag, or at least one feature flag
+%% in a set of feature flags, is disabled.
+%%
+%% This is the same as negating the result of {@link is_enabled/2},
+%% except that `state_changing' is returned as is.
+%%
+%% See {@link is_enabled/2} for a description of the `blocking' and
+%% `non_blocking' modes.
+%%
+%% @param FeatureNames The name or a list of names of the feature flag(s)
+%% to be checked.
+%% @returns `true' if one feature flag in the set of feature flags is
+%% disabled, `false' if they are all enabled, or `state_changing' if
+%% one of them is being worked on. Note that `state_changing' has
+%% precedence over `true', so if one is `true' (i.e. disabled) and
+%% another one is `state_changing', `state_changing' is returned.
+%%
+%% @see is_enabled/2
+
+is_disabled(FeatureName, Blocking) ->
+ case is_enabled(FeatureName, Blocking) of
+ state_changing -> state_changing;
+ IsEnabled -> not IsEnabled
+ end.
+
+-spec info() -> ok.
+%% @doc
+%% Displays a table on stdout summarizing the supported feature flags,
+%% their state and other information about them.
+
+info() ->
+ info(#{}).
+
+-spec info(#{color => boolean(),
+ lines => boolean(),
+ verbose => non_neg_integer()}) -> ok.
+%% @doc
+%% Displays a table on stdout summarizing the supported feature flags,
+%% their state and other information about them.
+%%
+%% Supported options are:
+%% <ul>
+%% <li>`color': a boolean to indicate if colors should be used to
+%% highlight some elements.</li>
+%% <li>`lines': a boolean to indicate if table borders should be drawn
+%% using ASCII lines instead of regular characters.</li>
+%% <li>`verbose': a non-negative integer to specify the level of
+%% verbosity.</li>
+%% </ul>
+%%
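+%% For example (an illustrative call):
+%% ```
+%% rabbit_feature_flags:info(#{color => false, verbose => 1}).
+%% '''
+%%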
+%% @param Options A map of various options to tune the displayed table.
+
+info(Options) when is_map(Options) ->
+ rabbit_ff_extra:info(Options).
+
+-spec get_state(feature_name()) -> enabled | disabled | unavailable.
+%% @doc
+%% Returns the state of a feature flag.
+%%
+%% The possible states are:
+%% <ul>
+%% <li>`enabled': the feature flag is enabled.</li>
+%% <li>`disabled': the feature flag is supported by all nodes in the
+%% cluster but currently disabled.</li>
+%% <li>`unavailable': the feature flag is unsupported by at least one
+%% node in the cluster and can not be enabled for now.</li>
+%% </ul>
+%%
+%% @param FeatureName The name of the feature flag to check.
+%% @returns `enabled', `disabled' or `unavailable'.
+
+get_state(FeatureName) when is_atom(FeatureName) ->
+ IsEnabled = is_enabled(FeatureName),
+ IsSupported = is_supported(FeatureName),
+ case IsEnabled of
+ true -> enabled;
+ false -> case IsSupported of
+ true -> disabled;
+ false -> unavailable
+ end
+ end.
+
+-spec get_stability(feature_name() | feature_props_extended()) -> stability().
+%% @doc
+%% Returns the stability of a feature flag.
+%%
+%% The possible stability levels are:
+%% <ul>
+%% <li>`stable': the feature flag is stable and will not change in future
+%% releases: it can be enabled in production.</li>
+%% <li>`experimental': the feature flag is experimental and may change in
+%% the future (without a guaranteed upgrade path): enabling it in
+%% production is not recommended.</li>
+%% </ul>
+%%
+%% @param FeatureName The name of the feature flag to check.
+%% @returns `stable' or `experimental'.
+
+get_stability(FeatureName) when is_atom(FeatureName) ->
+ case rabbit_ff_registry:get(FeatureName) of
+ undefined -> undefined;
+ FeatureProps -> get_stability(FeatureProps)
+ end;
+get_stability(FeatureProps) when is_map(FeatureProps) ->
+ maps:get(stability, FeatureProps, stable).
+
+%% -------------------------------------------------------------------
+%% Feature flags registry.
+%% -------------------------------------------------------------------
+
+-spec init() -> ok | no_return().
+%% @private
+
+init() ->
+ %% We want to make sure the `feature_flags` file exists once
+    %% RabbitMQ has been started at least once. This is not required by
+ %% this module (it works fine if the file is missing) but it helps
+ %% external tools.
+ _ = ensure_enabled_feature_flags_list_file_exists(),
+
+    %% We also "list" supported feature flags. We are not interested in
+    %% the result, but doing so triggers the first initialization of
+    %% the registry.
+ _ = list(all),
+ ok.
+
+-spec initialize_registry() -> ok | {error, any()} | no_return().
+%% @private
+%% @doc
+%% Initializes or reinitializes the registry.
+%%
+%% The registry is an Erlang module recompiled at runtime to hold the
+%% state of all supported feature flags.
+%%
+%% That Erlang module is called {@link rabbit_ff_registry}. The initial
+%% source code of this module simply calls this function, so that the
+%% stub is replaced by a proper registry on first use.
+%%
+%% Once replaced, the registry contains the map of all supported feature
+%% flags and their state. This makes it very efficient to query a
+%% feature flag state or property.
+%%
+%% The registry is local to all RabbitMQ nodes.
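+%%
+%% As a rough sketch, the regenerated module looks like the following
+%% (the feature flag name and properties are illustrative):
+%%
+%% ```
+%% -module(rabbit_ff_registry).
+%% -export([get/1, list/1, states/0, is_supported/1, is_enabled/1,
+%%          is_registry_initialized/0, is_registry_written_to_disk/0]).
+%%
+%% get(my_feature) -> #{desc => "...", provided_by => rabbit};
+%% get(_)          -> undefined.
+%%
+%% is_enabled(my_feature) -> true;
+%% is_enabled(_)          -> false.
+%% '''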
+
+initialize_registry() ->
+ initialize_registry(#{}).
+
+-spec initialize_registry(feature_flags()) ->
+ ok | {error, any()} | no_return().
+%% @private
+%% @doc
+%% Initializes or reinitializes the registry.
+%%
+%% See {@link initialize_registry/0} for a description of the registry.
+%%
+%% This function takes a map of new supported feature flags (i.e. their
+%% names and extended properties) to add to the existing known feature
+%% flags.
+
+initialize_registry(NewSupportedFeatureFlags) ->
+ %% The first step is to get the feature flag states: if this is the
+ %% first time we initialize it, we read the list from disk (the
+ %% `feature_flags` file). Otherwise we query the existing registry
+ %% before it is replaced.
+ RegistryInitialized = rabbit_ff_registry:is_registry_initialized(),
+ FeatureStates = case RegistryInitialized of
+ true ->
+ rabbit_ff_registry:states();
+ false ->
+ EnabledFeatureNames =
+ read_enabled_feature_flags_list(),
+ list_of_enabled_feature_flags_to_feature_states(
+ EnabledFeatureNames)
+ end,
+
+ %% We also record if the feature flags state was correctly written
+ %% to disk. Currently we don't use this information, but in the
+    %% future, we might want to retry the write if it failed.
+ %%
+ %% TODO: Retry to write the feature flags state if the first try
+ %% failed.
+ WrittenToDisk = case RegistryInitialized of
+ true ->
+ rabbit_ff_registry:is_registry_written_to_disk();
+ false ->
+ true
+ end,
+ initialize_registry(NewSupportedFeatureFlags,
+ FeatureStates,
+ WrittenToDisk).
+
+-spec list_of_enabled_feature_flags_to_feature_states([feature_name()]) ->
+ feature_states().
+
+list_of_enabled_feature_flags_to_feature_states(FeatureNames) ->
+ maps:from_list([{FeatureName, true} || FeatureName <- FeatureNames]).
+
+-spec initialize_registry(feature_flags(),
+ feature_states(),
+ boolean()) ->
+ ok | {error, any()} | no_return().
+%% @private
+%% @doc
+%% Initializes or reinitializes the registry.
+%%
+%% See {@link initialize_registry/0} for a description of the registry.
+%%
+%% This function takes a map of new supported feature flags (i.e. their
+%% names and extended properties) to add to the existing known feature
+%% flags, a map of the new feature flag states (whether they are
+%% enabled, disabled or `state_changing'), and a flag to indicate if the
+%% feature flag states were recorded to disk.
+%%
+%% The `state_changing' state is used to block callers asking if a
+%% feature flag is enabled or disabled while its state is changing.
+
+initialize_registry(NewSupportedFeatureFlags,
+ NewFeatureStates,
+ WrittenToDisk) ->
+ Ret = maybe_initialize_registry(NewSupportedFeatureFlags,
+ NewFeatureStates,
+ WrittenToDisk),
+ case Ret of
+ ok -> ok;
+ restart -> initialize_registry(NewSupportedFeatureFlags,
+ NewFeatureStates,
+ WrittenToDisk);
+ Error -> Error
+ end.
+
+-spec maybe_initialize_registry(feature_flags(),
+ feature_states(),
+ boolean()) ->
+ ok | restart | {error, any()} | no_return().
+
+maybe_initialize_registry(NewSupportedFeatureFlags,
+ NewFeatureStates,
+ WrittenToDisk) ->
+ %% We save the version of the current registry before computing
+ %% the new one. This is used when we do the actual reload: if the
+ %% current registry was reloaded in the meantime, we need to restart
+    %% the computation to make sure we don't lose data.
+ RegistryVsn = registry_vsn(),
+
+ %% We take the feature flags already registered.
+ RegistryInitialized = rabbit_ff_registry:is_registry_initialized(),
+ KnownFeatureFlags1 = case RegistryInitialized of
+ true -> rabbit_ff_registry:list(all);
+ false -> #{}
+ end,
+
+    %% Query the list (a map, to be exact) of known supported feature
+    %% flags. That list comes from the `-rabbit_feature_flag().` module
+    %% attributes exposed by all currently loaded Erlang modules.
+ KnownFeatureFlags2 = query_supported_feature_flags(),
+
+ %% We merge the feature flags we already knew about
+ %% (KnownFeatureFlags1), those found in the loaded applications
+ %% (KnownFeatureFlags2) and those specified in arguments
+ %% (NewSupportedFeatureFlags). The latter come from remote nodes
+    %% usually: for example, they can come from plugins loaded on a remote
+    %% node while the plugins are missing locally. In this case, we
+ %% consider those feature flags supported because there is no code
+ %% locally which would cause issues.
+ %%
+    %% It means that the list of feature flags only grows. We don't try
+    %% to clean it up at some point because we want to remember the
+    %% feature flags we saw (and their state). It should be fine because
+    %% that list should remain small.
+ KnownFeatureFlags = maps:merge(KnownFeatureFlags1,
+ KnownFeatureFlags2),
+ AllFeatureFlags = maps:merge(KnownFeatureFlags,
+ NewSupportedFeatureFlags),
+
+ %% Next we want to update the feature states, based on the new
+ %% states passed as arguments.
+ FeatureStates0 = case RegistryInitialized of
+ true ->
+ maps:merge(rabbit_ff_registry:states(),
+ NewFeatureStates);
+ false ->
+ NewFeatureStates
+ end,
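+    %% `false' states are dropped on purpose: the regenerated registry
+    %% treats a feature flag without a recorded state as disabled, so
+    %% only `true' and `state_changing' entries need to be kept.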
+ FeatureStates = maps:filter(
+ fun(_, true) -> true;
+ (_, state_changing) -> true;
+ (_, false) -> false
+ end, FeatureStates0),
+
+ Proceed = does_registry_need_refresh(AllFeatureFlags,
+ FeatureStates,
+ WrittenToDisk),
+
+ case Proceed of
+ true ->
+ rabbit_log_feature_flags:debug(
+ "Feature flags: (re)initialize registry (~p)",
+ [self()]),
+ T0 = erlang:timestamp(),
+ Ret = do_initialize_registry(RegistryVsn,
+ AllFeatureFlags,
+ FeatureStates,
+ WrittenToDisk),
+ T1 = erlang:timestamp(),
+ rabbit_log_feature_flags:debug(
+ "Feature flags: time to regen registry: ~p µs",
+ [timer:now_diff(T1, T0)]),
+ Ret;
+ false ->
+ rabbit_log_feature_flags:debug(
+ "Feature flags: registry already up-to-date, skipping init"),
+ ok
+ end.
+
+-spec does_registry_need_refresh(feature_flags(),
+ feature_states(),
+ boolean()) ->
+ boolean().
+
+does_registry_need_refresh(AllFeatureFlags,
+ FeatureStates,
+ WrittenToDisk) ->
+ case rabbit_ff_registry:is_registry_initialized() of
+ true ->
+ %% Before proceeding with the actual
+ %% (re)initialization, let's see if there are any
+ %% changes.
+ CurrentAllFeatureFlags = rabbit_ff_registry:list(all),
+ CurrentFeatureStates = rabbit_ff_registry:states(),
+ CurrentWrittenToDisk =
+ rabbit_ff_registry:is_registry_written_to_disk(),
+
+ if
+ AllFeatureFlags =/= CurrentAllFeatureFlags ->
+ rabbit_log_feature_flags:debug(
+ "Feature flags: registry refresh needed: "
+ "yes, list of feature flags differs"),
+ true;
+ FeatureStates =/= CurrentFeatureStates ->
+ rabbit_log_feature_flags:debug(
+ "Feature flags: registry refresh needed: "
+ "yes, feature flag states differ"),
+ true;
+ WrittenToDisk =/= CurrentWrittenToDisk ->
+ rabbit_log_feature_flags:debug(
+ "Feature flags: registry refresh needed: "
+ "yes, \"written to disk\" state changed"),
+ true;
+ true ->
+ rabbit_log_feature_flags:debug(
+ "Feature flags: registry refresh needed: no"),
+ false
+ end;
+ false ->
+ rabbit_log_feature_flags:debug(
+ "Feature flags: registry refresh needed: "
+ "yes, first-time initialization"),
+ true
+ end.
+
+-spec do_initialize_registry(registry_vsn(),
+ feature_flags(),
+ feature_states(),
+ boolean()) ->
+ ok | restart | {error, any()} | no_return().
+%% @private
+
+do_initialize_registry(RegistryVsn,
+ AllFeatureFlags,
+ FeatureStates,
+ WrittenToDisk) ->
+ %% We log the state of those feature flags.
+ rabbit_log_feature_flags:info(
+ "Feature flags: list of feature flags found:"),
+ lists:foreach(
+ fun(FeatureName) ->
+ rabbit_log_feature_flags:info(
+ "Feature flags: [~s] ~s",
+ [case maps:is_key(FeatureName, FeatureStates) of
+ true ->
+ case maps:get(FeatureName, FeatureStates) of
+ true -> "x";
+ state_changing -> "~"
+ end;
+ false ->
+ " "
+ end,
+ FeatureName])
+ end, lists:sort(maps:keys(AllFeatureFlags))),
+ rabbit_log_feature_flags:info(
+ "Feature flags: feature flag states written to disk: ~s",
+ [case WrittenToDisk of
+ true -> "yes";
+ false -> "no"
+ end]),
+
+ %% We request the registry to be regenerated and reloaded with the
+ %% new state.
+ regen_registry_mod(RegistryVsn,
+ AllFeatureFlags,
+ FeatureStates,
+ WrittenToDisk).
+
+-spec query_supported_feature_flags() -> feature_flags().
+%% @private
+
+-ifdef(TEST).
+-define(PT_TESTSUITE_ATTRS, {?MODULE, testsuite_feature_flags_attrs}).
+
+inject_test_feature_flags(AttributesFromTestsuite) ->
+ rabbit_log_feature_flags:debug(
+ "Feature flags: injecting feature flags from testsuite: ~p",
+ [AttributesFromTestsuite]),
+ ok = persistent_term:put(?PT_TESTSUITE_ATTRS, AttributesFromTestsuite),
+ initialize_registry().
+
+module_attributes_from_testsuite() ->
+ persistent_term:get(?PT_TESTSUITE_ATTRS, []).
+
+query_supported_feature_flags() ->
+ rabbit_log_feature_flags:debug(
+ "Feature flags: query feature flags in loaded applications "
+ "+ testsuite"),
+ T0 = erlang:timestamp(),
+ AttributesPerApp = rabbit_misc:rabbitmq_related_module_attributes(
+ rabbit_feature_flag),
+ AttributesFromTestsuite = module_attributes_from_testsuite(),
+ T1 = erlang:timestamp(),
+ rabbit_log_feature_flags:debug(
+ "Feature flags: time to find supported feature flags: ~p µs",
+ [timer:now_diff(T1, T0)]),
+ AllAttributes = AttributesPerApp ++ AttributesFromTestsuite,
+ prepare_queried_feature_flags(AllAttributes, #{}).
+-else.
+query_supported_feature_flags() ->
+ rabbit_log_feature_flags:debug(
+ "Feature flags: query feature flags in loaded applications"),
+ T0 = erlang:timestamp(),
+ AttributesPerApp = rabbit_misc:rabbitmq_related_module_attributes(
+ rabbit_feature_flag),
+ T1 = erlang:timestamp(),
+ rabbit_log_feature_flags:debug(
+ "Feature flags: time to find supported feature flags: ~p µs",
+ [timer:now_diff(T1, T0)]),
+ prepare_queried_feature_flags(AttributesPerApp, #{}).
+-endif.
+
+prepare_queried_feature_flags([{App, _Module, Attributes} | Rest],
+ AllFeatureFlags) ->
+ rabbit_log_feature_flags:debug(
+ "Feature flags: application `~s` has ~b feature flags",
+ [App, length(Attributes)]),
+ AllFeatureFlags1 = lists:foldl(
+ fun({FeatureName, FeatureProps}, AllFF) ->
+ merge_new_feature_flags(AllFF,
+ App,
+ FeatureName,
+ FeatureProps)
+ end, AllFeatureFlags, Attributes),
+ prepare_queried_feature_flags(Rest, AllFeatureFlags1);
+prepare_queried_feature_flags([], AllFeatureFlags) ->
+ AllFeatureFlags.
+
+-spec merge_new_feature_flags(feature_flags(),
+ atom(),
+ feature_name(),
+ feature_props()) -> feature_flags().
+%% @private
+
+merge_new_feature_flags(AllFeatureFlags, App, FeatureName, FeatureProps)
+ when is_atom(FeatureName) andalso is_map(FeatureProps) ->
+ %% We expand the feature flag properties map with:
+ %% - the name of the application providing it: only informational
+ %% for now, but can be handy to understand that a feature flag
+ %% comes from a plugin.
+ FeatureProps1 = maps:put(provided_by, App, FeatureProps),
+ maps:merge(AllFeatureFlags,
+ #{FeatureName => FeatureProps1}).
+
+-spec regen_registry_mod(registry_vsn(),
+ feature_flags(),
+ feature_states(),
+ boolean()) ->
+ ok | restart | {error, any()} | no_return().
+%% @private
+
+regen_registry_mod(RegistryVsn,
+ AllFeatureFlags,
+ FeatureStates,
+ WrittenToDisk) ->
+ %% Here, we recreate the source code of the `rabbit_ff_registry`
+ %% module from scratch.
+ %%
+ %% IMPORTANT: We want both modules to have the exact same public
+ %% API in order to simplify the life of developers and their tools
+ %% (Dialyzer, completion, and so on).
+
+ %% -module(rabbit_ff_registry).
+ ModuleAttr = erl_syntax:attribute(
+ erl_syntax:atom(module),
+ [erl_syntax:atom(rabbit_ff_registry)]),
+ ModuleForm = erl_syntax:revert(ModuleAttr),
+ %% -export([...]).
+ ExportAttr = erl_syntax:attribute(
+ erl_syntax:atom(export),
+ [erl_syntax:list(
+ [erl_syntax:arity_qualifier(
+ erl_syntax:atom(F),
+ erl_syntax:integer(A))
+ || {F, A} <- [{get, 1},
+ {list, 1},
+ {states, 0},
+ {is_supported, 1},
+ {is_enabled, 1},
+ {is_registry_initialized, 0},
+ {is_registry_written_to_disk, 0}]]
+ )
+ ]
+ ),
+ ExportForm = erl_syntax:revert(ExportAttr),
+ %% get(_) -> ...
+ GetClauses = [erl_syntax:clause(
+ [erl_syntax:atom(FeatureName)],
+ [],
+ [erl_syntax:abstract(maps:get(FeatureName,
+ AllFeatureFlags))])
+ || FeatureName <- maps:keys(AllFeatureFlags)
+ ],
+ GetUnknownClause = erl_syntax:clause(
+ [erl_syntax:variable("_")],
+ [],
+ [erl_syntax:atom(undefined)]),
+ GetFun = erl_syntax:function(
+ erl_syntax:atom(get),
+ GetClauses ++ [GetUnknownClause]),
+ GetFunForm = erl_syntax:revert(GetFun),
+ %% list(_) -> ...
+ ListAllBody = erl_syntax:abstract(AllFeatureFlags),
+ ListAllClause = erl_syntax:clause([erl_syntax:atom(all)],
+ [],
+ [ListAllBody]),
+ EnabledFeatureFlags = maps:filter(
+ fun(FeatureName, _) ->
+ maps:is_key(FeatureName,
+ FeatureStates)
+ andalso
+ maps:get(FeatureName, FeatureStates)
+ =:=
+ true
+ end, AllFeatureFlags),
+ ListEnabledBody = erl_syntax:abstract(EnabledFeatureFlags),
+ ListEnabledClause = erl_syntax:clause(
+ [erl_syntax:atom(enabled)],
+ [],
+ [ListEnabledBody]),
+ DisabledFeatureFlags = maps:filter(
+ fun(FeatureName, _) ->
+ not maps:is_key(FeatureName,
+ FeatureStates)
+ end, AllFeatureFlags),
+ ListDisabledBody = erl_syntax:abstract(DisabledFeatureFlags),
+ ListDisabledClause = erl_syntax:clause(
+ [erl_syntax:atom(disabled)],
+ [],
+ [ListDisabledBody]),
+ StateChangingFeatureFlags = maps:filter(
+ fun(FeatureName, _) ->
+ maps:is_key(FeatureName,
+ FeatureStates)
+ andalso
+ maps:get(FeatureName, FeatureStates)
+ =:=
+ state_changing
+ end, AllFeatureFlags),
+ ListStateChangingBody = erl_syntax:abstract(StateChangingFeatureFlags),
+ ListStateChangingClause = erl_syntax:clause(
+ [erl_syntax:atom(state_changing)],
+ [],
+ [ListStateChangingBody]),
+ ListFun = erl_syntax:function(
+ erl_syntax:atom(list),
+ [ListAllClause,
+ ListEnabledClause,
+ ListDisabledClause,
+ ListStateChangingClause]),
+ ListFunForm = erl_syntax:revert(ListFun),
+ %% states() -> ...
+ StatesBody = erl_syntax:abstract(FeatureStates),
+ StatesClause = erl_syntax:clause([], [], [StatesBody]),
+ StatesFun = erl_syntax:function(
+ erl_syntax:atom(states),
+ [StatesClause]),
+ StatesFunForm = erl_syntax:revert(StatesFun),
+ %% is_supported(_) -> ...
+ IsSupportedClauses = [erl_syntax:clause(
+ [erl_syntax:atom(FeatureName)],
+ [],
+ [erl_syntax:atom(true)])
+ || FeatureName <- maps:keys(AllFeatureFlags)
+ ],
+ NotSupportedClause = erl_syntax:clause(
+ [erl_syntax:variable("_")],
+ [],
+ [erl_syntax:atom(false)]),
+ IsSupportedFun = erl_syntax:function(
+ erl_syntax:atom(is_supported),
+ IsSupportedClauses ++ [NotSupportedClause]),
+ IsSupportedFunForm = erl_syntax:revert(IsSupportedFun),
+ %% is_enabled(_) -> ...
+ IsEnabledClauses = [erl_syntax:clause(
+ [erl_syntax:atom(FeatureName)],
+ [],
+ [case maps:is_key(FeatureName, FeatureStates) of
+ true ->
+ erl_syntax:atom(
+ maps:get(FeatureName, FeatureStates));
+ false ->
+ erl_syntax:atom(false)
+ end])
+ || FeatureName <- maps:keys(AllFeatureFlags)
+ ],
+ NotEnabledClause = erl_syntax:clause(
+ [erl_syntax:variable("_")],
+ [],
+ [erl_syntax:atom(false)]),
+ IsEnabledFun = erl_syntax:function(
+ erl_syntax:atom(is_enabled),
+ IsEnabledClauses ++ [NotEnabledClause]),
+ IsEnabledFunForm = erl_syntax:revert(IsEnabledFun),
+ %% is_registry_initialized() -> ...
+ IsInitializedClauses = [erl_syntax:clause(
+ [],
+ [],
+ [erl_syntax:atom(true)])
+ ],
+ IsInitializedFun = erl_syntax:function(
+ erl_syntax:atom(is_registry_initialized),
+ IsInitializedClauses),
+ IsInitializedFunForm = erl_syntax:revert(IsInitializedFun),
+ %% is_registry_written_to_disk() -> ...
+ IsWrittenToDiskClauses = [erl_syntax:clause(
+ [],
+ [],
+ [erl_syntax:atom(WrittenToDisk)])
+ ],
+ IsWrittenToDiskFun = erl_syntax:function(
+ erl_syntax:atom(is_registry_written_to_disk),
+ IsWrittenToDiskClauses),
+ IsWrittenToDiskFunForm = erl_syntax:revert(IsWrittenToDiskFun),
+ %% Compilation!
+ Forms = [ModuleForm,
+ ExportForm,
+ GetFunForm,
+ ListFunForm,
+ StatesFunForm,
+ IsSupportedFunForm,
+ IsEnabledFunForm,
+ IsInitializedFunForm,
+ IsWrittenToDiskFunForm],
+ maybe_log_registry_source_code(Forms),
+ CompileOpts = [return_errors,
+ return_warnings],
+ case compile:forms(Forms, CompileOpts) of
+ {ok, Mod, Bin, _} ->
+ load_registry_mod(RegistryVsn, Mod, Bin);
+ {error, Errors, Warnings} ->
+ rabbit_log_feature_flags:error(
+ "Feature flags: registry compilation:~n"
+ "Errors: ~p~n"
+ "Warnings: ~p",
+ [Errors, Warnings]),
+ {error, {compilation_failure, Errors, Warnings}}
+ end.
+
+maybe_log_registry_source_code(Forms) ->
+ case rabbit_prelaunch:get_context() of
+ #{log_feature_flags_registry := true} ->
+ rabbit_log_feature_flags:debug(
+ "== FEATURE FLAGS REGISTRY ==~n"
+ "~s~n"
+ "== END ==~n",
+ [erl_prettypr:format(erl_syntax:form_list(Forms))]);
+ _ ->
+ ok
+ end.
+
+-ifdef(TEST).
+registry_loading_lock() -> ?FF_REGISTRY_LOADING_LOCK.
+-endif.
+
+-spec load_registry_mod(registry_vsn(), atom(), binary()) ->
+ ok | restart | no_return().
+%% @private
+
+load_registry_mod(RegistryVsn, Mod, Bin) ->
+ rabbit_log_feature_flags:debug(
+ "Feature flags: registry module ready, loading it (~p)...",
+ [self()]),
+ FakeFilename = "Compiled and loaded by " ?MODULE_STRING,
+ %% Time to load the new registry, replacing the old one. We use a
+ %% lock here to synchronize concurrent reloads.
+ global:set_lock(?FF_REGISTRY_LOADING_LOCK, [node()]),
+ rabbit_log_feature_flags:debug(
+ "Feature flags: acquired lock before reloading registry module (~p)",
+ [self()]),
+    %% We want to make sure that the old registry (not the one
+    %% currently in use) is purged by the code server. It means no
+ %% process lingers on that old code.
+ %%
+ %% We use code:soft_purge() for that (meaning no process is killed)
+ %% and we wait in an infinite loop for that to succeed.
+ ok = purge_old_registry(Mod),
+ %% Now we can replace the currently loaded registry by the new one.
+    %% The code server takes care of marking the current registry as old
+    %% and loading the new module in an atomic operation.
+ %%
+ %% Therefore there is no chance of a window where there is no
+ %% registry module available, causing the one on disk to be
+ %% reloaded.
+ Ret = case registry_vsn() of
+ RegistryVsn -> code:load_binary(Mod, FakeFilename, Bin);
+ OtherVsn -> {error, {restart, RegistryVsn, OtherVsn}}
+ end,
+ rabbit_log_feature_flags:debug(
+ "Feature flags: releasing lock after reloading registry module (~p)",
+ [self()]),
+ global:del_lock(?FF_REGISTRY_LOADING_LOCK, [node()]),
+ case Ret of
+ {module, _} ->
+ rabbit_log_feature_flags:debug(
+ "Feature flags: registry module loaded (vsn: ~p -> ~p)",
+ [RegistryVsn, registry_vsn()]),
+ ok;
+ {error, {restart, Expected, Current}} ->
+ rabbit_log_feature_flags:error(
+ "Feature flags: another registry module was loaded in the "
+ "meantime (expected old vsn: ~p, current vsn: ~p); "
+ "restarting the regen",
+ [Expected, Current]),
+ restart;
+ {error, Reason} ->
+ rabbit_log_feature_flags:error(
+ "Feature flags: failed to load registry module: ~p",
+ [Reason]),
+ throw({feature_flag_registry_reload_failure, Reason})
+ end.
+
+-spec registry_vsn() -> registry_vsn().
+%% @private
+
+registry_vsn() ->
+ Attrs = rabbit_ff_registry:module_info(attributes),
+ proplists:get_value(vsn, Attrs, undefined).
+
+purge_old_registry(Mod) ->
+ case code:is_loaded(Mod) of
+ {file, _} -> do_purge_old_registry(Mod);
+ false -> ok
+ end.
+
+do_purge_old_registry(Mod) ->
+ case code:soft_purge(Mod) of
+ true -> ok;
+ false -> do_purge_old_registry(Mod)
+ end.
+
+%% -------------------------------------------------------------------
+%% Feature flags state storage.
+%% -------------------------------------------------------------------
+
+-spec ensure_enabled_feature_flags_list_file_exists() -> ok | {error, any()}.
+%% @private
+
+ensure_enabled_feature_flags_list_file_exists() ->
+ File = enabled_feature_flags_list_file(),
+ case filelib:is_regular(File) of
+ true -> ok;
+ false -> write_enabled_feature_flags_list([])
+ end.
+
+-spec read_enabled_feature_flags_list() ->
+ [feature_name()] | no_return().
+%% @private
+
+read_enabled_feature_flags_list() ->
+ case try_to_read_enabled_feature_flags_list() of
+ {error, Reason} ->
+ File = enabled_feature_flags_list_file(),
+ throw({feature_flags_file_read_error, File, Reason});
+ Ret ->
+ Ret
+ end.
+
+-spec try_to_read_enabled_feature_flags_list() ->
+ [feature_name()] | {error, any()}.
+%% @private
+
+try_to_read_enabled_feature_flags_list() ->
+ File = enabled_feature_flags_list_file(),
+ case file:consult(File) of
+ {ok, [List]} ->
+ List;
+ {error, enoent} ->
+ %% If the file is missing, we consider the list of enabled
+ %% feature flags to be empty.
+ [];
+ {error, Reason} = Error ->
+ rabbit_log_feature_flags:error(
+ "Feature flags: failed to read the `feature_flags` "
+ "file at `~s`: ~s",
+ [File, file:format_error(Reason)]),
+ Error
+ end.
+
+-spec write_enabled_feature_flags_list([feature_name()]) ->
+ ok | no_return().
+%% @private
+
+write_enabled_feature_flags_list(FeatureNames) ->
+ case try_to_write_enabled_feature_flags_list(FeatureNames) of
+ {error, Reason} ->
+ File = enabled_feature_flags_list_file(),
+ throw({feature_flags_file_write_error, File, Reason});
+ Ret ->
+ Ret
+ end.
+
+-spec try_to_write_enabled_feature_flags_list([feature_name()]) ->
+ ok | {error, any()}.
+%% @private
+
+try_to_write_enabled_feature_flags_list(FeatureNames) ->
+ %% Before writing the new file, we read the existing one. If there
+ %% are unknown feature flags in that file, we want to keep their
+ %% state, even though they are unsupported at this time. It could be
+ %% that a plugin was disabled in the meantime.
+ %%
+ %% FIXME: Lock this code to fix concurrent read/modify/write.
+ PreviouslyEnabled = case try_to_read_enabled_feature_flags_list() of
+ {error, _} -> [];
+ List -> List
+ end,
+ FeatureNames1 = lists:foldl(
+ fun(Name, Acc) ->
+ case is_supported_locally(Name) of
+ true -> Acc;
+ false -> [Name | Acc]
+ end
+ end, FeatureNames, PreviouslyEnabled),
+ FeatureNames2 = lists:sort(FeatureNames1),
+
+ File = enabled_feature_flags_list_file(),
+ Content = io_lib:format("~p.~n", [FeatureNames2]),
+    %% TODO: If we fail to write the file, we should spawn a process
+ %% to retry the operation.
+ case file:write_file(File, Content) of
+ ok ->
+ ok;
+ {error, Reason} = Error ->
+ rabbit_log_feature_flags:error(
+ "Feature flags: failed to write the `feature_flags` "
+ "file at `~s`: ~s",
+ [File, file:format_error(Reason)]),
+ Error
+ end.
+
+-spec enabled_feature_flags_list_file() -> file:filename().
+%% @doc
+%% Returns the path to the file where the state of feature flags is stored.
+%%
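+%% The file contains a single Erlang term: the sorted list of enabled
+%% feature flag names. For example (hypothetical content):
+%%
+%% ```
+%% [feature_a,feature_b].
+%% '''
+%%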
+%% @returns the path to the file.
+
+enabled_feature_flags_list_file() ->
+ case application:get_env(rabbit, feature_flags_file) of
+ {ok, Val} -> Val;
+ undefined -> throw(feature_flags_file_not_set)
+ end.
+
+%% -------------------------------------------------------------------
+%% Feature flags management: enabling.
+%% -------------------------------------------------------------------
+
+-spec do_enable(feature_name()) -> ok | {error, any()} | no_return().
+%% @private
+
+do_enable(FeatureName) ->
+    %% We mark this feature flag as "state changing" before doing the
+    %% actual state change. We also take a global lock: this allows us
+    %% to block callers asking about a feature flag changing state.
+ global:set_lock(?FF_STATE_CHANGE_LOCK),
+ Ret = case mark_as_enabled(FeatureName, state_changing) of
+ ok ->
+ case enable_dependencies(FeatureName, true) of
+ ok ->
+ case run_migration_fun(FeatureName, enable) of
+ ok ->
+ mark_as_enabled(FeatureName, true);
+ {error, no_migration_fun} ->
+ mark_as_enabled(FeatureName, true);
+ Error ->
+ Error
+ end;
+ Error ->
+ Error
+ end;
+ Error ->
+ Error
+ end,
+ case Ret of
+ ok -> ok;
+ _ -> mark_as_enabled(FeatureName, false)
+ end,
+ global:del_lock(?FF_STATE_CHANGE_LOCK),
+ Ret.
+
+-spec enable_locally(feature_name()) -> ok | {error, any()} | no_return().
+%% @private
+
+enable_locally(FeatureName) when is_atom(FeatureName) ->
+ case is_enabled(FeatureName) of
+ true ->
+ ok;
+ false ->
+ rabbit_log_feature_flags:debug(
+ "Feature flag `~s`: enable locally (as part of feature "
+ "flag states synchronization)",
+ [FeatureName]),
+ do_enable_locally(FeatureName)
+ end.
+
+-spec do_enable_locally(feature_name()) -> ok | {error, any()} | no_return().
+%% @private
+
+do_enable_locally(FeatureName) ->
+ case enable_dependencies(FeatureName, false) of
+ ok ->
+ case run_migration_fun(FeatureName, enable) of
+ ok ->
+ mark_as_enabled_locally(FeatureName, true);
+ {error, no_migration_fun} ->
+ mark_as_enabled_locally(FeatureName, true);
+ Error ->
+ Error
+ end;
+ Error ->
+ Error
+ end.
+
+-spec enable_dependencies(feature_name(), boolean()) ->
+ ok | {error, any()} | no_return().
+%% @private
+
+enable_dependencies(FeatureName, Everywhere) ->
+ FeatureProps = rabbit_ff_registry:get(FeatureName),
+ DependsOn = maps:get(depends_on, FeatureProps, []),
+ rabbit_log_feature_flags:debug(
+ "Feature flag `~s`: enable dependencies: ~p",
+ [FeatureName, DependsOn]),
+ enable_dependencies(FeatureName, DependsOn, Everywhere).
+
+-spec enable_dependencies(feature_name(), [feature_name()], boolean()) ->
+ ok | {error, any()} | no_return().
+%% @private
+
+enable_dependencies(TopLevelFeatureName, [FeatureName | Rest], Everywhere) ->
+ Ret = case Everywhere of
+ true -> enable(FeatureName);
+ false -> enable_locally(FeatureName)
+ end,
+ case Ret of
+ ok -> enable_dependencies(TopLevelFeatureName, Rest, Everywhere);
+ Error -> Error
+ end;
+enable_dependencies(_, [], _) ->
+ ok.
+
+-spec run_migration_fun(feature_name(), any()) ->
+ any() | {error, any()}.
+%% @private
+
+run_migration_fun(FeatureName, Arg) ->
+ FeatureProps = rabbit_ff_registry:get(FeatureName),
+ run_migration_fun(FeatureName, FeatureProps, Arg).
+
+run_migration_fun(FeatureName, FeatureProps, Arg) ->
+ case maps:get(migration_fun, FeatureProps, none) of
+ {MigrationMod, MigrationFun}
+ when is_atom(MigrationMod) andalso is_atom(MigrationFun) ->
+ rabbit_log_feature_flags:debug(
+ "Feature flag `~s`: run migration function ~p with arg: ~p",
+ [FeatureName, MigrationFun, Arg]),
+ try
+ erlang:apply(MigrationMod,
+ MigrationFun,
+ [FeatureName, FeatureProps, Arg])
+ catch
+ _:Reason:Stacktrace ->
+ rabbit_log_feature_flags:error(
+ "Feature flag `~s`: migration function crashed: ~p~n~p",
+ [FeatureName, Reason, Stacktrace]),
+ {error, {migration_fun_crash, Reason, Stacktrace}}
+ end;
+ none ->
+ {error, no_migration_fun};
+ Invalid ->
+ rabbit_log_feature_flags:error(
+ "Feature flag `~s`: invalid migration function: ~p",
+ [FeatureName, Invalid]),
+ {error, {invalid_migration_fun, Invalid}}
+ end.
+
+-spec mark_as_enabled(feature_name(), feature_state()) ->
+ any() | {error, any()} | no_return().
+%% @private
+
+mark_as_enabled(FeatureName, IsEnabled) ->
+ case mark_as_enabled_locally(FeatureName, IsEnabled) of
+ ok ->
+ mark_as_enabled_remotely(FeatureName, IsEnabled);
+ Error ->
+ Error
+ end.
+
+-spec mark_as_enabled_locally(feature_name(), feature_state()) ->
+ any() | {error, any()} | no_return().
+%% @private
+
+mark_as_enabled_locally(FeatureName, IsEnabled) ->
+ rabbit_log_feature_flags:info(
+ "Feature flag `~s`: mark as enabled=~p",
+ [FeatureName, IsEnabled]),
+ EnabledFeatureNames = maps:keys(list(enabled)),
+ NewEnabledFeatureNames = case IsEnabled of
+ true ->
+ [FeatureName | EnabledFeatureNames];
+ false ->
+ EnabledFeatureNames -- [FeatureName];
+ state_changing ->
+ EnabledFeatureNames
+ end,
+ WrittenToDisk = case NewEnabledFeatureNames of
+ EnabledFeatureNames ->
+ rabbit_ff_registry:is_registry_written_to_disk();
+ _ ->
+ ok =:= try_to_write_enabled_feature_flags_list(
+ NewEnabledFeatureNames)
+ end,
+ initialize_registry(#{},
+ #{FeatureName => IsEnabled},
+ WrittenToDisk).
+
+-spec mark_as_enabled_remotely(feature_name(), feature_state()) ->
+ any() | {error, any()} | no_return().
+%% @private
+
+mark_as_enabled_remotely(FeatureName, IsEnabled) ->
+ Nodes = running_remote_nodes(),
+ mark_as_enabled_remotely(Nodes, FeatureName, IsEnabled, ?TIMEOUT).
+
+-spec mark_as_enabled_remotely([node()],
+ feature_name(),
+ feature_state(),
+ timeout()) ->
+ any() | {error, any()} | no_return().
+%% @private
+
+mark_as_enabled_remotely([], _FeatureName, _IsEnabled, _Timeout) ->
+ ok;
+mark_as_enabled_remotely(Nodes, FeatureName, IsEnabled, Timeout) ->
+ T0 = erlang:timestamp(),
+ Rets = [{Node, rpc:call(Node,
+ ?MODULE,
+ mark_as_enabled_locally,
+ [FeatureName, IsEnabled],
+ Timeout)}
+ || Node <- Nodes],
+ FailedNodes = [Node || {Node, Ret} <- Rets, Ret =/= ok],
+ case FailedNodes of
+ [] ->
+ rabbit_log_feature_flags:debug(
+ "Feature flags: `~s` successfully marked as enabled=~p on all "
+ "nodes", [FeatureName, IsEnabled]),
+ ok;
+ _ ->
+ rabbit_log_feature_flags:error(
+ "Feature flags: failed to mark feature flag `~s` as enabled=~p "
+ "on the following nodes:", [FeatureName, IsEnabled]),
+ [rabbit_log_feature_flags:error(
+ "Feature flags: - ~s: ~p",
+ [Node, Ret])
+ || {Node, Ret} <- Rets,
+ Ret =/= ok],
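+            %% `Duration' below is in microseconds while `Timeout' and
+            %% `Sleep' are in milliseconds, hence the conversions when
+            %% computing the remaining timeout for the retry.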
+ Sleep = 1000,
+ T1 = erlang:timestamp(),
+ Duration = timer:now_diff(T1, T0),
+ NewTimeout = (Timeout * 1000 - Duration) div 1000 - Sleep,
+ if
+ NewTimeout > 0 ->
+ rabbit_log_feature_flags:debug(
+ "Feature flags: retrying with a timeout of ~b "
+ "ms after sleeping for ~b ms",
+ [NewTimeout, Sleep]),
+ timer:sleep(Sleep),
+ mark_as_enabled_remotely(FailedNodes,
+ FeatureName,
+ IsEnabled,
+ NewTimeout);
+ true ->
+ rabbit_log_feature_flags:debug(
+ "Feature flags: not retrying; RPC went over the "
+ "~b milliseconds timeout", [Timeout]),
+ %% FIXME: Is crashing the process the best solution here?
+ throw(
+ {failed_to_mark_feature_flag_as_enabled_on_remote_nodes,
+ FeatureName, IsEnabled, FailedNodes})
+ end
+ end.
+
+%% -------------------------------------------------------------------
+%% Coordination with remote nodes.
+%% -------------------------------------------------------------------
+
+-spec remote_nodes() -> [node()].
+%% @private
+
+remote_nodes() ->
+ mnesia:system_info(db_nodes) -- [node()].
+
+-spec running_remote_nodes() -> [node()].
+%% @private
+
+running_remote_nodes() ->
+ mnesia:system_info(running_db_nodes) -- [node()].
+
+query_running_remote_nodes(Node, Timeout) ->
+ case rpc:call(Node, mnesia, system_info, [running_db_nodes], Timeout) of
+ {badrpc, _} = Error -> Error;
+ Nodes -> Nodes -- [node()]
+ end.
+
+-spec does_node_support(node(), [feature_name()], timeout()) -> boolean().
+%% @private
+
+does_node_support(Node, FeatureNames, Timeout) ->
+ rabbit_log_feature_flags:debug(
+ "Feature flags: querying `~p` support on node ~s...",
+ [FeatureNames, Node]),
+ Ret = case node() of
+ Node ->
+ is_supported_locally(FeatureNames);
+ _ ->
+ run_feature_flags_mod_on_remote_node(
+ Node, is_supported_locally, [FeatureNames], Timeout)
+ end,
+ case Ret of
+ {error, pre_feature_flags_rabbitmq} ->
+ %% See run_feature_flags_mod_on_remote_node/4 for
+ %% an explanation why we consider this node a 3.7.x
+ %% pre-feature-flags node.
+ rabbit_log_feature_flags:debug(
+ "Feature flags: no feature flags support on node `~s`, "
+ "consider the feature flags unsupported: ~p",
+ [Node, FeatureNames]),
+ false;
+ {error, Reason} ->
+ rabbit_log_feature_flags:error(
+ "Feature flags: error while querying `~p` support on "
+ "node ~s: ~p",
+ [FeatureNames, Node, Reason]),
+ false;
+ true ->
+ rabbit_log_feature_flags:debug(
+ "Feature flags: node `~s` supports `~p`",
+ [Node, FeatureNames]),
+ true;
+ false ->
+ rabbit_log_feature_flags:debug(
+ "Feature flags: node `~s` does not support `~p`; "
+ "stopping query here",
+ [Node, FeatureNames]),
+ false
+ end.
+
+-spec check_node_compatibility(node()) -> ok | {error, any()}.
+%% @doc
+%% Checks if a node is compatible with the local node.
+%%
+%% To be compatible, the following two conditions must be met:
+%% <ol>
+%% <li>feature flags enabled on the local node must be supported by the
+%% remote node</li>
+%% <li>feature flags enabled on the remote node must be supported by the
+%% local node</li>
+%% </ol>
+%%
+%% @param Node the name of the remote node to test.
+%% @returns `ok' if they are compatible, `{error, Reason}' if they are not.
+
+check_node_compatibility(Node) ->
+ check_node_compatibility(Node, ?TIMEOUT).
+
+-spec check_node_compatibility(node(), timeout()) -> ok | {error, any()}.
+%% @doc
+%% Checks if a node is compatible with the local node.
+%%
+%% See {@link check_node_compatibility/1} for the conditions required to
+%% consider two nodes compatible.
+%%
+%% @param Node the name of the remote node to test.
+%% @param Timeout Time in milliseconds after which the RPC gives up.
+%% @returns `ok' if they are compatible, `{error, Reason}' if they are not.
+%%
+%% @see check_node_compatibility/1
+
+check_node_compatibility(Node, Timeout) ->
+    %% Before checking compatibility, we exchange feature flags from
+    %% unknown Erlang applications: we fetch remote feature flags from
+    %% applications which are not loaded locally, and vice versa.
+    %%
+    %% The goal is to prevent such feature flags from blocking the
+    %% communication between nodes just because the code providing them
+    %% is missing on one of the nodes. Therefore they should not be
+    %% considered when determining compatibility.
+ exchange_feature_flags_from_unknown_apps(Node, Timeout),
+
+ %% FIXME:
+ %% When we try to cluster two nodes, we get:
+ %% Feature flags: starting an unclustered node: all feature flags
+ %% will be enabled by default
+ %% It should probably not be the case...
+
+ %% We can now proceed with the actual compatibility check.
+ rabbit_log_feature_flags:debug(
+ "Feature flags: node `~s` compatibility check, part 1/2",
+ [Node]),
+ Part1 = local_enabled_feature_flags_is_supported_remotely(Node, Timeout),
+ rabbit_log_feature_flags:debug(
+ "Feature flags: node `~s` compatibility check, part 2/2",
+ [Node]),
+ Part2 = remote_enabled_feature_flags_is_supported_locally(Node, Timeout),
+ case {Part1, Part2} of
+ {true, true} ->
+ rabbit_log_feature_flags:debug(
+ "Feature flags: node `~s` is compatible",
+ [Node]),
+ ok;
+ {false, _} ->
+ rabbit_log_feature_flags:error(
+ "Feature flags: node `~s` is INCOMPATIBLE: "
+ "feature flags enabled locally are not supported remotely",
+ [Node]),
+ {error, incompatible_feature_flags};
+ {_, false} ->
+ rabbit_log_feature_flags:error(
+ "Feature flags: node `~s` is INCOMPATIBLE: "
+ "feature flags enabled remotely are not supported locally",
+ [Node]),
+ {error, incompatible_feature_flags}
+ end.
+
+-spec is_node_compatible(node()) -> boolean().
+%% @doc
+%% Returns whether a node is compatible with the local node.
+%%
+%% This function calls {@link check_node_compatibility/2} and returns
+%% `true' if the latter returns `ok'. Therefore this is the same check,
+%% except that this function returns a boolean and does not report the
+%% reason for the incompatibility, if any.
+%%
+%% @param Node the name of the remote node to test.
+%% @returns `true' if they are compatible, `false' otherwise.
+
+is_node_compatible(Node) ->
+ is_node_compatible(Node, ?TIMEOUT).
+
+-spec is_node_compatible(node(), timeout()) -> boolean().
+%% @doc
+%% Returns whether a node is compatible with the local node.
+%%
+%% This function calls {@link check_node_compatibility/2} and returns
+%% `true' if the latter returns `ok'. Therefore this is the same check,
+%% except that this function returns a boolean and does not report the
+%% reason for the incompatibility, if any. If the RPC times out, nodes
+%% are considered incompatible.
+%%
+%% @param Node the name of the remote node to test.
+%% @param Timeout Time in milliseconds after which the RPC gives up.
+%% @returns `true' if they are compatible, `false' otherwise.
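+%%
+%% For example (hypothetical node name and timeout):
+%% ```
+%% true = rabbit_feature_flags:is_node_compatible('rabbit@node2', 60000).
+%% '''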
+
+is_node_compatible(Node, Timeout) ->
+ check_node_compatibility(Node, Timeout) =:= ok.
+
+-spec local_enabled_feature_flags_is_supported_remotely(node(),
+ timeout()) ->
+ boolean().
+%% @private
+
+local_enabled_feature_flags_is_supported_remotely(Node, Timeout) ->
+ LocalEnabledFeatureNames = maps:keys(list(enabled)),
+ is_supported_remotely([Node], LocalEnabledFeatureNames, Timeout).
+
+-spec remote_enabled_feature_flags_is_supported_locally(node(),
+ timeout()) ->
+ boolean().
+%% @private
+
+remote_enabled_feature_flags_is_supported_locally(Node, Timeout) ->
+ case query_remote_feature_flags(Node, enabled, Timeout) of
+ {error, _} ->
+ false;
+ RemoteEnabledFeatureFlags when is_map(RemoteEnabledFeatureFlags) ->
+ RemoteEnabledFeatureNames = maps:keys(RemoteEnabledFeatureFlags),
+ is_supported_locally(RemoteEnabledFeatureNames)
+ end.
+
+-spec run_feature_flags_mod_on_remote_node(node(),
+ atom(),
+ [term()],
+ timeout()) ->
+ term() | {error, term()}.
+%% @private
+
+run_feature_flags_mod_on_remote_node(Node, Function, Args, Timeout) ->
+ case rpc:call(Node, ?MODULE, Function, Args, Timeout) of
+ {badrpc, {'EXIT',
+ {undef,
+ [{?MODULE, Function, Args, []}
+ | _]}}} ->
+ %% If rabbit_feature_flags:Function() is undefined
+ %% on the remote node, we consider it to be a 3.7.x
+ %% pre-feature-flags node.
+ %%
+ %% Theoretically, it could be an older version (3.6.x and
+ %% older). But the RabbitMQ version consistency check
+ %% (rabbit_misc:version_minor_equivalent/2) called from
+ %% rabbit_mnesia:check_rabbit_consistency/2 already blocked
+ %% this situation from happening before we reach this point.
+ rabbit_log_feature_flags:debug(
+ "Feature flags: ~s:~s~p unavailable on node `~s`: "
+ "assuming it is a RabbitMQ 3.7.x pre-feature-flags node",
+ [?MODULE, Function, Args, Node]),
+ {error, pre_feature_flags_rabbitmq};
+ {badrpc, Reason} = Error ->
+ rabbit_log_feature_flags:error(
+ "Feature flags: error while running ~s:~s~p "
+ "on node `~s`: ~p",
+ [?MODULE, Function, Args, Node, Reason]),
+ {error, Error};
+ Ret ->
+ Ret
+ end.
+
+-spec query_remote_feature_flags(node(),
+ Which :: all | enabled | disabled,
+ timeout()) ->
+ feature_flags() | {error, any()}.
+%% @private
+
+query_remote_feature_flags(Node, Which, Timeout) ->
+ rabbit_log_feature_flags:debug(
+ "Feature flags: querying ~s feature flags on node `~s`...",
+ [Which, Node]),
+ case run_feature_flags_mod_on_remote_node(Node, list, [Which], Timeout) of
+ {error, pre_feature_flags_rabbitmq} ->
+ %% See run_feature_flags_mod_on_remote_node/4 for
+ %% an explanation why we consider this node a 3.7.x
+ %% pre-feature-flags node.
+ rabbit_log_feature_flags:debug(
+ "Feature flags: no feature flags support on node `~s`, "
+              "considering the list of feature flags empty", [Node]),
+ #{};
+ {error, Reason} = Error ->
+ rabbit_log_feature_flags:error(
+ "Feature flags: error while querying ~s feature flags "
+ "on node `~s`: ~p",
+ [Which, Node, Reason]),
+ Error;
+ RemoteFeatureFlags when is_map(RemoteFeatureFlags) ->
+ RemoteFeatureNames = maps:keys(RemoteFeatureFlags),
+ rabbit_log_feature_flags:debug(
+ "Feature flags: querying ~s feature flags on node `~s` "
+ "done; ~s features: ~p",
+ [Which, Node, Which, RemoteFeatureNames]),
+ RemoteFeatureFlags
+ end.
+
+-spec merge_feature_flags_from_unknown_apps(feature_flags()) ->
+ ok | {error, any()}.
+%% @private
+
+merge_feature_flags_from_unknown_apps(FeatureFlags)
+ when is_map(FeatureFlags) ->
+ LoadedApps = [App || {App, _, _} <- application:loaded_applications()],
+ FeatureFlagsFromUnknownApps =
+ maps:fold(
+ fun(FeatureName, FeatureProps, UnknownFF) ->
+ case is_supported_locally(FeatureName) of
+ true ->
+ UnknownFF;
+ false ->
+ FeatureProvider = maps:get(provided_by, FeatureProps),
+ case lists:member(FeatureProvider, LoadedApps) of
+ true -> UnknownFF;
+ false -> maps:put(FeatureName, FeatureProps,
+ UnknownFF)
+ end
+ end
+ end,
+ #{},
+ FeatureFlags),
+ case maps:keys(FeatureFlagsFromUnknownApps) of
+ [] ->
+ ok;
+ _ ->
+ rabbit_log_feature_flags:debug(
+ "Feature flags: register feature flags provided by applications "
+ "unknown locally: ~p",
+ [maps:keys(FeatureFlagsFromUnknownApps)]),
+ initialize_registry(FeatureFlagsFromUnknownApps)
+ end.
+
+exchange_feature_flags_from_unknown_apps(Node, Timeout) ->
+ %% The first step is to fetch feature flags from Erlang applications
+ %% we don't know locally (they are loaded remotely, but not
+ %% locally).
+ fetch_remote_feature_flags_from_apps_unknown_locally(Node, Timeout),
+
+ %% The next step is to do the opposite: push feature flags to remote
+ %% nodes so they can register those from applications they don't
+ %% know.
+ push_local_feature_flags_from_apps_unknown_remotely(Node, Timeout).
+
+fetch_remote_feature_flags_from_apps_unknown_locally(Node, Timeout) ->
+ RemoteFeatureFlags = query_remote_feature_flags(Node, all, Timeout),
+ merge_feature_flags_from_unknown_apps(RemoteFeatureFlags).
+
+push_local_feature_flags_from_apps_unknown_remotely(Node, Timeout) ->
+ LocalFeatureFlags = list(all),
+ push_local_feature_flags_from_apps_unknown_remotely(
+ Node, LocalFeatureFlags, Timeout).
+
+push_local_feature_flags_from_apps_unknown_remotely(
+ Node, FeatureFlags, Timeout)
+ when map_size(FeatureFlags) > 0 ->
+ case query_running_remote_nodes(Node, Timeout) of
+ {badrpc, Reason} ->
+ {error, Reason};
+ Nodes ->
+ lists:foreach(
+ fun(N) ->
+ run_feature_flags_mod_on_remote_node(
+ N,
+ merge_feature_flags_from_unknown_apps,
+ [FeatureFlags],
+ Timeout)
+ end, Nodes)
+ end;
+push_local_feature_flags_from_apps_unknown_remotely(_, _, _) ->
+ ok.
+
+-spec sync_feature_flags_with_cluster([node()], boolean()) ->
+ ok | {error, any()} | no_return().
+%% @private
+
+sync_feature_flags_with_cluster(Nodes, NodeIsVirgin) ->
+ sync_feature_flags_with_cluster(Nodes, NodeIsVirgin, ?TIMEOUT).
+
+-spec sync_feature_flags_with_cluster([node()], boolean(), timeout()) ->
+ ok | {error, any()} | no_return().
+%% @private
+
+sync_feature_flags_with_cluster([], NodeIsVirgin, _) ->
+ verify_which_feature_flags_are_actually_enabled(),
+ case NodeIsVirgin of
+ true ->
+ FeatureNames = get_forced_feature_flag_names(),
+ case remote_nodes() of
+ [] when FeatureNames =:= undefined ->
+ rabbit_log_feature_flags:debug(
+ "Feature flags: starting an unclustered node "
+ "for the first time: all feature flags will be "
+ "enabled by default"),
+ enable_all();
+ [] ->
+ case FeatureNames of
+ [] ->
+ rabbit_log_feature_flags:debug(
+ "Feature flags: starting an unclustered "
+ "node for the first time: all feature "
+ "flags are forcibly left disabled from "
+ "the $RABBITMQ_FEATURE_FLAGS environment "
+ "variable"),
+ ok;
+ _ ->
+ rabbit_log_feature_flags:debug(
+ "Feature flags: starting an unclustered "
+ "node for the first time: only the "
+ "following feature flags specified in "
+ "the $RABBITMQ_FEATURE_FLAGS environment "
+ "variable will be enabled: ~p",
+ [FeatureNames]),
+ enable(FeatureNames)
+ end;
+ _ ->
+ ok
+ end;
+ false ->
+ rabbit_log_feature_flags:debug(
+ "Feature flags: starting an unclustered node which is "
+ "already initialized: all feature flags left in their "
+ "current state"),
+ ok
+ end;
+sync_feature_flags_with_cluster(Nodes, _, Timeout) ->
+ verify_which_feature_flags_are_actually_enabled(),
+ RemoteNodes = Nodes -- [node()],
+ sync_feature_flags_with_cluster1(RemoteNodes, Timeout).
+
+sync_feature_flags_with_cluster1([], _) ->
+ ok;
+sync_feature_flags_with_cluster1(RemoteNodes, Timeout) ->
+ RandomRemoteNode = pick_one_node(RemoteNodes),
+ rabbit_log_feature_flags:debug(
+ "Feature flags: SYNCING FEATURE FLAGS with node `~s`...",
+ [RandomRemoteNode]),
+ case query_remote_feature_flags(RandomRemoteNode, enabled, Timeout) of
+ {error, _} = Error ->
+ Error;
+ RemoteFeatureFlags ->
+ RemoteFeatureNames = maps:keys(RemoteFeatureFlags),
+ rabbit_log_feature_flags:debug(
+            "Feature flags: enabling locally the feature flags already "
+            "enabled on node `~s`...",
+ [RandomRemoteNode]),
+ case do_sync_feature_flags_with_node(RemoteFeatureNames) of
+ ok ->
+ sync_feature_flags_with_cluster2(
+ RandomRemoteNode, Timeout);
+ Error ->
+ Error
+ end
+ end.
+
+sync_feature_flags_with_cluster2(RandomRemoteNode, Timeout) ->
+ LocalFeatureNames = maps:keys(list(enabled)),
+ rabbit_log_feature_flags:debug(
+      "Feature flags: enabling on node `~s` the feature flags already "
+      "enabled locally...",
+ [RandomRemoteNode]),
+ Ret = run_feature_flags_mod_on_remote_node(
+ RandomRemoteNode,
+ do_sync_feature_flags_with_node,
+ [LocalFeatureNames],
+ Timeout),
+ case Ret of
+ {error, pre_feature_flags_rabbitmq} -> ok;
+ _ -> Ret
+ end.
+
+pick_one_node(Nodes) ->
+ RandomIndex = rand:uniform(length(Nodes)),
+ lists:nth(RandomIndex, Nodes).
+
+do_sync_feature_flags_with_node([FeatureFlag | Rest]) ->
+ case enable_locally(FeatureFlag) of
+ ok -> do_sync_feature_flags_with_node(Rest);
+ Error -> Error
+ end;
+do_sync_feature_flags_with_node([]) ->
+ ok.
+
+-spec get_forced_feature_flag_names() -> [feature_name()] | undefined.
+%% @private
+%% @doc
+%% Returns the (possibly empty) list of feature flags the user wants
+%% to enable out-of-the-box when starting a node for the first time.
+%%
+%% Without this, the default is to enable all the supported feature
+%% flags.
+%%
+%% There are two ways to specify that list:
+%% <ol>
+%% <li>Using the `$RABBITMQ_FEATURE_FLAGS' environment variable; for
+%% instance `RABBITMQ_FEATURE_FLAGS=quorum_queue,mnevis'.</li>
+%% <li>Using the `forced_feature_flags_on_init' configuration parameter;
+%% for instance
+%% `{rabbit, [{forced_feature_flags_on_init, [quorum_queue, mnevis]}]}'.</li>
+%% </ol>
+%%
+%% The environment variable has precedence over the configuration
+%% parameter.
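+%%
+%% For instance, with `RABBITMQ_FEATURE_FLAGS=quorum_queue' set, this
+%% function returns `[quorum_queue]'; when neither mechanism is used, it
+%% returns `undefined' and a node started for the first time enables all
+%% supported feature flags.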
+
+get_forced_feature_flag_names() ->
+ Ret = case get_forced_feature_flag_names_from_env() of
+ undefined -> get_forced_feature_flag_names_from_config();
+ List -> List
+ end,
+ case Ret of
+ undefined -> ok;
+ [] -> rabbit_log_feature_flags:info(
+ "Feature flags: automatic enablement of feature "
+ "flags disabled (i.e. none will be enabled "
+ "automatically)");
+ _ -> rabbit_log_feature_flags:info(
+ "Feature flags: automatic enablement of feature "
+ "flags limited to the following list: ~p", [Ret])
+ end,
+ Ret.
+
+-spec get_forced_feature_flag_names_from_env() -> [feature_name()] | undefined.
+%% @private
+
+get_forced_feature_flag_names_from_env() ->
+ case rabbit_prelaunch:get_context() of
+ #{forced_feature_flags_on_init := ForcedFFs}
+ when is_list(ForcedFFs) ->
+ ForcedFFs;
+ _ ->
+ undefined
+ end.
+
+-spec get_forced_feature_flag_names_from_config() -> [feature_name()] | undefined.
+%% @private
+
+get_forced_feature_flag_names_from_config() ->
+ Value = application:get_env(rabbit,
+ forced_feature_flags_on_init,
+ undefined),
+ case Value of
+ undefined ->
+ Value;
+ _ when is_list(Value) ->
+ case lists:all(fun is_atom/1, Value) of
+ true -> Value;
+ false -> undefined
+ end;
+ _ ->
+ undefined
+ end.
+
+-spec verify_which_feature_flags_are_actually_enabled() ->
+ ok | {error, any()} | no_return().
+%% @private
+
+verify_which_feature_flags_are_actually_enabled() ->
+ AllFeatureFlags = list(all),
+ EnabledFeatureNames = read_enabled_feature_flags_list(),
+ rabbit_log_feature_flags:debug(
+ "Feature flags: double-checking feature flag states..."),
+ %% In case the previous instance of the node failed to write the
+ %% feature flags list file, we want to double-check the list of
+ %% enabled feature flags read from disk. For each feature flag,
+ %% we call the migration function to query if the feature flag is
+ %% actually enabled.
+ %%
+ %% If a feature flag doesn't provide a migration function (or if the
+ %% function fails), we keep the current state of the feature flag.
+ List1 = maps:fold(
+ fun(Name, Props, Acc) ->
+ Ret = run_migration_fun(Name, Props, is_enabled),
+ case Ret of
+ true ->
+ [Name | Acc];
+ false ->
+ Acc;
+ _ ->
+ MarkedAsEnabled = is_enabled(Name),
+ case MarkedAsEnabled of
+ true -> [Name | Acc];
+ false -> Acc
+ end
+ end
+ end,
+ [], AllFeatureFlags),
+ RepairedEnabledFeatureNames = lists:sort(List1),
+ %% We log the list of feature flags for which the state changes
+ %% after the check above.
+ WereEnabled = RepairedEnabledFeatureNames -- EnabledFeatureNames,
+ WereDisabled = EnabledFeatureNames -- RepairedEnabledFeatureNames,
+ case {WereEnabled, WereDisabled} of
+ {[], []} -> ok;
+ _ -> rabbit_log_feature_flags:warning(
+ "Feature flags: the previous instance of this node "
+ "must have failed to write the `feature_flags` "
+ "file at `~s`:",
+ [enabled_feature_flags_list_file()])
+ end,
+ case WereEnabled of
+ [] -> ok;
+ _ -> rabbit_log_feature_flags:warning(
+ "Feature flags: - list of previously enabled "
+ "feature flags now marked as such: ~p", [WereEnabled])
+ end,
+ case WereDisabled of
+ [] -> ok;
+ _ -> rabbit_log_feature_flags:warning(
+ "Feature flags: - list of previously disabled "
+ "feature flags now marked as such: ~p", [WereDisabled])
+ end,
+ %% Finally, if the new list of enabled feature flags is different
+ %% than the one on disk, we write the new list and re-initialize the
+ %% registry.
+ case RepairedEnabledFeatureNames of
+ EnabledFeatureNames ->
+ ok;
+ _ ->
+ rabbit_log_feature_flags:debug(
+ "Feature flags: write the repaired list of enabled feature "
+ "flags"),
+ WrittenToDisk = ok =:= try_to_write_enabled_feature_flags_list(
+ RepairedEnabledFeatureNames),
+ initialize_registry(
+ #{},
+ list_of_enabled_feature_flags_to_feature_states(
+ RepairedEnabledFeatureNames),
+ WrittenToDisk)
+ end.
+
+-spec refresh_feature_flags_after_app_load([atom()]) ->
+ ok | {error, any()} | no_return().
+
+refresh_feature_flags_after_app_load([]) ->
+ ok;
+refresh_feature_flags_after_app_load(Apps) ->
+ rabbit_log_feature_flags:debug(
+ "Feature flags: new apps loaded: ~p -> refreshing feature flags",
+ [Apps]),
+
+ FeatureFlags0 = list(all),
+ FeatureFlags1 = query_supported_feature_flags(),
+
+ %% The following list contains all the feature flags this node
+ %% learned about only because remote nodes have them. Now, the
+ %% applications providing them are loaded locally as well.
+ %% Therefore, we may run their migration function in case the state
+ %% of this node needs it.
+ AlreadySupportedFeatureNames = maps:keys(
+ maps:filter(
+ fun(_, #{provided_by := App}) ->
+ lists:member(App, Apps)
+ end, FeatureFlags0)),
+ case AlreadySupportedFeatureNames of
+ [] ->
+ ok;
+ _ ->
+ rabbit_log_feature_flags:debug(
+ "Feature flags: new apps loaded: feature flags already "
+ "supported: ~p",
+ [lists:sort(AlreadySupportedFeatureNames)])
+ end,
+
+ %% The following list contains all the feature flags no nodes in the
+ %% cluster knew about before: this is the first time we see them in
+ %% this instance of the cluster. We need to register them on all
+ %% nodes.
+ NewSupportedFeatureFlags = maps:filter(
+ fun(FeatureName, _) ->
+ not maps:is_key(FeatureName,
+ FeatureFlags0)
+ end, FeatureFlags1),
+ case maps:keys(NewSupportedFeatureFlags) of
+ [] ->
+ ok;
+ NewSupportedFeatureNames ->
+ rabbit_log_feature_flags:debug(
+ "Feature flags: new apps loaded: new feature flags (unseen so "
+ "far): ~p ",
+ [lists:sort(NewSupportedFeatureNames)])
+ end,
+
+ case initialize_registry() of
+ ok ->
+ Ret = maybe_enable_locally_after_app_load(
+ AlreadySupportedFeatureNames),
+ case Ret of
+ ok ->
+ share_new_feature_flags_after_app_load(
+ NewSupportedFeatureFlags, ?TIMEOUT);
+ Error ->
+ Error
+ end;
+ Error ->
+ Error
+ end.
+
+maybe_enable_locally_after_app_load([]) ->
+ ok;
+maybe_enable_locally_after_app_load([FeatureName | Rest]) ->
+ case is_enabled(FeatureName) of
+ true ->
+ case do_enable_locally(FeatureName) of
+ ok -> maybe_enable_locally_after_app_load(Rest);
+ Error -> Error
+ end;
+ false ->
+ maybe_enable_locally_after_app_load(Rest)
+ end.
+
+share_new_feature_flags_after_app_load(FeatureFlags, Timeout) ->
+ push_local_feature_flags_from_apps_unknown_remotely(
+ node(), FeatureFlags, Timeout).
+
+on_load() ->
+ %% The goal of this `on_load()` code server hook is to prevent this
+ %% module from being loaded in an already running RabbitMQ node if
+ %% the running version does not have the feature flags subsystem.
+ %%
+ %% This situation happens when an upgrade overwrites RabbitMQ files
+ %% with the node still running. This is the case with many packages:
+ %% files are updated on disk, then a post-install step takes care of
+ %% restarting the service.
+ %%
+ %% The problem is that if many nodes in a cluster are updated at the
+ %% same time, one node running the newer version might query feature
+ %% flags on an old node where this module is already available
+ %% (because files were already overwritten). This causes the query
+ %% to report an unexpected answer and the newer node to refuse to
+ %% start.
+ %%
+ %% However, when the module is executed outside of RabbitMQ (for
+    %% debugging purposes or in the context of EUnit for instance), we
+ %% want to allow the load. That's why we first check if RabbitMQ is
+ %% actually running.
+ case rabbit:is_running() of
+ true ->
+ %% RabbitMQ is running.
+ %%
+ %% Now we want to differentiate a pre-feature-flags node
+ %% from one having the subsystem.
+ %%
+ %% To do that, we verify if the `feature_flags_file`
+ %% application environment variable is defined. With a
+ %% feature-flags-enabled node, this application environment
+ %% variable is defined by rabbitmq-server(8).
+ case application:get_env(rabbit, feature_flags_file) of
+ {ok, _} ->
+ %% This is a feature-flags-enabled version. Loading
+ %% the module is permitted.
+ ok;
+ _ ->
+ %% This is a pre-feature-flags version. We deny the
+ %% load and report why, possibly specifying the
+ %% version of RabbitMQ.
+ Vsn = case application:get_key(rabbit, vsn) of
+ {ok, V} -> V;
+ undefined -> "unknown version"
+ end,
+ "Refusing to load '" ?MODULE_STRING "' on this "
+ "node. It appears to be running a pre-feature-flags "
+ "version of RabbitMQ (" ++ Vsn ++ "). This is fine: "
+ "a newer version of RabbitMQ was deployed on this "
+ "node, but it was not restarted yet. This warning "
+ "is probably caused by a remote node querying this "
+ "node for its feature flags."
+ end;
+ false ->
+ %% RabbitMQ is not running. Loading the module is permitted
+ %% because this Erlang node will never be queried for its
+ %% feature flags.
+ ok
+ end.
diff --git a/deps/rabbit/src/rabbit_ff_extra.erl b/deps/rabbit/src/rabbit_ff_extra.erl
new file mode 100644
index 0000000000..f0728d491e
--- /dev/null
+++ b/deps/rabbit/src/rabbit_ff_extra.erl
@@ -0,0 +1,244 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% @copyright 2018-2020 VMware, Inc. or its affiliates.
+%%
+%% @doc
+%% This module provides extra functions that are not used by the feature
+%% flags subsystem's core functionality.
+
+-module(rabbit_ff_extra).
+
+-include_lib("stdout_formatter/include/stdout_formatter.hrl").
+
+-export([cli_info/0,
+ info/1,
+ info/2,
+ format_error/1]).
+
+-type cli_info() :: [cli_info_entry()].
+%% A list of feature flags properties, formatted for the RabbitMQ CLI.
+
+-type cli_info_entry() :: [{name, rabbit_feature_flags:feature_name()} |
+ {state, enabled | disabled | unavailable} |
+ {stability, rabbit_feature_flags:stability()} |
+ {provided_by, atom()} |
+ {desc, string()} |
+ {doc_url, string()}].
+%% A list of properties for a single feature flag, formatted for the
+%% RabbitMQ CLI.
+
+-type info_options() :: #{colors => boolean(),
+ lines => boolean(),
+ verbose => non_neg_integer()}.
+%% Options accepted by {@link info/1} and {@link info/2}.
+
+-export_type([info_options/0]).
+
+-spec cli_info() -> cli_info().
+%% @doc
+%% Returns a list of all feature flags properties.
+%%
+%% @returns the list of all feature flags properties.
+
+cli_info() ->
+ cli_info(rabbit_feature_flags:list(all)).
+
+-spec cli_info(rabbit_feature_flags:feature_flags()) -> cli_info().
+%% @doc
+%% Formats a map of feature flags and their properties into a list of
+%% feature flags properties as expected by the RabbitMQ CLI.
+%%
+%% @param FeatureFlags A map of feature flags.
+%% @returns the list of feature flags properties, created from the map
+%% specified in arguments.
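+%%
+%% A sketch of a single returned entry (values are illustrative):
+%% ```
+%% [{name, quorum_queue},
+%%  {desc, <<"Support queues of type quorum">>},
+%%  {doc_url, <<"https://www.rabbitmq.com/quorum-queues.html">>},
+%%  {state, enabled},
+%%  {stability, stable},
+%%  {provided_by, rabbit}]
+%% '''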
+
+cli_info(FeatureFlags) ->
+ lists:foldr(
+ fun(FeatureName, Acc) ->
+ FeatureProps = maps:get(FeatureName, FeatureFlags),
+ State = rabbit_feature_flags:get_state(FeatureName),
+ Stability = rabbit_feature_flags:get_stability(FeatureProps),
+ App = maps:get(provided_by, FeatureProps),
+ Desc = maps:get(desc, FeatureProps, ""),
+ DocUrl = maps:get(doc_url, FeatureProps, ""),
+ FFInfo = [{name, FeatureName},
+ {desc, unicode:characters_to_binary(Desc)},
+ {doc_url, unicode:characters_to_binary(DocUrl)},
+ {state, State},
+ {stability, Stability},
+ {provided_by, App}],
+ [FFInfo | Acc]
+ end, [], lists:sort(maps:keys(FeatureFlags))).
+
+-spec info(info_options()) -> ok.
+%% @doc
+%% Displays an array of all supported feature flags and their properties
+%% on `stdout'.
+%%
+%% @param Options Options to tune what is displayed and how.
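+%%
+%% Example (illustrative option values):
+%% ```
+%% rabbit_ff_extra:info(#{colors => false, lines => false, verbose => 1}).
+%% '''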
+
+info(Options) ->
+ %% Two tables: one for stable feature flags, one for experimental ones.
+ StableFF = rabbit_feature_flags:list(all, stable),
+ case maps:size(StableFF) of
+ 0 ->
+ ok;
+ _ ->
+ stdout_formatter:display(
+ #paragraph{content = "\n## Stable feature flags:",
+ props = #{bold => true}}),
+ info(StableFF, Options)
+ end,
+ ExpFF = rabbit_feature_flags:list(all, experimental),
+ case maps:size(ExpFF) of
+ 0 ->
+ ok;
+ _ ->
+ stdout_formatter:display(
+ #paragraph{content = "\n## Experimental feature flags:",
+ props = #{bold => true}}),
+ info(ExpFF, Options)
+ end,
+ case maps:size(StableFF) + maps:size(ExpFF) of
+ 0 -> ok;
+ _ -> state_legend(Options)
+ end.
+
+-spec info(rabbit_feature_flags:feature_flags(), info_options()) -> ok.
+%% @doc
+%% Displays an array of feature flags and their properties on `stdout',
+%% based on the specified feature flags map.
+%%
+%% @param FeatureFlags Map of the feature flags to display.
+%% @param Options Options to tune what is displayed and how.
+
+info(FeatureFlags, Options) ->
+ Verbose = maps:get(verbose, Options, 0),
+ UseColors = use_colors(Options),
+ UseLines = use_lines(Options),
+ Title = case UseColors of
+ true -> #{title => true};
+ false -> #{}
+ end,
+ Bold = case UseColors of
+ true -> #{bold => true};
+ false -> #{}
+ end,
+ {Green, Yellow, Red} = case UseColors of
+ true ->
+ {#{fg => green},
+ #{fg => yellow},
+ #{bold => true,
+ bg => red}};
+ false ->
+ {#{}, #{}, #{}}
+ end,
+ Border = case UseLines of
+ true -> #{border_drawing => ansi};
+ false -> #{border_drawing => ascii}
+ end,
+ %% Table columns:
+ %% | Name | State | Provided by | Description
+ %%
+ %% where:
+ %% State = Enabled | Disabled | Unavailable (if a node doesn't
+ %% support it).
+ TableHeader = #row{cells = ["Name",
+ "State",
+ "Provided",
+ "Description"],
+ props = Title},
+ Nodes = lists:sort([node() | rabbit_feature_flags:remote_nodes()]),
+ Rows = lists:map(
+ fun(FeatureName) ->
+ FeatureProps = maps:get(FeatureName, FeatureFlags),
+ State0 = rabbit_feature_flags:get_state(FeatureName),
+ {State, Color} = case State0 of
+ enabled ->
+ {"Enabled", Green};
+ disabled ->
+ {"Disabled", Yellow};
+ unavailable ->
+ {"Unavailable", Red}
+ end,
+ App = maps:get(provided_by, FeatureProps),
+ Desc = maps:get(desc, FeatureProps, ""),
+ VFun = fun(Node) ->
+ Supported =
+ rabbit_feature_flags:does_node_support(
+ Node, [FeatureName], 60000),
+ {Label, LabelColor} =
+ case Supported of
+ true -> {"supported", #{}};
+ false -> {"unsupported", Red}
+ end,
+ #paragraph{content =
+ [rabbit_misc:format(" ~s: ",
+ [Node]),
+ #paragraph{content = Label,
+ props = LabelColor}]}
+ end,
+ ExtraLines = if
+ Verbose > 0 ->
+ NodesList = lists:join(
+ "\n",
+ lists:map(
+ VFun, Nodes)),
+ ["\n\n",
+ "Per-node support level:\n"
+ | NodesList];
+ true ->
+ []
+ end,
+ [#paragraph{content = FeatureName,
+ props = Bold},
+ #paragraph{content = State,
+ props = Color},
+ #paragraph{content = App},
+ #paragraph{content = [Desc | ExtraLines]}]
+ end, lists:sort(maps:keys(FeatureFlags))),
+ io:format("~n", []),
+ stdout_formatter:display(#table{rows = [TableHeader | Rows],
+ props = Border#{cell_padding => {0, 1}}}).
+
+use_colors(Options) ->
+ maps:get(colors, Options, true).
+
+use_lines(Options) ->
+ maps:get(lines, Options, true).
+
+state_legend(Options) ->
+ UseColors = use_colors(Options),
+ {Green, Yellow, Red} = case UseColors of
+ true ->
+ {#{fg => green},
+ #{fg => yellow},
+ #{bold => true,
+ bg => red}};
+ false ->
+ {#{}, #{}, #{}}
+ end,
+ Enabled = #paragraph{content = "Enabled", props = Green},
+ Disabled = #paragraph{content = "Disabled", props = Yellow},
+ Unavailable = #paragraph{content = "Unavailable", props = Red},
+ stdout_formatter:display(
+ #paragraph{
+ content =
+ ["\n",
+ "Possible states:\n",
+ " ", Enabled, ": The feature flag is enabled on all nodes\n",
+ " ", Disabled, ": The feature flag is disabled on all nodes\n",
+ " ", Unavailable, ": The feature flag cannot be enabled because"
+ " one or more nodes do not support it\n"]}).
+
+-spec format_error(any()) -> string().
+%% @doc
+%% Formats the error reason term so it can be presented to human beings.
+%%
+%% @param Reason The term in the `{error, Reason}' tuple.
+%% @returns the formatted error reason.
+
+format_error(Reason) ->
+ rabbit_misc:format("~p", [Reason]).
diff --git a/deps/rabbit/src/rabbit_ff_registry.erl b/deps/rabbit/src/rabbit_ff_registry.erl
new file mode 100644
index 0000000000..372971f949
--- /dev/null
+++ b/deps/rabbit/src/rabbit_ff_registry.erl
@@ -0,0 +1,189 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2018-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+%% @author The RabbitMQ team
+%% @copyright 2018-2020 VMware, Inc. or its affiliates.
+%%
+%% @doc
+%% This module exposes the API of the {@link rabbit_feature_flags}
+%% registry. The feature flags registry is an Erlang module, compiled at
+%% runtime, storing all the information about feature flags: which are
+%% supported, which are enabled, etc.
+%%
+%% Because it is compiled at runtime, the initial source code is mostly
+%% an API reference. What the initial module does is merely ask {@link
+%% rabbit_feature_flags} to generate the real registry.
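+%%
+%% For example, a first call such as
+%% `rabbit_ff_registry:is_supported(quorum_queue)' (the flag name is
+%% illustrative) triggers the registry build, then re-dispatches to the
+%% generated copy of this module.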
+
+-module(rabbit_ff_registry).
+
+-export([get/1,
+ list/1,
+ states/0,
+ is_supported/1,
+ is_enabled/1,
+ is_registry_initialized/0,
+ is_registry_written_to_disk/0]).
+
+-ifdef(TEST).
+-on_load(on_load/0).
+-endif.
+
+-spec get(rabbit_feature_flags:feature_name()) ->
+ rabbit_feature_flags:feature_props() | undefined.
+%% @doc
+%% Returns the properties of a feature flag.
+%%
+%% Only the information stored in the local registry is used to answer
+%% this call.
+%%
+%% @param FeatureName The name of the feature flag.
+%% @returns the properties of the specified feature flag.
+
+get(FeatureName) ->
+ rabbit_feature_flags:initialize_registry(),
+ %% Initially, is_registry_initialized/0 always returns `false`
+ %% and this ?MODULE:get(FeatureName) is always called. The case
+ %% statement is here to please Dialyzer.
+ case is_registry_initialized() of
+ false -> ?MODULE:get(FeatureName);
+ true -> undefined
+ end.
+
+-spec list(all | enabled | disabled) -> rabbit_feature_flags:feature_flags().
+%% @doc
+%% Lists all, enabled or disabled feature flags, depending on the argument.
+%%
+%% Only the information stored in the local registry is used to answer
+%% this call.
+%%
+%% @param Which The group of feature flags to return: `all', `enabled' or
+%% `disabled'.
+%% @returns A map of selected feature flags.
+
+list(Which) ->
+ rabbit_feature_flags:initialize_registry(),
+ %% See get/1 for an explanation of the case statement below.
+ case is_registry_initialized() of
+ false -> ?MODULE:list(Which);
+ true -> #{}
+ end.
+
+-spec states() -> rabbit_feature_flags:feature_states().
+%% @doc
+%% Returns the states of supported feature flags.
+%%
+%% Only the information stored in the local registry is used to answer
+%% this call.
+%%
+%% @returns A map of feature flag states.
+
+states() ->
+ rabbit_feature_flags:initialize_registry(),
+ %% See get/1 for an explanation of the case statement below.
+ case is_registry_initialized() of
+ false -> ?MODULE:states();
+ true -> #{}
+ end.
+
+-spec is_supported(rabbit_feature_flags:feature_name()) -> boolean().
+%% @doc
+%% Returns whether a feature flag is supported.
+%%
+%% Only the information stored in the local registry is used to answer
+%% this call.
+%%
+%% @param FeatureName The name of the feature flag to be checked.
+%% @returns `true' if the feature flag is supported, or `false'
+%% otherwise.
+
+is_supported(FeatureName) ->
+ rabbit_feature_flags:initialize_registry(),
+ %% See get/1 for an explanation of the case statement below.
+ case is_registry_initialized() of
+ false -> ?MODULE:is_supported(FeatureName);
+ true -> false
+ end.
+
+-spec is_enabled(rabbit_feature_flags:feature_name()) -> boolean() | state_changing.
+%% @doc
+%% Returns whether a feature flag is enabled or if its state is changing.
+%%
+%% Only the information stored in the local registry is used to answer
+%% this call.
+%%
+%% @param FeatureName The name of the feature flag to be checked.
+%% @returns `true' if the feature flag is enabled, `state_changing' if
+%% its state is currently changing, or `false' otherwise.
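+%%
+%% Example (the flag name and helper functions are illustrative):
+%% ```
+%% case rabbit_ff_registry:is_enabled(quorum_queue) of
+%%     true           -> new_code_path();
+%%     state_changing -> wait_and_retry();
+%%     false          -> old_code_path()
+%% end
+%% '''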
+
+is_enabled(FeatureName) ->
+ rabbit_feature_flags:initialize_registry(),
+ %% See get/1 for an explanation of the case statement below.
+ case is_registry_initialized() of
+ false -> ?MODULE:is_enabled(FeatureName);
+ true -> false
+ end.
+
+-spec is_registry_initialized() -> boolean().
+%% @doc
+%% Indicates if the registry is initialized.
+%%
+%% The registry is considered initialized once the initial Erlang module
+%% has been replaced by the copy compiled at runtime.
+%%
+%% @returns `true' when the module is the one compiled at runtime,
+%% `false' when the module is the initial one compiled from RabbitMQ
+%% source code.
+
+is_registry_initialized() ->
+ always_return_false().
+
+-spec is_registry_written_to_disk() -> boolean().
+%% @doc
+%% Indicates if the feature flags state was successfully persisted to disk.
+%%
+%% Note that on startup, {@link rabbit_feature_flags} tries to determine
+%% the state of each supported feature flag, regardless of the
+%% information on disk, to ensure maximum consistency. However, this is
+%% only possible for feature flags which support it.
+%%
+%% @returns `true' if the state was successfully written to disk and
+%% the registry can be initialized from that during the next RabbitMQ
+%% startup, `false' if the write failed and the node might lose feature
+%% flags state on restart.
+
+is_registry_written_to_disk() ->
+ always_return_true().
+
+always_return_true() ->
+ %% This function is here to trick Dialyzer. We want some functions
+ %% in this initial on-disk registry to always return `true` or
+ %% `false`. However the generated registry will return actual
+ %% booleans. The `-spec()` correctly advertises a return type of
+ %% `boolean()`. But in the meantime, Dialyzer only knows about this
+ %% copy which, without the trick below, would always return either
+ %% `true` (e.g. in is_registry_written_to_disk/0) or `false` (e.g.
+ %% is_registry_initialized/0). This obviously causes some warnings
+ %% where the registry functions are used: Dialyzer believes that
+ %% e.g. matching the return value of is_registry_initialized/0
+ %% against `true` will never succeed.
+ %%
+ %% That's why this function makes a call which we know the result,
+ %% but not Dialyzer, to "create" that hard-coded `true` return
+ %% value.
+ erlang:get({?MODULE, always_undefined}) =:= undefined.
+
+always_return_false() ->
+ not always_return_true().
+
+-ifdef(TEST).
+on_load() ->
+ _ = (catch rabbit_log_feature_flags:debug(
+ "Feature flags: Loading initial (uninitialized) registry "
+ "module (~p)",
+ [self()])),
+ ok.
+-endif.
diff --git a/deps/rabbit/src/rabbit_fhc_helpers.erl b/deps/rabbit/src/rabbit_fhc_helpers.erl
new file mode 100644
index 0000000000..d310e84008
--- /dev/null
+++ b/deps/rabbit/src/rabbit_fhc_helpers.erl
@@ -0,0 +1,45 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_fhc_helpers).
+
+-export([clear_read_cache/0]).
+
+-include("amqqueue.hrl").
+
+clear_read_cache() ->
+ case application:get_env(rabbit, fhc_read_buffering) of
+ {ok, true} ->
+ file_handle_cache:clear_read_cache(),
+ clear_vhost_read_cache(rabbit_vhost:list_names());
+ _ -> %% undefined or {ok, false}
+ ok
+ end.
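+
+%% A configuration sketch (assumed key name, matching the lookup above):
+%% read buffering is toggled via the `fhc_read_buffering' key of the
+%% `rabbit' application environment, e.g.
+%%   {rabbit, [{fhc_read_buffering, true}]}.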
+
+clear_vhost_read_cache([]) ->
+ ok;
+clear_vhost_read_cache([VHost | Rest]) ->
+ clear_queue_read_cache(rabbit_amqqueue:list(VHost)),
+ clear_vhost_read_cache(Rest).
+
+clear_queue_read_cache([]) ->
+ ok;
+clear_queue_read_cache([Q | Rest]) when ?is_amqqueue(Q) ->
+ MPid = amqqueue:get_pid(Q),
+ SPids = amqqueue:get_slave_pids(Q),
+ %% Limit the action to the current node.
+ Pids = [P || P <- [MPid | SPids], node(P) =:= node()],
+ %% This function is executed in the context of the backing queue
+ %% process because the read buffer is stored in the process
+ %% dictionary.
+ Fun = fun(_, State) ->
+ _ = file_handle_cache:clear_process_read_cache(),
+ State
+ end,
+ [rabbit_amqqueue:run_backing_queue(Pid, rabbit_variable_queue, Fun)
+ || Pid <- Pids],
+ clear_queue_read_cache(Rest).
diff --git a/deps/rabbit/src/rabbit_fifo.erl b/deps/rabbit/src/rabbit_fifo.erl
new file mode 100644
index 0000000000..51acfffd0d
--- /dev/null
+++ b/deps/rabbit/src/rabbit_fifo.erl
@@ -0,0 +1,2124 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_fifo).
+
+-behaviour(ra_machine).
+
+-compile(inline_list_funcs).
+-compile(inline).
+-compile({no_auto_import, [apply/3]}).
+
+-include("rabbit_fifo.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-export([
+ init/1,
+ apply/3,
+ state_enter/2,
+ tick/2,
+ overview/1,
+ get_checked_out/4,
+ %% versioning
+ version/0,
+ which_module/1,
+ %% aux
+ init_aux/1,
+ handle_aux/6,
+ % queries
+ query_messages_ready/1,
+ query_messages_checked_out/1,
+ query_messages_total/1,
+ query_processes/1,
+ query_ra_indexes/1,
+ query_consumer_count/1,
+ query_consumers/1,
+ query_stat/1,
+ query_single_active_consumer/1,
+ query_in_memory_usage/1,
+ query_peek/2,
+ usage/1,
+
+ zero/1,
+
+ %% misc
+ dehydrate_state/1,
+ normalize/1,
+
+ %% protocol helpers
+ make_enqueue/3,
+ make_register_enqueuer/1,
+ make_checkout/3,
+ make_settle/2,
+ make_return/2,
+ make_discard/2,
+ make_credit/4,
+ make_purge/0,
+ make_purge_nodes/1,
+ make_update_config/1,
+ make_garbage_collection/0
+ ]).
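+
+%% A usage sketch for the protocol helpers exported above (values are
+%% illustrative; per the apply/3 spec below, the return is either
+%% `{State, Reply}' or `{State, Reply, Effects}'):
+%%
+%%   State0 = rabbit_fifo:init(Conf),
+%%   Meta   = #{index => 1, system_time => erlang:system_time(millisecond)},
+%%   Cmd    = rabbit_fifo:make_enqueue(self(), 1, Msg),
+%%   Ret    = rabbit_fifo:apply(Meta, Cmd, State0).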
+
+%% command records representing all the protocol actions that are supported
+-record(enqueue, {pid :: option(pid()),
+ seq :: option(msg_seqno()),
+ msg :: raw_msg()}).
+-record(register_enqueuer, {pid :: pid()}).
+-record(checkout, {consumer_id :: consumer_id(),
+ spec :: checkout_spec(),
+ meta :: consumer_meta()}).
+-record(settle, {consumer_id :: consumer_id(),
+ msg_ids :: [msg_id()]}).
+-record(return, {consumer_id :: consumer_id(),
+ msg_ids :: [msg_id()]}).
+-record(discard, {consumer_id :: consumer_id(),
+ msg_ids :: [msg_id()]}).
+-record(credit, {consumer_id :: consumer_id(),
+ credit :: non_neg_integer(),
+ delivery_count :: non_neg_integer(),
+ drain :: boolean()}).
+-record(purge, {}).
+-record(purge_nodes, {nodes :: [node()]}).
+-record(update_config, {config :: config()}).
+-record(garbage_collection, {}).
+
+-opaque protocol() ::
+ #enqueue{} |
+ #register_enqueuer{} |
+ #checkout{} |
+ #settle{} |
+ #return{} |
+ #discard{} |
+ #credit{} |
+ #purge{} |
+ #purge_nodes{} |
+ #update_config{} |
+ #garbage_collection{}.
+
+-type command() :: protocol() | ra_machine:builtin_command().
+%% all the command types supported by ra fifo
+
+-type client_msg() :: delivery().
+%% the messages `rabbit_fifo' can send to consumers.
+
+-opaque state() :: #?MODULE{}.
+
+-export_type([protocol/0,
+ delivery/0,
+ command/0,
+ credit_mode/0,
+ consumer_tag/0,
+ consumer_meta/0,
+ consumer_id/0,
+ client_msg/0,
+ msg/0,
+ msg_id/0,
+ msg_seqno/0,
+ delivery_msg/0,
+ state/0,
+ config/0]).
+
+-spec init(config()) -> state().
+init(#{name := Name,
+ queue_resource := Resource} = Conf) ->
+ update_config(Conf, #?MODULE{cfg = #cfg{name = Name,
+ resource = Resource}}).
+
+update_config(Conf, State) ->
+ DLH = maps:get(dead_letter_handler, Conf, undefined),
+ BLH = maps:get(become_leader_handler, Conf, undefined),
+ RCI = maps:get(release_cursor_interval, Conf, ?RELEASE_CURSOR_EVERY),
+ Overflow = maps:get(overflow_strategy, Conf, drop_head),
+ MaxLength = maps:get(max_length, Conf, undefined),
+ MaxBytes = maps:get(max_bytes, Conf, undefined),
+ MaxMemoryLength = maps:get(max_in_memory_length, Conf, undefined),
+ MaxMemoryBytes = maps:get(max_in_memory_bytes, Conf, undefined),
+ DeliveryLimit = maps:get(delivery_limit, Conf, undefined),
+ Expires = maps:get(expires, Conf, undefined),
+ ConsumerStrategy = case maps:get(single_active_consumer_on, Conf, false) of
+ true ->
+ single_active;
+ false ->
+ competing
+ end,
+ Cfg = State#?MODULE.cfg,
+ RCISpec = {RCI, RCI},
+
+ LastActive = maps:get(created, Conf, undefined),
+ State#?MODULE{cfg = Cfg#cfg{release_cursor_interval = RCISpec,
+ dead_letter_handler = DLH,
+ become_leader_handler = BLH,
+ overflow_strategy = Overflow,
+ max_length = MaxLength,
+ max_bytes = MaxBytes,
+ max_in_memory_length = MaxMemoryLength,
+ max_in_memory_bytes = MaxMemoryBytes,
+ consumer_strategy = ConsumerStrategy,
+ delivery_limit = DeliveryLimit,
+ expires = Expires},
+ last_active = LastActive}.
+
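+%% An illustrative config map accepted by init/1 and update_config/2
+%% (the queue name and limits are hypothetical):
+%%
+%%   Conf = #{name => my_queue,
+%%            queue_resource => rabbit_misc:r(<<"/">>, queue, <<"my_queue">>),
+%%            max_length => 10000,
+%%            overflow_strategy => drop_head,
+%%            single_active_consumer_on => false}.
+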
+zero(_) ->
+ 0.
+
+% msg_ids are scoped per consumer
+% ra_indexes holds all raft indexes for enqueues currently on queue
+-spec apply(ra_machine:command_meta_data(), command(), state()) ->
+ {state(), Reply :: term(), ra_machine:effects()} |
+ {state(), Reply :: term()}.
+apply(Meta, #enqueue{pid = From, seq = Seq,
+ msg = RawMsg}, State00) ->
+ apply_enqueue(Meta, From, Seq, RawMsg, State00);
+apply(_Meta, #register_enqueuer{pid = Pid},
+ #?MODULE{enqueuers = Enqueuers0,
+ cfg = #cfg{overflow_strategy = Overflow}} = State0) ->
+
+ State = case maps:is_key(Pid, Enqueuers0) of
+ true ->
+                %% if the enqueuer already exists, just echo the overflow state
+ State0;
+ false ->
+ State0#?MODULE{enqueuers = Enqueuers0#{Pid => #enqueuer{}}}
+ end,
+ Res = case is_over_limit(State) of
+ true when Overflow == reject_publish ->
+ reject_publish;
+ _ ->
+ ok
+ end,
+ {State, Res, [{monitor, process, Pid}]};
+apply(Meta,
+ #settle{msg_ids = MsgIds, consumer_id = ConsumerId},
+ #?MODULE{consumers = Cons0} = State) ->
+ case Cons0 of
+ #{ConsumerId := Con0} ->
+ % need to increment metrics before completing as any snapshot
+ % states taken need to include them
+ complete_and_checkout(Meta, MsgIds, ConsumerId,
+ Con0, [], State);
+ _ ->
+ {State, ok}
+
+ end;
+apply(Meta, #discard{msg_ids = MsgIds, consumer_id = ConsumerId},
+ #?MODULE{consumers = Cons0} = State0) ->
+ case Cons0 of
+ #{ConsumerId := Con0} ->
+ Discarded = maps:with(MsgIds, Con0#consumer.checked_out),
+ Effects = dead_letter_effects(rejected, Discarded, State0, []),
+ complete_and_checkout(Meta, MsgIds, ConsumerId, Con0,
+ Effects, State0);
+ _ ->
+ {State0, ok}
+ end;
+apply(Meta, #return{msg_ids = MsgIds, consumer_id = ConsumerId},
+ #?MODULE{consumers = Cons0} = State) ->
+ case Cons0 of
+ #{ConsumerId := #consumer{checked_out = Checked0}} ->
+ Returned = maps:with(MsgIds, Checked0),
+ return(Meta, ConsumerId, Returned, [], State);
+ _ ->
+ {State, ok}
+ end;
+apply(Meta, #credit{credit = NewCredit, delivery_count = RemoteDelCnt,
+ drain = Drain, consumer_id = ConsumerId},
+ #?MODULE{consumers = Cons0,
+ service_queue = ServiceQueue0,
+ waiting_consumers = Waiting0} = State0) ->
+ case Cons0 of
+ #{ConsumerId := #consumer{delivery_count = DelCnt} = Con0} ->
+ %% this can go below 0 when credit is reduced
+ C = max(0, RemoteDelCnt + NewCredit - DelCnt),
+ %% grant the credit
+ Con1 = Con0#consumer{credit = C},
+ ServiceQueue = maybe_queue_consumer(ConsumerId, Con1,
+ ServiceQueue0),
+ Cons = maps:put(ConsumerId, Con1, Cons0),
+ {State1, ok, Effects} =
+ checkout(Meta, State0,
+ State0#?MODULE{service_queue = ServiceQueue,
+ consumers = Cons}, []),
+ Response = {send_credit_reply, messages_ready(State1)},
+ %% by this point all checkouts for the updated credit value
+ %% should be processed so we can evaluate the drain
+ case Drain of
+ false ->
+ %% just return the result of the checkout
+ {State1, Response, Effects};
+ true ->
+ Con = #consumer{credit = PostCred} =
+ maps:get(ConsumerId, State1#?MODULE.consumers),
+ %% add the outstanding credit to the delivery count
+ DeliveryCount = Con#consumer.delivery_count + PostCred,
+ Consumers = maps:put(ConsumerId,
+ Con#consumer{delivery_count = DeliveryCount,
+ credit = 0},
+ State1#?MODULE.consumers),
+ Drained = Con#consumer.credit,
+ {CTag, _} = ConsumerId,
+ {State1#?MODULE{consumers = Consumers},
+ %% returning a multi response with two client actions
+ %% for the channel to execute
+ {multi, [Response, {send_drained, {CTag, Drained}}]},
+ Effects}
+ end;
+ _ when Waiting0 /= [] ->
+            %% there are waiting consumers
+ case lists:keytake(ConsumerId, 1, Waiting0) of
+ {value, {_, Con0 = #consumer{delivery_count = DelCnt}}, Waiting} ->
+ %% the consumer is a waiting one
+ %% grant the credit
+ C = max(0, RemoteDelCnt + NewCredit - DelCnt),
+ Con = Con0#consumer{credit = C},
+ State = State0#?MODULE{waiting_consumers =
+ [{ConsumerId, Con} | Waiting]},
+ {State, {send_credit_reply, messages_ready(State)}};
+ false ->
+ {State0, ok}
+ end;
+ _ ->
+ %% credit for unknown consumer - just ignore
+ {State0, ok}
+ end;
+apply(_, #checkout{spec = {dequeue, _}},
+ #?MODULE{cfg = #cfg{consumer_strategy = single_active}} = State0) ->
+ {State0, {error, {unsupported, single_active_consumer}}};
+apply(#{index := Index,
+ system_time := Ts,
+ from := From} = Meta, #checkout{spec = {dequeue, Settlement},
+ meta = ConsumerMeta,
+ consumer_id = ConsumerId},
+ #?MODULE{consumers = Consumers} = State00) ->
+ %% dequeue always updates last_active
+ State0 = State00#?MODULE{last_active = Ts},
+    %% all dequeue operations keep the queue from expiring
+ Exists = maps:is_key(ConsumerId, Consumers),
+ case messages_ready(State0) of
+ 0 ->
+ {State0, {dequeue, empty}};
+ _ when Exists ->
+ %% a dequeue using the same consumer_id isn't possible at this point
+ {State0, {dequeue, empty}};
+ Ready ->
+ State1 = update_consumer(ConsumerId, ConsumerMeta,
+ {once, 1, simple_prefetch}, 0,
+ State0),
+ {success, _, MsgId, Msg, State2} = checkout_one(Meta, State1),
+ {State4, Effects1} = case Settlement of
+ unsettled ->
+ {_, Pid} = ConsumerId,
+ {State2, [{monitor, process, Pid}]};
+ settled ->
+ %% immediately settle the checkout
+ {State3, _, Effects0} =
+ apply(Meta, make_settle(ConsumerId, [MsgId]),
+ State2),
+ {State3, Effects0}
+ end,
+ {Reply, Effects2} =
+ case Msg of
+ {RaftIdx, {Header, empty}} ->
+ %% TODO add here new log effect with reply
+ {'$ra_no_reply',
+ [reply_log_effect(RaftIdx, MsgId, Header, Ready - 1, From) |
+ Effects1]};
+ _ ->
+ {{dequeue, {MsgId, Msg}, Ready-1}, Effects1}
+
+ end,
+
+ case evaluate_limit(Index, false, State0, State4, Effects2) of
+ {State, true, Effects} ->
+ update_smallest_raft_index(Index, Reply, State, Effects);
+ {State, false, Effects} ->
+ {State, Reply, Effects}
+ end
+ end;
+apply(Meta, #checkout{spec = cancel, consumer_id = ConsumerId}, State0) ->
+ {State, Effects} = cancel_consumer(Meta, ConsumerId, State0, [],
+ consumer_cancel),
+ checkout(Meta, State0, State, Effects);
+apply(Meta, #checkout{spec = Spec, meta = ConsumerMeta,
+ consumer_id = {_, Pid} = ConsumerId},
+ State0) ->
+ Priority = get_priority_from_args(ConsumerMeta),
+ State1 = update_consumer(ConsumerId, ConsumerMeta, Spec, Priority, State0),
+ checkout(Meta, State0, State1, [{monitor, process, Pid}]);
+apply(#{index := Index}, #purge{},
+ #?MODULE{ra_indexes = Indexes0,
+ returns = Returns,
+ messages = Messages} = State0) ->
+ Total = messages_ready(State0),
+ Indexes1 = lists:foldl(fun rabbit_fifo_index:delete/2, Indexes0,
+ [I || {_, {I, _}} <- lqueue:to_list(Messages)]),
+ Indexes = lists:foldl(fun rabbit_fifo_index:delete/2, Indexes1,
+ [I || {_, {I, _}} <- lqueue:to_list(Returns)]),
+
+ State1 = State0#?MODULE{ra_indexes = Indexes,
+ messages = lqueue:new(),
+ returns = lqueue:new(),
+ msg_bytes_enqueue = 0,
+ prefix_msgs = {0, [], 0, []},
+ msg_bytes_in_memory = 0,
+ msgs_ready_in_memory = 0},
+ Effects0 = [garbage_collection],
+ Reply = {purge, Total},
+ {State, _, Effects} = evaluate_limit(Index, false, State0,
+ State1, Effects0),
+ update_smallest_raft_index(Index, Reply, State, Effects);
+apply(_Meta, #garbage_collection{}, State) ->
+ {State, ok, [{aux, garbage_collection}]};
+apply(#{system_time := Ts} = Meta, {down, Pid, noconnection},
+ #?MODULE{consumers = Cons0,
+ cfg = #cfg{consumer_strategy = single_active},
+ waiting_consumers = Waiting0,
+ enqueuers = Enqs0} = State0) ->
+ Node = node(Pid),
+ %% if the pid refers to an active or cancelled consumer,
+ %% mark it as suspected and return it to the waiting queue
+ {State1, Effects0} =
+ maps:fold(fun({_, P} = Cid, C0, {S0, E0})
+ when node(P) =:= Node ->
+ %% the consumer should be returned to waiting
+ %% and checked out messages should be returned
+ Effs = consumer_update_active_effects(
+ S0, Cid, C0, false, suspected_down, E0),
+ Checked = C0#consumer.checked_out,
+ Credit = increase_credit(C0, maps:size(Checked)),
+ {St, Effs1} = return_all(Meta, S0, Effs,
+ Cid, C0#consumer{credit = Credit}),
+                         %% if the consumer was cancelled there is a chance it got
+                         %% removed when returning, hence we need to be defensive here
+ Waiting = case St#?MODULE.consumers of
+ #{Cid := C} ->
+ Waiting0 ++ [{Cid, C}];
+ _ ->
+ Waiting0
+ end,
+ {St#?MODULE{consumers = maps:remove(Cid, St#?MODULE.consumers),
+ waiting_consumers = Waiting,
+ last_active = Ts},
+ Effs1};
+ (_, _, S) ->
+ S
+ end, {State0, []}, Cons0),
+ WaitingConsumers = update_waiting_consumer_status(Node, State1,
+ suspected_down),
+
+ %% select a new consumer from the waiting queue and run a checkout
+ State2 = State1#?MODULE{waiting_consumers = WaitingConsumers},
+ {State, Effects1} = activate_next_consumer(State2, Effects0),
+
+    %% mark any enqueuers as suspected down
+ Enqs = maps:map(fun(P, E) when node(P) =:= Node ->
+ E#enqueuer{status = suspected_down};
+ (_, E) -> E
+ end, Enqs0),
+ Effects = [{monitor, node, Node} | Effects1],
+ checkout(Meta, State0, State#?MODULE{enqueuers = Enqs}, Effects);
+apply(#{system_time := Ts} = Meta, {down, Pid, noconnection},
+ #?MODULE{consumers = Cons0,
+ enqueuers = Enqs0} = State0) ->
+    %% A node has been disconnected. This doesn't necessarily mean that
+    %% any processes on this node are down; they _may_ come back, so here
+    %% we just mark them as suspected (effectively deactivated) and
+    %% return all checked out messages to the main queue for delivery
+    %% to any live consumers.
+ %%
+ %% all pids for the disconnected node will be marked as suspected not just
+ %% the one we got the `down' command for
+ Node = node(Pid),
+
+ {State, Effects1} =
+ maps:fold(
+ fun({_, P} = Cid, #consumer{checked_out = Checked0,
+ status = up} = C0,
+ {St0, Eff}) when node(P) =:= Node ->
+ Credit = increase_credit(C0, map_size(Checked0)),
+ C = C0#consumer{status = suspected_down,
+ credit = Credit},
+ {St, Eff0} = return_all(Meta, St0, Eff, Cid, C),
+ Eff1 = consumer_update_active_effects(St, Cid, C, false,
+ suspected_down, Eff0),
+ {St, Eff1};
+ (_, _, {St, Eff}) ->
+ {St, Eff}
+ end, {State0, []}, Cons0),
+ Enqs = maps:map(fun(P, E) when node(P) =:= Node ->
+ E#enqueuer{status = suspected_down};
+ (_, E) -> E
+ end, Enqs0),
+
+ % Monitor the node so that we can "unsuspect" these processes when the node
+ % comes back, then re-issue all monitors and discover the final fate of
+ % these processes
+ Effects = case maps:size(State#?MODULE.consumers) of
+ 0 ->
+ [{aux, inactive}, {monitor, node, Node}];
+ _ ->
+ [{monitor, node, Node}]
+ end ++ Effects1,
+ checkout(Meta, State0, State#?MODULE{enqueuers = Enqs,
+ last_active = Ts}, Effects);
+apply(Meta, {down, Pid, _Info}, State0) ->
+ {State, Effects} = handle_down(Meta, Pid, State0),
+ checkout(Meta, State0, State, Effects);
+apply(Meta, {nodeup, Node}, #?MODULE{consumers = Cons0,
+ enqueuers = Enqs0,
+ service_queue = _SQ0} = State0) ->
+ %% A node we are monitoring has come back.
+ %% If we have suspected any processes of being
+ %% down we should now re-issue the monitors for them to detect if they're
+ %% actually down or not
+ Monitors = [{monitor, process, P}
+ || P <- suspected_pids_for(Node, State0)],
+
+ Enqs1 = maps:map(fun(P, E) when node(P) =:= Node ->
+ E#enqueuer{status = up};
+ (_, E) -> E
+ end, Enqs0),
+ ConsumerUpdateActiveFun = consumer_active_flag_update_function(State0),
+ %% mark all consumers as up
+ {State1, Effects1} =
+ maps:fold(fun({_, P} = ConsumerId, C, {SAcc, EAcc})
+ when (node(P) =:= Node) and
+ (C#consumer.status =/= cancelled) ->
+ EAcc1 = ConsumerUpdateActiveFun(SAcc, ConsumerId,
+ C, true, up, EAcc),
+ {update_or_remove_sub(Meta, ConsumerId,
+ C#consumer{status = up},
+ SAcc), EAcc1};
+ (_, _, Acc) ->
+ Acc
+ end, {State0, Monitors}, Cons0),
+ Waiting = update_waiting_consumer_status(Node, State1, up),
+ State2 = State1#?MODULE{
+ enqueuers = Enqs1,
+ waiting_consumers = Waiting},
+ {State, Effects} = activate_next_consumer(State2, Effects1),
+ checkout(Meta, State0, State, Effects);
+apply(_, {nodedown, _Node}, State) ->
+ {State, ok};
+apply(Meta, #purge_nodes{nodes = Nodes}, State0) ->
+ {State, Effects} = lists:foldl(fun(Node, {S, E}) ->
+ purge_node(Meta, Node, S, E)
+ end, {State0, []}, Nodes),
+ {State, ok, Effects};
+apply(Meta, #update_config{config = Conf}, State) ->
+ checkout(Meta, State, update_config(Conf, State), []);
+apply(_Meta, {machine_version, 0, 1}, V0State) ->
+ State = convert_v0_to_v1(V0State),
+ {State, ok, []}.
+
+convert_v0_to_v1(V0State0) ->
+ V0State = rabbit_fifo_v0:normalize_for_v1(V0State0),
+ V0Msgs = rabbit_fifo_v0:get_field(messages, V0State),
+ V1Msgs = lqueue:from_list(lists:sort(maps:to_list(V0Msgs))),
+ V0Enqs = rabbit_fifo_v0:get_field(enqueuers, V0State),
+ V1Enqs = maps:map(
+ fun (_EPid, E) ->
+ #enqueuer{next_seqno = element(2, E),
+ pending = element(3, E),
+ status = element(4, E)}
+ end, V0Enqs),
+ V0Cons = rabbit_fifo_v0:get_field(consumers, V0State),
+ V1Cons = maps:map(
+ fun (_CId, C0) ->
+ %% add the priority field
+ list_to_tuple(tuple_to_list(C0) ++ [0])
+ end, V0Cons),
+ V0SQ = rabbit_fifo_v0:get_field(service_queue, V0State),
+ V1SQ = priority_queue:from_list(queue:to_list(V0SQ)),
+ Cfg = #cfg{name = rabbit_fifo_v0:get_cfg_field(name, V0State),
+ resource = rabbit_fifo_v0:get_cfg_field(resource, V0State),
+ release_cursor_interval = rabbit_fifo_v0:get_cfg_field(release_cursor_interval, V0State),
+ dead_letter_handler = rabbit_fifo_v0:get_cfg_field(dead_letter_handler, V0State),
+ become_leader_handler = rabbit_fifo_v0:get_cfg_field(become_leader_handler, V0State),
+ %% TODO: what if policy enabling reject_publish was applied before conversion?
+ overflow_strategy = drop_head,
+ max_length = rabbit_fifo_v0:get_cfg_field(max_length, V0State),
+ max_bytes = rabbit_fifo_v0:get_cfg_field(max_bytes, V0State),
+ consumer_strategy = rabbit_fifo_v0:get_cfg_field(consumer_strategy, V0State),
+ delivery_limit = rabbit_fifo_v0:get_cfg_field(delivery_limit, V0State),
+ max_in_memory_length = rabbit_fifo_v0:get_cfg_field(max_in_memory_length, V0State),
+ max_in_memory_bytes = rabbit_fifo_v0:get_cfg_field(max_in_memory_bytes, V0State)
+ },
+
+ #?MODULE{cfg = Cfg,
+ messages = V1Msgs,
+ next_msg_num = rabbit_fifo_v0:get_field(next_msg_num, V0State),
+ returns = rabbit_fifo_v0:get_field(returns, V0State),
+ enqueue_count = rabbit_fifo_v0:get_field(enqueue_count, V0State),
+ enqueuers = V1Enqs,
+ ra_indexes = rabbit_fifo_v0:get_field(ra_indexes, V0State),
+ release_cursors = rabbit_fifo_v0:get_field(release_cursors, V0State),
+ consumers = V1Cons,
+ service_queue = V1SQ,
+ prefix_msgs = rabbit_fifo_v0:get_field(prefix_msgs, V0State),
+ msg_bytes_enqueue = rabbit_fifo_v0:get_field(msg_bytes_enqueue, V0State),
+ msg_bytes_checkout = rabbit_fifo_v0:get_field(msg_bytes_checkout, V0State),
+ waiting_consumers = rabbit_fifo_v0:get_field(waiting_consumers, V0State),
+ msg_bytes_in_memory = rabbit_fifo_v0:get_field(msg_bytes_in_memory, V0State),
+ msgs_ready_in_memory = rabbit_fifo_v0:get_field(msgs_ready_in_memory, V0State)
+ }.
+
+purge_node(Meta, Node, State, Effects) ->
+ lists:foldl(fun(Pid, {S0, E0}) ->
+ {S, E} = handle_down(Meta, Pid, S0),
+ {S, E0 ++ E}
+ end, {State, Effects}, all_pids_for(Node, State)).
+
+%% any downs that are not noconnection
+handle_down(Meta, Pid, #?MODULE{consumers = Cons0,
+ enqueuers = Enqs0} = State0) ->
+ % Remove any enqueuer for the same pid and enqueue any pending messages
+ % This should be ok as we won't see any more enqueues from this pid
+ State1 = case maps:take(Pid, Enqs0) of
+ {#enqueuer{pending = Pend}, Enqs} ->
+ lists:foldl(fun ({_, RIdx, RawMsg}, S) ->
+ enqueue(RIdx, RawMsg, S)
+ end, State0#?MODULE{enqueuers = Enqs}, Pend);
+ error ->
+ State0
+ end,
+ {Effects1, State2} = handle_waiting_consumer_down(Pid, State1),
+ % return checked out messages to main queue
+ % Find the consumers for the down pid
+ DownConsumers = maps:keys(
+ maps:filter(fun({_, P}, _) -> P =:= Pid end, Cons0)),
+ lists:foldl(fun(ConsumerId, {S, E}) ->
+ cancel_consumer(Meta, ConsumerId, S, E, down)
+ end, {State2, Effects1}, DownConsumers).
+
+consumer_active_flag_update_function(#?MODULE{cfg = #cfg{consumer_strategy = competing}}) ->
+ fun(State, ConsumerId, Consumer, Active, ActivityStatus, Effects) ->
+ consumer_update_active_effects(State, ConsumerId, Consumer, Active,
+ ActivityStatus, Effects)
+ end;
+consumer_active_flag_update_function(#?MODULE{cfg = #cfg{consumer_strategy = single_active}}) ->
+ fun(_, _, _, _, _, Effects) ->
+ Effects
+ end.
+
+handle_waiting_consumer_down(_Pid,
+ #?MODULE{cfg = #cfg{consumer_strategy = competing}} = State) ->
+ {[], State};
+handle_waiting_consumer_down(_Pid,
+ #?MODULE{cfg = #cfg{consumer_strategy = single_active},
+ waiting_consumers = []} = State) ->
+ {[], State};
+handle_waiting_consumer_down(Pid,
+ #?MODULE{cfg = #cfg{consumer_strategy = single_active},
+ waiting_consumers = WaitingConsumers0} = State0) ->
+ % get cancel effects for down waiting consumers
+ Down = lists:filter(fun({{_, P}, _}) -> P =:= Pid end,
+ WaitingConsumers0),
+ Effects = lists:foldl(fun ({ConsumerId, _}, Effects) ->
+ cancel_consumer_effects(ConsumerId, State0,
+ Effects)
+ end, [], Down),
+ % update state to have only up waiting consumers
+ StillUp = lists:filter(fun({{_, P}, _}) -> P =/= Pid end,
+ WaitingConsumers0),
+ State = State0#?MODULE{waiting_consumers = StillUp},
+ {Effects, State}.
+
+update_waiting_consumer_status(Node,
+ #?MODULE{waiting_consumers = WaitingConsumers},
+ Status) ->
+ [begin
+ case node(Pid) of
+ Node ->
+ {ConsumerId, Consumer#consumer{status = Status}};
+ _ ->
+ {ConsumerId, Consumer}
+ end
+ end || {{_, Pid} = ConsumerId, Consumer} <- WaitingConsumers,
+ Consumer#consumer.status =/= cancelled].
+
+-spec state_enter(ra_server:ra_state(), state()) -> ra_machine:effects().
+state_enter(leader, #?MODULE{consumers = Cons,
+ enqueuers = Enqs,
+ waiting_consumers = WaitingConsumers,
+ cfg = #cfg{name = Name,
+ resource = Resource,
+ become_leader_handler = BLH},
+ prefix_msgs = {0, [], 0, []}
+ }) ->
+ % return effects to monitor all current consumers and enqueuers
+ Pids = lists:usort(maps:keys(Enqs)
+ ++ [P || {_, P} <- maps:keys(Cons)]
+ ++ [P || {{_, P}, _} <- WaitingConsumers]),
+ Mons = [{monitor, process, P} || P <- Pids],
+ Nots = [{send_msg, P, leader_change, ra_event} || P <- Pids],
+ NodeMons = lists:usort([{monitor, node, node(P)} || P <- Pids]),
+ FHReservation = [{mod_call, rabbit_quorum_queue, file_handle_leader_reservation, [Resource]}],
+ Effects = Mons ++ Nots ++ NodeMons ++ FHReservation,
+ case BLH of
+ undefined ->
+ Effects;
+ {Mod, Fun, Args} ->
+ [{mod_call, Mod, Fun, Args ++ [Name]} | Effects]
+ end;
+state_enter(eol, #?MODULE{enqueuers = Enqs,
+ consumers = Custs0,
+ waiting_consumers = WaitingConsumers0}) ->
+ Custs = maps:fold(fun({_, P}, V, S) -> S#{P => V} end, #{}, Custs0),
+ WaitingConsumers1 = lists:foldl(fun({{_, P}, V}, Acc) -> Acc#{P => V} end,
+ #{}, WaitingConsumers0),
+ AllConsumers = maps:merge(Custs, WaitingConsumers1),
+ [{send_msg, P, eol, ra_event}
+ || P <- maps:keys(maps:merge(Enqs, AllConsumers))] ++
+ [{mod_call, rabbit_quorum_queue, file_handle_release_reservation, []}];
+state_enter(State, #?MODULE{cfg = #cfg{resource = _Resource}}) when State =/= leader ->
+ FHReservation = {mod_call, rabbit_quorum_queue, file_handle_other_reservation, []},
+ [FHReservation];
+state_enter(_, _) ->
+    %% catch-all, as not all ra server states are handled explicitly
+    [].
+
+
+-spec tick(non_neg_integer(), state()) -> ra_machine:effects().
+tick(Ts, #?MODULE{cfg = #cfg{name = Name,
+ resource = QName},
+ msg_bytes_enqueue = EnqueueBytes,
+ msg_bytes_checkout = CheckoutBytes} = State) ->
+ case is_expired(Ts, State) of
+ true ->
+ [{mod_call, rabbit_quorum_queue, spawn_deleter, [QName]}];
+ false ->
+ Metrics = {Name,
+ messages_ready(State),
+ num_checked_out(State), % checked out
+ messages_total(State),
+ query_consumer_count(State), % Consumers
+ EnqueueBytes,
+ CheckoutBytes},
+ [{mod_call, rabbit_quorum_queue,
+ handle_tick, [QName, Metrics, all_nodes(State)]}]
+ end.
+
+-spec overview(state()) -> map().
+overview(#?MODULE{consumers = Cons,
+ enqueuers = Enqs,
+ release_cursors = Cursors,
+ enqueue_count = EnqCount,
+ msg_bytes_enqueue = EnqueueBytes,
+ msg_bytes_checkout = CheckoutBytes,
+ cfg = Cfg} = State) ->
+ Conf = #{name => Cfg#cfg.name,
+ resource => Cfg#cfg.resource,
+ release_cursor_interval => Cfg#cfg.release_cursor_interval,
+ dead_lettering_enabled => undefined =/= Cfg#cfg.dead_letter_handler,
+ max_length => Cfg#cfg.max_length,
+ max_bytes => Cfg#cfg.max_bytes,
+ consumer_strategy => Cfg#cfg.consumer_strategy,
+ max_in_memory_length => Cfg#cfg.max_in_memory_length,
+ max_in_memory_bytes => Cfg#cfg.max_in_memory_bytes,
+ expires => Cfg#cfg.expires,
+ delivery_limit => Cfg#cfg.delivery_limit
+ },
+ #{type => ?MODULE,
+ config => Conf,
+ num_consumers => maps:size(Cons),
+ num_checked_out => num_checked_out(State),
+ num_enqueuers => maps:size(Enqs),
+ num_ready_messages => messages_ready(State),
+ num_messages => messages_total(State),
+ num_release_cursors => lqueue:len(Cursors),
+ release_cursors => [I || {_, I, _} <- lqueue:to_list(Cursors)],
+ release_cursor_enqueue_counter => EnqCount,
+ enqueue_message_bytes => EnqueueBytes,
+ checkout_message_bytes => CheckoutBytes}.
+
+-spec get_checked_out(consumer_id(), msg_id(), msg_id(), state()) ->
+ [delivery_msg()].
+get_checked_out(Cid, From, To, #?MODULE{consumers = Consumers}) ->
+ case Consumers of
+ #{Cid := #consumer{checked_out = Checked}} ->
+ [{K, snd(snd(maps:get(K, Checked)))}
+ || K <- lists:seq(From, To),
+ maps:is_key(K, Checked)];
+ _ ->
+ []
+ end.
+
+-spec version() -> pos_integer().
+version() -> 1.
+
+which_module(0) -> rabbit_fifo_v0;
+which_module(1) -> ?MODULE.
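+
+%% ra consults version/0 and which_module/1 to support rolling machine
+%% upgrades: commands applied while the cluster's effective machine version
+%% is 0 are handled by rabbit_fifo_v0, and version 1 commands by this
+%% module. A sketch of how a hypothetical future version could be wired in
+%% (rabbit_fifo_v1 here is illustrative only, it does not exist):
+%%
+%%   version() -> 2.
+%%   which_module(0) -> rabbit_fifo_v0;
+%%   which_module(1) -> rabbit_fifo_v1;
+%%   which_module(2) -> ?MODULE.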
+
+-record(aux_gc, {last_raft_idx = 0 :: ra:index()}).
+-record(aux, {name :: atom(),
+ utilisation :: term(),
+ gc = #aux_gc{} :: #aux_gc{}}).
+
+init_aux(Name) when is_atom(Name) ->
+ %% TODO: catch specific exception throw if table already exists
+ ok = ra_machine_ets:create_table(rabbit_fifo_usage,
+ [named_table, set, public,
+ {write_concurrency, true}]),
+ Now = erlang:monotonic_time(micro_seconds),
+ #aux{name = Name,
+ utilisation = {inactive, Now, 1, 1.0}}.
+
+handle_aux(leader, _, garbage_collection, State, Log, _MacState) ->
+ ra_log_wal:force_roll_over(ra_log_wal),
+ {no_reply, State, Log};
+handle_aux(follower, _, garbage_collection, State, Log, MacState) ->
+ ra_log_wal:force_roll_over(ra_log_wal),
+ {no_reply, force_eval_gc(Log, MacState, State), Log};
+handle_aux(_RaState, cast, eval, Aux0, Log, _MacState) ->
+ {no_reply, Aux0, Log};
+handle_aux(_RaState, cast, Cmd, #aux{utilisation = Use0} = Aux0,
+ Log, _MacState)
+ when Cmd == active orelse Cmd == inactive ->
+ {no_reply, Aux0#aux{utilisation = update_use(Use0, Cmd)}, Log};
+handle_aux(_RaState, cast, tick, #aux{name = Name,
+ utilisation = Use0} = State0,
+ Log, MacState) ->
+ true = ets:insert(rabbit_fifo_usage,
+ {Name, utilisation(Use0)}),
+ Aux = eval_gc(Log, MacState, State0),
+ {no_reply, Aux, Log};
+handle_aux(_RaState, {call, _From}, {peek, Pos}, Aux0,
+ Log0, MacState) ->
+ case rabbit_fifo:query_peek(Pos, MacState) of
+ {ok, {Idx, {Header, empty}}} ->
+ %% need to re-hydrate from the log
+ {{_, _, {_, _, Cmd, _}}, Log} = ra_log:fetch(Idx, Log0),
+ #enqueue{msg = Msg} = Cmd,
+ {reply, {ok, {Header, Msg}}, Aux0, Log};
+ {ok, {_Idx, {Header, Msg}}} ->
+ {reply, {ok, {Header, Msg}}, Aux0, Log0};
+ Err ->
+ {reply, Err, Aux0, Log0}
+ end.
+
+
+eval_gc(Log, #?MODULE{cfg = #cfg{resource = QR}} = MacState,
+ #aux{gc = #aux_gc{last_raft_idx = LastGcIdx} = Gc} = AuxState) ->
+ {Idx, _} = ra_log:last_index_term(Log),
+ {memory, Mem} = erlang:process_info(self(), memory),
+ case messages_total(MacState) of
+ 0 when Idx > LastGcIdx andalso
+ Mem > ?GC_MEM_LIMIT_B ->
+ garbage_collect(),
+ {memory, MemAfter} = erlang:process_info(self(), memory),
+ rabbit_log:debug("~s: full GC sweep complete. "
+ "Process memory changed from ~.2fMB to ~.2fMB.",
+ [rabbit_misc:rs(QR), Mem/?MB, MemAfter/?MB]),
+ AuxState#aux{gc = Gc#aux_gc{last_raft_idx = Idx}};
+ _ ->
+ AuxState
+ end.
+
+force_eval_gc(Log, #?MODULE{cfg = #cfg{resource = QR}},
+ #aux{gc = #aux_gc{last_raft_idx = LastGcIdx} = Gc} = AuxState) ->
+ {Idx, _} = ra_log:last_index_term(Log),
+ {memory, Mem} = erlang:process_info(self(), memory),
+ case Idx > LastGcIdx of
+ true ->
+ garbage_collect(),
+ {memory, MemAfter} = erlang:process_info(self(), memory),
+ rabbit_log:debug("~s: full GC sweep complete. "
+ "Process memory changed from ~.2fMB to ~.2fMB.",
+ [rabbit_misc:rs(QR), Mem/?MB, MemAfter/?MB]),
+ AuxState#aux{gc = Gc#aux_gc{last_raft_idx = Idx}};
+ false ->
+ AuxState
+ end.
+
+%%% Queries
+
+query_messages_ready(State) ->
+ messages_ready(State).
+
+query_messages_checked_out(#?MODULE{consumers = Consumers}) ->
+ maps:fold(fun (_, #consumer{checked_out = C}, S) ->
+ maps:size(C) + S
+ end, 0, Consumers).
+
+query_messages_total(State) ->
+ messages_total(State).
+
+query_processes(#?MODULE{enqueuers = Enqs, consumers = Cons0}) ->
+ Cons = maps:fold(fun({_, P}, V, S) -> S#{P => V} end, #{}, Cons0),
+ maps:keys(maps:merge(Enqs, Cons)).
+
+
+query_ra_indexes(#?MODULE{ra_indexes = RaIndexes}) ->
+ RaIndexes.
+
+query_consumer_count(#?MODULE{consumers = Consumers,
+ waiting_consumers = WaitingConsumers}) ->
+ Up = maps:filter(fun(_ConsumerId, #consumer{status = Status}) ->
+ Status =/= suspected_down
+ end, Consumers),
+ maps:size(Up) + length(WaitingConsumers).
+
+query_consumers(#?MODULE{consumers = Consumers,
+ waiting_consumers = WaitingConsumers,
+ cfg = #cfg{consumer_strategy = ConsumerStrategy}} = State) ->
+ ActiveActivityStatusFun =
+ case ConsumerStrategy of
+ competing ->
+ fun(_ConsumerId,
+ #consumer{status = Status}) ->
+ case Status of
+ suspected_down ->
+ {false, Status};
+ _ ->
+ {true, Status}
+ end
+ end;
+ single_active ->
+ SingleActiveConsumer = query_single_active_consumer(State),
+ fun({Tag, Pid} = _Consumer, _) ->
+ case SingleActiveConsumer of
+ {value, {Tag, Pid}} ->
+ {true, single_active};
+ _ ->
+ {false, waiting}
+ end
+ end
+ end,
+ FromConsumers =
+ maps:fold(fun (_, #consumer{status = cancelled}, Acc) ->
+ Acc;
+ ({Tag, Pid}, #consumer{meta = Meta} = Consumer, Acc) ->
+ {Active, ActivityStatus} =
+ ActiveActivityStatusFun({Tag, Pid}, Consumer),
+ maps:put({Tag, Pid},
+ {Pid, Tag,
+ maps:get(ack, Meta, undefined),
+ maps:get(prefetch, Meta, undefined),
+ Active,
+ ActivityStatus,
+ maps:get(args, Meta, []),
+ maps:get(username, Meta, undefined)},
+ Acc)
+ end, #{}, Consumers),
+ FromWaitingConsumers =
+ lists:foldl(fun ({_, #consumer{status = cancelled}}, Acc) ->
+ Acc;
+ ({{Tag, Pid}, #consumer{meta = Meta} = Consumer}, Acc) ->
+ {Active, ActivityStatus} =
+ ActiveActivityStatusFun({Tag, Pid}, Consumer),
+ maps:put({Tag, Pid},
+ {Pid, Tag,
+ maps:get(ack, Meta, undefined),
+ maps:get(prefetch, Meta, undefined),
+ Active,
+ ActivityStatus,
+ maps:get(args, Meta, []),
+ maps:get(username, Meta, undefined)},
+ Acc)
+ end, #{}, WaitingConsumers),
+ maps:merge(FromConsumers, FromWaitingConsumers).
+
+
+query_single_active_consumer(#?MODULE{cfg = #cfg{consumer_strategy = single_active},
+ consumers = Consumers}) ->
+ case maps:size(Consumers) of
+ 0 ->
+ {error, no_value};
+ 1 ->
+ {value, lists:nth(1, maps:keys(Consumers))};
+        _ ->
+            {error, illegal_size}
+    end;
+query_single_active_consumer(_) ->
+ disabled.
+
+query_stat(#?MODULE{consumers = Consumers} = State) ->
+ {messages_ready(State), maps:size(Consumers)}.
+
+query_in_memory_usage(#?MODULE{msg_bytes_in_memory = Bytes,
+ msgs_ready_in_memory = Length}) ->
+ {Length, Bytes}.
+
+query_peek(Pos, State0) when Pos > 0 ->
+ case take_next_msg(State0) of
+ empty ->
+ {error, no_message_at_pos};
+ {{_Seq, IdxMsg}, _State}
+ when Pos == 1 ->
+ {ok, IdxMsg};
+ {_Msg, State} ->
+ query_peek(Pos-1, State)
+ end.
+
+
+-spec usage(atom()) -> float().
+usage(Name) when is_atom(Name) ->
+ case ets:lookup(rabbit_fifo_usage, Name) of
+ [] -> 0.0;
+ [{_, Use}] -> Use
+ end.
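+
+%% Example (illustrative): usage/1 reads the value cached in the
+%% rabbit_fifo_usage ETS table by the aux tick handler above, so it
+%% returns the last sampled utilisation rather than a live computation:
+%%
+%%   Utilisation = rabbit_fifo:usage(QueueRaName), %% float in [0.0, 1.0]
+%%
+%% where QueueRaName is a placeholder for the atom the queue's ra server
+%% was registered under.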
+
+%%% Internal
+
+messages_ready(#?MODULE{messages = M,
+ prefix_msgs = {RCnt, _R, PCnt, _P},
+ returns = R}) ->
+    %% prefix messages will rarely have anything in them during normal
+    %% operations, and their counts are tracked explicitly, so this is cheap
+ lqueue:len(M) + lqueue:len(R) + RCnt + PCnt.
+
+messages_total(#?MODULE{ra_indexes = I,
+ prefix_msgs = {RCnt, _R, PCnt, _P}}) ->
+ rabbit_fifo_index:size(I) + RCnt + PCnt.
+
+update_use({inactive, _, _, _} = CUInfo, inactive) ->
+ CUInfo;
+update_use({active, _, _} = CUInfo, active) ->
+ CUInfo;
+update_use({active, Since, Avg}, inactive) ->
+ Now = erlang:monotonic_time(micro_seconds),
+ {inactive, Now, Now - Since, Avg};
+update_use({inactive, Since, Active, Avg}, active) ->
+ Now = erlang:monotonic_time(micro_seconds),
+ {active, Now, use_avg(Active, Now - Since, Avg)}.
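+
+%% The utilisation term is either {active, Since, Avg} or
+%% {inactive, Since, ActiveDuration, Avg}, with times in microseconds.
+%% Each transition records how long the previous state lasted so that
+%% use_avg/3 can weight the moving average by the fraction of time spent
+%% active.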
+
+utilisation({active, Since, Avg}) ->
+ use_avg(erlang:monotonic_time(micro_seconds) - Since, 0, Avg);
+utilisation({inactive, Since, Active, Avg}) ->
+ use_avg(Active, erlang:monotonic_time(micro_seconds) - Since, Avg).
+
+use_avg(0, 0, Avg) ->
+ Avg;
+use_avg(Active, Inactive, Avg) ->
+ Time = Inactive + Active,
+ moving_average(Time, ?USE_AVG_HALF_LIFE, Active / Time, Avg).
+
+moving_average(_Time, _, Next, undefined) ->
+ Next;
+moving_average(Time, HalfLife, Next, Current) ->
+ Weight = math:exp(Time * math:log(0.5) / HalfLife),
+ Next * (1 - Weight) + Current * Weight.
+
+num_checked_out(#?MODULE{consumers = Cons}) ->
+ maps:fold(fun (_, #consumer{checked_out = C}, Acc) ->
+ maps:size(C) + Acc
+ end, 0, Cons).
+
+cancel_consumer(Meta, ConsumerId,
+ #?MODULE{cfg = #cfg{consumer_strategy = competing}} = State,
+ Effects, Reason) ->
+ cancel_consumer0(Meta, ConsumerId, State, Effects, Reason);
+cancel_consumer(Meta, ConsumerId,
+ #?MODULE{cfg = #cfg{consumer_strategy = single_active},
+ waiting_consumers = []} = State,
+ Effects, Reason) ->
+ %% single active consumer on, no consumers are waiting
+ cancel_consumer0(Meta, ConsumerId, State, Effects, Reason);
+cancel_consumer(Meta, ConsumerId,
+ #?MODULE{consumers = Cons0,
+ cfg = #cfg{consumer_strategy = single_active},
+ waiting_consumers = Waiting0} = State0,
+ Effects0, Reason) ->
+ %% single active consumer on, consumers are waiting
+ case maps:is_key(ConsumerId, Cons0) of
+ true ->
+ % The active consumer is to be removed
+ {State1, Effects1} = cancel_consumer0(Meta, ConsumerId, State0,
+ Effects0, Reason),
+ activate_next_consumer(State1, Effects1);
+ false ->
+            % The consumer being cancelled is not the active one
+            % Just remove it from the waiting consumers
+ Waiting = lists:keydelete(ConsumerId, 1, Waiting0),
+ Effects = cancel_consumer_effects(ConsumerId, State0, Effects0),
+ % A waiting consumer isn't supposed to have any checked out messages,
+ % so nothing special to do here
+ {State0#?MODULE{waiting_consumers = Waiting}, Effects}
+ end.
+
+consumer_update_active_effects(#?MODULE{cfg = #cfg{resource = QName}},
+ ConsumerId, #consumer{meta = Meta},
+ Active, ActivityStatus,
+ Effects) ->
+ Ack = maps:get(ack, Meta, undefined),
+ Prefetch = maps:get(prefetch, Meta, undefined),
+ Args = maps:get(args, Meta, []),
+ [{mod_call, rabbit_quorum_queue, update_consumer_handler,
+ [QName, ConsumerId, false, Ack, Prefetch, Active, ActivityStatus, Args]}
+ | Effects].
+
+cancel_consumer0(Meta, ConsumerId,
+ #?MODULE{consumers = C0} = S0, Effects0, Reason) ->
+ case C0 of
+ #{ConsumerId := Consumer} ->
+ {S, Effects2} = maybe_return_all(Meta, ConsumerId, Consumer,
+ S0, Effects0, Reason),
+ %% The effects are emitted before the consumer is actually removed
+ %% if the consumer has unacked messages. This is a bit weird but
+ %% in line with what classic queues do (from an external point of
+ %% view)
+ Effects = cancel_consumer_effects(ConsumerId, S, Effects2),
+ case maps:size(S#?MODULE.consumers) of
+ 0 ->
+ {S, [{aux, inactive} | Effects]};
+ _ ->
+ {S, Effects}
+ end;
+ _ ->
+ %% already removed: do nothing
+ {S0, Effects0}
+ end.
+
+activate_next_consumer(#?MODULE{consumers = Cons,
+ waiting_consumers = Waiting0} = State0,
+ Effects0) ->
+ case maps:filter(fun (_, #consumer{status = S}) -> S == up end, Cons) of
+ Up when map_size(Up) == 0 ->
+            %% there is no active consumer in the consumers map
+ case lists:filter(fun ({_, #consumer{status = Status}}) ->
+ Status == up
+ end, Waiting0) of
+ [{NextConsumerId, NextConsumer} | _] ->
+ %% there is a potential next active consumer
+ Remaining = lists:keydelete(NextConsumerId, 1, Waiting0),
+ #?MODULE{service_queue = ServiceQueue} = State0,
+ ServiceQueue1 = maybe_queue_consumer(NextConsumerId,
+ NextConsumer,
+ ServiceQueue),
+ State = State0#?MODULE{consumers = Cons#{NextConsumerId => NextConsumer},
+ service_queue = ServiceQueue1,
+ waiting_consumers = Remaining},
+ Effects = consumer_update_active_effects(State, NextConsumerId,
+ NextConsumer, true,
+ single_active, Effects0),
+ {State, Effects};
+ [] ->
+ {State0, [{aux, inactive} | Effects0]}
+ end;
+ _ ->
+ {State0, Effects0}
+ end.
+
+
+
+maybe_return_all(#{system_time := Ts} = Meta, ConsumerId, Consumer, S0, Effects0, Reason) ->
+ case Reason of
+ consumer_cancel ->
+ {update_or_remove_sub(Meta, ConsumerId,
+ Consumer#consumer{lifetime = once,
+ credit = 0,
+ status = cancelled},
+ S0), Effects0};
+ down ->
+ {S1, Effects1} = return_all(Meta, S0, Effects0, ConsumerId, Consumer),
+ {S1#?MODULE{consumers = maps:remove(ConsumerId, S1#?MODULE.consumers),
+ last_active = Ts},
+ Effects1}
+ end.
+
+apply_enqueue(#{index := RaftIdx} = Meta, From, Seq, RawMsg, State0) ->
+ case maybe_enqueue(RaftIdx, From, Seq, RawMsg, [], State0) of
+ {ok, State1, Effects1} ->
+ State2 = append_to_master_index(RaftIdx, State1),
+ {State, ok, Effects} = checkout(Meta, State0, State2, Effects1),
+ {maybe_store_dehydrated_state(RaftIdx, State), ok, Effects};
+ {duplicate, State, Effects} ->
+ {State, ok, Effects}
+ end.
+
+drop_head(#?MODULE{ra_indexes = Indexes0} = State0, Effects0) ->
+ case take_next_msg(State0) of
+ {FullMsg = {_MsgId, {RaftIdxToDrop, {Header, Msg}}},
+ State1} ->
+ Indexes = rabbit_fifo_index:delete(RaftIdxToDrop, Indexes0),
+ State2 = add_bytes_drop(Header, State1#?MODULE{ra_indexes = Indexes}),
+ State = case Msg of
+ 'empty' -> State2;
+ _ -> subtract_in_memory_counts(Header, State2)
+ end,
+ Effects = dead_letter_effects(maxlen, #{none => FullMsg},
+ State, Effects0),
+ {State, Effects};
+ {{'$prefix_msg', Header}, State1} ->
+ State2 = subtract_in_memory_counts(Header, add_bytes_drop(Header, State1)),
+ {State2, Effects0};
+ {{'$empty_msg', Header}, State1} ->
+ State2 = add_bytes_drop(Header, State1),
+ {State2, Effects0};
+ empty ->
+ {State0, Effects0}
+ end.
+
+enqueue(RaftIdx, RawMsg, #?MODULE{messages = Messages,
+ next_msg_num = NextMsgNum} = State0) ->
+ %% the initial header is an integer only - it will get expanded to a map
+ %% when the next required key is added
+ Header = message_size(RawMsg),
+ {State1, Msg} =
+ case evaluate_memory_limit(Header, State0) of
+ true ->
+ % indexed message with header map
+ {State0, {RaftIdx, {Header, 'empty'}}};
+ false ->
+ {add_in_memory_counts(Header, State0),
+ {RaftIdx, {Header, RawMsg}}} % indexed message with header map
+ end,
+ State = add_bytes_enqueue(Header, State1),
+ State#?MODULE{messages = lqueue:in({NextMsgNum, Msg}, Messages),
+ next_msg_num = NextMsgNum + 1}.
+
+append_to_master_index(RaftIdx,
+ #?MODULE{ra_indexes = Indexes0} = State0) ->
+ State = incr_enqueue_count(State0),
+ Indexes = rabbit_fifo_index:append(RaftIdx, Indexes0),
+ State#?MODULE{ra_indexes = Indexes}.
+
+
+incr_enqueue_count(#?MODULE{enqueue_count = EC,
+ cfg = #cfg{release_cursor_interval = {_Base, C}}
+                   } = State0) when EC >= C ->
+ %% this will trigger a dehydrated version of the state to be stored
+ %% at this raft index for potential future snapshot generation
+ %% Q: Why don't we just stash the release cursor here?
+    %% A: Because it needs to be the very last thing we do, and we
+    %% first need to run the checkout logic.
+ State0#?MODULE{enqueue_count = 0};
+incr_enqueue_count(#?MODULE{enqueue_count = C} = State) ->
+ State#?MODULE{enqueue_count = C + 1}.
+
+maybe_store_dehydrated_state(RaftIdx,
+ #?MODULE{cfg =
+ #cfg{release_cursor_interval = {Base, _}}
+ = Cfg,
+ ra_indexes = Indexes,
+ enqueue_count = 0,
+ release_cursors = Cursors0} = State0) ->
+ case rabbit_fifo_index:exists(RaftIdx, Indexes) of
+ false ->
+ %% the incoming enqueue must already have been dropped
+ State0;
+ true ->
+ Interval = case Base of
+ 0 -> 0;
+ _ ->
+ Total = messages_total(State0),
+ min(max(Total, Base), ?RELEASE_CURSOR_EVERY_MAX)
+ end,
+ State = State0#?MODULE{cfg = Cfg#cfg{release_cursor_interval =
+ {Base, Interval}}},
+ Dehydrated = dehydrate_state(State),
+ Cursor = {release_cursor, RaftIdx, Dehydrated},
+ Cursors = lqueue:in(Cursor, Cursors0),
+ State#?MODULE{release_cursors = Cursors}
+ end;
+maybe_store_dehydrated_state(_RaftIdx, State) ->
+ State.
+
+enqueue_pending(From,
+ #enqueuer{next_seqno = Next,
+ pending = [{Next, RaftIdx, RawMsg} | Pending]} = Enq0,
+ State0) ->
+ State = enqueue(RaftIdx, RawMsg, State0),
+ Enq = Enq0#enqueuer{next_seqno = Next + 1, pending = Pending},
+ enqueue_pending(From, Enq, State);
+enqueue_pending(From, Enq, #?MODULE{enqueuers = Enqueuers0} = State) ->
+ State#?MODULE{enqueuers = Enqueuers0#{From => Enq}}.
+
+maybe_enqueue(RaftIdx, undefined, undefined, RawMsg, Effects, State0) ->
+ % direct enqueue without tracking
+ State = enqueue(RaftIdx, RawMsg, State0),
+ {ok, State, Effects};
+maybe_enqueue(RaftIdx, From, MsgSeqNo, RawMsg, Effects0,
+ #?MODULE{enqueuers = Enqueuers0} = State0) ->
+ case maps:get(From, Enqueuers0, undefined) of
+ undefined ->
+ State1 = State0#?MODULE{enqueuers = Enqueuers0#{From => #enqueuer{}}},
+ {ok, State, Effects} = maybe_enqueue(RaftIdx, From, MsgSeqNo,
+ RawMsg, Effects0, State1),
+ {ok, State, [{monitor, process, From} | Effects]};
+ #enqueuer{next_seqno = MsgSeqNo} = Enq0 ->
+ % it is the next expected seqno
+ State1 = enqueue(RaftIdx, RawMsg, State0),
+ Enq = Enq0#enqueuer{next_seqno = MsgSeqNo + 1},
+ State = enqueue_pending(From, Enq, State1),
+ {ok, State, Effects0};
+ #enqueuer{next_seqno = Next,
+ pending = Pending0} = Enq0
+ when MsgSeqNo > Next ->
+ % out of order delivery
+ Pending = [{MsgSeqNo, RaftIdx, RawMsg} | Pending0],
+ Enq = Enq0#enqueuer{pending = lists:sort(Pending)},
+ {ok, State0#?MODULE{enqueuers = Enqueuers0#{From => Enq}}, Effects0};
+ #enqueuer{next_seqno = Next} when MsgSeqNo =< Next ->
+ % duplicate delivery - remove the raft index from the ra_indexes
+ % map as it was added earlier
+ {duplicate, State0, Effects0}
+ end.
+
+snd(T) ->
+ element(2, T).
+
+return(#{index := IncomingRaftIdx} = Meta, ConsumerId, Returned,
+ Effects0, State0) ->
+ {State1, Effects1} = maps:fold(
+ fun(MsgId, {Tag, _} = Msg, {S0, E0})
+ when Tag == '$prefix_msg';
+                                         Tag == '$empty_msg' ->
+ return_one(Meta, MsgId, 0, Msg, S0, E0, ConsumerId);
+ (MsgId, {MsgNum, Msg}, {S0, E0}) ->
+ return_one(Meta, MsgId, MsgNum, Msg, S0, E0,
+ ConsumerId)
+ end, {State0, Effects0}, Returned),
+ State2 =
+ case State1#?MODULE.consumers of
+ #{ConsumerId := Con0} ->
+ Con = Con0#consumer{credit = increase_credit(Con0,
+ map_size(Returned))},
+ update_or_remove_sub(Meta, ConsumerId, Con, State1);
+ _ ->
+ State1
+ end,
+ {State, ok, Effects} = checkout(Meta, State0, State2, Effects1),
+ update_smallest_raft_index(IncomingRaftIdx, State, Effects).
+
+% used to process messages that are finished
+complete(Meta, ConsumerId, Discarded,
+ #consumer{checked_out = Checked} = Con0, Effects,
+ #?MODULE{ra_indexes = Indexes0} = State0) ->
+ %% TODO optimise use of Discarded map here
+ MsgRaftIdxs = [RIdx || {_, {RIdx, _}} <- maps:values(Discarded)],
+    %% credit_mode = simple_prefetch should automatically top up credit
+    %% as messages are settled or returned
+ Con = Con0#consumer{checked_out = maps:without(maps:keys(Discarded), Checked),
+ credit = increase_credit(Con0, map_size(Discarded))},
+ State1 = update_or_remove_sub(Meta, ConsumerId, Con, State0),
+ Indexes = lists:foldl(fun rabbit_fifo_index:delete/2, Indexes0,
+ MsgRaftIdxs),
+ %% TODO: use maps:fold instead
+ State2 = lists:foldl(fun({_, {_, {Header, _}}}, Acc) ->
+ add_bytes_settle(Header, Acc);
+ ({'$prefix_msg', Header}, Acc) ->
+ add_bytes_settle(Header, Acc);
+ ({'$empty_msg', Header}, Acc) ->
+ add_bytes_settle(Header, Acc)
+ end, State1, maps:values(Discarded)),
+ {State2#?MODULE{ra_indexes = Indexes}, Effects}.
+
+increase_credit(#consumer{lifetime = once,
+ credit = Credit}, _) ->
+ %% once consumers cannot increment credit
+ Credit;
+increase_credit(#consumer{lifetime = auto,
+ credit_mode = credited,
+ credit = Credit}, _) ->
+ %% credit_mode: credit also doesn't automatically increment credit
+ Credit;
+increase_credit(#consumer{credit = Current}, Credit) ->
+ Current + Credit.
+
+complete_and_checkout(#{index := IncomingRaftIdx} = Meta, MsgIds, ConsumerId,
+ #consumer{checked_out = Checked0} = Con0,
+ Effects0, State0) ->
+ Discarded = maps:with(MsgIds, Checked0),
+ {State2, Effects1} = complete(Meta, ConsumerId, Discarded, Con0,
+ Effects0, State0),
+ {State, ok, Effects} = checkout(Meta, State0, State2, Effects1),
+ update_smallest_raft_index(IncomingRaftIdx, State, Effects).
+
+dead_letter_effects(_Reason, _Discarded,
+ #?MODULE{cfg = #cfg{dead_letter_handler = undefined}},
+ Effects) ->
+ Effects;
+dead_letter_effects(Reason, Discarded,
+ #?MODULE{cfg = #cfg{dead_letter_handler = {Mod, Fun, Args}}},
+ Effects) ->
+ RaftIdxs = maps:fold(
+ fun (_, {_, {RaftIdx, {_Header, 'empty'}}}, Acc) ->
+ [RaftIdx | Acc];
+ (_, _, Acc) ->
+ Acc
+ end, [], Discarded),
+ [{log, RaftIdxs,
+ fun (Log) ->
+ Lookup = maps:from_list(lists:zip(RaftIdxs, Log)),
+ DeadLetters = maps:fold(
+ fun (_, {_, {RaftIdx, {_Header, 'empty'}}}, Acc) ->
+ {enqueue, _, _, Msg} = maps:get(RaftIdx, Lookup),
+ [{Reason, Msg} | Acc];
+ (_, {_, {_, {_Header, Msg}}}, Acc) ->
+ [{Reason, Msg} | Acc];
+ (_, _, Acc) ->
+ Acc
+ end, [], Discarded),
+ [{mod_call, Mod, Fun, Args ++ [DeadLetters]}]
+ end} | Effects].
+
+cancel_consumer_effects(ConsumerId,
+ #?MODULE{cfg = #cfg{resource = QName}}, Effects) ->
+ [{mod_call, rabbit_quorum_queue,
+ cancel_consumer_handler, [QName, ConsumerId]} | Effects].
+
+update_smallest_raft_index(Idx, State, Effects) ->
+ update_smallest_raft_index(Idx, ok, State, Effects).
+
+update_smallest_raft_index(IncomingRaftIdx, Reply,
+ #?MODULE{cfg = Cfg,
+ ra_indexes = Indexes,
+ release_cursors = Cursors0} = State0,
+ Effects) ->
+ case rabbit_fifo_index:size(Indexes) of
+ 0 ->
+ % there are no messages on queue anymore and no pending enqueues
+ % we can forward release_cursor all the way until
+ % the last received command, hooray
+ %% reset the release cursor interval
+ #cfg{release_cursor_interval = {Base, _}} = Cfg,
+ RCI = {Base, Base},
+ State = State0#?MODULE{cfg = Cfg#cfg{release_cursor_interval = RCI},
+ release_cursors = lqueue:new(),
+ enqueue_count = 0},
+ {State, Reply, Effects ++ [{release_cursor, IncomingRaftIdx, State}]};
+ _ ->
+ Smallest = rabbit_fifo_index:smallest(Indexes),
+ case find_next_cursor(Smallest, Cursors0) of
+ {empty, Cursors} ->
+ {State0#?MODULE{release_cursors = Cursors}, Reply, Effects};
+ {Cursor, Cursors} ->
+ %% we can emit a release cursor when we've passed the smallest
+ %% release cursor available.
+ {State0#?MODULE{release_cursors = Cursors}, Reply,
+ Effects ++ [Cursor]}
+ end
+ end.
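+
+%% A release cursor tells ra the raft index at which a snapshot of the
+%% (dehydrated) machine state may be taken: once no live message references
+%% an index at or below the cursor, the cursor is emitted and ra can
+%% truncate the log up to that point.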
+
+find_next_cursor(Idx, Cursors) ->
+ find_next_cursor(Idx, Cursors, empty).
+
+find_next_cursor(Smallest, Cursors0, Potential) ->
+ case lqueue:out(Cursors0) of
+ {{value, {_, Idx, _} = Cursor}, Cursors} when Idx < Smallest ->
+ %% we found one but it may not be the largest one
+ find_next_cursor(Smallest, Cursors, Cursor);
+ _ ->
+ {Potential, Cursors0}
+ end.
+
+update_header(Key, UpdateFun, Default, Header)
+ when is_integer(Header) ->
+ update_header(Key, UpdateFun, Default, #{size => Header});
+update_header(Key, UpdateFun, Default, Header) ->
+ maps:update_with(Key, UpdateFun, Default, Header).
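+
+%% Example: a bare integer header is expanded to a size map before the
+%% update is applied:
+%%
+%%   update_header(delivery_count, fun(C) -> C + 1 end, 1, 12)
+%%   %% => #{size => 12, delivery_count => 1}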
+
+
+return_one(Meta, MsgId, 0, {Tag, Header0},
+ #?MODULE{returns = Returns,
+ consumers = Consumers,
+ cfg = #cfg{delivery_limit = DeliveryLimit}} = State0,
+ Effects0, ConsumerId)
+ when Tag == '$prefix_msg'; Tag == '$empty_msg' ->
+ #consumer{checked_out = Checked} = Con0 = maps:get(ConsumerId, Consumers),
+ Header = update_header(delivery_count, fun (C) -> C+1 end, 1, Header0),
+ Msg0 = {Tag, Header},
+ case maps:get(delivery_count, Header) of
+ DeliveryCount when DeliveryCount > DeliveryLimit ->
+ complete(Meta, ConsumerId, #{MsgId => Msg0}, Con0, Effects0, State0);
+ _ ->
+ %% this should not affect the release cursor in any way
+ Con = Con0#consumer{checked_out = maps:remove(MsgId, Checked)},
+ {Msg, State1} = case Tag of
+ '$empty_msg' ->
+ {Msg0, State0};
+ _ -> case evaluate_memory_limit(Header, State0) of
+ true ->
+ {{'$empty_msg', Header}, State0};
+ false ->
+ {Msg0, add_in_memory_counts(Header, State0)}
+ end
+ end,
+ {add_bytes_return(
+ Header,
+ State1#?MODULE{consumers = Consumers#{ConsumerId => Con},
+ returns = lqueue:in(Msg, Returns)}),
+ Effects0}
+ end;
+return_one(Meta, MsgId, MsgNum, {RaftId, {Header0, RawMsg}},
+ #?MODULE{returns = Returns,
+ consumers = Consumers,
+ cfg = #cfg{delivery_limit = DeliveryLimit}} = State0,
+ Effects0, ConsumerId) ->
+ #consumer{checked_out = Checked} = Con0 = maps:get(ConsumerId, Consumers),
+ Header = update_header(delivery_count, fun (C) -> C+1 end, 1, Header0),
+ Msg0 = {RaftId, {Header, RawMsg}},
+ case maps:get(delivery_count, Header) of
+ DeliveryCount when DeliveryCount > DeliveryLimit ->
+ DlMsg = {MsgNum, Msg0},
+ Effects = dead_letter_effects(delivery_limit, #{none => DlMsg},
+ State0, Effects0),
+ complete(Meta, ConsumerId, #{MsgId => DlMsg}, Con0, Effects, State0);
+ _ ->
+ Con = Con0#consumer{checked_out = maps:remove(MsgId, Checked)},
+ %% this should not affect the release cursor in any way
+ {Msg, State1} = case RawMsg of
+ 'empty' ->
+ {Msg0, State0};
+ _ ->
+ case evaluate_memory_limit(Header, State0) of
+ true ->
+ {{RaftId, {Header, 'empty'}}, State0};
+ false ->
+ {Msg0, add_in_memory_counts(Header, State0)}
+ end
+ end,
+ {add_bytes_return(
+ Header,
+ State1#?MODULE{consumers = Consumers#{ConsumerId => Con},
+ returns = lqueue:in({MsgNum, Msg}, Returns)}),
+ Effects0}
+ end.
+
+return_all(Meta, #?MODULE{consumers = Cons} = State0, Effects0, ConsumerId,
+ #consumer{checked_out = Checked0} = Con) ->
+ %% need to sort the list so that we return messages in the order
+ %% they were checked out
+ Checked = lists:sort(maps:to_list(Checked0)),
+ State = State0#?MODULE{consumers = Cons#{ConsumerId => Con}},
+ lists:foldl(fun ({MsgId, {'$prefix_msg', _} = Msg}, {S, E}) ->
+ return_one(Meta, MsgId, 0, Msg, S, E, ConsumerId);
+ ({MsgId, {'$empty_msg', _} = Msg}, {S, E}) ->
+ return_one(Meta, MsgId, 0, Msg, S, E, ConsumerId);
+ ({MsgId, {MsgNum, Msg}}, {S, E}) ->
+ return_one(Meta, MsgId, MsgNum, Msg, S, E, ConsumerId)
+ end, {State, Effects0}, Checked).
+
+%% checkout new messages to consumers
+checkout(#{index := Index} = Meta, OldState, State0, Effects0) ->
+ {State1, _Result, Effects1} = checkout0(Meta, checkout_one(Meta, State0),
+ Effects0, {#{}, #{}}),
+ case evaluate_limit(Index, false, OldState, State1, Effects1) of
+ {State, true, Effects} ->
+ update_smallest_raft_index(Index, State, Effects);
+ {State, false, Effects} ->
+ {State, ok, Effects}
+ end.
+
+checkout0(Meta, {success, ConsumerId, MsgId, {RaftIdx, {Header, 'empty'}}, State},
+ Effects, {SendAcc, LogAcc0}) ->
+ DelMsg = {RaftIdx, {MsgId, Header}},
+ LogAcc = maps:update_with(ConsumerId,
+ fun (M) -> [DelMsg | M] end,
+ [DelMsg], LogAcc0),
+ checkout0(Meta, checkout_one(Meta, State), Effects, {SendAcc, LogAcc});
+checkout0(Meta, {success, ConsumerId, MsgId, Msg, State}, Effects,
+ {SendAcc0, LogAcc}) ->
+ DelMsg = {MsgId, Msg},
+ SendAcc = maps:update_with(ConsumerId,
+ fun (M) -> [DelMsg | M] end,
+ [DelMsg], SendAcc0),
+ checkout0(Meta, checkout_one(Meta, State), Effects, {SendAcc, LogAcc});
+checkout0(_Meta, {Activity, State0}, Effects0, {SendAcc, LogAcc}) ->
+ Effects1 = case Activity of
+ nochange ->
+ append_send_msg_effects(
+ append_log_effects(Effects0, LogAcc), SendAcc);
+ inactive ->
+ [{aux, inactive}
+ | append_send_msg_effects(
+ append_log_effects(Effects0, LogAcc), SendAcc)]
+ end,
+ {State0, ok, lists:reverse(Effects1)}.
+
+evaluate_limit(_Index, Result, _BeforeState,
+ #?MODULE{cfg = #cfg{max_length = undefined,
+ max_bytes = undefined}} = State,
+ Effects) ->
+ {State, Result, Effects};
+evaluate_limit(Index, Result, BeforeState,
+ #?MODULE{cfg = #cfg{overflow_strategy = Strategy},
+ enqueuers = Enqs0} = State0,
+ Effects0) ->
+ case is_over_limit(State0) of
+ true when Strategy == drop_head ->
+ {State, Effects} = drop_head(State0, Effects0),
+ evaluate_limit(Index, true, BeforeState, State, Effects);
+ true when Strategy == reject_publish ->
+ %% generate send_msg effect for each enqueuer to let them know
+ %% they need to block
+ {Enqs, Effects} =
+ maps:fold(
+ fun (P, #enqueuer{blocked = undefined} = E0, {Enqs, Acc}) ->
+ E = E0#enqueuer{blocked = Index},
+ {Enqs#{P => E},
+ [{send_msg, P, {queue_status, reject_publish},
+ [ra_event]} | Acc]};
+ (_P, _E, Acc) ->
+ Acc
+ end, {Enqs0, Effects0}, Enqs0),
+ {State0#?MODULE{enqueuers = Enqs}, Result, Effects};
+ false when Strategy == reject_publish ->
+            %% TODO: optimise, as this case gets called for pretty much
+            %% every command
+ Before = is_below_soft_limit(BeforeState),
+ case {Before, is_below_soft_limit(State0)} of
+ {false, true} ->
+                    %% we have moved below the lower limit, so unblock
+                    %% all previously blocked enqueuers
+ {Enqs, Effects} =
+ maps:fold(
+ fun (P, #enqueuer{} = E0, {Enqs, Acc}) ->
+ E = E0#enqueuer{blocked = undefined},
+ {Enqs#{P => E},
+ [{send_msg, P, {queue_status, go}, [ra_event]}
+ | Acc]};
+ (_P, _E, Acc) ->
+ Acc
+ end, {Enqs0, Effects0}, Enqs0),
+ {State0#?MODULE{enqueuers = Enqs}, Result, Effects};
+ _ ->
+ {State0, Result, Effects0}
+ end;
+ false ->
+ {State0, Result, Effects0}
+ end.
+
+evaluate_memory_limit(_Header,
+ #?MODULE{cfg = #cfg{max_in_memory_length = undefined,
+ max_in_memory_bytes = undefined}}) ->
+ false;
+evaluate_memory_limit(#{size := Size}, State) ->
+ evaluate_memory_limit(Size, State);
+evaluate_memory_limit(Size,
+ #?MODULE{cfg = #cfg{max_in_memory_length = MaxLength,
+ max_in_memory_bytes = MaxBytes},
+ msg_bytes_in_memory = Bytes,
+ msgs_ready_in_memory = Length})
+ when is_integer(Size) ->
+ (Length >= MaxLength) orelse ((Bytes + Size) > MaxBytes).
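+
+%% Example (illustrative numbers): with max_in_memory_bytes = 1000,
+%% msg_bytes_in_memory = 900 and the length limit not yet reached, a
+%% 150 byte message evaluates to true (900 + 150 > 1000), so its payload
+%% is replaced with 'empty' and only retained in the ra log.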
+
+append_send_msg_effects(Effects, AccMap) when map_size(AccMap) == 0 ->
+ Effects;
+append_send_msg_effects(Effects0, AccMap) ->
+ Effects = maps:fold(fun (C, Msgs, Ef) ->
+ [send_msg_effect(C, lists:reverse(Msgs)) | Ef]
+ end, Effects0, AccMap),
+ [{aux, active} | Effects].
+
+append_log_effects(Effects0, AccMap) ->
+ maps:fold(fun (C, Msgs, Ef) ->
+ [send_log_effect(C, lists:reverse(Msgs)) | Ef]
+ end, Effects0, AccMap).
+
+%% The next message is determined as follows:
+%% First we check if there are prefix returns
+%% Then we check if there are current returns
+%% Then we check prefix msgs
+%% Then we check current messages
+%%
+%% When a message is returned it always goes to the current return queue,
+%% for both prefix messages and current messages
+take_next_msg(#?MODULE{prefix_msgs = {R, P}} = State) ->
+ %% conversion
+ take_next_msg(State#?MODULE{prefix_msgs = {length(R), R, length(P), P}});
+take_next_msg(#?MODULE{prefix_msgs = {NumR, [{'$empty_msg', _} = Msg | Rem],
+ NumP, P}} = State) ->
+ %% there are prefix returns, these should be served first
+ {Msg, State#?MODULE{prefix_msgs = {NumR-1, Rem, NumP, P}}};
+take_next_msg(#?MODULE{prefix_msgs = {NumR, [Header | Rem], NumP, P}} = State) ->
+ %% there are prefix returns, these should be served first
+ {{'$prefix_msg', Header},
+ State#?MODULE{prefix_msgs = {NumR-1, Rem, NumP, P}}};
+take_next_msg(#?MODULE{returns = Returns,
+ messages = Messages0,
+ prefix_msgs = {NumR, R, NumP, P}} = State) ->
+    %% use peek rather than out here as the most likely case is an empty
+ %% queue
+ case lqueue:peek(Returns) of
+ {value, NextMsg} ->
+ {NextMsg,
+ State#?MODULE{returns = lqueue:drop(Returns)}};
+ empty when P == [] ->
+ case lqueue:out(Messages0) of
+ {empty, _} ->
+ empty;
+ {{value, {_, _} = SeqMsg}, Messages} ->
+                    {SeqMsg, State#?MODULE{messages = Messages}}
+ end;
+ empty ->
+ [Msg | Rem] = P,
+ case Msg of
+ {Header, 'empty'} ->
+ %% There are prefix msgs
+ {{'$empty_msg', Header},
+ State#?MODULE{prefix_msgs = {NumR, R, NumP-1, Rem}}};
+ Header ->
+ {{'$prefix_msg', Header},
+ State#?MODULE{prefix_msgs = {NumR, R, NumP-1, Rem}}}
+ end
+ end.
+
+send_msg_effect({CTag, CPid}, Msgs) ->
+ {send_msg, CPid, {delivery, CTag, Msgs}, [local, ra_event]}.
+
+send_log_effect({CTag, CPid}, IdxMsgs) ->
+ {RaftIdxs, Data} = lists:unzip(IdxMsgs),
+ {log, RaftIdxs,
+ fun(Log) ->
+ Msgs = lists:zipwith(fun ({enqueue, _, _, Msg}, {MsgId, Header}) ->
+ {MsgId, {Header, Msg}}
+ end, Log, Data),
+ [{send_msg, CPid, {delivery, CTag, Msgs}, [local, ra_event]}]
+ end,
+ {local, node(CPid)}}.
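+
+%% send_log_effect/2 emits a ra {log, ...} effect: ra reads the entries at
+%% RaftIdxs back from the log and the fun zips them with the cached
+%% {MsgId, Header} pairs, re-hydrating message bodies that were dropped
+%% from memory ('empty') before delivery to the consumer.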
+
+reply_log_effect(RaftIdx, MsgId, Header, Ready, From) ->
+ {log, [RaftIdx],
+ fun([{enqueue, _, _, Msg}]) ->
+ [{reply, From, {wrap_reply,
+ {dequeue, {MsgId, {Header, Msg}}, Ready}}}]
+ end}.
+
+checkout_one(Meta, #?MODULE{service_queue = SQ0,
+ messages = Messages0,
+ consumers = Cons0} = InitState) ->
+ case priority_queue:out(SQ0) of
+ {{value, ConsumerId}, SQ1} ->
+ case take_next_msg(InitState) of
+ {ConsumerMsg, State0} ->
+ %% there are consumers waiting to be serviced
+ %% process consumer checkout
+ case maps:find(ConsumerId, Cons0) of
+ {ok, #consumer{credit = 0}} ->
+ %% no credit but was still on queue
+ %% can happen when draining
+ %% recurse without consumer on queue
+ checkout_one(Meta, InitState#?MODULE{service_queue = SQ1});
+ {ok, #consumer{status = cancelled}} ->
+ checkout_one(Meta, InitState#?MODULE{service_queue = SQ1});
+ {ok, #consumer{status = suspected_down}} ->
+ checkout_one(Meta, InitState#?MODULE{service_queue = SQ1});
+ {ok, #consumer{checked_out = Checked0,
+ next_msg_id = Next,
+ credit = Credit,
+ delivery_count = DelCnt} = Con0} ->
+ Checked = maps:put(Next, ConsumerMsg, Checked0),
+ Con = Con0#consumer{checked_out = Checked,
+ next_msg_id = Next + 1,
+ credit = Credit - 1,
+ delivery_count = DelCnt + 1},
+ State1 = update_or_remove_sub(Meta,
+ ConsumerId, Con,
+ State0#?MODULE{service_queue = SQ1}),
+ {State, Msg} =
+ case ConsumerMsg of
+ {'$prefix_msg', Header} ->
+ {subtract_in_memory_counts(
+ Header, add_bytes_checkout(Header, State1)),
+ ConsumerMsg};
+ {'$empty_msg', Header} ->
+ {add_bytes_checkout(Header, State1),
+ ConsumerMsg};
+ {_, {_, {Header, 'empty'}} = M} ->
+ {add_bytes_checkout(Header, State1),
+ M};
+ {_, {_, {Header, _} = M}} ->
+ {subtract_in_memory_counts(
+ Header,
+ add_bytes_checkout(Header, State1)),
+ M}
+ end,
+ {success, ConsumerId, Next, Msg, State};
+ error ->
+ %% consumer did not exist but was queued, recurse
+ checkout_one(Meta, InitState#?MODULE{service_queue = SQ1})
+ end;
+ empty ->
+ {nochange, InitState}
+ end;
+ {empty, _} ->
+ case lqueue:len(Messages0) of
+ 0 -> {nochange, InitState};
+ _ -> {inactive, InitState}
+ end
+ end.
+
+update_or_remove_sub(_Meta, ConsumerId, #consumer{lifetime = auto,
+ credit = 0} = Con,
+ #?MODULE{consumers = Cons} = State) ->
+ State#?MODULE{consumers = maps:put(ConsumerId, Con, Cons)};
+update_or_remove_sub(_Meta, ConsumerId, #consumer{lifetime = auto} = Con,
+ #?MODULE{consumers = Cons,
+ service_queue = ServiceQueue} = State) ->
+ State#?MODULE{consumers = maps:put(ConsumerId, Con, Cons),
+ service_queue = uniq_queue_in(ConsumerId, Con, ServiceQueue)};
+update_or_remove_sub(#{system_time := Ts},
+ ConsumerId, #consumer{lifetime = once,
+ checked_out = Checked,
+ credit = 0} = Con,
+ #?MODULE{consumers = Cons} = State) ->
+ case maps:size(Checked) of
+ 0 ->
+ % we're done with this consumer
+ State#?MODULE{consumers = maps:remove(ConsumerId, Cons),
+ last_active = Ts};
+ _ ->
+ % there are unsettled items so need to keep around
+ State#?MODULE{consumers = maps:put(ConsumerId, Con, Cons)}
+ end;
+update_or_remove_sub(_Meta, ConsumerId, #consumer{lifetime = once} = Con,
+ #?MODULE{consumers = Cons,
+ service_queue = ServiceQueue} = State) ->
+ State#?MODULE{consumers = maps:put(ConsumerId, Con, Cons),
+ service_queue = uniq_queue_in(ConsumerId, Con, ServiceQueue)}.
+
+uniq_queue_in(Key, #consumer{priority = P}, Queue) ->
+    % TODO: priority_queue:member/2 could be quite expensive, however the
+    % practical number of unique consumers may not be large enough for it
+    % to matter
+ case priority_queue:member(Key, Queue) of
+ true ->
+ Queue;
+ false ->
+ priority_queue:in(Key, P, Queue)
+ end.
+
+update_consumer(ConsumerId, Meta, Spec, Priority,
+ #?MODULE{cfg = #cfg{consumer_strategy = competing}} = State0) ->
+ %% general case, single active consumer off
+ update_consumer0(ConsumerId, Meta, Spec, Priority, State0);
+update_consumer(ConsumerId, Meta, Spec, Priority,
+ #?MODULE{consumers = Cons0,
+ cfg = #cfg{consumer_strategy = single_active}} = State0)
+ when map_size(Cons0) == 0 ->
+ %% single active consumer on, no one is consuming yet
+ update_consumer0(ConsumerId, Meta, Spec, Priority, State0);
+update_consumer(ConsumerId, Meta, {Life, Credit, Mode}, Priority,
+ #?MODULE{cfg = #cfg{consumer_strategy = single_active},
+ waiting_consumers = WaitingConsumers0} = State0) ->
+ %% single active consumer on and one active consumer already
+ %% adding the new consumer to the waiting list
+ Consumer = #consumer{lifetime = Life, meta = Meta,
+ priority = Priority,
+ credit = Credit, credit_mode = Mode},
+ WaitingConsumers1 = WaitingConsumers0 ++ [{ConsumerId, Consumer}],
+ State0#?MODULE{waiting_consumers = WaitingConsumers1}.
+
+update_consumer0(ConsumerId, Meta, {Life, Credit, Mode}, Priority,
+ #?MODULE{consumers = Cons0,
+ service_queue = ServiceQueue0} = State0) ->
+ %% TODO: this logic may not be correct for updating a pre-existing consumer
+ Init = #consumer{lifetime = Life, meta = Meta,
+ priority = Priority,
+ credit = Credit, credit_mode = Mode},
+ Cons = maps:update_with(ConsumerId,
+ fun(S) ->
+ %% remove any in-flight messages from
+ %% the credit update
+ N = maps:size(S#consumer.checked_out),
+ C = max(0, Credit - N),
+ S#consumer{lifetime = Life, credit = C}
+ end, Init, Cons0),
+ ServiceQueue = maybe_queue_consumer(ConsumerId, maps:get(ConsumerId, Cons),
+ ServiceQueue0),
+ State0#?MODULE{consumers = Cons, service_queue = ServiceQueue}.
+
+maybe_queue_consumer(ConsumerId, #consumer{credit = Credit} = Con,
+ ServiceQueue0) ->
+ case Credit > 0 of
+ true ->
+            % consumer needs service - check if already on service queue
+ uniq_queue_in(ConsumerId, Con, ServiceQueue0);
+ false ->
+ ServiceQueue0
+ end.
+
+%% creates a dehydrated version of the current state to be cached and
+%% potentially used for a snapshot at a later point
+dehydrate_state(#?MODULE{messages = Messages,
+ consumers = Consumers,
+ returns = Returns,
+ prefix_msgs = {PRCnt, PrefRet0, PPCnt, PrefMsg0},
+ waiting_consumers = Waiting0} = State) ->
+ RCnt = lqueue:len(Returns),
+ %% TODO: optimise this function as far as possible
+ PrefRet1 = lists:foldr(fun ({'$prefix_msg', Header}, Acc) ->
+ [Header | Acc];
+ ({'$empty_msg', _} = Msg, Acc) ->
+ [Msg | Acc];
+ ({_, {_, {Header, 'empty'}}}, Acc) ->
+ [{'$empty_msg', Header} | Acc];
+ ({_, {_, {Header, _}}}, Acc) ->
+ [Header | Acc]
+ end,
+ [],
+ lqueue:to_list(Returns)),
+ PrefRet = PrefRet0 ++ PrefRet1,
+ PrefMsgsSuff = dehydrate_messages(Messages, []),
+    %% prefix messages are not populated in normal operation, only after
+ %% recovering from a snapshot
+ PrefMsgs = PrefMsg0 ++ PrefMsgsSuff,
+ Waiting = [{Cid, dehydrate_consumer(C)} || {Cid, C} <- Waiting0],
+ State#?MODULE{messages = lqueue:new(),
+ ra_indexes = rabbit_fifo_index:empty(),
+ release_cursors = lqueue:new(),
+ consumers = maps:map(fun (_, C) ->
+ dehydrate_consumer(C)
+ end, Consumers),
+ returns = lqueue:new(),
+ prefix_msgs = {PRCnt + RCnt, PrefRet,
+ PPCnt + lqueue:len(Messages), PrefMsgs},
+ waiting_consumers = Waiting}.
+
+%% TODO: build the accumulator in order to avoid the lists:reverse/1 call
+dehydrate_messages(Msgs0, Acc0) ->
+ {OutRes, Msgs} = lqueue:out(Msgs0),
+ case OutRes of
+ {value, {_MsgId, {_RaftId, {_, 'empty'} = Msg}}} ->
+ dehydrate_messages(Msgs, [Msg | Acc0]);
+ {value, {_MsgId, {_RaftId, {Header, _}}}} ->
+ dehydrate_messages(Msgs, [Header | Acc0]);
+ empty ->
+ lists:reverse(Acc0)
+ end.
+
+dehydrate_consumer(#consumer{checked_out = Checked0} = Con) ->
+ Checked = maps:map(fun (_, {'$prefix_msg', _} = M) ->
+ M;
+ (_, {'$empty_msg', _} = M) ->
+ M;
+ (_, {_, {_, {Header, 'empty'}}}) ->
+ {'$empty_msg', Header};
+ (_, {_, {_, {Header, _}}}) ->
+ {'$prefix_msg', Header}
+ end, Checked0),
+ Con#consumer{checked_out = Checked}.
+
+%% make the state suitable for equality comparison
+normalize(#?MODULE{messages = Messages,
+ release_cursors = Cursors} = State) ->
+ State#?MODULE{messages = lqueue:from_list(lqueue:to_list(Messages)),
+ release_cursors = lqueue:from_list(lqueue:to_list(Cursors))}.
+
+is_over_limit(#?MODULE{cfg = #cfg{max_length = undefined,
+ max_bytes = undefined}}) ->
+ false;
+is_over_limit(#?MODULE{cfg = #cfg{max_length = MaxLength,
+ max_bytes = MaxBytes},
+ msg_bytes_enqueue = BytesEnq} = State) ->
+ messages_ready(State) > MaxLength orelse (BytesEnq > MaxBytes).
+
+is_below_soft_limit(#?MODULE{cfg = #cfg{max_length = undefined,
+ max_bytes = undefined}}) ->
+ false;
+is_below_soft_limit(#?MODULE{cfg = #cfg{max_length = MaxLength,
+ max_bytes = MaxBytes},
+ msg_bytes_enqueue = BytesEnq} = State) ->
+ is_below(MaxLength, messages_ready(State)) andalso
+ is_below(MaxBytes, BytesEnq).
+
+is_below(undefined, _Num) ->
+ true;
+is_below(Val, Num) when is_integer(Val) andalso is_integer(Num) ->
+ Num =< trunc(Val * ?LOW_LIMIT).
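+
+%% With ?LOW_LIMIT = 0.8, a queue configured with max_length = 100 is
+%% below the soft limit once at most 80 messages are ready:
+%%
+%%   is_below(100, 80) %% => true
+%%   is_below(100, 81) %% => false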
+
+-spec make_enqueue(option(pid()), option(msg_seqno()), raw_msg()) -> protocol().
+make_enqueue(Pid, Seq, Msg) ->
+ #enqueue{pid = Pid, seq = Seq, msg = Msg}.
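+
+%% The records built by make_enqueue/3 and the other make_* constructors
+%% below are the commands a client submits to the queue's ra cluster,
+%% e.g. (illustrative sketch; ServerId and Seq are placeholders):
+%%
+%%   Cmd = rabbit_fifo:make_enqueue(self(), Seq, Msg),
+%%   ok = ra:pipeline_command(ServerId, Cmd, Seq).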
+
+-spec make_register_enqueuer(pid()) -> protocol().
+make_register_enqueuer(Pid) ->
+ #register_enqueuer{pid = Pid}.
+
+-spec make_checkout(consumer_id(),
+ checkout_spec(), consumer_meta()) -> protocol().
+make_checkout(ConsumerId, Spec, Meta) ->
+ #checkout{consumer_id = ConsumerId,
+ spec = Spec, meta = Meta}.
+
+-spec make_settle(consumer_id(), [msg_id()]) -> protocol().
+make_settle(ConsumerId, MsgIds) when is_list(MsgIds) ->
+ #settle{consumer_id = ConsumerId, msg_ids = MsgIds}.
+
+-spec make_return(consumer_id(), [msg_id()]) -> protocol().
+make_return(ConsumerId, MsgIds) ->
+ #return{consumer_id = ConsumerId, msg_ids = MsgIds}.
+
+-spec make_discard(consumer_id(), [msg_id()]) -> protocol().
+make_discard(ConsumerId, MsgIds) ->
+ #discard{consumer_id = ConsumerId, msg_ids = MsgIds}.
+
+-spec make_credit(consumer_id(), non_neg_integer(), non_neg_integer(),
+ boolean()) -> protocol().
+make_credit(ConsumerId, Credit, DeliveryCount, Drain) ->
+ #credit{consumer_id = ConsumerId,
+ credit = Credit,
+ delivery_count = DeliveryCount,
+ drain = Drain}.
+
+-spec make_purge() -> protocol().
+make_purge() -> #purge{}.
+
+-spec make_garbage_collection() -> protocol().
+make_garbage_collection() -> #garbage_collection{}.
+
+-spec make_purge_nodes([node()]) -> protocol().
+make_purge_nodes(Nodes) ->
+ #purge_nodes{nodes = Nodes}.
+
+-spec make_update_config(config()) -> protocol().
+make_update_config(Config) ->
+ #update_config{config = Config}.
+
+add_bytes_enqueue(Bytes,
+ #?MODULE{msg_bytes_enqueue = Enqueue} = State)
+ when is_integer(Bytes) ->
+ State#?MODULE{msg_bytes_enqueue = Enqueue + Bytes};
+add_bytes_enqueue(#{size := Bytes}, State) ->
+ add_bytes_enqueue(Bytes, State).
+
+add_bytes_drop(Bytes,
+ #?MODULE{msg_bytes_enqueue = Enqueue} = State)
+ when is_integer(Bytes) ->
+ State#?MODULE{msg_bytes_enqueue = Enqueue - Bytes};
+add_bytes_drop(#{size := Bytes}, State) ->
+ add_bytes_drop(Bytes, State).
+
+add_bytes_checkout(Bytes,
+ #?MODULE{msg_bytes_checkout = Checkout,
+ msg_bytes_enqueue = Enqueue } = State)
+ when is_integer(Bytes) ->
+ State#?MODULE{msg_bytes_checkout = Checkout + Bytes,
+ msg_bytes_enqueue = Enqueue - Bytes};
+add_bytes_checkout(#{size := Bytes}, State) ->
+ add_bytes_checkout(Bytes, State).
+
+add_bytes_settle(Bytes,
+ #?MODULE{msg_bytes_checkout = Checkout} = State)
+ when is_integer(Bytes) ->
+ State#?MODULE{msg_bytes_checkout = Checkout - Bytes};
+add_bytes_settle(#{size := Bytes}, State) ->
+ add_bytes_settle(Bytes, State).
+
+add_bytes_return(Bytes,
+ #?MODULE{msg_bytes_checkout = Checkout,
+ msg_bytes_enqueue = Enqueue} = State)
+ when is_integer(Bytes) ->
+ State#?MODULE{msg_bytes_checkout = Checkout - Bytes,
+ msg_bytes_enqueue = Enqueue + Bytes};
+add_bytes_return(#{size := Bytes}, State) ->
+ add_bytes_return(Bytes, State).
+
+add_in_memory_counts(Bytes,
+ #?MODULE{msg_bytes_in_memory = InMemoryBytes,
+ msgs_ready_in_memory = InMemoryCount} = State)
+ when is_integer(Bytes) ->
+ State#?MODULE{msg_bytes_in_memory = InMemoryBytes + Bytes,
+ msgs_ready_in_memory = InMemoryCount + 1};
+add_in_memory_counts(#{size := Bytes}, State) ->
+ add_in_memory_counts(Bytes, State).
+
+subtract_in_memory_counts(Bytes,
+ #?MODULE{msg_bytes_in_memory = InMemoryBytes,
+ msgs_ready_in_memory = InMemoryCount} = State)
+ when is_integer(Bytes) ->
+ State#?MODULE{msg_bytes_in_memory = InMemoryBytes - Bytes,
+ msgs_ready_in_memory = InMemoryCount - 1};
+subtract_in_memory_counts(#{size := Bytes}, State) ->
+ subtract_in_memory_counts(Bytes, State).
+
+message_size(#basic_message{content = Content}) ->
+ #content{payload_fragments_rev = PFR} = Content,
+ iolist_size(PFR);
+message_size({'$prefix_msg', H}) ->
+ get_size_from_header(H);
+message_size({'$empty_msg', H}) ->
+ get_size_from_header(H);
+message_size(B) when is_binary(B) ->
+ byte_size(B);
+message_size(Msg) ->
+ %% probably only hit this for testing so ok to use erts_debug
+ erts_debug:size(Msg).
+
+get_size_from_header(Size) when is_integer(Size) ->
+ Size;
+get_size_from_header(#{size := B}) ->
+ B.
+
+
+all_nodes(#?MODULE{consumers = Cons0,
+ enqueuers = Enqs0,
+ waiting_consumers = WaitingConsumers0}) ->
+ Nodes0 = maps:fold(fun({_, P}, _, Acc) ->
+ Acc#{node(P) => ok}
+ end, #{}, Cons0),
+ Nodes1 = maps:fold(fun(P, _, Acc) ->
+ Acc#{node(P) => ok}
+ end, Nodes0, Enqs0),
+ maps:keys(
+ lists:foldl(fun({{_, P}, _}, Acc) ->
+ Acc#{node(P) => ok}
+ end, Nodes1, WaitingConsumers0)).
+
+all_pids_for(Node, #?MODULE{consumers = Cons0,
+ enqueuers = Enqs0,
+ waiting_consumers = WaitingConsumers0}) ->
+ Cons = maps:fold(fun({_, P}, _, Acc)
+ when node(P) =:= Node ->
+ [P | Acc];
+ (_, _, Acc) -> Acc
+ end, [], Cons0),
+ Enqs = maps:fold(fun(P, _, Acc)
+ when node(P) =:= Node ->
+ [P | Acc];
+ (_, _, Acc) -> Acc
+ end, Cons, Enqs0),
+ lists:foldl(fun({{_, P}, _}, Acc)
+ when node(P) =:= Node ->
+ [P | Acc];
+ (_, Acc) -> Acc
+ end, Enqs, WaitingConsumers0).
+
+suspected_pids_for(Node, #?MODULE{consumers = Cons0,
+ enqueuers = Enqs0,
+ waiting_consumers = WaitingConsumers0}) ->
+ Cons = maps:fold(fun({_, P}, #consumer{status = suspected_down}, Acc)
+ when node(P) =:= Node ->
+ [P | Acc];
+ (_, _, Acc) -> Acc
+ end, [], Cons0),
+ Enqs = maps:fold(fun(P, #enqueuer{status = suspected_down}, Acc)
+ when node(P) =:= Node ->
+ [P | Acc];
+ (_, _, Acc) -> Acc
+ end, Cons, Enqs0),
+ lists:foldl(fun({{_, P},
+ #consumer{status = suspected_down}}, Acc)
+ when node(P) =:= Node ->
+ [P | Acc];
+ (_, Acc) -> Acc
+ end, Enqs, WaitingConsumers0).
+
+is_expired(Ts, #?MODULE{cfg = #cfg{expires = Expires},
+ last_active = LastActive,
+ consumers = Consumers})
+ when is_number(LastActive) andalso is_number(Expires) ->
+ %% TODO: should it be active consumers?
+ Active = maps:filter(fun (_, #consumer{status = suspected_down}) ->
+ false;
+ (_, _) ->
+ true
+ end, Consumers),
+
+ Ts > (LastActive + Expires) andalso maps:size(Active) == 0;
+is_expired(_Ts, _State) ->
+ false.
+
+get_priority_from_args(#{args := Args}) ->
+ case rabbit_misc:table_lookup(Args, <<"x-priority">>) of
+ {_Key, Value} ->
+ Value;
+ _ -> 0
+ end;
+get_priority_from_args(_) ->
+ 0.
diff --git a/deps/rabbit/src/rabbit_fifo.hrl b/deps/rabbit/src/rabbit_fifo.hrl
new file mode 100644
index 0000000000..a63483becd
--- /dev/null
+++ b/deps/rabbit/src/rabbit_fifo.hrl
@@ -0,0 +1,210 @@
+
+-type option(T) :: undefined | T.
+
+-type raw_msg() :: term().
+%% The raw message. It is opaque to rabbit_fifo.
+
+-type msg_in_id() :: non_neg_integer().
+% a queue scoped monotonically incrementing integer used to enforce order
+% in the unassigned messages map
+
+-type msg_id() :: non_neg_integer().
+%% A consumer-scoped monotonically incrementing integer included with a
+%% {@link delivery/0.}. Used to settle deliveries using
+%% {@link rabbit_fifo_client:settle/3.}
+
+-type msg_seqno() :: non_neg_integer().
+%% A sender process scoped monotonically incrementing integer included
+%% in enqueue messages. Used to ensure ordering of messages sent from the
+%% same process
+
+-type msg_header() :: msg_size() |
+ #{size := msg_size(),
+ delivery_count => non_neg_integer()}.
+%% The message header:
+%% delivery_count: the number of unsuccessful delivery attempts.
+%% A non-zero value indicates a previous attempt.
+%% If it only contains the size, it can be condensed to a bare integer
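+%% Examples: a header of `12' denotes a 12-byte message that has never
+%% been redelivered, while `#{size => 12, delivery_count => 2}' records
+%% two prior unsuccessful delivery attempts.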
+
+-type msg() :: {msg_header(), raw_msg()}.
+%% message with a header map.
+
+-type msg_size() :: non_neg_integer().
+%% the size in bytes of the msg payload
+
+-type indexed_msg() :: {ra:index(), msg()}.
+
+-type prefix_msg() :: {'$prefix_msg', msg_header()}.
+
+-type delivery_msg() :: {msg_id(), msg()}.
+%% A tuple consisting of the message id and the headered message.
+
+-type consumer_tag() :: binary().
+%% An arbitrary binary tag used to distinguish between different consumers
+%% set up by the same process. See: {@link rabbit_fifo_client:checkout/3.}
+
+-type delivery() :: {delivery, consumer_tag(), [delivery_msg()]}.
+%% Represents the delivery of one or more rabbit_fifo messages.
+
+-type consumer_id() :: {consumer_tag(), pid()}.
+%% The entity that receives messages. Uniquely identifies a consumer.
+
+-type credit_mode() :: simple_prefetch | credited.
+%% determines how credit is replenished
+
+-type checkout_spec() :: {once | auto, Num :: non_neg_integer(),
+ credit_mode()} |
+ {dequeue, settled | unsettled} |
+ cancel.
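+%% e.g. {auto, 10, simple_prefetch} grants the consumer 10 credits (at
+%% most 10 unsettled deliveries, replenished as messages are settled or
+%% returned), while {dequeue, settled} performs a single one-off dequeue.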
+
+-type consumer_meta() :: #{ack => boolean(),
+ username => binary(),
+ prefetch => non_neg_integer(),
+ args => list()}.
+%% static metadata associated with a consumer
+
+
+-type applied_mfa() :: {module(), atom(), list()}.
+% represents a partially applied module call
+
+-define(RELEASE_CURSOR_EVERY, 2048).
+-define(RELEASE_CURSOR_EVERY_MAX, 3200000).
+-define(USE_AVG_HALF_LIFE, 10000.0).
+%% an average QQ without any message uses about 100KB so setting this limit
+%% to ~20 times that should be relatively safe.
+-define(GC_MEM_LIMIT_B, 2000000).
+
+-define(MB, 1048576).
+-define(LOW_LIMIT, 0.8).
+
+-record(consumer,
+ {meta = #{} :: consumer_meta(),
+ checked_out = #{} :: #{msg_id() => {msg_in_id(), indexed_msg()}},
+ next_msg_id = 0 :: msg_id(), % part of snapshot data
+ %% max number of messages that can be sent
+ %% decremented for each delivery
+         credit = 0 :: non_neg_integer(),
+ %% total number of checked out messages - ever
+ %% incremented for each delivery
+ delivery_count = 0 :: non_neg_integer(),
+ %% the mode of how credit is incremented
+ %% simple_prefetch: credit is re-filled as deliveries are settled
+ %% or returned.
+ %% credited: credit can only be changed by receiving a consumer_credit
+ %% command: `{consumer_credit, ReceiverDeliveryCount, Credit}'
+ credit_mode = simple_prefetch :: credit_mode(), % part of snapshot data
+ lifetime = once :: once | auto,
+ status = up :: up | suspected_down | cancelled,
+ priority = 0 :: non_neg_integer()
+ }).
+
+-type consumer() :: #consumer{}.
+
+-type consumer_strategy() :: competing | single_active.
+
+-type milliseconds() :: non_neg_integer().
+
+-record(enqueuer,
+ {next_seqno = 1 :: msg_seqno(),
+ % out of order enqueues - sorted list
+ pending = [] :: [{msg_seqno(), ra:index(), raw_msg()}],
+ status = up :: up |
+ suspected_down,
+ %% it is useful to have a record of when this was blocked
+ %% so that we can retry sending the block effect if
+ %% the publisher did not receive the initial one
+ blocked :: undefined | ra:index(),
+ unused_1,
+ unused_2
+ }).
+
+-record(cfg,
+ {name :: atom(),
+ resource :: rabbit_types:r('queue'),
+ release_cursor_interval :: option({non_neg_integer(), non_neg_integer()}),
+ dead_letter_handler :: option(applied_mfa()),
+ become_leader_handler :: option(applied_mfa()),
+ overflow_strategy = drop_head :: drop_head | reject_publish,
+ max_length :: option(non_neg_integer()),
+ max_bytes :: option(non_neg_integer()),
+ %% whether single active consumer is on or not for this queue
+ consumer_strategy = competing :: consumer_strategy(),
+ %% the maximum number of unsuccessful delivery attempts permitted
+ delivery_limit :: option(non_neg_integer()),
+ max_in_memory_length :: option(non_neg_integer()),
+ max_in_memory_bytes :: option(non_neg_integer()),
+ expires :: undefined | milliseconds(),
+ unused_1,
+ unused_2
+ }).
+
+-type prefix_msgs() :: {list(), list()} |
+ {non_neg_integer(), list(),
+ non_neg_integer(), list()}.
+
+-record(rabbit_fifo,
+ {cfg :: #cfg{},
+ % unassigned messages
+ messages = lqueue:new() :: lqueue:lqueue({msg_in_id(), indexed_msg()}),
+ % defines the next message id
+ next_msg_num = 1 :: msg_in_id(),
+         % queue of returned msg_in_ids - when checking out we pick from
+         % here before the main message queue
+ returns = lqueue:new() :: lqueue:lqueue(prefix_msg() |
+ {msg_in_id(), indexed_msg()}),
+ % a counter of enqueues - used to trigger shadow copy points
+ enqueue_count = 0 :: non_neg_integer(),
+ % a map containing all the live processes that have ever enqueued
+ % a message to this queue as well as a cached value of the smallest
+ % ra_index of all pending enqueues
+ enqueuers = #{} :: #{pid() => #enqueuer{}},
+ % master index of all enqueue raft indexes including pending
+ % enqueues
+ % rabbit_fifo_index can be slow when calculating the smallest
+ % index when there are large gaps but should be faster than gb_trees
+ % for normal appending operations as it's backed by a map
+ ra_indexes = rabbit_fifo_index:empty() :: rabbit_fifo_index:state(),
+ release_cursors = lqueue:new() :: lqueue:lqueue({release_cursor,
+ ra:index(), #rabbit_fifo{}}),
+ % consumers need to reflect consumer state at time of snapshot
+ % needs to be part of snapshot
+ consumers = #{} :: #{consumer_id() => #consumer{}},
+ % consumers that require further service are queued here
+ % needs to be part of snapshot
+ service_queue = priority_queue:new() :: priority_queue:q(),
+ %% This is a special field that is only used for snapshots
+ %% It represents the queued messages at the time the
+ %% dehydrated snapshot state was cached.
+ %% As release_cursors are only emitted for raft indexes where all
+ %% prior messages no longer contribute to the current state we can
+ %% replace all message payloads with their sizes (to be used for
+ %% overflow calculations).
+ %% This is done so that consumers are still served in a deterministic
+ %% order on recovery.
+ prefix_msgs = {0, [], 0, []} :: prefix_msgs(),
+ msg_bytes_enqueue = 0 :: non_neg_integer(),
+ msg_bytes_checkout = 0 :: non_neg_integer(),
+         %% waiting consumers; one is picked when the active consumer is
+         %% cancelled or dies
+ %% used only when single active consumer is on
+ waiting_consumers = [] :: [{consumer_id(), consumer()}],
+ msg_bytes_in_memory = 0 :: non_neg_integer(),
+ msgs_ready_in_memory = 0 :: non_neg_integer(),
+ last_active :: undefined | non_neg_integer(),
+ unused_1,
+ unused_2
+ }).
+
+-type config() :: #{name := atom(),
+ queue_resource := rabbit_types:r('queue'),
+ dead_letter_handler => applied_mfa(),
+ become_leader_handler => applied_mfa(),
+ release_cursor_interval => non_neg_integer(),
+ max_length => non_neg_integer(),
+ max_bytes => non_neg_integer(),
+ max_in_memory_length => non_neg_integer(),
+ max_in_memory_bytes => non_neg_integer(),
+ overflow_strategy => drop_head | reject_publish,
+ single_active_consumer_on => boolean(),
+ delivery_limit => non_neg_integer(),
+ expires => non_neg_integer(),
+ created => non_neg_integer()
+ }.
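+
+%% For illustration only: a config map of the shape accepted above might
+%% look like the following (the queue name and limits are hypothetical).
+%% ```
+%% Conf = #{name => qq1,
+%%          queue_resource => rabbit_misc:r(<<"/">>, queue, <<"qq1">>),
+%%          max_length => 100000,
+%%          overflow_strategy => reject_publish,
+%%          single_active_consumer_on => false,
+%%          delivery_limit => 10},
+%% '''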
diff --git a/deps/rabbit/src/rabbit_fifo_client.erl b/deps/rabbit/src/rabbit_fifo_client.erl
new file mode 100644
index 0000000000..3990222b15
--- /dev/null
+++ b/deps/rabbit/src/rabbit_fifo_client.erl
@@ -0,0 +1,888 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+%% @doc Provides an easy to consume API for interacting with the {@link rabbit_fifo.}
+%% state machine implementation running inside a `ra' raft system.
+%%
+%% Handles command tracking and other non-functional concerns.
+-module(rabbit_fifo_client).
+
+-export([
+ init/2,
+ init/3,
+ init/5,
+ checkout/5,
+ cancel_checkout/2,
+ enqueue/2,
+ enqueue/3,
+ dequeue/3,
+ settle/3,
+ return/3,
+ discard/3,
+ credit/4,
+ handle_ra_event/3,
+ untracked_enqueue/2,
+ purge/1,
+ cluster_name/1,
+ update_machine_state/2,
+ pending_size/1,
+ stat/1,
+ stat/2
+ ]).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-define(SOFT_LIMIT, 32).
+-define(TIMER_TIME, 10000).
+
+-type seq() :: non_neg_integer().
+%% last_applied is initialised to -1
+-type maybe_seq() :: integer().
+-type action() :: {send_credit_reply, Available :: non_neg_integer()} |
+ {send_drained, CTagCredit ::
+ {rabbit_fifo:consumer_tag(), non_neg_integer()}}.
+-type actions() :: [action()].
+
+-type cluster_name() :: rabbit_types:r(queue).
+
+-record(consumer, {last_msg_id :: seq() | -1,
+ ack = false :: boolean(),
+ delivery_count = 0 :: non_neg_integer()}).
+
+-record(cfg, {cluster_name :: cluster_name(),
+ servers = [] :: [ra:server_id()],
+ soft_limit = ?SOFT_LIMIT :: non_neg_integer(),
+ block_handler = fun() -> ok end :: fun(() -> term()),
+ unblock_handler = fun() -> ok end :: fun(() -> ok),
+ timeout :: non_neg_integer(),
+ version = 0 :: non_neg_integer()}).
+
+-record(state, {cfg :: #cfg{},
+ leader :: undefined | ra:server_id(),
+ queue_status :: undefined | go | reject_publish,
+ next_seq = 0 :: seq(),
+                %% last_applied is initialised to -1 to note that no command
+                %% has yet been applied, while still allowing the first
+                %% messages in the sequence to be resent if they are lost
+                %% (messages are sent from last_applied + 1)
+ last_applied = -1 :: maybe_seq(),
+ next_enqueue_seq = 1 :: seq(),
+ %% indicates that we've exceeded the soft limit
+ slow = false :: boolean(),
+ unsent_commands = #{} :: #{rabbit_fifo:consumer_id() =>
+ {[seq()], [seq()], [seq()]}},
+ pending = #{} :: #{seq() =>
+ {term(), rabbit_fifo:command()}},
+ consumer_deliveries = #{} :: #{rabbit_fifo:consumer_tag() =>
+ #consumer{}},
+ timer_state :: term()
+ }).
+
+-opaque state() :: #state{}.
+
+-export_type([
+ state/0,
+ actions/0
+ ]).
+
+
+%% @doc Create the initial state for a new rabbit_fifo session. A state is needed
+%% to interact with a rabbit_fifo queue using {@module}.
+%% @param ClusterName the id of the cluster to interact with
+%% @param Servers The known servers of the queue. If the current leader is known
+%% ensure the leader node is at the head of the list.
+-spec init(cluster_name(), [ra:server_id()]) -> state().
+init(ClusterName, Servers) ->
+ init(ClusterName, Servers, ?SOFT_LIMIT).
+
+%% @doc Create the initial state for a new rabbit_fifo session. A state is needed
+%% to interact with a rabbit_fifo queue using {@module}.
+%% @param ClusterName the id of the cluster to interact with
+%% @param Servers The known servers of the queue. If the current leader is known
+%% ensure the leader node is at the head of the list.
+%% @param SoftLimit the maximum number of unapplied commands that may be
+%% pending before the client is flagged as slow.
+-spec init(cluster_name(), [ra:server_id()], non_neg_integer()) -> state().
+init(ClusterName = #resource{}, Servers, SoftLimit) ->
+ Timeout = application:get_env(kernel, net_ticktime, 60) + 5,
+ #state{cfg = #cfg{cluster_name = ClusterName,
+ servers = Servers,
+ soft_limit = SoftLimit,
+ timeout = Timeout * 1000}}.
+
+-spec init(cluster_name(), [ra:server_id()], non_neg_integer(), fun(() -> ok),
+ fun(() -> ok)) -> state().
+init(ClusterName = #resource{}, Servers, SoftLimit, BlockFun, UnblockFun) ->
+ %% net ticktime is in seconds
+ Timeout = application:get_env(kernel, net_ticktime, 60) + 5,
+ #state{cfg = #cfg{cluster_name = ClusterName,
+ servers = Servers,
+ block_handler = BlockFun,
+ unblock_handler = UnblockFun,
+ soft_limit = SoftLimit,
+ timeout = Timeout * 1000}}.
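+
+%% Example (illustrative sketch; the queue and server ids are hypothetical):
+%% ```
+%% QName = rabbit_misc:r(<<"/">>, queue, <<"qq1">>),
+%% Servers = [{qq1, node()}],
+%% State0 = rabbit_fifo_client:init(QName, Servers, 32,
+%%                                  fun() -> ok end,  %% block handler
+%%                                  fun() -> ok end), %% unblock handler
+%% '''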
+
+
+%% @doc Enqueues a message.
+%% @param Correlation an arbitrary erlang term used to correlate this
+%% command when it has been applied.
+%% @param Msg an arbitrary erlang term representing the message.
+%% @param State the current {@module} state.
+%% @returns
+%% `{ok | slow, State}' if the command was successfully sent. If the return
+%% tag is `slow' it means the limit is approaching and it is time to slow down
+%% the sending rate.
+%% {@module} assigns a sequence number to every raft command it issues. The
+%% SequenceNumber can be correlated to the applied sequence numbers returned
+%% by the {@link handle_ra_event/3. handle_ra_event/3} function.
+-spec enqueue(Correlation :: term(), Msg :: term(), State :: state()) ->
+ {ok | slow | reject_publish, state()}.
+enqueue(Correlation, Msg,
+ #state{queue_status = undefined,
+ next_enqueue_seq = 1,
+ cfg = #cfg{timeout = Timeout}} = State0) ->
+ %% it is the first enqueue, check the version
+ {_, Node} = Server = pick_server(State0),
+ case rpc:call(Node, ra_machine, version, [{machine, rabbit_fifo, #{}}]) of
+ 0 ->
+ %% the leader is running the old version
+ %% so we can't initialize the enqueuer session safely
+            %% fall back on the old behaviour
+ enqueue(Correlation, Msg, State0#state{queue_status = go});
+ 1 ->
+            %% we're running the new version on the leader, so do a
+            %% synchronous initialisation of the enqueuer session
+ Reg = rabbit_fifo:make_register_enqueuer(self()),
+ case ra:process_command(Server, Reg, Timeout) of
+ {ok, reject_publish, _} ->
+ {reject_publish, State0#state{queue_status = reject_publish}};
+ {ok, ok, _} ->
+ enqueue(Correlation, Msg, State0#state{queue_status = go});
+ {timeout, _} ->
+                    %% if we time out it is probably better to reject
+                    %% the message than to remain uncertain
+ {reject_publish, State0};
+ Err ->
+ exit(Err)
+ end;
+ {badrpc, nodedown} ->
+ {reject_publish, State0}
+ end;
+enqueue(_Correlation, _Msg,
+ #state{queue_status = reject_publish,
+ cfg = #cfg{}} = State) ->
+ {reject_publish, State};
+enqueue(Correlation, Msg,
+ #state{slow = Slow,
+ queue_status = go,
+ cfg = #cfg{block_handler = BlockFun}} = State0) ->
+ Node = pick_server(State0),
+ {Next, State1} = next_enqueue_seq(State0),
+ % by default there is no correlation id
+ Cmd = rabbit_fifo:make_enqueue(self(), Next, Msg),
+ case send_command(Node, Correlation, Cmd, low, State1) of
+ {slow, State} when not Slow ->
+ BlockFun(),
+ {slow, set_timer(State)};
+ Any ->
+ Any
+ end.
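+
+%% A sketch of reacting to the enqueue return tag (the backpressure and
+%% notification helpers are hypothetical):
+%% ```
+%% case rabbit_fifo_client:enqueue(Corr, Msg, State0) of
+%%     {ok, State}             -> State;
+%%     {slow, State}           -> apply_backpressure(State);
+%%     {reject_publish, State} -> notify_publisher_of_reject(State)
+%% end
+%% '''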
+
+%% @doc Enqueues a message.
+%% @param Msg an arbitrary erlang term representing the message.
+%% @param State the current {@module} state.
+%% @returns
+%% `{ok | slow, State}' if the command was successfully sent. If the return
+%% tag is `slow' it means the limit is approaching and it is time to slow down
+%% the sending rate.
+%% {@module} assigns a sequence number to every raft command it issues. The
+%% SequenceNumber can be correlated to the applied sequence numbers returned
+%% by the {@link handle_ra_event/3. handle_ra_event/3} function.
+%%
+-spec enqueue(Msg :: term(), State :: state()) ->
+ {ok | slow | reject_publish, state()}.
+enqueue(Msg, State) ->
+ enqueue(undefined, Msg, State).
+
+%% @doc Dequeue a message from the queue.
+%%
+%% This is a synchronous call. I.e. the call will block until the command
+%% has been accepted by the ra process or it times out.
+%%
+%% @param ConsumerTag a unique tag to identify this particular consumer.
+%% @param Settlement either `settled' or `unsettled'. When `settled' no
+%% further settlement needs to be done.
+%% @param State The {@module} state.
+%%
+%% @returns `{ok, MsgsReady, Msg, State}', `{empty, State}' or
+%% `{error | timeout, term()}'
+-spec dequeue(rabbit_fifo:consumer_tag(),
+ Settlement :: settled | unsettled, state()) ->
+    {ok, non_neg_integer(), term(), state()}
+ | {empty, state()} | {error | timeout, term()}.
+dequeue(ConsumerTag, Settlement,
+ #state{cfg = #cfg{timeout = Timeout,
+ cluster_name = QName}} = State0) ->
+ Node = pick_server(State0),
+ ConsumerId = consumer_id(ConsumerTag),
+ case ra:process_command(Node,
+ rabbit_fifo:make_checkout(ConsumerId,
+ {dequeue, Settlement},
+ #{}),
+ Timeout) of
+ {ok, {dequeue, empty}, Leader} ->
+ {empty, State0#state{leader = Leader}};
+ {ok, {dequeue, {MsgId, {MsgHeader, Msg0}}, MsgsReady}, Leader} ->
+ Count = case MsgHeader of
+ #{delivery_count := C} -> C;
+ _ -> 0
+ end,
+ IsDelivered = Count > 0,
+ Msg = add_delivery_count_header(Msg0, Count),
+ {ok, MsgsReady,
+ {QName, qref(Leader), MsgId, IsDelivered, Msg},
+ State0#state{leader = Leader}};
+ {ok, {error, _} = Err, _Leader} ->
+ Err;
+ Err ->
+ Err
+ end.
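+
+%% A sketch of a basic.get style dequeue with manual settlement:
+%% ```
+%% case rabbit_fifo_client:dequeue(<<"ctag-1">>, unsettled, State0) of
+%%     {empty, State} ->
+%%         {empty, State};
+%%     {ok, _Ready, {_QName, _QRef, MsgId, _Redelivered, _Msg}, State} ->
+%%         %% settle once the message has been processed
+%%         rabbit_fifo_client:settle(<<"ctag-1">>, [MsgId], State)
+%% end
+%% '''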
+
+add_delivery_count_header(#basic_message{} = Msg0, Count)
+ when is_integer(Count) ->
+ rabbit_basic:add_header(<<"x-delivery-count">>, long, Count, Msg0);
+add_delivery_count_header(Msg, _Count) ->
+ Msg.
+
+
+%% @doc Settle a message. Permanently removes message from the queue.
+%% @param ConsumerTag the tag uniquely identifying the consumer.
+%% @param MsgIds the message ids received with the {@link rabbit_fifo:delivery/0.}
+%% @param State the {@module} state
+%% @returns
+%% `{ok | slow, State}' if the command was successfully sent. If the return
+%% tag is `slow' it means the limit is approaching and it is time to slow down
+%% the sending rate.
+%%
+-spec settle(rabbit_fifo:consumer_tag(), [rabbit_fifo:msg_id()], state()) ->
+ {state(), list()}.
+settle(ConsumerTag, [_|_] = MsgIds, #state{slow = false} = State0) ->
+ Node = pick_server(State0),
+ Cmd = rabbit_fifo:make_settle(consumer_id(ConsumerTag), MsgIds),
+ case send_command(Node, undefined, Cmd, normal, State0) of
+ {_, S} ->
+ % turn slow into ok for this function
+ {S, []}
+ end;
+settle(ConsumerTag, [_|_] = MsgIds,
+ #state{unsent_commands = Unsent0} = State0) ->
+ ConsumerId = consumer_id(ConsumerTag),
+ %% we've reached the soft limit so will stash the command to be
+ %% sent once we have seen enough notifications
+ Unsent = maps:update_with(ConsumerId,
+ fun ({Settles, Returns, Discards}) ->
+ {Settles ++ MsgIds, Returns, Discards}
+ end, {MsgIds, [], []}, Unsent0),
+ {State0#state{unsent_commands = Unsent}, []}.
+
+%% @doc Return a message to the queue.
+%% @param ConsumerTag the tag uniquely identifying the consumer.
+%% @param MsgIds the message ids to return received
+%% from {@link rabbit_fifo:delivery/0.}
+%% @param State the {@module} state
+%% @returns
+%% `{ok | slow, State}' if the command was successfully sent. If the return
+%% tag is `slow' it means the limit is approaching and it is time to slow down
+%% the sending rate.
+%%
+-spec return(rabbit_fifo:consumer_tag(), [rabbit_fifo:msg_id()], state()) ->
+ {state(), list()}.
+return(ConsumerTag, [_|_] = MsgIds, #state{slow = false} = State0) ->
+ Node = pick_server(State0),
+ % TODO: make rabbit_fifo return support lists of message ids
+ Cmd = rabbit_fifo:make_return(consumer_id(ConsumerTag), MsgIds),
+ case send_command(Node, undefined, Cmd, normal, State0) of
+ {_, S} ->
+ {S, []}
+ end;
+return(ConsumerTag, [_|_] = MsgIds,
+ #state{unsent_commands = Unsent0} = State0) ->
+ ConsumerId = consumer_id(ConsumerTag),
+ %% we've reached the soft limit so will stash the command to be
+ %% sent once we have seen enough notifications
+ Unsent = maps:update_with(ConsumerId,
+ fun ({Settles, Returns, Discards}) ->
+ {Settles, Returns ++ MsgIds, Discards}
+ end, {[], MsgIds, []}, Unsent0),
+ {State0#state{unsent_commands = Unsent}, []}.
+
+%% @doc Discards a checked out message.
+%% If the queue has a dead_letter_handler configured this will be called.
+%% @param ConsumerTag the tag uniquely identifying the consumer.
+%% @param MsgIds the message ids to discard
+%% from {@link rabbit_fifo:delivery/0.}
+%% @param State the {@module} state
+%% @returns
+%% `{ok | slow, State}' if the command was successfully sent. If the return
+%% tag is `slow' it means the limit is approaching and it is time to slow down
+%% the sending rate.
+-spec discard(rabbit_fifo:consumer_tag(), [rabbit_fifo:msg_id()], state()) ->
+ {state(), list()}.
+discard(ConsumerTag, [_|_] = MsgIds, #state{slow = false} = State0) ->
+ Node = pick_server(State0),
+ Cmd = rabbit_fifo:make_discard(consumer_id(ConsumerTag), MsgIds),
+ case send_command(Node, undefined, Cmd, normal, State0) of
+ {_, S} ->
+ % turn slow into ok for this function
+ {S, []}
+ end;
+discard(ConsumerTag, [_|_] = MsgIds,
+ #state{unsent_commands = Unsent0} = State0) ->
+ ConsumerId = consumer_id(ConsumerTag),
+ %% we've reached the soft limit so will stash the command to be
+ %% sent once we have seen enough notifications
+ Unsent = maps:update_with(ConsumerId,
+ fun ({Settles, Returns, Discards}) ->
+ {Settles, Returns, Discards ++ MsgIds}
+ end, {[], [], MsgIds}, Unsent0),
+ {State0#state{unsent_commands = Unsent}, []}.
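+
+%% A sketch of acknowledging, requeueing and dead-lettering message ids
+%% received in a `deliver' action (the id lists are hypothetical):
+%% ```
+%% {State1, _} = rabbit_fifo_client:settle(Tag, AckedIds, State0),
+%% {State2, _} = rabbit_fifo_client:return(Tag, RequeuedIds, State1),
+%% {State3, _} = rabbit_fifo_client:discard(Tag, RejectedIds, State2),
+%% '''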
+
+%% @doc Register with the rabbit_fifo queue to "checkout" messages as they
+%% become available.
+%%
+%% This is a synchronous call. I.e. the call will block until the command
+%% has been accepted by the ra process or it times out.
+%%
+%% @param ConsumerTag a unique tag to identify this particular consumer.
+%% @param NumUnsettled the maximum number of in-flight messages. Once this
+%% number of messages has been received but not settled no further messages
+%% will be delivered to the consumer.
+%% @param CreditMode The credit mode to use for the checkout.
+%% simple_prefetch: credit is auto topped up as deliveries are settled
+%% credited: credit is only increased by sending credit to the queue
+%% @param State The {@module} state.
+%%
+%% @returns `{ok, State}' or `{error | timeout, term()}'
+-spec checkout(rabbit_fifo:consumer_tag(),
+ NumUnsettled :: non_neg_integer(),
+ CreditMode :: rabbit_fifo:credit_mode(),
+ Meta :: rabbit_fifo:consumer_meta(),
+ state()) -> {ok, state()} | {error | timeout, term()}.
+checkout(ConsumerTag, NumUnsettled, CreditMode, Meta,
+ #state{consumer_deliveries = CDels0} = State0) ->
+ Servers = sorted_servers(State0),
+ ConsumerId = {ConsumerTag, self()},
+ Cmd = rabbit_fifo:make_checkout(ConsumerId,
+ {auto, NumUnsettled, CreditMode},
+ Meta),
+    %% the ack mode is carried in the consumer meta and defaults to true
+ Ack = maps:get(ack, Meta, true),
+
+ SDels = maps:update_with(ConsumerTag,
+ fun (V) ->
+ V#consumer{ack = Ack}
+ end,
+ #consumer{last_msg_id = -1,
+ ack = Ack}, CDels0),
+ try_process_command(Servers, Cmd, State0#state{consumer_deliveries = SDels}).
+
+%% @doc Provide credit to the queue
+%%
+%% This only has an effect if the consumer uses credit mode: credited
+%% @param ConsumerTag a unique tag to identify this particular consumer.
+%% @param Credit the amount of credit to provide to the queue
+%% @param Drain tells the queue to use up any credit that cannot be immediately
+%% fulfilled. (i.e. there are not enough messages on queue to use up all the
+%% provided credit).
+-spec credit(rabbit_fifo:consumer_tag(),
+ Credit :: non_neg_integer(),
+ Drain :: boolean(),
+ state()) ->
+ {state(), actions()}.
+credit(ConsumerTag, Credit, Drain,
+ #state{consumer_deliveries = CDels} = State0) ->
+ ConsumerId = consumer_id(ConsumerTag),
+    %% the last received msg id plus one gives the delivery count,
+    %% as msg ids are 0-indexed
+ C = maps:get(ConsumerTag, CDels, #consumer{last_msg_id = -1}),
+ Node = pick_server(State0),
+ Cmd = rabbit_fifo:make_credit(ConsumerId, Credit,
+ C#consumer.last_msg_id + 1, Drain),
+ case send_command(Node, undefined, Cmd, normal, State0) of
+ {_, S} ->
+ % turn slow into ok for this function
+ {S, []}
+ end.
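+
+%% A sketch of a consumer using the `credited' mode, granting ten message
+%% credits and draining any remainder:
+%% ```
+%% {ok, State1} = rabbit_fifo_client:checkout(<<"ctag-1">>, 0, credited,
+%%                                            #{}, State0),
+%% {State2, _Actions} = rabbit_fifo_client:credit(<<"ctag-1">>, 10, true,
+%%                                                State1),
+%% '''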
+
+%% @doc Cancels a checkout with the rabbit_fifo queue for the consumer tag
+%%
+%% This is a synchronous call. I.e. the call will block until the command
+%% has been accepted by the ra process or it times out.
+%%
+%% @param ConsumerTag a unique tag to identify this particular consumer.
+%% @param State The {@module} state.
+%%
+%% @returns `{ok, State}' or `{error | timeout, term()}'
+-spec cancel_checkout(rabbit_fifo:consumer_tag(), state()) ->
+ {ok, state()} | {error | timeout, term()}.
+cancel_checkout(ConsumerTag, #state{consumer_deliveries = CDels} = State0) ->
+ Servers = sorted_servers(State0),
+ ConsumerId = {ConsumerTag, self()},
+ Cmd = rabbit_fifo:make_checkout(ConsumerId, cancel, #{}),
+ State = State0#state{consumer_deliveries = maps:remove(ConsumerTag, CDels)},
+ try_process_command(Servers, Cmd, State).
+
+%% @doc Purges all the messages from a rabbit_fifo queue and returns the number
+%% of messages purged.
+-spec purge(ra:server_id()) -> {ok, non_neg_integer()} | {error | timeout, term()}.
+purge(Node) ->
+ case ra:process_command(Node, rabbit_fifo:make_purge()) of
+ {ok, {purge, Reply}, _} ->
+ {ok, Reply};
+ Err ->
+ Err
+ end.
+
+-spec pending_size(state()) -> non_neg_integer().
+pending_size(#state{pending = Pend}) ->
+ maps:size(Pend).
+
+-spec stat(ra:server_id()) ->
+ {ok, non_neg_integer(), non_neg_integer()}
+ | {error | timeout, term()}.
+stat(Leader) ->
+ %% short timeout as we don't want to spend too long if it is going to
+ %% fail anyway
+ stat(Leader, 250).
+
+-spec stat(ra:server_id(), non_neg_integer()) ->
+ {ok, non_neg_integer(), non_neg_integer()}
+ | {error | timeout, term()}.
+stat(Leader, Timeout) ->
+ %% short timeout as we don't want to spend too long if it is going to
+ %% fail anyway
+ case ra:local_query(Leader, fun rabbit_fifo:query_stat/1, Timeout) of
+ {ok, {_, {R, C}}, _} -> {ok, R, C};
+ {error, _} = Error -> Error;
+ {timeout, _} = Error -> Error
+ end.
+
+%% @doc returns the cluster name
+-spec cluster_name(state()) -> cluster_name().
+cluster_name(#state{cfg = #cfg{cluster_name = ClusterName}}) ->
+ ClusterName.
+
+update_machine_state(Server, Conf) ->
+ case ra:process_command(Server, rabbit_fifo:make_update_config(Conf)) of
+ {ok, ok, _} ->
+ ok;
+ Err ->
+ Err
+ end.
+
+%% @doc Handles incoming `ra_events'. Events carry both internal "bookkeeping"
+%% events emitted by the `ra' leader and `rabbit_fifo' emitted events such
+%% as message deliveries. All ra events need to be handled by {@module}
+%% to ensure bookkeeping, resends and flow control are correctly handled.
+%%
+%% If the `ra_event' contains a `rabbit_fifo' generated message it will be returned
+%% for further processing.
+%%
+%% Example:
+%%
+%% ```
+%% receive
+%% {ra_event, From, Evt} ->
+%% case rabbit_fifo_client:handle_ra_event(From, Evt, State0) of
+%% {internal, _Seq, State} -> State;
+%% {{delivery, _ConsumerTag, Msgs}, State} ->
+%% handle_messages(Msgs),
+%% ...
+%% end
+%% end
+%% '''
+%%
+%% @param From the {@link ra:server_id().} of the sending process.
+%% @param Event the body of the `ra_event'.
+%% @param State the current {@module} state.
+%%
+%% @returns
+%% `{internal, AppliedCorrelations, State}' if the event contained an internally
+%% handled event such as a notification and a correlation was included with
+%% the command (e.g. in a call to `enqueue/3' the correlation terms are returned
+%% here).
+%%
+%% `{RaFifoEvent, State}' if the event contained a client message generated by
+%% the `rabbit_fifo' state machine such as a delivery.
+%%
+%% The type of `rabbit_fifo' client messages that can be received are:
+%%
+%% `{delivery, ConsumerTag, [{MsgId, {MsgHeader, Msg}}]}'
+%%
+%% <li>`ConsumerTag' the binary tag passed to {@link checkout/5.}</li>
+%% <li>`MsgId' is a consumer scoped monotonically incrementing id that can be
+%% used with {@link settle/3.} (roughly: AMQP 0.9.1 ack) once the messages
+%% have been processed.</li>
+-spec handle_ra_event(ra:server_id(), ra_server_proc:ra_event_body(), state()) ->
+ {internal, Correlators :: [term()], actions(), state()} |
+ {rabbit_fifo:client_msg(), state()} | eol.
+handle_ra_event(From, {applied, Seqs},
+ #state{cfg = #cfg{cluster_name = QRef,
+ soft_limit = SftLmt,
+ unblock_handler = UnblockFun}} = State0) ->
+
+ {Corrs, Actions0, State1} = lists:foldl(fun seq_applied/2,
+ {[], [], State0#state{leader = From}},
+ Seqs),
+ Actions = case Corrs of
+ [] ->
+ lists:reverse(Actions0);
+ _ ->
+ [{settled, QRef, Corrs}
+ | lists:reverse(Actions0)]
+ end,
+ case maps:size(State1#state.pending) < SftLmt of
+ true when State1#state.slow == true ->
+ % we have exited soft limit state
+            % send any unsent commands and cancel the timer
+ % TODO: really the timer should only be cancelled when the channel
+ % exits flow state (which depends on the state of all queues the
+ % channel is interacting with)
+ % but the fact the queue has just applied suggests
+ % it's ok to cancel here anyway
+ State2 = cancel_timer(State1#state{slow = false,
+ unsent_commands = #{}}),
+ % build up a list of commands to issue
+ Commands = maps:fold(
+ fun (Cid, {Settled, Returns, Discards}, Acc) ->
+ add_command(Cid, settle, Settled,
+ add_command(Cid, return, Returns,
+ add_command(Cid, discard,
+ Discards, Acc)))
+ end, [], State1#state.unsent_commands),
+ Node = pick_server(State2),
+ %% send all the settlements and returns
+ State = lists:foldl(fun (C, S0) ->
+ case send_command(Node, undefined,
+ C, normal, S0) of
+ {T, S} when T =/= error ->
+ S
+ end
+ end, State2, Commands),
+ UnblockFun(),
+ {ok, State, Actions};
+ _ ->
+ {ok, State1, Actions}
+ end;
+handle_ra_event(From, {machine, {delivery, _ConsumerTag, _} = Del}, State0) ->
+ handle_delivery(From, Del, State0);
+handle_ra_event(_, {machine, {queue_status, Status}},
+ #state{} = State) ->
+ %% just set the queue status
+ {ok, State#state{queue_status = Status}, []};
+handle_ra_event(Leader, {machine, leader_change},
+ #state{leader = Leader} = State) ->
+ %% leader already known
+ {ok, State, []};
+handle_ra_event(Leader, {machine, leader_change}, State0) ->
+ %% we need to update leader
+ %% and resend any pending commands
+ State = resend_all_pending(State0#state{leader = Leader}),
+ {ok, State, []};
+handle_ra_event(_From, {rejected, {not_leader, undefined, _Seq}}, State0) ->
+ % TODO: how should these be handled? re-sent on timer or try random
+ {ok, State0, []};
+handle_ra_event(_From, {rejected, {not_leader, Leader, Seq}}, State0) ->
+ State1 = State0#state{leader = Leader},
+ State = resend(Seq, State1),
+ {ok, State, []};
+handle_ra_event(_, timeout, #state{cfg = #cfg{servers = Servers}} = State0) ->
+ case find_leader(Servers) of
+ undefined ->
+ %% still no leader, set the timer again
+ {ok, set_timer(State0), []};
+ Leader ->
+ State = resend_all_pending(State0#state{leader = Leader}),
+ {ok, State, []}
+ end;
+handle_ra_event(_Leader, {machine, eol}, _State0) ->
+ eol.
+
+%% @doc Attempts to enqueue a message using cast semantics. This provides no
+%% guarantees or retries if the message fails to achieve consensus or if the
+%% server sent to happens not to be available. If the message is sent to a
+%% follower it will attempt to deliver it to the leader, if known; otherwise
+%% the message is dropped.
+%%
+%% NB: only use this for non-critical enqueues where a full rabbit_fifo_client state
+%% cannot be maintained.
+%%
+%% @param Servers the known servers in the cluster.
+%% @param Msg the message to enqueue.
+%%
+%% @returns `ok'
+-spec untracked_enqueue([ra:server_id()], term()) ->
+ ok.
+untracked_enqueue([Node | _], Msg) ->
+ Cmd = rabbit_fifo:make_enqueue(undefined, undefined, Msg),
+ ok = ra:pipeline_command(Node, Cmd),
+ ok.
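+
+%% A sketch of a fire-and-forget enqueue to the first known server:
+%% ```
+%% ok = rabbit_fifo_client:untracked_enqueue([{qq1, node()}], Msg),
+%% '''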
+
+%% Internal
+
+try_process_command([Server | Rem], Cmd, State) ->
+ case ra:process_command(Server, Cmd, 30000) of
+ {ok, _, Leader} ->
+ {ok, State#state{leader = Leader}};
+ Err when length(Rem) =:= 0 ->
+ Err;
+ _ ->
+ try_process_command(Rem, Cmd, State)
+ end.
+
+seq_applied({Seq, MaybeAction},
+ {Corrs, Actions0, #state{last_applied = Last} = State0})
+ when Seq > Last ->
+ State1 = do_resends(Last+1, Seq-1, State0),
+ {Actions, State} = maybe_add_action(MaybeAction, Actions0, State1),
+ case maps:take(Seq, State#state.pending) of
+ {{undefined, _}, Pending} ->
+ {Corrs, Actions, State#state{pending = Pending,
+ last_applied = Seq}};
+ {{Corr, _}, Pending} ->
+ {[Corr | Corrs], Actions, State#state{pending = Pending,
+ last_applied = Seq}};
+ error ->
+ % must have already been resent or removed for some other reason
+ % still need to update last_applied or we may inadvertently resend
+ % stuff later
+ {Corrs, Actions, State#state{last_applied = Seq}}
+ end;
+seq_applied(_Seq, Acc) ->
+ Acc.
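+
+%% e.g. if `last_applied' is 3 and an applied notification for seq 7
+%% arrives, seqs 4..6 are assumed lost and resent (do_resends(4, 6, S))
+%% before 7 is recorded as applied.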
+
+maybe_add_action(ok, Acc, State) ->
+ {Acc, State};
+maybe_add_action({multi, Actions}, Acc0, State0) ->
+ lists:foldl(fun (Act, {Acc, State}) ->
+ maybe_add_action(Act, Acc, State)
+ end, {Acc0, State0}, Actions);
+maybe_add_action({send_drained, {Tag, Credit}} = Action, Acc,
+ #state{consumer_deliveries = CDels} = State) ->
+ %% add credit to consumer delivery_count
+ C = maps:get(Tag, CDels),
+ {[Action | Acc],
+ State#state{consumer_deliveries =
+ update_consumer(Tag, C#consumer.last_msg_id,
+ Credit, C, CDels)}};
+maybe_add_action(Action, Acc, State) ->
+ %% anything else is assumed to be an action
+ {[Action | Acc], State}.
+
+do_resends(From, To, State) when From =< To ->
+ % ?INFO("rabbit_fifo_client: doing resends From ~w To ~w~n", [From, To]),
+ lists:foldl(fun resend/2, State, lists:seq(From, To));
+do_resends(_, _, State) ->
+ State.
+
+% resends a command with a new sequence number
+resend(OldSeq, #state{pending = Pending0, leader = Leader} = State) ->
+ case maps:take(OldSeq, Pending0) of
+ {{Corr, Cmd}, Pending} ->
+ %% resends aren't subject to flow control here
+ resend_command(Leader, Corr, Cmd, State#state{pending = Pending});
+ error ->
+ State
+ end.
+
+resend_all_pending(#state{pending = Pend} = State) ->
+ Seqs = lists:sort(maps:keys(Pend)),
+ lists:foldl(fun resend/2, State, Seqs).
+
+maybe_auto_ack(true, Deliver, State0) ->
+ %% manual ack is enabled
+ {ok, State0, [Deliver]};
+maybe_auto_ack(false, {deliver, Tag, _Ack, Msgs} = Deliver, State0) ->
+ %% we have to auto ack these deliveries
+ MsgIds = [I || {_, _, I, _, _} <- Msgs],
+ {State, Actions} = settle(Tag, MsgIds, State0),
+ {ok, State, [Deliver] ++ Actions}.
+
+
+handle_delivery(Leader, {delivery, Tag, [{FstId, _} | _] = IdMsgs},
+ #state{cfg = #cfg{cluster_name = QName},
+ consumer_deliveries = CDels0} = State0) ->
+ QRef = qref(Leader),
+ {LastId, _} = lists:last(IdMsgs),
+ Consumer = #consumer{ack = Ack} = maps:get(Tag, CDels0),
+ %% format as a deliver action
+ Del = {deliver, Tag, Ack, transform_msgs(QName, QRef, IdMsgs)},
+ %% TODO: remove potential default allocation
+ case Consumer of
+ #consumer{last_msg_id = Prev} = C
+ when FstId =:= Prev+1 ->
+ maybe_auto_ack(Ack, Del,
+ State0#state{consumer_deliveries =
+ update_consumer(Tag, LastId,
+ length(IdMsgs), C,
+ CDels0)});
+ #consumer{last_msg_id = Prev} = C
+ when FstId > Prev+1 ->
+            %% msg ids Prev+1 .. FstId-1 are missing
+            NumMissing = FstId - Prev - 1,
+            %% there may actually be fewer missing messages returned than expected.
+ %% This can happen when a node the channel is on gets disconnected
+ %% from the node the leader is on and then reconnected afterwards.
+ %% When the node is disconnected the leader will return all checked
+ %% out messages to the main queue to ensure they don't get stuck in
+ %% case the node never comes back.
+ case get_missing_deliveries(Leader, Prev+1, FstId-1, Tag) of
+ {protocol_error, _, _, _} = Err ->
+ Err;
+ Missing ->
+ XDel = {deliver, Tag, Ack, transform_msgs(QName, QRef,
+ Missing ++ IdMsgs)},
+ maybe_auto_ack(Ack, XDel,
+ State0#state{consumer_deliveries =
+ update_consumer(Tag, LastId,
+ length(IdMsgs) + NumMissing,
+ C, CDels0)})
+ end;
+ #consumer{last_msg_id = Prev}
+ when FstId =< Prev ->
+ case lists:dropwhile(fun({Id, _}) -> Id =< Prev end, IdMsgs) of
+ [] ->
+ {ok, State0, []};
+ IdMsgs2 ->
+ handle_delivery(Leader, {delivery, Tag, IdMsgs2}, State0)
+ end;
+ C when FstId =:= 0 ->
+ % the very first delivery
+ maybe_auto_ack(Ack, Del,
+ State0#state{consumer_deliveries =
+ update_consumer(Tag, LastId,
+ length(IdMsgs),
+ C#consumer{last_msg_id = LastId},
+ CDels0)})
+ end.
+
+transform_msgs(QName, QRef, Msgs) ->
+ lists:map(
+ fun({MsgId, {MsgHeader, Msg0}}) ->
+ {Msg, Redelivered} = case MsgHeader of
+ #{delivery_count := C} ->
+ {add_delivery_count_header(Msg0, C), true};
+ _ ->
+ {Msg0, false}
+ end,
+ {QName, QRef, MsgId, Redelivered, Msg}
+ end, Msgs).
+
+update_consumer(Tag, LastId, DelCntIncr,
+ #consumer{delivery_count = D} = C, Consumers) ->
+ maps:put(Tag,
+ C#consumer{last_msg_id = LastId,
+ delivery_count = D + DelCntIncr},
+ Consumers).
+
+
+get_missing_deliveries(Leader, From, To, ConsumerTag) ->
+ ConsumerId = consumer_id(ConsumerTag),
+ % ?INFO("get_missing_deliveries for ~w from ~b to ~b",
+ % [ConsumerId, From, To]),
+ Query = fun (State) ->
+ rabbit_fifo:get_checked_out(ConsumerId, From, To, State)
+ end,
+ case ra:local_query(Leader, Query) of
+ {ok, {_, Missing}, _} ->
+ Missing;
+ {error, Error} ->
+ {protocol_error, internal_error, "Cannot query missing deliveries from ~p: ~p",
+ [Leader, Error]};
+ {timeout, _} ->
+ {protocol_error, internal_error, "Cannot query missing deliveries from ~p: timeout",
+ [Leader]}
+ end.
+
+pick_server(#state{leader = undefined,
+ cfg = #cfg{servers = [N | _]}}) ->
+    %% TODO: pick at random rather than first?
+ N;
+pick_server(#state{leader = Leader}) ->
+ Leader.
+
+% servers sorted by last known leader
+sorted_servers(#state{leader = undefined,
+ cfg = #cfg{servers = Servers}}) ->
+ Servers;
+sorted_servers(#state{leader = Leader,
+ cfg = #cfg{servers = Servers}}) ->
+ [Leader | lists:delete(Leader, Servers)].
+
+next_seq(#state{next_seq = Seq} = State) ->
+ {Seq, State#state{next_seq = Seq + 1}}.
+
+next_enqueue_seq(#state{next_enqueue_seq = Seq} = State) ->
+ {Seq, State#state{next_enqueue_seq = Seq + 1}}.
+
+consumer_id(ConsumerTag) ->
+ {ConsumerTag, self()}.
+
+send_command(Server, Correlation, Command, Priority,
+ #state{pending = Pending,
+ cfg = #cfg{soft_limit = SftLmt}} = State0) ->
+ {Seq, State} = next_seq(State0),
+ ok = ra:pipeline_command(Server, Command, Seq, Priority),
+ Tag = case maps:size(Pending) >= SftLmt of
+ true -> slow;
+ false -> ok
+ end,
+ {Tag, State#state{pending = Pending#{Seq => {Correlation, Command}},
+ slow = Tag == slow}}.
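+
+%% e.g. with the default soft limit of 32, once 32 commands are pending
+%% (sent but not yet applied) the returned tag flips from `ok' to `slow',
+%% which callers use to invoke the block handler and stash further
+%% settles, returns and discards until enough applied notifications arrive.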
+
+resend_command(Node, Correlation, Command,
+ #state{pending = Pending} = State0) ->
+ {Seq, State} = next_seq(State0),
+ ok = ra:pipeline_command(Node, Command, Seq),
+ State#state{pending = Pending#{Seq => {Correlation, Command}}}.
+
+add_command(_, _, [], Acc) ->
+ Acc;
+add_command(Cid, settle, MsgIds, Acc) ->
+ [rabbit_fifo:make_settle(Cid, MsgIds) | Acc];
+add_command(Cid, return, MsgIds, Acc) ->
+ [rabbit_fifo:make_return(Cid, MsgIds) | Acc];
+add_command(Cid, discard, MsgIds, Acc) ->
+ [rabbit_fifo:make_discard(Cid, MsgIds) | Acc].
+
+set_timer(#state{leader = Leader0,
+ cfg = #cfg{servers = [Server | _],
+ cluster_name = QName}} = State) ->
+ Leader = case Leader0 of
+ undefined -> Server;
+ _ ->
+ Leader0
+ end,
+ Ref = erlang:send_after(?TIMER_TIME, self(),
+ {'$gen_cast',
+ {queue_event, QName, {Leader, timeout}}}),
+ State#state{timer_state = Ref}.
+
+cancel_timer(#state{timer_state = undefined} = State) ->
+ State;
+cancel_timer(#state{timer_state = Ref} = State) ->
+ erlang:cancel_timer(Ref, [{async, true}, {info, false}]),
+ State#state{timer_state = undefined}.
+
+find_leader([]) ->
+ undefined;
+find_leader([Server | Servers]) ->
+ case ra:members(Server, 500) of
+ {ok, _, Leader} -> Leader;
+ _ ->
+ find_leader(Servers)
+ end.
+
+qref({Ref, _}) -> Ref;
+qref(Ref) -> Ref.
diff --git a/deps/rabbit/src/rabbit_fifo_index.erl b/deps/rabbit/src/rabbit_fifo_index.erl
new file mode 100644
index 0000000000..14ac89faff
--- /dev/null
+++ b/deps/rabbit/src/rabbit_fifo_index.erl
@@ -0,0 +1,119 @@
+-module(rabbit_fifo_index).
+
+-export([
+ empty/0,
+ exists/2,
+ append/2,
+ delete/2,
+ size/1,
+ smallest/1,
+ map/2
+ ]).
+
+-compile({no_auto_import, [size/1]}).
+
+%% the empty atom is a lot smaller (4 bytes) than e.g. `undefined` (13 bytes).
+%% This matters as the data map gets persisted as part of the snapshot
+-define(NIL, '').
+
+-record(?MODULE, {data = #{} :: #{integer() => ?NIL},
+ smallest :: undefined | non_neg_integer(),
+ largest :: undefined | non_neg_integer()
+ }).
+
+
+-opaque state() :: #?MODULE{}.
+
+-export_type([state/0]).
+
+-spec empty() -> state().
+empty() ->
+ #?MODULE{}.
+
+-spec exists(integer(), state()) -> boolean().
+exists(Key, #?MODULE{data = Data}) ->
+ maps:is_key(Key, Data).
+
+% only integer keys are supported
+-spec append(integer(), state()) -> state().
+append(Key,
+ #?MODULE{data = Data,
+ smallest = Smallest,
+ largest = Largest} = State)
+ when Key > Largest orelse Largest =:= undefined ->
+ State#?MODULE{data = maps:put(Key, ?NIL, Data),
+ smallest = ra_lib:default(Smallest, Key),
+ largest = Key}.
+
+-spec delete(Index :: integer(), state()) -> state().
+delete(Smallest, #?MODULE{data = Data0,
+ largest = Largest,
+ smallest = Smallest} = State) ->
+ Data = maps:remove(Smallest, Data0),
+ case find_next(Smallest + 1, Largest, Data) of
+ undefined ->
+ State#?MODULE{data = Data,
+ smallest = undefined,
+ largest = undefined};
+ Next ->
+ State#?MODULE{data = Data, smallest = Next}
+ end;
+delete(Key, #?MODULE{data = Data} = State) ->
+ State#?MODULE{data = maps:remove(Key, Data)}.
+
+-spec size(state()) -> non_neg_integer().
+size(#?MODULE{data = Data}) ->
+ maps:size(Data).
+
+-spec smallest(state()) -> undefined | integer().
+smallest(#?MODULE{smallest = Smallest}) ->
+ Smallest.
+
+
+-spec map(fun(), state()) -> state().
+map(F, #?MODULE{data = Data} = State) ->
+ State#?MODULE{data = maps:map(F, Data)}.
+
+
+%% internal
+
+find_next(Next, Last, _Map) when Next > Last ->
+ undefined;
+find_next(Next, Last, Map) ->
+ case Map of
+ #{Next := _} ->
+ Next;
+ _ ->
+ % in degenerate cases the range here could be very large
+ % and hence this could be very slow
+ % the typical case should ideally be better
+ % assuming fifo-ish deletion of entries
+ find_next(Next+1, Last, Map)
+ end.
+
+-ifdef(TEST).
+-include_lib("eunit/include/eunit.hrl").
+
+append_test() ->
+ S0 = empty(),
+ false = exists(99, S0),
+ undefined = smallest(S0),
+ 0 = size(S0),
+ S1 = append(1, S0),
+ false = exists(99, S1),
+ true = exists(1, S1),
+ 1 = size(S1),
+ 1 = smallest(S1),
+ S2 = append(2, S1),
+ true = exists(2, S2),
+ 2 = size(S2),
+ 1 = smallest(S2),
+ S3 = delete(1, S2),
+ 2 = smallest(S3),
+ 1 = size(S3),
+ S5 = delete(2, S3),
+ undefined = smallest(S5),
+    0 = size(S5),
+ ok.
+
+-endif.
diff --git a/deps/rabbit/src/rabbit_fifo_v0.erl b/deps/rabbit/src/rabbit_fifo_v0.erl
new file mode 100644
index 0000000000..a61f42616d
--- /dev/null
+++ b/deps/rabbit/src/rabbit_fifo_v0.erl
@@ -0,0 +1,1961 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at https://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_fifo_v0).
+
+-behaviour(ra_machine).
+
+-compile(inline_list_funcs).
+-compile(inline).
+-compile({no_auto_import, [apply/3]}).
+
+-include("rabbit_fifo_v0.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-export([
+ init/1,
+ apply/3,
+ state_enter/2,
+ tick/2,
+ overview/1,
+ get_checked_out/4,
+ %% aux
+ init_aux/1,
+ handle_aux/6,
+ % queries
+ query_messages_ready/1,
+ query_messages_checked_out/1,
+ query_messages_total/1,
+ query_processes/1,
+ query_ra_indexes/1,
+ query_consumer_count/1,
+ query_consumers/1,
+ query_stat/1,
+ query_single_active_consumer/1,
+ query_in_memory_usage/1,
+ usage/1,
+
+ zero/1,
+
+ %% misc
+ dehydrate_state/1,
+ normalize/1,
+ normalize_for_v1/1,
+         %% getters for conversions
+ get_field/2,
+ get_cfg_field/2,
+
+ %% protocol helpers
+ make_enqueue/3,
+ make_checkout/3,
+ make_settle/2,
+ make_return/2,
+ make_discard/2,
+ make_credit/4,
+ make_purge/0,
+ make_purge_nodes/1,
+ make_update_config/1
+ ]).
+
+%% command records representing all the protocol actions that are supported
+-record(enqueue, {pid :: option(pid()),
+ seq :: option(msg_seqno()),
+ msg :: raw_msg()}).
+-record(checkout, {consumer_id :: consumer_id(),
+ spec :: checkout_spec(),
+ meta :: consumer_meta()}).
+-record(settle, {consumer_id :: consumer_id(),
+ msg_ids :: [msg_id()]}).
+-record(return, {consumer_id :: consumer_id(),
+ msg_ids :: [msg_id()]}).
+-record(discard, {consumer_id :: consumer_id(),
+ msg_ids :: [msg_id()]}).
+-record(credit, {consumer_id :: consumer_id(),
+ credit :: non_neg_integer(),
+ delivery_count :: non_neg_integer(),
+ drain :: boolean()}).
+-record(purge, {}).
+-record(purge_nodes, {nodes :: [node()]}).
+-record(update_config, {config :: config()}).
+
+-opaque protocol() ::
+ #enqueue{} |
+ #checkout{} |
+ #settle{} |
+ #return{} |
+ #discard{} |
+ #credit{} |
+ #purge{} |
+ #purge_nodes{} |
+ #update_config{}.
+
+-type command() :: protocol() | ra_machine:builtin_command().
+%% all the command types supported by ra fifo
+
+-type client_msg() :: delivery().
+%% the messages `rabbit_fifo' can send to consumers.
+
+-opaque state() :: #?STATE{}.
+
+-export_type([protocol/0,
+ delivery/0,
+ command/0,
+ credit_mode/0,
+ consumer_tag/0,
+ consumer_meta/0,
+ consumer_id/0,
+ client_msg/0,
+ msg/0,
+ msg_id/0,
+ msg_seqno/0,
+ delivery_msg/0,
+ state/0,
+ config/0]).
+
+-spec init(config()) -> state().
+init(#{name := Name,
+ queue_resource := Resource} = Conf) ->
+ update_config(Conf, #?STATE{cfg = #cfg{name = Name,
+ resource = Resource}}).
+
+update_config(Conf, State) ->
+ DLH = maps:get(dead_letter_handler, Conf, undefined),
+ BLH = maps:get(become_leader_handler, Conf, undefined),
+ SHI = maps:get(release_cursor_interval, Conf, ?RELEASE_CURSOR_EVERY),
+ MaxLength = maps:get(max_length, Conf, undefined),
+ MaxBytes = maps:get(max_bytes, Conf, undefined),
+ MaxMemoryLength = maps:get(max_in_memory_length, Conf, undefined),
+ MaxMemoryBytes = maps:get(max_in_memory_bytes, Conf, undefined),
+ DeliveryLimit = maps:get(delivery_limit, Conf, undefined),
+ ConsumerStrategy = case maps:get(single_active_consumer_on, Conf, false) of
+ true ->
+ single_active;
+ false ->
+ competing
+ end,
+ Cfg = State#?STATE.cfg,
+ SHICur = case State#?STATE.cfg of
+ #cfg{release_cursor_interval = {_, C}} ->
+ C;
+ #cfg{release_cursor_interval = undefined} ->
+ SHI;
+ #cfg{release_cursor_interval = C} ->
+ C
+ end,
+
+ State#?STATE{cfg = Cfg#cfg{release_cursor_interval = {SHI, SHICur},
+ dead_letter_handler = DLH,
+ become_leader_handler = BLH,
+ max_length = MaxLength,
+ max_bytes = MaxBytes,
+ max_in_memory_length = MaxMemoryLength,
+ max_in_memory_bytes = MaxMemoryBytes,
+ consumer_strategy = ConsumerStrategy,
+ delivery_limit = DeliveryLimit}}.
+
+zero(_) ->
+ 0.
+
+% msg_ids are scoped per consumer
+% ra_indexes holds all raft indexes for enqueues currently on queue
+-spec apply(ra_machine:command_meta_data(), command(), state()) ->
+ {state(), Reply :: term(), ra_machine:effects()} |
+ {state(), Reply :: term()}.
+apply(Metadata, #enqueue{pid = From, seq = Seq,
+ msg = RawMsg}, State00) ->
+ apply_enqueue(Metadata, From, Seq, RawMsg, State00);
+apply(Meta,
+ #settle{msg_ids = MsgIds, consumer_id = ConsumerId},
+ #?STATE{consumers = Cons0} = State) ->
+ case Cons0 of
+ #{ConsumerId := Con0} ->
+ % need to increment metrics before completing as any snapshot
+ % states taken need to include them
+ complete_and_checkout(Meta, MsgIds, ConsumerId,
+ Con0, [], State);
+ _ ->
+ {State, ok}
+
+ end;
+apply(Meta, #discard{msg_ids = MsgIds, consumer_id = ConsumerId},
+ #?STATE{consumers = Cons0} = State0) ->
+ case Cons0 of
+ #{ConsumerId := Con0} ->
+ Discarded = maps:with(MsgIds, Con0#consumer.checked_out),
+ Effects = dead_letter_effects(rejected, Discarded, State0, []),
+ complete_and_checkout(Meta, MsgIds, ConsumerId, Con0,
+ Effects, State0);
+ _ ->
+ {State0, ok}
+ end;
+apply(Meta, #return{msg_ids = MsgIds, consumer_id = ConsumerId},
+ #?STATE{consumers = Cons0} = State) ->
+ case Cons0 of
+ #{ConsumerId := #consumer{checked_out = Checked0}} ->
+ Returned = maps:with(MsgIds, Checked0),
+ return(Meta, ConsumerId, Returned, [], State);
+ _ ->
+ {State, ok}
+ end;
+apply(Meta, #credit{credit = NewCredit, delivery_count = RemoteDelCnt,
+ drain = Drain, consumer_id = ConsumerId},
+ #?STATE{consumers = Cons0,
+ service_queue = ServiceQueue0,
+ waiting_consumers = Waiting0} = State0) ->
+ case Cons0 of
+ #{ConsumerId := #consumer{delivery_count = DelCnt} = Con0} ->
+ %% this can go below 0 when credit is reduced
+ C = max(0, RemoteDelCnt + NewCredit - DelCnt),
+ %% grant the credit
+ Con1 = Con0#consumer{credit = C},
+ ServiceQueue = maybe_queue_consumer(ConsumerId, Con1,
+ ServiceQueue0),
+ Cons = maps:put(ConsumerId, Con1, Cons0),
+ {State1, ok, Effects} =
+ checkout(Meta, State0#?STATE{service_queue = ServiceQueue,
+ consumers = Cons}, []),
+ Response = {send_credit_reply, messages_ready(State1)},
+ %% by this point all checkouts for the updated credit value
+ %% should be processed so we can evaluate the drain
+ case Drain of
+ false ->
+ %% just return the result of the checkout
+ {State1, Response, Effects};
+ true ->
+ Con = #consumer{credit = PostCred} =
+ maps:get(ConsumerId, State1#?STATE.consumers),
+ %% add the outstanding credit to the delivery count
+ DeliveryCount = Con#consumer.delivery_count + PostCred,
+ Consumers = maps:put(ConsumerId,
+ Con#consumer{delivery_count = DeliveryCount,
+ credit = 0},
+ State1#?STATE.consumers),
+ Drained = Con#consumer.credit,
+ {CTag, _} = ConsumerId,
+ {State1#?STATE{consumers = Consumers},
+ %% returning a multi response with two client actions
+ %% for the channel to execute
+ {multi, [Response, {send_drained, {CTag, Drained}}]},
+ Effects}
+ end;
+ _ when Waiting0 /= [] ->
+            %% there are waiting consumers
+ case lists:keytake(ConsumerId, 1, Waiting0) of
+ {value, {_, Con0 = #consumer{delivery_count = DelCnt}}, Waiting} ->
+ %% the consumer is a waiting one
+ %% grant the credit
+ C = max(0, RemoteDelCnt + NewCredit - DelCnt),
+ Con = Con0#consumer{credit = C},
+ State = State0#?STATE{waiting_consumers =
+ [{ConsumerId, Con} | Waiting]},
+ {State, {send_credit_reply, messages_ready(State)}};
+ false ->
+ {State0, ok}
+ end;
+ _ ->
+ %% credit for unknown consumer - just ignore
+ {State0, ok}
+ end;
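+%% e.g. a credit command with RemoteDelCnt = 5 and NewCredit = 10 applied
+%% when the consumer's local delivery_count is 7 grants
+%% max(0, 5 + 10 - 7) = 8 credits, compensating for the two deliveries
+%% still in flight.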
+apply(_, #checkout{spec = {dequeue, _}},
+ #?STATE{cfg = #cfg{consumer_strategy = single_active}} = State0) ->
+ {State0, {error, unsupported}};
+apply(#{from := From} = Meta, #checkout{spec = {dequeue, Settlement},
+ meta = ConsumerMeta,
+ consumer_id = ConsumerId},
+ #?STATE{consumers = Consumers} = State0) ->
+ Exists = maps:is_key(ConsumerId, Consumers),
+ case messages_ready(State0) of
+ 0 ->
+ {State0, {dequeue, empty}};
+ _ when Exists ->
+ %% a dequeue using the same consumer_id isn't possible at this point
+ {State0, {dequeue, empty}};
+ Ready ->
+ State1 = update_consumer(ConsumerId, ConsumerMeta,
+ {once, 1, simple_prefetch},
+ State0),
+ {success, _, MsgId, Msg, State2} = checkout_one(State1),
+ {State, Effects} = case Settlement of
+ unsettled ->
+ {_, Pid} = ConsumerId,
+ {State2, [{monitor, process, Pid}]};
+ settled ->
+ %% immediately settle the checkout
+ {State3, _, Effects0} =
+ apply(Meta, make_settle(ConsumerId, [MsgId]),
+ State2),
+ {State3, Effects0}
+ end,
+ case Msg of
+ {RaftIdx, {Header, 'empty'}} ->
+ %% TODO add here new log effect with reply
+ {State, '$ra_no_reply',
+ reply_log_effect(RaftIdx, MsgId, Header, Ready - 1, From)};
+ _ ->
+ {State, {dequeue, {MsgId, Msg}, Ready-1}, Effects}
+ end
+ end;
+apply(Meta, #checkout{spec = cancel, consumer_id = ConsumerId}, State0) ->
+ {State, Effects} = cancel_consumer(ConsumerId, State0, [], consumer_cancel),
+ checkout(Meta, State, Effects);
+apply(Meta, #checkout{spec = Spec, meta = ConsumerMeta,
+ consumer_id = {_, Pid} = ConsumerId},
+ State0) ->
+ State1 = update_consumer(ConsumerId, ConsumerMeta, Spec, State0),
+ checkout(Meta, State1, [{monitor, process, Pid}]);
+apply(#{index := RaftIdx}, #purge{},
+ #?STATE{ra_indexes = Indexes0,
+ returns = Returns,
+ messages = Messages} = State0) ->
+ Total = messages_ready(State0),
+ Indexes1 = lists:foldl(fun rabbit_fifo_index:delete/2, Indexes0,
+ [I || {I, _} <- lists:sort(maps:values(Messages))]),
+ Indexes = lists:foldl(fun rabbit_fifo_index:delete/2, Indexes1,
+ [I || {_, {I, _}} <- lqueue:to_list(Returns)]),
+ {State, _, Effects} =
+ update_smallest_raft_index(RaftIdx,
+ State0#?STATE{ra_indexes = Indexes,
+ messages = #{},
+ returns = lqueue:new(),
+ msg_bytes_enqueue = 0,
+ prefix_msgs = {0, [], 0, []},
+ low_msg_num = undefined,
+ msg_bytes_in_memory = 0,
+ msgs_ready_in_memory = 0},
+ []),
+ %% as we're not checking out after a purge (no point) we have to
+ %% reverse the effects ourselves
+ {State, {purge, Total},
+ lists:reverse([garbage_collection | Effects])};
+apply(Meta, {down, Pid, noconnection},
+ #?STATE{consumers = Cons0,
+ cfg = #cfg{consumer_strategy = single_active},
+ waiting_consumers = Waiting0,
+ enqueuers = Enqs0} = State0) ->
+ Node = node(Pid),
+ %% if the pid refers to an active or cancelled consumer,
+ %% mark it as suspected and return it to the waiting queue
+ {State1, Effects0} =
+ maps:fold(fun({_, P} = Cid, C0, {S0, E0})
+ when node(P) =:= Node ->
+ %% the consumer should be returned to waiting
+ %% and checked out messages should be returned
+ Effs = consumer_update_active_effects(
+ S0, Cid, C0, false, suspected_down, E0),
+ Checked = C0#consumer.checked_out,
+ Credit = increase_credit(C0, maps:size(Checked)),
+ {St, Effs1} = return_all(S0, Effs,
+ Cid, C0#consumer{credit = Credit}),
+ %% if the consumer was cancelled there is a chance it got
+ %% removed when returning hence we need to be defensive here
+ Waiting = case St#?STATE.consumers of
+ #{Cid := C} ->
+ Waiting0 ++ [{Cid, C}];
+ _ ->
+ Waiting0
+ end,
+ {St#?STATE{consumers = maps:remove(Cid, St#?STATE.consumers),
+ waiting_consumers = Waiting},
+ Effs1};
+ (_, _, S) ->
+ S
+ end, {State0, []}, Cons0),
+ WaitingConsumers = update_waiting_consumer_status(Node, State1,
+ suspected_down),
+
+ %% select a new consumer from the waiting queue and run a checkout
+ State2 = State1#?STATE{waiting_consumers = WaitingConsumers},
+ {State, Effects1} = activate_next_consumer(State2, Effects0),
+
+    %% mark any enqueuers as suspected_down
+ Enqs = maps:map(fun(P, E) when node(P) =:= Node ->
+ E#enqueuer{status = suspected_down};
+ (_, E) -> E
+ end, Enqs0),
+ Effects = [{monitor, node, Node} | Effects1],
+ checkout(Meta, State#?STATE{enqueuers = Enqs}, Effects);
+apply(Meta, {down, Pid, noconnection},
+ #?STATE{consumers = Cons0,
+ enqueuers = Enqs0} = State0) ->
+    %% A node has been disconnected. This doesn't necessarily mean that
+    %% any processes on this node are down; they _may_ come back, so here
+    %% we just mark them as suspected (effectively deactivated) and
+    %% return all checked out messages to the main queue for delivery
+    %% to any live consumers
+ %%
+ %% all pids for the disconnected node will be marked as suspected not just
+ %% the one we got the `down' command for
+ Node = node(Pid),
+
+ {State, Effects1} =
+ maps:fold(
+ fun({_, P} = Cid, #consumer{checked_out = Checked0,
+ status = up} = C0,
+ {St0, Eff}) when node(P) =:= Node ->
+ Credit = increase_credit(C0, map_size(Checked0)),
+ C = C0#consumer{status = suspected_down,
+ credit = Credit},
+ {St, Eff0} = return_all(St0, Eff, Cid, C),
+ Eff1 = consumer_update_active_effects(St, Cid, C, false,
+ suspected_down, Eff0),
+ {St, Eff1};
+ (_, _, {St, Eff}) ->
+ {St, Eff}
+ end, {State0, []}, Cons0),
+ Enqs = maps:map(fun(P, E) when node(P) =:= Node ->
+ E#enqueuer{status = suspected_down};
+ (_, E) -> E
+ end, Enqs0),
+
+ % Monitor the node so that we can "unsuspect" these processes when the node
+ % comes back, then re-issue all monitors and discover the final fate of
+ % these processes
+ Effects = case maps:size(State#?STATE.consumers) of
+ 0 ->
+ [{aux, inactive}, {monitor, node, Node}];
+ _ ->
+ [{monitor, node, Node}]
+ end ++ Effects1,
+ checkout(Meta, State#?STATE{enqueuers = Enqs}, Effects);
+apply(Meta, {down, Pid, _Info}, State0) ->
+ {State, Effects} = handle_down(Pid, State0),
+ checkout(Meta, State, Effects);
+apply(Meta, {nodeup, Node}, #?STATE{consumers = Cons0,
+ enqueuers = Enqs0,
+ service_queue = SQ0} = State0) ->
+ %% A node we are monitoring has come back.
+ %% If we have suspected any processes of being
+ %% down we should now re-issue the monitors for them to detect if they're
+ %% actually down or not
+ Monitors = [{monitor, process, P}
+ || P <- suspected_pids_for(Node, State0)],
+
+ Enqs1 = maps:map(fun(P, E) when node(P) =:= Node ->
+ E#enqueuer{status = up};
+ (_, E) -> E
+ end, Enqs0),
+ ConsumerUpdateActiveFun = consumer_active_flag_update_function(State0),
+ %% mark all consumers as up
+ {Cons1, SQ, Effects1} =
+ maps:fold(fun({_, P} = ConsumerId, C, {CAcc, SQAcc, EAcc})
+ when (node(P) =:= Node) and
+ (C#consumer.status =/= cancelled) ->
+ EAcc1 = ConsumerUpdateActiveFun(State0, ConsumerId,
+ C, true, up, EAcc),
+ update_or_remove_sub(ConsumerId,
+ C#consumer{status = up}, CAcc,
+ SQAcc, EAcc1);
+ (_, _, Acc) ->
+ Acc
+ end, {Cons0, SQ0, Monitors}, Cons0),
+ Waiting = update_waiting_consumer_status(Node, State0, up),
+ State1 = State0#?STATE{consumers = Cons1,
+ enqueuers = Enqs1,
+ service_queue = SQ,
+ waiting_consumers = Waiting},
+ {State, Effects} = activate_next_consumer(State1, Effects1),
+ checkout(Meta, State, Effects);
+apply(_, {nodedown, _Node}, State) ->
+ {State, ok};
+apply(_, #purge_nodes{nodes = Nodes}, State0) ->
+ {State, Effects} = lists:foldl(fun(Node, {S, E}) ->
+ purge_node(Node, S, E)
+ end, {State0, []}, Nodes),
+ {State, ok, Effects};
+apply(Meta, #update_config{config = Conf}, State) ->
+ checkout(Meta, update_config(Conf, State), []).
+
+purge_node(Node, State, Effects) ->
+ lists:foldl(fun(Pid, {S0, E0}) ->
+ {S, E} = handle_down(Pid, S0),
+ {S, E0 ++ E}
+ end, {State, Effects}, all_pids_for(Node, State)).
+
+%% any downs that are not noconnection
+handle_down(Pid, #?STATE{consumers = Cons0,
+ enqueuers = Enqs0} = State0) ->
+ % Remove any enqueuer for the same pid and enqueue any pending messages
+ % This should be ok as we won't see any more enqueues from this pid
+ State1 = case maps:take(Pid, Enqs0) of
+ {#enqueuer{pending = Pend}, Enqs} ->
+ lists:foldl(fun ({_, RIdx, RawMsg}, S) ->
+ enqueue(RIdx, RawMsg, S)
+ end, State0#?STATE{enqueuers = Enqs}, Pend);
+ error ->
+ State0
+ end,
+ {Effects1, State2} = handle_waiting_consumer_down(Pid, State1),
+ % return checked out messages to main queue
+ % Find the consumers for the down pid
+ DownConsumers = maps:keys(
+ maps:filter(fun({_, P}, _) -> P =:= Pid end, Cons0)),
+ lists:foldl(fun(ConsumerId, {S, E}) ->
+ cancel_consumer(ConsumerId, S, E, down)
+ end, {State2, Effects1}, DownConsumers).
+
+consumer_active_flag_update_function(#?STATE{cfg = #cfg{consumer_strategy = competing}}) ->
+ fun(State, ConsumerId, Consumer, Active, ActivityStatus, Effects) ->
+ consumer_update_active_effects(State, ConsumerId, Consumer, Active,
+ ActivityStatus, Effects)
+ end;
+consumer_active_flag_update_function(#?STATE{cfg = #cfg{consumer_strategy = single_active}}) ->
+ fun(_, _, _, _, _, Effects) ->
+ Effects
+ end.
+
+handle_waiting_consumer_down(_Pid,
+ #?STATE{cfg = #cfg{consumer_strategy = competing}} = State) ->
+ {[], State};
+handle_waiting_consumer_down(_Pid,
+ #?STATE{cfg = #cfg{consumer_strategy = single_active},
+ waiting_consumers = []} = State) ->
+ {[], State};
+handle_waiting_consumer_down(Pid,
+ #?STATE{cfg = #cfg{consumer_strategy = single_active},
+ waiting_consumers = WaitingConsumers0} = State0) ->
+ % get cancel effects for down waiting consumers
+ Down = lists:filter(fun({{_, P}, _}) -> P =:= Pid end,
+ WaitingConsumers0),
+ Effects = lists:foldl(fun ({ConsumerId, _}, Effects) ->
+ cancel_consumer_effects(ConsumerId, State0,
+ Effects)
+ end, [], Down),
+ % update state to have only up waiting consumers
+ StillUp = lists:filter(fun({{_, P}, _}) -> P =/= Pid end,
+ WaitingConsumers0),
+ State = State0#?STATE{waiting_consumers = StillUp},
+ {Effects, State}.
+
+update_waiting_consumer_status(Node,
+ #?STATE{waiting_consumers = WaitingConsumers},
+ Status) ->
+ [begin
+ case node(Pid) of
+ Node ->
+ {ConsumerId, Consumer#consumer{status = Status}};
+ _ ->
+ {ConsumerId, Consumer}
+ end
+ end || {{_, Pid} = ConsumerId, Consumer} <- WaitingConsumers,
+ Consumer#consumer.status =/= cancelled].
+
+-spec state_enter(ra_server:ra_state(), state()) -> ra_machine:effects().
+state_enter(leader, #?STATE{consumers = Cons,
+ enqueuers = Enqs,
+ waiting_consumers = WaitingConsumers,
+ cfg = #cfg{name = Name,
+ resource = Resource,
+ become_leader_handler = BLH},
+ prefix_msgs = {0, [], 0, []}
+ }) ->
+ % return effects to monitor all current consumers and enqueuers
+ Pids = lists:usort(maps:keys(Enqs)
+ ++ [P || {_, P} <- maps:keys(Cons)]
+ ++ [P || {{_, P}, _} <- WaitingConsumers]),
+ Mons = [{monitor, process, P} || P <- Pids],
+ Nots = [{send_msg, P, leader_change, ra_event} || P <- Pids],
+ NodeMons = lists:usort([{monitor, node, node(P)} || P <- Pids]),
+ FHReservation = [{mod_call, rabbit_quorum_queue, file_handle_leader_reservation, [Resource]}],
+ Effects = Mons ++ Nots ++ NodeMons ++ FHReservation,
+ case BLH of
+ undefined ->
+ Effects;
+ {Mod, Fun, Args} ->
+ [{mod_call, Mod, Fun, Args ++ [Name]} | Effects]
+ end;
+state_enter(eol, #?STATE{enqueuers = Enqs,
+ consumers = Custs0,
+ waiting_consumers = WaitingConsumers0}) ->
+ Custs = maps:fold(fun({_, P}, V, S) -> S#{P => V} end, #{}, Custs0),
+ WaitingConsumers1 = lists:foldl(fun({{_, P}, V}, Acc) -> Acc#{P => V} end,
+ #{}, WaitingConsumers0),
+ AllConsumers = maps:merge(Custs, WaitingConsumers1),
+ [{send_msg, P, eol, ra_event}
+ || P <- maps:keys(maps:merge(Enqs, AllConsumers))] ++
+ [{mod_call, rabbit_quorum_queue, file_handle_release_reservation, []}];
+state_enter(State, #?STATE{cfg = #cfg{resource = _Resource}}) when State =/= leader ->
+ FHReservation = {mod_call, rabbit_quorum_queue, file_handle_other_reservation, []},
+ [FHReservation];
+state_enter(_, _) ->
+ %% catch all as not handling all states
+ [].
+
+
+-spec tick(non_neg_integer(), state()) -> ra_machine:effects().
+tick(_Ts, #?STATE{cfg = #cfg{name = Name,
+ resource = QName},
+ msg_bytes_enqueue = EnqueueBytes,
+ msg_bytes_checkout = CheckoutBytes} = State) ->
+ Metrics = {Name,
+ messages_ready(State),
+ num_checked_out(State), % checked out
+ messages_total(State),
+ query_consumer_count(State), % Consumers
+ EnqueueBytes,
+ CheckoutBytes},
+ [{mod_call, rabbit_quorum_queue,
+ handle_tick, [QName, Metrics, all_nodes(State)]}].
+
+-spec overview(state()) -> map().
+overview(#?STATE{consumers = Cons,
+ enqueuers = Enqs,
+ release_cursors = Cursors,
+ enqueue_count = EnqCount,
+ msg_bytes_enqueue = EnqueueBytes,
+ msg_bytes_checkout = CheckoutBytes,
+ cfg = Cfg} = State) ->
+ Conf = #{name => Cfg#cfg.name,
+ resource => Cfg#cfg.resource,
+ release_cursor_interval => Cfg#cfg.release_cursor_interval,
+ dead_lettering_enabled => undefined =/= Cfg#cfg.dead_letter_handler,
+ max_length => Cfg#cfg.max_length,
+ max_bytes => Cfg#cfg.max_bytes,
+ consumer_strategy => Cfg#cfg.consumer_strategy,
+ max_in_memory_length => Cfg#cfg.max_in_memory_length,
+ max_in_memory_bytes => Cfg#cfg.max_in_memory_bytes},
+ #{type => ?MODULE,
+ config => Conf,
+ num_consumers => maps:size(Cons),
+ num_checked_out => num_checked_out(State),
+ num_enqueuers => maps:size(Enqs),
+ num_ready_messages => messages_ready(State),
+ num_messages => messages_total(State),
+ num_release_cursors => lqueue:len(Cursors),
+ release_cursor_enqueue_counter => EnqCount,
+ enqueue_message_bytes => EnqueueBytes,
+ checkout_message_bytes => CheckoutBytes}.
+
+-spec get_checked_out(consumer_id(), msg_id(), msg_id(), state()) ->
+ [delivery_msg()].
+get_checked_out(Cid, From, To, #?STATE{consumers = Consumers}) ->
+ case Consumers of
+ #{Cid := #consumer{checked_out = Checked}} ->
+ [{K, snd(snd(maps:get(K, Checked)))}
+ || K <- lists:seq(From, To),
+ maps:is_key(K, Checked)];
+ _ ->
+ []
+ end.
+
+-record(aux_gc, {last_raft_idx = 0 :: ra:index()}).
+-record(aux, {name :: atom(),
+ utilisation :: term(),
+ gc = #aux_gc{} :: #aux_gc{}}).
+
+init_aux(Name) when is_atom(Name) ->
+ %% TODO: catch specific exception throw if table already exists
+ ok = ra_machine_ets:create_table(rabbit_fifo_usage,
+ [named_table, set, public,
+ {write_concurrency, true}]),
+ Now = erlang:monotonic_time(micro_seconds),
+ #aux{name = Name,
+ utilisation = {inactive, Now, 1, 1.0}}.
+
+handle_aux(_RaState, cast, Cmd, #aux{name = Name,
+ utilisation = Use0} = State0,
+ Log, MacState) ->
+ State = case Cmd of
+ _ when Cmd == active orelse Cmd == inactive ->
+ State0#aux{utilisation = update_use(Use0, Cmd)};
+ tick ->
+ true = ets:insert(rabbit_fifo_usage,
+ {Name, utilisation(Use0)}),
+ eval_gc(Log, MacState, State0);
+ eval ->
+ State0
+ end,
+ {no_reply, State, Log}.
+
+eval_gc(Log, #?STATE{cfg = #cfg{resource = QR}} = MacState,
+ #aux{gc = #aux_gc{last_raft_idx = LastGcIdx} = Gc} = AuxState) ->
+ {Idx, _} = ra_log:last_index_term(Log),
+ {memory, Mem} = erlang:process_info(self(), memory),
+ case messages_total(MacState) of
+ 0 when Idx > LastGcIdx andalso
+ Mem > ?GC_MEM_LIMIT_B ->
+ garbage_collect(),
+ {memory, MemAfter} = erlang:process_info(self(), memory),
+ rabbit_log:debug("~s: full GC sweep complete. "
+ "Process memory changed from ~.2fMB to ~.2fMB.",
+ [rabbit_misc:rs(QR), Mem/?MB, MemAfter/?MB]),
+ AuxState#aux{gc = Gc#aux_gc{last_raft_idx = Idx}};
+ _ ->
+ AuxState
+ end.
+
+%%% Queries
+
+query_messages_ready(State) ->
+ messages_ready(State).
+
+query_messages_checked_out(#?STATE{consumers = Consumers}) ->
+ maps:fold(fun (_, #consumer{checked_out = C}, S) ->
+ maps:size(C) + S
+ end, 0, Consumers).
+
+query_messages_total(State) ->
+ messages_total(State).
+
+query_processes(#?STATE{enqueuers = Enqs, consumers = Cons0}) ->
+ Cons = maps:fold(fun({_, P}, V, S) -> S#{P => V} end, #{}, Cons0),
+ maps:keys(maps:merge(Enqs, Cons)).
+
+
+query_ra_indexes(#?STATE{ra_indexes = RaIndexes}) ->
+ RaIndexes.
+
+query_consumer_count(#?STATE{consumers = Consumers,
+ waiting_consumers = WaitingConsumers}) ->
+ maps:size(Consumers) + length(WaitingConsumers).
+
+query_consumers(#?STATE{consumers = Consumers,
+ waiting_consumers = WaitingConsumers,
+ cfg = #cfg{consumer_strategy = ConsumerStrategy}} = State) ->
+ ActiveActivityStatusFun =
+ case ConsumerStrategy of
+ competing ->
+ fun(_ConsumerId,
+ #consumer{status = Status}) ->
+ case Status of
+ suspected_down ->
+ {false, Status};
+ _ ->
+ {true, Status}
+ end
+ end;
+ single_active ->
+ SingleActiveConsumer = query_single_active_consumer(State),
+ fun({Tag, Pid} = _Consumer, _) ->
+ case SingleActiveConsumer of
+ {value, {Tag, Pid}} ->
+ {true, single_active};
+ _ ->
+ {false, waiting}
+ end
+ end
+ end,
+ FromConsumers =
+ maps:fold(fun (_, #consumer{status = cancelled}, Acc) ->
+ Acc;
+ ({Tag, Pid}, #consumer{meta = Meta} = Consumer, Acc) ->
+ {Active, ActivityStatus} =
+ ActiveActivityStatusFun({Tag, Pid}, Consumer),
+ maps:put({Tag, Pid},
+ {Pid, Tag,
+ maps:get(ack, Meta, undefined),
+ maps:get(prefetch, Meta, undefined),
+ Active,
+ ActivityStatus,
+ maps:get(args, Meta, []),
+ maps:get(username, Meta, undefined)},
+ Acc)
+ end, #{}, Consumers),
+ FromWaitingConsumers =
+ lists:foldl(fun ({_, #consumer{status = cancelled}}, Acc) ->
+ Acc;
+ ({{Tag, Pid}, #consumer{meta = Meta} = Consumer}, Acc) ->
+ {Active, ActivityStatus} =
+ ActiveActivityStatusFun({Tag, Pid}, Consumer),
+ maps:put({Tag, Pid},
+ {Pid, Tag,
+ maps:get(ack, Meta, undefined),
+ maps:get(prefetch, Meta, undefined),
+ Active,
+ ActivityStatus,
+ maps:get(args, Meta, []),
+ maps:get(username, Meta, undefined)},
+ Acc)
+ end, #{}, WaitingConsumers),
+ maps:merge(FromConsumers, FromWaitingConsumers).
+
+query_single_active_consumer(#?STATE{cfg = #cfg{consumer_strategy = single_active},
+ consumers = Consumers}) ->
+ case maps:size(Consumers) of
+ 0 ->
+ {error, no_value};
+ 1 ->
+ {value, lists:nth(1, maps:keys(Consumers))};
+ _ ->
+ {error, illegal_size}
+ end;
+query_single_active_consumer(_) ->
+ disabled.
+
+query_stat(#?STATE{consumers = Consumers} = State) ->
+ {messages_ready(State), maps:size(Consumers)}.
+
+query_in_memory_usage(#?STATE{msg_bytes_in_memory = Bytes,
+ msgs_ready_in_memory = Length}) ->
+ {Length, Bytes}.
+
+-spec usage(atom()) -> float().
+usage(Name) when is_atom(Name) ->
+ case ets:lookup(rabbit_fifo_usage, Name) of
+ [] -> 0.0;
+ [{_, Use}] -> Use
+ end.
+
+%%% Internal
+
+messages_ready(#?STATE{messages = M,
+ prefix_msgs = {RCnt, _R, PCnt, _P},
+ returns = R}) ->
+ %% prefix messages will rarely have anything in them during normal
+ %% operations, and their counts are cached, so this sum is cheap
+ maps:size(M) + lqueue:len(R) + RCnt + PCnt.
+
+messages_total(#?STATE{ra_indexes = I,
+ prefix_msgs = {RCnt, _R, PCnt, _P}}) ->
+ rabbit_fifo_index:size(I) + RCnt + PCnt.
+
+update_use({inactive, _, _, _} = CUInfo, inactive) ->
+ CUInfo;
+update_use({active, _, _} = CUInfo, active) ->
+ CUInfo;
+update_use({active, Since, Avg}, inactive) ->
+ Now = erlang:monotonic_time(micro_seconds),
+ {inactive, Now, Now - Since, Avg};
+update_use({inactive, Since, Active, Avg}, active) ->
+ Now = erlang:monotonic_time(micro_seconds),
+ {active, Now, use_avg(Active, Now - Since, Avg)}.
+
+utilisation({active, Since, Avg}) ->
+ use_avg(erlang:monotonic_time(micro_seconds) - Since, 0, Avg);
+utilisation({inactive, Since, Active, Avg}) ->
+ use_avg(Active, erlang:monotonic_time(micro_seconds) - Since, Avg).
+
+use_avg(0, 0, Avg) ->
+ Avg;
+use_avg(Active, Inactive, Avg) ->
+ Time = Inactive + Active,
+ moving_average(Time, ?USE_AVG_HALF_LIFE, Active / Time, Avg).
+
+moving_average(_Time, _, Next, undefined) ->
+ Next;
+moving_average(Time, HalfLife, Next, Current) ->
+ Weight = math:exp(Time * math:log(0.5) / HalfLife),
+ Next * (1 - Weight) + Current * Weight.
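+
+%% Worked example (illustrative only, not part of the original code):
+%% when Time equals HalfLife the weight evaluates to
+%% math:exp(math:log(0.5)), i.e. 0.5 up to float rounding, so the new
+%% sample and the current average contribute equally:
+%% moving_average(10000, 10000.0, 1.0, 0.0) returns ~0.5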
+
+num_checked_out(#?STATE{consumers = Cons}) ->
+ maps:fold(fun (_, #consumer{checked_out = C}, Acc) ->
+ maps:size(C) + Acc
+ end, 0, Cons).
+
+cancel_consumer(ConsumerId,
+ #?STATE{cfg = #cfg{consumer_strategy = competing}} = State,
+ Effects, Reason) ->
+ cancel_consumer0(ConsumerId, State, Effects, Reason);
+cancel_consumer(ConsumerId,
+ #?STATE{cfg = #cfg{consumer_strategy = single_active},
+ waiting_consumers = []} = State,
+ Effects, Reason) ->
+ %% single active consumer on, no consumers are waiting
+ cancel_consumer0(ConsumerId, State, Effects, Reason);
+cancel_consumer(ConsumerId,
+ #?STATE{consumers = Cons0,
+ cfg = #cfg{consumer_strategy = single_active},
+ waiting_consumers = Waiting0} = State0,
+ Effects0, Reason) ->
+ %% single active consumer on, consumers are waiting
+ case maps:is_key(ConsumerId, Cons0) of
+ true ->
+ % The active consumer is to be removed
+ {State1, Effects1} = cancel_consumer0(ConsumerId, State0,
+ Effects0, Reason),
+ activate_next_consumer(State1, Effects1);
+ false ->
+ % The cancelled consumer is not the active one
+ % Just remove it from the waiting_consumers list
+ Waiting = lists:keydelete(ConsumerId, 1, Waiting0),
+ Effects = cancel_consumer_effects(ConsumerId, State0, Effects0),
+ % A waiting consumer isn't supposed to have any checked out messages,
+ % so nothing special to do here
+ {State0#?STATE{waiting_consumers = Waiting}, Effects}
+ end.
+
+consumer_update_active_effects(#?STATE{cfg = #cfg{resource = QName}},
+ ConsumerId, #consumer{meta = Meta},
+ Active, ActivityStatus,
+ Effects) ->
+ Ack = maps:get(ack, Meta, undefined),
+ Prefetch = maps:get(prefetch, Meta, undefined),
+ Args = maps:get(args, Meta, []),
+ [{mod_call, rabbit_quorum_queue, update_consumer_handler,
+ [QName, ConsumerId, false, Ack, Prefetch, Active, ActivityStatus, Args]}
+ | Effects].
+
+cancel_consumer0(ConsumerId, #?STATE{consumers = C0} = S0, Effects0, Reason) ->
+ case C0 of
+ #{ConsumerId := Consumer} ->
+ {S, Effects2} = maybe_return_all(ConsumerId, Consumer, S0,
+ Effects0, Reason),
+ %% The effects are emitted before the consumer is actually removed
+ %% if the consumer has unacked messages. This is a bit weird but
+ %% in line with what classic queues do (from an external point of
+ %% view)
+ Effects = cancel_consumer_effects(ConsumerId, S, Effects2),
+ case maps:size(S#?STATE.consumers) of
+ 0 ->
+ {S, [{aux, inactive} | Effects]};
+ _ ->
+ {S, Effects}
+ end;
+ _ ->
+ %% already removed: do nothing
+ {S0, Effects0}
+ end.
+
+activate_next_consumer(#?STATE{consumers = Cons,
+ waiting_consumers = Waiting0} = State0,
+ Effects0) ->
+ case maps:filter(fun (_, #consumer{status = S}) -> S == up end, Cons) of
+ Up when map_size(Up) == 0 ->
+ %% there is no active consumer in the consumers map
+ case lists:filter(fun ({_, #consumer{status = Status}}) ->
+ Status == up
+ end, Waiting0) of
+ [{NextConsumerId, NextConsumer} | _] ->
+ %% there is a potential next active consumer
+ Remaining = lists:keydelete(NextConsumerId, 1, Waiting0),
+ #?STATE{service_queue = ServiceQueue} = State0,
+ ServiceQueue1 = maybe_queue_consumer(NextConsumerId,
+ NextConsumer,
+ ServiceQueue),
+ State = State0#?STATE{consumers = Cons#{NextConsumerId => NextConsumer},
+ service_queue = ServiceQueue1,
+ waiting_consumers = Remaining},
+ Effects = consumer_update_active_effects(State, NextConsumerId,
+ NextConsumer, true,
+ single_active, Effects0),
+ {State, Effects};
+ [] ->
+ {State0, [{aux, inactive} | Effects0]}
+ end;
+ _ ->
+ {State0, Effects0}
+ end.
+
+maybe_return_all(ConsumerId, Consumer,
+ #?STATE{consumers = C0,
+ service_queue = SQ0} = S0,
+ Effects0, Reason) ->
+ case Reason of
+ consumer_cancel ->
+ {Cons, SQ, Effects1} =
+ update_or_remove_sub(ConsumerId,
+ Consumer#consumer{lifetime = once,
+ credit = 0,
+ status = cancelled},
+ C0, SQ0, Effects0),
+ {S0#?STATE{consumers = Cons,
+ service_queue = SQ}, Effects1};
+ down ->
+ {S1, Effects1} = return_all(S0, Effects0, ConsumerId, Consumer),
+ {S1#?STATE{consumers = maps:remove(ConsumerId, S1#?STATE.consumers)},
+ Effects1}
+ end.
+
+apply_enqueue(#{index := RaftIdx} = Meta, From, Seq, RawMsg, State0) ->
+ case maybe_enqueue(RaftIdx, From, Seq, RawMsg, [], State0) of
+ {ok, State1, Effects1} ->
+ State2 = append_to_master_index(RaftIdx, State1),
+ {State, ok, Effects} = checkout(Meta, State2, Effects1),
+ {maybe_store_dehydrated_state(RaftIdx, State), ok, Effects};
+ {duplicate, State, Effects} ->
+ {State, ok, Effects}
+ end.
+
+drop_head(#?STATE{ra_indexes = Indexes0} = State0, Effects0) ->
+ case take_next_msg(State0) of
+ {FullMsg = {_MsgId, {RaftIdxToDrop, {Header, Msg}}},
+ State1} ->
+ Indexes = rabbit_fifo_index:delete(RaftIdxToDrop, Indexes0),
+ State2 = add_bytes_drop(Header, State1#?STATE{ra_indexes = Indexes}),
+ State = case Msg of
+ 'empty' -> State2;
+ _ -> subtract_in_memory_counts(Header, State2)
+ end,
+ Effects = dead_letter_effects(maxlen, #{none => FullMsg},
+ State, Effects0),
+ {State, Effects};
+ {{'$prefix_msg', Header}, State1} ->
+ State2 = subtract_in_memory_counts(Header, add_bytes_drop(Header, State1)),
+ {State2, Effects0};
+ {{'$empty_msg', Header}, State1} ->
+ State2 = add_bytes_drop(Header, State1),
+ {State2, Effects0};
+ empty ->
+ {State0, Effects0}
+ end.
+
+enqueue(RaftIdx, RawMsg, #?STATE{messages = Messages,
+ low_msg_num = LowMsgNum,
+ next_msg_num = NextMsgNum} = State0) ->
+ %% the initial header is an integer only - it will get expanded to a map
+ %% when the next required key is added
+ Header = message_size(RawMsg),
+ {State1, Msg} =
+ case evaluate_memory_limit(Header, State0) of
+ true ->
+ % indexed message with header map
+ {State0, {RaftIdx, {Header, 'empty'}}};
+ false ->
+ {add_in_memory_counts(Header, State0),
+ {RaftIdx, {Header, RawMsg}}} % indexed message with header map
+ end,
+ State = add_bytes_enqueue(Header, State1),
+ State#?STATE{messages = Messages#{NextMsgNum => Msg},
+ %% this is probably only done to record it when low_msg_num
+ %% is undefined
+ low_msg_num = min(LowMsgNum, NextMsgNum),
+ next_msg_num = NextMsgNum + 1}.
+
+append_to_master_index(RaftIdx,
+ #?STATE{ra_indexes = Indexes0} = State0) ->
+ State = incr_enqueue_count(State0),
+ Indexes = rabbit_fifo_index:append(RaftIdx, Indexes0),
+ State#?STATE{ra_indexes = Indexes}.
+
+
+incr_enqueue_count(#?STATE{enqueue_count = C,
+ cfg = #cfg{release_cursor_interval = {_Base, C}}
+ } = State0) ->
+ %% this will trigger a dehydrated version of the state to be stored
+ %% at this raft index for potential future snapshot generation
+ %% Q: Why don't we just stash the release cursor here?
+ %% A: Because it needs to be the very last thing we do and we
+ %% first need to run the checkout logic.
+ State0#?STATE{enqueue_count = 0};
+incr_enqueue_count(#?STATE{cfg = #cfg{release_cursor_interval = C} = Cfg}
+ = State0)
+ when is_integer(C) ->
+ %% conversion to new release cursor interval format
+ State = State0#?STATE{cfg = Cfg#cfg{release_cursor_interval = {C, C}}},
+ incr_enqueue_count(State);
+incr_enqueue_count(#?STATE{enqueue_count = C} = State) ->
+ State#?STATE{enqueue_count = C + 1}.
+
+maybe_store_dehydrated_state(RaftIdx,
+ #?STATE{cfg =
+ #cfg{release_cursor_interval = {Base, _}}
+ = Cfg,
+ ra_indexes = Indexes,
+ enqueue_count = 0,
+ release_cursors = Cursors0} = State0) ->
+ case rabbit_fifo_index:exists(RaftIdx, Indexes) of
+ false ->
+ %% the incoming enqueue must already have been dropped
+ State0;
+ true ->
+ Interval = case Base of
+ 0 -> 0;
+ _ ->
+ Total = messages_total(State0),
+ min(max(Total, Base),
+ ?RELEASE_CURSOR_EVERY_MAX)
+ end,
+ State = convert_prefix_msgs(
+ State0#?STATE{cfg = Cfg#cfg{release_cursor_interval =
+ {Base, Interval}}}),
+ Dehydrated = dehydrate_state(State),
+ Cursor = {release_cursor, RaftIdx, Dehydrated},
+ Cursors = lqueue:in(Cursor, Cursors0),
+ State#?STATE{release_cursors = Cursors}
+ end;
+maybe_store_dehydrated_state(RaftIdx,
+ #?STATE{cfg =
+ #cfg{release_cursor_interval = C} = Cfg}
+ = State0)
+ when is_integer(C) ->
+ %% convert to new format
+ State = State0#?STATE{cfg = Cfg#cfg{release_cursor_interval = {C, C}}},
+ maybe_store_dehydrated_state(RaftIdx, State);
+maybe_store_dehydrated_state(_RaftIdx, State) ->
+ State.
+
+enqueue_pending(From,
+ #enqueuer{next_seqno = Next,
+ pending = [{Next, RaftIdx, RawMsg} | Pending]} = Enq0,
+ State0) ->
+ State = enqueue(RaftIdx, RawMsg, State0),
+ Enq = Enq0#enqueuer{next_seqno = Next + 1, pending = Pending},
+ enqueue_pending(From, Enq, State);
+enqueue_pending(From, Enq, #?STATE{enqueuers = Enqueuers0} = State) ->
+ State#?STATE{enqueuers = Enqueuers0#{From => Enq}}.
+
+maybe_enqueue(RaftIdx, undefined, undefined, RawMsg, Effects, State0) ->
+ % direct enqueue without tracking
+ State = enqueue(RaftIdx, RawMsg, State0),
+ {ok, State, Effects};
+maybe_enqueue(RaftIdx, From, MsgSeqNo, RawMsg, Effects0,
+ #?STATE{enqueuers = Enqueuers0} = State0) ->
+ case maps:get(From, Enqueuers0, undefined) of
+ undefined ->
+ State1 = State0#?STATE{enqueuers = Enqueuers0#{From => #enqueuer{}}},
+ {ok, State, Effects} = maybe_enqueue(RaftIdx, From, MsgSeqNo,
+ RawMsg, Effects0, State1),
+ {ok, State, [{monitor, process, From} | Effects]};
+ #enqueuer{next_seqno = MsgSeqNo} = Enq0 ->
+ % it is the next expected seqno
+ State1 = enqueue(RaftIdx, RawMsg, State0),
+ Enq = Enq0#enqueuer{next_seqno = MsgSeqNo + 1},
+ State = enqueue_pending(From, Enq, State1),
+ {ok, State, Effects0};
+ #enqueuer{next_seqno = Next,
+ pending = Pending0} = Enq0
+ when MsgSeqNo > Next ->
+ % out of order delivery
+ Pending = [{MsgSeqNo, RaftIdx, RawMsg} | Pending0],
+ Enq = Enq0#enqueuer{pending = lists:sort(Pending)},
+ {ok, State0#?STATE{enqueuers = Enqueuers0#{From => Enq}}, Effects0};
+ #enqueuer{next_seqno = Next} when MsgSeqNo =< Next ->
+ % duplicate delivery - nothing else to do here; the raft index
+ % is only appended to ra_indexes after a successful enqueue
+ {duplicate, State0, Effects0}
+ end.
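+
+%% Illustrative walk-through (assumed values, not part of the original
+%% code): an enqueuer at next_seqno = 3 that sees seqnos 5 and then 4
+%% parks both in its sorted pending list; when seqno 3 arrives it is
+%% enqueued directly and enqueue_pending/3 drains 4 and 5 in order,
+%% while a redelivery of an already-processed seqno falls through to
+%% the duplicate clause above and is dropped.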
+
+snd(T) ->
+ element(2, T).
+
+return(#{index := IncomingRaftIdx} = Meta, ConsumerId, Returned,
+ Effects0, #?STATE{service_queue = SQ0} = State0) ->
+ {State1, Effects1} = maps:fold(
+ fun(MsgId, {Tag, _} = Msg, {S0, E0})
+ when Tag == '$prefix_msg';
+ Tag == '$empty_msg'->
+ return_one(MsgId, 0, Msg, S0, E0, ConsumerId);
+ (MsgId, {MsgNum, Msg}, {S0, E0}) ->
+ return_one(MsgId, MsgNum, Msg, S0, E0,
+ ConsumerId)
+ end, {State0, Effects0}, Returned),
+ {State2, Effects3} =
+ case State1#?STATE.consumers of
+ #{ConsumerId := Con0} = Cons0 ->
+ Con = Con0#consumer{credit = increase_credit(Con0,
+ map_size(Returned))},
+ {Cons, SQ, Effects2} = update_or_remove_sub(ConsumerId, Con,
+ Cons0, SQ0, Effects1),
+ {State1#?STATE{consumers = Cons,
+ service_queue = SQ}, Effects2};
+ _ ->
+ {State1, Effects1}
+ end,
+ {State, ok, Effects} = checkout(Meta, State2, Effects3),
+ update_smallest_raft_index(IncomingRaftIdx, State, Effects).
+
+% used to process messages that are finished
+complete(ConsumerId, Discarded,
+ #consumer{checked_out = Checked} = Con0, Effects0,
+ #?STATE{consumers = Cons0, service_queue = SQ0,
+ ra_indexes = Indexes0} = State0) ->
+ %% TODO optimise use of Discarded map here
+ MsgRaftIdxs = [RIdx || {_, {RIdx, _}} <- maps:values(Discarded)],
+ %% credit_mode = simple_prefetch should automatically top-up credit
+ %% as messages are settled or otherwise returned
+ Con = Con0#consumer{checked_out = maps:without(maps:keys(Discarded), Checked),
+ credit = increase_credit(Con0, map_size(Discarded))},
+ {Cons, SQ, Effects} = update_or_remove_sub(ConsumerId, Con, Cons0,
+ SQ0, Effects0),
+ Indexes = lists:foldl(fun rabbit_fifo_index:delete/2, Indexes0,
+ MsgRaftIdxs),
+ %% TODO: use maps:fold instead
+ State1 = lists:foldl(fun({_, {_, {Header, _}}}, Acc) ->
+ add_bytes_settle(Header, Acc);
+ ({'$prefix_msg', Header}, Acc) ->
+ add_bytes_settle(Header, Acc);
+ ({'$empty_msg', Header}, Acc) ->
+ add_bytes_settle(Header, Acc)
+ end, State0, maps:values(Discarded)),
+ {State1#?STATE{consumers = Cons,
+ ra_indexes = Indexes,
+ service_queue = SQ}, Effects}.
+
+increase_credit(#consumer{lifetime = once,
+ credit = Credit}, _) ->
+ %% once consumers cannot increment credit
+ Credit;
+increase_credit(#consumer{lifetime = auto,
+ credit_mode = credited,
+ credit = Credit}, _) ->
+ %% credit_mode: credit also doesn't automatically increment credit
+ Credit;
+increase_credit(#consumer{credit = Current}, Credit) ->
+ Current + Credit.
+
+complete_and_checkout(#{index := IncomingRaftIdx} = Meta, MsgIds, ConsumerId,
+ #consumer{checked_out = Checked0} = Con0,
+ Effects0, State0) ->
+ Discarded = maps:with(MsgIds, Checked0),
+ {State2, Effects1} = complete(ConsumerId, Discarded, Con0,
+ Effects0, State0),
+ {State, ok, Effects} = checkout(Meta, State2, Effects1),
+ update_smallest_raft_index(IncomingRaftIdx, State, Effects).
+
+dead_letter_effects(_Reason, _Discarded,
+ #?STATE{cfg = #cfg{dead_letter_handler = undefined}},
+ Effects) ->
+ Effects;
+dead_letter_effects(Reason, Discarded,
+ #?STATE{cfg = #cfg{dead_letter_handler = {Mod, Fun, Args}}},
+ Effects) ->
+ RaftIdxs = maps:fold(
+ fun (_, {_, {RaftIdx, {_Header, 'empty'}}}, Acc) ->
+ [RaftIdx | Acc];
+ (_, _, Acc) ->
+ Acc
+ end, [], Discarded),
+ [{log, RaftIdxs,
+ fun (Log) ->
+ Lookup = maps:from_list(lists:zip(RaftIdxs, Log)),
+ DeadLetters = maps:fold(
+ fun (_, {_, {RaftIdx, {_Header, 'empty'}}}, Acc) ->
+ {enqueue, _, _, Msg} = maps:get(RaftIdx, Lookup),
+ [{Reason, Msg} | Acc];
+ (_, {_, {_, {_Header, Msg}}}, Acc) ->
+ [{Reason, Msg} | Acc];
+ (_, _, Acc) ->
+ Acc
+ end, [], Discarded),
+ [{mod_call, Mod, Fun, Args ++ [DeadLetters]}]
+ end} | Effects].
+
+cancel_consumer_effects(ConsumerId,
+ #?STATE{cfg = #cfg{resource = QName}}, Effects) ->
+ [{mod_call, rabbit_quorum_queue,
+ cancel_consumer_handler, [QName, ConsumerId]} | Effects].
+
+update_smallest_raft_index(IncomingRaftIdx,
+ #?STATE{ra_indexes = Indexes,
+ release_cursors = Cursors0} = State0,
+ Effects) ->
+ case rabbit_fifo_index:size(Indexes) of
+ 0 ->
+ % there are no messages on queue anymore and no pending enqueues
+ % we can forward release_cursor all the way until
+ % the last received command, hooray
+ State = State0#?STATE{release_cursors = lqueue:new()},
+ {State, ok, Effects ++ [{release_cursor, IncomingRaftIdx, State}]};
+ _ ->
+ Smallest = rabbit_fifo_index:smallest(Indexes),
+ case find_next_cursor(Smallest, Cursors0) of
+ {empty, Cursors} ->
+ {State0#?STATE{release_cursors = Cursors},
+ ok, Effects};
+ {Cursor, Cursors} ->
+ %% we can emit a release cursor as we've passed the
+ %% smallest release cursor available
+ {State0#?STATE{release_cursors = Cursors}, ok,
+ Effects ++ [Cursor]}
+ end
+ end.
+
+find_next_cursor(Idx, Cursors) ->
+ find_next_cursor(Idx, Cursors, empty).
+
+find_next_cursor(Smallest, Cursors0, Potential) ->
+ case lqueue:out(Cursors0) of
+ {{value, {_, Idx, _} = Cursor}, Cursors} when Idx < Smallest ->
+ %% we found one but it may not be the largest one
+ find_next_cursor(Smallest, Cursors, Cursor);
+ _ ->
+ {Potential, Cursors0}
+ end.
+
+update_header(Key, UpdateFun, Default, Header)
+ when is_integer(Header) ->
+ update_header(Key, UpdateFun, Default, #{size => Header});
+update_header(Key, UpdateFun, Default, Header) ->
+ maps:update_with(Key, UpdateFun, Default, Header).
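+
+%% Sketch of the header expansion (hypothetical values): an integer-only
+%% header is first promoted to a map keyed by size, so
+%% update_header(delivery_count, fun(C) -> C + 1 end, 1, 12)
+%% returns #{size => 12, delivery_count => 1}, and applying the same
+%% update to that map yields #{size => 12, delivery_count => 2}.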
+
+
+return_one(MsgId, 0, {Tag, Header0},
+ #?STATE{returns = Returns,
+ consumers = Consumers,
+ cfg = #cfg{delivery_limit = DeliveryLimit}} = State0,
+ Effects0, ConsumerId)
+ when Tag == '$prefix_msg'; Tag == '$empty_msg' ->
+ #consumer{checked_out = Checked} = Con0 = maps:get(ConsumerId, Consumers),
+ Header = update_header(delivery_count, fun (C) -> C+1 end, 1, Header0),
+ Msg0 = {Tag, Header},
+ case maps:get(delivery_count, Header) of
+ DeliveryCount when DeliveryCount > DeliveryLimit ->
+ complete(ConsumerId, #{MsgId => Msg0}, Con0, Effects0, State0);
+ _ ->
+ %% this should not affect the release cursor in any way
+ Con = Con0#consumer{checked_out = maps:remove(MsgId, Checked)},
+ {Msg, State1} = case Tag of
+ '$empty_msg' ->
+ {Msg0, State0};
+ _ -> case evaluate_memory_limit(Header, State0) of
+ true ->
+ {{'$empty_msg', Header}, State0};
+ false ->
+ {Msg0, add_in_memory_counts(Header, State0)}
+ end
+ end,
+ {add_bytes_return(
+ Header,
+ State1#?STATE{consumers = Consumers#{ConsumerId => Con},
+ returns = lqueue:in(Msg, Returns)}),
+ Effects0}
+ end;
+return_one(MsgId, MsgNum, {RaftId, {Header0, RawMsg}},
+ #?STATE{returns = Returns,
+ consumers = Consumers,
+ cfg = #cfg{delivery_limit = DeliveryLimit}} = State0,
+ Effects0, ConsumerId) ->
+ #consumer{checked_out = Checked} = Con0 = maps:get(ConsumerId, Consumers),
+ Header = update_header(delivery_count, fun (C) -> C+1 end, 1, Header0),
+ Msg0 = {RaftId, {Header, RawMsg}},
+ case maps:get(delivery_count, Header) of
+ DeliveryCount when DeliveryCount > DeliveryLimit ->
+ DlMsg = {MsgNum, Msg0},
+ Effects = dead_letter_effects(delivery_limit, #{none => DlMsg},
+ State0, Effects0),
+ complete(ConsumerId, #{MsgId => DlMsg}, Con0, Effects, State0);
+ _ ->
+ Con = Con0#consumer{checked_out = maps:remove(MsgId, Checked)},
+ %% this should not affect the release cursor in any way
+ {Msg, State1} = case RawMsg of
+ 'empty' ->
+ {Msg0, State0};
+ _ ->
+ case evaluate_memory_limit(Header, State0) of
+ true ->
+ {{RaftId, {Header, 'empty'}}, State0};
+ false ->
+ {Msg0, add_in_memory_counts(Header, State0)}
+ end
+ end,
+ {add_bytes_return(
+ Header,
+ State1#?STATE{consumers = Consumers#{ConsumerId => Con},
+ returns = lqueue:in({MsgNum, Msg}, Returns)}),
+ Effects0}
+ end.
+
+return_all(#?STATE{consumers = Cons} = State0, Effects0, ConsumerId,
+ #consumer{checked_out = Checked0} = Con) ->
+ %% need to sort the list so that we return messages in the order
+ %% they were checked out
+ Checked = lists:sort(maps:to_list(Checked0)),
+ State = State0#?STATE{consumers = Cons#{ConsumerId => Con}},
+ lists:foldl(fun ({MsgId, {'$prefix_msg', _} = Msg}, {S, E}) ->
+ return_one(MsgId, 0, Msg, S, E, ConsumerId);
+ ({MsgId, {'$empty_msg', _} = Msg}, {S, E}) ->
+ return_one(MsgId, 0, Msg, S, E, ConsumerId);
+ ({MsgId, {MsgNum, Msg}}, {S, E}) ->
+ return_one(MsgId, MsgNum, Msg, S, E, ConsumerId)
+ end, {State, Effects0}, Checked).
+
+%% checkout new messages to consumers
+checkout(#{index := Index}, State0, Effects0) ->
+ {State1, _Result, Effects1} = checkout0(checkout_one(State0),
+ Effects0, {#{}, #{}}),
+ case evaluate_limit(false, State1, Effects1) of
+ {State, true, Effects} ->
+ update_smallest_raft_index(Index, State, Effects);
+ {State, false, Effects} ->
+ {State, ok, Effects}
+ end.
+
+checkout0({success, ConsumerId, MsgId, {RaftIdx, {Header, 'empty'}}, State},
+ Effects, {SendAcc, LogAcc0}) ->
+ DelMsg = {RaftIdx, {MsgId, Header}},
+ LogAcc = maps:update_with(ConsumerId,
+ fun (M) -> [DelMsg | M] end,
+ [DelMsg], LogAcc0),
+ checkout0(checkout_one(State), Effects, {SendAcc, LogAcc});
+checkout0({success, ConsumerId, MsgId, Msg, State}, Effects,
+ {SendAcc0, LogAcc}) ->
+ DelMsg = {MsgId, Msg},
+ SendAcc = maps:update_with(ConsumerId,
+ fun (M) -> [DelMsg | M] end,
+ [DelMsg], SendAcc0),
+ checkout0(checkout_one(State), Effects, {SendAcc, LogAcc});
+checkout0({Activity, State0}, Effects0, {SendAcc, LogAcc}) ->
+ Effects1 = case Activity of
+ nochange ->
+ append_send_msg_effects(
+ append_log_effects(Effects0, LogAcc), SendAcc);
+ inactive ->
+ [{aux, inactive}
+ | append_send_msg_effects(
+ append_log_effects(Effects0, LogAcc), SendAcc)]
+ end,
+ {State0, ok, lists:reverse(Effects1)}.
+
+evaluate_limit(Result,
+ #?STATE{cfg = #cfg{max_length = undefined,
+ max_bytes = undefined}} = State,
+ Effects) ->
+ {State, Result, Effects};
+evaluate_limit(Result, State00, Effects0) ->
+ State0 = convert_prefix_msgs(State00),
+ case is_over_limit(State0) of
+ true ->
+ {State, Effects} = drop_head(State0, Effects0),
+ evaluate_limit(true, State, Effects);
+ false ->
+ {State0, Result, Effects0}
+ end.
+
+evaluate_memory_limit(_Header,
+ #?STATE{cfg = #cfg{max_in_memory_length = undefined,
+ max_in_memory_bytes = undefined}}) ->
+ false;
+evaluate_memory_limit(#{size := Size}, State) ->
+ evaluate_memory_limit(Size, State);
+evaluate_memory_limit(Size,
+ #?STATE{cfg = #cfg{max_in_memory_length = MaxLength,
+ max_in_memory_bytes = MaxBytes},
+ msg_bytes_in_memory = Bytes,
+ msgs_ready_in_memory = Length})
+ when is_integer(Size) ->
+ (Length >= MaxLength) orelse ((Bytes + Size) > MaxBytes).
+
+append_send_msg_effects(Effects, AccMap) when map_size(AccMap) == 0 ->
+ Effects;
+append_send_msg_effects(Effects0, AccMap) ->
+ Effects = maps:fold(fun (C, Msgs, Ef) ->
+ [send_msg_effect(C, lists:reverse(Msgs)) | Ef]
+ end, Effects0, AccMap),
+ [{aux, active} | Effects].
+
+append_log_effects(Effects0, AccMap) ->
+ maps:fold(fun (C, Msgs, Ef) ->
+ [send_log_effect(C, lists:reverse(Msgs)) | Ef]
+ end, Effects0, AccMap).
+
+%% next message is determined as follows:
+%% First we check if there are prefix returns
+%% Then we check if there are current returns
+%% then we check prefix msgs
+%% then we check current messages
+%%
+%% When we return it is always done to the current return queue
+%% for both prefix messages and current messages
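+%%
+%% A compact sketch of that precedence (assumed state, for exposition
+%% only): with prefix_msgs = {1, [HR], 1, [HM]}, a non-empty returns
+%% queue and a non-empty messages map, successive calls serve the
+%% prefix return HR first, then the queued return, then the prefix
+%% message HM, and only then the head of the messages map.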
+take_next_msg(#?STATE{prefix_msgs = {R, P}} = State) ->
+ %% conversion
+ take_next_msg(State#?STATE{prefix_msgs = {length(R), R, length(P), P}});
+take_next_msg(#?STATE{prefix_msgs = {NumR, [{'$empty_msg', _} = Msg | Rem],
+ NumP, P}} = State) ->
+ %% there are prefix returns, these should be served first
+ {Msg, State#?STATE{prefix_msgs = {NumR-1, Rem, NumP, P}}};
+take_next_msg(#?STATE{prefix_msgs = {NumR, [Header | Rem], NumP, P}} = State) ->
+ %% there are prefix returns, these should be served first
+ {{'$prefix_msg', Header},
+ State#?STATE{prefix_msgs = {NumR-1, Rem, NumP, P}}};
+take_next_msg(#?STATE{returns = Returns,
+ low_msg_num = Low0,
+ messages = Messages0,
+ prefix_msgs = {NumR, R, NumP, P}} = State) ->
+ %% use peek rather than out here as the most likely case is an empty
+ %% queue
+ case lqueue:peek(Returns) of
+ {value, NextMsg} ->
+ {NextMsg,
+ State#?STATE{returns = lqueue:drop(Returns)}};
+ empty when P == [] ->
+ case Low0 of
+ undefined ->
+ empty;
+ _ ->
+ {Msg, Messages} = maps:take(Low0, Messages0),
+ case maps:size(Messages) of
+ 0 ->
+ {{Low0, Msg},
+ State#?STATE{messages = Messages,
+ low_msg_num = undefined}};
+ _ ->
+ {{Low0, Msg},
+ State#?STATE{messages = Messages,
+ low_msg_num = Low0 + 1}}
+ end
+ end;
+ empty ->
+ [Msg | Rem] = P,
+ case Msg of
+ {Header, 'empty'} ->
+ %% There are prefix msgs
+ {{'$empty_msg', Header},
+ State#?STATE{prefix_msgs = {NumR, R, NumP-1, Rem}}};
+ Header ->
+ {{'$prefix_msg', Header},
+ State#?STATE{prefix_msgs = {NumR, R, NumP-1, Rem}}}
+ end
+ end.
+
+send_msg_effect({CTag, CPid}, Msgs) ->
+ {send_msg, CPid, {delivery, CTag, Msgs}, [local, ra_event]}.
+
+send_log_effect({CTag, CPid}, IdxMsgs) ->
+ {RaftIdxs, Data} = lists:unzip(IdxMsgs),
+ {log, RaftIdxs,
+ fun(Log) ->
+ Msgs = lists:zipwith(fun ({enqueue, _, _, Msg}, {MsgId, Header}) ->
+ {MsgId, {Header, Msg}}
+ end, Log, Data),
+ [{send_msg, CPid, {delivery, CTag, Msgs}, [local, ra_event]}]
+ end,
+ {local, node(CPid)}}.
+
+reply_log_effect(RaftIdx, MsgId, Header, Ready, From) ->
+ {log, [RaftIdx],
+ fun([{enqueue, _, _, Msg}]) ->
+ [{reply, From, {wrap_reply,
+ {dequeue, {MsgId, {Header, Msg}}, Ready}}}]
+ end}.
+
+checkout_one(#?STATE{service_queue = SQ0,
+ messages = Messages0,
+ consumers = Cons0} = InitState) ->
+ case queue:peek(SQ0) of
+ {value, ConsumerId} ->
+ case take_next_msg(InitState) of
+ {ConsumerMsg, State0} ->
+ SQ1 = queue:drop(SQ0),
+ %% there are consumers waiting to be serviced
+ %% process consumer checkout
+ case maps:find(ConsumerId, Cons0) of
+ {ok, #consumer{credit = 0}} ->
+ %% no credit but was still on queue
+ %% can happen when draining
+ %% recurse without consumer on queue
+ checkout_one(InitState#?STATE{service_queue = SQ1});
+ {ok, #consumer{status = cancelled}} ->
+ checkout_one(InitState#?STATE{service_queue = SQ1});
+ {ok, #consumer{status = suspected_down}} ->
+ checkout_one(InitState#?STATE{service_queue = SQ1});
+ {ok, #consumer{checked_out = Checked0,
+ next_msg_id = Next,
+ credit = Credit,
+ delivery_count = DelCnt} = Con0} ->
+ Checked = maps:put(Next, ConsumerMsg, Checked0),
+ Con = Con0#consumer{checked_out = Checked,
+ next_msg_id = Next + 1,
+ credit = Credit - 1,
+ delivery_count = DelCnt + 1},
+ {Cons, SQ, []} = % we expect no effects
+ update_or_remove_sub(ConsumerId, Con,
+ Cons0, SQ1, []),
+ State1 = State0#?STATE{service_queue = SQ,
+ consumers = Cons},
+ {State, Msg} =
+ case ConsumerMsg of
+ {'$prefix_msg', Header} ->
+ {subtract_in_memory_counts(
+ Header, add_bytes_checkout(Header, State1)),
+ ConsumerMsg};
+ {'$empty_msg', Header} ->
+ {add_bytes_checkout(Header, State1),
+ ConsumerMsg};
+ {_, {_, {Header, 'empty'}} = M} ->
+ {add_bytes_checkout(Header, State1),
+ M};
+ {_, {_, {Header, _} = M}} ->
+ {subtract_in_memory_counts(
+ Header,
+ add_bytes_checkout(Header, State1)),
+ M}
+ end,
+ {success, ConsumerId, Next, Msg, State};
+ error ->
+ %% consumer did not exist but was queued, recurse
+ checkout_one(InitState#?STATE{service_queue = SQ1})
+ end;
+ empty ->
+ {nochange, InitState}
+ end;
+ empty ->
+ case maps:size(Messages0) of
+ 0 -> {nochange, InitState};
+ _ -> {inactive, InitState}
+ end
+ end.
+
+update_or_remove_sub(ConsumerId, #consumer{lifetime = auto,
+ credit = 0} = Con,
+ Cons, ServiceQueue, Effects) ->
+ {maps:put(ConsumerId, Con, Cons), ServiceQueue, Effects};
+update_or_remove_sub(ConsumerId, #consumer{lifetime = auto} = Con,
+ Cons, ServiceQueue, Effects) ->
+ {maps:put(ConsumerId, Con, Cons),
+ uniq_queue_in(ConsumerId, ServiceQueue), Effects};
+update_or_remove_sub(ConsumerId, #consumer{lifetime = once,
+ checked_out = Checked,
+ credit = 0} = Con,
+ Cons, ServiceQueue, Effects) ->
+ case maps:size(Checked) of
+ 0 ->
+ % we're done with this consumer
+ % TODO: demonitor consumer pid but _only_ if there are no other
+ % monitors for this pid
+ {maps:remove(ConsumerId, Cons), ServiceQueue, Effects};
+ _ ->
+ % there are unsettled items so we need to keep the consumer around
+ {maps:put(ConsumerId, Con, Cons), ServiceQueue, Effects}
+ end;
+update_or_remove_sub(ConsumerId, #consumer{lifetime = once} = Con,
+ Cons, ServiceQueue, Effects) ->
+ {maps:put(ConsumerId, Con, Cons),
+ uniq_queue_in(ConsumerId, ServiceQueue), Effects}.
+
+uniq_queue_in(Key, Queue) ->
+ % TODO: queue:member could surely be quite expensive, however the practical
+ % number of unique consumers may not be large enough for it to matter
+ case queue:member(Key, Queue) of
+ true ->
+ Queue;
+ false ->
+ queue:in(Key, Queue)
+ end.
+
+update_consumer(ConsumerId, Meta, Spec,
+ #?STATE{cfg = #cfg{consumer_strategy = competing}} = State0) ->
+ %% general case, single active consumer off
+ update_consumer0(ConsumerId, Meta, Spec, State0);
+update_consumer(ConsumerId, Meta, Spec,
+ #?STATE{consumers = Cons0,
+ cfg = #cfg{consumer_strategy = single_active}} = State0)
+ when map_size(Cons0) == 0 ->
+ %% single active consumer on, no one is consuming yet
+ update_consumer0(ConsumerId, Meta, Spec, State0);
+update_consumer(ConsumerId, Meta, {Life, Credit, Mode},
+ #?STATE{cfg = #cfg{consumer_strategy = single_active},
+ waiting_consumers = WaitingConsumers0} = State0) ->
+ %% single active consumer on and one active consumer already
+ %% adding the new consumer to the waiting list
+ Consumer = #consumer{lifetime = Life, meta = Meta,
+ credit = Credit, credit_mode = Mode},
+ WaitingConsumers1 = WaitingConsumers0 ++ [{ConsumerId, Consumer}],
+ State0#?STATE{waiting_consumers = WaitingConsumers1}.
+
+update_consumer0(ConsumerId, Meta, {Life, Credit, Mode},
+ #?STATE{consumers = Cons0,
+ service_queue = ServiceQueue0} = State0) ->
+ %% TODO: this logic may not be correct for updating a pre-existing consumer
+ Init = #consumer{lifetime = Life, meta = Meta,
+ credit = Credit, credit_mode = Mode},
+ Cons = maps:update_with(ConsumerId,
+ fun(S) ->
+ %% remove any in-flight messages from
+ %% the credit update
+ N = maps:size(S#consumer.checked_out),
+ C = max(0, Credit - N),
+ S#consumer{lifetime = Life, credit = C}
+ end, Init, Cons0),
+ ServiceQueue = maybe_queue_consumer(ConsumerId, maps:get(ConsumerId, Cons),
+ ServiceQueue0),
+ State0#?STATE{consumers = Cons, service_queue = ServiceQueue}.
+
+maybe_queue_consumer(ConsumerId, #consumer{credit = Credit},
+ ServiceQueue0) ->
+ case Credit > 0 of
+ true ->
+ % consumer needs service - check if already on service queue
+ uniq_queue_in(ConsumerId, ServiceQueue0);
+ false ->
+ ServiceQueue0
+ end.
+
+convert_prefix_msgs(#?STATE{prefix_msgs = {R, P}} = State) ->
+ State#?STATE{prefix_msgs = {length(R), R, length(P), P}};
+convert_prefix_msgs(State) ->
+ State.
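+
+%% e.g. (illustrative): the legacy two-tuple {[H1], [H2, H3]} becomes
+%% the counted four-tuple {1, [H1], 2, [H2, H3]}; states already in the
+%% four-tuple form pass through unchanged.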
+
+%% creates a dehydrated version of the current state to be cached and
+%% potentially used for a snapshot at a later point
+dehydrate_state(#?STATE{messages = Messages,
+ consumers = Consumers,
+ returns = Returns,
+ low_msg_num = Low,
+ next_msg_num = Next,
+ prefix_msgs = {PRCnt, PrefRet0, PPCnt, PrefMsg0},
+ waiting_consumers = Waiting0} = State) ->
+ RCnt = lqueue:len(Returns),
+ %% TODO: optimise this function as far as possible
+ PrefRet1 = lists:foldr(fun ({'$prefix_msg', Header}, Acc) ->
+ [Header | Acc];
+ ({'$empty_msg', _} = Msg, Acc) ->
+ [Msg | Acc];
+ ({_, {_, {Header, 'empty'}}}, Acc) ->
+ [{'$empty_msg', Header} | Acc];
+ ({_, {_, {Header, _}}}, Acc) ->
+ [Header | Acc]
+ end,
+ [],
+ lqueue:to_list(Returns)),
+ PrefRet = PrefRet0 ++ PrefRet1,
+ PrefMsgsSuff = dehydrate_messages(Low, Next - 1, Messages, []),
+ %% prefix messages are not populated in normal operation, only after
+ %% recovering from a snapshot
+ PrefMsgs = PrefMsg0 ++ PrefMsgsSuff,
+ Waiting = [{Cid, dehydrate_consumer(C)} || {Cid, C} <- Waiting0],
+ State#?STATE{messages = #{},
+ ra_indexes = rabbit_fifo_index:empty(),
+ release_cursors = lqueue:new(),
+ low_msg_num = undefined,
+ consumers = maps:map(fun (_, C) ->
+ dehydrate_consumer(C)
+ end, Consumers),
+ returns = lqueue:new(),
+ prefix_msgs = {PRCnt + RCnt, PrefRet,
+ PPCnt + maps:size(Messages), PrefMsgs},
+ waiting_consumers = Waiting}.
+
+dehydrate_messages(Low, Next, _Msgs, Acc)
+ when Next < Low ->
+ Acc;
+dehydrate_messages(Low, Next, Msgs, Acc0) ->
+ Acc = case maps:get(Next, Msgs) of
+ {_RaftIdx, {_, 'empty'} = Msg} ->
+ [Msg | Acc0];
+ {_RaftIdx, {Header, _}} ->
+ [Header | Acc0]
+ end,
+ dehydrate_messages(Low, Next - 1, Msgs, Acc).
+
+dehydrate_consumer(#consumer{checked_out = Checked0} = Con) ->
+ Checked = maps:map(fun (_, {'$prefix_msg', _} = M) ->
+ M;
+ (_, {'$empty_msg', _} = M) ->
+ M;
+ (_, {_, {_, {Header, 'empty'}}}) ->
+ {'$empty_msg', Header};
+ (_, {_, {_, {Header, _}}}) ->
+ {'$prefix_msg', Header}
+ end, Checked0),
+ Con#consumer{checked_out = Checked}.
+
+%% make the state suitable for equality comparison
+normalize(#?STATE{release_cursors = Cursors} = State) ->
+ State#?STATE{release_cursors = lqueue:from_list(lqueue:to_list(Cursors))}.
+
+is_over_limit(#?STATE{cfg = #cfg{max_length = undefined,
+ max_bytes = undefined}}) ->
+ false;
+is_over_limit(#?STATE{cfg = #cfg{max_length = MaxLength,
+ max_bytes = MaxBytes},
+ msg_bytes_enqueue = BytesEnq} = State) ->
+ messages_ready(State) > MaxLength orelse (BytesEnq > MaxBytes).
+
+normalize_for_v1(#?STATE{cfg = Cfg} = State) ->
+ %% run all v0 conversions so that v1 does not have to have this code
+ RCI = case Cfg of
+ #cfg{release_cursor_interval = {_, _} = R} ->
+ R;
+ #cfg{release_cursor_interval = undefined} ->
+ {?RELEASE_CURSOR_EVERY, ?RELEASE_CURSOR_EVERY};
+ #cfg{release_cursor_interval = C} ->
+ {?RELEASE_CURSOR_EVERY, C}
+ end,
+ convert_prefix_msgs(
+ State#?STATE{cfg = Cfg#cfg{release_cursor_interval = RCI}}).
+
+get_field(Field, State) ->
+ Fields = record_info(fields, ?STATE),
+ Index = record_index_of(Field, Fields),
+ element(Index, State).
+
+get_cfg_field(Field, #?STATE{cfg = Cfg}) ->
+ Fields = record_info(fields, cfg),
+ Index = record_index_of(Field, Fields),
+ element(Index, Cfg).
+
+record_index_of(F, Fields) ->
+ index_of(2, F, Fields).
+
+index_of(_, F, []) ->
+ exit({field_not_found, F});
+index_of(N, F, [F | _]) ->
+ N;
+index_of(N, F, [_ | T]) ->
+ index_of(N+1, F, T).
+
+-spec make_enqueue(option(pid()), option(msg_seqno()), raw_msg()) -> protocol().
+make_enqueue(Pid, Seq, Msg) ->
+ #enqueue{pid = Pid, seq = Seq, msg = Msg}.
+
+-spec make_checkout(consumer_id(),
+ checkout_spec(), consumer_meta()) -> protocol().
+make_checkout(ConsumerId, Spec, Meta) ->
+ #checkout{consumer_id = ConsumerId,
+ spec = Spec, meta = Meta}.
+
+-spec make_settle(consumer_id(), [msg_id()]) -> protocol().
+make_settle(ConsumerId, MsgIds) ->
+ #settle{consumer_id = ConsumerId, msg_ids = MsgIds}.
+
+-spec make_return(consumer_id(), [msg_id()]) -> protocol().
+make_return(ConsumerId, MsgIds) ->
+ #return{consumer_id = ConsumerId, msg_ids = MsgIds}.
+
+-spec make_discard(consumer_id(), [msg_id()]) -> protocol().
+make_discard(ConsumerId, MsgIds) ->
+ #discard{consumer_id = ConsumerId, msg_ids = MsgIds}.
+
+-spec make_credit(consumer_id(), non_neg_integer(), non_neg_integer(),
+ boolean()) -> protocol().
+make_credit(ConsumerId, Credit, DeliveryCount, Drain) ->
+ #credit{consumer_id = ConsumerId,
+ credit = Credit,
+ delivery_count = DeliveryCount,
+ drain = Drain}.
+
+-spec make_purge() -> protocol().
+make_purge() -> #purge{}.
+
+-spec make_purge_nodes([node()]) -> protocol().
+make_purge_nodes(Nodes) ->
+ #purge_nodes{nodes = Nodes}.
+
+-spec make_update_config(config()) -> protocol().
+make_update_config(Config) ->
+ #update_config{config = Config}.
+
+add_bytes_enqueue(Bytes,
+ #?STATE{msg_bytes_enqueue = Enqueue} = State)
+ when is_integer(Bytes) ->
+ State#?STATE{msg_bytes_enqueue = Enqueue + Bytes};
+add_bytes_enqueue(#{size := Bytes}, State) ->
+ add_bytes_enqueue(Bytes, State).
+
+add_bytes_drop(Bytes,
+ #?STATE{msg_bytes_enqueue = Enqueue} = State)
+ when is_integer(Bytes) ->
+ State#?STATE{msg_bytes_enqueue = Enqueue - Bytes};
+add_bytes_drop(#{size := Bytes}, State) ->
+ add_bytes_drop(Bytes, State).
+
+add_bytes_checkout(Bytes,
+ #?STATE{msg_bytes_checkout = Checkout,
+ msg_bytes_enqueue = Enqueue } = State)
+ when is_integer(Bytes) ->
+ State#?STATE{msg_bytes_checkout = Checkout + Bytes,
+ msg_bytes_enqueue = Enqueue - Bytes};
+add_bytes_checkout(#{size := Bytes}, State) ->
+ add_bytes_checkout(Bytes, State).
+
+add_bytes_settle(Bytes,
+ #?STATE{msg_bytes_checkout = Checkout} = State)
+ when is_integer(Bytes) ->
+ State#?STATE{msg_bytes_checkout = Checkout - Bytes};
+add_bytes_settle(#{size := Bytes}, State) ->
+ add_bytes_settle(Bytes, State).
+
+add_bytes_return(Bytes,
+ #?STATE{msg_bytes_checkout = Checkout,
+ msg_bytes_enqueue = Enqueue} = State)
+ when is_integer(Bytes) ->
+ State#?STATE{msg_bytes_checkout = Checkout - Bytes,
+ msg_bytes_enqueue = Enqueue + Bytes};
+add_bytes_return(#{size := Bytes}, State) ->
+ add_bytes_return(Bytes, State).
+
+add_in_memory_counts(Bytes,
+ #?STATE{msg_bytes_in_memory = InMemoryBytes,
+ msgs_ready_in_memory = InMemoryCount} = State)
+ when is_integer(Bytes) ->
+ State#?STATE{msg_bytes_in_memory = InMemoryBytes + Bytes,
+ msgs_ready_in_memory = InMemoryCount + 1};
+add_in_memory_counts(#{size := Bytes}, State) ->
+ add_in_memory_counts(Bytes, State).
+
+subtract_in_memory_counts(Bytes,
+ #?STATE{msg_bytes_in_memory = InMemoryBytes,
+ msgs_ready_in_memory = InMemoryCount} = State)
+ when is_integer(Bytes) ->
+ State#?STATE{msg_bytes_in_memory = InMemoryBytes - Bytes,
+ msgs_ready_in_memory = InMemoryCount - 1};
+subtract_in_memory_counts(#{size := Bytes}, State) ->
+ subtract_in_memory_counts(Bytes, State).
+
+message_size(#basic_message{content = Content}) ->
+ #content{payload_fragments_rev = PFR} = Content,
+ iolist_size(PFR);
+message_size({'$prefix_msg', H}) ->
+ get_size_from_header(H);
+message_size({'$empty_msg', H}) ->
+ get_size_from_header(H);
+message_size(B) when is_binary(B) ->
+ byte_size(B);
+message_size(Msg) ->
+ %% probably only hit this for testing so ok to use erts_debug
+ erts_debug:size(Msg).
+
+get_size_from_header(Size) when is_integer(Size) ->
+ Size;
+get_size_from_header(#{size := B}) ->
+ B.
+
+
+all_nodes(#?STATE{consumers = Cons0,
+ enqueuers = Enqs0,
+ waiting_consumers = WaitingConsumers0}) ->
+ Nodes0 = maps:fold(fun({_, P}, _, Acc) ->
+ Acc#{node(P) => ok}
+ end, #{}, Cons0),
+ Nodes1 = maps:fold(fun(P, _, Acc) ->
+ Acc#{node(P) => ok}
+ end, Nodes0, Enqs0),
+ maps:keys(
+ lists:foldl(fun({{_, P}, _}, Acc) ->
+ Acc#{node(P) => ok}
+ end, Nodes1, WaitingConsumers0)).
+
+all_pids_for(Node, #?STATE{consumers = Cons0,
+ enqueuers = Enqs0,
+ waiting_consumers = WaitingConsumers0}) ->
+ Cons = maps:fold(fun({_, P}, _, Acc)
+ when node(P) =:= Node ->
+ [P | Acc];
+ (_, _, Acc) -> Acc
+ end, [], Cons0),
+ Enqs = maps:fold(fun(P, _, Acc)
+ when node(P) =:= Node ->
+ [P | Acc];
+ (_, _, Acc) -> Acc
+ end, Cons, Enqs0),
+ lists:foldl(fun({{_, P}, _}, Acc)
+ when node(P) =:= Node ->
+ [P | Acc];
+ (_, Acc) -> Acc
+ end, Enqs, WaitingConsumers0).
+
+suspected_pids_for(Node, #?STATE{consumers = Cons0,
+ enqueuers = Enqs0,
+ waiting_consumers = WaitingConsumers0}) ->
+ Cons = maps:fold(fun({_, P}, #consumer{status = suspected_down}, Acc)
+ when node(P) =:= Node ->
+ [P | Acc];
+ (_, _, Acc) -> Acc
+ end, [], Cons0),
+ Enqs = maps:fold(fun(P, #enqueuer{status = suspected_down}, Acc)
+ when node(P) =:= Node ->
+ [P | Acc];
+ (_, _, Acc) -> Acc
+ end, Cons, Enqs0),
+ lists:foldl(fun({{_, P},
+ #consumer{status = suspected_down}}, Acc)
+ when node(P) =:= Node ->
+ [P | Acc];
+ (_, Acc) -> Acc
+ end, Enqs, WaitingConsumers0).
diff --git a/deps/rabbit/src/rabbit_fifo_v0.hrl b/deps/rabbit/src/rabbit_fifo_v0.hrl
new file mode 100644
index 0000000000..333ccb4d77
--- /dev/null
+++ b/deps/rabbit/src/rabbit_fifo_v0.hrl
@@ -0,0 +1,195 @@
+
+-type option(T) :: undefined | T.
+
+-type raw_msg() :: term().
+%% The raw message. It is opaque to rabbit_fifo.
+
+-type msg_in_id() :: non_neg_integer().
+% a queue scoped monotonically incrementing integer used to enforce order
+% in the unassigned messages map
+
+-type msg_id() :: non_neg_integer().
+%% A consumer-scoped monotonically incrementing integer included with a
+%% {@link delivery/0.}. Used to settle deliveries using
+%% {@link rabbit_fifo_client:settle/3.}
+
+-type msg_seqno() :: non_neg_integer().
+%% A sender process scoped monotonically incrementing integer included
+%% in enqueue messages. Used to ensure ordering of messages sent from the
+%% same process.
+
+-type msg_header() :: msg_size() |
+ #{size := msg_size(),
+ delivery_count => non_neg_integer()}.
+%% The message header:
+%% delivery_count: the number of unsuccessful delivery attempts.
+%% A non-zero value indicates a previous attempt.
+%% If it only contains the size it can be condensed to a plain integer.
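+%% e.g. (illustrative): a never-delivered 12 byte message may carry the
+%% condensed header 12, which becomes #{size => 12, delivery_count => 1}
+%% after its first unsuccessful delivery attempt.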
+
+-type msg() :: {msg_header(), raw_msg()}.
+%% message with a header map.
+
+-type msg_size() :: non_neg_integer().
+%% the size in bytes of the msg payload
+
+-type indexed_msg() :: {ra:index(), msg()}.
+
+-type prefix_msg() :: {'$prefix_msg', msg_header()}.
+
+-type delivery_msg() :: {msg_id(), msg()}.
+%% A tuple consisting of the message id and the headered message.
+
+-type consumer_tag() :: binary().
+%% An arbitrary binary tag used to distinguish between different consumers
+%% set up by the same process. See: {@link rabbit_fifo_client:checkout/3.}
+
+-type delivery() :: {delivery, consumer_tag(), [delivery_msg()]}.
+%% Represents the delivery of one or more rabbit_fifo messages.
+
+-type consumer_id() :: {consumer_tag(), pid()}.
+%% The entity that receives messages. Uniquely identifies a consumer.
+
+-type credit_mode() :: simple_prefetch | credited.
+%% determines how credit is replenished
+
+-type checkout_spec() :: {once | auto, Num :: non_neg_integer(),
+ credit_mode()} |
+ {dequeue, settled | unsettled} |
+ cancel.
+
+-type consumer_meta() :: #{ack => boolean(),
+ username => binary(),
+ prefetch => non_neg_integer(),
+ args => list()}.
+%% static meta data associated with a consumer
+
+
+-type applied_mfa() :: {module(), atom(), list()}.
+% represents a partially applied module call
+
+-define(RELEASE_CURSOR_EVERY, 64000).
+-define(RELEASE_CURSOR_EVERY_MAX, 3200000).
+-define(USE_AVG_HALF_LIFE, 10000.0).
+%% an average QQ without any message uses about 100KB so setting this limit
+%% to ~20 times that should be relatively safe.
+-define(GC_MEM_LIMIT_B, 2000000).
+
+-define(MB, 1048576).
+-define(STATE, rabbit_fifo).
+
+-record(consumer,
+ {meta = #{} :: consumer_meta(),
+ checked_out = #{} :: #{msg_id() => {msg_in_id(), indexed_msg()}},
+ next_msg_id = 0 :: msg_id(), % part of snapshot data
+ %% max number of messages that can be sent
+ %% decremented for each delivery
+ credit = 0 :: non_neg_integer(),
+ %% total number of checked out messages - ever
+ %% incremented for each delivery
+ delivery_count = 0 :: non_neg_integer(),
+ %% the mode of how credit is incremented
+ %% simple_prefetch: credit is re-filled as deliveries are settled
+ %% or returned.
+ %% credited: credit can only be changed by receiving a consumer_credit
+ %% command: `{consumer_credit, ReceiverDeliveryCount, Credit}'
+ credit_mode = simple_prefetch :: credit_mode(), % part of snapshot data
+ lifetime = once :: once | auto,
+ status = up :: up | suspected_down | cancelled
+ }).
+
+-type consumer() :: #consumer{}.
+
+-type consumer_strategy() :: competing | single_active.
+
+-record(enqueuer,
+ {next_seqno = 1 :: msg_seqno(),
+ % out of order enqueues - sorted list
+ pending = [] :: [{msg_seqno(), ra:index(), raw_msg()}],
+ status = up :: up | suspected_down
+ }).
+
+-record(cfg,
+ {name :: atom(),
+ resource :: rabbit_types:r('queue'),
+ release_cursor_interval ::
+ undefined | non_neg_integer() |
+ {non_neg_integer(), non_neg_integer()},
+ dead_letter_handler :: option(applied_mfa()),
+ become_leader_handler :: option(applied_mfa()),
+ max_length :: option(non_neg_integer()),
+ max_bytes :: option(non_neg_integer()),
+ %% whether single active consumer is on or not for this queue
+ consumer_strategy = competing :: consumer_strategy(),
+ %% the maximum number of unsuccessful delivery attempts permitted
+ delivery_limit :: option(non_neg_integer()),
+ max_in_memory_length :: option(non_neg_integer()),
+ max_in_memory_bytes :: option(non_neg_integer())
+ }).
+
+-type prefix_msgs() :: {list(), list()} |
+ {non_neg_integer(), list(),
+ non_neg_integer(), list()}.
+
+-record(?STATE,
+ {cfg :: #cfg{},
+ % unassigned messages
+ messages = #{} :: #{msg_in_id() => indexed_msg()},
+ % defines the lowest msg_in_id available in the messages map
+ % that isn't a return
+ low_msg_num :: option(msg_in_id()),
+ % defines the next msg_in_id to be added to the messages map
+ next_msg_num = 1 :: msg_in_id(),
+ % list of returned msg_in_ids - when checking out it picks from
+ % this list first before taking low_msg_num
+ returns = lqueue:new() :: lqueue:lqueue(prefix_msg() |
+ {msg_in_id(), indexed_msg()}),
+ % a counter of enqueues - used to trigger shadow copy points
+ enqueue_count = 0 :: non_neg_integer(),
+ % a map containing all the live processes that have ever enqueued
+ % a message to this queue as well as a cached value of the smallest
+ % ra_index of all pending enqueues
+ enqueuers = #{} :: #{pid() => #enqueuer{}},
+ % master index of all enqueue raft indexes including pending
+ % enqueues
+ % rabbit_fifo_index can be slow when calculating the smallest
+ % index when there are large gaps but should be faster than gb_trees
+ % for normal appending operations as it's backed by a map
+ ra_indexes = rabbit_fifo_index:empty() :: rabbit_fifo_index:state(),
+ release_cursors = lqueue:new() :: lqueue:lqueue({release_cursor,
+ ra:index(), #?STATE{}}),
+ % consumers need to reflect consumer state at time of snapshot
+ % needs to be part of snapshot
+ consumers = #{} :: #{consumer_id() => #consumer{}},
+ % consumers that require further service are queued here
+ % needs to be part of snapshot
+ service_queue = queue:new() :: queue:queue(consumer_id()),
+ %% This is a special field that is only used for snapshots
+ %% It represents the queued messages at the time the
+ %% dehydrated snapshot state was cached.
+ %% As release_cursors are only emitted for raft indexes where all
+ %% prior messages no longer contribute to the current state we can
+ %% replace all message payloads with their sizes (to be used for
+ %% overflow calculations).
+ %% This is done so that consumers are still served in a deterministic
+ %% order on recovery.
+ prefix_msgs = {0, [], 0, []} :: prefix_msgs(),
+ msg_bytes_enqueue = 0 :: non_neg_integer(),
+ msg_bytes_checkout = 0 :: non_neg_integer(),
+ %% waiting consumers; one is picked when the active consumer
+ %% is cancelled or dies
+ %% used only when single active consumer is on
+ waiting_consumers = [] :: [{consumer_id(), consumer()}],
+ msg_bytes_in_memory = 0 :: non_neg_integer(),
+ msgs_ready_in_memory = 0 :: non_neg_integer()
+ }).
+
+-type config() :: #{name := atom(),
+ queue_resource := rabbit_types:r('queue'),
+ dead_letter_handler => applied_mfa(),
+ become_leader_handler => applied_mfa(),
+ release_cursor_interval => non_neg_integer(),
+ max_length => non_neg_integer(),
+ max_bytes => non_neg_integer(),
+ max_in_memory_length => non_neg_integer(),
+ max_in_memory_bytes => non_neg_integer(),
+ single_active_consumer_on => boolean(),
+ delivery_limit => non_neg_integer()}.
diff --git a/deps/rabbit/src/rabbit_file.erl b/deps/rabbit/src/rabbit_file.erl
new file mode 100644
index 0000000000..f8263d9e77
--- /dev/null
+++ b/deps/rabbit/src/rabbit_file.erl
@@ -0,0 +1,321 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_file).
+
+-include_lib("kernel/include/file.hrl").
+
+-export([is_file/1, is_dir/1, file_size/1, ensure_dir/1, wildcard/2, list_dir/1]).
+-export([read_term_file/1, write_term_file/2, write_file/2, write_file/3]).
+-export([append_file/2, ensure_parent_dirs_exist/1]).
+-export([rename/2, delete/1, recursive_delete/1, recursive_copy/2]).
+-export([lock_file/1]).
+-export([read_file_info/1]).
+-export([filename_as_a_directory/1]).
+
+-import(file_handle_cache, [with_handle/1, with_handle/2]).
+
+-define(TMP_EXT, ".tmp").
+
+%%----------------------------------------------------------------------------
+
+-type ok_or_error() :: rabbit_types:ok_or_error(any()).
+
+%%----------------------------------------------------------------------------
+
+-spec is_file((file:filename())) -> boolean().
+
+is_file(File) ->
+ case read_file_info(File) of
+ {ok, #file_info{type=regular}} -> true;
+ {ok, #file_info{type=directory}} -> true;
+ _ -> false
+ end.
+
+-spec is_dir((file:filename())) -> boolean().
+
+is_dir(Dir) -> is_dir_internal(read_file_info(Dir)).
+
+is_dir_no_handle(Dir) -> is_dir_internal(prim_file:read_file_info(Dir)).
+
+is_dir_internal({ok, #file_info{type=directory}}) -> true;
+is_dir_internal(_) -> false.
+
+-spec file_size((file:filename())) -> non_neg_integer().
+
+file_size(File) ->
+ case read_file_info(File) of
+ {ok, #file_info{size=Size}} -> Size;
+ _ -> 0
+ end.
+
+-spec ensure_dir((file:filename())) -> ok_or_error().
+
+ensure_dir(File) -> with_handle(fun () -> ensure_dir_internal(File) end).
+
+ensure_dir_internal("/") ->
+ ok;
+ensure_dir_internal(File) ->
+ Dir = filename:dirname(File),
+ case is_dir_no_handle(Dir) of
+ true -> ok;
+ false -> ensure_dir_internal(Dir),
+ prim_file:make_dir(Dir)
+ end.
+
+-spec wildcard(string(), file:filename()) -> [file:filename()].
+
+wildcard(Pattern, Dir) ->
+ case list_dir(Dir) of
+ {ok, Files} -> {ok, RE} = re:compile(Pattern, [anchored]),
+ [File || File <- Files,
+ match =:= re:run(File, RE, [{capture, none}])];
+ {error, _} -> []
+ end.
+
+-spec list_dir(file:filename()) ->
+ rabbit_types:ok_or_error2([file:filename()], any()).
+
+list_dir(Dir) -> with_handle(fun () -> prim_file:list_dir(Dir) end).
+
+read_file_info(File) ->
+ with_handle(fun () -> prim_file:read_file_info(File) end).
+
+-spec read_term_file
+ (file:filename()) -> {'ok', [any()]} | rabbit_types:error(any()).
+
+read_term_file(File) ->
+ try
+ {ok, Data} = with_handle(fun () -> prim_file:read_file(File) end),
+ {ok, Tokens, _} = erl_scan:string(binary_to_list(Data)),
+ TokenGroups = group_tokens(Tokens),
+ {ok, [begin
+ {ok, Term} = erl_parse:parse_term(Tokens1),
+ Term
+ end || Tokens1 <- TokenGroups]}
+ catch
+ error:{badmatch, Error} -> Error
+ end.
+
+group_tokens(Ts) -> [lists:reverse(G) || G <- group_tokens([], Ts)].
+
+group_tokens([], []) -> [];
+group_tokens(Cur, []) -> [Cur];
+group_tokens(Cur, [T = {dot, _} | Ts]) -> [[T | Cur] | group_tokens([], Ts)];
+group_tokens(Cur, [T | Ts]) -> group_tokens([T | Cur], Ts).
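+
+%% e.g. (illustrative): scanning the string "a. b." yields one token
+%% group per dot-terminated term, so read_term_file/1 would return
+%% {ok, [a, b]} for a file containing exactly that text.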
+
+-spec write_term_file(file:filename(), [any()]) -> ok_or_error().
+
+write_term_file(File, Terms) ->
+ write_file(File, list_to_binary([io_lib:format("~w.~n", [Term]) ||
+ Term <- Terms])).
+
+-spec write_file(file:filename(), iodata()) -> ok_or_error().
+
+write_file(Path, Data) -> write_file(Path, Data, []).
+
+-spec write_file(file:filename(), iodata(), [any()]) -> ok_or_error().
+
+write_file(Path, Data, Modes) ->
+ Modes1 = [binary, write | (Modes -- [binary, write])],
+ case make_binary(Data) of
+ Bin when is_binary(Bin) -> write_file1(Path, Bin, Modes1);
+ {error, _} = E -> E
+ end.
+
+%% make_binary/1 is based on the corresponding function in the
+%% kernel/file.erl module of the Erlang R14B02 release, which is
+%% licensed under the EPL.
+
+make_binary(Bin) when is_binary(Bin) ->
+ Bin;
+make_binary(List) ->
+ try
+ iolist_to_binary(List)
+ catch error:Reason ->
+ {error, Reason}
+ end.
+
+write_file1(Path, Bin, Modes) ->
+ try
+ with_synced_copy(Path, Modes,
+ fun (Hdl) ->
+ ok = prim_file:write(Hdl, Bin)
+ end)
+ catch
+ error:{badmatch, Error} -> Error;
+ _:{error, Error} -> {error, Error}
+ end.
+
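+%% Writes to a temporary file (Path ++ ?TMP_EXT), fsyncs it, then renames
+%% it over Path, so a crash mid-write cannot leave a partially written
+%% file at the final location.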
+with_synced_copy(Path, Modes, Fun) ->
+ case lists:member(append, Modes) of
+ true ->
+ {error, append_not_supported, Path};
+ false ->
+ with_handle(
+ fun () ->
+ Bak = Path ++ ?TMP_EXT,
+ case prim_file:open(Bak, Modes) of
+ {ok, Hdl} ->
+ try
+ Result = Fun(Hdl),
+ ok = prim_file:sync(Hdl),
+ ok = prim_file:rename(Bak, Path),
+ Result
+ after
+ prim_file:close(Hdl)
+ end;
+ {error, _} = E -> E
+ end
+ end)
+ end.
+
+%% TODO the semantics of this function are rather odd. But see bug 25021.
+
+-spec append_file(file:filename(), string()) -> ok_or_error().
+
+append_file(File, Suffix) ->
+ case read_file_info(File) of
+ {ok, FInfo} -> append_file(File, FInfo#file_info.size, Suffix);
+ {error, enoent} -> append_file(File, 0, Suffix);
+ Error -> Error
+ end.
+
+append_file(_, _, "") ->
+ ok;
+append_file(File, 0, Suffix) ->
+ with_handle(fun () ->
+ case prim_file:open([File, Suffix], [append]) of
+ {ok, Fd} -> prim_file:close(Fd);
+ Error -> Error
+ end
+ end);
+append_file(File, _, Suffix) ->
+ case with_handle(2, fun () ->
+ file:copy(File, {[File, Suffix], [append]})
+ end) of
+ {ok, _BytesCopied} -> ok;
+ Error -> Error
+ end.
+
+-spec ensure_parent_dirs_exist(string()) -> 'ok'.
+
+ensure_parent_dirs_exist(Filename) ->
+ case ensure_dir(Filename) of
+ ok -> ok;
+ {error, Reason} ->
+ throw({error, {cannot_create_parent_dirs, Filename, Reason}})
+ end.
+
+-spec rename(file:filename(), file:filename()) -> ok_or_error().
+
+rename(Old, New) -> with_handle(fun () -> prim_file:rename(Old, New) end).
+
+-spec delete([file:filename()]) -> ok_or_error().
+
+delete(File) -> with_handle(fun () -> prim_file:delete(File) end).
+
+-spec recursive_delete([file:filename()]) ->
+ rabbit_types:ok_or_error({file:filename(), any()}).
+
+recursive_delete(Files) ->
+ with_handle(
+ fun () -> lists:foldl(fun (Path, ok) -> recursive_delete1(Path);
+ (_Path, {error, _Err} = Error) -> Error
+ end, ok, Files)
+ end).
+
+recursive_delete1(Path) ->
+ case is_dir_no_handle(Path) and not(is_symlink_no_handle(Path)) of
+ false -> case prim_file:delete(Path) of
+ ok -> ok;
+ {error, enoent} -> ok; %% Path doesn't exist anyway
+ {error, Err} -> {error, {Path, Err}}
+ end;
+ true -> case prim_file:list_dir(Path) of
+ {ok, FileNames} ->
+ case lists:foldl(
+ fun (FileName, ok) ->
+ recursive_delete1(
+ filename:join(Path, FileName));
+ (_FileName, Error) ->
+ Error
+ end, ok, FileNames) of
+ ok ->
+ case prim_file:del_dir(Path) of
+ ok -> ok;
+ {error, Err} -> {error, {Path, Err}}
+ end;
+ {error, _Err} = Error ->
+ Error
+ end;
+ {error, Err} ->
+ {error, {Path, Err}}
+ end
+ end.
+
+is_symlink_no_handle(File) ->
+ case prim_file:read_link(File) of
+ {ok, _} -> true;
+ _ -> false
+ end.
+
+-spec recursive_copy(file:filename(), file:filename()) ->
+ rabbit_types:ok_or_error({file:filename(), file:filename(), any()}).
+
+recursive_copy(Src, Dest) ->
+ %% Note that this uses the 'file' module and, hence, shouldn't be
+ %% run on many processes at once.
+ case is_dir(Src) of
+ false -> case file:copy(Src, Dest) of
+ {ok, _Bytes} -> ok;
+ {error, enoent} -> ok; %% Path doesn't exist anyway
+ {error, Err} -> {error, {Src, Dest, Err}}
+ end;
+ true -> case file:list_dir(Src) of
+ {ok, FileNames} ->
+ case file:make_dir(Dest) of
+ ok ->
+ lists:foldl(
+ fun (FileName, ok) ->
+ recursive_copy(
+ filename:join(Src, FileName),
+ filename:join(Dest, FileName));
+ (_FileName, Error) ->
+ Error
+ end, ok, FileNames);
+ {error, Err} ->
+ {error, {Src, Dest, Err}}
+ end;
+ {error, Err} ->
+ {error, {Src, Dest, Err}}
+ end
+ end.
+
+%% TODO: When we stop supporting Erlang prior to R14, this should be
+%% replaced with file:open [write, exclusive]
+
+-spec lock_file(file:filename()) -> rabbit_types:ok_or_error('eexist').
+
+lock_file(Path) ->
+ case is_file(Path) of
+ true -> {error, eexist};
+ false -> with_handle(
+ fun () -> {ok, Lock} = prim_file:open(Path, [write]),
+ ok = prim_file:close(Lock)
+ end)
+ end.
+
+-spec filename_as_a_directory(file:filename()) -> file:filename().
+
+filename_as_a_directory(FileName) ->
+    %% lists:last/1 returns a character, so match on $/ rather than "/".
+    case lists:last(FileName) of
+        $/ ->
+            FileName;
+        _ ->
+            FileName ++ "/"
+    end.
diff --git a/deps/rabbit/src/rabbit_framing.erl b/deps/rabbit/src/rabbit_framing.erl
new file mode 100644
index 0000000000..42927b2b68
--- /dev/null
+++ b/deps/rabbit/src/rabbit_framing.erl
@@ -0,0 +1,36 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+%% TODO auto-generate
+
+-module(rabbit_framing).
+
+-export_type([protocol/0,
+ amqp_field_type/0, amqp_property_type/0,
+ amqp_table/0, amqp_array/0, amqp_value/0,
+ amqp_method_name/0, amqp_method/0, amqp_method_record/0,
+ amqp_method_field_name/0, amqp_property_record/0,
+ amqp_exception/0, amqp_exception_code/0, amqp_class_id/0]).
+
+-type protocol() :: 'rabbit_framing_amqp_0_8' | 'rabbit_framing_amqp_0_9_1'.
+
+-define(protocol_type(T), type(T :: rabbit_framing_amqp_0_8:T |
+ rabbit_framing_amqp_0_9_1:T)).
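+%% For example, `-?protocol_type(amqp_table()).` expands to:
+%%   -type(amqp_table() :: rabbit_framing_amqp_0_8:amqp_table() |
+%%                         rabbit_framing_amqp_0_9_1:amqp_table()).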
+
+-?protocol_type(amqp_field_type()).
+-?protocol_type(amqp_property_type()).
+-?protocol_type(amqp_table()).
+-?protocol_type(amqp_array()).
+-?protocol_type(amqp_value()).
+-?protocol_type(amqp_method_name()).
+-?protocol_type(amqp_method()).
+-?protocol_type(amqp_method_record()).
+-?protocol_type(amqp_method_field_name()).
+-?protocol_type(amqp_property_record()).
+-?protocol_type(amqp_exception()).
+-?protocol_type(amqp_exception_code()).
+-?protocol_type(amqp_class_id()).
diff --git a/deps/rabbit/src/rabbit_guid.erl b/deps/rabbit/src/rabbit_guid.erl
new file mode 100644
index 0000000000..01e6464332
--- /dev/null
+++ b/deps/rabbit/src/rabbit_guid.erl
@@ -0,0 +1,181 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_guid).
+
+-behaviour(gen_server).
+
+-export([start_link/0]).
+-export([filename/0]).
+-export([gen/0, gen_secure/0, string/2, binary/2, to_string/1]).
+
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
+ code_change/3]).
+
+-define(SERVER, ?MODULE).
+-define(SERIAL_FILENAME, "rabbit_serial").
+
+-record(state, {serial}).
+
+%%----------------------------------------------------------------------------
+
+-export_type([guid/0]).
+
+-type guid() :: binary().
+
+%%----------------------------------------------------------------------------
+
+-spec start_link() -> rabbit_types:ok_pid_or_error().
+
+start_link() ->
+ gen_server:start_link({local, ?SERVER}, ?MODULE,
+ [update_disk_serial()], []).
+
+%% We use this to detect a (possibly rather old) Mnesia directory,
+%% since it has existed since at least 1.7.0 (as far back as I cared
+%% to go).
+
+-spec filename() -> string().
+
+filename() ->
+ filename:join(rabbit_mnesia:dir(), ?SERIAL_FILENAME).
+
+update_disk_serial() ->
+ Filename = filename(),
+ Serial = case rabbit_file:read_term_file(Filename) of
+ {ok, [Num]} -> Num;
+ {ok, []} -> 0; %% [1]
+ {error, enoent} -> 0;
+ {error, Reason} ->
+ throw({error, {cannot_read_serial_file, Filename, Reason}})
+ end,
+ case rabbit_file:write_term_file(Filename, [Serial + 1]) of
+ ok -> ok;
+ {error, Reason1} ->
+ throw({error, {cannot_write_serial_file, Filename, Reason1}})
+ end,
+ Serial.
+%% [1] a couple of users have reported startup failures due to the
+%% file being empty, presumably as a result of filesystem
+%% corruption. While rabbit doesn't cope with that in general, in this
+%% specific case we can be more accommodating.
+
+%% Generate an un-hashed guid.
+fresh() ->
+ %% We don't use erlang:now() here because a) it may return
+ %% duplicates when the system clock has been rewound prior to a
+ %% restart, or ids were generated at a high rate (which causes
+ %% now() to move ahead of the system time), and b) it is really
+ %% slow since it takes a global lock and makes a system call.
+ %%
+ %% A persisted serial number, the node, and a unique reference
+ %% (per node incarnation) uniquely identifies a process in space
+ %% and time.
+ Serial = gen_server:call(?SERVER, serial, infinity),
+ {Serial, node(), make_ref()}.
+
+advance_blocks({B1, B2, B3, B4}, I) ->
+ %% To produce a new set of blocks, we create a new 32bit block
+ %% hashing {B5, I}. The new hash is used as last block, and the
+ %% other three blocks are XORed with it.
+ %%
+ %% Doing this is convenient because it avoids cascading conflicts,
+ %% while being very fast. The conflicts are avoided by propagating
+ %% the changes through all the blocks at each round by XORing, so
+ %% the only occasion in which a collision will take place is when
+ %% all 4 blocks are the same and the counter is the same.
+ %%
+ %% The range (2^32) is provided explicitly since phash uses 2^27
+ %% by default.
+ B5 = erlang:phash2({B1, I}, 4294967296),
+ {{(B2 bxor B5), (B3 bxor B5), (B4 bxor B5), B5}, I+1}.
+
+%% generate a GUID. This function should be used when performance is a
+%% priority and predictability is not an issue. Otherwise use
+%% gen_secure/0.
+
+-spec gen() -> guid().
+
+gen() ->
+ %% We hash a fresh GUID with md5, split it in 4 blocks, and each
+ %% time we need a new guid we rotate them producing a new hash
+ %% with the aid of the counter. Look at the comments in
+ %% advance_blocks/2 for details.
+ case get(guid) of
+ undefined -> <<B1:32, B2:32, B3:32, B4:32>> = Res =
+ erlang:md5(term_to_binary(fresh())),
+ put(guid, {{B1, B2, B3, B4}, 0}),
+ Res;
+ {BS, I} -> {{B1, B2, B3, B4}, _} = S = advance_blocks(BS, I),
+ put(guid, S),
+ <<B1:32, B2:32, B3:32, B4:32>>
+ end.
+
+%% generate a non-predictable GUID.
+%%
+%% The id is only unique within a single cluster and as long as the
+%% serial store hasn't been deleted.
+%%
+%% If you are not concerned with predictability, gen/0 is faster.
+
+-spec gen_secure() -> guid().
+
+gen_secure() ->
+ %% Here instead of hashing once we hash the GUID and the counter
+ %% each time, so that the GUID is not predictable.
+ G = case get(guid_secure) of
+ undefined -> {fresh(), 0};
+ {S, I} -> {S, I+1}
+ end,
+ put(guid_secure, G),
+ erlang:md5(term_to_binary(G)).
+
+%% generate a readable string representation of a GUID.
+%%
+%% employs base64url encoding, which is safer in more contexts than
+%% plain base64.
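+%%
+%% Illustrative only (the generated suffix is made up):
+%%   string(gen(), "prefix") -> "prefix-oUTFeTkrR0WPQ5a5VnS_1w"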
+
+-spec string(guid() | string(), any()) -> string().
+
+string(G, Prefix) when is_list(Prefix) ->
+ Prefix ++ "-" ++ rabbit_misc:base64url(G);
+string(G, Prefix) when is_binary(Prefix) ->
+ binary_to_list(Prefix) ++ "-" ++ rabbit_misc:base64url(G).
+
+-spec binary(guid() | string(), any()) -> binary().
+
+binary(G, Prefix) ->
+ list_to_binary(string(G, Prefix)).
+
+%% copied from https://stackoverflow.com/questions/1657204/erlang-uuid-generator
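+%% e.g. to_string(<<0:128>>) -> "00000000-0000-0000-0000-000000000000"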
+to_string(<<TL:32, TM:16, THV:16, CSR:8, CSL:8, N:48>>) ->
+ lists:flatten(
+ io_lib:format("~8.16.0b-~4.16.0b-~4.16.0b-~2.16.0b~2.16.0b-~12.16.0b",
+ [TL, TM, THV, CSR, CSL, N])).
+
+%%----------------------------------------------------------------------------
+
+init([Serial]) ->
+ {ok, #state{serial = Serial}}.
+
+handle_call(serial, _From, State = #state{serial = Serial}) ->
+ {reply, Serial, State};
+
+handle_call(_Request, _From, State) ->
+ {noreply, State}.
+
+handle_cast(_Msg, State) ->
+ {noreply, State}.
+
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
diff --git a/deps/rabbit/src/rabbit_health_check.erl b/deps/rabbit/src/rabbit_health_check.erl
new file mode 100644
index 0000000000..4674ca7d8e
--- /dev/null
+++ b/deps/rabbit/src/rabbit_health_check.erl
@@ -0,0 +1,80 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+-module(rabbit_health_check).
+
+%% External API
+-export([node/1, node/2]).
+
+%% Internal API
+-export([local/0]).
+
+%%----------------------------------------------------------------------------
+%% External functions
+%%----------------------------------------------------------------------------
+
+-spec node(node(), timeout()) -> ok | {badrpc, term()} | {error_string, string()}.
+
+node(Node) ->
+ %% same default as in CLI
+ node(Node, 70000).
+node(Node, Timeout) ->
+ rabbit_misc:rpc_call(Node, rabbit_health_check, local, [], Timeout).
+
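+%% Illustrative only: on a healthy node local/0 returns ok; on a node
+%% with an active resource alarm it returns
+%% {error_string, "resource alarm(s) in effect:..."} (message elided).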
+-spec local() -> ok | {error_string, string()}.
+
+local() ->
+ rabbit_log:warning("rabbitmqctl node_health_check and its HTTP API counterpart are DEPRECATED. "
+ "See https://www.rabbitmq.com/monitoring.html#health-checks for replacement options."),
+ run_checks([list_channels, list_queues, alarms, rabbit_node_monitor]).
+
+%%----------------------------------------------------------------------------
+%% Internal functions
+%%----------------------------------------------------------------------------
+run_checks([]) ->
+ ok;
+run_checks([C|Cs]) ->
+ case node_health_check(C) of
+ ok ->
+ run_checks(Cs);
+ Error ->
+ Error
+ end.
+
+node_health_check(list_channels) ->
+ case rabbit_channel:info_local([pid]) of
+ L when is_list(L) ->
+ ok
+ end;
+
+node_health_check(list_queues) ->
+ health_check_queues(rabbit_vhost:list_names());
+
+node_health_check(rabbit_node_monitor) ->
+ case rabbit_node_monitor:partitions() of
+ [] ->
+ ok;
+ L when is_list(L), length(L) > 0 ->
+ ErrorMsg = io_lib:format("cluster partition in effect: ~p", [L]),
+ {error_string, ErrorMsg}
+ end;
+
+node_health_check(alarms) ->
+ case proplists:get_value(alarms, rabbit:status()) of
+ [] ->
+ ok;
+ Alarms ->
+ ErrorMsg = io_lib:format("resource alarm(s) in effect:~p", [Alarms]),
+ {error_string, ErrorMsg}
+ end.
+
+health_check_queues([]) ->
+ ok;
+health_check_queues([VHost|RestVHosts]) ->
+ case rabbit_amqqueue:info_local(VHost) of
+ L when is_list(L) ->
+ health_check_queues(RestVHosts)
+ end.
diff --git a/deps/rabbit/src/rabbit_lager.erl b/deps/rabbit/src/rabbit_lager.erl
new file mode 100644
index 0000000000..3cbc5e431d
--- /dev/null
+++ b/deps/rabbit/src/rabbit_lager.erl
@@ -0,0 +1,723 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_lager).
+
+-include_lib("rabbit_common/include/rabbit_log.hrl").
+
+%% API
+-export([start_logger/0, log_locations/0, fold_sinks/2,
+ broker_is_started/0, set_log_level/1]).
+
+%% For test purposes
+-export([configure_lager/0]).
+
+-export_type([log_location/0]).
+
+-type log_location() :: string().
+
+start_logger() ->
+ ok = maybe_remove_logger_handler(),
+ ok = app_utils:stop_applications([lager, syslog]),
+ ok = ensure_lager_configured(),
+ ok = app_utils:start_applications([lager]),
+ fold_sinks(
+ fun
+ (_, [], Acc) ->
+ Acc;
+ (SinkName, _, Acc) ->
+ lager:log(SinkName, info, self(),
+ "Log file opened with Lager", []),
+ Acc
+ end, ok),
+ ensure_log_working().
+
+broker_is_started() ->
+ {ok, HwmCurrent} = application:get_env(lager, error_logger_hwm),
+ {ok, HwmOrig0} = application:get_env(lager, error_logger_hwm_original),
+ HwmOrig = case get_most_verbose_log_level() of
+ debug -> HwmOrig0 * 100;
+ _ -> HwmOrig0
+ end,
+ case HwmOrig =:= HwmCurrent of
+ false ->
+ ok = application:set_env(lager, error_logger_hwm, HwmOrig),
+ Handlers = gen_event:which_handlers(lager_event),
+ lists:foreach(fun(Handler) ->
+ lager:set_loghwm(Handler, HwmOrig)
+ end, Handlers),
+ ok;
+ _ ->
+ ok
+ end.
+
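+%% Illustrative only:
+%%   set_log_level(debug) applies `debug` to every sink and handler and
+%%   returns ok; set_log_level(verbose) returns
+%%   {error, {invalid_log_level, verbose}} since lager has no such level.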
+set_log_level(Level) ->
+ IsValidLevel = lists:member(Level, lager_util:levels()),
+ set_log_level(IsValidLevel, Level).
+
+set_log_level(true, Level) ->
+ SinksAndHandlers = [{Sink, gen_event:which_handlers(Sink)} ||
+ Sink <- lager:list_all_sinks()],
+ DefaultHwm = application:get_env(lager, error_logger_hwm_original, 50),
+ Hwm = case Level of
+ debug -> DefaultHwm * 100;
+ _ -> DefaultHwm
+ end,
+ application:set_env(lager, error_logger_hwm, Hwm),
+ set_sink_log_level(SinksAndHandlers, Level, Hwm);
+set_log_level(_, Level) ->
+ {error, {invalid_log_level, Level}}.
+
+set_sink_log_level([], _Level, _Hwm) ->
+ ok;
+set_sink_log_level([{Sink, Handlers}|Rest], Level, Hwm) ->
+ set_sink_handler_log_level(Sink, Handlers, Level, Hwm),
+ set_sink_log_level(Rest, Level, Hwm).
+
+set_sink_handler_log_level(_Sink, [], _Level, _Hwm) ->
+ ok;
+set_sink_handler_log_level(Sink, [Handler|Rest], Level, Hwm)
+ when is_atom(Handler) andalso is_integer(Hwm) ->
+ lager:set_loghwm(Sink, Handler, undefined, Hwm),
+ ok = lager:set_loglevel(Sink, Handler, undefined, Level),
+ set_sink_handler_log_level(Sink, Rest, Level, Hwm);
+set_sink_handler_log_level(Sink, [{Handler, Id}|Rest], Level, Hwm) ->
+ lager:set_loghwm(Sink, Handler, Id, Hwm),
+ ok = lager:set_loglevel(Sink, Handler, Id, Level),
+ set_sink_handler_log_level(Sink, Rest, Level, Hwm);
+set_sink_handler_log_level(Sink, [_|Rest], Level, Hwm) ->
+ set_sink_handler_log_level(Sink, Rest, Level, Hwm).
+
+log_locations() ->
+ ensure_lager_configured(),
+ DefaultHandlers = application:get_env(lager, handlers, []),
+ Sinks = application:get_env(lager, extra_sinks, []),
+ ExtraHandlers = [proplists:get_value(handlers, Props, [])
+ || {_, Props} <- Sinks],
+ lists:sort(log_locations1([DefaultHandlers | ExtraHandlers], [])).
+
+log_locations1([Handlers | Rest], Locations) ->
+ Locations1 = log_locations2(Handlers, Locations),
+ log_locations1(Rest, Locations1);
+log_locations1([], Locations) ->
+ Locations.
+
+log_locations2([{lager_file_backend, Settings} | Rest], Locations) ->
+ FileName = lager_file_name1(Settings),
+ Locations1 = case lists:member(FileName, Locations) of
+ false -> [FileName | Locations];
+ true -> Locations
+ end,
+ log_locations2(Rest, Locations1);
+log_locations2([{lager_console_backend, _} | Rest], Locations) ->
+ Locations1 = case lists:member("<stdout>", Locations) of
+ false -> ["<stdout>" | Locations];
+ true -> Locations
+ end,
+ log_locations2(Rest, Locations1);
+log_locations2([_ | Rest], Locations) ->
+ log_locations2(Rest, Locations);
+log_locations2([], Locations) ->
+ Locations.
+
+fold_sinks(Fun, Acc) ->
+ Handlers = lager_config:global_get(handlers),
+ Sinks = dict:to_list(lists:foldl(
+ fun
+ ({{lager_file_backend, F}, _, S}, Dict) ->
+ dict:append(S, F, Dict);
+ ({_, _, S}, Dict) ->
+ case dict:is_key(S, Dict) of
+ true -> dict:store(S, [], Dict);
+ false -> Dict
+ end
+ end,
+ dict:new(), Handlers)),
+ fold_sinks(Sinks, Fun, Acc).
+
+fold_sinks([{SinkName, FileNames} | Rest], Fun, Acc) ->
+ Acc1 = Fun(SinkName, FileNames, Acc),
+ fold_sinks(Rest, Fun, Acc1);
+fold_sinks([], _, Acc) ->
+ Acc.
+
+ensure_log_working() ->
+ {ok, Handlers} = application:get_env(lager, handlers),
+ [ ensure_lager_handler_file_exist(Handler)
+ || Handler <- Handlers ],
+ Sinks = application:get_env(lager, extra_sinks, []),
+ ensure_extra_sinks_working(Sinks, list_expected_sinks()).
+
+ensure_extra_sinks_working(Sinks, [SinkName | Rest]) ->
+ case proplists:get_value(SinkName, Sinks) of
+ undefined -> throw({error, {cannot_log_to_file, unknown,
+ rabbit_log_lager_event_sink_undefined}});
+ Sink ->
+ SinkHandlers = proplists:get_value(handlers, Sink, []),
+ [ ensure_lager_handler_file_exist(Handler)
+ || Handler <- SinkHandlers ]
+ end,
+ ensure_extra_sinks_working(Sinks, Rest);
+ensure_extra_sinks_working(_Sinks, []) ->
+ ok.
+
+ensure_lager_handler_file_exist(Handler) ->
+ case lager_file_name(Handler) of
+ false -> ok;
+ FileName -> ensure_logfile_exist(FileName)
+ end.
+
+lager_file_name({lager_file_backend, Settings}) ->
+ lager_file_name1(Settings);
+lager_file_name(_) ->
+ false.
+
+lager_file_name1(Settings) when is_list(Settings) ->
+ {file, FileName} = proplists:lookup(file, Settings),
+ lager_util:expand_path(FileName);
+lager_file_name1({FileName, _}) -> lager_util:expand_path(FileName);
+lager_file_name1({FileName, _, _, _, _}) -> lager_util:expand_path(FileName);
+lager_file_name1(_) ->
+ throw({error, {cannot_log_to_file, unknown,
+ lager_file_backend_config_invalid}}).
+
+
+ensure_logfile_exist(FileName) ->
+ LogFile = lager_util:expand_path(FileName),
+ case rabbit_file:read_file_info(LogFile) of
+ {ok,_} -> ok;
+ {error, Err} -> throw({error, {cannot_log_to_file, LogFile, Err}})
+ end.
+
+ensure_lager_configured() ->
+ case lager_configured() of
+ false -> configure_lager();
+ true -> ok
+ end.
+
+%% Lager should have handlers and sinks
+%% Error logger forwarding to syslog should be disabled
+lager_configured() ->
+ Sinks = lager:list_all_sinks(),
+ ExpectedSinks = list_expected_sinks(),
+ application:get_env(lager, handlers) =/= undefined
+ andalso
+ lists:all(fun(S) -> lists:member(S, Sinks) end, ExpectedSinks)
+ andalso
+ application:get_env(syslog, syslog_error_logger) =/= undefined.
+
+configure_lager() ->
+ ok = app_utils:load_applications([lager]),
+ %% Turn off reformatting for error_logger messages
+ case application:get_env(lager, error_logger_redirect) of
+ undefined -> application:set_env(lager, error_logger_redirect, true);
+ _ -> ok
+ end,
+ case application:get_env(lager, error_logger_format_raw) of
+ undefined -> application:set_env(lager, error_logger_format_raw, true);
+ _ -> ok
+ end,
+ case application:get_env(lager, log_root) of
+ undefined ->
+ %% Setting env var to 'undefined' is different from not
+ %% setting it at all, and lager is sensitive to this
+ %% difference.
+ case application:get_env(rabbit, lager_log_root) of
+ {ok, Value} ->
+ ok = application:set_env(lager, log_root, Value);
+ _ ->
+ ok
+ end;
+ _ -> ok
+ end,
+ case application:get_env(lager, colored) of
+ undefined ->
+ UseColor = rabbit_prelaunch_early_logging:use_colored_logging(),
+ application:set_env(lager, colored, UseColor);
+ _ ->
+ ok
+ end,
+ %% Set rabbit.log config variable based on environment.
+ prepare_rabbit_log_config(),
+ %% Configure syslog library.
+ ok = configure_syslog_error_logger(),
+ %% At this point we should have rabbit.log application variable
+ %% configured to generate RabbitMQ log handlers.
+ GeneratedHandlers = generate_lager_handlers(),
+
+    %% If there are lager handlers configured, both the configured lager
+    %% handlers and the generated RabbitMQ handlers are used, because it
+    %% is hard to define clear precedence rules between the two.
+ %% RabbitMQ handlers can be set to [] to use only lager handlers.
+ Handlers = case application:get_env(lager, handlers, undefined) of
+ undefined -> GeneratedHandlers;
+ LagerHandlers ->
+ %% Remove handlers generated in previous starts
+ FormerRabbitHandlers = application:get_env(lager, rabbit_handlers, []),
+ GeneratedHandlers ++ remove_rabbit_handlers(LagerHandlers,
+ FormerRabbitHandlers)
+ end,
+
+ ok = application:set_env(lager, handlers, Handlers),
+ ok = application:set_env(lager, rabbit_handlers, GeneratedHandlers),
+
+ %% Setup extra sink/handlers. If they are not configured, redirect
+ %% messages to the default sink. To know the list of expected extra
+ %% sinks, we look at the 'lager_extra_sinks' compilation option.
+ LogConfig = application:get_env(rabbit, log, []),
+ LogLevels = application:get_env(rabbit, log_levels, []),
+ Categories = proplists:get_value(categories, LogConfig, []),
+ CategoriesConfig0 = case {Categories, LogLevels} of
+ {[], []} -> [];
+ {[], LogLevels} ->
+ io:format("Using deprecated config parameter 'log_levels'. "
+ "Please update your configuration file according to "
+ "https://rabbitmq.com/logging.html"),
+ lists:map(fun({Name, Level}) -> {Name, [{level, Level}]} end,
+ LogLevels);
+ {Categories, []} ->
+ Categories;
+ {Categories, _} ->
+ io:format("Using the deprecated config parameter 'rabbit.log_levels' together "
+ "with a new parameter for log categories."
+ " 'rabbit.log_levels' will be ignored. Please remove it from the config. More at "
+ "https://rabbitmq.com/logging.html"),
+ Categories
+ end,
+ LogLevelsFromContext = case rabbit_prelaunch:get_context() of
+ #{log_levels := LL} -> LL;
+ _ -> undefined
+ end,
+ Fun = fun
+ (global, _, CC) ->
+ CC;
+ (color, _, CC) ->
+ CC;
+ (CategoryS, LogLevel, CC) ->
+ Category = list_to_atom(CategoryS),
+ CCEntry = proplists:get_value(
+ Category, CC, []),
+ CCEntry1 = lists:ukeymerge(
+ 1,
+ [{level, LogLevel}],
+ lists:ukeysort(1, CCEntry)),
+ lists:keystore(
+ Category, 1, CC, {Category, CCEntry1})
+ end,
+ CategoriesConfig = case LogLevelsFromContext of
+ undefined ->
+ CategoriesConfig0;
+ _ ->
+ maps:fold(Fun,
+ CategoriesConfig0,
+ LogLevelsFromContext)
+ end,
+ SinkConfigs = lists:map(
+ fun({Name, Config}) ->
+ {rabbit_log:make_internal_sink_name(Name), Config}
+ end,
+ CategoriesConfig),
+ LagerSinks = application:get_env(lager, extra_sinks, []),
+ GeneratedSinks = generate_lager_sinks(
+ [error_logger_lager_event | list_expected_sinks()],
+ SinkConfigs),
+ Sinks = merge_lager_sink_handlers(LagerSinks, GeneratedSinks, []),
+ ok = application:set_env(lager, extra_sinks, Sinks),
+
+ case application:get_env(lager, error_logger_hwm) of
+ undefined ->
+ ok = application:set_env(lager, error_logger_hwm, 1000),
+ % NB: 50 is the default value in lager.app.src
+ ok = application:set_env(lager, error_logger_hwm_original, 50);
+ {ok, Val} when is_integer(Val) andalso Val < 1000 ->
+ ok = application:set_env(lager, error_logger_hwm, 1000),
+ ok = application:set_env(lager, error_logger_hwm_original, Val);
+ {ok, Val} when is_integer(Val) ->
+ ok = application:set_env(lager, error_logger_hwm_original, Val),
+ ok
+ end,
+ ok.
+
+configure_syslog_error_logger() ->
+ %% Disable error_logger forwarding to syslog if it's not configured
+ case application:get_env(syslog, syslog_error_logger) of
+ undefined ->
+ application:set_env(syslog, syslog_error_logger, false);
+ _ -> ok
+ end.
+
+remove_rabbit_handlers(Handlers, FormerHandlers) ->
+ lists:filter(fun(Handler) ->
+ not lists:member(Handler, FormerHandlers)
+ end,
+ Handlers).
+
+generate_lager_handlers() ->
+ LogConfig = application:get_env(rabbit, log, []),
+ LogHandlersConfig = lists:keydelete(categories, 1, LogConfig),
+ generate_lager_handlers(LogHandlersConfig).
+
+generate_lager_handlers(LogHandlersConfig) ->
+ lists:flatmap(
+ fun
+ ({file, HandlerConfig}) ->
+ case proplists:get_value(file, HandlerConfig, false) of
+ false -> [];
+ FileName when is_list(FileName) ->
+ Backend = lager_backend(file),
+ generate_handler(Backend, HandlerConfig)
+ end;
+ ({Other, HandlerConfig}) when
+ Other =:= console; Other =:= syslog; Other =:= exchange ->
+ case proplists:get_value(enabled, HandlerConfig, false) of
+ false -> [];
+ true ->
+ Backend = lager_backend(Other),
+ generate_handler(Backend,
+ lists:keydelete(enabled, 1, HandlerConfig))
+ end
+ end,
+ LogHandlersConfig).
+
+lager_backend(file) -> lager_file_backend;
+lager_backend(console) -> lager_console_backend;
+lager_backend(syslog) -> syslog_lager_backend;
+lager_backend(exchange) -> lager_exchange_backend.
+
+%% The syslog backend uses an old configuration API and
+%% does not support proplists.
+generate_handler(syslog_lager_backend=Backend, HandlerConfig) ->
+ %% The default log level is set to `debug` because the actual
+ %% filtering is made at the sink level. We want to accept all
+ %% messages here.
+ DefaultConfigVal = debug,
+ Level = proplists:get_value(level, HandlerConfig, DefaultConfigVal),
+ ok = configure_handler_backend(Backend),
+ [{Backend,
+ [Level,
+ {},
+ {lager_default_formatter, syslog_formatter_config()}]}];
+generate_handler(Backend, HandlerConfig) ->
+ [{Backend,
+ lists:ukeymerge(1, lists:ukeysort(1, HandlerConfig),
+ lists:ukeysort(1, default_handler_config(Backend)))}].
+
+configure_handler_backend(syslog_lager_backend) ->
+ {ok, _} = application:ensure_all_started(syslog),
+ ok;
+configure_handler_backend(_Backend) ->
+ ok.
+
+default_handler_config(lager_console_backend) ->
+ %% The default log level is set to `debug` because the actual
+ %% filtering is made at the sink level. We want to accept all
+ %% messages here.
+ DefaultConfigVal = debug,
+ [{level, DefaultConfigVal},
+ {formatter_config, default_config_value({formatter_config, console})}];
+default_handler_config(lager_exchange_backend) ->
+ %% The default log level is set to `debug` because the actual
+ %% filtering is made at the sink level. We want to accept all
+ %% messages here.
+ DefaultConfigVal = debug,
+ [{level, DefaultConfigVal},
+ {formatter_config, default_config_value({formatter_config, exchange})}];
+default_handler_config(lager_file_backend) ->
+ %% The default log level is set to `debug` because the actual
+ %% filtering is made at the sink level. We want to accept all
+ %% messages here.
+ DefaultConfigVal = debug,
+ [{level, DefaultConfigVal},
+ {formatter_config, default_config_value({formatter_config, file})},
+ {date, ""},
+ {size, 0}].
+
+default_config_value(level) ->
+ LogConfig = application:get_env(rabbit, log, []),
+ FoldFun = fun
+ ({_, Cfg}, LL) when is_list(Cfg) ->
+ NewLL = proplists:get_value(level, Cfg, LL),
+ case LL of
+ undefined ->
+ NewLL;
+ _ ->
+ MoreVerbose = lager_util:level_to_num(NewLL) > lager_util:level_to_num(LL),
+ case MoreVerbose of
+ true -> NewLL;
+ false -> LL
+ end
+ end;
+ (_, LL) ->
+ LL
+ end,
+ FoundLL = lists:foldl(FoldFun, undefined, LogConfig),
+ case FoundLL of
+ undefined -> info;
+ _ -> FoundLL
+ end;
+default_config_value({formatter_config, console}) ->
+ EOL = case application:get_env(lager, colored) of
+ {ok, true} -> "\e[0m\r\n";
+ _ -> "\r\n"
+ end,
+ [date, " ", time, " ", color, "[", severity, "] ",
+ {pid, ""},
+ " ", message, EOL];
+default_config_value({formatter_config, _}) ->
+ [date, " ", time, " ", color, "[", severity, "] ",
+ {pid, ""},
+ " ", message, "\n"].
+
+syslog_formatter_config() ->
+ [color, "[", severity, "] ",
+ {pid, ""},
+ " ", message, "\n"].
+
+prepare_rabbit_log_config() ->
+ %% If RABBIT_LOGS is not set, we should ignore it.
+ DefaultFile = application:get_env(rabbit, lager_default_file, undefined),
+ %% If RABBIT_UPGRADE_LOGS is not set, we should ignore it.
+ UpgradeFile = application:get_env(rabbit, lager_upgrade_file, undefined),
+ case DefaultFile of
+ undefined -> ok;
+ false ->
+ set_env_default_log_disabled();
+ tty ->
+ set_env_default_log_console();
+ FileName when is_list(FileName) ->
+ case rabbit_prelaunch:get_context() of
+ %% The user explicitly sets $RABBITMQ_LOGS;
+ %% we should override a file location even
+ %% if it's set in rabbitmq.config
+ #{var_origins := #{main_log_file := environment}} ->
+ set_env_default_log_file(FileName, override);
+ _ ->
+ set_env_default_log_file(FileName, keep)
+ end
+ end,
+
+ %% Upgrade log file never overrides the value set in rabbitmq.config
+ case UpgradeFile of
+ %% No special env for upgrade logs - redirect to the default sink
+ undefined -> ok;
+ %% Redirect logs to default output.
+ DefaultFile -> ok;
+ UpgradeFileName when is_list(UpgradeFileName) ->
+ set_env_upgrade_log_file(UpgradeFileName)
+ end.
+
+set_env_default_log_disabled() ->
+ %% Disabling all the logs.
+ ok = application:set_env(rabbit, log, []).
+
+set_env_default_log_console() ->
+ LogConfig = application:get_env(rabbit, log, []),
+ ConsoleConfig = proplists:get_value(console, LogConfig, []),
+ LogConfigConsole =
+ lists:keystore(console, 1, LogConfig,
+ {console, lists:keystore(enabled, 1, ConsoleConfig,
+ {enabled, true})}),
+ %% Remove the file handler - disable logging to file
+ LogConfigConsoleNoFile = lists:keydelete(file, 1, LogConfigConsole),
+ ok = application:set_env(rabbit, log, LogConfigConsoleNoFile).
+
+set_env_default_log_file(FileName, Override) ->
+ LogConfig = application:get_env(rabbit, log, []),
+ FileConfig = proplists:get_value(file, LogConfig, []),
+ NewLogConfig = case proplists:get_value(file, FileConfig, undefined) of
+ undefined ->
+ lists:keystore(file, 1, LogConfig,
+ {file, lists:keystore(file, 1, FileConfig,
+ {file, FileName})});
+ _ConfiguredFileName ->
+ case Override of
+ override ->
+ lists:keystore(
+ file, 1, LogConfig,
+ {file, lists:keystore(file, 1, FileConfig,
+ {file, FileName})});
+ keep ->
+ LogConfig
+ end
+ end,
+ ok = application:set_env(rabbit, log, NewLogConfig).
+
+set_env_upgrade_log_file(FileName) ->
+ LogConfig = application:get_env(rabbit, log, []),
+ SinksConfig = proplists:get_value(categories, LogConfig, []),
+ UpgradeSinkConfig = proplists:get_value(upgrade, SinksConfig, []),
+ FileConfig = proplists:get_value(file, SinksConfig, []),
+ NewLogConfig = case proplists:get_value(file, FileConfig, undefined) of
+ undefined ->
+ lists:keystore(
+ categories, 1, LogConfig,
+ {categories,
+ lists:keystore(
+ upgrade, 1, SinksConfig,
+ {upgrade,
+ lists:keystore(file, 1, UpgradeSinkConfig,
+ {file, FileName})})});
+                   %% No change. We don't want to override the configured value.
+ _File -> LogConfig
+ end,
+ ok = application:set_env(rabbit, log, NewLogConfig).
+
+generate_lager_sinks(SinkNames, SinkConfigs) ->
+ LogLevels = case rabbit_prelaunch:get_context() of
+ #{log_levels := LL} -> LL;
+ _ -> undefined
+ end,
+ DefaultLogLevel = case LogLevels of
+ #{global := LogLevel} ->
+ LogLevel;
+ _ ->
+ default_config_value(level)
+ end,
+ lists:map(fun(SinkName) ->
+ SinkConfig = proplists:get_value(SinkName, SinkConfigs, []),
+ SinkHandlers = case proplists:get_value(file, SinkConfig, false) of
+                            %% If no file is defined - forward everything to the default sink
+ false ->
+ ForwarderLevel = proplists:get_value(level,
+ SinkConfig,
+ DefaultLogLevel),
+ [{lager_forwarder_backend,
+ [lager_util:make_internal_sink_name(lager), ForwarderLevel]}];
+                            %% If a file is defined - add a file backend to the handlers and remove all default file backends.
+                            File ->
+                                %% Use `debug` as a default so as not to override a handler level
+ Level = proplists:get_value(level, SinkConfig, DefaultLogLevel),
+ DefaultGeneratedHandlers = application:get_env(lager, rabbit_handlers, []),
+ SinkFileHandlers = case proplists:get_value(lager_file_backend, DefaultGeneratedHandlers, undefined) of
+ undefined ->
+ %% Create a new file handler.
+ %% `info` is a default level here.
+ FileLevel = proplists:get_value(level, SinkConfig, DefaultLogLevel),
+ generate_lager_handlers([{file, [{file, File}, {level, FileLevel}]}]);
+ FileHandler ->
+ %% Replace a filename in the handler
+ FileHandlerChanges = case handler_level_more_verbose(FileHandler, Level) of
+ true -> [{file, File}, {level, Level}];
+ false -> [{file, File}]
+ end,
+
+ [{lager_file_backend,
+ lists:ukeymerge(1, FileHandlerChanges,
+ lists:ukeysort(1, FileHandler))}]
+ end,
+ %% Remove all file handlers.
+ AllLagerHandlers = application:get_env(lager, handlers, []),
+ HandlersWithoutFile = lists:filter(
+ fun({lager_file_backend, _}) -> false;
+ ({_, _}) -> true
+ end,
+ AllLagerHandlers),
+ %% Set level for handlers which are more verbose.
+ %% We don't increase verbosity in sinks so it works like forwarder backend.
+ HandlersWithoutFileWithLevel = lists:map(fun({Name, Handler}) ->
+ case handler_level_more_verbose(Handler, Level) of
+ true -> {Name, lists:keystore(level, 1, Handler, {level, Level})};
+ false -> {Name, Handler}
+ end
+ end,
+ HandlersWithoutFile),
+
+ HandlersWithoutFileWithLevel ++ SinkFileHandlers
+ end,
+ {SinkName, [{handlers, SinkHandlers}, {rabbit_handlers, SinkHandlers}]}
+ end,
+ SinkNames).
+
+handler_level_more_verbose(Handler, Level) ->
+ HandlerLevel = proplists:get_value(level, Handler, default_config_value(level)),
+ lager_util:level_to_num(HandlerLevel) > lager_util:level_to_num(Level).
+
+merge_lager_sink_handlers([{Name, Sink} | RestSinks], GeneratedSinks, Agg) ->
+ %% rabbitmq/rabbitmq-server#2044.
+ %% We have to take into account that a sink's
+ %% handler backend may need additional configuration here.
+ %% {rabbit_log_federation_lager_event, [
+ %% {handlers, [
+ %% {lager_forwarder_backend, [lager_event,inherit]},
+ %% {syslog_lager_backend, [debug]}
+ %% ]},
+ %% {rabbit_handlers, [
+ %% {lager_forwarder_backend, [lager_event,inherit]}
+ %% ]}
+ %% ]}
+ case lists:keytake(Name, 1, GeneratedSinks) of
+ {value, {Name, GenSink}, RestGeneratedSinks} ->
+ Handlers = proplists:get_value(handlers, Sink, []),
+ GenHandlers = proplists:get_value(handlers, GenSink, []),
+ FormerRabbitHandlers = proplists:get_value(rabbit_handlers, Sink, []),
+
+ %% Remove handlers defined in previous starts
+ ConfiguredHandlers = remove_rabbit_handlers(Handlers, FormerRabbitHandlers),
+ NewHandlers = GenHandlers ++ ConfiguredHandlers,
+ ok = maybe_configure_handler_backends(NewHandlers),
+ MergedSink = lists:keystore(rabbit_handlers, 1,
+ lists:keystore(handlers, 1, Sink,
+ {handlers, NewHandlers}),
+ {rabbit_handlers, GenHandlers}),
+ merge_lager_sink_handlers(
+ RestSinks,
+ RestGeneratedSinks,
+ [{Name, MergedSink} | Agg]);
+ false ->
+ merge_lager_sink_handlers(
+ RestSinks,
+ GeneratedSinks,
+ [{Name, Sink} | Agg])
+ end;
+merge_lager_sink_handlers([], GeneratedSinks, Agg) -> GeneratedSinks ++ Agg.
+
+maybe_configure_handler_backends([]) ->
+ ok;
+maybe_configure_handler_backends([{Backend, _}|Backends]) ->
+ ok = configure_handler_backend(Backend),
+ maybe_configure_handler_backends(Backends).
+
+list_expected_sinks() ->
+ rabbit_prelaunch_early_logging:list_expected_sinks().
+
+maybe_remove_logger_handler() ->
+ M = logger,
+ F = remove_handler,
+ try
+ ok = erlang:apply(M, F, [default])
+ catch
+ error:undef ->
+ % OK since the logger module only exists in OTP 21.1 or later
+ ok;
+ error:{badmatch, {error, {not_found, default}}} ->
+ % OK - this error happens when running a CLI command
+ ok;
+ Err:Reason ->
+ error_logger:error_msg("calling ~p:~p failed: ~p:~p~n",
+ [M, F, Err, Reason])
+ end.
+
+get_most_verbose_log_level() ->
+ {ok, HandlersA} = application:get_env(lager, handlers),
+ {ok, ExtraSinks} = application:get_env(lager, extra_sinks),
+ HandlersB = lists:append(
+ [H || {_, Keys} <- ExtraSinks,
+ {handlers, H} <- Keys]),
+ get_most_verbose_log_level(HandlersA ++ HandlersB,
+ lager_util:level_to_num(none)).
+
+get_most_verbose_log_level([{_, Props} | Rest], MostVerbose) ->
+ LogLevel = proplists:get_value(level, Props, info),
+ LogLevelNum = lager_util:level_to_num(LogLevel),
+ case LogLevelNum > MostVerbose of
+ true ->
+ get_most_verbose_log_level(Rest, LogLevelNum);
+ false ->
+ get_most_verbose_log_level(Rest, MostVerbose)
+ end;
+get_most_verbose_log_level([], MostVerbose) ->
+ lager_util:num_to_level(MostVerbose).
diff --git a/deps/rabbit/src/rabbit_limiter.erl b/deps/rabbit/src/rabbit_limiter.erl
new file mode 100644
index 0000000000..d3803957d3
--- /dev/null
+++ b/deps/rabbit/src/rabbit_limiter.erl
@@ -0,0 +1,448 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+%% The purpose of the limiter is to stem the flow of messages from
+%% queues to channels, in order to act upon various protocol-level
+%% flow control mechanisms, specifically AMQP 0-9-1's basic.qos
+%% prefetch_count, our consumer prefetch extension, and AMQP 1.0's
+%% link (aka consumer) credit mechanism.
+%%
+%% Each channel has an associated limiter process, created with
+%% start_link/1, which it passes to queues on consumer creation with
+%% rabbit_amqqueue:basic_consume/10, and rabbit_amqqueue:basic_get/4.
+%% The latter isn't strictly necessary, since basic.get is not
+%% subject to limiting, but it means that whenever a queue knows about
+%% a channel, it also knows about its limiter, which is less fiddly.
+%%
+%% The limiter process holds state that is, in effect, shared between
+%% the channel and all queues from which the channel is
+%% consuming. Essentially all these queues are competing for access to
+%% a single, limited resource - the ability to deliver messages via
+%% the channel - and it is the job of the limiter process to mediate
+%% that access.
+%%
+%% The limiter process is separate from the channel process for two
+%% reasons: separation of concerns, and efficiency. Channels can get
+%% very busy, particularly if they are also dealing with publishes.
+%% With a separate limiter process all the aforementioned access
+%% mediation can take place without touching the channel.
+%%
+%% For efficiency, both the channel and the queues keep some local
+%% state, initialised from the limiter pid with new/1 and client/1,
+%% respectively. In particular this allows them to avoid any
+%% interaction with the limiter process when it is 'inactive', i.e. no
+%% protocol-level flow control is taking place.
+%%
+%% This optimisation does come at the cost of some complexity though:
+%% when a limiter becomes active, the channel needs to inform all its
+%% consumer queues of this change in status. It does this by invoking
+%% rabbit_amqqueue:activate_limit_all/2. Note that there is no inverse
+%% transition, i.e. once a queue has been told about an active
+%% limiter, it is not subsequently told when that limiter becomes
+%% inactive. In practice it is rare for that to happen, though we
+%% could optimise this case in the future.
+%%
+%% Consumer credit (for AMQP 1.0) and per-consumer prefetch (for AMQP
+%% 0-9-1) are treated as essentially the same thing, but with the
+%% exception that per-consumer prefetch gets an auto-topup when
+%% acknowledgments come in.
+%%
+%% The bookkeeping for this is local to queues, so it is not necessary
+%% to store information about it in the limiter process. But for
+%% abstraction we hide it from the queue behind the limiter API, and
+%% it therefore becomes part of the queue local state.
+%%
+%% The interactions with the limiter are as follows:
+%%
+%% 1. Channels tell the limiter about basic.qos prefetch counts -
+%% that's what the limit_prefetch/3, unlimit_prefetch/1,
+%% get_prefetch_limit/1 API functions are about. They also tell the
+%% limiter queue state (via the queue) about consumer credit
+%% changes and message acknowledgement - that's what credit/5 and
+%% ack_from_queue/3 are for.
+%%
+%% 2. Queues also tell the limiter queue state about the queue
+%% becoming empty (via drained/1) and consumers leaving (via
+%% forget_consumer/2).
+%%
+%% 3. Queues register with the limiter - this happens as part of
+%% activate/1.
+%%
+%% 4. The limiter process maintains an internal counter of 'messages
+%% sent but not yet acknowledged', called the 'volume'.
+%%
+%% 5. Queues ask the limiter for permission (with can_send/3) whenever
+%% they want to deliver a message to a channel. The limiter checks
+%% whether a) the volume has not yet reached the prefetch limit,
+%% and b) whether the consumer has enough credit. If so it
+%% increments the volume and tells the queue to proceed. Otherwise
+%% it marks the queue as requiring notification (see below) and
+%% tells the queue not to proceed.
+%%
+%% 6. A queue that has been told to proceed (by the return value of
+%% can_send/3) sends the message to the channel. Conversely, a
+%% queue that has been told not to proceed, will not attempt to
+%% deliver that message, or any future messages, to the
+%% channel. This is accomplished by can_send/3 capturing the
+%% outcome in the local state, where it can be accessed with
+%% is_suspended/1.
+%%
+%% 7. When a channel receives an ack it tells the limiter (via ack/2)
+%% how many messages were ack'ed. The limiter process decrements
+%% the volume and if it falls below the prefetch_count then it
+%% notifies (through rabbit_amqqueue:resume/2) all the queues
+%% requiring notification, i.e. all those that had a can_send/3
+%% request denied.
+%%
+%% 8. Upon receipt of such a notification, queues resume delivery to
+%% the channel, i.e. they will once again start asking limiter, as
+%% described in (5).
+%%
+%% 9. When a queue has no more consumers associated with a particular
+%% channel, it deactivates use of the limiter with deactivate/1,
+%% which alters the local state such that no further interactions
+%% with the limiter process take place until a subsequent
+%% activate/1.
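+%%
+%% A minimal channel-side sketch (illustrative only; the process name,
+%% prefetch count and ack count are made up):
+%%
+%%   {ok, Pid} = rabbit_limiter:start_link(my_limiter),
+%%   L0 = rabbit_limiter:new(Pid),
+%%   L1 = rabbit_limiter:limit_prefetch(L0, 10, 0), % basic.qos prefetch=10
+%%   true = rabbit_limiter:is_active(L1),
+%%   ok = rabbit_limiter:ack(L1, 3). % 3 deliveries acknowledged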
+
+-module(rabbit_limiter).
+
+-include("rabbit.hrl").
+
+-behaviour(gen_server2).
+
+-export([start_link/1]).
+%% channel API
+-export([new/1, limit_prefetch/3, unlimit_prefetch/1, is_active/1,
+ get_prefetch_limit/1, ack/2, pid/1]).
+%% queue API
+-export([client/1, activate/1, can_send/3, resume/1, deactivate/1,
+ is_suspended/1, is_consumer_blocked/2, credit/5, ack_from_queue/3,
+ drained/1, forget_consumer/2]).
+%% callbacks
+-export([init/1, terminate/2, code_change/3, handle_call/3, handle_cast/2,
+ handle_info/2, prioritise_call/4]).
+
+%%----------------------------------------------------------------------------
+
+-record(lstate, {pid, prefetch_limited}).
+-record(qstate, {pid, state, credits}).
+
+-type lstate() :: #lstate{pid :: pid(),
+ prefetch_limited :: boolean()}.
+-type qstate() :: #qstate{pid :: pid(),
+ state :: 'dormant' | 'active' | 'suspended'}.
+
+-type credit_mode() :: 'manual' | 'drain' | 'auto'.
+
+%%----------------------------------------------------------------------------
+
+-record(lim, {prefetch_count = 0,
+ ch_pid,
+ %% 'Notify' is a boolean that indicates whether a queue should be
+ %% notified of a change in the limit or volume that may allow it to
+ %% deliver more messages via the limiter's channel.
+ queues = maps:new(), % QPid -> {MonitorRef, Notify}
+ volume = 0}).
+
+%% mode is of type credit_mode()
+-record(credit, {credit = 0, mode}).
+
+%%----------------------------------------------------------------------------
+%% API
+%%----------------------------------------------------------------------------
+
+-spec start_link(rabbit_types:proc_name()) ->
+ rabbit_types:ok_pid_or_error().
+
+start_link(ProcName) -> gen_server2:start_link(?MODULE, [ProcName], []).
+
+-spec new(pid()) -> lstate().
+
+new(Pid) ->
+    %% this is a 'call' to ensure that it is invoked at most once.
+ ok = gen_server:call(Pid, {new, self()}, infinity),
+ #lstate{pid = Pid, prefetch_limited = false}.
+
+-spec limit_prefetch(lstate(), non_neg_integer(), non_neg_integer()) ->
+ lstate().
+
+limit_prefetch(L, PrefetchCount, UnackedCount) when PrefetchCount > 0 ->
+ ok = gen_server:call(
+ L#lstate.pid,
+ {limit_prefetch, PrefetchCount, UnackedCount}, infinity),
+ L#lstate{prefetch_limited = true}.
+
+-spec unlimit_prefetch(lstate()) -> lstate().
+
+unlimit_prefetch(L) ->
+ ok = gen_server:call(L#lstate.pid, unlimit_prefetch, infinity),
+ L#lstate{prefetch_limited = false}.
+
+-spec is_active(lstate()) -> boolean().
+
+is_active(#lstate{prefetch_limited = Limited}) -> Limited.
+
+-spec get_prefetch_limit(lstate()) -> non_neg_integer().
+
+get_prefetch_limit(#lstate{prefetch_limited = false}) -> 0;
+get_prefetch_limit(L) ->
+ gen_server:call(L#lstate.pid, get_prefetch_limit, infinity).
+
+-spec ack(lstate(), non_neg_integer()) -> 'ok'.
+
+ack(#lstate{prefetch_limited = false}, _AckCount) -> ok;
+ack(L, AckCount) -> gen_server:cast(L#lstate.pid, {ack, AckCount}).
+
+-spec pid(lstate()) -> pid().
+
+pid(#lstate{pid = Pid}) -> Pid.
+
+-spec client(pid()) -> qstate().
+
+client(Pid) -> #qstate{pid = Pid, state = dormant, credits = gb_trees:empty()}.
+
+-spec activate(qstate()) -> qstate().
+
+activate(L = #qstate{state = dormant}) ->
+ ok = gen_server:cast(L#qstate.pid, {register, self()}),
+ L#qstate{state = active};
+activate(L) -> L.
+
+-spec can_send(qstate(), boolean(), rabbit_types:ctag()) ->
+ {'continue' | 'suspend', qstate()}.
+
+can_send(L = #qstate{pid = Pid, state = State, credits = Credits},
+ AckRequired, CTag) ->
+ case is_consumer_blocked(L, CTag) of
+ false -> case (State =/= active orelse
+ safe_call(Pid, {can_send, self(), AckRequired}, true)) of
+ true -> Credits1 = decrement_credit(CTag, Credits),
+ {continue, L#qstate{credits = Credits1}};
+ false -> {suspend, L#qstate{state = suspended}}
+ end;
+ true -> {suspend, L}
+ end.
+
+safe_call(Pid, Msg, ExitValue) ->
+ rabbit_misc:with_exit_handler(
+ fun () -> ExitValue end,
+ fun () -> gen_server2:call(Pid, Msg, infinity) end).
+
+-spec resume(qstate()) -> qstate().
+
+resume(L = #qstate{state = suspended}) ->
+ L#qstate{state = active};
+resume(L) -> L.
+
+-spec deactivate(qstate()) -> qstate().
+
+deactivate(L = #qstate{state = dormant}) -> L;
+deactivate(L) ->
+ ok = gen_server:cast(L#qstate.pid, {unregister, self()}),
+ L#qstate{state = dormant}.
+
+-spec is_suspended(qstate()) -> boolean().
+
+is_suspended(#qstate{state = suspended}) -> true;
+is_suspended(#qstate{}) -> false.
+
+-spec is_consumer_blocked(qstate(), rabbit_types:ctag()) -> boolean().
+
+is_consumer_blocked(#qstate{credits = Credits}, CTag) ->
+ case gb_trees:lookup(CTag, Credits) of
+ none -> false;
+ {value, #credit{credit = C}} when C > 0 -> false;
+ {value, #credit{}} -> true
+ end.
+
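+%% Illustrative only (the consumer tag is made up), with L0 a queue-side
+%% state obtained from client/1: granting 5 units of auto-replenished
+%% credit on a non-empty queue:
+%%   {false, L1} = rabbit_limiter:credit(L0, <<"ctag-1">>, 5, auto, false).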
+-spec credit
+ (qstate(), rabbit_types:ctag(), non_neg_integer(), credit_mode(),
+ boolean()) ->
+ {boolean(), qstate()}.
+
+credit(Limiter = #qstate{credits = Credits}, CTag, Crd, Mode, IsEmpty) ->
+ {Res, Cr} =
+ case IsEmpty andalso Mode =:= drain of
+ true -> {true, #credit{credit = 0, mode = manual}};
+ false -> {false, #credit{credit = Crd, mode = Mode}}
+ end,
+ {Res, Limiter#qstate{credits = enter_credit(CTag, Cr, Credits)}}.
+
+-spec ack_from_queue(qstate(), rabbit_types:ctag(), non_neg_integer()) ->
+ {boolean(), qstate()}.
+
+ack_from_queue(Limiter = #qstate{credits = Credits}, CTag, Credit) ->
+ {Credits1, Unblocked} =
+ case gb_trees:lookup(CTag, Credits) of
+ {value, C = #credit{mode = auto, credit = C0}} ->
+ {update_credit(CTag, C#credit{credit = C0 + Credit}, Credits),
+ C0 =:= 0 andalso Credit =/= 0};
+ _ ->
+ {Credits, false}
+ end,
+ {Unblocked, Limiter#qstate{credits = Credits1}}.
+
+-spec drained(qstate()) ->
+ {[{rabbit_types:ctag(), non_neg_integer()}], qstate()}.
+
+drained(Limiter = #qstate{credits = Credits}) ->
+ Drain = fun(C) -> C#credit{credit = 0, mode = manual} end,
+ {CTagCredits, Credits2} =
+ rabbit_misc:gb_trees_fold(
+ fun (CTag, C = #credit{credit = Crd, mode = drain}, {Acc, Creds0}) ->
+ {[{CTag, Crd} | Acc], update_credit(CTag, Drain(C), Creds0)};
+ (_CTag, #credit{credit = _Crd, mode = _Mode}, {Acc, Creds0}) ->
+ {Acc, Creds0}
+ end, {[], Credits}, Credits),
+ {CTagCredits, Limiter#qstate{credits = Credits2}}.
+
+-spec forget_consumer(qstate(), rabbit_types:ctag()) -> qstate().
+
+forget_consumer(Limiter = #qstate{credits = Credits}, CTag) ->
+ Limiter#qstate{credits = gb_trees:delete_any(CTag, Credits)}.
+
+%%----------------------------------------------------------------------------
+%% Queue-local code
+%%----------------------------------------------------------------------------
+
+%% We want to do all the AMQP 1.0-ish link level credit calculations
+%% in the queue (to do them elsewhere introduces a ton of
+%% races). However, it's a big chunk of code that is conceptually very
+%% linked to the limiter concept. So we get the queue to hold a bit of
+%% state for us (#qstate.credits), and maintain a fiction that the
+%% limiter is making the decisions...
+
+decrement_credit(CTag, Credits) ->
+ case gb_trees:lookup(CTag, Credits) of
+ {value, C = #credit{credit = Credit}} ->
+ update_credit(CTag, C#credit{credit = Credit - 1}, Credits);
+ none ->
+ Credits
+ end.
+
+enter_credit(CTag, C, Credits) ->
+ gb_trees:enter(CTag, ensure_credit_invariant(C), Credits).
+
+update_credit(CTag, C, Credits) ->
+ gb_trees:update(CTag, ensure_credit_invariant(C), Credits).
+
+ensure_credit_invariant(C = #credit{credit = 0, mode = drain}) ->
+ %% Using up all credit implies no need to send a 'drained' event
+ C#credit{mode = manual};
+ensure_credit_invariant(C) ->
+ C.
+
+%%----------------------------------------------------------------------------
+%% gen_server callbacks
+%%----------------------------------------------------------------------------
+
+init([ProcName]) -> ?store_proc_name(ProcName),
+ ?LG_PROCESS_TYPE(limiter),
+ {ok, #lim{}}.
+
+prioritise_call(get_prefetch_limit, _From, _Len, _State) -> 9;
+prioritise_call(_Msg, _From, _Len, _State) -> 0.
+
+handle_call({new, ChPid}, _From, State = #lim{ch_pid = undefined}) ->
+ {reply, ok, State#lim{ch_pid = ChPid}};
+
+handle_call({limit_prefetch, PrefetchCount, UnackedCount}, _From,
+ State = #lim{prefetch_count = 0}) ->
+ {reply, ok, maybe_notify(State, State#lim{prefetch_count = PrefetchCount,
+ volume = UnackedCount})};
+handle_call({limit_prefetch, PrefetchCount, _UnackedCount}, _From, State) ->
+ {reply, ok, maybe_notify(State, State#lim{prefetch_count = PrefetchCount})};
+
+handle_call(unlimit_prefetch, _From, State) ->
+ {reply, ok, maybe_notify(State, State#lim{prefetch_count = 0,
+ volume = 0})};
+
+handle_call(get_prefetch_limit, _From,
+ State = #lim{prefetch_count = PrefetchCount}) ->
+ {reply, PrefetchCount, State};
+
+handle_call({can_send, QPid, AckRequired}, _From,
+ State = #lim{volume = Volume}) ->
+ case prefetch_limit_reached(State) of
+ true -> {reply, false, limit_queue(QPid, State)};
+ false -> {reply, true, State#lim{volume = if AckRequired -> Volume + 1;
+ true -> Volume
+ end}}
+ end.
+
+handle_cast({ack, Count}, State = #lim{volume = Volume}) ->
+ NewVolume = if Volume == 0 -> 0;
+ true -> Volume - Count
+ end,
+ {noreply, maybe_notify(State, State#lim{volume = NewVolume})};
+
+handle_cast({register, QPid}, State) ->
+ {noreply, remember_queue(QPid, State)};
+
+handle_cast({unregister, QPid}, State) ->
+ {noreply, forget_queue(QPid, State)}.
+
+handle_info({'DOWN', _MonitorRef, _Type, QPid, _Info}, State) ->
+ {noreply, forget_queue(QPid, State)}.
+
+terminate(_, _) ->
+ ok.
+
+code_change(_, State, _) ->
+ {ok, State}.
+
+%%----------------------------------------------------------------------------
+%% Internal plumbing
+%%----------------------------------------------------------------------------
+
+maybe_notify(OldState, NewState) ->
+ case prefetch_limit_reached(OldState) andalso
+ not prefetch_limit_reached(NewState) of
+ true -> notify_queues(NewState);
+ false -> NewState
+ end.
+
+prefetch_limit_reached(#lim{prefetch_count = Limit, volume = Volume}) ->
+ Limit =/= 0 andalso Volume >= Limit.
+
+remember_queue(QPid, State = #lim{queues = Queues}) ->
+ case maps:is_key(QPid, Queues) of
+ false -> MRef = erlang:monitor(process, QPid),
+ State#lim{queues = maps:put(QPid, {MRef, false}, Queues)};
+ true -> State
+ end.
+
+forget_queue(QPid, State = #lim{queues = Queues}) ->
+ case maps:find(QPid, Queues) of
+ {ok, {MRef, _}} -> true = erlang:demonitor(MRef),
+ State#lim{queues = maps:remove(QPid, Queues)};
+ error -> State
+ end.
+
+limit_queue(QPid, State = #lim{queues = Queues}) ->
+ UpdateFun = fun ({MRef, _}) -> {MRef, true} end,
+ State#lim{queues = maps:update_with(QPid, UpdateFun, Queues)}.
+
+notify_queues(State = #lim{ch_pid = ChPid, queues = Queues}) ->
+ {QList, NewQueues} =
+ maps:fold(fun (_QPid, {_, false}, Acc) -> Acc;
+ (QPid, {MRef, true}, {L, D}) ->
+ {[QPid | L], maps:put(QPid, {MRef, false}, D)}
+ end, {[], Queues}, Queues),
+ case length(QList) of
+ 0 -> ok;
+ 1 -> ok = rabbit_amqqueue:resume(hd(QList), ChPid); %% common case
+ L ->
+ %% We randomly vary the position of queues in the list,
+ %% thus ensuring that each queue has an equal chance of
+ %% being notified first.
+ {L1, L2} = lists:split(rand:uniform(L), QList),
+ [[ok = rabbit_amqqueue:resume(Q, ChPid) || Q <- L3]
+ || L3 <- [L2, L1]],
+ ok
+ end,
+ State#lim{queues = NewQueues}.
diff --git a/deps/rabbit/src/rabbit_log_tail.erl b/deps/rabbit/src/rabbit_log_tail.erl
new file mode 100644
index 0000000000..c3faad07fc
--- /dev/null
+++ b/deps/rabbit/src/rabbit_log_tail.erl
@@ -0,0 +1,102 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_log_tail).
+
+-export([tail_n_lines/2]).
+-export([init_tail_stream/4]).
+
+-define(GUESS_OFFSET, 200).
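+
+%% The tail functions guess backwards in ?GUESS_OFFSET-byte steps: e.g.
+%% tail_n_lines(File, 10) first reads from max(0, Eof - 10 * 200) to Eof
+%% and, if that span holds fewer than 10 complete lines, moves the guess
+%% further back and retries.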
+
+init_tail_stream(Filename, Pid, Ref, Duration) ->
+ RPCProc = self(),
+ Reader = spawn(fun() ->
+ link(Pid),
+ case file:open(Filename, [read, binary]) of
+ {ok, File} ->
+ TimeLimit = case Duration of
+ infinity -> infinity;
+ _ -> erlang:system_time(second) + Duration
+ end,
+ {ok, _} = file:position(File, eof),
+ RPCProc ! {Ref, opened},
+ read_loop(File, Pid, Ref, TimeLimit);
+ {error, _} = Err ->
+ RPCProc ! {Ref, Err}
+ end
+ end),
+ receive
+ {Ref, opened} -> {ok, Ref};
+ {Ref, {error, Err}} -> {error, Err}
+ after 5000 ->
+ exit(Reader, timeout),
+ {error, timeout}
+ end.
+
+read_loop(File, Pid, Ref, TimeLimit) ->
+ case is_integer(TimeLimit) andalso erlang:system_time(second) > TimeLimit of
+ true -> Pid ! {Ref, <<>>, finished};
+ false ->
+ case file:read(File, ?GUESS_OFFSET) of
+ {ok, Data} ->
+ Pid ! {Ref, Data, continue},
+ read_loop(File, Pid, Ref, TimeLimit);
+ eof ->
+ timer:sleep(1000),
+ read_loop(File, Pid, Ref, TimeLimit);
+ {error, _} = Err ->
+ Pid ! {Ref, Err, finished}
+ end
+ end.
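+
+%% A receiver of this stream might look like the sketch below (assuming
+%% the Ref returned by init_tail_stream/4); read errors arrive as the
+%% Data element of the final message:
+%%
+%%   collect(Ref, Acc) ->
+%%       receive
+%%           {Ref, Data, continue}             -> collect(Ref, [Data | Acc]);
+%%           {Ref, {error, _} = Err, finished} -> Err;
+%%           {Ref, Data, finished}             -> lists:reverse([Data | Acc])
+%%       end.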
+
+tail_n_lines(Filename, N) ->
+ case file:open(Filename, [read, binary]) of
+ {ok, File} ->
+ {ok, Eof} = file:position(File, eof),
+ %% Eof may move. Only read up to the current one.
+ Result = reverse_read_n_lines(N, N, File, Eof, Eof),
+ file:close(File),
+ Result;
+ {error, _} = Error -> Error
+ end.
+
+reverse_read_n_lines(N, OffsetN, File, Position, Eof) ->
+ GuessPosition = offset(Position, OffsetN),
+ case read_lines_from_position(File, GuessPosition, Eof) of
+ {ok, Lines} ->
+ NLines = length(Lines),
+ case {NLines >= N, GuessPosition == 0} of
+ %% Take only the last N lines if we read more
+ {true, _} -> lists:nthtail(NLines - N, Lines);
+ %% Safe to assume that NLines is less than N
+ {_, true} -> Lines;
+ %% Adjust position
+ _ ->
+ reverse_read_n_lines(N, N - NLines + 1, File, GuessPosition, Eof)
+ end;
+ {error, _} = Error -> Error
+ end.
+
+read_from_position(File, GuessPosition, Eof) ->
+ file:pread(File, GuessPosition, max(0, Eof - GuessPosition)).
+
+read_lines_from_position(File, GuessPosition, Eof) ->
+ case read_from_position(File, GuessPosition, Eof) of
+ {ok, Data} ->
+ Lines = binary:split(Data, <<"\n">>, [global, trim]),
+ case {GuessPosition, Lines} of
+ %% If position is 0 - there are no partial lines
+ {0, _} -> {ok, Lines};
+ %% Remove first line as it can be partial
+ {_, [_ | Rest]} -> {ok, Rest};
+ {_, []} -> {ok, []}
+ end;
+ {error, _} = Error -> Error
+ end.
+
+offset(Base, N) ->
+ max(0, Base - N * ?GUESS_OFFSET).
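+
+%% Usage sketch (hypothetical path): fetch the last 100 lines of a log
+%% file. The file is scanned backwards in ?GUESS_OFFSET-byte steps until
+%% at least N complete lines have been collected:
+%%
+%%   Lines = rabbit_log_tail:tail_n_lines("/var/log/rabbitmq/rabbit.log", 100).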
diff --git a/deps/rabbit/src/rabbit_looking_glass.erl b/deps/rabbit/src/rabbit_looking_glass.erl
new file mode 100644
index 0000000000..00b1b6d46b
--- /dev/null
+++ b/deps/rabbit/src/rabbit_looking_glass.erl
@@ -0,0 +1,48 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_looking_glass).
+
+-ignore_xref([{lg, trace, 4}]).
+-ignore_xref([{maps, from_list, 1}]).
+
+-export([boot/0]).
+-export([connections/0]).
+
+boot() ->
+ case os:getenv("RABBITMQ_TRACER") of
+ false ->
+ ok;
+ Value ->
+ Input = parse_value(Value),
+ rabbit_log:info(
+ "Enabling Looking Glass profiler, input value: ~p",
+ [Input]
+ ),
+ {ok, _} = application:ensure_all_started(looking_glass),
+ lg:trace(
+ Input,
+ lg_file_tracer,
+ "traces.lz4",
+ maps:from_list([
+ {mode, profile},
+ {process_dump, true},
+ {running, true},
+ {send, true}]
+ )
+ )
+ end.
+
+parse_value(Value) ->
+ [begin
+ [Mod, Fun] = string:tokens(C, ":"),
+ {callback, list_to_atom(Mod), list_to_atom(Fun)}
+ end || C <- string:tokens(Value, ",")].
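+
+%% For example (illustrative): RABBITMQ_TRACER="rabbit_looking_glass:connections"
+%% parses to [{callback, rabbit_looking_glass, connections}]; lg will then
+%% call rabbit_looking_glass:connections() to obtain its trace input.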
+
+connections() ->
+ Pids = [Pid || {{conns_sup, _}, Pid} <- ets:tab2list(ranch_server)],
+ ['_', {scope, Pids}].
diff --git a/deps/rabbit/src/rabbit_maintenance.erl b/deps/rabbit/src/rabbit_maintenance.erl
new file mode 100644
index 0000000000..e5434dc888
--- /dev/null
+++ b/deps/rabbit/src/rabbit_maintenance.erl
@@ -0,0 +1,354 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2018-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_maintenance).
+
+-include("rabbit.hrl").
+
+-export([
+ is_enabled/0,
+ drain/0,
+ revive/0,
+ mark_as_being_drained/0,
+ unmark_as_being_drained/0,
+ is_being_drained_local_read/1,
+ is_being_drained_consistent_read/1,
+ status_local_read/1,
+ status_consistent_read/1,
+ filter_out_drained_nodes_local_read/1,
+ filter_out_drained_nodes_consistent_read/1,
+ suspend_all_client_listeners/0,
+ resume_all_client_listeners/0,
+ close_all_client_connections/0,
+ primary_replica_transfer_candidate_nodes/0,
+ random_primary_replica_transfer_candidate_node/1,
+ transfer_leadership_of_quorum_queues/1,
+ transfer_leadership_of_classic_mirrored_queues/1,
+ status_table_name/0,
+ status_table_definition/0
+]).
+
+-define(TABLE, rabbit_node_maintenance_states).
+-define(FEATURE_FLAG, maintenance_mode_status).
+-define(DEFAULT_STATUS, regular).
+-define(DRAINING_STATUS, draining).
+
+-type maintenance_status() :: ?DEFAULT_STATUS | ?DRAINING_STATUS.
+-type mnesia_table() :: atom().
+
+-export_type([
+ maintenance_status/0
+]).
+
+%%
+%% API
+%%
+
+-spec status_table_name() -> mnesia_table().
+status_table_name() ->
+ ?TABLE.
+
+-spec status_table_definition() -> list().
+status_table_definition() ->
+ maps:to_list(#{
+ record_name => node_maintenance_state,
+ attributes => record_info(fields, node_maintenance_state)
+ }).
+
+-spec is_enabled() -> boolean().
+is_enabled() ->
+ rabbit_feature_flags:is_enabled(?FEATURE_FLAG).
+
+-spec drain() -> ok.
+drain() ->
+ case is_enabled() of
+ true -> do_drain();
+ false -> rabbit_log:warning("Feature flag `~s` is not enabled, draining is a no-op", [?FEATURE_FLAG])
+ end.
+
+-spec do_drain() -> ok.
+do_drain() ->
+ rabbit_log:alert("This node is being put into maintenance (drain) mode"),
+ mark_as_being_drained(),
+ rabbit_log:info("Marked this node as undergoing maintenance"),
+ suspend_all_client_listeners(),
+ rabbit_log:alert("Suspended all listeners and will no longer accept client connections"),
+ {ok, NConnections} = close_all_client_connections(),
+ %% allow plugins to react e.g. by closing their protocol connections
+ rabbit_event:notify(maintenance_connections_closed, #{
+ reason => <<"node is being put into maintenance">>
+ }),
+ rabbit_log:alert("Closed ~b local client connections", [NConnections]),
+
+ TransferCandidates = primary_replica_transfer_candidate_nodes(),
+ ReadableCandidates = readable_candidate_list(TransferCandidates),
+ rabbit_log:info("Node will transfer primary replicas of its queues to ~b peers: ~s",
+ [length(TransferCandidates), ReadableCandidates]),
+ transfer_leadership_of_classic_mirrored_queues(TransferCandidates),
+ transfer_leadership_of_quorum_queues(TransferCandidates),
+ stop_local_quorum_queue_followers(),
+
+ %% allow plugins to react
+ rabbit_event:notify(maintenance_draining, #{
+ reason => <<"node is being put into maintenance">>
+ }),
+ rabbit_log:alert("Node is ready to be shut down for maintenance or upgrade"),
+
+ ok.
+
+-spec revive() -> ok.
+revive() ->
+ case is_enabled() of
+ true -> do_revive();
+ false -> rabbit_log:warning("Feature flag `~s` is not enabled, reviving is a no-op", [?FEATURE_FLAG])
+ end.
+
+-spec do_revive() -> ok.
+do_revive() ->
+ rabbit_log:alert("This node is being revived from maintenance (drain) mode"),
+ revive_local_quorum_queue_replicas(),
+ rabbit_log:alert("Resumed all listeners and will accept client connections again"),
+ resume_all_client_listeners(),
+ rabbit_log:alert("Resumed all listeners and will accept client connections again"),
+ unmark_as_being_drained(),
+ rabbit_log:info("Marked this node as back from maintenance and ready to serve clients"),
+
+ %% allow plugins to react
+ rabbit_event:notify(maintenance_revived, #{}),
+
+ ok.
+
+-spec mark_as_being_drained() -> boolean().
+mark_as_being_drained() ->
+ rabbit_log:debug("Marking the node as undergoing maintenance"),
+ set_maintenance_status(?DRAINING_STATUS).
+
+-spec unmark_as_being_drained() -> boolean().
+unmark_as_being_drained() ->
+ rabbit_log:debug("Unmarking the node as undergoing maintenance"),
+ set_maintenance_status(?DEFAULT_STATUS).
+
+set_maintenance_status(Status) ->
+ Res = mnesia:transaction(fun () ->
+ case mnesia:wread({?TABLE, node()}) of
+ [] ->
+ Row = #node_maintenance_state{
+ node = node(),
+ status = Status
+ },
+ mnesia:write(?TABLE, Row, write);
+ [Row0] ->
+ Row = Row0#node_maintenance_state{
+ node = node(),
+ status = Status
+ },
+ mnesia:write(?TABLE, Row, write)
+ end
+ end),
+ case Res of
+ {atomic, ok} -> true;
+ _ -> false
+ end.
+
+
+-spec is_being_drained_local_read(node()) -> boolean().
+is_being_drained_local_read(Node) ->
+ Status = status_local_read(Node),
+ Status =:= ?DRAINING_STATUS.
+
+-spec is_being_drained_consistent_read(node()) -> boolean().
+is_being_drained_consistent_read(Node) ->
+ Status = status_consistent_read(Node),
+ Status =:= ?DRAINING_STATUS.
+
+-spec status_local_read(node()) -> maintenance_status().
+status_local_read(Node) ->
+ case catch mnesia:dirty_read(?TABLE, Node) of
+ [] -> ?DEFAULT_STATUS;
+ [#node_maintenance_state{node = Node, status = Status}] ->
+ Status;
+ _ -> ?DEFAULT_STATUS
+ end.
+
+-spec status_consistent_read(node()) -> maintenance_status().
+status_consistent_read(Node) ->
+ case mnesia:transaction(fun() -> mnesia:read(?TABLE, Node) end) of
+ {atomic, []} -> ?DEFAULT_STATUS;
+ {atomic, [#node_maintenance_state{node = Node, status = Status}]} ->
+ Status;
+ {atomic, _} -> ?DEFAULT_STATUS;
+ {aborted, _Reason} -> ?DEFAULT_STATUS
+ end.
+
+-spec filter_out_drained_nodes_local_read([node()]) -> [node()].
+filter_out_drained_nodes_local_read(Nodes) ->
+ lists:filter(fun(N) -> not is_being_drained_local_read(N) end, Nodes).
+
+-spec filter_out_drained_nodes_consistent_read([node()]) -> [node()].
+filter_out_drained_nodes_consistent_read(Nodes) ->
+ lists:filter(fun(N) -> not is_being_drained_consistent_read(N) end, Nodes).
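+
+%% E.g. (illustrative node names): if rabbit@b is currently draining,
+%% filtering [rabbit@a, rabbit@b] yields [rabbit@a]. The local-read
+%% variant costs a dirty read per node, the consistent-read variant a
+%% transaction per node.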
+
+-spec suspend_all_client_listeners() -> rabbit_types:ok_or_error(any()).
+%% Pauses all listeners on the current node except for
+%% Erlang distribution (clustering and CLI tools).
+%% A suspended listener will not accept any new client connections
+%% but previously established connections won't be interrupted.
+suspend_all_client_listeners() ->
+ Listeners = rabbit_networking:node_client_listeners(node()),
+ rabbit_log:info("Asked to suspend ~b client connection listeners. "
+ "No new client connections will be accepted until these listeners are resumed!", [length(Listeners)]),
+ Results = lists:foldl(local_listener_fold_fun(fun ranch:suspend_listener/1), [], Listeners),
+ lists:foldl(fun ok_or_first_error/2, ok, Results).
+
+-spec resume_all_client_listeners() -> rabbit_types:ok_or_error(any()).
+%% Resumes all listeners on the current node except for
+%% Erlang distribution (clustering and CLI tools).
+%% A resumed listener will accept new client connections.
+resume_all_client_listeners() ->
+ Listeners = rabbit_networking:node_client_listeners(node()),
+ rabbit_log:info("Asked to resume ~b client connection listeners. "
+ "New client connections will be accepted from now on", [length(Listeners)]),
+ Results = lists:foldl(local_listener_fold_fun(fun ranch:resume_listener/1), [], Listeners),
+ lists:foldl(fun ok_or_first_error/2, ok, Results).
+
+-spec close_all_client_connections() -> {'ok', non_neg_integer()}.
+close_all_client_connections() ->
+ Pids = rabbit_networking:local_connections(),
+ rabbit_networking:close_connections(Pids, "Node was put into maintenance mode"),
+ {ok, length(Pids)}.
+
+-spec transfer_leadership_of_quorum_queues([node()]) -> ok.
+transfer_leadership_of_quorum_queues([]) ->
+ rabbit_log:warning("Skipping leadership transfer of quorum queues: no candidate "
+ "(online, not under maintenance) nodes to transfer to!");
+transfer_leadership_of_quorum_queues(_TransferCandidates) ->
+ %% we only transfer leadership for QQs that have local leaders
+ Queues = rabbit_amqqueue:list_local_leaders(),
+ rabbit_log:info("Will transfer leadership of ~b quorum queues with current leader on this node",
+ [length(Queues)]),
+ [begin
+ Name = amqqueue:get_name(Q),
+ rabbit_log:debug("Will trigger a leader election for local quorum queue ~s",
+ [rabbit_misc:rs(Name)]),
+ %% we trigger an election and exclude this node from the list of candidates
+ %% by simply shutting its local QQ replica (Ra server)
+ RaLeader = amqqueue:get_pid(Q),
+ rabbit_log:debug("Will stop Ra server ~p", [RaLeader]),
+ case ra:stop_server(RaLeader) of
+ ok ->
+ rabbit_log:debug("Successfully stopped Ra server ~p", [RaLeader]);
+ {error, nodedown} ->
+ rabbit_log:error("Failed to stop Ra server ~p: target node was reported as down")
+ end
+ end || Q <- Queues],
+ rabbit_log:info("Leadership transfer for quorum queues hosted on this node has been initiated").
+
+-spec transfer_leadership_of_classic_mirrored_queues([node()]) -> ok.
+transfer_leadership_of_classic_mirrored_queues([]) ->
+ rabbit_log:warning("Skipping leadership transfer of classic mirrored queues: no candidate "
+ "(online, not under maintenance) nodes to transfer to!");
+transfer_leadership_of_classic_mirrored_queues(TransferCandidates) ->
+ Queues = rabbit_amqqueue:list_local_mirrored_classic_queues(),
+ ReadableCandidates = readable_candidate_list(TransferCandidates),
+ rabbit_log:info("Will transfer leadership of ~b classic mirrored queues hosted on this node to these peer nodes: ~s",
+ [length(Queues), ReadableCandidates]),
+
+ [begin
+ Name = amqqueue:get_name(Q),
+ case random_primary_replica_transfer_candidate_node(TransferCandidates) of
+ {ok, Pick} ->
+ rabbit_log:debug("Will transfer leadership of local queue ~s to node ~s",
+ [rabbit_misc:rs(Name), Pick]),
+ case rabbit_mirror_queue_misc:transfer_leadership(Q, Pick) of
+ {migrated, _} ->
+ rabbit_log:debug("Successfully transferred leadership of queue ~s to node ~s",
+ [rabbit_misc:rs(Name), Pick]);
+ Other ->
+ rabbit_log:warning("Could not transfer leadership of queue ~s to node ~s: ~p",
+ [rabbit_misc:rs(Name), Pick, Other])
+ end;
+ undefined ->
+ rabbit_log:warning("Could not transfer leadership of queue ~s: no suitable candidates?",
+ [rabbit_misc:rs(Name)])
+ end
+ end || Q <- Queues],
+ rabbit_log:info("Leadership transfer for local classic mirrored queues is complete").
+
+-spec stop_local_quorum_queue_followers() -> ok.
+stop_local_quorum_queue_followers() ->
+ Queues = rabbit_amqqueue:list_local_followers(),
+ rabbit_log:info("Will stop local follower replicas of ~b quorum queues on this node",
+ [length(Queues)]),
+ [begin
+ Name = amqqueue:get_name(Q),
+ rabbit_log:debug("Will stop a local follower replica of quorum queue ~s",
+ [rabbit_misc:rs(Name)]),
+ %% shut down Ra nodes so that they are not considered for leader election
+ {RegisteredName, _LeaderNode} = amqqueue:get_pid(Q),
+ RaNode = {RegisteredName, node()},
+ rabbit_log:debug("Will stop Ra server ~p", [RaNode]),
+ case ra:stop_server(RaNode) of
+ ok ->
+ rabbit_log:debug("Successfully stopped Ra server ~p", [RaNode]);
+ {error, nodedown} ->
+ rabbit_log:error("Failed to stop Ra server ~p: target node was reported as down")
+ end
+ end || Q <- Queues],
+ rabbit_log:info("Stopped all local replicas of quorum queues hosted on this node").
+
+-spec primary_replica_transfer_candidate_nodes() -> [node()].
+primary_replica_transfer_candidate_nodes() ->
+ filter_out_drained_nodes_consistent_read(rabbit_nodes:all_running() -- [node()]).
+
+-spec random_primary_replica_transfer_candidate_node([node()]) -> {ok, node()} | undefined.
+random_primary_replica_transfer_candidate_node([]) ->
+ undefined;
+random_primary_replica_transfer_candidate_node(Candidates) ->
+ Nth = erlang:phash2(erlang:monotonic_time(), length(Candidates)),
+ Candidate = lists:nth(Nth + 1, Candidates),
+ {ok, Candidate}.
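+
+%% E.g. (illustrative): with Candidates = [a, b, c], phash2/2 hashes the
+%% monotonic clock onto 0..2, and lists:nth/2 (1-based) picks one of the
+%% three nodes pseudo-randomly.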
+
+revive_local_quorum_queue_replicas() ->
+ Queues = rabbit_amqqueue:list_local_followers(),
+ [begin
+ Name = amqqueue:get_name(Q),
+ rabbit_log:debug("Will trigger a leader election for local quorum queue ~s",
+ [rabbit_misc:rs(Name)]),
+ %% start local QQ replica (Ra server) of this queue
+ {Prefix, _Node} = amqqueue:get_pid(Q),
+ RaServer = {Prefix, node()},
+ rabbit_log:debug("Will start Ra server ~p", [RaServer]),
+ case ra:restart_server(RaServer) of
+ ok ->
+ rabbit_log:debug("Successfully restarted Ra server ~p", [RaServer]);
+ {error, {already_started, _Pid}} ->
+ rabbit_log:debug("Ra server ~p is already running", [RaServer]);
+ {error, nodedown} ->
+ rabbit_log:error("Failed to restart Ra server ~p: target node was reported as down")
+ end
+ end || Q <- Queues],
+ rabbit_log:info("Restart of local quorum queue replicas is complete").
+
+%%
+%% Implementation
+%%
+
+local_listener_fold_fun(Fun) ->
+ fun(#listener{node = Node, ip_address = Addr, port = Port}, Acc) when Node =:= node() ->
+ RanchRef = rabbit_networking:ranch_ref(Addr, Port),
+ [Fun(RanchRef) | Acc];
+ (_, Acc) ->
+ Acc
+ end.
+
+ok_or_first_error(ok, Acc) ->
+ Acc;
+ok_or_first_error({error, _} = Err, _Acc) ->
+ Err.
+
+readable_candidate_list(Nodes) ->
+ string:join(lists:map(fun rabbit_data_coercion:to_list/1, Nodes), ", ").
diff --git a/deps/rabbit/src/rabbit_memory_monitor.erl b/deps/rabbit/src/rabbit_memory_monitor.erl
new file mode 100644
index 0000000000..5934a97cff
--- /dev/null
+++ b/deps/rabbit/src/rabbit_memory_monitor.erl
@@ -0,0 +1,259 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+
+%% This module handles the node-wide memory statistics.
+%% It receives duration reports from all queues, computes the desired
+%% queue duration (in seconds), and sends this target back to the
+%% queues.
+
+-module(rabbit_memory_monitor).
+
+-behaviour(gen_server2).
+
+-export([start_link/0, register/2, deregister/1,
+ report_ram_duration/2, stop/0, conserve_resources/3, memory_use/1]).
+
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
+ terminate/2, code_change/3]).
+
+-record(process, {pid, reported, sent, callback, monitor}).
+
+-record(state, {timer, %% 'internal_update' timer
+ queue_durations, %% ets #process
+ queue_duration_sum, %% sum of all queue_durations
+ queue_duration_count, %% number of elements in sum
+ desired_duration, %% the desired queue duration
+ disk_alarm %% disable paging, disk alarm has fired
+ }).
+
+-define(SERVER, ?MODULE).
+-define(TABLE_NAME, ?MODULE).
+
+%% If all queues are pushed to disk (duration 0), then the sum of
+%% their reported lengths will be 0. If memory then becomes available,
+%% unless we manually intervene, the sum will remain 0, and the queues
+%% will never get a non-zero duration. Thus when the mem use is <
+%% SUM_INC_THRESHOLD, increase the sum artificially by SUM_INC_AMOUNT.
+-define(SUM_INC_THRESHOLD, 0.95).
+-define(SUM_INC_AMOUNT, 1.0).
+
+-define(EPSILON, 0.000001). %% less than this and we clamp to 0
+
+%%----------------------------------------------------------------------------
+%% Public API
+%%----------------------------------------------------------------------------
+
+-spec start_link() -> rabbit_types:ok_pid_or_error().
+
+start_link() ->
+ gen_server2:start_link({local, ?SERVER}, ?MODULE, [], []).
+
+-spec register(pid(), {atom(),atom(),[any()]}) -> 'ok'.
+
+register(Pid, MFA = {_M, _F, _A}) ->
+ gen_server2:call(?SERVER, {register, Pid, MFA}, infinity).
+
+-spec deregister(pid()) -> 'ok'.
+
+deregister(Pid) ->
+ gen_server2:cast(?SERVER, {deregister, Pid}).
+
+-spec report_ram_duration
+ (pid(), float() | 'infinity') -> number() | 'infinity'.
+
+report_ram_duration(Pid, QueueDuration) ->
+ gen_server2:call(?SERVER,
+ {report_ram_duration, Pid, QueueDuration}, infinity).
+
+-spec stop() -> 'ok'.
+
+stop() ->
+ gen_server2:cast(?SERVER, stop).
+
+%% Paging should be enabled/disabled only in response to disk resource alarms
+%% for the current node.
+conserve_resources(Pid, disk, {_, Conserve, Node}) when node(Pid) =:= Node ->
+ gen_server2:cast(Pid, {disk_alarm, Conserve});
+conserve_resources(_Pid, _Source, _Conserve) ->
+ ok.
+
+memory_use(Type) ->
+ vm_memory_monitor:get_memory_use(Type).
+
+%%----------------------------------------------------------------------------
+%% Gen_server callbacks
+%%----------------------------------------------------------------------------
+
+init([]) ->
+ {ok, Interval} = application:get_env(rabbit, memory_monitor_interval),
+ {ok, TRef} = timer:send_interval(Interval, update),
+
+ Ets = ets:new(?TABLE_NAME, [set, private, {keypos, #process.pid}]),
+ Alarms = rabbit_alarm:register(self(), {?MODULE, conserve_resources, []}),
+ {ok, internal_update(
+ #state { timer = TRef,
+ queue_durations = Ets,
+ queue_duration_sum = 0.0,
+ queue_duration_count = 0,
+ desired_duration = infinity,
+ disk_alarm = lists:member(disk, Alarms)})}.
+
+handle_call({report_ram_duration, Pid, QueueDuration}, From,
+ State = #state { queue_duration_sum = Sum,
+ queue_duration_count = Count,
+ queue_durations = Durations,
+ desired_duration = SendDuration }) ->
+
+ [Proc = #process { reported = PrevQueueDuration }] =
+ ets:lookup(Durations, Pid),
+
+ gen_server2:reply(From, SendDuration),
+
+ {Sum1, Count1} =
+ case {PrevQueueDuration, QueueDuration} of
+ {infinity, infinity} -> {Sum, Count};
+ {infinity, _} -> {Sum + QueueDuration, Count + 1};
+ {_, infinity} -> {Sum - PrevQueueDuration, Count - 1};
+ {_, _} -> {Sum - PrevQueueDuration + QueueDuration,
+ Count}
+ end,
+ true = ets:insert(Durations, Proc #process { reported = QueueDuration,
+ sent = SendDuration }),
+ {noreply, State #state { queue_duration_sum = zero_clamp(Sum1),
+ queue_duration_count = Count1 }};
+
+handle_call({register, Pid, MFA}, _From,
+ State = #state { queue_durations = Durations }) ->
+ MRef = erlang:monitor(process, Pid),
+ true = ets:insert(Durations, #process { pid = Pid, reported = infinity,
+ sent = infinity, callback = MFA,
+ monitor = MRef }),
+ {reply, ok, State};
+
+handle_call(_Request, _From, State) ->
+ {noreply, State}.
+
+handle_cast({disk_alarm, Alarm}, State = #state{disk_alarm = Alarm}) ->
+ {noreply, State};
+
+handle_cast({disk_alarm, Alarm}, State) ->
+ {noreply, internal_update(State#state{disk_alarm = Alarm})};
+
+handle_cast({deregister, Pid}, State) ->
+ {noreply, internal_deregister(Pid, true, State)};
+
+handle_cast(stop, State) ->
+ {stop, normal, State};
+
+handle_cast(_Request, State) ->
+ {noreply, State}.
+
+handle_info(update, State) ->
+ {noreply, internal_update(State)};
+
+handle_info({'DOWN', _MRef, process, Pid, _Reason}, State) ->
+ {noreply, internal_deregister(Pid, false, State)};
+
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+terminate(_Reason, #state { timer = TRef }) ->
+ timer:cancel(TRef),
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+
+%%----------------------------------------------------------------------------
+%% Internal functions
+%%----------------------------------------------------------------------------
+
+zero_clamp(Sum) when Sum < ?EPSILON -> 0.0;
+zero_clamp(Sum) -> Sum.
+
+internal_deregister(Pid, Demonitor,
+ State = #state { queue_duration_sum = Sum,
+ queue_duration_count = Count,
+ queue_durations = Durations }) ->
+ case ets:lookup(Durations, Pid) of
+ [] -> State;
+ [#process { reported = PrevQueueDuration, monitor = MRef }] ->
+ true = case Demonitor of
+ true -> erlang:demonitor(MRef);
+ false -> true
+ end,
+ {Sum1, Count1} =
+ case PrevQueueDuration of
+ infinity -> {Sum, Count};
+ _ -> {zero_clamp(Sum - PrevQueueDuration),
+ Count - 1}
+ end,
+ true = ets:delete(Durations, Pid),
+ State #state { queue_duration_sum = Sum1,
+ queue_duration_count = Count1 }
+ end.
+
+internal_update(State = #state{queue_durations = Durations,
+ desired_duration = DesiredDurationAvg,
+ disk_alarm = DiskAlarm}) ->
+ DesiredDurationAvg1 = desired_duration_average(State),
+ ShouldInform = should_inform_predicate(DiskAlarm),
+ case ShouldInform(DesiredDurationAvg, DesiredDurationAvg1) of
+ true -> inform_queues(ShouldInform, DesiredDurationAvg1, Durations);
+ false -> ok
+ end,
+ State#state{desired_duration = DesiredDurationAvg1}.
+
+desired_duration_average(#state{disk_alarm = true}) ->
+ infinity;
+desired_duration_average(#state{disk_alarm = false,
+ queue_duration_sum = Sum,
+ queue_duration_count = Count}) ->
+ {ok, LimitThreshold} =
+ application:get_env(rabbit, vm_memory_high_watermark_paging_ratio),
+ MemoryRatio = memory_use(ratio),
+ if MemoryRatio =:= infinity ->
+ 0.0;
+ MemoryRatio < LimitThreshold orelse Count == 0 ->
+ infinity;
+ MemoryRatio < ?SUM_INC_THRESHOLD ->
+ ((Sum + ?SUM_INC_AMOUNT) / Count) / MemoryRatio;
+ true ->
+ (Sum / Count) / MemoryRatio
+ end.
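+
+%% Worked example (illustrative figures): with queue_duration_sum = 10.0
+%% seconds over queue_duration_count = 4 queues and memory_use(ratio) = 0.5
+%% (at or above the paging threshold, below ?SUM_INC_THRESHOLD), the target
+%% is ((10.0 + ?SUM_INC_AMOUNT) / 4) / 0.5 = 5.5 seconds. A higher memory
+%% ratio shrinks the target, so queues page out to disk sooner.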
+
+inform_queues(ShouldInform, DesiredDurationAvg, Durations) ->
+ true =
+ ets:foldl(
+ fun (Proc = #process{reported = QueueDuration,
+ sent = PrevSendDuration,
+ callback = {M, F, A}}, true) ->
+ case ShouldInform(PrevSendDuration, DesiredDurationAvg)
+ andalso ShouldInform(QueueDuration, DesiredDurationAvg) of
+ true -> ok = erlang:apply(
+ M, F, A ++ [DesiredDurationAvg]),
+ ets:insert(
+ Durations,
+ Proc#process{sent = DesiredDurationAvg});
+ false -> true
+ end
+ end, true, Durations).
+
+%% In normal use, we only inform queues immediately if the desired
+%% duration has decreased, since we want to ensure timely paging.
+should_inform_predicate(false) -> fun greater_than/2;
+%% When the disk alarm has gone off though, we want to inform queues
+%% immediately if the desired duration has *increased* - we want to
+%% ensure that paging stops in a timely fashion.
+should_inform_predicate(true) -> fun (D1, D2) -> greater_than(D2, D1) end.
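+
+%% E.g. (illustrative): with no disk alarm, a drop from 5.0 to 2.0 seconds
+%% triggers an immediate inform (greater_than(5.0, 2.0) = true); with the
+%% alarm raised, only an increase such as 2.0 -> infinity does.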
+
+greater_than(infinity, infinity) -> false;
+greater_than(infinity, _D2) -> true;
+greater_than(_D1, infinity) -> false;
+greater_than(D1, D2) -> D1 > D2.
diff --git a/deps/rabbit/src/rabbit_metrics.erl b/deps/rabbit/src/rabbit_metrics.erl
new file mode 100644
index 0000000000..10418e3884
--- /dev/null
+++ b/deps/rabbit/src/rabbit_metrics.erl
@@ -0,0 +1,45 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_metrics).
+
+-behaviour(gen_server).
+
+-export([start_link/0]).
+
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
+ code_change/3]).
+
+-define(SERVER, ?MODULE).
+
+%%----------------------------------------------------------------------------
+%% Starts the raw metrics storage and owns the ETS tables.
+%%----------------------------------------------------------------------------
+
+-spec start_link() -> rabbit_types:ok_pid_or_error().
+
+start_link() ->
+ gen_server:start_link({local, ?SERVER}, ?MODULE, [], []).
+
+init([]) ->
+ rabbit_core_metrics:init(),
+ {ok, none}.
+
+handle_call(_Request, _From, State) ->
+ {noreply, State}.
+
+handle_cast(_Request, State) ->
+ {noreply, State}.
+
+handle_info(_Msg, State) ->
+ {noreply, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
diff --git a/deps/rabbit/src/rabbit_mirror_queue_coordinator.erl b/deps/rabbit/src/rabbit_mirror_queue_coordinator.erl
new file mode 100644
index 0000000000..91a7c3ddc8
--- /dev/null
+++ b/deps/rabbit/src/rabbit_mirror_queue_coordinator.erl
@@ -0,0 +1,460 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2010-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mirror_queue_coordinator).
+
+-export([start_link/4, get_gm/1, ensure_monitoring/2]).
+
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
+ code_change/3, handle_pre_hibernate/1]).
+
+-export([joined/2, members_changed/3, handle_msg/3, handle_terminate/2]).
+
+-behaviour(gen_server2).
+-behaviour(gm).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+-include("amqqueue.hrl").
+-include("gm_specs.hrl").
+
+-record(state, { q,
+ gm,
+ monitors,
+ death_fun,
+ depth_fun
+ }).
+
+%%----------------------------------------------------------------------------
+%%
+%% Mirror Queues
+%%
+%% A queue with mirrors consists of the following:
+%%
+%% #amqqueue{ pid, slave_pids }
+%% | |
+%% +----------+ +-------+--------------+-----------...etc...
+%% | | |
+%% V V V
+%% amqqueue_process---+ mirror-----+ mirror-----+ ...etc...
+%% | BQ = master----+ | | BQ = vq | | BQ = vq |
+%% | | BQ = vq | | +-+-------+ +-+-------+
+%% | +-+-------+ | | |
+%% +-++-----|---------+ | | (some details elided)
+%% || | | |
+%% || coordinator-+ | |
+%% || +-+---------+ | |
+%% || | | |
+%% || gm-+ -- -- -- -- gm-+- -- -- -- gm-+- -- --...etc...
+%% || +--+ +--+ +--+
+%% ||
+%% consumers
+%%
+%% The master is merely an implementation of bq, and thus is invoked
+%% through the normal bq interface by the amqqueue_process. The mirrors
+%% meanwhile are processes in their own right (as is the
+%% coordinator). The coordinator and all mirrors belong to the same gm
+%% group. Every member of a gm group receives messages sent to the gm
+%% group. Because the master is the bq of amqqueue_process, it doesn't
+%% have sole control over its mailbox, and as a result, the master
+%% itself cannot be passed messages directly (well, it could via
+%% the amqqueue:run_backing_queue callback but that would induce
+%% additional unnecessary loading on the master queue process), yet it
+%% needs to react to gm events, such as the death of mirrors. Thus the
+%% master creates the coordinator, and it is the coordinator that is
+%% the gm callback module and event handler for the master.
+%%
+%% Consumers are only attached to the master. Thus the master is
+%% responsible for informing all mirrors when messages are fetched from
+%% the bq, when they're acked, and when they're requeued.
+%%
+%% The basic goal is to ensure that all mirrors perform actions on
+%% their bqs in the same order as the master. Thus the master
+%% intercepts all events going to its bq, and suitably broadcasts
+%% these events on the gm. The mirrors thus receive two streams of
+%% events: one stream is via the gm, and one stream is from channels
+%% directly. Whilst the stream via gm is guaranteed to be consistently
+%% seen by all mirrors, the same is not true of the stream via
+%% channels. For example, in the event of an unexpected death of a
+%% channel during a publish, only some of the mirrors may receive that
+%% publish. As a result of this problem, the messages broadcast over
+%% the gm contain published content, and thus mirrors can operate
+%% successfully on messages that they only receive via the gm.
+%%
+%% The key purpose of also sending messages directly from the channels
+%% to the mirrors is that without this, in the event of the death of
+%% the master, messages could be lost until a suitable mirror is
+%% promoted. However, that is not the only reason. A mirror cannot send
+%% confirms for a message until it has seen it from the
+%% channel. Otherwise, it might send a confirm to a channel for a
+%% message that it might *never* receive from that channel. This can
+%% happen because new mirrors join the gm ring (and thus receive
+%% messages from the master) before inserting themselves in the
+%% queue's mnesia record (which is what channels look at for routing).
+%% As it turns out, channels will simply ignore such bogus confirms,
+%% but relying on that would introduce a dangerously tight coupling.
+%%
+%% Hence the mirrors have to wait until they've seen both the publish
+%% via gm, and the publish via the channel before they issue the
+%% confirm. Either form of publish can arrive first, and a mirror can
+%% be upgraded to the master at any point during this
+%% process. Confirms continue to be issued correctly, however.
+%%
+%% Because the mirror is a full process, it impersonates parts of the
+%% amqqueue API. However, it does not need to implement all parts: for
+%% example, no ack or consumer-related message can arrive directly at
+%% a mirror from a channel: it is only publishes that pass both
+%% directly to the mirrors and go via gm.
+%%
+%% Mirrors can be added dynamically. When this occurs, there is no
+%% attempt made to sync the current contents of the master with the
+%% new mirror, thus the mirror will start empty, regardless of the state
+%% of the master. Thus the mirror needs to be able to detect and ignore
+%% operations which are for messages it has not received: because of
+%% the strict FIFO nature of queues in general, this is
+%% straightforward - all new publishes that the new mirror receives via
+%% gm should be processed as normal, but fetches which are for
+%% messages the mirror has never seen should be ignored. Similarly,
+%% acks for messages the mirror never fetched should be
+%% ignored. Similarly, we don't republish rejected messages that we
+%% haven't seen. Eventually, as the master is consumed from, the
+%% messages at the head of the queue which were there before the mirror
+%% joined will disappear, and the mirror will become fully synced with
+%% the state of the master.
+%%
+%% The detection of the sync-status is based on the depth of the BQs,
+%% where the depth is defined as the sum of the length of the BQ (as
+%% per BQ:len) and the messages pending an acknowledgement. When the
+%% depth of the mirror is equal to the master's, then the mirror is
+%% synchronised. We only store the difference between the two for
+%% simplicity. Comparing the length is not enough since we need to
+%% take into account rejected messages which will make it back into
+%% the master queue but can't go back in the mirror, since we don't
+%% want "holes" in the mirror queue. Note that the depth, and the
+%% length likewise, must always be shorter on the mirror - we assert
+%% that in various places. In case mirrors are joined to an empty queue
+%% which only goes on to receive publishes, they start by asking the
+%% master to broadcast its depth. This is enough for mirrors to always
+%% be able to work out when their head does not differ from the master
+%% (and is much simpler and cheaper than getting the master to hang on
+%% to the guid of the msg at the head of its queue). When a mirror is
+%% promoted to a master, it unilaterally broadcasts its depth, in
+%% order to solve the problem of depth requests from new mirrors being
+%% unanswered by a dead master.
+%%
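+%% To make the depth arithmetic concrete (illustrative figures): a master
+%% with BQ:len = 7 messages queued and 3 messages pending acknowledgement
+%% broadcasts a depth of 10; a mirror whose own depth is also 10 is
+%% considered synchronised, while one at depth 6 still lags by 4 messages.
+%%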
+%% Obviously, due to the async nature of communication across gm, the
+%% mirrors can fall behind. This does not matter from a sync pov: if
+%% they fall behind and the master dies then a) no publishes are lost
+%% because all publishes go to all mirrors anyway; b) the worst that
+%% happens is that acks get lost and so messages come back to
+%% life. This is no worse than normal given you never get confirmation
+%% that an ack has been received (not quite true with QoS-prefetch,
+%% but close enough for jazz).
+%%
+%% Because acktags are issued by the bq independently, and because
+%% there is no requirement for the master and all mirrors to use the
+%% same bq, all references to msgs going over gm are by msg_id. Thus
+%% upon acking, the master must convert the acktags back to msg_ids
+%% (which happens to be what bq:ack returns), then send the msg_ids
+%% over gm; the mirrors must convert the msg_ids to acktags (a mapping
+%% the mirrors themselves must maintain).
+%%
+%% When the master dies, a mirror gets promoted. This will be the
+%% eldest mirror, and thus the hope is that that mirror is most likely
+%% to be sync'd with the master. The design of gm is that the
+%% notification of the death of the master will only appear once all
+%% messages in-flight from the master have been fully delivered to all
+%% members of the gm group. Thus at this point, the mirror that gets
+%% promoted cannot broadcast different events in a different order
+%% than the master for the same msgs: there is no possibility for the
+%% same msg to be processed by the old master and the new master - if
+%% it was processed by the old master then it will have been processed
+%% by the mirror before the mirror was promoted, and vice versa.
+%%
+%% Upon promotion, all msgs pending acks are requeued as normal, the
+%% mirror constructs state suitable for use in the master module, and
+%% then dynamically changes into an amqqueue_process with the master
+%% as the bq, and the mirror's bq as the master's bq. Thus the very
+%% same process that was the mirror is now a full amqqueue_process.
+%%
+%% It is important that we avoid memory leaks due to the death of
+%% senders (i.e. channels) and partial publications. A sender
+%% publishing a message may fail mid way through the publish and thus
+%% only some of the mirrors will receive the message. We need the
+%% mirrors to be able to detect this and tidy up as necessary to avoid
+%% leaks. If we just had the master monitoring all senders then we
+%% would have the possibility that a sender appears and only sends the
+%% message to a few of the mirrors before dying. Those mirrors would
+%% then hold on to the message, assuming they'll receive some
+%% instruction eventually from the master. Thus we have both mirrors
+%% and the master monitor all senders they become aware of. But there
+%% is a race: if the mirror receives a DOWN of a sender, how does it
+%% know whether or not the master is going to send it instructions
+%% regarding those messages?
+%%
+%% Whilst the master monitors senders, it can't access its mailbox
+%% directly, so it delegates monitoring to the coordinator. When the
+%% coordinator receives a DOWN message from a sender, it informs the
+%% master via a callback. This allows the master to do any tidying
+%% necessary, but more importantly allows the master to broadcast a
+%% sender_death message to all the mirrors, saying the sender has
+%% died. Once the mirrors receive the sender_death message, they know
+%% that they're not going to receive any more instructions from the gm
+%% regarding that sender. However, it is possible that the coordinator
+%% receives the DOWN and communicates that to the master before the
+%% master has finished receiving and processing publishes from the
+%% sender. This turns out not to be a problem: the sender has actually
+%% died, and so will not need to receive confirms or other feedback,
+%% and should further messages be "received" from the sender, the
+%% master will ask the coordinator to set up a new monitor, and
+%% will continue to process the messages normally. Mirrors may thus
+%% receive publishes via gm from previously declared "dead" senders,
+%% but again, this is fine: should the mirror have just thrown out the
+%% message it had received directly from the sender (due to receiving
+%% a sender_death message via gm), it will be able to cope with the
+%% publication purely from the master via gm.
+%%
+%% When a mirror receives a DOWN message for a sender, if it has not
+%% received the sender_death message from the master via gm already,
+%% then it will wait 20 seconds before broadcasting a request for
+%% confirmation from the master that the sender really has died.
+%% Should a sender have only sent a publish to mirrors, this allows
+%% mirrors to inform the master of the previous existence of the
+%% sender. The master will thus monitor the sender, receive the DOWN,
+%% and subsequently broadcast the sender_death message, allowing the
+%% mirrors to tidy up. This process can repeat for the same sender:
+%% consider one mirror receives the publication, then the DOWN, then
+%% asks for confirmation of death, then the master broadcasts the
+%% sender_death message. Only then does another mirror receive the
+%% publication and thus set up its monitoring. Eventually that mirror
+%% too will receive the DOWN, ask for confirmation and the master will
+%% monitor the sender again, receive another DOWN, and send out
+%% another sender_death message. Given the 20 second delay before
+%% requesting death confirmation, this is highly unlikely, but it is a
+%% possibility.
+%%
+%% When the 20 second timer expires, the mirror first checks to see
+%% whether it still needs confirmation of the death before requesting
+%% it. This prevents unnecessary traffic on gm as it allows one
+%% broadcast of the sender_death message to satisfy many mirrors.
+%%
+%% If we consider the promotion of a mirror at this point, we have two
+%% possibilities: that of the mirror that has received the DOWN and is
+%% thus waiting for confirmation from the master that the sender
+%% really is down; and that of the mirror that has not received the
+%% DOWN. In the first case, in the act of promotion to master, the new
+%% master will monitor again the dead sender, and after it has
+%% finished promoting itself, it should find another DOWN waiting,
+%% which it will then broadcast. This will allow mirrors to tidy up as
+%% normal. In the second case, we have the possibility that
+%% confirmation-of-sender-death request has been broadcast, but that
+%% it was broadcast before the master failed, and that the mirror being
+%% promoted does not know anything about that sender, and so will not
+%% monitor it on promotion. Thus a mirror that broadcasts such a
+%% request, at the point of broadcasting it, recurses, setting another
+%% 20 second timer. As before, on expiry of the timer, the mirror
+%% checks to see whether it still has not received a sender_death
+%% message for the dead sender, and if not, broadcasts a death
+%% confirmation request. Thus this ensures that even when a master
+%% dies and the new mirror has no knowledge of the dead sender, it will
+%% eventually receive a death confirmation request, shall monitor the
+%% dead sender, receive the DOWN and broadcast the sender_death
+%% message.
+%%
+%% The preceding commentary deals with the possibility of mirrors
+%% receiving publications from senders which the master does not, and
+%% the need to prevent memory leaks in such scenarios. The inverse is
+%% also possible: a partial publication may cause only the master to
+%% receive a publication. It will then publish the message via gm. The
+%% mirrors will receive it via gm, will publish it to their BQ and will
+%% set up monitoring on the sender. They will then receive the DOWN
+%% message and the master will eventually publish the corresponding
+%% sender_death message. The mirror will then be able to tidy up its
+%% state as normal.
+%%
+%% Recovery of mirrored queues is straightforward: as nodes die, the
+%% remaining nodes record this, and eventually a situation is reached
+%% in which only one node is alive, which is the master. This is the
+%% only node which, upon recovery, will resurrect a mirrored queue:
+%% nodes which die and then rejoin as a mirror will start off empty as
+%% if they have no mirrored content at all. This is not surprising: to
+%% achieve anything more sophisticated would require the master and
+%% recovering mirror to be able to check to see whether they agree on
+%% the last seen state of the queue: checking depth alone is not
+%% sufficient in this case.
+%%
+%% For more documentation see the comments in bug 23554.
+%%
+%%----------------------------------------------------------------------------
+
+-spec start_link
+ (amqqueue:amqqueue(), pid() | 'undefined',
+ rabbit_mirror_queue_master:death_fun(),
+ rabbit_mirror_queue_master:depth_fun()) ->
+ rabbit_types:ok_pid_or_error().
+
+start_link(Queue, GM, DeathFun, DepthFun) ->
+ gen_server2:start_link(?MODULE, [Queue, GM, DeathFun, DepthFun], []).
+
+-spec get_gm(pid()) -> pid().
+
+get_gm(CPid) ->
+ gen_server2:call(CPid, get_gm, infinity).
+
+-spec ensure_monitoring(pid(), [pid()]) -> 'ok'.
+
+ensure_monitoring(CPid, Pids) ->
+ gen_server2:cast(CPid, {ensure_monitoring, Pids}).
+
+%% ---------------------------------------------------------------------------
+%% gen_server
+%% ---------------------------------------------------------------------------
+
+init([Q, GM, DeathFun, DepthFun]) when ?is_amqqueue(Q) ->
+ QueueName = amqqueue:get_name(Q),
+ ?store_proc_name(QueueName),
+ GM1 = case GM of
+ undefined ->
+ {ok, GM2} = gm:start_link(
+ QueueName, ?MODULE, [self()],
+ fun rabbit_misc:execute_mnesia_transaction/1),
+ receive {joined, GM2, _Members} ->
+ ok
+ end,
+ GM2;
+ _ ->
+ true = link(GM),
+ GM
+ end,
+ {ok, #state { q = Q,
+ gm = GM1,
+ monitors = pmon:new(),
+ death_fun = DeathFun,
+ depth_fun = DepthFun },
+ hibernate,
+ {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, ?DESIRED_HIBERNATE}}.
+
+handle_call(get_gm, _From, State = #state { gm = GM }) ->
+ reply(GM, State).
+
+handle_cast({gm_deaths, DeadGMPids}, State = #state{q = Q}) when ?amqqueue_pid_runs_on_local_node(Q) ->
+ QueueName = amqqueue:get_name(Q),
+ MPid = amqqueue:get_pid(Q),
+ case rabbit_mirror_queue_misc:remove_from_queue(
+ QueueName, MPid, DeadGMPids) of
+ {ok, MPid, DeadPids, ExtraNodes} ->
+ rabbit_mirror_queue_misc:report_deaths(MPid, true, QueueName,
+ DeadPids),
+ rabbit_mirror_queue_misc:add_mirrors(QueueName, ExtraNodes, async),
+ noreply(State);
+ {ok, _MPid0, DeadPids, _ExtraNodes} ->
+ %% see rabbitmq-server#914;
+ %% Different mirror is now master, stop current coordinator normally.
+ %% Initiating queue is now mirror and the least we could do is report
+ %% deaths which we 'think' we saw.
+ %% NOTE: Reported deaths here, could be inconsistent.
+ rabbit_mirror_queue_misc:report_deaths(MPid, false, QueueName,
+ DeadPids),
+ {stop, shutdown, State};
+ {error, not_found} ->
+ {stop, normal, State};
+ {error, {not_synced, _}} ->
+ rabbit_log:error("Mirror queue ~p in unexpected state."
+ " Promoted to master but already a master.",
+ [QueueName]),
+ error(unexpected_mirrored_state)
+ end;
+
+handle_cast(request_depth, State = #state{depth_fun = DepthFun, q = QArg}) when ?is_amqqueue(QArg) ->
+ QName = amqqueue:get_name(QArg),
+ MPid = amqqueue:get_pid(QArg),
+ case rabbit_amqqueue:lookup(QName) of
+ {ok, QFound} when ?amqqueue_pid_equals(QFound, MPid) ->
+ ok = DepthFun(),
+ noreply(State);
+ _ ->
+ {stop, shutdown, State}
+ end;
+
+handle_cast({ensure_monitoring, Pids}, State = #state { monitors = Mons }) ->
+ noreply(State #state { monitors = pmon:monitor_all(Pids, Mons) });
+
+handle_cast({delete_and_terminate, {shutdown, ring_shutdown}}, State) ->
+ {stop, normal, State};
+handle_cast({delete_and_terminate, Reason}, State) ->
+ {stop, Reason, State}.
+
+handle_info({'DOWN', _MonitorRef, process, Pid, _Reason},
+ State = #state { monitors = Mons,
+ death_fun = DeathFun }) ->
+ noreply(case pmon:is_monitored(Pid, Mons) of
+ false -> State;
+ true -> ok = DeathFun(Pid),
+ State #state { monitors = pmon:erase(Pid, Mons) }
+ end);
+
+handle_info(Msg, State) ->
+ {stop, {unexpected_info, Msg}, State}.
+
+terminate(_Reason, #state{}) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+handle_pre_hibernate(State = #state { gm = GM }) ->
+ %% Since GM notifications of deaths are lazy we might not get a
+ %% timely notification of mirror death if policy changes when
+ %% everything is idle. So cause some activity just before we
+ %% sleep. This won't cause us to go into perpetual motion as the
+ %% heartbeat does not wake up coordinator or mirrors.
+ gm:broadcast(GM, hibernate_heartbeat),
+ {hibernate, State}.
+
+%% ---------------------------------------------------------------------------
+%% GM
+%% ---------------------------------------------------------------------------
+
+joined([CPid], Members) ->
+ CPid ! {joined, self(), Members},
+ ok.
+
+members_changed([_CPid], _Births, []) ->
+ ok;
+members_changed([CPid], _Births, Deaths) ->
+ ok = gen_server2:cast(CPid, {gm_deaths, Deaths}).
+
+handle_msg([CPid], _From, request_depth = Msg) ->
+ ok = gen_server2:cast(CPid, Msg);
+handle_msg([CPid], _From, {ensure_monitoring, _Pids} = Msg) ->
+ ok = gen_server2:cast(CPid, Msg);
+handle_msg([_CPid], _From, {delete_and_terminate, _Reason}) ->
+ %% We tell GM to stop, but we don't instruct the coordinator to
+ %% stop yet. The GM will first make sure all pending messages were
+ %% actually delivered. Then it calls handle_terminate/2 below so the
+ %% coordinator is stopped.
+ %%
+ %% If we stop the coordinator right now, remote mirrors could see the
+ %% coordinator DOWN before delete_and_terminate was delivered to all
+ %% GMs. One of those GM would be promoted as the master, and this GM
+ %% would hang forever, waiting for other GMs to stop.
+ {stop, {shutdown, ring_shutdown}};
+handle_msg([_CPid], _From, _Msg) ->
+ ok.
+
+handle_terminate([CPid], Reason) ->
+ ok = gen_server2:cast(CPid, {delete_and_terminate, Reason}),
+ ok.
+
+%% ---------------------------------------------------------------------------
+%% Others
+%% ---------------------------------------------------------------------------
+
+noreply(State) ->
+ {noreply, State, hibernate}.
+
+reply(Reply, State) ->
+ {reply, Reply, State, hibernate}.
diff --git a/deps/rabbit/src/rabbit_mirror_queue_master.erl b/deps/rabbit/src/rabbit_mirror_queue_master.erl
new file mode 100644
index 0000000000..71146e1ce2
--- /dev/null
+++ b/deps/rabbit/src/rabbit_mirror_queue_master.erl
@@ -0,0 +1,578 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2010-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mirror_queue_master).
+
+-export([init/3, terminate/2, delete_and_terminate/2,
+ purge/1, purge_acks/1, publish/6, publish_delivered/5,
+ batch_publish/4, batch_publish_delivered/4,
+ discard/4, fetch/2, drop/2, ack/2, requeue/2, ackfold/4, fold/3,
+ len/1, is_empty/1, depth/1, drain_confirmed/1,
+ dropwhile/2, fetchwhile/4, set_ram_duration_target/2, ram_duration/1,
+ needs_timeout/1, timeout/1, handle_pre_hibernate/1, resume/1,
+ msg_rates/1, info/2, invoke/3, is_duplicate/2, set_queue_mode/2,
+ zip_msgs_and_acks/4, handle_info/2]).
+
+-export([start/2, stop/1, delete_crashed/1]).
+
+-export([promote_backing_queue_state/8, sender_death_fun/0, depth_fun/0]).
+
+-export([init_with_existing_bq/3, stop_mirroring/1, sync_mirrors/3]).
+
+-behaviour(rabbit_backing_queue).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+-include("amqqueue.hrl").
+
+-record(state, { name,
+ gm,
+ coordinator,
+ backing_queue,
+ backing_queue_state,
+ seen_status,
+ confirmed,
+ known_senders,
+ wait_timeout
+ }).
+
+-export_type([death_fun/0, depth_fun/0, stats_fun/0]).
+
+-type death_fun() :: fun ((pid()) -> 'ok').
+-type depth_fun() :: fun (() -> 'ok').
+-type stats_fun() :: fun ((any()) -> 'ok').
+-type master_state() :: #state { name :: rabbit_amqqueue:name(),
+ gm :: pid(),
+ coordinator :: pid(),
+ backing_queue :: atom(),
+ backing_queue_state :: any(),
+ seen_status :: map(),
+ confirmed :: [rabbit_guid:guid()],
+ known_senders :: sets:set()
+ }.
+
+%% For general documentation of HA design, see
+%% rabbit_mirror_queue_coordinator
+
+%% ---------------------------------------------------------------------------
+%% Backing queue
+%% ---------------------------------------------------------------------------
+
+-spec start(_, _) -> no_return().
+start(_Vhost, _DurableQueues) ->
+ %% This will never get called as this module will never be
+ %% installed as the default BQ implementation.
+ exit({not_valid_for_generic_backing_queue, ?MODULE}).
+
+-spec stop(_) -> no_return().
+stop(_Vhost) ->
+ %% Same as start/1.
+ exit({not_valid_for_generic_backing_queue, ?MODULE}).
+
+-spec delete_crashed(_) -> no_return().
+delete_crashed(_QName) ->
+ exit({not_valid_for_generic_backing_queue, ?MODULE}).
+
+init(Q, Recover, AsyncCallback) ->
+ {ok, BQ} = application:get_env(backing_queue_module),
+ BQS = BQ:init(Q, Recover, AsyncCallback),
+ State = #state{gm = GM} = init_with_existing_bq(Q, BQ, BQS),
+ ok = gm:broadcast(GM, {depth, BQ:depth(BQS)}),
+ State.
+
+-spec init_with_existing_bq(amqqueue:amqqueue(), atom(), any()) ->
+ master_state().
+
+init_with_existing_bq(Q0, BQ, BQS) when ?is_amqqueue(Q0) ->
+ QName = amqqueue:get_name(Q0),
+ case rabbit_mirror_queue_coordinator:start_link(
+ Q0, undefined, sender_death_fun(), depth_fun()) of
+ {ok, CPid} ->
+ GM = rabbit_mirror_queue_coordinator:get_gm(CPid),
+ Self = self(),
+ Fun = fun () ->
+ [Q1] = mnesia:read({rabbit_queue, QName}),
+ true = amqqueue:is_amqqueue(Q1),
+ GMPids0 = amqqueue:get_gm_pids(Q1),
+ GMPids1 = [{GM, Self} | GMPids0],
+ Q2 = amqqueue:set_gm_pids(Q1, GMPids1),
+ Q3 = amqqueue:set_state(Q2, live),
+ %% amqqueue migration:
+ %% The amqqueue was read from this transaction, no
+ %% need to handle migration.
+ ok = rabbit_amqqueue:store_queue(Q3)
+ end,
+ ok = rabbit_misc:execute_mnesia_transaction(Fun),
+ {_MNode, SNodes} = rabbit_mirror_queue_misc:suggested_queue_nodes(Q0),
+ %% We need synchronous add here (i.e. do not return until the
+ %% mirror is running) so that when queue declaration is finished
+ %% all mirrors are up; we don't want to end up with unsynced mirrors
+ %% just by declaring a new queue. But add can't be synchronous all
+ %% the time as it can be called by mirrors and that's
+ %% deadlock-prone.
+ rabbit_mirror_queue_misc:add_mirrors(QName, SNodes, sync),
+ #state{name = QName,
+ gm = GM,
+ coordinator = CPid,
+ backing_queue = BQ,
+ backing_queue_state = BQS,
+ seen_status = #{},
+ confirmed = [],
+ known_senders = sets:new(),
+ wait_timeout = rabbit_misc:get_env(rabbit, slave_wait_timeout, 15000)};
+ {error, Reason} ->
+ %% The GM can shutdown before the coordinator has started up
+ %% (lost membership or missing group), thus the start_link of
+ %% the coordinator returns {error, shutdown} as rabbit_amqqueue_process
+ %% is trapping exits
+ throw({coordinator_not_started, Reason})
+ end.
+
+-spec stop_mirroring(master_state()) -> {atom(), any()}.
+
+stop_mirroring(State = #state { coordinator = CPid,
+ backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ unlink(CPid),
+ stop_all_slaves(shutdown, State),
+ {BQ, BQS}.
+
+-spec sync_mirrors(stats_fun(), stats_fun(), master_state()) ->
+ {'ok', master_state()} | {stop, any(), master_state()}.
+
+sync_mirrors(HandleInfo, EmitStats,
+ State = #state { name = QName,
+ gm = GM,
+ backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ Log = fun (Fmt, Params) ->
+ rabbit_mirror_queue_misc:log_info(
+ QName, "Synchronising: " ++ Fmt ++ "~n", Params)
+ end,
+ Log("~p messages to synchronise", [BQ:len(BQS)]),
+ {ok, Q} = rabbit_amqqueue:lookup(QName),
+ SPids = amqqueue:get_slave_pids(Q),
+ SyncBatchSize = rabbit_mirror_queue_misc:sync_batch_size(Q),
+ Log("batch size: ~p", [SyncBatchSize]),
+ Ref = make_ref(),
+ Syncer = rabbit_mirror_queue_sync:master_prepare(Ref, QName, Log, SPids),
+ gm:broadcast(GM, {sync_start, Ref, Syncer, SPids}),
+ S = fun(BQSN) -> State#state{backing_queue_state = BQSN} end,
+ case rabbit_mirror_queue_sync:master_go(
+ Syncer, Ref, Log, HandleInfo, EmitStats, SyncBatchSize, BQ, BQS) of
+ {cancelled, BQS1} -> Log(" synchronisation cancelled ", []),
+ {ok, S(BQS1)};
+ {shutdown, R, BQS1} -> {stop, R, S(BQS1)};
+ {sync_died, R, BQS1} -> Log("~p", [R]),
+ {ok, S(BQS1)};
+ {already_synced, BQS1} -> {ok, S(BQS1)};
+ {ok, BQS1} -> Log("complete", []),
+ {ok, S(BQS1)}
+ end.
+
+terminate({shutdown, dropped} = Reason,
+ State = #state { backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ %% Backing queue termination - this node has been explicitly
+ %% dropped. Normally, non-durable queues would be tidied up on
+ %% startup, but there's a possibility that we will be added back
+ %% in without this node being restarted. Thus we must do the full
+ %% blown delete_and_terminate now, but only locally: we do not
+ %% broadcast delete_and_terminate.
+ State#state{backing_queue_state = BQ:delete_and_terminate(Reason, BQS)};
+
+terminate(Reason,
+ State = #state { name = QName,
+ backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ %% Backing queue termination. The queue is going down but
+ %% shouldn't be deleted. Most likely safe shutdown of this
+ %% node.
+ {ok, Q} = rabbit_amqqueue:lookup(QName),
+ SSPids = amqqueue:get_sync_slave_pids(Q),
+ case SSPids =:= [] andalso
+ rabbit_policy:get(<<"ha-promote-on-shutdown">>, Q) =/= <<"always">> of
+ true -> %% Remove the whole queue to avoid data loss
+ rabbit_mirror_queue_misc:log_warning(
+ QName, "Stopping all nodes on master shutdown since no "
+ "synchronised mirror (replica) is available~n", []),
+ stop_all_slaves(Reason, State);
+ false -> %% Just let some other mirror take over.
+ ok
+ end,
+ State #state { backing_queue_state = BQ:terminate(Reason, BQS) }.
+
+delete_and_terminate(Reason, State = #state { backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ stop_all_slaves(Reason, State),
+ State#state{backing_queue_state = BQ:delete_and_terminate(Reason, BQS)}.
+
+stop_all_slaves(Reason, #state{name = QName, gm = GM, wait_timeout = WT}) ->
+ {ok, Q} = rabbit_amqqueue:lookup(QName),
+ SPids = amqqueue:get_slave_pids(Q),
+ rabbit_mirror_queue_misc:stop_all_slaves(Reason, SPids, QName, GM, WT).
+
+purge(State = #state { gm = GM,
+ backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ ok = gm:broadcast(GM, {drop, 0, BQ:len(BQS), false}),
+ {Count, BQS1} = BQ:purge(BQS),
+ {Count, State #state { backing_queue_state = BQS1 }}.
+
+-spec purge_acks(_) -> no_return().
+purge_acks(_State) -> exit({not_implemented, {?MODULE, purge_acks}}).
+
+publish(Msg = #basic_message { id = MsgId }, MsgProps, IsDelivered, ChPid, Flow,
+ State = #state { gm = GM,
+ seen_status = SS,
+ backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ false = maps:is_key(MsgId, SS), %% ASSERTION
+ ok = gm:broadcast(GM, {publish, ChPid, Flow, MsgProps, Msg},
+ rabbit_basic:msg_size(Msg)),
+ BQS1 = BQ:publish(Msg, MsgProps, IsDelivered, ChPid, Flow, BQS),
+ ensure_monitoring(ChPid, State #state { backing_queue_state = BQS1 }).
+
+batch_publish(Publishes, ChPid, Flow,
+ State = #state { gm = GM,
+ seen_status = SS,
+ backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ {Publishes1, false, MsgSizes} =
+ lists:foldl(fun ({Msg = #basic_message { id = MsgId },
+ MsgProps, _IsDelivered}, {Pubs, false, Sizes}) ->
+ {[{Msg, MsgProps, true} | Pubs], %% [0]
+ false = maps:is_key(MsgId, SS), %% ASSERTION
+ Sizes + rabbit_basic:msg_size(Msg)}
+ end, {[], false, 0}, Publishes),
+ Publishes2 = lists:reverse(Publishes1),
+ ok = gm:broadcast(GM, {batch_publish, ChPid, Flow, Publishes2},
+ MsgSizes),
+ BQS1 = BQ:batch_publish(Publishes2, ChPid, Flow, BQS),
+ ensure_monitoring(ChPid, State #state { backing_queue_state = BQS1 }).
+%% [0] When the mirror process handles the publish command, it sets the
+%% IsDelivered flag to true, so to avoid iterating over the messages
+%% again at the mirror, we do it here.
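+%% For example (illustrative message/property names): a batch
+%% [{M1, P1, false}, {M2, P2, false}] is broadcast and stored as
+%% [{M1, P1, true}, {M2, P2, true}].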
+
+publish_delivered(Msg = #basic_message { id = MsgId }, MsgProps,
+ ChPid, Flow, State = #state { gm = GM,
+ seen_status = SS,
+ backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ false = maps:is_key(MsgId, SS), %% ASSERTION
+ ok = gm:broadcast(GM, {publish_delivered, ChPid, Flow, MsgProps, Msg},
+ rabbit_basic:msg_size(Msg)),
+ {AckTag, BQS1} = BQ:publish_delivered(Msg, MsgProps, ChPid, Flow, BQS),
+ State1 = State #state { backing_queue_state = BQS1 },
+ {AckTag, ensure_monitoring(ChPid, State1)}.
+
+batch_publish_delivered(Publishes, ChPid, Flow,
+ State = #state { gm = GM,
+ seen_status = SS,
+ backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ {false, MsgSizes} =
+ lists:foldl(fun ({Msg = #basic_message { id = MsgId }, _MsgProps},
+ {false, Sizes}) ->
+ {false = maps:is_key(MsgId, SS), %% ASSERTION
+ Sizes + rabbit_basic:msg_size(Msg)}
+ end, {false, 0}, Publishes),
+ ok = gm:broadcast(GM, {batch_publish_delivered, ChPid, Flow, Publishes},
+ MsgSizes),
+ {AckTags, BQS1} = BQ:batch_publish_delivered(Publishes, ChPid, Flow, BQS),
+ State1 = State #state { backing_queue_state = BQS1 },
+ {AckTags, ensure_monitoring(ChPid, State1)}.
+
+discard(MsgId, ChPid, Flow, State = #state { gm = GM,
+ backing_queue = BQ,
+ backing_queue_state = BQS,
+ seen_status = SS }) ->
+ false = maps:is_key(MsgId, SS), %% ASSERTION
+ ok = gm:broadcast(GM, {discard, ChPid, Flow, MsgId}),
+ ensure_monitoring(ChPid,
+ State #state { backing_queue_state =
+ BQ:discard(MsgId, ChPid, Flow, BQS) }).
+
+dropwhile(Pred, State = #state{backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ Len = BQ:len(BQS),
+ {Next, BQS1} = BQ:dropwhile(Pred, BQS),
+ {Next, drop(Len, false, State #state { backing_queue_state = BQS1 })}.
+
+fetchwhile(Pred, Fun, Acc, State = #state{backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ Len = BQ:len(BQS),
+ {Next, Acc1, BQS1} = BQ:fetchwhile(Pred, Fun, Acc, BQS),
+ {Next, Acc1, drop(Len, true, State #state { backing_queue_state = BQS1 })}.
+
+drain_confirmed(State = #state { backing_queue = BQ,
+ backing_queue_state = BQS,
+ seen_status = SS,
+ confirmed = Confirmed }) ->
+ {MsgIds, BQS1} = BQ:drain_confirmed(BQS),
+ {MsgIds1, SS1} =
+ lists:foldl(
+ fun (MsgId, {MsgIdsN, SSN}) ->
+ %% We will never see 'discarded' here
+ case maps:find(MsgId, SSN) of
+ error ->
+ {[MsgId | MsgIdsN], SSN};
+ {ok, published} ->
+ %% It was published when we were a mirror,
+ %% and we were promoted before we saw the
+ %% publish from the channel. We still
+ %% haven't seen the channel publish, and
+ %% consequently we need to filter out the
+ %% confirm here. We will issue the confirm
+ %% when we see the publish from the channel.
+ {MsgIdsN, maps:put(MsgId, confirmed, SSN)};
+ {ok, confirmed} ->
+ %% Well, confirms are racy by definition.
+ {[MsgId | MsgIdsN], SSN}
+ end
+ end, {[], SS}, MsgIds),
+ {Confirmed ++ MsgIds1, State #state { backing_queue_state = BQS1,
+ seen_status = SS1,
+ confirmed = [] }}.
+
+fetch(AckRequired, State = #state { backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ {Result, BQS1} = BQ:fetch(AckRequired, BQS),
+ State1 = State #state { backing_queue_state = BQS1 },
+ {Result, case Result of
+ empty -> State1;
+ {_MsgId, _IsDelivered, _AckTag} -> drop_one(AckRequired, State1)
+ end}.
+
+drop(AckRequired, State = #state { backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ {Result, BQS1} = BQ:drop(AckRequired, BQS),
+ State1 = State #state { backing_queue_state = BQS1 },
+ {Result, case Result of
+ empty -> State1;
+ {_MsgId, _AckTag} -> drop_one(AckRequired, State1)
+ end}.
+
+ack(AckTags, State = #state { gm = GM,
+ backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ {MsgIds, BQS1} = BQ:ack(AckTags, BQS),
+ case MsgIds of
+ [] -> ok;
+ _ -> ok = gm:broadcast(GM, {ack, MsgIds})
+ end,
+ {MsgIds, State #state { backing_queue_state = BQS1 }}.
+
+requeue(AckTags, State = #state { gm = GM,
+ backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ {MsgIds, BQS1} = BQ:requeue(AckTags, BQS),
+ ok = gm:broadcast(GM, {requeue, MsgIds}),
+ {MsgIds, State #state { backing_queue_state = BQS1 }}.
+
+ackfold(MsgFun, Acc, State = #state { backing_queue = BQ,
+ backing_queue_state = BQS }, AckTags) ->
+ {Acc1, BQS1} = BQ:ackfold(MsgFun, Acc, BQS, AckTags),
+ {Acc1, State #state { backing_queue_state = BQS1 }}.
+
+fold(Fun, Acc, State = #state { backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ {Result, BQS1} = BQ:fold(Fun, Acc, BQS),
+ {Result, State #state { backing_queue_state = BQS1 }}.
+
+len(#state { backing_queue = BQ, backing_queue_state = BQS }) ->
+ BQ:len(BQS).
+
+is_empty(#state { backing_queue = BQ, backing_queue_state = BQS }) ->
+ BQ:is_empty(BQS).
+
+depth(#state { backing_queue = BQ, backing_queue_state = BQS }) ->
+ BQ:depth(BQS).
+
+set_ram_duration_target(Target, State = #state { backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ State #state { backing_queue_state =
+ BQ:set_ram_duration_target(Target, BQS) }.
+
+ram_duration(State = #state { backing_queue = BQ, backing_queue_state = BQS }) ->
+ {Result, BQS1} = BQ:ram_duration(BQS),
+ {Result, State #state { backing_queue_state = BQS1 }}.
+
+needs_timeout(#state { backing_queue = BQ, backing_queue_state = BQS }) ->
+ BQ:needs_timeout(BQS).
+
+timeout(State = #state { backing_queue = BQ, backing_queue_state = BQS }) ->
+ State #state { backing_queue_state = BQ:timeout(BQS) }.
+
+handle_pre_hibernate(State = #state { backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ State #state { backing_queue_state = BQ:handle_pre_hibernate(BQS) }.
+
+handle_info(Msg, State = #state { backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ State #state { backing_queue_state = BQ:handle_info(Msg, BQS) }.
+
+resume(State = #state { backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ State #state { backing_queue_state = BQ:resume(BQS) }.
+
+msg_rates(#state { backing_queue = BQ, backing_queue_state = BQS }) ->
+ BQ:msg_rates(BQS).
+
+info(backing_queue_status,
+ State = #state { backing_queue = BQ, backing_queue_state = BQS }) ->
+ BQ:info(backing_queue_status, BQS) ++
+ [ {mirror_seen, maps:size(State #state.seen_status)},
+ {mirror_senders, sets:size(State #state.known_senders)} ];
+info(Item, #state { backing_queue = BQ, backing_queue_state = BQS }) ->
+ BQ:info(Item, BQS).
+
+invoke(?MODULE, Fun, State) ->
+ Fun(?MODULE, State);
+invoke(Mod, Fun, State = #state { backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ State #state { backing_queue_state = BQ:invoke(Mod, Fun, BQS) }.
+
+is_duplicate(Message = #basic_message { id = MsgId },
+ State = #state { seen_status = SS,
+ backing_queue = BQ,
+ backing_queue_state = BQS,
+ confirmed = Confirmed }) ->
+ %% Here, we need to deal with the possibility that we're about to
+ %% receive a message that we've already seen when we were a mirror
+ %% (we received it via gm). Thus if we do receive such a message now
+ %% via the channel, there may be a confirm waiting to issue for
+ %% it.
+
+ %% We will never see {published, ChPid, MsgSeqNo} here.
+ case maps:find(MsgId, SS) of
+ error ->
+ %% We permit the underlying BQ to have a peek at it, but
+ %% only if we ourselves are not filtering out the msg.
+ {Result, BQS1} = BQ:is_duplicate(Message, BQS),
+ {Result, State #state { backing_queue_state = BQS1 }};
+ {ok, published} ->
+ %% It already got published when we were a mirror and no
+ %% confirmation is waiting. amqqueue_process will have, in
+ %% its msg_id_to_channel mapping, the entry for dealing
+ %% with the confirm when that comes back in (it's added
+ %% immediately after calling is_duplicate). The msg is
+ %% invalid. We will not see this again, nor will we be
+ %% further involved in confirming this message, so erase.
+ {{true, drop}, State #state { seen_status = maps:remove(MsgId, SS) }};
+ {ok, Disposition}
+ when Disposition =:= confirmed
+ %% It got published when we were a mirror via gm, and
+ %% confirmed some time after that (maybe even after
+ %% promotion), but before we received the publish from the
+ %% channel, so couldn't previously know what the
+ %% msg_seq_no was (and thus confirm as a mirror). So we
+ %% need to confirm now. As above, amqqueue_process will
+ %% have the entry for the msg_id_to_channel mapping added
+ %% immediately after calling is_duplicate/2.
+ orelse Disposition =:= discarded ->
+ %% Message was discarded while we were a mirror. Confirm now.
+ %% As above, amqqueue_process will have the entry for the
+ %% msg_id_to_channel mapping.
+ {{true, drop}, State #state { seen_status = maps:remove(MsgId, SS),
+ confirmed = [MsgId | Confirmed] }}
+ end.
+
+set_queue_mode(Mode, State = #state { gm = GM,
+ backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ ok = gm:broadcast(GM, {set_queue_mode, Mode}),
+ BQS1 = BQ:set_queue_mode(Mode, BQS),
+ State #state { backing_queue_state = BQS1 }.
+
+zip_msgs_and_acks(Msgs, AckTags, Accumulator,
+ #state { backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ BQ:zip_msgs_and_acks(Msgs, AckTags, Accumulator, BQS).
+
+%% ---------------------------------------------------------------------------
+%% Other exported functions
+%% ---------------------------------------------------------------------------
+
+-spec promote_backing_queue_state
+ (rabbit_amqqueue:name(), pid(), atom(), any(), pid(), [any()],
+ map(), [pid()]) ->
+ master_state().
+
+promote_backing_queue_state(QName, CPid, BQ, BQS, GM, AckTags, Seen, KS) ->
+ {_MsgIds, BQS1} = BQ:requeue(AckTags, BQS),
+ Len = BQ:len(BQS1),
+ Depth = BQ:depth(BQS1),
+ true = Len == Depth, %% ASSERTION: everything must have been requeued
+ ok = gm:broadcast(GM, {depth, Depth}),
+ WaitTimeout = rabbit_misc:get_env(rabbit, slave_wait_timeout, 15000),
+ #state { name = QName,
+ gm = GM,
+ coordinator = CPid,
+ backing_queue = BQ,
+ backing_queue_state = BQS1,
+ seen_status = Seen,
+ confirmed = [],
+ known_senders = sets:from_list(KS),
+ wait_timeout = WaitTimeout }.
+
+-spec sender_death_fun() -> death_fun().
+
+sender_death_fun() ->
+ Self = self(),
+ fun (DeadPid) ->
+ rabbit_amqqueue:run_backing_queue(
+ Self, ?MODULE,
+ fun (?MODULE, State = #state { gm = GM, known_senders = KS }) ->
+ ok = gm:broadcast(GM, {sender_death, DeadPid}),
+ KS1 = sets:del_element(DeadPid, KS),
+ State #state { known_senders = KS1 }
+ end)
+ end.
+
+-spec depth_fun() -> depth_fun().
+
+depth_fun() ->
+ Self = self(),
+ fun () ->
+ rabbit_amqqueue:run_backing_queue(
+ Self, ?MODULE,
+ fun (?MODULE, State = #state { gm = GM,
+ backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ ok = gm:broadcast(GM, {depth, BQ:depth(BQS)}),
+ State
+ end)
+ end.
+
+%% ---------------------------------------------------------------------------
+%% Helpers
+%% ---------------------------------------------------------------------------
+
+drop_one(AckRequired, State = #state { gm = GM,
+ backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ ok = gm:broadcast(GM, {drop, BQ:len(BQS), 1, AckRequired}),
+ State.
+
+drop(PrevLen, AckRequired, State = #state { gm = GM,
+ backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ Len = BQ:len(BQS),
+ case PrevLen - Len of
+ 0 -> State;
+ Dropped -> ok = gm:broadcast(GM, {drop, Len, Dropped, AckRequired}),
+ State
+ end.
+
+ensure_monitoring(ChPid, State = #state { coordinator = CPid,
+ known_senders = KS }) ->
+ case sets:is_element(ChPid, KS) of
+ true -> State;
+ false -> ok = rabbit_mirror_queue_coordinator:ensure_monitoring(
+ CPid, [ChPid]),
+ State #state { known_senders = sets:add_element(ChPid, KS) }
+ end.
diff --git a/deps/rabbit/src/rabbit_mirror_queue_misc.erl b/deps/rabbit/src/rabbit_mirror_queue_misc.erl
new file mode 100644
index 0000000000..02f590e2fb
--- /dev/null
+++ b/deps/rabbit/src/rabbit_mirror_queue_misc.erl
@@ -0,0 +1,680 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2010-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mirror_queue_misc).
+-behaviour(rabbit_policy_validator).
+
+-export([remove_from_queue/3, on_vhost_up/1, add_mirrors/3,
+ report_deaths/4, store_updated_slaves/1,
+ initial_queue_node/2, suggested_queue_nodes/1, actual_queue_nodes/1,
+ is_mirrored/1, is_mirrored_ha_nodes/1,
+ update_mirrors/2, update_mirrors/1, validate_policy/1,
+ maybe_auto_sync/1, maybe_drop_master_after_sync/1,
+ sync_batch_size/1, log_info/3, log_warning/3]).
+-export([stop_all_slaves/5]).
+
+-export([sync_queue/1, cancel_sync_queue/1]).
+
+-export([transfer_leadership/2, queue_length/1, get_replicas/1]).
+
+%% for testing only
+-export([module/1]).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+-include("amqqueue.hrl").
+
+-define(HA_NODES_MODULE, rabbit_mirror_queue_mode_nodes).
+
+-rabbit_boot_step(
+ {?MODULE,
+ [{description, "HA policy validation"},
+ {mfa, {rabbit_registry, register,
+ [policy_validator, <<"ha-mode">>, ?MODULE]}},
+ {mfa, {rabbit_registry, register,
+ [policy_validator, <<"ha-params">>, ?MODULE]}},
+ {mfa, {rabbit_registry, register,
+ [policy_validator, <<"ha-sync-mode">>, ?MODULE]}},
+ {mfa, {rabbit_registry, register,
+ [policy_validator, <<"ha-sync-batch-size">>, ?MODULE]}},
+ {mfa, {rabbit_registry, register,
+ [policy_validator, <<"ha-promote-on-shutdown">>, ?MODULE]}},
+ {mfa, {rabbit_registry, register,
+ [policy_validator, <<"ha-promote-on-failure">>, ?MODULE]}},
+ {requires, rabbit_registry},
+ {enables, recovery}]}).
+
+
+%%----------------------------------------------------------------------------
+
+%% Returns {ok, NewMPid, DeadPids, ExtraNodes}
+
+-spec remove_from_queue
+ (rabbit_amqqueue:name(), pid(), [pid()]) ->
+ {'ok', pid(), [pid()], [node()]} | {'error', 'not_found'} |
+ {'error', {'not_synced', [pid()]}}.
+
+remove_from_queue(QueueName, Self, DeadGMPids) ->
+ rabbit_misc:execute_mnesia_transaction(
+ fun () ->
+ %% Someone else could have deleted the queue before we
+ %% get here. Or the gm group could have altered. See rabbitmq-server#914.
+ case mnesia:read({rabbit_queue, QueueName}) of
+ [] -> {error, not_found};
+ [Q0] when ?is_amqqueue(Q0) ->
+ QPid = amqqueue:get_pid(Q0),
+ SPids = amqqueue:get_slave_pids(Q0),
+ SyncSPids = amqqueue:get_sync_slave_pids(Q0),
+ GMPids = amqqueue:get_gm_pids(Q0),
+ {DeadGM, AliveGM} = lists:partition(
+ fun ({GM, _}) ->
+ lists:member(GM, DeadGMPids)
+ end, GMPids),
+ DeadPids = [Pid || {_GM, Pid} <- DeadGM],
+ AlivePids = [Pid || {_GM, Pid} <- AliveGM],
+ Alive = [Pid || Pid <- [QPid | SPids],
+ lists:member(Pid, AlivePids)],
+ {QPid1, SPids1} = case Alive of
+ [] ->
+ %% GM altered, & if all pids are
+ %% perceived as dead, rather do
+ %% nothing here, & trust the
+ %% promoted mirror to have updated
+ %% mnesia during the alteration.
+ {QPid, SPids};
+ _ -> promote_slave(Alive)
+ end,
+ DoNotPromote = SyncSPids =:= [] andalso
+ rabbit_policy:get(<<"ha-promote-on-failure">>, Q0) =:= <<"when-synced">>,
+ case {{QPid, SPids}, {QPid1, SPids1}} of
+ {Same, Same} ->
+ {ok, QPid1, DeadPids, []};
+ _ when QPid1 =/= QPid andalso QPid1 =:= Self andalso DoNotPromote =:= true ->
+ %% We have been promoted to master
+ %% but there are no synchronised mirrors
+ %% hence this node is not synchronised either
+ %% Bailing out.
+ {error, {not_synced, SPids1}};
+ _ when QPid =:= QPid1 orelse QPid1 =:= Self ->
+ %% Either master hasn't changed, so
+ %% we're ok to update mnesia; or we have
+ %% become the master. If gm altered,
+ %% we have no choice but to proceed.
+ Q1 = amqqueue:set_pid(Q0, QPid1),
+ Q2 = amqqueue:set_slave_pids(Q1, SPids1),
+ Q3 = amqqueue:set_gm_pids(Q2, AliveGM),
+ store_updated_slaves(Q3),
+ %% If we add and remove nodes at the
+ %% same time we might tell the old
+ %% master we need to sync and then
+ %% shut it down. So let's check if
+ %% the new master needs to sync.
+ maybe_auto_sync(Q3),
+ {ok, QPid1, DeadPids, slaves_to_start_on_failure(Q3, DeadGMPids)};
+ _ ->
+ %% Master has changed, and we're not it.
+ %% [1].
+ Q1 = amqqueue:set_slave_pids(Q0, Alive),
+ Q2 = amqqueue:set_gm_pids(Q1, AliveGM),
+ store_updated_slaves(Q2),
+ {ok, QPid1, DeadPids, []}
+ end
+ end
+ end).
+%% [1] We still update mnesia here in case the mirror that is supposed
+%% to become master dies before it does so, in which case the dead
+%% old master might otherwise never get removed, which in turn might
+%% prevent promotion of another mirror (e.g. us).
+%%
+%% Note however that we do not update the master pid. Otherwise we can
+%% have the situation where a mirror updates the mnesia record for a
+%% queue, promoting another mirror before that mirror realises it has
+%% become the new master, which is bad because it could then mean the
+%% mirror (now master) receives messages it's not ready for (for
+%% example, new consumers).
+%%
+%% We set slave_pids to Alive rather than SPids1 since otherwise we'd
+%% be removing the pid of the candidate master, which in turn would
+%% prevent it from promoting itself.
+%%
+%% We maintain gm_pids as our source of truth, i.e. it contains the
+%% most up-to-date information about which GMs and associated
+%% {M,S}Pids are alive. And all pids in slave_pids always have a
+%% corresponding entry in gm_pids. By contrast, due to the
+%% aforementioned restriction on updating the master pid, that pid may
+%% not be present in gm_pids, but only if said master has died.
+
+%% Sometimes a mirror dying means we need to start more on other
+%% nodes - "exactly" mode can cause this to happen.
+slaves_to_start_on_failure(Q, DeadGMPids) ->
+ %% In case Mnesia has not caught up yet, filter out nodes we know
+ %% to be dead.
+ ClusterNodes = rabbit_nodes:all_running() --
+ [node(P) || P <- DeadGMPids],
+ {_, OldNodes, _} = actual_queue_nodes(Q),
+ {_, NewNodes} = suggested_queue_nodes(Q, ClusterNodes),
+ NewNodes -- OldNodes.
+
+on_vhost_up(VHost) ->
+ QNames =
+ rabbit_misc:execute_mnesia_transaction(
+ fun () ->
+ mnesia:foldl(
+ fun
+ (Q, QNames0) when not ?amqqueue_vhost_equals(Q, VHost) ->
+ QNames0;
+ (Q, QNames0) when ?amqqueue_is_classic(Q) ->
+ QName = amqqueue:get_name(Q),
+ Pid = amqqueue:get_pid(Q),
+ SPids = amqqueue:get_slave_pids(Q),
+ %% We don't want to pass in the whole
+ %% cluster - we don't want a situation
+ %% where starting one node causes us to
+ %% decide to start a mirror on another
+ PossibleNodes0 = [node(P) || P <- [Pid | SPids]],
+ PossibleNodes =
+ case lists:member(node(), PossibleNodes0) of
+ true -> PossibleNodes0;
+ false -> [node() | PossibleNodes0]
+ end,
+ {_MNode, SNodes} = suggested_queue_nodes(
+ Q, PossibleNodes),
+ case lists:member(node(), SNodes) of
+ true -> [QName | QNames0];
+ false -> QNames0
+ end;
+ (_, QNames0) ->
+ QNames0
+ end, [], rabbit_queue)
+ end),
+ [add_mirror(QName, node(), async) || QName <- QNames],
+ ok.
+
+drop_mirrors(QName, Nodes) ->
+ [drop_mirror(QName, Node) || Node <- Nodes],
+ ok.
+
+drop_mirror(QName, MirrorNode) ->
+ case rabbit_amqqueue:lookup(QName) of
+ {ok, Q} when ?is_amqqueue(Q) ->
+ Name = amqqueue:get_name(Q),
+ QPid = amqqueue:get_pid(Q),
+ SPids = amqqueue:get_slave_pids(Q),
+ case [Pid || Pid <- [QPid | SPids], node(Pid) =:= MirrorNode] of
+ [] ->
+ {error, {queue_not_mirrored_on_node, MirrorNode}};
+ [QPid] when SPids =:= [] ->
+ {error, cannot_drop_only_mirror};
+ [Pid] ->
+ log_info(Name, "Dropping queue mirror on node ~p~n",
+ [MirrorNode]),
+ exit(Pid, {shutdown, dropped}),
+ {ok, dropped}
+ end;
+ {error, not_found} = E ->
+ E
+ end.
+
+-spec add_mirrors(rabbit_amqqueue:name(), [node()], 'sync' | 'async') ->
+ 'ok'.
+
+add_mirrors(QName, Nodes, SyncMode) ->
+ [add_mirror(QName, Node, SyncMode) || Node <- Nodes],
+ ok.
+
+add_mirror(QName, MirrorNode, SyncMode) ->
+ case rabbit_amqqueue:lookup(QName) of
+ {ok, Q} ->
+ rabbit_misc:with_exit_handler(
+ rabbit_misc:const(ok),
+ fun () ->
+ #resource{virtual_host = VHost} = amqqueue:get_name(Q),
+ case rabbit_vhost_sup_sup:get_vhost_sup(VHost, MirrorNode) of
+ {ok, _} ->
+ try
+ SPid = rabbit_amqqueue_sup_sup:start_queue_process(
+ MirrorNode, Q, slave),
+ log_info(QName, "Adding mirror on node ~p: ~p~n",
+ [MirrorNode, SPid]),
+ rabbit_mirror_queue_slave:go(SPid, SyncMode)
+ of
+ _ -> ok
+ catch
+ error:QError ->
+ log_warning(QName,
+ "Unable to start queue mirror on node '~p'. "
+ "Target queue supervisor is not running: ~p~n",
+ [MirrorNode, QError])
+ end;
+ {error, Error} ->
+ log_warning(QName,
+ "Unable to start queue mirror on node '~p'. "
+ "Target virtual host is not running: ~p~n",
+ [MirrorNode, Error]),
+ ok
+ end
+ end);
+ {error, not_found} = E ->
+ E
+ end.
+
+report_deaths(_MirrorPid, _IsMaster, _QueueName, []) ->
+ ok;
+report_deaths(MirrorPid, IsMaster, QueueName, DeadPids) ->
+ log_info(QueueName, "~s ~s saw deaths of mirrors~s~n",
+ [case IsMaster of
+ true -> "Master";
+ false -> "Slave"
+ end,
+ rabbit_misc:pid_to_string(MirrorPid),
+ [[$ , rabbit_misc:pid_to_string(P)] || P <- DeadPids]]).
+
+-spec log_info(rabbit_amqqueue:name(), string(), [any()]) -> 'ok'.
+
+log_info(QName, Fmt, Args) ->
+ rabbit_log_mirroring:info("Mirrored ~s: " ++ Fmt,
+ [rabbit_misc:rs(QName) | Args]).
+
+-spec log_warning(rabbit_amqqueue:name(), string(), [any()]) -> 'ok'.
+
+log_warning(QName, Fmt, Args) ->
+ rabbit_log_mirroring:warning("Mirrored ~s: " ++ Fmt,
+ [rabbit_misc:rs(QName) | Args]).
+
+-spec store_updated_slaves(amqqueue:amqqueue()) ->
+ amqqueue:amqqueue().
+
+store_updated_slaves(Q0) when ?is_amqqueue(Q0) ->
+ SPids = amqqueue:get_slave_pids(Q0),
+ SSPids = amqqueue:get_sync_slave_pids(Q0),
+ RS0 = amqqueue:get_recoverable_slaves(Q0),
+ %% TODO now that we clear sync_slave_pids in rabbit_durable_queue,
+ %% do we still need this filtering?
+ SSPids1 = [SSPid || SSPid <- SSPids, lists:member(SSPid, SPids)],
+ Q1 = amqqueue:set_sync_slave_pids(Q0, SSPids1),
+ RS1 = update_recoverable(SPids, RS0),
+ Q2 = amqqueue:set_recoverable_slaves(Q1, RS1),
+ Q3 = amqqueue:set_state(Q2, live),
+ %% amqqueue migration:
+ %% The amqqueue was read from this transaction, no need to handle
+ %% migration.
+ ok = rabbit_amqqueue:store_queue(Q3),
+ %% Wake it up so that we emit a stats event
+ rabbit_amqqueue:notify_policy_changed(Q3),
+ Q3.
+
+%% Recoverable nodes are those which we could promote if the whole
+%% cluster were to suddenly stop and we then lose the master; i.e. all
+%% nodes with running mirrors, and all stopped nodes which had running
+%% mirrors when they were up.
+%%
+%% Therefore we aim here to add new nodes with mirrors, and remove
+%% running nodes without mirrors. We also try to keep the order
+%% constant, and similar to the live SPids field (i.e. oldest
+%% first). That's not necessarily optimal if nodes spend a long time
+%% down, but we don't have a good way to predict what the optimal is
+%% in that case anyway, and we assume nodes will not just be down for
+%% a long time without being removed.
+update_recoverable(SPids, RS) ->
+ SNodes = [node(SPid) || SPid <- SPids],
+ RunningNodes = rabbit_nodes:all_running(),
+ AddNodes = SNodes -- RS,
+ DelNodes = RunningNodes -- SNodes, %% i.e. running with no slave
+ (RS -- DelNodes) ++ AddNodes.
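+
+%% Worked example with hypothetical nodes: if the mirrors run on
+%% [b, c], RS = [a, b] and the running nodes are [a, b, c], then
+%% AddNodes = [c], DelNodes = [a] and the result is
+%% (RS -- DelNodes) ++ AddNodes = [b, c].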
+
+stop_all_slaves(Reason, SPids, QName, GM, WaitTimeout) ->
+ PidsMRefs = [{Pid, erlang:monitor(process, Pid)} || Pid <- [GM | SPids]],
+ ok = gm:broadcast(GM, {delete_and_terminate, Reason}),
+ %% It's possible that we could be partitioned from some mirrors
+ %% between the lookup and the broadcast, in which case we could
+ %% monitor them but they would not have received the GM
+ %% message. So only wait for mirrors which are still
+ %% not-partitioned.
+ PendingSlavePids = lists:foldl(fun({Pid, MRef}, Acc) ->
+ case rabbit_mnesia:on_running_node(Pid) of
+ true ->
+ receive
+ {'DOWN', MRef, process, _Pid, _Info} ->
+ Acc
+ after WaitTimeout ->
+ rabbit_mirror_queue_misc:log_warning(
+ QName, "Missing 'DOWN' message from ~p in"
+ " node ~p~n", [Pid, node(Pid)]),
+ [Pid | Acc]
+ end;
+ false ->
+ Acc
+ end
+ end, [], PidsMRefs),
+ %% Normally when we remove a mirror another mirror or master will
+ %% notice and update Mnesia. But we just removed them all, and
+ %% have stopped listening ourselves. So manually clean up.
+ rabbit_misc:execute_mnesia_transaction(fun () ->
+ [Q0] = mnesia:read({rabbit_queue, QName}),
+ Q1 = amqqueue:set_gm_pids(Q0, []),
+ Q2 = amqqueue:set_slave_pids(Q1, []),
+ %% Restarted mirrors on running nodes can
+ %% ensure old incarnations are stopped using
+ %% the pending mirror pids.
+ Q3 = amqqueue:set_slave_pids_pending_shutdown(Q2, PendingSlavePids),
+ rabbit_mirror_queue_misc:store_updated_slaves(Q3)
+ end),
+ ok = gm:forget_group(QName).
+
+%%----------------------------------------------------------------------------
+
+promote_slave([SPid | SPids]) ->
+ %% The mirror pids are maintained in descending order of age, so
+ %% the one to promote is the oldest.
+ {SPid, SPids}.
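+%% e.g. promote_slave([P1, P2, P3]) returns {P1, [P2, P3]}: P1 is the
+%% oldest mirror and thus the promotion candidate.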
+
+-spec initial_queue_node(amqqueue:amqqueue(), node()) -> node().
+
+initial_queue_node(Q, DefNode) ->
+ {MNode, _SNodes} = suggested_queue_nodes(Q, DefNode, rabbit_nodes:all_running()),
+ MNode.
+
+-spec suggested_queue_nodes(amqqueue:amqqueue()) ->
+ {node(), [node()]}.
+
+suggested_queue_nodes(Q) -> suggested_queue_nodes(Q, rabbit_nodes:all_running()).
+suggested_queue_nodes(Q, All) -> suggested_queue_nodes(Q, node(), All).
+
+%% The third argument exists so we can pull a call to
+%% rabbit_nodes:all_running() out of a loop or transaction
+%% or both.
+suggested_queue_nodes(Q, DefNode, All) when ?is_amqqueue(Q) ->
+ Owner = amqqueue:get_exclusive_owner(Q),
+ {MNode0, SNodes, SSNodes} = actual_queue_nodes(Q),
+ MNode = case MNode0 of
+ none -> DefNode;
+ _ -> MNode0
+ end,
+ case Owner of
+ none -> Params = policy(<<"ha-params">>, Q),
+ case module(Q) of
+ {ok, M} -> M:suggested_queue_nodes(
+ Params, MNode, SNodes, SSNodes, All);
+ _ -> {MNode, []}
+ end;
+ _ -> {MNode, []}
+ end.
+
+policy(Policy, Q) ->
+ case rabbit_policy:get(Policy, Q) of
+ undefined -> none;
+ P -> P
+ end.
+
+module(Q) when ?is_amqqueue(Q) ->
+ case rabbit_policy:get(<<"ha-mode">>, Q) of
+ undefined -> not_mirrored;
+ Mode -> module(Mode)
+ end;
+
+module(Mode) when is_binary(Mode) ->
+ case rabbit_registry:binary_to_type(Mode) of
+ {error, not_found} -> not_mirrored;
+ T -> case rabbit_registry:lookup_module(ha_mode, T) of
+ {ok, Module} -> {ok, Module};
+ _ -> not_mirrored
+ end
+ end.
+
+validate_mode(Mode) ->
+ case module(Mode) of
+ {ok, _Module} ->
+ ok;
+ not_mirrored ->
+ {error, "~p is not a valid ha-mode value", [Mode]}
+ end.
+
+-spec is_mirrored(amqqueue:amqqueue()) -> boolean().
+
+is_mirrored(Q) ->
+ case module(Q) of
+ {ok, _} -> true;
+ _ -> false
+ end.
+
+is_mirrored_ha_nodes(Q) ->
+ case module(Q) of
+ {ok, ?HA_NODES_MODULE} -> true;
+ _ -> false
+ end.
+
+actual_queue_nodes(Q) when ?is_amqqueue(Q) ->
+ MPid = amqqueue:get_pid(Q),
+ SPids = amqqueue:get_slave_pids(Q),
+ SSPids = amqqueue:get_sync_slave_pids(Q),
+ Nodes = fun (L) -> [node(Pid) || Pid <- L] end,
+ {case MPid of
+ none -> none;
+ _ -> node(MPid)
+ end, Nodes(SPids), Nodes(SSPids)}.
+
+-spec maybe_auto_sync(amqqueue:amqqueue()) -> 'ok'.
+
+maybe_auto_sync(Q) when ?is_amqqueue(Q) ->
+ QPid = amqqueue:get_pid(Q),
+ case policy(<<"ha-sync-mode">>, Q) of
+ <<"automatic">> ->
+ spawn(fun() -> rabbit_amqqueue:sync_mirrors(QPid) end);
+ _ ->
+ ok
+ end.
+
+sync_queue(Q0) ->
+ F = fun
+ (Q) when ?amqqueue_is_classic(Q) ->
+ QPid = amqqueue:get_pid(Q),
+ rabbit_amqqueue:sync_mirrors(QPid);
+ (Q) when ?amqqueue_is_quorum(Q) ->
+ {error, quorum_queue_not_supported}
+ end,
+ rabbit_amqqueue:with(Q0, F).
+
+cancel_sync_queue(Q0) ->
+ F = fun
+ (Q) when ?amqqueue_is_classic(Q) ->
+ QPid = amqqueue:get_pid(Q),
+ rabbit_amqqueue:cancel_sync_mirrors(QPid);
+ (Q) when ?amqqueue_is_quorum(Q) ->
+ {error, quorum_queue_not_supported}
+ end,
+ rabbit_amqqueue:with(Q0, F).
+
+sync_batch_size(Q) when ?is_amqqueue(Q) ->
+ case policy(<<"ha-sync-batch-size">>, Q) of
+ none -> %% we need this case because none > 1 == true
+ default_batch_size();
+ BatchSize when BatchSize > 1 ->
+ BatchSize;
+ _ ->
+ default_batch_size()
+ end.
+
+-define(DEFAULT_BATCH_SIZE, 4096).
+
+default_batch_size() ->
+ rabbit_misc:get_env(rabbit, mirroring_sync_batch_size,
+ ?DEFAULT_BATCH_SIZE).
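+
+%% The default can be overridden through the application environment,
+%% e.g. (illustrative advanced.config snippet):
+%%
+%% [{rabbit, [{mirroring_sync_batch_size, 8192}]}].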
+
+-spec update_mirrors
+ (amqqueue:amqqueue(), amqqueue:amqqueue()) -> 'ok'.
+
+update_mirrors(OldQ, NewQ) when ?amqqueue_pids_are_equal(OldQ, NewQ) ->
+ % Note: we do want to ensure both queues have same pid
+ QPid = amqqueue:get_pid(OldQ),
+ QPid = amqqueue:get_pid(NewQ),
+ case {is_mirrored(OldQ), is_mirrored(NewQ)} of
+ {false, false} -> ok;
+ _ -> rabbit_amqqueue:update_mirroring(QPid)
+ end.
+
+-spec update_mirrors
+ (amqqueue:amqqueue()) -> 'ok'.
+
+update_mirrors(Q) when ?is_amqqueue(Q) ->
+ QName = amqqueue:get_name(Q),
+ {OldMNode, OldSNodes, _} = actual_queue_nodes(Q),
+ {NewMNode, NewSNodes} = suggested_queue_nodes(Q),
+ OldNodes = [OldMNode | OldSNodes],
+ NewNodes = [NewMNode | NewSNodes],
+ %% When a mirror dies, remove_from_queue/2 might have to add new
+ %% mirrors (in "exactly" mode). It will check mnesia to see which
+ %% mirrors there currently are. If drop_mirror/2 is invoked first
+ %% then when we end up in remove_from_queue/2 it will not see the
+ %% mirrors that add_mirror/2 will add, and also want to add them
+ %% (even though we are not responding to the death of a
+ %% mirror). Breakage ensues.
+ add_mirrors(QName, NewNodes -- OldNodes, async),
+ drop_mirrors(QName, OldNodes -- NewNodes),
+ %% This is for the case where no extra nodes were added but we changed to
+ %% a policy requiring auto-sync.
+ maybe_auto_sync(Q),
+ ok.
+
+queue_length(Q) ->
+ [{messages, M}] = rabbit_amqqueue:info(Q, [messages]),
+ M.
+
+get_replicas(Q) ->
+ {MNode, SNodes} = suggested_queue_nodes(Q),
+ [MNode] ++ SNodes.
+
+transfer_leadership(Q, Destination) ->
+ QName = amqqueue:get_name(Q),
+ {OldMNode, OldSNodes, _} = actual_queue_nodes(Q),
+ OldNodes = [OldMNode | OldSNodes],
+ add_mirrors(QName, [Destination] -- OldNodes, async),
+ drop_mirrors(QName, OldNodes -- [Destination]),
+ {Result, NewQ} = wait_for_new_master(QName, Destination),
+ update_mirrors(NewQ),
+ Result.
+
+wait_for_new_master(QName, Destination) ->
+ wait_for_new_master(QName, Destination, 100).
+
+wait_for_new_master(QName, _, 0) ->
+ {ok, Q} = rabbit_amqqueue:lookup(QName),
+ {{not_migrated, ""}, Q};
+wait_for_new_master(QName, Destination, N) ->
+ {ok, Q} = rabbit_amqqueue:lookup(QName),
+ case amqqueue:get_pid(Q) of
+ none ->
+ timer:sleep(100),
+ wait_for_new_master(QName, Destination, N - 1);
+ Pid ->
+ case node(Pid) of
+ Destination ->
+ {{migrated, Destination}, Q};
+ _ ->
+ timer:sleep(100),
+ wait_for_new_master(QName, Destination, N - 1)
+ end
+ end.
+
+%% The arrival of a newly synced mirror may cause the master to die if
+%% the policy does not want the master but it has been kept alive
+%% because there were no synced mirrors.
+%%
+%% We don't just call update_mirrors/2 here since that could decide to
+%% start a mirror for some other reason, and since we are the mirror at
+%% the moment, that could allow complicated deadlocks.
+
+-spec maybe_drop_master_after_sync(amqqueue:amqqueue()) -> 'ok'.
+
+maybe_drop_master_after_sync(Q) when ?is_amqqueue(Q) ->
+ QName = amqqueue:get_name(Q),
+ MPid = amqqueue:get_pid(Q),
+ {DesiredMNode, DesiredSNodes} = suggested_queue_nodes(Q),
+ case node(MPid) of
+ DesiredMNode -> ok;
+ OldMNode -> false = lists:member(OldMNode, DesiredSNodes), %% [0]
+ drop_mirror(QName, OldMNode)
+ end,
+ ok.
+%% [0] ASSERTION - if the policy wants the master to change, it has
+%% not just shuffled it into the mirrors. All our modes ensure this
+%% does not happen, but we should guard against a misbehaving plugin.
+
+%%----------------------------------------------------------------------------
+
+validate_policy(KeyList) ->
+ Mode = proplists:get_value(<<"ha-mode">>, KeyList, none),
+ Params = proplists:get_value(<<"ha-params">>, KeyList, none),
+ SyncMode = proplists:get_value(<<"ha-sync-mode">>, KeyList, none),
+ SyncBatchSize = proplists:get_value(
+ <<"ha-sync-batch-size">>, KeyList, none),
+ PromoteOnShutdown = proplists:get_value(
+ <<"ha-promote-on-shutdown">>, KeyList, none),
+ PromoteOnFailure = proplists:get_value(
+ <<"ha-promote-on-failure">>, KeyList, none),
+ case {Mode, Params, SyncMode, SyncBatchSize, PromoteOnShutdown, PromoteOnFailure} of
+ {none, none, none, none, none, none} ->
+ ok;
+ {none, _, _, _, _, _} ->
+ {error, "ha-mode must be specified to specify ha-params, "
+ "ha-sync-mode or ha-promote-on-shutdown", []};
+ _ ->
+ validate_policies(
+ [{Mode, fun validate_mode/1},
+ {Params, ha_params_validator(Mode)},
+ {SyncMode, fun validate_sync_mode/1},
+ {SyncBatchSize, fun validate_sync_batch_size/1},
+ {PromoteOnShutdown, fun validate_pos/1},
+ {PromoteOnFailure, fun validate_pof/1}])
+ end.
+
+ha_params_validator(Mode) ->
+ fun(Val) ->
+ {ok, M} = module(Mode),
+ M:validate_policy(Val)
+ end.
+
+validate_policies([]) ->
+ ok;
+validate_policies([{Val, Validator} | Rest]) ->
+ case Validator(Val) of
+ ok -> validate_policies(Rest);
+ E -> E
+ end.
+
+validate_sync_mode(SyncMode) ->
+ case SyncMode of
+ <<"automatic">> -> ok;
+ <<"manual">> -> ok;
+ none -> ok;
+ Mode -> {error, "ha-sync-mode must be \"manual\" "
+ "or \"automatic\", got ~p", [Mode]}
+ end.
+
+validate_sync_batch_size(none) ->
+ ok;
+validate_sync_batch_size(N) when is_integer(N) andalso N > 0 ->
+ ok;
+validate_sync_batch_size(N) ->
+ {error, "ha-sync-batch-size takes an integer greater than 0, "
+ "~p given", [N]}.
+
+validate_pos(PromoteOnShutdown) ->
+ case PromoteOnShutdown of
+ <<"always">> -> ok;
+ <<"when-synced">> -> ok;
+ none -> ok;
+ Mode -> {error, "ha-promote-on-shutdown must be "
+ "\"always\" or \"when-synced\", got ~p", [Mode]}
+ end.
+
+validate_pof(PromoteOnFailure) ->
+ case PromoteOnFailure of
+ <<"always">> -> ok;
+ <<"when-synced">> -> ok;
+ none -> ok;
+ Mode -> {error, "ha-promote-on-failure must be "
+ "\"always\" or \"when-synced\", got ~p", [Mode]}
+ end.
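+
+%% These validators run when an HA policy is set, e.g. (illustrative
+%% policy; names are made up):
+%%
+%% rabbitmqctl set_policy ha-two "^two\." \
+%% '{"ha-mode":"exactly","ha-params":2,"ha-sync-mode":"automatic"}'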
diff --git a/deps/rabbit/src/rabbit_mirror_queue_mode.erl b/deps/rabbit/src/rabbit_mirror_queue_mode.erl
new file mode 100644
index 0000000000..91491efc49
--- /dev/null
+++ b/deps/rabbit/src/rabbit_mirror_queue_mode.erl
@@ -0,0 +1,42 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2010-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mirror_queue_mode).
+
+-behaviour(rabbit_registry_class).
+
+-export([added_to_rabbit_registry/2, removed_from_rabbit_registry/1]).
+
+-type master() :: node().
+-type slave() :: node().
+-type params() :: any().
+
+-callback description() -> [proplists:property()].
+
+%% Called whenever we think we might need to change nodes for a
+%% mirrored queue. Note that this is called from a variety of
+%% contexts, both inside and outside Mnesia transactions. Ideally it
+%% will be pure-functional.
+%%
+%% Takes: parameters set in the policy,
+%% current master,
+%% current mirrors,
+%% current synchronised mirrors,
+%% all nodes to consider
+%%
+%% Returns: tuple of new master, new mirrors
+%%
+-callback suggested_queue_nodes(
+ params(), master(), [slave()], [slave()], [node()]) ->
+ {master(), [slave()]}.
+
+%% Are the parameters valid for this mode?
+-callback validate_policy(params()) ->
+ rabbit_policy_validator:validate_results().
+
+added_to_rabbit_registry(_Type, _ModuleName) -> ok.
+removed_from_rabbit_registry(_Type) -> ok.
diff --git a/deps/rabbit/src/rabbit_mirror_queue_mode_all.erl b/deps/rabbit/src/rabbit_mirror_queue_mode_all.erl
new file mode 100644
index 0000000000..2da12a5972
--- /dev/null
+++ b/deps/rabbit/src/rabbit_mirror_queue_mode_all.erl
@@ -0,0 +1,32 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2010-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mirror_queue_mode_all).
+
+-include("rabbit.hrl").
+
+-behaviour(rabbit_mirror_queue_mode).
+
+-export([description/0, suggested_queue_nodes/5, validate_policy/1]).
+
+-rabbit_boot_step({?MODULE,
+ [{description, "mirror mode all"},
+ {mfa, {rabbit_registry, register,
+ [ha_mode, <<"all">>, ?MODULE]}},
+ {requires, rabbit_registry},
+ {enables, kernel_ready}]}).
+
+description() ->
+ [{description, <<"Mirror queue to all nodes">>}].
+
+suggested_queue_nodes(_Params, MNode, _SNodes, _SSNodes, Poss) ->
+ {MNode, Poss -- [MNode]}.
+
+validate_policy(none) ->
+ ok;
+validate_policy(_Params) ->
+ {error, "ha-mode=\"all\" does not take parameters", []}.
diff --git a/deps/rabbit/src/rabbit_mirror_queue_mode_exactly.erl b/deps/rabbit/src/rabbit_mirror_queue_mode_exactly.erl
new file mode 100644
index 0000000000..a8aa7546ac
--- /dev/null
+++ b/deps/rabbit/src/rabbit_mirror_queue_mode_exactly.erl
@@ -0,0 +1,45 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2010-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mirror_queue_mode_exactly).
+
+-include("rabbit.hrl").
+
+-behaviour(rabbit_mirror_queue_mode).
+
+-export([description/0, suggested_queue_nodes/5, validate_policy/1]).
+
+-rabbit_boot_step({?MODULE,
+ [{description, "mirror mode exactly"},
+ {mfa, {rabbit_registry, register,
+ [ha_mode, <<"exactly">>, ?MODULE]}},
+ {requires, rabbit_registry},
+ {enables, kernel_ready}]}).
+
+description() ->
+ [{description, <<"Mirror queue to a specified number of nodes">>}].
+
+%% When we need to add nodes, we randomise our candidate list as a
+%% crude form of load-balancing. TODO it would also be nice to
+%% randomise the list of ones to remove when we have too many - we
+%% would have to take account of synchronisation though.
+suggested_queue_nodes(Count, MNode, SNodes, _SSNodes, Poss) ->
+ SCount = Count - 1,
+ {MNode, case SCount > length(SNodes) of
+ true -> Cand = shuffle((Poss -- [MNode]) -- SNodes),
+ SNodes ++ lists:sublist(Cand, SCount - length(SNodes));
+ false -> lists:sublist(SNodes, SCount)
+ end}.
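+
+%% Worked example with hypothetical nodes: Count = 3, MNode = a,
+%% SNodes = [b], Poss = [a, b, c, d]. SCount = 2 > length([b]), so we
+%% shuffle [c, d] and take one candidate, yielding e.g. {a, [b, c]}.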
+
+shuffle(L) ->
+ {_, L1} = lists:unzip(lists:keysort(1, [{rand:uniform(), N} || N <- L])),
+ L1.
+
+validate_policy(N) when is_integer(N) andalso N > 0 ->
+ ok;
+validate_policy(Params) ->
+ {error, "ha-mode=\"exactly\" takes an integer, ~p given", [Params]}.
diff --git a/deps/rabbit/src/rabbit_mirror_queue_mode_nodes.erl b/deps/rabbit/src/rabbit_mirror_queue_mode_nodes.erl
new file mode 100644
index 0000000000..f3e134ba63
--- /dev/null
+++ b/deps/rabbit/src/rabbit_mirror_queue_mode_nodes.erl
@@ -0,0 +1,69 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2010-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mirror_queue_mode_nodes).
+
+-include("rabbit.hrl").
+
+-behaviour(rabbit_mirror_queue_mode).
+
+-export([description/0, suggested_queue_nodes/5, validate_policy/1]).
+
+-rabbit_boot_step({?MODULE,
+ [{description, "mirror mode nodes"},
+ {mfa, {rabbit_registry, register,
+ [ha_mode, <<"nodes">>, ?MODULE]}},
+ {requires, rabbit_registry},
+ {enables, kernel_ready}]}).
+
+description() ->
+ [{description, <<"Mirror queue to specified nodes">>}].
+
+suggested_queue_nodes(PolicyNodes0, CurrentMaster, _SNodes, SSNodes, NodesRunningRabbitMQ) ->
+ PolicyNodes1 = [list_to_atom(binary_to_list(Node)) || Node <- PolicyNodes0],
+ %% If the current master is not in the nodes specified, then what we want
+ %% to do depends on whether there are any synchronised mirrors. If there
+ %% are then we can just kill the current master - the admin has asked for
+ %% a migration and we should give it to them. If there are not however
+ %% then we must keep the master around so as not to lose messages.
+
+ PolicyNodes = case SSNodes of
+ [] -> lists:usort([CurrentMaster | PolicyNodes1]);
+ _ -> PolicyNodes1
+ end,
+ Unavailable = PolicyNodes -- NodesRunningRabbitMQ,
+ AvailablePolicyNodes = PolicyNodes -- Unavailable,
+ case AvailablePolicyNodes of
+ [] -> %% We have never heard of anything? Not much we can do but
+ %% keep the master alive.
+ {CurrentMaster, []};
+ _ -> case lists:member(CurrentMaster, AvailablePolicyNodes) of
+ true -> {CurrentMaster,
+ AvailablePolicyNodes -- [CurrentMaster]};
+ false -> %% Make sure the new master is synced! In order to
+ %% get here SSNodes must not be empty.
+ SyncPolicyNodes = [Node ||
+ Node <- AvailablePolicyNodes,
+ lists:member(Node, SSNodes)],
+ NewMaster = case SyncPolicyNodes of
+ [Node | _] -> Node;
+ [] -> erlang:hd(SSNodes)
+ end,
+ {NewMaster, AvailablePolicyNodes -- [NewMaster]}
+ end
+ end.
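+
+%% Worked example with hypothetical nodes: policy nodes
+%% [<<"b">>, <<"c">>], CurrentMaster = a, SSNodes = [b], and
+%% [a, b, c] all running. SSNodes is non-empty, so the master may
+%% move; b is both requested and synchronised, giving {b, [c]}.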
+
+validate_policy([]) ->
+ {error, "ha-mode=\"nodes\" list must be non-empty", []};
+validate_policy(Nodes) when is_list(Nodes) ->
+ case [I || I <- Nodes, not is_binary(I)] of
+ [] -> ok;
+ Invalid -> {error, "ha-mode=\"nodes\" takes a list of strings, "
+ "~p was not a string", [Invalid]}
+ end;
+validate_policy(Params) ->
+ {error, "ha-mode=\"nodes\" takes a list, ~p given", [Params]}.
diff --git a/deps/rabbit/src/rabbit_mirror_queue_slave.erl b/deps/rabbit/src/rabbit_mirror_queue_slave.erl
new file mode 100644
index 0000000000..0480db9cfe
--- /dev/null
+++ b/deps/rabbit/src/rabbit_mirror_queue_slave.erl
@@ -0,0 +1,1093 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2010-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mirror_queue_slave).
+
+%% For general documentation of HA design, see
+%% rabbit_mirror_queue_coordinator
+%%
+%% We receive messages from GM and from publishers, and the gm
+%% messages can arrive either before or after the 'actual' message.
+%% All instructions from the GM group must be processed in the order
+%% in which they're received.
+
+-export([set_maximum_since_use/2, info/1, go/2]).
+
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
+ code_change/3, handle_pre_hibernate/1, prioritise_call/4,
+ prioritise_cast/3, prioritise_info/3, format_message_queue/2]).
+
+-export([joined/2, members_changed/3, handle_msg/3, handle_terminate/2]).
+
+-behaviour(gen_server2).
+-behaviour(gm).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-include("amqqueue.hrl").
+-include("gm_specs.hrl").
+
+%%----------------------------------------------------------------------------
+
+-define(INFO_KEYS,
+ [pid,
+ name,
+ master_pid,
+ is_synchronised
+ ]).
+
+-define(SYNC_INTERVAL, 25). %% milliseconds
+-define(RAM_DURATION_UPDATE_INTERVAL, 5000).
+-define(DEATH_TIMEOUT, 20000). %% 20 seconds
+
+-record(state, { q,
+ gm,
+ backing_queue,
+ backing_queue_state,
+ sync_timer_ref,
+ rate_timer_ref,
+
+ sender_queues, %% :: Pid -> {Q Msg, Set MsgId, ChState}
+ msg_id_ack, %% :: MsgId -> AckTag
+
+ msg_id_status,
+ known_senders,
+
+ %% Master depth - local depth
+ depth_delta
+ }).
+
+%%----------------------------------------------------------------------------
+
+set_maximum_since_use(QPid, Age) ->
+ gen_server2:cast(QPid, {set_maximum_since_use, Age}).
+
+info(QPid) -> gen_server2:call(QPid, info, infinity).
+
+init(Q) when ?is_amqqueue(Q) ->
+ QName = amqqueue:get_name(Q),
+ ?store_proc_name(QName),
+ {ok, {not_started, Q}, hibernate,
+ {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN,
+ ?DESIRED_HIBERNATE}, ?MODULE}.
+
+go(SPid, sync) -> gen_server2:call(SPid, go, infinity);
+go(SPid, async) -> gen_server2:cast(SPid, go).
+
+handle_go(Q0) when ?is_amqqueue(Q0) ->
+ QName = amqqueue:get_name(Q0),
+ %% We join the GM group before we add ourselves to the amqqueue
+ %% record. As a result:
+ %% 1. We can receive msgs from GM that correspond to messages we will
+ %% never receive from publishers.
+ %% 2. When we receive a message from publishers, we must receive a
+ %% message from the GM group for it.
+ %% 3. However, that instruction from the GM group can arrive either
+ %% before or after the actual message. We need to be able to
+ %% distinguish between GM instructions arriving early, and case (1)
+ %% above.
+ %%
+ process_flag(trap_exit, true), %% amqqueue_process traps exits too.
+ {ok, GM} = gm:start_link(QName, ?MODULE, [self()],
+ fun rabbit_misc:execute_mnesia_transaction/1),
+ MRef = erlang:monitor(process, GM),
+ %% We ignore the DOWN message because we are also linked and
+ %% trapping exits, we just want to not get stuck and we will exit
+ %% later.
+ receive
+ {joined, GM} -> erlang:demonitor(MRef, [flush]),
+ ok;
+ {'DOWN', MRef, _, _, _} -> ok
+ end,
+ Self = self(),
+ Node = node(),
+ case rabbit_misc:execute_mnesia_transaction(
+ fun() -> init_it(Self, GM, Node, QName) end) of
+ {new, QPid, GMPids} ->
+ ok = file_handle_cache:register_callback(
+ rabbit_amqqueue, set_maximum_since_use, [Self]),
+ ok = rabbit_memory_monitor:register(
+ Self, {rabbit_amqqueue, set_ram_duration_target, [Self]}),
+ {ok, BQ} = application:get_env(backing_queue_module),
+ Q1 = amqqueue:set_pid(Q0, QPid),
+ _ = BQ:delete_crashed(Q1), %% For crash recovery
+ BQS = bq_init(BQ, Q1, new),
+ State = #state { q = Q1,
+ gm = GM,
+ backing_queue = BQ,
+ backing_queue_state = BQS,
+ rate_timer_ref = undefined,
+ sync_timer_ref = undefined,
+
+ sender_queues = #{},
+ msg_id_ack = #{},
+
+ msg_id_status = #{},
+ known_senders = pmon:new(delegate),
+
+ depth_delta = undefined
+ },
+ ok = gm:broadcast(GM, request_depth),
+ ok = gm:validate_members(GM, [GM | [G || {G, _} <- GMPids]]),
+ rabbit_mirror_queue_misc:maybe_auto_sync(Q1),
+ {ok, State};
+ {stale, StalePid} ->
+ rabbit_mirror_queue_misc:log_warning(
+ QName, "Detected stale HA master: ~p~n", [StalePid]),
+ gm:leave(GM),
+ {error, {stale_master_pid, StalePid}};
+ duplicate_live_master ->
+ gm:leave(GM),
+ {error, {duplicate_live_master, Node}};
+ existing ->
+ gm:leave(GM),
+ {error, normal};
+ master_in_recovery ->
+ gm:leave(GM),
+ %% The queue record vanished - we must have a master starting
+ %% concurrently with us. In that case we can safely decide to do
+ %% nothing here, and the master will start us in
+ %% master:init_with_existing_bq/3
+ {error, normal}
+ end.
+
+init_it(Self, GM, Node, QName) ->
+ case mnesia:read({rabbit_queue, QName}) of
+ [Q] when ?is_amqqueue(Q) ->
+ QPid = amqqueue:get_pid(Q),
+ SPids = amqqueue:get_slave_pids(Q),
+ GMPids = amqqueue:get_gm_pids(Q),
+ PSPids = amqqueue:get_slave_pids_pending_shutdown(Q),
+ case [Pid || Pid <- [QPid | SPids], node(Pid) =:= Node] of
+ [] -> stop_pending_slaves(QName, PSPids),
+ add_slave(Q, Self, GM),
+ {new, QPid, GMPids};
+ [QPid] -> case rabbit_mnesia:is_process_alive(QPid) of
+ true -> duplicate_live_master;
+ false -> {stale, QPid}
+ end;
+ [SPid] -> case rabbit_mnesia:is_process_alive(SPid) of
+ true -> existing;
+ false -> GMPids1 = [T || T = {_, S} <- GMPids, S =/= SPid],
+ SPids1 = SPids -- [SPid],
+ Q1 = amqqueue:set_slave_pids(Q, SPids1),
+ Q2 = amqqueue:set_gm_pids(Q1, GMPids1),
+ add_slave(Q2, Self, GM),
+ {new, QPid, GMPids1}
+ end
+ end;
+ [] ->
+ master_in_recovery
+ end.
+
+%% Pending mirrors have been asked to stop by the master, but despite the
+%% node being up they did not respond within the expected timeout. Stop
+%% local mirrors now.
+stop_pending_slaves(QName, Pids) ->
+ [begin
+ rabbit_mirror_queue_misc:log_warning(
+ QName, "Detected a non-responsive classic queue mirror, stopping it: ~p~n", [Pid]),
+ case erlang:process_info(Pid, dictionary) of
+ undefined -> ok;
+ {dictionary, Dict} ->
+ Vhost = QName#resource.virtual_host,
+ {ok, AmqQSup} = rabbit_amqqueue_sup_sup:find_for_vhost(Vhost),
+ case proplists:get_value('$ancestors', Dict) of
+ [Sup, AmqQSup | _] ->
+ exit(Sup, kill),
+ exit(Pid, kill);
+ _ ->
+ ok
+ end
+ end
+ end || Pid <- Pids, node(Pid) =:= node(),
+ true =:= erlang:is_process_alive(Pid)].
+
+%% Add to the end, so they are in descending order of age, see
+%% rabbit_mirror_queue_misc:promote_slave/1
+add_slave(Q0, New, GM) when ?is_amqqueue(Q0) ->
+ SPids = amqqueue:get_slave_pids(Q0),
+ GMPids = amqqueue:get_gm_pids(Q0),
+ SPids1 = SPids ++ [New],
+ GMPids1 = [{GM, New} | GMPids],
+ Q1 = amqqueue:set_slave_pids(Q0, SPids1),
+ Q2 = amqqueue:set_gm_pids(Q1, GMPids1),
+ rabbit_mirror_queue_misc:store_updated_slaves(Q2).
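+%% e.g. with SPids = [P1, P2], adding P3 gives [P1, P2, P3]; the head
+%% remains the oldest mirror, as promote_slave/1 expects.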
+
+handle_call(go, _From, {not_started, Q} = NotStarted) ->
+ case handle_go(Q) of
+ {ok, State} -> {reply, ok, State};
+ {error, Error} -> {stop, Error, NotStarted}
+ end;
+
+handle_call({gm_deaths, DeadGMPids}, From,
+ State = #state{ gm = GM, q = Q,
+ backing_queue = BQ,
+ backing_queue_state = BQS}) when ?is_amqqueue(Q) ->
+ QName = amqqueue:get_name(Q),
+ MPid = amqqueue:get_pid(Q),
+ Self = self(),
+ case rabbit_mirror_queue_misc:remove_from_queue(QName, Self, DeadGMPids) of
+ {error, not_found} ->
+ gen_server2:reply(From, ok),
+ {stop, normal, State};
+ {error, {not_synced, _SPids}} ->
+ BQ:delete_and_terminate({error, not_synced}, BQS),
+ {stop, normal, State#state{backing_queue_state = undefined}};
+ {ok, Pid, DeadPids, ExtraNodes} ->
+ rabbit_mirror_queue_misc:report_deaths(Self, false, QName,
+ DeadPids),
+ case Pid of
+ MPid ->
+ %% master hasn't changed
+ gen_server2:reply(From, ok),
+ rabbit_mirror_queue_misc:add_mirrors(
+ QName, ExtraNodes, async),
+ noreply(State);
+ Self ->
+ %% we've become master
+ QueueState = promote_me(From, State),
+ rabbit_mirror_queue_misc:add_mirrors(
+ QName, ExtraNodes, async),
+ {become, rabbit_amqqueue_process, QueueState, hibernate};
+ _ ->
+ %% master has changed to not us
+ gen_server2:reply(From, ok),
+ %% see rabbitmq-server#914;
+ %% It's not always guaranteed that we won't have ExtraNodes.
+ %% If gm alters, master can change to not us with extra nodes,
+ %% in which case we attempt to add mirrors on those nodes.
+ case ExtraNodes of
+ [] -> void;
+ _ -> rabbit_mirror_queue_misc:add_mirrors(
+ QName, ExtraNodes, async)
+ end,
+ %% Since GM is by nature lazy we need to make sure
+ %% there is some traffic when a master dies, to
+ %% make sure all mirrors get informed of the
+ %% death. That is all process_death does, create
+ %% some traffic.
+ ok = gm:broadcast(GM, process_death),
+ Q1 = amqqueue:set_pid(Q, Pid),
+ State1 = State#state{q = Q1},
+ noreply(State1)
+ end
+ end;
+
+handle_call(info, _From, State) ->
+ reply(infos(?INFO_KEYS, State), State).
+
+handle_cast(go, {not_started, Q} = NotStarted) ->
+ case handle_go(Q) of
+ {ok, State} -> {noreply, State};
+ {error, Error} -> {stop, Error, NotStarted}
+ end;
+
+handle_cast({run_backing_queue, Mod, Fun}, State) ->
+ noreply(run_backing_queue(Mod, Fun, State));
+
+handle_cast({gm, Instruction}, State = #state{q = Q0}) when ?is_amqqueue(Q0) ->
+ QName = amqqueue:get_name(Q0),
+ case rabbit_amqqueue:lookup(QName) of
+ {ok, Q1} when ?is_amqqueue(Q1) ->
+ SPids = amqqueue:get_slave_pids(Q1),
+ case lists:member(self(), SPids) of
+ true ->
+ handle_process_result(process_instruction(Instruction, State));
+ false ->
+ %% Potentially a duplicated mirror caused by a partial partition,
+ %% will stop as a new mirror could start unaware of our presence
+ {stop, shutdown, State}
+ end;
+ {error, not_found} ->
+ %% Would not expect this to happen after fixing #953
+ {stop, shutdown, State}
+ end;
+
+handle_cast({deliver, Delivery = #delivery{sender = Sender, flow = Flow}, true},
+ State) ->
+ %% Asynchronous, non-"mandatory", deliver mode.
+ %% We are acking messages to the channel process that sent us
+ %% the message delivery. See
+ %% rabbit_amqqueue_process:handle_ch_down for more info.
+ %% If message is rejected by the master, the publish will be nacked
+ %% even if mirrors confirm it. No need to check for length here.
+ maybe_flow_ack(Sender, Flow),
+ noreply(maybe_enqueue_message(Delivery, State));
+
+handle_cast({sync_start, Ref, Syncer},
+ State = #state { depth_delta = DD,
+ backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ State1 = #state{rate_timer_ref = TRef} = ensure_rate_timer(State),
+ S = fun({MA, TRefN, BQSN}) ->
+ State1#state{depth_delta = undefined,
+ msg_id_ack = maps:from_list(MA),
+ rate_timer_ref = TRefN,
+ backing_queue_state = BQSN}
+ end,
+ case rabbit_mirror_queue_sync:slave(
+ DD, Ref, TRef, Syncer, BQ, BQS,
+ fun (BQN, BQSN) ->
+ BQSN1 = update_ram_duration(BQN, BQSN),
+ TRefN = rabbit_misc:send_after(?RAM_DURATION_UPDATE_INTERVAL,
+ self(), update_ram_duration),
+ {TRefN, BQSN1}
+ end) of
+ denied -> noreply(State1);
+ {ok, Res} -> noreply(set_delta(0, S(Res)));
+ {failed, Res} -> noreply(S(Res));
+ {stop, Reason, Res} -> {stop, Reason, S(Res)}
+ end;
+
+handle_cast({set_maximum_since_use, Age}, State) ->
+ ok = file_handle_cache:set_maximum_since_use(Age),
+ noreply(State);
+
+handle_cast({set_ram_duration_target, Duration},
+ State = #state { backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ BQS1 = BQ:set_ram_duration_target(Duration, BQS),
+ noreply(State #state { backing_queue_state = BQS1 });
+
+handle_cast(policy_changed, State) ->
+ %% During partial partitions, we might end up receiving messages
+ %% meant for a master. Ignore them.
+ noreply(State).
+
+handle_info(update_ram_duration, State = #state{backing_queue = BQ,
+ backing_queue_state = BQS}) ->
+ BQS1 = update_ram_duration(BQ, BQS),
+ %% Don't call noreply/1, we don't want to set timers
+ {State1, Timeout} = next_state(State #state {
+ rate_timer_ref = undefined,
+ backing_queue_state = BQS1 }),
+ {noreply, State1, Timeout};
+
+handle_info(sync_timeout, State) ->
+ noreply(backing_queue_timeout(
+ State #state { sync_timer_ref = undefined }));
+
+handle_info(timeout, State) ->
+ noreply(backing_queue_timeout(State));
+
+handle_info({'DOWN', _MonitorRef, process, ChPid, _Reason}, State) ->
+ local_sender_death(ChPid, State),
+ noreply(maybe_forget_sender(ChPid, down_from_ch, State));
+
+handle_info({'EXIT', _Pid, Reason}, State) ->
+ {stop, Reason, State};
+
+handle_info({bump_credit, Msg}, State) ->
+ credit_flow:handle_bump_msg(Msg),
+ noreply(State);
+
+handle_info(bump_reduce_memory_use, State) ->
+ noreply(State);
+
+%% In the event of a short partition during sync we can detect the
+%% master's 'death', drop out of sync, and then receive sync messages
+%% which were still in flight. Ignore them.
+handle_info({sync_msg, _Ref, _Msg, _Props, _Unacked}, State) ->
+ noreply(State);
+
+handle_info({sync_complete, _Ref}, State) ->
+ noreply(State);
+
+handle_info(Msg, State) ->
+ {stop, {unexpected_info, Msg}, State}.
+
+terminate(_Reason, {not_started, _Q}) ->
+ ok;
+terminate(_Reason, #state { backing_queue_state = undefined }) ->
+ %% We've received a delete_and_terminate from gm, thus nothing to
+ %% do here.
+ ok;
+terminate({shutdown, dropped} = R, State = #state{backing_queue = BQ,
+ backing_queue_state = BQS}) ->
+ %% See rabbit_mirror_queue_master:terminate/2
+ terminate_common(State),
+ BQ:delete_and_terminate(R, BQS);
+terminate(shutdown, State) ->
+ terminate_shutdown(shutdown, State);
+terminate({shutdown, _} = R, State) ->
+ terminate_shutdown(R, State);
+terminate(Reason, State = #state{backing_queue = BQ,
+ backing_queue_state = BQS}) ->
+ terminate_common(State),
+ BQ:delete_and_terminate(Reason, BQS).
+
+%% If the Reason is shutdown, or {shutdown, _}, it is not the queue
+%% being deleted: it's just the node going down. Even though we're a
+%% mirror, we have no idea whether or not we'll be the only copy coming
+%% back up. Thus we must assume we will be, and preserve anything we
+%% have on disk.
+terminate_shutdown(Reason, State = #state{backing_queue = BQ,
+ backing_queue_state = BQS}) ->
+ terminate_common(State),
+ BQ:terminate(Reason, BQS).
+
+terminate_common(State) ->
+ ok = rabbit_memory_monitor:deregister(self()),
+ stop_rate_timer(stop_sync_timer(State)).
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+handle_pre_hibernate({not_started, _Q} = State) ->
+ {hibernate, State};
+
+handle_pre_hibernate(State = #state { backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ {RamDuration, BQS1} = BQ:ram_duration(BQS),
+ DesiredDuration =
+ rabbit_memory_monitor:report_ram_duration(self(), RamDuration),
+ BQS2 = BQ:set_ram_duration_target(DesiredDuration, BQS1),
+ BQS3 = BQ:handle_pre_hibernate(BQS2),
+ {hibernate, stop_rate_timer(State #state { backing_queue_state = BQS3 })}.
+
+prioritise_call(Msg, _From, _Len, _State) ->
+ case Msg of
+ info -> 9;
+ {gm_deaths, _Dead} -> 5;
+ _ -> 0
+ end.
+
+prioritise_cast(Msg, _Len, _State) ->
+ case Msg of
+ {set_ram_duration_target, _Duration} -> 8;
+ {set_maximum_since_use, _Age} -> 8;
+ {run_backing_queue, _Mod, _Fun} -> 6;
+ {gm, _Msg} -> 5;
+ _ -> 0
+ end.
+
+prioritise_info(Msg, _Len, _State) ->
+ case Msg of
+ update_ram_duration -> 8;
+ sync_timeout -> 6;
+ _ -> 0
+ end.
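+
+%% The prioritise_* callbacks above feed gen_server2's priority queue:
+%% messages mapped to larger numbers are handled first. A hypothetical
+%% illustration (arguments invented, results per the clauses above):
+%%
+%%   9 = prioritise_call(info, From, Len, State),
+%%   8 = prioritise_cast({set_maximum_since_use, 60}, Len, State),
+%%   6 = prioritise_info(sync_timeout, Len, State),
+%%   0 = prioritise_info(anything_else, Len, State).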
+
+format_message_queue(Opt, MQ) -> rabbit_misc:format_message_queue(Opt, MQ).
+
+%% ---------------------------------------------------------------------------
+%% GM
+%% ---------------------------------------------------------------------------
+
+joined([SPid], _Members) -> SPid ! {joined, self()}, ok.
+
+members_changed([_SPid], _Births, []) ->
+ ok;
+members_changed([SPid], _Births, Deaths) ->
+ case rabbit_misc:with_exit_handler(
+ rabbit_misc:const(ok),
+ fun() ->
+ gen_server2:call(SPid, {gm_deaths, Deaths}, infinity)
+ end) of
+ ok -> ok;
+ {promote, CPid} -> {become, rabbit_mirror_queue_coordinator, [CPid]}
+ end.
+
+handle_msg([_SPid], _From, hibernate_heartbeat) ->
+ %% See rabbit_mirror_queue_coordinator:handle_pre_hibernate/1
+ ok;
+handle_msg([_SPid], _From, request_depth) ->
+ %% This is only of value to the master
+ ok;
+handle_msg([_SPid], _From, {ensure_monitoring, _Pid}) ->
+ %% This is only of value to the master
+ ok;
+handle_msg([_SPid], _From, process_death) ->
+ %% We must not take any notice of the master death here since it
+ %% comes without ordering guarantees - there could still be
+ %% messages from the master we have yet to receive. When we get
+ %% members_changed, then there will be no more messages.
+ ok;
+handle_msg([CPid], _From, {delete_and_terminate, _Reason} = Msg) ->
+ ok = gen_server2:cast(CPid, {gm, Msg}),
+ {stop, {shutdown, ring_shutdown}};
+handle_msg([SPid], _From, {sync_start, Ref, Syncer, SPids}) ->
+ case lists:member(SPid, SPids) of
+ true -> gen_server2:cast(SPid, {sync_start, Ref, Syncer});
+ false -> ok
+ end;
+handle_msg([SPid], _From, Msg) ->
+ ok = gen_server2:cast(SPid, {gm, Msg}).
+
+handle_terminate([_SPid], _Reason) ->
+ ok.
+
+%% ---------------------------------------------------------------------------
+%% Others
+%% ---------------------------------------------------------------------------
+
+infos(Items, State) -> [{Item, i(Item, State)} || Item <- Items].
+
+i(pid, _State) ->
+ self();
+i(name, #state{q = Q}) when ?is_amqqueue(Q) ->
+ amqqueue:get_name(Q);
+i(master_pid, #state{q = Q}) when ?is_amqqueue(Q) ->
+ amqqueue:get_pid(Q);
+i(is_synchronised, #state{depth_delta = DD}) ->
+ DD =:= 0;
+i(_, _) ->
+ ''.
+
+bq_init(BQ, Q, Recover) ->
+ Self = self(),
+ BQ:init(Q, Recover,
+ fun (Mod, Fun) ->
+ rabbit_amqqueue:run_backing_queue(Self, Mod, Fun)
+ end).
+
+run_backing_queue(rabbit_mirror_queue_master, Fun, State) ->
+ %% Yes, this might look a little crazy, but see comments in
+ %% confirm_sender_death/1
+ Fun(?MODULE, State);
+run_backing_queue(Mod, Fun, State = #state { backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ State #state { backing_queue_state = BQ:invoke(Mod, Fun, BQS) }.
+
+%% This feature was used by `rabbit_amqqueue_process` and
+%% `rabbit_mirror_queue_slave` up to and including RabbitMQ 3.7.x. It is
+%% unused in 3.8.x and thus deprecated. We keep it to support in-place
+%% upgrades to 3.8.x (i.e. mixed-version clusters), but it is a no-op
+%% starting with that version.
+send_mandatory(#delivery{mandatory = false}) ->
+ ok;
+send_mandatory(#delivery{mandatory = true,
+ sender = SenderPid,
+ msg_seq_no = MsgSeqNo}) ->
+ gen_server2:cast(SenderPid, {mandatory_received, MsgSeqNo}).
+
+send_or_record_confirm(_, #delivery{ confirm = false }, MS, _State) ->
+ MS;
+send_or_record_confirm(published, #delivery { sender = ChPid,
+ confirm = true,
+ msg_seq_no = MsgSeqNo,
+ message = #basic_message {
+ id = MsgId,
+ is_persistent = true } },
+ MS, #state{q = Q}) when ?amqqueue_is_durable(Q) ->
+    maps:put(MsgId, {published, ChPid, MsgSeqNo}, MS);
+send_or_record_confirm(_Status, #delivery { sender = ChPid,
+ confirm = true,
+ msg_seq_no = MsgSeqNo },
+ MS, #state{q = Q} = _State) ->
+ ok = rabbit_classic_queue:confirm_to_sender(ChPid,
+ amqqueue:get_name(Q), [MsgSeqNo]),
+ MS.
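+
+%% A minimal sketch of the two paths above (hypothetical bindings): a
+%% persistent message published to a durable queue has its confirm
+%% deferred by recording it in MS,
+%%
+%%   MS1 = send_or_record_confirm(published, Delivery, MS, State),
+%%   %% MS1 now maps MsgId to {published, ChPid, MsgSeqNo}
+%%
+%% while any other confirm-enabled delivery is confirmed to the sending
+%% channel straight away and MS is returned unchanged.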
+
+confirm_messages(MsgIds, State = #state{q = Q, msg_id_status = MS}) ->
+ QName = amqqueue:get_name(Q),
+ {CMs, MS1} =
+ lists:foldl(
+ fun (MsgId, {CMsN, MSN} = Acc) ->
+ %% We will never see 'discarded' here
+ case maps:find(MsgId, MSN) of
+ error ->
+ %% If it needed confirming, it'll have
+ %% already been done.
+ Acc;
+ {ok, published} ->
+ %% Still not seen it from the channel, just
+ %% record that it's been confirmed.
+ {CMsN, maps:put(MsgId, confirmed, MSN)};
+ {ok, {published, ChPid, MsgSeqNo}} ->
+ %% Seen from both GM and Channel. Can now
+ %% confirm.
+ {rabbit_misc:gb_trees_cons(ChPid, MsgSeqNo, CMsN),
+ maps:remove(MsgId, MSN)};
+ {ok, confirmed} ->
+                          %% It's already been confirmed. This probably
+                          %% means it's been both sync'd to disk and then
+                          %% delivered and ack'd before we've seen the
+                          %% publish from the channel. Nothing to do here.
+ Acc
+ end
+ end, {gb_trees:empty(), MS}, MsgIds),
+ Fun = fun (Pid, MsgSeqNos) ->
+ rabbit_classic_queue:confirm_to_sender(Pid, QName, MsgSeqNos)
+ end,
+ rabbit_misc:gb_trees_foreach(Fun, CMs),
+ State #state { msg_id_status = MS1 }.
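+
+%% To make the fold above concrete, a hypothetical trace (ids invented):
+%% starting from MS = #{m1 => published, m2 => {published, Ch, 7}},
+%% confirm_messages([m1, m2, m3], State) would
+%%
+%%   - move m1 to 'confirmed' (seen via GM only, channel publish pending),
+%%   - remove m2 from MS and send confirm 7 to channel Ch,
+%%   - leave m3 alone (unknown here, so any confirm was already done).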
+
+handle_process_result({ok, State}) -> noreply(State);
+handle_process_result({stop, State}) -> {stop, normal, State}.
+
+-spec promote_me({pid(), term()}, #state{}) -> no_return().
+
+promote_me(From, #state { q = Q0,
+ gm = GM,
+ backing_queue = BQ,
+ backing_queue_state = BQS,
+ rate_timer_ref = RateTRef,
+ sender_queues = SQ,
+ msg_id_ack = MA,
+ msg_id_status = MS,
+ known_senders = KS}) when ?is_amqqueue(Q0) ->
+ QName = amqqueue:get_name(Q0),
+ rabbit_mirror_queue_misc:log_info(QName, "Promoting mirror ~s to master~n",
+ [rabbit_misc:pid_to_string(self())]),
+ Q1 = amqqueue:set_pid(Q0, self()),
+ DeathFun = rabbit_mirror_queue_master:sender_death_fun(),
+ DepthFun = rabbit_mirror_queue_master:depth_fun(),
+ {ok, CPid} = rabbit_mirror_queue_coordinator:start_link(Q1, GM, DeathFun, DepthFun),
+ true = unlink(GM),
+ gen_server2:reply(From, {promote, CPid}),
+
+ %% Everything that we're monitoring, we need to ensure our new
+ %% coordinator is monitoring.
+ MPids = pmon:monitored(KS),
+ ok = rabbit_mirror_queue_coordinator:ensure_monitoring(CPid, MPids),
+
+ %% We find all the messages that we've received from channels but
+ %% not from gm, and pass them to the
+ %% queue_process:init_with_backing_queue_state to be enqueued.
+ %%
+ %% We also have to requeue messages which are pending acks: the
+ %% consumers from the master queue have been lost and so these
+ %% messages need requeuing. They might also be pending
+ %% confirmation, and indeed they might also be pending arrival of
+ %% the publication from the channel itself, if we received both
+ %% the publication and the fetch via gm first! Requeuing doesn't
+ %% affect confirmations: if the message was previously pending a
+ %% confirmation then it still will be, under the same msg_id. So
+    %% as a master, we need to be prepared to filter out the
+    %% publication of said messages from the channel (via is_duplicate;
+    %% thus such requeued messages must remain in the msg_id_status
+    %% (MS), which becomes seen_status (SS) in the master).
+ %%
+ %% Then there are messages we already have in the queue, which are
+ %% not currently pending acknowledgement:
+ %% 1. Messages we've only received via gm:
+ %% Filter out subsequent publication from channel through
+ %% validate_message. Might have to issue confirms then or
+ %% later, thus queue_process state will have to know that
+ %% there's a pending confirm.
+ %% 2. Messages received via both gm and channel:
+ %% Queue will have to deal with issuing confirms if necessary.
+ %%
+    %% MS contains the following four entry types:
+ %%
+ %% a) published:
+ %% published via gm only; pending arrival of publication from
+ %% channel, maybe pending confirm.
+ %%
+ %% b) {published, ChPid, MsgSeqNo}:
+ %% published via gm and channel; pending confirm.
+ %%
+ %% c) confirmed:
+ %% published via gm only, and confirmed; pending publication
+ %% from channel.
+ %%
+ %% d) discarded:
+ %% seen via gm only as discarded. Pending publication from
+ %% channel
+ %%
+    %% Only forms a, c and d need to go to the master state
+    %% seen_status (SS).
+    %%
+    %% Only form b needs to go through to the queue_process
+    %% state to form the msg_id_to_channel mapping (MTC).
+ %%
+ %% No messages that are enqueued from SQ at this point will have
+ %% entries in MS.
+ %%
+ %% Messages that are extracted from MA may have entries in MS, and
+ %% those messages are then requeued. However, as discussed above,
+ %% this does not affect MS, nor which bits go through to SS in
+ %% Master, or MTC in queue_process.
+
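+    %% As a hypothetical example of the projections below (ids and
+    %% values invented): given
+    %%   MS = #{m1 => published,
+    %%          m2 => {published, Ch, 42},
+    %%          m3 => confirmed},
+    %% SS keeps m1 and m3, while MTC gains the entry m2 => {Ch, 42}.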
+ St = [published, confirmed, discarded],
+ SS = maps:filter(fun (_MsgId, Status) -> lists:member(Status, St) end, MS),
+ AckTags = [AckTag || {_MsgId, AckTag} <- maps:to_list(MA)],
+
+ MasterState = rabbit_mirror_queue_master:promote_backing_queue_state(
+ QName, CPid, BQ, BQS, GM, AckTags, SS, MPids),
+
+ MTC = maps:fold(fun (MsgId, {published, ChPid, MsgSeqNo}, MTC0) ->
+ maps:put(MsgId, {ChPid, MsgSeqNo}, MTC0);
+ (_Msgid, _Status, MTC0) ->
+ MTC0
+ end, #{}, MS),
+ Deliveries = [promote_delivery(Delivery) ||
+ {_ChPid, {PubQ, _PendCh, _ChState}} <- maps:to_list(SQ),
+ Delivery <- queue:to_list(PubQ)],
+ AwaitGmDown = [ChPid || {ChPid, {_, _, down_from_ch}} <- maps:to_list(SQ)],
+ KS1 = lists:foldl(fun (ChPid0, KS0) ->
+ pmon:demonitor(ChPid0, KS0)
+ end, KS, AwaitGmDown),
+ rabbit_misc:store_proc_name(rabbit_amqqueue_process, QName),
+ rabbit_amqqueue_process:init_with_backing_queue_state(
+ Q1, rabbit_mirror_queue_master, MasterState, RateTRef, Deliveries, KS1,
+ MTC).
+
+%% We reset mandatory to false here because we will have sent the
+%% mandatory_received already as soon as we got the message. We also
+%% need to send a credit-flow ack for these messages, since the channel
+%% is waiting for an ack for the via-GM copy that will now never
+%% arrive.
+promote_delivery(Delivery = #delivery{sender = Sender, flow = Flow}) ->
+ maybe_flow_ack(Sender, Flow),
+ Delivery#delivery{mandatory = false}.
+
+noreply(State) ->
+ {NewState, Timeout} = next_state(State),
+ {noreply, ensure_rate_timer(NewState), Timeout}.
+
+reply(Reply, State) ->
+ {NewState, Timeout} = next_state(State),
+ {reply, Reply, ensure_rate_timer(NewState), Timeout}.
+
+next_state(State = #state{backing_queue = BQ, backing_queue_state = BQS}) ->
+ {MsgIds, BQS1} = BQ:drain_confirmed(BQS),
+ State1 = confirm_messages(MsgIds,
+ State #state { backing_queue_state = BQS1 }),
+ case BQ:needs_timeout(BQS1) of
+ false -> {stop_sync_timer(State1), hibernate };
+ idle -> {stop_sync_timer(State1), ?SYNC_INTERVAL};
+ timed -> {ensure_sync_timer(State1), 0 }
+ end.
+
+backing_queue_timeout(State = #state { backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ State#state{backing_queue_state = BQ:timeout(BQS)}.
+
+ensure_sync_timer(State) ->
+ rabbit_misc:ensure_timer(State, #state.sync_timer_ref,
+ ?SYNC_INTERVAL, sync_timeout).
+
+stop_sync_timer(State) -> rabbit_misc:stop_timer(State, #state.sync_timer_ref).
+
+ensure_rate_timer(State) ->
+ rabbit_misc:ensure_timer(State, #state.rate_timer_ref,
+ ?RAM_DURATION_UPDATE_INTERVAL,
+ update_ram_duration).
+
+stop_rate_timer(State) -> rabbit_misc:stop_timer(State, #state.rate_timer_ref).
+
+ensure_monitoring(ChPid, State = #state { known_senders = KS }) ->
+ State #state { known_senders = pmon:monitor(ChPid, KS) }.
+
+local_sender_death(ChPid, #state { known_senders = KS }) ->
+ %% The channel will be monitored iff we have received a delivery
+ %% from it but not heard about its death from the master. So if it
+ %% is monitored we need to point the death out to the master (see
+ %% essay).
+ ok = case pmon:is_monitored(ChPid, KS) of
+ false -> ok;
+ true -> confirm_sender_death(ChPid)
+ end.
+
+confirm_sender_death(Pid) ->
+ %% We have to deal with the possibility that we'll be promoted to
+ %% master before this thing gets run. Consequently we set the
+ %% module to rabbit_mirror_queue_master so that if we do become a
+ %% rabbit_amqqueue_process before then, sane things will happen.
+ Fun =
+ fun (?MODULE, State = #state { known_senders = KS,
+ gm = GM }) ->
+ %% We're running still as a mirror
+ %%
+ %% See comment in local_sender_death/2; we might have
+ %% received a sender_death in the meanwhile so check
+ %% again.
+ ok = case pmon:is_monitored(Pid, KS) of
+ false -> ok;
+ true -> gm:broadcast(GM, {ensure_monitoring, [Pid]}),
+ confirm_sender_death(Pid)
+ end,
+ State;
+ (rabbit_mirror_queue_master, State) ->
+ %% We've become a master. State is now opaque to
+ %% us. When we became master, if Pid was still known
+ %% to us then we'd have set up monitoring of it then,
+ %% so this is now a noop.
+ State
+ end,
+ %% Note that we do not remove our knowledge of this ChPid until we
+ %% get the sender_death from GM as well as a DOWN notification.
+ {ok, _TRef} = timer:apply_after(
+ ?DEATH_TIMEOUT, rabbit_amqqueue, run_backing_queue,
+ [self(), rabbit_mirror_queue_master, Fun]),
+ ok.
+
+forget_sender(_, running) -> false;
+forget_sender(down_from_gm, down_from_gm) -> false; %% [1]
+forget_sender(down_from_ch, down_from_ch) -> false;
+forget_sender(Down1, Down2) when Down1 =/= Down2 -> true.
+
+%% [1] If another mirror goes through confirm_sender_death/1 before we
+%% do, we can get two GM sender_death messages in a row for the same
+%% channel - don't treat that as anything special.
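+%%
+%% A short sketch of the pairing (direct calls for illustration only):
+%%
+%%   false = forget_sender(down_from_ch, running),
+%%   false = forget_sender(down_from_gm, down_from_gm),
+%%   true  = forget_sender(down_from_ch, down_from_gm).
+%%
+%% i.e. a sender is forgotten only once down notifications have arrived
+%% from both sides (channel and gm), in either order.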
+
+%% Record and process lifetime events from channels. Forget all about a channel
+%% only when down notifications are received from both the channel and from gm.
+maybe_forget_sender(ChPid, ChState, State = #state { sender_queues = SQ,
+ msg_id_status = MS,
+ known_senders = KS }) ->
+ case maps:find(ChPid, SQ) of
+ error ->
+ State;
+ {ok, {MQ, PendCh, ChStateRecord}} ->
+ case forget_sender(ChState, ChStateRecord) of
+ true ->
+ credit_flow:peer_down(ChPid),
+ State #state { sender_queues = maps:remove(ChPid, SQ),
+ msg_id_status = lists:foldl(
+ fun maps:remove/2,
+ MS, sets:to_list(PendCh)),
+ known_senders = pmon:demonitor(ChPid, KS) };
+ false ->
+ SQ1 = maps:put(ChPid, {MQ, PendCh, ChState}, SQ),
+ State #state { sender_queues = SQ1 }
+ end
+ end.
+
+maybe_enqueue_message(
+ Delivery = #delivery { message = #basic_message { id = MsgId },
+ sender = ChPid },
+ State = #state { sender_queues = SQ, msg_id_status = MS }) ->
+ send_mandatory(Delivery), %% must do this before confirms
+ State1 = ensure_monitoring(ChPid, State),
+ %% We will never see {published, ChPid, MsgSeqNo} here.
+ case maps:find(MsgId, MS) of
+ error ->
+ {MQ, PendingCh, ChState} = get_sender_queue(ChPid, SQ),
+ MQ1 = queue:in(Delivery, MQ),
+ SQ1 = maps:put(ChPid, {MQ1, PendingCh, ChState}, SQ),
+ State1 #state { sender_queues = SQ1 };
+ {ok, Status} ->
+ MS1 = send_or_record_confirm(
+ Status, Delivery, maps:remove(MsgId, MS), State1),
+ SQ1 = remove_from_pending_ch(MsgId, ChPid, SQ),
+ State1 #state { msg_id_status = MS1,
+ sender_queues = SQ1 }
+ end.
+
+get_sender_queue(ChPid, SQ) ->
+ case maps:find(ChPid, SQ) of
+ error -> {queue:new(), sets:new(), running};
+ {ok, Val} -> Val
+ end.
+
+remove_from_pending_ch(MsgId, ChPid, SQ) ->
+ case maps:find(ChPid, SQ) of
+ error ->
+ SQ;
+ {ok, {MQ, PendingCh, ChState}} ->
+ maps:put(ChPid, {MQ, sets:del_element(MsgId, PendingCh), ChState},
+ SQ)
+ end.
+
+publish_or_discard(Status, ChPid, MsgId,
+ State = #state { sender_queues = SQ, msg_id_status = MS }) ->
+ %% We really are going to do the publish/discard right now, even
+ %% though we may not have seen it directly from the channel. But
+ %% we cannot issue confirms until the latter has happened. So we
+ %% need to keep track of the MsgId and its confirmation status in
+ %% the meantime.
+ State1 = ensure_monitoring(ChPid, State),
+ {MQ, PendingCh, ChState} = get_sender_queue(ChPid, SQ),
+ {MQ1, PendingCh1, MS1} =
+ case queue:out(MQ) of
+ {empty, _MQ2} ->
+ {MQ, sets:add_element(MsgId, PendingCh),
+ maps:put(MsgId, Status, MS)};
+ {{value, Delivery = #delivery {
+ message = #basic_message { id = MsgId } }}, MQ2} ->
+ {MQ2, PendingCh,
+ %% We received the msg from the channel first. Thus
+ %% we need to deal with confirms here.
+ send_or_record_confirm(Status, Delivery, MS, State1)};
+ {{value, #delivery {}}, _MQ2} ->
+                %% The instruction was sent to us before we were
+                %% listed in the slave_pids of the #amqqueue{}
+                %% record. We'll never receive the message directly
+                %% from the channel, and the channel will not be
+                %% expecting any confirms from us.
+ {MQ, PendingCh, MS}
+ end,
+ SQ1 = maps:put(ChPid, {MQ1, PendingCh1, ChState}, SQ),
+ State1 #state { sender_queues = SQ1, msg_id_status = MS1 }.
+
+
+process_instruction({publish, ChPid, Flow, MsgProps,
+ Msg = #basic_message { id = MsgId }}, State) ->
+ maybe_flow_ack(ChPid, Flow),
+ State1 = #state { backing_queue = BQ, backing_queue_state = BQS } =
+ publish_or_discard(published, ChPid, MsgId, State),
+ BQS1 = BQ:publish(Msg, MsgProps, true, ChPid, Flow, BQS),
+ {ok, State1 #state { backing_queue_state = BQS1 }};
+process_instruction({batch_publish, ChPid, Flow, Publishes}, State) ->
+ maybe_flow_ack(ChPid, Flow),
+ State1 = #state { backing_queue = BQ, backing_queue_state = BQS } =
+ lists:foldl(fun ({#basic_message { id = MsgId },
+ _MsgProps, _IsDelivered}, St) ->
+ publish_or_discard(published, ChPid, MsgId, St)
+ end, State, Publishes),
+ BQS1 = BQ:batch_publish(Publishes, ChPid, Flow, BQS),
+ {ok, State1 #state { backing_queue_state = BQS1 }};
+process_instruction({publish_delivered, ChPid, Flow, MsgProps,
+ Msg = #basic_message { id = MsgId }}, State) ->
+ maybe_flow_ack(ChPid, Flow),
+ State1 = #state { backing_queue = BQ, backing_queue_state = BQS } =
+ publish_or_discard(published, ChPid, MsgId, State),
+ true = BQ:is_empty(BQS),
+ {AckTag, BQS1} = BQ:publish_delivered(Msg, MsgProps, ChPid, Flow, BQS),
+ {ok, maybe_store_ack(true, MsgId, AckTag,
+ State1 #state { backing_queue_state = BQS1 })};
+process_instruction({batch_publish_delivered, ChPid, Flow, Publishes}, State) ->
+ maybe_flow_ack(ChPid, Flow),
+ {MsgIds,
+ State1 = #state { backing_queue = BQ, backing_queue_state = BQS }} =
+ lists:foldl(fun ({#basic_message { id = MsgId }, _MsgProps},
+ {MsgIds, St}) ->
+ {[MsgId | MsgIds],
+ publish_or_discard(published, ChPid, MsgId, St)}
+ end, {[], State}, Publishes),
+ true = BQ:is_empty(BQS),
+ {AckTags, BQS1} = BQ:batch_publish_delivered(Publishes, ChPid, Flow, BQS),
+ MsgIdsAndAcks = lists:zip(lists:reverse(MsgIds), AckTags),
+ State2 = lists:foldl(
+ fun ({MsgId, AckTag}, St) ->
+ maybe_store_ack(true, MsgId, AckTag, St)
+ end, State1 #state { backing_queue_state = BQS1 },
+ MsgIdsAndAcks),
+ {ok, State2};
+process_instruction({discard, ChPid, Flow, MsgId}, State) ->
+ maybe_flow_ack(ChPid, Flow),
+ State1 = #state { backing_queue = BQ, backing_queue_state = BQS } =
+ publish_or_discard(discarded, ChPid, MsgId, State),
+ BQS1 = BQ:discard(MsgId, ChPid, Flow, BQS),
+ {ok, State1 #state { backing_queue_state = BQS1 }};
+process_instruction({drop, Length, Dropped, AckRequired},
+ State = #state { backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ QLen = BQ:len(BQS),
+ ToDrop = case QLen - Length of
+ N when N > 0 -> N;
+ _ -> 0
+ end,
+ State1 = lists:foldl(
+ fun (const, StateN = #state{backing_queue_state = BQSN}) ->
+ {{MsgId, AckTag}, BQSN1} = BQ:drop(AckRequired, BQSN),
+ maybe_store_ack(
+ AckRequired, MsgId, AckTag,
+ StateN #state { backing_queue_state = BQSN1 })
+ end, State, lists:duplicate(ToDrop, const)),
+ {ok, case AckRequired of
+ true -> State1;
+ false -> update_delta(ToDrop - Dropped, State1)
+ end};
+process_instruction({ack, MsgIds},
+ State = #state { backing_queue = BQ,
+ backing_queue_state = BQS,
+ msg_id_ack = MA }) ->
+ {AckTags, MA1} = msg_ids_to_acktags(MsgIds, MA),
+ {MsgIds1, BQS1} = BQ:ack(AckTags, BQS),
+ [] = MsgIds1 -- MsgIds, %% ASSERTION
+ {ok, update_delta(length(MsgIds1) - length(MsgIds),
+ State #state { msg_id_ack = MA1,
+ backing_queue_state = BQS1 })};
+process_instruction({requeue, MsgIds},
+ State = #state { backing_queue = BQ,
+ backing_queue_state = BQS,
+ msg_id_ack = MA }) ->
+ {AckTags, MA1} = msg_ids_to_acktags(MsgIds, MA),
+ {_MsgIds, BQS1} = BQ:requeue(AckTags, BQS),
+ {ok, State #state { msg_id_ack = MA1,
+ backing_queue_state = BQS1 }};
+process_instruction({sender_death, ChPid},
+ State = #state { known_senders = KS }) ->
+ %% The channel will be monitored iff we have received a message
+ %% from it. In this case we just want to avoid doing work if we
+ %% never got any messages.
+ {ok, case pmon:is_monitored(ChPid, KS) of
+ false -> State;
+ true -> maybe_forget_sender(ChPid, down_from_gm, State)
+ end};
+process_instruction({depth, Depth},
+ State = #state { backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ {ok, set_delta(Depth - BQ:depth(BQS), State)};
+
+process_instruction({delete_and_terminate, Reason},
+ State = #state { backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ BQ:delete_and_terminate(Reason, BQS),
+ {stop, State #state { backing_queue_state = undefined }};
+process_instruction({set_queue_mode, Mode},
+ State = #state { backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ BQS1 = BQ:set_queue_mode(Mode, BQS),
+ {ok, State #state { backing_queue_state = BQS1 }}.
+
+maybe_flow_ack(Sender, flow) -> credit_flow:ack(Sender);
+maybe_flow_ack(_Sender, noflow) -> ok.
+
+msg_ids_to_acktags(MsgIds, MA) ->
+ {AckTags, MA1} =
+ lists:foldl(
+ fun (MsgId, {Acc, MAN}) ->
+ case maps:find(MsgId, MA) of
+ error -> {Acc, MAN};
+ {ok, AckTag} -> {[AckTag | Acc], maps:remove(MsgId, MAN)}
+ end
+ end, {[], MA}, MsgIds),
+ {lists:reverse(AckTags), MA1}.
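+
+%% Usage sketch (hypothetical ids and tags): unknown ids are skipped,
+%% resolved ones are consumed from the map:
+%%
+%%   {[a1], MA1} = msg_ids_to_acktags([m1, m2], #{m1 => a1, m3 => a3}),
+%%   %% MA1 =:= #{m3 => a3}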
+
+maybe_store_ack(false, _MsgId, _AckTag, State) ->
+ State;
+maybe_store_ack(true, MsgId, AckTag, State = #state { msg_id_ack = MA }) ->
+ State #state { msg_id_ack = maps:put(MsgId, AckTag, MA) }.
+
+set_delta(0, State = #state { depth_delta = undefined }) ->
+ ok = record_synchronised(State#state.q),
+ State #state { depth_delta = 0 };
+set_delta(NewDelta, State = #state { depth_delta = undefined }) ->
+ true = NewDelta > 0, %% assertion
+ State #state { depth_delta = NewDelta };
+set_delta(NewDelta, State = #state { depth_delta = Delta }) ->
+ update_delta(NewDelta - Delta, State).
+
+update_delta(_DeltaChange, State = #state { depth_delta = undefined }) ->
+ State;
+update_delta( DeltaChange, State = #state { depth_delta = 0 }) ->
+ 0 = DeltaChange, %% assertion: we cannot become unsync'ed
+ State;
+update_delta( DeltaChange, State = #state { depth_delta = Delta }) ->
+ true = DeltaChange =< 0, %% assertion: we cannot become 'less' sync'ed
+ set_delta(Delta + DeltaChange, State #state { depth_delta = undefined }).
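+
+%% Delta bookkeeping sketch (hypothetical numbers): a mirror told it is
+%% 3 messages behind becomes synchronised once the deficit drains to
+%% zero, at which point record_synchronised/1 runs:
+%%
+%%   S1 = set_delta(3, S0),      %% depth_delta: undefined -> 3
+%%   S2 = update_delta(-2, S1),  %% depth_delta: 3 -> 1
+%%   S3 = update_delta(-1, S2).  %% depth_delta: 1 -> 0, now synced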
+
+update_ram_duration(BQ, BQS) ->
+ {RamDuration, BQS1} = BQ:ram_duration(BQS),
+ DesiredDuration =
+ rabbit_memory_monitor:report_ram_duration(self(), RamDuration),
+ BQ:set_ram_duration_target(DesiredDuration, BQS1).
+
+record_synchronised(Q0) when ?is_amqqueue(Q0) ->
+ QName = amqqueue:get_name(Q0),
+ Self = self(),
+ F = fun () ->
+ case mnesia:read({rabbit_queue, QName}) of
+ [] ->
+ ok;
+ [Q1] when ?is_amqqueue(Q1) ->
+ SSPids = amqqueue:get_sync_slave_pids(Q1),
+ SSPids1 = [Self | SSPids],
+ Q2 = amqqueue:set_sync_slave_pids(Q1, SSPids1),
+ rabbit_mirror_queue_misc:store_updated_slaves(Q2),
+ {ok, Q2}
+ end
+ end,
+ case rabbit_misc:execute_mnesia_transaction(F) of
+ ok -> ok;
+ {ok, Q2} -> rabbit_mirror_queue_misc:maybe_drop_master_after_sync(Q2)
+ end.
diff --git a/deps/rabbit/src/rabbit_mirror_queue_sync.erl b/deps/rabbit/src/rabbit_mirror_queue_sync.erl
new file mode 100644
index 0000000000..a82ee05599
--- /dev/null
+++ b/deps/rabbit/src/rabbit_mirror_queue_sync.erl
@@ -0,0 +1,420 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2010-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mirror_queue_sync).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-export([master_prepare/4, master_go/8, slave/7, conserve_resources/3]).
+
+-define(SYNC_PROGRESS_INTERVAL, 1000000).
+
+%% There are three processes around, the master, the syncer and the
+%% slave(s). The syncer is an intermediary, linked to the master in
+%% order to make sure we do not mess with the master's credit flow or
+%% set of monitors.
+%%
+%% Interactions
+%% ------------
+%%
+%% '*' indicates repeating messages. All are standard Erlang messages
+%% except sync_start which is sent over GM to flush out any other
+%% messages that we might have sent that way already. (credit) is the
+%% usual credit_flow bump message every so often.
+%%
+%% Master Syncer Slave(s)
+%% sync_mirrors -> || ||
+%% || -- (spawns) --> || ||
+%% || --------- sync_start (over GM) -------> ||
+%% || || <--- sync_ready ---- ||
+%% || || (or) ||
+%% || || <--- sync_deny ----- ||
+%% || <--- ready ---- || ||
+%% || <--- next* ---- || || }
+%% || ---- msg* ----> || || } loop
+%% || || ---- sync_msgs* ---> || }
+%% || || <--- (credit)* ----- || }
+%% || <--- next ---- || ||
+%% || ---- done ----> || ||
+%% || || -- sync_complete --> ||
+%% || (Dies) ||
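+%%
+%% For orientation, the concrete message shapes used in this exchange
+%% are (Ref being the sync reference):
+%%   {sync_ready, Ref, SPid} / {sync_deny, Ref, SPid} from the slaves,
+%%   {ready, SyncerPid} and {next, Ref} from the syncer to the master,
+%%   {msgs, Ref, Batch} from the master, and {sync_msgs, Ref, Batch} /
+%%   {sync_complete, Ref} from the syncer to the slaves.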
+
+-type log_fun() :: fun ((string(), [any()]) -> 'ok').
+-type bq() :: atom().
+-type bqs() :: any().
+-type ack() :: any().
+-type slave_sync_state() :: {[{rabbit_types:msg_id(), ack()}], timer:tref(),
+ bqs()}.
+
+%% ---------------------------------------------------------------------------
+%% Master
+
+-spec master_prepare(reference(), rabbit_amqqueue:name(),
+ log_fun(), [pid()]) -> pid().
+
+master_prepare(Ref, QName, Log, SPids) ->
+ MPid = self(),
+ spawn_link(fun () ->
+ ?store_proc_name(QName),
+ syncer(Ref, Log, MPid, SPids)
+ end).
+
+-spec master_go(pid(), reference(), log_fun(),
+ rabbit_mirror_queue_master:stats_fun(),
+ rabbit_mirror_queue_master:stats_fun(),
+ non_neg_integer(),
+ bq(), bqs()) ->
+ {'already_synced', bqs()} | {'ok', bqs()} |
+ {'cancelled', bqs()} |
+ {'shutdown', any(), bqs()} |
+ {'sync_died', any(), bqs()}.
+
+master_go(Syncer, Ref, Log, HandleInfo, EmitStats, SyncBatchSize, BQ, BQS) ->
+ Args = {Syncer, Ref, Log, HandleInfo, EmitStats, rabbit_misc:get_parent()},
+ receive
+ {'EXIT', Syncer, normal} -> {already_synced, BQS};
+ {'EXIT', Syncer, Reason} -> {sync_died, Reason, BQS};
+ {ready, Syncer} -> EmitStats({syncing, 0}),
+ master_batch_go0(Args, SyncBatchSize,
+ BQ, BQS)
+ end.
+
+master_batch_go0(Args, BatchSize, BQ, BQS) ->
+ FoldFun =
+ fun (Msg, MsgProps, Unacked, Acc) ->
+ Acc1 = append_to_acc(Msg, MsgProps, Unacked, Acc),
+ case maybe_master_batch_send(Acc1, BatchSize) of
+ true -> master_batch_send(Args, Acc1);
+ false -> {cont, Acc1}
+ end
+ end,
+ FoldAcc = {[], 0, {0, BQ:depth(BQS)}, erlang:monotonic_time()},
+ bq_fold(FoldFun, FoldAcc, Args, BQ, BQS).
+
+master_batch_send({Syncer, Ref, Log, HandleInfo, EmitStats, Parent},
+ {Batch, I, {Curr, Len}, Last}) ->
+ T = maybe_emit_stats(Last, I, EmitStats, Log),
+ HandleInfo({syncing, I}),
+ handle_set_maximum_since_use(),
+ SyncMsg = {msgs, Ref, lists:reverse(Batch)},
+ NewAcc = {[], I + length(Batch), {Curr, Len}, T},
+ master_send_receive(SyncMsg, NewAcc, Syncer, Ref, Parent).
+
+%% Either send messages when we reach the last one in the queue or
+%% whenever we have accumulated BatchSize messages.
+maybe_master_batch_send({_, _, {Len, Len}, _}, _BatchSize) ->
+ true;
+maybe_master_batch_send({_, _, {Curr, _Len}, _}, BatchSize)
+ when Curr rem BatchSize =:= 0 ->
+ true;
+maybe_master_batch_send(_Acc, _BatchSize) ->
+ false.
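+
+%% A worked example (numbers invented): with BatchSize = 3 and a queue
+%% depth of 7, batches are flushed after messages 3, 6 and finally 7:
+%%
+%%   true  = maybe_master_batch_send({[], 0, {3, 7}, T}, 3),
+%%   false = maybe_master_batch_send({[], 0, {4, 7}, T}, 3),
+%%   true  = maybe_master_batch_send({[], 0, {7, 7}, T}, 3).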
+
+bq_fold(FoldFun, FoldAcc, Args, BQ, BQS) ->
+ case BQ:fold(FoldFun, FoldAcc, BQS) of
+ {{shutdown, Reason}, BQS1} -> {shutdown, Reason, BQS1};
+ {{sync_died, Reason}, BQS1} -> {sync_died, Reason, BQS1};
+ {_, BQS1} -> master_done(Args, BQS1)
+ end.
+
+append_to_acc(Msg, MsgProps, Unacked, {Batch, I, {Curr, Len}, T}) ->
+ {[{Msg, MsgProps, Unacked} | Batch], I, {Curr + 1, Len}, T}.
+
+master_send_receive(SyncMsg, NewAcc, Syncer, Ref, Parent) ->
+ receive
+ {'$gen_call', From,
+ cancel_sync_mirrors} -> stop_syncer(Syncer, {cancel, Ref}),
+ gen_server2:reply(From, ok),
+ {stop, cancelled};
+ {next, Ref} -> Syncer ! SyncMsg,
+ {cont, NewAcc};
+ {'EXIT', Parent, Reason} -> {stop, {shutdown, Reason}};
+ {'EXIT', Syncer, Reason} -> {stop, {sync_died, Reason}}
+ end.
+
+master_done({Syncer, Ref, _Log, _HandleInfo, _EmitStats, Parent}, BQS) ->
+ receive
+ {'$gen_call', From,
+ cancel_sync_mirrors} ->
+ stop_syncer(Syncer, {cancel, Ref}),
+ gen_server2:reply(From, ok),
+ {cancelled, BQS};
+ {cancelled, Ref} ->
+ {cancelled, BQS};
+ {next, Ref} ->
+ stop_syncer(Syncer, {done, Ref}),
+ {ok, BQS};
+ {'EXIT', Parent, Reason} ->
+ {shutdown, Reason, BQS};
+ {'EXIT', Syncer, Reason} ->
+ {sync_died, Reason, BQS}
+ end.
+
+stop_syncer(Syncer, Msg) ->
+ unlink(Syncer),
+ Syncer ! Msg,
+ receive {'EXIT', Syncer, _} -> ok
+ after 0 -> ok
+ end.
+
+maybe_emit_stats(Last, I, EmitStats, Log) ->
+ Interval = erlang:convert_time_unit(
+ erlang:monotonic_time() - Last, native, micro_seconds),
+ case Interval > ?SYNC_PROGRESS_INTERVAL of
+ true -> EmitStats({syncing, I}),
+ Log("~p messages", [I]),
+ erlang:monotonic_time();
+ false -> Last
+ end.
+
+handle_set_maximum_since_use() ->
+ receive
+ {'$gen_cast', {set_maximum_since_use, Age}} ->
+ ok = file_handle_cache:set_maximum_since_use(Age)
+ after 0 ->
+ ok
+ end.
+
+%% Master
+%% ---------------------------------------------------------------------------
+%% Syncer
+
+syncer(Ref, Log, MPid, SPids) ->
+ [erlang:monitor(process, SPid) || SPid <- SPids],
+ %% We wait for a reply from the mirrors so that we know they are in
+ %% a receive block and will thus receive messages we send to them
+ %% *without* those messages ending up in their gen_server2 pqueue.
+ case await_slaves(Ref, SPids) of
+ [] -> Log("all mirrors already synced", []);
+ SPids1 -> MPid ! {ready, self()},
+ Log("mirrors ~p to sync", [[node(SPid) || SPid <- SPids1]]),
+ syncer_check_resources(Ref, MPid, SPids1)
+ end.
+
+await_slaves(Ref, SPids) ->
+ [SPid || SPid <- SPids,
+ rabbit_mnesia:on_running_node(SPid) andalso %% [0]
+ receive
+ {sync_ready, Ref, SPid} -> true;
+ {sync_deny, Ref, SPid} -> false;
+ {'DOWN', _, process, SPid, _} -> false
+ end].
+%% [0] This check is in case there's been a partition which has then
+%% healed in between the master retrieving the mirror pids from Mnesia
+%% and sending 'sync_start' over GM. If so there might be mirrors on the
+%% other side of the partition which we can monitor (since they have
+%% rejoined the distributed system with us) but which did not get the
+%% 'sync_start' and so will not reply. We need to act as though they are
+%% down.
+
+syncer_check_resources(Ref, MPid, SPids) ->
+ rabbit_alarm:register(self(), {?MODULE, conserve_resources, []}),
+ %% Before we ask the master node to send the first batch of messages
+ %% over here, we check if one node is already short on memory. If
+ %% that's the case, we wait for the alarm to be cleared before
+ %% starting the syncer loop.
+ AlarmedNodes = lists:any(
+ fun
+ ({{resource_limit, memory, _}, _}) -> true;
+ ({_, _}) -> false
+ end, rabbit_alarm:get_alarms()),
+ if
+ not AlarmedNodes ->
+ MPid ! {next, Ref},
+ syncer_loop(Ref, MPid, SPids);
+ true ->
+ case wait_for_resources(Ref, SPids) of
+ cancel -> MPid ! {cancelled, Ref};
+ SPids1 -> MPid ! {next, Ref},
+ syncer_loop(Ref, MPid, SPids1)
+ end
+ end.
+
+syncer_loop(Ref, MPid, SPids) ->
+ receive
+ {conserve_resources, memory, true} ->
+ case wait_for_resources(Ref, SPids) of
+ cancel -> MPid ! {cancelled, Ref};
+ SPids1 -> syncer_loop(Ref, MPid, SPids1)
+ end;
+ {conserve_resources, _, _} ->
+ %% Ignore other alerts.
+ syncer_loop(Ref, MPid, SPids);
+ {msgs, Ref, Msgs} ->
+ SPids1 = wait_for_credit(SPids),
+ case SPids1 of
+ [] ->
+                    %% Die silently because there are no mirrors left.
+ ok;
+ _ ->
+ broadcast(SPids1, {sync_msgs, Ref, Msgs}),
+ MPid ! {next, Ref},
+ syncer_loop(Ref, MPid, SPids1)
+ end;
+ {cancel, Ref} ->
+ %% We don't tell the mirrors we will die - so when we do
+ %% they interpret that as a failure, which is what we
+ %% want.
+ ok;
+ {done, Ref} ->
+ [SPid ! {sync_complete, Ref} || SPid <- SPids]
+ end.
+
+broadcast(SPids, Msg) ->
+ [begin
+ credit_flow:send(SPid),
+ SPid ! Msg
+ end || SPid <- SPids].
+
+conserve_resources(Pid, Source, {_, Conserve, _}) ->
+ Pid ! {conserve_resources, Source, Conserve},
+ ok.
+
+wait_for_credit(SPids) ->
+ case credit_flow:blocked() of
+ true -> receive
+ {bump_credit, Msg} ->
+ credit_flow:handle_bump_msg(Msg),
+ wait_for_credit(SPids);
+ {'DOWN', _, process, SPid, _} ->
+ credit_flow:peer_down(SPid),
+ wait_for_credit(lists:delete(SPid, SPids))
+ end;
+ false -> SPids
+ end.
+
+wait_for_resources(Ref, SPids) ->
+ receive
+ {conserve_resources, memory, false} ->
+ SPids;
+ {conserve_resources, _, _} ->
+ %% Ignore other alerts.
+ wait_for_resources(Ref, SPids);
+ {cancel, Ref} ->
+ %% We don't tell the mirrors we will die - so when we do
+ %% they interpret that as a failure, which is what we
+ %% want.
+ cancel;
+ {'DOWN', _, process, SPid, _} ->
+ credit_flow:peer_down(SPid),
+ SPids1 = wait_for_credit(lists:delete(SPid, SPids)),
+ wait_for_resources(Ref, SPids1)
+ end.
+
+%% Syncer
+%% ---------------------------------------------------------------------------
+%% Slave
+
+-spec slave(non_neg_integer(), reference(), timer:tref(), pid(),
+ bq(), bqs(), fun((bq(), bqs()) -> {timer:tref(), bqs()})) ->
+ 'denied' |
+ {'ok' | 'failed', slave_sync_state()} |
+ {'stop', any(), slave_sync_state()}.
+
+slave(0, Ref, _TRef, Syncer, _BQ, _BQS, _UpdateRamDuration) ->
+ Syncer ! {sync_deny, Ref, self()},
+ denied;
+
+slave(_DD, Ref, TRef, Syncer, BQ, BQS, UpdateRamDuration) ->
+ MRef = erlang:monitor(process, Syncer),
+ Syncer ! {sync_ready, Ref, self()},
+ {_MsgCount, BQS1} = BQ:purge(BQ:purge_acks(BQS)),
+ slave_sync_loop({Ref, MRef, Syncer, BQ, UpdateRamDuration,
+ rabbit_misc:get_parent()}, {[], TRef, BQS1}).
+
+slave_sync_loop(Args = {Ref, MRef, Syncer, BQ, UpdateRamDuration, Parent},
+ State = {MA, TRef, BQS}) ->
+ receive
+ {'DOWN', MRef, process, Syncer, _Reason} ->
+ %% If the master dies half way we are not in the usual
+ %% half-synced state (with messages nearer the tail of the
+ %% queue); instead we have ones nearer the head. If we then
+ %% sync with a newly promoted master, or even just receive
+ %% messages from it, we have a hole in the middle. So the
+ %% only thing to do here is purge.
+ {_MsgCount, BQS1} = BQ:purge(BQ:purge_acks(BQS)),
+ credit_flow:peer_down(Syncer),
+ {failed, {[], TRef, BQS1}};
+ {bump_credit, Msg} ->
+ credit_flow:handle_bump_msg(Msg),
+ slave_sync_loop(Args, State);
+ {sync_complete, Ref} ->
+ erlang:demonitor(MRef, [flush]),
+ credit_flow:peer_down(Syncer),
+ {ok, State};
+ {'$gen_cast', {set_maximum_since_use, Age}} ->
+ ok = file_handle_cache:set_maximum_since_use(Age),
+ slave_sync_loop(Args, State);
+ {'$gen_cast', {set_ram_duration_target, Duration}} ->
+ BQS1 = BQ:set_ram_duration_target(Duration, BQS),
+ slave_sync_loop(Args, {MA, TRef, BQS1});
+ {'$gen_cast', {run_backing_queue, Mod, Fun}} ->
+ BQS1 = BQ:invoke(Mod, Fun, BQS),
+ slave_sync_loop(Args, {MA, TRef, BQS1});
+ update_ram_duration ->
+ {TRef1, BQS1} = UpdateRamDuration(BQ, BQS),
+ slave_sync_loop(Args, {MA, TRef1, BQS1});
+ {sync_msgs, Ref, Batch} ->
+ credit_flow:ack(Syncer),
+ {MA1, BQS1} = process_batch(Batch, MA, BQ, BQS),
+ slave_sync_loop(Args, {MA1, TRef, BQS1});
+ {'EXIT', Parent, Reason} ->
+ {stop, Reason, State};
+ %% If the master throws an exception
+ {'$gen_cast', {gm, {delete_and_terminate, Reason}}} ->
+ BQ:delete_and_terminate(Reason, BQS),
+ {stop, Reason, {[], TRef, undefined}}
+ end.
+
+%% We are partitioning messages by the Unacked element in the tuple:
+%% when Unacked = true it's a publish_delivered message, otherwise
+%% it's a publish message.
+%%
+%% Note that we can't first partition the batch and then publish each
+%% part, since that would result in re-ordering messages, which we
+%% don't want to do.
+process_batch([], MA, _BQ, BQS) ->
+ {MA, BQS};
+process_batch(Batch, MA, BQ, BQS) ->
+ {_Msg, _MsgProps, Unacked} = hd(Batch),
+ process_batch(Batch, Unacked, [], MA, BQ, BQS).
+
+process_batch([{Msg, Props, true = Unacked} | Rest], true = Unacked,
+ Acc, MA, BQ, BQS) ->
+ %% publish_delivered messages don't need the IsDelivered flag,
+ %% therefore we just add {Msg, Props} to the accumulator.
+ process_batch(Rest, Unacked, [{Msg, props(Props)} | Acc],
+ MA, BQ, BQS);
+process_batch([{Msg, Props, false = Unacked} | Rest], false = Unacked,
+ Acc, MA, BQ, BQS) ->
+    %% publish messages need the IsDelivered flag, which is set to
+    %% true here.
+ process_batch(Rest, Unacked, [{Msg, props(Props), true} | Acc],
+ MA, BQ, BQS);
+process_batch(Batch, Unacked, Acc, MA, BQ, BQS) ->
+ {MA1, BQS1} = publish_batch(Unacked, lists:reverse(Acc), MA, BQ, BQS),
+ process_batch(Batch, MA1, BQ, BQS1).
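+
+%% A sketch of the partitioning (hypothetical batch): given
+%%   [{M1, P1, false}, {M2, P2, false}, {M3, P3, true}]
+%% the run [M1, M2] goes out first via batch_publish and only then
+%% [M3] via batch_publish_delivered, preserving relative order.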
+
+%% Msgs with Unacked = false are published via batch_publish.
+publish_batch(false, Batch, MA, BQ, BQS) ->
+ batch_publish(Batch, MA, BQ, BQS);
+%% Msgs with Unacked = true are published via batch_publish_delivered.
+publish_batch(true, Batch, MA, BQ, BQS) ->
+ batch_publish_delivered(Batch, MA, BQ, BQS).
+
+
+batch_publish(Batch, MA, BQ, BQS) ->
+ BQS1 = BQ:batch_publish(Batch, none, noflow, BQS),
+ {MA, BQS1}.
+
+batch_publish_delivered(Batch, MA, BQ, BQS) ->
+ {AckTags, BQS1} = BQ:batch_publish_delivered(Batch, none, noflow, BQS),
+ MA1 = BQ:zip_msgs_and_acks(Batch, AckTags, MA, BQS1),
+ {MA1, BQS1}.
+
+props(Props) ->
+ Props#message_properties{needs_confirming = false}.
diff --git a/deps/rabbit/src/rabbit_mnesia.erl b/deps/rabbit/src/rabbit_mnesia.erl
new file mode 100644
index 0000000000..070c6a8205
--- /dev/null
+++ b/deps/rabbit/src/rabbit_mnesia.erl
@@ -0,0 +1,1117 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mnesia).
+
+-export([%% Main interface
+ init/0,
+ join_cluster/2,
+ reset/0,
+ force_reset/0,
+ update_cluster_nodes/1,
+ change_cluster_node_type/1,
+ forget_cluster_node/2,
+ force_load_next_boot/0,
+
+ %% Various queries to get the status of the db
+ status/0,
+ is_clustered/0,
+ on_running_node/1,
+ is_process_alive/1,
+ is_registered_process_alive/1,
+ cluster_nodes/1,
+ node_type/0,
+ dir/0,
+ cluster_status_from_mnesia/0,
+
+ %% Operations on the db and utils, mainly used in `rabbit_upgrade' and `rabbit'
+ init_db_unchecked/2,
+ copy_db/1,
+ check_cluster_consistency/0,
+ ensure_mnesia_dir/0,
+
+ %% Hooks used in `rabbit_node_monitor'
+ on_node_up/1,
+ on_node_down/1,
+
+ %% Helpers for diagnostics commands
+ schema_info/1
+ ]).
+
+%% Used internally in rpc calls
+-export([node_info/0, remove_node_if_mnesia_running/1]).
+
+-ifdef(TEST).
+-compile(export_all).
+-export([init_with_lock/3]).
+-endif.
+
+%%----------------------------------------------------------------------------
+
+-export_type([node_type/0, cluster_status/0]).
+
+-type node_type() :: disc | ram.
+-type cluster_status() :: {[node()], [node()], [node()]}.
+
+%%----------------------------------------------------------------------------
+%% Main interface
+%%----------------------------------------------------------------------------
+
+-spec init() -> 'ok'.
+
+init() ->
+ ensure_mnesia_running(),
+ ensure_mnesia_dir(),
+ case is_virgin_node() of
+ true ->
+ rabbit_log:info("Node database directory at ~ts is empty. "
+ "Assuming we need to join an existing cluster or initialise from scratch...~n",
+ [dir()]),
+ rabbit_peer_discovery:log_configured_backend(),
+ rabbit_peer_discovery:maybe_init(),
+ init_with_lock();
+ false ->
+ NodeType = node_type(),
+ init_db_and_upgrade(cluster_nodes(all), NodeType,
+ NodeType =:= ram, _Retry = true),
+ rabbit_peer_discovery:maybe_init(),
+ rabbit_peer_discovery:maybe_register()
+ end,
+ %% We intuitively expect the global name server to be synced when
+ %% Mnesia is up. In fact that's not guaranteed to be the case -
+ %% let's make it so.
+ ok = rabbit_node_monitor:global_sync(),
+ ok.
+
+init_with_lock() ->
+ {Retries, Timeout} = rabbit_peer_discovery:locking_retry_timeout(),
+ init_with_lock(Retries, Timeout, fun run_peer_discovery/0).
+
+init_with_lock(0, _, RunPeerDiscovery) ->
+ case rabbit_peer_discovery:lock_acquisition_failure_mode() of
+ ignore ->
+ rabbit_log:warning("Could not acquire a peer discovery lock, out of retries", []),
+ RunPeerDiscovery(),
+ rabbit_peer_discovery:maybe_register();
+ fail ->
+ exit(cannot_acquire_startup_lock)
+ end;
+init_with_lock(Retries, Timeout, RunPeerDiscovery) ->
+ LockResult = rabbit_peer_discovery:lock(),
+ rabbit_log:debug("rabbit_peer_discovery:lock returned ~p", [LockResult]),
+ case LockResult of
+ not_supported ->
+ rabbit_log:info("Peer discovery backend does not support locking, falling back to randomized delay"),
+ %% See rabbitmq/rabbitmq-server#1202 for details.
+ rabbit_peer_discovery:maybe_inject_randomized_delay(),
+ RunPeerDiscovery(),
+ rabbit_peer_discovery:maybe_register();
+ {error, _Reason} ->
+ timer:sleep(Timeout),
+ init_with_lock(Retries - 1, Timeout, RunPeerDiscovery);
+ {ok, Data} ->
+ try
+ RunPeerDiscovery(),
+ rabbit_peer_discovery:maybe_register()
+ after
+ rabbit_peer_discovery:unlock(Data)
+ end
+ end.
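+
+%% A hypothetical run of the locking flow above: with
+%% {Retries, Timeout} = {10, 30000}, a backend whose lock/0 keeps
+%% returning {error, Reason} is retried every 30 seconds; after the
+%% final attempt the outcome depends on lock_acquisition_failure_mode():
+%% 'ignore' proceeds with peer discovery without the lock, while 'fail'
+%% exits with cannot_acquire_startup_lock.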
+
+-spec run_peer_discovery() -> ok | {[node()], node_type()}.
+run_peer_discovery() ->
+ {RetriesLeft, DelayInterval} = rabbit_peer_discovery:discovery_retries(),
+ run_peer_discovery_with_retries(RetriesLeft, DelayInterval).
+
+-spec run_peer_discovery_with_retries(non_neg_integer(), non_neg_integer()) -> ok | {[node()], node_type()}.
+run_peer_discovery_with_retries(0, _DelayInterval) ->
+ ok;
+run_peer_discovery_with_retries(RetriesLeft, DelayInterval) ->
+ FindBadNodeNames = fun
+ (Name, BadNames) when is_atom(Name) -> BadNames;
+ (Name, BadNames) -> [Name | BadNames]
+ end,
+ {DiscoveredNodes0, NodeType} =
+ case rabbit_peer_discovery:discover_cluster_nodes() of
+ {error, Reason} ->
+ RetriesLeft1 = RetriesLeft - 1,
+ rabbit_log:error("Peer discovery returned an error: ~p. Will retry after a delay of ~b ms, ~b retries left...",
+ [Reason, DelayInterval, RetriesLeft1]),
+ timer:sleep(DelayInterval),
+ run_peer_discovery_with_retries(RetriesLeft1, DelayInterval);
+ {ok, {Nodes, Type} = Config}
+ when is_list(Nodes) andalso (Type == disc orelse Type == disk orelse Type == ram) ->
+ case lists:foldr(FindBadNodeNames, [], Nodes) of
+ [] -> Config;
+ BadNames -> e({invalid_cluster_node_names, BadNames})
+ end;
+ {ok, {_, BadType}} when BadType /= disc andalso BadType /= ram ->
+ e({invalid_cluster_node_type, BadType});
+ {ok, _} ->
+ e(invalid_cluster_nodes_conf)
+ end,
+ DiscoveredNodes = lists:usort(DiscoveredNodes0),
+ rabbit_log:info("All discovered existing cluster peers: ~s~n",
+ [rabbit_peer_discovery:format_discovered_nodes(DiscoveredNodes)]),
+ Peers = nodes_excl_me(DiscoveredNodes),
+ case Peers of
+ [] ->
+ rabbit_log:info("Discovered no peer nodes to cluster with. "
+                            "Some discovery backends can filter nodes out based on readiness criteria. "
+ "Enabling debug logging might help troubleshoot."),
+ init_db_and_upgrade([node()], disc, false, _Retry = true);
+ _ ->
+ rabbit_log:info("Peer nodes we can cluster with: ~s~n",
+ [rabbit_peer_discovery:format_discovered_nodes(Peers)]),
+ join_discovered_peers(Peers, NodeType)
+ end.
+
+%% Attempts to join discovered,
+%% reachable and compatible (in terms of Mnesia internal protocol version and such)
+%% cluster peers in order.
+join_discovered_peers(TryNodes, NodeType) ->
+ {RetriesLeft, DelayInterval} = rabbit_peer_discovery:discovery_retries(),
+ join_discovered_peers_with_retries(TryNodes, NodeType, RetriesLeft, DelayInterval).
+
+join_discovered_peers_with_retries(TryNodes, _NodeType, 0, _DelayInterval) ->
+ rabbit_log:warning(
+ "Could not successfully contact any node of: ~s (as in Erlang distribution). "
+ "Starting as a blank standalone node...~n",
+ [string:join(lists:map(fun atom_to_list/1, TryNodes), ",")]),
+ init_db_and_upgrade([node()], disc, false, _Retry = true);
+join_discovered_peers_with_retries(TryNodes, NodeType, RetriesLeft, DelayInterval) ->
+ case find_reachable_peer_to_cluster_with(nodes_excl_me(TryNodes)) of
+ {ok, Node} ->
+ rabbit_log:info("Node '~s' selected for auto-clustering~n", [Node]),
+ {ok, {_, DiscNodes, _}} = discover_cluster0(Node),
+ init_db_and_upgrade(DiscNodes, NodeType, true, _Retry = true),
+ rabbit_connection_tracking:boot(),
+ rabbit_node_monitor:notify_joined_cluster();
+ none ->
+ RetriesLeft1 = RetriesLeft - 1,
+ rabbit_log:error("Trying to join discovered peers failed. Will retry after a delay of ~b ms, ~b retries left...",
+ [DelayInterval, RetriesLeft1]),
+ timer:sleep(DelayInterval),
+ join_discovered_peers_with_retries(TryNodes, NodeType, RetriesLeft1, DelayInterval)
+ end.
+
+%% Make the node join a cluster. The node will be reset automatically
+%% before we actually cluster it. The nodes provided will be used to
+%% find out about the nodes in the cluster.
+%%
+%% This function will fail if:
+%%
+%% * The node is currently the only disc node of its cluster
+%% * We can't connect to any of the nodes provided
+%% * The node is currently already clustered with the cluster of the nodes
+%% provided
+%%
+%% Note that we make no attempt to verify that the nodes provided are
+%% all in the same cluster; we simply pick the first online node and
+%% join its cluster.
+
+-spec join_cluster(node(), node_type())
+ -> ok | {ok, already_member} | {error, {inconsistent_cluster, string()}}.
+
+join_cluster(DiscoveryNode, NodeType) ->
+ ensure_mnesia_not_running(),
+ ensure_mnesia_dir(),
+ case is_only_clustered_disc_node() of
+ true -> e(clustering_only_disc_node);
+ false -> ok
+ end,
+ {ClusterNodes, _, _} = discover_cluster([DiscoveryNode]),
+ case me_in_nodes(ClusterNodes) of
+ false ->
+ case check_cluster_consistency(DiscoveryNode, false) of
+ {ok, _} ->
+                    %% Reset the node. This simplifies things and it
+                    %% will be needed in this case - we're joining a new
+                    %% cluster with new nodes which are not in sync
+                    %% with the current node. It also lifts the burden
+                    %% of resetting the node from the user.
+ reset_gracefully(),
+
+ %% Join the cluster
+ rabbit_log:info("Clustering with ~p as ~p node~n",
+ [ClusterNodes, NodeType]),
+ ok = init_db_with_mnesia(ClusterNodes, NodeType,
+ true, true, _Retry = true),
+ rabbit_connection_tracking:boot(),
+ rabbit_node_monitor:notify_joined_cluster(),
+ ok;
+ {error, Reason} ->
+ {error, Reason}
+ end;
+ true ->
+ %% DiscoveryNode thinks that we are part of a cluster, but
+ %% do we think so ourselves?
+ case are_we_clustered_with(DiscoveryNode) of
+ true ->
+ rabbit_log:info("Asked to join a cluster but already a member of it: ~p~n", [ClusterNodes]),
+ {ok, already_member};
+ false ->
+ Msg = format_inconsistent_cluster_message(DiscoveryNode, node()),
+ rabbit_log:error(Msg),
+ {error, {inconsistent_cluster, Msg}}
+ end
+ end.
+
+%% Return the node to its virgin state, where it is not a member of any
+%% cluster, has no cluster configuration, no local database, and no
+%% persisted messages.
+
+-spec reset() -> 'ok'.
+
+reset() ->
+ ensure_mnesia_not_running(),
+ rabbit_log:info("Resetting Rabbit~n", []),
+ reset_gracefully().
+
+-spec force_reset() -> 'ok'.
+
+force_reset() ->
+ ensure_mnesia_not_running(),
+ rabbit_log:info("Resetting Rabbit forcefully~n", []),
+ wipe().
+
+reset_gracefully() ->
+ AllNodes = cluster_nodes(all),
+    %% Reconnecting so that we will get an up-to-date list of nodes. We
+    %% don't need to check for consistency because we are resetting.
+ %% Force=true here so that reset still works when clustered with a
+ %% node which is down.
+ init_db_with_mnesia(AllNodes, node_type(), false, false, _Retry = false),
+ case is_only_clustered_disc_node() of
+ true -> e(resetting_only_disc_node);
+ false -> ok
+ end,
+ leave_cluster(),
+ rabbit_misc:ensure_ok(mnesia:delete_schema([node()]), cannot_delete_schema),
+ wipe().
+
+wipe() ->
+ %% We need to make sure that we don't end up in a distributed
+ %% Erlang system with nodes while not being in an Mnesia cluster
+ %% with them. We don't handle that well.
+ [erlang:disconnect_node(N) || N <- cluster_nodes(all)],
+ %% remove persisted messages and any other garbage we find
+ ok = rabbit_file:recursive_delete(filelib:wildcard(dir() ++ "/*")),
+ ok = rabbit_node_monitor:reset_cluster_status(),
+ ok.
+
+-spec change_cluster_node_type(node_type()) -> 'ok'.
+
+change_cluster_node_type(Type) ->
+ ensure_mnesia_not_running(),
+ ensure_mnesia_dir(),
+ case is_clustered() of
+ false -> e(not_clustered);
+ true -> ok
+ end,
+ {_, _, RunningNodes} = discover_cluster(cluster_nodes(all)),
+ %% We might still be marked as running by a remote node since the
+ %% information of us going down might not have propagated yet.
+ Node = case RunningNodes -- [node()] of
+ [] -> e(no_online_cluster_nodes);
+ [Node0|_] -> Node0
+ end,
+ ok = reset(),
+ ok = join_cluster(Node, Type).
+
+-spec update_cluster_nodes(node()) -> 'ok'.
+
+update_cluster_nodes(DiscoveryNode) ->
+ ensure_mnesia_not_running(),
+ ensure_mnesia_dir(),
+ Status = {AllNodes, _, _} = discover_cluster([DiscoveryNode]),
+ case me_in_nodes(AllNodes) of
+ true ->
+ %% As in `check_consistency/0', we can safely delete the
+ %% schema here, since it'll be replicated from the other
+ %% nodes
+ mnesia:delete_schema([node()]),
+ rabbit_node_monitor:write_cluster_status(Status),
+ rabbit_log:info("Updating cluster nodes from ~p~n",
+ [DiscoveryNode]),
+ init_db_with_mnesia(AllNodes, node_type(), true, true, _Retry = false);
+ false ->
+ e(inconsistent_cluster)
+ end,
+ ok.
+
+%% We proceed like this: try to remove the node locally. If the node
+%% is offline, we remove the node if:
+%% * This node is a disc node
+%% * All other nodes are offline
+%% * This node was, to the best of our knowledge (see comment below),
+%%   the last or second-to-last node (after the one we're removing) to
+%%   go down
+
+-spec forget_cluster_node(node(), boolean()) -> 'ok'.
+
+forget_cluster_node(Node, RemoveWhenOffline) ->
+ forget_cluster_node(Node, RemoveWhenOffline, true).
+
+forget_cluster_node(Node, RemoveWhenOffline, EmitNodeDeletedEvent) ->
+ case lists:member(Node, cluster_nodes(all)) of
+ true -> ok;
+ false -> e(not_a_cluster_node)
+ end,
+ case {RemoveWhenOffline, is_running()} of
+ {true, false} -> remove_node_offline_node(Node);
+ {true, true} -> e(online_node_offline_flag);
+ {false, false} -> e(offline_node_no_offline_flag);
+ {false, true} -> rabbit_log:info(
+ "Removing node ~p from cluster~n", [Node]),
+ case remove_node_if_mnesia_running(Node) of
+ ok when EmitNodeDeletedEvent ->
+ rabbit_event:notify(node_deleted, [{node, Node}]),
+ ok;
+ ok -> ok;
+ {error, _} = Err -> throw(Err)
+ end
+ end.
+
+remove_node_offline_node(Node) ->
+ %% Here `mnesia:system_info(running_db_nodes)' will RPC, but that's what we
+ %% want - we need to know the running nodes *now*. If the current node is a
+ %% RAM node it will return bogus results, but we don't care since we only do
+ %% this operation from disc nodes.
+ case {mnesia:system_info(running_db_nodes) -- [Node], node_type()} of
+ {[], disc} ->
+ start_mnesia(),
+ try
+                %% What we want to do here is replace the last node to
+                %% go down with the current node. The way we do this
+                %% is by force loading the tables, and making sure that
+                %% they are loaded.
+ rabbit_table:force_load(),
+ rabbit_table:wait_for_replicated(_Retry = false),
+ %% We skip the 'node_deleted' event because the
+ %% application is stopped and thus, rabbit_event is not
+ %% enabled.
+ forget_cluster_node(Node, false, false),
+ force_load_next_boot()
+ after
+ stop_mnesia()
+ end;
+ {_, _} ->
+ e(removing_node_from_offline_node)
+ end.
+
+%%----------------------------------------------------------------------------
+%% Queries
+%%----------------------------------------------------------------------------
+
+-spec status() -> [{'nodes', [{node_type(), [node()]}]} |
+ {'running_nodes', [node()]} |
+ {'partitions', [{node(), [node()]}]}].
+
+status() ->
+ IfNonEmpty = fun (_, []) -> [];
+ (Type, Nodes) -> [{Type, Nodes}]
+ end,
+ [{nodes, (IfNonEmpty(disc, cluster_nodes(disc)) ++
+ IfNonEmpty(ram, cluster_nodes(ram)))}] ++
+ case is_running() of
+ true -> RunningNodes = cluster_nodes(running),
+ [{running_nodes, RunningNodes},
+ {cluster_name, rabbit_nodes:cluster_name()},
+ {partitions, mnesia_partitions(RunningNodes)}];
+ false -> []
+ end.
+
+mnesia_partitions(Nodes) ->
+ Replies = rabbit_node_monitor:partitions(Nodes),
+ [Reply || Reply = {_, R} <- Replies, R =/= []].
+
+is_running() -> mnesia:system_info(is_running) =:= yes.
+
+-spec is_clustered() -> boolean().
+
+is_clustered() -> AllNodes = cluster_nodes(all),
+ AllNodes =/= [] andalso AllNodes =/= [node()].
+
+-spec on_running_node(pid()) -> boolean().
+
+on_running_node(Pid) -> lists:member(node(Pid), cluster_nodes(running)).
+
+%% This requires the process be in the same running cluster as us
+%% (i.e. not partitioned or some random node).
+%%
+%% See also rabbit_misc:is_process_alive/1 which does not.
+
+-spec is_process_alive(pid() | {atom(), node()}) -> boolean().
+
+is_process_alive(Pid) when is_pid(Pid) ->
+ on_running_node(Pid) andalso
+ rpc:call(node(Pid), erlang, is_process_alive, [Pid]) =:= true;
+is_process_alive({Name, Node}) ->
+ lists:member(Node, cluster_nodes(running)) andalso
+ rpc:call(Node, rabbit_mnesia, is_registered_process_alive, [Name]) =:= true.
+
+-spec is_registered_process_alive(atom()) -> boolean().
+
+is_registered_process_alive(Name) ->
+ is_pid(whereis(Name)).
+
+-spec cluster_nodes('all' | 'disc' | 'ram' | 'running') -> [node()].
+
+cluster_nodes(WhichNodes) -> cluster_status(WhichNodes).
+
+%% This function is the actual source of information, since it gets
+%% the data from mnesia. Obviously it'll work only when mnesia is
+%% running.
+
+-spec cluster_status_from_mnesia() -> rabbit_types:ok_or_error2(
+ cluster_status(), any()).
+
+cluster_status_from_mnesia() ->
+ case is_running() of
+ false ->
+ {error, mnesia_not_running};
+ true ->
+ %% If the tables are not present, it means that
+ %% `init_db/3' hasn't been run yet. In other words, either
+ %% we are a virgin node or a restarted RAM node. In both
+ %% cases we're not interested in what mnesia has to say.
+ NodeType = case mnesia:system_info(use_dir) of
+ true -> disc;
+ false -> ram
+ end,
+ case rabbit_table:is_present() of
+ true -> AllNodes = mnesia:system_info(db_nodes),
+ DiscCopies = mnesia:table_info(schema, disc_copies),
+ DiscNodes = case NodeType of
+ disc -> nodes_incl_me(DiscCopies);
+ ram -> DiscCopies
+ end,
+ %% `mnesia:system_info(running_db_nodes)' is safe since
+ %% we know that mnesia is running
+ RunningNodes = mnesia:system_info(running_db_nodes),
+ {ok, {AllNodes, DiscNodes, RunningNodes}};
+ false -> {error, tables_not_present}
+ end
+ end.
+
+cluster_status(WhichNodes) ->
+ {AllNodes, DiscNodes, RunningNodes} = Nodes =
+ case cluster_status_from_mnesia() of
+ {ok, Nodes0} ->
+ Nodes0;
+ {error, _Reason} ->
+ {AllNodes0, DiscNodes0, RunningNodes0} =
+ rabbit_node_monitor:read_cluster_status(),
+ %% The cluster status file records the status when the node is
+ %% online, but we know for sure that the node is offline now, so
+ %% we can remove it from the list of running nodes.
+ {AllNodes0, DiscNodes0, nodes_excl_me(RunningNodes0)}
+ end,
+ case WhichNodes of
+ status -> Nodes;
+ all -> AllNodes;
+ disc -> DiscNodes;
+ ram -> AllNodes -- DiscNodes;
+ running -> RunningNodes
+ end.
+
+node_info() ->
+ {rabbit_misc:otp_release(), rabbit_misc:version(),
+ mnesia:system_info(protocol_version),
+ cluster_status_from_mnesia()}.
+
+-spec node_type() -> node_type().
+
+node_type() ->
+ {_AllNodes, DiscNodes, _RunningNodes} =
+ rabbit_node_monitor:read_cluster_status(),
+ case DiscNodes =:= [] orelse me_in_nodes(DiscNodes) of
+ true -> disc;
+ false -> ram
+ end.
+
+-spec dir() -> file:filename().
+
+dir() -> mnesia:system_info(directory).
+
+%%----------------------------------------------------------------------------
+%% Operations on the db
+%%----------------------------------------------------------------------------
+
+%% Adds the provided nodes to the mnesia cluster, creating a new
+%% schema if there is the need to and catching up if there are other
+%% nodes in the cluster already. It also updates the cluster status
+%% file.
+init_db(ClusterNodes, NodeType, CheckOtherNodes) ->
+ NodeIsVirgin = is_virgin_node(),
+ rabbit_log:debug("Does data directory looks like that of a blank (uninitialised) node? ~p", [NodeIsVirgin]),
+ Nodes = change_extra_db_nodes(ClusterNodes, CheckOtherNodes),
+ %% Note that we use `system_info' here and not the cluster status
+ %% since when we start rabbit for the first time the cluster
+ %% status will say we are a disc node but the tables won't be
+ %% present yet.
+ WasDiscNode = mnesia:system_info(use_dir),
+ case {Nodes, WasDiscNode, NodeType} of
+ {[], _, ram} ->
+ %% Standalone ram node, we don't want that
+ throw({error, cannot_create_standalone_ram_node});
+ {[], false, disc} ->
+ %% RAM -> disc, starting from scratch
+ ok = create_schema();
+ {[], true, disc} ->
+ %% First disc node up
+ maybe_force_load(),
+ ok;
+ {[_ | _], _, _} ->
+ %% Subsequent node in cluster, catch up
+ maybe_force_load(),
+ ok = rabbit_table:wait_for_replicated(_Retry = true),
+ ok = rabbit_table:ensure_local_copies(NodeType)
+ end,
+ ensure_feature_flags_are_in_sync(Nodes, NodeIsVirgin),
+ ensure_schema_integrity(),
+ rabbit_node_monitor:update_cluster_status(),
+ ok.
+
+-spec init_db_unchecked([node()], node_type()) -> 'ok'.
+
+init_db_unchecked(ClusterNodes, NodeType) ->
+ init_db(ClusterNodes, NodeType, false).
+
+init_db_and_upgrade(ClusterNodes, NodeType, CheckOtherNodes, Retry) ->
+ ok = init_db(ClusterNodes, NodeType, CheckOtherNodes),
+ ok = case rabbit_upgrade:maybe_upgrade_local() of
+ ok -> ok;
+ starting_from_scratch -> rabbit_version:record_desired();
+ version_not_available -> schema_ok_or_move()
+ end,
+ %% `maybe_upgrade_local' restarts mnesia, so ram nodes will forget
+ %% about the cluster
+ case NodeType of
+ ram -> start_mnesia(),
+ change_extra_db_nodes(ClusterNodes, false);
+ disc -> ok
+ end,
+ %% ...and all nodes will need to wait for tables
+ rabbit_table:wait_for_replicated(Retry),
+ ok.
+
+init_db_with_mnesia(ClusterNodes, NodeType,
+ CheckOtherNodes, CheckConsistency, Retry) ->
+ start_mnesia(CheckConsistency),
+ try
+ init_db_and_upgrade(ClusterNodes, NodeType, CheckOtherNodes, Retry)
+ after
+ stop_mnesia()
+ end.
+
+-spec ensure_mnesia_dir() -> 'ok'.
+
+ensure_mnesia_dir() ->
+ MnesiaDir = dir() ++ "/",
+ case filelib:ensure_dir(MnesiaDir) of
+ {error, Reason} ->
+ throw({error, {cannot_create_mnesia_dir, MnesiaDir, Reason}});
+ ok ->
+ ok
+ end.
+
+ensure_mnesia_running() ->
+ case mnesia:system_info(is_running) of
+ yes ->
+ ok;
+ starting ->
+ wait_for(mnesia_running),
+ ensure_mnesia_running();
+ Reason when Reason =:= no; Reason =:= stopping ->
+ throw({error, mnesia_not_running})
+ end.
+
+ensure_mnesia_not_running() ->
+ case mnesia:system_info(is_running) of
+ no ->
+ ok;
+ stopping ->
+ wait_for(mnesia_not_running),
+ ensure_mnesia_not_running();
+ Reason when Reason =:= yes; Reason =:= starting ->
+ throw({error, mnesia_unexpectedly_running})
+ end.
+
+ensure_feature_flags_are_in_sync(Nodes, NodeIsVirgin) ->
+ Ret = rabbit_feature_flags:sync_feature_flags_with_cluster(
+ Nodes, NodeIsVirgin),
+ case Ret of
+ ok -> ok;
+ {error, Reason} -> throw({error, {incompatible_feature_flags, Reason}})
+ end.
+
+ensure_schema_integrity() ->
+ case rabbit_table:check_schema_integrity(_Retry = true) of
+ ok ->
+ ok;
+ {error, Reason} ->
+ throw({error, {schema_integrity_check_failed, Reason}})
+ end.
+
+-spec copy_db(file:filename()) -> rabbit_types:ok_or_error(any()).
+
+copy_db(Destination) ->
+ ok = ensure_mnesia_not_running(),
+ rabbit_file:recursive_copy(dir(), Destination).
+
+force_load_filename() ->
+ filename:join(dir(), "force_load").
+
+-spec force_load_next_boot() -> 'ok'.
+
+force_load_next_boot() ->
+ rabbit_file:write_file(force_load_filename(), <<"">>).
+
+maybe_force_load() ->
+ case rabbit_file:is_file(force_load_filename()) of
+ true -> rabbit_table:force_load(),
+ rabbit_file:delete(force_load_filename());
+ false -> ok
+ end.
+
+%% This does not guarantee much, but it avoids some situations that
+%% would definitely end badly
+
+-spec check_cluster_consistency() -> 'ok'.
+
+check_cluster_consistency() ->
+ %% We want to find 0 or 1 consistent nodes.
+ case lists:foldl(
+ fun (Node, {error, _}) -> check_cluster_consistency(Node, true);
+ (_Node, {ok, Status}) -> {ok, Status}
+ end, {error, not_found}, nodes_excl_me(cluster_nodes(all)))
+ of
+ {ok, Status = {RemoteAllNodes, _, _}} ->
+ case ordsets:is_subset(ordsets:from_list(cluster_nodes(all)),
+ ordsets:from_list(RemoteAllNodes)) of
+ true ->
+ ok;
+ false ->
+ %% We delete the schema here since we think we are
+ %% clustered with nodes that are no longer in the
+ %% cluster and there is no other way to remove
+ %% them from our schema. On the other hand, we are
+ %% sure that there is another online node that we
+ %% can use to sync the tables with. There is a
+ %% race here: if between this check and the
+ %% `init_db' invocation the cluster gets
+ %% disbanded, we're left with a node with no
+ %% mnesia data that will try to connect to offline
+ %% nodes.
+ mnesia:delete_schema([node()])
+ end,
+ rabbit_node_monitor:write_cluster_status(Status);
+ {error, not_found} ->
+ ok;
+ {error, _} = E ->
+ throw(E)
+ end.
+
+check_cluster_consistency(Node, CheckNodesConsistency) ->
+ case remote_node_info(Node) of
+ {badrpc, _Reason} ->
+ {error, not_found};
+ {_OTP, Rabbit, DelegateModuleHash, _Status} when is_binary(DelegateModuleHash) ->
+ %% when a delegate module .beam file hash is present
+ %% in the tuple, we are dealing with an old version
+ rabbit_version:version_error("Rabbit", rabbit_misc:version(), Rabbit);
+ {_OTP, _Rabbit, _Protocol, {error, _}} ->
+ {error, not_found};
+ {OTP, Rabbit, Protocol, {ok, Status}} when CheckNodesConsistency ->
+ case check_consistency(Node, OTP, Rabbit, Protocol, Status) of
+ {error, _} = E -> E;
+ {ok, Res} -> {ok, Res}
+ end;
+ {OTP, Rabbit, Protocol, {ok, Status}} ->
+ case check_consistency(Node, OTP, Rabbit, Protocol) of
+ {error, _} = E -> E;
+ ok -> {ok, Status}
+ end
+ end.
+
+remote_node_info(Node) ->
+ case rpc:call(Node, rabbit_mnesia, node_info, []) of
+ {badrpc, _} = Error -> Error;
+ %% RabbitMQ prior to 3.6.2
+ {OTP, Rabbit, Status} -> {OTP, Rabbit, unsupported, Status};
+ %% RabbitMQ 3.6.2 or later
+ {OTP, Rabbit, Protocol, Status} -> {OTP, Rabbit, Protocol, Status}
+ end.
+
+
+%%--------------------------------------------------------------------
+%% Hooks for `rabbit_node_monitor'
+%%--------------------------------------------------------------------
+
+-spec on_node_up(node()) -> 'ok'.
+
+on_node_up(Node) ->
+ case running_disc_nodes() of
+ [Node] -> rabbit_log:info("cluster contains disc nodes again~n");
+ _ -> ok
+ end.
+
+-spec on_node_down(node()) -> 'ok'.
+
+on_node_down(_Node) ->
+ case running_disc_nodes() of
+ [] -> rabbit_log:info("only running disc node went down~n");
+ _ -> ok
+ end.
+
+running_disc_nodes() ->
+ {_AllNodes, DiscNodes, RunningNodes} = cluster_status(status),
+ ordsets:to_list(ordsets:intersection(ordsets:from_list(DiscNodes),
+ ordsets:from_list(RunningNodes))).
+
+%%--------------------------------------------------------------------
+%% Helpers for diagnostics commands
+%%--------------------------------------------------------------------
+
+schema_info(Items) ->
+ Tables = mnesia:system_info(tables),
+ [info(Table, Items) || Table <- Tables].
+
+info(Table, Items) ->
+ All = [{name, Table} | mnesia:table_info(Table, all)],
+ [{Item, proplists:get_value(Item, All)} || Item <- Items].
+
+%%--------------------------------------------------------------------
+%% Internal helpers
+%%--------------------------------------------------------------------
+
+discover_cluster(Nodes) ->
+ case lists:foldl(fun (_, {ok, Res}) -> {ok, Res};
+ (Node, _) -> discover_cluster0(Node)
+ end, {error, no_nodes_provided}, Nodes) of
+ {ok, Res} -> Res;
+ {error, E} -> throw({error, E});
+ {badrpc, Reason} -> throw({badrpc_multi, Reason, Nodes})
+ end.
+
+discover_cluster0(Node) when Node == node() ->
+ {error, cannot_cluster_node_with_itself};
+discover_cluster0(Node) ->
+ rpc:call(Node, rabbit_mnesia, cluster_status_from_mnesia, []).
+
+schema_ok_or_move() ->
+ case rabbit_table:check_schema_integrity(_Retry = false) of
+ ok ->
+ ok;
+ {error, Reason} ->
+ %% NB: we cannot use rabbit_log here since it may not have been
+ %% started yet
+ rabbit_log:warning("schema integrity check failed: ~p~n"
+ "moving database to backup location "
+ "and recreating schema from scratch~n",
+ [Reason]),
+ ok = move_db(),
+ ok = create_schema()
+ end.
+
+%% We only care about disc nodes since ram nodes are only supposed to
+%% catch up
+create_schema() ->
+ stop_mnesia(),
+ rabbit_log:debug("Will bootstrap a schema database..."),
+ rabbit_misc:ensure_ok(mnesia:create_schema([node()]), cannot_create_schema),
+ rabbit_log:debug("Bootstraped a schema database successfully"),
+ start_mnesia(),
+
+ rabbit_log:debug("Will create schema database tables"),
+ ok = rabbit_table:create(),
+ rabbit_log:debug("Created schema database tables successfully"),
+ rabbit_log:debug("Will check schema database integrity..."),
+ ensure_schema_integrity(),
+ rabbit_log:debug("Schema database schema integrity check passed"),
+ ok = rabbit_version:record_desired().
+
+move_db() ->
+ stop_mnesia(),
+ MnesiaDir = filename:dirname(dir() ++ "/"),
+ {{Year, Month, Day}, {Hour, Minute, Second}} = erlang:universaltime(),
+ BackupDir = rabbit_misc:format(
+ "~s_~w~2..0w~2..0w~2..0w~2..0w~2..0w",
+ [MnesiaDir, Year, Month, Day, Hour, Minute, Second]),
+ case file:rename(MnesiaDir, BackupDir) of
+ ok ->
+ %% NB: we cannot use rabbit_log here since it may not have
+ %% been started yet
+ rabbit_log:warning("moved database from ~s to ~s~n",
+ [MnesiaDir, BackupDir]),
+ ok;
+ {error, Reason} -> throw({error, {cannot_backup_mnesia,
+ MnesiaDir, BackupDir, Reason}})
+ end,
+ ensure_mnesia_dir(),
+ start_mnesia(),
+ ok.
+
+remove_node_if_mnesia_running(Node) ->
+ case is_running() of
+ false ->
+ {error, mnesia_not_running};
+ true ->
+            %% Deleting the schema copy of the node will result in
+            %% the node being removed from the cluster, with that
+            %% change being propagated to all nodes
+ case mnesia:del_table_copy(schema, Node) of
+ {atomic, ok} ->
+ rabbit_amqqueue:forget_all_durable(Node),
+ rabbit_node_monitor:notify_left_cluster(Node),
+ ok;
+ {aborted, Reason} ->
+ {error, {failed_to_remove_node, Node, Reason}}
+ end
+ end.
+
+leave_cluster() ->
+ case nodes_excl_me(cluster_nodes(all)) of
+ [] -> ok;
+ AllNodes -> case lists:any(fun leave_cluster/1, AllNodes) of
+ true -> ok;
+ false -> e(no_running_cluster_nodes)
+ end
+ end.
+
+leave_cluster(Node) ->
+ case rpc:call(Node,
+ rabbit_mnesia, remove_node_if_mnesia_running, [node()]) of
+ ok -> true;
+ {error, mnesia_not_running} -> false;
+ {error, Reason} -> throw({error, Reason});
+ {badrpc, nodedown} -> false
+ end.
+
+wait_for(Condition) ->
+ rabbit_log:info("Waiting for ~p...~n", [Condition]),
+ timer:sleep(1000).
+
+start_mnesia(CheckConsistency) ->
+ case CheckConsistency of
+ true -> check_cluster_consistency();
+ false -> ok
+ end,
+ rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia),
+ ensure_mnesia_running().
+
+start_mnesia() ->
+ start_mnesia(true).
+
+stop_mnesia() ->
+ stopped = mnesia:stop(),
+ ensure_mnesia_not_running().
+
+change_extra_db_nodes(ClusterNodes0, CheckOtherNodes) ->
+ ClusterNodes = nodes_excl_me(ClusterNodes0),
+ case {mnesia:change_config(extra_db_nodes, ClusterNodes), ClusterNodes} of
+ {{ok, []}, [_|_]} when CheckOtherNodes ->
+ throw({error, {failed_to_cluster_with, ClusterNodes,
+ "Mnesia could not connect to any nodes."}});
+ {{ok, Nodes}, _} ->
+ Nodes
+ end.
+
+check_consistency(Node, OTP, Rabbit, ProtocolVersion) ->
+ rabbit_misc:sequence_error(
+ [check_mnesia_or_otp_consistency(Node, ProtocolVersion, OTP),
+ check_rabbit_consistency(Node, Rabbit)]).
+
+check_consistency(Node, OTP, Rabbit, ProtocolVersion, Status) ->
+ rabbit_misc:sequence_error(
+ [check_mnesia_or_otp_consistency(Node, ProtocolVersion, OTP),
+ check_rabbit_consistency(Node, Rabbit),
+ check_nodes_consistency(Node, Status)]).
+
+check_nodes_consistency(Node, RemoteStatus = {RemoteAllNodes, _, _}) ->
+ case me_in_nodes(RemoteAllNodes) of
+ true ->
+ {ok, RemoteStatus};
+ false ->
+ {error, {inconsistent_cluster,
+ format_inconsistent_cluster_message(node(), Node)}}
+ end.
+
+check_mnesia_or_otp_consistency(_Node, unsupported, OTP) ->
+ rabbit_version:check_otp_consistency(OTP);
+check_mnesia_or_otp_consistency(Node, ProtocolVersion, _) ->
+ check_mnesia_consistency(Node, ProtocolVersion).
+
+check_mnesia_consistency(Node, ProtocolVersion) ->
+    %% If mnesia is running we just check the protocol version.
+    %% If it is not running, we don't want the node to join the cluster
+    %% until all checks pass, so we start mnesia without the `dir` env
+    %% variable to prevent it from joining the cluster and/or corrupting data.
+ with_running_or_clean_mnesia(fun() ->
+ case negotiate_protocol([Node]) of
+ [Node] -> ok;
+ [] ->
+ LocalVersion = mnesia:system_info(protocol_version),
+ {error, {inconsistent_cluster,
+ rabbit_misc:format("Mnesia protocol negotiation failed."
+ " Local version: ~p."
+                                        " Remote version: ~p",
+ [LocalVersion, ProtocolVersion])}}
+ end
+ end).
+
+negotiate_protocol([Node]) ->
+ mnesia_monitor:negotiate_protocol([Node]).
+
+with_running_or_clean_mnesia(Fun) ->
+ IsMnesiaRunning = case mnesia:system_info(is_running) of
+ yes -> true;
+ no -> false;
+ stopping ->
+ ensure_mnesia_not_running(),
+ false;
+ starting ->
+ ensure_mnesia_running(),
+ true
+ end,
+ case IsMnesiaRunning of
+ true -> Fun();
+ false ->
+ SavedMnesiaDir = dir(),
+ application:unset_env(mnesia, dir),
+ SchemaLoc = application:get_env(mnesia, schema_location, opt_disc),
+ application:set_env(mnesia, schema_location, ram),
+ mnesia:start(),
+ Result = Fun(),
+ application:stop(mnesia),
+ application:set_env(mnesia, dir, SavedMnesiaDir),
+ application:set_env(mnesia, schema_location, SchemaLoc),
+ Result
+ end.
+
+check_rabbit_consistency(RemoteNode, RemoteVersion) ->
+ rabbit_misc:sequence_error(
+ [rabbit_version:check_version_consistency(
+ rabbit_misc:version(), RemoteVersion, "Rabbit",
+ fun rabbit_misc:version_minor_equivalent/2),
+ rabbit_feature_flags:check_node_compatibility(RemoteNode)]).
+
+%% This is fairly tricky. We want to know if the node is in the state
+%% that a `reset' would leave it in. We cannot simply check if the
+%% mnesia tables aren't there because restarted RAM nodes won't have
+%% tables while still being non-virgin. What we do instead is check
+%% whether the mnesia directory is non-existent or empty, with the
+%% exception of certain files and directories which can be there very
+%% early on node boot.
+is_virgin_node() ->
+ case rabbit_file:list_dir(dir()) of
+ {error, enoent} ->
+ true;
+ {ok, []} ->
+ true;
+ {ok, List0} ->
+ IgnoredFiles0 =
+ [rabbit_node_monitor:cluster_status_filename(),
+ rabbit_node_monitor:running_nodes_filename(),
+ rabbit_node_monitor:default_quorum_filename(),
+ rabbit_node_monitor:quorum_filename(),
+ rabbit_feature_flags:enabled_feature_flags_list_file()],
+ IgnoredFiles = [filename:basename(File) || File <- IgnoredFiles0],
+ rabbit_log:debug("Files and directories found in node's data directory: ~s, of them to be ignored: ~s",
+ [string:join(lists:usort(List0), ", "), string:join(lists:usort(IgnoredFiles), ", ")]),
+ List = List0 -- IgnoredFiles,
+ rabbit_log:debug("Files and directories found in node's data directory sans ignored ones: ~s", [string:join(lists:usort(List), ", ")]),
+ List =:= []
+ end.
+
+find_reachable_peer_to_cluster_with([]) ->
+ none;
+find_reachable_peer_to_cluster_with([Node | Nodes]) ->
+ Fail = fun (Fmt, Args) ->
+ rabbit_log:warning(
+ "Could not auto-cluster with node ~s: " ++ Fmt, [Node | Args]),
+ find_reachable_peer_to_cluster_with(Nodes)
+ end,
+ case remote_node_info(Node) of
+ {badrpc, _} = Reason ->
+ Fail("~p~n", [Reason]);
+ %% old delegate hash check
+ {_OTP, RMQ, Hash, _} when is_binary(Hash) ->
+ Fail("version ~s~n", [RMQ]);
+ {_OTP, _RMQ, _Protocol, {error, _} = E} ->
+ Fail("~p~n", [E]);
+ {OTP, RMQ, Protocol, _} ->
+ case check_consistency(Node, OTP, RMQ, Protocol) of
+ {error, _} -> Fail("versions ~p~n",
+ [{OTP, RMQ}]);
+ ok -> {ok, Node}
+ end
+ end.
+
+is_only_clustered_disc_node() ->
+ node_type() =:= disc andalso is_clustered() andalso
+ cluster_nodes(disc) =:= [node()].
+
+are_we_clustered_with(Node) ->
+ lists:member(Node, mnesia_lib:all_nodes()).
+
+me_in_nodes(Nodes) -> lists:member(node(), Nodes).
+
+nodes_incl_me(Nodes) -> lists:usort([node()|Nodes]).
+
+nodes_excl_me(Nodes) -> Nodes -- [node()].
+
+-spec e(any()) -> no_return().
+
+e(Tag) -> throw({error, {Tag, error_description(Tag)}}).
+
+error_description({invalid_cluster_node_names, BadNames}) ->
+ "In the 'cluster_nodes' configuration key, the following node names "
+ "are invalid: " ++ lists:flatten(io_lib:format("~p", [BadNames]));
+error_description({invalid_cluster_node_type, BadType}) ->
+ "In the 'cluster_nodes' configuration key, the node type is invalid "
+ "(expected 'disc' or 'ram'): " ++
+ lists:flatten(io_lib:format("~p", [BadType]));
+error_description(invalid_cluster_nodes_conf) ->
+ "The 'cluster_nodes' configuration key is invalid, it must be of the "
+ "form {[Nodes], Type}, where Nodes is a list of node names and "
+ "Type is either 'disc' or 'ram'";
+error_description(clustering_only_disc_node) ->
+ "You cannot cluster a node if it is the only disc node in its existing "
+ " cluster. If new nodes joined while this node was offline, use "
+ "'update_cluster_nodes' to add them manually.";
+error_description(resetting_only_disc_node) ->
+ "You cannot reset a node when it is the only disc node in a cluster. "
+ "Please convert another node of the cluster to a disc node first.";
+error_description(not_clustered) ->
+ "Non-clustered nodes can only be disc nodes.";
+error_description(no_online_cluster_nodes) ->
+ "Could not find any online cluster nodes. If the cluster has changed, "
+ "you can use the 'update_cluster_nodes' command.";
+error_description(inconsistent_cluster) ->
+ "The nodes provided do not have this node as part of the cluster.";
+error_description(not_a_cluster_node) ->
+ "The node selected is not in the cluster.";
+error_description(online_node_offline_flag) ->
+ "You set the --offline flag, which is used to remove nodes remotely from "
+ "offline nodes, but this node is online.";
+error_description(offline_node_no_offline_flag) ->
+ "You are trying to remove a node from an offline node. That is dangerous, "
+ "but can be done with the --offline flag. Please consult the manual "
+ "for rabbitmqctl for more information.";
+error_description(removing_node_from_offline_node) ->
+ "To remove a node remotely from an offline node, the node you are removing "
+ "from must be a disc node and all the other nodes must be offline.";
+error_description(no_running_cluster_nodes) ->
+ "You cannot leave a cluster if no online nodes are present.".
+
+format_inconsistent_cluster_message(Thinker, Dissident) ->
+ rabbit_misc:format("Node ~p thinks it's clustered "
+ "with node ~p, but ~p disagrees",
+ [Thinker, Dissident, Dissident]).
diff --git a/deps/rabbit/src/rabbit_mnesia_rename.erl b/deps/rabbit/src/rabbit_mnesia_rename.erl
new file mode 100644
index 0000000000..e0d88c0f5e
--- /dev/null
+++ b/deps/rabbit/src/rabbit_mnesia_rename.erl
@@ -0,0 +1,276 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mnesia_rename).
+-include("rabbit.hrl").
+
+-export([rename/2]).
+-export([maybe_finish/1]).
+
+-define(CONVERT_TABLES, [schema, rabbit_durable_queue]).
+
+%% Supports renaming the nodes in the Mnesia database. In order to do
+%% this, we take a backup of the database, traverse the backup
+%% changing node names and pids as we go, then restore it.
+%%
+%% That's enough for a standalone node, for clusters the story is more
+%% complex. We can take pairs of nodes From and To, but backing up and
+%% restoring the database changes schema cookies, so if we just do
+%% this on all nodes the cluster will refuse to re-form with
+%% "Incompatible schema cookies.". Therefore we do something similar
+%% to what we do for upgrades - the first node in the cluster to
+%% restart becomes the authority, and other nodes wipe their own
+%% Mnesia state and rejoin. They also need to tell Mnesia the old node
+%% is not coming back.
+%%
+%% If we are renaming nodes one at a time then the running cluster
+%% might not be aware that a rename has taken place, so after we wipe
+%% and rejoin we update any tables (in practice just
+%% rabbit_durable_queue) that need to reflect the new node name.
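+%%
+%% As a usage sketch (the node names here are hypothetical), renaming a
+%% stopped node from 'rabbit@old-host' to 'rabbit@new-host' boils down to:
+%%
+%%   rabbit_mnesia_rename:rename(node(), [{'rabbit@old-host', 'rabbit@new-host'}]).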
+
+%%----------------------------------------------------------------------------
+
+-spec rename(node(), [{node(), node()}]) -> 'ok'.
+
+rename(Node, NodeMapList) ->
+ try
+ %% Check everything is correct and figure out what we are
+ %% changing from and to.
+ {FromNode, ToNode, NodeMap} = prepare(Node, NodeMapList),
+
+ %% We backup and restore Mnesia even if other nodes are
+ %% running at the time, and defer the final decision about
+ %% whether to use our mutated copy or rejoin the cluster until
+ %% we restart. That means we might be mutating our copy of the
+ %% database while the cluster is running. *Do not* contact the
+ %% cluster while this is happening, we are likely to get
+ %% confused.
+ application:set_env(kernel, dist_auto_connect, never),
+
+ %% Take a copy we can restore from if we abandon the
+ %% rename. We don't restore from the "backup" since restoring
+ %% that changes schema cookies and might stop us rejoining the
+ %% cluster.
+ ok = rabbit_mnesia:copy_db(mnesia_copy_dir()),
+
+ %% And make the actual changes
+ become(FromNode),
+ take_backup(before_backup_name()),
+ convert_backup(NodeMap, before_backup_name(), after_backup_name()),
+ ok = rabbit_file:write_term_file(rename_config_name(),
+ [{FromNode, ToNode}]),
+ convert_config_files(NodeMap),
+ become(ToNode),
+ restore_backup(after_backup_name()),
+ ok
+ after
+ stop_mnesia()
+ end.
+
+prepare(Node, NodeMapList) ->
+ %% If we have a previous rename and haven't started since, give up.
+ case rabbit_file:is_dir(dir()) of
+ true -> exit({rename_in_progress,
+ "Restart node under old name to roll back"});
+ false -> ok = rabbit_file:ensure_dir(mnesia_copy_dir())
+ end,
+
+ %% Check we don't have two nodes mapped to the same node
+ {FromNodes, ToNodes} = lists:unzip(NodeMapList),
+ case length(FromNodes) - length(lists:usort(ToNodes)) of
+ 0 -> ok;
+ _ -> exit({duplicate_node, ToNodes})
+ end,
+
+ %% Figure out which node we are before and after the change
+ FromNode = case [From || {From, To} <- NodeMapList,
+ To =:= Node] of
+ [N] -> N;
+ [] -> Node
+ end,
+ NodeMap = dict:from_list(NodeMapList),
+ ToNode = case dict:find(FromNode, NodeMap) of
+ {ok, N2} -> N2;
+ error -> FromNode
+ end,
+
+ %% Check that we are in the cluster, all old nodes are in the
+ %% cluster, and no new nodes are.
+ Nodes = rabbit_mnesia:cluster_nodes(all),
+ case {FromNodes -- Nodes, ToNodes -- (ToNodes -- Nodes),
+ lists:member(Node, Nodes ++ ToNodes)} of
+ {[], [], true} -> ok;
+ {[], [], false} -> exit({i_am_not_involved, Node});
+ {F, [], _} -> exit({nodes_not_in_cluster, F});
+ {_, T, _} -> exit({nodes_already_in_cluster, T})
+ end,
+ {FromNode, ToNode, NodeMap}.
+
+take_backup(Backup) ->
+ start_mnesia(),
+ %% We backup only local tables: in particular, this excludes the
+ %% connection tracking tables which have no local replica.
+ LocalTables = mnesia:system_info(local_tables),
+ {ok, Name, _Nodes} = mnesia:activate_checkpoint([
+ {max, LocalTables}
+ ]),
+ ok = mnesia:backup_checkpoint(Name, Backup),
+ stop_mnesia().
+
+restore_backup(Backup) ->
+ ok = mnesia:install_fallback(Backup, [{scope, local}]),
+ start_mnesia(),
+ stop_mnesia(),
+ rabbit_mnesia:force_load_next_boot().
+
+-spec maybe_finish([node()]) -> 'ok'.
+
+maybe_finish(AllNodes) ->
+ case rabbit_file:read_term_file(rename_config_name()) of
+ {ok, [{FromNode, ToNode}]} -> finish(FromNode, ToNode, AllNodes);
+ _ -> ok
+ end.
+
+finish(FromNode, ToNode, AllNodes) ->
+ case node() of
+ ToNode ->
+ case rabbit_upgrade:nodes_running(AllNodes) of
+ [] -> finish_primary(FromNode, ToNode);
+ _ -> finish_secondary(FromNode, ToNode, AllNodes)
+ end;
+ FromNode ->
+ rabbit_log:info(
+ "Abandoning rename from ~s to ~s since we are still ~s~n",
+ [FromNode, ToNode, FromNode]),
+ [{ok, _} = file:copy(backup_of_conf(F), F) || F <- config_files()],
+ ok = rabbit_file:recursive_delete([rabbit_mnesia:dir()]),
+ ok = rabbit_file:recursive_copy(
+ mnesia_copy_dir(), rabbit_mnesia:dir()),
+ delete_rename_files();
+ _ ->
+ %% Boot will almost certainly fail but we might as
+ %% well just log this
+ rabbit_log:info(
+ "Rename attempted from ~s to ~s but we are ~s - ignoring.~n",
+ [FromNode, ToNode, node()])
+ end.
+
+finish_primary(FromNode, ToNode) ->
+ rabbit_log:info("Restarting as primary after rename from ~s to ~s~n",
+ [FromNode, ToNode]),
+ delete_rename_files(),
+ ok.
+
+finish_secondary(FromNode, ToNode, AllNodes) ->
+ rabbit_log:info("Restarting as secondary after rename from ~s to ~s~n",
+ [FromNode, ToNode]),
+ rabbit_upgrade:secondary_upgrade(AllNodes),
+ rename_in_running_mnesia(FromNode, ToNode),
+ delete_rename_files(),
+ ok.
+
+dir() -> rabbit_mnesia:dir() ++ "-rename".
+before_backup_name() -> dir() ++ "/backup-before".
+after_backup_name() -> dir() ++ "/backup-after".
+rename_config_name() -> dir() ++ "/pending.config".
+mnesia_copy_dir() -> dir() ++ "/mnesia-copy".
+
+delete_rename_files() -> ok = rabbit_file:recursive_delete([dir()]).
+
+start_mnesia() -> rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia),
+ rabbit_table:force_load(),
+ rabbit_table:wait_for_replicated(_Retry = false).
+stop_mnesia() -> stopped = mnesia:stop().
+
+convert_backup(NodeMap, FromBackup, ToBackup) ->
+ mnesia:traverse_backup(
+ FromBackup, ToBackup,
+ fun
+ (Row, Acc) ->
+ case lists:member(element(1, Row), ?CONVERT_TABLES) of
+ true -> {[update_term(NodeMap, Row)], Acc};
+ false -> {[Row], Acc}
+ end
+ end, switched).
+
+config_files() ->
+ [rabbit_node_monitor:running_nodes_filename(),
+ rabbit_node_monitor:cluster_status_filename()].
+
+backup_of_conf(Path) ->
+ filename:join([dir(), filename:basename(Path)]).
+
+convert_config_files(NodeMap) ->
+ [convert_config_file(NodeMap, Path) || Path <- config_files()].
+
+convert_config_file(NodeMap, Path) ->
+ {ok, Term} = rabbit_file:read_term_file(Path),
+ {ok, _} = file:copy(Path, backup_of_conf(Path)),
+ ok = rabbit_file:write_term_file(Path, update_term(NodeMap, Term)).
+
+lookup_node(OldNode, NodeMap) ->
+ case dict:find(OldNode, NodeMap) of
+ {ok, NewNode} -> NewNode;
+ error -> OldNode
+ end.
+
+mini_map(FromNode, ToNode) -> dict:from_list([{FromNode, ToNode}]).
+
+update_term(NodeMap, L) when is_list(L) ->
+ [update_term(NodeMap, I) || I <- L];
+update_term(NodeMap, T) when is_tuple(T) ->
+ list_to_tuple(update_term(NodeMap, tuple_to_list(T)));
+update_term(NodeMap, Node) when is_atom(Node) ->
+ lookup_node(Node, NodeMap);
+update_term(NodeMap, Pid) when is_pid(Pid) ->
+ rabbit_misc:pid_change_node(Pid, lookup_node(node(Pid), NodeMap));
+update_term(_NodeMap, Term) ->
+ Term.
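+
+%% For example, given a hypothetical map of 'rabbit@old' => 'rabbit@new',
+%% update_term would rewrite a nested term such as
+%%   {foo, 'rabbit@old', [bar, 'rabbit@old']}
+%% into
+%%   {foo, 'rabbit@new', [bar, 'rabbit@new']},
+%% converting any embedded pids to the new node as well.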
+
+rename_in_running_mnesia(FromNode, ToNode) ->
+ All = rabbit_mnesia:cluster_nodes(all),
+ Running = rabbit_nodes:all_running(),
+ case {lists:member(FromNode, Running), lists:member(ToNode, All)} of
+ {false, true} -> ok;
+ {true, _} -> exit({old_node_running, FromNode});
+ {_, false} -> exit({new_node_not_in_cluster, ToNode})
+ end,
+ {atomic, ok} = mnesia:del_table_copy(schema, FromNode),
+ Map = mini_map(FromNode, ToNode),
+ {atomic, _} = transform_table(rabbit_durable_queue, Map),
+ ok.
+
+transform_table(Table, Map) ->
+ mnesia:sync_transaction(
+ fun () ->
+ mnesia:lock({table, Table}, write),
+ transform_table(Table, Map, mnesia:first(Table))
+ end).
+
+transform_table(_Table, _Map, '$end_of_table') ->
+ ok;
+transform_table(Table, Map, Key) ->
+ [Term] = mnesia:read(Table, Key, write),
+ ok = mnesia:write(Table, update_term(Map, Term), write),
+ transform_table(Table, Map, mnesia:next(Table, Key)).
+
+become(BecomeNode) ->
+ error_logger:tty(false),
+ case net_adm:ping(BecomeNode) of
+ pong -> exit({node_running, BecomeNode});
+ pang -> ok = net_kernel:stop(),
+ io:format(" * Impersonating node: ~s...", [BecomeNode]),
+ {ok, _} = start_distribution(BecomeNode),
+ io:format(" done~n", []),
+ Dir = mnesia:system_info(directory),
+ io:format(" * Mnesia directory : ~s~n", [Dir])
+ end.
+
+start_distribution(Name) ->
+ rabbit_nodes:ensure_epmd(),
+ NameType = rabbit_nodes_common:name_type(Name),
+ net_kernel:start([Name, NameType]).
diff --git a/deps/rabbit/src/rabbit_msg_file.erl b/deps/rabbit/src/rabbit_msg_file.erl
new file mode 100644
index 0000000000..1a24f690a0
--- /dev/null
+++ b/deps/rabbit/src/rabbit_msg_file.erl
@@ -0,0 +1,114 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_msg_file).
+
+-export([append/3, read/2, scan/4]).
+
+%%----------------------------------------------------------------------------
+
+-include("rabbit_msg_store.hrl").
+
+-define(INTEGER_SIZE_BYTES, 8).
+-define(INTEGER_SIZE_BITS, (8 * ?INTEGER_SIZE_BYTES)).
+-define(WRITE_OK_SIZE_BITS, 8).
+-define(WRITE_OK_MARKER, 255).
+-define(FILE_PACKING_ADJUSTMENT, (1 + ?INTEGER_SIZE_BYTES)).
+-define(MSG_ID_SIZE_BYTES, 16).
+-define(MSG_ID_SIZE_BITS, (8 * ?MSG_ID_SIZE_BYTES)).
+-define(SCAN_BLOCK_SIZE, 4194304). %% 4MB
+
+%%----------------------------------------------------------------------------
+
+-type io_device() :: any().
+-type position() :: non_neg_integer().
+-type msg_size() :: non_neg_integer().
+-type file_size() :: non_neg_integer().
+-type message_accumulator(A) ::
+ fun (({rabbit_types:msg_id(), msg_size(), position(), binary()}, A) ->
+ A).
+
+%%----------------------------------------------------------------------------
+
+-spec append(io_device(), rabbit_types:msg_id(), msg()) ->
+ rabbit_types:ok_or_error2(msg_size(), any()).
+
+append(FileHdl, MsgId, MsgBody)
+ when is_binary(MsgId) andalso size(MsgId) =:= ?MSG_ID_SIZE_BYTES ->
+ MsgBodyBin = term_to_binary(MsgBody),
+ MsgBodyBinSize = size(MsgBodyBin),
+ Size = MsgBodyBinSize + ?MSG_ID_SIZE_BYTES,
+ case file_handle_cache:append(FileHdl,
+ <<Size:?INTEGER_SIZE_BITS,
+ MsgId:?MSG_ID_SIZE_BYTES/binary,
+ MsgBodyBin:MsgBodyBinSize/binary,
+ ?WRITE_OK_MARKER:?WRITE_OK_SIZE_BITS>>) of
+ ok -> {ok, Size + ?FILE_PACKING_ADJUSTMENT};
+ KO -> KO
+ end.
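+
+%% On disk (as implied by the defines above) each entry is laid out as:
+%%
+%%   <<Size:64, MsgId:16/binary, MsgBodyBin/binary, 255:8>>
+%%
+%% where MsgBodyBin = term_to_binary(MsgBody), Size = 16 + byte_size(MsgBodyBin),
+%% and 255 is ?WRITE_OK_MARKER; a whole entry therefore occupies
+%% Size + ?FILE_PACKING_ADJUSTMENT (i.e. Size + 9) bytes.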
+
+-spec read(io_device(), msg_size()) ->
+ rabbit_types:ok_or_error2({rabbit_types:msg_id(), msg()},
+ any()).
+
+read(FileHdl, TotalSize) ->
+ Size = TotalSize - ?FILE_PACKING_ADJUSTMENT,
+ BodyBinSize = Size - ?MSG_ID_SIZE_BYTES,
+ case file_handle_cache:read(FileHdl, TotalSize) of
+ {ok, <<Size:?INTEGER_SIZE_BITS,
+ MsgId:?MSG_ID_SIZE_BYTES/binary,
+ MsgBodyBin:BodyBinSize/binary,
+ ?WRITE_OK_MARKER:?WRITE_OK_SIZE_BITS>>} ->
+ {ok, {MsgId, binary_to_term(MsgBodyBin)}};
+ KO -> KO
+ end.
+
+-spec scan(io_device(), file_size(), message_accumulator(A), A) ->
+ {'ok', A, position()}.
+
+scan(FileHdl, FileSize, Fun, Acc) when FileSize >= 0 ->
+ scan(FileHdl, FileSize, <<>>, 0, 0, Fun, Acc).
+
+scan(_FileHdl, FileSize, _Data, FileSize, ScanOffset, _Fun, Acc) ->
+ {ok, Acc, ScanOffset};
+scan(FileHdl, FileSize, Data, ReadOffset, ScanOffset, Fun, Acc) ->
+ Read = lists:min([?SCAN_BLOCK_SIZE, (FileSize - ReadOffset)]),
+ case file_handle_cache:read(FileHdl, Read) of
+ {ok, Data1} ->
+ {Data2, Acc1, ScanOffset1} =
+ scanner(<<Data/binary, Data1/binary>>, ScanOffset, Fun, Acc),
+ ReadOffset1 = ReadOffset + size(Data1),
+ scan(FileHdl, FileSize, Data2, ReadOffset1, ScanOffset1, Fun, Acc1);
+ _KO ->
+ {ok, Acc, ScanOffset}
+ end.
+
+scanner(<<>>, Offset, _Fun, Acc) ->
+ {<<>>, Acc, Offset};
+scanner(<<0:?INTEGER_SIZE_BITS, _Rest/binary>>, Offset, _Fun, Acc) ->
+ {<<>>, Acc, Offset}; %% Nothing to do other than stop.
+scanner(<<Size:?INTEGER_SIZE_BITS, MsgIdAndMsg:Size/binary,
+ WriteMarker:?WRITE_OK_SIZE_BITS, Rest/binary>>, Offset, Fun, Acc) ->
+ TotalSize = Size + ?FILE_PACKING_ADJUSTMENT,
+ case WriteMarker of
+ ?WRITE_OK_MARKER ->
+ %% Here we take option 5 from
+ %% https://www.erlang.org/cgi-bin/ezmlm-cgi?2:mss:1569 in
+ %% which we read the MsgId as a number, and then convert it
+ %% back to a binary in order to work around bugs in
+ %% Erlang's GC.
+ <<MsgIdNum:?MSG_ID_SIZE_BITS, Msg/binary>> =
+ <<MsgIdAndMsg:Size/binary>>,
+ <<MsgId:?MSG_ID_SIZE_BYTES/binary>> =
+ <<MsgIdNum:?MSG_ID_SIZE_BITS>>,
+ scanner(Rest, Offset + TotalSize, Fun,
+ Fun({MsgId, TotalSize, Offset, Msg}, Acc));
+ _ ->
+ scanner(Rest, Offset + TotalSize, Fun, Acc)
+ end;
+scanner(Data, Offset, _Fun, Acc) ->
+ {Data, Acc, Offset}.
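+
+%% As a usage sketch (`Hdl' here is a hypothetical file_handle_cache
+%% handle), counting the entries in a file of size FileSize:
+%%
+%%   {ok, Count, _ScanOffset} =
+%%       rabbit_msg_file:scan(Hdl, FileSize,
+%%                            fun (_Entry, N) -> N + 1 end, 0).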
diff --git a/deps/rabbit/src/rabbit_msg_record.erl b/deps/rabbit/src/rabbit_msg_record.erl
new file mode 100644
index 0000000000..3ebe14cb9f
--- /dev/null
+++ b/deps/rabbit/src/rabbit_msg_record.erl
@@ -0,0 +1,400 @@
+-module(rabbit_msg_record).
+
+-export([
+ init/1,
+ to_iodata/1,
+ from_amqp091/2,
+ to_amqp091/1,
+ add_message_annotations/2,
+ message_annotation/2,
+ message_annotation/3
+ ]).
+
+-include("rabbit.hrl").
+-include("rabbit_framing.hrl").
+-include_lib("amqp10_common/include/amqp10_framing.hrl").
+
+-type maybe(T) :: T | undefined.
+-type amqp10_data() :: #'v1_0.data'{} |
+ [#'v1_0.amqp_sequence'{} | #'v1_0.data'{}] |
+ #'v1_0.amqp_value'{}.
+-record(msg,
+ {
+ % header :: maybe(#'v1_0.header'{}),
+ % delivery_annotations :: maybe(#'v1_0.delivery_annotations'{}),
+ message_annotations :: maybe(#'v1_0.message_annotations'{}),
+ properties :: maybe(#'v1_0.properties'{}),
+ application_properties :: maybe(#'v1_0.application_properties'{}),
+ data :: maybe(amqp10_data())
+ % footer :: maybe(#'v1_0.footer'{})
+ }).
+
+%% holds static or rarely changing fields
+-record(cfg, {}).
+-record(?MODULE, {cfg :: #cfg{},
+ msg :: #msg{},
+ %% holds a list of modifications to various sections
+ changes = [] :: list()}).
+
+-opaque state() :: #?MODULE{}.
+
+-export_type([
+ state/0
+ ]).
+
+%% This module acts as a wrapper / converter between the internal binary
+%% storage format (AMQP 1.0) and any format it needs to be converted to / from.
+%% Efficiency is key: no unnecessary allocations or work should be done until
+%% absolutely needed.
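+%%
+%% A minimal round-trip sketch (the values are hypothetical):
+%%
+%%   State = rabbit_msg_record:from_amqp091(#'P_basic'{delivery_mode = 2}, <<"hi">>),
+%%   Bin   = iolist_to_binary(rabbit_msg_record:to_iodata(State)),
+%%   {#'P_basic'{delivery_mode = 2}, <<"hi">>} =
+%%       rabbit_msg_record:to_amqp091(rabbit_msg_record:init(Bin)).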
+
+%% init from an AMQP 1.0 encoded binary
+-spec init(binary()) -> state().
+init(Bin) when is_binary(Bin) ->
+ %% TODO: delay parsing until needed
+ {MA, P, AP, D} = decode(amqp10_framing:decode_bin(Bin),
+ {undefined, undefined, undefined, undefined}),
+ #?MODULE{cfg = #cfg{},
+ msg = #msg{properties = P,
+ application_properties = AP,
+ message_annotations = MA,
+ data = D}}.
+
+decode([], Acc) ->
+ Acc;
+decode([#'v1_0.message_annotations'{} = MA | Rem], {_, P, AP, D}) ->
+ decode(Rem, {MA, P, AP, D});
+decode([#'v1_0.properties'{} = P | Rem], {MA, _, AP, D}) ->
+ decode(Rem, {MA, P, AP, D});
+decode([#'v1_0.application_properties'{} = AP | Rem], {MA, P, _, D}) ->
+ decode(Rem, {MA, P, AP, D});
+decode([#'v1_0.data'{} = D | Rem], {MA, P, AP, _}) ->
+ decode(Rem, {MA, P, AP, D}).
+
+amqp10_properties_empty(#'v1_0.properties'{message_id = undefined,
+ user_id = undefined,
+ to = undefined,
+ % subject = wrap(utf8, RKey),
+ reply_to = undefined,
+ correlation_id = undefined,
+ content_type = undefined,
+ content_encoding = undefined,
+ creation_time = undefined}) ->
+ true;
+amqp10_properties_empty(_) ->
+ false.
+
+%% to realise the final binary data representation
+-spec to_iodata(state()) -> iodata().
+to_iodata(#?MODULE{msg = #msg{properties = P,
+ application_properties = AP,
+ message_annotations = MA,
+ data = Data}}) ->
+ [
+ case MA of
+ #'v1_0.message_annotations'{content = []} ->
+ <<>>;
+ _ ->
+ amqp10_framing:encode_bin(MA)
+ end,
+ case amqp10_properties_empty(P) of
+ true -> <<>>;
+ false ->
+ amqp10_framing:encode_bin(P)
+ end,
+ case AP of
+ #'v1_0.application_properties'{content = []} ->
+ <<>>;
+ _ ->
+ amqp10_framing:encode_bin(AP)
+ end,
+ amqp10_framing:encode_bin(Data)
+ ].
+
+%% TODO: refine type spec here
+-spec add_message_annotations(#{binary() => {atom(), term()}}, state()) ->
+ state().
+add_message_annotations(Anns,
+ #?MODULE{msg =
+ #msg{message_annotations = MA0} = Msg} = State) ->
+ Content = maps:fold(
+ fun (K, {T, V}, Acc) ->
+ map_add(symbol, K, T, V, Acc)
+ end,
+ case MA0 of
+ undefined -> [];
+ #'v1_0.message_annotations'{content = C} -> C
+ end,
+ Anns),
+
+ State#?MODULE{msg =
+ Msg#msg{message_annotations =
+ #'v1_0.message_annotations'{content = Content}}}.
+
+%% TODO: refine
+-type amqp10_term() :: {atom(), term()}.
+
+-spec message_annotation(binary(), state()) -> undefined | amqp10_term().
+message_annotation(Key, State) ->
+ message_annotation(Key, State, undefined).
+
+-spec message_annotation(binary(), state(), undefined | amqp10_term()) ->
+ undefined | amqp10_term().
+message_annotation(_Key, #?MODULE{msg = #msg{message_annotations = undefined}},
+ Default) ->
+ Default;
+message_annotation(Key,
+ #?MODULE{msg =
+ #msg{message_annotations =
+ #'v1_0.message_annotations'{content = Content}}},
+ Default)
+ when is_binary(Key) ->
+ case lists:search(fun ({{symbol, K}, _}) -> K == Key end, Content) of
+ {value, {_K, V}} ->
+ V;
+ false ->
+ Default
+ end.
+
+
+%% Build the message state from AMQP 0-9-1 basic properties and payload;
+%% this is the input path from storage and from, e.g., AMQP 0-9-1 socket input.
+-spec from_amqp091(#'P_basic'{}, iodata()) -> state().
+from_amqp091(#'P_basic'{message_id = MsgId,
+ expiration = Expiration,
+ delivery_mode = DelMode,
+ headers = Headers,
+ user_id = UserId,
+ reply_to = ReplyTo,
+ type = Type,
+ priority = Priority,
+ app_id = AppId,
+ correlation_id = CorrId,
+ content_type = ContentType,
+ content_encoding = ContentEncoding,
+ timestamp = Timestamp
+ }, Data) ->
+ %% TODO: support parsing properties bin directly?
+ ConvertedTs = case Timestamp of
+ undefined ->
+ undefined;
+ _ ->
+ Timestamp * 1000
+ end,
+ P = #'v1_0.properties'{message_id = wrap(utf8, MsgId),
+ user_id = wrap(binary, UserId),
+ to = undefined,
+ % subject = wrap(utf8, RKey),
+ reply_to = wrap(utf8, ReplyTo),
+ correlation_id = wrap(utf8, CorrId),
+ content_type = wrap(symbol, ContentType),
+ content_encoding = wrap(symbol, ContentEncoding),
+ creation_time = wrap(timestamp, ConvertedTs)},
+
+ APC0 = [{wrap(utf8, K), from_091(T, V)} || {K, T, V}
+ <- case Headers of
+ undefined -> [];
+ _ -> Headers
+ end],
+ %% properties that do not map directly to AMQP 1.0 properties are stored
+ %% in application properties
+ APC = map_add(utf8, <<"x-basic-type">>, utf8, Type,
+ map_add(utf8, <<"x-basic-app-id">>, utf8, AppId, APC0)),
+
+ MAC = map_add(symbol, <<"x-basic-priority">>, ubyte, Priority,
+ map_add(symbol, <<"x-basic-delivery-mode">>, ubyte, DelMode,
+ map_add(symbol, <<"x-basic-expiration">>, utf8, Expiration, []))),
+
+ AP = #'v1_0.application_properties'{content = APC},
+ MA = #'v1_0.message_annotations'{content = MAC},
+ #?MODULE{cfg = #cfg{},
+ msg = #msg{properties = P,
+ application_properties = AP,
+ message_annotations = MA,
+ data = #'v1_0.data'{content = Data}}}.
+
+map_add(_T, _Key, _Type, undefined, Acc) ->
+ Acc;
+map_add(KeyType, Key, Type, Value, Acc) ->
+ [{wrap(KeyType, Key), wrap(Type, Value)} | Acc].
+
+-spec to_amqp091(state()) -> {#'P_basic'{}, iodata()}.
+to_amqp091(#?MODULE{msg = #msg{properties = P,
+ application_properties = APR,
+ message_annotations = MAR,
+ data = #'v1_0.data'{content = Payload}}}) ->
+ #'v1_0.properties'{message_id = MsgId,
+ user_id = UserId,
+ reply_to = ReplyTo0,
+ correlation_id = CorrId,
+ content_type = ContentType,
+ content_encoding = ContentEncoding,
+ creation_time = Timestamp} = case P of
+ undefined ->
+ #'v1_0.properties'{};
+ _ ->
+ P
+ end,
+
+ AP0 = case APR of
+ #'v1_0.application_properties'{content = AC} -> AC;
+ _ -> []
+ end,
+ MA0 = case MAR of
+ #'v1_0.message_annotations'{content = MC} -> MC;
+ _ -> []
+ end,
+
+ {Type, AP1} = amqp10_map_get(utf8(<<"x-basic-type">>), AP0),
+ {AppId, AP} = amqp10_map_get(utf8(<<"x-basic-app-id">>), AP1),
+
+ {Priority, MA1} = amqp10_map_get(symbol(<<"x-basic-priority">>), MA0),
+ {DelMode, MA2} = amqp10_map_get(symbol(<<"x-basic-delivery-mode">>), MA1),
+ {Expiration, _MA} = amqp10_map_get(symbol(<<"x-basic-expiration">>), MA2),
+
+ Headers0 = [to_091(unwrap(K), V) || {K, V} <- AP],
+ {Headers1, MsgId091} = message_id(MsgId, <<"x-message-id-type">>, Headers0),
+ {Headers, CorrId091} = message_id(CorrId, <<"x-correlation-id-type">>, Headers1),
+
+ BP = #'P_basic'{message_id = MsgId091,
+ delivery_mode = DelMode,
+ expiration = Expiration,
+ user_id = unwrap(UserId),
+ headers = case Headers of
+ [] -> undefined;
+ _ -> Headers
+ end,
+ reply_to = unwrap(ReplyTo0),
+ type = Type,
+ app_id = AppId,
+ priority = Priority,
+ correlation_id = CorrId091,
+ content_type = unwrap(ContentType),
+ content_encoding = unwrap(ContentEncoding),
+ timestamp = case unwrap(Timestamp) of
+ undefined ->
+ undefined;
+ Ts ->
+ Ts div 1000
+ end
+ },
+ {BP, Payload}.
+
+%%% Internal
+
+amqp10_map_get(K, AP0) ->
+ case lists:keytake(K, 1, AP0) of
+ false ->
+ {undefined, AP0};
+ {value, {_, V}, AP} ->
+ {unwrap(V), AP}
+ end.
+
+wrap(_Type, undefined) ->
+ undefined;
+wrap(Type, Val) ->
+ {Type, Val}.
+
+unwrap(undefined) ->
+ undefined;
+unwrap({_Type, V}) ->
+ V.
+
+% symbol_for(#'v1_0.properties'{}) ->
+% {symbol, <<"amqp:properties:list">>};
+
+% number_for(#'v1_0.properties'{}) ->
+% {ulong, 115};
+% encode(Frame = #'v1_0.properties'{}) ->
+% amqp10_framing:encode_described(list, 115, Frame);
+
+% encode_described(list, CodeNumber, Frame) ->
+% {described, {ulong, CodeNumber},
+% {list, lists:map(fun encode/1, tl(tuple_to_list(Frame)))}};
+
+% -spec generate(amqp10_type()) -> iolist().
+% generate({described, Descriptor, Value}) ->
+% DescBin = generate(Descriptor),
+% ValueBin = generate(Value),
+% [ ?DESCRIBED_BIN, DescBin, ValueBin ].
+
+to_091(Key, {utf8, V}) when is_binary(V) -> {Key, longstr, V};
+to_091(Key, {long, V}) -> {Key, long, V};
+to_091(Key, {byte, V}) -> {Key, byte, V};
+to_091(Key, {ubyte, V}) -> {Key, unsignedbyte, V};
+to_091(Key, {short, V}) -> {Key, short, V};
+to_091(Key, {ushort, V}) -> {Key, unsignedshort, V};
+to_091(Key, {uint, V}) -> {Key, unsignedint, V};
+to_091(Key, {int, V}) -> {Key, signedint, V};
+to_091(Key, {double, V}) -> {Key, double, V};
+to_091(Key, {float, V}) -> {Key, float, V};
+%% NB: header values can never be shortstr!
+to_091(Key, {timestamp, V}) -> {Key, timestamp, V div 1000};
+to_091(Key, {binary, V}) -> {Key, binary, V};
+to_091(Key, {boolean, V}) -> {Key, bool, V};
+to_091(Key, true) -> {Key, bool, true};
+to_091(Key, false) -> {Key, bool, false}.
+
+from_091(longstr, V) when is_binary(V) -> {utf8, V};
+from_091(long, V) -> {long, V};
+from_091(unsignedbyte, V) -> {ubyte, V};
+from_091(short, V) -> {short, V};
+from_091(unsignedshort, V) -> {ushort, V};
+from_091(unsignedint, V) -> {uint, V};
+from_091(signedint, V) -> {int, V};
+from_091(double, V) -> {double, V};
+from_091(float, V) -> {float, V};
+from_091(bool, V) -> {boolean, V};
+from_091(binary, V) -> {binary, V};
+from_091(timestamp, V) -> {timestamp, V * 1000};
+from_091(byte, V) -> {byte, V}.
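+
+%% For example, from_091(longstr, <<"v">>) yields {utf8, <<"v">>}, and
+%% to_091(<<"k">>, {utf8, <<"v">>}) yields {<<"k">>, longstr, <<"v">>};
+%% timestamps are scaled between seconds (0-9-1) and milliseconds (1.0).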
+
+% convert_header(signedint, V) -> [$I, <<V:32/signed>>];
+% convert_header(decimal, V) -> {Before, After} = V,
+% [$D, Before, <<After:32>>];
+% convert_header(timestamp, V) -> [$T, <<V:64>>];
+% % convert_header(table, V) -> [$F | table_to_binary(V)];
+% % convert_header(array, V) -> [$A | array_to_binary(V)];
+% convert_header(byte, V) -> [$b, <<V:8/signed>>];
+% convert_header(double, V) -> [$d, <<V:64/float>>];
+% convert_header(float, V) -> [$f, <<V:32/float>>];
+% convert_header(short, V) -> [$s, <<V:16/signed>>];
+% convert_header(binary, V) -> [$x | long_string_to_binary(V)];
+% convert_header(unsignedbyte, V) -> [$B, <<V:8/unsigned>>];
+% convert_header(unsignedshort, V) -> [$u, <<V:16/unsigned>>];
+% convert_header(unsignedint, V) -> [$i, <<V:32/unsigned>>];
+% convert_header(void, _V) -> [$V].
+
+utf8(T) -> {utf8, T}.
+symbol(T) -> {symbol, T}.
+
+message_id({uuid, UUID}, HKey, H0) ->
+ H = [{HKey, longstr, <<"uuid">>} | H0],
+ {H, rabbit_data_coercion:to_binary(rabbit_guid:to_string(UUID))};
+message_id({ulong, N}, HKey, H0) ->
+ H = [{HKey, longstr, <<"ulong">>} | H0],
+ {H, erlang:integer_to_binary(N)};
+message_id({binary, B}, HKey, H0) ->
+ E = base64:encode(B),
+ case byte_size(E) > 256 of
+ true ->
+ K = binary:replace(HKey, <<"-type">>, <<>>),
+ {[{K, longstr, B} | H0], undefined};
+ false ->
+ H = [{HKey, longstr, <<"binary">>} | H0],
+ {H, E}
+ end;
+message_id({utf8, S}, HKey, H0) ->
+ case byte_size(S) > 256 of
+ true ->
+ K = binary:replace(HKey, <<"-type">>, <<>>),
+ {[{K, longstr, S} | H0], undefined};
+ false ->
+ {H0, S}
+ end;
+message_id(MsgId, _, H) ->
+ {H, unwrap(MsgId)}.
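+
+%% For example, an AMQP 1.0 message id of {ulong, 42} becomes the 0-9-1
+%% message id <<"42">>, with an <<"x-message-id-type">> longstr header of
+%% <<"ulong">> added so the original type can be restored later.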
+
+-ifdef(TEST).
+-include_lib("eunit/include/eunit.hrl").
+-endif.
diff --git a/deps/rabbit/src/rabbit_msg_store.erl b/deps/rabbit/src/rabbit_msg_store.erl
new file mode 100644
index 0000000000..4851e56248
--- /dev/null
+++ b/deps/rabbit/src/rabbit_msg_store.erl
@@ -0,0 +1,2245 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_msg_store).
+
+-behaviour(gen_server2).
+
+-export([start_link/4, start_global_store_link/4, successfully_recovered_state/1,
+ client_init/4, client_terminate/1, client_delete_and_terminate/1,
+ client_ref/1, close_all_indicated/1,
+ write/3, write_flow/3, read/2, contains/2, remove/2]).
+
+-export([set_maximum_since_use/2, combine_files/3,
+ delete_file/2]). %% internal
+
+-export([scan_file_for_valid_messages/1]). %% salvage tool
+
+-export([transform_dir/3, force_recovery/2]). %% upgrade
+
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
+ code_change/3, prioritise_call/4, prioritise_cast/3,
+ prioritise_info/3, format_message_queue/2]).
+
+%%----------------------------------------------------------------------------
+
+-include("rabbit_msg_store.hrl").
+
+-define(SYNC_INTERVAL, 25). %% milliseconds
+-define(CLEAN_FILENAME, "clean.dot").
+-define(FILE_SUMMARY_FILENAME, "file_summary.ets").
+-define(TRANSFORM_TMP, "transform_tmp").
+
+-define(BINARY_MODE, [raw, binary]).
+-define(READ_MODE, [read]).
+-define(READ_AHEAD_MODE, [read_ahead | ?READ_MODE]).
+-define(WRITE_MODE, [write]).
+
+-define(FILE_EXTENSION, ".rdq").
+-define(FILE_EXTENSION_TMP, ".rdt").
+
+-define(HANDLE_CACHE_BUFFER_SIZE, 1048576). %% 1MB
+
+%% i.e. two pairs, so GC does not go idle when busy
+-define(MAXIMUM_SIMULTANEOUS_GC_FILES, 4).
+
+%%----------------------------------------------------------------------------
+
+-record(msstate,
+ {
+ %% store directory
+ dir,
+ %% the module for index ops,
+ %% rabbit_msg_store_ets_index by default
+ index_module,
+ %% where are messages?
+ index_state,
+ %% current file name as number
+ current_file,
+ %% current file handle since the last fsync?
+ current_file_handle,
+ %% file handle cache
+ file_handle_cache,
+ %% TRef for our interval timer
+ sync_timer_ref,
+ %% sum of valid data in all files
+ sum_valid_data,
+ %% sum of file sizes
+ sum_file_size,
+ %% things to do once GC completes
+ pending_gc_completion,
+ %% pid of our GC
+ gc_pid,
+ %% tid of the shared file handles table
+ file_handles_ets,
+ %% tid of the file summary table
+ file_summary_ets,
+ %% tid of current file cache table
+ cur_file_cache_ets,
+ %% tid of writes/removes in flight
+ flying_ets,
+ %% set of dying clients
+ dying_clients,
+ %% map of references of all registered clients
+ %% to callbacks
+ clients,
+ %% boolean: did we recover state?
+ successfully_recovered,
+ %% how big are our files allowed to get?
+ file_size_limit,
+ %% client ref to synced messages mapping
+ cref_to_msg_ids,
+ %% See CREDIT_DISC_BOUND in rabbit.hrl
+ credit_disc_bound
+ }).
+
+-record(client_msstate,
+ { server,
+ client_ref,
+ file_handle_cache,
+ index_state,
+ index_module,
+ dir,
+ gc_pid,
+ file_handles_ets,
+ file_summary_ets,
+ cur_file_cache_ets,
+ flying_ets,
+ credit_disc_bound
+ }).
+
+-record(file_summary,
+ {file, valid_total_size, left, right, file_size, locked, readers}).
+
+-record(gc_state,
+ { dir,
+ index_module,
+ index_state,
+ file_summary_ets,
+ file_handles_ets,
+ msg_store
+ }).
+
+-record(dying_client,
+ { client_ref,
+ file,
+ offset
+ }).
+
+%%----------------------------------------------------------------------------
+
+-export_type([gc_state/0, file_num/0]).
+
+-type gc_state() :: #gc_state { dir :: file:filename(),
+ index_module :: atom(),
+ index_state :: any(),
+ file_summary_ets :: ets:tid(),
+ file_handles_ets :: ets:tid(),
+ msg_store :: server()
+ }.
+
+-type server() :: pid() | atom().
+-type client_ref() :: binary().
+-type file_num() :: non_neg_integer().
+-type client_msstate() :: #client_msstate {
+ server :: server(),
+ client_ref :: client_ref(),
+ file_handle_cache :: map(),
+ index_state :: any(),
+ index_module :: atom(),
+ dir :: file:filename(),
+ gc_pid :: pid(),
+ file_handles_ets :: ets:tid(),
+ file_summary_ets :: ets:tid(),
+ cur_file_cache_ets :: ets:tid(),
+ flying_ets :: ets:tid(),
+ credit_disc_bound :: {pos_integer(), pos_integer()}}.
+-type msg_ref_delta_gen(A) ::
+ fun ((A) -> 'finished' |
+ {rabbit_types:msg_id(), non_neg_integer(), A}).
+-type maybe_msg_id_fun() ::
+ 'undefined' | fun ((gb_sets:set(), 'written' | 'ignored') -> any()).
+-type maybe_close_fds_fun() :: 'undefined' | fun (() -> 'ok').
+-type deletion_thunk() :: fun (() -> boolean()).
+
+%%----------------------------------------------------------------------------
+
+%% We run GC whenever (garbage / sum_file_size) > ?GARBAGE_FRACTION
+%% It is not recommended to set this to < 0.5
+-define(GARBAGE_FRACTION, 0.5).
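+
+%% For example, if the files sum to 100MB of which 60MB is garbage,
+%% 60 / 100 = 0.6 > ?GARBAGE_FRACTION and a GC run is triggered.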
+
+%% The message store is responsible for storing messages
+%% on disk and loading them back. The store handles both
+%% persistent messages and transient ones (when a node
+%% is under RAM pressure and needs to page messages out
+%% to disk). The store is responsible for locating messages
+%% on disk and maintaining an index.
+%%
+%% There are two message stores per node: one for transient
+%% and one for persistent messages.
+%%
+%% Queue processes interact with the stores via clients.
+%%
+%% The components:
+%%
+%% Index: this is a mapping from MsgId to #msg_location{}.
+%% By default, it's in ETS, but other implementations can
+%% be used.
+%% FileSummary: this maps File to #file_summary{} and is stored
+%% in ETS.
+%%
+%% The basic idea is that messages are appended to the current file up
+%% until that file becomes too big (> file_size_limit). At that point,
+%% the file is closed and a new file is created on the _right_ of the
+%% old file which is used for new messages. Files are named
+%% numerically ascending, thus the file with the lowest name is the
+%% eldest file.
+%%
+%% We need to keep track of which messages are in which files (this is
+%% the index); how much useful data is in each file and which files
+%% are on the left and right of each other. This is the purpose of the
+%% file summary ETS table.
+%%
+%% As messages are removed from files, holes appear in these
+%% files. The field ValidTotalSize contains the total amount of useful
+%% data left in the file. This is needed for garbage collection.
+%%
+%% When we discover that a file is now empty, we delete it. When we
+%% discover that it can be combined with the useful data in either its
+%% left or right neighbour, and overall, across all the files, we have
+%% ((the amount of garbage) / (the sum of all file sizes)) >
+%% ?GARBAGE_FRACTION, we start a garbage collection run concurrently,
+%% which will compact the two files together. This keeps disk
+%% utilisation high and aids performance. We deliberately do this
+%% lazily in order to prevent doing GC on files which are soon to be
+%% emptied (and hence deleted).
+%%
+%% Given the compaction between two files, the left file (i.e. elder
+%% file) is considered the ultimate destination for the good data in
+%% the right file. If necessary, the good data in the left file which
+%% is fragmented throughout the file is written out to a temporary
+%% file, then read back in to form a contiguous chunk of good data at
+%% the start of the left file. Thus the left file is garbage collected
+%% and compacted. Then the good data from the right file is copied
+%% onto the end of the left file. Index and file summary tables are
+%% updated.
+%%
+%% On non-clean startup, we scan the files we discover, dealing with
+%% the possibilities of a crash having occurred during a compaction
+%% (this consists of tidyup - the compaction is deliberately designed
+%% such that data is duplicated on disk rather than risking it being
+%% lost), and rebuild the file summary and index ETS table.
+%%
+%% So, with this design, messages move to the left. Eventually, they
+%% should end up in a contiguous block on the left and are then never
+%% rewritten. But this isn't quite the case. If in a file there is one
+%% message that is being ignored, for some reason, and messages in the
+%% file to the right and in the current block are being read all the
+%% time then it will repeatedly be the case that the good data from
+%% both files can be combined and will be written out to a new
+%% file. Whenever this happens, our shunned message will be rewritten.
+%%
+%% So, provided that we combine messages in the right order,
+%% (i.e. left file, bottom to top, right file, bottom to top),
+%% eventually our shunned message will end up at the bottom of the
+%% left file. The compaction/combining algorithm is smart enough to
+%% read in good data from the left file that is scattered throughout
+%% (i.e. C and D in the below diagram), then truncate the file to just
+%% above B (i.e. truncate to the limit of the good contiguous region
+%% at the start of the file), then write C and D on top and then write
+%% E, F and G from the right file on top. Thus contiguous blocks of
+%% good data at the bottom of files are not rewritten.
+%%
+%% +-------+ +-------+ +-------+
+%% | X | | G | | G |
+%% +-------+ +-------+ +-------+
+%% | D | | X | | F |
+%% +-------+ +-------+ +-------+
+%% | X | | X | | E |
+%% +-------+ +-------+ +-------+
+%% | C | | F | ===> | D |
+%% +-------+ +-------+ +-------+
+%% | X | | X | | C |
+%% +-------+ +-------+ +-------+
+%% | B | | X | | B |
+%% +-------+ +-------+ +-------+
+%% | A | | E | | A |
+%% +-------+ +-------+ +-------+
+%% left right left
+%%
+%% From this reasoning, we do have a bound on the number of times the
+%% message is rewritten. From when it is inserted, there can be no
+%% files inserted between it and the head of the queue, and the worst
+%% case is that every time it is rewritten, it moves one position lower
+%% in the file (for it to stay at the same position requires that
+%% there are no holes beneath it, which means truncate would be used
+%% and so it would not be rewritten at all). Thus this seems to
+%% suggest the limit is the number of messages ahead of it in the
+%% queue, though it's likely that that's pessimistic, given the
+%% requirements for compaction/combination of files.
+%%
+%% The other property that we have is the bound on the lowest
+%% utilisation, which should be 50% - worst case is that all files are
+%% fractionally over half full and can't be combined (equivalent is
+%% alternating full files and files with only one tiny message in
+%% them).
+%%
+%% Messages are reference-counted. When a message with the same msg id
+%% is written several times we only store it once, and only remove it
+%% from the store when it has been removed the same number of times.
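+%% For example, two writes of the same msg id followed by a single
+%% remove leave the message stored with a reference count of 1; only
+%% a second remove brings the count back to 0.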
+%%
+%% The reference counts do not persist. Therefore the initialisation
+%% function must be provided with a generator that produces ref count
+%% deltas for all recovered messages. This is only used on startup
+%% when the shutdown was non-clean.
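+%%
+%% A generator is a fun that, given its seed, yields 'finished' or
+%% {MsgId, RefCountDelta, NextSeed} (see count_msg_refs/3 below); a
+%% minimal sketch over a list of {MsgId, Delta} pairs:
+%%
+%%   Gen = fun ([])                      -> finished;
+%%             ([{MsgId, Delta} | Rest]) -> {MsgId, Delta, Rest}
+%%         end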
+%%
+%% Messages that are read and have a reference count greater than one
+%% are entered into a message cache. The purpose of the cache is not
+%% primarily performance (though it can help there too) but the
+%% prevention of memory explosion. It ensures that as messages with a
+%% high reference count are read from several processes they are read
+%% back as the same binary object rather than as multiple identical
+%% binary objects.
+%%
+%% Reads can be performed directly by clients without calling to the
+%% server. This is safe because multiple file handles can be used to
+%% read files. However, locking is used by the concurrent GC to make
+%% sure that reads are not attempted from files which are in the
+%% process of being garbage collected.
+%%
+%% When a message is removed, its reference count is decremented. Even
+%% if the reference count becomes 0, its entry is not removed. This is
+%% because in the event of the same message being sent to several
+%% different queues, there is the possibility of one queue writing and
+%% removing the message before other queues write it at all. Thus
+%% accommodating 0-reference counts allows us to avoid unnecessary
+%% writes here. Of course, there are complications: the file to which
+%% the message has already been written could be locked pending
+%% deletion or GC, which means we have to rewrite the message as the
+%% original copy will now be lost.
+%%
+%% The server automatically defers reads, removes and contains calls
+%% that refer to files which are currently being
+%% GC'd. Contains calls are only deferred in order to ensure they do
+%% not overtake removes.
+%%
+%% The current file to which messages are being written has a
+%% write-back cache. This is written to immediately by clients and can
+%% be read from by clients too. This means that there are only ever
+%% writes made to the current file, thus eliminating delays due to
+%% flushing write buffers in order to be able to safely read from the
+%% current file. The one exception to this is that on start up, the
+%% cache is not populated with msgs found in the current file, and
+%% thus in this case only, reads may have to come from the file
+%% itself. The effect of this is that even if the msg_store process is
+%% heavily overloaded, clients can still write and read messages with
+%% very low latency and not block at all.
+%%
+%% Clients of the msg_store are required to register before using the
+%% msg_store. This provides them with the necessary client-side state
+%% to allow them to directly access the various caches and files. When
+%% they terminate, they should deregister. They can do this by calling
+%% either client_terminate/1 or client_delete_and_terminate/1. The
+%% differences are: (a) client_terminate is synchronous. As a result,
+%% if the msg_store is badly overloaded and has lots of in-flight
+%% writes and removes to process, this will take some time to
+%% return. However, once it does return, you can be sure that all the
+%% actions you've issued to the msg_store have been processed. (b) Not
+%% only is client_delete_and_terminate/1 asynchronous, but it also
+%% permits writes and subsequent removes from the current
+%% (terminating) client which are still in flight to be safely
+%% ignored. Thus from the point of view of the msg_store itself, and
+%% all from the same client:
+%%
+%% (T) = termination; (WN) = write of msg N; (RN) = remove of msg N
+%% --> W1, W2, W1, R1, T, W3, R2, W2, R1, R2, R3, W4 -->
+%%
+%% The client obviously sent T after all the other messages (up to
+%% W4), but because the msg_store prioritises messages, the T can be
+%% promoted and thus received early.
+%%
+%% Thus at the point of the msg_store receiving T, we have messages 1
+%% and 2 with a refcount of 1. After T, W3 will be ignored because
+%% it's an unknown message, as will R3 and W4. W2, R1 and R2 won't be
+%% ignored because the messages that they refer to were already known
+%% to the msg_store prior to T. However, it can be a little more
+%% complex: after the first R2, the refcount of msg 2 is 0. At that
+%% point, if a GC occurs or file deletion, msg 2 could vanish, which
+%% would then mean that the subsequent W2 and R2 are then ignored.
+%%
+%% The use case then for client_delete_and_terminate/1 is if the
+%% client wishes to remove everything it's written to the msg_store:
+%% it issues removes for all messages it's written and not removed,
+%% and then calls client_delete_and_terminate/1. At that point, any
+%% in-flight writes (and subsequent removes) can be ignored, but
+%% removes and writes for messages the msg_store already knows about
+%% will continue to be processed normally (which will normally just
+%% involve modifying the reference count, which is fast). Thus we save
+%% disk bandwidth for writes which are going to be immediately removed
+%% again by the terminating client.
+%%
+%% We use a separate set to keep track of the dying clients in order
+%% to keep that set, which is inspected on every write and remove, as
+%% small as possible. Inspecting the set of all clients would degrade
+%% performance with many healthy clients and few, if any, dying
+%% clients, which is the typical case.
+%%
+%% Client termination messages are stored in a separate ets index to
+%% avoid filling the primary message store index and message files with
+%% client termination messages.
+%%
+%% When the msg_store has a backlog (i.e. it has unprocessed messages
+%% in its mailbox / gen_server priority queue), a further optimisation
+%% opportunity arises: we can eliminate pairs of 'write' and 'remove'
+%% from the same client for the same message. A typical occurrence of
+%% these is when an empty durable queue delivers persistent messages
+%% to ack'ing consumers. The queue will asynchronously ask the
+%% msg_store to 'write' such messages, and when they are acknowledged
+%% it will issue a 'remove'. That 'remove' may be issued before the
+%% msg_store has processed the 'write'. There is then no point going
+%% ahead with the processing of that 'write'.
+%%
+%% To detect this situation a 'flying_ets' table is shared between the
+%% clients and the server. The table is keyed on the combination of
+%% client (reference) and msg id, and the value represents an
+%% integration of all the writes and removes currently "in flight" for
+%% that message between the client and server - '+1' means all the
+%% writes/removes add up to a single 'write', '-1' to a 'remove', and
+%% '0' to nothing. (NB: the integration can never add up to more than
+%% one 'write' or 'remove' since clients must not write/remove a message
+%% more than once without first removing/writing it).
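+%%
+%% For example, for a single message (client actions first, then the
+%% server processing them):
+%%
+%%   client issues write         -> entry goes from absent to +1
+%%   client issues remove        -> entry goes from +1 to 0
+%%   server processes the write  -> finds 0: deletes entry, eliminates
+%%   server processes the remove -> finds no entry: eliminates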
+%%
+%% Maintaining this table poses two challenges: 1) both the clients
+%% and the server access and update the table, which causes
+%% concurrency issues, 2) we must ensure that entries do not stay in
+%% the table forever, since that would constitute a memory leak. We
+%% address the former by carefully modelling all operations as
+%% sequences of atomic actions that produce valid results in all
+%% possible interleavings. We address the latter by deleting table
+%% entries whenever the server finds a 0-valued entry during the
+%% processing of a write/remove. 0 is essentially equivalent to "no
+%% entry". If, OTOH, the value is non-zero we know there is at least
+%% one other 'write' or 'remove' in flight, so we get an opportunity
+%% later to delete the table entry when processing these.
+%%
+%% There are two further complications. We need to ensure that 1)
+%% eliminated writes still get confirmed, and 2) the write-back cache
+%% doesn't grow unbounded. These are quite straightforward to
+%% address. See the comments in the code.
+%%
+%% For notes on Clean Shutdown and startup, see documentation in
+%% rabbit_variable_queue.
+
+%%----------------------------------------------------------------------------
+%% public API
+%%----------------------------------------------------------------------------
+
+-spec start_link
+ (atom(), file:filename(), [binary()] | 'undefined',
+ {msg_ref_delta_gen(A), A}) -> rabbit_types:ok_pid_or_error().
+
+start_link(Type, Dir, ClientRefs, StartupFunState) when is_atom(Type) ->
+ gen_server2:start_link(?MODULE,
+ [Type, Dir, ClientRefs, StartupFunState],
+ [{timeout, infinity}]).
+
+start_global_store_link(Type, Dir, ClientRefs, StartupFunState) when is_atom(Type) ->
+ gen_server2:start_link({local, Type}, ?MODULE,
+ [Type, Dir, ClientRefs, StartupFunState],
+ [{timeout, infinity}]).
+
+-spec successfully_recovered_state(server()) -> boolean().
+
+successfully_recovered_state(Server) ->
+ gen_server2:call(Server, successfully_recovered_state, infinity).
+
+-spec client_init(server(), client_ref(), maybe_msg_id_fun(),
+ maybe_close_fds_fun()) -> client_msstate().
+
+client_init(Server, Ref, MsgOnDiskFun, CloseFDsFun) when is_pid(Server); is_atom(Server) ->
+ {IState, IModule, Dir, GCPid,
+ FileHandlesEts, FileSummaryEts, CurFileCacheEts, FlyingEts} =
+ gen_server2:call(
+ Server, {new_client_state, Ref, self(), MsgOnDiskFun, CloseFDsFun},
+ infinity),
+ CreditDiscBound = rabbit_misc:get_env(rabbit, msg_store_credit_disc_bound,
+ ?CREDIT_DISC_BOUND),
+ #client_msstate { server = Server,
+ client_ref = Ref,
+ file_handle_cache = #{},
+ index_state = IState,
+ index_module = IModule,
+ dir = Dir,
+ gc_pid = GCPid,
+ file_handles_ets = FileHandlesEts,
+ file_summary_ets = FileSummaryEts,
+ cur_file_cache_ets = CurFileCacheEts,
+ flying_ets = FlyingEts,
+ credit_disc_bound = CreditDiscBound }.
+
+-spec client_terminate(client_msstate()) -> 'ok'.
+
+client_terminate(CState = #client_msstate { client_ref = Ref }) ->
+ close_all_handles(CState),
+ ok = server_call(CState, {client_terminate, Ref}).
+
+-spec client_delete_and_terminate(client_msstate()) -> 'ok'.
+
+client_delete_and_terminate(CState = #client_msstate { client_ref = Ref }) ->
+ close_all_handles(CState),
+ ok = server_cast(CState, {client_dying, Ref}),
+ ok = server_cast(CState, {client_delete, Ref}).
+
+-spec client_ref(client_msstate()) -> client_ref().
+
+client_ref(#client_msstate { client_ref = Ref }) -> Ref.
+
+-spec write_flow(rabbit_types:msg_id(), msg(), client_msstate()) -> 'ok'.
+
+write_flow(MsgId, Msg,
+ CState = #client_msstate {
+ server = Server,
+ credit_disc_bound = CreditDiscBound }) ->
+ %% Here we are tracking messages sent by the
+ %% rabbit_amqqueue_process process via the
+ %% rabbit_variable_queue. We are accessing the
+ %% rabbit_amqqueue_process process dictionary.
+ credit_flow:send(Server, CreditDiscBound),
+ client_write(MsgId, Msg, flow, CState).
+
+-spec write(rabbit_types:msg_id(), msg(), client_msstate()) -> 'ok'.
+
+write(MsgId, Msg, CState) -> client_write(MsgId, Msg, noflow, CState).
+
+-spec read(rabbit_types:msg_id(), client_msstate()) ->
+ {rabbit_types:ok(msg()) | 'not_found', client_msstate()}.
+
+read(MsgId,
+ CState = #client_msstate { cur_file_cache_ets = CurFileCacheEts }) ->
+ file_handle_cache_stats:update(msg_store_read),
+ %% Check the cur file cache
+ case ets:lookup(CurFileCacheEts, MsgId) of
+ [] ->
+ Defer = fun() -> {server_call(CState, {read, MsgId}), CState} end,
+ case index_lookup_positive_ref_count(MsgId, CState) of
+ not_found -> Defer();
+ MsgLocation -> client_read1(MsgLocation, Defer, CState)
+ end;
+ [{MsgId, Msg, _CacheRefCount}] ->
+ {{ok, Msg}, CState}
+ end.
+
+-spec contains(rabbit_types:msg_id(), client_msstate()) -> boolean().
+
+contains(MsgId, CState) -> server_call(CState, {contains, MsgId}).
+
+-spec remove([rabbit_types:msg_id()], client_msstate()) -> 'ok'.
+
+remove([], _CState) -> ok;
+remove(MsgIds, CState = #client_msstate { client_ref = CRef }) ->
+ [client_update_flying(-1, MsgId, CState) || MsgId <- MsgIds],
+ server_cast(CState, {remove, CRef, MsgIds}).
+
+-spec set_maximum_since_use(server(), non_neg_integer()) -> 'ok'.
+
+set_maximum_since_use(Server, Age) when is_pid(Server); is_atom(Server) ->
+ gen_server2:cast(Server, {set_maximum_since_use, Age}).
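+
+%% A minimal usage sketch of the client API above (the store, msg id
+%% and payload are hypothetical; the MsgOnDiskFun and CloseFDsFun
+%% callbacks are left undefined):
+%%
+%%   CState0 = client_init(StorePid, rabbit_guid:gen(),
+%%                         undefined, undefined),
+%%   ok = write(MsgId, Payload, CState0),
+%%   {{ok, Payload}, CState1} = read(MsgId, CState0),
+%%   ok = remove([MsgId], CState1),
+%%   ok = client_terminate(CState1).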
+
+%%----------------------------------------------------------------------------
+%% Client-side-only helpers
+%%----------------------------------------------------------------------------
+
+server_call(#client_msstate { server = Server }, Msg) ->
+ gen_server2:call(Server, Msg, infinity).
+
+server_cast(#client_msstate { server = Server }, Msg) ->
+ gen_server2:cast(Server, Msg).
+
+client_write(MsgId, Msg, Flow,
+ CState = #client_msstate { cur_file_cache_ets = CurFileCacheEts,
+ client_ref = CRef }) ->
+ file_handle_cache_stats:update(msg_store_write),
+ ok = client_update_flying(+1, MsgId, CState),
+ ok = update_msg_cache(CurFileCacheEts, MsgId, Msg),
+ ok = server_cast(CState, {write, CRef, MsgId, Flow}).
+
+client_read1(#msg_location { msg_id = MsgId, file = File } = MsgLocation, Defer,
+ CState = #client_msstate { file_summary_ets = FileSummaryEts }) ->
+ case ets:lookup(FileSummaryEts, File) of
+ [] -> %% File has been GC'd and no longer exists. Go around again.
+ read(MsgId, CState);
+ [#file_summary { locked = Locked, right = Right }] ->
+ client_read2(Locked, Right, MsgLocation, Defer, CState)
+ end.
+
+client_read2(false, undefined, _MsgLocation, Defer, _CState) ->
+ %% Although we've already checked both caches and not found the
+ %% message there, the message is apparently in the
+ %% current_file. We can only arrive here if we are trying to read
+ %% a message which we have not written, which is very odd, so just
+ %% defer.
+ %%
+ %% OR, on startup, the cur_file_cache is not populated with the
+ %% contents of the current file, thus reads from the current file
+ %% will end up here and will need to be deferred.
+ Defer();
+client_read2(true, _Right, _MsgLocation, Defer, _CState) ->
+ %% Of course, in the mean time, the GC could have run and our msg
+ %% is actually in a different file, unlocked. However, deferring is
+ %% the safest and simplest thing to do.
+ Defer();
+client_read2(false, _Right,
+ MsgLocation = #msg_location { msg_id = MsgId, file = File },
+ Defer,
+ CState = #client_msstate { file_summary_ets = FileSummaryEts }) ->
+ %% It's entirely possible that everything we're doing from here on
+ %% is for the wrong file, or a non-existent file, as a GC may have
+ %% finished.
+ safe_ets_update_counter(
+ FileSummaryEts, File, {#file_summary.readers, +1},
+ fun (_) -> client_read3(MsgLocation, Defer, CState) end,
+ fun () -> read(MsgId, CState) end).
+
+client_read3(#msg_location { msg_id = MsgId, file = File }, Defer,
+ CState = #client_msstate { file_handles_ets = FileHandlesEts,
+ file_summary_ets = FileSummaryEts,
+ gc_pid = GCPid,
+ client_ref = Ref }) ->
+ Release =
+ fun() -> ok = case ets:update_counter(FileSummaryEts, File,
+ {#file_summary.readers, -1}) of
+ 0 -> case ets:lookup(FileSummaryEts, File) of
+ [#file_summary { locked = true }] ->
+ rabbit_msg_store_gc:no_readers(
+ GCPid, File);
+ _ -> ok
+ end;
+ _ -> ok
+ end
+ end,
+ %% If a GC involving the file hasn't already started, it won't
+ %% start now. Need to check again to see if we've been locked in
+    %% the meantime, between lookup and update_counter (i.e. the GC
+    %% started before our +1; in fact, it could have finished by now
+    %% too).
+ case ets:lookup(FileSummaryEts, File) of
+ [] -> %% GC has deleted our file, just go round again.
+ read(MsgId, CState);
+ [#file_summary { locked = true }] ->
+ %% If we get a badarg here, then the GC has finished and
+ %% deleted our file. Try going around again. Otherwise,
+ %% just defer.
+ %%
+ %% badarg scenario: we lookup, msg_store locks, GC starts,
+ %% GC ends, we +1 readers, msg_store ets:deletes (and
+ %% unlocks the dest)
+ try Release(),
+ Defer()
+ catch error:badarg -> read(MsgId, CState)
+ end;
+ [#file_summary { locked = false }] ->
+ %% Ok, we're definitely safe to continue - a GC involving
+ %% the file cannot start up now, and isn't running, so
+ %% nothing will tell us from now on to close the handle if
+ %% it's already open.
+ %%
+ %% Finally, we need to recheck that the msg is still at
+ %% the same place - it's possible an entire GC ran between
+ %% us doing the lookup and the +1 on the readers. (Same as
+ %% badarg scenario above, but we don't have a missing file
+ %% - we just have the /wrong/ file).
+ case index_lookup(MsgId, CState) of
+ #msg_location { file = File } = MsgLocation ->
+ %% Still the same file.
+ {ok, CState1} = close_all_indicated(CState),
+ %% We are now guaranteed that the mark_handle_open
+ %% call will either insert_new correctly, or will
+ %% fail, but find the value is open, not close.
+ mark_handle_open(FileHandlesEts, File, Ref),
+ %% Could the msg_store now mark the file to be
+ %% closed? No: marks for closing are issued only
+ %% when the msg_store has locked the file.
+ %% This will never be the current file
+ {Msg, CState2} = read_from_disk(MsgLocation, CState1),
+ Release(), %% this MUST NOT fail with badarg
+ {{ok, Msg}, CState2};
+ #msg_location {} = MsgLocation -> %% different file!
+ Release(), %% this MUST NOT fail with badarg
+ client_read1(MsgLocation, Defer, CState);
+ not_found -> %% it seems not to exist. Defer, just to be sure.
+ try Release() %% this can badarg, same as locked case, above
+ catch error:badarg -> ok
+ end,
+ Defer()
+ end
+ end.
+
+client_update_flying(Diff, MsgId, #client_msstate { flying_ets = FlyingEts,
+ client_ref = CRef }) ->
+ Key = {MsgId, CRef},
+ case ets:insert_new(FlyingEts, {Key, Diff}) of
+ true -> ok;
+ false -> try ets:update_counter(FlyingEts, Key, {2, Diff}) of
+ 0 -> ok;
+ Diff -> ok;
+ Err -> throw({bad_flying_ets_update, Diff, Err, Key})
+ catch error:badarg ->
+ %% this is guaranteed to succeed since the
+ %% server only removes and updates flying_ets
+ %% entries; it never inserts them
+ true = ets:insert_new(FlyingEts, {Key, Diff})
+ end,
+ ok
+ end.
+
+clear_client(CRef, State = #msstate { cref_to_msg_ids = CTM,
+ dying_clients = DyingClients }) ->
+ State #msstate { cref_to_msg_ids = maps:remove(CRef, CTM),
+ dying_clients = maps:remove(CRef, DyingClients) }.
+
+
+%%----------------------------------------------------------------------------
+%% gen_server callbacks
+%%----------------------------------------------------------------------------
+
+
+init([Type, BaseDir, ClientRefs, StartupFunState]) ->
+ process_flag(trap_exit, true),
+
+ ok = file_handle_cache:register_callback(?MODULE, set_maximum_since_use,
+ [self()]),
+
+ Dir = filename:join(BaseDir, atom_to_list(Type)),
+ Name = filename:join(filename:basename(BaseDir), atom_to_list(Type)),
+
+ {ok, IndexModule} = application:get_env(rabbit, msg_store_index_module),
+ rabbit_log:info("Message store ~tp: using ~p to provide index~n", [Name, IndexModule]),
+
+ AttemptFileSummaryRecovery =
+ case ClientRefs of
+ undefined -> ok = rabbit_file:recursive_delete([Dir]),
+ ok = filelib:ensure_dir(filename:join(Dir, "nothing")),
+ false;
+ _ -> ok = filelib:ensure_dir(filename:join(Dir, "nothing")),
+ recover_crashed_compactions(Dir)
+ end,
+ %% if we found crashed compactions we trust neither the
+ %% file_summary nor the location index. Note the file_summary is
+ %% left empty here if it can't be recovered.
+ {FileSummaryRecovered, FileSummaryEts} =
+ recover_file_summary(AttemptFileSummaryRecovery, Dir),
+ {CleanShutdown, IndexState, ClientRefs1} =
+ recover_index_and_client_refs(IndexModule, FileSummaryRecovered,
+ ClientRefs, Dir, Name),
+ Clients = maps:from_list(
+ [{CRef, {undefined, undefined, undefined}} ||
+ CRef <- ClientRefs1]),
+ %% CleanShutdown => msg location index and file_summary both
+ %% recovered correctly.
+ true = case {FileSummaryRecovered, CleanShutdown} of
+ {true, false} -> ets:delete_all_objects(FileSummaryEts);
+ _ -> true
+ end,
+ %% CleanShutdown <=> msg location index and file_summary both
+ %% recovered correctly.
+
+ FileHandlesEts = ets:new(rabbit_msg_store_shared_file_handles,
+ [ordered_set, public]),
+ CurFileCacheEts = ets:new(rabbit_msg_store_cur_file, [set, public]),
+ FlyingEts = ets:new(rabbit_msg_store_flying, [set, public]),
+
+ {ok, FileSizeLimit} = application:get_env(rabbit, msg_store_file_size_limit),
+
+ {ok, GCPid} = rabbit_msg_store_gc:start_link(
+ #gc_state { dir = Dir,
+ index_module = IndexModule,
+ index_state = IndexState,
+ file_summary_ets = FileSummaryEts,
+ file_handles_ets = FileHandlesEts,
+ msg_store = self()
+ }),
+
+ CreditDiscBound = rabbit_misc:get_env(rabbit, msg_store_credit_disc_bound,
+ ?CREDIT_DISC_BOUND),
+
+ State = #msstate { dir = Dir,
+ index_module = IndexModule,
+ index_state = IndexState,
+ current_file = 0,
+ current_file_handle = undefined,
+ file_handle_cache = #{},
+ sync_timer_ref = undefined,
+ sum_valid_data = 0,
+ sum_file_size = 0,
+ pending_gc_completion = maps:new(),
+ gc_pid = GCPid,
+ file_handles_ets = FileHandlesEts,
+ file_summary_ets = FileSummaryEts,
+ cur_file_cache_ets = CurFileCacheEts,
+ flying_ets = FlyingEts,
+ dying_clients = #{},
+ clients = Clients,
+ successfully_recovered = CleanShutdown,
+ file_size_limit = FileSizeLimit,
+ cref_to_msg_ids = #{},
+ credit_disc_bound = CreditDiscBound
+ },
+ %% If we didn't recover the msg location index then we need to
+ %% rebuild it now.
+ Cleanliness = case CleanShutdown of
+ true -> "clean";
+ false -> "unclean"
+ end,
+ rabbit_log:debug("Rebuilding message location index after ~s shutdown...~n",
+ [Cleanliness]),
+ {Offset, State1 = #msstate { current_file = CurFile }} =
+ build_index(CleanShutdown, StartupFunState, State),
+ rabbit_log:debug("Finished rebuilding index~n", []),
+ %% read is only needed so that we can seek
+ {ok, CurHdl} = open_file(Dir, filenum_to_name(CurFile),
+ [read | ?WRITE_MODE]),
+ {ok, Offset} = file_handle_cache:position(CurHdl, Offset),
+ ok = file_handle_cache:truncate(CurHdl),
+
+ {ok, maybe_compact(State1 #msstate { current_file_handle = CurHdl }),
+ hibernate,
+ {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, ?DESIRED_HIBERNATE}}.
+
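+%% gen_server2 consults these callbacks to prioritise its message
+%% queue; higher values are processed first. This is how a terminating
+%% client's 'T' can overtake its own in-flight writes, as described in
+%% the preamble above.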
+prioritise_call(Msg, _From, _Len, _State) ->
+ case Msg of
+ successfully_recovered_state -> 7;
+ {new_client_state, _Ref, _Pid, _MODC, _CloseFDsFun} -> 7;
+ {read, _MsgId} -> 2;
+ _ -> 0
+ end.
+
+prioritise_cast(Msg, _Len, _State) ->
+ case Msg of
+ {combine_files, _Source, _Destination, _Reclaimed} -> 8;
+ {delete_file, _File, _Reclaimed} -> 8;
+ {set_maximum_since_use, _Age} -> 8;
+ {client_dying, _Pid} -> 7;
+ _ -> 0
+ end.
+
+prioritise_info(Msg, _Len, _State) ->
+ case Msg of
+ sync -> 8;
+ _ -> 0
+ end.
+
+handle_call(successfully_recovered_state, _From, State) ->
+ reply(State #msstate.successfully_recovered, State);
+
+handle_call({new_client_state, CRef, CPid, MsgOnDiskFun, CloseFDsFun}, _From,
+ State = #msstate { dir = Dir,
+ index_state = IndexState,
+ index_module = IndexModule,
+ file_handles_ets = FileHandlesEts,
+ file_summary_ets = FileSummaryEts,
+ cur_file_cache_ets = CurFileCacheEts,
+ flying_ets = FlyingEts,
+ clients = Clients,
+ gc_pid = GCPid }) ->
+ Clients1 = maps:put(CRef, {CPid, MsgOnDiskFun, CloseFDsFun}, Clients),
+ erlang:monitor(process, CPid),
+ reply({IndexState, IndexModule, Dir, GCPid, FileHandlesEts, FileSummaryEts,
+ CurFileCacheEts, FlyingEts},
+ State #msstate { clients = Clients1 });
+
+handle_call({client_terminate, CRef}, _From, State) ->
+ reply(ok, clear_client(CRef, State));
+
+handle_call({read, MsgId}, From, State) ->
+ State1 = read_message(MsgId, From, State),
+ noreply(State1);
+
+handle_call({contains, MsgId}, From, State) ->
+ State1 = contains_message(MsgId, From, State),
+ noreply(State1).
+
+handle_cast({client_dying, CRef},
+ State = #msstate { dying_clients = DyingClients,
+ current_file_handle = CurHdl,
+ current_file = CurFile }) ->
+ {ok, CurOffset} = file_handle_cache:current_virtual_offset(CurHdl),
+ DyingClients1 = maps:put(CRef,
+ #dying_client{client_ref = CRef,
+ file = CurFile,
+ offset = CurOffset},
+ DyingClients),
+ noreply(State #msstate { dying_clients = DyingClients1 });
+
+handle_cast({client_delete, CRef},
+ State = #msstate { clients = Clients }) ->
+ State1 = State #msstate { clients = maps:remove(CRef, Clients) },
+ noreply(clear_client(CRef, State1));
+
+handle_cast({write, CRef, MsgId, Flow},
+ State = #msstate { cur_file_cache_ets = CurFileCacheEts,
+ clients = Clients,
+ credit_disc_bound = CreditDiscBound }) ->
+ case Flow of
+ flow -> {CPid, _, _} = maps:get(CRef, Clients),
+ %% We are going to process a message sent by the
+ %% rabbit_amqqueue_process. Now we are accessing the
+ %% msg_store process dictionary.
+ credit_flow:ack(CPid, CreditDiscBound);
+ noflow -> ok
+ end,
+ true = 0 =< ets:update_counter(CurFileCacheEts, MsgId, {3, -1}),
+ case update_flying(-1, MsgId, CRef, State) of
+ process ->
+ [{MsgId, Msg, _PWC}] = ets:lookup(CurFileCacheEts, MsgId),
+ noreply(write_message(MsgId, Msg, CRef, State));
+ ignore ->
+ %% A 'remove' has already been issued and eliminated the
+ %% 'write'.
+ State1 = blind_confirm(CRef, gb_sets:singleton(MsgId),
+ ignored, State),
+ %% If all writes get eliminated, cur_file_cache_ets could
+ %% grow unbounded. To prevent that we delete the cache
+ %% entry here, but only if the message isn't in the
+ %% current file. That way reads of the message can
+ %% continue to be done client side, from either the cache
+ %% or the non-current files. If the message *is* in the
+ %% current file then the cache entry will be removed by
+ %% the normal logic for that in write_message/4 and
+ %% maybe_roll_to_new_file/2.
+ case index_lookup(MsgId, State1) of
+ [#msg_location { file = File }]
+ when File == State1 #msstate.current_file ->
+ ok;
+ _ ->
+ true = ets:match_delete(CurFileCacheEts, {MsgId, '_', 0})
+ end,
+ noreply(State1)
+ end;
+
+handle_cast({remove, CRef, MsgIds}, State) ->
+ {RemovedMsgIds, State1} =
+ lists:foldl(
+ fun (MsgId, {Removed, State2}) ->
+ case update_flying(+1, MsgId, CRef, State2) of
+ process -> {[MsgId | Removed],
+ remove_message(MsgId, CRef, State2)};
+ ignore -> {Removed, State2}
+ end
+ end, {[], State}, MsgIds),
+ noreply(maybe_compact(client_confirm(CRef, gb_sets:from_list(RemovedMsgIds),
+ ignored, State1)));
+
+handle_cast({combine_files, Source, Destination, Reclaimed},
+ State = #msstate { sum_file_size = SumFileSize,
+ file_handles_ets = FileHandlesEts,
+ file_summary_ets = FileSummaryEts,
+ clients = Clients }) ->
+ ok = cleanup_after_file_deletion(Source, State),
+ %% see comment in cleanup_after_file_deletion, and client_read3
+ true = mark_handle_to_close(Clients, FileHandlesEts, Destination, false),
+ true = ets:update_element(FileSummaryEts, Destination,
+ {#file_summary.locked, false}),
+ State1 = State #msstate { sum_file_size = SumFileSize - Reclaimed },
+ noreply(maybe_compact(run_pending([Source, Destination], State1)));
+
+handle_cast({delete_file, File, Reclaimed},
+ State = #msstate { sum_file_size = SumFileSize }) ->
+ ok = cleanup_after_file_deletion(File, State),
+ State1 = State #msstate { sum_file_size = SumFileSize - Reclaimed },
+ noreply(maybe_compact(run_pending([File], State1)));
+
+handle_cast({set_maximum_since_use, Age}, State) ->
+ ok = file_handle_cache:set_maximum_since_use(Age),
+ noreply(State).
+
+handle_info(sync, State) ->
+ noreply(internal_sync(State));
+
+handle_info(timeout, State) ->
+ noreply(internal_sync(State));
+
+handle_info({'DOWN', _MRef, process, Pid, _Reason}, State) ->
+ %% similar to what happens in
+ %% rabbit_amqqueue_process:handle_ch_down but with a relation of
+ %% msg_store -> rabbit_amqqueue_process instead of
+ %% rabbit_amqqueue_process -> rabbit_channel.
+ credit_flow:peer_down(Pid),
+ noreply(State);
+
+handle_info({'EXIT', _Pid, Reason}, State) ->
+ {stop, Reason, State}.
+
+terminate(_Reason, State = #msstate { index_state = IndexState,
+ index_module = IndexModule,
+ current_file_handle = CurHdl,
+ gc_pid = GCPid,
+ file_handles_ets = FileHandlesEts,
+ file_summary_ets = FileSummaryEts,
+ cur_file_cache_ets = CurFileCacheEts,
+ flying_ets = FlyingEts,
+ clients = Clients,
+ dir = Dir }) ->
+ rabbit_log:info("Stopping message store for directory '~s'", [Dir]),
+ %% stop the gc first, otherwise it could be working and we pull
+ %% out the ets tables from under it.
+ ok = rabbit_msg_store_gc:stop(GCPid),
+ State1 = case CurHdl of
+ undefined -> State;
+ _ -> State2 = internal_sync(State),
+ ok = file_handle_cache:close(CurHdl),
+ State2
+ end,
+ State3 = close_all_handles(State1),
+ case store_file_summary(FileSummaryEts, Dir) of
+ ok -> ok;
+ {error, FSErr} ->
+ rabbit_log:error("Unable to store file summary"
+ " for vhost message store for directory ~p~n"
+ "Error: ~p~n",
+ [Dir, FSErr])
+ end,
+ [true = ets:delete(T) || T <- [FileSummaryEts, FileHandlesEts,
+ CurFileCacheEts, FlyingEts]],
+ IndexModule:terminate(IndexState),
+ case store_recovery_terms([{client_refs, maps:keys(Clients)},
+ {index_module, IndexModule}], Dir) of
+ ok ->
+ rabbit_log:info("Message store for directory '~s' is stopped", [Dir]),
+ ok;
+ {error, RTErr} ->
+ rabbit_log:error("Unable to save message store recovery terms"
+ " for directory ~p~nError: ~p~n",
+ [Dir, RTErr])
+ end,
+ State3 #msstate { index_state = undefined,
+ current_file_handle = undefined }.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+format_message_queue(Opt, MQ) -> rabbit_misc:format_message_queue(Opt, MQ).
+
+%%----------------------------------------------------------------------------
+%% general helper functions
+%%----------------------------------------------------------------------------
+
+noreply(State) ->
+ {State1, Timeout} = next_state(State),
+ {noreply, State1, Timeout}.
+
+reply(Reply, State) ->
+ {State1, Timeout} = next_state(State),
+ {reply, Reply, State1, Timeout}.
+
+next_state(State = #msstate { sync_timer_ref = undefined,
+ cref_to_msg_ids = CTM }) ->
+ case maps:size(CTM) of
+ 0 -> {State, hibernate};
+ _ -> {start_sync_timer(State), 0}
+ end;
+next_state(State = #msstate { cref_to_msg_ids = CTM }) ->
+ case maps:size(CTM) of
+ 0 -> {stop_sync_timer(State), hibernate};
+ _ -> {State, 0}
+ end.
+
+start_sync_timer(State) ->
+ rabbit_misc:ensure_timer(State, #msstate.sync_timer_ref,
+ ?SYNC_INTERVAL, sync).
+
+stop_sync_timer(State) ->
+ rabbit_misc:stop_timer(State, #msstate.sync_timer_ref).
+
+internal_sync(State = #msstate { current_file_handle = CurHdl,
+ cref_to_msg_ids = CTM }) ->
+ State1 = stop_sync_timer(State),
+ CGs = maps:fold(fun (CRef, MsgIds, NS) ->
+ case gb_sets:is_empty(MsgIds) of
+ true -> NS;
+ false -> [{CRef, MsgIds} | NS]
+ end
+ end, [], CTM),
+ ok = case CGs of
+ [] -> ok;
+ _ -> file_handle_cache:sync(CurHdl)
+ end,
+ lists:foldl(fun ({CRef, MsgIds}, StateN) ->
+ client_confirm(CRef, MsgIds, written, StateN)
+ end, State1, CGs).
+
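+%% Server-side half of the flying-entry protocol. The client recorded
+%% +1 for a write and -1 for a remove, and the server passes in the
+%% opposite sign (Diff), so finding NDiff means exactly this operation
+%% is in flight and must be processed, while 0 (or Diff, see [1])
+%% means a write/remove pair cancelled out and can be eliminated.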
+update_flying(Diff, MsgId, CRef, #msstate { flying_ets = FlyingEts }) ->
+ Key = {MsgId, CRef},
+ NDiff = -Diff,
+ case ets:lookup(FlyingEts, Key) of
+ [] -> ignore;
+ [{_, Diff}] -> ignore; %% [1]
+ [{_, NDiff}] -> ets:update_counter(FlyingEts, Key, {2, Diff}),
+ true = ets:delete_object(FlyingEts, {Key, 0}),
+ process;
+ [{_, 0}] -> true = ets:delete_object(FlyingEts, {Key, 0}),
+ ignore;
+ [{_, Err}] -> throw({bad_flying_ets_record, Diff, Err, Key})
+ end.
+%% [1] We can get here, for example, in the following scenario: There
+%% is a write followed by a remove in flight. The counter will be 0,
+%% so on processing the write the server attempts to delete the
+%% entry. If at that point the client injects another write it will
+%% either insert a new entry, containing +1, or increment the existing
+%% entry to +1, thus preventing its removal. Either way therefore when
+%% the server processes the remove, the counter will be +1.
+
+write_action({true, not_found}, _MsgId, State) ->
+ {ignore, undefined, State};
+write_action({true, #msg_location { file = File }}, _MsgId, State) ->
+ {ignore, File, State};
+write_action({false, not_found}, _MsgId, State) ->
+ {write, State};
+write_action({Mask, #msg_location { ref_count = 0, file = File,
+ total_size = TotalSize }},
+ MsgId, State = #msstate { file_summary_ets = FileSummaryEts }) ->
+ case {Mask, ets:lookup(FileSummaryEts, File)} of
+ {false, [#file_summary { locked = true }]} ->
+ ok = index_delete(MsgId, State),
+ {write, State};
+ {false_if_increment, [#file_summary { locked = true }]} ->
+ %% The msg for MsgId is older than the client death
+ %% message, but as it is being GC'd currently we'll have
+ %% to write a new copy, which will then be younger, so
+ %% ignore this write.
+ {ignore, File, State};
+ {_Mask, [#file_summary {}]} ->
+ ok = index_update_ref_count(MsgId, 1, State),
+ State1 = adjust_valid_total_size(File, TotalSize, State),
+ {confirm, File, State1}
+ end;
+write_action({_Mask, #msg_location { ref_count = RefCount, file = File }},
+ MsgId, State) ->
+ ok = index_update_ref_count(MsgId, RefCount + 1, State),
+    %% We already know about it, so just update the counter. Update
+    %% only this field, otherwise we risk a bad interaction with
+    %% concurrent GC.
+ {confirm, File, State}.
+
+write_message(MsgId, Msg, CRef,
+ State = #msstate { cur_file_cache_ets = CurFileCacheEts }) ->
+ case write_action(should_mask_action(CRef, MsgId, State), MsgId, State) of
+ {write, State1} ->
+ write_message(MsgId, Msg,
+ record_pending_confirm(CRef, MsgId, State1));
+ {ignore, CurFile, State1 = #msstate { current_file = CurFile }} ->
+ State1;
+ {ignore, _File, State1} ->
+ true = ets:delete_object(CurFileCacheEts, {MsgId, Msg, 0}),
+ State1;
+ {confirm, CurFile, State1 = #msstate { current_file = CurFile }}->
+ record_pending_confirm(CRef, MsgId, State1);
+ {confirm, _File, State1} ->
+ true = ets:delete_object(CurFileCacheEts, {MsgId, Msg, 0}),
+ update_pending_confirms(
+ fun (MsgOnDiskFun, CTM) ->
+ MsgOnDiskFun(gb_sets:singleton(MsgId), written),
+ CTM
+ end, CRef, State1)
+ end.
+
+remove_message(MsgId, CRef,
+ State = #msstate { file_summary_ets = FileSummaryEts }) ->
+ case should_mask_action(CRef, MsgId, State) of
+ {true, _Location} ->
+ State;
+ {false_if_increment, #msg_location { ref_count = 0 }} ->
+ %% CRef has tried to both write and remove this msg whilst
+ %% it's being GC'd.
+ %%
+ %% ASSERTION: [#file_summary { locked = true }] =
+ %% ets:lookup(FileSummaryEts, File),
+ State;
+ {_Mask, #msg_location { ref_count = RefCount, file = File,
+ total_size = TotalSize }}
+ when RefCount > 0 ->
+ %% only update field, otherwise bad interaction with
+ %% concurrent GC
+ Dec = fun () -> index_update_ref_count(
+ MsgId, RefCount - 1, State) end,
+ case RefCount of
+ %% don't remove from cur_file_cache_ets here because
+ %% there may be further writes in the mailbox for the
+ %% same msg.
+ 1 -> case ets:lookup(FileSummaryEts, File) of
+ [#file_summary { locked = true }] ->
+ add_to_pending_gc_completion(
+ {remove, MsgId, CRef}, File, State);
+ [#file_summary {}] ->
+ ok = Dec(),
+ delete_file_if_empty(
+ File, adjust_valid_total_size(
+ File, -TotalSize, State))
+ end;
+ _ -> ok = Dec(),
+ State
+ end
+ end.
+
+write_message(MsgId, Msg,
+ State = #msstate { current_file_handle = CurHdl,
+ current_file = CurFile,
+ sum_valid_data = SumValid,
+ sum_file_size = SumFileSize,
+ file_summary_ets = FileSummaryEts }) ->
+ {ok, CurOffset} = file_handle_cache:current_virtual_offset(CurHdl),
+ {ok, TotalSize} = rabbit_msg_file:append(CurHdl, MsgId, Msg),
+ ok = index_insert(
+ #msg_location { msg_id = MsgId, ref_count = 1, file = CurFile,
+ offset = CurOffset, total_size = TotalSize }, State),
+ [#file_summary { right = undefined, locked = false }] =
+ ets:lookup(FileSummaryEts, CurFile),
+ [_,_] = ets:update_counter(FileSummaryEts, CurFile,
+ [{#file_summary.valid_total_size, TotalSize},
+ {#file_summary.file_size, TotalSize}]),
+ maybe_roll_to_new_file(CurOffset + TotalSize,
+ State #msstate {
+ sum_valid_data = SumValid + TotalSize,
+ sum_file_size = SumFileSize + TotalSize }).
+
+read_message(MsgId, From, State) ->
+ case index_lookup_positive_ref_count(MsgId, State) of
+ not_found -> gen_server2:reply(From, not_found),
+ State;
+ MsgLocation -> read_message1(From, MsgLocation, State)
+ end.
+
+read_message1(From, #msg_location { msg_id = MsgId, file = File,
+ offset = Offset } = MsgLoc,
+ State = #msstate { current_file = CurFile,
+ current_file_handle = CurHdl,
+ file_summary_ets = FileSummaryEts,
+ cur_file_cache_ets = CurFileCacheEts }) ->
+ case File =:= CurFile of
+ true -> {Msg, State1} =
+                %% can return [] if the msg was already in the file at startup
+ case ets:lookup(CurFileCacheEts, MsgId) of
+ [] ->
+ {ok, RawOffSet} =
+ file_handle_cache:current_raw_offset(CurHdl),
+ ok = case Offset >= RawOffSet of
+ true -> file_handle_cache:flush(CurHdl);
+ false -> ok
+ end,
+ read_from_disk(MsgLoc, State);
+ [{MsgId, Msg1, _CacheRefCount}] ->
+ {Msg1, State}
+ end,
+ gen_server2:reply(From, {ok, Msg}),
+ State1;
+ false -> [#file_summary { locked = Locked }] =
+ ets:lookup(FileSummaryEts, File),
+ case Locked of
+ true -> add_to_pending_gc_completion({read, MsgId, From},
+ File, State);
+ false -> {Msg, State1} = read_from_disk(MsgLoc, State),
+ gen_server2:reply(From, {ok, Msg}),
+ State1
+ end
+ end.
+
+read_from_disk(#msg_location { msg_id = MsgId, file = File, offset = Offset,
+ total_size = TotalSize }, State) ->
+ {Hdl, State1} = get_read_handle(File, State),
+ {ok, Offset} = file_handle_cache:position(Hdl, Offset),
+ {ok, {MsgId, Msg}} =
+ case rabbit_msg_file:read(Hdl, TotalSize) of
+ {ok, {MsgId, _}} = Obj ->
+ Obj;
+ Rest ->
+ {error, {misread, [{old_state, State},
+ {file_num, File},
+ {offset, Offset},
+ {msg_id, MsgId},
+ {read, Rest},
+ {proc_dict, get()}
+ ]}}
+ end,
+ {Msg, State1}.
+
+contains_message(MsgId, From,
+ State = #msstate { pending_gc_completion = Pending }) ->
+ case index_lookup_positive_ref_count(MsgId, State) of
+ not_found ->
+ gen_server2:reply(From, false),
+ State;
+ #msg_location { file = File } ->
+ case maps:is_key(File, Pending) of
+ true -> add_to_pending_gc_completion(
+ {contains, MsgId, From}, File, State);
+ false -> gen_server2:reply(From, true),
+ State
+ end
+ end.
+
+add_to_pending_gc_completion(
+ Op, File, State = #msstate { pending_gc_completion = Pending }) ->
+ State #msstate { pending_gc_completion =
+ rabbit_misc:maps_cons(File, Op, Pending) }.
+
+run_pending(Files, State) ->
+ lists:foldl(
+ fun (File, State1 = #msstate { pending_gc_completion = Pending }) ->
+ Pending1 = maps:remove(File, Pending),
+ lists:foldl(
+ fun run_pending_action/2,
+ State1 #msstate { pending_gc_completion = Pending1 },
+ lists:reverse(maps:get(File, Pending)))
+ end, State, Files).
+
+run_pending_action({read, MsgId, From}, State) ->
+ read_message(MsgId, From, State);
+run_pending_action({contains, MsgId, From}, State) ->
+ contains_message(MsgId, From, State);
+run_pending_action({remove, MsgId, CRef}, State) ->
+ remove_message(MsgId, CRef, State).
+
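+%% Apply SuccessFun to the new counter value, or run FailThunk if the
+%% key has vanished (ets:update_counter/3 raises badarg on missing
+%% keys).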
+safe_ets_update_counter(Tab, Key, UpdateOp, SuccessFun, FailThunk) ->
+ try
+ SuccessFun(ets:update_counter(Tab, Key, UpdateOp))
+ catch error:badarg -> FailThunk()
+ end.
+
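+%% Insert the message into the cache with a ref count of 1, or bump
+%% the count if it is already present; the recursive retry covers the
+%% race where the entry is deleted between insert_new and
+%% update_counter.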
+update_msg_cache(CacheEts, MsgId, Msg) ->
+ case ets:insert_new(CacheEts, {MsgId, Msg, 1}) of
+ true -> ok;
+ false -> safe_ets_update_counter(
+ CacheEts, MsgId, {3, +1}, fun (_) -> ok end,
+ fun () -> update_msg_cache(CacheEts, MsgId, Msg) end)
+ end.
+
+adjust_valid_total_size(File, Delta, State = #msstate {
+ sum_valid_data = SumValid,
+ file_summary_ets = FileSummaryEts }) ->
+ [_] = ets:update_counter(FileSummaryEts, File,
+ [{#file_summary.valid_total_size, Delta}]),
+ State #msstate { sum_valid_data = SumValid + Delta }.
+
+maps_store(Key, Val, Dict) ->
+ false = maps:is_key(Key, Dict),
+ maps:put(Key, Val, Dict).
+
+update_pending_confirms(Fun, CRef,
+ State = #msstate { clients = Clients,
+ cref_to_msg_ids = CTM }) ->
+ case maps:get(CRef, Clients) of
+ {_CPid, undefined, _CloseFDsFun} -> State;
+ {_CPid, MsgOnDiskFun, _CloseFDsFun} -> CTM1 = Fun(MsgOnDiskFun, CTM),
+ State #msstate {
+ cref_to_msg_ids = CTM1 }
+ end.
+
+record_pending_confirm(CRef, MsgId, State) ->
+ update_pending_confirms(
+ fun (_MsgOnDiskFun, CTM) ->
+ NewMsgIds = case maps:find(CRef, CTM) of
+ error -> gb_sets:singleton(MsgId);
+ {ok, MsgIds} -> gb_sets:add(MsgId, MsgIds)
+ end,
+ maps:put(CRef, NewMsgIds, CTM)
+ end, CRef, State).
+
+client_confirm(CRef, MsgIds, ActionTaken, State) ->
+ update_pending_confirms(
+ fun (MsgOnDiskFun, CTM) ->
+ case maps:find(CRef, CTM) of
+ {ok, Gs} -> MsgOnDiskFun(gb_sets:intersection(Gs, MsgIds),
+ ActionTaken),
+ MsgIds1 = rabbit_misc:gb_sets_difference(
+ Gs, MsgIds),
+ case gb_sets:is_empty(MsgIds1) of
+ true -> maps:remove(CRef, CTM);
+ false -> maps:put(CRef, MsgIds1, CTM)
+ end;
+ error -> CTM
+ end
+ end, CRef, State).
+
+blind_confirm(CRef, MsgIds, ActionTaken, State) ->
+ update_pending_confirms(
+ fun (MsgOnDiskFun, CTM) -> MsgOnDiskFun(MsgIds, ActionTaken), CTM end,
+ CRef, State).
+
+%% Detect whether the MsgId is older or younger than the client's death
+%% msg (if there is one). If the msg is older than the client death
+%% msg, and it has a 0 ref_count we must only alter the ref_count, not
+%% rewrite the msg - rewriting it would make it younger than the death
+%% msg and thus should be ignored. Note that this (correctly) returns
+%% false when testing to remove the death msg itself.
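+%% Returns {Mask, Location} where Mask is true, false or
+%% false_if_increment; the latter means the action may alter the
+%% ref_count but must not cause the msg to be rewritten (see
+%% write_action/3 and remove_message/3).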
+should_mask_action(CRef, MsgId,
+ State = #msstate{dying_clients = DyingClients}) ->
+ case {maps:find(CRef, DyingClients), index_lookup(MsgId, State)} of
+ {error, Location} ->
+ {false, Location};
+ {{ok, _}, not_found} ->
+ {true, not_found};
+ {{ok, Client}, #msg_location { file = File, offset = Offset,
+ ref_count = RefCount } = Location} ->
+ #dying_client{file = DeathFile, offset = DeathOffset} = Client,
+ {case {{DeathFile, DeathOffset} < {File, Offset}, RefCount} of
+ {true, _} -> true;
+ {false, 0} -> false_if_increment;
+ {false, _} -> false
+ end, Location}
+ end.
+
+%%----------------------------------------------------------------------------
+%% file helper functions
+%%----------------------------------------------------------------------------
+
+open_file(File, Mode) ->
+ file_handle_cache:open_with_absolute_path(
+ File, ?BINARY_MODE ++ Mode,
+ [{write_buffer, ?HANDLE_CACHE_BUFFER_SIZE},
+ {read_buffer, ?HANDLE_CACHE_BUFFER_SIZE}]).
+
+open_file(Dir, FileName, Mode) ->
+ open_file(form_filename(Dir, FileName), Mode).
+
+close_handle(Key, CState = #client_msstate { file_handle_cache = FHC }) ->
+ CState #client_msstate { file_handle_cache = close_handle(Key, FHC) };
+
+close_handle(Key, State = #msstate { file_handle_cache = FHC }) ->
+ State #msstate { file_handle_cache = close_handle(Key, FHC) };
+
+close_handle(Key, FHC) ->
+ case maps:find(Key, FHC) of
+ {ok, Hdl} -> ok = file_handle_cache:close(Hdl),
+ maps:remove(Key, FHC);
+ error -> FHC
+ end.
+
+mark_handle_open(FileHandlesEts, File, Ref) ->
+    %% It is fine for this insert to fail (the entry already
+    %% exists). Note it could fail with the value being 'close', in
+    %% which case it is not updated to 'open'.
+ ets:insert_new(FileHandlesEts, {{Ref, File}, open}),
+ true.
+
+%% See comment in client_read3 - only call this when the file is locked
+mark_handle_to_close(ClientRefs, FileHandlesEts, File, Invoke) ->
+ [ begin
+ case (ets:update_element(FileHandlesEts, Key, {2, close})
+ andalso Invoke) of
+ true -> case maps:get(Ref, ClientRefs) of
+ {_CPid, _MsgOnDiskFun, undefined} ->
+ ok;
+ {_CPid, _MsgOnDiskFun, CloseFDsFun} ->
+ ok = CloseFDsFun()
+ end;
+ false -> ok
+ end
+ end || {{Ref, _File} = Key, open} <-
+ ets:match_object(FileHandlesEts, {{'_', File}, open}) ],
+ true.
+
+safe_file_delete_fun(File, Dir, FileHandlesEts) ->
+ fun () -> safe_file_delete(File, Dir, FileHandlesEts) end.
+
+safe_file_delete(File, Dir, FileHandlesEts) ->
+ %% do not match on any value - it's the absence of the row that
+ %% indicates the client has really closed the file.
+ case ets:match_object(FileHandlesEts, {{'_', File}, '_'}, 1) of
+ {[_|_], _Cont} -> false;
+ _ -> ok = file:delete(
+ form_filename(Dir, filenum_to_name(File))),
+ true
+ end.
+
+-spec close_all_indicated
+ (client_msstate()) -> rabbit_types:ok(client_msstate()).
+
+close_all_indicated(#client_msstate { file_handles_ets = FileHandlesEts,
+ client_ref = Ref } =
+ CState) ->
+ Objs = ets:match_object(FileHandlesEts, {{Ref, '_'}, close}),
+ {ok, lists:foldl(fun ({Key = {_Ref, File}, close}, CStateM) ->
+ true = ets:delete(FileHandlesEts, Key),
+ close_handle(File, CStateM)
+ end, CState, Objs)}.
+
+close_all_handles(CState = #client_msstate { file_handles_ets = FileHandlesEts,
+ file_handle_cache = FHC,
+ client_ref = Ref }) ->
+ ok = maps:fold(fun (File, Hdl, ok) ->
+ true = ets:delete(FileHandlesEts, {Ref, File}),
+ file_handle_cache:close(Hdl)
+ end, ok, FHC),
+ CState #client_msstate { file_handle_cache = #{} };
+
+close_all_handles(State = #msstate { file_handle_cache = FHC }) ->
+ ok = maps:fold(fun (_Key, Hdl, ok) -> file_handle_cache:close(Hdl) end,
+ ok, FHC),
+ State #msstate { file_handle_cache = #{} }.
+
+get_read_handle(FileNum, CState = #client_msstate { file_handle_cache = FHC,
+ dir = Dir }) ->
+ {Hdl, FHC2} = get_read_handle(FileNum, FHC, Dir),
+ {Hdl, CState #client_msstate { file_handle_cache = FHC2 }};
+
+get_read_handle(FileNum, State = #msstate { file_handle_cache = FHC,
+ dir = Dir }) ->
+ {Hdl, FHC2} = get_read_handle(FileNum, FHC, Dir),
+ {Hdl, State #msstate { file_handle_cache = FHC2 }}.
+
+get_read_handle(FileNum, FHC, Dir) ->
+ case maps:find(FileNum, FHC) of
+ {ok, Hdl} -> {Hdl, FHC};
+ error -> {ok, Hdl} = open_file(Dir, filenum_to_name(FileNum),
+ ?READ_MODE),
+ {Hdl, maps:put(FileNum, Hdl, FHC)}
+ end.
+
+preallocate(Hdl, FileSizeLimit, FinalPos) ->
+ {ok, FileSizeLimit} = file_handle_cache:position(Hdl, FileSizeLimit),
+ ok = file_handle_cache:truncate(Hdl),
+ {ok, FinalPos} = file_handle_cache:position(Hdl, FinalPos),
+ ok.
+
+truncate_and_extend_file(Hdl, Lowpoint, Highpoint) ->
+ {ok, Lowpoint} = file_handle_cache:position(Hdl, Lowpoint),
+ ok = file_handle_cache:truncate(Hdl),
+ ok = preallocate(Hdl, Highpoint, Lowpoint).
+
+form_filename(Dir, Name) -> filename:join(Dir, Name).
+
+filenum_to_name(File) -> integer_to_list(File) ++ ?FILE_EXTENSION.
+
+filename_to_num(FileName) -> list_to_integer(filename:rootname(FileName)).
+
+list_sorted_filenames(Dir, Ext) ->
+ lists:sort(fun (A, B) -> filename_to_num(A) < filename_to_num(B) end,
+ filelib:wildcard("*" ++ Ext, Dir)).
+
+%%----------------------------------------------------------------------------
+%% index
+%%----------------------------------------------------------------------------
+
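+%% The message location index is pluggable (selected via the
+%% msg_store_index_module application env, see init/1); the helpers
+%% below dispatch to the configured module, passing the index state
+%% held in the #msstate{}, #client_msstate{} or #gc_state{} record.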
+index_lookup_positive_ref_count(Key, State) ->
+ case index_lookup(Key, State) of
+ not_found -> not_found;
+ #msg_location { ref_count = 0 } -> not_found;
+ #msg_location {} = MsgLocation -> MsgLocation
+ end.
+
+index_update_ref_count(Key, RefCount, State) ->
+ index_update_fields(Key, {#msg_location.ref_count, RefCount}, State).
+
+index_lookup(Key, #gc_state { index_module = Index,
+ index_state = State }) ->
+ Index:lookup(Key, State);
+
+index_lookup(Key, #client_msstate { index_module = Index,
+ index_state = State }) ->
+ Index:lookup(Key, State);
+
+index_lookup(Key, #msstate { index_module = Index, index_state = State }) ->
+ Index:lookup(Key, State).
+
+index_insert(Obj, #msstate { index_module = Index, index_state = State }) ->
+ Index:insert(Obj, State).
+
+index_update(Obj, #msstate { index_module = Index, index_state = State }) ->
+ Index:update(Obj, State).
+
+index_update_fields(Key, Updates, #msstate{ index_module = Index,
+ index_state = State }) ->
+ Index:update_fields(Key, Updates, State);
+index_update_fields(Key, Updates, #gc_state{ index_module = Index,
+ index_state = State }) ->
+ Index:update_fields(Key, Updates, State).
+
+index_delete(Key, #msstate { index_module = Index, index_state = State }) ->
+ Index:delete(Key, State).
+
+index_delete_object(Obj, #gc_state{ index_module = Index,
+ index_state = State }) ->
+ Index:delete_object(Obj, State).
+
+index_clean_up_temporary_reference_count_entries(
+ #msstate { index_module = Index,
+ index_state = State }) ->
+ Index:clean_up_temporary_reference_count_entries_without_file(State).
+
+%%----------------------------------------------------------------------------
+%% shutdown and recovery
+%%----------------------------------------------------------------------------
+
+recover_index_and_client_refs(IndexModule, _Recover, undefined, Dir, _Name) ->
+ {false, IndexModule:new(Dir), []};
+recover_index_and_client_refs(IndexModule, false, _ClientRefs, Dir, Name) ->
+ rabbit_log:warning("Message store ~tp: rebuilding indices from scratch~n", [Name]),
+ {false, IndexModule:new(Dir), []};
+recover_index_and_client_refs(IndexModule, true, ClientRefs, Dir, Name) ->
+ Fresh = fun (ErrorMsg, ErrorArgs) ->
+ rabbit_log:warning("Message store ~tp : " ++ ErrorMsg ++ "~n"
+ "rebuilding indices from scratch~n",
+ [Name | ErrorArgs]),
+ {false, IndexModule:new(Dir), []}
+ end,
+ case read_recovery_terms(Dir) of
+ {false, Error} ->
+ Fresh("failed to read recovery terms: ~p", [Error]);
+ {true, Terms} ->
+ RecClientRefs = proplists:get_value(client_refs, Terms, []),
+ RecIndexModule = proplists:get_value(index_module, Terms),
+ case (lists:sort(ClientRefs) =:= lists:sort(RecClientRefs)
+ andalso IndexModule =:= RecIndexModule) of
+ true -> case IndexModule:recover(Dir) of
+ {ok, IndexState1} ->
+ {true, IndexState1, ClientRefs};
+ {error, Error} ->
+ Fresh("failed to recover index: ~p", [Error])
+ end;
+ false -> Fresh("recovery terms differ from present", [])
+ end
+ end.
+
+store_recovery_terms(Terms, Dir) ->
+ rabbit_file:write_term_file(filename:join(Dir, ?CLEAN_FILENAME), Terms).
+
+read_recovery_terms(Dir) ->
+ Path = filename:join(Dir, ?CLEAN_FILENAME),
+ case rabbit_file:read_term_file(Path) of
+ {ok, Terms} -> case file:delete(Path) of
+ ok -> {true, Terms};
+ {error, Error} -> {false, Error}
+ end;
+ {error, Error} -> {false, Error}
+ end.
+
+store_file_summary(Tid, Dir) ->
+ ets:tab2file(Tid, filename:join(Dir, ?FILE_SUMMARY_FILENAME),
+ [{extended_info, [object_count]}]).
+
+recover_file_summary(false, _Dir) ->
+ %% TODO: the only reason for this to be an *ordered*_set is so
+ %% that a) maybe_compact can start a traversal from the eldest
+ %% file, and b) build_index in fast recovery mode can easily
+ %% identify the current file. It's awkward to have both that
+%% ordering and the left/right pointers in the entries - replacing
+ %% the former with some additional bit of state would be easy, but
+ %% ditching the latter would be neater.
+ {false, ets:new(rabbit_msg_store_file_summary,
+ [ordered_set, public, {keypos, #file_summary.file}])};
+recover_file_summary(true, Dir) ->
+ Path = filename:join(Dir, ?FILE_SUMMARY_FILENAME),
+ case ets:file2tab(Path) of
+ {ok, Tid} -> ok = file:delete(Path),
+ {true, Tid};
+ {error, _Error} -> recover_file_summary(false, Dir)
+ end.
+
+count_msg_refs(Gen, Seed, State) ->
+ case Gen(Seed) of
+ finished ->
+ ok;
+ {_MsgId, 0, Next} ->
+ count_msg_refs(Gen, Next, State);
+ {MsgId, Delta, Next} ->
+ ok = case index_lookup(MsgId, State) of
+ not_found ->
+ index_insert(#msg_location { msg_id = MsgId,
+ file = undefined,
+ ref_count = Delta },
+ State);
+ #msg_location { ref_count = RefCount } = StoreEntry ->
+ NewRefCount = RefCount + Delta,
+ case NewRefCount of
+ 0 -> index_delete(MsgId, State);
+ _ -> index_update(StoreEntry #msg_location {
+ ref_count = NewRefCount },
+ State)
+ end
+ end,
+ count_msg_refs(Gen, Next, State)
+ end.
+
+recover_crashed_compactions(Dir) ->
+ FileNames = list_sorted_filenames(Dir, ?FILE_EXTENSION),
+ TmpFileNames = list_sorted_filenames(Dir, ?FILE_EXTENSION_TMP),
+ lists:foreach(
+ fun (TmpFileName) ->
+ NonTmpRelatedFileName =
+ filename:rootname(TmpFileName) ++ ?FILE_EXTENSION,
+ true = lists:member(NonTmpRelatedFileName, FileNames),
+ ok = recover_crashed_compaction(
+ Dir, TmpFileName, NonTmpRelatedFileName)
+ end, TmpFileNames),
+ TmpFileNames == [].
+
+recover_crashed_compaction(Dir, TmpFileName, NonTmpRelatedFileName) ->
+ %% Because a msg can legitimately appear multiple times in the
+ %% same file, identifying the contents of the tmp file and where
+ %% they came from is non-trivial. If we are recovering a crashed
+ %% compaction then we will be rebuilding the index, which can cope
+ %% with duplicates appearing. Thus the simplest and safest thing
+ %% to do is to append the contents of the tmp file to its main
+ %% file.
+ {ok, TmpHdl} = open_file(Dir, TmpFileName, ?READ_MODE),
+ {ok, MainHdl} = open_file(Dir, NonTmpRelatedFileName,
+ ?READ_MODE ++ ?WRITE_MODE),
+ {ok, _End} = file_handle_cache:position(MainHdl, eof),
+ Size = filelib:file_size(form_filename(Dir, TmpFileName)),
+ {ok, Size} = file_handle_cache:copy(TmpHdl, MainHdl, Size),
+ ok = file_handle_cache:close(MainHdl),
+ ok = file_handle_cache:delete(TmpHdl),
+ ok.
+
+scan_file_for_valid_messages(File) ->
+ case open_file(File, ?READ_MODE) of
+ {ok, Hdl} -> Valid = rabbit_msg_file:scan(
+ Hdl, filelib:file_size(File),
+ fun scan_fun/2, []),
+ ok = file_handle_cache:close(Hdl),
+ Valid;
+ {error, enoent} -> {ok, [], 0};
+ {error, Reason} -> {error, {unable_to_scan_file,
+ filename:basename(File),
+ Reason}}
+ end.
+
+scan_file_for_valid_messages(Dir, FileName) ->
+ scan_file_for_valid_messages(form_filename(Dir, FileName)).
+
+scan_fun({MsgId, TotalSize, Offset, _Msg}, Acc) ->
+ [{MsgId, TotalSize, Offset} | Acc].
+
+%% Takes the list in *ascending* order (i.e. eldest message
+%% first). This is the opposite of what scan_file_for_valid_messages
+%% produces, which is youngest first.
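+%%
+%% For instance, msgs at offsets 0 (size 10), 10 (size 5) and 20
+%% (size 7) leave a 5 byte gap before the third, so the result is
+%% {15, [the msg at offset 20]}.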
+drop_contiguous_block_prefix(L) -> drop_contiguous_block_prefix(L, 0).
+
+drop_contiguous_block_prefix([], ExpectedOffset) ->
+ {ExpectedOffset, []};
+drop_contiguous_block_prefix([#msg_location { offset = ExpectedOffset,
+ total_size = TotalSize } | Tail],
+ ExpectedOffset) ->
+ ExpectedOffset1 = ExpectedOffset + TotalSize,
+ drop_contiguous_block_prefix(Tail, ExpectedOffset1);
+drop_contiguous_block_prefix(MsgsAfterGap, ExpectedOffset) ->
+ {ExpectedOffset, MsgsAfterGap}.
+
+build_index(true, _StartupFunState,
+ State = #msstate { file_summary_ets = FileSummaryEts }) ->
+ ets:foldl(
+ fun (#file_summary { valid_total_size = ValidTotalSize,
+ file_size = FileSize,
+ file = File },
+ {_Offset, State1 = #msstate { sum_valid_data = SumValid,
+ sum_file_size = SumFileSize }}) ->
+ {FileSize, State1 #msstate {
+ sum_valid_data = SumValid + ValidTotalSize,
+ sum_file_size = SumFileSize + FileSize,
+ current_file = File }}
+ end, {0, State}, FileSummaryEts);
+build_index(false, {MsgRefDeltaGen, MsgRefDeltaGenInit},
+ State = #msstate { dir = Dir }) ->
+ rabbit_log:debug("Rebuilding message refcount...~n", []),
+ ok = count_msg_refs(MsgRefDeltaGen, MsgRefDeltaGenInit, State),
+ rabbit_log:debug("Done rebuilding message refcount~n", []),
+ {ok, Pid} = gatherer:start_link(),
+ case [filename_to_num(FileName) ||
+ FileName <- list_sorted_filenames(Dir, ?FILE_EXTENSION)] of
+ [] -> rebuild_index(Pid, [State #msstate.current_file],
+ State);
+ Files -> {Offset, State1} = rebuild_index(Pid, Files, State),
+ {Offset, lists:foldl(fun delete_file_if_empty/2,
+ State1, Files)}
+ end.
+
+build_index_worker(Gatherer, State = #msstate { dir = Dir },
+ Left, File, Files) ->
+ FileName = filenum_to_name(File),
+ rabbit_log:debug("Rebuilding message location index from ~p (~B file(s) remaining)~n",
+ [form_filename(Dir, FileName), length(Files)]),
+ {ok, Messages, FileSize} =
+ scan_file_for_valid_messages(Dir, FileName),
+ {ValidMessages, ValidTotalSize} =
+ lists:foldl(
+ fun (Obj = {MsgId, TotalSize, Offset}, {VMAcc, VTSAcc}) ->
+ case index_lookup(MsgId, State) of
+ #msg_location { file = undefined } = StoreEntry ->
+ ok = index_update(StoreEntry #msg_location {
+ file = File, offset = Offset,
+ total_size = TotalSize },
+ State),
+ {[Obj | VMAcc], VTSAcc + TotalSize};
+ _ ->
+ {VMAcc, VTSAcc}
+ end
+ end, {[], 0}, Messages),
+ {Right, FileSize1} =
+ case Files of
+ %% if it's the last file, we'll truncate to remove any
+ %% rubbish above the last valid message. This affects the
+ %% file size.
+ [] -> {undefined, case ValidMessages of
+ [] -> 0;
+ _ -> {_MsgId, TotalSize, Offset} =
+ lists:last(ValidMessages),
+ Offset + TotalSize
+ end};
+ [F|_] -> {F, FileSize}
+ end,
+ ok = gatherer:in(Gatherer, #file_summary {
+ file = File,
+ valid_total_size = ValidTotalSize,
+ left = Left,
+ right = Right,
+ file_size = FileSize1,
+ locked = false,
+ readers = 0 }),
+ ok = gatherer:finish(Gatherer).
+
+enqueue_build_index_workers(_Gatherer, _Left, [], _State) ->
+ exit(normal);
+enqueue_build_index_workers(Gatherer, Left, [File|Files], State) ->
+ ok = worker_pool:dispatch_sync(
+ fun () ->
+ link(Gatherer),
+ ok = build_index_worker(Gatherer, State,
+ Left, File, Files),
+ unlink(Gatherer),
+ ok
+ end),
+ enqueue_build_index_workers(Gatherer, File, Files, State).
+
+reduce_index(Gatherer, LastFile,
+ State = #msstate { file_summary_ets = FileSummaryEts,
+ sum_valid_data = SumValid,
+ sum_file_size = SumFileSize }) ->
+ case gatherer:out(Gatherer) of
+ empty ->
+ ok = gatherer:stop(Gatherer),
+ ok = index_clean_up_temporary_reference_count_entries(State),
+ Offset = case ets:lookup(FileSummaryEts, LastFile) of
+ [] -> 0;
+ [#file_summary { file_size = FileSize }] -> FileSize
+ end,
+ {Offset, State #msstate { current_file = LastFile }};
+ {value, #file_summary { valid_total_size = ValidTotalSize,
+ file_size = FileSize } = FileSummary} ->
+ true = ets:insert_new(FileSummaryEts, FileSummary),
+ reduce_index(Gatherer, LastFile,
+ State #msstate {
+ sum_valid_data = SumValid + ValidTotalSize,
+ sum_file_size = SumFileSize + FileSize })
+ end.
+
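+%% The gatherer is forked once per file, so gatherer:out/1 only
+%% reports 'empty' after every build_index_worker has called
+%% gatherer:finish/1; reduce_index/3 drains the per-file summaries
+%% into FileSummaryEts as they arrive.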
+rebuild_index(Gatherer, Files, State) ->
+ lists:foreach(fun (_File) ->
+ ok = gatherer:fork(Gatherer)
+ end, Files),
+ Pid = spawn(
+ fun () ->
+ enqueue_build_index_workers(Gatherer, undefined,
+ Files, State)
+ end),
+ erlang:monitor(process, Pid),
+ reduce_index(Gatherer, lists:last(Files), State).
+
+%%----------------------------------------------------------------------------
+%% garbage collection / compaction / aggregation -- internal
+%%----------------------------------------------------------------------------
+
+maybe_roll_to_new_file(
+ Offset,
+ State = #msstate { dir = Dir,
+ current_file_handle = CurHdl,
+ current_file = CurFile,
+ file_summary_ets = FileSummaryEts,
+ cur_file_cache_ets = CurFileCacheEts,
+ file_size_limit = FileSizeLimit })
+ when Offset >= FileSizeLimit ->
+ State1 = internal_sync(State),
+ ok = file_handle_cache:close(CurHdl),
+ NextFile = CurFile + 1,
+ {ok, NextHdl} = open_file(Dir, filenum_to_name(NextFile), ?WRITE_MODE),
+ true = ets:insert_new(FileSummaryEts, #file_summary {
+ file = NextFile,
+ valid_total_size = 0,
+ left = CurFile,
+ right = undefined,
+ file_size = 0,
+ locked = false,
+ readers = 0 }),
+ true = ets:update_element(FileSummaryEts, CurFile,
+ {#file_summary.right, NextFile}),
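+    %% Drop cache entries whose count field is zero: the cache only
+    %% needs to cover messages still being written to the current
+    %% file, so such entries are evicted when we roll to a new one.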
+ true = ets:match_delete(CurFileCacheEts, {'_', '_', 0}),
+ maybe_compact(State1 #msstate { current_file_handle = NextHdl,
+ current_file = NextFile });
+maybe_roll_to_new_file(_, State) ->
+ State.
+
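+%% Compaction is triggered lazily: with an (illustrative) file size
+%% limit of 16MiB the store must span more than 32MiB on disk, and
+%% the garbage ratio (SumFileSize - SumValid) / SumFileSize must
+%% exceed ?GARBAGE_FRACTION, before candidate files are sought.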
+maybe_compact(State = #msstate { sum_valid_data = SumValid,
+ sum_file_size = SumFileSize,
+ gc_pid = GCPid,
+ pending_gc_completion = Pending,
+ file_summary_ets = FileSummaryEts,
+ file_size_limit = FileSizeLimit })
+ when SumFileSize > 2 * FileSizeLimit andalso
+ (SumFileSize - SumValid) / SumFileSize > ?GARBAGE_FRACTION ->
+ %% TODO: the algorithm here is sub-optimal - it may result in a
+ %% complete traversal of FileSummaryEts.
+ First = ets:first(FileSummaryEts),
+ case First =:= '$end_of_table' orelse
+ maps:size(Pending) >= ?MAXIMUM_SIMULTANEOUS_GC_FILES of
+ true ->
+ State;
+ false ->
+ case find_files_to_combine(FileSummaryEts, FileSizeLimit,
+ ets:lookup(FileSummaryEts, First)) of
+ not_found ->
+ State;
+ {Src, Dst} ->
+ Pending1 = maps_store(Dst, [],
+ maps_store(Src, [], Pending)),
+ State1 = close_handle(Src, close_handle(Dst, State)),
+ true = ets:update_element(FileSummaryEts, Src,
+ {#file_summary.locked, true}),
+ true = ets:update_element(FileSummaryEts, Dst,
+ {#file_summary.locked, true}),
+ ok = rabbit_msg_store_gc:combine(GCPid, Src, Dst),
+ State1 #msstate { pending_gc_completion = Pending1 }
+ end
+ end;
+maybe_compact(State) ->
+ State.
+
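+%% Walk the file summaries left to right (eldest first), looking for
+%% an adjacent, unlocked pair of non-empty files whose combined live
+%% data fits under the file size limit; the right-hand file (Src) is
+%% merged into its left neighbour (Dst). Src must itself have a right
+%% neighbour, so the current (right-most) file is never a candidate.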
+find_files_to_combine(FileSummaryEts, FileSizeLimit,
+ [#file_summary { file = Dst,
+ valid_total_size = DstValid,
+ right = Src,
+ locked = DstLocked }]) ->
+ case Src of
+ undefined ->
+ not_found;
+ _ ->
+ [#file_summary { file = Src,
+ valid_total_size = SrcValid,
+ left = Dst,
+ right = SrcRight,
+ locked = SrcLocked }] = Next =
+ ets:lookup(FileSummaryEts, Src),
+ case SrcRight of
+ undefined -> not_found;
+ _ -> case (DstValid + SrcValid =< FileSizeLimit) andalso
+ (DstValid > 0) andalso (SrcValid > 0) andalso
+ not (DstLocked orelse SrcLocked) of
+ true -> {Src, Dst};
+ false -> find_files_to_combine(
+ FileSummaryEts, FileSizeLimit, Next)
+ end
+ end
+ end.
+
+delete_file_if_empty(File, State = #msstate { current_file = File }) ->
+ State;
+delete_file_if_empty(File, State = #msstate {
+ gc_pid = GCPid,
+ file_summary_ets = FileSummaryEts,
+ pending_gc_completion = Pending }) ->
+ [#file_summary { valid_total_size = ValidData,
+ locked = false }] =
+ ets:lookup(FileSummaryEts, File),
+ case ValidData of
+ %% don't delete the file_summary_ets entry for File here
+ %% because we could have readers which need to be able to
+ %% decrement the readers count.
+ 0 -> true = ets:update_element(FileSummaryEts, File,
+ {#file_summary.locked, true}),
+ ok = rabbit_msg_store_gc:delete(GCPid, File),
+ Pending1 = maps_store(File, [], Pending),
+ close_handle(File,
+ State #msstate { pending_gc_completion = Pending1 });
+ _ -> State
+ end.
+
+cleanup_after_file_deletion(File,
+ #msstate { file_handles_ets = FileHandlesEts,
+ file_summary_ets = FileSummaryEts,
+ clients = Clients }) ->
+    %% Ensure that any clients with open file handles to the file
+    %% close them before using them again. This has to be done here,
+    %% in the msg_store rather than when the GC starts: if it were
+    %% done at GC start, a client could notice the close request,
+    %% then close and reopen the handle while the GC is still waiting
+    %% for readers to disappear, ending up with a handle into a file
+    %% the GC is about to rewrite.
+ true = mark_handle_to_close(Clients, FileHandlesEts, File, true),
+ [#file_summary { left = Left,
+ right = Right,
+ locked = true,
+ readers = 0 }] = ets:lookup(FileSummaryEts, File),
+ %% We'll never delete the current file, so right is never undefined
+ true = Right =/= undefined, %% ASSERTION
+ true = ets:update_element(FileSummaryEts, Right,
+ {#file_summary.left, Left}),
+ %% ensure the double linked list is maintained
+ true = case Left of
+ undefined -> true; %% File is the eldest file (left-most)
+ _ -> ets:update_element(FileSummaryEts, Left,
+ {#file_summary.right, Right})
+ end,
+ true = ets:delete(FileSummaryEts, File),
+ ok.
+
+%%----------------------------------------------------------------------------
+%% garbage collection / compaction / aggregation -- external
+%%----------------------------------------------------------------------------
+
+-spec combine_files(non_neg_integer(), non_neg_integer(), gc_state()) ->
+ {ok, deletion_thunk()} | {defer, [non_neg_integer()]}.
+
+combine_files(Source, Destination,
+ State = #gc_state { file_summary_ets = FileSummaryEts }) ->
+ [#file_summary{locked = true} = SourceSummary] =
+ ets:lookup(FileSummaryEts, Source),
+
+ [#file_summary{locked = true} = DestinationSummary] =
+ ets:lookup(FileSummaryEts, Destination),
+
+ case {SourceSummary, DestinationSummary} of
+ {#file_summary{readers = 0}, #file_summary{readers = 0}} ->
+ {ok, do_combine_files(SourceSummary, DestinationSummary,
+ Source, Destination, State)};
+ _ ->
+ rabbit_log:debug("Asked to combine files ~p and ~p but they have active readers. Deferring.",
+ [Source, Destination]),
+ DeferredFiles = [FileSummary#file_summary.file
+ || FileSummary <- [SourceSummary, DestinationSummary],
+ FileSummary#file_summary.readers /= 0],
+ {defer, DeferredFiles}
+ end.
+
+do_combine_files(SourceSummary, DestinationSummary,
+ Source, Destination,
+ State = #gc_state { file_summary_ets = FileSummaryEts,
+ file_handles_ets = FileHandlesEts,
+ dir = Dir,
+ msg_store = Server }) ->
+ #file_summary {
+ readers = 0,
+ left = Destination,
+ valid_total_size = SourceValid,
+ file_size = SourceFileSize,
+ locked = true } = SourceSummary,
+ #file_summary {
+ readers = 0,
+ right = Source,
+ valid_total_size = DestinationValid,
+ file_size = DestinationFileSize,
+ locked = true } = DestinationSummary,
+
+ SourceName = filenum_to_name(Source),
+ DestinationName = filenum_to_name(Destination),
+ {ok, SourceHdl} = open_file(Dir, SourceName,
+ ?READ_AHEAD_MODE),
+ {ok, DestinationHdl} = open_file(Dir, DestinationName,
+ ?READ_AHEAD_MODE ++ ?WRITE_MODE),
+ TotalValidData = SourceValid + DestinationValid,
+    %% If DestinationValid =:= DestinationContiguousTop then we don't
+    %% need a tmp file: we just truncate straight away and copy over
+    %% from Source. Otherwise we must write everything past
+    %% DestinationContiguousTop out to a tmp file, truncate, copy the
+    %% tmp contents back in, and then copy over from Source.
+ {DestinationWorkList, DestinationValid} =
+ load_and_vacuum_message_file(Destination, State),
+ {DestinationContiguousTop, DestinationWorkListTail} =
+ drop_contiguous_block_prefix(DestinationWorkList),
+ case DestinationWorkListTail of
+ [] -> ok = truncate_and_extend_file(
+ DestinationHdl, DestinationContiguousTop, TotalValidData);
+ _ -> Tmp = filename:rootname(DestinationName) ++ ?FILE_EXTENSION_TMP,
+ {ok, TmpHdl} = open_file(Dir, Tmp, ?READ_AHEAD_MODE++?WRITE_MODE),
+ ok = copy_messages(
+ DestinationWorkListTail, DestinationContiguousTop,
+ DestinationValid, DestinationHdl, TmpHdl, Destination,
+ State),
+ TmpSize = DestinationValid - DestinationContiguousTop,
+ %% so now Tmp contains everything we need to salvage
+ %% from Destination, and index_state has been updated to
+ %% reflect the compaction of Destination so truncate
+ %% Destination and copy from Tmp back to the end
+ {ok, 0} = file_handle_cache:position(TmpHdl, 0),
+ ok = truncate_and_extend_file(
+ DestinationHdl, DestinationContiguousTop, TotalValidData),
+ {ok, TmpSize} =
+ file_handle_cache:copy(TmpHdl, DestinationHdl, TmpSize),
+ %% position in DestinationHdl should now be DestinationValid
+ ok = file_handle_cache:sync(DestinationHdl),
+ ok = file_handle_cache:delete(TmpHdl)
+ end,
+ {SourceWorkList, SourceValid} = load_and_vacuum_message_file(Source, State),
+ ok = copy_messages(SourceWorkList, DestinationValid, TotalValidData,
+ SourceHdl, DestinationHdl, Destination, State),
+ %% tidy up
+ ok = file_handle_cache:close(DestinationHdl),
+ ok = file_handle_cache:close(SourceHdl),
+
+ %% don't update dest.right, because it could be changing at the
+ %% same time
+ true = ets:update_element(
+ FileSummaryEts, Destination,
+ [{#file_summary.valid_total_size, TotalValidData},
+ {#file_summary.file_size, TotalValidData}]),
+
+ Reclaimed = SourceFileSize + DestinationFileSize - TotalValidData,
+ rabbit_log:debug("Combined segment files number ~p (source) and ~p (destination), reclaimed ~p bytes",
+ [Source, Destination, Reclaimed]),
+ gen_server2:cast(Server, {combine_files, Source, Destination, Reclaimed}),
+ safe_file_delete_fun(Source, Dir, FileHandlesEts).
+
+-spec delete_file(non_neg_integer(), gc_state()) -> {ok, deletion_thunk()} | {defer, [non_neg_integer()]}.
+
+delete_file(File, State = #gc_state { file_summary_ets = FileSummaryEts,
+ file_handles_ets = FileHandlesEts,
+ dir = Dir,
+ msg_store = Server }) ->
+ case ets:lookup(FileSummaryEts, File) of
+ [#file_summary { valid_total_size = 0,
+ locked = true,
+ file_size = FileSize,
+ readers = 0 }] ->
+ {[], 0} = load_and_vacuum_message_file(File, State),
+ gen_server2:cast(Server, {delete_file, File, FileSize}),
+ {ok, safe_file_delete_fun(File, Dir, FileHandlesEts)};
+ [#file_summary{readers = Readers}] when Readers > 0 ->
+ rabbit_log:debug("Asked to delete file ~p but it has active readers. Deferring.",
+ [File]),
+ {defer, [File]}
+ end.
+
+load_and_vacuum_message_file(File, State = #gc_state { dir = Dir }) ->
+ %% Messages here will be end-of-file at start-of-list
+ {ok, Messages, _FileSize} =
+ scan_file_for_valid_messages(Dir, filenum_to_name(File)),
+ %% foldl will reverse so will end up with msgs in ascending offset order
+ lists:foldl(
+ fun ({MsgId, TotalSize, Offset}, Acc = {List, Size}) ->
+ case index_lookup(MsgId, State) of
+ #msg_location { file = File, total_size = TotalSize,
+ offset = Offset, ref_count = 0 } = Entry ->
+ ok = index_delete_object(Entry, State),
+ Acc;
+ #msg_location { file = File, total_size = TotalSize,
+ offset = Offset } = Entry ->
+ {[ Entry | List ], TotalSize + Size};
+ _ ->
+ Acc
+ end
+ end, {[], 0}, Messages).
+
+copy_messages(WorkList, InitOffset, FinalOffset, SourceHdl, DestinationHdl,
+ Destination, State) ->
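+    %% Coalesce messages that are contiguous in the source file into
+    %% blocks, so each block costs a single position/2 and copy/3 on
+    %% SourceHdl. E.g. (illustrative offsets) messages spanning 0..10
+    %% and 10..15 are copied as one 15-byte block, while a message
+    %% starting at offset 20 opens a new block.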
+ Copy = fun ({BlockStart, BlockEnd}) ->
+ BSize = BlockEnd - BlockStart,
+ {ok, BlockStart} =
+ file_handle_cache:position(SourceHdl, BlockStart),
+ {ok, BSize} =
+ file_handle_cache:copy(SourceHdl, DestinationHdl, BSize)
+ end,
+ case
+ lists:foldl(
+ fun (#msg_location { msg_id = MsgId, offset = Offset,
+ total_size = TotalSize },
+ {CurOffset, Block = {BlockStart, BlockEnd}}) ->
+ %% CurOffset is in the DestinationFile.
+ %% Offset, BlockStart and BlockEnd are in the SourceFile
+ %% update MsgLocation to reflect change of file and offset
+ ok = index_update_fields(MsgId,
+ [{#msg_location.file, Destination},
+ {#msg_location.offset, CurOffset}],
+ State),
+ {CurOffset + TotalSize,
+ case BlockEnd of
+ undefined ->
+ %% base case, called only for the first list elem
+ {Offset, Offset + TotalSize};
+ Offset ->
+ %% extend the current block because the
+ %% next msg follows straight on
+ {BlockStart, BlockEnd + TotalSize};
+ _ ->
+ %% found a gap, so actually do the work for
+ %% the previous block
+ Copy(Block),
+ {Offset, Offset + TotalSize}
+ end}
+ end, {InitOffset, {undefined, undefined}}, WorkList) of
+ {FinalOffset, Block} ->
+ case WorkList of
+ [] -> ok;
+ _ -> Copy(Block), %% do the last remaining block
+ ok = file_handle_cache:sync(DestinationHdl)
+ end;
+ {FinalOffsetZ, _Block} ->
+ {gc_error, [{expected, FinalOffset},
+ {got, FinalOffsetZ},
+ {destination, Destination}]}
+ end.
+
+-spec force_recovery(file:filename(), server()) -> 'ok'.
+
+force_recovery(BaseDir, Store) ->
+ Dir = filename:join(BaseDir, atom_to_list(Store)),
+ case file:delete(filename:join(Dir, ?CLEAN_FILENAME)) of
+ ok -> ok;
+ {error, enoent} -> ok
+ end,
+ recover_crashed_compactions(BaseDir),
+ ok.
+
+foreach_file(D, Fun, Files) ->
+ [ok = Fun(filename:join(D, File)) || File <- Files].
+
+foreach_file(D1, D2, Fun, Files) ->
+ [ok = Fun(filename:join(D1, File), filename:join(D2, File)) || File <- Files].
+
+-spec transform_dir(file:filename(), server(),
+ fun ((any()) -> (rabbit_types:ok_or_error2(msg(), any())))) -> 'ok'.
+
+transform_dir(BaseDir, Store, TransformFun) ->
+ Dir = filename:join(BaseDir, atom_to_list(Store)),
+ TmpDir = filename:join(Dir, ?TRANSFORM_TMP),
+ TransformFile = fun (A, B) -> transform_msg_file(A, B, TransformFun) end,
+ CopyFile = fun (Src, Dst) -> {ok, _Bytes} = file:copy(Src, Dst), ok end,
+ case filelib:is_dir(TmpDir) of
+ true -> throw({error, transform_failed_previously});
+ false -> FileList = list_sorted_filenames(Dir, ?FILE_EXTENSION),
+ foreach_file(Dir, TmpDir, TransformFile, FileList),
+ foreach_file(Dir, fun file:delete/1, FileList),
+ foreach_file(TmpDir, Dir, CopyFile, FileList),
+ foreach_file(TmpDir, fun file:delete/1, FileList),
+ ok = file:del_dir(TmpDir)
+ end.
+
+transform_msg_file(FileOld, FileNew, TransformFun) ->
+ ok = rabbit_file:ensure_parent_dirs_exist(FileNew),
+ {ok, RefOld} = file_handle_cache:open_with_absolute_path(
+ FileOld, [raw, binary, read], []),
+ {ok, RefNew} = file_handle_cache:open_with_absolute_path(
+ FileNew, [raw, binary, write],
+ [{write_buffer, ?HANDLE_CACHE_BUFFER_SIZE}]),
+ {ok, _Acc, _IgnoreSize} =
+ rabbit_msg_file:scan(
+ RefOld, filelib:file_size(FileOld),
+ fun({MsgId, _Size, _Offset, BinMsg}, ok) ->
+ {ok, MsgNew} = case binary_to_term(BinMsg) of
+ <<>> -> {ok, <<>>}; %% dying client marker
+ Msg -> TransformFun(Msg)
+ end,
+ {ok, _} = rabbit_msg_file:append(RefNew, MsgId, MsgNew),
+ ok
+ end, ok),
+ ok = file_handle_cache:close(RefOld),
+ ok = file_handle_cache:close(RefNew),
+ ok.
diff --git a/deps/rabbit/src/rabbit_msg_store_ets_index.erl b/deps/rabbit/src/rabbit_msg_store_ets_index.erl
new file mode 100644
index 0000000000..294417b5ba
--- /dev/null
+++ b/deps/rabbit/src/rabbit_msg_store_ets_index.erl
@@ -0,0 +1,76 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_msg_store_ets_index).
+
+-include("rabbit_msg_store.hrl").
+
+-behaviour(rabbit_msg_store_index).
+
+-export([new/1, recover/1,
+ lookup/2, insert/2, update/2, update_fields/3, delete/2,
+ delete_object/2, clean_up_temporary_reference_count_entries_without_file/1, terminate/1]).
+
+-define(MSG_LOC_NAME, rabbit_msg_store_ets_index).
+-define(FILENAME, "msg_store_index.ets").
+
+-record(state, { table, dir }).
+
+new(Dir) ->
+ file:delete(filename:join(Dir, ?FILENAME)),
+ Tid = ets:new(?MSG_LOC_NAME, [set, public, {keypos, #msg_location.msg_id}]),
+ #state { table = Tid, dir = Dir }.
+
+recover(Dir) ->
+ Path = filename:join(Dir, ?FILENAME),
+ case ets:file2tab(Path) of
+ {ok, Tid} -> file:delete(Path),
+ {ok, #state { table = Tid, dir = Dir }};
+ Error -> Error
+ end.
+
+lookup(Key, State) ->
+ case ets:lookup(State #state.table, Key) of
+ [] -> not_found;
+ [Entry] -> Entry
+ end.
+
+insert(Obj, State) ->
+ true = ets:insert_new(State #state.table, Obj),
+ ok.
+
+update(Obj, State) ->
+ true = ets:insert(State #state.table, Obj),
+ ok.
+
+update_fields(Key, Updates, State) ->
+ true = ets:update_element(State #state.table, Key, Updates),
+ ok.
+
+delete(Key, State) ->
+ true = ets:delete(State #state.table, Key),
+ ok.
+
+delete_object(Obj, State) ->
+ true = ets:delete_object(State #state.table, Obj),
+ ok.
+
+clean_up_temporary_reference_count_entries_without_file(State) ->
+ MatchHead = #msg_location { file = undefined, _ = '_' },
+ ets:select_delete(State #state.table, [{MatchHead, [], [true]}]),
+ ok.
+
+terminate(#state { table = MsgLocations, dir = Dir }) ->
+ case ets:tab2file(MsgLocations, filename:join(Dir, ?FILENAME),
+ [{extended_info, [object_count]}]) of
+ ok -> ok;
+ {error, Err} ->
+ rabbit_log:error("Unable to save message store index"
+ " for directory ~p.~nError: ~p~n",
+ [Dir, Err])
+ end,
+ ets:delete(MsgLocations).
diff --git a/deps/rabbit/src/rabbit_msg_store_gc.erl b/deps/rabbit/src/rabbit_msg_store_gc.erl
new file mode 100644
index 0000000000..41addc5fa6
--- /dev/null
+++ b/deps/rabbit/src/rabbit_msg_store_gc.erl
@@ -0,0 +1,125 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_msg_store_gc).
+
+-behaviour(gen_server2).
+
+-export([start_link/1, combine/3, delete/2, no_readers/2, stop/1]).
+
+-export([set_maximum_since_use/2]).
+
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
+ terminate/2, code_change/3, prioritise_cast/3]).
+
+-record(state,
+ { pending_no_readers,
+ on_action,
+ msg_store_state
+ }).
+
+-include("rabbit.hrl").
+
+%%----------------------------------------------------------------------------
+
+-spec start_link(rabbit_msg_store:gc_state()) ->
+ rabbit_types:ok_pid_or_error().
+
+start_link(MsgStoreState) ->
+ gen_server2:start_link(?MODULE, [MsgStoreState],
+ [{timeout, infinity}]).
+
+-spec combine(pid(), rabbit_msg_store:file_num(),
+ rabbit_msg_store:file_num()) -> 'ok'.
+
+combine(Server, Source, Destination) ->
+ gen_server2:cast(Server, {combine, Source, Destination}).
+
+-spec delete(pid(), rabbit_msg_store:file_num()) -> 'ok'.
+
+delete(Server, File) ->
+ gen_server2:cast(Server, {delete, File}).
+
+-spec no_readers(pid(), rabbit_msg_store:file_num()) -> 'ok'.
+
+no_readers(Server, File) ->
+ gen_server2:cast(Server, {no_readers, File}).
+
+-spec stop(pid()) -> 'ok'.
+
+stop(Server) ->
+ gen_server2:call(Server, stop, infinity).
+
+-spec set_maximum_since_use(pid(), non_neg_integer()) -> 'ok'.
+
+set_maximum_since_use(Pid, Age) ->
+ gen_server2:cast(Pid, {set_maximum_since_use, Age}).
+
+%%----------------------------------------------------------------------------
+
+init([MsgStoreState]) ->
+ ok = file_handle_cache:register_callback(?MODULE, set_maximum_since_use,
+ [self()]),
+ {ok, #state { pending_no_readers = #{},
+ on_action = [],
+ msg_store_state = MsgStoreState }, hibernate,
+ {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, ?DESIRED_HIBERNATE}}.
+
+prioritise_cast({set_maximum_since_use, _Age}, _Len, _State) -> 8;
+prioritise_cast(_Msg, _Len, _State) -> 0.
+
+handle_call(stop, _From, State) ->
+ {stop, normal, ok, State}.
+
+handle_cast({combine, Source, Destination}, State) ->
+ {noreply, attempt_action(combine, [Source, Destination], State), hibernate};
+
+handle_cast({delete, File}, State) ->
+ {noreply, attempt_action(delete, [File], State), hibernate};
+
+handle_cast({no_readers, File},
+ State = #state { pending_no_readers = Pending }) ->
+ {noreply, case maps:find(File, Pending) of
+ error ->
+ State;
+ {ok, {Action, Files}} ->
+ Pending1 = maps:remove(File, Pending),
+ attempt_action(
+ Action, Files,
+ State #state { pending_no_readers = Pending1 })
+ end, hibernate};
+
+handle_cast({set_maximum_since_use, Age}, State) ->
+ ok = file_handle_cache:set_maximum_since_use(Age),
+ {noreply, State, hibernate}.
+
+handle_info(Info, State) ->
+ {stop, {unhandled_info, Info}, State}.
+
+terminate(_Reason, State) ->
+ State.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
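+%% A successful action yields a deletion thunk. Thunks that cannot
+%% complete yet return false and are retained in on_action; they are
+%% re-run on every subsequent action and dropped once they return
+%% true.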
+attempt_action(Action, Files,
+ State = #state { pending_no_readers = Pending,
+ on_action = Thunks,
+ msg_store_state = MsgStoreState }) ->
+ case do_action(Action, Files, MsgStoreState) of
+ {ok, OkThunk} ->
+ State#state{on_action = lists:filter(fun (Thunk) -> not Thunk() end,
+ [OkThunk | Thunks])};
+ {defer, [File | _]} ->
+ Pending1 = maps:put(File, {Action, Files}, Pending),
+ State #state { pending_no_readers = Pending1 }
+ end.
+
+do_action(combine, [Source, Destination], MsgStoreState) ->
+ rabbit_msg_store:combine_files(Source, Destination, MsgStoreState);
+do_action(delete, [File], MsgStoreState) ->
+ rabbit_msg_store:delete_file(File, MsgStoreState).
diff --git a/deps/rabbit/src/rabbit_networking.erl b/deps/rabbit/src/rabbit_networking.erl
new file mode 100644
index 0000000000..433b1d7540
--- /dev/null
+++ b/deps/rabbit/src/rabbit_networking.erl
@@ -0,0 +1,663 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_networking).
+
+%% This module contains various functions that deal with networking,
+%% TCP and TLS listeners, and connection information.
+%%
+%% It also contains a boot step — boot/0 — that starts networking machinery.
+%% This module primarily covers AMQP 0-9-1 but some bits are reused in
+%% plugins that provide protocol support, e.g. STOMP or MQTT.
+%%
+%% Functions in this module take care of normalising TCP listener options,
+%% including dual IP stack cases, and starting the AMQP 0-9-1 listener(s).
+%%
+%% See also tcp_listener_sup and tcp_listener.
+
+-export([boot/0, start_tcp_listener/2, start_ssl_listener/3,
+ stop_tcp_listener/1, on_node_down/1, active_listeners/0,
+ node_listeners/1, node_client_listeners/1,
+ register_connection/1, unregister_connection/1,
+ register_non_amqp_connection/1, unregister_non_amqp_connection/1,
+ connections/0, non_amqp_connections/0, connection_info_keys/0,
+ connection_info/1, connection_info/2,
+ connection_info_all/0, connection_info_all/1,
+ emit_connection_info_all/4, emit_connection_info_local/3,
+ close_connection/2, close_connections/2, close_all_connections/1,
+ force_connection_event_refresh/1, force_non_amqp_connection_event_refresh/1,
+ handshake/2, tcp_host/1,
+ ranch_ref/1, ranch_ref/2, ranch_ref_of_protocol/1,
+ listener_of_protocol/1, stop_ranch_listener_of_protocol/1]).
+
+%% Used by TCP-based transports, e.g. STOMP adapter
+-export([tcp_listener_addresses/1, tcp_listener_spec/9,
+ ensure_ssl/0, fix_ssl_options/1, poodle_check/1]).
+
+-export([tcp_listener_started/4, tcp_listener_stopped/4]).
+
+-deprecated([{force_connection_event_refresh, 1, eventually}]).
+
+-export([
+ local_connections/0,
+ local_non_amqp_connections/0,
+ %% prefer local_connections/0
+ connections_local/0
+]).
+
+-include("rabbit.hrl").
+-include("rabbit_misc.hrl").
+
+%% IANA-suggested ephemeral port range is 49152 to 65535
+-define(FIRST_TEST_BIND_PORT, 49152).
+
+%%----------------------------------------------------------------------------
+
+-export_type([ip_port/0, hostname/0]).
+
+-type hostname() :: rabbit_net:hostname().
+-type ip_port() :: rabbit_net:ip_port().
+
+-type family() :: atom().
+-type listener_config() :: ip_port() |
+ {hostname(), ip_port()} |
+ {hostname(), ip_port(), family()}.
+-type address() :: {inet:ip_address(), ip_port(), family()}.
+-type name_prefix() :: atom().
+-type protocol() :: atom().
+-type label() :: string().
+
+-spec boot() -> 'ok' | no_return().
+
+boot() ->
+ ok = record_distribution_listener(),
+ _ = application:start(ranch),
+ rabbit_log:debug("Started Ranch"),
+ %% Failures will throw exceptions
+ _ = boot_listeners(fun boot_tcp/1, application:get_env(rabbit, num_tcp_acceptors, 10), "TCP"),
+ _ = boot_listeners(fun boot_tls/1, application:get_env(rabbit, num_ssl_acceptors, 10), "TLS"),
+ ok.
+
+boot_listeners(Fun, NumAcceptors, Type) ->
+ case Fun(NumAcceptors) of
+ ok ->
+ ok;
+ {error, {could_not_start_listener, Address, Port, Details}} = Error ->
+ rabbit_log:error("Failed to start ~s listener [~s]:~p, error: ~p",
+ [Type, Address, Port, Details]),
+ throw(Error)
+ end.
+
+boot_tcp(NumAcceptors) ->
+ {ok, TcpListeners} = application:get_env(tcp_listeners),
+ case lists:foldl(fun(Listener, ok) ->
+ start_tcp_listener(Listener, NumAcceptors);
+ (_Listener, Error) ->
+ Error
+ end,
+ ok, TcpListeners) of
+ ok -> ok;
+ {error, _} = Error -> Error
+ end.
+
+boot_tls(NumAcceptors) ->
+ case application:get_env(ssl_listeners) of
+ {ok, []} ->
+ ok;
+ {ok, SslListeners} ->
+ SslOpts = ensure_ssl(),
+ case poodle_check('AMQP') of
+ ok -> [start_ssl_listener(L, SslOpts, NumAcceptors) || L <- SslListeners];
+ danger -> ok
+ end,
+ ok
+ end.
+
+-spec ensure_ssl() -> rabbit_types:infos().
+
+ensure_ssl() ->
+ {ok, SslAppsConfig} = application:get_env(rabbit, ssl_apps),
+ ok = app_utils:start_applications(SslAppsConfig),
+ {ok, SslOptsConfig0} = application:get_env(rabbit, ssl_options),
+ rabbit_ssl_options:fix(SslOptsConfig0).
+
+-spec poodle_check(atom()) -> 'ok' | 'danger'.
+
+poodle_check(Context) ->
+ {ok, Vsn} = application:get_key(ssl, vsn),
+ case rabbit_misc:version_compare(Vsn, "5.3", gte) of %% R16B01
+ true -> ok;
+ false -> case application:get_env(rabbit, ssl_allow_poodle_attack) of
+ {ok, true} -> ok;
+ _ -> log_poodle_fail(Context),
+ danger
+ end
+ end.
+
+log_poodle_fail(Context) ->
+ rabbit_log:error(
+ "The installed version of Erlang (~s) contains the bug OTP-10905,~n"
+ "which makes it impossible to disable SSLv3. This makes the system~n"
+ "vulnerable to the POODLE attack. SSL listeners for ~s have therefore~n"
+ "been disabled.~n~n"
+ "You are advised to upgrade to a recent Erlang version; R16B01 is the~n"
+ "first version in which this bug is fixed, but later is usually~n"
+ "better.~n~n"
+ "If you cannot upgrade now and want to re-enable SSL listeners, you can~n"
+ "set the config item 'ssl_allow_poodle_attack' to 'true' in the~n"
+ "'rabbit' section of your configuration file.~n",
+ [rabbit_misc:otp_release(), Context]).
+
+fix_ssl_options(Config) ->
+ rabbit_ssl_options:fix(Config).
+
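+%% Accepts a bare port, {"auto", Port}, {Host, Port} or
+%% {Host, Port, Family}. Illustrative (hypothetical) values:
+%%   5672 | {"127.0.0.1", 5672} | {"::1", 5672, inet6}
+%% A bare port or "auto" expands to one address per IP stack, as
+%% determined by port_to_listeners/1.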
+-spec tcp_listener_addresses(listener_config()) -> [address()].
+
+tcp_listener_addresses(Port) when is_integer(Port) ->
+ tcp_listener_addresses_auto(Port);
+tcp_listener_addresses({"auto", Port}) ->
+ %% Variant to prevent lots of hacking around in bash and batch files
+ tcp_listener_addresses_auto(Port);
+tcp_listener_addresses({Host, Port}) ->
+ %% auto: determine family IPv4 / IPv6 after converting to IP address
+ tcp_listener_addresses({Host, Port, auto});
+tcp_listener_addresses({Host, Port, Family0})
+ when is_integer(Port) andalso (Port >= 0) andalso (Port =< 65535) ->
+ [{IPAddress, Port, Family} ||
+ {IPAddress, Family} <- getaddr(Host, Family0)];
+tcp_listener_addresses({_Host, Port, _Family0}) ->
+ rabbit_log:error("invalid port ~p - not 0..65535~n", [Port]),
+ throw({error, {invalid_port, Port}}).
+
+tcp_listener_addresses_auto(Port) ->
+ lists:append([tcp_listener_addresses(Listener) ||
+ Listener <- port_to_listeners(Port)]).
+
+-spec tcp_listener_spec
+ (name_prefix(), address(), [gen_tcp:listen_option()], module(), module(),
+ any(), protocol(), non_neg_integer(), label()) ->
+ supervisor:child_spec().
+
+tcp_listener_spec(NamePrefix, {IPAddress, Port, Family}, SocketOpts,
+ Transport, ProtoSup, ProtoOpts, Protocol, NumAcceptors, Label) ->
+ Args = [IPAddress, Port, Transport, [Family | SocketOpts], ProtoSup, ProtoOpts,
+ {?MODULE, tcp_listener_started, [Protocol, SocketOpts]},
+ {?MODULE, tcp_listener_stopped, [Protocol, SocketOpts]},
+ NumAcceptors, Label],
+ {rabbit_misc:tcp_name(NamePrefix, IPAddress, Port),
+ {tcp_listener_sup, start_link, Args},
+ transient, infinity, supervisor, [tcp_listener_sup]}.
+
+-spec ranch_ref(#listener{} | [{atom(), any()}] | 'undefined') -> ranch:ref() | undefined.
+ranch_ref(#listener{port = Port}) ->
+ [{IPAddress, Port, _Family} | _] = tcp_listener_addresses(Port),
+ {acceptor, IPAddress, Port};
+ranch_ref(Listener) when is_list(Listener) ->
+ Port = rabbit_misc:pget(port, Listener),
+ [{IPAddress, Port, _Family} | _] = tcp_listener_addresses(Port),
+ {acceptor, IPAddress, Port};
+ranch_ref(undefined) ->
+ undefined.
+
+-spec ranch_ref(inet:ip_address(), ip_port()) -> ranch:ref().
+
+%% Returns a reference that identifies a TCP listener in Ranch.
+ranch_ref(IPAddress, Port) ->
+ {acceptor, IPAddress, Port}.
+
+-spec ranch_ref_of_protocol(atom()) -> ranch:ref() | undefined.
+ranch_ref_of_protocol(Protocol) ->
+ ranch_ref(listener_of_protocol(Protocol)).
+
+-spec listener_of_protocol(atom()) -> #listener{}.
+listener_of_protocol(Protocol) ->
+ rabbit_misc:execute_mnesia_transaction(
+ fun() ->
+ MatchSpec = #listener{
+ node = node(),
+ protocol = Protocol,
+ _ = '_'
+ },
+ case mnesia:match_object(rabbit_listener, MatchSpec, read) of
+ [] -> undefined;
+ [Row] -> Row
+ end
+ end).
+
+-spec stop_ranch_listener_of_protocol(atom()) -> ok | {error, not_found}.
+stop_ranch_listener_of_protocol(Protocol) ->
+ case rabbit_networking:ranch_ref_of_protocol(Protocol) of
+ undefined -> ok;
+ Ref ->
+ rabbit_log:debug("Stopping Ranch listener for protocol ~s", [Protocol]),
+ ranch:stop_listener(Ref)
+ end.
+
+-spec start_tcp_listener(
+ listener_config(), integer()) -> 'ok' | {'error', term()}.
+
+start_tcp_listener(Listener, NumAcceptors) ->
+ start_listener(Listener, NumAcceptors, amqp, "TCP listener", tcp_opts()).
+
+-spec start_ssl_listener(
+ listener_config(), rabbit_types:infos(), integer()) -> 'ok' | {'error', term()}.
+
+start_ssl_listener(Listener, SslOpts, NumAcceptors) ->
+ start_listener(Listener, NumAcceptors, 'amqp/ssl', "TLS (SSL) listener", tcp_opts() ++ SslOpts).
+
+
+-spec start_listener(
+ listener_config(), integer(), protocol(), label(), list()) -> 'ok' | {'error', term()}.
+start_listener(Listener, NumAcceptors, Protocol, Label, Opts) ->
+ lists:foldl(fun (Address, ok) ->
+ start_listener0(Address, NumAcceptors, Protocol, Label, Opts);
+ (_Address, {error, _} = Error) ->
+ Error
+ end, ok, tcp_listener_addresses(Listener)).
+
+start_listener0(Address, NumAcceptors, Protocol, Label, Opts) ->
+ Transport = transport(Protocol),
+ Spec = tcp_listener_spec(rabbit_tcp_listener_sup, Address, Opts,
+ Transport, rabbit_connection_sup, [], Protocol,
+ NumAcceptors, Label),
+ case supervisor:start_child(rabbit_sup, Spec) of
+ {ok, _} -> ok;
+ {error, {{shutdown, {failed_to_start_child, _,
+ {shutdown, {failed_to_start_child, _,
+ {listen_error, _, PosixError}}}}}, _}} ->
+ {IPAddress, Port, _Family} = Address,
+ {error, {could_not_start_listener, rabbit_misc:ntoa(IPAddress), Port, PosixError}};
+ {error, Other} ->
+ {IPAddress, Port, _Family} = Address,
+ {error, {could_not_start_listener, rabbit_misc:ntoa(IPAddress), Port, Other}}
+ end.
+
+transport(Protocol) ->
+ case Protocol of
+ amqp -> ranch_tcp;
+ 'amqp/ssl' -> ranch_ssl
+ end.
+
+-spec stop_tcp_listener(listener_config()) -> 'ok'.
+
+stop_tcp_listener(Listener) ->
+ [stop_tcp_listener0(Address) ||
+ Address <- tcp_listener_addresses(Listener)],
+ ok.
+
+stop_tcp_listener0({IPAddress, Port, _Family}) ->
+ Name = rabbit_misc:tcp_name(rabbit_tcp_listener_sup, IPAddress, Port),
+ ok = supervisor:terminate_child(rabbit_sup, Name),
+ ok = supervisor:delete_child(rabbit_sup, Name).
+
+-spec tcp_listener_started
+ (_, _,
+ string() |
+ {byte(),byte(),byte(),byte()} |
+ {char(),char(),char(),char(),char(),char(),char(),char()}, _) ->
+ 'ok'.
+
+tcp_listener_started(Protocol, Opts, IPAddress, Port) ->
+ %% We need the ip to distinguish e.g. 0.0.0.0 and 127.0.0.1
+ %% We need the host so we can distinguish multiple instances of the above
+ %% in a cluster.
+ ok = mnesia:dirty_write(
+ rabbit_listener,
+ #listener{node = node(),
+ protocol = Protocol,
+ host = tcp_host(IPAddress),
+ ip_address = IPAddress,
+ port = Port,
+ opts = Opts}).
+
+-spec tcp_listener_stopped
+ (_, _,
+ string() |
+ {byte(),byte(),byte(),byte()} |
+ {char(),char(),char(),char(),char(),char(),char(),char()},
+ _) ->
+ 'ok'.
+
+tcp_listener_stopped(Protocol, Opts, IPAddress, Port) ->
+ ok = mnesia:dirty_delete_object(
+ rabbit_listener,
+ #listener{node = node(),
+ protocol = Protocol,
+ host = tcp_host(IPAddress),
+ ip_address = IPAddress,
+ port = Port,
+ opts = Opts}).
+
+-spec record_distribution_listener() -> ok | no_return().
+
+record_distribution_listener() ->
+ {Name, Host} = rabbit_nodes:parts(node()),
+ case erl_epmd:port_please(list_to_atom(Name), Host, infinity) of
+ {port, Port, _Version} ->
+ tcp_listener_started(clustering, [], {0,0,0,0,0,0,0,0}, Port);
+ noport ->
+ throw({error, no_epmd_port})
+ end.
+
+-spec active_listeners() -> [rabbit_types:listener()].
+
+active_listeners() ->
+ rabbit_misc:dirty_read_all(rabbit_listener).
+
+-spec node_listeners(node()) -> [rabbit_types:listener()].
+
+node_listeners(Node) ->
+ mnesia:dirty_read(rabbit_listener, Node).
+
+-spec node_client_listeners(node()) -> [rabbit_types:listener()].
+
+node_client_listeners(Node) ->
+ case node_listeners(Node) of
+ [] -> [];
+ Xs ->
+ lists:filter(fun (#listener{protocol = clustering}) -> false;
+ (_) -> true
+ end, Xs)
+ end.
+
+-spec on_node_down(node()) -> 'ok'.
+
+on_node_down(Node) ->
+ case lists:member(Node, nodes()) of
+ false ->
+ rabbit_log:info(
+ "Node ~s is down, deleting its listeners~n", [Node]),
+ ok = mnesia:dirty_delete(rabbit_listener, Node);
+ true ->
+ rabbit_log:info(
+ "Keeping ~s listeners: the node is already back~n", [Node])
+ end.
+
+-spec register_connection(pid()) -> ok.
+
+register_connection(Pid) -> pg_local:join(rabbit_connections, Pid).
+
+-spec unregister_connection(pid()) -> ok.
+
+unregister_connection(Pid) -> pg_local:leave(rabbit_connections, Pid).
+
+-spec connections() -> [rabbit_types:connection()].
+
+connections() ->
+ Nodes = rabbit_nodes:all_running(),
+ rabbit_misc:append_rpc_all_nodes(Nodes, rabbit_networking, connections_local, [], ?RPC_TIMEOUT).
+
+-spec local_connections() -> [rabbit_types:connection()].
+%% @doc Returns pids of AMQP 0-9-1 and AMQP 1.0 connections local to this node.
+local_connections() ->
+ connections_local().
+
+-spec connections_local() -> [rabbit_types:connection()].
+%% @deprecated Prefer {@link local_connections}
+connections_local() -> pg_local:get_members(rabbit_connections).
+
+-spec register_non_amqp_connection(pid()) -> ok.
+
+register_non_amqp_connection(Pid) -> pg_local:join(rabbit_non_amqp_connections, Pid).
+
+-spec unregister_non_amqp_connection(pid()) -> ok.
+
+unregister_non_amqp_connection(Pid) -> pg_local:leave(rabbit_non_amqp_connections, Pid).
+
+-spec non_amqp_connections() -> [rabbit_types:connection()].
+
+non_amqp_connections() ->
+ Nodes = rabbit_nodes:all_running(),
+ rabbit_misc:append_rpc_all_nodes(Nodes, rabbit_networking, local_non_amqp_connections, [], ?RPC_TIMEOUT).
+
+-spec local_non_amqp_connections() -> [rabbit_types:connection()].
+local_non_amqp_connections() ->
+ pg_local:get_members(rabbit_non_amqp_connections).
+
+-spec connection_info_keys() -> rabbit_types:info_keys().
+
+connection_info_keys() -> rabbit_reader:info_keys().
+
+-spec connection_info(rabbit_types:connection()) -> rabbit_types:infos().
+
+connection_info(Pid) -> rabbit_reader:info(Pid).
+
+-spec connection_info(rabbit_types:connection(), rabbit_types:info_keys()) ->
+ rabbit_types:infos().
+
+connection_info(Pid, Items) -> rabbit_reader:info(Pid, Items).
+
+-spec connection_info_all() -> [rabbit_types:infos()].
+
+connection_info_all() -> cmap(fun (Q) -> connection_info(Q) end).
+
+-spec connection_info_all(rabbit_types:info_keys()) ->
+ [rabbit_types:infos()].
+
+connection_info_all(Items) -> cmap(fun (Q) -> connection_info(Q, Items) end).
+
+emit_connection_info_all(Nodes, Items, Ref, AggregatorPid) ->
+ Pids = [ spawn_link(Node, rabbit_networking, emit_connection_info_local, [Items, Ref, AggregatorPid]) || Node <- Nodes ],
+ rabbit_control_misc:await_emitters_termination(Pids),
+ ok.
+
+emit_connection_info_local(Items, Ref, AggregatorPid) ->
+ rabbit_control_misc:emitting_map_with_exit_handler(
+ AggregatorPid, Ref, fun(Q) -> connection_info(Q, Items) end,
+ connections_local()).
+
+-spec close_connection(pid(), string()) -> 'ok'.
+
+close_connection(Pid, Explanation) ->
+ case lists:member(Pid, connections()) of
+ true ->
+ Res = rabbit_reader:shutdown(Pid, Explanation),
+ rabbit_log:info("Closing connection ~p because ~p~n", [Pid, Explanation]),
+ Res;
+ false ->
+ rabbit_log:warning("Asked to close connection ~p (reason: ~p) "
+ "but no running cluster node reported it as an active connection. Was it already closed? ~n",
+ [Pid, Explanation]),
+ ok
+ end.
+
+-spec close_connections([pid()], string()) -> 'ok'.
+close_connections(Pids, Explanation) ->
+ [close_connection(Pid, Explanation) || Pid <- Pids],
+ ok.
+
+%% Meant to be used by tests only
+-spec close_all_connections(string()) -> 'ok'.
+close_all_connections(Explanation) ->
+ Pids = connections(),
+ [close_connection(Pid, Explanation) || Pid <- Pids],
+ ok.
+
+-spec force_connection_event_refresh(reference()) -> 'ok'.
+force_connection_event_refresh(Ref) ->
+ [rabbit_reader:force_event_refresh(C, Ref) || C <- connections()],
+ ok.
+
+-spec force_non_amqp_connection_event_refresh(reference()) -> 'ok'.
+force_non_amqp_connection_event_refresh(Ref) ->
+ [gen_server:cast(Pid, {force_event_refresh, Ref}) || Pid <- non_amqp_connections()],
+ ok.
+
+-spec failed_to_recv_proxy_header(_, _) -> no_return().
+failed_to_recv_proxy_header(Ref, Error) ->
+ Msg = case Error of
+ closed -> "error when receiving proxy header: TCP socket was ~p prematurely";
+ _Other -> "error when receiving proxy header: ~p"
+ end,
+ rabbit_log:debug(Msg, [Error]),
+    %% The following call will clean up resources then exit
+ _ = ranch:handshake(Ref),
+ exit({shutdown, failed_to_recv_proxy_header}).
+
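+%% When the proxy protocol is enabled, the PROXY header must be read
+%% (with a 3 second timeout) before the usual Ranch handshake; the
+%% socket is then wrapped so the real peer address can be reported
+%% downstream.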
+handshake(Ref, ProxyProtocolEnabled) ->
+ case ProxyProtocolEnabled of
+ true ->
+ case ranch:recv_proxy_header(Ref, 3000) of
+ {error, Error} ->
+ failed_to_recv_proxy_header(Ref, Error);
+ {error, protocol_error, Error} ->
+ failed_to_recv_proxy_header(Ref, Error);
+ {ok, ProxyInfo} ->
+ {ok, Sock} = ranch:handshake(Ref),
+ setup_socket(Sock),
+ {ok, {rabbit_proxy_socket, Sock, ProxyInfo}}
+ end;
+ false ->
+ {ok, Sock} = ranch:handshake(Ref),
+ setup_socket(Sock),
+ {ok, Sock}
+ end.
+
+setup_socket(Sock) ->
+ ok = tune_buffer_size(Sock),
+ ok = file_handle_cache:obtain().
+
+tune_buffer_size(Sock) ->
+ case tune_buffer_size1(Sock) of
+ ok -> ok;
+ {error, _} -> rabbit_net:fast_close(Sock),
+ exit(normal)
+ end.
+
+tune_buffer_size1(Sock) ->
+ case rabbit_net:getopts(Sock, [sndbuf, recbuf, buffer]) of
+ {ok, BufSizes} -> BufSz = lists:max([Sz || {_Opt, Sz} <- BufSizes]),
+ rabbit_net:setopts(Sock, [{buffer, BufSz}]);
+ Error -> Error
+ end.
+
+%%--------------------------------------------------------------------
+
+tcp_host(IPAddress) ->
+ rabbit_net:tcp_host(IPAddress).
+
+cmap(F) -> rabbit_misc:filter_exit_map(F, connections()).
+
+tcp_opts() ->
+ {ok, ConfigOpts} = application:get_env(rabbit, tcp_listen_options),
+ ConfigOpts.
+
+%% inet_parse:address/1 takes care of IP strings like "0.0.0.0".
+%% inet:getaddr/2 returns immediately for IP tuples such as {0,0,0,0},
+%% but runs the 'inet_gethost' port process for DNS lookups. On
+%% Windows, inet:getaddr/2 runs the DNS resolver even for an IP
+%% string, which may fail.
+getaddr(Host, Family) ->
+ case inet_parse:address(Host) of
+ {ok, IPAddress} -> [{IPAddress, resolve_family(IPAddress, Family)}];
+ {error, _} -> gethostaddr(Host, Family)
+ end.
+
+gethostaddr(Host, auto) ->
+ Lookups = [{Family, inet:getaddr(Host, Family)} || Family <- [inet, inet6]],
+ case [{IP, Family} || {Family, {ok, IP}} <- Lookups] of
+ [] -> host_lookup_error(Host, Lookups);
+ IPs -> IPs
+ end;
+
+gethostaddr(Host, Family) ->
+ case inet:getaddr(Host, Family) of
+ {ok, IPAddress} -> [{IPAddress, Family}];
+ {error, Reason} -> host_lookup_error(Host, Reason)
+ end.
+
+-spec host_lookup_error(_, _) -> no_return().
+host_lookup_error(Host, Reason) ->
+ rabbit_log:error("invalid host ~p - ~p~n", [Host, Reason]),
+ throw({error, {invalid_host, Host, Reason}}).
+
+resolve_family({_,_,_,_}, auto) -> inet;
+resolve_family({_,_,_,_,_,_,_,_}, auto) -> inet6;
+resolve_family(IP, auto) -> throw({error, {strange_family, IP}});
+resolve_family(_, F) -> F.
+
+%%--------------------------------------------------------------------
+
+%% There are three kinds of machine (for our purposes).
+%%
+%% * Those which treat IPv4 addresses as a special kind of IPv6 address
+%% ("Single stack")
+%% - Linux by default, Windows Vista and later
+%% - We also treat any (hypothetical?) IPv6-only machine the same way
+%% * Those which consider IPv6 and IPv4 to be completely separate things
+%% ("Dual stack")
+%% - OpenBSD, Windows XP / 2003, Linux if so configured
+%% * Those which do not support IPv6.
+%% - Ancient/weird OSes, Linux if so configured
+%%
+%% How to reconfigure Linux to test this:
+%% Single stack (default):
+%% echo 0 > /proc/sys/net/ipv6/bindv6only
+%% Dual stack:
+%% echo 1 > /proc/sys/net/ipv6/bindv6only
+%% IPv4 only:
+%% add ipv6.disable=1 to GRUB_CMDLINE_LINUX_DEFAULT in /etc/default/grub then
+%% sudo update-grub && sudo reboot
+%%
+%% This matters in (and only in) the case where the sysadmin (or the
+%% app descriptor) has only supplied a port and we wish to bind to
+%% "all addresses". This means different things depending on whether
+%% we're single or dual stack. On single stack binding to "::"
+%% implicitly includes all IPv4 addresses, and subsequently attempting
+%% to bind to "0.0.0.0" will fail. On dual stack, binding to "::" will
+%% only bind to IPv6 addresses, and we need another listener bound to
+%% "0.0.0.0" for IPv4. Finally, on IPv4-only systems we of course only
+%% want to bind to "0.0.0.0".
+%%
+%% Unfortunately it seems there is no way to detect single vs dual stack
+%% apart from attempting to bind to the port.
+port_to_listeners(Port) ->
+ IPv4 = {"0.0.0.0", Port, inet},
+ IPv6 = {"::", Port, inet6},
+ case ipv6_status(?FIRST_TEST_BIND_PORT) of
+ single_stack -> [IPv6];
+ ipv6_only -> [IPv6];
+ dual_stack -> [IPv6, IPv4];
+ ipv4_only -> [IPv4]
+ end.
+
+ipv6_status(TestPort) ->
+ IPv4 = [inet, {ip, {0,0,0,0}}],
+ IPv6 = [inet6, {ip, {0,0,0,0,0,0,0,0}}],
+ case gen_tcp:listen(TestPort, IPv6) of
+ {ok, LSock6} ->
+ case gen_tcp:listen(TestPort, IPv4) of
+ {ok, LSock4} ->
+ %% Dual stack
+ gen_tcp:close(LSock6),
+ gen_tcp:close(LSock4),
+ dual_stack;
+ %% Checking the error here would only let us
+ %% distinguish single stack IPv6 / IPv4 vs IPv6 only,
+ %% which we figure out below anyway.
+ {error, _} ->
+ gen_tcp:close(LSock6),
+ case gen_tcp:listen(TestPort, IPv4) of
+ %% Single stack
+ {ok, LSock4} -> gen_tcp:close(LSock4),
+ single_stack;
+ %% IPv6-only machine. Welcome to the future.
+ {error, eafnosupport} -> ipv6_only; %% Linux
+ {error, eprotonosupport}-> ipv6_only; %% FreeBSD
+ %% Dual stack machine with something already
+ %% on IPv4.
+ {error, _} -> ipv6_status(TestPort + 1)
+ end
+ end;
+ %% IPv4-only machine. Welcome to the 90s.
+ {error, eafnosupport} -> %% Linux
+ ipv4_only;
+ {error, eprotonosupport} -> %% FreeBSD
+ ipv4_only;
+ %% Port in use
+ {error, _} ->
+ ipv6_status(TestPort + 1)
+ end.
diff --git a/deps/rabbit/src/rabbit_node_monitor.erl b/deps/rabbit/src/rabbit_node_monitor.erl
new file mode 100644
index 0000000000..b56180c54c
--- /dev/null
+++ b/deps/rabbit/src/rabbit_node_monitor.erl
@@ -0,0 +1,926 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_node_monitor).
+
+%% Transitional step until we can require Erlang/OTP 21 and
+%% use the now recommended try/catch syntax for obtaining the stack trace.
+-compile(nowarn_deprecated_function).
+
+-behaviour(gen_server).
+
+-export([start_link/0]).
+-export([running_nodes_filename/0,
+ cluster_status_filename/0, quorum_filename/0, default_quorum_filename/0,
+ prepare_cluster_status_files/0,
+ write_cluster_status/1, read_cluster_status/0,
+ update_cluster_status/0, reset_cluster_status/0]).
+-export([notify_node_up/0, notify_joined_cluster/0, notify_left_cluster/1]).
+-export([partitions/0, partitions/1, status/1, subscribe/1]).
+-export([pause_partition_guard/0]).
+-export([global_sync/0]).
+
+%% gen_server callbacks
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
+ code_change/3]).
+
+%% Utils
+-export([all_rabbit_nodes_up/0, run_outside_applications/2, ping_all/0,
+ alive_nodes/1, alive_rabbit_nodes/1]).
+
+-define(SERVER, ?MODULE).
+-define(NODE_REPLY_TIMEOUT, 5000).
+-define(RABBIT_UP_RPC_TIMEOUT, 2000).
+-define(RABBIT_DOWN_PING_INTERVAL, 1000).
+
+-record(state, {monitors, partitions, subscribers, down_ping_timer,
+ keepalive_timer, autoheal, guid, node_guids}).
+
+%%----------------------------------------------------------------------------
+%% Start
+%%----------------------------------------------------------------------------
+
+-spec start_link() -> rabbit_types:ok_pid_or_error().
+
+start_link() -> gen_server:start_link({local, ?SERVER}, ?MODULE, [], []).
+
+%%----------------------------------------------------------------------------
+%% Cluster file operations
+%%----------------------------------------------------------------------------
+
+%% The cluster file information is kept in two files. The "cluster
+%% status file" contains all the clustered nodes and the disc nodes.
+%% The "running nodes file" contains the currently running nodes or
+%% the running nodes at shutdown when the node is down.
+%%
+%% We strive to keep the files up to date and we rely on this
+%% assumption in various situations. Obviously when mnesia is offline
+%% the information we have will be outdated, but it cannot be
+%% otherwise.
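+%%
+%% An illustrative on-disk layout (hypothetical node names):
+%%   cluster_nodes.config      : {['rabbit@a','rabbit@b'],['rabbit@a']}.
+%%   nodes_running_at_shutdown : ['rabbit@a','rabbit@b'].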
+
+-spec running_nodes_filename() -> string().
+
+running_nodes_filename() ->
+ filename:join(rabbit_mnesia:dir(), "nodes_running_at_shutdown").
+
+-spec cluster_status_filename() -> string().
+
+cluster_status_filename() ->
+ filename:join(rabbit_mnesia:dir(), "cluster_nodes.config").
+
+quorum_filename() ->
+ ra_env:data_dir().
+
+default_quorum_filename() ->
+ filename:join(rabbit_mnesia:dir(), "quorum").
+
+-spec prepare_cluster_status_files() -> 'ok' | no_return().
+
+prepare_cluster_status_files() ->
+ rabbit_mnesia:ensure_mnesia_dir(),
+ RunningNodes1 = case try_read_file(running_nodes_filename()) of
+ {ok, [Nodes]} when is_list(Nodes) -> Nodes;
+ {ok, Other} -> corrupt_cluster_status_files(Other);
+ {error, enoent} -> []
+ end,
+ ThisNode = [node()],
+    %% The running nodes file might contain a set or a list (the
+    %% legacy file format), hence the usort below.
+ RunningNodes2 = lists:usort(ThisNode ++ RunningNodes1),
+ {AllNodes1, DiscNodes} =
+ case try_read_file(cluster_status_filename()) of
+ {ok, [{AllNodes, DiscNodes0}]} ->
+ {AllNodes, DiscNodes0};
+ {ok, [AllNodes0]} when is_list(AllNodes0) ->
+ {legacy_cluster_nodes(AllNodes0), legacy_disc_nodes(AllNodes0)};
+ {ok, Files} ->
+ corrupt_cluster_status_files(Files);
+ {error, enoent} ->
+ LegacyNodes = legacy_cluster_nodes([]),
+ {LegacyNodes, LegacyNodes}
+ end,
+ AllNodes2 = lists:usort(AllNodes1 ++ RunningNodes2),
+ ok = write_cluster_status({AllNodes2, DiscNodes, RunningNodes2}).
+
+-spec corrupt_cluster_status_files(any()) -> no_return().
+
+corrupt_cluster_status_files(F) ->
+ throw({error, corrupt_cluster_status_files, F}).
+
+-spec write_cluster_status(rabbit_mnesia:cluster_status()) -> 'ok'.
+
+write_cluster_status({All, Disc, Running}) ->
+ ClusterStatusFN = cluster_status_filename(),
+ Res = case rabbit_file:write_term_file(ClusterStatusFN, [{All, Disc}]) of
+ ok ->
+ RunningNodesFN = running_nodes_filename(),
+ {RunningNodesFN,
+ rabbit_file:write_term_file(RunningNodesFN, [Running])};
+ E1 = {error, _} ->
+ {ClusterStatusFN, E1}
+ end,
+ case Res of
+ {_, ok} -> ok;
+ {FN, {error, E2}} -> throw({error, {could_not_write_file, FN, E2}})
+ end.
+
+-spec read_cluster_status() -> rabbit_mnesia:cluster_status().
+
+read_cluster_status() ->
+ case {try_read_file(cluster_status_filename()),
+ try_read_file(running_nodes_filename())} of
+ {{ok, [{All, Disc}]}, {ok, [Running]}} when is_list(Running) ->
+ {All, Disc, Running};
+ {Stat, Run} ->
+ throw({error, {corrupt_or_missing_cluster_files, Stat, Run}})
+ end.
+
+-spec update_cluster_status() -> 'ok'.
+
+update_cluster_status() ->
+ {ok, Status} = rabbit_mnesia:cluster_status_from_mnesia(),
+ write_cluster_status(Status).
+
+-spec reset_cluster_status() -> 'ok'.
+
+reset_cluster_status() ->
+ write_cluster_status({[node()], [node()], [node()]}).
+
+%%----------------------------------------------------------------------------
+%% Cluster notifications
+%%----------------------------------------------------------------------------
+
+-spec notify_node_up() -> 'ok'.
+
+notify_node_up() ->
+ gen_server:cast(?SERVER, notify_node_up).
+
+-spec notify_joined_cluster() -> 'ok'.
+
+notify_joined_cluster() ->
+ Nodes = rabbit_nodes:all_running() -- [node()],
+ gen_server:abcast(Nodes, ?SERVER,
+ {joined_cluster, node(), rabbit_mnesia:node_type()}),
+ ok.
+
+-spec notify_left_cluster(node()) -> 'ok'.
+
+notify_left_cluster(Node) ->
+ Nodes = rabbit_nodes:all_running(),
+ gen_server:abcast(Nodes, ?SERVER, {left_cluster, Node}),
+ ok.
+
+%%----------------------------------------------------------------------------
+%% Server calls
+%%----------------------------------------------------------------------------
+
+-spec partitions() -> [node()].
+
+partitions() ->
+ gen_server:call(?SERVER, partitions, infinity).
+
+-spec partitions([node()]) -> [{node(), [node()]}].
+
+partitions(Nodes) ->
+ {Replies, _} = gen_server:multi_call(Nodes, ?SERVER, partitions, ?NODE_REPLY_TIMEOUT),
+ Replies.
+
+-spec status([node()]) -> {[{node(), [node()]}], [node()]}.
+
+status(Nodes) ->
+ gen_server:multi_call(Nodes, ?SERVER, status, infinity).
+
+-spec subscribe(pid()) -> 'ok'.
+
+subscribe(Pid) ->
+ gen_server:cast(?SERVER, {subscribe, Pid}).
+
+%%----------------------------------------------------------------------------
+%% pause_minority/pause_if_all_down safety
+%%----------------------------------------------------------------------------
+
+%% If we are in a minority and pause_minority mode then a) we are
+%% going to shut down imminently and b) we should not confirm anything
+%% until then, since anything we confirm is likely to be lost.
+%%
+%% The same principles apply to a node which isn't part of the preferred
+%% partition when we are in pause_if_all_down mode.
+%%
+%% We could confirm something by having an HA queue see the pausing
+%% state (and fail over into it) before the node monitor stops us, or
+%% by using unmirrored queues and just having them vanish (and
+%% confirming messages as thrown away).
+%%
+%% So we have channels call in here before issuing confirms, to do a
+%% lightweight check that we have not entered a pausing state.
+
+-spec pause_partition_guard() -> 'ok' | 'pausing'.
+
+pause_partition_guard() ->
+ case get(pause_partition_guard) of
+ not_pause_mode ->
+ ok;
+ undefined ->
+ {ok, M} = application:get_env(rabbit, cluster_partition_handling),
+ case M of
+ pause_minority ->
+ pause_minority_guard([], ok);
+ {pause_if_all_down, PreferredNodes, _} ->
+ pause_if_all_down_guard(PreferredNodes, [], ok);
+ _ ->
+ put(pause_partition_guard, not_pause_mode),
+ ok
+ end;
+ {minority_mode, Nodes, LastState} ->
+ pause_minority_guard(Nodes, LastState);
+ {pause_if_all_down_mode, PreferredNodes, Nodes, LastState} ->
+ pause_if_all_down_guard(PreferredNodes, Nodes, LastState)
+ end.
+
+pause_minority_guard(LastNodes, LastState) ->
+ case nodes() of
+ LastNodes -> LastState;
+ _ -> NewState = case majority() of
+ false -> pausing;
+ true -> ok
+ end,
+ put(pause_partition_guard,
+ {minority_mode, nodes(), NewState}),
+ NewState
+ end.
+
+pause_if_all_down_guard(PreferredNodes, LastNodes, LastState) ->
+ case nodes() of
+ LastNodes -> LastState;
+ _ -> NewState = case in_preferred_partition(PreferredNodes) of
+ false -> pausing;
+ true -> ok
+ end,
+ put(pause_partition_guard,
+ {pause_if_all_down_mode, PreferredNodes, nodes(),
+ NewState}),
+ NewState
+ end.
+
+%%----------------------------------------------------------------------------
+%% "global" hang workaround.
+%%----------------------------------------------------------------------------
+
+%% This code works around a possible inconsistency in the "global"
+%% state, causing global:sync/0 to never return.
+%%
+%% 1. A process is spawned.
+%% 2. If after 15", global:sync() didn't return, the "global"
+%% state is parsed.
+%% 3. If it detects that a sync is blocked for more than 10",
+%% the process sends fake nodedown/nodeup events to the two
+%% nodes involved (one local, one remote).
+%% 4. Both "global" instances restart their synchronisation.
+%% 5. global:sync() finally returns.
+%%
+%% FIXME: Remove this workaround, once we got rid of the change to
+%% "dist_auto_connect" and fixed the bugs uncovered.
+
+global_sync() ->
+ Pid = spawn(fun workaround_global_hang/0),
+ ok = global:sync(),
+ Pid ! global_sync_done,
+ ok.
+
+workaround_global_hang() ->
+ receive
+ global_sync_done ->
+ ok
+ after 10000 ->
+ find_blocked_global_peers()
+ end.
+
+find_blocked_global_peers() ->
+ Snapshot1 = snapshot_global_dict(),
+ timer:sleep(10000),
+ Snapshot2 = snapshot_global_dict(),
+ find_blocked_global_peers1(Snapshot2, Snapshot1).
+
+snapshot_global_dict() ->
+ {status, _, _, [Dict | _]} = sys:get_status(global_name_server),
+ [E || {{sync_tag_his, _}, _} = E <- Dict].
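+%% Each entry has the form {{sync_tag_his, Node}, Tag}; an entry that is
+%% identical in two snapshots taken 10 seconds apart is treated as a sync
+%% blocked on that peer.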
+
+find_blocked_global_peers1([{{sync_tag_his, Peer}, _} = Item | Rest],
+ OlderSnapshot) ->
+ case lists:member(Item, OlderSnapshot) of
+ true -> unblock_global_peer(Peer);
+ false -> ok
+ end,
+ find_blocked_global_peers1(Rest, OlderSnapshot);
+find_blocked_global_peers1([], _) ->
+ ok.
+
+unblock_global_peer(PeerNode) ->
+ ThisNode = node(),
+ PeerState = rpc:call(PeerNode, sys, get_status, [global_name_server]),
+ error_logger:info_msg(
+ "Global hang workaround: global state on ~s seems broken~n"
+ " * Peer global state: ~p~n"
+ " * Local global state: ~p~n"
+ "Faking nodedown/nodeup between ~s and ~s~n",
+ [PeerNode, PeerState, sys:get_status(global_name_server),
+ PeerNode, ThisNode]),
+ {global_name_server, ThisNode} ! {nodedown, PeerNode},
+ {global_name_server, PeerNode} ! {nodedown, ThisNode},
+ {global_name_server, ThisNode} ! {nodeup, PeerNode},
+ {global_name_server, PeerNode} ! {nodeup, ThisNode},
+ ok.
+
+%%----------------------------------------------------------------------------
+%% gen_server callbacks
+%%----------------------------------------------------------------------------
+
+init([]) ->
+ %% We trap exits so that the supervisor will not just kill us. We
+ %% want to be sure that we are not going to be killed while
+ %% writing out the cluster status files - bad things can then
+ %% happen.
+ process_flag(trap_exit, true),
+ net_kernel:monitor_nodes(true, [nodedown_reason]),
+ {ok, _} = mnesia:subscribe(system),
+ %% If the node has been restarted, Mnesia can trigger a system notification
+ %% before the monitor subscribes to receive them. To avoid autoheal blocking
+ %% because the inconsistent database event never arrives, we begin monitoring
+ %% all running nodes as early as possible. The rest of the monitoring ops will
+ %% only be triggered when notifications arrive.
+ Nodes = possibly_partitioned_nodes(),
+ startup_log(Nodes),
+ Monitors = lists:foldl(fun(Node, Monitors0) ->
+ pmon:monitor({rabbit, Node}, Monitors0)
+ end, pmon:new(), Nodes),
+ {ok, ensure_keepalive_timer(#state{monitors = Monitors,
+ subscribers = pmon:new(),
+ partitions = [],
+ guid = rabbit_guid:gen(),
+ node_guids = maps:new(),
+ autoheal = rabbit_autoheal:init()})}.
+
+handle_call(partitions, _From, State = #state{partitions = Partitions}) ->
+ {reply, Partitions, State};
+
+handle_call(status, _From, State = #state{partitions = Partitions}) ->
+ {reply, [{partitions, Partitions},
+ {nodes, [node() | nodes()]}], State};
+
+handle_call(_Request, _From, State) ->
+ {noreply, State}.
+
+handle_cast(notify_node_up, State = #state{guid = GUID}) ->
+ Nodes = rabbit_nodes:all_running() -- [node()],
+ gen_server:abcast(Nodes, ?SERVER,
+ {node_up, node(), rabbit_mnesia:node_type(), GUID}),
+ %% register other active rabbits with this rabbit
+ DiskNodes = rabbit_mnesia:cluster_nodes(disc),
+ [gen_server:cast(?SERVER, {node_up, N, case lists:member(N, DiskNodes) of
+ true -> disc;
+ false -> ram
+ end}) || N <- Nodes],
+ {noreply, State};
+
+%%----------------------------------------------------------------------------
+%% Partial partition detection
+%%
+%% Every node generates a GUID each time it starts, and announces that
+%% GUID in 'node_up', with 'announce_guid' sent by return so the new
+%% node knows the GUIDs of the others. These GUIDs are sent in all the
+%% partial partition related messages to ensure that we ignore partial
+%% partition messages from before we restarted (to avoid getting stuck
+%% in a loop).
+%%
+%% When one node gets nodedown from another, it then sends
+%% 'check_partial_partition' to all the nodes it still thinks are
+%% alive. If any of those (intermediate) nodes still see the "down"
+%% node as up, they inform it that this has happened. The original
+%% node (in 'ignore', 'pause_if_all_down' or 'autoheal' mode) will then
+%% disconnect from the intermediate node to "upgrade" to a full
+%% partition.
+%%
+%% In pause_minority mode it will instead immediately pause until all
+%% nodes come back. This is because the contract for pause_minority is
+%% that nodes should never sit in a partitioned state - if it just
+%% disconnected, it would become a minority, pause, realise it's not
+%% in a minority any more, and come back, still partitioned (albeit no
+%% longer partially).
+%%----------------------------------------------------------------------------
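+
+%% Message-flow sketch (node names hypothetical): if A receives nodedown
+%% for B while C can still see B:
+%%
+%%   A -> C : {check_partial_partition, B, A, GUID_B, GUID_C, GUID_A}
+%%   C -> A : {partial_partition, B, C, GUID_A}
+%%
+%% A then disconnects from C (or, in pause_minority mode, pauses until the
+%% whole cluster recovers).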
+
+handle_cast({node_up, Node, NodeType, GUID},
+ State = #state{guid = MyGUID,
+ node_guids = GUIDs}) ->
+ cast(Node, {announce_guid, node(), MyGUID}),
+ GUIDs1 = maps:put(Node, GUID, GUIDs),
+ handle_cast({node_up, Node, NodeType}, State#state{node_guids = GUIDs1});
+
+handle_cast({announce_guid, Node, GUID}, State = #state{node_guids = GUIDs}) ->
+ {noreply, State#state{node_guids = maps:put(Node, GUID, GUIDs)}};
+
+handle_cast({check_partial_partition, Node, Rep, NodeGUID, MyGUID, RepGUID},
+ State = #state{guid = MyGUID,
+ node_guids = GUIDs}) ->
+ case lists:member(Node, rabbit_nodes:all_running()) andalso
+ maps:find(Node, GUIDs) =:= {ok, NodeGUID} of
+ true -> spawn_link( %%[1]
+ fun () ->
+ case rpc:call(Node, rabbit, is_running, []) of
+ {badrpc, _} -> ok;
+ _ ->
+ rabbit_log:warning("Received a 'DOWN' message"
+ " from ~p but still can"
+ " communicate with it ~n",
+ [Node]),
+ cast(Rep, {partial_partition,
+ Node, node(), RepGUID})
+ end
+ end);
+ false -> ok
+ end,
+ {noreply, State};
+%% [1] We checked that we haven't heard the node go down - but we
+%% really should make sure we can actually communicate with
+%% it. Otherwise there's a race where we falsely detect a partial
+%% partition.
+%%
+%% Now of course the rpc:call/4 may take a long time to return if
+%% connectivity with the node is actually interrupted - but that's OK,
+%% we only really want to do something in a timely manner if
+%% connectivity is OK. However, of course as always we must not block
+%% the node monitor, so we do the check in a separate process.
+
+handle_cast({check_partial_partition, _Node, _Reporter,
+ _NodeGUID, _GUID, _ReporterGUID}, State) ->
+ {noreply, State};
+
+handle_cast({partial_partition, NotReallyDown, Proxy, MyGUID},
+ State = #state{guid = MyGUID}) ->
+ FmtBase = "Partial partition detected:~n"
+ " * We saw DOWN from ~s~n"
+ " * We can still see ~s which can see ~s~n",
+ ArgsBase = [NotReallyDown, Proxy, NotReallyDown],
+ case application:get_env(rabbit, cluster_partition_handling) of
+ {ok, pause_minority} ->
+ rabbit_log:error(
+ FmtBase ++ " * pause_minority mode enabled~n"
+ "We will therefore pause until the *entire* cluster recovers~n",
+ ArgsBase),
+ await_cluster_recovery(fun all_nodes_up/0),
+ {noreply, State};
+ {ok, {pause_if_all_down, PreferredNodes, _}} ->
+ case in_preferred_partition(PreferredNodes) of
+ true -> rabbit_log:error(
+ FmtBase ++ "We will therefore intentionally "
+ "disconnect from ~s~n", ArgsBase ++ [Proxy]),
+ upgrade_to_full_partition(Proxy);
+ false -> rabbit_log:info(
+ FmtBase ++ "We are about to pause, no need "
+ "for further actions~n", ArgsBase)
+ end,
+ {noreply, State};
+ {ok, _} ->
+ rabbit_log:error(
+ FmtBase ++ "We will therefore intentionally disconnect from ~s~n",
+ ArgsBase ++ [Proxy]),
+ upgrade_to_full_partition(Proxy),
+ {noreply, State}
+ end;
+
+handle_cast({partial_partition, _GUID, _Reporter, _Proxy}, State) ->
+ {noreply, State};
+
+%% Sometimes it appears the Erlang VM does not give us nodedown
+%% messages reliably when another node disconnects from us. Therefore
+%% we are told just before the disconnection so we can reciprocate.
+handle_cast({partial_partition_disconnect, Other}, State) ->
+ rabbit_log:error("Partial partition disconnect from ~s~n", [Other]),
+ disconnect(Other),
+ {noreply, State};
+
+%% Note: when updating the status file, we can't simply write the
+%% mnesia information since the message can (and will) overtake the
+%% mnesia propagation.
+handle_cast({node_up, Node, NodeType},
+ State = #state{monitors = Monitors}) ->
+ rabbit_log:info("rabbit on node ~p up~n", [Node]),
+ {AllNodes, DiscNodes, RunningNodes} = read_cluster_status(),
+ write_cluster_status({add_node(Node, AllNodes),
+ case NodeType of
+ disc -> add_node(Node, DiscNodes);
+ ram -> DiscNodes
+ end,
+ add_node(Node, RunningNodes)}),
+ ok = handle_live_rabbit(Node),
+ Monitors1 = case pmon:is_monitored({rabbit, Node}, Monitors) of
+ true ->
+ Monitors;
+ false ->
+ pmon:monitor({rabbit, Node}, Monitors)
+ end,
+ {noreply, maybe_autoheal(State#state{monitors = Monitors1})};
+
+handle_cast({joined_cluster, Node, NodeType}, State) ->
+ {AllNodes, DiscNodes, RunningNodes} = read_cluster_status(),
+ write_cluster_status({add_node(Node, AllNodes),
+ case NodeType of
+ disc -> add_node(Node, DiscNodes);
+ ram -> DiscNodes
+ end,
+ RunningNodes}),
+ {noreply, State};
+
+handle_cast({left_cluster, Node}, State) ->
+ {AllNodes, DiscNodes, RunningNodes} = read_cluster_status(),
+ write_cluster_status({del_node(Node, AllNodes), del_node(Node, DiscNodes),
+ del_node(Node, RunningNodes)}),
+ {noreply, State};
+
+handle_cast({subscribe, Pid}, State = #state{subscribers = Subscribers}) ->
+ {noreply, State#state{subscribers = pmon:monitor(Pid, Subscribers)}};
+
+handle_cast(keepalive, State) ->
+ {noreply, State};
+
+handle_cast(_Msg, State) ->
+ {noreply, State}.
+
+handle_info({'DOWN', _MRef, process, {rabbit, Node}, _Reason},
+ State = #state{monitors = Monitors, subscribers = Subscribers}) ->
+ rabbit_log:info("rabbit on node ~p down~n", [Node]),
+ {AllNodes, DiscNodes, RunningNodes} = read_cluster_status(),
+ write_cluster_status({AllNodes, DiscNodes, del_node(Node, RunningNodes)}),
+ [P ! {node_down, Node} || P <- pmon:monitored(Subscribers)],
+ {noreply, handle_dead_rabbit(
+ Node,
+ State#state{monitors = pmon:erase({rabbit, Node}, Monitors)})};
+
+handle_info({'DOWN', _MRef, process, Pid, _Reason},
+ State = #state{subscribers = Subscribers}) ->
+ {noreply, State#state{subscribers = pmon:erase(Pid, Subscribers)}};
+
+handle_info({nodedown, Node, Info}, State = #state{guid = MyGUID,
+ node_guids = GUIDs}) ->
+ rabbit_log:info("node ~p down: ~p~n",
+ [Node, proplists:get_value(nodedown_reason, Info)]),
+ Check = fun (N, CheckGUID, DownGUID) ->
+ cast(N, {check_partial_partition,
+ Node, node(), DownGUID, CheckGUID, MyGUID})
+ end,
+ case maps:find(Node, GUIDs) of
+ {ok, DownGUID} -> Alive = rabbit_nodes:all_running()
+ -- [node(), Node],
+ [case maps:find(N, GUIDs) of
+ {ok, CheckGUID} -> Check(N, CheckGUID, DownGUID);
+ error -> ok
+ end || N <- Alive];
+ error -> ok
+ end,
+ {noreply, handle_dead_node(Node, State)};
+
+handle_info({nodeup, Node, _Info}, State) ->
+ rabbit_log:info("node ~p up~n", [Node]),
+ {noreply, State};
+
+handle_info({mnesia_system_event,
+ {inconsistent_database, running_partitioned_network, Node}},
+ State = #state{partitions = Partitions,
+ monitors = Monitors}) ->
+ %% We will not get a node_up from this node - yet we should treat it as
+ %% up (mostly).
+ State1 = case pmon:is_monitored({rabbit, Node}, Monitors) of
+ true -> State;
+ false -> State#state{
+ monitors = pmon:monitor({rabbit, Node}, Monitors)}
+ end,
+ ok = handle_live_rabbit(Node),
+ Partitions1 = lists:usort([Node | Partitions]),
+ {noreply, maybe_autoheal(State1#state{partitions = Partitions1})};
+
+handle_info({autoheal_msg, Msg}, State = #state{autoheal = AState,
+ partitions = Partitions}) ->
+ AState1 = rabbit_autoheal:handle_msg(Msg, AState, Partitions),
+ {noreply, State#state{autoheal = AState1}};
+
+handle_info(ping_down_nodes, State) ->
+ %% We ping nodes when some are down to ensure that we find out
+ %% about healed partitions quickly. We ping all nodes rather than
+ %% just the ones we know are down for simplicity; it's not expensive
+ %% to ping the nodes that are up, after all.
+ State1 = State#state{down_ping_timer = undefined},
+ Self = self(),
+ %% We ping in a separate process since in a partition it might
+ %% take some noticeable length of time and we don't want to block
+ %% the node monitor for that long.
+ spawn_link(fun () ->
+ ping_all(),
+ case all_nodes_up() of
+ true -> ok;
+ false -> Self ! ping_down_nodes_again
+ end
+ end),
+ {noreply, State1};
+
+handle_info(ping_down_nodes_again, State) ->
+ {noreply, ensure_ping_timer(State)};
+
+handle_info(ping_up_nodes, State) ->
+ %% In this case we need to ensure that we ping "quickly" -
+ %% i.e. only nodes that we know to be up.
+ [cast(N, keepalive) || N <- alive_nodes() -- [node()]],
+ {noreply, ensure_keepalive_timer(State#state{keepalive_timer = undefined})};
+
+handle_info({'EXIT', _, _} = Info, State = #state{autoheal = AState0}) ->
+ AState = rabbit_autoheal:process_down(Info, AState0),
+ {noreply, State#state{autoheal = AState}};
+
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+terminate(_Reason, State) ->
+ rabbit_misc:stop_timer(State, #state.down_ping_timer),
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+%%----------------------------------------------------------------------------
+%% Functions that call the module specific hooks when nodes go up/down
+%%----------------------------------------------------------------------------
+
+handle_dead_node(Node, State = #state{autoheal = Autoheal}) ->
+ %% In general in rabbit_node_monitor we care about whether the
+ %% rabbit application is up rather than the node; we do this so
+ %% that we can respond in the same way to "rabbitmqctl stop_app"
+ %% and "rabbitmqctl stop" as much as possible.
+ %%
+ %% However, for pause_minority and pause_if_all_down modes we can't do
+ %% this, since we depend on looking at whether other nodes are up
+ %% to decide whether to come back up ourselves - if we decide that
+ %% based on the rabbit application we would go down and never come
+ %% back.
+ case application:get_env(rabbit, cluster_partition_handling) of
+ {ok, pause_minority} ->
+ case majority([Node]) of
+ true -> ok;
+ false -> await_cluster_recovery(fun majority/0)
+ end,
+ State;
+ {ok, {pause_if_all_down, PreferredNodes, HowToRecover}} ->
+ case in_preferred_partition(PreferredNodes, [Node]) of
+ true -> ok;
+ false -> await_cluster_recovery(
+ fun in_preferred_partition/0)
+ end,
+ case HowToRecover of
+ autoheal -> State#state{autoheal =
+ rabbit_autoheal:node_down(Node, Autoheal)};
+ _ -> State
+ end;
+ {ok, ignore} ->
+ State;
+ {ok, autoheal} ->
+ State#state{autoheal = rabbit_autoheal:node_down(Node, Autoheal)};
+ {ok, Term} ->
+ rabbit_log:warning("cluster_partition_handling ~p unrecognised, "
+ "assuming 'ignore'~n", [Term]),
+ State
+ end.
+
+await_cluster_recovery(Condition) ->
+ rabbit_log:warning("Cluster minority/secondary status detected - "
+ "awaiting recovery~n", []),
+ run_outside_applications(fun () ->
+ rabbit:stop(),
+ wait_for_cluster_recovery(Condition)
+ end, false),
+ ok.
+
+run_outside_applications(Fun, WaitForExistingProcess) ->
+ spawn_link(fun () ->
+ %% Ignore exit messages from the monitor - the link is needed
+ %% to ensure the monitor detects abnormal exits from this process
+ %% and can reset the 'restarting' status on the autoheal, avoiding
+ %% a deadlock. The monitor restarts when rabbit does, so messages
+ %% in the other direction should be ignored.
+ process_flag(trap_exit, true),
+ %% If our group leader is inside an application we are about
+ %% to stop, application:stop/1 does not return.
+ group_leader(whereis(init), self()),
+ register_outside_app_process(Fun, WaitForExistingProcess)
+ end).
+
+register_outside_app_process(Fun, WaitForExistingProcess) ->
+ %% Ensure only one such process runs at a time; the error:badarg
+ %% raised by register/2 is harmless if one is already running.
+ %%
+ %% If WaitForExistingProcess is false, the given fun is simply not
+ %% executed at all and the process exits.
+ %%
+ %% If WaitForExistingProcess is true, we wait for the end of the
+ %% currently running process before executing the given function.
+ try register(rabbit_outside_app_process, self()) of
+ true ->
+ do_run_outside_app_fun(Fun)
+ catch
+ error:badarg when WaitForExistingProcess ->
+ MRef = erlang:monitor(process, rabbit_outside_app_process),
+ receive
+ {'DOWN', MRef, _, _, _} ->
+ %% The existing process exited, let's try to
+ %% register again.
+ register_outside_app_process(Fun, WaitForExistingProcess)
+ end;
+ error:badarg ->
+ ok
+ end.
+
+do_run_outside_app_fun(Fun) ->
+ try
+ Fun()
+ catch _:E:Stacktrace ->
+ rabbit_log:error(
+ "rabbit_outside_app_process:~n~p~n~p~n",
+ [E, Stacktrace])
+ end.
+
+wait_for_cluster_recovery(Condition) ->
+ ping_all(),
+ case Condition() of
+ true -> rabbit:start();
+ false -> timer:sleep(?RABBIT_DOWN_PING_INTERVAL),
+ wait_for_cluster_recovery(Condition)
+ end.
+
+handle_dead_rabbit(Node, State = #state{partitions = Partitions,
+ autoheal = Autoheal}) ->
+ %% TODO: This may turn out to be a performance hog when there are
+ %% lots of nodes. We really only need to execute some of these
+ %% statements on *one* node, rather than all of them.
+ ok = rabbit_networking:on_node_down(Node),
+ ok = rabbit_amqqueue:on_node_down(Node),
+ ok = rabbit_alarm:on_node_down(Node),
+ ok = rabbit_mnesia:on_node_down(Node),
+ %% If we have been partitioned, and we are now in the only remaining
+ %% partition, we no longer care about partitions - forget them. Note
+ %% that we do not attempt to deal with individual (other) partitions
+ %% going away. It's only safe to forget anything about partitions when
+ %% there are no partitions.
+ Down = Partitions -- alive_rabbit_nodes(),
+ NoLongerPartitioned = rabbit_nodes:all_running(),
+ Partitions1 = case Partitions -- Down -- NoLongerPartitioned of
+ [] -> [];
+ _ -> Partitions
+ end,
+ ensure_ping_timer(
+ State#state{partitions = Partitions1,
+ autoheal = rabbit_autoheal:rabbit_down(Node, Autoheal)}).
+
+ensure_ping_timer(State) ->
+ rabbit_misc:ensure_timer(
+ State, #state.down_ping_timer, ?RABBIT_DOWN_PING_INTERVAL,
+ ping_down_nodes).
+
+ensure_keepalive_timer(State) ->
+ {ok, Interval} = application:get_env(rabbit, cluster_keepalive_interval),
+ rabbit_misc:ensure_timer(
+ State, #state.keepalive_timer, Interval, ping_up_nodes).
+
+handle_live_rabbit(Node) ->
+ ok = rabbit_amqqueue:on_node_up(Node),
+ ok = rabbit_alarm:on_node_up(Node),
+ ok = rabbit_mnesia:on_node_up(Node).
+
+maybe_autoheal(State = #state{partitions = []}) ->
+ State;
+
+maybe_autoheal(State = #state{autoheal = AState}) ->
+ case all_nodes_up() of
+ true -> State#state{autoheal = rabbit_autoheal:maybe_start(AState)};
+ false -> State
+ end.
+
+%%--------------------------------------------------------------------
+%% Internal utils
+%%--------------------------------------------------------------------
+
+try_read_file(FileName) ->
+ case rabbit_file:read_term_file(FileName) of
+ {ok, Term} -> {ok, Term};
+ {error, enoent} -> {error, enoent};
+ {error, E} -> throw({error, {cannot_read_file, FileName, E}})
+ end.
+
+legacy_cluster_nodes(Nodes) ->
+ %% We get all the info that we can, including the nodes from
+ %% mnesia, which will be there if the node is a disc node (empty
+ %% list otherwise)
+ lists:usort(Nodes ++ mnesia:system_info(db_nodes)).
+
+legacy_disc_nodes(AllNodes) ->
+ case AllNodes == [] orelse lists:member(node(), AllNodes) of
+ true -> [node()];
+ false -> []
+ end.
+
+add_node(Node, Nodes) -> lists:usort([Node | Nodes]).
+
+del_node(Node, Nodes) -> Nodes -- [Node].
+
+cast(Node, Msg) -> gen_server:cast({?SERVER, Node}, Msg).
+
+upgrade_to_full_partition(Proxy) ->
+ cast(Proxy, {partial_partition_disconnect, node()}),
+ disconnect(Proxy).
+
+%% When we call this, it's because we want to force Mnesia to detect a
+%% partition. But if we just call disconnect_node/1 then Mnesia won't
+%% detect a very short partition. So we want to force a slightly
+%% longer disconnect. Unfortunately we don't have a way to blacklist
+%% individual nodes; the best we can do is turn off auto-connect
+%% altogether.
+disconnect(Node) ->
+ application:set_env(kernel, dist_auto_connect, never),
+ erlang:disconnect_node(Node),
+ timer:sleep(1000),
+ application:unset_env(kernel, dist_auto_connect),
+ ok.
+
+%%--------------------------------------------------------------------
+
+%% mnesia:system_info(db_nodes) (and hence
+%% rabbit_nodes:all_running()) does not return all nodes
+%% when partitioned, just those that we are sharing Mnesia state
+%% with. So we have a small set of replacement functions
+%% here. "rabbit" in a function's name implies we test if the rabbit
+%% application is up, not just the node.
+
+%% As we use these functions to decide what to do in pause_minority or
+%% pause_if_all_down states, they *must* be fast, even in the case where
+%% TCP connections are timing out. So that means we should be careful
+%% about whether we connect to nodes which are currently disconnected.
+
+majority() ->
+ majority([]).
+
+majority(NodesDown) ->
+ Nodes = rabbit_mnesia:cluster_nodes(all),
+ AliveNodes = alive_nodes(Nodes) -- NodesDown,
+ length(AliveNodes) / length(Nodes) > 0.5.
+
+in_preferred_partition() ->
+ {ok, {pause_if_all_down, PreferredNodes, _}} =
+ application:get_env(rabbit, cluster_partition_handling),
+ in_preferred_partition(PreferredNodes).
+
+in_preferred_partition(PreferredNodes) ->
+ in_preferred_partition(PreferredNodes, []).
+
+in_preferred_partition(PreferredNodes, NodesDown) ->
+ Nodes = rabbit_mnesia:cluster_nodes(all),
+ RealPreferredNodes = [N || N <- PreferredNodes, lists:member(N, Nodes)],
+ AliveNodes = alive_nodes(RealPreferredNodes) -- NodesDown,
+ RealPreferredNodes =:= [] orelse AliveNodes =/= [].
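+
+%% Example (hypothetical five-node cluster): with cluster nodes [a,b,c,d,e]
+%% and alive nodes [a,b,c], majority() is true (3/5 > 0.5); with only [a,b]
+%% alive it is false, so a pause_minority node would pause.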
+
+all_nodes_up() ->
+ Nodes = rabbit_mnesia:cluster_nodes(all),
+ length(alive_nodes(Nodes)) =:= length(Nodes).
+
+-spec all_rabbit_nodes_up() -> boolean().
+
+all_rabbit_nodes_up() ->
+ Nodes = rabbit_mnesia:cluster_nodes(all),
+ length(alive_rabbit_nodes(Nodes)) =:= length(Nodes).
+
+-spec alive_nodes([node()]) -> [node()].
+
+alive_nodes() -> alive_nodes(rabbit_mnesia:cluster_nodes(all)).
+alive_nodes(Nodes) -> [N || N <- Nodes, lists:member(N, [node()|nodes()])].
+
+-spec alive_rabbit_nodes([node()]) -> [node()].
+
+alive_rabbit_nodes() -> alive_rabbit_nodes(rabbit_mnesia:cluster_nodes(all)).
+
+alive_rabbit_nodes(Nodes) ->
+ [N || N <- alive_nodes(Nodes), rabbit:is_running(N)].
+
+%% This one is allowed to connect!
+
+-spec ping_all() -> 'ok'.
+
+ping_all() ->
+ [net_adm:ping(N) || N <- rabbit_mnesia:cluster_nodes(all)],
+ ok.
+
+possibly_partitioned_nodes() ->
+ alive_rabbit_nodes() -- rabbit_nodes:all_running().
+
+startup_log([]) ->
+ rabbit_log:info("Starting rabbit_node_monitor~n", []);
+startup_log(Nodes) ->
+ rabbit_log:info("Starting rabbit_node_monitor, might be partitioned from ~p~n",
+ [Nodes]).
diff --git a/deps/rabbit/src/rabbit_nodes.erl b/deps/rabbit/src/rabbit_nodes.erl
new file mode 100644
index 0000000000..3034a4d513
--- /dev/null
+++ b/deps/rabbit/src/rabbit_nodes.erl
@@ -0,0 +1,157 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_nodes).
+
+-export([names/1, diagnostics/1, make/1, make/2, parts/1, cookie_hash/0,
+ is_running/2, is_process_running/2,
+ cluster_name/0, set_cluster_name/1, set_cluster_name/2, ensure_epmd/0,
+ all_running/0, name_type/0, running_count/0, total_count/0,
+ await_running_count/2, is_single_node_cluster/0,
+ boot/0]).
+-export([persistent_cluster_id/0, seed_internal_cluster_id/0, seed_user_provided_cluster_name/0]).
+
+-include_lib("kernel/include/inet.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-define(SAMPLING_INTERVAL, 1000).
+
+-define(INTERNAL_CLUSTER_ID_PARAM_NAME, internal_cluster_id).
+
+%%----------------------------------------------------------------------------
+%% API
+%%----------------------------------------------------------------------------
+
+boot() ->
+ seed_internal_cluster_id(),
+ seed_user_provided_cluster_name().
+
+name_type() ->
+ #{nodename_type := NodeType} = rabbit_prelaunch:get_context(),
+ NodeType.
+
+-spec names(string()) ->
+ rabbit_types:ok_or_error2([{string(), integer()}], term()).
+
+names(Hostname) ->
+ rabbit_nodes_common:names(Hostname).
+
+-spec diagnostics([node()]) -> string().
+
+diagnostics(Nodes) ->
+ rabbit_nodes_common:diagnostics(Nodes).
+
+make(NameOrParts) ->
+ rabbit_nodes_common:make(NameOrParts).
+
+make(ShortName, Hostname) ->
+ make({ShortName, Hostname}).
+
+parts(NodeStr) ->
+ rabbit_nodes_common:parts(NodeStr).
+
+-spec cookie_hash() -> string().
+
+cookie_hash() ->
+ rabbit_nodes_common:cookie_hash().
+
+-spec is_running(node(), atom()) -> boolean().
+
+is_running(Node, Application) ->
+ rabbit_nodes_common:is_running(Node, Application).
+
+-spec is_process_running(node(), atom()) -> boolean().
+
+is_process_running(Node, Process) ->
+ rabbit_nodes_common:is_process_running(Node, Process).
+
+-spec cluster_name() -> binary().
+
+cluster_name() ->
+ rabbit_runtime_parameters:value_global(
+ cluster_name, cluster_name_default()).
+
+cluster_name_default() ->
+ {ID, _} = parts(node()),
+ FQDN = rabbit_net:hostname(),
+ list_to_binary(atom_to_list(make({ID, FQDN}))).
+
+-spec persistent_cluster_id() -> binary().
+persistent_cluster_id() ->
+ case rabbit_runtime_parameters:lookup_global(?INTERNAL_CLUSTER_ID_PARAM_NAME) of
+ not_found ->
+ seed_internal_cluster_id(),
+ persistent_cluster_id();
+ Param ->
+ #{value := Val, name := ?INTERNAL_CLUSTER_ID_PARAM_NAME} = maps:from_list(Param),
+ Val
+ end.
+
+-spec seed_internal_cluster_id() -> binary().
+seed_internal_cluster_id() ->
+ case rabbit_runtime_parameters:lookup_global(?INTERNAL_CLUSTER_ID_PARAM_NAME) of
+ not_found ->
+ Id = rabbit_guid:binary(rabbit_guid:gen(), "rabbitmq-cluster-id"),
+ rabbit_log:info("Initialising internal cluster ID to '~s'", [Id]),
+ rabbit_runtime_parameters:set_global(?INTERNAL_CLUSTER_ID_PARAM_NAME, Id, ?INTERNAL_USER),
+ Id;
+ Param ->
+ #{value := Val, name := ?INTERNAL_CLUSTER_ID_PARAM_NAME} = maps:from_list(Param),
+ Val
+ end.
+
+seed_user_provided_cluster_name() ->
+ case application:get_env(rabbit, cluster_name) of
+ undefined -> ok;
+ {ok, Name} ->
+ rabbit_log:info("Setting cluster name to '~s' as configured", [Name]),
+ set_cluster_name(rabbit_data_coercion:to_binary(Name))
+ end.
+
+-spec set_cluster_name(binary()) -> 'ok'.
+
+set_cluster_name(Name) ->
+ set_cluster_name(Name, ?INTERNAL_USER).
+
+-spec set_cluster_name(binary(), rabbit_types:username()) -> 'ok'.
+
+set_cluster_name(Name, Username) ->
+ %% Cluster name should be binary
+ BinaryName = rabbit_data_coercion:to_binary(Name),
+ rabbit_runtime_parameters:set_global(cluster_name, BinaryName, Username).
+
+ensure_epmd() ->
+ rabbit_nodes_common:ensure_epmd().
+
+-spec all_running() -> [node()].
+all_running() -> rabbit_mnesia:cluster_nodes(running).
+
+-spec running_count() -> integer().
+running_count() -> length(all_running()).
+
+-spec total_count() -> integer().
+total_count() -> length(rabbit_mnesia:cluster_nodes(all)).
+
+-spec is_single_node_cluster() -> boolean().
+is_single_node_cluster() ->
+ total_count() =:= 1.
+
+-spec await_running_count(integer(), integer()) -> 'ok' | {'error', atom()}.
+await_running_count(TargetCount, Timeout) ->
+ Retries = round(Timeout/?SAMPLING_INTERVAL),
+ await_running_count_with_retries(TargetCount, Retries).
+
+await_running_count_with_retries(1, _Retries) -> ok;
+await_running_count_with_retries(_TargetCount, Retries) when Retries =:= 0 ->
+ {error, timeout};
+await_running_count_with_retries(TargetCount, Retries) ->
+ case running_count() >= TargetCount of
+ true -> ok;
+ false ->
+ timer:sleep(?SAMPLING_INTERVAL),
+ await_running_count_with_retries(TargetCount, Retries - 1)
+ end.
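+
+%% Usage sketch (hypothetical values): wait up to 30 seconds for at least
+%% three cluster members to report as running. A target of 1 returns 'ok'
+%% immediately, since the local node is always running.
+%%
+%%   ok = rabbit_nodes:await_running_count(3, 30000).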
diff --git a/deps/rabbit/src/rabbit_osiris_metrics.erl b/deps/rabbit/src/rabbit_osiris_metrics.erl
new file mode 100644
index 0000000000..7b2574c7e1
--- /dev/null
+++ b/deps/rabbit/src/rabbit_osiris_metrics.erl
@@ -0,0 +1,103 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at https://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% Copyright (c) 2012-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_osiris_metrics).
+
+-behaviour(gen_server).
+
+-export([start_link/0]).
+
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
+ code_change/3]).
+
+-define(TICK_TIMEOUT, 5000).
+-define(SERVER, ?MODULE).
+
+-define(STATISTICS_KEYS,
+ [policy,
+ operator_policy,
+ effective_policy_definition,
+ state,
+ leader,
+ online,
+ members
+ ]).
+
+-record(state, {timeout :: non_neg_integer()}).
+
+%%----------------------------------------------------------------------------
+%% Periodically polls osiris (stream) counters and emits queue metrics.
+%%----------------------------------------------------------------------------
+
+-spec start_link() -> rabbit_types:ok_pid_or_error().
+
+start_link() ->
+ gen_server:start_link({local, ?SERVER}, ?MODULE, [], []).
+
+init([]) ->
+ Timeout = application:get_env(rabbit, stream_tick_interval,
+ ?TICK_TIMEOUT),
+ erlang:send_after(Timeout, self(), tick),
+ {ok, #state{timeout = Timeout}}.
+
+handle_call(_Request, _From, State) ->
+ {noreply, State}.
+
+handle_cast(_Request, State) ->
+ {noreply, State}.
+
+handle_info(tick, #state{timeout = Timeout} = State) ->
+ Data = osiris_counters:overview(),
+ maps:map(
+ fun ({osiris_writer, QName}, #{offset := Offs,
+ first_offset := FstOffs}) ->
+ COffs = Offs + 1 - FstOffs,
+ rabbit_core_metrics:queue_stats(QName, COffs, 0, COffs, 0),
+ Infos = try
+ %% TODO complete stats!
+ case rabbit_amqqueue:lookup(QName) of
+ {ok, Q} ->
+ rabbit_stream_queue:info(Q, ?STATISTICS_KEYS);
+ _ ->
+ []
+ end
+ catch
+ _:_ ->
+ %% It's possible that the writer has died but
+ %% it's still on the amqqueue record, so the
+ %% `erlang:process_info/2` calls will return
+ %% `undefined` and crash with a badmatch.
+ %% At least for now, skipping the metrics might
+ %% be the best option. Otherwise this brings
+ %% down `rabbit_sup` and the whole `rabbit` app.
+ []
+ end,
+ rabbit_core_metrics:queue_stats(QName, Infos),
+ rabbit_event:notify(queue_stats, Infos ++ [{name, QName},
+ {messages, COffs},
+ {messages_ready, COffs},
+ {messages_unacknowledged, 0}]),
+ ok;
+ (_, _V) ->
+ ok
+ end, Data),
+ erlang:send_after(Timeout, self(), tick),
+ {noreply, State}.
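+
+%% Offset arithmetic sketch (hypothetical values): with first_offset = 100
+%% and offset = 149, COffs = 149 + 1 - 100 = 50, i.e. fifty messages are
+%% currently retained and reported as both total and ready.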
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
diff --git a/deps/rabbit/src/rabbit_parameter_validation.erl b/deps/rabbit/src/rabbit_parameter_validation.erl
new file mode 100644
index 0000000000..66287ec799
--- /dev/null
+++ b/deps/rabbit/src/rabbit_parameter_validation.erl
@@ -0,0 +1,88 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_parameter_validation).
+
+-export([number/2, integer/2, binary/2, boolean/2, list/2, regex/2, proplist/3, enum/1]).
+
+number(_Name, Term) when is_number(Term) ->
+ ok;
+
+number(Name, Term) ->
+ {error, "~s should be a number, actually was ~p", [Name, Term]}.
+
+integer(_Name, Term) when is_integer(Term) ->
+ ok;
+
+integer(Name, Term) ->
+ {error, "~s should be a number, actually was ~p", [Name, Term]}.
+
+binary(_Name, Term) when is_binary(Term) ->
+ ok;
+
+binary(Name, Term) ->
+ {error, "~s should be binary, actually was ~p", [Name, Term]}.
+
+boolean(_Name, Term) when is_boolean(Term) ->
+ ok;
+boolean(Name, Term) ->
+ {error, "~s should be boolean, actually was ~p", [Name, Term]}.
+
+list(_Name, Term) when is_list(Term) ->
+ ok;
+
+list(Name, Term) ->
+ {error, "~s should be list, actually was ~p", [Name, Term]}.
+
+regex(Name, Term) when is_binary(Term) ->
+ case re:compile(Term) of
+ {ok, _} -> ok;
+ {error, Reason} -> {error, "~s should be regular expression "
+ "but is invalid: ~p", [Name, Reason]}
+ end;
+regex(Name, Term) ->
+ {error, "~s should be a binary but was ~p", [Name, Term]}.
+
+proplist(Name, Constraints, Term) when is_list(Term) ->
+ {Results, Remainder}
+ = lists:foldl(
+ fun ({Key, Fun, Needed}, {Results0, Term0}) ->
+ case {lists:keytake(Key, 1, Term0), Needed} of
+ {{value, {Key, Value}, Term1}, _} ->
+ {[Fun(Key, Value) | Results0],
+ Term1};
+ {false, mandatory} ->
+ {[{error, "Key \"~s\" not found in ~s",
+ [Key, Name]} | Results0], Term0};
+ {false, optional} ->
+ {Results0, Term0}
+ end
+ end, {[], Term}, Constraints),
+ case Remainder of
+ [] -> Results;
+ _ -> [{error, "Unrecognised terms ~p in ~s", [Remainder, Name]}
+ | Results]
+ end;
+
+proplist(Name, Constraints, Term0) when is_map(Term0) ->
+ Term = maps:to_list(Term0),
+ proplist(Name, Constraints, Term);
+
+proplist(Name, _Constraints, Term) ->
+ {error, "~s not a list ~p", [Name, Term]}.
+
+enum(OptionsA) ->
+ Options = [list_to_binary(atom_to_list(O)) || O <- OptionsA],
+ fun (Name, Term) when is_binary(Term) ->
+ case lists:member(Term, Options) of
+ true -> ok;
+ false -> {error, "~s should be one of ~p, actually was ~p",
+ [Name, Options, Term]}
+ end;
+ (Name, Term) ->
+ {error, "~s should be binary, actually was ~p", [Name, Term]}
+ end.
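+
+%% Usage sketch (hypothetical parameter): validate a proplist with one
+%% mandatory and one optional key; each constraint is {Key, Fun, Needed}.
+%%
+%%   Constraints = [{<<"uri">>, fun rabbit_parameter_validation:binary/2,
+%%                   mandatory},
+%%                  {<<"prefetch">>, fun rabbit_parameter_validation:number/2,
+%%                   optional}],
+%%   rabbit_parameter_validation:proplist(
+%%     <<"my-param">>, Constraints, [{<<"uri">>, <<"amqp://host">>}]).
+%%   %% => [ok]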
diff --git a/deps/rabbit/src/rabbit_password.erl b/deps/rabbit/src/rabbit_password.erl
new file mode 100644
index 0000000000..6a5254b707
--- /dev/null
+++ b/deps/rabbit/src/rabbit_password.erl
@@ -0,0 +1,52 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_password).
+-include("rabbit.hrl").
+
+-define(DEFAULT_HASHING_MODULE, rabbit_password_hashing_sha256).
+
+%%
+%% API
+%%
+
+-export([hash/1, hash/2, generate_salt/0, salted_hash/2, salted_hash/3,
+ hashing_mod/0, hashing_mod/1]).
+
+hash(Cleartext) ->
+ hash(hashing_mod(), Cleartext).
+
+hash(HashingMod, Cleartext) ->
+ SaltBin = generate_salt(),
+ Hash = salted_hash(HashingMod, SaltBin, Cleartext),
+ <<SaltBin/binary, Hash/binary>>.
+
+generate_salt() ->
+ Salt = rand:uniform(16#ffffffff),
+ <<Salt:32>>.
+
+salted_hash(Salt, Cleartext) ->
+ salted_hash(hashing_mod(), Salt, Cleartext).
+
+salted_hash(Mod, Salt, Cleartext) ->
+ Fun = fun Mod:hash/1,
+ Fun(<<Salt/binary, Cleartext/binary>>).
+
+hashing_mod() ->
+ rabbit_misc:get_env(rabbit, password_hashing_module,
+ ?DEFAULT_HASHING_MODULE).
+
+hashing_mod(rabbit_password_hashing_sha256) ->
+ rabbit_password_hashing_sha256;
+hashing_mod(rabbit_password_hashing_md5) ->
+ rabbit_password_hashing_md5;
+%% fall back to the hashing function that was used prior to 3.6.0
+hashing_mod(undefined) ->
+ rabbit_password_hashing_md5;
+%% if a custom module is configured, simply use it
+hashing_mod(CustomMod) when is_atom(CustomMod) ->
+ CustomMod.
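+
+%% Layout sketch (assuming the default SHA-256 hashing module): hash/1
+%% returns <<Salt:4/binary, Hash/binary>>, so a stored credential can be
+%% verified by re-hashing the cleartext with the stored 4-byte salt:
+%%
+%%   Stored = rabbit_password:hash(<<"s3kr3t">>),
+%%   <<Salt:4/binary, _/binary>> = Stored,
+%%   Stored =:= <<Salt/binary,
+%%                (rabbit_password:salted_hash(Salt, <<"s3kr3t">>))/binary>>.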
diff --git a/deps/rabbit/src/rabbit_password_hashing_md5.erl b/deps/rabbit/src/rabbit_password_hashing_md5.erl
new file mode 100644
index 0000000000..1e306673ca
--- /dev/null
+++ b/deps/rabbit/src/rabbit_password_hashing_md5.erl
@@ -0,0 +1,19 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+%% Legacy hashing implementation, only used as a last resort when
+%% #internal_user.hashing_algorithm is md5 or undefined (the case in
+%% pre-3.6.0 user records).
+
+-module(rabbit_password_hashing_md5).
+
+-behaviour(rabbit_password_hashing).
+
+-export([hash/1]).
+
+hash(Binary) ->
+ erlang:md5(Binary).
diff --git a/deps/rabbit/src/rabbit_password_hashing_sha256.erl b/deps/rabbit/src/rabbit_password_hashing_sha256.erl
new file mode 100644
index 0000000000..3ccc298efd
--- /dev/null
+++ b/deps/rabbit/src/rabbit_password_hashing_sha256.erl
@@ -0,0 +1,15 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_password_hashing_sha256).
+
+-behaviour(rabbit_password_hashing).
+
+-export([hash/1]).
+
+hash(Binary) ->
+ crypto:hash(sha256, Binary).
diff --git a/deps/rabbit/src/rabbit_password_hashing_sha512.erl b/deps/rabbit/src/rabbit_password_hashing_sha512.erl
new file mode 100644
index 0000000000..c5edf8888a
--- /dev/null
+++ b/deps/rabbit/src/rabbit_password_hashing_sha512.erl
@@ -0,0 +1,15 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_password_hashing_sha512).
+
+-behaviour(rabbit_password_hashing).
+
+-export([hash/1]).
+
+hash(Binary) ->
+ crypto:hash(sha512, Binary).
diff --git a/deps/rabbit/src/rabbit_peer_discovery.erl b/deps/rabbit/src/rabbit_peer_discovery.erl
new file mode 100644
index 0000000000..1688579450
--- /dev/null
+++ b/deps/rabbit/src/rabbit_peer_discovery.erl
@@ -0,0 +1,326 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_peer_discovery).
+
+%%
+%% API
+%%
+
+-export([maybe_init/0, discover_cluster_nodes/0, backend/0, node_type/0,
+ normalize/1, format_discovered_nodes/1, log_configured_backend/0,
+ register/0, unregister/0, maybe_register/0, maybe_unregister/0,
+ maybe_inject_randomized_delay/0, lock/0, unlock/1,
+ discovery_retries/0]).
+-export([append_node_prefix/1, node_prefix/0, locking_retry_timeout/0,
+ lock_acquisition_failure_mode/0]).
+
+-define(DEFAULT_BACKEND, rabbit_peer_discovery_classic_config).
+
+%% what node type is used by default for this node when joining
+%% a new cluster as a virgin node
+-define(DEFAULT_NODE_TYPE, disc).
+
+%% default node prefix to attach to discovered hostnames
+-define(DEFAULT_PREFIX, "rabbit").
+
+%% default randomized delay range, in seconds
+-define(DEFAULT_STARTUP_RANDOMIZED_DELAY, {5, 60}).
+
+%% default discovery retries and interval.
+-define(DEFAULT_DISCOVERY_RETRY_COUNT, 10).
+-define(DEFAULT_DISCOVERY_RETRY_INTERVAL_MS, 500).
+
+-define(NODENAME_PART_SEPARATOR, "@").
+
+-spec backend() -> atom().
+
+backend() ->
+ case application:get_env(rabbit, cluster_formation) of
+ {ok, Proplist} ->
+ proplists:get_value(peer_discovery_backend, Proplist, ?DEFAULT_BACKEND);
+ undefined ->
+ ?DEFAULT_BACKEND
+ end.
+
+
+
+-spec node_type() -> rabbit_types:node_type().
+
+node_type() ->
+ case application:get_env(rabbit, cluster_formation) of
+ {ok, Proplist} ->
+ proplists:get_value(node_type, Proplist, ?DEFAULT_NODE_TYPE);
+ undefined ->
+ ?DEFAULT_NODE_TYPE
+ end.
+
+-spec locking_retry_timeout() -> {Retries :: integer(), Timeout :: integer()}.
+
+locking_retry_timeout() ->
+ case application:get_env(rabbit, cluster_formation) of
+ {ok, Proplist} ->
+ Retries = proplists:get_value(lock_retry_limit, Proplist, 10),
+ Timeout = proplists:get_value(lock_retry_timeout, Proplist, 30000),
+ {Retries, Timeout};
+ undefined ->
+ {10, 30000}
+ end.
+
+-spec lock_acquisition_failure_mode() -> ignore | fail.
+
+lock_acquisition_failure_mode() ->
+ case application:get_env(rabbit, cluster_formation) of
+ {ok, Proplist} ->
+ proplists:get_value(lock_acquisition_failure_mode, Proplist, fail);
+ undefined ->
+ fail
+ end.
+
+-spec log_configured_backend() -> ok.
+
+log_configured_backend() ->
+ rabbit_log:info("Configured peer discovery backend: ~s~n", [backend()]).
+
+maybe_init() ->
+ Backend = backend(),
+ code:ensure_loaded(Backend),
+ case erlang:function_exported(Backend, init, 0) of
+ true ->
+ rabbit_log:debug("Peer discovery backend supports initialisation"),
+ case Backend:init() of
+ ok ->
+ rabbit_log:debug("Peer discovery backend initialisation succeeded"),
+ ok;
+ {error, Error} ->
+ rabbit_log:warning("Peer discovery backend initialisation failed: ~p.", [Error]),
+ ok
+ end;
+ false ->
+ rabbit_log:debug("Peer discovery backend does not support initialisation"),
+ ok
+ end.
+
+
+%% This module doesn't currently sanity-check the return value of
+%% `Backend:list_nodes()`. Therefore, it could return something invalid:
+%% thus the `{ok, any()}` in the spec.
+%%
+%% `rabbit_mnesia:init_from_config()` does some verifications.
+
+-spec discover_cluster_nodes() ->
+ {ok, {Nodes :: [node()], NodeType :: rabbit_types:node_type()} | any()} |
+ {error, Reason :: string()}.
+
+discover_cluster_nodes() ->
+ Backend = backend(),
+ normalize(Backend:list_nodes()).
+
+
+-spec maybe_register() -> ok.
+
+maybe_register() ->
+ Backend = backend(),
+ case Backend:supports_registration() of
+ true ->
+ register(),
+ Backend:post_registration();
+ false ->
+ rabbit_log:info("Peer discovery backend ~s does not support registration, skipping registration.", [Backend]),
+ ok
+ end.
+
+
+-spec maybe_unregister() -> ok.
+
+maybe_unregister() ->
+ Backend = backend(),
+ case Backend:supports_registration() of
+ true ->
+ unregister();
+ false ->
+ rabbit_log:info("Peer discovery backend ~s does not support registration, skipping unregistration.", [Backend]),
+ ok
+ end.
+
+-spec discovery_retries() -> {Retries :: integer(), Interval :: integer()}.
+
+discovery_retries() ->
+ case application:get_env(rabbit, cluster_formation) of
+ {ok, Proplist} ->
+ Retries = proplists:get_value(discovery_retry_limit, Proplist, ?DEFAULT_DISCOVERY_RETRY_COUNT),
+ Interval = proplists:get_value(discovery_retry_interval, Proplist, ?DEFAULT_DISCOVERY_RETRY_INTERVAL_MS),
+ {Retries, Interval};
+ undefined ->
+ {?DEFAULT_DISCOVERY_RETRY_COUNT, ?DEFAULT_DISCOVERY_RETRY_INTERVAL_MS}
+ end.
+
+
+-spec maybe_inject_randomized_delay() -> ok.
+maybe_inject_randomized_delay() ->
+ Backend = backend(),
+ case Backend:supports_registration() of
+ true ->
+ rabbit_log:info("Peer discovery backend ~s supports registration.", [Backend]),
+ inject_randomized_delay();
+ false ->
+ rabbit_log:info("Peer discovery backend ~s does not support registration, skipping randomized startup delay.", [Backend]),
+ ok
+ end.
+
+-spec inject_randomized_delay() -> ok.
+
+inject_randomized_delay() ->
+ {Min, Max} = randomized_delay_range_in_ms(),
+ case {Min, Max} of
+ %% When the max value is set to 0, consider the delay to be disabled.
+ %% In addition, `rand:uniform/1` will fail with a "no function clause"
+ %% when the argument is 0.
+ {_, 0} ->
+ rabbit_log:info("Randomized delay range's upper bound is set to 0. Considering it disabled."),
+ ok;
+ {_, N} when is_number(N) ->
+ rand:seed(exsplus),
+ RandomVal = rand:uniform(round(N)),
+ rabbit_log:debug("Randomized startup delay: configured range is from ~p to ~p milliseconds, PRNG pick: ~p...",
+ [Min, Max, RandomVal]),
+ Effective = case RandomVal < Min of
+ true -> Min;
+ false -> RandomVal
+ end,
+ rabbit_log:info("Will wait for ~p milliseconds before proceeding with registration...", [Effective]),
+ timer:sleep(Effective),
+ ok
+ end.
+
+-spec randomized_delay_range_in_ms() -> {integer(), integer()}.
+
+randomized_delay_range_in_ms() ->
+ Backend = backend(),
+ Default = case erlang:function_exported(Backend, randomized_startup_delay_range, 0) of
+ true -> Backend:randomized_startup_delay_range();
+ false -> ?DEFAULT_STARTUP_RANDOMIZED_DELAY
+ end,
+ {Min, Max} = case application:get_env(rabbit, cluster_formation) of
+ {ok, Proplist} ->
+ proplists:get_value(randomized_startup_delay_range, Proplist, Default);
+ undefined ->
+ Default
+ end,
+ {Min * 1000, Max * 1000}.
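+
+%% Configuration sketch (hypothetical range): a randomized startup delay of
+%% 10 to 30 seconds; the range is given in seconds and converted to
+%% milliseconds above.
+%%
+%%   {rabbit, [{cluster_formation,
+%%              [{randomized_startup_delay_range, {10, 30}}]}]}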
+
+
+-spec register() -> ok.
+
+register() ->
+ Backend = backend(),
+ rabbit_log:info("Will register with peer discovery backend ~s", [Backend]),
+ case Backend:register() of
+ ok -> ok;
+ {error, Error} ->
+ rabbit_log:error("Failed to register with peer discovery backend ~s: ~p",
+ [Backend, Error]),
+ ok
+ end.
+
+
+-spec unregister() -> ok.
+
+unregister() ->
+ Backend = backend(),
+ rabbit_log:info("Will unregister with peer discovery backend ~s", [Backend]),
+ case Backend:unregister() of
+ ok -> ok;
+ {error, Error} ->
+ rabbit_log:error("Failed to unregister with peer discovery backend ~s: ~p",
+ [Backend, Error]),
+ ok
+ end.
+
+-spec lock() -> {ok, Data :: term()} | not_supported | {error, Reason :: string()}.
+
+lock() ->
+ Backend = backend(),
+ rabbit_log:info("Will try to lock with peer discovery backend ~s", [Backend]),
+ case Backend:lock(node()) of
+ {error, Reason} = Error ->
+ rabbit_log:error("Failed to lock with peer discovery backend ~s: ~p",
+ [Backend, Reason]),
+ Error;
+ Any ->
+ Any
+ end.
+
+-spec unlock(Data :: term()) -> ok | {error, Reason :: string()}.
+
+unlock(Data) ->
+ Backend = backend(),
+ rabbit_log:info("Will try to unlock with peer discovery backend ~s", [Backend]),
+ case Backend:unlock(Data) of
+ {error, Reason} = Error ->
+ rabbit_log:error("Failed to unlock with peer discovery backend ~s: ~p, "
+ "lock data: ~p",
+ [Backend, Reason, Data]),
+ Error;
+ Any ->
+ Any
+ end.
+
+%%
+%% Implementation
+%%
+
+-spec normalize(Nodes :: [node()] |
+ {Nodes :: [node()],
+ NodeType :: rabbit_types:node_type()} |
+ {ok, Nodes :: [node()]} |
+ {ok, {Nodes :: [node()],
+ NodeType :: rabbit_types:node_type()}} |
+ {error, Reason :: string()}) ->
+ {ok, {Nodes :: [node()], NodeType :: rabbit_types:node_type()}} |
+ {error, Reason :: string()}.
+
+normalize(Nodes) when is_list(Nodes) ->
+ {ok, {Nodes, disc}};
+normalize({Nodes, NodeType}) when is_list(Nodes) andalso is_atom(NodeType) ->
+ {ok, {Nodes, NodeType}};
+normalize({ok, Nodes}) when is_list(Nodes) ->
+ {ok, {Nodes, disc}};
+normalize({ok, {Nodes, NodeType}}) when is_list(Nodes) andalso is_atom(NodeType) ->
+ {ok, {Nodes, NodeType}};
+normalize({error, Reason}) ->
+ {error, Reason}.
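+
+%% Accepted shapes (hypothetical values):
+%%
+%%   normalize([a@h, b@h])              -> {ok, {[a@h, b@h], disc}}
+%%   normalize({[a@h], ram})            -> {ok, {[a@h], ram}}
+%%   normalize({ok, [a@h]})             -> {ok, {[a@h], disc}}
+%%   normalize({ok, {[a@h], disc}})     -> {ok, {[a@h], disc}}
+%%   normalize({error, "unreachable"})  -> {error, "unreachable"}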
+
+-spec format_discovered_nodes(Nodes :: list()) -> string().
+
+format_discovered_nodes(Nodes) ->
+ %% NOTE: in OTP 21 string:join/2 is deprecated but still available.
+ %% Its recommended replacement is not a drop-in one, though, so
+ %% we will not be switching just yet.
+ string:join(lists:map(fun rabbit_data_coercion:to_list/1, Nodes), ", ").
+
+
+
+-spec node_prefix() -> string().
+
+node_prefix() ->
+ case string:tokens(atom_to_list(node()), ?NODENAME_PART_SEPARATOR) of
+ [Prefix, _] -> Prefix;
+ [_] -> ?DEFAULT_PREFIX
+ end.
+
+
+
+-spec append_node_prefix(Value :: binary() | string()) -> string().
+
+append_node_prefix(Value) when is_binary(Value) orelse is_list(Value) ->
+ Val = rabbit_data_coercion:to_list(Value),
+ Hostname = case string:tokens(Val, ?NODENAME_PART_SEPARATOR) of
+ [_ExistingPrefix, HN] -> HN;
+ [HN] -> HN
+ end,
+ string:join([node_prefix(), Hostname], ?NODENAME_PART_SEPARATOR).
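+
+%% Examples (assuming the local node is named rabbit@seed):
+%%
+%%   node_prefix()                   -> "rabbit"
+%%   append_node_prefix("host1")     -> "rabbit@host1"
+%%   append_node_prefix("old@host1") -> "rabbit@host1"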
diff --git a/deps/rabbit/src/rabbit_peer_discovery_classic_config.erl b/deps/rabbit/src/rabbit_peer_discovery_classic_config.erl
new file mode 100644
index 0000000000..8bc7382a75
--- /dev/null
+++ b/deps/rabbit/src/rabbit_peer_discovery_classic_config.erl
@@ -0,0 +1,75 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_peer_discovery_classic_config).
+-behaviour(rabbit_peer_discovery_backend).
+
+-include("rabbit.hrl").
+
+-export([list_nodes/0, supports_registration/0, register/0, unregister/0,
+ post_registration/0, lock/1, unlock/1]).
+
+%%
+%% API
+%%
+
+-spec list_nodes() -> {ok, {Nodes :: [node()], rabbit_types:node_type()}} |
+ {error, Reason :: string()}.
+
+list_nodes() ->
+ case application:get_env(rabbit, cluster_nodes, {[], disc}) of
+ {_Nodes, _NodeType} = Pair -> {ok, Pair};
+ Nodes when is_list(Nodes) -> {ok, {Nodes, disc}}
+ end.
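+
+%% The two accepted shapes of the cluster_nodes setting (values
+%% hypothetical):
+%%
+%%   {cluster_nodes, {['rabbit@n1', 'rabbit@n2'], ram}}  %% nodes + node type
+%%   {cluster_nodes, ['rabbit@n1', 'rabbit@n2']}         %% defaults to disc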
+
+-spec supports_registration() -> boolean().
+
+supports_registration() ->
+ %% If we don't have any nodes configured, skip randomized delay and similar operations
+ %% as we don't want to delay startup for no reason. MK.
+ has_any_peer_nodes_configured().
+
+-spec register() -> ok.
+
+register() ->
+ ok.
+
+-spec unregister() -> ok.
+
+unregister() ->
+ ok.
+
+-spec post_registration() -> ok.
+
+post_registration() ->
+ ok.
+
+-spec lock(Node :: atom()) -> not_supported.
+
+lock(_Node) ->
+ not_supported.
+
+-spec unlock(Data :: term()) -> ok.
+
+unlock(_Data) ->
+ ok.
+
+%%
+%% Helpers
+%%
+
+has_any_peer_nodes_configured() ->
+ case application:get_env(rabbit, cluster_nodes, []) of
+ {[], _NodeType} ->
+ false;
+ {Nodes, _NodeType} when is_list(Nodes) ->
+ true;
+ [] ->
+ false;
+ Nodes when is_list(Nodes) ->
+ true
+ end.
diff --git a/deps/rabbit/src/rabbit_peer_discovery_dns.erl b/deps/rabbit/src/rabbit_peer_discovery_dns.erl
new file mode 100644
index 0000000000..6e343a6e2d
--- /dev/null
+++ b/deps/rabbit/src/rabbit_peer_discovery_dns.erl
@@ -0,0 +1,113 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_peer_discovery_dns).
+-behaviour(rabbit_peer_discovery_backend).
+
+-include("rabbit.hrl").
+
+-export([list_nodes/0, supports_registration/0, register/0, unregister/0,
+ post_registration/0, lock/1, unlock/1]).
+%% for tests
+-export([discover_nodes/2, discover_hostnames/2]).
+
+%%
+%% API
+%%
+
+-spec list_nodes() ->
+ {ok, {Nodes :: [node()], rabbit_types:node_type()}}.
+
+list_nodes() ->
+ case application:get_env(rabbit, cluster_formation) of
+ undefined ->
+ {ok, {[], disc}};
+ {ok, ClusterFormation} ->
+ case proplists:get_value(peer_discovery_dns, ClusterFormation) of
+ undefined ->
+ rabbit_log:warning("Peer discovery backend is set to ~s "
+ "but final config does not contain rabbit.cluster_formation.peer_discovery_dns. "
+ "Cannot discover any nodes because seed hostname is not configured!",
+ [?MODULE]),
+ {ok, {[], disc}};
+ Proplist ->
+ Hostname = rabbit_data_coercion:to_list(proplists:get_value(hostname, Proplist)),
+
+ {ok, {discover_nodes(Hostname, net_kernel:longnames()), rabbit_peer_discovery:node_type()}}
+ end
+ end.
+
+
+-spec supports_registration() -> boolean().
+
+supports_registration() ->
+ false.
+
+
+-spec register() -> ok.
+
+register() ->
+ ok.
+
+-spec unregister() -> ok.
+
+unregister() ->
+ ok.
+
+-spec post_registration() -> ok.
+
+post_registration() ->
+ ok.
+
+-spec lock(Node :: atom()) -> not_supported.
+
+lock(_Node) ->
+ not_supported.
+
+-spec unlock(Data :: term()) -> ok.
+
+unlock(_Data) ->
+ ok.
+
+%%
+%% Implementation
+%%
+
+discover_nodes(SeedHostname, LongNamesUsed) ->
+ [list_to_atom(rabbit_peer_discovery:append_node_prefix(H)) ||
+ H <- discover_hostnames(SeedHostname, LongNamesUsed)].
+
+discover_hostnames(SeedHostname, LongNamesUsed) ->
+ lookup(SeedHostname, LongNamesUsed, ipv4) ++
+ lookup(SeedHostname, LongNamesUsed, ipv6).
+
+decode_record(ipv4) ->
+ a;
+decode_record(ipv6) ->
+ aaaa.
+
+lookup(SeedHostname, LongNamesUsed, IPv) ->
+ IPs = inet_res:lookup(SeedHostname, in, decode_record(IPv)),
+ rabbit_log:info("Addresses discovered via ~s records of ~s: ~s",
+ [string:to_upper(atom_to_list(decode_record(IPv))),
+ SeedHostname,
+ string:join([inet_parse:ntoa(IP) || IP <- IPs], ", ")]),
+ Hosts = [extract_host(inet:gethostbyaddr(A), LongNamesUsed, A) ||
+ A <- IPs],
+ lists:filter(fun(E) -> E =/= error end, Hosts).
+
+
+%% long node names are used
+extract_host({ok, {hostent, FQDN, _, _, _, _}}, true, _Address) ->
+ FQDN;
+%% short node names are used
+extract_host({ok, {hostent, FQDN, _, _, _, _}}, false, _Address) ->
+ lists:nth(1, string:tokens(FQDN, "."));
+extract_host({error, Error}, _, Address) ->
+ rabbit_log:error("Reverse DNS lookup for address ~s failed: ~p",
+ [inet_parse:ntoa(Address), Error]),
+ error.
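+
+%% Usage sketch (hypothetical seed hostname and records): the seed's A/AAAA
+%% records are resolved, reverse-resolved to hostnames, then prefixed:
+%%
+%%   rabbit_peer_discovery_dns:discover_nodes("seed.eng.example.local", true).
+%%   %% => ['rabbit@node1.eng.example.local',
+%%   %%     'rabbit@node2.eng.example.local']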
diff --git a/deps/rabbit/src/rabbit_plugins.erl b/deps/rabbit/src/rabbit_plugins.erl
new file mode 100644
index 0000000000..5697ffc29a
--- /dev/null
+++ b/deps/rabbit/src/rabbit_plugins.erl
@@ -0,0 +1,699 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_plugins).
+-include_lib("rabbit_common/include/rabbit.hrl").
+-include_lib("stdlib/include/zip.hrl").
+
+-export([setup/0, active/0, read_enabled/1, list/1, list/2, dependencies/3, running_plugins/0]).
+-export([ensure/1]).
+-export([validate_plugins/1, format_invalid_plugins/1]).
+-export([is_strictly_plugin/1, strictly_plugins/2, strictly_plugins/1]).
+-export([plugins_dir/0, plugin_names/1, plugins_expand_dir/0, enabled_plugins_file/0]).
+
+% Export for testing purpose.
+-export([is_version_supported/2, validate_plugins/2]).
+%%----------------------------------------------------------------------------
+
+-type plugin_name() :: atom().
+
+%%----------------------------------------------------------------------------
+
+-spec ensure(string()) -> {'ok', [atom()], [atom()]} | {error, any()}.
+
+ensure(FileJustChanged) ->
+ case rabbit:is_running() of
+ true -> ensure1(FileJustChanged);
+ false -> {error, rabbit_not_running}
+ end.
+
+ensure1(FileJustChanged0) ->
+ {ok, OurFile0} = application:get_env(rabbit, enabled_plugins_file),
+ FileJustChanged = filename:nativename(FileJustChanged0),
+ OurFile = filename:nativename(OurFile0),
+ case OurFile of
+ FileJustChanged ->
+ Enabled = read_enabled(OurFile),
+ Wanted = prepare_plugins(Enabled),
+ Current = active(),
+ Start = Wanted -- Current,
+ Stop = Current -- Wanted,
+ rabbit:start_apps(Start),
+ %% We need sync_notify here since mgmt will attempt to look at all
+ %% the modules for the disabled plugins - if they are unloaded
+ %% that won't work.
+ ok = rabbit_event:sync_notify(plugins_changed, [{enabled, Start},
+ {disabled, Stop}]),
+ %% The app_utils module stops the apps in reverse order, so we should
+ %% pass them here in dependency order.
+ rabbit:stop_apps(lists:reverse(Stop)),
+ clean_plugins(Stop),
+ case {Start, Stop} of
+ {[], []} ->
+ ok;
+ {[], _} ->
+ rabbit_log:info("Plugins changed; disabled ~p~n",
+ [Stop]);
+ {_, []} ->
+ rabbit_log:info("Plugins changed; enabled ~p~n",
+ [Start]);
+ {_, _} ->
+ rabbit_log:info("Plugins changed; enabled ~p, disabled ~p~n",
+ [Start, Stop])
+ end,
+ {ok, Start, Stop};
+ _ ->
+ {error, {enabled_plugins_mismatch, FileJustChanged, OurFile}}
+ end.
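+
+%% Example outcome (hypothetical plugins): if the enabled_plugins file gains
+%% rabbitmq_management and loses rabbitmq_shovel, ensure/1 starts and stops
+%% the corresponding apps and returns
+%% {ok, [rabbitmq_management], [rabbitmq_shovel]}.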
+
+-spec plugins_expand_dir() -> file:filename().
+plugins_expand_dir() ->
+ case application:get_env(rabbit, plugins_expand_dir) of
+ {ok, ExpandDir} ->
+ ExpandDir;
+ _ ->
+ filename:join([rabbit_mnesia:dir(), "plugins_expand_dir"])
+ end.
+
+-spec plugins_dir() -> file:filename().
+plugins_dir() ->
+ case application:get_env(rabbit, plugins_dir) of
+ {ok, PluginsDistDir} ->
+ PluginsDistDir;
+ _ ->
+ filename:join([rabbit_mnesia:dir(), "plugins_dir_stub"])
+ end.
+
+-spec enabled_plugins_file() -> file:filename().
+enabled_plugins_file() ->
+ case application:get_env(rabbit, enabled_plugins_file) of
+ {ok, Val} ->
+ Val;
+ _ ->
+ filename:join([rabbit_mnesia:dir(), "enabled_plugins"])
+ end.
+
+-spec enabled_plugins() -> [atom()].
+enabled_plugins() ->
+ case application:get_env(rabbit, enabled_plugins_file) of
+ {ok, EnabledFile} ->
+ read_enabled(EnabledFile);
+ _ ->
+ []
+ end.
+
+%% @doc Prepares the file system and installs all enabled plugins.
+
+-spec setup() -> [plugin_name()].
+
+setup() ->
+ ExpandDir = plugins_expand_dir(),
+ %% Eliminate the contents of the destination directory
+ case delete_recursively(ExpandDir) of
+ ok -> ok;
+ {error, E1} -> throw({error, {cannot_delete_plugins_expand_dir,
+ [ExpandDir, E1]}})
+ end,
+ Enabled = enabled_plugins(),
+ prepare_plugins(Enabled).
+
+%% @doc Lists the plugins which are currently running.
+
+-spec active() -> [plugin_name()].
+
+active() ->
+ InstalledPlugins = plugin_names(list(plugins_dir())),
+ [App || {App, _, _} <- rabbit_misc:which_applications(),
+ lists:member(App, InstalledPlugins)].
+
+%% @doc Get the list of plugins which are ready to be enabled.
+
+-spec list(string()) -> [#plugin{}].
+
+list(PluginsPath) ->
+ list(PluginsPath, false).
+
+-spec list(string(), boolean()) -> [#plugin{}].
+
+list(PluginsPath, IncludeRequiredDeps) ->
+ {AllPlugins, LoadingProblems} = discover_plugins(split_path(PluginsPath)),
+ {UniquePlugins, DuplicateProblems} = remove_duplicate_plugins(AllPlugins),
+ Plugins1 = maybe_keep_required_deps(IncludeRequiredDeps, UniquePlugins),
+ Plugins2 = remove_plugins(Plugins1),
+ maybe_report_plugin_loading_problems(LoadingProblems ++ DuplicateProblems),
+ ensure_dependencies(Plugins2).
+
+%% @doc Read the list of enabled plugins from the supplied term file.
+
+-spec read_enabled(file:filename()) -> [plugin_name()].
+
+read_enabled(PluginsFile) ->
+ case rabbit_file:read_term_file(PluginsFile) of
+ {ok, [Plugins]} -> Plugins;
+ {ok, []} -> [];
+ {ok, [_|_]} -> throw({error, {malformed_enabled_plugins_file,
+ PluginsFile}});
+ {error, enoent} -> [];
+ {error, Reason} -> throw({error, {cannot_read_enabled_plugins_file,
+ PluginsFile, Reason}})
+ end.
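+
+%% The enabled plugins file is a standard Erlang term file containing a
+%% single list of plugin names, e.g.:
+%%
+%%   [rabbitmq_management,rabbitmq_shovel].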
+
+%% @doc Calculate the dependency graph from <i>Sources</i>.
+%% When Reverse =:= true, the applications that depend on <i>Sources</i>
+%% are returned; otherwise the dependencies of <i>Sources</i> are
+%% returned. Either way the result is in dependency (postorder) order.
+
+-spec dependencies(boolean(), [plugin_name()], [#plugin{}]) ->
+ [plugin_name()].
+
+dependencies(Reverse, Sources, AllPlugins) ->
+ {ok, G} = rabbit_misc:build_acyclic_graph(
+ fun ({App, _Deps}) -> [{App, App}] end,
+ fun ({App, Deps}) -> [{App, Dep} || Dep <- Deps] end,
+ [{Name, Deps} || #plugin{name = Name,
+ dependencies = Deps} <- AllPlugins]),
+ Dests = case Reverse of
+ false -> digraph_utils:reachable(Sources, G);
+ true -> digraph_utils:reaching(Sources, G)
+ end,
+ OrderedDests = digraph_utils:postorder(digraph_utils:subgraph(G, Dests)),
+ true = digraph:delete(G),
+ OrderedDests.
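+
+%% An illustrative call (plugin names are hypothetical): given that
+%% `my_plugin` depends on `my_dep`, both present in AllPlugins,
+%%
+%%   dependencies(false, [my_plugin], AllPlugins)
+%%
+%% returns [my_dep, my_plugin] (dependencies first, postorder), which is
+%% the order rabbit:start_apps/1 expects.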
+
+%% Filter real plugins from application dependencies
+
+-spec is_strictly_plugin(#plugin{}) -> boolean().
+
+is_strictly_plugin(#plugin{extra_dependencies = ExtraDeps}) ->
+ lists:member(rabbit, ExtraDeps).
+
+-spec strictly_plugins([plugin_name()], [#plugin{}]) -> [plugin_name()].
+
+strictly_plugins(Plugins, AllPlugins) ->
+ lists:filter(
+ fun(Name) ->
+ is_strictly_plugin(lists:keyfind(Name, #plugin.name, AllPlugins))
+ end, Plugins).
+
+-spec strictly_plugins([plugin_name()]) -> [plugin_name()].
+
+strictly_plugins(Plugins) ->
+ AllPlugins = list(plugins_dir()),
+ lists:filter(
+ fun(Name) ->
+ is_strictly_plugin(lists:keyfind(Name, #plugin.name, AllPlugins))
+ end, Plugins).
+
+%% For a few known cases, an application shipped with Erlang/OTP can be
+%% trusted over an externally provided plugin of the same name. In those
+%% cases the OTP version overrides the plugin.
+is_plugin_provided_by_otp(#plugin{name = eldap}) ->
+ %% eldap was added to Erlang/OTP R15B01 (ERTS 5.9.1). In this case,
+ %% we prefer this version to the plugin.
+ rabbit_misc:version_compare(erlang:system_info(version), "5.9.1", gte);
+is_plugin_provided_by_otp(_) ->
+ false.
+
+%% Make sure we don't list OTP apps in here, and also that we detect
+%% missing dependencies.
+ensure_dependencies(Plugins) ->
+ Names = plugin_names(Plugins),
+ NotThere = [Dep || #plugin{dependencies = Deps} <- Plugins,
+ Dep <- Deps,
+ not lists:member(Dep, Names)],
+ {OTP, Missing} = lists:partition(fun is_loadable/1, lists:usort(NotThere)),
+ case Missing of
+ [] -> ok;
+ _ -> Blame = [Name || #plugin{name = Name,
+ dependencies = Deps} <- Plugins,
+ lists:any(fun (Dep) ->
+ lists:member(Dep, Missing)
+ end, Deps)],
+ throw({error, {missing_dependencies, Missing, Blame}})
+ end,
+ [P#plugin{dependencies = Deps -- OTP,
+ extra_dependencies = Deps -- (Deps -- OTP)}
+ || P = #plugin{dependencies = Deps} <- Plugins].
+
+is_loadable(App) ->
+ case application:load(App) of
+ {error, {already_loaded, _}} -> true;
+ ok -> application:unload(App),
+ true;
+ _ -> false
+ end.
+
+
+%% List running plugins along with their version.
+-spec running_plugins() -> {ok, [{atom(), Vsn :: string()}]}.
+running_plugins() ->
+ ActivePlugins = active(),
+ {ok, [{App, Vsn} || {App, _ , Vsn} <- rabbit_misc:which_applications(), lists:member(App, ActivePlugins)]}.
+
+%%----------------------------------------------------------------------------
+
+prepare_plugins(Enabled) ->
+ ExpandDir = plugins_expand_dir(),
+ AllPlugins = list(plugins_dir()),
+ Wanted = dependencies(false, Enabled, AllPlugins),
+ WantedPlugins = lookup_plugins(Wanted, AllPlugins),
+ {ValidPlugins, Problems} = validate_plugins(WantedPlugins),
+ maybe_warn_about_invalid_plugins(Problems),
+ case filelib:ensure_dir(ExpandDir ++ "/") of
+ ok -> ok;
+ {error, E2} -> throw({error, {cannot_create_plugins_expand_dir,
+ [ExpandDir, E2]}})
+ end,
+ [prepare_plugin(Plugin, ExpandDir) || Plugin <- ValidPlugins],
+ Wanted.
+
+maybe_warn_about_invalid_plugins([]) ->
+ ok;
+maybe_warn_about_invalid_plugins(InvalidPlugins) ->
+ %% TODO: error message formatting
+ rabbit_log:warning(format_invalid_plugins(InvalidPlugins)).
+
+
+format_invalid_plugins(InvalidPlugins) ->
+ lists:flatten(["Failed to enable some plugins: \r\n"
+ | [format_invalid_plugin(Plugin)
+ || Plugin <- InvalidPlugins]]).
+
+format_invalid_plugin({Name, Errors}) ->
+ [io_lib:format(" ~p:~n", [Name])
+ | [format_invalid_plugin_error(Err) || Err <- Errors]].
+
+format_invalid_plugin_error({missing_dependency, Dep}) ->
+ io_lib:format(" Dependency is missing or invalid: ~p~n", [Dep]);
+%% a plugin doesn't support the effective broker version
+format_invalid_plugin_error({broker_version_mismatch, Version, Required}) ->
+ io_lib:format(" Plugin doesn't support current server version."
+ " Actual broker version: ~p, supported by the plugin: ~p~n",
+ [Version, format_required_versions(Required)]);
+%% one of dependencies of a plugin doesn't match its version requirements
+format_invalid_plugin_error({{dependency_version_mismatch, Version, Required}, Name}) ->
+ io_lib:format(" Version '~p' of dependency '~p' is unsupported."
+ " Version ranges supported by the plugin: ~p~n",
+ [Version, Name, Required]);
+format_invalid_plugin_error(Err) ->
+ io_lib:format(" Unknown error ~p~n", [Err]).
+
+format_required_versions(Versions) ->
+ lists:map(fun(V) ->
+                  case re:run(V, "^[0-9]*\\.[0-9]*\\.", [{capture, all, list}]) of
+ {match, [Sub]} ->
+ lists:flatten(io_lib:format("~s-~sx", [V, Sub]));
+ _ ->
+ V
+ end
+ end, Versions).
+
+validate_plugins(Plugins) ->
+ application:load(rabbit),
+    RabbitVersion = case application:get_key(rabbit, vsn) of
+                        undefined -> "0.0.0";
+                        {ok, Val} -> Val
+                    end,
+ validate_plugins(Plugins, RabbitVersion).
+
+validate_plugins(Plugins, BrokerVersion) ->
+ lists:foldl(
+ fun(#plugin{name = Name,
+ broker_version_requirements = BrokerVersionReqs,
+ dependency_version_requirements = DepsVersions} = Plugin,
+ {Plugins0, Errors}) ->
+ case is_version_supported(BrokerVersion, BrokerVersionReqs) of
+ true ->
+ case BrokerVersion of
+ "0.0.0" ->
+ rabbit_log:warning(
+ "Running development version of the broker."
+ " Requirement ~p for plugin ~p is ignored.",
+ [BrokerVersionReqs, Name]);
+ _ -> ok
+ end,
+ case check_plugins_versions(Name, Plugins0, DepsVersions) of
+ ok -> {[Plugin | Plugins0], Errors};
+ {error, Err} -> {Plugins0, [{Name, Err} | Errors]}
+ end;
+ false ->
+ Error = [{broker_version_mismatch, BrokerVersion, BrokerVersionReqs}],
+ {Plugins0, [{Name, Error} | Errors]}
+ end
+ end,
+ {[],[]},
+ Plugins).
+
+check_plugins_versions(PluginName, AllPlugins, RequiredVersions) ->
+ ExistingVersions = [{Name, Vsn}
+ || #plugin{name = Name, version = Vsn} <- AllPlugins],
+ Problems = lists:foldl(
+ fun({Name, Versions}, Acc) ->
+ case proplists:get_value(Name, ExistingVersions) of
+ undefined -> [{missing_dependency, Name} | Acc];
+ Version ->
+ case is_version_supported(Version, Versions) of
+ true ->
+ case Version of
+ "" ->
+ rabbit_log:warning(
+ "~p plugin version is not defined."
+ " Requirement ~p for plugin ~p is ignored",
+                                       [Name, Versions, PluginName]);
+ _ -> ok
+ end,
+ Acc;
+ false ->
+ [{{dependency_version_mismatch, Version, Versions}, Name} | Acc]
+ end
+ end
+ end,
+ [],
+ RequiredVersions),
+ case Problems of
+ [] -> ok;
+ _ -> {error, Problems}
+ end.
+
+is_version_supported("", _) -> true;
+is_version_supported("0.0.0", _) -> true;
+is_version_supported(_Version, []) -> true;
+is_version_supported(VersionFull, ExpectedVersions) ->
+    %% Pre-release versions should be supported by plugins, so the
+    %% preview part is removed before comparing.
+    Version = remove_version_preview_part(VersionFull),
+    lists:any(fun(ExpectedVersion) ->
+                  rabbit_misc:strict_version_minor_equivalent(ExpectedVersion,
+                                                              Version)
+                  andalso
+                  rabbit_misc:version_compare(ExpectedVersion, Version, lte)
+              end,
+              ExpectedVersions).
+
+remove_version_preview_part(Version) ->
+ {Ver, _Preview} = rabbit_semver:parse(Version),
+ iolist_to_binary(rabbit_semver:format({Ver, {[], []}})).
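+
+%% Behaviour sketch for the checks above (version strings are
+%% hypothetical):
+%%
+%%   is_version_supported("3.8.1", ["3.7.0", "3.8.0"])    => true
+%%   is_version_supported("3.9.0", ["3.7.0", "3.8.0"])    => false
+%%   is_version_supported("3.8.1-beta.1", ["3.8.0"])      => true
+%%
+%% The last call succeeds because the "-beta.1" preview part is removed
+%% before the minor-equivalence and ordering checks run.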
+
+clean_plugins(Plugins) ->
+ ExpandDir = plugins_expand_dir(),
+ [clean_plugin(Plugin, ExpandDir) || Plugin <- Plugins].
+
+clean_plugin(Plugin, ExpandDir) ->
+ {ok, Mods} = application:get_key(Plugin, modules),
+ application:unload(Plugin),
+ [begin
+ code:soft_purge(Mod),
+ code:delete(Mod),
+ false = code:is_loaded(Mod)
+ end || Mod <- Mods],
+ delete_recursively(rabbit_misc:format("~s/~s", [ExpandDir, Plugin])).
+
+prepare_dir_plugin(PluginAppDescPath) ->
+ PluginEbinDir = filename:dirname(PluginAppDescPath),
+ Plugin = filename:basename(PluginAppDescPath, ".app"),
+ code:add_patha(PluginEbinDir),
+ case filelib:wildcard(PluginEbinDir++ "/*.beam") of
+ [] ->
+ ok;
+ [BeamPath | _] ->
+ Module = list_to_atom(filename:basename(BeamPath, ".beam")),
+ case code:ensure_loaded(Module) of
+ {module, _} ->
+ ok;
+ {error, badfile} ->
+ rabbit_log:error("Failed to enable plugin \"~s\": "
+ "it may have been built with an "
+ "incompatible (more recent?) "
+ "version of Erlang~n", [Plugin]),
+ throw({plugin_built_with_incompatible_erlang, Plugin});
+ Error ->
+ throw({plugin_module_unloadable, Plugin, Error})
+ end
+ end.
+
+%%----------------------------------------------------------------------------
+
+delete_recursively(Fn) ->
+ case rabbit_file:recursive_delete([Fn]) of
+ ok -> ok;
+ {error, {Path, E}} -> {error, {cannot_delete, Path, E}}
+ end.
+
+find_unzipped_app_file(ExpandDir, Files) ->
+ StripComponents = length(filename:split(ExpandDir)),
+ [ X || X <- Files,
+ [_AppName, "ebin", MaybeAppFile] <-
+ [lists:nthtail(StripComponents, filename:split(X))],
+ lists:suffix(".app", MaybeAppFile)
+ ].
+
+prepare_plugin(#plugin{type = ez, name = Name, location = Location}, ExpandDir) ->
+ case zip:unzip(Location, [{cwd, ExpandDir}]) of
+ {ok, Files} ->
+ case find_unzipped_app_file(ExpandDir, Files) of
+ [PluginAppDescPath|_] ->
+ prepare_dir_plugin(PluginAppDescPath);
+ _ ->
+ rabbit_log:error("Plugin archive '~s' doesn't contain an .app file~n", [Location]),
+ throw({app_file_missing, Name, Location})
+ end;
+ {error, Reason} ->
+ rabbit_log:error("Could not unzip plugin archive '~s': ~p~n", [Location, Reason]),
+ throw({failed_to_unzip_plugin, Name, Location, Reason})
+ end;
+prepare_plugin(#plugin{type = dir, location = Location, name = Name},
+ _ExpandDir) ->
+ case filelib:wildcard(Location ++ "/ebin/*.app") of
+ [PluginAppDescPath|_] ->
+ prepare_dir_plugin(PluginAppDescPath);
+ _ ->
+ rabbit_log:error("Plugin directory '~s' doesn't contain an .app file~n", [Location]),
+ throw({app_file_missing, Name, Location})
+ end.
+
+plugin_info({ez, EZ}) ->
+ case read_app_file(EZ) of
+ {application, Name, Props} -> mkplugin(Name, Props, ez, EZ);
+ {error, Reason} -> {error, EZ, Reason}
+ end;
+plugin_info({app, App}) ->
+ case rabbit_file:read_term_file(App) of
+ {ok, [{application, Name, Props}]} ->
+ mkplugin(Name, Props, dir,
+ filename:absname(
+ filename:dirname(filename:dirname(App))));
+ {error, Reason} ->
+ {error, App, {invalid_app, Reason}}
+ end.
+
+mkplugin(Name, Props, Type, Location) ->
+ Version = proplists:get_value(vsn, Props, "0"),
+ Description = proplists:get_value(description, Props, ""),
+ Dependencies = proplists:get_value(applications, Props, []),
+ BrokerVersions = proplists:get_value(broker_version_requirements, Props, []),
+ DepsVersions = proplists:get_value(dependency_version_requirements, Props, []),
+ #plugin{name = Name, version = Version, description = Description,
+ dependencies = Dependencies, location = Location, type = Type,
+ broker_version_requirements = BrokerVersions,
+ dependency_version_requirements = DepsVersions}.
+
+read_app_file(EZ) ->
+ case zip:list_dir(EZ) of
+ {ok, [_|ZippedFiles]} ->
+ case find_app_files(ZippedFiles) of
+ [AppPath|_] ->
+ {ok, [{AppPath, AppFile}]} =
+ zip:extract(EZ, [{file_list, [AppPath]}, memory]),
+ parse_binary(AppFile);
+ [] ->
+ {error, no_app_file}
+ end;
+ {error, Reason} ->
+ {error, {invalid_ez, Reason}}
+ end.
+
+find_app_files(ZippedFiles) ->
+    {ok, RE} = re:compile("^.*/ebin/.*\\.app$"),
+ [Path || {zip_file, Path, _, _, _, _} <- ZippedFiles,
+ re:run(Path, RE, [{capture, none}]) =:= match].
+
+parse_binary(Bin) ->
+ try
+ {ok, Ts, _} = erl_scan:string(binary_to_list(Bin)),
+ {ok, Term} = erl_parse:parse_term(Ts),
+ Term
+ catch
+ Err -> {error, {invalid_app, Err}}
+ end.
+
+plugin_names(Plugins) ->
+ [Name || #plugin{name = Name} <- Plugins].
+
+lookup_plugins(Names, AllPlugins) ->
+ %% Preserve order of Names
+ lists:map(
+ fun(Name) ->
+ lists:keyfind(Name, #plugin.name, AllPlugins)
+ end,
+ Names).
+
+%% Split PATH-like value into its components.
+split_path(PathString) ->
+ Delimiters = case os:type() of
+ {unix, _} -> ":";
+ {win32, _} -> ";"
+ end,
+ string:tokens(PathString, Delimiters).
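+
+%% E.g. on a Unix-like system split_path("/a/plugins:/b/plugins")
+%% returns ["/a/plugins", "/b/plugins"].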
+
+%% Search for files using glob in a given dir. Returns full filenames of those files.
+full_path_wildcard(Glob, Dir) ->
+ [filename:join([Dir, File]) || File <- filelib:wildcard(Glob, Dir)].
+
+%% Returns a list of all .ez files in a given set of directories.
+list_ezs([]) ->
+ [];
+list_ezs([Dir|Rest]) ->
+ [{ez, EZ} || EZ <- full_path_wildcard("*.ez", Dir)] ++ list_ezs(Rest).
+
+%% Returns a list of all files that look like OTP applications in a
+%% given set of directories.
+list_free_apps([]) ->
+ [];
+list_free_apps([Dir|Rest]) ->
+ [{app, App} || App <- full_path_wildcard("*/ebin/*.app", Dir)]
+ ++ list_free_apps(Rest).
+
+compare_by_name_and_version(#plugin{name = Name, version = VersionA},
+ #plugin{name = Name, version = VersionB}) ->
+ rabbit_semver:lte(VersionA, VersionB);
+compare_by_name_and_version(#plugin{name = NameA},
+ #plugin{name = NameB}) ->
+ NameA =< NameB.
+
+-spec discover_plugins([Directory]) -> {[#plugin{}], [Problem]} when
+ Directory :: file:name(),
+ Problem :: {file:name(), term()}.
+discover_plugins(PluginsDirs) ->
+ EZs = list_ezs(PluginsDirs),
+ FreeApps = list_free_apps(PluginsDirs),
+ read_plugins_info(EZs ++ FreeApps, {[], []}).
+
+read_plugins_info([], Acc) ->
+ Acc;
+read_plugins_info([Path|Paths], {Plugins, Problems}) ->
+ case plugin_info(Path) of
+ #plugin{} = Plugin ->
+ read_plugins_info(Paths, {[Plugin|Plugins], Problems});
+ {error, Location, Reason} ->
+ read_plugins_info(Paths, {Plugins, [{Location, Reason}|Problems]})
+ end.
+
+remove_duplicate_plugins(Plugins) ->
+ %% Reverse order ensures that if there are several versions of the
+ %% same plugin, the most recent one comes first.
+ Sorted = lists:reverse(
+ lists:sort(fun compare_by_name_and_version/2, Plugins)),
+ remove_duplicate_plugins(Sorted, {[], []}).
+
+remove_duplicate_plugins([], Acc) ->
+ Acc;
+remove_duplicate_plugins([Best = #plugin{name = Name}, Offender = #plugin{name = Name} | Rest],
+ {Plugins0, Problems0}) ->
+ Problems1 = [{Offender#plugin.location, duplicate_plugin}|Problems0],
+ remove_duplicate_plugins([Best|Rest], {Plugins0, Problems1});
+remove_duplicate_plugins([Plugin|Rest], {Plugins0, Problems0}) ->
+ Plugins1 = [Plugin|Plugins0],
+ remove_duplicate_plugins(Rest, {Plugins1, Problems0}).
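+
+%% E.g. if both my_plugin 1.0.0 and my_plugin 1.1.0 (hypothetical names)
+%% are discovered, 1.1.0 is kept and the 1.0.0 copy is reported as a
+%% duplicate_plugin problem against its location.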
+
+maybe_keep_required_deps(true, Plugins) ->
+ Plugins;
+maybe_keep_required_deps(false, Plugins) ->
+ RabbitDeps = list_all_deps([rabbit]),
+ lists:filter(fun
+ (#plugin{name = Name}) ->
+ not lists:member(Name, RabbitDeps);
+ (Name) when is_atom(Name) ->
+ not lists:member(Name, RabbitDeps)
+ end,
+ Plugins).
+
+list_all_deps(Applications) ->
+ list_all_deps(Applications, []).
+
+list_all_deps([Application | Applications], Deps) ->
+ %% We load the application to be sure we can get the "applications" key.
+ %% This is required for rabbitmq-plugins for instance.
+ application:load(Application),
+ NewDeps = [Application | Deps],
+ case application:get_key(Application, applications) of
+ {ok, ApplicationDeps} ->
+ RemainingApplications0 = ApplicationDeps ++ Applications,
+ RemainingApplications = RemainingApplications0 -- NewDeps,
+ list_all_deps(RemainingApplications, NewDeps);
+ undefined ->
+ list_all_deps(Applications, NewDeps)
+ end;
+list_all_deps([], Deps) ->
+ Deps.
+
+remove_plugins(Plugins) ->
+    %% We want to filter out all Erlang applications in the plugins
+    %% directories which are not actual RabbitMQ plugins.
+ %%
+ %% A RabbitMQ plugin must depend on `rabbit`. We also want to keep
+ %% all applications they depend on, except Erlang/OTP applications.
+ %% In the end, we will skip:
+ %% * Erlang/OTP applications
+ %% * All applications which do not depend on `rabbit` and which
+ %% are not direct or indirect dependencies of plugins.
+ ActualPlugins = [Plugin
+ || #plugin{dependencies = Deps} = Plugin <- Plugins,
+ lists:member(rabbit, Deps)],
+ %% As said above, we want to keep all non-plugins which are
+ %% dependencies of plugins.
+ PluginDeps = lists:usort(
+ lists:flatten(
+ [resolve_deps(Plugins, Plugin)
+ || Plugin <- ActualPlugins])),
+ lists:filter(
+ fun(#plugin{name = Name} = Plugin) ->
+ IsOTPApp = is_plugin_provided_by_otp(Plugin),
+ IsAPlugin =
+ lists:member(Plugin, ActualPlugins) orelse
+ lists:member(Name, PluginDeps),
+ if
+ IsOTPApp ->
+ rabbit_log:debug(
+ "Plugins discovery: "
+ "ignoring ~s, Erlang/OTP application",
+ [Name]);
+ not IsAPlugin ->
+ rabbit_log:debug(
+ "Plugins discovery: "
+ "ignoring ~s, not a RabbitMQ plugin",
+ [Name]);
+ true ->
+ ok
+ end,
+ not (IsOTPApp orelse not IsAPlugin)
+ end, Plugins).
+
+resolve_deps(Plugins, #plugin{dependencies = Deps}) ->
+ IndirectDeps = [case lists:keyfind(Dep, #plugin.name, Plugins) of
+ false -> [];
+ DepPlugin -> resolve_deps(Plugins, DepPlugin)
+ end
+ || Dep <- Deps],
+ Deps ++ IndirectDeps.
+
+maybe_report_plugin_loading_problems([]) ->
+ ok;
+maybe_report_plugin_loading_problems(Problems) ->
+ io:format(standard_error,
+ "Problem reading some plugins: ~p~n",
+ [Problems]).
diff --git a/deps/rabbit/src/rabbit_policies.erl b/deps/rabbit/src/rabbit_policies.erl
new file mode 100644
index 0000000000..54e4d2c03e
--- /dev/null
+++ b/deps/rabbit/src/rabbit_policies.erl
@@ -0,0 +1,179 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_policies).
+
+%% Provides built-in policy parameter
+%% validation functions.
+
+-behaviour(rabbit_policy_validator).
+-behaviour(rabbit_policy_merge_strategy).
+
+-include("rabbit.hrl").
+
+-export([register/0, validate_policy/1, merge_policy_value/3]).
+
+-rabbit_boot_step({?MODULE,
+ [{description, "internal policies"},
+ {mfa, {rabbit_policies, register, []}},
+ {requires, rabbit_registry},
+ {enables, recovery}]}).
+
+register() ->
+ %% Note: there are more validators registered from other modules,
+ %% such as rabbit_mirror_queue_misc
+ [rabbit_registry:register(Class, Name, ?MODULE) ||
+ {Class, Name} <- [{policy_validator, <<"alternate-exchange">>},
+ {policy_validator, <<"dead-letter-exchange">>},
+ {policy_validator, <<"dead-letter-routing-key">>},
+ {policy_validator, <<"message-ttl">>},
+ {policy_validator, <<"expires">>},
+ {policy_validator, <<"max-length">>},
+ {policy_validator, <<"max-length-bytes">>},
+ {policy_validator, <<"max-in-memory-length">>},
+ {policy_validator, <<"max-in-memory-bytes">>},
+ {policy_validator, <<"queue-mode">>},
+ {policy_validator, <<"overflow">>},
+ {policy_validator, <<"delivery-limit">>},
+ {policy_validator, <<"max-age">>},
+ {policy_validator, <<"max-segment-size">>},
+ {policy_validator, <<"queue-leader-locator">>},
+ {policy_validator, <<"initial-cluster-size">>},
+ {operator_policy_validator, <<"expires">>},
+ {operator_policy_validator, <<"message-ttl">>},
+ {operator_policy_validator, <<"max-length">>},
+ {operator_policy_validator, <<"max-length-bytes">>},
+ {operator_policy_validator, <<"max-in-memory-length">>},
+ {operator_policy_validator, <<"max-in-memory-bytes">>},
+ {operator_policy_validator, <<"delivery-limit">>},
+ {policy_merge_strategy, <<"expires">>},
+ {policy_merge_strategy, <<"message-ttl">>},
+ {policy_merge_strategy, <<"max-length">>},
+ {policy_merge_strategy, <<"max-length-bytes">>},
+ {policy_merge_strategy, <<"max-in-memory-length">>},
+ {policy_merge_strategy, <<"max-in-memory-bytes">>},
+ {policy_merge_strategy, <<"delivery-limit">>}]],
+ ok.
+
+-spec validate_policy([{binary(), term()}]) -> rabbit_policy_validator:validate_results().
+
+validate_policy(Terms) ->
+ lists:foldl(fun ({Key, Value}, ok) -> validate_policy0(Key, Value);
+ (_, Error) -> Error
+ end, ok, Terms).
+
+validate_policy0(<<"alternate-exchange">>, Value)
+ when is_binary(Value) ->
+ ok;
+validate_policy0(<<"alternate-exchange">>, Value) ->
+ {error, "~p is not a valid alternate exchange name", [Value]};
+
+validate_policy0(<<"dead-letter-exchange">>, Value)
+ when is_binary(Value) ->
+ ok;
+validate_policy0(<<"dead-letter-exchange">>, Value) ->
+ {error, "~p is not a valid dead letter exchange name", [Value]};
+
+validate_policy0(<<"dead-letter-routing-key">>, Value)
+ when is_binary(Value) ->
+ ok;
+validate_policy0(<<"dead-letter-routing-key">>, Value) ->
+ {error, "~p is not a valid dead letter routing key", [Value]};
+
+validate_policy0(<<"message-ttl">>, Value)
+ when is_integer(Value), Value >= 0 ->
+ ok;
+validate_policy0(<<"message-ttl">>, Value) ->
+ {error, "~p is not a valid message TTL", [Value]};
+
+validate_policy0(<<"expires">>, Value)
+ when is_integer(Value), Value >= 1 ->
+ ok;
+validate_policy0(<<"expires">>, Value) ->
+ {error, "~p is not a valid queue expiry", [Value]};
+
+validate_policy0(<<"max-length">>, Value)
+ when is_integer(Value), Value >= 0 ->
+ ok;
+validate_policy0(<<"max-length">>, Value) ->
+ {error, "~p is not a valid maximum length", [Value]};
+
+validate_policy0(<<"max-length-bytes">>, Value)
+ when is_integer(Value), Value >= 0 ->
+ ok;
+validate_policy0(<<"max-length-bytes">>, Value) ->
+ {error, "~p is not a valid maximum length in bytes", [Value]};
+
+validate_policy0(<<"max-in-memory-length">>, Value)
+ when is_integer(Value), Value >= 0 ->
+ ok;
+validate_policy0(<<"max-in-memory-length">>, Value) ->
+    {error, "~p is not a valid maximum in-memory length", [Value]};
+
+validate_policy0(<<"max-in-memory-bytes">>, Value)
+ when is_integer(Value), Value >= 0 ->
+ ok;
+validate_policy0(<<"max-in-memory-bytes">>, Value) ->
+ {error, "~p is not a valid maximum memory in bytes", [Value]};
+
+validate_policy0(<<"queue-mode">>, <<"default">>) ->
+ ok;
+validate_policy0(<<"queue-mode">>, <<"lazy">>) ->
+ ok;
+validate_policy0(<<"queue-mode">>, Value) ->
+ {error, "~p is not a valid queue-mode value", [Value]};
+validate_policy0(<<"overflow">>, <<"drop-head">>) ->
+ ok;
+validate_policy0(<<"overflow">>, <<"reject-publish">>) ->
+ ok;
+validate_policy0(<<"overflow">>, <<"reject-publish-dlx">>) ->
+ ok;
+validate_policy0(<<"overflow">>, Value) ->
+ {error, "~p is not a valid overflow value", [Value]};
+
+validate_policy0(<<"delivery-limit">>, Value)
+ when is_integer(Value), Value >= 0 ->
+ ok;
+validate_policy0(<<"delivery-limit">>, Value) ->
+ {error, "~p is not a valid delivery limit", [Value]};
+
+validate_policy0(<<"max-age">>, Value) ->
+ case rabbit_amqqueue:check_max_age(Value) of
+ {error, _} ->
+ {error, "~p is not a valid max age", [Value]};
+ _ ->
+ ok
+ end;
+
+validate_policy0(<<"queue-leader-locator">>, <<"client-local">>) ->
+ ok;
+validate_policy0(<<"queue-leader-locator">>, <<"random">>) ->
+ ok;
+validate_policy0(<<"queue-leader-locator">>, <<"least-leaders">>) ->
+ ok;
+validate_policy0(<<"queue-leader-locator">>, Value) ->
+ {error, "~p is not a valid queue leader locator value", [Value]};
+
+validate_policy0(<<"initial-cluster-size">>, Value)
+ when is_integer(Value), Value >= 0 ->
+ ok;
+validate_policy0(<<"initial-cluster-size">>, Value) ->
+ {error, "~p is not a valid cluster size", [Value]};
+
+validate_policy0(<<"max-segment-size">>, Value)
+ when is_integer(Value), Value >= 0 ->
+ ok;
+validate_policy0(<<"max-segment-size">>, Value) ->
+ {error, "~p is not a valid segment size", [Value]}.
+
+merge_policy_value(<<"message-ttl">>, Val, OpVal) -> min(Val, OpVal);
+merge_policy_value(<<"max-length">>, Val, OpVal) -> min(Val, OpVal);
+merge_policy_value(<<"max-length-bytes">>, Val, OpVal) -> min(Val, OpVal);
+merge_policy_value(<<"max-in-memory-length">>, Val, OpVal) -> min(Val, OpVal);
+merge_policy_value(<<"max-in-memory-bytes">>, Val, OpVal) -> min(Val, OpVal);
+merge_policy_value(<<"expires">>, Val, OpVal) -> min(Val, OpVal);
+merge_policy_value(<<"delivery-limit">>, Val, OpVal) -> min(Val, OpVal).
diff --git a/deps/rabbit/src/rabbit_policy.erl b/deps/rabbit/src/rabbit_policy.erl
new file mode 100644
index 0000000000..44807de97d
--- /dev/null
+++ b/deps/rabbit/src/rabbit_policy.erl
@@ -0,0 +1,557 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_policy).
+
+%% Policies are a way to apply optional arguments ("x-args")
+%% to exchanges and queues in bulk, using name matching.
+%%
+%% Only one policy can apply to a given queue or exchange
+%% at a time. Priorities help determine what policy should
+%% take precedence.
+%%
+%% Policies build on runtime parameters. Policy-driven parameters
+%% are well known and therefore validated.
+%%
+%% See also:
+%%
+%% * rabbit_runtime_parameters
+%% * rabbit_policies
+%% * rabbit_registry
+
+%% TODO specs
+
+-behaviour(rabbit_runtime_parameter).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+-include("amqqueue.hrl").
+
+-import(rabbit_misc, [pget/2, pget/3]).
+
+-export([register/0]).
+-export([invalidate/0, recover/0]).
+-export([name/1, name_op/1, effective_definition/1, merge_operator_definitions/2, get/2, get_arg/3, set/1]).
+-export([validate/5, notify/5, notify_clear/4]).
+-export([parse_set/7, set/7, delete/3, lookup/2, list/0, list/1,
+ list_formatted/1, list_formatted/3, info_keys/0]).
+-export([parse_set_op/7, set_op/7, delete_op/3, lookup_op/2, list_op/0, list_op/1,
+ list_formatted_op/1, list_formatted_op/3]).
+
+-rabbit_boot_step({?MODULE,
+ [{description, "policy parameters"},
+ {mfa, {rabbit_policy, register, []}},
+ {requires, rabbit_registry},
+ {enables, recovery}]}).
+
+register() ->
+ rabbit_registry:register(runtime_parameter, <<"policy">>, ?MODULE),
+ rabbit_registry:register(runtime_parameter, <<"operator_policy">>, ?MODULE).
+
+name(Q) when ?is_amqqueue(Q) ->
+ Policy = amqqueue:get_policy(Q),
+ name0(Policy);
+name(#exchange{policy = Policy}) -> name0(Policy).
+
+name_op(Q) when ?is_amqqueue(Q) ->
+ OpPolicy = amqqueue:get_operator_policy(Q),
+ name0(OpPolicy);
+name_op(#exchange{operator_policy = Policy}) -> name0(Policy).
+
+name0(undefined) -> none;
+name0(Policy) -> pget(name, Policy).
+
+effective_definition(Q) when ?is_amqqueue(Q) ->
+ Policy = amqqueue:get_policy(Q),
+ OpPolicy = amqqueue:get_operator_policy(Q),
+ merge_operator_definitions(Policy, OpPolicy);
+effective_definition(#exchange{policy = Policy, operator_policy = OpPolicy}) ->
+ merge_operator_definitions(Policy, OpPolicy).
+
+merge_operator_definitions(undefined, undefined) -> undefined;
+merge_operator_definitions(Policy, undefined) -> pget(definition, Policy);
+merge_operator_definitions(undefined, OpPolicy) -> pget(definition, OpPolicy);
+merge_operator_definitions(Policy, OpPolicy) ->
+ OpDefinition = rabbit_data_coercion:to_map(pget(definition, OpPolicy, [])),
+ Definition = rabbit_data_coercion:to_map(pget(definition, Policy, [])),
+ Keys = maps:keys(Definition),
+ OpKeys = maps:keys(OpDefinition),
+ lists:map(fun(Key) ->
+ case {maps:get(Key, Definition, undefined), maps:get(Key, OpDefinition, undefined)} of
+ {Val, undefined} -> {Key, Val};
+ {undefined, OpVal} -> {Key, OpVal};
+ {Val, OpVal} -> {Key, merge_policy_value(Key, Val, OpVal)}
+ end
+ end,
+ lists:umerge(Keys, OpKeys)).
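+
+%% Sketch with hypothetical definitions: merging a user policy whose
+%% definition is [{<<"max-length">>, 1000}] with an operator policy whose
+%% definition is [{<<"max-length">>, 500}, {<<"expires">>, 60000}] yields
+%% {<<"max-length">>, 500} (the stricter of the two, via
+%% merge_policy_value/3) together with {<<"expires">>, 60000}.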
+
+set(Q0) when ?is_amqqueue(Q0) ->
+ Name = amqqueue:get_name(Q0),
+ Policy = match(Name),
+ OpPolicy = match_op(Name),
+ Q1 = amqqueue:set_policy(Q0, Policy),
+ Q2 = amqqueue:set_operator_policy(Q1, OpPolicy),
+ Q2;
+set(X = #exchange{name = Name}) ->
+ X#exchange{policy = match(Name), operator_policy = match_op(Name)}.
+
+match(Name = #resource{virtual_host = VHost}) ->
+ match(Name, list(VHost)).
+
+match_op(Name = #resource{virtual_host = VHost}) ->
+ match(Name, list_op(VHost)).
+
+get(Name, Q) when ?is_amqqueue(Q) ->
+ Policy = amqqueue:get_policy(Q),
+ OpPolicy = amqqueue:get_operator_policy(Q),
+ get0(Name, Policy, OpPolicy);
+get(Name, #exchange{policy = Policy, operator_policy = OpPolicy}) ->
+ get0(Name, Policy, OpPolicy);
+
+%% Caution - SLOW.
+get(Name, EntityName = #resource{virtual_host = VHost}) ->
+ get0(Name,
+ match(EntityName, list(VHost)),
+ match(EntityName, list_op(VHost))).
+
+get0(_Name, undefined, undefined) -> undefined;
+get0(Name, undefined, OpPolicy) -> pget(Name, pget(definition, OpPolicy, []));
+get0(Name, Policy, undefined) -> pget(Name, pget(definition, Policy, []));
+get0(Name, Policy, OpPolicy) ->
+ OpDefinition = pget(definition, OpPolicy, []),
+ Definition = pget(definition, Policy, []),
+ case {pget(Name, Definition), pget(Name, OpDefinition)} of
+ {undefined, undefined} -> undefined;
+ {Val, undefined} -> Val;
+ {undefined, Val} -> Val;
+ {Val, OpVal} -> merge_policy_value(Name, Val, OpVal)
+ end.
+
+merge_policy_value(Name, PolicyVal, OpVal) ->
+ case policy_merge_strategy(Name) of
+ {ok, Module} -> Module:merge_policy_value(Name, PolicyVal, OpVal);
+ {error, not_found} -> rabbit_policies:merge_policy_value(Name, PolicyVal, OpVal)
+ end.
+
+policy_merge_strategy(Name) ->
+ case rabbit_registry:binary_to_type(rabbit_data_coercion:to_binary(Name)) of
+ {error, not_found} ->
+ {error, not_found};
+ T ->
+ rabbit_registry:lookup_module(policy_merge_strategy, T)
+ end.
+
+%% Many heads for optimisation
+get_arg(_AName, _PName, #exchange{arguments = [], policy = undefined}) ->
+ undefined;
+get_arg(_AName, PName, X = #exchange{arguments = []}) ->
+ get(PName, X);
+get_arg(AName, PName, X = #exchange{arguments = Args}) ->
+ case rabbit_misc:table_lookup(Args, AName) of
+ undefined -> get(PName, X);
+ {_Type, Arg} -> Arg
+ end.
+
+%%----------------------------------------------------------------------------
+
+%% Gets called during upgrades - therefore must not assume anything about the
+%% state of Mnesia
+invalidate() ->
+ rabbit_file:write_file(invalid_file(), <<"">>).
+
+recover() ->
+ case rabbit_file:is_file(invalid_file()) of
+ true -> recover0(),
+ rabbit_file:delete(invalid_file());
+ false -> ok
+ end.
+
+%% To get here we have to have just completed an Mnesia upgrade - i.e. we are
+%% the first node starting. So we can rewrite the whole database. Note that
+%% recovery has not yet happened; we must work with the rabbit_durable_<thing>
+%% variants.
+recover0() ->
+ Xs = mnesia:dirty_match_object(rabbit_durable_exchange, #exchange{_ = '_'}),
+ Qs = rabbit_amqqueue:list_with_possible_retry(
+ fun() ->
+ mnesia:dirty_match_object(
+ rabbit_durable_queue, amqqueue:pattern_match_all())
+ end),
+ Policies = list(),
+ OpPolicies = list_op(),
+ [rabbit_misc:execute_mnesia_transaction(
+ fun () ->
+ mnesia:write(
+ rabbit_durable_exchange,
+ rabbit_exchange_decorator:set(
+ X#exchange{policy = match(Name, Policies),
+ operator_policy = match(Name, OpPolicies)}),
+ write)
+ end) || X = #exchange{name = Name} <- Xs],
+ [begin
+ QName = amqqueue:get_name(Q0),
+ Policy1 = match(QName, Policies),
+ Q1 = amqqueue:set_policy(Q0, Policy1),
+ OpPolicy1 = match(QName, OpPolicies),
+ Q2 = amqqueue:set_operator_policy(Q1, OpPolicy1),
+ Q3 = rabbit_queue_decorator:set(Q2),
+ ?try_mnesia_tx_or_upgrade_amqqueue_and_retry(
+ rabbit_misc:execute_mnesia_transaction(
+ fun () ->
+ mnesia:write(rabbit_durable_queue, Q3, write)
+ end),
+ begin
+ Q4 = amqqueue:upgrade(Q3),
+ rabbit_misc:execute_mnesia_transaction(
+ fun () ->
+ mnesia:write(rabbit_durable_queue, Q4, write)
+ end)
+ end)
+ end || Q0 <- Qs],
+ ok.
+
+invalid_file() ->
+ filename:join(rabbit_mnesia:dir(), "policies_are_invalid").
+
+%%----------------------------------------------------------------------------
+
+parse_set_op(VHost, Name, Pattern, Definition, Priority, ApplyTo, ActingUser) ->
+ parse_set(<<"operator_policy">>, VHost, Name, Pattern, Definition, Priority,
+ ApplyTo, ActingUser).
+
+parse_set(VHost, Name, Pattern, Definition, Priority, ApplyTo, ActingUser) ->
+ parse_set(<<"policy">>, VHost, Name, Pattern, Definition, Priority, ApplyTo,
+ ActingUser).
+
+parse_set(Type, VHost, Name, Pattern, Definition, Priority, ApplyTo, ActingUser) ->
+ try rabbit_data_coercion:to_integer(Priority) of
+ Num -> parse_set0(Type, VHost, Name, Pattern, Definition, Num, ApplyTo,
+ ActingUser)
+ catch
+ error:badarg -> {error, "~p priority must be a number", [Priority]}
+ end.
+
+parse_set0(Type, VHost, Name, Pattern, Defn, Priority, ApplyTo, ActingUser) ->
+ case rabbit_json:try_decode(Defn) of
+ {ok, Term} ->
+ R = set0(Type, VHost, Name,
+ [{<<"pattern">>, Pattern},
+ {<<"definition">>, maps:to_list(Term)},
+ {<<"priority">>, Priority},
+ {<<"apply-to">>, ApplyTo}],
+ ActingUser),
+ rabbit_log:info("Successfully set policy '~s' matching ~s names in virtual host '~s' using pattern '~s'",
+ [Name, ApplyTo, VHost, Pattern]),
+ R;
+ {error, Reason} ->
+ {error_string,
+ rabbit_misc:format("JSON decoding error. Reason: ~ts", [Reason])}
+ end.
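+
+%% Illustrative call (all values hypothetical; the definition must be a
+%% JSON binary):
+%%
+%%   rabbit_policy:parse_set(<<"/">>, <<"ttl-policy">>, <<"^ha\\.">>,
+%%                           <<"{\"message-ttl\":60000}">>, <<"1">>,
+%%                           <<"queues">>, <<"admin">>).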
+
+set_op(VHost, Name, Pattern, Definition, Priority, ApplyTo, ActingUser) ->
+ set(<<"operator_policy">>, VHost, Name, Pattern, Definition, Priority, ApplyTo, ActingUser).
+
+set(VHost, Name, Pattern, Definition, Priority, ApplyTo, ActingUser) ->
+ set(<<"policy">>, VHost, Name, Pattern, Definition, Priority, ApplyTo, ActingUser).
+
+set(Type, VHost, Name, Pattern, Definition, Priority, ApplyTo, ActingUser) ->
+ PolicyProps = [{<<"pattern">>, Pattern},
+ {<<"definition">>, Definition},
+ {<<"priority">>, case Priority of
+ undefined -> 0;
+ _ -> Priority
+ end},
+ {<<"apply-to">>, case ApplyTo of
+ undefined -> <<"all">>;
+ _ -> ApplyTo
+ end}],
+ set0(Type, VHost, Name, PolicyProps, ActingUser).
+
+set0(Type, VHost, Name, Term, ActingUser) ->
+ rabbit_runtime_parameters:set_any(VHost, Type, Name, Term, ActingUser).
+
+delete_op(VHost, Name, ActingUser) ->
+ rabbit_runtime_parameters:clear_any(VHost, <<"operator_policy">>, Name, ActingUser).
+
+delete(VHost, Name, ActingUser) ->
+ rabbit_runtime_parameters:clear_any(VHost, <<"policy">>, Name, ActingUser).
+
+lookup_op(VHost, Name) ->
+ case rabbit_runtime_parameters:lookup(VHost, <<"operator_policy">>, Name) of
+ not_found -> not_found;
+ P -> p(P, fun ident/1)
+ end.
+
+lookup(VHost, Name) ->
+ case rabbit_runtime_parameters:lookup(VHost, <<"policy">>, Name) of
+ not_found -> not_found;
+ P -> p(P, fun ident/1)
+ end.
+
+list_op() ->
+ list_op('_').
+
+list_op(VHost) ->
+ list0_op(VHost, fun ident/1).
+
+list_formatted_op(VHost) ->
+ order_policies(list0_op(VHost, fun rabbit_json:encode/1)).
+
+list_formatted_op(VHost, Ref, AggregatorPid) ->
+ rabbit_control_misc:emitting_map(AggregatorPid, Ref,
+ fun(P) -> P end, list_formatted_op(VHost)).
+
+list0_op(VHost, DefnFun) ->
+ [p(P, DefnFun)
+ || P <- rabbit_runtime_parameters:list(VHost, <<"operator_policy">>)].
+
+
+list() ->
+ list('_').
+
+list(VHost) ->
+ list0(VHost, fun ident/1).
+
+list_formatted(VHost) ->
+ order_policies(list0(VHost, fun rabbit_json:encode/1)).
+
+list_formatted(VHost, Ref, AggregatorPid) ->
+ rabbit_control_misc:emitting_map(AggregatorPid, Ref,
+ fun(P) -> P end, list_formatted(VHost)).
+
+list0(VHost, DefnFun) ->
+ [p(P, DefnFun) || P <- rabbit_runtime_parameters:list(VHost, <<"policy">>)].
+
+order_policies(PropList) ->
+ lists:sort(fun (A, B) -> not sort_pred(A, B) end, PropList).
+
+p(Parameter, DefnFun) ->
+ Value = pget(value, Parameter),
+ [{vhost, pget(vhost, Parameter)},
+ {name, pget(name, Parameter)},
+ {pattern, pget(<<"pattern">>, Value)},
+ {'apply-to', pget(<<"apply-to">>, Value)},
+ {definition, DefnFun(pget(<<"definition">>, Value))},
+ {priority, pget(<<"priority">>, Value)}].
+
+ident(X) -> X.
+
+info_keys() -> [vhost, name, 'apply-to', pattern, definition, priority].
+
+%%----------------------------------------------------------------------------
+
+validate(_VHost, <<"policy">>, Name, Term, _User) ->
+ rabbit_parameter_validation:proplist(
+ Name, policy_validation(), Term);
+validate(_VHost, <<"operator_policy">>, Name, Term, _User) ->
+ rabbit_parameter_validation:proplist(
+ Name, operator_policy_validation(), Term).
+
+notify(VHost, <<"policy">>, Name, Term, ActingUser) ->
+ rabbit_event:notify(policy_set, [{name, Name}, {vhost, VHost},
+ {user_who_performed_action, ActingUser} | Term]),
+ update_policies(VHost);
+notify(VHost, <<"operator_policy">>, Name, Term, ActingUser) ->
+ rabbit_event:notify(policy_set, [{name, Name}, {vhost, VHost},
+ {user_who_performed_action, ActingUser} | Term]),
+ update_policies(VHost).
+
+notify_clear(VHost, <<"policy">>, Name, ActingUser) ->
+ rabbit_event:notify(policy_cleared, [{name, Name}, {vhost, VHost},
+ {user_who_performed_action, ActingUser}]),
+ update_policies(VHost);
+notify_clear(VHost, <<"operator_policy">>, Name, ActingUser) ->
+ rabbit_event:notify(operator_policy_cleared,
+ [{name, Name}, {vhost, VHost},
+ {user_who_performed_action, ActingUser}]),
+ update_policies(VHost).
+
+%%----------------------------------------------------------------------------
+
+%% [1] We need to prevent this from becoming O(n^2) in a similar
+%% manner to rabbit_binding:remove_for_{source,destination}. So see
+%% the comment in rabbit_binding:lock_route_tables/0 for more rationale.
+%% [2] We could be here in a post-tx fun after the vhost has been
+%% deleted; in which case it's fine to do nothing.
+update_policies(VHost) ->
+ Tabs = [rabbit_queue, rabbit_durable_queue,
+ rabbit_exchange, rabbit_durable_exchange],
+ {Xs, Qs} = rabbit_misc:execute_mnesia_transaction(
+ fun() ->
+ [mnesia:lock({table, T}, write) || T <- Tabs], %% [1]
+ case catch {list(VHost), list_op(VHost)} of
+ {'EXIT', {throw, {error, {no_such_vhost, _}}}} ->
+ {[], []}; %% [2]
+ {'EXIT', Exit} ->
+ exit(Exit);
+ {Policies, OpPolicies} ->
+ {[update_exchange(X, Policies, OpPolicies) ||
+ X <- rabbit_exchange:list(VHost)],
+ [update_queue(Q, Policies, OpPolicies) ||
+ Q <- rabbit_amqqueue:list(VHost)]}
+ end
+ end),
+ [catch notify(X) || X <- Xs],
+ [catch notify(Q) || Q <- Qs],
+ ok.
+
+update_exchange(X = #exchange{name = XName,
+ policy = OldPolicy,
+ operator_policy = OldOpPolicy},
+ Policies, OpPolicies) ->
+ case {match(XName, Policies), match(XName, OpPolicies)} of
+ {OldPolicy, OldOpPolicy} -> no_change;
+ {NewPolicy, NewOpPolicy} ->
+ NewExchange = rabbit_exchange:update(
+ XName,
+ fun(X0) ->
+ rabbit_exchange_decorator:set(
+ X0 #exchange{policy = NewPolicy,
+ operator_policy = NewOpPolicy})
+ end),
+ case NewExchange of
+ #exchange{} = X1 -> {X, X1};
+ not_found -> {X, X }
+ end
+ end.
+
+update_queue(Q0, Policies, OpPolicies) when ?is_amqqueue(Q0) ->
+ QName = amqqueue:get_name(Q0),
+ OldPolicy = amqqueue:get_policy(Q0),
+ OldOpPolicy = amqqueue:get_operator_policy(Q0),
+ case {match(QName, Policies), match(QName, OpPolicies)} of
+ {OldPolicy, OldOpPolicy} -> no_change;
+ {NewPolicy, NewOpPolicy} ->
+ F = fun (QFun0) ->
+ QFun1 = amqqueue:set_policy(QFun0, NewPolicy),
+ QFun2 = amqqueue:set_operator_policy(QFun1, NewOpPolicy),
+ NewPolicyVersion = amqqueue:get_policy_version(QFun2) + 1,
+ QFun3 = amqqueue:set_policy_version(QFun2, NewPolicyVersion),
+ rabbit_queue_decorator:set(QFun3)
+ end,
+ NewQueue = rabbit_amqqueue:update(QName, F),
+ case NewQueue of
+ Q1 when ?is_amqqueue(Q1) ->
+ {Q0, Q1};
+ not_found ->
+ {Q0, Q0}
+ end
+ end.
+
+notify(no_change) ->
+ ok;
+notify({X1 = #exchange{}, X2 = #exchange{}}) ->
+ rabbit_exchange:policy_changed(X1, X2);
+notify({Q1, Q2}) when ?is_amqqueue(Q1), ?is_amqqueue(Q2) ->
+ rabbit_amqqueue:policy_changed(Q1, Q2).
+
+match(Name, Policies) ->
+ case match_all(Name, Policies) of
+ [] -> undefined;
+ [Policy | _] -> Policy
+ end.
+
+match_all(Name, Policies) ->
+ lists:sort(fun sort_pred/2, [P || P <- Policies, matches(Name, P)]).
+
+matches(#resource{name = Name, kind = Kind, virtual_host = VHost} = Resource, Policy) ->
+ matches_type(Kind, pget('apply-to', Policy)) andalso
+ is_applicable(Resource, pget(definition, Policy)) andalso
+ match =:= re:run(Name, pget(pattern, Policy), [{capture, none}]) andalso
+ VHost =:= pget(vhost, Policy).
+
+matches_type(exchange, <<"exchanges">>) -> true;
+matches_type(queue, <<"queues">>) -> true;
+matches_type(exchange, <<"all">>) -> true;
+matches_type(queue, <<"all">>) -> true;
+matches_type(_, _) -> false.
+
+sort_pred(A, B) -> pget(priority, A) >= pget(priority, B).
+
+is_applicable(#resource{kind = queue} = Resource, Policy) ->
+ rabbit_amqqueue:is_policy_applicable(Resource, to_list(Policy));
+is_applicable(_, _) ->
+ true.
+
+to_list(L) when is_list(L) ->
+ L;
+to_list(M) when is_map(M) ->
+ maps:to_list(M).
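+
+%% Matching sketch (hypothetical policy): a policy proplist with pattern
+%% <<"^ha\\.">>, 'apply-to' <<"queues">> and vhost <<"/">> matches
+%% #resource{virtual_host = <<"/">>, kind = queue, name = <<"ha.orders">>}
+%% (provided its definition is applicable to that queue), but never
+%% matches an exchange.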
+
+%%----------------------------------------------------------------------------
+
+operator_policy_validation() ->
+ [{<<"priority">>, fun rabbit_parameter_validation:number/2, mandatory},
+ {<<"pattern">>, fun rabbit_parameter_validation:regex/2, mandatory},
+ {<<"apply-to">>, fun apply_to_validation/2, optional},
+ {<<"definition">>, fun validation_op/2, mandatory}].
+
+policy_validation() ->
+ [{<<"priority">>, fun rabbit_parameter_validation:number/2, mandatory},
+ {<<"pattern">>, fun rabbit_parameter_validation:regex/2, mandatory},
+ {<<"apply-to">>, fun apply_to_validation/2, optional},
+ {<<"definition">>, fun validation/2, mandatory}].
+
+validation_op(Name, Terms) ->
+ validation(Name, Terms, operator_policy_validator).
+
+validation(Name, Terms) ->
+ validation(Name, Terms, policy_validator).
+
+validation(_Name, [], _Validator) ->
+ {error, "no policy provided", []};
+validation(Name, Terms0, Validator) when is_map(Terms0) ->
+ Terms = maps:to_list(Terms0),
+ validation(Name, Terms, Validator);
+validation(_Name, Terms, Validator) when is_list(Terms) ->
+ {Keys, Modules} = lists:unzip(
+ rabbit_registry:lookup_all(Validator)),
+ [] = dups(Keys), %% ASSERTION
+ Validators = lists:zipwith(fun (M, K) -> {M, a2b(K)} end, Modules, Keys),
+ case is_proplist(Terms) of
+ true -> {TermKeys, _} = lists:unzip(Terms),
+ case dups(TermKeys) of
+ [] -> validation0(Validators, Terms);
+ Dup -> {error, "~p duplicate keys not allowed", [Dup]}
+ end;
+ false -> {error, "definition must be a dictionary: ~p", [Terms]}
+ end;
+validation(Name, Term, Validator) ->
+ {error, "parse error while reading policy ~s: ~p. Validator: ~p.",
+ [Name, Term, Validator]}.
+
+validation0(Validators, Terms) ->
+ case lists:foldl(
+ fun (Mod, {ok, TermsLeft}) ->
+ ModKeys = proplists:get_all_values(Mod, Validators),
+ case [T || {Key, _} = T <- TermsLeft,
+ lists:member(Key, ModKeys)] of
+ [] -> {ok, TermsLeft};
+ Scope -> {Mod:validate_policy(Scope), TermsLeft -- Scope}
+ end;
+ (_, Acc) ->
+ Acc
+ end, {ok, Terms}, proplists:get_keys(Validators)) of
+ {ok, []} ->
+ ok;
+ {ok, Unvalidated} ->
+ {error, "~p are not recognised policy settings", [Unvalidated]};
+ {Error, _} ->
+ Error
+ end.
+
+a2b(A) -> list_to_binary(atom_to_list(A)).
+
+dups(L) -> L -- lists:usort(L).
+
+is_proplist(L) -> length(L) =:= length([I || I = {_, _} <- L]).
+
+apply_to_validation(_Name, <<"all">>) -> ok;
+apply_to_validation(_Name, <<"exchanges">>) -> ok;
+apply_to_validation(_Name, <<"queues">>) -> ok;
+apply_to_validation(_Name, Term) ->
+ {error, "apply-to '~s' unrecognised; should be 'queues', 'exchanges' "
+ "or 'all'", [Term]}.
diff --git a/deps/rabbit/src/rabbit_policy_merge_strategy.erl b/deps/rabbit/src/rabbit_policy_merge_strategy.erl
new file mode 100644
index 0000000000..f2b79e5862
--- /dev/null
+++ b/deps/rabbit/src/rabbit_policy_merge_strategy.erl
@@ -0,0 +1,19 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_policy_merge_strategy).
+
+-behaviour(rabbit_registry_class).
+
+-export([added_to_rabbit_registry/2, removed_from_rabbit_registry/1]).
+
+-callback merge_policy_value(binary(), Value, Value) ->
+ Value
+ when Value :: term().
+
+added_to_rabbit_registry(_Type, _ModuleName) -> ok.
+removed_from_rabbit_registry(_Type) -> ok.
diff --git a/deps/rabbit/src/rabbit_prelaunch_cluster.erl b/deps/rabbit/src/rabbit_prelaunch_cluster.erl
new file mode 100644
index 0000000000..9d3cda99e3
--- /dev/null
+++ b/deps/rabbit/src/rabbit_prelaunch_cluster.erl
@@ -0,0 +1,22 @@
+-module(rabbit_prelaunch_cluster).
+
+-export([setup/1]).
+
+setup(Context) ->
+ rabbit_log_prelaunch:debug(""),
+ rabbit_log_prelaunch:debug("== Clustering =="),
+ rabbit_log_prelaunch:debug("Preparing cluster status files"),
+ rabbit_node_monitor:prepare_cluster_status_files(),
+ case Context of
+ #{initial_pass := true} ->
+ rabbit_log_prelaunch:debug("Upgrading Mnesia schema"),
+ ok = rabbit_upgrade:maybe_upgrade_mnesia();
+ _ ->
+ ok
+ end,
+ %% It's important that the consistency check happens after
+ %% the upgrade, since if we are a secondary node the
+ %% primary node will have forgotten us
+ rabbit_log_prelaunch:debug("Checking cluster consistency"),
+ rabbit_mnesia:check_cluster_consistency(),
+ ok.
diff --git a/deps/rabbit/src/rabbit_prelaunch_enabled_plugins_file.erl b/deps/rabbit/src/rabbit_prelaunch_enabled_plugins_file.erl
new file mode 100644
index 0000000000..57fe32f8e6
--- /dev/null
+++ b/deps/rabbit/src/rabbit_prelaunch_enabled_plugins_file.erl
@@ -0,0 +1,53 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_prelaunch_enabled_plugins_file).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-export([setup/1]).
+
+setup(Context) ->
+ rabbit_log_prelaunch:debug(""),
+ rabbit_log_prelaunch:debug("== Enabled plugins file =="),
+ update_enabled_plugins_file(Context).
+
+%% -------------------------------------------------------------------
+%% `enabled_plugins` file content initialization.
+%% -------------------------------------------------------------------
+
+update_enabled_plugins_file(#{enabled_plugins := undefined}) ->
+ ok;
+update_enabled_plugins_file(#{enabled_plugins := all,
+ plugins_path := Path} = Context) ->
+ List = [P#plugin.name || P <- rabbit_plugins:list(Path)],
+ do_update_enabled_plugins_file(Context, List);
+update_enabled_plugins_file(#{enabled_plugins := List} = Context) ->
+ do_update_enabled_plugins_file(Context, List).
+
+do_update_enabled_plugins_file(#{enabled_plugins_file := File}, List) ->
+ SortedList = lists:usort(List),
+ case SortedList of
+ [] ->
+ rabbit_log_prelaunch:debug("Marking all plugins as disabled");
+ _ ->
+ rabbit_log_prelaunch:debug(
+ "Marking the following plugins as enabled:"),
+ [rabbit_log_prelaunch:debug(" - ~s", [P]) || P <- SortedList]
+ end,
+ Content = io_lib:format("~p.~n", [SortedList]),
+ case file:write_file(File, Content) of
+ ok ->
+ rabbit_log_prelaunch:debug("Wrote plugins file: ~ts", [File]),
+ ok;
+ {error, Reason} ->
+ rabbit_log_prelaunch:error(
+ "Failed to update enabled plugins file \"~ts\" "
+ "from $RABBITMQ_ENABLED_PLUGINS: ~ts",
+ [File, file:format_error(Reason)]),
+ throw({error, failed_to_update_enabled_plugins_file})
+ end.
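+
+%% For instance, with List = [rabbitmq_management] the file ends up
+%% containing the single Erlang term:
+%%
+%%   [rabbitmq_management].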
diff --git a/deps/rabbit/src/rabbit_prelaunch_feature_flags.erl b/deps/rabbit/src/rabbit_prelaunch_feature_flags.erl
new file mode 100644
index 0000000000..cd7b276f4c
--- /dev/null
+++ b/deps/rabbit/src/rabbit_prelaunch_feature_flags.erl
@@ -0,0 +1,32 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_prelaunch_feature_flags).
+
+-export([setup/1]).
+
+setup(#{feature_flags_file := FFFile}) ->
+ rabbit_log_prelaunch:debug(""),
+ rabbit_log_prelaunch:debug("== Feature flags =="),
+ case filelib:ensure_dir(FFFile) of
+ ok ->
+ rabbit_log_prelaunch:debug("Initializing feature flags registry"),
+ case rabbit_feature_flags:initialize_registry() of
+ ok ->
+ ok;
+ {error, Reason} ->
+ rabbit_log_prelaunch:error(
+ "Failed to initialize feature flags registry: ~p",
+ [Reason]),
+ throw({error, failed_to_initialize_feature_flags_registry})
+ end;
+ {error, Reason} ->
+ rabbit_log_prelaunch:error(
+ "Failed to create feature flags file \"~ts\" directory: ~ts",
+ [FFFile, file:format_error(Reason)]),
+ throw({error, failed_to_create_feature_flags_file_directory})
+ end.
diff --git a/deps/rabbit/src/rabbit_prelaunch_logging.erl b/deps/rabbit/src/rabbit_prelaunch_logging.erl
new file mode 100644
index 0000000000..6e3f040ec5
--- /dev/null
+++ b/deps/rabbit/src/rabbit_prelaunch_logging.erl
@@ -0,0 +1,75 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_prelaunch_logging).
+
+-export([setup/1]).
+
+setup(Context) ->
+ rabbit_log_prelaunch:debug(""),
+ rabbit_log_prelaunch:debug("== Logging =="),
+ ok = set_ERL_CRASH_DUMP_envvar(Context),
+ ok = configure_lager(Context).
+
+set_ERL_CRASH_DUMP_envvar(#{log_base_dir := LogBaseDir}) ->
+ case os:getenv("ERL_CRASH_DUMP") of
+ false ->
+ ErlCrashDump = filename:join(LogBaseDir, "erl_crash.dump"),
+ rabbit_log_prelaunch:debug(
+ "Setting $ERL_CRASH_DUMP environment variable to \"~ts\"",
+ [ErlCrashDump]),
+ os:putenv("ERL_CRASH_DUMP", ErlCrashDump),
+ ok;
+ ErlCrashDump ->
+ rabbit_log_prelaunch:debug(
+ "$ERL_CRASH_DUMP environment variable already set to \"~ts\"",
+ [ErlCrashDump]),
+ ok
+ end.
+
+configure_lager(#{log_base_dir := LogBaseDir,
+ main_log_file := MainLog,
+ upgrade_log_file := UpgradeLog} = Context) ->
+ {SaslErrorLogger,
+ MainLagerHandler,
+ UpgradeLagerHandler} = case MainLog of
+ "-" ->
+ %% Log to STDOUT.
+ rabbit_log_prelaunch:debug(
+ "Logging to stdout"),
+ {tty,
+ tty,
+ tty};
+ _ ->
+ rabbit_log_prelaunch:debug(
+ "Logging to:"),
+ [rabbit_log_prelaunch:debug(
+ " - ~ts", [Log])
+ || Log <- [MainLog, UpgradeLog]],
+ %% Log to file.
+ {false,
+ MainLog,
+ UpgradeLog}
+ end,
+
+ ok = application:set_env(lager, crash_log, "log/crash.log"),
+
+ Fun = fun({App, Var, Value}) ->
+ case application:get_env(App, Var) of
+ undefined -> ok = application:set_env(App, Var, Value);
+ _ -> ok
+ end
+ end,
+ Vars = [{sasl, sasl_error_logger, SaslErrorLogger},
+ {rabbit, lager_log_root, LogBaseDir},
+ {rabbit, lager_default_file, MainLagerHandler},
+ {rabbit, lager_upgrade_file, UpgradeLagerHandler}],
+ lists:foreach(Fun, Vars),
+
+ ok = rabbit_lager:start_logger(),
+
+ ok = rabbit_prelaunch_early_logging:setup_early_logging(Context, false).
diff --git a/deps/rabbit/src/rabbit_prequeue.erl b/deps/rabbit/src/rabbit_prequeue.erl
new file mode 100644
index 0000000000..b5af8927c7
--- /dev/null
+++ b/deps/rabbit/src/rabbit_prequeue.erl
@@ -0,0 +1,100 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2010-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_prequeue).
+
+%% This is the initial gen_server that all queue processes start off
+%% as. It handles the decision as to whether we need to start a new
+%% mirror, a new master/unmirrored, or whether we are restarting (and
+%% if so, as what). Thus a crashing queue process can restart from here
+%% and always do the right thing.
+
+-export([start_link/3]).
+
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
+ code_change/3]).
+
+-behaviour(gen_server2).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+-include("amqqueue.hrl").
+
+%%----------------------------------------------------------------------------
+
+-export_type([start_mode/0]).
+
+-type start_mode() :: 'declare' | 'recovery' | 'slave'.
+
+%%----------------------------------------------------------------------------
+
+-spec start_link(amqqueue:amqqueue(), start_mode(), pid())
+ -> rabbit_types:ok_pid_or_error().
+
+start_link(Q, StartMode, Marker) ->
+ gen_server2:start_link(?MODULE, {Q, StartMode, Marker}, []).
+
+%%----------------------------------------------------------------------------
+
+init({Q, StartMode, Marker}) ->
+ init(Q, case {is_process_alive(Marker), StartMode} of
+ {true, slave} -> slave;
+ {true, _} -> master;
+ {false, _} -> restart
+ end).
+
+init(Q, master) -> rabbit_amqqueue_process:init(Q);
+init(Q, slave) -> rabbit_mirror_queue_slave:init(Q);
+
+init(Q0, restart) when ?is_amqqueue(Q0) ->
+ QueueName = amqqueue:get_name(Q0),
+ {ok, Q1} = rabbit_amqqueue:lookup(QueueName),
+ QPid = amqqueue:get_pid(Q1),
+ SPids = amqqueue:get_slave_pids(Q1),
+ LocalOrMasterDown = node(QPid) =:= node()
+ orelse not rabbit_mnesia:on_running_node(QPid),
+ Slaves = [SPid || SPid <- SPids, rabbit_mnesia:is_process_alive(SPid)],
+ case rabbit_mnesia:is_process_alive(QPid) of
+ true -> false = LocalOrMasterDown, %% assertion
+ rabbit_mirror_queue_slave:go(self(), async),
+ rabbit_mirror_queue_slave:init(Q1); %% [1]
+ false -> case LocalOrMasterDown andalso Slaves =:= [] of
+ true -> crash_restart(Q1); %% [2]
+ false -> timer:sleep(25),
+ init(Q1, restart) %% [3]
+ end
+ end.
+%% [1] There is a master on another node. Regardless of whether we
+%% were originally a master or a mirror, we are now a new slave.
+%%
+%% [2] Nothing is alive. We are the last best hope. Try to restart as a master.
+%%
+%% [3] The current master is dead but either there are alive mirrors to
+%% take over or it's all happening on a different node anyway. This is
+%% not a stable situation. Sleep and wait for somebody else to make a
+%% move.
+
+crash_restart(Q0) when ?is_amqqueue(Q0) ->
+ QueueName = amqqueue:get_name(Q0),
+ rabbit_log:error("Restarting crashed ~s.~n", [rabbit_misc:rs(QueueName)]),
+ gen_server2:cast(self(), init),
+ Q1 = amqqueue:set_pid(Q0, self()),
+ rabbit_amqqueue_process:init(Q1).
+
+%%----------------------------------------------------------------------------
+
+%% This gen_server2 always hands over to some other module at the end
+%% of init/1.
+-spec handle_call(_, _, _) -> no_return().
+handle_call(_Msg, _From, _State) -> exit(unreachable).
+-spec handle_cast(_, _) -> no_return().
+handle_cast(_Msg, _State) -> exit(unreachable).
+-spec handle_info(_, _) -> no_return().
+handle_info(_Msg, _State) -> exit(unreachable).
+-spec terminate(_, _) -> no_return().
+terminate(_Reason, _State) -> exit(unreachable).
+-spec code_change(_, _, _) -> no_return().
+code_change(_OldVsn, _State, _Extra) -> exit(unreachable).
diff --git a/deps/rabbit/src/rabbit_priority_queue.erl b/deps/rabbit/src/rabbit_priority_queue.erl
new file mode 100644
index 0000000000..4b41b8dfbd
--- /dev/null
+++ b/deps/rabbit/src/rabbit_priority_queue.erl
@@ -0,0 +1,688 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2015-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_priority_queue).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+-include_lib("rabbit_common/include/rabbit_framing.hrl").
+-include("amqqueue.hrl").
+
+-behaviour(rabbit_backing_queue).
+
+%% Priority queuing is enabled unconditionally. Disabling it after it
+%% has been enabled is dangerous.
+-rabbit_boot_step({?MODULE,
+ [{description, "enable priority queue"},
+ {mfa, {?MODULE, enable, []}},
+ {requires, pre_boot},
+ {enables, kernel_ready}]}).
+
+-export([enable/0]).
+
+-export([start/2, stop/1]).
+
+-export([init/3, terminate/2, delete_and_terminate/2, delete_crashed/1,
+ purge/1, purge_acks/1,
+ publish/6, publish_delivered/5, discard/4, drain_confirmed/1,
+ batch_publish/4, batch_publish_delivered/4,
+ dropwhile/2, fetchwhile/4, fetch/2, drop/2, ack/2, requeue/2,
+ ackfold/4, fold/3, len/1, is_empty/1, depth/1,
+ set_ram_duration_target/2, ram_duration/1, needs_timeout/1, timeout/1,
+ handle_pre_hibernate/1, resume/1, msg_rates/1,
+ info/2, invoke/3, is_duplicate/2, set_queue_mode/2,
+ zip_msgs_and_acks/4, handle_info/2]).
+
+-record(state, {bq, bqss, max_priority}).
+-record(passthrough, {bq, bqs}).
+
+%% See 'note on suffixes' below
+-define(passthrough1(F), State#passthrough{bqs = BQ:F}).
+-define(passthrough2(F),
+ {Res, BQS1} = BQ:F, {Res, State#passthrough{bqs = BQS1}}).
+-define(passthrough3(F),
+ {Res1, Res2, BQS1} = BQ:F, {Res1, Res2, State#passthrough{bqs = BQS1}}).
+
+%% This module adds support for priority queues.
+%%
+%% Priority queues have one backing queue per priority. The functions in
+%% this module fan each operation out to the backing queue for each
+%% priority and fold the results back together in priority order.
+%%
+%% For queues that do not have priorities enabled, the functions in this
+%% module delegate to their "regular" backing queue module counterparts.
+%% See the `passthrough` record and the passthrough{1,2,3} macros.
+%%
+%% Delivery to consumers happens by first "running" the queue with
+%% the highest priority until there are no more messages to deliver,
+%% then the next one, and so on. This offers good prioritisation
+%% but may result in lower priority messages not being delivered
+%% when there's a high ingress rate of messages with higher priority.
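+%%
+%% As an illustrative sketch (not part of the module), a queue declared
+%% with x-max-priority = 3 carries a state of roughly this shape:
+%%
+%%   #state{bq           = rabbit_variable_queue,
+%%          bqss         = [{3, BQS3}, {2, BQS2}, {1, BQS1}, {0, BQS0}],
+%%          max_priority = 3}
+%%
+%% while a queue without priorities is wrapped as
+%%
+%%   #passthrough{bq = rabbit_variable_queue, bqs = BQS}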
+
+enable() ->
+ {ok, RealBQ} = application:get_env(rabbit, backing_queue_module),
+ case RealBQ of
+ ?MODULE -> ok;
+ _ -> rabbit_log:info("Priority queues enabled, real BQ is ~s~n",
+ [RealBQ]),
+ application:set_env(
+ rabbitmq_priority_queue, backing_queue_module, RealBQ),
+ application:set_env(rabbit, backing_queue_module, ?MODULE)
+ end.
+
+%%----------------------------------------------------------------------------
+
+start(VHost, QNames) ->
+ BQ = bq(),
+ %% TODO this expand-collapse dance is a bit ridiculous but it's what
+ %% rabbit_amqqueue:recover/0 expects. We could probably simplify
+ %% this if we rejigged recovery a bit.
+ {DupNames, ExpNames} = expand_queues(QNames),
+ case BQ:start(VHost, ExpNames) of
+ {ok, ExpRecovery} ->
+ {ok, collapse_recovery(QNames, DupNames, ExpRecovery)};
+ Else ->
+ Else
+ end.
+
+stop(VHost) ->
+ BQ = bq(),
+ BQ:stop(VHost).
+
+%%----------------------------------------------------------------------------
+
+mutate_name(P, Q) when ?is_amqqueue(Q) ->
+ Res0 = #resource{name = QNameBin0} = amqqueue:get_name(Q),
+ QNameBin1 = mutate_name_bin(P, QNameBin0),
+ Res1 = Res0#resource{name = QNameBin1},
+ amqqueue:set_name(Q, Res1).
+
+mutate_name_bin(P, NameBin) ->
+ <<NameBin/binary, 0, P:8>>.
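+
+%% For example (illustrative): mutate_name_bin(3, <<"orders">>) yields
+%% <<"orders", 0, 3>> - a NUL separator plus the priority byte is
+%% appended, giving each priority its own distinct backing queue name.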
+
+expand_queues(QNames) ->
+ lists:unzip(
+ lists:append([expand_queue(QName) || QName <- QNames])).
+
+expand_queue(QName = #resource{name = QNameBin}) ->
+ {ok, Q} = rabbit_misc:dirty_read({rabbit_durable_queue, QName}),
+ case priorities(Q) of
+ none -> [{QName, QName}];
+ Ps -> [{QName, QName#resource{name = mutate_name_bin(P, QNameBin)}}
+ || P <- Ps]
+ end.
+
+collapse_recovery(QNames, DupNames, Recovery) ->
+ NameToTerms = lists:foldl(fun({Name, RecTerm}, Dict) ->
+ dict:append(Name, RecTerm, Dict)
+ end, dict:new(), lists:zip(DupNames, Recovery)),
+ [dict:fetch(Name, NameToTerms) || Name <- QNames].
+
+priorities(Q) when ?is_amqqueue(Q) ->
+ Args = amqqueue:get_arguments(Q),
+ Ints = [long, short, signedint, byte, unsignedbyte, unsignedshort, unsignedint],
+ case rabbit_misc:table_lookup(Args, <<"x-max-priority">>) of
+ {Type, RequestedMax} ->
+ case lists:member(Type, Ints) of
+ false -> none;
+ true ->
+ Max = min(RequestedMax, ?MAX_SUPPORTED_PRIORITY),
+ lists:reverse(lists:seq(0, Max))
+ end;
+ _ -> none
+ end.
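+
+%% For example (illustrative): a queue declared with the argument
+%% {<<"x-max-priority">>, long, 3} yields [3, 2, 1, 0], highest
+%% priority first; requested maxima above ?MAX_SUPPORTED_PRIORITY are
+%% clamped to it.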
+
+%%----------------------------------------------------------------------------
+
+init(Q, Recover, AsyncCallback) ->
+ BQ = bq(),
+ case priorities(Q) of
+ none -> RealRecover = case Recover of
+ [R] -> R; %% [0]
+ R -> R
+ end,
+ #passthrough{bq = BQ,
+ bqs = BQ:init(Q, RealRecover, AsyncCallback)};
+ Ps -> Init = fun (P, Term) ->
+ BQ:init(
+ mutate_name(P, Q), Term,
+ fun (M, F) -> AsyncCallback(M, {P, F}) end)
+ end,
+ BQSs = case have_recovery_terms(Recover) of
+ false -> [{P, Init(P, Recover)} || P <- Ps];
+ _ -> PsTerms = lists:zip(Ps, Recover),
+ [{P, Init(P, Term)} || {P, Term} <- PsTerms]
+ end,
+ #state{bq = BQ,
+ bqss = BQSs,
+ max_priority = hd(Ps)}
+ end.
+%% [0] collapse_recovery has the effect of making a list of recovery
+%% terms in priority order, even for non-priority queues. It's easier
+%% to do that and "unwrap" in init/3 than to have collapse_recovery be
+%% aware of non-priority queues.
+
+have_recovery_terms(new) -> false;
+have_recovery_terms(non_clean_shutdown) -> false;
+have_recovery_terms(_) -> true.
+
+terminate(Reason, State = #state{bq = BQ}) ->
+ foreach1(fun (_P, BQSN) -> BQ:terminate(Reason, BQSN) end, State);
+terminate(Reason, State = #passthrough{bq = BQ, bqs = BQS}) ->
+ ?passthrough1(terminate(Reason, BQS)).
+
+delete_and_terminate(Reason, State = #state{bq = BQ}) ->
+ foreach1(fun (_P, BQSN) ->
+ BQ:delete_and_terminate(Reason, BQSN)
+ end, State);
+delete_and_terminate(Reason, State = #passthrough{bq = BQ, bqs = BQS}) ->
+ ?passthrough1(delete_and_terminate(Reason, BQS)).
+
+delete_crashed(Q) ->
+ BQ = bq(),
+ case priorities(Q) of
+ none -> BQ:delete_crashed(Q);
+ Ps -> [BQ:delete_crashed(mutate_name(P, Q)) || P <- Ps]
+ end.
+
+purge(State = #state{bq = BQ}) ->
+ fold_add2(fun (_P, BQSN) -> BQ:purge(BQSN) end, State);
+purge(State = #passthrough{bq = BQ, bqs = BQS}) ->
+ ?passthrough2(purge(BQS)).
+
+purge_acks(State = #state{bq = BQ}) ->
+ foreach1(fun (_P, BQSN) -> BQ:purge_acks(BQSN) end, State);
+purge_acks(State = #passthrough{bq = BQ, bqs = BQS}) ->
+ ?passthrough1(purge_acks(BQS)).
+
+publish(Msg, MsgProps, IsDelivered, ChPid, Flow, State = #state{bq = BQ}) ->
+ pick1(fun (_P, BQSN) ->
+ BQ:publish(Msg, MsgProps, IsDelivered, ChPid, Flow, BQSN)
+ end, Msg, State);
+publish(Msg, MsgProps, IsDelivered, ChPid, Flow,
+ State = #passthrough{bq = BQ, bqs = BQS}) ->
+ ?passthrough1(publish(Msg, MsgProps, IsDelivered, ChPid, Flow, BQS)).
+
+batch_publish(Publishes, ChPid, Flow, State = #state{bq = BQ, bqss = [{MaxP, _} |_]}) ->
+ PubMap = partition_publish_batch(Publishes, MaxP),
+ lists:foldl(
+ fun ({Priority, Pubs}, St) ->
+ pick1(fun (_P, BQSN) ->
+ BQ:batch_publish(Pubs, ChPid, Flow, BQSN)
+ end, Priority, St)
+ end, State, maps:to_list(PubMap));
+batch_publish(Publishes, ChPid, Flow,
+ State = #passthrough{bq = BQ, bqs = BQS}) ->
+ ?passthrough1(batch_publish(Publishes, ChPid, Flow, BQS)).
+
+publish_delivered(Msg, MsgProps, ChPid, Flow, State = #state{bq = BQ}) ->
+ pick2(fun (P, BQSN) ->
+ {AckTag, BQSN1} = BQ:publish_delivered(
+ Msg, MsgProps, ChPid, Flow, BQSN),
+ {{P, AckTag}, BQSN1}
+ end, Msg, State);
+publish_delivered(Msg, MsgProps, ChPid, Flow,
+ State = #passthrough{bq = BQ, bqs = BQS}) ->
+ ?passthrough2(publish_delivered(Msg, MsgProps, ChPid, Flow, BQS)).
+
+batch_publish_delivered(Publishes, ChPid, Flow, State = #state{bq = BQ, bqss = [{MaxP, _} |_]}) ->
+ PubMap = partition_publish_delivered_batch(Publishes, MaxP),
+ {PrioritiesAndAcks, State1} =
+ lists:foldl(
+ fun ({Priority, Pubs}, {PriosAndAcks, St}) ->
+ {PriosAndAcks1, St1} =
+ pick2(fun (P, BQSN) ->
+ {AckTags, BQSN1} =
+ BQ:batch_publish_delivered(
+ Pubs, ChPid, Flow, BQSN),
+ {priority_on_acktags(P, AckTags), BQSN1}
+ end, Priority, St),
+ {[PriosAndAcks1 | PriosAndAcks], St1}
+ end, {[], State}, maps:to_list(PubMap)),
+ {lists:reverse(PrioritiesAndAcks), State1};
+batch_publish_delivered(Publishes, ChPid, Flow,
+ State = #passthrough{bq = BQ, bqs = BQS}) ->
+ ?passthrough2(batch_publish_delivered(Publishes, ChPid, Flow, BQS)).
+
+%% TODO this is a hack. The BQ API does not give us enough information
+%% here - if we had the Msg we could look at its priority and forward
+%% it to the appropriate sub-BQ. But we don't, so we are stuck.
+%%
+%% Fortunately VQ ignores discard/4, so we can too, *assuming we are
+%% talking to VQ*. discard/4 is used by HA, but that's "above" us (if in
+%% use), so we don't break that either - only some hypothetical
+%% alternate BQ implementation would be affected.
+discard(_MsgId, _ChPid, _Flow, State = #state{}) ->
+ State;
+ %% We should have something a bit like this here:
+ %% pick1(fun (_P, BQSN) ->
+ %% BQ:discard(MsgId, ChPid, Flow, BQSN)
+ %% end, Msg, State);
+discard(MsgId, ChPid, Flow, State = #passthrough{bq = BQ, bqs = BQS}) ->
+ ?passthrough1(discard(MsgId, ChPid, Flow, BQS)).
+
+drain_confirmed(State = #state{bq = BQ}) ->
+ fold_append2(fun (_P, BQSN) -> BQ:drain_confirmed(BQSN) end, State);
+drain_confirmed(State = #passthrough{bq = BQ, bqs = BQS}) ->
+ ?passthrough2(drain_confirmed(BQS)).
+
+dropwhile(Pred, State = #state{bq = BQ}) ->
+ find2(fun (_P, BQSN) -> BQ:dropwhile(Pred, BQSN) end, undefined, State);
+dropwhile(Pred, State = #passthrough{bq = BQ, bqs = BQS}) ->
+ ?passthrough2(dropwhile(Pred, BQS)).
+
+%% TODO this is a bit nasty. In the one place where fetchwhile/4 is
+%% actually used, the accumulator is a list of acktags, which we need
+%% to mutate - so we do that, although we are encoding an assumption
+%% about the caller here.
+fetchwhile(Pred, Fun, Acc, State = #state{bq = BQ}) ->
+ findfold3(
+ fun (P, BQSN, AccN) ->
+ {Res, AccN1, BQSN1} = BQ:fetchwhile(Pred, Fun, AccN, BQSN),
+ {Res, priority_on_acktags(P, AccN1), BQSN1}
+ end, Acc, undefined, State);
+fetchwhile(Pred, Fun, Acc, State = #passthrough{bq = BQ, bqs = BQS}) ->
+ ?passthrough3(fetchwhile(Pred, Fun, Acc, BQS)).
+
+fetch(AckRequired, State = #state{bq = BQ}) ->
+ find2(
+ fun (P, BQSN) ->
+ case BQ:fetch(AckRequired, BQSN) of
+ {empty, BQSN1} -> {empty, BQSN1};
+ {{Msg, Del, ATag}, BQSN1} -> {{Msg, Del, {P, ATag}}, BQSN1}
+ end
+ end, empty, State);
+fetch(AckRequired, State = #passthrough{bq = BQ, bqs = BQS}) ->
+ ?passthrough2(fetch(AckRequired, BQS)).
+
+drop(AckRequired, State = #state{bq = BQ}) ->
+ find2(fun (P, BQSN) ->
+ case BQ:drop(AckRequired, BQSN) of
+ {empty, BQSN1} -> {empty, BQSN1};
+ {{MsgId, AckTag}, BQSN1} -> {{MsgId, {P, AckTag}}, BQSN1}
+ end
+ end, empty, State);
+drop(AckRequired, State = #passthrough{bq = BQ, bqs = BQS}) ->
+ ?passthrough2(drop(AckRequired, BQS)).
+
+ack(AckTags, State = #state{bq = BQ}) ->
+ fold_by_acktags2(fun (AckTagsN, BQSN) ->
+ BQ:ack(AckTagsN, BQSN)
+ end, AckTags, State);
+ack(AckTags, State = #passthrough{bq = BQ, bqs = BQS}) ->
+ ?passthrough2(ack(AckTags, BQS)).
+
+requeue(AckTags, State = #state{bq = BQ}) ->
+ fold_by_acktags2(fun (AckTagsN, BQSN) ->
+ BQ:requeue(AckTagsN, BQSN)
+ end, AckTags, State);
+requeue(AckTags, State = #passthrough{bq = BQ, bqs = BQS}) ->
+ ?passthrough2(requeue(AckTags, BQS)).
+
+%% Similar problem to fetchwhile/4
+ackfold(MsgFun, Acc, State = #state{bq = BQ}, AckTags) ->
+ AckTagsByPriority = partition_acktags(AckTags),
+ fold2(
+ fun (P, BQSN, AccN) ->
+ case maps:find(P, AckTagsByPriority) of
+ {ok, ATagsN} -> {AccN1, BQSN1} =
+ BQ:ackfold(MsgFun, AccN, BQSN, ATagsN),
+ {priority_on_acktags(P, AccN1), BQSN1};
+ error -> {AccN, BQSN}
+ end
+ end, Acc, State);
+ackfold(MsgFun, Acc, State = #passthrough{bq = BQ, bqs = BQS}, AckTags) ->
+ ?passthrough2(ackfold(MsgFun, Acc, BQS, AckTags)).
+
+fold(Fun, Acc, State = #state{bq = BQ}) ->
+ fold2(fun (_P, BQSN, AccN) -> BQ:fold(Fun, AccN, BQSN) end, Acc, State);
+fold(Fun, Acc, State = #passthrough{bq = BQ, bqs = BQS}) ->
+ ?passthrough2(fold(Fun, Acc, BQS)).
+
+len(#state{bq = BQ, bqss = BQSs}) ->
+ add0(fun (_P, BQSN) -> BQ:len(BQSN) end, BQSs);
+len(#passthrough{bq = BQ, bqs = BQS}) ->
+ BQ:len(BQS).
+
+is_empty(#state{bq = BQ, bqss = BQSs}) ->
+ all0(fun (_P, BQSN) -> BQ:is_empty(BQSN) end, BQSs);
+is_empty(#passthrough{bq = BQ, bqs = BQS}) ->
+ BQ:is_empty(BQS).
+
+depth(#state{bq = BQ, bqss = BQSs}) ->
+ add0(fun (_P, BQSN) -> BQ:depth(BQSN) end, BQSs);
+depth(#passthrough{bq = BQ, bqs = BQS}) ->
+ BQ:depth(BQS).
+
+set_ram_duration_target(DurationTarget, State = #state{bq = BQ}) ->
+ foreach1(fun (_P, BQSN) ->
+ BQ:set_ram_duration_target(DurationTarget, BQSN)
+ end, State);
+set_ram_duration_target(DurationTarget,
+ State = #passthrough{bq = BQ, bqs = BQS}) ->
+ ?passthrough1(set_ram_duration_target(DurationTarget, BQS)).
+
+ram_duration(State = #state{bq = BQ}) ->
+ fold_min2(fun (_P, BQSN) -> BQ:ram_duration(BQSN) end, State);
+ram_duration(State = #passthrough{bq = BQ, bqs = BQS}) ->
+ ?passthrough2(ram_duration(BQS)).
+
+needs_timeout(#state{bq = BQ, bqss = BQSs}) ->
+ fold0(fun (_P, _BQSN, timed) -> timed;
+ (_P, BQSN, idle) -> case BQ:needs_timeout(BQSN) of
+ timed -> timed;
+ _ -> idle
+ end;
+ (_P, BQSN, false) -> BQ:needs_timeout(BQSN)
+ end, false, BQSs);
+needs_timeout(#passthrough{bq = BQ, bqs = BQS}) ->
+ BQ:needs_timeout(BQS).
+
+timeout(State = #state{bq = BQ}) ->
+ foreach1(fun (_P, BQSN) -> BQ:timeout(BQSN) end, State);
+timeout(State = #passthrough{bq = BQ, bqs = BQS}) ->
+ ?passthrough1(timeout(BQS)).
+
+handle_pre_hibernate(State = #state{bq = BQ}) ->
+ foreach1(fun (_P, BQSN) ->
+ BQ:handle_pre_hibernate(BQSN)
+ end, State);
+handle_pre_hibernate(State = #passthrough{bq = BQ, bqs = BQS}) ->
+ ?passthrough1(handle_pre_hibernate(BQS)).
+
+handle_info(Msg, State = #state{bq = BQ}) ->
+ foreach1(fun (_P, BQSN) -> BQ:handle_info(Msg, BQSN) end, State);
+handle_info(Msg, State = #passthrough{bq = BQ, bqs = BQS}) ->
+ ?passthrough1(handle_info(Msg, BQS)).
+
+resume(State = #state{bq = BQ}) ->
+ foreach1(fun (_P, BQSN) -> BQ:resume(BQSN) end, State);
+resume(State = #passthrough{bq = BQ, bqs = BQS}) ->
+ ?passthrough1(resume(BQS)).
+
+msg_rates(#state{bq = BQ, bqss = BQSs}) ->
+ fold0(fun(_P, BQSN, {InN, OutN}) ->
+ {In, Out} = BQ:msg_rates(BQSN),
+ {InN + In, OutN + Out}
+ end, {0.0, 0.0}, BQSs);
+msg_rates(#passthrough{bq = BQ, bqs = BQS}) ->
+ BQ:msg_rates(BQS).
+
+info(backing_queue_status, #state{bq = BQ, bqss = BQSs}) ->
+ fold0(fun (P, BQSN, Acc) ->
+ combine_status(P, BQ:info(backing_queue_status, BQSN), Acc)
+ end, nothing, BQSs);
+info(head_message_timestamp, #state{bq = BQ, bqss = BQSs}) ->
+ find_head_message_timestamp(BQ, BQSs, '');
+info(Item, #state{bq = BQ, bqss = BQSs}) ->
+ fold0(fun (_P, BQSN, Acc) ->
+ Acc + BQ:info(Item, BQSN)
+ end, 0, BQSs);
+info(Item, #passthrough{bq = BQ, bqs = BQS}) ->
+ BQ:info(Item, BQS).
+
+invoke(Mod, {P, Fun}, State = #state{bq = BQ}) ->
+ pick1(fun (_P, BQSN) -> BQ:invoke(Mod, Fun, BQSN) end, P, State);
+invoke(Mod, Fun, State = #state{bq = BQ, max_priority = P}) ->
+ pick1(fun (_P, BQSN) -> BQ:invoke(Mod, Fun, BQSN) end, P, State);
+invoke(Mod, Fun, State = #passthrough{bq = BQ, bqs = BQS}) ->
+ ?passthrough1(invoke(Mod, Fun, BQS)).
+
+is_duplicate(Msg, State = #state{bq = BQ}) ->
+ pick2(fun (_P, BQSN) -> BQ:is_duplicate(Msg, BQSN) end, Msg, State);
+is_duplicate(Msg, State = #passthrough{bq = BQ, bqs = BQS}) ->
+ ?passthrough2(is_duplicate(Msg, BQS)).
+
+set_queue_mode(Mode, State = #state{bq = BQ}) ->
+ foreach1(fun (_P, BQSN) -> BQ:set_queue_mode(Mode, BQSN) end, State);
+set_queue_mode(Mode, State = #passthrough{bq = BQ, bqs = BQS}) ->
+ ?passthrough1(set_queue_mode(Mode, BQS)).
+
+zip_msgs_and_acks(Msgs, AckTags, Accumulator, #state{bqss = [{MaxP, _} |_]}) ->
+ MsgsByPriority = partition_publish_delivered_batch(Msgs, MaxP),
+ lists:foldl(fun (Acks, MAs) ->
+ {P, _AckTag} = hd(Acks),
+ Pubs = maps:get(P, MsgsByPriority),
+ MAs0 = zip_msgs_and_acks(Pubs, Acks),
+ MAs ++ MAs0
+ end, Accumulator, AckTags);
+zip_msgs_and_acks(Msgs, AckTags, Accumulator,
+ #passthrough{bq = BQ, bqs = BQS}) ->
+ BQ:zip_msgs_and_acks(Msgs, AckTags, Accumulator, BQS).
+
+%%----------------------------------------------------------------------------
+
+bq() ->
+ {ok, RealBQ} = application:get_env(
+ rabbitmq_priority_queue, backing_queue_module),
+ RealBQ.
+
+%% Note on suffixes: Many utility functions here have suffixes telling
+%% you the arity of the return type of the BQ function they are
+%% designed to work with.
+%%
+%% 0 - BQ function returns a value and does not modify state
+%% 1 - BQ function just returns a new state
+%% 2 - BQ function returns a 2-tuple of {Result, NewState}
+%% 3 - BQ function returns a 3-tuple of {Result1, Result2, NewState}
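+%%
+%% For example (illustrative): BQ:is_empty/1 returns a plain boolean and
+%% is driven through all0/fold0; BQ:purge/1 returns {Count, BQS} and is
+%% driven through fold_add2; BQ:fetchwhile/4 returns {Res, Acc, BQS} and
+%% is driven through findfold3.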
+
+%% Fold over results
+fold0(Fun, Acc, [{P, BQSN} | Rest]) -> fold0(Fun, Fun(P, BQSN, Acc), Rest);
+fold0(_Fun, Acc, []) -> Acc.
+
+%% Do all BQs match?
+all0(Pred, BQSs) -> fold0(fun (_P, _BQSN, false) -> false;
+ (P, BQSN, true) -> Pred(P, BQSN)
+ end, true, BQSs).
+
+%% Sum results
+add0(Fun, BQSs) -> fold0(fun (P, BQSN, Acc) -> Acc + Fun(P, BQSN) end, 0, BQSs).
+
+%% Apply for all states
+foreach1(Fun, State = #state{bqss = BQSs}) ->
+ a(State#state{bqss = foreach1(Fun, BQSs, [])}).
+foreach1(Fun, [{Priority, BQSN} | Rest], BQSAcc) ->
+ BQSN1 = Fun(Priority, BQSN),
+ foreach1(Fun, Rest, [{Priority, BQSN1} | BQSAcc]);
+foreach1(_Fun, [], BQSAcc) ->
+ lists:reverse(BQSAcc).
+
+%% For a given thing, just go to its BQ
+pick1(Fun, Prioritisable, #state{bqss = BQSs} = State) ->
+ {P, BQSN} = priority_bq(Prioritisable, BQSs),
+ a(State#state{bqss = bq_store(P, Fun(P, BQSN), BQSs)}).
+
+%% Fold over results
+fold2(Fun, Acc, State = #state{bqss = BQSs}) ->
+ {Res, BQSs1} = fold2(Fun, Acc, BQSs, []),
+ {Res, a(State#state{bqss = BQSs1})}.
+
+fold2(Fun, Acc, [{P, BQSN} | Rest], BQSAcc) ->
+ {Acc1, BQSN1} = Fun(P, BQSN, Acc),
+ fold2(Fun, Acc1, Rest, [{P, BQSN1} | BQSAcc]);
+fold2(_Fun, Acc, [], BQSAcc) ->
+ {Acc, lists:reverse(BQSAcc)}.
+
+%% Fold over results assuming results are lists and we want to append them
+fold_append2(Fun, State) ->
+ fold2(fun (P, BQSN, Acc) ->
+ {Res, BQSN1} = Fun(P, BQSN),
+ {Res ++ Acc, BQSN1}
+ end, [], State).
+
+%% Fold over results assuming results are numbers and we want to sum them
+fold_add2(Fun, State) ->
+ fold2(fun (P, BQSN, Acc) ->
+ {Res, BQSN1} = Fun(P, BQSN),
+ {add_maybe_infinity(Res, Acc), BQSN1}
+ end, 0, State).
+
+%% Fold over results assuming results are numbers and we want the minimum
+fold_min2(Fun, State) ->
+ fold2(fun (P, BQSN, Acc) ->
+ {Res, BQSN1} = Fun(P, BQSN),
+ {erlang:min(Res, Acc), BQSN1}
+ end, infinity, State).
+
+%% Fold over results assuming results are lists and we want to append
+%% them, and also that we have some AckTags we want to pass in to each
+%% invocation.
+fold_by_acktags2(Fun, AckTags, State) ->
+ AckTagsByPriority = partition_acktags(AckTags),
+ fold_append2(fun (P, BQSN) ->
+ case maps:find(P, AckTagsByPriority) of
+ {ok, AckTagsN} -> Fun(AckTagsN, BQSN);
+ error -> {[], BQSN}
+ end
+ end, State).
+
+%% For a given thing, just go to its BQ
+pick2(Fun, Prioritisable, #state{bqss = BQSs} = State) ->
+ {P, BQSN} = priority_bq(Prioritisable, BQSs),
+ {Res, BQSN1} = Fun(P, BQSN),
+ {Res, a(State#state{bqss = bq_store(P, BQSN1, BQSs)})}.
+
+%% Run through BQs in priority order until one does not return
+%% {NotFound, NewState} or we have gone through them all.
+find2(Fun, NotFound, State = #state{bqss = BQSs}) ->
+ {Res, BQSs1} = find2(Fun, NotFound, BQSs, []),
+ {Res, a(State#state{bqss = BQSs1})}.
+find2(Fun, NotFound, [{P, BQSN} | Rest], BQSAcc) ->
+ case Fun(P, BQSN) of
+ {NotFound, BQSN1} -> find2(Fun, NotFound, Rest, [{P, BQSN1} | BQSAcc]);
+ {Res, BQSN1} -> {Res, lists:reverse([{P, BQSN1} | BQSAcc]) ++ Rest}
+ end;
+find2(_Fun, NotFound, [], BQSAcc) ->
+ {NotFound, lists:reverse(BQSAcc)}.
+
+%% Run through BQs in priority order like find2 but also folding as we go.
+findfold3(Fun, Acc, NotFound, State = #state{bqss = BQSs}) ->
+ {Res, Acc1, BQSs1} = findfold3(Fun, Acc, NotFound, BQSs, []),
+ {Res, Acc1, a(State#state{bqss = BQSs1})}.
+findfold3(Fun, Acc, NotFound, [{P, BQSN} | Rest], BQSAcc) ->
+ case Fun(P, BQSN, Acc) of
+ {NotFound, Acc1, BQSN1} ->
+ findfold3(Fun, Acc1, NotFound, Rest, [{P, BQSN1} | BQSAcc]);
+ {Res, Acc1, BQSN1} ->
+ {Res, Acc1, lists:reverse([{P, BQSN1} | BQSAcc]) ++ Rest}
+ end;
+findfold3(_Fun, Acc, NotFound, [], BQSAcc) ->
+ {NotFound, Acc, lists:reverse(BQSAcc)}.
+
+bq_fetch(P, []) -> exit({not_found, P});
+bq_fetch(P, [{P, BQSN} | _]) -> {P, BQSN};
+bq_fetch(P, [{_, _BQSN} | T]) -> bq_fetch(P, T).
+
+bq_store(P, BQS, BQSs) ->
+ [{PN, case PN of
+ P -> BQS;
+ _ -> BQSN
+ end} || {PN, BQSN} <- BQSs].
+
+%% Assertion: the per-priority BQs must stay in strictly descending
+%% priority order; exits with bad_order if the invariant is broken.
+a(State = #state{bqss = BQSs}) ->
+ Ps = [P || {P, _} <- BQSs],
+ case lists:reverse(lists:usort(Ps)) of
+ Ps -> State;
+ _ -> exit({bad_order, Ps})
+ end.
+
+%%----------------------------------------------------------------------------
+
+partition_publish_batch(Publishes, MaxP) ->
+ partition_publishes(
+ Publishes, fun ({Msg, _, _}) -> Msg end, MaxP).
+
+partition_publish_delivered_batch(Publishes, MaxP) ->
+ partition_publishes(
+ Publishes, fun ({Msg, _}) -> Msg end, MaxP).
+
+partition_publishes(Publishes, ExtractMsg, MaxP) ->
+ Partitioned =
+ lists:foldl(fun (Pub, Dict) ->
+ Msg = ExtractMsg(Pub),
+ rabbit_misc:maps_cons(priority(Msg, MaxP), Pub, Dict)
+ end, maps:new(), Publishes),
+ maps:map(fun (_P, RevPubs) ->
+ lists:reverse(RevPubs)
+ end, Partitioned).
+
+priority_bq(Priority, [{MaxP, _} | _] = BQSs) ->
+ bq_fetch(priority(Priority, MaxP), BQSs).
+
+%% Messages with a priority which is higher than the queue's maximum are treated
+%% as if they were published with the maximum priority.
+priority(undefined, _MaxP) ->
+ 0;
+priority(Priority, MaxP) when is_integer(Priority), Priority =< MaxP ->
+ Priority;
+priority(Priority, MaxP) when is_integer(Priority), Priority > MaxP ->
+ MaxP;
+priority(#basic_message{content = Content}, MaxP) ->
+ priority(rabbit_binary_parser:ensure_content_decoded(Content), MaxP);
+priority(#content{properties = Props}, MaxP) ->
+ #'P_basic'{priority = Priority0} = Props,
+ priority(Priority0, MaxP).
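+
+%% E.g. (illustrative): priority(7, 5) -> 5, priority(3, 5) -> 3 and
+%% priority(undefined, 5) -> 0.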
+
+add_maybe_infinity(infinity, _) -> infinity;
+add_maybe_infinity(_, infinity) -> infinity;
+add_maybe_infinity(A, B) -> A + B.
+
+partition_acktags(AckTags) -> partition_acktags(AckTags, maps:new()).
+
+partition_acktags([], Partitioned) ->
+ maps:map(fun (_P, RevAckTags) ->
+ lists:reverse(RevAckTags)
+ end, Partitioned);
+partition_acktags([{P, AckTag} | Rest], Partitioned) ->
+ partition_acktags(Rest, rabbit_misc:maps_cons(P, AckTag, Partitioned)).
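+
+%% E.g. (illustrative): partition_acktags([{2, 7}, {0, 9}, {2, 8}])
+%% yields #{2 => [7, 8], 0 => [9]}, preserving per-priority order.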
+
+priority_on_acktags(P, AckTags) ->
+ [case Tag of
+ _ when is_integer(Tag) -> {P, Tag};
+ _ -> Tag
+ end || Tag <- AckTags].
+
+combine_status(P, New, nothing) ->
+ [{priority_lengths, [{P, proplists:get_value(len, New)}]} | New];
+combine_status(P, New, Old) ->
+ Combined = [{K, cse(V, proplists:get_value(K, Old))} || {K, V} <- New],
+ Lens = [{P, proplists:get_value(len, New)} |
+ proplists:get_value(priority_lengths, Old)],
+ [{priority_lengths, Lens} | Combined].
+
+cse(infinity, _) -> infinity;
+cse(_, infinity) -> infinity;
+%% queue modes
+cse(_, default) -> default;
+cse(default, _) -> default;
+cse(_, lazy) -> lazy;
+cse(lazy, _) -> lazy;
+%% numerical stats
+cse(A, B) when is_number(A) -> A + B;
+cse({delta, _, _, _, _}, _) -> {delta, todo, todo, todo, todo};
+cse(_, _) -> undefined.
+
+%% When asked about 'head_message_timestamp' for this priority queue, we
+%% walk all the backing queues, starting with the highest priority. Once
+%% a backing queue that has messages (ready or unacknowledged) is found,
+%% its 'head_message_timestamp' is returned even if it is null.
+
+find_head_message_timestamp(BQ, [{_, BQSN} | Rest], Timestamp) ->
+ MsgCount = BQ:len(BQSN) + BQ:info(messages_unacknowledged_ram, BQSN),
+ if
+ MsgCount =/= 0 -> BQ:info(head_message_timestamp, BQSN);
+ true -> find_head_message_timestamp(BQ, Rest, Timestamp)
+ end;
+find_head_message_timestamp(_, [], Timestamp) ->
+ Timestamp.
+
+zip_msgs_and_acks(Pubs, AckTags) ->
+ lists:zipwith(
+ fun ({#basic_message{ id = Id }, _Props}, AckTag) ->
+ {Id, AckTag}
+ end, Pubs, AckTags).
diff --git a/deps/rabbit/src/rabbit_queue_consumers.erl b/deps/rabbit/src/rabbit_queue_consumers.erl
new file mode 100644
index 0000000000..4f826f72e8
--- /dev/null
+++ b/deps/rabbit/src/rabbit_queue_consumers.erl
@@ -0,0 +1,568 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_queue_consumers).
+
+-export([new/0, max_active_priority/1, inactive/1, all/1, all/3, count/0,
+ unacknowledged_message_count/0, add/10, remove/3, erase_ch/2,
+ send_drained/0, deliver/5, record_ack/3, subtract_acks/3,
+ possibly_unblock/3,
+ resume_fun/0, notify_sent_fun/1, activate_limit_fun/0,
+ credit/6, utilisation/1, is_same/3, get_consumer/1, get/3,
+ consumer_tag/1, get_infos/1]).
+
+%%----------------------------------------------------------------------------
+
+-define(QUEUE, lqueue).
+
+-define(UNSENT_MESSAGE_LIMIT, 200).
+
+%% Utilisation average calculations are all in μs.
+-define(USE_AVG_HALF_LIFE, 1000000.0).
+
+-record(state, {consumers, use}).
+
+-record(consumer, {tag, ack_required, prefetch, args, user}).
+
+%% These are held in our process dictionary
+-record(cr, {ch_pid,
+ monitor_ref,
+ acktags,
+ consumer_count,
+ %% Queue of {ChPid, #consumer{}} for consumers which have
+ %% been blocked (rate/prefetch limited) for any reason
+ blocked_consumers,
+ %% The limiter itself
+ limiter,
+ %% Internal flow control for queue -> writer
+ unsent_message_count}).
+
+%%----------------------------------------------------------------------------
+
+-type time_micros() :: non_neg_integer().
+-type ratio() :: float().
+-type state() :: #state{consumers :: priority_queue:q(),
+ use :: {'inactive',
+ time_micros(), time_micros(), ratio()} |
+ {'active', time_micros(), ratio()}}.
+-type consumer() :: #consumer{tag::rabbit_types:ctag(), ack_required::boolean(),
+ prefetch::non_neg_integer(), args::rabbit_framing:amqp_table(),
+ user::rabbit_types:username()}.
+-type ch() :: pid().
+-type ack() :: non_neg_integer().
+-type cr_fun() :: fun ((#cr{}) -> #cr{}).
+-type fetch_result() :: {rabbit_types:basic_message(), boolean(), ack()}.
+
+%%----------------------------------------------------------------------------
+
+-spec new() -> state().
+
+new() -> #state{consumers = priority_queue:new(),
+ use = {active,
+ erlang:monotonic_time(micro_seconds),
+ 1.0}}.
+
+-spec max_active_priority(state()) -> integer() | 'infinity' | 'empty'.
+
+max_active_priority(#state{consumers = Consumers}) ->
+ priority_queue:highest(Consumers).
+
+-spec inactive(state()) -> boolean().
+
+inactive(#state{consumers = Consumers}) ->
+ priority_queue:is_empty(Consumers).
+
+-spec all(state()) -> [{ch(), rabbit_types:ctag(), boolean(),
+ non_neg_integer(), boolean(), atom(),
+ rabbit_framing:amqp_table(), rabbit_types:username()}].
+
+all(State) ->
+ all(State, none, false).
+
+all(#state{consumers = Consumers}, SingleActiveConsumer, SingleActiveConsumerOn) ->
+ lists:foldl(fun (C, Acc) -> consumers(C#cr.blocked_consumers, SingleActiveConsumer, SingleActiveConsumerOn, Acc) end,
+ consumers(Consumers, SingleActiveConsumer, SingleActiveConsumerOn, []), all_ch_record()).
+
+consumers(Consumers, SingleActiveConsumer, SingleActiveConsumerOn, Acc) ->
+ ActiveActivityStatusFun = case SingleActiveConsumerOn of
+ true ->
+ fun({ChPid, Consumer}) ->
+ case SingleActiveConsumer of
+ {ChPid, Consumer} ->
+ {true, single_active};
+ _ ->
+ {false, waiting}
+ end
+ end;
+ false ->
+ fun(_) -> {true, up} end
+ end,
+ priority_queue:fold(
+ fun ({ChPid, Consumer}, _P, Acc1) ->
+ #consumer{tag = CTag, ack_required = Ack, prefetch = Prefetch,
+ args = Args, user = Username} = Consumer,
+ {Active, ActivityStatus} = ActiveActivityStatusFun({ChPid, Consumer}),
+ [{ChPid, CTag, Ack, Prefetch, Active, ActivityStatus, Args, Username} | Acc1]
+ end, Acc, Consumers).
+
+-spec count() -> non_neg_integer().
+
+count() -> lists:sum([Count || #cr{consumer_count = Count} <- all_ch_record()]).
+
+-spec unacknowledged_message_count() -> non_neg_integer().
+
+unacknowledged_message_count() ->
+ lists:sum([?QUEUE:len(C#cr.acktags) || C <- all_ch_record()]).
+
+-spec add(ch(), rabbit_types:ctag(), boolean(), pid(), boolean(),
+ non_neg_integer(), rabbit_framing:amqp_table(), boolean(),
+ rabbit_types:username(), state())
+ -> state().
+
+add(ChPid, CTag, NoAck, LimiterPid, LimiterActive, Prefetch, Args, IsEmpty,
+ Username, State = #state{consumers = Consumers,
+ use = CUInfo}) ->
+ C = #cr{consumer_count = Count,
+ limiter = Limiter} = ch_record(ChPid, LimiterPid),
+ Limiter1 = case LimiterActive of
+ true -> rabbit_limiter:activate(Limiter);
+ false -> Limiter
+ end,
+ C1 = C#cr{consumer_count = Count + 1, limiter = Limiter1},
+ update_ch_record(
+ case parse_credit_args(Prefetch, Args) of
+ {0, auto} -> C1;
+ {_Credit, auto} when NoAck -> C1;
+ {Credit, Mode} -> credit_and_drain(
+ C1, CTag, Credit, Mode, IsEmpty)
+ end),
+ Consumer = #consumer{tag = CTag,
+ ack_required = not NoAck,
+ prefetch = Prefetch,
+ args = Args,
+ user = Username},
+ State#state{consumers = add_consumer({ChPid, Consumer}, Consumers),
+ use = update_use(CUInfo, active)}.
+
+-spec remove(ch(), rabbit_types:ctag(), state()) ->
+ 'not_found' | state().
+
+remove(ChPid, CTag, State = #state{consumers = Consumers}) ->
+ case lookup_ch(ChPid) of
+ not_found ->
+ not_found;
+ C = #cr{consumer_count = Count,
+ limiter = Limiter,
+ blocked_consumers = Blocked} ->
+ Blocked1 = remove_consumer(ChPid, CTag, Blocked),
+ Limiter1 = case Count of
+ 1 -> rabbit_limiter:deactivate(Limiter);
+ _ -> Limiter
+ end,
+ Limiter2 = rabbit_limiter:forget_consumer(Limiter1, CTag),
+ update_ch_record(C#cr{consumer_count = Count - 1,
+ limiter = Limiter2,
+ blocked_consumers = Blocked1}),
+ State#state{consumers =
+ remove_consumer(ChPid, CTag, Consumers)}
+ end.
+
+-spec erase_ch(ch(), state()) ->
+ 'not_found' | {[ack()], [rabbit_types:ctag()],
+ state()}.
+
+erase_ch(ChPid, State = #state{consumers = Consumers}) ->
+ case lookup_ch(ChPid) of
+ not_found ->
+ not_found;
+ C = #cr{ch_pid = ChPid,
+ acktags = ChAckTags,
+ blocked_consumers = BlockedQ} ->
+ All = priority_queue:join(Consumers, BlockedQ),
+ ok = erase_ch_record(C),
+ Filtered = priority_queue:filter(chan_pred(ChPid, true), All),
+ {[AckTag || {AckTag, _CTag} <- ?QUEUE:to_list(ChAckTags)],
+ tags(priority_queue:to_list(Filtered)),
+ State#state{consumers = remove_consumers(ChPid, Consumers)}}
+ end.
+
+-spec send_drained() -> 'ok'.
+
+send_drained() -> [update_ch_record(send_drained(C)) || C <- all_ch_record()],
+ ok.
+
+-spec deliver(fun ((boolean()) -> {fetch_result(), T}),
+ rabbit_amqqueue:name(), state(), boolean(),
+ none | {ch(), rabbit_types:ctag()} | {ch(), consumer()}) ->
+ {'delivered', boolean(), T, state()} |
+ {'undelivered', boolean(), state()}.
+
+deliver(FetchFun, QName, State, SingleActiveConsumerIsOn, ActiveConsumer) ->
+ deliver(FetchFun, QName, false, State, SingleActiveConsumerIsOn, ActiveConsumer).
+
+deliver(_FetchFun, _QName, false, State, true, none) ->
+ {undelivered, false,
+ State#state{use = update_use(State#state.use, inactive)}};
+deliver(FetchFun, QName, false, State = #state{consumers = Consumers}, true, SingleActiveConsumer) ->
+ {ChPid, Consumer} = SingleActiveConsumer,
+ %% Blocked (rate/prefetch limited) consumers are removed from the queue
+ %% state, but not from the exclusive_consumer field, so we need this check
+ %% to avoid adding the exclusive consumer to the channel record over and over.
+ case is_blocked(SingleActiveConsumer) of
+ true ->
+ {undelivered, false,
+ State#state{use = update_use(State#state.use, inactive)}};
+ false ->
+ case deliver_to_consumer(FetchFun, SingleActiveConsumer, QName) of
+ {delivered, R} ->
+ {delivered, false, R, State};
+ undelivered ->
+ {ChPid, Consumer} = SingleActiveConsumer,
+ Consumers1 = remove_consumer(ChPid, Consumer#consumer.tag, Consumers),
+ {undelivered, true,
+ State#state{consumers = Consumers1, use = update_use(State#state.use, inactive)}}
+ end
+ end;
+deliver(FetchFun, QName, ConsumersChanged,
+ State = #state{consumers = Consumers}, false, _SingleActiveConsumer) ->
+ case priority_queue:out_p(Consumers) of
+ {empty, _} ->
+ {undelivered, ConsumersChanged,
+ State#state{use = update_use(State#state.use, inactive)}};
+ {{value, QEntry, Priority}, Tail} ->
+ case deliver_to_consumer(FetchFun, QEntry, QName) of
+ {delivered, R} ->
+ {delivered, ConsumersChanged, R,
+ State#state{consumers = priority_queue:in(QEntry, Priority,
+ Tail)}};
+ undelivered ->
+ deliver(FetchFun, QName, true,
+ State#state{consumers = Tail}, false, _SingleActiveConsumer)
+ end
+ end.
+
+deliver_to_consumer(FetchFun, E = {ChPid, Consumer}, QName) ->
+ C = lookup_ch(ChPid),
+ case is_ch_blocked(C) of
+ true ->
+ block_consumer(C, E),
+ undelivered;
+ false -> case rabbit_limiter:can_send(C#cr.limiter,
+ Consumer#consumer.ack_required,
+ Consumer#consumer.tag) of
+ {suspend, Limiter} ->
+ block_consumer(C#cr{limiter = Limiter}, E),
+ undelivered;
+ {continue, Limiter} ->
+ {delivered, deliver_to_consumer(
+ FetchFun, Consumer,
+ C#cr{limiter = Limiter}, QName)}
+ end
+ end.
+
+deliver_to_consumer(FetchFun,
+ #consumer{tag = CTag,
+ ack_required = AckRequired},
+ C = #cr{ch_pid = ChPid,
+ acktags = ChAckTags,
+ unsent_message_count = Count},
+ QName) ->
+ {{Message, IsDelivered, AckTag}, R} = FetchFun(AckRequired),
+ rabbit_channel:deliver(ChPid, CTag, AckRequired,
+ {QName, self(), AckTag, IsDelivered, Message}),
+ ChAckTags1 = case AckRequired of
+ true -> ?QUEUE:in({AckTag, CTag}, ChAckTags);
+ false -> ChAckTags
+ end,
+ update_ch_record(C#cr{acktags = ChAckTags1,
+ unsent_message_count = Count + 1}),
+ R.
+
+is_blocked(Consumer = {ChPid, _C}) ->
+ #cr{blocked_consumers = BlockedConsumers} = lookup_ch(ChPid),
+ priority_queue:member(Consumer, BlockedConsumers).
+
+-spec record_ack(ch(), pid(), ack()) -> 'ok'.
+
+record_ack(ChPid, LimiterPid, AckTag) ->
+ C = #cr{acktags = ChAckTags} = ch_record(ChPid, LimiterPid),
+ update_ch_record(C#cr{acktags = ?QUEUE:in({AckTag, none}, ChAckTags)}),
+ ok.
+
+-spec subtract_acks(ch(), [ack()], state()) ->
+ 'not_found' | 'unchanged' | {'unblocked', state()}.
+
+subtract_acks(ChPid, AckTags, State) ->
+ case lookup_ch(ChPid) of
+ not_found ->
+ not_found;
+ C = #cr{acktags = ChAckTags, limiter = Lim} ->
+ {CTagCounts, AckTags2} = subtract_acks(
+ AckTags, [], maps:new(), ChAckTags),
+ {Unblocked, Lim2} =
+ maps:fold(
+ fun (CTag, Count, {UnblockedN, LimN}) ->
+ {Unblocked1, LimN1} =
+ rabbit_limiter:ack_from_queue(LimN, CTag, Count),
+ {UnblockedN orelse Unblocked1, LimN1}
+ end, {false, Lim}, CTagCounts),
+ C2 = C#cr{acktags = AckTags2, limiter = Lim2},
+ case Unblocked of
+ true -> unblock(C2, State);
+ false -> update_ch_record(C2),
+ unchanged
+ end
+ end.
+
+subtract_acks([], [], CTagCounts, AckQ) ->
+ {CTagCounts, AckQ};
+subtract_acks([], Prefix, CTagCounts, AckQ) ->
+ {CTagCounts, ?QUEUE:join(?QUEUE:from_list(lists:reverse(Prefix)), AckQ)};
+subtract_acks([T | TL] = AckTags, Prefix, CTagCounts, AckQ) ->
+ case ?QUEUE:out(AckQ) of
+ {{value, {T, CTag}}, QTail} ->
+ subtract_acks(TL, Prefix,
+ maps:update_with(CTag, fun (Old) -> Old + 1 end, 1, CTagCounts), QTail);
+ {{value, V}, QTail} ->
+ subtract_acks(AckTags, [V | Prefix], CTagCounts, QTail);
+ {empty, _} ->
+ subtract_acks([], Prefix, CTagCounts, AckQ)
+ end.
+
+-spec possibly_unblock(cr_fun(), ch(), state()) ->
+ 'unchanged' | {'unblocked', state()}.
+
+possibly_unblock(Update, ChPid, State) ->
+ case lookup_ch(ChPid) of
+ not_found -> unchanged;
+ C -> C1 = Update(C),
+ case is_ch_blocked(C) andalso not is_ch_blocked(C1) of
+ false -> update_ch_record(C1),
+ unchanged;
+ true -> unblock(C1, State)
+ end
+ end.
+
+unblock(C = #cr{blocked_consumers = BlockedQ, limiter = Limiter},
+ State = #state{consumers = Consumers, use = Use}) ->
+ case lists:partition(
+ fun({_P, {_ChPid, #consumer{tag = CTag}}}) ->
+ rabbit_limiter:is_consumer_blocked(Limiter, CTag)
+ end, priority_queue:to_list(BlockedQ)) of
+ {_, []} ->
+ update_ch_record(C),
+ unchanged;
+ {Blocked, Unblocked} ->
+ BlockedQ1 = priority_queue:from_list(Blocked),
+ UnblockedQ = priority_queue:from_list(Unblocked),
+ update_ch_record(C#cr{blocked_consumers = BlockedQ1}),
+ {unblocked,
+ State#state{consumers = priority_queue:join(Consumers, UnblockedQ),
+ use = update_use(Use, active)}}
+ end.
+
+-spec resume_fun() -> cr_fun().
+
+resume_fun() ->
+ fun (C = #cr{limiter = Limiter}) ->
+ C#cr{limiter = rabbit_limiter:resume(Limiter)}
+ end.
+
+-spec notify_sent_fun(non_neg_integer()) -> cr_fun().
+
+notify_sent_fun(Credit) ->
+ fun (C = #cr{unsent_message_count = Count}) ->
+ C#cr{unsent_message_count = Count - Credit}
+ end.
+
+-spec activate_limit_fun() -> cr_fun().
+
+activate_limit_fun() ->
+ fun (C = #cr{limiter = Limiter}) ->
+ C#cr{limiter = rabbit_limiter:activate(Limiter)}
+ end.
+
+-spec credit(boolean(), integer(), boolean(), ch(), rabbit_types:ctag(),
+ state()) -> 'unchanged' | {'unblocked', state()}.
+
+credit(IsEmpty, Credit, Drain, ChPid, CTag, State) ->
+ case lookup_ch(ChPid) of
+ not_found ->
+ unchanged;
+ #cr{limiter = Limiter} = C ->
+ C1 = #cr{limiter = Limiter1} =
+ credit_and_drain(C, CTag, Credit, drain_mode(Drain), IsEmpty),
+ case is_ch_blocked(C1) orelse
+ (not rabbit_limiter:is_consumer_blocked(Limiter, CTag)) orelse
+ rabbit_limiter:is_consumer_blocked(Limiter1, CTag) of
+ true -> update_ch_record(C1),
+ unchanged;
+ false -> unblock(C1, State)
+ end
+ end.
+
+drain_mode(true) -> drain;
+drain_mode(false) -> manual.
+
+-spec utilisation(state()) -> ratio().
+
+utilisation(#state{use = {active, Since, Avg}}) ->
+ use_avg(erlang:monotonic_time(micro_seconds) - Since, 0, Avg);
+utilisation(#state{use = {inactive, Since, Active, Avg}}) ->
+ use_avg(Active, erlang:monotonic_time(micro_seconds) - Since, Avg).
+
+is_same(ChPid, ConsumerTag, {ChPid, #consumer{tag = ConsumerTag}}) ->
+ true;
+is_same(_ChPid, _ConsumerTag, _Consumer) ->
+ false.
+
+get_consumer(#state{consumers = Consumers}) ->
+ case priority_queue:out_p(Consumers) of
+ {{value, Consumer, _Priority}, _Tail} -> Consumer;
+ {empty, _} -> undefined
+ end.
+
+-spec get(ch(), rabbit_types:ctag(), state()) -> undefined | consumer().
+
+get(ChPid, ConsumerTag, #state{consumers = Consumers}) ->
+ Consumers1 = priority_queue:filter(fun ({CP, #consumer{tag = CT}}) ->
+ (CP == ChPid) and (CT == ConsumerTag)
+ end, Consumers),
+ case priority_queue:out_p(Consumers1) of
+ {empty, _} -> undefined;
+ {{value, Consumer, _Priority}, _Tail} -> Consumer
+ end.
+
+-spec get_infos(consumer()) -> term().
+
+get_infos(Consumer) ->
+ {Consumer#consumer.tag,Consumer#consumer.ack_required,
+ Consumer#consumer.prefetch, Consumer#consumer.args}.
+
+-spec consumer_tag(consumer()) -> rabbit_types:ctag().
+
+consumer_tag(#consumer{tag = CTag}) ->
+ CTag.
+
+
+%%----------------------------------------------------------------------------
+
+parse_credit_args(Default, Args) ->
+ case rabbit_misc:table_lookup(Args, <<"x-credit">>) of
+ {table, T} -> case {rabbit_misc:table_lookup(T, <<"credit">>),
+ rabbit_misc:table_lookup(T, <<"drain">>)} of
+ {{long, C}, {bool, D}} -> {C, drain_mode(D)};
+ _ -> {Default, auto}
+ end;
+ undefined -> {Default, auto}
+ end.
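+
+%% For example (illustrative): consumer arguments containing
+%%   {<<"x-credit">>, table, [{<<"credit">>, long, 10},
+%%                            {<<"drain">>, bool, false}]}
+%% parse to {10, manual}, while anything else falls back to
+%% {Default, auto}, Default being the prefetch count.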
+
+lookup_ch(ChPid) ->
+ case get({ch, ChPid}) of
+ undefined -> not_found;
+ C -> C
+ end.
+
+ch_record(ChPid, LimiterPid) ->
+ Key = {ch, ChPid},
+ case get(Key) of
+ undefined -> MonitorRef = erlang:monitor(process, ChPid),
+ Limiter = rabbit_limiter:client(LimiterPid),
+ C = #cr{ch_pid = ChPid,
+ monitor_ref = MonitorRef,
+ acktags = ?QUEUE:new(),
+ consumer_count = 0,
+ blocked_consumers = priority_queue:new(),
+ limiter = Limiter,
+ unsent_message_count = 0},
+ put(Key, C),
+ C;
+ C = #cr{} -> C
+ end.
+
+update_ch_record(C = #cr{consumer_count = ConsumerCount,
+ acktags = ChAckTags,
+ unsent_message_count = UnsentMessageCount}) ->
+ case {?QUEUE:is_empty(ChAckTags), ConsumerCount, UnsentMessageCount} of
+ {true, 0, 0} -> ok = erase_ch_record(C);
+ _ -> ok = store_ch_record(C)
+ end,
+ C.
+
+store_ch_record(C = #cr{ch_pid = ChPid}) ->
+ put({ch, ChPid}, C),
+ ok.
+
+erase_ch_record(#cr{ch_pid = ChPid, monitor_ref = MonitorRef}) ->
+ erlang:demonitor(MonitorRef),
+ erase({ch, ChPid}),
+ ok.
+
+all_ch_record() -> [C || {{ch, _}, C} <- get()].
+
+block_consumer(C = #cr{blocked_consumers = Blocked}, QEntry) ->
+ update_ch_record(C#cr{blocked_consumers = add_consumer(QEntry, Blocked)}).
+
+is_ch_blocked(#cr{unsent_message_count = Count, limiter = Limiter}) ->
+ Count >= ?UNSENT_MESSAGE_LIMIT orelse rabbit_limiter:is_suspended(Limiter).
+
+send_drained(C = #cr{ch_pid = ChPid, limiter = Limiter}) ->
+ case rabbit_limiter:drained(Limiter) of
+ {[], Limiter} -> C;
+ {CTagCredit, Limiter2} -> rabbit_channel:send_drained(
+ ChPid, CTagCredit),
+ C#cr{limiter = Limiter2}
+ end.
+
+credit_and_drain(C = #cr{ch_pid = ChPid, limiter = Limiter},
+ CTag, Credit, Mode, IsEmpty) ->
+ case rabbit_limiter:credit(Limiter, CTag, Credit, Mode, IsEmpty) of
+ {true, Limiter1} -> rabbit_channel:send_drained(ChPid,
+ [{CTag, Credit}]),
+ C#cr{limiter = Limiter1};
+ {false, Limiter1} -> C#cr{limiter = Limiter1}
+ end.
+
+tags(CList) -> [CTag || {_P, {_ChPid, #consumer{tag = CTag}}} <- CList].
+
+add_consumer({ChPid, Consumer = #consumer{args = Args}}, Queue) ->
+ Priority = case rabbit_misc:table_lookup(Args, <<"x-priority">>) of
+ {_, P} -> P;
+ _ -> 0
+ end,
+ priority_queue:in({ChPid, Consumer}, Priority, Queue).
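+
+%% E.g. (illustrative): a consumer whose arguments include
+%% {<<"x-priority">>, long, 10} is enqueued at priority 10; consumers
+%% without the argument default to priority 0.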
+
+remove_consumer(ChPid, CTag, Queue) ->
+ priority_queue:filter(fun ({CP, #consumer{tag = CT}}) ->
+ (CP /= ChPid) or (CT /= CTag)
+ end, Queue).
+
+remove_consumers(ChPid, Queue) ->
+ priority_queue:filter(chan_pred(ChPid, false), Queue).
+
+chan_pred(ChPid, Want) ->
+ fun ({CP, _Consumer}) when CP =:= ChPid -> Want;
+ (_) -> not Want
+ end.
+
+update_use({inactive, _, _, _} = CUInfo, inactive) ->
+ CUInfo;
+update_use({active, _, _} = CUInfo, active) ->
+ CUInfo;
+update_use({active, Since, Avg}, inactive) ->
+ Now = erlang:monotonic_time(micro_seconds),
+ {inactive, Now, Now - Since, Avg};
+update_use({inactive, Since, Active, Avg}, active) ->
+ Now = erlang:monotonic_time(micro_seconds),
+ {active, Now, use_avg(Active, Now - Since, Avg)}.
+
+use_avg(0, 0, Avg) ->
+ Avg;
+use_avg(Active, Inactive, Avg) ->
+ Time = Inactive + Active,
+ rabbit_misc:moving_average(Time, ?USE_AVG_HALF_LIFE, Active / Time, Avg).
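+
+%% Illustrative example (assumed numbers): with the 1 s half-life above,
+%% a consumer set active for 0.5 s and then inactive for 0.5 s
+%% contributes an instantaneous ratio of Active / (Active + Inactive) =
+%% 0.5, which rabbit_misc:moving_average/4 blends into the previous
+%% average.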
diff --git a/deps/rabbit/src/rabbit_queue_decorator.erl b/deps/rabbit/src/rabbit_queue_decorator.erl
new file mode 100644
index 0000000000..cbb50456c1
--- /dev/null
+++ b/deps/rabbit/src/rabbit_queue_decorator.erl
@@ -0,0 +1,72 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_queue_decorator).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+-include("amqqueue.hrl").
+
+-export([select/1, set/1, register/2, unregister/1]).
+
+-behaviour(rabbit_registry_class).
+
+-export([added_to_rabbit_registry/2, removed_from_rabbit_registry/1]).
+
+%%----------------------------------------------------------------------------
+
+-callback startup(amqqueue:amqqueue()) -> 'ok'.
+
+-callback shutdown(amqqueue:amqqueue()) -> 'ok'.
+
+-callback policy_changed(amqqueue:amqqueue(), amqqueue:amqqueue()) ->
+ 'ok'.
+
+-callback active_for(amqqueue:amqqueue()) -> boolean().
+
+%% called with Queue, MaxActivePriority, IsEmpty
+-callback consumer_state_changed(
+ amqqueue:amqqueue(), integer(), boolean()) -> 'ok'.
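+%%
+%% A minimal decorator sketch (illustrative; the module name is
+%% hypothetical):
+%%
+%%   -module(my_noop_decorator).
+%%   -behaviour(rabbit_queue_decorator).
+%%   -export([startup/1, shutdown/1, policy_changed/2, active_for/1,
+%%            consumer_state_changed/3]).
+%%
+%%   startup(_Q) -> ok.
+%%   shutdown(_Q) -> ok.
+%%   policy_changed(_OldQ, _NewQ) -> ok.
+%%   active_for(_Q) -> false.
+%%   consumer_state_changed(_Q, _MaxActivePriority, _IsEmpty) -> ok.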
+
+%%----------------------------------------------------------------------------
+
+added_to_rabbit_registry(_Type, _ModuleName) -> ok.
+removed_from_rabbit_registry(_Type) -> ok.
+
+select(Modules) ->
+ [M || M <- Modules, code:which(M) =/= non_existing].
+
+set(Q) when ?is_amqqueue(Q) ->
+ Decorators = [D || D <- list(), D:active_for(Q)],
+ amqqueue:set_decorators(Q, Decorators).
+
+list() -> [M || {_, M} <- rabbit_registry:lookup_all(queue_decorator)].
+
+register(TypeName, ModuleName) ->
+ rabbit_registry:register(queue_decorator, TypeName, ModuleName),
+ [maybe_recover(Q) || Q <- rabbit_amqqueue:list()],
+ ok.
+
+unregister(TypeName) ->
+ rabbit_registry:unregister(queue_decorator, TypeName),
+ [maybe_recover(Q) || Q <- rabbit_amqqueue:list()],
+ ok.
+
+maybe_recover(Q0) when ?is_amqqueue(Q0) ->
+ Name = amqqueue:get_name(Q0),
+ Decs0 = amqqueue:get_decorators(Q0),
+ Q1 = set(Q0),
+ Decs1 = amqqueue:get_decorators(Q1),
+ Old = lists:sort(select(Decs0)),
+ New = lists:sort(select(Decs1)),
+ case New of
+ Old ->
+ ok;
+ _ ->
+ %% TODO LRB JSP 160169569 should startup be passed Q1 here?
+ [M:startup(Q0) || M <- New -- Old],
+ rabbit_amqqueue:update_decorators(Name)
+ end.
diff --git a/deps/rabbit/src/rabbit_queue_index.erl b/deps/rabbit/src/rabbit_queue_index.erl
new file mode 100644
index 0000000000..faab4380b5
--- /dev/null
+++ b/deps/rabbit/src/rabbit_queue_index.erl
@@ -0,0 +1,1521 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_queue_index).
+
+-export([erase/1, init/3, reset_state/1, recover/6,
+ terminate/3, delete_and_terminate/1,
+ pre_publish/7, flush_pre_publish_cache/2,
+ publish/6, deliver/2, ack/2, sync/1, needs_sync/1, flush/1,
+ read/3, next_segment_boundary/1, bounds/1, start/2, stop/1]).
+
+-export([add_queue_ttl/0, avoid_zeroes/0, store_msg_size/0, store_msg/0]).
+-export([scan_queue_segments/3, scan_queue_segments/4]).
+
+%% Migrates from global to per-vhost message stores
+-export([move_to_per_vhost_stores/1,
+ update_recovery_term/2,
+ read_global_recovery_terms/1,
+ cleanup_global_recovery_terms/0]).
+
+-define(CLEAN_FILENAME, "clean.dot").
+
+%%----------------------------------------------------------------------------
+
+%% The queue index is responsible for recording the order of messages
+%% within a queue on disk. As such it contains records of messages
+%% being published, delivered and acknowledged. The publish record
+%% includes the sequence ID, message ID and a small quantity of
+%% metadata about the message; the delivery and acknowledgement
+%% records just contain the sequence ID. A publish record may also
+%% contain the complete message if provided to publish/6; this allows
+%% the message store to be avoided altogether for small messages. In
+%% either case the publish record is stored in memory in the same
+%% serialised format it will take on disk.
+%%
+%% Because the queue can decide at any point to send a queue entry to
+%% disk, you cannot rely on publishes appearing in order. The only
+%% thing you can rely on is a message being published, then delivered,
+%% then ack'd.
+%%
+%% In order to be able to clean up ack'd messages, we write to segment
+%% files. These files have a fixed number of entries: ?SEGMENT_ENTRY_COUNT
+%% publishes, delivers and acknowledgements. They are numbered, and so
+%% it is known that the 0th segment contains messages 0 ->
+%% ?SEGMENT_ENTRY_COUNT - 1, the 1st segment contains messages
+%% ?SEGMENT_ENTRY_COUNT -> 2*?SEGMENT_ENTRY_COUNT - 1 and so on. As
+%% such, in the segment files, we only refer to message sequence ids
+%% by the LSBs as SeqId rem ?SEGMENT_ENTRY_COUNT. This gives them a
+%% fixed size.
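+%%
+%% For example (illustrative, with ?SEGMENT_ENTRY_COUNT = 16384):
+%% sequence id 20000 lives in segment 20000 div 16384 = 1 and is stored
+%% there under rel seq id 20000 rem 16384 = 3616.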
+%%
+%% However, transient messages which are not sent to disk at any point
+%% will cause gaps to appear in segment files. Therefore, we delete a
+%% segment file whenever the number of publishes == number of acks
+%% (note that although it is not fully enforced, it is assumed that a
+%% message will never be ack'd before it is delivered, thus this test
+%% also implies == number of delivers). In practice, this does not
+%% cause disk churn in the pathological case because of the journal
+%% and caching (see below).
+%%
+%% Because publishes, delivers and acks can occur all over the place,
+%% we wish to avoid lots of seeking. Therefore we have a fixed
+%% sized journal to which all actions are appended. When the number of
+%% entries in this journal reaches max_journal_entries, the journal
+%% entries are scattered out to their relevant files, and the journal
+%% is truncated to zero size. Note that entries in the journal must
+%% carry the full sequence id, thus the format of entries in the
+%% journal is different to that in the segments.
+%%
+%% The journal is also kept fully in memory, pre-segmented: the state
+%% contains a mapping from segment numbers to state-per-segment (this
+%% state is held for all segments which have been "seen": thus a
+%% segment which has been read but has no pending entries in the
+%% journal is still held in this mapping. Also note that a map is
+%% used for this mapping, not an array, because with an array you
+%% would always have entries from 0). Actions are stored directly in this
+%% state. Thus at the point of flushing the journal, firstly no
+%% reading from disk is necessary, but secondly if the known number of
+%% acks and publishes in a segment are equal, given the known state of
+%% the segment file combined with the journal, no writing needs to be
+%% done to the segment file either (in fact it is deleted if it exists
+%% at all). This is safe given that the set of acks is a subset of the
+%% set of publishes. When it is necessary to sync messages, it is
+%% sufficient to fsync on the journal: when entries are distributed
+%% from the journal to segment files, those segments appended to are
+%% fsync'd prior to the journal being truncated.
+%%
+%% This module is also responsible for scanning the queue index files
+%% and seeding the message store on start up.
+%%
+%% Note that in general, the representation of a message's state as
+%% the tuple: {('no_pub'|{IsPersistent, Bin, MsgBin}),
+%% ('del'|'no_del'), ('ack'|'no_ack')} is richer than strictly
+%% necessary for most operations. However, for startup, and to ensure
+%% the safe and correct combination of journal entries with entries
+%% read from the segment on disk, this richer representation vastly
+%% simplifies and clarifies the code.
+%%
+%% For notes on Clean Shutdown and startup, see documentation in
+%% rabbit_variable_queue.
+%%
+%%----------------------------------------------------------------------------
+
+%% ---- Journal details ----
+
+-define(JOURNAL_FILENAME, "journal.jif").
+-define(QUEUE_NAME_STUB_FILE, ".queue_name").
+
+-define(PUB_PERSIST_JPREFIX, 2#00).
+-define(PUB_TRANS_JPREFIX, 2#01).
+-define(DEL_JPREFIX, 2#10).
+-define(ACK_JPREFIX, 2#11).
+-define(JPREFIX_BITS, 2).
+-define(SEQ_BYTES, 8).
+-define(SEQ_BITS, ((?SEQ_BYTES * 8) - ?JPREFIX_BITS)).
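+
+%% Thus a delivery or acknowledgement journal entry is (illustrative) a
+%% single 8-byte word, e.g. <<?DEL_JPREFIX:?JPREFIX_BITS,
+%% SeqId:?SEQ_BITS>>; publish entries additionally carry the record
+%% body after the prefixed sequence id.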
+
+%% ---- Segment details ----
+
+-define(SEGMENT_EXTENSION, ".idx").
+
+%% TODO: The segment size could be made configurable, but deriving all
+%% the other values is quite hairy and quite possibly noticeably less
+%% efficient, depending on how clever the compiler is when it comes to
+%% binary generation/matching with constant vs variable lengths.
+
+-define(REL_SEQ_BITS, 14).
+%% calculated as trunc(math:pow(2, ?REL_SEQ_BITS)).
+-define(SEGMENT_ENTRY_COUNT, 16384).
+
+%% seq only is binary 01 followed by 14 bits of rel seq id
+%% (range: 0 - 16383)
+-define(REL_SEQ_ONLY_PREFIX, 01).
+-define(REL_SEQ_ONLY_PREFIX_BITS, 2).
+-define(REL_SEQ_ONLY_RECORD_BYTES, 2).
+
+%% publish record is binary 1 followed by a bit for is_persistent,
+%% then 14 bits of rel seq id, 64 bits for message expiry, 32 bits of
+%% size and then 128 bits of md5sum msg id.
+-define(PUB_PREFIX, 1).
+-define(PUB_PREFIX_BITS, 1).
+
+-define(EXPIRY_BYTES, 8).
+-define(EXPIRY_BITS, (?EXPIRY_BYTES * 8)).
+-define(NO_EXPIRY, 0).
+
+-define(MSG_ID_BYTES, 16). %% md5sum is 128 bit or 16 bytes
+-define(MSG_ID_BITS, (?MSG_ID_BYTES * 8)).
+
+%% This is the size of the message body content, for stats
+-define(SIZE_BYTES, 4).
+-define(SIZE_BITS, (?SIZE_BYTES * 8)).
+
+%% This is the size of the message record embedded in the queue
+%% index. If 0, the message can be found in the message store.
+-define(EMBEDDED_SIZE_BYTES, 4).
+-define(EMBEDDED_SIZE_BITS, (?EMBEDDED_SIZE_BYTES * 8)).
+
+%% 16 bytes for md5sum + 8 for expiry + 4 for size
+-define(PUB_RECORD_BODY_BYTES, (?MSG_ID_BYTES + ?EXPIRY_BYTES + ?SIZE_BYTES)).
+%% + 4 for the embedded message size
+-define(PUB_RECORD_SIZE_BYTES, (?PUB_RECORD_BODY_BYTES + ?EMBEDDED_SIZE_BYTES)).
+
+%% + 2 bytes for the prefix bits and the 14-bit rel seq id
+-define(PUB_RECORD_PREFIX_BYTES, 2).
+
+%% ---- misc ----
+
+-define(PUB, {_, _, _}). %% {IsPersistent, Bin, MsgBin}
+
+-define(READ_MODE, [binary, raw, read]).
+-define(WRITE_MODE, [write | ?READ_MODE]).
+
+%%----------------------------------------------------------------------------
+
+-record(qistate, {
+ %% queue directory where segment and journal files are stored
+ dir,
+ %% map of #segment records
+ segments,
+ %% journal file handle obtained from/used by file_handle_cache
+ journal_handle,
+ %% number of journal entries that have not yet been flushed
+ dirty_count,
+ %% a flush is forced once this many unflushed journal entries accumulate
+ max_journal_entries,
+ %% callback function invoked when a message is "handled"
+ %% by the index and potentially can be confirmed to the publisher
+ on_sync,
+ on_sync_msg,
+ %% set of IDs of unconfirmed [to publishers] messages
+ unconfirmed,
+ unconfirmed_msg,
+ %% optimisation
+ pre_publish_cache,
+ %% optimisation
+ delivered_cache,
+ %% queue name resource record
+ queue_name}).
+
+-record(segment, {
+ %% segment ID (an integer)
+ num,
+ %% segment file path (see also ?SEGMENT_EXTENSION)
+ path,
+ %% index operation log entries in this segment
+ journal_entries,
+ entries_to_segment,
+ %% counter of unacknowledged messages
+ unacked
+}).
+
+-include("rabbit.hrl").
+
+%%----------------------------------------------------------------------------
+
+-rabbit_upgrade({add_queue_ttl, local, []}).
+-rabbit_upgrade({avoid_zeroes, local, [add_queue_ttl]}).
+-rabbit_upgrade({store_msg_size, local, [avoid_zeroes]}).
+-rabbit_upgrade({store_msg, local, [store_msg_size]}).
+
+-type hdl() :: ('undefined' | any()).
+-type segment() :: ('undefined' |
+ #segment { num :: non_neg_integer(),
+ path :: file:filename(),
+ journal_entries :: array:array(),
+ entries_to_segment :: array:array(),
+ unacked :: non_neg_integer()
+ }).
+-type seq_id() :: integer().
+-type seg_map() :: {map(), [segment()]}.
+-type on_sync_fun() :: fun ((gb_sets:set()) -> ok).
+-type qistate() :: #qistate { dir :: file:filename(),
+ segments :: 'undefined' | seg_map(),
+ journal_handle :: hdl(),
+ dirty_count :: integer(),
+ max_journal_entries :: non_neg_integer(),
+ on_sync :: on_sync_fun(),
+ on_sync_msg :: on_sync_fun(),
+ unconfirmed :: gb_sets:set(),
+ unconfirmed_msg :: gb_sets:set(),
+ pre_publish_cache :: list(),
+ delivered_cache :: list()
+ }.
+-type contains_predicate() :: fun ((rabbit_types:msg_id()) -> boolean()).
+-type walker(A) :: fun ((A) -> 'finished' |
+ {rabbit_types:msg_id(), non_neg_integer(), A}).
+-type shutdown_terms() :: [term()] | 'non_clean_shutdown'.
+
+%%----------------------------------------------------------------------------
+%% public API
+%%----------------------------------------------------------------------------
+
+-spec erase(rabbit_amqqueue:name()) -> 'ok'.
+
+erase(#resource{ virtual_host = VHost } = Name) ->
+ VHostDir = rabbit_vhost:msg_store_dir_path(VHost),
+ #qistate { dir = Dir } = blank_state(VHostDir, Name),
+ erase_index_dir(Dir).
+
+%% used during variable queue purge when there are no pending acks
+
+-spec reset_state(qistate()) -> qistate().
+
+reset_state(#qistate{ queue_name = Name,
+ dir = Dir,
+ on_sync = OnSyncFun,
+ on_sync_msg = OnSyncMsgFun,
+ journal_handle = JournalHdl }) ->
+ ok = case JournalHdl of
+ undefined -> ok;
+ _ -> file_handle_cache:close(JournalHdl)
+ end,
+ ok = erase_index_dir(Dir),
+ blank_state_name_dir_funs(Name, Dir, OnSyncFun, OnSyncMsgFun).
+
+-spec init(rabbit_amqqueue:name(),
+ on_sync_fun(), on_sync_fun()) -> qistate().
+
+init(#resource{ virtual_host = VHost } = Name, OnSyncFun, OnSyncMsgFun) ->
+ VHostDir = rabbit_vhost:msg_store_dir_path(VHost),
+ State = #qistate { dir = Dir } = blank_state(VHostDir, Name),
+    false = rabbit_file:is_file(Dir), %% is_file/1 is true for files and dirs alike
+ State#qistate{on_sync = OnSyncFun,
+ on_sync_msg = OnSyncMsgFun}.
+
+-spec recover(rabbit_amqqueue:name(), shutdown_terms(), boolean(),
+ contains_predicate(),
+ on_sync_fun(), on_sync_fun()) ->
+ {'undefined' | non_neg_integer(),
+ 'undefined' | non_neg_integer(), qistate()}.
+
+recover(#resource{ virtual_host = VHost } = Name, Terms, MsgStoreRecovered,
+ ContainsCheckFun, OnSyncFun, OnSyncMsgFun) ->
+ VHostDir = rabbit_vhost:msg_store_dir_path(VHost),
+ State = blank_state(VHostDir, Name),
+ State1 = State #qistate{on_sync = OnSyncFun,
+ on_sync_msg = OnSyncMsgFun},
+ CleanShutdown = Terms /= non_clean_shutdown,
+ case CleanShutdown andalso MsgStoreRecovered of
+ true -> RecoveredCounts = proplists:get_value(segments, Terms, []),
+ init_clean(RecoveredCounts, State1);
+ false -> init_dirty(CleanShutdown, ContainsCheckFun, State1)
+ end.
+
+-spec terminate(rabbit_types:vhost(), [any()], qistate()) -> qistate().
+
+terminate(VHost, Terms, State = #qistate { dir = Dir }) ->
+ {SegmentCounts, State1} = terminate(State),
+ rabbit_recovery_terms:store(VHost, filename:basename(Dir),
+ [{segments, SegmentCounts} | Terms]),
+ State1.
+
+-spec delete_and_terminate(qistate()) -> qistate().
+
+delete_and_terminate(State) ->
+ {_SegmentCounts, State1 = #qistate { dir = Dir }} = terminate(State),
+ ok = rabbit_file:recursive_delete([Dir]),
+ State1.
+
+pre_publish(MsgOrId, SeqId, MsgProps, IsPersistent, IsDelivered, JournalSizeHint,
+ State = #qistate{pre_publish_cache = PPC,
+ delivered_cache = DC}) ->
+ State1 = maybe_needs_confirming(MsgProps, MsgOrId, State),
+
+ {Bin, MsgBin} = create_pub_record_body(MsgOrId, MsgProps),
+
+ PPC1 =
+ [[<<(case IsPersistent of
+ true -> ?PUB_PERSIST_JPREFIX;
+ false -> ?PUB_TRANS_JPREFIX
+ end):?JPREFIX_BITS,
+ SeqId:?SEQ_BITS, Bin/binary,
+ (size(MsgBin)):?EMBEDDED_SIZE_BITS>>, MsgBin] | PPC],
+
+ DC1 =
+ case IsDelivered of
+ true ->
+ [SeqId | DC];
+ false ->
+ DC
+ end,
+
+ State2 = add_to_journal(SeqId, {IsPersistent, Bin, MsgBin}, State1),
+ maybe_flush_pre_publish_cache(
+ JournalSizeHint,
+ State2#qistate{pre_publish_cache = PPC1,
+ delivered_cache = DC1}).
+
+%% pre_publish_cache always has at least as many entries as
+%% delivered_cache, so it suffices to check only the former in the guard.
+maybe_flush_pre_publish_cache(JournalSizeHint,
+ #qistate{pre_publish_cache = PPC} = State)
+ when length(PPC) >= ?SEGMENT_ENTRY_COUNT ->
+ flush_pre_publish_cache(JournalSizeHint, State);
+maybe_flush_pre_publish_cache(_JournalSizeHint, State) ->
+ State.
+
+flush_pre_publish_cache(JournalSizeHint, State) ->
+ State1 = flush_pre_publish_cache(State),
+ State2 = flush_delivered_cache(State1),
+ maybe_flush_journal(JournalSizeHint, State2).
+
+flush_pre_publish_cache(#qistate{pre_publish_cache = []} = State) ->
+ State;
+flush_pre_publish_cache(State = #qistate{pre_publish_cache = PPC}) ->
+ {JournalHdl, State1} = get_journal_handle(State),
+ file_handle_cache_stats:update(queue_index_journal_write),
+ ok = file_handle_cache:append(JournalHdl, lists:reverse(PPC)),
+ State1#qistate{pre_publish_cache = []}.
+
+flush_delivered_cache(#qistate{delivered_cache = []} = State) ->
+ State;
+flush_delivered_cache(State = #qistate{delivered_cache = DC}) ->
+ State1 = deliver(lists:reverse(DC), State),
+ State1#qistate{delivered_cache = []}.
+
+-spec publish(rabbit_types:msg_id(), seq_id(),
+ rabbit_types:message_properties(), boolean(),
+ non_neg_integer(), qistate()) -> qistate().
+
+publish(MsgOrId, SeqId, MsgProps, IsPersistent, JournalSizeHint, State) ->
+ {JournalHdl, State1} =
+ get_journal_handle(
+ maybe_needs_confirming(MsgProps, MsgOrId, State)),
+ file_handle_cache_stats:update(queue_index_journal_write),
+ {Bin, MsgBin} = create_pub_record_body(MsgOrId, MsgProps),
+ ok = file_handle_cache:append(
+ JournalHdl, [<<(case IsPersistent of
+ true -> ?PUB_PERSIST_JPREFIX;
+ false -> ?PUB_TRANS_JPREFIX
+ end):?JPREFIX_BITS,
+ SeqId:?SEQ_BITS, Bin/binary,
+ (size(MsgBin)):?EMBEDDED_SIZE_BITS>>, MsgBin]),
+ maybe_flush_journal(
+ JournalSizeHint,
+ add_to_journal(SeqId, {IsPersistent, Bin, MsgBin}, State1)).
+
+maybe_needs_confirming(MsgProps, MsgOrId,
+ State = #qistate{unconfirmed = UC,
+ unconfirmed_msg = UCM}) ->
+ MsgId = case MsgOrId of
+ #basic_message{id = Id} -> Id;
+ Id when is_binary(Id) -> Id
+ end,
+ ?MSG_ID_BYTES = size(MsgId),
+ case {MsgProps#message_properties.needs_confirming, MsgOrId} of
+ {true, MsgId} -> UC1 = gb_sets:add_element(MsgId, UC),
+ State#qistate{unconfirmed = UC1};
+ {true, _} -> UCM1 = gb_sets:add_element(MsgId, UCM),
+ State#qistate{unconfirmed_msg = UCM1};
+ {false, _} -> State
+ end.
+
+-spec deliver([seq_id()], qistate()) -> qistate().
+
+deliver(SeqIds, State) ->
+ deliver_or_ack(del, SeqIds, State).
+
+-spec ack([seq_id()], qistate()) -> qistate().
+
+ack(SeqIds, State) ->
+ deliver_or_ack(ack, SeqIds, State).
+
+%% This is called when there are outstanding confirms or when the
+%% queue is idle and the journal needs syncing (see needs_sync/1).
+
+-spec sync(qistate()) -> qistate().
+
+sync(State = #qistate { journal_handle = undefined }) ->
+ State;
+sync(State = #qistate { journal_handle = JournalHdl }) ->
+ ok = file_handle_cache:sync(JournalHdl),
+ notify_sync(State).
+
+-spec needs_sync(qistate()) -> 'confirms' | 'other' | 'false'.
+
+needs_sync(#qistate{journal_handle = undefined}) ->
+ false;
+needs_sync(#qistate{journal_handle = JournalHdl,
+ unconfirmed = UC,
+ unconfirmed_msg = UCM}) ->
+ case gb_sets:is_empty(UC) andalso gb_sets:is_empty(UCM) of
+ true -> case file_handle_cache:needs_sync(JournalHdl) of
+ true -> other;
+ false -> false
+ end;
+ false -> confirms
+ end.
+
+-spec flush(qistate()) -> qistate().
+
+flush(State = #qistate { dirty_count = 0 }) -> State;
+flush(State) -> flush_journal(State).
+
+-spec read(seq_id(), seq_id(), qistate()) ->
+ {[{rabbit_types:msg_id(), seq_id(),
+ rabbit_types:message_properties(),
+ boolean(), boolean()}], qistate()}.
+
+read(StartEnd, StartEnd, State) ->
+ {[], State};
+read(Start, End, State = #qistate { segments = Segments,
+ dir = Dir }) when Start =< End ->
+ %% Start is inclusive, End is exclusive.
+ LowerB = {StartSeg, _StartRelSeq} = seq_id_to_seg_and_rel_seq_id(Start),
+ UpperB = {EndSeg, _EndRelSeq} = seq_id_to_seg_and_rel_seq_id(End - 1),
+ {Messages, Segments1} =
+ lists:foldr(fun (Seg, Acc) ->
+ read_bounded_segment(Seg, LowerB, UpperB, Acc, Dir)
+ end, {[], Segments}, lists:seq(StartSeg, EndSeg)),
+ {Messages, State #qistate { segments = Segments1 }}.
+
+-spec next_segment_boundary(seq_id()) -> seq_id().
+
+next_segment_boundary(SeqId) ->
+ {Seg, _RelSeq} = seq_id_to_seg_and_rel_seq_id(SeqId),
+ reconstruct_seq_id(Seg + 1, 0).
+
+-spec bounds(qistate()) ->
+ {non_neg_integer(), non_neg_integer(), qistate()}.
+
+bounds(State = #qistate { segments = Segments }) ->
+ %% This is not particularly efficient, but only gets invoked on
+ %% queue initialisation.
+ SegNums = lists:sort(segment_nums(Segments)),
+ %% Don't bother trying to figure out the lowest seq_id, merely the
+ %% seq_id of the start of the lowest segment. That seq_id may not
+ %% actually exist, but that's fine. The important thing is that
+ %% the segment exists and the seq_id reported is on a segment
+ %% boundary.
+ %%
+ %% We also don't really care about the max seq_id. Just start the
+ %% next segment: it makes life much easier.
+ %%
+ %% SegNums is sorted, ascending.
+ {LowSeqId, NextSeqId} =
+ case SegNums of
+ [] -> {0, 0};
+ [MinSeg|_] -> {reconstruct_seq_id(MinSeg, 0),
+ reconstruct_seq_id(1 + lists:last(SegNums), 0)}
+ end,
+ {LowSeqId, NextSeqId, State}.
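+
+%% Worked example (a sketch, with ?SEGMENT_ENTRY_COUNT = 16384): if
+%% segments 1 and 3 exist, bounds/1 returns {16384, 65536, State} - the
+%% first seq_id of segment 1 and the first seq_id of the segment after
+%% segment 3.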
+
+-spec start(rabbit_types:vhost(), [rabbit_amqqueue:name()]) -> {[[any()]], {walker(A), A}}.
+
+start(VHost, DurableQueueNames) ->
+ ok = rabbit_recovery_terms:start(VHost),
+ {DurableTerms, DurableDirectories} =
+ lists:foldl(
+ fun(QName, {RecoveryTerms, ValidDirectories}) ->
+ DirName = queue_name_to_dir_name(QName),
+ RecoveryInfo = case rabbit_recovery_terms:read(VHost, DirName) of
+ {error, _} -> non_clean_shutdown;
+ {ok, Terms} -> Terms
+ end,
+ {[RecoveryInfo | RecoveryTerms],
+ sets:add_element(DirName, ValidDirectories)}
+ end, {[], sets:new()}, DurableQueueNames),
+ %% Any queue directory we've not been asked to recover is considered garbage
+ rabbit_file:recursive_delete(
+ [DirName ||
+ DirName <- all_queue_directory_names(VHost),
+ not sets:is_element(filename:basename(DirName), DurableDirectories)]),
+ rabbit_recovery_terms:clear(VHost),
+
+ %% The backing queue interface requires that the queue recovery terms
+ %% which come back from start/1 are in the same order as DurableQueueNames
+ OrderedTerms = lists:reverse(DurableTerms),
+ {OrderedTerms, {fun queue_index_walker/1, {start, DurableQueueNames}}}.
+
+stop(VHost) -> rabbit_recovery_terms:stop(VHost).
+
+all_queue_directory_names(VHost) ->
+ filelib:wildcard(filename:join([rabbit_vhost:msg_store_dir_path(VHost),
+ "queues", "*"])).
+
+all_queue_directory_names() ->
+ filelib:wildcard(filename:join([rabbit_vhost:msg_store_dir_wildcard(),
+ "queues", "*"])).
+
+%%----------------------------------------------------------------------------
+%% startup and shutdown
+%%----------------------------------------------------------------------------
+
+erase_index_dir(Dir) ->
+ case rabbit_file:is_dir(Dir) of
+ true -> rabbit_file:recursive_delete([Dir]);
+ false -> ok
+ end.
+
+blank_state(VHostDir, QueueName) ->
+ Dir = queue_dir(VHostDir, QueueName),
+ blank_state_name_dir_funs(QueueName,
+ Dir,
+ fun (_) -> ok end,
+ fun (_) -> ok end).
+
+queue_dir(VHostDir, QueueName) ->
+ %% Queue directory is
+ %% {node_database_dir}/msg_stores/vhosts/{vhost}/queues/{queue}
+ QueueDir = queue_name_to_dir_name(QueueName),
+ filename:join([VHostDir, "queues", QueueDir]).
+
+queue_name_to_dir_name(#resource { kind = queue,
+ virtual_host = VHost,
+ name = QName }) ->
+ <<Num:128>> = erlang:md5(<<"queue", VHost/binary, QName/binary>>),
+ rabbit_misc:format("~.36B", [Num]).
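+
+%% Shell sketch (illustrative): for queue "q1" in vhost "/",
+%%   <<Num:128>> = erlang:md5(<<"queue", "/", "q1">>),
+%%   rabbit_misc:format("~.36B", [Num])
+%% produces the base-36 directory name used under <vhost dir>/queues/.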
+
+queue_name_to_dir_name_legacy(Name = #resource { kind = queue }) ->
+ <<Num:128>> = erlang:md5(term_to_binary_compat:term_to_binary_1(Name)),
+ rabbit_misc:format("~.36B", [Num]).
+
+queues_base_dir() ->
+ rabbit_mnesia:dir().
+
+blank_state_name_dir_funs(Name, Dir, OnSyncFun, OnSyncMsgFun) ->
+ {ok, MaxJournal} =
+ application:get_env(rabbit, queue_index_max_journal_entries),
+ #qistate { dir = Dir,
+ segments = segments_new(),
+ journal_handle = undefined,
+ dirty_count = 0,
+ max_journal_entries = MaxJournal,
+ on_sync = OnSyncFun,
+ on_sync_msg = OnSyncMsgFun,
+ unconfirmed = gb_sets:new(),
+ unconfirmed_msg = gb_sets:new(),
+ pre_publish_cache = [],
+ delivered_cache = [],
+ queue_name = Name }.
+
+init_clean(RecoveredCounts, State) ->
+ %% Load the journal. Since this is a clean recovery this (almost)
+ %% gets us back to where we were on shutdown.
+ State1 = #qistate { dir = Dir, segments = Segments } = load_journal(State),
+ %% The journal loading only creates records for segments touched
+ %% by the journal, and the counts are based on the journal entries
+ %% only. We need *complete* counts for *all* segments. By an
+ %% amazing coincidence we stored that information on shutdown.
+ Segments1 =
+ lists:foldl(
+ fun ({Seg, UnackedCount}, SegmentsN) ->
+ Segment = segment_find_or_new(Seg, Dir, SegmentsN),
+ segment_store(Segment #segment { unacked = UnackedCount },
+ SegmentsN)
+ end, Segments, RecoveredCounts),
+ %% the counts above include transient messages, which would be the
+ %% wrong thing to return
+ {undefined, undefined, State1 # qistate { segments = Segments1 }}.
+
+init_dirty(CleanShutdown, ContainsCheckFun, State) ->
+ %% Recover the journal completely. This will also load segments
+ %% which have entries in the journal and remove duplicates. The
+ %% counts will correctly reflect the combination of the segment
+ %% and the journal.
+ State1 = #qistate { dir = Dir, segments = Segments } =
+ recover_journal(State),
+ {Segments1, Count, Bytes, DirtyCount} =
+ %% Load each segment in turn and filter out messages that are
+ %% not in the msg_store, by adding acks to the journal. These
+ %% acks only go to the RAM journal as it doesn't matter if we
+ %% lose them. Also mark delivered if not clean shutdown. Also
+ %% find the number of unacked messages. Also accumulate the
+ %% dirty count here, so we can call maybe_flush_journal below
+ %% and avoid unnecessary file system operations.
+ lists:foldl(
+ fun (Seg, {Segments2, CountAcc, BytesAcc, DirtyCount}) ->
+ {{Segment = #segment { unacked = UnackedCount }, Dirty},
+ UnackedBytes} =
+ recover_segment(ContainsCheckFun, CleanShutdown,
+ segment_find_or_new(Seg, Dir, Segments2),
+ State1#qistate.max_journal_entries),
+ {segment_store(Segment, Segments2),
+ CountAcc + UnackedCount,
+ BytesAcc + UnackedBytes, DirtyCount + Dirty}
+ end, {Segments, 0, 0, 0}, all_segment_nums(State1)),
+ State2 = maybe_flush_journal(State1 #qistate { segments = Segments1,
+ dirty_count = DirtyCount }),
+ {Count, Bytes, State2}.
+
+terminate(State = #qistate { journal_handle = JournalHdl,
+ segments = Segments }) ->
+ ok = case JournalHdl of
+ undefined -> ok;
+ _ -> file_handle_cache:close(JournalHdl)
+ end,
+ SegmentCounts =
+ segment_fold(
+ fun (#segment { num = Seg, unacked = UnackedCount }, Acc) ->
+ [{Seg, UnackedCount} | Acc]
+ end, [], Segments),
+ {SegmentCounts, State #qistate { journal_handle = undefined,
+ segments = undefined }}.
+
+recover_segment(ContainsCheckFun, CleanShutdown,
+ Segment = #segment { journal_entries = JEntries }, MaxJournal) ->
+ {SegEntries, UnackedCount} = load_segment(false, Segment),
+ {SegEntries1, UnackedCountDelta} =
+ segment_plus_journal(SegEntries, JEntries),
+ array:sparse_foldl(
+ fun (RelSeq, {{IsPersistent, Bin, MsgBin}, Del, no_ack},
+ {SegmentAndDirtyCount, Bytes}) ->
+ {MsgOrId, MsgProps} = parse_pub_record_body(Bin, MsgBin),
+ {recover_message(ContainsCheckFun(MsgOrId), CleanShutdown,
+ Del, RelSeq, SegmentAndDirtyCount, MaxJournal),
+ Bytes + case IsPersistent of
+ true -> MsgProps#message_properties.size;
+ false -> 0
+ end}
+ end,
+ {{Segment #segment { unacked = UnackedCount + UnackedCountDelta }, 0}, 0},
+ SegEntries1).
+
+recover_message( true, true, _Del, _RelSeq, SegmentAndDirtyCount, _MaxJournal) ->
+ SegmentAndDirtyCount;
+recover_message( true, false, del, _RelSeq, SegmentAndDirtyCount, _MaxJournal) ->
+ SegmentAndDirtyCount;
+recover_message( true, false, no_del, RelSeq, {Segment, _DirtyCount}, MaxJournal) ->
+    %% return a dirty count above MaxJournal to force a journal flush
+ {add_to_journal(RelSeq, del, Segment), MaxJournal + 1};
+recover_message(false, _, del, RelSeq, {Segment, DirtyCount}, _MaxJournal) ->
+ {add_to_journal(RelSeq, ack, Segment), DirtyCount + 1};
+recover_message(false, _, no_del, RelSeq, {Segment, DirtyCount}, _MaxJournal) ->
+ {add_to_journal(RelSeq, ack,
+ add_to_journal(RelSeq, del, Segment)),
+ DirtyCount + 2}.
+
+%%----------------------------------------------------------------------------
+%% msg store startup delta function
+%%----------------------------------------------------------------------------
+
+queue_index_walker({start, DurableQueues}) when is_list(DurableQueues) ->
+ {ok, Gatherer} = gatherer:start_link(),
+ [begin
+ ok = gatherer:fork(Gatherer),
+ ok = worker_pool:submit_async(
+ fun () -> link(Gatherer),
+ ok = queue_index_walker_reader(QueueName, Gatherer),
+ unlink(Gatherer),
+ ok
+ end)
+ end || QueueName <- DurableQueues],
+ queue_index_walker({next, Gatherer});
+
+queue_index_walker({next, Gatherer}) when is_pid(Gatherer) ->
+ case gatherer:out(Gatherer) of
+ empty ->
+ ok = gatherer:stop(Gatherer),
+ finished;
+ {value, {MsgId, Count}} ->
+ {MsgId, Count, {next, Gatherer}}
+ end.
+
+queue_index_walker_reader(QueueName, Gatherer) ->
+ ok = scan_queue_segments(
+ fun (_SeqId, MsgId, _MsgProps, true, _IsDelivered, no_ack, ok)
+ when is_binary(MsgId) ->
+ gatherer:sync_in(Gatherer, {MsgId, 1});
+ (_SeqId, _MsgId, _MsgProps, _IsPersistent, _IsDelivered,
+ _IsAcked, Acc) ->
+ Acc
+ end, ok, QueueName),
+ ok = gatherer:finish(Gatherer).
+
+scan_queue_segments(Fun, Acc, #resource{ virtual_host = VHost } = QueueName) ->
+ VHostDir = rabbit_vhost:msg_store_dir_path(VHost),
+ scan_queue_segments(Fun, Acc, VHostDir, QueueName).
+
+scan_queue_segments(Fun, Acc, VHostDir, QueueName) ->
+ State = #qistate { segments = Segments, dir = Dir } =
+ recover_journal(blank_state(VHostDir, QueueName)),
+ Result = lists:foldr(
+ fun (Seg, AccN) ->
+ segment_entries_foldr(
+ fun (RelSeq, {{MsgOrId, MsgProps, IsPersistent},
+ IsDelivered, IsAcked}, AccM) ->
+ Fun(reconstruct_seq_id(Seg, RelSeq), MsgOrId, MsgProps,
+ IsPersistent, IsDelivered, IsAcked, AccM)
+ end, AccN, segment_find_or_new(Seg, Dir, Segments))
+ end, Acc, all_segment_nums(State)),
+ {_SegmentCounts, _State} = terminate(State),
+ Result.
+
+%%----------------------------------------------------------------------------
+%% expiry/binary manipulation
+%%----------------------------------------------------------------------------
+
+create_pub_record_body(MsgOrId, #message_properties { expiry = Expiry,
+ size = Size }) ->
+ ExpiryBin = expiry_to_binary(Expiry),
+ case MsgOrId of
+ MsgId when is_binary(MsgId) ->
+ {<<MsgId/binary, ExpiryBin/binary, Size:?SIZE_BITS>>, <<>>};
+ #basic_message{id = MsgId} ->
+ MsgBin = term_to_binary(MsgOrId),
+ {<<MsgId/binary, ExpiryBin/binary, Size:?SIZE_BITS>>, MsgBin}
+ end.
+
+expiry_to_binary(undefined) -> <<?NO_EXPIRY:?EXPIRY_BITS>>;
+expiry_to_binary(Expiry) -> <<Expiry:?EXPIRY_BITS>>.
+
+parse_pub_record_body(<<MsgIdNum:?MSG_ID_BITS, Expiry:?EXPIRY_BITS,
+ Size:?SIZE_BITS>>, MsgBin) ->
+    %% workaround for binary data fragmentation; see
+    %% rabbit_msg_file:read_next/2
+ <<MsgId:?MSG_ID_BYTES/binary>> = <<MsgIdNum:?MSG_ID_BITS>>,
+ Props = #message_properties{expiry = case Expiry of
+ ?NO_EXPIRY -> undefined;
+ X -> X
+ end,
+ size = Size},
+ case MsgBin of
+ <<>> -> {MsgId, Props};
+ _ -> Msg = #basic_message{id = MsgId} = binary_to_term(MsgBin),
+ {Msg, Props}
+ end.
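+
+%% Roundtrip sketch (illustrative): for a 16-byte MsgId and properties
+%% with no expiry, create_pub_record_body(MsgId, Props) returns
+%%   {<<MsgId/binary, ?NO_EXPIRY:?EXPIRY_BITS, Size:?SIZE_BITS>>, <<>>}
+%% and parse_pub_record_body/2 maps it back to {MsgId, Props}, with
+%% ?NO_EXPIRY decoded as 'undefined'.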
+
+%%----------------------------------------------------------------------------
+%% journal manipulation
+%%----------------------------------------------------------------------------
+
+add_to_journal(SeqId, Action, State = #qistate { dirty_count = DCount,
+ segments = Segments,
+ dir = Dir }) ->
+ {Seg, RelSeq} = seq_id_to_seg_and_rel_seq_id(SeqId),
+ Segment = segment_find_or_new(Seg, Dir, Segments),
+ Segment1 = add_to_journal(RelSeq, Action, Segment),
+ State #qistate { dirty_count = DCount + 1,
+ segments = segment_store(Segment1, Segments) };
+
+add_to_journal(RelSeq, Action,
+ Segment = #segment { journal_entries = JEntries,
+ entries_to_segment = EToSeg,
+ unacked = UnackedCount }) ->
+
+ {Fun, Entry} = action_to_entry(RelSeq, Action, JEntries),
+
+ {JEntries1, EToSeg1} =
+ case Fun of
+ set ->
+ {array:set(RelSeq, Entry, JEntries),
+ array:set(RelSeq, entry_to_segment(RelSeq, Entry, []),
+ EToSeg)};
+ reset ->
+ {array:reset(RelSeq, JEntries),
+ array:reset(RelSeq, EToSeg)}
+ end,
+
+ Segment #segment {
+ journal_entries = JEntries1,
+ entries_to_segment = EToSeg1,
+ unacked = UnackedCount + case Action of
+ ?PUB -> +1;
+ del -> 0;
+ ack -> -1
+ end}.
+
+action_to_entry(RelSeq, Action, JEntries) ->
+ case array:get(RelSeq, JEntries) of
+ undefined ->
+ {set,
+ case Action of
+ ?PUB -> {Action, no_del, no_ack};
+ del -> {no_pub, del, no_ack};
+ ack -> {no_pub, no_del, ack}
+ end};
+ ({Pub, no_del, no_ack}) when Action == del ->
+ {set, {Pub, del, no_ack}};
+ ({no_pub, del, no_ack}) when Action == ack ->
+ {set, {no_pub, del, ack}};
+ ({?PUB, del, no_ack}) when Action == ack ->
+ {reset, none}
+ end.
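+
+%% Traced example (a sketch): for a fresh RelSeq a publish yields
+%% {Pub, no_del, no_ack}; a subsequent del turns that into
+%% {Pub, del, no_ack}; an ack on a {?PUB, del, no_ack} entry resets the
+%% slot entirely, since a fully pub+del+ack'd journal entry never needs
+%% to reach the segment file (see entry_to_segment/3).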
+
+maybe_flush_journal(State) ->
+ maybe_flush_journal(infinity, State).
+
+maybe_flush_journal(Hint, State = #qistate { dirty_count = DCount,
+ max_journal_entries = MaxJournal })
+ when DCount > MaxJournal orelse (Hint =/= infinity andalso DCount > Hint) ->
+ flush_journal(State);
+maybe_flush_journal(_Hint, State) ->
+ State.
+
+flush_journal(State = #qistate { segments = Segments }) ->
+ Segments1 =
+ segment_fold(
+ fun (#segment { unacked = 0, path = Path }, SegmentsN) ->
+ case rabbit_file:is_file(Path) of
+ true -> ok = rabbit_file:delete(Path);
+ false -> ok
+ end,
+ SegmentsN;
+ (#segment {} = Segment, SegmentsN) ->
+ segment_store(append_journal_to_segment(Segment), SegmentsN)
+ end, segments_new(), Segments),
+ {JournalHdl, State1} =
+ get_journal_handle(State #qistate { segments = Segments1 }),
+ ok = file_handle_cache:clear(JournalHdl),
+ notify_sync(State1 #qistate { dirty_count = 0 }).
+
+append_journal_to_segment(#segment { journal_entries = JEntries,
+ entries_to_segment = EToSeg,
+ path = Path } = Segment) ->
+ case array:sparse_size(JEntries) of
+ 0 -> Segment;
+ _ ->
+ file_handle_cache_stats:update(queue_index_write),
+
+ {ok, Hdl} = file_handle_cache:open_with_absolute_path(
+ Path, ?WRITE_MODE,
+ [{write_buffer, infinity}]),
+            %% file_handle_cache also reverses the list, so this reverse
+            %% may be redundant; but since this code previously used a
+            %% sparse_foldr, keeping the lists:reverse/1 preserves the
+            %% original ordering for now.
+ file_handle_cache:append(Hdl, lists:reverse(array:to_list(EToSeg))),
+ ok = file_handle_cache:close(Hdl),
+ Segment #segment { journal_entries = array_new(),
+ entries_to_segment = array_new([]) }
+ end.
+
+get_journal_handle(State = #qistate { journal_handle = undefined,
+ dir = Dir,
+ queue_name = Name }) ->
+ Path = filename:join(Dir, ?JOURNAL_FILENAME),
+ ok = rabbit_file:ensure_dir(Path),
+ ok = ensure_queue_name_stub_file(Dir, Name),
+ {ok, Hdl} = file_handle_cache:open_with_absolute_path(
+ Path, ?WRITE_MODE, [{write_buffer, infinity}]),
+ {Hdl, State #qistate { journal_handle = Hdl }};
+get_journal_handle(State = #qistate { journal_handle = Hdl }) ->
+ {Hdl, State}.
+
+%% Loading Journal. This isn't idempotent and will mess up the counts
+%% if you call it more than once on the same state. Assumes the counts
+%% are 0 to start with.
+load_journal(State = #qistate { dir = Dir }) ->
+ Path = filename:join(Dir, ?JOURNAL_FILENAME),
+ case rabbit_file:is_file(Path) of
+ true -> {JournalHdl, State1} = get_journal_handle(State),
+ Size = rabbit_file:file_size(Path),
+ {ok, 0} = file_handle_cache:position(JournalHdl, 0),
+ {ok, JournalBin} = file_handle_cache:read(JournalHdl, Size),
+ parse_journal_entries(JournalBin, State1);
+ false -> State
+ end.
+
+%% ditto
+recover_journal(State) ->
+ State1 = #qistate { segments = Segments } = load_journal(State),
+ Segments1 =
+ segment_map(
+ fun (Segment = #segment { journal_entries = JEntries,
+ entries_to_segment = EToSeg,
+ unacked = UnackedCountInJournal }) ->
+ %% We want to keep ack'd entries in so that we can
+ %% remove them if duplicates are in the journal. The
+ %% counts here are purely from the segment itself.
+ {SegEntries, UnackedCountInSeg} = load_segment(true, Segment),
+ {JEntries1, EToSeg1, UnackedCountDuplicates} =
+ journal_minus_segment(JEntries, EToSeg, SegEntries),
+ Segment #segment { journal_entries = JEntries1,
+ entries_to_segment = EToSeg1,
+ unacked = (UnackedCountInJournal +
+ UnackedCountInSeg -
+ UnackedCountDuplicates) }
+ end, Segments),
+ State1 #qistate { segments = Segments1 }.
+
+parse_journal_entries(<<?DEL_JPREFIX:?JPREFIX_BITS, SeqId:?SEQ_BITS,
+ Rest/binary>>, State) ->
+ parse_journal_entries(Rest, add_to_journal(SeqId, del, State));
+
+parse_journal_entries(<<?ACK_JPREFIX:?JPREFIX_BITS, SeqId:?SEQ_BITS,
+ Rest/binary>>, State) ->
+ parse_journal_entries(Rest, add_to_journal(SeqId, ack, State));
+parse_journal_entries(<<0:?JPREFIX_BITS, 0:?SEQ_BITS,
+ 0:?PUB_RECORD_SIZE_BYTES/unit:8, _/binary>>, State) ->
+    %% A journal entry composed only of zeroes was probably produced
+    %% during a dirty shutdown, so stop reading
+ State;
+parse_journal_entries(<<Prefix:?JPREFIX_BITS, SeqId:?SEQ_BITS,
+ Bin:?PUB_RECORD_BODY_BYTES/binary,
+ MsgSize:?EMBEDDED_SIZE_BITS, MsgBin:MsgSize/binary,
+ Rest/binary>>, State) ->
+ IsPersistent = case Prefix of
+ ?PUB_PERSIST_JPREFIX -> true;
+ ?PUB_TRANS_JPREFIX -> false
+ end,
+ parse_journal_entries(
+ Rest, add_to_journal(SeqId, {IsPersistent, Bin, MsgBin}, State));
+parse_journal_entries(_ErrOrEoF, State) ->
+ State.
+
+deliver_or_ack(_Kind, [], State) ->
+ State;
+deliver_or_ack(Kind, SeqIds, State) ->
+ JPrefix = case Kind of ack -> ?ACK_JPREFIX; del -> ?DEL_JPREFIX end,
+ {JournalHdl, State1} = get_journal_handle(State),
+ file_handle_cache_stats:update(queue_index_journal_write),
+ ok = file_handle_cache:append(
+ JournalHdl,
+ [<<JPrefix:?JPREFIX_BITS, SeqId:?SEQ_BITS>> || SeqId <- SeqIds]),
+ maybe_flush_journal(lists:foldl(fun (SeqId, StateN) ->
+ add_to_journal(SeqId, Kind, StateN)
+ end, State1, SeqIds)).
+
+notify_sync(State = #qistate{unconfirmed = UC,
+ unconfirmed_msg = UCM,
+ on_sync = OnSyncFun,
+ on_sync_msg = OnSyncMsgFun}) ->
+ State1 = case gb_sets:is_empty(UC) of
+ true -> State;
+ false -> OnSyncFun(UC),
+ State#qistate{unconfirmed = gb_sets:new()}
+ end,
+ case gb_sets:is_empty(UCM) of
+ true -> State1;
+ false -> OnSyncMsgFun(UCM),
+ State1#qistate{unconfirmed_msg = gb_sets:new()}
+ end.
+
+%%----------------------------------------------------------------------------
+%% segment manipulation
+%%----------------------------------------------------------------------------
+
+seq_id_to_seg_and_rel_seq_id(SeqId) ->
+ { SeqId div ?SEGMENT_ENTRY_COUNT, SeqId rem ?SEGMENT_ENTRY_COUNT }.
+
+reconstruct_seq_id(Seg, RelSeq) ->
+ (Seg * ?SEGMENT_ENTRY_COUNT) + RelSeq.
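+
+%% E.g. (a sketch, with ?SEGMENT_ENTRY_COUNT = 16384):
+%%   seq_id_to_seg_and_rel_seq_id(16389) -> {1, 5}
+%%   reconstruct_seq_id(1, 5)            -> 16389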
+
+all_segment_nums(#qistate { dir = Dir, segments = Segments }) ->
+ lists:sort(
+ sets:to_list(
+ lists:foldl(
+ fun (SegName, Set) ->
+ sets:add_element(
+ list_to_integer(
+ lists:takewhile(fun (C) -> $0 =< C andalso C =< $9 end,
+ SegName)), Set)
+ end, sets:from_list(segment_nums(Segments)),
+ rabbit_file:wildcard(".*\\" ++ ?SEGMENT_EXTENSION, Dir)))).
+
+segment_find_or_new(Seg, Dir, Segments) ->
+ case segment_find(Seg, Segments) of
+ {ok, Segment} -> Segment;
+ error -> SegName = integer_to_list(Seg) ++ ?SEGMENT_EXTENSION,
+ Path = filename:join(Dir, SegName),
+ #segment { num = Seg,
+ path = Path,
+ journal_entries = array_new(),
+ entries_to_segment = array_new([]),
+ unacked = 0 }
+ end.
+
+segment_find(Seg, {_Segments, [Segment = #segment { num = Seg } |_]}) ->
+ {ok, Segment}; %% 1 or (2, matches head)
+segment_find(Seg, {_Segments, [_, Segment = #segment { num = Seg }]}) ->
+ {ok, Segment}; %% 2, matches tail
+segment_find(Seg, {Segments, _}) -> %% no match
+ maps:find(Seg, Segments).
+
+segment_store(Segment = #segment { num = Seg }, %% 1 or (2, matches head)
+ {Segments, [#segment { num = Seg } | Tail]}) ->
+ {Segments, [Segment | Tail]};
+segment_store(Segment = #segment { num = Seg }, %% 2, matches tail
+ {Segments, [SegmentA, #segment { num = Seg }]}) ->
+ {Segments, [Segment, SegmentA]};
+segment_store(Segment = #segment { num = Seg }, {Segments, []}) ->
+ {maps:remove(Seg, Segments), [Segment]};
+segment_store(Segment = #segment { num = Seg }, {Segments, [SegmentA]}) ->
+ {maps:remove(Seg, Segments), [Segment, SegmentA]};
+segment_store(Segment = #segment { num = Seg },
+ {Segments, [SegmentA, SegmentB]}) ->
+ {maps:put(SegmentB#segment.num, SegmentB, maps:remove(Seg, Segments)),
+ [Segment, SegmentA]}.
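+
+%% Illustrative trace of the {Map, Cache} structure, where Cache holds at
+%% most the two most recently stored segments:
+%%   S0 = segments_new(),           %% {#{}, []}
+%%   S1 = segment_store(SegA, S0),  %% {#{}, [SegA]}
+%%   S2 = segment_store(SegB, S1),  %% {#{}, [SegB, SegA]}
+%%   segment_store(SegC, S2)        %% [SegC, SegB]; SegA moves to the map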
+
+segment_fold(Fun, Acc, {Segments, CachedSegments}) ->
+ maps:fold(fun (_Seg, Segment, Acc1) -> Fun(Segment, Acc1) end,
+ lists:foldl(Fun, Acc, CachedSegments), Segments).
+
+segment_map(Fun, {Segments, CachedSegments}) ->
+ {maps:map(fun (_Seg, Segment) -> Fun(Segment) end, Segments),
+ lists:map(Fun, CachedSegments)}.
+
+segment_nums({Segments, CachedSegments}) ->
+ lists:map(fun (#segment { num = Num }) -> Num end, CachedSegments) ++
+ maps:keys(Segments).
+
+segments_new() ->
+ {#{}, []}.
+
+entry_to_segment(_RelSeq, {?PUB, del, ack}, Initial) ->
+ Initial;
+entry_to_segment(RelSeq, {Pub, Del, Ack}, Initial) ->
+ %% NB: we are assembling the segment in reverse order here, so
+ %% del/ack comes first.
+ Buf1 = case {Del, Ack} of
+ {no_del, no_ack} ->
+ Initial;
+ _ ->
+ Binary = <<?REL_SEQ_ONLY_PREFIX:?REL_SEQ_ONLY_PREFIX_BITS,
+ RelSeq:?REL_SEQ_BITS>>,
+ case {Del, Ack} of
+ {del, ack} -> [[Binary, Binary] | Initial];
+ _ -> [Binary | Initial]
+ end
+ end,
+ case Pub of
+ no_pub ->
+ Buf1;
+ {IsPersistent, Bin, MsgBin} ->
+ [[<<?PUB_PREFIX:?PUB_PREFIX_BITS,
+ (bool_to_int(IsPersistent)):1,
+ RelSeq:?REL_SEQ_BITS, Bin/binary,
+ (size(MsgBin)):?EMBEDDED_SIZE_BITS>>, MsgBin] | Buf1]
+ end.
+
+read_bounded_segment(Seg, {StartSeg, StartRelSeq}, {EndSeg, EndRelSeq},
+ {Messages, Segments}, Dir) ->
+ Segment = segment_find_or_new(Seg, Dir, Segments),
+ {segment_entries_foldr(
+ fun (RelSeq, {{MsgOrId, MsgProps, IsPersistent}, IsDelivered, no_ack},
+ Acc)
+ when (Seg > StartSeg orelse StartRelSeq =< RelSeq) andalso
+ (Seg < EndSeg orelse EndRelSeq >= RelSeq) ->
+ [{MsgOrId, reconstruct_seq_id(StartSeg, RelSeq), MsgProps,
+ IsPersistent, IsDelivered == del} | Acc];
+ (_RelSeq, _Value, Acc) ->
+ Acc
+ end, Messages, Segment),
+ segment_store(Segment, Segments)}.
+
+segment_entries_foldr(Fun, Init,
+ Segment = #segment { journal_entries = JEntries }) ->
+ {SegEntries, _UnackedCount} = load_segment(false, Segment),
+ {SegEntries1, _UnackedCountD} = segment_plus_journal(SegEntries, JEntries),
+ array:sparse_foldr(
+ fun (RelSeq, {{IsPersistent, Bin, MsgBin}, Del, Ack}, Acc) ->
+ {MsgOrId, MsgProps} = parse_pub_record_body(Bin, MsgBin),
+ Fun(RelSeq, {{MsgOrId, MsgProps, IsPersistent}, Del, Ack}, Acc)
+ end, Init, SegEntries1).
+
+%% Loading segments
+%%
+%% Does not do any combining with the journal at all.
+load_segment(KeepAcked, #segment { path = Path }) ->
+ Empty = {array_new(), 0},
+ case rabbit_file:is_file(Path) of
+ false -> Empty;
+ true -> Size = rabbit_file:file_size(Path),
+ file_handle_cache_stats:update(queue_index_read),
+ {ok, Hdl} = file_handle_cache:open_with_absolute_path(
+ Path, ?READ_MODE, []),
+ {ok, 0} = file_handle_cache:position(Hdl, bof),
+ {ok, SegBin} = file_handle_cache:read(Hdl, Size),
+ ok = file_handle_cache:close(Hdl),
+                 parse_segment_entries(SegBin, KeepAcked, Empty)
+ end.
+
+parse_segment_entries(<<?PUB_PREFIX:?PUB_PREFIX_BITS,
+ IsPersistNum:1, RelSeq:?REL_SEQ_BITS, Rest/binary>>,
+ KeepAcked, Acc) ->
+ parse_segment_publish_entry(
+ Rest, 1 == IsPersistNum, RelSeq, KeepAcked, Acc);
+parse_segment_entries(<<?REL_SEQ_ONLY_PREFIX:?REL_SEQ_ONLY_PREFIX_BITS,
+ RelSeq:?REL_SEQ_BITS, Rest/binary>>, KeepAcked, Acc) ->
+ parse_segment_entries(
+ Rest, KeepAcked, add_segment_relseq_entry(KeepAcked, RelSeq, Acc));
+parse_segment_entries(<<>>, _KeepAcked, Acc) ->
+ Acc.
+
+parse_segment_publish_entry(<<Bin:?PUB_RECORD_BODY_BYTES/binary,
+ MsgSize:?EMBEDDED_SIZE_BITS,
+ MsgBin:MsgSize/binary, Rest/binary>>,
+ IsPersistent, RelSeq, KeepAcked,
+ {SegEntries, Unacked}) ->
+ Obj = {{IsPersistent, Bin, MsgBin}, no_del, no_ack},
+ SegEntries1 = array:set(RelSeq, Obj, SegEntries),
+ parse_segment_entries(Rest, KeepAcked, {SegEntries1, Unacked + 1});
+parse_segment_publish_entry(Rest, _IsPersistent, _RelSeq, KeepAcked, Acc) ->
+ parse_segment_entries(Rest, KeepAcked, Acc).
+
+add_segment_relseq_entry(KeepAcked, RelSeq, {SegEntries, Unacked}) ->
+ case array:get(RelSeq, SegEntries) of
+ {Pub, no_del, no_ack} ->
+ {array:set(RelSeq, {Pub, del, no_ack}, SegEntries), Unacked};
+ {Pub, del, no_ack} when KeepAcked ->
+ {array:set(RelSeq, {Pub, del, ack}, SegEntries), Unacked - 1};
+ {_Pub, del, no_ack} ->
+ {array:reset(RelSeq, SegEntries), Unacked - 1}
+ end.
+
+array_new() ->
+ array_new(undefined).
+
+array_new(Default) ->
+ array:new([{default, Default}, fixed, {size, ?SEGMENT_ENTRY_COUNT}]).
+
+bool_to_int(true ) -> 1;
+bool_to_int(false) -> 0.
+
+%%----------------------------------------------------------------------------
+%% journal & segment combination
+%%----------------------------------------------------------------------------
+
+%% Combine what we have just read from a segment file with what we're
+%% holding for that segment in memory. There must be no duplicates.
+segment_plus_journal(SegEntries, JEntries) ->
+ array:sparse_foldl(
+ fun (RelSeq, JObj, {SegEntriesOut, AdditionalUnacked}) ->
+ SegEntry = array:get(RelSeq, SegEntriesOut),
+ {Obj, AdditionalUnackedDelta} =
+ segment_plus_journal1(SegEntry, JObj),
+ {case Obj of
+ undefined -> array:reset(RelSeq, SegEntriesOut);
+ _ -> array:set(RelSeq, Obj, SegEntriesOut)
+ end,
+ AdditionalUnacked + AdditionalUnackedDelta}
+ end, {SegEntries, 0}, JEntries).
+
+%% Here, the result is a tuple with the first element containing the
+%% item which we may be adding to (for items only in the journal),
+%% modifying in (bits in both), or, when returning 'undefined',
+%% erasing from (ack in journal, not segment) the segment array. The
+%% other element of the tuple is the delta for AdditionalUnacked.
+segment_plus_journal1(undefined, {?PUB, no_del, no_ack} = Obj) ->
+ {Obj, 1};
+segment_plus_journal1(undefined, {?PUB, del, no_ack} = Obj) ->
+ {Obj, 1};
+segment_plus_journal1(undefined, {?PUB, del, ack}) ->
+ {undefined, 0};
+
+segment_plus_journal1({?PUB = Pub, no_del, no_ack}, {no_pub, del, no_ack}) ->
+ {{Pub, del, no_ack}, 0};
+segment_plus_journal1({?PUB, no_del, no_ack}, {no_pub, del, ack}) ->
+ {undefined, -1};
+segment_plus_journal1({?PUB, del, no_ack}, {no_pub, no_del, ack}) ->
+ {undefined, -1}.
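+
+%% E.g. (sketch): a segment entry {Pub, no_del, no_ack} combined with a
+%% journal entry {no_pub, del, ack} yields 'undefined': the message was
+%% delivered and acked after the segment was written, so it disappears
+%% and the unacked count drops by one.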
+
+%% Remove from the journal entries for a segment, items that are
+%% duplicates of entries found in the segment itself. Used on start up
+%% to clean up the journal.
+%%
+%% We need to update the entries_to_segment since they are just a
+%% cache of what's on the journal.
+journal_minus_segment(JEntries, EToSeg, SegEntries) ->
+ array:sparse_foldl(
+ fun (RelSeq, JObj, {JEntriesOut, EToSegOut, UnackedRemoved}) ->
+ SegEntry = array:get(RelSeq, SegEntries),
+ {Obj, UnackedRemovedDelta} =
+ journal_minus_segment1(JObj, SegEntry),
+ {JEntriesOut1, EToSegOut1} =
+ case Obj of
+ keep ->
+ {JEntriesOut, EToSegOut};
+ undefined ->
+ {array:reset(RelSeq, JEntriesOut),
+ array:reset(RelSeq, EToSegOut)};
+ _ ->
+ {array:set(RelSeq, Obj, JEntriesOut),
+ array:set(RelSeq, entry_to_segment(RelSeq, Obj, []),
+ EToSegOut)}
+ end,
+ {JEntriesOut1, EToSegOut1, UnackedRemoved + UnackedRemovedDelta}
+ end, {JEntries, EToSeg, 0}, JEntries).
+
+%% Here, the result is a tuple with the first element containing the
+%% item we are adding to or modifying in the (initially fresh) journal
+%% array. If the item is 'undefined' we leave the journal array
+%% alone. The other element of the tuple is the deltas for
+%% UnackedRemoved.
+
+%% Both the same. The entry must contain at least the publish.
+journal_minus_segment1({?PUB, _Del, no_ack} = Obj, Obj) ->
+ {undefined, 1};
+journal_minus_segment1({?PUB, _Del, ack} = Obj, Obj) ->
+ {undefined, 0};
+
+%% Just publish in journal
+journal_minus_segment1({?PUB, no_del, no_ack}, undefined) ->
+ {keep, 0};
+
+%% Publish and deliver in journal
+journal_minus_segment1({?PUB, del, no_ack}, undefined) ->
+ {keep, 0};
+journal_minus_segment1({?PUB = Pub, del, no_ack}, {Pub, no_del, no_ack}) ->
+ {{no_pub, del, no_ack}, 1};
+
+%% Publish, deliver and ack in journal
+journal_minus_segment1({?PUB, del, ack}, undefined) ->
+ {keep, 0};
+journal_minus_segment1({?PUB = Pub, del, ack}, {Pub, no_del, no_ack}) ->
+ {{no_pub, del, ack}, 1};
+journal_minus_segment1({?PUB = Pub, del, ack}, {Pub, del, no_ack}) ->
+ {{no_pub, no_del, ack}, 1};
+
+%% Just deliver in journal
+journal_minus_segment1({no_pub, del, no_ack}, {?PUB, no_del, no_ack}) ->
+ {keep, 0};
+journal_minus_segment1({no_pub, del, no_ack}, {?PUB, del, no_ack}) ->
+ {undefined, 0};
+
+%% Just ack in journal
+journal_minus_segment1({no_pub, no_del, ack}, {?PUB, del, no_ack}) ->
+ {keep, 0};
+journal_minus_segment1({no_pub, no_del, ack}, {?PUB, del, ack}) ->
+ {undefined, -1};
+
+%% Deliver and ack in journal
+journal_minus_segment1({no_pub, del, ack}, {?PUB, no_del, no_ack}) ->
+ {keep, 0};
+journal_minus_segment1({no_pub, del, ack}, {?PUB, del, no_ack}) ->
+ {{no_pub, no_del, ack}, 0};
+journal_minus_segment1({no_pub, del, ack}, {?PUB, del, ack}) ->
+ {undefined, -1};
+
+%% Missing segment. If flush_journal/1 is interrupted after deleting
+%% the segment but before truncating the journal we can get these
+%% cases: a delivery and an acknowledgement in the journal, or just an
+%% acknowledgement in the journal, but with no segment. In both cases
+%% we have really forgotten the message; so ignore what's in the
+%% journal.
+journal_minus_segment1({no_pub, no_del, ack}, undefined) ->
+ {undefined, 0};
+journal_minus_segment1({no_pub, del, ack}, undefined) ->
+ {undefined, 0}.
+
+%%----------------------------------------------------------------------------
+%% upgrade
+%%----------------------------------------------------------------------------
+
+-spec add_queue_ttl() -> 'ok'.
+
+add_queue_ttl() ->
+ foreach_queue_index({fun add_queue_ttl_journal/1,
+ fun add_queue_ttl_segment/1}).
+
+add_queue_ttl_journal(<<?DEL_JPREFIX:?JPREFIX_BITS, SeqId:?SEQ_BITS,
+ Rest/binary>>) ->
+ {<<?DEL_JPREFIX:?JPREFIX_BITS, SeqId:?SEQ_BITS>>, Rest};
+add_queue_ttl_journal(<<?ACK_JPREFIX:?JPREFIX_BITS, SeqId:?SEQ_BITS,
+ Rest/binary>>) ->
+ {<<?ACK_JPREFIX:?JPREFIX_BITS, SeqId:?SEQ_BITS>>, Rest};
+add_queue_ttl_journal(<<Prefix:?JPREFIX_BITS, SeqId:?SEQ_BITS,
+ MsgId:?MSG_ID_BYTES/binary, Rest/binary>>) ->
+ {[<<Prefix:?JPREFIX_BITS, SeqId:?SEQ_BITS>>, MsgId,
+ expiry_to_binary(undefined)], Rest};
+add_queue_ttl_journal(_) ->
+ stop.
+
+add_queue_ttl_segment(<<?PUB_PREFIX:?PUB_PREFIX_BITS, IsPersistentNum:1,
+ RelSeq:?REL_SEQ_BITS, MsgId:?MSG_ID_BYTES/binary,
+ Rest/binary>>) ->
+ {[<<?PUB_PREFIX:?PUB_PREFIX_BITS, IsPersistentNum:1, RelSeq:?REL_SEQ_BITS>>,
+ MsgId, expiry_to_binary(undefined)], Rest};
+add_queue_ttl_segment(<<?REL_SEQ_ONLY_PREFIX:?REL_SEQ_ONLY_PREFIX_BITS,
+ RelSeq:?REL_SEQ_BITS, Rest/binary>>) ->
+ {<<?REL_SEQ_ONLY_PREFIX:?REL_SEQ_ONLY_PREFIX_BITS, RelSeq:?REL_SEQ_BITS>>,
+ Rest};
+add_queue_ttl_segment(_) ->
+ stop.
+
+avoid_zeroes() ->
+ foreach_queue_index({none, fun avoid_zeroes_segment/1}).
+
+avoid_zeroes_segment(<<?PUB_PREFIX:?PUB_PREFIX_BITS, IsPersistentNum:1,
+ RelSeq:?REL_SEQ_BITS, MsgId:?MSG_ID_BITS,
+ Expiry:?EXPIRY_BITS, Rest/binary>>) ->
+ {<<?PUB_PREFIX:?PUB_PREFIX_BITS, IsPersistentNum:1, RelSeq:?REL_SEQ_BITS,
+ MsgId:?MSG_ID_BITS, Expiry:?EXPIRY_BITS>>, Rest};
+avoid_zeroes_segment(<<0:?REL_SEQ_ONLY_PREFIX_BITS,
+ RelSeq:?REL_SEQ_BITS, Rest/binary>>) ->
+ {<<?REL_SEQ_ONLY_PREFIX:?REL_SEQ_ONLY_PREFIX_BITS, RelSeq:?REL_SEQ_BITS>>,
+ Rest};
+avoid_zeroes_segment(_) ->
+ stop.
+
+%% At upgrade time we just define every message's size as 0 - that
+%% will save us a load of faff with the message store, and means we
+%% can actually use the clean recovery terms in VQ. It does mean we
+%% don't count message bodies from before the migration, but we can
+%% live with that.
+store_msg_size() ->
+ foreach_queue_index({fun store_msg_size_journal/1,
+ fun store_msg_size_segment/1}).
+
+store_msg_size_journal(<<?DEL_JPREFIX:?JPREFIX_BITS, SeqId:?SEQ_BITS,
+ Rest/binary>>) ->
+ {<<?DEL_JPREFIX:?JPREFIX_BITS, SeqId:?SEQ_BITS>>, Rest};
+store_msg_size_journal(<<?ACK_JPREFIX:?JPREFIX_BITS, SeqId:?SEQ_BITS,
+ Rest/binary>>) ->
+ {<<?ACK_JPREFIX:?JPREFIX_BITS, SeqId:?SEQ_BITS>>, Rest};
+store_msg_size_journal(<<Prefix:?JPREFIX_BITS, SeqId:?SEQ_BITS,
+ MsgId:?MSG_ID_BITS, Expiry:?EXPIRY_BITS,
+ Rest/binary>>) ->
+ {<<Prefix:?JPREFIX_BITS, SeqId:?SEQ_BITS, MsgId:?MSG_ID_BITS,
+ Expiry:?EXPIRY_BITS, 0:?SIZE_BITS>>, Rest};
+store_msg_size_journal(_) ->
+ stop.
+
+store_msg_size_segment(<<?PUB_PREFIX:?PUB_PREFIX_BITS, IsPersistentNum:1,
+ RelSeq:?REL_SEQ_BITS, MsgId:?MSG_ID_BITS,
+ Expiry:?EXPIRY_BITS, Rest/binary>>) ->
+ {<<?PUB_PREFIX:?PUB_PREFIX_BITS, IsPersistentNum:1, RelSeq:?REL_SEQ_BITS,
+ MsgId:?MSG_ID_BITS, Expiry:?EXPIRY_BITS, 0:?SIZE_BITS>>, Rest};
+store_msg_size_segment(<<?REL_SEQ_ONLY_PREFIX:?REL_SEQ_ONLY_PREFIX_BITS,
+ RelSeq:?REL_SEQ_BITS, Rest/binary>>) ->
+ {<<?REL_SEQ_ONLY_PREFIX:?REL_SEQ_ONLY_PREFIX_BITS, RelSeq:?REL_SEQ_BITS>>,
+ Rest};
+store_msg_size_segment(_) ->
+ stop.
+
+store_msg() ->
+ foreach_queue_index({fun store_msg_journal/1,
+ fun store_msg_segment/1}).
+
+store_msg_journal(<<?DEL_JPREFIX:?JPREFIX_BITS, SeqId:?SEQ_BITS,
+ Rest/binary>>) ->
+ {<<?DEL_JPREFIX:?JPREFIX_BITS, SeqId:?SEQ_BITS>>, Rest};
+store_msg_journal(<<?ACK_JPREFIX:?JPREFIX_BITS, SeqId:?SEQ_BITS,
+ Rest/binary>>) ->
+ {<<?ACK_JPREFIX:?JPREFIX_BITS, SeqId:?SEQ_BITS>>, Rest};
+store_msg_journal(<<Prefix:?JPREFIX_BITS, SeqId:?SEQ_BITS,
+ MsgId:?MSG_ID_BITS, Expiry:?EXPIRY_BITS, Size:?SIZE_BITS,
+ Rest/binary>>) ->
+ {<<Prefix:?JPREFIX_BITS, SeqId:?SEQ_BITS, MsgId:?MSG_ID_BITS,
+ Expiry:?EXPIRY_BITS, Size:?SIZE_BITS,
+ 0:?EMBEDDED_SIZE_BITS>>, Rest};
+store_msg_journal(_) ->
+ stop.
+
+store_msg_segment(<<?PUB_PREFIX:?PUB_PREFIX_BITS, IsPersistentNum:1,
+ RelSeq:?REL_SEQ_BITS, MsgId:?MSG_ID_BITS,
+ Expiry:?EXPIRY_BITS, Size:?SIZE_BITS, Rest/binary>>) ->
+ {<<?PUB_PREFIX:?PUB_PREFIX_BITS, IsPersistentNum:1, RelSeq:?REL_SEQ_BITS,
+ MsgId:?MSG_ID_BITS, Expiry:?EXPIRY_BITS, Size:?SIZE_BITS,
+ 0:?EMBEDDED_SIZE_BITS>>, Rest};
+store_msg_segment(<<?REL_SEQ_ONLY_PREFIX:?REL_SEQ_ONLY_PREFIX_BITS,
+ RelSeq:?REL_SEQ_BITS, Rest/binary>>) ->
+ {<<?REL_SEQ_ONLY_PREFIX:?REL_SEQ_ONLY_PREFIX_BITS, RelSeq:?REL_SEQ_BITS>>,
+ Rest};
+store_msg_segment(_) ->
+ stop.
+
+%%----------------------------------------------------------------------------
+%% Migration functions
+%%----------------------------------------------------------------------------
+
+foreach_queue_index(Funs) ->
+ QueueDirNames = all_queue_directory_names(),
+ {ok, Gatherer} = gatherer:start_link(),
+ [begin
+ ok = gatherer:fork(Gatherer),
+ ok = worker_pool:submit_async(
+ fun () ->
+ transform_queue(QueueDirName, Gatherer, Funs)
+ end)
+ end || QueueDirName <- QueueDirNames],
+ empty = gatherer:out(Gatherer),
+ ok = gatherer:stop(Gatherer).
+
+transform_queue(Dir, Gatherer, {JournalFun, SegmentFun}) ->
+ ok = transform_file(filename:join(Dir, ?JOURNAL_FILENAME), JournalFun),
+ [ok = transform_file(filename:join(Dir, Seg), SegmentFun)
+ || Seg <- rabbit_file:wildcard(".*\\" ++ ?SEGMENT_EXTENSION, Dir)],
+ ok = gatherer:finish(Gatherer).
+
+transform_file(_Path, none) ->
+ ok;
+transform_file(Path, Fun) when is_function(Fun)->
+ PathTmp = Path ++ ".upgrade",
+ case rabbit_file:file_size(Path) of
+ 0 -> ok;
+ Size -> {ok, PathTmpHdl} =
+ file_handle_cache:open_with_absolute_path(
+ PathTmp, ?WRITE_MODE,
+ [{write_buffer, infinity}]),
+
+ {ok, PathHdl} = file_handle_cache:open_with_absolute_path(
+ Path, ?READ_MODE, [{read_buffer, Size}]),
+ {ok, Content} = file_handle_cache:read(PathHdl, Size),
+ ok = file_handle_cache:close(PathHdl),
+
+ ok = drive_transform_fun(Fun, PathTmpHdl, Content),
+
+ ok = file_handle_cache:close(PathTmpHdl),
+ ok = rabbit_file:rename(PathTmp, Path)
+ end.
+
+drive_transform_fun(Fun, Hdl, Contents) ->
+ case Fun(Contents) of
+ stop -> ok;
+ {Output, Contents1} -> ok = file_handle_cache:append(Hdl, Output),
+ drive_transform_fun(Fun, Hdl, Contents1)
+ end.
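+
+%% A transform fun consumes its input incrementally: each call returns
+%% {OutputIoData, RestOfInput} until it returns 'stop' (see e.g.
+%% add_queue_ttl_journal/1 above for a concrete instance).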
+
+move_to_per_vhost_stores(#resource{virtual_host = VHost} = QueueName) ->
+ OldQueueDir = filename:join([queues_base_dir(), "queues",
+ queue_name_to_dir_name_legacy(QueueName)]),
+ VHostDir = rabbit_vhost:msg_store_dir_path(VHost),
+ NewQueueDir = queue_dir(VHostDir, QueueName),
+ rabbit_log_upgrade:info("About to migrate queue directory '~s' to '~s'",
+ [OldQueueDir, NewQueueDir]),
+ case rabbit_file:is_dir(OldQueueDir) of
+ true ->
+ ok = rabbit_file:ensure_dir(NewQueueDir),
+ ok = rabbit_file:rename(OldQueueDir, NewQueueDir),
+ ok = ensure_queue_name_stub_file(NewQueueDir, QueueName);
+ false ->
+ Msg = "Queue index directory '~s' not found for ~s~n",
+ Args = [OldQueueDir, rabbit_misc:rs(QueueName)],
+ rabbit_log_upgrade:error(Msg, Args),
+ rabbit_log:error(Msg, Args)
+ end,
+ ok.
+
+ensure_queue_name_stub_file(Dir, #resource{virtual_host = VHost, name = QName}) ->
+ QueueNameFile = filename:join(Dir, ?QUEUE_NAME_STUB_FILE),
+ file:write_file(QueueNameFile, <<"VHOST: ", VHost/binary, "\n",
+ "QUEUE: ", QName/binary, "\n">>).
+
+read_global_recovery_terms(DurableQueueNames) ->
+ ok = rabbit_recovery_terms:open_global_table(),
+
+ DurableTerms =
+ lists:foldl(
+ fun(QName, RecoveryTerms) ->
+ DirName = queue_name_to_dir_name_legacy(QName),
+ RecoveryInfo = case rabbit_recovery_terms:read_global(DirName) of
+ {error, _} -> non_clean_shutdown;
+ {ok, Terms} -> Terms
+ end,
+ [RecoveryInfo | RecoveryTerms]
+ end, [], DurableQueueNames),
+
+ ok = rabbit_recovery_terms:close_global_table(),
+ %% The backing queue interface requires that the queue recovery terms
+ %% which come back from start/1 are in the same order as DurableQueueNames
+ OrderedTerms = lists:reverse(DurableTerms),
+ {OrderedTerms, {fun queue_index_walker/1, {start, DurableQueueNames}}}.
+
+cleanup_global_recovery_terms() ->
+ rabbit_file:recursive_delete([filename:join([queues_base_dir(), "queues"])]),
+ rabbit_recovery_terms:delete_global_table(),
+ ok.
+
+update_recovery_term(#resource{virtual_host = VHost} = QueueName, Term) ->
+ Key = queue_name_to_dir_name(QueueName),
+ rabbit_recovery_terms:store(VHost, Key, Term).
diff --git a/deps/rabbit/src/rabbit_queue_location_client_local.erl b/deps/rabbit/src/rabbit_queue_location_client_local.erl
new file mode 100644
index 0000000000..2df1608534
--- /dev/null
+++ b/deps/rabbit/src/rabbit_queue_location_client_local.erl
@@ -0,0 +1,39 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_queue_location_client_local).
+-behaviour(rabbit_queue_master_locator).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+-include("amqqueue.hrl").
+
+-export([description/0, queue_master_location/1]).
+
+-rabbit_boot_step({?MODULE,
+ [{description, "locate queue master client local"},
+ {mfa, {rabbit_registry, register,
+ [queue_master_locator,
+ <<"client-local">>, ?MODULE]}},
+ {requires, rabbit_registry},
+ {enables, kernel_ready}]}).
+
+%%---------------------------------------------------------------------------
+%% Queue Master Location Callbacks
+%%---------------------------------------------------------------------------
+
+description() ->
+ [{description, <<"Locate queue master node as the client local node">>}].
+
+queue_master_location(Q) when ?is_amqqueue(Q) ->
+ %% unlike with other locator strategies we do not check node maintenance
+ %% status for two reasons:
+ %%
+ %% * nodes in maintenance mode will drop their client connections
+ %% * with other strategies, if no nodes are available, the current node
+ %% is returned but this strategy already does just that
+ {ok, node()}.
diff --git a/deps/rabbit/src/rabbit_queue_location_min_masters.erl b/deps/rabbit/src/rabbit_queue_location_min_masters.erl
new file mode 100644
index 0000000000..6535f082fe
--- /dev/null
+++ b/deps/rabbit/src/rabbit_queue_location_min_masters.erl
@@ -0,0 +1,70 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_queue_location_min_masters).
+-behaviour(rabbit_queue_master_locator).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+-include("amqqueue.hrl").
+
+-export([description/0, queue_master_location/1]).
+
+-rabbit_boot_step({?MODULE,
+ [{description, "locate queue master min bound queues"},
+ {mfa, {rabbit_registry, register,
+ [queue_master_locator,
+ <<"min-masters">>, ?MODULE]}},
+ {requires, rabbit_registry},
+ {enables, kernel_ready}]}).
+
+%%---------------------------------------------------------------------------
+%% Queue Master Location Callbacks
+%%---------------------------------------------------------------------------
+
+description() ->
+ [{description,
+ <<"Locate queue master node from cluster node with least bound queues">>}].
+
+queue_master_location(Q) when ?is_amqqueue(Q) ->
+ Cluster = rabbit_queue_master_location_misc:all_nodes(Q),
+ QueueNames = rabbit_amqqueue:list_names(),
+ MastersPerNode0 = lists:foldl(
+ fun(#resource{virtual_host = VHost, name = QueueName}, NodeMasters) ->
+ case rabbit_queue_master_location_misc:lookup_master(QueueName, VHost) of
+ {ok, Master} when is_atom(Master) ->
+ case maps:is_key(Master, NodeMasters) of
+ true -> maps:update_with(Master,
+ fun(N) -> N + 1 end,
+ NodeMasters);
+ false -> NodeMasters
+ end;
+ _ -> NodeMasters
+ end
+ end,
+ maps:from_list([{N, 0} || N <- Cluster]),
+ QueueNames),
+
+ MastersPerNode = maps:filter(fun (Node, _N) ->
+ not rabbit_maintenance:is_being_drained_local_read(Node)
+ end, MastersPerNode0),
+
+ case map_size(MastersPerNode) > 0 of
+ true ->
+ {MinNode, _NMasters} = maps:fold(
+ fun(Node, NMasters, init) ->
+ {Node, NMasters};
+ (Node, NMasters, {MinNode, MinMasters}) ->
+ case NMasters < MinMasters of
+ true -> {Node, NMasters};
+ false -> {MinNode, MinMasters}
+ end
+ end,
+ init, MastersPerNode),
+ {ok, MinNode};
+ false ->
+ undefined
+ end.
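+
+%% Example (illustrative): with MastersPerNode = #{a => 3, b => 1, c => 2}
+%% and no node being drained, the fold above yields {b, 1}, so the queue
+%% master is placed on node 'b'.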
diff --git a/deps/rabbit/src/rabbit_queue_location_random.erl b/deps/rabbit/src/rabbit_queue_location_random.erl
new file mode 100644
index 0000000000..7232fc6703
--- /dev/null
+++ b/deps/rabbit/src/rabbit_queue_location_random.erl
@@ -0,0 +1,42 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_queue_location_random).
+-behaviour(rabbit_queue_master_locator).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+-include("amqqueue.hrl").
+
+-export([description/0, queue_master_location/1]).
+
+-rabbit_boot_step({?MODULE,
+ [{description, "locate queue master random"},
+ {mfa, {rabbit_registry, register,
+ [queue_master_locator,
+ <<"random">>, ?MODULE]}},
+ {requires, rabbit_registry},
+ {enables, kernel_ready}]}).
+
+%%---------------------------------------------------------------------------
+%% Queue Master Location Callbacks
+%%---------------------------------------------------------------------------
+
+description() ->
+ [{description,
+ <<"Locate queue master node from cluster in a random manner">>}].
+
+queue_master_location(Q) when ?is_amqqueue(Q) ->
+ Cluster0 = rabbit_queue_master_location_misc:all_nodes(Q),
+ Cluster = rabbit_maintenance:filter_out_drained_nodes_local_read(Cluster0),
+ case Cluster of
+ [] ->
+ undefined;
+ Candidates when is_list(Candidates) ->
+ RandomPos = erlang:phash2(erlang:monotonic_time(), length(Candidates)),
+ MasterNode = lists:nth(RandomPos + 1, Candidates),
+ {ok, MasterNode}
+ end.
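+
+%% Note: erlang:phash2(Term, N) returns a value in 0..N-1, hence the +1
+%% above for the 1-based lists:nth/2.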
diff --git a/deps/rabbit/src/rabbit_queue_location_validator.erl b/deps/rabbit/src/rabbit_queue_location_validator.erl
new file mode 100644
index 0000000000..bf41be622c
--- /dev/null
+++ b/deps/rabbit/src/rabbit_queue_location_validator.erl
@@ -0,0 +1,67 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_queue_location_validator).
+-behaviour(rabbit_policy_validator).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+-include("amqqueue.hrl").
+
+-export([validate_policy/1, validate_strategy/1]).
+
+-rabbit_boot_step({?MODULE,
+ [{description, "Queue location policy validation"},
+ {mfa, {rabbit_registry, register,
+ [policy_validator,
+ <<"queue-master-locator">>,
+ ?MODULE]}},
+ {requires, rabbit_registry},
+ {enables, recovery}]}).
+
+validate_policy(KeyList) ->
+    case proplists:lookup(<<"queue-master-locator">>, KeyList) of
+ {_, Strategy} -> case validate_strategy(Strategy) of
+ {error, _, _} = Er -> Er;
+ _ -> ok
+ end;
+ _ -> {error, "queue-master-locator undefined"}
+ end.
+
+validate_strategy(Strategy) ->
+ case module(Strategy) of
+ R = {ok, _M} -> R;
+ _ ->
+ {error, "~p invalid queue-master-locator value", [Strategy]}
+ end.
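+
+%% E.g. (illustrative): a policy definition containing
+%%   {<<"queue-master-locator">>, <<"min-masters">>}
+%% validates iff a locator module is registered for <<"min-masters">>
+%% (rabbit_queue_location_min_masters registers one at boot).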
+
+policy(Policy, Q) ->
+ case rabbit_policy:get(Policy, Q) of
+ undefined -> none;
+ P -> P
+ end.
+
+module(Q) when ?is_amqqueue(Q) ->
+ case policy(<<"queue-master-locator">>, Q) of
+ undefined -> no_location_strategy;
+ Mode -> module(Mode)
+ end;
+module(Strategy) when is_binary(Strategy) ->
+ case rabbit_registry:binary_to_type(Strategy) of
+ {error, not_found} -> no_location_strategy;
+ T ->
+ case rabbit_registry:lookup_module(queue_master_locator, T) of
+ {ok, Module} ->
+ case code:which(Module) of
+ non_existing -> no_location_strategy;
+ _ -> {ok, Module}
+ end;
+ _ ->
+ no_location_strategy
+ end
+ end;
+module(Strategy) ->
+ module(rabbit_data_coercion:to_binary(Strategy)).
diff --git a/deps/rabbit/src/rabbit_queue_master_location_misc.erl b/deps/rabbit/src/rabbit_queue_master_location_misc.erl
new file mode 100644
index 0000000000..37698e184f
--- /dev/null
+++ b/deps/rabbit/src/rabbit_queue_master_location_misc.erl
@@ -0,0 +1,108 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_queue_master_location_misc).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+-include("amqqueue.hrl").
+
+-export([lookup_master/2,
+ lookup_queue/2,
+ get_location/1,
+ get_location_mod_by_config/1,
+ get_location_mod_by_args/1,
+ get_location_mod_by_policy/1,
+ all_nodes/1]).
+
+-spec lookup_master(binary(), binary()) -> {ok, node()} | {error, not_found}.
+lookup_master(QueueNameBin, VHostPath) when is_binary(QueueNameBin),
+ is_binary(VHostPath) ->
+ QueueR = rabbit_misc:r(VHostPath, queue, QueueNameBin),
+ case rabbit_amqqueue:lookup(QueueR) of
+ {ok, Queue} when ?amqqueue_has_valid_pid(Queue) ->
+ Pid = amqqueue:get_pid(Queue),
+ {ok, node(Pid)};
+ Error -> Error
+ end.
+
+lookup_queue(QueueNameBin, VHostPath) when is_binary(QueueNameBin),
+ is_binary(VHostPath) ->
+ QueueR = rabbit_misc:r(VHostPath, queue, QueueNameBin),
+ case rabbit_amqqueue:lookup(QueueR) of
+ Reply = {ok, Queue} when ?is_amqqueue(Queue) ->
+ Reply;
+ Error ->
+ Error
+ end.
+
+get_location(Queue) when ?is_amqqueue(Queue) ->
+ Reply1 = case get_location_mod_by_args(Queue) of
+ _Err1 = {error, _} ->
+ case get_location_mod_by_policy(Queue) of
+ _Err2 = {error, _} ->
+ case get_location_mod_by_config(Queue) of
+ Err3 = {error, _} -> Err3;
+ Reply0 = {ok, _Module} -> Reply0
+ end;
+ Reply0 = {ok, _Module} -> Reply0
+ end;
+ Reply0 = {ok, _Module} -> Reply0
+ end,
+
+ case Reply1 of
+ {ok, CB} -> CB:queue_master_location(Queue);
+ Error -> Error
+ end.
+
+get_location_mod_by_args(Queue) when ?is_amqqueue(Queue) ->
+ Args = amqqueue:get_arguments(Queue),
+ case rabbit_misc:table_lookup(Args, <<"x-queue-master-locator">>) of
+ {_Type, Strategy} ->
+ case rabbit_queue_location_validator:validate_strategy(Strategy) of
+ Reply = {ok, _CB} -> Reply;
+ Error -> Error
+ end;
+ _ -> {error, "x-queue-master-locator undefined"}
+ end.
+
+get_location_mod_by_policy(Queue) when ?is_amqqueue(Queue) ->
+    case rabbit_policy:get(<<"queue-master-locator">>, Queue) of
+ undefined -> {error, "queue-master-locator policy undefined"};
+ Strategy ->
+ case rabbit_queue_location_validator:validate_strategy(Strategy) of
+ Reply = {ok, _CB} -> Reply;
+ Error -> Error
+ end
+ end.
+
+get_location_mod_by_config(Queue) when ?is_amqqueue(Queue) ->
+ case application:get_env(rabbit, queue_master_locator) of
+ {ok, Strategy} ->
+ case rabbit_queue_location_validator:validate_strategy(Strategy) of
+ Reply = {ok, _CB} -> Reply;
+ Error -> Error
+ end;
+ _ -> {error, "queue_master_locator undefined"}
+ end.
+
+all_nodes(Queue) when ?is_amqqueue(Queue) ->
+ handle_is_mirrored_ha_nodes(rabbit_mirror_queue_misc:is_mirrored_ha_nodes(Queue), Queue).
+
+handle_is_mirrored_ha_nodes(false, _Queue) ->
+ % Note: ha-mode is NOT 'nodes' - it is either 'exactly' or 'all', which means
+ % that any node in the cluster is eligible to be the new queue master node
+ rabbit_nodes:all_running();
+handle_is_mirrored_ha_nodes(true, Queue) ->
+ % Note: ha-mode is 'nodes', which explicitly specifies allowed nodes.
+ % We must use suggested_queue_nodes to get that list of nodes as the
+ % starting point for finding the queue master location
+ handle_suggested_queue_nodes(rabbit_mirror_queue_misc:suggested_queue_nodes(Queue)).
+
+handle_suggested_queue_nodes({_MNode, []}) ->
+ rabbit_nodes:all_running();
+handle_suggested_queue_nodes({MNode, SNodes}) ->
+ [MNode | SNodes].
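+
+%% Location strategy precedence, as implemented by get_location/1 above:
+%% the "x-queue-master-locator" queue argument takes precedence over the
+%% "queue-master-locator" policy, which in turn takes precedence over the
+%% queue_master_locator key in the application config.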
diff --git a/deps/rabbit/src/rabbit_queue_master_locator.erl b/deps/rabbit/src/rabbit_queue_master_locator.erl
new file mode 100644
index 0000000000..ff2e30f587
--- /dev/null
+++ b/deps/rabbit/src/rabbit_queue_master_locator.erl
@@ -0,0 +1,19 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_queue_master_locator).
+
+-behaviour(rabbit_registry_class).
+
+-export([added_to_rabbit_registry/2, removed_from_rabbit_registry/1]).
+
+-callback description() -> [proplists:property()].
+-callback queue_master_location(amqqueue:amqqueue()) ->
+ {'ok', node()} | {'error', term()}.
+
+added_to_rabbit_registry(_Type, _ModuleName) -> ok.
+removed_from_rabbit_registry(_Type) -> ok.
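+
+%% A minimal sketch of a strategy module implementing this behaviour
+%% (module name and description are hypothetical):
+%%
+%%   -module(rabbit_queue_master_location_local).
+%%   -behaviour(rabbit_queue_master_locator).
+%%   -export([description/0, queue_master_location/1]).
+%%
+%%   description() ->
+%%       [{description, <<"Locate queue master on the declaring node">>}].
+%%
+%%   queue_master_location(_Q) ->
+%%       {ok, node()}.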
diff --git a/deps/rabbit/src/rabbit_queue_type.erl b/deps/rabbit/src/rabbit_queue_type.erl
new file mode 100644
index 0000000000..4e59b6a7c0
--- /dev/null
+++ b/deps/rabbit/src/rabbit_queue_type.erl
@@ -0,0 +1,581 @@
+-module(rabbit_queue_type).
+-include("amqqueue.hrl").
+-include_lib("rabbit_common/include/resource.hrl").
+
+-export([
+ init/0,
+ close/1,
+ discover/1,
+ default/0,
+ is_enabled/1,
+ declare/2,
+ delete/4,
+ is_recoverable/1,
+ recover/2,
+ purge/1,
+ policy_changed/1,
+ stat/1,
+ remove/2,
+ info/2,
+ state_info/1,
+ info_down/2,
+ info_down/3,
+ %% stateful client API
+ new/2,
+ consume/3,
+ cancel/5,
+ handle_down/3,
+ handle_event/3,
+ module/2,
+ deliver/3,
+ settle/5,
+ credit/5,
+ dequeue/5,
+ fold_state/3,
+ is_policy_applicable/2,
+ is_server_named_allowed/1
+ ]).
+
+%% TODO: what is a good identity for a classic queue, including all replicas?
+-type queue_name() :: rabbit_types:r(queue).
+-type queue_ref() :: queue_name() | atom().
+-type queue_state() :: term().
+-type msg_tag() :: term().
+
+-define(STATE, ?MODULE).
+
+%% 'recoverable_slaves' should not really be a generic key, but let's keep it
+%% here until mirrored queues are deprecated.
+-define(DOWN_KEYS, [name, durable, auto_delete, arguments, pid, recoverable_slaves, type, state]).
+
+-define(QREF(QueueReference),
+ (is_tuple(QueueReference) andalso element(1, QueueReference) == resource)
+ orelse is_atom(QueueReference)).
+%% anything that the host process needs to do on behalf of the queue type
+%% session, like knowing when to notify on monitor down
+-type action() ::
+ {monitor, Pid :: pid(), queue_ref()} |
+ %% indicate to the queue type module that a message has been delivered
+ %% fully to the queue
+ {settled, Success :: boolean(), [msg_tag()]} |
+ {deliver, rabbit_types:ctag(), boolean(), [rabbit_amqqueue:qmsg()]}.
+
+-type actions() :: [action()].
+
+-type event() ::
+ {down, pid(), Info :: term()} |
+ term().
+
+-record(ctx, {module :: module(),
+ name :: queue_name(),
+ %% "publisher confirm queue accounting"
+ %% queue type implementation should emit a:
+ %% {settle, Success :: boolean(), msg_tag()}
+ %% to either settle or reject the delivery of a
+ %% message to the queue instance
+ %% The queue type module will then emit a {confirm | reject, [msg_tag()]}
+ %% action to the channel (or channel-like) process when a msg_tag
+ %% has reached its conclusion
+ state :: queue_state()}).
+
+
+-record(?STATE, {ctxs = #{} :: #{queue_ref() => #ctx{} | queue_ref()},
+ monitor_registry = #{} :: #{pid() => queue_ref()}
+ }).
+
+-opaque state() :: #?STATE{}.
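+
+%% A hedged sketch of how a host (channel-like) process drives this state;
+%% Q and Spec are assumed to be a valid amqqueue record and consume spec:
+%%
+%%   S0 = rabbit_queue_type:init(),
+%%   S1 = rabbit_queue_type:new(Q, S0),
+%%   {ok, S2, Actions} = rabbit_queue_type:consume(Q, Spec, S1),
+%%   %% Actions such as {monitor, Pid, QRef} or
+%%   %% {deliver, CTag, AckRequired, Msgs} are then handled by the host.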
+
+-type consume_spec() :: #{no_ack := boolean(),
+ channel_pid := pid(),
+ limiter_pid => pid(),
+ limiter_active => boolean(),
+ prefetch_count => non_neg_integer(),
+ consumer_tag := rabbit_types:ctag(),
+ exclusive_consume => boolean(),
+ args => rabbit_framing:amqp_table(),
+ ok_msg := term(),
+ acting_user := rabbit_types:username()}.
+
+
+
+% copied from rabbit_amqqueue
+-type absent_reason() :: 'nodedown' | 'crashed' | stopped | timeout.
+
+-type settle_op() :: 'complete' | 'requeue' | 'discard'.
+
+-export_type([state/0,
+ consume_spec/0,
+ action/0,
+ actions/0,
+ settle_op/0]).
+
+%% is the queue type feature enabled
+-callback is_enabled() -> boolean().
+
+-callback declare(amqqueue:amqqueue(), node()) ->
+ {'new' | 'existing' | 'owner_died', amqqueue:amqqueue()} |
+ {'absent', amqqueue:amqqueue(), absent_reason()} |
+ {'protocol_error', Type :: atom(), Reason :: string(), Args :: term()}.
+
+-callback delete(amqqueue:amqqueue(),
+ boolean(),
+ boolean(),
+ rabbit_types:username()) ->
+ rabbit_types:ok(non_neg_integer()) |
+ rabbit_types:error(in_use | not_empty) |
+ {protocol_error, Type :: atom(), Reason :: string(), Args :: term()}.
+
+-callback recover(rabbit_types:vhost(), [amqqueue:amqqueue()]) ->
+ {Recovered :: [amqqueue:amqqueue()],
+ Failed :: [amqqueue:amqqueue()]}.
+
+%% checks if the queue should be recovered
+-callback is_recoverable(amqqueue:amqqueue()) ->
+ boolean().
+
+-callback purge(amqqueue:amqqueue()) ->
+ {ok, non_neg_integer()} | {error, term()}.
+
+-callback policy_changed(amqqueue:amqqueue()) -> ok.
+
+%% stateful
+%% initialise and return a queue type specific session context
+-callback init(amqqueue:amqqueue()) -> queue_state().
+
+-callback close(queue_state()) -> ok.
+%% update the queue type state from the amqqueue record
+-callback update(amqqueue:amqqueue(), queue_state()) -> queue_state().
+
+-callback consume(amqqueue:amqqueue(),
+ consume_spec(),
+ queue_state()) ->
+ {ok, queue_state(), actions()} | {error, term()} |
+ {protocol_error, Type :: atom(), Reason :: string(), Args :: term()}.
+
+-callback cancel(amqqueue:amqqueue(),
+ rabbit_types:ctag(),
+ term(),
+ rabbit_types:username(),
+ queue_state()) ->
+ {ok, queue_state()} | {error, term()}.
+
+%% any async events returned from the queue system should be processed through
+%% this
+-callback handle_event(Event :: event(),
+ queue_state()) ->
+ {ok, queue_state(), actions()} | {error, term()} | eol |
+ {protocol_error, Type :: atom(), Reason :: string(), Args :: term()}.
+
+-callback deliver([{amqqueue:amqqueue(), queue_state()}],
+ Delivery :: term()) ->
+ {[{amqqueue:amqqueue(), queue_state()}], actions()}.
+
+-callback settle(settle_op(), rabbit_types:ctag(), [non_neg_integer()], queue_state()) ->
+ {queue_state(), actions()} |
+ {'protocol_error', Type :: atom(), Reason :: string(), Args :: term()}.
+
+-callback credit(rabbit_types:ctag(),
+ non_neg_integer(), Drain :: boolean(), queue_state()) ->
+ {queue_state(), actions()}.
+
+-callback dequeue(NoAck :: boolean(), LimiterPid :: pid(),
+ rabbit_types:ctag(), queue_state()) ->
+ {ok, Count :: non_neg_integer(), rabbit_amqqueue:qmsg(), queue_state()} |
+ {empty, queue_state()} |
+ {error, term()} |
+ {protocol_error, Type :: atom(), Reason :: string(), Args :: term()}.
+
+%% return a map of state summary information
+-callback state_info(queue_state()) ->
+ #{atom() := term()}.
+
+%% general queue info
+-callback info(amqqueue:amqqueue(), all_keys | rabbit_types:info_keys()) ->
+ rabbit_types:infos().
+
+-callback stat(amqqueue:amqqueue()) ->
+ {'ok', non_neg_integer(), non_neg_integer()}.
+
+-callback capabilities() ->
+ #{atom() := term()}.
+
+%% TODO: this should be controlled by a registry that is populated on boot
+discover(<<"quorum">>) ->
+ rabbit_quorum_queue;
+discover(<<"classic">>) ->
+ rabbit_classic_queue;
+discover(<<"stream">>) ->
+ rabbit_stream_queue.
+
+default() ->
+ rabbit_classic_queue.
+
+-spec is_enabled(module()) -> boolean().
+is_enabled(Type) ->
+ Type:is_enabled().
+
+-spec declare(amqqueue:amqqueue(), node()) ->
+ {'new' | 'existing' | 'owner_died', amqqueue:amqqueue()} |
+ {'absent', amqqueue:amqqueue(), absent_reason()} |
+ {protocol_error, Type :: atom(), Reason :: string(), Args :: term()}.
+declare(Q, Node) ->
+ Mod = amqqueue:get_type(Q),
+ Mod:declare(Q, Node).
+
+-spec delete(amqqueue:amqqueue(), boolean(),
+ boolean(), rabbit_types:username()) ->
+ rabbit_types:ok(non_neg_integer()) |
+ rabbit_types:error(in_use | not_empty) |
+ {protocol_error, Type :: atom(), Reason :: string(), Args :: term()}.
+delete(Q, IfUnused, IfEmpty, ActingUser) ->
+ Mod = amqqueue:get_type(Q),
+ Mod:delete(Q, IfUnused, IfEmpty, ActingUser).
+
+-spec purge(amqqueue:amqqueue()) ->
+ {'ok', non_neg_integer()} | {error, term()}.
+purge(Q) ->
+ Mod = amqqueue:get_type(Q),
+ Mod:purge(Q).
+
+-spec policy_changed(amqqueue:amqqueue()) -> 'ok'.
+policy_changed(Q) ->
+ Mod = amqqueue:get_type(Q),
+ Mod:policy_changed(Q).
+
+-spec stat(amqqueue:amqqueue()) ->
+ {'ok', non_neg_integer(), non_neg_integer()}.
+stat(Q) ->
+ Mod = amqqueue:get_type(Q),
+ Mod:stat(Q).
+
+-spec remove(queue_ref(), state()) -> state().
+remove(QRef, #?STATE{ctxs = Ctxs0} = State) ->
+ case maps:take(QRef, Ctxs0) of
+ error ->
+ State;
+ {_, Ctxs} ->
+ State#?STATE{ctxs = Ctxs}
+ end.
+
+-spec info(amqqueue:amqqueue(), all_keys | rabbit_types:info_keys()) ->
+ rabbit_types:infos().
+info(Q, Items) when ?amqqueue_state_is(Q, crashed) ->
+ info_down(Q, Items, crashed);
+info(Q, Items) when ?amqqueue_state_is(Q, stopped) ->
+ info_down(Q, Items, stopped);
+info(Q, Items) ->
+ Mod = amqqueue:get_type(Q),
+ Mod:info(Q, Items).
+
+fold_state(Fun, Acc, #?STATE{ctxs = Ctxs}) ->
+ maps:fold(Fun, Acc, Ctxs).
+
+state_info(#ctx{state = S,
+ module = Mod}) ->
+ Mod:state_info(S);
+state_info(_) ->
+ #{}.
+
+down_keys() -> ?DOWN_KEYS.
+
+info_down(Q, DownReason) ->
+ info_down(Q, down_keys(), DownReason).
+
+info_down(Q, all_keys, DownReason) ->
+ info_down(Q, down_keys(), DownReason);
+info_down(Q, Items, DownReason) ->
+ [{Item, i_down(Item, Q, DownReason)} || Item <- Items].
+
+i_down(name, Q, _) -> amqqueue:get_name(Q);
+i_down(durable, Q, _) -> amqqueue:is_durable(Q);
+i_down(auto_delete, Q, _) -> amqqueue:is_auto_delete(Q);
+i_down(arguments, Q, _) -> amqqueue:get_arguments(Q);
+i_down(pid, Q, _) -> amqqueue:get_pid(Q);
+i_down(recoverable_slaves, Q, _) -> amqqueue:get_recoverable_slaves(Q);
+i_down(type, Q, _) -> amqqueue:get_type(Q);
+i_down(state, _Q, DownReason) -> DownReason;
+i_down(_K, _Q, _DownReason) -> ''.
+
+is_policy_applicable(Q, Policy) ->
+ Mod = amqqueue:get_type(Q),
+ Capabilities = Mod:capabilities(),
+ Applicable = maps:get(policies, Capabilities, []),
+ lists:all(fun({P, _}) ->
+ lists:member(P, Applicable)
+ end, Policy).
+
+is_server_named_allowed(Type) ->
+ Capabilities = Type:capabilities(),
+ maps:get(server_named, Capabilities, false).
+
+-spec init() -> state().
+init() ->
+ #?STATE{}.
+
+-spec close(state()) -> ok.
+close(#?STATE{ctxs = Contexts}) ->
+ _ = maps:map(
+ fun (_, #ctx{module = Mod,
+ state = S}) ->
+ ok = Mod:close(S)
+ end, Contexts),
+ ok.
+
+-spec new(amqqueue:amqqueue(), state()) -> state().
+new(Q, State) when ?is_amqqueue(Q) ->
+ Ctx = get_ctx(Q, State),
+ set_ctx(Q, Ctx, State).
+
+-spec consume(amqqueue:amqqueue(), consume_spec(), state()) ->
+ {ok, state(), actions()} | {error, term()}.
+consume(Q, Spec, State) ->
+ #ctx{state = CtxState0} = Ctx = get_ctx(Q, State),
+ Mod = amqqueue:get_type(Q),
+ case Mod:consume(Q, Spec, CtxState0) of
+ {ok, CtxState, Actions} ->
+ return_ok(set_ctx(Q, Ctx#ctx{state = CtxState}, State), Actions);
+ Err ->
+ Err
+ end.
+
+%% TODO switch to cancel spec api
+-spec cancel(amqqueue:amqqueue(),
+ rabbit_types:ctag(),
+ term(),
+ rabbit_types:username(),
+ state()) ->
+ {ok, state()} | {error, term()}.
+cancel(Q, Tag, OkMsg, ActiveUser, Ctxs) ->
+ #ctx{state = State0} = Ctx = get_ctx(Q, Ctxs),
+ Mod = amqqueue:get_type(Q),
+ case Mod:cancel(Q, Tag, OkMsg, ActiveUser, State0) of
+ {ok, State} ->
+ {ok, set_ctx(Q, Ctx#ctx{state = State}, Ctxs)};
+ Err ->
+ Err
+ end.
+
+-spec is_recoverable(amqqueue:amqqueue()) ->
+ boolean().
+is_recoverable(Q) ->
+ Mod = amqqueue:get_type(Q),
+ Mod:is_recoverable(Q).
+
+-spec recover(rabbit_types:vhost(), [amqqueue:amqqueue()]) ->
+ {Recovered :: [amqqueue:amqqueue()],
+ Failed :: [amqqueue:amqqueue()]}.
+recover(VHost, Qs) ->
+ ByType = lists:foldl(
+ fun (Q, Acc) ->
+ T = amqqueue:get_type(Q),
+ maps:update_with(T, fun (X) ->
+ [Q | X]
+ end, Acc)
+ %% TODO resolve all registered queue types from registry
+ end, #{rabbit_classic_queue => [],
+ rabbit_quorum_queue => [],
+ rabbit_stream_queue => []}, Qs),
+ maps:fold(fun (Mod, Queues, {R0, F0}) ->
+ {R, F} = Mod:recover(VHost, Queues),
+ {R0 ++ R, F0 ++ F}
+ end, {[], []}, ByType).
+
+-spec handle_down(pid(), term(), state()) ->
+ {ok, state(), actions()} | {eol, queue_ref()} | {error, term()}.
+handle_down(Pid, Info, #?STATE{monitor_registry = Reg0} = State0) ->
+ %% lookup queue ref in monitor registry
+ case maps:take(Pid, Reg0) of
+ {QRef, Reg} ->
+ case handle_event(QRef, {down, Pid, Info}, State0) of
+ {ok, State, Actions} ->
+ {ok, State#?STATE{monitor_registry = Reg}, Actions};
+ eol ->
+ {eol, QRef};
+ Err ->
+ Err
+ end;
+ error ->
+ {ok, State0, []}
+ end.
+
+%% messages sent from queues
+-spec handle_event(queue_ref(), term(), state()) ->
+ {ok, state(), actions()} | eol | {error, term()} |
+ {protocol_error, Type :: atom(), Reason :: string(), Args :: term()}.
+handle_event(QRef, Evt, Ctxs) ->
+ %% events can arrive after a queue state has been cleared up,
+ %% so we need to be defensive here
+ case get_ctx(QRef, Ctxs, undefined) of
+ #ctx{module = Mod,
+ state = State0} = Ctx ->
+ case Mod:handle_event(Evt, State0) of
+ {ok, State, Actions} ->
+ return_ok(set_ctx(QRef, Ctx#ctx{state = State}, Ctxs), Actions);
+ Err ->
+ Err
+ end;
+ undefined ->
+ {ok, Ctxs, []}
+ end.
+
+-spec module(queue_ref(), state()) ->
+ {ok, module()} | {error, not_found}.
+module(QRef, Ctxs) ->
+ %% events can arrive after a queue state has been cleared up,
+ %% so we need to be defensive here
+ case get_ctx(QRef, Ctxs, undefined) of
+ #ctx{module = Mod} ->
+ {ok, Mod};
+ undefined ->
+ {error, not_found}
+ end.
+
+-spec deliver([amqqueue:amqqueue()], Delivery :: term(),
+ stateless | state()) ->
+ {ok, state(), actions()}.
+deliver(Qs, Delivery, stateless) ->
+ _ = lists:map(fun(Q) ->
+ Mod = amqqueue:get_type(Q),
+ _ = Mod:deliver([{Q, stateless}], Delivery)
+ end, Qs),
+ {ok, stateless, []};
+deliver(Qs, Delivery, #?STATE{} = State0) ->
+ %% sort by queue type - then dispatch each group
+ ByType = lists:foldl(
+ fun (Q, Acc) ->
+ T = amqqueue:get_type(Q),
+ Ctx = get_ctx(Q, State0),
+ maps:update_with(
+ T, fun (A) ->
+ [{Q, Ctx#ctx.state} | A]
+ end, [{Q, Ctx#ctx.state}], Acc)
+ end, #{}, Qs),
+ %% dispatch each group to the queue type interface
+ {Xs, Actions} = maps:fold(fun(Mod, QSs, {X0, A0}) ->
+ {X, A} = Mod:deliver(QSs, Delivery),
+ {X0 ++ X, A0 ++ A}
+ end, {[], []}, ByType),
+ State = lists:foldl(
+ fun({Q, S}, Acc) ->
+ Ctx = get_ctx(Q, Acc),
+ set_ctx(qref(Q), Ctx#ctx{state = S}, Acc)
+ end, State0, Xs),
+ return_ok(State, Actions).
+
+
+-spec settle(queue_ref(), settle_op(), rabbit_types:ctag(),
+ [non_neg_integer()], state()) ->
+ {ok, state(), actions()} |
+ {'protocol_error', Type :: atom(), Reason :: string(), Args :: term()}.
+settle(QRef, Op, CTag, MsgIds, Ctxs)
+ when ?QREF(QRef) ->
+ case get_ctx(QRef, Ctxs, undefined) of
+ undefined ->
+ %% if we receive a settlement and there is no queue state it means
+ %% the queue was deleted with active consumers
+ {ok, Ctxs, []};
+ #ctx{state = State0,
+ module = Mod} = Ctx ->
+ case Mod:settle(Op, CTag, MsgIds, State0) of
+ {State, Actions} ->
+ {ok, set_ctx(QRef, Ctx#ctx{state = State}, Ctxs), Actions};
+ Err ->
+ Err
+ end
+ end.
+
+-spec credit(amqqueue:amqqueue() | queue_ref(),
+ rabbit_types:ctag(), non_neg_integer(),
+ boolean(), state()) -> {ok, state(), actions()}.
+credit(Q, CTag, Credit, Drain, Ctxs) ->
+ #ctx{state = State0,
+ module = Mod} = Ctx = get_ctx(Q, Ctxs),
+ {State, Actions} = Mod:credit(CTag, Credit, Drain, State0),
+ {ok, set_ctx(Q, Ctx#ctx{state = State}, Ctxs), Actions}.
+
+-spec dequeue(amqqueue:amqqueue(), boolean(),
+ pid(), rabbit_types:ctag(), state()) ->
+ {ok, non_neg_integer(), term(), state()} |
+ {empty, state()}.
+dequeue(Q, NoAck, LimiterPid, CTag, Ctxs) ->
+ #ctx{state = State0} = Ctx = get_ctx(Q, Ctxs),
+ Mod = amqqueue:get_type(Q),
+ case Mod:dequeue(NoAck, LimiterPid, CTag, State0) of
+ {ok, Num, Msg, State} ->
+ {ok, Num, Msg, set_ctx(Q, Ctx#ctx{state = State}, Ctxs)};
+ {empty, State} ->
+ {empty, set_ctx(Q, Ctx#ctx{state = State}, Ctxs)};
+ {error, _} = Err ->
+ Err;
+ {protocol_error, _, _, _} = Err ->
+ Err
+ end.
+
+get_ctx(Q, #?STATE{ctxs = Contexts}) when ?is_amqqueue(Q) ->
+ Ref = qref(Q),
+ case Contexts of
+ #{Ref := #ctx{module = Mod,
+ state = State} = Ctx} ->
+ Ctx#ctx{state = Mod:update(Q, State)};
+ _ ->
+ %% not found - initialize
+ Mod = amqqueue:get_type(Q),
+ Name = amqqueue:get_name(Q),
+ #ctx{module = Mod,
+ name = Name,
+ state = Mod:init(Q)}
+ end;
+get_ctx(QRef, Contexts) when ?QREF(QRef) ->
+ case get_ctx(QRef, Contexts, undefined) of
+ undefined ->
+ exit({queue_context_not_found, QRef});
+ Ctx ->
+ Ctx
+ end.
+
+get_ctx(QRef, #?STATE{ctxs = Contexts}, Default) ->
+ Ref = qref(QRef),
+ %% if we use a QRef it should always be initialised
+ case maps:get(Ref, Contexts, undefined) of
+ #ctx{} = Ctx ->
+ Ctx;
+ undefined ->
+ Default
+ end.
+
+set_ctx(Q, Ctx, #?STATE{ctxs = Contexts} = State)
+ when ?is_amqqueue(Q) ->
+ Ref = qref(Q),
+ State#?STATE{ctxs = maps:put(Ref, Ctx, Contexts)};
+set_ctx(QRef, Ctx, #?STATE{ctxs = Contexts} = State) ->
+ Ref = qref(QRef),
+ State#?STATE{ctxs = maps:put(Ref, Ctx, Contexts)}.
+
+qref(#resource{kind = queue} = QName) ->
+ QName;
+qref(Q) when ?is_amqqueue(Q) ->
+ amqqueue:get_name(Q).
+
+return_ok(State0, []) ->
+ {ok, State0, []};
+return_ok(State0, Actions0) ->
+ {State, Actions} =
+ lists:foldl(
+ fun({monitor, Pid, QRef},
+ {#?STATE{monitor_registry = M0} = S0, A0}) ->
+ case M0 of
+ #{Pid := QRef} ->
+ %% already monitored by the qref
+ {S0, A0};
+ #{Pid := _} ->
+ %% TODO: allow multiple Qrefs to monitor the same pid
+ exit(return_ok_duplicate_monitored_pid);
+ _ ->
+ _ = erlang:monitor(process, Pid),
+ M = M0#{Pid => QRef},
+ {S0#?STATE{monitor_registry = M}, A0}
+ end;
+ (Act, {S, A0}) ->
+ {S, [Act | A0]}
+ end, {State0, []}, Actions0),
+ {ok, State, lists:reverse(Actions)}.
diff --git a/deps/rabbit/src/rabbit_queue_type_util.erl b/deps/rabbit/src/rabbit_queue_type_util.erl
new file mode 100644
index 0000000000..e417cb13c4
--- /dev/null
+++ b/deps/rabbit/src/rabbit_queue_type_util.erl
@@ -0,0 +1,74 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at https://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2018-2020 Pivotal Software, Inc. All rights reserved.
+%%
+
+-module(rabbit_queue_type_util).
+
+-export([args_policy_lookup/3,
+ qname_to_internal_name/1,
+ check_auto_delete/1,
+ check_exclusive/1,
+ check_non_durable/1,
+ run_checks/2]).
+
+-include("rabbit.hrl").
+-include("amqqueue.hrl").
+
+args_policy_lookup(Name, Resolve, Q) when ?is_amqqueue(Q) ->
+ Args = amqqueue:get_arguments(Q),
+ AName = <<"x-", Name/binary>>,
+ case {rabbit_policy:get(Name, Q), rabbit_misc:table_lookup(Args, AName)} of
+ {undefined, undefined} -> undefined;
+ {undefined, {_Type, Val}} -> Val;
+ {Val, undefined} -> Val;
+ {PolVal, {_Type, ArgVal}} -> Resolve(PolVal, ArgVal)
+ end.
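+
+%% e.g. args_policy_lookup(<<"max-length">>, fun min/2, Q) compares the
+%% 'max-length' policy value with the 'x-max-length' queue argument and,
+%% when both are set, resolves the conflict by taking the minimum.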
+
+%% TODO escape hack
+qname_to_internal_name(#resource{virtual_host = <<"/">>, name = Name}) ->
+ erlang:binary_to_atom(<<"%2F_", Name/binary>>, utf8);
+qname_to_internal_name(#resource{virtual_host = VHost, name = Name}) ->
+ erlang:binary_to_atom(<<VHost/binary, "_", Name/binary>>, utf8).
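+
+%% e.g. a queue "qq.1" in the default vhost "/" maps to the internal
+%% name '%2F_qq.1' ("/" is escaped as "%2F").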
+
+check_auto_delete(Q) when ?amqqueue_is_auto_delete(Q) ->
+ Name = amqqueue:get_name(Q),
+ {protocol_error, precondition_failed, "invalid property 'auto-delete' for ~s",
+ [rabbit_misc:rs(Name)]};
+check_auto_delete(_) ->
+ ok.
+
+check_exclusive(Q) when ?amqqueue_exclusive_owner_is(Q, none) ->
+ ok;
+check_exclusive(Q) when ?is_amqqueue(Q) ->
+ Name = amqqueue:get_name(Q),
+ {protocol_error, precondition_failed, "invalid property 'exclusive-owner' for ~s",
+ [rabbit_misc:rs(Name)]}.
+
+check_non_durable(Q) when ?amqqueue_is_durable(Q) ->
+ ok;
+check_non_durable(Q) when not ?amqqueue_is_durable(Q) ->
+ Name = amqqueue:get_name(Q),
+ {protocol_error, precondition_failed, "invalid property 'non-durable' for ~s",
+ [rabbit_misc:rs(Name)]}.
+
+run_checks([], _) ->
+ ok;
+run_checks([C | Checks], Q) ->
+ case C(Q) of
+ ok ->
+ run_checks(Checks, Q);
+ Err ->
+ Err
+ end.
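+
+%% e.g. run_checks([fun check_auto_delete/1, fun check_exclusive/1], Q)
+%% returns ok only when every check passes, otherwise the first
+%% {protocol_error, ...} encountered.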
diff --git a/deps/rabbit/src/rabbit_quorum_memory_manager.erl b/deps/rabbit/src/rabbit_quorum_memory_manager.erl
new file mode 100644
index 0000000000..94c2ef6b4b
--- /dev/null
+++ b/deps/rabbit/src/rabbit_quorum_memory_manager.erl
@@ -0,0 +1,67 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2018-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+-module(rabbit_quorum_memory_manager).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-export([init/1, handle_call/2, handle_event/2, handle_info/2,
+ terminate/2, code_change/3]).
+-export([register/0, unregister/0]).
+
+-record(state, {last_roll_over,
+ interval}).
+
+-rabbit_boot_step({rabbit_quorum_memory_manager,
+ [{description, "quorum memory manager"},
+ {mfa, {?MODULE, register, []}},
+ {cleanup, {?MODULE, unregister, []}},
+ {requires, rabbit_event},
+ {enables, recovery}]}).
+
+register() ->
+ gen_event:add_handler(rabbit_alarm, ?MODULE, []).
+
+unregister() ->
+ gen_event:delete_handler(rabbit_alarm, ?MODULE, []).
+
+init([]) ->
+ {ok, #state{interval = interval()}}.
+
+handle_call(_, State) ->
+ {ok, ok, State}.
+
+handle_event({set_alarm, {{resource_limit, memory, Node}, []}},
+ #state{last_roll_over = undefined} = State) when Node == node() ->
+ {ok, force_roll_over(State)};
+handle_event({set_alarm, {{resource_limit, memory, Node}, []}},
+ #state{last_roll_over = Last, interval = Interval } = State)
+ when Node == node() ->
+ Now = erlang:system_time(millisecond),
+ case Now > (Last + Interval) of
+ true ->
+ {ok, force_roll_over(State)};
+ false ->
+ {ok, State}
+ end;
+handle_event(_, State) ->
+ {ok, State}.
+
+handle_info(_, State) ->
+ {ok, State}.
+
+terminate(_, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+force_roll_over(State) ->
+ ra_log_wal:force_roll_over(ra_log_wal),
+ State#state{last_roll_over = erlang:system_time(millisecond)}.
+
+interval() ->
+ application:get_env(rabbit, min_wal_roll_over_interval, 20000).
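+
+%% The minimum interval between forced WAL roll-overs is configurable in
+%% milliseconds; e.g. in advanced.config (value is illustrative):
+%%   [{rabbit, [{min_wal_roll_over_interval, 30000}]}].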
diff --git a/deps/rabbit/src/rabbit_quorum_queue.erl b/deps/rabbit/src/rabbit_quorum_queue.erl
new file mode 100644
index 0000000000..95cc93d728
--- /dev/null
+++ b/deps/rabbit/src/rabbit_quorum_queue.erl
@@ -0,0 +1,1523 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2018-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_quorum_queue).
+
+-behaviour(rabbit_queue_type).
+
+-export([init/1,
+ close/1,
+ update/2,
+ handle_event/2]).
+-export([is_recoverable/1, recover/2, stop/1, delete/4, delete_immediately/2]).
+-export([state_info/1, info/2, stat/1, infos/1]).
+-export([settle/4, dequeue/4, consume/3, cancel/5]).
+-export([credit/4]).
+-export([purge/1]).
+-export([stateless_deliver/2, deliver/3, deliver/2]).
+-export([dead_letter_publish/4]).
+-export([queue_name/1]).
+-export([cluster_state/1, status/2]).
+-export([update_consumer_handler/8, update_consumer/9]).
+-export([cancel_consumer_handler/2, cancel_consumer/3]).
+-export([become_leader/2, handle_tick/3, spawn_deleter/1]).
+-export([rpc_delete_metrics/1]).
+-export([format/1]).
+-export([open_files/1]).
+-export([peek/2, peek/3]).
+-export([add_member/4]).
+-export([delete_member/3]).
+-export([requeue/3]).
+-export([policy_changed/1]).
+-export([format_ra_event/3]).
+-export([cleanup_data_dir/0]).
+-export([shrink_all/1,
+ grow/4]).
+-export([transfer_leadership/2, get_replicas/1, queue_length/1]).
+-export([file_handle_leader_reservation/1, file_handle_other_reservation/0]).
+-export([file_handle_release_reservation/0]).
+-export([list_with_minimum_quorum/0, list_with_minimum_quorum_for_cli/0,
+ filter_quorum_critical/1, filter_quorum_critical/2,
+ all_replica_states/0]).
+-export([capabilities/0]).
+-export([repair_amqqueue_nodes/1,
+ repair_amqqueue_nodes/2
+ ]).
+-export([reclaim_memory/2]).
+
+-export([is_enabled/0,
+ declare/2]).
+
+-import(rabbit_queue_type_util, [args_policy_lookup/3,
+ qname_to_internal_name/1]).
+
+-include_lib("stdlib/include/qlc.hrl").
+-include("rabbit.hrl").
+-include("amqqueue.hrl").
+
+-type msg_id() :: non_neg_integer().
+-type qmsg() :: {rabbit_types:r('queue'), pid(), msg_id(), boolean(), rabbit_types:message()}.
+
+-define(STATISTICS_KEYS,
+ [policy,
+ operator_policy,
+ effective_policy_definition,
+ consumers,
+ memory,
+ state,
+ garbage_collection,
+ leader,
+ online,
+ members,
+ open_files,
+ single_active_consumer_pid,
+ single_active_consumer_ctag,
+ messages_ram,
+ message_bytes_ram
+ ]).
+
+-define(INFO_KEYS, [name, durable, auto_delete, arguments, pid, messages, messages_ready,
+ messages_unacknowledged, local_state, type] ++ ?STATISTICS_KEYS).
+
+-define(RPC_TIMEOUT, 1000).
+-define(TICK_TIMEOUT, 5000). %% the ra server tick time
+-define(DELETE_TIMEOUT, 5000).
+-define(ADD_MEMBER_TIMEOUT, 5000).
+
+%%----------- rabbit_queue_type ---------------------------------------------
+
+-spec is_enabled() -> boolean().
+is_enabled() ->
+ rabbit_feature_flags:is_enabled(quorum_queue).
+
+%%----------------------------------------------------------------------------
+
+-spec init(amqqueue:amqqueue()) -> rabbit_fifo_client:state().
+init(Q) when ?is_amqqueue(Q) ->
+ {ok, SoftLimit} = application:get_env(rabbit, quorum_commands_soft_limit),
+ %% This lookup could potentially return an {error, not_found}, but we do not
+ %% know what to do if the queue has `disappeared`. Let it crash.
+ {Name, _LeaderNode} = Leader = amqqueue:get_pid(Q),
+ Nodes = get_nodes(Q),
+ QName = amqqueue:get_name(Q),
+ %% Ensure the leader is listed first
+ Servers0 = [{Name, N} || N <- Nodes],
+ Servers = [Leader | lists:delete(Leader, Servers0)],
+ rabbit_fifo_client:init(QName, Servers, SoftLimit,
+ fun() -> credit_flow:block(Name) end,
+ fun() -> credit_flow:unblock(Name), ok end).
+
+-spec close(rabbit_fifo_client:state()) -> ok.
+close(_State) ->
+ ok.
+
+-spec update(amqqueue:amqqueue(), rabbit_fifo_client:state()) ->
+ rabbit_fifo_client:state().
+update(Q, State) when ?amqqueue_is_quorum(Q) ->
+ %% QQ state maintains its own updates
+ State.
+
+-spec handle_event({amqqueue:ra_server_id(), any()},
+ rabbit_fifo_client:state()) ->
+ {ok, rabbit_fifo_client:state(), rabbit_queue_type:actions()} |
+ eol |
+ {protocol_error, Type :: atom(), Reason :: string(), Args :: term()}.
+handle_event({From, Evt}, QState) ->
+ rabbit_fifo_client:handle_ra_event(From, Evt, QState).
+
+-spec declare(amqqueue:amqqueue(), node()) ->
+ {new | existing, amqqueue:amqqueue()} |
+ {protocol_error, Type :: atom(), Reason :: string(), Args :: term()}.
+declare(Q, _Node) when ?amqqueue_is_quorum(Q) ->
+ case rabbit_queue_type_util:run_checks(
+ [fun rabbit_queue_type_util:check_auto_delete/1,
+ fun rabbit_queue_type_util:check_exclusive/1,
+ fun rabbit_queue_type_util:check_non_durable/1],
+ Q) of
+ ok ->
+ start_cluster(Q);
+ Err ->
+ Err
+ end.
+
+start_cluster(Q) ->
+ QName = amqqueue:get_name(Q),
+ Durable = amqqueue:is_durable(Q),
+ AutoDelete = amqqueue:is_auto_delete(Q),
+ Arguments = amqqueue:get_arguments(Q),
+ Opts = amqqueue:get_options(Q),
+ ActingUser = maps:get(user, Opts, ?UNKNOWN_USER),
+ QuorumSize = get_default_quorum_initial_group_size(Arguments),
+ RaName = qname_to_internal_name(QName),
+ Id = {RaName, node()},
+ Nodes = select_quorum_nodes(QuorumSize, rabbit_mnesia:cluster_nodes(all)),
+ NewQ0 = amqqueue:set_pid(Q, Id),
+ NewQ1 = amqqueue:set_type_state(NewQ0, #{nodes => Nodes}),
+ case rabbit_amqqueue:internal_declare(NewQ1, false) of
+ {created, NewQ} ->
+ TickTimeout = application:get_env(rabbit, quorum_tick_interval, ?TICK_TIMEOUT),
+ RaConfs = [make_ra_conf(NewQ, ServerId, TickTimeout)
+ || ServerId <- members(NewQ)],
+ case ra:start_cluster(RaConfs) of
+ {ok, _, _} ->
+ %% TODO: handle error - what should be done if the
+ %% config cannot be updated
+ ok = rabbit_fifo_client:update_machine_state(Id,
+ ra_machine_config(NewQ)),
+ %% force a policy change to ensure the latest config is
+ %% updated even when running the machine version from 0
+ rabbit_event:notify(queue_created,
+ [{name, QName},
+ {durable, Durable},
+ {auto_delete, AutoDelete},
+ {arguments, Arguments},
+ {user_who_performed_action,
+ ActingUser}]),
+ {new, NewQ};
+ {error, Error} ->
+ _ = rabbit_amqqueue:internal_delete(QName, ActingUser),
+ {protocol_error, internal_error,
+ "Cannot declare a queue '~s' on node '~s': ~255p",
+ [rabbit_misc:rs(QName), node(), Error]}
+ end;
+ {existing, _} = Ex ->
+ Ex
+ end.
+
+ra_machine(Q) ->
+ {module, rabbit_fifo, ra_machine_config(Q)}.
+
+ra_machine_config(Q) when ?is_amqqueue(Q) ->
+ QName = amqqueue:get_name(Q),
+ {Name, _} = amqqueue:get_pid(Q),
+ %% take the minimum value of the policy and the queue arg if present
+ MaxLength = args_policy_lookup(<<"max-length">>, fun min/2, Q),
+ %% prefer the policy defined strategy if available
+ Overflow = args_policy_lookup(<<"overflow">>, fun (A, _B) -> A end , Q),
+ MaxBytes = args_policy_lookup(<<"max-length-bytes">>, fun min/2, Q),
+ MaxMemoryLength = args_policy_lookup(<<"max-in-memory-length">>, fun min/2, Q),
+ MaxMemoryBytes = args_policy_lookup(<<"max-in-memory-bytes">>, fun min/2, Q),
+ DeliveryLimit = args_policy_lookup(<<"delivery-limit">>, fun min/2, Q),
+ Expires = args_policy_lookup(<<"expires">>,
+ fun (A, _B) -> A end,
+ Q),
+ #{name => Name,
+ queue_resource => QName,
+ dead_letter_handler => dlx_mfa(Q),
+ become_leader_handler => {?MODULE, become_leader, [QName]},
+ max_length => MaxLength,
+ max_bytes => MaxBytes,
+ max_in_memory_length => MaxMemoryLength,
+ max_in_memory_bytes => MaxMemoryBytes,
+ single_active_consumer_on => single_active_consumer_on(Q),
+ delivery_limit => DeliveryLimit,
+ overflow_strategy => overflow(Overflow, drop_head, QName),
+ created => erlang:system_time(millisecond),
+ expires => Expires
+ }.
+
+single_active_consumer_on(Q) ->
+ QArguments = amqqueue:get_arguments(Q),
+ case rabbit_misc:table_lookup(QArguments, <<"x-single-active-consumer">>) of
+ {bool, true} -> true;
+ _ -> false
+ end.
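+
+%% e.g. declaring the queue with the argument
+%% {<<"x-single-active-consumer">>, bool, true} enables single active
+%% consumer mode; any other value (or its absence) leaves it off.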
+
+update_consumer_handler(QName, {ConsumerTag, ChPid}, Exclusive, AckRequired, Prefetch, Active, ActivityStatus, Args) ->
+ local_or_remote_handler(ChPid, rabbit_quorum_queue, update_consumer,
+ [QName, ChPid, ConsumerTag, Exclusive, AckRequired, Prefetch, Active, ActivityStatus, Args]).
+
+update_consumer(QName, ChPid, ConsumerTag, Exclusive, AckRequired, Prefetch, Active, ActivityStatus, Args) ->
+ catch rabbit_core_metrics:consumer_updated(ChPid, ConsumerTag, Exclusive, AckRequired,
+ QName, Prefetch, Active, ActivityStatus, Args).
+
+cancel_consumer_handler(QName, {ConsumerTag, ChPid}) ->
+ local_or_remote_handler(ChPid, rabbit_quorum_queue, cancel_consumer,
+ [QName, ChPid, ConsumerTag]).
+
+cancel_consumer(QName, ChPid, ConsumerTag) ->
+ catch rabbit_core_metrics:consumer_deleted(ChPid, ConsumerTag, QName),
+ emit_consumer_deleted(ChPid, ConsumerTag, QName, ?INTERNAL_USER).
+
+local_or_remote_handler(ChPid, Module, Function, Args) ->
+ Node = node(ChPid),
+ case Node == node() of
+ true ->
+ erlang:apply(Module, Function, Args);
+ false ->
+ %% this could potentially block for a while if the node is
+ %% in disconnected state or tcp buffers are full
+ rpc:cast(Node, Module, Function, Args)
+ end.
+
+become_leader(QName, Name) ->
+ Fun = fun (Q1) ->
+ amqqueue:set_state(
+ amqqueue:set_pid(Q1, {Name, node()}),
+ live)
+ end,
+ %% as this function is called synchronously when a ra node becomes leader,
+ %% we need to ensure there is no chance of blocking, otherwise the ra
+ %% node may not be able to establish its leadership
+ spawn(fun() ->
+ rabbit_misc:execute_mnesia_transaction(
+ fun() ->
+ rabbit_amqqueue:update(QName, Fun)
+ end),
+ case rabbit_amqqueue:lookup(QName) of
+ {ok, Q0} when ?is_amqqueue(Q0) ->
+ Nodes = get_nodes(Q0),
+ [rpc:call(Node, ?MODULE, rpc_delete_metrics,
+ [QName], ?RPC_TIMEOUT)
+ || Node <- Nodes, Node =/= node()];
+ _ ->
+ ok
+ end
+ end).
+
+-spec all_replica_states() -> {node(), #{atom() => atom()}}.
+all_replica_states() ->
+ Rows = ets:tab2list(ra_state),
+ {node(), maps:from_list(Rows)}.
+
+-spec list_with_minimum_quorum() -> [amqqueue:amqqueue()].
+list_with_minimum_quorum() ->
+ filter_quorum_critical(
+ rabbit_amqqueue:list_local_quorum_queues()).
+
+-spec list_with_minimum_quorum_for_cli() -> [#{binary() => term()}].
+list_with_minimum_quorum_for_cli() ->
+ QQs = list_with_minimum_quorum(),
+ [begin
+ #resource{name = Name} = amqqueue:get_name(Q),
+ #{
+ <<"readable_name">> => rabbit_data_coercion:to_binary(rabbit_misc:rs(amqqueue:get_name(Q))),
+ <<"name">> => Name,
+ <<"virtual_host">> => amqqueue:get_vhost(Q),
+ <<"type">> => <<"quorum">>
+ }
+ end || Q <- QQs].
+
+-spec filter_quorum_critical([amqqueue:amqqueue()]) -> [amqqueue:amqqueue()].
+filter_quorum_critical(Queues) ->
+ %% Example map of QQ replica states:
+ %% #{rabbit@warp10 =>
+ %% #{'%2F_qq.636' => leader,'%2F_qq.243' => leader,
+ %% '%2F_qq.1939' => leader,'%2F_qq.1150' => leader,
+ %% '%2F_qq.1109' => leader,'%2F_qq.1654' => leader,
+ %% '%2F_qq.1679' => leader,'%2F_qq.1003' => leader,
+ %% '%2F_qq.1593' => leader,'%2F_qq.1765' => leader,
+ %% '%2F_qq.933' => leader,'%2F_qq.38' => leader,
+ %% '%2F_qq.1357' => leader,'%2F_qq.1345' => leader,
+ %% '%2F_qq.1694' => leader,'%2F_qq.994' => leader,
+ %% '%2F_qq.490' => leader,'%2F_qq.1704' => leader,
+ %% '%2F_qq.58' => leader,'%2F_qq.564' => leader,
+ %% '%2F_qq.683' => leader,'%2F_qq.386' => leader,
+ %% '%2F_qq.753' => leader,'%2F_qq.6' => leader,
+ %% '%2F_qq.1590' => leader,'%2F_qq.1363' => leader,
+ %% '%2F_qq.882' => leader,'%2F_qq.1161' => leader,...}}
+ ReplicaStates = maps:from_list(
+ rabbit_misc:append_rpc_all_nodes(rabbit_nodes:all_running(),
+ ?MODULE, all_replica_states, [])),
+ filter_quorum_critical(Queues, ReplicaStates).
+
+-spec filter_quorum_critical([amqqueue:amqqueue()], #{node() => #{atom() => atom()}}) -> [amqqueue:amqqueue()].
+
+filter_quorum_critical(Queues, ReplicaStates) ->
+ lists:filter(fun (Q) ->
+ MemberNodes = rabbit_amqqueue:get_quorum_nodes(Q),
+ {Name, _Node} = amqqueue:get_pid(Q),
+ AllUp = lists:filter(fun (N) ->
+ {Name, _} = amqqueue:get_pid(Q),
+ case maps:get(N, ReplicaStates, undefined) of
+ #{Name := State} when State =:= follower orelse State =:= leader ->
+ true;
+ _ -> false
+ end
+ end, MemberNodes),
+ MinQuorum = length(MemberNodes) div 2 + 1,
+ length(AllUp) =< MinQuorum
+ end, Queues).
+
+capabilities() ->
+ #{policies => [<<"max-length">>, <<"max-length-bytes">>, <<"overflow">>,
+ <<"expires">>, <<"max-in-memory-length">>, <<"max-in-memory-bytes">>,
+ <<"delivery-limit">>, <<"dead-letter-exchange">>, <<"dead-letter-routing-key">>],
+ queue_arguments => [<<"x-expires">>, <<"x-dead-letter-exchange">>,
+ <<"x-dead-letter-routing-key">>, <<"x-max-length">>,
+ <<"x-max-length-bytes">>, <<"x-max-in-memory-length">>,
+ <<"x-max-in-memory-bytes">>, <<"x-overflow">>,
+ <<"x-single-active-consumer">>, <<"x-queue-type">>,
+ <<"x-quorum-initial-group-size">>, <<"x-delivery-limit">>],
+ consumer_arguments => [<<"x-priority">>, <<"x-credit">>],
+ server_named => false}.
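+
+%% The 'policies' list above is what rabbit_queue_type:is_policy_applicable/2
+%% checks policy definitions against; keys outside this list make a policy
+%% inapplicable to quorum queues.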
+
+rpc_delete_metrics(QName) ->
+ ets:delete(queue_coarse_metrics, QName),
+ ets:delete(queue_metrics, QName),
+ ok.
+
+spawn_deleter(QName) ->
+ spawn(fun () ->
+ {ok, Q} = rabbit_amqqueue:lookup(QName),
+ delete(Q, false, false, <<"expired">>)
+ end).
+
+handle_tick(QName,
+ {Name, MR, MU, M, C, MsgBytesReady, MsgBytesUnack},
+ Nodes) ->
+ %% this makes calls to remote processes so cannot be run inside the
+ %% ra server
+ Self = self(),
+ _ = spawn(fun() ->
+ R = reductions(Name),
+ rabbit_core_metrics:queue_stats(QName, MR, MU, M, R),
+ Util = case C of
+ 0 -> 0;
+ _ -> rabbit_fifo:usage(Name)
+ end,
+ Infos = [{consumers, C},
+ {consumer_utilisation, Util},
+ {message_bytes_ready, MsgBytesReady},
+ {message_bytes_unacknowledged, MsgBytesUnack},
+ {message_bytes, MsgBytesReady + MsgBytesUnack},
+ {message_bytes_persistent, MsgBytesReady + MsgBytesUnack},
+ {messages_persistent, M}
+
+ | infos(QName, ?STATISTICS_KEYS -- [consumers])],
+ rabbit_core_metrics:queue_stats(QName, Infos),
+ rabbit_event:notify(queue_stats,
+ Infos ++ [{name, QName},
+ {messages, M},
+ {messages_ready, MR},
+ {messages_unacknowledged, MU},
+ {reductions, R}]),
+ ok = repair_leader_record(QName, Self),
+ ExpectedNodes = rabbit_mnesia:cluster_nodes(all),
+ case Nodes -- ExpectedNodes of
+ [] ->
+ ok;
+ Stale ->
+ rabbit_log:info("~s: stale nodes detected. Purging ~w~n",
+ [rabbit_misc:rs(QName), Stale]),
+ %% pipeline purge command
+ {ok, Q} = rabbit_amqqueue:lookup(QName),
+ ok = ra:pipeline_command(amqqueue:get_pid(Q),
+ rabbit_fifo:make_purge_nodes(Stale)),
+
+ ok
+ end
+ end),
+ ok.
+
+repair_leader_record(QName, Self) ->
+ {ok, Q} = rabbit_amqqueue:lookup(QName),
+ Node = node(),
+ case amqqueue:get_pid(Q) of
+ {_, Node} ->
+ %% it's ok - we don't need to do anything
+ ok;
+ _ ->
+ rabbit_log:debug("~s: repairing leader record",
+ [rabbit_misc:rs(QName)]),
+ {_, Name} = erlang:process_info(Self, registered_name),
+ become_leader(QName, Name)
+ end,
+ ok.
+
+repair_amqqueue_nodes(VHost, QueueName) ->
+ QName = #resource{virtual_host = VHost, name = QueueName, kind = queue},
+ repair_amqqueue_nodes(QName).
+
+-spec repair_amqqueue_nodes(rabbit_types:r('queue') | amqqueue:amqqueue()) ->
+ ok | repaired.
+repair_amqqueue_nodes(QName = #resource{}) ->
+ {ok, Q0} = rabbit_amqqueue:lookup(QName),
+ repair_amqqueue_nodes(Q0);
+repair_amqqueue_nodes(Q0) ->
+ QName = amqqueue:get_name(Q0),
+ Leader = amqqueue:get_pid(Q0),
+ {ok, Members, _} = ra:members(Leader),
+ RaNodes = [N || {_, N} <- Members],
+ #{nodes := Nodes} = amqqueue:get_type_state(Q0),
+ case lists:sort(RaNodes) =:= lists:sort(Nodes) of
+ true ->
+ %% up to date
+ ok;
+ false ->
+ %% update amqqueue record
+ Fun = fun (Q) ->
+ TS0 = amqqueue:get_type_state(Q),
+ TS = TS0#{nodes => RaNodes},
+ amqqueue:set_type_state(Q, TS)
+ end,
+ rabbit_misc:execute_mnesia_transaction(
+ fun() ->
+ rabbit_amqqueue:update(QName, Fun)
+ end),
+ repaired
+ end.
+
+reductions(Name) ->
+ try
+ {reductions, R} = process_info(whereis(Name), reductions),
+ R
+ catch
+ error:badarg ->
+ 0
+ end.
+
+is_recoverable(Q) ->
+ Node = node(),
+ Nodes = get_nodes(Q),
+ lists:member(Node, Nodes).
+
+-spec recover(binary(), [amqqueue:amqqueue()]) ->
+ {[amqqueue:amqqueue()], [amqqueue:amqqueue()]}.
+recover(_Vhost, Queues) ->
+ lists:foldl(
+ fun (Q0, {R0, F0}) ->
+ {Name, _} = amqqueue:get_pid(Q0),
+ QName = amqqueue:get_name(Q0),
+ Nodes = get_nodes(Q0),
+ Formatter = {?MODULE, format_ra_event, [QName]},
+ Res = case ra:restart_server({Name, node()},
+ #{ra_event_formatter => Formatter}) of
+ ok ->
+ % queue was restarted, good
+ ok;
+ {error, Err1}
+ when Err1 == not_started orelse
+ Err1 == name_not_registered ->
+ % queue was never started on this node
+ % so needs to be started from scratch.
+ Machine = ra_machine(Q0),
+ RaNodes = [{Name, Node} || Node <- Nodes],
+ case ra:start_server(Name, {Name, node()}, Machine, RaNodes) of
+ ok -> ok;
+ Err2 ->
+ rabbit_log:warning("recover: quorum queue ~w could not"
+ " be started ~w", [Name, Err2]),
+ fail
+ end;
+ {error, {already_started, _}} ->
+ %% this is fine and can happen if a vhost crashes and performs
+ %% recovery whilst the ra application and servers are still
+ %% running
+ ok;
+ Err ->
+ %% catch all clause to avoid causing the vhost not to start
+ rabbit_log:warning("recover: quorum queue ~w could not be "
+ "restarted ~w", [Name, Err]),
+ fail
+ end,
+ %% we have to ensure the quorum queue is
+ %% present in the rabbit_queue table and not just in
+ %% rabbit_durable_queue, as many code paths depend on this
+ {ok, Q} = rabbit_amqqueue:ensure_rabbit_queue_record_is_initialized(Q0),
+ case Res of
+ ok ->
+ {[Q | R0], F0};
+ fail ->
+ {R0, [Q | F0]}
+ end
+ end, {[], []}, Queues).
+
+-spec stop(rabbit_types:vhost()) -> ok.
+stop(VHost) ->
+ _ = [begin
+ Pid = amqqueue:get_pid(Q),
+ ra:stop_server(Pid)
+ end || Q <- find_quorum_queues(VHost)],
+ ok.
+
+-spec delete(amqqueue:amqqueue(),
+ boolean(), boolean(),
+ rabbit_types:username()) ->
+ {ok, QLen :: non_neg_integer()} |
+ {protocol_error, Type :: atom(), Reason :: string(), Args :: term()}.
+delete(Q, true, _IfEmpty, _ActingUser) when ?amqqueue_is_quorum(Q) ->
+ {protocol_error, not_implemented,
+ "cannot delete ~s. queue.delete operations with if-unused flag set are not supported by quorum queues",
+ [rabbit_misc:rs(amqqueue:get_name(Q))]};
+delete(Q, _IfUnused, true, _ActingUser) when ?amqqueue_is_quorum(Q) ->
+ {protocol_error, not_implemented,
+ "cannot delete ~s. queue.delete operations with if-empty flag set are not supported by quorum queues",
+ [rabbit_misc:rs(amqqueue:get_name(Q))]};
+delete(Q, _IfUnused, _IfEmpty, ActingUser) when ?amqqueue_is_quorum(Q) ->
+ {Name, _} = amqqueue:get_pid(Q),
+ QName = amqqueue:get_name(Q),
+ QNodes = get_nodes(Q),
+ %% TODO Quorum queue needs to support consumer tracking for IfUnused
+ Timeout = ?DELETE_TIMEOUT,
+ {ok, ReadyMsgs, _} = stat(Q),
+ Servers = [{Name, Node} || Node <- QNodes],
+ case ra:delete_cluster(Servers, Timeout) of
+ {ok, {_, LeaderNode} = Leader} ->
+ MRef = erlang:monitor(process, Leader),
+ receive
+ {'DOWN', MRef, process, _, _} ->
+ ok
+ after Timeout ->
+ ok = force_delete_queue(Servers)
+ end,
+ ok = delete_queue_data(QName, ActingUser),
+ rpc:call(LeaderNode, rabbit_core_metrics, queue_deleted, [QName],
+ ?RPC_TIMEOUT),
+ {ok, ReadyMsgs};
+ {error, {no_more_servers_to_try, Errs}} ->
+ case lists:all(fun({{error, noproc}, _}) -> true;
+ (_) -> false
+ end, Errs) of
+ true ->
+ %% If all ra nodes were already down, the delete
+ %% has succeeded
+ delete_queue_data(QName, ActingUser),
+ {ok, ReadyMsgs};
+ false ->
+ %% attempt forced deletion of all servers
+ rabbit_log:warning(
+ "Could not delete quorum queue '~s', not enough nodes "
+ " online to reach a quorum: ~255p."
+ " Attempting force delete.",
+ [rabbit_misc:rs(QName), Errs]),
+ ok = force_delete_queue(Servers),
+ delete_queue_data(QName, ActingUser),
+ {ok, ReadyMsgs}
+ end
+ end.
+
+force_delete_queue(Servers) ->
+ [begin
+ case catch(ra:force_delete_server(S)) of
+ ok -> ok;
+ Err ->
+ rabbit_log:warning(
+ "Force delete of ~w failed with: ~w"
+ "This may require manual data clean up~n",
+ [S, Err]),
+ ok
+ end
+ end || S <- Servers],
+ ok.
+
+delete_queue_data(QName, ActingUser) ->
+ _ = rabbit_amqqueue:internal_delete(QName, ActingUser),
+ ok.
+
+
+delete_immediately(Resource, {_Name, _} = QPid) ->
+ _ = rabbit_amqqueue:internal_delete(Resource, ?INTERNAL_USER),
+ {ok, _} = ra:delete_cluster([QPid]),
+ rabbit_core_metrics:queue_deleted(Resource),
+ ok.
+
+settle(complete, CTag, MsgIds, QState) ->
+ rabbit_fifo_client:settle(quorum_ctag(CTag), MsgIds, QState);
+settle(requeue, CTag, MsgIds, QState) ->
+ rabbit_fifo_client:return(quorum_ctag(CTag), MsgIds, QState);
+settle(discard, CTag, MsgIds, QState) ->
+ rabbit_fifo_client:discard(quorum_ctag(CTag), MsgIds, QState).
+
+credit(CTag, Credit, Drain, QState) ->
+ rabbit_fifo_client:credit(quorum_ctag(CTag), Credit, Drain, QState).
+
+-spec dequeue(NoAck :: boolean(), pid(),
+ rabbit_types:ctag(), rabbit_fifo_client:state()) ->
+ {empty, rabbit_fifo_client:state()} |
+ {ok, QLen :: non_neg_integer(), qmsg(), rabbit_fifo_client:state()} |
+ {error, term()}.
+dequeue(NoAck, _LimiterPid, CTag0, QState0) ->
+ CTag = quorum_ctag(CTag0),
+ Settlement = case NoAck of
+ true ->
+ settled;
+ false ->
+ unsettled
+ end,
+ rabbit_fifo_client:dequeue(CTag, Settlement, QState0).
+
+-spec consume(amqqueue:amqqueue(),
+ rabbit_queue_type:consume_spec(),
+ rabbit_fifo_client:state()) ->
+ {ok, rabbit_fifo_client:state(), rabbit_queue_type:actions()} |
+ {error, global_qos_not_supported_for_queue_type}.
+consume(Q, #{limiter_active := true}, _State)
+ when ?amqqueue_is_quorum(Q) ->
+ {error, global_qos_not_supported_for_queue_type};
+consume(Q, Spec, QState0) when ?amqqueue_is_quorum(Q) ->
+ #{no_ack := NoAck,
+ channel_pid := ChPid,
+ prefetch_count := ConsumerPrefetchCount,
+ consumer_tag := ConsumerTag0,
+ exclusive_consume := ExclusiveConsume,
+ args := Args,
+ ok_msg := OkMsg,
+ acting_user := ActingUser} = Spec,
+ %% TODO: validate consumer arguments
+ %% currently quorum queues do not support any arguments
+ QName = amqqueue:get_name(Q),
+ QPid = amqqueue:get_pid(Q),
+ maybe_send_reply(ChPid, OkMsg),
+ ConsumerTag = quorum_ctag(ConsumerTag0),
+ %% A prefetch count of 0 means no limitation,
+ %% let's make it into something large for ra
+ Prefetch0 = case ConsumerPrefetchCount of
+ 0 -> 2000;
+ Other -> Other
+ end,
+ %% consumer info is used to describe the consumer properties
+ AckRequired = not NoAck,
+ ConsumerMeta = #{ack => AckRequired,
+ prefetch => ConsumerPrefetchCount,
+ args => Args,
+ username => ActingUser},
+
+ {CreditMode, Credit, Drain} = parse_credit_args(Prefetch0, Args),
+ %% if the mode is credited we should send a separate credit command
+ %% after checkout and give 0 credits initially
+ Prefetch = case CreditMode of
+ credited -> 0;
+ simple_prefetch -> Prefetch0
+ end,
+ {ok, QState1} = rabbit_fifo_client:checkout(ConsumerTag, Prefetch,
+ CreditMode, ConsumerMeta,
+ QState0),
+ QState = case CreditMode of
+ credited when Credit > 0 ->
+ rabbit_fifo_client:credit(ConsumerTag, Credit, Drain,
+ QState1);
+ _ -> QState1
+ end,
+ case ra:local_query(QPid,
+ fun rabbit_fifo:query_single_active_consumer/1) of
+ {ok, {_, SacResult}, _} ->
+ SingleActiveConsumerOn = single_active_consumer_on(Q),
+ {IsSingleActiveConsumer, ActivityStatus} = case {SingleActiveConsumerOn, SacResult} of
+ {false, _} ->
+ {true, up};
+ {true, {value, {ConsumerTag, ChPid}}} ->
+ {true, single_active};
+ _ ->
+ {false, waiting}
+ end,
+ rabbit_core_metrics:consumer_created(
+ ChPid, ConsumerTag, ExclusiveConsume,
+ AckRequired, QName,
+ ConsumerPrefetchCount, IsSingleActiveConsumer,
+ ActivityStatus, Args),
+ emit_consumer_created(ChPid, ConsumerTag, ExclusiveConsume,
+ AckRequired, QName, Prefetch,
+ Args, none, ActingUser),
+ {ok, QState, []};
+ {error, Error} ->
+ Error;
+ {timeout, _} ->
+ {error, timeout}
+ end.
+
+% -spec basic_cancel(rabbit_types:ctag(), ChPid :: pid(), any(), rabbit_fifo_client:state()) ->
+% {'ok', rabbit_fifo_client:state()}.
+
+cancel(_Q, ConsumerTag, OkMsg, _ActingUser, State) ->
+ maybe_send_reply(self(), OkMsg),
+ rabbit_fifo_client:cancel_checkout(quorum_ctag(ConsumerTag), State).
+
+emit_consumer_created(ChPid, CTag, Exclusive, AckRequired, QName, PrefetchCount, Args, Ref, ActingUser) ->
+ rabbit_event:notify(consumer_created,
+ [{consumer_tag, CTag},
+ {exclusive, Exclusive},
+ {ack_required, AckRequired},
+ {channel, ChPid},
+ {queue, QName},
+ {prefetch_count, PrefetchCount},
+ {arguments, Args},
+ {user_who_performed_action, ActingUser}],
+ Ref).
+
+emit_consumer_deleted(ChPid, ConsumerTag, QName, ActingUser) ->
+ rabbit_event:notify(consumer_deleted,
+ [{consumer_tag, ConsumerTag},
+ {channel, ChPid},
+ {queue, QName},
+ {user_who_performed_action, ActingUser}]).
+
+-spec stateless_deliver(amqqueue:ra_server_id(), rabbit_types:delivery()) -> 'ok'.
+
+stateless_deliver(ServerId, Delivery) ->
+ ok = rabbit_fifo_client:untracked_enqueue([ServerId],
+ Delivery#delivery.message).
+
+-spec deliver(Confirm :: boolean(), rabbit_types:delivery(),
+ rabbit_fifo_client:state()) ->
+ {ok | slow, rabbit_fifo_client:state()} |
+ {reject_publish, rabbit_fifo_client:state()}.
+deliver(false, Delivery, QState0) ->
+ case rabbit_fifo_client:enqueue(Delivery#delivery.message, QState0) of
+ {ok, _} = Res -> Res;
+ {slow, _} = Res -> Res;
+ {reject_publish, State} ->
+ {ok, State}
+ end;
+deliver(true, Delivery, QState0) ->
+ rabbit_fifo_client:enqueue(Delivery#delivery.msg_seq_no,
+ Delivery#delivery.message, QState0).
+
+deliver(QSs, #delivery{confirm = Confirm} = Delivery) ->
+ lists:foldl(
+ fun({Q, stateless}, {Qs, Actions}) ->
+ QRef = amqqueue:get_pid(Q),
+ ok = rabbit_fifo_client:untracked_enqueue(
+ [QRef], Delivery#delivery.message),
+ {Qs, Actions};
+ ({Q, S0}, {Qs, Actions}) ->
+ case deliver(Confirm, Delivery, S0) of
+ {reject_publish, S} ->
+ Seq = Delivery#delivery.msg_seq_no,
+ QName = rabbit_fifo_client:cluster_name(S),
+ {[{Q, S} | Qs], [{rejected, QName, [Seq]} | Actions]};
+ {_, S} ->
+ {[{Q, S} | Qs], Actions}
+ end
+ end, {[], []}, QSs).
+
+
+state_info(S) ->
+ #{pending_raft_commands => rabbit_fifo_client:pending_size(S)}.
+
+
+
+-spec infos(rabbit_types:r('queue')) -> rabbit_types:infos().
+infos(QName) ->
+ infos(QName, ?STATISTICS_KEYS).
+
+infos(QName, Keys) ->
+ case rabbit_amqqueue:lookup(QName) of
+ {ok, Q} ->
+ info(Q, Keys);
+ {error, not_found} ->
+ []
+ end.
+
+info(Q, all_keys) ->
+ info(Q, ?INFO_KEYS);
+info(Q, Items) ->
+ lists:foldr(fun(totals, Acc) ->
+ i_totals(Q) ++ Acc;
+ (type_specific, Acc) ->
+ format(Q) ++ Acc;
+ (Item, Acc) ->
+ [{Item, i(Item, Q)} | Acc]
+ end, [], Items).
+
+-spec stat(amqqueue:amqqueue()) ->
+ {'ok', non_neg_integer(), non_neg_integer()}.
+stat(Q) when ?is_amqqueue(Q) ->
+ %% same short default timeout as in rabbit_fifo_client:stat/1
+ stat(Q, 250).
+
+-spec stat(amqqueue:amqqueue(), non_neg_integer()) -> {'ok', non_neg_integer(), non_neg_integer()}.
+
+stat(Q, Timeout) when ?is_amqqueue(Q) ->
+ Leader = amqqueue:get_pid(Q),
+ try
+ case rabbit_fifo_client:stat(Leader, Timeout) of
+ {ok, _, _} = Success -> Success;
+ {error, _} -> {ok, 0, 0};
+ {timeout, _} -> {ok, 0, 0}
+ end
+ catch
+ _:_ ->
+ %% Leader is not available, cluster might be in minority
+ {ok, 0, 0}
+ end.
+
+-spec purge(amqqueue:amqqueue()) ->
+ {ok, non_neg_integer()}.
+purge(Q) when ?is_amqqueue(Q) ->
+ Node = amqqueue:get_pid(Q),
+ rabbit_fifo_client:purge(Node).
+
+requeue(ConsumerTag, MsgIds, QState) ->
+ rabbit_fifo_client:return(quorum_ctag(ConsumerTag), MsgIds, QState).
+
+cleanup_data_dir() ->
+ Names = [begin
+ {Name, _} = amqqueue:get_pid(Q),
+ Name
+ end
+ || Q <- rabbit_amqqueue:list_by_type(?MODULE),
+ lists:member(node(), get_nodes(Q))],
+ NoQQClusters = rabbit_ra_registry:list_not_quorum_clusters(),
+ Registered = ra_directory:list_registered(),
+ Running = Names ++ NoQQClusters,
+ _ = [maybe_delete_data_dir(UId) || {Name, UId} <- Registered,
+ not lists:member(Name, Running)],
+ ok.
+
+maybe_delete_data_dir(UId) ->
+ Dir = ra_env:server_data_dir(UId),
+ {ok, Config} = ra_log:read_config(Dir),
+ case maps:get(machine, Config) of
+ {module, rabbit_fifo, _} ->
+ ra_lib:recursive_delete(Dir),
+ ra_directory:unregister_name(UId);
+ _ ->
+ ok
+ end.
+
+policy_changed(Q) ->
+ QPid = amqqueue:get_pid(Q),
+ _ = rabbit_fifo_client:update_machine_state(QPid, ra_machine_config(Q)),
+ ok.
+
+-spec cluster_state(Name :: atom()) -> 'down' | 'recovering' | 'running'.
+
+cluster_state(Name) ->
+ case whereis(Name) of
+ undefined -> down;
+ _ ->
+ case ets:lookup(ra_state, Name) of
+ [{_, recover}] -> recovering;
+ _ -> running
+ end
+ end.
+
+-spec status(rabbit_types:vhost(), Name :: rabbit_misc:resource_name()) ->
+ [[{binary(), term()}]] | {error, term()}.
+status(Vhost, QueueName) ->
+ %% Handle not found queues
+ QName = #resource{virtual_host = Vhost, name = QueueName, kind = queue},
+ RName = qname_to_internal_name(QName),
+ case rabbit_amqqueue:lookup(QName) of
+ {ok, Q} when ?amqqueue_is_classic(Q) ->
+ {error, classic_queue_not_supported};
+ {ok, Q} when ?amqqueue_is_quorum(Q) ->
+ Nodes = get_nodes(Q),
+ [begin
+ case get_sys_status({RName, N}) of
+ {ok, Sys} ->
+ {_, M} = lists:keyfind(ra_server_state, 1, Sys),
+ {_, RaftState} = lists:keyfind(raft_state, 1, Sys),
+ #{commit_index := Commit,
+ machine_version := MacVer,
+ current_term := Term,
+ log := #{last_index := Last,
+ snapshot_index := SnapIdx}} = M,
+ [{<<"Node Name">>, N},
+ {<<"Raft State">>, RaftState},
+ {<<"Log Index">>, Last},
+ {<<"Commit Index">>, Commit},
+ {<<"Snapshot Index">>, SnapIdx},
+ {<<"Term">>, Term},
+ {<<"Machine Version">>, MacVer}
+ ];
+ {error, Err} ->
+ [{<<"Node Name">>, N},
+ {<<"Raft State">>, Err},
+ {<<"Log Index">>, <<>>},
+ {<<"Commit Index">>, <<>>},
+ {<<"Snapshot Index">>, <<>>},
+ {<<"Term">>, <<>>},
+ {<<"Machine Version">>, <<>>}
+ ]
+ end
+ end || N <- Nodes];
+ {error, not_found} = E ->
+ E
+ end.
+
+get_sys_status(Proc) ->
+ try lists:nth(5, element(4, sys:get_status(Proc))) of
+ Sys -> {ok, Sys}
+ catch
+ _:Err when is_tuple(Err) ->
+ {error, element(1, Err)};
+ _:_ ->
+ {error, other}
+
+ end.
+
+
+add_member(VHost, Name, Node, Timeout) ->
+ QName = #resource{virtual_host = VHost, name = Name, kind = queue},
+ case rabbit_amqqueue:lookup(QName) of
+ {ok, Q} when ?amqqueue_is_classic(Q) ->
+ {error, classic_queue_not_supported};
+ {ok, Q} when ?amqqueue_is_quorum(Q) ->
+ QNodes = get_nodes(Q),
+ case lists:member(Node, rabbit_nodes:all_running()) of
+ false ->
+ {error, node_not_running};
+ true ->
+ case lists:member(Node, QNodes) of
+ true ->
+ %% idempotent by design
+ ok;
+ false ->
+ add_member(Q, Node, Timeout)
+ end
+ end;
+ {error, not_found} = E ->
+ E
+ end.
+
+add_member(Q, Node, Timeout) when ?amqqueue_is_quorum(Q) ->
+ {RaName, _} = amqqueue:get_pid(Q),
+ QName = amqqueue:get_name(Q),
+ %% TODO parallel calls might crash this, or add a duplicate in quorum_nodes
+ ServerId = {RaName, Node},
+ Members = members(Q),
+ TickTimeout = application:get_env(rabbit, quorum_tick_interval,
+ ?TICK_TIMEOUT),
+ Conf = make_ra_conf(Q, ServerId, TickTimeout),
+ case ra:start_server(Conf) of
+ ok ->
+ case ra:add_member(Members, ServerId, Timeout) of
+ {ok, _, Leader} ->
+ Fun = fun(Q1) ->
+ Q2 = update_type_state(
+ Q1, fun(#{nodes := Nodes} = Ts) ->
+ Ts#{nodes => [Node | Nodes]}
+ end),
+ amqqueue:set_pid(Q2, Leader)
+ end,
+ rabbit_misc:execute_mnesia_transaction(
+ fun() -> rabbit_amqqueue:update(QName, Fun) end),
+ ok;
+ {timeout, _} ->
+ _ = ra:force_delete_server(ServerId),
+ _ = ra:remove_member(Members, ServerId),
+ {error, timeout};
+ E ->
+ _ = ra:force_delete_server(ServerId),
+ E
+ end;
+ E ->
+ E
+ end.
+
+delete_member(VHost, Name, Node) ->
+ QName = #resource{virtual_host = VHost, name = Name, kind = queue},
+ case rabbit_amqqueue:lookup(QName) of
+ {ok, Q} when ?amqqueue_is_classic(Q) ->
+ {error, classic_queue_not_supported};
+ {ok, Q} when ?amqqueue_is_quorum(Q) ->
+ QNodes = get_nodes(Q),
+ case lists:member(Node, QNodes) of
+ false ->
+ %% idempotent by design
+ ok;
+ true ->
+ delete_member(Q, Node)
+ end;
+ {error, not_found} = E ->
+ E
+ end.
+
+
+delete_member(Q, Node) when ?amqqueue_is_quorum(Q) ->
+ QName = amqqueue:get_name(Q),
+ {RaName, _} = amqqueue:get_pid(Q),
+ ServerId = {RaName, Node},
+ case members(Q) of
+ [{_, Node}] ->
+ %% deleting the last member is not allowed
+ {error, last_node};
+ Members ->
+ case ra:remove_member(Members, ServerId) of
+ {ok, _, _Leader} ->
+ Fun = fun(Q1) ->
+ update_type_state(
+ Q1,
+ fun(#{nodes := Nodes} = Ts) ->
+ Ts#{nodes => lists:delete(Node, Nodes)}
+ end)
+ end,
+ rabbit_misc:execute_mnesia_transaction(
+ fun() -> rabbit_amqqueue:update(QName, Fun) end),
+ case ra:force_delete_server(ServerId) of
+ ok ->
+ ok;
+ {error, {badrpc, nodedown}} ->
+ ok;
+ {error, {badrpc, {'EXIT', {badarg, _}}}} ->
+ %% DETS/ETS tables can't be found, application isn't running
+ ok;
+ {error, _} = Err ->
+ Err;
+ Err ->
+ {error, Err}
+ end;
+ {timeout, _} ->
+ {error, timeout};
+ E ->
+ E
+ end
+ end.
+
+-spec shrink_all(node()) ->
+ [{rabbit_amqqueue:name(),
+ {ok, pos_integer()} | {error, pos_integer(), term()}}].
+shrink_all(Node) ->
+ [begin
+ QName = amqqueue:get_name(Q),
+ rabbit_log:info("~s: removing member (replica) on node ~w",
+ [rabbit_misc:rs(QName), Node]),
+ Size = length(get_nodes(Q)),
+ case delete_member(Q, Node) of
+ ok ->
+ {QName, {ok, Size - 1}};
+ {error, Err} ->
+ rabbit_log:warning("~s: failed to remove member (replica) on node ~w, error: ~w",
+ [rabbit_misc:rs(QName), Node, Err]),
+ {QName, {error, Size, Err}}
+ end
+ end || Q <- rabbit_amqqueue:list(),
+ amqqueue:get_type(Q) == ?MODULE,
+ lists:member(Node, get_nodes(Q))].
+
+-spec grow(node(), binary(), binary(), all | even) ->
+ [{rabbit_amqqueue:name(),
+ {ok, pos_integer()} | {error, pos_integer(), term()}}].
+grow(Node, VhostSpec, QueueSpec, Strategy) ->
+ Running = rabbit_nodes:all_running(),
+ [begin
+ Size = length(get_nodes(Q)),
+ QName = amqqueue:get_name(Q),
+ rabbit_log:info("~s: adding a new member (replica) on node ~w",
+ [rabbit_misc:rs(QName), Node]),
+ case add_member(Q, Node, ?ADD_MEMBER_TIMEOUT) of
+ ok ->
+ {QName, {ok, Size + 1}};
+ {error, Err} ->
+ rabbit_log:warning(
+ "~s: failed to add member (replica) on node ~w, error: ~w",
+ [rabbit_misc:rs(QName), Node, Err]),
+ {QName, {error, Size, Err}}
+ end
+ end
+ || Q <- rabbit_amqqueue:list(),
+ amqqueue:get_type(Q) == ?MODULE,
+ %% don't add a member if there is already one on the node
+ not lists:member(Node, get_nodes(Q)),
+ %% node needs to be running
+ lists:member(Node, Running),
+ matches_strategy(Strategy, get_nodes(Q)),
+ is_match(amqqueue:get_vhost(Q), VhostSpec) andalso
+ is_match(get_resource_name(amqqueue:get_name(Q)), QueueSpec) ].
+
+transfer_leadership(Q, Destination) ->
+ {RaName, _} = Pid = amqqueue:get_pid(Q),
+ case ra:transfer_leadership(Pid, {RaName, Destination}) of
+ ok ->
+ case ra:members(Pid) of
+ {_, _, {_, NewNode}} ->
+ {migrated, NewNode};
+ {timeout, _} ->
+ {not_migrated, ra_members_timeout}
+ end;
+ already_leader ->
+ {not_migrated, already_leader};
+ {error, Reason} ->
+ {not_migrated, Reason};
+ {timeout, _} ->
+ %% TODO should we retry once?
+ {not_migrated, timeout}
+ end.
+
+queue_length(Q) ->
+ Name = amqqueue:get_name(Q),
+ case ets:lookup(ra_metrics, Name) of
+ [] -> 0;
+ [{_, _, SnapIdx, _, _, LastIdx, _}] -> LastIdx - SnapIdx
+ end.
+
+get_replicas(Q) ->
+ get_nodes(Q).
+
+get_resource_name(#resource{name = Name}) ->
+ Name.
+
+matches_strategy(all, _) -> true;
+matches_strategy(even, Members) ->
+ length(Members) rem 2 == 0.
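+
+%% Illustrative (node names are hypothetical):
+%% matches_strategy(even, [n1, n2]) -> true, so an even-sized membership
+%% grows to an odd size, while matches_strategy(even, [n1, n2, n3]) -> false.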
+
+is_match(Subj, E) ->
+ nomatch /= re:run(Subj, E).
+
+file_handle_leader_reservation(QName) ->
+ {ok, Q} = rabbit_amqqueue:lookup(QName),
+ ClusterSize = length(get_nodes(Q)),
+ file_handle_cache:set_reservation(2 + ClusterSize).
+
+file_handle_other_reservation() ->
+ file_handle_cache:set_reservation(2).
+
+file_handle_release_reservation() ->
+ file_handle_cache:release_reservation().
+
+-spec reclaim_memory(rabbit_types:vhost(), Name :: rabbit_misc:resource_name()) -> ok | {error, term()}.
+reclaim_memory(Vhost, QueueName) ->
+ QName = #resource{virtual_host = Vhost, name = QueueName, kind = queue},
+ case rabbit_amqqueue:lookup(QName) of
+ {ok, Q} when ?amqqueue_is_classic(Q) ->
+ {error, classic_queue_not_supported};
+ {ok, Q} when ?amqqueue_is_quorum(Q) ->
+ ok = ra:pipeline_command(amqqueue:get_pid(Q),
+ rabbit_fifo:make_garbage_collection());
+ {error, not_found} = E ->
+ E
+ end.
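+
+%% Illustrative call (vhost and queue names are hypothetical):
+%% reclaim_memory(<<"/">>, <<"orders">>) pipelines a garbage collection
+%% command to the queue's Ra server, or returns
+%% {error, classic_queue_not_supported} for a classic queue.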
+
+%%----------------------------------------------------------------------------
+dlx_mfa(Q) ->
+ DLX = init_dlx(args_policy_lookup(<<"dead-letter-exchange">>,
+ fun res_arg/2, Q), Q),
+ DLXRKey = args_policy_lookup(<<"dead-letter-routing-key">>,
+ fun res_arg/2, Q),
+ {?MODULE, dead_letter_publish, [DLX, DLXRKey, amqqueue:get_name(Q)]}.
+
+init_dlx(undefined, _Q) ->
+ undefined;
+init_dlx(DLX, Q) when ?is_amqqueue(Q) ->
+ QName = amqqueue:get_name(Q),
+ rabbit_misc:r(QName, exchange, DLX).
+
+res_arg(_PolVal, ArgVal) -> ArgVal.
+
+dead_letter_publish(undefined, _, _, _) ->
+ ok;
+dead_letter_publish(X, RK, QName, ReasonMsgs) ->
+ case rabbit_exchange:lookup(X) of
+ {ok, Exchange} ->
+ [rabbit_dead_letter:publish(Msg, Reason, Exchange, RK, QName)
+ || {Reason, Msg} <- ReasonMsgs];
+ {error, not_found} ->
+ ok
+ end.
+
+find_quorum_queues(VHost) ->
+ Node = node(),
+ mnesia:async_dirty(
+ fun () ->
+ qlc:e(qlc:q([Q || Q <- mnesia:table(rabbit_durable_queue),
+ ?amqqueue_is_quorum(Q),
+ amqqueue:get_vhost(Q) =:= VHost,
+ amqqueue:qnode(Q) == Node]))
+ end).
+
+i_totals(Q) when ?is_amqqueue(Q) ->
+ QName = amqqueue:get_name(Q),
+ case ets:lookup(queue_coarse_metrics, QName) of
+ [{_, MR, MU, M, _}] ->
+ [{messages_ready, MR},
+ {messages_unacknowledged, MU},
+ {messages, M}];
+ [] ->
+ [{messages_ready, 0},
+ {messages_unacknowledged, 0},
+ {messages, 0}]
+ end.
+
+i(name, Q) when ?is_amqqueue(Q) -> amqqueue:get_name(Q);
+i(durable, Q) when ?is_amqqueue(Q) -> amqqueue:is_durable(Q);
+i(auto_delete, Q) when ?is_amqqueue(Q) -> amqqueue:is_auto_delete(Q);
+i(arguments, Q) when ?is_amqqueue(Q) -> amqqueue:get_arguments(Q);
+i(pid, Q) when ?is_amqqueue(Q) ->
+ {Name, _} = amqqueue:get_pid(Q),
+ whereis(Name);
+i(messages, Q) when ?is_amqqueue(Q) ->
+ QName = amqqueue:get_name(Q),
+ quorum_messages(QName);
+i(messages_ready, Q) when ?is_amqqueue(Q) ->
+ QName = amqqueue:get_name(Q),
+ case ets:lookup(queue_coarse_metrics, QName) of
+ [{_, MR, _, _, _}] ->
+ MR;
+ [] ->
+ 0
+ end;
+i(messages_unacknowledged, Q) when ?is_amqqueue(Q) ->
+ QName = amqqueue:get_name(Q),
+ case ets:lookup(queue_coarse_metrics, QName) of
+ [{_, _, MU, _, _}] ->
+ MU;
+ [] ->
+ 0
+ end;
+i(policy, Q) ->
+ case rabbit_policy:name(Q) of
+ none -> '';
+ Policy -> Policy
+ end;
+i(operator_policy, Q) ->
+ case rabbit_policy:name_op(Q) of
+ none -> '';
+ Policy -> Policy
+ end;
+i(effective_policy_definition, Q) ->
+ case rabbit_policy:effective_definition(Q) of
+ undefined -> [];
+ Def -> Def
+ end;
+i(consumers, Q) when ?is_amqqueue(Q) ->
+ QName = amqqueue:get_name(Q),
+ case ets:lookup(queue_metrics, QName) of
+ [{_, M, _}] ->
+ proplists:get_value(consumers, M, 0);
+ [] ->
+ 0
+ end;
+i(memory, Q) when ?is_amqqueue(Q) ->
+ {Name, _} = amqqueue:get_pid(Q),
+ try
+ {memory, M} = process_info(whereis(Name), memory),
+ M
+ catch
+ error:badarg ->
+ 0
+ end;
+i(state, Q) when ?is_amqqueue(Q) ->
+ {Name, Node} = amqqueue:get_pid(Q),
+ %% Check against the leader or last known leader
+ case rpc:call(Node, ?MODULE, cluster_state, [Name], ?RPC_TIMEOUT) of
+ {badrpc, _} -> down;
+ State -> State
+ end;
+i(local_state, Q) when ?is_amqqueue(Q) ->
+ {Name, _} = amqqueue:get_pid(Q),
+ case ets:lookup(ra_state, Name) of
+ [{_, State}] -> State;
+ _ -> not_member
+ end;
+i(garbage_collection, Q) when ?is_amqqueue(Q) ->
+ {Name, _} = amqqueue:get_pid(Q),
+ try
+ rabbit_misc:get_gc_info(whereis(Name))
+ catch
+ error:badarg ->
+ []
+ end;
+i(members, Q) when ?is_amqqueue(Q) ->
+ get_nodes(Q);
+i(online, Q) -> online(Q);
+i(leader, Q) -> leader(Q);
+i(open_files, Q) when ?is_amqqueue(Q) ->
+ {Name, _} = amqqueue:get_pid(Q),
+ Nodes = get_nodes(Q),
+ {Data, _} = rpc:multicall(Nodes, ?MODULE, open_files, [Name]),
+ lists:flatten(Data);
+i(single_active_consumer_pid, Q) when ?is_amqqueue(Q) ->
+ QPid = amqqueue:get_pid(Q),
+ case ra:local_query(QPid, fun rabbit_fifo:query_single_active_consumer/1) of
+ {ok, {_, {value, {_ConsumerTag, ChPid}}}, _} ->
+ ChPid;
+ {ok, _, _} ->
+ '';
+ {error, _} ->
+ '';
+ {timeout, _} ->
+ ''
+ end;
+i(single_active_consumer_ctag, Q) when ?is_amqqueue(Q) ->
+ QPid = amqqueue:get_pid(Q),
+ case ra:local_query(QPid,
+ fun rabbit_fifo:query_single_active_consumer/1) of
+ {ok, {_, {value, {ConsumerTag, _ChPid}}}, _} ->
+ ConsumerTag;
+ {ok, _, _} ->
+ '';
+ {error, _} ->
+ '';
+ {timeout, _} ->
+ ''
+ end;
+i(type, _) -> quorum;
+i(messages_ram, Q) when ?is_amqqueue(Q) ->
+ QPid = amqqueue:get_pid(Q),
+ case ra:local_query(QPid,
+ fun rabbit_fifo:query_in_memory_usage/1) of
+ {ok, {_, {Length, _}}, _} ->
+ Length;
+ {error, _} ->
+ 0;
+ {timeout, _} ->
+ 0
+ end;
+i(message_bytes_ram, Q) when ?is_amqqueue(Q) ->
+ QPid = amqqueue:get_pid(Q),
+ case ra:local_query(QPid,
+ fun rabbit_fifo:query_in_memory_usage/1) of
+ {ok, {_, {_, Bytes}}, _} ->
+ Bytes;
+ {error, _} ->
+ 0;
+ {timeout, _} ->
+ 0
+ end;
+i(_K, _Q) -> ''.
+
+open_files(Name) ->
+ case whereis(Name) of
+ undefined -> {node(), 0};
+ Pid -> case ets:lookup(ra_open_file_metrics, Pid) of
+ [] -> {node(), 0};
+ [{_, Count}] -> {node(), Count}
+ end
+ end.
+
+leader(Q) when ?is_amqqueue(Q) ->
+ {Name, Leader} = amqqueue:get_pid(Q),
+ case is_process_alive(Name, Leader) of
+ true -> Leader;
+ false -> ''
+ end.
+
+peek(Vhost, Queue, Pos) ->
+ peek(Pos, rabbit_misc:r(Vhost, queue, Queue)).
+
+peek(Pos, #resource{} = QName) ->
+ case rabbit_amqqueue:lookup(QName) of
+ {ok, Q} ->
+ peek(Pos, Q);
+ Err ->
+ Err
+ end;
+peek(Pos, Q) when ?is_amqqueue(Q) andalso ?amqqueue_is_quorum(Q) ->
+ LeaderPid = amqqueue:get_pid(Q),
+ case ra:aux_command(LeaderPid, {peek, Pos}) of
+ {ok, {MsgHeader, Msg0}} ->
+ Count = case MsgHeader of
+ #{delivery_count := C} -> C;
+ _ -> 0
+ end,
+ Msg = rabbit_basic:add_header(<<"x-delivery-count">>, long,
+ Count, Msg0),
+ {ok, rabbit_basic:peek_fmt_message(Msg)};
+ {error, Err} ->
+ {error, Err};
+ Err ->
+ Err
+ end;
+peek(_Pos, Q) when ?is_amqqueue(Q) andalso ?amqqueue_is_classic(Q) ->
+ {error, classic_queue_not_supported}.
+
+online(Q) when ?is_amqqueue(Q) ->
+ Nodes = get_nodes(Q),
+ {Name, _} = amqqueue:get_pid(Q),
+ [Node || Node <- Nodes, is_process_alive(Name, Node)].
+
+format(Q) when ?is_amqqueue(Q) ->
+ Nodes = get_nodes(Q),
+ [{members, Nodes}, {online, online(Q)}, {leader, leader(Q)}].
+
+is_process_alive(Name, Node) ->
+ erlang:is_pid(rpc:call(Node, erlang, whereis, [Name], ?RPC_TIMEOUT)).
+
+-spec quorum_messages(rabbit_amqqueue:name()) -> non_neg_integer().
+
+quorum_messages(QName) ->
+ case ets:lookup(queue_coarse_metrics, QName) of
+ [{_, _, _, M, _}] ->
+ M;
+ [] ->
+ 0
+ end.
+
+quorum_ctag(Int) when is_integer(Int) ->
+ integer_to_binary(Int);
+quorum_ctag(Other) ->
+ Other.
+
+maybe_send_reply(_ChPid, undefined) -> ok;
+maybe_send_reply(ChPid, Msg) -> ok = rabbit_channel:send_command(ChPid, Msg).
+
+queue_name(RaFifoState) ->
+ rabbit_fifo_client:cluster_name(RaFifoState).
+
+get_default_quorum_initial_group_size(Arguments) ->
+ case rabbit_misc:table_lookup(Arguments, <<"x-quorum-initial-group-size">>) of
+ undefined -> application:get_env(rabbit, default_quorum_initial_group_size);
+ {_Type, Val} -> Val
+ end.
+
+select_quorum_nodes(Size, All) when length(All) =< Size ->
+ All;
+select_quorum_nodes(Size, All) ->
+ Node = node(),
+ case lists:member(Node, All) of
+ true ->
+ select_quorum_nodes(Size - 1, lists:delete(Node, All), [Node]);
+ false ->
+ select_quorum_nodes(Size, All, [])
+ end.
+
+select_quorum_nodes(0, _, Selected) ->
+ Selected;
+select_quorum_nodes(Size, Rest, Selected) ->
+ S = lists:nth(rand:uniform(length(Rest)), Rest),
+ select_quorum_nodes(Size - 1, lists:delete(S, Rest), [S | Selected]).
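+
+%% Illustrative (assuming the local node is 'rabbit@a'):
+%% select_quorum_nodes(3, ['rabbit@a', 'rabbit@b', 'rabbit@c', 'rabbit@d'])
+%% always keeps the local node and fills the remaining two slots at
+%% random, e.g. ['rabbit@c', 'rabbit@b', 'rabbit@a'].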
+
+%% members with the current leader first
+members(Q) when ?amqqueue_is_quorum(Q) ->
+ {RaName, LeaderNode} = amqqueue:get_pid(Q),
+ Nodes = lists:delete(LeaderNode, get_nodes(Q)),
+ [{RaName, N} || N <- [LeaderNode | Nodes]].
+
+format_ra_event(ServerId, Evt, QRef) ->
+ {'$gen_cast', {queue_event, QRef, {ServerId, Evt}}}.
+
+make_ra_conf(Q, ServerId, TickTimeout) ->
+ QName = amqqueue:get_name(Q),
+ RaMachine = ra_machine(Q),
+ [{ClusterName, _} | _] = Members = members(Q),
+ UId = ra:new_uid(ra_lib:to_binary(ClusterName)),
+ FName = rabbit_misc:rs(QName),
+ Formatter = {?MODULE, format_ra_event, [QName]},
+ #{cluster_name => ClusterName,
+ id => ServerId,
+ uid => UId,
+ friendly_name => FName,
+ metrics_key => QName,
+ initial_members => Members,
+ log_init_args => #{uid => UId},
+ tick_timeout => TickTimeout,
+ machine => RaMachine,
+ ra_event_formatter => Formatter}.
+
+get_nodes(Q) when ?is_amqqueue(Q) ->
+ #{nodes := Nodes} = amqqueue:get_type_state(Q),
+ Nodes.
+
+update_type_state(Q, Fun) when ?is_amqqueue(Q) ->
+ Ts = amqqueue:get_type_state(Q),
+ amqqueue:set_type_state(Q, Fun(Ts)).
+
+overflow(undefined, Def, _QName) -> Def;
+overflow(<<"reject-publish">>, _Def, _QName) -> reject_publish;
+overflow(<<"drop-head">>, _Def, _QName) -> drop_head;
+overflow(<<"reject-publish-dlx">> = V, Def, QName) ->
+ rabbit_log:warning("Invalid overflow strategy ~p for quorum queue: ~p",
+ [V, rabbit_misc:rs(QName)]),
+ Def.
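+
+%% Illustrative mapping: overflow(undefined, drop_head, QName) returns the
+%% default drop_head, overflow(<<"reject-publish">>, drop_head, QName) ->
+%% reject_publish, and <<"reject-publish-dlx">> falls back to the default
+%% with the warning above.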
+
+parse_credit_args(Default, Args) ->
+ case rabbit_misc:table_lookup(Args, <<"x-credit">>) of
+ {table, T} ->
+ case {rabbit_misc:table_lookup(T, <<"credit">>),
+ rabbit_misc:table_lookup(T, <<"drain">>)} of
+ {{long, C}, {bool, D}} ->
+ {credited, C, D};
+ _ ->
+ {simple_prefetch, Default, false}
+ end;
+ undefined ->
+ {simple_prefetch, Default, false}
+ end.
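+
+%% A sketch of the expected argument shape (values are hypothetical):
+%% parse_credit_args(10, [{<<"x-credit">>, table,
+%% [{<<"credit">>, long, 5},
+%% {<<"drain">>, bool, false}]}])
+%% -> {credited, 5, false}
+%% parse_credit_args(10, []) -> {simple_prefetch, 10, false}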
diff --git a/deps/rabbit/src/rabbit_ra_registry.erl b/deps/rabbit/src/rabbit_ra_registry.erl
new file mode 100644
index 0000000000..b02d89eda5
--- /dev/null
+++ b/deps/rabbit/src/rabbit_ra_registry.erl
@@ -0,0 +1,25 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at https://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_ra_registry).
+
+-export([list_not_quorum_clusters/0]).
+
+%% Not all ra clusters are quorum queues. We keep a list of the exceptions
+%% so that they are not taken into account in operations such as memory
+%% calculation and data cleanup.
+%% Hardcoded for now.
+list_not_quorum_clusters() ->
+ [rabbit_stream_coordinator].
diff --git a/deps/rabbit/src/rabbit_reader.erl b/deps/rabbit/src/rabbit_reader.erl
new file mode 100644
index 0000000000..c91dbbc105
--- /dev/null
+++ b/deps/rabbit/src/rabbit_reader.erl
@@ -0,0 +1,1803 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_reader).
+
+%% Transitional step until we can require Erlang/OTP 21 and
+%% use the now recommended try/catch syntax for obtaining the stack trace.
+-compile(nowarn_deprecated_function).
+
+%% This is an AMQP 0-9-1 connection implementation. If the AMQP 1.0 plugin is
+%% enabled, this module passes control of incoming AMQP 1.0 connections to it.
+%%
+%% Every connection (as in, a process using this module)
+%% is a controlling process for a server socket.
+%%
+%% Connections have a number of responsibilities:
+%%
+%% * Performing protocol handshake
+%% * Parsing incoming data and dispatching protocol methods
+%% * Authenticating clients (with the help of authentication backends)
+%% * Enforcing TCP backpressure (throttling clients)
+%% * Enforcing connection limits, e.g. channel_max
+%% * Channel management
+%% * Setting up heartbeater and alarm notifications
+%% * Emitting connection and network activity metric events
+%% * Gracefully handling client disconnects, channel termination, etc.
+%%
+%% and a few more.
+%%
+%% Every connection has
+%%
+%% * a queue collector which is responsible for keeping
+%% track of exclusive queues on the connection and their cleanup.
+%% * a heartbeater that's responsible for sending heartbeat frames to clients,
+%% keeping track of the incoming ones and notifying the connection about
+%% heartbeat timeouts
+%% * a stats timer used to periodically emit metric events
+%%
+%% Some dependencies are started under a separate supervisor to avoid deadlocks
+%% during system shutdown. See rabbit_channel_sup:start_link/0 for details.
+%%
+%% Reader processes are special processes (in the OTP sense).
+
+-include("rabbit_framing.hrl").
+-include("rabbit.hrl").
+
+-export([start_link/2, info_keys/0, info/1, info/2, force_event_refresh/2,
+ shutdown/2]).
+
+-export([system_continue/3, system_terminate/4, system_code_change/4]).
+
+-export([init/3, mainloop/4, recvloop/4]).
+
+-export([conserve_resources/3, server_properties/1]).
+
+-define(NORMAL_TIMEOUT, 3).
+-define(CLOSING_TIMEOUT, 30).
+-define(CHANNEL_TERMINATION_TIMEOUT, 3).
+%% We wait for this many seconds before closing the TCP connection
+%% with a client that failed to log in. This provides some relief
+%% from connection storms and DoS attacks.
+-define(SILENT_CLOSE_DELAY, 3).
+-define(CHANNEL_MIN, 1).
+
+%%--------------------------------------------------------------------------
+
+-record(v1, {
+ %% parent process
+ parent,
+ %% socket
+ sock,
+ %% connection state, see connection record
+ connection,
+ callback,
+ recv_len,
+ pending_recv,
+ %% pre_init | securing | running | blocking | blocked | closing | closed | {become, F}
+ connection_state,
+ %% see comment in rabbit_connection_sup:start_link/0
+ helper_sup,
+ %% takes care of cleaning up exclusive queues,
+ %% see rabbit_queue_collector
+ queue_collector,
+ %% sends and receives heartbeat frames,
+ %% see rabbit_heartbeat
+ heartbeater,
+ %% timer used to emit statistics
+ stats_timer,
+ %% channel supervisor
+ channel_sup_sup_pid,
+ %% how many channels this connection has
+ channel_count,
+ %% throttling state, for both
+ %% credit- and resource-driven flow control
+ throttle,
+ proxy_socket}).
+
+-record(throttle, {
+ %% never | timestamp()
+ last_blocked_at,
+ %% a set of the reasons why we are
+ %% blocked: {resource, memory}, {resource, disk}.
+ %% More reasons can be added in the future.
+ blocked_by,
+ %% true if we have received any publishes, false otherwise;
+ %% note that this will also be true when the connection is
+ %% already blocked
+ should_block,
+ %% true if we have sent a connection.blocked,
+ %% false otherwise
+ connection_blocked_message_sent
+}).
+
+-define(STATISTICS_KEYS, [pid, recv_oct, recv_cnt, send_oct, send_cnt,
+ send_pend, state, channels, reductions,
+ garbage_collection]).
+
+-define(SIMPLE_METRICS, [pid, recv_oct, send_oct, reductions]).
+-define(OTHER_METRICS, [recv_cnt, send_cnt, send_pend, state, channels,
+ garbage_collection]).
+
+-define(CREATION_EVENT_KEYS,
+ [pid, name, port, peer_port, host,
+ peer_host, ssl, peer_cert_subject, peer_cert_issuer,
+ peer_cert_validity, auth_mechanism, ssl_protocol,
+ ssl_key_exchange, ssl_cipher, ssl_hash, protocol, user, vhost,
+ timeout, frame_max, channel_max, client_properties, connected_at,
+ node, user_who_performed_action]).
+
+-define(INFO_KEYS, ?CREATION_EVENT_KEYS ++ ?STATISTICS_KEYS -- [pid]).
+
+-define(AUTH_NOTIFICATION_INFO_KEYS,
+ [host, name, peer_host, peer_port, protocol, auth_mechanism,
+ ssl, ssl_protocol, ssl_cipher, peer_cert_issuer, peer_cert_subject,
+ peer_cert_validity]).
+
+-define(IS_RUNNING(State),
+ (State#v1.connection_state =:= running orelse
+ State#v1.connection_state =:= blocked)).
+
+-define(IS_STOPPING(State),
+ (State#v1.connection_state =:= closing orelse
+ State#v1.connection_state =:= closed)).
+
+%%--------------------------------------------------------------------------
+
+-type resource_alert() :: {WasAlarmSetForNode :: boolean(),
+ IsThereAnyAlarmsWithSameSourceInTheCluster :: boolean(),
+ NodeForWhichAlarmWasSetOrCleared :: node()}.
+
+%%--------------------------------------------------------------------------
+
+-spec start_link(pid(), any()) -> rabbit_types:ok(pid()).
+
+start_link(HelperSup, Ref) ->
+ Pid = proc_lib:spawn_link(?MODULE, init, [self(), HelperSup, Ref]),
+ {ok, Pid}.
+
+-spec shutdown(pid(), string()) -> 'ok'.
+
+shutdown(Pid, Explanation) ->
+ gen_server:call(Pid, {shutdown, Explanation}, infinity).
+
+-spec init(pid(), pid(), any()) -> no_return().
+
+init(Parent, HelperSup, Ref) ->
+ ?LG_PROCESS_TYPE(reader),
+ {ok, Sock} = rabbit_networking:handshake(Ref,
+ application:get_env(rabbit, proxy_protocol, false)),
+ Deb = sys:debug_options([]),
+ start_connection(Parent, HelperSup, Deb, Sock).
+
+-spec system_continue(_,_,{[binary()], non_neg_integer(), #v1{}}) -> any().
+
+system_continue(Parent, Deb, {Buf, BufLen, State}) ->
+ mainloop(Deb, Buf, BufLen, State#v1{parent = Parent}).
+
+-spec system_terminate(_,_,_,_) -> no_return().
+
+system_terminate(Reason, _Parent, _Deb, _State) ->
+ exit(Reason).
+
+-spec system_code_change(_,_,_,_) -> {'ok',_}.
+
+system_code_change(Misc, _Module, _OldVsn, _Extra) ->
+ {ok, Misc}.
+
+-spec info_keys() -> rabbit_types:info_keys().
+
+info_keys() -> ?INFO_KEYS.
+
+-spec info(pid()) -> rabbit_types:infos().
+
+info(Pid) ->
+ gen_server:call(Pid, info, infinity).
+
+-spec info(pid(), rabbit_types:info_keys()) -> rabbit_types:infos().
+
+info(Pid, Items) ->
+ case gen_server:call(Pid, {info, Items}, infinity) of
+ {ok, Res} -> Res;
+ {error, Error} -> throw(Error)
+ end.
+
+-spec force_event_refresh(pid(), reference()) -> 'ok'.
+
+%% Note: https://www.pivotaltracker.com/story/show/166962656
+%% This event is necessary for the stats timer to be initialized with
+%% the correct values once the management agent has started.
+force_event_refresh(Pid, Ref) ->
+ gen_server:cast(Pid, {force_event_refresh, Ref}).
+
+-spec conserve_resources(pid(), atom(), resource_alert()) -> 'ok'.
+
+conserve_resources(Pid, Source, {_, Conserve, _}) ->
+ Pid ! {conserve_resources, Source, Conserve},
+ ok.
+
+-spec server_properties(rabbit_types:protocol()) ->
+ rabbit_framing:amqp_table().
+
+server_properties(Protocol) ->
+ {ok, Product} = application:get_key(rabbit, description),
+ {ok, Version} = application:get_key(rabbit, vsn),
+
+ %% Get any configuration-specified server properties
+ {ok, RawConfigServerProps} = application:get_env(rabbit,
+ server_properties),
+
+ %% Normalize the simplified (2-tuple) and unsimplified (3-tuple) forms
+ %% from the config and merge them with the generated built-in properties
+ NormalizedConfigServerProps =
+ [{<<"capabilities">>, table, server_capabilities(Protocol)} |
+ [case X of
+ {KeyAtom, Value} -> {list_to_binary(atom_to_list(KeyAtom)),
+ longstr,
+ maybe_list_to_binary(Value)};
+ {BinKey, Type, Value} -> {BinKey, Type, Value}
+ end || X <- RawConfigServerProps ++
+ [{product, Product},
+ {version, Version},
+ {cluster_name, rabbit_nodes:cluster_name()},
+ {platform, rabbit_misc:platform_and_version()},
+ {copyright, ?COPYRIGHT_MESSAGE},
+ {information, ?INFORMATION_MESSAGE}]]],
+
+ %% Filter duplicated properties in favour of config file provided values
+ lists:usort(fun ({K1,_,_}, {K2,_,_}) -> K1 =< K2 end,
+ NormalizedConfigServerProps).
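+
+%% For illustration (hypothetical config values): a 2-tuple such as
+%% {product, "MyBroker"} is normalized to
+%% {<<"product">>, longstr, <<"MyBroker">>}, while a 3-tuple such as
+%% {<<"flag">>, bool, true} is passed through unchanged.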
+
+maybe_list_to_binary(V) when is_binary(V) -> V;
+maybe_list_to_binary(V) when is_list(V) -> list_to_binary(V).
+
+server_capabilities(rabbit_framing_amqp_0_9_1) ->
+ [{<<"publisher_confirms">>, bool, true},
+ {<<"exchange_exchange_bindings">>, bool, true},
+ {<<"basic.nack">>, bool, true},
+ {<<"consumer_cancel_notify">>, bool, true},
+ {<<"connection.blocked">>, bool, true},
+ {<<"consumer_priorities">>, bool, true},
+ {<<"authentication_failure_close">>, bool, true},
+ {<<"per_consumer_qos">>, bool, true},
+ {<<"direct_reply_to">>, bool, true}];
+server_capabilities(_) ->
+ [].
+
+%%--------------------------------------------------------------------------
+
+socket_error(Reason) when is_atom(Reason) ->
+ rabbit_log_connection:error("Error on AMQP connection ~p: ~s~n",
+ [self(), rabbit_misc:format_inet_error(Reason)]);
+socket_error(Reason) ->
+ Fmt = "Error on AMQP connection ~p:~n~p~n",
+ Args = [self(), Reason],
+ case Reason of
+ %% The socket was closed while upgrading to SSL.
+ %% This is presumably a TCP healthcheck, so don't log
+ %% it unless specified otherwise.
+ {ssl_upgrade_error, closed} ->
+ %% Lager sinks (rabbit_log_connection)
+ %% are handled by the lager parse_transform,
+ %% so the log level has to be selected with a function call.
+ rabbit_log_connection:debug(Fmt, Args);
+ _ ->
+ rabbit_log_connection:error(Fmt, Args)
+ end.
+
+inet_op(F) -> rabbit_misc:throw_on_error(inet_error, F).
+
+socket_op(Sock, Fun) ->
+ RealSocket = rabbit_net:unwrap_socket(Sock),
+ case Fun(Sock) of
+ {ok, Res} -> Res;
+ {error, Reason} -> socket_error(Reason),
+ rabbit_net:fast_close(RealSocket),
+ exit(normal)
+ end.
+
+-spec start_connection(pid(), pid(), any(), rabbit_net:socket()) ->
+ no_return().
+
+start_connection(Parent, HelperSup, Deb, Sock) ->
+ process_flag(trap_exit, true),
+ RealSocket = rabbit_net:unwrap_socket(Sock),
+ Name = case rabbit_net:connection_string(Sock, inbound) of
+ {ok, Str} -> list_to_binary(Str);
+ {error, enotconn} -> rabbit_net:fast_close(RealSocket),
+ exit(normal);
+ {error, Reason} -> socket_error(Reason),
+ rabbit_net:fast_close(RealSocket),
+ exit(normal)
+ end,
+ {ok, HandshakeTimeout} = application:get_env(rabbit, handshake_timeout),
+ InitialFrameMax = application:get_env(rabbit, initial_frame_max, ?FRAME_MIN_SIZE),
+ erlang:send_after(HandshakeTimeout, self(), handshake_timeout),
+ {PeerHost, PeerPort, Host, Port} =
+ socket_op(Sock, fun (S) -> rabbit_net:socket_ends(S, inbound) end),
+ ?store_proc_name(Name),
+ State = #v1{parent = Parent,
+ sock = RealSocket,
+ connection = #connection{
+ name = Name,
+ log_name = Name,
+ host = Host,
+ peer_host = PeerHost,
+ port = Port,
+ peer_port = PeerPort,
+ protocol = none,
+ user = none,
+ timeout_sec = (HandshakeTimeout / 1000),
+ frame_max = InitialFrameMax,
+ vhost = none,
+ client_properties = none,
+ capabilities = [],
+ auth_mechanism = none,
+ auth_state = none,
+ connected_at = os:system_time(
+ milli_seconds)},
+ callback = uninitialized_callback,
+ recv_len = 0,
+ pending_recv = false,
+ connection_state = pre_init,
+ queue_collector = undefined, %% started on tune-ok
+ helper_sup = HelperSup,
+ heartbeater = none,
+ channel_sup_sup_pid = none,
+ channel_count = 0,
+ throttle = #throttle{
+ last_blocked_at = never,
+ should_block = false,
+ blocked_by = sets:new(),
+ connection_blocked_message_sent = false
+ },
+ proxy_socket = rabbit_net:maybe_get_proxy_socket(Sock)},
+ try
+ case run({?MODULE, recvloop,
+ [Deb, [], 0, switch_callback(rabbit_event:init_stats_timer(
+ State, #v1.stats_timer),
+ handshake, 8)]}) of
+ %% connection was closed cleanly by the client
+ #v1{connection = #connection{user = #user{username = Username},
+ vhost = VHost}} ->
+ rabbit_log_connection:info("closing AMQP connection ~p (~s, vhost: '~s', user: '~s')~n",
+ [self(), dynamic_connection_name(Name), VHost, Username]);
+ %% just to be more defensive
+ _ ->
+ rabbit_log_connection:info("closing AMQP connection ~p (~s)~n",
+ [self(), dynamic_connection_name(Name)])
+ end
+ catch
+ Ex ->
+ log_connection_exception(dynamic_connection_name(Name), Ex)
+ after
+ %% We don't call gen_tcp:close/1 here since it waits for
+ %% pending output to be sent, which results in unnecessary
+ %% delays. We could just terminate - the reader is the
+ %% controlling process and hence its termination will close
+ %% the socket. However, to keep the file_handle_cache
+ %% accounting as accurate as possible we ought to close the
+ %% socket w/o delay before termination.
+ rabbit_net:fast_close(RealSocket),
+ rabbit_networking:unregister_connection(self()),
+ rabbit_core_metrics:connection_closed(self()),
+ ClientProperties = case get(client_properties) of
+ undefined ->
+ [];
+ Properties ->
+ Properties
+ end,
+ EventProperties = [{name, Name},
+ {pid, self()},
+ {node, node()},
+ {client_properties, ClientProperties}],
+ EventProperties1 = case get(connection_user_provided_name) of
+ undefined ->
+ EventProperties;
+ ConnectionUserProvidedName ->
+ [{user_provided_name, ConnectionUserProvidedName} | EventProperties]
+ end,
+ rabbit_event:notify(connection_closed, EventProperties1)
+ end,
+ done.
+
+log_connection_exception(Name, Ex) ->
+ Severity = case Ex of
+ connection_closed_with_no_data_received -> debug;
+ {connection_closed_abruptly, _} -> warning;
+ connection_closed_abruptly -> warning;
+ _ -> error
+ end,
+ log_connection_exception(Severity, Name, Ex).
+
+log_connection_exception(Severity, Name, {heartbeat_timeout, TimeoutSec}) ->
+ %% Long line to avoid extra spaces and line breaks in log
+ log_connection_exception_with_severity(Severity,
+ "closing AMQP connection ~p (~s):~n"
+ "missed heartbeats from client, timeout: ~ps~n",
+ [self(), Name, TimeoutSec]);
+log_connection_exception(Severity, Name, {connection_closed_abruptly,
+ #v1{connection = #connection{user = #user{username = Username},
+ vhost = VHost}}}) ->
+ log_connection_exception_with_severity(Severity,
+ "closing AMQP connection ~p (~s, vhost: '~s', user: '~s'):~nclient unexpectedly closed TCP connection~n",
+ [self(), Name, VHost, Username]);
+%% When a client abruptly closes the connection before
+%% connection.open/authentication/authorization succeeded, don't log the
+%% username and vhost as 'none'
+log_connection_exception(Severity, Name, {connection_closed_abruptly, _}) ->
+ log_connection_exception_with_severity(Severity,
+ "closing AMQP connection ~p (~s):~nclient unexpectedly closed TCP connection~n",
+ [self(), Name]);
+%% failed connection.tune negotiations
+log_connection_exception(Severity, Name, {handshake_error, tuning, _Channel,
+ {exit, #amqp_error{explanation = Explanation},
+ _Method, _Stacktrace}}) ->
+ log_connection_exception_with_severity(Severity,
+ "closing AMQP connection ~p (~s):~nfailed to negotiate connection parameters: ~s~n",
+ [self(), Name, Explanation]);
+%% old exception structure
+log_connection_exception(Severity, Name, connection_closed_abruptly) ->
+ log_connection_exception_with_severity(Severity,
+ "closing AMQP connection ~p (~s):~n"
+ "client unexpectedly closed TCP connection~n",
+ [self(), Name]);
+log_connection_exception(Severity, Name, Ex) ->
+ log_connection_exception_with_severity(Severity,
+ "closing AMQP connection ~p (~s):~n~p~n",
+ [self(), Name, Ex]).
+
+log_connection_exception_with_severity(Severity, Fmt, Args) ->
+ case Severity of
+ debug -> rabbit_log_connection:debug(Fmt, Args);
+ warning -> rabbit_log_connection:warning(Fmt, Args);
+ error -> rabbit_log_connection:error(Fmt, Args)
+ end.
+
+run({M, F, A}) ->
+ try apply(M, F, A)
+ catch {become, MFA} -> run(MFA)
+ end.
+
+recvloop(Deb, Buf, BufLen, State = #v1{pending_recv = true}) ->
+ mainloop(Deb, Buf, BufLen, State);
+recvloop(Deb, Buf, BufLen, State = #v1{connection_state = blocked}) ->
+ mainloop(Deb, Buf, BufLen, State);
+recvloop(Deb, Buf, BufLen, State = #v1{connection_state = {become, F}}) ->
+ throw({become, F(Deb, Buf, BufLen, State)});
+recvloop(Deb, Buf, BufLen, State = #v1{sock = Sock, recv_len = RecvLen})
+ when BufLen < RecvLen ->
+ case rabbit_net:setopts(Sock, [{active, once}]) of
+ ok -> mainloop(Deb, Buf, BufLen,
+ State#v1{pending_recv = true});
+ {error, Reason} -> stop(Reason, State)
+ end;
+recvloop(Deb, [B], _BufLen, State) ->
+ {Rest, State1} = handle_input(State#v1.callback, B, State),
+ recvloop(Deb, [Rest], size(Rest), State1);
+recvloop(Deb, Buf, BufLen, State = #v1{recv_len = RecvLen}) ->
+ {DataLRev, RestLRev} = binlist_split(BufLen - RecvLen, Buf, []),
+ Data = list_to_binary(lists:reverse(DataLRev)),
+ {<<>>, State1} = handle_input(State#v1.callback, Data, State),
+ recvloop(Deb, lists:reverse(RestLRev), BufLen - RecvLen, State1).
+
+binlist_split(0, L, Acc) ->
+ {L, Acc};
+binlist_split(Len, L, [Acc0|Acc]) when Len < 0 ->
+ {H, T} = split_binary(Acc0, -Len),
+ {[H|L], [T|Acc]};
+binlist_split(Len, [H|T], Acc) ->
+ binlist_split(Len - size(H), T, [H|Acc]).
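+
+%% Worked example (illustrative): Buf holds binaries newest-first, so with
+%% BufLen = 6 and RecvLen = 4, recvloop/4 above calls
+%% binlist_split(2, [<<5,6>>, <<3,4>>, <<1,2>>], [])
+%% -> {[<<3,4>>, <<1,2>>], [<<5,6>>]}
+%% reversing and concatenating the first element yields the oldest four
+%% bytes (<<1,2,3,4>>) to consume; the second element is the new buffer.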
+
+-spec mainloop(_,[binary()], non_neg_integer(), #v1{}) -> any().
+
+mainloop(Deb, Buf, BufLen, State = #v1{sock = Sock,
+ connection_state = CS,
+ connection = #connection{
+ name = ConnName}}) ->
+ Recv = rabbit_net:recv(Sock),
+ case CS of
+ pre_init when Buf =:= [] ->
+ %% We only log incoming connections when either the
+ %% first byte was received or there was an error (e.g. a
+ %% timeout).
+ %%
+ %% The goal is to not log TCP healthchecks (a connection
+ %% with no data received) unless specified otherwise.
+ Fmt = "accepting AMQP connection ~p (~s)~n",
+ Args = [self(), ConnName],
+ case Recv of
+ closed -> rabbit_log_connection:debug(Fmt, Args);
+ _ -> rabbit_log_connection:info(Fmt, Args)
+ end;
+ _ ->
+ ok
+ end,
+ case Recv of
+ {data, Data} ->
+ recvloop(Deb, [Data | Buf], BufLen + size(Data),
+ State#v1{pending_recv = false});
+ closed when State#v1.connection_state =:= closed ->
+ State;
+ closed when CS =:= pre_init andalso Buf =:= [] ->
+ stop(tcp_healthcheck, State);
+ closed ->
+ stop(closed, State);
+ {other, {heartbeat_send_error, Reason}} ->
+ %% The only portable way to detect a disconnect on a blocked
+ %% connection is to wait for a heartbeat send failure.
+ stop(Reason, State);
+ {error, Reason} ->
+ stop(Reason, State);
+ {other, {system, From, Request}} ->
+ sys:handle_system_msg(Request, From, State#v1.parent,
+ ?MODULE, Deb, {Buf, BufLen, State});
+ {other, Other} ->
+ case handle_other(Other, State) of
+ stop -> State;
+ NewState -> recvloop(Deb, Buf, BufLen, NewState)
+ end
+ end.
+
+-spec stop(_, #v1{}) -> no_return().
+stop(tcp_healthcheck, State) ->
+ %% The connection was closed before any packet was received. It's
+ %% probably a load-balancer healthcheck: don't consider this a
+ %% failure.
+ maybe_emit_stats(State),
+ throw(connection_closed_with_no_data_received);
+stop(closed, State) ->
+ maybe_emit_stats(State),
+ throw({connection_closed_abruptly, State});
+stop(Reason, State) ->
+ maybe_emit_stats(State),
+ throw({inet_error, Reason}).
+
+handle_other({conserve_resources, Source, Conserve},
+ State = #v1{throttle = Throttle = #throttle{blocked_by = Blockers}}) ->
+ Resource = {resource, Source},
+ Blockers1 = case Conserve of
+ true -> sets:add_element(Resource, Blockers);
+ false -> sets:del_element(Resource, Blockers)
+ end,
+ control_throttle(State#v1{throttle = Throttle#throttle{blocked_by = Blockers1}});
+handle_other({channel_closing, ChPid}, State) ->
+ ok = rabbit_channel:ready_for_close(ChPid),
+ {_, State1} = channel_cleanup(ChPid, State),
+ maybe_close(control_throttle(State1));
+handle_other({'EXIT', Parent, normal}, State = #v1{parent = Parent}) ->
+ %% rabbitmq/rabbitmq-server#544
+ %% The connection port process has exited due to the TCP socket being closed.
+ %% Handle this case in the same manner as receiving {error, closed}
+ stop(closed, State);
+handle_other({'EXIT', Parent, Reason}, State = #v1{parent = Parent}) ->
+ Msg = io_lib:format("broker forced connection closure with reason '~w'", [Reason]),
+ terminate(Msg, State),
+ %% this is what we are expected to do according to
+ %% https://www.erlang.org/doc/man/sys.html
+ %%
+ %% If we wanted to be *really* nice we should wait for a while for
+ %% clients to close the socket at their end, just as we do in the
+ %% ordinary error case. However, since this termination is
+ %% initiated by our parent it is probably more important to exit
+ %% quickly.
+ maybe_emit_stats(State),
+ exit(Reason);
+handle_other({channel_exit, _Channel, E = {writer, send_failed, _E}}, State) ->
+ maybe_emit_stats(State),
+ throw(E);
+handle_other({channel_exit, Channel, Reason}, State) ->
+ handle_exception(State, Channel, Reason);
+handle_other({'DOWN', _MRef, process, ChPid, Reason}, State) ->
+ handle_dependent_exit(ChPid, Reason, State);
+handle_other(terminate_connection, State) ->
+ maybe_emit_stats(State),
+ stop;
+handle_other(handshake_timeout, State)
+ when ?IS_RUNNING(State) orelse ?IS_STOPPING(State) ->
+ State;
+handle_other(handshake_timeout, State) ->
+ maybe_emit_stats(State),
+ throw({handshake_timeout, State#v1.callback});
+handle_other(heartbeat_timeout, State = #v1{connection_state = closed}) ->
+ State;
+handle_other(heartbeat_timeout,
+ State = #v1{connection = #connection{timeout_sec = T}}) ->
+ maybe_emit_stats(State),
+ throw({heartbeat_timeout, T});
+handle_other({'$gen_call', From, {shutdown, Explanation}}, State) ->
+ {ForceTermination, NewState} = terminate(Explanation, State),
+ gen_server:reply(From, ok),
+ case ForceTermination of
+ force -> stop;
+ normal -> NewState
+ end;
+handle_other({'$gen_call', From, info}, State) ->
+ gen_server:reply(From, infos(?INFO_KEYS, State)),
+ State;
+handle_other({'$gen_call', From, {info, Items}}, State) ->
+ gen_server:reply(From, try {ok, infos(Items, State)}
+ catch Error -> {error, Error}
+ end),
+ State;
+handle_other({'$gen_cast', {force_event_refresh, Ref}}, State)
+ when ?IS_RUNNING(State) ->
+ rabbit_event:notify(
+ connection_created,
+ augment_infos_with_user_provided_connection_name(
+ [{type, network} | infos(?CREATION_EVENT_KEYS, State)], State),
+ Ref),
+ rabbit_event:init_stats_timer(State, #v1.stats_timer);
+handle_other({'$gen_cast', {force_event_refresh, _Ref}}, State) ->
+ %% Ignore, we will emit a created event once we start running.
+ State;
+handle_other(ensure_stats, State) ->
+ ensure_stats_timer(State);
+handle_other(emit_stats, State) ->
+ emit_stats(State);
+handle_other({bump_credit, Msg}, State) ->
+ %% Here we are receiving credit by some channel process.
+ credit_flow:handle_bump_msg(Msg),
+ control_throttle(State);
+handle_other(Other, State) ->
+ %% internal error -> something worth dying for
+ maybe_emit_stats(State),
+ exit({unexpected_message, Other}).
+
+switch_callback(State, Callback, Length) ->
+ State#v1{callback = Callback, recv_len = Length}.
+
+terminate(Explanation, State) when ?IS_RUNNING(State) ->
+ {normal, handle_exception(State, 0,
+ rabbit_misc:amqp_error(
+ connection_forced, "~s", [Explanation], none))};
+terminate(_Explanation, State) ->
+ {force, State}.
+
+send_blocked(#v1{connection = #connection{protocol = Protocol,
+ capabilities = Capabilities},
+ sock = Sock}, Reason) ->
+ case rabbit_misc:table_lookup(Capabilities, <<"connection.blocked">>) of
+ {bool, true} ->
+ ok = send_on_channel0(Sock, #'connection.blocked'{reason = Reason},
+ Protocol);
+ _ ->
+ ok
+ end.
+
+send_unblocked(#v1{connection = #connection{protocol = Protocol,
+ capabilities = Capabilities},
+ sock = Sock}) ->
+ case rabbit_misc:table_lookup(Capabilities, <<"connection.blocked">>) of
+ {bool, true} ->
+ ok = send_on_channel0(Sock, #'connection.unblocked'{}, Protocol);
+ _ ->
+ ok
+ end.
+
+%%--------------------------------------------------------------------------
+%% error handling / termination
+
+close_connection(State = #v1{queue_collector = Collector,
+ connection = #connection{
+ timeout_sec = TimeoutSec}}) ->
+ %% The spec says "Exclusive queues may only be accessed by the
+ %% current connection, and are deleted when that connection
+ %% closes." This does not strictly imply synchrony, but in
+ %% practice it seems to be what people assume.
+ clean_up_exclusive_queues(Collector),
+ %% We terminate the connection after the specified interval, but
+ %% no later than ?CLOSING_TIMEOUT seconds.
+ erlang:send_after((if TimeoutSec > 0 andalso
+ TimeoutSec < ?CLOSING_TIMEOUT -> TimeoutSec;
+ true -> ?CLOSING_TIMEOUT
+ end) * 1000, self(), terminate_connection),
+ State#v1{connection_state = closed}.
+
+%% The queue collector will be undefined when connection
+%% tuning was never performed or didn't finish. In such cases
+%% there's also nothing to clean up.
+clean_up_exclusive_queues(undefined) ->
+ ok;
+clean_up_exclusive_queues(Collector) ->
+ rabbit_queue_collector:delete_all(Collector).
+
+handle_dependent_exit(ChPid, Reason, State) ->
+ {Channel, State1} = channel_cleanup(ChPid, State),
+ case {Channel, termination_kind(Reason)} of
+ {undefined, controlled} -> State1;
+ {undefined, uncontrolled} -> handle_uncontrolled_channel_close(ChPid),
+ exit({abnormal_dependent_exit,
+ ChPid, Reason});
+ {_, controlled} -> maybe_close(control_throttle(State1));
+ {_, uncontrolled} -> handle_uncontrolled_channel_close(ChPid),
+ State2 = handle_exception(
+ State1, Channel, Reason),
+ maybe_close(control_throttle(State2))
+ end.
+
+terminate_channels(#v1{channel_count = 0} = State) ->
+ State;
+terminate_channels(#v1{channel_count = ChannelCount} = State) ->
+ lists:foreach(fun rabbit_channel:shutdown/1, all_channels()),
+ Timeout = 1000 * ?CHANNEL_TERMINATION_TIMEOUT * ChannelCount,
+ TimerRef = erlang:send_after(Timeout, self(), cancel_wait),
+ wait_for_channel_termination(ChannelCount, TimerRef, State).
+
+wait_for_channel_termination(0, TimerRef, State) ->
+ case erlang:cancel_timer(TimerRef) of
+ false -> receive
+ cancel_wait -> State
+ end;
+ _ -> State
+ end;
+wait_for_channel_termination(N, TimerRef,
+ State = #v1{connection_state = CS,
+ connection = #connection{
+ log_name = ConnName,
+ user = User,
+ vhost = VHost},
+ sock = Sock}) ->
+ receive
+ {'DOWN', _MRef, process, ChPid, Reason} ->
+ {Channel, State1} = channel_cleanup(ChPid, State),
+ case {Channel, termination_kind(Reason)} of
+ {undefined, _} ->
+ exit({abnormal_dependent_exit, ChPid, Reason});
+ {_, controlled} ->
+ wait_for_channel_termination(N-1, TimerRef, State1);
+ {_, uncontrolled} ->
+ rabbit_log_connection:error(
+ "Error on AMQP connection ~p (~s, vhost: '~s',"
+ " user: '~s', state: ~p), channel ~p:"
+ "error while terminating:~n~p~n",
+ [self(), ConnName, VHost, User#user.username,
+ CS, Channel, Reason]),
+ handle_uncontrolled_channel_close(ChPid),
+ wait_for_channel_termination(N-1, TimerRef, State1)
+ end;
+ {'EXIT', Sock, _Reason} ->
+ clean_up_all_channels(State),
+ exit(normal);
+ cancel_wait ->
+ exit(channel_termination_timeout)
+ end.
+
+maybe_close(State = #v1{connection_state = closing,
+ channel_count = 0,
+ connection = #connection{protocol = Protocol},
+ sock = Sock}) ->
+ NewState = close_connection(State),
+ ok = send_on_channel0(Sock, #'connection.close_ok'{}, Protocol),
+ NewState;
+maybe_close(State) ->
+ State.
+
+termination_kind(normal) -> controlled;
+termination_kind(_) -> uncontrolled.
+
+format_hard_error(#amqp_error{name = N, explanation = E, method = M}) ->
+ io_lib:format("operation ~s caused a connection exception ~s: ~p", [M, N, E]);
+format_hard_error(Reason) ->
+ case io_lib:deep_char_list(Reason) of
+ true -> Reason;
+ false -> rabbit_misc:format("~p", [Reason])
+ end.
+
+log_hard_error(#v1{connection_state = CS,
+ connection = #connection{
+ log_name = ConnName,
+ user = User,
+ vhost = VHost}}, Channel, Reason) ->
+ rabbit_log_connection:error(
+ "Error on AMQP connection ~p (~s, vhost: '~s',"
+ " user: '~s', state: ~p), channel ~p:~n ~s~n",
+ [self(), ConnName, VHost, User#user.username, CS, Channel, format_hard_error(Reason)]).
+
+handle_exception(State = #v1{connection_state = closed}, Channel, Reason) ->
+ log_hard_error(State, Channel, Reason),
+ State;
+handle_exception(State = #v1{connection = #connection{protocol = Protocol},
+ connection_state = CS},
+ Channel, Reason)
+ when ?IS_RUNNING(State) orelse CS =:= closing ->
+ respond_and_close(State, Channel, Protocol, Reason, Reason);
+%% authentication failure
+handle_exception(State = #v1{connection = #connection{protocol = Protocol,
+ log_name = ConnName,
+ capabilities = Capabilities},
+ connection_state = starting},
+ Channel, Reason = #amqp_error{name = access_refused,
+ explanation = ErrMsg}) ->
+ rabbit_log_connection:error(
+ "Error on AMQP connection ~p (~s, state: ~p):~n~s~n",
+ [self(), ConnName, starting, ErrMsg]),
+ %% respect authentication failure notification capability
+ case rabbit_misc:table_lookup(Capabilities,
+ <<"authentication_failure_close">>) of
+ {bool, true} ->
+ send_error_on_channel0_and_close(Channel, Protocol, Reason, State);
+ _ ->
+ close_connection(terminate_channels(State))
+ end;
+%% when loopback-only user tries to connect from a non-local host
+%% when user tries to access a vhost it has no permissions for
+handle_exception(State = #v1{connection = #connection{protocol = Protocol,
+ log_name = ConnName,
+ user = User},
+ connection_state = opening},
+ Channel, Reason = #amqp_error{name = not_allowed,
+ explanation = ErrMsg}) ->
+ rabbit_log_connection:error(
+ "Error on AMQP connection ~p (~s, user: '~s', state: ~p):~n~s~n",
+ [self(), ConnName, User#user.username, opening, ErrMsg]),
+ send_error_on_channel0_and_close(Channel, Protocol, Reason, State);
+handle_exception(State = #v1{connection = #connection{protocol = Protocol},
+ connection_state = CS = opening},
+ Channel, Reason = #amqp_error{}) ->
+ respond_and_close(State, Channel, Protocol, Reason,
+ {handshake_error, CS, Reason});
+%% when negotiation fails, e.g. due to channel_max being higher than the
+%% maximum allowed limit
+handle_exception(State = #v1{connection = #connection{protocol = Protocol,
+ log_name = ConnName,
+ user = User},
+ connection_state = tuning},
+ Channel, Reason = #amqp_error{name = not_allowed,
+ explanation = ErrMsg}) ->
+ rabbit_log_connection:error(
+ "Error on AMQP connection ~p (~s,"
+ " user: '~s', state: ~p):~n~s~n",
+ [self(), ConnName, User#user.username, tuning, ErrMsg]),
+ send_error_on_channel0_and_close(Channel, Protocol, Reason, State);
+handle_exception(State, Channel, Reason) ->
+ %% We don't trust the client at this point - force them to wait
+ %% for a bit so they can't DOS us with repeated failed logins etc.
+ timer:sleep(?SILENT_CLOSE_DELAY * 1000),
+ throw({handshake_error, State#v1.connection_state, Channel, Reason}).
+
+%% we've "lost sync" with the client and hence must not accept any
+%% more input
+-spec fatal_frame_error(_, _, _, _, _) -> no_return().
+fatal_frame_error(Error, Type, Channel, Payload, State) ->
+ frame_error(Error, Type, Channel, Payload, State),
+ %% grace period to allow transmission of error
+ timer:sleep(?SILENT_CLOSE_DELAY * 1000),
+ throw(fatal_frame_error).
+
+frame_error(Error, Type, Channel, Payload, State) ->
+ {Str, Bin} = payload_snippet(Payload),
+ handle_exception(State, Channel,
+ rabbit_misc:amqp_error(frame_error,
+ "type ~p, ~s octets = ~p: ~p",
+ [Type, Str, Bin, Error], none)).
+
+unexpected_frame(Type, Channel, Payload, State) ->
+ {Str, Bin} = payload_snippet(Payload),
+ handle_exception(State, Channel,
+ rabbit_misc:amqp_error(unexpected_frame,
+ "type ~p, ~s octets = ~p",
+ [Type, Str, Bin], none)).
+
+payload_snippet(Payload) when size(Payload) =< 16 ->
+ {"all", Payload};
+payload_snippet(<<Snippet:16/binary, _/binary>>) ->
+ {"first 16", Snippet}.
+
+%%--------------------------------------------------------------------------
+
+create_channel(_Channel,
+ #v1{channel_count = ChannelCount,
+ connection = #connection{channel_max = ChannelMax}})
+ when ChannelMax /= 0 andalso ChannelCount >= ChannelMax ->
+ {error, rabbit_misc:amqp_error(
+ not_allowed, "number of channels opened (~w) has reached the "
+ "negotiated channel_max (~w)",
+ [ChannelCount, ChannelMax], 'none')};
+create_channel(Channel,
+ #v1{sock = Sock,
+ queue_collector = Collector,
+ channel_sup_sup_pid = ChanSupSup,
+ channel_count = ChannelCount,
+ connection =
+ #connection{name = Name,
+ protocol = Protocol,
+ frame_max = FrameMax,
+ vhost = VHost,
+ capabilities = Capabilities,
+ user = #user{username = Username} = User}
+ } = State) ->
+ case rabbit_auth_backend_internal:is_over_channel_limit(Username) of
+ false ->
+ {ok, _ChSupPid, {ChPid, AState}} =
+ rabbit_channel_sup_sup:start_channel(
+ ChanSupSup, {tcp, Sock, Channel, FrameMax, self(), Name,
+ Protocol, User, VHost, Capabilities,
+ Collector}),
+ MRef = erlang:monitor(process, ChPid),
+ put({ch_pid, ChPid}, {Channel, MRef}),
+ put({channel, Channel}, {ChPid, AState}),
+ {ok, {ChPid, AState}, State#v1{channel_count = ChannelCount + 1}};
+ {true, Limit} ->
+ {error, rabbit_misc:amqp_error(not_allowed,
+ "number of channels opened for user '~s' has reached "
+ "the maximum allowed user limit of (~w)",
+ [Username, Limit], 'none')}
+ end.
+
+channel_cleanup(ChPid, State = #v1{channel_count = ChannelCount}) ->
+ case get({ch_pid, ChPid}) of
+ undefined -> {undefined, State};
+ {Channel, MRef} -> credit_flow:peer_down(ChPid),
+ erase({channel, Channel}),
+ erase({ch_pid, ChPid}),
+ erlang:demonitor(MRef, [flush]),
+ {Channel, State#v1{channel_count = ChannelCount - 1}}
+ end.
+
+all_channels() -> [ChPid || {{ch_pid, ChPid}, _ChannelMRef} <- get()].
+
+clean_up_all_channels(State) ->
+ CleanupFun = fun(ChPid) ->
+ channel_cleanup(ChPid, State)
+ end,
+ lists:foreach(CleanupFun, all_channels()).
+
+%%--------------------------------------------------------------------------
+
+handle_frame(Type, 0, Payload,
+ State = #v1{connection = #connection{protocol = Protocol}})
+ when ?IS_STOPPING(State) ->
+ case rabbit_command_assembler:analyze_frame(Type, Payload, Protocol) of
+ {method, MethodName, FieldsBin} ->
+ handle_method0(MethodName, FieldsBin, State);
+ _Other -> State
+ end;
+handle_frame(Type, 0, Payload,
+ State = #v1{connection = #connection{protocol = Protocol}}) ->
+ case rabbit_command_assembler:analyze_frame(Type, Payload, Protocol) of
+ error -> frame_error(unknown_frame, Type, 0, Payload, State);
+ heartbeat -> State;
+ {method, MethodName, FieldsBin} ->
+ handle_method0(MethodName, FieldsBin, State);
+ _Other -> unexpected_frame(Type, 0, Payload, State)
+ end;
+handle_frame(Type, Channel, Payload,
+ State = #v1{connection = #connection{protocol = Protocol}})
+ when ?IS_RUNNING(State) ->
+ case rabbit_command_assembler:analyze_frame(Type, Payload, Protocol) of
+ error -> frame_error(unknown_frame, Type, Channel, Payload, State);
+ heartbeat -> unexpected_frame(Type, Channel, Payload, State);
+ Frame -> process_frame(Frame, Channel, State)
+ end;
+handle_frame(_Type, _Channel, _Payload, State) when ?IS_STOPPING(State) ->
+ State;
+handle_frame(Type, Channel, Payload, State) ->
+ unexpected_frame(Type, Channel, Payload, State).
+
+process_frame(Frame, Channel, State) ->
+ ChKey = {channel, Channel},
+ case (case get(ChKey) of
+ undefined -> create_channel(Channel, State);
+ Other -> {ok, Other, State}
+ end) of
+ {error, Error} ->
+ handle_exception(State, Channel, Error);
+ {ok, {ChPid, AState}, State1} ->
+ case rabbit_command_assembler:process(Frame, AState) of
+ {ok, NewAState} ->
+ put(ChKey, {ChPid, NewAState}),
+ post_process_frame(Frame, ChPid, State1);
+ {ok, Method, NewAState} ->
+ rabbit_channel:do(ChPid, Method),
+ put(ChKey, {ChPid, NewAState}),
+ post_process_frame(Frame, ChPid, State1);
+ {ok, Method, Content, NewAState} ->
+ rabbit_channel:do_flow(ChPid, Method, Content),
+ put(ChKey, {ChPid, NewAState}),
+ post_process_frame(Frame, ChPid, control_throttle(State1));
+ {error, Reason} ->
+ handle_exception(State1, Channel, Reason)
+ end
+ end.
+
+post_process_frame({method, 'channel.close_ok', _}, ChPid, State) ->
+ {_, State1} = channel_cleanup(ChPid, State),
+ %% This is not strictly necessary, but more obviously
+ %% correct. Also note that we do not need to call maybe_close/1
+ %% since we cannot possibly be in the 'closing' state.
+ control_throttle(State1);
+post_process_frame({content_header, _, _, _, _}, _ChPid, State) ->
+ publish_received(State);
+post_process_frame({content_body, _}, _ChPid, State) ->
+ publish_received(State);
+post_process_frame(_Frame, _ChPid, State) ->
+ State.
+
+%%--------------------------------------------------------------------------
+
+%% We allow clients to exceed the frame size a little bit since quite
+%% a few get it wrong: off-by-1 or off-by-8 (the empty frame size) are typical.
+-define(FRAME_SIZE_FUDGE, ?EMPTY_FRAME_SIZE).
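+
+%% For example, assuming ?EMPTY_FRAME_SIZE is 8: with frame_max = 131072 a
+%% payload of up to 131072 - 8 + 8 = 131072 octets is still accepted by the
+%% first handle_input/3 clause below before fatal_frame_error/5 fires.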
+
+handle_input(frame_header, <<Type:8,Channel:16,PayloadSize:32, _/binary>>,
+ State = #v1{connection = #connection{frame_max = FrameMax}})
+ when FrameMax /= 0 andalso
+ PayloadSize > FrameMax - ?EMPTY_FRAME_SIZE + ?FRAME_SIZE_FUDGE ->
+ fatal_frame_error(
+ {frame_too_large, PayloadSize, FrameMax - ?EMPTY_FRAME_SIZE},
+ Type, Channel, <<>>, State);
+handle_input(frame_header, <<Type:8,Channel:16,PayloadSize:32,
+ Payload:PayloadSize/binary, ?FRAME_END,
+ Rest/binary>>,
+ State) ->
+ {Rest, ensure_stats_timer(handle_frame(Type, Channel, Payload, State))};
+handle_input(frame_header, <<Type:8,Channel:16,PayloadSize:32, Rest/binary>>,
+ State) ->
+ {Rest, ensure_stats_timer(
+ switch_callback(State,
+ {frame_payload, Type, Channel, PayloadSize},
+ PayloadSize + 1))};
+handle_input({frame_payload, Type, Channel, PayloadSize}, Data, State) ->
+ <<Payload:PayloadSize/binary, EndMarker, Rest/binary>> = Data,
+ case EndMarker of
+ ?FRAME_END -> State1 = handle_frame(Type, Channel, Payload, State),
+ {Rest, switch_callback(State1, frame_header, 7)};
+ _ -> fatal_frame_error({invalid_frame_end_marker, EndMarker},
+ Type, Channel, Payload, State)
+ end;
+handle_input(handshake, <<"AMQP", A, B, C, D, Rest/binary>>, State) ->
+ {Rest, handshake({A, B, C, D}, State)};
+handle_input(handshake, <<Other:8/binary, _/binary>>, #v1{sock = Sock}) ->
+ refuse_connection(Sock, {bad_header, Other});
+handle_input(Callback, Data, _State) ->
+ throw({bad_input, Callback, Data}).
+
+%% The two rules pertaining to version negotiation:
+%%
+%% * If the server cannot support the protocol specified in the
+%% protocol header, it MUST respond with a valid protocol header and
+%% then close the socket connection.
+%%
+%% * The server MUST provide a protocol version that is lower than or
+%% equal to that requested by the client in the protocol header.
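+%%
+%% For example, a 0-9-1 client sends the 8-byte header <<"AMQP",0,0,9,1>>;
+%% handle_input/3 above strips the "AMQP" prefix, so these clauses match on
+%% the remaining {A, B, C, D} tuple.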
+handshake({0, 0, 9, 1}, State) ->
+ start_connection({0, 9, 1}, rabbit_framing_amqp_0_9_1, State);
+
+%% This is the protocol header for 0-9, which we can safely treat as
+%% though it were 0-9-1.
+handshake({1, 1, 0, 9}, State) ->
+ start_connection({0, 9, 0}, rabbit_framing_amqp_0_9_1, State);
+
+%% This is what most clients send for 0-8. The 0-8 spec, confusingly,
+%% defines the version as 8-0.
+handshake({1, 1, 8, 0}, State) ->
+ start_connection({8, 0, 0}, rabbit_framing_amqp_0_8, State);
+
+%% The 0-8 spec as on the AMQP web site actually has this as the
+%% protocol header; some libraries e.g., py-amqplib, send it when they
+%% want 0-8.
+handshake({1, 1, 9, 1}, State) ->
+ start_connection({8, 0, 0}, rabbit_framing_amqp_0_8, State);
+
+%% ... and finally, the 1.0 spec is crystal clear!
+handshake({Id, 1, 0, 0}, State) ->
+ become_1_0(Id, State);
+
+handshake(Vsn, #v1{sock = Sock}) ->
+ refuse_connection(Sock, {bad_version, Vsn}).
+
+%% Offer a protocol version to the client. Connection.start only
+%% includes a major and minor version number; luckily 0-9 and 0-9-1
+%% are similar enough that clients will be happy with either.
+start_connection({ProtocolMajor, ProtocolMinor, _ProtocolRevision},
+ Protocol,
+ State = #v1{sock = Sock, connection = Connection}) ->
+ rabbit_networking:register_connection(self()),
+ Start = #'connection.start'{
+ version_major = ProtocolMajor,
+ version_minor = ProtocolMinor,
+ server_properties = server_properties(Protocol),
+ mechanisms = auth_mechanisms_binary(Sock),
+ locales = <<"en_US">> },
+ ok = send_on_channel0(Sock, Start, Protocol),
+ switch_callback(State#v1{connection = Connection#connection{
+ timeout_sec = ?NORMAL_TIMEOUT,
+ protocol = Protocol},
+ connection_state = starting},
+ frame_header, 7).
+
+-spec refuse_connection(_, _, _) -> no_return().
+refuse_connection(Sock, Exception, {A, B, C, D}) ->
+ ok = inet_op(fun () -> rabbit_net:send(Sock, <<"AMQP",A,B,C,D>>) end),
+ throw(Exception).
+
+-spec refuse_connection(rabbit_net:socket(), any()) -> no_return().
+
+refuse_connection(Sock, Exception) ->
+ refuse_connection(Sock, Exception, {0, 0, 9, 1}).
+
+ensure_stats_timer(State = #v1{connection_state = running}) ->
+ rabbit_event:ensure_stats_timer(State, #v1.stats_timer, emit_stats);
+ensure_stats_timer(State) ->
+ State.
+
+%%--------------------------------------------------------------------------
+
+handle_method0(MethodName, FieldsBin,
+ State = #v1{connection = #connection{protocol = Protocol}}) ->
+ try
+ handle_method0(Protocol:decode_method_fields(MethodName, FieldsBin),
+ State)
+ catch throw:{inet_error, E} when E =:= closed; E =:= enotconn ->
+ maybe_emit_stats(State),
+ throw({connection_closed_abruptly, State});
+ exit:#amqp_error{method = none} = Reason ->
+ handle_exception(State, 0, Reason#amqp_error{method = MethodName});
+ Type:Reason:Stacktrace ->
+ handle_exception(State, 0, {Type, Reason, MethodName, Stacktrace})
+ end.
+
+handle_method0(#'connection.start_ok'{mechanism = Mechanism,
+ response = Response,
+ client_properties = ClientProperties},
+ State0 = #v1{connection_state = starting,
+ connection = Connection0,
+ sock = Sock}) ->
+ AuthMechanism = auth_mechanism_to_module(Mechanism, Sock),
+ Capabilities =
+ case rabbit_misc:table_lookup(ClientProperties, <<"capabilities">>) of
+ {table, Capabilities1} -> Capabilities1;
+ _ -> []
+ end,
+ Connection1 = Connection0#connection{
+ client_properties = ClientProperties,
+ capabilities = Capabilities,
+ auth_mechanism = {Mechanism, AuthMechanism},
+ auth_state = AuthMechanism:init(Sock)},
+ Connection2 = augment_connection_log_name(Connection1),
+ State = State0#v1{connection_state = securing,
+ connection = Connection2},
+    % store the client properties in the process dictionary so they can
+    % be sent later in the connection_closed event
+ put(client_properties, ClientProperties),
+ case user_provided_connection_name(Connection2) of
+ undefined ->
+ undefined;
+ UserProvidedConnectionName ->
+ put(connection_user_provided_name, UserProvidedConnectionName)
+ end,
+ auth_phase(Response, State);
+
+handle_method0(#'connection.secure_ok'{response = Response},
+ State = #v1{connection_state = securing}) ->
+ auth_phase(Response, State);
+
+handle_method0(#'connection.tune_ok'{frame_max = FrameMax,
+ channel_max = ChannelMax,
+ heartbeat = ClientHeartbeat},
+ State = #v1{connection_state = tuning,
+ connection = Connection,
+ helper_sup = SupPid,
+ sock = Sock}) ->
+ ok = validate_negotiated_integer_value(
+ frame_max, ?FRAME_MIN_SIZE, FrameMax),
+ ok = validate_negotiated_integer_value(
+ channel_max, ?CHANNEL_MIN, ChannelMax),
+ {ok, Collector} = rabbit_connection_helper_sup:start_queue_collector(
+ SupPid, Connection#connection.name),
+ Frame = rabbit_binary_generator:build_heartbeat_frame(),
+ Parent = self(),
+ SendFun =
+ fun() ->
+ case catch rabbit_net:send(Sock, Frame) of
+ ok ->
+ ok;
+ {error, Reason} ->
+ Parent ! {heartbeat_send_error, Reason};
+ Unexpected ->
+ Parent ! {heartbeat_send_error, Unexpected}
+ end,
+ ok
+ end,
+ ReceiveFun = fun() -> Parent ! heartbeat_timeout end,
+ Heartbeater = rabbit_heartbeat:start(
+ SupPid, Sock, Connection#connection.name,
+ ClientHeartbeat, SendFun, ClientHeartbeat, ReceiveFun),
+ State#v1{connection_state = opening,
+ connection = Connection#connection{
+ frame_max = FrameMax,
+ channel_max = ChannelMax,
+ timeout_sec = ClientHeartbeat},
+ queue_collector = Collector,
+ heartbeater = Heartbeater};
+
+handle_method0(#'connection.open'{virtual_host = VHost},
+ State = #v1{connection_state = opening,
+ connection = Connection = #connection{
+ log_name = ConnName,
+ user = User = #user{username = Username},
+ protocol = Protocol},
+ helper_sup = SupPid,
+ sock = Sock,
+ throttle = Throttle}) ->
+
+ ok = is_over_vhost_connection_limit(VHost, User),
+ ok = is_over_user_connection_limit(User),
+ ok = rabbit_access_control:check_vhost_access(User, VHost, {socket, Sock}, #{}),
+ ok = is_vhost_alive(VHost, User),
+ NewConnection = Connection#connection{vhost = VHost},
+ ok = send_on_channel0(Sock, #'connection.open_ok'{}, Protocol),
+
+ Alarms = rabbit_alarm:register(self(), {?MODULE, conserve_resources, []}),
+ BlockedBy = sets:from_list([{resource, Alarm} || Alarm <- Alarms]),
+ Throttle1 = Throttle#throttle{blocked_by = BlockedBy},
+
+ {ok, ChannelSupSupPid} =
+ rabbit_connection_helper_sup:start_channel_sup_sup(SupPid),
+ State1 = control_throttle(
+ State#v1{connection_state = running,
+ connection = NewConnection,
+ channel_sup_sup_pid = ChannelSupSupPid,
+ throttle = Throttle1}),
+ Infos = augment_infos_with_user_provided_connection_name(
+ [{type, network} | infos(?CREATION_EVENT_KEYS, State1)],
+ State1
+ ),
+ rabbit_core_metrics:connection_created(proplists:get_value(pid, Infos),
+ Infos),
+ rabbit_event:notify(connection_created, Infos),
+ maybe_emit_stats(State1),
+ rabbit_log_connection:info(
+ "connection ~p (~s): "
+ "user '~s' authenticated and granted access to vhost '~s'~n",
+ [self(), dynamic_connection_name(ConnName), Username, VHost]),
+ State1;
+handle_method0(#'connection.close'{}, State) when ?IS_RUNNING(State) ->
+ lists:foreach(fun rabbit_channel:shutdown/1, all_channels()),
+ maybe_close(State#v1{connection_state = closing});
+handle_method0(#'connection.close'{},
+ State = #v1{connection = #connection{protocol = Protocol},
+ sock = Sock})
+ when ?IS_STOPPING(State) ->
+    %% We're already closed or closing, so we don't need to clean up
+    %% anything.
+ ok = send_on_channel0(Sock, #'connection.close_ok'{}, Protocol),
+ State;
+handle_method0(#'connection.close_ok'{},
+ State = #v1{connection_state = closed}) ->
+ self() ! terminate_connection,
+ State;
+handle_method0(#'connection.update_secret'{new_secret = NewSecret, reason = Reason},
+ State = #v1{connection =
+ #connection{protocol = Protocol,
+ user = User = #user{username = Username},
+ log_name = ConnName} = Conn,
+ sock = Sock}) when ?IS_RUNNING(State) ->
+ rabbit_log_connection:debug(
+ "connection ~p (~s) of user '~s': "
+ "asked to update secret, reason: ~s~n",
+ [self(), dynamic_connection_name(ConnName), Username, Reason]),
+ case rabbit_access_control:update_state(User, NewSecret) of
+ {ok, User1} ->
+ %% User/auth backend state has been updated. Now we can propagate it to channels
+ %% asynchronously and return. All the channels have to do is to update their
+ %% own state.
+ %%
+ %% Any secret update errors coming from the authz backend will be handled in the other branch.
+ %% Therefore we optimistically do no error handling here. MK.
+ lists:foreach(fun(Ch) ->
+ rabbit_log:debug("Updating user/auth backend state for channel ~p", [Ch]),
+ _ = rabbit_channel:update_user_state(Ch, User1)
+ end, all_channels()),
+ ok = send_on_channel0(Sock, #'connection.update_secret_ok'{}, Protocol),
+ rabbit_log_connection:info(
+ "connection ~p (~s): "
+ "user '~s' updated secret, reason: ~s~n",
+ [self(), dynamic_connection_name(ConnName), Username, Reason]),
+ State#v1{connection = Conn#connection{user = User1}};
+ {refused, Message} ->
+ rabbit_log_connection:error("Secret update was refused for user '~p': ~p",
+ [Username, Message]),
+ rabbit_misc:protocol_error(not_allowed, "New secret was refused by one of the backends", []);
+ {error, Message} ->
+ rabbit_log_connection:error("Secret update for user '~p' failed: ~p",
+ [Username, Message]),
+ rabbit_misc:protocol_error(not_allowed,
+ "Secret update failed", [])
+ end;
+handle_method0(_Method, State) when ?IS_STOPPING(State) ->
+ State;
+handle_method0(_Method, #v1{connection_state = S}) ->
+ rabbit_misc:protocol_error(
+ channel_error, "unexpected method in connection state ~w", [S]).
+
+is_vhost_alive(VHostPath, User) ->
+ case rabbit_vhost_sup_sup:is_vhost_alive(VHostPath) of
+ true -> ok;
+ false ->
+ rabbit_misc:protocol_error(internal_error,
+ "access to vhost '~s' refused for user '~s': "
+ "vhost '~s' is down",
+ [VHostPath, User#user.username, VHostPath])
+ end.
+
+is_over_vhost_connection_limit(VHostPath, User) ->
+ try rabbit_vhost_limit:is_over_connection_limit(VHostPath) of
+ false -> ok;
+ {true, Limit} -> rabbit_misc:protocol_error(not_allowed,
+ "access to vhost '~s' refused for user '~s': "
+ "connection limit (~p) is reached",
+ [VHostPath, User#user.username, Limit])
+ catch
+ throw:{error, {no_such_vhost, VHostPath}} ->
+ rabbit_misc:protocol_error(not_allowed, "vhost ~s not found", [VHostPath])
+ end.
+
+is_over_user_connection_limit(#user{username = Username}) ->
+ case rabbit_auth_backend_internal:is_over_connection_limit(Username) of
+ false -> ok;
+ {true, Limit} -> rabbit_misc:protocol_error(not_allowed,
+ "Connection refused for user '~s': "
+ "user connection limit (~p) is reached",
+ [Username, Limit])
+ end.
+
+validate_negotiated_integer_value(Field, Min, ClientValue) ->
+ ServerValue = get_env(Field),
+ if ClientValue /= 0 andalso ClientValue < Min ->
+ fail_negotiation(Field, min, Min, ClientValue);
+ ServerValue /= 0 andalso (ClientValue =:= 0 orelse
+ ClientValue > ServerValue) ->
+ fail_negotiation(Field, max, ServerValue, ClientValue);
+ true ->
+ ok
+ end.
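+
+%% A worked example of the rule above (131072 is just a sample
+%% server-side frame_max): a client frame_max in
+%% [?FRAME_MIN_SIZE, 131072] is accepted; a non-zero value below
+%% ?FRAME_MIN_SIZE fails the 'min' check; 0 (meaning "no limit") or
+%% anything above 131072 fails the 'max' check, because the server
+%% value is non-zero.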
+
+%% keep dialyzer happy
+-spec fail_negotiation(atom(), 'min' | 'max', integer(), integer()) ->
+ no_return().
+fail_negotiation(Field, MinOrMax, ServerValue, ClientValue) ->
+ {S1, S2} = case MinOrMax of
+ min -> {lower, minimum};
+ max -> {higher, maximum}
+ end,
+ ClientValueDetail = get_client_value_detail(Field, ClientValue),
+ rabbit_misc:protocol_error(
+ not_allowed, "negotiated ~w = ~w~s is ~w than the ~w allowed value (~w)",
+ [Field, ClientValue, ClientValueDetail, S1, S2, ServerValue], 'connection.tune').
+
+get_env(Key) ->
+ {ok, Value} = application:get_env(rabbit, Key),
+ Value.
+
+send_on_channel0(Sock, Method, Protocol) ->
+ ok = rabbit_writer:internal_send_command(Sock, 0, Method, Protocol).
+
+auth_mechanism_to_module(TypeBin, Sock) ->
+ case rabbit_registry:binary_to_type(TypeBin) of
+ {error, not_found} ->
+ rabbit_misc:protocol_error(
+ command_invalid, "unknown authentication mechanism '~s'",
+ [TypeBin]);
+ T ->
+ case {lists:member(T, auth_mechanisms(Sock)),
+ rabbit_registry:lookup_module(auth_mechanism, T)} of
+ {true, {ok, Module}} ->
+ Module;
+ _ ->
+ rabbit_misc:protocol_error(
+ command_invalid,
+ "invalid authentication mechanism '~s'", [T])
+ end
+ end.
+
+auth_mechanisms(Sock) ->
+ {ok, Configured} = application:get_env(auth_mechanisms),
+ [Name || {Name, Module} <- rabbit_registry:lookup_all(auth_mechanism),
+ Module:should_offer(Sock), lists:member(Name, Configured)].
+
+auth_mechanisms_binary(Sock) ->
+ list_to_binary(
+ string:join([atom_to_list(A) || A <- auth_mechanisms(Sock)], " ")).
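+
+%% For example (illustrative; the actual list depends on the
+%% auth_mechanisms setting and registered plugins), with PLAIN and
+%% AMQPLAIN offered this produces the space-separated binary advertised
+%% in connection.start:
+%%
+%%   auth_mechanisms_binary(Sock) =:= <<"PLAIN AMQPLAIN">>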
+
+auth_phase(Response,
+ State = #v1{connection = Connection =
+ #connection{protocol = Protocol,
+ auth_mechanism = {Name, AuthMechanism},
+ auth_state = AuthState},
+ sock = Sock}) ->
+ RemoteAddress = list_to_binary(inet:ntoa(Connection#connection.host)),
+ case AuthMechanism:handle_response(Response, AuthState) of
+ {refused, Username, Msg, Args} ->
+ rabbit_core_metrics:auth_attempt_failed(RemoteAddress, Username, amqp091),
+ auth_fail(Username, Msg, Args, Name, State);
+ {protocol_error, Msg, Args} ->
+ rabbit_core_metrics:auth_attempt_failed(RemoteAddress, <<>>, amqp091),
+ notify_auth_result(none, user_authentication_failure,
+ [{error, rabbit_misc:format(Msg, Args)}],
+ State),
+ rabbit_misc:protocol_error(syntax_error, Msg, Args);
+ {challenge, Challenge, AuthState1} ->
+ rabbit_core_metrics:auth_attempt_succeeded(RemoteAddress, <<>>, amqp091),
+ Secure = #'connection.secure'{challenge = Challenge},
+ ok = send_on_channel0(Sock, Secure, Protocol),
+ State#v1{connection = Connection#connection{
+ auth_state = AuthState1}};
+ {ok, User = #user{username = Username}} ->
+ case rabbit_access_control:check_user_loopback(Username, Sock) of
+ ok ->
+ rabbit_core_metrics:auth_attempt_succeeded(RemoteAddress, Username, amqp091),
+ notify_auth_result(Username, user_authentication_success,
+ [], State);
+ not_allowed ->
+ rabbit_core_metrics:auth_attempt_failed(RemoteAddress, Username, amqp091),
+ auth_fail(Username, "user '~s' can only connect via "
+ "localhost", [Username], Name, State)
+ end,
+ Tune = #'connection.tune'{frame_max = get_env(frame_max),
+ channel_max = get_env(channel_max),
+ heartbeat = get_env(heartbeat)},
+ ok = send_on_channel0(Sock, Tune, Protocol),
+ State#v1{connection_state = tuning,
+ connection = Connection#connection{user = User,
+ auth_state = none}}
+ end.
+
+-spec auth_fail
+ (rabbit_types:username() | none, string(), [any()], binary(), #v1{}) ->
+ no_return().
+
+auth_fail(Username, Msg, Args, AuthName,
+ State = #v1{connection = #connection{protocol = Protocol,
+ capabilities = Capabilities}}) ->
+ notify_auth_result(Username, user_authentication_failure,
+ [{error, rabbit_misc:format(Msg, Args)}], State),
+ AmqpError = rabbit_misc:amqp_error(
+ access_refused, "~s login refused: ~s",
+ [AuthName, io_lib:format(Msg, Args)], none),
+ case rabbit_misc:table_lookup(Capabilities,
+ <<"authentication_failure_close">>) of
+ {bool, true} ->
+ SafeMsg = io_lib:format(
+ "Login was refused using authentication "
+ "mechanism ~s. For details see the broker "
+ "logfile.", [AuthName]),
+ AmqpError1 = AmqpError#amqp_error{explanation = SafeMsg},
+ {0, CloseMethod} = rabbit_binary_generator:map_exception(
+ 0, AmqpError1, Protocol),
+ ok = send_on_channel0(State#v1.sock, CloseMethod, Protocol);
+ _ -> ok
+ end,
+ rabbit_misc:protocol_error(AmqpError).
+
+notify_auth_result(Username, AuthResult, ExtraProps, State) ->
+ EventProps = [{connection_type, network},
+ {name, case Username of none -> ''; _ -> Username end}] ++
+ [case Item of
+ name -> {connection_name, i(name, State)};
+ _ -> {Item, i(Item, State)}
+ end || Item <- ?AUTH_NOTIFICATION_INFO_KEYS] ++
+ ExtraProps,
+ rabbit_event:notify(AuthResult, [P || {_, V} = P <- EventProps, V =/= '']).
+
+%%--------------------------------------------------------------------------
+
+infos(Items, State) -> [{Item, i(Item, State)} || Item <- Items].
+
+i(pid, #v1{}) -> self();
+i(node, #v1{}) -> node();
+i(SockStat, S) when SockStat =:= recv_oct;
+ SockStat =:= recv_cnt;
+ SockStat =:= send_oct;
+ SockStat =:= send_cnt;
+ SockStat =:= send_pend ->
+ socket_info(fun (Sock) -> rabbit_net:getstat(Sock, [SockStat]) end,
+ fun ([{_, I}]) -> I end, S);
+i(ssl, #v1{sock = Sock}) -> rabbit_net:is_ssl(Sock);
+i(ssl_protocol, S) -> ssl_info(fun ({P, _}) -> P end, S);
+i(ssl_key_exchange, S) -> ssl_info(fun ({_, {K, _, _}}) -> K end, S);
+i(ssl_cipher, S) -> ssl_info(fun ({_, {_, C, _}}) -> C end, S);
+i(ssl_hash, S) -> ssl_info(fun ({_, {_, _, H}}) -> H end, S);
+i(peer_cert_issuer, S) -> cert_info(fun rabbit_ssl:peer_cert_issuer/1, S);
+i(peer_cert_subject, S) -> cert_info(fun rabbit_ssl:peer_cert_subject/1, S);
+i(peer_cert_validity, S) -> cert_info(fun rabbit_ssl:peer_cert_validity/1, S);
+i(channels, #v1{channel_count = ChannelCount}) -> ChannelCount;
+i(state, #v1{connection_state = ConnectionState,
+ throttle = #throttle{blocked_by = Reasons,
+ last_blocked_at = T} = Throttle}) ->
+ %% not throttled by resource or other longer-term reasons
+ %% TODO: come up with a sensible function name
+ case sets:size(sets:del_element(flow, Reasons)) =:= 0 andalso
+ (credit_flow:blocked() %% throttled by flow now
+ orelse %% throttled by flow recently
+ (is_blocked_by_flow(Throttle) andalso T =/= never andalso
+ erlang:convert_time_unit(erlang:monotonic_time() - T,
+ native,
+ micro_seconds) < 5000000)) of
+ true -> flow;
+ false ->
+ case {has_reasons_to_block(Throttle), ConnectionState} of
+ %% blocked
+ {_, blocked} -> blocked;
+ %% not yet blocked (there were no publishes)
+ {true, running} -> blocking;
+ %% not blocked
+ {false, _} -> ConnectionState;
+ %% catch all to be defensive
+ _ -> ConnectionState
+ end
+ end;
+i(garbage_collection, _State) ->
+ rabbit_misc:get_gc_info(self());
+i(reductions, _State) ->
+ {reductions, Reductions} = erlang:process_info(self(), reductions),
+ Reductions;
+i(Item, #v1{connection = Conn}) -> ic(Item, Conn).
+
+ic(name, #connection{name = Name}) -> Name;
+ic(host, #connection{host = Host}) -> Host;
+ic(peer_host, #connection{peer_host = PeerHost}) -> PeerHost;
+ic(port, #connection{port = Port}) -> Port;
+ic(peer_port, #connection{peer_port = PeerPort}) -> PeerPort;
+ic(protocol, #connection{protocol = none}) -> none;
+ic(protocol, #connection{protocol = P}) -> P:version();
+ic(user, #connection{user = none}) -> '';
+ic(user, #connection{user = U}) -> U#user.username;
+ic(user_who_performed_action, C) -> ic(user, C);
+ic(vhost, #connection{vhost = VHost}) -> VHost;
+ic(timeout, #connection{timeout_sec = Timeout}) -> Timeout;
+ic(frame_max, #connection{frame_max = FrameMax}) -> FrameMax;
+ic(channel_max, #connection{channel_max = ChMax}) -> ChMax;
+ic(client_properties, #connection{client_properties = CP}) -> CP;
+ic(auth_mechanism, #connection{auth_mechanism = none}) -> none;
+ic(auth_mechanism, #connection{auth_mechanism = {Name, _Mod}}) -> Name;
+ic(connected_at, #connection{connected_at = T}) -> T;
+ic(Item, #connection{}) -> throw({bad_argument, Item}).
+
+socket_info(Get, Select, #v1{sock = Sock}) ->
+ case Get(Sock) of
+ {ok, T} -> case Select(T) of
+ N when is_number(N) -> N;
+ _ -> 0
+ end;
+ {error, _} -> 0
+ end.
+
+ssl_info(F, #v1{sock = Sock}) ->
+ case rabbit_net:ssl_info(Sock) of
+ nossl -> '';
+ {error, _} -> '';
+ {ok, Items} ->
+ P = proplists:get_value(protocol, Items),
+ #{cipher := C,
+ key_exchange := K,
+ mac := H} = proplists:get_value(selected_cipher_suite, Items),
+ F({P, {K, C, H}})
+ end.
+
+cert_info(F, #v1{sock = Sock}) ->
+ case rabbit_net:peercert(Sock) of
+ nossl -> '';
+ {error, _} -> '';
+ {ok, Cert} -> list_to_binary(F(Cert))
+ end.
+
+maybe_emit_stats(State) ->
+ rabbit_event:if_enabled(State, #v1.stats_timer,
+ fun() -> emit_stats(State) end).
+
+emit_stats(State) ->
+ [{_, Pid}, {_, Recv_oct}, {_, Send_oct}, {_, Reductions}] = I
+ = infos(?SIMPLE_METRICS, State),
+ Infos = infos(?OTHER_METRICS, State),
+ rabbit_core_metrics:connection_stats(Pid, Infos),
+ rabbit_core_metrics:connection_stats(Pid, Recv_oct, Send_oct, Reductions),
+ rabbit_event:notify(connection_stats, Infos ++ I),
+ State1 = rabbit_event:reset_stats_timer(State, #v1.stats_timer),
+ ensure_stats_timer(State1).
+
+%% 1.0 stub
+-spec become_1_0(non_neg_integer(), #v1{}) -> no_return().
+
+become_1_0(Id, State = #v1{sock = Sock}) ->
+ case code:is_loaded(rabbit_amqp1_0_reader) of
+ false -> refuse_connection(Sock, amqp1_0_plugin_not_enabled);
+ _ -> Mode = case Id of
+ 0 -> amqp;
+ 3 -> sasl;
+ _ -> refuse_connection(
+ Sock, {unsupported_amqp1_0_protocol_id, Id},
+ {3, 1, 0, 0})
+ end,
+ F = fun (_Deb, Buf, BufLen, S) ->
+ {rabbit_amqp1_0_reader, init,
+ [Mode, pack_for_1_0(Buf, BufLen, S)]}
+ end,
+ State#v1{connection_state = {become, F}}
+ end.
+
+pack_for_1_0(Buf, BufLen, #v1{parent = Parent,
+ sock = Sock,
+ recv_len = RecvLen,
+ pending_recv = PendingRecv,
+ helper_sup = SupPid,
+ proxy_socket = ProxySocket}) ->
+ {Parent, Sock, RecvLen, PendingRecv, SupPid, Buf, BufLen, ProxySocket}.
+
+respond_and_close(State, Channel, Protocol, Reason, LogErr) ->
+ log_hard_error(State, Channel, LogErr),
+ send_error_on_channel0_and_close(Channel, Protocol, Reason, State).
+
+send_error_on_channel0_and_close(Channel, Protocol, Reason, State) ->
+ {0, CloseMethod} =
+ rabbit_binary_generator:map_exception(Channel, Reason, Protocol),
+ State1 = close_connection(terminate_channels(State)),
+ ok = send_on_channel0(State#v1.sock, CloseMethod, Protocol),
+ State1.
+
+%%
+%% Publisher throttling
+%%
+
+blocked_by_message(#throttle{blocked_by = Reasons}) ->
+ %% we don't want to report internal flow as a reason here since
+ %% it is entirely transient
+ Reasons1 = sets:del_element(flow, Reasons),
+ RStr = string:join([format_blocked_by(R) || R <- sets:to_list(Reasons1)], " & "),
+ list_to_binary(rabbit_misc:format("low on ~s", [RStr])).
+
+format_blocked_by({resource, memory}) -> "memory";
+format_blocked_by({resource, disk}) -> "disk";
+format_blocked_by({resource, disc}) -> "disk".
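+
+%% Putting the two functions above together: a connection blocked by
+%% both memory and disk alarms yields
+%%
+%%   blocked_by_message(Throttle) =:= <<"low on memory & disk">>
+%%
+%% (reason order is unspecified, since the reasons live in a set).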
+
+update_last_blocked_at(Throttle) ->
+ Throttle#throttle{last_blocked_at = erlang:monotonic_time()}.
+
+connection_blocked_message_sent(
+ #throttle{connection_blocked_message_sent = BS}) -> BS.
+
+should_send_blocked(Throttle = #throttle{blocked_by = Reasons}) ->
+ should_block(Throttle)
+ andalso
+ sets:size(sets:del_element(flow, Reasons)) =/= 0
+ andalso
+ not connection_blocked_message_sent(Throttle).
+
+should_send_unblocked(Throttle = #throttle{blocked_by = Reasons}) ->
+ connection_blocked_message_sent(Throttle)
+ andalso
+ sets:size(sets:del_element(flow, Reasons)) == 0.
+
+%% Returns true if we have a reason to block
+%% this connection.
+has_reasons_to_block(#throttle{blocked_by = Reasons}) ->
+ sets:size(Reasons) > 0.
+
+is_blocked_by_flow(#throttle{blocked_by = Reasons}) ->
+ sets:is_element(flow, Reasons).
+
+should_block(#throttle{should_block = Val}) -> Val.
+
+should_block_connection(Throttle) ->
+ should_block(Throttle) andalso has_reasons_to_block(Throttle).
+
+should_unblock_connection(Throttle) ->
+ not should_block_connection(Throttle).
+
+maybe_block(State = #v1{connection_state = CS, throttle = Throttle}) ->
+ case should_block_connection(Throttle) of
+ true ->
+ State1 = State#v1{connection_state = blocked,
+ throttle = update_last_blocked_at(Throttle)},
+ case CS of
+ running ->
+ ok = rabbit_heartbeat:pause_monitor(State#v1.heartbeater);
+ _ -> ok
+ end,
+ maybe_send_blocked_or_unblocked(State1);
+ false -> State
+ end.
+
+maybe_unblock(State = #v1{throttle = Throttle}) ->
+ case should_unblock_connection(Throttle) of
+ true ->
+ ok = rabbit_heartbeat:resume_monitor(State#v1.heartbeater),
+ State1 = State#v1{connection_state = running,
+ throttle = Throttle#throttle{should_block = false}},
+ maybe_send_unblocked(State1);
+ false -> State
+ end.
+
+maybe_send_unblocked(State = #v1{throttle = Throttle}) ->
+ case should_send_unblocked(Throttle) of
+ true ->
+ ok = send_unblocked(State),
+ State#v1{throttle =
+ Throttle#throttle{connection_blocked_message_sent = false}};
+ false -> State
+ end.
+
+maybe_send_blocked_or_unblocked(State = #v1{throttle = Throttle}) ->
+ case should_send_blocked(Throttle) of
+ true ->
+ ok = send_blocked(State, blocked_by_message(Throttle)),
+ State#v1{throttle =
+ Throttle#throttle{connection_blocked_message_sent = true}};
+ false -> maybe_send_unblocked(State)
+ end.
+
+publish_received(State = #v1{throttle = Throttle}) ->
+ case has_reasons_to_block(Throttle) of
+ false -> State;
+ true ->
+ Throttle1 = Throttle#throttle{should_block = true},
+ maybe_block(State#v1{throttle = Throttle1})
+ end.
+
+control_throttle(State = #v1{connection_state = CS,
+ throttle = #throttle{blocked_by = Reasons} = Throttle}) ->
+ Throttle1 = case credit_flow:blocked() of
+ true ->
+ Throttle#throttle{blocked_by = sets:add_element(flow, Reasons)};
+ false ->
+ Throttle#throttle{blocked_by = sets:del_element(flow, Reasons)}
+ end,
+ State1 = State#v1{throttle = Throttle1},
+ case CS of
+ running -> maybe_block(State1);
+ %% unblock or re-enable blocking
+ blocked -> maybe_block(maybe_unblock(State1));
+ _ -> State1
+ end.
+
+augment_connection_log_name(#connection{name = Name} = Connection) ->
+ case user_provided_connection_name(Connection) of
+ undefined ->
+ Connection;
+ UserSpecifiedName ->
+ LogName = <<Name/binary, " - ", UserSpecifiedName/binary>>,
+ rabbit_log_connection:info("Connection ~p (~s) has a client-provided name: ~s~n", [self(), Name, UserSpecifiedName]),
+ ?store_proc_name(LogName),
+ Connection#connection{log_name = LogName}
+ end.
+
+augment_infos_with_user_provided_connection_name(Infos, #v1{connection = Connection}) ->
+ case user_provided_connection_name(Connection) of
+ undefined ->
+ Infos;
+ UserProvidedConnectionName ->
+ [{user_provided_name, UserProvidedConnectionName} | Infos]
+ end.
+
+user_provided_connection_name(#connection{client_properties = ClientProperties}) ->
+ case rabbit_misc:table_lookup(ClientProperties, <<"connection_name">>) of
+ {longstr, UserSpecifiedName} ->
+ UserSpecifiedName;
+ _ ->
+ undefined
+ end.
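+
+%% Example (the name is made up): a client that sets the optional
+%% "connection_name" client property, e.g.
+%%
+%%   [{<<"connection_name">>, longstr, <<"orders-service">>}]
+%%
+%% gets <<"orders-service">> back here; a missing or non-longstr entry
+%% yields 'undefined'.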
+
+dynamic_connection_name(Default) ->
+ case rabbit_misc:get_proc_name() of
+ {ok, Name} ->
+ Name;
+ _ ->
+ Default
+ end.
+
+handle_uncontrolled_channel_close(ChPid) ->
+ rabbit_core_metrics:channel_closed(ChPid),
+ rabbit_event:notify(channel_closed, [{pid, ChPid}]).
+
+-spec get_client_value_detail(atom(), integer()) -> string().
+get_client_value_detail(channel_max, 0) ->
+ " (no limit)";
+get_client_value_detail(_Field, _ClientValue) ->
+ "".
diff --git a/deps/rabbit/src/rabbit_recovery_terms.erl b/deps/rabbit/src/rabbit_recovery_terms.erl
new file mode 100644
index 0000000000..d89de9ece3
--- /dev/null
+++ b/deps/rabbit/src/rabbit_recovery_terms.erl
@@ -0,0 +1,240 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+%% We use a gen_server simply so that during the terminate/2 call
+%% (i.e., during shutdown), we can sync/flush the dets table to disk.
+
+-module(rabbit_recovery_terms).
+
+-behaviour(gen_server).
+
+-export([start/1, stop/1, store/3, read/2, clear/1]).
+
+-export([start_link/1]).
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
+ terminate/2, code_change/3]).
+
+-export([upgrade_recovery_terms/0, persistent_bytes/0]).
+-export([open_global_table/0, close_global_table/0,
+ read_global/1, delete_global_table/0]).
+-export([open_table/1, close_table/1]).
+
+-rabbit_upgrade({upgrade_recovery_terms, local, []}).
+-rabbit_upgrade({persistent_bytes, local, [upgrade_recovery_terms]}).
+
+-include("rabbit.hrl").
+
+%%----------------------------------------------------------------------------
+
+-spec start(rabbit_types:vhost()) -> rabbit_types:ok_or_error(term()).
+
+start(VHost) ->
+ case rabbit_vhost_sup_sup:get_vhost_sup(VHost) of
+ {ok, VHostSup} ->
+ {ok, _} = supervisor2:start_child(
+ VHostSup,
+ {?MODULE,
+ {?MODULE, start_link, [VHost]},
+ transient, ?WORKER_WAIT, worker,
+ [?MODULE]});
+ %% we can get here if a vhost is added and removed concurrently
+ %% e.g. some integration tests do it
+ {error, {no_such_vhost, VHost}} ->
+ rabbit_log:error("Failed to start a recovery terms manager for vhost ~s: vhost no longer exists!",
+ [VHost])
+ end,
+ ok.
+
+-spec stop(rabbit_types:vhost()) -> rabbit_types:ok_or_error(term()).
+
+stop(VHost) ->
+ case rabbit_vhost_sup_sup:get_vhost_sup(VHost) of
+ {ok, VHostSup} ->
+ case supervisor:terminate_child(VHostSup, ?MODULE) of
+ ok -> supervisor:delete_child(VHostSup, ?MODULE);
+ E -> E
+ end;
+ %% see start/1
+ {error, {no_such_vhost, VHost}} ->
+ rabbit_log:error("Failed to stop a recovery terms manager for vhost ~s: vhost no longer exists!",
+ [VHost]),
+
+ ok
+ end.
+
+-spec store(rabbit_types:vhost(), file:filename(), term()) -> rabbit_types:ok_or_error(term()).
+
+store(VHost, DirBaseName, Terms) ->
+ dets:insert(VHost, {DirBaseName, Terms}).
+
+-spec read(rabbit_types:vhost(), file:filename()) -> rabbit_types:ok_or_error2(term(), not_found).
+
+read(VHost, DirBaseName) ->
+ case dets:lookup(VHost, DirBaseName) of
+ [{_, Terms}] -> {ok, Terms};
+ _ -> {error, not_found}
+ end.
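+
+%% A minimal usage sketch (assumes the vhost's recovery terms manager
+%% started via start/1 is running; "queue-dir-1" is a made-up directory
+%% base name):
+%%
+%%   ok = rabbit_recovery_terms:store(VHost, "queue-dir-1",
+%%                                    [{persistent_bytes, 0}]),
+%%   {ok, [{persistent_bytes, 0}]} =
+%%       rabbit_recovery_terms:read(VHost, "queue-dir-1"),
+%%   {error, not_found} = rabbit_recovery_terms:read(VHost, "no-such-dir").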
+
+-spec clear(rabbit_types:vhost()) -> 'ok'.
+
+clear(VHost) ->
+ try
+ dets:delete_all_objects(VHost)
+ %% see start/1
+ catch _:badarg ->
+ rabbit_log:error("Failed to clear recovery terms for vhost ~s: table no longer exists!",
+ [VHost]),
+ ok
+ end,
+ flush(VHost).
+
+start_link(VHost) ->
+ gen_server:start_link(?MODULE, [VHost], []).
+
+%%----------------------------------------------------------------------------
+
+upgrade_recovery_terms() ->
+ open_global_table(),
+ try
+ QueuesDir = filename:join(rabbit_mnesia:dir(), "queues"),
+ Dirs = case rabbit_file:list_dir(QueuesDir) of
+ {ok, Entries} -> Entries;
+ {error, _} -> []
+ end,
+ [begin
+ File = filename:join([QueuesDir, Dir, "clean.dot"]),
+ case rabbit_file:read_term_file(File) of
+ {ok, Terms} -> ok = store_global_table(Dir, Terms);
+ {error, _} -> ok
+ end,
+ file:delete(File)
+ end || Dir <- Dirs],
+ ok
+ after
+ close_global_table()
+ end.
+
+persistent_bytes() -> dets_upgrade(fun persistent_bytes/1).
+persistent_bytes(Props) -> Props ++ [{persistent_bytes, 0}].
+
+dets_upgrade(Fun)->
+ open_global_table(),
+ try
+ ok = dets:foldl(fun ({DirBaseName, Terms}, Acc) ->
+ store_global_table(DirBaseName, Fun(Terms)),
+ Acc
+ end, ok, ?MODULE),
+ ok
+ after
+ close_global_table()
+ end.
+
+open_global_table() ->
+ File = filename:join(rabbit_mnesia:dir(), "recovery.dets"),
+ {ok, _} = dets:open_file(?MODULE, [{file, File},
+ {ram_file, true},
+ {auto_save, infinity}]),
+ ok.
+
+close_global_table() ->
+ try
+ dets:sync(?MODULE),
+ dets:close(?MODULE)
+ %% see clear/1
+ catch _:badarg ->
+             rabbit_log:error("Failed to sync and close the global recovery terms table: table no longer exists!",
+ []),
+ ok
+ end.
+
+store_global_table(DirBaseName, Terms) ->
+ dets:insert(?MODULE, {DirBaseName, Terms}).
+
+read_global(DirBaseName) ->
+ case dets:lookup(?MODULE, DirBaseName) of
+ [{_, Terms}] -> {ok, Terms};
+ _ -> {error, not_found}
+ end.
+
+delete_global_table() ->
+ file:delete(filename:join(rabbit_mnesia:dir(), "recovery.dets")).
+
+%%----------------------------------------------------------------------------
+
+init([VHost]) ->
+ process_flag(trap_exit, true),
+ open_table(VHost),
+ {ok, VHost}.
+
+handle_call(Msg, _, State) -> {stop, {unexpected_call, Msg}, State}.
+
+handle_cast(Msg, State) -> {stop, {unexpected_cast, Msg}, State}.
+
+handle_info(_Info, State) -> {noreply, State}.
+
+terminate(_Reason, VHost) ->
+ close_table(VHost).
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+%%----------------------------------------------------------------------------
+
+-spec open_table(vhost:name()) -> rabbit_types:ok_or_error(any()).
+
+open_table(VHost) ->
+ open_table(VHost, 10).
+
+-spec open_table(vhost:name(), non_neg_integer()) -> rabbit_types:ok_or_error(any()).
+
+open_table(VHost, RetriesLeft) ->
+ VHostDir = rabbit_vhost:msg_store_dir_path(VHost),
+ File = filename:join(VHostDir, "recovery.dets"),
+ Opts = [{file, File},
+ {ram_file, true},
+ {auto_save, infinity}],
+ case dets:open_file(VHost, Opts) of
+ {ok, _} -> ok;
+ {error, Error} ->
+ case RetriesLeft of
+ 0 ->
+ {error, Error};
+ N when is_integer(N) ->
+ _ = file:delete(File),
+ %% Wait before retrying
+ DelayInMs = 1000,
+ rabbit_log:warning("Failed to open a recovery terms DETS file at ~p. Will delete it and retry in ~p ms (~p retries left)",
+ [File, DelayInMs, RetriesLeft]),
+ timer:sleep(DelayInMs),
+ open_table(VHost, RetriesLeft - 1)
+ end
+ end.
+
+-spec flush(vhost:name()) -> rabbit_types:ok_or_error(any()).
+
+flush(VHost) ->
+ try
+ dets:sync(VHost)
+ %% see clear/1
+ catch _:badarg ->
+ rabbit_log:error("Failed to sync recovery terms table for vhost ~s: the table no longer exists!",
+ [VHost]),
+ ok
+ end.
+
+-spec close_table(vhost:name()) -> rabbit_types:ok_or_error(any()).
+
+close_table(VHost) ->
+ try
+ ok = flush(VHost),
+ ok = dets:close(VHost)
+ %% see clear/1
+ catch _:badarg ->
+ rabbit_log:error("Failed to close recovery terms table for vhost ~s: the table no longer exists!",
+ [VHost]),
+ ok
+ end.
diff --git a/deps/rabbit/src/rabbit_restartable_sup.erl b/deps/rabbit/src/rabbit_restartable_sup.erl
new file mode 100644
index 0000000000..46fcace99f
--- /dev/null
+++ b/deps/rabbit/src/rabbit_restartable_sup.erl
@@ -0,0 +1,33 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_restartable_sup).
+
+-behaviour(supervisor2).
+
+-export([start_link/3]).
+
+-export([init/1]).
+
+-include("rabbit.hrl").
+
+-define(DELAY, 2).
+
+%%----------------------------------------------------------------------------
+
+-spec start_link(atom(), rabbit_types:mfargs(), boolean()) ->
+ rabbit_types:ok_pid_or_error().
+
+start_link(Name, {_M, _F, _A} = Fun, Delay) ->
+ supervisor2:start_link({local, Name}, ?MODULE, [Fun, Delay]).
+
+init([{Mod, _F, _A} = Fun, Delay]) ->
+ {ok, {{one_for_one, 10, 10},
+ [{Mod, Fun, case Delay of
+ true -> {transient, 1};
+ false -> transient
+ end, ?WORKER_WAIT, worker, [Mod]}]}}.
diff --git a/deps/rabbit/src/rabbit_router.erl b/deps/rabbit/src/rabbit_router.erl
new file mode 100644
index 0000000000..ed170bcd8e
--- /dev/null
+++ b/deps/rabbit/src/rabbit_router.erl
@@ -0,0 +1,65 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_router).
+-include_lib("stdlib/include/qlc.hrl").
+-include("rabbit.hrl").
+
+-export([match_bindings/2, match_routing_key/2]).
+
+%%----------------------------------------------------------------------------
+
+-export_type([routing_key/0, match_result/0]).
+
+-type routing_key() :: binary().
+-type match_result() :: [rabbit_types:binding_destination()].
+
+-spec match_bindings(rabbit_types:binding_source(),
+ fun ((rabbit_types:binding()) -> boolean())) ->
+ match_result().
+-spec match_routing_key(rabbit_types:binding_source(),
+ [routing_key()] | ['_']) ->
+ match_result().
+
+%%----------------------------------------------------------------------------
+
+match_bindings(SrcName, Match) ->
+ MatchHead = #route{binding = #binding{source = SrcName,
+ _ = '_'}},
+ Routes = ets:select(rabbit_route, [{MatchHead, [], [['$_']]}]),
+ [Dest || [#route{binding = Binding = #binding{destination = Dest}}] <-
+ Routes, Match(Binding)].
+
+match_routing_key(SrcName, [RoutingKey]) ->
+ find_routes(#route{binding = #binding{source = SrcName,
+ destination = '$1',
+ key = RoutingKey,
+ _ = '_'}},
+ []);
+match_routing_key(SrcName, [_|_] = RoutingKeys) ->
+ find_routes(#route{binding = #binding{source = SrcName,
+ destination = '$1',
+ key = '$2',
+ _ = '_'}},
+ [list_to_tuple(['orelse' | [{'=:=', '$2', RKey} ||
+ RKey <- RoutingKeys]])]).
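+
+%% For illustration, the multi-key clause above builds an ordinary ets
+%% match specification guard; e.g. for RoutingKeys = [<<"a">>, <<"b">>]
+%% the condition list is
+%%
+%%   [{'orelse', {'=:=', '$2', <<"a">>}, {'=:=', '$2', <<"b">>}}]
+%%
+%% so one ets:select/2 pass matches bindings with either key.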
+
+%%--------------------------------------------------------------------
+
+%% Normally we'd call mnesia:dirty_select/2 here, but that is quite
+%% expensive for the same reasons as above, and, additionally, due to
+%% mnesia 'fixing' the table with ets:safe_fixtable/2, which is wholly
+%% unnecessary. According to the ets docs (and the code in erl_db.c),
+%% 'select' is safe anyway ("Functions that internally traverse over a
+%% table, like select and match, will give the same guarantee as
+%% safe_fixtable.") and, furthermore, even the lower level iterators
+%% ('first' and 'next') are safe on ordered_set tables ("Note that for
+%% tables of the ordered_set type, safe_fixtable/2 is not necessary as
+%% calls to first/1 and next/2 will always succeed."), which
+%% rabbit_route is.
+find_routes(MatchHead, Conditions) ->
+ ets:select(rabbit_route, [{MatchHead, Conditions, ['$1']}]).
diff --git a/deps/rabbit/src/rabbit_runtime_parameters.erl b/deps/rabbit/src/rabbit_runtime_parameters.erl
new file mode 100644
index 0000000000..1870b5dfa5
--- /dev/null
+++ b/deps/rabbit/src/rabbit_runtime_parameters.erl
@@ -0,0 +1,412 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_runtime_parameters).
+
+%% Runtime parameters are bits of configuration that are
+%% set, as the name implies, at runtime and not in the config file.
+%%
+%% The benefits of storing some bits of configuration at runtime vary:
+%%
+%% * Some parameters are vhost-specific
+%% * Others are specific to individual nodes
+%% * ...or even queues, exchanges, etc
+%%
+%% The most obvious use case for runtime parameters is policies but
+%% there are others:
+%%
+%% * Plugin-specific parameters that only make sense at runtime,
+%% e.g. Federation and Shovel link settings
+%% * Exchange and queue decorators
+%%
+%% Parameters are grouped by components, e.g. <<"policy">> or <<"shovel">>.
+%% Components are mapped to modules that perform validation.
+%% Runtime parameter values are then looked up by the modules that
+%% need to use them.
+%%
+%% Parameters are stored in Mnesia and can be global. Their changes
+%% are broadcast over rabbit_event.
+%%
+%% Global parameter keys are atoms and values are JSON documents.
+%%
+%% See also:
+%%
+%% * rabbit_policies
+%% * rabbit_policy
+%% * rabbit_registry
+%% * rabbit_event
+
+-include("rabbit.hrl").
+
+-export([parse_set/5, set/5, set_any/5, clear/4, clear_any/4, list/0, list/1,
+ list_component/1, list/2, list_formatted/1, list_formatted/3,
+ lookup/3, value/3, value/4, info_keys/0, clear_component/2]).
+
+-export([parse_set_global/3, set_global/3, value_global/1, value_global/2,
+ list_global/0, list_global_formatted/0, list_global_formatted/2,
+ lookup_global/1, global_info_keys/0, clear_global/2]).
+
+%%----------------------------------------------------------------------------
+
+-type ok_or_error_string() :: 'ok' | {'error_string', string()}.
+-type ok_thunk_or_error_string() :: ok_or_error_string() | fun(() -> 'ok').
+
+-spec parse_set(rabbit_types:vhost(), binary(), binary(), string(),
+ rabbit_types:user() | rabbit_types:username() | 'none')
+ -> ok_or_error_string().
+-spec set(rabbit_types:vhost(), binary(), binary(), term(),
+ rabbit_types:user() | rabbit_types:username() | 'none')
+ -> ok_or_error_string().
+-spec set_any(rabbit_types:vhost(), binary(), binary(), term(),
+ rabbit_types:user() | rabbit_types:username() | 'none')
+ -> ok_or_error_string().
+-spec set_global(atom(), term(), rabbit_types:username()) -> 'ok'.
+-spec clear(rabbit_types:vhost(), binary(), binary(), rabbit_types:username())
+ -> ok_thunk_or_error_string().
+-spec clear_any(rabbit_types:vhost(), binary(), binary(), rabbit_types:username())
+ -> ok_thunk_or_error_string().
+-spec list() -> [rabbit_types:infos()].
+-spec list(rabbit_types:vhost() | '_') -> [rabbit_types:infos()].
+-spec list_component(binary()) -> [rabbit_types:infos()].
+-spec list(rabbit_types:vhost() | '_', binary() | '_')
+ -> [rabbit_types:infos()].
+-spec list_formatted(rabbit_types:vhost()) -> [rabbit_types:infos()].
+-spec list_formatted(rabbit_types:vhost(), reference(), pid()) -> 'ok'.
+-spec lookup(rabbit_types:vhost(), binary(), binary())
+ -> rabbit_types:infos() | 'not_found'.
+-spec value(rabbit_types:vhost(), binary(), binary()) -> term().
+-spec value(rabbit_types:vhost(), binary(), binary(), term()) -> term().
+-spec value_global(atom()) -> term() | 'not_found'.
+-spec value_global(atom(), term()) -> term().
+-spec info_keys() -> rabbit_types:info_keys().
+
+%%---------------------------------------------------------------------------
+
+-import(rabbit_misc, [pget/2]).
+
+-define(TABLE, rabbit_runtime_parameters).
+
+%%---------------------------------------------------------------------------
+
+parse_set(_, <<"policy">>, _, _, _) ->
+ {error_string, "policies may not be set using this method"};
+parse_set(VHost, Component, Name, String, User) ->
+ Definition = rabbit_data_coercion:to_binary(String),
+ case rabbit_json:try_decode(Definition) of
+ {ok, Term} when is_map(Term) -> set(VHost, Component, Name, maps:to_list(Term), User);
+ {ok, Term} -> set(VHost, Component, Name, Term, User);
+ {error, Reason} ->
+ {error_string,
+ rabbit_misc:format("JSON decoding error. Reason: ~ts", [Reason])}
+ end.
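+
+%% Example invocation (component and name are illustrative; the
+%% component must have a registered validator module):
+%%
+%%   rabbit_runtime_parameters:parse_set(
+%%       <<"/">>, <<"federation-upstream">>, <<"origin">>,
+%%       "{\"uri\":\"amqp://remote\"}", none).
+%%
+%% The decoded JSON object is stored as a proplist, here
+%% [{<<"uri">>, <<"amqp://remote">>}].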
+
+set(_, <<"policy">>, _, _, _) ->
+ {error_string, "policies may not be set using this method"};
+set(VHost, Component, Name, Term, User) ->
+ set_any(VHost, Component, Name, Term, User).
+
+parse_set_global(Name, String, ActingUser) ->
+ Definition = rabbit_data_coercion:to_binary(String),
+ case rabbit_json:try_decode(Definition) of
+ {ok, Term} when is_map(Term) -> set_global(Name, maps:to_list(Term), ActingUser);
+ {ok, Term} -> set_global(Name, Term, ActingUser);
+ {error, Reason} ->
+ {error_string,
+ rabbit_misc:format("JSON decoding error. Reason: ~ts", [Reason])}
+ end.
+
+set_global(Name, Term, ActingUser) ->
+ NameAsAtom = rabbit_data_coercion:to_atom(Name),
+ rabbit_log:debug("Setting global parameter '~s' to ~p", [NameAsAtom, Term]),
+ mnesia_update(NameAsAtom, Term),
+ event_notify(parameter_set, none, global, [{name, NameAsAtom},
+ {value, Term},
+ {user_who_performed_action, ActingUser}]),
+ ok.
+
+format_error(L) ->
+ {error_string, rabbit_misc:format_many([{"Validation failed~n", []} | L])}.
+
+set_any(VHost, Component, Name, Term, User) ->
+ case set_any0(VHost, Component, Name, Term, User) of
+ ok -> ok;
+ {errors, L} -> format_error(L)
+ end.
+
+set_any0(VHost, Component, Name, Term, User) ->
+ rabbit_log:debug("Asked to set or update runtime parameter '~s' in vhost '~s' "
+ "for component '~s', value: ~p",
+ [Name, VHost, Component, Term]),
+ case lookup_component(Component) of
+ {ok, Mod} ->
+ case flatten_errors(
+ Mod:validate(VHost, Component, Name, Term, get_user(User))) of
+ ok ->
+ case mnesia_update(VHost, Component, Name, Term) of
+ {old, Term} ->
+ ok;
+ _ ->
+ ActingUser = get_username(User),
+ event_notify(
+ parameter_set, VHost, Component,
+ [{name, Name},
+ {value, Term},
+ {user_who_performed_action, ActingUser}]),
+ Mod:notify(VHost, Component, Name, Term, ActingUser)
+ end,
+ ok;
+ E ->
+ E
+ end;
+ E ->
+ E
+ end.
+
+%% Validate only a user record, as expected by the API before #rabbitmq-event-exchange-10
+get_user(#user{} = User) ->
+ User;
+get_user(_) ->
+ none.
+
+get_username(#user{username = Username}) ->
+ Username;
+get_username(none) ->
+ ?INTERNAL_USER;
+get_username(Any) ->
+ Any.
+
+mnesia_update(Key, Term) ->
+ rabbit_misc:execute_mnesia_transaction(mnesia_update_fun(Key, Term)).
+
+mnesia_update(VHost, Comp, Name, Term) ->
+ rabbit_misc:execute_mnesia_transaction(
+ rabbit_vhost:with(VHost, mnesia_update_fun({VHost, Comp, Name}, Term))).
+
+mnesia_update_fun(Key, Term) ->
+ fun () ->
+ Res = case mnesia:read(?TABLE, Key, read) of
+ [] -> new;
+ [Params] -> {old, Params#runtime_parameters.value}
+ end,
+ ok = mnesia:write(?TABLE, c(Key, Term), write),
+ Res
+ end.
+
+clear(_, <<"policy">> , _, _) ->
+ {error_string, "policies may not be cleared using this method"};
+clear(VHost, Component, Name, ActingUser) ->
+ clear_any(VHost, Component, Name, ActingUser).
+
+clear_global(Key, ActingUser) ->
+ KeyAsAtom = rabbit_data_coercion:to_atom(Key),
+ Notify = fun() ->
+ event_notify(parameter_set, none, global,
+ [{name, KeyAsAtom},
+ {user_who_performed_action, ActingUser}]),
+ ok
+ end,
+ case value_global(KeyAsAtom) of
+ not_found ->
+ {error_string, "Parameter does not exist"};
+ _ ->
+ F = fun () ->
+ ok = mnesia:delete(?TABLE, KeyAsAtom, write)
+ end,
+ ok = rabbit_misc:execute_mnesia_transaction(F),
+ case mnesia:is_transaction() of
+ true -> Notify;
+ false -> Notify()
+ end
+ end.
+
+clear_component(Component, ActingUser) ->
+ case list_component(Component) of
+ [] ->
+ ok;
+ Xs ->
+ [clear(pget(vhost, X),
+ pget(component, X),
+ pget(name, X),
+ ActingUser) || X <- Xs],
+ ok
+ end.
+
+clear_any(VHost, Component, Name, ActingUser) ->
+ Notify = fun () ->
+ case lookup_component(Component) of
+ {ok, Mod} -> event_notify(
+ parameter_cleared, VHost, Component,
+ [{name, Name},
+ {user_who_performed_action, ActingUser}]),
+ Mod:notify_clear(VHost, Component, Name, ActingUser);
+ _ -> ok
+ end
+ end,
+ case lookup(VHost, Component, Name) of
+ not_found -> {error_string, "Parameter does not exist"};
+ _ -> mnesia_clear(VHost, Component, Name),
+ case mnesia:is_transaction() of
+ true -> Notify;
+ false -> Notify()
+ end
+ end.
+
+mnesia_clear(VHost, Component, Name) ->
+ F = fun () ->
+ ok = mnesia:delete(?TABLE, {VHost, Component, Name}, write)
+ end,
+ ok = rabbit_misc:execute_mnesia_transaction(rabbit_vhost:with(VHost, F)).
+
+event_notify(_Event, _VHost, <<"policy">>, _Props) ->
+ ok;
+event_notify(Event, none, Component, Props) ->
+ rabbit_event:notify(Event, [{component, Component} | Props]);
+event_notify(Event, VHost, Component, Props) ->
+ rabbit_event:notify(Event, [{vhost, VHost},
+ {component, Component} | Props]).
+
+list() ->
+ [p(P) || #runtime_parameters{ key = {_VHost, Comp, _Name}} = P <-
+ rabbit_misc:dirty_read_all(?TABLE), Comp /= <<"policy">>].
+
+list(VHost) -> list(VHost, '_').
+list_component(Component) -> list('_', Component).
+
+%% Not dirty_match_object since that would not be transactional when used in a
+%% tx context
+list(VHost, Component) ->
+ mnesia:async_dirty(
+ fun () ->
+ case VHost of
+ '_' -> ok;
+ _ -> rabbit_vhost:assert(VHost)
+ end,
+ Match = #runtime_parameters{key = {VHost, Component, '_'},
+ _ = '_'},
+ [p(P) || #runtime_parameters{key = {_VHost, Comp, _Name}} = P <-
+ mnesia:match_object(?TABLE, Match, read),
+ Comp =/= <<"policy">> orelse Component =:= <<"policy">>]
+ end).
+
+list_global() ->
+ %% list only atom keys
+ mnesia:async_dirty(
+ fun () ->
+ Match = #runtime_parameters{key = '_', _ = '_'},
+ [p(P) || P <- mnesia:match_object(?TABLE, Match, read),
+ is_atom(P#runtime_parameters.key)]
+ end).
+
+list_formatted(VHost) ->
+ [ format_parameter(info_keys(), P) || P <- list(VHost) ].
+
+format_parameter(InfoKeys, P) ->
+ lists:foldr(fun
+ (value, Acc) ->
+ [{value, rabbit_json:encode(pget(value, P))} | Acc];
+ (Key, Acc) ->
+ case lists:keyfind(Key, 1, P) of
+ false -> Acc;
+ {Key, Val} -> [{Key, Val} | Acc]
+ end
+ end,
+ [], InfoKeys).
+
+list_formatted(VHost, Ref, AggregatorPid) ->
+ rabbit_control_misc:emitting_map(
+ AggregatorPid, Ref,
+ fun(P) -> format_parameter(info_keys(), P) end, list(VHost)).
+
+list_global_formatted() ->
+ [ format_parameter(global_info_keys(), P) || P <- list_global() ].
+
+list_global_formatted(Ref, AggregatorPid) ->
+ rabbit_control_misc:emitting_map(
+ AggregatorPid, Ref,
+ fun(P) -> format_parameter(global_info_keys(), P) end, list_global()).
+
+lookup(VHost, Component, Name) ->
+ case lookup0({VHost, Component, Name}, rabbit_misc:const(not_found)) of
+ not_found -> not_found;
+ Params -> p(Params)
+ end.
+
+lookup_global(Name) ->
+ case lookup0(Name, rabbit_misc:const(not_found)) of
+ not_found -> not_found;
+ Params -> p(Params)
+ end.
+
+value(VHost, Comp, Name) -> value0({VHost, Comp, Name}).
+value(VHost, Comp, Name, Def) -> value0({VHost, Comp, Name}, Def).
+
+value_global(Key) ->
+ value0(Key).
+
+value_global(Key, Default) ->
+ value0(Key, Default).
+
+value0(Key) ->
+ case lookup0(Key, rabbit_misc:const(not_found)) of
+ not_found -> not_found;
+ Params -> Params#runtime_parameters.value
+ end.
+
+value0(Key, Default) ->
+ Params = lookup0(Key, fun () -> lookup_missing(Key, Default) end),
+ Params#runtime_parameters.value.
+
+lookup0(Key, DefaultFun) ->
+ case mnesia:dirty_read(?TABLE, Key) of
+ [] -> DefaultFun();
+ [R] -> R
+ end.
+
+lookup_missing(Key, Default) ->
+ rabbit_misc:execute_mnesia_transaction(
+ fun () ->
+ case mnesia:read(?TABLE, Key, read) of
+ [] -> Record = c(Key, Default),
+ mnesia:write(?TABLE, Record, write),
+ Record;
+ [R] -> R
+ end
+ end).
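+
+%% Note on the default-value path above: unlike value/3, value/4 (via
+%% lookup_missing/2) persists the default into the table on a miss, so
+%% subsequent reads return it. A sketch ('sample_key' is made up):
+%%
+%%   not_found = rabbit_runtime_parameters:value_global(sample_key),
+%%   42        = rabbit_runtime_parameters:value_global(sample_key, 42),
+%%   42        = rabbit_runtime_parameters:value_global(sample_key).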
+
+c(Key, Default) ->
+ #runtime_parameters{key = Key,
+ value = Default}.
+
+p(#runtime_parameters{key = {VHost, Component, Name}, value = Value}) ->
+ [{vhost, VHost},
+ {component, Component},
+ {name, Name},
+ {value, Value}];
+
+p(#runtime_parameters{key = Key, value = Value}) when is_atom(Key) ->
+ [{name, Key},
+ {value, Value}].
+
+info_keys() -> [component, name, value].
+
+global_info_keys() -> [name, value].
+
+%%---------------------------------------------------------------------------
+
+lookup_component(Component) ->
+ case rabbit_registry:lookup_module(
+ runtime_parameter, list_to_atom(binary_to_list(Component))) of
+ {error, not_found} -> {errors,
+ [{"component ~s not found", [Component]}]};
+ {ok, Module} -> {ok, Module}
+ end.
+
+flatten_errors(L) ->
+ case [{F, A} || I <- lists:flatten([L]), {error, F, A} <- [I]] of
+ [] -> ok;
+ E -> {errors, E}
+ end.
diff --git a/deps/rabbit/src/rabbit_ssl.erl b/deps/rabbit/src/rabbit_ssl.erl
new file mode 100644
index 0000000000..84670b0a19
--- /dev/null
+++ b/deps/rabbit/src/rabbit_ssl.erl
@@ -0,0 +1,195 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_ssl).
+
+-include_lib("public_key/include/public_key.hrl").
+
+-export([peer_cert_issuer/1, peer_cert_subject/1, peer_cert_validity/1]).
+-export([peer_cert_subject_items/2, peer_cert_auth_name/1]).
+-export([cipher_suites_erlang/2, cipher_suites_erlang/1,
+ cipher_suites_openssl/2, cipher_suites_openssl/1,
+ cipher_suites/1]).
+
+%%--------------------------------------------------------------------------
+
+-export_type([certificate/0]).
+
+% Due to API differences between OTP releases.
+-dialyzer(no_missing_calls).
+-ignore_xref([{ssl_cipher_format, suite_legacy, 1},
+ {ssl_cipher_format, suite, 1},
+ {ssl_cipher_format, suite_to_str, 1},
+ {ssl_cipher_format, erl_suite_definition, 1},
+ {ssl_cipher_format, suite_map_to_openssl_str, 1},
+ {ssl_cipher_format, suite_map_to_bin, 1}]).
+
+-type certificate() :: rabbit_cert_info:certificate().
+
+-type cipher_suites_mode() :: default | all | anonymous.
+
+-spec cipher_suites(cipher_suites_mode()) -> ssl:ciphers().
+cipher_suites(Mode) ->
+ Version = get_highest_protocol_version(),
+ ssl:cipher_suites(Mode, Version).
+
+-spec cipher_suites_erlang(cipher_suites_mode()) ->
+ [ssl:old_cipher_suite()].
+cipher_suites_erlang(Mode) ->
+ Version = get_highest_protocol_version(),
+ cipher_suites_erlang(Mode, Version).
+
+-spec cipher_suites_erlang(cipher_suites_mode(),
+ ssl:protocol_version() | tls_record:tls_version()) ->
+ [ssl:old_cipher_suite()].
+cipher_suites_erlang(Mode, Version) ->
+ [ format_cipher_erlang(C)
+ || C <- ssl:cipher_suites(Mode, Version) ].
+
+-spec cipher_suites_openssl(cipher_suites_mode()) ->
+ [ssl:old_cipher_suite()].
+cipher_suites_openssl(Mode) ->
+ Version = get_highest_protocol_version(),
+ cipher_suites_openssl(Mode, Version).
+
+-spec cipher_suites_openssl(cipher_suites_mode(),
+ ssl:protocol_version() | tls_record:tls_version()) ->
+ [ssl:old_cipher_suite()].
+cipher_suites_openssl(Mode, Version) ->
+ lists:filtermap(fun(C) ->
+ OpenSSL = format_cipher_openssl(C),
+ case is_list(OpenSSL) of
+ true -> {true, OpenSSL};
+ false -> false
+ end
+ end,
+ ssl:cipher_suites(Mode, Version)).
+
+
+format_cipher_erlang(Cipher) ->
+ case erlang:function_exported(ssl_cipher_format, suite_map_to_bin, 1) of
+ true ->
+ format_cipher_erlang22(Cipher);
+ false ->
+ format_cipher_erlang21(Cipher)
+ end.
+
+format_cipher_erlang22(Cipher) ->
+ ssl_cipher_format:suite_legacy(ssl_cipher_format:suite_map_to_bin(Cipher)).
+
+format_cipher_erlang21(Cipher) ->
+ ssl_cipher_format:erl_suite_definition(ssl_cipher_format:suite(Cipher)).
+
+
+format_cipher_openssl(Cipher) ->
+ case erlang:function_exported(ssl_cipher_format, suite_map_to_bin, 1) of
+ true ->
+ format_cipher_openssl22(Cipher);
+ false ->
+ format_cipher_openssl21(Cipher)
+ end.
+
+format_cipher_openssl22(Cipher) ->
+ ssl_cipher_format:suite_map_to_openssl_str(Cipher).
+
+format_cipher_openssl21(Cipher) ->
+ ssl_cipher_format:suite_to_str(Cipher).
+
+-spec get_highest_protocol_version() -> tls_record:tls_atom_version().
+get_highest_protocol_version() ->
+ tls_record:protocol_version(
+ tls_record:highest_protocol_version([])).
+
+%%--------------------------------------------------------------------------
+%% High-level functions used by reader
+%%--------------------------------------------------------------------------
+
+%% Return a string describing the certificate's issuer.
+peer_cert_issuer(Cert) ->
+ rabbit_cert_info:issuer(Cert).
+
+%% Return a string describing the certificate's subject, as per RFC4514.
+peer_cert_subject(Cert) ->
+ rabbit_cert_info:subject(Cert).
+
+%% Return the parts of the certificate's subject.
+peer_cert_subject_items(Cert, Type) ->
+ rabbit_cert_info:subject_items(Cert, Type).
+
+%% Filters certificate SAN extensions by (OTP) SAN type name.
+peer_cert_subject_alternative_names(Cert, Type) ->
+ SANs = rabbit_cert_info:subject_alternative_names(Cert),
+ lists:filter(fun({Key, _}) -> Key =:= Type end, SANs).
+
+%% Return a string describing the certificate's validity.
+peer_cert_validity(Cert) ->
+ rabbit_cert_info:validity(Cert).
+
+%% Extract a username from the certificate
+-spec peer_cert_auth_name
+ (certificate()) -> binary() | 'not_found' | 'unsafe'.
+
+peer_cert_auth_name(Cert) ->
+ {ok, Mode} = application:get_env(rabbit, ssl_cert_login_from),
+ peer_cert_auth_name(Mode, Cert).
+
+peer_cert_auth_name(distinguished_name, Cert) ->
+ case auth_config_sane() of
+ true -> iolist_to_binary(peer_cert_subject(Cert));
+ false -> unsafe
+ end;
+
+peer_cert_auth_name(subject_alt_name, Cert) ->
+ peer_cert_auth_name(subject_alternative_name, Cert);
+
+peer_cert_auth_name(subject_alternative_name, Cert) ->
+ case auth_config_sane() of
+ true ->
+ Type = application:get_env(rabbit, ssl_cert_login_san_type, dns),
+ %% lists:nth/2 is 1-based
+ Index = application:get_env(rabbit, ssl_cert_login_san_index, 0) + 1,
+ OfType = peer_cert_subject_alternative_names(Cert, otp_san_type(Type)),
+ rabbit_log:debug("Peer certificate SANs of type ~s: ~p, index to use with lists:nth/2: ~b", [Type, OfType, Index]),
+ case length(OfType) of
+ 0 -> not_found;
+ N when N < Index -> not_found;
+ N when N >= Index ->
+ {_, Value} = lists:nth(Index, OfType),
+ rabbit_data_coercion:to_binary(Value)
+ end;
+ false -> unsafe
+ end;
+
+peer_cert_auth_name(common_name, Cert) ->
+    %% If there is more than one CN then we join them with "," in a
+    %% vaguely DN-like way. But this is mostly so we do something more
+    %% intelligent than crashing; if you actually want things escaped
+    %% properly etc., use DN mode.
+ case auth_config_sane() of
+ true -> case peer_cert_subject_items(Cert, ?'id-at-commonName') of
+ not_found -> not_found;
+ CNs -> list_to_binary(string:join(CNs, ","))
+ end;
+ false -> unsafe
+ end.
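+
+%% Configuration sketch for the SAN-based clauses above (sample values;
+%% the keys are the ones read via application:get_env/2,3 here):
+%%
+%%   {rabbit, [{ssl_cert_login_from,      subject_alternative_name},
+%%             {ssl_cert_login_san_type,  dns},
+%%             {ssl_cert_login_san_index, 0}]}
+%%
+%% This picks the first DNS SAN of the peer certificate as the
+%% username.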
+
+auth_config_sane() ->
+ {ok, Opts} = application:get_env(rabbit, ssl_options),
+ case proplists:get_value(verify, Opts) of
+ verify_peer -> true;
+ V -> rabbit_log:warning("TLS peer verification (authentication) is "
+ "disabled, ssl_options.verify value used: ~p. "
+ "See https://www.rabbitmq.com/ssl.html#peer-verification to learn more.", [V]),
+ false
+ end.
+
+otp_san_type(dns) -> dNSName;
+otp_san_type(ip) -> iPAddress;
+otp_san_type(email) -> rfc822Name;
+otp_san_type(uri) -> uniformResourceIdentifier;
+otp_san_type(other_name) -> otherName;
+otp_san_type(Other) -> Other.
diff --git a/deps/rabbit/src/rabbit_stream_coordinator.erl b/deps/rabbit/src/rabbit_stream_coordinator.erl
new file mode 100644
index 0000000000..9e4890c894
--- /dev/null
+++ b/deps/rabbit/src/rabbit_stream_coordinator.erl
@@ -0,0 +1,949 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at https://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% Copyright (c) 2012-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+-module(rabbit_stream_coordinator).
+
+-behaviour(ra_machine).
+
+-export([start/0]).
+-export([format_ra_event/2]).
+
+-export([init/1,
+ apply/3,
+ state_enter/2,
+ init_aux/1,
+ handle_aux/6,
+ tick/2]).
+
+-export([recover/0,
+ start_cluster/1,
+ delete_cluster/2,
+ add_replica/2,
+ delete_replica/2]).
+
+-export([policy_changed/1]).
+
+-export([phase_repair_mnesia/2,
+ phase_start_cluster/1,
+ phase_delete_cluster/2,
+ phase_check_quorum/1,
+ phase_start_new_leader/1,
+ phase_stop_replicas/1,
+ phase_start_replica/3,
+ phase_delete_replica/2]).
+
+-export([log_overview/1]).
+
+-define(STREAM_COORDINATOR_STARTUP, {stream_coordinator_startup, self()}).
+-define(TICK_TIMEOUT, 60000).
+-define(RESTART_TIMEOUT, 1000).
+-define(PHASE_RETRY_TIMEOUT, 10000).
+-define(CMD_TIMEOUT, 30000).
+
+-record(?MODULE, {streams, monitors}).
+
+start() ->
+ Nodes = rabbit_mnesia:cluster_nodes(all),
+ ServerId = {?MODULE, node()},
+ case ra:restart_server(ServerId) of
+ {error, Reason} when Reason == not_started orelse
+ Reason == name_not_registered ->
+ case ra:start_server(make_ra_conf(node(), Nodes)) of
+ ok ->
+ global:set_lock(?STREAM_COORDINATOR_STARTUP),
+ case find_members(Nodes) of
+ [] ->
+ %% We're the first (and maybe only) one
+ ra:trigger_election(ServerId);
+ Members ->
+ %% What to do if we get a timeout?
+ {ok, _, _} = ra:add_member(Members, ServerId, 30000)
+ end,
+ global:del_lock(?STREAM_COORDINATOR_STARTUP),
+ _ = ra:members(ServerId),
+ ok;
+ Error ->
+ exit(Error)
+ end;
+ ok ->
+ ok;
+ Error ->
+ exit(Error)
+ end.
+
+find_members([]) ->
+ [];
+find_members([Node | Nodes]) ->
+ case ra:members({?MODULE, Node}) of
+ {_, Members, _} ->
+ Members;
+ {error, noproc} ->
+ find_members(Nodes);
+ {timeout, _} ->
+ %% not sure what to do here
+ find_members(Nodes)
+ end.
+
+recover() ->
+ ra:restart_server({?MODULE, node()}).
+
+start_cluster(Q) ->
+ process_command({start_cluster, #{queue => Q}}).
+
+delete_cluster(StreamId, ActingUser) ->
+ process_command({delete_cluster, #{stream_id => StreamId, acting_user => ActingUser}}).
+
+add_replica(StreamId, Node) ->
+ process_command({start_replica, #{stream_id => StreamId, node => Node,
+ retries => 1}}).
+
+policy_changed(StreamId) ->
+ process_command({policy_changed, #{stream_id => StreamId}}).
+
+delete_replica(StreamId, Node) ->
+ process_command({delete_replica, #{stream_id => StreamId, node => Node}}).
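+
+%% Example call (hypothetical stream id and node; in practice the id is
+%% taken from the queue's type state, as in rabbit_stream_queue):
+%%
+%%   {ok, Reply, _Leader} =
+%%       rabbit_stream_coordinator:add_replica(StreamId, 'rabbit@node2')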
+
+process_command(Cmd) ->
+ global:set_lock(?STREAM_COORDINATOR_STARTUP),
+ Servers = ensure_coordinator_started(),
+ global:del_lock(?STREAM_COORDINATOR_STARTUP),
+ process_command(Servers, Cmd).
+
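+%% Try each known server in turn so commands survive leader changes: a
+%% timeout or noproc reply from one member falls through to the next.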
+process_command([], _Cmd) ->
+ {error, coordinator_unavailable};
+process_command([Server | Servers], {CmdName, _} = Cmd) ->
+ case ra:process_command(Server, Cmd, ?CMD_TIMEOUT) of
+ {timeout, _} ->
+ rabbit_log:warning("Coordinator timeout on server ~p when processing command ~p",
+ [Server, CmdName]),
+ process_command(Servers, Cmd);
+ {error, noproc} ->
+ process_command(Servers, Cmd);
+ Reply ->
+ Reply
+ end.
+
+ensure_coordinator_started() ->
+ Local = {?MODULE, node()},
+ AllNodes = all_nodes(),
+ case ra:restart_server(Local) of
+ {error, Reason} when Reason == not_started orelse
+ Reason == name_not_registered ->
+ OtherNodes = all_nodes() -- [Local],
+            %% We can't use find_members/1 here: a member that times out
+            %% still exists, which means the cluster is already up
+ case lists:filter(fun(N) -> global:whereis_name(N) =/= undefined end, OtherNodes) of
+ [] ->
+ start_coordinator_cluster();
+ _ ->
+ OtherNodes
+ end;
+ ok ->
+ AllNodes;
+ {error, {already_started, _}} ->
+ AllNodes;
+ _ ->
+ AllNodes
+ end.
+
+start_coordinator_cluster() ->
+ Nodes = rabbit_mnesia:cluster_nodes(running),
+ case ra:start_cluster([make_ra_conf(Node, Nodes) || Node <- Nodes]) of
+ {ok, Started, _} ->
+ Started;
+ {error, cluster_not_formed} ->
+ rabbit_log:warning("Stream coordinator cluster not formed", []),
+ []
+ end.
+
+all_nodes() ->
+ Nodes = rabbit_mnesia:cluster_nodes(running) -- [node()],
+ [{?MODULE, Node} || Node <- [node() | Nodes]].
+
+init(_Conf) ->
+ #?MODULE{streams = #{},
+ monitors = #{}}.
+
+apply(#{from := From}, {policy_changed, #{stream_id := StreamId}} = Cmd,
+ #?MODULE{streams = Streams0} = State) ->
+ case maps:get(StreamId, Streams0, undefined) of
+ undefined ->
+ {State, ok, []};
+ #{conf := Conf,
+ state := running} ->
+ case rabbit_stream_queue:update_stream_conf(Conf) of
+ Conf ->
+                    %% No changes; avoid triggering an unnecessary election
+ {State, ok, []};
+ _ ->
+ {State, ok, [{mod_call, osiris_writer, stop, [Conf]}]}
+ end;
+ SState0 ->
+ Streams = maps:put(StreamId, add_pending_cmd(From, Cmd, SState0), Streams0),
+ {State#?MODULE{streams = Streams}, '$ra_no_reply', []}
+
+ end;
+apply(#{from := From}, {start_cluster, #{queue := Q}}, #?MODULE{streams = Streams} = State) ->
+ #{name := StreamId} = Conf0 = amqqueue:get_type_state(Q),
+ Conf = apply_leader_locator_strategy(Conf0, Streams),
+ case maps:is_key(StreamId, Streams) of
+ true ->
+ {State, '$ra_no_reply', wrap_reply(From, {error, already_started})};
+ false ->
+ Phase = phase_start_cluster,
+ PhaseArgs = [amqqueue:set_type_state(Q, Conf)],
+ SState = #{state => start_cluster,
+ phase => Phase,
+ phase_args => PhaseArgs,
+ conf => Conf,
+ reply_to => From,
+ pending_cmds => [],
+ pending_replicas => []},
+ rabbit_log:debug("rabbit_stream_coordinator: ~p entering phase_start_cluster", [StreamId]),
+ {State#?MODULE{streams = maps:put(StreamId, SState, Streams)}, '$ra_no_reply',
+ [{aux, {phase, StreamId, Phase, PhaseArgs}}]}
+ end;
+apply(_Meta, {start_cluster_reply, Q}, #?MODULE{streams = Streams,
+ monitors = Monitors0} = State) ->
+ #{name := StreamId,
+ leader_pid := LeaderPid,
+ replica_pids := ReplicaPids} = Conf = amqqueue:get_type_state(Q),
+ SState0 = maps:get(StreamId, Streams),
+ Phase = phase_repair_mnesia,
+ PhaseArgs = [new, Q],
+ SState = SState0#{conf => Conf,
+ phase => Phase,
+ phase_args => PhaseArgs},
+ Monitors = lists:foldl(fun(Pid, M) ->
+ maps:put(Pid, {StreamId, follower}, M)
+ end, maps:put(LeaderPid, {StreamId, leader}, Monitors0), ReplicaPids),
+ MonitorActions = [{monitor, process, Pid} || Pid <- ReplicaPids ++ [LeaderPid]],
+ rabbit_log:debug("rabbit_stream_coordinator: ~p entering ~p "
+ "after start_cluster_reply", [StreamId, Phase]),
+ {State#?MODULE{streams = maps:put(StreamId, SState, Streams),
+ monitors = Monitors}, ok,
+ MonitorActions ++ [{aux, {phase, StreamId, Phase, PhaseArgs}}]};
+apply(_Meta, {start_replica_failed, StreamId, Node, Retries, Reply},
+ #?MODULE{streams = Streams0} = State) ->
+ rabbit_log:debug("rabbit_stream_coordinator: ~p start replica failed", [StreamId]),
+ case maps:get(StreamId, Streams0, undefined) of
+ undefined ->
+ {State, {error, not_found}, []};
+ #{pending_replicas := Pending,
+ reply_to := From} = SState ->
+ Streams = Streams0#{StreamId => clear_stream_state(SState#{pending_replicas =>
+ add_unique(Node, Pending)})},
+ reply_and_run_pending(
+ From, StreamId, ok, Reply,
+ [{timer, {pipeline,
+ [{start_replica, #{stream_id => StreamId,
+ node => Node,
+ from => undefined,
+ retries => Retries + 1}}]},
+ ?RESTART_TIMEOUT * Retries}],
+ State#?MODULE{streams = Streams})
+ end;
+apply(_Meta, {phase_finished, StreamId, Reply}, #?MODULE{streams = Streams0} = State) ->
+ rabbit_log:debug("rabbit_stream_coordinator: ~p phase finished", [StreamId]),
+ case maps:get(StreamId, Streams0, undefined) of
+ undefined ->
+ {State, {error, not_found}, []};
+ #{reply_to := From} = SState ->
+ Streams = Streams0#{StreamId => clear_stream_state(SState)},
+ reply_and_run_pending(From, StreamId, ok, Reply, [], State#?MODULE{streams = Streams})
+ end;
+apply(#{from := From}, {start_replica, #{stream_id := StreamId, node := Node,
+ retries := Retries}} = Cmd,
+ #?MODULE{streams = Streams0} = State) ->
+ case maps:get(StreamId, Streams0, undefined) of
+ undefined ->
+ case From of
+ undefined ->
+ {State, ok, []};
+ _ ->
+ {State, '$ra_no_reply', wrap_reply(From, {error, not_found})}
+ end;
+ #{conf := Conf,
+ state := running} = SState0 ->
+ Phase = phase_start_replica,
+ PhaseArgs = [Node, Conf, Retries],
+ SState = update_stream_state(From, start_replica, Phase, PhaseArgs, SState0),
+ rabbit_log:debug("rabbit_stream_coordinator: ~p entering ~p on node ~p",
+ [StreamId, Phase, Node]),
+ {State#?MODULE{streams = Streams0#{StreamId => SState}}, '$ra_no_reply',
+ [{aux, {phase, StreamId, Phase, PhaseArgs}}]};
+ SState0 ->
+ Streams = maps:put(StreamId, add_pending_cmd(From, Cmd, SState0), Streams0),
+ {State#?MODULE{streams = Streams}, '$ra_no_reply', []}
+ end;
+apply(_Meta, {start_replica_reply, StreamId, Pid},
+ #?MODULE{streams = Streams, monitors = Monitors0} = State) ->
+ case maps:get(StreamId, Streams, undefined) of
+ undefined ->
+ {State, {error, not_found}, []};
+ #{conf := Conf0} = SState0 ->
+ #{replica_nodes := Replicas0,
+ replica_pids := ReplicaPids0} = Conf0,
+ {ReplicaPids, MaybePid} = delete_replica_pid(node(Pid), ReplicaPids0),
+ Conf = Conf0#{replica_pids => [Pid | ReplicaPids],
+ replica_nodes => add_unique(node(Pid), Replicas0)},
+ Phase = phase_repair_mnesia,
+ PhaseArgs = [update, Conf],
+ rabbit_log:debug("rabbit_stream_coordinator: ~p entering ~p after start replica", [StreamId, Phase]),
+            #{pending_replicas := Pending} = SState0,
+ SState = SState0#{conf => Conf,
+ phase => Phase,
+ phase_args => PhaseArgs,
+ pending_replicas => lists:delete(node(Pid), Pending)},
+ Monitors1 = Monitors0#{Pid => {StreamId, follower}},
+ Monitors = case MaybePid of
+ [P] -> maps:remove(P, Monitors1);
+ _ -> Monitors1
+ end,
+ {State#?MODULE{streams = Streams#{StreamId => SState},
+ monitors = Monitors}, ok,
+ [{monitor, process, Pid}, {aux, {phase, StreamId, Phase, PhaseArgs}}]}
+ end;
+apply(#{from := From}, {delete_replica, #{stream_id := StreamId, node := Node}} = Cmd,
+ #?MODULE{streams = Streams0,
+ monitors = Monitors0} = State) ->
+ case maps:get(StreamId, Streams0, undefined) of
+ undefined ->
+ {State, '$ra_no_reply', wrap_reply(From, {error, not_found})};
+ #{conf := Conf0,
+ state := running,
+ pending_replicas := Pending0} = SState0 ->
+ Replicas0 = maps:get(replica_nodes, Conf0),
+ ReplicaPids0 = maps:get(replica_pids, Conf0),
+ case lists:member(Node, Replicas0) of
+ false ->
+ reply_and_run_pending(From, StreamId, '$ra_no_reply', ok, [], State);
+ true ->
+ [Pid] = lists:filter(fun(P) -> node(P) == Node end, ReplicaPids0),
+ ReplicaPids = lists:delete(Pid, ReplicaPids0),
+ Replicas = lists:delete(Node, Replicas0),
+ Pending = lists:delete(Node, Pending0),
+ Conf = Conf0#{replica_pids => ReplicaPids,
+ replica_nodes => Replicas},
+ Phase = phase_delete_replica,
+ PhaseArgs = [Node, Conf],
+ SState = update_stream_state(From, delete_replica,
+ Phase, PhaseArgs,
+ SState0#{conf => Conf0,
+ pending_replicas => Pending}),
+ Monitors = maps:remove(Pid, Monitors0),
+ rabbit_log:debug("rabbit_stream_coordinator: ~p entering ~p on node ~p", [StreamId, Phase, Node]),
+ {State#?MODULE{monitors = Monitors,
+ streams = Streams0#{StreamId => SState}},
+ '$ra_no_reply',
+ [{demonitor, process, Pid},
+ {aux, {phase, StreamId, Phase, PhaseArgs}}]}
+ end;
+ SState0 ->
+ Streams = maps:put(StreamId, add_pending_cmd(From, Cmd, SState0), Streams0),
+ {State#?MODULE{streams = Streams}, '$ra_no_reply', []}
+ end;
+apply(#{from := From}, {delete_cluster, #{stream_id := StreamId,
+ acting_user := ActingUser}} = Cmd,
+ #?MODULE{streams = Streams0, monitors = Monitors0} = State) ->
+ case maps:get(StreamId, Streams0, undefined) of
+ undefined ->
+ {State, '$ra_no_reply', wrap_reply(From, {ok, 0})};
+ #{conf := Conf,
+ state := running} = SState0 ->
+ ReplicaPids = maps:get(replica_pids, Conf),
+ LeaderPid = maps:get(leader_pid, Conf),
+ Monitors = lists:foldl(fun(Pid, M) ->
+ maps:remove(Pid, M)
+ end, Monitors0, ReplicaPids ++ [LeaderPid]),
+ Phase = phase_delete_cluster,
+ PhaseArgs = [Conf, ActingUser],
+ SState = update_stream_state(From, delete_cluster, Phase, PhaseArgs, SState0),
+ Demonitors = [{demonitor, process, Pid} || Pid <- [LeaderPid | ReplicaPids]],
+ rabbit_log:debug("rabbit_stream_coordinator: ~p entering ~p",
+ [StreamId, Phase]),
+ {State#?MODULE{monitors = Monitors,
+ streams = Streams0#{StreamId => SState}}, '$ra_no_reply',
+ Demonitors ++ [{aux, {phase, StreamId, Phase, PhaseArgs}}]};
+ SState0 ->
+ Streams = maps:put(StreamId, add_pending_cmd(From, Cmd, SState0), Streams0),
+ {State#?MODULE{streams = Streams}, '$ra_no_reply', []}
+ end;
+apply(_Meta, {delete_cluster_reply, StreamId}, #?MODULE{streams = Streams} = State0) ->
+ #{reply_to := From,
+ pending_cmds := Pending} = maps:get(StreamId, Streams),
+ State = State0#?MODULE{streams = maps:remove(StreamId, Streams)},
+ rabbit_log:debug("rabbit_stream_coordinator: ~p finished delete_cluster_reply",
+ [StreamId]),
+ Actions = [{ra, pipeline_command, [{?MODULE, node()}, Cmd]} || Cmd <- Pending],
+ {State, ok, Actions ++ wrap_reply(From, {ok, 0})};
+apply(_Meta, {down, Pid, _Reason} = Cmd, #?MODULE{streams = Streams,
+ monitors = Monitors0} = State) ->
+ case maps:get(Pid, Monitors0, undefined) of
+ {StreamId, Role} ->
+ Monitors = maps:remove(Pid, Monitors0),
+ case maps:get(StreamId, Streams, undefined) of
+ #{state := delete_cluster} ->
+ {State#?MODULE{monitors = Monitors}, ok, []};
+ undefined ->
+ {State#?MODULE{monitors = Monitors}, ok, []};
+ #{state := running,
+ conf := #{replica_pids := Pids} = Conf0,
+ pending_cmds := Pending0} = SState0 ->
+ case Role of
+ leader ->
+ rabbit_log:info("rabbit_stream_coordinator: ~p leader is down, starting election", [StreamId]),
+ Phase = phase_stop_replicas,
+ PhaseArgs = [Conf0],
+ SState = update_stream_state(undefined, leader_election, Phase, PhaseArgs, SState0),
+ Events = [{demonitor, process, P} || P <- Pids],
+ Monitors1 = lists:foldl(fun(P, M) ->
+ maps:remove(P, M)
+ end, Monitors, Pids),
+ rabbit_log:debug("rabbit_stream_coordinator: ~p entering ~p", [StreamId, Phase]),
+ {State#?MODULE{monitors = Monitors1,
+ streams = Streams#{StreamId => SState}},
+ ok, Events ++ [{aux, {phase, StreamId, Phase, PhaseArgs}}]};
+ follower ->
+ case rabbit_misc:is_process_alive(maps:get(leader_pid, Conf0)) of
+ true ->
+ Phase = phase_start_replica,
+ PhaseArgs = [node(Pid), Conf0, 1],
+ SState = update_stream_state(undefined,
+ replica_restart,
+ Phase, PhaseArgs,
+ SState0),
+ rabbit_log:debug("rabbit_stream_coordinator: ~p replica on node ~p is down, entering ~p", [StreamId, node(Pid), Phase]),
+ {State#?MODULE{monitors = Monitors,
+ streams = Streams#{StreamId => SState}},
+ ok, [{aux, {phase, StreamId, Phase, PhaseArgs}}]};
+ false ->
+ SState = SState0#{pending_cmds => Pending0 ++ [Cmd]},
+ reply_and_run_pending(undefined, StreamId, ok, ok, [], State#?MODULE{streams = Streams#{StreamId => SState}})
+ end
+ end;
+ #{pending_cmds := Pending0} = SState0 ->
+ SState = SState0#{pending_cmds => Pending0 ++ [Cmd]},
+ {State#?MODULE{streams = Streams#{StreamId => SState}}, ok, []}
+ end;
+ undefined ->
+ {State, ok, []}
+ end;
+apply(_Meta, {start_leader_election, StreamId, NewEpoch, Offsets},
+ #?MODULE{streams = Streams} = State) ->
+ #{conf := Conf0} = SState0 = maps:get(StreamId, Streams),
+ #{leader_node := Leader,
+ replica_nodes := Replicas,
+ replica_pids := ReplicaPids0} = Conf0,
+ NewLeader = find_max_offset(Offsets),
+ rabbit_log:info("rabbit_stream_coordinator: ~p starting new leader on node ~p",
+ [StreamId, NewLeader]),
+ {ReplicaPids, _} = delete_replica_pid(NewLeader, ReplicaPids0),
+ Conf = rabbit_stream_queue:update_stream_conf(
+ Conf0#{epoch => NewEpoch,
+ leader_node => NewLeader,
+ replica_nodes => lists:delete(NewLeader, Replicas ++ [Leader]),
+ replica_pids => ReplicaPids}),
+ Phase = phase_start_new_leader,
+ PhaseArgs = [Conf],
+ SState = SState0#{conf => Conf,
+ phase => Phase,
+ phase_args => PhaseArgs},
+ rabbit_log:debug("rabbit_stream_coordinator: ~p entering phase_start_new_leader",
+ [StreamId]),
+ {State#?MODULE{streams = Streams#{StreamId => SState}}, ok,
+ [{aux, {phase, StreamId, Phase, PhaseArgs}}]};
+apply(_Meta, {leader_elected, StreamId, NewLeaderPid},
+ #?MODULE{streams = Streams, monitors = Monitors0} = State) ->
+ rabbit_log:info("rabbit_stream_coordinator: ~p leader elected", [StreamId]),
+ #{conf := Conf0,
+ pending_cmds := Pending0} = SState0 = maps:get(StreamId, Streams),
+ #{leader_pid := LeaderPid,
+ replica_nodes := Replicas} = Conf0,
+ Conf = Conf0#{leader_pid => NewLeaderPid},
+ Phase = phase_repair_mnesia,
+ PhaseArgs = [update, Conf],
+ Pending = Pending0 ++ [{start_replica, #{stream_id => StreamId, node => R,
+ retries => 1, from => undefined}}
+ || R <- Replicas],
+ SState = SState0#{conf => Conf,
+ phase => Phase,
+ phase_args => PhaseArgs,
+ pending_replicas => Replicas,
+ pending_cmds => Pending},
+ Monitors = maps:put(NewLeaderPid, {StreamId, leader}, maps:remove(LeaderPid, Monitors0)),
+ rabbit_log:debug("rabbit_stream_coordinator: ~p entering ~p after "
+ "leader election", [StreamId, Phase]),
+ {State#?MODULE{streams = Streams#{StreamId => SState},
+ monitors = Monitors}, ok,
+ [{monitor, process, NewLeaderPid},
+ {aux, {phase, StreamId, Phase, PhaseArgs}}]};
+apply(_Meta, {replicas_stopped, StreamId}, #?MODULE{streams = Streams} = State) ->
+ case maps:get(StreamId, Streams, undefined) of
+ undefined ->
+ {State, {error, not_found}, []};
+ #{conf := Conf0} = SState0 ->
+ Phase = phase_check_quorum,
+ Conf = Conf0#{replica_pids => []},
+ PhaseArgs = [Conf],
+ SState = SState0#{conf => Conf,
+ phase => Phase,
+ phase_args => PhaseArgs},
+            rabbit_log:info("rabbit_stream_coordinator: ~p all replicas have been stopped, "
+                            "checking whether a quorum is available", [StreamId]),
+ {State#?MODULE{streams = Streams#{StreamId => SState}}, ok,
+ [{aux, {phase, StreamId, Phase, PhaseArgs}}]}
+ end;
+apply(_Meta, {stream_updated, #{name := StreamId} = Conf}, #?MODULE{streams = Streams} = State) ->
+ SState0 = maps:get(StreamId, Streams),
+ Phase = phase_repair_mnesia,
+ PhaseArgs = [update, Conf],
+ SState = SState0#{conf => Conf,
+ phase => Phase,
+ phase_args => PhaseArgs},
+ rabbit_log:debug("rabbit_stream_coordinator: ~p entering ~p after"
+ " stream_updated", [StreamId, Phase]),
+ {State#?MODULE{streams = Streams#{StreamId => SState}}, ok,
+ [{aux, {phase, StreamId, Phase, PhaseArgs}}]};
+apply(_, {timeout, {pipeline, Cmds}}, State) ->
+ Actions = [{mod_call, ra, pipeline_command, [{?MODULE, node()}, Cmd]} || Cmd <- Cmds],
+ {State, ok, Actions};
+apply(_, {timeout, {aux, Cmd}}, State) ->
+ {State, ok, [{aux, Cmd}]};
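+%% Commands that carry 'from' inside their payload (e.g. pipelined retries
+%% built with from => undefined) are re-dispatched with 'from' promoted
+%% into the machine metadata.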
+apply(Meta, {_, #{from := From}} = Cmd, State) ->
+ ?MODULE:apply(Meta#{from => From}, Cmd, State).
+
+state_enter(leader, #?MODULE{streams = Streams, monitors = Monitors}) ->
+ maps:fold(fun(_, #{conf := #{name := StreamId},
+ pending_replicas := Pending,
+ state := State,
+ phase := Phase,
+ phase_args := PhaseArgs}, Acc) ->
+ restart_aux_phase(State, Phase, PhaseArgs, StreamId) ++
+ pipeline_restart_replica_cmds(StreamId, Pending) ++
+ Acc
+ end, [{monitor, process, P} || P <- maps:keys(Monitors)], Streams);
+state_enter(follower, #?MODULE{monitors = Monitors}) ->
+ [{monitor, process, P} || P <- maps:keys(Monitors)];
+state_enter(recover, _) ->
+ put('$rabbit_vm_category', ?MODULE),
+ [];
+state_enter(_, _) ->
+ [].
+
+restart_aux_phase(running, _, _, _) ->
+ [];
+restart_aux_phase(_State, Phase, PhaseArgs, StreamId) ->
+ [{aux, {phase, StreamId, Phase, PhaseArgs}}].
+
+pipeline_restart_replica_cmds(StreamId, Pending) ->
+ [{timer, {pipeline, [{start_replica, #{stream_id => StreamId,
+ node => Node,
+ from => undefined,
+ retries => 1}}
+ || Node <- Pending]}, ?RESTART_TIMEOUT}].
+
+tick(_Ts, _State) ->
+ [{aux, maybe_resize_coordinator_cluster}].
+
+maybe_resize_coordinator_cluster() ->
+ spawn(fun() ->
+ case ra:members({?MODULE, node()}) of
+ {_, Members, _} ->
+ MemberNodes = [Node || {_, Node} <- Members],
+ Running = rabbit_mnesia:cluster_nodes(running),
+ All = rabbit_mnesia:cluster_nodes(all),
+ case Running -- MemberNodes of
+ [] ->
+ ok;
+ New ->
+                             rabbit_log:warning("New RabbitMQ node(s) detected, "
+                                                "adding stream coordinator member(s) on: ~p", [New]),
+ add_members(Members, New)
+ end,
+ case MemberNodes -- All of
+ [] ->
+ ok;
+ Old ->
+                             rabbit_log:warning("RabbitMQ node(s) removed from the cluster, "
+                                                "deleting stream coordinator member(s) on: ~p", [Old]),
+ remove_members(Members, Old)
+ end;
+ _ ->
+ ok
+ end
+ end).
+
+add_members(_, []) ->
+ ok;
+add_members(Members, [Node | Nodes]) ->
+ Conf = make_ra_conf(Node, [N || {_, N} <- Members]),
+ case ra:start_server(Conf) of
+ ok ->
+ case ra:add_member(Members, {?MODULE, Node}) of
+ {ok, NewMembers, _} ->
+ add_members(NewMembers, Nodes);
+ _ ->
+ add_members(Members, Nodes)
+ end;
+ Error ->
+ rabbit_log:warning("Stream coordinator failed to start on node ~p : ~p",
+ [Node, Error]),
+ add_members(Members, Nodes)
+ end.
+
+remove_members(_, []) ->
+ ok;
+remove_members(Members, [Node | Nodes]) ->
+ case ra:remove_member(Members, {?MODULE, Node}) of
+ {ok, NewMembers, _} ->
+ remove_members(NewMembers, Nodes);
+ _ ->
+ remove_members(Members, Nodes)
+ end.
+
+init_aux(_Name) ->
+ {#{}, undefined}.
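+
+%% The aux state is {PhaseMonitors, ResizerPid}: each phase runs in a
+%% spawned process monitored via {monitor, process, aux, Pid}, and a
+%% non-normal exit re-schedules the phase after ?PHASE_RETRY_TIMEOUT.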
+
+%% TODO: ensure a dead writer is eventually restarted as a replica,
+%% possibly with an increasing backoff
+handle_aux(leader, _, maybe_resize_coordinator_cluster, {Monitors, undefined}, LogState, _) ->
+ Pid = maybe_resize_coordinator_cluster(),
+ {no_reply, {Monitors, Pid}, LogState, [{monitor, process, aux, Pid}]};
+handle_aux(leader, _, maybe_resize_coordinator_cluster, AuxState, LogState, _) ->
+ %% Coordinator resizing is still happening, let's ignore this tick event
+ {no_reply, AuxState, LogState};
+handle_aux(leader, _, {down, Pid, _}, {Monitors, Pid}, LogState, _) ->
+ %% Coordinator resizing has finished
+ {no_reply, {Monitors, undefined}, LogState};
+handle_aux(leader, _, {phase, _, Fun, Args} = Cmd, {Monitors, Coordinator}, LogState, _) ->
+ Pid = erlang:apply(?MODULE, Fun, Args),
+ Actions = [{monitor, process, aux, Pid}],
+ {no_reply, {maps:put(Pid, Cmd, Monitors), Coordinator}, LogState, Actions};
+handle_aux(leader, _, {down, Pid, normal}, {Monitors, Coordinator}, LogState, _) ->
+ {no_reply, {maps:remove(Pid, Monitors), Coordinator}, LogState};
+handle_aux(leader, _, {down, Pid, Reason}, {Monitors0, Coordinator}, LogState, _) ->
+ %% The phase has failed, let's retry it
+ case maps:get(Pid, Monitors0) of
+ {phase, StreamId, phase_start_new_leader, Args} ->
+ rabbit_log:warning("Error while starting new leader for stream queue ~p, "
+ "restarting election: ~p", [StreamId, Reason]),
+ Monitors = maps:remove(Pid, Monitors0),
+ Cmd = {phase, StreamId, phase_check_quorum, Args},
+ {no_reply, {Monitors, Coordinator}, LogState, [{timer, {aux, Cmd}, ?PHASE_RETRY_TIMEOUT}]};
+ {phase, StreamId, Fun, _} = Cmd ->
+ rabbit_log:warning("Error while executing coordinator phase ~p for stream queue ~p ~p",
+ [Fun, StreamId, Reason]),
+ Monitors = maps:remove(Pid, Monitors0),
+ {no_reply, {Monitors, Coordinator}, LogState, [{timer, {aux, Cmd}, ?PHASE_RETRY_TIMEOUT}]}
+ end;
+handle_aux(_, _, _, AuxState, LogState, _) ->
+ {no_reply, AuxState, LogState}.
+
+reply_and_run_pending(From, StreamId, Reply, WrapReply, Actions0, #?MODULE{streams = Streams} = State) ->
+ #{pending_cmds := Pending} = SState0 = maps:get(StreamId, Streams),
+ AuxActions = [{mod_call, ra, pipeline_command, [{?MODULE, node()}, Cmd]}
+ || Cmd <- Pending],
+ SState = maps:put(pending_cmds, [], SState0),
+ Actions = case From of
+ undefined ->
+ AuxActions ++ Actions0;
+ _ ->
+ wrap_reply(From, WrapReply) ++ AuxActions ++ Actions0
+ end,
+ {State#?MODULE{streams = Streams#{StreamId => SState}}, Reply, Actions}.
+
+wrap_reply(From, Reply) ->
+ [{reply, From, {wrap_reply, Reply}}].
+
+add_pending_cmd(From, {CmdName, CmdMap}, #{pending_cmds := Pending0} = StreamState) ->
+    %% When the command is delete_cluster, drop any pending 'down' events so
+    %% we don't trigger a leader election or an automatic replica restart for
+    %% a stream that is about to be deleted
+ Pending = case CmdName of
+ delete_cluster ->
+ lists:filter(fun({down, _, _}) ->
+ false;
+ (_) ->
+ true
+ end, Pending0);
+ _ ->
+ Pending0
+ end,
+ maps:put(pending_cmds, Pending ++ [{CmdName, maps:put(from, From, CmdMap)}],
+ StreamState).
+
+clear_stream_state(StreamState) ->
+ StreamState#{reply_to => undefined,
+ state => running,
+ phase => undefined,
+ phase_args => undefined}.
+
+update_stream_state(From, State, Phase, PhaseArgs, StreamState) ->
+ StreamState#{reply_to => From,
+ state => State,
+ phase => Phase,
+ phase_args => PhaseArgs}.
+
+phase_start_replica(Node, #{name := StreamId} = Conf0,
+ Retries) ->
+ spawn(
+ fun() ->
+      %% If a new leader hasn't been elected yet, this will fail with a
+      %% badmatch because get_reader_context returns a 'noproc' error. The
+      %% unhandled failure crashes this monitored process, which is
+      %% restarted later.
+      %% TODO: do we want that crash in the log? We might need a try/catch
+      %% to log a friendlier message instead, as the failure is 'expected'.
+      %% Verifying first that the leader is alive would still leave a race.
+ try
+ case osiris_replica:start(Node, Conf0) of
+ {ok, Pid} ->
+ ra:pipeline_command({?MODULE, node()},
+ {start_replica_reply, StreamId, Pid});
+ {error, already_present} ->
+ ra:pipeline_command({?MODULE, node()}, {phase_finished, StreamId, ok});
+ {error, {already_started, _}} ->
+ ra:pipeline_command({?MODULE, node()}, {phase_finished, StreamId, ok});
+ {error, Reason} = Error ->
+ rabbit_log:warning("Error while starting replica for ~p : ~p",
+ [maps:get(name, Conf0), Reason]),
+ ra:pipeline_command({?MODULE, node()},
+ {start_replica_failed, StreamId, Node, Retries, Error})
+ end
+ catch _:E->
+ rabbit_log:warning("Error while starting replica for ~p : ~p",
+ [maps:get(name, Conf0), E]),
+ ra:pipeline_command({?MODULE, node()},
+ {start_replica_failed, StreamId, Node, Retries, {error, E}})
+ end
+ end).
+
+phase_delete_replica(Node, Conf) ->
+ spawn(
+ fun() ->
+ ok = osiris_replica:delete(Node, Conf),
+ ra:pipeline_command({?MODULE, node()}, {stream_updated, Conf})
+ end).
+
+phase_stop_replicas(#{replica_nodes := Replicas,
+ name := StreamId} = Conf) ->
+ spawn(
+ fun() ->
+ [try
+ osiris_replica:stop(Node, Conf)
+ catch _:{{nodedown, _}, _} ->
+               %% The node may be the old leader and still be down; that is expected.
+ ok
+ end || Node <- Replicas],
+ ra:pipeline_command({?MODULE, node()}, {replicas_stopped, StreamId})
+ end).
+
+phase_start_new_leader(#{name := StreamId, leader_node := Node, leader_pid := LPid} = Conf) ->
+ spawn(fun() ->
+ osiris_replica:stop(Node, Conf),
+ %% If the start fails, the monitor will capture the crash and restart it
+ case osiris_writer:start(Conf) of
+ {ok, Pid} ->
+ ra:pipeline_command({?MODULE, node()},
+ {leader_elected, StreamId, Pid});
+ {error, already_present} ->
+ ra:pipeline_command({?MODULE, node()},
+ {leader_elected, StreamId, LPid});
+ {error, {already_started, Pid}} ->
+ ra:pipeline_command({?MODULE, node()},
+ {leader_elected, StreamId, Pid})
+ end
+ end).
+
+phase_check_quorum(#{name := StreamId,
+ epoch := Epoch,
+ replica_nodes := Nodes} = Conf) ->
+ spawn(fun() ->
+ Offsets = find_replica_offsets(Conf),
+ case is_quorum(length(Nodes) + 1, length(Offsets)) of
+ true ->
+ ra:pipeline_command({?MODULE, node()},
+ {start_leader_election, StreamId, Epoch + 1, Offsets});
+ false ->
+ %% Let's crash this process so the monitor will restart it
+ exit({not_enough_quorum, StreamId})
+ end
+ end).
+
+find_replica_offsets(#{replica_nodes := Nodes,
+ leader_node := Leader} = Conf) ->
+ lists:foldl(
+ fun(Node, Acc) ->
+ try
+              %% osiris_log:overview/1 needs the log directory, so call
+              %% log_overview/1 on the remote node to resolve it there
+ case rpc:call(Node, rabbit, is_running, []) of
+ false ->
+ Acc;
+ true ->
+ case rpc:call(Node, ?MODULE, log_overview, [Conf]) of
+ {badrpc, nodedown} ->
+ Acc;
+ {_Range, Offsets} ->
+ [{Node, select_highest_offset(Offsets)} | Acc]
+ end
+ end
+ catch
+ _:_ ->
+ Acc
+ end
+ end, [], Nodes ++ [Leader]).
+
+select_highest_offset([]) ->
+ empty;
+select_highest_offset(Offsets) ->
+ lists:last(Offsets).
+
+log_overview(Config) ->
+ Dir = osiris_log:directory(Config),
+ osiris_log:overview(Dir).
+
+find_max_offset(Offsets) ->
+ [{Node, _} | _] = lists:sort(fun({_, {Ao, E}}, {_, {Bo, E}}) ->
+ Ao >= Bo;
+ ({_, {_, Ae}}, {_, {_, Be}}) ->
+ Ae >= Be;
+ ({_, empty}, _) ->
+ false;
+ (_, {_, empty}) ->
+ true
+ end, Offsets),
+ Node.
+
+is_quorum(1, 1) ->
+ true;
+is_quorum(NumReplicas, NumAlive) ->
+ NumAlive >= ((NumReplicas div 2) + 1).
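+
+%% For example, with 3 members a quorum requires
+%% NumAlive >= (3 div 2) + 1 = 2, so is_quorum(3, 2) is true while
+%% is_quorum(3, 1) is false.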
+
+phase_repair_mnesia(new, Q) ->
+ spawn(fun() ->
+ Reply = rabbit_amqqueue:internal_declare(Q, false),
+ #{name := StreamId} = amqqueue:get_type_state(Q),
+ ra:pipeline_command({?MODULE, node()}, {phase_finished, StreamId, Reply})
+ end);
+
+phase_repair_mnesia(update, #{reference := QName,
+ leader_pid := LeaderPid,
+ name := StreamId} = Conf) ->
+ Fun = fun (Q) ->
+ amqqueue:set_type_state(amqqueue:set_pid(Q, LeaderPid), Conf)
+ end,
+ spawn(fun() ->
+ case rabbit_misc:execute_mnesia_transaction(
+ fun() ->
+ rabbit_amqqueue:update(QName, Fun)
+ end) of
+ not_found ->
+ %% This can happen during recovery
+ [Q] = mnesia:dirty_read(rabbit_durable_queue, QName),
+ rabbit_amqqueue:ensure_rabbit_queue_record_is_initialized(Fun(Q));
+ _ ->
+ ok
+ end,
+ ra:pipeline_command({?MODULE, node()}, {phase_finished, StreamId, ok})
+ end).
+
+phase_start_cluster(Q0) ->
+ spawn(
+ fun() ->
+ case osiris:start_cluster(amqqueue:get_type_state(Q0)) of
+ {ok, #{leader_pid := Pid} = Conf} ->
+ Q = amqqueue:set_type_state(amqqueue:set_pid(Q0, Pid), Conf),
+ ra:pipeline_command({?MODULE, node()}, {start_cluster_reply, Q});
+ {error, {already_started, _}} ->
+ ra:pipeline_command({?MODULE, node()}, {start_cluster_finished, {error, already_started}})
+ end
+ end).
+
+phase_delete_cluster(#{name := StreamId,
+ reference := QName} = Conf, ActingUser) ->
+ spawn(
+ fun() ->
+ ok = osiris:delete_cluster(Conf),
+ _ = rabbit_amqqueue:internal_delete(QName, ActingUser),
+ ra:pipeline_command({?MODULE, node()}, {delete_cluster_reply, StreamId})
+ end).
+
+format_ra_event(ServerId, Evt) ->
+ {stream_coordinator_event, ServerId, Evt}.
+
+make_ra_conf(Node, Nodes) ->
+ UId = ra:new_uid(ra_lib:to_binary(?MODULE)),
+ Formatter = {?MODULE, format_ra_event, []},
+ Members = [{?MODULE, N} || N <- Nodes],
+ TickTimeout = application:get_env(rabbit, stream_tick_interval,
+ ?TICK_TIMEOUT),
+ #{cluster_name => ?MODULE,
+ id => {?MODULE, Node},
+ uid => UId,
+ friendly_name => atom_to_list(?MODULE),
+ metrics_key => ?MODULE,
+ initial_members => Members,
+ log_init_args => #{uid => UId},
+ tick_timeout => TickTimeout,
+ machine => {module, ?MODULE, #{}},
+ ra_event_formatter => Formatter}.
+
+add_unique(Node, Nodes) ->
+ case lists:member(Node, Nodes) of
+ true ->
+ Nodes;
+ _ ->
+ [Node | Nodes]
+ end.
+
+delete_replica_pid(Node, ReplicaPids) ->
+ lists:partition(fun(P) -> node(P) =/= Node end, ReplicaPids).
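+
+%% Note the partition order: the first element keeps the pids running on
+%% other nodes, the second holds the pid(s) local to Node, if any.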
+
+apply_leader_locator_strategy(#{leader_locator_strategy := <<"client-local">>} = Conf, _) ->
+ Conf;
+apply_leader_locator_strategy(#{leader_node := Leader,
+ replica_nodes := Replicas0,
+ leader_locator_strategy := <<"random">>,
+ name := StreamId} = Conf, _) ->
+ Replicas = [Leader | Replicas0],
+ ClusterSize = length(Replicas),
+ Hash = erlang:phash2(StreamId),
+ Pos = (Hash rem ClusterSize) + 1,
+ NewLeader = lists:nth(Pos, Replicas),
+ NewReplicas = lists:delete(NewLeader, Replicas),
+ Conf#{leader_node => NewLeader,
+ replica_nodes => NewReplicas};
+apply_leader_locator_strategy(#{leader_node := Leader,
+ replica_nodes := Replicas0,
+ leader_locator_strategy := <<"least-leaders">>} = Conf,
+ Streams) ->
+ Replicas = [Leader | Replicas0],
+ Counters0 = maps:from_list([{R, 0} || R <- Replicas]),
+ Counters = maps:to_list(maps:fold(fun(_Key, #{conf := #{leader_node := L}}, Acc) ->
+ maps:update_with(L, fun(V) -> V + 1 end, 0, Acc)
+ end, Counters0, Streams)),
+ Ordered = lists:sort(fun({_, V1}, {_, V2}) ->
+ V1 =< V2
+ end, Counters),
+    %% The counters may include nodes that are not in the replica list when
+    %% the initial cluster size is smaller than the full cluster size.
+    %% Select the first node that is actually a replica.
+ NewLeader = select_first_matching_node(Ordered, Replicas),
+ NewReplicas = lists:delete(NewLeader, Replicas),
+ Conf#{leader_node => NewLeader,
+ replica_nodes => NewReplicas}.
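+
+%% An illustrative example with hypothetical members [n1, n2, n3]: for a
+%% stream named <<"s1">> the <<"random">> strategy picks
+%% lists:nth((erlang:phash2(<<"s1">>) rem 3) + 1, [n1, n2, n3]), while
+%% <<"least-leaders">> picks the member currently leading the fewest
+%% streams.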
+
+select_first_matching_node([{N, _} | Rest], Replicas) ->
+ case lists:member(N, Replicas) of
+ true -> N;
+ false -> select_first_matching_node(Rest, Replicas)
+ end.
diff --git a/deps/rabbit/src/rabbit_stream_queue.erl b/deps/rabbit/src/rabbit_stream_queue.erl
new file mode 100644
index 0000000000..4e428495b0
--- /dev/null
+++ b/deps/rabbit/src/rabbit_stream_queue.erl
@@ -0,0 +1,734 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at https://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% Copyright (c) 2012-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_stream_queue).
+
+-behaviour(rabbit_queue_type).
+
+-export([is_enabled/0,
+ declare/2,
+ delete/4,
+ purge/1,
+ policy_changed/1,
+ recover/2,
+ is_recoverable/1,
+ consume/3,
+ cancel/5,
+ handle_event/2,
+ deliver/2,
+ settle/4,
+ credit/4,
+ dequeue/4,
+ info/2,
+ init/1,
+ close/1,
+ update/2,
+ state_info/1,
+ stat/1,
+ capabilities/0]).
+
+-export([set_retention_policy/3]).
+-export([add_replica/3,
+ delete_replica/3]).
+-export([format_osiris_event/2]).
+-export([update_stream_conf/1]).
+
+-include("rabbit.hrl").
+-include("amqqueue.hrl").
+
+-define(INFO_KEYS, [name, durable, auto_delete, arguments, leader, members, online, state,
+ messages, messages_ready, messages_unacknowledged, committed_offset,
+ policy, operator_policy, effective_policy_definition, type]).
+
+-type appender_seq() :: non_neg_integer().
+
+-record(stream, {name :: rabbit_types:r('queue'),
+ credit :: integer(),
+ max :: non_neg_integer(),
+ start_offset = 0 :: non_neg_integer(),
+ listening_offset = 0 :: non_neg_integer(),
+ log :: undefined | osiris_log:state()}).
+
+-record(stream_client, {name :: term(),
+ leader :: pid(),
+ next_seq = 1 :: non_neg_integer(),
+ correlation = #{} :: #{appender_seq() => term()},
+ soft_limit :: non_neg_integer(),
+ slow = false :: boolean(),
+ readers = #{} :: #{term() => #stream{}}
+ }).
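+
+%% A sketch of the client state: 'correlation' maps osiris appender
+%% sequence numbers to channel msg_seq_no values so that osiris_written
+%% events can be turned into publisher confirms, while 'slow' records
+%% whether credit_flow blocked the channel once 'correlation' grew past
+%% 'soft_limit'.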
+
+-import(rabbit_queue_type_util, [args_policy_lookup/3]).
+
+-type client() :: #stream_client{}.
+
+-spec is_enabled() -> boolean().
+is_enabled() ->
+ rabbit_feature_flags:is_enabled(stream_queue).
+
+-spec declare(amqqueue:amqqueue(), node()) ->
+ {'new' | 'existing', amqqueue:amqqueue()} |
+ {protocol_error, Type :: atom(), Reason :: string(), Args :: term()}.
+declare(Q0, Node) when ?amqqueue_is_stream(Q0) ->
+ case rabbit_queue_type_util:run_checks(
+ [fun rabbit_queue_type_util:check_auto_delete/1,
+ fun rabbit_queue_type_util:check_exclusive/1,
+ fun rabbit_queue_type_util:check_non_durable/1],
+ Q0) of
+ ok ->
+ start_cluster(Q0, Node);
+ Err ->
+ Err
+ end.
+
+start_cluster(Q0, Node) ->
+ Arguments = amqqueue:get_arguments(Q0),
+ QName = amqqueue:get_name(Q0),
+ Opts = amqqueue:get_options(Q0),
+ ActingUser = maps:get(user, Opts, ?UNKNOWN_USER),
+ Conf0 = make_stream_conf(Node, Q0),
+ case rabbit_stream_coordinator:start_cluster(
+ amqqueue:set_type_state(Q0, Conf0)) of
+ {ok, {error, already_started}, _} ->
+            {protocol_error, precondition_failed,
+             "stream queue with this name is already started on node '~s'",
+             [Node]};
+ {ok, {created, Q}, _} ->
+ rabbit_event:notify(queue_created,
+ [{name, QName},
+ {durable, true},
+ {auto_delete, false},
+ {arguments, Arguments},
+ {user_who_performed_action,
+ ActingUser}]),
+ {new, Q};
+ {ok, {error, Error}, _} ->
+ _ = rabbit_amqqueue:internal_delete(QName, ActingUser),
+ {protocol_error, internal_error, "Cannot declare a queue '~s' on node '~s': ~255p",
+ [rabbit_misc:rs(QName), node(), Error]};
+ {ok, {existing, Q}, _} ->
+ {existing, Q};
+ {error, coordinator_unavailable} ->
+ _ = rabbit_amqqueue:internal_delete(QName, ActingUser),
+ {protocol_error, internal_error,
+ "Cannot declare a queue '~s' on node '~s': coordinator unavailable",
+ [rabbit_misc:rs(QName), node()]}
+ end.
+
+-spec delete(amqqueue:amqqueue(), boolean(),
+ boolean(), rabbit_types:username()) ->
+ rabbit_types:ok(non_neg_integer()) |
+ rabbit_types:error(in_use | not_empty).
+delete(Q, _IfUnused, _IfEmpty, ActingUser) ->
+ Name = maps:get(name, amqqueue:get_type_state(Q)),
+ {ok, Reply, _} = rabbit_stream_coordinator:delete_cluster(Name, ActingUser),
+ Reply.
+
+-spec purge(amqqueue:amqqueue()) ->
+ {ok, non_neg_integer()} | {error, term()}.
+purge(_) ->
+ {error, not_supported}.
+
+-spec policy_changed(amqqueue:amqqueue()) -> 'ok'.
+policy_changed(Q) ->
+ Name = maps:get(name, amqqueue:get_type_state(Q)),
+ _ = rabbit_stream_coordinator:policy_changed(Name),
+ ok.
+
+stat(_) ->
+ {ok, 0, 0}.
+
+consume(Q, #{prefetch_count := 0}, _)
+ when ?amqqueue_is_stream(Q) ->
+ {protocol_error, precondition_failed, "consumer prefetch count is not set for '~s'",
+ [rabbit_misc:rs(amqqueue:get_name(Q))]};
+consume(Q, #{no_ack := true}, _)
+ when ?amqqueue_is_stream(Q) ->
+ {protocol_error, not_implemented,
+ "automatic acknowledgement not supported by stream queues ~s",
+ [rabbit_misc:rs(amqqueue:get_name(Q))]};
+consume(Q, #{limiter_active := true}, _State)
+ when ?amqqueue_is_stream(Q) ->
+ {error, global_qos_not_supported_for_queue_type};
+consume(Q, Spec, QState0) when ?amqqueue_is_stream(Q) ->
+ %% Messages should include the offset as a custom header.
+ case check_queue_exists_in_local_node(Q) of
+ ok ->
+ #{no_ack := NoAck,
+ channel_pid := ChPid,
+ prefetch_count := ConsumerPrefetchCount,
+ consumer_tag := ConsumerTag,
+ exclusive_consume := ExclusiveConsume,
+ args := Args,
+ ok_msg := OkMsg} = Spec,
+ QName = amqqueue:get_name(Q),
+ Offset = case rabbit_misc:table_lookup(Args, <<"x-stream-offset">>) of
+ undefined ->
+ next;
+ {_, <<"first">>} ->
+ first;
+ {_, <<"last">>} ->
+ last;
+ {_, <<"next">>} ->
+ next;
+ {timestamp, V} ->
+ {timestamp, V};
+ {_, V} ->
+ V
+ end,
+ rabbit_core_metrics:consumer_created(ChPid, ConsumerTag, ExclusiveConsume,
+ not NoAck, QName,
+ ConsumerPrefetchCount, false,
+ up, Args),
+            %% FIXME: the reply needs to be sent before the stream begins
+            %% delivering; really it should be sent by the stream queue
+            %% process, as classic queues do
+ maybe_send_reply(ChPid, OkMsg),
+ QState = begin_stream(QState0, Q, ConsumerTag, Offset,
+ ConsumerPrefetchCount),
+ {ok, QState, []};
+ Err ->
+ Err
+ end.
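+
+%% Per the clauses above, x-stream-offset accepts <<"first">>, <<"last">>,
+%% <<"next">>, a timestamp field or a numeric offset. For instance, a
+%% hypothetical consume argument table of
+%% [{<<"x-stream-offset">>, long, 5000}] attaches the consumer at offset
+%% 5000.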
+
+get_local_pid(#{leader_pid := Pid}) when node(Pid) == node() ->
+ Pid;
+get_local_pid(#{replica_pids := ReplicaPids}) ->
+ [Local | _] = lists:filter(fun(Pid) ->
+ node(Pid) == node()
+ end, ReplicaPids),
+ Local.
+
+begin_stream(#stream_client{readers = Readers0} = State,
+ Q, Tag, Offset, Max) ->
+ LocalPid = get_local_pid(amqqueue:get_type_state(Q)),
+ {ok, Seg0} = osiris:init_reader(LocalPid, Offset),
+ NextOffset = osiris_log:next_offset(Seg0) - 1,
+ osiris:register_offset_listener(LocalPid, NextOffset),
+ %% TODO: avoid double calls to the same process
+ StartOffset = case Offset of
+ first -> NextOffset;
+ last -> NextOffset;
+ next -> NextOffset;
+ {timestamp, _} -> NextOffset;
+ _ -> Offset
+ end,
+ Str0 = #stream{name = amqqueue:get_name(Q),
+ credit = Max,
+ start_offset = StartOffset,
+ listening_offset = NextOffset,
+ log = Seg0,
+ max = Max},
+ State#stream_client{readers = Readers0#{Tag => Str0}}.
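+
+%% start_offset is later used by stream_entries/4 to filter out records
+%% below the requested attach point; for the symbolic specs the position
+%% chosen by osiris:init_reader/2 is taken as-is.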
+
+cancel(_Q, ConsumerTag, OkMsg, ActingUser, #stream_client{readers = Readers0,
+ name = QName} = State) ->
+ Readers = maps:remove(ConsumerTag, Readers0),
+ rabbit_core_metrics:consumer_deleted(self(), ConsumerTag, QName),
+ rabbit_event:notify(consumer_deleted, [{consumer_tag, ConsumerTag},
+ {channel, self()},
+ {queue, QName},
+ {user_who_performed_action, ActingUser}]),
+ maybe_send_reply(self(), OkMsg),
+ {ok, State#stream_client{readers = Readers}}.
+
+credit(CTag, Credit, Drain, #stream_client{readers = Readers0,
+ name = Name,
+ leader = Leader} = State) ->
+ {Readers1, Msgs} = case Readers0 of
+ #{CTag := #stream{credit = Credit0} = Str0} ->
+ Str1 = Str0#stream{credit = Credit0 + Credit},
+ {Str, Msgs0} = stream_entries(Name, Leader, Str1),
+ {Readers0#{CTag => Str}, Msgs0};
+ _ ->
+ {Readers0, []}
+ end,
+ {Readers, Actions} =
+ case Drain of
+ true ->
+ case Readers1 of
+ #{CTag := #stream{credit = Credit1} = Str2} ->
+                            %% Readers1 already holds the updated reader for CTag
+                            {Readers1#{CTag => Str2#stream{credit = 0}}, [{send_drained, {CTag, Credit1}}]};
+ _ ->
+ {Readers1, []}
+ end;
+ false ->
+ {Readers1, []}
+ end,
+ {State#stream_client{readers = Readers}, [{send_credit_reply, length(Msgs)},
+ {deliver, CTag, true, Msgs}] ++ Actions}.
+
+deliver(QSs, #delivery{confirm = Confirm} = Delivery) ->
+ lists:foldl(
+ fun({_Q, stateless}, {Qs, Actions}) ->
+ %% TODO what do we do with stateless?
+ %% QRef = amqqueue:get_pid(Q),
+ %% ok = rabbit_fifo_client:untracked_enqueue(
+ %% [QRef], Delivery#delivery.message),
+ {Qs, Actions};
+ ({Q, S0}, {Qs, Actions}) ->
+ S = deliver(Confirm, Delivery, S0),
+ {[{Q, S} | Qs], Actions}
+ end, {[], []}, QSs).
+
+deliver(_Confirm, #delivery{message = Msg, msg_seq_no = MsgId},
+ #stream_client{name = Name,
+ leader = LeaderPid,
+ next_seq = Seq,
+ correlation = Correlation0,
+ soft_limit = SftLmt,
+ slow = Slow0} = State) ->
+ ok = osiris:write(LeaderPid, Seq, msg_to_iodata(Msg)),
+ Correlation = case MsgId of
+ undefined ->
+ Correlation0;
+ _ when is_number(MsgId) ->
+ Correlation0#{Seq => MsgId}
+ end,
+ Slow = case maps:size(Correlation) >= SftLmt of
+ true when not Slow0 ->
+ credit_flow:block(Name),
+ true;
+ Bool ->
+ Bool
+ end,
+ State#stream_client{next_seq = Seq + 1,
+ correlation = Correlation,
+ slow = Slow}.
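+
+%% Writes are asynchronous at this layer: osiris:write/3 returns
+%% immediately and confirms arrive later as an osiris_written event (see
+%% handle_event/2 below), at which point the correlated msg_seq_nos are
+%% settled.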
+-spec dequeue(_, _, _, client()) -> no_return().
+dequeue(_, _, _, #stream_client{name = Name}) ->
+ {protocol_error, not_implemented, "basic.get not supported by stream queues ~s",
+ [rabbit_misc:rs(Name)]}.
+
+handle_event({osiris_written, From, Corrs}, State = #stream_client{correlation = Correlation0,
+ soft_limit = SftLmt,
+ slow = Slow0,
+ name = Name}) ->
+ MsgIds = maps:values(maps:with(Corrs, Correlation0)),
+ Correlation = maps:without(Corrs, Correlation0),
+ Slow = case maps:size(Correlation) < SftLmt of
+ true when Slow0 ->
+ credit_flow:unblock(Name),
+ false;
+ _ ->
+ Slow0
+ end,
+ {ok, State#stream_client{correlation = Correlation,
+ slow = Slow}, [{settled, From, MsgIds}]};
+handle_event({osiris_offset, _From, _Offs}, State = #stream_client{leader = Leader,
+ readers = Readers0,
+ name = Name}) ->
+    %% the offset in the event isn't actually needed: the current committed
+    %% offset is read from an atomic counter
+ {Readers, TagMsgs} = maps:fold(
+ fun (Tag, Str0, {Acc, TM}) ->
+ {Str, Msgs} = stream_entries(Name, Leader, Str0),
+                               %% HACK: it would be cleaner to simply return
+                               %% the messages, but that is tricky as acks
+                               %% and credits also evaluate the stream
+                               %% gen_server:cast(self(), {stream_delivery, Tag, Msgs}),
+ {Acc#{Tag => Str}, [{Tag, Leader, Msgs} | TM]}
+ end, {#{}, []}, Readers0),
+ Ack = true,
+ Deliveries = [{deliver, Tag, Ack, OffsetMsg}
+ || {Tag, _LeaderPid, OffsetMsg} <- TagMsgs],
+ {ok, State#stream_client{readers = Readers}, Deliveries}.
+
+is_recoverable(Q) ->
+ Node = node(),
+ #{replica_nodes := Nodes,
+ leader_node := Leader} = amqqueue:get_type_state(Q),
+ lists:member(Node, Nodes ++ [Leader]).
+
+recover(_VHost, Queues) ->
+ lists:foldl(
+ fun (Q0, {R0, F0}) ->
+ {ok, Q} = recover(Q0),
+ {[Q | R0], F0}
+ end, {[], []}, Queues).
+
+settle(complete, CTag, MsgIds, #stream_client{readers = Readers0,
+ name = Name,
+ leader = Leader} = State) ->
+ Credit = length(MsgIds),
+ {Readers, Msgs} = case Readers0 of
+ #{CTag := #stream{credit = Credit0} = Str0} ->
+ Str1 = Str0#stream{credit = Credit0 + Credit},
+ {Str, Msgs0} = stream_entries(Name, Leader, Str1),
+ {Readers0#{CTag => Str}, Msgs0};
+ _ ->
+ {Readers0, []}
+ end,
+ {State#stream_client{readers = Readers}, [{deliver, CTag, true, Msgs}]};
+settle(_, _, _, #stream_client{name = Name}) ->
+ {protocol_error, not_implemented,
+ "basic.nack and basic.reject not supported by stream queues ~s",
+ [rabbit_misc:rs(Name)]}.
+
+info(Q, all_items) ->
+ info(Q, ?INFO_KEYS);
+info(Q, Items) ->
+ lists:foldr(fun(Item, Acc) ->
+ [{Item, i(Item, Q)} | Acc]
+ end, [], Items).
+
+i(name, Q) when ?is_amqqueue(Q) -> amqqueue:get_name(Q);
+i(durable, Q) when ?is_amqqueue(Q) -> amqqueue:is_durable(Q);
+i(auto_delete, Q) when ?is_amqqueue(Q) -> amqqueue:is_auto_delete(Q);
+i(arguments, Q) when ?is_amqqueue(Q) -> amqqueue:get_arguments(Q);
+i(leader, Q) when ?is_amqqueue(Q) ->
+ #{leader_node := Leader} = amqqueue:get_type_state(Q),
+ Leader;
+i(members, Q) when ?is_amqqueue(Q) ->
+ #{replica_nodes := Nodes} = amqqueue:get_type_state(Q),
+ Nodes;
+i(online, Q) ->
+ #{replica_pids := ReplicaPids,
+ leader_pid := LeaderPid} = amqqueue:get_type_state(Q),
+ [node(P) || P <- ReplicaPids ++ [LeaderPid], rabbit_misc:is_process_alive(P)];
+i(state, Q) when ?is_amqqueue(Q) ->
+    %% TODO: the stream coordinator should report the actual state
+ running;
+i(messages, Q) when ?is_amqqueue(Q) ->
+ QName = amqqueue:get_name(Q),
+ case ets:lookup(queue_coarse_metrics, QName) of
+ [{_, _, _, M, _}] ->
+ M;
+ [] ->
+ 0
+ end;
+i(messages_ready, Q) when ?is_amqqueue(Q) ->
+ QName = amqqueue:get_name(Q),
+ case ets:lookup(queue_coarse_metrics, QName) of
+ [{_, MR, _, _, _}] ->
+ MR;
+ [] ->
+ 0
+ end;
+i(messages_unacknowledged, Q) when ?is_amqqueue(Q) ->
+ QName = amqqueue:get_name(Q),
+ case ets:lookup(queue_coarse_metrics, QName) of
+ [{_, _, MU, _, _}] ->
+ MU;
+ [] ->
+ 0
+ end;
+i(committed_offset, Q) ->
+    %% TODO: should this be kept in a metrics table?
+ Data = osiris_counters:overview(),
+ maps:get(committed_offset,
+ maps:get({osiris_writer, amqqueue:get_name(Q)}, Data));
+i(policy, Q) ->
+ case rabbit_policy:name(Q) of
+ none -> '';
+ Policy -> Policy
+ end;
+i(operator_policy, Q) ->
+ case rabbit_policy:name_op(Q) of
+ none -> '';
+ Policy -> Policy
+ end;
+i(effective_policy_definition, Q) ->
+ case rabbit_policy:effective_definition(Q) of
+ undefined -> [];
+ Def -> Def
+ end;
+i(type, _) ->
+ stream;
+i(_, _) ->
+ ''.
+
+init(Q) when ?is_amqqueue(Q) ->
+ Leader = amqqueue:get_pid(Q),
+ {ok, SoftLimit} = application:get_env(rabbit, stream_messages_soft_limit),
+ #stream_client{name = amqqueue:get_name(Q),
+ leader = Leader,
+ soft_limit = SoftLimit}.
+
+close(#stream_client{readers = Readers}) ->
+ _ = maps:map(fun (_, #stream{log = Log}) ->
+ osiris_log:close(Log)
+ end, Readers),
+ ok.
+
+update(_, State) ->
+ State.
+
+state_info(_) ->
+ #{}.
+
+set_retention_policy(Name, VHost, Policy) ->
+ case rabbit_amqqueue:check_max_age(Policy) of
+ {error, _} = E ->
+ E;
+ MaxAge ->
+ QName = rabbit_misc:r(VHost, queue, Name),
+ Fun = fun(Q) ->
+ Conf = amqqueue:get_type_state(Q),
+ amqqueue:set_type_state(Q, Conf#{max_age => MaxAge})
+ end,
+ case rabbit_misc:execute_mnesia_transaction(
+ fun() -> rabbit_amqqueue:update(QName, Fun) end) of
+ not_found ->
+ {error, not_found};
+ _ ->
+ ok
+ end
+ end.
+
+add_replica(VHost, Name, Node) ->
+ QName = rabbit_misc:r(VHost, queue, Name),
+ case rabbit_amqqueue:lookup(QName) of
+ {ok, Q} when ?amqqueue_is_classic(Q) ->
+ {error, classic_queue_not_supported};
+ {ok, Q} when ?amqqueue_is_quorum(Q) ->
+ {error, quorum_queue_not_supported};
+ {ok, Q} when ?amqqueue_is_stream(Q) ->
+ case lists:member(Node, rabbit_mnesia:cluster_nodes(running)) of
+ false ->
+ {error, node_not_running};
+ true ->
+ #{name := StreamId} = amqqueue:get_type_state(Q),
+ {ok, Reply, _} = rabbit_stream_coordinator:add_replica(StreamId, Node),
+ Reply
+ end;
+ E ->
+ E
+ end.
+
+delete_replica(VHost, Name, Node) ->
+ QName = rabbit_misc:r(VHost, queue, Name),
+ case rabbit_amqqueue:lookup(QName) of
+ {ok, Q} when ?amqqueue_is_classic(Q) ->
+ {error, classic_queue_not_supported};
+ {ok, Q} when ?amqqueue_is_quorum(Q) ->
+ {error, quorum_queue_not_supported};
+ {ok, Q} when ?amqqueue_is_stream(Q) ->
+ case lists:member(Node, rabbit_mnesia:cluster_nodes(running)) of
+ false ->
+ {error, node_not_running};
+ true ->
+ #{name := StreamId} = amqqueue:get_type_state(Q),
+ {ok, Reply, _} = rabbit_stream_coordinator:delete_replica(StreamId, Node),
+ Reply
+ end;
+ E ->
+ E
+ end.
+
+make_stream_conf(Node, Q) ->
+ QName = amqqueue:get_name(Q),
+ Name = queue_name(QName),
+ %% MaxLength = args_policy_lookup(<<"max-length">>, fun min/2, Q),
+ MaxBytes = args_policy_lookup(<<"max-length-bytes">>, fun min/2, Q),
+ MaxAge = max_age(args_policy_lookup(<<"max-age">>, fun max_age/2, Q)),
+ MaxSegmentSize = args_policy_lookup(<<"max-segment-size">>, fun min/2, Q),
+ LeaderLocator = queue_leader_locator(args_policy_lookup(<<"queue-leader-locator">>,
+ fun res_arg/2, Q)),
+ InitialClusterSize = initial_cluster_size(args_policy_lookup(<<"initial-cluster-size">>,
+ fun res_arg/2, Q)),
+ Replicas0 = rabbit_mnesia:cluster_nodes(all) -- [Node],
+ Replicas = select_stream_nodes(InitialClusterSize - 1, Replicas0),
+ Formatter = {?MODULE, format_osiris_event, [QName]},
+ Retention = lists:filter(fun({_, R}) ->
+ R =/= undefined
+ end, [{max_bytes, MaxBytes},
+ {max_age, MaxAge}]),
+ add_if_defined(max_segment_size, MaxSegmentSize, #{reference => QName,
+ name => Name,
+ retention => Retention,
+ leader_locator_strategy => LeaderLocator,
+ leader_node => Node,
+ replica_nodes => Replicas,
+ event_formatter => Formatter,
+ epoch => 1}).
+
+select_stream_nodes(Size, All) when length(All) =< Size ->
+ All;
+select_stream_nodes(Size, All) ->
+ Node = node(),
+ case lists:member(Node, All) of
+ true ->
+ select_stream_nodes(Size - 1, lists:delete(Node, All), [Node]);
+ false ->
+ select_stream_nodes(Size, All, [])
+ end.
+
+select_stream_nodes(0, _, Selected) ->
+ Selected;
+select_stream_nodes(Size, Rest, Selected) ->
+ S = lists:nth(rand:uniform(length(Rest)), Rest),
+ select_stream_nodes(Size - 1, lists:delete(S, Rest), [S | Selected]).
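+
+%% For example, select_stream_nodes(2, [n1, n2, n3]) evaluated on n1
+%% keeps the local node and picks one of the others at random, returning
+%% e.g. [n3, n1].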
+
+update_stream_conf(#{reference := QName} = Conf) ->
+ case rabbit_amqqueue:lookup(QName) of
+ {ok, Q} ->
+ MaxBytes = args_policy_lookup(<<"max-length-bytes">>, fun min/2, Q),
+ MaxAge = max_age(args_policy_lookup(<<"max-age">>, fun max_age/2, Q)),
+ MaxSegmentSize = args_policy_lookup(<<"max-segment-size">>, fun min/2, Q),
+ Retention = lists:filter(fun({_, R}) ->
+ R =/= undefined
+ end, [{max_bytes, MaxBytes},
+ {max_age, MaxAge}]),
+ add_if_defined(max_segment_size, MaxSegmentSize, Conf#{retention => Retention});
+ _ ->
+ Conf
+ end.
+
+add_if_defined(_, undefined, Map) ->
+ Map;
+add_if_defined(Key, Value, Map) ->
+ maps:put(Key, Value, Map).
+
+format_osiris_event(Evt, QRef) ->
+ {'$gen_cast', {queue_event, QRef, Evt}}.
+
+max_age(undefined) ->
+ undefined;
+max_age(Bin) when is_binary(Bin) ->
+ rabbit_amqqueue:check_max_age(Bin);
+max_age(Age) ->
+ Age.
+
+max_age(Age1, Age2) ->
+ min(rabbit_amqqueue:check_max_age(Age1), rabbit_amqqueue:check_max_age(Age2)).
+
+queue_leader_locator(undefined) -> <<"client-local">>;
+queue_leader_locator(Val) -> Val.
+
+initial_cluster_size(undefined) ->
+ length(rabbit_mnesia:cluster_nodes(running));
+initial_cluster_size(Val) ->
+ Val.
+
+res_arg(PolVal, undefined) -> PolVal;
+res_arg(_, ArgVal) -> ArgVal.
+
+queue_name(#resource{virtual_host = VHost, name = Name}) ->
+ Timestamp = erlang:integer_to_binary(erlang:system_time()),
+ osiris_util:to_base64uri(erlang:binary_to_list(<<VHost/binary, "_", Name/binary, "_",
+ Timestamp/binary>>)).
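+
+%% The generated osiris name is a URI-safe encoding of
+%% "<VHost>_<Name>_<Timestamp>"; the timestamp presumably keeps stream
+%% data directories unique across redeclarations of the same queue name.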
+
+recover(Q) ->
+ rabbit_stream_coordinator:recover(),
+ {ok, Q}.
+
+check_queue_exists_in_local_node(Q) ->
+ Conf = amqqueue:get_type_state(Q),
+ AllNodes = [maps:get(leader_node, Conf) | maps:get(replica_nodes, Conf)],
+ case lists:member(node(), AllNodes) of
+ true ->
+ ok;
+ false ->
+ {protocol_error, precondition_failed,
+         "queue '~s' does not have a replica on the local node",
+ [rabbit_misc:rs(amqqueue:get_name(Q))]}
+ end.
+
+maybe_send_reply(_ChPid, undefined) -> ok;
+maybe_send_reply(ChPid, Msg) -> ok = rabbit_channel:send_command(ChPid, Msg).
+
+stream_entries(Name, Id, Str) ->
+ stream_entries(Name, Id, Str, []).
+
+stream_entries(Name, LeaderPid,
+ #stream{name = QName,
+ credit = Credit,
+ start_offset = StartOffs,
+ listening_offset = LOffs,
+ log = Seg0} = Str0, MsgIn)
+ when Credit > 0 ->
+ case osiris_log:read_chunk_parsed(Seg0) of
+ {end_of_stream, Seg} ->
+ NextOffset = osiris_log:next_offset(Seg),
+ case NextOffset > LOffs of
+ true ->
+ osiris:register_offset_listener(LeaderPid, NextOffset),
+ {Str0#stream{log = Seg,
+ listening_offset = NextOffset}, MsgIn};
+ false ->
+ {Str0#stream{log = Seg}, MsgIn}
+ end;
+ {Records, Seg} ->
+ Msgs = [begin
+ Msg0 = binary_to_msg(QName, B),
+ Msg = rabbit_basic:add_header(<<"x-stream-offset">>,
+ long, O, Msg0),
+ {Name, LeaderPid, O, false, Msg}
+ end || {O, B} <- Records,
+ O >= StartOffs],
+
+ NumMsgs = length(Msgs),
+
+ Str = Str0#stream{credit = Credit - NumMsgs,
+ log = Seg},
+ case Str#stream.credit < 1 of
+ true ->
+ %% we are done here
+ {Str, MsgIn ++ Msgs};
+ false ->
+                    %% if there are fewer Msgs than Records, some entries were
+                    %% filtered out (e.g. offsets below the start offset), so
+                    %% recurse and try again while credit remains
+ stream_entries(Name, LeaderPid, Str, MsgIn ++ Msgs)
+ end
+ end;
+stream_entries(_Name, _Id, Str, Msgs) ->
+ {Str, Msgs}.
+
+binary_to_msg(#resource{virtual_host = VHost,
+ kind = queue,
+ name = QName}, Data) ->
+ R0 = rabbit_msg_record:init(Data),
+    %% if the message annotations aren't present the data most likely came
+    %% from the rabbitmq-stream plugin, so choose defaults that simulate
+    %% publishing via the default exchange
+ {utf8, Exchange} = rabbit_msg_record:message_annotation(<<"x-exchange">>,
+ R0, {utf8, <<>>}),
+ {utf8, RoutingKey} = rabbit_msg_record:message_annotation(<<"x-routing-key">>,
+ R0, {utf8, QName}),
+ {Props, Payload} = rabbit_msg_record:to_amqp091(R0),
+ XName = #resource{kind = exchange,
+ virtual_host = VHost,
+ name = Exchange},
+ Content = #content{class_id = 60,
+ properties = Props,
+ properties_bin = none,
+ payload_fragments_rev = [Payload]},
+ {ok, Msg} = rabbit_basic:message(XName, RoutingKey, Content),
+ Msg.
+
+
+msg_to_iodata(#basic_message{exchange_name = #resource{name = Exchange},
+ routing_keys = [RKey | _],
+ content = Content}) ->
+ #content{properties = Props,
+ payload_fragments_rev = Payload} =
+ rabbit_binary_parser:ensure_content_decoded(Content),
+ R0 = rabbit_msg_record:from_amqp091(Props, lists:reverse(Payload)),
+ %% TODO durable?
+ R = rabbit_msg_record:add_message_annotations(
+ #{<<"x-exchange">> => {utf8, Exchange},
+ <<"x-routing-key">> => {utf8, RKey}}, R0),
+ rabbit_msg_record:to_iodata(R).
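+
+%% binary_to_msg/2 and msg_to_iodata/1 are intended to round-trip: the
+%% x-exchange and x-routing-key annotations added here let AMQP 0.9.1
+%% routing information survive the AMQP 1.0 encoding used on disk, with
+%% binary_to_msg/2 falling back to default-exchange semantics when the
+%% annotations are absent.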
+
+capabilities() ->
+ #{policies => [<<"max-length-bytes">>, <<"max-age">>, <<"max-segment-size">>,
+ <<"queue-leader-locator">>, <<"initial-cluster-size">>],
+ queue_arguments => [<<"x-dead-letter-exchange">>, <<"x-dead-letter-routing-key">>,
+ <<"x-max-length">>, <<"x-max-length-bytes">>,
+ <<"x-single-active-consumer">>, <<"x-queue-type">>,
+ <<"x-max-age">>, <<"x-max-segment-size">>,
+ <<"x-initial-cluster-size">>, <<"x-queue-leader-locator">>],
+ consumer_arguments => [<<"x-stream-offset">>],
+ server_named => false}.
diff --git a/deps/rabbit/src/rabbit_sup.erl b/deps/rabbit/src/rabbit_sup.erl
new file mode 100644
index 0000000000..06643b155d
--- /dev/null
+++ b/deps/rabbit/src/rabbit_sup.erl
@@ -0,0 +1,109 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_sup).
+
+-behaviour(supervisor).
+
+-export([start_link/0, start_child/1, start_child/2, start_child/3, start_child/4,
+ start_supervisor_child/1, start_supervisor_child/2,
+ start_supervisor_child/3,
+ start_restartable_child/1, start_restartable_child/2,
+ start_delayed_restartable_child/1, start_delayed_restartable_child/2,
+ stop_child/1]).
+
+-export([init/1]).
+
+-include("rabbit.hrl").
+
+-define(SERVER, ?MODULE).
+
+%%----------------------------------------------------------------------------
+
+-spec start_link() -> rabbit_types:ok_pid_or_error().
+
+start_link() -> supervisor:start_link({local, ?SERVER}, ?MODULE, []).
+
+-spec start_child(atom()) -> 'ok'.
+
+start_child(Mod) -> start_child(Mod, []).
+
+-spec start_child(atom(), [any()]) -> 'ok'.
+
+start_child(Mod, Args) -> start_child(Mod, Mod, Args).
+
+-spec start_child(atom(), atom(), [any()]) -> 'ok'.
+
+start_child(ChildId, Mod, Args) ->
+ child_reply(supervisor:start_child(
+ ?SERVER,
+ {ChildId, {Mod, start_link, Args},
+ transient, ?WORKER_WAIT, worker, [Mod]})).
+
+-spec start_child(atom(), atom(), atom(), [any()]) -> 'ok'.
+
+start_child(ChildId, Mod, Fun, Args) ->
+ child_reply(supervisor:start_child(
+ ?SERVER,
+ {ChildId, {Mod, Fun, Args},
+ transient, ?WORKER_WAIT, worker, [Mod]})).
+
+-spec start_supervisor_child(atom()) -> 'ok'.
+
+start_supervisor_child(Mod) -> start_supervisor_child(Mod, []).
+
+-spec start_supervisor_child(atom(), [any()]) -> 'ok'.
+
+start_supervisor_child(Mod, Args) -> start_supervisor_child(Mod, Mod, Args).
+
+-spec start_supervisor_child(atom(), atom(), [any()]) -> 'ok'.
+
+start_supervisor_child(ChildId, Mod, Args) ->
+ child_reply(supervisor:start_child(
+ ?SERVER,
+ {ChildId, {Mod, start_link, Args},
+ transient, infinity, supervisor, [Mod]})).
+
+-spec start_restartable_child(atom()) -> 'ok'.
+
+start_restartable_child(M) -> start_restartable_child(M, [], false).
+
+-spec start_restartable_child(atom(), [any()]) -> 'ok'.
+
+start_restartable_child(M, A) -> start_restartable_child(M, A, false).
+
+-spec start_delayed_restartable_child(atom()) -> 'ok'.
+
+start_delayed_restartable_child(M) -> start_restartable_child(M, [], true).
+
+-spec start_delayed_restartable_child(atom(), [any()]) -> 'ok'.
+
+start_delayed_restartable_child(M, A) -> start_restartable_child(M, A, true).
+
+start_restartable_child(Mod, Args, Delay) ->
+ Name = list_to_atom(atom_to_list(Mod) ++ "_sup"),
+ child_reply(supervisor:start_child(
+ ?SERVER,
+ {Name, {rabbit_restartable_sup, start_link,
+ [Name, {Mod, start_link, Args}, Delay]},
+ transient, infinity, supervisor, [rabbit_restartable_sup]})).
+
+-spec stop_child(atom()) -> rabbit_types:ok_or_error(any()).
+
+stop_child(ChildId) ->
+ case supervisor:terminate_child(?SERVER, ChildId) of
+ ok -> supervisor:delete_child(?SERVER, ChildId);
+ E -> E
+ end.
+
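+%% one_for_all with a restart intensity of 0 in 1 second: any child exit
+%% immediately exceeds the intensity, so the whole supervisor terminates
+%% instead of restarting children piecemeal.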
+init([]) -> {ok, {{one_for_all, 0, 1}, []}}.
+
+
+%%----------------------------------------------------------------------------
+
+child_reply({ok, _}) -> ok;
+child_reply(X) -> X.
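+
+%% Usage sketch (illustrative, not part of the original module): a boot
+%% step could register and stop children like so, assuming my_worker is
+%% a hypothetical module exporting start_link/0:
+%%
+%%   ok = rabbit_sup:start_child(my_worker),
+%%   ok = rabbit_sup:start_restartable_child(my_worker),
+%%   ok = rabbit_sup:stop_child(my_worker).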
diff --git a/deps/rabbit/src/rabbit_sysmon_handler.erl b/deps/rabbit/src/rabbit_sysmon_handler.erl
new file mode 100644
index 0000000000..8f7298ed6e
--- /dev/null
+++ b/deps/rabbit/src/rabbit_sysmon_handler.erl
@@ -0,0 +1,235 @@
+%% Copyright (c) 2011 Basho Technologies, Inc. All Rights Reserved.
+%% Copyright (c) 2018-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+%% This file is provided to you under the Apache License,
+%% Version 2.0 (the "License"); you may not use this file
+%% except in compliance with the License. You may obtain
+%% a copy of the License at
+%%
+%% https://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing,
+%% software distributed under the License is distributed on an
+%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+%% KIND, either express or implied. See the License for the
+%% specific language governing permissions and limitations
+%% under the License.
+
+%% @doc A custom event handler for the `sysmon_handler' application's
+%% `system_monitor' event manager.
+%%
+%% This module attempts to discover more information about a process
+%% that generates a system_monitor event.
+
+-module(rabbit_sysmon_handler).
+
+-behaviour(gen_event).
+
+%% API
+-export([add_handler/0]).
+
+%% gen_event callbacks
+-export([init/1, handle_event/2, handle_call/2,
+ handle_info/2, terminate/2, code_change/3]).
+
+-record(state, {timer_ref :: reference() | undefined}).
+
+-define(INACTIVITY_TIMEOUT, 5000).
+
+%%%===================================================================
+%%% API
+%%%===================================================================
+
+add_handler() ->
+ %% Vulnerable to race conditions (installing handler multiple
+ %% times), but risk is zero in the common OTP app startup case.
+ case lists:member(?MODULE, gen_event:which_handlers(sysmon_handler)) of
+ true ->
+ ok;
+ false ->
+ sysmon_handler_filter:add_custom_handler(?MODULE, [])
+ end.
+
+%%%===================================================================
+%%% gen_event callbacks
+%%%===================================================================
+
+%%--------------------------------------------------------------------
+%% @private
+%% @doc
+%% Whenever a new event handler is added to an event manager,
+%% this function is called to initialize the event handler.
+%%
+%% @spec init(Args) -> {ok, State}
+%% @end
+%%--------------------------------------------------------------------
+init([]) ->
+ {ok, #state{}, hibernate}.
+
+%%--------------------------------------------------------------------
+%% @private
+%% @doc
+%% Whenever an event manager receives an event sent using
+%% gen_event:notify/2 or gen_event:sync_notify/2, this function is
+%% called for each installed event handler to handle the event.
+%%
+%% @spec handle_event(Event, State) ->
+%% {ok, State} |
+%% {swap_handler, Args1, State1, Mod2, Args2} |
+%% remove_handler
+%% @end
+%%--------------------------------------------------------------------
+handle_event({monitor, Pid, Type, _Info},
+ State=#state{timer_ref=TimerRef}) when Pid == self() ->
+ %% Reset the inactivity timeout
+ NewTimerRef = reset_timer(TimerRef),
+ maybe_collect_garbage(Type),
+ {ok, State#state{timer_ref=NewTimerRef}};
+handle_event({monitor, PidOrPort, Type, Info}, State=#state{timer_ref=TimerRef}) ->
+ %% Reset the inactivity timeout
+ NewTimerRef = reset_timer(TimerRef),
+ {Fmt, Args} = format_pretty_proc_or_port_info(PidOrPort),
+ rabbit_log:warning("~p ~w ~w " ++ Fmt ++ " ~w", [?MODULE, Type, PidOrPort] ++ Args ++ [Info]),
+ {ok, State#state{timer_ref=NewTimerRef}};
+handle_event({suppressed, Type, Info}, State=#state{timer_ref=TimerRef}) ->
+ %% Reset the inactivity timeout
+ NewTimerRef = reset_timer(TimerRef),
+ rabbit_log:debug("~p encountered a suppressed event of type ~w: ~w", [?MODULE, Type, Info]),
+ {ok, State#state{timer_ref=NewTimerRef}};
+handle_event(Event, State=#state{timer_ref=TimerRef}) ->
+ NewTimerRef = reset_timer(TimerRef),
+ rabbit_log:warning("~p unhandled event: ~p", [?MODULE, Event]),
+ {ok, State#state{timer_ref=NewTimerRef}}.
+
+%%--------------------------------------------------------------------
+%% @private
+%% @doc
+%% Whenever an event manager receives a request sent using
+%% gen_event:call/3,4, this function is called for the specified
+%% event handler to handle the request.
+%%
+%% @spec handle_call(Request, State) ->
+%% {ok, Reply, State} |
+%% {swap_handler, Reply, Args1, State1, Mod2, Args2} |
+%% {remove_handler, Reply}
+%% @end
+%%--------------------------------------------------------------------
+handle_call(_Call, State) ->
+ Reply = not_supported,
+ {ok, Reply, State}.
+
+%%--------------------------------------------------------------------
+%% @private
+%% @doc
+%% This function is called for each installed event handler when
+%% an event manager receives any other message than an event or a
+%% synchronous request (or a system message).
+%%
+%% @spec handle_info(Info, State) ->
+%% {ok, State} |
+%% {swap_handler, Args1, State1, Mod2, Args2} |
+%% remove_handler
+%% @end
+%%--------------------------------------------------------------------
+handle_info(inactivity_timeout, State) ->
+ %% No events have arrived for the timeout period
+ %% so hibernate to free up resources.
+ {ok, State, hibernate};
+handle_info(Info, State) ->
+ rabbit_log:info("handle_info got ~p", [Info]),
+ {ok, State}.
+
+%%--------------------------------------------------------------------
+%% @private
+%% @doc
+%% Whenever an event handler is deleted from an event manager, this
+%% function is called. It should be the opposite of Module:init/1 and
+%% do any necessary cleaning up.
+%%
+%% @spec terminate(Reason, State) -> void()
+%% @end
+%%--------------------------------------------------------------------
+terminate(_Reason, _State) ->
+ ok.
+
+%%--------------------------------------------------------------------
+%% @private
+%% @doc
+%% Convert process state when code is changed
+%%
+%% @spec code_change(OldVsn, State, Extra) -> {ok, NewState}
+%% @end
+%%--------------------------------------------------------------------
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+%%%===================================================================
+%%% Internal functions
+%%%===================================================================
+
+format_pretty_proc_or_port_info(PidOrPort) ->
+ try
+ case get_pretty_proc_or_port_info(PidOrPort) of
+ undefined ->
+ {"", []};
+ Res ->
+ Res
+ end
+ catch C:E:S ->
+ {"Pid ~w, ~W ~W at ~w\n",
+ [PidOrPort, C, 20, E, 20, S]}
+ end.
+
+get_pretty_proc_or_port_info(Pid) when is_pid(Pid) ->
+ Infos = [registered_name, initial_call, current_function, message_queue_len],
+ case process_info(Pid, Infos) of
+ undefined ->
+ undefined;
+ [] ->
+ undefined;
+ [{registered_name, RN0}, ICT1, {_, CF}, {_, MQL}] ->
+ ICT = case proc_lib:translate_initial_call(Pid) of
+ {proc_lib, init_p, 5} -> % not by proc_lib, see docs
+ ICT1;
+ ICT2 ->
+ {initial_call, ICT2}
+ end,
+ RNL = if RN0 == [] -> [];
+ true -> [{name, RN0}]
+ end,
+ {"~w", [RNL ++ [ICT, CF, {message_queue_len, MQL}]]}
+ end;
+get_pretty_proc_or_port_info(Port) when is_port(Port) ->
+ PortInfo = erlang:port_info(Port),
+ {value, {name, Name}, PortInfo2} = lists:keytake(name, 1, PortInfo),
+ QueueSize = [erlang:port_info(Port, queue_size)],
+ Connected = case proplists:get_value(connected, PortInfo2) of
+ undefined ->
+ [];
+ ConnectedPid ->
+ case proc_lib:translate_initial_call(ConnectedPid) of
+ {proc_lib, init_p, 5} -> % not by proc_lib, see docs
+ [];
+ ICT ->
+ [{initial_call, ICT}]
+ end
+ end,
+ {"name ~s ~w", [Name, lists:append([PortInfo2, QueueSize, Connected])]}.
+
+
+%% @doc If the message type is due to a large heap warning
+%% and the source is ourself, go ahead and collect garbage
+%% to avoid the death spiral.
+-spec maybe_collect_garbage(atom()) -> ok.
+maybe_collect_garbage(large_heap) ->
+ erlang:garbage_collect(),
+ ok;
+maybe_collect_garbage(_) ->
+ ok.
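+
+%% For reference, a 'large_heap' system_monitor event arrives roughly as
+%%
+%%   {monitor, GcPid, large_heap, [{heap_size, Words}, ...]}
+%%
+%% (the exact info list depends on the OTP release), which is why
+%% handle_event/2 above dispatches on the Type field.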
+
+-spec reset_timer(undefined | reference()) -> reference().
+reset_timer(undefined) ->
+ erlang:send_after(?INACTIVITY_TIMEOUT, self(), inactivity_timeout);
+reset_timer(TimerRef) ->
+ _ = erlang:cancel_timer(TimerRef),
+ reset_timer(undefined).
diff --git a/deps/rabbit/src/rabbit_sysmon_minder.erl b/deps/rabbit/src/rabbit_sysmon_minder.erl
new file mode 100644
index 0000000000..a0402e5ebe
--- /dev/null
+++ b/deps/rabbit/src/rabbit_sysmon_minder.erl
@@ -0,0 +1,156 @@
+%% -------------------------------------------------------------------
+%% Copyright (c) 2007-2010 Basho Technologies, Inc. All Rights Reserved.
+%% Copyright (c) 2018-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+%% This file is provided to you under the Apache License,
+%% Version 2.0 (the "License"); you may not use this file
+%% except in compliance with the License. You may obtain
+%% a copy of the License at
+%%
+%% https://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing,
+%% software distributed under the License is distributed on an
+%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+%% KIND, either express or implied. See the License for the
+%% specific language governing permissions and limitations
+%% under the License.
+%%
+%% -------------------------------------------------------------------
+
+-module(rabbit_sysmon_minder).
+
+-behaviour(gen_server).
+
+%% API
+-export([start_link/0]).
+
+%% gen_server callbacks
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
+ terminate/2, code_change/3]).
+
+-record(state, {}).
+
+%%%===================================================================
+%%% API
+%%%===================================================================
+
+%%--------------------------------------------------------------------
+%% @doc
+%% Starts the server
+%%
+%% @spec start_link() -> {ok, Pid} | ignore | {error, Error}
+%% @end
+%%--------------------------------------------------------------------
+start_link() ->
+ gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
+
+%%%===================================================================
+%%% gen_server callbacks
+%%%===================================================================
+
+%%--------------------------------------------------------------------
+%% @private
+%% @doc
+%% Initializes the server
+%%
+%% @spec init(Args) -> {ok, State} |
+%% {ok, State, Timeout} |
+%% ignore |
+%% {stop, Reason}
+%% @end
+%%--------------------------------------------------------------------
+init([]) ->
+ %% Add our system_monitor event handler. We do that here because
+ %% we have a process at our disposal (i.e. ourself) to receive the
+ %% notification in the very unlikely event that the
+ %% sysmon_handler has crashed and been removed from the
+ %% sysmon_handler gen_event server. (If we had a supervisor
+ %% or app-starting process add the handler, then if the handler
+ %% crashes, nobody will act on the crash notification.)
+ rabbit_sysmon_handler:add_handler(),
+ {ok, #state{}}.
+
+%%--------------------------------------------------------------------
+%% @private
+%% @doc
+%% Handling call messages
+%%
+%% @spec handle_call(Request, From, State) ->
+%% {reply, Reply, State} |
+%% {reply, Reply, State, Timeout} |
+%% {noreply, State} |
+%% {noreply, State, Timeout} |
+%% {stop, Reason, Reply, State} |
+%% {stop, Reason, State}
+%% @end
+%%--------------------------------------------------------------------
+handle_call(_Request, _From, State) ->
+ Reply = ok,
+ {reply, Reply, State}.
+
+%%--------------------------------------------------------------------
+%% @private
+%% @doc
+%% Handling cast messages
+%%
+%% @spec handle_cast(Msg, State) -> {noreply, State} |
+%% {noreply, State, Timeout} |
+%% {stop, Reason, State}
+%% @end
+%%--------------------------------------------------------------------
+handle_cast(_Msg, State) ->
+ {noreply, State}.
+
+%%--------------------------------------------------------------------
+%% @private
+%% @doc
+%% Handling all non call/cast messages
+%%
+%% @spec handle_info(Info, State) -> {noreply, State} |
+%% {noreply, State, Timeout} |
+%% {stop, Reason, State}
+%% @end
+%%--------------------------------------------------------------------
+handle_info({gen_event_EXIT, rabbit_sysmon_handler, _}, State) ->
+ %% SASL will create an error message, no need for us to duplicate it.
+ %%
+ %% Our handler should never crash, but it did indeed crash. If
+ %% there's a pathological condition somewhere that's generating
+ %% lots of unforeseen things that crash this custom handler, we
+ %% could make things worse by jumping back into the exploding
+ %% volcano. Wait a little bit before jumping back. Besides, the
+ %% system_monitor data is nice but is not critical: there is no
+ %% need to make things worse if things are indeed bad, and if we
+ %% miss a few seconds of system_monitor events, the world will not
+ %% end.
+ timer:sleep(2*1000),
+ rabbit_sysmon_handler:add_handler(),
+ {noreply, State};
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+%%--------------------------------------------------------------------
+%% @private
+%% @doc
+%% This function is called by a gen_server when it is about to
+%% terminate. It should be the opposite of Module:init/1 and do any
+%% necessary cleaning up. When it returns, the gen_server terminates
+%% with Reason. The return value is ignored.
+%%
+%% @spec terminate(Reason, State) -> void()
+%% @end
+%%--------------------------------------------------------------------
+terminate(_Reason, _State) ->
+ ok.
+
+%%--------------------------------------------------------------------
+%% @private
+%% @doc
+%% Convert process state when code is changed
+%%
+%% @spec code_change(OldVsn, State, Extra) -> {ok, NewState}
+%% @end
+%%--------------------------------------------------------------------
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
diff --git a/deps/rabbit/src/rabbit_table.erl b/deps/rabbit/src/rabbit_table.erl
new file mode 100644
index 0000000000..77534763d0
--- /dev/null
+++ b/deps/rabbit/src/rabbit_table.erl
@@ -0,0 +1,416 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_table).
+
+-export([
+ create/0, create/2, ensure_local_copies/1, ensure_table_copy/2,
+ wait_for_replicated/1, wait/1, wait/2,
+ force_load/0, is_present/0, is_empty/0, needs_default_data/0,
+ check_schema_integrity/1, clear_ram_only_tables/0, retry_timeout/0,
+ wait_for_replicated/0, exists/1]).
+
+%% for testing purposes
+-export([definitions/0]).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+%%----------------------------------------------------------------------------
+
+-type retry() :: boolean().
+-type mnesia_table() :: atom().
+
+%%----------------------------------------------------------------------------
+%% Main interface
+%%----------------------------------------------------------------------------
+
+-spec create() -> 'ok'.
+
+create() ->
+ lists:foreach(
+ fun ({Table, Def}) -> create(Table, Def) end,
+ definitions()),
+ ensure_secondary_indexes(),
+ ok.
+
+-spec create(mnesia_table(), list()) -> rabbit_types:ok_or_error(any()).
+
+create(TableName, TableDefinition) ->
+ TableDefinition1 = proplists:delete(match, TableDefinition),
+ rabbit_log:debug("Will create a schema database table '~s'", [TableName]),
+ case mnesia:create_table(TableName, TableDefinition1) of
+ {atomic, ok} -> ok;
+ {aborted,{already_exists, TableName}} -> ok;
+ {aborted, {already_exists, TableName, _}} -> ok;
+ {aborted, Reason} ->
+ throw({error, {table_creation_failed, TableName, TableDefinition1, Reason}})
+ end.
+
+-spec exists(mnesia_table()) -> boolean().
+exists(Table) ->
+ lists:member(Table, mnesia:system_info(tables)).
+
+%% Sets up secondary indexes in a blank node database.
+ensure_secondary_indexes() ->
+ ensure_secondary_index(rabbit_queue, vhost),
+ ok.
+
+ensure_secondary_index(Table, Field) ->
+ case mnesia:add_table_index(Table, Field) of
+ {atomic, ok} -> ok;
+ {aborted, {already_exists, Table, _}} -> ok
+ end.
+
+-spec ensure_table_copy(mnesia_table(), node()) -> ok | {error, any()}.
+ensure_table_copy(TableName, Node) ->
+ rabbit_log:debug("Will add a local schema database copy for table '~s'", [TableName]),
+ case mnesia:add_table_copy(TableName, Node, disc_copies) of
+ {atomic, ok} -> ok;
+ {aborted,{already_exists, TableName}} -> ok;
+ {aborted, {already_exists, TableName, _}} -> ok;
+ {aborted, Reason} -> {error, Reason}
+ end.
+
+%% This arity only exists for backwards compatibility with certain
+%% plugins. See https://github.com/rabbitmq/rabbitmq-clusterer/issues/19.
+
+-spec wait_for_replicated() -> 'ok'.
+
+wait_for_replicated() ->
+ wait_for_replicated(false).
+
+-spec wait_for_replicated(retry()) -> 'ok'.
+
+wait_for_replicated(Retry) ->
+ wait([Tab || {Tab, TabDef} <- definitions(),
+ not lists:member({local_content, true}, TabDef)], Retry).
+
+-spec wait([atom()]) -> 'ok'.
+
+wait(TableNames) ->
+ wait(TableNames, _Retry = false).
+
+wait(TableNames, Retry) ->
+ {Timeout, Retries} = retry_timeout(Retry),
+ wait(TableNames, Timeout, Retries).
+
+wait(TableNames, Timeout, Retries) ->
+ %% We might be in ctl here for offline ops, in which case we can't
+ %% get_env() for the rabbit app.
+ rabbit_log:info("Waiting for Mnesia tables for ~p ms, ~p retries left~n",
+ [Timeout, Retries - 1]),
+ Result = case mnesia:wait_for_tables(TableNames, Timeout) of
+ ok ->
+ ok;
+ {timeout, BadTabs} ->
+ AllNodes = rabbit_mnesia:cluster_nodes(all),
+ {error, {timeout_waiting_for_tables, AllNodes, BadTabs}};
+ {error, Reason} ->
+ AllNodes = rabbit_mnesia:cluster_nodes(all),
+ {error, {failed_waiting_for_tables, AllNodes, Reason}}
+ end,
+ case {Retries, Result} of
+ {_, ok} ->
+ rabbit_log:info("Successfully synced tables from a peer"),
+ ok;
+ {1, {error, _} = Error} ->
+ throw(Error);
+ {_, {error, Error}} ->
+ rabbit_log:warning("Error while waiting for Mnesia tables: ~p~n", [Error]),
+ wait(TableNames, Timeout, Retries - 1)
+ end.
+
+retry_timeout(_Retry = false) ->
+ {retry_timeout(), 1};
+retry_timeout(_Retry = true) ->
+ Retries = case application:get_env(rabbit, mnesia_table_loading_retry_limit) of
+ {ok, T} -> T;
+ undefined -> 10
+ end,
+ {retry_timeout(), Retries}.
+
+-spec retry_timeout() -> non_neg_integer() | infinity.
+
+retry_timeout() ->
+ case application:get_env(rabbit, mnesia_table_loading_retry_timeout) of
+ {ok, T} -> T;
+ undefined -> 30000
+ end.
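+
+%% Both application environment keys read above can be tuned; a minimal
+%% advanced.config sketch using the default-equivalent values:
+%%
+%%   [{rabbit, [{mnesia_table_loading_retry_timeout, 30000},
+%%              {mnesia_table_loading_retry_limit, 10}]}].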
+
+-spec force_load() -> 'ok'.
+
+force_load() -> [mnesia:force_load_table(T) || T <- names()], ok.
+
+-spec is_present() -> boolean().
+
+is_present() -> names() -- mnesia:system_info(tables) =:= [].
+
+-spec is_empty() -> boolean().
+
+is_empty() -> is_empty(names()).
+
+-spec needs_default_data() -> boolean().
+
+needs_default_data() -> is_empty([rabbit_user, rabbit_user_permission,
+ rabbit_vhost]).
+
+is_empty(Names) ->
+ lists:all(fun (Tab) -> mnesia:dirty_first(Tab) == '$end_of_table' end,
+ Names).
+
+-spec check_schema_integrity(retry()) -> rabbit_types:ok_or_error(any()).
+
+check_schema_integrity(Retry) ->
+ Tables = mnesia:system_info(tables),
+ case check(fun (Tab, TabDef) ->
+ case lists:member(Tab, Tables) of
+ false -> {error, {table_missing, Tab}};
+ true -> check_attributes(Tab, TabDef)
+ end
+ end) of
+ ok -> wait(names(), Retry),
+ check(fun check_content/2);
+ Other -> Other
+ end.
+
+-spec clear_ram_only_tables() -> 'ok'.
+
+clear_ram_only_tables() ->
+ Node = node(),
+ lists:foreach(
+ fun (TabName) ->
+ case lists:member(Node, mnesia:table_info(TabName, ram_copies)) of
+ true -> {atomic, ok} = mnesia:clear_table(TabName);
+ false -> ok
+ end
+ end, names()),
+ ok.
+
+%% The sequence in which we delete the schema and then the other
+%% tables is important: if we delete the schema first when moving to
+%% RAM, Mnesia will loudly complain since it doesn't make much sense to
+%% do that. But when moving to disc, we need to move the schema first.
+
+-spec ensure_local_copies('disc' | 'ram') -> 'ok'.
+
+ensure_local_copies(disc) ->
+ create_local_copy(schema, disc_copies),
+ create_local_copies(disc);
+ensure_local_copies(ram) ->
+ create_local_copies(ram),
+ create_local_copy(schema, ram_copies).
+
+%%--------------------------------------------------------------------
+%% Internal helpers
+%%--------------------------------------------------------------------
+
+create_local_copies(Type) ->
+ lists:foreach(
+ fun ({Tab, TabDef}) ->
+ HasDiscCopies = has_copy_type(TabDef, disc_copies),
+ HasDiscOnlyCopies = has_copy_type(TabDef, disc_only_copies),
+ LocalTab = proplists:get_bool(local_content, TabDef),
+ StorageType =
+ if
+ Type =:= disc orelse LocalTab ->
+ if
+ HasDiscCopies -> disc_copies;
+ HasDiscOnlyCopies -> disc_only_copies;
+ true -> ram_copies
+ end;
+ Type =:= ram ->
+ ram_copies
+ end,
+ ok = create_local_copy(Tab, StorageType)
+ end, definitions(Type)),
+ ok.
+
+create_local_copy(Tab, Type) ->
+ StorageType = mnesia:table_info(Tab, storage_type),
+ {atomic, ok} =
+ if
+ StorageType == unknown ->
+ mnesia:add_table_copy(Tab, node(), Type);
+ StorageType /= Type ->
+ mnesia:change_table_copy_type(Tab, node(), Type);
+ true -> {atomic, ok}
+ end,
+ ok.
+
+has_copy_type(TabDef, DiscType) ->
+ lists:member(node(), proplists:get_value(DiscType, TabDef, [])).
+
+check_attributes(Tab, TabDef) ->
+ {_, ExpAttrs} = proplists:lookup(attributes, TabDef),
+ case mnesia:table_info(Tab, attributes) of
+ ExpAttrs -> ok;
+ Attrs -> {error, {table_attributes_mismatch, Tab, ExpAttrs, Attrs}}
+ end.
+
+check_content(Tab, TabDef) ->
+ {_, Match} = proplists:lookup(match, TabDef),
+ case mnesia:dirty_first(Tab) of
+ '$end_of_table' ->
+ ok;
+ Key ->
+ ObjList = mnesia:dirty_read(Tab, Key),
+ MatchComp = ets:match_spec_compile([{Match, [], ['$_']}]),
+ case ets:match_spec_run(ObjList, MatchComp) of
+ ObjList -> ok;
+ _ -> {error, {table_content_invalid, Tab, Match, ObjList}}
+ end
+ end.
+
+check(Fun) ->
+ case [Error || {Tab, TabDef} <- definitions(),
+ begin
+ {Ret, Error} = case Fun(Tab, TabDef) of
+ ok -> {false, none};
+ {error, E} -> {true, E}
+ end,
+ Ret
+ end] of
+ [] -> ok;
+ Errors -> {error, Errors}
+ end.
+
+%%--------------------------------------------------------------------
+%% Table definitions
+%%--------------------------------------------------------------------
+
+names() -> [Tab || {Tab, _} <- definitions()].
+
+%% The tables aren't supposed to be on disk on a ram node
+definitions(disc) ->
+ definitions();
+definitions(ram) ->
+ [{Tab, [{disc_copies, []}, {ram_copies, [node()]} |
+ proplists:delete(
+ ram_copies, proplists:delete(disc_copies, TabDef))]} ||
+ {Tab, TabDef} <- definitions()].
+
+definitions() ->
+ [{rabbit_user,
+ [{record_name, internal_user},
+ {attributes, internal_user:fields()},
+ {disc_copies, [node()]},
+ {match, internal_user:pattern_match_all()}]},
+ {rabbit_user_permission,
+ [{record_name, user_permission},
+ {attributes, record_info(fields, user_permission)},
+ {disc_copies, [node()]},
+ {match, #user_permission{user_vhost = #user_vhost{_='_'},
+ permission = #permission{_='_'},
+ _='_'}}]},
+ {rabbit_topic_permission,
+ [{record_name, topic_permission},
+ {attributes, record_info(fields, topic_permission)},
+ {disc_copies, [node()]},
+ {match, #topic_permission{topic_permission_key = #topic_permission_key{_='_'},
+ permission = #permission{_='_'},
+ _='_'}}]},
+ {rabbit_vhost,
+ [
+ {record_name, vhost},
+ {attributes, vhost:fields()},
+ {disc_copies, [node()]},
+ {match, vhost:pattern_match_all()}]},
+ {rabbit_listener,
+ [{record_name, listener},
+ {attributes, record_info(fields, listener)},
+ {type, bag},
+ {match, #listener{_='_'}}]},
+ {rabbit_durable_route,
+ [{record_name, route},
+ {attributes, record_info(fields, route)},
+ {disc_copies, [node()]},
+ {match, #route{binding = binding_match(), _='_'}}]},
+ {rabbit_semi_durable_route,
+ [{record_name, route},
+ {attributes, record_info(fields, route)},
+ {type, ordered_set},
+ {match, #route{binding = binding_match(), _='_'}}]},
+ {rabbit_route,
+ [{record_name, route},
+ {attributes, record_info(fields, route)},
+ {type, ordered_set},
+ {match, #route{binding = binding_match(), _='_'}}]},
+ {rabbit_reverse_route,
+ [{record_name, reverse_route},
+ {attributes, record_info(fields, reverse_route)},
+ {type, ordered_set},
+ {match, #reverse_route{reverse_binding = reverse_binding_match(),
+ _='_'}}]},
+ {rabbit_topic_trie_node,
+ [{record_name, topic_trie_node},
+ {attributes, record_info(fields, topic_trie_node)},
+ {type, ordered_set},
+ {match, #topic_trie_node{trie_node = trie_node_match(), _='_'}}]},
+ {rabbit_topic_trie_edge,
+ [{record_name, topic_trie_edge},
+ {attributes, record_info(fields, topic_trie_edge)},
+ {type, ordered_set},
+ {match, #topic_trie_edge{trie_edge = trie_edge_match(), _='_'}}]},
+ {rabbit_topic_trie_binding,
+ [{record_name, topic_trie_binding},
+ {attributes, record_info(fields, topic_trie_binding)},
+ {type, ordered_set},
+ {match, #topic_trie_binding{trie_binding = trie_binding_match(),
+ _='_'}}]},
+ {rabbit_durable_exchange,
+ [{record_name, exchange},
+ {attributes, record_info(fields, exchange)},
+ {disc_copies, [node()]},
+ {match, #exchange{name = exchange_name_match(), _='_'}}]},
+ {rabbit_exchange,
+ [{record_name, exchange},
+ {attributes, record_info(fields, exchange)},
+ {match, #exchange{name = exchange_name_match(), _='_'}}]},
+ {rabbit_exchange_serial,
+ [{record_name, exchange_serial},
+ {attributes, record_info(fields, exchange_serial)},
+ {match, #exchange_serial{name = exchange_name_match(), _='_'}}]},
+ {rabbit_runtime_parameters,
+ [{record_name, runtime_parameters},
+ {attributes, record_info(fields, runtime_parameters)},
+ {disc_copies, [node()]},
+ {match, #runtime_parameters{_='_'}}]},
+ {rabbit_durable_queue,
+ [{record_name, amqqueue},
+ {attributes, amqqueue:fields()},
+ {disc_copies, [node()]},
+ {match, amqqueue:pattern_match_on_name(queue_name_match())}]},
+ {rabbit_queue,
+ [{record_name, amqqueue},
+ {attributes, amqqueue:fields()},
+ {match, amqqueue:pattern_match_on_name(queue_name_match())}]}
+ ]
+ ++ gm:table_definitions()
+ ++ mirrored_supervisor:table_definitions().
+
+binding_match() ->
+ #binding{source = exchange_name_match(),
+ destination = binding_destination_match(),
+ _='_'}.
+reverse_binding_match() ->
+ #reverse_binding{destination = binding_destination_match(),
+ source = exchange_name_match(),
+ _='_'}.
+binding_destination_match() ->
+ resource_match('_').
+trie_node_match() ->
+ #trie_node{exchange_name = exchange_name_match(), _='_'}.
+trie_edge_match() ->
+ #trie_edge{exchange_name = exchange_name_match(), _='_'}.
+trie_binding_match() ->
+ #trie_binding{exchange_name = exchange_name_match(), _='_'}.
+exchange_name_match() ->
+ resource_match(exchange).
+queue_name_match() ->
+ resource_match(queue).
+resource_match(Kind) ->
+ #resource{kind = Kind, _='_'}.
diff --git a/deps/rabbit/src/rabbit_trace.erl b/deps/rabbit/src/rabbit_trace.erl
new file mode 100644
index 0000000000..74b892330e
--- /dev/null
+++ b/deps/rabbit/src/rabbit_trace.erl
@@ -0,0 +1,128 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_trace).
+
+-export([init/1, enabled/1, tap_in/6, tap_out/5, start/1, stop/1]).
+
+-include("rabbit.hrl").
+-include("rabbit_framing.hrl").
+
+-define(TRACE_VHOSTS, trace_vhosts).
+-define(XNAME, <<"amq.rabbitmq.trace">>).
+
+%%----------------------------------------------------------------------------
+
+-type state() :: rabbit_types:exchange() | 'none'.
+
+%%----------------------------------------------------------------------------
+
+-spec init(rabbit_types:vhost()) -> state().
+
+init(VHost) ->
+ case enabled(VHost) of
+ false -> none;
+ true -> {ok, X} = rabbit_exchange:lookup(
+ rabbit_misc:r(VHost, exchange, ?XNAME)),
+ X
+ end.
+
+-spec enabled(rabbit_types:vhost()) -> boolean().
+
+enabled(VHost) ->
+ {ok, VHosts} = application:get_env(rabbit, ?TRACE_VHOSTS),
+ lists:member(VHost, VHosts).
+
+-spec tap_in(rabbit_types:basic_message(), [rabbit_amqqueue:name()],
+ binary(), rabbit_channel:channel_number(),
+ rabbit_types:username(), state()) -> 'ok'.
+
+tap_in(_Msg, _QNames, _ConnName, _ChannelNum, _Username, none) -> ok;
+tap_in(Msg = #basic_message{exchange_name = #resource{name = XName,
+ virtual_host = VHost}},
+ QNames, ConnName, ChannelNum, Username, TraceX) ->
+ trace(TraceX, Msg, <<"publish">>, XName,
+ [{<<"vhost">>, longstr, VHost},
+ {<<"connection">>, longstr, ConnName},
+ {<<"channel">>, signedint, ChannelNum},
+ {<<"user">>, longstr, Username},
+ {<<"routed_queues">>, array,
+ [{longstr, QName#resource.name} || QName <- QNames]}]).
+
+-spec tap_out(rabbit_amqqueue:qmsg(), binary(),
+ rabbit_channel:channel_number(),
+ rabbit_types:username(), state()) -> 'ok'.
+
+tap_out(_Msg, _ConnName, _ChannelNum, _Username, none) -> ok;
+tap_out({#resource{name = QName, virtual_host = VHost},
+ _QPid, _QMsgId, Redelivered, Msg},
+ ConnName, ChannelNum, Username, TraceX) ->
+ RedeliveredNum = case Redelivered of true -> 1; false -> 0 end,
+ trace(TraceX, Msg, <<"deliver">>, QName,
+ [{<<"redelivered">>, signedint, RedeliveredNum},
+ {<<"vhost">>, longstr, VHost},
+ {<<"connection">>, longstr, ConnName},
+ {<<"channel">>, signedint, ChannelNum},
+ {<<"user">>, longstr, Username}]).
+
+%%----------------------------------------------------------------------------
+
+-spec start(rabbit_types:vhost()) -> 'ok'.
+
+start(VHost) ->
+ rabbit_log:info("Enabling tracing for vhost '~s'~n", [VHost]),
+ update_config(fun (VHosts) -> [VHost | VHosts -- [VHost]] end).
+
+-spec stop(rabbit_types:vhost()) -> 'ok'.
+
+stop(VHost) ->
+ rabbit_log:info("Disabling tracing for vhost '~s'~n", [VHost]),
+ update_config(fun (VHosts) -> VHosts -- [VHost] end).
+
+update_config(Fun) ->
+ {ok, VHosts0} = application:get_env(rabbit, ?TRACE_VHOSTS),
+ VHosts = Fun(VHosts0),
+ application:set_env(rabbit, ?TRACE_VHOSTS, VHosts),
+ rabbit_channel:refresh_config_local(),
+ ok.
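+
+%% Observable behaviour sketch: after rabbit_trace:start(<<"/">>), every
+%% message published in the vhost is re-published to the
+%% amq.rabbitmq.trace topic exchange with routing key
+%% "publish.<exchange name>", and every delivery with
+%% "deliver.<queue name>", so a consumer bound with "publish.#" sees a
+%% copy of each publish in the vhost.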
+
+%%----------------------------------------------------------------------------
+
+trace(#exchange{name = Name}, #basic_message{exchange_name = Name},
+ _RKPrefix, _RKSuffix, _Extra) ->
+ ok;
+trace(X, Msg = #basic_message{content = #content{payload_fragments_rev = PFR}},
+ RKPrefix, RKSuffix, Extra) ->
+ ok = rabbit_basic:publish(
+ X, <<RKPrefix/binary, ".", RKSuffix/binary>>,
+ #'P_basic'{headers = msg_to_table(Msg) ++ Extra}, PFR),
+ ok.
+
+msg_to_table(#basic_message{exchange_name = #resource{name = XName},
+ routing_keys = RoutingKeys,
+ content = Content}) ->
+ #content{properties = Props} =
+ rabbit_binary_parser:ensure_content_decoded(Content),
+ {PropsTable, _Ix} =
+ lists:foldl(fun (K, {L, Ix}) ->
+ V = element(Ix, Props),
+ NewL = case V of
+ undefined -> L;
+ _ -> [{a2b(K), type(V), V} | L]
+ end,
+ {NewL, Ix + 1}
+ end, {[], 2}, record_info(fields, 'P_basic')),
+ [{<<"exchange_name">>, longstr, XName},
+ {<<"routing_keys">>, array, [{longstr, K} || K <- RoutingKeys]},
+ {<<"properties">>, table, PropsTable},
+ {<<"node">>, longstr, a2b(node())}].
+
+a2b(A) -> list_to_binary(atom_to_list(A)).
+
+type(V) when is_list(V) -> table;
+type(V) when is_integer(V) -> signedint;
+type(_V) -> longstr.
diff --git a/deps/rabbit/src/rabbit_tracking.erl b/deps/rabbit/src/rabbit_tracking.erl
new file mode 100644
index 0000000000..a124d20226
--- /dev/null
+++ b/deps/rabbit/src/rabbit_tracking.erl
@@ -0,0 +1,103 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_tracking).
+
+%% Common behaviour and processing functions for tracking components
+%%
+%% See in use:
+%% * rabbit_connection_tracking
+%% * rabbit_channel_tracking
+
+-callback boot() -> ok.
+-callback update_tracked(term()) -> ok.
+-callback handle_cast(term()) -> ok.
+-callback register_tracked(
+ rabbit_types:tracked_connection() |
+ rabbit_types:tracked_channel()) -> 'ok'.
+-callback unregister_tracked(
+ rabbit_types:tracked_connection_id() |
+ rabbit_types:tracked_channel_id()) -> 'ok'.
+-callback count_tracked_items_in(term()) -> non_neg_integer().
+-callback clear_tracking_tables() -> 'ok'.
+-callback shutdown_tracked_items(list(), term()) -> ok.
+
+-export([id/2, count_tracked_items/4, match_tracked_items/2,
+ clear_tracking_table/1, delete_tracking_table/3,
+ delete_tracked_entry/3]).
+
+%%----------------------------------------------------------------------------
+
+-spec id(atom(), term()) ->
+ rabbit_types:tracked_connection_id() | rabbit_types:tracked_channel_id().
+
+id(Node, Name) -> {Node, Name}.
+
+-spec count_tracked_items(function(), integer(), term(), string()) ->
+ non_neg_integer().
+
+count_tracked_items(TableNameFun, CountRecPosition, Key, ContextMsg) ->
+ lists:foldl(fun (Node, Acc) ->
+ Tab = TableNameFun(Node),
+ try
+ N = case mnesia:dirty_read(Tab, Key) of
+ [] -> 0;
+ [Val] ->
+ element(CountRecPosition, Val)
+ end,
+ Acc + N
+ catch _:Err ->
+ rabbit_log:error(
+ "Failed to fetch number of ~p ~p on node ~p:~n~p~n",
+ [ContextMsg, Key, Node, Err]),
+ Acc
+ end
+ end, 0, rabbit_nodes:all_running()).
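+
+%% Usage sketch: callers supply a fun mapping a node to its per-node
+%% tracking table, plus the tuple position of the counter field. The
+%% table-name fun and position below are hypothetical:
+%%
+%%   N = rabbit_tracking:count_tracked_items(
+%%         fun tracked_connection_table_for_node/1, 3,
+%%         <<"/">>, "connections in vhost"),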
+
+-spec match_tracked_items(function(), tuple()) -> term().
+
+match_tracked_items(TableNameFun, MatchSpec) ->
+ lists:foldl(
+ fun (Node, Acc) ->
+ Tab = TableNameFun(Node),
+ Acc ++ mnesia:dirty_match_object(
+ Tab,
+ MatchSpec)
+ end, [], rabbit_nodes:all_running()).
+
+-spec clear_tracking_table(atom()) -> ok.
+
+clear_tracking_table(TableName) ->
+ case mnesia:clear_table(TableName) of
+ {atomic, ok} -> ok;
+ {aborted, _} -> ok
+ end.
+
+-spec delete_tracking_table(atom(), node(), string()) -> ok.
+
+delete_tracking_table(TableName, Node, ContextMsg) ->
+ case mnesia:delete_table(TableName) of
+ {atomic, ok} -> ok;
+ {aborted, {no_exists, _}} -> ok;
+ {aborted, Error} ->
+ rabbit_log:error("Failed to delete a ~p table for node ~p: ~p",
+ [ContextMsg, Node, Error]),
+ ok
+ end.
+
+-spec delete_tracked_entry({atom(), atom(), list()}, function(), term()) -> ok.
+
+delete_tracked_entry(_ExistsCheckSpec = {M, F, A}, TableNameFun, Key) ->
+ ClusterNodes = rabbit_nodes:all_running(),
+ ExistsInCluster =
+ lists:any(fun(Node) -> rpc:call(Node, M, F, A) end, ClusterNodes),
+ case ExistsInCluster of
+ false ->
+ [mnesia:dirty_delete(TableNameFun(Node), Key) || Node <- ClusterNodes];
+ true ->
+ ok
+ end.
diff --git a/deps/rabbit/src/rabbit_upgrade.erl b/deps/rabbit/src/rabbit_upgrade.erl
new file mode 100644
index 0000000000..b1b128fecc
--- /dev/null
+++ b/deps/rabbit/src/rabbit_upgrade.erl
@@ -0,0 +1,314 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_upgrade).
+
+-export([maybe_upgrade_mnesia/0, maybe_upgrade_local/0,
+ maybe_migrate_queues_to_per_vhost_storage/0,
+ nodes_running/1, secondary_upgrade/1]).
+
+-include("rabbit.hrl").
+
+-define(VERSION_FILENAME, "schema_version").
+-define(LOCK_FILENAME, "schema_upgrade_lock").
+
+%% -------------------------------------------------------------------
+
+%% The upgrade logic is quite involved, due to the existence of
+%% clusters.
+%%
+%% Firstly, we have two different types of upgrades to do: Mnesia and
+%% everything else. Mnesia upgrades must only be done by one node in
+%% the cluster (we treat a non-clustered node as a single-node
+%% cluster). This is the primary upgrader. The other upgrades need to
+%% be done by all nodes.
+%%
+%% The primary upgrader has to start first (and do its Mnesia
+%% upgrades). Secondary upgraders need to reset their Mnesia database
+%% and then rejoin the cluster. They can't do the Mnesia upgrades as
+%% well and then merge databases since the cookie for each table will
+%% end up different and the merge will fail.
+%%
+%% This in turn means that we need to determine whether we are the
+%% primary or secondary upgrader *before* Mnesia comes up. If we
+%% didn't then the secondary upgrader would try to start Mnesia, and
+%% either hang waiting for a node which is not yet up, or fail since
+%% its schema differs from the other nodes in the cluster.
+%%
+%% Also, the primary upgrader needs to start Mnesia to do its
+%% upgrades, but needs to forcibly load tables rather than wait for
+%% them (in case it was not the last node to shut down, in which case
+%% it would wait forever).
+%%
+%% This in turn means that maybe_upgrade_mnesia/0 has to be patched
+%% into the boot process by prelaunch before the mnesia application is
+%% started. By the time Mnesia is started the upgrades have happened
+%% (on the primary), or Mnesia has been reset (on the secondary) and
+%% rabbit_mnesia:init_db_unchecked/2 can then make the node rejoin the cluster
+%% in the normal way.
+%%
+%% The non-mnesia upgrades are then triggered by
+%% rabbit_mnesia:init_db_unchecked/2. Of course, it's possible for a given
+%% upgrade process to only require Mnesia upgrades, or only require
+%% non-Mnesia upgrades. In the latter case no Mnesia resets and
+%% reclusterings occur.
+%%
+%% The primary upgrader needs to be a disc node. Ideally we would like
+%% it to be the last disc node to shut down (since otherwise there's a
+%% risk of data loss). On each node we therefore record the disc nodes
+%% that were still running when we shut down. A disc node that knows
+%% other nodes were up when it shut down, or a ram node, will refuse
+%% to be the primary upgrader, and will thus not start when upgrades
+%% are needed.
+%%
+%% However, this is racy if several nodes are shut down at once. Since
+%% rabbit records the running nodes, and shuts down before mnesia, the
+%% race manifests as all disc nodes thinking they are not the primary
+%% upgrader. Therefore the user can remove the record of the last disc
+%% node to shut down to get things going again. This may lose any
+%% mnesia changes that happened after the node chosen as the primary
+%% upgrader was shut down.
+
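+%% For orientation, rabbit_version:all_upgrades_required/1 (used below)
+%% returns {error, ...} or {ok, Upgrades}, where Upgrades maps each
+%% scope to a list of {Module, Function} steps, roughly:
+%%
+%%   {ok, [{mnesia, [{rabbit_upgrade_functions, queue_options}]},
+%%         {local, []},
+%%         {message_store, []}]}
+%%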
+%% -------------------------------------------------------------------
+
+ensure_backup_taken() ->
+ case filelib:is_file(lock_filename()) of
+ false -> case filelib:is_dir(backup_dir()) of
+ false -> ok = take_backup();
+ _ -> ok
+ end;
+ true ->
+ rabbit_log:error("Found lock file at ~s.
+ Either previous upgrade is in progress or has failed.
+ Database backup path: ~s",
+ [lock_filename(), backup_dir()]),
+ throw({error, previous_upgrade_failed})
+ end.
+
+take_backup() ->
+ BackupDir = backup_dir(),
+ info("upgrades: Backing up mnesia dir to ~p~n", [BackupDir]),
+ case rabbit_mnesia:copy_db(BackupDir) of
+ ok -> info("upgrades: Mnesia dir backed up to ~p~n",
+ [BackupDir]);
+ {error, E} -> throw({could_not_back_up_mnesia_dir, E, BackupDir})
+ end.
+
+ensure_backup_removed() ->
+ case filelib:is_dir(backup_dir()) of
+ true -> ok = remove_backup();
+ _ -> ok
+ end.
+
+remove_backup() ->
+ ok = rabbit_file:recursive_delete([backup_dir()]),
+ info("upgrades: Mnesia backup removed~n", []).
+
+-spec maybe_upgrade_mnesia() -> 'ok'.
+
+maybe_upgrade_mnesia() ->
+ AllNodes = rabbit_mnesia:cluster_nodes(all),
+ ok = rabbit_mnesia_rename:maybe_finish(AllNodes),
+ %% Mnesia upgrade is the first upgrade scope,
+ %% so we should create a backup here if there are any upgrades
+ case rabbit_version:all_upgrades_required([mnesia, local, message_store]) of
+ {error, starting_from_scratch} ->
+ ok;
+ {error, version_not_available} ->
+ case AllNodes of
+ [] -> die("Cluster upgrade needed but upgrading from "
+ "< 2.1.1.~nUnfortunately you will need to "
+ "rebuild the cluster.", []);
+ _ -> ok
+ end;
+ {error, _} = Err ->
+ throw(Err);
+ {ok, []} ->
+ ok;
+ {ok, Upgrades} ->
+ ensure_backup_taken(),
+ run_mnesia_upgrades(proplists:get_value(mnesia, Upgrades, []),
+ AllNodes)
+ end.
+
+run_mnesia_upgrades([], _) -> ok;
+run_mnesia_upgrades(Upgrades, AllNodes) ->
+ case upgrade_mode(AllNodes) of
+ primary -> primary_upgrade(Upgrades, AllNodes);
+ secondary -> secondary_upgrade(AllNodes)
+ end.
+
+upgrade_mode(AllNodes) ->
+ case nodes_running(AllNodes) of
+ [] ->
+ AfterUs = rabbit_nodes:all_running() -- [node()],
+ case {node_type_legacy(), AfterUs} of
+ {disc, []} ->
+ primary;
+ {disc, _} ->
+ Filename = rabbit_node_monitor:running_nodes_filename(),
+ die("Cluster upgrade needed but other disc nodes shut "
+ "down after this one.~nPlease first start the last "
+ "disc node to shut down.~n~nNote: if several disc "
+ "nodes were shut down simultaneously they may "
+ "all~nshow this message. In which case, remove "
+ "the lock file on one of them and~nstart that node. "
+ "The lock file on this node is:~n~n ~s ", [Filename]);
+ {ram, _} ->
+ die("Cluster upgrade needed but this is a ram node.~n"
+ "Please first start the last disc node to shut down.",
+ [])
+ end;
+ [Another|_] ->
+ MyVersion = rabbit_version:desired_for_scope(mnesia),
+ case rpc:call(Another, rabbit_version, desired_for_scope,
+ [mnesia]) of
+ {badrpc, {'EXIT', {undef, _}}} ->
+ die_because_cluster_upgrade_needed(unknown_old_version,
+ MyVersion);
+ {badrpc, Reason} ->
+ die_because_cluster_upgrade_needed({unknown, Reason},
+ MyVersion);
+ CV -> case rabbit_version:matches(
+ MyVersion, CV) of
+ true -> secondary;
+ false -> die_because_cluster_upgrade_needed(
+ CV, MyVersion)
+ end
+ end
+ end.
+
+-spec die_because_cluster_upgrade_needed(any(), any()) -> no_return().
+
+die_because_cluster_upgrade_needed(ClusterVersion, MyVersion) ->
+ %% The other node(s) are running an
+ %% unexpected version.
+ die("Cluster upgrade needed but other nodes are "
+ "running ~p~nand I want ~p",
+ [ClusterVersion, MyVersion]).
+
+-spec die(string(), list()) -> no_return().
+
+die(Msg, Args) ->
+ %% We don't throw or exit here since that gets thrown
+ %% straight out into do_boot, generating an erl_crash.dump
+ %% and displaying any error message in a confusing way.
+ rabbit_log:error(Msg, Args),
+ Str = rabbit_misc:format(
+ "~n~n****~n~n" ++ Msg ++ "~n~n****~n~n~n", Args),
+ io:format(Str),
+ error_logger:logfile(close),
+ case application:get_env(rabbit, halt_on_upgrade_failure) of
+ {ok, false} -> throw({upgrade_error, Str});
+ _ -> halt(1) %% i.e. true or undefined
+ end.
+
+primary_upgrade(Upgrades, Nodes) ->
+ Others = Nodes -- [node()],
+ ok = apply_upgrades(
+ mnesia,
+ Upgrades,
+ fun () ->
+ rabbit_table:force_load(),
+ case Others of
+ [] -> ok;
+ _ -> info("mnesia upgrades: Breaking cluster~n", []),
+ [{atomic, ok} = mnesia:del_table_copy(schema, Node)
+ || Node <- Others]
+ end
+ end),
+ ok.
+
+secondary_upgrade(AllNodes) ->
+ %% must do this before we wipe out schema
+ NodeType = node_type_legacy(),
+ rabbit_misc:ensure_ok(mnesia:delete_schema([node()]),
+ cannot_delete_schema),
+ rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia),
+ ok = rabbit_mnesia:init_db_unchecked(AllNodes, NodeType),
+ ok = rabbit_version:record_desired_for_scope(mnesia),
+ ok.
+
+nodes_running(Nodes) ->
+ [N || N <- Nodes, rabbit:is_running(N)].
+
+%% -------------------------------------------------------------------
+
+-spec maybe_upgrade_local() ->
+ 'ok' |
+ 'version_not_available' |
+ 'starting_from_scratch'.
+
+maybe_upgrade_local() ->
+ case rabbit_version:upgrades_required(local) of
+ {error, version_not_available} -> version_not_available;
+ {error, starting_from_scratch} -> starting_from_scratch;
+ {error, _} = Err -> throw(Err);
+ {ok, []} -> ensure_backup_removed(),
+ ok;
+ {ok, Upgrades} -> mnesia:stop(),
+ ok = apply_upgrades(local, Upgrades,
+ fun () -> ok end),
+ ok
+ end.
+
+%% -------------------------------------------------------------------
+
+maybe_migrate_queues_to_per_vhost_storage() ->
+ Result = case rabbit_version:upgrades_required(message_store) of
+ {error, version_not_available} -> version_not_available;
+ {error, starting_from_scratch} ->
+ starting_from_scratch;
+ {error, _} = Err -> throw(Err);
+ {ok, []} -> ok;
+ {ok, Upgrades} -> apply_upgrades(message_store,
+ Upgrades,
+ fun() -> ok end),
+ ok
+ end,
+ %% Message store upgrades should be the last group;
+ %% the backup can therefore be deleted here.
+ ensure_backup_removed(),
+ Result.
+
+%% -------------------------------------------------------------------
+
+apply_upgrades(Scope, Upgrades, Fun) ->
+ ok = rabbit_file:lock_file(lock_filename()),
+ info("~s upgrades: ~w to apply~n", [Scope, length(Upgrades)]),
+ rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia),
+ Fun(),
+ [apply_upgrade(Scope, Upgrade) || Upgrade <- Upgrades],
+ info("~s upgrades: All upgrades applied successfully~n", [Scope]),
+ ok = rabbit_version:record_desired_for_scope(Scope),
+ ok = file:delete(lock_filename()).
+
+apply_upgrade(Scope, {M, F}) ->
+ info("~s upgrades: Applying ~w:~w~n", [Scope, M, F]),
+ ok = apply(M, F, []).
+
+%% -------------------------------------------------------------------
+
+dir() -> rabbit_mnesia:dir().
+
+lock_filename() -> lock_filename(dir()).
+lock_filename(Dir) -> filename:join(Dir, ?LOCK_FILENAME).
+backup_dir() -> dir() ++ "-upgrade-backup".
+
+node_type_legacy() ->
+ %% This is pretty ugly but we can't start Mnesia and ask it (will
+ %% hang), we can't look at the config file (may not include us
+ %% even if we're a disc node). We also can't use
+ %% rabbit_mnesia:node_type/0 because that will give false
+ %% positives on Rabbit up to 2.5.1.
+ case filelib:is_regular(filename:join(dir(), "rabbit_durable_exchange.DCD")) of
+ true -> disc;
+ false -> ram
+ end.
+
+info(Msg, Args) -> rabbit_log:info(Msg, Args).
diff --git a/deps/rabbit/src/rabbit_upgrade_functions.erl b/deps/rabbit/src/rabbit_upgrade_functions.erl
new file mode 100644
index 0000000000..59417c72bb
--- /dev/null
+++ b/deps/rabbit/src/rabbit_upgrade_functions.erl
@@ -0,0 +1,662 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_upgrade_functions).
+
+%% If you are tempted to add include("rabbit.hrl"). here, don't. Using record
+%% defs here leads to pain later.
+
+-compile([nowarn_export_all, export_all]).
+
+-rabbit_upgrade({remove_user_scope, mnesia, []}).
+-rabbit_upgrade({hash_passwords, mnesia, []}).
+-rabbit_upgrade({add_ip_to_listener, mnesia, []}).
+-rabbit_upgrade({add_opts_to_listener, mnesia, [add_ip_to_listener]}).
+-rabbit_upgrade({internal_exchanges, mnesia, []}).
+-rabbit_upgrade({user_to_internal_user, mnesia, [hash_passwords]}).
+-rabbit_upgrade({topic_trie, mnesia, []}).
+-rabbit_upgrade({semi_durable_route, mnesia, []}).
+-rabbit_upgrade({exchange_event_serial, mnesia, []}).
+-rabbit_upgrade({trace_exchanges, mnesia, [internal_exchanges]}).
+-rabbit_upgrade({user_admin_to_tags, mnesia, [user_to_internal_user]}).
+-rabbit_upgrade({ha_mirrors, mnesia, []}).
+-rabbit_upgrade({gm, mnesia, []}).
+-rabbit_upgrade({exchange_scratch, mnesia, [trace_exchanges]}).
+-rabbit_upgrade({mirrored_supervisor, mnesia, []}).
+-rabbit_upgrade({topic_trie_node, mnesia, []}).
+-rabbit_upgrade({runtime_parameters, mnesia, []}).
+-rabbit_upgrade({exchange_scratches, mnesia, [exchange_scratch]}).
+-rabbit_upgrade({policy, mnesia,
+ [exchange_scratches, ha_mirrors]}).
+-rabbit_upgrade({sync_slave_pids, mnesia, [policy]}).
+-rabbit_upgrade({no_mirror_nodes, mnesia, [sync_slave_pids]}).
+-rabbit_upgrade({gm_pids, mnesia, [no_mirror_nodes]}).
+-rabbit_upgrade({exchange_decorators, mnesia, [policy]}).
+-rabbit_upgrade({policy_apply_to, mnesia, [runtime_parameters]}).
+-rabbit_upgrade({queue_decorators, mnesia, [gm_pids]}).
+-rabbit_upgrade({internal_system_x, mnesia, [exchange_decorators]}).
+-rabbit_upgrade({cluster_name, mnesia, [runtime_parameters]}).
+-rabbit_upgrade({down_slave_nodes, mnesia, [queue_decorators]}).
+-rabbit_upgrade({queue_state, mnesia, [down_slave_nodes]}).
+-rabbit_upgrade({recoverable_slaves, mnesia, [queue_state]}).
+-rabbit_upgrade({policy_version, mnesia, [recoverable_slaves]}).
+-rabbit_upgrade({slave_pids_pending_shutdown, mnesia, [policy_version]}).
+-rabbit_upgrade({user_password_hashing, mnesia, [hash_passwords]}).
+-rabbit_upgrade({operator_policies, mnesia, [slave_pids_pending_shutdown, internal_system_x]}).
+-rabbit_upgrade({vhost_limits, mnesia, []}).
+-rabbit_upgrade({queue_vhost_field, mnesia, [operator_policies]}).
+-rabbit_upgrade({topic_permission, mnesia, []}).
+-rabbit_upgrade({queue_options, mnesia, [queue_vhost_field]}).
+-rabbit_upgrade({exchange_options, mnesia, [operator_policies]}).
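+
+%% Each -rabbit_upgrade attribute above declares {Step, Scope, DependsOn}:
+%% the exported function Step/0 implements the step, and DependsOn lists
+%% the steps that must run first. A new step would follow the same
+%% pattern (hypothetical example):
+%%
+%%   -rabbit_upgrade({add_my_field, mnesia, [exchange_options]}).
+%%
+%%   add_my_field() ->
+%%       transform(rabbit_queue,
+%%                 fun (Q) -> erlang:append_element(Q, undefined) end,
+%%                 [... existing fields ..., my_field]).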
+
+%% -------------------------------------------------------------------
+
+%% replaces vhost.dummy (used to avoid having a single-field record
+%% which Mnesia doesn't like) with vhost.limits (which is actually
+%% used)
+
+-spec vhost_limits() -> 'ok'.
+
+vhost_limits() ->
+ transform(
+ rabbit_vhost,
+ fun ({vhost, VHost, _Dummy}) ->
+ {vhost, VHost, undefined}
+ end,
+ [virtual_host, limits]).
+
+%% It's a bad idea to use records or record_info here, even for the
+%% destination form. Because in the future, the destination form of
+%% your current transform may not match the record any more, and it
+%% would be messy to have to go back and fix old transforms at that
+%% point.
+
+-spec remove_user_scope() -> 'ok'.
+
+remove_user_scope() ->
+ transform(
+ rabbit_user_permission,
+ fun ({user_permission, UV, {permission, _Scope, Conf, Write, Read}}) ->
+ {user_permission, UV, {permission, Conf, Write, Read}}
+ end,
+ [user_vhost, permission]).
+
+%% This is an early migration that hashes passwords using MD5; it is
+%% only relevant to those migrating from 2.1.1.
+%% All users created in 3.6.0 or later will use SHA-256 (unless
+%% configured otherwise).
+
+-spec hash_passwords() -> 'ok'.
+
+hash_passwords() ->
+ transform(
+ rabbit_user,
+ fun ({user, Username, Password, IsAdmin}) ->
+ Hash = rabbit_auth_backend_internal:hash_password(rabbit_password_hashing_md5, Password),
+ {user, Username, Hash, IsAdmin}
+ end,
+ [username, password_hash, is_admin]).
+
+-spec add_ip_to_listener() -> 'ok'.
+
+add_ip_to_listener() ->
+ transform(
+ rabbit_listener,
+ fun ({listener, Node, Protocol, Host, Port}) ->
+ {listener, Node, Protocol, Host, {0,0,0,0}, Port}
+ end,
+ [node, protocol, host, ip_address, port]).
+
+-spec add_opts_to_listener() -> 'ok'.
+
+add_opts_to_listener() ->
+ transform(
+ rabbit_listener,
+ fun ({listener, Node, Protocol, Host, IP, Port}) ->
+ {listener, Node, Protocol, Host, IP, Port, []}
+ end,
+ [node, protocol, host, ip_address, port, opts]).
+
+-spec internal_exchanges() -> 'ok'.
+
+internal_exchanges() ->
+ Tables = [rabbit_exchange, rabbit_durable_exchange],
+ AddInternalFun =
+ fun ({exchange, Name, Type, Durable, AutoDelete, Args}) ->
+ {exchange, Name, Type, Durable, AutoDelete, false, Args}
+ end,
+ [ ok = transform(T,
+ AddInternalFun,
+ [name, type, durable, auto_delete, internal, arguments])
+ || T <- Tables ],
+ ok.
+
+-spec user_to_internal_user() -> 'ok'.
+
+user_to_internal_user() ->
+ transform(
+ rabbit_user,
+ fun({user, Username, PasswordHash, IsAdmin}) ->
+ {internal_user, Username, PasswordHash, IsAdmin}
+ end,
+ [username, password_hash, is_admin], internal_user).
+
+-spec topic_trie() -> 'ok'.
+
+topic_trie() ->
+ create(rabbit_topic_trie_edge, [{record_name, topic_trie_edge},
+ {attributes, [trie_edge, node_id]},
+ {type, ordered_set}]),
+ create(rabbit_topic_trie_binding, [{record_name, topic_trie_binding},
+ {attributes, [trie_binding, value]},
+ {type, ordered_set}]).
+
+-spec semi_durable_route() -> 'ok'.
+
+semi_durable_route() ->
+ create(rabbit_semi_durable_route, [{record_name, route},
+ {attributes, [binding, value]}]).
+
+-spec exchange_event_serial() -> 'ok'.
+
+exchange_event_serial() ->
+ create(rabbit_exchange_serial, [{record_name, exchange_serial},
+ {attributes, [name, next]}]).
+
+-spec trace_exchanges() -> 'ok'.
+
+trace_exchanges() ->
+ [declare_exchange(
+ rabbit_misc:r(VHost, exchange, <<"amq.rabbitmq.trace">>), topic) ||
+ VHost <- rabbit_vhost:list_names()],
+ ok.
+
+-spec user_admin_to_tags() -> 'ok'.
+
+user_admin_to_tags() ->
+ transform(
+ rabbit_user,
+ fun({internal_user, Username, PasswordHash, true}) ->
+ {internal_user, Username, PasswordHash, [administrator]};
+ ({internal_user, Username, PasswordHash, false}) ->
+ {internal_user, Username, PasswordHash, [management]}
+ end,
+ [username, password_hash, tags], internal_user).
+
+-spec ha_mirrors() -> 'ok'.
+
+ha_mirrors() ->
+ Tables = [rabbit_queue, rabbit_durable_queue],
+ AddMirrorPidsFun =
+ fun ({amqqueue, Name, Durable, AutoDelete, Owner, Arguments, Pid}) ->
+ {amqqueue, Name, Durable, AutoDelete, Owner, Arguments, Pid,
+ [], undefined}
+ end,
+ [ ok = transform(T,
+ AddMirrorPidsFun,
+ [name, durable, auto_delete, exclusive_owner, arguments,
+ pid, slave_pids, mirror_nodes])
+ || T <- Tables ],
+ ok.
+
+-spec gm() -> 'ok'.
+
+gm() ->
+ create(gm_group, [{record_name, gm_group},
+ {attributes, [name, version, members]}]).
+
+-spec exchange_scratch() -> 'ok'.
+
+exchange_scratch() ->
+ ok = exchange_scratch(rabbit_exchange),
+ ok = exchange_scratch(rabbit_durable_exchange).
+
+exchange_scratch(Table) ->
+ transform(
+ Table,
+ fun ({exchange, Name, Type, Dur, AutoDel, Int, Args}) ->
+ {exchange, Name, Type, Dur, AutoDel, Int, Args, undefined}
+ end,
+ [name, type, durable, auto_delete, internal, arguments, scratch]).
+
+-spec mirrored_supervisor() -> 'ok'.
+
+mirrored_supervisor() ->
+ create(mirrored_sup_childspec,
+ [{record_name, mirrored_sup_childspec},
+ {attributes, [key, mirroring_pid, childspec]}]).
+
+-spec topic_trie_node() -> 'ok'.
+
+topic_trie_node() ->
+ create(rabbit_topic_trie_node,
+ [{record_name, topic_trie_node},
+ {attributes, [trie_node, edge_count, binding_count]},
+ {type, ordered_set}]).
+
+-spec runtime_parameters() -> 'ok'.
+
+runtime_parameters() ->
+ create(rabbit_runtime_parameters,
+ [{record_name, runtime_parameters},
+ {attributes, [key, value]},
+ {disc_copies, [node()]}]).
+
+exchange_scratches() ->
+ ok = exchange_scratches(rabbit_exchange),
+ ok = exchange_scratches(rabbit_durable_exchange).
+
+exchange_scratches(Table) ->
+ transform(
+ Table,
+ fun ({exchange, Name, Type = <<"x-federation">>, Dur, AutoDel, Int, Args,
+ Scratch}) ->
+ Scratches = orddict:store(federation, Scratch, orddict:new()),
+ {exchange, Name, Type, Dur, AutoDel, Int, Args, Scratches};
+ %% We assert here that nothing else uses the scratch mechanism ATM
+ ({exchange, Name, Type, Dur, AutoDel, Int, Args, undefined}) ->
+ {exchange, Name, Type, Dur, AutoDel, Int, Args, undefined}
+ end,
+ [name, type, durable, auto_delete, internal, arguments, scratches]).
+
+-spec policy() -> 'ok'.
+
+policy() ->
+ ok = exchange_policy(rabbit_exchange),
+ ok = exchange_policy(rabbit_durable_exchange),
+ ok = queue_policy(rabbit_queue),
+ ok = queue_policy(rabbit_durable_queue).
+
+exchange_policy(Table) ->
+ transform(
+ Table,
+ fun ({exchange, Name, Type, Dur, AutoDel, Int, Args, Scratches}) ->
+ {exchange, Name, Type, Dur, AutoDel, Int, Args, Scratches,
+ undefined}
+ end,
+ [name, type, durable, auto_delete, internal, arguments, scratches,
+ policy]).
+
+queue_policy(Table) ->
+ transform(
+ Table,
+ fun ({amqqueue, Name, Dur, AutoDel, Excl, Args, Pid, SPids, MNodes}) ->
+ {amqqueue, Name, Dur, AutoDel, Excl, Args, Pid, SPids, MNodes,
+ undefined}
+ end,
+ [name, durable, auto_delete, exclusive_owner, arguments, pid,
+ slave_pids, mirror_nodes, policy]).
+
+-spec sync_slave_pids() -> 'ok'.
+
+sync_slave_pids() ->
+ Tables = [rabbit_queue, rabbit_durable_queue],
+ AddSyncSlavesFun =
+ fun ({amqqueue, N, D, AD, Excl, Args, Pid, SPids, MNodes, Pol}) ->
+ {amqqueue, N, D, AD, Excl, Args, Pid, SPids, [], MNodes, Pol}
+ end,
+ [ok = transform(T, AddSyncSlavesFun,
+ [name, durable, auto_delete, exclusive_owner, arguments,
+ pid, slave_pids, sync_slave_pids, mirror_nodes, policy])
+ || T <- Tables],
+ ok.
+
+-spec no_mirror_nodes() -> 'ok'.
+
+no_mirror_nodes() ->
+ Tables = [rabbit_queue, rabbit_durable_queue],
+ RemoveMirrorNodesFun =
+ fun ({amqqueue, N, D, AD, O, A, Pid, SPids, SSPids, _MNodes, Pol}) ->
+ {amqqueue, N, D, AD, O, A, Pid, SPids, SSPids, Pol}
+ end,
+ [ok = transform(T, RemoveMirrorNodesFun,
+ [name, durable, auto_delete, exclusive_owner, arguments,
+ pid, slave_pids, sync_slave_pids, policy])
+ || T <- Tables],
+ ok.
+
+-spec gm_pids() -> 'ok'.
+
+gm_pids() ->
+ Tables = [rabbit_queue, rabbit_durable_queue],
+ AddGMPidsFun =
+ fun ({amqqueue, N, D, AD, O, A, Pid, SPids, SSPids, Pol}) ->
+ {amqqueue, N, D, AD, O, A, Pid, SPids, SSPids, Pol, []}
+ end,
+ [ok = transform(T, AddGMPidsFun,
+ [name, durable, auto_delete, exclusive_owner, arguments,
+ pid, slave_pids, sync_slave_pids, policy, gm_pids])
+ || T <- Tables],
+ ok.
+
+-spec exchange_decorators() -> 'ok'.
+
+exchange_decorators() ->
+ ok = exchange_decorators(rabbit_exchange),
+ ok = exchange_decorators(rabbit_durable_exchange).
+
+exchange_decorators(Table) ->
+ transform(
+ Table,
+ fun ({exchange, Name, Type, Dur, AutoDel, Int, Args, Scratches,
+ Policy}) ->
+ {exchange, Name, Type, Dur, AutoDel, Int, Args, Scratches, Policy,
+ {[], []}}
+ end,
+ [name, type, durable, auto_delete, internal, arguments, scratches, policy,
+ decorators]).
+
+-spec policy_apply_to() -> 'ok'.
+
+policy_apply_to() ->
+ transform(
+ rabbit_runtime_parameters,
+ fun ({runtime_parameters, Key = {_VHost, <<"policy">>, _Name}, Value}) ->
+ ApplyTo = apply_to(proplists:get_value(<<"definition">>, Value)),
+ {runtime_parameters, Key, [{<<"apply-to">>, ApplyTo} | Value]};
+ ({runtime_parameters, Key, Value}) ->
+ {runtime_parameters, Key, Value}
+ end,
+ [key, value]),
+ rabbit_policy:invalidate(),
+ ok.
+
+apply_to(Def) ->
+ case [proplists:get_value(K, Def) ||
+ K <- [<<"federation-upstream-set">>, <<"ha-mode">>]] of
+ [undefined, undefined] -> <<"all">>;
+ [_, undefined] -> <<"exchanges">>;
+ [undefined, _] -> <<"queues">>;
+ [_, _] -> <<"all">>
+ end.
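+
+%% For example, a definition containing only <<"ha-mode">> yields
+%% <<"queues">>, one containing only <<"federation-upstream-set">>
+%% yields <<"exchanges">>, and one containing both or neither yields
+%% <<"all">>.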
+
+-spec queue_decorators() -> 'ok'.
+
+queue_decorators() ->
+ ok = queue_decorators(rabbit_queue),
+ ok = queue_decorators(rabbit_durable_queue).
+
+queue_decorators(Table) ->
+ transform(
+ Table,
+ fun ({amqqueue, Name, Durable, AutoDelete, ExclusiveOwner, Arguments,
+ Pid, SlavePids, SyncSlavePids, Policy, GmPids}) ->
+ {amqqueue, Name, Durable, AutoDelete, ExclusiveOwner, Arguments,
+ Pid, SlavePids, SyncSlavePids, Policy, GmPids, []}
+ end,
+ [name, durable, auto_delete, exclusive_owner, arguments, pid, slave_pids,
+ sync_slave_pids, policy, gm_pids, decorators]).
+
+-spec internal_system_x() -> 'ok'.
+
+internal_system_x() ->
+ transform(
+ rabbit_durable_exchange,
+ fun ({exchange, Name = {resource, _, _, <<"amq.rabbitmq.", _/binary>>},
+ Type, Dur, AutoDel, _Int, Args, Scratches, Policy, Decorators}) ->
+ {exchange, Name, Type, Dur, AutoDel, true, Args, Scratches,
+ Policy, Decorators};
+ (X) ->
+ X
+ end,
+ [name, type, durable, auto_delete, internal, arguments, scratches, policy,
+ decorators]).
+
+-spec cluster_name() -> 'ok'.
+
+cluster_name() ->
+ {atomic, ok} = mnesia:transaction(fun cluster_name_tx/0),
+ ok.
+
+cluster_name_tx() ->
+ %% mnesia:transform_table/4 does not let us delete records
+ T = rabbit_runtime_parameters,
+ mnesia:write_lock_table(T),
+ Ks = [K || {_VHost, <<"federation">>, <<"local-nodename">>} = K
+ <- mnesia:all_keys(T)],
+ case Ks of
+ [] -> ok;
+ [K|Tl] -> [{runtime_parameters, _K, Name}] = mnesia:read(T, K, write),
+ R = {runtime_parameters, cluster_name, Name},
+ mnesia:write(T, R, write),
+ case Tl of
+ [] -> ok;
+ _ -> {VHost, _, _} = K,
+ error_logger:warning_msg(
+ "Multiple local-nodenames found, picking '~s' "
+ "from '~s' for cluster name~n", [Name, VHost])
+ end
+ end,
+ [mnesia:delete(T, K, write) || K <- Ks],
+ ok.
+
+-spec down_slave_nodes() -> 'ok'.
+
+down_slave_nodes() ->
+ ok = down_slave_nodes(rabbit_queue),
+ ok = down_slave_nodes(rabbit_durable_queue).
+
+down_slave_nodes(Table) ->
+ transform(
+ Table,
+ fun ({amqqueue, Name, Durable, AutoDelete, ExclusiveOwner, Arguments,
+ Pid, SlavePids, SyncSlavePids, Policy, GmPids, Decorators}) ->
+ {amqqueue, Name, Durable, AutoDelete, ExclusiveOwner, Arguments,
+ Pid, SlavePids, SyncSlavePids, [], Policy, GmPids, Decorators}
+ end,
+ [name, durable, auto_delete, exclusive_owner, arguments, pid, slave_pids,
+ sync_slave_pids, down_slave_nodes, policy, gm_pids, decorators]).
+
+-spec queue_state() -> 'ok'.
+
+queue_state() ->
+ ok = queue_state(rabbit_queue),
+ ok = queue_state(rabbit_durable_queue).
+
+queue_state(Table) ->
+ transform(
+ Table,
+ fun ({amqqueue, Name, Durable, AutoDelete, ExclusiveOwner, Arguments,
+ Pid, SlavePids, SyncSlavePids, DSN, Policy, GmPids, Decorators}) ->
+ {amqqueue, Name, Durable, AutoDelete, ExclusiveOwner, Arguments,
+ Pid, SlavePids, SyncSlavePids, DSN, Policy, GmPids, Decorators,
+ live}
+ end,
+ [name, durable, auto_delete, exclusive_owner, arguments, pid, slave_pids,
+ sync_slave_pids, down_slave_nodes, policy, gm_pids, decorators, state]).
+
+-spec recoverable_slaves() -> 'ok'.
+
+recoverable_slaves() ->
+ ok = recoverable_slaves(rabbit_queue),
+ ok = recoverable_slaves(rabbit_durable_queue).
+
+recoverable_slaves(Table) ->
+ transform(
+ Table, fun (Q) -> Q end, %% Don't change shape of record
+ [name, durable, auto_delete, exclusive_owner, arguments, pid, slave_pids,
+ sync_slave_pids, recoverable_slaves, policy, gm_pids, decorators,
+ state]).
+
+policy_version() ->
+ ok = policy_version(rabbit_queue),
+ ok = policy_version(rabbit_durable_queue).
+
+policy_version(Table) ->
+ transform(
+ Table,
+ fun ({amqqueue, Name, Durable, AutoDelete, ExclusiveOwner, Arguments,
+ Pid, SlavePids, SyncSlavePids, DSN, Policy, GmPids, Decorators,
+ State}) ->
+ {amqqueue, Name, Durable, AutoDelete, ExclusiveOwner, Arguments,
+ Pid, SlavePids, SyncSlavePids, DSN, Policy, GmPids, Decorators,
+ State, 0}
+ end,
+ [name, durable, auto_delete, exclusive_owner, arguments, pid, slave_pids,
+ sync_slave_pids, recoverable_slaves, policy, gm_pids, decorators, state,
+ policy_version]).
+
+slave_pids_pending_shutdown() ->
+ ok = slave_pids_pending_shutdown(rabbit_queue),
+ ok = slave_pids_pending_shutdown(rabbit_durable_queue).
+
+slave_pids_pending_shutdown(Table) ->
+ transform(
+ Table,
+ fun ({amqqueue, Name, Durable, AutoDelete, ExclusiveOwner, Arguments,
+ Pid, SlavePids, SyncSlavePids, DSN, Policy, GmPids, Decorators,
+ State, PolicyVersion}) ->
+ {amqqueue, Name, Durable, AutoDelete, ExclusiveOwner, Arguments,
+ Pid, SlavePids, SyncSlavePids, DSN, Policy, GmPids, Decorators,
+ State, PolicyVersion, []}
+ end,
+ [name, durable, auto_delete, exclusive_owner, arguments, pid, slave_pids,
+ sync_slave_pids, recoverable_slaves, policy, gm_pids, decorators, state,
+ policy_version, slave_pids_pending_shutdown]).
+
+-spec operator_policies() -> 'ok'.
+
+operator_policies() ->
+ ok = exchange_operator_policies(rabbit_exchange),
+ ok = exchange_operator_policies(rabbit_durable_exchange),
+ ok = queue_operator_policies(rabbit_queue),
+ ok = queue_operator_policies(rabbit_durable_queue).
+
+exchange_operator_policies(Table) ->
+ transform(
+ Table,
+ fun ({exchange, Name, Type, Dur, AutoDel, Internal,
+ Args, Scratches, Policy, Decorators}) ->
+ {exchange, Name, Type, Dur, AutoDel, Internal,
+ Args, Scratches, Policy, undefined, Decorators}
+ end,
+ [name, type, durable, auto_delete, internal, arguments, scratches, policy,
+ operator_policy, decorators]).
+
+queue_operator_policies(Table) ->
+ transform(
+ Table,
+ fun ({amqqueue, Name, Durable, AutoDelete, ExclusiveOwner, Arguments,
+ Pid, SlavePids, SyncSlavePids, DSN, Policy, GmPids, Decorators,
+ State, PolicyVersion, SlavePidsPendingShutdown}) ->
+ {amqqueue, Name, Durable, AutoDelete, ExclusiveOwner, Arguments,
+ Pid, SlavePids, SyncSlavePids, DSN, Policy, undefined, GmPids,
+ Decorators, State, PolicyVersion, SlavePidsPendingShutdown}
+ end,
+ [name, durable, auto_delete, exclusive_owner, arguments, pid, slave_pids,
+ sync_slave_pids, recoverable_slaves, policy, operator_policy,
+ gm_pids, decorators, state, policy_version, slave_pids_pending_shutdown]).
+
+-spec queue_vhost_field() -> 'ok'.
+
+queue_vhost_field() ->
+ ok = queue_vhost_field(rabbit_queue),
+ ok = queue_vhost_field(rabbit_durable_queue),
+ {atomic, ok} = mnesia:add_table_index(rabbit_queue, vhost),
+ {atomic, ok} = mnesia:add_table_index(rabbit_durable_queue, vhost),
+ ok.
+
+queue_vhost_field(Table) ->
+ transform(
+ Table,
+ fun ({amqqueue, Name = {resource, VHost, queue, _QName}, Durable, AutoDelete, ExclusiveOwner, Arguments,
+ Pid, SlavePids, SyncSlavePids, DSN, Policy, OperatorPolicy, GmPids, Decorators,
+ State, PolicyVersion, SlavePidsPendingShutdown}) ->
+ {amqqueue, Name, Durable, AutoDelete, ExclusiveOwner, Arguments,
+ Pid, SlavePids, SyncSlavePids, DSN, Policy, OperatorPolicy, GmPids, Decorators,
+ State, PolicyVersion, SlavePidsPendingShutdown, VHost}
+ end,
+ [name, durable, auto_delete, exclusive_owner, arguments, pid, slave_pids,
+ sync_slave_pids, recoverable_slaves, policy, operator_policy,
+ gm_pids, decorators, state, policy_version, slave_pids_pending_shutdown, vhost]).
+
+-spec queue_options() -> 'ok'.
+
+queue_options() ->
+ ok = queue_options(rabbit_queue),
+ ok = queue_options(rabbit_durable_queue),
+ ok.
+
+queue_options(Table) ->
+ transform(
+ Table,
+ fun ({amqqueue, Name, Durable, AutoDelete, ExclusiveOwner, Arguments,
+ Pid, SlavePids, SyncSlavePids, DSN, Policy, OperatorPolicy, GmPids, Decorators,
+ State, PolicyVersion, SlavePidsPendingShutdown, VHost}) ->
+ {amqqueue, Name, Durable, AutoDelete, ExclusiveOwner, Arguments,
+ Pid, SlavePids, SyncSlavePids, DSN, Policy, OperatorPolicy, GmPids, Decorators,
+ State, PolicyVersion, SlavePidsPendingShutdown, VHost, #{}}
+ end,
+ [name, durable, auto_delete, exclusive_owner, arguments, pid, slave_pids,
+ sync_slave_pids, recoverable_slaves, policy, operator_policy,
+ gm_pids, decorators, state, policy_version, slave_pids_pending_shutdown, vhost, options]).
+
+%% Prior to 3.6.0, passwords were hashed using MD5. This migration
+%% populates existing records with that default. Users created with
+%% 3.6.0+ will have internal_user.hashing_algorithm populated by the
+%% internal authn backend.
+
+-spec user_password_hashing() -> 'ok'.
+
+user_password_hashing() ->
+ transform(
+ rabbit_user,
+ fun ({internal_user, Username, Hash, Tags}) ->
+ {internal_user, Username, Hash, Tags, rabbit_password_hashing_md5}
+ end,
+ [username, password_hash, tags, hashing_algorithm]).
+
+-spec topic_permission() -> 'ok'.
+topic_permission() ->
+ create(rabbit_topic_permission,
+ [{record_name, topic_permission},
+ {attributes, [topic_permission_key, permission]},
+ {disc_copies, [node()]}]).
+
+-spec exchange_options() -> 'ok'.
+
+exchange_options() ->
+ ok = exchange_options(rabbit_exchange),
+ ok = exchange_options(rabbit_durable_exchange).
+
+exchange_options(Table) ->
+ transform(
+ Table,
+ fun ({exchange, Name, Type, Dur, AutoDel, Internal,
+ Args, Scratches, Policy, OperatorPolicy, Decorators}) ->
+ {exchange, Name, Type, Dur, AutoDel, Internal,
+ Args, Scratches, Policy, OperatorPolicy, Decorators, #{}}
+ end,
+ [name, type, durable, auto_delete, internal, arguments, scratches, policy,
+ operator_policy, decorators, options]).
+
+%%--------------------------------------------------------------------
+
+transform(TableName, Fun, FieldList) ->
+ rabbit_table:wait([TableName]),
+ {atomic, ok} = mnesia:transform_table(TableName, Fun, FieldList),
+ ok.
+
+transform(TableName, Fun, FieldList, NewRecordName) ->
+ rabbit_table:wait([TableName]),
+ {atomic, ok} = mnesia:transform_table(TableName, Fun, FieldList,
+ NewRecordName),
+ ok.
+
+create(Tab, TabDef) ->
+ rabbit_log:debug("Will create a schema table named '~s'", [Tab]),
+ {atomic, ok} = mnesia:create_table(Tab, TabDef),
+ ok.
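+
+%% A minimal sketch (hypothetical table, record and fields) of how an
+%% upgrade step uses transform/3: rewrite every record to the new
+%% shape and list the resulting attribute names.
+%%
+%%   example_step() ->
+%%       transform(my_table,
+%%                 fun ({my_rec, Key, Val}) -> {my_rec, Key, Val, undefined} end,
+%%                 [key, value, extra]).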
+
+%% Minimal replacement for rabbit_exchange:declare that does not
+%% require the exchange type registry or worker pool to be running:
+%% it validates nothing and assumes the exchange type does not
+%% require serialisation. NB: this assumes the
+%% pre-exchange-scratch-space record format.
+declare_exchange(XName, Type) ->
+ X = {exchange, XName, Type, true, false, false, []},
+ ok = mnesia:dirty_write(rabbit_durable_exchange, X).
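+
+%% A hypothetical call, for illustration only (the exchange name is
+%% made up):
+%%
+%%   declare_exchange(rabbit_misc:r(<<"/">>, exchange, <<"my-x">>), direct).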
diff --git a/deps/rabbit/src/rabbit_upgrade_preparation.erl b/deps/rabbit/src/rabbit_upgrade_preparation.erl
new file mode 100644
index 0000000000..fc1de24610
--- /dev/null
+++ b/deps/rabbit/src/rabbit_upgrade_preparation.erl
@@ -0,0 +1,51 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_upgrade_preparation).
+
+-export([await_online_quorum_plus_one/1, await_online_synchronised_mirrors/1]).
+
+%%
+%% API
+%%
+
+-define(SAMPLING_INTERVAL, 200).
+
+await_online_quorum_plus_one(Timeout) ->
+ Iterations = ceil(Timeout / ?SAMPLING_INTERVAL),
+ do_await_safe_online_quorum(Iterations).
+
+
+await_online_synchronised_mirrors(Timeout) ->
+ Iterations = ceil(Timeout / ?SAMPLING_INTERVAL),
+ do_await_online_synchronised_mirrors(Iterations).
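+
+%% With the 200ms sampling interval, a 60000ms timeout translates to
+%% ceil(60000 / 200) = 300 polling iterations.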
+
+
+%%
+%% Implementation
+%%
+
+do_await_safe_online_quorum(0) ->
+ false;
+do_await_safe_online_quorum(IterationsLeft) ->
+ case rabbit_quorum_queue:list_with_minimum_quorum() of
+ [] -> true;
+ List when is_list(List) ->
+ timer:sleep(?SAMPLING_INTERVAL),
+ do_await_safe_online_quorum(IterationsLeft - 1)
+ end.
+
+
+do_await_online_synchronised_mirrors(0) ->
+ false;
+do_await_online_synchronised_mirrors(IterationsLeft) ->
+ case rabbit_amqqueue:list_local_mirrored_classic_without_synchronised_mirrors() of
+ [] -> true;
+ List when is_list(List) ->
+ timer:sleep(?SAMPLING_INTERVAL),
+ do_await_online_synchronised_mirrors(IterationsLeft - 1)
+ end.
diff --git a/deps/rabbit/src/rabbit_variable_queue.erl b/deps/rabbit/src/rabbit_variable_queue.erl
new file mode 100644
index 0000000000..cf6fa4a189
--- /dev/null
+++ b/deps/rabbit/src/rabbit_variable_queue.erl
@@ -0,0 +1,3015 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_variable_queue).
+
+-export([init/3, terminate/2, delete_and_terminate/2, delete_crashed/1,
+ purge/1, purge_acks/1,
+ publish/6, publish_delivered/5,
+ batch_publish/4, batch_publish_delivered/4,
+ discard/4, drain_confirmed/1,
+ dropwhile/2, fetchwhile/4, fetch/2, drop/2, ack/2, requeue/2,
+ ackfold/4, fold/3, len/1, is_empty/1, depth/1,
+ set_ram_duration_target/2, ram_duration/1, needs_timeout/1, timeout/1,
+ handle_pre_hibernate/1, resume/1, msg_rates/1,
+ info/2, invoke/3, is_duplicate/2, set_queue_mode/2,
+ zip_msgs_and_acks/4, multiple_routing_keys/0, handle_info/2]).
+
+-export([start/2, stop/1]).
+
+%% exported for testing only
+-export([start_msg_store/3, stop_msg_store/1, init/6]).
+
+-export([move_messages_to_vhost_store/0]).
+
+-export([migrate_queue/3, migrate_message/3, get_per_vhost_store_client/2,
+ get_global_store_client/1, log_upgrade_verbose/1,
+ log_upgrade_verbose/2]).
+
+-include_lib("stdlib/include/qlc.hrl").
+
+-define(QUEUE_MIGRATION_BATCH_SIZE, 100).
+-define(EMPTY_START_FUN_STATE, {fun (ok) -> finished end, ok}).
+
+%%----------------------------------------------------------------------------
+%% Messages, and their position in the queue, can be in memory or on
+%% disk, or both. Persistent messages will have both message and
+%% position pushed to disk as soon as they arrive; transient messages
+%% can be written to disk (and thus both types can be evicted from
+%% memory) under memory pressure. Whether a message is in RAM and
+%% whether it is persistent are orthogonal properties.
+%%
+%% Messages are persisted using the queue index and the message
+%% store. Normally the queue index holds the position of the message
+%% *within this queue* along with a couple of small bits of metadata,
+%% while the message store holds the message itself (including headers
+%% and other properties).
+%%
+%% However, as an optimisation, small messages can be embedded
+%% directly in the queue index and bypass the message store
+%% altogether.
+%%
+%% Definitions:
+%%
+%% alpha: this is a message where both the message itself, and its
+%% position within the queue are held in RAM
+%%
+%% beta: this is a message where the message itself is only held on
+%% disk (if persisted to the message store) but its position
+%% within the queue is held in RAM.
+%%
+%% gamma: this is a message where the message itself is only held on
+%% disk, but its position is both in RAM and on disk.
+%%
+%% delta: this is a collection of messages, represented by a single
+%% term, where the messages and their position are only held on
+%% disk.
+%%
+%% Note that for persistent messages, the message and its position
+%% within the queue are always held on disk, *in addition* to being in
+%% one of the above classifications.
+%%
+%% Also note that within this code, the term gamma seldom
+%% appears. It's frequently the case that gammas are defined by betas
+%% that have had their queue position recorded on disk.
+%%
+%% In general, messages move q1 -> q2 -> delta -> q3 -> q4, though
+%% many of these steps are frequently skipped. q1 and q4 only hold
+%% alphas, q2 and q3 hold both betas and gammas. When a message
+%% arrives, its classification is determined. It is then added to the
+%% rightmost appropriate queue.
+%%
+%% If a new message is determined to be a beta or gamma, q1 is
+%% empty. If a new message is determined to be a delta, q1 and q2 are
+%% empty (and actually q4 too).
+%%
+%% When removing messages from a queue, if q4 is empty then q3 is read
+%% directly. If q3 becomes empty then the next segment's worth of
+%% messages from delta are read into q3, reducing the size of
+%% delta. If the queue is non-empty, either q4 or q3 contains
+%% entries. It is never permitted for delta to hold all the messages
+%% in the queue.
+%%
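+%% Illustrative lifecycle under the above scheme (default mode,
+%% sustained memory pressure): a persistent message arrives as an
+%% alpha in q4; paging drops its payload from RAM, making it a
+%% beta/gamma in q3; further pressure rolls it into delta; on fetch it
+%% is paged back into q3 and delivered from there.
+%%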
+%% The duration indicated to us by the memory_monitor is used to
+%% calculate, given our current ingress and egress rates, how many
+%% messages we should hold in RAM (i.e. as alphas). We track the
+%% ingress and egress rates for both messages and pending acks and
+%% rates for both are considered when calculating the number of
+%% messages to hold in RAM. When we need to push alphas to betas or
+%% betas to gammas, we favour writing out messages that are further
+%% from the head of the queue. This minimises writes to disk, as the
+%% messages closer to the tail of the queue stay in the queue for
+%% longer, thus do not need to be replaced as quickly by sending other
+%% messages to disk.
+%%
+%% Whilst messages are pushed to disk and forgotten from RAM as soon
+%% as requested by a new setting of the queue RAM duration, the
+%% inverse is not true: we only load messages back into RAM as
+%% demanded as the queue is read from. Thus only publishes to the
+%% queue will take up available spare capacity.
+%%
+%% When we report our duration to the memory monitor, we calculate
+%% average ingress and egress rates over the last two samples, and
+%% then calculate our duration based on the sum of the ingress and
+%% egress rates. More than two samples could be used, but it's a
+%% balance between responding quickly enough to changes in
+%% producers/consumers versus ignoring temporary blips. The problem
+%% with temporary blips is that with just a few queues, they can have
+%% substantial impact on the calculation of the average duration and
+%% hence cause unnecessary I/O. Another alternative is to increase the
+%% amqqueue_process:RAM_DURATION_UPDATE_PERIOD to beyond 5
+%% seconds. However, that then runs the risk of being too slow to
+%% inform the memory monitor of changes. Thus a 5 second interval,
+%% plus a rolling average over the last two samples seems to work
+%% well in practice.
+%%
+%% The sum of the ingress and egress rates is used because the egress
+%% rate alone is not sufficient. Adding in the ingress rate means that
+%% queues which are being flooded by messages are given more memory,
+%% resulting in them being able to process the messages faster (by
+%% doing less I/O, or at least deferring it) and thus helping keep
+%% their mailboxes empty and thus the queue as a whole is more
+%% responsive. If such a queue also has fast but previously idle
+%% consumers, the consumer can then start to be driven as fast as it
+%% can go, whereas if only egress rate was being used, the incoming
+%% messages may have to be written to disk and then read back in,
+%% resulting in the hard disk being a bottleneck in driving the
+%% consumers. Generally, we want to give Rabbit every chance of
+%% getting rid of messages as fast as possible and remaining
+%% responsive, and using only the egress rate impacts that goal.
+%%
+%% Once the queue has more alphas than the target_ram_count, the
+%% surplus must be converted to betas, if not gammas, if not rolled
+%% into delta. The conditions under which these transitions occur
+%% reflect the conflicting goals of minimising RAM cost per msg, and
+%% minimising CPU cost per msg. Once the msg has become a beta, its
+%% payload is no longer in RAM, thus a read from the msg_store must
+%% occur before the msg can be delivered, but the RAM cost of a beta
+%% is the same as a gamma, so converting a beta to gamma will not free
+%% up any further RAM. To reduce the RAM cost further, the gamma must
+%% be rolled into delta. Whilst recovering a beta or a gamma to an
+%% alpha requires only one disk read (from the msg_store), recovering
+%% a msg from within delta will require two reads (queue_index and
+%% then msg_store). But delta has a near-0 per-msg RAM cost. So the
+%% conflict is between using delta more, which will free up more
+%% memory, but require additional CPU and disk ops, versus using delta
+%% less and gammas and betas more, which will cost more memory, but
+%% require fewer disk ops and less CPU overhead.
+%%
+%% In the case of a persistent msg published to a durable queue, the
+%% msg is immediately written to the msg_store and queue_index. If
+%% then additionally converted from an alpha, it'll immediately go to
+%% a gamma (as it's already in queue_index), and cannot exist as a
+%% beta. Thus a durable queue with a mixture of persistent and
+%% transient msgs in it which has more messages than permitted by the
+%% target_ram_count may contain an interspersed mixture of betas and
+%% gammas in q2 and q3.
+%%
+%% There is then a ratio that controls how many betas and gammas there
+%% can be. This is based on the target_ram_count and thus expresses
+%% the fact that as the number of permitted alphas in the queue falls,
+%% so should the number of betas and gammas fall (i.e. delta
+%% grows). If q2 and q3 contain more than the permitted number of
+%% betas and gammas, then the surplus are forcibly converted to gammas
+%% (as necessary) and then rolled into delta. The ratio is that
+%% delta/(betas+gammas+delta) equals
+%% (betas+gammas+delta)/(target_ram_count+betas+gammas+delta). I.e. as
+%% the target_ram_count shrinks to 0, so must betas and gammas.
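+%%
+%% Worked example (illustrative numbers): with target_ram_count = 100
+%% and betas+gammas+delta = 900, the ratio gives delta =
+%% 900*900 / (100+900) = 810, leaving only 90 messages as betas and
+%% gammas; as target_ram_count shrinks, that allowance shrinks too.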
+%%
+%% The conversion of betas to deltas is done if there are at least
+%% ?IO_BATCH_SIZE betas in q2 & q3. This value should not be too small,
+%% otherwise the frequent operations on the queues of q2 and q3 will not be
+%% effectively amortised (switching the direction of queue access defeats
+%% amortisation). Note that there is a natural upper bound due to credit_flow
+%% limits on the alpha to beta conversion.
+%%
+%% The conversion from alphas to betas is chunked due to the
+%% credit_flow limits of the msg_store. This further smooths the
+%% effects of changes to the target_ram_count and ensures the queue
+%% remains responsive even when there is a large amount of IO work to
+%% do. The 'resume' callback is utilised to ensure that conversions
+%% are done as promptly as possible whilst ensuring the queue remains
+%% responsive.
+%%
+%% In the queue we keep track of both messages that are pending
+%% delivery and messages that are pending acks. In the event of a
+%% queue purge, we only need to load qi segments if the queue has
+%% elements in deltas (i.e. it came under significant memory
+%% pressure). In the event of a queue deletion, in addition to the
+%% preceding, by keeping track of pending acks in RAM, we do not need
+%% to search through qi segments looking for messages that are yet to
+%% be acknowledged.
+%%
+%% Pending acks are recorded in memory by storing the message itself.
+%% If the message has been sent to disk, we do not store the message
+%% content. During memory reduction, pending acks containing message
+%% content have that content removed and the corresponding messages
+%% are pushed out to disk.
+%%
+%% Messages from pending acks are returned to q4, q3 and delta during
+%% requeue, based on the limits of seq_id contained in each. Requeued
+%% messages retain their original seq_id, maintaining order
+%% when requeued.
+%%
+%% The order in which alphas are pushed to betas and pending acks
+%% are pushed to disk is determined dynamically. We always prefer to
+%% push messages for the source (alphas or acks) that is growing the
+%% fastest (with growth measured as avg. ingress - avg. egress).
+%%
+%% Notes on Clean Shutdown
+%% (This documents behaviour in variable_queue, queue_index and
+%% msg_store.)
+%%
+%% In order to try to achieve as fast a start-up as possible, if a
+%% clean shutdown occurs, we try to save out state to disk to reduce
+%% work on startup. In the msg_store this takes the form of the
+%% index_module's state, plus the file_summary ets table, and client
+%% refs. In the VQ, this takes the form of the count of persistent
+%% messages in the queue and references into the msg_stores. The
+%% queue_index adds to these terms the details of its segments and
+%% stores the terms in the queue directory.
+%%
+%% Two message stores are used. One is created for persistent messages
+%% to durable queues that must survive restarts, and the other is used
+%% for all other messages that just happen to need to be written to
+%% disk. On start up we can therefore nuke the transient message
+%% store, and be sure that the messages in the persistent store are
+%% all that we need.
+%%
+%% The references to the msg_stores are there so that the msg_store
+%% knows to only trust its saved state if all of the queues it was
+%% previously talking to come up cleanly. Likewise, the queues
+%% themselves (esp. queue_index) skip work in init if all the queues
+%% and msg_store were shut down cleanly. This gives both good speed
+%% improvements and also robustness so that if anything possibly went
+%% wrong in shutdown (or there was subsequent manual tampering), all
+%% messages and queues that can be recovered are recovered, safely.
+%%
+%% To delete transient messages lazily, the variable_queue, on
+%% startup, stores the next_seq_id reported by the queue_index as the
+%% transient_threshold. From that point on, whenever it's reading a
+%% message off disk via the queue_index, if the seq_id is below this
+%% threshold and the message is transient then it drops the message
+%% (the message itself won't exist on disk because it would have been
+%% stored in the transient msg_store which would have had its saved
+%% state nuked on startup). This avoids the expensive operation of
+%% scanning the entire queue on startup in order to delete transient
+%% messages that were only pushed to disk to save memory.
+%%
+%%----------------------------------------------------------------------------
+
+-behaviour(rabbit_backing_queue).
+
+-record(vqstate,
+ { q1,
+ q2,
+ delta,
+ q3,
+ q4,
+ next_seq_id,
+ ram_pending_ack, %% msgs using store, still in RAM
+ disk_pending_ack, %% msgs in store, paged out
+ qi_pending_ack, %% msgs using qi, *can't* be paged out
+ index_state,
+ msg_store_clients,
+ durable,
+ transient_threshold,
+ qi_embed_msgs_below,
+
+ len, %% w/o unacked
+ bytes, %% w/o unacked
+ unacked_bytes,
+ persistent_count, %% w unacked
+ persistent_bytes, %% w unacked
+ delta_transient_bytes, %%
+
+ target_ram_count,
+ ram_msg_count, %% w/o unacked
+ ram_msg_count_prev,
+ ram_ack_count_prev,
+ ram_bytes, %% w unacked
+ out_counter,
+ in_counter,
+ rates,
+ msgs_on_disk,
+ msg_indices_on_disk,
+ unconfirmed,
+ confirmed,
+ ack_out_counter,
+ ack_in_counter,
+ %% Unlike the other counters these two do not feed into
+ %% #rates{} and get reset
+ disk_read_count,
+ disk_write_count,
+
+ io_batch_size,
+
+ %% default queue or lazy queue
+ mode,
+ %% number of reduce_memory_usage executions; once it
+ %% reaches a threshold, the queue will manually trigger a runtime GC.
+ %% See: maybe_execute_gc/1
+ memory_reduction_run_count,
+ %% Queue data is grouped by vhost. We need to store the
+ %% vhost to work with the queue index.
+ virtual_host,
+ waiting_bump = false
+ }).
+
+-record(rates, { in, out, ack_in, ack_out, timestamp }).
+
+-record(msg_status,
+ { seq_id,
+ msg_id,
+ msg,
+ is_persistent,
+ is_delivered,
+ msg_in_store,
+ index_on_disk,
+ persist_to,
+ msg_props
+ }).
+
+-record(delta,
+ { start_seq_id, %% start_seq_id is inclusive
+ count,
+ transient,
+ end_seq_id %% end_seq_id is exclusive
+ }).
+
+-define(HEADER_GUESS_SIZE, 100). %% see determine_persist_to/2
+-define(PERSISTENT_MSG_STORE, msg_store_persistent).
+-define(TRANSIENT_MSG_STORE, msg_store_transient).
+
+-define(QUEUE, lqueue).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+-include_lib("rabbit_common/include/rabbit_framing.hrl").
+-include("amqqueue.hrl").
+
+%%----------------------------------------------------------------------------
+
+-rabbit_upgrade({multiple_routing_keys, local, []}).
+-rabbit_upgrade({move_messages_to_vhost_store, message_store, []}).
+
+-type seq_id() :: non_neg_integer().
+
+-type rates() :: #rates { in :: float(),
+ out :: float(),
+ ack_in :: float(),
+ ack_out :: float(),
+ timestamp :: rabbit_types:timestamp()}.
+
+-type delta() :: #delta { start_seq_id :: non_neg_integer(),
+ count :: non_neg_integer(),
+ end_seq_id :: non_neg_integer() }.
+
+%% The compiler (rightfully) complains that ack() and state() are
+%% unused. For this reason we duplicate a -spec from
+%% rabbit_backing_queue with the only intent being to remove
+%% warnings. The problem here is that we can't parameterise the BQ
+%% behaviour by these two types as we would like to. We still leave
+%% these here for documentation purposes.
+-type ack() :: seq_id().
+-type state() :: #vqstate {
+ q1 :: ?QUEUE:?QUEUE(),
+ q2 :: ?QUEUE:?QUEUE(),
+ delta :: delta(),
+ q3 :: ?QUEUE:?QUEUE(),
+ q4 :: ?QUEUE:?QUEUE(),
+ next_seq_id :: seq_id(),
+ ram_pending_ack :: gb_trees:tree(),
+ disk_pending_ack :: gb_trees:tree(),
+ qi_pending_ack :: gb_trees:tree(),
+ index_state :: any(),
+ msg_store_clients :: 'undefined' | {{any(), binary()},
+ {any(), binary()}},
+ durable :: boolean(),
+ transient_threshold :: non_neg_integer(),
+ qi_embed_msgs_below :: non_neg_integer(),
+
+ len :: non_neg_integer(),
+ bytes :: non_neg_integer(),
+ unacked_bytes :: non_neg_integer(),
+
+ persistent_count :: non_neg_integer(),
+ persistent_bytes :: non_neg_integer(),
+
+ target_ram_count :: non_neg_integer() | 'infinity',
+ ram_msg_count :: non_neg_integer(),
+ ram_msg_count_prev :: non_neg_integer(),
+ ram_ack_count_prev :: non_neg_integer(),
+ ram_bytes :: non_neg_integer(),
+ out_counter :: non_neg_integer(),
+ in_counter :: non_neg_integer(),
+ rates :: rates(),
+ msgs_on_disk :: gb_sets:set(),
+ msg_indices_on_disk :: gb_sets:set(),
+ unconfirmed :: gb_sets:set(),
+ confirmed :: gb_sets:set(),
+ ack_out_counter :: non_neg_integer(),
+ ack_in_counter :: non_neg_integer(),
+ disk_read_count :: non_neg_integer(),
+ disk_write_count :: non_neg_integer(),
+
+ io_batch_size :: pos_integer(),
+ mode :: 'default' | 'lazy',
+ memory_reduction_run_count :: non_neg_integer()}.
+
+-define(BLANK_DELTA, #delta { start_seq_id = undefined,
+ count = 0,
+ transient = 0,
+ end_seq_id = undefined }).
+-define(BLANK_DELTA_PATTERN(Z), #delta { start_seq_id = Z,
+ count = 0,
+ transient = 0,
+ end_seq_id = Z }).
+
+-define(MICROS_PER_SECOND, 1000000.0).
+
+%% We're sampling every 5s for RAM duration; a half-life of the same
+%% order of magnitude is probably about right.
+-define(RATE_AVG_HALF_LIFE, 5.0).
+
+%% We will recalculate the #rates{} every time we get asked for our
+%% RAM duration, or every N messages published, whichever is
+%% sooner. We do this since the priority calculations in
+%% rabbit_amqqueue_process need fairly fresh rates.
+-define(MSGS_PER_RATE_CALC, 100).
+
+%% We define the garbage collector threshold used to tune the
+%% `reduce_memory_use` calls, and thus explicit garbage collection.
+%% See: rabbitmq-server-973 and rabbitmq-server-964
+-define(DEFAULT_EXPLICIT_GC_RUN_OP_THRESHOLD, 1000).
+-define(EXPLICIT_GC_RUN_OP_THRESHOLD(Mode),
+ case get(explicit_gc_run_operation_threshold) of
+ undefined ->
+ Val = explicit_gc_run_operation_threshold_for_mode(Mode),
+ put(explicit_gc_run_operation_threshold, Val),
+ Val;
+ Val -> Val
+ end).
+
+explicit_gc_run_operation_threshold_for_mode(Mode) ->
+ {Key, Fallback} = case Mode of
+ lazy -> {lazy_queue_explicit_gc_run_operation_threshold,
+ ?DEFAULT_EXPLICIT_GC_RUN_OP_THRESHOLD};
+ _ -> {queue_explicit_gc_run_operation_threshold,
+ ?DEFAULT_EXPLICIT_GC_RUN_OP_THRESHOLD}
+ end,
+ rabbit_misc:get_env(rabbit, Key, Fallback).
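+
+%% A sketch of tuning these thresholds (the keys are read above via
+%% rabbit_misc:get_env/3 from the 'rabbit' application environment),
+%% e.g. in advanced.config; the values shown are made up:
+%%
+%%   [{rabbit, [{queue_explicit_gc_run_operation_threshold, 2000},
+%%              {lazy_queue_explicit_gc_run_operation_threshold, 500}]}].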
+
+%%----------------------------------------------------------------------------
+%% Public API
+%%----------------------------------------------------------------------------
+
+start(VHost, DurableQueues) ->
+ {AllTerms, StartFunState} = rabbit_queue_index:start(VHost, DurableQueues),
+ %% Collect the persistent client refs from the recovery terms.
+ ClientRefs = [Ref || Terms <- AllTerms,
+ Terms /= non_clean_shutdown,
+ begin
+ Ref = proplists:get_value(persistent_ref, Terms),
+ Ref =/= undefined
+ end],
+ start_msg_store(VHost, ClientRefs, StartFunState),
+ {ok, AllTerms}.
+
+stop(VHost) ->
+ ok = stop_msg_store(VHost),
+ ok = rabbit_queue_index:stop(VHost).
+
+start_msg_store(VHost, Refs, StartFunState) when is_list(Refs); Refs == undefined ->
+ rabbit_log:info("Starting message stores for vhost '~s'~n", [VHost]),
+ do_start_msg_store(VHost, ?TRANSIENT_MSG_STORE, undefined, ?EMPTY_START_FUN_STATE),
+ do_start_msg_store(VHost, ?PERSISTENT_MSG_STORE, Refs, StartFunState),
+ ok.
+
+do_start_msg_store(VHost, Type, Refs, StartFunState) ->
+ case rabbit_vhost_msg_store:start(VHost, Type, Refs, StartFunState) of
+ {ok, _} ->
+ rabbit_log:info("Started message store of type ~s for vhost '~s'~n", [abbreviated_type(Type), VHost]);
+ {error, {no_such_vhost, VHost}} = Err ->
+ rabbit_log:error("Failed to start message store of type ~s for vhost '~s': the vhost no longer exists!~n",
+ [Type, VHost]),
+ exit(Err);
+ {error, Error} ->
+ rabbit_log:error("Failed to start message store of type ~s for vhost '~s': ~p~n",
+ [Type, VHost, Error]),
+ exit({error, Error})
+ end.
+
+abbreviated_type(?TRANSIENT_MSG_STORE) -> transient;
+abbreviated_type(?PERSISTENT_MSG_STORE) -> persistent.
+
+stop_msg_store(VHost) ->
+ rabbit_vhost_msg_store:stop(VHost, ?TRANSIENT_MSG_STORE),
+ rabbit_vhost_msg_store:stop(VHost, ?PERSISTENT_MSG_STORE),
+ ok.
+
+init(Queue, Recover, Callback) ->
+ init(
+ Queue, Recover, Callback,
+ fun (MsgIds, ActionTaken) ->
+ msgs_written_to_disk(Callback, MsgIds, ActionTaken)
+ end,
+ fun (MsgIds) -> msg_indices_written_to_disk(Callback, MsgIds) end,
+ fun (MsgIds) -> msgs_and_indices_written_to_disk(Callback, MsgIds) end).
+
+init(Q, new, AsyncCallback, MsgOnDiskFun, MsgIdxOnDiskFun, MsgAndIdxOnDiskFun) when ?is_amqqueue(Q) ->
+ QueueName = amqqueue:get_name(Q),
+ IsDurable = amqqueue:is_durable(Q),
+ IndexState = rabbit_queue_index:init(QueueName,
+ MsgIdxOnDiskFun, MsgAndIdxOnDiskFun),
+ VHost = QueueName#resource.virtual_host,
+ init(IsDurable, IndexState, 0, 0, [],
+ case IsDurable of
+ true -> msg_store_client_init(?PERSISTENT_MSG_STORE,
+ MsgOnDiskFun, AsyncCallback, VHost);
+ false -> undefined
+ end,
+ msg_store_client_init(?TRANSIENT_MSG_STORE, undefined,
+ AsyncCallback, VHost), VHost);
+
+%% We can be recovering a transient queue if it crashed
+init(Q, Terms, AsyncCallback, MsgOnDiskFun, MsgIdxOnDiskFun, MsgAndIdxOnDiskFun) when ?is_amqqueue(Q) ->
+ QueueName = amqqueue:get_name(Q),
+ IsDurable = amqqueue:is_durable(Q),
+ {PRef, RecoveryTerms} = process_recovery_terms(Terms),
+ VHost = QueueName#resource.virtual_host,
+ {PersistentClient, ContainsCheckFun} =
+ case IsDurable of
+ true -> C = msg_store_client_init(?PERSISTENT_MSG_STORE, PRef,
+ MsgOnDiskFun, AsyncCallback,
+ VHost),
+ {C, fun (MsgId) when is_binary(MsgId) ->
+ rabbit_msg_store:contains(MsgId, C);
+ (#basic_message{is_persistent = Persistent}) ->
+ Persistent
+ end};
+ false -> {undefined, fun(_MsgId) -> false end}
+ end,
+ TransientClient = msg_store_client_init(?TRANSIENT_MSG_STORE,
+ undefined, AsyncCallback,
+ VHost),
+ {DeltaCount, DeltaBytes, IndexState} =
+ rabbit_queue_index:recover(
+ QueueName, RecoveryTerms,
+ rabbit_vhost_msg_store:successfully_recovered_state(
+ VHost,
+ ?PERSISTENT_MSG_STORE),
+ ContainsCheckFun, MsgIdxOnDiskFun, MsgAndIdxOnDiskFun),
+ init(IsDurable, IndexState, DeltaCount, DeltaBytes, RecoveryTerms,
+ PersistentClient, TransientClient, VHost).
+
+process_recovery_terms(Terms=non_clean_shutdown) ->
+ {rabbit_guid:gen(), Terms};
+process_recovery_terms(Terms) ->
+ case proplists:get_value(persistent_ref, Terms) of
+ undefined -> {rabbit_guid:gen(), []};
+ PRef -> {PRef, Terms}
+ end.
+
+terminate(_Reason, State) ->
+ State1 = #vqstate { virtual_host = VHost,
+ persistent_count = PCount,
+ persistent_bytes = PBytes,
+ index_state = IndexState,
+ msg_store_clients = {MSCStateP, MSCStateT} } =
+ purge_pending_ack(true, State),
+ PRef = case MSCStateP of
+ undefined -> undefined;
+ _ -> ok = maybe_client_terminate(MSCStateP),
+ rabbit_msg_store:client_ref(MSCStateP)
+ end,
+ ok = rabbit_msg_store:client_delete_and_terminate(MSCStateT),
+ Terms = [{persistent_ref, PRef},
+ {persistent_count, PCount},
+ {persistent_bytes, PBytes}],
+ a(State1#vqstate {
+ index_state = rabbit_queue_index:terminate(VHost, Terms, IndexState),
+ msg_store_clients = undefined }).
+
+%% the only difference between purge and delete is that delete also
+%% needs to delete everything that's been delivered and not ack'd.
+delete_and_terminate(_Reason, State) ->
+ %% Normally when we purge messages we interact with the qi by
+ %% issuing delivers and acks for every purged message. In this case
+ %% we don't need to do that, so we just delete the qi.
+ State1 = purge_and_index_reset(State),
+ State2 = #vqstate { msg_store_clients = {MSCStateP, MSCStateT} } =
+ purge_pending_ack_delete_and_terminate(State1),
+ case MSCStateP of
+ undefined -> ok;
+ _ -> rabbit_msg_store:client_delete_and_terminate(MSCStateP)
+ end,
+ rabbit_msg_store:client_delete_and_terminate(MSCStateT),
+ a(State2 #vqstate { msg_store_clients = undefined }).
+
+delete_crashed(Q) when ?is_amqqueue(Q) ->
+ QName = amqqueue:get_name(Q),
+ ok = rabbit_queue_index:erase(QName).
+
+purge(State = #vqstate { len = Len }) ->
+ case is_pending_ack_empty(State) and is_unconfirmed_empty(State) of
+ true ->
+ {Len, purge_and_index_reset(State)};
+ false ->
+ {Len, purge_when_pending_acks(State)}
+ end.
+
+purge_acks(State) -> a(purge_pending_ack(false, State)).
+
+publish(Msg, MsgProps, IsDelivered, ChPid, Flow, State) ->
+ State1 =
+ publish1(Msg, MsgProps, IsDelivered, ChPid, Flow,
+ fun maybe_write_to_disk/4,
+ State),
+ a(maybe_reduce_memory_use(maybe_update_rates(State1))).
+
+batch_publish(Publishes, ChPid, Flow, State) ->
+ {ChPid, Flow, State1} =
+ lists:foldl(fun batch_publish1/2, {ChPid, Flow, State}, Publishes),
+ State2 = ui(State1),
+ a(maybe_reduce_memory_use(maybe_update_rates(State2))).
+
+publish_delivered(Msg, MsgProps, ChPid, Flow, State) ->
+ {SeqId, State1} =
+ publish_delivered1(Msg, MsgProps, ChPid, Flow,
+ fun maybe_write_to_disk/4,
+ State),
+ {SeqId, a(maybe_reduce_memory_use(maybe_update_rates(State1)))}.
+
+batch_publish_delivered(Publishes, ChPid, Flow, State) ->
+ {ChPid, Flow, SeqIds, State1} =
+ lists:foldl(fun batch_publish_delivered1/2,
+ {ChPid, Flow, [], State}, Publishes),
+ State2 = ui(State1),
+ {lists:reverse(SeqIds), a(maybe_reduce_memory_use(maybe_update_rates(State2)))}.
+
+discard(_MsgId, _ChPid, _Flow, State) -> State.
+
+drain_confirmed(State = #vqstate { confirmed = C }) ->
+ case gb_sets:is_empty(C) of
+ true -> {[], State}; %% common case
+ false -> {gb_sets:to_list(C), State #vqstate {
+ confirmed = gb_sets:new() }}
+ end.
+
+dropwhile(Pred, State) ->
+ {MsgProps, State1} =
+ remove_by_predicate(Pred, State),
+ {MsgProps, a(State1)}.
+
+fetchwhile(Pred, Fun, Acc, State) ->
+ {MsgProps, Acc1, State1} =
+ fetch_by_predicate(Pred, Fun, Acc, State),
+ {MsgProps, Acc1, a(State1)}.
+
+fetch(AckRequired, State) ->
+ case queue_out(State) of
+ {empty, State1} ->
+ {empty, a(State1)};
+ {{value, MsgStatus}, State1} ->
+ %% it is possible that the message wasn't read from disk
+ %% at this point, so read it in.
+ {Msg, State2} = read_msg(MsgStatus, State1),
+ {AckTag, State3} = remove(AckRequired, MsgStatus, State2),
+ {{Msg, MsgStatus#msg_status.is_delivered, AckTag}, a(State3)}
+ end.
+
+drop(AckRequired, State) ->
+ case queue_out(State) of
+ {empty, State1} ->
+ {empty, a(State1)};
+ {{value, MsgStatus}, State1} ->
+ {AckTag, State2} = remove(AckRequired, MsgStatus, State1),
+ {{MsgStatus#msg_status.msg_id, AckTag}, a(State2)}
+ end.
+
+%% Duplicated from rabbit_backing_queue
+-spec ack([ack()], state()) -> {[rabbit_guid:guid()], state()}.
+
+ack([], State) ->
+ {[], State};
+%% optimisation: this head is essentially a partial evaluation of the
+%% general case below, for the single-ack case.
+ack([SeqId], State) ->
+ case remove_pending_ack(true, SeqId, State) of
+ {none, _} ->
+ {[], State};
+ {#msg_status { msg_id = MsgId,
+ is_persistent = IsPersistent,
+ msg_in_store = MsgInStore,
+ index_on_disk = IndexOnDisk },
+ State1 = #vqstate { index_state = IndexState,
+ msg_store_clients = MSCState,
+ ack_out_counter = AckOutCount }} ->
+ IndexState1 = case IndexOnDisk of
+ true -> rabbit_queue_index:ack([SeqId], IndexState);
+ false -> IndexState
+ end,
+ case MsgInStore of
+ true -> ok = msg_store_remove(MSCState, IsPersistent, [MsgId]);
+ false -> ok
+ end,
+ {[MsgId],
+ a(State1 #vqstate { index_state = IndexState1,
+ ack_out_counter = AckOutCount + 1 })}
+ end;
+ack(AckTags, State) ->
+ {{IndexOnDiskSeqIds, MsgIdsByStore, AllMsgIds},
+ State1 = #vqstate { index_state = IndexState,
+ msg_store_clients = MSCState,
+ ack_out_counter = AckOutCount }} =
+ lists:foldl(
+ fun (SeqId, {Acc, State2}) ->
+ case remove_pending_ack(true, SeqId, State2) of
+ {none, _} ->
+ {Acc, State2};
+ {MsgStatus, State3} ->
+ {accumulate_ack(MsgStatus, Acc), State3}
+ end
+ end, {accumulate_ack_init(), State}, AckTags),
+ IndexState1 = rabbit_queue_index:ack(IndexOnDiskSeqIds, IndexState),
+ remove_msgs_by_id(MsgIdsByStore, MSCState),
+ {lists:reverse(AllMsgIds),
+ a(State1 #vqstate { index_state = IndexState1,
+ ack_out_counter = AckOutCount + length(AckTags) })}.
+
+requeue(AckTags, #vqstate { mode = default,
+ delta = Delta,
+ q3 = Q3,
+ q4 = Q4,
+ in_counter = InCounter,
+ len = Len } = State) ->
+ {SeqIds, Q4a, MsgIds, State1} = queue_merge(lists:sort(AckTags), Q4, [],
+ beta_limit(Q3),
+ fun publish_alpha/2, State),
+ {SeqIds1, Q3a, MsgIds1, State2} = queue_merge(SeqIds, Q3, MsgIds,
+ delta_limit(Delta),
+ fun publish_beta/2, State1),
+ {Delta1, MsgIds2, State3} = delta_merge(SeqIds1, Delta, MsgIds1,
+ State2),
+ MsgCount = length(MsgIds2),
+ {MsgIds2, a(maybe_reduce_memory_use(
+ maybe_update_rates(ui(
+ State3 #vqstate { delta = Delta1,
+ q3 = Q3a,
+ q4 = Q4a,
+ in_counter = InCounter + MsgCount,
+ len = Len + MsgCount }))))};
+requeue(AckTags, #vqstate { mode = lazy,
+ delta = Delta,
+ q3 = Q3,
+ in_counter = InCounter,
+ len = Len } = State) ->
+ {SeqIds, Q3a, MsgIds, State1} = queue_merge(lists:sort(AckTags), Q3, [],
+ delta_limit(Delta),
+ fun publish_beta/2, State),
+ {Delta1, MsgIds1, State2} = delta_merge(SeqIds, Delta, MsgIds,
+ State1),
+ MsgCount = length(MsgIds1),
+ {MsgIds1, a(maybe_reduce_memory_use(
+ maybe_update_rates(ui(
+ State2 #vqstate { delta = Delta1,
+ q3 = Q3a,
+ in_counter = InCounter + MsgCount,
+ len = Len + MsgCount }))))}.
+
+ackfold(MsgFun, Acc, State, AckTags) ->
+ {AccN, StateN} =
+ lists:foldl(fun(SeqId, {Acc0, State0}) ->
+ MsgStatus = lookup_pending_ack(SeqId, State0),
+ {Msg, State1} = read_msg(MsgStatus, State0),
+ {MsgFun(Msg, SeqId, Acc0), State1}
+ end, {Acc, State}, AckTags),
+ {AccN, a(StateN)}.
+
+fold(Fun, Acc, State = #vqstate{index_state = IndexState}) ->
+ {Its, IndexState1} = lists:foldl(fun inext/2, {[], IndexState},
+ [msg_iterator(State),
+ disk_ack_iterator(State),
+ ram_ack_iterator(State),
+ qi_ack_iterator(State)]),
+ ifold(Fun, Acc, Its, State#vqstate{index_state = IndexState1}).
+
+len(#vqstate { len = Len }) -> Len.
+
+is_empty(State) -> 0 == len(State).
+
+depth(State) ->
+ len(State) + count_pending_acks(State).
+
+set_ram_duration_target(
+ DurationTarget, State = #vqstate {
+ rates = #rates { in = AvgIngressRate,
+ out = AvgEgressRate,
+ ack_in = AvgAckIngressRate,
+ ack_out = AvgAckEgressRate },
+ target_ram_count = TargetRamCount }) ->
+ Rate =
+ AvgEgressRate + AvgIngressRate + AvgAckEgressRate + AvgAckIngressRate,
+ TargetRamCount1 =
+ case DurationTarget of
+ infinity -> infinity;
+ _ -> trunc(DurationTarget * Rate) %% msgs = sec * msgs/sec
+ end,
+ State1 = State #vqstate { target_ram_count = TargetRamCount1 },
+ a(case TargetRamCount1 == infinity orelse
+ (TargetRamCount =/= infinity andalso
+ TargetRamCount1 >= TargetRamCount) of
+ true -> State1;
+ false -> reduce_memory_use(State1)
+ end).
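+
+%% Worked example (illustrative rates): a duration target of 2.5s with
+%% combined ingress, egress and ack rates summing to 400 msg/s yields
+%% trunc(2.5 * 400) = 1000 as the new target_ram_count.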
+
+maybe_update_rates(State = #vqstate{ in_counter = InCount,
+ out_counter = OutCount })
+ when InCount + OutCount > ?MSGS_PER_RATE_CALC ->
+ update_rates(State);
+maybe_update_rates(State) ->
+ State.
+
+update_rates(State = #vqstate{ in_counter = InCount,
+ out_counter = OutCount,
+ ack_in_counter = AckInCount,
+ ack_out_counter = AckOutCount,
+ rates = #rates{ in = InRate,
+ out = OutRate,
+ ack_in = AckInRate,
+ ack_out = AckOutRate,
+ timestamp = TS }}) ->
+ Now = erlang:monotonic_time(),
+
+ Rates = #rates { in = update_rate(Now, TS, InCount, InRate),
+ out = update_rate(Now, TS, OutCount, OutRate),
+ ack_in = update_rate(Now, TS, AckInCount, AckInRate),
+ ack_out = update_rate(Now, TS, AckOutCount, AckOutRate),
+ timestamp = Now },
+
+ State#vqstate{ in_counter = 0,
+ out_counter = 0,
+ ack_in_counter = 0,
+ ack_out_counter = 0,
+ rates = Rates }.
+
+update_rate(Now, TS, Count, Rate) ->
+ Time = erlang:convert_time_unit(Now - TS, native, micro_seconds) /
+ ?MICROS_PER_SECOND,
+ if
+ Time == 0 -> Rate;
+ true -> rabbit_misc:moving_average(Time, ?RATE_AVG_HALF_LIFE,
+ Count / Time, Rate)
+ end.
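+
+%% Assuming rabbit_misc:moving_average/4 implements standard
+%% exponential decay, a previous rate's weight halves for every
+%% ?RATE_AVG_HALF_LIFE (5s) of elapsed sample time; a sample from 10s
+%% ago retains roughly a quarter of its original weight.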
+
+ram_duration(State) ->
+ State1 = #vqstate { rates = #rates { in = AvgIngressRate,
+ out = AvgEgressRate,
+ ack_in = AvgAckIngressRate,
+ ack_out = AvgAckEgressRate },
+ ram_msg_count = RamMsgCount,
+ ram_msg_count_prev = RamMsgCountPrev,
+ ram_pending_ack = RPA,
+ qi_pending_ack = QPA,
+ ram_ack_count_prev = RamAckCountPrev } =
+ update_rates(State),
+
+ RamAckCount = gb_trees:size(RPA) + gb_trees:size(QPA),
+
+ Duration = %% msgs+acks / (msgs+acks/sec) == sec
+ case lists:all(fun (X) -> X < 0.01 end,
+ [AvgEgressRate, AvgIngressRate,
+ AvgAckEgressRate, AvgAckIngressRate]) of
+ true -> infinity;
+ false -> (RamMsgCountPrev + RamMsgCount +
+ RamAckCount + RamAckCountPrev) /
+ (4 * (AvgEgressRate + AvgIngressRate +
+ AvgAckEgressRate + AvgAckIngressRate))
+ end,
+
+ {Duration, State1}.
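+
+%% Worked example (illustrative counts): with 1800+2000 RAM messages
+%% across the two samples, 100+100 RAM pending acks, and the four
+%% rates summing to 400 msg/s, Duration = (1800+2000+100+100) /
+%% (4 * 400) = 2.5 seconds.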
+
+needs_timeout(#vqstate { index_state = IndexState }) ->
+ case rabbit_queue_index:needs_sync(IndexState) of
+ confirms -> timed;
+ other -> idle;
+ false -> false
+ end.
+
+timeout(State = #vqstate { index_state = IndexState }) ->
+ State #vqstate { index_state = rabbit_queue_index:sync(IndexState) }.
+
+handle_pre_hibernate(State = #vqstate { index_state = IndexState }) ->
+ State #vqstate { index_state = rabbit_queue_index:flush(IndexState) }.
+
+handle_info(bump_reduce_memory_use, State = #vqstate{ waiting_bump = true }) ->
+ State#vqstate{ waiting_bump = false };
+handle_info(bump_reduce_memory_use, State) ->
+ State.
+
+resume(State) -> a(reduce_memory_use(State)).
+
+msg_rates(#vqstate { rates = #rates { in = AvgIngressRate,
+ out = AvgEgressRate } }) ->
+ {AvgIngressRate, AvgEgressRate}.
+
+info(messages_ready_ram, #vqstate{ram_msg_count = RamMsgCount}) ->
+ RamMsgCount;
+info(messages_unacknowledged_ram, #vqstate{ram_pending_ack = RPA,
+ qi_pending_ack = QPA}) ->
+ gb_trees:size(RPA) + gb_trees:size(QPA);
+info(messages_ram, State) ->
+ info(messages_ready_ram, State) + info(messages_unacknowledged_ram, State);
+info(messages_persistent, #vqstate{persistent_count = PersistentCount}) ->
+ PersistentCount;
+info(messages_paged_out, #vqstate{delta = #delta{transient = Count}}) ->
+ Count;
+info(message_bytes, #vqstate{bytes = Bytes,
+ unacked_bytes = UBytes}) ->
+ Bytes + UBytes;
+info(message_bytes_ready, #vqstate{bytes = Bytes}) ->
+ Bytes;
+info(message_bytes_unacknowledged, #vqstate{unacked_bytes = UBytes}) ->
+ UBytes;
+info(message_bytes_ram, #vqstate{ram_bytes = RamBytes}) ->
+ RamBytes;
+info(message_bytes_persistent, #vqstate{persistent_bytes = PersistentBytes}) ->
+ PersistentBytes;
+info(message_bytes_paged_out, #vqstate{delta_transient_bytes = PagedOutBytes}) ->
+ PagedOutBytes;
+info(head_message_timestamp, #vqstate{
+ q3 = Q3,
+ q4 = Q4,
+ ram_pending_ack = RPA,
+ qi_pending_ack = QPA}) ->
+ head_message_timestamp(Q3, Q4, RPA, QPA);
+info(disk_reads, #vqstate{disk_read_count = Count}) ->
+ Count;
+info(disk_writes, #vqstate{disk_write_count = Count}) ->
+ Count;
+info(backing_queue_status, #vqstate {
+ q1 = Q1, q2 = Q2, delta = Delta, q3 = Q3, q4 = Q4,
+ mode = Mode,
+ len = Len,
+ target_ram_count = TargetRamCount,
+ next_seq_id = NextSeqId,
+ rates = #rates { in = AvgIngressRate,
+ out = AvgEgressRate,
+ ack_in = AvgAckIngressRate,
+ ack_out = AvgAckEgressRate }}) ->
+
+ [ {mode , Mode},
+ {q1 , ?QUEUE:len(Q1)},
+ {q2 , ?QUEUE:len(Q2)},
+ {delta , Delta},
+ {q3 , ?QUEUE:len(Q3)},
+ {q4 , ?QUEUE:len(Q4)},
+ {len , Len},
+ {target_ram_count , TargetRamCount},
+ {next_seq_id , NextSeqId},
+ {avg_ingress_rate , AvgIngressRate},
+ {avg_egress_rate , AvgEgressRate},
+ {avg_ack_ingress_rate, AvgAckIngressRate},
+ {avg_ack_egress_rate , AvgAckEgressRate} ];
+info(_, _) ->
+ ''.
+
+invoke(?MODULE, Fun, State) -> Fun(?MODULE, State);
+invoke( _, _, State) -> State.
+
+is_duplicate(_Msg, State) -> {false, State}.
+
+set_queue_mode(Mode, State = #vqstate { mode = Mode }) ->
+ State;
+set_queue_mode(lazy, State = #vqstate {
+ target_ram_count = TargetRamCount }) ->
+ %% To become a lazy queue we need to page everything to disk first.
+ State1 = convert_to_lazy(State),
+ %% restore the original target_ram_count
+ a(State1 #vqstate { mode = lazy, target_ram_count = TargetRamCount });
+set_queue_mode(default, State) ->
+ %% becoming a default queue means loading messages from disk like
+ %% when a queue is recovered.
+ a(maybe_deltas_to_betas(State #vqstate { mode = default }));
+set_queue_mode(_, State) ->
+ State.
+
+zip_msgs_and_acks(Msgs, AckTags, Accumulator, _State) ->
+ lists:foldl(fun ({{#basic_message{ id = Id }, _Props}, AckTag}, Acc) ->
+ [{Id, AckTag} | Acc]
+ end, Accumulator, lists:zip(Msgs, AckTags)).
+
+convert_to_lazy(State) ->
+ State1 = #vqstate { delta = Delta, q3 = Q3, len = Len } =
+ set_ram_duration_target(0, State),
+ case Delta#delta.count + ?QUEUE:len(Q3) == Len of
+ true ->
+ State1;
+ false ->
+ %% When pushing messages to disk, we might have been
+ %% blocked by the msg_store, so we need to see if we have
+ %% to wait for more credit, and then keep paging messages.
+ %%
+ %% The amqqueue_process could have taken care of this, but
+ %% between the time it receives the bump_credit msg and
+ %% calls BQ:resume to keep paging messages to disk, some
+ %% other request may arrive to the BQ which at this moment
+ %% is not in a proper state for a lazy BQ (unless all
+ %% messages have been paged to disk already).
+ wait_for_msg_store_credit(),
+ convert_to_lazy(resume(State1))
+ end.
+
+wait_for_msg_store_credit() ->
+ case credit_flow:blocked() of
+ true -> receive
+ {bump_credit, Msg} ->
+ credit_flow:handle_bump_msg(Msg)
+ end;
+ false -> ok
+ end.
+
+%% Get the Timestamp property of the first msg, if present. This is
+%% the one with the oldest timestamp among the heads of the pending
+%% acks and unread queues. We can't check disk_pending_acks as these
+%% are paged out - we assume some will soon be paged in rather than
+%% forcing it to happen. Pending ack msgs are included as they are
+%% regarded as unprocessed until acked, this also prevents the result
+%% apparently oscillating during repeated rejects. Q3 is only checked
+%% when Q4 is empty as any Q4 msg will be earlier.
+head_message_timestamp(Q3, Q4, RPA, QPA) ->
+ HeadMsgs = [ HeadMsgStatus#msg_status.msg ||
+ HeadMsgStatus <-
+ [ get_qs_head([Q4, Q3]),
+ get_pa_head(RPA),
+ get_pa_head(QPA) ],
+ HeadMsgStatus /= undefined,
+ HeadMsgStatus#msg_status.msg /= undefined ],
+
+ Timestamps =
+ [Timestamp || HeadMsg <- HeadMsgs,
+ Timestamp <- [rabbit_basic:extract_timestamp(
+ HeadMsg#basic_message.content)],
+ Timestamp /= undefined
+ ],
+
+ case Timestamps == [] of
+ true -> '';
+ false -> lists:min(Timestamps)
+ end.
+
+get_qs_head(Qs) ->
+ catch lists:foldl(
+ fun (Q, Acc) ->
+ case get_q_head(Q) of
+ undefined -> Acc;
+ Val -> throw(Val)
+ end
+ end, undefined, Qs).
+
+get_q_head(Q) ->
+ get_collection_head(Q, fun ?QUEUE:is_empty/1, fun ?QUEUE:peek/1).
+
+get_pa_head(PA) ->
+ get_collection_head(PA, fun gb_trees:is_empty/1, fun gb_trees:smallest/1).
+
+get_collection_head(Col, IsEmpty, GetVal) ->
+ case IsEmpty(Col) of
+ false ->
+ {_, MsgStatus} = GetVal(Col),
+ MsgStatus;
+ true -> undefined
+ end.
+
+%%----------------------------------------------------------------------------
+%% Minor helpers
+%%----------------------------------------------------------------------------
+a(State = #vqstate { q1 = Q1, q2 = Q2, delta = Delta, q3 = Q3, q4 = Q4,
+ mode = default,
+ len = Len,
+ bytes = Bytes,
+ unacked_bytes = UnackedBytes,
+ persistent_count = PersistentCount,
+ persistent_bytes = PersistentBytes,
+ ram_msg_count = RamMsgCount,
+ ram_bytes = RamBytes}) ->
+ E1 = ?QUEUE:is_empty(Q1),
+ E2 = ?QUEUE:is_empty(Q2),
+ ED = Delta#delta.count == 0,
+ E3 = ?QUEUE:is_empty(Q3),
+ E4 = ?QUEUE:is_empty(Q4),
+ LZ = Len == 0,
+
+ %% if q1 has messages then q3 cannot be empty. See publish/6.
+ true = E1 or not E3,
+ %% if q2 has messages then we have messages in delta (paged to
+ %% disk). See push_alphas_to_betas/2.
+ true = E2 or not ED,
+ %% if delta has messages then q3 cannot be empty. This is enforced
+ %% by paging, where min([?SEGMENT_ENTRY_COUNT, len(q3)]) messages
+ %% are always kept on RAM.
+ true = ED or not E3,
+ %% if the queue length is 0, then q3 and q4 must be empty.
+ true = LZ == (E3 and E4),
+
+ true = Len >= 0,
+ true = Bytes >= 0,
+ true = UnackedBytes >= 0,
+ true = PersistentCount >= 0,
+ true = PersistentBytes >= 0,
+ true = RamMsgCount >= 0,
+ true = RamMsgCount =< Len,
+ true = RamBytes >= 0,
+ true = RamBytes =< Bytes + UnackedBytes,
+
+ State;
+a(State = #vqstate { q1 = Q1, q2 = Q2, delta = Delta, q3 = Q3, q4 = Q4,
+ mode = lazy,
+ len = Len,
+ bytes = Bytes,
+ unacked_bytes = UnackedBytes,
+ persistent_count = PersistentCount,
+ persistent_bytes = PersistentBytes,
+ ram_msg_count = RamMsgCount,
+ ram_bytes = RamBytes}) ->
+ E1 = ?QUEUE:is_empty(Q1),
+ E2 = ?QUEUE:is_empty(Q2),
+ ED = Delta#delta.count == 0,
+ E3 = ?QUEUE:is_empty(Q3),
+ E4 = ?QUEUE:is_empty(Q4),
+ LZ = Len == 0,
+ L3 = ?QUEUE:len(Q3),
+
+ %% q1 must always be empty, since q1 only gets messages during
+ %% publish, but for lazy queues messages go straight to delta.
+ true = E1,
+
+ %% q2 only gets messages from q1 when push_alphas_to_betas is
+ %% called for a non empty delta, which won't be the case for a
+ %% lazy queue. This means q2 must always be empty.
+ true = E2,
+
+ %% q4 must always be empty: for lazy queues publishes go straight
+ %% to delta, and messages are only ever read back into q3, never q4.
+ true = E4,
+
+ %% if the queue is empty, then delta is empty and q3 is empty.
+ true = LZ == (ED and E3),
+
+ %% There should be no messages in q1, q2, and q4
+ true = Delta#delta.count + L3 == Len,
+
+ true = Len >= 0,
+ true = Bytes >= 0,
+ true = UnackedBytes >= 0,
+ true = PersistentCount >= 0,
+ true = PersistentBytes >= 0,
+ true = RamMsgCount >= 0,
+ true = RamMsgCount =< Len,
+ true = RamBytes >= 0,
+ true = RamBytes =< Bytes + UnackedBytes,
+
+ State.
+
+d(Delta = #delta { start_seq_id = Start, count = Count, end_seq_id = End })
+ when Start + Count =< End ->
+ Delta.
+
+m(MsgStatus = #msg_status { is_persistent = IsPersistent,
+ msg_in_store = MsgInStore,
+ index_on_disk = IndexOnDisk }) ->
+ true = (not IsPersistent) or IndexOnDisk,
+ true = msg_in_ram(MsgStatus) or MsgInStore,
+ MsgStatus.
+
+one_if(true ) -> 1;
+one_if(false) -> 0.
+
+cons_if(true, E, L) -> [E | L];
+cons_if(false, _E, L) -> L.
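+
+%% For illustration: one_if(true) is 1 and one_if(false) is 0, so it
+%% can be used as a multiplier in count and byte accounting, while
+%% cons_if(true, 3, [2, 1]) is [3, 2, 1] and cons_if(false, 3, [2, 1])
+%% leaves the list untouched.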
+
+gb_sets_maybe_insert(false, _Val, Set) -> Set;
+gb_sets_maybe_insert(true, Val, Set) -> gb_sets:add(Val, Set).
+
+msg_status(IsPersistent, IsDelivered, SeqId,
+ Msg = #basic_message {id = MsgId}, MsgProps, IndexMaxSize) ->
+ #msg_status{seq_id = SeqId,
+ msg_id = MsgId,
+ msg = Msg,
+ is_persistent = IsPersistent,
+ is_delivered = IsDelivered,
+ msg_in_store = false,
+ index_on_disk = false,
+ persist_to = determine_persist_to(Msg, MsgProps, IndexMaxSize),
+ msg_props = MsgProps}.
+
+beta_msg_status({Msg = #basic_message{id = MsgId},
+ SeqId, MsgProps, IsPersistent, IsDelivered}) ->
+ MS0 = beta_msg_status0(SeqId, MsgProps, IsPersistent, IsDelivered),
+ MS0#msg_status{msg_id = MsgId,
+ msg = Msg,
+ persist_to = queue_index,
+ msg_in_store = false};
+
+beta_msg_status({MsgId, SeqId, MsgProps, IsPersistent, IsDelivered}) ->
+ MS0 = beta_msg_status0(SeqId, MsgProps, IsPersistent, IsDelivered),
+ MS0#msg_status{msg_id = MsgId,
+ msg = undefined,
+ persist_to = msg_store,
+ msg_in_store = true}.
+
+beta_msg_status0(SeqId, MsgProps, IsPersistent, IsDelivered) ->
+ #msg_status{seq_id = SeqId,
+ msg = undefined,
+ is_persistent = IsPersistent,
+ is_delivered = IsDelivered,
+ index_on_disk = true,
+ msg_props = MsgProps}.
+
+trim_msg_status(MsgStatus) ->
+ case persist_to(MsgStatus) of
+ msg_store -> MsgStatus#msg_status{msg = undefined};
+ queue_index -> MsgStatus
+ end.
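+
+%% For illustration: a message destined for the message store can be
+%% dropped from RAM once written (msg becomes undefined and can later
+%% be re-read via read_msg/2), whereas a message embedded in the
+%% queue index keeps its in-RAM copy.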
+
+with_msg_store_state({MSCStateP, MSCStateT}, true, Fun) ->
+ {Result, MSCStateP1} = Fun(MSCStateP),
+ {Result, {MSCStateP1, MSCStateT}};
+with_msg_store_state({MSCStateP, MSCStateT}, false, Fun) ->
+ {Result, MSCStateT1} = Fun(MSCStateT),
+ {Result, {MSCStateP, MSCStateT1}}.
+
+with_immutable_msg_store_state(MSCState, IsPersistent, Fun) ->
+ {Res, MSCState} = with_msg_store_state(MSCState, IsPersistent,
+ fun (MSCState1) ->
+ {Fun(MSCState1), MSCState1}
+ end),
+ Res.
+
+msg_store_client_init(MsgStore, MsgOnDiskFun, Callback, VHost) ->
+ msg_store_client_init(MsgStore, rabbit_guid:gen(), MsgOnDiskFun,
+ Callback, VHost).
+
+msg_store_client_init(MsgStore, Ref, MsgOnDiskFun, Callback, VHost) ->
+ CloseFDsFun = msg_store_close_fds_fun(MsgStore =:= ?PERSISTENT_MSG_STORE),
+ rabbit_vhost_msg_store:client_init(VHost, MsgStore,
+ Ref, MsgOnDiskFun,
+ fun () ->
+ Callback(?MODULE, CloseFDsFun)
+ end).
+
+msg_store_write(MSCState, IsPersistent, MsgId, Msg) ->
+ with_immutable_msg_store_state(
+ MSCState, IsPersistent,
+ fun (MSCState1) ->
+ rabbit_msg_store:write_flow(MsgId, Msg, MSCState1)
+ end).
+
+msg_store_read(MSCState, IsPersistent, MsgId) ->
+ with_msg_store_state(
+ MSCState, IsPersistent,
+ fun (MSCState1) ->
+ rabbit_msg_store:read(MsgId, MSCState1)
+ end).
+
+msg_store_remove(MSCState, IsPersistent, MsgIds) ->
+ with_immutable_msg_store_state(
+ MSCState, IsPersistent,
+ fun (MCSState1) ->
+ rabbit_msg_store:remove(MsgIds, MCSState1)
+ end).
+
+msg_store_close_fds(MSCState, IsPersistent) ->
+ with_msg_store_state(
+ MSCState, IsPersistent,
+ fun (MSCState1) -> rabbit_msg_store:close_all_indicated(MSCState1) end).
+
+msg_store_close_fds_fun(IsPersistent) ->
+ fun (?MODULE, State = #vqstate { msg_store_clients = MSCState }) ->
+ {ok, MSCState1} = msg_store_close_fds(MSCState, IsPersistent),
+ State #vqstate { msg_store_clients = MSCState1 }
+ end.
+
+maybe_write_delivered(false, _SeqId, IndexState) ->
+ IndexState;
+maybe_write_delivered(true, SeqId, IndexState) ->
+ rabbit_queue_index:deliver([SeqId], IndexState).
+
+betas_from_index_entries(List, TransientThreshold, DelsAndAcksFun, State) ->
+ {Filtered, Delivers, Acks, RamReadyCount, RamBytes, TransientCount, TransientBytes} =
+ lists:foldr(
+ fun ({_MsgOrId, SeqId, _MsgProps, IsPersistent, IsDelivered} = M,
+ {Filtered1, Delivers1, Acks1, RRC, RB, TC, TB} = Acc) ->
+ case SeqId < TransientThreshold andalso not IsPersistent of
+ true -> {Filtered1,
+ cons_if(not IsDelivered, SeqId, Delivers1),
+ [SeqId | Acks1], RRC, RB, TC, TB};
+ false -> MsgStatus = m(beta_msg_status(M)),
+ HaveMsg = msg_in_ram(MsgStatus),
+ Size = msg_size(MsgStatus),
+ case is_msg_in_pending_acks(SeqId, State) of
+ false -> {?QUEUE:in_r(MsgStatus, Filtered1),
+ Delivers1, Acks1,
+ RRC + one_if(HaveMsg),
+ RB + one_if(HaveMsg) * Size,
+ TC + one_if(not IsPersistent),
+ TB + one_if(not IsPersistent) * Size};
+ true -> Acc %% [0]
+ end
+ end
+ end, {?QUEUE:new(), [], [], 0, 0, 0, 0}, List),
+ {Filtered, RamReadyCount, RamBytes, DelsAndAcksFun(Delivers, Acks, State),
+ TransientCount, TransientBytes}.
+%% [0] We don't increase RamBytes here, even though it pertains to
+%% unacked messages too, since if HaveMsg then the message must have
+%% been stored in the QI, thus the message must have been in
+%% qi_pending_ack, thus it must already have been in RAM.
+
+is_msg_in_pending_acks(SeqId, #vqstate { ram_pending_ack = RPA,
+ disk_pending_ack = DPA,
+ qi_pending_ack = QPA }) ->
+ (gb_trees:is_defined(SeqId, RPA) orelse
+ gb_trees:is_defined(SeqId, DPA) orelse
+ gb_trees:is_defined(SeqId, QPA)).
+
+expand_delta(SeqId, ?BLANK_DELTA_PATTERN(X), IsPersistent) ->
+ d(#delta { start_seq_id = SeqId, count = 1, end_seq_id = SeqId + 1,
+ transient = one_if(not IsPersistent)});
+expand_delta(SeqId, #delta { start_seq_id = StartSeqId,
+ count = Count,
+ transient = Transient } = Delta,
+ IsPersistent )
+ when SeqId < StartSeqId ->
+ d(Delta #delta { start_seq_id = SeqId, count = Count + 1,
+ transient = Transient + one_if(not IsPersistent)});
+expand_delta(SeqId, #delta { count = Count,
+ end_seq_id = EndSeqId,
+ transient = Transient } = Delta,
+ IsPersistent)
+ when SeqId >= EndSeqId ->
+ d(Delta #delta { count = Count + 1, end_seq_id = SeqId + 1,
+ transient = Transient + one_if(not IsPersistent)});
+expand_delta(_SeqId, #delta { count = Count,
+ transient = Transient } = Delta,
+ IsPersistent ) ->
+ d(Delta #delta { count = Count + 1,
+ transient = Transient + one_if(not IsPersistent) }).
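+
+%% For illustration (sequence ids hypothetical), starting from a
+%% blank delta and adding persistent messages 10, 12 and then 11:
+%%
+%%   expand_delta(10, ?BLANK_DELTA, true) gives start 10, count 1, end 11
+%%   then adding 12                       gives start 10, count 2, end 13
+%%   then adding 11                       gives start 10, count 3, end 13
+%%
+%% i.e. the delta is the half-open interval [start_seq_id,
+%% end_seq_id) and count may be smaller than the interval width.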
+
+%%----------------------------------------------------------------------------
+%% Internal major helpers for Public API
+%%----------------------------------------------------------------------------
+
+init(IsDurable, IndexState, DeltaCount, DeltaBytes, Terms,
+ PersistentClient, TransientClient, VHost) ->
+ {LowSeqId, NextSeqId, IndexState1} = rabbit_queue_index:bounds(IndexState),
+
+ {DeltaCount1, DeltaBytes1} =
+ case Terms of
+ non_clean_shutdown -> {DeltaCount, DeltaBytes};
+ _ -> {proplists:get_value(persistent_count,
+ Terms, DeltaCount),
+ proplists:get_value(persistent_bytes,
+ Terms, DeltaBytes)}
+ end,
+ Delta = case DeltaCount1 == 0 andalso DeltaCount /= undefined of
+ true -> ?BLANK_DELTA;
+ false -> d(#delta { start_seq_id = LowSeqId,
+ count = DeltaCount1,
+ transient = 0,
+ end_seq_id = NextSeqId })
+ end,
+ Now = erlang:monotonic_time(),
+ IoBatchSize = rabbit_misc:get_env(rabbit, msg_store_io_batch_size,
+ ?IO_BATCH_SIZE),
+
+ {ok, IndexMaxSize} = application:get_env(
+ rabbit, queue_index_embed_msgs_below),
+ State = #vqstate {
+ q1 = ?QUEUE:new(),
+ q2 = ?QUEUE:new(),
+ delta = Delta,
+ q3 = ?QUEUE:new(),
+ q4 = ?QUEUE:new(),
+ next_seq_id = NextSeqId,
+ ram_pending_ack = gb_trees:empty(),
+ disk_pending_ack = gb_trees:empty(),
+ qi_pending_ack = gb_trees:empty(),
+ index_state = IndexState1,
+ msg_store_clients = {PersistentClient, TransientClient},
+ durable = IsDurable,
+ transient_threshold = NextSeqId,
+ qi_embed_msgs_below = IndexMaxSize,
+
+ len = DeltaCount1,
+ persistent_count = DeltaCount1,
+ bytes = DeltaBytes1,
+ persistent_bytes = DeltaBytes1,
+ delta_transient_bytes = 0,
+
+ target_ram_count = infinity,
+ ram_msg_count = 0,
+ ram_msg_count_prev = 0,
+ ram_ack_count_prev = 0,
+ ram_bytes = 0,
+ unacked_bytes = 0,
+ out_counter = 0,
+ in_counter = 0,
+ rates = blank_rates(Now),
+ msgs_on_disk = gb_sets:new(),
+ msg_indices_on_disk = gb_sets:new(),
+ unconfirmed = gb_sets:new(),
+ confirmed = gb_sets:new(),
+ ack_out_counter = 0,
+ ack_in_counter = 0,
+ disk_read_count = 0,
+ disk_write_count = 0,
+
+ io_batch_size = IoBatchSize,
+
+ mode = default,
+ memory_reduction_run_count = 0,
+ virtual_host = VHost},
+ a(maybe_deltas_to_betas(State)).
+
+blank_rates(Now) ->
+ #rates { in = 0.0,
+ out = 0.0,
+ ack_in = 0.0,
+ ack_out = 0.0,
+ timestamp = Now}.
+
+in_r(MsgStatus = #msg_status { msg = undefined },
+ State = #vqstate { mode = default, q3 = Q3, q4 = Q4 }) ->
+ case ?QUEUE:is_empty(Q4) of
+ true -> State #vqstate { q3 = ?QUEUE:in_r(MsgStatus, Q3) };
+ false -> {Msg, State1 = #vqstate { q4 = Q4a }} =
+ read_msg(MsgStatus, State),
+ MsgStatus1 = MsgStatus#msg_status{msg = Msg},
+ stats(ready0, {MsgStatus, MsgStatus1}, 0,
+ State1 #vqstate { q4 = ?QUEUE:in_r(MsgStatus1, Q4a) })
+ end;
+in_r(MsgStatus,
+ State = #vqstate { mode = default, q4 = Q4 }) ->
+ State #vqstate { q4 = ?QUEUE:in_r(MsgStatus, Q4) };
+%% lazy queues
+in_r(MsgStatus = #msg_status { seq_id = SeqId, is_persistent = IsPersistent },
+ State = #vqstate { mode = lazy, q3 = Q3, delta = Delta}) ->
+ case ?QUEUE:is_empty(Q3) of
+ true ->
+ {_MsgStatus1, State1} =
+ maybe_write_to_disk(true, true, MsgStatus, State),
+ State2 = stats(ready0, {MsgStatus, none}, 1, State1),
+ Delta1 = expand_delta(SeqId, Delta, IsPersistent),
+ State2 #vqstate{ delta = Delta1};
+ false ->
+ State #vqstate { q3 = ?QUEUE:in_r(MsgStatus, Q3) }
+ end.
+
+queue_out(State = #vqstate { mode = default, q4 = Q4 }) ->
+ case ?QUEUE:out(Q4) of
+ {empty, _Q4} ->
+ case fetch_from_q3(State) of
+ {empty, _State1} = Result -> Result;
+ {loaded, {MsgStatus, State1}} -> {{value, MsgStatus}, State1}
+ end;
+ {{value, MsgStatus}, Q4a} ->
+ {{value, MsgStatus}, State #vqstate { q4 = Q4a }}
+ end;
+%% lazy queues
+queue_out(State = #vqstate { mode = lazy }) ->
+ case fetch_from_q3(State) of
+ {empty, _State1} = Result -> Result;
+ {loaded, {MsgStatus, State1}} -> {{value, MsgStatus}, State1}
+ end.
+
+read_msg(#msg_status{msg = undefined,
+ msg_id = MsgId,
+ is_persistent = IsPersistent}, State) ->
+ read_msg(MsgId, IsPersistent, State);
+read_msg(#msg_status{msg = Msg}, State) ->
+ {Msg, State}.
+
+read_msg(MsgId, IsPersistent, State = #vqstate{msg_store_clients = MSCState,
+ disk_read_count = Count}) ->
+ {{ok, Msg = #basic_message {}}, MSCState1} =
+ msg_store_read(MSCState, IsPersistent, MsgId),
+ {Msg, State #vqstate {msg_store_clients = MSCState1,
+ disk_read_count = Count + 1}}.
+
+stats(Signs, Statuses, DeltaPaged, State) ->
+ stats0(expand_signs(Signs), expand_statuses(Statuses), DeltaPaged, State).
+
+expand_signs(ready0) -> {0, 0, true};
+expand_signs(lazy_pub) -> {1, 0, true};
+expand_signs({A, B}) -> {A, B, false}.
+
+expand_statuses({none, A}) -> {false, msg_in_ram(A), A};
+expand_statuses({B, none}) -> {msg_in_ram(B), false, B};
+expand_statuses({lazy, A}) -> {false , false, A};
+expand_statuses({B, A}) -> {msg_in_ram(B), msg_in_ram(A), B}.
+
+%% In this function at least, we are religious: the variable name
+%% contains "Ready" or "Unacked" iff that is what it counts. If
+%% neither is present it counts both.
+stats0({DeltaReady, DeltaUnacked, ReadyMsgPaged},
+ {InRamBefore, InRamAfter, MsgStatus}, DeltaPaged,
+ State = #vqstate{len = ReadyCount,
+ bytes = ReadyBytes,
+ ram_msg_count = RamReadyCount,
+ persistent_count = PersistentCount,
+ unacked_bytes = UnackedBytes,
+ ram_bytes = RamBytes,
+ delta_transient_bytes = DeltaBytes,
+ persistent_bytes = PersistentBytes}) ->
+ S = msg_size(MsgStatus),
+ DeltaTotal = DeltaReady + DeltaUnacked,
+ DeltaRam = case {InRamBefore, InRamAfter} of
+ {false, false} -> 0;
+ {false, true} -> 1;
+ {true, false} -> -1;
+ {true, true} -> 0
+ end,
+ DeltaRamReady = case DeltaReady of
+ 1 -> one_if(InRamAfter);
+ -1 -> -one_if(InRamBefore);
+ 0 when ReadyMsgPaged -> DeltaRam;
+ 0 -> 0
+ end,
+ DeltaPersistent = DeltaTotal * one_if(MsgStatus#msg_status.is_persistent),
+ State#vqstate{len = ReadyCount + DeltaReady,
+ ram_msg_count = RamReadyCount + DeltaRamReady,
+ persistent_count = PersistentCount + DeltaPersistent,
+ bytes = ReadyBytes + DeltaReady * S,
+ unacked_bytes = UnackedBytes + DeltaUnacked * S,
+ ram_bytes = RamBytes + DeltaRam * S,
+ persistent_bytes = PersistentBytes + DeltaPersistent * S,
+ delta_transient_bytes = DeltaBytes + DeltaPaged * one_if(not MsgStatus#msg_status.is_persistent) * S}.
+
+msg_size(#msg_status{msg_props = #message_properties{size = Size}}) -> Size.
+
+msg_in_ram(#msg_status{msg = Msg}) -> Msg =/= undefined.
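+
+%% For illustration: publishing a ready message is stats({1, 0},
+%% {none, MsgStatus}, 0, State), which grows len and bytes by one
+%% message, while fetching it with an ack pending is stats({-1, 1},
+%% {MsgStatus, MsgStatus}, 0, State), which moves its size from bytes
+%% to unacked_bytes; the RAM counters follow msg_in_ram/1.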
+
+%% first param: AckRequired
+remove(true, MsgStatus = #msg_status {
+ seq_id = SeqId,
+ is_delivered = IsDelivered,
+ index_on_disk = IndexOnDisk },
+ State = #vqstate {out_counter = OutCount,
+ index_state = IndexState}) ->
+ %% Mark it delivered if necessary
+ IndexState1 = maybe_write_delivered(
+ IndexOnDisk andalso not IsDelivered,
+ SeqId, IndexState),
+
+ State1 = record_pending_ack(
+ MsgStatus #msg_status {
+ is_delivered = true }, State),
+
+ State2 = stats({-1, 1}, {MsgStatus, MsgStatus}, 0, State1),
+
+ {SeqId, maybe_update_rates(
+ State2 #vqstate {out_counter = OutCount + 1,
+ index_state = IndexState1})};
+
+%% This function body has the same behaviour as remove_queue_entries/3,
+%% but instead of removing messages from a ?QUEUE it removes just one
+%% message, the one referenced by the MsgStatus provided.
+remove(false, MsgStatus = #msg_status {
+ seq_id = SeqId,
+ msg_id = MsgId,
+ is_persistent = IsPersistent,
+ is_delivered = IsDelivered,
+ msg_in_store = MsgInStore,
+ index_on_disk = IndexOnDisk },
+ State = #vqstate {out_counter = OutCount,
+ index_state = IndexState,
+ msg_store_clients = MSCState}) ->
+ %% Mark it delivered if necessary
+ IndexState1 = maybe_write_delivered(
+ IndexOnDisk andalso not IsDelivered,
+ SeqId, IndexState),
+
+ %% Remove from msg_store and queue index, if necessary
+ case MsgInStore of
+ true -> ok = msg_store_remove(MSCState, IsPersistent, [MsgId]);
+ false -> ok
+ end,
+
+ IndexState2 =
+ case IndexOnDisk of
+ true -> rabbit_queue_index:ack([SeqId], IndexState1);
+ false -> IndexState1
+ end,
+
+ State1 = stats({-1, 0}, {MsgStatus, none}, 0, State),
+
+ {undefined, maybe_update_rates(
+ State1 #vqstate {out_counter = OutCount + 1,
+ index_state = IndexState2})}.
+
+%% This function exists as a way to improve dropwhile/2
+%% performance. The idea of having this function is to optimise calls
+%% to rabbit_queue_index by batching delivers and acks, instead of
+%% sending them one by one.
+%%
+%% Instead of removing every message as they are popped from the
+%% queue, it first accumulates them and then removes them by calling
+%% remove_queue_entries/3, since the behaviour of
+%% remove_queue_entries/3 when used with
+%% process_delivers_and_acks_fun(deliver_and_ack) is the same as
+%% calling remove(false, MsgStatus, State).
+%%
+%% remove/3 also updates the out_counter in every call, but here we do
+%% it just once at the end.
+remove_by_predicate(Pred, State = #vqstate {out_counter = OutCount}) ->
+ {MsgProps, QAcc, State1} =
+ collect_by_predicate(Pred, ?QUEUE:new(), State),
+ State2 =
+ remove_queue_entries(
+ QAcc, process_delivers_and_acks_fun(deliver_and_ack), State1),
+ %% maybe_update_rates/1 is called in remove/3 for every
+ %% message. Since here we update out_counter only once, at the end,
+ %% we likewise call maybe_update_rates/1 just once.
+ {MsgProps, maybe_update_rates(
+ State2 #vqstate {
+ out_counter = OutCount + ?QUEUE:len(QAcc)})}.
+
+%% This function exists as a way to improve fetchwhile/4
+%% performance. The idea of having this function is to optimise calls
+%% to rabbit_queue_index by batching delivers, instead of sending them
+%% one by one.
+%%
+%% Fun is the function passed to fetchwhile/4 that's
+%% applied to every fetched message and used to build the fetchwhile/4
+%% result accumulator FetchAcc.
+fetch_by_predicate(Pred, Fun, FetchAcc,
+ State = #vqstate {
+ index_state = IndexState,
+ out_counter = OutCount}) ->
+ {MsgProps, QAcc, State1} =
+ collect_by_predicate(Pred, ?QUEUE:new(), State),
+
+ {Delivers, FetchAcc1, State2} =
+ process_queue_entries(QAcc, Fun, FetchAcc, State1),
+
+ IndexState1 = rabbit_queue_index:deliver(Delivers, IndexState),
+
+ {MsgProps, FetchAcc1, maybe_update_rates(
+ State2 #vqstate {
+ index_state = IndexState1,
+ out_counter = OutCount + ?QUEUE:len(QAcc)})}.
+
+%% Here we try to do the same as remove(true, MsgStatus, State), but
+%% processing several messages at the same time. The idea is to
+%% optimize rabbit_queue_index:deliver/2 calls by sending a list of
+%% SeqIds instead of one at a time: process_queue_entries1 accumulates
+%% the required deliveries, records a pending ack for each message,
+%% and updates the stats, just as remove/3 does.
+%%
+%% For the meaning of Fun and FetchAcc arguments see
+%% fetch_by_predicate/4 above.
+process_queue_entries(Q, Fun, FetchAcc, State) ->
+ ?QUEUE:foldl(fun (MsgStatus, Acc) ->
+ process_queue_entries1(MsgStatus, Fun, Acc)
+ end,
+ {[], FetchAcc, State}, Q).
+
+process_queue_entries1(
+ #msg_status { seq_id = SeqId, is_delivered = IsDelivered,
+ index_on_disk = IndexOnDisk} = MsgStatus,
+ Fun,
+ {Delivers, FetchAcc, State}) ->
+ {Msg, State1} = read_msg(MsgStatus, State),
+ State2 = record_pending_ack(
+ MsgStatus #msg_status {
+ is_delivered = true }, State1),
+ {cons_if(IndexOnDisk andalso not IsDelivered, SeqId, Delivers),
+ Fun(Msg, SeqId, FetchAcc),
+ stats({-1, 1}, {MsgStatus, MsgStatus}, 0, State2)}.
+
+collect_by_predicate(Pred, QAcc, State) ->
+ case queue_out(State) of
+ {empty, State1} ->
+ {undefined, QAcc, State1};
+ {{value, MsgStatus = #msg_status { msg_props = MsgProps }}, State1} ->
+ case Pred(MsgProps) of
+ true -> collect_by_predicate(Pred, ?QUEUE:in(MsgStatus, QAcc),
+ State1);
+ false -> {MsgProps, QAcc, in_r(MsgStatus, State1)}
+ end
+ end.
+
+%%----------------------------------------------------------------------------
+%% Helpers for Public API purge/1 function
+%%----------------------------------------------------------------------------
+
+%% The difference between purge_when_pending_acks/1 and
+%% purge_and_index_reset/1 is that the former issues a deliver
+%% and an ack to the queue index for every message that's being
+%% removed, while the latter just resets the queue index state.
+purge_when_pending_acks(State) ->
+ State1 = purge1(process_delivers_and_acks_fun(deliver_and_ack), State),
+ a(State1).
+
+purge_and_index_reset(State) ->
+ State1 = purge1(process_delivers_and_acks_fun(none), State),
+ a(reset_qi_state(State1)).
+
+%% This function removes messages from each of {q1, q2, q3, q4}.
+%%
+%% With remove_queue_entries/3 q1 and q4 are emptied, while q2 and q3
+%% are specially handled by purge_betas_and_deltas/2.
+%%
+%% purge_betas_and_deltas/2 loads messages from the queue index,
+%% filling up q3 and in some cases moving messages from q2 to q3 while
+%% resetting q2 to an empty queue (see maybe_deltas_to_betas/2). The
+%% messages loaded into q3 are removed by calling
+%% remove_queue_entries/3 until there are no more messages to be read
+%% from the queue index. Messages are read in batches from the queue
+%% index.
+purge1(AfterFun, State = #vqstate { q4 = Q4}) ->
+ State1 = remove_queue_entries(Q4, AfterFun, State),
+
+ State2 = #vqstate {q1 = Q1} =
+ purge_betas_and_deltas(AfterFun, State1#vqstate{q4 = ?QUEUE:new()}),
+
+ State3 = remove_queue_entries(Q1, AfterFun, State2),
+
+ a(State3#vqstate{q1 = ?QUEUE:new()}).
+
+reset_qi_state(State = #vqstate{index_state = IndexState}) ->
+ State#vqstate{index_state =
+ rabbit_queue_index:reset_state(IndexState)}.
+
+is_pending_ack_empty(State) ->
+ count_pending_acks(State) =:= 0.
+
+is_unconfirmed_empty(#vqstate { unconfirmed = UC }) ->
+ gb_sets:is_empty(UC).
+
+count_pending_acks(#vqstate { ram_pending_ack = RPA,
+ disk_pending_ack = DPA,
+ qi_pending_ack = QPA }) ->
+ gb_trees:size(RPA) + gb_trees:size(DPA) + gb_trees:size(QPA).
+
+purge_betas_and_deltas(DelsAndAcksFun, State = #vqstate { mode = Mode }) ->
+ State0 = #vqstate { q3 = Q3 } =
+ case Mode of
+ lazy -> maybe_deltas_to_betas(DelsAndAcksFun, State);
+ _ -> State
+ end,
+
+ case ?QUEUE:is_empty(Q3) of
+ true -> State0;
+ false -> State1 = remove_queue_entries(Q3, DelsAndAcksFun, State0),
+ purge_betas_and_deltas(DelsAndAcksFun,
+ maybe_deltas_to_betas(
+ DelsAndAcksFun,
+ State1#vqstate{q3 = ?QUEUE:new()}))
+ end.
+
+remove_queue_entries(Q, DelsAndAcksFun,
+ State = #vqstate{msg_store_clients = MSCState}) ->
+ {MsgIdsByStore, Delivers, Acks, State1} =
+ ?QUEUE:foldl(fun remove_queue_entries1/2,
+ {maps:new(), [], [], State}, Q),
+ remove_msgs_by_id(MsgIdsByStore, MSCState),
+ DelsAndAcksFun(Delivers, Acks, State1).
+
+remove_queue_entries1(
+ #msg_status { msg_id = MsgId, seq_id = SeqId, is_delivered = IsDelivered,
+ msg_in_store = MsgInStore, index_on_disk = IndexOnDisk,
+ is_persistent = IsPersistent} = MsgStatus,
+ {MsgIdsByStore, Delivers, Acks, State}) ->
+ {case MsgInStore of
+ true -> rabbit_misc:maps_cons(IsPersistent, MsgId, MsgIdsByStore);
+ false -> MsgIdsByStore
+ end,
+ cons_if(IndexOnDisk andalso not IsDelivered, SeqId, Delivers),
+ cons_if(IndexOnDisk, SeqId, Acks),
+ stats({-1, 0}, {MsgStatus, none}, 0, State)}.
+
+process_delivers_and_acks_fun(deliver_and_ack) ->
+ fun (Delivers, Acks, State = #vqstate { index_state = IndexState }) ->
+ IndexState1 =
+ rabbit_queue_index:ack(
+ Acks, rabbit_queue_index:deliver(Delivers, IndexState)),
+ State #vqstate { index_state = IndexState1 }
+ end;
+process_delivers_and_acks_fun(_) ->
+ fun (_, _, State) ->
+ State
+ end.
+
+%%----------------------------------------------------------------------------
+%% Internal gubbins for publishing
+%%----------------------------------------------------------------------------
+
+publish1(Msg = #basic_message { is_persistent = IsPersistent, id = MsgId },
+ MsgProps = #message_properties { needs_confirming = NeedsConfirming },
+ IsDelivered, _ChPid, _Flow, PersistFun,
+ State = #vqstate { q1 = Q1, q3 = Q3, q4 = Q4,
+ mode = default,
+ qi_embed_msgs_below = IndexMaxSize,
+ next_seq_id = SeqId,
+ in_counter = InCount,
+ durable = IsDurable,
+ unconfirmed = UC }) ->
+ IsPersistent1 = IsDurable andalso IsPersistent,
+ MsgStatus = msg_status(IsPersistent1, IsDelivered, SeqId, Msg, MsgProps, IndexMaxSize),
+ {MsgStatus1, State1} = PersistFun(false, false, MsgStatus, State),
+ State2 = case ?QUEUE:is_empty(Q3) of
+ false -> State1 #vqstate { q1 = ?QUEUE:in(m(MsgStatus1), Q1) };
+ true -> State1 #vqstate { q4 = ?QUEUE:in(m(MsgStatus1), Q4) }
+ end,
+ InCount1 = InCount + 1,
+ UC1 = gb_sets_maybe_insert(NeedsConfirming, MsgId, UC),
+ stats({1, 0}, {none, MsgStatus1}, 0,
+ State2#vqstate{ next_seq_id = SeqId + 1,
+ in_counter = InCount1,
+ unconfirmed = UC1 });
+publish1(Msg = #basic_message { is_persistent = IsPersistent, id = MsgId },
+ MsgProps = #message_properties { needs_confirming = NeedsConfirming },
+ IsDelivered, _ChPid, _Flow, PersistFun,
+ State = #vqstate { mode = lazy,
+ qi_embed_msgs_below = IndexMaxSize,
+ next_seq_id = SeqId,
+ in_counter = InCount,
+ durable = IsDurable,
+ unconfirmed = UC,
+ delta = Delta}) ->
+ IsPersistent1 = IsDurable andalso IsPersistent,
+ MsgStatus = msg_status(IsPersistent1, IsDelivered, SeqId, Msg, MsgProps, IndexMaxSize),
+ {MsgStatus1, State1} = PersistFun(true, true, MsgStatus, State),
+ Delta1 = expand_delta(SeqId, Delta, IsPersistent),
+ UC1 = gb_sets_maybe_insert(NeedsConfirming, MsgId, UC),
+ stats(lazy_pub, {lazy, m(MsgStatus1)}, 1,
+ State1#vqstate{ delta = Delta1,
+ next_seq_id = SeqId + 1,
+ in_counter = InCount + 1,
+ unconfirmed = UC1}).
+
+batch_publish1({Msg, MsgProps, IsDelivered}, {ChPid, Flow, State}) ->
+ {ChPid, Flow, publish1(Msg, MsgProps, IsDelivered, ChPid, Flow,
+ fun maybe_prepare_write_to_disk/4, State)}.
+
+publish_delivered1(Msg = #basic_message { is_persistent = IsPersistent,
+ id = MsgId },
+ MsgProps = #message_properties {
+ needs_confirming = NeedsConfirming },
+ _ChPid, _Flow, PersistFun,
+ State = #vqstate { mode = default,
+ qi_embed_msgs_below = IndexMaxSize,
+ next_seq_id = SeqId,
+ out_counter = OutCount,
+ in_counter = InCount,
+ durable = IsDurable,
+ unconfirmed = UC }) ->
+ IsPersistent1 = IsDurable andalso IsPersistent,
+ MsgStatus = msg_status(IsPersistent1, true, SeqId, Msg, MsgProps, IndexMaxSize),
+ {MsgStatus1, State1} = PersistFun(false, false, MsgStatus, State),
+ State2 = record_pending_ack(m(MsgStatus1), State1),
+ UC1 = gb_sets_maybe_insert(NeedsConfirming, MsgId, UC),
+ State3 = stats({0, 1}, {none, MsgStatus1}, 0,
+ State2 #vqstate { next_seq_id = SeqId + 1,
+ out_counter = OutCount + 1,
+ in_counter = InCount + 1,
+ unconfirmed = UC1 }),
+ {SeqId, State3};
+publish_delivered1(Msg = #basic_message { is_persistent = IsPersistent,
+ id = MsgId },
+ MsgProps = #message_properties {
+ needs_confirming = NeedsConfirming },
+ _ChPid, _Flow, PersistFun,
+ State = #vqstate { mode = lazy,
+ qi_embed_msgs_below = IndexMaxSize,
+ next_seq_id = SeqId,
+ out_counter = OutCount,
+ in_counter = InCount,
+ durable = IsDurable,
+ unconfirmed = UC }) ->
+ IsPersistent1 = IsDurable andalso IsPersistent,
+ MsgStatus = msg_status(IsPersistent1, true, SeqId, Msg, MsgProps, IndexMaxSize),
+ {MsgStatus1, State1} = PersistFun(true, true, MsgStatus, State),
+ State2 = record_pending_ack(m(MsgStatus1), State1),
+ UC1 = gb_sets_maybe_insert(NeedsConfirming, MsgId, UC),
+ State3 = stats({0, 1}, {none, MsgStatus1}, 0,
+ State2 #vqstate { next_seq_id = SeqId + 1,
+ out_counter = OutCount + 1,
+ in_counter = InCount + 1,
+ unconfirmed = UC1 }),
+ {SeqId, State3}.
+
+batch_publish_delivered1({Msg, MsgProps}, {ChPid, Flow, SeqIds, State}) ->
+ {SeqId, State1} =
+ publish_delivered1(Msg, MsgProps, ChPid, Flow,
+ fun maybe_prepare_write_to_disk/4,
+ State),
+ {ChPid, Flow, [SeqId | SeqIds], State1}.
+
+maybe_write_msg_to_disk(_Force, MsgStatus = #msg_status {
+ msg_in_store = true }, State) ->
+ {MsgStatus, State};
+maybe_write_msg_to_disk(Force, MsgStatus = #msg_status {
+ msg = Msg, msg_id = MsgId,
+ is_persistent = IsPersistent },
+ State = #vqstate{ msg_store_clients = MSCState,
+ disk_write_count = Count})
+ when Force orelse IsPersistent ->
+ case persist_to(MsgStatus) of
+ msg_store -> ok = msg_store_write(MSCState, IsPersistent, MsgId,
+ prepare_to_store(Msg)),
+ {MsgStatus#msg_status{msg_in_store = true},
+ State#vqstate{disk_write_count = Count + 1}};
+ queue_index -> {MsgStatus, State}
+ end;
+maybe_write_msg_to_disk(_Force, MsgStatus, State) ->
+ {MsgStatus, State}.
+
+%% Due to certain optimisations made inside
+%% rabbit_queue_index:pre_publish/7 we need to have two separate
+%% functions for index persistence. This one is only used when paging
+%% during memory pressure. We didn't want to modify
+%% maybe_write_index_to_disk/3 because that function is used in other
+%% places.
+maybe_batch_write_index_to_disk(_Force,
+ MsgStatus = #msg_status {
+ index_on_disk = true }, State) ->
+ {MsgStatus, State};
+maybe_batch_write_index_to_disk(Force,
+ MsgStatus = #msg_status {
+ msg = Msg,
+ msg_id = MsgId,
+ seq_id = SeqId,
+ is_persistent = IsPersistent,
+ is_delivered = IsDelivered,
+ msg_props = MsgProps},
+ State = #vqstate {
+ target_ram_count = TargetRamCount,
+ disk_write_count = DiskWriteCount,
+ index_state = IndexState})
+ when Force orelse IsPersistent ->
+ {MsgOrId, DiskWriteCount1} =
+ case persist_to(MsgStatus) of
+ msg_store -> {MsgId, DiskWriteCount};
+ queue_index -> {prepare_to_store(Msg), DiskWriteCount + 1}
+ end,
+ IndexState1 = rabbit_queue_index:pre_publish(
+ MsgOrId, SeqId, MsgProps, IsPersistent, IsDelivered,
+ TargetRamCount, IndexState),
+ {MsgStatus#msg_status{index_on_disk = true},
+ State#vqstate{index_state = IndexState1,
+ disk_write_count = DiskWriteCount1}};
+maybe_batch_write_index_to_disk(_Force, MsgStatus, State) ->
+ {MsgStatus, State}.
+
+maybe_write_index_to_disk(_Force, MsgStatus = #msg_status {
+ index_on_disk = true }, State) ->
+ {MsgStatus, State};
+maybe_write_index_to_disk(Force, MsgStatus = #msg_status {
+ msg = Msg,
+ msg_id = MsgId,
+ seq_id = SeqId,
+ is_persistent = IsPersistent,
+ is_delivered = IsDelivered,
+ msg_props = MsgProps},
+ State = #vqstate{target_ram_count = TargetRamCount,
+ disk_write_count = DiskWriteCount,
+ index_state = IndexState})
+ when Force orelse IsPersistent ->
+ {MsgOrId, DiskWriteCount1} =
+ case persist_to(MsgStatus) of
+ msg_store -> {MsgId, DiskWriteCount};
+ queue_index -> {prepare_to_store(Msg), DiskWriteCount + 1}
+ end,
+ IndexState1 = rabbit_queue_index:publish(
+ MsgOrId, SeqId, MsgProps, IsPersistent, TargetRamCount,
+ IndexState),
+ IndexState2 = maybe_write_delivered(IsDelivered, SeqId, IndexState1),
+ {MsgStatus#msg_status{index_on_disk = true},
+ State#vqstate{index_state = IndexState2,
+ disk_write_count = DiskWriteCount1}};
+
+maybe_write_index_to_disk(_Force, MsgStatus, State) ->
+ {MsgStatus, State}.
+
+maybe_write_to_disk(ForceMsg, ForceIndex, MsgStatus, State) ->
+ {MsgStatus1, State1} = maybe_write_msg_to_disk(ForceMsg, MsgStatus, State),
+ maybe_write_index_to_disk(ForceIndex, MsgStatus1, State1).
+
+maybe_prepare_write_to_disk(ForceMsg, ForceIndex, MsgStatus, State) ->
+ {MsgStatus1, State1} = maybe_write_msg_to_disk(ForceMsg, MsgStatus, State),
+ maybe_batch_write_index_to_disk(ForceIndex, MsgStatus1, State1).
+
+determine_persist_to(#basic_message{
+ content = #content{properties = Props,
+ properties_bin = PropsBin}},
+ #message_properties{size = BodySize},
+ IndexMaxSize) ->
+ %% The >= is so that you can set the env to 0 and never persist
+ %% to the index.
+ %%
+ %% We want this to be fast, so we avoid size(term_to_binary())
+ %% here, or using the term size estimation from truncate.erl, both
+ %% of which are too slow. So instead, if the message body size
+ %% goes over the limit then we avoid any other checks.
+ %%
+ %% If it doesn't we need to decide if the properties will push
+ %% it past the limit. If we have the encoded properties (usual
+ %% case) we can just check their size. If we don't (message came
+ %% via the direct client), we make a guess based on the number of
+ %% headers.
+ case BodySize >= IndexMaxSize of
+ true -> msg_store;
+ false -> Est = case is_binary(PropsBin) of
+ true -> BodySize + size(PropsBin);
+ false -> #'P_basic'{headers = Hs} = Props,
+ case Hs of
+ undefined -> 0;
+ _ -> length(Hs)
+ end * ?HEADER_GUESS_SIZE + BodySize
+ end,
+ case Est >= IndexMaxSize of
+ true -> msg_store;
+ false -> queue_index
+ end
+ end.
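+
+%% For illustration (sizes hypothetical): with
+%% queue_index_embed_msgs_below set to 4096, a 10000-byte body goes
+%% straight to the message store without further checks, while a
+%% 100-byte body with a 200-byte encoded properties blob is estimated
+%% at 300 bytes and is embedded in the queue index.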
+
+persist_to(#msg_status{persist_to = To}) -> To.
+
+prepare_to_store(Msg) ->
+ Msg#basic_message{
+ %% don't persist any recoverable decoded properties
+ content = rabbit_binary_parser:clear_decoded_content(
+ Msg #basic_message.content)}.
+
+%%----------------------------------------------------------------------------
+%% Internal gubbins for acks
+%%----------------------------------------------------------------------------
+
+record_pending_ack(#msg_status { seq_id = SeqId } = MsgStatus,
+ State = #vqstate { ram_pending_ack = RPA,
+ disk_pending_ack = DPA,
+ qi_pending_ack = QPA,
+ ack_in_counter = AckInCount}) ->
+ Insert = fun (Tree) -> gb_trees:insert(SeqId, MsgStatus, Tree) end,
+ {RPA1, DPA1, QPA1} =
+ case {msg_in_ram(MsgStatus), persist_to(MsgStatus)} of
+ {false, _} -> {RPA, Insert(DPA), QPA};
+ {_, queue_index} -> {RPA, DPA, Insert(QPA)};
+ {_, msg_store} -> {Insert(RPA), DPA, QPA}
+ end,
+ State #vqstate { ram_pending_ack = RPA1,
+ disk_pending_ack = DPA1,
+ qi_pending_ack = QPA1,
+ ack_in_counter = AckInCount + 1}.
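+
+%% For illustration: a pending ack whose message body is no longer in
+%% RAM always lands in disk_pending_ack, regardless of where it
+%% persists to; one that is in RAM goes to qi_pending_ack if it is
+%% embedded in the queue index, and to ram_pending_ack if it lives in
+%% the message store.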
+
+lookup_pending_ack(SeqId, #vqstate { ram_pending_ack = RPA,
+ disk_pending_ack = DPA,
+ qi_pending_ack = QPA}) ->
+ case gb_trees:lookup(SeqId, RPA) of
+ {value, V} -> V;
+ none -> case gb_trees:lookup(SeqId, DPA) of
+ {value, V} -> V;
+ none -> gb_trees:get(SeqId, QPA)
+ end
+ end.
+
+%% First parameter = UpdateStats
+remove_pending_ack(true, SeqId, State) ->
+ case remove_pending_ack(false, SeqId, State) of
+ {none, _} ->
+ {none, State};
+ {MsgStatus, State1} ->
+ {MsgStatus, stats({0, -1}, {MsgStatus, none}, 0, State1)}
+ end;
+remove_pending_ack(false, SeqId, State = #vqstate{ram_pending_ack = RPA,
+ disk_pending_ack = DPA,
+ qi_pending_ack = QPA}) ->
+ case gb_trees:lookup(SeqId, RPA) of
+ {value, V} -> RPA1 = gb_trees:delete(SeqId, RPA),
+ {V, State #vqstate { ram_pending_ack = RPA1 }};
+ none -> case gb_trees:lookup(SeqId, DPA) of
+ {value, V} ->
+ DPA1 = gb_trees:delete(SeqId, DPA),
+ {V, State#vqstate{disk_pending_ack = DPA1}};
+ none ->
+ case gb_trees:lookup(SeqId, QPA) of
+ {value, V} ->
+ QPA1 = gb_trees:delete(SeqId, QPA),
+ {V, State#vqstate{qi_pending_ack = QPA1}};
+ none ->
+ {none, State}
+ end
+ end
+ end.
+
+purge_pending_ack(KeepPersistent,
+ State = #vqstate { index_state = IndexState,
+ msg_store_clients = MSCState }) ->
+ {IndexOnDiskSeqIds, MsgIdsByStore, State1} = purge_pending_ack1(State),
+ case KeepPersistent of
+ true -> remove_transient_msgs_by_id(MsgIdsByStore, MSCState),
+ State1;
+ false -> IndexState1 =
+ rabbit_queue_index:ack(IndexOnDiskSeqIds, IndexState),
+ remove_msgs_by_id(MsgIdsByStore, MSCState),
+ State1 #vqstate { index_state = IndexState1 }
+ end.
+
+purge_pending_ack_delete_and_terminate(
+ State = #vqstate { index_state = IndexState,
+ msg_store_clients = MSCState }) ->
+ {_, MsgIdsByStore, State1} = purge_pending_ack1(State),
+ IndexState1 = rabbit_queue_index:delete_and_terminate(IndexState),
+ remove_msgs_by_id(MsgIdsByStore, MSCState),
+ State1 #vqstate { index_state = IndexState1 }.
+
+purge_pending_ack1(State = #vqstate { ram_pending_ack = RPA,
+ disk_pending_ack = DPA,
+ qi_pending_ack = QPA }) ->
+ F = fun (_SeqId, MsgStatus, Acc) -> accumulate_ack(MsgStatus, Acc) end,
+ {IndexOnDiskSeqIds, MsgIdsByStore, _AllMsgIds} =
+ rabbit_misc:gb_trees_fold(
+ F, rabbit_misc:gb_trees_fold(
+ F, rabbit_misc:gb_trees_fold(
+ F, accumulate_ack_init(), RPA), DPA), QPA),
+ State1 = State #vqstate { ram_pending_ack = gb_trees:empty(),
+ disk_pending_ack = gb_trees:empty(),
+ qi_pending_ack = gb_trees:empty()},
+ {IndexOnDiskSeqIds, MsgIdsByStore, State1}.
+
+%% MsgIdsByStore is a map with two keys:
+%%
+%% true: holds a list of Persistent Message Ids.
+%% false: holds a list of Transient Message Ids.
+%%
+%% When we call maps:to_list/1 we get two sets of msg ids, where
+%% IsPersistent is either true for persistent messages or false for
+%% transient ones. The msg_store_remove/3 function takes this boolean
+%% flag to determine which store the messages should be removed from.
+remove_msgs_by_id(MsgIdsByStore, MSCState) ->
+ [ok = msg_store_remove(MSCState, IsPersistent, MsgIds)
+ || {IsPersistent, MsgIds} <- maps:to_list(MsgIdsByStore)].
+
+remove_transient_msgs_by_id(MsgIdsByStore, MSCState) ->
+ case maps:find(false, MsgIdsByStore) of
+ error -> ok;
+ {ok, MsgIds} -> ok = msg_store_remove(MSCState, false, MsgIds)
+ end.
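+
+%% For illustration (ids hypothetical): given MsgIdsByStore =
+%% #{true => [P1, P2], false => [T1]}, remove_msgs_by_id/2 issues one
+%% msg_store_remove/3 per store, whereas remove_transient_msgs_by_id/2
+%% only removes [T1] and leaves the persistent store untouched.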
+
+accumulate_ack_init() -> {[], maps:new(), []}.
+
+accumulate_ack(#msg_status { seq_id = SeqId,
+ msg_id = MsgId,
+ is_persistent = IsPersistent,
+ msg_in_store = MsgInStore,
+ index_on_disk = IndexOnDisk },
+ {IndexOnDiskSeqIdsAcc, MsgIdsByStore, AllMsgIds}) ->
+ {cons_if(IndexOnDisk, SeqId, IndexOnDiskSeqIdsAcc),
+ case MsgInStore of
+ true -> rabbit_misc:maps_cons(IsPersistent, MsgId, MsgIdsByStore);
+ false -> MsgIdsByStore
+ end,
+ [MsgId | AllMsgIds]}.
+
+%%----------------------------------------------------------------------------
+%% Internal plumbing for confirms (aka publisher acks)
+%%----------------------------------------------------------------------------
+
+record_confirms(MsgIdSet, State = #vqstate { msgs_on_disk = MOD,
+ msg_indices_on_disk = MIOD,
+ unconfirmed = UC,
+ confirmed = C }) ->
+ State #vqstate {
+ msgs_on_disk = rabbit_misc:gb_sets_difference(MOD, MsgIdSet),
+ msg_indices_on_disk = rabbit_misc:gb_sets_difference(MIOD, MsgIdSet),
+ unconfirmed = rabbit_misc:gb_sets_difference(UC, MsgIdSet),
+ confirmed = gb_sets:union(C, MsgIdSet) }.
+
+msgs_written_to_disk(Callback, MsgIdSet, ignored) ->
+ Callback(?MODULE,
+ fun (?MODULE, State) -> record_confirms(MsgIdSet, State) end);
+msgs_written_to_disk(Callback, MsgIdSet, written) ->
+ Callback(?MODULE,
+ fun (?MODULE, State = #vqstate { msgs_on_disk = MOD,
+ msg_indices_on_disk = MIOD,
+ unconfirmed = UC }) ->
+ Confirmed = gb_sets:intersection(UC, MsgIdSet),
+ record_confirms(gb_sets:intersection(MsgIdSet, MIOD),
+ State #vqstate {
+ msgs_on_disk =
+ gb_sets:union(MOD, Confirmed) })
+ end).
+
+msg_indices_written_to_disk(Callback, MsgIdSet) ->
+ Callback(?MODULE,
+ fun (?MODULE, State = #vqstate { msgs_on_disk = MOD,
+ msg_indices_on_disk = MIOD,
+ unconfirmed = UC }) ->
+ Confirmed = gb_sets:intersection(UC, MsgIdSet),
+ record_confirms(gb_sets:intersection(MsgIdSet, MOD),
+ State #vqstate {
+ msg_indices_on_disk =
+ gb_sets:union(MIOD, Confirmed) })
+ end).
+
+msgs_and_indices_written_to_disk(Callback, MsgIdSet) ->
+ Callback(?MODULE,
+ fun (?MODULE, State) -> record_confirms(MsgIdSet, State) end).
+
+%%----------------------------------------------------------------------------
+%% Internal plumbing for requeue
+%%----------------------------------------------------------------------------
+
+publish_alpha(#msg_status { msg = undefined } = MsgStatus, State) ->
+ {Msg, State1} = read_msg(MsgStatus, State),
+ MsgStatus1 = MsgStatus#msg_status { msg = Msg },
+ {MsgStatus1, stats({1, -1}, {MsgStatus, MsgStatus1}, 0, State1)};
+publish_alpha(MsgStatus, State) ->
+ {MsgStatus, stats({1, -1}, {MsgStatus, MsgStatus}, 0, State)}.
+
+publish_beta(MsgStatus, State) ->
+ {MsgStatus1, State1} = maybe_prepare_write_to_disk(true, false, MsgStatus, State),
+ MsgStatus2 = m(trim_msg_status(MsgStatus1)),
+ {MsgStatus2, stats({1, -1}, {MsgStatus, MsgStatus2}, 0, State1)}.
+
+%% Rebuild queue, inserting sequence ids to maintain ordering
+queue_merge(SeqIds, Q, MsgIds, Limit, PubFun, State) ->
+ queue_merge(SeqIds, Q, ?QUEUE:new(), MsgIds,
+ Limit, PubFun, State).
+
+queue_merge([SeqId | Rest] = SeqIds, Q, Front, MsgIds,
+ Limit, PubFun, State)
+ when Limit == undefined orelse SeqId < Limit ->
+ case ?QUEUE:out(Q) of
+ {{value, #msg_status { seq_id = SeqIdQ } = MsgStatus}, Q1}
+ when SeqIdQ < SeqId ->
+ %% enqueue from the remaining queue
+ queue_merge(SeqIds, Q1, ?QUEUE:in(MsgStatus, Front), MsgIds,
+ Limit, PubFun, State);
+ {_, _Q1} ->
+ %% enqueue from the remaining list of sequence ids
+ case msg_from_pending_ack(SeqId, State) of
+ {none, _} ->
+ queue_merge(Rest, Q, Front, MsgIds, Limit, PubFun, State);
+ {MsgStatus, State1} ->
+ {#msg_status { msg_id = MsgId } = MsgStatus1, State2} =
+ PubFun(MsgStatus, State1),
+ queue_merge(Rest, Q, ?QUEUE:in(MsgStatus1, Front), [MsgId | MsgIds],
+ Limit, PubFun, State2)
+ end
+ end;
+queue_merge(SeqIds, Q, Front, MsgIds,
+ _Limit, _PubFun, State) ->
+ {SeqIds, ?QUEUE:join(Front, Q), MsgIds, State}.
+
+delta_merge([], Delta, MsgIds, State) ->
+ {Delta, MsgIds, State};
+delta_merge(SeqIds, Delta, MsgIds, State) ->
+ lists:foldl(fun (SeqId, {Delta0, MsgIds0, State0} = Acc) ->
+ case msg_from_pending_ack(SeqId, State0) of
+ {none, _} ->
+ Acc;
+ {#msg_status { msg_id = MsgId,
+ is_persistent = IsPersistent } = MsgStatus, State1} ->
+ {_MsgStatus, State2} =
+ maybe_prepare_write_to_disk(true, true, MsgStatus, State1),
+ {expand_delta(SeqId, Delta0, IsPersistent), [MsgId | MsgIds0],
+ stats({1, -1}, {MsgStatus, none}, 1, State2)}
+ end
+ end, {Delta, MsgIds, State}, SeqIds).
+
+%% Mostly opposite of record_pending_ack/2
+msg_from_pending_ack(SeqId, State) ->
+ case remove_pending_ack(false, SeqId, State) of
+ {none, _} ->
+ {none, State};
+ {#msg_status { msg_props = MsgProps } = MsgStatus, State1} ->
+ {MsgStatus #msg_status {
+ msg_props = MsgProps #message_properties { needs_confirming = false } },
+ State1}
+ end.
+
+beta_limit(Q) ->
+ case ?QUEUE:peek(Q) of
+ {value, #msg_status { seq_id = SeqId }} -> SeqId;
+ empty -> undefined
+ end.
+
+delta_limit(?BLANK_DELTA_PATTERN(_X)) -> undefined;
+delta_limit(#delta { start_seq_id = StartSeqId }) -> StartSeqId.
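+
+%% For illustration: with a delta covering sequence ids [100, 200),
+%% delta_limit/1 returns 100, so only requeued messages with smaller
+%% sequence ids are merged back into the beta queue by queue_merge/6;
+%% the rest are folded into the delta by delta_merge/4.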
+
+%%----------------------------------------------------------------------------
+%% Iterator
+%%----------------------------------------------------------------------------
+
+ram_ack_iterator(State) ->
+ {ack, gb_trees:iterator(State#vqstate.ram_pending_ack)}.
+
+disk_ack_iterator(State) ->
+ {ack, gb_trees:iterator(State#vqstate.disk_pending_ack)}.
+
+qi_ack_iterator(State) ->
+ {ack, gb_trees:iterator(State#vqstate.qi_pending_ack)}.
+
+msg_iterator(State) -> istate(start, State).
+
+istate(start, State) -> {q4, State#vqstate.q4, State};
+istate(q4, State) -> {q3, State#vqstate.q3, State};
+istate(q3, State) -> {delta, State#vqstate.delta, State};
+istate(delta, State) -> {q2, State#vqstate.q2, State};
+istate(q2, State) -> {q1, State#vqstate.q1, State};
+istate(q1, _State) -> done.
+
+next({ack, It}, IndexState) ->
+ case gb_trees:next(It) of
+ none -> {empty, IndexState};
+ {_SeqId, MsgStatus, It1} -> Next = {ack, It1},
+ {value, MsgStatus, true, Next, IndexState}
+ end;
+next(done, IndexState) -> {empty, IndexState};
+next({delta, #delta{start_seq_id = SeqId,
+ end_seq_id = SeqId}, State}, IndexState) ->
+ next(istate(delta, State), IndexState);
+next({delta, #delta{start_seq_id = SeqId,
+ end_seq_id = SeqIdEnd} = Delta, State}, IndexState) ->
+ SeqIdB = rabbit_queue_index:next_segment_boundary(SeqId),
+ SeqId1 = lists:min([SeqIdB, SeqIdEnd]),
+ {List, IndexState1} = rabbit_queue_index:read(SeqId, SeqId1, IndexState),
+ next({delta, Delta#delta{start_seq_id = SeqId1}, List, State}, IndexState1);
+next({delta, Delta, [], State}, IndexState) ->
+ next({delta, Delta, State}, IndexState);
+next({delta, Delta, [{_, SeqId, _, _, _} = M | Rest], State}, IndexState) ->
+ case is_msg_in_pending_acks(SeqId, State) of
+ false -> Next = {delta, Delta, Rest, State},
+ {value, beta_msg_status(M), false, Next, IndexState};
+ true -> next({delta, Delta, Rest, State}, IndexState)
+ end;
+next({Key, Q, State}, IndexState) ->
+ case ?QUEUE:out(Q) of
+ {empty, _Q} -> next(istate(Key, State), IndexState);
+ {{value, MsgStatus}, QN} -> Next = {Key, QN, State},
+ {value, MsgStatus, false, Next, IndexState}
+ end.
+
+inext(It, {Its, IndexState}) ->
+ case next(It, IndexState) of
+ {empty, IndexState1} ->
+ {Its, IndexState1};
+ {value, MsgStatus1, Unacked, It1, IndexState1} ->
+ {[{MsgStatus1, Unacked, It1} | Its], IndexState1}
+ end.
+
+ifold(_Fun, Acc, [], State0) ->
+ {Acc, State0};
+ifold(Fun, Acc, Its0, State0) ->
+ [{MsgStatus, Unacked, It} | Rest] =
+ lists:sort(fun ({#msg_status{seq_id = SeqId1}, _, _},
+ {#msg_status{seq_id = SeqId2}, _, _}) ->
+ SeqId1 =< SeqId2
+ end, Its0),
+ {Msg, State1} = read_msg(MsgStatus, State0),
+ case Fun(Msg, MsgStatus#msg_status.msg_props, Unacked, Acc) of
+ {stop, Acc1} ->
+ {Acc1, State1};
+ {cont, Acc1} ->
+ IndexState0 = State1#vqstate.index_state,
+ {Its1, IndexState1} = inext(It, {Rest, IndexState0}),
+ State2 = State1#vqstate{index_state = IndexState1},
+ ifold(Fun, Acc1, Its1, State2)
+ end.
+
+%%----------------------------------------------------------------------------
+%% Phase changes
+%%----------------------------------------------------------------------------
+
+maybe_reduce_memory_use(State = #vqstate {memory_reduction_run_count = MRedRunCount,
+ mode = Mode}) ->
+ case MRedRunCount >= ?EXPLICIT_GC_RUN_OP_THRESHOLD(Mode) of
+ true -> State1 = reduce_memory_use(State),
+ State1#vqstate{memory_reduction_run_count = 0};
+ false -> State#vqstate{memory_reduction_run_count = MRedRunCount + 1}
+ end.
+
+reduce_memory_use(State = #vqstate { target_ram_count = infinity }) ->
+ State;
+reduce_memory_use(State = #vqstate {
+ mode = default,
+ ram_pending_ack = RPA,
+ ram_msg_count = RamMsgCount,
+ target_ram_count = TargetRamCount,
+ io_batch_size = IoBatchSize,
+ rates = #rates { in = AvgIngress,
+ out = AvgEgress,
+ ack_in = AvgAckIngress,
+ ack_out = AvgAckEgress } }) ->
+ {CreditDiscBound, _} = rabbit_misc:get_env(rabbit,
+ msg_store_credit_disc_bound,
+ ?CREDIT_DISC_BOUND),
+ {NeedResumeA2B, State1} = {_, #vqstate { q2 = Q2, q3 = Q3 }} =
+ case chunk_size(RamMsgCount + gb_trees:size(RPA), TargetRamCount) of
+ 0 -> {false, State};
+ %% Reduce memory of pending acks and alphas. The order is
+ %% determined based on which is growing faster. Whichever
+ %% comes second may very well get a quota of 0 if the
+ %% first manages to push out the max number of messages.
+ A2BChunk ->
+ %% In case there are few messages to be sent to a message store
+ %% and many messages to be embedded to the queue index,
+ %% we should limit the number of messages to be flushed
+ %% to avoid blocking the process.
+ A2BChunkActual = case A2BChunk > CreditDiscBound * 2 of
+ true -> CreditDiscBound * 2;
+ false -> A2BChunk
+ end,
+ Funs = case ((AvgAckIngress - AvgAckEgress) >
+ (AvgIngress - AvgEgress)) of
+ true -> [fun limit_ram_acks/2,
+ fun push_alphas_to_betas/2];
+ false -> [fun push_alphas_to_betas/2,
+ fun limit_ram_acks/2]
+ end,
+ {Quota, State2} = lists:foldl(fun (ReduceFun, {QuotaN, StateN}) ->
+ ReduceFun(QuotaN, StateN)
+ end, {A2BChunkActual, State}, Funs),
+ {(Quota == 0) andalso (A2BChunk > A2BChunkActual), State2}
+ end,
+ Permitted = permitted_beta_count(State1),
+ {NeedResumeB2D, State3} =
+ %% If there are more messages with their queue position held in RAM,
+ %% a.k.a. betas, in Q2 & Q3 than IoBatchSize,
+ %% write their queue position to disk, a.k.a. push_betas_to_deltas
+ case chunk_size(?QUEUE:len(Q2) + ?QUEUE:len(Q3),
+ Permitted) of
+ B2DChunk when B2DChunk >= IoBatchSize ->
+ %% Same as for alphas to betas. Limit a number of messages
+ %% to be flushed to disk at once to avoid blocking the process.
+ B2DChunkActual = case B2DChunk > CreditDiscBound * 2 of
+ true -> CreditDiscBound * 2;
+ false -> B2DChunk
+ end,
+ StateBD = push_betas_to_deltas(B2DChunkActual, State1),
+ {B2DChunk > B2DChunkActual, StateBD};
+ _ ->
+ {false, State1}
+ end,
+ %% We can be blocked by the credit flow, limited by the batch size,
+ %% or finished with flushing.
+ %% If blocked by the credit flow, the credit grant will resume processing;
+ %% if limited by a batch, the batch continuation message should be sent.
+ %% The continuation message will be prioritised over publishes,
+ %% but not consumptions, so the queue can make progress.
+ Blocked = credit_flow:blocked(),
+ case {Blocked, NeedResumeA2B orelse NeedResumeB2D} of
+ %% Credit bump will continue paging
+ {true, _} -> State3;
+ %% Finished with paging
+ {false, false} -> State3;
+ %% Planning next batch
+ {false, true} ->
+ %% We don't want to use self-credit-flow, because it's harder to
+ %% reason about. So the process sends a (prioritised) message to
+ %% itself and sets a waiting_bump flag to keep its mailbox clean
+ maybe_bump_reduce_memory_use(State3)
+ end;
+%% When using lazy queues, there are no alphas, so we don't need to
+%% call push_alphas_to_betas/2.
+reduce_memory_use(State = #vqstate {
+ mode = lazy,
+ ram_pending_ack = RPA,
+ ram_msg_count = RamMsgCount,
+ target_ram_count = TargetRamCount }) ->
+ State1 = #vqstate { q3 = Q3 } =
+ case chunk_size(RamMsgCount + gb_trees:size(RPA), TargetRamCount) of
+ 0 -> State;
+ S1 -> {_, State2} = limit_ram_acks(S1, State),
+ State2
+ end,
+
+ State3 =
+ case chunk_size(?QUEUE:len(Q3),
+ permitted_beta_count(State1)) of
+ 0 ->
+ State1;
+ S2 ->
+ push_betas_to_deltas(S2, State1)
+ end,
+ garbage_collect(),
+ State3.
+
+maybe_bump_reduce_memory_use(State = #vqstate{ waiting_bump = true }) ->
+ State;
+maybe_bump_reduce_memory_use(State) ->
+ self() ! bump_reduce_memory_use,
+ State#vqstate{ waiting_bump = true }.
+
+limit_ram_acks(0, State) ->
+ {0, ui(State)};
+limit_ram_acks(Quota, State = #vqstate { ram_pending_ack = RPA,
+ disk_pending_ack = DPA }) ->
+ case gb_trees:is_empty(RPA) of
+ true ->
+ {Quota, ui(State)};
+ false ->
+ {SeqId, MsgStatus, RPA1} = gb_trees:take_largest(RPA),
+ {MsgStatus1, State1} =
+ maybe_prepare_write_to_disk(true, false, MsgStatus, State),
+ MsgStatus2 = m(trim_msg_status(MsgStatus1)),
+ DPA1 = gb_trees:insert(SeqId, MsgStatus2, DPA),
+ limit_ram_acks(Quota - 1,
+ stats({0, 0}, {MsgStatus, MsgStatus2}, 0,
+ State1 #vqstate { ram_pending_ack = RPA1,
+ disk_pending_ack = DPA1 }))
+ end.
+
+permitted_beta_count(#vqstate { len = 0 }) ->
+ infinity;
+permitted_beta_count(#vqstate { mode = lazy,
+ target_ram_count = TargetRamCount}) ->
+ TargetRamCount;
+permitted_beta_count(#vqstate { target_ram_count = 0, q3 = Q3 }) ->
+ lists:min([?QUEUE:len(Q3), rabbit_queue_index:next_segment_boundary(0)]);
+permitted_beta_count(#vqstate { q1 = Q1,
+ q4 = Q4,
+ target_ram_count = TargetRamCount,
+ len = Len }) ->
+ BetaDelta = Len - ?QUEUE:len(Q1) - ?QUEUE:len(Q4),
+ lists:max([rabbit_queue_index:next_segment_boundary(0),
+ BetaDelta - ((BetaDelta * BetaDelta) div
+ (BetaDelta + TargetRamCount))]).
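+
+%% For illustration (counts hypothetical): with BetaDelta = 100000
+%% and TargetRamCount = 30000 the formula gives
+%% 100000 - (100000 * 100000) div 130000 = 23077 permitted betas,
+%% already above the one-segment floor of
+%% rabbit_queue_index:next_segment_boundary(0) (16384 with the
+%% default segment size).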
+
+chunk_size(Current, Permitted)
+ when Permitted =:= infinity orelse Permitted >= Current ->
+ 0;
+chunk_size(Current, Permitted) ->
+ Current - Permitted.
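+
+%% For illustration: chunk_size(25000, infinity) and
+%% chunk_size(25000, 30000) are both 0 (nothing to page out), while
+%% chunk_size(25000, 23077) is 1923, the excess to be pushed to the
+%% next tier.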
+
+fetch_from_q3(State = #vqstate { mode = default,
+ q1 = Q1,
+ q2 = Q2,
+ delta = #delta { count = DeltaCount },
+ q3 = Q3,
+ q4 = Q4 }) ->
+ case ?QUEUE:out(Q3) of
+ {empty, _Q3} ->
+ {empty, State};
+ {{value, MsgStatus}, Q3a} ->
+ State1 = State #vqstate { q3 = Q3a },
+ State2 = case {?QUEUE:is_empty(Q3a), 0 == DeltaCount} of
+ {true, true} ->
+ %% q3 is now empty, it wasn't before;
+ %% delta is still empty. So q2 must be
+ %% empty, and we know q4 is empty
+ %% otherwise we wouldn't be loading from
+ %% q3. As such, we can just set q4 to Q1.
+ true = ?QUEUE:is_empty(Q2), %% ASSERTION
+ true = ?QUEUE:is_empty(Q4), %% ASSERTION
+ State1 #vqstate { q1 = ?QUEUE:new(), q4 = Q1 };
+ {true, false} ->
+ maybe_deltas_to_betas(State1);
+ {false, _} ->
+ %% q3 still isn't empty, we've not
+ %% touched delta, so the invariants
+ %% between q1, q2, delta and q3 are
+ %% maintained
+ State1
+ end,
+ {loaded, {MsgStatus, State2}}
+ end;
+%% lazy queues
+fetch_from_q3(State = #vqstate { mode = lazy,
+ delta = #delta { count = DeltaCount },
+ q3 = Q3 }) ->
+ case ?QUEUE:out(Q3) of
+ {empty, _Q3} when DeltaCount =:= 0 ->
+ {empty, State};
+ {empty, _Q3} ->
+ fetch_from_q3(maybe_deltas_to_betas(State));
+ {{value, MsgStatus}, Q3a} ->
+ State1 = State #vqstate { q3 = Q3a },
+ {loaded, {MsgStatus, State1}}
+ end.
+
+maybe_deltas_to_betas(State) ->
+ AfterFun = process_delivers_and_acks_fun(deliver_and_ack),
+ maybe_deltas_to_betas(AfterFun, State).
+
+maybe_deltas_to_betas(_DelsAndAcksFun,
+ State = #vqstate {delta = ?BLANK_DELTA_PATTERN(X) }) ->
+ State;
+maybe_deltas_to_betas(DelsAndAcksFun,
+ State = #vqstate {
+ q2 = Q2,
+ delta = Delta,
+ q3 = Q3,
+ index_state = IndexState,
+ ram_msg_count = RamMsgCount,
+ ram_bytes = RamBytes,
+ disk_read_count = DiskReadCount,
+ delta_transient_bytes = DeltaTransientBytes,
+ transient_threshold = TransientThreshold }) ->
+ #delta { start_seq_id = DeltaSeqId,
+ count = DeltaCount,
+ transient = Transient,
+ end_seq_id = DeltaSeqIdEnd } = Delta,
+ DeltaSeqId1 =
+ lists:min([rabbit_queue_index:next_segment_boundary(DeltaSeqId),
+ DeltaSeqIdEnd]),
+ {List, IndexState1} = rabbit_queue_index:read(DeltaSeqId, DeltaSeqId1,
+ IndexState),
+ {Q3a, RamCountsInc, RamBytesInc, State1, TransientCount, TransientBytes} =
+ betas_from_index_entries(List, TransientThreshold,
+ DelsAndAcksFun,
+ State #vqstate { index_state = IndexState1 }),
+ State2 = State1 #vqstate { ram_msg_count = RamMsgCount + RamCountsInc,
+ ram_bytes = RamBytes + RamBytesInc,
+ disk_read_count = DiskReadCount + RamCountsInc },
+ case ?QUEUE:len(Q3a) of
+ 0 ->
+ %% we ignored every message in the segment due to it being
+ %% transient and below the threshold
+ maybe_deltas_to_betas(
+ DelsAndAcksFun,
+ State2 #vqstate {
+ delta = d(Delta #delta { start_seq_id = DeltaSeqId1 })});
+ Q3aLen ->
+ Q3b = ?QUEUE:join(Q3, Q3a),
+ case DeltaCount - Q3aLen of
+ 0 ->
+ %% delta is now empty, but it wasn't before, so
+ %% can now join q2 onto q3
+ State2 #vqstate { q2 = ?QUEUE:new(),
+ delta = ?BLANK_DELTA,
+ q3 = ?QUEUE:join(Q3b, Q2),
+ delta_transient_bytes = 0};
+ N when N > 0 ->
+ Delta1 = d(#delta { start_seq_id = DeltaSeqId1,
+ count = N,
+ transient = Transient - TransientCount,
+ end_seq_id = DeltaSeqIdEnd }),
+ State2 #vqstate { delta = Delta1,
+ q3 = Q3b,
+ delta_transient_bytes = DeltaTransientBytes - TransientBytes }
+ end
+ end.
+
+push_alphas_to_betas(Quota, State) ->
+ {Quota1, State1} =
+ push_alphas_to_betas(
+ fun ?QUEUE:out/1,
+ fun (MsgStatus, Q1a,
+ State0 = #vqstate { q3 = Q3, delta = #delta { count = 0,
+ transient = 0 } }) ->
+ State0 #vqstate { q1 = Q1a, q3 = ?QUEUE:in(MsgStatus, Q3) };
+ (MsgStatus, Q1a, State0 = #vqstate { q2 = Q2 }) ->
+ State0 #vqstate { q1 = Q1a, q2 = ?QUEUE:in(MsgStatus, Q2) }
+ end, Quota, State #vqstate.q1, State),
+ {Quota2, State2} =
+ push_alphas_to_betas(
+ fun ?QUEUE:out_r/1,
+ fun (MsgStatus, Q4a, State0 = #vqstate { q3 = Q3 }) ->
+ State0 #vqstate { q3 = ?QUEUE:in_r(MsgStatus, Q3), q4 = Q4a }
+ end, Quota1, State1 #vqstate.q4, State1),
+ {Quota2, State2}.
+
+push_alphas_to_betas(_Generator, _Consumer, Quota, _Q,
+ State = #vqstate { ram_msg_count = RamMsgCount,
+ target_ram_count = TargetRamCount })
+ when Quota =:= 0 orelse
+ TargetRamCount =:= infinity orelse
+ TargetRamCount >= RamMsgCount ->
+ {Quota, ui(State)};
+push_alphas_to_betas(Generator, Consumer, Quota, Q, State) ->
+ %% We consume credits from the message_store whenever we need to
+ %% persist a message to disk. See:
+ %% rabbit_variable_queue:msg_store_write/4. So perhaps the
+ %% msg_store is trying to throttle down our queue.
+ case credit_flow:blocked() of
+ true -> {Quota, ui(State)};
+ false -> case Generator(Q) of
+ {empty, _Q} ->
+ {Quota, ui(State)};
+ {{value, MsgStatus}, Qa} ->
+ {MsgStatus1, State1} =
+ maybe_prepare_write_to_disk(true, false, MsgStatus,
+ State),
+ MsgStatus2 = m(trim_msg_status(MsgStatus1)),
+ State2 = stats(
+ ready0, {MsgStatus, MsgStatus2}, 0, State1),
+ State3 = Consumer(MsgStatus2, Qa, State2),
+ push_alphas_to_betas(Generator, Consumer, Quota - 1,
+ Qa, State3)
+ end
+ end.
+
+push_betas_to_deltas(Quota, State = #vqstate { mode = default,
+ q2 = Q2,
+ delta = Delta,
+ q3 = Q3}) ->
+ PushState = {Quota, Delta, State},
+ {Q3a, PushState1} = push_betas_to_deltas(
+ fun ?QUEUE:out_r/1,
+ fun rabbit_queue_index:next_segment_boundary/1,
+ Q3, PushState),
+ {Q2a, PushState2} = push_betas_to_deltas(
+ fun ?QUEUE:out/1,
+ fun (Q2MinSeqId) -> Q2MinSeqId end,
+ Q2, PushState1),
+ {_, Delta1, State1} = PushState2,
+ State1 #vqstate { q2 = Q2a,
+ delta = Delta1,
+ q3 = Q3a };
+%% In the case of lazy queues we want to page as many messages as
+%% possible from q3.
+push_betas_to_deltas(Quota, State = #vqstate { mode = lazy,
+ delta = Delta,
+ q3 = Q3}) ->
+ PushState = {Quota, Delta, State},
+ {Q3a, PushState1} = push_betas_to_deltas(
+ fun ?QUEUE:out_r/1,
+ fun (Q2MinSeqId) -> Q2MinSeqId end,
+ Q3, PushState),
+ {_, Delta1, State1} = PushState1,
+ State1 #vqstate { delta = Delta1,
+ q3 = Q3a }.
+
+push_betas_to_deltas(Generator, LimitFun, Q, PushState) ->
+ case ?QUEUE:is_empty(Q) of
+ true ->
+ {Q, PushState};
+ false ->
+ {value, #msg_status { seq_id = MinSeqId }} = ?QUEUE:peek(Q),
+ {value, #msg_status { seq_id = MaxSeqId }} = ?QUEUE:peek_r(Q),
+ Limit = LimitFun(MinSeqId),
+ case MaxSeqId < Limit of
+ true -> {Q, PushState};
+ false -> push_betas_to_deltas1(Generator, Limit, Q, PushState)
+ end
+ end.
+
+push_betas_to_deltas1(_Generator, _Limit, Q, {0, Delta, State}) ->
+ {Q, {0, Delta, ui(State)}};
+push_betas_to_deltas1(Generator, Limit, Q, {Quota, Delta, State}) ->
+ case Generator(Q) of
+ {empty, _Q} ->
+ {Q, {Quota, Delta, ui(State)}};
+ {{value, #msg_status { seq_id = SeqId }}, _Qa}
+ when SeqId < Limit ->
+ {Q, {Quota, Delta, ui(State)}};
+ {{value, MsgStatus = #msg_status { seq_id = SeqId }}, Qa} ->
+ {#msg_status { index_on_disk = true,
+ is_persistent = IsPersistent }, State1} =
+ maybe_batch_write_index_to_disk(true, MsgStatus, State),
+ State2 = stats(ready0, {MsgStatus, none}, 1, State1),
+ Delta1 = expand_delta(SeqId, Delta, IsPersistent),
+ push_betas_to_deltas1(Generator, Limit, Qa,
+ {Quota - 1, Delta1, State2})
+ end.
+
+%% Flushes queue index batch caches and updates queue index state.
+ui(#vqstate{index_state = IndexState,
+ target_ram_count = TargetRamCount} = State) ->
+ IndexState1 = rabbit_queue_index:flush_pre_publish_cache(
+ TargetRamCount, IndexState),
+ State#vqstate{index_state = IndexState1}.
+
+%%----------------------------------------------------------------------------
+%% Upgrading
+%%----------------------------------------------------------------------------
+
+-spec multiple_routing_keys() -> 'ok'.
+
+multiple_routing_keys() ->
+ transform_storage(
+      fun ({basic_message, ExchangeName, RoutingKey, Content,
+            MsgId, Persistent}) ->
+              {ok, {basic_message, ExchangeName, [RoutingKey], Content,
+                    MsgId, Persistent}};
+ (_) -> {error, corrupt_message}
+ end),
+ ok.
+
+
+%% Assumes message store is not running
+transform_storage(TransformFun) ->
+ transform_store(?PERSISTENT_MSG_STORE, TransformFun),
+ transform_store(?TRANSIENT_MSG_STORE, TransformFun).
+
+transform_store(Store, TransformFun) ->
+ rabbit_msg_store:force_recovery(rabbit_mnesia:dir(), Store),
+ rabbit_msg_store:transform_dir(rabbit_mnesia:dir(), Store, TransformFun).
+
+move_messages_to_vhost_store() ->
+ case list_persistent_queues() of
+ [] ->
+ log_upgrade("No durable queues found."
+ " Skipping message store migration"),
+ ok;
+ Queues ->
+ move_messages_to_vhost_store(Queues)
+ end,
+ ok = delete_old_store(),
+ ok = rabbit_queue_index:cleanup_global_recovery_terms().
+
+move_messages_to_vhost_store(Queues) ->
+ log_upgrade("Moving messages to per-vhost message store"),
+ %% Move the queue index for each persistent queue to the new store
+ lists:foreach(
+ fun(Queue) ->
+ QueueName = amqqueue:get_name(Queue),
+ rabbit_queue_index:move_to_per_vhost_stores(QueueName)
+ end,
+ Queues),
+ %% Legacy (global) msg_store may require recovery.
+ %% This upgrade step should only be started
+ %% if we are upgrading from a pre-3.7.0 version.
+ {QueuesWithTerms, RecoveryRefs, StartFunState} = read_old_recovery_terms(Queues),
+
+ OldStore = run_old_persistent_store(RecoveryRefs, StartFunState),
+
+ VHosts = rabbit_vhost:list_names(),
+
+ %% New store should not be recovered.
+ NewMsgStore = start_new_store(VHosts),
+ %% Recovery terms should be started for all vhosts for new store.
+ [ok = rabbit_recovery_terms:open_table(VHost) || VHost <- VHosts],
+
+ MigrationBatchSize = application:get_env(rabbit, queue_migration_batch_size,
+ ?QUEUE_MIGRATION_BATCH_SIZE),
+ in_batches(MigrationBatchSize,
+ {rabbit_variable_queue, migrate_queue, [OldStore, NewMsgStore]},
+ QueuesWithTerms,
+ "message_store upgrades: Migrating batch ~p of ~p queues. Out of total ~p ~n",
+ "message_store upgrades: Batch ~p of ~p queues migrated ~n. ~p total left"),
+
+ log_upgrade("Message store migration finished"),
+ ok = rabbit_sup:stop_child(OldStore),
+    [ok = rabbit_recovery_terms:close_table(VHost) || VHost <- VHosts],
+ ok = stop_new_store(NewMsgStore).
+
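+%% Apply M:F([Element | A]) to every element of List, Size elements at
+%% a time. Each batch fans out as one async RPC per element to the
+%% local node and is awaited in full before the next batch starts, so
+%% batches run concurrently internally but sequentially overall; any
+%% {badrpc, _} result aborts the whole run.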
+in_batches(Size, MFA, List, MessageStart, MessageEnd) ->
+ in_batches(Size, 1, MFA, List, MessageStart, MessageEnd).
+
+in_batches(_, _, _, [], _, _) -> ok;
+in_batches(Size, BatchNum, MFA, List, MessageStart, MessageEnd) ->
+ Length = length(List),
+ {Batch, Tail} = case Size > Length of
+ true -> {List, []};
+ false -> lists:split(Size, List)
+ end,
+ ProcessedLength = (BatchNum - 1) * Size,
+ rabbit_log:info(MessageStart, [BatchNum, Size, ProcessedLength + Length]),
+ {M, F, A} = MFA,
+ Keys = [ rpc:async_call(node(), M, F, [El | A]) || El <- Batch ],
+ lists:foreach(fun(Key) ->
+ case rpc:yield(Key) of
+ {badrpc, Err} -> throw(Err);
+ _ -> ok
+ end
+ end,
+ Keys),
+ rabbit_log:info(MessageEnd, [BatchNum, Size, length(Tail)]),
+ in_batches(Size, BatchNum + 1, MFA, Tail, MessageStart, MessageEnd).
+
+migrate_queue({QueueName = #resource{virtual_host = VHost, name = Name},
+ RecoveryTerm},
+ OldStore, NewStore) ->
+ log_upgrade_verbose(
+ "Migrating messages in queue ~s in vhost ~s to per-vhost message store~n",
+ [Name, VHost]),
+ OldStoreClient = get_global_store_client(OldStore),
+ NewStoreClient = get_per_vhost_store_client(QueueName, NewStore),
+    %% WARNING: during scan_queue_segments the queue index state is
+    %% recovered and then terminated. This can cause side effects!
+ rabbit_queue_index:scan_queue_segments(
+      %% We migrate only persistent messages that are found in the
+      %% message store and are not yet acked
+ fun (_SeqId, MsgId, _MsgProps, true, _IsDelivered, no_ack, OldC)
+ when is_binary(MsgId) ->
+ migrate_message(MsgId, OldC, NewStoreClient);
+ (_SeqId, _MsgId, _MsgProps,
+ _IsPersistent, _IsDelivered, _IsAcked, OldC) ->
+ OldC
+ end,
+ OldStoreClient,
+ QueueName),
+ rabbit_msg_store:client_terminate(OldStoreClient),
+ rabbit_msg_store:client_terminate(NewStoreClient),
+ NewClientRef = rabbit_msg_store:client_ref(NewStoreClient),
+ case RecoveryTerm of
+ non_clean_shutdown -> ok;
+ Term when is_list(Term) ->
+ NewRecoveryTerm = lists:keyreplace(persistent_ref, 1, RecoveryTerm,
+ {persistent_ref, NewClientRef}),
+ rabbit_queue_index:update_recovery_term(QueueName, NewRecoveryTerm)
+ end,
+ log_upgrade_verbose("Finished migrating queue ~s in vhost ~s", [Name, VHost]),
+ {QueueName, NewClientRef}.
+
+migrate_message(MsgId, OldC, NewC) ->
+ case rabbit_msg_store:read(MsgId, OldC) of
+ {{ok, Msg}, OldC1} ->
+ ok = rabbit_msg_store:write(MsgId, Msg, NewC),
+ OldC1;
+ _ -> OldC
+ end.
+
+get_per_vhost_store_client(#resource{virtual_host = VHost}, NewStore) ->
+ {VHost, StorePid} = lists:keyfind(VHost, 1, NewStore),
+ rabbit_msg_store:client_init(StorePid, rabbit_guid:gen(),
+ fun(_,_) -> ok end, fun() -> ok end).
+
+get_global_store_client(OldStore) ->
+ rabbit_msg_store:client_init(OldStore,
+ rabbit_guid:gen(),
+ fun(_,_) -> ok end,
+ fun() -> ok end).
+
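+%% Durable classic queues owned by this node that have not been
+%% recovered yet, i.e. present in rabbit_durable_queue but absent from
+%% rabbit_queue.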
+list_persistent_queues() ->
+ Node = node(),
+ mnesia:async_dirty(
+ fun () ->
+ qlc:e(qlc:q([Q || Q <- mnesia:table(rabbit_durable_queue),
+ ?amqqueue_is_classic(Q),
+ amqqueue:qnode(Q) == Node,
+ mnesia:read(rabbit_queue, amqqueue:get_name(Q), read) =:= []]))
+ end).
+
+read_old_recovery_terms([]) ->
+ {[], [], ?EMPTY_START_FUN_STATE};
+read_old_recovery_terms(Queues) ->
+ QueueNames = [amqqueue:get_name(Q) || Q <- Queues],
+ {AllTerms, StartFunState} = rabbit_queue_index:read_global_recovery_terms(QueueNames),
+ Refs = [Ref || Terms <- AllTerms,
+ Terms /= non_clean_shutdown,
+ begin
+ Ref = proplists:get_value(persistent_ref, Terms),
+ Ref =/= undefined
+ end],
+ {lists:zip(QueueNames, AllTerms), Refs, StartFunState}.
+
+run_old_persistent_store(Refs, StartFunState) ->
+ OldStoreName = ?PERSISTENT_MSG_STORE,
+ ok = rabbit_sup:start_child(OldStoreName, rabbit_msg_store, start_global_store_link,
+ [OldStoreName, rabbit_mnesia:dir(),
+ Refs, StartFunState]),
+ OldStoreName.
+
+start_new_store(VHosts) ->
+ %% Ensure vhost supervisor is started, so we can add vhosts to it.
+ lists:map(fun(VHost) ->
+ VHostDir = rabbit_vhost:msg_store_dir_path(VHost),
+ {ok, Pid} = rabbit_msg_store:start_link(?PERSISTENT_MSG_STORE,
+ VHostDir,
+ undefined,
+ ?EMPTY_START_FUN_STATE),
+ {VHost, Pid}
+ end,
+ VHosts).
+
+stop_new_store(NewStore) ->
+ lists:foreach(fun({_VHost, StorePid}) ->
+ unlink(StorePid),
+ exit(StorePid, shutdown)
+ end,
+ NewStore),
+ ok.
+
+delete_old_store() ->
+ log_upgrade("Removing the old message store data"),
+ rabbit_file:recursive_delete(
+ [filename:join([rabbit_mnesia:dir(), ?PERSISTENT_MSG_STORE])]),
+ %% Delete old transient store as well
+ rabbit_file:recursive_delete(
+ [filename:join([rabbit_mnesia:dir(), ?TRANSIENT_MSG_STORE])]),
+ ok.
+
+log_upgrade(Msg) ->
+ log_upgrade(Msg, []).
+
+log_upgrade(Msg, Args) ->
+ rabbit_log:info("message_store upgrades: " ++ Msg, Args).
+
+log_upgrade_verbose(Msg) ->
+ log_upgrade_verbose(Msg, []).
+
+log_upgrade_verbose(Msg, Args) ->
+ rabbit_log_upgrade:info(Msg, Args).
+
+maybe_client_terminate(MSCStateP) ->
+    %% The queue might have been asked to stop by the supervisor; it needs a
+    %% clean shutdown for the supervision strategy to work - if it reaches the
+    %% maximum number of restarts it might bring the vhost down.
+ try
+ rabbit_msg_store:client_terminate(MSCStateP)
+ catch
+ _:_ ->
+ ok
+ end.
diff --git a/deps/rabbit/src/rabbit_version.erl b/deps/rabbit/src/rabbit_version.erl
new file mode 100644
index 0000000000..3f5462c7b4
--- /dev/null
+++ b/deps/rabbit/src/rabbit_version.erl
@@ -0,0 +1,227 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_version).
+
+-export([recorded/0, matches/2, desired/0, desired_for_scope/1,
+ record_desired/0, record_desired_for_scope/1,
+ upgrades_required/1, all_upgrades_required/1,
+ check_version_consistency/3,
+ check_version_consistency/4, check_otp_consistency/1,
+ version_error/3]).
+
+%% -------------------------------------------------------------------
+
+-export_type([scope/0, step/0]).
+
+-type scope() :: atom().
+-type scope_version() :: [atom()].
+-type step() :: {atom(), atom()}.
+
+-type version() :: [atom()].
+
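+%% Steps are declared as module attributes and collected at runtime,
+%% e.g. (step name illustrative):
+%%
+%%   -rabbit_upgrade({my_step, mnesia, [some_earlier_step]}).
+%%
+%% where the third element lists the steps this one depends on.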
+%% -------------------------------------------------------------------
+
+-define(VERSION_FILENAME, "schema_version").
+-define(SCOPES, [mnesia, local]).
+
+%% -------------------------------------------------------------------
+
+-spec recorded() -> rabbit_types:ok_or_error2(version(), any()).
+
+recorded() -> case rabbit_file:read_term_file(schema_filename()) of
+ {ok, [V]} -> {ok, V};
+ {error, _} = Err -> Err
+ end.
+
+record(V) -> ok = rabbit_file:write_term_file(schema_filename(), [V]).
+
+recorded_for_scope(Scope) ->
+ case recorded() of
+ {error, _} = Err ->
+ Err;
+ {ok, Version} ->
+ {ok, case lists:keysearch(Scope, 1, categorise_by_scope(Version)) of
+ false -> [];
+ {value, {Scope, SV1}} -> SV1
+ end}
+ end.
+
+record_for_scope(Scope, ScopeVersion) ->
+ case recorded() of
+ {error, _} = Err ->
+ Err;
+ {ok, Version} ->
+ Version1 = lists:keystore(Scope, 1, categorise_by_scope(Version),
+ {Scope, ScopeVersion}),
+ ok = record([Name || {_Scope, Names} <- Version1, Name <- Names])
+ end.
+
+%% -------------------------------------------------------------------
+
+-spec matches([A], [A]) -> boolean().
+
+matches(VerA, VerB) ->
+ lists:usort(VerA) =:= lists:usort(VerB).
+
+%% -------------------------------------------------------------------
+
+-spec desired() -> version().
+
+desired() -> [Name || Scope <- ?SCOPES, Name <- desired_for_scope(Scope)].
+
+-spec desired_for_scope(scope()) -> scope_version().
+
+desired_for_scope(Scope) -> with_upgrade_graph(fun heads/1, Scope).
+
+-spec record_desired() -> 'ok'.
+
+record_desired() -> record(desired()).
+
+-spec record_desired_for_scope
+ (scope()) -> rabbit_types:ok_or_error(any()).
+
+record_desired_for_scope(Scope) ->
+ record_for_scope(Scope, desired_for_scope(Scope)).
+
+-spec upgrades_required
+ (scope()) -> rabbit_types:ok_or_error2([step()], any()).
+
+upgrades_required(Scope) ->
+ case recorded_for_scope(Scope) of
+ {error, enoent} ->
+ case filelib:is_file(rabbit_guid:filename()) of
+ false -> {error, starting_from_scratch};
+ true -> {error, version_not_available}
+ end;
+ {ok, CurrentHeads} ->
+ with_upgrade_graph(
+ fun (G) ->
+ case unknown_heads(CurrentHeads, G) of
+ [] -> {ok, upgrades_to_apply(CurrentHeads, G)};
+ Unknown -> {error, {future_upgrades_found, Unknown}}
+ end
+ end, Scope)
+ end.
+
+all_upgrades_required(Scopes) ->
+ case recorded() of
+ {error, enoent} ->
+ case filelib:is_file(rabbit_guid:filename()) of
+ false -> {error, starting_from_scratch};
+ true -> {error, version_not_available}
+ end;
+ {ok, _} ->
+ lists:foldl(
+ fun
+ (_, {error, Err}) -> {error, Err};
+ (Scope, {ok, Acc}) ->
+ case upgrades_required(Scope) of
+ %% Lift errors from any scope.
+ {error, Err} -> {error, Err};
+ %% Filter non-upgradable scopes
+ {ok, []} -> {ok, Acc};
+ {ok, Upgrades} -> {ok, [{Scope, Upgrades} | Acc]}
+ end
+ end,
+ {ok, []},
+ Scopes)
+ end.
+
+%% -------------------------------------------------------------------
+
+with_upgrade_graph(Fun, Scope) ->
+ case rabbit_misc:build_acyclic_graph(
+ fun ({_App, Module, Steps}) -> vertices(Module, Steps, Scope) end,
+ fun ({_App, Module, Steps}) -> edges(Module, Steps, Scope) end,
+ rabbit_misc:all_module_attributes(rabbit_upgrade)) of
+ {ok, G} -> try
+ Fun(G)
+ after
+ true = digraph:delete(G)
+ end;
+ {error, {vertex, duplicate, StepName}} ->
+ throw({error, {duplicate_upgrade_step, StepName}});
+ {error, {edge, {bad_vertex, StepName}, _From, _To}} ->
+ throw({error, {dependency_on_unknown_upgrade_step, StepName}});
+ {error, {edge, {bad_edge, StepNames}, _From, _To}} ->
+ throw({error, {cycle_in_upgrade_steps, StepNames}})
+ end.
+
+vertices(Module, Steps, Scope0) ->
+ [{StepName, {Module, StepName}} || {StepName, Scope1, _Reqs} <- Steps,
+ Scope0 == Scope1].
+
+edges(_Module, Steps, Scope0) ->
+ [{Require, StepName} || {StepName, Scope1, Requires} <- Steps,
+ Require <- Requires,
+ Scope0 == Scope1].
+
+unknown_heads(Heads, G) ->
+ [H || H <- Heads, digraph:vertex(G, H) =:= false].
+
+upgrades_to_apply(Heads, G) ->
+ %% Take all the vertices which can reach the known heads. That's
+ %% everything we've already applied. Subtract that from all
+ %% vertices: that's what we have to apply.
+ Unsorted = sets:to_list(
+ sets:subtract(
+ sets:from_list(digraph:vertices(G)),
+ sets:from_list(digraph_utils:reaching(Heads, G)))),
+ %% Form a subgraph from that list and find a topological ordering
+ %% so we can invoke them in order.
+ [element(2, digraph:vertex(G, StepName)) ||
+ StepName <- digraph_utils:topsort(digraph_utils:subgraph(G, Unsorted))].
+
+heads(G) ->
+ lists:sort([V || V <- digraph:vertices(G), digraph:out_degree(G, V) =:= 0]).
+
+%% -------------------------------------------------------------------
+
+categorise_by_scope(Version) when is_list(Version) ->
+ Categorised =
+ [{Scope, Name} || {_App, _Module, Attributes} <-
+ rabbit_misc:all_module_attributes(rabbit_upgrade),
+ {Name, Scope, _Requires} <- Attributes,
+ lists:member(Name, Version)],
+ maps:to_list(
+ lists:foldl(fun ({Scope, Name}, CatVersion) ->
+ rabbit_misc:maps_cons(Scope, Name, CatVersion)
+ end, maps:new(), Categorised)).
+
+dir() -> rabbit_mnesia:dir().
+
+schema_filename() -> filename:join(dir(), ?VERSION_FILENAME).
+
+%% --------------------------------------------------------------------
+
+-spec check_version_consistency
+ (string(), string(), string()) -> rabbit_types:ok_or_error(any()).
+
+check_version_consistency(This, Remote, Name) ->
+ check_version_consistency(This, Remote, Name, fun (A, B) -> A =:= B end).
+
+-spec check_version_consistency
+ (string(), string(), string(),
+ fun((string(), string()) -> boolean())) ->
+ rabbit_types:ok_or_error(any()).
+
+check_version_consistency(This, Remote, Name, Comp) ->
+ case Comp(This, Remote) of
+ true -> ok;
+ false -> version_error(Name, This, Remote)
+ end.
+
+version_error(Name, This, Remote) ->
+ {error, {inconsistent_cluster,
+ rabbit_misc:format("~s version mismatch: local node is ~s, "
+ "remote node ~s", [Name, This, Remote])}}.
+
+-spec check_otp_consistency
+ (string()) -> rabbit_types:ok_or_error(any()).
+
+check_otp_consistency(Remote) ->
+ check_version_consistency(rabbit_misc:otp_release(), Remote, "OTP").
diff --git a/deps/rabbit/src/rabbit_vhost.erl b/deps/rabbit/src/rabbit_vhost.erl
new file mode 100644
index 0000000000..c8c5fc961a
--- /dev/null
+++ b/deps/rabbit/src/rabbit_vhost.erl
@@ -0,0 +1,422 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_vhost).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+-include("vhost.hrl").
+
+-export([recover/0, recover/1]).
+-export([add/2, add/4, delete/2, exists/1, with/2, with_user_and_vhost/3, assert/1, update/2,
+ set_limits/2, vhost_cluster_state/1, is_running_on_all_nodes/1, await_running_on_all_nodes/2,
+ list/0, count/0, list_names/0, all/0, parse_tags/1]).
+-export([info/1, info/2, info_all/0, info_all/1, info_all/2, info_all/3]).
+-export([dir/1, msg_store_dir_path/1, msg_store_dir_wildcard/0]).
+-export([delete_storage/1]).
+-export([vhost_down/1]).
+-export([put_vhost/5]).
+
+%%
+%% API
+%%
+
+recover() ->
+ %% Clear out remnants of old incarnation, in case we restarted
+ %% faster than other nodes handled DOWN messages from us.
+ rabbit_amqqueue:on_node_down(node()),
+
+ rabbit_amqqueue:warn_file_limit(),
+
+ %% Prepare rabbit_semi_durable_route table
+ rabbit_binding:recover(),
+
+ %% rabbit_vhost_sup_sup will start the actual recovery.
+ %% So recovery will be run every time a vhost supervisor is restarted.
+ ok = rabbit_vhost_sup_sup:start(),
+
+ [ok = rabbit_vhost_sup_sup:init_vhost(VHost) || VHost <- list_names()],
+ ok.
+
+recover(VHost) ->
+ VHostDir = msg_store_dir_path(VHost),
+ rabbit_log:info("Making sure data directory '~ts' for vhost '~s' exists~n",
+ [VHostDir, VHost]),
+ VHostStubFile = filename:join(VHostDir, ".vhost"),
+ ok = rabbit_file:ensure_dir(VHostStubFile),
+ ok = file:write_file(VHostStubFile, VHost),
+ {Recovered, Failed} = rabbit_amqqueue:recover(VHost),
+ AllQs = Recovered ++ Failed,
+ QNames = [amqqueue:get_name(Q) || Q <- AllQs],
+ ok = rabbit_binding:recover(rabbit_exchange:recover(VHost), QNames),
+ ok = rabbit_amqqueue:start(Recovered),
+ %% Start queue mirrors.
+ ok = rabbit_mirror_queue_misc:on_vhost_up(VHost),
+ ok.
+
+-define(INFO_KEYS, vhost:info_keys()).
+
+-spec parse_tags(binary() | string() | atom()) -> [atom()].
+parse_tags(undefined) ->
+ [];
+parse_tags("") ->
+ [];
+parse_tags(<<"">>) ->
+ [];
+parse_tags(Val) when is_binary(Val) ->
+ parse_tags(rabbit_data_coercion:to_list(Val));
+parse_tags(Val) when is_list(Val) ->
+ [trim_tag(Tag) || Tag <- re:split(Val, ",", [{return, list}])].
+
+-spec add(vhost:name(), rabbit_types:username()) -> rabbit_types:ok_or_error(any()).
+
+add(VHost, ActingUser) ->
+ case exists(VHost) of
+ true -> ok;
+ false -> do_add(VHost, <<"">>, [], ActingUser)
+ end.
+
+-spec add(vhost:name(), binary(), [atom()], rabbit_types:username()) -> rabbit_types:ok_or_error(any()).
+
+add(Name, Description, Tags, ActingUser) ->
+ case exists(Name) of
+ true -> ok;
+ false -> do_add(Name, Description, Tags, ActingUser)
+ end.
+
+do_add(Name, Description, Tags, ActingUser) ->
+ case Description of
+ undefined ->
+ rabbit_log:info("Adding vhost '~s' without a description", [Name]);
+ Value ->
+ rabbit_log:info("Adding vhost '~s' (description: '~s')", [Name, Value])
+ end,
+ VHost = rabbit_misc:execute_mnesia_transaction(
+ fun () ->
+ case mnesia:wread({rabbit_vhost, Name}) of
+ [] ->
+ Row = vhost:new(Name, [], #{description => Description, tags => Tags}),
+ rabbit_log:debug("Inserting a virtual host record ~p", [Row]),
+ ok = mnesia:write(rabbit_vhost, Row, write),
+ Row;
+ %% the vhost already exists
+ [Row] ->
+ Row
+ end
+ end,
+ fun (VHost1, true) ->
+ VHost1;
+ (VHost1, false) ->
+ [begin
+ Resource = rabbit_misc:r(Name, exchange, ExchangeName),
+ rabbit_log:debug("Will declare an exchange ~p", [Resource]),
+ _ = rabbit_exchange:declare(Resource, Type, true, false, Internal, [], ActingUser)
+ end || {ExchangeName, Type, Internal} <-
+ [{<<"">>, direct, false},
+ {<<"amq.direct">>, direct, false},
+ {<<"amq.topic">>, topic, false},
+ %% per 0-9-1 pdf
+ {<<"amq.match">>, headers, false},
+ %% per 0-9-1 xml
+ {<<"amq.headers">>, headers, false},
+ {<<"amq.fanout">>, fanout, false},
+ {<<"amq.rabbitmq.trace">>, topic, true}]],
+ VHost1
+ end),
+ case rabbit_vhost_sup_sup:start_on_all_nodes(Name) of
+ ok ->
+ rabbit_event:notify(vhost_created, info(VHost)
+ ++ [{user_who_performed_action, ActingUser},
+ {description, Description},
+ {tags, Tags}]),
+ ok;
+ {error, Reason} ->
+ Msg = rabbit_misc:format("failed to set up vhost '~s': ~p",
+ [Name, Reason]),
+ {error, Msg}
+ end.
+
+-spec delete(vhost:name(), rabbit_types:username()) -> rabbit_types:ok_or_error(any()).
+
+delete(VHost, ActingUser) ->
+ %% FIXME: We are forced to delete the queues and exchanges outside
+ %% the TX below. Queue deletion involves sending messages to the queue
+ %% process, which in turn results in further mnesia actions and
+ %% eventually the termination of that process. Exchange deletion causes
+ %% notifications which must be sent outside the TX
+ rabbit_log:info("Deleting vhost '~s'~n", [VHost]),
+ QDelFun = fun (Q) -> rabbit_amqqueue:delete(Q, false, false, ActingUser) end,
+ [begin
+ Name = amqqueue:get_name(Q),
+ assert_benign(rabbit_amqqueue:with(Name, QDelFun), ActingUser)
+ end || Q <- rabbit_amqqueue:list(VHost)],
+ [assert_benign(rabbit_exchange:delete(Name, false, ActingUser), ActingUser) ||
+ #exchange{name = Name} <- rabbit_exchange:list(VHost)],
+ Funs = rabbit_misc:execute_mnesia_transaction(
+ with(VHost, fun () -> internal_delete(VHost, ActingUser) end)),
+ ok = rabbit_event:notify(vhost_deleted, [{name, VHost},
+ {user_who_performed_action, ActingUser}]),
+ [case Fun() of
+ ok -> ok;
+ {error, {no_such_vhost, VHost}} -> ok
+ end || Fun <- Funs],
+ %% After vhost was deleted from mnesia DB, we try to stop vhost supervisors
+ %% on all the nodes.
+ rabbit_vhost_sup_sup:delete_on_all_nodes(VHost),
+ ok.
+
+put_vhost(Name, Description, Tags0, Trace, Username) ->
+ Tags = case Tags0 of
+ undefined -> <<"">>;
+ null -> <<"">>;
+ "undefined" -> <<"">>;
+ "null" -> <<"">>;
+ Other -> Other
+ end,
+ Result = case exists(Name) of
+ true -> ok;
+ false -> add(Name, Description, parse_tags(Tags), Username),
+ %% wait for up to 45 seconds for the vhost to initialise
+ %% on all nodes
+ case await_running_on_all_nodes(Name, 45000) of
+ ok ->
+ maybe_grant_full_permissions(Name, Username);
+ {error, timeout} ->
+ {error, timeout}
+ end
+ end,
+ case Trace of
+ true -> rabbit_trace:start(Name);
+ false -> rabbit_trace:stop(Name);
+ undefined -> ok
+ end,
+ Result.
+
+%% when definitions are loaded on boot, Username here will be ?INTERNAL_USER,
+%% which does not actually exist
+maybe_grant_full_permissions(_Name, ?INTERNAL_USER) ->
+ ok;
+maybe_grant_full_permissions(Name, Username) ->
+ U = rabbit_auth_backend_internal:lookup_user(Username),
+ maybe_grant_full_permissions(U, Name, Username).
+
+maybe_grant_full_permissions({ok, _}, Name, Username) ->
+ rabbit_auth_backend_internal:set_permissions(
+ Username, Name, <<".*">>, <<".*">>, <<".*">>, Username);
+maybe_grant_full_permissions(_, _Name, _Username) ->
+ ok.
+
+
+%% 50 ms
+-define(AWAIT_SAMPLE_INTERVAL, 50).
+
+-spec await_running_on_all_nodes(vhost:name(), integer()) -> ok | {error, timeout}.
+await_running_on_all_nodes(VHost, Timeout) ->
+ Attempts = round(Timeout / ?AWAIT_SAMPLE_INTERVAL),
+ await_running_on_all_nodes0(VHost, Attempts).
+
+await_running_on_all_nodes0(_VHost, 0) ->
+ {error, timeout};
+await_running_on_all_nodes0(VHost, Attempts) ->
+ case is_running_on_all_nodes(VHost) of
+ true -> ok;
+ _ ->
+ timer:sleep(?AWAIT_SAMPLE_INTERVAL),
+ await_running_on_all_nodes0(VHost, Attempts - 1)
+ end.
+
+-spec is_running_on_all_nodes(vhost:name()) -> boolean().
+is_running_on_all_nodes(VHost) ->
+ States = vhost_cluster_state(VHost),
+ lists:all(fun ({_Node, State}) -> State =:= running end,
+ States).
+
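+%% Example result (node names illustrative):
+%%   [{rabbit@host1, running}, {rabbit@host2, nodedown}]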
+-spec vhost_cluster_state(vhost:name()) -> [{atom(), atom()}].
+vhost_cluster_state(VHost) ->
+ Nodes = rabbit_nodes:all_running(),
+ lists:map(fun(Node) ->
+ State = case rabbit_misc:rpc_call(Node,
+ rabbit_vhost_sup_sup, is_vhost_alive,
+ [VHost]) of
+ {badrpc, nodedown} -> nodedown;
+ true -> running;
+ false -> stopped
+ end,
+ {Node, State}
+ end,
+ Nodes).
+
+vhost_down(VHost) ->
+ ok = rabbit_event:notify(vhost_down,
+ [{name, VHost},
+ {node, node()},
+ {user_who_performed_action, ?INTERNAL_USER}]).
+
+delete_storage(VHost) ->
+ VhostDir = msg_store_dir_path(VHost),
+ rabbit_log:info("Deleting message store directory for vhost '~s' at '~s'~n", [VHost, VhostDir]),
+ %% Message store should be closed when vhost supervisor is closed.
+ case rabbit_file:recursive_delete([VhostDir]) of
+ ok -> ok;
+ {error, {_, enoent}} ->
+ %% a concurrent delete did the job for us
+ rabbit_log:warning("Tried to delete storage directories for vhost '~s', it failed with an ENOENT", [VHost]),
+ ok;
+ Other ->
+ rabbit_log:warning("Tried to delete storage directories for vhost '~s': ~p", [VHost, Other]),
+ Other
+ end.
+
+assert_benign(ok, _) -> ok;
+assert_benign({ok, _}, _) -> ok;
+assert_benign({ok, _, _}, _) -> ok;
+assert_benign({error, not_found}, _) -> ok;
+assert_benign({error, {absent, Q, _}}, ActingUser) ->
+ %% Removing the mnesia entries here is safe. If/when the down node
+ %% restarts, it will clear out the on-disk storage of the queue.
+ QName = amqqueue:get_name(Q),
+ rabbit_amqqueue:internal_delete(QName, ActingUser).
+
+internal_delete(VHost, ActingUser) ->
+ [ok = rabbit_auth_backend_internal:clear_permissions(
+ proplists:get_value(user, Info), VHost, ActingUser)
+ || Info <- rabbit_auth_backend_internal:list_vhost_permissions(VHost)],
+ TopicPermissions = rabbit_auth_backend_internal:list_vhost_topic_permissions(VHost),
+ [ok = rabbit_auth_backend_internal:clear_topic_permissions(
+ proplists:get_value(user, TopicPermission), VHost, ActingUser)
+ || TopicPermission <- TopicPermissions],
+ Fs1 = [rabbit_runtime_parameters:clear(VHost,
+ proplists:get_value(component, Info),
+ proplists:get_value(name, Info),
+ ActingUser)
+ || Info <- rabbit_runtime_parameters:list(VHost)],
+ Fs2 = [rabbit_policy:delete(VHost, proplists:get_value(name, Info), ActingUser)
+ || Info <- rabbit_policy:list(VHost)],
+ ok = mnesia:delete({rabbit_vhost, VHost}),
+ Fs1 ++ Fs2.
+
+-spec exists(vhost:name()) -> boolean().
+
+exists(VHost) ->
+ mnesia:dirty_read({rabbit_vhost, VHost}) /= [].
+
+-spec list_names() -> [vhost:name()].
+list_names() -> mnesia:dirty_all_keys(rabbit_vhost).
+
+%% Exists for backwards compatibility, prefer list_names/0.
+-spec list() -> [vhost:name()].
+list() -> list_names().
+
+-spec all() -> [vhost:vhost()].
+all() -> mnesia:dirty_match_object(rabbit_vhost, vhost:pattern_match_all()).
+
+-spec count() -> non_neg_integer().
+count() ->
+ length(list()).
+
+-spec with(vhost:name(), rabbit_misc:thunk(A)) -> A.
+
+with(VHost, Thunk) ->
+ fun () ->
+ case mnesia:read({rabbit_vhost, VHost}) of
+ [] ->
+ mnesia:abort({no_such_vhost, VHost});
+ [_V] ->
+ Thunk()
+ end
+ end.
+
+-spec with_user_and_vhost
+ (rabbit_types:username(), vhost:name(), rabbit_misc:thunk(A)) -> A.
+
+with_user_and_vhost(Username, VHost, Thunk) ->
+ rabbit_misc:with_user(Username, with(VHost, Thunk)).
+
+%% Like with/2 but outside an Mnesia tx
+
+-spec assert(vhost:name()) -> 'ok'.
+
+assert(VHost) -> case exists(VHost) of
+ true -> ok;
+ false -> throw({error, {no_such_vhost, VHost}})
+ end.
+
+-spec update(vhost:name(), fun((vhost:vhost()) -> vhost:vhost())) -> vhost:vhost().
+
+update(VHost, Fun) ->
+ case mnesia:read({rabbit_vhost, VHost}) of
+ [] ->
+ mnesia:abort({no_such_vhost, VHost});
+ [V] ->
+ V1 = Fun(V),
+ ok = mnesia:write(rabbit_vhost, V1, write),
+ V1
+ end.
+
+set_limits(VHost, undefined) ->
+ vhost:set_limits(VHost, []);
+set_limits(VHost, Limits) ->
+ vhost:set_limits(VHost, Limits).
+
+
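+%% Vhost names may contain characters that are unsafe in file names, so
+%% the per-vhost directory is the MD5 of the name rendered as a base 36
+%% integer; e.g. the default vhost <<"/">> maps to a name like
+%% "628WB79CIFDYO9LJI6DKMI09L" (value shown for illustration).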
+dir(Vhost) ->
+ <<Num:128>> = erlang:md5(Vhost),
+ rabbit_misc:format("~.36B", [Num]).
+
+msg_store_dir_path(VHost) ->
+ EncodedName = dir(VHost),
+ rabbit_data_coercion:to_list(filename:join([msg_store_dir_base(), EncodedName])).
+
+msg_store_dir_wildcard() ->
+ rabbit_data_coercion:to_list(filename:join([msg_store_dir_base(), "*"])).
+
+msg_store_dir_base() ->
+ Dir = rabbit_mnesia:dir(),
+ filename:join([Dir, "msg_stores", "vhosts"]).
+
+-spec trim_tag(list() | binary() | atom()) -> atom().
+trim_tag(Val) ->
+ rabbit_data_coercion:to_atom(string:trim(rabbit_data_coercion:to_list(Val))).
+
+%%----------------------------------------------------------------------------
+
+infos(Items, X) -> [{Item, i(Item, X)} || Item <- Items].
+
+i(name, VHost) -> vhost:get_name(VHost);
+i(tracing, VHost) -> rabbit_trace:enabled(vhost:get_name(VHost));
+i(cluster_state, VHost) -> vhost_cluster_state(vhost:get_name(VHost));
+i(description, VHost) -> vhost:get_description(VHost);
+i(tags, VHost) -> vhost:get_tags(VHost);
+i(metadata, VHost) -> vhost:get_metadata(VHost);
+i(Item, VHost) ->
+ rabbit_log:error("Don't know how to compute a virtual host info item '~s' for virtual host '~p'", [Item, VHost]),
+ throw({bad_argument, Item}).
+
+-spec info(vhost:vhost() | vhost:name()) -> rabbit_types:infos().
+
+info(VHost) when ?is_vhost(VHost) ->
+ infos(?INFO_KEYS, VHost);
+info(Key) ->
+ case mnesia:dirty_read({rabbit_vhost, Key}) of
+ [] -> [];
+ [VHost] -> infos(?INFO_KEYS, VHost)
+ end.
+
+-spec info(vhost:vhost(), rabbit_types:info_keys()) -> rabbit_types:infos().
+info(VHost, Items) -> infos(Items, VHost).
+
+-spec info_all() -> [rabbit_types:infos()].
+info_all() -> info_all(?INFO_KEYS).
+
+-spec info_all(rabbit_types:info_keys()) -> [rabbit_types:infos()].
+info_all(Items) -> [info(VHost, Items) || VHost <- all()].
+
+info_all(Ref, AggregatorPid) -> info_all(?INFO_KEYS, Ref, AggregatorPid).
+
+-spec info_all(rabbit_types:info_keys(), reference(), pid()) -> 'ok'.
+info_all(Items, Ref, AggregatorPid) ->
+ rabbit_control_misc:emitting_map(
+ AggregatorPid, Ref, fun(VHost) -> info(VHost, Items) end, all()).
diff --git a/deps/rabbit/src/rabbit_vhost_limit.erl b/deps/rabbit/src/rabbit_vhost_limit.erl
new file mode 100644
index 0000000000..bee01f3054
--- /dev/null
+++ b/deps/rabbit/src/rabbit_vhost_limit.erl
@@ -0,0 +1,205 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_vhost_limit).
+
+-behaviour(rabbit_runtime_parameter).
+
+-include("rabbit.hrl").
+
+-export([register/0]).
+-export([parse_set/3, set/3, clear/2]).
+-export([list/0, list/1]).
+-export([update_limit/4, clear_limit/3, get_limit/2]).
+-export([validate/5, notify/5, notify_clear/4]).
+-export([connection_limit/1, queue_limit/1,
+ is_over_queue_limit/1, would_exceed_queue_limit/2,
+ is_over_connection_limit/1]).
+
+-import(rabbit_misc, [pget/2, pget/3]).
+
+-rabbit_boot_step({?MODULE,
+ [{description, "vhost limit parameters"},
+ {mfa, {rabbit_vhost_limit, register, []}},
+ {requires, rabbit_registry},
+ {enables, recovery}]}).
+
+%%----------------------------------------------------------------------------
+
+register() ->
+ rabbit_registry:register(runtime_parameter, <<"vhost-limits">>, ?MODULE).
+
+validate(_VHost, <<"vhost-limits">>, Name, Term, _User) ->
+ rabbit_parameter_validation:proplist(
+ Name, vhost_limit_validation(), Term).
+
+notify(VHost, <<"vhost-limits">>, <<"limits">>, Limits, ActingUser) ->
+ rabbit_event:notify(vhost_limits_set, [{name, <<"limits">>},
+ {user_who_performed_action, ActingUser}
+ | Limits]),
+ update_vhost(VHost, Limits).
+
+notify_clear(VHost, <<"vhost-limits">>, <<"limits">>, ActingUser) ->
+ rabbit_event:notify(vhost_limits_cleared, [{name, <<"limits">>},
+ {user_who_performed_action, ActingUser}]),
+    %% If the function is called as part of vhost deletion, the vhost may
+    %% already have been deleted.
+ case rabbit_vhost:exists(VHost) of
+ true -> update_vhost(VHost, undefined);
+ false -> ok
+ end.
+
+connection_limit(VirtualHost) ->
+ get_limit(VirtualHost, <<"max-connections">>).
+
+queue_limit(VirtualHost) ->
+ get_limit(VirtualHost, <<"max-queues">>).
+
+
+query_limits(VHost) ->
+ case rabbit_runtime_parameters:list(VHost, <<"vhost-limits">>) of
+ [] -> [];
+ Params -> [ {pget(vhost, Param), pget(value, Param)}
+ || Param <- Params,
+ pget(value, Param) =/= undefined,
+ pget(name, Param) == <<"limits">> ]
+ end.
+
+
+-spec list() -> [{vhost:name(), rabbit_types:infos()}].
+list() ->
+ query_limits('_').
+
+-spec list(vhost:name()) -> rabbit_types:infos().
+list(VHost) ->
+ case query_limits(VHost) of
+ [] -> [];
+ [{VHost, Value}] -> Value
+ end.
+
+-spec is_over_connection_limit(vhost:name()) -> {true, non_neg_integer()} | false.
+
+is_over_connection_limit(VirtualHost) ->
+ case rabbit_vhost_limit:connection_limit(VirtualHost) of
+ %% no limit configured
+ undefined -> false;
+ %% with limit = 0, no connections are allowed
+ {ok, 0} -> {true, 0};
+ {ok, Limit} when is_integer(Limit) andalso Limit > 0 ->
+ ConnectionCount =
+ rabbit_connection_tracking:count_tracked_items_in({vhost, VirtualHost}),
+ case ConnectionCount >= Limit of
+ false -> false;
+ true -> {true, Limit}
+ end;
+ %% any negative value means "no limit". Note that parameter validation
+ %% will replace negative integers with 'undefined', so this is to be
+ %% explicit and extra defensive
+ {ok, Limit} when is_integer(Limit) andalso Limit < 0 -> false;
+ %% ignore non-integer limits
+ {ok, _Limit} -> false
+ end.
+
+-spec would_exceed_queue_limit(non_neg_integer(), vhost:name()) ->
+ {true, non_neg_integer(), non_neg_integer()} | false.
+
+would_exceed_queue_limit(AdditionalCount, VirtualHost) ->
+ case queue_limit(VirtualHost) of
+ undefined ->
+ %% no limit configured
+ false;
+ {ok, 0} ->
+ %% with limit = 0, no queues can be declared (perhaps not very
+ %% useful but consistent with the connection limit)
+ {true, 0, 0};
+ {ok, Limit} when is_integer(Limit) andalso Limit > 0 ->
+ QueueCount = rabbit_amqqueue:count(VirtualHost),
+ case (AdditionalCount + QueueCount) > Limit of
+ false -> false;
+ true -> {true, Limit, QueueCount}
+ end;
+ {ok, Limit} when is_integer(Limit) andalso Limit < 0 ->
+ %% any negative value means "no limit". Note that parameter validation
+ %% will replace negative integers with 'undefined', so this is to be
+ %% explicit and extra defensive
+ false;
+ {ok, _Limit} ->
+ %% ignore non-integer limits
+ false
+ end.
+
+-spec is_over_queue_limit(vhost:name()) -> {true, non_neg_integer()} | false.
+
+is_over_queue_limit(VirtualHost) ->
+ case would_exceed_queue_limit(1, VirtualHost) of
+ {true, Limit, _QueueCount} -> {true, Limit};
+ false -> false
+ end.
+
+%%----------------------------------------------------------------------------
+
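+%% Accepts a JSON definition, e.g. what `rabbitmqctl set_vhost_limits`
+%% passes through (call shown for illustration):
+%%
+%%   parse_set(<<"/">>, "{\"max-connections\": 50}", ActingUser)
+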
+parse_set(VHost, Defn, ActingUser) ->
+ Definition = rabbit_data_coercion:to_binary(Defn),
+ case rabbit_json:try_decode(Definition) of
+ {ok, Term} ->
+ set(VHost, maps:to_list(Term), ActingUser);
+ {error, Reason} ->
+ {error_string,
+ rabbit_misc:format("JSON decoding error. Reason: ~ts", [Reason])}
+ end.
+
+set(VHost, Defn, ActingUser) ->
+ rabbit_runtime_parameters:set_any(VHost, <<"vhost-limits">>,
+ <<"limits">>, Defn, ActingUser).
+
+clear(VHost, ActingUser) ->
+ rabbit_runtime_parameters:clear_any(VHost, <<"vhost-limits">>,
+ <<"limits">>, ActingUser).
+
+update_limit(VHost, Name, Value, ActingUser) ->
+ OldDef = case rabbit_runtime_parameters:list(VHost, <<"vhost-limits">>) of
+ [] -> [];
+ [Param] -> pget(value, Param, [])
+ end,
+ NewDef = [{Name, Value} | lists:keydelete(Name, 1, OldDef)],
+ set(VHost, NewDef, ActingUser).
+
+clear_limit(VHost, Name, ActingUser) ->
+ OldDef = case rabbit_runtime_parameters:list(VHost, <<"vhost-limits">>) of
+ [] -> [];
+ [Param] -> pget(value, Param, [])
+ end,
+ NewDef = lists:keydelete(Name, 1, OldDef),
+ set(VHost, NewDef, ActingUser).
+
+vhost_limit_validation() ->
+ [{<<"max-connections">>, fun rabbit_parameter_validation:integer/2, optional},
+ {<<"max-queues">>, fun rabbit_parameter_validation:integer/2, optional}].
+
+update_vhost(VHostName, Limits) ->
+ rabbit_misc:execute_mnesia_transaction(
+ fun() ->
+ rabbit_vhost:update(VHostName,
+ fun(VHost) ->
+ rabbit_vhost:set_limits(VHost, Limits)
+ end)
+ end),
+ ok.
+
+get_limit(VirtualHost, Limit) ->
+ case rabbit_runtime_parameters:list(VirtualHost, <<"vhost-limits">>) of
+ [] -> undefined;
+ [Param] -> case pget(value, Param) of
+ undefined -> undefined;
+ Val -> case pget(Limit, Val) of
+ undefined -> undefined;
+ %% no limit
+ N when N < 0 -> undefined;
+ N when N >= 0 -> {ok, N}
+ end
+ end
+ end.
diff --git a/deps/rabbit/src/rabbit_vhost_msg_store.erl b/deps/rabbit/src/rabbit_vhost_msg_store.erl
new file mode 100644
index 0000000000..8667b4d143
--- /dev/null
+++ b/deps/rabbit/src/rabbit_vhost_msg_store.erl
@@ -0,0 +1,68 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_vhost_msg_store).
+
+-include("rabbit.hrl").
+
+-export([start/4, stop/2, client_init/5, successfully_recovered_state/2]).
+-export([vhost_store_pid/2]).
+
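+%% Thin wrapper that locates a vhost's message store worker under its
+%% rabbit_vhost_sup and forwards to rabbit_msg_store. Type is the
+%% worker's child id, e.g. msg_store_persistent or msg_store_transient.
+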
+start(VHost, Type, ClientRefs, StartupFunState) when is_list(ClientRefs);
+ ClientRefs == undefined ->
+ case rabbit_vhost_sup_sup:get_vhost_sup(VHost) of
+ {ok, VHostSup} ->
+ VHostDir = rabbit_vhost:msg_store_dir_path(VHost),
+ supervisor2:start_child(VHostSup,
+ {Type, {rabbit_msg_store, start_link,
+ [Type, VHostDir, ClientRefs, StartupFunState]},
+ transient, ?MSG_STORE_WORKER_WAIT, worker, [rabbit_msg_store]});
+        %% we can get here if a vhost is added and removed concurrently,
+        %% e.g. some integration tests do that
+ {error, {no_such_vhost, VHost}} = E ->
+ rabbit_log:error("Failed to start a message store for vhost ~s: vhost no longer exists!",
+ [VHost]),
+ E
+ end.
+
+stop(VHost, Type) ->
+ case rabbit_vhost_sup_sup:get_vhost_sup(VHost) of
+ {ok, VHostSup} ->
+ ok = supervisor2:terminate_child(VHostSup, Type),
+ ok = supervisor2:delete_child(VHostSup, Type);
+ %% see start/4
+ {error, {no_such_vhost, VHost}} ->
+ rabbit_log:error("Failed to stop a message store for vhost ~s: vhost no longer exists!",
+ [VHost]),
+            ok
+ end.
+
+client_init(VHost, Type, Ref, MsgOnDiskFun, CloseFDsFun) ->
+ with_vhost_store(VHost, Type, fun(StorePid) ->
+ rabbit_msg_store:client_init(StorePid, Ref, MsgOnDiskFun, CloseFDsFun)
+ end).
+
+with_vhost_store(VHost, Type, Fun) ->
+ case vhost_store_pid(VHost, Type) of
+ no_pid ->
+ throw({message_store_not_started, Type, VHost});
+ Pid when is_pid(Pid) ->
+ Fun(Pid)
+ end.
+
+vhost_store_pid(VHost, Type) ->
+ {ok, VHostSup} = rabbit_vhost_sup_sup:get_vhost_sup(VHost),
+ case supervisor2:find_child(VHostSup, Type) of
+ [Pid] -> Pid;
+ [] -> no_pid
+ end.
+
+successfully_recovered_state(VHost, Type) ->
+ with_vhost_store(VHost, Type, fun(StorePid) ->
+ rabbit_msg_store:successfully_recovered_state(StorePid)
+ end).
diff --git a/deps/rabbit/src/rabbit_vhost_process.erl b/deps/rabbit/src/rabbit_vhost_process.erl
new file mode 100644
index 0000000000..cf70d49010
--- /dev/null
+++ b/deps/rabbit/src/rabbit_vhost_process.erl
@@ -0,0 +1,96 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2017-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+%% This module implements a vhost identity process.
+
+%% On start this process tries to recover the vhost data and process
+%% structure (queues and message stores).
+%% If recovery succeeds, the process saves its PID to the vhost process
+%% registry. If the vhost process PID is in the registry and the
+%% process is alive, the vhost is considered running.
+
+%% On termination, the process notifies that the vhost is going down.
+
+%% The process also periodically checks whether the vhost is still
+%% present in the Mnesia DB and stops the vhost supervision tree when
+%% it disappears.
+
+-module(rabbit_vhost_process).
+
+%% Transitional step until we can require Erlang/OTP 21 and
+%% use the now recommended try/catch syntax for obtaining the stack trace.
+-compile(nowarn_deprecated_function).
+
+-include("rabbit.hrl").
+
+-define(TICKTIME_RATIO, 4).
+
+-behaviour(gen_server2).
+-export([start_link/1]).
+
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
+ code_change/3]).
+
+start_link(VHost) ->
+ gen_server2:start_link(?MODULE, [VHost], []).
+
+
+init([VHost]) ->
+ process_flag(trap_exit, true),
+ rabbit_log:debug("Recovering data for VHost ~p~n", [VHost]),
+ try
+ %% Recover the vhost data and save it to vhost registry.
+ ok = rabbit_vhost:recover(VHost),
+ rabbit_vhost_sup_sup:save_vhost_process(VHost, self()),
+ Interval = interval(),
+ timer:send_interval(Interval, check_vhost),
+ true = erlang:garbage_collect(),
+ {ok, VHost}
+ catch _:Reason:Stacktrace ->
+ rabbit_amqqueue:mark_local_durable_queues_stopped(VHost),
+ rabbit_log:error("Unable to recover vhost ~p data. Reason ~p~n"
+ " Stacktrace ~p",
+ [VHost, Reason, Stacktrace]),
+ {stop, Reason}
+ end.
+
+handle_call(_,_,VHost) ->
+ {reply, ok, VHost}.
+
+handle_cast(_, VHost) ->
+ {noreply, VHost}.
+
+handle_info(check_vhost, VHost) ->
+ case rabbit_vhost:exists(VHost) of
+ true -> {noreply, VHost};
+ false ->
+ rabbit_log:warning("Virtual host '~s' is gone. "
+ "Stopping its top level supervisor.",
+ [VHost]),
+ %% Stop vhost's top supervisor in a one-off process to avoid a deadlock:
+ %% us (a child process) waiting for supervisor shutdown and our supervisor(s)
+            %% waiting for us to shut down.
+ spawn(
+ fun() ->
+ rabbit_vhost_sup_sup:stop_and_delete_vhost(VHost)
+ end),
+ {noreply, VHost}
+ end;
+handle_info(_, VHost) ->
+ {noreply, VHost}.
+
+terminate(shutdown, VHost) ->
+ %% Notify that vhost is stopped.
+ rabbit_vhost:vhost_down(VHost);
+terminate(_, _VHost) ->
+ ok.
+
+code_change(_OldVsn, VHost, _Extra) ->
+ {ok, VHost}.
+
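+%% Check period: four times the kernel net_ticktime. Note the default
+%% here (60000) is treated as milliseconds by timer:send_interval/2,
+%% while an explicitly configured net_ticktime is expressed in seconds,
+%% so a configured value yields a much shorter check interval.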
+interval() ->
+ application:get_env(kernel, net_ticktime, 60000) * ?TICKTIME_RATIO.
diff --git a/deps/rabbit/src/rabbit_vhost_sup.erl b/deps/rabbit/src/rabbit_vhost_sup.erl
new file mode 100644
index 0000000000..d82d827ecf
--- /dev/null
+++ b/deps/rabbit/src/rabbit_vhost_sup.erl
@@ -0,0 +1,22 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2017-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_vhost_sup).
+
+-include("rabbit.hrl").
+
+%% Each vhost gets an instance of this supervisor that supervises
+%% message stores and queues (via rabbit_amqqueue_sup_sup).
+-behaviour(supervisor2).
+-export([init/1]).
+-export([start_link/1]).
+
+start_link(VHost) ->
+ supervisor2:start_link(?MODULE, [VHost]).
+
+init([_VHost]) ->
+ {ok, {{one_for_all, 0, 1}, []}}.
diff --git a/deps/rabbit/src/rabbit_vhost_sup_sup.erl b/deps/rabbit/src/rabbit_vhost_sup_sup.erl
new file mode 100644
index 0000000000..c201237daa
--- /dev/null
+++ b/deps/rabbit/src/rabbit_vhost_sup_sup.erl
@@ -0,0 +1,271 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_vhost_sup_sup).
+
+-include("rabbit.hrl").
+
+-behaviour(supervisor2).
+
+-export([init/1]).
+
+-export([start_link/0, start/0]).
+-export([init_vhost/1,
+ start_vhost/1, start_vhost/2,
+ get_vhost_sup/1, get_vhost_sup/2,
+ save_vhost_sup/3,
+ save_vhost_process/2]).
+-export([delete_on_all_nodes/1, start_on_all_nodes/1]).
+-export([is_vhost_alive/1]).
+-export([check/0]).
+
+%% Internal
+-export([stop_and_delete_vhost/1]).
+
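+%% Registry entry for a running vhost: the wrapper child of this
+%% supervisor, the rabbit_vhost_sup beneath it and the vhost identity
+%% process. Kept in a public ETS table named after this module.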
+-record(vhost_sup, {vhost, vhost_sup_pid, wrapper_pid, vhost_process_pid}).
+
+start() ->
+ case supervisor:start_child(rabbit_sup, {?MODULE,
+ {?MODULE, start_link, []},
+ permanent, infinity, supervisor,
+ [?MODULE]}) of
+ {ok, _} -> ok;
+ {error, Err} -> {error, Err}
+ end.
+
+start_link() ->
+ supervisor2:start_link({local, ?MODULE}, ?MODULE, []).
+
+init([]) ->
+ %% This assumes that a single vhost termination should not shut down nodes
+ %% unless the operator opts in.
+ RestartStrategy = vhost_restart_strategy(),
+ ets:new(?MODULE, [named_table, public, {keypos, #vhost_sup.vhost}]),
+ {ok, {{simple_one_for_one, 0, 5},
+ [{rabbit_vhost, {rabbit_vhost_sup_wrapper, start_link, []},
+ RestartStrategy, ?SUPERVISOR_WAIT, supervisor,
+ [rabbit_vhost_sup_wrapper, rabbit_vhost_sup]}]}}.
+
+start_on_all_nodes(VHost) ->
+ %% Do not try to start a vhost on booting peer nodes
+ AllBooted = [Node || Node <- rabbit_nodes:all_running(), rabbit:is_booted(Node)],
+ Nodes = [node() | AllBooted],
+ Results = [{Node, start_vhost(VHost, Node)} || Node <- Nodes],
+ Failures = lists:filter(fun
+ ({_, {ok, _}}) -> false;
+ ({_, {error, {already_started, _}}}) -> false;
+ (_) -> true
+ end,
+ Results),
+ case Failures of
+ [] -> ok;
+ Errors -> {error, {failed_to_start_vhost_on_nodes, Errors}}
+ end.
+
+delete_on_all_nodes(VHost) ->
+ [ stop_and_delete_vhost(VHost, Node) || Node <- rabbit_nodes:all_running() ],
+ ok.
+
+stop_and_delete_vhost(VHost) ->
+ StopResult = case lookup_vhost_sup_record(VHost) of
+ not_found -> ok;
+ #vhost_sup{wrapper_pid = WrapperPid,
+ vhost_sup_pid = VHostSupPid} ->
+ case is_process_alive(WrapperPid) of
+ false -> ok;
+ true ->
+ rabbit_log:info("Stopping vhost supervisor ~p"
+ " for vhost '~s'~n",
+ [VHostSupPid, VHost]),
+ case supervisor2:terminate_child(?MODULE, WrapperPid) of
+ ok ->
+ true = ets:delete(?MODULE, VHost),
+ ok;
+ Other ->
+ Other
+ end
+ end
+ end,
+ ok = rabbit_vhost:delete_storage(VHost),
+ StopResult.
+
+%% We take an optimistic approach when stopping a remote VHost supervisor.
+stop_and_delete_vhost(VHost, Node) when Node == node(self()) ->
+ stop_and_delete_vhost(VHost);
+stop_and_delete_vhost(VHost, Node) ->
+ case rabbit_misc:rpc_call(Node, rabbit_vhost_sup_sup, stop_and_delete_vhost, [VHost]) of
+ ok -> ok;
+ {badrpc, RpcErr} ->
+ rabbit_log:error("Failed to stop and delete a vhost ~p"
+ " on node ~p."
+ " Reason: ~p",
+ [VHost, Node, RpcErr]),
+ {error, RpcErr}
+ end.
+
+-spec init_vhost(rabbit_types:vhost()) -> ok | {error, {no_such_vhost, rabbit_types:vhost()}}.
+init_vhost(VHost) ->
+ case start_vhost(VHost) of
+ {ok, _} -> ok;
+ {error, {already_started, _}} ->
+ rabbit_log:warning(
+ "Attempting to start an already started vhost '~s'.",
+ [VHost]),
+ ok;
+ {error, {no_such_vhost, VHost}} ->
+ {error, {no_such_vhost, VHost}};
+ {error, Reason} ->
+ case vhost_restart_strategy() of
+ permanent ->
+ rabbit_log:error(
+ "Unable to initialize vhost data store for vhost '~s'."
+ " Reason: ~p",
+ [VHost, Reason]),
+ throw({error, Reason});
+ transient ->
+ rabbit_log:warning(
+ "Unable to initialize vhost data store for vhost '~s'."
+ " The vhost will be stopped for this node. "
+ " Reason: ~p",
+ [VHost, Reason]),
+ ok
+ end
+ end.
+
+-type vhost_error() :: {no_such_vhost, rabbit_types:vhost()} |
+ {vhost_supervisor_not_running, rabbit_types:vhost()}.
+
+-spec get_vhost_sup(rabbit_types:vhost(), node()) -> {ok, pid()} | {error, vhost_error() | term()}.
+get_vhost_sup(VHost, Node) ->
+ case rabbit_misc:rpc_call(Node, rabbit_vhost_sup_sup, get_vhost_sup, [VHost]) of
+ {ok, Pid} when is_pid(Pid) ->
+ {ok, Pid};
+ {error, Err} ->
+ {error, Err};
+ {badrpc, RpcErr} ->
+ {error, RpcErr}
+ end.
+
+-spec get_vhost_sup(rabbit_types:vhost()) -> {ok, pid()} | {error, vhost_error()}.
+get_vhost_sup(VHost) ->
+ case rabbit_vhost:exists(VHost) of
+ false ->
+ {error, {no_such_vhost, VHost}};
+ true ->
+ case vhost_sup_pid(VHost) of
+ no_pid ->
+ {error, {vhost_supervisor_not_running, VHost}};
+ {ok, Pid} when is_pid(Pid) ->
+ {ok, Pid}
+ end
+ end.
+
+-spec start_vhost(rabbit_types:vhost(), node()) -> {ok, pid()} | {error, term()}.
+start_vhost(VHost, Node) ->
+ case rabbit_misc:rpc_call(Node, rabbit_vhost_sup_sup, start_vhost, [VHost]) of
+ {ok, Pid} -> {ok, Pid};
+ {error, Err} -> {error, Err};
+ {badrpc, RpcErr} -> {error, RpcErr}
+ end.
+
+-spec start_vhost(rabbit_types:vhost()) -> {ok, pid()} | {error, term()}.
+start_vhost(VHost) ->
+ case rabbit_vhost:exists(VHost) of
+ false -> {error, {no_such_vhost, VHost}};
+ true ->
+ case whereis(?MODULE) of
+ Pid when is_pid(Pid) ->
+ supervisor2:start_child(?MODULE, [VHost]);
+ undefined ->
+ {error, rabbit_vhost_sup_sup_not_running}
+ end
+ end.
+
+-spec is_vhost_alive(rabbit_types:vhost()) -> boolean().
+is_vhost_alive(VHost) ->
+    %% A vhost is considered alive if its supervision tree is alive and
+    %% recorded in the ETS table.
+ case lookup_vhost_sup_record(VHost) of
+ #vhost_sup{wrapper_pid = WrapperPid,
+ vhost_sup_pid = VHostSupPid,
+ vhost_process_pid = VHostProcessPid}
+ when is_pid(WrapperPid),
+ is_pid(VHostSupPid),
+ is_pid(VHostProcessPid) ->
+ is_process_alive(WrapperPid)
+ andalso
+ is_process_alive(VHostSupPid)
+ andalso
+ is_process_alive(VHostProcessPid);
+ _ -> false
+ end.
+
+
+-spec save_vhost_sup(rabbit_types:vhost(), pid(), pid()) -> ok.
+save_vhost_sup(VHost, WrapperPid, VHostPid) ->
+ true = ets:insert(?MODULE, #vhost_sup{vhost = VHost,
+ vhost_sup_pid = VHostPid,
+ wrapper_pid = WrapperPid}),
+ ok.
+
+-spec save_vhost_process(rabbit_types:vhost(), pid()) -> ok.
+save_vhost_process(VHost, VHostProcessPid) ->
+ true = ets:update_element(?MODULE, VHost,
+ {#vhost_sup.vhost_process_pid, VHostProcessPid}),
+ ok.
+
+-spec lookup_vhost_sup_record(rabbit_types:vhost()) -> #vhost_sup{} | not_found.
+lookup_vhost_sup_record(VHost) ->
+ case ets:info(?MODULE, name) of
+ ?MODULE ->
+ case ets:lookup(?MODULE, VHost) of
+ [] -> not_found;
+ [#vhost_sup{} = VHostSup] -> VHostSup
+ end;
+ undefined -> not_found
+ end.
+
+-spec vhost_sup_pid(rabbit_types:vhost()) -> no_pid | {ok, pid()}.
+vhost_sup_pid(VHost) ->
+ case lookup_vhost_sup_record(VHost) of
+ not_found ->
+ no_pid;
+ #vhost_sup{vhost_sup_pid = Pid} = VHostSup ->
+ case erlang:is_process_alive(Pid) of
+ true -> {ok, Pid};
+ false ->
+ ets:delete_object(?MODULE, VHostSup),
+ no_pid
+ end
+ end.
+
+vhost_restart_strategy() ->
+ %% This assumes that a single vhost termination should not shut down nodes
+ %% unless the operator opts in.
+ case application:get_env(rabbit, vhost_restart_strategy, continue) of
+ continue -> transient;
+ stop_node -> permanent;
+ transient -> transient;
+ permanent -> permanent
+ end.
+
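+%% Returns the vhosts whose supervision tree or message store workers
+%% are missing or dead, i.e. candidates for being restarted.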
+check() ->
+ VHosts = rabbit_vhost:list_names(),
+ lists:filter(
+ fun(V) ->
+ case rabbit_vhost_sup_sup:get_vhost_sup(V) of
+ {ok, Sup} ->
+ MsgStores = [Pid || {Name, Pid, _, _} <- supervisor:which_children(Sup),
+ lists:member(Name, [msg_store_persistent,
+ msg_store_transient])],
+ not is_vhost_alive(V) orelse (not lists:all(fun(P) ->
+ erlang:is_process_alive(P)
+ end, MsgStores));
+ {error, _} ->
+ true
+ end
+ end, VHosts).
diff --git a/deps/rabbit/src/rabbit_vhost_sup_wrapper.erl b/deps/rabbit/src/rabbit_vhost_sup_wrapper.erl
new file mode 100644
index 0000000000..ed239ade69
--- /dev/null
+++ b/deps/rabbit/src/rabbit_vhost_sup_wrapper.erl
@@ -0,0 +1,57 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2017-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+%% This module is a wrapper around vhost supervisor to
+%% provide exactly once restart semantics.
+
+-module(rabbit_vhost_sup_wrapper).
+
+-include("rabbit.hrl").
+
+-behaviour(supervisor2).
+-export([init/1]).
+-export([start_link/1]).
+-export([start_vhost_sup/1]).
+
+start_link(VHost) ->
+ %% Using supervisor, because supervisor2 does not stop a started child when
+ %% another one fails to start. Bug?
+ case rabbit_vhost_sup_sup:get_vhost_sup(VHost) of
+ {ok, Pid} ->
+ {error, {already_started, Pid}};
+ {error, _} ->
+ supervisor:start_link(?MODULE, [VHost])
+ end.
+
+init([VHost]) ->
+ %% 2 restarts in 5 minutes. One per message store.
+ {ok, {{one_for_all, 2, 300},
+ [
+ %% rabbit_vhost_sup is an empty supervisor container for
+ %% all data processes.
+ {rabbit_vhost_sup,
+ {rabbit_vhost_sup_wrapper, start_vhost_sup, [VHost]},
+ permanent, infinity, supervisor,
+ [rabbit_vhost_sup]},
+ %% rabbit_vhost_process is a vhost identity process, which
+ %% is responsible for data recovery and vhost aliveness status.
+ %% See the module comments for more info.
+ {rabbit_vhost_process,
+ {rabbit_vhost_process, start_link, [VHost]},
+ permanent, ?WORKER_WAIT, worker,
+ [rabbit_vhost_process]}]}}.
+
+
+start_vhost_sup(VHost) ->
+ case rabbit_vhost_sup:start_link(VHost) of
+ {ok, Pid} ->
+ %% Save vhost sup record with wrapper pid and vhost sup pid.
+ ok = rabbit_vhost_sup_sup:save_vhost_sup(VHost, self(), Pid),
+ {ok, Pid};
+ Other ->
+ Other
+ end.
diff --git a/deps/rabbit/src/rabbit_vm.erl b/deps/rabbit/src/rabbit_vm.erl
new file mode 100644
index 0000000000..b014e090c5
--- /dev/null
+++ b/deps/rabbit/src/rabbit_vm.erl
@@ -0,0 +1,427 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_vm).
+
+-export([memory/0, binary/0, ets_tables_memory/1]).
+
+-define(MAGIC_PLUGINS, ["cowboy", "ranch", "sockjs"]).
+
+%%----------------------------------------------------------------------------
+
+-spec memory() -> rabbit_types:infos().
+
+memory() ->
+ All = interesting_sups(),
+ {Sums, _Other} = sum_processes(
+ lists:append(All), distinguishers(), [memory]),
+
+ [Qs, QsSlave, Qqs, Ssqs, Srqs, SCoor, ConnsReader, ConnsWriter, ConnsChannel,
+ ConnsOther, MsgIndexProc, MgmtDbProc, Plugins] =
+ [aggregate(Names, Sums, memory, fun (X) -> X end)
+ || Names <- distinguished_interesting_sups()],
+
+ MnesiaETS = mnesia_memory(),
+ MsgIndexETS = ets_memory(msg_stores()),
+ MetricsETS = ets_memory([rabbit_metrics]),
+ QuorumETS = ets_memory([ra_log_ets]),
+ MetricsProc = try
+ [{_, M}] = process_info(whereis(rabbit_metrics), [memory]),
+ M
+ catch
+ error:badarg ->
+ 0
+ end,
+ MgmtDbETS = ets_memory([rabbit_mgmt_storage]),
+ [{total, ErlangTotal},
+ {processes, Processes},
+ {ets, ETS},
+ {atom, Atom},
+ {binary, Bin},
+ {code, Code},
+ {system, System}] =
+ erlang:memory([total, processes, ets, atom, binary, code, system]),
+
+ Strategy = vm_memory_monitor:get_memory_calculation_strategy(),
+ Allocated = recon_alloc:memory(allocated),
+ Rss = vm_memory_monitor:get_rss_memory(),
+
+ AllocatedUnused = max(Allocated - ErlangTotal, 0),
+ OSReserved = max(Rss - Allocated, 0),
+
+ OtherProc = Processes
+ - ConnsReader - ConnsWriter - ConnsChannel - ConnsOther
+ - Qs - QsSlave - Qqs - Ssqs - Srqs - SCoor - MsgIndexProc - Plugins
+ - MgmtDbProc - MetricsProc,
+
+ [
+ %% Connections
+ {connection_readers, ConnsReader},
+ {connection_writers, ConnsWriter},
+ {connection_channels, ConnsChannel},
+ {connection_other, ConnsOther},
+
+ %% Queues
+ {queue_procs, Qs},
+ {queue_slave_procs, QsSlave},
+ {quorum_queue_procs, Qqs},
+ {stream_queue_procs, Ssqs},
+ {stream_queue_replica_reader_procs, Srqs},
+ {stream_queue_coordinator_procs, SCoor},
+
+ %% Processes
+ {plugins, Plugins},
+ {other_proc, lists:max([0, OtherProc])}, %% [1]
+
+ %% Metrics
+ {metrics, MetricsETS + MetricsProc},
+ {mgmt_db, MgmtDbETS + MgmtDbProc},
+
+ %% ETS
+ {mnesia, MnesiaETS},
+ {quorum_ets, QuorumETS},
+ {other_ets, ETS - MnesiaETS - MetricsETS - MgmtDbETS - MsgIndexETS - QuorumETS},
+
+ %% Messages (mostly, some binaries are not messages)
+ {binary, Bin},
+ {msg_index, MsgIndexETS + MsgIndexProc},
+
+ %% System
+ {code, Code},
+ {atom, Atom},
+ {other_system, System - ETS - Bin - Code - Atom},
+ {allocated_unused, AllocatedUnused},
+ {reserved_unallocated, OSReserved},
+ {strategy, Strategy},
+ {total, [{erlang, ErlangTotal},
+ {rss, Rss},
+ {allocated, Allocated}]}
+ ].
+%% [1] - erlang:memory(processes) can be less than the sum of its
+%% parts. Rather than display something nonsensical, just silence any
+%% claims about negative memory. See
+%% http://erlang.org/pipermail/erlang-questions/2012-September/069320.html
+
+-spec binary() -> rabbit_types:infos().
+
+binary() ->
+ All = interesting_sups(),
+ {Sums, Rest} =
+ sum_processes(
+ lists:append(All),
+ fun (binary, Info, Acc) ->
+ lists:foldl(fun ({Ptr, Sz, _RefCnt}, Acc0) ->
+ sets:add_element({Ptr, Sz}, Acc0)
+ end, Acc, Info)
+ end, distinguishers(), [{binary, sets:new()}]),
+ [Other, Qs, QsSlave, Qqs, Ssqs, Srqs, Scoor, ConnsReader, ConnsWriter,
+ ConnsChannel, ConnsOther, MsgIndexProc, MgmtDbProc, Plugins] =
+ [aggregate(Names, [{other, Rest} | Sums], binary, fun sum_binary/1)
+ || Names <- [[other] | distinguished_interesting_sups()]],
+ [{connection_readers, ConnsReader},
+ {connection_writers, ConnsWriter},
+ {connection_channels, ConnsChannel},
+ {connection_other, ConnsOther},
+ {queue_procs, Qs},
+ {queue_slave_procs, QsSlave},
+ {quorum_queue_procs, Qqs},
+ {stream_queue_procs, Ssqs},
+ {stream_queue_replica_reader_procs, Srqs},
+ {stream_queue_coordinator_procs, Scoor},
+ {plugins, Plugins},
+ {mgmt_db, MgmtDbProc},
+ {msg_index, MsgIndexProc},
+ {other, Other}].
+
+%%----------------------------------------------------------------------------
+
+mnesia_memory() ->
+ case mnesia:system_info(is_running) of
+ yes -> lists:sum([bytes(mnesia:table_info(Tab, memory)) ||
+ Tab <- mnesia:system_info(tables)]);
+ _ -> 0
+ end.
+
+ets_memory(Owners) ->
+ lists:sum([V || {_K, V} <- ets_tables_memory(Owners)]).
+
+-spec ets_tables_memory(Owners) -> rabbit_types:infos()
+ when Owners :: all | OwnerProcessName | [OwnerProcessName],
+ OwnerProcessName :: atom().
+
+ets_tables_memory(all) ->
+ [{ets:info(T, name), bytes(ets:info(T, memory))}
+ || T <- ets:all(),
+ is_atom(T)];
+ets_tables_memory(OwnerName) when is_atom(OwnerName) ->
+ ets_tables_memory([OwnerName]);
+ets_tables_memory(Owners) when is_list(Owners) ->
+ OwnerPids = lists:map(fun(O) when is_pid(O) -> O;
+ (O) when is_atom(O) -> whereis(O)
+ end,
+ Owners),
+ [{ets:info(T, name), bytes(ets:info(T, memory))}
+ || T <- ets:all(),
+ lists:member(ets:info(T, owner), OwnerPids)].
+
+bytes(Words) -> try
+ Words * erlang:system_info(wordsize)
+ catch
+ _:_ -> 0
+ end.
+
+interesting_sups() ->
+ [queue_sups(), quorum_sups(), stream_server_sups(), stream_reader_sups(),
+ conn_sups() | interesting_sups0()].
+
+queue_sups() ->
+ all_vhosts_children(rabbit_amqqueue_sup_sup).
+
+quorum_sups() ->
+    %% TODO: in the future not all Ra servers may be queues, and we
+    %% will need some way to filter them out.
+ case whereis(ra_server_sup_sup) of
+ undefined ->
+ [];
+ _ ->
+ [Pid || {_, Pid, _, _} <-
+ supervisor:which_children(ra_server_sup_sup)]
+ end.
+
+stream_server_sups() -> [osiris_server_sup].
+stream_reader_sups() -> [osiris_replica_reader_sup].
+
+msg_stores() ->
+ all_vhosts_children(msg_store_transient)
+ ++
+ all_vhosts_children(msg_store_persistent).
+
+all_vhosts_children(Name) ->
+ case whereis(rabbit_vhost_sup_sup) of
+ undefined -> [];
+ Pid when is_pid(Pid) ->
+ lists:filtermap(
+ fun({_, VHostSupWrapper, _, _}) ->
+ case supervisor2:find_child(VHostSupWrapper,
+ rabbit_vhost_sup) of
+ [] -> false;
+ [VHostSup] ->
+ case supervisor2:find_child(VHostSup, Name) of
+ [QSup] -> {true, QSup};
+ [] -> false
+ end
+ end
+ end,
+ supervisor:which_children(rabbit_vhost_sup_sup))
+ end.
+
+interesting_sups0() ->
+ MsgIndexProcs = msg_stores(),
+ MgmtDbProcs = [rabbit_mgmt_sup_sup],
+ PluginProcs = plugin_sups(),
+ [MsgIndexProcs, MgmtDbProcs, PluginProcs].
+
+conn_sups() ->
+ Ranches = lists:flatten(ranch_server_sups()),
+ [amqp_sup|Ranches].
+
+ranch_server_sups() ->
+ try
+ ets:match(ranch_server, {{conns_sup, '_'}, '$1'})
+ catch
+ %% Ranch ETS table doesn't exist yet
+ error:badarg -> []
+ end.
+
+with(Sups, With) -> [{Sup, With} || Sup <- Sups].
+
+distinguishers() -> with(queue_sups(), fun queue_type/1) ++
+ with(conn_sups(), fun conn_type/1) ++
+ with(quorum_sups(), fun ra_type/1).
+
+distinguished_interesting_sups() ->
+ [
+ with(queue_sups(), master),
+ with(queue_sups(), slave),
+ with(quorum_sups(), quorum),
+ stream_server_sups(),
+ stream_reader_sups(),
+ with(quorum_sups(), stream),
+ with(conn_sups(), reader),
+ with(conn_sups(), writer),
+ with(conn_sups(), channel),
+ with(conn_sups(), other)]
+ ++ interesting_sups0().
+
+plugin_sups() ->
+ lists:append([plugin_sup(App) ||
+ {App, _, _} <- rabbit_misc:which_applications(),
+ is_plugin(atom_to_list(App))]).
+
+plugin_sup(App) ->
+ case application_controller:get_master(App) of
+ undefined -> [];
+ Master -> case application_master:get_child(Master) of
+ {Pid, _} when is_pid(Pid) -> [process_name(Pid)];
+ Pid when is_pid(Pid) -> [process_name(Pid)];
+ _ -> []
+ end
+ end.
+
+process_name(Pid) ->
+ case process_info(Pid, registered_name) of
+ {registered_name, Name} -> Name;
+ _ -> Pid
+ end.
+
+is_plugin("rabbitmq_" ++ _) -> true;
+is_plugin(App) -> lists:member(App, ?MAGIC_PLUGINS).
+
+aggregate(Names, Sums, Key, Fun) ->
+ lists:sum([extract(Name, Sums, Key, Fun) || Name <- Names]).
+
+extract(Name, Sums, Key, Fun) ->
+ case keyfind(Name, Sums) of
+ {value, Accs} -> Fun(keyfetch(Key, Accs));
+ false -> 0
+ end.
+
+sum_binary(Set) ->
+ sets:fold(fun({_Pt, Sz}, Acc) -> Acc + Sz end, 0, Set).
+
+queue_type(PDict) ->
+ case keyfind(process_name, PDict) of
+ {value, {rabbit_mirror_queue_slave, _}} -> slave;
+ _ -> master
+ end.
+
+conn_type(PDict) ->
+ case keyfind(process_name, PDict) of
+ {value, {rabbit_reader, _}} -> reader;
+ {value, {rabbit_writer, _}} -> writer;
+ {value, {rabbit_channel, _}} -> channel;
+ _ -> other
+ end.
+
+ra_type(PDict) ->
+ case keyfind('$rabbit_vm_category', PDict) of
+ {value, rabbit_stream_coordinator} -> stream;
+ _ -> quorum
+ end.
+
+%%----------------------------------------------------------------------------
+
+%% NB: this code is not RabbitMQ-specific.
+
+-type process() :: pid() | atom().
+-type info_key() :: atom().
+-type info_value() :: any().
+-type info_item() :: {info_key(), info_value()}.
+-type accumulate() :: fun ((info_key(), info_value(), info_value()) ->
+ info_value()).
+-type distinguisher() :: fun (([{term(), term()}]) -> atom()).
+-type distinguishers() :: [{info_key(), distinguisher()}].
+-spec sum_processes([process()], distinguishers(), [info_key()]) ->
+ {[{process(), [info_item()]}], [info_item()]}.
+-spec sum_processes([process()], accumulate(), distinguishers(),
+ [info_item()]) ->
+ {[{process(), [info_item()]}], [info_item()]}.
+
+sum_processes(Names, Distinguishers, Items) ->
+ sum_processes(Names, fun (_, X, Y) -> X + Y end, Distinguishers,
+ [{Item, 0} || Item <- Items]).
+
+%% Summarize the process_info of all processes based on their
+%% '$ancestors' hierarchy, recorded in their process dictionary.
+%%
+%% The function takes
+%%
+%% 1) a list of names/pids of processes that are accumulation points
+%% in the hierarchy.
+%%
+%% 2) a function that aggregates individual info items, taking the
+%% info item key, value and accumulated value as input and producing
+%% a new accumulated value.
+%%
+%% 3) a list of info item key / initial accumulator value pairs.
+%%
+%% The process_info of a process is accumulated at the nearest of its
+%% ancestors that is mentioned in the first argument, or, if no such
+%% ancestor exists or the ancestor information is absent, in a special
+%% 'other' bucket.
+%%
+%% The result is a pair consisting of
+%%
+%% 1) a k/v list, containing for each of the accumulation names/pids a
+%% list of info items, containing the accumulated data, and
+%%
+%% 2) the 'other' bucket - a list of info items containing the
+%% accumulated data of all processes with no matching ancestors
+%%
+%% Note that this function operates on names as well as pids, but
+%% these must match whatever is contained in the '$ancestors' process
+%% dictionary entry. Generally that means that for all registered
+%% processes the name should be used.
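+%%
+%% A minimal usage sketch (illustrative; my_worker_sup is a hypothetical
+%% registered accumulation point, and the result values are made up):
+%%
+%%   {ByName, Other} = sum_processes(
+%%                       [my_worker_sup],
+%%                       fun (_Key, Val, Acc) -> Val + Acc end,
+%%                       [], [{memory, 0}]).
+%%   %% ByName might look like [{my_worker_sup, [{memory, 123456}]}],
+%%   %% and Other like [{memory, 7890123}].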
+sum_processes(Names, Fun, Distinguishers, Acc0) ->
+ Items = [Item || {Item, _Blank0} <- Acc0],
+ {NameAccs, OtherAcc} =
+ lists:foldl(
+ fun (Pid, Acc) ->
+ InfoItems = [registered_name, dictionary | Items],
+ case process_info(Pid, InfoItems) of
+ undefined ->
+ Acc;
+ [{registered_name, RegName}, {dictionary, D} | Vals] ->
+ %% see docs for process_info/2 for the
+ %% special handling of 'registered_name'
+ %% info items
+ Extra = case RegName of
+ [] -> [];
+ N -> [N]
+ end,
+ Name0 = find_ancestor(Extra, D, Names),
+ Name = case keyfind(Name0, Distinguishers) of
+ {value, DistFun} -> {Name0, DistFun(D)};
+ false -> Name0
+ end,
+ accumulate(
+ Name, Fun, orddict:from_list(Vals), Acc, Acc0)
+ end
+ end, {orddict:new(), Acc0}, processes()),
+ %% these conversions aren't strictly necessary; we do them simply
+ %% for the sake of encapsulating the representation.
+ {[{Name, orddict:to_list(Accs)} ||
+ {Name, Accs} <- orddict:to_list(NameAccs)],
+ orddict:to_list(OtherAcc)}.
+
+find_ancestor(Extra, D, Names) ->
+ Ancestors = case keyfind('$ancestors', D) of
+ {value, Ancs} -> Ancs;
+ false -> []
+ end,
+ case lists:splitwith(fun (A) -> not lists:member(A, Names) end,
+ Extra ++ Ancestors) of
+ {_, []} -> undefined;
+ {_, [Name | _]} -> Name
+ end.
+
+accumulate(undefined, Fun, ValsDict, {NameAccs, OtherAcc}, _Acc0) ->
+ {NameAccs, orddict:merge(Fun, ValsDict, OtherAcc)};
+accumulate(Name, Fun, ValsDict, {NameAccs, OtherAcc}, Acc0) ->
+ F = fun (NameAcc) -> orddict:merge(Fun, ValsDict, NameAcc) end,
+ {case orddict:is_key(Name, NameAccs) of
+ true -> orddict:update(Name, F, NameAccs);
+ false -> orddict:store( Name, F(Acc0), NameAccs)
+ end, OtherAcc}.
+
+keyfetch(K, L) -> {value, {_, V}} = lists:keysearch(K, 1, L),
+ V.
+
+keyfind(K, L) -> case lists:keysearch(K, 1, L) of
+ {value, {_, V}} -> {value, V};
+ false -> false
+ end.
diff --git a/deps/rabbit/src/supervised_lifecycle.erl b/deps/rabbit/src/supervised_lifecycle.erl
new file mode 100644
index 0000000000..0e1bb9b5c8
--- /dev/null
+++ b/deps/rabbit/src/supervised_lifecycle.erl
@@ -0,0 +1,53 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+%% Invoke callbacks on startup and termination.
+%%
+%% Hook this process into a supervision hierarchy to have the
+%% callbacks invoked at precise points during the establishment and
+%% teardown of that hierarchy, respectively.
+%%
+%% Or launch the process independently, and link to it, to have the
+%% callbacks invoked on startup and when the linked process
+%% terminates, respectively.
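+%%
+%% A minimal sketch of the first style (illustrative; the my_lifecycle
+%% name and the my_app MFAs are hypothetical):
+%%
+%%   {my_lifecycle,
+%%    {supervised_lifecycle, start_link,
+%%     [my_lifecycle, {my_app, on_start, []}, {my_app, on_stop, []}]},
+%%    transient, 5000, worker, [supervised_lifecycle]}
+%%
+%% placed in a supervisor's child spec list runs my_app:on_start/0 when
+%% the hierarchy comes up and my_app:on_stop/0 when it is torn down.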
+
+-module(supervised_lifecycle).
+
+-behavior(gen_server).
+
+-export([start_link/3]).
+
+%% gen_server callbacks
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
+ code_change/3]).
+
+%%----------------------------------------------------------------------------
+
+-spec start_link(atom(), rabbit_types:mfargs(), rabbit_types:mfargs()) ->
+ rabbit_types:ok_pid_or_error().
+
+start_link(Name, StartMFA, StopMFA) ->
+ gen_server:start_link({local, Name}, ?MODULE, [StartMFA, StopMFA], []).
+
+%%----------------------------------------------------------------------------
+
+init([{M, F, A}, StopMFA]) ->
+ process_flag(trap_exit, true),
+ apply(M, F, A),
+ {ok, StopMFA}.
+
+handle_call(_Request, _From, State) -> {noreply, State}.
+
+handle_cast(_Msg, State) -> {noreply, State}.
+
+handle_info(_Info, State) -> {noreply, State}.
+
+terminate(_Reason, {M, F, A}) ->
+ apply(M, F, A),
+ ok.
+
+code_change(_OldVsn, State, _Extra) -> {ok, State}.
diff --git a/deps/rabbit/src/tcp_listener.erl b/deps/rabbit/src/tcp_listener.erl
new file mode 100644
index 0000000000..93c24ab397
--- /dev/null
+++ b/deps/rabbit/src/tcp_listener.erl
@@ -0,0 +1,90 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(tcp_listener).
+
+%% Represents a running TCP listener (a process that listens for inbound
+%% TCP or TLS connections). Every supported protocol typically has one
+%% or two listeners, plain TCP and (optionally) TLS, but there can
+%% be more, e.g. when multiple network interfaces are involved.
+%%
+%% A listener is represented as a tuple with 6 properties:
+%%
+%% * IP address
+%% * Port
+%% * Node
+%% * Label (human-friendly name, e.g. AMQP 0-9-1)
+%% * Startup callback
+%% * Shutdown callback
+%%
+%% Listeners use Ranch in embedded mode to accept and "bridge" client
+%% connections with protocol entry points such as rabbit_reader.
+%%
+%% Listeners are tracked in a Mnesia table so that they can be
+%%
+%% * Shut down
+%% * Listed (e.g. in the management UI)
+%%
+%% Every tcp_listener process has callbacks that are executed on start
+%% and termination. Those must take care of listener registration
+%% among other things.
+%%
+%% Listeners are supervised by tcp_listener_sup (one supervisor per protocol).
+%%
+%% See also rabbit_networking and tcp_listener_sup.
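+%%
+%% A minimal start sketch (illustrative; the my_proto callbacks are
+%% hypothetical; note that the IP address and port are appended to each
+%% callback's argument list before it is applied):
+%%
+%%   {ok, Pid} = tcp_listener:start_link(
+%%                 {0,0,0,0}, 5672,
+%%                 {my_proto, listener_started, []},
+%%                 {my_proto, listener_stopped, []},
+%%                 "AMQP 0-9-1").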
+
+-behaviour(gen_server).
+
+-export([start_link/5]).
+
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
+ terminate/2, code_change/3]).
+
+-record(state, {on_startup, on_shutdown, label, ip, port}).
+
+%%----------------------------------------------------------------------------
+
+-type mfargs() :: {atom(), atom(), [any()]}.
+
+-spec start_link
+ (inet:ip_address(), inet:port_number(),
+ mfargs(), mfargs(), string()) ->
+ rabbit_types:ok_pid_or_error().
+
+start_link(IPAddress, Port,
+ OnStartup, OnShutdown, Label) ->
+ gen_server:start_link(
+ ?MODULE, {IPAddress, Port,
+ OnStartup, OnShutdown, Label}, []).
+
+%%--------------------------------------------------------------------
+
+init({IPAddress, Port, {M,F,A} = OnStartup, OnShutdown, Label}) ->
+ process_flag(trap_exit, true),
+ error_logger:info_msg(
+ "started ~s on ~s:~p~n",
+ [Label, rabbit_misc:ntoab(IPAddress), Port]),
+ apply(M, F, A ++ [IPAddress, Port]),
+ {ok, #state{on_startup = OnStartup, on_shutdown = OnShutdown,
+ label = Label, ip=IPAddress, port=Port}}.
+
+handle_call(_Request, _From, State) ->
+ {noreply, State}.
+
+handle_cast(_Msg, State) ->
+ {noreply, State}.
+
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+terminate(_Reason, #state{on_shutdown = {M,F,A}, label=Label, ip=IPAddress, port=Port}) ->
+ error_logger:info_msg("stopped ~s on ~s:~p~n",
+ [Label, rabbit_misc:ntoab(IPAddress), Port]),
+ apply(M, F, A ++ [IPAddress, Port]).
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
diff --git a/deps/rabbit/src/tcp_listener_sup.erl b/deps/rabbit/src/tcp_listener_sup.erl
new file mode 100644
index 0000000000..82128bb2af
--- /dev/null
+++ b/deps/rabbit/src/tcp_listener_sup.erl
@@ -0,0 +1,54 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(tcp_listener_sup).
+
+%% Supervises TCP listeners. There is a separate supervisor for every
+%% protocol. In the case of AMQP 0-9-1, it resides under rabbit_sup.
+%% Plugins that provide protocol support (e.g. STOMP) have an instance
+%% of this supervisor in their app supervision tree.
+%%
+%% See also rabbit_networking and tcp_listener.
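+%%
+%% A minimal start sketch (illustrative; the transport, socket options
+%% and the my_proto_sup protocol supervisor are assumptions):
+%%
+%%   tcp_listener_sup:start_link(
+%%     {0,0,0,0}, 5672,
+%%     ranch_tcp, [binary, {backlog, 128}],
+%%     my_proto_sup, [],
+%%     {my_proto, listener_started, []},
+%%     {my_proto, listener_stopped, []},
+%%     10, "AMQP 0-9-1").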
+
+-behaviour(supervisor).
+
+-export([start_link/10]).
+-export([init/1]).
+
+-type mfargs() :: {atom(), atom(), [any()]}.
+
+-spec start_link
+ (inet:ip_address(), inet:port_number(), module(), [gen_tcp:listen_option()],
+ module(), any(), mfargs(), mfargs(), integer(), string()) ->
+ rabbit_types:ok_pid_or_error().
+
+start_link(IPAddress, Port, Transport, SocketOpts, ProtoSup, ProtoOpts, OnStartup, OnShutdown,
+ ConcurrentAcceptorCount, Label) ->
+ supervisor:start_link(
+ ?MODULE, {IPAddress, Port, Transport, SocketOpts, ProtoSup, ProtoOpts, OnStartup, OnShutdown,
+ ConcurrentAcceptorCount, Label}).
+
+init({IPAddress, Port, Transport, SocketOpts, ProtoSup, ProtoOpts, OnStartup, OnShutdown,
+ ConcurrentAcceptorCount, Label}) ->
+ {ok, AckTimeout} = application:get_env(rabbit, ssl_handshake_timeout),
+ MaxConnections = rabbit_misc:get_env(rabbit, connection_max, infinity),
+ RanchListenerOpts = #{
+ num_acceptors => ConcurrentAcceptorCount,
+ max_connections => MaxConnections,
+ handshake_timeout => AckTimeout,
+ connection_type => supervisor,
+ socket_opts => [{ip, IPAddress},
+ {port, Port} |
+ SocketOpts]
+ },
+ Flags = {one_for_all, 10, 10},
+ OurChildSpecStart = {tcp_listener, start_link, [IPAddress, Port, OnStartup, OnShutdown, Label]},
+ OurChildSpec = {tcp_listener, OurChildSpecStart, transient, 16#ffffffff, worker, [tcp_listener]},
+ RanchChildSpec = ranch:child_spec(rabbit_networking:ranch_ref(IPAddress, Port),
+ Transport, RanchListenerOpts,
+ ProtoSup, ProtoOpts),
+ {ok, {Flags, [RanchChildSpec, OurChildSpec]}}.
diff --git a/deps/rabbit/src/term_to_binary_compat.erl b/deps/rabbit/src/term_to_binary_compat.erl
new file mode 100644
index 0000000000..327a846d1f
--- /dev/null
+++ b/deps/rabbit/src/term_to_binary_compat.erl
@@ -0,0 +1,15 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2017-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(term_to_binary_compat).
+
+-include("rabbit.hrl").
+
+-export([term_to_binary_1/1]).
+
+term_to_binary_1(Term) ->
+ term_to_binary(Term, [{minor_version, 1}]).
diff --git a/deps/rabbit/src/vhost.erl b/deps/rabbit/src/vhost.erl
new file mode 100644
index 0000000000..ca704183a0
--- /dev/null
+++ b/deps/rabbit/src/vhost.erl
@@ -0,0 +1,172 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2018-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(vhost).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+-include("vhost.hrl").
+
+-export([
+ new/2,
+ new/3,
+ fields/0,
+ fields/1,
+ info_keys/0,
+ record_version_to_use/0,
+ upgrade/1,
+ upgrade_to/2,
+ pattern_match_all/0,
+ get_name/1,
+ get_limits/1,
+ get_metadata/1,
+ get_description/1,
+ get_tags/1,
+ set_limits/2
+]).
+
+-define(record_version, vhost_v2).
+
+-type(name() :: binary()).
+
+-type(metadata_key() :: atom()).
+
+-type(metadata() :: #{description => binary(),
+ tags => [atom()],
+ metadata_key() => any()} | undefined).
+
+-type vhost() :: vhost_v1:vhost_v1() | vhost_v2().
+
+-record(vhost, {
+ %% name as a binary
+ virtual_host :: name() | '_',
+ %% proplist of limits configured, if any
+ limits :: list() | '_',
+ metadata :: metadata() | '_'
+}).
+
+-type vhost_v2() :: #vhost{
+ virtual_host :: name(),
+ limits :: list(),
+ metadata :: metadata()
+ }.
+
+-type vhost_pattern() :: vhost_v1:vhost_v1_pattern() |
+ vhost_v2_pattern().
+-type vhost_v2_pattern() :: #vhost{
+ virtual_host :: name() | '_',
+ limits :: '_',
+ metadata :: '_'
+ }.
+
+-export_type([name/0,
+ metadata_key/0,
+ metadata/0,
+ vhost/0,
+ vhost_v2/0,
+ vhost_pattern/0,
+ vhost_v2_pattern/0]).
+
+-spec new(name(), list()) -> vhost().
+new(Name, Limits) ->
+ case record_version_to_use() of
+ ?record_version ->
+ #vhost{virtual_host = Name, limits = Limits};
+ _ ->
+ vhost_v1:new(Name, Limits)
+ end.
+
+-spec new(name(), list(), map()) -> vhost().
+new(Name, Limits, Metadata) ->
+ case record_version_to_use() of
+ ?record_version ->
+ #vhost{virtual_host = Name, limits = Limits, metadata = Metadata};
+ _ ->
+ vhost_v1:new(Name, Limits)
+ end.
+
+-spec record_version_to_use() -> vhost_v1 | vhost_v2.
+
+record_version_to_use() ->
+ case rabbit_feature_flags:is_enabled(virtual_host_metadata) of
+ true -> ?record_version;
+ false -> vhost_v1:record_version_to_use()
+ end.
+
+-spec upgrade(vhost()) -> vhost().
+
+upgrade(#vhost{} = VHost) -> VHost;
+upgrade(OldVHost) -> upgrade_to(record_version_to_use(), OldVHost).
+
+-spec upgrade_to
+(vhost_v2, vhost()) -> vhost_v2();
+(vhost_v1, vhost_v1:vhost_v1()) -> vhost_v1:vhost_v1().
+
+upgrade_to(?record_version, #vhost{} = VHost) ->
+ VHost;
+upgrade_to(?record_version, OldVHost) ->
+ Fields = erlang:tuple_to_list(OldVHost) ++ [#{description => <<"">>, tags => []}],
+ #vhost{} = erlang:list_to_tuple(Fields);
+upgrade_to(Version, OldVHost) ->
+ vhost_v1:upgrade_to(Version, OldVHost).
+
+
+fields() ->
+ case record_version_to_use() of
+ ?record_version -> fields(?record_version);
+ _ -> vhost_v1:fields()
+ end.
+
+fields(?record_version) -> record_info(fields, vhost);
+fields(Version) -> vhost_v1:fields(Version).
+
+info_keys() ->
+ case record_version_to_use() of
+ %% note: this reports description and tags separately even though
+ %% they are stored in the metadata map. MK.
+ ?record_version -> [name, description, tags, metadata, tracing, cluster_state];
+ _ -> vhost_v1:info_keys()
+ end.
+
+-spec pattern_match_all() -> vhost_pattern().
+
+pattern_match_all() ->
+ case record_version_to_use() of
+ ?record_version -> #vhost{_ = '_'};
+ _ -> vhost_v1:pattern_match_all()
+ end.
+
+-spec get_name(vhost()) -> name().
+get_name(#vhost{virtual_host = Value}) -> Value;
+get_name(VHost) -> vhost_v1:get_name(VHost).
+
+-spec get_limits(vhost()) -> list().
+get_limits(#vhost{limits = Value}) -> Value;
+get_limits(VHost) -> vhost_v1:get_limits(VHost).
+
+-spec get_metadata(vhost()) -> metadata().
+get_metadata(#vhost{metadata = Value}) -> Value;
+get_metadata(VHost) -> vhost_v1:get_metadata(VHost).
+
+-spec get_description(vhost()) -> binary().
+get_description(#vhost{} = VHost) ->
+ maps:get(description, get_metadata(VHost), undefined);
+get_description(VHost) ->
+ vhost_v1:get_description(VHost).
+
+-spec get_tags(vhost()) -> [atom()].
+get_tags(#vhost{} = VHost) ->
+ maps:get(tags, get_metadata(VHost), undefined);
+get_tags(VHost) ->
+ vhost_v1:get_tags(VHost).
+
+set_limits(VHost, Value) ->
+ case record_version_to_use() of
+ ?record_version ->
+ VHost#vhost{limits = Value};
+ _ ->
+ vhost_v1:set_limits(VHost, Value)
+ end.
diff --git a/deps/rabbit/src/vhost_v1.erl b/deps/rabbit/src/vhost_v1.erl
new file mode 100644
index 0000000000..5b53eb148a
--- /dev/null
+++ b/deps/rabbit/src/vhost_v1.erl
@@ -0,0 +1,106 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2018-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(vhost_v1).
+
+-include("vhost.hrl").
+
+-export([new/2,
+ new/3,
+ upgrade/1,
+ upgrade_to/2,
+ fields/0,
+ fields/1,
+ info_keys/0,
+ field_name/0,
+ record_version_to_use/0,
+ pattern_match_all/0,
+ get_name/1,
+ get_limits/1,
+ get_metadata/1,
+ get_description/1,
+ get_tags/1,
+ set_limits/2
+]).
+
+-define(record_version, ?MODULE).
+
+%% Represents a vhost.
+%%
+%% Historically this record had 2 fields, although the 2nd
+%% (`dummy`) was never used and was always undefined. This is because
+%% single-field records are illegal in OTP.
+%%
+%% As of 3.6.x, the second field holds vhost limits, which are
+%% actually used and have the same default value.
+%% Nonetheless, this change required a migration; see rabbit_upgrade_functions.
+
+-record(vhost, {
+ %% name as a binary
+ virtual_host :: vhost:name() | '_',
+ %% proplist of limits configured, if any
+ limits :: list() | '_'}).
+
+-type vhost() :: vhost_v1().
+-type vhost_v1() :: #vhost{
+ virtual_host :: vhost:name(),
+ limits :: list()
+ }.
+
+-export_type([vhost/0,
+ vhost_v1/0,
+ vhost_pattern/0,
+ vhost_v1_pattern/0]).
+
+
+-spec new(vhost:name(), list()) -> vhost().
+new(Name, Limits) ->
+ #vhost{virtual_host = Name, limits = Limits}.
+
+-spec new(vhost:name(), list(), map()) -> vhost().
+new(Name, Limits, _Metadata) ->
+ #vhost{virtual_host = Name, limits = Limits}.
+
+
+-spec record_version_to_use() -> vhost_v1.
+record_version_to_use() ->
+ ?record_version.
+
+-spec upgrade(vhost()) -> vhost().
+upgrade(#vhost{} = VHost) -> VHost.
+
+-spec upgrade_to(vhost_v1, vhost()) -> vhost().
+upgrade_to(?record_version, #vhost{} = VHost) ->
+ VHost.
+
+fields() -> fields(?record_version).
+
+fields(?record_version) -> record_info(fields, vhost).
+
+field_name() -> #vhost.virtual_host.
+
+info_keys() -> [name, tracing, cluster_state].
+
+-type vhost_pattern() :: vhost_v1_pattern().
+-type vhost_v1_pattern() :: #vhost{
+ virtual_host :: vhost:name() | '_',
+ limits :: '_'
+ }.
+
+-spec pattern_match_all() -> vhost_pattern().
+
+pattern_match_all() -> #vhost{_ = '_'}.
+
+get_name(#vhost{virtual_host = Value}) -> Value.
+get_limits(#vhost{limits = Value}) -> Value.
+
+get_metadata(_VHost) -> undefined.
+get_description(_VHost) -> undefined.
+get_tags(_VHost) -> undefined.
+
+set_limits(VHost, Value) ->
+ VHost#vhost{limits = Value}.
diff --git a/deps/rabbit/test/amqqueue_backward_compatibility_SUITE.erl b/deps/rabbit/test/amqqueue_backward_compatibility_SUITE.erl
new file mode 100644
index 0000000000..a02c4721bc
--- /dev/null
+++ b/deps/rabbit/test/amqqueue_backward_compatibility_SUITE.erl
@@ -0,0 +1,302 @@
+-module(amqqueue_backward_compatibility_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-include("amqqueue.hrl").
+
+-export([all/0,
+ groups/0,
+ init_per_suite/2,
+ end_per_suite/2,
+ init_per_group/2,
+ end_per_group/2,
+ init_per_testcase/2,
+ end_per_testcase/2,
+
+ new_amqqueue_v1_is_amqqueue/1,
+ new_amqqueue_v2_is_amqqueue/1,
+ random_term_is_not_amqqueue/1,
+
+ amqqueue_v1_is_durable/1,
+ amqqueue_v2_is_durable/1,
+ random_term_is_not_durable/1,
+
+ amqqueue_v1_state_matching/1,
+ amqqueue_v2_state_matching/1,
+ random_term_state_matching/1,
+
+ amqqueue_v1_type_matching/1,
+ amqqueue_v2_type_matching/1,
+ random_term_type_matching/1,
+
+ upgrade_v1_to_v2/1
+ ]).
+
+-define(long_tuple, {random_tuple, a, b, c, d, e, f, g, h, i, j, k, l, m,
+ n, o, p, q, r, s, t, u, v, w, x, y, z}).
+
+all() ->
+ [
+ {group, parallel_tests}
+ ].
+
+groups() ->
+ [
+ {parallel_tests, [parallel], [new_amqqueue_v1_is_amqqueue,
+ new_amqqueue_v2_is_amqqueue,
+ random_term_is_not_amqqueue,
+ amqqueue_v1_is_durable,
+ amqqueue_v2_is_durable,
+ random_term_is_not_durable,
+ amqqueue_v1_state_matching,
+ amqqueue_v2_state_matching,
+ random_term_state_matching,
+ amqqueue_v1_type_matching,
+ amqqueue_v2_type_matching,
+ random_term_type_matching]}
+ ].
+
+init_per_suite(_, Config) -> Config.
+end_per_suite(_, Config) -> Config.
+
+init_per_group(_, Config) -> Config.
+end_per_group(_, Config) -> Config.
+
+init_per_testcase(_, Config) -> Config.
+end_per_testcase(_, Config) -> Config.
+
+new_amqqueue_v1_is_amqqueue(_) ->
+ VHost = <<"/">>,
+ Name = rabbit_misc:r(VHost, queue, my_amqqueue_v1),
+ Queue = amqqueue:new_with_version(amqqueue_v1,
+ Name,
+ self(),
+ false,
+ false,
+ none,
+ [],
+ VHost,
+ #{},
+ ?amqqueue_v1_type),
+ ?assert(?is_amqqueue(Queue)),
+ ?assert(?is_amqqueue_v1(Queue)),
+ ?assert(not ?is_amqqueue_v2(Queue)),
+ ?assert(?amqqueue_is_classic(Queue)),
+ ?assert(amqqueue:is_classic(Queue)),
+ ?assert(not ?amqqueue_is_quorum(Queue)),
+ ?assert(not ?amqqueue_vhost_equals(Queue, <<"frazzle">>)),
+ ?assert(?amqqueue_has_valid_pid(Queue)),
+ ?assert(?amqqueue_pid_equals(Queue, self())),
+ ?assert(?amqqueue_pids_are_equal(Queue, Queue)),
+ ?assert(?amqqueue_pid_runs_on_local_node(Queue)),
+ ?assert(amqqueue:qnode(Queue) == node()).
+
+new_amqqueue_v2_is_amqqueue(_) ->
+ VHost = <<"/">>,
+ Name = rabbit_misc:r(VHost, queue, my_amqqueue_v2),
+ Queue = amqqueue:new_with_version(amqqueue_v2,
+ Name,
+ self(),
+ false,
+ false,
+ none,
+ [],
+ VHost,
+ #{},
+ rabbit_classic_queue),
+ ?assert(?is_amqqueue(Queue)),
+ ?assert(?is_amqqueue_v2(Queue)),
+ ?assert(not ?is_amqqueue_v1(Queue)),
+ ?assert(?amqqueue_is_classic(Queue)),
+ ?assert(amqqueue:is_classic(Queue)),
+ ?assert(not ?amqqueue_is_quorum(Queue)),
+ ?assert(not ?amqqueue_vhost_equals(Queue, <<"frazzle">>)),
+ ?assert(?amqqueue_has_valid_pid(Queue)),
+ ?assert(?amqqueue_pid_equals(Queue, self())),
+ ?assert(?amqqueue_pids_are_equal(Queue, Queue)),
+ ?assert(?amqqueue_pid_runs_on_local_node(Queue)),
+ ?assert(amqqueue:qnode(Queue) == node()).
+
+random_term_is_not_amqqueue(_) ->
+ Term = ?long_tuple,
+ ?assert(not ?is_amqqueue(Term)),
+ ?assert(not ?is_amqqueue_v2(Term)),
+ ?assert(not ?is_amqqueue_v1(Term)).
+
+%% -------------------------------------------------------------------
+
+amqqueue_v1_is_durable(_) ->
+ VHost = <<"/">>,
+ Name = rabbit_misc:r(VHost, queue, my_amqqueue_v1),
+ TransientQueue = amqqueue:new_with_version(amqqueue_v1,
+ Name,
+ self(),
+ false,
+ false,
+ none,
+ [],
+ VHost,
+ #{},
+ ?amqqueue_v1_type),
+ DurableQueue = amqqueue:new_with_version(amqqueue_v1,
+ Name,
+ self(),
+ true,
+ false,
+ none,
+ [],
+ VHost,
+ #{},
+ ?amqqueue_v1_type),
+ ?assert(not ?amqqueue_is_durable(TransientQueue)),
+ ?assert(?amqqueue_is_durable(DurableQueue)).
+
+amqqueue_v2_is_durable(_) ->
+ VHost = <<"/">>,
+ Name = rabbit_misc:r(VHost, queue, my_amqqueue_v1),
+ TransientQueue = amqqueue:new_with_version(amqqueue_v2,
+ Name,
+ self(),
+ false,
+ false,
+ none,
+ [],
+ VHost,
+ #{},
+ classic),
+ DurableQueue = amqqueue:new_with_version(amqqueue_v2,
+ Name,
+ self(),
+ true,
+ false,
+ none,
+ [],
+ VHost,
+ #{},
+ classic),
+ ?assert(not ?amqqueue_is_durable(TransientQueue)),
+ ?assert(?amqqueue_is_durable(DurableQueue)).
+
+random_term_is_not_durable(_) ->
+ Term = ?long_tuple,
+ ?assert(not ?amqqueue_is_durable(Term)).
+
+%% -------------------------------------------------------------------
+
+amqqueue_v1_state_matching(_) ->
+ VHost = <<"/">>,
+ Name = rabbit_misc:r(VHost, queue, my_amqqueue_v1),
+ Queue1 = amqqueue:new_with_version(amqqueue_v1,
+ Name,
+ self(),
+ true,
+ false,
+ none,
+ [],
+ VHost,
+ #{},
+ ?amqqueue_v1_type),
+ ?assert(?amqqueue_state_is(Queue1, live)),
+ Queue2 = amqqueue:set_state(Queue1, stopped),
+ ?assert(?amqqueue_state_is(Queue2, stopped)).
+
+amqqueue_v2_state_matching(_) ->
+ VHost = <<"/">>,
+ Name = rabbit_misc:r(VHost, queue, my_amqqueue_v1),
+ Queue1 = amqqueue:new_with_version(amqqueue_v2,
+ Name,
+ self(),
+ true,
+ false,
+ none,
+ [],
+ VHost,
+ #{},
+ classic),
+ ?assert(?amqqueue_state_is(Queue1, live)),
+ Queue2 = amqqueue:set_state(Queue1, stopped),
+ ?assert(?amqqueue_state_is(Queue2, stopped)).
+
+random_term_state_matching(_) ->
+ Term = ?long_tuple,
+ ?assert(not ?amqqueue_state_is(Term, live)).
+
+%% -------------------------------------------------------------------
+
+amqqueue_v1_type_matching(_) ->
+ VHost = <<"/">>,
+ Name = rabbit_misc:r(VHost, queue, my_amqqueue_v1),
+ Queue = amqqueue:new_with_version(amqqueue_v1,
+ Name,
+ self(),
+ true,
+ false,
+ none,
+ [],
+ VHost,
+ #{},
+ ?amqqueue_v1_type),
+ ?assert(?amqqueue_is_classic(Queue)),
+ ?assert(amqqueue:is_classic(Queue)),
+ ?assert(not ?amqqueue_is_quorum(Queue)).
+
+amqqueue_v2_type_matching(_) ->
+ VHost = <<"/">>,
+ Name = rabbit_misc:r(VHost, queue, my_amqqueue_v1),
+ ClassicQueue = amqqueue:new_with_version(amqqueue_v2,
+ Name,
+ self(),
+ true,
+ false,
+ none,
+ [],
+ VHost,
+ #{},
+ rabbit_classic_queue),
+ ?assert(?amqqueue_is_classic(ClassicQueue)),
+ ?assert(amqqueue:is_classic(ClassicQueue)),
+ ?assert(not ?amqqueue_is_quorum(ClassicQueue)),
+ ?assert(not amqqueue:is_quorum(ClassicQueue)),
+ QuorumQueue = amqqueue:new_with_version(amqqueue_v2,
+ Name,
+ self(),
+ true,
+ false,
+ none,
+ [],
+ VHost,
+ #{},
+ rabbit_quorum_queue),
+ ?assert(not ?amqqueue_is_classic(QuorumQueue)),
+ ?assert(not amqqueue:is_classic(QuorumQueue)),
+ ?assert(?amqqueue_is_quorum(QuorumQueue)),
+ ?assert(amqqueue:is_quorum(QuorumQueue)).
+
+random_term_type_matching(_) ->
+ Term = ?long_tuple,
+ ?assert(not ?amqqueue_is_classic(Term)),
+ ?assert(not ?amqqueue_is_quorum(Term)),
+ ?assertException(error, function_clause, amqqueue:is_classic(Term)),
+ ?assertException(error, function_clause, amqqueue:is_quorum(Term)).
+
+%% -------------------------------------------------------------------
+
+upgrade_v1_to_v2(_) ->
+ VHost = <<"/">>,
+ Name = rabbit_misc:r(VHost, queue, my_amqqueue_v1),
+ OldQueue = amqqueue:new_with_version(amqqueue_v1,
+ Name,
+ self(),
+ true,
+ false,
+ none,
+ [],
+ VHost,
+ #{},
+ ?amqqueue_v1_type),
+ ?assert(?is_amqqueue_v1(OldQueue)),
+ ?assert(not ?is_amqqueue_v2(OldQueue)),
+ NewQueue = amqqueue:upgrade_to(amqqueue_v2, OldQueue),
+ ?assert(not ?is_amqqueue_v1(NewQueue)),
+ ?assert(?is_amqqueue_v2(NewQueue)).
diff --git a/deps/rabbit/test/backing_queue_SUITE.erl b/deps/rabbit/test/backing_queue_SUITE.erl
new file mode 100644
index 0000000000..be6004c8b9
--- /dev/null
+++ b/deps/rabbit/test/backing_queue_SUITE.erl
@@ -0,0 +1,1632 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(backing_queue_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include("amqqueue.hrl").
+
+-compile(export_all).
+
+-define(PERSISTENT_MSG_STORE, msg_store_persistent).
+-define(TRANSIENT_MSG_STORE, msg_store_transient).
+
+-define(TIMEOUT, 30000).
+-define(VHOST, <<"/">>).
+
+-define(VARIABLE_QUEUE_TESTCASES, [
+ variable_queue_dynamic_duration_change,
+ variable_queue_partial_segments_delta_thing,
+ variable_queue_all_the_bits_not_covered_elsewhere_A,
+ variable_queue_all_the_bits_not_covered_elsewhere_B,
+ variable_queue_drop,
+ variable_queue_fold_msg_on_disk,
+ variable_queue_dropfetchwhile,
+ variable_queue_dropwhile_varying_ram_duration,
+ variable_queue_fetchwhile_varying_ram_duration,
+ variable_queue_ack_limiting,
+ variable_queue_purge,
+ variable_queue_requeue,
+ variable_queue_requeue_ram_beta,
+ variable_queue_fold,
+ variable_queue_batch_publish,
+ variable_queue_batch_publish_delivered
+ ]).
+
+-define(BACKING_QUEUE_TESTCASES, [
+ bq_queue_index,
+ bq_queue_index_props,
+ {variable_queue_default, [parallel], ?VARIABLE_QUEUE_TESTCASES},
+ {variable_queue_lazy, [parallel], ?VARIABLE_QUEUE_TESTCASES ++
+ [variable_queue_mode_change]},
+ bq_variable_queue_delete_msg_store_files_callback,
+ bq_queue_recover
+ ]).
+
+all() ->
+ [
+ {group, backing_queue_tests}
+ ].
+
+groups() ->
+ [
+ {backing_queue_tests, [], [
+ msg_store,
+ {backing_queue_embed_limit_0, [], ?BACKING_QUEUE_TESTCASES},
+ {backing_queue_embed_limit_1024, [], ?BACKING_QUEUE_TESTCASES}
+ ]}
+ ].
+
+group(backing_queue_tests) ->
+ [
+ %% Several tests based on lazy queues may take more than 30 minutes.
+ {timetrap, {hours, 1}}
+ ];
+group(_) ->
+ [].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(Group, Config) ->
+ case lists:member({group, Group}, all()) of
+ true ->
+ ClusterSize = 2,
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, Group},
+ {rmq_nodes_count, ClusterSize}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps() ++ [
+ fun(C) -> init_per_group1(Group, C) end,
+ fun setup_file_handle_cache/1
+ ]);
+ false ->
+ rabbit_ct_helpers:run_steps(Config, [
+ fun(C) -> init_per_group1(Group, C) end
+ ])
+ end.
+
+init_per_group1(backing_queue_tests, Config) ->
+ Module = rabbit_ct_broker_helpers:rpc(Config, 0,
+ application, get_env, [rabbit, backing_queue_module]),
+ case Module of
+ {ok, rabbit_priority_queue} ->
+ rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, setup_backing_queue_test_group, [Config]);
+ _ ->
+ {skip, rabbit_misc:format(
+ "Backing queue module not supported by this test group: ~p~n",
+ [Module])}
+ end;
+init_per_group1(backing_queue_embed_limit_0, Config) ->
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+ application, set_env, [rabbit, queue_index_embed_msgs_below, 0]),
+ Config;
+init_per_group1(backing_queue_embed_limit_1024, Config) ->
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+ application, set_env, [rabbit, queue_index_embed_msgs_below, 1024]),
+ Config;
+init_per_group1(variable_queue_default, Config) ->
+ rabbit_ct_helpers:set_config(Config, {variable_queue_type, default});
+init_per_group1(variable_queue_lazy, Config) ->
+ rabbit_ct_helpers:set_config(Config, {variable_queue_type, lazy});
+init_per_group1(from_cluster_node1, Config) ->
+ rabbit_ct_helpers:set_config(Config, {test_direction, {0, 1}});
+init_per_group1(from_cluster_node2, Config) ->
+ rabbit_ct_helpers:set_config(Config, {test_direction, {1, 0}});
+init_per_group1(_, Config) ->
+ Config.
+
+setup_file_handle_cache(Config) ->
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, setup_file_handle_cache1, []),
+ Config.
+
+setup_file_handle_cache1() ->
+ %% FIXME: Why are we doing this?
+ application:set_env(rabbit, file_handles_high_watermark, 10),
+ ok = file_handle_cache:set_limit(10),
+ ok.
+
+end_per_group(Group, Config) ->
+ case lists:member({group, Group}, all()) of
+ true ->
+ rabbit_ct_helpers:run_steps(Config,
+ [fun(C) -> end_per_group1(Group, C) end] ++
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps());
+ false ->
+ Config
+ end.
+
+end_per_group1(backing_queue_tests, Config) ->
+ rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, teardown_backing_queue_test_group, [Config]);
+end_per_group1(Group, Config)
+when Group =:= backing_queue_embed_limit_0
+orelse Group =:= backing_queue_embed_limit_1024 ->
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+ application, set_env, [rabbit, queue_index_embed_msgs_below,
+ ?config(rmq_queue_index_embed_msgs_below, Config)]),
+ Config;
+end_per_group1(_, Config) ->
+ Config.
+
+init_per_testcase(Testcase, Config) when Testcase == variable_queue_requeue;
+ Testcase == variable_queue_fold ->
+ ok = rabbit_ct_broker_helpers:rpc(
+ Config, 0, application, set_env,
+ [rabbit, queue_explicit_gc_run_operation_threshold, 0]),
+ rabbit_ct_helpers:testcase_started(Config, Testcase);
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) when Testcase == variable_queue_requeue;
+ Testcase == variable_queue_fold ->
+ ok = rabbit_ct_broker_helpers:rpc(
+ Config, 0, application, set_env,
+ [rabbit, queue_explicit_gc_run_operation_threshold, 1000]),
+ rabbit_ct_helpers:testcase_finished(Config, Testcase);
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+%% -------------------------------------------------------------------
+%% Message store.
+%% -------------------------------------------------------------------
+
+msg_store(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, msg_store1, [Config]).
+
+msg_store1(_Config) ->
+ restart_msg_store_empty(),
+ MsgIds = [msg_id_bin(M) || M <- lists:seq(1,100)],
+ {MsgIds1stHalf, MsgIds2ndHalf} = lists:split(length(MsgIds) div 2, MsgIds),
+ Ref = rabbit_guid:gen(),
+ {Cap, MSCState} = msg_store_client_init_capture(
+ ?PERSISTENT_MSG_STORE, Ref),
+ Ref2 = rabbit_guid:gen(),
+ {Cap2, MSC2State} = msg_store_client_init_capture(
+ ?PERSISTENT_MSG_STORE, Ref2),
+ %% check we don't contain any of the msgs we're about to publish
+ false = msg_store_contains(false, MsgIds, MSCState),
+ %% test confirm logic
+ passed = test_msg_store_confirms([hd(MsgIds)], Cap, MSCState),
+ %% check we don't contain any of the msgs we're about to publish
+ false = msg_store_contains(false, MsgIds, MSCState),
+ %% publish the first half
+ ok = msg_store_write(MsgIds1stHalf, MSCState),
+ %% sync on the first half
+ ok = on_disk_await(Cap, MsgIds1stHalf),
+ %% publish the second half
+ ok = msg_store_write(MsgIds2ndHalf, MSCState),
+ %% check they're all in there
+ true = msg_store_contains(true, MsgIds, MSCState),
+ %% publish the latter half twice so we hit the caching and ref
+ %% count code. We need to do this through a 2nd client since a
+ %% single client is not supposed to write the same message more
+ %% than once without first removing it.
+ ok = msg_store_write(MsgIds2ndHalf, MSC2State),
+ %% check they're still all in there
+ true = msg_store_contains(true, MsgIds, MSCState),
+ %% sync on the 2nd half
+ ok = on_disk_await(Cap2, MsgIds2ndHalf),
+ %% cleanup
+ ok = on_disk_stop(Cap2),
+ ok = rabbit_msg_store:client_delete_and_terminate(MSC2State),
+ ok = on_disk_stop(Cap),
+ %% read them all
+ MSCState1 = msg_store_read(MsgIds, MSCState),
+ %% read them all again - this will hit the cache, not disk
+ MSCState2 = msg_store_read(MsgIds, MSCState1),
+ %% remove them all
+ ok = msg_store_remove(MsgIds, MSCState2),
+ %% check first half doesn't exist
+ false = msg_store_contains(false, MsgIds1stHalf, MSCState2),
+ %% check second half does exist
+ true = msg_store_contains(true, MsgIds2ndHalf, MSCState2),
+ %% read the second half again
+ MSCState3 = msg_store_read(MsgIds2ndHalf, MSCState2),
+ %% read the second half again, just for fun (aka code coverage)
+ MSCState4 = msg_store_read(MsgIds2ndHalf, MSCState3),
+ ok = rabbit_msg_store:client_terminate(MSCState4),
+ %% stop and restart, preserving every other msg in 2nd half
+ ok = rabbit_variable_queue:stop_msg_store(?VHOST),
+ ok = rabbit_variable_queue:start_msg_store(?VHOST,
+ [], {fun ([]) -> finished;
+ ([MsgId|MsgIdsTail])
+ when length(MsgIdsTail) rem 2 == 0 ->
+ {MsgId, 1, MsgIdsTail};
+ ([MsgId|MsgIdsTail]) ->
+ {MsgId, 0, MsgIdsTail}
+ end, MsgIds2ndHalf}),
+ MSCState5 = msg_store_client_init(?PERSISTENT_MSG_STORE, Ref),
+ %% check we have the right msgs left
+ lists:foldl(
+ fun (MsgId, Bool) ->
+ not(Bool = rabbit_msg_store:contains(MsgId, MSCState5))
+ end, false, MsgIds2ndHalf),
+ ok = rabbit_msg_store:client_terminate(MSCState5),
+ %% restart empty
+ restart_msg_store_empty(),
+ MSCState6 = msg_store_client_init(?PERSISTENT_MSG_STORE, Ref),
+ %% check we don't contain any of the msgs
+ false = msg_store_contains(false, MsgIds, MSCState6),
+ %% publish the first half again
+ ok = msg_store_write(MsgIds1stHalf, MSCState6),
+    %% this should force some sort of sync internally, otherwise we may misread
+ ok = rabbit_msg_store:client_terminate(
+ msg_store_read(MsgIds1stHalf, MSCState6)),
+ MSCState7 = msg_store_client_init(?PERSISTENT_MSG_STORE, Ref),
+ ok = msg_store_remove(MsgIds1stHalf, MSCState7),
+ ok = rabbit_msg_store:client_terminate(MSCState7),
+ %% restart empty
+ restart_msg_store_empty(), %% now safe to reuse msg_ids
+ %% push a lot of msgs in... at least 100 files worth
+ {ok, FileSize} = application:get_env(rabbit, msg_store_file_size_limit),
+ PayloadSizeBits = 65536,
+ BigCount = trunc(100 * FileSize / (PayloadSizeBits div 8)),
+ MsgIdsBig = [msg_id_bin(X) || X <- lists:seq(1, BigCount)],
+ Payload = << 0:PayloadSizeBits >>,
+ ok = with_msg_store_client(
+ ?PERSISTENT_MSG_STORE, Ref,
+ fun (MSCStateM) ->
+ [ok = rabbit_msg_store:write(MsgId, Payload, MSCStateM) ||
+ MsgId <- MsgIdsBig],
+ MSCStateM
+ end),
+ %% now read them to ensure we hit the fast client-side reading
+ ok = foreach_with_msg_store_client(
+ ?PERSISTENT_MSG_STORE, Ref,
+ fun (MsgId, MSCStateM) ->
+ {{ok, Payload}, MSCStateN} = rabbit_msg_store:read(
+ MsgId, MSCStateM),
+ MSCStateN
+ end, MsgIdsBig),
+    %% ..., then remove 3s by 1...
+ ok = msg_store_remove(?PERSISTENT_MSG_STORE, Ref,
+ [msg_id_bin(X) || X <- lists:seq(BigCount, 1, -3)]),
+    %% ..., then remove 3s by 2, from the young end first. This hits
+    %% GC (under 50% good data left, but no empty files, so GC must run).
+ ok = msg_store_remove(?PERSISTENT_MSG_STORE, Ref,
+ [msg_id_bin(X) || X <- lists:seq(BigCount-1, 1, -3)]),
+    %% ..., then remove 3s by 3, from the young end first. This hits
+    %% GC...
+ ok = msg_store_remove(?PERSISTENT_MSG_STORE, Ref,
+ [msg_id_bin(X) || X <- lists:seq(BigCount-2, 1, -3)]),
+ %% ensure empty
+ ok = with_msg_store_client(
+ ?PERSISTENT_MSG_STORE, Ref,
+ fun (MSCStateM) ->
+ false = msg_store_contains(false, MsgIdsBig, MSCStateM),
+ MSCStateM
+ end),
+ %%
+ passed = test_msg_store_client_delete_and_terminate(),
+ %% restart empty
+ restart_msg_store_empty(),
+ passed.
+
+restart_msg_store_empty() ->
+ ok = rabbit_variable_queue:stop_msg_store(?VHOST),
+ ok = rabbit_variable_queue:start_msg_store(?VHOST,
+ undefined, {fun (ok) -> finished end, ok}).
+
+msg_id_bin(X) ->
+ erlang:md5(term_to_binary(X)).
+
+on_disk_capture() ->
+ receive
+ {await, MsgIds, Pid} -> on_disk_capture([], MsgIds, Pid);
+ stop -> done
+ end.
+
+on_disk_capture([_|_], _Awaiting, Pid) ->
+ Pid ! {self(), surplus};
+on_disk_capture(OnDisk, Awaiting, Pid) ->
+ receive
+ {on_disk, MsgIdsS} ->
+ MsgIds = gb_sets:to_list(MsgIdsS),
+ on_disk_capture(OnDisk ++ (MsgIds -- Awaiting), Awaiting -- MsgIds,
+ Pid);
+ stop ->
+ done
+ after (case Awaiting of [] -> 200; _ -> ?TIMEOUT end) ->
+ case Awaiting of
+ [] -> Pid ! {self(), arrived}, on_disk_capture();
+ _ -> Pid ! {self(), timeout}
+ end
+ end.
+
+on_disk_await(Pid, MsgIds) when is_list(MsgIds) ->
+ Pid ! {await, MsgIds, self()},
+ receive
+ {Pid, arrived} -> ok;
+ {Pid, Error} -> Error
+ end.
+
+on_disk_stop(Pid) ->
+ MRef = erlang:monitor(process, Pid),
+ Pid ! stop,
+ receive {'DOWN', MRef, process, Pid, _Reason} ->
+ ok
+ end.
+
+msg_store_client_init_capture(MsgStore, Ref) ->
+ Pid = spawn(fun on_disk_capture/0),
+ {Pid, rabbit_vhost_msg_store:client_init(?VHOST, MsgStore, Ref,
+ fun (MsgIds, _ActionTaken) ->
+ Pid ! {on_disk, MsgIds}
+ end, undefined)}.
+
+msg_store_contains(Atom, MsgIds, MSCState) ->
+ Atom = lists:foldl(
+ fun (MsgId, Atom1) when Atom1 =:= Atom ->
+ rabbit_msg_store:contains(MsgId, MSCState) end,
+ Atom, MsgIds).
+
+msg_store_read(MsgIds, MSCState) ->
+ lists:foldl(fun (MsgId, MSCStateM) ->
+ {{ok, MsgId}, MSCStateN} = rabbit_msg_store:read(
+ MsgId, MSCStateM),
+ MSCStateN
+ end, MSCState, MsgIds).
+
+msg_store_write(MsgIds, MSCState) ->
+ ok = lists:foldl(fun (MsgId, ok) ->
+ rabbit_msg_store:write(MsgId, MsgId, MSCState)
+ end, ok, MsgIds).
+
+msg_store_write_flow(MsgIds, MSCState) ->
+ ok = lists:foldl(fun (MsgId, ok) ->
+ rabbit_msg_store:write_flow(MsgId, MsgId, MSCState)
+ end, ok, MsgIds).
+
+msg_store_remove(MsgIds, MSCState) ->
+ rabbit_msg_store:remove(MsgIds, MSCState).
+
+msg_store_remove(MsgStore, Ref, MsgIds) ->
+ with_msg_store_client(MsgStore, Ref,
+ fun (MSCStateM) ->
+ ok = msg_store_remove(MsgIds, MSCStateM),
+ MSCStateM
+ end).
+
+with_msg_store_client(MsgStore, Ref, Fun) ->
+ rabbit_msg_store:client_terminate(
+ Fun(msg_store_client_init(MsgStore, Ref))).
+
+foreach_with_msg_store_client(MsgStore, Ref, Fun, L) ->
+ rabbit_msg_store:client_terminate(
+ lists:foldl(fun (MsgId, MSCState) -> Fun(MsgId, MSCState) end,
+ msg_store_client_init(MsgStore, Ref), L)).
+
+test_msg_store_confirms(MsgIds, Cap, MSCState) ->
+ %% write -> confirmed
+ ok = msg_store_write(MsgIds, MSCState),
+ ok = on_disk_await(Cap, MsgIds),
+ %% remove -> _
+ ok = msg_store_remove(MsgIds, MSCState),
+ ok = on_disk_await(Cap, []),
+ %% write, remove -> confirmed
+ ok = msg_store_write(MsgIds, MSCState),
+ ok = msg_store_remove(MsgIds, MSCState),
+ ok = on_disk_await(Cap, MsgIds),
+ %% write, remove, write -> confirmed, confirmed
+ ok = msg_store_write(MsgIds, MSCState),
+ ok = msg_store_remove(MsgIds, MSCState),
+ ok = msg_store_write(MsgIds, MSCState),
+ ok = on_disk_await(Cap, MsgIds ++ MsgIds),
+ %% remove, write -> confirmed
+ ok = msg_store_remove(MsgIds, MSCState),
+ ok = msg_store_write(MsgIds, MSCState),
+ ok = on_disk_await(Cap, MsgIds),
+ %% remove, write, remove -> confirmed
+ ok = msg_store_remove(MsgIds, MSCState),
+ ok = msg_store_write(MsgIds, MSCState),
+ ok = msg_store_remove(MsgIds, MSCState),
+ ok = on_disk_await(Cap, MsgIds),
+ %% confirmation on timer-based sync
+ passed = test_msg_store_confirm_timer(),
+ passed.
+
+test_msg_store_confirm_timer() ->
+ Ref = rabbit_guid:gen(),
+ MsgId = msg_id_bin(1),
+ Self = self(),
+ MSCState = rabbit_vhost_msg_store:client_init(
+ ?VHOST,
+ ?PERSISTENT_MSG_STORE,
+ Ref,
+ fun (MsgIds, _ActionTaken) ->
+ case gb_sets:is_member(MsgId, MsgIds) of
+ true -> Self ! on_disk;
+ false -> ok
+ end
+ end, undefined),
+ ok = msg_store_write([MsgId], MSCState),
+ ok = msg_store_keep_busy_until_confirm([msg_id_bin(2)], MSCState, false),
+ ok = msg_store_remove([MsgId], MSCState),
+ ok = rabbit_msg_store:client_delete_and_terminate(MSCState),
+ passed.
+
+msg_store_keep_busy_until_confirm(MsgIds, MSCState, Blocked) ->
+ After = case Blocked of
+ false -> 0;
+ true -> ?MAX_WAIT
+ end,
+ Recurse = fun () -> msg_store_keep_busy_until_confirm(
+ MsgIds, MSCState, credit_flow:blocked()) end,
+ receive
+ on_disk -> ok;
+ {bump_credit, Msg} -> credit_flow:handle_bump_msg(Msg),
+ Recurse()
+ after After ->
+ ok = msg_store_write_flow(MsgIds, MSCState),
+ ok = msg_store_remove(MsgIds, MSCState),
+ Recurse()
+ end.
+
+test_msg_store_client_delete_and_terminate() ->
+ restart_msg_store_empty(),
+ MsgIds = [msg_id_bin(M) || M <- lists:seq(1, 10)],
+ Ref = rabbit_guid:gen(),
+ MSCState = msg_store_client_init(?PERSISTENT_MSG_STORE, Ref),
+ ok = msg_store_write(MsgIds, MSCState),
+ %% test the 'dying client' fast path for writes
+ ok = rabbit_msg_store:client_delete_and_terminate(MSCState),
+ passed.
+
+%% -------------------------------------------------------------------
+%% Backing queue.
+%% -------------------------------------------------------------------
+
+setup_backing_queue_test_group(Config) ->
+ {ok, FileSizeLimit} =
+ application:get_env(rabbit, msg_store_file_size_limit),
+ application:set_env(rabbit, msg_store_file_size_limit, 512),
+ {ok, MaxJournal} =
+ application:get_env(rabbit, queue_index_max_journal_entries),
+ application:set_env(rabbit, queue_index_max_journal_entries, 128),
+ application:set_env(rabbit, msg_store_file_size_limit,
+ FileSizeLimit),
+ {ok, Bytes} =
+ application:get_env(rabbit, queue_index_embed_msgs_below),
+ rabbit_ct_helpers:set_config(Config, [
+ {rmq_queue_index_max_journal_entries, MaxJournal},
+ {rmq_queue_index_embed_msgs_below, Bytes}
+ ]).
+
+teardown_backing_queue_test_group(Config) ->
+    %% FIXME: Undo everything the setup function did.
+ application:set_env(rabbit, queue_index_max_journal_entries,
+ ?config(rmq_queue_index_max_journal_entries, Config)),
+ %% We will have restarted the message store, and thus changed
+ %% the order of the children of rabbit_sup. This will cause
+ %% problems if there are subsequent failures - see bug 24262.
+ ok = restart_app(),
+ Config.
+
+bq_queue_index(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, bq_queue_index1, [Config]).
+
+bq_queue_index1(_Config) ->
+ SegmentSize = rabbit_queue_index:next_segment_boundary(0),
+ TwoSegs = SegmentSize + SegmentSize,
+ MostOfASegment = trunc(SegmentSize*0.75),
+ SeqIdsA = lists:seq(0, MostOfASegment-1),
+ SeqIdsB = lists:seq(MostOfASegment, 2*MostOfASegment),
+ SeqIdsC = lists:seq(0, trunc(SegmentSize/2)),
+ SeqIdsD = lists:seq(0, SegmentSize*4),
+
+ with_empty_test_queue(
+ fun (Qi0, QName) ->
+ {0, 0, Qi1} = rabbit_queue_index:bounds(Qi0),
+ {Qi2, SeqIdsMsgIdsA} = queue_index_publish(SeqIdsA, false, Qi1),
+ {0, SegmentSize, Qi3} = rabbit_queue_index:bounds(Qi2),
+ {ReadA, Qi4} = rabbit_queue_index:read(0, SegmentSize, Qi3),
+ ok = verify_read_with_published(false, false, ReadA,
+ lists:reverse(SeqIdsMsgIdsA)),
+ %% should get length back as 0, as all the msgs were transient
+ {0, 0, Qi6} = restart_test_queue(Qi4, QName),
+ {0, 0, Qi7} = rabbit_queue_index:bounds(Qi6),
+ {Qi8, SeqIdsMsgIdsB} = queue_index_publish(SeqIdsB, true, Qi7),
+ {0, TwoSegs, Qi9} = rabbit_queue_index:bounds(Qi8),
+ {ReadB, Qi10} = rabbit_queue_index:read(0, SegmentSize, Qi9),
+ ok = verify_read_with_published(false, true, ReadB,
+ lists:reverse(SeqIdsMsgIdsB)),
+ %% should get length back as MostOfASegment
+ LenB = length(SeqIdsB),
+ BytesB = LenB * 10,
+ {LenB, BytesB, Qi12} = restart_test_queue(Qi10, QName),
+ {0, TwoSegs, Qi13} = rabbit_queue_index:bounds(Qi12),
+ Qi14 = rabbit_queue_index:deliver(SeqIdsB, Qi13),
+ {ReadC, Qi15} = rabbit_queue_index:read(0, SegmentSize, Qi14),
+ ok = verify_read_with_published(true, true, ReadC,
+ lists:reverse(SeqIdsMsgIdsB)),
+ Qi16 = rabbit_queue_index:ack(SeqIdsB, Qi15),
+ Qi17 = rabbit_queue_index:flush(Qi16),
+ %% Everything will have gone now because #pubs == #acks
+ {0, 0, Qi18} = rabbit_queue_index:bounds(Qi17),
+ %% should get length back as 0 because all persistent
+ %% msgs have been acked
+ {0, 0, Qi19} = restart_test_queue(Qi18, QName),
+ Qi19
+ end),
+
+ %% These next bits are just to hit the auto deletion of segment files.
+ %% First, partials:
+ %% a) partial pub+del+ack, then move to new segment
+ with_empty_test_queue(
+ fun (Qi0, _QName) ->
+ {Qi1, _SeqIdsMsgIdsC} = queue_index_publish(SeqIdsC,
+ false, Qi0),
+ Qi2 = rabbit_queue_index:deliver(SeqIdsC, Qi1),
+ Qi3 = rabbit_queue_index:ack(SeqIdsC, Qi2),
+ Qi4 = rabbit_queue_index:flush(Qi3),
+ {Qi5, _SeqIdsMsgIdsC1} = queue_index_publish([SegmentSize],
+ false, Qi4),
+ Qi5
+ end),
+
+ %% b) partial pub+del, then move to new segment, then ack all in old segment
+ with_empty_test_queue(
+ fun (Qi0, _QName) ->
+ {Qi1, _SeqIdsMsgIdsC2} = queue_index_publish(SeqIdsC,
+ false, Qi0),
+ Qi2 = rabbit_queue_index:deliver(SeqIdsC, Qi1),
+ {Qi3, _SeqIdsMsgIdsC3} = queue_index_publish([SegmentSize],
+ false, Qi2),
+ Qi4 = rabbit_queue_index:ack(SeqIdsC, Qi3),
+ rabbit_queue_index:flush(Qi4)
+ end),
+
+ %% c) just fill up several segments of all pubs, then +dels, then +acks
+ with_empty_test_queue(
+ fun (Qi0, _QName) ->
+ {Qi1, _SeqIdsMsgIdsD} = queue_index_publish(SeqIdsD,
+ false, Qi0),
+ Qi2 = rabbit_queue_index:deliver(SeqIdsD, Qi1),
+ Qi3 = rabbit_queue_index:ack(SeqIdsD, Qi2),
+ rabbit_queue_index:flush(Qi3)
+ end),
+
+    %% d) get messages in all states to a segment, then flush, then do
+    %% the same again; this time don't flush, and read. This will hit
+    %% all possibilities in combining the segment with the journal.
+ with_empty_test_queue(
+ fun (Qi0, _QName) ->
+ {Qi1, [Seven,Five,Four|_]} = queue_index_publish([0,1,2,4,5,7],
+ false, Qi0),
+ Qi2 = rabbit_queue_index:deliver([0,1,4], Qi1),
+ Qi3 = rabbit_queue_index:ack([0], Qi2),
+ Qi4 = rabbit_queue_index:flush(Qi3),
+ {Qi5, [Eight,Six|_]} = queue_index_publish([3,6,8], false, Qi4),
+ Qi6 = rabbit_queue_index:deliver([2,3,5,6], Qi5),
+ Qi7 = rabbit_queue_index:ack([1,2,3], Qi6),
+ {[], Qi8} = rabbit_queue_index:read(0, 4, Qi7),
+ {ReadD, Qi9} = rabbit_queue_index:read(4, 7, Qi8),
+ ok = verify_read_with_published(true, false, ReadD,
+ [Four, Five, Six]),
+ {ReadE, Qi10} = rabbit_queue_index:read(7, 9, Qi9),
+ ok = verify_read_with_published(false, false, ReadE,
+ [Seven, Eight]),
+ Qi10
+ end),
+
+ %% e) as for (d), but use terminate instead of read, which will
+ %% exercise journal_minus_segment, not segment_plus_journal.
+ with_empty_test_queue(
+ fun (Qi0, QName) ->
+ {Qi1, _SeqIdsMsgIdsE} = queue_index_publish([0,1,2,4,5,7],
+ true, Qi0),
+ Qi2 = rabbit_queue_index:deliver([0,1,4], Qi1),
+ Qi3 = rabbit_queue_index:ack([0], Qi2),
+ {5, 50, Qi4} = restart_test_queue(Qi3, QName),
+ {Qi5, _SeqIdsMsgIdsF} = queue_index_publish([3,6,8], true, Qi4),
+ Qi6 = rabbit_queue_index:deliver([2,3,5,6], Qi5),
+ Qi7 = rabbit_queue_index:ack([1,2,3], Qi6),
+ {5, 50, Qi8} = restart_test_queue(Qi7, QName),
+ Qi8
+ end),
+
+ ok = rabbit_variable_queue:stop(?VHOST),
+ {ok, _} = rabbit_variable_queue:start(?VHOST, []),
+
+ passed.
+
+bq_queue_index_props(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, bq_queue_index_props1, [Config]).
+
+bq_queue_index_props1(_Config) ->
+ with_empty_test_queue(
+ fun(Qi0, _QName) ->
+ MsgId = rabbit_guid:gen(),
+ Props = #message_properties{expiry=12345, size = 10},
+ Qi1 = rabbit_queue_index:publish(
+ MsgId, 1, Props, true, infinity, Qi0),
+ {[{MsgId, 1, Props, _, _}], Qi2} =
+ rabbit_queue_index:read(1, 2, Qi1),
+ Qi2
+ end),
+
+ ok = rabbit_variable_queue:stop(?VHOST),
+ {ok, _} = rabbit_variable_queue:start(?VHOST, []),
+
+ passed.
+
+bq_variable_queue_delete_msg_store_files_callback(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, bq_variable_queue_delete_msg_store_files_callback1, [Config]).
+
+bq_variable_queue_delete_msg_store_files_callback1(Config) ->
+ ok = restart_msg_store_empty(),
+ QName0 = queue_name(Config, <<"bq_variable_queue_delete_msg_store_files_callback-q">>),
+ {new, Q} = rabbit_amqqueue:declare(QName0, true, false, [], none, <<"acting-user">>),
+ QName = amqqueue:get_name(Q),
+ QPid = amqqueue:get_pid(Q),
+ Payload = <<0:8388608>>, %% 1MB
+ Count = 30,
+ QTState = publish_and_confirm(Q, Payload, Count),
+
+ rabbit_amqqueue:set_ram_duration_target(QPid, 0),
+
+ {ok, Limiter} = rabbit_limiter:start_link(no_id),
+
+ CountMinusOne = Count - 1,
+ {ok, CountMinusOne, {QName, QPid, _AckTag, false, _Msg}, _} =
+ rabbit_amqqueue:basic_get(Q, true, Limiter,
+ <<"bq_variable_queue_delete_msg_store_files_callback1">>,
+ QTState),
+ {ok, CountMinusOne} = rabbit_amqqueue:purge(Q),
+
+ %% give the queue a second to receive the close_fds callback msg
+ timer:sleep(1000),
+
+ rabbit_amqqueue:delete(Q, false, false, <<"acting-user">>),
+ passed.
+
+bq_queue_recover(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, bq_queue_recover1, [Config]).
+
+bq_queue_recover1(Config) ->
+ Count = 2 * rabbit_queue_index:next_segment_boundary(0),
+ QName0 = queue_name(Config, <<"bq_queue_recover-q">>),
+ {new, Q} = rabbit_amqqueue:declare(QName0, true, false, [], none, <<"acting-user">>),
+ QName = amqqueue:get_name(Q),
+ QPid = amqqueue:get_pid(Q),
+ QT = publish_and_confirm(Q, <<>>, Count),
+ SupPid = get_queue_sup_pid(Q),
+ true = is_pid(SupPid),
+ exit(SupPid, kill),
+ exit(QPid, kill),
+ MRef = erlang:monitor(process, QPid),
+ receive {'DOWN', MRef, process, QPid, _Info} -> ok
+ after 10000 -> exit(timeout_waiting_for_queue_death)
+ end,
+ rabbit_amqqueue:stop(?VHOST),
+ {Recovered, []} = rabbit_amqqueue:recover(?VHOST),
+ rabbit_amqqueue:start(Recovered),
+ {ok, Limiter} = rabbit_limiter:start_link(no_id),
+ rabbit_amqqueue:with_or_die(
+ QName,
+ fun (Q1) when ?is_amqqueue(Q1) ->
+ QPid1 = amqqueue:get_pid(Q1),
+ CountMinusOne = Count - 1,
+ {ok, CountMinusOne, {QName, QPid1, _AckTag, true, _Msg}, _} =
+ rabbit_amqqueue:basic_get(Q1, false, Limiter,
+ <<"bq_queue_recover1">>, QT),
+ exit(QPid1, shutdown),
+ VQ1 = variable_queue_init(Q, true),
+ {{_Msg1, true, _AckTag1}, VQ2} =
+ rabbit_variable_queue:fetch(true, VQ1),
+ CountMinusOne = rabbit_variable_queue:len(VQ2),
+ _VQ3 = rabbit_variable_queue:delete_and_terminate(shutdown, VQ2),
+ ok = rabbit_amqqueue:internal_delete(QName, <<"acting-user">>)
+ end),
+ passed.
+
+%% Return the PID of the given queue's supervisor.
+get_queue_sup_pid(Q) when ?is_amqqueue(Q) ->
+ QName = amqqueue:get_name(Q),
+ QPid = amqqueue:get_pid(Q),
+ VHost = QName#resource.virtual_host,
+ {ok, AmqSup} = rabbit_amqqueue_sup_sup:find_for_vhost(VHost, node(QPid)),
+ Sups = supervisor:which_children(AmqSup),
+ get_queue_sup_pid(Sups, QPid).
+
+get_queue_sup_pid([{_, SupPid, _, _} | Rest], QueuePid) ->
+ WorkerPids = [Pid || {_, Pid, _, _} <- supervisor:which_children(SupPid)],
+ case lists:member(QueuePid, WorkerPids) of
+ true -> SupPid;
+ false -> get_queue_sup_pid(Rest, QueuePid)
+ end;
+get_queue_sup_pid([], _QueuePid) ->
+ undefined.
+
+variable_queue_dynamic_duration_change(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, variable_queue_dynamic_duration_change1, [Config]).
+
+variable_queue_dynamic_duration_change1(Config) ->
+ with_fresh_variable_queue(
+ fun variable_queue_dynamic_duration_change2/2,
+ ?config(variable_queue_type, Config)).
+
+variable_queue_dynamic_duration_change2(VQ0, _QName) ->
+ SegmentSize = rabbit_queue_index:next_segment_boundary(0),
+
+ %% start by sending in a couple of segments worth
+ Len = 2*SegmentSize,
+ VQ1 = variable_queue_publish(false, Len, VQ0),
+ %% squeeze and relax queue
+ Churn = Len div 32,
+ VQ2 = publish_fetch_and_ack(Churn, Len, VQ1),
+
+ {Duration, VQ3} = rabbit_variable_queue:ram_duration(VQ2),
+ VQ7 = lists:foldl(
+ fun (Duration1, VQ4) ->
+ {_Duration, VQ5} = rabbit_variable_queue:ram_duration(VQ4),
+ VQ6 = variable_queue_set_ram_duration_target(
+ Duration1, VQ5),
+ publish_fetch_and_ack(Churn, Len, VQ6)
+ end, VQ3, [Duration / 4, 0, Duration / 4, infinity]),
+
+ %% drain
+ {VQ8, AckTags} = variable_queue_fetch(Len, false, false, Len, VQ7),
+ {_Guids, VQ9} = rabbit_variable_queue:ack(AckTags, VQ8),
+ {empty, VQ10} = rabbit_variable_queue:fetch(true, VQ9),
+
+ VQ10.
+
+variable_queue_partial_segments_delta_thing(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, variable_queue_partial_segments_delta_thing1, [Config]).
+
+variable_queue_partial_segments_delta_thing1(Config) ->
+ with_fresh_variable_queue(
+ fun variable_queue_partial_segments_delta_thing2/2,
+ ?config(variable_queue_type, Config)).
+
+variable_queue_partial_segments_delta_thing2(VQ0, _QName) ->
+ SegmentSize = rabbit_queue_index:next_segment_boundary(0),
+ HalfSegment = SegmentSize div 2,
+ OneAndAHalfSegment = SegmentSize + HalfSegment,
+ VQ1 = variable_queue_publish(true, OneAndAHalfSegment, VQ0),
+ {_Duration, VQ2} = rabbit_variable_queue:ram_duration(VQ1),
+ VQ3 = check_variable_queue_status(
+ variable_queue_set_ram_duration_target(0, VQ2),
+ %% one segment in q3, and half a segment in delta
+ [{delta, {delta, SegmentSize, HalfSegment, OneAndAHalfSegment}},
+ {q3, SegmentSize},
+ {len, SegmentSize + HalfSegment}]),
+ VQ4 = variable_queue_set_ram_duration_target(infinity, VQ3),
+ VQ5 = check_variable_queue_status(
+ variable_queue_publish(true, 1, VQ4),
+ %% one alpha, but it's in the same segment as the deltas
+ [{q1, 1},
+ {delta, {delta, SegmentSize, HalfSegment, OneAndAHalfSegment}},
+ {q3, SegmentSize},
+ {len, SegmentSize + HalfSegment + 1}]),
+ {VQ6, AckTags} = variable_queue_fetch(SegmentSize, true, false,
+ SegmentSize + HalfSegment + 1, VQ5),
+ VQ7 = check_variable_queue_status(
+ VQ6,
+ %% the half segment should now be in q3
+ [{q1, 1},
+ {delta, {delta, undefined, 0, undefined}},
+ {q3, HalfSegment},
+ {len, HalfSegment + 1}]),
+ {VQ8, AckTags1} = variable_queue_fetch(HalfSegment + 1, true, false,
+ HalfSegment + 1, VQ7),
+ {_Guids, VQ9} = rabbit_variable_queue:ack(AckTags ++ AckTags1, VQ8),
+ %% should be empty now
+ {empty, VQ10} = rabbit_variable_queue:fetch(true, VQ9),
+ VQ10.
+
+variable_queue_all_the_bits_not_covered_elsewhere_A(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, variable_queue_all_the_bits_not_covered_elsewhere_A1, [Config]).
+
+variable_queue_all_the_bits_not_covered_elsewhere_A1(Config) ->
+ with_fresh_variable_queue(
+ fun variable_queue_all_the_bits_not_covered_elsewhere_A2/2,
+ ?config(variable_queue_type, Config)).
+
+variable_queue_all_the_bits_not_covered_elsewhere_A2(VQ0, QName) ->
+ Count = 2 * rabbit_queue_index:next_segment_boundary(0),
+ VQ1 = variable_queue_publish(true, Count, VQ0),
+ VQ2 = variable_queue_publish(false, Count, VQ1),
+ VQ3 = variable_queue_set_ram_duration_target(0, VQ2),
+ {VQ4, _AckTags} = variable_queue_fetch(Count, true, false,
+ Count + Count, VQ3),
+ {VQ5, _AckTags1} = variable_queue_fetch(Count, false, false,
+ Count, VQ4),
+ _VQ6 = rabbit_variable_queue:terminate(shutdown, VQ5),
+ VQ7 = variable_queue_init(test_amqqueue(QName, true), true),
+ {{_Msg1, true, _AckTag1}, VQ8} = rabbit_variable_queue:fetch(true, VQ7),
+ Count1 = rabbit_variable_queue:len(VQ8),
+ VQ9 = variable_queue_publish(false, 1, VQ8),
+ VQ10 = variable_queue_set_ram_duration_target(0, VQ9),
+ {VQ11, _AckTags2} = variable_queue_fetch(Count1, true, true, Count, VQ10),
+ {VQ12, _AckTags3} = variable_queue_fetch(1, false, false, 1, VQ11),
+ VQ12.
+
+variable_queue_all_the_bits_not_covered_elsewhere_B(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, variable_queue_all_the_bits_not_covered_elsewhere_B1, [Config]).
+
+variable_queue_all_the_bits_not_covered_elsewhere_B1(Config) ->
+ with_fresh_variable_queue(
+ fun variable_queue_all_the_bits_not_covered_elsewhere_B2/2,
+ ?config(variable_queue_type, Config)).
+
+variable_queue_all_the_bits_not_covered_elsewhere_B2(VQ0, QName) ->
+ VQ1 = variable_queue_set_ram_duration_target(0, VQ0),
+ VQ2 = variable_queue_publish(false, 4, VQ1),
+ {VQ3, AckTags} = variable_queue_fetch(2, false, false, 4, VQ2),
+ {_Guids, VQ4} =
+ rabbit_variable_queue:requeue(AckTags, VQ3),
+ VQ5 = rabbit_variable_queue:timeout(VQ4),
+ _VQ6 = rabbit_variable_queue:terminate(shutdown, VQ5),
+ VQ7 = variable_queue_init(test_amqqueue(QName, true), true),
+ {empty, VQ8} = rabbit_variable_queue:fetch(false, VQ7),
+ VQ8.
+
+variable_queue_drop(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, variable_queue_drop1, [Config]).
+
+variable_queue_drop1(Config) ->
+ with_fresh_variable_queue(
+ fun variable_queue_drop2/2,
+ ?config(variable_queue_type, Config)).
+
+variable_queue_drop2(VQ0, _QName) ->
+    %% start by sending in a message
+ VQ1 = variable_queue_publish(false, 1, VQ0),
+ %% drop message with AckRequired = true
+ {{MsgId, AckTag}, VQ2} = rabbit_variable_queue:drop(true, VQ1),
+ true = rabbit_variable_queue:is_empty(VQ2),
+    true = AckTag =/= undefined,
+ %% drop again -> empty
+ {empty, VQ3} = rabbit_variable_queue:drop(false, VQ2),
+ %% requeue
+ {[MsgId], VQ4} = rabbit_variable_queue:requeue([AckTag], VQ3),
+ %% drop message with AckRequired = false
+ {{MsgId, undefined}, VQ5} = rabbit_variable_queue:drop(false, VQ4),
+ true = rabbit_variable_queue:is_empty(VQ5),
+ VQ5.
+
+variable_queue_fold_msg_on_disk(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, variable_queue_fold_msg_on_disk1, [Config]).
+
+variable_queue_fold_msg_on_disk1(Config) ->
+ with_fresh_variable_queue(
+ fun variable_queue_fold_msg_on_disk2/2,
+ ?config(variable_queue_type, Config)).
+
+variable_queue_fold_msg_on_disk2(VQ0, _QName) ->
+ VQ1 = variable_queue_publish(true, 1, VQ0),
+ {VQ2, AckTags} = variable_queue_fetch(1, true, false, 1, VQ1),
+ {ok, VQ3} = rabbit_variable_queue:ackfold(fun (_M, _A, ok) -> ok end,
+ ok, VQ2, AckTags),
+ VQ3.
+
+variable_queue_dropfetchwhile(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, variable_queue_dropfetchwhile1, [Config]).
+
+variable_queue_dropfetchwhile1(Config) ->
+ with_fresh_variable_queue(
+ fun variable_queue_dropfetchwhile2/2,
+ ?config(variable_queue_type, Config)).
+
+variable_queue_dropfetchwhile2(VQ0, _QName) ->
+ Count = 10,
+
+ %% add messages with sequential expiry
+ VQ1 = variable_queue_publish(
+ false, 1, Count,
+ fun (N, Props) -> Props#message_properties{expiry = N} end,
+ fun erlang:term_to_binary/1, VQ0),
+
+ %% fetch the first 5 messages
+ {#message_properties{expiry = 6}, {Msgs, AckTags}, VQ2} =
+ rabbit_variable_queue:fetchwhile(
+ fun (#message_properties{expiry = Expiry}) -> Expiry =< 5 end,
+ fun (Msg, AckTag, {MsgAcc, AckAcc}) ->
+ {[Msg | MsgAcc], [AckTag | AckAcc]}
+ end, {[], []}, VQ1),
+ true = lists:seq(1, 5) == [msg2int(M) || M <- lists:reverse(Msgs)],
+
+ %% requeue them
+ {_MsgIds, VQ3} = rabbit_variable_queue:requeue(AckTags, VQ2),
+
+ %% drop the first 5 messages
+ {#message_properties{expiry = 6}, VQ4} =
+ rabbit_variable_queue:dropwhile(
+          fun (#message_properties{expiry = Expiry}) -> Expiry =< 5 end, VQ3),
+
+ %% fetch 5
+ VQ5 = lists:foldl(fun (N, VQN) ->
+ {{Msg, _, _}, VQM} =
+ rabbit_variable_queue:fetch(false, VQN),
+ true = msg2int(Msg) == N,
+ VQM
+ end, VQ4, lists:seq(6, Count)),
+
+ %% should be empty now
+ true = rabbit_variable_queue:is_empty(VQ5),
+
+ VQ5.
+
+variable_queue_dropwhile_varying_ram_duration(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, variable_queue_dropwhile_varying_ram_duration1, [Config]).
+
+variable_queue_dropwhile_varying_ram_duration1(Config) ->
+ with_fresh_variable_queue(
+ fun variable_queue_dropwhile_varying_ram_duration2/2,
+ ?config(variable_queue_type, Config)).
+
+variable_queue_dropwhile_varying_ram_duration2(VQ0, _QName) ->
+ test_dropfetchwhile_varying_ram_duration(
+ fun (VQ1) ->
+ {_, VQ2} = rabbit_variable_queue:dropwhile(
+ fun (_) -> false end, VQ1),
+ VQ2
+ end, VQ0).
+
+variable_queue_fetchwhile_varying_ram_duration(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, variable_queue_fetchwhile_varying_ram_duration1, [Config]).
+
+variable_queue_fetchwhile_varying_ram_duration1(Config) ->
+ with_fresh_variable_queue(
+ fun variable_queue_fetchwhile_varying_ram_duration2/2,
+ ?config(variable_queue_type, Config)).
+
+variable_queue_fetchwhile_varying_ram_duration2(VQ0, _QName) ->
+ test_dropfetchwhile_varying_ram_duration(
+ fun (VQ1) ->
+ {_, ok, VQ2} = rabbit_variable_queue:fetchwhile(
+ fun (_) -> false end,
+ fun (_, _, A) -> A end,
+ ok, VQ1),
+ VQ2
+ end, VQ0).
+
+test_dropfetchwhile_varying_ram_duration(Fun, VQ0) ->
+ VQ1 = variable_queue_publish(false, 1, VQ0),
+ VQ2 = variable_queue_set_ram_duration_target(0, VQ1),
+ VQ3 = Fun(VQ2),
+ VQ4 = variable_queue_set_ram_duration_target(infinity, VQ3),
+ VQ5 = variable_queue_publish(false, 1, VQ4),
+ VQ6 = Fun(VQ5),
+ VQ6.
+
+variable_queue_ack_limiting(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, variable_queue_ack_limiting1, [Config]).
+
+variable_queue_ack_limiting1(Config) ->
+ with_fresh_variable_queue(
+ fun variable_queue_ack_limiting2/2,
+ ?config(variable_queue_type, Config)).
+
+variable_queue_ack_limiting2(VQ0, _QName) ->
+ %% start by sending in a bunch of messages
+ Len = 1024,
+ VQ1 = variable_queue_publish(false, Len, VQ0),
+
+ %% squeeze and relax queue
+ Churn = Len div 32,
+ VQ2 = publish_fetch_and_ack(Churn, Len, VQ1),
+
+ %% update stats for duration
+ {_Duration, VQ3} = rabbit_variable_queue:ram_duration(VQ2),
+
+ %% fetch half the messages
+ {VQ4, _AckTags} = variable_queue_fetch(Len div 2, false, false, Len, VQ3),
+
+ VQ5 = check_variable_queue_status(
+ VQ4, [{len, Len div 2},
+ {messages_unacknowledged_ram, Len div 2},
+ {messages_ready_ram, Len div 2},
+ {messages_ram, Len}]),
+
+ %% ensure all acks go to disk on 0 duration target
+ VQ6 = check_variable_queue_status(
+ variable_queue_set_ram_duration_target(0, VQ5),
+ [{len, Len div 2},
+ {target_ram_count, 0},
+ {messages_unacknowledged_ram, 0},
+ {messages_ready_ram, 0},
+ {messages_ram, 0}]),
+
+ VQ6.
+
+variable_queue_purge(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, variable_queue_purge1, [Config]).
+
+variable_queue_purge1(Config) ->
+ with_fresh_variable_queue(
+ fun variable_queue_purge2/2,
+ ?config(variable_queue_type, Config)).
+
+variable_queue_purge2(VQ0, _QName) ->
+ LenDepth = fun (VQ) ->
+ {rabbit_variable_queue:len(VQ),
+ rabbit_variable_queue:depth(VQ)}
+ end,
+ VQ1 = variable_queue_publish(false, 10, VQ0),
+ {VQ2, Acks} = variable_queue_fetch(6, false, false, 10, VQ1),
+ {4, VQ3} = rabbit_variable_queue:purge(VQ2),
+ {0, 6} = LenDepth(VQ3),
+ {_, VQ4} = rabbit_variable_queue:requeue(lists:sublist(Acks, 2), VQ3),
+ {2, 6} = LenDepth(VQ4),
+ VQ5 = rabbit_variable_queue:purge_acks(VQ4),
+ {2, 2} = LenDepth(VQ5),
+ VQ5.
+
+variable_queue_requeue(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, variable_queue_requeue1, [Config]).
+
+variable_queue_requeue1(Config) ->
+ with_fresh_variable_queue(
+ fun variable_queue_requeue2/2,
+ ?config(variable_queue_type, Config)).
+
+variable_queue_requeue2(VQ0, _QName) ->
+ {_PendingMsgs, RequeuedMsgs, FreshMsgs, VQ1} =
+ variable_queue_with_holes(VQ0),
+ Msgs =
+ lists:zip(RequeuedMsgs,
+ lists:duplicate(length(RequeuedMsgs), true)) ++
+ lists:zip(FreshMsgs,
+ lists:duplicate(length(FreshMsgs), false)),
+ VQ2 = lists:foldl(fun ({I, Requeued}, VQa) ->
+ {{M, MRequeued, _}, VQb} =
+ rabbit_variable_queue:fetch(true, VQa),
+ Requeued = MRequeued, %% assertion
+ I = msg2int(M), %% assertion
+ VQb
+ end, VQ1, Msgs),
+ {empty, VQ3} = rabbit_variable_queue:fetch(true, VQ2),
+ VQ3.
+
+%% requeue from ram_pending_ack into q3, move to delta and then empty queue
+variable_queue_requeue_ram_beta(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, variable_queue_requeue_ram_beta1, [Config]).
+
+variable_queue_requeue_ram_beta1(Config) ->
+ with_fresh_variable_queue(
+ fun variable_queue_requeue_ram_beta2/2,
+ ?config(variable_queue_type, Config)).
+
+variable_queue_requeue_ram_beta2(VQ0, _QName) ->
+ Count = rabbit_queue_index:next_segment_boundary(0)*2 + 2,
+ VQ1 = variable_queue_publish(false, Count, VQ0),
+ {VQ2, AcksR} = variable_queue_fetch(Count, false, false, Count, VQ1),
+ {Back, Front} = lists:split(Count div 2, AcksR),
+ {_, VQ3} = rabbit_variable_queue:requeue(erlang:tl(Back), VQ2),
+ VQ4 = variable_queue_set_ram_duration_target(0, VQ3),
+ {_, VQ5} = rabbit_variable_queue:requeue([erlang:hd(Back)], VQ4),
+ VQ6 = requeue_one_by_one(Front, VQ5),
+ {VQ7, AcksAll} = variable_queue_fetch(Count, false, true, Count, VQ6),
+ {_, VQ8} = rabbit_variable_queue:ack(AcksAll, VQ7),
+ VQ8.
+
+variable_queue_fold(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, variable_queue_fold1, [Config]).
+
+variable_queue_fold1(Config) ->
+ with_fresh_variable_queue(
+ fun variable_queue_fold2/2,
+ ?config(variable_queue_type, Config)).
+
+variable_queue_fold2(VQ0, _QName) ->
+ {PendingMsgs, RequeuedMsgs, FreshMsgs, VQ1} =
+ variable_queue_with_holes(VQ0),
+ Count = rabbit_variable_queue:depth(VQ1),
+ Msgs = lists:sort(PendingMsgs ++ RequeuedMsgs ++ FreshMsgs),
+ lists:foldl(fun (Cut, VQ2) ->
+ test_variable_queue_fold(Cut, Msgs, PendingMsgs, VQ2)
+ end, VQ1, [0, 1, 2, Count div 2,
+ Count - 1, Count, Count + 1, Count * 2]).
+
+test_variable_queue_fold(Cut, Msgs, PendingMsgs, VQ0) ->
+ {Acc, VQ1} = rabbit_variable_queue:fold(
+ fun (M, _, Pending, A) ->
+ MInt = msg2int(M),
+ Pending = lists:member(MInt, PendingMsgs), %% assert
+ case MInt =< Cut of
+ true -> {cont, [MInt | A]};
+ false -> {stop, A}
+ end
+ end, [], VQ0),
+ Expected = lists:takewhile(fun (I) -> I =< Cut end, Msgs),
+ Expected = lists:reverse(Acc), %% assertion
+ VQ1.
+
+variable_queue_batch_publish(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, variable_queue_batch_publish1, [Config]).
+
+variable_queue_batch_publish1(Config) ->
+ with_fresh_variable_queue(
+ fun variable_queue_batch_publish2/2,
+ ?config(variable_queue_type, Config)).
+
+variable_queue_batch_publish2(VQ, _QName) ->
+ Count = 10,
+ VQ1 = variable_queue_batch_publish(true, Count, VQ),
+ Count = rabbit_variable_queue:len(VQ1),
+ VQ1.
+
+variable_queue_batch_publish_delivered(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, variable_queue_batch_publish_delivered1, [Config]).
+
+variable_queue_batch_publish_delivered1(Config) ->
+ with_fresh_variable_queue(
+ fun variable_queue_batch_publish_delivered2/2,
+ ?config(variable_queue_type, Config)).
+
+variable_queue_batch_publish_delivered2(VQ, _QName) ->
+ Count = 10,
+ VQ1 = variable_queue_batch_publish_delivered(true, Count, VQ),
+ Count = rabbit_variable_queue:depth(VQ1),
+ VQ1.
+
+%% same as variable_queue_requeue_ram_beta but randomly changing
+%% the queue mode after every step.
+variable_queue_mode_change(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, variable_queue_mode_change1, [Config]).
+
+variable_queue_mode_change1(Config) ->
+ with_fresh_variable_queue(
+ fun variable_queue_mode_change2/2,
+ ?config(variable_queue_type, Config)).
+
+variable_queue_mode_change2(VQ0, _QName) ->
+ Count = rabbit_queue_index:next_segment_boundary(0)*2 + 2,
+ VQ1 = variable_queue_publish(false, Count, VQ0),
+ VQ2 = maybe_switch_queue_mode(VQ1),
+ {VQ3, AcksR} = variable_queue_fetch(Count, false, false, Count, VQ2),
+ VQ4 = maybe_switch_queue_mode(VQ3),
+ {Back, Front} = lists:split(Count div 2, AcksR),
+ {_, VQ5} = rabbit_variable_queue:requeue(erlang:tl(Back), VQ4),
+ VQ6 = maybe_switch_queue_mode(VQ5),
+ VQ7 = variable_queue_set_ram_duration_target(0, VQ6),
+ VQ8 = maybe_switch_queue_mode(VQ7),
+ {_, VQ9} = rabbit_variable_queue:requeue([erlang:hd(Back)], VQ8),
+ VQ10 = maybe_switch_queue_mode(VQ9),
+ VQ11 = requeue_one_by_one(Front, VQ10),
+ VQ12 = maybe_switch_queue_mode(VQ11),
+ {VQ13, AcksAll} = variable_queue_fetch(Count, false, true, Count, VQ12),
+ VQ14 = maybe_switch_queue_mode(VQ13),
+ {_, VQ15} = rabbit_variable_queue:ack(AcksAll, VQ14),
+ VQ16 = maybe_switch_queue_mode(VQ15),
+ VQ16.
+
+maybe_switch_queue_mode(VQ) ->
+ Mode = random_queue_mode(),
+ set_queue_mode(Mode, VQ).
+
+random_queue_mode() ->
+ Modes = [lazy, default],
+ lists:nth(rand:uniform(length(Modes)), Modes).
+
+pub_res({_, VQS}) ->
+ VQS;
+pub_res(VQS) ->
+ VQS.
+
+make_publish(IsPersistent, PayloadFun, PropFun, N) ->
+ {rabbit_basic:message(
+ rabbit_misc:r(<<>>, exchange, <<>>),
+ <<>>, #'P_basic'{delivery_mode = case IsPersistent of
+ true -> 2;
+ false -> 1
+ end},
+ PayloadFun(N)),
+ PropFun(N, #message_properties{size = 10}),
+ false}.
+
+make_publish_delivered(IsPersistent, PayloadFun, PropFun, N) ->
+ {rabbit_basic:message(
+ rabbit_misc:r(<<>>, exchange, <<>>),
+ <<>>, #'P_basic'{delivery_mode = case IsPersistent of
+ true -> 2;
+ false -> 1
+ end},
+ PayloadFun(N)),
+ PropFun(N, #message_properties{size = 10})}.
+
+queue_name(Config, Name) ->
+ Name1 = iolist_to_binary(rabbit_ct_helpers:config_to_testcase_name(Config, Name)),
+ queue_name(Name1).
+
+queue_name(Name) ->
+ rabbit_misc:r(<<"/">>, queue, Name).
+
+test_queue() ->
+ queue_name(rabbit_guid:gen()).
+
+init_test_queue(QName) ->
+ PRef = rabbit_guid:gen(),
+ PersistentClient = msg_store_client_init(?PERSISTENT_MSG_STORE, PRef),
+ Res = rabbit_queue_index:recover(
+ QName, [], false,
+ fun (MsgId) ->
+ rabbit_msg_store:contains(MsgId, PersistentClient)
+ end,
+ fun nop/1, fun nop/1),
+ ok = rabbit_msg_store:client_delete_and_terminate(PersistentClient),
+ Res.
+
+restart_test_queue(Qi, QName) ->
+ _ = rabbit_queue_index:terminate(?VHOST, [], Qi),
+ ok = rabbit_variable_queue:stop(?VHOST),
+ {ok, _} = rabbit_variable_queue:start(?VHOST, [QName]),
+ init_test_queue(QName).
+
+empty_test_queue(QName) ->
+ ok = rabbit_variable_queue:stop(?VHOST),
+ {ok, _} = rabbit_variable_queue:start(?VHOST, []),
+ {0, 0, Qi} = init_test_queue(QName),
+ _ = rabbit_queue_index:delete_and_terminate(Qi),
+ ok.
+
+unin_empty_test_queue(QName) ->
+ {0, 0, Qi} = init_test_queue(QName),
+ _ = rabbit_queue_index:delete_and_terminate(Qi),
+ ok.
+
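+%% Run Fun against a freshly initialised, empty queue index; the index is
+%% deleted and terminated once Fun returns.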
+with_empty_test_queue(Fun) ->
+ QName = test_queue(),
+ ok = empty_test_queue(QName),
+ {0, 0, Qi} = init_test_queue(QName),
+ rabbit_queue_index:delete_and_terminate(Fun(Qi, QName)).
+
+restart_app() ->
+ rabbit:stop(),
+ rabbit:start().
+
+queue_index_publish(SeqIds, Persistent, Qi) ->
+ Ref = rabbit_guid:gen(),
+ MsgStore = case Persistent of
+ true -> ?PERSISTENT_MSG_STORE;
+ false -> ?TRANSIENT_MSG_STORE
+ end,
+ MSCState = msg_store_client_init(MsgStore, Ref),
+ {A, B = [{_SeqId, LastMsgIdWritten} | _]} =
+ lists:foldl(
+ fun (SeqId, {QiN, SeqIdsMsgIdsAcc}) ->
+ MsgId = rabbit_guid:gen(),
+ QiM = rabbit_queue_index:publish(
+ MsgId, SeqId, #message_properties{size = 10},
+ Persistent, infinity, QiN),
+ ok = rabbit_msg_store:write(MsgId, MsgId, MSCState),
+ {QiM, [{SeqId, MsgId} | SeqIdsMsgIdsAcc]}
+ end, {Qi, []}, SeqIds),
+ %% do this just to force all of the publishes through to the msg_store:
+ true = rabbit_msg_store:contains(LastMsgIdWritten, MSCState),
+ ok = rabbit_msg_store:client_delete_and_terminate(MSCState),
+ {A, B}.
+
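+%% Check each read entry against the published {SeqId, MsgId} pairs and the
+%% expected delivered/persistent flags. A mismatch returns 'ko' (deliberately
+%% not 'ok') so that the callers' "ok = verify_read_with_published(...)"
+%% matches crash the test.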
+verify_read_with_published(_Delivered, _Persistent, [], _) ->
+ ok;
+verify_read_with_published(Delivered, Persistent,
+ [{MsgId, SeqId, _Props, Persistent, Delivered}|Read],
+ [{SeqId, MsgId}|Published]) ->
+ verify_read_with_published(Delivered, Persistent, Read, Published);
+verify_read_with_published(_Delivered, _Persistent, _Read, _Published) ->
+ ko.
+
+nop(_) -> ok.
+nop(_, _) -> ok.
+
+msg_store_client_init(MsgStore, Ref) ->
+ rabbit_vhost_msg_store:client_init(?VHOST, MsgStore, Ref, undefined, undefined).
+
+variable_queue_init(Q, Recover) ->
+ rabbit_variable_queue:init(
+ Q, case Recover of
+ true -> non_clean_shutdown;
+ false -> new
+ end, fun nop/2, fun nop/2, fun nop/1, fun nop/1).
+
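+%% Publish Count persistent, confirmed messages through rabbit_queue_type,
+%% then block until a confirm has arrived for every sequence number.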
+publish_and_confirm(Q, Payload, Count) ->
+ Seqs = lists:seq(1, Count),
+ QTState0 = rabbit_queue_type:new(Q, rabbit_queue_type:init()),
+ QTState =
+ lists:foldl(
+ fun (Seq, Acc0) ->
+ Msg = rabbit_basic:message(rabbit_misc:r(<<>>, exchange, <<>>),
+ <<>>, #'P_basic'{delivery_mode = 2},
+ Payload),
+ Delivery = #delivery{mandatory = false, sender = self(),
+ confirm = true, message = Msg, msg_seq_no = Seq,
+ flow = noflow},
+ {ok, Acc, _Actions} = rabbit_queue_type:deliver([Q], Delivery, Acc0),
+ Acc
+ end, QTState0, Seqs),
+ wait_for_confirms(gb_sets:from_list(Seqs)),
+ QTState.
+
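+%% Confirms arrive as '$gen_cast' messages; both shapes handled below are
+%% accepted. Drain them until the unconfirmed set is empty, or fail after
+%% ?TIMEOUT.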
+wait_for_confirms(Unconfirmed) ->
+ case gb_sets:is_empty(Unconfirmed) of
+ true -> ok;
+ false ->
+ receive
+ {'$gen_cast',
+ {queue_event, _QName, {confirm, Confirmed, _}}} ->
+ wait_for_confirms(
+ rabbit_misc:gb_sets_difference(
+ Unconfirmed, gb_sets:from_list(Confirmed)));
+ {'$gen_cast', {confirm, Confirmed, _}} ->
+ wait_for_confirms(
+ rabbit_misc:gb_sets_difference(
+ Unconfirmed, gb_sets:from_list(Confirmed)))
+ after ?TIMEOUT ->
+ flush(),
+ exit(timeout_waiting_for_confirm)
+ end
+ end.
+
+with_fresh_variable_queue(Fun, Mode) ->
+ Ref = make_ref(),
+ Me = self(),
+ %% Run in a separate process since rabbit_msg_store will send
+ %% bump_credit messages and we want to ignore them
+ spawn_link(fun() ->
+ QName = test_queue(),
+ ok = unin_empty_test_queue(QName),
+ VQ = variable_queue_init(test_amqqueue(QName, true), false),
+ S0 = variable_queue_status(VQ),
+ assert_props(S0, [{q1, 0}, {q2, 0},
+ {delta,
+ {delta, undefined, 0, undefined}},
+ {q3, 0}, {q4, 0},
+ {len, 0}]),
+ VQ1 = set_queue_mode(Mode, VQ),
+ try
+ _ = rabbit_variable_queue:delete_and_terminate(
+ shutdown, Fun(VQ1, QName)),
+ Me ! Ref
+ catch
+ Type:Error:Stacktrace ->
+ Me ! {Ref, Type, Error, Stacktrace}
+ end
+ end),
+ receive
+ Ref -> ok;
+ {Ref, Type, Error, ST} -> exit({Type, Error, ST})
+ end,
+ passed.
+
+set_queue_mode(Mode, VQ) ->
+ VQ1 = rabbit_variable_queue:set_queue_mode(Mode, VQ),
+ S1 = variable_queue_status(VQ1),
+ assert_props(S1, [{mode, Mode}]),
+ VQ1.
+
+variable_queue_publish(IsPersistent, Count, VQ) ->
+ variable_queue_publish(IsPersistent, Count, fun (_N, P) -> P end, VQ).
+
+variable_queue_publish(IsPersistent, Count, PropFun, VQ) ->
+ variable_queue_publish(IsPersistent, 1, Count, PropFun,
+ fun (_N) -> <<>> end, VQ).
+
+variable_queue_publish(IsPersistent, Start, Count, PropFun, PayloadFun, VQ) ->
+ variable_queue_wait_for_shuffling_end(
+ lists:foldl(
+        fun (N, VQN) ->
+            rabbit_variable_queue:publish(
+ rabbit_basic:message(
+ rabbit_misc:r(<<>>, exchange, <<>>),
+ <<>>, #'P_basic'{delivery_mode = case IsPersistent of
+ true -> 2;
+ false -> 1
+ end},
+ PayloadFun(N)),
+ PropFun(N, #message_properties{size = 10}),
+ false, self(), noflow, VQN)
+ end, VQ, lists:seq(Start, Start + Count - 1))).
+
+variable_queue_batch_publish(IsPersistent, Count, VQ) ->
+ variable_queue_batch_publish(IsPersistent, Count, fun (_N, P) -> P end, VQ).
+
+variable_queue_batch_publish(IsPersistent, Count, PropFun, VQ) ->
+ variable_queue_batch_publish(IsPersistent, 1, Count, PropFun,
+ fun (_N) -> <<>> end, VQ).
+
+variable_queue_batch_publish(IsPersistent, Start, Count, PropFun, PayloadFun, VQ) ->
+ variable_queue_batch_publish0(IsPersistent, Start, Count, PropFun,
+ PayloadFun, fun make_publish/4,
+ fun rabbit_variable_queue:batch_publish/4,
+ VQ).
+
+variable_queue_batch_publish_delivered(IsPersistent, Count, VQ) ->
+ variable_queue_batch_publish_delivered(IsPersistent, Count, fun (_N, P) -> P end, VQ).
+
+variable_queue_batch_publish_delivered(IsPersistent, Count, PropFun, VQ) ->
+ variable_queue_batch_publish_delivered(IsPersistent, 1, Count, PropFun,
+ fun (_N) -> <<>> end, VQ).
+
+variable_queue_batch_publish_delivered(IsPersistent, Start, Count, PropFun, PayloadFun, VQ) ->
+ variable_queue_batch_publish0(IsPersistent, Start, Count, PropFun,
+ PayloadFun, fun make_publish_delivered/4,
+ fun rabbit_variable_queue:batch_publish_delivered/4,
+ VQ).
+
+variable_queue_batch_publish0(IsPersistent, Start, Count, PropFun, PayloadFun,
+ MakePubFun, PubFun, VQ) ->
+ Publishes =
+ [MakePubFun(IsPersistent, PayloadFun, PropFun, N)
+ || N <- lists:seq(Start, Start + Count - 1)],
+ Res = PubFun(Publishes, self(), noflow, VQ),
+ VQ1 = pub_res(Res),
+ variable_queue_wait_for_shuffling_end(VQ1).
+
+variable_queue_fetch(Count, IsPersistent, IsDelivered, Len, VQ) ->
+ lists:foldl(fun (N, {VQN, AckTagsAcc}) ->
+ Rem = Len - N,
+ {{#basic_message { is_persistent = IsPersistent },
+ IsDelivered, AckTagN}, VQM} =
+ rabbit_variable_queue:fetch(true, VQN),
+ Rem = rabbit_variable_queue:len(VQM),
+ {VQM, [AckTagN | AckTagsAcc]}
+ end, {VQ, []}, lists:seq(1, Count)).
+
+test_amqqueue(QName, Durable) ->
+ rabbit_amqqueue:pseudo_queue(QName, self(), Durable).
+
+assert_prop(List, Prop, Value) ->
+    case proplists:get_value(Prop, List) of
+        Value -> ok;
+        _     -> exit({assertion_failed, Prop, expected, Value, got, List})
+    end.
+
+assert_props(List, PropVals) ->
+ [assert_prop(List, Prop, Value) || {Prop, Value} <- PropVals].
+
+variable_queue_set_ram_duration_target(Duration, VQ) ->
+ variable_queue_wait_for_shuffling_end(
+ rabbit_variable_queue:set_ram_duration_target(Duration, VQ)).
+
+publish_fetch_and_ack(0, _Len, VQ0) ->
+ VQ0;
+publish_fetch_and_ack(N, Len, VQ0) ->
+ VQ1 = variable_queue_publish(false, 1, VQ0),
+ {{_Msg, false, AckTag}, VQ2} = rabbit_variable_queue:fetch(true, VQ1),
+ Len = rabbit_variable_queue:len(VQ2),
+ {_Guids, VQ3} = rabbit_variable_queue:ack([AckTag], VQ2),
+ publish_fetch_and_ack(N-1, Len, VQ3).
+
+variable_queue_status(VQ) ->
+ Keys = rabbit_backing_queue:info_keys() -- [backing_queue_status],
+ [{K, rabbit_variable_queue:info(K, VQ)} || K <- Keys] ++
+ rabbit_variable_queue:info(backing_queue_status, VQ).
+
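+%% While credit_flow reports this process as blocked, consume the
+%% bump_credit messages sent by the message store (see
+%% with_fresh_variable_queue/2) and resume the queue until unblocked.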
+variable_queue_wait_for_shuffling_end(VQ) ->
+ case credit_flow:blocked() of
+ false -> VQ;
+ true ->
+ receive
+ {bump_credit, Msg} ->
+ credit_flow:handle_bump_msg(Msg),
+ variable_queue_wait_for_shuffling_end(
+ rabbit_variable_queue:resume(VQ))
+ end
+ end.
+
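+%% Test payloads are integers encoded with term_to_binary/1; payload
+%% fragments are stored in reverse order, hence the reverse before decoding.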
+msg2int(#basic_message{content = #content{ payload_fragments_rev = P}}) ->
+ binary_to_term(list_to_binary(lists:reverse(P))).
+
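+%% Select every Interval-th {Ack, N} pair, offset by Rem; the three requeue
+%% phases in variable_queue_with_holes/1 use offsets 0, 1 and 2 to get
+%% disjoint, evenly spread subsets.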
+ack_subset(AckSeqs, Interval, Rem) ->
+ lists:filter(fun ({_Ack, N}) -> (N + Rem) rem Interval == 0 end, AckSeqs).
+
+requeue_one_by_one(Acks, VQ) ->
+ lists:foldl(fun (AckTag, VQN) ->
+ {_MsgId, VQM} = rabbit_variable_queue:requeue(
+ [AckTag], VQN),
+ VQM
+ end, VQ, Acks).
+
+%% Create a vq with messages in q1, delta, and q3, and holes (in the
+%% form of pending acks) in the latter two.
+variable_queue_with_holes(VQ0) ->
+ Interval = 2048, %% should match vq:IO_BATCH_SIZE
+ Count = rabbit_queue_index:next_segment_boundary(0)*2 + 2 * Interval,
+ Seq = lists:seq(1, Count),
+ VQ1 = variable_queue_set_ram_duration_target(0, VQ0),
+ VQ2 = variable_queue_publish(
+ false, 1, Count,
+ fun (_, P) -> P end, fun erlang:term_to_binary/1, VQ1),
+ {VQ3, AcksR} = variable_queue_fetch(Count, false, false, Count, VQ2),
+ Acks = lists:reverse(AcksR),
+ AckSeqs = lists:zip(Acks, Seq),
+ [{Subset1, _Seq1}, {Subset2, _Seq2}, {Subset3, Seq3}] =
+ [lists:unzip(ack_subset(AckSeqs, Interval, I)) || I <- [0, 1, 2]],
+ %% we requeue in three phases in order to exercise requeuing logic
+ %% in various vq states
+ {_MsgIds, VQ4} = rabbit_variable_queue:requeue(
+ Acks -- (Subset1 ++ Subset2 ++ Subset3), VQ3),
+ VQ5 = requeue_one_by_one(Subset1, VQ4),
+ %% by now we have some messages (and holes) in delta
+ VQ6 = requeue_one_by_one(Subset2, VQ5),
+ VQ7 = variable_queue_set_ram_duration_target(infinity, VQ6),
+ %% add the q1 tail
+ VQ8 = variable_queue_publish(
+ true, Count + 1, Interval,
+ fun (_, P) -> P end, fun erlang:term_to_binary/1, VQ7),
+ %% assertions
+ Status = variable_queue_status(VQ8),
+
+ vq_with_holes_assertions(VQ8, proplists:get_value(mode, Status)),
+ Depth = Count + Interval,
+ Depth = rabbit_variable_queue:depth(VQ8),
+ Len = Depth - length(Subset3),
+ Len = rabbit_variable_queue:len(VQ8),
+
+ {Seq3, Seq -- Seq3, lists:seq(Count + 1, Count + Interval), VQ8}.
+
+vq_with_holes_assertions(VQ, default) ->
+ [false =
+ case V of
+ {delta, _, 0, _} -> true;
+ 0 -> true;
+ _ -> false
+ end || {K, V} <- variable_queue_status(VQ),
+ lists:member(K, [q1, delta, q3])];
+vq_with_holes_assertions(VQ, lazy) ->
+ [false =
+ case V of
+ {delta, _, 0, _} -> true;
+ _ -> false
+ end || {K, V} <- variable_queue_status(VQ),
+ lists:member(K, [delta])].
+
+check_variable_queue_status(VQ0, Props) ->
+ VQ1 = variable_queue_wait_for_shuffling_end(VQ0),
+ S = variable_queue_status(VQ1),
+ assert_props(S, Props),
+ VQ1.
+
+flush() ->
+ receive
+ Any ->
+ ct:pal("flush ~p", [Any]),
+ flush()
+ after 0 ->
+ ok
+ end.
diff --git a/deps/rabbit/test/channel_interceptor_SUITE.erl b/deps/rabbit/test/channel_interceptor_SUITE.erl
new file mode 100644
index 0000000000..e0a8050598
--- /dev/null
+++ b/deps/rabbit/test/channel_interceptor_SUITE.erl
@@ -0,0 +1,108 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2016-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(channel_interceptor_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, non_parallel_tests}
+ ].
+
+groups() ->
+ [
+ {non_parallel_tests, [], [
+ register_interceptor,
+ register_failing_interceptors
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(_, Config) ->
+ Config.
+
+end_per_group(_, Config) ->
+ Config.
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, Testcase}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_testcase(Testcase, Config) ->
+ Config1 = rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()),
+ rabbit_ct_helpers:testcase_finished(Config1, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+register_interceptor(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, register_interceptor1, [Config, dummy_interceptor]).
+
+register_interceptor1(Config, Interceptor) ->
+ PredefinedChannels = rabbit_channel:list(),
+
+ Ch1 = rabbit_ct_client_helpers:open_channel(Config, 0),
+
+ QName = <<"register_interceptor-q">>,
+ amqp_channel:call(Ch1, #'queue.declare'{queue = QName}),
+
+ [ChannelProc] = rabbit_channel:list() -- PredefinedChannels,
+
+ [{interceptors, []}] = rabbit_channel:info(ChannelProc, [interceptors]),
+
+ check_send_receive(Ch1, QName, <<"bar">>, <<"bar">>),
+
+ ok = rabbit_registry:register(channel_interceptor,
+ <<"dummy interceptor">>,
+ Interceptor),
+ [{interceptors, [{Interceptor, undefined}]}] =
+ rabbit_channel:info(ChannelProc, [interceptors]),
+
+ check_send_receive(Ch1, QName, <<"bar">>, <<"">>),
+
+ ok = rabbit_registry:unregister(channel_interceptor,
+ <<"dummy interceptor">>),
+ [{interceptors, []}] = rabbit_channel:info(ChannelProc, [interceptors]),
+
+ check_send_receive(Ch1, QName, <<"bar">>, <<"bar">>),
+ passed.
+
+register_failing_interceptors(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, register_interceptor1, [Config, failing_dummy_interceptor]).
+
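+%% Publish Send to QName and assert that a basic.get returns Receive; with
+%% the interceptor registered, the cases above expect the payload to come
+%% back blanked (<<"">>).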
+check_send_receive(Ch1, QName, Send, Receive) ->
+ amqp_channel:call(Ch1,
+ #'basic.publish'{routing_key = QName},
+ #amqp_msg{payload = Send}),
+
+ {#'basic.get_ok'{}, #amqp_msg{payload = Receive}} =
+ amqp_channel:call(Ch1, #'basic.get'{queue = QName,
+ no_ack = true}).
diff --git a/deps/rabbit/test/channel_operation_timeout_SUITE.erl b/deps/rabbit/test/channel_operation_timeout_SUITE.erl
new file mode 100644
index 0000000000..15e0188604
--- /dev/null
+++ b/deps/rabbit/test/channel_operation_timeout_SUITE.erl
@@ -0,0 +1,198 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(channel_operation_timeout_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include("amqqueue.hrl").
+
+-compile([export_all]).
+
+-import(rabbit_misc, [pget/2]).
+
+-define(CONFIG, [cluster_ab]).
+-define(DEFAULT_VHOST, <<"/">>).
+-define(QRESOURCE(Q), rabbit_misc:r(?DEFAULT_VHOST, queue, Q)).
+-define(TIMEOUT_TEST_MSG, <<"timeout_test_msg!">>).
+-define(DELAY, 25).
+
+all() ->
+ [
+ notify_down_all
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(_, Config) ->
+ Config.
+
+end_per_group(_, Config) ->
+ Config.
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase),
+ ClusterSize = 2,
+ TestNumber = rabbit_ct_helpers:testcase_number(Config, ?MODULE, Testcase),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodes_count, ClusterSize},
+ {rmq_nodename_suffix, Testcase},
+ {tcp_ports_base, {skip_n_nodes, TestNumber * ClusterSize}}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_testcase(Testcase, Config) ->
+ Config1 = rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()),
+ rabbit_ct_helpers:testcase_finished(Config1, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+notify_down_all(Config) ->
+ Rabbit = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ RabbitCh = rabbit_ct_client_helpers:open_channel(Config, 0),
+ HareCh = rabbit_ct_client_helpers:open_channel(Config, 1),
+
+ ct:pal("one"),
+ %% success
+ set_channel_operation_timeout_config(Config, 1000),
+ configure_bq(Config),
+ QCfg0 = qconfig(RabbitCh, <<"q0">>, <<"ex0">>, true, false),
+ declare(QCfg0),
+ ct:pal("two"),
+ %% Testing rabbit_amqqueue:notify_down_all via rabbit_channel.
+ %% Consumer count = 0 after correct channel termination and
+ %% notification of queues via delegate:call/3
+ true = (0 =/= length(get_consumers(Config, Rabbit, ?DEFAULT_VHOST))),
+ rabbit_ct_client_helpers:close_channel(RabbitCh),
+ 0 = length(get_consumers(Config, Rabbit, ?DEFAULT_VHOST)),
+ false = is_process_alive(RabbitCh),
+ ct:pal("three"),
+
+ %% fail
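+    %% With a 10ms operation timeout and the blocking test backing queue
+    %% (channel_operation_timeout_test_queue), closing the channel must
+    %% still result in the channel process terminating.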
+ set_channel_operation_timeout_config(Config, 10),
+ QCfg2 = qconfig(HareCh, <<"q1">>, <<"ex1">>, true, false),
+ declare(QCfg2),
+ publish(QCfg2, ?TIMEOUT_TEST_MSG),
+ timer:sleep(?DELAY),
+ rabbit_ct_client_helpers:close_channel(HareCh),
+ timer:sleep(?DELAY),
+ false = is_process_alive(HareCh),
+
+ pass.
+
+%% -------------------------
+%% Internal helper functions
+%% -------------------------
+
+set_channel_operation_timeout_config(Config, Timeout) ->
+ [ok = Ret
+ || Ret <- rabbit_ct_broker_helpers:rpc_all(Config,
+ application, set_env, [rabbit, channel_operation_timeout, Timeout])],
+ ok.
+
+set_channel_operation_backing_queue(Config) ->
+ [ok = Ret
+ || Ret <- rabbit_ct_broker_helpers:rpc_all(Config,
+ application, set_env,
+ [rabbit, backing_queue_module, channel_operation_timeout_test_queue])],
+ ok.
+
+re_enable_priority_queue(Config) ->
+ [ok = Ret
+ || Ret <- rabbit_ct_broker_helpers:rpc_all(Config,
+ rabbit_priority_queue, enable, [])],
+ ok.
+
+declare(QCfg) ->
+ QDeclare = #'queue.declare'{queue = Q = pget(name, QCfg), durable = true},
+ #'queue.declare_ok'{} = amqp_channel:call(Ch = pget(ch, QCfg), QDeclare),
+
+ ExDeclare = #'exchange.declare'{exchange = Ex = pget(ex, QCfg)},
+ #'exchange.declare_ok'{} = amqp_channel:call(Ch, ExDeclare),
+
+ #'queue.bind_ok'{} =
+ amqp_channel:call(Ch, #'queue.bind'{queue = Q,
+ exchange = Ex,
+ routing_key = Q}),
+ maybe_subscribe(QCfg).
+
+maybe_subscribe(QCfg) ->
+ case pget(consume, QCfg) of
+ true ->
+ Sub = #'basic.consume'{queue = pget(name, QCfg)},
+ Ch = pget(ch, QCfg),
+ Del = pget(deliver, QCfg),
+ amqp_channel:subscribe(Ch, Sub,
+ spawn(fun() -> consume(Ch, Del) end));
+ _ -> ok
+ end.
+
+consume(_Ch, false) -> receive_nothing();
+consume(Ch, Deliver = true) ->
+ receive
+ {#'basic.deliver'{}, _Msg} ->
+ consume(Ch, Deliver)
+ end.
+
+publish(QCfg, Msg) ->
+ Publish = #'basic.publish'{exchange = pget(ex, QCfg),
+ routing_key = pget(name, QCfg)},
+ amqp_channel:call(pget(ch, QCfg), Publish,
+ #amqp_msg{payload = Msg}).
+
+get_consumers(Config, Node, VHost) when is_atom(Node),
+ is_binary(VHost) ->
+ rabbit_ct_broker_helpers:rpc(Config, Node,
+ rabbit_amqqueue, consumers_all, [VHost]).
+
+get_amqqueue(QName0, []) ->
+ throw({not_found, QName0});
+get_amqqueue(QName0, [Q | Rem]) when ?is_amqqueue(Q) ->
+ QName1 = amqqueue:get_name(Q),
+ compare_amqqueue(QName0, QName1, Q, Rem).
+
+compare_amqqueue(QName, QName, Q, _Rem) ->
+ Q;
+compare_amqqueue(QName, _, _, Rem) ->
+ get_amqqueue(QName, Rem).
+
+qconfig(Ch, Name, Ex, Consume, Deliver) ->
+ [{ch, Ch}, {name, Name}, {ex,Ex}, {consume, Consume}, {deliver, Deliver}].
+
+receive_nothing() ->
+ receive
+ after infinity -> void
+ end.
+
+unhandled_req(Fun) ->
+ try
+ Fun()
+ catch
+ exit:{{shutdown,{_, ?NOT_FOUND, _}}, _} -> ok;
+ _:Reason -> {error, Reason}
+ end.
+
+configure_bq(Config) ->
+ ok = set_channel_operation_backing_queue(Config),
+ ok = re_enable_priority_queue(Config),
+ ok = rabbit_ct_broker_helpers:add_code_path_to_all_nodes(Config,
+ ?MODULE).
diff --git a/deps/rabbit/test/channel_operation_timeout_test_queue.erl b/deps/rabbit/test/channel_operation_timeout_test_queue.erl
new file mode 100644
index 0000000000..3190dad7a8
--- /dev/null
+++ b/deps/rabbit/test/channel_operation_timeout_test_queue.erl
@@ -0,0 +1,323 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(channel_operation_timeout_test_queue).
+
+-export([init/3, terminate/2, delete_and_terminate/2, delete_crashed/1,
+ purge/1, purge_acks/1,
+ publish/6, publish_delivered/5,
+ batch_publish/4, batch_publish_delivered/4,
+ discard/4, drain_confirmed/1,
+ dropwhile/2, fetchwhile/4, fetch/2, drop/2, ack/2, requeue/2,
+ ackfold/4, fold/3, len/1, is_empty/1, depth/1,
+ set_ram_duration_target/2, ram_duration/1, needs_timeout/1, timeout/1,
+ handle_pre_hibernate/1, resume/1, msg_rates/1,
+ info/2, invoke/3, is_duplicate/2, set_queue_mode/2,
+ start/2, stop/1, zip_msgs_and_acks/4, handle_info/2]).
+
+%%----------------------------------------------------------------------------
+%% This test backing queue follows the variable queue implementation, with
+%% the exception that it will introduce infinite delays on some operations if
+%% the test message has been published, and is awaiting acknowledgement in the
+%% queue index. Test message is "timeout_test_msg!".
+%%
+%%----------------------------------------------------------------------------
+
+-behaviour(rabbit_backing_queue).
+
+-record(vqstate,
+ { q1,
+ q2,
+ delta,
+ q3,
+ q4,
+ next_seq_id,
+ ram_pending_ack, %% msgs using store, still in RAM
+ disk_pending_ack, %% msgs in store, paged out
+ qi_pending_ack, %% msgs using qi, *can't* be paged out
+ index_state,
+ msg_store_clients,
+ durable,
+ transient_threshold,
+ qi_embed_msgs_below,
+
+ len, %% w/o unacked
+ bytes, %% w/o unacked
+ unacked_bytes,
+ persistent_count, %% w unacked
+ persistent_bytes, %% w unacked
+ delta_transient_bytes, %%
+
+ target_ram_count,
+ ram_msg_count, %% w/o unacked
+ ram_msg_count_prev,
+ ram_ack_count_prev,
+ ram_bytes, %% w unacked
+ out_counter,
+ in_counter,
+ rates,
+ msgs_on_disk,
+ msg_indices_on_disk,
+ unconfirmed,
+ confirmed,
+ ack_out_counter,
+ ack_in_counter,
+ %% Unlike the other counters these two do not feed into
+ %% #rates{} and get reset
+ disk_read_count,
+ disk_write_count,
+
+ io_batch_size,
+
+ %% default queue or lazy queue
+ mode,
+ %% number of reduce_memory_usage executions, once it
+ %% reaches a threshold the queue will manually trigger a runtime GC
+ %% see: maybe_execute_gc/1
+ memory_reduction_run_count,
+ %% Queue data is grouped by VHost. We need to store it
+ %% to work with queue index.
+ virtual_host,
+ waiting_bump = false
+ }).
+
+-record(rates, { in, out, ack_in, ack_out, timestamp }).
+
+-record(msg_status,
+ { seq_id,
+ msg_id,
+ msg,
+ is_persistent,
+ is_delivered,
+ msg_in_store,
+ index_on_disk,
+ persist_to,
+ msg_props
+ }).
+
+-record(delta,
+ { start_seq_id, %% start_seq_id is inclusive
+ count,
+ transient,
+ end_seq_id %% end_seq_id is exclusive
+ }).
+
+
+-include("rabbit.hrl").
+-include("rabbit_framing.hrl").
+
+-define(QUEUE, lqueue).
+-define(TIMEOUT_TEST_MSG, <<"timeout_test_msg!">>).
+
+%%----------------------------------------------------------------------------
+
+-type seq_id() :: non_neg_integer().
+
+-type rates() :: #rates { in :: float(),
+ out :: float(),
+ ack_in :: float(),
+ ack_out :: float(),
+ timestamp :: rabbit_types:timestamp()}.
+
+-type delta() :: #delta { start_seq_id :: non_neg_integer(),
+ count :: non_neg_integer(),
+ end_seq_id :: non_neg_integer() }.
+
+%% The compiler (rightfully) complains that ack() and state() are
+%% unused. For this reason we duplicate a -spec from
+%% rabbit_backing_queue with the only intent being to remove
+%% warnings. The problem here is that we can't parameterise the BQ
+%% behaviour by these two types as we would like to. We still leave
+%% these here for documentation purposes.
+-type ack() :: seq_id().
+-type state() :: #vqstate {
+ q1 :: ?QUEUE:?QUEUE(),
+ q2 :: ?QUEUE:?QUEUE(),
+ delta :: delta(),
+ q3 :: ?QUEUE:?QUEUE(),
+ q4 :: ?QUEUE:?QUEUE(),
+ next_seq_id :: seq_id(),
+ ram_pending_ack :: gb_trees:tree(),
+ disk_pending_ack :: gb_trees:tree(),
+ qi_pending_ack :: gb_trees:tree(),
+ index_state :: any(),
+ msg_store_clients :: 'undefined' | {{any(), binary()},
+ {any(), binary()}},
+ durable :: boolean(),
+ transient_threshold :: non_neg_integer(),
+ qi_embed_msgs_below :: non_neg_integer(),
+
+ len :: non_neg_integer(),
+ bytes :: non_neg_integer(),
+ unacked_bytes :: non_neg_integer(),
+
+ persistent_count :: non_neg_integer(),
+ persistent_bytes :: non_neg_integer(),
+
+ target_ram_count :: non_neg_integer() | 'infinity',
+ ram_msg_count :: non_neg_integer(),
+ ram_msg_count_prev :: non_neg_integer(),
+ ram_ack_count_prev :: non_neg_integer(),
+ ram_bytes :: non_neg_integer(),
+ out_counter :: non_neg_integer(),
+ in_counter :: non_neg_integer(),
+ rates :: rates(),
+ msgs_on_disk :: gb_sets:set(),
+ msg_indices_on_disk :: gb_sets:set(),
+ unconfirmed :: gb_sets:set(),
+ confirmed :: gb_sets:set(),
+ ack_out_counter :: non_neg_integer(),
+ ack_in_counter :: non_neg_integer(),
+ disk_read_count :: non_neg_integer(),
+ disk_write_count :: non_neg_integer(),
+
+ io_batch_size :: pos_integer(),
+ mode :: 'default' | 'lazy',
+ virtual_host :: rabbit_types:vhost() }.
+%% Duplicated from rabbit_backing_queue
+-spec ack([ack()], state()) -> {[rabbit_guid:guid()], state()}.
+
+%%----------------------------------------------------------------------------
+%% Public API
+%%----------------------------------------------------------------------------
+
+start(VHost, DurableQueues) ->
+ rabbit_variable_queue:start(VHost, DurableQueues).
+
+stop(VHost) ->
+ rabbit_variable_queue:stop(VHost).
+
+init(Queue, Recover, Callback) ->
+ rabbit_variable_queue:init(Queue, Recover, Callback).
+
+terminate(Reason, State) ->
+ rabbit_variable_queue:terminate(Reason, State).
+
+delete_and_terminate(Reason, State) ->
+ rabbit_variable_queue:delete_and_terminate(Reason, State).
+
+delete_crashed(Q) ->
+ rabbit_variable_queue:delete_crashed(Q).
+
+purge(State = #vqstate { qi_pending_ack= QPA }) ->
+ maybe_delay(QPA),
+ rabbit_variable_queue:purge(State).
+
+purge_acks(State) ->
+ rabbit_variable_queue:purge_acks(State).
+
+publish(Msg, MsgProps, IsDelivered, ChPid, Flow, State) ->
+ rabbit_variable_queue:publish(Msg, MsgProps, IsDelivered, ChPid, Flow, State).
+
+batch_publish(Publishes, ChPid, Flow, State) ->
+ rabbit_variable_queue:batch_publish(Publishes, ChPid, Flow, State).
+
+publish_delivered(Msg, MsgProps, ChPid, Flow, State) ->
+ rabbit_variable_queue:publish_delivered(Msg, MsgProps, ChPid, Flow, State).
+
+batch_publish_delivered(Publishes, ChPid, Flow, State) ->
+ rabbit_variable_queue:batch_publish_delivered(Publishes, ChPid, Flow, State).
+
+discard(_MsgId, _ChPid, _Flow, State) -> State.
+
+drain_confirmed(State) ->
+ rabbit_variable_queue:drain_confirmed(State).
+
+dropwhile(Pred, State) ->
+ rabbit_variable_queue:dropwhile(Pred, State).
+
+fetchwhile(Pred, Fun, Acc, State) ->
+ rabbit_variable_queue:fetchwhile(Pred, Fun, Acc, State).
+
+fetch(AckRequired, State) ->
+ rabbit_variable_queue:fetch(AckRequired, State).
+
+drop(AckRequired, State) ->
+ rabbit_variable_queue:drop(AckRequired, State).
+
+ack(List, State) ->
+ rabbit_variable_queue:ack(List, State).
+
+requeue(AckTags, #vqstate { qi_pending_ack = QPA } = State) ->
+ maybe_delay(QPA),
+ rabbit_variable_queue:requeue(AckTags, State).
+
+ackfold(MsgFun, Acc, State, AckTags) ->
+ rabbit_variable_queue:ackfold(MsgFun, Acc, State, AckTags).
+
+fold(Fun, Acc, State) ->
+ rabbit_variable_queue:fold(Fun, Acc, State).
+
+len(#vqstate { qi_pending_ack = QPA } = State) ->
+ maybe_delay(QPA),
+ rabbit_variable_queue:len(State).
+
+is_empty(State) -> 0 == len(State).
+
+depth(State) ->
+ rabbit_variable_queue:depth(State).
+
+set_ram_duration_target(DurationTarget, State) ->
+ rabbit_variable_queue:set_ram_duration_target(DurationTarget, State).
+
+ram_duration(State) ->
+ rabbit_variable_queue:ram_duration(State).
+
+needs_timeout(State) ->
+ rabbit_variable_queue:needs_timeout(State).
+
+timeout(State) ->
+ rabbit_variable_queue:timeout(State).
+
+handle_pre_hibernate(State) ->
+ rabbit_variable_queue:handle_pre_hibernate(State).
+
+handle_info(Msg, State) ->
+ rabbit_variable_queue:handle_info(Msg, State).
+
+resume(State) -> rabbit_variable_queue:resume(State).
+
+msg_rates(State) ->
+ rabbit_variable_queue:msg_rates(State).
+
+info(Info, State) ->
+ rabbit_variable_queue:info(Info, State).
+
+invoke(Module, Fun, State) -> rabbit_variable_queue:invoke(Module, Fun, State).
+
+is_duplicate(Msg, State) -> rabbit_variable_queue:is_duplicate(Msg, State).
+
+set_queue_mode(Mode, State) ->
+ rabbit_variable_queue:set_queue_mode(Mode, State).
+
+zip_msgs_and_acks(Msgs, AckTags, Accumulator, State) ->
+ rabbit_variable_queue:zip_msgs_and_acks(Msgs, AckTags, Accumulator, State).
+
+%% Block indefinitely while the timeout test message is awaiting
+%% acknowledgement in the queue index (see the module comment above).
+maybe_delay(QPA) ->
+ case is_timeout_test(gb_trees:values(QPA)) of
+ true -> receive
+ %% The queue received an EXIT message, it's probably the
+ %% node being stopped with "rabbitmqctl stop". Thus, abort
+ %% the wait and requeue the EXIT message.
+ {'EXIT', _, shutdown} = ExitMsg -> self() ! ExitMsg,
+ void
+ after infinity -> void
+ end;
+ _ -> void
+ end.
+
+is_timeout_test([]) -> false;
+is_timeout_test([#msg_status{
+ msg = #basic_message{
+ content = #content{
+ payload_fragments_rev = PFR}}}|Rem]) ->
+ case lists:member(?TIMEOUT_TEST_MSG, PFR) of
+        true -> true;
+        _    -> is_timeout_test(Rem)
+ end;
+is_timeout_test([_|Rem]) -> is_timeout_test(Rem).
diff --git a/deps/rabbit/test/cluster_SUITE.erl b/deps/rabbit/test/cluster_SUITE.erl
new file mode 100644
index 0000000000..9df196a8ed
--- /dev/null
+++ b/deps/rabbit/test/cluster_SUITE.erl
@@ -0,0 +1,307 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(cluster_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include("include/amqqueue.hrl").
+
+-compile(export_all).
+
+-define(TIMEOUT, 30000).
+
+-define(CLEANUP_QUEUE_NAME, <<"cleanup-queue">>).
+
+-define(CLUSTER_TESTCASES, [
+ delegates_async,
+ delegates_sync,
+ queue_cleanup,
+ declare_on_dead_queue
+ ]).
+
+all() ->
+ [
+ {group, cluster_tests}
+ ].
+
+groups() ->
+ [
+ {cluster_tests, [], [
+ {from_cluster_node1, [], ?CLUSTER_TESTCASES},
+ {from_cluster_node2, [], ?CLUSTER_TESTCASES}
+ ]}
+ ].
+
+group(_) ->
+ [].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(Group, Config) ->
+ case lists:member({group, Group}, all()) of
+ true ->
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, Group},
+ {rmq_nodes_count, 2}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps() ++ [
+ fun(C) -> init_per_group1(Group, C) end
+ ]);
+ false ->
+ rabbit_ct_helpers:run_steps(Config, [
+ fun(C) -> init_per_group1(Group, C) end
+ ])
+ end.
+
+init_per_group1(from_cluster_node1, Config) ->
+ rabbit_ct_helpers:set_config(Config, {test_direction, {0, 1}});
+init_per_group1(from_cluster_node2, Config) ->
+ rabbit_ct_helpers:set_config(Config, {test_direction, {1, 0}});
+init_per_group1(_, Config) ->
+ Config.
+
+end_per_group(Group, Config) ->
+ case lists:member({group, Group}, all()) of
+ true ->
+ rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps());
+ false ->
+ Config
+ end.
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+%% ---------------------------------------------------------------------------
+%% Cluster-dependent tests.
+%% ---------------------------------------------------------------------------
+
+delegates_async(Config) ->
+ {I, J} = ?config(test_direction, Config),
+ From = rabbit_ct_broker_helpers:get_node_config(Config, I, nodename),
+ To = rabbit_ct_broker_helpers:get_node_config(Config, J, nodename),
+ rabbit_ct_broker_helpers:add_code_path_to_node(To, ?MODULE),
+ passed = rabbit_ct_broker_helpers:rpc(Config,
+ From, ?MODULE, delegates_async1, [Config, To]).
+
+delegates_async1(_Config, SecondaryNode) ->
+ Self = self(),
+ Sender = fun (Pid) -> Pid ! {invoked, Self} end,
+
+ Responder = make_responder(fun ({invoked, Pid}) -> Pid ! response end),
+
+ ok = delegate:invoke_no_result(spawn(Responder), Sender),
+ ok = delegate:invoke_no_result(spawn(SecondaryNode, Responder), Sender),
+ await_response(2),
+
+ passed.
+
+delegates_sync(Config) ->
+ {I, J} = ?config(test_direction, Config),
+ From = rabbit_ct_broker_helpers:get_node_config(Config, I, nodename),
+ To = rabbit_ct_broker_helpers:get_node_config(Config, J, nodename),
+ rabbit_ct_broker_helpers:add_code_path_to_node(To, ?MODULE),
+ passed = rabbit_ct_broker_helpers:rpc(Config,
+ From, ?MODULE, delegates_sync1, [Config, To]).
+
+delegates_sync1(_Config, SecondaryNode) ->
+ Sender = fun (Pid) -> gen_server:call(Pid, invoked, infinity) end,
+ BadSender = fun (_Pid) -> exit(exception) end,
+
+ Responder = make_responder(fun ({'$gen_call', From, invoked}) ->
+ gen_server:reply(From, response)
+ end),
+
+ BadResponder = make_responder(fun ({'$gen_call', From, invoked}) ->
+ gen_server:reply(From, response)
+ end, bad_responder_died),
+
+ response = delegate:invoke(spawn(Responder), Sender),
+ response = delegate:invoke(spawn(SecondaryNode, Responder), Sender),
+
+ must_exit(fun () -> delegate:invoke(spawn(BadResponder), BadSender) end),
+ must_exit(fun () ->
+ delegate:invoke(spawn(SecondaryNode, BadResponder), BadSender) end),
+
+ LocalGoodPids = spawn_responders(node(), Responder, 2),
+ RemoteGoodPids = spawn_responders(SecondaryNode, Responder, 2),
+ LocalBadPids = spawn_responders(node(), BadResponder, 2),
+ RemoteBadPids = spawn_responders(SecondaryNode, BadResponder, 2),
+
+ {GoodRes, []} = delegate:invoke(LocalGoodPids ++ RemoteGoodPids, Sender),
+ true = lists:all(fun ({_, response}) -> true end, GoodRes),
+ GoodResPids = [Pid || {Pid, _} <- GoodRes],
+
+ Good = lists:usort(LocalGoodPids ++ RemoteGoodPids),
+ Good = lists:usort(GoodResPids),
+
+ {[], BadRes} = delegate:invoke(LocalBadPids ++ RemoteBadPids, BadSender),
+ true = lists:all(fun ({_, {exit, exception, _}}) -> true end, BadRes),
+ BadResPids = [Pid || {Pid, _} <- BadRes],
+
+ Bad = lists:usort(LocalBadPids ++ RemoteBadPids),
+ Bad = lists:usort(BadResPids),
+
+ MagicalPids = [rabbit_misc:string_to_pid(Str) ||
+ Str <- ["<nonode@nohost.0.1.0>", "<nonode@nohost.0.2.0>"]],
+ {[], BadNodes} = delegate:invoke(MagicalPids, Sender),
+ true = lists:all(
+ fun ({_, {exit, {nodedown, nonode@nohost}, _Stack}}) -> true end,
+ BadNodes),
+ BadNodesPids = [Pid || {Pid, _} <- BadNodes],
+
+ Magical = lists:usort(MagicalPids),
+ Magical = lists:usort(BadNodesPids),
+
+ passed.
+
+queue_cleanup(Config) ->
+ {I, J} = ?config(test_direction, Config),
+ From = rabbit_ct_broker_helpers:get_node_config(Config, I, nodename),
+ To = rabbit_ct_broker_helpers:get_node_config(Config, J, nodename),
+ rabbit_ct_broker_helpers:add_code_path_to_node(To, ?MODULE),
+ passed = rabbit_ct_broker_helpers:rpc(Config,
+ From, ?MODULE, queue_cleanup1, [Config, To]).
+
+queue_cleanup1(_Config, _SecondaryNode) ->
+ {_Writer, Ch} = test_spawn(),
+ rabbit_channel:do(Ch, #'queue.declare'{ queue = ?CLEANUP_QUEUE_NAME }),
+ receive #'queue.declare_ok'{queue = ?CLEANUP_QUEUE_NAME} ->
+ ok
+ after ?TIMEOUT -> throw(failed_to_receive_queue_declare_ok)
+ end,
+ rabbit_channel:shutdown(Ch),
+ rabbit:stop(),
+ rabbit:start(),
+ {_Writer2, Ch2} = test_spawn(),
+ rabbit_channel:do(Ch2, #'queue.declare'{ passive = true,
+ queue = ?CLEANUP_QUEUE_NAME }),
+ receive
+ #'channel.close'{reply_code = ?NOT_FOUND} ->
+ ok
+ after ?TIMEOUT -> throw(failed_to_receive_channel_exit)
+ end,
+ rabbit_channel:shutdown(Ch2),
+ passed.
+
+declare_on_dead_queue(Config) ->
+ {I, J} = ?config(test_direction, Config),
+ From = rabbit_ct_broker_helpers:get_node_config(Config, I, nodename),
+ To = rabbit_ct_broker_helpers:get_node_config(Config, J, nodename),
+ rabbit_ct_broker_helpers:add_code_path_to_node(To, ?MODULE),
+ passed = rabbit_ct_broker_helpers:rpc(Config,
+ From, ?MODULE, declare_on_dead_queue1, [Config, To]).
+
+declare_on_dead_queue1(_Config, SecondaryNode) ->
+ QueueName = rabbit_misc:r(<<"/">>, queue, ?CLEANUP_QUEUE_NAME),
+ Self = self(),
+ Pid = spawn(SecondaryNode,
+ fun () ->
+ {new, Q} = rabbit_amqqueue:declare(QueueName, false, false, [], none, <<"acting-user">>),
+ QueueName = ?amqqueue_field_name(Q),
+ QPid = ?amqqueue_field_pid(Q),
+ exit(QPid, kill),
+ Self ! {self(), killed, QPid}
+ end),
+ receive
+ {Pid, killed, OldPid} ->
+ Q = dead_queue_loop(QueueName, OldPid),
+ {ok, 0} = rabbit_amqqueue:delete(Q, false, false, <<"acting-user">>),
+ passed
+ after ?TIMEOUT -> throw(failed_to_create_and_kill_queue)
+ end.
+
+
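+%% A responder is a zero-arity fun suitable for spawn/1 and spawn/2: it
+%% waits for a single message, applies FMsg to it, and throws Throw
+%% (timeout by default) if nothing arrives within ?TIMEOUT.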
+make_responder(FMsg) -> make_responder(FMsg, timeout).
+make_responder(FMsg, Throw) ->
+ fun () ->
+ receive Msg -> FMsg(Msg)
+ after ?TIMEOUT -> throw(Throw)
+ end
+ end.
+
+spawn_responders(Node, Responder, Count) ->
+ [spawn(Node, Responder) || _ <- lists:seq(1, Count)].
+
+await_response(0) ->
+ ok;
+await_response(Count) ->
+ receive
+ response -> await_response(Count - 1)
+ after ?TIMEOUT -> throw(timeout)
+ end.
+
+must_exit(Fun) ->
+ try
+ Fun(),
+ throw(exit_not_thrown)
+ catch
+ exit:_ -> ok
+ end.
+
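+%% Poll until the killed queue has been recovered under a new pid: the
+%% redeclare keeps returning {existing, Q}, and we are done once Q's pid
+%% differs from the one we killed and is alive again.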
+dead_queue_loop(QueueName, OldPid) ->
+ {existing, Q} = rabbit_amqqueue:declare(QueueName, false, false, [], none, <<"acting-user">>),
+ QPid = ?amqqueue_field_pid(Q),
+ case QPid of
+ OldPid -> timer:sleep(25),
+ dead_queue_loop(QueueName, OldPid);
+ _ -> true = rabbit_misc:is_process_alive(QPid),
+ Q
+ end.
+
+
+test_spawn() ->
+ {Writer, _Limiter, Ch} = rabbit_ct_broker_helpers:test_channel(),
+ ok = rabbit_channel:do(Ch, #'channel.open'{}),
+ receive #'channel.open_ok'{} -> ok
+ after ?TIMEOUT -> throw(failed_to_receive_channel_open_ok)
+ end,
+ {Writer, Ch}.
+
+test_spawn(Node) ->
+ rpc:call(Node, ?MODULE, test_spawn_remote, []).
+
+%% Spawn an arbitrary long-lived process, so we don't end up linking
+%% the channel to the short-lived process (here, the RPC call) spun up
+%% by the RPC server.
+test_spawn_remote() ->
+ RPC = self(),
+ spawn(fun () ->
+ {Writer, Ch} = test_spawn(),
+ RPC ! {Writer, Ch},
+ link(Ch),
+ receive
+ _ -> ok
+ end
+ end),
+ receive Res -> Res
+ after ?TIMEOUT -> throw(failed_to_receive_result)
+ end.
+
+queue_name(Config, Name) ->
+ Name1 = iolist_to_binary(rabbit_ct_helpers:config_to_testcase_name(Config, Name)),
+ queue_name(Name1).
+
+queue_name(Name) ->
+ rabbit_misc:r(<<"/">>, queue, Name).
diff --git a/deps/rabbit/test/cluster_rename_SUITE.erl b/deps/rabbit/test/cluster_rename_SUITE.erl
new file mode 100644
index 0000000000..cdf02c9643
--- /dev/null
+++ b/deps/rabbit/test/cluster_rename_SUITE.erl
@@ -0,0 +1,301 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(cluster_rename_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, cluster_size_2},
+ {group, cluster_size_3}
+ ].
+
+groups() ->
+ [
+ {cluster_size_2, [], [
+ % XXX post_change_nodename,
+ abortive_rename,
+ rename_fail,
+ rename_twice_fail
+ ]},
+ {cluster_size_3, [], [
+ rename_cluster_one_by_one,
+ rename_cluster_big_bang,
+ partial_one_by_one,
+ partial_big_bang
+ ]}
+ ].
+
+suite() ->
+ [
+ %% If a test hangs, no need to wait for 30 minutes.
+ {timetrap, {minutes, 15}}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(cluster_size_2, Config) ->
+ rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodes_count, 2} %% Replaced with a list of node names later.
+ ]);
+init_per_group(cluster_size_3, Config) ->
+ rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodes_count, 3} %% Replaced with a list of node names later.
+ ]).
+
+end_per_group(_, Config) ->
+ Config.
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase),
+ ClusterSize = ?config(rmq_nodes_count, Config),
+ Nodenames = [
+ list_to_atom(rabbit_misc:format("~s-~b", [Testcase, I]))
+ || I <- lists:seq(1, ClusterSize)
+ ],
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodes_count, Nodenames},
+ {rmq_nodes_clustered, true}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_testcase(Testcase, Config) ->
+ Config1 = case rabbit_ct_helpers:get_config(Config, save_config) of
+ undefined -> Config;
+ C -> C
+ end,
+ Config2 = rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()),
+ rabbit_ct_helpers:testcase_finished(Config2, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+%% Rolling rename of a cluster; each node should do a secondary rename.
+rename_cluster_one_by_one(Config) ->
+ [Node1, Node2, Node3] = rabbit_ct_broker_helpers:get_node_configs(
+ Config, nodename),
+ publish_all(Config,
+ [{Node1, <<"1">>}, {Node2, <<"2">>}, {Node3, <<"3">>}]),
+
+ Config1 = stop_rename_start(Config, Node1, [Node1, jessica]),
+ Config2 = stop_rename_start(Config1, Node2, [Node2, hazel]),
+ Config3 = stop_rename_start(Config2, Node3, [Node3, flopsy]),
+
+ [Jessica, Hazel, Flopsy] = rabbit_ct_broker_helpers:get_node_configs(
+ Config3, nodename),
+ consume_all(Config3,
+ [{Jessica, <<"1">>}, {Hazel, <<"2">>}, {Flopsy, <<"3">>}]),
+ {save_config, Config3}.
+
+%% Big bang rename of a cluster; Node1 should do a primary rename.
+rename_cluster_big_bang(Config) ->
+ [Node1, Node2, Node3] = rabbit_ct_broker_helpers:get_node_configs(Config,
+ nodename),
+ publish_all(Config,
+ [{Node1, <<"1">>}, {Node2, <<"2">>}, {Node3, <<"3">>}]),
+
+ ok = rabbit_ct_broker_helpers:stop_node(Config, Node3),
+ ok = rabbit_ct_broker_helpers:stop_node(Config, Node2),
+ ok = rabbit_ct_broker_helpers:stop_node(Config, Node1),
+
+ Map = [Node1, jessica, Node2, hazel, Node3, flopsy],
+ Config1 = rename_node(Config, Node1, Map),
+ Config2 = rename_node(Config1, Node2, Map),
+ Config3 = rename_node(Config2, Node3, Map),
+
+ [Jessica, Hazel, Flopsy] = rabbit_ct_broker_helpers:get_node_configs(
+ Config3, nodename),
+ ok = rabbit_ct_broker_helpers:start_node(Config3, Jessica),
+ ok = rabbit_ct_broker_helpers:start_node(Config3, Hazel),
+ ok = rabbit_ct_broker_helpers:start_node(Config3, Flopsy),
+
+ consume_all(Config3,
+ [{Jessica, <<"1">>}, {Hazel, <<"2">>}, {Flopsy, <<"3">>}]),
+ {save_config, Config3}.
+
+%% Here we test that Node1 copes with things being renamed around it.
+partial_one_by_one(Config) ->
+ [Node1, Node2, Node3] = rabbit_ct_broker_helpers:get_node_configs(Config,
+ nodename),
+ publish_all(Config,
+ [{Node1, <<"1">>}, {Node2, <<"2">>}, {Node3, <<"3">>}]),
+
+ Config1 = stop_rename_start(Config, Node1, [Node1, jessica]),
+ Config2 = stop_rename_start(Config1, Node2, [Node2, hazel]),
+
+ [Jessica, Hazel, Node3] = rabbit_ct_broker_helpers:get_node_configs(
+ Config2, nodename),
+ consume_all(Config2,
+ [{Jessica, <<"1">>}, {Hazel, <<"2">>}, {Node3, <<"3">>}]),
+ {save_config, Config2}.
+
+%% Here we test that Node1 copes with things being renamed around it.
+partial_big_bang(Config) ->
+ [Node1, Node2, Node3] = rabbit_ct_broker_helpers:get_node_configs(Config,
+ nodename),
+ publish_all(Config,
+ [{Node1, <<"1">>}, {Node2, <<"2">>}, {Node3, <<"3">>}]),
+
+ ok = rabbit_ct_broker_helpers:stop_node(Config, Node3),
+ ok = rabbit_ct_broker_helpers:stop_node(Config, Node2),
+ ok = rabbit_ct_broker_helpers:stop_node(Config, Node1),
+
+ Map = [Node2, hazel, Node3, flopsy],
+ Config1 = rename_node(Config, Node2, Map),
+ Config2 = rename_node(Config1, Node3, Map),
+
+ [Node1, Hazel, Flopsy] = rabbit_ct_broker_helpers:get_node_configs(Config2,
+ nodename),
+ ok = rabbit_ct_broker_helpers:start_node(Config2, Node1),
+ ok = rabbit_ct_broker_helpers:start_node(Config2, Hazel),
+ ok = rabbit_ct_broker_helpers:start_node(Config2, Flopsy),
+
+ consume_all(Config2,
+ [{Node1, <<"1">>}, {Hazel, <<"2">>}, {Flopsy, <<"3">>}]),
+ {save_config, Config2}.
+
+% XXX %% We should be able to specify the -n parameter on ctl with either
+% XXX %% the before or after name for the local node (since in real cases
+% XXX %% one might want to invoke the command before or after the hostname
+% XXX %% has changed) - usually we test before so here we test after.
+% XXX post_change_nodename([Node1, _Bigwig]) ->
+% XXX publish(Node1, <<"Node1">>),
+% XXX
+% XXX Bugs1 = rabbit_test_configs:stop_node(Node1),
+% XXX Bugs2 = [{nodename, jessica} | proplists:delete(nodename, Bugs1)],
+% XXX Jessica0 = rename_node(Bugs2, jessica, [Node1, jessica]),
+% XXX Jessica = rabbit_test_configs:start_node(Jessica0),
+% XXX
+% XXX consume(Jessica, <<"Node1">>),
+% XXX stop_all([Jessica]),
+% XXX ok.
+
+%% If we invoke rename but the node name does not actually change, we
+%% should roll back.
+abortive_rename(Config) ->
+ Node1 = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ publish(Config, Node1, <<"Node1">>),
+
+ ok = rabbit_ct_broker_helpers:stop_node(Config, Node1),
+ _Config1 = rename_node(Config, Node1, [Node1, jessica]),
+ ok = rabbit_ct_broker_helpers:start_node(Config, Node1),
+
+ consume(Config, Node1, <<"Node1">>),
+ ok.
+
+%% And test some ways the command can fail.
+rename_fail(Config) ->
+ [Node1, Node2] = rabbit_ct_broker_helpers:get_node_configs(Config,
+ nodename),
+ ok = rabbit_ct_broker_helpers:stop_node(Config, Node1),
+ %% Rename from a node that does not exist
+ ok = rename_node_fail(Config, Node1, [bugzilla, jessica]),
+ %% Rename to a node which does
+ ok = rename_node_fail(Config, Node1, [Node1, Node2]),
+ %% Rename two nodes to the same thing
+ ok = rename_node_fail(Config, Node1, [Node1, jessica, Node2, jessica]),
+ %% Rename while impersonating a node not in the cluster
+ Config1 = rabbit_ct_broker_helpers:set_node_config(Config, Node1,
+ {nodename, 'rabbit@localhost'}),
+ ok = rename_node_fail(Config1, Node1, [Node1, jessica]),
+ ok.
+
+rename_twice_fail(Config) ->
+ Node1 = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ ok = rabbit_ct_broker_helpers:stop_node(Config, Node1),
+ Config1 = rename_node(Config, Node1, [Node1, indecisive]),
+ ok = rename_node_fail(Config1, Node1, [indecisive, jessica]),
+ {save_config, Config1}.
+
+%% ----------------------------------------------------------------------------
+
+stop_rename_start(Config, Nodename, Map) ->
+ ok = rabbit_ct_broker_helpers:stop_node(Config, Nodename),
+ Config1 = rename_node(Config, Nodename, Map),
+ ok = rabbit_ct_broker_helpers:start_node(Config1, Nodename),
+ Config1.
+
+rename_node(Config, Nodename, Map) ->
+ {ok, Config1} = do_rename_node(Config, Nodename, Map),
+ Config1.
+
+rename_node_fail(Config, Nodename, Map) ->
+ {error, _, _} = do_rename_node(Config, Nodename, Map),
+ ok.
+
+do_rename_node(Config, Nodename, Map) ->
+ Map1 = [
+ begin
+ NStr = atom_to_list(N),
+ case lists:member($@, NStr) of
+ true -> N;
+ false -> rabbit_nodes:make({NStr, "localhost"})
+ end
+ end
+ || N <- Map
+ ],
+ Ret = rabbit_ct_broker_helpers:rabbitmqctl(Config, Nodename,
+ ["rename_cluster_node" | Map1], 120000),
+ case Ret of
+ {ok, _} ->
+ Config1 = update_config_after_rename(Config, Map1),
+ {ok, Config1};
+ {error, _, _} = Error ->
+ Error
+ end.
+
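+%% The rename map is a flat list of alternating Old/New entries, e.g.
+%% [Node1, jessica, Node2, hazel]. do_rename_node/3 above expands short
+%% names such as jessica to full node names like jessica@localhost via
+%% rabbit_nodes:make/1; update_config_after_rename/2 below then walks the
+%% expanded list two entries at a time.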
+update_config_after_rename(Config, [Old, New | Rest]) ->
+ Config1 = rabbit_ct_broker_helpers:set_node_config(Config, Old,
+ {nodename, New}),
+ update_config_after_rename(Config1, Rest);
+update_config_after_rename(Config, []) ->
+ Config.
+
+publish(Config, Node, Q) ->
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Node),
+ amqp_channel:call(Ch, #'confirm.select'{}),
+ amqp_channel:call(Ch, #'queue.declare'{queue = Q, durable = true}),
+ amqp_channel:cast(Ch, #'basic.publish'{routing_key = Q},
+ #amqp_msg{props = #'P_basic'{delivery_mode = 2},
+ payload = Q}),
+ amqp_channel:wait_for_confirms(Ch),
+ rabbit_ct_client_helpers:close_channels_and_connection(Config, Node).
+
+consume(Config, Node, Q) ->
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Node),
+ amqp_channel:call(Ch, #'queue.declare'{queue = Q, durable = true}),
+ {#'basic.get_ok'{}, #amqp_msg{payload = Q}} =
+ amqp_channel:call(Ch, #'basic.get'{queue = Q}),
+ rabbit_ct_client_helpers:close_channels_and_connection(Config, Node).
+
+
+publish_all(Config, Nodes) ->
+ [publish(Config, Node, Key) || {Node, Key} <- Nodes].
+
+consume_all(Config, Nodes) ->
+ [consume(Config, Node, Key) || {Node, Key} <- Nodes].
+
+set_node(Nodename, Cfg) ->
+ [{nodename, Nodename} | proplists:delete(nodename, Cfg)].
diff --git a/deps/rabbit/test/clustering_management_SUITE.erl b/deps/rabbit/test/clustering_management_SUITE.erl
new file mode 100644
index 0000000000..550a30b511
--- /dev/null
+++ b/deps/rabbit/test/clustering_management_SUITE.erl
@@ -0,0 +1,861 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(clustering_management_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-compile(export_all).
+
+-define(LOOP_RECURSION_DELAY, 100).
+
+all() ->
+ [
+ {group, unclustered_2_nodes},
+ {group, unclustered_3_nodes},
+ {group, clustered_2_nodes},
+ {group, clustered_4_nodes}
+ ].
+
+groups() ->
+ [
+ {unclustered_2_nodes, [], [
+ {cluster_size_2, [], [
+ classic_config_discovery_node_list
+ ]}
+ ]},
+ {unclustered_3_nodes, [], [
+ {cluster_size_3, [], [
+ join_and_part_cluster,
+ join_cluster_bad_operations,
+ join_to_start_interval,
+ forget_cluster_node,
+ change_cluster_node_type,
+ change_cluster_when_node_offline,
+ update_cluster_nodes,
+ force_reset_node
+ ]}
+ ]},
+ {clustered_2_nodes, [], [
+ {cluster_size_2, [], [
+ forget_removes_things,
+ reset_removes_things,
+ forget_offline_removes_things,
+ force_boot,
+ status_with_alarm,
+ pid_file_and_await_node_startup,
+ await_running_count,
+ start_with_invalid_schema_in_path,
+ persistent_cluster_id
+ ]}
+ ]},
+ {clustered_4_nodes, [], [
+ {cluster_size_4, [], [
+ forget_promotes_offline_follower
+ ]}
+ ]}
+ ].
+
+suite() ->
+ [
+ %% If a test hangs, no need to wait for 30 minutes.
+ {timetrap, {minutes, 15}}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ Config1 = rabbit_ct_helpers:merge_app_env(
+ Config, {rabbit, [
+ {mnesia_table_loading_retry_limit, 2},
+ {mnesia_table_loading_retry_timeout,1000}
+ ]}),
+ rabbit_ct_helpers:run_setup_steps(Config1).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(unclustered_2_nodes, Config) ->
+ rabbit_ct_helpers:set_config(Config, [{rmq_nodes_clustered, false}]);
+init_per_group(unclustered_3_nodes, Config) ->
+ rabbit_ct_helpers:set_config(Config, [{rmq_nodes_clustered, false}]);
+init_per_group(clustered_2_nodes, Config) ->
+ rabbit_ct_helpers:set_config(Config, [{rmq_nodes_clustered, true}]);
+init_per_group(clustered_4_nodes, Config) ->
+ rabbit_ct_helpers:set_config(Config, [{rmq_nodes_clustered, true}]);
+init_per_group(cluster_size_2, Config) ->
+ rabbit_ct_helpers:set_config(Config, [{rmq_nodes_count, 2}]);
+init_per_group(cluster_size_3, Config) ->
+ rabbit_ct_helpers:set_config(Config, [{rmq_nodes_count, 3}]);
+init_per_group(cluster_size_4, Config) ->
+ rabbit_ct_helpers:set_config(Config, [{rmq_nodes_count, 4}]).
+
+end_per_group(_, Config) ->
+ Config.
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase),
+ ClusterSize = ?config(rmq_nodes_count, Config),
+ TestNumber = rabbit_ct_helpers:testcase_number(Config, ?MODULE, Testcase),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, Testcase},
+ {tcp_ports_base, {skip_n_nodes, TestNumber * ClusterSize}},
+ {keep_pid_file_on_exit, true}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_testcase(Testcase, Config) ->
+ Config1 = rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()),
+ rabbit_ct_helpers:testcase_finished(Config1, Testcase).
+
+%% -------------------------------------------------------------------
+%% Test cases
+%% -------------------------------------------------------------------
+
+start_with_invalid_schema_in_path(Config) ->
+ [Rabbit, Hare] = cluster_members(Config),
+ stop_app(Rabbit),
+ stop_app(Hare),
+
+ create_bad_schema(Rabbit, Hare, Config),
+
+ start_app(Hare),
+ case start_app(Rabbit) of
+ ok -> ok;
+ ErrRabbit -> error({unable_to_start_with_bad_schema_in_work_dir, ErrRabbit})
+ end.
+
+persistent_cluster_id(Config) ->
+ case os:getenv("SECONDARY_UMBRELLA") of
+ false ->
+ [Rabbit, Hare] = cluster_members(Config),
+ ClusterIDA1 = rpc:call(Rabbit, rabbit_nodes, persistent_cluster_id, []),
+ ClusterIDB1 = rpc:call(Hare, rabbit_nodes, persistent_cluster_id, []),
+ ?assertEqual(ClusterIDA1, ClusterIDB1),
+
+ rabbit_ct_broker_helpers:restart_node(Config, Rabbit),
+ ClusterIDA2 = rpc:call(Rabbit, rabbit_nodes, persistent_cluster_id, []),
+ rabbit_ct_broker_helpers:restart_node(Config, Hare),
+ ClusterIDB2 = rpc:call(Hare, rabbit_nodes, persistent_cluster_id, []),
+ ?assertEqual(ClusterIDA1, ClusterIDA2),
+ ?assertEqual(ClusterIDA2, ClusterIDB2);
+ _ ->
+ %% skip the test in mixed version mode
+ {skip, "Should not run in mixed version environments"}
+ end.
+
+create_bad_schema(Rabbit, Hare, Config) ->
+ {ok, RabbitMnesiaDir} = rpc:call(Rabbit, application, get_env, [mnesia, dir]),
+ {ok, HareMnesiaDir} = rpc:call(Hare, application, get_env, [mnesia, dir]),
+ %% Make sure we don't use the current dir:
+ PrivDir = ?config(priv_dir, Config),
+ ct:pal("Priv dir ~p~n", [PrivDir]),
+ ok = filelib:ensure_dir(filename:join(PrivDir, "file")),
+
+ ok = rpc:call(Rabbit, file, set_cwd, [PrivDir]),
+ ok = rpc:call(Hare, file, set_cwd, [PrivDir]),
+
+ ok = rpc:call(Rabbit, application, unset_env, [mnesia, dir]),
+ ok = rpc:call(Hare, application, unset_env, [mnesia, dir]),
+ ok = rpc:call(Rabbit, mnesia, create_schema, [[Rabbit, Hare]]),
+ ok = rpc:call(Rabbit, mnesia, start, []),
+ {atomic,ok} = rpc:call(Rabbit, mnesia, create_table,
+ [rabbit_queue, [{ram_copies, [Rabbit, Hare]}]]),
+ stopped = rpc:call(Rabbit, mnesia, stop, []),
+ ok = rpc:call(Rabbit, application, set_env, [mnesia, dir, RabbitMnesiaDir]),
+ ok = rpc:call(Hare, application, set_env, [mnesia, dir, HareMnesiaDir]).
+
+join_and_part_cluster(Config) ->
+ [Rabbit, Hare, Bunny] = cluster_members(Config),
+ assert_not_clustered(Rabbit),
+ assert_not_clustered(Hare),
+ assert_not_clustered(Bunny),
+
+ stop_join_start(Rabbit, Bunny),
+ assert_clustered([Rabbit, Bunny]),
+
+ stop_join_start(Hare, Bunny, true),
+ assert_cluster_status(
+ {[Bunny, Hare, Rabbit], [Bunny, Rabbit], [Bunny, Hare, Rabbit]},
+ [Rabbit, Hare, Bunny]),
+
+ %% Allow clustering with already clustered node
+ ok = stop_app(Rabbit),
+ {ok, <<"The node is already a member of this cluster">>} =
+ join_cluster(Rabbit, Hare),
+ ok = start_app(Rabbit),
+
+ stop_reset_start(Rabbit),
+ assert_not_clustered(Rabbit),
+ assert_cluster_status({[Bunny, Hare], [Bunny], [Bunny, Hare]},
+ [Hare, Bunny]),
+
+ stop_reset_start(Hare),
+ assert_not_clustered(Hare),
+ assert_not_clustered(Bunny).
+
+join_cluster_bad_operations(Config) ->
+ [Rabbit, Hare, Bunny] = cluster_members(Config),
+
+ UsePrelaunch = rabbit_ct_broker_helpers:rpc(
+ Config, Hare,
+ erlang, function_exported,
+ [rabbit_prelaunch, get_context, 0]),
+
+ %% Nonexistent node
+ ok = stop_app(Rabbit),
+ assert_failure(fun () -> join_cluster(Rabbit, non@existent) end),
+ ok = start_app(Rabbit),
+ assert_not_clustered(Rabbit),
+
+ %% Trying to cluster with mnesia running
+ assert_failure(fun () -> join_cluster(Rabbit, Bunny) end),
+ assert_not_clustered(Rabbit),
+
+ %% Trying to cluster the node with itself
+ ok = stop_app(Rabbit),
+ assert_failure(fun () -> join_cluster(Rabbit, Rabbit) end),
+ ok = start_app(Rabbit),
+ assert_not_clustered(Rabbit),
+
+ %% Do not let the node leave the cluster or reset if it's the only
+ %% ram node
+ stop_join_start(Hare, Rabbit, true),
+ assert_cluster_status({[Rabbit, Hare], [Rabbit], [Rabbit, Hare]},
+ [Rabbit, Hare]),
+ ok = stop_app(Hare),
+ assert_failure(fun () -> join_cluster(Rabbit, Bunny) end),
+ assert_failure(fun () -> reset(Rabbit) end),
+ ok = start_app(Hare),
+ assert_cluster_status({[Rabbit, Hare], [Rabbit], [Rabbit, Hare]},
+ [Rabbit, Hare]),
+
+ %% Cannot start RAM-only node first
+ ok = stop_app(Rabbit),
+ ok = stop_app(Hare),
+ assert_failure(fun () -> start_app(Hare) end),
+ ok = start_app(Rabbit),
+ case UsePrelaunch of
+ true ->
+ ok = start_app(Hare);
+ false ->
+ %% The Erlang VM has stopped after previous rabbit app failure
+ ok = rabbit_ct_broker_helpers:start_node(Config, Hare)
+ end,
+ ok.
+
+%% This tests that the nodes in the cluster are notified immediately of a node
+%% join, and not just after the app is started.
+join_to_start_interval(Config) ->
+ [Rabbit, Hare, _Bunny] = cluster_members(Config),
+
+ ok = stop_app(Rabbit),
+ ok = join_cluster(Rabbit, Hare),
+ assert_cluster_status({[Rabbit, Hare], [Rabbit, Hare], [Hare]},
+ [Rabbit, Hare]),
+ ok = start_app(Rabbit),
+ assert_clustered([Rabbit, Hare]).
+
+forget_cluster_node(Config) ->
+ [Rabbit, Hare, Bunny] = cluster_members(Config),
+
+ %% Trying to remove a node not in the cluster should fail
+ assert_failure(fun () -> forget_cluster_node(Hare, Rabbit) end),
+
+ stop_join_start(Rabbit, Hare),
+ assert_clustered([Rabbit, Hare]),
+
+ %% Trying to remove an online node should fail
+ assert_failure(fun () -> forget_cluster_node(Hare, Rabbit) end),
+
+ ok = stop_app(Rabbit),
+ %% We're passing the --offline flag, but Hare is online
+ assert_failure(fun () -> forget_cluster_node(Hare, Rabbit, true) end),
+ %% Removing some nonexistent node will fail
+ assert_failure(fun () -> forget_cluster_node(Hare, non@existent) end),
+ ok = forget_cluster_node(Hare, Rabbit),
+ assert_not_clustered(Hare),
+ assert_cluster_status({[Rabbit, Hare], [Rabbit, Hare], [Hare]},
+ [Rabbit]),
+
+ %% Now we can't start Rabbit since it thinks that it's still in the cluster
+ %% with Hare, while Hare disagrees.
+ assert_failure(fun () -> start_app(Rabbit) end),
+
+ ok = reset(Rabbit),
+ ok = start_app(Rabbit),
+ assert_not_clustered(Rabbit),
+
+ %% Now we remove Rabbit from an offline node.
+ stop_join_start(Bunny, Hare),
+ stop_join_start(Rabbit, Hare),
+ assert_clustered([Rabbit, Hare, Bunny]),
+ ok = stop_app(Hare),
+ ok = stop_app(Rabbit),
+ ok = stop_app(Bunny),
+ %% This is fine but we need the flag
+ assert_failure(fun () -> forget_cluster_node(Hare, Bunny) end),
+ %% Also fails because the Hare node is still running
+ assert_failure(fun () -> forget_cluster_node(Hare, Bunny, true) end),
+ %% But this works
+ ok = rabbit_ct_broker_helpers:stop_node(Config, Hare),
+ {ok, _} = rabbit_ct_broker_helpers:rabbitmqctl(Config, Hare,
+ ["forget_cluster_node", "--offline", Bunny]),
+ ok = rabbit_ct_broker_helpers:start_node(Config, Hare),
+ ok = start_app(Rabbit),
+ %% Bunny still thinks it's clustered with Rabbit and Hare
+ assert_failure(fun () -> start_app(Bunny) end),
+ ok = reset(Bunny),
+ ok = start_app(Bunny),
+ assert_not_clustered(Bunny),
+ assert_clustered([Rabbit, Hare]).
+
+forget_removes_things(Config) ->
+ test_removes_things(Config, fun (R, H) -> ok = forget_cluster_node(H, R) end).
+
+reset_removes_things(Config) ->
+ test_removes_things(Config, fun (R, _H) -> ok = reset(R) end).
+
+test_removes_things(Config, LoseRabbit) ->
+ Unmirrored = <<"unmirrored-queue">>,
+ [Rabbit, Hare] = cluster_members(Config),
+ RCh = rabbit_ct_client_helpers:open_channel(Config, Rabbit),
+ declare(RCh, Unmirrored),
+ ok = stop_app(Rabbit),
+
+ HCh = rabbit_ct_client_helpers:open_channel(Config, Hare),
+ {'EXIT',{{shutdown,{server_initiated_close,404,_}}, _}} =
+ (catch declare(HCh, Unmirrored)),
+
+ ok = LoseRabbit(Rabbit, Hare),
+ HCh2 = rabbit_ct_client_helpers:open_channel(Config, Hare),
+ declare(HCh2, Unmirrored),
+ ok.
+
+forget_offline_removes_things(Config) ->
+ [Rabbit, Hare] = rabbit_ct_broker_helpers:get_node_configs(Config,
+ nodename),
+ Unmirrored = <<"unmirrored-queue">>,
+ X = <<"X">>,
+ RCh = rabbit_ct_client_helpers:open_channel(Config, Rabbit),
+ declare(RCh, Unmirrored),
+
+ amqp_channel:call(RCh, #'exchange.declare'{durable = true,
+ exchange = X,
+ auto_delete = true}),
+ amqp_channel:call(RCh, #'queue.bind'{queue = Unmirrored,
+ exchange = X}),
+ ok = rabbit_ct_broker_helpers:stop_broker(Config, Rabbit),
+
+ HCh = rabbit_ct_client_helpers:open_channel(Config, Hare),
+ {'EXIT',{{shutdown,{server_initiated_close,404,_}}, _}} =
+ (catch declare(HCh, Unmirrored)),
+
+ ok = rabbit_ct_broker_helpers:stop_node(Config, Hare),
+ ok = rabbit_ct_broker_helpers:stop_node(Config, Rabbit),
+ {ok, _} = rabbit_ct_broker_helpers:rabbitmqctl(Config, Hare,
+ ["forget_cluster_node", "--offline", Rabbit]),
+ ok = rabbit_ct_broker_helpers:start_node(Config, Hare),
+
+ HCh2 = rabbit_ct_client_helpers:open_channel(Config, Hare),
+ declare(HCh2, Unmirrored),
+ {'EXIT',{{shutdown,{server_initiated_close,404,_}}, _}} =
+ (catch amqp_channel:call(HCh2,#'exchange.declare'{durable = true,
+ exchange = X,
+ auto_delete = true,
+ passive = true})),
+ ok.
+
+forget_promotes_offline_follower(Config) ->
+ [A, B, C, D] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ ACh = rabbit_ct_client_helpers:open_channel(Config, A),
+ QName = <<"mirrored-queue">>,
+ declare(ACh, QName),
+ set_ha_policy(Config, QName, A, [B, C]),
+ set_ha_policy(Config, QName, A, [C, D]), %% Test add and remove from recoverable_mirrors
+
+ %% Publish and confirm
+ amqp_channel:call(ACh, #'confirm.select'{}),
+ amqp_channel:cast(ACh, #'basic.publish'{routing_key = QName},
+ #amqp_msg{props = #'P_basic'{delivery_mode = 2}}),
+ amqp_channel:wait_for_confirms(ACh),
+
+ %% We kill nodes rather than stop them in order to make sure
+ %% that we aren't dependent on anything that happens as they shut
+ %% down (see bug 26467).
+ ok = rabbit_ct_broker_helpers:kill_node(Config, D),
+ ok = rabbit_ct_broker_helpers:kill_node(Config, C),
+ ok = rabbit_ct_broker_helpers:kill_node(Config, B),
+ ok = rabbit_ct_broker_helpers:kill_node(Config, A),
+
+ {ok, _} = rabbit_ct_broker_helpers:rabbitmqctl(Config, C,
+ ["force_boot"]),
+
+ ok = rabbit_ct_broker_helpers:start_node(Config, C),
+
+ %% We should now have the following dramatis personae:
+ %% A - down, master
+ %% B - down, used to be mirror, no longer is, never had the message
+ %% C - running, should be mirror, but has wiped the message on restart
+ %% D - down, recoverable mirror, contains message
+ %%
+ %% So forgetting A should offline-promote the queue to D, keeping
+ %% the message.
+
+ {ok, _} = rabbit_ct_broker_helpers:rabbitmqctl(Config, C,
+ ["forget_cluster_node", A]),
+
+ ok = rabbit_ct_broker_helpers:start_node(Config, D),
+ DCh2 = rabbit_ct_client_helpers:open_channel(Config, D),
+ #'queue.declare_ok'{message_count = 1} = declare(DCh2, QName),
+ ok.
+
+set_ha_policy(Config, QName, Master, Slaves) ->
+ Nodes = [list_to_binary(atom_to_list(N)) || N <- [Master | Slaves]],
+ HaPolicy = {<<"nodes">>, Nodes},
+ rabbit_ct_broker_helpers:set_ha_policy(Config, Master, QName, HaPolicy),
+ await_followers(QName, Master, Slaves).
+
+await_followers(QName, Master, Slaves) ->
+ await_followers_0(QName, Master, Slaves, 10).
+
+await_followers_0(QName, Master, Slaves0, Tries) ->
+ {ok, Queue} = await_followers_lookup_queue(QName, Master),
+ SPids = amqqueue:get_slave_pids(Queue),
+ ActMaster = amqqueue:qnode(Queue),
+ ActSlaves = lists:usort([node(P) || P <- SPids]),
+ Slaves1 = lists:usort(Slaves0),
+ await_followers_1(QName, ActMaster, ActSlaves, Master, Slaves1, Tries).
+
+await_followers_1(QName, _ActMaster, _ActSlaves, _Master, _Slaves, 0) ->
+ error({timeout_waiting_for_followers, QName});
+await_followers_1(QName, ActMaster, ActSlaves, Master, Slaves, Tries) ->
+ case {Master, Slaves} of
+ {ActMaster, ActSlaves} ->
+ ok;
+ _ ->
+ timer:sleep(250),
+ await_followers_0(QName, Master, Slaves, Tries - 1)
+ end.
+
+await_followers_lookup_queue(QName, Master) ->
+ await_followers_lookup_queue(QName, Master, 10).
+
+await_followers_lookup_queue(QName, _Master, 0) ->
+ error({timeout_looking_up_queue, QName});
+await_followers_lookup_queue(QName, Master, Tries) ->
+ RpcArgs = [rabbit_misc:r(<<"/">>, queue, QName)],
+ case rpc:call(Master, rabbit_amqqueue, lookup, RpcArgs) of
+ {error, not_found} ->
+ timer:sleep(250),
+ await_followers_lookup_queue(QName, Master, Tries - 1);
+ {ok, Q} ->
+ {ok, Q}
+ end.
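+%% Both await helpers above poll every 250 ms for up to 10 tries (about
+%% 2.5 seconds) before giving up with an error.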
+
+force_boot(Config) ->
+ [Rabbit, Hare] = rabbit_ct_broker_helpers:get_node_configs(Config,
+ nodename),
+ {error, _, _} = rabbit_ct_broker_helpers:rabbitmqctl(Config, Rabbit,
+ ["force_boot"]),
+ ok = rabbit_ct_broker_helpers:stop_node(Config, Rabbit),
+ ok = rabbit_ct_broker_helpers:stop_node(Config, Hare),
+ {error, _} = rabbit_ct_broker_helpers:start_node(Config, Rabbit),
+ {ok, _} = rabbit_ct_broker_helpers:rabbitmqctl(Config, Rabbit,
+ ["force_boot"]),
+ ok = rabbit_ct_broker_helpers:start_node(Config, Rabbit),
+ ok.
+
+change_cluster_node_type(Config) ->
+ [Rabbit, Hare, _Bunny] = cluster_members(Config),
+
+ %% Trying to change the node to the ram type when not clustered should always fail
+ ok = stop_app(Rabbit),
+ assert_failure(fun () -> change_cluster_node_type(Rabbit, ram) end),
+ ok = start_app(Rabbit),
+
+ ok = stop_app(Rabbit),
+ join_cluster(Rabbit, Hare),
+ assert_cluster_status({[Rabbit, Hare], [Rabbit, Hare], [Hare]},
+ [Rabbit, Hare]),
+ change_cluster_node_type(Rabbit, ram),
+ assert_cluster_status({[Rabbit, Hare], [Hare], [Hare]},
+ [Rabbit, Hare]),
+ change_cluster_node_type(Rabbit, disc),
+
+ assert_cluster_status({[Rabbit, Hare], [Rabbit, Hare], [Hare]},
+ [Rabbit, Hare]),
+ change_cluster_node_type(Rabbit, ram),
+ ok = start_app(Rabbit),
+ assert_cluster_status({[Rabbit, Hare], [Hare], [Hare, Rabbit]},
+ [Rabbit, Hare]),
+
+ %% Changing to ram when you're the only ram node should fail
+ ok = stop_app(Hare),
+ assert_failure(fun () -> change_cluster_node_type(Hare, ram) end),
+ ok = start_app(Hare).
+
+change_cluster_when_node_offline(Config) ->
+ [Rabbit, Hare, Bunny] = cluster_members(Config),
+
+ %% Cluster the three nodes
+ stop_join_start(Rabbit, Hare),
+ assert_clustered([Rabbit, Hare]),
+
+ stop_join_start(Bunny, Hare),
+ assert_clustered([Rabbit, Hare, Bunny]),
+
+ %% Bring down Rabbit, and remove Bunny from the cluster while
+ %% Rabbit is offline
+ ok = stop_app(Rabbit),
+ ok = stop_app(Bunny),
+ ok = reset(Bunny),
+ assert_cluster_status({[Bunny], [Bunny], []}, [Bunny]),
+ assert_cluster_status({[Rabbit, Hare], [Rabbit, Hare], [Hare]}, [Hare]),
+ assert_cluster_status(
+ {[Rabbit, Hare, Bunny], [Rabbit, Hare, Bunny], [Hare, Bunny]}, [Rabbit]),
+
+ %% Bring Rabbit back up
+ ok = start_app(Rabbit),
+ assert_clustered([Rabbit, Hare]),
+ ok = start_app(Bunny),
+ assert_not_clustered(Bunny),
+
+ %% Now the same, but Rabbit is a RAM node, and we bring up Bunny
+ %% before
+ ok = stop_app(Rabbit),
+ ok = change_cluster_node_type(Rabbit, ram),
+ ok = start_app(Rabbit),
+ stop_join_start(Bunny, Hare),
+ assert_cluster_status(
+ {[Rabbit, Hare, Bunny], [Hare, Bunny], [Rabbit, Hare, Bunny]},
+ [Rabbit, Hare, Bunny]),
+ ok = stop_app(Rabbit),
+ ok = stop_app(Bunny),
+ ok = reset(Bunny),
+ ok = start_app(Bunny),
+ assert_not_clustered(Bunny),
+ assert_cluster_status({[Rabbit, Hare], [Hare], [Hare]}, [Hare]),
+ assert_cluster_status(
+ {[Rabbit, Hare, Bunny], [Hare, Bunny], [Hare, Bunny]},
+ [Rabbit]),
+ ok = start_app(Rabbit),
+ assert_cluster_status({[Rabbit, Hare], [Hare], [Rabbit, Hare]},
+ [Rabbit, Hare]),
+ assert_not_clustered(Bunny).
+
+update_cluster_nodes(Config) ->
+ [Rabbit, Hare, Bunny] = cluster_members(Config),
+
+ %% Mnesia is running...
+ assert_failure(fun () -> update_cluster_nodes(Rabbit, Hare) end),
+
+ ok = stop_app(Rabbit),
+ ok = join_cluster(Rabbit, Hare),
+ ok = stop_app(Bunny),
+ ok = join_cluster(Bunny, Hare),
+ ok = start_app(Bunny),
+ stop_reset_start(Hare),
+ assert_failure(fun () -> start_app(Rabbit) end),
+ %% Bogus node
+ assert_failure(fun () -> update_cluster_nodes(Rabbit, non@existent) end),
+ %% Inconsistent node
+ assert_failure(fun () -> update_cluster_nodes(Rabbit, Hare) end),
+ ok = update_cluster_nodes(Rabbit, Bunny),
+ ok = start_app(Rabbit),
+ assert_not_clustered(Hare),
+ assert_clustered([Rabbit, Bunny]).
+
+classic_config_discovery_node_list(Config) ->
+ [Rabbit, Hare] = cluster_members(Config),
+
+ ok = stop_app(Hare),
+ ok = reset(Hare),
+ ok = rpc:call(Hare, application, set_env,
+ [rabbit, cluster_nodes, {[Rabbit], disc}]),
+ ok = start_app(Hare),
+ assert_clustered([Rabbit, Hare]),
+
+ ok = stop_app(Hare),
+ ok = reset(Hare),
+ ok = rpc:call(Hare, application, set_env,
+ [rabbit, cluster_nodes, {[Rabbit], ram}]),
+ ok = start_app(Hare),
+ assert_cluster_status({[Rabbit, Hare], [Rabbit], [Rabbit, Hare]},
+ [Rabbit, Hare]),
+
+ %% List of nodes [node()] is equivalent to {[node()], disc}
+ ok = stop_app(Hare),
+ ok = reset(Hare),
+ ok = rpc:call(Hare, application, set_env,
+ [rabbit, cluster_nodes, [Rabbit]]),
+ ok = start_app(Hare),
+ assert_clustered([Rabbit, Hare]),
+
+ ok = stop_app(Hare),
+ ok = reset(Hare),
+ %% If we use an invalid cluster_nodes conf, the node fails to start.
+ ok = rpc:call(Hare, application, set_env,
+ [rabbit, cluster_nodes, "Yes, please"]),
+ assert_failure(fun () -> start_app(Hare) end),
+ assert_not_clustered(Rabbit).
+
+force_reset_node(Config) ->
+ [Rabbit, Hare, _Bunny] = cluster_members(Config),
+
+ stop_join_start(Rabbit, Hare),
+ stop_app(Rabbit),
+ force_reset(Rabbit),
+ %% Hare thinks that Rabbit is still clustered
+ assert_cluster_status({[Rabbit, Hare], [Rabbit, Hare], [Hare]},
+ [Hare]),
+ %% ...but it isn't
+ assert_cluster_status({[Rabbit], [Rabbit], []}, [Rabbit]),
+ %% We can rejoin Rabbit and Hare
+ update_cluster_nodes(Rabbit, Hare),
+ start_app(Rabbit),
+ assert_clustered([Rabbit, Hare]).
+
+status_with_alarm(Config) ->
+ [Rabbit, Hare] = rabbit_ct_broker_helpers:get_node_configs(Config,
+ nodename),
+
+ %% Given: an alarm is raised on each node.
+ rabbit_ct_broker_helpers:rabbitmqctl(Config, Rabbit,
+ ["set_vm_memory_high_watermark", "0.000000001"]),
+ rabbit_ct_broker_helpers:rabbitmqctl(Config, Hare,
+ ["set_disk_free_limit", "2048G"]),
+
+ %% When: we ask for alarm status
+ S = rabbit_ct_broker_helpers:rpc(Config, Rabbit,
+ rabbit_alarm, get_alarms, []),
+ R = rabbit_ct_broker_helpers:rpc(Config, Hare,
+ rabbit_alarm, get_alarms, []),
+
+ %% Then: both nodes report alarm information for each other.
+ ok = alarm_information_on_each_node(S, Rabbit, Hare),
+ ok = alarm_information_on_each_node(R, Rabbit, Hare).
+
+alarm_information_on_each_node(Result, Rabbit, Hare) ->
+ %% Example result:
+ %% [{{resource_limit,disk,'rmq-ct-status_with_alarm-2-24240@localhost'},
+ %% []},
+ %% {{resource_limit,memory,'rmq-ct-status_with_alarm-1-24120@localhost'},
+ %% []}]
+ Alarms = [A || {A, _} <- Result],
+ ?assert(lists:member({resource_limit, memory, Rabbit}, Alarms)),
+ ?assert(lists:member({resource_limit, disk, Hare}, Alarms)),
+
+ ok.
+
+pid_file_and_await_node_startup(Config) ->
+ [Rabbit, Hare] = rabbit_ct_broker_helpers:get_node_configs(Config,
+ nodename),
+ RabbitConfig = rabbit_ct_broker_helpers:get_node_config(Config,Rabbit),
+ RabbitPidFile = ?config(pid_file, RabbitConfig),
+ %% ensure pid file is readable
+ {ok, _} = file:read_file(RabbitPidFile),
+ %% ensure wait works on running node
+ {ok, _} = rabbit_ct_broker_helpers:rabbitmqctl(Config, Rabbit,
+ ["wait", RabbitPidFile]),
+ %% stop both nodes
+ ok = rabbit_ct_broker_helpers:stop_node(Config, Rabbit),
+ ok = rabbit_ct_broker_helpers:stop_node(Config, Hare),
+ %% starting first node fails - it was not the last node to stop
+ {error, _} = rabbit_ct_broker_helpers:start_node(Config, Rabbit),
+ PreviousPid = pid_from_file(RabbitPidFile),
+ %% start first node in the background
+ spawn_link(fun() ->
+ rabbit_ct_broker_helpers:start_node(Config, Rabbit)
+ end),
+ Attempts = 200,
+ Timeout = 50,
+ wait_for_pid_file_to_change(RabbitPidFile, PreviousPid, Attempts, Timeout),
+ {error, _, _} = rabbit_ct_broker_helpers:rabbitmqctl(Config, Rabbit,
+ ["wait", RabbitPidFile]).
+
+await_running_count(Config) ->
+ [Rabbit, Hare] = rabbit_ct_broker_helpers:get_node_configs(Config,
+ nodename),
+ RabbitConfig = rabbit_ct_broker_helpers:get_node_config(Config,Rabbit),
+ RabbitPidFile = ?config(pid_file, RabbitConfig),
+ {ok, _} = rabbit_ct_broker_helpers:rabbitmqctl(Config, Rabbit,
+ ["wait", RabbitPidFile]),
+ %% stop both nodes
+ ok = rabbit_ct_broker_helpers:stop_node(Config, Hare),
+ ok = rabbit_ct_broker_helpers:stop_node(Config, Rabbit),
+ %% start one node in the background
+ rabbit_ct_broker_helpers:start_node(Config, Rabbit),
+ {ok, _} = rabbit_ct_broker_helpers:rabbitmqctl(Config, Rabbit,
+ ["wait", RabbitPidFile]),
+ ?assertEqual(ok, rabbit_ct_broker_helpers:rpc(Config, Rabbit,
+ rabbit_nodes,
+ await_running_count, [1, 30000])),
+ ?assertEqual({error, timeout},
+ rabbit_ct_broker_helpers:rpc(Config, Rabbit,
+ rabbit_nodes,
+ await_running_count, [2, 1000])),
+ ?assertEqual({error, timeout},
+ rabbit_ct_broker_helpers:rpc(Config, Rabbit,
+ rabbit_nodes,
+ await_running_count, [5, 1000])),
+ rabbit_ct_broker_helpers:start_node(Config, Hare),
+ %% this now succeeds
+ ?assertEqual(ok, rabbit_ct_broker_helpers:rpc(Config, Rabbit,
+ rabbit_nodes,
+ await_running_count, [2, 30000])),
+ %% this still succeeds
+ ?assertEqual(ok, rabbit_ct_broker_helpers:rpc(Config, Rabbit,
+ rabbit_nodes,
+ await_running_count, [1, 30000])),
+ %% this still fails
+ ?assertEqual({error, timeout},
+ rabbit_ct_broker_helpers:rpc(Config, Rabbit,
+ rabbit_nodes,
+ await_running_count, [5, 1000])).
+
+%% ----------------------------------------------------------------------------
+%% Internal utils
+%% ----------------------------------------------------------------------------
+
+%% The attempt counter is the third argument; give up once it reaches zero.
+wait_for_pid_file_to_change(_, _, 0, _) ->
+ error(timeout_waiting_for_pid_file_to_have_running_pid);
+wait_for_pid_file_to_change(PidFile, PreviousPid, Attempts, Timeout) ->
+ Pid = pid_from_file(PidFile),
+ case Pid =/= undefined andalso Pid =/= PreviousPid of
+ true -> ok;
+ false ->
+ ct:sleep(Timeout),
+ wait_for_pid_file_to_change(PidFile,
+ PreviousPid,
+ Attempts - 1,
+ Timeout)
+ end.
+
+pid_from_file(PidFile) ->
+ case file:read_file(PidFile) of
+ {ok, Content} ->
+ string:strip(binary_to_list(Content), both, $\n);
+ {error, enoent} ->
+ undefined
+ end.
+
+cluster_members(Config) ->
+ rabbit_ct_broker_helpers:get_node_configs(Config, nodename).
+
+assert_cluster_status(Status0, Nodes) ->
+ Status = {AllNodes, _, _} = sort_cluster_status(Status0),
+ wait_for_cluster_status(Status, AllNodes, Nodes).
+
+wait_for_cluster_status(Status, AllNodes, Nodes) ->
+ Max = 10000 div ?LOOP_RECURSION_DELAY,
+ wait_for_cluster_status(0, Max, Status, AllNodes, Nodes).
+
+wait_for_cluster_status(N, Max, Status, _AllNodes, Nodes) when N >= Max ->
+ erlang:error({cluster_status_max_tries_failed,
+ [{nodes, Nodes},
+ {expected_status, Status},
+ {max_tried, Max}]});
+wait_for_cluster_status(N, Max, Status, AllNodes, Nodes) ->
+ case lists:all(fun (Node) ->
+ verify_status_equal(Node, Status, AllNodes)
+ end, Nodes) of
+ true -> ok;
+ false -> timer:sleep(?LOOP_RECURSION_DELAY),
+ wait_for_cluster_status(N + 1, Max, Status, AllNodes, Nodes)
+ end.
+
+verify_status_equal(Node, Status, AllNodes) ->
+ NodeStatus = sort_cluster_status(cluster_status(Node)),
+ (AllNodes =/= [Node]) =:= rpc:call(Node, rabbit_mnesia, is_clustered, [])
+ andalso NodeStatus =:= Status.
+
+cluster_status(Node) ->
+ {rpc:call(Node, rabbit_mnesia, cluster_nodes, [all]),
+ rpc:call(Node, rabbit_mnesia, cluster_nodes, [disc]),
+ rpc:call(Node, rabbit_mnesia, cluster_nodes, [running])}.
+
+sort_cluster_status({All, Disc, Running}) ->
+ {lists:sort(All), lists:sort(Disc), lists:sort(Running)}.
+
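+%% A cluster status is a {All, Disc, Running} triple of sorted node lists;
+%% a fully formed cluster of disc nodes therefore reports {Nodes, Nodes,
+%% Nodes}, which is exactly what assert_clustered/1 checks.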
+assert_clustered(Nodes) ->
+ assert_cluster_status({Nodes, Nodes, Nodes}, Nodes).
+
+assert_not_clustered(Node) ->
+ assert_cluster_status({[Node], [Node], [Node]}, [Node]).
+
+assert_failure(Fun) ->
+ case catch Fun() of
+ {error, _Code, Reason} -> Reason;
+ {error, Reason} -> Reason;
+ {error_string, Reason} -> Reason;
+ {badrpc, {'EXIT', Reason}} -> Reason;
+ %% Failure to start an app results in node shutdown
+ {badrpc, nodedown} -> nodedown;
+ {badrpc_multi, Reason, _Nodes} -> Reason;
+ Other -> error({expected_failure, Other})
+ end.
+
+stop_app(Node) ->
+ rabbit_control_helper:command(stop_app, Node).
+
+start_app(Node) ->
+ rabbit_control_helper:command(start_app, Node).
+
+join_cluster(Node, To) ->
+ join_cluster(Node, To, false).
+
+join_cluster(Node, To, Ram) ->
+ rabbit_control_helper:command_with_output(join_cluster, Node, [atom_to_list(To)], [{"--ram", Ram}]).
+
+reset(Node) ->
+ rabbit_control_helper:command(reset, Node).
+
+force_reset(Node) ->
+ rabbit_control_helper:command(force_reset, Node).
+
+forget_cluster_node(Node, Removee, RemoveWhenOffline) ->
+ rabbit_control_helper:command(forget_cluster_node, Node, [atom_to_list(Removee)],
+ [{"--offline", RemoveWhenOffline}]).
+
+forget_cluster_node(Node, Removee) ->
+ forget_cluster_node(Node, Removee, false).
+
+change_cluster_node_type(Node, Type) ->
+ rabbit_control_helper:command(change_cluster_node_type, Node, [atom_to_list(Type)]).
+
+update_cluster_nodes(Node, DiscoveryNode) ->
+ rabbit_control_helper:command(update_cluster_nodes, Node, [atom_to_list(DiscoveryNode)]).
+
+stop_join_start(Node, ClusterTo, Ram) ->
+ ok = stop_app(Node),
+ ok = join_cluster(Node, ClusterTo, Ram),
+ ok = start_app(Node).
+
+stop_join_start(Node, ClusterTo) ->
+ stop_join_start(Node, ClusterTo, false).
+
+stop_reset_start(Node) ->
+ ok = stop_app(Node),
+ ok = reset(Node),
+ ok = start_app(Node).
+
+declare(Ch, Name) ->
+ Res = amqp_channel:call(Ch, #'queue.declare'{durable = true,
+ queue = Name}),
+ amqp_channel:call(Ch, #'queue.bind'{queue = Name,
+ exchange = <<"amq.fanout">>}),
+ Res.
diff --git a/deps/rabbit/test/config_schema_SUITE.erl b/deps/rabbit/test/config_schema_SUITE.erl
new file mode 100644
index 0000000000..c538736897
--- /dev/null
+++ b/deps/rabbit/test/config_schema_SUITE.erl
@@ -0,0 +1,54 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2016-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(config_schema_SUITE).
+
+-compile(export_all).
+
+all() ->
+ [
+ run_snippets
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ Config1 = rabbit_ct_helpers:run_setup_steps(Config),
+ rabbit_ct_config_schema:init_schemas(rabbit, Config1).
+
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, Testcase}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_testcase(Testcase, Config) ->
+ Config1 = rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()),
+ rabbit_ct_helpers:testcase_finished(Config1, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+run_snippets(Config) ->
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, run_snippets1, [Config]).
+
+run_snippets1(Config) ->
+ rabbit_ct_config_schema:run_snippets(Config).
diff --git a/deps/rabbit/test/config_schema_SUITE_data/certs/cacert.pem b/deps/rabbit/test/config_schema_SUITE_data/certs/cacert.pem
new file mode 100644
index 0000000000..eaf6b67806
--- /dev/null
+++ b/deps/rabbit/test/config_schema_SUITE_data/certs/cacert.pem
@@ -0,0 +1 @@
+I'm not a certificate
diff --git a/deps/rabbit/test/config_schema_SUITE_data/certs/cert.pem b/deps/rabbit/test/config_schema_SUITE_data/certs/cert.pem
new file mode 100644
index 0000000000..eaf6b67806
--- /dev/null
+++ b/deps/rabbit/test/config_schema_SUITE_data/certs/cert.pem
@@ -0,0 +1 @@
+I'm not a certificate
diff --git a/deps/rabbit/test/config_schema_SUITE_data/certs/key.pem b/deps/rabbit/test/config_schema_SUITE_data/certs/key.pem
new file mode 100644
index 0000000000..eaf6b67806
--- /dev/null
+++ b/deps/rabbit/test/config_schema_SUITE_data/certs/key.pem
@@ -0,0 +1 @@
+I'm not a certificate
diff --git a/deps/rabbit/test/config_schema_SUITE_data/rabbit.snippets b/deps/rabbit/test/config_schema_SUITE_data/rabbit.snippets
new file mode 100644
index 0000000000..c6ac600dcc
--- /dev/null
+++ b/deps/rabbit/test/config_schema_SUITE_data/rabbit.snippets
@@ -0,0 +1,797 @@
+[{internal_auth_backend,
+ "auth_backends.1 = internal",
+ [{rabbit,[{auth_backends,[rabbit_auth_backend_internal]}]}],
+ []},
+ {ldap_auth_backend,
+ "auth_backends.1 = ldap",
+ [{rabbit,[{auth_backends,[rabbit_auth_backend_ldap]}]}],
+ []},
+ {multiple_auth_backends,
+ "auth_backends.1 = ldap
+auth_backends.2 = internal",
+ [{rabbit,
+ [{auth_backends,
+ [rabbit_auth_backend_ldap,rabbit_auth_backend_internal]}]}],
+ []},
+ {full_name_auth_backend,
+ "auth_backends.1 = ldap
+# uses module name instead of a short alias, \"http\"
+auth_backends.2 = rabbit_auth_backend_http",
+ [{rabbit,
+ [{auth_backends,[rabbit_auth_backend_ldap,rabbit_auth_backend_http]}]}],
+ []},
+ {third_party_auth_backend,
+ "auth_backends.1.authn = internal
+# uses module name because this backend is from a 3rd party
+auth_backends.1.authz = rabbit_auth_backend_ip_range",
+ [{rabbit,
+ [{auth_backends,
+ [{rabbit_auth_backend_internal,rabbit_auth_backend_ip_range}]}]}],
+ []},
+ {authn_authz_backend,
+ "auth_backends.1.authn = ldap
+auth_backends.1.authz = internal",
+ [{rabbit,
+ [{auth_backends,
+ [{rabbit_auth_backend_ldap,rabbit_auth_backend_internal}]}]}],
+ []},
+ {authn_authz_multiple_backends,
+ "auth_backends.1.authn = ldap
+auth_backends.1.authz = internal
+auth_backends.2 = internal",
+ [{rabbit,
+ [{auth_backends,
+ [{rabbit_auth_backend_ldap,rabbit_auth_backend_internal},
+ rabbit_auth_backend_internal]}]}],
+ []},
+ {authn_backend_only,
+ "auth_backends.1.authn = ldap",
+ [{rabbit,
+ [{auth_backends,
+ [{rabbit_auth_backend_ldap,rabbit_auth_backend_ldap}]}]}],
+ []},
+ {ssl_options,
+ "ssl_options.cacertfile = test/config_schema_SUITE_data/certs/cacert.pem
+ssl_options.certfile = test/config_schema_SUITE_data/certs/cert.pem
+ssl_options.keyfile = test/config_schema_SUITE_data/certs/key.pem
+ssl_options.verify = verify_peer
+ssl_options.fail_if_no_peer_cert = true",
+ [{rabbit,
+ [{ssl_options,
+ [{cacertfile,"test/config_schema_SUITE_data/certs/cacert.pem"},
+ {certfile,"test/config_schema_SUITE_data/certs/cert.pem"},
+ {keyfile,"test/config_schema_SUITE_data/certs/key.pem"},
+ {verify,verify_peer},
+ {fail_if_no_peer_cert,true}]}]}],
+ []},
+ {tcp_listener,
+ "listeners.tcp.default = 5673",
+ [{rabbit,[{tcp_listeners,[5673]}]}],[]},
+ {ssl_listener,
+ "listeners.ssl = none",[{rabbit,[{ssl_listeners,[]}]}],[]},
+ {num_acceptors,
+ "num_acceptors.ssl = 1",[{rabbit,[{num_ssl_acceptors,1}]}],[]},
+
+ {socket_writer_gc_threshold,
+ "socket_writer.gc_threshold = 999666111", [{rabbit, [{writer_gc_threshold, 999666111}]}],[]},
+
+ {socket_writer_gc_threshold_off,
+ "socket_writer.gc_threshold = off", [{rabbit, [{writer_gc_threshold, undefined}]}],[]},
+
+ {default_user_settings,
+ "default_user = guest
+default_pass = guest
+default_user_tags.administrator = true
+default_permissions.configure = .*
+default_permissions.read = .*
+default_permissions.write = .*",
+ [{rabbit,
+ [{default_user,<<"guest">>},
+ {default_pass,<<"guest">>},
+ {default_user_tags,[administrator]},
+ {default_permissions,[<<".*">>,<<".*">>,<<".*">>]}]}],
+ []},
+ {cluster_formation,
+ "cluster_formation.peer_discovery_backend = rabbit_peer_discovery_classic_config
+cluster_formation.classic_config.nodes.peer1 = rabbit@hostname1
+cluster_formation.classic_config.nodes.peer2 = rabbit@hostname2
+cluster_formation.node_type = disc",
+ [{rabbit,
+ [{cluster_formation,
+ [{peer_discovery_backend,rabbit_peer_discovery_classic_config},
+ {node_type,disc}]},
+ {cluster_nodes,{[rabbit@hostname2,rabbit@hostname1],disc}}]}],
+ []},
+
+ {cluster_formation_module_classic_config_alias,
+ "cluster_formation.peer_discovery_backend = classic_config
+cluster_formation.classic_config.nodes.peer1 = rabbit@hostname1
+cluster_formation.classic_config.nodes.peer2 = rabbit@hostname2",
+ [{rabbit,
+ [{cluster_formation,
+ [{peer_discovery_backend,rabbit_peer_discovery_classic_config}]},
+ {cluster_nodes,{[rabbit@hostname2,rabbit@hostname1],disc}}]}],
+ []},
+
+ {cluster_formation_module_dns_alias,
+ "cluster_formation.peer_discovery_backend = dns
+cluster_formation.dns.hostname = discovery.eng.example.local",
+ [{rabbit,
+ [
+ {cluster_formation,
+ [{peer_discovery_backend,rabbit_peer_discovery_dns},
+ {peer_discovery_dns, [
+ {hostname, <<"discovery.eng.example.local">>}
+ ]}]}
+ ]}],
+ []},
+
+ {cluster_formation_disk,
+ "cluster_formation.peer_discovery_backend = rabbit_peer_discovery_classic_config
+ cluster_formation.classic_config.nodes.peer1 = rabbit@hostname1
+ cluster_formation.classic_config.nodes.peer2 = rabbit@hostname2
+ cluster_formation.node_type = disk",
+ [{rabbit,
+ [{cluster_formation,
+ [{peer_discovery_backend,rabbit_peer_discovery_classic_config},
+ {node_type,disc}]},
+ {cluster_nodes,{[rabbit@hostname2,rabbit@hostname1],disc}}]}],
+ []},
+ {cluster_formation_ram_ignored,
+ "cluster_formation.node_type = ram",[],[]},
+ {tcp_listen_options,
+ "tcp_listen_options.backlog = 128
+tcp_listen_options.nodelay = true
+tcp_listen_options.exit_on_close = false",
+ [{rabbit,
+ [{tcp_listen_options,
+ [{backlog,128},{nodelay,true},{exit_on_close,false}]}]}],
+ []},
+ {vm_memory_watermark_absolute,
+ "vm_memory_high_watermark.absolute = 1073741824",
+ [{rabbit,[{vm_memory_high_watermark,{absolute,1073741824}}]}],
+ []},
+ {vm_memory_watermark_absolute_units,
+ "vm_memory_high_watermark.absolute = 1024MB",
+ [{rabbit,[{vm_memory_high_watermark,{absolute,"1024MB"}}]}],
+ []},
+ {vm_memory_watermark_paging_ratio,
+ "vm_memory_high_watermark_paging_ratio = 0.75
+ vm_memory_high_watermark.relative = 0.4",
+ [{rabbit,
+ [{vm_memory_high_watermark_paging_ratio,0.75},
+ {vm_memory_high_watermark,0.4}]}],
+ []},
+ {memory_monitor_interval, "memory_monitor_interval = 5000",
+ [{rabbit,
+ [{memory_monitor_interval, 5000}]}],
+ []},
+ {vm_memory_calculation_strategy, "vm_memory_calculation_strategy = rss",
+ [{rabbit,
+ [{vm_memory_calculation_strategy, rss}]}],
+ []},
+ {vm_memory_calculation_strategy, "vm_memory_calculation_strategy = erlang",
+ [{rabbit,
+ [{vm_memory_calculation_strategy, erlang}]}],
+ []},
+ {vm_memory_calculation_strategy, "vm_memory_calculation_strategy = allocated",
+ [{rabbit,
+ [{vm_memory_calculation_strategy, allocated}]}],
+ []},
+ {vm_memory_calculation_strategy, "vm_memory_calculation_strategy = legacy",
+ [{rabbit,
+ [{vm_memory_calculation_strategy, legacy}]}],
+ []},
+ {total_memory_available_override_value,
+ "total_memory_available_override_value = 1000000000",
+ [{rabbit,[{total_memory_available_override_value, 1000000000}]}],
+ []},
+ {total_memory_available_override_value_units,
+ "total_memory_available_override_value = 1024MB",
+ [{rabbit,[{total_memory_available_override_value, "1024MB"}]}],
+ []},
+ {connection_max,
+ "connection_max = 999",
+ [{rabbit,[{connection_max, 999}]}],
+ []},
+ {connection_max,
+ "connection_max = infinity",
+ [{rabbit,[{connection_max, infinity}]}],
+ []},
+ {channel_max,
+ "channel_max = 16",
+ [{rabbit,[{channel_max, 16}]}],
+ []},
+ {max_message_size,
+ "max_message_size = 131072",
+ [{rabbit, [{max_message_size, 131072}]}],
+ []},
+ {listeners_tcp_ip,
+ "listeners.tcp.1 = 192.168.1.99:5672",
+ [{rabbit,[{tcp_listeners,[{"192.168.1.99",5672}]}]}],
+ []},
+ {listeners_tcp_ip_multiple,
+ "listeners.tcp.1 = 127.0.0.1:5672
+ listeners.tcp.2 = ::1:5672",
+ [{rabbit,[{tcp_listeners,[{"127.0.0.1",5672},{"::1",5672}]}]}],
+ []},
+ {listeners_tcp_ip_all,"listeners.tcp.1 = :::5672",
+ [{rabbit,[{tcp_listeners,[{"::",5672}]}]}],
+ []},
+ {listeners_tcp_ipv6,
+ "listeners.tcp.1 = fe80::2acf:e9ff:fe17:f97b:5672",
+ [{rabbit,[{tcp_listeners,[{"fe80::2acf:e9ff:fe17:f97b",5672}]}]}],
+ []},
+ {tcp_options_sndbuf,
+ "tcp_listen_options.backlog = 128
+ tcp_listen_options.nodelay = true
+ tcp_listen_options.sndbuf = 196608
+ tcp_listen_options.recbuf = 196608",
+ [{rabbit,
+ [{tcp_listen_options,
+ [{backlog,128},{nodelay,true},{sndbuf,196608},{recbuf,196608}]}]}],
+ []},
+ {tcp_listen_options_nodelay_with_kernel,
+ "tcp_listen_options.backlog = 4096
+ tcp_listen_options.nodelay = true",
+ [{kernel,
+ [{inet_default_connect_options,[{nodelay,true}]},
+ {inet_default_listen_options,[{nodelay,true}]}]}],
+ [{kernel,
+ [{inet_default_connect_options,[{nodelay,true}]},
+ {inet_default_listen_options,[{nodelay,true}]}]},
+ {rabbit,[{tcp_listen_options,[{backlog,4096},{nodelay,true}]}]}],
+ []},
+ {tcp_listen_options_nodelay,
+ "tcp_listen_options.backlog = 4096
+ tcp_listen_options.nodelay = true",
+ [{rabbit,[{tcp_listen_options,[{backlog,4096},{nodelay,true}]}]}],
+ []},
+ {ssl_handshake_timeout,
+ "ssl_handshake_timeout = 10000",
+ [{rabbit,[{ssl_handshake_timeout,10000}]}],
+ []},
+ {cluster_partition_handling_pause_if_all_down,
+ "cluster_partition_handling = pause_if_all_down
+
+ ## Recover strategy. Can be either 'autoheal' or 'ignore'
+ cluster_partition_handling.pause_if_all_down.recover = ignore
+
+ ## Node names to check
+ cluster_partition_handling.pause_if_all_down.nodes.1 = rabbit@myhost1
+ cluster_partition_handling.pause_if_all_down.nodes.2 = rabbit@myhost2",
+ [{rabbit,
+ [{cluster_partition_handling,
+ {pause_if_all_down,[rabbit@myhost2,rabbit@myhost1],ignore}}]}],
+ []},
+ {cluster_partition_handling_autoheal,
+ "cluster_partition_handling = autoheal",
+ [{rabbit,[{cluster_partition_handling,autoheal}]}],
+ []},
+ {password_hashing,
+ "password_hashing_module = rabbit_password_hashing_sha512",
+ [{rabbit,[{password_hashing_module,rabbit_password_hashing_sha512}]}],
+ []},
+ {ssl_options_verify_peer,
+ "listeners.ssl.1 = 5671
+ ssl_options.cacertfile = test/config_schema_SUITE_data/certs/cacert.pem
+ ssl_options.certfile = test/config_schema_SUITE_data/certs/cert.pem
+ ssl_options.keyfile = test/config_schema_SUITE_data/certs/key.pem
+ ssl_options.verify = verify_peer
+ ssl_options.fail_if_no_peer_cert = false",
+ [{rabbit,
+ [{ssl_listeners,[5671]},
+ {ssl_options,
+ [{cacertfile,"test/config_schema_SUITE_data/certs/cacert.pem"},
+ {certfile,"test/config_schema_SUITE_data/certs/cert.pem"},
+ {keyfile,"test/config_schema_SUITE_data/certs/key.pem"},
+ {verify,verify_peer},
+ {fail_if_no_peer_cert,false}]}]}],
+ []},
+ {ssl_options_password,
+ "listeners.ssl.1 = 5671
+ ssl_options.cacertfile = test/config_schema_SUITE_data/certs/cacert.pem
+ ssl_options.certfile = test/config_schema_SUITE_data/certs/cert.pem
+ ssl_options.keyfile = test/config_schema_SUITE_data/certs/key.pem
+ ssl_options.password = t0p$3kRe7",
+ [{rabbit,
+ [{ssl_listeners,[5671]},
+ {ssl_options,
+ [{cacertfile,"test/config_schema_SUITE_data/certs/cacert.pem"},
+ {certfile,"test/config_schema_SUITE_data/certs/cert.pem"},
+ {keyfile,"test/config_schema_SUITE_data/certs/key.pem"},
+ {password,"t0p$3kRe7"}]}]}],
+ []},
+ {ssl_options_tls_ver_old,
+ "listeners.ssl.1 = 5671
+ ssl_options.cacertfile = test/config_schema_SUITE_data/certs/cacert.pem
+ ssl_options.certfile = test/config_schema_SUITE_data/certs/cert.pem
+ ssl_options.keyfile = test/config_schema_SUITE_data/certs/key.pem
+ ssl_options.versions.tls1_2 = tlsv1.2
+ ssl_options.versions.tls1_1 = tlsv1.1
+ ssl_options.versions.tls1 = tlsv1",
+ [{ssl,[{versions,['tlsv1.2','tlsv1.1',tlsv1]}]}],
+ [{ssl,[{versions,['tlsv1.2','tlsv1.1',tlsv1]}]},
+ {rabbit,
+ [{ssl_listeners,[5671]},
+ {ssl_options,
+ [{cacertfile,"test/config_schema_SUITE_data/certs/cacert.pem"},
+ {certfile,"test/config_schema_SUITE_data/certs/cert.pem"},
+ {keyfile,"test/config_schema_SUITE_data/certs/key.pem"},
+ {versions,['tlsv1.2','tlsv1.1',tlsv1]}]}]}],
+ []},
+ {ssl_options_tls_ver_new,
+ "listeners.ssl.1 = 5671
+ ssl_options.cacertfile = test/config_schema_SUITE_data/certs/cacert.pem
+ ssl_options.certfile = test/config_schema_SUITE_data/certs/cert.pem
+ ssl_options.keyfile = test/config_schema_SUITE_data/certs/key.pem
+ ssl_options.versions.tls1_2 = tlsv1.2
+ ssl_options.versions.tls1_1 = tlsv1.1",
+ [{ssl,[{versions,['tlsv1.2','tlsv1.1']}]}],
+ [{ssl,[{versions,['tlsv1.2','tlsv1.1']}]},
+ {rabbit,
+ [{ssl_listeners,[5671]},
+ {ssl_options,
+ [{cacertfile,"test/config_schema_SUITE_data/certs/cacert.pem"},
+ {certfile,"test/config_schema_SUITE_data/certs/cert.pem"},
+ {keyfile,"test/config_schema_SUITE_data/certs/key.pem"},
+ {versions,['tlsv1.2','tlsv1.1']}]}]}],
+ []},
+
+ {ssl_options_ciphers,
+ "listeners.ssl.1 = 5671
+ ssl_options.cacertfile = test/config_schema_SUITE_data/certs/cacert.pem
+ ssl_options.certfile = test/config_schema_SUITE_data/certs/cert.pem
+ ssl_options.keyfile = test/config_schema_SUITE_data/certs/key.pem
+ ssl_options.versions.1 = tlsv1.2
+ ssl_options.versions.2 = tlsv1.1
+ ssl_options.ciphers.1 = ECDHE-ECDSA-AES256-GCM-SHA384
+ ssl_options.ciphers.2 = ECDHE-RSA-AES256-GCM-SHA384
+ ssl_options.ciphers.3 = ECDHE-ECDSA-AES256-SHA384
+ ssl_options.ciphers.4 = ECDHE-RSA-AES256-SHA384
+ ssl_options.ciphers.5 = ECDH-ECDSA-AES256-GCM-SHA384
+ ssl_options.ciphers.6 = ECDH-RSA-AES256-GCM-SHA384
+ ssl_options.ciphers.7 = ECDH-ECDSA-AES256-SHA384
+ ssl_options.ciphers.8 = ECDH-RSA-AES256-SHA384
+ ssl_options.ciphers.9 = DHE-RSA-AES256-GCM-SHA384",
+ [{ssl,[{versions,['tlsv1.2','tlsv1.1']}]}],
+ [{ssl,[{versions,['tlsv1.2','tlsv1.1']}]},
+ {rabbit,
+ [{ssl_listeners,[5671]},
+ {ssl_options,
+ [{cacertfile,"test/config_schema_SUITE_data/certs/cacert.pem"},
+ {ciphers, [
+ "ECDHE-ECDSA-AES256-GCM-SHA384",
+ "ECDHE-RSA-AES256-GCM-SHA384",
+ "ECDHE-ECDSA-AES256-SHA384",
+ "ECDHE-RSA-AES256-SHA384",
+ "ECDH-ECDSA-AES256-GCM-SHA384",
+ "ECDH-RSA-AES256-GCM-SHA384",
+ "ECDH-ECDSA-AES256-SHA384",
+ "ECDH-RSA-AES256-SHA384",
+ "DHE-RSA-AES256-GCM-SHA384"
+ ]},
+ {certfile,"test/config_schema_SUITE_data/certs/cert.pem"},
+ {keyfile,"test/config_schema_SUITE_data/certs/key.pem"},
+ {versions,['tlsv1.2','tlsv1.1']}]}]}],
+ []},
+
+ {ssl_options_allow_poodle,
+ "listeners.ssl.1 = 5671
+ ssl_allow_poodle_attack = true
+ ssl_options.cacertfile = test/config_schema_SUITE_data/certs/cacert.pem
+ ssl_options.certfile = test/config_schema_SUITE_data/certs/cert.pem
+ ssl_options.keyfile = test/config_schema_SUITE_data/certs/key.pem
+ ssl_options.verify = verify_peer
+ ssl_options.fail_if_no_peer_cert = false",
+ [{rabbit,
+ [{ssl_listeners,[5671]},
+ {ssl_allow_poodle_attack,true},
+ {ssl_options,
+ [{cacertfile,"test/config_schema_SUITE_data/certs/cacert.pem"},
+ {certfile,"test/config_schema_SUITE_data/certs/cert.pem"},
+ {keyfile,"test/config_schema_SUITE_data/certs/key.pem"},
+ {verify,verify_peer},
+ {fail_if_no_peer_cert,false}]}]}],
+ []},
+ {ssl_options_depth,
+ "listeners.ssl.1 = 5671
+ ssl_options.cacertfile = test/config_schema_SUITE_data/certs/cacert.pem
+ ssl_options.certfile = test/config_schema_SUITE_data/certs/cert.pem
+ ssl_options.keyfile = test/config_schema_SUITE_data/certs/key.pem
+ ssl_options.depth = 2
+ ssl_options.verify = verify_peer
+ ssl_options.fail_if_no_peer_cert = false",
+ [{rabbit,
+ [{ssl_listeners,[5671]},
+ {ssl_options,
+ [{cacertfile,"test/config_schema_SUITE_data/certs/cacert.pem"},
+ {certfile,"test/config_schema_SUITE_data/certs/cert.pem"},
+ {keyfile,"test/config_schema_SUITE_data/certs/key.pem"},
+ {depth,2},
+ {verify,verify_peer},
+ {fail_if_no_peer_cert,false}]}]}],
+ []},
+ {ssl_options_depth_0,
+ "listeners.ssl.1 = 5671
+ ssl_options.cacertfile = test/config_schema_SUITE_data/certs/cacert.pem
+ ssl_options.certfile = test/config_schema_SUITE_data/certs/cert.pem
+ ssl_options.keyfile = test/config_schema_SUITE_data/certs/key.pem
+ ssl_options.depth = 0
+ ssl_options.verify = verify_peer
+ ssl_options.fail_if_no_peer_cert = false",
+ [{rabbit,
+ [{ssl_listeners,[5671]},
+ {ssl_options,
+ [{cacertfile,"test/config_schema_SUITE_data/certs/cacert.pem"},
+ {certfile,"test/config_schema_SUITE_data/certs/cert.pem"},
+ {keyfile,"test/config_schema_SUITE_data/certs/key.pem"},
+ {depth,0},
+ {verify,verify_peer},
+ {fail_if_no_peer_cert,false}]}]}],
+ []},
+ {ssl_options_depth_255,
+ "listeners.ssl.1 = 5671
+ ssl_options.cacertfile = test/config_schema_SUITE_data/certs/cacert.pem
+ ssl_options.certfile = test/config_schema_SUITE_data/certs/cert.pem
+ ssl_options.keyfile = test/config_schema_SUITE_data/certs/key.pem
+ ssl_options.depth = 255
+ ssl_options.verify = verify_peer
+ ssl_options.fail_if_no_peer_cert = false",
+ [{rabbit,
+ [{ssl_listeners,[5671]},
+ {ssl_options,
+ [{cacertfile,"test/config_schema_SUITE_data/certs/cacert.pem"},
+ {certfile,"test/config_schema_SUITE_data/certs/cert.pem"},
+ {keyfile,"test/config_schema_SUITE_data/certs/key.pem"},
+ {depth,255},
+ {verify,verify_peer},
+ {fail_if_no_peer_cert,false}]}]}],
+ []},
+ {ssl_options_honor_cipher_order,
+ "listeners.ssl.1 = 5671
+ ssl_options.cacertfile = test/config_schema_SUITE_data/certs/cacert.pem
+ ssl_options.certfile = test/config_schema_SUITE_data/certs/cert.pem
+ ssl_options.keyfile = test/config_schema_SUITE_data/certs/key.pem
+ ssl_options.depth = 2
+ ssl_options.verify = verify_peer
+ ssl_options.fail_if_no_peer_cert = false
+ ssl_options.honor_cipher_order = true",
+ [{rabbit,
+ [{ssl_listeners,[5671]},
+ {ssl_options,
+ [{cacertfile,"test/config_schema_SUITE_data/certs/cacert.pem"},
+ {certfile,"test/config_schema_SUITE_data/certs/cert.pem"},
+ {keyfile,"test/config_schema_SUITE_data/certs/key.pem"},
+ {depth,2},
+ {verify,verify_peer},
+ {fail_if_no_peer_cert, false},
+ {honor_cipher_order, true}]}]}],
+ []},
+ {ssl_options_honor_ecc_order,
+ "listeners.ssl.1 = 5671
+ ssl_options.cacertfile = test/config_schema_SUITE_data/certs/cacert.pem
+ ssl_options.certfile = test/config_schema_SUITE_data/certs/cert.pem
+ ssl_options.keyfile = test/config_schema_SUITE_data/certs/key.pem
+ ssl_options.depth = 2
+ ssl_options.verify = verify_peer
+ ssl_options.fail_if_no_peer_cert = false
+ ssl_options.honor_ecc_order = true",
+ [{rabbit,
+ [{ssl_listeners,[5671]},
+ {ssl_options,
+ [{cacertfile,"test/config_schema_SUITE_data/certs/cacert.pem"},
+ {certfile,"test/config_schema_SUITE_data/certs/cert.pem"},
+ {keyfile,"test/config_schema_SUITE_data/certs/key.pem"},
+ {depth,2},
+ {verify,verify_peer},
+ {fail_if_no_peer_cert, false},
+ {honor_ecc_order, true}]}]}],
+ []},
+
+ {ssl_cert_login_from_cn,
+ "ssl_cert_login_from = common_name",
+ [{rabbit,[{ssl_cert_login_from, common_name}]}],
+ []},
+
+ {ssl_cert_login_from_dn,
+ "ssl_cert_login_from = distinguished_name",
+ [{rabbit,[{ssl_cert_login_from, distinguished_name}]}],
+ []},
+
+ {ssl_cert_login_from_san_dns,
+ "ssl_cert_login_from = subject_alternative_name
+ ssl_cert_login_san_type = dns
+ ssl_cert_login_san_index = 0",
+ [{rabbit,[
+ {ssl_cert_login_from, subject_alternative_name},
+ {ssl_cert_login_san_type, dns},
+ {ssl_cert_login_san_index, 0}
+ ]}],
+ []},
+
+ {tcp_listen_options_linger_on,
+ "tcp_listen_options.linger.on = true
+ tcp_listen_options.linger.timeout = 100",
+ [{rabbit,[{tcp_listen_options,[{linger,{true,100}}]}]}],
+ []},
+ {tcp_listen_options_linger_off,
+ "tcp_listen_options.linger.on = false
+ tcp_listen_options.linger.timeout = 100",
+ [{rabbit,[{tcp_listen_options,[{linger,{false,100}}]}]}],
+ []},
+ {tcp_listen_options_linger_on_notimeout,
+ "tcp_listen_options.linger.on = true",
+ [{rabbit,[{tcp_listen_options,[{linger,{true,0}}]}]}],
+ []},
+ {tcp_listen_options_linger_timeout,
+ "tcp_listen_options.linger.timeout = 100",
+ [{rabbit,[{tcp_listen_options,[{linger,{false,100}}]}]}],
+ []},
+
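+ %% When only one end of the randomized startup delay range is given,
+ %% the other falls back to its default (5 for min, 60 for max), as the
+ %% two single-value cases below show.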
+ {cluster_formation_randomized_startup_delay_both_values,
+ "cluster_formation.randomized_startup_delay_range.min = 10
+ cluster_formation.randomized_startup_delay_range.max = 30",
+ [{rabbit, [{cluster_formation, [
+ {randomized_startup_delay_range, {10, 30}}
+ ]}]}],
+ []},
+
+ {cluster_formation_randomized_startup_delay_min_only,
+ "cluster_formation.randomized_startup_delay_range.min = 10",
+ [{rabbit, [{cluster_formation, [
+ {randomized_startup_delay_range, {10, 60}}
+ ]}]}],
+ []},
+
+ {cluster_formation_randomized_startup_delay_max_only,
+ "cluster_formation.randomized_startup_delay_range.max = 30",
+ [{rabbit, [{cluster_formation, [
+ {randomized_startup_delay_range, {5, 30}}
+ ]}]}],
+ []},
+
+ {cluster_formation_dns,
+ "cluster_formation.peer_discovery_backend = rabbit_peer_discovery_dns
+ cluster_formation.dns.hostname = 192.168.0.2.xip.io
+ cluster_formation.node_type = disc",
+ [{rabbit,
+ [{cluster_formation,
+ [{peer_discovery_dns,[{hostname,<<"192.168.0.2.xip.io">>}]},
+ {peer_discovery_backend,rabbit_peer_discovery_dns},
+ {node_type,disc}]}]}],
+ []},
+ {cluster_formation_classic,
+ "cluster_formation.peer_discovery_backend = rabbit_peer_discovery_classic_config
+ cluster_formation.node_type = disc",
+ [{rabbit,
+ [{cluster_formation,
+ [{peer_discovery_backend,rabbit_peer_discovery_classic_config},
+ {node_type,disc}]}]}],
+ []},
+ {cluster_formation_classic_ram,
+ "cluster_formation.peer_discovery_backend = rabbit_peer_discovery_classic_config
+ cluster_formation.node_type = ram",
+ [{rabbit,
+ [{cluster_formation,
+ [{peer_discovery_backend,rabbit_peer_discovery_classic_config},
+ {node_type,ram}]}]}],
+ []},
+ {background_gc_enabled,
+ "background_gc_enabled = true
+ background_gc_target_interval = 30000",
+ [{rabbit,
+ [{background_gc_enabled,true},{background_gc_target_interval,30000}]}],
+ []},
+ {background_gc_disabled,
+ "background_gc_enabled = false
+ background_gc_target_interval = 30000",
+ [{rabbit,
+ [{background_gc_enabled,false},{background_gc_target_interval,30000}]}],
+ []},
+ {credential_validator_length,
+ "credential_validator.validation_backend = rabbit_credential_validator_min_password_length
+credential_validator.min_length = 10",
+ [{rabbit,
+ [{credential_validator,
+ [{validation_backend,
+ rabbit_credential_validator_min_password_length},
+ {min_length,10}]}]}],
+ []},
+ {credential_validator_regexp,
+ "credential_validator.validation_backend = rabbit_credential_validator_password_regexp
+credential_validator.regexp = ^abc\\d+",
+ [{rabbit,
+ [{credential_validator,
+ [{validation_backend,rabbit_credential_validator_password_regexp},
+ {regexp,"^abc\\d+"}]}]}],
+ []},
+ {proxy_protocol_on,
+ "proxy_protocol = true",
+ [{rabbit,[{proxy_protocol,true}]}],[]},
+ {proxy_protocol_off,
+ "proxy_protocol = false",
+ [{rabbit,[{proxy_protocol,false}]}],[]},
+ {log_debug_file,
+ "log.file.level = debug",
+ [{rabbit,[{log, [{file, [{level, debug}]}]}]}],
+ []},
+ {log_debug_console,
+ "log.console = true
+ log.console.level = debug",
+ [{rabbit,[{log, [{console, [{enabled, true}, {level, debug}]}]}]}],
+ []},
+ {log_debug_exchange,
+ "log.exchange = true
+ log.exchange.level = debug",
+ [{rabbit,[{log, [{exchange, [{enabled, true}, {level, debug}]}]}]}],
+ []},
+ {log_debug_syslog,
+ "log.syslog = true
+ log.syslog.level = debug",
+ [{rabbit,[{log, [{syslog, [{enabled, true}, {level, debug}]}]}]}],
+ []},
+ {log_file_name,
+ "log.file = file_name",
+ [{rabbit,[{log, [{file, [{file, "file_name"}]}]}]}],
+ []},
+ {log_file_disabled,
+ "log.file = false",
+ [{rabbit,[{log, [{file, [{file, false}]}]}]}],
+ []},
+ {log_category_level,
+ "log.connection.level = debug
+ log.channel.level = error",
+ [{rabbit,[{log, [{categories, [{connection, [{level, debug}]},
+ {channel, [{level, error}]}]}]}]}],
+ []},
+ {log_category_file,
+ "log.connection.file = file_name_connection
+ log.channel.file = file_name_channel",
+ [{rabbit,[{log, [{categories, [{connection, [{file, "file_name_connection"}]},
+ {channel, [{file, "file_name_channel"}]}]}]}]}],
+ []},
+
+ {default_worker_pool_size,
+ "default_worker_pool_size = 512",
+ [{rabbit, [
+ {default_worker_pool_size, 512}
+ ]}],
+ []},
+
+ {delegate_count,
+ "delegate_count = 64",
+ [{rabbit, [
+ {delegate_count, 64}
+ ]}],
+ []},
+
+ {kernel_net_ticktime,
+ "net_ticktime = 20",
+ [{kernel, [
+ {net_ticktime, 20}
+ ]}],
+ []},
+
+ {rabbit_consumer_timeout,
+ "consumer_timeout = 20000",
+ [{rabbit, [
+ {consumer_timeout, 20000}
+ ]}],
+ []},
+
+ {rabbit_msg_store_shutdown_timeout,
+ "message_store_shutdown_timeout = 600000",
+ [{rabbit, [
+ {msg_store_shutdown_timeout, 600000}
+ ]}],
+ []},
+
+ {rabbit_mnesia_table_loading_retry_timeout,
+ "mnesia_table_loading_retry_timeout = 45000",
+ [{rabbit, [
+ {mnesia_table_loading_retry_timeout, 45000}
+ ]}],
+ []},
+
+ {log_syslog_settings,
+ "log.syslog = true
+ log.syslog.identity = rabbitmq
+ log.syslog.facility = user
+ log.syslog.multiline_mode = true
+ log.syslog.ip = 10.10.10.10
+ log.syslog.port = 123",
+ [
+ {rabbit,[{log, [{syslog, [{enabled, true}]}]}]},
+ {syslog, [{app_name, "rabbitmq"},
+ {facility, user},
+ {multiline_mode, true},
+ {dest_host, "10.10.10.10"},
+ {dest_port, 123}]}
+ ],
+ []},
+ {log_syslog_tcp,
+ "log.syslog = true
+ log.syslog.transport = tcp
+ log.syslog.protocol = rfc5424
+ log.syslog.host = syslog.my-network.com",
+ [
+ {rabbit,[{log, [{syslog, [{enabled, true}]}]}]},
+ {syslog, [{protocol, {rfc5424, tcp}},
+ {dest_host, "syslog.my-network.com"}]}
+ ],
+ []},
+ {log_syslog_udp_default,
+ "log.syslog = true
+ log.syslog.protocol = rfc3164",
+ [
+ {rabbit,[{log, [{syslog, [{enabled, true}]}]}]},
+ {syslog, [{protocol, {rfc3164, udp}}]}
+ ],
+ []},
+ {log_syslog_tls,
+ "log.syslog = true
+ log.syslog.transport = tls
+ log.syslog.ssl_options.cacertfile = test/config_schema_SUITE_data/certs/cacert.pem
+ log.syslog.ssl_options.certfile = test/config_schema_SUITE_data/certs/cert.pem
+ log.syslog.ssl_options.keyfile = test/config_schema_SUITE_data/certs/key.pem
+ log.syslog.ssl_options.verify = verify_peer
+ log.syslog.ssl_options.fail_if_no_peer_cert = false",
+ [{rabbit, [{log, [{syslog, [{enabled, true}]}]}]},
+ {syslog, [{protocol, {rfc5424, tls,
+ [{verify,verify_peer},
+ {fail_if_no_peer_cert,false},
+ {cacertfile,"test/config_schema_SUITE_data/certs/cacert.pem"},
+ {certfile,"test/config_schema_SUITE_data/certs/cert.pem"},
+ {keyfile,"test/config_schema_SUITE_data/certs/key.pem"}]}}]}],
+ []},
+
+ %%
+ %% Definitions
+ %%
+
+ {definition_files, "load_definitions = test/definition_import_SUITE_data/case1.json",
+ [{rabbit,
+ [{load_definitions, "test/definition_import_SUITE_data/case1.json"}]}],
+ []},
+
+ %%
+ %% Raft
+ %%
+
+ {raft_data_dir,
+ "raft.data_dir = /data/rabbitmq/raft/log",
+ [{ra, [
+ {data_dir, "/data/rabbitmq/raft/log"}
+ ]}],
+ []},
+
+ {raft_segment_max_entries,
+ "raft.segment_max_entries = 65536",
+ [{ra, [
+ {segment_max_entries, 65536}
+ ]}],
+ []},
+
+ {raft_wal_max_size_bytes,
+ "raft.wal_max_size_bytes = 1048576",
+ [{ra, [
+ {wal_max_size_bytes, 1048576}
+ ]}],
+ []},
+
+ {raft_wal_max_batch_size,
+ "raft.wal_max_batch_size = 4096",
+ [{ra, [
+ {wal_max_batch_size, 4096}
+ ]}],
+ []},
+
+ {raft_snapshot_chunk_size,
+ "raft.snapshot_chunk_size = 1000000",
+ [{ra, [
+ {snapshot_chunk_size, 1000000}
+ ]}],
+ []}
+
+].
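+
+%% For reference: each snippet above takes the form
+%% {Name, Conf, ExpectedConfig, Plugins} or, when a pre-existing
+%% advanced.config fragment is part of the scenario,
+%% {Name, Conf, AdvancedConfig, ExpectedConfig, Plugins}; the trailing
+%% list (empty for all of the snippets here) names the plugins a
+%% snippet requires.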
diff --git a/deps/rabbit/test/confirms_rejects_SUITE.erl b/deps/rabbit/test/confirms_rejects_SUITE.erl
new file mode 100644
index 0000000000..a51253885c
--- /dev/null
+++ b/deps/rabbit/test/confirms_rejects_SUITE.erl
@@ -0,0 +1,412 @@
+-module(confirms_rejects_SUITE).
+
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+-compile(export_all).
+
+all() ->
+ [
+ {group, parallel_tests}
+ ].
+
+groups() ->
+ OverflowTests = [
+ confirms_rejects_conflict,
+ policy_resets_to_default
+ ],
+ [
+ {parallel_tests, [parallel], [
+ {overflow_reject_publish_dlx, [parallel], OverflowTests},
+ {overflow_reject_publish, [parallel], OverflowTests},
+ dead_queue_rejects,
+ mixed_dead_alive_queues_reject
+ ]}
+ ].
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+
+init_per_group(overflow_reject_publish, Config) ->
+ rabbit_ct_helpers:set_config(Config, [
+ {overflow, <<"reject-publish">>}
+ ]);
+init_per_group(overflow_reject_publish_dlx, Config) ->
+ rabbit_ct_helpers:set_config(Config, [
+ {overflow, <<"reject-publish-dlx">>}
+ ]);
+init_per_group(Group, Config) ->
+ ClusterSize = 2,
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, Group},
+ {rmq_nodes_count, ClusterSize}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_group(overflow_reject_publish, _Config) ->
+ ok;
+end_per_group(overflow_reject_publish_dlx, _Config) ->
+ ok;
+end_per_group(_Group, Config) ->
+ rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_testcase(policy_resets_to_default = Testcase, Config) ->
+ Conn = rabbit_ct_client_helpers:open_unmanaged_connection(Config),
+ rabbit_ct_helpers:testcase_started(
+ rabbit_ct_helpers:set_config(Config, [{conn, Conn}]), Testcase);
+init_per_testcase(Testcase, Config)
+ when Testcase == confirms_rejects_conflict;
+ Testcase == dead_queue_rejects;
+ Testcase == mixed_dead_alive_queues_reject ->
+ Conn = rabbit_ct_client_helpers:open_unmanaged_connection(Config),
+ Conn1 = rabbit_ct_client_helpers:open_unmanaged_connection(Config),
+
+ rabbit_ct_helpers:testcase_started(
+ rabbit_ct_helpers:set_config(Config, [{conn, Conn}, {conn1, Conn1}]),
+ Testcase).
+
+end_per_testcase(policy_resets_to_default = Testcase, Config) ->
+ {_, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ XOverflow = ?config(overflow, Config),
+ QueueName = <<"policy_resets_to_default", "_", XOverflow/binary>>,
+ amqp_channel:call(Ch, #'queue.delete'{queue = QueueName}),
+ rabbit_ct_client_helpers:close_channels_and_connection(Config, 0),
+
+ Conn = ?config(conn, Config),
+
+ rabbit_ct_client_helpers:close_connection(Conn),
+
+ rabbit_ct_helpers:testcase_finished(Config, Testcase);
+end_per_testcase(confirms_rejects_conflict = Testcase, Config) ->
+ {_, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ XOverflow = ?config(overflow, Config),
+ QueueName = <<"confirms_rejects_conflict", "_", XOverflow/binary>>,
+ amqp_channel:call(Ch, #'queue.delete'{queue = QueueName}),
+ end_per_testcase0(Testcase, Config);
+end_per_testcase(dead_queue_rejects = Testcase, Config) ->
+ {_, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ amqp_channel:call(Ch, #'queue.delete'{queue = <<"dead_queue_rejects">>}),
+ end_per_testcase0(Testcase, Config);
+end_per_testcase(mixed_dead_alive_queues_reject = Testcase, Config) ->
+ {_, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ amqp_channel:call(Ch, #'queue.delete'{queue = <<"mixed_dead_alive_queues_reject_dead">>}),
+ amqp_channel:call(Ch, #'queue.delete'{queue = <<"mixed_dead_alive_queues_reject_alive">>}),
+ amqp_channel:call(Ch, #'exchange.delete'{exchange = <<"mixed_dead_alive_queues_reject">>}),
+ end_per_testcase0(Testcase, Config).
+
+end_per_testcase0(Testcase, Config) ->
+ rabbit_ct_client_helpers:close_channels_and_connection(Config, 0),
+
+ Conn = ?config(conn, Config),
+ Conn1 = ?config(conn1, Config),
+
+ rabbit_ct_client_helpers:close_connection(Conn),
+ rabbit_ct_client_helpers:close_connection(Conn1),
+
+ clean_acks_mailbox(),
+
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+dead_queue_rejects(Config) ->
+ Conn = ?config(conn, Config),
+ {ok, Ch} = amqp_connection:open_channel(Conn),
+ QueueName = <<"dead_queue_rejects">>,
+ amqp_channel:call(Ch, #'confirm.select'{}),
+ amqp_channel:register_confirm_handler(Ch, self()),
+
+ amqp_channel:call(Ch, #'queue.declare'{queue = QueueName,
+ durable = true}),
+
+ amqp_channel:call(Ch, #'basic.publish'{routing_key = QueueName},
+ #amqp_msg{payload = <<"HI">>}),
+
+ receive
+ {'basic.ack',_,_} -> ok
+ after 10000 ->
+ error(timeout_waiting_for_initial_ack)
+ end,
+
+ BasicPublish = #'basic.publish'{routing_key = QueueName},
+ AmqpMsg = #amqp_msg{payload = <<"HI">>},
+ kill_queue_expect_nack(Config, Ch, QueueName, BasicPublish, AmqpMsg, 5).
+
+mixed_dead_alive_queues_reject(Config) ->
+ Conn = ?config(conn, Config),
+ {ok, Ch} = amqp_connection:open_channel(Conn),
+ QueueNameDead = <<"mixed_dead_alive_queues_reject_dead">>,
+ QueueNameAlive = <<"mixed_dead_alive_queues_reject_alive">>,
+ ExchangeName = <<"mixed_dead_alive_queues_reject">>,
+
+ amqp_channel:call(Ch, #'confirm.select'{}),
+ amqp_channel:register_confirm_handler(Ch, self()),
+
+ amqp_channel:call(Ch, #'queue.declare'{queue = QueueNameDead,
+ durable = true}),
+ amqp_channel:call(Ch, #'queue.declare'{queue = QueueNameAlive,
+ durable = true}),
+
+ amqp_channel:call(Ch, #'exchange.declare'{exchange = ExchangeName,
+ durable = true}),
+
+ amqp_channel:call(Ch, #'queue.bind'{exchange = ExchangeName,
+ queue = QueueNameAlive,
+ routing_key = <<"route">>}),
+
+ amqp_channel:call(Ch, #'queue.bind'{exchange = ExchangeName,
+ queue = QueueNameDead,
+ routing_key = <<"route">>}),
+
+ amqp_channel:call(Ch, #'basic.publish'{exchange = ExchangeName,
+ routing_key = <<"route">>},
+ #amqp_msg{payload = <<"HI">>}),
+
+ receive
+ {'basic.ack',_,_} -> ok;
+ {'basic.nack',_,_,_} -> error(expecting_ack_got_nack)
+ after 50000 ->
+ error({timeout_waiting_for_initial_ack, process_info(self(), messages)})
+ end,
+
+ BasicPublish = #'basic.publish'{exchange = ExchangeName, routing_key = <<"route">>},
+ AmqpMsg = #amqp_msg{payload = <<"HI">>},
+ kill_queue_expect_nack(Config, Ch, QueueNameDead, BasicPublish, AmqpMsg, 5).
+
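+%% Kill the queue, publish, and expect a basic.nack for the publish.
+%% The kill can race with the publish (a restarted queue may still
+%% accept and ack the message), so retry up to Tries times before
+%% giving up.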
+kill_queue_expect_nack(_Config, _Ch, _QueueName, _BasicPublish, _AmqpMsg, 0) ->
+ error(expecting_nack_got_ack);
+kill_queue_expect_nack(Config, Ch, QueueName, BasicPublish, AmqpMsg, Tries) ->
+ kill_the_queue(QueueName, Config),
+ amqp_channel:cast(Ch, BasicPublish, AmqpMsg),
+ R = receive
+ {'basic.nack',_,_,_} ->
+ ok;
+ {'basic.ack',_,_} ->
+ retry
+ after 10000 ->
+ error({timeout_waiting_for_nack, process_info(self(), messages)})
+ end,
+ case R of
+ ok ->
+ ok;
+ retry ->
+ kill_queue_expect_nack(Config, Ch, QueueName, BasicPublish, AmqpMsg, Tries - 1)
+ end.
+
+confirms_rejects_conflict(Config) ->
+ Conn = ?config(conn, Config),
+ Conn1 = ?config(conn1, Config),
+
+ {ok, Ch} = amqp_connection:open_channel(Conn),
+ {ok, Ch1} = amqp_connection:open_channel(Conn1),
+
+ false = Conn =:= Conn1,
+ false = Ch =:= Ch1,
+
+ amqp_channel:call(Ch, #'confirm.select'{}),
+ amqp_channel:register_confirm_handler(Ch, self()),
+
+ XOverflow = ?config(overflow, Config),
+ QueueName = <<"confirms_rejects_conflict", "_", XOverflow/binary>>,
+ amqp_channel:call(Ch, #'queue.declare'{queue = QueueName,
+ durable = true,
+ arguments = [{<<"x-max-length">>, long, 12},
+ {<<"x-overflow">>, longstr, XOverflow}]
+ }),
+    %% Issue four basic.get casts at a time. Do that in a tight loop.
+ Consume = fun Consume() ->
+ receive
+ stop -> ok
+ after 1 ->
+ amqp_channel:cast(Ch1, #'basic.get'{queue = QueueName, no_ack = true}),
+ amqp_channel:cast(Ch1, #'basic.get'{queue = QueueName, no_ack = true}),
+ amqp_channel:cast(Ch1, #'basic.get'{queue = QueueName, no_ack = true}),
+ amqp_channel:cast(Ch1, #'basic.get'{queue = QueueName, no_ack = true}),
+ Consume()
+ end
+ end,
+
+ Produce = fun
+ Produce(0) -> ok;
+ Produce(N) ->
+ amqp_channel:call(Ch, #'basic.publish'{routing_key = QueueName},
+ #amqp_msg{payload = <<"HI">>}),
+ Produce(N - 1)
+ end,
+
+    %% Optionally pre-fill the queue (disabled; the produce run below
+    %% fills it almost immediately anyway).
+    % Produce(20),
+
+ %% Consumer is a separate process.
+ Consumer = spawn(Consume),
+
+    %% A long run that should, with luck, trigger the race between
+    %% confirms and rejects.
+ Produce(500000),
+
+ Result = validate_acks_mailbox(),
+
+ Consumer ! stop,
+ case Result of
+ ok -> ok;
+ {error, E} -> error(E)
+ end.
+
+policy_resets_to_default(Config) ->
+ Conn = ?config(conn, Config),
+
+ {ok, Ch} = amqp_connection:open_channel(Conn),
+
+ amqp_channel:call(Ch, #'confirm.select'{}),
+ amqp_channel:register_confirm_handler(Ch, self()),
+
+ XOverflow = ?config(overflow, Config),
+ QueueName = <<"policy_resets_to_default", "_", XOverflow/binary>>,
+ amqp_channel:call(Ch, #'queue.declare'{queue = QueueName,
+ durable = true
+ }),
+ MaxLength = 2,
+ rabbit_ct_broker_helpers:set_policy(
+ Config, 0,
+ QueueName, QueueName, <<"queues">>,
+ [{<<"max-length">>, MaxLength}, {<<"overflow">>, XOverflow}]),
+
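+    %% Give the policy a moment to be applied to the queue before
+    %% publishing.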
+ timer:sleep(1000),
+
+ [amqp_channel:call(Ch, #'basic.publish'{routing_key = QueueName},
+ #amqp_msg{payload = <<"HI">>})
+ || _ <- lists:seq(1, MaxLength)],
+
+ assert_acks(MaxLength),
+
+ #'queue.declare_ok'{message_count = MaxLength} =
+ amqp_channel:call(Ch, #'queue.declare'{queue = QueueName,
+ durable = true}),
+
+ RejectedMessage = <<"HI-rejected">>,
+ amqp_channel:call(Ch, #'basic.publish'{routing_key = QueueName},
+ #amqp_msg{payload = RejectedMessage}),
+
+ assert_nack(),
+
+ rabbit_ct_broker_helpers:set_policy(
+ Config, 0,
+ QueueName, QueueName, <<"queues">>,
+ [{<<"max-length">>, MaxLength}]),
+
+ NotRejectedMessage = <<"HI-not-rejected">>,
+ amqp_channel:call(Ch, #'basic.publish'{routing_key = QueueName},
+ #amqp_msg{payload = NotRejectedMessage}),
+
+ assert_ack(),
+
+ #'queue.declare_ok'{message_count = MaxLength} =
+ amqp_channel:call(Ch, #'queue.declare'{queue = QueueName,
+ durable = true}),
+
+ Msgs = consume_all_messages(Ch, QueueName),
+ case {lists:member(RejectedMessage, Msgs), lists:member(NotRejectedMessage, Msgs)} of
+ {true, _} -> error({message_should_be_rejected, RejectedMessage});
+ {_, false} -> error({message_should_be_enqueued, NotRejectedMessage});
+ _ -> ok
+ end.
+
+consume_all_messages(Ch, QueueName) ->
+ consume_all_messages(Ch, QueueName, []).
+
+consume_all_messages(Ch, QueueName, Msgs) ->
+ case amqp_channel:call(Ch, #'basic.get'{queue = QueueName, no_ack = true}) of
+ {#'basic.get_ok'{}, #amqp_msg{payload = Msg}} ->
+ consume_all_messages(Ch, QueueName, [Msg | Msgs]);
+ #'basic.get_empty'{} -> Msgs
+ end.
+
+assert_ack() ->
+ receive {'basic.ack', _, _} -> ok
+ after 10000 -> error(timeout_waiting_for_ack)
+ end,
+ clean_acks_mailbox().
+
+assert_nack() ->
+ receive {'basic.nack', _, _, _} -> ok
+ after 10000 -> error(timeout_waiting_for_nack)
+ end,
+ clean_acks_mailbox().
+
+assert_acks(N) ->
+ receive {'basic.ack', N, _} -> ok
+ after 10000 -> error({timeout_waiting_for_ack, N})
+ end,
+ clean_acks_mailbox().
+
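+%% Drain all confirms from the mailbox, checking that no ack or nack
+%% carries a sequence number lower than the highest multiple-flagged
+%% confirm seen so far: a multiple confirm covers every delivery tag up
+%% to N, so a later confirm below that point would be a duplicate.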
+validate_acks_mailbox() ->
+ Result = validate_acks_mailbox({0, ok}),
+ clean_acks_mailbox(),
+ Result.
+
+validate_acks_mailbox({LatestMultipleN, LatestMultipleAck}) ->
+ Received = receive
+ {'basic.ack', N, Multiple} = A -> {N, Multiple, A};
+ {'basic.nack', N, Multiple, _} = A -> {N, Multiple, A}
+ after
+ 10000 -> none
+ end,
+ % ct:pal("Received ~p~n", [Received]),
+ case Received of
+ {LatestN, IsMultiple, AckOrNack} ->
+ case LatestN < LatestMultipleN of
+ true ->
+ {error, {received_ack_lower_than_latest_multiple, AckOrNack, smaller_than, LatestMultipleAck}};
+ false ->
+ case IsMultiple of
+ true -> validate_acks_mailbox({LatestN, AckOrNack});
+ false -> validate_acks_mailbox({LatestMultipleN, LatestMultipleAck})
+ end
+ end;
+ none -> ok
+ end.
+
+clean_acks_mailbox() ->
+ receive
+ {'basic.ack', _, _} -> clean_acks_mailbox();
+ {'basic.nack', _, _, _} -> clean_acks_mailbox()
+ after
+ 1000 -> done
+ end.
+
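+%% Kill the queue process on the broker node repeatedly (50 rapid
+%% kills, then re-check) until it stays dead.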
+kill_the_queue(QueueName, Config) ->
+ rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, kill_the_queue, [QueueName]).
+
+kill_the_queue(QueueName) ->
+ [begin
+ {ok, Q} = rabbit_amqqueue:lookup({resource, <<"/">>, queue, QueueName}),
+ Pid = amqqueue:get_pid(Q),
+ ct:pal("~w killed", [Pid]),
+ timer:sleep(1),
+ exit(Pid, kill)
+ end
+ || _ <- lists:seq(1, 50)],
+ timer:sleep(1),
+ {ok, Q} = rabbit_amqqueue:lookup({resource, <<"/">>, queue, QueueName}),
+ Pid = amqqueue:get_pid(Q),
+ case is_process_alive(Pid) of
+ %% Try to kill it again
+ true -> kill_the_queue(QueueName);
+ false -> ok
+ end.
+
+flush() ->
+ receive
+ Any ->
+ ct:pal("flush ~p", [Any]),
+ flush()
+ after 0 ->
+ ok
+ end.
diff --git a/deps/rabbit/test/consumer_timeout_SUITE.erl b/deps/rabbit/test/consumer_timeout_SUITE.erl
new file mode 100644
index 0000000000..468714328d
--- /dev/null
+++ b/deps/rabbit/test/consumer_timeout_SUITE.erl
@@ -0,0 +1,262 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+-module(consumer_timeout_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("kernel/include/file.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-compile(export_all).
+
+-define(TIMEOUT, 30000).
+
+-import(quorum_queue_utils, [wait_for_messages/2]).
+
+all() ->
+ [
+ {group, parallel_tests}
+ ].
+
+groups() ->
+ AllTests = [consumer_timeout,
+ consumer_timeout_basic_get,
+ consumer_timeout_no_basic_cancel_capability
+ ],
+ [
+ {parallel_tests, [],
+ [
+ {classic_queue, [parallel], AllTests},
+ {mirrored_queue, [parallel], AllTests},
+ {quorum_queue, [parallel], AllTests}
+ ]}
+ ].
+
+suite() ->
+ [
+ {timetrap, {minutes, 7}}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(classic_queue, Config) ->
+ rabbit_ct_helpers:set_config(
+ Config,
+ [{queue_args, [{<<"x-queue-type">>, longstr, <<"classic">>}]},
+ {queue_durable, true}]);
+init_per_group(quorum_queue, Config) ->
+ case rabbit_ct_broker_helpers:enable_feature_flag(Config, quorum_queue) of
+ ok ->
+ rabbit_ct_helpers:set_config(
+ Config,
+ [{queue_args, [{<<"x-queue-type">>, longstr, <<"quorum">>}]},
+ {queue_durable, true}]);
+ Skip ->
+ Skip
+ end;
+init_per_group(mirrored_queue, Config) ->
+ rabbit_ct_broker_helpers:set_ha_policy(Config, 0, <<"^max_length.*queue">>,
+ <<"all">>, [{<<"ha-sync-mode">>, <<"automatic">>}]),
+ Config1 = rabbit_ct_helpers:set_config(
+ Config, [{is_mirrored, true},
+ {queue_args, [{<<"x-queue-type">>, longstr, <<"classic">>}]},
+ {queue_durable, true}]),
+ rabbit_ct_helpers:run_steps(Config1, []);
+init_per_group(Group, Config0) ->
+ case lists:member({group, Group}, all()) of
+ true ->
+ ClusterSize = 3,
+ Config = rabbit_ct_helpers:merge_app_env(
+ Config0, {rabbit, [{channel_tick_interval, 1000},
+ {quorum_tick_interval, 1000},
+ {consumer_timeout, 5000}]}),
+ Config1 = rabbit_ct_helpers:set_config(
+ Config, [ {rmq_nodename_suffix, Group},
+ {rmq_nodes_count, ClusterSize}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps());
+ false ->
+ rabbit_ct_helpers:run_steps(Config0, [])
+ end.
+
+end_per_group(Group, Config) ->
+ case lists:member({group, Group}, all()) of
+ true ->
+ rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps());
+ false ->
+ Config
+ end.
+
+init_per_testcase(Testcase, Config) ->
+ Group = proplists:get_value(name, ?config(tc_group_properties, Config)),
+ Q = rabbit_data_coercion:to_binary(io_lib:format("~p_~p", [Group, Testcase])),
+ Q2 = rabbit_data_coercion:to_binary(io_lib:format("~p_~p_2", [Group, Testcase])),
+ Config1 = rabbit_ct_helpers:set_config(Config, [{queue_name, Q},
+ {queue_name_2, Q2}]),
+ rabbit_ct_helpers:testcase_started(Config1, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ {_, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ amqp_channel:call(Ch, #'queue.delete'{queue = ?config(queue_name, Config)}),
+ amqp_channel:call(Ch, #'queue.delete'{queue = ?config(queue_name_2, Config)}),
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
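+%% init_per_group sets consumer_timeout to 5000 ms; a delivery that is
+%% never acknowledged should therefore make the broker close the
+%% channel (but not the connection) well within the 30 s window used
+%% below.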
+consumer_timeout(Config) ->
+ {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ declare_queue(Ch, Config, QName),
+ publish(Ch, QName, [<<"msg1">>]),
+ wait_for_messages(Config, [[QName, <<"1">>, <<"1">>, <<"0">>]]),
+ subscribe(Ch, QName, false),
+ erlang:monitor(process, Conn),
+ erlang:monitor(process, Ch),
+ receive
+ {'DOWN', _, process, Ch, _} -> ok
+ after 30000 ->
+ flush(1),
+ exit(channel_exit_expected)
+ end,
+ receive
+ {'DOWN', _, process, Conn, _} ->
+ flush(1),
+ exit(unexpected_connection_exit)
+ after 2000 ->
+ ok
+ end,
+ rabbit_ct_client_helpers:close_channel(Ch),
+ ok.
+
+consumer_timeout_basic_get(Config) ->
+ {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ declare_queue(Ch, Config, QName),
+ publish(Ch, QName, [<<"msg1">>]),
+ wait_for_messages(Config, [[QName, <<"1">>, <<"1">>, <<"0">>]]),
+ [_DelTag] = consume(Ch, QName, [<<"msg1">>]),
+ erlang:monitor(process, Conn),
+ erlang:monitor(process, Ch),
+ receive
+ {'DOWN', _, process, Ch, _} -> ok
+ after 30000 ->
+ flush(1),
+ exit(channel_exit_expected)
+ end,
+ receive
+ {'DOWN', _, process, Conn, _} ->
+ flush(1),
+ exit(unexpected_connection_exit)
+ after 2000 ->
+ ok
+ end,
+ ok.
+
+
+-define(CLIENT_CAPABILITIES,
+ [{<<"publisher_confirms">>, bool, true},
+ {<<"exchange_exchange_bindings">>, bool, true},
+ {<<"basic.nack">>, bool, true},
+ {<<"consumer_cancel_notify">>, bool, false},
+ {<<"connection.blocked">>, bool, true},
+ {<<"authentication_failure_close">>, bool, true}]).
+
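+%% Checks that the consumer timeout still closes the channel when the
+%% client does not advertise the consumer_cancel_notify capability
+%% (note the 'false' above) and therefore cannot be sent a
+%% basic.cancel.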
+consumer_timeout_no_basic_cancel_capability(Config) ->
+ Port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp),
+ Props = [{<<"capabilities">>, table, ?CLIENT_CAPABILITIES}],
+ AmqpParams = #amqp_params_network{port = Port,
+ host = "localhost",
+ virtual_host = <<"/">>,
+ client_properties = Props
+ },
+ {ok, Conn} = amqp_connection:start(AmqpParams),
+ {ok, Ch} = amqp_connection:open_channel(Conn),
+ QName = ?config(queue_name, Config),
+ declare_queue(Ch, Config, QName),
+ publish(Ch, QName, [<<"msg1">>]),
+ wait_for_messages(Config, [[QName, <<"1">>, <<"1">>, <<"0">>]]),
+ erlang:monitor(process, Conn),
+ erlang:monitor(process, Ch),
+ subscribe(Ch, QName, false),
+ receive
+ {#'basic.deliver'{delivery_tag = _,
+ redelivered = false}, _} ->
+              %% deliberately do nothing with the delivery; this
+              %% should trigger the consumer timeout
+ ok
+ after 5000 ->
+ exit(deliver_timeout)
+ end,
+ receive
+ {'DOWN', _, process, Ch, _} -> ok
+ after 30000 ->
+ flush(1),
+ exit(channel_exit_expected)
+ end,
+ receive
+ {'DOWN', _, process, Conn, _} ->
+ flush(1),
+ exit(unexpected_connection_exit)
+ after 2000 ->
+ ok
+ end,
+ ok.
+%%%%%%%%%%%%%%%%%%%%%%%%
+%% Test helpers
+%%%%%%%%%%%%%%%%%%%%%%%%
+
+declare_queue(Ch, Config, QName) ->
+ Args = ?config(queue_args, Config),
+ Durable = ?config(queue_durable, Config),
+ #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = QName,
+ arguments = Args,
+ durable = Durable}).
+publish(Ch, QName, Payloads) ->
+ [amqp_channel:call(Ch, #'basic.publish'{routing_key = QName}, #amqp_msg{payload = Payload})
+ || Payload <- Payloads].
+
+consume(Ch, QName, Payloads) ->
+ consume(Ch, QName, false, Payloads).
+
+consume(Ch, QName, NoAck, Payloads) ->
+ [begin
+ {#'basic.get_ok'{delivery_tag = DTag}, #amqp_msg{payload = Payload}} =
+ amqp_channel:call(Ch, #'basic.get'{queue = QName,
+ no_ack = NoAck}),
+ DTag
+ end || Payload <- Payloads].
+
+subscribe(Ch, Queue, NoAck) ->
+ subscribe(Ch, Queue, NoAck, <<"ctag">>).
+
+subscribe(Ch, Queue, NoAck, Ctag) ->
+ amqp_channel:subscribe(Ch, #'basic.consume'{queue = Queue,
+ no_ack = NoAck,
+ consumer_tag = Ctag},
+ self()),
+ receive
+ #'basic.consume_ok'{consumer_tag = Ctag} ->
+ ok
+ end.
+
+flush(T) ->
+ receive X ->
+ ct:pal("flushed ~w", [X]),
+ flush(T)
+ after T ->
+ ok
+ end.
diff --git a/deps/rabbit/test/crashing_queues_SUITE.erl b/deps/rabbit/test/crashing_queues_SUITE.erl
new file mode 100644
index 0000000000..cf88fb00f0
--- /dev/null
+++ b/deps/rabbit/test/crashing_queues_SUITE.erl
@@ -0,0 +1,267 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(crashing_queues_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, cluster_size_2}
+ ].
+
+groups() ->
+ [
+ {cluster_size_2, [], [
+ crashing_unmirrored,
+ crashing_mirrored,
+ give_up_after_repeated_crashes
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(cluster_size_2, Config) ->
+ rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodes_count, 2}
+ ]).
+
+end_per_group(_, Config) ->
+ Config.
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase),
+ ClusterSize = ?config(rmq_nodes_count, Config),
+ TestNumber = rabbit_ct_helpers:testcase_number(Config, ?MODULE, Testcase),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, Testcase},
+ {tcp_ports_base, {skip_n_nodes, TestNumber * ClusterSize}}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_testcase(Testcase, Config) ->
+ Config1 = rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()),
+ rabbit_ct_helpers:testcase_finished(Config1, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+crashing_unmirrored(Config) ->
+ [A, B] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ ChA = rabbit_ct_client_helpers:open_channel(Config, A),
+ ConnB = rabbit_ct_client_helpers:open_connection(Config, B),
+ QName = <<"crashing_unmirrored-q">>,
+ amqp_channel:call(ChA, #'confirm.select'{}),
+ test_queue_failure(A, ChA, ConnB, 1, 0,
+ #'queue.declare'{queue = QName, durable = true}),
+ test_queue_failure(A, ChA, ConnB, 0, 0,
+ #'queue.declare'{queue = QName, durable = false}),
+ ok.
+
+crashing_mirrored(Config) ->
+ [A, B] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ rabbit_ct_broker_helpers:set_ha_policy(Config, A, <<".*">>, <<"all">>),
+ ChA = rabbit_ct_client_helpers:open_channel(Config, A),
+ ConnB = rabbit_ct_client_helpers:open_connection(Config, B),
+ QName = <<"crashing_mirrored-q">>,
+ amqp_channel:call(ChA, #'confirm.select'{}),
+ test_queue_failure(A, ChA, ConnB, 2, 1,
+ #'queue.declare'{queue = QName, durable = true}),
+ ok.
+
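+%% Publish one transient and one durable message, kill the queue
+%% process while a racing connection keeps re-declaring it, then assert
+%% how many messages and how many followers survive the restart.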
+test_queue_failure(Node, Ch, RaceConn, MsgCount, FollowerCount, Decl) ->
+ #'queue.declare_ok'{queue = QName} = amqp_channel:call(Ch, Decl),
+ try
+ publish(Ch, QName, transient),
+ publish(Ch, QName, durable),
+ Racer = spawn_declare_racer(RaceConn, Decl),
+ kill_queue(Node, QName),
+ assert_message_count(MsgCount, Ch, QName),
+ assert_follower_count(FollowerCount, Node, QName),
+ stop_declare_racer(Racer)
+ after
+ amqp_channel:call(Ch, #'queue.delete'{queue = QName})
+ end.
+
+give_up_after_repeated_crashes(Config) ->
+ [A, B] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ ChA = rabbit_ct_client_helpers:open_channel(Config, A),
+ ChB = rabbit_ct_client_helpers:open_channel(Config, B),
+ QName = <<"give_up_after_repeated_crashes-q">>,
+ amqp_channel:call(ChA, #'confirm.select'{}),
+ amqp_channel:call(ChA, #'queue.declare'{queue = QName,
+ durable = true}),
+ await_state(A, QName, running),
+ publish(ChA, QName, durable),
+ kill_queue_hard(A, QName),
+ {'EXIT', _} = (catch amqp_channel:call(
+ ChA, #'queue.declare'{queue = QName,
+ durable = true})),
+ await_state(A, QName, crashed),
+ amqp_channel:call(ChB, #'queue.delete'{queue = QName}),
+ amqp_channel:call(ChB, #'queue.declare'{queue = QName,
+ durable = true}),
+ await_state(A, QName, running),
+
+ %% Since it's convenient, also test absent queue status here.
+ rabbit_ct_broker_helpers:stop_node(Config, B),
+ await_state(A, QName, down),
+ ok.
+
+
+publish(Ch, QName, DelMode) ->
+ Publish = #'basic.publish'{exchange = <<>>, routing_key = QName},
+ Msg = #amqp_msg{props = #'P_basic'{delivery_mode = del_mode(DelMode)}},
+ amqp_channel:cast(Ch, Publish, Msg),
+ amqp_channel:wait_for_confirms(Ch).
+
+del_mode(transient) -> 1;
+del_mode(durable) -> 2.
+
+spawn_declare_racer(Conn, Decl) ->
+ Self = self(),
+ spawn_link(fun() -> declare_racer_loop(Self, Conn, Decl) end).
+
+stop_declare_racer(Pid) ->
+ Pid ! stop,
+ MRef = erlang:monitor(process, Pid),
+ receive
+ {'DOWN', MRef, process, Pid, _} -> ok
+ end.
+
+declare_racer_loop(Parent, Conn, Decl) ->
+ receive
+ stop -> unlink(Parent)
+ after 0 ->
+          %% Catch here because we might happen to hit the queue
+          %% while it is in the middle of recovering and thus
+          %% explode with NOT_FOUND because it has crashed. That does
+          %% not matter: we are only in this loop to try to fool the
+          %% recovery code anyway.
+ try
+ case amqp_connection:open_channel(Conn) of
+ {ok, Ch} -> amqp_channel:call(Ch, Decl);
+ closing -> ok
+ end
+ catch
+ exit:_ ->
+ ok
+ end,
+ declare_racer_loop(Parent, Conn, Decl)
+ end.
+
+await_state(Node, QName, State) ->
+ await_state(Node, QName, State, 30000).
+
+await_state(Node, QName, State, Time) ->
+ case state(Node, QName) of
+ State ->
+ ok;
+ Other ->
+ case Time of
+ 0 -> exit({timeout_awaiting_state, State, Other});
+ _ -> timer:sleep(100),
+ await_state(Node, QName, State, Time - 100)
+ end
+ end.
+
+state(Node, QName) ->
+ V = <<"/">>,
+ Res = rabbit_misc:r(V, queue, QName),
+ Infos = rpc:call(Node, rabbit_amqqueue, info_all, [V, [name, state]]),
+ case Infos of
+ [] -> undefined;
+ [[{name, Res}, {state, State}]] -> State
+ end.
+
+kill_queue_hard(Node, QName) ->
+ case kill_queue(Node, QName) of
+ crashed -> ok;
+ _NewPid -> timer:sleep(100),
+ kill_queue_hard(Node, QName)
+ end.
+
+kill_queue(Node, QName) ->
+ Pid1 = queue_pid(Node, QName),
+ exit(Pid1, boom),
+ await_new_pid(Node, QName, Pid1).
+
+queue_pid(Node, QName) ->
+ Q = lookup(Node, QName),
+ QPid = amqqueue:get_pid(Q),
+ State = amqqueue:get_state(Q),
+ #resource{virtual_host = VHost} = amqqueue:get_name(Q),
+ case State of
+ crashed ->
+ case rabbit_amqqueue_sup_sup:find_for_vhost(VHost, Node) of
+ {error, {queue_supervisor_not_found, _}} -> {error, no_sup};
+ {ok, SPid} ->
+ case sup_child(Node, SPid) of
+ {ok, _} -> QPid; %% restarting
+ {error, no_child} -> crashed %% given up
+ end
+ end;
+ _ -> QPid
+ end.
+
+sup_child(Node, Sup) ->
+ case rpc:call(Node, supervisor2, which_children, [Sup]) of
+ [{_, Child, _, _}] -> {ok, Child};
+ [] -> {error, no_child};
+ {badrpc, {'EXIT', {noproc, _}}} -> {error, no_sup}
+ end.
+
+lookup(Node, QName) ->
+ {ok, Q} = rpc:call(Node, rabbit_amqqueue, lookup,
+ [rabbit_misc:r(<<"/">>, queue, QName)]),
+ Q.
+
+await_new_pid(Node, QName, OldPid) ->
+ case queue_pid(Node, QName) of
+ OldPid -> timer:sleep(10),
+ await_new_pid(Node, QName, OldPid);
+ New -> New
+ end.
+
+assert_message_count(Count, Ch, QName) ->
+ #'queue.declare_ok'{message_count = Count} =
+ amqp_channel:call(Ch, #'queue.declare'{queue = QName,
+ passive = true}).
+
+assert_follower_count(Count, Node, QName) ->
+ Q = lookup(Node, QName),
+ [{_, Pids}] = rpc:call(Node, rabbit_amqqueue, info, [Q, [slave_pids]]),
+ RealCount = case Pids of
+ '' -> 0;
+ _ -> length(Pids)
+ end,
+ case RealCount of
+ Count ->
+ ok;
+ _ when RealCount < Count ->
+ timer:sleep(10),
+ assert_follower_count(Count, Node, QName);
+ _ ->
+ exit({too_many_replicas, Count, RealCount})
+ end.
diff --git a/deps/rabbit/test/dead_lettering_SUITE.erl b/deps/rabbit/test/dead_lettering_SUITE.erl
new file mode 100644
index 0000000000..4ee917aa21
--- /dev/null
+++ b/deps/rabbit/test/dead_lettering_SUITE.erl
@@ -0,0 +1,1174 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+%% For the full spec see: https://www.rabbitmq.com/dlx.html
+%%
+-module(dead_lettering_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("kernel/include/file.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-compile(export_all).
+
+-import(quorum_queue_utils, [wait_for_messages/2]).
+
+all() ->
+ [
+ {group, dead_letter_tests}
+ ].
+
+groups() ->
+ DeadLetterTests = [dead_letter_nack,
+ dead_letter_multiple_nack,
+ dead_letter_nack_requeue,
+ dead_letter_nack_requeue_multiple,
+ dead_letter_reject,
+ dead_letter_reject_requeue,
+ dead_letter_max_length_drop_head,
+ dead_letter_missing_exchange,
+ dead_letter_routing_key,
+ dead_letter_routing_key_header_CC,
+ dead_letter_routing_key_header_BCC,
+ dead_letter_routing_key_cycle_max_length,
+ dead_letter_routing_key_cycle_with_reject,
+ dead_letter_policy,
+ dead_letter_override_policy,
+ dead_letter_ignore_policy,
+ dead_letter_headers,
+ dead_letter_headers_reason_maxlen,
+ dead_letter_headers_cycle,
+ dead_letter_headers_BCC,
+ dead_letter_headers_CC,
+ dead_letter_headers_CC_with_routing_key,
+ dead_letter_headers_first_death],
+ Opts = [],
+ [
+ {dead_letter_tests, [],
+ [
+ {classic_queue, Opts, DeadLetterTests ++ [dead_letter_ttl,
+ dead_letter_max_length_reject_publish_dlx,
+ dead_letter_routing_key_cycle_ttl,
+ dead_letter_headers_reason_expired,
+ dead_letter_headers_reason_expired_per_message]},
+ {mirrored_queue, Opts, DeadLetterTests ++ [dead_letter_ttl,
+ dead_letter_max_length_reject_publish_dlx,
+ dead_letter_routing_key_cycle_ttl,
+ dead_letter_headers_reason_expired,
+ dead_letter_headers_reason_expired_per_message]},
+ {quorum_queue, Opts, DeadLetterTests}
+ ]}
+ ].
+
+suite() ->
+ [
+ {timetrap, {minutes, 8}}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(classic_queue, Config) ->
+ rabbit_ct_helpers:set_config(
+ Config,
+ [{queue_args, [{<<"x-queue-type">>, longstr, <<"classic">>}]},
+ {queue_durable, false}]);
+init_per_group(quorum_queue, Config) ->
+ case rabbit_ct_broker_helpers:enable_feature_flag(Config, quorum_queue) of
+ ok ->
+ rabbit_ct_helpers:set_config(
+ Config,
+ [{queue_args, [{<<"x-queue-type">>, longstr, <<"quorum">>}]},
+ {queue_durable, true}]);
+ Skip ->
+ Skip
+ end;
+init_per_group(mirrored_queue, Config) ->
+ rabbit_ct_broker_helpers:set_ha_policy(Config, 0, <<"^max_length.*queue">>,
+ <<"all">>, [{<<"ha-sync-mode">>, <<"automatic">>}]),
+ Config1 = rabbit_ct_helpers:set_config(
+ Config, [{is_mirrored, true},
+ {queue_args, [{<<"x-queue-type">>, longstr, <<"classic">>}]},
+ {queue_durable, false}]),
+ rabbit_ct_helpers:run_steps(Config1, []);
+init_per_group(Group, Config) ->
+ case lists:member({group, Group}, all()) of
+ true ->
+ ClusterSize = 3,
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, Group},
+ {rmq_nodes_count, ClusterSize}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps());
+ false ->
+ rabbit_ct_helpers:run_steps(Config, [])
+ end.
+
+end_per_group(Group, Config) ->
+ case lists:member({group, Group}, all()) of
+ true ->
+ rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps());
+ false ->
+ Config
+ end.
+
+init_per_testcase(Testcase, Config) ->
+ Group = proplists:get_value(name, ?config(tc_group_properties, Config)),
+ Q = rabbit_data_coercion:to_binary(io_lib:format("~p_~p", [Group, Testcase])),
+ Q2 = rabbit_data_coercion:to_binary(io_lib:format("~p_~p_2", [Group, Testcase])),
+ Policy = rabbit_data_coercion:to_binary(io_lib:format("~p_~p_policy", [Group, Testcase])),
+ DLXExchange = rabbit_data_coercion:to_binary(io_lib:format("~p_~p_dlx_exchange",
+ [Group, Testcase])),
+ Config1 = rabbit_ct_helpers:set_config(Config, [{dlx_exchange, DLXExchange},
+ {queue_name, Q},
+ {queue_name_dlx, Q2},
+ {policy, Policy}]),
+ rabbit_ct_helpers:testcase_started(Config1, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ {_, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ amqp_channel:call(Ch, #'queue.delete'{queue = ?config(queue_name, Config)}),
+ amqp_channel:call(Ch, #'queue.delete'{queue = ?config(queue_name_dlx, Config)}),
+ amqp_channel:call(Ch, #'exchange.delete'{exchange = ?config(dlx_exchange, Config)}),
+ _ = rabbit_ct_broker_helpers:clear_policy(Config, 0, ?config(policy, Config)),
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% Dead letter exchanges
+%%
+%% Messages are dead-lettered when:
+%% 1) message is rejected with basic.reject or basic.nack with requeue=false
+%% 2) message ttl expires (not implemented in quorum queues)
+%% 3) queue length limit is exceeded (only drop-head implemented in quorum queues)
+%%
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
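+
+%% A sketch of the wiring these tests rely on (the actual setup lives
+%% in the declare_dead_letter_queues/4,5 helper defined later in this
+%% suite): the source queue names a dead letter exchange via the
+%% x-dead-letter-exchange argument, and a second queue is bound to that
+%% exchange to collect dead-lettered messages. Roughly:
+%%
+%%   #'queue.declare'{queue = QName,
+%%                    arguments = [{<<"x-dead-letter-exchange">>,
+%%                                  longstr, DLXExchange} | ExtraArgs]},
+%%   #'queue.bind'{queue = DLXQName,
+%%                 exchange = DLXExchange,
+%%                 routing_key = QName}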
+
+%% 1) message is rejected with basic.nack, requeue=false and multiple=false
+dead_letter_nack(Config) ->
+ {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ DLXQName = ?config(queue_name_dlx, Config),
+ declare_dead_letter_queues(Ch, Config, QName, DLXQName),
+
+ P1 = <<"msg1">>,
+ P2 = <<"msg2">>,
+ P3 = <<"msg3">>,
+
+ %% Publish 3 messages
+ publish(Ch, QName, [P1, P2, P3]),
+ wait_for_messages(Config, [[QName, <<"3">>, <<"3">>, <<"0">>]]),
+ %% Consume them
+ [DTag1, DTag2, DTag3] = consume(Ch, QName, [P1, P2, P3]),
+ %% Nack the last one with multiple = false
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DTag3,
+ multiple = false,
+ requeue = false}),
+ wait_for_messages(Config, [[DLXQName, <<"1">>, <<"1">>, <<"0">>]]),
+ %% Queue is empty
+ consume_empty(Ch, QName),
+ %% Consume the last message from the dead letter queue
+ consume(Ch, DLXQName, [P3]),
+ consume_empty(Ch, DLXQName),
+ %% Nack the other two
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DTag1,
+ multiple = false,
+ requeue = false}),
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DTag2,
+ multiple = false,
+ requeue = false}),
+ %% Queue is empty
+ consume_empty(Ch, QName),
+ %% Consume the first two messages from the dead letter queue
+ consume(Ch, DLXQName, [P1, P2]),
+ consume_empty(Ch, DLXQName).
+
+%% 1) message is rejected with basic.nack, requeue=false and multiple=true
+dead_letter_multiple_nack(Config) ->
+ {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ DLXQName = ?config(queue_name_dlx, Config),
+ declare_dead_letter_queues(Ch, Config, QName, DLXQName),
+
+ P1 = <<"msg1">>,
+ P2 = <<"msg2">>,
+ P3 = <<"msg3">>,
+
+ %% Publish 3 messages
+ publish(Ch, QName, [P1, P2, P3]),
+ wait_for_messages(Config, [[QName, <<"3">>, <<"3">>, <<"0">>]]),
+ %% Consume them
+ [_, _, DTag3] = consume(Ch, QName, [P1, P2, P3]),
+ %% Nack the last one with multiple = true
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DTag3,
+ multiple = true,
+ requeue = false}),
+ wait_for_messages(Config, [[DLXQName, <<"3">>, <<"3">>, <<"0">>]]),
+ %% Consume the 3 messages from the dead letter queue
+ consume(Ch, DLXQName, [P1, P2, P3]),
+ consume_empty(Ch, DLXQName),
+ %% Queue is empty
+ consume_empty(Ch, QName).
+
+%% 1) message is rejected with basic.nack, requeue=true and multiple=false. Dead-lettering does not take place
+dead_letter_nack_requeue(Config) ->
+ {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ DLXQName = ?config(queue_name_dlx, Config),
+ declare_dead_letter_queues(Ch, Config, QName, DLXQName),
+
+ P1 = <<"msg1">>,
+ P2 = <<"msg2">>,
+ P3 = <<"msg3">>,
+
+ %% Publish 3 messages
+ publish(Ch, QName, [P1, P2, P3]),
+ %% Consume them
+ wait_for_messages(Config, [[QName, <<"3">>, <<"3">>, <<"0">>]]),
+ [_, _, DTag3] = consume(Ch, QName, [P1, P2, P3]),
+ %% Queue is empty
+ consume_empty(Ch, QName),
+ %% Nack the last one with multiple = false
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DTag3,
+ multiple = false,
+ requeue = true}),
+ %% Consume the last message from the queue
+ wait_for_messages(Config, [[QName, <<"3">>, <<"1">>, <<"2">>]]),
+ consume(Ch, QName, [P3]),
+ consume_empty(Ch, QName),
+ %% Dead letter queue is empty
+ consume_empty(Ch, DLXQName).
+
+%% 1) message is rejected with basic.nack, requeue=true and multiple=true. Dead-lettering does not take place
+dead_letter_nack_requeue_multiple(Config) ->
+ {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ DLXQName = ?config(queue_name_dlx, Config),
+ declare_dead_letter_queues(Ch, Config, QName, DLXQName),
+
+ P1 = <<"msg1">>,
+ P2 = <<"msg2">>,
+ P3 = <<"msg3">>,
+
+ %% Publish 3 messages
+ publish(Ch, QName, [P1, P2, P3]),
+ %% Consume them
+ wait_for_messages(Config, [[QName, <<"3">>, <<"3">>, <<"0">>]]),
+ [_, _, DTag3] = consume(Ch, QName, [P1, P2, P3]),
+ %% Queue is empty
+ consume_empty(Ch, QName),
+ %% Nack the last one with multiple = true
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DTag3,
+ multiple = true,
+ requeue = true}),
+ %% Consume the three messages from the queue
+ wait_for_messages(Config, [[QName, <<"3">>, <<"3">>, <<"0">>]]),
+ consume(Ch, QName, [P1, P2, P3]),
+ consume_empty(Ch, QName),
+ %% Dead letter queue is empty
+ consume_empty(Ch, DLXQName).
+
+%% 1) message is rejected with basic.reject, requeue=false
+dead_letter_reject(Config) ->
+ {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ DLXQName = ?config(queue_name_dlx, Config),
+ declare_dead_letter_queues(Ch, Config, QName, DLXQName),
+
+ P1 = <<"msg1">>,
+ P2 = <<"msg2">>,
+ P3 = <<"msg3">>,
+
+ %% Publish 3 messages
+ publish(Ch, QName, [P1, P2, P3]),
+ %% Consume the first message
+ wait_for_messages(Config, [[QName, <<"3">>, <<"3">>, <<"0">>]]),
+ [DTag] = consume(Ch, QName, [P1]),
+ %% Reject it
+ amqp_channel:cast(Ch, #'basic.reject'{delivery_tag = DTag,
+ requeue = false}),
+ %% Consume it from the dead letter queue
+ wait_for_messages(Config, [[DLXQName, <<"1">>, <<"1">>, <<"0">>]]),
+ _ = consume(Ch, DLXQName, [P1]),
+ consume_empty(Ch, DLXQName),
+ %% Consume the last two from the queue
+ _ = consume(Ch, QName, [P2, P3]),
+ consume_empty(Ch, QName).
+
+%% 1) Message is rejected with basic.reject, requeue=true. Dead-lettering does not take place.
+dead_letter_reject_requeue(Config) ->
+ {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ DLXQName = ?config(queue_name_dlx, Config),
+ declare_dead_letter_queues(Ch, Config, QName, DLXQName),
+
+ P1 = <<"msg1">>,
+ P2 = <<"msg2">>,
+ P3 = <<"msg3">>,
+
+ %% Publish 3 messages
+ publish(Ch, QName, [P1, P2, P3]),
+ %% Consume the first one
+ wait_for_messages(Config, [[QName, <<"3">>, <<"3">>, <<"0">>]]),
+ [DTag] = consume(Ch, QName, [P1]),
+ %% Reject the first one
+ amqp_channel:cast(Ch, #'basic.reject'{delivery_tag = DTag,
+ requeue = true}),
+ %% Consume the three messages from the queue
+ wait_for_messages(Config, [[QName, <<"3">>, <<"3">>, <<"0">>]]),
+ _ = consume(Ch, QName, [P1, P2, P3]),
+ consume_empty(Ch, QName),
+ %% Dead letter is empty
+ consume_empty(Ch, DLXQName).
+
+%% 2) Message ttl expires
+dead_letter_ttl(Config) ->
+ {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ DLXQName = ?config(queue_name_dlx, Config),
+ declare_dead_letter_queues(Ch, Config, QName, DLXQName, [{<<"x-message-ttl">>, long, 1}]),
+
+ %% Publish message
+ P1 = <<"msg1">>,
+ publish(Ch, QName, [P1]),
+ wait_for_messages(Config, [[DLXQName, <<"1">>, <<"1">>, <<"0">>]]),
+ consume_empty(Ch, QName),
+ [_] = consume(Ch, DLXQName, [P1]).
+
+%% 3) The queue length limit is exceeded; the dropped message is dead-lettered.
+%% Default strategy: drop-head
+dead_letter_max_length_drop_head(Config) ->
+ {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ DLXQName = ?config(queue_name_dlx, Config),
+
+ declare_dead_letter_queues(Ch, Config, QName, DLXQName, [{<<"x-max-length">>, long, 1}]),
+
+ P1 = <<"msg1">>,
+ P2 = <<"msg2">>,
+ P3 = <<"msg3">>,
+
+ %% Publish 3 messages
+ publish(Ch, QName, [P1, P2, P3]),
+ %% Consume the last one from the queue (max-length = 1)
+ wait_for_messages(Config, [[QName, <<"1">>, <<"1">>, <<"0">>]]),
+ _ = consume(Ch, QName, [P3]),
+ consume_empty(Ch, QName),
+ %% Consume the dropped ones from the dead letter queue
+ wait_for_messages(Config, [[DLXQName, <<"2">>, <<"2">>, <<"0">>]]),
+ _ = consume(Ch, DLXQName, [P1, P2]),
+ consume_empty(Ch, DLXQName).
+
+%% Another strategy: reject-publish-dlx
+dead_letter_max_length_reject_publish_dlx(Config) ->
+ {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ DLXQName = ?config(queue_name_dlx, Config),
+
+ declare_dead_letter_queues(Ch, Config, QName, DLXQName,
+ [{<<"x-max-length">>, long, 1},
+ {<<"x-overflow">>, longstr, <<"reject-publish-dlx">>}]),
+
+ P1 = <<"msg1">>,
+ P2 = <<"msg2">>,
+ P3 = <<"msg3">>,
+
+ %% Publish 3 messages
+ publish(Ch, QName, [P1, P2, P3]),
+ %% Consume the first one from the queue (max-length = 1)
+ wait_for_messages(Config, [[QName, <<"1">>, <<"1">>, <<"0">>]]),
+ _ = consume(Ch, QName, [P1]),
+ consume_empty(Ch, QName),
+ %% Consume the dropped ones from the dead letter queue
+ wait_for_messages(Config, [[DLXQName, <<"2">>, <<"2">>, <<"0">>]]),
+ _ = consume(Ch, DLXQName, [P2, P3]),
+ consume_empty(Ch, DLXQName).
+
+%% The dead letter exchange does not have to exist when the queue is declared, but it
+%% should exist by the time messages need to be dead-lettered; if it is missing at that
+%% point, the messages will be silently dropped.
+dead_letter_missing_exchange(Config) ->
+ {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ Args = ?config(queue_args, Config),
+ Durable = ?config(queue_durable, Config),
+ QName = ?config(queue_name, Config),
+ DLXQName = ?config(queue_name_dlx, Config),
+ DLXExchange = <<"dlx-exchange-2">>,
+ #'exchange.delete_ok'{} = amqp_channel:call(Ch, #'exchange.delete'{exchange = DLXExchange}),
+
+ DeadLetterArgs = [{<<"x-max-length">>, long, 1},
+ {<<"x-dead-letter-exchange">>, longstr, DLXExchange},
+ {<<"x-dead-letter-routing-key">>, longstr, DLXQName}],
+ #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = QName, arguments = DeadLetterArgs ++ Args, durable = Durable}),
+ #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = DLXQName, durable = Durable}),
+
+ P1 = <<"msg1">>,
+ P2 = <<"msg2">>,
+
+ %% Publish one message
+ publish(Ch, QName, [P1]),
+ %% Consume it
+ wait_for_messages(Config, [[QName, <<"1">>, <<"1">>, <<"0">>]]),
+ [DTag] = consume(Ch, QName, [P1]),
+ %% Reject it
+ amqp_channel:cast(Ch, #'basic.reject'{delivery_tag = DTag,
+ requeue = false}),
+ wait_for_messages(Config, [[QName, <<"0">>, <<"0">>, <<"0">>]]),
+ %% Message is not in the dead letter queue (exchange does not exist)
+ consume_empty(Ch, DLXQName),
+
+ %% Declare the dead-letter exchange
+ #'exchange.declare_ok'{} = amqp_channel:call(Ch, #'exchange.declare'{exchange = DLXExchange}),
+ #'queue.bind_ok'{} = amqp_channel:call(Ch, #'queue.bind'{queue = DLXQName,
+ exchange = DLXExchange,
+ routing_key = DLXQName}),
+
+ %% Publish another message
+ publish(Ch, QName, [P2]),
+ %% Consume it
+ wait_for_messages(Config, [[QName, <<"1">>, <<"1">>, <<"0">>]]),
+ [DTag2] = consume(Ch, QName, [P2]),
+ %% Reject it
+ amqp_channel:cast(Ch, #'basic.reject'{delivery_tag = DTag2,
+ requeue = false}),
+ %% Consume the rejected message from the dead letter queue
+ wait_for_messages(Config, [[DLXQName, <<"1">>, <<"1">>, <<"0">>]]),
+ {#'basic.get_ok'{}, #amqp_msg{payload = P2}} =
+ amqp_channel:call(Ch, #'basic.get'{queue = DLXQName}),
+ consume_empty(Ch, DLXQName).
+
+%%
+%% ROUTING
+%%
+%% Dead-lettered messages are routed to their dead letter exchange either:
+%% with the routing key specified for the queue they were on; or,
+%% if this was not set, (3) with the same routing keys they were originally published with.
+%% (4) This includes routing keys added by the CC and BCC headers.
+%%
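+%% Purely as an illustrative sketch (hypothetical names; nothing below calls this
+%% helper): omitting x-dead-letter-routing-key is what opts a queue into
+%% dead-lettering with the original routing keys.
+example_declare_dlx_without_key(Ch) ->
+    Args = [{<<"x-dead-letter-exchange">>, longstr, <<"some-dlx">>}],
+    #'queue.declare_ok'{} =
+        amqp_channel:call(Ch, #'queue.declare'{queue = <<"some-queue">>,
+                                               arguments = Args}).
+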
+%% 3) All previous tests used a specific key; test the original ones now.
+dead_letter_routing_key(Config) ->
+ {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ DLXQName = ?config(queue_name_dlx, Config),
+ Args = ?config(queue_args, Config),
+ Durable = ?config(queue_durable, Config),
+ DLXExchange = ?config(dlx_exchange, Config),
+
+ %% Do not use a specific key
+ DeadLetterArgs = [{<<"x-dead-letter-exchange">>, longstr, DLXExchange}],
+ #'exchange.declare_ok'{} = amqp_channel:call(Ch, #'exchange.declare'{exchange = DLXExchange}),
+ #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = QName, arguments = DeadLetterArgs ++ Args, durable = Durable}),
+ #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = DLXQName, durable = Durable}),
+
+ P1 = <<"msg1">>,
+ P2 = <<"msg2">>,
+
+ %% Publish, consume and nack the first message
+ publish(Ch, QName, [P1]),
+ wait_for_messages(Config, [[QName, <<"1">>, <<"1">>, <<"0">>]]),
+ [DTag1] = consume(Ch, QName, [P1]),
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DTag1,
+ multiple = false,
+ requeue = false}),
+ %% Both queues are empty as the message could not be routed through the DLX exchange
+ wait_for_messages(Config, [[QName, <<"0">>, <<"0">>, <<"0">>]]),
+ consume_empty(Ch, QName),
+ consume_empty(Ch, DLXQName),
+ %% Bind the dlx queue with the original queue routing key
+ #'queue.bind_ok'{} = amqp_channel:call(Ch, #'queue.bind'{queue = DLXQName,
+ exchange = DLXExchange,
+ routing_key = QName}),
+ %% Publish, consume and nack the second message
+ publish(Ch, QName, [P2]),
+ wait_for_messages(Config, [[QName, <<"1">>, <<"1">>, <<"0">>]]),
+ [DTag2] = consume(Ch, QName, [P2]),
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DTag2,
+ multiple = false,
+ requeue = false}),
+ %% Message can now be routed using the newly bound key
+ wait_for_messages(Config, [[DLXQName, <<"1">>, <<"1">>, <<"0">>]]),
+ consume(Ch, DLXQName, [P2]),
+ consume_empty(Ch, QName).
+
+%% 4a) If a specific routing key was not set for the queue, use routing keys added by the
+%% CC and BCC headers
+dead_letter_routing_key_header_CC(Config) ->
+ {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ DLXQName = ?config(queue_name_dlx, Config),
+ Args = ?config(queue_args, Config),
+ Durable = ?config(queue_durable, Config),
+ DLXExchange = ?config(dlx_exchange, Config),
+
+ %% Do not use a specific key
+ DeadLetterArgs = [{<<"x-dead-letter-exchange">>, longstr, DLXExchange}],
+ #'exchange.declare_ok'{} = amqp_channel:call(Ch, #'exchange.declare'{exchange = DLXExchange}),
+ #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = QName, arguments = DeadLetterArgs ++ Args, durable = Durable}),
+ #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = DLXQName, durable = Durable}),
+ #'queue.bind_ok'{} = amqp_channel:call(Ch, #'queue.bind'{queue = DLXQName,
+ exchange = DLXExchange,
+ routing_key = DLXQName}),
+
+ P1 = <<"msg1">>,
+ P2 = <<"msg2">>,
+ CCHeader = {<<"CC">>, array, [{longstr, DLXQName}]},
+
+ %% Publish, consume and nack two messages, one with CC header
+ publish(Ch, QName, [P1]),
+ publish(Ch, QName, [P2], [CCHeader]),
+ wait_for_messages(Config, [[QName, <<"2">>, <<"2">>, <<"0">>]]),
+ [_, DTag2] = consume(Ch, QName, [P1, P2]),
+ %% P2 is also routed to the DLX queue via the default exchange because of the CC header
+ [_] = consume(Ch, DLXQName, [P2]),
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DTag2,
+ multiple = true,
+ requeue = false}),
+ %% The second message should have been routed using the CC header
+ wait_for_messages(Config, [[DLXQName, <<"2">>, <<"1">>, <<"1">>]]),
+ consume_empty(Ch, QName),
+ consume(Ch, DLXQName, [P2]),
+ consume_empty(Ch, DLXQName).
+
+%% 4b) If a specific routing key was not set for the queue, use routing keys added by the
+%% CC and BCC headers
+dead_letter_routing_key_header_BCC(Config) ->
+ {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ DLXQName = ?config(queue_name_dlx, Config),
+ Args = ?config(queue_args, Config),
+ Durable = ?config(queue_durable, Config),
+ DLXExchange = ?config(dlx_exchange, Config),
+
+ %% Do not use a specific key
+ DeadLetterArgs = [{<<"x-dead-letter-exchange">>, longstr, DLXExchange}],
+ #'exchange.declare_ok'{} = amqp_channel:call(Ch, #'exchange.declare'{exchange = DLXExchange}),
+ #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = QName, arguments = DeadLetterArgs ++ Args, durable = Durable}),
+ #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = DLXQName, durable = Durable}),
+ #'queue.bind_ok'{} = amqp_channel:call(Ch, #'queue.bind'{queue = DLXQName,
+ exchange = DLXExchange,
+ routing_key = DLXQName}),
+
+ P1 = <<"msg1">>,
+ P2 = <<"msg2">>,
+ BCCHeader = {<<"BCC">>, array, [{longstr, DLXQName}]},
+
+ %% Publish, consume and nack two messages, one with BCC header
+ publish(Ch, QName, [P1]),
+ publish(Ch, QName, [P2], [BCCHeader]),
+ wait_for_messages(Config, [[QName, <<"2">>, <<"2">>, <<"0">>]]),
+ [_, DTag2] = consume(Ch, QName, [P1, P2]),
+ %% P2 is also routed to the DLX queue via the default exchange because of the BCC header
+ [_] = consume(Ch, DLXQName, [P2]),
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DTag2,
+ multiple = true,
+ requeue = false}),
+ %% The second message should have been routed using the BCC header
+ wait_for_messages(Config, [[DLXQName, <<"2">>, <<"1">>, <<"1">>]]),
+ consume_empty(Ch, QName),
+ consume(Ch, DLXQName, [P2]),
+ consume_empty(Ch, DLXQName).
+
+%% It is possible to form a cycle of message dead-lettering. For instance,
+%% this can happen when a queue dead-letters messages to the default exchange without
+%% specifying a dead-letter routing key (5). Messages in such cycles (i.e. messages that
+%% reach the same queue twice) will be dropped if there were no rejections in the entire cycle,
+%% i.e. x-message-ttl (7), x-max-length (6)
+%%
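+%% A minimal sketch of such a cycle (hypothetical queue name; nothing below calls
+%% this helper): the default exchange routes by queue name, so a queue that
+%% dead-letters to <<>> without a routing key republishes dropped messages back
+%% to itself.
+example_declare_cycle(Ch) ->
+    Args = [{<<"x-dead-letter-exchange">>, longstr, <<>>}],
+    #'queue.declare_ok'{} =
+        amqp_channel:call(Ch, #'queue.declare'{queue = <<"cycle-queue">>,
+                                               arguments = Args}).
+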
+%% 6) Message is dead lettered due to queue length limit, and then dropped by the broker as it is
+%% republished to the same queue.
+dead_letter_routing_key_cycle_max_length(Config) ->
+ {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ Args = ?config(queue_args, Config),
+ Durable = ?config(queue_durable, Config),
+ QName = ?config(queue_name, Config),
+
+ DeadLetterArgs = [{<<"x-max-length">>, long, 1},
+ {<<"x-dead-letter-exchange">>, longstr, <<>>}],
+ #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = QName, arguments = DeadLetterArgs ++ Args, durable = Durable}),
+
+ P1 = <<"msg1">>,
+ P2 = <<"msg2">>,
+
+ %% Publish messages, consume and acknowledge the second one (x-max-length = 1)
+ publish(Ch, QName, [P1, P2]),
+ wait_for_messages(Config, [[QName, <<"1">>, <<"1">>, <<"0">>]]),
+ [DTag] = consume(Ch, QName, [P2]),
+ consume_empty(Ch, QName),
+ amqp_channel:cast(Ch, #'basic.ack'{delivery_tag = DTag}),
+ %% Queue is empty; P1 has not been republished in a loop
+ wait_for_messages(Config, [[QName, <<"0">>, <<"0">>, <<"0">>]]),
+ consume_empty(Ch, QName).
+
+%% 7) Message is dead lettered due to message ttl. Not yet implemented in quorum queues
+dead_letter_routing_key_cycle_ttl(Config) ->
+ {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ Args = ?config(queue_args, Config),
+ Durable = ?config(queue_durable, Config),
+ QName = ?config(queue_name, Config),
+
+ DeadLetterArgs = [{<<"x-message-ttl">>, long, 1},
+ {<<"x-dead-letter-exchange">>, longstr, <<>>}],
+ #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = QName, arguments = DeadLetterArgs ++ Args, durable = Durable}),
+
+ P1 = <<"msg1">>,
+ P2 = <<"msg2">>,
+
+ %% Publish messages
+ publish(Ch, QName, [P1, P2]),
+ wait_for_messages(Config, [[QName, <<"0">>, <<"0">>, <<"0">>]]),
+ consume_empty(Ch, QName).
+
+%% 5) Messages continue to be republished as there are manual rejections
+dead_letter_routing_key_cycle_with_reject(Config) ->
+ {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ Args = ?config(queue_args, Config),
+ Durable = ?config(queue_durable, Config),
+ QName = ?config(queue_name, Config),
+
+ DeadLetterArgs = [{<<"x-dead-letter-exchange">>, longstr, <<>>}],
+ #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = QName, arguments = DeadLetterArgs ++ Args, durable = Durable}),
+
+ P = <<"msg1">>,
+
+ %% Publish message
+ publish(Ch, QName, [P]),
+ wait_for_messages(Config, [[QName, <<"1">>, <<"1">>, <<"0">>]]),
+ [DTag] = consume(Ch, QName, [P]),
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DTag,
+ multiple = false,
+ requeue = false}),
+ wait_for_messages(Config, [[QName, <<"1">>, <<"1">>, <<"0">>]]),
+ [DTag1] = consume(Ch, QName, [P]),
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DTag1,
+ multiple = false,
+ requeue = false}),
+ %% Message is being republished
+ wait_for_messages(Config, [[QName, <<"1">>, <<"1">>, <<"0">>]]),
+ [_] = consume(Ch, QName, [P]).
+
+%%
+%% For any given queue, a DLX can be defined by clients using the queue's arguments,
+%% or in the server using policies (8). In the case where both policy and arguments specify a DLX,
+%% the one specified in arguments overrules the one specified in policy (9).
+%%
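+%% Outside of these tests the same policy can be set with the CLI; an illustrative
+%% example (hypothetical policy name and exchange):
+%%   rabbitmqctl set_policy my-dlx ".*" '{"dead-letter-exchange":"some-dlx"}' --apply-to queues
+%%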
+%% 8) Use server policies
+dead_letter_policy(Config) ->
+ {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ DLXQName = ?config(queue_name_dlx, Config),
+ Args = ?config(queue_args, Config),
+ Durable = ?config(queue_durable, Config),
+ DLXExchange = ?config(dlx_exchange, Config),
+
+ %% Do not use arguments
+ #'exchange.declare_ok'{} = amqp_channel:call(Ch, #'exchange.declare'{exchange = DLXExchange}),
+ #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = QName, arguments = Args,
+ durable = Durable}),
+ #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = DLXQName,
+ durable = Durable}),
+ #'queue.bind_ok'{} = amqp_channel:call(Ch, #'queue.bind'{queue = DLXQName,
+ exchange = DLXExchange,
+ routing_key = DLXQName}),
+
+ P1 = <<"msg1">>,
+ P2 = <<"msg2">>,
+
+ %% Publish 2 messages
+ publish(Ch, QName, [P1, P2]),
+ %% Consume them
+ wait_for_messages(Config, [[QName, <<"2">>, <<"2">>, <<"0">>]]),
+ [DTag1, DTag2] = consume(Ch, QName, [P1, P2]),
+ %% Nack the first one with multiple = false
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DTag1,
+ multiple = false,
+ requeue = false}),
+ %% Only one unacked message left in the queue
+ wait_for_messages(Config, [[QName, <<"1">>, <<"0">>, <<"1">>]]),
+ consume_empty(Ch, QName),
+ consume_empty(Ch, DLXQName),
+
+ %% Set a policy
+ ok = rabbit_ct_broker_helpers:set_policy(Config, 0, ?config(policy, Config), QName,
+ <<"queues">>,
+ [{<<"dead-letter-exchange">>, DLXExchange},
+ {<<"dead-letter-routing-key">>, DLXQName}]),
+ timer:sleep(1000),
+ %% Nack the second message
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DTag2,
+ multiple = false,
+ requeue = false}),
+ %% Queue is empty
+ wait_for_messages(Config, [[QName, <<"0">>, <<"0">>, <<"0">>]]),
+ consume_empty(Ch, QName),
+ %% Consume the message from the dead letter queue
+ consume(Ch, DLXQName, [P2]),
+ consume_empty(Ch, DLXQName).
+
+%% 9) Argument overrides server policy
+dead_letter_override_policy(Config) ->
+ {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ DLXQName = ?config(queue_name_dlx, Config),
+
+ %% Set a policy; it creates a cycle, but the message will be republished on nack.
+ %% Good enough for this test.
+ ok = rabbit_ct_broker_helpers:set_policy(Config, 0, ?config(policy, Config), QName,
+ <<"queues">>,
+ [{<<"dead-letter-exchange">>, <<>>},
+ {<<"dead-letter-routing-key">>, QName}]),
+
+ %% Declared arguments override the policy and set the dead-letter routing key
+ declare_dead_letter_queues(Ch, Config, QName, DLXQName),
+
+ P1 = <<"msg1">>,
+
+ publish(Ch, QName, [P1]),
+ wait_for_messages(Config, [[QName, <<"1">>, <<"1">>, <<"0">>]]),
+ [DTag1] = consume(Ch, QName, [P1]),
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DTag1,
+ multiple = false,
+ requeue = false}),
+ %% Queue is empty
+ wait_for_messages(Config, [[DLXQName, <<"1">>, <<"1">>, <<"0">>]]),
+ consume_empty(Ch, QName),
+ [_] = consume(Ch, DLXQName, [P1]).
+
+%% 9) Policy is set after a queue has been declared with dead letter arguments. The policy
+%% will be overridden/ignored.
+dead_letter_ignore_policy(Config) ->
+ {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ DLXQName = ?config(queue_name_dlx, Config),
+
+ declare_dead_letter_queues(Ch, Config, QName, DLXQName),
+
+ %% Set a policy
+ ok = rabbit_ct_broker_helpers:set_policy(Config, 0, ?config(policy, Config), QName,
+ <<"queues">>,
+ [{<<"dead-letter-exchange">>, <<>>},
+ {<<"dead-letter-routing-key">>, QName}]),
+
+ P1 = <<"msg1">>,
+
+ publish(Ch, QName, [P1]),
+ wait_for_messages(Config, [[QName, <<"1">>, <<"1">>, <<"0">>]]),
+ [DTag1] = consume(Ch, QName, [P1]),
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DTag1,
+ multiple = false,
+ requeue = false}),
+ %% Message is in the dead letter queue, original queue is empty
+ wait_for_messages(Config, [[DLXQName, <<"1">>, <<"1">>, <<"0">>]]),
+ [_] = consume(Ch, DLXQName, [P1]),
+ consume_empty(Ch, QName).
+
+%%
+%% HEADERS
+%%
+%% The dead-lettering process adds an array named x-death (10) to the headers of each
+%% dead-lettered message. This array contains an entry for each dead lettering event, containing:
+%% queue, reason, time, exchange, routing-keys, count
+%% original-expiration (14) (if the message was dead-lettered due to per-message TTL)
+%% New entries are prepended to the beginning of the x-death array.
+%% Reason is one of the following: rejected (11), expired (12), maxlen (13)
+%%
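+%% For reference, a single x-death entry as decoded by the Erlang client is an
+%% AMQP table; roughly (values here are made up):
+%%   {table, [{<<"queue">>,        longstr,   <<"some-queue">>},
+%%            {<<"reason">>,       longstr,   <<"rejected">>},
+%%            {<<"time">>,         timestamp, 1605708461},
+%%            {<<"exchange">>,     longstr,   <<>>},
+%%            {<<"routing-keys">>, array,     [{longstr, <<"some-queue">>}]},
+%%            {<<"count">>,        long,      1}]}
+%%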
+%% 10) and 11) Check all x-death headers, reason rejected
+dead_letter_headers(Config) ->
+ {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ DLXQName = ?config(queue_name_dlx, Config),
+ declare_dead_letter_queues(Ch, Config, QName, DLXQName),
+
+ %% Publish and nack a message
+ P1 = <<"msg1">>,
+ publish(Ch, QName, [P1]),
+ wait_for_messages(Config, [[QName, <<"1">>, <<"1">>, <<"0">>]]),
+ [DTag1] = consume(Ch, QName, [P1]),
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DTag1,
+ multiple = false,
+ requeue = false}),
+ %% Consume and check headers
+ wait_for_messages(Config, [[DLXQName, <<"1">>, <<"1">>, <<"0">>]]),
+ {#'basic.get_ok'{}, #amqp_msg{payload = P1,
+ props = #'P_basic'{headers = Headers}}} =
+ amqp_channel:call(Ch, #'basic.get'{queue = DLXQName}),
+ {array, [{table, Death}]} = rabbit_misc:table_lookup(Headers, <<"x-death">>),
+ ?assertEqual({longstr, QName}, rabbit_misc:table_lookup(Death, <<"queue">>)),
+ ?assertEqual({longstr, <<"rejected">>}, rabbit_misc:table_lookup(Death, <<"reason">>)),
+ ?assertMatch({timestamp, _}, rabbit_misc:table_lookup(Death, <<"time">>)),
+ ?assertEqual({longstr, <<>>}, rabbit_misc:table_lookup(Death, <<"exchange">>)),
+ ?assertEqual({long, 1}, rabbit_misc:table_lookup(Death, <<"count">>)),
+ ?assertEqual({array, [{longstr, QName}]}, rabbit_misc:table_lookup(Death, <<"routing-keys">>)).
+
+%% 12) Per-queue message ttl has expired
+dead_letter_headers_reason_expired(Config) ->
+ {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ DLXQName = ?config(queue_name_dlx, Config),
+ declare_dead_letter_queues(Ch, Config, QName, DLXQName, [{<<"x-message-ttl">>, long, 1}]),
+
+ %% Publish a message
+ P1 = <<"msg1">>,
+ publish(Ch, QName, [P1]),
+ %% Consume and check headers
+ wait_for_messages(Config, [[DLXQName, <<"1">>, <<"1">>, <<"0">>]]),
+ {#'basic.get_ok'{}, #amqp_msg{payload = P1,
+ props = #'P_basic'{headers = Headers}}} =
+ amqp_channel:call(Ch, #'basic.get'{queue = DLXQName}),
+ {array, [{table, Death}]} = rabbit_misc:table_lookup(Headers, <<"x-death">>),
+ ?assertEqual({longstr, <<"expired">>}, rabbit_misc:table_lookup(Death, <<"reason">>)),
+ ?assertMatch(undefined, rabbit_misc:table_lookup(Death, <<"original-expiration">>)).
+
+%% 14) Per-message TTL has expired, original-expiration is added to x-death array
+dead_letter_headers_reason_expired_per_message(Config) ->
+ {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ DLXQName = ?config(queue_name_dlx, Config),
+ declare_dead_letter_queues(Ch, Config, QName, DLXQName),
+
+ %% Publish a message
+ P1 = <<"msg1">>,
+ amqp_channel:call(Ch, #'basic.publish'{routing_key = QName},
+ #amqp_msg{payload = P1,
+ props = #'P_basic'{expiration = <<"1">>}}),
+ %% Publish another message to ensure the queue performs message expirations
+ publish(Ch, QName, [<<"msg2">>]),
+ %% Consume and check headers
+ wait_for_messages(Config, [[DLXQName, <<"1">>, <<"1">>, <<"0">>]]),
+ {#'basic.get_ok'{}, #amqp_msg{payload = P1,
+ props = #'P_basic'{headers = Headers}}} =
+ amqp_channel:call(Ch, #'basic.get'{queue = DLXQName}),
+ {array, [{table, Death}]} = rabbit_misc:table_lookup(Headers, <<"x-death">>),
+ ?assertEqual({longstr, <<"expired">>}, rabbit_misc:table_lookup(Death, <<"reason">>)),
+ ?assertMatch({longstr, <<"1">>}, rabbit_misc:table_lookup(Death, <<"original-expiration">>)).
+
+%% 13) Message dead-lettered with maxlen reason
+dead_letter_headers_reason_maxlen(Config) ->
+ {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ DLXQName = ?config(queue_name_dlx, Config),
+ declare_dead_letter_queues(Ch, Config, QName, DLXQName, [{<<"x-max-length">>, long, 1}]),
+
+ P1 = <<"msg1">>,
+ P2 = <<"msg2">>,
+ publish(Ch, QName, [P1, P2]),
+ %% Consume and check reason header
+ wait_for_messages(Config, [[DLXQName, <<"1">>, <<"1">>, <<"0">>]]),
+ {#'basic.get_ok'{}, #amqp_msg{payload = P1,
+ props = #'P_basic'{headers = Headers}}} =
+ amqp_channel:call(Ch, #'basic.get'{queue = DLXQName}),
+ {array, [{table, Death}]} = rabbit_misc:table_lookup(Headers, <<"x-death">>),
+ ?assertEqual({longstr, <<"maxlen">>}, rabbit_misc:table_lookup(Death, <<"reason">>)).
+
+%% If x-death already contains an entry with the same queue and dead-lettering reason,
+%% its count field will be incremented and the entry will be moved to the beginning of the array
+dead_letter_headers_cycle(Config) ->
+ {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ Args = ?config(queue_args, Config),
+ Durable = ?config(queue_durable, Config),
+ QName = ?config(queue_name, Config),
+
+ DeadLetterArgs = [{<<"x-dead-letter-exchange">>, longstr, <<>>}],
+ #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = QName, arguments = DeadLetterArgs ++ Args, durable = Durable}),
+
+ P = <<"msg1">>,
+
+ %% Publish message
+ publish(Ch, QName, [P]),
+ wait_for_messages(Config, [[QName, <<"1">>, <<"1">>, <<"0">>]]),
+ [DTag] = consume(Ch, QName, [P]),
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DTag,
+ multiple = false,
+ requeue = false}),
+ wait_for_messages(Config, [[QName, <<"1">>, <<"1">>, <<"0">>]]),
+ {#'basic.get_ok'{delivery_tag = DTag1}, #amqp_msg{payload = P,
+ props = #'P_basic'{headers = Headers1}}} =
+ amqp_channel:call(Ch, #'basic.get'{queue = QName}),
+ {array, [{table, Death1}]} = rabbit_misc:table_lookup(Headers1, <<"x-death">>),
+ ?assertEqual({long, 1}, rabbit_misc:table_lookup(Death1, <<"count">>)),
+
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DTag1,
+ multiple = false,
+ requeue = false}),
+ %% Message is being republished
+ wait_for_messages(Config, [[QName, <<"1">>, <<"1">>, <<"0">>]]),
+ {#'basic.get_ok'{}, #amqp_msg{payload = P,
+ props = #'P_basic'{headers = Headers2}}} =
+ amqp_channel:call(Ch, #'basic.get'{queue = QName}),
+ {array, [{table, Death2}]} = rabbit_misc:table_lookup(Headers2, <<"x-death">>),
+ ?assertEqual({long, 2}, rabbit_misc:table_lookup(Death2, <<"count">>)).
+
+%% Dead-lettering a message modifies its headers:
+%% the exchange name is replaced with that of the latest dead-letter exchange,
+%% the routing key may be replaced with the one specified on the queue performing dead lettering;
+%% if the above happens, the CC header will also be removed (15), and
+%% the BCC header will be removed as per Sender-selected distribution (16)
+%%
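+%% The tests below attach CC/BCC via the publish/4 helper defined at the end of
+%% this suite, e.g.:
+%%   publish(Ch, QName, [<<"payload">>], [{<<"CC">>, array, [{longstr, <<"alt-key">>}]}])
+%%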
+%% CC header is kept if no dead lettering routing key is provided
+dead_letter_headers_CC(Config) ->
+ {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ DLXQName = ?config(queue_name_dlx, Config),
+ Args = ?config(queue_args, Config),
+ Durable = ?config(queue_durable, Config),
+ DLXExchange = ?config(dlx_exchange, Config),
+
+ %% Do not use a specific key for dead lettering; the CC header is passed through
+ DeadLetterArgs = [{<<"x-dead-letter-exchange">>, longstr, DLXExchange}],
+ #'exchange.declare_ok'{} = amqp_channel:call(Ch, #'exchange.declare'{exchange = DLXExchange}),
+ #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = QName, arguments = DeadLetterArgs ++ Args, durable = Durable}),
+ #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = DLXQName, durable = Durable}),
+ #'queue.bind_ok'{} = amqp_channel:call(Ch, #'queue.bind'{queue = DLXQName,
+ exchange = DLXExchange,
+ routing_key = DLXQName}),
+
+ P1 = <<"msg1">>,
+ CCHeader = {<<"CC">>, array, [{longstr, DLXQName}]},
+ publish(Ch, QName, [P1], [CCHeader]),
+ wait_for_messages(Config, [[QName, <<"1">>, <<"1">>, <<"0">>]]),
+ %% Message is published to both queues because of the CC header and the DLX queue's
+ %% implicit binding to the default exchange
+ {#'basic.get_ok'{delivery_tag = DTag1}, #amqp_msg{payload = P1,
+ props = #'P_basic'{headers = Headers1}}} =
+ amqp_channel:call(Ch, #'basic.get'{queue = QName}),
+ {#'basic.get_ok'{}, #amqp_msg{payload = P1,
+ props = #'P_basic'{headers = Headers2}}} =
+ amqp_channel:call(Ch, #'basic.get'{queue = DLXQName}),
+ %% We check the headers to ensure no dead lettering has happened
+ ?assertEqual(undefined, rabbit_misc:table_lookup(Headers1, <<"x-death">>)),
+ ?assertEqual(undefined, rabbit_misc:table_lookup(Headers2, <<"x-death">>)),
+
+ %% Nack the message so it now gets dead lettered
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DTag1,
+ multiple = false,
+ requeue = false}),
+ wait_for_messages(Config, [[DLXQName, <<"2">>, <<"1">>, <<"1">>]]),
+ {#'basic.get_ok'{}, #amqp_msg{payload = P1,
+ props = #'P_basic'{headers = Headers3}}} =
+ amqp_channel:call(Ch, #'basic.get'{queue = DLXQName}),
+ consume_empty(Ch, QName),
+ ?assertEqual({array, [{longstr, DLXQName}]}, rabbit_misc:table_lookup(Headers3, <<"CC">>)),
+ ?assertMatch({array, _}, rabbit_misc:table_lookup(Headers3, <<"x-death">>)).
+
+%% 15) CC header is removed when routing key is specified
+dead_letter_headers_CC_with_routing_key(Config) ->
+ {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ DLXQName = ?config(queue_name_dlx, Config),
+ Args = ?config(queue_args, Config),
+ Durable = ?config(queue_durable, Config),
+ DLXExchange = ?config(dlx_exchange, Config),
+
+ %% Use a specific key for dead lettering, so the CC header will be removed
+ DeadLetterArgs = [{<<"x-dead-letter-routing-key">>, longstr, DLXQName},
+ {<<"x-dead-letter-exchange">>, longstr, DLXExchange}],
+ #'exchange.declare_ok'{} = amqp_channel:call(Ch, #'exchange.declare'{exchange = DLXExchange}),
+ #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = QName, arguments = DeadLetterArgs ++ Args, durable = Durable}),
+ #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = DLXQName, durable = Durable}),
+ #'queue.bind_ok'{} = amqp_channel:call(Ch, #'queue.bind'{queue = DLXQName,
+ exchange = DLXExchange,
+ routing_key = DLXQName}),
+
+ P1 = <<"msg1">>,
+ CCHeader = {<<"CC">>, array, [{longstr, DLXQName}]},
+ publish(Ch, QName, [P1], [CCHeader]),
+ wait_for_messages(Config, [[QName, <<"1">>, <<"1">>, <<"0">>]]),
+ %% Message is published to both queues because of the CC header and the DLX queue's
+ %% implicit binding to the default exchange
+ {#'basic.get_ok'{delivery_tag = DTag1}, #amqp_msg{payload = P1,
+ props = #'P_basic'{headers = Headers1}}} =
+ amqp_channel:call(Ch, #'basic.get'{queue = QName}),
+ {#'basic.get_ok'{}, #amqp_msg{payload = P1,
+ props = #'P_basic'{headers = Headers2}}} =
+ amqp_channel:call(Ch, #'basic.get'{queue = DLXQName}),
+ %% We check the headers to ensure no dead lettering has happened
+ ?assertEqual(undefined, rabbit_misc:table_lookup(Headers1, <<"x-death">>)),
+ ?assertEqual(undefined, rabbit_misc:table_lookup(Headers2, <<"x-death">>)),
+
+ %% Nack the message so it now gets dead lettered
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DTag1,
+ multiple = false,
+ requeue = false}),
+ wait_for_messages(Config, [[DLXQName, <<"2">>, <<"1">>, <<"1">>]]),
+ {#'basic.get_ok'{}, #amqp_msg{payload = P1,
+ props = #'P_basic'{headers = Headers3}}} =
+ amqp_channel:call(Ch, #'basic.get'{queue = DLXQName}),
+ consume_empty(Ch, QName),
+ ?assertEqual(undefined, rabbit_misc:table_lookup(Headers3, <<"CC">>)),
+ ?assertMatch({array, _}, rabbit_misc:table_lookup(Headers3, <<"x-death">>)).
+
+%% 16) The BCC header will always be removed
+dead_letter_headers_BCC(Config) ->
+ {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ DLXQName = ?config(queue_name_dlx, Config),
+ Args = ?config(queue_args, Config),
+ Durable = ?config(queue_durable, Config),
+ DLXExchange = ?config(dlx_exchange, Config),
+
+ %% Do not use a specific key for dead lettering
+ DeadLetterArgs = [{<<"x-dead-letter-exchange">>, longstr, DLXExchange}],
+ #'exchange.declare_ok'{} = amqp_channel:call(Ch, #'exchange.declare'{exchange = DLXExchange}),
+ #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = QName, arguments = DeadLetterArgs ++ Args, durable = Durable}),
+ #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = DLXQName, durable = Durable}),
+ #'queue.bind_ok'{} = amqp_channel:call(Ch, #'queue.bind'{queue = DLXQName,
+ exchange = DLXExchange,
+ routing_key = DLXQName}),
+
+ P1 = <<"msg1">>,
+ BCCHeader = {<<"BCC">>, array, [{longstr, DLXQName}]},
+ publish(Ch, QName, [P1], [BCCHeader]),
+ %% Message is published to both queues because of the BCC header and the DLX queue's
+ %% implicit binding to the default exchange
+ wait_for_messages(Config, [[QName, <<"1">>, <<"1">>, <<"0">>]]),
+ {#'basic.get_ok'{delivery_tag = DTag1}, #amqp_msg{payload = P1,
+ props = #'P_basic'{headers = Headers1}}} =
+ amqp_channel:call(Ch, #'basic.get'{queue = QName}),
+ {#'basic.get_ok'{}, #amqp_msg{payload = P1,
+ props = #'P_basic'{headers = Headers2}}} =
+ amqp_channel:call(Ch, #'basic.get'{queue = DLXQName}),
+ %% We check the headers to ensure no dead lettering has happened
+ ?assertEqual(undefined, rabbit_misc:table_lookup(Headers1, <<"x-death">>)),
+ ?assertEqual(undefined, rabbit_misc:table_lookup(Headers2, <<"x-death">>)),
+
+ %% Nack the message so it now gets dead lettered
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DTag1,
+ multiple = false,
+ requeue = false}),
+ wait_for_messages(Config, [[DLXQName, <<"2">>, <<"1">>, <<"1">>]]),
+ {#'basic.get_ok'{}, #amqp_msg{payload = P1,
+ props = #'P_basic'{headers = Headers3}}} =
+ amqp_channel:call(Ch, #'basic.get'{queue = DLXQName}),
+ consume_empty(Ch, QName),
+ ?assertEqual(undefined, rabbit_misc:table_lookup(Headers3, <<"BCC">>)),
+ ?assertMatch({array, _}, rabbit_misc:table_lookup(Headers3, <<"x-death">>)).
+
+%% Three top-level headers are added for the very first dead-lettering event:
+%% x-first-death-reason, x-first-death-queue, x-first-death-exchange.
+%% They have the same values as the reason, queue, and exchange fields of the
+%% original dead lettering event. Once added, these headers are never modified.
+dead_letter_headers_first_death(Config) ->
+ {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ DLXQName = ?config(queue_name_dlx, Config),
+ Args = ?config(queue_args, Config),
+ Durable = ?config(queue_durable, Config),
+ DLXExchange = ?config(dlx_exchange, Config),
+
+ %% Let's create a small dead-lettering loop QName -> DLXQName -> QName
+ DeadLetterArgs = [{<<"x-dead-letter-routing-key">>, longstr, DLXQName},
+ {<<"x-dead-letter-exchange">>, longstr, DLXExchange}],
+ DLXDeadLetterArgs = [{<<"x-dead-letter-routing-key">>, longstr, QName},
+ {<<"x-dead-letter-exchange">>, longstr, <<>>}],
+ #'exchange.declare_ok'{} = amqp_channel:call(Ch, #'exchange.declare'{exchange = DLXExchange}),
+ #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = QName, arguments = DeadLetterArgs ++ Args, durable = Durable}),
+ #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = DLXQName, durable = Durable, arguments = DLXDeadLetterArgs}),
+ #'queue.bind_ok'{} = amqp_channel:call(Ch, #'queue.bind'{queue = DLXQName,
+ exchange = DLXExchange,
+ routing_key = DLXQName}),
+
+ %% Publish and nack a message
+ P1 = <<"msg1">>,
+ publish(Ch, QName, [P1]),
+ wait_for_messages(Config, [[QName, <<"1">>, <<"1">>, <<"0">>]]),
+ [DTag1] = consume(Ch, QName, [P1]),
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DTag1,
+ multiple = false,
+ requeue = false}),
+ %% Consume and check headers
+ wait_for_messages(Config, [[DLXQName, <<"1">>, <<"1">>, <<"0">>]]),
+ {#'basic.get_ok'{delivery_tag = DTag2}, #amqp_msg{payload = P1,
+ props = #'P_basic'{headers = Headers}}} =
+ amqp_channel:call(Ch, #'basic.get'{queue = DLXQName}),
+ ?assertEqual({longstr, <<"rejected">>},
+ rabbit_misc:table_lookup(Headers, <<"x-first-death-reason">>)),
+ ?assertEqual({longstr, QName},
+ rabbit_misc:table_lookup(Headers, <<"x-first-death-queue">>)),
+ ?assertEqual({longstr, <<>>},
+ rabbit_misc:table_lookup(Headers, <<"x-first-death-exchange">>)),
+ %% Nack the message again so it gets dead lettered to the initial queue. x-first-death
+ %% headers should not change
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DTag2,
+ multiple = false,
+ requeue = false}),
+ wait_for_messages(Config, [[QName, <<"1">>, <<"1">>, <<"0">>]]),
+ {#'basic.get_ok'{}, #amqp_msg{payload = P1,
+ props = #'P_basic'{headers = Headers2}}} =
+ amqp_channel:call(Ch, #'basic.get'{queue = QName}),
+ ?assertEqual({longstr, <<"rejected">>},
+ rabbit_misc:table_lookup(Headers2, <<"x-first-death-reason">>)),
+ ?assertEqual({longstr, QName},
+ rabbit_misc:table_lookup(Headers2, <<"x-first-death-queue">>)),
+ ?assertEqual({longstr, <<>>},
+ rabbit_misc:table_lookup(Headers2, <<"x-first-death-exchange">>)).
+
+%%%%%%%%%%%%%%%%%%%%%%%%
+%% Test helpers
+%%%%%%%%%%%%%%%%%%%%%%%%
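+%% Note: wait_for_messages/2 (not shown here) takes rows of
+%% [QueueName, Messages, MessagesReady, MessagesUnacknowledged], all as binaries.
+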
+declare_dead_letter_queues(Ch, Config, QName, DLXQName) ->
+ declare_dead_letter_queues(Ch, Config, QName, DLXQName, []).
+
+declare_dead_letter_queues(Ch, Config, QName, DLXQName, ExtraArgs) ->
+ Args = ?config(queue_args, Config),
+ Durable = ?config(queue_durable, Config),
+ DLXExchange = ?config(dlx_exchange, Config),
+
+ %% Declare DLX exchange
+ #'exchange.declare_ok'{} = amqp_channel:call(Ch, #'exchange.declare'{exchange = DLXExchange}),
+
+ %% Declare queue
+ DeadLetterArgs = [{<<"x-dead-letter-exchange">>, longstr, DLXExchange},
+ {<<"x-dead-letter-routing-key">>, longstr, DLXQName}],
+ #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = QName, arguments = DeadLetterArgs ++ Args ++ ExtraArgs, durable = Durable}),
+
+ %% Declare and bind DLX queue
+ #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = DLXQName, durable = Durable}),
+ #'queue.bind_ok'{} = amqp_channel:call(Ch, #'queue.bind'{queue = DLXQName,
+ exchange = DLXExchange,
+ routing_key = DLXQName}).
+
+publish(Ch, QName, Payloads) ->
+ [amqp_channel:call(Ch, #'basic.publish'{routing_key = QName}, #amqp_msg{payload = Payload})
+ || Payload <- Payloads].
+
+publish(Ch, QName, Payloads, Headers) ->
+ [amqp_channel:call(Ch, #'basic.publish'{routing_key = QName},
+ #amqp_msg{payload = Payload,
+ props = #'P_basic'{headers = Headers}})
+ || Payload <- Payloads].
+
+consume(Ch, QName, Payloads) ->
+ [begin
+ {#'basic.get_ok'{delivery_tag = DTag}, #amqp_msg{payload = Payload}} =
+ amqp_channel:call(Ch, #'basic.get'{queue = QName}),
+ DTag
+ end || Payload <- Payloads].
+
+consume_empty(Ch, QName) ->
+ #'basic.get_empty'{} = amqp_channel:call(Ch, #'basic.get'{queue = QName}).
+
+sync_mirrors(QName, Config) ->
+ case ?config(is_mirrored, Config) of
+ true ->
+ rabbit_ct_broker_helpers:rabbitmqctl(Config, 0, [<<"sync_queue">>, QName]);
+ _ -> ok
+ end.
diff --git a/deps/rabbit/test/definition_import_SUITE.erl b/deps/rabbit/test/definition_import_SUITE.erl
new file mode 100644
index 0000000000..ac0c18da99
--- /dev/null
+++ b/deps/rabbit/test/definition_import_SUITE.erl
@@ -0,0 +1,257 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(definition_import_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, boot_time_import},
+ {group, roundtrip},
+ {group, import_on_a_running_node}
+ ].
+
+groups() ->
+ [
+ {import_on_a_running_node, [], [
+ %% Note: to make it easier to see which case failed,
+ %% these are intentionally not folded into a single case.
+ %% If generation becomes an alternative worth considering for these tests,
+ %% we'll just add a case that drives PropEr.
+ import_case1,
+ import_case2,
+ import_case3,
+ import_case4,
+ import_case5,
+ import_case6,
+ import_case7,
+ import_case8,
+ import_case9,
+ import_case10,
+ import_case11,
+ import_case12,
+ import_case13
+ ]},
+ {boot_time_import, [], [
+ import_on_a_booting_node
+ ]},
+
+ {roundtrip, [], [
+ export_import_round_trip_case1,
+ export_import_round_trip_case2
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Test suite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ inets:start(),
+ Config.
+
+end_per_suite(Config) ->
+ Config.
+
+init_per_group(boot_time_import = Group, Config) ->
+ CasePath = filename:join(?config(data_dir, Config), "case5.json"),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, Group},
+ {rmq_nodes_count, 1}
+ ]),
+ Config2 = rabbit_ct_helpers:merge_app_env(Config1,
+ {rabbit, [
+ {load_definitions, CasePath}
+ ]}),
+ rabbit_ct_helpers:run_setup_steps(Config2, rabbit_ct_broker_helpers:setup_steps());
+init_per_group(Group, Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, Group}
+ ]),
+ rabbit_ct_helpers:run_setup_steps(Config1, rabbit_ct_broker_helpers:setup_steps()).
+
+end_per_group(_, Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config, rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+%%
+%% Tests
+%%
+
+import_case1(Config) -> import_file_case(Config, "case1").
+import_case2(Config) -> import_file_case(Config, "case2").
+import_case3(Config) -> import_file_case(Config, "case3").
+import_case4(Config) -> import_file_case(Config, "case4").
+import_case6(Config) -> import_file_case(Config, "case6").
+import_case7(Config) -> import_file_case(Config, "case7").
+import_case8(Config) -> import_file_case(Config, "case8").
+
+import_case9(Config) -> import_from_directory_case(Config, "case9").
+
+import_case10(Config) -> import_from_directory_case_fails(Config, "case10").
+
+import_case5(Config) ->
+ import_file_case(Config, "case5"),
+ ?assertEqual(rabbit_ct_broker_helpers:rpc(Config, 0,
+ rabbit_runtime_parameters, value_global,
+ [mqtt_port_to_vhost_mapping]),
+ %% expect a proplist, see rabbitmq/rabbitmq-management#528
+ [{<<"1883">>,<<"/">>},
+ {<<"1884">>,<<"vhost2">>}]).
+
+import_case11(Config) -> import_file_case(Config, "case11").
+import_case12(Config) -> import_invalid_file_case(Config, "failing_case12").
+
+import_case13(Config) ->
+ case rabbit_ct_broker_helpers:enable_feature_flag(Config, quorum_queue) of
+ ok ->
+ import_file_case(Config, "case13"),
+ VHost = <<"/">>,
+ QueueName = <<"definitions.import.case13.qq.1">>,
+ QueueIsImported =
+ fun () ->
+ case queue_lookup(Config, VHost, QueueName) of
+ {ok, _} -> true;
+ _ -> false
+ end
+ end,
+ rabbit_ct_helpers:await_condition(QueueIsImported, 20000),
+ {ok, Q} = queue_lookup(Config, VHost, QueueName),
+
+ %% see rabbitmq/rabbitmq-server#2400, rabbitmq/rabbitmq-server#2426
+ ?assert(amqqueue:is_quorum(Q)),
+ ?assertEqual([{<<"x-max-length">>, long, 991},
+ {<<"x-queue-type">>, longstr, <<"quorum">>}],
+ amqqueue:get_arguments(Q));
+ Skip ->
+ Skip
+ end.
+
+export_import_round_trip_case1(Config) ->
+ %% case 6 has runtime parameters that do not depend on any plugins
+ import_file_case(Config, "case6"),
+ Defs = export(Config),
+ import_raw(Config, rabbit_json:encode(Defs)).
+
+export_import_round_trip_case2(Config) ->
+ import_file_case(Config, "case9", "case9a"),
+ Defs = export(Config),
+ import_parsed(Config, Defs).
+
+import_on_a_booting_node(Config) ->
+ %% see case5.json
+ VHost = <<"vhost2">>,
+ %% verify that vhost2 eventually starts
+ case rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_vhost, await_running_on_all_nodes, [VHost, 3000]) of
+ ok -> ok;
+ {error, timeout} -> ct:fail("virtual host ~p was not imported on boot", [VHost])
+ end.
+
+%%
+%% Implementation
+%%
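+%% For reference, these helpers ultimately drive rabbit_definitions:import_raw/1
+%% and rabbit_definitions:import_parsed/1 on the broker node; a minimal inline
+%% sketch with a hypothetical document:
+%%   rabbit_definitions:import_raw(<<"{\"vhosts\":[{\"name\":\"example\"}]}">>).
+%%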
+
+import_file_case(Config, CaseName) ->
+ CasePath = filename:join([
+ ?config(data_dir, Config),
+ CaseName ++ ".json"
+ ]),
+ rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, run_import_case, [CasePath]),
+ ok.
+
+import_file_case(Config, Subdirectory, CaseName) ->
+ CasePath = filename:join([
+ ?config(data_dir, Config),
+ Subdirectory,
+ CaseName ++ ".json"
+ ]),
+ rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, run_import_case, [CasePath]),
+ ok.
+
+import_invalid_file_case(Config, CaseName) ->
+ CasePath = filename:join(?config(data_dir, Config), CaseName ++ ".json"),
+ rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, run_invalid_import_case, [CasePath]),
+ ok.
+
+import_from_directory_case(Config, CaseName) ->
+ import_from_directory_case_expect(Config, CaseName, ok).
+
+import_from_directory_case_fails(Config, CaseName) ->
+ import_from_directory_case_expect(Config, CaseName, error).
+
+import_from_directory_case_expect(Config, CaseName, Expected) ->
+ CasePath = filename:join(?config(data_dir, Config), CaseName),
+ ?assert(filelib:is_dir(CasePath)),
+ rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, run_directory_import_case,
+ [CasePath, Expected]),
+ ok.
+
+import_raw(Config, Body) ->
+ case rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_definitions, import_raw, [Body]) of
+ ok -> ok;
+ {error, E} ->
+ ct:pal("Import of JSON definitions ~p failed: ~p~n", [Body, E]),
+ ct:fail({failure, Body, E})
+ end.
+
+import_parsed(Config, Body) ->
+ case rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_definitions, import_parsed, [Body]) of
+ ok -> ok;
+ {error, E} ->
+ ct:pal("Import of parsed definitions ~p failed: ~p~n", [Body, E]),
+ ct:fail({failure, Body, E})
+ end.
+
+export(Config) ->
+ rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, run_export, []).
+
+run_export() ->
+ rabbit_definitions:all_definitions().
+
+run_directory_import_case(Path, Expected) ->
+ ct:pal("Will load definitions from files under ~p~n", [Path]),
+ Result = rabbit_definitions:maybe_load_definitions_from(true, Path),
+ case Expected of
+ ok ->
+ ok = Result;
+ error ->
+ ?assertMatch({error, {failed_to_import_definitions, _, _}}, Result)
+ end.
+
+run_import_case(Path) ->
+ {ok, Body} = file:read_file(Path),
+ ct:pal("Successfully loaded a definition to import from ~p~n", [Path]),
+ case rabbit_definitions:import_raw(Body) of
+ ok -> ok;
+ {error, E} ->
+ ct:pal("Import case ~p failed: ~p~n", [Path, E]),
+ ct:fail({failure, Path, E})
+ end.
+
+run_invalid_import_case(Path) ->
+ {ok, Body} = file:read_file(Path),
+ ct:pal("Successfully loaded a definition to import from ~p~n", [Path]),
+ case rabbit_definitions:import_raw(Body) of
+ ok ->
+ ct:pal("Expected import case ~p to fail~n", [Path]),
+ ct:fail({failure, Path});
+ {error, _E} -> ok
+ end.
+
+queue_lookup(Config, VHost, Name) ->
+ rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_amqqueue, lookup, [rabbit_misc:r(VHost, queue, Name)]).
diff --git a/deps/rabbit/test/definition_import_SUITE_data/case1.json b/deps/rabbit/test/definition_import_SUITE_data/case1.json
new file mode 100644
index 0000000000..b0785a5214
--- /dev/null
+++ b/deps/rabbit/test/definition_import_SUITE_data/case1.json
@@ -0,0 +1,99 @@
+{
+ "rabbit_version": "3.6.9",
+ "users": [
+ {
+ "name": "project_admin",
+ "password_hash": "A0EX\/2hiwrIDKFS+nEqwbCGcVxwEkDBFF3mBfkNW53KFFk64",
+ "hashing_algorithm": "rabbit_password_hashing_sha256",
+ "tags": ""
+ }
+ ],
+ "vhosts": [
+ {
+ "name": "\/"
+ }
+ ],
+ "permissions": [
+ {
+ "user": "project_admin",
+ "vhost": "\/",
+ "configure": ".*",
+ "write": ".*",
+ "read": ".*"
+ }
+ ],
+ "policies": [
+ {
+ "vhost": "\/",
+ "name": "nd-ns",
+ "pattern": "^project-nd-ns-",
+ "apply-to": "queues",
+ "definition": {
+ "expires": 120000,
+ "max-length": 10000
+ },
+ "priority": 1
+ },
+ {
+ "vhost": "\/",
+ "name": "nd-s",
+ "pattern": "^project-nd-s-",
+ "apply-to": "queues",
+ "definition": {
+ "expires": 1800000,
+ "max-length": 50000
+ },
+ "priority": 1
+ },
+ {
+ "vhost": "\/",
+ "name": "d-ns",
+ "pattern": "^project-d-ns-",
+ "apply-to": "queues",
+ "definition": {
+ "ha-mode": "exactly",
+ "ha-params": 3,
+ "ha-sync-mode": "automatic",
+ "expires": 604800000,
+ "ha-sync-batch-size": 100,
+ "queue-mode": "lazy"
+ },
+ "priority": 1
+ },
+ {
+ "vhost": "\/",
+ "name": "d-s",
+ "pattern": "^project-d-s-",
+ "apply-to": "queues",
+ "definition": {
+ "ha-mode": "exactly",
+ "ha-params": 3,
+ "ha-sync-mode": "automatic",
+ "expires": 604800000,
+ "queue-master-locator": "min-masters",
+ "ha-sync-batch-size": 100,
+ "queue-mode": "lazy"
+ },
+ "priority": 1
+ }
+ ],
+ "queues": [
+
+ ],
+ "exchanges": [
+ {
+ "name": "project.topic.default",
+ "vhost": "\/",
+ "type": "topic",
+ "durable": true,
+ "auto_delete": false,
+ "internal": false,
+ "arguments": {
+
+ }
+ }
+ ],
+ "bindings": [
+
+ ]
+} \ No newline at end of file
diff --git a/deps/rabbit/test/definition_import_SUITE_data/case10/case10a.json b/deps/rabbit/test/definition_import_SUITE_data/case10/case10a.json
new file mode 100644
index 0000000000..1eec5ccb9e
--- /dev/null
+++ b/deps/rabbit/test/definition_import_SUITE_data/case10/case10a.json
@@ -0,0 +1,67 @@
+{
+ "rabbit_version": "3.7.13",
+ "users": [
+ {
+ "name": "bunny_reader",
+ "password_hash": "ExmGdjBTmQEPxcW2z+dsOuPvjFbTBiYQgMByzfpE/IIXplYG",
+ "hashing_algorithm": "rabbit_password_hashing_sha256",
+ "tags": ""
+ }
+ ],
+ "vhosts": [
+ {
+ "name": "langohr_testbed"
+ },
+ {
+ "name": "bunny_testbed"
+ },
+ {
+ "name": "/"
+ }
+ ],
+ "permissions": [
+ {
+ "user": "bunny_reader",
+ "vhost": "bunny_testbed",
+ "configure": "^---$",
+ "write": "^---$",
+ "read": ".*"
+ }
+ ],
+ "topic_permissions": [],
+ "parameters": [
+ {
+ "component": "vhost-limits",
+ "name": "limits",
+ "value": {
+ "max-connections": 14000
+ },
+ "vhost": "/"
+ }
+ ],
+ "global_parameters": [
+ {
+ "name": "cluster_name",
+ "value": "rabbit@localhost"
+ }
+ ],
+ "policies": [],
+ "queues": [
+ {
+ "name": "bunny.basic_consume0.1364356981103202",
+ "vhost": "bunny_testbed",
+ "durable": false,
+ "auto_delete": true,
+ "arguments": {}
+ },
+ {
+ "name": "bunny.basic_consume0.1364356981103202",
+ "vhost": "bunny_testbed",
+ "durable": true,
+ "auto_delete": true,
+ "arguments": {}
+ }
+ ],
+ "exchanges": [],
+ "bindings": []
+}
diff --git a/deps/rabbit/test/definition_import_SUITE_data/case10/case10b.json b/deps/rabbit/test/definition_import_SUITE_data/case10/case10b.json
new file mode 100644
index 0000000000..9eb48e341e
--- /dev/null
+++ b/deps/rabbit/test/definition_import_SUITE_data/case10/case10b.json
@@ -0,0 +1,595 @@
+{
+ "rabbit_version": "3.7.13",
+ "users": [
+ {
+ "name": "langohr",
+ "password_hash": "7p9PXlsYs92NlHSdNgPoDXmN77NqeGpzCTHpElq/wPS1eAEd",
+ "hashing_algorithm": "rabbit_password_hashing_sha256",
+ "tags": ""
+ },
+ {
+ "name": "bunny_reader",
+ "password_hash": "ExmGdjBTmQEPxcW2z+dsOuPvjFbTBiYQgMByzfpE/IIXplYG",
+ "hashing_algorithm": "rabbit_password_hashing_sha256",
+ "tags": ""
+ },
+ {
+ "name": "bunny_gem",
+ "password_hash": "8HH7uxmZS3FDldlYmHpFEE5+gWaeQaim8qpWIHkmNxuQK8xO",
+ "hashing_algorithm": "rabbit_password_hashing_sha256",
+ "tags": ""
+ },
+ {
+ "name": "guest2",
+ "password_hash": "E04A7cvvsaDJBezc3Sc2jCnywe9oS4DX18qFe4dwkjIr26gf",
+ "hashing_algorithm": "rabbit_password_hashing_sha256",
+ "tags": "monitoring"
+ },
+ {
+ "name": "guest",
+ "password_hash": "CPCbkNAHXgQ7vmrqwP9e7RWQsE8U2DqN7JA4ggS50c4LwDda",
+ "hashing_algorithm": "rabbit_password_hashing_sha256",
+ "tags": "administrator"
+ },
+ {
+ "name": "temp-user",
+ "password_hash": "CfUQkDeOYDrPkACDCjoF5zySbsXPIoMgNfv7FWfEpVFGegnL",
+ "hashing_algorithm": "rabbit_password_hashing_sha256",
+ "tags": "management"
+ }
+ ],
+ "vhosts": [
+ {
+ "name": "langohr_testbed"
+ },
+ {
+ "name": "bunny_testbed"
+ },
+ {
+ "name": "/"
+ },
+ {
+ "name": "vhost3"
+ }
+ ],
+ "permissions": [
+ {
+ "user": "bunny_reader",
+ "vhost": "bunny_testbed",
+ "configure": "^---$",
+ "write": "^---$",
+ "read": ".*"
+ },
+ {
+ "user": "bunny_gem",
+ "vhost": "bunny_testbed",
+ "configure": ".*",
+ "write": ".*",
+ "read": ".*"
+ },
+ {
+ "user": "guest",
+ "vhost": "/",
+ "configure": ".*",
+ "write": ".*",
+ "read": ".*"
+ },
+ {
+ "user": "langohr",
+ "vhost": "langohr_testbed",
+ "configure": ".*",
+ "write": ".*",
+ "read": ".*"
+ },
+ {
+ "user": "guest",
+ "vhost": "bunny_testbed",
+ "configure": ".*",
+ "write": ".*",
+ "read": ".*"
+ },
+ {
+ "user": "guest",
+ "vhost": "langohr_testbed",
+ "configure": ".*",
+ "write": ".*",
+ "read": ".*"
+ },
+ {
+ "user": "guest",
+ "vhost": "vhost3",
+ "configure": ".*",
+ "write": ".*",
+ "read": ".*"
+ },
+ {
+ "user": "langohr",
+ "vhost": "/",
+ "configure": ".*",
+ "write": ".*",
+ "read": ".*"
+ },
+ {
+ "user": "temp-user",
+ "vhost": "/",
+ "configure": ".*",
+ "write": ".*",
+ "read": ".*"
+ }
+ ],
+ "topic_permissions": [],
+ "parameters": [
+ {
+ "value": {
+ "expires": 3600000,
+ "uri": "amqp://localhost:5673"
+ },
+ "vhost": "/",
+ "component": "federation-upstream",
+ "name": "up-hare"
+ },
+ {
+ "value": {
+ "max-connections": 2000
+ },
+ "vhost": "/",
+ "component": "vhost-limits",
+ "name": "limits"
+ }
+ ],
+ "global_parameters": [
+ {
+ "name": "cluster_name",
+ "value": "rabbit@localhost"
+ }
+ ],
+ "policies": [],
+ "queues": [
+ {
+ "name": "bunny.basic_consume0.7103611911099639",
+ "vhost": "bunny_testbed",
+ "durable": false,
+ "auto_delete": true,
+ "arguments": {}
+ },
+ {
+ "name": "bunny.basic_consume0.6091120557781405",
+ "vhost": "bunny_testbed",
+ "durable": false,
+ "auto_delete": true,
+ "arguments": {}
+ },
+ {
+ "name": "bunny.basic_consume0.8661861002262826",
+ "vhost": "bunny_testbed",
+ "durable": false,
+ "auto_delete": true,
+ "arguments": {}
+ },
+ {
+ "name": "bunny.basic_consume0.3682573609392056",
+ "vhost": "bunny_testbed",
+ "durable": false,
+ "auto_delete": true,
+ "arguments": {}
+ },
+ {
+ "name": "bunny.basic_consume0.14855593896585362",
+ "vhost": "bunny_testbed",
+ "durable": false,
+ "auto_delete": true,
+ "arguments": {}
+ },
+ {
+ "name": "bunny.basic_consume0.9534242141484872",
+ "vhost": "bunny_testbed",
+ "durable": false,
+ "auto_delete": true,
+ "arguments": {}
+ },
+ {
+ "name": "bunny.basic_consume0.9434723539955824",
+ "vhost": "bunny_testbed",
+ "durable": false,
+ "auto_delete": true,
+ "arguments": {}
+ },
+ {
+ "name": "bunny.basic_consume0.12235844522013617",
+ "vhost": "bunny_testbed",
+ "durable": false,
+ "auto_delete": true,
+ "arguments": {}
+ },
+ {
+ "name": "bunny.basic_consume0.8370997977912426",
+ "vhost": "bunny_testbed",
+ "durable": false,
+ "auto_delete": true,
+ "arguments": {}
+ },
+ {
+ "name": "bunny.basic_consume0.4548488370639835",
+ "vhost": "bunny_testbed",
+ "durable": false,
+ "auto_delete": true,
+ "arguments": {}
+ },
+ {
+ "name": "bunny.basic_consume0.2289868670635532",
+ "vhost": "bunny_testbed",
+ "durable": false,
+ "auto_delete": true,
+ "arguments": {}
+ },
+ {
+ "name": "bunny.basic_consume0.00797124769641977",
+ "vhost": "bunny_testbed",
+ "durable": false,
+ "auto_delete": true,
+ "arguments": {}
+ },
+ {
+ "name": "amq.gen-xddEPq9wHSNZKQbPK8pi3A",
+ "vhost": "bunny_testbed",
+ "durable": false,
+ "auto_delete": false,
+ "arguments": {}
+ },
+ {
+ "name": "bunny.basic_consume0.5195700828676673",
+ "vhost": "bunny_testbed",
+ "durable": false,
+ "auto_delete": true,
+ "arguments": {}
+ },
+ {
+ "name": "bunny.basic_consume0.3071859764599716",
+ "vhost": "bunny_testbed",
+ "durable": false,
+ "auto_delete": true,
+ "arguments": {}
+ },
+ {
+ "name": "return",
+ "vhost": "/",
+ "durable": false,
+ "auto_delete": true,
+ "arguments": {}
+ },
+ {
+ "name": "q1",
+ "vhost": "/",
+ "durable": true,
+ "auto_delete": false,
+ "arguments": {}
+ },
+ {
+ "name": "declareArgs-deliveries-dead-letter",
+ "vhost": "/",
+ "durable": false,
+ "auto_delete": true,
+ "arguments": {}
+ },
+ {
+ "name": "test.rabbitmq-basic-nack",
+ "vhost": "/",
+ "durable": false,
+ "auto_delete": true,
+ "arguments": {}
+ },
+ {
+ "name": "langohr.test.recovery.q1",
+ "vhost": "/",
+ "durable": true,
+ "auto_delete": true,
+ "arguments": {}
+ },
+ {
+ "name": "langohr.tests2.queues.client-named.durable.non-exclusive.non-auto-deleted",
+ "vhost": "/",
+ "durable": true,
+ "auto_delete": false,
+ "arguments": {}
+ },
+ {
+ "name": "test.tx.rollback",
+ "vhost": "/",
+ "durable": false,
+ "auto_delete": true,
+ "arguments": {}
+ },
+ {
+ "name": "test-integration-declared-passive-queue",
+ "vhost": "/",
+ "durable": false,
+ "auto_delete": true,
+ "arguments": {}
+ },
+ {
+ "name": "langohr.tests2.queues.client-named.non-durable.non-exclusive.auto-deleted",
+ "vhost": "/",
+ "durable": false,
+ "auto_delete": true,
+ "arguments": {}
+ },
+ {
+ "name": "test.recover",
+ "vhost": "/",
+ "durable": false,
+ "auto_delete": true,
+ "arguments": {}
+ },
+ {
+ "name": "amq.gen-7EZF7WjGIQFDoXexVF-e8w",
+ "vhost": "/",
+ "durable": false,
+ "auto_delete": true,
+ "arguments": {
+ "x-message-ttl": 1500
+ }
+ },
+ {
+ "name": "test.integration.channel.error",
+ "vhost": "/",
+ "durable": false,
+ "auto_delete": true,
+ "arguments": {}
+ },
+ {
+ "name": "confirm",
+ "vhost": "/",
+ "durable": false,
+ "auto_delete": true,
+ "arguments": {}
+ },
+ {
+ "name": "test.rabbitmq-message-ttl",
+ "vhost": "/",
+ "durable": false,
+ "auto_delete": true,
+ "arguments": {
+ "x-message-ttl": 100
+ }
+ },
+ {
+ "name": "declareWithTTL",
+ "vhost": "/",
+ "durable": false,
+ "auto_delete": true,
+ "arguments": {
+ "x-message-ttl": 9000000
+ }
+ },
+ {
+ "name": "test.tx.commit",
+ "vhost": "/",
+ "durable": false,
+ "auto_delete": true,
+ "arguments": {}
+ },
+ {
+ "name": "test.get-ok",
+ "vhost": "/",
+ "durable": false,
+ "auto_delete": true,
+ "arguments": {}
+ },
+ {
+ "name": "langohr.tests2.queues.non-auto-deleted1",
+ "vhost": "/",
+ "durable": false,
+ "auto_delete": true,
+ "arguments": {}
+ },
+ {
+ "name": "qv3",
+ "vhost": "vhost3",
+ "durable": true,
+ "auto_delete": false,
+ "arguments": {}
+ }
+ ],
+ "exchanges": [
+ {
+ "name": "bunny.tests.exchanges.fanout",
+ "vhost": "bunny_testbed",
+ "type": "fanout",
+ "durable": false,
+ "auto_delete": true,
+ "internal": false,
+ "arguments": {}
+ },
+ {
+ "name": "declareArgs-dead-letter",
+ "vhost": "/",
+ "type": "fanout",
+ "durable": false,
+ "auto_delete": true,
+ "internal": false,
+ "arguments": {}
+ },
+ {
+ "name": "langohr.tests.exchanges.topic5",
+ "vhost": "/",
+ "type": "topic",
+ "durable": false,
+ "auto_delete": false,
+ "internal": false,
+ "arguments": {}
+ },
+ {
+ "name": "langohr.extensions.altexchanges.direct1",
+ "vhost": "/",
+ "type": "direct",
+ "durable": false,
+ "auto_delete": true,
+ "internal": false,
+ "arguments": {
+ "alternate-exchange": "langohr.extensions.altexchanges.fanout1"
+ }
+ },
+ {
+ "name": "langohr.tests.exchanges.fanout1",
+ "vhost": "/",
+ "type": "fanout",
+ "durable": false,
+ "auto_delete": false,
+ "internal": false,
+ "arguments": {}
+ },
+ {
+ "name": "langohr.tests.exchanges.direct3",
+ "vhost": "/",
+ "type": "direct",
+ "durable": false,
+ "auto_delete": true,
+ "internal": false,
+ "arguments": {}
+ },
+ {
+ "name": "langohr.tests.exchanges.topic4",
+ "vhost": "/",
+ "type": "topic",
+ "durable": false,
+ "auto_delete": true,
+ "internal": false,
+ "arguments": {}
+ },
+ {
+ "name": "langohr.test.recovery.fanout2",
+ "vhost": "/",
+ "type": "fanout",
+ "durable": true,
+ "auto_delete": false,
+ "internal": false,
+ "arguments": {}
+ },
+ {
+ "name": "langohr.tests.exchanges.fanout3",
+ "vhost": "/",
+ "type": "fanout",
+ "durable": false,
+ "auto_delete": true,
+ "internal": false,
+ "arguments": {}
+ },
+ {
+ "name": "langohr.tests.exchanges.direct4",
+ "vhost": "/",
+ "type": "direct",
+ "durable": false,
+ "auto_delete": false,
+ "internal": false,
+ "arguments": {}
+ },
+ {
+ "name": "langohr.tests.exchanges.topic2",
+ "vhost": "/",
+ "type": "topic",
+ "durable": true,
+ "auto_delete": false,
+ "internal": false,
+ "arguments": {}
+ },
+ {
+ "name": "test-integration-declared-passive-exchange",
+ "vhost": "/",
+ "type": "direct",
+ "durable": false,
+ "auto_delete": true,
+ "internal": false,
+ "arguments": {}
+ },
+ {
+ "name": "test-channel-still-exists",
+ "vhost": "/",
+ "type": "direct",
+ "durable": false,
+ "auto_delete": true,
+ "internal": false,
+ "arguments": {}
+ },
+ {
+ "name": "langohr.tests.exchanges.topic1",
+ "vhost": "/",
+ "type": "topic",
+ "durable": false,
+ "auto_delete": false,
+ "internal": false,
+ "arguments": {}
+ },
+ {
+ "name": "langohr.tests.exchanges.fanout2",
+ "vhost": "/",
+ "type": "fanout",
+ "durable": true,
+ "auto_delete": false,
+ "internal": false,
+ "arguments": {}
+ },
+ {
+ "name": "langohr.tests.exchanges.direct1",
+ "vhost": "/",
+ "type": "direct",
+ "durable": false,
+ "auto_delete": false,
+ "internal": false,
+ "arguments": {}
+ },
+ {
+ "name": "langohr.tests.exchanges.direct2",
+ "vhost": "/",
+ "type": "direct",
+ "durable": true,
+ "auto_delete": false,
+ "internal": false,
+ "arguments": {}
+ },
+ {
+ "name": "langohr.tests.exchanges.headers2",
+ "vhost": "/",
+ "type": "headers",
+ "durable": false,
+ "auto_delete": false,
+ "internal": false,
+ "arguments": {}
+ },
+ {
+ "name": "langohr.tests.exchanges.topic3",
+ "vhost": "/",
+ "type": "topic",
+ "durable": false,
+ "auto_delete": true,
+ "internal": false,
+ "arguments": {}
+ },
+ {
+ "name": "langohr.tests.exchanges.fanout4",
+ "vhost": "/",
+ "type": "fanout",
+ "durable": false,
+ "auto_delete": false,
+ "internal": false,
+ "arguments": {}
+ }
+ ],
+ "bindings": [
+ {
+ "source": "amq.fanout",
+ "vhost": "/",
+ "destination": "langohr.tests2.queues.client-named.non-durable.non-exclusive.auto-deleted",
+ "destination_type": "queue",
+ "routing_key": "",
+ "arguments": {}
+ },
+ {
+ "source": "declareArgs-dead-letter",
+ "vhost": "/",
+ "destination": "declareArgs-deliveries-dead-letter",
+ "destination_type": "queue",
+ "routing_key": "#",
+ "arguments": {}
+ }
+ ]
+}
diff --git a/deps/rabbit/test/definition_import_SUITE_data/case11.json b/deps/rabbit/test/definition_import_SUITE_data/case11.json
new file mode 100644
index 0000000000..13afdf5cb5
--- /dev/null
+++ b/deps/rabbit/test/definition_import_SUITE_data/case11.json
@@ -0,0 +1,24 @@
+{
+ "rabbit_version": "3.8.0+rc.1.5.g9148053",
+ "rabbitmq_version": "3.8.0+rc.1.5.g9148053",
+ "queues": [
+ {
+ "name": "amq.queuebar",
+ "vhost": "/",
+ "durable": true,
+ "auto_delete": false,
+ "arguments": {}
+ }
+ ],
+ "exchanges": [
+ {
+ "name": "amq.foobar",
+ "vhost": "/",
+ "type": "direct",
+ "durable": true,
+ "auto_delete": false,
+ "internal": false,
+ "arguments": {}
+ }
+ ]
+}
diff --git a/deps/rabbit/test/definition_import_SUITE_data/case13.json b/deps/rabbit/test/definition_import_SUITE_data/case13.json
new file mode 100644
index 0000000000..726aab1e6c
--- /dev/null
+++ b/deps/rabbit/test/definition_import_SUITE_data/case13.json
@@ -0,0 +1,55 @@
+{
+ "bindings": [],
+ "exchanges": [],
+ "global_parameters": [
+ {
+ "name": "cluster_name",
+ "value": "rabbit@localhost"
+ }
+ ],
+ "parameters": [],
+ "permissions": [
+ {
+ "configure": ".*",
+ "read": ".*",
+ "user": "guest",
+ "vhost": "/",
+ "write": ".*"
+ }
+ ],
+ "policies": [],
+ "queues": [
+ {
+ "arguments": {
+ "x-max-length": 991,
+ "x-queue-type": "quorum"
+ },
+ "auto_delete": false,
+ "durable": true,
+ "name": "definitions.import.case13.qq.1",
+ "type": "quorum",
+ "vhost": "/"
+ }
+ ],
+ "rabbit_version": "3.8.6.gad0c0bd",
+ "rabbitmq_version": "3.8.6.gad0c0bd",
+ "topic_permissions": [],
+ "users": [
+ {
+ "hashing_algorithm": "rabbit_password_hashing_sha256",
+ "name": "guest",
+ "password_hash": "e8lL5PHYcbv3Pd53EUoTOMnVDmsLDgVJXqSQMT+mrO4LVIdW",
+ "tags": "administrator"
+ }
+ ],
+ "vhosts": [
+ {
+ "limits": [],
+ "metadata": {
+ "description": "Default virtual host",
+ "tags": []
+ },
+ "name": "/"
+ }
+ ]
+}
diff --git a/deps/rabbit/test/definition_import_SUITE_data/case2.json b/deps/rabbit/test/definition_import_SUITE_data/case2.json
new file mode 100644
index 0000000000..0f0a014681
--- /dev/null
+++ b/deps/rabbit/test/definition_import_SUITE_data/case2.json
@@ -0,0 +1,49 @@
+{
+ "rabbit_version": "3.7.0-rc.1",
+ "users": [
+ {
+ "name": "guest",
+ "password_hash": "A0EX\/2hiwrIDKFS+nEqwbCGcVxwEkDBFF3mBfkNW53KFFk64",
+ "hashing_algorithm": "rabbit_password_hashing_sha256",
+ "tags": "administrator"
+ }
+ ],
+ "vhosts": [
+ {
+ "name": "\/"
+ }
+ ],
+ "permissions": [
+ {
+ "user": "guest",
+ "vhost": "\/",
+ "configure": ".*",
+ "write": ".*",
+ "read": ".*"
+ }
+ ],
+ "topic_permissions": [
+
+ ],
+ "parameters": [
+
+ ],
+ "global_parameters": [
+ {
+ "name": "cluster_name",
+ "value": "rabbit@mercurio"
+ }
+ ],
+ "policies": [
+
+ ],
+ "queues": [
+
+ ],
+ "exchanges": [
+
+ ],
+ "bindings": [
+
+ ]
+}
diff --git a/deps/rabbit/test/definition_import_SUITE_data/case3.json b/deps/rabbit/test/definition_import_SUITE_data/case3.json
new file mode 100644
index 0000000000..963039f254
--- /dev/null
+++ b/deps/rabbit/test/definition_import_SUITE_data/case3.json
@@ -0,0 +1 @@
+{"rabbit_version":"3.7.0-alpha.381","users":[{"name":"admin","password_hash":"Edl2rJd/zLC187M1SKibRoTb6+xGkvkqoKWEq0kdNUbNLyLJ","hashing_algorithm":"rabbit_password_hashing_sha256","tags":"administrator"}],"vhosts":[{"name":"/"}],"permissions":[{"user":"admin","vhost":"/","configure":".*","write":".*","read":".*"}],"topic_permissions":[],"parameters":[],"global_parameters":[{"name":"cluster_name","value":"rmq-gcp-37"}],"policies":[{"vhost":"/","name":"2-queue-replicas","pattern":".*","apply-to":"queues","definition":{"ha-mode":"exactly","ha-params":2,"ha-sync-mode":"automatic"},"priority":0}],"queues":[{"name":"wr-vary-load-lazy-persistent-1","vhost":"/","durable":true,"auto_delete":false,"arguments":{"x-queue-mode":"lazy"}},{"name":"wr-vary-load-lazy-transient-1","vhost":"/","durable":true,"auto_delete":false,"arguments":{"x-queue-mode":"lazy"}},{"name":"wr-vary-load-2","vhost":"/","durable":true,"auto_delete":false,"arguments":{"x-queue-mode":"default"}},{"name":"wr-vary-load-1","vhost":"/","durable":true,"auto_delete":false,"arguments":{"x-queue-mode":"default"}},{"name":"aliveness-test","vhost":"/","durable":false,"auto_delete":false,"arguments":{}}],"exchanges":[],"bindings":[{"source":"amq.direct","vhost":"/","destination":"wr-vary-load-2","destination_type":"queue","routing_key":"wr-vary-load-2","arguments":{}},{"source":"amq.direct","vhost":"/","destination":"wr-vary-load-lazy-persistent-1","destination_type":"queue","routing_key":"wr-vary-load-lazy-persistent-1","arguments":{}},{"source":"amq.direct","vhost":"/","destination":"wr-vary-load-lazy-transient-1","destination_type":"queue","routing_key":"wr-vary-load-lazy-transient-1","arguments":{}}]} \ No newline at end of file
diff --git a/deps/rabbit/test/definition_import_SUITE_data/case4.json b/deps/rabbit/test/definition_import_SUITE_data/case4.json
new file mode 100644
index 0000000000..f5223ff3a2
--- /dev/null
+++ b/deps/rabbit/test/definition_import_SUITE_data/case4.json
@@ -0,0 +1,49 @@
+{
+ "bindings": [],
+ "exchanges": [],
+ "parameters": [],
+ "permissions": [
+ {
+ "configure": ".*",
+ "read": ".*",
+ "user": "guest",
+ "vhost": "/",
+ "write": ".*"
+ }
+ ],
+ "policies": [
+ {
+ "apply-to": "all",
+ "definition": {
+ "queue-master-locator": "client-local"
+ },
+ "name": "abc",
+ "pattern": "^abc\\.",
+ "priority": 0,
+ "vhost": "/"
+ }
+ ],
+ "queues": [
+ {
+ "arguments": {},
+ "auto_delete": false,
+ "durable": false,
+ "name": "abc.def",
+ "vhost": "/"
+ }
+ ],
+ "rabbit_version": "0.0.0",
+ "users": [
+ {
+ "hashing_algorithm": "rabbit_password_hashing_sha256",
+ "name": "guest",
+ "password_hash": "QM532K822VTbYBFbwSZEnT8jkH8TT0dPsUtja6vL0myfsrmk",
+ "tags": "administrator"
+ }
+ ],
+ "vhosts": [
+ {
+ "name": "/"
+ }
+ ]
+}
diff --git a/deps/rabbit/test/definition_import_SUITE_data/case5.json b/deps/rabbit/test/definition_import_SUITE_data/case5.json
new file mode 100644
index 0000000000..607dfd3d1f
--- /dev/null
+++ b/deps/rabbit/test/definition_import_SUITE_data/case5.json
@@ -0,0 +1,63 @@
+{
+ "rabbit_version": "3.7.2",
+ "users": [
+ {
+ "name": "guest",
+ "password_hash": "PD4MQV8Ivcprh1\/yUS9x7jkpbXtWIZLTQ0tvnZPncpI6Ui0a",
+ "hashing_algorithm": "rabbit_password_hashing_sha256",
+ "tags": "administrator"
+ }
+ ],
+ "vhosts": [
+ {
+ "name": "\/"
+ },
+ {
+ "name": "vhost2"
+ }
+ ],
+ "permissions": [
+ {
+ "user": "guest",
+ "vhost": "\/",
+ "configure": ".*",
+ "write": ".*",
+ "read": ".*"
+ },
+ {
+ "user": "guest",
+ "vhost": "vhost2",
+ "configure": ".*",
+ "write": ".*",
+ "read": ".*"
+ }
+ ],
+ "topic_permissions": [
+
+ ],
+ "parameters": [
+
+ ],
+ "global_parameters": [
+ {
+ "name": "mqtt_port_to_vhost_mapping",
+ "value": {
+ "1883": "\/",
+ "1884": "vhost2"
+ }
+ },
+ {
+ "name": "cluster_name",
+ "value": "rabbitmq@localhost"
+ }
+ ],
+ "policies": [
+ ],
+ "queues": [
+ ],
+ "exchanges": [
+ ],
+ "bindings": [
+
+ ]
+}
diff --git a/deps/rabbit/test/definition_import_SUITE_data/case6.json b/deps/rabbit/test/definition_import_SUITE_data/case6.json
new file mode 100644
index 0000000000..c0debb7de1
--- /dev/null
+++ b/deps/rabbit/test/definition_import_SUITE_data/case6.json
@@ -0,0 +1,47 @@
+{
+ "rabbit_version": "3.7.3+10.g41ec73b",
+ "users": [
+ {
+ "name": "guest",
+ "password_hash": "J+UiUxNQ3I8uPn6Lo2obWcl93VgXgbw4R+xhl3L5zHwkRFZG",
+ "hashing_algorithm": "rabbit_password_hashing_sha256",
+ "tags": "administrator"
+ }
+ ],
+ "vhosts": [
+ {
+ "name": "/"
+ }
+ ],
+ "permissions": [
+ {
+ "user": "guest",
+ "vhost": "/",
+ "configure": ".*",
+ "write": ".*",
+ "read": ".*"
+ }
+ ],
+ "topic_permissions": [],
+ "parameters": [
+ {
+ "value": {
+ "max-queues": 456,
+ "max-connections": 123
+ },
+ "vhost": "/",
+ "component": "vhost-limits",
+ "name": "limits"
+ }
+ ],
+ "global_parameters": [
+ {
+ "name": "cluster_name",
+ "value": "rabbit@localhost.localdomain"
+ }
+ ],
+ "policies": [],
+ "queues": [],
+ "exchanges": [],
+ "bindings": []
+} \ No newline at end of file
diff --git a/deps/rabbit/test/definition_import_SUITE_data/case7.json b/deps/rabbit/test/definition_import_SUITE_data/case7.json
new file mode 100644
index 0000000000..7a8e0174ac
--- /dev/null
+++ b/deps/rabbit/test/definition_import_SUITE_data/case7.json
@@ -0,0 +1,398 @@
+
+
+{
+ "rabbit_version": "3.7.4",
+ "users": [
+ {
+ "name": "bunny_reader",
+ "password_hash": "rgJkcwpypdpIVhbLDj7CaCtFVg6Dyj3yQDcCbhyn29u49c88",
+ "hashing_algorithm": "rabbit_password_hashing_sha256",
+ "tags": ""
+ },
+ {
+ "name": "bunny_gem",
+ "password_hash": "fHFOkIlJ8iohrhN4IQXIzIDrxsOfaekv97wA1W\/0N\/uxTWjE",
+ "hashing_algorithm": "rabbit_password_hashing_sha256",
+ "tags": ""
+ },
+ {
+ "name": "guest",
+ "password_hash": "ujflQBzsAaAfbNSLAy4y2iG9mMpgATaH5oXQfPLkxOhE1yzH",
+ "hashing_algorithm": "rabbit_password_hashing_sha256",
+ "tags": "administrator"
+ }
+ ],
+ "vhosts": [
+ {
+ "name": "bunny_testbed"
+ },
+ {
+ "name": "\/"
+ }
+ ],
+ "permissions": [
+ {
+ "user": "bunny_reader",
+ "vhost": "bunny_testbed",
+ "configure": "^---$",
+ "write": "^---$",
+ "read": ".*"
+ },
+ {
+ "user": "guest",
+ "vhost": "\/",
+ "configure": ".*",
+ "write": ".*",
+ "read": ".*"
+ },
+ {
+ "user": "bunny_gem",
+ "vhost": "bunny_testbed",
+ "configure": ".*",
+ "write": ".*",
+ "read": ".*"
+ },
+ {
+ "user": "guest",
+ "vhost": "bunny_testbed",
+ "configure": ".*",
+ "write": ".*",
+ "read": ".*"
+ }
+ ],
+ "topic_permissions": [
+
+ ],
+ "parameters": [
+ {
+ "value": {
+ "pattern": "^apd\\\\.mce\\\\.estarchive.*$",
+ "definition": {
+ "max-length-bytes": 200000000
+ },
+ "priority": 0,
+ "apply-to": "queues"
+ },
+ "vhost": "\/",
+ "component": "operator_policy",
+ "name": "apd-mce-estarchive"
+ }
+ ],
+ "global_parameters": [
+ {
+ "name": "cluster_name",
+ "value": "rabbit@warp10"
+ }
+ ],
+ "policies": [
+
+ ],
+ "queues": [
+ {
+ "name": "test",
+ "vhost": "\/",
+ "durable": true,
+ "auto_delete": false,
+ "arguments": {
+
+ }
+ },
+ {
+ "name": "reply",
+ "vhost": "\/",
+ "durable": true,
+ "auto_delete": false,
+ "arguments": {
+
+ }
+ },
+ {
+ "name": "ack-test",
+ "vhost": "\/",
+ "durable": true,
+ "auto_delete": false,
+ "arguments": {
+
+ }
+ },
+ {
+ "name": "nack-test",
+ "vhost": "\/",
+ "durable": true,
+ "auto_delete": false,
+ "arguments": {
+
+ }
+ },
+ {
+ "name": "redelivered-test",
+ "vhost": "\/",
+ "durable": true,
+ "auto_delete": false,
+ "arguments": {
+
+ }
+ },
+ {
+ "name": "unsub02",
+ "vhost": "\/",
+ "durable": true,
+ "auto_delete": false,
+ "arguments": {
+
+ }
+ },
+ {
+ "name": "known3",
+ "vhost": "\/",
+ "durable": true,
+ "auto_delete": false,
+ "arguments": {
+
+ }
+ },
+ {
+ "name": "ack-test-tx",
+ "vhost": "\/",
+ "durable": true,
+ "auto_delete": false,
+ "arguments": {
+
+ }
+ },
+ {
+ "name": "stomp-subscription-As-CQ37wutHLc9H0PmjIPw",
+ "vhost": "\/",
+ "durable": true,
+ "auto_delete": false,
+ "arguments": {
+
+ }
+ },
+ {
+ "name": "unsub01",
+ "vhost": "\/",
+ "durable": true,
+ "auto_delete": false,
+ "arguments": {
+
+ }
+ },
+ {
+ "name": "test-receipt-tx",
+ "vhost": "\/",
+ "durable": true,
+ "auto_delete": false,
+ "arguments": {
+
+ }
+ },
+ {
+ "name": "unsub03",
+ "vhost": "\/",
+ "durable": true,
+ "auto_delete": false,
+ "arguments": {
+
+ }
+ },
+ {
+ "name": "test2",
+ "vhost": "\/",
+ "durable": true,
+ "auto_delete": false,
+ "arguments": {
+
+ }
+ },
+ {
+ "name": "known",
+ "vhost": "\/",
+ "durable": true,
+ "auto_delete": false,
+ "arguments": {
+
+ }
+ },
+ {
+ "name": "unsub04",
+ "vhost": "\/",
+ "durable": true,
+ "auto_delete": false,
+ "arguments": {
+
+ }
+ },
+ {
+ "name": "stomp-subscription-j7FLeUn7ehTatYVNiBy6UA",
+ "vhost": "\/",
+ "durable": true,
+ "auto_delete": false,
+ "arguments": {
+
+ }
+ },
+ {
+ "name": "ir",
+ "vhost": "\/",
+ "durable": true,
+ "auto_delete": false,
+ "arguments": {
+
+ }
+ },
+ {
+ "name": "nack-multi",
+ "vhost": "\/",
+ "durable": true,
+ "auto_delete": false,
+ "arguments": {
+
+ }
+ },
+ {
+ "name": "test-receipt",
+ "vhost": "\/",
+ "durable": true,
+ "auto_delete": false,
+ "arguments": {
+
+ }
+ },
+ {
+ "name": "reliability",
+ "vhost": "\/",
+ "durable": true,
+ "auto_delete": false,
+ "arguments": {
+
+ }
+ },
+ {
+ "name": "duplicate-consumer-tag-test2",
+ "vhost": "\/",
+ "durable": true,
+ "auto_delete": false,
+ "arguments": {
+
+ }
+ },
+ {
+ "name": "test-multi",
+ "vhost": "\/",
+ "durable": true,
+ "auto_delete": false,
+ "arguments": {
+
+ }
+ },
+ {
+ "name": "test3",
+ "vhost": "\/",
+ "durable": true,
+ "auto_delete": false,
+ "arguments": {
+
+ }
+ },
+ {
+ "name": "stomp-subscription-tMbeqL30tjlgaXMmaFM6Ew",
+ "vhost": "\/",
+ "durable": true,
+ "auto_delete": false,
+ "arguments": {
+
+ }
+ },
+ {
+ "name": "nack-test-no-requeue",
+ "vhost": "\/",
+ "durable": true,
+ "auto_delete": false,
+ "arguments": {
+
+ }
+ },
+ {
+ "name": "ack-test-individual",
+ "vhost": "\/",
+ "durable": true,
+ "auto_delete": false,
+ "arguments": {
+
+ }
+ },
+ {
+ "name": "known2",
+ "vhost": "\/",
+ "durable": true,
+ "auto_delete": false,
+ "arguments": {
+
+ }
+ },
+ {
+ "name": "custom-header",
+ "vhost": "\/",
+ "durable": true,
+ "auto_delete": false,
+ "arguments": {
+
+ }
+ },
+ {
+ "name": "stomp-subscription-eWSXV2ty1R7VqfsnULKEkA",
+ "vhost": "\/",
+ "durable": true,
+ "auto_delete": false,
+ "arguments": {
+
+ }
+ }
+ ],
+ "exchanges": [
+
+ ],
+ "bindings": [
+ {
+ "source": "amq.topic",
+ "vhost": "\/",
+ "destination": "stomp-subscription-tMbeqL30tjlgaXMmaFM6Ew",
+ "destination_type": "queue",
+ "routing_key": "durable",
+ "arguments": {
+
+ }
+ },
+ {
+ "source": "amq.topic",
+ "vhost": "\/",
+ "destination": "stomp-subscription-eWSXV2ty1R7VqfsnULKEkA",
+ "destination_type": "queue",
+ "routing_key": "durable-separate",
+ "arguments": {
+
+ }
+ },
+ {
+ "source": "amq.topic",
+ "vhost": "\/",
+ "destination": "stomp-subscription-j7FLeUn7ehTatYVNiBy6UA",
+ "destination_type": "queue",
+ "routing_key": "durable-separate",
+ "arguments": {
+
+ }
+ },
+ {
+ "source": "amq.topic",
+ "vhost": "\/",
+ "destination": "stomp-subscription-As-CQ37wutHLc9H0PmjIPw",
+ "destination_type": "queue",
+ "routing_key": "durable-shared",
+ "arguments": {
+
+ }
+ }
+ ]
+}
diff --git a/deps/rabbit/test/definition_import_SUITE_data/case8.json b/deps/rabbit/test/definition_import_SUITE_data/case8.json
new file mode 100644
index 0000000000..1deb55b45c
--- /dev/null
+++ b/deps/rabbit/test/definition_import_SUITE_data/case8.json
@@ -0,0 +1,17 @@
+{
+ "rabbit_version": "3.8.0+beta.1.6.g0c7c7d9",
+ "parameters": [
+ {
+ "value": {
+ "max-connections": 6767
+ },
+ "vhost": "/",
+ "component": "vhost-limits",
+ "name": "limits"
+ }
+ ],
+ "policies": [],
+ "queues": [],
+ "exchanges": [],
+ "bindings": []
+} \ No newline at end of file
diff --git a/deps/rabbit/test/definition_import_SUITE_data/case9/case9a.json b/deps/rabbit/test/definition_import_SUITE_data/case9/case9a.json
new file mode 100644
index 0000000000..2e7a77962d
--- /dev/null
+++ b/deps/rabbit/test/definition_import_SUITE_data/case9/case9a.json
@@ -0,0 +1 @@
+{"rabbit_version":"3.7.13","users":[{"name":"langohr","password_hash":"7p9PXlsYs92NlHSdNgPoDXmN77NqeGpzCTHpElq/wPS1eAEd","hashing_algorithm":"rabbit_password_hashing_sha256","tags":""},{"name":"bunny_reader","password_hash":"ExmGdjBTmQEPxcW2z+dsOuPvjFbTBiYQgMByzfpE/IIXplYG","hashing_algorithm":"rabbit_password_hashing_sha256","tags":""},{"name":"bunny_gem","password_hash":"8HH7uxmZS3FDldlYmHpFEE5+gWaeQaim8qpWIHkmNxuQK8xO","hashing_algorithm":"rabbit_password_hashing_sha256","tags":""},{"name":"guest","password_hash":"CPCbkNAHXgQ7vmrqwP9e7RWQsE8U2DqN7JA4ggS50c4LwDda","hashing_algorithm":"rabbit_password_hashing_sha256","tags":"administrator"},{"name":"temp-user","password_hash":"CfUQkDeOYDrPkACDCjoF5zySbsXPIoMgNfv7FWfEpVFGegnL","hashing_algorithm":"rabbit_password_hashing_sha256","tags":"management"}],"vhosts":[{"name":"langohr_testbed"},{"name":"bunny_testbed"},{"name":"/"}],"permissions":[{"user":"bunny_reader","vhost":"bunny_testbed","configure":"^---$","write":"^---$","read":".*"},{"user":"bunny_gem","vhost":"bunny_testbed","configure":".*","write":".*","read":".*"},{"user":"guest","vhost":"/","configure":".*","write":".*","read":".*"},{"user":"langohr","vhost":"langohr_testbed","configure":".*","write":".*","read":".*"},{"user":"guest","vhost":"bunny_testbed","configure":".*","write":".*","read":".*"},{"user":"guest","vhost":"langohr_testbed","configure":".*","write":".*","read":".*"},{"user":"langohr","vhost":"/","configure":".*","write":".*","read":".*"},{"user":"temp-user","vhost":"/","configure":".*","write":".*","read":".*"}],"topic_permissions":[],"parameters":[],"global_parameters":[{"name":"cluster_name","value":"rabbit@localhost"}],"policies":[],"queues":[{"name":"bunny.basic_consume0.1364356981103202","vhost":"bunny_testbed","durable":false,"auto_delete":true,"arguments":{}},{"name":"return","vhost":"/","durable":false,"auto_delete":true,"arguments":{}},{"name":"q1","vhost":"/","durable":true,"auto_delete":false,"arguments":{}},{"name":"declareArgs-deliveries-dead-letter","vhost":"/","durable":false,"auto_delete":true,"arguments":{}},{"name":"test.rabbitmq-basic-nack","vhost":"/","durable":false,"auto_delete":true,"arguments":{}},{"name":"langohr.test.recovery.q1","vhost":"/","durable":true,"auto_delete":true,"arguments":{}},{"name":"langohr.tests2.queues.client-named.durable.non-exclusive.non-auto-deleted","vhost":"/","durable":true,"auto_delete":false,"arguments":{}},{"name":"test.tx.rollback","vhost":"/","durable":false,"auto_delete":true,"arguments":{}},{"name":"test-integration-declared-passive-queue","vhost":"/","durable":false,"auto_delete":true,"arguments":{}},{"name":"langohr.tests2.queues.client-named.non-durable.non-exclusive.auto-deleted","vhost":"/","durable":false,"auto_delete":true,"arguments":{}},{"name":"test.recover","vhost":"/","durable":false,"auto_delete":true,"arguments":{}},{"name":"amq.gen-7EZF7WjGIQFDoXexVF-e8w","vhost":"/","durable":false,"auto_delete":true,"arguments":{"x-message-ttl":1500}},{"name":"test.integration.channel.error","vhost":"/","durable":false,"auto_delete":true,"arguments":{}},{"name":"confirm","vhost":"/","durable":false,"auto_delete":true,"arguments":{}},{"name":"test.rabbitmq-message-ttl","vhost":"/","durable":false,"auto_delete":true,"arguments":{"x-message-ttl":100}},{"name":"declareWithTTL","vhost":"/","durable":false,"auto_delete":true,"arguments":{"x-message-ttl":9000000}},{"name":"test.tx.commit","vhost":"/","durable":false,"auto_delete":true,"arguments":{}},{"name":"test.get-ok","vhost":"/","durable":false,"auto_delete":tr
ue,"arguments":{}},{"name":"langohr.tests2.queues.non-auto-deleted1","vhost":"/","durable":false,"auto_delete":true,"arguments":{}}],"exchanges":[{"name":"declareArgs-dead-letter","vhost":"/","type":"fanout","durable":false,"auto_delete":true,"internal":false,"arguments":{}},{"name":"langohr.tests.exchanges.topic5","vhost":"/","type":"topic","durable":false,"auto_delete":false,"internal":false,"arguments":{}},{"name":"langohr.extensions.altexchanges.direct1","vhost":"/","type":"direct","durable":false,"auto_delete":true,"internal":false,"arguments":{"alternate-exchange":"langohr.extensions.altexchanges.fanout1"}},{"name":"langohr.tests.exchanges.fanout1","vhost":"/","type":"fanout","durable":false,"auto_delete":false,"internal":false,"arguments":{}},{"name":"langohr.tests.exchanges.direct3","vhost":"/","type":"direct","durable":false,"auto_delete":true,"internal":false,"arguments":{}},{"name":"langohr.tests.exchanges.topic4","vhost":"/","type":"topic","durable":false,"auto_delete":true,"internal":false,"arguments":{}},{"name":"langohr.tests.exchanges.fanout3","vhost":"/","type":"fanout","durable":false,"auto_delete":true,"internal":false,"arguments":{}},{"name":"langohr.tests.exchanges.direct4","vhost":"/","type":"direct","durable":false,"auto_delete":false,"internal":false,"arguments":{}},{"name":"langohr.tests.exchanges.topic2","vhost":"/","type":"topic","durable":true,"auto_delete":false,"internal":false,"arguments":{}},{"name":"test-integration-declared-passive-exchange","vhost":"/","type":"direct","durable":false,"auto_delete":true,"internal":false,"arguments":{}},{"name":"test-channel-still-exists","vhost":"/","type":"direct","durable":false,"auto_delete":true,"internal":false,"arguments":{}},{"name":"langohr.tests.exchanges.topic1","vhost":"/","type":"topic","durable":false,"auto_delete":false,"internal":false,"arguments":{}},{"name":"langohr.tests.exchanges.fanout2","vhost":"/","type":"fanout","durable":true,"auto_delete":false,"internal":false,"arguments":{}},{"name":"langohr.tests.exchanges.direct1","vhost":"/","type":"direct","durable":false,"auto_delete":false,"internal":false,"arguments":{}},{"name":"langohr.tests.exchanges.direct2","vhost":"/","type":"direct","durable":true,"auto_delete":false,"internal":false,"arguments":{}},{"name":"langohr.tests.exchanges.headers2","vhost":"/","type":"headers","durable":false,"auto_delete":false,"internal":false,"arguments":{}},{"name":"langohr.tests.exchanges.topic3","vhost":"/","type":"topic","durable":false,"auto_delete":true,"internal":false,"arguments":{}},{"name":"langohr.test.recovery.fanout1","vhost":"/","type":"fanout","durable":true,"auto_delete":false,"internal":false,"arguments":{}},{"name":"langohr.tests.exchanges.fanout4","vhost":"/","type":"fanout","durable":false,"auto_delete":false,"internal":false,"arguments":{}}],"bindings":[{"source":"amq.fanout","vhost":"/","destination":"langohr.tests2.queues.client-named.non-durable.non-exclusive.auto-deleted","destination_type":"queue","routing_key":"","arguments":{}},{"source":"declareArgs-dead-letter","vhost":"/","destination":"declareArgs-deliveries-dead-letter","destination_type":"queue","routing_key":"#","arguments":{}}]} \ No newline at end of file
diff --git a/deps/rabbit/test/definition_import_SUITE_data/case9/case9b.json b/deps/rabbit/test/definition_import_SUITE_data/case9/case9b.json
new file mode 100644
index 0000000000..7cadd58b17
--- /dev/null
+++ b/deps/rabbit/test/definition_import_SUITE_data/case9/case9b.json
@@ -0,0 +1 @@
+{"rabbit_version":"3.7.13","users":[{"name":"langohr","password_hash":"7p9PXlsYs92NlHSdNgPoDXmN77NqeGpzCTHpElq/wPS1eAEd","hashing_algorithm":"rabbit_password_hashing_sha256","tags":""},{"name":"bunny_reader","password_hash":"ExmGdjBTmQEPxcW2z+dsOuPvjFbTBiYQgMByzfpE/IIXplYG","hashing_algorithm":"rabbit_password_hashing_sha256","tags":""},{"name":"bunny_gem","password_hash":"8HH7uxmZS3FDldlYmHpFEE5+gWaeQaim8qpWIHkmNxuQK8xO","hashing_algorithm":"rabbit_password_hashing_sha256","tags":""},{"name":"guest2","password_hash":"E04A7cvvsaDJBezc3Sc2jCnywe9oS4DX18qFe4dwkjIr26gf","hashing_algorithm":"rabbit_password_hashing_sha256","tags":"monitoring"},{"name":"guest","password_hash":"CPCbkNAHXgQ7vmrqwP9e7RWQsE8U2DqN7JA4ggS50c4LwDda","hashing_algorithm":"rabbit_password_hashing_sha256","tags":"administrator"},{"name":"temp-user","password_hash":"CfUQkDeOYDrPkACDCjoF5zySbsXPIoMgNfv7FWfEpVFGegnL","hashing_algorithm":"rabbit_password_hashing_sha256","tags":"management"}],"vhosts":[{"name":"langohr_testbed"},{"name":"bunny_testbed"},{"name":"/"},{"name":"vhost3"}],"permissions":[{"user":"bunny_reader","vhost":"bunny_testbed","configure":"^---$","write":"^---$","read":".*"},{"user":"bunny_gem","vhost":"bunny_testbed","configure":".*","write":".*","read":".*"},{"user":"guest","vhost":"/","configure":".*","write":".*","read":".*"},{"user":"langohr","vhost":"langohr_testbed","configure":".*","write":".*","read":".*"},{"user":"guest","vhost":"bunny_testbed","configure":".*","write":".*","read":".*"},{"user":"guest","vhost":"langohr_testbed","configure":".*","write":".*","read":".*"},{"user":"guest","vhost":"vhost3","configure":".*","write":".*","read":".*"},{"user":"langohr","vhost":"/","configure":".*","write":".*","read":".*"},{"user":"temp-user","vhost":"/","configure":".*","write":".*","read":".*"}],"topic_permissions":[],"parameters":[{"value":{"max-connections":2000},"vhost":"/","component":"vhost-limits","name":"limits"}],"global_parameters":[{"name":"cluster_name","value":"rabbit@localhost"}],"policies":[],"queues":[{"name":"bunny.basic_consume0.7103611911099639","vhost":"bunny_testbed","durable":false,"auto_delete":true,"arguments":{}},{"name":"bunny.basic_consume0.6091120557781405","vhost":"bunny_testbed","durable":false,"auto_delete":true,"arguments":{}},{"name":"bunny.basic_consume0.8661861002262826","vhost":"bunny_testbed","durable":false,"auto_delete":true,"arguments":{}},{"name":"bunny.basic_consume0.3682573609392056","vhost":"bunny_testbed","durable":false,"auto_delete":true,"arguments":{}},{"name":"bunny.basic_consume0.14855593896585362","vhost":"bunny_testbed","durable":false,"auto_delete":true,"arguments":{}},{"name":"bunny.basic_consume0.9534242141484872","vhost":"bunny_testbed","durable":false,"auto_delete":true,"arguments":{}},{"name":"bunny.basic_consume0.9434723539955824","vhost":"bunny_testbed","durable":false,"auto_delete":true,"arguments":{}},{"name":"bunny.basic_consume0.12235844522013617","vhost":"bunny_testbed","durable":false,"auto_delete":true,"arguments":{}},{"name":"bunny.basic_consume0.8370997977912426","vhost":"bunny_testbed","durable":false,"auto_delete":true,"arguments":{}},{"name":"bunny.basic_consume0.4548488370639835","vhost":"bunny_testbed","durable":false,"auto_delete":true,"arguments":{}},{"name":"bunny.basic_consume0.2289868670635532","vhost":"bunny_testbed","durable":false,"auto_delete":true,"arguments":{}},{"name":"bunny.basic_consume0.00797124769641977","vhost":"bunny_testbed","durable":false,"auto_delete":true,"arguments":{}},{"name":"amq.gen-xddEPq9wHSNZKQbPK8pi
3A","vhost":"bunny_testbed","durable":false,"auto_delete":false,"arguments":{}},{"name":"bunny.basic_consume0.5195700828676673","vhost":"bunny_testbed","durable":false,"auto_delete":true,"arguments":{}},{"name":"bunny.basic_consume0.3071859764599716","vhost":"bunny_testbed","durable":false,"auto_delete":true,"arguments":{}},{"name":"return","vhost":"/","durable":false,"auto_delete":true,"arguments":{}},{"name":"q1","vhost":"/","durable":true,"auto_delete":false,"arguments":{}},{"name":"declareArgs-deliveries-dead-letter","vhost":"/","durable":false,"auto_delete":true,"arguments":{}},{"name":"test.rabbitmq-basic-nack","vhost":"/","durable":false,"auto_delete":true,"arguments":{}},{"name":"langohr.test.recovery.q1","vhost":"/","durable":true,"auto_delete":true,"arguments":{}},{"name":"langohr.tests2.queues.client-named.durable.non-exclusive.non-auto-deleted","vhost":"/","durable":true,"auto_delete":false,"arguments":{}},{"name":"test.tx.rollback","vhost":"/","durable":false,"auto_delete":true,"arguments":{}},{"name":"test-integration-declared-passive-queue","vhost":"/","durable":false,"auto_delete":true,"arguments":{}},{"name":"langohr.tests2.queues.client-named.non-durable.non-exclusive.auto-deleted","vhost":"/","durable":false,"auto_delete":true,"arguments":{}},{"name":"test.recover","vhost":"/","durable":false,"auto_delete":true,"arguments":{}},{"name":"amq.gen-7EZF7WjGIQFDoXexVF-e8w","vhost":"/","durable":false,"auto_delete":true,"arguments":{"x-message-ttl":1500}},{"name":"test.integration.channel.error","vhost":"/","durable":false,"auto_delete":true,"arguments":{}},{"name":"confirm","vhost":"/","durable":false,"auto_delete":true,"arguments":{}},{"name":"test.rabbitmq-message-ttl","vhost":"/","durable":false,"auto_delete":true,"arguments":{"x-message-ttl":100}},{"name":"declareWithTTL","vhost":"/","durable":false,"auto_delete":true,"arguments":{"x-message-ttl":9000000}},{"name":"test.tx.commit","vhost":"/","durable":false,"auto_delete":true,"arguments":{}},{"name":"test.get-ok","vhost":"/","durable":false,"auto_delete":true,"arguments":{}},{"name":"langohr.tests2.queues.non-auto-deleted1","vhost":"/","durable":false,"auto_delete":true,"arguments":{}},{"name":"qv3","vhost":"vhost3","durable":true,"auto_delete":false,"arguments":{}}],"exchanges":[{"name":"bunny.tests.exchanges.fanout","vhost":"bunny_testbed","type":"fanout","durable":false,"auto_delete":true,"internal":false,"arguments":{}},{"name":"declareArgs-dead-letter","vhost":"/","type":"fanout","durable":false,"auto_delete":true,"internal":false,"arguments":{}},{"name":"langohr.tests.exchanges.topic5","vhost":"/","type":"topic","durable":false,"auto_delete":false,"internal":false,"arguments":{}},{"name":"langohr.extensions.altexchanges.direct1","vhost":"/","type":"direct","durable":false,"auto_delete":true,"internal":false,"arguments":{"alternate-exchange":"langohr.extensions.altexchanges.fanout1"}},{"name":"langohr.tests.exchanges.fanout1","vhost":"/","type":"fanout","durable":false,"auto_delete":false,"internal":false,"arguments":{}},{"name":"langohr.tests.exchanges.direct3","vhost":"/","type":"direct","durable":false,"auto_delete":true,"internal":false,"arguments":{}},{"name":"langohr.tests.exchanges.topic4","vhost":"/","type":"topic","durable":false,"auto_delete":true,"internal":false,"arguments":{}},{"name":"langohr.test.recovery.fanout2","vhost":"/","type":"fanout","durable":true,"auto_delete":false,"internal":false,"arguments":{}},{"name":"langohr.tests.exchanges.fanout3","vhost":"/","type":"fanout","durable":false,"auto_dele
te":true,"internal":false,"arguments":{}},{"name":"langohr.tests.exchanges.direct4","vhost":"/","type":"direct","durable":false,"auto_delete":false,"internal":false,"arguments":{}},{"name":"langohr.tests.exchanges.topic2","vhost":"/","type":"topic","durable":true,"auto_delete":false,"internal":false,"arguments":{}},{"name":"test-integration-declared-passive-exchange","vhost":"/","type":"direct","durable":false,"auto_delete":true,"internal":false,"arguments":{}},{"name":"test-channel-still-exists","vhost":"/","type":"direct","durable":false,"auto_delete":true,"internal":false,"arguments":{}},{"name":"langohr.tests.exchanges.topic1","vhost":"/","type":"topic","durable":false,"auto_delete":false,"internal":false,"arguments":{}},{"name":"langohr.tests.exchanges.fanout2","vhost":"/","type":"fanout","durable":true,"auto_delete":false,"internal":false,"arguments":{}},{"name":"langohr.tests.exchanges.direct1","vhost":"/","type":"direct","durable":false,"auto_delete":false,"internal":false,"arguments":{}},{"name":"langohr.tests.exchanges.direct2","vhost":"/","type":"direct","durable":true,"auto_delete":false,"internal":false,"arguments":{}},{"name":"langohr.tests.exchanges.headers2","vhost":"/","type":"headers","durable":false,"auto_delete":false,"internal":false,"arguments":{}},{"name":"langohr.tests.exchanges.topic3","vhost":"/","type":"topic","durable":false,"auto_delete":true,"internal":false,"arguments":{}},{"name":"langohr.tests.exchanges.fanout4","vhost":"/","type":"fanout","durable":false,"auto_delete":false,"internal":false,"arguments":{}}],"bindings":[{"source":"amq.fanout","vhost":"/","destination":"langohr.tests2.queues.client-named.non-durable.non-exclusive.auto-deleted","destination_type":"queue","routing_key":"","arguments":{}},{"source":"declareArgs-dead-letter","vhost":"/","destination":"declareArgs-deliveries-dead-letter","destination_type":"queue","routing_key":"#","arguments":{}}]} \ No newline at end of file
diff --git a/deps/rabbit/test/definition_import_SUITE_data/failing_case12.json b/deps/rabbit/test/definition_import_SUITE_data/failing_case12.json
new file mode 100644
index 0000000000..6ce0366a70
--- /dev/null
+++ b/deps/rabbit/test/definition_import_SUITE_data/failing_case12.json
@@ -0,0 +1,24 @@
+{
+ "rabbit_version": "3.8.0+rc.1.5.g9148053",
+ "rabbitmq_version": "3.8.0+rc.1.5.g9148053",
+ "queues": [
+ {
+ "name": "amq.queuebar",
+ "vhost": "/",
+ "durable": true,
+ "auto_delete": false,
+ "arguments": {}
+ }
+ ],
+ "exchanges": [
+ {
+ "name": "invalid_type",
+ "vhost": "/",
+ "type": "definitly not direct",
+ "durable": true,
+ "auto_delete": false,
+ "internal": false,
+ "arguments": {}
+ }
+ ]
+}
diff --git a/deps/rabbit/test/disconnect_detected_during_alarm_SUITE.erl b/deps/rabbit/test/disconnect_detected_during_alarm_SUITE.erl
new file mode 100644
index 0000000000..820e13efa0
--- /dev/null
+++ b/deps/rabbit/test/disconnect_detected_during_alarm_SUITE.erl
@@ -0,0 +1,111 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(disconnect_detected_during_alarm_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, disconnect_detected_during_alarm}
+ ].
+
+groups() ->
+ [
+ %% Test previously executed with the multi-node target.
+ {disconnect_detected_during_alarm, [], [
+ disconnect_detected_during_alarm %% Trigger alarm.
+ ]}
+ ].
+
+group(_) ->
+ [].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(Group, Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config,
+ [
+ {rmq_nodename_suffix, Group},
+ {rmq_nodes_count, 1}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_group(_Group, Config) ->
+ rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+end_per_group1(_, Config) ->
+ Config.
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+%% ---------------------------------------------------------------------------
+%% Testcase
+%% ---------------------------------------------------------------------------
+
+disconnect_detected_during_alarm(Config) ->
+ A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+
+ %% Set a low memory high watermark.
+ rabbit_ct_broker_helpers:rabbitmqctl(Config, A,
+ ["set_vm_memory_high_watermark", "0.000000001"]),
+
+ %% Open a connection and a channel.
+ Port = rabbit_ct_broker_helpers:get_node_config(Config, A, tcp_port_amqp),
+ Heartbeat = 1,
+ {ok, Conn} = amqp_connection:start(
+ #amqp_params_network{port = Port,
+ heartbeat = Heartbeat}),
+ {ok, Ch} = amqp_connection:open_channel(Conn),
+
+ amqp_connection:register_blocked_handler(Conn, self()),
+ Publish = #'basic.publish'{routing_key = <<"nowhere-to-go">>},
+ amqp_channel:cast(Ch, Publish, #amqp_msg{payload = <<"foobar">>}),
+ receive
+ % Check that connection was indeed blocked
+ #'connection.blocked'{} -> ok
+ after
+ 1000 -> exit(connection_was_not_blocked)
+ end,
+
+    %% The connection is blocked; now we forcefully kill it
+ {'EXIT', _} = (catch amqp_connection:close(Conn, 10)),
+
+ ListConnections =
+ fun() ->
+ rpc:call(A, rabbit_networking, connection_info_all, [])
+ end,
+
+    %% We've already disconnected, but the blocked connection should still linger on.
+ [SingleConn] = ListConnections(),
+ blocked = rabbit_misc:pget(state, SingleConn),
+
+ %% It should definitely go away after 2 heartbeat intervals.
+ timer:sleep(round(2.5 * 1000 * Heartbeat)),
+ [] = ListConnections(),
+
+ passed.
diff --git a/deps/rabbit/test/dummy_event_receiver.erl b/deps/rabbit/test/dummy_event_receiver.erl
new file mode 100644
index 0000000000..3d417b601b
--- /dev/null
+++ b/deps/rabbit/test/dummy_event_receiver.erl
@@ -0,0 +1,49 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(dummy_event_receiver).
+
+-export([start/3, stop/0]).
+
+-export([init/1, handle_call/2, handle_event/2, handle_info/2,
+ terminate/2, code_change/3]).
+
+-include("rabbit.hrl").
+
+start(Pid, Nodes, Types) ->
+ Oks = [ok || _ <- Nodes],
+ {Oks, _} = rpc:multicall(Nodes, gen_event, add_handler,
+ [rabbit_event, ?MODULE, [Pid, Types]]).
+
+stop() ->
+ gen_event:delete_handler(rabbit_event, ?MODULE, []).
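+
+%% Illustrative usage from a test process (the event type below is an
+%% assumption for the example, not taken from a particular suite):
+%%
+%%   dummy_event_receiver:start(self(), [node()], [queue_created]),
+%%   %% ... trigger an action that emits a queue_created event ...
+%%   receive #event{type = queue_created} -> ok end,
+%%   dummy_event_receiver:stop().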
+
+%%----------------------------------------------------------------------------
+
+init([Pid, Types]) ->
+ {ok, {Pid, Types}}.
+
+handle_call(_Request, State) ->
+ {ok, not_understood, State}.
+
+handle_event(Event = #event{type = Type}, State = {Pid, Types}) ->
+ case lists:member(Type, Types) of
+ true -> Pid ! Event;
+ false -> ok
+ end,
+ {ok, State}.
+
+handle_info(_Info, State) ->
+ {ok, State}.
+
+terminate(_Arg, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+%%----------------------------------------------------------------------------
diff --git a/deps/rabbit/test/dummy_interceptor.erl b/deps/rabbit/test/dummy_interceptor.erl
new file mode 100644
index 0000000000..6d510a3073
--- /dev/null
+++ b/deps/rabbit/test/dummy_interceptor.erl
@@ -0,0 +1,26 @@
+-module(dummy_interceptor).
+
+-behaviour(rabbit_channel_interceptor).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+-include_lib("rabbit_common/include/rabbit_framing.hrl").
+
+
+-compile(export_all).
+
+init(_Ch) ->
+ undefined.
+
+description() ->
+ [{description,
+ <<"Empties payload on publish">>}].
+
+intercept(#'basic.publish'{} = Method, Content, _IState) ->
+ Content2 = Content#content{payload_fragments_rev = []},
+ {Method, Content2};
+
+intercept(Method, Content, _VHost) ->
+ {Method, Content}.
+
+applies_to() ->
+ ['basic.publish'].
diff --git a/deps/rabbit/test/dummy_runtime_parameters.erl b/deps/rabbit/test/dummy_runtime_parameters.erl
new file mode 100644
index 0000000000..01d0b74f95
--- /dev/null
+++ b/deps/rabbit/test/dummy_runtime_parameters.erl
@@ -0,0 +1,63 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(dummy_runtime_parameters).
+-behaviour(rabbit_runtime_parameter).
+-behaviour(rabbit_policy_validator).
+
+-include("rabbit.hrl").
+
+-export([validate/5, notify/5, notify_clear/4]).
+-export([register/0, unregister/0]).
+-export([validate_policy/1]).
+-export([register_policy_validator/0, unregister_policy_validator/0]).
+
+%----------------------------------------------------------------------------
+
+register() ->
+ rabbit_registry:register(runtime_parameter, <<"test">>, ?MODULE).
+
+unregister() ->
+ rabbit_registry:unregister(runtime_parameter, <<"test">>).
+
+validate(_, <<"test">>, <<"good">>, _Term, _User) -> ok;
+validate(_, <<"test">>, <<"maybe">>, <<"good">>, _User) -> ok;
+validate(_, <<"test">>, <<"admin">>, _Term, none) -> ok;
+validate(_, <<"test">>, <<"admin">>, _Term, User) ->
+ case lists:member(administrator, User#user.tags) of
+ true -> ok;
+ false -> {error, "meh", []}
+ end;
+validate(_, <<"test">>, _, _, _) -> {error, "meh", []}.
+
+notify(_, _, _, _, _) -> ok.
+notify_clear(_, _, _, _) -> ok.
+
+%----------------------------------------------------------------------------
+
+register_policy_validator() ->
+ rabbit_registry:register(policy_validator, <<"testeven">>, ?MODULE),
+ rabbit_registry:register(policy_validator, <<"testpos">>, ?MODULE).
+
+unregister_policy_validator() ->
+ rabbit_registry:unregister(policy_validator, <<"testeven">>),
+ rabbit_registry:unregister(policy_validator, <<"testpos">>).
+
+validate_policy([{<<"testeven">>, Terms}]) when is_list(Terms) ->
+ case length(Terms) rem 2 =:= 0 of
+ true -> ok;
+ false -> {error, "meh", []}
+ end;
+
+validate_policy([{<<"testpos">>, Terms}]) when is_list(Terms) ->
+ case lists:all(fun (N) -> is_integer(N) andalso N > 0 end, Terms) of
+ true -> ok;
+ false -> {error, "meh", []}
+ end;
+
+validate_policy(_) ->
+ {error, "meh", []}.
diff --git a/deps/rabbit/test/dummy_supervisor2.erl b/deps/rabbit/test/dummy_supervisor2.erl
new file mode 100644
index 0000000000..354b3a0854
--- /dev/null
+++ b/deps/rabbit/test/dummy_supervisor2.erl
@@ -0,0 +1,32 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(dummy_supervisor2).
+
+-behaviour(supervisor2).
+
+-export([
+ start_link/0,
+ init/1
+ ]).
+
+start_link() ->
+ Pid = spawn_link(fun () ->
+ process_flag(trap_exit, true),
+ receive stop -> ok end
+ end),
+ {ok, Pid}.
+
+init([Timeout]) ->
+ {ok, {{one_for_one, 0, 1},
+ [{test_sup, {supervisor2, start_link,
+ [{local, ?MODULE}, ?MODULE, []]},
+ transient, Timeout, supervisor, [?MODULE]}]}};
+init([]) ->
+ {ok, {{simple_one_for_one, 0, 1},
+ [{test_worker, {?MODULE, start_link, []},
+ temporary, 1000, worker, [?MODULE]}]}}.
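+
+%% Illustrative (names here are assumptions, not used by the tests):
+%% supervisor2:start_link({local, dummy_sup}, ?MODULE, [5000]) selects the
+%% one_for_one clause, whose single child is itself a supervisor2 with a
+%% 5000 ms shutdown; supervisor2:start_link(?MODULE, []) selects the
+%% simple_one_for_one worker variant instead.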
diff --git a/deps/rabbit/test/dynamic_ha_SUITE.erl b/deps/rabbit/test/dynamic_ha_SUITE.erl
new file mode 100644
index 0000000000..85969135b6
--- /dev/null
+++ b/deps/rabbit/test/dynamic_ha_SUITE.erl
@@ -0,0 +1,1034 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(dynamic_ha_SUITE).
+
+%% rabbit_tests:test_dynamic_mirroring() is a unit test which should
+%% test the logic of what all the policies decide to do, so we don't
+%% need to exhaustively test that here. What we need to test is that:
+%%
+%% * Going from non-mirrored to mirrored works and vice versa
+%% * Changing policy can add / remove mirrors and change the master
+%% * Adding a node will create a new mirror when there are not enough nodes
+%% for the policy
+%% * Removing a node will not create a new mirror even if the policy
+%% logic wants it (since this gives us a good way to lose messages
+%% on cluster shutdown, by repeated failover to new nodes)
+%%
+%% The first two are change_policy, the last two are change_cluster
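+%%
+%% For reference, the policies exercised below are set with
+%% rabbit_ct_broker_helpers:set_ha_policy/4, passing a definition such as
+%% <<"all">>, {<<"exactly">>, 4} or {<<"nodes">>, [<<"a">>, <<"b">>]}
+%% (the node name binaries in the last form are placeholders).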
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("proper/include/proper.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include_lib("rabbitmq_ct_helpers/include/rabbit_assert.hrl").
+
+-compile(export_all).
+
+-define(QNAME, <<"ha.test">>).
+-define(POLICY, <<"^ha.test$">>). %% " emacs
+-define(VHOST, <<"/">>).
+
+all() ->
+ [
+ {group, unclustered},
+ {group, clustered}
+ ].
+
+groups() ->
+ [
+ {unclustered, [], [
+ {cluster_size_5, [], [
+ change_cluster
+ ]}
+ ]},
+ {clustered, [], [
+ {cluster_size_2, [], [
+ vhost_deletion,
+ force_delete_if_no_master,
+ promote_on_shutdown,
+ promote_on_failure,
+ follower_recovers_after_vhost_failure,
+ follower_recovers_after_vhost_down_and_up,
+ master_migrates_on_vhost_down,
+ follower_recovers_after_vhost_down_and_master_migrated,
+ queue_survive_adding_dead_vhost_mirror,
+ dynamic_mirroring
+ ]},
+ {cluster_size_3, [], [
+ change_policy,
+ rapid_change,
+ nodes_policy_should_pick_master_from_its_params,
+ promote_follower_after_standalone_restart,
+ queue_survive_adding_dead_vhost_mirror,
+ rebalance_all,
+ rebalance_exactly,
+ rebalance_nodes,
+ rebalance_multiple_blocked
+ ]}
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(unclustered, Config) ->
+ rabbit_ct_helpers:set_config(Config, [{rmq_nodes_clustered, false}]);
+init_per_group(clustered, Config) ->
+ rabbit_ct_helpers:set_config(Config, [{rmq_nodes_clustered, true}]);
+init_per_group(cluster_size_2, Config) ->
+ rabbit_ct_helpers:set_config(Config, [{rmq_nodes_count, 2}]);
+init_per_group(cluster_size_3, Config) ->
+ rabbit_ct_helpers:set_config(Config, [{rmq_nodes_count, 3}]);
+init_per_group(cluster_size_5, Config) ->
+ rabbit_ct_helpers:set_config(Config, [{rmq_nodes_count, 5}]).
+
+end_per_group(_, Config) ->
+ Config.
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase),
+ ClusterSize = ?config(rmq_nodes_count, Config),
+ TestNumber = rabbit_ct_helpers:testcase_number(Config, ?MODULE, Testcase),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, Testcase},
+ {tcp_ports_base, {skip_n_nodes, TestNumber * ClusterSize}}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_testcase(Testcase, Config) ->
+ Config1 = rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()),
+ rabbit_ct_helpers:testcase_finished(Config1, Testcase).
+
+%% -------------------------------------------------------------------
+%% Test Cases
+%% -------------------------------------------------------------------
+
+dynamic_mirroring(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, dynamic_mirroring1, [Config]).
+
+dynamic_mirroring1(_Config) ->
+    %% Just unit tests of the node selection logic; see the multi-node
+    %% tests for the rest...
+ Test = fun ({NewM, NewSs, ExtraSs}, Policy, Params,
+ {MNode, SNodes, SSNodes}, All) ->
+ {ok, M} = rabbit_mirror_queue_misc:module(Policy),
+ {NewM, NewSs0} = M:suggested_queue_nodes(
+ Params, MNode, SNodes, SSNodes, All),
+ NewSs1 = lists:sort(NewSs0),
+ case dm_list_match(NewSs, NewSs1, ExtraSs) of
+ ok -> ok;
+ error -> exit({no_match, NewSs, NewSs1, ExtraSs})
+ end
+ end,
+
+ Test({a,[b,c],0},<<"all">>,'_',{a,[], []}, [a,b,c]),
+ Test({a,[b,c],0},<<"all">>,'_',{a,[b,c],[b,c]},[a,b,c]),
+ Test({a,[b,c],0},<<"all">>,'_',{a,[d], [d]}, [a,b,c]),
+
+ N = fun (Atoms) -> [list_to_binary(atom_to_list(A)) || A <- Atoms] end,
+
+ %% Add a node
+ Test({a,[b,c],0},<<"nodes">>,N([a,b,c]),{a,[b],[b]},[a,b,c,d]),
+ Test({b,[a,c],0},<<"nodes">>,N([a,b,c]),{b,[a],[a]},[a,b,c,d]),
+ %% Add two nodes and drop one
+ Test({a,[b,c],0},<<"nodes">>,N([a,b,c]),{a,[d],[d]},[a,b,c,d]),
+ %% Don't try to include nodes that are not running
+ Test({a,[b], 0},<<"nodes">>,N([a,b,f]),{a,[b],[b]},[a,b,c,d]),
+ %% If we can't find any of the nodes listed then just keep the master
+ Test({a,[], 0},<<"nodes">>,N([f,g,h]),{a,[b],[b]},[a,b,c,d]),
+ %% And once that's happened, still keep the master even when not listed,
+ %% if nothing is synced
+ Test({a,[b,c],0},<<"nodes">>,N([b,c]), {a,[], []}, [a,b,c,d]),
+ Test({a,[b,c],0},<<"nodes">>,N([b,c]), {a,[b],[]}, [a,b,c,d]),
+ %% But if something is synced we can lose the master - but make
+ %% sure we pick the new master from the nodes which are synced!
+ Test({b,[c], 0},<<"nodes">>,N([b,c]), {a,[b],[b]},[a,b,c,d]),
+ Test({b,[c], 0},<<"nodes">>,N([c,b]), {a,[b],[b]},[a,b,c,d]),
+
+ Test({a,[], 1},<<"exactly">>,2,{a,[], []}, [a,b,c,d]),
+ Test({a,[], 2},<<"exactly">>,3,{a,[], []}, [a,b,c,d]),
+ Test({a,[c], 0},<<"exactly">>,2,{a,[c], [c]}, [a,b,c,d]),
+ Test({a,[c], 1},<<"exactly">>,3,{a,[c], [c]}, [a,b,c,d]),
+ Test({a,[c], 0},<<"exactly">>,2,{a,[c,d],[c,d]},[a,b,c,d]),
+ Test({a,[c,d],0},<<"exactly">>,3,{a,[c,d],[c,d]},[a,b,c,d]),
+
+ passed.
+
+%% Does the first list match the second where the second is required
+%% to have exactly Extra superfluous items?
+dm_list_match([], [], 0) -> ok;
+dm_list_match(_, [], _Extra) -> error;
+dm_list_match([H|T1], [H |T2], Extra) -> dm_list_match(T1, T2, Extra);
+dm_list_match(L1, [_H|T2], Extra) -> dm_list_match(L1, T2, Extra - 1).
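+%%
+%% For example (illustrative, not called by the tests):
+%% dm_list_match([b], [b, c], 1) =:= ok, since [b, c] has exactly one
+%% superfluous element, whereas dm_list_match([b], [b, c], 0) =:= error.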
+
+change_policy(Config) ->
+ [A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ ACh = rabbit_ct_client_helpers:open_channel(Config, A),
+
+ %% When we first declare a queue with no policy, it's not HA.
+ amqp_channel:call(ACh, #'queue.declare'{queue = ?QNAME}),
+ timer:sleep(200),
+ assert_followers(A, ?QNAME, {A, ''}),
+
+ %% Give it policy "all", it becomes HA and gets all mirrors
+ rabbit_ct_broker_helpers:set_ha_policy(Config, A, ?POLICY, <<"all">>),
+ assert_followers(A, ?QNAME, {A, [B, C]}, [{A, []}, {A, [B]}, {A, [C]}]),
+
+ %% Give it policy "nodes", it gets specific mirrors
+ rabbit_ct_broker_helpers:set_ha_policy(Config, A, ?POLICY,
+ {<<"nodes">>, [rabbit_misc:atom_to_binary(A),
+ rabbit_misc:atom_to_binary(B)]}),
+ assert_followers(A, ?QNAME, {A, [B]}, [{A, [B, C]}]),
+
+ %% Now explicitly change the mirrors
+ rabbit_ct_broker_helpers:set_ha_policy(Config, A, ?POLICY,
+ {<<"nodes">>, [rabbit_misc:atom_to_binary(A),
+ rabbit_misc:atom_to_binary(C)]}),
+ assert_followers(A, ?QNAME, {A, [C]}, [{A, [B, C]}]),
+
+ %% Clear the policy, and we go back to non-mirrored
+ ok = rabbit_ct_broker_helpers:clear_policy(Config, A, ?POLICY),
+ assert_followers(A, ?QNAME, {A, ''}),
+
+ %% Test switching "away" from an unmirrored node
+ rabbit_ct_broker_helpers:set_ha_policy(Config, A, ?POLICY,
+ {<<"nodes">>, [rabbit_misc:atom_to_binary(B),
+ rabbit_misc:atom_to_binary(C)]}),
+ assert_followers(A, ?QNAME, {B, [C]}, [{A, []}, {A, [B]}, {A, [C]}, {A, [B, C]}]),
+
+ ok.
+
+change_cluster(Config) ->
+ [A, B, C, D, E] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ rabbit_ct_broker_helpers:cluster_nodes(Config, [A, B, C]),
+ ACh = rabbit_ct_client_helpers:open_channel(Config, A),
+
+ amqp_channel:call(ACh, #'queue.declare'{queue = ?QNAME}),
+ assert_followers(A, ?QNAME, {A, ''}),
+
+ %% Give it policy exactly 4, it should mirror to all 3 nodes
+ rabbit_ct_broker_helpers:set_ha_policy(Config, A, ?POLICY, {<<"exactly">>, 4}),
+ assert_followers(A, ?QNAME, {A, [B, C]}, [{A, []}, {A, [B]}, {A, [C]}]),
+
+    %% Add D and E; one of them joins in
+ rabbit_ct_broker_helpers:cluster_nodes(Config, [A, D, E]),
+ assert_followers(A, ?QNAME, [{A, [B, C, D]}, {A, [B, C, E]}], [{A, [B, C]}]),
+
+ %% Remove one, the other joins in
+ rabbit_ct_broker_helpers:stop_node(Config, D),
+ assert_followers(A, ?QNAME, [{A, [B, C, D]}, {A, [B, C, E]}], [{A, [B, C]}]),
+
+ ok.
+
+rapid_change(Config) ->
+ A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ ACh = rabbit_ct_client_helpers:open_channel(Config, A),
+ {_Pid, MRef} = spawn_monitor(
+ fun() ->
+ [rapid_amqp_ops(ACh, I) || I <- lists:seq(1, 100)]
+ end),
+ rapid_loop(Config, A, MRef),
+ ok.
+
+rapid_amqp_ops(Ch, I) ->
+ Payload = list_to_binary(integer_to_list(I)),
+ amqp_channel:call(Ch, #'queue.declare'{queue = ?QNAME}),
+ amqp_channel:cast(Ch, #'basic.publish'{exchange = <<"">>,
+ routing_key = ?QNAME},
+ #amqp_msg{payload = Payload}),
+ amqp_channel:subscribe(Ch, #'basic.consume'{queue = ?QNAME,
+ no_ack = true}, self()),
+ receive #'basic.consume_ok'{} -> ok
+ end,
+ receive {#'basic.deliver'{}, #amqp_msg{payload = Payload}} ->
+ ok
+ end,
+ amqp_channel:call(Ch, #'queue.delete'{queue = ?QNAME}).
+
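+%% Flips the HA policy on and off in a tight loop until the AMQP
+%% operations process monitored via MRef exits; an abnormal exit fails
+%% the test.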
+rapid_loop(Config, Node, MRef) ->
+ receive
+ {'DOWN', MRef, process, _Pid, normal} ->
+ ok;
+ {'DOWN', MRef, process, _Pid, Reason} ->
+ exit({amqp_ops_died, Reason})
+ after 0 ->
+ rabbit_ct_broker_helpers:set_ha_policy(Config, Node, ?POLICY,
+ <<"all">>),
+ ok = rabbit_ct_broker_helpers:clear_policy(Config, Node, ?POLICY),
+ rapid_loop(Config, Node, MRef)
+ end.
+
+queue_survive_adding_dead_vhost_mirror(Config) ->
+ rabbit_ct_broker_helpers:force_vhost_failure(Config, 1, <<"/">>),
+ NodeA = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ ChA = rabbit_ct_client_helpers:open_channel(Config, NodeA),
+ QName = <<"queue_survive_adding_dead_vhost_mirror-q-1">>,
+ amqp_channel:call(ChA, #'queue.declare'{queue = QName}),
+ Q = find_queue(QName, NodeA),
+ Pid = proplists:get_value(pid, Q),
+ rabbit_ct_broker_helpers:set_ha_policy_all(Config),
+ %% Queue should not fail
+ Q1 = find_queue(QName, NodeA),
+ Pid = proplists:get_value(pid, Q1).
+
+%% Vhost deletion needs to successfully tear down policies and queues
+%% with policies. At least smoke-test that it doesn't blow up.
+vhost_deletion(Config) ->
+ A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ rabbit_ct_broker_helpers:set_ha_policy_all(Config),
+ ACh = rabbit_ct_client_helpers:open_channel(Config, A),
+ amqp_channel:call(ACh, #'queue.declare'{queue = <<"vhost_deletion-q">>}),
+ ok = rpc:call(A, rabbit_vhost, delete, [<<"/">>, <<"acting-user">>]),
+ ok.
+
+force_delete_if_no_master(Config) ->
+ [A, B] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ rabbit_ct_broker_helpers:set_ha_policy(Config, A, <<"^ha.nopromote">>,
+ <<"all">>),
+ ACh = rabbit_ct_client_helpers:open_channel(Config, A),
+ [begin
+ amqp_channel:call(ACh, #'queue.declare'{queue = Q,
+ durable = true}),
+ rabbit_ct_client_helpers:publish(ACh, Q, 10)
+ end || Q <- [<<"ha.nopromote.test1">>, <<"ha.nopromote.test2">>]],
+ ok = rabbit_ct_broker_helpers:restart_node(Config, B),
+ ok = rabbit_ct_broker_helpers:stop_node(Config, A),
+
+ BCh = rabbit_ct_client_helpers:open_channel(Config, B),
+ ?assertExit(
+ {{shutdown, {server_initiated_close, 404, _}}, _},
+ amqp_channel:call(
+ BCh, #'queue.declare'{queue = <<"ha.nopromote.test1">>,
+ durable = true})),
+
+ BCh1 = rabbit_ct_client_helpers:open_channel(Config, B),
+ ?assertExit(
+ {{shutdown, {server_initiated_close, 404, _}}, _},
+ amqp_channel:call(
+ BCh1, #'queue.declare'{queue = <<"ha.nopromote.test2">>,
+ durable = true})),
+ BCh2 = rabbit_ct_client_helpers:open_channel(Config, B),
+ #'queue.delete_ok'{} =
+ amqp_channel:call(BCh2, #'queue.delete'{queue = <<"ha.nopromote.test1">>}),
+ %% Delete with if_empty will fail, since we don't know if the queue is empty
+ ?assertExit(
+ {{shutdown, {server_initiated_close, 406, _}}, _},
+ amqp_channel:call(BCh2, #'queue.delete'{queue = <<"ha.nopromote.test2">>,
+ if_empty = true})),
+ BCh3 = rabbit_ct_client_helpers:open_channel(Config, B),
+ #'queue.delete_ok'{} =
+ amqp_channel:call(BCh3, #'queue.delete'{queue = <<"ha.nopromote.test2">>}),
+ ok.
+
+promote_on_failure(Config) ->
+ [A, B] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ rabbit_ct_broker_helpers:set_ha_policy(Config, A, <<"^ha.promote">>,
+ <<"all">>, [{<<"ha-promote-on-failure">>, <<"always">>}]),
+ rabbit_ct_broker_helpers:set_ha_policy(Config, A, <<"^ha.nopromote">>,
+ <<"all">>, [{<<"ha-promote-on-failure">>, <<"when-synced">>}]),
+
+ ACh = rabbit_ct_client_helpers:open_channel(Config, A),
+ [begin
+ amqp_channel:call(ACh, #'queue.declare'{queue = Q,
+ durable = true}),
+ rabbit_ct_client_helpers:publish(ACh, Q, 10)
+ end || Q <- [<<"ha.promote.test">>, <<"ha.nopromote.test">>]],
+ ok = rabbit_ct_broker_helpers:restart_node(Config, B),
+ ok = rabbit_ct_broker_helpers:kill_node(Config, A),
+ BCh = rabbit_ct_client_helpers:open_channel(Config, B),
+ #'queue.declare_ok'{message_count = 0} =
+ amqp_channel:call(
+ BCh, #'queue.declare'{queue = <<"ha.promote.test">>,
+ durable = true}),
+ ?assertExit(
+ {{shutdown, {server_initiated_close, 404, _}}, _},
+ amqp_channel:call(
+ BCh, #'queue.declare'{queue = <<"ha.nopromote.test">>,
+ durable = true})),
+ ok = rabbit_ct_broker_helpers:start_node(Config, A),
+ ACh2 = rabbit_ct_client_helpers:open_channel(Config, A),
+ #'queue.declare_ok'{message_count = 10} =
+ amqp_channel:call(
+ ACh2, #'queue.declare'{queue = <<"ha.nopromote.test">>,
+ durable = true}),
+ ok.
+
+promote_on_shutdown(Config) ->
+ [A, B] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ rabbit_ct_broker_helpers:set_ha_policy(Config, A, <<"^ha.promote">>,
+ <<"all">>, [{<<"ha-promote-on-shutdown">>, <<"always">>}]),
+ rabbit_ct_broker_helpers:set_ha_policy(Config, A, <<"^ha.nopromote">>,
+ <<"all">>),
+ rabbit_ct_broker_helpers:set_ha_policy(Config, A, <<"^ha.nopromoteonfailure">>,
+ <<"all">>, [{<<"ha-promote-on-failure">>, <<"when-synced">>},
+ {<<"ha-promote-on-shutdown">>, <<"always">>}]),
+
+ ACh = rabbit_ct_client_helpers:open_channel(Config, A),
+ [begin
+ amqp_channel:call(ACh, #'queue.declare'{queue = Q,
+ durable = true}),
+ rabbit_ct_client_helpers:publish(ACh, Q, 10)
+ end || Q <- [<<"ha.promote.test">>,
+ <<"ha.nopromote.test">>,
+ <<"ha.nopromoteonfailure.test">>]],
+ ok = rabbit_ct_broker_helpers:restart_node(Config, B),
+ ok = rabbit_ct_broker_helpers:stop_node(Config, A),
+ BCh = rabbit_ct_client_helpers:open_channel(Config, B),
+ BCh1 = rabbit_ct_client_helpers:open_channel(Config, B),
+ #'queue.declare_ok'{message_count = 0} =
+ amqp_channel:call(
+ BCh, #'queue.declare'{queue = <<"ha.promote.test">>,
+ durable = true}),
+ ?assertExit(
+ {{shutdown, {server_initiated_close, 404, _}}, _},
+ amqp_channel:call(
+ BCh, #'queue.declare'{queue = <<"ha.nopromote.test">>,
+ durable = true})),
+ ?assertExit(
+ {{shutdown, {server_initiated_close, 404, _}}, _},
+ amqp_channel:call(
+ BCh1, #'queue.declare'{queue = <<"ha.nopromoteonfailure.test">>,
+ durable = true})),
+ ok = rabbit_ct_broker_helpers:start_node(Config, A),
+ ACh2 = rabbit_ct_client_helpers:open_channel(Config, A),
+ #'queue.declare_ok'{message_count = 10} =
+ amqp_channel:call(
+ ACh2, #'queue.declare'{queue = <<"ha.nopromote.test">>,
+ durable = true}),
+ #'queue.declare_ok'{message_count = 10} =
+ amqp_channel:call(
+ ACh2, #'queue.declare'{queue = <<"ha.nopromoteonfailure.test">>,
+ durable = true}),
+ ok.
+
+nodes_policy_should_pick_master_from_its_params(Config) ->
+ [A | _] = rabbit_ct_broker_helpers:get_node_configs(Config,
+ nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, A),
+ ?assertEqual(true, apply_policy_to_declared_queue(Config, Ch, [A], [all])),
+ %% --> Master: A
+ %% Slaves: [B, C] or [C, B]
+ SSPids = ?awaitMatch(SSPids when is_list(SSPids),
+ proplists:get_value(synchronised_slave_pids,
+ find_queue(?QNAME, A)),
+ 10000),
+
+ %% Choose a mirror that isn't the first synchronised mirror. This covers
+ %% a bug where the first one was always chosen, even if it was not part
+ %% of the policy.
+ LastSlave = node(lists:last(SSPids)),
+ ?assertEqual(true, apply_policy_to_declared_queue(Config, Ch, [A],
+ [{nodes, [LastSlave]}])),
+ %% --> Master: B or C (depends on the order of current mirrors)
+ %% Slaves: []
+
+ %% Now choose a new master that isn't synchronised. The previous
+ %% policy made sure that the queue only runs on one node (the last
+ %% from the initial synchronised list). Thus, by taking the first
+ %% node from this list, we know it is not synchronised.
+ %%
+ %% Because the policy doesn't cover any synchronised mirror, RabbitMQ
+ %% should instead use an existing synchronised mirror as the new master,
+ %% even though that isn't in the policy.
+ ?assertEqual(true, apply_policy_to_declared_queue(Config, Ch, [A],
+ [{nodes, [LastSlave, A]}])),
+ %% --> Master: B or C (same as previous policy)
+ %% Slaves: [A]
+
+ NewMaster = node(erlang:hd(SSPids)),
+ ?assertEqual(true, apply_policy_to_declared_queue(Config, Ch, [A],
+ [{nodes, [NewMaster]}])),
+ %% --> Master: B or C (the other one compared to previous policy)
+ %% Slaves: []
+
+ amqp_channel:call(Ch, #'queue.delete'{queue = ?QNAME}),
+ _ = rabbit_ct_broker_helpers:clear_policy(Config, A, ?POLICY).
+
+follower_recovers_after_vhost_failure(Config) ->
+ [A, B] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ rabbit_ct_broker_helpers:set_ha_policy_all(Config),
+ ACh = rabbit_ct_client_helpers:open_channel(Config, A),
+ QName = <<"follower_recovers_after_vhost_failure-q">>,
+ amqp_channel:call(ACh, #'queue.declare'{queue = QName}),
+ timer:sleep(500),
+ assert_followers(A, QName, {A, [B]}, [{A, []}]),
+
+ %% Crash vhost on a node hosting a mirror
+ {ok, Sup} = rabbit_ct_broker_helpers:rpc(Config, B, rabbit_vhost_sup_sup, get_vhost_sup, [<<"/">>]),
+ exit(Sup, foo),
+
+ assert_followers(A, QName, {A, [B]}, [{A, []}]).
+
+follower_recovers_after_vhost_down_and_up(Config) ->
+ [A, B] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ rabbit_ct_broker_helpers:set_ha_policy_all(Config),
+ ACh = rabbit_ct_client_helpers:open_channel(Config, A),
+ QName = <<"follower_recovers_after_vhost_down_and_up-q">>,
+ amqp_channel:call(ACh, #'queue.declare'{queue = QName}),
+ timer:sleep(200),
+ assert_followers(A, QName, {A, [B]}, [{A, []}]),
+
+ %% Crash vhost on a node hosting a mirror
+ rabbit_ct_broker_helpers:force_vhost_failure(Config, B, <<"/">>),
+ %% rabbit_ct_broker_helpers:force_vhost_failure/3 will retry up to 10 times to
+ %% make sure that the top vhost supervision tree process has gone down. MK.
+ timer:sleep(500),
+ %% Vhost is back up
+ case rabbit_ct_broker_helpers:rpc(Config, B, rabbit_vhost_sup_sup, start_vhost, [<<"/">>]) of
+ {ok, _Sup} -> ok;
+ {error,{already_started, _Sup}} -> ok
+ end,
+
+ assert_followers(A, QName, {A, [B]}, [{A, []}]).
+
+master_migrates_on_vhost_down(Config) ->
+ [A, B] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ rabbit_ct_broker_helpers:set_ha_policy_all(Config),
+ ACh = rabbit_ct_client_helpers:open_channel(Config, A),
+ QName = <<"master_migrates_on_vhost_down-q">>,
+ amqp_channel:call(ACh, #'queue.declare'{queue = QName}),
+ timer:sleep(500),
+ assert_followers(A, QName, {A, [B]}, [{A, []}]),
+
+ %% Crash vhost on the node hosting queue master
+ rabbit_ct_broker_helpers:force_vhost_failure(Config, A, <<"/">>),
+ timer:sleep(500),
+ assert_followers(A, QName, {B, []}).
+
+follower_recovers_after_vhost_down_and_master_migrated(Config) ->
+ [A, B] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ rabbit_ct_broker_helpers:set_ha_policy_all(Config),
+ ACh = rabbit_ct_client_helpers:open_channel(Config, A),
+ QName = <<"follower_recovers_after_vhost_down_and_master_migrated-q">>,
+ amqp_channel:call(ACh, #'queue.declare'{queue = QName}),
+ timer:sleep(500),
+ assert_followers(A, QName, {A, [B]}, [{A, []}]),
+ %% Crash vhost on the node hosting queue master
+ rabbit_ct_broker_helpers:force_vhost_failure(Config, A, <<"/">>),
+ timer:sleep(500),
+ assert_followers(B, QName, {B, []}),
+
+ %% Restart the vhost on the node (previously) hosting queue master
+ case rabbit_ct_broker_helpers:rpc(Config, A, rabbit_vhost_sup_sup, start_vhost, [<<"/">>]) of
+ {ok, _Sup} -> ok;
+ {error,{already_started, _Sup}} -> ok
+ end,
+ timer:sleep(500),
+ assert_followers(B, QName, {B, [A]}, [{B, []}]).
+
+random_policy(Config) ->
+ run_proper(fun prop_random_policy/1, [Config]).
+
+failing_random_policies(Config) ->
+ [A, B | _] = Nodes = rabbit_ct_broker_helpers:get_node_configs(Config,
+ nodename),
+ %% Those set of policies were found as failing by PropEr in the
+ %% `random_policy` test above. We add them explicitly here to make
+ %% sure they get tested.
+ ?assertEqual(true, test_random_policy(Config, Nodes,
+ [{nodes, [A, B]}, {nodes, [A]}])),
+ ?assertEqual(true, test_random_policy(Config, Nodes,
+ [{exactly, 3}, undefined, all, {nodes, [B]}])),
+ ?assertEqual(true, test_random_policy(Config, Nodes,
+ [all, undefined, {exactly, 2}, all, {exactly, 3}, {exactly, 3},
+ undefined, {exactly, 3}, all])).
+
+promote_follower_after_standalone_restart(Config) ->
+ %% Tests that mirrors can be brought up standalone after forgetting the rest
+ %% of the cluster. Slave ordering should be irrelevant.
+ %% https://github.com/rabbitmq/rabbitmq-server/issues/1213
+ [A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, A),
+
+ rabbit_ct_broker_helpers:set_ha_policy(Config, A, ?POLICY, <<"all">>),
+ amqp_channel:call(Ch, #'queue.declare'{queue = ?QNAME,
+ durable = true}),
+
+ rabbit_ct_client_helpers:publish(Ch, ?QNAME, 15),
+ rabbit_ct_client_helpers:close_channel(Ch),
+
+ rabbit_ct_helpers:await_condition(fun() ->
+ 15 =:= proplists:get_value(messages, find_queue(?QNAME, A))
+ end, 60000),
+
+ rabbit_ct_broker_helpers:stop_node(Config, C),
+ rabbit_ct_broker_helpers:stop_node(Config, B),
+ rabbit_ct_broker_helpers:stop_node(Config, A),
+
+ %% Restart one mirror
+ forget_cluster_node(Config, B, C),
+ forget_cluster_node(Config, B, A),
+
+ ok = rabbit_ct_broker_helpers:start_node(Config, B),
+ rabbit_ct_helpers:await_condition(fun() ->
+ 15 =:= proplists:get_value(messages, find_queue(?QNAME, B))
+ end, 60000),
+ ok = rabbit_ct_broker_helpers:stop_node(Config, B),
+
+ %% Restart the other
+ forget_cluster_node(Config, C, B),
+ forget_cluster_node(Config, C, A),
+
+ ok = rabbit_ct_broker_helpers:start_node(Config, C),
+ 15 = proplists:get_value(messages, find_queue(?QNAME, C)),
+ ok = rabbit_ct_broker_helpers:stop_node(Config, C),
+
+ ok.
+
+rebalance_all(Config) ->
+ [A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ ACh = rabbit_ct_client_helpers:open_channel(Config, A),
+
+ Q1 = <<"q1">>,
+ Q2 = <<"q2">>,
+ Q3 = <<"q3">>,
+ Q4 = <<"q4">>,
+ Q5 = <<"q5">>,
+
+ amqp_channel:call(ACh, #'queue.declare'{queue = Q1}),
+ amqp_channel:call(ACh, #'queue.declare'{queue = Q2}),
+ amqp_channel:call(ACh, #'queue.declare'{queue = Q3}),
+ amqp_channel:call(ACh, #'queue.declare'{queue = Q4}),
+ amqp_channel:call(ACh, #'queue.declare'{queue = Q5}),
+ rabbit_ct_broker_helpers:set_ha_policy(Config, A, <<"q.*">>, <<"all">>),
+ timer:sleep(1000),
+
+ rabbit_ct_client_helpers:publish(ACh, Q1, 5),
+ rabbit_ct_client_helpers:publish(ACh, Q2, 3),
+ assert_followers(A, Q1, {A, [B, C]}, [{A, []}, {A, [B]}, {A, [C]}]),
+ assert_followers(A, Q2, {A, [B, C]}, [{A, []}, {A, [B]}, {A, [C]}]),
+ assert_followers(A, Q3, {A, [B, C]}, [{A, []}, {A, [B]}, {A, [C]}]),
+ assert_followers(A, Q4, {A, [B, C]}, [{A, []}, {A, [B]}, {A, [C]}]),
+ assert_followers(A, Q5, {A, [B, C]}, [{A, []}, {A, [B]}, {A, [C]}]),
+
+ {ok, Summary} = rpc:call(A, rabbit_amqqueue, rebalance, [classic, ".*", ".*"]),
+
+ %% Check that we have at most 2 queues per node
+ Condition1 = fun() ->
+ lists:all(fun(NodeData) ->
+ lists:all(fun({_, V}) when is_integer(V) -> V =< 2;
+ (_) -> true end,
+ NodeData)
+ end, Summary)
+ end,
+ rabbit_ct_helpers:await_condition(Condition1, 60000),
+
+ %% Check that Q1 and Q2 haven't moved
+ assert_followers(A, Q1, {A, [B, C]}, [{A, []}, {A, [B]}, {A, [C]}]),
+ assert_followers(A, Q2, {A, [B, C]}, [{A, []}, {A, [B]}, {A, [C]}]),
+
+ ok.
+
+rebalance_exactly(Config) ->
+ [A, _, _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ ACh = rabbit_ct_client_helpers:open_channel(Config, A),
+
+ Q1 = <<"q1">>,
+ Q2 = <<"q2">>,
+ Q3 = <<"q3">>,
+ Q4 = <<"q4">>,
+ Q5 = <<"q5">>,
+
+ amqp_channel:call(ACh, #'queue.declare'{queue = Q1}),
+ amqp_channel:call(ACh, #'queue.declare'{queue = Q2}),
+ amqp_channel:call(ACh, #'queue.declare'{queue = Q3}),
+ amqp_channel:call(ACh, #'queue.declare'{queue = Q4}),
+ amqp_channel:call(ACh, #'queue.declare'{queue = Q5}),
+ rabbit_ct_broker_helpers:set_ha_policy(Config, A, <<"q.*">>, {<<"exactly">>, 2}),
+ timer:sleep(1000),
+
+ %% Rebalancing only moves a master to a node that already hosts one of
+ %% the queue's mirrors. Thus, before we can verify it works as expected,
+ %% we need the queues to be mirrored on different nodes.
+ %%
+ %% We only test Q3, Q4 and Q5 because the first two are expected to
+ %% stay where they are.
+ ensure_queues_are_mirrored_on_different_mirrors([Q3, Q4, Q5], A, ACh),
+
+ rabbit_ct_client_helpers:publish(ACh, Q1, 5),
+ rabbit_ct_client_helpers:publish(ACh, Q2, 3),
+
+ ?assertEqual(A, node(proplists:get_value(pid, find_queue(Q1, A)))),
+ ?assertEqual(A, node(proplists:get_value(pid, find_queue(Q2, A)))),
+ ?assertEqual(A, node(proplists:get_value(pid, find_queue(Q3, A)))),
+ ?assertEqual(A, node(proplists:get_value(pid, find_queue(Q4, A)))),
+ ?assertEqual(A, node(proplists:get_value(pid, find_queue(Q5, A)))),
+
+ {ok, Summary} = rpc:call(A, rabbit_amqqueue, rebalance, [classic, ".*", ".*"]),
+
+ %% Check that we have at most 2 queues per node
+ Condition1 = fun() ->
+ lists:all(fun(NodeData) ->
+ lists:all(fun({_, V}) when is_integer(V) -> V =< 2;
+ (_) -> true end,
+ NodeData)
+ end, Summary)
+ end,
+ rabbit_ct_helpers:await_condition(Condition1, 60000),
+
+ %% Check that Q1 and Q2 haven't moved
+ Condition2 = fun () ->
+ A =:= node(proplists:get_value(pid, find_queue(Q1, A))) andalso
+ A =:= node(proplists:get_value(pid, find_queue(Q2, A)))
+ end,
+ rabbit_ct_helpers:await_condition(Condition2, 40000),
+
+ ok.
+
+ensure_queues_are_mirrored_on_different_mirrors(Queues, Master, Ch) ->
+ SNodes = [node(SPid)
+ || Q <- Queues,
+ SPid <- proplists:get_value(slave_pids, find_queue(Q, Master))],
+ UniqueSNodes = lists:usort(SNodes),
+ case UniqueSNodes of
+ [_] ->
+ %% All passed queues are on the same mirror. Let's redeclare
+ %% one of them and test again.
+ Q = hd(Queues),
+ amqp_channel:call(Ch, #'queue.delete'{queue = Q}),
+ amqp_channel:call(Ch, #'queue.declare'{queue = Q}),
+ ensure_queues_are_mirrored_on_different_mirrors(Queues, Master, Ch);
+ _ ->
+ ok
+ end.
+
+rebalance_nodes(Config) ->
+ [A, B, _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ ACh = rabbit_ct_client_helpers:open_channel(Config, A),
+
+ Q1 = <<"q1">>,
+ Q2 = <<"q2">>,
+ Q3 = <<"q3">>,
+ Q4 = <<"q4">>,
+ Q5 = <<"q5">>,
+
+ amqp_channel:call(ACh, #'queue.declare'{queue = Q1}),
+ amqp_channel:call(ACh, #'queue.declare'{queue = Q2}),
+ amqp_channel:call(ACh, #'queue.declare'{queue = Q3}),
+ amqp_channel:call(ACh, #'queue.declare'{queue = Q4}),
+ amqp_channel:call(ACh, #'queue.declare'{queue = Q5}),
+ rabbit_ct_broker_helpers:set_ha_policy(
+ Config, A, <<"q.*">>,
+ {<<"nodes">>, [rabbit_misc:atom_to_binary(A), rabbit_misc:atom_to_binary(B)]}),
+ timer:sleep(1000),
+
+ rabbit_ct_client_helpers:publish(ACh, Q1, 5),
+ rabbit_ct_client_helpers:publish(ACh, Q2, 3),
+
+ ?assertEqual(A, node(proplists:get_value(pid, find_queue(Q1, A)))),
+ ?assertEqual(A, node(proplists:get_value(pid, find_queue(Q2, A)))),
+ ?assertEqual(A, node(proplists:get_value(pid, find_queue(Q3, A)))),
+ ?assertEqual(A, node(proplists:get_value(pid, find_queue(Q4, A)))),
+ ?assertEqual(A, node(proplists:get_value(pid, find_queue(Q5, A)))),
+
+ {ok, Summary} = rpc:call(A, rabbit_amqqueue, rebalance, [classic, ".*", ".*"]),
+
+ %% Check that we have at most 3 queues per node
+ ?assert(lists:all(fun(NodeData) ->
+ lists:all(fun({_, V}) when is_integer(V) -> V =< 3;
+ (_) -> true end,
+ NodeData)
+ end, Summary)),
+ %% Check that Q1 and Q2 haven't moved
+ ?assertEqual(A, node(proplists:get_value(pid, find_queue(Q1, A)))),
+ ?assertEqual(A, node(proplists:get_value(pid, find_queue(Q2, A)))),
+
+ ok.
+
+rebalance_multiple_blocked(Config) ->
+ [A, _, _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ ACh = rabbit_ct_client_helpers:open_channel(Config, A),
+ Q1 = <<"q1">>,
+ Q2 = <<"q2">>,
+ Q3 = <<"q3">>,
+ Q4 = <<"q4">>,
+ Q5 = <<"q5">>,
+ amqp_channel:call(ACh, #'queue.declare'{queue = Q1}),
+ amqp_channel:call(ACh, #'queue.declare'{queue = Q2}),
+ amqp_channel:call(ACh, #'queue.declare'{queue = Q3}),
+ amqp_channel:call(ACh, #'queue.declare'{queue = Q4}),
+ amqp_channel:call(ACh, #'queue.declare'{queue = Q5}),
+ ?assertEqual(A, node(proplists:get_value(pid, find_queue(Q1, A)))),
+ ?assertEqual(A, node(proplists:get_value(pid, find_queue(Q2, A)))),
+ ?assertEqual(A, node(proplists:get_value(pid, find_queue(Q3, A)))),
+ ?assertEqual(A, node(proplists:get_value(pid, find_queue(Q4, A)))),
+ ?assertEqual(A, node(proplists:get_value(pid, find_queue(Q5, A)))),
+ ?assert(rabbit_ct_broker_helpers:rpc(
+ Config, A,
+ ?MODULE, rebalance_multiple_blocked1, [Config])).
+
+rebalance_multiple_blocked1(_) ->
+ Parent = self(),
+ Fun = fun() ->
+ Parent ! rabbit_amqqueue:rebalance(classic, ".*", ".*")
+ end,
+ spawn(Fun),
+ spawn(Fun),
+ Rets = [receive Ret1 -> Ret1 end,
+ receive Ret2 -> Ret2 end],
+ lists:member({error, rebalance_in_progress}, Rets).
+
+%%----------------------------------------------------------------------------
+
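+%% Waits until queue QName reports the expected master node and follower
+%% nodes. Exp is a single {MasterNode, FollowerNodes} tuple or a list of
+%% acceptable ones; PermittedIntermediate lists transient layouts that
+%% may legitimately be observed while mirrors are being (re)configured.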
+assert_followers(RPCNode, QName, Exp) ->
+ assert_followers(RPCNode, QName, Exp, []).
+
+assert_followers(RPCNode, QName, Exp, PermittedIntermediate) ->
+ assert_followers0(RPCNode, QName, Exp,
+ [{get(previous_exp_m_node), get(previous_exp_s_nodes)} |
+ PermittedIntermediate], 1000).
+
+assert_followers0(_RPCNode, _QName, [], _PermittedIntermediate, _Attempts) ->
+ error(invalid_expectation);
+assert_followers0(RPCNode, QName, [{ExpMNode, ExpSNodes}|T], PermittedIntermediate, Attempts) ->
+ case assert_followers1(RPCNode, QName, {ExpMNode, ExpSNodes}, PermittedIntermediate, Attempts, nofail) of
+ ok ->
+ ok;
+ failed ->
+ assert_followers0(RPCNode, QName, T, PermittedIntermediate, Attempts - 1)
+ end;
+assert_followers0(RPCNode, QName, {ExpMNode, ExpSNodes}, PermittedIntermediate, Attempts) ->
+ assert_followers1(RPCNode, QName, {ExpMNode, ExpSNodes}, PermittedIntermediate, Attempts, fail).
+
+assert_followers1(_RPCNode, _QName, _Exp, _PermittedIntermediate, 0, fail) ->
+ error(give_up_waiting_for_followers);
+assert_followers1(_RPCNode, _QName, _Exp, _PermittedIntermediate, 0, nofail) ->
+ failed;
+assert_followers1(RPCNode, QName, {ExpMNode, ExpSNodes}, PermittedIntermediate, Attempts, FastFail) ->
+ Q = find_queue(QName, RPCNode),
+ Pid = proplists:get_value(pid, Q),
+ SPids = proplists:get_value(slave_pids, Q),
+ ActMNode = node(Pid),
+ ActSNodes = case SPids of
+ '' -> '';
+ _ -> [node(SPid) || SPid <- SPids]
+ end,
+ case ExpMNode =:= ActMNode andalso equal_list(ExpSNodes, ActSNodes) of
+ false ->
+ %% The change is asynchronous, so if nothing has changed yet we just
+ %% wait and retry. Of course this means that if something fails to
+ %% change when expected, the test times out, which is a bit tedious.
+ case [{PermMNode, PermSNodes} || {PermMNode, PermSNodes} <- PermittedIntermediate,
+ PermMNode =:= ActMNode,
+ equal_list(PermSNodes, ActSNodes)] of
+ [] ->
+ case FastFail of
+ fail ->
+ ct:fail("Expected ~p / ~p, got ~p / ~p~nat ~p~n",
+ [ExpMNode, ExpSNodes, ActMNode, ActSNodes,
+ get_stacktrace()]);
+ nofail ->
+ failed
+ end;
+ State ->
+ ct:pal("Waiting to leave state ~p~n Waiting for ~p~n",
+ [State, {ExpMNode, ExpSNodes}]),
+ timer:sleep(200),
+ assert_followers1(RPCNode, QName, {ExpMNode, ExpSNodes},
+ PermittedIntermediate,
+ Attempts - 1, FastFail)
+ end;
+ true ->
+ put(previous_exp_m_node, ExpMNode),
+ put(previous_exp_s_nodes, ExpSNodes),
+ ok
+ end.
+
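+%% Order-insensitive comparison of the expected and actual follower node
+%% lists; the atom '' denotes an unmirrored queue.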
+equal_list('', '') -> true;
+equal_list('', _Act) -> false;
+equal_list(_Exp, '') -> false;
+equal_list([], []) -> true;
+equal_list(_Exp, []) -> false;
+equal_list([], _Act) -> false;
+equal_list([H|T], Act) -> case lists:member(H, Act) of
+ true -> equal_list(T, Act -- [H]);
+ false -> false
+ end.
+
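+%% Looks up the queue's info proplist on RPCNode, retrying every 100ms
+%% for up to 1000 attempts while the queue is not yet visible.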
+find_queue(QName, RPCNode) ->
+ find_queue(QName, RPCNode, 1000).
+
+find_queue(QName, RPCNode, 0) -> error({did_not_find_queue, QName, RPCNode});
+find_queue(QName, RPCNode, Attempts) ->
+ Qs = rpc:call(RPCNode, rabbit_amqqueue, info_all, [?VHOST], infinity),
+ case find_queue0(QName, Qs) of
+ did_not_find_queue -> timer:sleep(100),
+ find_queue(QName, RPCNode, Attempts - 1);
+ Q -> Q
+ end.
+
+find_queue0(QName, Qs) ->
+ case [Q || Q <- Qs, proplists:get_value(name, Q) =:=
+ rabbit_misc:r(?VHOST, queue, QName)] of
+ [R] -> R;
+ [] -> did_not_find_queue
+ end.
+
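+%% Captures the current stacktrace by throwing and immediately catching a
+%% dummy exception; roughly what erlang:process_info(self(),
+%% current_stacktrace) would return, modulo a few frames.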
+get_stacktrace() ->
+ try
+ throw(e)
+ catch
+ _:e:Stacktrace ->
+ Stacktrace
+ end.
+
+%%----------------------------------------------------------------------------
+run_proper(Fun, Args) ->
+ ?assertEqual(true,
+ proper:counterexample(erlang:apply(Fun, Args),
+ [{numtests, 25},
+ {on_output, fun(F, A) -> ct:pal(?LOW_IMPORTANCE, F, A) end}])).
+
+prop_random_policy(Config) ->
+ Nodes = rabbit_ct_broker_helpers:get_node_configs(
+ Config, nodename),
+ ?FORALL(
+ Policies, non_empty(list(policy_gen(Nodes))),
+ test_random_policy(Config, Nodes, Policies)).
+
+apply_policy_to_declared_queue(Config, Ch, Nodes, Policies) ->
+ [NodeA | _] = Nodes,
+ amqp_channel:call(Ch, #'queue.declare'{queue = ?QNAME}),
+ %% Add some load so mirrors can be busy synchronising
+ rabbit_ct_client_helpers:publish(Ch, ?QNAME, 100000),
+ %% Apply policies in parallel on all nodes
+ apply_in_parallel(Config, Nodes, Policies),
+ %% Give it some time to generate all internal notifications
+ timer:sleep(2000),
+ %% Check the result
+ wait_for_last_policy(?QNAME, NodeA, Policies, 30).
+
+test_random_policy(Config, Nodes, Policies) ->
+ [NodeA | _] = Nodes,
+ Ch = rabbit_ct_client_helpers:open_channel(Config, NodeA),
+ Result = apply_policy_to_declared_queue(Config, Ch, Nodes, Policies),
+ %% Cleanup
+ amqp_channel:call(Ch, #'queue.delete'{queue = ?QNAME}),
+ _ = rabbit_ct_broker_helpers:clear_policy(Config, NodeA, ?POLICY),
+ Result.
+
+apply_in_parallel(Config, Nodes, Policies) ->
+ Self = self(),
+ [spawn_link(fun() ->
+ [apply_policy(Config, N, Policy) || Policy <- Policies],
+ Self ! parallel_task_done
+ end) || N <- Nodes],
+ [receive
+ parallel_task_done ->
+ ok
+ end || _ <- Nodes].
+
+%% Proper generators
+policy_gen(Nodes) ->
+ %% Stopping mirroring (the undefined policy) needs to happen often to trigger rabbitmq-server#803
+ frequency([{3, undefined},
+ {1, all},
+ {1, {nodes, nodes_gen(Nodes)}},
+ {1, {exactly, choose(1, 3)}}
+ ]).
+
+nodes_gen(Nodes) ->
+ ?LET(List, non_empty(list(oneof(Nodes))),
+ sets:to_list(sets:from_list(List))).
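+
+%% For illustration (derived from the generators above, not part of the
+%% original suite): with Nodes = [a, b], policy_gen/1 yields values such
+%% as undefined, all, {nodes, [b, a]} or {exactly, 2}, with undefined
+%% three times as likely as each of the other forms, and nodes_gen/1
+%% deduplicates the node list it draws.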
+
+%% Checks
+wait_for_last_policy(QueueName, NodeA, TestedPolicies, Tries) ->
+ %% Ensure the owner/master is able to process a call request,
+ %% which means that all pending casts have been processed.
+ %% Use the information returned by the owner/master to verify the
+ %% test result.
+ Info = find_queue(QueueName, NodeA),
+ Pid = proplists:get_value(pid, Info),
+ Node = node(Pid),
+ %% Gets owner/master
+ case rpc:call(Node, gen_server, call, [Pid, info], 5000) of
+ {badrpc, _} ->
+ %% The queue is probably being migrated to another node.
+ %% Let's wait a bit longer.
+ timer:sleep(1000),
+ wait_for_last_policy(QueueName, NodeA, TestedPolicies, Tries - 1);
+ Result ->
+ FinalInfo = case Result of
+ {ok, I} -> I;
+ _ when is_list(Result) ->
+ Result
+ end,
+ %% The last policy is the final state
+ LastPolicy = lists:last(TestedPolicies),
+ case verify_policy(LastPolicy, FinalInfo) of
+ true ->
+ true;
+ false when Tries =:= 1 ->
+ Policies = rpc:call(Node, rabbit_policy, list, [], 5000),
+ ct:pal(
+ "Last policy not applied:~n"
+ " Queue node: ~s (~p)~n"
+ " Queue info: ~p~n"
+ " Configured policies: ~p~n"
+ " Tested policies: ~p",
+ [Node, Pid, FinalInfo, Policies, TestedPolicies]),
+ false;
+ false ->
+ timer:sleep(1000),
+ wait_for_last_policy(QueueName, NodeA, TestedPolicies,
+ Tries - 1)
+ end
+ end.
+
+verify_policy(undefined, Info) ->
+ %% If the queue is not mirrored, it returns ''
+ '' == proplists:get_value(slave_pids, Info);
+verify_policy(all, Info) ->
+ 2 == length(proplists:get_value(slave_pids, Info));
+verify_policy({exactly, 1}, Info) ->
+ %% If the queue is mirrored, it returns a list
+ [] == proplists:get_value(slave_pids, Info);
+verify_policy({exactly, N}, Info) ->
+ (N - 1) == length(proplists:get_value(slave_pids, Info));
+verify_policy({nodes, Nodes}, Info) ->
+ Master = node(proplists:get_value(pid, Info)),
+ Slaves = [node(P) || P <- proplists:get_value(slave_pids, Info)],
+ lists:sort(Nodes) == lists:sort([Master | Slaves]).
+
+%% Policies
+apply_policy(Config, N, undefined) ->
+ _ = rabbit_ct_broker_helpers:clear_policy(Config, N, ?POLICY);
+apply_policy(Config, N, all) ->
+ rabbit_ct_broker_helpers:set_ha_policy(
+ Config, N, ?POLICY, <<"all">>,
+ [{<<"ha-sync-mode">>, <<"automatic">>}]);
+apply_policy(Config, N, {nodes, Nodes}) ->
+ NNodes = [rabbit_misc:atom_to_binary(Node) || Node <- Nodes],
+ rabbit_ct_broker_helpers:set_ha_policy(
+ Config, N, ?POLICY, {<<"nodes">>, NNodes},
+ [{<<"ha-sync-mode">>, <<"automatic">>}]);
+apply_policy(Config, N, {exactly, Exactly}) ->
+ rabbit_ct_broker_helpers:set_ha_policy(
+ Config, N, ?POLICY, {<<"exactly">>, Exactly},
+ [{<<"ha-sync-mode">>, <<"automatic">>}]).
+
+forget_cluster_node(Config, Node, NodeToRemove) ->
+ rabbit_ct_broker_helpers:rabbitmqctl(
+ Config, Node, ["forget_cluster_node", "--offline", NodeToRemove]).
diff --git a/deps/rabbit/test/dynamic_qq_SUITE.erl b/deps/rabbit/test/dynamic_qq_SUITE.erl
new file mode 100644
index 0000000000..9a8f2110d6
--- /dev/null
+++ b/deps/rabbit/test/dynamic_qq_SUITE.erl
@@ -0,0 +1,248 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+-module(dynamic_qq_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("proper/include/proper.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-import(quorum_queue_utils, [wait_for_messages_ready/3,
+ ra_name/1]).
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, clustered}
+ ].
+
+groups() ->
+ [
+ {clustered, [], [
+ {cluster_size_3, [], [
+ recover_follower_after_standalone_restart,
+ vhost_deletion,
+ force_delete_if_no_consensus,
+ takeover_on_failure,
+ takeover_on_shutdown,
+ quorum_unaffected_after_vhost_failure
+ ]}
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(clustered, Config) ->
+ rabbit_ct_helpers:set_config(Config, [{rmq_nodes_clustered, true}]);
+init_per_group(cluster_size_2, Config) ->
+ rabbit_ct_helpers:set_config(Config, [{rmq_nodes_count, 2}]);
+init_per_group(cluster_size_3, Config) ->
+ rabbit_ct_helpers:set_config(Config, [{rmq_nodes_count, 3}]).
+
+end_per_group(_, Config) ->
+ Config.
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase),
+ ClusterSize = ?config(rmq_nodes_count, Config),
+ TestNumber = rabbit_ct_helpers:testcase_number(Config, ?MODULE, Testcase),
+ Group = proplists:get_value(name, ?config(tc_group_properties, Config)),
+ Q = rabbit_data_coercion:to_binary(io_lib:format("~p_~p", [Group, Testcase])),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, Testcase},
+ {tcp_ports_base, {skip_n_nodes, TestNumber * ClusterSize}},
+ {queue_name, Q},
+ {queue_args, [{<<"x-queue-type">>, longstr, <<"quorum">>}]}
+ ]),
+ Config2 = rabbit_ct_helpers:run_steps(
+ Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()),
+ case rabbit_ct_broker_helpers:enable_feature_flag(Config2, quorum_queue) of
+ ok ->
+ Config2;
+ Skip ->
+ end_per_testcase(Testcase, Config2),
+ Skip
+ end.
+
+end_per_testcase(Testcase, Config) ->
+ Config1 = rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()),
+ rabbit_ct_helpers:testcase_finished(Config1, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+%% Vhost deletion needs to successfully tear down queues.
+vhost_deletion(Config) ->
+ Node = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Node),
+ QName = ?config(queue_name, Config),
+ Args = ?config(queue_args, Config),
+ amqp_channel:call(Ch, #'queue.declare'{queue = QName,
+ arguments = Args,
+ durable = true
+ }),
+ ok = rpc:call(Node, rabbit_vhost, delete, [<<"/">>, <<"acting-user">>]),
+ ?assertMatch([],
+ rabbit_ct_broker_helpers:rabbitmqctl_list(
+ Config, 0, ["list_queues", "name"])),
+ ok.
+
+force_delete_if_no_consensus(Config) ->
+ [A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ ACh = rabbit_ct_client_helpers:open_channel(Config, A),
+ QName = ?config(queue_name, Config),
+ Args = ?config(queue_args, Config),
+ amqp_channel:call(ACh, #'queue.declare'{queue = QName,
+ arguments = Args,
+ durable = true
+ }),
+ rabbit_ct_client_helpers:publish(ACh, QName, 10),
+ ok = rabbit_ct_broker_helpers:restart_node(Config, B),
+ ok = rabbit_ct_broker_helpers:stop_node(Config, A),
+ ok = rabbit_ct_broker_helpers:stop_node(Config, C),
+
+ BCh = rabbit_ct_client_helpers:open_channel(Config, B),
+ ?assertMatch(
+ #'queue.declare_ok'{},
+ amqp_channel:call(
+ BCh, #'queue.declare'{queue = QName,
+ arguments = Args,
+ durable = true,
+ passive = true})),
+ BCh2 = rabbit_ct_client_helpers:open_channel(Config, B),
+ ?assertMatch(#'queue.delete_ok'{},
+ amqp_channel:call(BCh2, #'queue.delete'{queue = QName})),
+ ok.
+
+takeover_on_failure(Config) ->
+ takeover_on(Config, kill_node).
+
+takeover_on_shutdown(Config) ->
+ takeover_on(Config, stop_node).
+
+takeover_on(Config, Fun) ->
+ [A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ ACh = rabbit_ct_client_helpers:open_channel(Config, A),
+ QName = ?config(queue_name, Config),
+ Args = ?config(queue_args, Config),
+ amqp_channel:call(ACh, #'queue.declare'{queue = QName,
+ arguments = Args,
+ durable = true
+ }),
+ rabbit_ct_client_helpers:publish(ACh, QName, 10),
+ ok = rabbit_ct_broker_helpers:restart_node(Config, B),
+
+ ok = rabbit_ct_broker_helpers:Fun(Config, C),
+ ok = rabbit_ct_broker_helpers:Fun(Config, A),
+
+ BCh = rabbit_ct_client_helpers:open_channel(Config, B),
+ #'queue.declare_ok'{} =
+ amqp_channel:call(
+ BCh, #'queue.declare'{queue = QName,
+ arguments = Args,
+ durable = true}),
+ ok = rabbit_ct_broker_helpers:start_node(Config, A),
+ ACh2 = rabbit_ct_client_helpers:open_channel(Config, A),
+ #'queue.declare_ok'{message_count = 10} =
+ amqp_channel:call(
+ ACh2, #'queue.declare'{queue = QName,
+ arguments = Args,
+ durable = true}),
+ ok.
+
+quorum_unaffected_after_vhost_failure(Config) ->
+ [A, B, _] = Servers0 = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ Servers = lists:sort(Servers0),
+
+ ACh = rabbit_ct_client_helpers:open_channel(Config, A),
+ QName = ?config(queue_name, Config),
+ Args = ?config(queue_args, Config),
+ amqp_channel:call(ACh, #'queue.declare'{queue = QName,
+ arguments = Args,
+ durable = true
+ }),
+ timer:sleep(300),
+
+ Info0 = rpc:call(A, rabbit_quorum_queue, infos,
+ [rabbit_misc:r(<<"/">>, queue, QName)]),
+ ?assertEqual(Servers, lists:sort(proplists:get_value(online, Info0, []))),
+
+ %% Crash vhost on both nodes
+ {ok, SupA} = rabbit_ct_broker_helpers:rpc(Config, A, rabbit_vhost_sup_sup, get_vhost_sup, [<<"/">>]),
+ exit(SupA, foo),
+ {ok, SupB} = rabbit_ct_broker_helpers:rpc(Config, B, rabbit_vhost_sup_sup, get_vhost_sup, [<<"/">>]),
+ exit(SupB, foo),
+
+ Info = rpc:call(A, rabbit_quorum_queue, infos,
+ [rabbit_misc:r(<<"/">>, queue, QName)]),
+ ?assertEqual(Servers, lists:sort(proplists:get_value(online, Info, []))).
+
+recover_follower_after_standalone_restart(Config) ->
+ case os:getenv("SECONDARY_UMBRELLA") of
+ false ->
+ %% Tests that followers can be brought up standalone after forgetting the
+ %% rest of the cluster. Consensus won't be reached as there is only one node in the
+ %% new cluster.
+ Servers = [A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, A),
+
+ QName = ?config(queue_name, Config),
+ Args = ?config(queue_args, Config),
+ amqp_channel:call(Ch, #'queue.declare'{queue = QName,
+ arguments = Args,
+ durable = true
+ }),
+
+ rabbit_ct_client_helpers:publish(Ch, QName, 15),
+ rabbit_ct_client_helpers:close_channel(Ch),
+
+ Name = ra_name(QName),
+ wait_for_messages_ready(Servers, Name, 15),
+
+ rabbit_ct_broker_helpers:stop_node(Config, C),
+ rabbit_ct_broker_helpers:stop_node(Config, B),
+ rabbit_ct_broker_helpers:stop_node(Config, A),
+
+ %% Restart one follower
+ forget_cluster_node(Config, B, C),
+ forget_cluster_node(Config, B, A),
+
+ ok = rabbit_ct_broker_helpers:start_node(Config, B),
+ wait_for_messages_ready([B], Name, 15),
+ ok = rabbit_ct_broker_helpers:stop_node(Config, B),
+
+ %% Restart the other
+ forget_cluster_node(Config, C, B),
+ forget_cluster_node(Config, C, A),
+
+ ok = rabbit_ct_broker_helpers:start_node(Config, C),
+ wait_for_messages_ready([C], Name, 15),
+ ok = rabbit_ct_broker_helpers:stop_node(Config, C),
+ ok;
+ _ ->
+ {skip, "cannot be run in mixed mode"}
+ end.
+
+%%----------------------------------------------------------------------------
+forget_cluster_node(Config, Node, NodeToRemove) ->
+ rabbit_ct_broker_helpers:rabbitmqctl(
+ Config, Node, ["forget_cluster_node", "--offline", NodeToRemove]).
diff --git a/deps/rabbit/test/eager_sync_SUITE.erl b/deps/rabbit/test/eager_sync_SUITE.erl
new file mode 100644
index 0000000000..a9e2ea2107
--- /dev/null
+++ b/deps/rabbit/test/eager_sync_SUITE.erl
@@ -0,0 +1,271 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(eager_sync_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-compile(export_all).
+
+-define(QNAME, <<"ha.two.test">>).
+-define(QNAME_AUTO, <<"ha.auto.test">>).
+-define(MESSAGE_COUNT, 200000).
+
+all() ->
+ [
+ {group, non_parallel_tests}
+ ].
+
+groups() ->
+ [
+ {non_parallel_tests, [], [
+ eager_sync,
+ eager_sync_cancel,
+ eager_sync_auto,
+ eager_sync_auto_on_policy_change,
+ eager_sync_requeue
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(_, Config) ->
+ Config.
+
+end_per_group(_, Config) ->
+ Config.
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase),
+ ClusterSize = 3,
+ TestNumber = rabbit_ct_helpers:testcase_number(Config, ?MODULE, Testcase),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodes_count, ClusterSize},
+ {rmq_nodes_clustered, true},
+ {rmq_nodename_suffix, Testcase},
+ {tcp_ports_base, {skip_n_nodes, TestNumber * ClusterSize}}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps() ++ [
+ fun rabbit_ct_broker_helpers:set_ha_policy_two_pos/1,
+ fun rabbit_ct_broker_helpers:set_ha_policy_two_pos_batch_sync/1
+ ]).
+
+end_per_testcase(Testcase, Config) ->
+ Config1 = rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()),
+ rabbit_ct_helpers:testcase_finished(Config1, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+eager_sync(Config) ->
+ [A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ %% Queue is on AB but not C.
+ ACh = rabbit_ct_client_helpers:open_channel(Config, A),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, C),
+ amqp_channel:call(ACh, #'queue.declare'{queue = ?QNAME,
+ durable = true}),
+
+ %% Don't sync, lose messages
+ rabbit_ct_client_helpers:publish(Ch, ?QNAME, ?MESSAGE_COUNT),
+ restart(Config, A),
+ restart(Config, B),
+ rabbit_ct_client_helpers:consume(Ch, ?QNAME, 0),
+
+ %% Sync, keep messages
+ rabbit_ct_client_helpers:publish(Ch, ?QNAME, ?MESSAGE_COUNT),
+ restart(Config, A),
+ ok = sync(C, ?QNAME),
+ restart(Config, B),
+ rabbit_ct_client_helpers:consume(Ch, ?QNAME, ?MESSAGE_COUNT),
+
+ %% Check the no-need-to-sync path
+ rabbit_ct_client_helpers:publish(Ch, ?QNAME, ?MESSAGE_COUNT),
+ ok = sync(C, ?QNAME),
+ rabbit_ct_client_helpers:consume(Ch, ?QNAME, ?MESSAGE_COUNT),
+
+ %% keep unacknowledged messages
+ rabbit_ct_client_helpers:publish(Ch, ?QNAME, ?MESSAGE_COUNT),
+ rabbit_ct_client_helpers:fetch(Ch, ?QNAME, 2),
+ restart(Config, A),
+ rabbit_ct_client_helpers:fetch(Ch, ?QNAME, 3),
+ sync(C, ?QNAME),
+ restart(Config, B),
+ rabbit_ct_client_helpers:consume(Ch, ?QNAME, ?MESSAGE_COUNT),
+
+ ok.
+
+eager_sync_cancel(Config) ->
+ [A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ %% Queue is on AB but not C.
+ ACh = rabbit_ct_client_helpers:open_channel(Config, A),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, C),
+
+ set_app_sync_batch_size(A),
+ set_app_sync_batch_size(B),
+ set_app_sync_batch_size(C),
+
+ amqp_channel:call(ACh, #'queue.declare'{queue = ?QNAME,
+ durable = true}),
+ {ok, not_syncing} = sync_cancel(C, ?QNAME), %% Idempotence
+ eager_sync_cancel_test2(Config, A, B, C, Ch, 100).
+
+eager_sync_cancel_test2(_, _, _, _, _, 0) ->
+ error(no_more_attempts_left);
+eager_sync_cancel_test2(Config, A, B, C, Ch, Attempts) ->
+ %% Sync then cancel
+ rabbit_ct_client_helpers:publish(Ch, ?QNAME, ?MESSAGE_COUNT),
+ restart(Config, A),
+ set_app_sync_batch_size(A),
+ spawn_link(fun() -> ok = sync_nowait(C, ?QNAME) end),
+ case wait_for_syncing(C, ?QNAME, 1) of
+ ok ->
+ case sync_cancel(C, ?QNAME) of
+ ok ->
+ wait_for_running(C, ?QNAME),
+ restart(Config, B),
+ set_app_sync_batch_size(B),
+ rabbit_ct_client_helpers:consume(Ch, ?QNAME, 0),
+
+ {ok, not_syncing} = sync_cancel(C, ?QNAME), %% Idempotence
+ ok;
+ {ok, not_syncing} ->
+ %% Damn. Syncing finished between wait_for_syncing/3 and
+ %% sync_cancel/2 above. Start again.
+ amqp_channel:call(Ch, #'queue.purge'{queue = ?QNAME}),
+ eager_sync_cancel_test2(Config, A, B, C, Ch, Attempts - 1)
+ end;
+ synced_already ->
+ %% Damn. Syncing finished before wait_for_syncing/3. Start again.
+ amqp_channel:call(Ch, #'queue.purge'{queue = ?QNAME}),
+ eager_sync_cancel_test2(Config, A, B, C, Ch, Attempts - 1)
+ end.
+
+eager_sync_auto(Config) ->
+ [A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ ACh = rabbit_ct_client_helpers:open_channel(Config, A),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, C),
+ amqp_channel:call(ACh, #'queue.declare'{queue = ?QNAME_AUTO,
+ durable = true}),
+
+ %% Sync automatically, don't lose messages
+ rabbit_ct_client_helpers:publish(Ch, ?QNAME_AUTO, ?MESSAGE_COUNT),
+ restart(Config, A),
+ wait_for_sync(C, ?QNAME_AUTO),
+ restart(Config, B),
+ wait_for_sync(C, ?QNAME_AUTO),
+ rabbit_ct_client_helpers:consume(Ch, ?QNAME_AUTO, ?MESSAGE_COUNT),
+
+ ok.
+
+eager_sync_auto_on_policy_change(Config) ->
+ [A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ %% Queue is on AB but not C.
+ ACh = rabbit_ct_client_helpers:open_channel(Config, A),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, C),
+ amqp_channel:call(ACh, #'queue.declare'{queue = ?QNAME,
+ durable = true}),
+
+ %% Sync automatically once the policy is changed to tell us to.
+ rabbit_ct_client_helpers:publish(Ch, ?QNAME, ?MESSAGE_COUNT),
+ restart(Config, A),
+ Params = [rabbit_misc:atom_to_binary(N) || N <- [A, B]],
+ rabbit_ct_broker_helpers:set_ha_policy(Config,
+ A, <<"^ha.two.">>, {<<"nodes">>, Params},
+ [{<<"ha-sync-mode">>, <<"automatic">>}]),
+ wait_for_sync(C, ?QNAME),
+
+ ok.
+
+eager_sync_requeue(Config) ->
+ [A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ %% Queue is on AB but not C.
+ ACh = rabbit_ct_client_helpers:open_channel(Config, A),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, C),
+ amqp_channel:call(ACh, #'queue.declare'{queue = ?QNAME,
+ durable = true}),
+
+ rabbit_ct_client_helpers:publish(Ch, ?QNAME, 2),
+ {#'basic.get_ok'{delivery_tag = TagA}, _} =
+ amqp_channel:call(Ch, #'basic.get'{queue = ?QNAME}),
+ {#'basic.get_ok'{delivery_tag = TagB}, _} =
+ amqp_channel:call(Ch, #'basic.get'{queue = ?QNAME}),
+ amqp_channel:cast(Ch, #'basic.reject'{delivery_tag = TagA, requeue = true}),
+ restart(Config, B),
+ ok = sync(C, ?QNAME),
+ amqp_channel:cast(Ch, #'basic.reject'{delivery_tag = TagB, requeue = true}),
+ rabbit_ct_client_helpers:consume(Ch, ?QNAME, 2),
+
+ ok.
+
+restart(Config, Node) ->
+ rabbit_ct_broker_helpers:restart_broker(Config, Node).
+
+sync(Node, QName) ->
+ case sync_nowait(Node, QName) of
+ ok -> wait_for_sync(Node, QName),
+ ok;
+ R -> R
+ end.
+
+sync_nowait(Node, QName) -> action(Node, sync_queue, QName).
+sync_cancel(Node, QName) -> action(Node, cancel_sync_queue, QName).
+
+wait_for_sync(Node, QName) ->
+ sync_detection_SUITE:wait_for_sync_status(true, Node, QName).
+
+action(Node, Action, QName) ->
+ rabbit_control_helper:command_with_output(
+ Action, Node, [binary_to_list(QName)], [{"-p", "/"}]).
+
+queue(Node, QName) ->
+ QNameRes = rabbit_misc:r(<<"/">>, queue, QName),
+ {ok, Q} = rpc:call(Node, rabbit_amqqueue, lookup, [QNameRes]),
+ Q.
+
+wait_for_syncing(Node, QName, Target) ->
+ case state(Node, QName) of
+ {{syncing, _}, _} -> ok;
+ {running, Target} -> synced_already;
+ _ -> timer:sleep(100),
+ wait_for_syncing(Node, QName, Target)
+ end.
+
+wait_for_running(Node, QName) ->
+ case state(Node, QName) of
+ {running, _} -> ok;
+ _ -> timer:sleep(100),
+ wait_for_running(Node, QName)
+ end.
+
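+%% Returns the queue's state together with the number of synchronised
+%% mirrors, as consumed by wait_for_syncing/3 and wait_for_running/2.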
+state(Node, QName) ->
+ [{state, State}, {synchronised_slave_pids, Pids}] =
+ rpc:call(Node, rabbit_amqqueue, info,
+ [queue(Node, QName), [state, synchronised_slave_pids]]),
+ {State, length(Pids)}.
+
+%% eager_sync_cancel needs a batch size that is < ?MESSAGE_COUNT in order
+%% to pass, because a SyncBatchSize >= ?MESSAGE_COUNT would always finish
+%% the sync before the test is able to cancel it.
+set_app_sync_batch_size(Node) ->
+ rabbit_control_helper:command(
+ eval, Node,
+ ["application:set_env(rabbit, mirroring_sync_batch_size, 1)."]).
diff --git a/deps/rabbit/test/failing_dummy_interceptor.erl b/deps/rabbit/test/failing_dummy_interceptor.erl
new file mode 100644
index 0000000000..62669e7f1f
--- /dev/null
+++ b/deps/rabbit/test/failing_dummy_interceptor.erl
@@ -0,0 +1,27 @@
+-module(failing_dummy_interceptor).
+
+-behaviour(rabbit_channel_interceptor).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+-include_lib("rabbit_common/include/rabbit_framing.hrl").
+
+
+-compile(export_all).
+
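+%% The long sleep in init/1 is what makes this interceptor "failing":
+%% channel setup presumably gives up before the 15.5s sleep completes.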
+init(_Ch) ->
+ timer:sleep(15500),
+ undefined.
+
+description() ->
+ [{description,
+ <<"Empties payload on publish">>}].
+
+intercept(#'basic.publish'{} = Method, Content, _IState) ->
+ Content2 = Content#content{payload_fragments_rev = []},
+ {Method, Content2};
+
+intercept(Method, Content, _IState) ->
+ {Method, Content}.
+
+applies_to() ->
+ ['basic.publish'].
diff --git a/deps/rabbit/test/feature_flags_SUITE.erl b/deps/rabbit/test/feature_flags_SUITE.erl
new file mode 100644
index 0000000000..29dfcf068b
--- /dev/null
+++ b/deps/rabbit/test/feature_flags_SUITE.erl
@@ -0,0 +1,1156 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2018-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(feature_flags_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-export([suite/0,
+ all/0,
+ groups/0,
+ init_per_suite/1,
+ end_per_suite/1,
+ init_per_group/2,
+ end_per_group/2,
+ init_per_testcase/2,
+ end_per_testcase/2,
+
+ registry_general_usage/1,
+ registry_concurrent_reloads/1,
+ enable_feature_flag_in_a_healthy_situation/1,
+ enable_unsupported_feature_flag_in_a_healthy_situation/1,
+ enable_feature_flag_when_ff_file_is_unwritable/1,
+ enable_feature_flag_with_a_network_partition/1,
+ mark_feature_flag_as_enabled_with_a_network_partition/1,
+
+ clustering_ok_with_ff_disabled_everywhere/1,
+ clustering_ok_with_ff_enabled_on_some_nodes/1,
+ clustering_ok_with_ff_enabled_everywhere/1,
+ clustering_ok_with_new_ff_disabled/1,
+ clustering_denied_with_new_ff_enabled/1,
+ clustering_ok_with_new_ff_disabled_from_plugin_on_some_nodes/1,
+ clustering_ok_with_new_ff_enabled_from_plugin_on_some_nodes/1,
+ activating_plugin_with_new_ff_disabled/1,
+ activating_plugin_with_new_ff_enabled/1
+ ]).
+
+suite() ->
+ [{timetrap, {minutes, 15}}].
+
+all() ->
+ [
+ {group, registry},
+ {group, enabling_on_single_node},
+ {group, enabling_in_cluster},
+ {group, clustering},
+ {group, activating_plugin}
+ ].
+
+groups() ->
+ [
+ {registry, [],
+ [
+ registry_general_usage,
+ registry_concurrent_reloads
+ ]},
+ {enabling_on_single_node, [],
+ [
+ enable_feature_flag_in_a_healthy_situation,
+ enable_unsupported_feature_flag_in_a_healthy_situation,
+ enable_feature_flag_when_ff_file_is_unwritable
+ ]},
+ {enabling_in_cluster, [],
+ [
+ enable_feature_flag_in_a_healthy_situation,
+ enable_unsupported_feature_flag_in_a_healthy_situation,
+ enable_feature_flag_when_ff_file_is_unwritable,
+ enable_feature_flag_with_a_network_partition,
+ mark_feature_flag_as_enabled_with_a_network_partition
+ ]},
+ {clustering, [],
+ [
+ clustering_ok_with_ff_disabled_everywhere,
+ clustering_ok_with_ff_enabled_on_some_nodes,
+ clustering_ok_with_ff_enabled_everywhere,
+ clustering_ok_with_new_ff_disabled,
+ clustering_denied_with_new_ff_enabled,
+ clustering_ok_with_new_ff_disabled_from_plugin_on_some_nodes,
+ clustering_ok_with_new_ff_enabled_from_plugin_on_some_nodes
+ ]},
+ {activating_plugin, [],
+ [
+ activating_plugin_with_new_ff_disabled,
+ activating_plugin_with_new_ff_enabled
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config, [
+ fun rabbit_ct_broker_helpers:configure_dist_proxy/1
+ ]).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(enabling_on_single_node, Config) ->
+ rabbit_ct_helpers:set_config(
+ Config,
+ [{rmq_nodes_count, 1}]);
+init_per_group(enabling_in_cluster, Config) ->
+ rabbit_ct_helpers:set_config(
+ Config,
+ [{rmq_nodes_count, 5}]);
+init_per_group(clustering, Config) ->
+ Config1 = rabbit_ct_helpers:set_config(
+ Config,
+ [{rmq_nodes_count, 2},
+ {rmq_nodes_clustered, false},
+ {start_rmq_with_plugins_disabled, true}]),
+ rabbit_ct_helpers:run_setup_steps(Config1, [
+ fun build_my_plugin/1,
+ fun work_around_cli_and_rabbit_circular_dep/1
+ ]);
+init_per_group(activating_plugin, Config) ->
+ Config1 = rabbit_ct_helpers:set_config(
+ Config,
+ [{rmq_nodes_count, 2},
+ {rmq_nodes_clustered, true},
+ {start_rmq_with_plugins_disabled, true}]),
+ rabbit_ct_helpers:run_setup_steps(Config1, [
+ fun build_my_plugin/1,
+ fun work_around_cli_and_rabbit_circular_dep/1
+ ]);
+init_per_group(_, Config) ->
+ Config.
+
+end_per_group(_, Config) ->
+ Config.
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase),
+ TestNumber = rabbit_ct_helpers:testcase_number(Config, ?MODULE, Testcase),
+ case ?config(tc_group_properties, Config) of
+ [{name, registry} | _] ->
+ application:set_env(lager, colored, true),
+ application:set_env(
+ lager,
+ handlers, [{lager_console_backend, [{level, debug}]}]),
+ application:set_env(
+ lager,
+ extra_sinks,
+ [{rabbit_log_lager_event,
+ [{handlers, [{lager_console_backend, [{level, debug}]}]}]
+ },
+ {rabbit_log_feature_flags_lager_event,
+ [{handlers, [{lager_console_backend, [{level, debug}]}]}]
+ }]),
+ lager:start(),
+ FeatureFlagsFile = filename:join(?config(priv_dir, Config),
+ rabbit_misc:format(
+ "feature_flags-~s",
+ [Testcase])),
+ application:set_env(rabbit, feature_flags_file, FeatureFlagsFile),
+ rabbit_ct_helpers:set_config(
+ Config, {feature_flags_file, FeatureFlagsFile});
+ [{name, Name} | _]
+ when Name =:= enabling_on_single_node orelse
+ Name =:= clustering orelse
+ Name =:= activating_plugin ->
+ ClusterSize = ?config(rmq_nodes_count, Config),
+ Config1 = rabbit_ct_helpers:set_config(
+ Config,
+ [{rmq_nodename_suffix, Testcase},
+ {tcp_ports_base, {skip_n_nodes,
+ TestNumber * ClusterSize}}
+ ]),
+ Config2 = rabbit_ct_helpers:merge_app_env(
+ Config1,
+ {rabbit,
+ [{forced_feature_flags_on_init, []},
+ {log, [{file, [{level, debug}]}]}]}),
+ Config3 = rabbit_ct_helpers:run_steps(
+ Config2,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()),
+ case Config3 of
+ {skip, _} ->
+ Config3;
+ _ ->
+ case is_feature_flag_subsystem_available(Config3) of
+ true ->
+ %% We can declare a new feature flag at
+ %% runtime. All of them are supported but
+ %% still disabled.
+ declare_arbitrary_feature_flag(Config3),
+ Config3;
+ false ->
+ end_per_testcase(Testcase, Config3),
+ {skip, "Feature flags subsystem unavailable"}
+ end
+ end;
+ [{name, enabling_in_cluster} | _] ->
+ ClusterSize = ?config(rmq_nodes_count, Config),
+ Config1 = rabbit_ct_helpers:set_config(
+ Config,
+ [{rmq_nodename_suffix, Testcase},
+ {tcp_ports_base, {skip_n_nodes,
+ TestNumber * ClusterSize}},
+ {net_ticktime, 5}
+ ]),
+ Config2 = rabbit_ct_helpers:merge_app_env(
+ Config1,
+ {rabbit,
+ [{forced_feature_flags_on_init, []},
+ {log, [{file, [{level, debug}]}]}]}),
+ Config3 = rabbit_ct_helpers:run_steps(
+ Config2,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()),
+ case Config3 of
+ {skip, _} ->
+ Config3;
+ _ ->
+ case is_feature_flag_subsystem_available(Config3) of
+ true ->
+ %% We can declare a new feature flag at
+ %% runtime. All of them are supported but
+ %% still disabled.
+ declare_arbitrary_feature_flag(Config3),
+ Config3;
+ false ->
+ end_per_testcase(Testcase, Config3),
+ {skip, "Feature flags subsystem unavailable"}
+ end
+ end
+ end.
+
+end_per_testcase(Testcase, Config) ->
+ Config1 = case ?config(tc_group_properties, Config) of
+ [{name, registry} | _] ->
+ Config;
+ _ ->
+ rabbit_ct_helpers:run_steps(
+ Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps())
+ end,
+ rabbit_ct_helpers:testcase_finished(Config1, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+-define(list_ff(Which),
+ lists:sort(maps:keys(rabbit_ff_registry:list(Which)))).
+
+registry_general_usage(_Config) ->
+ %% At first, the registry must be uninitialized.
+ ?assertNot(rabbit_ff_registry:is_registry_initialized()),
+
+ FeatureFlags = #{ff_a =>
+ #{desc => "Feature flag A",
+ stability => stable},
+ ff_b =>
+ #{desc => "Feature flag B",
+ stability => stable}},
+ rabbit_feature_flags:inject_test_feature_flags(
+ feature_flags_to_app_attrs(FeatureFlags)),
+
+ %% After initialization, it must know about the feature flags
+ %% declared in this testsuite. They must be disabled however.
+ rabbit_feature_flags:initialize_registry(),
+ ?assert(rabbit_ff_registry:is_registry_initialized()),
+ ?assertMatch([ff_a, ff_b], ?list_ff(all)),
+
+ ?assert(rabbit_ff_registry:is_supported(ff_a)),
+ ?assert(rabbit_ff_registry:is_supported(ff_b)),
+ ?assertNot(rabbit_ff_registry:is_supported(ff_c)),
+ ?assertNot(rabbit_ff_registry:is_supported(ff_d)),
+
+    ?assertEqual(0, erlang:map_size(rabbit_ff_registry:states())),
+ ?assertMatch([], ?list_ff(enabled)),
+ ?assertMatch([], ?list_ff(state_changing)),
+ ?assertMatch([ff_a, ff_b], ?list_ff(disabled)),
+ ?assertNot(rabbit_ff_registry:is_enabled(ff_a)),
+ ?assertNot(rabbit_ff_registry:is_enabled(ff_b)),
+ ?assertNot(rabbit_ff_registry:is_enabled(ff_c)),
+ ?assertNot(rabbit_ff_registry:is_enabled(ff_d)),
+
+ %% We can declare a new feature flag at runtime. All of them are
+ %% supported but still disabled.
+ NewFeatureFlags = #{ff_c =>
+ #{desc => "Feature flag C",
+ provided_by => ?MODULE,
+ stability => stable}},
+ rabbit_feature_flags:initialize_registry(NewFeatureFlags),
+ ?assertMatch([ff_a, ff_b, ff_c],
+ lists:sort(maps:keys(rabbit_ff_registry:list(all)))),
+
+ ?assert(rabbit_ff_registry:is_supported(ff_a)),
+ ?assert(rabbit_ff_registry:is_supported(ff_b)),
+ ?assert(rabbit_ff_registry:is_supported(ff_c)),
+ ?assertNot(rabbit_ff_registry:is_supported(ff_d)),
+
+    ?assertEqual(0, erlang:map_size(rabbit_ff_registry:states())),
+ ?assertMatch([], ?list_ff(enabled)),
+ ?assertMatch([], ?list_ff(state_changing)),
+ ?assertMatch([ff_a, ff_b, ff_c], ?list_ff(disabled)),
+ ?assertNot(rabbit_ff_registry:is_enabled(ff_a)),
+ ?assertNot(rabbit_ff_registry:is_enabled(ff_b)),
+ ?assertNot(rabbit_ff_registry:is_enabled(ff_c)),
+ ?assertNot(rabbit_ff_registry:is_enabled(ff_d)),
+
+    %% After enabling `ff_a`, the registry reports it as enabled. The
+    %% others are supported but remain disabled.
+ rabbit_feature_flags:initialize_registry(#{},
+ #{ff_a => true},
+ true),
+ ?assertMatch([ff_a, ff_b, ff_c],
+ lists:sort(maps:keys(rabbit_ff_registry:list(all)))),
+
+ ?assert(rabbit_ff_registry:is_supported(ff_a)),
+ ?assert(rabbit_ff_registry:is_supported(ff_b)),
+ ?assert(rabbit_ff_registry:is_supported(ff_c)),
+ ?assertNot(rabbit_ff_registry:is_supported(ff_d)),
+
+ ?assertMatch(#{ff_a := true}, rabbit_ff_registry:states()),
+ ?assertMatch([ff_a], ?list_ff(enabled)),
+ ?assertMatch([], ?list_ff(state_changing)),
+ ?assertMatch([ff_b, ff_c], ?list_ff(disabled)),
+ ?assert(rabbit_ff_registry:is_enabled(ff_a)),
+ ?assertNot(rabbit_ff_registry:is_enabled(ff_b)),
+ ?assertNot(rabbit_ff_registry:is_enabled(ff_c)),
+ ?assertNot(rabbit_ff_registry:is_enabled(ff_d)),
+
+ %% This time, we mark the state of `ff_c` as `state_changing`. We
+ %% expect all other feature flag states to remain unchanged.
+ rabbit_feature_flags:initialize_registry(#{},
+ #{ff_a => false,
+ ff_c => state_changing},
+ true),
+ ?assertMatch([ff_a, ff_b, ff_c],
+ lists:sort(maps:keys(rabbit_ff_registry:list(all)))),
+
+ ?assert(rabbit_ff_registry:is_supported(ff_a)),
+ ?assert(rabbit_ff_registry:is_supported(ff_b)),
+ ?assert(rabbit_ff_registry:is_supported(ff_c)),
+ ?assertNot(rabbit_ff_registry:is_supported(ff_d)),
+
+ ?assertMatch(#{ff_c := state_changing}, rabbit_ff_registry:states()),
+ ?assertMatch([], ?list_ff(enabled)),
+ ?assertMatch([ff_c], ?list_ff(state_changing)),
+ ?assertMatch([ff_a, ff_b], ?list_ff(disabled)),
+ ?assertNot(rabbit_ff_registry:is_enabled(ff_a)),
+ ?assertNot(rabbit_ff_registry:is_enabled(ff_b)),
+ ?assertMatch(state_changing, rabbit_ff_registry:is_enabled(ff_c)),
+ ?assertNot(rabbit_ff_registry:is_enabled(ff_d)),
+
+ %% Finally, we disable `ff_c`. All of them are supported but
+ %% disabled.
+ rabbit_feature_flags:initialize_registry(#{},
+ #{ff_b => false,
+ ff_c => false},
+ true),
+ ?assertMatch([ff_a, ff_b, ff_c],
+ lists:sort(maps:keys(rabbit_ff_registry:list(all)))),
+
+ ?assert(rabbit_ff_registry:is_supported(ff_a)),
+ ?assert(rabbit_ff_registry:is_supported(ff_b)),
+ ?assert(rabbit_ff_registry:is_supported(ff_c)),
+ ?assertNot(rabbit_ff_registry:is_supported(ff_d)),
+
+    ?assertEqual(0, erlang:map_size(rabbit_ff_registry:states())),
+ ?assertMatch([], ?list_ff(enabled)),
+ ?assertMatch([], ?list_ff(state_changing)),
+ ?assertMatch([ff_a, ff_b, ff_c], ?list_ff(disabled)),
+ ?assertNot(rabbit_ff_registry:is_enabled(ff_a)),
+ ?assertNot(rabbit_ff_registry:is_enabled(ff_b)),
+ ?assertNot(rabbit_ff_registry:is_enabled(ff_c)),
+ ?assertNot(rabbit_ff_registry:is_enabled(ff_d)).
+
+registry_concurrent_reloads(_Config) ->
+ case rabbit_ff_registry:is_registry_initialized() of
+ true -> ok;
+ false -> rabbit_feature_flags:initialize_registry()
+ end,
+ ?assert(rabbit_ff_registry:is_registry_initialized()),
+
+ Parent = self(),
+
+ MakeName = fun(I) ->
+ list_to_atom(rabbit_misc:format("ff_~2..0b", [I]))
+ end,
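+    %% `~2..0b` above formats I as a zero-padded two-digit base-10
+    %% integer, so the generated names read ff_01 .. ff_10 and sort in
+    %% creation order.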
+
+ ProcIs = lists:seq(1, 10),
+ Fun = fun(I) ->
+              %% Each process will declare its own feature flag to
+              %% make sure that each generated registry module is
+              %% different, and we don't lose previously declared
+              %% feature flags.
+ Name = MakeName(I),
+ Desc = rabbit_misc:format("Feature flag ~b", [I]),
+ NewFF = #{Name =>
+ #{desc => Desc,
+ stability => stable}},
+ rabbit_feature_flags:initialize_registry(NewFF),
+ unlink(Parent)
+ end,
+
+ %% Prepare feature flags which the spammer process should get at
+ %% some point.
+ FeatureFlags = #{ff_a =>
+ #{desc => "Feature flag A",
+ stability => stable},
+ ff_b =>
+ #{desc => "Feature flag B",
+ stability => stable}},
+ rabbit_feature_flags:inject_test_feature_flags(
+ feature_flags_to_app_attrs(FeatureFlags)),
+
+ %% Spawn a process which heavily uses the registry.
+ FinalFFList = lists:sort(
+ maps:keys(FeatureFlags) ++
+ [MakeName(I) || I <- ProcIs]),
+ Spammer = spawn_link(fun() -> registry_spammer([], FinalFFList) end),
+    rabbit_log_feature_flags:info(
+      ?MODULE_STRING ": Started registry spammer (~p)",
+      [Spammer]),
+
+ %% We acquire the lock from the main process to synchronize the test
+ %% processes we are about to spawn.
+ Lock = rabbit_feature_flags:registry_loading_lock(),
+ ThisNode = [node()],
+ rabbit_log_feature_flags:info(
+ ?MODULE_STRING ": Acquiring registry load lock"),
+ global:set_lock(Lock, ThisNode),
+
+ Pids = [begin
+ Pid = spawn_link(fun() -> Fun(I) end),
+ _ = erlang:monitor(process, Pid),
+ Pid
+ end
+ || I <- ProcIs],
+
+    %% We wait one second so that, in all likelihood, every test process
+    %% has started and is already blocked on the lock. We have no way to
+    %% verify this directly, so this is best-effort synchronization.
+ timer:sleep(1000),
+ rabbit_log_feature_flags:info(
+ ?MODULE_STRING ": Releasing registry load lock"),
+ global:del_lock(Lock, ThisNode),
+
+ rabbit_log_feature_flags:info(
+ ?MODULE_STRING ": Wait for test processes to finish"),
+ lists:foreach(
+ fun(Pid) ->
+ receive {'DOWN', _, process, Pid, normal} -> ok end
+ end,
+ Pids),
+
+ %% We wait for one more second to make sure the spammer sees
+ %% all added feature flags.
+ timer:sleep(1000),
+
+ unlink(Spammer),
+ exit(Spammer, normal).
+
+registry_spammer(CurrentFeatureNames, FinalFeatureNames) ->
+    %% Loop until the final list of feature flags appears, then keep
+    %% validating it forever (registry_spammer1/1).
+ case ?list_ff(all) of
+ CurrentFeatureNames ->
+ registry_spammer(CurrentFeatureNames, FinalFeatureNames);
+ FinalFeatureNames ->
+ rabbit_log_feature_flags:info(
+ ?MODULE_STRING ": Registry spammer: all feature flags "
+ "appeared"),
+ registry_spammer1(FinalFeatureNames);
+ NewFeatureNames
+ when length(NewFeatureNames) > length(CurrentFeatureNames) ->
+ registry_spammer(NewFeatureNames, FinalFeatureNames)
+ end.
+
+registry_spammer1(FeatureNames) ->
+ ?assertEqual(FeatureNames, ?list_ff(all)),
+ registry_spammer1(FeatureNames).
+
+enable_feature_flag_in_a_healthy_situation(Config) ->
+ FeatureName = ff_from_testsuite,
+ ClusterSize = ?config(rmq_nodes_count, Config),
+ Node = ClusterSize - 1,
+ True = lists:duplicate(ClusterSize, true),
+ False = lists:duplicate(ClusterSize, false),
+
+ %% The feature flag is supported but disabled initially.
+ ?assertEqual(
+ True,
+ is_feature_flag_supported(Config, FeatureName)),
+ ?assertEqual(
+ False,
+ is_feature_flag_enabled(Config, FeatureName)),
+
+ %% Enabling the feature flag works.
+ ?assertEqual(
+ ok,
+ enable_feature_flag_on(Config, Node, FeatureName)),
+ ?assertEqual(
+ True,
+ is_feature_flag_enabled(Config, FeatureName)),
+
+ %% Re-enabling the feature flag also works.
+ ?assertEqual(
+ ok,
+ enable_feature_flag_on(Config, Node, FeatureName)),
+ ?assertEqual(
+ True,
+ is_feature_flag_enabled(Config, FeatureName)).
+
+enable_unsupported_feature_flag_in_a_healthy_situation(Config) ->
+ FeatureName = unsupported_feature_flag,
+ ClusterSize = ?config(rmq_nodes_count, Config),
+ Node = ClusterSize - 1,
+ False = lists:duplicate(ClusterSize, false),
+
+ %% The feature flag is unsupported and thus disabled.
+ ?assertEqual(
+ False,
+ is_feature_flag_supported(Config, FeatureName)),
+ ?assertEqual(
+ False,
+ is_feature_flag_enabled(Config, FeatureName)),
+
+    %% Enabling the feature flag fails with an `unsupported` error.
+ ?assertEqual(
+ {error, unsupported},
+ enable_feature_flag_on(Config, Node, FeatureName)),
+ ?assertEqual(
+ False,
+ is_feature_flag_enabled(Config, FeatureName)).
+
+enable_feature_flag_when_ff_file_is_unwritable(Config) ->
+ QQSupported = rabbit_ct_broker_helpers:is_feature_flag_supported(
+ Config, quorum_queue),
+ case QQSupported of
+ true -> do_enable_feature_flag_when_ff_file_is_unwritable(Config);
+ false -> {skip, "Quorum queues are unsupported"}
+ end.
+
+do_enable_feature_flag_when_ff_file_is_unwritable(Config) ->
+ FeatureName = quorum_queue,
+ ClusterSize = ?config(rmq_nodes_count, Config),
+ Node = ClusterSize - 1,
+ True = lists:duplicate(ClusterSize, true),
+ False = lists:duplicate(ClusterSize, false),
+ Files = feature_flags_files(Config),
+
+ %% The feature flag is supported but disabled initially.
+ ?assertEqual(
+ True,
+ is_feature_flag_supported(Config, FeatureName)),
+ ?assertEqual(
+ False,
+ is_feature_flag_enabled(Config, FeatureName)),
+
+ %% Restrict permissions on the `feature_flags` files.
+ [?assertEqual(ok, file:change_mode(File, 8#0444)) || File <- Files],
+
+ %% Enabling the feature flag works.
+ ?assertEqual(
+ ok,
+ enable_feature_flag_on(Config, Node, FeatureName)),
+ ?assertEqual(
+ True,
+ is_feature_flag_enabled(Config, FeatureName)),
+
+    %% The `feature_flags` files were not updated.
+ ?assertEqual(
+ lists:duplicate(ClusterSize, {ok, [[]]}),
+ [file:consult(File) || File <- feature_flags_files(Config)]),
+
+ %% Stop all nodes and restore permissions on the `feature_flags` files.
+ Nodes = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ [?assertEqual(ok, rabbit_ct_broker_helpers:stop_node(Config, N))
+ || N <- Nodes],
+ [?assertEqual(ok, file:change_mode(File, 8#0644)) || File <- Files],
+
+ %% Restart all nodes and assert the feature flag is still enabled and
+ %% the `feature_flags` files were correctly repaired.
+ [?assertEqual(ok, rabbit_ct_broker_helpers:start_node(Config, N))
+ || N <- lists:reverse(Nodes)],
+
+ ?assertEqual(
+ True,
+ is_feature_flag_enabled(Config, FeatureName)),
+ ?assertEqual(
+ lists:duplicate(ClusterSize, {ok, [[FeatureName]]}),
+ [file:consult(File) || File <- feature_flags_files(Config)]).
+
+enable_feature_flag_with_a_network_partition(Config) ->
+ FeatureName = ff_from_testsuite,
+ ClusterSize = ?config(rmq_nodes_count, Config),
+ [A, B, C, D, E] = rabbit_ct_broker_helpers:get_node_configs(
+ Config, nodename),
+ True = lists:duplicate(ClusterSize, true),
+ False = lists:duplicate(ClusterSize, false),
+
+ %% The feature flag is supported but disabled initially.
+ ?assertEqual(
+ True,
+ is_feature_flag_supported(Config, FeatureName)),
+ ?assertEqual(
+ False,
+ is_feature_flag_enabled(Config, FeatureName)),
+
+ %% Isolate nodes B and E from the rest of the cluster.
+ NodePairs = [{B, A},
+ {B, C},
+ {B, D},
+ {E, A},
+ {E, C},
+ {E, D}],
+ block(NodePairs),
+ timer:sleep(1000),
+
+ %% Enabling the feature flag should fail in the specific case of
+ %% `ff_from_testsuite`, if the network is broken.
+ ?assertEqual(
+ {error, unsupported},
+ enable_feature_flag_on(Config, B, FeatureName)),
+ ?assertEqual(
+ False,
+ is_feature_flag_enabled(Config, FeatureName)),
+
+ %% Repair the network and try again to enable the feature flag.
+ unblock(NodePairs),
+ timer:sleep(10000),
+ [?assertEqual(ok, rabbit_ct_broker_helpers:stop_node(Config, N))
+ || N <- [A, C, D]],
+ [?assertEqual(ok, rabbit_ct_broker_helpers:start_node(Config, N))
+ || N <- [A, C, D]],
+ declare_arbitrary_feature_flag(Config),
+
+ %% Enabling the feature flag works.
+ ?assertEqual(
+ ok,
+ enable_feature_flag_on(Config, B, FeatureName)),
+ ?assertEqual(
+ True,
+ is_feature_flag_enabled(Config, FeatureName)).
+
+mark_feature_flag_as_enabled_with_a_network_partition(Config) ->
+ FeatureName = ff_from_testsuite,
+ ClusterSize = ?config(rmq_nodes_count, Config),
+ [A, B, C, D, E] = rabbit_ct_broker_helpers:get_node_configs(
+ Config, nodename),
+ True = lists:duplicate(ClusterSize, true),
+ False = lists:duplicate(ClusterSize, false),
+
+ %% The feature flag is supported but disabled initially.
+ ?assertEqual(
+ True,
+ is_feature_flag_supported(Config, FeatureName)),
+ ?assertEqual(
+ False,
+ is_feature_flag_enabled(Config, FeatureName)),
+
+ %% Isolate node B from the rest of the cluster.
+ NodePairs = [{B, A},
+ {B, C},
+ {B, D},
+ {B, E}],
+ block(NodePairs),
+ timer:sleep(1000),
+
+ %% Mark the feature flag as enabled on all nodes from node B. This
+ %% is expected to timeout.
+ RemoteNodes = [A, C, D, E],
+ ?assertEqual(
+ {failed_to_mark_feature_flag_as_enabled_on_remote_nodes,
+ FeatureName,
+ true,
+ RemoteNodes},
+ rabbit_ct_broker_helpers:rpc(
+ Config, B,
+ rabbit_feature_flags, mark_as_enabled_remotely,
+ [RemoteNodes, FeatureName, true, 20000])),
+
+ RepairFun = fun() ->
+ %% Wait a few seconds before we repair the network.
+ timer:sleep(5000),
+
+ %% Repair the network and try again to enable
+ %% the feature flag.
+ unblock(NodePairs),
+ timer:sleep(1000)
+ end,
+ spawn(RepairFun),
+
+ %% Mark the feature flag as enabled on all nodes from node B. This
+ %% is expected to work this time.
+ ct:pal(?LOW_IMPORTANCE,
+ "Marking the feature flag as enabled on remote nodes...", []),
+ ?assertEqual(
+ ok,
+ rabbit_ct_broker_helpers:rpc(
+ Config, B,
+ rabbit_feature_flags, mark_as_enabled_remotely,
+ [RemoteNodes, FeatureName, true, 120000])).
+
+%% FIXME: Finish the testcase above ^
+
+clustering_ok_with_ff_disabled_everywhere(Config) ->
+ %% All feature flags are disabled. Clustering the two nodes should be
+ %% accepted because they are compatible.
+
+ FFSubsysOk = is_feature_flag_subsystem_available(Config),
+
+ log_feature_flags_of_all_nodes(Config),
+ case FFSubsysOk of
+ true ->
+ ?assertEqual([true, true],
+ is_feature_flag_supported(Config, ff_from_testsuite)),
+ ?assertEqual([false, false],
+ is_feature_flag_enabled(Config, ff_from_testsuite));
+ false ->
+ ok
+ end,
+
+ ?assertEqual(Config, rabbit_ct_broker_helpers:cluster_nodes(Config)),
+
+ log_feature_flags_of_all_nodes(Config),
+ case FFSubsysOk of
+ true ->
+ ?assertEqual([true, true],
+ is_feature_flag_supported(Config, ff_from_testsuite)),
+ ?assertEqual([false, false],
+ is_feature_flag_enabled(Config, ff_from_testsuite));
+ false ->
+ ok
+ end,
+ ok.
+
+clustering_ok_with_ff_enabled_on_some_nodes(Config) ->
+ %% The test feature flag is enabled on node 1, but not on node 2.
+ %% Clustering the two nodes should be accepted because they are
+ %% compatible. Also, the feature flag will be enabled on node 2 as a
+ %% consequence.
+ enable_feature_flag_on(Config, 0, ff_from_testsuite),
+
+ FFSubsysOk = is_feature_flag_subsystem_available(Config),
+
+ log_feature_flags_of_all_nodes(Config),
+ case FFSubsysOk of
+ true ->
+ ?assertEqual([true, true],
+ is_feature_flag_supported(Config, ff_from_testsuite)),
+ ?assertEqual([true, false],
+ is_feature_flag_enabled(Config, ff_from_testsuite));
+ false ->
+ ok
+ end,
+
+ ?assertEqual(Config, rabbit_ct_broker_helpers:cluster_nodes(Config)),
+
+ log_feature_flags_of_all_nodes(Config),
+ case FFSubsysOk of
+ true ->
+ ?assertEqual([true, true],
+ is_feature_flag_enabled(Config, ff_from_testsuite));
+ false ->
+ ok
+ end,
+ ok.
+
+clustering_ok_with_ff_enabled_everywhere(Config) ->
+    %% The test feature flag is enabled. Clustering the two nodes
+ %% should be accepted because they are compatible.
+ enable_feature_flag_everywhere(Config, ff_from_testsuite),
+
+ FFSubsysOk = is_feature_flag_subsystem_available(Config),
+
+ log_feature_flags_of_all_nodes(Config),
+ case FFSubsysOk of
+ true ->
+ ?assertEqual([true, true],
+ is_feature_flag_enabled(Config, ff_from_testsuite));
+ false ->
+ ok
+ end,
+
+ ?assertEqual(Config, rabbit_ct_broker_helpers:cluster_nodes(Config)),
+
+ log_feature_flags_of_all_nodes(Config),
+ case FFSubsysOk of
+ true ->
+ ?assertEqual([true, true],
+ is_feature_flag_enabled(Config, ff_from_testsuite));
+ false ->
+ ok
+ end,
+ ok.
+
+clustering_ok_with_new_ff_disabled(Config) ->
+ %% We declare a new (fake) feature flag on node 1. Clustering the
+ %% two nodes should still be accepted because that feature flag is
+ %% disabled.
+ NewFeatureFlags = #{time_travel =>
+ #{desc => "Time travel with RabbitMQ",
+ provided_by => rabbit,
+ stability => stable}},
+ rabbit_ct_broker_helpers:rpc(
+ Config, 0,
+ rabbit_feature_flags, initialize_registry, [NewFeatureFlags]),
+
+ FFSubsysOk = is_feature_flag_subsystem_available(Config),
+
+ log_feature_flags_of_all_nodes(Config),
+ case FFSubsysOk of
+ true -> ?assertEqual([true, false],
+ is_feature_flag_supported(Config, time_travel)),
+ ?assertEqual([false, false],
+ is_feature_flag_enabled(Config, time_travel));
+ false -> ok
+ end,
+
+ ?assertEqual(Config, rabbit_ct_broker_helpers:cluster_nodes(Config)),
+
+ log_feature_flags_of_all_nodes(Config),
+ case FFSubsysOk of
+ true -> ?assertEqual([false, false],
+ is_feature_flag_supported(Config, time_travel)),
+ ?assertEqual([false, false],
+ is_feature_flag_enabled(Config, time_travel));
+ false -> ok
+ end,
+ ok.
+
+clustering_denied_with_new_ff_enabled(Config) ->
+ %% We declare a new (fake) feature flag on node 1. Clustering the
+ %% two nodes should then be forbidden because node 2 is sure it does
+    %% not support it (because the `rabbit` application is loaded and
+ %% it does not have it).
+ NewFeatureFlags = #{time_travel =>
+ #{desc => "Time travel with RabbitMQ",
+ provided_by => rabbit,
+ stability => stable}},
+ rabbit_ct_broker_helpers:rpc(
+ Config, 0,
+ rabbit_feature_flags, initialize_registry, [NewFeatureFlags]),
+ enable_feature_flag_on(Config, 0, time_travel),
+
+ FFSubsysOk = is_feature_flag_subsystem_available(Config),
+
+ log_feature_flags_of_all_nodes(Config),
+ case FFSubsysOk of
+ true -> ?assertEqual([true, false],
+ is_feature_flag_supported(Config, time_travel)),
+ ?assertEqual([true, false],
+ is_feature_flag_enabled(Config, time_travel));
+ false -> ok
+ end,
+
+ ?assertMatch({skip, _}, rabbit_ct_broker_helpers:cluster_nodes(Config)),
+
+ log_feature_flags_of_all_nodes(Config),
+ case FFSubsysOk of
+ true -> ?assertEqual([true, false],
+ is_feature_flag_supported(Config, time_travel)),
+ ?assertEqual([true, false],
+ is_feature_flag_enabled(Config, time_travel));
+ false -> ok
+ end,
+ ok.
+
+clustering_ok_with_new_ff_disabled_from_plugin_on_some_nodes(Config) ->
+    %% We first enable the test plugin on node 1, then we try to
+    %% cluster the two nodes. Even though they do not expose the same
+    %% set of feature flags (the test plugin declares one), they should
+    %% be considered compatible and the clustering should be allowed.
+ rabbit_ct_broker_helpers:enable_plugin(Config, 0, "my_plugin"),
+
+ FFSubsysOk = is_feature_flag_subsystem_available(Config),
+
+ log_feature_flags_of_all_nodes(Config),
+ case FFSubsysOk of
+ true -> ?assertEqual([true, false],
+ is_feature_flag_supported(Config, plugin_ff)),
+ ?assertEqual([false, false],
+ is_feature_flag_enabled(Config, plugin_ff));
+ false -> ok
+ end,
+
+ ?assertEqual(Config, rabbit_ct_broker_helpers:cluster_nodes(Config)),
+
+ log_feature_flags_of_all_nodes(Config),
+ case FFSubsysOk of
+ true -> ?assertEqual([true, true],
+ is_feature_flag_supported(Config, plugin_ff)),
+ ?assertEqual([false, false],
+ is_feature_flag_enabled(Config, plugin_ff));
+ false -> ok
+ end,
+ ok.
+
+clustering_ok_with_new_ff_enabled_from_plugin_on_some_nodes(Config) ->
+    %% We first enable the test plugin on node 1 and enable its feature
+    %% flag, then we try to cluster the two nodes. Even though they do
+    %% not expose the same set of feature flags (the test plugin
+    %% declares one), they should be considered compatible and the
+    %% clustering should be allowed.
+ rabbit_ct_broker_helpers:enable_plugin(Config, 0, "my_plugin"),
+ enable_feature_flag_on(Config, 0, plugin_ff),
+
+ FFSubsysOk = is_feature_flag_subsystem_available(Config),
+
+ log_feature_flags_of_all_nodes(Config),
+ case FFSubsysOk of
+ true -> ?assertEqual([true, false],
+ is_feature_flag_supported(Config, plugin_ff)),
+ ?assertEqual([true, false],
+ is_feature_flag_enabled(Config, plugin_ff));
+ false -> ok
+ end,
+
+ ?assertEqual(Config, rabbit_ct_broker_helpers:cluster_nodes(Config)),
+
+ log_feature_flags_of_all_nodes(Config),
+ case FFSubsysOk of
+ true -> ?assertEqual([true, true],
+ is_feature_flag_supported(Config, plugin_ff)),
+ ?assertEqual([true, true],
+ is_feature_flag_enabled(Config, plugin_ff));
+ false -> ok
+ end,
+ ok.
+
+activating_plugin_with_new_ff_disabled(Config) ->
+ %% Both nodes are clustered. A new plugin is enabled on node 1
+    %% and this plugin has a new feature flag node 2 does not know
+    %% about. Enabling the plugin is allowed because the nodes remain
+    %% compatible: the plugin is missing on node 2, so its feature flag
+    %% cannot conflict.
+
+ FFSubsysOk = is_feature_flag_subsystem_available(Config),
+
+ log_feature_flags_of_all_nodes(Config),
+ case FFSubsysOk of
+ true -> ?assertEqual([false, false],
+ is_feature_flag_supported(Config, plugin_ff)),
+ ?assertEqual([false, false],
+ is_feature_flag_enabled(Config, plugin_ff));
+ false -> ok
+ end,
+
+ rabbit_ct_broker_helpers:enable_plugin(Config, 0, "my_plugin"),
+
+ log_feature_flags_of_all_nodes(Config),
+ case FFSubsysOk of
+ true -> ?assertEqual([true, true],
+ is_feature_flag_supported(Config, plugin_ff)),
+ ?assertEqual([false, false],
+ is_feature_flag_enabled(Config, plugin_ff));
+ false -> ok
+ end,
+ ok.
+
+activating_plugin_with_new_ff_enabled(Config) ->
+ %% Both nodes are clustered. A new plugin is enabled on node 1
+    %% and this plugin has a new feature flag node 2 does not know
+    %% about. Enabling the plugin is allowed because the nodes remain
+    %% compatible: the plugin is missing on node 2, so its feature flag
+    %% cannot conflict. Enabling the plugin's feature flag is also
+    %% permitted, for the same reason.
+
+ FFSubsysOk = is_feature_flag_subsystem_available(Config),
+
+ log_feature_flags_of_all_nodes(Config),
+ case FFSubsysOk of
+ true -> ?assertEqual([false, false],
+ is_feature_flag_supported(Config, plugin_ff)),
+ ?assertEqual([false, false],
+ is_feature_flag_enabled(Config, plugin_ff));
+ false -> ok
+ end,
+
+ rabbit_ct_broker_helpers:enable_plugin(Config, 0, "my_plugin"),
+ enable_feature_flag_on(Config, 0, plugin_ff),
+
+ log_feature_flags_of_all_nodes(Config),
+ case FFSubsysOk of
+ true -> ?assertEqual([true, true],
+ is_feature_flag_supported(Config, plugin_ff)),
+ ?assertEqual([true, true],
+ is_feature_flag_enabled(Config, plugin_ff));
+ false -> ok
+ end,
+ ok.
+
+%% -------------------------------------------------------------------
+%% Internal helpers.
+%% -------------------------------------------------------------------
+
+build_my_plugin(Config) ->
+ PluginSrcDir = filename:join(?config(data_dir, Config), "my_plugin"),
+ PluginsDir = filename:join(PluginSrcDir, "plugins"),
+ Config1 = rabbit_ct_helpers:set_config(Config,
+ [{rmq_plugins_dir, PluginsDir}]),
+ {MyPlugin, OtherPlugins} = list_my_plugin_plugins(PluginSrcDir),
+ case MyPlugin of
+ [] ->
+ DepsDir = ?config(erlang_mk_depsdir, Config),
+ Args = ["test-dist",
+ {"DEPS_DIR=~s", [DepsDir]},
+ %% We clear ALL_DEPS_DIRS to make sure they are
+ %% not recompiled when the plugin is built. `rabbit`
+ %% was previously compiled with -DTEST and if it is
+ %% recompiled because of this plugin, it will be
+ %% recompiled without -DTEST: the testsuite depends
+ %% on test code so we can't allow that.
+ %%
+ %% Note that we do not clear the DEPS variable:
+ %% we need it to be correct because it is used to
+ %% generate `my_plugin.app` (and a RabbitMQ plugin
+ %% must depend on `rabbit`).
+ "ALL_DEPS_DIRS="],
+ case rabbit_ct_helpers:make(Config1, PluginSrcDir, Args) of
+ {ok, _} ->
+ {_, OtherPlugins1} = list_my_plugin_plugins(PluginSrcDir),
+ remove_other_plugins(PluginSrcDir, OtherPlugins1),
+ update_cli_path(Config1, PluginSrcDir);
+ {error, _} ->
+ {skip, "Failed to compile the `my_plugin` test plugin"}
+ end;
+ _ ->
+ remove_other_plugins(PluginSrcDir, OtherPlugins),
+ update_cli_path(Config1, PluginSrcDir)
+ end.
+
+update_cli_path(Config, PluginSrcDir) ->
+ SbinDir = filename:join(PluginSrcDir, "sbin"),
+ Rabbitmqctl = filename:join(SbinDir, "rabbitmqctl"),
+ RabbitmqPlugins = filename:join(SbinDir, "rabbitmq-plugins"),
+ RabbitmqQueues = filename:join(SbinDir, "rabbitmq-queues"),
+ case filelib:is_regular(Rabbitmqctl) of
+ true ->
+ ct:pal(?LOW_IMPORTANCE,
+ "Switching to CLI in e.g. ~s", [Rabbitmqctl]),
+ rabbit_ct_helpers:set_config(
+ Config,
+ [{rabbitmqctl_cmd, Rabbitmqctl},
+ {rabbitmq_plugins_cmd, RabbitmqPlugins},
+ {rabbitmq_queues_cmd, RabbitmqQueues}]);
+ false ->
+ Config
+ end.
+
+list_my_plugin_plugins(PluginSrcDir) ->
+ Files = filelib:wildcard("plugins/*", PluginSrcDir),
+ lists:partition(
+ fun(Path) ->
+ Filename = filename:basename(Path),
+ re:run(Filename, "^my_plugin-", [{capture, none}]) =:= match
+ end, Files).
+
+remove_other_plugins(PluginSrcDir, OtherPlugins) ->
+ ok = rabbit_file:recursive_delete(
+ [filename:join(PluginSrcDir, OtherPlugin)
+ || OtherPlugin <- OtherPlugins]).
+
+work_around_cli_and_rabbit_circular_dep(Config) ->
+    %% FIXME: We also need to copy `rabbit` into `my_plugin`'s plugins
+    %% directory, not because `my_plugin` depends on it, but because the
+ %% CLI erroneously depends on the broker.
+ %%
+ %% This can't be fixed easily because this is a circular dependency
+ %% (i.e. the broker depends on the CLI). So until a proper solution
+ %% is implemented, keep this second copy of the broker for the CLI
+ %% to find it.
+ InitialPluginsDir = filename:join(
+ ?config(current_srcdir, Config),
+ "plugins"),
+ PluginsDir = ?config(rmq_plugins_dir, Config),
+ lists:foreach(
+ fun(Path) ->
+ Filename = filename:basename(Path),
+ IsRabbit = re:run(
+ Filename,
+ "^rabbit-", [{capture, none}]) =:= match,
+ case IsRabbit of
+ true ->
+ Dest = filename:join(PluginsDir, Filename),
+ ct:pal(
+ ?LOW_IMPORTANCE,
+ "Copy `~s` to `~s` to fix CLI erroneous "
+ "dependency on `rabbit`", [Path, Dest]),
+ ok = rabbit_file:recursive_copy(Path, Dest);
+ false ->
+ ok
+ end
+ end,
+ filelib:wildcard(filename:join(InitialPluginsDir, "*"))),
+ Config.
+
+enable_feature_flag_on(Config, Node, FeatureName) ->
+ rabbit_ct_broker_helpers:rpc(
+ Config, Node, rabbit_feature_flags, enable, [FeatureName]).
+
+enable_feature_flag_everywhere(Config, FeatureName) ->
+ rabbit_ct_broker_helpers:rpc_all(
+ Config, rabbit_feature_flags, enable, [FeatureName]).
+
+is_feature_flag_supported(Config, FeatureName) ->
+ rabbit_ct_broker_helpers:rpc_all(
+ Config, rabbit_feature_flags, is_supported, [FeatureName]).
+
+is_feature_flag_enabled(Config, FeatureName) ->
+ rabbit_ct_broker_helpers:rpc_all(
+ Config, rabbit_feature_flags, is_enabled, [FeatureName]).
+
+is_feature_flag_subsystem_available(Config) ->
+ lists:all(
+ fun(B) -> B end,
+ rabbit_ct_broker_helpers:rpc_all(
+ Config, erlang, function_exported, [rabbit_feature_flags, list, 0])).
+
+feature_flags_files(Config) ->
+ rabbit_ct_broker_helpers:rpc_all(
+ Config, rabbit_feature_flags, enabled_feature_flags_list_file, []).
+
+log_feature_flags_of_all_nodes(Config) ->
+ rabbit_ct_broker_helpers:rpc_all(
+ Config, rabbit_feature_flags, info, [#{color => false,
+ lines => false}]).
+
+feature_flags_to_app_attrs(FeatureFlags) when is_map(FeatureFlags) ->
+ [{?MODULE, % Application
+ ?MODULE, % Module
+ maps:to_list(FeatureFlags)}].
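+
+%% The tuple above mimics the `{Application, Module, Attributes}` entries
+%% that the feature flags subsystem collects when scanning loaded
+%% applications for `-rabbit_feature_flag(...)` attributes; we assume
+%% `inject_test_feature_flags/1` accepts the same shape.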
+
+declare_arbitrary_feature_flag(Config) ->
+ FeatureFlags = #{ff_from_testsuite =>
+ #{desc => "My feature flag",
+ stability => stable}},
+ rabbit_ct_broker_helpers:rpc_all(
+ Config,
+ rabbit_feature_flags,
+ inject_test_feature_flags,
+ [feature_flags_to_app_attrs(FeatureFlags)]),
+ ok.
+
+block(Pairs) -> [block(X, Y) || {X, Y} <- Pairs].
+unblock(Pairs) -> [allow(X, Y) || {X, Y} <- Pairs].
+
+block(X, Y) ->
+ rabbit_ct_broker_helpers:block_traffic_between(X, Y).
+
+allow(X, Y) ->
+ rabbit_ct_broker_helpers:allow_traffic_between(X, Y).
diff --git a/deps/rabbit/test/feature_flags_SUITE_data/my_plugin/.gitignore b/deps/rabbit/test/feature_flags_SUITE_data/my_plugin/.gitignore
new file mode 100644
index 0000000000..f6d56e0687
--- /dev/null
+++ b/deps/rabbit/test/feature_flags_SUITE_data/my_plugin/.gitignore
@@ -0,0 +1,7 @@
+/.erlang.mk/
+/deps/
+/ebin/
+/escript
+/plugins/
+/my_plugin.d
+/sbin
diff --git a/deps/rabbit/test/feature_flags_SUITE_data/my_plugin/Makefile b/deps/rabbit/test/feature_flags_SUITE_data/my_plugin/Makefile
new file mode 100644
index 0000000000..8f6681090b
--- /dev/null
+++ b/deps/rabbit/test/feature_flags_SUITE_data/my_plugin/Makefile
@@ -0,0 +1,15 @@
+PROJECT = my_plugin
+PROJECT_DESCRIPTION = Plugin to test feature flags
+PROJECT_VERSION = 1.0.0
+
+define PROJECT_APP_EXTRA_KEYS
+ {broker_version_requirements, []}
+endef
+
+DEPS = rabbit_common rabbit
+
+DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-plugin.mk
+DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk
+
+include rabbitmq-components.mk
+include erlang.mk
diff --git a/deps/rabbit/test/feature_flags_SUITE_data/my_plugin/erlang.mk b/deps/rabbit/test/feature_flags_SUITE_data/my_plugin/erlang.mk
new file mode 100644
index 0000000000..f303054bad
--- /dev/null
+++ b/deps/rabbit/test/feature_flags_SUITE_data/my_plugin/erlang.mk
@@ -0,0 +1 @@
+include ../../../erlang.mk
diff --git a/deps/rabbit/test/feature_flags_SUITE_data/my_plugin/rabbitmq-components.mk b/deps/rabbit/test/feature_flags_SUITE_data/my_plugin/rabbitmq-components.mk
new file mode 100644
index 0000000000..9f89dba726
--- /dev/null
+++ b/deps/rabbit/test/feature_flags_SUITE_data/my_plugin/rabbitmq-components.mk
@@ -0,0 +1 @@
+include ../../../rabbitmq-components.mk
diff --git a/deps/rabbit/test/feature_flags_SUITE_data/my_plugin/src/my_plugin.erl b/deps/rabbit/test/feature_flags_SUITE_data/my_plugin/src/my_plugin.erl
new file mode 100644
index 0000000000..687acdb5de
--- /dev/null
+++ b/deps/rabbit/test/feature_flags_SUITE_data/my_plugin/src/my_plugin.erl
@@ -0,0 +1,10 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2019-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(my_plugin).
+
+-rabbit_feature_flag({plugin_ff, #{desc => "Plugin's feature flag A"}}).
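+
+%% The declaration above only sets a description. A hypothetical richer
+%% declaration (extra keys assumed from the feature flags subsystem, not
+%% used by this test plugin) could look like:
+%%
+%% -rabbit_feature_flag({plugin_ff_b,
+%%                       #{desc       => "Plugin's feature flag B",
+%%                         stability  => stable,
+%%                         depends_on => [plugin_ff]}}).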
diff --git a/deps/rabbit/test/honeycomb_cth.erl b/deps/rabbit/test/honeycomb_cth.erl
new file mode 100644
index 0000000000..dd82da13c7
--- /dev/null
+++ b/deps/rabbit/test/honeycomb_cth.erl
@@ -0,0 +1,105 @@
+-module(honeycomb_cth).
+
+-export([id/1]).
+-export([init/2]).
+
+-export([pre_init_per_testcase/4]).
+-export([post_end_per_testcase/5]).
+
+-record(state, {directory, github_workflow, github_run_id,
+ github_repository, github_sha, github_ref,
+ base_rmq_ref, secondary_umbrella,
+ erlang_version, elixir_version,
+ otp_release, cpu_topology, schedulers,
+ system_architecture, system_memory_data,
+ start_times = #{}}).
+
+id(Opts) ->
+ proplists:get_value(directory, Opts, "/tmp/honeycomb").
+
+init(Id, _Opts) ->
+ application:ensure_all_started(os_mon),
+ {ok, #state{directory = Id,
+ github_workflow = os:getenv("GITHUB_WORKFLOW", "unknown"),
+ github_run_id = os:getenv("GITHUB_RUN_ID", "unknown"),
+ github_repository = os:getenv("GITHUB_REPOSITORY", "unknown"),
+ github_sha = os:getenv("GITHUB_SHA", "unknown"),
+ github_ref = os:getenv("GITHUB_REF", "unknown"),
+ base_rmq_ref = os:getenv("BASE_RMQ_REF", "unknown"),
+ secondary_umbrella = os:getenv("SECONDARY_UMBRELLA", "none"),
+ erlang_version = os:getenv("ERLANG_VERSION", "unknown"),
+ elixir_version = os:getenv("ELIXIR_VERSION", "unknown"),
+ otp_release = erlang:system_info(otp_release),
+ cpu_topology = erlang:system_info(cpu_topology),
+ schedulers = erlang:system_info(schedulers),
+ system_architecture = erlang:system_info(system_architecture),
+ system_memory_data = memsup:get_system_memory_data()}}.
+
+pre_init_per_testcase(Suite, TC, Config, #state{start_times = StartTimes} = State) ->
+ SuiteTimes = maps:get(Suite, StartTimes, #{}),
+ {Config, State#state{start_times =
+ StartTimes#{Suite =>
+ SuiteTimes#{TC => erlang:timestamp()}}}}.
+
+post_end_per_testcase(Suite, TC, _Config, Return, #state{github_workflow = GithubWorkflow,
+ github_run_id = GithubRunId,
+ github_repository = GithubRepository,
+ github_sha = GithubSha,
+ github_ref = GithubRef,
+ base_rmq_ref = BaseRmqRef,
+ secondary_umbrella = SecondaryUmbrella,
+ erlang_version = ErlangVersion,
+ elixir_version = ElixirVersion,
+ otp_release = OtpRelease,
+ cpu_topology = CpuTopology,
+ schedulers = Schedulers,
+ system_architecture = SystemArchitecture,
+ system_memory_data = SystemMemoryData,
+ start_times = StartTimes} = State) ->
+ EndTime = erlang:timestamp(),
+ SuiteTimes = maps:get(Suite, StartTimes),
+ {StartTime, SuiteTimes1} = maps:take(TC, SuiteTimes),
+ DurationMicroseconds = timer:now_diff(EndTime, StartTime),
+
+ File = filename(Suite, TC, State),
+ ok = filelib:ensure_dir(File),
+ {ok, F} = file:open(File, [write]),
+
+ Json = jsx:encode([{<<"ci">>, <<"GitHub Actions">>},
+ {<<"github_workflow">>, list_to_binary(GithubWorkflow)},
+ {<<"github_run_id">>, list_to_binary(GithubRunId)},
+ {<<"github_repository">>, list_to_binary(GithubRepository)},
+ {<<"github_sha">>, list_to_binary(GithubSha)},
+ {<<"github_ref">>, list_to_binary(GithubRef)},
+ {<<"base_rmq_ref">>, list_to_binary(BaseRmqRef)},
+ {<<"secondary_umbrella">>, list_to_binary(SecondaryUmbrella)},
+ {<<"erlang_version">>, list_to_binary(ErlangVersion)},
+ {<<"elixir_version">>, list_to_binary(ElixirVersion)},
+ {<<"otp_release">>, list_to_binary(OtpRelease)},
+ {<<"cpu_topology">>, cpu_topology_json_term(CpuTopology)},
+ {<<"schedulers">>, Schedulers},
+ {<<"system_architecture">>, list_to_binary(SystemArchitecture)},
+ {<<"system_memory_data">>, memory_json_term(SystemMemoryData)},
+ {<<"suite">>, list_to_binary(atom_to_list(Suite))},
+ {<<"testcase">>, list_to_binary(atom_to_list(TC))},
+ {<<"duration_seconds">>, DurationMicroseconds / 1000000},
+ {<<"result">>, list_to_binary(io_lib:format("~p", [Return]))}]),
+
+ file:write(F, Json),
+ file:close(F),
+ {Return, State#state{start_times = StartTimes#{Suite := SuiteTimes1}}}.
+
+filename(Suite, TC, #state{directory = Dir}) ->
+ filename:join(Dir,
+ integer_to_list(erlang:system_time())
+ ++ "_" ++ atom_to_list(Suite)
+ ++ "_" ++ atom_to_list(TC)
+ ++ ".json").
+
+memory_json_term(SystemMemoryData) when is_list(SystemMemoryData) ->
+ [{list_to_binary(atom_to_list(K)), V} || {K, V} <- SystemMemoryData].
+
+cpu_topology_json_term([{processor, Cores}]) when is_list(Cores) ->
+ [{<<"processor">>, [begin
+ [{<<"core">>, [{list_to_binary(atom_to_list(Kind)), Index}]}]
+ end || {core, {Kind, Index}} <- Cores]}].
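+
+%% This module is a Common Test hook (CTH). A minimal sketch of wiring
+%% it up from code, assuming a hypothetical `some_SUITE` and the default
+%% output directory consumed by id/1 above, could be:
+%%
+%%   ct:run_test([{suite, some_SUITE},
+%%                {ct_hooks, [{honeycomb_cth,
+%%                             [{directory, "/tmp/honeycomb"}]}]}]).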
diff --git a/deps/rabbit/test/lazy_queue_SUITE.erl b/deps/rabbit/test/lazy_queue_SUITE.erl
new file mode 100644
index 0000000000..8748b07aca
--- /dev/null
+++ b/deps/rabbit/test/lazy_queue_SUITE.erl
@@ -0,0 +1,215 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(lazy_queue_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-compile(export_all).
+
+-define(QNAME, <<"queue.mode.test">>).
+-define(MESSAGE_COUNT, 2000).
+
+all() ->
+ [
+ {group, non_parallel_tests}
+ ].
+
+groups() ->
+ [
+ {non_parallel_tests, [], [
+ declare_args,
+ queue_mode_policy,
+ publish_consume
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(_, Config) ->
+ Config.
+
+end_per_group(_, Config) ->
+ Config.
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase),
+ ClusterSize = 2,
+ TestNumber = rabbit_ct_helpers:testcase_number(Config, ?MODULE, Testcase),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodes_count, ClusterSize},
+ {rmq_nodes_clustered, true},
+ {rmq_nodename_suffix, Testcase},
+ {tcp_ports_base, {skip_n_nodes, TestNumber * ClusterSize}}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps() ++ [
+ fun rabbit_ct_broker_helpers:set_ha_policy_all/1
+ ]).
+
+end_per_testcase(Testcase, Config) ->
+ Config1 = rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()),
+ rabbit_ct_helpers:testcase_finished(Config1, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+declare_args(Config) ->
+ A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, A),
+ LQ = <<"lazy-q">>,
+ declare(Ch, LQ, [{<<"x-queue-mode">>, longstr, <<"lazy">>}]),
+ assert_queue_mode(A, LQ, lazy),
+
+ DQ = <<"default-q">>,
+ declare(Ch, DQ, [{<<"x-queue-mode">>, longstr, <<"default">>}]),
+ assert_queue_mode(A, DQ, default),
+
+ DQ2 = <<"default-q2">>,
+ declare(Ch, DQ2),
+ assert_queue_mode(A, DQ2, default),
+
+ passed.
+
+queue_mode_policy(Config) ->
+ A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+
+ set_ha_mode_policy(Config, A, <<"lazy">>),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, A),
+
+ LQ = <<"lazy-q">>,
+ declare(Ch, LQ, [{<<"x-queue-mode">>, longstr, <<"lazy">>}]),
+ assert_queue_mode(A, LQ, lazy),
+
+ LQ2 = <<"lazy-q-2">>,
+ declare(Ch, LQ2),
+ assert_queue_mode(A, LQ2, lazy),
+
+ DQ = <<"default-q">>,
+ declare(Ch, DQ, [{<<"x-queue-mode">>, longstr, <<"default">>}]),
+ assert_queue_mode(A, DQ, default),
+
+ set_ha_mode_policy(Config, A, <<"default">>),
+
+ ok = wait_for_queue_mode(A, LQ, lazy, 5000),
+ ok = wait_for_queue_mode(A, LQ2, default, 5000),
+ ok = wait_for_queue_mode(A, DQ, default, 5000),
+
+ passed.
+
+publish_consume(Config) ->
+ A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, A),
+ declare(Ch, ?QNAME),
+
+ rabbit_ct_client_helpers:publish(Ch, ?QNAME, ?MESSAGE_COUNT),
+ consume(Ch, ?QNAME, ack),
+ [assert_delivered(Ch, ack, P) || P <- lists:seq(1, ?MESSAGE_COUNT)],
+
+ set_ha_mode_policy(Config, A, <<"lazy">>),
+ rabbit_ct_client_helpers:publish(Ch, ?QNAME, ?MESSAGE_COUNT),
+ rabbit_ct_client_helpers:publish(Ch, ?QNAME, ?MESSAGE_COUNT),
+ [assert_delivered(Ch, ack, P) || P <- lists:seq(1, ?MESSAGE_COUNT)],
+
+ set_ha_mode_policy(Config, A, <<"default">>),
+ [assert_delivered(Ch, ack, P) || P <- lists:seq(1, ?MESSAGE_COUNT)],
+
+ rabbit_ct_client_helpers:publish(Ch, ?QNAME, ?MESSAGE_COUNT),
+ set_ha_mode_policy(Config, A, <<"lazy">>),
+ rabbit_ct_client_helpers:publish(Ch, ?QNAME, ?MESSAGE_COUNT),
+ set_ha_mode_policy(Config, A, <<"default">>),
+ [assert_delivered(Ch, ack, P) || P <- lists:seq(1, ?MESSAGE_COUNT)],
+
+ set_ha_mode_policy(Config, A, <<"lazy">>),
+ [assert_delivered(Ch, ack, P) || P <- lists:seq(1, ?MESSAGE_COUNT)],
+
+ cancel(Ch),
+
+ passed.
+
+%%----------------------------------------------------------------------------
+
+declare(Ch, Q) ->
+ declare(Ch, Q, []).
+
+declare(Ch, Q, Args) ->
+ amqp_channel:call(Ch, #'queue.declare'{queue = Q,
+ durable = true,
+ arguments = Args}).
+
+consume(Ch, Q, Ack) ->
+ amqp_channel:subscribe(Ch, #'basic.consume'{queue = Q,
+ no_ack = Ack =:= no_ack,
+ consumer_tag = <<"ctag">>},
+ self()),
+ receive
+ #'basic.consume_ok'{consumer_tag = <<"ctag">>} ->
+ ok
+ end.
+
+cancel(Ch) ->
+ amqp_channel:call(Ch, #'basic.cancel'{consumer_tag = <<"ctag">>}).
+
+assert_delivered(Ch, Ack, Payload) ->
+ PBin = payload2bin(Payload),
+ receive
+ {#'basic.deliver'{delivery_tag = DTag}, #amqp_msg{payload = PBin2}} ->
+ PBin = PBin2,
+ maybe_ack(Ch, Ack, DTag)
+ end.
+
+maybe_ack(Ch, do_ack, DTag) ->
+ amqp_channel:cast(Ch, #'basic.ack'{delivery_tag = DTag}),
+ DTag;
+maybe_ack(_Ch, _, DTag) ->
+ DTag.
+
+payload2bin(Int) -> list_to_binary(integer_to_list(Int)).
+
+set_ha_mode_policy(Config, Node, Mode) ->
+ ok = rabbit_ct_broker_helpers:set_ha_policy(Config, Node, <<".*">>, <<"all">>,
+ [{<<"queue-mode">>, Mode}]).
+
+
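+%% Poll every 100 ms until the queue reports the expected mode or the
+%% Max budget (in milliseconds) is exhausted; callers match `ok`, so a
+%% timeout fails the test with a badmatch on `fail`.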
+wait_for_queue_mode(_Node, _Q, _Mode, Max) when Max < 0 ->
+ fail;
+wait_for_queue_mode(Node, Q, Mode, Max) ->
+ case get_queue_mode(Node, Q) of
+ Mode -> ok;
+ _ -> timer:sleep(100),
+ wait_for_queue_mode(Node, Q, Mode, Max - 100)
+ end.
+
+assert_queue_mode(Node, Q, Expected) ->
+ Actual = get_queue_mode(Node, Q),
+ Expected = Actual.
+
+get_queue_mode(Node, Q) ->
+ QNameRes = rabbit_misc:r(<<"/">>, queue, Q),
+ {ok, AMQQueue} =
+ rpc:call(Node, rabbit_amqqueue, lookup, [QNameRes]),
+ [{backing_queue_status, Status}] =
+ rpc:call(Node, rabbit_amqqueue, info,
+ [AMQQueue, [backing_queue_status]]),
+ proplists:get_value(mode, Status).
diff --git a/deps/rabbit/test/list_consumers_sanity_check_SUITE.erl b/deps/rabbit/test/list_consumers_sanity_check_SUITE.erl
new file mode 100644
index 0000000000..fbd31fa3e8
--- /dev/null
+++ b/deps/rabbit/test/list_consumers_sanity_check_SUITE.erl
@@ -0,0 +1,125 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(list_consumers_sanity_check_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, list_consumers_sanity_check}
+ ].
+
+groups() ->
+ [
+ {list_consumers_sanity_check, [], [
+ list_consumers_sanity_check
+ ]}
+ ].
+
+group(_) ->
+ [].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(Group, Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, Group},
+ {rmq_nodes_count, 1}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_group(_Group, Config) ->
+ rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcase
+%% -------------------------------------------------------------------
+
+list_consumers_sanity_check(Config) ->
+ A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ Chan = rabbit_ct_client_helpers:open_channel(Config, A),
+ %% this queue is not cleaned up because the entire node is
+ %% reset between tests
+ QName = <<"list_consumers_q">>,
+ #'queue.declare_ok'{} = amqp_channel:call(Chan, #'queue.declare'{queue = QName}),
+
+ %% No consumers even if we have some queues
+ [] = rabbitmqctl_list_consumers(Config, A),
+
+ %% Several consumers on single channel should be correctly reported
+ #'basic.consume_ok'{consumer_tag = CTag1} = amqp_channel:call(Chan, #'basic.consume'{queue = QName}),
+ #'basic.consume_ok'{consumer_tag = CTag2} = amqp_channel:call(Chan, #'basic.consume'{queue = QName}),
+ true = (lists:sort([CTag1, CTag2]) =:=
+ lists:sort(rabbitmqctl_list_consumers(Config, A))),
+
+ %% `rabbitmqctl report` shares some code with `list_consumers`, so
+ %% check that it also reports both channels
+ {ok, ReportStdOut} = rabbit_ct_broker_helpers:rabbitmqctl(Config, A,
+ ["list_consumers", "--no-table-headers"]),
+ ReportLines = re:split(ReportStdOut, <<"\n">>, [trim]),
+ ReportCTags = [lists:nth(3, re:split(Row, <<"\t">>)) || <<"list_consumers_q", _/binary>> = Row <- ReportLines],
+ true = (lists:sort([CTag1, CTag2]) =:=
+ lists:sort(ReportCTags)).
+
+rabbitmqctl_list_consumers(Config, Node) ->
+ {ok, StdOut} = rabbit_ct_broker_helpers:rabbitmqctl(Config, Node,
+ ["list_consumers", "--no-table-headers"]),
+ [<<"Listing consumers", _/binary>> | ConsumerRows] = re:split(StdOut, <<"\n">>, [trim]),
+ CTags = [ lists:nth(3, re:split(Row, <<"\t">>)) || Row <- ConsumerRows ],
+ CTags.
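+
+%% The parsing above assumes tab-separated `rabbitmqctl` output with the
+%% consumer tag in the third column; the exact column layout is an
+%% assumption of this test, not something rabbitmqctl guarantees.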
diff --git a/deps/rabbit/test/list_queues_online_and_offline_SUITE.erl b/deps/rabbit/test/list_queues_online_and_offline_SUITE.erl
new file mode 100644
index 0000000000..d26fdc03e2
--- /dev/null
+++ b/deps/rabbit/test/list_queues_online_and_offline_SUITE.erl
@@ -0,0 +1,99 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(list_queues_online_and_offline_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, list_queues_online_and_offline}
+ ].
+
+groups() ->
+ [
+ {list_queues_online_and_offline, [], [
+ list_queues_online_and_offline %% Stop node B.
+ ]}
+ ].
+
+group(_) ->
+ [].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(Group, Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config,
+ [
+ {rmq_nodename_suffix, Group},
+ {rmq_nodes_count, 2}
+ ]),
+ rabbit_ct_helpers:run_steps(
+ Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_group(_Group, Config) ->
+ rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+%% ---------------------------------------------------------------------------
+%% Testcase
+%% ---------------------------------------------------------------------------
+
+list_queues_online_and_offline(Config) ->
+ [A, B] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ ACh = rabbit_ct_client_helpers:open_channel(Config, A),
+ %% Node B will be stopped
+ BCh = rabbit_ct_client_helpers:open_channel(Config, B),
+ #'queue.declare_ok'{} = amqp_channel:call(ACh, #'queue.declare'{queue = <<"q_a_1">>, durable = true}),
+ #'queue.declare_ok'{} = amqp_channel:call(ACh, #'queue.declare'{queue = <<"q_a_2">>, durable = true}),
+ #'queue.declare_ok'{} = amqp_channel:call(BCh, #'queue.declare'{queue = <<"q_b_1">>, durable = true}),
+ #'queue.declare_ok'{} = amqp_channel:call(BCh, #'queue.declare'{queue = <<"q_b_2">>, durable = true}),
+
+ rabbit_ct_broker_helpers:rabbitmqctl(Config, B, ["stop"]),
+
+ rabbit_ct_helpers:await_condition(
+ fun() ->
+ [A] == rpc:call(A, rabbit_mnesia, cluster_nodes, [running])
+ end, 60000),
+
+ GotUp = lists:sort(rabbit_ct_broker_helpers:rabbitmqctl_list(Config, A,
+ ["list_queues", "--online", "name", "--no-table-headers"])),
+ ExpectUp = [[<<"q_a_1">>], [<<"q_a_2">>]],
+ ExpectUp = GotUp,
+
+ GotDown = lists:sort(rabbit_ct_broker_helpers:rabbitmqctl_list(Config, A,
+ ["list_queues", "--offline", "name", "--no-table-headers"])),
+ ExpectDown = [[<<"q_b_1">>], [<<"q_b_2">>]],
+ ExpectDown = GotDown,
+
+ GotAll = lists:sort(rabbit_ct_broker_helpers:rabbitmqctl_list(Config, A,
+ ["list_queues", "name", "--no-table-headers"])),
+ ExpectAll = ExpectUp ++ ExpectDown,
+ ExpectAll = GotAll,
+
+ ok.
diff --git a/deps/rabbit/test/maintenance_mode_SUITE.erl b/deps/rabbit/test/maintenance_mode_SUITE.erl
new file mode 100644
index 0000000000..3abbf9b064
--- /dev/null
+++ b/deps/rabbit/test/maintenance_mode_SUITE.erl
@@ -0,0 +1,284 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(maintenance_mode_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, cluster_size_3},
+ {group, quorum_queues}
+ ].
+
+groups() ->
+ [
+ {cluster_size_3, [], [
+ maintenance_mode_status,
+ listener_suspension_status,
+ client_connection_closure,
+ classic_mirrored_queue_leadership_transfer
+ ]},
+ {quorum_queues, [], [
+ quorum_queue_leadership_transfer
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Setup and teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(_Group, Config) ->
+ rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodes_count, 3}
+ ]).
+
+end_per_group(_, Config) ->
+ Config.
+
+init_per_testcase(quorum_queue_leadership_transfer = Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase),
+ ClusterSize = ?config(rmq_nodes_count, Config),
+ TestNumber = rabbit_ct_helpers:testcase_number(Config, ?MODULE, Testcase),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodes_clustered, true},
+ {rmq_nodename_suffix, Testcase},
+ {tcp_ports_base, {skip_n_nodes, TestNumber * ClusterSize}}
+ ]),
+ Config2 = rabbit_ct_helpers:run_steps(
+ Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()),
+ MaintenanceModeFFEnabled = rabbit_ct_broker_helpers:enable_feature_flag(
+ Config2, maintenance_mode_status),
+ QuorumQueueFFEnabled = rabbit_ct_broker_helpers:enable_feature_flag(
+ Config2, quorum_queue),
+ case MaintenanceModeFFEnabled of
+ ok ->
+ case QuorumQueueFFEnabled of
+ ok ->
+ Config2;
+ Skip ->
+ end_per_testcase(Testcase, Config2),
+ Skip
+ end;
+ Skip ->
+ end_per_testcase(Testcase, Config2),
+ Skip
+ end;
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase),
+ ClusterSize = ?config(rmq_nodes_count, Config),
+ TestNumber = rabbit_ct_helpers:testcase_number(Config, ?MODULE, Testcase),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodes_clustered, true},
+ {rmq_nodename_suffix, Testcase},
+ {tcp_ports_base, {skip_n_nodes, TestNumber * ClusterSize}}
+ ]),
+ Config2 = rabbit_ct_helpers:run_steps(
+ Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps() ++
+ [fun rabbit_ct_broker_helpers:set_ha_policy_all/1]),
+ MaintenanceModeFFEnabled = rabbit_ct_broker_helpers:enable_feature_flag(
+ Config2,
+ maintenance_mode_status),
+ case MaintenanceModeFFEnabled of
+ ok ->
+ Config2;
+ Skip ->
+ end_per_testcase(Testcase, Config2),
+ Skip
+ end.
+
+end_per_testcase(Testcase, Config) ->
+ Config1 = rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()),
+ rabbit_ct_helpers:testcase_finished(Config1, Testcase).
+
+%% -------------------------------------------------------------------
+%% Test Cases
+%% -------------------------------------------------------------------
+
+maintenance_mode_status(Config) ->
+ Nodes = [A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ [begin
+ ?assertNot(rabbit_ct_broker_helpers:is_being_drained_local_read(Config, Node)),
+ ?assertNot(rabbit_ct_broker_helpers:is_being_drained_consistent_read(Config, Node))
+ end || Node <- Nodes],
+
+ [begin
+ [begin
+ ?assertNot(rabbit_ct_broker_helpers:is_being_drained_consistent_read(Config, TargetNode, NodeToCheck))
+ end || NodeToCheck <- Nodes]
+ end || TargetNode <- Nodes],
+
+ rabbit_ct_broker_helpers:mark_as_being_drained(Config, B),
+ rabbit_ct_helpers:await_condition(
+ fun () -> rabbit_ct_broker_helpers:is_being_drained_local_read(Config, B) end,
+ 10000),
+
+ [begin
+ ?assert(rabbit_ct_broker_helpers:is_being_drained_consistent_read(Config, TargetNode, B))
+ end || TargetNode <- Nodes],
+
+ ?assertEqual(
+ lists:usort([A, C]),
+ lists:usort(rabbit_ct_broker_helpers:rpc(Config, B,
+ rabbit_maintenance, primary_replica_transfer_candidate_nodes, []))),
+
+ rabbit_ct_broker_helpers:unmark_as_being_drained(Config, B),
+ rabbit_ct_helpers:await_condition(
+ fun () -> not rabbit_ct_broker_helpers:is_being_drained_local_read(Config, B) end,
+ 10000),
+
+ [begin
+ ?assertNot(rabbit_ct_broker_helpers:is_being_drained_local_read(Config, TargetNode, B)),
+ ?assertNot(rabbit_ct_broker_helpers:is_being_drained_consistent_read(Config, TargetNode, B))
+ end || TargetNode <- Nodes],
+
+ ?assertEqual(
+ lists:usort([A, C]),
+ lists:usort(rabbit_ct_broker_helpers:rpc(Config, B,
+ rabbit_maintenance, primary_replica_transfer_candidate_nodes, []))),
+
+ ok.
+
+
+listener_suspension_status(Config) ->
+ Nodes = [A | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ ct:pal("Picked node ~s for maintenance tests...", [A]),
+
+ rabbit_ct_helpers:await_condition(
+ fun () -> not rabbit_ct_broker_helpers:is_being_drained_local_read(Config, A) end, 10000),
+
+ [begin
+ ?assertNot(rabbit_ct_broker_helpers:is_being_drained_consistent_read(Config, Node))
+ end || Node <- Nodes],
+
+ Conn1 = rabbit_ct_client_helpers:open_connection(Config, A),
+ ?assert(is_pid(Conn1)),
+ rabbit_ct_client_helpers:close_connection(Conn1),
+
+ rabbit_ct_broker_helpers:drain_node(Config, A),
+ rabbit_ct_helpers:await_condition(
+ fun () -> rabbit_ct_broker_helpers:is_being_drained_local_read(Config, A) end, 10000),
+
+ ?assertEqual({error, econnrefused}, rabbit_ct_client_helpers:open_unmanaged_connection(Config, A)),
+
+ rabbit_ct_broker_helpers:revive_node(Config, A),
+ rabbit_ct_helpers:await_condition(
+ fun () -> not rabbit_ct_broker_helpers:is_being_drained_local_read(Config, A) end, 10000),
+
+ Conn3 = rabbit_ct_client_helpers:open_connection(Config, A),
+ ?assert(is_pid(Conn3)),
+ rabbit_ct_client_helpers:close_connection(Conn3),
+
+ ok.
+
+
+client_connection_closure(Config) ->
+ [A | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ ct:pal("Picked node ~s for maintenance tests...", [A]),
+
+ rabbit_ct_helpers:await_condition(
+ fun () -> not rabbit_ct_broker_helpers:is_being_drained_local_read(Config, A) end, 10000),
+
+ Conn1 = rabbit_ct_client_helpers:open_connection(Config, A),
+ ?assert(is_pid(Conn1)),
+ ?assertEqual(1, length(rabbit_ct_broker_helpers:rpc(Config, A, rabbit_networking, local_connections, []))),
+
+ rabbit_ct_broker_helpers:drain_node(Config, A),
+ ?assertEqual(0, length(rabbit_ct_broker_helpers:rpc(Config, A, rabbit_networking, local_connections, []))),
+
+ rabbit_ct_broker_helpers:revive_node(Config, A).
+
+
+classic_mirrored_queue_leadership_transfer(Config) ->
+ [A | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ ct:pal("Picked node ~s for maintenance tests...", [A]),
+
+ rabbit_ct_helpers:await_condition(
+ fun () -> not rabbit_ct_broker_helpers:is_being_drained_local_read(Config, A) end, 10000),
+
+ PolicyPattern = <<"^cq.mirrored">>,
+ rabbit_ct_broker_helpers:set_ha_policy(Config, A, PolicyPattern, <<"all">>),
+
+ Conn = rabbit_ct_client_helpers:open_connection(Config, A),
+ {ok, Ch} = amqp_connection:open_channel(Conn),
+ QName = <<"cq.mirrored.1">>,
+ amqp_channel:call(Ch, #'queue.declare'{queue = QName, durable = true}),
+
+ ?assertEqual(1, length(rabbit_ct_broker_helpers:rpc(Config, A, rabbit_amqqueue, list_local, [<<"/">>]))),
+
+ rabbit_ct_broker_helpers:drain_node(Config, A),
+ rabbit_ct_helpers:await_condition(
+ fun () -> rabbit_ct_broker_helpers:is_being_drained_local_read(Config, A) end, 10000),
+
+ ?assertEqual(0, length(rabbit_ct_broker_helpers:rpc(Config, A, rabbit_amqqueue, list_local, [<<"/">>]))),
+
+ rabbit_ct_broker_helpers:revive_node(Config, A),
+ %% rabbit_ct_broker_helpers:set_ha_policy/4 uses the pattern as the policy name
+ rabbit_ct_broker_helpers:clear_policy(Config, A, PolicyPattern).
+
+quorum_queue_leadership_transfer(Config) ->
+ [A | _] = Nodenames = rabbit_ct_broker_helpers:get_node_configs(
+ Config, nodename),
+ ct:pal("Picked node ~s for maintenance tests...", [A]),
+
+ rabbit_ct_helpers:await_condition(
+ fun () -> not rabbit_ct_broker_helpers:is_being_drained_local_read(Config, A) end, 10000),
+
+ Conn = rabbit_ct_client_helpers:open_connection(Config, A),
+ {ok, Ch} = amqp_connection:open_channel(Conn),
+ QName = <<"qq.1">>,
+ amqp_channel:call(Ch, #'queue.declare'{queue = QName, durable = true, arguments = [
+ {<<"x-queue-type">>, longstr, <<"quorum">>}
+ ]}),
+
+ %% we cannot assert on the number of local leaders here: declaring a QQ on node A
+ %% does not guarantee that the leader will be hosted on node A
+
+ rabbit_ct_broker_helpers:drain_node(Config, A),
+ rabbit_ct_helpers:await_condition(
+ fun () -> rabbit_ct_broker_helpers:is_being_drained_local_read(Config, A) end, 10000),
+
+ %% quorum queue leader election is asynchronous
+ AllTheSame = quorum_queue_utils:fifo_machines_use_same_version(
+ Config, Nodenames),
+ case AllTheSame of
+ true ->
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ LocalLeaders = rabbit_ct_broker_helpers:rpc(
+ Config, A,
+ rabbit_amqqueue,
+ list_local_leaders,
+ []),
+ length(LocalLeaders) =:= 0
+ end, 20000);
+ false ->
+ ct:pal(
+ ?LOW_IMPORTANCE,
+ "Skip leader election check because rabbit_fifo machines "
+ "have different versions", [])
+ end,
+
+ rabbit_ct_broker_helpers:revive_node(Config, A).
diff --git a/deps/rabbit/test/many_node_ha_SUITE.erl b/deps/rabbit/test/many_node_ha_SUITE.erl
new file mode 100644
index 0000000000..ece7dc8830
--- /dev/null
+++ b/deps/rabbit/test/many_node_ha_SUITE.erl
@@ -0,0 +1,112 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(many_node_ha_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-compile(export_all).
+
+suite() ->
+ [
+ {timetrap, {minutes, 5}}
+ ].
+
+all() ->
+ [
+ {group, cluster_size_6}
+ ].
+
+groups() ->
+ [
+ {cluster_size_6, [], [
+ kill_intermediate
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(cluster_size_6, Config) ->
+ rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodes_count, 6}
+ ]).
+
+end_per_group(_, Config) ->
+ Config.
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase),
+ ClusterSize = ?config(rmq_nodes_count, Config),
+ TestNumber = rabbit_ct_helpers:testcase_number(Config, ?MODULE, Testcase),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodes_clustered, true},
+ {rmq_nodename_suffix, Testcase},
+ {tcp_ports_base, {skip_n_nodes, TestNumber * ClusterSize}}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps() ++ [
+ fun rabbit_ct_broker_helpers:set_ha_policy_all/1
+ ]).
+
+end_per_testcase(Testcase, Config) ->
+ Config1 = rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()),
+ rabbit_ct_helpers:testcase_finished(Config1, Testcase).
+
+%% -------------------------------------------------------------------
+%% Test Cases
+%% -------------------------------------------------------------------
+
+kill_intermediate(Config) ->
+ [A, B, C, D, E, F] = rabbit_ct_broker_helpers:get_node_configs(Config,
+ nodename),
+ Msgs = rabbit_ct_helpers:cover_work_factor(Config, 20000),
+ MasterChannel = rabbit_ct_client_helpers:open_channel(Config, A),
+ ConsumerChannel = rabbit_ct_client_helpers:open_channel(Config, E),
+ ProducerChannel = rabbit_ct_client_helpers:open_channel(Config, F),
+ Queue = <<"test">>,
+ amqp_channel:call(MasterChannel, #'queue.declare'{queue = Queue,
+ auto_delete = false}),
+
+ %% TODO: this seems *highly* timing dependent - the assumption being
+ %% that the kill will work quickly enough that there will still be
+ %% some messages in-flight that we *must* receive despite the intervening
+ %% node deaths. It would be nice if we could find a means to do this
+ %% in a way that is not actually timing dependent.
+
+ %% Worse still, it assumes that killing the master will cause a
+ %% failover to Slave1, and so on. That is not guaranteed.
+
+ ConsumerPid = rabbit_ha_test_consumer:create(ConsumerChannel,
+ Queue, self(), false, Msgs),
+
+ ProducerPid = rabbit_ha_test_producer:create(ProducerChannel,
+ Queue, self(), false, Msgs),
+
+ %% create a killer for the master and the first 3 mirrors
+ [rabbit_ct_broker_helpers:kill_node_after(Config, Node, Time) ||
+ {Node, Time} <- [{A, 50},
+ {B, 50},
+ {C, 100},
+ {D, 100}]],
+
+ %% verify that the consumer got all msgs, or die, or time out
+ rabbit_ha_test_producer:await_response(ProducerPid),
+ rabbit_ha_test_consumer:await_response(ConsumerPid),
+ ok.
diff --git a/deps/rabbit/test/message_size_limit_SUITE.erl b/deps/rabbit/test/message_size_limit_SUITE.erl
new file mode 100644
index 0000000000..f43a582c85
--- /dev/null
+++ b/deps/rabbit/test/message_size_limit_SUITE.erl
@@ -0,0 +1,145 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(message_size_limit_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("kernel/include/file.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-compile(export_all).
+
+-define(TIMEOUT_LIST_OPS_PASS, 5000).
+-define(TIMEOUT, 30000).
+-define(TIMEOUT_CHANNEL_EXCEPTION, 5000).
+
+-define(CLEANUP_QUEUE_NAME, <<"cleanup-queue">>).
+
+all() ->
+ [
+ {group, parallel_tests}
+ ].
+
+groups() ->
+ [
+ {parallel_tests, [parallel], [
+ max_message_size
+ ]}
+ ].
+
+suite() ->
+ [
+ {timetrap, {minutes, 3}}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(Group, Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, Group},
+ {rmq_nodes_count, 1}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_group(_Group, Config) ->
+ rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+%% -------------------------------------------------------------------
+%% Test cases
+%% -------------------------------------------------------------------
+
+max_message_size(Config) ->
+ Binary2M = gen_binary_mb(2),
+ Binary4M = gen_binary_mb(4),
+ Binary6M = gen_binary_mb(6),
+ Binary10M = gen_binary_mb(10),
+
+ Size2Mb = 1024 * 1024 * 2,
+ Size2Mb = byte_size(Binary2M),
+
+ rabbit_ct_broker_helpers:rpc(Config, 0,
+ application, set_env, [rabbit, max_message_size, 1024 * 1024 * 3]),
+
+ {_, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+
+ %% Binary is within the max size limit
+ amqp_channel:call(Ch, #'basic.publish'{routing_key = <<"none">>}, #amqp_msg{payload = Binary2M}),
+ %% The channel process is alive
+ assert_channel_alive(Ch),
+
+ Monitor = monitor(process, Ch),
+ amqp_channel:call(Ch, #'basic.publish'{routing_key = <<"none">>}, #amqp_msg{payload = Binary4M}),
+ assert_channel_fail_max_size(Ch, Monitor),
+
+ %% increase the limit
+ rabbit_ct_broker_helpers:rpc(Config, 0,
+ application, set_env, [rabbit, max_message_size, 1024 * 1024 * 8]),
+
+ {_, Ch1} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+
+ amqp_channel:call(Ch1, #'basic.publish'{routing_key = <<"nope">>}, #amqp_msg{payload = Binary2M}),
+ assert_channel_alive(Ch1),
+
+ amqp_channel:call(Ch1, #'basic.publish'{routing_key = <<"nope">>}, #amqp_msg{payload = Binary4M}),
+ assert_channel_alive(Ch1),
+
+ amqp_channel:call(Ch1, #'basic.publish'{routing_key = <<"nope">>}, #amqp_msg{payload = Binary6M}),
+ assert_channel_alive(Ch1),
+
+ Monitor1 = monitor(process, Ch1),
+ amqp_channel:call(Ch1, #'basic.publish'{routing_key = <<"none">>}, #amqp_msg{payload = Binary10M}),
+ assert_channel_fail_max_size(Ch1, Monitor1),
+
+ %% increase beyond the hard limit
+ rabbit_ct_broker_helpers:rpc(Config, 0,
+ application, set_env, [rabbit, max_message_size, 1024 * 1024 * 600]),
+ Val = rabbit_ct_broker_helpers:rpc(Config, 0,
+ rabbit_channel, get_max_message_size, []),
+
+ ?assertEqual(?MAX_MSG_SIZE, Val).
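+%% The final assertion relies on the broker clamping max_message_size at the
+%% compile-time hard cap ?MAX_MSG_SIZE (512 MiB in rabbit.hrl at the time of
+%% writing; the exact value is an assumption of this comment, the test only
+%% needs it to be below the 600 MiB we tried to configure).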
+
+%% -------------------------------------------------------------------
+%% Implementation
+%% -------------------------------------------------------------------
+
+gen_binary_mb(N) ->
+ B1M = << <<"_">> || _ <- lists:seq(1, 1024 * 1024) >>,
+ << B1M || _ <- lists:seq(1, N) >>.
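+%% Quick sketch of what the generator above yields (illustrative only):
+%%   Bin = gen_binary_mb(2),
+%%   2 * 1024 * 1024 = byte_size(Bin).   %% 2 MiB of <<"_">> bytes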
+
+assert_channel_alive(Ch) ->
+ amqp_channel:call(Ch, #'basic.publish'{routing_key = <<"nope">>},
+ #amqp_msg{payload = <<"HI">>}).
+
+assert_channel_fail_max_size(Ch, Monitor) ->
+ receive
+ {'DOWN', Monitor, process, Ch,
+ {shutdown,
+ {server_initiated_close, 406, _Error}}} ->
+ ok
+ after ?TIMEOUT_CHANNEL_EXCEPTION ->
+ error({channel_exception_expected, max_message_size})
+ end.
diff --git a/deps/rabbit/test/metrics_SUITE.erl b/deps/rabbit/test/metrics_SUITE.erl
new file mode 100644
index 0000000000..e585ccd5a8
--- /dev/null
+++ b/deps/rabbit/test/metrics_SUITE.erl
@@ -0,0 +1,404 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2016-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+-module(metrics_SUITE).
+-compile(export_all).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("proper/include/proper.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include_lib("rabbit_common/include/rabbit_core_metrics.hrl").
+-include_lib("rabbitmq_ct_helpers/include/rabbit_assert.hrl").
+
+
+all() ->
+ [
+ {group, non_parallel_tests}
+ ].
+
+groups() ->
+ [
+ {non_parallel_tests, [], [
+ connection,
+ channel,
+ channel_connection_close,
+ channel_queue_exchange_consumer_close_connection,
+ channel_queue_delete_queue,
+ connection_metric_count_test,
+ channel_metric_count_test,
+ queue_metric_count_test,
+ queue_metric_count_channel_per_queue_test,
+ connection_metric_idemp_test,
+ channel_metric_idemp_test,
+ queue_metric_idemp_test
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+merge_app_env(Config) ->
+ rabbit_ct_helpers:merge_app_env(Config,
+ {rabbit, [
+ {collect_statistics, fine},
+ {collect_statistics_interval, 500}
+ ]}).
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, ?MODULE}
+ ]),
+ rabbit_ct_helpers:run_setup_steps(Config1,
+ [ fun merge_app_env/1 ] ++
+ rabbit_ct_broker_helpers:setup_steps()).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config,
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_group(_, Config) ->
+ Config.
+
+end_per_group(_, Config) ->
+ Config.
+
+init_per_testcase(Testcase, Config) ->
+ clean_core_metrics(Config),
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+% NB: node_stats tests are in the management_agent repo
+
+connection_metric_count_test(Config) ->
+ rabbit_ct_proper_helpers:run_proper(fun prop_connection_metric_count/1, [Config], 25).
+
+channel_metric_count_test(Config) ->
+ rabbit_ct_proper_helpers:run_proper(fun prop_channel_metric_count/1, [Config], 25).
+
+queue_metric_count_test(Config) ->
+ rabbit_ct_proper_helpers:run_proper(fun prop_queue_metric_count/1, [Config], 5).
+
+queue_metric_count_channel_per_queue_test(Config) ->
+ rabbit_ct_proper_helpers:run_proper(fun prop_queue_metric_count_channel_per_queue/1,
+ [Config], 5).
+
+connection_metric_idemp_test(Config) ->
+ connection_metric_idemp(Config, {1, 1}),
+ connection_metric_idemp(Config, {1, 2}),
+ connection_metric_idemp(Config, {2, 2}).
+
+channel_metric_idemp_test(Config) ->
+ rabbit_ct_proper_helpers:run_proper(fun prop_channel_metric_idemp/1, [Config], 25).
+
+queue_metric_idemp_test(Config) ->
+ rabbit_ct_proper_helpers:run_proper(fun prop_queue_metric_idemp/1, [Config], 25).
+
+prop_connection_metric_idemp(Config) ->
+ ?FORALL(N, {integer(1, 25), integer(1, 25)},
+ connection_metric_idemp(Config, N)).
+
+prop_channel_metric_idemp(Config) ->
+ ?FORALL(N, {integer(1, 25), integer(1, 25)},
+ channel_metric_idemp(Config, N)).
+
+prop_queue_metric_idemp(Config) ->
+ ?FORALL(N, {integer(1, 25), integer(1, 25)},
+ queue_metric_idemp(Config, N)).
+
+prop_connection_metric_count(Config) ->
+ ?FORALL(N, {integer(1, 25), resize(100, list(oneof([add, remove])))},
+ connection_metric_count(Config, N)).
+
+prop_channel_metric_count(Config) ->
+ ?FORALL(N, {integer(1, 25), resize(100, list(oneof([add, remove])))},
+ channel_metric_count(Config, N)).
+
+prop_queue_metric_count(Config) ->
+ ?FORALL(N, {integer(1, 10), resize(10, list(oneof([add, remove])))},
+ queue_metric_count(Config, N)).
+
+prop_queue_metric_count_channel_per_queue(Config) ->
+ ?FORALL(N, {integer(1, 10), resize(10, list(oneof([add, remove])))},
+ queue_metric_count_channel_per_queue(Config, N)).
+
+connection_metric_idemp(Config, {N, R}) ->
+ Conns = [rabbit_ct_client_helpers:open_unmanaged_connection(Config)
+ || _ <- lists:seq(1, N)],
+ Table = ?awaitMatch(L when is_list(L) andalso length(L) == N,
+ [ Pid || {Pid, _} <- read_table_rpc(Config,
+ connection_metrics)],
+ 5000),
+ Table2 = [ Pid || {Pid, _} <- read_table_rpc(Config, connection_coarse_metrics)],
+ % refresh stats 'R' times
+ [[Pid ! emit_stats || Pid <- Table] || _ <- lists:seq(1, R)],
+ force_metric_gc(Config),
+ TableAfter = [ Pid || {Pid, _} <- read_table_rpc(Config, connection_metrics)],
+ TableAfter2 = [ Pid || {Pid, _} <- read_table_rpc(Config, connection_coarse_metrics)],
+ [rabbit_ct_client_helpers:close_connection(Conn) || Conn <- Conns],
+ ?assertEqual(Table, TableAfter),
+ ?assertEqual(Table2, TableAfter2),
+ ?assertEqual(N, length(Table)),
+ ?assertEqual(N, length(TableAfter)).
+
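+%% Unlike connection_metric_idemp/2 above, which is driven directly and can
+%% use assertions, the two idempotency helpers below run under PropEr's
+%% ?FORALL and therefore return a boolean verdict instead of asserting.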
+channel_metric_idemp(Config, {N, R}) ->
+ Conn = rabbit_ct_client_helpers:open_unmanaged_connection(Config),
+ [amqp_connection:open_channel(Conn) || _ <- lists:seq(1, N)],
+ Table = [ Pid || {Pid, _} <- read_table_rpc(Config, channel_metrics)],
+ Table2 = [ Pid || {Pid, _} <- read_table_rpc(Config, channel_process_metrics)],
+ % refresh stats 'R' times
+ [[Pid ! emit_stats || Pid <- Table] || _ <- lists:seq(1, R)],
+ force_metric_gc(Config),
+ TableAfter = [ Pid || {Pid, _} <- read_table_rpc(Config, channel_metrics)],
+ TableAfter2 = [ Pid || {Pid, _} <- read_table_rpc(Config, channel_process_metrics)],
+ rabbit_ct_client_helpers:close_connection(Conn),
+ (Table2 == TableAfter2) and (Table == TableAfter) and
+ (N == length(Table)) and (N == length(TableAfter)).
+
+queue_metric_idemp(Config, {N, R}) ->
+ clean_core_metrics(Config),
+ Conn = rabbit_ct_client_helpers:open_unmanaged_connection(Config),
+ {ok, Chan} = amqp_connection:open_channel(Conn),
+ Queues =
+ [begin
+ Queue = declare_queue(Chan),
+ ensure_exchange_metrics_populated(Chan, Queue),
+ ensure_channel_queue_metrics_populated(Chan, Queue),
+ Queue
+ end || _ <- lists:seq(1, N)],
+
+ Table = [ Pid || {Pid, _, _} <- read_table_rpc(Config, queue_metrics)],
+ Table2 = [ Pid || {Pid, _, _} <- read_table_rpc(Config, queue_coarse_metrics)],
+ % refresh stats 'R' times
+ ChanTable = read_table_rpc(Config, channel_created),
+ [[Pid ! emit_stats || {Pid, _, _} <- ChanTable ] || _ <- lists:seq(1, R)],
+ force_metric_gc(Config),
+ TableAfter = [ Pid || {Pid, _, _} <- read_table_rpc(Config, queue_metrics)],
+ TableAfter2 = [ Pid || {Pid, _, _} <- read_table_rpc(Config, queue_coarse_metrics)],
+ [ delete_queue(Chan, Q) || Q <- Queues],
+ rabbit_ct_client_helpers:close_connection(Conn),
+ (Table2 == TableAfter2) and (Table == TableAfter) and
+ (N == length(Table)) and (N == length(TableAfter)).
+
+connection_metric_count(Config, Ops) ->
+ add_rem_counter(Config, Ops,
+ {fun rabbit_ct_client_helpers:open_unmanaged_connection/1,
+ fun(Cfg) ->
+ rabbit_ct_client_helpers:close_connection(Cfg)
+ end},
+ [ connection_created,
+ connection_metrics,
+ connection_coarse_metrics ]).
+
+channel_metric_count(Config, Ops) ->
+ Conn = rabbit_ct_client_helpers:open_unmanaged_connection(Config),
+ Result = add_rem_counter(Config, Ops,
+ {fun (_Config) ->
+ {ok, Chan} = amqp_connection:open_channel(Conn),
+ Chan
+ end,
+ fun amqp_channel:close/1},
+ [ channel_created,
+ channel_metrics,
+ channel_process_metrics ]),
+ ok = rabbit_ct_client_helpers:close_connection(Conn),
+ Result.
+
+queue_metric_count(Config, Ops) ->
+ Conn = rabbit_ct_client_helpers:open_unmanaged_connection(Config),
+ {ok, Chan} = amqp_connection:open_channel(Conn),
+ AddFun = fun (_) ->
+ Queue = declare_queue(Chan),
+ ensure_exchange_metrics_populated(Chan, Queue),
+ ensure_channel_queue_metrics_populated(Chan, Queue),
+ force_channel_stats(Config),
+ Queue
+ end,
+ Result = add_rem_counter(Config, Ops,
+ {AddFun,
+ fun (Q) -> delete_queue(Chan, Q),
+ force_metric_gc(Config)
+ end}, [channel_queue_metrics,
+ channel_queue_exchange_metrics ]),
+ ok = rabbit_ct_client_helpers:close_connection(Conn),
+ Result.
+
+queue_metric_count_channel_per_queue(Config, Ops) ->
+ Conn = rabbit_ct_client_helpers:open_unmanaged_connection(Config),
+ AddFun = fun (_) ->
+ {ok, Chan} = amqp_connection:open_channel(Conn),
+ Queue = declare_queue(Chan),
+ ensure_exchange_metrics_populated(Chan, Queue),
+ ensure_channel_queue_metrics_populated(Chan, Queue),
+ force_channel_stats(Config),
+ {Chan, Queue}
+ end,
+ Result = add_rem_counter(Config, Ops,
+ {AddFun,
+ fun ({Chan, Q}) ->
+ delete_queue(Chan, Q),
+ force_metric_gc(Config)
+ end},
+ [ channel_queue_metrics,
+ channel_queue_exchange_metrics ]),
+ ok = rabbit_ct_client_helpers:close_connection(Conn),
+ Result.
+
+add_rem_counter(Config, {Initial, Ops}, {AddFun, RemFun}, Tables) ->
+ Things = [ AddFun(Config) || _ <- lists:seq(1, Initial) ],
+ % either add or remove some things
+ {FinalLen, Things1} =
+ lists:foldl(fun(add, {L, Items}) ->
+ {L+1, [AddFun(Config) | Items]};
+ (remove, {L, [H|Tail]}) ->
+ RemFun(H),
+ {L-1, Tail};
+ (_, S) -> S end,
+ {Initial, Things},
+ Ops),
+ force_metric_gc(Config),
+ TabLens = lists:map(fun(T) ->
+ length(read_table_rpc(Config, T))
+ end, Tables),
+ [RemFun(Thing) || Thing <- Things1],
+ [FinalLen] == lists:usort(TabLens).
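+%% Worked example of the fold above (hypothetical input): with
+%% Ops = {2, [add, remove, remove]} the live count goes 2 -> 3 -> 2 -> 1,
+%% so the property holds only if every table in Tables ends up with
+%% exactly one row after the final GC.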
+
+
+connection(Config) ->
+ Conn = rabbit_ct_client_helpers:open_unmanaged_connection(Config),
+ [_] = read_table_rpc(Config, connection_created),
+ [_] = read_table_rpc(Config, connection_metrics),
+ [_] = read_table_rpc(Config, connection_coarse_metrics),
+ ok = rabbit_ct_client_helpers:close_connection(Conn),
+ force_metric_gc(Config),
+ [] = read_table_rpc(Config, connection_created),
+ [] = read_table_rpc(Config, connection_metrics),
+ [] = read_table_rpc(Config, connection_coarse_metrics),
+ ok.
+
+channel(Config) ->
+ Conn = rabbit_ct_client_helpers:open_unmanaged_connection(Config),
+ {ok, Chan} = amqp_connection:open_channel(Conn),
+ [_] = read_table_rpc(Config, channel_created),
+ [_] = read_table_rpc(Config, channel_metrics),
+ [_] = read_table_rpc(Config, channel_process_metrics),
+ ok = amqp_channel:close(Chan),
+ [] = read_table_rpc(Config, channel_created),
+ [] = read_table_rpc(Config, channel_metrics),
+ [] = read_table_rpc(Config, channel_process_metrics),
+ ok = rabbit_ct_client_helpers:close_connection(Conn).
+
+channel_connection_close(Config) ->
+ Conn = rabbit_ct_client_helpers:open_unmanaged_connection(Config),
+ {ok, _} = amqp_connection:open_channel(Conn),
+ [_] = read_table_rpc(Config, channel_created),
+ [_] = read_table_rpc(Config, channel_metrics),
+ [_] = read_table_rpc(Config, channel_process_metrics),
+ ok = rabbit_ct_client_helpers:close_connection(Conn),
+ [] = read_table_rpc(Config, channel_created),
+ [] = read_table_rpc(Config, channel_metrics),
+ [] = read_table_rpc(Config, channel_process_metrics).
+
+channel_queue_delete_queue(Config) ->
+ Conn = rabbit_ct_client_helpers:open_unmanaged_connection(Config),
+ {ok, Chan} = amqp_connection:open_channel(Conn),
+ Queue = declare_queue(Chan),
+ ensure_exchange_metrics_populated(Chan, Queue),
+ ensure_channel_queue_metrics_populated(Chan, Queue),
+ force_channel_stats(Config),
+ [_] = read_table_rpc(Config, channel_queue_metrics),
+ [_] = read_table_rpc(Config, channel_queue_exchange_metrics),
+
+ delete_queue(Chan, Queue),
+ force_metric_gc(Config),
+ % ensure removal of queue cleans up channel_queue metrics
+ [] = read_table_rpc(Config, channel_queue_exchange_metrics),
+ [] = read_table_rpc(Config, channel_queue_metrics),
+ ok = rabbit_ct_client_helpers:close_connection(Conn),
+ ok.
+
+channel_queue_exchange_consumer_close_connection(Config) ->
+ Conn = rabbit_ct_client_helpers:open_unmanaged_connection(Config),
+ {ok, Chan} = amqp_connection:open_channel(Conn),
+ Queue = declare_queue(Chan),
+ ensure_exchange_metrics_populated(Chan, Queue),
+ force_channel_stats(Config),
+
+ [_] = read_table_rpc(Config, channel_exchange_metrics),
+ [_] = read_table_rpc(Config, channel_queue_exchange_metrics),
+
+ ensure_channel_queue_metrics_populated(Chan, Queue),
+ force_channel_stats(Config),
+ [_] = read_table_rpc(Config, channel_queue_metrics),
+
+ Sub = #'basic.consume'{queue = Queue},
+ #'basic.consume_ok'{consumer_tag = _} =
+ amqp_channel:call(Chan, Sub),
+
+ [_] = read_table_rpc(Config, consumer_created),
+
+ ok = rabbit_ct_client_helpers:close_connection(Conn),
+ % ensure cleanup happened
+ force_metric_gc(Config),
+ [] = read_table_rpc(Config, channel_exchange_metrics),
+ [] = read_table_rpc(Config, channel_queue_exchange_metrics),
+ [] = read_table_rpc(Config, channel_queue_metrics),
+ [] = read_table_rpc(Config, consumer_created),
+ ok.
+
+
+
+%% -------------------------------------------------------------------
+%% Utilities
+%% -------------------------------------------------------------------
+
+declare_queue(Chan) ->
+ Declare = #'queue.declare'{durable = false, auto_delete = true},
+ #'queue.declare_ok'{queue = Name} = amqp_channel:call(Chan, Declare),
+ Name.
+
+delete_queue(Chan, Name) ->
+ Delete = #'queue.delete'{queue = Name},
+ #'queue.delete_ok'{} = amqp_channel:call(Chan, Delete).
+
+ensure_exchange_metrics_populated(Chan, RoutingKey) ->
+ % need to publish for exchange metrics to be populated
+ Publish = #'basic.publish'{routing_key = RoutingKey},
+ amqp_channel:call(Chan, Publish, #amqp_msg{payload = <<"hello">>}).
+
+ensure_channel_queue_metrics_populated(Chan, Queue) ->
+ % need to do a basic.get and wait for the stats emission timer for
+ % channel queue metrics to be populated
+ Get = #'basic.get'{queue = Queue, no_ack=true},
+ {#'basic.get_ok'{}, #amqp_msg{}} = amqp_channel:call(Chan, Get).
+
+force_channel_stats(Config) ->
+ [ Pid ! emit_stats || {Pid, _} <- read_table_rpc(Config, channel_created) ],
+ timer:sleep(100).
+
+read_table_rpc(Config, Table) ->
+ rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, read_table, [Table]).
+
+clean_core_metrics(Config) ->
+ [ rabbit_ct_broker_helpers:rpc(Config, 0, ets, delete_all_objects, [Table])
+ || {Table, _} <- ?CORE_TABLES].
+
+read_table(Table) ->
+ ets:tab2list(Table).
+
+force_metric_gc(Config) ->
+ timer:sleep(300),
+ rabbit_ct_broker_helpers:rpc(Config, 0, erlang, send,
+ [rabbit_core_metrics_gc, start_gc]),
+ rabbit_ct_broker_helpers:rpc(Config, 0, gen_server, call,
+ [rabbit_core_metrics_gc, test]).
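+%% The trailing gen_server:call/3 acts as a synchronisation barrier: once the
+%% 'test' call (assumed to be a no-op handler in rabbit_core_metrics_gc)
+%% returns, the preceding start_gc message must have been processed, so table
+%% reads performed afterwards see a post-GC state.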
diff --git a/deps/rabbit/test/mirrored_supervisor_SUITE.erl b/deps/rabbit/test/mirrored_supervisor_SUITE.erl
new file mode 100644
index 0000000000..7ce88cfdaa
--- /dev/null
+++ b/deps/rabbit/test/mirrored_supervisor_SUITE.erl
@@ -0,0 +1,328 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(mirrored_supervisor_SUITE).
+
+-behaviour(mirrored_supervisor).
+
+-include_lib("common_test/include/ct.hrl").
+
+-compile(export_all).
+
+-define(MS, mirrored_supervisor).
+-define(SERVER, mirrored_supervisor_SUITE_gs).
+
+all() ->
+ [
+ migrate,
+ migrate_twice,
+ already_there,
+ delete_restart,
+ which_children,
+ large_group,
+ childspecs_at_init,
+ anonymous_supervisors,
+ no_migration_on_shutdown,
+ start_idempotence,
+ unsupported,
+ ignore,
+ startup_failure
+ ].
+
+init_per_suite(Config) ->
+ ok = application:set_env(mnesia, dir, ?config(priv_dir, Config)),
+ ok = application:start(mnesia),
+ lists:foreach(
+ fun ({Tab, TabDef}) ->
+ TabDef1 = proplists:delete(match, TabDef),
+ case mnesia:create_table(Tab, TabDef1) of
+ {atomic, ok} ->
+ ok;
+ {aborted, Reason} ->
+ throw({error,
+ {table_creation_failed, Tab, TabDef1, Reason}})
+ end
+ end, mirrored_supervisor:table_definitions()),
+ Config.
+
+end_per_suite(Config) ->
+ ok = application:stop(mnesia),
+ Config.
+
+%% ---------------------------------------------------------------------------
+%% Functional tests
+%% ---------------------------------------------------------------------------
+
+%% Simplest test
+migrate(_Config) ->
+ passed = with_sups(
+ fun([A, _]) ->
+ {ok, _} = ?MS:start_child(a, childspec(worker)),
+ Pid1 = pid_of(worker),
+ kill_registered(A, Pid1),
+ Pid2 = pid_of(worker),
+ false = (Pid1 =:= Pid2)
+ end, [a, b]).
+
+%% Is migration transitive?
+migrate_twice(_Config) ->
+ passed = with_sups(
+ fun([A, B]) ->
+ {ok, _} = ?MS:start_child(a, childspec(worker)),
+ Pid1 = pid_of(worker),
+ kill_registered(A, Pid1),
+ {ok, C} = start_sup(c),
+ Pid2 = pid_of(worker),
+ kill_registered(B, Pid2),
+ Pid3 = pid_of(worker),
+ false = (Pid1 =:= Pid3),
+ kill(C)
+ end, [a, b]).
+
+%% Can't start the same child twice
+already_there(_Config) ->
+ passed = with_sups(
+ fun([_, _]) ->
+ S = childspec(worker),
+ {ok, Pid} = ?MS:start_child(a, S),
+ {error, {already_started, Pid}} = ?MS:start_child(b, S)
+ end, [a, b]).
+
+%% Deleting and restarting should work as per a normal supervisor
+delete_restart(_Config) ->
+ passed = with_sups(
+ fun([_, _]) ->
+ S = childspec(worker),
+ {ok, Pid1} = ?MS:start_child(a, S),
+ {error, running} = ?MS:delete_child(a, worker),
+ ok = ?MS:terminate_child(a, worker),
+ ok = ?MS:delete_child(a, worker),
+ {ok, Pid2} = ?MS:start_child(b, S),
+ false = (Pid1 =:= Pid2),
+ ok = ?MS:terminate_child(b, worker),
+ {ok, Pid3} = ?MS:restart_child(b, worker),
+ Pid3 = pid_of(worker),
+ false = (Pid2 =:= Pid3),
+ %% Not the same supervisor as the worker is on
+ ok = ?MS:terminate_child(a, worker),
+ ok = ?MS:delete_child(a, worker),
+ {ok, Pid4} = ?MS:start_child(a, S),
+ false = (Pid3 =:= Pid4)
+ end, [a, b]).
+
+which_children(_Config) ->
+ passed = with_sups(
+ fun([A, B] = Both) ->
+ ?MS:start_child(A, childspec(worker)),
+ assert_wc(Both, fun ([C]) -> true = is_pid(wc_pid(C)) end),
+ ok = ?MS:terminate_child(a, worker),
+ assert_wc(Both, fun ([C]) -> undefined = wc_pid(C) end),
+ {ok, _} = ?MS:restart_child(a, worker),
+ assert_wc(Both, fun ([C]) -> true = is_pid(wc_pid(C)) end),
+ ?MS:start_child(B, childspec(worker2)),
+ assert_wc(Both, fun (C) -> 2 = length(C) end)
+ end, [a, b]).
+
+assert_wc(Sups, Fun) ->
+ [Fun(?MS:which_children(Sup)) || Sup <- Sups].
+
+wc_pid(Child) ->
+ {worker, Pid, worker, [?MODULE]} = Child,
+ Pid.
+
+%% Not all the members of the group should actually do the failover
+large_group(_Config) ->
+ passed = with_sups(
+ fun([A, _, _, _]) ->
+ {ok, _} = ?MS:start_child(a, childspec(worker)),
+ Pid1 = pid_of(worker),
+ kill_registered(A, Pid1),
+ Pid2 = pid_of(worker),
+ false = (Pid1 =:= Pid2)
+ end, [a, b, c, d]).
+
+%% Do childspecs work when returned from init?
+childspecs_at_init(_Config) ->
+ S = childspec(worker),
+ passed = with_sups(
+ fun([A, _]) ->
+ Pid1 = pid_of(worker),
+ kill_registered(A, Pid1),
+ Pid2 = pid_of(worker),
+ false = (Pid1 =:= Pid2)
+ end, [{a, [S]}, {b, [S]}]).
+
+anonymous_supervisors(_Config) ->
+ passed = with_sups(
+ fun([A, _B]) ->
+ {ok, _} = ?MS:start_child(A, childspec(worker)),
+ Pid1 = pid_of(worker),
+ kill_registered(A, Pid1),
+ Pid2 = pid_of(worker),
+ false = (Pid1 =:= Pid2)
+ end, [anon, anon]).
+
+%% When a mirrored_supervisor terminates, we should not migrate, but
+%% the whole supervisor group should shut down. To test this we set up
+%% a situation where the gen_server will only fail if it's running
+%% under the supervisor called 'evil'. It should not migrate to
+%% 'good' and survive; rather, the whole group should go away.
+no_migration_on_shutdown(_Config) ->
+ passed = with_sups(
+ fun([Evil, _]) ->
+ {ok, _} = ?MS:start_child(Evil, childspec(worker)),
+ try
+ call(worker, ping, 1000, 100),
+ exit(worker_should_not_have_migrated)
+ catch exit:{timeout_waiting_for_server, _, _} ->
+ ok
+ end
+ end, [evil, good]).
+
+start_idempotence(_Config) ->
+ passed = with_sups(
+ fun([_]) ->
+ CS = childspec(worker),
+ {ok, Pid} = ?MS:start_child(a, CS),
+ {error, {already_started, Pid}} = ?MS:start_child(a, CS),
+ ?MS:terminate_child(a, worker),
+ {error, already_present} = ?MS:start_child(a, CS)
+ end, [a]).
+
+unsupported(_Config) ->
+ try
+ ?MS:start_link({global, foo}, get_group(group), fun tx_fun/1, ?MODULE,
+ {one_for_one, []}),
+ exit(no_global)
+ catch error:badarg ->
+ ok
+ end,
+ try
+ {ok, _} = ?MS:start_link({local, foo}, get_group(group),
+ fun tx_fun/1, ?MODULE, {simple_one_for_one, []}),
+ exit(no_sofo)
+ catch error:badarg ->
+ ok
+ end.
+
+%% Just test we don't blow up
+ignore(_Config) ->
+ ?MS:start_link({local, foo}, get_group(group), fun tx_fun/1, ?MODULE,
+ {fake_strategy_for_ignore, []}).
+
+startup_failure(_Config) ->
+ [test_startup_failure(F) || F <- [want_error, want_exit]].
+
+test_startup_failure(Fail) ->
+ process_flag(trap_exit, true),
+ ?MS:start_link(get_group(group), fun tx_fun/1, ?MODULE,
+ {one_for_one, [childspec(Fail)]}),
+ receive
+ {'EXIT', _, shutdown} ->
+ ok
+ after 1000 ->
+ exit({did_not_exit, Fail})
+ end,
+ process_flag(trap_exit, false).
+
+%% ---------------------------------------------------------------------------
+
+with_sups(Fun, Sups) ->
+ inc_group(),
+ Pids = [begin {ok, Pid} = start_sup(Sup), Pid end || Sup <- Sups],
+ Fun(Pids),
+ [kill(Pid) || Pid <- Pids, is_process_alive(Pid)],
+ timer:sleep(500),
+ passed.
+
+start_sup(Spec) ->
+ start_sup(Spec, group).
+
+start_sup({Name, ChildSpecs}, Group) ->
+ {ok, Pid} = start_sup0(Name, get_group(Group), ChildSpecs),
+ %% We are not a supervisor; when we kill the supervisor we do not
+ %% want to die!
+ unlink(Pid),
+ {ok, Pid};
+
+start_sup(Name, Group) ->
+ start_sup({Name, []}, Group).
+
+start_sup0(anon, Group, ChildSpecs) ->
+ ?MS:start_link(Group, fun tx_fun/1, ?MODULE,
+ {one_for_one, ChildSpecs});
+
+start_sup0(Name, Group, ChildSpecs) ->
+ ?MS:start_link({local, Name}, Group, fun tx_fun/1, ?MODULE,
+ {one_for_one, ChildSpecs}).
+
+childspec(Id) ->
+ {Id,{?SERVER, start_link, [Id]}, transient, 16#ffffffff, worker, [?MODULE]}.
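+%% In the childspec above, 16#ffffffff is the shutdown timeout in
+%% milliseconds (about 49 days), i.e. effectively "wait forever" for the
+%% worker to stop; 'transient' means it is restarted only on abnormal exit.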
+
+pid_of(Id) ->
+ {received, Pid, ping} = call(Id, ping),
+ Pid.
+
+tx_fun(Fun) ->
+ case mnesia:sync_transaction(Fun) of
+ {atomic, Result} -> Result;
+ {aborted, Reason} -> throw({error, Reason})
+ end.
+
+inc_group() ->
+ Count = case get(counter) of
+ undefined -> 0;
+ C -> C
+ end + 1,
+ put(counter, Count).
+
+get_group(Group) ->
+ {Group, get(counter)}.
+
+call(Id, Msg) -> call(Id, Msg, 10*1000, 100).
+
+call(Id, Msg, MaxDelay, Decr) ->
+ call(Id, Msg, MaxDelay, Decr, undefined).
+
+call(Id, Msg, 0, _Decr, Stacktrace) ->
+ exit({timeout_waiting_for_server, {Id, Msg}, Stacktrace});
+
+call(Id, Msg, MaxDelay, Decr, _) ->
+ try
+ gen_server:call(Id, Msg, infinity)
+ catch exit:_:Stacktrace -> timer:sleep(Decr),
+ call(Id, Msg, MaxDelay - Decr, Decr, Stacktrace)
+ end.
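+%% Retry sketch for the helper above (illustrative numbers):
+%%   call(worker, ping, 1000, 100)
+%% makes up to 10 attempts 100 ms apart and then exits with
+%% timeout_waiting_for_server carrying the stacktrace of the last failure.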
+
+kill(Pid) -> kill(Pid, []).
+kill(Pid, Wait) when is_pid(Wait) -> kill(Pid, [Wait]);
+kill(Pid, Waits) ->
+ erlang:monitor(process, Pid),
+ [erlang:monitor(process, P) || P <- Waits],
+ exit(Pid, bang),
+ kill_wait(Pid),
+ [kill_wait(P) || P <- Waits].
+
+kill_registered(Pid, Child) ->
+ {registered_name, Name} = erlang:process_info(Child, registered_name),
+ kill(Pid, Child),
+ false = (Child =:= whereis(Name)),
+ ok.
+
+kill_wait(Pid) ->
+ receive
+ {'DOWN', _Ref, process, Pid, _Reason} ->
+ ok
+ end.
+
+%% ---------------------------------------------------------------------------
+
+init({fake_strategy_for_ignore, _ChildSpecs}) ->
+ ignore;
+
+init({Strategy, ChildSpecs}) ->
+ {ok, {{Strategy, 0, 1}, ChildSpecs}}.
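+%% {Strategy, 0, 1} sets the restart intensity to 0 restarts per second, so a
+%% single child crash takes the whole (mirrored) supervisor down with it;
+%% no_migration_on_shutdown/1 and startup_failure/1 above depend on exactly
+%% that escalation behaviour.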
diff --git a/deps/rabbit/test/mirrored_supervisor_SUITE_gs.erl b/deps/rabbit/test/mirrored_supervisor_SUITE_gs.erl
new file mode 100644
index 0000000000..62245231d7
--- /dev/null
+++ b/deps/rabbit/test/mirrored_supervisor_SUITE_gs.erl
@@ -0,0 +1,57 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(mirrored_supervisor_SUITE_gs).
+
+%% Dumb gen_server we can supervise
+
+-export([start_link/1]).
+
+-export([init/1, handle_call/3, handle_info/2, terminate/2, code_change/3,
+ handle_cast/2]).
+
+-behaviour(gen_server).
+
+-define(MS, mirrored_supervisor).
+
+start_link(want_error) ->
+ {error, foo};
+
+start_link(want_exit) ->
+ exit(foo);
+
+start_link(Id) ->
+ gen_server:start_link({local, Id}, ?MODULE, [], []).
+
+%% ---------------------------------------------------------------------------
+
+init([]) ->
+ {ok, state}.
+
+handle_call(Msg, _From, State) ->
+ die_if_my_supervisor_is_evil(),
+ {reply, {received, self(), Msg}, State}.
+
+handle_cast(_Msg, State) ->
+ {noreply, State}.
+
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+die_if_my_supervisor_is_evil() ->
+ try lists:keysearch(self(), 2, ?MS:which_children(evil)) of
+ false -> ok;
+ _ -> exit(doooom)
+ catch
+ exit:{noproc, _} -> ok
+ end.
diff --git a/deps/rabbit/test/msg_store_SUITE.erl b/deps/rabbit/test/msg_store_SUITE.erl
new file mode 100644
index 0000000000..e349aa4443
--- /dev/null
+++ b/deps/rabbit/test/msg_store_SUITE.erl
@@ -0,0 +1,53 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(msg_store_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-compile(export_all).
+
+-define(T(Fun, Args), (catch apply(rabbit, Fun, Args))).
+
+all() ->
+ [
+ parameter_validation
+ ].
+
+parameter_validation(_Config) ->
+ %% make sure it works with default values
+ ok = ?T(validate_msg_store_io_batch_size_and_credit_disc_bound,
+ [?CREDIT_DISC_BOUND, ?IO_BATCH_SIZE]),
+
+ %% IO_BATCH_SIZE must be greater than CREDIT_DISC_BOUND initial credit
+ ok = ?T(validate_msg_store_io_batch_size_and_credit_disc_bound,
+ [{4000, 800}, 5000]),
+ {error, _} = ?T(validate_msg_store_io_batch_size_and_credit_disc_bound,
+ [{4000, 800}, 1500]),
+
+ %% All values must be integers
+ {error, _} = ?T(validate_msg_store_io_batch_size_and_credit_disc_bound,
+ [{2000, 500}, "1500"]),
+ {error, _} = ?T(validate_msg_store_io_batch_size_and_credit_disc_bound,
+ [{"2000", 500}, abc]),
+ {error, _} = ?T(validate_msg_store_io_batch_size_and_credit_disc_bound,
+ [{2000, "500"}, 2048]),
+
+ %% CREDIT_DISC_BOUND must be a tuple
+ {error, _} = ?T(validate_msg_store_io_batch_size_and_credit_disc_bound,
+ [[2000, 500], 1500]),
+ {error, _} = ?T(validate_msg_store_io_batch_size_and_credit_disc_bound,
+ [2000, 1500]),
+
+ %% config values can't be smaller than default values
+ {error, _} = ?T(validate_msg_store_io_batch_size_and_credit_disc_bound,
+ [{1999, 500}, 2048]),
+ {error, _} = ?T(validate_msg_store_io_batch_size_and_credit_disc_bound,
+ [{2000, 499}, 2048]),
+ {error, _} = ?T(validate_msg_store_io_batch_size_and_credit_disc_bound,
+ [{2000, 500}, 2047]).
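+%% The failing inputs above imply the defaults this validator enforces as
+%% lower bounds: ?CREDIT_DISC_BOUND = {2000, 500} and ?IO_BATCH_SIZE = 2048
+%% (inferred from the test data, not restated from the header).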
diff --git a/deps/rabbit/test/peer_discovery_classic_config_SUITE.erl b/deps/rabbit/test/peer_discovery_classic_config_SUITE.erl
new file mode 100644
index 0000000000..ddb753adf8
--- /dev/null
+++ b/deps/rabbit/test/peer_discovery_classic_config_SUITE.erl
@@ -0,0 +1,179 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(peer_discovery_classic_config_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-import(rabbit_ct_broker_helpers, [
+ cluster_members_online/2
+]).
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, non_parallel}
+ ].
+
+groups() ->
+ [
+ {non_parallel, [], [
+ successful_discovery,
+ successful_discovery_with_a_subset_of_nodes_coming_online,
+ no_nodes_configured
+ ]}
+ ].
+
+suite() ->
+ [
+ {timetrap, {minutes, 5}}
+ ].
+
+
+%%
+%% Setup/teardown.
+%%
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(_, Config) ->
+ Config.
+
+end_per_group(_, Config) ->
+ Config.
+
+init_per_testcase(successful_discovery = Testcase, Config) ->
+ Config1 = rabbit_ct_helpers:testcase_started(Config, Testcase),
+
+ N = 3,
+ NodeNames = [
+ list_to_atom(rabbit_misc:format("~s-~b", [Testcase, I]))
+ || I <- lists:seq(1, N)
+ ],
+ Config2 = rabbit_ct_helpers:set_config(Config1, [
+ {rmq_nodename_suffix, Testcase},
+ %% note: this must not include the host part
+ {rmq_nodes_count, NodeNames},
+ {rmq_nodes_clustered, false}
+ ]),
+ NodeNamesWithHostname = [rabbit_nodes:make({Name, "localhost"}) || Name <- NodeNames],
+ Config3 = rabbit_ct_helpers:merge_app_env(Config2,
+ {rabbit, [
+ {cluster_nodes, {NodeNamesWithHostname, disc}},
+ {cluster_formation, [
+ {randomized_startup_delay_range, {1, 10}}
+ ]}
+ ]}),
+ rabbit_ct_helpers:run_steps(Config3,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps());
+init_per_testcase(successful_discovery_with_a_subset_of_nodes_coming_online = Testcase, Config) ->
+ Config1 = rabbit_ct_helpers:testcase_started(Config, Testcase),
+
+ N = 2,
+ NodeNames = [
+ list_to_atom(rabbit_misc:format("~s-~b", [Testcase, I]))
+ || I <- lists:seq(1, N)
+ ],
+ Config2 = rabbit_ct_helpers:set_config(Config1, [
+ {rmq_nodename_suffix, Testcase},
+ %% note: this must not include the host part
+ {rmq_nodes_count, NodeNames},
+ {rmq_nodes_clustered, false}
+ ]),
+ NodeNamesWithHostname = [rabbit_nodes:make({Name, "localhost"}) || Name <- [nonexistent | NodeNames]],
+ %% reduce retry time since we know one node on the list does not
+ %% exist, as opposed to merely being unreachable
+ Config3 = rabbit_ct_helpers:merge_app_env(Config2,
+ {rabbit, [
+ {cluster_formation, [
+ {discovery_retry_limit, 10},
+ {discovery_retry_interval, 200},
+ {randomized_startup_delay_range, {1, 10}}
+ ]},
+ {cluster_nodes, {NodeNamesWithHostname, disc}}
+ ]}),
+ rabbit_ct_helpers:run_steps(Config3,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps());
+init_per_testcase(no_nodes_configured = Testcase, Config) ->
+ Config1 = rabbit_ct_helpers:testcase_started(Config, Testcase),
+ Config2 = rabbit_ct_helpers:set_config(Config1, [
+ {rmq_nodename_suffix, Testcase},
+ {rmq_nodes_count, 2},
+ {rmq_nodes_clustered, false}
+ ]),
+ Config3 = rabbit_ct_helpers:merge_app_env(Config2,
+ {rabbit, [
+ {cluster_nodes, {[], disc}},
+ {cluster_formation, [
+ {randomized_startup_delay_range, {1, 10}}
+ ]}
+ ]}),
+ rabbit_ct_helpers:run_steps(Config3,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps());
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase),
+ rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+
+end_per_testcase(Testcase, Config) ->
+ Config1 = rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()),
+ rabbit_ct_helpers:testcase_finished(Config1, Testcase).
+
+
+%%
+%% Test cases
+%%
+successful_discovery(Config) ->
+ Condition = fun() ->
+ 3 =:= length(cluster_members_online(Config, 0)) andalso
+ 3 =:= length(cluster_members_online(Config, 1))
+ end,
+ await_cluster(Config, Condition, [1, 2]).
+
+successful_discovery_with_a_subset_of_nodes_coming_online(Config) ->
+ Condition = fun() ->
+ 2 =:= length(cluster_members_online(Config, 0)) andalso
+ 2 =:= length(cluster_members_online(Config, 1))
+ end,
+ await_cluster(Config, Condition, [1]).
+
+no_nodes_configured(Config) ->
+ Condition = fun() -> length(cluster_members_online(Config, 0)) < 2 end,
+ await_cluster(Config, Condition, [1]).
+
+reset_and_restart_node(Config, I) when is_integer(I) andalso I >= 0 ->
+ Name = rabbit_ct_broker_helpers:get_node_config(Config, I, nodename),
+ rabbit_control_helper:command(stop_app, Name),
+ rabbit_ct_broker_helpers:reset_node(Config, Name),
+ rabbit_control_helper:command(start_app, Name).
+
+await_cluster(Config, Condition, Nodes) ->
+ try
+ rabbit_ct_helpers:await_condition(Condition, 30000)
+ catch
+ exit:{test_case_failed, _} ->
+ ct:pal(?LOW_IMPORTANCE, "Possible deadlock; resetting and restarting these nodes: ~p", [Nodes]),
+ [reset_and_restart_node(Config, N) || N <- Nodes],
+ rabbit_ct_helpers:await_condition(Condition, 30000)
+ end.
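+%% The deadlock recovered from above is inherent to classic config peer
+%% discovery: each node may decide to join another listed node that is itself
+%% still waiting, so no node ever forms the cluster. Resetting and restarting
+%% a subset of nodes breaks the tie; this is best-effort recovery, not a
+%% guarantee.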
diff --git a/deps/rabbit/test/peer_discovery_dns_SUITE.erl b/deps/rabbit/test/peer_discovery_dns_SUITE.erl
new file mode 100644
index 0000000000..5184bc11eb
--- /dev/null
+++ b/deps/rabbit/test/peer_discovery_dns_SUITE.erl
@@ -0,0 +1,104 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(peer_discovery_dns_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, non_parallel}
+ ].
+
+groups() ->
+ [
+ {non_parallel, [], [
+ hostname_discovery_with_long_node_names,
+ hostname_discovery_with_short_node_names,
+ node_discovery_with_long_node_names,
+ node_discovery_with_short_node_names,
+ test_aaaa_record_hostname_discovery
+ ]}
+ ].
+
+suite() ->
+ [
+ %% If a test hangs, no need to wait for 30 minutes.
+ {timetrap, {minutes, 1}}
+ ].
+
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+%% These are stable, publicly resolvable hostnames that return
+%% both A and AAAA records which reverse-resolve.
+-define(DISCOVERY_ENDPOINT_RECORD_A, "dns.google").
+-define(DISCOVERY_ENDPOINT_RECORD_AAAA, "dns.google").
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+
+init_per_testcase(test_aaaa_record_hostname_discovery, Config) ->
+ case inet_res:lookup(?DISCOVERY_ENDPOINT_RECORD_AAAA, in, aaaa) of
+ [] ->
+ {skip, "pre-configured AAAA record does not resolve, skipping"};
+ [_ | _] ->
+ Config
+ end;
+
+init_per_testcase(_Testcase, Config) ->
+ case inet_res:lookup(?DISCOVERY_ENDPOINT_RECORD_A, in, a) of
+ [] ->
+ {skip, "pre-configured *.rabbitmq.com record does not resolve, skipping"};
+ [_ | _] ->
+ Config
+ end.
+
+
+end_per_testcase(_Testcase, Config) ->
+ case inet_res:lookup(?DISCOVERY_ENDPOINT_RECORD_A, in, a) of
+ [] ->
+ {skip, "pre-configured *.rabbitmq.com record does not resolve, skipping"};
+ [_ | _] ->
+ Config
+ end.
+
+
+%% -------------------------------------------------------------------
+%% Test cases
+%% -------------------------------------------------------------------
+
+test_aaaa_record_hostname_discovery(_) ->
+ Result = rabbit_peer_discovery_dns:discover_hostnames(?DISCOVERY_ENDPOINT_RECORD_AAAA, true),
+ ?assert(string:str(lists:flatten(Result), "dns.google") > 0).
+
+hostname_discovery_with_long_node_names(_) ->
+ Result = rabbit_peer_discovery_dns:discover_hostnames(?DISCOVERY_ENDPOINT_RECORD_A, true),
+ ?assert(lists:member("dns.google", Result)).
+
+hostname_discovery_with_short_node_names(_) ->
+ Result = rabbit_peer_discovery_dns:discover_hostnames(?DISCOVERY_ENDPOINT_RECORD_A, false),
+ ?assert(lists:member("dns", Result)).
+
+node_discovery_with_long_node_names(_) ->
+ Result = rabbit_peer_discovery_dns:discover_nodes(?DISCOVERY_ENDPOINT_RECORD_A, true),
+ ?assert(lists:member('ct_rabbit@dns.google', Result)).
+
+node_discovery_with_short_node_names(_) ->
+ Result = rabbit_peer_discovery_dns:discover_nodes(?DISCOVERY_ENDPOINT_RECORD_A, false),
+ ?assert(lists:member(ct_rabbit@dns, Result)).
diff --git a/deps/rabbit/test/per_user_connection_channel_limit_SUITE.erl b/deps/rabbit/test/per_user_connection_channel_limit_SUITE.erl
new file mode 100644
index 0000000000..43c860c8bd
--- /dev/null
+++ b/deps/rabbit/test/per_user_connection_channel_limit_SUITE.erl
@@ -0,0 +1,1651 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(per_user_connection_channel_limit_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("rabbitmq_ct_helpers/include/rabbit_assert.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, cluster_size_1_network},
+ {group, cluster_size_2_network},
+ {group, cluster_size_2_direct}
+ ].
+
+groups() ->
+ ClusterSize1Tests = [
+ most_basic_single_node_connection_and_channel_count,
+ single_node_single_user_connection_and_channel_count,
+ single_node_multiple_users_connection_and_channel_count,
+ single_node_list_in_user,
+ single_node_single_user_limit,
+ single_node_single_user_zero_limit,
+ single_node_single_user_clear_limits,
+ single_node_multiple_users_clear_limits,
+ single_node_multiple_users_limit,
+ single_node_multiple_users_zero_limit
+ ],
+ ClusterSize2Tests = [
+ most_basic_cluster_connection_and_channel_count,
+ cluster_single_user_connection_and_channel_count,
+ cluster_multiple_users_connection_and_channel_count,
+ cluster_node_restart_connection_and_channel_count,
+ cluster_node_list_on_node,
+ cluster_single_user_limit,
+ cluster_single_user_limit2,
+ cluster_single_user_zero_limit,
+ cluster_single_user_clear_limits,
+ cluster_multiple_users_clear_limits,
+ cluster_multiple_users_zero_limit
+ ],
+ [
+ {cluster_size_1_network, [], ClusterSize1Tests},
+ {cluster_size_2_network, [], ClusterSize2Tests},
+ {cluster_size_2_direct, [], ClusterSize2Tests}
+ ].
+
+suite() ->
+ [
+ %% If a test hangs, no need to wait for 30 minutes.
+ {timetrap, {minutes, 8}}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(cluster_size_1_network, Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config, [{connection_type, network}]),
+ init_per_multinode_group(cluster_size_1_network, Config1, 1);
+init_per_group(cluster_size_2_network, Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config, [{connection_type, network}]),
+ init_per_multinode_group(cluster_size_2_network, Config1, 2);
+init_per_group(cluster_size_2_direct, Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config, [{connection_type, direct}]),
+ init_per_multinode_group(cluster_size_2_direct, Config1, 2);
+
+init_per_group(cluster_rename, Config) ->
+ init_per_multinode_group(cluster_rename, Config, 2).
+
+init_per_multinode_group(Group, Config, NodeCount) ->
+ Suffix = rabbit_ct_helpers:testcase_absname(Config, "", "-"),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodes_count, NodeCount},
+ {rmq_nodename_suffix, Suffix}
+ ]),
+ case Group of
+ cluster_rename ->
+ % The broker is managed by {init,end}_per_testcase().
+ Config1;
+ _ ->
+ Config2 = rabbit_ct_helpers:run_steps(
+ Config1, rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()),
+ EnableFF = rabbit_ct_broker_helpers:enable_feature_flag(
+ Config2, user_limits),
+ case EnableFF of
+ ok ->
+ Config2;
+ Skip ->
+ end_per_group(Group, Config2),
+ Skip
+ end
+ end.
+
+end_per_group(cluster_rename, Config) ->
+ % The broker is managed by {init,end}_per_testcase().
+ Config;
+end_per_group(_Group, Config) ->
+ rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase),
+ clear_all_connection_tracking_tables(Config),
+ clear_all_channel_tracking_tables(Config),
+ Config.
+
+end_per_testcase(Testcase, Config) ->
+ clear_all_connection_tracking_tables(Config),
+ clear_all_channel_tracking_tables(Config),
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+clear_all_connection_tracking_tables(Config) ->
+ [rabbit_ct_broker_helpers:rpc(Config,
+ N,
+ rabbit_connection_tracking,
+ clear_tracking_tables,
+ []) || N <- rabbit_ct_broker_helpers:get_node_configs(Config, nodename)].
+
+clear_all_channel_tracking_tables(Config) ->
+ [rabbit_ct_broker_helpers:rpc(Config,
+ N,
+ rabbit_channel_tracking,
+ clear_tracking_tables,
+ []) || N <- rabbit_ct_broker_helpers:get_node_configs(Config, nodename)].
+
+%% -------------------------------------------------------------------
+%% Test cases.
+%% -------------------------------------------------------------------
+
+most_basic_single_node_connection_and_channel_count(Config) ->
+ Username = proplists:get_value(rmq_username, Config),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username) =:= 0 andalso
+ count_channels_of_user(Config, Username) =:= 0
+ end),
+
+ [Conn] = open_connections(Config, [0]),
+ [Chan] = open_channels(Conn, 1),
+
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username) =:= 1 andalso
+ count_channels_of_user(Config, Username) =:= 1
+ end),
+ close_channels([Chan]),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_channels_of_user(Config, Username) =:= 0
+ end),
+ close_connections([Conn]),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username) =:= 0
+ end).
+
+single_node_single_user_connection_and_channel_count(Config) ->
+ Username = proplists:get_value(rmq_username, Config),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username) =:= 0 andalso
+ count_channels_of_user(Config, Username) =:= 0
+ end),
+
+ [Conn1] = open_connections(Config, [0]),
+ [Chan1] = open_channels(Conn1, 1),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username) =:= 1 andalso
+ count_channels_of_user(Config, Username) =:= 1
+ end),
+ close_channels([Chan1]),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_channels_of_user(Config, Username) =:= 0
+ end),
+ close_connections([Conn1]),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username) =:= 0
+ end),
+
+ [Conn2] = open_connections(Config, [0]),
+ Chans2 = [_|_] = open_channels(Conn2, 5),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username) =:= 1 andalso
+ count_channels_of_user(Config, Username) =:= 5
+ end),
+
+ [Conn3] = open_connections(Config, [0]),
+ Chans3 = [_|_] = open_channels(Conn3, 5),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username) =:= 2 andalso
+ count_channels_of_user(Config, Username) =:= 10
+ end),
+
+ [Conn4] = open_connections(Config, [0]),
+ _Chans4 = [_|_] = open_channels(Conn4, 5),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username) =:= 3 andalso
+ count_channels_of_user(Config, Username) =:= 15
+ end),
+
+ close_connections([Conn4]),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username) =:= 2 andalso
+ count_channels_of_user(Config, Username) =:= 10
+ end),
+
+ [Conn5] = open_connections(Config, [0]),
+ Chans5 = [_|_] = open_channels(Conn5, 5),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username) =:= 3 andalso
+ count_channels_of_user(Config, Username) =:= 15
+ end),
+
+ close_channels(Chans2 ++ Chans3 ++ Chans5),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_channels_of_user(Config, Username) =:= 0
+ end),
+
+ close_connections([Conn2, Conn3, Conn5]),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username) =:= 0
+ end).
+
+single_node_multiple_users_connection_and_channel_count(Config) ->
+ Username1 = <<"guest1">>,
+ Username2 = <<"guest2">>,
+
+ set_up_user(Config, Username1),
+ set_up_user(Config, Username2),
+
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username1) =:= 0 andalso
+ count_channels_of_user(Config, Username1) =:= 0
+ end),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username2) =:= 0 andalso
+ count_channels_of_user(Config, Username2) =:= 0
+ end),
+
+ [Conn1] = open_connections(Config, [{0, Username1}]),
+ Chans1 = [_|_] = open_channels(Conn1, 5),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username1) =:= 1 andalso
+ count_channels_of_user(Config, Username1) =:= 5
+ end),
+ close_channels(Chans1),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_channels_of_user(Config, Username1) =:= 0
+ end),
+ ?assertEqual(0, count_channels_of_user(Config, Username1)),
+ close_connections([Conn1]),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username1) =:= 0 andalso
+ count_channels_of_user(Config, Username1) =:= 0
+ end),
+
+ [Conn2] = open_connections(Config, [{0, Username2}]),
+ Chans2 = [_|_] = open_channels(Conn2, 5),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username2) =:= 1 andalso
+ count_channels_of_user(Config, Username2) =:= 5
+ end),
+
+ [Conn3] = open_connections(Config, [{0, Username1}]),
+ Chans3 = [_|_] = open_channels(Conn3, 5),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username1) =:= 1 andalso
+ count_channels_of_user(Config, Username1) =:= 5
+ end),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username2) =:= 1 andalso
+ count_channels_of_user(Config, Username2) =:= 5
+ end),
+
+ [Conn4] = open_connections(Config, [{0, Username1}]),
+ _Chans4 = [_|_] = open_channels(Conn4, 5),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username1) =:= 2 andalso
+ count_channels_of_user(Config, Username1) =:= 10
+ end),
+
+ close_connections([Conn4]),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username1) =:= 1 andalso
+ count_channels_of_user(Config, Username1) =:= 5
+ end),
+
+ [Conn5] = open_connections(Config, [{0, Username2}]),
+ Chans5 = [_|_] = open_channels(Conn5, 5),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username2) =:= 2 andalso
+ count_channels_of_user(Config, Username2) =:= 10
+ end),
+
+ [Conn6] = open_connections(Config, [{0, Username2}]),
+ Chans6 = [_|_] = open_channels(Conn6, 5),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username2) =:= 3 andalso
+ count_channels_of_user(Config, Username2) =:= 15
+ end),
+
+ close_channels(Chans2 ++ Chans3 ++ Chans5 ++ Chans6),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_channels_of_user(Config, Username1) =:= 0 andalso
+ count_channels_of_user(Config, Username2) =:= 0
+ end),
+
+ close_connections([Conn2, Conn3, Conn5, Conn6]),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username1) =:= 0 andalso
+ count_connections_of_user(Config, Username2) =:= 0
+ end),
+
+ rabbit_ct_broker_helpers:delete_user(Config, Username1),
+ rabbit_ct_broker_helpers:delete_user(Config, Username2).
+
+single_node_list_in_user(Config) ->
+ Username1 = <<"guest1">>,
+ Username2 = <<"guest2">>,
+
+ set_up_user(Config, Username1),
+ set_up_user(Config, Username2),
+
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ length(connections_in(Config, Username1)) =:= 0 andalso
+ length(connections_in(Config, Username2)) =:= 0
+ end),
+
+ ?assertEqual(0, length(channels_in(Config, Username1))),
+ ?assertEqual(0, length(channels_in(Config, Username2))),
+
+ [Conn1] = open_connections(Config, [{0, Username1}]),
+ [Chan1] = open_channels(Conn1, 1),
+ [#tracked_connection{username = Username1}] = connections_in(Config, Username1),
+ [#tracked_channel{username = Username1}] = channels_in(Config, Username1),
+ close_channels([Chan1]),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ length(channels_in(Config, Username1)) =:= 0
+ end),
+ close_connections([Conn1]),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ length(connections_in(Config, Username1)) =:= 0
+ end),
+
+ [Conn2] = open_connections(Config, [{0, Username2}]),
+ [Chan2] = open_channels(Conn2, 1),
+ [#tracked_connection{username = Username2}] = connections_in(Config, Username2),
+ [#tracked_channel{username = Username2}] = channels_in(Config, Username2),
+
+ [Conn3] = open_connections(Config, [{0, Username1}]),
+ [Chan3] = open_channels(Conn3, 1),
+ [#tracked_connection{username = Username1}] = connections_in(Config, Username1),
+ [#tracked_channel{username = Username1}] = channels_in(Config, Username1),
+
+ [Conn4] = open_connections(Config, [{0, Username1}]),
+ [_Chan4] = open_channels(Conn4, 1),
+ close_connections([Conn4]),
+ [#tracked_connection{username = Username1}] = connections_in(Config, Username1),
+ [#tracked_channel{username = Username1}] = channels_in(Config, Username1),
+
+ [Conn5, Conn6] = open_connections(Config, [{0, Username2}, {0, Username2}]),
+ [Chan5] = open_channels(Conn5, 1),
+ [Chan6] = open_channels(Conn6, 1),
+ [<<"guest1">>, <<"guest2">>] =
+ lists:usort(lists:map(fun (#tracked_connection{username = V}) -> V end,
+ all_connections(Config))),
+ [<<"guest1">>, <<"guest2">>] =
+ lists:usort(lists:map(fun (#tracked_channel{username = V}) -> V end,
+ all_channels(Config))),
+
+ close_channels([Chan2, Chan3, Chan5, Chan6]),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ length(all_channels(Config)) =:= 0
+ end),
+
+ close_connections([Conn2, Conn3, Conn5, Conn6]),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ length(all_connections(Config)) =:= 0
+ end),
+
+ rabbit_ct_broker_helpers:delete_user(Config, Username1),
+ rabbit_ct_broker_helpers:delete_user(Config, Username2).
+
+most_basic_cluster_connection_and_channel_count(Config) ->
+ Username = proplists:get_value(rmq_username, Config),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username) =:= 0 andalso
+ count_channels_of_user(Config, Username) =:= 0
+ end),
+
+ [Conn1] = open_connections(Config, [0]),
+ Chans1 = [_|_] = open_channels(Conn1, 5),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username) =:= 1 andalso
+ count_channels_of_user(Config, Username) =:= 5
+ end),
+ ?assertEqual(1, count_connections_of_user(Config, Username)),
+ ?assertEqual(5, count_channels_of_user(Config, Username)),
+
+ [Conn2] = open_connections(Config, [1]),
+ Chans2 = [_|_] = open_channels(Conn2, 5),
+ ?assertEqual(2, count_connections_of_user(Config, Username)),
+ ?assertEqual(10, count_channels_of_user(Config, Username)),
+
+ [Conn3] = open_connections(Config, [1]),
+ Chans3 = [_|_] = open_channels(Conn3, 5),
+ ?assertEqual(3, count_connections_of_user(Config, Username)),
+ ?assertEqual(15, count_channels_of_user(Config, Username)),
+
+ close_channels(Chans1 ++ Chans2 ++ Chans3),
+ ?awaitMatch(0, count_channels_of_user(Config, Username), 60000),
+
+ close_connections([Conn1, Conn2, Conn3]),
+ ?awaitMatch(0, count_connections_of_user(Config, Username), 60000).
+
+cluster_single_user_connection_and_channel_count(Config) ->
+ Username = proplists:get_value(rmq_username, Config),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username) =:= 0 andalso
+ count_channels_of_user(Config, Username) =:= 0
+ end),
+
+ [Conn1] = open_connections(Config, [0]),
+ _Chans1 = [_|_] = open_channels(Conn1, 5),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username) =:= 1 andalso
+ count_channels_of_user(Config, Username) =:= 5
+ end),
+
+ close_connections([Conn1]),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username) =:= 0 andalso
+ count_channels_of_user(Config, Username) =:= 0
+ end),
+
+ [Conn2] = open_connections(Config, [1]),
+ Chans2 = [_|_] = open_channels(Conn2, 5),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username) =:= 1 andalso
+ count_channels_of_user(Config, Username) =:= 5
+ end),
+
+ [Conn3] = open_connections(Config, [0]),
+ Chans3 = [_|_] = open_channels(Conn3, 5),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username) =:= 2 andalso
+ count_channels_of_user(Config, Username) =:= 10
+ end),
+
+ [Conn4] = open_connections(Config, [1]),
+ Chans4 = [_|_] = open_channels(Conn4, 5),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username) =:= 3 andalso
+ count_channels_of_user(Config, Username) =:= 15
+ end),
+
+ close_channels(Chans2 ++ Chans3 ++ Chans4),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_channels_of_user(Config, Username) =:= 0
+ end),
+
+ close_connections([Conn2, Conn3, Conn4]),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username) =:= 0
+ end).
+
+cluster_multiple_users_connection_and_channel_count(Config) ->
+ Username1 = <<"guest1">>,
+ Username2 = <<"guest2">>,
+
+ set_up_user(Config, Username1),
+ set_up_user(Config, Username2),
+
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username1) =:= 0 andalso
+ count_connections_of_user(Config, Username2) =:= 0
+ end),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_channels_of_user(Config, Username1) =:= 0 andalso
+ count_channels_of_user(Config, Username2) =:= 0
+ end),
+
+ [Conn1] = open_connections(Config, [{0, Username1}]),
+ _Chans1 = [_|_] = open_channels(Conn1, 5),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username1) =:= 1 andalso
+ count_channels_of_user(Config, Username1) =:= 5
+ end),
+ close_connections([Conn1]),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username1) =:= 0 andalso
+ count_channels_of_user(Config, Username1) =:= 0
+ end),
+
+ [Conn2] = open_connections(Config, [{1, Username2}]),
+ Chans2 = [_|_] = open_channels(Conn2, 5),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username2) =:= 1 andalso
+ count_channels_of_user(Config, Username2) =:= 5
+ end),
+
+ [Conn3] = open_connections(Config, [{1, Username1}]),
+ Chans3 = [_|_] = open_channels(Conn3, 5),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username1) =:= 1 andalso
+ count_channels_of_user(Config, Username1) =:= 5
+ end),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username2) =:= 1 andalso
+ count_channels_of_user(Config, Username2) =:= 5
+ end),
+
+ [Conn4] = open_connections(Config, [{0, Username1}]),
+ _Chans4 = [_|_] = open_channels(Conn4, 5),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username1) =:= 2 andalso
+ count_channels_of_user(Config, Username1) =:= 10
+ end),
+
+ close_connections([Conn4]),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username1) =:= 1 andalso
+ count_channels_of_user(Config, Username1) =:= 5
+ end),
+
+ [Conn5] = open_connections(Config, [{1, Username2}]),
+ Chans5 = [_|_] = open_channels(Conn5, 5),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username2) =:= 2 andalso
+ count_channels_of_user(Config, Username2) =:= 10
+ end),
+
+ [Conn6] = open_connections(Config, [{0, Username2}]),
+ Chans6 = [_|_] = open_channels(Conn6, 5),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username2) =:= 3 andalso
+ count_channels_of_user(Config, Username2) =:= 15
+ end),
+
+ close_channels(Chans2 ++ Chans3 ++ Chans5 ++ Chans6),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_channels_of_user(Config, Username1) =:= 0 andalso
+ count_channels_of_user(Config, Username2) =:= 0
+ end),
+
+ close_connections([Conn2, Conn3, Conn5, Conn6]),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username1) =:= 0 andalso
+ count_connections_of_user(Config, Username2) =:= 0
+ end),
+
+ rabbit_ct_broker_helpers:delete_user(Config, Username1),
+ rabbit_ct_broker_helpers:delete_user(Config, Username2).
+
+cluster_node_restart_connection_and_channel_count(Config) ->
+ Username = proplists:get_value(rmq_username, Config),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username) =:= 0 andalso
+ count_channels_of_user(Config, Username) =:= 0
+ end),
+
+ [Conn1] = open_connections(Config, [0]),
+ _Chans1 = [_|_] = open_channels(Conn1, 5),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username) =:= 1 andalso
+ count_channels_of_user(Config, Username) =:= 5
+ end),
+ close_connections([Conn1]),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username) =:= 0 andalso
+ count_channels_of_user(Config, Username) =:= 0
+ end),
+
+ [Conn2] = open_connections(Config, [1]),
+ Chans2 = [_|_] = open_channels(Conn2, 5),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username) =:= 1 andalso
+ count_channels_of_user(Config, Username) =:= 5
+ end),
+
+ [Conn3] = open_connections(Config, [0]),
+ Chans3 = [_|_] = open_channels(Conn3, 5),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username) =:= 2 andalso
+ count_channels_of_user(Config, Username) =:= 10
+ end),
+
+ [Conn4] = open_connections(Config, [1]),
+ _Chans4 = [_|_] = open_channels(Conn4, 5),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username) =:= 3 andalso
+ count_channels_of_user(Config, Username) =:= 15
+ end),
+
+ [Conn5] = open_connections(Config, [1]),
+ Chans5 = [_|_] = open_channels(Conn5, 5),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username) =:= 4 andalso
+ count_channels_of_user(Config, Username) =:= 20
+ end),
+
+ rabbit_ct_broker_helpers:restart_broker(Config, 1),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username) =:= 1 andalso
+ count_channels_of_user(Config, Username) =:= 5
+ end),
+
+ close_channels(Chans2 ++ Chans3 ++ Chans5),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_channels_of_user(Config, Username) =:= 0
+ end),
+
+ close_connections([Conn2, Conn3, Conn4, Conn5]),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username) =:= 0
+ end).
+
+cluster_node_list_on_node(Config) ->
+ [A, B] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ length(all_connections(Config)) =:= 0 andalso
+ length(all_channels(Config)) =:= 0 andalso
+ length(connections_on_node(Config, 0)) =:= 0 andalso
+ length(channels_on_node(Config, 0)) =:= 0
+ end),
+
+ [Conn1] = open_connections(Config, [0]),
+ _Chans1 = [_|_] = open_channels(Conn1, 5),
+ [#tracked_connection{node = A}] = connections_on_node(Config, 0),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ length([Ch || Ch <- channels_on_node(Config, 0), Ch#tracked_channel.node =:= A]) =:= 5
+ end),
+ close_connections([Conn1]),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ length(connections_on_node(Config, 0)) =:= 0 andalso
+ length(channels_on_node(Config, 0)) =:= 0
+ end),
+
+ [Conn2] = open_connections(Config, [1]),
+ _Chans2 = [_|_] = open_channels(Conn2, 5),
+ [#tracked_connection{node = B}] = connections_on_node(Config, 1),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ length([Ch || Ch <- channels_on_node(Config, 1), Ch#tracked_channel.node =:= B]) =:= 5
+ end),
+
+ [Conn3] = open_connections(Config, [0]),
+ Chans3 = [_|_] = open_channels(Conn3, 5),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ length(connections_on_node(Config, 0)) =:= 1 andalso
+ length(channels_on_node(Config, 0)) =:= 5
+ end),
+
+ [Conn4] = open_connections(Config, [1]),
+ _Chans4 = [_|_] = open_channels(Conn4, 5),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ length(connections_on_node(Config, 1)) =:= 2 andalso
+ length(channels_on_node(Config, 1)) =:= 10
+ end),
+
+ close_connections([Conn4]),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ length(connections_on_node(Config, 1)) =:= 1 andalso
+ length(channels_on_node(Config, 1)) =:= 5
+ end),
+
+ [Conn5] = open_connections(Config, [0]),
+ Chans5 = [_|_] = open_channels(Conn5, 5),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ length(connections_on_node(Config, 0)) =:= 2 andalso
+ length(channels_on_node(Config, 0)) =:= 10
+ end),
+
+ rabbit_ct_broker_helpers:stop_broker(Config, 1),
+ await_running_node_refresh(Config, 0),
+
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ length(all_connections(Config)) =:= 2 andalso
+ length(all_channels(Config)) =:= 10
+ end),
+
+ close_channels(Chans3 ++ Chans5),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ length(all_channels(Config)) =:= 0
+ end),
+
+ close_connections([Conn3, Conn5]),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ length(all_connections(Config)) =:= 0
+ end),
+
+ rabbit_ct_broker_helpers:start_broker(Config, 1).
+
+single_node_single_user_limit(Config) ->
+ single_node_single_user_limit_with(Config, 5, 25),
+ single_node_single_user_limit_with(Config, -1, -1).
+
+single_node_single_user_limit_with(Config, ConnLimit, ChLimit) ->
+ Username = proplists:get_value(rmq_username, Config),
+ set_user_connection_and_channel_limit(Config, Username, 3, 15),
+
+ ?assertEqual(0, count_connections_of_user(Config, Username)),
+ ?assertEqual(0, count_channels_of_user(Config, Username)),
+
+ [Conn1, Conn2, Conn3] = Conns1 = open_connections(Config, [0, 0, 0]),
+ [_Chans1, Chans2, Chans3] = [open_channels(Conn, 5) || Conn <- Conns1],
+
+    %% the limit is now reached; further attempts must be rejected
+ expect_that_client_connection_is_rejected(Config, 0),
+ expect_that_client_connection_is_rejected(Config, 0),
+ expect_that_client_connection_is_rejected(Config, 0),
+ expect_that_client_channel_is_rejected(Conn1),
+
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ is_process_alive(Conn1) =:= false andalso
+ is_process_alive(Conn2) andalso
+ is_process_alive(Conn3)
+ end),
+
+ set_user_connection_and_channel_limit(Config, Username, ConnLimit, ChLimit),
+ [Conn4, Conn5] = Conns2 = open_connections(Config, [0, 0]),
+ [Chans4, Chans5] = [open_channels(Conn, 5) || Conn <- Conns2],
+
+ close_channels(Chans2 ++ Chans3 ++ Chans4 ++ Chans5),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_channels_of_user(Config, Username) =:= 0
+ end),
+
+ close_connections([Conn1, Conn2, Conn3, Conn4, Conn5]),
+ ?awaitMatch(0, count_connections_of_user(Config, Username), 60000),
+
+ set_user_connection_and_channel_limit(Config, Username, -1, -1).
+
+single_node_single_user_zero_limit(Config) ->
+ Username = proplists:get_value(rmq_username, Config),
+ set_user_connection_and_channel_limit(Config, Username, 0, 0),
+
+ ?assertEqual(0, count_connections_of_user(Config, Username)),
+ ?assertEqual(0, count_channels_of_user(Config, Username)),
+
+ %% with limit = 0 no connections are allowed
+ expect_that_client_connection_is_rejected(Config),
+ expect_that_client_connection_is_rejected(Config),
+ expect_that_client_connection_is_rejected(Config),
+
+ %% with limit = 0 no channels are allowed
+ set_user_connection_and_channel_limit(Config, Username, 1, 0),
+ [ConnA] = open_connections(Config, [0]),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username) =:= 1
+ end),
+ expect_that_client_channel_is_rejected(ConnA),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ is_process_alive(ConnA) =:= false andalso
+ count_connections_of_user(Config, Username) =:= 0 andalso
+ count_channels_of_user(Config, Username) =:= 0
+ end),
+
+ set_user_connection_and_channel_limit(Config, Username, -1, -1),
+ [Conn1, Conn2] = Conns1 = open_connections(Config, [0, 0]),
+ [Chans1, Chans2] = [open_channels(Conn, 5) || Conn <- Conns1],
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username) =:= 2 andalso
+ count_channels_of_user(Config, Username) =:= 10
+ end),
+
+ close_channels(Chans1 ++ Chans2),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_channels_of_user(Config, Username) =:= 0
+ end),
+
+ close_connections([Conn1, Conn2]),
+ ?awaitMatch(0, count_connections_of_user(Config, Username), 60000).
+
+single_node_single_user_clear_limits(Config) ->
+ Username = proplists:get_value(rmq_username, Config),
+ set_user_connection_and_channel_limit(Config, Username, 3, 15),
+
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username) =:= 0 andalso
+ count_channels_of_user(Config, Username) =:= 0
+ end),
+
+ [Conn1, Conn2, Conn3] = Conns1 = open_connections(Config, [0, 0, 0]),
+ [_Chans1, Chans2, Chans3] = [open_channels(Conn, 5) || Conn <- Conns1],
+
+    %% the limit is now reached; further attempts must be rejected
+ expect_that_client_connection_is_rejected(Config, 0),
+ expect_that_client_connection_is_rejected(Config, 0),
+ expect_that_client_connection_is_rejected(Config, 0),
+ expect_that_client_channel_is_rejected(Conn1),
+
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ is_process_alive(Conn1) =:= false andalso
+ is_process_alive(Conn2) andalso
+ is_process_alive(Conn3)
+ end),
+
+ %% reach limit again
+ [Conn4] = open_connections(Config, [{0, Username}]),
+ Chans4 = [_|_] = open_channels(Conn4, 5),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username) =:= 3 andalso
+ count_channels_of_user(Config, Username) =:= 15
+ end),
+
+ clear_all_user_limits(Config, Username),
+
+ [Conn5, Conn6, Conn7] = Conns2 = open_connections(Config, [0, 0, 0]),
+ [Chans5, Chans6, Chans7] = [open_channels(Conn, 5) || Conn <- Conns2],
+
+ close_channels(Chans2 ++ Chans3 ++ Chans4 ++ Chans5 ++ Chans6 ++ Chans7),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_channels_of_user(Config, Username) =:= 0
+ end),
+
+ close_connections([Conn2, Conn3, Conn4, Conn5, Conn6, Conn7]),
+ ?awaitMatch(0, count_connections_of_user(Config, Username), 5000),
+
+ set_user_connection_and_channel_limit(Config, Username, -1, -1).
+
+single_node_multiple_users_clear_limits(Config) ->
+ Username1 = <<"guest1">>,
+ Username2 = <<"guest2">>,
+
+ set_up_user(Config, Username1),
+ set_up_user(Config, Username2),
+
+ set_user_connection_and_channel_limit(Config, Username1, 0, 0),
+ set_user_connection_and_channel_limit(Config, Username2, 0, 0),
+
+ ?assertEqual(0, count_connections_of_user(Config, Username1)),
+ ?assertEqual(0, count_connections_of_user(Config, Username2)),
+ ?assertEqual(0, count_channels_of_user(Config, Username1)),
+ ?assertEqual(0, count_channels_of_user(Config, Username2)),
+
+ %% with limit = 0 no connections are allowed
+ expect_that_client_connection_is_rejected(Config, 0, Username1),
+ expect_that_client_connection_is_rejected(Config, 0, Username2),
+ expect_that_client_connection_is_rejected(Config, 0, Username1),
+
+ %% with limit = 0 no channels are allowed
+ set_user_connection_and_channel_limit(Config, Username1, 1, 0),
+ set_user_connection_and_channel_limit(Config, Username2, 1, 0),
+ [ConnA, ConnB] = open_connections(Config, [{0, Username1}, {0, Username2}]),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username1) =:= 1
+ end),
+ expect_that_client_channel_is_rejected(ConnA),
+ expect_that_client_channel_is_rejected(ConnB),
+
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ is_process_alive(ConnA) =:= false andalso
+ is_process_alive(ConnB) =:= false
+ end),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username1) =:= 0 andalso
+ count_connections_of_user(Config, Username2) =:= 0
+ end),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_channels_of_user(Config, Username1) =:= 0 andalso
+ count_channels_of_user(Config, Username2) =:= 0
+ end),
+
+ clear_all_user_limits(Config, Username1),
+ set_user_channel_limit_only(Config, Username2, -1),
+ set_user_connection_limit_only(Config, Username2, -1),
+
+ [Conn1, Conn2] = Conns1 = open_connections(Config, [{0, Username1}, {0, Username1}]),
+ [Chans1, Chans2] = [open_channels(Conn, 5) || Conn <- Conns1],
+
+ close_channels(Chans1 ++ Chans2),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_channels_of_user(Config, Username1) =:= 0 andalso
+ count_channels_of_user(Config, Username2) =:= 0
+ end),
+
+ close_connections([Conn1, Conn2]),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username1) =:= 0 andalso
+ count_connections_of_user(Config, Username2) =:= 0
+ end),
+
+ set_user_connection_and_channel_limit(Config, Username1, -1, -1),
+ set_user_connection_and_channel_limit(Config, Username2, -1, -1).
+
+single_node_multiple_users_limit(Config) ->
+ Username1 = <<"guest1">>,
+ Username2 = <<"guest2">>,
+
+ set_up_user(Config, Username1),
+ set_up_user(Config, Username2),
+
+ set_user_connection_and_channel_limit(Config, Username1, 2, 10),
+ set_user_connection_and_channel_limit(Config, Username2, 2, 10),
+
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username1) =:= 0 andalso
+ count_connections_of_user(Config, Username2) =:= 0
+ end),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_channels_of_user(Config, Username1) =:= 0 andalso
+ count_channels_of_user(Config, Username2) =:= 0
+ end),
+
+ [Conn1, Conn2, Conn3, Conn4] = Conns1 = open_connections(Config, [
+ {0, Username1},
+ {0, Username1},
+ {0, Username2},
+ {0, Username2}]),
+
+ [_Chans1, Chans2, Chans3, Chans4] = [open_channels(Conn, 5) || Conn <- Conns1],
+
+    %% the limit is now reached; further attempts must be rejected
+ expect_that_client_connection_is_rejected(Config, 0, Username1),
+ expect_that_client_connection_is_rejected(Config, 0, Username2),
+ expect_that_client_channel_is_rejected(Conn1),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ is_process_alive(Conn1) =:= false andalso
+ is_process_alive(Conn3) =:= true
+ end),
+
+ [Conn5] = open_connections(Config, [0]),
+ Chans5 = [_|_] = open_channels(Conn5, 5),
+
+ set_user_connection_and_channel_limit(Config, Username1, 5, 25),
+ set_user_connection_and_channel_limit(Config, Username2, -10, -50),
+
+ [Conn6, Conn7, Conn8, Conn9, Conn10] = Conns2 = open_connections(Config, [
+ {0, Username1},
+ {0, Username1},
+ {0, Username1},
+ {0, Username2},
+ {0, Username2}]),
+
+ [Chans6, Chans7, Chans8, Chans9, Chans10] = [open_channels(Conn, 5) || Conn <- Conns2],
+
+ close_channels(Chans2 ++ Chans3 ++ Chans4 ++ Chans5 ++ Chans6 ++
+ Chans7 ++ Chans8 ++ Chans9 ++ Chans10),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_channels_of_user(Config, Username1) =:= 0 andalso
+ count_channels_of_user(Config, Username2) =:= 0
+ end),
+
+ close_connections([Conn2, Conn3, Conn4, Conn5, Conn6,
+ Conn7, Conn8, Conn9, Conn10]),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username1) =:= 0 andalso
+ count_connections_of_user(Config, Username2) =:= 0
+ end),
+
+ set_user_connection_and_channel_limit(Config, Username1, -1, -1),
+ set_user_connection_and_channel_limit(Config, Username2, -1, -1),
+
+ rabbit_ct_broker_helpers:delete_user(Config, Username1),
+ rabbit_ct_broker_helpers:delete_user(Config, Username2).
+
+single_node_multiple_users_zero_limit(Config) ->
+ Username1 = <<"guest1">>,
+ Username2 = <<"guest2">>,
+
+ set_up_user(Config, Username1),
+ set_up_user(Config, Username2),
+
+ set_user_connection_and_channel_limit(Config, Username1, 0, 0),
+ set_user_connection_and_channel_limit(Config, Username2, 0, 0),
+
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username1) =:= 0 andalso
+ count_connections_of_user(Config, Username2) =:= 0
+ end),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_channels_of_user(Config, Username1) =:= 0 andalso
+ count_channels_of_user(Config, Username2) =:= 0
+ end),
+
+ %% with limit = 0 no connections are allowed
+ expect_that_client_connection_is_rejected(Config, 0, Username1),
+ expect_that_client_connection_is_rejected(Config, 0, Username2),
+ expect_that_client_connection_is_rejected(Config, 0, Username1),
+
+ %% with limit = 0 no channels are allowed
+ set_user_connection_and_channel_limit(Config, Username1, 1, 0),
+ set_user_connection_and_channel_limit(Config, Username2, 1, 0),
+ [ConnA, ConnB] = open_connections(Config, [{0, Username1}, {0, Username2}]),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username1) =:= 1
+ end),
+ expect_that_client_channel_is_rejected(ConnA),
+ expect_that_client_channel_is_rejected(ConnB),
+
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ is_process_alive(ConnA) =:= false andalso
+ is_process_alive(ConnB) =:= false
+ end),
+
+ ?assertEqual(false, is_process_alive(ConnA)),
+ ?assertEqual(false, is_process_alive(ConnB)),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username1) =:= 0 andalso
+ count_connections_of_user(Config, Username2) =:= 0
+ end),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_channels_of_user(Config, Username1) =:= 0 andalso
+ count_channels_of_user(Config, Username2) =:= 0
+ end),
+
+ set_user_connection_and_channel_limit(Config, Username1, -1, -1),
+ [Conn1, Conn2] = Conns1 = open_connections(Config, [{0, Username1}, {0, Username1}]),
+ [Chans1, Chans2] = [open_channels(Conn, 5) || Conn <- Conns1],
+
+ close_channels(Chans1 ++ Chans2),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_channels_of_user(Config, Username1) =:= 0 andalso
+ count_channels_of_user(Config, Username2) =:= 0
+ end),
+
+ close_connections([Conn1, Conn2]),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username1) =:= 0 andalso
+ count_connections_of_user(Config, Username2) =:= 0
+ end),
+
+ set_user_connection_and_channel_limit(Config, Username1, -1, -1),
+ set_user_connection_and_channel_limit(Config, Username2, -1, -1).
+
+cluster_single_user_limit(Config) ->
+ Username = proplists:get_value(rmq_username, Config),
+ set_user_connection_limit_only(Config, Username, 2),
+ set_user_channel_limit_only(Config, Username, 10),
+
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+            count_connections_of_user(Config, Username) =:= 0 andalso
+ count_channels_of_user(Config, Username) =:= 0
+ end),
+
+ %% here connections and channels are opened to different nodes
+ [Conn1, Conn2] = Conns1 = open_connections(Config, [{0, Username}, {1, Username}]),
+ [_Chans1, Chans2] = [open_channels(Conn, 5) || Conn <- Conns1],
+
+    %% the limit is now reached; further attempts must be rejected
+ expect_that_client_connection_is_rejected(Config, 0, Username),
+ expect_that_client_connection_is_rejected(Config, 1, Username),
+ expect_that_client_channel_is_rejected(Conn1),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ is_process_alive(Conn1) =:= false andalso
+ is_process_alive(Conn2) =:= true
+ end),
+
+ set_user_connection_and_channel_limit(Config, Username, 5, 25),
+
+ [Conn3, Conn4] = Conns2 = open_connections(Config, [{0, Username}, {0, Username}]),
+ [Chans3, Chans4] = [open_channels(Conn, 5) || Conn <- Conns2],
+
+ close_channels(Chans2 ++ Chans3 ++ Chans4),
+ ?awaitMatch(0, count_channels_of_user(Config, Username), 60000),
+
+ close_connections([Conn2, Conn3, Conn4]),
+ ?awaitMatch(0, count_connections_of_user(Config, Username), 60000),
+
+ set_user_connection_and_channel_limit(Config, Username, -1, -1).
+
+cluster_single_user_limit2(Config) ->
+ Username = proplists:get_value(rmq_username, Config),
+ set_user_connection_and_channel_limit(Config, Username, 2, 10),
+
+ ?assertEqual(0, count_connections_of_user(Config, Username)),
+ ?assertEqual(0, count_channels_of_user(Config, Username)),
+
+ %% here a limit is reached on one node first
+ [Conn1, Conn2] = Conns1 = open_connections(Config, [{0, Username}, {0, Username}]),
+ [_Chans1, Chans2] = [open_channels(Conn, 5) || Conn <- Conns1],
+
+    %% the limit is now reached; further attempts must be rejected
+ expect_that_client_connection_is_rejected(Config, 0, Username),
+ expect_that_client_connection_is_rejected(Config, 1, Username),
+ expect_that_client_channel_is_rejected(Conn1),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ is_process_alive(Conn1) =:= false andalso
+ is_process_alive(Conn2) =:= true
+ end),
+
+ set_user_connection_and_channel_limit(Config, Username, 5, 25),
+
+ [Conn3, Conn4, Conn5, Conn6, {error, not_allowed}] = open_connections(Config, [
+ {1, Username},
+ {1, Username},
+ {1, Username},
+ {1, Username},
+ {1, Username}]),
+
+ [Chans3, Chans4, Chans5, Chans6, [{error, not_allowed}]] =
+ [open_channels(Conn, 1) || Conn <- [Conn3, Conn4, Conn5, Conn6, Conn1]],
+
+ close_channels(Chans2 ++ Chans3 ++ Chans4 ++ Chans5 ++ Chans6),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_channels_of_user(Config, Username) =:= 0
+ end),
+
+ close_connections([Conn2, Conn3, Conn4, Conn5, Conn6]),
+ ?awaitMatch(0, count_connections_of_user(Config, Username), 5000),
+
+ set_user_connection_and_channel_limit(Config, Username, -1, -1).
+
+cluster_single_user_zero_limit(Config) ->
+ Username = proplists:get_value(rmq_username, Config),
+ set_user_connection_and_channel_limit(Config, Username, 0, 0),
+
+ ?assertEqual(0, count_connections_of_user(Config, Username)),
+ ?assertEqual(0, count_channels_of_user(Config, Username)),
+
+ %% with limit = 0 no connections are allowed
+ expect_that_client_connection_is_rejected(Config, 0),
+ expect_that_client_connection_is_rejected(Config, 1),
+ expect_that_client_connection_is_rejected(Config, 0),
+
+ %% with limit = 0 no channels are allowed
+ set_user_connection_and_channel_limit(Config, Username, 1, 0),
+ [ConnA] = open_connections(Config, [0]),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username) =:= 1
+ end),
+ expect_that_client_channel_is_rejected(ConnA),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username) =:= 0 andalso
+ count_channels_of_user(Config, Username) =:= 0
+ end),
+ ?assertEqual(false, is_process_alive(ConnA)),
+
+ set_user_connection_and_channel_limit(Config, Username, -1, -1),
+ [Conn1, Conn2, Conn3, Conn4] = Conns1 = open_connections(Config, [0, 1, 0, 1]),
+ [Chans1, Chans2, Chans3, Chans4] = [open_channels(Conn, 5) || Conn <- Conns1],
+
+ close_channels(Chans1 ++ Chans2 ++ Chans3 ++ Chans4),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_channels_of_user(Config, Username) =:= 0
+ end),
+
+ close_connections([Conn1, Conn2, Conn3, Conn4]),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username) =:= 0
+ end),
+
+ set_user_connection_and_channel_limit(Config, Username, -1, -1).
+
+cluster_single_user_clear_limits(Config) ->
+ Username = proplists:get_value(rmq_username, Config),
+ set_user_connection_and_channel_limit(Config, Username, 2, 10),
+
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username) =:= 0 andalso
+ count_channels_of_user(Config, Username) =:= 0
+ end),
+
+ %% here a limit is reached on one node first
+ [Conn1, Conn2] = Conns1 = open_connections(Config, [{0, Username}, {0, Username}]),
+ [_Chans1, Chans2] = [open_channels(Conn, 5) || Conn <- Conns1],
+
+    %% the limit is now reached; further attempts must be rejected
+ expect_that_client_connection_is_rejected(Config, 0, Username),
+ expect_that_client_connection_is_rejected(Config, 1, Username),
+ expect_that_client_channel_is_rejected(Conn1),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ is_process_alive(Conn1) =:= false andalso
+ is_process_alive(Conn2) =:= true
+ end),
+ clear_all_user_limits(Config, Username),
+
+ [Conn3, Conn4, Conn5, Conn6, Conn7] = open_connections(Config, [
+ {1, Username},
+ {1, Username},
+ {1, Username},
+ {1, Username},
+ {1, Username}]),
+
+ [Chans3, Chans4, Chans5, Chans6, Chans7] =
+ [open_channels(Conn, 1) || Conn <- [Conn3, Conn4, Conn5, Conn6, Conn7]],
+
+ close_channels(Chans2 ++ Chans3 ++ Chans4 ++ Chans5 ++ Chans6 ++ Chans7),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_channels_of_user(Config, Username) =:= 0
+ end),
+
+ close_connections([Conn2, Conn3, Conn4, Conn5, Conn6, Conn7]),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username) =:= 0
+ end),
+
+ set_user_connection_and_channel_limit(Config, Username, -1, -1).
+
+cluster_multiple_users_clear_limits(Config) ->
+ Username1 = <<"guest1">>,
+ Username2 = <<"guest2">>,
+
+ set_up_user(Config, Username1),
+ set_up_user(Config, Username2),
+
+ set_user_connection_and_channel_limit(Config, Username1, 0, 0),
+ set_user_connection_and_channel_limit(Config, Username2, 0, 0),
+
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username1) =:= 0 andalso
+ count_connections_of_user(Config, Username2) =:= 0
+ end),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_channels_of_user(Config, Username1) =:= 0 andalso
+ count_channels_of_user(Config, Username2) =:= 0
+ end),
+
+ %% with limit = 0 no connections are allowed
+ expect_that_client_connection_is_rejected(Config, 0, Username1),
+ expect_that_client_connection_is_rejected(Config, 0, Username2),
+ expect_that_client_connection_is_rejected(Config, 1, Username1),
+ expect_that_client_connection_is_rejected(Config, 1, Username2),
+
+ %% with limit = 0 no channels are allowed
+ set_user_connection_and_channel_limit(Config, Username1, 1, 0),
+ set_user_connection_and_channel_limit(Config, Username2, 1, 0),
+ [ConnA, ConnB] = open_connections(Config, [{0, Username1}, {1, Username2}]),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username1) =:= 1 andalso
+ count_connections_of_user(Config, Username2) =:= 1
+ end),
+ expect_that_client_channel_is_rejected(ConnA),
+
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ is_process_alive(ConnA) =:= false andalso
+ is_process_alive(ConnB) =:= true
+ end),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username1) =:= 0 andalso
+ count_connections_of_user(Config, Username2) =:= 1
+ end),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_channels_of_user(Config, Username1) =:= 0 andalso
+ count_channels_of_user(Config, Username2) =:= 0
+ end),
+ close_connections([ConnB]),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username2) =:= 0 andalso
+ count_channels_of_user(Config, Username2) =:= 0
+ end),
+ ?assertEqual(false, is_process_alive(ConnB)),
+
+ clear_all_user_limits(Config, Username1),
+ clear_all_user_limits(Config, Username2),
+
+ [Conn1, Conn2, Conn3, Conn4] = Conns1 = open_connections(Config, [
+ {0, Username1},
+ {0, Username2},
+ {1, Username1},
+ {1, Username2}]),
+
+ [Chans1, Chans2, Chans3, Chans4] = [open_channels(Conn, 5) || Conn <- Conns1],
+
+ close_channels(Chans1 ++ Chans2 ++ Chans3 ++ Chans4),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_channels_of_user(Config, Username1) =:= 0 andalso
+ count_channels_of_user(Config, Username2) =:= 0
+ end),
+
+ close_connections([Conn1, Conn2, Conn3, Conn4]),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username1) =:= 0 andalso
+ count_connections_of_user(Config, Username2) =:= 0
+ end),
+
+ set_user_connection_and_channel_limit(Config, Username1, -1, -1),
+ set_user_connection_and_channel_limit(Config, Username2, -1, -1).
+
+cluster_multiple_users_zero_limit(Config) ->
+ Username1 = <<"guest1">>,
+ Username2 = <<"guest2">>,
+
+ set_up_user(Config, Username1),
+ set_up_user(Config, Username2),
+
+ set_user_connection_and_channel_limit(Config, Username1, 0, 0),
+ set_user_connection_and_channel_limit(Config, Username2, 0, 0),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username1) =:= 0 andalso
+ count_connections_of_user(Config, Username2) =:= 0
+ end),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_channels_of_user(Config, Username1) =:= 0 andalso
+ count_channels_of_user(Config, Username2) =:= 0
+ end),
+
+ %% with limit = 0 no connections are allowed
+ expect_that_client_connection_is_rejected(Config, 0, Username1),
+ expect_that_client_connection_is_rejected(Config, 0, Username2),
+ expect_that_client_connection_is_rejected(Config, 1, Username1),
+ expect_that_client_connection_is_rejected(Config, 1, Username2),
+
+ %% with limit = 0 no channels are allowed
+ set_user_connection_and_channel_limit(Config, Username1, 1, 0),
+ set_user_connection_and_channel_limit(Config, Username2, 1, 0),
+ [ConnA, ConnB] = open_connections(Config, [{0, Username1}, {1, Username2}]),
+
+ expect_that_client_channel_is_rejected(ConnA),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username1) =:= 0 andalso
+ count_connections_of_user(Config, Username2) =:= 1
+ end),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_channels_of_user(Config, Username1) =:= 0 andalso
+ count_channels_of_user(Config, Username2) =:= 0
+ end),
+ ?assertEqual(false, is_process_alive(ConnA)),
+ ?assertEqual(true, is_process_alive(ConnB)),
+ close_connections([ConnB]),
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ count_connections_of_user(Config, Username2) =:= 0 andalso
+ count_channels_of_user(Config, Username2) =:= 0
+ end),
+ ?assertEqual(false, is_process_alive(ConnB)),
+
+ set_user_connection_and_channel_limit(Config, Username1, -1, -1),
+ set_user_connection_and_channel_limit(Config, Username2, -1, -1),
+
+ [Conn1, Conn2, Conn3, Conn4] = Conns1 = open_connections(Config, [
+ {0, Username1},
+ {0, Username2},
+ {1, Username1},
+ {1, Username2}]),
+
+ [Chans1, Chans2, Chans3, Chans4] = [open_channels(Conn, 5) || Conn <- Conns1],
+
+ close_channels(Chans1 ++ Chans2 ++ Chans3 ++ Chans4),
+ ?awaitMatch(0, count_channels_of_user(Config, Username1), 60000),
+ ?awaitMatch(0, count_channels_of_user(Config, Username2), 60000),
+
+ close_connections([Conn1, Conn2, Conn3, Conn4]),
+ ?awaitMatch(0, count_connections_of_user(Config, Username1), 60000),
+ ?awaitMatch(0, count_connections_of_user(Config, Username2), 60000),
+
+ set_user_connection_and_channel_limit(Config, Username1, -1, -1),
+ set_user_connection_and_channel_limit(Config, Username2, -1, -1).
+
+%% -------------------------------------------------------------------
+%% Helpers
+%% -------------------------------------------------------------------
+
+open_connections(Config, NodesAndUsers) ->
+    % Select the connection type (network or direct) configured for the group
+ OpenConnectionFun = case ?config(connection_type, Config) of
+ network -> open_unmanaged_connection;
+ direct -> open_unmanaged_connection_direct
+ end,
+ Conns = lists:map(fun
+ ({Node, User}) ->
+ rabbit_ct_client_helpers:OpenConnectionFun(Config, Node,
+ User, User);
+ (Node) ->
+ rabbit_ct_client_helpers:OpenConnectionFun(Config, Node)
+ end, NodesAndUsers),
+ timer:sleep(100),
+ Conns.
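+
+%% Usage sketch (illustrative node indices and username): one connection as
+%% the default user on node 0, one as <<"guest1">> on node 1:
+%%
+%%   [C0, C1] = open_connections(Config, [0, {1, <<"guest1">>}]).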
+
+close_connections(Conns) ->
+ lists:foreach(fun
+ (Conn) ->
+ rabbit_ct_client_helpers:close_connection(Conn)
+ end, Conns).
+
+open_channels(Conn, N) ->
+ [open_channel(Conn) || _ <- lists:seq(1, N)].
+
+open_channel(Conn) when is_pid(Conn) ->
+ try amqp_connection:open_channel(Conn) of
+ {ok, Ch} -> Ch
+ catch
+ _:_Error -> {error, not_allowed}
+ end.
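+
+%% open_channel/1 deliberately maps any failure of
+%% amqp_connection:open_channel/1 (e.g. when a channel limit is exceeded)
+%% to {error, not_allowed}, so callers can match the same tuple the
+%% connection helpers return:
+%%
+%%   case open_channel(Conn) of
+%%       {error, not_allowed} -> rejected;
+%%       Ch when is_pid(Ch)   -> {ok, Ch}
+%%   end.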
+
+close_channels(Channels = [_|_]) ->
+ [rabbit_ct_client_helpers:close_channel(Ch) || Ch <- Channels].
+
+count_connections_of_user(Config, Username) ->
+ count_connections_in(Config, Username, 0).
+count_connections_in(Config, Username, NodeIndex) ->
+ count_user_tracked_items(Config, NodeIndex, rabbit_connection_tracking, Username).
+
+count_channels_of_user(Config, Username) ->
+ count_channels_in(Config, Username, 0).
+count_channels_in(Config, Username, NodeIndex) ->
+ count_user_tracked_items(Config, NodeIndex, rabbit_channel_tracking, Username).
+
+count_user_tracked_items(Config, NodeIndex, TrackingMod, Username) ->
+ rabbit_ct_broker_helpers:rpc(Config, NodeIndex,
+ TrackingMod,
+ count_tracked_items_in, [{user, Username}]).
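+
+%% The counting happens broker-side; run directly on a node, the call above
+%% amounts to (illustrative username):
+%%
+%%   rabbit_connection_tracking:count_tracked_items_in({user, <<"guest">>}).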
+
+connections_in(Config, Username) ->
+ connections_in(Config, 0, Username).
+connections_in(Config, NodeIndex, Username) ->
+ tracked_list_of_user(Config, NodeIndex, rabbit_connection_tracking, Username).
+
+channels_in(Config, Username) ->
+ channels_in(Config, 0, Username).
+channels_in(Config, NodeIndex, Username) ->
+ tracked_list_of_user(Config, NodeIndex, rabbit_channel_tracking, Username).
+
+tracked_list_of_user(Config, NodeIndex, TrackingMod, Username) ->
+ rabbit_ct_broker_helpers:rpc(Config, NodeIndex,
+ TrackingMod,
+ list_of_user, [Username]).
+
+connections_on_node(Config) ->
+ connections_on_node(Config, 0).
+connections_on_node(Config, NodeIndex) ->
+ Node = rabbit_ct_broker_helpers:get_node_config(Config, NodeIndex, nodename),
+ tracked_items_on_node(Config, NodeIndex, rabbit_connection_tracking, Node).
+
+channels_on_node(Config) ->
+ channels_on_node(Config, 0).
+channels_on_node(Config, NodeIndex) ->
+ Node = rabbit_ct_broker_helpers:get_node_config(Config, NodeIndex, nodename),
+ tracked_items_on_node(Config, NodeIndex, rabbit_channel_tracking, Node).
+
+tracked_items_on_node(Config, NodeIndex, TrackingMod, NodeForListing) ->
+ rabbit_ct_broker_helpers:rpc(Config, NodeIndex,
+ TrackingMod,
+ list_on_node, [NodeForListing]).
+
+all_connections(Config) ->
+ all_connections(Config, 0).
+all_connections(Config, NodeIndex) ->
+ all_tracked_items(Config, NodeIndex, rabbit_connection_tracking).
+
+all_channels(Config) ->
+ all_channels(Config, 0).
+all_channels(Config, NodeIndex) ->
+ all_tracked_items(Config, NodeIndex, rabbit_channel_tracking).
+
+all_tracked_items(Config, NodeIndex, TrackingMod) ->
+ rabbit_ct_broker_helpers:rpc(Config, NodeIndex,
+ TrackingMod,
+ list, []).
+
+set_up_user(Config, Username) ->
+ VHost = proplists:get_value(rmq_vhost, Config),
+ rabbit_ct_broker_helpers:add_user(Config, Username),
+ rabbit_ct_broker_helpers:set_full_permissions(Config, Username, VHost),
+ set_user_connection_and_channel_limit(Config, Username, -1, -1).
+
+set_user_connection_and_channel_limit(Config, Username, ConnLimit, ChLimit) ->
+ set_user_connection_and_channel_limit(Config, 0, Username, ConnLimit, ChLimit).
+
+set_user_connection_and_channel_limit(Config, NodeIndex, Username, ConnLimit, ChLimit) ->
+ Node = rabbit_ct_broker_helpers:get_node_config(
+ Config, NodeIndex, nodename),
+ ok = rabbit_ct_broker_helpers:control_action(
+ set_user_limits, Node, [rabbit_data_coercion:to_list(Username)] ++
+ ["{\"max-connections\": " ++ integer_to_list(ConnLimit) ++ "," ++
+ " \"max-channels\": " ++ integer_to_list(ChLimit) ++ "}"]).
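+
+%% This control action should be equivalent to the following CLI invocation
+%% (illustrative username and limits; -1 means "no limit" and is what the
+%% tests use to reset state):
+%%
+%%   rabbitmqctl set_user_limits guest '{"max-connections": 3, "max-channels": 15}'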
+
+set_user_connection_limit_only(Config, Username, ConnLimit) ->
+ set_user_connection_limit_only(Config, 0, Username, ConnLimit).
+
+set_user_connection_limit_only(Config, NodeIndex, Username, ConnLimit) ->
+ Node = rabbit_ct_broker_helpers:get_node_config(
+ Config, NodeIndex, nodename),
+ ok = rabbit_ct_broker_helpers:control_action(
+ set_user_limits, Node, [rabbit_data_coercion:to_list(Username)] ++
+ ["{\"max-connections\": " ++ integer_to_list(ConnLimit) ++ "}"]).
+
+set_user_channel_limit_only(Config, Username, ChLimit) ->
+ set_user_channel_limit_only(Config, 0, Username, ChLimit).
+
+set_user_channel_limit_only(Config, NodeIndex, Username, ChLimit) ->
+ Node = rabbit_ct_broker_helpers:get_node_config(
+ Config, NodeIndex, nodename),
+ ok = rabbit_ct_broker_helpers:control_action(
+ set_user_limits, Node, [rabbit_data_coercion:to_list(Username)] ++
+ ["{\"max-channels\": " ++ integer_to_list(ChLimit) ++ "}"]).
+
+clear_all_user_limits(Config, Username) ->
+ clear_all_user_limits(Config, 0, Username).
+clear_all_user_limits(Config, NodeIndex, Username) ->
+ Node = rabbit_ct_broker_helpers:get_node_config(
+ Config, NodeIndex, nodename),
+ ok = rabbit_ct_broker_helpers:control_action(
+ clear_user_limits, Node, [rabbit_data_coercion:to_list(Username), "all"]).
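+
+%% This should correspond to the CLI invocation (illustrative username):
+%%
+%%   rabbitmqctl clear_user_limits guest all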
+
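+%% There is no synchronous API to wait on here, so give the tracking tables
+%% a moment to pick up the change in running nodes.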
+await_running_node_refresh(_Config, _NodeIndex) ->
+ timer:sleep(250).
+
+expect_that_client_connection_is_rejected(Config) ->
+ expect_that_client_connection_is_rejected(Config, 0).
+
+expect_that_client_connection_is_rejected(Config, NodeIndex) ->
+ {error, not_allowed} =
+ rabbit_ct_client_helpers:open_unmanaged_connection(Config, NodeIndex).
+
+expect_that_client_connection_is_rejected(Config, NodeIndex, User) ->
+ {error, not_allowed} =
+ rabbit_ct_client_helpers:open_unmanaged_connection(Config, NodeIndex, User, User).
+
+expect_that_client_channel_is_rejected(Conn) ->
+ {error, not_allowed} = open_channel(Conn).
diff --git a/deps/rabbit/test/per_user_connection_channel_limit_partitions_SUITE.erl b/deps/rabbit/test/per_user_connection_channel_limit_partitions_SUITE.erl
new file mode 100644
index 0000000000..8af68f0112
--- /dev/null
+++ b/deps/rabbit/test/per_user_connection_channel_limit_partitions_SUITE.erl
@@ -0,0 +1,182 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(per_user_connection_channel_limit_partitions_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-compile(export_all).
+
+-import(rabbit_ct_client_helpers, [open_unmanaged_connection/2]).
+
+all() ->
+ [
+ {group, net_ticktime_1}
+ ].
+
+groups() ->
+ [
+ {net_ticktime_1, [], [
+ cluster_full_partition_with_autoheal
+ ]}
+ ].
+
+suite() ->
+ [
+ %% If a test hangs, no need to wait for 30 minutes.
+ {timetrap, {minutes, 8}}
+ ].
+
+%% see partitions_SUITE
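+%% (12s is a generous multiple of the 1s net_ticktime configured for this
+%% group, so a blocked-off node should reliably be detected as down.)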
+-define(DELAY, 12000).
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(
+ Config, [fun rabbit_ct_broker_helpers:configure_dist_proxy/1]).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(net_ticktime_1 = Group, Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config, [{net_ticktime, 1}]),
+ init_per_multinode_group(Group, Config1, 3).
+
+init_per_multinode_group(Group, Config, NodeCount) ->
+ Suffix = rabbit_ct_helpers:testcase_absname(Config, "", "-"),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodes_count, NodeCount},
+ {rmq_nodename_suffix, Suffix}
+ ]),
+ Config2 = rabbit_ct_helpers:run_steps(
+ Config1, rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()),
+ EnableFF = rabbit_ct_broker_helpers:enable_feature_flag(
+ Config2, user_limits),
+ case EnableFF of
+ ok ->
+ Config2;
+ Skip ->
+ end_per_group(Group, Config2),
+ Skip
+ end.
+
+end_per_group(_Group, Config) ->
+ rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+%% -------------------------------------------------------------------
+%% Test cases.
+%% -------------------------------------------------------------------
+
+cluster_full_partition_with_autoheal(Config) ->
+ Username = proplists:get_value(rmq_username, Config),
+ rabbit_ct_broker_helpers:set_partition_handling_mode_globally(Config, autoheal),
+
+ ?assertEqual(0, count_connections_in(Config, Username)),
+ [A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ %% 6 connections, 2 per node
+ Conn1 = open_unmanaged_connection(Config, A),
+ Conn2 = open_unmanaged_connection(Config, A),
+ Conn3 = open_unmanaged_connection(Config, B),
+ Conn4 = open_unmanaged_connection(Config, B),
+ Conn5 = open_unmanaged_connection(Config, C),
+ Conn6 = open_unmanaged_connection(Config, C),
+
+ _Chans1 = [_|_] = open_channels(Conn1, 5),
+ _Chans3 = [_|_] = open_channels(Conn3, 5),
+ _Chans5 = [_|_] = open_channels(Conn5, 5),
+ wait_for_count_connections_in(Config, Username, 6, 60000),
+ ?assertEqual(15, count_channels_in(Config, Username)),
+
+    %% B drops off the network and becomes unreachable from both A and C
+ rabbit_ct_broker_helpers:block_traffic_between(A, B),
+ rabbit_ct_broker_helpers:block_traffic_between(B, C),
+ timer:sleep(?DELAY),
+
+ %% A and C are still connected, so 4 connections are tracked
+ %% All connections to B are dropped
+ wait_for_count_connections_in(Config, Username, 4, 60000),
+ ?assertEqual(10, count_channels_in(Config, Username)),
+
+ rabbit_ct_broker_helpers:allow_traffic_between(A, B),
+ rabbit_ct_broker_helpers:allow_traffic_between(B, C),
+ timer:sleep(?DELAY),
+
+ %% during autoheal B's connections were dropped
+ wait_for_count_connections_in(Config, Username, 4, 60000),
+ ?assertEqual(10, count_channels_in(Config, Username)),
+
+ lists:foreach(fun (Conn) ->
+ (catch rabbit_ct_client_helpers:close_connection(Conn))
+ end, [Conn1, Conn2, Conn3, Conn4,
+ Conn5, Conn6]),
+ ?assertEqual(0, count_connections_in(Config, Username)),
+ ?assertEqual(0, count_channels_in(Config, Username)),
+
+ passed.
+
+%% -------------------------------------------------------------------
+%% Helpers
+%% -------------------------------------------------------------------
+
+wait_for_count_connections_in(Config, Username, Expected, Time) when Time =< 0 ->
+ ?assertMatch(Connections when length(Connections) == Expected,
+ connections_in(Config, Username));
+wait_for_count_connections_in(Config, Username, Expected, Time) ->
+ case connections_in(Config, Username) of
+ Connections when length(Connections) == Expected ->
+ ok;
+ _ ->
+ Sleep = 3000,
+ timer:sleep(Sleep),
+ wait_for_count_connections_in(Config, Username, Expected, Time - Sleep)
+ end.
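+
+%% Usage sketch, as in the test case above: wait up to 60s for exactly four
+%% tracked connections:
+%%
+%%   wait_for_count_connections_in(Config, Username, 4, 60000).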
+
+open_channels(Conn, N) ->
+ [begin
+ {ok, Ch} = amqp_connection:open_channel(Conn),
+ Ch
+ end || _ <- lists:seq(1, N)].
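+
+%% Unlike the limit suite's open_channel/1, this variant asserts success:
+%% channel limits are not under test here, only tracking across partitions.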
+
+count_connections_in(Config, Username) ->
+ length(connections_in(Config, Username)).
+
+connections_in(Config, Username) ->
+ connections_in(Config, 0, Username).
+connections_in(Config, NodeIndex, Username) ->
+ tracked_list_of_user(Config, NodeIndex, rabbit_connection_tracking, Username).
+
+count_channels_in(Config, Username) ->
+ Channels = channels_in(Config, Username),
+ length([Ch || Ch = #tracked_channel{username = Username0} <- Channels,
+ Username =:= Username0]).
+
+channels_in(Config, Username) ->
+ channels_in(Config, 0, Username).
+channels_in(Config, NodeIndex, Username) ->
+ tracked_list_of_user(Config, NodeIndex, rabbit_channel_tracking, Username).
+
+tracked_list_of_user(Config, NodeIndex, TrackingMod, Username) ->
+ rabbit_ct_broker_helpers:rpc(Config, NodeIndex,
+ TrackingMod,
+ list_of_user, [Username]).
diff --git a/deps/rabbit/test/per_user_connection_channel_tracking_SUITE.erl b/deps/rabbit/test/per_user_connection_channel_tracking_SUITE.erl
new file mode 100644
index 0000000000..8b4bd91d09
--- /dev/null
+++ b/deps/rabbit/test/per_user_connection_channel_tracking_SUITE.erl
@@ -0,0 +1,850 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(per_user_connection_channel_tracking_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("rabbitmq_ct_helpers/include/rabbit_assert.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, cluster_size_1_network},
+ {group, cluster_size_2_network},
+ {group, cluster_size_1_direct},
+ {group, cluster_size_2_direct}
+ ].
+
+groups() ->
+ ClusterSize1Tests = [
+ single_node_user_connection_channel_tracking,
+ single_node_user_deletion,
+ single_node_vhost_down_mimic,
+ single_node_vhost_deletion
+ ],
+ ClusterSize2Tests = [
+ cluster_user_deletion,
+ cluster_vhost_down_mimic,
+ cluster_vhost_deletion,
+ cluster_node_removed
+ ],
+ [
+ {cluster_size_1_network, [], ClusterSize1Tests},
+ {cluster_size_2_network, [], ClusterSize2Tests},
+ {cluster_size_1_direct, [], ClusterSize1Tests},
+ {cluster_size_2_direct, [], ClusterSize2Tests}
+ ].
+
+suite() ->
+ [
+ %% If a test hangs, no need to wait for 30 minutes.
+ {timetrap, {minutes, 8}}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(cluster_size_1_network, Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config, [{connection_type, network}]),
+ init_per_multinode_group(cluster_size_1_network, Config1, 1);
+init_per_group(cluster_size_2_network, Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config, [{connection_type, network}]),
+ init_per_multinode_group(cluster_size_2_network, Config1, 2);
+init_per_group(cluster_size_1_direct, Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config, [{connection_type, direct}]),
+ init_per_multinode_group(cluster_size_1_direct, Config1, 1);
+init_per_group(cluster_size_2_direct, Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config, [{connection_type, direct}]),
+ init_per_multinode_group(cluster_size_2_direct, Config1, 2).
+
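+%% Runs the standard broker/client setup steps, then enables the user_limits
+%% feature flag; a non-ok (skip) result tears the group back down and is
+%% returned so common_test skips the group.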
+init_per_multinode_group(Group, Config, NodeCount) ->
+ Suffix = rabbit_ct_helpers:testcase_absname(Config, "", "-"),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodes_count, NodeCount},
+ {rmq_nodename_suffix, Suffix}
+ ]),
+ Config2 = rabbit_ct_helpers:run_steps(
+ Config1, rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()),
+ EnableFF = rabbit_ct_broker_helpers:enable_feature_flag(
+ Config2, user_limits),
+ case EnableFF of
+ ok ->
+ Config2;
+ Skip ->
+ end_per_group(Group, Config2),
+ Skip
+ end.
+
+end_per_group(_Group, Config) ->
+ rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase),
+ clear_all_connection_tracking_tables(Config),
+ clear_all_channel_tracking_tables(Config),
+ Config.
+
+end_per_testcase(Testcase, Config) ->
+ clear_all_connection_tracking_tables(Config),
+ clear_all_channel_tracking_tables(Config),
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+clear_all_connection_tracking_tables(Config) ->
+ [rabbit_ct_broker_helpers:rpc(Config,
+ N,
+ rabbit_connection_tracking,
+ clear_tracking_tables,
+ []) || N <- rabbit_ct_broker_helpers:get_node_configs(Config, nodename)].
+
+clear_all_channel_tracking_tables(Config) ->
+ [rabbit_ct_broker_helpers:rpc(Config,
+ N,
+ rabbit_channel_tracking,
+ clear_tracking_tables,
+ []) || N <- rabbit_ct_broker_helpers:get_node_configs(Config, nodename)].
+
+%% -------------------------------------------------------------------
+%% Test cases.
+%% -------------------------------------------------------------------
+single_node_user_connection_channel_tracking(Config) ->
+ Username = proplists:get_value(rmq_username, Config),
+ Username2 = <<"guest2">>,
+
+ Vhost = proplists:get_value(rmq_vhost, Config),
+
+ rabbit_ct_broker_helpers:add_user(Config, Username2),
+ rabbit_ct_broker_helpers:set_full_permissions(Config, Username2, Vhost),
+
+ ?assertEqual(0, count_connections_in(Config, Username)),
+ ?assertEqual(0, count_connections_in(Config, Username2)),
+ ?assertEqual(0, count_channels_in(Config, Username)),
+ ?assertEqual(0, count_channels_in(Config, Username2)),
+ ?assertEqual(0, tracked_user_connection_count(Config, Username)),
+ ?assertEqual(0, tracked_user_connection_count(Config, Username2)),
+ ?assertEqual(0, tracked_user_channel_count(Config, Username)),
+ ?assertEqual(0, tracked_user_channel_count(Config, Username2)),
+
+ [Conn1] = open_connections(Config, [0]),
+ [Chan1] = open_channels(Conn1, 1),
+ [#tracked_connection{username = Username}] = connections_in(Config, Username),
+ [#tracked_channel{username = Username}] = channels_in(Config, Username),
+ ?assertEqual(true, is_process_alive(Conn1)),
+ ?assertEqual(true, is_process_alive(Chan1)),
+ close_channels([Chan1]),
+ ?awaitMatch(0, count_channels_in(Config, Username), 20000),
+ ?awaitMatch(0, tracked_user_channel_count(Config, Username), 20000),
+ ?awaitMatch(false, is_process_alive(Chan1), 20000),
+ close_connections([Conn1]),
+ ?awaitMatch(0, length(connections_in(Config, Username)), 20000),
+ ?awaitMatch(0, tracked_user_connection_count(Config, Username), 20000),
+ ?awaitMatch(false, is_process_alive(Conn1), 20000),
+
+ [Conn2] = open_connections(Config, [{0, Username2}]),
+ Chans2 = [_|_] = open_channels(Conn2, 5),
+ timer:sleep(100),
+ [#tracked_connection{username = Username2}] = connections_in(Config, Username2),
+ ?assertEqual(5, count_channels_in(Config, Username2)),
+ ?assertEqual(1, tracked_user_connection_count(Config, Username2)),
+ ?assertEqual(5, tracked_user_channel_count(Config, Username2)),
+ ?assertEqual(true, is_process_alive(Conn2)),
+ [?assertEqual(true, is_process_alive(Ch)) || Ch <- Chans2],
+
+ [Conn3] = open_connections(Config, [0]),
+ Chans3 = [_|_] = open_channels(Conn3, 5),
+ [#tracked_connection{username = Username}] = connections_in(Config, Username),
+ ?assertEqual(5, count_channels_in(Config, Username)),
+ ?assertEqual(1, tracked_user_connection_count(Config, Username)),
+ ?assertEqual(5, tracked_user_channel_count(Config, Username)),
+ ?assertEqual(true, is_process_alive(Conn3)),
+ [?assertEqual(true, is_process_alive(Ch)) || Ch <- Chans3],
+
+ [Conn4] = open_connections(Config, [0]),
+ Chans4 = [_|_] = open_channels(Conn4, 5),
+ ?assertEqual(2, tracked_user_connection_count(Config, Username)),
+ ?assertEqual(10, tracked_user_channel_count(Config, Username)),
+ ?assertEqual(true, is_process_alive(Conn4)),
+ [?assertEqual(true, is_process_alive(Ch)) || Ch <- Chans4],
+ kill_connections([Conn4]),
+ [#tracked_connection{username = Username}] = connections_in(Config, Username),
+ ?awaitMatch(5, count_channels_in(Config, Username), 20000),
+ ?awaitMatch(1, tracked_user_connection_count(Config, Username), 20000),
+ ?awaitMatch(5, tracked_user_channel_count(Config, Username), 20000),
+ ?assertEqual(false, is_process_alive(Conn4)),
+ [?assertEqual(false, is_process_alive(Ch)) || Ch <- Chans4],
+
+ [Conn5] = open_connections(Config, [0]),
+ Chans5 = [_|_] = open_channels(Conn5, 7),
+ [Username, Username] =
+ lists:map(fun (#tracked_connection{username = U}) -> U end,
+ connections_in(Config, Username)),
+ ?assertEqual(12, count_channels_in(Config, Username)),
+ ?assertEqual(12, tracked_user_channel_count(Config, Username)),
+ ?assertEqual(2, tracked_user_connection_count(Config, Username)),
+ ?assertEqual(true, is_process_alive(Conn5)),
+ [?assertEqual(true, is_process_alive(Ch)) || Ch <- Chans5],
+
+ close_channels(Chans2 ++ Chans3 ++ Chans5),
+ ?awaitMatch(0, length(all_channels(Config)), 20000),
+ ?awaitMatch(0, tracked_user_channel_count(Config, Username), 20000),
+ ?awaitMatch(0, tracked_user_channel_count(Config, Username2), 20000),
+
+ close_connections([Conn2, Conn3, Conn5]),
+ rabbit_ct_broker_helpers:delete_user(Config, Username2),
+ ?awaitMatch(0, tracked_user_connection_count(Config, Username), 20000),
+ ?awaitMatch(0, tracked_user_connection_count(Config, Username2), 20000),
+ ?awaitMatch(0, length(all_connections(Config)), 20000).
+
+single_node_user_deletion(Config) ->
+ set_tracking_execution_timeout(Config, 100),
+
+ Username = proplists:get_value(rmq_username, Config),
+ Username2 = <<"guest2">>,
+
+ Vhost = proplists:get_value(rmq_vhost, Config),
+
+ rabbit_ct_broker_helpers:add_user(Config, Username2),
+ rabbit_ct_broker_helpers:set_full_permissions(Config, Username2, Vhost),
+
+ ?assertEqual(100, get_tracking_execution_timeout(Config)),
+
+ ?assertEqual(0, count_connections_in(Config, Username)),
+ ?assertEqual(0, count_connections_in(Config, Username2)),
+ ?assertEqual(0, count_channels_in(Config, Username)),
+ ?assertEqual(0, count_channels_in(Config, Username2)),
+ ?assertEqual(0, tracked_user_connection_count(Config, Username)),
+ ?assertEqual(0, tracked_user_connection_count(Config, Username2)),
+ ?assertEqual(0, tracked_user_channel_count(Config, Username)),
+ ?assertEqual(0, tracked_user_channel_count(Config, Username2)),
+
+ [Conn1] = open_connections(Config, [0]),
+ Chans1 = [_|_] = open_channels(Conn1, 5),
+ ?assertEqual(1, count_connections_in(Config, Username)),
+ ?assertEqual(5, count_channels_in(Config, Username)),
+ ?assertEqual(1, tracked_user_connection_count(Config, Username)),
+ ?assertEqual(5, tracked_user_channel_count(Config, Username)),
+ ?assertEqual(true, is_process_alive(Conn1)),
+ [?assertEqual(true, is_process_alive(Ch)) || Ch <- Chans1],
+
+ [Conn2] = open_connections(Config, [{0, Username2}]),
+ Chans2 = [_|_] = open_channels(Conn2, 5),
+ ?assertEqual(1, count_connections_in(Config, Username2)),
+ ?assertEqual(5, count_channels_in(Config, Username2)),
+ ?assertEqual(1, tracked_user_connection_count(Config, Username2)),
+ ?assertEqual(5, tracked_user_channel_count(Config, Username2)),
+ ?assertEqual(true, is_process_alive(Conn2)),
+ [?assertEqual(true, is_process_alive(Ch)) || Ch <- Chans2],
+
+ ?assertEqual(true, exists_in_tracked_connection_per_user_table(Config, Username2)),
+ ?assertEqual(true, exists_in_tracked_channel_per_user_table(Config, Username2)),
+
+ rabbit_ct_broker_helpers:delete_user(Config, Username2),
+ timer:sleep(100),
+ ?assertEqual(0, count_connections_in(Config, Username2)),
+ ?assertEqual(0, count_channels_in(Config, Username2)),
+ ?assertEqual(0, tracked_user_connection_count(Config, Username2)),
+ ?assertEqual(0, tracked_user_channel_count(Config, Username2)),
+ ?assertEqual(false, is_process_alive(Conn2)),
+ [?assertEqual(false, is_process_alive(Ch)) || Ch <- Chans2],
+
+    %% ensure user entries are cleared after 'tracking_execution_timeout'
+ ?awaitMatch(false, exists_in_tracked_connection_per_user_table(Config, Username2), 20000),
+ ?awaitMatch(false, exists_in_tracked_channel_per_user_table(Config, Username2), 20000),
+
+ ?assertEqual(1, count_connections_in(Config, Username)),
+ ?assertEqual(5, count_channels_in(Config, Username)),
+ ?assertEqual(1, tracked_user_connection_count(Config, Username)),
+ ?assertEqual(5, tracked_user_channel_count(Config, Username)),
+ ?assertEqual(true, is_process_alive(Conn1)),
+ [?assertEqual(true, is_process_alive(Ch)) || Ch <- Chans1],
+
+ close_channels(Chans1),
+ ?awaitMatch(0, count_channels_in(Config, Username), 20000),
+ ?awaitMatch(0, tracked_user_channel_count(Config, Username), 20000),
+
+ close_connections([Conn1]),
+ ?awaitMatch(0, count_connections_in(Config, Username), 20000),
+ ?awaitMatch(0, tracked_user_connection_count(Config, Username), 20000).
+
+single_node_vhost_deletion(Config) ->
+ set_tracking_execution_timeout(Config, 100),
+
+ Username = proplists:get_value(rmq_username, Config),
+ Username2 = <<"guest2">>,
+
+ Vhost = proplists:get_value(rmq_vhost, Config),
+
+ rabbit_ct_broker_helpers:add_user(Config, Username2),
+ rabbit_ct_broker_helpers:set_full_permissions(Config, Username2, Vhost),
+
+ ?assertEqual(100, get_tracking_execution_timeout(Config)),
+
+ ?assertEqual(0, count_connections_in(Config, Username)),
+ ?assertEqual(0, count_connections_in(Config, Username2)),
+ ?assertEqual(0, count_channels_in(Config, Username)),
+ ?assertEqual(0, count_channels_in(Config, Username2)),
+ ?assertEqual(0, tracked_user_connection_count(Config, Username)),
+ ?assertEqual(0, tracked_user_connection_count(Config, Username2)),
+ ?assertEqual(0, tracked_user_channel_count(Config, Username)),
+ ?assertEqual(0, tracked_user_channel_count(Config, Username2)),
+
+ [Conn1] = open_connections(Config, [0]),
+ Chans1 = [_|_] = open_channels(Conn1, 5),
+ ?assertEqual(1, count_connections_in(Config, Username)),
+ ?assertEqual(5, count_channels_in(Config, Username)),
+ ?assertEqual(1, tracked_user_connection_count(Config, Username)),
+ ?assertEqual(5, tracked_user_channel_count(Config, Username)),
+ ?assertEqual(true, is_process_alive(Conn1)),
+ [?assertEqual(true, is_process_alive(Ch)) || Ch <- Chans1],
+
+ [Conn2] = open_connections(Config, [{0, Username2}]),
+ Chans2 = [_|_] = open_channels(Conn2, 5),
+ ?assertEqual(1, count_connections_in(Config, Username2)),
+ ?assertEqual(5, count_channels_in(Config, Username2)),
+ ?assertEqual(1, tracked_user_connection_count(Config, Username2)),
+ ?assertEqual(5, tracked_user_channel_count(Config, Username2)),
+ ?assertEqual(true, is_process_alive(Conn2)),
+ [?assertEqual(true, is_process_alive(Ch)) || Ch <- Chans2],
+
+ ?assertEqual(true, exists_in_tracked_connection_per_vhost_table(Config, Vhost)),
+
+ rabbit_ct_broker_helpers:delete_vhost(Config, Vhost),
+ timer:sleep(200),
+ ?assertEqual(0, count_connections_in(Config, Username2)),
+ ?assertEqual(0, count_channels_in(Config, Username2)),
+ ?assertEqual(0, tracked_user_connection_count(Config, Username2)),
+ ?assertEqual(0, tracked_user_channel_count(Config, Username2)),
+ ?assertEqual(false, is_process_alive(Conn2)),
+ [?assertEqual(false, is_process_alive(Ch)) || Ch <- Chans2],
+
+ ?assertEqual(0, count_connections_in(Config, Username)),
+ ?assertEqual(0, count_channels_in(Config, Username)),
+ ?assertEqual(0, tracked_user_connection_count(Config, Username)),
+ ?assertEqual(0, tracked_user_channel_count(Config, Username)),
+ ?assertEqual(false, is_process_alive(Conn1)),
+ [?assertEqual(false, is_process_alive(Ch)) || Ch <- Chans1],
+
+ %% ensure vhost entry is cleared after 'tracking_execution_timeout'
+ ?assertEqual(false, exists_in_tracked_connection_per_vhost_table(Config, Vhost)),
+
+ rabbit_ct_broker_helpers:add_vhost(Config, Vhost).
+
+single_node_vhost_down_mimic(Config) ->
+ Username = proplists:get_value(rmq_username, Config),
+ Username2 = <<"guest2">>,
+
+ Vhost = proplists:get_value(rmq_vhost, Config),
+
+ rabbit_ct_broker_helpers:add_user(Config, Username2),
+ rabbit_ct_broker_helpers:set_full_permissions(Config, Username2, Vhost),
+
+ ?assertEqual(0, count_connections_in(Config, Username)),
+ ?assertEqual(0, count_connections_in(Config, Username2)),
+ ?assertEqual(0, count_channels_in(Config, Username)),
+ ?assertEqual(0, count_channels_in(Config, Username2)),
+ ?assertEqual(0, tracked_user_connection_count(Config, Username)),
+ ?assertEqual(0, tracked_user_connection_count(Config, Username2)),
+ ?assertEqual(0, tracked_user_channel_count(Config, Username)),
+ ?assertEqual(0, tracked_user_channel_count(Config, Username2)),
+
+ [Conn1] = open_connections(Config, [0]),
+ Chans1 = [_|_] = open_channels(Conn1, 5),
+ ?assertEqual(1, count_connections_in(Config, Username)),
+ ?assertEqual(5, count_channels_in(Config, Username)),
+ ?assertEqual(1, tracked_user_connection_count(Config, Username)),
+ ?assertEqual(5, tracked_user_channel_count(Config, Username)),
+ ?assertEqual(true, is_process_alive(Conn1)),
+ [?assertEqual(true, is_process_alive(Ch)) || Ch <- Chans1],
+
+ [Conn2] = open_connections(Config, [{0, Username2}]),
+ Chans2 = [_|_] = open_channels(Conn2, 5),
+ ?assertEqual(1, count_connections_in(Config, Username2)),
+ ?assertEqual(5, count_channels_in(Config, Username2)),
+ ?assertEqual(1, tracked_user_connection_count(Config, Username2)),
+ ?assertEqual(5, tracked_user_channel_count(Config, Username2)),
+ ?assertEqual(true, is_process_alive(Conn2)),
+ [?assertEqual(true, is_process_alive(Ch)) || Ch <- Chans2],
+
+    %% mimic a vhost-down event while connections still exist
+ mimic_vhost_down(Config, 0, Vhost),
+ timer:sleep(200),
+ ?assertEqual(0, count_connections_in(Config, Username2)),
+ ?assertEqual(0, count_channels_in(Config, Username2)),
+ ?assertEqual(0, tracked_user_connection_count(Config, Username2)),
+ ?assertEqual(0, tracked_user_channel_count(Config, Username2)),
+ ?assertEqual(false, is_process_alive(Conn2)),
+ [?assertEqual(false, is_process_alive(Ch)) || Ch <- Chans2],
+
+ ?assertEqual(0, count_connections_in(Config, Username)),
+ ?assertEqual(0, count_channels_in(Config, Username)),
+ ?assertEqual(0, tracked_user_connection_count(Config, Username)),
+ ?assertEqual(0, tracked_user_channel_count(Config, Username)),
+ ?assertEqual(false, is_process_alive(Conn1)),
+ [?assertEqual(false, is_process_alive(Ch)) || Ch <- Chans1].
+
+cluster_user_deletion(Config) ->
+ set_tracking_execution_timeout(Config, 0, 100),
+ set_tracking_execution_timeout(Config, 1, 100),
+ Username = proplists:get_value(rmq_username, Config),
+ Username2 = <<"guest2">>,
+
+ Vhost = proplists:get_value(rmq_vhost, Config),
+
+ rabbit_ct_broker_helpers:add_user(Config, Username2),
+ rabbit_ct_broker_helpers:set_full_permissions(Config, Username2, Vhost),
+
+ ?assertEqual(100, get_tracking_execution_timeout(Config, 0)),
+ ?assertEqual(100, get_tracking_execution_timeout(Config, 1)),
+
+ ?assertEqual(0, count_connections_in(Config, Username)),
+ ?assertEqual(0, count_connections_in(Config, Username2)),
+ ?assertEqual(0, count_channels_in(Config, Username)),
+ ?assertEqual(0, count_channels_in(Config, Username2)),
+ ?assertEqual(0, tracked_user_connection_count(Config, Username)),
+ ?assertEqual(0, tracked_user_connection_count(Config, Username2)),
+ ?assertEqual(0, tracked_user_channel_count(Config, Username)),
+ ?assertEqual(0, tracked_user_channel_count(Config, Username2)),
+
+ [Conn1] = open_connections(Config, [0]),
+ Chans1 = [_|_] = open_channels(Conn1, 5),
+ ?assertEqual(1, count_connections_in(Config, Username)),
+ ?assertEqual(5, count_channels_in(Config, Username)),
+ ?assertEqual(1, tracked_user_connection_count(Config, Username)),
+ ?assertEqual(5, tracked_user_channel_count(Config, Username)),
+ ?assertEqual(true, is_process_alive(Conn1)),
+ [?assertEqual(true, is_process_alive(Ch)) || Ch <- Chans1],
+
+ [Conn2] = open_connections(Config, [{1, Username2}]),
+ Chans2 = [_|_] = open_channels(Conn2, 5),
+ ?assertEqual(1, count_connections_in(Config, Username2)),
+ ?assertEqual(5, count_channels_in(Config, Username2)),
+ ?assertEqual(1, tracked_user_connection_count(Config, Username2)),
+ ?assertEqual(5, tracked_user_channel_count(Config, Username2)),
+ ?assertEqual(true, is_process_alive(Conn2)),
+ [?assertEqual(true, is_process_alive(Ch)) || Ch <- Chans2],
+
+ ?assertEqual(true, exists_in_tracked_connection_per_user_table(Config, 1, Username2)),
+ ?assertEqual(true, exists_in_tracked_channel_per_user_table(Config, 1, Username2)),
+
+ rabbit_ct_broker_helpers:delete_user(Config, Username2),
+ timer:sleep(200),
+ ?assertEqual(0, count_connections_in(Config, Username2)),
+ ?assertEqual(0, count_channels_in(Config, Username2)),
+ ?assertEqual(0, tracked_user_connection_count(Config, Username2)),
+ ?assertEqual(0, tracked_user_channel_count(Config, Username2)),
+ ?assertEqual(false, is_process_alive(Conn2)),
+ [?assertEqual(false, is_process_alive(Ch)) || Ch <- Chans2],
+
+    %% ensure user entries are cleared after 'tracking_execution_timeout'
+ ?assertEqual(false, exists_in_tracked_connection_per_user_table(Config, 1, Username2)),
+ ?assertEqual(false, exists_in_tracked_channel_per_user_table(Config, 1, Username2)),
+
+ close_channels(Chans1),
+ ?awaitMatch(0, count_channels_in(Config, Username), 20000),
+ ?awaitMatch(0, tracked_user_channel_count(Config, Username), 20000),
+
+ close_connections([Conn1]),
+ ?awaitMatch(0, count_connections_in(Config, Username), 20000),
+ ?awaitMatch(0, tracked_user_connection_count(Config, Username), 20000).
+
+cluster_vhost_deletion(Config) ->
+ set_tracking_execution_timeout(Config, 0, 100),
+ set_tracking_execution_timeout(Config, 1, 100),
+ Username = proplists:get_value(rmq_username, Config),
+ Username2 = <<"guest2">>,
+
+ Vhost = proplists:get_value(rmq_vhost, Config),
+
+ rabbit_ct_broker_helpers:add_user(Config, Username2),
+ rabbit_ct_broker_helpers:set_full_permissions(Config, Username2, Vhost),
+
+ ?assertEqual(100, get_tracking_execution_timeout(Config, 0)),
+ ?assertEqual(100, get_tracking_execution_timeout(Config, 1)),
+
+ ?assertEqual(0, count_connections_in(Config, Username)),
+ ?assertEqual(0, count_connections_in(Config, Username2)),
+ ?assertEqual(0, count_channels_in(Config, Username)),
+ ?assertEqual(0, count_channels_in(Config, Username2)),
+ ?assertEqual(0, tracked_user_connection_count(Config, Username)),
+ ?assertEqual(0, tracked_user_connection_count(Config, Username2)),
+ ?assertEqual(0, tracked_user_channel_count(Config, Username)),
+ ?assertEqual(0, tracked_user_channel_count(Config, Username2)),
+
+ [Conn1] = open_connections(Config, [{0, Username}]),
+ Chans1 = [_|_] = open_channels(Conn1, 5),
+ ?assertEqual(1, count_connections_in(Config, Username)),
+ ?assertEqual(5, count_channels_in(Config, Username)),
+ ?assertEqual(1, tracked_user_connection_count(Config, Username)),
+ ?assertEqual(5, tracked_user_channel_count(Config, Username)),
+ ?assertEqual(true, is_process_alive(Conn1)),
+ [?assertEqual(true, is_process_alive(Ch)) || Ch <- Chans1],
+
+ [Conn2] = open_connections(Config, [{1, Username2}]),
+ Chans2 = [_|_] = open_channels(Conn2, 5),
+ ?assertEqual(1, count_connections_in(Config, Username2)),
+ ?assertEqual(5, count_channels_in(Config, Username2)),
+ ?assertEqual(1, tracked_user_connection_count(Config, Username2)),
+ ?assertEqual(5, tracked_user_channel_count(Config, Username2)),
+ ?assertEqual(true, is_process_alive(Conn2)),
+ [?assertEqual(true, is_process_alive(Ch)) || Ch <- Chans2],
+
+ ?assertEqual(true, exists_in_tracked_connection_per_vhost_table(Config, 0, Vhost)),
+ ?assertEqual(true, exists_in_tracked_connection_per_vhost_table(Config, 1, Vhost)),
+
+ rabbit_ct_broker_helpers:delete_vhost(Config, Vhost),
+ timer:sleep(200),
+ ?assertEqual(0, count_connections_in(Config, Username2)),
+ ?assertEqual(0, count_channels_in(Config, Username2)),
+ ?assertEqual(0, tracked_user_connection_count(Config, Username2)),
+ ?assertEqual(0, tracked_user_channel_count(Config, Username2)),
+ ?assertEqual(false, is_process_alive(Conn2)),
+ [?assertEqual(false, is_process_alive(Ch)) || Ch <- Chans2],
+
+ ?assertEqual(0, count_connections_in(Config, Username)),
+ ?assertEqual(0, count_channels_in(Config, Username)),
+ ?assertEqual(0, tracked_user_connection_count(Config, Username)),
+ ?assertEqual(0, tracked_user_channel_count(Config, Username)),
+ ?assertEqual(false, is_process_alive(Conn1)),
+ [?assertEqual(false, is_process_alive(Ch)) || Ch <- Chans1],
+
+ %% ensure vhost entry is cleared after 'tracking_execution_timeout'
+ ?assertEqual(false, exists_in_tracked_connection_per_vhost_table(Config, 0, Vhost)),
+ ?assertEqual(false, exists_in_tracked_connection_per_vhost_table(Config, 1, Vhost)),
+
+ rabbit_ct_broker_helpers:add_vhost(Config, Vhost),
+ rabbit_ct_broker_helpers:add_user(Config, Username),
+ rabbit_ct_broker_helpers:set_full_permissions(Config, Username, Vhost).
+
+cluster_vhost_down_mimic(Config) ->
+ Username = proplists:get_value(rmq_username, Config),
+ Username2 = <<"guest2">>,
+
+ Vhost = proplists:get_value(rmq_vhost, Config),
+
+ rabbit_ct_broker_helpers:add_user(Config, Username2),
+ rabbit_ct_broker_helpers:set_full_permissions(Config, Username2, Vhost),
+
+ ?assertEqual(0, count_connections_in(Config, Username)),
+ ?assertEqual(0, count_connections_in(Config, Username2)),
+ ?assertEqual(0, count_channels_in(Config, Username)),
+ ?assertEqual(0, count_channels_in(Config, Username2)),
+ ?assertEqual(0, tracked_user_connection_count(Config, Username)),
+ ?assertEqual(0, tracked_user_connection_count(Config, Username2)),
+ ?assertEqual(0, tracked_user_channel_count(Config, Username)),
+ ?assertEqual(0, tracked_user_channel_count(Config, Username2)),
+
+ [Conn1] = open_connections(Config, [{0, Username}]),
+ Chans1 = [_|_] = open_channels(Conn1, 5),
+ ?assertEqual(1, count_connections_in(Config, Username)),
+ ?assertEqual(5, count_channels_in(Config, Username)),
+ ?assertEqual(1, tracked_user_connection_count(Config, Username)),
+ ?assertEqual(5, tracked_user_channel_count(Config, Username)),
+ ?assertEqual(true, is_process_alive(Conn1)),
+ [?assertEqual(true, is_process_alive(Ch)) || Ch <- Chans1],
+
+ [Conn2] = open_connections(Config, [{1, Username2}]),
+ Chans2 = [_|_] = open_channels(Conn2, 5),
+ ?assertEqual(1, count_connections_in(Config, Username2)),
+ ?assertEqual(5, count_channels_in(Config, Username2)),
+ ?assertEqual(1, tracked_user_connection_count(Config, Username2)),
+ ?assertEqual(5, tracked_user_channel_count(Config, Username2)),
+ ?assertEqual(true, is_process_alive(Conn2)),
+ [?assertEqual(true, is_process_alive(Ch)) || Ch <- Chans2],
+
+ mimic_vhost_down(Config, 1, Vhost),
+ timer:sleep(100),
+ ?assertEqual(0, count_connections_in(Config, Username2)),
+ ?assertEqual(0, count_channels_in(Config, Username2)),
+ ?assertEqual(0, tracked_user_connection_count(Config, Username2)),
+ ?assertEqual(0, tracked_user_channel_count(Config, Username2)),
+ ?assertEqual(false, is_process_alive(Conn2)),
+ [?assertEqual(false, is_process_alive(Ch)) || Ch <- Chans2],
+
+    %% gen_event only notifies local handlers; remote connections remain active
+ ?assertEqual(1, count_connections_in(Config, Username)),
+ ?assertEqual(5, count_channels_in(Config, Username)),
+ ?assertEqual(1, tracked_user_connection_count(Config, Username)),
+ ?assertEqual(5, tracked_user_channel_count(Config, Username)),
+ ?assertEqual(true, is_process_alive(Conn1)),
+ [?assertEqual(true, is_process_alive(Ch)) || Ch <- Chans1],
+
+ mimic_vhost_down(Config, 0, Vhost),
+ timer:sleep(100),
+ ?assertEqual(0, count_connections_in(Config, Username)),
+ ?assertEqual(0, count_channels_in(Config, Username)),
+ ?assertEqual(0, tracked_user_connection_count(Config, Username)),
+ ?assertEqual(0, tracked_user_channel_count(Config, Username)),
+ ?assertEqual(false, is_process_alive(Conn1)),
+ [?assertEqual(false, is_process_alive(Ch)) || Ch <- Chans1].
+
+cluster_node_removed(Config) ->
+ Username = proplists:get_value(rmq_username, Config),
+ Username2 = <<"guest2">>,
+
+ Vhost = proplists:get_value(rmq_vhost, Config),
+
+ rabbit_ct_broker_helpers:add_user(Config, Username2),
+ rabbit_ct_broker_helpers:set_full_permissions(Config, Username2, Vhost),
+
+ ?assertEqual(0, count_connections_in(Config, Username)),
+ ?assertEqual(0, count_connections_in(Config, Username2)),
+ ?assertEqual(0, count_channels_in(Config, Username)),
+ ?assertEqual(0, count_channels_in(Config, Username2)),
+ ?assertEqual(0, tracked_user_connection_count(Config, Username)),
+ ?assertEqual(0, tracked_user_connection_count(Config, Username2)),
+ ?assertEqual(0, tracked_user_channel_count(Config, Username)),
+ ?assertEqual(0, tracked_user_channel_count(Config, Username2)),
+
+ [Conn1] = open_connections(Config, [{0, Username}]),
+ Chans1 = [_|_] = open_channels(Conn1, 5),
+ ?assertEqual(1, count_connections_in(Config, Username)),
+ ?assertEqual(5, count_channels_in(Config, Username)),
+ ?assertEqual(1, tracked_user_connection_count(Config, Username)),
+ ?assertEqual(5, tracked_user_channel_count(Config, Username)),
+ ?assertEqual(true, is_process_alive(Conn1)),
+ [?assertEqual(true, is_process_alive(Ch)) || Ch <- Chans1],
+
+ [Conn2] = open_connections(Config, [{1, Username2}]),
+ Chans2 = [_|_] = open_channels(Conn2, 5),
+ ?assertEqual(1, count_connections_in(Config, Username2)),
+ ?assertEqual(5, count_channels_in(Config, Username2)),
+ ?assertEqual(1, tracked_user_connection_count(Config, Username2)),
+ ?assertEqual(5, tracked_user_channel_count(Config, Username2)),
+ ?assertEqual(true, is_process_alive(Conn2)),
+ [?assertEqual(true, is_process_alive(Ch)) || Ch <- Chans2],
+
+ rabbit_ct_broker_helpers:stop_broker(Config, 1),
+ timer:sleep(200),
+ ?assertEqual(1, count_connections_in(Config, Username)),
+ ?assertEqual(5, count_channels_in(Config, Username)),
+ ?assertEqual(1, tracked_user_connection_count(Config, Username)),
+ ?assertEqual(5, tracked_user_channel_count(Config, Username)),
+ ?assertEqual(true, is_process_alive(Conn1)),
+ [?assertEqual(true, is_process_alive(Ch)) || Ch <- Chans1],
+
+ rabbit_ct_broker_helpers:forget_cluster_node(Config, 0, 1),
+ timer:sleep(200),
+ NodeName = rabbit_ct_broker_helpers:get_node_config(Config, 1, nodename),
+
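+    %% Once the node is forgotten, its per-node tracking tables are deleted,
+    %% so mnesia:table_info/2 exits with no_exists for each of them.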
+ DroppedConnTrackingTables =
+ rabbit_connection_tracking:get_all_tracked_connection_table_names_for_node(NodeName),
+ [?assertEqual(
+ {'EXIT', {aborted, {no_exists, Tab, all}}},
+ catch mnesia:table_info(Tab, all)) || Tab <- DroppedConnTrackingTables],
+
+ DroppedChTrackingTables =
+ rabbit_channel_tracking:get_all_tracked_channel_table_names_for_node(NodeName),
+ [?assertEqual(
+ {'EXIT', {aborted, {no_exists, Tab, all}}},
+ catch mnesia:table_info(Tab, all)) || Tab <- DroppedChTrackingTables],
+
+ ?assertEqual(false, is_process_alive(Conn2)),
+ [?assertEqual(false, is_process_alive(Ch)) || Ch <- Chans2],
+
+ ?assertEqual(1, count_connections_in(Config, Username)),
+ ?assertEqual(5, count_channels_in(Config, Username)),
+ ?assertEqual(1, tracked_user_connection_count(Config, Username)),
+ ?assertEqual(5, tracked_user_channel_count(Config, Username)),
+ ?assertEqual(true, is_process_alive(Conn1)),
+ [?assertEqual(true, is_process_alive(Ch)) || Ch <- Chans1],
+
+ close_channels(Chans1),
+ ?awaitMatch(0, count_channels_in(Config, Username), 20000),
+ ?awaitMatch(0, tracked_user_channel_count(Config, Username), 20000),
+
+ close_connections([Conn1]),
+ ?awaitMatch(0, count_connections_in(Config, Username), 20000),
+ ?awaitMatch(0, tracked_user_connection_count(Config, Username), 20000).
+
+%% -------------------------------------------------------------------
+%% Helpers
+%% -------------------------------------------------------------------
+
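+%% Each element of NodesAndUsers is either a node index or a
+%% {NodeIndex, Username} tuple, e.g. (hypothetical arguments)
+%% open_connections(Config, [0, {1, <<"guest2">>}]).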
+open_connections(Config, NodesAndUsers) ->
+    %% Select the connection type configured for this group (network or direct)
+ OpenConnectionFun = case ?config(connection_type, Config) of
+ network -> open_unmanaged_connection;
+ direct -> open_unmanaged_connection_direct
+ end,
+ Conns = lists:map(fun
+ ({Node, User}) ->
+ rabbit_ct_client_helpers:OpenConnectionFun(Config, Node,
+ User, User);
+ (Node) ->
+ rabbit_ct_client_helpers:OpenConnectionFun(Config, Node)
+ end, NodesAndUsers),
+ timer:sleep(500),
+ Conns.
+
+close_connections(Conns) ->
+ lists:foreach(fun
+ (Conn) ->
+ rabbit_ct_client_helpers:close_connection(Conn)
+ end, Conns),
+ timer:sleep(500).
+
+kill_connections(Conns) ->
+ lists:foreach(fun
+ (Conn) ->
+ (catch exit(Conn, please_terminate))
+ end, Conns),
+ timer:sleep(500).
+
+open_channels(Conn, N) ->
+ [begin
+ {ok, Ch} = amqp_connection:open_channel(Conn),
+ Ch
+ end || _ <- lists:seq(1, N)].
+
+close_channels(Channels = [_|_]) ->
+ [rabbit_ct_client_helpers:close_channel(Ch) || Ch <- Channels].
+
+count_connections_in(Config, Username) ->
+ length(connections_in(Config, Username)).
+
+connections_in(Config, Username) ->
+ connections_in(Config, 0, Username).
+connections_in(Config, NodeIndex, Username) ->
+ tracked_list_of_user(Config, NodeIndex, rabbit_connection_tracking, Username).
+
+count_channels_in(Config, Username) ->
+ Channels = channels_in(Config, Username),
+ length([Ch || Ch = #tracked_channel{username = Username0} <- Channels,
+ Username =:= Username0]).
+
+channels_in(Config, Username) ->
+ channels_in(Config, 0, Username).
+channels_in(Config, NodeIndex, Username) ->
+ tracked_list_of_user(Config, NodeIndex, rabbit_channel_tracking, Username).
+
+tracked_list_of_user(Config, NodeIndex, TrackingMod, Username) ->
+ rabbit_ct_broker_helpers:rpc(Config, NodeIndex,
+ TrackingMod,
+ list_of_user, [Username]).
+
+tracked_user_connection_count(Config, Username) ->
+ tracked_user_connection_count(Config, 0, Username).
+tracked_user_connection_count(Config, NodeIndex, Username) ->
+ count_user_tracked_items(Config, NodeIndex, rabbit_connection_tracking, Username).
+
+tracked_user_channel_count(Config, Username) ->
+ tracked_user_channel_count(Config, 0, Username).
+tracked_user_channel_count(Config, NodeIndex, Username) ->
+ count_user_tracked_items(Config, NodeIndex, rabbit_channel_tracking, Username).
+
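+%% Unlike count_channels_in/2, these counts are computed broker-side via
+%% count_tracked_items_in/1 keyed by {user, Username}.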
+count_user_tracked_items(Config, NodeIndex, TrackingMod, Username) ->
+ rabbit_ct_broker_helpers:rpc(Config, NodeIndex,
+ TrackingMod,
+ count_tracked_items_in, [{user, Username}]).
+
+exists_in_tracked_connection_per_vhost_table(Config, VHost) ->
+ exists_in_tracked_connection_per_vhost_table(Config, 0, VHost).
+exists_in_tracked_connection_per_vhost_table(Config, NodeIndex, VHost) ->
+ exists_in_tracking_table(Config, NodeIndex,
+ fun rabbit_connection_tracking:tracked_connection_per_vhost_table_name_for/1,
+ VHost).
+
+exists_in_tracked_connection_per_user_table(Config, Username) ->
+ exists_in_tracked_connection_per_user_table(Config, 0, Username).
+exists_in_tracked_connection_per_user_table(Config, NodeIndex, Username) ->
+ exists_in_tracking_table(Config, NodeIndex,
+ fun rabbit_connection_tracking:tracked_connection_per_user_table_name_for/1,
+ Username).
+
+exists_in_tracked_channel_per_user_table(Config, Username) ->
+ exists_in_tracked_channel_per_user_table(Config, 0, Username).
+exists_in_tracked_channel_per_user_table(Config, NodeIndex, Username) ->
+ exists_in_tracking_table(Config, NodeIndex,
+ fun rabbit_channel_tracking:tracked_channel_per_user_table_name_for/1,
+ Username).
+
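+%% Resolves the node-specific tracking table name via TableNameFun and checks
+%% key membership with a dirty (non-transactional) read on that node.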
+exists_in_tracking_table(Config, NodeIndex, TableNameFun, Key) ->
+ Node = rabbit_ct_broker_helpers:get_node_config(
+ Config, NodeIndex, nodename),
+ Tab = TableNameFun(Node),
+ AllKeys = rabbit_ct_broker_helpers:rpc(Config, NodeIndex,
+ mnesia,
+ dirty_all_keys, [Tab]),
+ lists:member(Key, AllKeys).
+
+mimic_vhost_down(Config, NodeIndex, VHost) ->
+ rabbit_ct_broker_helpers:rpc(Config, NodeIndex,
+ rabbit_vhost, vhost_down, [VHost]).
+
+all_connections(Config) ->
+ all_connections(Config, 0).
+all_connections(Config, NodeIndex) ->
+ all_tracked_items(Config, NodeIndex, rabbit_connection_tracking).
+
+all_channels(Config) ->
+ all_channels(Config, 0).
+all_channels(Config, NodeIndex) ->
+ all_tracked_items(Config, NodeIndex, rabbit_channel_tracking).
+
+all_tracked_items(Config, NodeIndex, TrackingMod) ->
+ rabbit_ct_broker_helpers:rpc(Config, NodeIndex,
+ TrackingMod,
+ list, []).
+
+set_up_vhost(Config, VHost) ->
+ rabbit_ct_broker_helpers:add_vhost(Config, VHost),
+ rabbit_ct_broker_helpers:set_full_permissions(Config, <<"guest">>, VHost),
+ set_vhost_connection_limit(Config, VHost, -1).
+
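+%% Applies the vhost's max-connections limit via the set_vhost_limits CLI
+%% command with a JSON document; a Count of -1 removes the limit.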
+set_vhost_connection_limit(Config, VHost, Count) ->
+ set_vhost_connection_limit(Config, 0, VHost, Count).
+
+set_vhost_connection_limit(Config, NodeIndex, VHost, Count) ->
+ Node = rabbit_ct_broker_helpers:get_node_config(
+ Config, NodeIndex, nodename),
+ ok = rabbit_ct_broker_helpers:control_action(
+ set_vhost_limits, Node,
+ ["{\"max-connections\": " ++ integer_to_list(Count) ++ "}"],
+ [{"-p", binary_to_list(VHost)}]).
+
+set_tracking_execution_timeout(Config, Timeout) ->
+ set_tracking_execution_timeout(Config, 0, Timeout).
+set_tracking_execution_timeout(Config, NodeIndex, Timeout) ->
+ rabbit_ct_broker_helpers:rpc(Config, NodeIndex,
+ application, set_env,
+ [rabbit, tracking_execution_timeout, Timeout]).
+
+get_tracking_execution_timeout(Config) ->
+ get_tracking_execution_timeout(Config, 0).
+get_tracking_execution_timeout(Config, NodeIndex) ->
+ {ok, Timeout} = rabbit_ct_broker_helpers:rpc(
+ Config, NodeIndex,
+ application, get_env,
+ [rabbit, tracking_execution_timeout]),
+ Timeout.
+
+await_running_node_refresh(_Config, _NodeIndex) ->
+ timer:sleep(250).
+
+expect_that_client_connection_is_rejected(Config) ->
+ expect_that_client_connection_is_rejected(Config, 0).
+
+expect_that_client_connection_is_rejected(Config, NodeIndex) ->
+ {error, not_allowed} =
+ rabbit_ct_client_helpers:open_unmanaged_connection(Config, NodeIndex).
+
+expect_that_client_connection_is_rejected(Config, NodeIndex, VHost) ->
+ {error, not_allowed} =
+ rabbit_ct_client_helpers:open_unmanaged_connection(Config, NodeIndex, VHost).
diff --git a/deps/rabbit/test/per_user_connection_tracking_SUITE.erl b/deps/rabbit/test/per_user_connection_tracking_SUITE.erl
new file mode 100644
index 0000000000..36b0962eac
--- /dev/null
+++ b/deps/rabbit/test/per_user_connection_tracking_SUITE.erl
@@ -0,0 +1,269 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(per_user_connection_tracking_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, cluster_size_1_network},
+ {group, cluster_size_2_network},
+ {group, cluster_size_1_direct},
+ {group, cluster_size_2_direct}
+ ].
+
+groups() ->
+ ClusterSize1Tests = [
+ single_node_list_of_user,
+ single_node_user_deletion_forces_connection_closure
+ ],
+ ClusterSize2Tests = [
+ cluster_user_deletion_forces_connection_closure
+ ],
+ [
+ {cluster_size_1_network, [], ClusterSize1Tests},
+ {cluster_size_2_network, [], ClusterSize2Tests},
+ {cluster_size_1_direct, [], ClusterSize1Tests},
+ {cluster_size_2_direct, [], ClusterSize2Tests}
+ ].
+
+suite() ->
+ [
+ %% If a test hangs, no need to wait for 30 minutes.
+ {timetrap, {minutes, 8}}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(cluster_size_1_network, Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config, [{connection_type, network}]),
+ init_per_multinode_group(cluster_size_1_network, Config1, 1);
+init_per_group(cluster_size_2_network, Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config, [{connection_type, network}]),
+ init_per_multinode_group(cluster_size_2_network, Config1, 2);
+init_per_group(cluster_size_1_direct, Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config, [{connection_type, direct}]),
+ init_per_multinode_group(cluster_size_1_direct, Config1, 1);
+init_per_group(cluster_size_2_direct, Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config, [{connection_type, direct}]),
+ init_per_multinode_group(cluster_size_2_direct, Config1, 2).
+
+init_per_multinode_group(_Group, Config, NodeCount) ->
+ Suffix = rabbit_ct_helpers:testcase_absname(Config, "", "-"),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodes_count, NodeCount},
+ {rmq_nodename_suffix, Suffix}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_group(_Group, Config) ->
+ rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase),
+ clear_all_connection_tracking_tables(Config),
+ Config.
+
+end_per_testcase(Testcase, Config) ->
+ clear_all_connection_tracking_tables(Config),
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+clear_all_connection_tracking_tables(Config) ->
+ [rabbit_ct_broker_helpers:rpc(Config,
+ N,
+ rabbit_connection_tracking,
+ clear_tracked_connection_tables_for_this_node,
+ []) || N <- rabbit_ct_broker_helpers:get_node_configs(Config, nodename)].
+
+%% -------------------------------------------------------------------
+%% Test cases.
+%% -------------------------------------------------------------------
+single_node_list_of_user(Config) ->
+ Username = proplists:get_value(rmq_username, Config),
+ Username2 = <<"guest2">>,
+
+ Vhost = proplists:get_value(rmq_vhost, Config),
+
+ rabbit_ct_broker_helpers:add_user(Config, Username2),
+ rabbit_ct_broker_helpers:set_full_permissions(Config, Username2, Vhost),
+
+ ?assertEqual(0, length(connections_in(Config, Username))),
+ ?assertEqual(0, length(connections_in(Config, Username2))),
+
+ [Conn1] = open_connections(Config, [0]),
+ [#tracked_connection{username = Username}] = connections_in(Config, Username),
+ close_connections([Conn1]),
+ ?assertEqual(0, length(connections_in(Config, Username))),
+
+ [Conn2] = open_connections(Config, [{0, Username2}]),
+ [#tracked_connection{username = Username2}] = connections_in(Config, Username2),
+
+ [Conn3] = open_connections(Config, [0]),
+ [#tracked_connection{username = Username}] = connections_in(Config, Username),
+
+ [Conn4] = open_connections(Config, [0]),
+ kill_connections([Conn4]),
+ [#tracked_connection{username = Username}] = connections_in(Config, Username),
+
+ [Conn5] = open_connections(Config, [0]),
+ [Username, Username] =
+ lists:map(fun (#tracked_connection{username = U}) -> U end,
+ connections_in(Config, Username)),
+
+ close_connections([Conn2, Conn3, Conn5]),
+ rabbit_ct_broker_helpers:delete_user(Config, Username2),
+ ?assertEqual(0, length(all_connections(Config))).
+
+single_node_user_deletion_forces_connection_closure(Config) ->
+ Username = proplists:get_value(rmq_username, Config),
+ Username2 = <<"guest2">>,
+
+ Vhost = proplists:get_value(rmq_vhost, Config),
+
+ rabbit_ct_broker_helpers:add_user(Config, Username2),
+ rabbit_ct_broker_helpers:set_full_permissions(Config, Username2, Vhost),
+
+ ?assertEqual(0, count_connections_in(Config, Username)),
+ ?assertEqual(0, count_connections_in(Config, Username2)),
+
+ [Conn1] = open_connections(Config, [0]),
+ ?assertEqual(1, count_connections_in(Config, Username)),
+
+ [_Conn2] = open_connections(Config, [{0, Username2}]),
+ ?assertEqual(1, count_connections_in(Config, Username2)),
+
+ rabbit_ct_broker_helpers:delete_user(Config, Username2),
+ timer:sleep(200),
+ ?assertEqual(0, count_connections_in(Config, Username2)),
+
+ close_connections([Conn1]),
+ ?assertEqual(0, count_connections_in(Config, Username)).
+
+cluster_user_deletion_forces_connection_closure(Config) ->
+ Username = proplists:get_value(rmq_username, Config),
+ Username2 = <<"guest2">>,
+
+ Vhost = proplists:get_value(rmq_vhost, Config),
+
+ rabbit_ct_broker_helpers:add_user(Config, Username2),
+ rabbit_ct_broker_helpers:set_full_permissions(Config, Username2, Vhost),
+
+ ?assertEqual(0, count_connections_in(Config, Username)),
+ ?assertEqual(0, count_connections_in(Config, Username2)),
+
+ [Conn1] = open_connections(Config, [{0, Username}]),
+ ?assertEqual(1, count_connections_in(Config, Username)),
+
+ [_Conn2] = open_connections(Config, [{1, Username2}]),
+ ?assertEqual(1, count_connections_in(Config, Username2)),
+
+ rabbit_ct_broker_helpers:delete_user(Config, Username2),
+ timer:sleep(200),
+ ?assertEqual(0, count_connections_in(Config, Username2)),
+
+ close_connections([Conn1]),
+ ?assertEqual(0, count_connections_in(Config, Username)).
+
+%% -------------------------------------------------------------------
+%% Helpers
+%% -------------------------------------------------------------------
+
+open_connections(Config, NodesAndUsers) ->
+    %% Select the connection type configured for this group (network or direct)
+ OpenConnectionFun = case ?config(connection_type, Config) of
+ network -> open_unmanaged_connection;
+ direct -> open_unmanaged_connection_direct
+ end,
+ Conns = lists:map(fun
+ ({Node, User}) ->
+ rabbit_ct_client_helpers:OpenConnectionFun(Config, Node,
+ User, User);
+ (Node) ->
+ rabbit_ct_client_helpers:OpenConnectionFun(Config, Node)
+ end, NodesAndUsers),
+ timer:sleep(500),
+ Conns.
+
+close_connections(Conns) ->
+ lists:foreach(fun
+ (Conn) ->
+ rabbit_ct_client_helpers:close_connection(Conn)
+ end, Conns),
+ timer:sleep(500).
+
+kill_connections(Conns) ->
+ lists:foreach(fun
+ (Conn) ->
+ (catch exit(Conn, please_terminate))
+ end, Conns),
+ timer:sleep(500).
+
+count_connections_in(Config, Username) ->
+ length(connections_in(Config, Username)).
+
+connections_in(Config, Username) ->
+ connections_in(Config, 0, Username).
+connections_in(Config, NodeIndex, Username) ->
+ rabbit_ct_broker_helpers:rpc(Config, NodeIndex,
+ rabbit_connection_tracking,
+ list_of_user, [Username]).
+
+all_connections(Config) ->
+ all_connections(Config, 0).
+all_connections(Config, NodeIndex) ->
+ rabbit_ct_broker_helpers:rpc(Config, NodeIndex,
+ rabbit_connection_tracking,
+ list, []).
+
+set_up_vhost(Config, VHost) ->
+ rabbit_ct_broker_helpers:add_vhost(Config, VHost),
+ rabbit_ct_broker_helpers:set_full_permissions(Config, <<"guest">>, VHost),
+ set_vhost_connection_limit(Config, VHost, -1).
+
+set_vhost_connection_limit(Config, VHost, Count) ->
+ set_vhost_connection_limit(Config, 0, VHost, Count).
+
+set_vhost_connection_limit(Config, NodeIndex, VHost, Count) ->
+ Node = rabbit_ct_broker_helpers:get_node_config(
+ Config, NodeIndex, nodename),
+ ok = rabbit_ct_broker_helpers:control_action(
+ set_vhost_limits, Node,
+ ["{\"max-connections\": " ++ integer_to_list(Count) ++ "}"],
+ [{"-p", binary_to_list(VHost)}]).
+
+await_running_node_refresh(_Config, _NodeIndex) ->
+ timer:sleep(250).
+
+expect_that_client_connection_is_rejected(Config) ->
+ expect_that_client_connection_is_rejected(Config, 0).
+
+expect_that_client_connection_is_rejected(Config, NodeIndex) ->
+ {error, not_allowed} =
+ rabbit_ct_client_helpers:open_unmanaged_connection(Config, NodeIndex).
+
+expect_that_client_connection_is_rejected(Config, NodeIndex, VHost) ->
+ {error, not_allowed} =
+ rabbit_ct_client_helpers:open_unmanaged_connection(Config, NodeIndex, VHost).
diff --git a/deps/rabbit/test/per_vhost_connection_limit_SUITE.erl b/deps/rabbit/test/per_vhost_connection_limit_SUITE.erl
new file mode 100644
index 0000000000..a140b3e829
--- /dev/null
+++ b/deps/rabbit/test/per_vhost_connection_limit_SUITE.erl
@@ -0,0 +1,751 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(per_vhost_connection_limit_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, cluster_size_1_network},
+ {group, cluster_size_2_network},
+ {group, cluster_size_1_direct},
+ {group, cluster_size_2_direct}
+ ].
+
+groups() ->
+ ClusterSize1Tests = [
+ most_basic_single_node_connection_count,
+ single_node_single_vhost_connection_count,
+ single_node_multiple_vhosts_connection_count,
+ single_node_list_in_vhost,
+ single_node_single_vhost_limit,
+ single_node_single_vhost_zero_limit,
+ single_node_multiple_vhosts_limit,
+ single_node_multiple_vhosts_zero_limit
+ ],
+ ClusterSize2Tests = [
+ most_basic_cluster_connection_count,
+ cluster_single_vhost_connection_count,
+ cluster_multiple_vhosts_connection_count,
+ cluster_node_restart_connection_count,
+ cluster_node_list_on_node,
+ cluster_single_vhost_limit,
+ cluster_single_vhost_limit2,
+ cluster_single_vhost_zero_limit,
+ cluster_multiple_vhosts_zero_limit
+ ],
+ [
+ {cluster_size_1_network, [], ClusterSize1Tests},
+ {cluster_size_2_network, [], ClusterSize2Tests},
+ {cluster_size_1_direct, [], ClusterSize1Tests},
+ {cluster_size_2_direct, [], ClusterSize2Tests},
+ {cluster_rename, [], [
+ vhost_limit_after_node_renamed
+ ]}
+ ].
+
+suite() ->
+ [
+ %% If a test hangs, no need to wait for 30 minutes.
+ {timetrap, {minutes, 8}}
+ ].
+
+%% see partitions_SUITE
+-define(DELAY, 9000).
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(cluster_size_1_network, Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config, [{connection_type, network}]),
+ init_per_multinode_group(cluster_size_1_network, Config1, 1);
+init_per_group(cluster_size_2_network, Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config, [{connection_type, network}]),
+ init_per_multinode_group(cluster_size_2_network, Config1, 2);
+init_per_group(cluster_size_1_direct, Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config, [{connection_type, direct}]),
+ init_per_multinode_group(cluster_size_1_direct, Config1, 1);
+init_per_group(cluster_size_2_direct, Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config, [{connection_type, direct}]),
+ init_per_multinode_group(cluster_size_2_direct, Config1, 2);
+
+init_per_group(cluster_rename, Config) ->
+ init_per_multinode_group(cluster_rename, Config, 2).
+
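+%% For the cluster_rename group, brokers are started and stopped by
+%% {init,end}_per_testcase/2, presumably so the rename testcase has full
+%% control over the broker lifecycle.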
+init_per_multinode_group(Group, Config, NodeCount) ->
+ Suffix = rabbit_ct_helpers:testcase_absname(Config, "", "-"),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodes_count, NodeCount},
+ {rmq_nodename_suffix, Suffix}
+ ]),
+ case Group of
+ cluster_rename ->
+ % The broker is managed by {init,end}_per_testcase().
+ Config1;
+ _ ->
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps())
+ end.
+
+end_per_group(cluster_rename, Config) ->
+ % The broker is managed by {init,end}_per_testcase().
+ Config;
+end_per_group(_Group, Config) ->
+ rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_testcase(vhost_limit_after_node_renamed = Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase),
+ rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps());
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase),
+ clear_all_connection_tracking_tables(Config),
+ Config.
+
+end_per_testcase(vhost_limit_after_node_renamed = Testcase, Config) ->
+ Config1 = ?config(save_config, Config),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()),
+ rabbit_ct_helpers:testcase_finished(Config1, Testcase);
+end_per_testcase(Testcase, Config) ->
+ clear_all_connection_tracking_tables(Config),
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+clear_all_connection_tracking_tables(Config) ->
+ rabbit_ct_broker_helpers:rpc_all(
+ Config,
+ rabbit_connection_tracking,
+ clear_tracked_connection_tables_for_this_node,
+ []).
+
+%% -------------------------------------------------------------------
+%% Test cases.
+%% -------------------------------------------------------------------
+
+most_basic_single_node_connection_count(Config) ->
+ VHost = <<"/">>,
+ ?assertEqual(0, count_connections_in(Config, VHost)),
+ [Conn] = open_connections(Config, [0]),
+ ?assertEqual(1, count_connections_in(Config, VHost)),
+ close_connections([Conn]),
+ ?assertEqual(0, count_connections_in(Config, VHost)).
+
+single_node_single_vhost_connection_count(Config) ->
+ VHost = <<"/">>,
+ ?assertEqual(0, count_connections_in(Config, VHost)),
+
+ [Conn1] = open_connections(Config, [0]),
+ ?assertEqual(1, count_connections_in(Config, VHost)),
+ close_connections([Conn1]),
+ ?assertEqual(0, count_connections_in(Config, VHost)),
+
+ [Conn2] = open_connections(Config, [0]),
+ ?assertEqual(1, count_connections_in(Config, VHost)),
+
+ [Conn3] = open_connections(Config, [0]),
+ ?assertEqual(2, count_connections_in(Config, VHost)),
+
+ [Conn4] = open_connections(Config, [0]),
+ ?assertEqual(3, count_connections_in(Config, VHost)),
+
+ kill_connections([Conn4]),
+ ?assertEqual(2, count_connections_in(Config, VHost)),
+
+ [Conn5] = open_connections(Config, [0]),
+ ?assertEqual(3, count_connections_in(Config, VHost)),
+
+ close_connections([Conn2, Conn3, Conn5]),
+ ?assertEqual(0, count_connections_in(Config, VHost)).
+
+single_node_multiple_vhosts_connection_count(Config) ->
+ VHost1 = <<"vhost1">>,
+ VHost2 = <<"vhost2">>,
+
+ set_up_vhost(Config, VHost1),
+ set_up_vhost(Config, VHost2),
+
+ ?assertEqual(0, count_connections_in(Config, VHost1)),
+ ?assertEqual(0, count_connections_in(Config, VHost2)),
+
+ [Conn1] = open_connections(Config, [{0, VHost1}]),
+ ?assertEqual(1, count_connections_in(Config, VHost1)),
+ close_connections([Conn1]),
+ ?assertEqual(0, count_connections_in(Config, VHost1)),
+
+ [Conn2] = open_connections(Config, [{0, VHost2}]),
+ ?assertEqual(1, count_connections_in(Config, VHost2)),
+
+ [Conn3] = open_connections(Config, [{0, VHost1}]),
+ ?assertEqual(1, count_connections_in(Config, VHost1)),
+ ?assertEqual(1, count_connections_in(Config, VHost2)),
+
+ [Conn4] = open_connections(Config, [{0, VHost1}]),
+ ?assertEqual(2, count_connections_in(Config, VHost1)),
+
+ kill_connections([Conn4]),
+ ?assertEqual(1, count_connections_in(Config, VHost1)),
+
+ [Conn5] = open_connections(Config, [{0, VHost2}]),
+ ?assertEqual(2, count_connections_in(Config, VHost2)),
+
+ [Conn6] = open_connections(Config, [{0, VHost2}]),
+ ?assertEqual(3, count_connections_in(Config, VHost2)),
+
+ close_connections([Conn2, Conn3, Conn5, Conn6]),
+ ?assertEqual(0, count_connections_in(Config, VHost1)),
+ ?assertEqual(0, count_connections_in(Config, VHost2)),
+
+ rabbit_ct_broker_helpers:delete_vhost(Config, VHost1),
+ rabbit_ct_broker_helpers:delete_vhost(Config, VHost2).
+
+single_node_list_in_vhost(Config) ->
+ VHost1 = <<"vhost1">>,
+ VHost2 = <<"vhost2">>,
+
+ set_up_vhost(Config, VHost1),
+ set_up_vhost(Config, VHost2),
+
+ ?assertEqual(0, length(connections_in(Config, VHost1))),
+ ?assertEqual(0, length(connections_in(Config, VHost2))),
+
+ [Conn1] = open_connections(Config, [{0, VHost1}]),
+ [#tracked_connection{vhost = VHost1}] = connections_in(Config, VHost1),
+ close_connections([Conn1]),
+ ?assertEqual(0, length(connections_in(Config, VHost1))),
+
+ [Conn2] = open_connections(Config, [{0, VHost2}]),
+ [#tracked_connection{vhost = VHost2}] = connections_in(Config, VHost2),
+
+ [Conn3] = open_connections(Config, [{0, VHost1}]),
+ [#tracked_connection{vhost = VHost1}] = connections_in(Config, VHost1),
+
+ [Conn4] = open_connections(Config, [{0, VHost1}]),
+ kill_connections([Conn4]),
+ [#tracked_connection{vhost = VHost1}] = connections_in(Config, VHost1),
+
+ [Conn5, Conn6] = open_connections(Config, [{0, VHost2}, {0, VHost2}]),
+ [<<"vhost1">>, <<"vhost2">>] =
+ lists:usort(lists:map(fun (#tracked_connection{vhost = V}) -> V end,
+ all_connections(Config))),
+
+ close_connections([Conn2, Conn3, Conn5, Conn6]),
+ ?assertEqual(0, length(all_connections(Config))),
+
+ rabbit_ct_broker_helpers:delete_vhost(Config, VHost1),
+ rabbit_ct_broker_helpers:delete_vhost(Config, VHost2).
+
+most_basic_cluster_connection_count(Config) ->
+ VHost = <<"/">>,
+ ?assertEqual(0, count_connections_in(Config, VHost)),
+ [Conn1] = open_connections(Config, [0]),
+ ?assertEqual(1, count_connections_in(Config, VHost)),
+
+ [Conn2] = open_connections(Config, [1]),
+ ?assertEqual(2, count_connections_in(Config, VHost)),
+
+ [Conn3] = open_connections(Config, [1]),
+ ?assertEqual(3, count_connections_in(Config, VHost)),
+
+ close_connections([Conn1, Conn2, Conn3]),
+ ?assertEqual(0, count_connections_in(Config, VHost)).
+
+cluster_single_vhost_connection_count(Config) ->
+ VHost = <<"/">>,
+ ?assertEqual(0, count_connections_in(Config, VHost)),
+
+ [Conn1] = open_connections(Config, [0]),
+ ?assertEqual(1, count_connections_in(Config, VHost)),
+ close_connections([Conn1]),
+ ?assertEqual(0, count_connections_in(Config, VHost)),
+
+ [Conn2] = open_connections(Config, [1]),
+ ?assertEqual(1, count_connections_in(Config, VHost)),
+
+ [Conn3] = open_connections(Config, [0]),
+ ?assertEqual(2, count_connections_in(Config, VHost)),
+
+ [Conn4] = open_connections(Config, [1]),
+ ?assertEqual(3, count_connections_in(Config, VHost)),
+
+ kill_connections([Conn4]),
+ ?assertEqual(2, count_connections_in(Config, VHost)),
+
+ [Conn5] = open_connections(Config, [1]),
+ ?assertEqual(3, count_connections_in(Config, VHost)),
+
+ close_connections([Conn2, Conn3, Conn5]),
+ ?assertEqual(0, count_connections_in(Config, VHost)).
+
+cluster_multiple_vhosts_connection_count(Config) ->
+ VHost1 = <<"vhost1">>,
+ VHost2 = <<"vhost2">>,
+
+ set_up_vhost(Config, VHost1),
+ set_up_vhost(Config, VHost2),
+
+ ?assertEqual(0, count_connections_in(Config, VHost1)),
+ ?assertEqual(0, count_connections_in(Config, VHost2)),
+
+ [Conn1] = open_connections(Config, [{0, VHost1}]),
+ ?assertEqual(1, count_connections_in(Config, VHost1)),
+ close_connections([Conn1]),
+ ?assertEqual(0, count_connections_in(Config, VHost1)),
+
+ [Conn2] = open_connections(Config, [{1, VHost2}]),
+ ?assertEqual(1, count_connections_in(Config, VHost2)),
+
+ [Conn3] = open_connections(Config, [{1, VHost1}]),
+ ?assertEqual(1, count_connections_in(Config, VHost1)),
+ ?assertEqual(1, count_connections_in(Config, VHost2)),
+
+ [Conn4] = open_connections(Config, [{0, VHost1}]),
+ ?assertEqual(2, count_connections_in(Config, VHost1)),
+
+ kill_connections([Conn4]),
+ ?assertEqual(1, count_connections_in(Config, VHost1)),
+
+ [Conn5] = open_connections(Config, [{1, VHost2}]),
+ ?assertEqual(2, count_connections_in(Config, VHost2)),
+
+ [Conn6] = open_connections(Config, [{0, VHost2}]),
+ ?assertEqual(3, count_connections_in(Config, VHost2)),
+
+ close_connections([Conn2, Conn3, Conn5, Conn6]),
+ ?assertEqual(0, count_connections_in(Config, VHost1)),
+ ?assertEqual(0, count_connections_in(Config, VHost2)),
+
+ rabbit_ct_broker_helpers:delete_vhost(Config, VHost1),
+ rabbit_ct_broker_helpers:delete_vhost(Config, VHost2).
+
+cluster_node_restart_connection_count(Config) ->
+ VHost = <<"/">>,
+ ?assertEqual(0, count_connections_in(Config, VHost)),
+
+ [Conn1] = open_connections(Config, [0]),
+ ?assertEqual(1, count_connections_in(Config, VHost)),
+ close_connections([Conn1]),
+ ?assertEqual(0, count_connections_in(Config, VHost)),
+
+ [Conn2] = open_connections(Config, [1]),
+ ?assertEqual(1, count_connections_in(Config, VHost)),
+
+ [Conn3] = open_connections(Config, [0]),
+ ?assertEqual(2, count_connections_in(Config, VHost)),
+
+ [Conn4] = open_connections(Config, [1]),
+ ?assertEqual(3, count_connections_in(Config, VHost)),
+
+ [Conn5] = open_connections(Config, [1]),
+ ?assertEqual(4, count_connections_in(Config, VHost)),
+
+ rabbit_ct_broker_helpers:restart_broker(Config, 1),
+ ?assertEqual(1, count_connections_in(Config, VHost)),
+
+ close_connections([Conn2, Conn3, Conn4, Conn5]),
+ ?assertEqual(0, count_connections_in(Config, VHost)).
+
+cluster_node_list_on_node(Config) ->
+ [A, B] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ ?assertEqual(0, length(all_connections(Config))),
+ ?assertEqual(0, length(connections_on_node(Config, 0))),
+
+ [Conn1] = open_connections(Config, [0]),
+ [#tracked_connection{node = A}] = connections_on_node(Config, 0),
+ close_connections([Conn1]),
+ ?assertEqual(0, length(connections_on_node(Config, 0))),
+
+ [_Conn2] = open_connections(Config, [1]),
+ [#tracked_connection{node = B}] = connections_on_node(Config, 1),
+
+ [Conn3] = open_connections(Config, [0]),
+ ?assertEqual(1, length(connections_on_node(Config, 0))),
+
+ [Conn4] = open_connections(Config, [1]),
+ ?assertEqual(2, length(connections_on_node(Config, 1))),
+
+ kill_connections([Conn4]),
+ ?assertEqual(1, length(connections_on_node(Config, 1))),
+
+ [Conn5] = open_connections(Config, [0]),
+ ?assertEqual(2, length(connections_on_node(Config, 0))),
+
+ rabbit_ct_broker_helpers:stop_broker(Config, 1),
+ await_running_node_refresh(Config, 0),
+
+ ?assertEqual(2, length(all_connections(Config))),
+ ?assertEqual(0, length(connections_on_node(Config, 0, B))),
+
+ close_connections([Conn3, Conn5]),
+ ?assertEqual(0, length(all_connections(Config, 0))),
+
+ rabbit_ct_broker_helpers:start_broker(Config, 1).
+
+single_node_single_vhost_limit(Config) ->
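+    %% a negative limit (-1) is treated as "no limit", so this exercises
+    %% both a small positive limit and an unlimited setting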
+ single_node_single_vhost_limit_with(Config, 5),
+ single_node_single_vhost_limit_with(Config, -1).
+
+single_node_single_vhost_limit_with(Config, WatermarkLimit) ->
+ VHost = <<"/">>,
+ set_vhost_connection_limit(Config, VHost, 3),
+
+ ?assertEqual(0, count_connections_in(Config, VHost)),
+
+ [Conn1, Conn2, Conn3] = open_connections(Config, [0, 0, 0]),
+
+    %% the limit has been reached, so further connection attempts are rejected
+ expect_that_client_connection_is_rejected(Config, 0),
+ expect_that_client_connection_is_rejected(Config, 0),
+ expect_that_client_connection_is_rejected(Config, 0),
+
+ set_vhost_connection_limit(Config, VHost, WatermarkLimit),
+ [Conn4, Conn5] = open_connections(Config, [0, 0]),
+
+ close_connections([Conn1, Conn2, Conn3, Conn4, Conn5]),
+ ?assertEqual(0, count_connections_in(Config, VHost)),
+
+ set_vhost_connection_limit(Config, VHost, -1).
+
+single_node_single_vhost_zero_limit(Config) ->
+ VHost = <<"/">>,
+ set_vhost_connection_limit(Config, VHost, 0),
+
+ ?assertEqual(0, count_connections_in(Config, VHost)),
+
+ %% with limit = 0 no connections are allowed
+ expect_that_client_connection_is_rejected(Config),
+ expect_that_client_connection_is_rejected(Config),
+ expect_that_client_connection_is_rejected(Config),
+
+ set_vhost_connection_limit(Config, VHost, -1),
+ [Conn1, Conn2] = open_connections(Config, [0, 0]),
+
+ close_connections([Conn1, Conn2]),
+ ?assertEqual(0, count_connections_in(Config, VHost)).
+
+
+single_node_multiple_vhosts_limit(Config) ->
+ VHost1 = <<"vhost1">>,
+ VHost2 = <<"vhost2">>,
+
+ set_up_vhost(Config, VHost1),
+ set_up_vhost(Config, VHost2),
+
+ set_vhost_connection_limit(Config, VHost1, 2),
+ set_vhost_connection_limit(Config, VHost2, 2),
+
+ ?assertEqual(0, count_connections_in(Config, VHost1)),
+ ?assertEqual(0, count_connections_in(Config, VHost2)),
+
+ [Conn1, Conn2, Conn3, Conn4] = open_connections(Config, [
+ {0, VHost1},
+ {0, VHost1},
+ {0, VHost2},
+ {0, VHost2}]),
+
+    %% the limits have been reached, so further connection attempts are rejected
+ expect_that_client_connection_is_rejected(Config, 0, VHost1),
+ expect_that_client_connection_is_rejected(Config, 0, VHost2),
+
+ [Conn5] = open_connections(Config, [0]),
+
+ set_vhost_connection_limit(Config, VHost1, 5),
+ set_vhost_connection_limit(Config, VHost2, -10),
+
+ [Conn6, Conn7, Conn8, Conn9, Conn10] = open_connections(Config, [
+ {0, VHost1},
+ {0, VHost1},
+ {0, VHost1},
+ {0, VHost2},
+ {0, VHost2}]),
+
+ close_connections([Conn1, Conn2, Conn3, Conn4, Conn5,
+ Conn6, Conn7, Conn8, Conn9, Conn10]),
+ ?assertEqual(0, count_connections_in(Config, VHost1)),
+ ?assertEqual(0, count_connections_in(Config, VHost2)),
+
+ set_vhost_connection_limit(Config, VHost1, -1),
+ set_vhost_connection_limit(Config, VHost2, -1),
+
+ rabbit_ct_broker_helpers:delete_vhost(Config, VHost1),
+ rabbit_ct_broker_helpers:delete_vhost(Config, VHost2).
+
+
+single_node_multiple_vhosts_zero_limit(Config) ->
+ VHost1 = <<"vhost1">>,
+ VHost2 = <<"vhost2">>,
+
+ set_up_vhost(Config, VHost1),
+ set_up_vhost(Config, VHost2),
+
+ set_vhost_connection_limit(Config, VHost1, 0),
+ set_vhost_connection_limit(Config, VHost2, 0),
+
+ ?assertEqual(0, count_connections_in(Config, VHost1)),
+ ?assertEqual(0, count_connections_in(Config, VHost2)),
+
+ %% with limit = 0 no connections are allowed
+ expect_that_client_connection_is_rejected(Config, 0, VHost1),
+ expect_that_client_connection_is_rejected(Config, 0, VHost2),
+ expect_that_client_connection_is_rejected(Config, 0, VHost1),
+
+ set_vhost_connection_limit(Config, VHost1, -1),
+ [Conn1, Conn2] = open_connections(Config, [{0, VHost1}, {0, VHost1}]),
+
+ close_connections([Conn1, Conn2]),
+ ?assertEqual(0, count_connections_in(Config, VHost1)),
+ ?assertEqual(0, count_connections_in(Config, VHost2)),
+
+ set_vhost_connection_limit(Config, VHost1, -1),
+ set_vhost_connection_limit(Config, VHost2, -1).
+
+
+cluster_single_vhost_limit(Config) ->
+ VHost = <<"/">>,
+ set_vhost_connection_limit(Config, VHost, 2),
+
+ ?assertEqual(0, count_connections_in(Config, VHost)),
+
+ %% here connections are opened to different nodes
+ [Conn1, Conn2] = open_connections(Config, [{0, VHost}, {1, VHost}]),
+
+    %% the limit has been reached, so further connection attempts are rejected
+ expect_that_client_connection_is_rejected(Config, 0, VHost),
+ expect_that_client_connection_is_rejected(Config, 1, VHost),
+
+ set_vhost_connection_limit(Config, VHost, 5),
+
+ [Conn3, Conn4] = open_connections(Config, [{0, VHost}, {0, VHost}]),
+
+ close_connections([Conn1, Conn2, Conn3, Conn4]),
+ ?assertEqual(0, count_connections_in(Config, VHost)),
+
+ set_vhost_connection_limit(Config, VHost, -1).
+
+cluster_single_vhost_limit2(Config) ->
+ VHost = <<"/">>,
+ set_vhost_connection_limit(Config, VHost, 2),
+
+ ?assertEqual(0, count_connections_in(Config, VHost)),
+
+ %% here a limit is reached on one node first
+ [Conn1, Conn2] = open_connections(Config, [{0, VHost}, {0, VHost}]),
+
+    %% the limit has been reached, so further connection attempts are rejected
+ expect_that_client_connection_is_rejected(Config, 0, VHost),
+ expect_that_client_connection_is_rejected(Config, 1, VHost),
+
+ set_vhost_connection_limit(Config, VHost, 5),
+
+ [Conn3, Conn4, Conn5, {error, not_allowed}] = open_connections(Config, [
+ {1, VHost},
+ {1, VHost},
+ {1, VHost},
+ {1, VHost}]),
+
+ close_connections([Conn1, Conn2, Conn3, Conn4, Conn5]),
+ ?assertEqual(0, count_connections_in(Config, VHost)),
+
+ set_vhost_connection_limit(Config, VHost, -1).
+
+
+cluster_single_vhost_zero_limit(Config) ->
+ VHost = <<"/">>,
+ set_vhost_connection_limit(Config, VHost, 0),
+
+ ?assertEqual(0, count_connections_in(Config, VHost)),
+
+ %% with limit = 0 no connections are allowed
+ expect_that_client_connection_is_rejected(Config, 0),
+ expect_that_client_connection_is_rejected(Config, 1),
+ expect_that_client_connection_is_rejected(Config, 0),
+
+ set_vhost_connection_limit(Config, VHost, -1),
+ [Conn1, Conn2, Conn3, Conn4] = open_connections(Config, [0, 1, 0, 1]),
+
+ close_connections([Conn1, Conn2, Conn3, Conn4]),
+ ?assertEqual(0, count_connections_in(Config, VHost)),
+
+ set_vhost_connection_limit(Config, VHost, -1).
+
+
+cluster_multiple_vhosts_zero_limit(Config) ->
+ VHost1 = <<"vhost1">>,
+ VHost2 = <<"vhost2">>,
+
+ set_up_vhost(Config, VHost1),
+ set_up_vhost(Config, VHost2),
+
+ set_vhost_connection_limit(Config, VHost1, 0),
+ set_vhost_connection_limit(Config, VHost2, 0),
+
+ ?assertEqual(0, count_connections_in(Config, VHost1)),
+ ?assertEqual(0, count_connections_in(Config, VHost2)),
+
+ %% with limit = 0 no connections are allowed
+ expect_that_client_connection_is_rejected(Config, 0, VHost1),
+ expect_that_client_connection_is_rejected(Config, 0, VHost2),
+ expect_that_client_connection_is_rejected(Config, 1, VHost1),
+ expect_that_client_connection_is_rejected(Config, 1, VHost2),
+
+ set_vhost_connection_limit(Config, VHost1, -1),
+ set_vhost_connection_limit(Config, VHost2, -1),
+
+ [Conn1, Conn2, Conn3, Conn4] = open_connections(Config, [
+ {0, VHost1},
+ {0, VHost2},
+ {1, VHost1},
+ {1, VHost2}]),
+
+ close_connections([Conn1, Conn2, Conn3, Conn4]),
+ ?assertEqual(0, count_connections_in(Config, VHost1)),
+ ?assertEqual(0, count_connections_in(Config, VHost2)),
+
+ set_vhost_connection_limit(Config, VHost1, -1),
+ set_vhost_connection_limit(Config, VHost2, -1).
+
+vhost_limit_after_node_renamed(Config) ->
+ A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+
+ VHost = <<"/renaming_node">>,
+ set_up_vhost(Config, VHost),
+ set_vhost_connection_limit(Config, VHost, 2),
+
+ ?assertEqual(0, count_connections_in(Config, VHost)),
+
+ [Conn1, Conn2, {error, not_allowed}] = open_connections(Config,
+ [{0, VHost}, {1, VHost}, {0, VHost}]),
+ ?assertEqual(2, count_connections_in(Config, VHost)),
+ close_connections([Conn1, Conn2]),
+
+ Config1 = cluster_rename_SUITE:stop_rename_start(Config, A, [A, 'new-A']),
+
+ ?assertEqual(0, count_connections_in(Config1, VHost)),
+
+ [Conn3, Conn4, {error, not_allowed}] = open_connections(Config,
+ [{0, VHost}, {1, VHost}, {0, VHost}]),
+ ?assertEqual(2, count_connections_in(Config1, VHost)),
+ close_connections([Conn3, Conn4]),
+
+ set_vhost_connection_limit(Config1, VHost, -1),
+ {save_config, Config1}.
+
+%% -------------------------------------------------------------------
+%% Helpers
+%% -------------------------------------------------------------------
+
+open_connections(Config, NodesAndVHosts) ->
+    % Select the connection type configured for this test group
+ OpenConnectionFun = case ?config(connection_type, Config) of
+ network -> open_unmanaged_connection;
+ direct -> open_unmanaged_connection_direct
+ end,
+ Conns = lists:map(fun
+ ({Node, VHost}) ->
+ rabbit_ct_client_helpers:OpenConnectionFun(Config, Node,
+ VHost);
+ (Node) ->
+ rabbit_ct_client_helpers:OpenConnectionFun(Config, Node)
+ end, NodesAndVHosts),
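+    %% connection tracking appears to be asynchronous, so give the tracker
+    %% a moment to record the new connections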
+ timer:sleep(500),
+ Conns.
+
+close_connections(Conns) ->
+ lists:foreach(fun
+ (Conn) ->
+ rabbit_ct_client_helpers:close_connection(Conn)
+ end, Conns),
+ timer:sleep(500).
+
+kill_connections(Conns) ->
+ lists:foreach(fun
+ (Conn) ->
+ (catch exit(Conn, please_terminate))
+ end, Conns),
+ timer:sleep(500).
+
+count_connections_in(Config, VHost) ->
+ count_connections_in(Config, VHost, 0).
+count_connections_in(Config, VHost, NodeIndex) ->
+ timer:sleep(200),
+ rabbit_ct_broker_helpers:rpc(Config, NodeIndex,
+ rabbit_connection_tracking,
+ count_tracked_items_in, [{vhost, VHost}]).
+
+connections_in(Config, VHost) ->
+ connections_in(Config, 0, VHost).
+connections_in(Config, NodeIndex, VHost) ->
+ rabbit_ct_broker_helpers:rpc(Config, NodeIndex,
+ rabbit_connection_tracking,
+ list, [VHost]).
+
+connections_on_node(Config) ->
+ connections_on_node(Config, 0).
+connections_on_node(Config, NodeIndex) ->
+ Node = rabbit_ct_broker_helpers:get_node_config(Config, NodeIndex, nodename),
+ rabbit_ct_broker_helpers:rpc(Config, NodeIndex,
+ rabbit_connection_tracking,
+ list_on_node, [Node]).
+connections_on_node(Config, NodeIndex, NodeForListing) ->
+ rabbit_ct_broker_helpers:rpc(Config, NodeIndex,
+ rabbit_connection_tracking,
+ list_on_node, [NodeForListing]).
+
+all_connections(Config) ->
+ all_connections(Config, 0).
+all_connections(Config, NodeIndex) ->
+ rabbit_ct_broker_helpers:rpc(Config, NodeIndex,
+ rabbit_connection_tracking,
+ list, []).
+
+set_up_vhost(Config, VHost) ->
+ rabbit_ct_broker_helpers:add_vhost(Config, VHost),
+ rabbit_ct_broker_helpers:set_full_permissions(Config, <<"guest">>, VHost),
+ set_vhost_connection_limit(Config, VHost, -1).
+
+set_vhost_connection_limit(Config, VHost, Count) ->
+ set_vhost_connection_limit(Config, 0, VHost, Count).
+
+set_vhost_connection_limit(Config, NodeIndex, VHost, Count) ->
+ Node = rabbit_ct_broker_helpers:get_node_config(
+ Config, NodeIndex, nodename),
+ ok = rabbit_ct_broker_helpers:control_action(
+ set_vhost_limits, Node,
+ ["{\"max-connections\": " ++ integer_to_list(Count) ++ "}"],
+ [{"-p", binary_to_list(VHost)}]).
+
+await_running_node_refresh(_Config, _NodeIndex) ->
+ timer:sleep(250).
+
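+%% When the target vhost is at its connection limit, the broker refuses the
+%% handshake and the client helper surfaces this as {error, not_allowed}.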
+expect_that_client_connection_is_rejected(Config) ->
+ expect_that_client_connection_is_rejected(Config, 0).
+
+expect_that_client_connection_is_rejected(Config, NodeIndex) ->
+ {error, not_allowed} =
+ rabbit_ct_client_helpers:open_unmanaged_connection(Config, NodeIndex).
+
+expect_that_client_connection_is_rejected(Config, NodeIndex, VHost) ->
+ {error, not_allowed} =
+ rabbit_ct_client_helpers:open_unmanaged_connection(Config, NodeIndex, VHost).
diff --git a/deps/rabbit/test/per_vhost_connection_limit_partitions_SUITE.erl b/deps/rabbit/test/per_vhost_connection_limit_partitions_SUITE.erl
new file mode 100644
index 0000000000..2748d95592
--- /dev/null
+++ b/deps/rabbit/test/per_vhost_connection_limit_partitions_SUITE.erl
@@ -0,0 +1,150 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(per_vhost_connection_limit_partitions_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-compile(export_all).
+
+-import(rabbit_ct_client_helpers, [open_unmanaged_connection/2,
+ open_unmanaged_connection/3]).
+
+
+all() ->
+ [
+ {group, net_ticktime_1}
+ ].
+
+groups() ->
+ [
+ {net_ticktime_1, [], [
+ cluster_full_partition_with_autoheal
+ ]}
+ ].
+
+suite() ->
+ [
+ %% If a test hangs, no need to wait for 30 minutes.
+ {timetrap, {minutes, 8}}
+ ].
+
+%% see partitions_SUITE
+-define(DELAY, 12000).
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config, [
+ fun rabbit_ct_broker_helpers:configure_dist_proxy/1
+ ]).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(net_ticktime_1 = Group, Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config, [{net_ticktime, 1}]),
+ init_per_multinode_group(Group, Config1, 3).
+
+init_per_multinode_group(_Group, Config, NodeCount) ->
+ Suffix = rabbit_ct_helpers:testcase_absname(Config, "", "-"),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodes_count, NodeCount},
+ {rmq_nodename_suffix, Suffix}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_group(_Group, Config) ->
+ rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+%% -------------------------------------------------------------------
+%% Test cases.
+%% -------------------------------------------------------------------
+
+cluster_full_partition_with_autoheal(Config) ->
+ VHost = <<"/">>,
+ rabbit_ct_broker_helpers:set_partition_handling_mode_globally(Config, autoheal),
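+    %% with autoheal, the losing partition's nodes are restarted once the
+    %% partition heals, dropping their client connections in the process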
+
+ ?assertEqual(0, count_connections_in(Config, VHost)),
+ [A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ %% 6 connections, 2 per node
+ Conn1 = open_unmanaged_connection(Config, A),
+ Conn2 = open_unmanaged_connection(Config, A),
+ Conn3 = open_unmanaged_connection(Config, B),
+ Conn4 = open_unmanaged_connection(Config, B),
+ Conn5 = open_unmanaged_connection(Config, C),
+ Conn6 = open_unmanaged_connection(Config, C),
+ wait_for_count_connections_in(Config, VHost, 6, 60000),
+
+    %% B drops off the network, unreachable by both A and C
+ rabbit_ct_broker_helpers:block_traffic_between(A, B),
+ rabbit_ct_broker_helpers:block_traffic_between(B, C),
+ timer:sleep(?DELAY),
+
+ %% A and C are still connected, so 4 connections are tracked
+ wait_for_count_connections_in(Config, VHost, 4, 60000),
+
+ rabbit_ct_broker_helpers:allow_traffic_between(A, B),
+ rabbit_ct_broker_helpers:allow_traffic_between(B, C),
+ timer:sleep(?DELAY),
+
+ %% during autoheal B's connections were dropped
+ wait_for_count_connections_in(Config, VHost, 4, 60000),
+
+ lists:foreach(fun (Conn) ->
+ (catch rabbit_ct_client_helpers:close_connection(Conn))
+ end, [Conn1, Conn2, Conn3, Conn4,
+ Conn5, Conn6]),
+
+ passed.
+
+%% -------------------------------------------------------------------
+%% Helpers
+%% -------------------------------------------------------------------
+
+wait_for_count_connections_in(Config, VHost, Expected, Time) when Time =< 0 ->
+ ?assertMatch(Connections when length(Connections) == Expected,
+ connections_in(Config, VHost));
+wait_for_count_connections_in(Config, VHost, Expected, Time) ->
+ case connections_in(Config, VHost) of
+ Connections when length(Connections) == Expected ->
+ ok;
+ _ ->
+ Sleep = 3000,
+ timer:sleep(Sleep),
+ wait_for_count_connections_in(Config, VHost, Expected, Time - Sleep)
+ end.
+
+count_connections_in(Config, VHost) ->
+ count_connections_in(Config, VHost, 0).
+count_connections_in(Config, VHost, NodeIndex) ->
+ rabbit_ct_broker_helpers:rpc(Config, NodeIndex,
+ rabbit_connection_tracking,
+ count_tracked_items_in, [{vhost, VHost}]).
+
+connections_in(Config, VHost) ->
+ connections_in(Config, 0, VHost).
+connections_in(Config, NodeIndex, VHost) ->
+ rabbit_ct_broker_helpers:rpc(Config, NodeIndex,
+ rabbit_connection_tracking,
+ list, [VHost]).
diff --git a/deps/rabbit/test/per_vhost_msg_store_SUITE.erl b/deps/rabbit/test/per_vhost_msg_store_SUITE.erl
new file mode 100644
index 0000000000..8364d69462
--- /dev/null
+++ b/deps/rabbit/test/per_vhost_msg_store_SUITE.erl
@@ -0,0 +1,245 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(per_vhost_msg_store_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-compile(export_all).
+
+-define(MSGS_COUNT, 100).
+
+all() ->
+ [
+ publish_to_different_dirs,
+ storage_deleted_on_vhost_delete,
+ single_vhost_storage_delete_is_safe
+ ].
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ Config1 = rabbit_ct_helpers:set_config(
+ Config,
+ [{rmq_nodename_suffix, ?MODULE}]),
+ Config2 = rabbit_ct_helpers:merge_app_env(
+ Config1,
+ {rabbit, [{queue_index_embed_msgs_below, 100}]}),
+ rabbit_ct_helpers:run_setup_steps(
+ Config2,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(
+ Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_testcase(_, Config) ->
+ Vhost1 = <<"vhost1">>,
+ Vhost2 = <<"vhost2">>,
+ rabbit_ct_broker_helpers:add_vhost(Config, Vhost1),
+ rabbit_ct_broker_helpers:add_vhost(Config, Vhost2),
+ Chan1 = open_channel(Vhost1, Config),
+ Chan2 = open_channel(Vhost2, Config),
+ rabbit_ct_helpers:set_config(
+ Config,
+ [{vhost1, Vhost1}, {vhost2, Vhost2},
+ {channel1, Chan1}, {channel2, Chan2}]).
+
+end_per_testcase(single_vhost_storage_delete_is_safe, Config) ->
+ Config;
+end_per_testcase(_, Config) ->
+ Vhost1 = ?config(vhost1, Config),
+ Vhost2 = ?config(vhost2, Config),
+ rabbit_ct_broker_helpers:delete_vhost(Config, Vhost1),
+ rabbit_ct_broker_helpers:delete_vhost(Config, Vhost2),
+ Config.
+
+publish_to_different_dirs(Config) ->
+ Vhost1 = ?config(vhost1, Config),
+ Vhost2 = ?config(vhost2, Config),
+ Channel1 = ?config(channel1, Config),
+ Channel2 = ?config(channel2, Config),
+ Queue1 = declare_durable_queue(Channel1),
+ Queue2 = declare_durable_queue(Channel2),
+ FolderSize1 = get_folder_size(Vhost1, Config),
+ FolderSize2 = get_folder_size(Vhost2, Config),
+
+    %% Publish messages to the queue index
+    publish_persistent_messages(index, Channel1, Queue1),
+    %% First storage increased
+    FolderSize11 = get_folder_size(Vhost1, Config),
+    true = (FolderSize1 < FolderSize11),
+    %% Second storage didn't increase
+ FolderSize2 = get_folder_size(Vhost2, Config),
+
+    %% Publish messages to the message store
+    publish_persistent_messages(store, Channel1, Queue1),
+    %% First storage increased
+    FolderSize12 = get_folder_size(Vhost1, Config),
+    true = (FolderSize11 < FolderSize12),
+    %% Second storage didn't increase
+ FolderSize2 = get_folder_size(Vhost2, Config),
+
+    %% Publish messages to the queue index
+    publish_persistent_messages(index, Channel2, Queue2),
+    %% Second storage increased
+    FolderSize21 = get_folder_size(Vhost2, Config),
+    true = (FolderSize2 < FolderSize21),
+    %% First storage didn't increase
+ FolderSize12 = get_folder_size(Vhost1, Config),
+
+    %% Publish messages to the message store
+    publish_persistent_messages(store, Channel2, Queue2),
+    %% Second storage increased
+    FolderSize22 = get_folder_size(Vhost2, Config),
+    true = (FolderSize21 < FolderSize22),
+    %% First storage didn't increase
+ FolderSize12 = get_folder_size(Vhost1, Config).
+
+storage_deleted_on_vhost_delete(Config) ->
+ Vhost1 = ?config(vhost1, Config),
+ Channel1 = ?config(channel1, Config),
+ Queue1 = declare_durable_queue(Channel1),
+ FolderSize = get_global_folder_size(Config),
+
+ publish_persistent_messages(index, Channel1, Queue1),
+ publish_persistent_messages(store, Channel1, Queue1),
+ FolderSizeAfterPublish = get_global_folder_size(Config),
+
+ %% Total storage size increased
+ true = (FolderSize < FolderSizeAfterPublish),
+
+ ok = rabbit_ct_broker_helpers:delete_vhost(Config, Vhost1),
+
+    %% Total storage size reduced
+ FolderSizeAfterDelete = get_global_folder_size(Config),
+ true = (FolderSizeAfterPublish > FolderSizeAfterDelete),
+
+ %% There is no Vhost1 folder
+ 0 = get_folder_size(Vhost1, Config).
+
+
+single_vhost_storage_delete_is_safe(Config) ->
+ct:pal("Start test 3", []),
+ Vhost1 = ?config(vhost1, Config),
+ Vhost2 = ?config(vhost2, Config),
+ Channel1 = ?config(channel1, Config),
+ Channel2 = ?config(channel2, Config),
+ Queue1 = declare_durable_queue(Channel1),
+ Queue2 = declare_durable_queue(Channel2),
+
+ %% Publish messages to both stores
+ publish_persistent_messages(index, Channel1, Queue1),
+ publish_persistent_messages(store, Channel1, Queue1),
+ publish_persistent_messages(index, Channel2, Queue2),
+ publish_persistent_messages(store, Channel2, Queue2),
+
+ queue_is_not_empty(Channel2, Queue2),
+ rabbit_ct_broker_helpers:stop_broker(Config, 0),
+ delete_vhost_data(Vhost1, Config),
+ rabbit_ct_broker_helpers:start_broker(Config, 0),
+
+ Channel11 = open_channel(Vhost1, Config),
+ Channel21 = open_channel(Vhost2, Config),
+
+ %% There are no Vhost1 messages
+ queue_is_empty(Channel11, Queue1),
+
+ %% But Vhost2 messages are in place
+ queue_is_not_empty(Channel21, Queue2),
+ consume_messages(index, Channel21, Queue2),
+ consume_messages(store, Channel21, Queue2).
+
+declare_durable_queue(Channel) ->
+ QName = list_to_binary(erlang:ref_to_list(make_ref())),
+ #'queue.declare_ok'{queue = QName} =
+ amqp_channel:call(Channel,
+ #'queue.declare'{queue = QName, durable = true}),
+ QName.
+
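+%% init_per_suite sets queue_index_embed_msgs_below to 100 bytes, so the
+%% 50-byte payloads below should be embedded in the queue index while the
+%% 150-byte payloads should go to the per-vhost message store.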
+publish_persistent_messages(Storage, Channel, Queue) ->
+ MessagePayload = case Storage of
+ index -> binary:copy(<<"=">>, 50);
+ store -> binary:copy(<<"-">>, 150)
+ end,
+ amqp_channel:call(Channel, #'confirm.select'{}),
+ [amqp_channel:call(Channel,
+ #'basic.publish'{routing_key = Queue},
+ #amqp_msg{props = #'P_basic'{delivery_mode = 2},
+ payload = MessagePayload})
+ || _ <- lists:seq(1, ?MSGS_COUNT)],
+ amqp_channel:wait_for_confirms(Channel).
+
+
+get_folder_size(Vhost, Config) ->
+ Dir = vhost_dir(Vhost, Config),
+ folder_size(Dir).
+
+folder_size(Dir) ->
+ filelib:fold_files(Dir, ".*", true,
+ fun(F,Acc) -> filelib:file_size(F) + Acc end, 0).
+
+get_global_folder_size(Config) ->
+ BaseDir = rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_mnesia, dir, []),
+ folder_size(BaseDir).
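+%% rabbit_mnesia:dir() is the node's data directory; the per-vhost message
+%% store directories live beneath it, so this should capture total on-disk
+%% usage across all vhosts.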
+
+vhost_dir(Vhost, Config) ->
+ rabbit_ct_broker_helpers:rpc(Config, 0,
+ rabbit_vhost, msg_store_dir_path, [Vhost]).
+
+delete_vhost_data(Vhost, Config) ->
+ Dir = vhost_dir(Vhost, Config),
+ rabbit_file:recursive_delete([Dir]).
+
+queue_is_empty(Channel, Queue) ->
+ #'queue.declare_ok'{queue = Queue, message_count = 0} =
+ amqp_channel:call(Channel,
+ #'queue.declare'{ queue = Queue,
+ durable = true,
+ passive = true}).
+
+queue_is_not_empty(Channel, Queue) ->
+ #'queue.declare_ok'{queue = Queue, message_count = MsgCount} =
+ amqp_channel:call(Channel,
+ #'queue.declare'{ queue = Queue,
+ durable = true,
+ passive = true}),
+ ExpectedCount = ?MSGS_COUNT * 2,
+ ExpectedCount = MsgCount.
+
+consume_messages(Storage, Channel, Queue) ->
+ MessagePayload = case Storage of
+ index -> binary:copy(<<"=">>, 50);
+ store -> binary:copy(<<"-">>, 150)
+ end,
+ lists:foreach(
+ fun(I) ->
+ ct:pal("Consume message ~p~n ~p~n", [I, MessagePayload]),
+ {#'basic.get_ok'{}, Content} =
+ amqp_channel:call(Channel,
+ #'basic.get'{queue = Queue,
+ no_ack = true}),
+ #amqp_msg{payload = MessagePayload} = Content
+ end,
+ lists:seq(1, ?MSGS_COUNT)),
+ ok.
+
+open_channel(Vhost, Config) ->
+ Node = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ {ok, Conn} = amqp_connection:start(
+ #amqp_params_direct{node = Node, virtual_host = Vhost}),
+ {ok, Chan} = amqp_connection:open_channel(Conn),
+ Chan.
diff --git a/deps/rabbit/test/per_vhost_queue_limit_SUITE.erl b/deps/rabbit/test/per_vhost_queue_limit_SUITE.erl
new file mode 100644
index 0000000000..28a9f98537
--- /dev/null
+++ b/deps/rabbit/test/per_vhost_queue_limit_SUITE.erl
@@ -0,0 +1,682 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(per_vhost_queue_limit_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-compile(export_all).
+
+-import(rabbit_ct_client_helpers, [open_unmanaged_connection/3,
+ close_connection_and_channel/2]).
+
+all() ->
+ [
+ {group, cluster_size_1}
+ , {group, cluster_size_2}
+ ].
+
+groups() ->
+ [
+ {cluster_size_1, [], [
+ most_basic_single_node_queue_count,
+ single_node_single_vhost_queue_count,
+ single_node_multiple_vhosts_queue_count,
+ single_node_single_vhost_limit,
+ single_node_single_vhost_zero_limit,
+ single_node_single_vhost_limit_with_durable_named_queue,
+ single_node_single_vhost_zero_limit_with_durable_named_queue,
+ single_node_single_vhost_limit_with_queue_ttl,
+ single_node_single_vhost_limit_with_redeclaration
+ ]},
+ {cluster_size_2, [], [
+ most_basic_cluster_queue_count,
+ cluster_multiple_vhosts_queue_count,
+ cluster_multiple_vhosts_limit,
+ cluster_multiple_vhosts_zero_limit,
+ cluster_multiple_vhosts_limit_with_durable_named_queue,
+ cluster_multiple_vhosts_zero_limit_with_durable_named_queue,
+ cluster_node_restart_queue_count
+ ]}
+ ].
+
+suite() ->
+ [
+ %% If a test hangs, no need to wait for 30 minutes.
+ {timetrap, {minutes, 8}}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(cluster_size_1, Config) ->
+ init_per_multinode_group(cluster_size_1, Config, 1);
+init_per_group(cluster_size_2, Config) ->
+ init_per_multinode_group(cluster_size_2, Config, 2);
+init_per_group(cluster_rename, Config) ->
+ init_per_multinode_group(cluster_rename, Config, 2).
+
+init_per_multinode_group(Group, Config, NodeCount) ->
+ Suffix = rabbit_ct_helpers:testcase_absname(Config, "", "-"),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodes_count, NodeCount},
+ {rmq_nodename_suffix, Suffix}
+ ]),
+ case Group of
+ cluster_rename ->
+ % The broker is managed by {init,end}_per_testcase().
+ Config1;
+ _ ->
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps())
+ end.
+
+end_per_group(cluster_rename, Config) ->
+ % The broker is managed by {init,end}_per_testcase().
+ Config;
+end_per_group(_Group, Config) ->
+ rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_testcase(vhost_limit_after_node_renamed = Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase),
+ rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps());
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase),
+ Config.
+
+end_per_testcase(vhost_limit_after_node_renamed = Testcase, Config) ->
+ Config1 = ?config(save_config, Config),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()),
+ rabbit_ct_helpers:testcase_finished(Config1, Testcase);
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+%% -------------------------------------------------------------------
+%% Test cases.
+%% -------------------------------------------------------------------
+
+most_basic_single_node_queue_count(Config) ->
+ VHost = <<"queue-limits">>,
+ set_up_vhost(Config, VHost),
+ ?assertEqual(0, count_queues_in(Config, VHost)),
+ Conn = open_unmanaged_connection(Config, 0, VHost),
+ {ok, Ch} = amqp_connection:open_channel(Conn),
+ declare_exclusive_queues(Ch, 10),
+ ?assertEqual(10, count_queues_in(Config, VHost)),
+ close_connection_and_channel(Conn, Ch),
+ ?assertEqual(0, count_queues_in(Config, VHost)),
+ rabbit_ct_broker_helpers:delete_vhost(Config, VHost).
+
+single_node_single_vhost_queue_count(Config) ->
+ VHost = <<"queue-limits">>,
+ set_up_vhost(Config, VHost),
+ ?assertEqual(0, count_queues_in(Config, VHost)),
+ Conn = open_unmanaged_connection(Config, 0, VHost),
+ {ok, Ch} = amqp_connection:open_channel(Conn),
+ declare_exclusive_queues(Ch, 10),
+ ?assertEqual(10, count_queues_in(Config, VHost)),
+ declare_durable_queues(Ch, 10),
+ ?assertEqual(20, count_queues_in(Config, VHost)),
+ delete_durable_queues(Ch, 10),
+ ?assertEqual(10, count_queues_in(Config, VHost)),
+ close_connection_and_channel(Conn, Ch),
+ ?assertEqual(0, count_queues_in(Config, VHost)),
+ rabbit_ct_broker_helpers:delete_vhost(Config, VHost).
+
+single_node_multiple_vhosts_queue_count(Config) ->
+ VHost1 = <<"queue-limits1">>,
+ VHost2 = <<"queue-limits2">>,
+ set_up_vhost(Config, VHost1),
+ set_up_vhost(Config, VHost2),
+
+ ?assertEqual(0, count_queues_in(Config, VHost1)),
+ ?assertEqual(0, count_queues_in(Config, VHost2)),
+
+ Conn1 = open_unmanaged_connection(Config, 0, VHost1),
+ {ok, Ch1} = amqp_connection:open_channel(Conn1),
+ Conn2 = open_unmanaged_connection(Config, 0, VHost2),
+ {ok, Ch2} = amqp_connection:open_channel(Conn2),
+
+ declare_exclusive_queues(Ch1, 10),
+ ?assertEqual(10, count_queues_in(Config, VHost1)),
+ declare_durable_queues(Ch1, 10),
+ ?assertEqual(20, count_queues_in(Config, VHost1)),
+ delete_durable_queues(Ch1, 10),
+ ?assertEqual(10, count_queues_in(Config, VHost1)),
+ declare_exclusive_queues(Ch2, 30),
+ ?assertEqual(30, count_queues_in(Config, VHost2)),
+ close_connection_and_channel(Conn1, Ch1),
+ ?assertEqual(0, count_queues_in(Config, VHost1)),
+ close_connection_and_channel(Conn2, Ch2),
+ ?assertEqual(0, count_queues_in(Config, VHost2)),
+ rabbit_ct_broker_helpers:delete_vhost(Config, VHost1),
+ rabbit_ct_broker_helpers:delete_vhost(Config, VHost2).
+
+single_node_single_vhost_limit(Config) ->
+ single_node_single_vhost_limit_with(Config, 5),
+    single_node_single_vhost_limit_with(Config, 10).
+
+single_node_single_vhost_zero_limit(Config) ->
+ single_node_single_vhost_zero_limit_with(Config, #'queue.declare'{queue = <<"">>,
+ exclusive = true}).
+
+single_node_single_vhost_limit_with_durable_named_queue(Config) ->
+ VHost = <<"queue-limits">>,
+ set_up_vhost(Config, VHost),
+ ?assertEqual(0, count_queues_in(Config, VHost)),
+
+ set_vhost_queue_limit(Config, VHost, 3),
+ Conn = open_unmanaged_connection(Config, 0, VHost),
+ {ok, Ch} = amqp_connection:open_channel(Conn),
+
+ #'queue.declare_ok'{queue = _} =
+ amqp_channel:call(Ch, #'queue.declare'{queue = <<"Q1">>,
+ exclusive = false,
+ durable = true}),
+ #'queue.declare_ok'{queue = _} =
+ amqp_channel:call(Ch, #'queue.declare'{queue = <<"Q2">>,
+ exclusive = false,
+ durable = true}),
+ #'queue.declare_ok'{queue = _} =
+ amqp_channel:call(Ch, #'queue.declare'{queue = <<"Q3">>,
+ exclusive = false,
+ durable = true}),
+
+ expect_shutdown_due_to_precondition_failed(
+ fun () ->
+ amqp_channel:call(Ch, #'queue.declare'{queue = <<"Q4">>,
+ exclusive = false,
+ durable = true})
+ end),
+
+ rabbit_ct_broker_helpers:delete_vhost(Config, VHost).
+
+single_node_single_vhost_zero_limit_with_durable_named_queue(Config) ->
+ single_node_single_vhost_zero_limit_with(Config, #'queue.declare'{queue = <<"Q4">>,
+ exclusive = false,
+ durable = true}).
+
+single_node_single_vhost_limit_with(Config, WatermarkLimit) ->
+ VHost = <<"queue-limits">>,
+ set_up_vhost(Config, VHost),
+ ?assertEqual(0, count_queues_in(Config, VHost)),
+
+ set_vhost_queue_limit(Config, VHost, 3),
+ Conn = open_unmanaged_connection(Config, 0, VHost),
+ {ok, Ch} = amqp_connection:open_channel(Conn),
+
+ set_vhost_queue_limit(Config, VHost, WatermarkLimit),
+ lists:foreach(fun (_) ->
+ #'queue.declare_ok'{queue = _} =
+ amqp_channel:call(Ch, #'queue.declare'{queue = <<"">>,
+ exclusive = true})
+ end, lists:seq(1, WatermarkLimit)),
+
+ expect_shutdown_due_to_precondition_failed(
+ fun () ->
+ amqp_channel:call(Ch, #'queue.declare'{queue = <<"">>,
+ exclusive = true})
+ end),
+
+ rabbit_ct_broker_helpers:delete_vhost(Config, VHost).
+
+single_node_single_vhost_zero_limit_with(Config, QueueDeclare) ->
+ VHost = <<"queue-limits">>,
+ set_up_vhost(Config, VHost),
+ ?assertEqual(0, count_queues_in(Config, VHost)),
+
+ Conn1 = open_unmanaged_connection(Config, 0, VHost),
+ {ok, Ch1} = amqp_connection:open_channel(Conn1),
+
+ set_vhost_queue_limit(Config, VHost, 0),
+
+ expect_shutdown_due_to_precondition_failed(
+ fun () ->
+ amqp_channel:call(Ch1, QueueDeclare)
+ end),
+
+ Conn2 = open_unmanaged_connection(Config, 0, VHost),
+ {ok, Ch2} = amqp_connection:open_channel(Conn2),
+
+ %% lift the limit
+ set_vhost_queue_limit(Config, VHost, -1),
+ lists:foreach(fun (_) ->
+ #'queue.declare_ok'{queue = _} =
+ amqp_channel:call(Ch2, #'queue.declare'{queue = <<"">>,
+ exclusive = true})
+ end, lists:seq(1, 100)),
+
+ rabbit_ct_broker_helpers:delete_vhost(Config, VHost).
+
+
+single_node_single_vhost_limit_with_queue_ttl(Config) ->
+ VHost = <<"queue-limits">>,
+ set_up_vhost(Config, VHost),
+ ?assertEqual(0, count_queues_in(Config, VHost)),
+
+ Conn1 = open_unmanaged_connection(Config, 0, VHost),
+ {ok, Ch1} = amqp_connection:open_channel(Conn1),
+
+ set_vhost_queue_limit(Config, VHost, 3),
+
+ lists:foreach(fun (_) ->
+ #'queue.declare_ok'{queue = _} =
+ amqp_channel:call(Ch1, #'queue.declare'{queue = <<"">>,
+ exclusive = true,
+ arguments = [{<<"x-expires">>, long, 2000}]})
+ end, lists:seq(1, 3)),
+
+
+ expect_shutdown_due_to_precondition_failed(
+ fun () ->
+ amqp_channel:call(Ch1, #'queue.declare'{queue = <<"">>,
+ exclusive = true})
+ end),
+
+ Conn2 = open_unmanaged_connection(Config, 0, VHost),
+ {ok, Ch2} = amqp_connection:open_channel(Conn2),
+
+ %% wait for the queues to expire
+ timer:sleep(3000),
+
+ #'queue.declare_ok'{queue = _} =
+ amqp_channel:call(Ch2, #'queue.declare'{queue = <<"">>,
+ exclusive = true}),
+
+ rabbit_ct_broker_helpers:delete_vhost(Config, VHost).
+
+
+single_node_single_vhost_limit_with_redeclaration(Config) ->
+ VHost = <<"queue-limits">>,
+ set_up_vhost(Config, VHost),
+ ?assertEqual(0, count_queues_in(Config, VHost)),
+
+ set_vhost_queue_limit(Config, VHost, 3),
+ Conn1 = open_unmanaged_connection(Config, 0, VHost),
+ {ok, Ch1} = amqp_connection:open_channel(Conn1),
+
+ #'queue.declare_ok'{queue = _} =
+ amqp_channel:call(Ch1, #'queue.declare'{queue = <<"Q1">>,
+ exclusive = false,
+ durable = true}),
+ #'queue.declare_ok'{queue = _} =
+ amqp_channel:call(Ch1, #'queue.declare'{queue = <<"Q2">>,
+ exclusive = false,
+ durable = true}),
+ #'queue.declare_ok'{queue = _} =
+ amqp_channel:call(Ch1, #'queue.declare'{queue = <<"Q3">>,
+ exclusive = false,
+ durable = true}),
+
+ %% can't declare a new queue...
+ expect_shutdown_due_to_precondition_failed(
+ fun () ->
+ amqp_channel:call(Ch1, #'queue.declare'{queue = <<"Q4">>,
+ exclusive = false,
+ durable = true})
+ end),
+
+
+ Conn2 = open_unmanaged_connection(Config, 0, VHost),
+ {ok, Ch2} = amqp_connection:open_channel(Conn2),
+
+ %% ...but re-declarations succeed
+ #'queue.declare_ok'{queue = _} =
+ amqp_channel:call(Ch2, #'queue.declare'{queue = <<"Q1">>,
+ exclusive = false,
+ durable = true}),
+ #'queue.declare_ok'{queue = _} =
+ amqp_channel:call(Ch2, #'queue.declare'{queue = <<"Q2">>,
+ exclusive = false,
+ durable = true}),
+ #'queue.declare_ok'{queue = _} =
+ amqp_channel:call(Ch2, #'queue.declare'{queue = <<"Q3">>,
+ exclusive = false,
+ durable = true}),
+
+ expect_shutdown_due_to_precondition_failed(
+ fun () ->
+ amqp_channel:call(Ch2, #'queue.declare'{queue = <<"Q4">>,
+ exclusive = false,
+ durable = true})
+ end),
+
+ rabbit_ct_broker_helpers:delete_vhost(Config, VHost).
+
+
+most_basic_cluster_queue_count(Config) ->
+ VHost = <<"queue-limits">>,
+ set_up_vhost(Config, VHost),
+ ?assertEqual(0, count_queues_in(Config, VHost, 0)),
+ ?assertEqual(0, count_queues_in(Config, VHost, 1)),
+
+ Conn1 = open_unmanaged_connection(Config, 0, VHost),
+ {ok, Ch1} = amqp_connection:open_channel(Conn1),
+ declare_exclusive_queues(Ch1, 10),
+ ?assertEqual(10, count_queues_in(Config, VHost, 0)),
+ ?assertEqual(10, count_queues_in(Config, VHost, 1)),
+
+ Conn2 = open_unmanaged_connection(Config, 0, VHost),
+ {ok, Ch2} = amqp_connection:open_channel(Conn2),
+ declare_exclusive_queues(Ch2, 15),
+ ?assertEqual(25, count_queues_in(Config, VHost, 0)),
+ ?assertEqual(25, count_queues_in(Config, VHost, 1)),
+ close_connection_and_channel(Conn1, Ch1),
+ close_connection_and_channel(Conn2, Ch2),
+ ?assertEqual(0, count_queues_in(Config, VHost, 0)),
+ ?assertEqual(0, count_queues_in(Config, VHost, 1)),
+ rabbit_ct_broker_helpers:delete_vhost(Config, VHost).
+
+cluster_node_restart_queue_count(Config) ->
+ VHost = <<"queue-limits">>,
+ set_up_vhost(Config, VHost),
+ ?assertEqual(0, count_queues_in(Config, VHost, 0)),
+ ?assertEqual(0, count_queues_in(Config, VHost, 1)),
+
+ Conn1 = open_unmanaged_connection(Config, 0, VHost),
+ {ok, Ch1} = amqp_connection:open_channel(Conn1),
+ declare_exclusive_queues(Ch1, 10),
+ ?assertEqual(10, count_queues_in(Config, VHost, 0)),
+ ?assertEqual(10, count_queues_in(Config, VHost, 1)),
+
+ rabbit_ct_broker_helpers:restart_broker(Config, 0),
+ ?assertEqual(0, count_queues_in(Config, VHost, 0)),
+
+ Conn2 = open_unmanaged_connection(Config, 1, VHost),
+ {ok, Ch2} = amqp_connection:open_channel(Conn2),
+ declare_exclusive_queues(Ch2, 15),
+ ?assertEqual(15, count_queues_in(Config, VHost, 0)),
+ ?assertEqual(15, count_queues_in(Config, VHost, 1)),
+
+ declare_durable_queues(Ch2, 10),
+ ?assertEqual(25, count_queues_in(Config, VHost, 0)),
+ ?assertEqual(25, count_queues_in(Config, VHost, 1)),
+
+ rabbit_ct_broker_helpers:restart_broker(Config, 1),
+
+ ?assertEqual(10, count_queues_in(Config, VHost, 0)),
+ ?assertEqual(10, count_queues_in(Config, VHost, 1)),
+ rabbit_ct_broker_helpers:delete_vhost(Config, VHost).
+
+
+cluster_multiple_vhosts_queue_count(Config) ->
+ VHost1 = <<"queue-limits1">>,
+ VHost2 = <<"queue-limits2">>,
+ set_up_vhost(Config, VHost1),
+ set_up_vhost(Config, VHost2),
+
+ ?assertEqual(0, count_queues_in(Config, VHost1)),
+ ?assertEqual(0, count_queues_in(Config, VHost2)),
+
+ Conn1 = open_unmanaged_connection(Config, 0, VHost1),
+ {ok, Ch1} = amqp_connection:open_channel(Conn1),
+ declare_exclusive_queues(Ch1, 10),
+ ?assertEqual(10, count_queues_in(Config, VHost1, 0)),
+ ?assertEqual(10, count_queues_in(Config, VHost1, 1)),
+ ?assertEqual(0, count_queues_in(Config, VHost2, 0)),
+ ?assertEqual(0, count_queues_in(Config, VHost2, 1)),
+
+ Conn2 = open_unmanaged_connection(Config, 0, VHost2),
+ {ok, Ch2} = amqp_connection:open_channel(Conn2),
+ declare_exclusive_queues(Ch2, 15),
+ ?assertEqual(15, count_queues_in(Config, VHost2, 0)),
+ ?assertEqual(15, count_queues_in(Config, VHost2, 1)),
+ close_connection_and_channel(Conn1, Ch1),
+ close_connection_and_channel(Conn2, Ch2),
+ ?assertEqual(0, count_queues_in(Config, VHost1, 0)),
+ ?assertEqual(0, count_queues_in(Config, VHost1, 1)),
+ ?assertEqual(0, count_queues_in(Config, VHost2, 0)),
+ ?assertEqual(0, count_queues_in(Config, VHost2, 1)),
+
+ rabbit_ct_broker_helpers:delete_vhost(Config, VHost1),
+ rabbit_ct_broker_helpers:delete_vhost(Config, VHost2).
+
+cluster_multiple_vhosts_limit(Config) ->
+ cluster_multiple_vhosts_limit_with(Config, 10),
+ cluster_multiple_vhosts_limit_with(Config, 20).
+
+cluster_multiple_vhosts_limit_with(Config, WatermarkLimit) ->
+ VHost1 = <<"queue-limits1">>,
+ VHost2 = <<"queue-limits2">>,
+ set_up_vhost(Config, VHost1),
+ set_up_vhost(Config, VHost2),
+ ?assertEqual(0, count_queues_in(Config, VHost1)),
+ ?assertEqual(0, count_queues_in(Config, VHost2)),
+
+ set_vhost_queue_limit(Config, VHost1, 3),
+ set_vhost_queue_limit(Config, VHost2, 3),
+
+ Conn1 = open_unmanaged_connection(Config, 0, VHost1),
+ {ok, Ch1} = amqp_connection:open_channel(Conn1),
+ set_vhost_queue_limit(Config, VHost1, WatermarkLimit),
+
+ lists:foreach(fun (_) ->
+ #'queue.declare_ok'{queue = _} =
+ amqp_channel:call(Ch1, #'queue.declare'{queue = <<"">>,
+ exclusive = true})
+ end, lists:seq(1, WatermarkLimit)),
+
+ Conn2 = open_unmanaged_connection(Config, 1, VHost2),
+ {ok, Ch2} = amqp_connection:open_channel(Conn2),
+ set_vhost_queue_limit(Config, VHost2, WatermarkLimit),
+
+ lists:foreach(fun (_) ->
+ #'queue.declare_ok'{queue = _} =
+ amqp_channel:call(Ch2, #'queue.declare'{queue = <<"">>,
+ exclusive = true})
+ end, lists:seq(1, WatermarkLimit)),
+
+ expect_shutdown_due_to_precondition_failed(
+ fun () ->
+ amqp_channel:call(Ch1, #'queue.declare'{queue = <<"">>,
+ exclusive = true})
+ end),
+ expect_shutdown_due_to_precondition_failed(
+ fun () ->
+ amqp_channel:call(Ch2, #'queue.declare'{queue = <<"">>,
+ exclusive = true})
+ end),
+
+ rabbit_ct_broker_helpers:delete_vhost(Config, VHost1),
+ rabbit_ct_broker_helpers:delete_vhost(Config, VHost2).
+
+
+cluster_multiple_vhosts_zero_limit(Config) ->
+ cluster_multiple_vhosts_zero_limit_with(Config, #'queue.declare'{queue = <<"">>,
+ exclusive = true}).
+
+cluster_multiple_vhosts_limit_with_durable_named_queue(Config) ->
+ VHost1 = <<"queue-limits1">>,
+ VHost2 = <<"queue-limits2">>,
+ set_up_vhost(Config, VHost1),
+ set_up_vhost(Config, VHost2),
+ ?assertEqual(0, count_queues_in(Config, VHost1)),
+ ?assertEqual(0, count_queues_in(Config, VHost2)),
+
+ set_vhost_queue_limit(Config, VHost1, 3),
+ set_vhost_queue_limit(Config, VHost2, 3),
+
+ Conn1 = open_unmanaged_connection(Config, 0, VHost1),
+ {ok, Ch1} = amqp_connection:open_channel(Conn1),
+
+ Conn2 = open_unmanaged_connection(Config, 1, VHost2),
+ {ok, Ch2} = amqp_connection:open_channel(Conn2),
+
+ #'queue.declare_ok'{} =
+ amqp_channel:call(Ch1, #'queue.declare'{queue = <<"Q1">>,
+ exclusive = false,
+ durable = true}),
+ #'queue.declare_ok'{} =
+ amqp_channel:call(Ch1, #'queue.declare'{queue = <<"Q2">>,
+ exclusive = false,
+ durable = true}),
+ #'queue.declare_ok'{} =
+ amqp_channel:call(Ch1, #'queue.declare'{queue = <<"Q3">>,
+ exclusive = false,
+ durable = true}),
+
+ #'queue.declare_ok'{} =
+ amqp_channel:call(Ch2, #'queue.declare'{queue = <<"Q1">>,
+ exclusive = false,
+ durable = true}),
+ #'queue.declare_ok'{} =
+ amqp_channel:call(Ch2, #'queue.declare'{queue = <<"Q2">>,
+ exclusive = false,
+ durable = true}),
+ #'queue.declare_ok'{} =
+ amqp_channel:call(Ch2, #'queue.declare'{queue = <<"Q3">>,
+ exclusive = false,
+ durable = true}),
+
+ expect_shutdown_due_to_precondition_failed(
+ fun () ->
+ amqp_channel:call(Ch1, #'queue.declare'{queue = <<"Q3">>,
+ exclusive = false,
+ durable = true})
+ end),
+ expect_shutdown_due_to_precondition_failed(
+ fun () ->
+ amqp_channel:call(Ch2, #'queue.declare'{queue = <<"Q3">>,
+ exclusive = false,
+ durable = true})
+ end),
+
+ rabbit_ct_broker_helpers:delete_vhost(Config, VHost1),
+ rabbit_ct_broker_helpers:delete_vhost(Config, VHost2).
+
+cluster_multiple_vhosts_zero_limit_with_durable_named_queue(Config) ->
+ cluster_multiple_vhosts_zero_limit_with(Config, #'queue.declare'{queue = <<"Q4">>,
+ exclusive = false,
+ durable = true}).
+
+cluster_multiple_vhosts_zero_limit_with(Config, QueueDeclare) ->
+ VHost1 = <<"queue-limits1">>,
+ VHost2 = <<"queue-limits2">>,
+ set_up_vhost(Config, VHost1),
+ set_up_vhost(Config, VHost2),
+ ?assertEqual(0, count_queues_in(Config, VHost1)),
+ ?assertEqual(0, count_queues_in(Config, VHost2)),
+
+ Conn1 = open_unmanaged_connection(Config, 0, VHost1),
+ {ok, Ch1} = amqp_connection:open_channel(Conn1),
+ Conn2 = open_unmanaged_connection(Config, 1, VHost2),
+ {ok, Ch2} = amqp_connection:open_channel(Conn2),
+
+ set_vhost_queue_limit(Config, VHost1, 0),
+ set_vhost_queue_limit(Config, VHost2, 0),
+
+ expect_shutdown_due_to_precondition_failed(
+ fun () ->
+ amqp_channel:call(Ch1, QueueDeclare)
+ end),
+ expect_shutdown_due_to_precondition_failed(
+ fun () ->
+ amqp_channel:call(Ch2, QueueDeclare)
+ end),
+
+
+ Conn3 = open_unmanaged_connection(Config, 0, VHost1),
+ {ok, Ch3} = amqp_connection:open_channel(Conn3),
+ Conn4 = open_unmanaged_connection(Config, 1, VHost2),
+ {ok, Ch4} = amqp_connection:open_channel(Conn4),
+
+ %% lift the limits
+ set_vhost_queue_limit(Config, VHost1, -1),
+ set_vhost_queue_limit(Config, VHost2, -1),
+ lists:foreach(fun (_) ->
+ #'queue.declare_ok'{queue = _} =
+ amqp_channel:call(Ch3, #'queue.declare'{queue = <<"">>,
+ exclusive = true}),
+ #'queue.declare_ok'{queue = _} =
+ amqp_channel:call(Ch4, #'queue.declare'{queue = <<"">>,
+ exclusive = true})
+ end, lists:seq(1, 400)),
+
+ rabbit_ct_broker_helpers:delete_vhost(Config, VHost1),
+ rabbit_ct_broker_helpers:delete_vhost(Config, VHost2).
+
+%% -------------------------------------------------------------------
+%% Helpers
+%% -------------------------------------------------------------------
+
+set_up_vhost(Config, VHost) ->
+ rabbit_ct_broker_helpers:add_vhost(Config, VHost),
+ rabbit_ct_broker_helpers:set_full_permissions(Config, <<"guest">>, VHost),
+ set_vhost_queue_limit(Config, VHost, -1).
+
+set_vhost_queue_limit(Config, VHost, Count) ->
+ set_vhost_queue_limit(Config, 0, VHost, Count).
+
+set_vhost_queue_limit(Config, NodeIndex, VHost, Count) ->
+ Node = rabbit_ct_broker_helpers:get_node_config(
+ Config, NodeIndex, nodename),
+ rabbit_ct_broker_helpers:control_action(
+ set_vhost_limits, Node,
+ ["{\"max-queues\": " ++ integer_to_list(Count) ++ "}"],
+ [{"-p", binary_to_list(VHost)}]).
+
+count_queues_in(Config, VHost) ->
+ count_queues_in(Config, VHost, 0).
+count_queues_in(Config, VHost, NodeIndex) ->
+ timer:sleep(200),
+ rabbit_ct_broker_helpers:rpc(Config, NodeIndex,
+ rabbit_amqqueue,
+ count, [VHost]).
+
+declare_exclusive_queues(Ch, N) ->
+ lists:foreach(fun (_) ->
+ amqp_channel:call(Ch,
+ #'queue.declare'{queue = <<"">>,
+ exclusive = true})
+ end,
+ lists:seq(1, N)).
+
+declare_durable_queues(Ch, N) ->
+ lists:foreach(fun (I) ->
+ amqp_channel:call(Ch,
+ #'queue.declare'{queue = durable_queue_name(I),
+ exclusive = false,
+ durable = true})
+ end,
+ lists:seq(1, N)).
+
+delete_durable_queues(Ch, N) ->
+ lists:foreach(fun (I) ->
+ amqp_channel:call(Ch,
+ #'queue.delete'{queue = durable_queue_name(I)})
+ end,
+ lists:seq(1, N)).
+
+durable_queue_name(N) when is_integer(N) ->
+ iolist_to_binary(io_lib:format("queue-limits-durable-~p", [N])).
+
+expect_shutdown_due_to_precondition_failed(Thunk) ->
+ try
+ Thunk(),
+ ok
+ catch _:{{shutdown, {server_initiated_close, 406, _}}, _} ->
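+        %% 406 is the AMQP 0-9-1 precondition_failed reply code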
+ %% expected
+ ok
+ end.
diff --git a/deps/rabbit/test/policy_SUITE.erl b/deps/rabbit/test/policy_SUITE.erl
new file mode 100644
index 0000000000..ce68332d77
--- /dev/null
+++ b/deps/rabbit/test/policy_SUITE.erl
@@ -0,0 +1,204 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(policy_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, cluster_size_2}
+ ].
+
+groups() ->
+ [
+ {cluster_size_2, [], [
+ policy_ttl,
+ operator_policy_ttl,
+ operator_retroactive_policy_ttl,
+ operator_retroactive_policy_publish_ttl
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(cluster_size_2, Config) ->
+ Suffix = rabbit_ct_helpers:testcase_absname(Config, "", "-"),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodes_count, 2},
+ {rmq_nodename_suffix, Suffix}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_group(_Group, Config) ->
+ rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_client_helpers:setup_steps(),
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_client_helpers:teardown_steps(),
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+%% -------------------------------------------------------------------
+%% Test cases.
+%% -------------------------------------------------------------------
+
+policy_ttl(Config) ->
+ {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ Q = <<"policy_ttl-queue">>,
+ rabbit_ct_broker_helpers:set_policy(Config, 0, <<"ttl-policy">>,
+ <<"policy_ttl-queue">>, <<"all">>, [{<<"message-ttl">>, 20}]),
+
+ declare(Ch, Q),
+ publish(Ch, Q, lists:seq(1, 20)),
+ timer:sleep(50),
+ get_empty(Ch, Q),
+ delete(Ch, Q),
+
+ rabbit_ct_broker_helpers:clear_policy(Config, 0, <<"ttl-policy">>),
+
+ rabbit_ct_client_helpers:close_channel(Ch),
+ rabbit_ct_client_helpers:close_connection(Conn),
+ passed.
+
+operator_policy_ttl(Config) ->
+ {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ Q = <<"policy_ttl-queue">>,
+ % Operator policy will override
+ rabbit_ct_broker_helpers:set_policy(Config, 0, <<"ttl-policy">>,
+ <<"policy_ttl-queue">>, <<"all">>, [{<<"message-ttl">>, 100000}]),
+ rabbit_ct_broker_helpers:set_operator_policy(Config, 0, <<"ttl-policy-op">>,
+ <<"policy_ttl-queue">>, <<"all">>, [{<<"message-ttl">>, 1}]),
+
+ declare(Ch, Q),
+ publish(Ch, Q, lists:seq(1, 50)),
+ timer:sleep(50),
+ get_empty(Ch, Q),
+ delete(Ch, Q),
+
+ rabbit_ct_broker_helpers:clear_policy(Config, 0, <<"ttl-policy">>),
+ rabbit_ct_broker_helpers:clear_operator_policy(Config, 0, <<"ttl-policy-op">>),
+
+ rabbit_ct_client_helpers:close_channel(Ch),
+ rabbit_ct_client_helpers:close_connection(Conn),
+ passed.
+
+operator_retroactive_policy_ttl(Config) ->
+ {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ Q = <<"policy_ttl-queue">>,
+ declare(Ch, Q),
+ publish(Ch, Q, lists:seq(1, 50)),
+ % Operator policy will override
+ rabbit_ct_broker_helpers:set_operator_policy(Config, 0, <<"ttl-policy-op">>,
+ <<"policy_ttl-queue">>, <<"all">>, [{<<"message-ttl">>, 1}]),
+
+ %% Old messages are not expired
+ timer:sleep(50),
+ get_messages(50, Ch, Q),
+ delete(Ch, Q),
+
+ rabbit_ct_broker_helpers:clear_operator_policy(Config, 0, <<"ttl-policy-op">>),
+
+ rabbit_ct_client_helpers:close_channel(Ch),
+ rabbit_ct_client_helpers:close_connection(Conn),
+ passed.
+
+operator_retroactive_policy_publish_ttl(Config) ->
+ {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ Q = <<"policy_ttl-queue">>,
+ declare(Ch, Q),
+ publish(Ch, Q, lists:seq(1, 50)),
+ % Operator policy will override
+ rabbit_ct_broker_helpers:set_operator_policy(Config, 0, <<"ttl-policy-op">>,
+ <<"policy_ttl-queue">>, <<"all">>, [{<<"message-ttl">>, 1}]),
+
+    %% Old messages are not expired; new ones expire only when they reach
+    %% the head of the queue
+ publish(Ch, Q, lists:seq(1, 25)),
+ timer:sleep(50),
+ [[<<"policy_ttl-queue">>, <<"75">>]] =
+ rabbit_ct_broker_helpers:rabbitmqctl_list(Config, 0, ["list_queues", "--no-table-headers"]),
+ get_messages(50, Ch, Q),
+ delete(Ch, Q),
+
+ rabbit_ct_broker_helpers:clear_operator_policy(Config, 0, <<"ttl-policy-op">>),
+
+ rabbit_ct_client_helpers:close_channel(Ch),
+ rabbit_ct_client_helpers:close_connection(Conn),
+ passed.
+
+%%----------------------------------------------------------------------------
+
+
+declare(Ch, Q) ->
+ amqp_channel:call(Ch, #'queue.declare'{queue = Q,
+ durable = true}).
+
+delete(Ch, Q) ->
+ amqp_channel:call(Ch, #'queue.delete'{queue = Q}).
+
+publish(Ch, Q, Ps) ->
+ amqp_channel:call(Ch, #'confirm.select'{}),
+ [publish1(Ch, Q, P) || P <- Ps],
+ amqp_channel:wait_for_confirms(Ch).
+
+publish1(Ch, Q, P) ->
+ amqp_channel:cast(Ch, #'basic.publish'{routing_key = Q},
+ #amqp_msg{props = props(P),
+ payload = erlang:md5(term_to_binary(P))}).
+
+publish1(Ch, Q, P, Pd) ->
+ amqp_channel:cast(Ch, #'basic.publish'{routing_key = Q},
+ #amqp_msg{props = props(P),
+ payload = Pd}).
+
+props(undefined) -> #'P_basic'{delivery_mode = 2};
+props(P) -> #'P_basic'{priority = P,
+ delivery_mode = 2}.
+
+consume(Ch, Q, Ack) ->
+ amqp_channel:subscribe(Ch, #'basic.consume'{queue = Q,
+ no_ack = Ack =:= no_ack,
+ consumer_tag = <<"ctag">>},
+ self()),
+ receive
+ #'basic.consume_ok'{consumer_tag = <<"ctag">>} ->
+ ok
+ end.
+
+get_empty(Ch, Q) ->
+ #'basic.get_empty'{} = amqp_channel:call(Ch, #'basic.get'{queue = Q}).
+
+get_messages(0, Ch, Q) ->
+ get_empty(Ch, Q);
+get_messages(Number, Ch, Q) ->
+ case amqp_channel:call(Ch, #'basic.get'{queue = Q}) of
+ {#'basic.get_ok'{}, _} ->
+ get_messages(Number - 1, Ch, Q);
+ #'basic.get_empty'{} ->
+ exit(failed)
+ end.
+
+%%----------------------------------------------------------------------------
diff --git a/deps/rabbit/test/priority_queue_SUITE.erl b/deps/rabbit/test/priority_queue_SUITE.erl
new file mode 100644
index 0000000000..a0c1732ffd
--- /dev/null
+++ b/deps/rabbit/test/priority_queue_SUITE.erl
@@ -0,0 +1,771 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(priority_queue_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, cluster_size_2},
+ {group, cluster_size_3}
+ ].
+
+groups() ->
+ [
+ {cluster_size_2, [], [
+ ackfold,
+ drop,
+ {overflow_reject_publish, [], [reject]},
+ {overflow_reject_publish_dlx, [], [reject]},
+ dropwhile_fetchwhile,
+ info_head_message_timestamp,
+ matching,
+ mirror_queue_sync,
+ mirror_queue_sync_priority_above_max,
+ mirror_queue_sync_priority_above_max_pending_ack,
+ mirror_queue_sync_order,
+ purge,
+ requeue,
+ resume,
+ simple_order,
+ straight_through,
+ invoke,
+ gen_server2_stats,
+ negative_max_priorities,
+ max_priorities_above_hard_limit
+ ]},
+ {cluster_size_3, [], [
+ mirror_queue_auto_ack,
+ mirror_fast_reset_policy,
+ mirror_reset_policy,
+ mirror_stop_pending_followers
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(cluster_size_2, Config) ->
+ Suffix = rabbit_ct_helpers:testcase_absname(Config, "", "-"),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodes_count, 2},
+ {rmq_nodename_suffix, Suffix}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps());
+init_per_group(cluster_size_3, Config) ->
+ Suffix = rabbit_ct_helpers:testcase_absname(Config, "", "-"),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodes_count, 3},
+ {rmq_nodename_suffix, Suffix}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps());
+init_per_group(overflow_reject_publish, Config) ->
+ rabbit_ct_helpers:set_config(Config, [
+ {overflow, <<"reject-publish">>}
+ ]);
+init_per_group(overflow_reject_publish_dlx, Config) ->
+ rabbit_ct_helpers:set_config(Config, [
+ {overflow, <<"reject-publish-dlx">>}
+ ]).
+
+end_per_group(overflow_reject_publish, _Config) ->
+ ok;
+end_per_group(overflow_reject_publish_dlx, _Config) ->
+ ok;
+end_per_group(_Group, Config) ->
+ rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_client_helpers:setup_steps(),
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_client_helpers:teardown_steps(),
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+%% The BQ API is used in all sorts of places in all sorts of
+%% ways. Therefore we have to jump through a few different hoops
+%% in order to integration-test it.
+%%
+%% * start/1, stop/0, init/3, terminate/2, delete_and_terminate/2
+%% - starting and stopping rabbit. durable queues / persistent msgs needed
+%% to test recovery
+%%
+%% * publish/5, drain_confirmed/1, fetch/2, ack/2, is_duplicate/2, msg_rates/1,
+%% needs_timeout/1, timeout/1, invoke/3, resume/1 [0]
+%% - regular publishing and consuming, with confirms and acks and durability
+%%
+%% * publish_delivered/4 - publish with acks straight through
+%% * discard/3 - publish without acks straight through
+%% * dropwhile/2 - expire messages without DLX
+%% * fetchwhile/4 - expire messages with DLX
+%% * ackfold/4 - reject messages with DLX
+%% * requeue/2 - reject messages without DLX
+%% * drop/2 - maxlen messages without DLX
+%% * purge/1 - issue AMQP queue.purge
+%% * purge_acks/1 - mirror queue explicit sync with unacked msgs
+%% * fold/3 - mirror queue explicit sync
+%% * depth/1 - mirror queue implicit sync detection
+%% * len/1, is_empty/1 - info items
+%% * handle_pre_hibernate/1 - hibernation
+%%
+%% * set_ram_duration_target/2, ram_duration/1, status/1
+%% - maybe need unit testing?
+%%
+%% [0] publish enough to get credit flow from msg store
+
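+%% As a minimal sketch of what driving the BQ API directly looks like
+%% (the same pattern used by info_head_message_timestamp1/1 and
+%% ram_duration/1 below; the queue name here is illustrative):
+%%
+%%   QName = rabbit_misc:r(<<"/">>, queue, <<"sketch-queue">>),
+%%   Q0 = rabbit_amqqueue:pseudo_queue(QName, self()),
+%%   Q1 = amqqueue:set_arguments(Q0, [{<<"x-max-priority">>, long, 3}]),
+%%   BQS1 = rabbit_priority_queue:init(Q1, new, fun(_, _) -> ok end),
+%%   BQS2 = rabbit_priority_queue:publish(Msg, #message_properties{size = 0},
+%%                                        false, self(), noflow, BQS1),
+%%   {{Msg, _IsDelivered, AckTag}, BQS3} = rabbit_priority_queue:fetch(true, BQS2),
+%%   {[_MsgId], BQS4} = rabbit_priority_queue:ack([AckTag], BQS3),
+%%   rabbit_priority_queue:delete_and_terminate(shutdown, BQS4).
+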
+simple_order(Config) ->
+ {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ Q = <<"simple_order-queue">>,
+ declare(Ch, Q, 3),
+ publish(Ch, Q, [1, 2, 3, 1, 2, 3, 1, 2, 3]),
+ get_all(Ch, Q, do_ack, [3, 3, 3, 2, 2, 2, 1, 1, 1]),
+ publish(Ch, Q, [2, 3, 1, 2, 3, 1, 2, 3, 1]),
+ get_all(Ch, Q, no_ack, [3, 3, 3, 2, 2, 2, 1, 1, 1]),
+ publish(Ch, Q, [3, 1, 2, 3, 1, 2, 3, 1, 2]),
+ get_all(Ch, Q, do_ack, [3, 3, 3, 2, 2, 2, 1, 1, 1]),
+ delete(Ch, Q),
+ rabbit_ct_client_helpers:close_channel(Ch),
+ rabbit_ct_client_helpers:close_connection(Conn),
+ passed.
+
+matching(Config) ->
+ {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ Q = <<"matching-queue">>,
+ declare(Ch, Q, 5),
+    %% Priorities above the max are rounded down to the max, and
+    %% messages without a priority default to 0
+ publish(Ch, Q, [undefined, 0, 5, 10, undefined]),
+ get_all(Ch, Q, do_ack, [5, 10, undefined, 0, undefined]),
+ delete(Ch, Q),
+ rabbit_ct_client_helpers:close_channel(Ch),
+ rabbit_ct_client_helpers:close_connection(Conn),
+ passed.
+
+resume(Config) ->
+ {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ Q = <<"resume-queue">>,
+ declare(Ch, Q, 5),
+ amqp_channel:call(Ch, #'confirm.select'{}),
+ publish_many(Ch, Q, 10000),
+ amqp_channel:wait_for_confirms(Ch),
+ amqp_channel:call(Ch, #'queue.purge'{queue = Q}), %% Assert it exists
+ delete(Ch, Q),
+ rabbit_ct_client_helpers:close_channel(Ch),
+ rabbit_ct_client_helpers:close_connection(Conn),
+ passed.
+
+straight_through(Config) ->
+ {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ Q = <<"straight_through-queue">>,
+ declare(Ch, Q, 3),
+ [begin
+ consume(Ch, Q, Ack),
+ [begin
+ publish1(Ch, Q, P),
+ assert_delivered(Ch, Ack, P)
+ end || P <- [1, 2, 3]],
+ cancel(Ch)
+ end || Ack <- [do_ack, no_ack]],
+ get_empty(Ch, Q),
+ delete(Ch, Q),
+ rabbit_ct_client_helpers:close_channel(Ch),
+ rabbit_ct_client_helpers:close_connection(Conn),
+ passed.
+
+max_priorities_above_hard_limit(Config) ->
+ {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ Q = <<"max_priorities_above_hard_limit">>,
+ ?assertExit(
+ {{shutdown, {server_initiated_close, 406, _}}, _},
+      %% Note that lower values (e.g. 300) would also overflow the byte
+      %% type used here. However, values >= 256 would still be rejected
+      %% when sent by other clients
+ declare(Ch, Q, 3000)),
+ rabbit_ct_client_helpers:close_connection(Conn),
+ passed.
+
+negative_max_priorities(Config) ->
+ {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ Q = <<"negative_max_priorities">>,
+ ?assertExit(
+ {{shutdown, {server_initiated_close, 406, _}}, _},
+ declare(Ch, Q, -10)),
+ rabbit_ct_client_helpers:close_connection(Conn),
+ passed.
+
+
+invoke(Config) ->
+ %% Synthetic test to check the invoke callback, as the bug tested here
+ %% is only triggered with a race condition.
+ %% When mirroring is stopped, the backing queue of rabbit_amqqueue_process
+ %% changes from rabbit_mirror_queue_master to rabbit_priority_queue,
+ %% which shouldn't receive any invoke call. However, there might
+ %% be pending messages so the priority queue receives the
+ %% `run_backing_queue` cast message sent to the old master.
+ A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, A),
+ Q = <<"invoke-queue">>,
+ declare(Ch, Q, 3),
+ Pid = queue_pid(Config, A, rabbit_misc:r(<<"/">>, queue, Q)),
+ rabbit_ct_broker_helpers:rpc(
+ Config, A, gen_server, cast,
+ [Pid,
+ {run_backing_queue, ?MODULE, fun(_, _) -> ok end}]),
+ Pid2 = queue_pid(Config, A, rabbit_misc:r(<<"/">>, queue, Q)),
+ Pid = Pid2,
+ delete(Ch, Q),
+ rabbit_ct_client_helpers:close_channel(Ch),
+ rabbit_ct_client_helpers:close_connection(Conn),
+ passed.
+
+
+gen_server2_stats(Config) ->
+    %% Synthetic test to check that gen_server2 stats are collected
+    %% for the queue process: rabbit_core_metrics must return a
+    %% numeric value for the queue pid.
+ A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, A),
+ Q = <<"gen_server2_stats_queue">>,
+ declare(Ch, Q, 3),
+ Pid = queue_pid(Config, A, rabbit_misc:r(<<"/">>, queue, Q)),
+ Metrics = rabbit_ct_broker_helpers:rpc(
+ Config, A, rabbit_core_metrics, get_gen_server2_stats,
+ [Pid]),
+ true = is_number(Metrics),
+ delete(Ch, Q),
+ rabbit_ct_client_helpers:close_channel(Ch),
+ rabbit_ct_client_helpers:close_connection(Conn),
+ passed.
+
+dropwhile_fetchwhile(Config) ->
+ {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ Q = <<"dropwhile_fetchwhile-queue">>,
+ [begin
+ declare(Ch, Q, Args ++ arguments(3)),
+ publish(Ch, Q, [1, 2, 3, 1, 2, 3, 1, 2, 3]),
+ timer:sleep(10),
+ get_empty(Ch, Q),
+ delete(Ch, Q)
+ end ||
+ Args <- [[{<<"x-message-ttl">>, long, 1}],
+ [{<<"x-message-ttl">>, long, 1},
+ {<<"x-dead-letter-exchange">>, longstr, <<"amq.fanout">>}]
+ ]],
+ rabbit_ct_client_helpers:close_channel(Ch),
+ rabbit_ct_client_helpers:close_connection(Conn),
+ passed.
+
+ackfold(Config) ->
+ {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ Q = <<"ackfolq-queue1">>,
+ Q2 = <<"ackfold-queue2">>,
+ declare(Ch, Q,
+ [{<<"x-dead-letter-exchange">>, longstr, <<>>},
+ {<<"x-dead-letter-routing-key">>, longstr, Q2}
+ | arguments(3)]),
+ declare(Ch, Q2, none),
+ publish(Ch, Q, [1, 2, 3]),
+ [_, _, DTag] = get_all(Ch, Q, manual_ack, [3, 2, 1]),
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DTag,
+ multiple = true,
+ requeue = false}),
+ timer:sleep(100),
+ get_all(Ch, Q2, do_ack, [3, 2, 1]),
+ delete(Ch, Q),
+ delete(Ch, Q2),
+ rabbit_ct_client_helpers:close_channel(Ch),
+ rabbit_ct_client_helpers:close_connection(Conn),
+ passed.
+
+requeue(Config) ->
+ {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ Q = <<"requeue-queue">>,
+ declare(Ch, Q, 3),
+ publish(Ch, Q, [1, 2, 3]),
+ [_, _, DTag] = get_all(Ch, Q, manual_ack, [3, 2, 1]),
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DTag,
+ multiple = true,
+ requeue = true}),
+ get_all(Ch, Q, do_ack, [3, 2, 1]),
+ delete(Ch, Q),
+ rabbit_ct_client_helpers:close_channel(Ch),
+ rabbit_ct_client_helpers:close_connection(Conn),
+ passed.
+
+drop(Config) ->
+ {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ Q = <<"drop-queue">>,
+ declare(Ch, Q, [{<<"x-max-length">>, long, 4} | arguments(3)]),
+ publish(Ch, Q, [1, 2, 3, 1, 2, 3, 1, 2, 3]),
+ %% We drop from the head, so this is according to the "spec" even
+ %% if not likely to be what the user wants.
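+    %% To trace one step: when the fifth publish (priority 2) arrives the
+    %% queue holds [3, 2, 1, 1]; adding it gives [3, 2, 2, 1, 1] and the
+    %% head (the 3) is dropped. Every later publish over the limit drops
+    %% the then-highest message the same way, leaving [2, 1, 1, 1].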
+ get_all(Ch, Q, do_ack, [2, 1, 1, 1]),
+ delete(Ch, Q),
+ rabbit_ct_client_helpers:close_channel(Ch),
+ rabbit_ct_client_helpers:close_connection(Conn),
+ passed.
+
+reject(Config) ->
+ {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ XOverflow = ?config(overflow, Config),
+ Q = <<"reject-queue-", XOverflow/binary>>,
+ declare(Ch, Q, [{<<"x-max-length">>, long, 4},
+ {<<"x-overflow">>, longstr, XOverflow}
+ | arguments(3)]),
+ publish(Ch, Q, [1, 2, 3, 1, 2, 3, 1, 2, 3]),
+ %% First 4 messages are published, all others are discarded.
+ get_all(Ch, Q, do_ack, [3, 2, 1, 1]),
+ delete(Ch, Q),
+ rabbit_ct_client_helpers:close_channel(Ch),
+ rabbit_ct_client_helpers:close_connection(Conn),
+ passed.
+
+purge(Config) ->
+ {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ Q = <<"purge-queue">>,
+ declare(Ch, Q, 3),
+ publish(Ch, Q, [1, 2, 3]),
+ amqp_channel:call(Ch, #'queue.purge'{queue = Q}),
+ get_empty(Ch, Q),
+ delete(Ch, Q),
+ rabbit_ct_client_helpers:close_channel(Ch),
+ rabbit_ct_client_helpers:close_connection(Conn),
+ passed.
+
+info_head_message_timestamp(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, info_head_message_timestamp1, [Config]).
+
+info_head_message_timestamp1(_Config) ->
+ QName = rabbit_misc:r(<<"/">>, queue,
+ <<"info_head_message_timestamp-queue">>),
+ Q0 = rabbit_amqqueue:pseudo_queue(QName, self()),
+ Q1 = amqqueue:set_arguments(Q0, [{<<"x-max-priority">>, long, 2}]),
+ PQ = rabbit_priority_queue,
+ BQS1 = PQ:init(Q1, new, fun(_, _) -> ok end),
+ %% The queue is empty: no timestamp.
+ true = PQ:is_empty(BQS1),
+ '' = PQ:info(head_message_timestamp, BQS1),
+ %% Publish one message with timestamp 1000.
+ Msg1 = #basic_message{
+ id = msg1,
+ content = #content{
+ properties = #'P_basic'{
+ priority = 1,
+ timestamp = 1000
+ }},
+ is_persistent = false
+ },
+ BQS2 = PQ:publish(Msg1, #message_properties{size = 0}, false, self(),
+ noflow, BQS1),
+ 1000 = PQ:info(head_message_timestamp, BQS2),
+ %% Publish a higher priority message with no timestamp.
+ Msg2 = #basic_message{
+ id = msg2,
+ content = #content{
+ properties = #'P_basic'{
+ priority = 2
+ }},
+ is_persistent = false
+ },
+ BQS3 = PQ:publish(Msg2, #message_properties{size = 0}, false, self(),
+ noflow, BQS2),
+ '' = PQ:info(head_message_timestamp, BQS3),
+ %% Consume message with no timestamp.
+ {{Msg2, _, _}, BQS4} = PQ:fetch(false, BQS3),
+ 1000 = PQ:info(head_message_timestamp, BQS4),
+ %% Consume message with timestamp 1000, but do not acknowledge it
+ %% yet. The goal is to verify that the unacknowledged message's
+ %% timestamp is returned.
+ {{Msg1, _, AckTag}, BQS5} = PQ:fetch(true, BQS4),
+ 1000 = PQ:info(head_message_timestamp, BQS5),
+ %% Ack message. The queue is empty now.
+ {[msg1], BQS6} = PQ:ack([AckTag], BQS5),
+ true = PQ:is_empty(BQS6),
+ '' = PQ:info(head_message_timestamp, BQS6),
+ PQ:delete_and_terminate(a_whim, BQS6),
+ passed.
+
+ram_duration(_Config) ->
+ QName = rabbit_misc:r(<<"/">>, queue, <<"ram_duration-queue">>),
+ Q0 = rabbit_amqqueue:pseudo_queue(QName, self()),
+ Q1 = amqqueue:set_arguments(Q0, [{<<"x-max-priority">>, long, 5}]),
+ PQ = rabbit_priority_queue,
+ BQS1 = PQ:init(Q1, new, fun(_, _) -> ok end),
+ {_Duration1, BQS2} = PQ:ram_duration(BQS1),
+ BQS3 = PQ:set_ram_duration_target(infinity, BQS2),
+ BQS4 = PQ:set_ram_duration_target(1, BQS3),
+ {_Duration2, BQS5} = PQ:ram_duration(BQS4),
+ PQ:delete_and_terminate(a_whim, BQS5),
+ passed.
+
+mirror_queue_sync(Config) ->
+ {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ Q = <<"mirror_queue_sync-queue">>,
+ declare(Ch, Q, 3),
+ publish(Ch, Q, [1, 2, 3]),
+ ok = rabbit_ct_broker_helpers:set_ha_policy(Config, 0,
+ <<"^mirror_queue_sync-queue$">>, <<"all">>),
+ publish(Ch, Q, [1, 2, 3, 1, 2, 3]),
+ %% master now has 9, mirror 6.
+ get_partial(Ch, Q, manual_ack, [3, 3, 3, 2, 2, 2]),
+ %% So some but not all are unacked at the mirror
+ Nodename0 = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ rabbit_ct_broker_helpers:control_action(sync_queue, Nodename0,
+ [binary_to_list(Q)], [{"-p", "/"}]),
+ wait_for_sync(Config, Nodename0, rabbit_misc:r(<<"/">>, queue, Q)),
+ rabbit_ct_client_helpers:close_connection(Conn),
+ passed.
+
+mirror_queue_sync_priority_above_max(Config) ->
+ A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ %% Tests synchronisation of mirrors when priority is higher than max priority.
+    %% This caused an infinite loop (and test timeout) before rabbitmq-server-795
+ {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, A),
+ Q = <<"mirror_queue_sync_priority_above_max-queue">>,
+ declare(Ch, Q, 3),
+ publish(Ch, Q, [5, 5, 5]),
+ ok = rabbit_ct_broker_helpers:set_ha_policy(Config, A,
+ <<".*">>, <<"all">>),
+ rabbit_ct_broker_helpers:control_action(sync_queue, A,
+ [binary_to_list(Q)], [{"-p", "/"}]),
+ wait_for_sync(Config, A, rabbit_misc:r(<<"/">>, queue, Q)),
+ delete(Ch, Q),
+ rabbit_ct_client_helpers:close_connection(Conn),
+ passed.
+
+mirror_queue_sync_priority_above_max_pending_ack(Config) ->
+ [A, B] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ %% Tests synchronisation of mirrors when priority is higher than max priority
+ %% and there are pending acks.
+    %% This caused an infinite loop (and test timeout) before rabbitmq-server-795
+ {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, A),
+ Q = <<"mirror_queue_sync_priority_above_max_pending_ack-queue">>,
+ declare(Ch, Q, 3),
+ publish(Ch, Q, [5, 5, 5]),
+ %% Consume but 'forget' to acknowledge
+ get_without_ack(Ch, Q),
+ get_without_ack(Ch, Q),
+ ok = rabbit_ct_broker_helpers:set_ha_policy(Config, A,
+ <<".*">>, <<"all">>),
+ rabbit_ct_broker_helpers:control_action(sync_queue, A,
+ [binary_to_list(Q)], [{"-p", "/"}]),
+ wait_for_sync(Config, A, rabbit_misc:r(<<"/">>, queue, Q)),
+    true = synced_msgs(Config, A, rabbit_misc:r(<<"/">>, queue, Q), 3),
+    true = synced_msgs(Config, B, rabbit_misc:r(<<"/">>, queue, Q), 3),
+ delete(Ch, Q),
+ rabbit_ct_client_helpers:close_connection(Conn),
+ passed.
+
+mirror_queue_auto_ack(Config) ->
+ A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+    %% Check correct use of AckRequired in the notifications to the mirrors.
+    %% If mirrors are notified with AckRequired == true when it is false,
+    %% the mirrors will crash on the depth notification as they will not
+    %% match the master's delta.
+    %% Bug rabbitmq-server#687
+ {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, A),
+ Q = <<"mirror_queue_auto_ack-queue">>,
+ declare(Ch, Q, 3),
+ publish(Ch, Q, [1, 2, 3]),
+ ok = rabbit_ct_broker_helpers:set_ha_policy(Config, A,
+ <<".*">>, <<"all">>),
+ get_partial(Ch, Q, no_ack, [3, 2, 1]),
+
+ %% Retrieve mirrors
+ SPids = slave_pids(Config, A, rabbit_misc:r(<<"/">>, queue, Q)),
+ [{SNode1, _SPid1}, {SNode2, SPid2}] = nodes_and_pids(SPids),
+
+ %% Restart one of the mirrors so `request_depth` is triggered
+ rabbit_ct_broker_helpers:restart_node(Config, SNode1),
+
+ %% The alive mirror must have the same pid after its neighbour is restarted
+ timer:sleep(3000), %% ugly but we can't know when the `depth` instruction arrives
+ Slaves = nodes_and_pids(slave_pids(Config, A, rabbit_misc:r(<<"/">>, queue, Q))),
+ SPid2 = proplists:get_value(SNode2, Slaves),
+
+ delete(Ch, Q),
+ rabbit_ct_client_helpers:close_channel(Ch),
+ rabbit_ct_client_helpers:close_connection(Conn),
+ passed.
+
+mirror_queue_sync_order(Config) ->
+ A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ B = rabbit_ct_broker_helpers:get_node_config(Config, 1, nodename),
+ {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, A),
+ {Conn2, Ch2} = rabbit_ct_client_helpers:open_connection_and_channel(Config, B),
+ Q = <<"mirror_queue_sync_order-queue">>,
+ declare(Ch, Q, 3),
+ publish_payload(Ch, Q, [{1, <<"msg1">>}, {2, <<"msg2">>},
+ {2, <<"msg3">>}, {2, <<"msg4">>},
+ {3, <<"msg5">>}]),
+ rabbit_ct_client_helpers:close_channel(Ch),
+
+ %% Add and sync mirror
+ ok = rabbit_ct_broker_helpers:set_ha_policy(
+ Config, A, <<"^mirror_queue_sync_order-queue$">>, <<"all">>),
+ rabbit_ct_broker_helpers:control_action(sync_queue, A,
+ [binary_to_list(Q)], [{"-p", "/"}]),
+ wait_for_sync(Config, A, rabbit_misc:r(<<"/">>, queue, Q)),
+
+ %% Stop the master
+ rabbit_ct_broker_helpers:stop_node(Config, A),
+
+ get_payload(Ch2, Q, do_ack, [<<"msg5">>, <<"msg2">>, <<"msg3">>,
+ <<"msg4">>, <<"msg1">>]),
+
+ delete(Ch2, Q),
+ rabbit_ct_broker_helpers:start_node(Config, A),
+ rabbit_ct_client_helpers:close_connection(Conn),
+ rabbit_ct_client_helpers:close_connection(Conn2),
+ passed.
+
+mirror_reset_policy(Config) ->
+    %% Gives the master time to go through all stages.
+    %% Might eventually trigger some of the race conditions from #802,
+    %% although that would likely require a longer run and a higher
+    %% number of messages in the system.
+ mirror_reset_policy(Config, 5000).
+
+mirror_fast_reset_policy(Config) ->
+ %% This test seems to trigger the bug tested in invoke/1, but it
+ %% cannot guarantee it will always happen. Thus, both tests
+ %% should stay in the test suite.
+ mirror_reset_policy(Config, 5).
+
+
+mirror_reset_policy(Config, Wait) ->
+ A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, A),
+ Q = <<"mirror_reset_policy-queue">>,
+ declare(Ch, Q, 5),
+ Pid = queue_pid(Config, A, rabbit_misc:r(<<"/">>, queue, Q)),
+ publish_many(Ch, Q, 20000),
+ [begin
+ rabbit_ct_broker_helpers:set_ha_policy(
+ Config, A, <<"^mirror_reset_policy-queue$">>, <<"all">>,
+ [{<<"ha-sync-mode">>, <<"automatic">>}]),
+ timer:sleep(Wait),
+ rabbit_ct_broker_helpers:clear_policy(
+ Config, A, <<"^mirror_reset_policy-queue$">>),
+ timer:sleep(Wait)
+ end || _ <- lists:seq(1, 10)],
+ timer:sleep(1000),
+ ok = rabbit_ct_broker_helpers:set_ha_policy(
+ Config, A, <<"^mirror_reset_policy-queue$">>, <<"all">>,
+ [{<<"ha-sync-mode">>, <<"automatic">>}]),
+ wait_for_sync(Config, A, rabbit_misc:r(<<"/">>, queue, Q), 2),
+ %% Verify master has not crashed
+ Pid = queue_pid(Config, A, rabbit_misc:r(<<"/">>, queue, Q)),
+ delete(Ch, Q),
+
+ rabbit_ct_client_helpers:close_connection(Conn),
+ passed.
+
+mirror_stop_pending_followers(Config) ->
+ A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ B = rabbit_ct_broker_helpers:get_node_config(Config, 1, nodename),
+ C = rabbit_ct_broker_helpers:get_node_config(Config, 2, nodename),
+
+ [ok = rabbit_ct_broker_helpers:rpc(
+ Config, Nodename, application, set_env, [rabbit, slave_wait_timeout, 0]) || Nodename <- [A, B, C]],
+
+ {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, A),
+ Q = <<"mirror_stop_pending_followers-queue">>,
+ declare(Ch, Q, 5),
+ publish_many(Ch, Q, 20000),
+
+ [begin
+ rabbit_ct_broker_helpers:set_ha_policy(
+ Config, A, <<"^mirror_stop_pending_followers-queue$">>, <<"all">>,
+ [{<<"ha-sync-mode">>, <<"automatic">>}]),
+ wait_for_sync(Config, A, rabbit_misc:r(<<"/">>, queue, Q), 2),
+ rabbit_ct_broker_helpers:clear_policy(
+ Config, A, <<"^mirror_stop_pending_followers-queue$">>)
+ end || _ <- lists:seq(1, 15)],
+
+ delete(Ch, Q),
+
+ [ok = rabbit_ct_broker_helpers:rpc(
+ Config, Nodename, application, set_env, [rabbit, slave_wait_timeout, 15000]) || Nodename <- [A, B, C]],
+
+ rabbit_ct_client_helpers:close_connection(Conn),
+ passed.
+
+%%----------------------------------------------------------------------------
+
+declare(Ch, Q, Args) when is_list(Args) ->
+ amqp_channel:call(Ch, #'queue.declare'{queue = Q,
+ durable = true,
+ arguments = Args});
+declare(Ch, Q, Max) ->
+ declare(Ch, Q, arguments(Max)).
+
+delete(Ch, Q) ->
+ amqp_channel:call(Ch, #'queue.delete'{queue = Q}).
+
+publish(Ch, Q, Ps) ->
+ amqp_channel:call(Ch, #'confirm.select'{}),
+ [publish1(Ch, Q, P) || P <- Ps],
+ amqp_channel:wait_for_confirms(Ch).
+
+publish_payload(Ch, Q, PPds) ->
+ amqp_channel:call(Ch, #'confirm.select'{}),
+ [publish1(Ch, Q, P, Pd) || {P, Pd} <- PPds],
+ amqp_channel:wait_for_confirms(Ch).
+
+publish_many(_Ch, _Q, 0) -> ok;
+publish_many( Ch, Q, N) -> publish1(Ch, Q, rand:uniform(5)),
+ publish_many(Ch, Q, N - 1).
+
+publish1(Ch, Q, P) ->
+ amqp_channel:cast(Ch, #'basic.publish'{routing_key = Q},
+ #amqp_msg{props = props(P),
+ payload = priority2bin(P)}).
+
+publish1(Ch, Q, P, Pd) ->
+ amqp_channel:cast(Ch, #'basic.publish'{routing_key = Q},
+ #amqp_msg{props = props(P),
+ payload = Pd}).
+
+props(undefined) -> #'P_basic'{delivery_mode = 2};
+props(P) -> #'P_basic'{priority = P,
+ delivery_mode = 2}.
+
+consume(Ch, Q, Ack) ->
+ amqp_channel:subscribe(Ch, #'basic.consume'{queue = Q,
+ no_ack = Ack =:= no_ack,
+ consumer_tag = <<"ctag">>},
+ self()),
+ receive
+ #'basic.consume_ok'{consumer_tag = <<"ctag">>} ->
+ ok
+ end.
+
+cancel(Ch) ->
+ amqp_channel:call(Ch, #'basic.cancel'{consumer_tag = <<"ctag">>}).
+
+assert_delivered(Ch, Ack, P) ->
+ PBin = priority2bin(P),
+ receive
+ {#'basic.deliver'{delivery_tag = DTag}, #amqp_msg{payload = PBin2}} ->
+ PBin = PBin2,
+ maybe_ack(Ch, Ack, DTag)
+ end.
+
+get_all(Ch, Q, Ack, Ps) ->
+ DTags = get_partial(Ch, Q, Ack, Ps),
+ get_empty(Ch, Q),
+ DTags.
+
+get_partial(Ch, Q, Ack, Ps) ->
+ [get_ok(Ch, Q, Ack, priority2bin(P)) || P <- Ps].
+
+get_empty(Ch, Q) ->
+ #'basic.get_empty'{} = amqp_channel:call(Ch, #'basic.get'{queue = Q}).
+
+get_ok(Ch, Q, Ack, PBin) ->
+ {#'basic.get_ok'{delivery_tag = DTag}, #amqp_msg{payload = PBin2}} =
+ amqp_channel:call(Ch, #'basic.get'{queue = Q,
+ no_ack = Ack =:= no_ack}),
+ ?assertEqual(PBin, PBin2),
+ maybe_ack(Ch, Ack, DTag).
+
+get_payload(Ch, Q, Ack, Ps) ->
+ [get_ok(Ch, Q, Ack, P) || P <- Ps].
+
+get_without_ack(Ch, Q) ->
+ {#'basic.get_ok'{}, _} =
+ amqp_channel:call(Ch, #'basic.get'{queue = Q, no_ack = false}).
+
+maybe_ack(Ch, do_ack, DTag) ->
+ amqp_channel:cast(Ch, #'basic.ack'{delivery_tag = DTag}),
+ DTag;
+maybe_ack(_Ch, _, DTag) ->
+ DTag.
+
+arguments(none) -> [];
+arguments(Max) -> [{<<"x-max-priority">>, byte, Max}].
+
+priority2bin(undefined) -> <<"undefined">>;
+priority2bin(Int) -> list_to_binary(integer_to_list(Int)).
+
+%%----------------------------------------------------------------------------
+
+wait_for_sync(Config, Nodename, Q) ->
+ wait_for_sync(Config, Nodename, Q, 1).
+
+wait_for_sync(Config, Nodename, Q, Nodes) ->
+ wait_for_sync(Config, Nodename, Q, Nodes, 600).
+
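+%% synced/4 is polled every 100 ms, so the default of 600 retries gives
+%% the mirrors roughly a minute to catch up before sync_timeout is thrown.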
+wait_for_sync(_, _, _, _, 0) ->
+ throw(sync_timeout);
+wait_for_sync(Config, Nodename, Q, Nodes, N) ->
+ case synced(Config, Nodename, Q, Nodes) of
+ true -> ok;
+ false -> timer:sleep(100),
+ wait_for_sync(Config, Nodename, Q, Nodes, N-1)
+ end.
+
+synced(Config, Nodename, Q, Nodes) ->
+ Info = rabbit_ct_broker_helpers:rpc(Config, Nodename,
+ rabbit_amqqueue, info_all, [<<"/">>, [name, synchronised_slave_pids]]),
+ [SSPids] = [Pids || [{name, Q1}, {synchronised_slave_pids, Pids}] <- Info,
+ Q =:= Q1],
+ length(SSPids) =:= Nodes.
+
+synced_msgs(Config, Nodename, Q, Expected) ->
+ Info = rabbit_ct_broker_helpers:rpc(Config, Nodename,
+ rabbit_amqqueue, info_all, [<<"/">>, [name, messages]]),
+ [M] = [M || [{name, Q1}, {messages, M}] <- Info, Q =:= Q1],
+ M =:= Expected.
+
+nodes_and_pids(SPids) ->
+ lists:zip([node(S) || S <- SPids], SPids).
+
+slave_pids(Config, Nodename, Q) ->
+ Info = rabbit_ct_broker_helpers:rpc(Config, Nodename,
+ rabbit_amqqueue, info_all, [<<"/">>, [name, slave_pids]]),
+ [SPids] = [SPids || [{name, Q1}, {slave_pids, SPids}] <- Info,
+ Q =:= Q1],
+ SPids.
+
+queue_pid(Config, Nodename, Q) ->
+ Info = rabbit_ct_broker_helpers:rpc(
+ Config, Nodename,
+ rabbit_amqqueue, info_all, [<<"/">>, [name, pid]]),
+ [Pid] = [P || [{name, Q1}, {pid, P}] <- Info, Q =:= Q1],
+ Pid.
+
+%%----------------------------------------------------------------------------
diff --git a/deps/rabbit/test/priority_queue_recovery_SUITE.erl b/deps/rabbit/test/priority_queue_recovery_SUITE.erl
new file mode 100644
index 0000000000..9679fb0449
--- /dev/null
+++ b/deps/rabbit/test/priority_queue_recovery_SUITE.erl
@@ -0,0 +1,144 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(priority_queue_recovery_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, non_parallel_tests}
+ ].
+
+groups() ->
+ [
+ {non_parallel_tests, [], [
+ recovery %% Restart RabbitMQ.
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(_, Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodes_count, 2}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_group(_, Config) ->
+ rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+recovery(Config) ->
+ {Conn, Ch} = open(Config),
+ Q = <<"recovery-queue">>,
+ declare(Ch, Q, 3),
+ publish(Ch, Q, [1, 2, 3, 1, 2, 3, 1, 2, 3]),
+ rabbit_ct_client_helpers:close_channel(Ch),
+ rabbit_ct_client_helpers:close_connection(Conn),
+
+ rabbit_ct_broker_helpers:restart_broker(Config, 0),
+
+ {Conn2, Ch2} = open(Config, 1),
+ get_all(Ch2, Q, do_ack, [3, 3, 3, 2, 2, 2, 1, 1, 1]),
+ delete(Ch2, Q),
+ rabbit_ct_client_helpers:close_channel(Ch2),
+ rabbit_ct_client_helpers:close_connection(Conn2),
+ passed.
+
+
+%%----------------------------------------------------------------------------
+
+open(Config) ->
+ open(Config, 0).
+
+open(Config, NodeIndex) ->
+ rabbit_ct_client_helpers:open_connection_and_channel(Config, NodeIndex).
+
+declare(Ch, Q, Args) when is_list(Args) ->
+ amqp_channel:call(Ch, #'queue.declare'{queue = Q,
+ durable = true,
+ arguments = Args});
+declare(Ch, Q, Max) ->
+ declare(Ch, Q, arguments(Max)).
+
+delete(Ch, Q) ->
+ amqp_channel:call(Ch, #'queue.delete'{queue = Q}).
+
+publish(Ch, Q, Ps) ->
+ amqp_channel:call(Ch, #'confirm.select'{}),
+ [publish1(Ch, Q, P) || P <- Ps],
+ amqp_channel:wait_for_confirms(Ch).
+
+publish1(Ch, Q, P) ->
+ amqp_channel:cast(Ch, #'basic.publish'{routing_key = Q},
+ #amqp_msg{props = props(P),
+ payload = priority2bin(P)}).
+
+publish1(Ch, Q, P, Pd) ->
+ amqp_channel:cast(Ch, #'basic.publish'{routing_key = Q},
+ #amqp_msg{props = props(P),
+ payload = Pd}).
+
+get_all(Ch, Q, Ack, Ps) ->
+ DTags = get_partial(Ch, Q, Ack, Ps),
+ get_empty(Ch, Q),
+ DTags.
+
+get_partial(Ch, Q, Ack, Ps) ->
+ [get_ok(Ch, Q, Ack, priority2bin(P)) || P <- Ps].
+
+get_empty(Ch, Q) ->
+ #'basic.get_empty'{} = amqp_channel:call(Ch, #'basic.get'{queue = Q}).
+
+get_ok(Ch, Q, Ack, PBin) ->
+ {#'basic.get_ok'{delivery_tag = DTag}, #amqp_msg{payload = PBin2}} =
+ amqp_channel:call(Ch, #'basic.get'{queue = Q,
+ no_ack = Ack =:= no_ack}),
+ PBin = PBin2,
+ maybe_ack(Ch, Ack, DTag).
+
+maybe_ack(Ch, do_ack, DTag) ->
+ amqp_channel:cast(Ch, #'basic.ack'{delivery_tag = DTag}),
+ DTag;
+maybe_ack(_Ch, _, DTag) ->
+ DTag.
+
+arguments(none) -> [];
+arguments(Max) -> [{<<"x-max-priority">>, byte, Max}].
+
+priority2bin(undefined) -> <<"undefined">>;
+priority2bin(Int) -> list_to_binary(integer_to_list(Int)).
+
+props(undefined) -> #'P_basic'{delivery_mode = 2};
+props(P) -> #'P_basic'{priority = P,
+ delivery_mode = 2}.
diff --git a/deps/rabbit/test/product_info_SUITE.erl b/deps/rabbit/test/product_info_SUITE.erl
new file mode 100644
index 0000000000..207f9222d1
--- /dev/null
+++ b/deps/rabbit/test/product_info_SUITE.erl
@@ -0,0 +1,171 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(product_info_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-export([suite/0,
+ all/0,
+ groups/0,
+ init_per_suite/1,
+ end_per_suite/1,
+ init_per_group/2,
+ end_per_group/2,
+ init_per_testcase/2,
+ end_per_testcase/2,
+
+ override_product_name_in_conf/1,
+ override_product_version_in_conf/1,
+ set_motd_in_conf/1
+ ]).
+
+suite() ->
+ [{timetrap, {minutes, 5}}].
+
+all() ->
+ [
+ {group, parallel}
+ ].
+
+groups() ->
+ [
+ {parallel, [],
+ [override_product_name_in_conf,
+ override_product_version_in_conf,
+ set_motd_in_conf]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ case os:type() of
+ {unix, _} ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config);
+ _ ->
+ {skip, "This testsuite is only relevant on Unix"}
+ end.
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(_, Config) ->
+ Config.
+
+end_per_group(_, Config) ->
+ Config.
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase),
+ ClusterSize = 1,
+ TestNumber = rabbit_ct_helpers:testcase_number(Config, ?MODULE, Testcase),
+ Config1 = rabbit_ct_helpers:set_config(
+ Config,
+ [
+ {rmq_nodename_suffix, Testcase},
+ {tcp_ports_base, {skip_n_nodes, TestNumber * ClusterSize}}
+ ]),
+ Config2 = case Testcase of
+ override_product_name_in_conf ->
+ rabbit_ct_helpers:merge_app_env(
+ Config1,
+ {rabbit, [{product_name, "MyProduct"}]});
+ override_product_version_in_conf ->
+ rabbit_ct_helpers:merge_app_env(
+ Config1,
+ {rabbit, [{product_version, "MyVersion"}]});
+ set_motd_in_conf ->
+ PrivDir = ?config(priv_dir, Config),
+ MotdFile = filename:join(PrivDir, "motd.txt"),
+ ok = file:write_file(MotdFile, <<"My MOTD\n">>),
+ C2 = rabbit_ct_helpers:set_config(
+ Config1,
+ {motd_file, MotdFile}),
+ rabbit_ct_helpers:merge_app_env(
+ C2,
+ {rabbit, [{motd_file, MotdFile}]})
+ end,
+ rabbit_ct_helpers:run_steps(Config2,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_testcase(Testcase, Config) ->
+ Config1 = rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()),
+ rabbit_ct_helpers:testcase_finished(Config1, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+override_product_name_in_conf(Config) ->
+ A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ ProductName = "MyProduct",
+ ?assertEqual(ProductName,
+ rabbit_ct_broker_helpers:rpc(
+ Config, A, rabbit, product_name, [])),
+ grep_in_log_file(Config, A, ProductName),
+ grep_in_stdout(Config, A, ProductName).
+
+override_product_version_in_conf(Config) ->
+ A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ ProductVersion = "MyVersion",
+ ?assertEqual(ProductVersion,
+ rabbit_ct_broker_helpers:rpc(
+ Config, A, rabbit, product_version, [])),
+ grep_in_log_file(Config, A, ProductVersion),
+ grep_in_stdout(Config, A, ProductVersion).
+
+set_motd_in_conf(Config) ->
+ A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ MotdFile = ?config(motd_file, Config),
+ ?assertEqual(MotdFile,
+ rabbit_ct_broker_helpers:rpc(
+ Config, A, rabbit, motd_file, [])),
+ {ok, Motd0} = file:read_file(MotdFile),
+ Motd = string:trim(Motd0, trailing, [$\r,$\n]),
+ ?assertEqual(Motd,
+ rabbit_ct_broker_helpers:rpc(
+ Config, A, rabbit, motd, [])),
+ grep_in_log_file(Config, A, Motd),
+ grep_in_stdout(Config, A, Motd).
+
+grep_in_log_file(Config, Node, String) ->
+ [Log | _] = rabbit_ct_broker_helpers:rpc(
+ Config, Node, rabbit, log_locations, []),
+ ct:pal(?LOW_IMPORTANCE, "Grepping \"~s\" in ~s", [String, Log]),
+ %% We try to grep several times, in case the log file was not
+ %% fsync'd yet (and thus we don't see the content yet).
+ do_grep_in_log_file(String, Log, 30).
+
+do_grep_in_log_file(String, Log, Retries) ->
+ {ok, Content} = file:read_file(Log),
+ case re:run(Content, ["\\b", String, "\\b"], [{capture, none}]) of
+ match ->
+ ok;
+ nomatch when Retries > 1 ->
+ timer:sleep(1000),
+ do_grep_in_log_file(String, Log, Retries - 1);
+ nomatch ->
+ throw({failed_to_grep, String, Log, Content})
+ end.
+
+grep_in_stdout(Config, Node, String) ->
+ [Log | _] = rabbit_ct_broker_helpers:rpc(
+ Config, Node, rabbit, log_locations, []),
+ LogDir = filename:dirname(Log),
+ Stdout = filename:join(LogDir, "startup_log"),
+ ct:pal(?LOW_IMPORTANCE, "Grepping \"~s\" in ~s", [String, Stdout]),
+ {ok, Content} = file:read_file(Stdout),
+ ?assertMatch(
+ match,
+ re:run(Content, ["\\b", String, "\\b"], [{capture, none}])).
diff --git a/deps/rabbit/test/proxy_protocol_SUITE.erl b/deps/rabbit/test/proxy_protocol_SUITE.erl
new file mode 100644
index 0000000000..92c29b6063
--- /dev/null
+++ b/deps/rabbit/test/proxy_protocol_SUITE.erl
@@ -0,0 +1,91 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2017-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(proxy_protocol_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-compile(export_all).
+
+-define(TIMEOUT, 5000).
+
+all() ->
+ [
+ {group, sequential_tests}
+ ].
+
+groups() -> [
+ {sequential_tests, [], [
+ proxy_protocol,
+ proxy_protocol_tls
+ ]}
+ ].
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, ?MODULE}
+ ]),
+ Config2 = rabbit_ct_helpers:merge_app_env(Config1, [
+ {rabbit, [
+ {proxy_protocol, true}
+ ]}
+ ]),
+ Config3 = rabbit_ct_helpers:set_config(Config2, {rabbitmq_ct_tls_verify, verify_none}),
+ rabbit_ct_helpers:run_setup_steps(Config3,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_group(_, Config) -> Config.
+end_per_group(_, Config) -> Config.
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
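+%% Both tests below hand-craft a PROXY protocol v1 header before sending
+%% the AMQP protocol preamble. A v1 header is a single ASCII line of the
+%% form
+%%
+%%   PROXY <TCP4|TCP6> <src-ip> <dst-ip> <src-port> <dst-port>\r\n
+%%
+%% so "PROXY TCP4 192.168.1.1 192.168.1.2 80 81\r\n" advertises a client
+%% at 192.168.1.1:80, which is what connection_name/0 is expected to
+%% report instead of the real peer address.
+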
+proxy_protocol(Config) ->
+ Port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp),
+ {ok, Socket} = gen_tcp:connect({127,0,0,1}, Port,
+ [binary, {active, false}, {packet, raw}]),
+ ok = inet:send(Socket, "PROXY TCP4 192.168.1.1 192.168.1.2 80 81\r\n"),
+ ok = inet:send(Socket, <<"AMQP", 0, 0, 9, 1>>),
+ {ok, _Packet} = gen_tcp:recv(Socket, 0, ?TIMEOUT),
+ ConnectionName = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, connection_name, []),
+ match = re:run(ConnectionName, <<"^192.168.1.1:80 ">>, [{capture, none}]),
+ gen_tcp:close(Socket),
+ ok.
+
+proxy_protocol_tls(Config) ->
+ app_utils:start_applications([asn1, crypto, public_key, ssl]),
+ Port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp_tls),
+ {ok, Socket} = gen_tcp:connect({127,0,0,1}, Port,
+ [binary, {active, false}, {packet, raw}]),
+ ok = inet:send(Socket, "PROXY TCP4 192.168.1.1 192.168.1.2 80 81\r\n"),
+ {ok, SslSocket} = ssl:connect(Socket, [], ?TIMEOUT),
+ ok = ssl:send(SslSocket, <<"AMQP", 0, 0, 9, 1>>),
+ {ok, _Packet} = ssl:recv(SslSocket, 0, ?TIMEOUT),
+ ConnectionName = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, connection_name, []),
+ match = re:run(ConnectionName, <<"^192.168.1.1:80 ">>, [{capture, none}]),
+ gen_tcp:close(Socket),
+ ok.
+
+connection_name() ->
+ Pids = pg_local:get_members(rabbit_connections),
+ Pid = lists:nth(1, Pids),
+ {dictionary, Dict} = process_info(Pid, dictionary),
+ {process_name, {rabbit_reader, ConnectionName}} = lists:keyfind(process_name, 1, Dict),
+ ConnectionName.
diff --git a/deps/rabbit/test/publisher_confirms_parallel_SUITE.erl b/deps/rabbit/test/publisher_confirms_parallel_SUITE.erl
new file mode 100644
index 0000000000..f79fcae3ce
--- /dev/null
+++ b/deps/rabbit/test/publisher_confirms_parallel_SUITE.erl
@@ -0,0 +1,380 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+-module(publisher_confirms_parallel_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("kernel/include/file.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-compile(export_all).
+
+-define(TIMEOUT, 60000).
+
+-import(quorum_queue_utils, [wait_for_messages/2]).
+
+all() ->
+ [
+ {group, publisher_confirm_tests}
+ ].
+
+groups() ->
+ PublisherConfirmTests = [publisher_confirms,
+ publisher_confirms_with_deleted_queue,
+ confirm_select_ok,
+ confirm_nowait,
+ confirm_ack,
+ confirm_acks,
+ confirm_mandatory_unroutable,
+ confirm_unroutable_message],
+ [
+ {publisher_confirm_tests, [],
+ [
+ {classic_queue, [parallel], PublisherConfirmTests ++ [confirm_nack]},
+ {mirrored_queue, [parallel], PublisherConfirmTests ++ [confirm_nack]},
+ {quorum_queue, [],
+ [
+         {parallel_tests, [parallel], PublisherConfirmTests},
+ confirm_minority
+ ]}
+ ]}
+ ].
+
+suite() ->
+ [
+ {timetrap, {minutes, 3}}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(classic_queue, Config) ->
+ rabbit_ct_helpers:set_config(
+ Config,
+ [{queue_args, [{<<"x-queue-type">>, longstr, <<"classic">>}]},
+ {queue_durable, true}]);
+init_per_group(quorum_queue, Config) ->
+ case rabbit_ct_broker_helpers:enable_feature_flag(Config, quorum_queue) of
+ ok ->
+ rabbit_ct_helpers:set_config(
+ Config,
+ [{queue_args, [{<<"x-queue-type">>, longstr, <<"quorum">>}]},
+ {queue_durable, true}]);
+ Skip ->
+ Skip
+ end;
+init_per_group(mirrored_queue, Config) ->
+ rabbit_ct_broker_helpers:set_ha_policy(Config, 0, <<"^max_length.*queue">>,
+ <<"all">>, [{<<"ha-sync-mode">>, <<"automatic">>}]),
+ Config1 = rabbit_ct_helpers:set_config(
+ Config, [{is_mirrored, true},
+ {queue_args, [{<<"x-queue-type">>, longstr, <<"classic">>}]},
+ {queue_durable, true}]),
+ rabbit_ct_helpers:run_steps(Config1, []);
+init_per_group(Group, Config) ->
+ case lists:member({group, Group}, all()) of
+ true ->
+ ClusterSize = 3,
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, Group},
+ {rmq_nodes_count, ClusterSize}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps());
+ false ->
+ Config
+ end.
+
+end_per_group(Group, Config) ->
+ case lists:member({group, Group}, all()) of
+ true ->
+ rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps());
+ false ->
+ Config
+ end.
+
+init_per_testcase(Testcase, Config) ->
+ Group = proplists:get_value(name, ?config(tc_group_properties, Config)),
+ Q = rabbit_data_coercion:to_binary(io_lib:format("~p_~p", [Group, Testcase])),
+ Q2 = rabbit_data_coercion:to_binary(io_lib:format("~p_~p_2", [Group, Testcase])),
+ Config1 = rabbit_ct_helpers:set_config(Config, [{queue_name, Q},
+ {queue_name_2, Q2}]),
+ rabbit_ct_helpers:testcase_started(Config1, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ {_, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ amqp_channel:call(Ch, #'queue.delete'{queue = ?config(queue_name, Config)}),
+ amqp_channel:call(Ch, #'queue.delete'{queue = ?config(queue_name_2, Config)}),
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+%% To enable confirms, a client sends the confirm.select method
+publisher_confirms(Config) ->
+ {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ declare_queue(Ch, Config, QName),
+ amqp_channel:call(Ch, #'confirm.select'{}),
+ amqp_channel:register_confirm_handler(Ch, self()),
+ publish(Ch, QName, [<<"msg1">>]),
+ wait_for_messages(Config, [[QName, <<"1">>, <<"1">>, <<"0">>]]),
+ amqp_channel:wait_for_confirms(Ch, 5),
+ amqp_channel:unregister_confirm_handler(Ch),
+ ok.
+
+publisher_confirms_with_deleted_queue(Config) ->
+ {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ declare_queue(Ch, Config, QName),
+ amqp_channel:call(Ch, #'confirm.select'{}),
+ amqp_channel:register_confirm_handler(Ch, self()),
+ publish(Ch, QName, [<<"msg1">>]),
+ amqp_channel:call(Ch, #'queue.delete'{queue = QName}),
+ amqp_channel:wait_for_confirms_or_die(Ch, 5),
+ amqp_channel:unregister_confirm_handler(Ch).
+
+%% Depending on whether no-wait was set or not, the broker may respond with a confirm.select-ok
+confirm_select_ok(Config) ->
+ {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ declare_queue(Ch, Config, QName),
+ ?assertEqual(#'confirm.select_ok'{}, amqp_channel:call(Ch, #'confirm.select'{nowait = false})).
+
+confirm_nowait(Config) ->
+ {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ declare_queue(Ch, Config, QName),
+ ?assertEqual(ok, amqp_channel:call(Ch, #'confirm.select'{nowait = true})).
+
+%% The broker then confirms messages as it handles them by sending a basic.ack on the same channel.
+%% The delivery-tag field contains the sequence number of the confirmed message.
+confirm_ack(Config) ->
+ %% Ensure we receive an ack and not a nack
+ {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ declare_queue(Ch, Config, QName),
+ amqp_channel:call(Ch, #'confirm.select'{}),
+ amqp_channel:register_confirm_handler(Ch, self()),
+ publish(Ch, QName, [<<"msg1">>]),
+ receive
+ #'basic.ack'{delivery_tag = 1} ->
+ ok
+ after 5000 ->
+ throw(missing_ack)
+ end.
+
+%% The broker may also set the multiple field in basic.ack to indicate that all messages up to
+%% and including the one with the sequence number have been handled.
+confirm_acks(Config) ->
+ {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ declare_queue(Ch, Config, QName),
+ amqp_channel:call(Ch, #'confirm.select'{}),
+ amqp_channel:register_confirm_handler(Ch, self()),
+ publish(Ch, QName, [<<"msg1">>, <<"msg2">>, <<"msg3">>, <<"msg4">>]),
+ receive_many(lists:seq(1, 4)).
+
+%% For unroutable messages, the broker will issue a confirm once the exchange verifies a message
+%% won't route to any queue (returns an empty list of queues).
+%% If the message is also published as mandatory, the basic.return is sent to the client before
+%% basic.ack.
+confirm_mandatory_unroutable(Config) ->
+ {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ amqp_channel:call(Ch, #'confirm.select'{}),
+ amqp_channel:register_confirm_handler(Ch, self()),
+ amqp_channel:register_return_handler(Ch, self()),
+ ok = amqp_channel:call(Ch, #'basic.publish'{routing_key = QName,
+ mandatory = true}, #amqp_msg{payload = <<"msg1">>}),
+ receive
+ {#'basic.return'{}, _} ->
+ ok
+ after 5000 ->
+ throw(missing_return)
+ end,
+ receive
+ #'basic.ack'{delivery_tag = 1} ->
+ ok
+ after 5000 ->
+ throw(missing_ack)
+ end.
+
+confirm_unroutable_message(Config) ->
+ %% Ensure we receive a nack for an unroutable message
+ {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ amqp_channel:call(Ch, #'confirm.select'{}),
+ amqp_channel:register_confirm_handler(Ch, self()),
+ publish(Ch, QName, [<<"msg1">>]),
+ receive
+ {#'basic.return'{}, _} ->
+ throw(unexpected_basic_return);
+ #'basic.ack'{delivery_tag = 1} ->
+ ok
+ after 5000 ->
+ throw(missing_ack)
+ end.
+
+%% In exceptional cases when the broker is unable to handle messages successfully,
+%% instead of a basic.ack, the broker will send a basic.nack.
+%% basic.nack will only be delivered if an internal error occurs in the Erlang process
+%% responsible for a queue.
+%% This test crashes the queue before it has time to answer, but it only works for classic
+%% queues. On quorum queues the followers will take over and rabbit_fifo_client will resend
+%% any pending messages.
+confirm_nack(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, confirm_nack1, [Config]).
+
+confirm_nack1(Config) ->
+ {_Writer, _Limiter, Ch} = rabbit_ct_broker_helpers:test_channel(),
+ ok = rabbit_channel:do(Ch, #'channel.open'{}),
+ receive #'channel.open_ok'{} -> ok
+ after ?TIMEOUT -> throw(failed_to_receive_channel_open_ok)
+ end,
+ Args = ?config(queue_args, Config),
+ Durable = ?config(queue_durable, Config),
+ QName1 = ?config(queue_name, Config),
+ QName2 = ?config(queue_name_2, Config),
+ DeclareBindDurableQueue =
+ fun(QName) ->
+ rabbit_channel:do(Ch, #'queue.declare'{durable = Durable,
+ queue = QName,
+ arguments = Args}),
+ receive #'queue.declare_ok'{} ->
+ rabbit_channel:do(Ch, #'queue.bind'{
+ queue = QName,
+ exchange = <<"amq.direct">>,
+ routing_key = "confirms-magic" }),
+ receive #'queue.bind_ok'{} -> ok
+ after ?TIMEOUT -> throw(failed_to_bind_queue)
+ end
+ after ?TIMEOUT -> throw(failed_to_declare_queue)
+ end
+ end,
+ %% Declare and bind two queues
+ DeclareBindDurableQueue(QName1),
+ DeclareBindDurableQueue(QName2),
+ %% Get the first one's pid (we'll crash it later)
+ {ok, Q1} = rabbit_amqqueue:lookup(rabbit_misc:r(<<"/">>, queue, QName1)),
+ QPid1 = amqqueue:get_pid(Q1),
+ %% Enable confirms
+ rabbit_channel:do(Ch, #'confirm.select'{}),
+ receive
+ #'confirm.select_ok'{} -> ok
+ after ?TIMEOUT -> throw(failed_to_enable_confirms)
+ end,
+ %% Publish a message
+ rabbit_channel:do(Ch, #'basic.publish'{exchange = <<"amq.direct">>,
+ routing_key = "confirms-magic"
+ },
+ rabbit_basic:build_content(
+ #'P_basic'{delivery_mode = 2}, <<"">>)),
+ %% We must not kill the queue before the channel has processed the
+ %% 'publish'.
+ ok = rabbit_channel:flush(Ch),
+ %% Crash the queue
+ QPid1 ! boom,
+ %% Wait for a nack
+ receive
+ #'basic.nack'{} -> ok;
+ #'basic.ack'{} -> throw(received_ack_instead_of_nack)
+ after ?TIMEOUT-> throw(did_not_receive_nack)
+ end,
+ receive
+ #'basic.ack'{} -> throw(received_ack_when_none_expected)
+ after 1000 -> ok
+ end,
+ %% Cleanup
+ unlink(Ch),
+ ok = rabbit_channel:shutdown(Ch),
+ passed.
+
+%% The closest we can get to nack behaviour on quorum queues is not answering
+%% while the cluster is in minority. Once the cluster recovers, a 'basic.ack'
+%% will be issued.
+confirm_minority(Config) ->
+ [_A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ declare_queue(Ch, Config, QName),
+ ok = rabbit_ct_broker_helpers:stop_node(Config, B),
+ ok = rabbit_ct_broker_helpers:stop_node(Config, C),
+ amqp_channel:call(Ch, #'confirm.select'{}),
+ amqp_channel:register_confirm_handler(Ch, self()),
+ publish(Ch, QName, [<<"msg1">>]),
+ receive
+ #'basic.nack'{} -> ok;
+ #'basic.ack'{} -> throw(unexpected_ack)
+ after 120000 ->
+ ok
+ end,
+ ok = rabbit_ct_broker_helpers:start_node(Config, B),
+ publish(Ch, QName, [<<"msg2">>]),
+ receive
+ #'basic.nack'{} -> throw(unexpected_nack);
+ #'basic.ack'{} -> ok
+ after 60000 ->
+ throw(missing_ack)
+ end.
+
+%%%%%%%%%%%%%%%%%%%%%%%%
+%% Test helpers
+%%%%%%%%%%%%%%%%%%%%%%%%
+declare_queue(Ch, Config, QName) ->
+ Args = ?config(queue_args, Config),
+ Durable = ?config(queue_durable, Config),
+ #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = QName,
+ arguments = Args,
+ durable = Durable}).
+
+publish(Ch, QName, Payloads) ->
+ [amqp_channel:call(Ch, #'basic.publish'{routing_key = QName}, #amqp_msg{payload = Payload})
+ || Payload <- Payloads].
+
+publish(Ch, QName, Payloads, Headers) ->
+ [amqp_channel:call(Ch, #'basic.publish'{routing_key = QName},
+ #amqp_msg{payload = Payload,
+ props = #'P_basic'{headers = Headers}})
+ || Payload <- Payloads].
+
+consume(Ch, QName, Payloads) ->
+ [begin
+ {#'basic.get_ok'{delivery_tag = DTag}, #amqp_msg{payload = Payload}} =
+ amqp_channel:call(Ch, #'basic.get'{queue = QName}),
+ DTag
+ end || Payload <- Payloads].
+
+consume_empty(Ch, QName) ->
+ #'basic.get_empty'{} = amqp_channel:call(Ch, #'basic.get'{queue = QName}).
+
+sync_mirrors(QName, Config) ->
+ case ?config(is_mirrored, Config) of
+ true ->
+ rabbit_ct_broker_helpers:rabbitmqctl(Config, 0, [<<"sync_queue">>, QName]);
+ _ -> ok
+ end.
+
+receive_many([]) ->
+ ok;
+receive_many(DTags) ->
+ receive
+ #'basic.ack'{delivery_tag = DTag, multiple = true} ->
+ receive_many(DTags -- lists:seq(1, DTag));
+ #'basic.ack'{delivery_tag = DTag, multiple = false} ->
+ receive_many(DTags -- [DTag])
+ after 5000 ->
+ throw(missing_ack)
+ end.
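+
+%% For example, after publishing four messages (as confirm_acks/1 does),
+%% receive_many(lists:seq(1, 4)) tolerates any mix of single and multiple
+%% confirms: a #'basic.ack'{delivery_tag = 3, multiple = true} clears tags
+%% 1, 2 and 3 in one message, leaving only tag 4 outstanding.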
diff --git a/deps/rabbit/test/queue_length_limits_SUITE.erl b/deps/rabbit/test/queue_length_limits_SUITE.erl
new file mode 100644
index 0000000000..b86f502869
--- /dev/null
+++ b/deps/rabbit/test/queue_length_limits_SUITE.erl
@@ -0,0 +1,382 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(queue_length_limits_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("kernel/include/file.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-compile(export_all).
+
+-define(TIMEOUT_LIST_OPS_PASS, 5000).
+-define(TIMEOUT, 30000).
+-define(TIMEOUT_CHANNEL_EXCEPTION, 5000).
+
+-define(CLEANUP_QUEUE_NAME, <<"cleanup-queue">>).
+
+all() ->
+ [
+ {group, parallel_tests}
+ ].
+
+groups() ->
+ MaxLengthTests = [max_length_default,
+ max_length_bytes_default,
+ max_length_drop_head,
+ max_length_bytes_drop_head,
+ max_length_reject_confirm,
+ max_length_bytes_reject_confirm,
+ max_length_drop_publish,
+ max_length_drop_publish_requeue,
+ max_length_bytes_drop_publish],
+ [
+ {parallel_tests, [parallel], [
+ {max_length_classic, [], MaxLengthTests},
+ {max_length_quorum, [], [max_length_default,
+ max_length_bytes_default]
+ },
+ {max_length_mirrored, [], MaxLengthTests}
+ ]}
+ ].
+
+suite() ->
+ [
+ {timetrap, {minutes, 3}}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(max_length_classic, Config) ->
+ rabbit_ct_helpers:set_config(
+ Config,
+ [{queue_args, [{<<"x-queue-type">>, longstr, <<"classic">>}]},
+ {queue_durable, false}]);
+init_per_group(max_length_quorum, Config) ->
+ case rabbit_ct_broker_helpers:enable_feature_flag(Config, quorum_queue) of
+ ok ->
+ rabbit_ct_helpers:set_config(
+ Config,
+ [{queue_args, [{<<"x-queue-type">>, longstr, <<"quorum">>}]},
+ {queue_durable, true}]);
+ Skip ->
+ Skip
+ end;
+init_per_group(max_length_mirrored, Config) ->
+ rabbit_ct_broker_helpers:set_ha_policy(Config, 0, <<"^max_length.*queue">>,
+ <<"all">>, [{<<"ha-sync-mode">>, <<"automatic">>}]),
+ Config1 = rabbit_ct_helpers:set_config(
+ Config, [{is_mirrored, true},
+ {queue_args, [{<<"x-queue-type">>, longstr, <<"classic">>}]},
+ {queue_durable, false}]),
+ rabbit_ct_helpers:run_steps(Config1, []);
+init_per_group(Group, Config) ->
+ case lists:member({group, Group}, all()) of
+ true ->
+ ClusterSize = 3,
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, Group},
+ {rmq_nodes_count, ClusterSize}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps());
+ false ->
+ rabbit_ct_helpers:run_steps(Config, [])
+ end.
+
+end_per_group(max_length_mirrored, Config) ->
+ rabbit_ct_broker_helpers:clear_policy(Config, 0, <<"^max_length.*queue">>),
+ Config1 = rabbit_ct_helpers:set_config(Config, [{is_mirrored, false}]),
+ Config1;
+end_per_group(queue_max_length, Config) ->
+ Config;
+end_per_group(Group, Config) ->
+ case lists:member({group, Group}, all()) of
+ true ->
+ rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps());
+ false ->
+ Config
+ end.
+
+init_per_testcase(Testcase, Config) ->
+ Group = proplists:get_value(name, ?config(tc_group_properties, Config)),
+ Q = rabbit_data_coercion:to_binary(io_lib:format("~p_~p", [Group, Testcase])),
+ Config1 = rabbit_ct_helpers:set_config(Config, [{queue_name, Q}]),
+ rabbit_ct_helpers:testcase_started(Config1, Testcase).
+
+end_per_testcase(Testcase, Config)
+ when Testcase == max_length_drop_publish; Testcase == max_length_bytes_drop_publish;
+ Testcase == max_length_drop_publish_requeue;
+ Testcase == max_length_reject_confirm; Testcase == max_length_bytes_reject_confirm;
+ Testcase == max_length_drop_head; Testcase == max_length_bytes_drop_head;
+ Testcase == max_length_default; Testcase == max_length_bytes_default ->
+ {_, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ amqp_channel:call(Ch, #'queue.delete'{queue = ?config(queue_name, Config)}),
+ rabbit_ct_client_helpers:close_channels_and_connection(Config, 0),
+ rabbit_ct_helpers:testcase_finished(Config, Testcase);
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+
+%% -------------------------------------------------------------------
+%% Test cases
+%% -------------------------------------------------------------------
+
+max_length_bytes_drop_head(Config) ->
+ max_length_bytes_drop_head(Config, [{<<"x-overflow">>, longstr, <<"drop-head">>}]).
+
+max_length_bytes_default(Config) ->
+ max_length_bytes_drop_head(Config, []).
+
+max_length_bytes_drop_head(Config, ExtraArgs) ->
+ {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ Args = ?config(queue_args, Config),
+ Durable = ?config(queue_durable, Config),
+ QName = ?config(queue_name, Config),
+
+ MaxLengthBytesArgs = [{<<"x-max-length-bytes">>, long, 100}],
+ #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = QName, arguments = MaxLengthBytesArgs ++ Args ++ ExtraArgs, durable = Durable}),
+
+ %% 80 bytes payload
+ Payload1 = << <<"1">> || _ <- lists:seq(1, 80) >>,
+ Payload2 = << <<"2">> || _ <- lists:seq(1, 80) >>,
+ Payload3 = << <<"3">> || _ <- lists:seq(1, 80) >>,
+ check_max_length_drops_head(Config, QName, Ch, Payload1, Payload2, Payload3).
+
+max_length_drop_head(Config) ->
+ max_length_drop_head(Config, [{<<"x-overflow">>, longstr, <<"drop-head">>}]).
+
+max_length_default(Config) ->
+    %% Defaults to drop-head
+ max_length_drop_head(Config, []).
+
+max_length_drop_head(Config, ExtraArgs) ->
+ {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ Args = ?config(queue_args, Config),
+ Durable = ?config(queue_durable, Config),
+ QName = ?config(queue_name, Config),
+
+ MaxLengthArgs = [{<<"x-max-length">>, long, 1}],
+ #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = QName, arguments = MaxLengthArgs ++ Args ++ ExtraArgs, durable = Durable}),
+
+ check_max_length_drops_head(Config, QName, Ch, <<"1">>, <<"2">>, <<"3">>).
+
+max_length_reject_confirm(Config) ->
+ {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ Args = ?config(queue_args, Config),
+ QName = ?config(queue_name, Config),
+ Durable = ?config(queue_durable, Config),
+ MaxLengthArgs = [{<<"x-max-length">>, long, 1}],
+ OverflowArgs = [{<<"x-overflow">>, longstr, <<"reject-publish">>}],
+ #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = QName, arguments = MaxLengthArgs ++ OverflowArgs ++ Args, durable = Durable}),
+ #'confirm.select_ok'{} = amqp_channel:call(Ch, #'confirm.select'{}),
+ check_max_length_drops_publish(Config, QName, Ch, <<"1">>, <<"2">>, <<"3">>),
+ check_max_length_rejects(Config, QName, Ch, <<"1">>, <<"2">>, <<"3">>).
+
+max_length_bytes_reject_confirm(Config) ->
+ {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ Args = ?config(queue_args, Config),
+ QNameBytes = ?config(queue_name, Config),
+ Durable = ?config(queue_durable, Config),
+ MaxLengthBytesArgs = [{<<"x-max-length-bytes">>, long, 100}],
+ OverflowArgs = [{<<"x-overflow">>, longstr, <<"reject-publish">>}],
+ #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = QNameBytes, arguments = MaxLengthBytesArgs ++ OverflowArgs ++ Args, durable = Durable}),
+ #'confirm.select_ok'{} = amqp_channel:call(Ch, #'confirm.select'{}),
+
+ %% 80 bytes payload
+ Payload1 = << <<"1">> || _ <- lists:seq(1, 80) >>,
+ Payload2 = << <<"2">> || _ <- lists:seq(1, 80) >>,
+ Payload3 = << <<"3">> || _ <- lists:seq(1, 80) >>,
+
+ check_max_length_drops_publish(Config, QNameBytes, Ch, Payload1, Payload2, Payload3),
+ check_max_length_rejects(Config, QNameBytes, Ch, Payload1, Payload2, Payload3).
+
+max_length_drop_publish(Config) ->
+ {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ Args = ?config(queue_args, Config),
+ Durable = ?config(queue_durable, Config),
+ QName = ?config(queue_name, Config),
+ MaxLengthArgs = [{<<"x-max-length">>, long, 1}],
+ OverflowArgs = [{<<"x-overflow">>, longstr, <<"reject-publish">>}],
+ #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = QName, arguments = MaxLengthArgs ++ OverflowArgs ++ Args, durable = Durable}),
+    %% If confirms are not enabled, publishes are still dropped in reject-publish mode.
+ check_max_length_drops_publish(Config, QName, Ch, <<"1">>, <<"2">>, <<"3">>).
+
+max_length_drop_publish_requeue(Config) ->
+ {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ Args = ?config(queue_args, Config),
+ Durable = ?config(queue_durable, Config),
+ QName = ?config(queue_name, Config),
+ MaxLengthArgs = [{<<"x-max-length">>, long, 1}],
+ OverflowArgs = [{<<"x-overflow">>, longstr, <<"reject-publish">>}],
+ #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = QName, arguments = MaxLengthArgs ++ OverflowArgs ++ Args, durable = Durable}),
+    %% If confirms are not enabled, publishes are still dropped in reject-publish mode.
+ check_max_length_requeue(Config, QName, Ch, <<"1">>, <<"2">>).
+
+max_length_bytes_drop_publish(Config) ->
+ {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ Args = ?config(queue_args, Config),
+ Durable = ?config(queue_durable, Config),
+ QNameBytes = ?config(queue_name, Config),
+ MaxLengthBytesArgs = [{<<"x-max-length-bytes">>, long, 100}],
+ OverflowArgs = [{<<"x-overflow">>, longstr, <<"reject-publish">>}],
+ #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = QNameBytes, arguments = MaxLengthBytesArgs ++ OverflowArgs ++ Args, durable = Durable}),
+
+ %% 80 bytes payload
+ Payload1 = << <<"1">> || _ <- lists:seq(1, 80) >>,
+ Payload2 = << <<"2">> || _ <- lists:seq(1, 80) >>,
+ Payload3 = << <<"3">> || _ <- lists:seq(1, 80) >>,
+
+ check_max_length_drops_publish(Config, QNameBytes, Ch, Payload1, Payload2, Payload3).
+
+%% -------------------------------------------------------------------
+%% Implementation
+%% -------------------------------------------------------------------
+
+check_max_length_requeue(Config, QName, Ch, Payload1, Payload2) ->
+ sync_mirrors(QName, Config),
+
+ #'confirm.select_ok'{} = amqp_channel:call(Ch, #'confirm.select'{}),
+ amqp_channel:register_confirm_handler(Ch, self()),
+
+ #'basic.get_empty'{} = amqp_channel:call(Ch, #'basic.get'{queue = QName}),
+ %% A single message is published and consumed
+ amqp_channel:call(Ch, #'basic.publish'{routing_key = QName}, #amqp_msg{payload = Payload1}),
+ amqp_channel:wait_for_confirms(Ch, 5),
+
+ {#'basic.get_ok'{delivery_tag = DeliveryTag},
+ #amqp_msg{payload = Payload1}} = amqp_channel:call(Ch, #'basic.get'{queue = QName}),
+ #'basic.get_empty'{} = amqp_channel:call(Ch, #'basic.get'{queue = QName}),
+
+ %% Another message is published
+ amqp_channel:call(Ch, #'basic.publish'{routing_key = QName}, #amqp_msg{payload = Payload2}),
+ amqp_channel:wait_for_confirms(Ch, 5),
+
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DeliveryTag,
+ multiple = false,
+ requeue = true}),
+ {#'basic.get_ok'{}, #amqp_msg{payload = Payload1}} = amqp_channel:call(Ch, #'basic.get'{queue = QName}),
+ {#'basic.get_ok'{}, #amqp_msg{payload = Payload2}} = amqp_channel:call(Ch, #'basic.get'{queue = QName}),
+ #'basic.get_empty'{} = amqp_channel:call(Ch, #'basic.get'{queue = QName}).
+
+check_max_length_drops_publish(Config, QName, Ch, Payload1, Payload2, Payload3) ->
+ sync_mirrors(QName, Config),
+
+ #'confirm.select_ok'{} = amqp_channel:call(Ch, #'confirm.select'{}),
+ amqp_channel:register_confirm_handler(Ch, self()),
+
+ #'basic.get_empty'{} = amqp_channel:call(Ch, #'basic.get'{queue = QName}),
+ %% A single message is published and consumed
+ amqp_channel:call(Ch, #'basic.publish'{routing_key = QName}, #amqp_msg{payload = Payload1}),
+ amqp_channel:wait_for_confirms(Ch, 5),
+
+ {#'basic.get_ok'{}, #amqp_msg{payload = Payload1}} = amqp_channel:call(Ch, #'basic.get'{queue = QName}),
+ #'basic.get_empty'{} = amqp_channel:call(Ch, #'basic.get'{queue = QName}),
+
+ %% Message 2 is dropped, message 1 stays
+ amqp_channel:call(Ch, #'basic.publish'{routing_key = QName}, #amqp_msg{payload = Payload1}),
+ amqp_channel:call(Ch, #'basic.publish'{routing_key = QName}, #amqp_msg{payload = Payload2}),
+ amqp_channel:wait_for_confirms(Ch, 5),
+ {#'basic.get_ok'{}, #amqp_msg{payload = Payload1}} = amqp_channel:call(Ch, #'basic.get'{queue = QName}),
+ #'basic.get_empty'{} = amqp_channel:call(Ch, #'basic.get'{queue = QName}),
+
+ %% Messages 2 and 3 are dropped, message 1 stays
+ amqp_channel:call(Ch, #'basic.publish'{routing_key = QName}, #amqp_msg{payload = Payload1}),
+ amqp_channel:call(Ch, #'basic.publish'{routing_key = QName}, #amqp_msg{payload = Payload2}),
+ amqp_channel:call(Ch, #'basic.publish'{routing_key = QName}, #amqp_msg{payload = Payload3}),
+ amqp_channel:wait_for_confirms(Ch, 5),
+ {#'basic.get_ok'{}, #amqp_msg{payload = Payload1}} = amqp_channel:call(Ch, #'basic.get'{queue = QName}),
+ #'basic.get_empty'{} = amqp_channel:call(Ch, #'basic.get'{queue = QName}).
+
+check_max_length_rejects(Config, QName, Ch, Payload1, Payload2, Payload3) ->
+ sync_mirrors(QName, Config),
+ amqp_channel:register_confirm_handler(Ch, self()),
+ flush(),
+ #'basic.get_empty'{} = amqp_channel:call(Ch, #'basic.get'{queue = QName}),
+    %% The first message can be enqueued and is acked
+ amqp_channel:call(Ch, #'basic.publish'{routing_key = QName}, #amqp_msg{payload = Payload1}),
+ receive #'basic.ack'{} -> ok
+ after 1000 -> error(expected_ack)
+ end,
+
+    %% The next message cannot be enqueued and is nacked
+ amqp_channel:call(Ch, #'basic.publish'{routing_key = QName}, #amqp_msg{payload = Payload2}),
+ receive #'basic.nack'{} -> ok
+ after 1000 -> error(expected_nack)
+ end,
+
+    %% The next message cannot be enqueued either and is nacked
+ amqp_channel:call(Ch, #'basic.publish'{routing_key = QName}, #amqp_msg{payload = Payload3}),
+ receive #'basic.nack'{} -> ok
+ after 1000 -> error(expected_nack)
+ end,
+
+ {#'basic.get_ok'{}, #amqp_msg{payload = Payload1}} = amqp_channel:call(Ch, #'basic.get'{queue = QName}),
+
+ %% Now we can publish message 2.
+ amqp_channel:call(Ch, #'basic.publish'{routing_key = QName}, #amqp_msg{payload = Payload2}),
+ receive #'basic.ack'{} -> ok
+ after 1000 -> error(expected_ack)
+ end,
+
+ {#'basic.get_ok'{}, #amqp_msg{payload = Payload2}} = amqp_channel:call(Ch, #'basic.get'{queue = QName}).
+
+check_max_length_drops_head(Config, QName, Ch, Payload1, Payload2, Payload3) ->
+ sync_mirrors(QName, Config),
+
+ #'confirm.select_ok'{} = amqp_channel:call(Ch, #'confirm.select'{}),
+ amqp_channel:register_confirm_handler(Ch, self()),
+
+ #'basic.get_empty'{} = amqp_channel:call(Ch, #'basic.get'{queue = QName}),
+ %% A single message is published and consumed
+ amqp_channel:call(Ch, #'basic.publish'{routing_key = QName}, #amqp_msg{payload = Payload1}),
+ amqp_channel:wait_for_confirms(Ch, 5),
+
+ {#'basic.get_ok'{}, #amqp_msg{payload = Payload1}} = amqp_channel:call(Ch, #'basic.get'{queue = QName}),
+ #'basic.get_empty'{} = amqp_channel:call(Ch, #'basic.get'{queue = QName}),
+
+ %% Message 1 is replaced by message 2
+ amqp_channel:call(Ch, #'basic.publish'{routing_key = QName}, #amqp_msg{payload = Payload1}),
+ amqp_channel:call(Ch, #'basic.publish'{routing_key = QName}, #amqp_msg{payload = Payload2}),
+ amqp_channel:wait_for_confirms(Ch, 5),
+
+ {#'basic.get_ok'{}, #amqp_msg{payload = Payload2}} = amqp_channel:call(Ch, #'basic.get'{queue = QName}),
+ #'basic.get_empty'{} = amqp_channel:call(Ch, #'basic.get'{queue = QName}),
+
+ %% Messages 1 and 2 are replaced
+ amqp_channel:call(Ch, #'basic.publish'{routing_key = QName}, #amqp_msg{payload = Payload1}),
+ amqp_channel:call(Ch, #'basic.publish'{routing_key = QName}, #amqp_msg{payload = Payload2}),
+ amqp_channel:call(Ch, #'basic.publish'{routing_key = QName}, #amqp_msg{payload = Payload3}),
+ amqp_channel:wait_for_confirms(Ch, 5),
+ {#'basic.get_ok'{}, #amqp_msg{payload = Payload3}} = amqp_channel:call(Ch, #'basic.get'{queue = QName}),
+ #'basic.get_empty'{} = amqp_channel:call(Ch, #'basic.get'{queue = QName}).
+
+sync_mirrors(QName, Config) ->
+ case rabbit_ct_helpers:get_config(Config, is_mirrored) of
+ true ->
+ rabbit_ct_broker_helpers:rabbitmqctl(Config, 0, [<<"sync_queue">>, QName]);
+ _ -> ok
+ end.
+
+flush() ->
+ receive _ -> flush()
+ after 10 -> ok
+ end.
diff --git a/deps/rabbit/test/queue_master_location_SUITE.erl b/deps/rabbit/test/queue_master_location_SUITE.erl
new file mode 100644
index 0000000000..fab3eac3f0
--- /dev/null
+++ b/deps/rabbit/test/queue_master_location_SUITE.erl
@@ -0,0 +1,498 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(queue_master_location_SUITE).
+
+%% These tests use an ABC cluster in which each node is initialised with
+%% a different number of queues. When a queue is declared, different
+%% strategies can be applied to determine the queue's master node. Queue
+%% location strategies can be applied in the following ways:
+%%   1. as policy,
+%%   2. as config (in rabbitmq.config),
+%%   3. as part of the queue's declare arguments.
+%%
+%% Currently supported strategies are:
+%%   min-masters : The queue master node is the node hosting the fewest
+%%                 queue masters in the cluster.
+%%   client-local: The queue master node is the node on which the
+%%                 declaration is carried out.
+%%   random      : The queue master node is selected at random.
+%%
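+%% For reference, a minimal sketch of the three mechanisms as exercised by
+%% this suite (these are the exact calls the helpers and tests below use):
+%%   1. policy:       set_location_policy(Config, ?POLICY, <<"min-masters">>)
+%%   2. config:       application:set_env(rabbit, queue_master_locator, <<"min-masters">>)
+%%   3. declare args: [{<<"x-queue-master-locator">>, longstr, <<"min-masters">>}]
+%%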
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-compile(export_all).
+
+-define(DEFAULT_VHOST_PATH, (<<"/">>)).
+-define(POLICY, <<"^qm.location$">>).
+
+all() ->
+ [
+ {group, cluster_size_3},
+ {group, maintenance_mode}
+ ].
+
+groups() ->
+ [
+ {cluster_size_3, [], [
+ declare_args,
+ declare_policy,
+ declare_invalid_policy,
+ declare_policy_nodes,
+ declare_policy_all,
+ declare_policy_exactly,
+ declare_config,
+ calculate_min_master,
+ calculate_min_master_with_bindings,
+ calculate_random,
+ calculate_client_local
+ ]},
+
+ {maintenance_mode, [], [
+ declare_with_min_masters_and_some_nodes_under_maintenance,
+ declare_with_min_masters_and_all_nodes_under_maintenance,
+
+ declare_with_random_and_some_nodes_under_maintenance,
+ declare_with_random_and_all_nodes_under_maintenance
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Test suite setup/teardown
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(cluster_size_3, Config) ->
+ rabbit_ct_helpers:set_config(Config, [
+ %% Replaced with a list of node names later
+ {rmq_nodes_count, 3}
+ ]);
+init_per_group(maintenance_mode, Config) ->
+ rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodes_count, 3}
+ ]).
+
+end_per_group(_, Config) ->
+ Config.
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase),
+ ClusterSize = ?config(rmq_nodes_count, Config),
+ Nodenames = [
+ list_to_atom(rabbit_misc:format("~s-~b", [Testcase, I]))
+ || I <- lists:seq(1, ClusterSize)
+ ],
+ TestNumber = rabbit_ct_helpers:testcase_number(Config, ?MODULE, Testcase),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodes_count, Nodenames},
+ {rmq_nodes_clustered, true},
+ {rmq_nodename_suffix, Testcase},
+ {tcp_ports_base, {skip_n_nodes, TestNumber * ClusterSize}}
+ ]),
+ Config2 = rabbit_ct_helpers:run_steps(
+ Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()),
+ Group = proplists:get_value(name, ?config(tc_group_properties, Config)),
+ FFEnabled = case Group of
+ maintenance_mode ->
+ rabbit_ct_broker_helpers:enable_feature_flag(
+ Config2,
+ maintenance_mode_status);
+ _ ->
+ ok
+ end,
+ case FFEnabled of
+ ok ->
+ Config2;
+ Skip ->
+ end_per_testcase(Testcase, Config2),
+ Skip
+ end.
+
+end_per_testcase(Testcase, Config) ->
+ Config1 = rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()),
+ rabbit_ct_helpers:testcase_finished(Config1, Testcase).
+
+%% -------------------------------------------------------------------
+%% Test cases
+%% -------------------------------------------------------------------
+
+%%
+%% Queue 'declarations'
+%%
+
+declare_args(Config) ->
+ setup_test_environment(Config),
+ unset_location_config(Config),
+ QueueName = rabbit_misc:r(<<"/">>, queue, Q = <<"qm.test">>),
+ Args = [{<<"x-queue-master-locator">>, longstr, <<"min-masters">>}],
+ declare(Config, QueueName, false, false, Args, none),
+ verify_min_master(Config, Q).
+
+declare_policy(Config) ->
+ setup_test_environment(Config),
+ unset_location_config(Config),
+ set_location_policy(Config, ?POLICY, <<"min-masters">>),
+ QueueName = rabbit_misc:r(<<"/">>, queue, Q = <<"qm.test">>),
+ declare(Config, QueueName, false, false, _Args=[], none),
+ verify_min_master(Config, Q).
+
+declare_invalid_policy(Config) ->
+    %% Tests that the queue-master-locator validation returns 'ok'; otherwise
+    %% validation of the remaining parameters is skipped and an invalid policy accepted.
+ setup_test_environment(Config),
+ unset_location_config(Config),
+ Policy = [{<<"queue-master-locator">>, <<"min-masters">>},
+ {<<"ha-mode">>, <<"exactly">>},
+ %% this field is expected to be an integer
+ {<<"ha-params">>, <<"2">>}],
+ {error_string, _} = rabbit_ct_broker_helpers:rpc(
+ Config, 0, rabbit_policy, set,
+ [<<"/">>, ?POLICY, <<".*">>, Policy, 0, <<"queues">>, <<"acting-user">>]).
+
+declare_policy_nodes(Config) ->
+ setup_test_environment(Config),
+ unset_location_config(Config),
+ % Note:
+ % Node0 has 15 queues, Node1 has 8 and Node2 has 1
+ Node0Name = rabbit_data_coercion:to_binary(
+ rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename)),
+ Node1 = rabbit_ct_broker_helpers:get_node_config(Config, 1, nodename),
+ Node1Name = rabbit_data_coercion:to_binary(Node1),
+ Nodes = [Node1Name, Node0Name],
+ Policy = [{<<"queue-master-locator">>, <<"min-masters">>},
+ {<<"ha-mode">>, <<"nodes">>},
+ {<<"ha-params">>, Nodes}],
+ ok = rabbit_ct_broker_helpers:set_policy(Config, 0, ?POLICY,
+ <<".*">>, <<"queues">>, Policy),
+ QueueName = rabbit_misc:r(<<"/">>, queue, Q = <<"qm.test">>),
+ declare(Config, QueueName, false, false, _Args=[], none),
+ verify_min_master(Config, Q, Node1).
+
+declare_policy_all(Config) ->
+ setup_test_environment(Config),
+ unset_location_config(Config),
+ % Note:
+ % Node0 has 15 queues, Node1 has 8 and Node2 has 1
+ Policy = [{<<"queue-master-locator">>, <<"min-masters">>},
+ {<<"ha-mode">>, <<"all">>}],
+ ok = rabbit_ct_broker_helpers:set_policy(Config, 0, ?POLICY,
+ <<".*">>, <<"queues">>, Policy),
+ QueueName = rabbit_misc:r(<<"/">>, queue, Q = <<"qm.test">>),
+ declare(Config, QueueName, false, false, _Args=[], none),
+ verify_min_master(Config, Q).
+
+declare_policy_exactly(Config) ->
+ setup_test_environment(Config),
+ unset_location_config(Config),
+ Policy = [{<<"queue-master-locator">>, <<"min-masters">>},
+ {<<"ha-mode">>, <<"exactly">>},
+ {<<"ha-params">>, 2}],
+ ok = rabbit_ct_broker_helpers:set_policy(Config, 0, ?POLICY,
+ <<".*">>, <<"queues">>, Policy),
+ QueueRes = rabbit_misc:r(<<"/">>, queue, Q = <<"qm.test">>),
+ declare(Config, QueueRes, false, false, _Args=[], none),
+
+ Node0 = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ rabbit_ct_broker_helpers:control_action(sync_queue, Node0,
+ [binary_to_list(Q)], [{"-p", "/"}]),
+ wait_for_sync(Config, Node0, QueueRes, 1),
+
+ {ok, Queue} = rabbit_ct_broker_helpers:rpc(Config, Node0,
+ rabbit_amqqueue, lookup, [QueueRes]),
+ {MNode0, [SNode], [SSNode]} = rabbit_ct_broker_helpers:rpc(Config, Node0,
+ rabbit_mirror_queue_misc,
+ actual_queue_nodes, [Queue]),
+ ?assertEqual(SNode, SSNode),
+ {ok, MNode1} = rabbit_ct_broker_helpers:rpc(Config, 0,
+ rabbit_queue_master_location_misc,
+ lookup_master, [Q, ?DEFAULT_VHOST_PATH]),
+ ?assertEqual(MNode0, MNode1),
+ Node2 = rabbit_ct_broker_helpers:get_node_config(Config, 2, nodename),
+ ?assertEqual(MNode1, Node2).
+
+declare_config(Config) ->
+ setup_test_environment(Config),
+ set_location_config(Config, <<"min-masters">>),
+ QueueName = rabbit_misc:r(<<"/">>, queue, Q = <<"qm.test">>),
+ declare(Config, QueueName, false, false, _Args = [], none),
+ verify_min_master(Config, Q),
+ unset_location_config(Config),
+ ok.
+
+%%
+%% Maintenance mode effects
+%%
+
+declare_with_min_masters_and_some_nodes_under_maintenance(Config) ->
+ set_location_policy(Config, ?POLICY, <<"min-masters">>),
+ rabbit_ct_broker_helpers:mark_as_being_drained(Config, 0),
+ rabbit_ct_broker_helpers:mark_as_being_drained(Config, 1),
+
+ QName = <<"qm.tests.min_masters.maintenance.case1">>,
+ Resource = rabbit_misc:r(<<"/">>, queue, QName),
+ Record = declare(Config, Resource, false, false, _Args = [], none),
+ %% the only node that's not being drained
+ ?assertEqual(rabbit_ct_broker_helpers:get_node_config(Config, 2, nodename),
+ node(amqqueue:get_pid(Record))),
+
+ rabbit_ct_broker_helpers:unmark_as_being_drained(Config, 0),
+ rabbit_ct_broker_helpers:unmark_as_being_drained(Config, 1).
+
+declare_with_min_masters_and_all_nodes_under_maintenance(Config) ->
+ declare_with_all_nodes_under_maintenance(Config, <<"min-masters">>).
+
+declare_with_random_and_some_nodes_under_maintenance(Config) ->
+ set_location_policy(Config, ?POLICY, <<"random">>),
+ rabbit_ct_broker_helpers:mark_as_being_drained(Config, 0),
+ rabbit_ct_broker_helpers:mark_as_being_drained(Config, 2),
+
+ QName = <<"qm.tests.random.maintenance.case1">>,
+ Resource = rabbit_misc:r(<<"/">>, queue, QName),
+ Record = declare(Config, Resource, false, false, _Args = [], none),
+ %% the only node that's not being drained
+ ?assertEqual(rabbit_ct_broker_helpers:get_node_config(Config, 1, nodename),
+ node(amqqueue:get_pid(Record))),
+
+ rabbit_ct_broker_helpers:unmark_as_being_drained(Config, 0),
+ rabbit_ct_broker_helpers:unmark_as_being_drained(Config, 2).
+
+declare_with_random_and_all_nodes_under_maintenance(Config) ->
+ declare_with_all_nodes_under_maintenance(Config, <<"random">>).
+
+declare_with_all_nodes_under_maintenance(Config, Locator) ->
+ set_location_policy(Config, ?POLICY, Locator),
+ rabbit_ct_broker_helpers:mark_as_being_drained(Config, 0),
+ rabbit_ct_broker_helpers:mark_as_being_drained(Config, 1),
+ rabbit_ct_broker_helpers:mark_as_being_drained(Config, 2),
+
+ QName = rabbit_data_coercion:to_binary(
+ rabbit_misc:format("qm.tests.~s.maintenance.case2", [Locator])),
+ Resource = rabbit_misc:r(<<"/">>, queue, QName),
+ Record = declare(Config, Resource, false, false, _Args = [], none),
+    %% When the queue master locator returns no node, the node that handled
+    %% the declaration method is used as a fallback.
+ ?assertEqual(rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ node(amqqueue:get_pid(Record))),
+
+ rabbit_ct_broker_helpers:unmark_as_being_drained(Config, 0),
+ rabbit_ct_broker_helpers:unmark_as_being_drained(Config, 1),
+ rabbit_ct_broker_helpers:unmark_as_being_drained(Config, 2).
+
+%%
+%% Test 'calculations'
+%%
+
+calculate_min_master(Config) ->
+ setup_test_environment(Config),
+ QueueName = rabbit_misc:r(<<"/">>, queue, Q = <<"qm.test">>),
+ Args = [{<<"x-queue-master-locator">>, longstr, <<"min-masters">>}],
+ declare(Config, QueueName, false, false, Args, none),
+ verify_min_master(Config, Q),
+ ok.
+
+calculate_min_master_with_bindings(Config) ->
+ setup_test_environment(Config),
+ QueueName = rabbit_misc:r(<<"/">>, queue, Q = <<"qm.test_bound">>),
+ Args = [{<<"x-queue-master-locator">>, longstr, <<"min-masters">>}],
+ declare(Config, QueueName, false, false, Args, none),
+ verify_min_master(Config, Q),
+ %% Add 20 bindings to this queue
+ [ bind(Config, QueueName, integer_to_binary(N)) || N <- lists:seq(1, 20) ],
+
+ QueueName1 = rabbit_misc:r(<<"/">>, queue, Q1 = <<"qm.test_unbound">>),
+ declare(Config, QueueName1, false, false, Args, none),
+    % Another queue should still land on the same node: bindings must not
+    % count towards the min-masters calculation.
+ verify_min_master(Config, Q1),
+ ok.
+
+calculate_random(Config) ->
+ setup_test_environment(Config),
+ QueueName = rabbit_misc:r(<<"/">>, queue, Q = <<"qm.test">>),
+ Args = [{<<"x-queue-master-locator">>, longstr, <<"random">>}],
+ declare(Config, QueueName, false, false, Args, none),
+ verify_random(Config, Q),
+ ok.
+
+calculate_client_local(Config) ->
+ setup_test_environment(Config),
+ QueueName = rabbit_misc:r(<<"/">>, queue, Q = <<"qm.test">>),
+ Args = [{<<"x-queue-master-locator">>, longstr, <<"client-local">>}],
+ declare(Config, QueueName, false, false, Args, none),
+ verify_client_local(Config, Q),
+ ok.
+
+%%
+%% Setup environment
+%%
+
+setup_test_environment(Config) ->
+ Nodes = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ [distribute_queues(Config, Node) || Node <- Nodes],
+ ok.
+
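+%% Declares a different number of queues on each node (15, 8 and 1 on nodes
+%% 0, 1 and 2 respectively) so the min-masters strategy has a deterministic
+%% winner for the assertions above.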
+distribute_queues(Config, Node) ->
+    ok = rpc:call(Node, application, unset_env, [rabbit, queue_master_locator]),
+ Count = case rabbit_ct_broker_helpers:nodename_to_index(Config, Node) of
+ 0 -> 15;
+ 1 -> 8;
+ 2 -> 1
+ end,
+
+ Channel = rabbit_ct_client_helpers:open_channel(Config, Node),
+ ok = declare_queues(Channel, declare_fun(), Count),
+ ok = create_e2e_binding(Channel, [<< "ex_1" >>, << "ex_2" >>]),
+ {ok, Channel}.
+
+%%
+%% Internal queue handling
+%%
+
+declare_queues(Channel, DeclareFun, 1) -> DeclareFun(Channel);
+declare_queues(Channel, DeclareFun, N) ->
+ DeclareFun(Channel),
+ declare_queues(Channel, DeclareFun, N-1).
+
+declare_exchange(Channel, Ex) ->
+ #'exchange.declare_ok'{} =
+ amqp_channel:call(Channel, #'exchange.declare'{exchange = Ex}),
+ {ok, Ex}.
+
+declare_binding(Channel, Binding) ->
+ #'exchange.bind_ok'{} = amqp_channel:call(Channel, Binding),
+ ok.
+
+declare_fun() ->
+ fun(Channel) ->
+ #'queue.declare_ok'{} = amqp_channel:call(Channel, get_random_queue_declare()),
+ ok
+ end.
+
+create_e2e_binding(Channel, ExNamesBin) ->
+ [{ok, Ex1}, {ok, Ex2}] = [declare_exchange(Channel, Ex) || Ex <- ExNamesBin],
+ Binding = #'exchange.bind'{source = Ex1, destination = Ex2},
+ ok = declare_binding(Channel, Binding).
+
+get_random_queue_declare() ->
+ #'queue.declare'{passive = false,
+ durable = false,
+ exclusive = true,
+ auto_delete = false,
+ nowait = false,
+ arguments = []}.
+
+%%
+%% Internal helper functions
+%%
+
+get_cluster() -> [node()|nodes()].
+
+min_master_node(Config) ->
+    lists:last(
+        rabbit_ct_broker_helpers:get_node_configs(Config, nodename)).
+
+set_location_config(Config, Strategy) ->
+ Nodes = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ [ok = rabbit_ct_broker_helpers:rpc(Config, Node,
+ application, set_env,
+ [rabbit, queue_master_locator, Strategy]) || Node <- Nodes],
+ ok.
+
+unset_location_config(Config) ->
+ Nodes = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ [ok = rabbit_ct_broker_helpers:rpc(Config, Node,
+ application, unset_env,
+ [rabbit, queue_master_locator]) || Node <- Nodes],
+ ok.
+
+declare(Config, QueueName, Durable, AutoDelete, Args0, Owner) ->
+ Args1 = [QueueName, Durable, AutoDelete, Args0, Owner, <<"acting-user">>],
+ case rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_amqqueue, declare, Args1) of
+ {new, Queue} -> Queue;
+ Other -> Other
+ end.
+
+bind(Config, QueueName, RoutingKey) ->
+ ExchangeName = rabbit_misc:r(QueueName, exchange, <<"amq.direct">>),
+
+ ok = rabbit_ct_broker_helpers:rpc(
+ Config, 0, rabbit_binding, add,
+ [#binding{source = ExchangeName,
+ destination = QueueName,
+ key = RoutingKey,
+ args = []},
+ <<"acting-user">>]).
+
+verify_min_master(Config, Q, MinMasterNode) ->
+ Rpc = rabbit_ct_broker_helpers:rpc(Config, 0,
+ rabbit_queue_master_location_misc,
+ lookup_master, [Q, ?DEFAULT_VHOST_PATH]),
+ ?assertEqual({ok, MinMasterNode}, Rpc).
+
+verify_min_master(Config, Q) ->
+ MinMaster = min_master_node(Config),
+ verify_min_master(Config, Q, MinMaster).
+
+verify_random(Config, Q) ->
+ [Node | _] = Nodes = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ {ok, Master} = rabbit_ct_broker_helpers:rpc(Config, Node,
+ rabbit_queue_master_location_misc,
+ lookup_master, [Q, ?DEFAULT_VHOST_PATH]),
+ ?assert(lists:member(Master, Nodes)).
+
+verify_client_local(Config, Q) ->
+ Node = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ Rpc = rabbit_ct_broker_helpers:rpc(Config, Node,
+ rabbit_queue_master_location_misc,
+ lookup_master, [Q, ?DEFAULT_VHOST_PATH]),
+ ?assertEqual({ok, Node}, Rpc).
+
+set_location_policy(Config, Name, Strategy) ->
+ ok = rabbit_ct_broker_helpers:set_policy(Config, 0,
+ Name, <<".*">>, <<"queues">>, [{<<"queue-master-locator">>, Strategy}]).
+
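+%% Polls every 100ms, up to N attempts, until the queue reports the expected
+%% number of synchronised slave pids.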
+wait_for_sync(Config, Nodename, Q, ExpectedSSPidLen) ->
+ wait_for_sync(Config, Nodename, Q, ExpectedSSPidLen, 600).
+
+wait_for_sync(_, _, _, _, 0) ->
+ throw(sync_timeout);
+wait_for_sync(Config, Nodename, Q, ExpectedSSPidLen, N) ->
+ case synced(Config, Nodename, Q, ExpectedSSPidLen) of
+ true -> ok;
+ false -> timer:sleep(100),
+ wait_for_sync(Config, Nodename, Q, ExpectedSSPidLen, N-1)
+ end.
+
+synced(Config, Nodename, Q, ExpectedSSPidLen) ->
+ Args = [<<"/">>, [name, synchronised_slave_pids]],
+ Info = rabbit_ct_broker_helpers:rpc(Config, Nodename,
+ rabbit_amqqueue, info_all, Args),
+ [SSPids] = [Pids || [{name, Q1}, {synchronised_slave_pids, Pids}] <- Info, Q =:= Q1],
+ length(SSPids) =:= ExpectedSSPidLen.
diff --git a/deps/rabbit/test/queue_parallel_SUITE.erl b/deps/rabbit/test/queue_parallel_SUITE.erl
new file mode 100644
index 0000000000..6f813512f4
--- /dev/null
+++ b/deps/rabbit/test/queue_parallel_SUITE.erl
@@ -0,0 +1,725 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+%%
+-module(queue_parallel_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("kernel/include/file.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-compile(export_all).
+
+-define(TIMEOUT, 30000).
+
+-import(quorum_queue_utils, [wait_for_messages/2]).
+
+all() ->
+ [
+ {group, parallel_tests}
+ ].
+
+groups() ->
+ AllTests = [publish,
+ consume,
+ consume_first_empty,
+ consume_from_empty_queue,
+ consume_and_autoack,
+ subscribe,
+ subscribe_consumers,
+ subscribe_with_autoack,
+ consume_and_ack,
+ consume_and_multiple_ack,
+ subscribe_and_ack,
+ subscribe_and_multiple_ack,
+ subscribe_and_requeue_multiple_nack,
+ subscribe_and_nack,
+ subscribe_and_requeue_nack,
+ subscribe_and_multiple_nack,
+ consume_and_requeue_nack,
+ consume_and_nack,
+ consume_and_requeue_multiple_nack,
+ consume_and_multiple_nack,
+ basic_cancel,
+ purge,
+ basic_recover,
+ delete_immediately_by_resource
+ ],
+ [
+ {parallel_tests, [],
+ [
+ {classic_queue, [parallel], AllTests ++ [delete_immediately_by_pid_succeeds,
+ trigger_message_store_compaction]},
+ {mirrored_queue, [parallel], AllTests ++ [delete_immediately_by_pid_succeeds,
+ trigger_message_store_compaction]},
+ {quorum_queue, [parallel], AllTests ++ [delete_immediately_by_pid_fails]},
+ {quorum_queue_in_memory_limit, [parallel], AllTests ++ [delete_immediately_by_pid_fails]},
+ {quorum_queue_in_memory_bytes, [parallel], AllTests ++ [delete_immediately_by_pid_fails]},
+ {stream_queue, [parallel], [publish,
+ subscribe]}
+ ]}
+ ].
+
+suite() ->
+ [
+ {timetrap, {minutes, 3}}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(classic_queue, Config) ->
+ rabbit_ct_helpers:set_config(
+ Config,
+ [{queue_args, [{<<"x-queue-type">>, longstr, <<"classic">>}]},
+ {consumer_args, []},
+ {queue_durable, true}]);
+init_per_group(quorum_queue, Config) ->
+ case rabbit_ct_broker_helpers:enable_feature_flag(Config, quorum_queue) of
+ ok ->
+ rabbit_ct_helpers:set_config(
+ Config,
+ [{queue_args, [{<<"x-queue-type">>, longstr, <<"quorum">>}]},
+ {consumer_args, []},
+ {queue_durable, true}]);
+ Skip ->
+ Skip
+ end;
+init_per_group(quorum_queue_in_memory_limit, Config) ->
+ case rabbit_ct_broker_helpers:enable_feature_flag(Config, quorum_queue) of
+ ok ->
+ rabbit_ct_helpers:set_config(
+ Config,
+ [{queue_args, [{<<"x-queue-type">>, longstr, <<"quorum">>},
+ {<<"x-max-in-memory-length">>, long, 1}]},
+ {consumer_args, []},
+ {queue_durable, true}]);
+ Skip ->
+ Skip
+ end;
+init_per_group(quorum_queue_in_memory_bytes, Config) ->
+ case rabbit_ct_broker_helpers:enable_feature_flag(Config, quorum_queue) of
+ ok ->
+ rabbit_ct_helpers:set_config(
+ Config,
+ [{queue_args, [{<<"x-queue-type">>, longstr, <<"quorum">>},
+ {<<"x-max-in-memory-bytes">>, long, 1}]},
+ {consumer_args, []},
+ {queue_durable, true}]);
+ Skip ->
+ Skip
+ end;
+init_per_group(mirrored_queue, Config) ->
+ rabbit_ct_broker_helpers:set_ha_policy(Config, 0, <<"^max_length.*queue">>,
+ <<"all">>, [{<<"ha-sync-mode">>, <<"automatic">>}]),
+ Config1 = rabbit_ct_helpers:set_config(
+ Config, [{is_mirrored, true},
+ {queue_args, [{<<"x-queue-type">>, longstr, <<"classic">>}]},
+ {consumer_args, []},
+ {queue_durable, true}]),
+ rabbit_ct_helpers:run_steps(Config1, []);
+init_per_group(stream_queue, Config) ->
+ case rabbit_ct_broker_helpers:enable_feature_flag(Config, stream_queue) of
+ ok ->
+ rabbit_ct_helpers:set_config(
+ Config,
+ [{queue_args, [{<<"x-queue-type">>, longstr, <<"stream">>}]},
+ {consumer_args, [{<<"x-stream-offset">>, long, 0}]},
+ {queue_durable, true}]);
+ Skip ->
+ Skip
+ end;
+init_per_group(Group, Config0) ->
+ case lists:member({group, Group}, all()) of
+ true ->
+ ClusterSize = 3,
+ Config = rabbit_ct_helpers:merge_app_env(
+ Config0, {rabbit, [{channel_tick_interval, 1000},
+ {quorum_tick_interval, 1000},
+ {stream_tick_interval, 1000}]}),
+ Config1 = rabbit_ct_helpers:set_config(
+ Config, [ {rmq_nodename_suffix, Group},
+ {rmq_nodes_count, ClusterSize}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps());
+ false ->
+ rabbit_ct_helpers:run_steps(Config0, [])
+ end.
+
+end_per_group(Group, Config) ->
+ case lists:member({group, Group}, all()) of
+ true ->
+ rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps());
+ false ->
+ Config
+ end.
+
+init_per_testcase(Testcase, Config) ->
+ Group = proplists:get_value(name, ?config(tc_group_properties, Config)),
+ Q = rabbit_data_coercion:to_binary(io_lib:format("~p_~p", [Group, Testcase])),
+ Q2 = rabbit_data_coercion:to_binary(io_lib:format("~p_~p_2", [Group, Testcase])),
+ Config1 = rabbit_ct_helpers:set_config(Config, [{queue_name, Q},
+ {queue_name_2, Q2}]),
+ rabbit_ct_helpers:testcase_started(Config1, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ {_, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ amqp_channel:call(Ch, #'queue.delete'{queue = ?config(queue_name, Config)}),
+ amqp_channel:call(Ch, #'queue.delete'{queue = ?config(queue_name_2, Config)}),
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+publish(Config) ->
+ {_, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ declare_queue(Ch, Config, QName),
+ publish(Ch, QName, [<<"msg1">>]),
+ wait_for_messages(Config, [[QName, <<"1">>, <<"1">>, <<"0">>]]).
+
+consume(Config) ->
+ {_, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ declare_queue(Ch, Config, QName),
+ publish(Ch, QName, [<<"msg1">>]),
+ wait_for_messages(Config, [[QName, <<"1">>, <<"1">>, <<"0">>]]),
+ consume(Ch, QName, [<<"msg1">>]),
+ wait_for_messages(Config, [[QName, <<"1">>, <<"0">>, <<"1">>]]),
+ rabbit_ct_client_helpers:close_channel(Ch),
+ wait_for_messages(Config, [[QName, <<"1">>, <<"1">>, <<"0">>]]).
+
+consume_first_empty(Config) ->
+ {_, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ declare_queue(Ch, Config, QName),
+ consume_empty(Ch, QName),
+ publish(Ch, QName, [<<"msg1">>]),
+ wait_for_messages(Config, [[QName, <<"1">>, <<"1">>, <<"0">>]]),
+ consume(Ch, QName, true, [<<"msg1">>]),
+ rabbit_ct_client_helpers:close_channel(Ch).
+
+consume_from_empty_queue(Config) ->
+ {_, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ declare_queue(Ch, Config, QName),
+ consume_empty(Ch, QName).
+
+consume_and_autoack(Config) ->
+ {_, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ declare_queue(Ch, Config, QName),
+ publish(Ch, QName, [<<"msg1">>]),
+ wait_for_messages(Config, [[QName, <<"1">>, <<"1">>, <<"0">>]]),
+ consume(Ch, QName, true, [<<"msg1">>]),
+ wait_for_messages(Config, [[QName, <<"0">>, <<"0">>, <<"0">>]]),
+ rabbit_ct_client_helpers:close_channel(Ch),
+ wait_for_messages(Config, [[QName, <<"0">>, <<"0">>, <<"0">>]]).
+
+subscribe(Config) ->
+ {_, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ declare_queue(Ch, Config, QName),
+
+    %% Set a consumer prefetch; stream queues require one when consuming with manual acks
+ ?assertMatch(#'basic.qos_ok'{},
+ amqp_channel:call(Ch, #'basic.qos'{global = false,
+ prefetch_count = 10})),
+ publish(Ch, QName, [<<"msg1">>]),
+ wait_for_messages(Config, [[QName, <<"1">>, <<"1">>, <<"0">>]]),
+
+ CArgs = ?config(consumer_args, Config),
+ subscribe(Ch, QName, false, CArgs),
+ receive_basic_deliver(false),
+
+ rabbit_ct_client_helpers:close_channel(Ch),
+ wait_for_messages(Config, [[QName, <<"1">>, <<"1">>, <<"0">>]]).
+
+subscribe_consumers(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ {_, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ declare_queue(Ch, Config, QName),
+
+ CArgs = ?config(consumer_args, Config),
+ ?assertMatch(#'basic.qos_ok'{},
+ amqp_channel:call(Ch, #'basic.qos'{global = false,
+ prefetch_count = 10})),
+ subscribe(Ch, QName, false, CArgs),
+
+ %% validate we can retrieve the consumers
+ Consumers = rpc:call(Server, rabbit_amqqueue, consumers_all, [<<"/">>]),
+ [Consumer] = lists:filter(fun(Props) ->
+ Resource = proplists:get_value(queue_name, Props),
+ QName == Resource#resource.name
+ end, Consumers),
+ ?assert(is_pid(proplists:get_value(channel_pid, Consumer))),
+ ?assert(is_binary(proplists:get_value(consumer_tag, Consumer))),
+ ?assertEqual(true, proplists:get_value(ack_required, Consumer)),
+ ?assertEqual(10, proplists:get_value(prefetch_count, Consumer)),
+ ?assertEqual([], proplists:get_value(arguments, Consumer)),
+
+ rabbit_ct_client_helpers:close_channel(Ch).
+
+subscribe_with_autoack(Config) ->
+ {_, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ CArgs = ?config(consumer_args, Config),
+ declare_queue(Ch, Config, QName),
+
+ publish(Ch, QName, [<<"msg1">>, <<"msg2">>]),
+ wait_for_messages(Config, [[QName, <<"2">>, <<"2">>, <<"0">>]]),
+ subscribe(Ch, QName, true, CArgs),
+ receive_basic_deliver(false),
+ receive_basic_deliver(false),
+ wait_for_messages(Config, [[QName, <<"0">>, <<"0">>, <<"0">>]]),
+ rabbit_ct_client_helpers:close_channel(Ch),
+ wait_for_messages(Config, [[QName, <<"0">>, <<"0">>, <<"0">>]]).
+
+consume_and_ack(Config) ->
+ {_, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ declare_queue(Ch, Config, QName),
+
+ publish(Ch, QName, [<<"msg1">>]),
+ wait_for_messages(Config, [[QName, <<"1">>, <<"1">>, <<"0">>]]),
+ [DeliveryTag] = consume(Ch, QName, [<<"msg1">>]),
+ wait_for_messages(Config, [[QName, <<"1">>, <<"0">>, <<"1">>]]),
+ amqp_channel:cast(Ch, #'basic.ack'{delivery_tag = DeliveryTag}),
+ wait_for_messages(Config, [[QName, <<"0">>, <<"0">>, <<"0">>]]),
+ rabbit_ct_client_helpers:close_channel(Ch),
+ ok.
+
+consume_and_multiple_ack(Config) ->
+ {_, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ declare_queue(Ch, Config, QName),
+
+ publish(Ch, QName, [<<"msg1">>, <<"msg2">>, <<"msg3">>]),
+ wait_for_messages(Config, [[QName, <<"3">>, <<"3">>, <<"0">>]]),
+ [_, _, DeliveryTag] = consume(Ch, QName, [<<"msg1">>, <<"msg2">>, <<"msg3">>]),
+ wait_for_messages(Config, [[QName, <<"3">>, <<"0">>, <<"3">>]]),
+ amqp_channel:cast(Ch, #'basic.ack'{delivery_tag = DeliveryTag,
+ multiple = true}),
+ wait_for_messages(Config, [[QName, <<"0">>, <<"0">>, <<"0">>]]),
+ rabbit_ct_client_helpers:close_channel(Ch),
+ ok.
+
+subscribe_and_ack(Config) ->
+ {_, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ CArgs = ?config(consumer_args, Config),
+ declare_queue(Ch, Config, QName),
+
+ publish(Ch, QName, [<<"msg1">>]),
+ wait_for_messages(Config, [[QName, <<"1">>, <<"1">>, <<"0">>]]),
+ subscribe(Ch, QName, false, CArgs),
+ receive
+ {#'basic.deliver'{delivery_tag = DeliveryTag}, _} ->
+ wait_for_messages(Config, [[QName, <<"1">>, <<"0">>, <<"1">>]]),
+ amqp_channel:cast(Ch, #'basic.ack'{delivery_tag = DeliveryTag}),
+ wait_for_messages(Config, [[QName, <<"0">>, <<"0">>, <<"0">>]])
+ end,
+ rabbit_ct_client_helpers:close_channel(Ch),
+ ok.
+
+subscribe_and_multiple_ack(Config) ->
+ {_, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ CArgs = ?config(consumer_args, Config),
+ declare_queue(Ch, Config, QName),
+
+ publish(Ch, QName, [<<"msg1">>, <<"msg2">>, <<"msg3">>]),
+ wait_for_messages(Config, [[QName, <<"3">>, <<"3">>, <<"0">>]]),
+ subscribe(Ch, QName, false, CArgs),
+ receive_basic_deliver(false),
+ receive_basic_deliver(false),
+ receive
+ {#'basic.deliver'{delivery_tag = DeliveryTag}, _} ->
+ wait_for_messages(Config, [[QName, <<"3">>, <<"0">>, <<"3">>]]),
+ amqp_channel:cast(Ch, #'basic.ack'{delivery_tag = DeliveryTag,
+ multiple = true}),
+ wait_for_messages(Config, [[QName, <<"0">>, <<"0">>, <<"0">>]])
+ end,
+ rabbit_ct_client_helpers:close_channel(Ch),
+ ok.
+
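+%% Publishes ~60MB of messages, then acks a large middle slice so the message
+%% store files become sparse enough for compaction to kick in.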
+trigger_message_store_compaction(Config) ->
+ {_, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ declare_queue(Ch, Config, QName),
+
+ N = 12000,
+ [publish(Ch, QName, [binary:copy(<<"a">>, 5000)]) || _ <- lists:seq(1, N)],
+ wait_for_messages(Config, [[QName, <<"12000">>, <<"12000">>, <<"0">>]]),
+
+ AllDTags = rabbit_ct_client_helpers:consume_without_acknowledging(Ch, QName, N),
+ ToAck = lists:filter(fun (I) -> I > 500 andalso I < 11200 end, AllDTags),
+
+ [amqp_channel:cast(Ch, #'basic.ack'{delivery_tag = Tag,
+ multiple = false}) || Tag <- ToAck],
+
+    %% give compaction a moment to kick in and finish
+ timer:sleep(5000),
+ amqp_channel:cast(Ch, #'queue.purge'{queue = QName}),
+ rabbit_ct_client_helpers:close_channel(Ch),
+ ok.
+
+subscribe_and_requeue_multiple_nack(Config) ->
+ {_, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ CArgs = ?config(consumer_args, Config),
+ declare_queue(Ch, Config, QName),
+
+ publish(Ch, QName, [<<"msg1">>, <<"msg2">>, <<"msg3">>]),
+ wait_for_messages(Config, [[QName, <<"3">>, <<"3">>, <<"0">>]]),
+ subscribe(Ch, QName, false, CArgs),
+ receive_basic_deliver(false),
+ receive_basic_deliver(false),
+ receive
+ {#'basic.deliver'{delivery_tag = DeliveryTag,
+ redelivered = false}, _} ->
+ wait_for_messages(Config, [[QName, <<"3">>, <<"0">>, <<"3">>]]),
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DeliveryTag,
+ multiple = true,
+ requeue = true}),
+ receive_basic_deliver(true),
+ receive_basic_deliver(true),
+ receive
+ {#'basic.deliver'{delivery_tag = DeliveryTag1,
+ redelivered = true}, _} ->
+ wait_for_messages(Config, [[QName, <<"3">>, <<"0">>, <<"3">>]]),
+ amqp_channel:cast(Ch, #'basic.ack'{delivery_tag = DeliveryTag1,
+ multiple = true}),
+ wait_for_messages(Config, [[QName, <<"0">>, <<"0">>, <<"0">>]])
+ end
+ end,
+ rabbit_ct_client_helpers:close_channel(Ch),
+ ok.
+
+consume_and_requeue_nack(Config) ->
+ {_, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ declare_queue(Ch, Config, QName),
+
+ publish(Ch, QName, [<<"msg1">>, <<"msg2">>]),
+ wait_for_messages(Config, [[QName, <<"2">>, <<"2">>, <<"0">>]]),
+ [DeliveryTag] = consume(Ch, QName, [<<"msg1">>]),
+ wait_for_messages(Config, [[QName, <<"2">>, <<"1">>, <<"1">>]]),
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DeliveryTag,
+ multiple = false,
+ requeue = true}),
+ wait_for_messages(Config, [[QName, <<"2">>, <<"2">>, <<"0">>]]),
+ rabbit_ct_client_helpers:close_channel(Ch),
+ ok.
+
+consume_and_nack(Config) ->
+ {_, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ declare_queue(Ch, Config, QName),
+
+ publish(Ch, QName, [<<"msg1">>]),
+ wait_for_messages(Config, [[QName, <<"1">>, <<"1">>, <<"0">>]]),
+ [DeliveryTag] = consume(Ch, QName, [<<"msg1">>]),
+ wait_for_messages(Config, [[QName, <<"1">>, <<"0">>, <<"1">>]]),
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DeliveryTag,
+ multiple = false,
+ requeue = false}),
+ wait_for_messages(Config, [[QName, <<"0">>, <<"0">>, <<"0">>]]),
+ rabbit_ct_client_helpers:close_channel(Ch),
+ ok.
+
+consume_and_requeue_multiple_nack(Config) ->
+ {_, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ declare_queue(Ch, Config, QName),
+
+ publish(Ch, QName, [<<"msg1">>, <<"msg2">>, <<"msg3">>]),
+ wait_for_messages(Config, [[QName, <<"3">>, <<"3">>, <<"0">>]]),
+ [_, _, DeliveryTag] = consume(Ch, QName, [<<"msg1">>, <<"msg2">>, <<"msg3">>]),
+ wait_for_messages(Config, [[QName, <<"3">>, <<"0">>, <<"3">>]]),
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DeliveryTag,
+ multiple = true,
+ requeue = true}),
+ wait_for_messages(Config, [[QName, <<"3">>, <<"3">>, <<"0">>]]),
+ rabbit_ct_client_helpers:close_channel(Ch),
+ ok.
+
+consume_and_multiple_nack(Config) ->
+ {_, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ declare_queue(Ch, Config, QName),
+
+ publish(Ch, QName, [<<"msg1">>, <<"msg2">>, <<"msg3">>]),
+ wait_for_messages(Config, [[QName, <<"3">>, <<"3">>, <<"0">>]]),
+ [_, _, DeliveryTag] = consume(Ch, QName, [<<"msg1">>, <<"msg2">>, <<"msg3">>]),
+ wait_for_messages(Config, [[QName, <<"3">>, <<"0">>, <<"3">>]]),
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DeliveryTag,
+ multiple = true,
+ requeue = false}),
+ wait_for_messages(Config, [[QName, <<"0">>, <<"0">>, <<"0">>]]),
+ rabbit_ct_client_helpers:close_channel(Ch),
+ ok.
+
+subscribe_and_requeue_nack(Config) ->
+ {_, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ CArgs = ?config(consumer_args, Config),
+ declare_queue(Ch, Config, QName),
+
+ publish(Ch, QName, [<<"msg1">>]),
+ wait_for_messages(Config, [[QName, <<"1">>, <<"1">>, <<"0">>]]),
+ subscribe(Ch, QName, false, CArgs),
+ receive
+ {#'basic.deliver'{delivery_tag = DeliveryTag,
+ redelivered = false}, _} ->
+ wait_for_messages(Config, [[QName, <<"1">>, <<"0">>, <<"1">>]]),
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DeliveryTag,
+ multiple = false,
+ requeue = true}),
+ receive
+ {#'basic.deliver'{delivery_tag = DeliveryTag1,
+ redelivered = true}, _} ->
+ wait_for_messages(Config, [[QName, <<"1">>, <<"0">>, <<"1">>]]),
+ amqp_channel:cast(Ch, #'basic.ack'{delivery_tag = DeliveryTag1}),
+ wait_for_messages(Config, [[QName, <<"0">>, <<"0">>, <<"0">>]])
+ end
+ end,
+ rabbit_ct_client_helpers:close_channel(Ch),
+ ok.
+
+subscribe_and_nack(Config) ->
+ {_, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ CArgs = ?config(consumer_args, Config),
+ declare_queue(Ch, Config, QName),
+
+ publish(Ch, QName, [<<"msg1">>]),
+ wait_for_messages(Config, [[QName, <<"1">>, <<"1">>, <<"0">>]]),
+ subscribe(Ch, QName, false, CArgs),
+ receive
+ {#'basic.deliver'{delivery_tag = DeliveryTag,
+ redelivered = false}, _} ->
+ wait_for_messages(Config, [[QName, <<"1">>, <<"0">>, <<"1">>]]),
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DeliveryTag,
+ multiple = false,
+ requeue = false}),
+ wait_for_messages(Config, [[QName, <<"0">>, <<"0">>, <<"0">>]])
+ end,
+ rabbit_ct_client_helpers:close_channel(Ch),
+ ok.
+
+subscribe_and_multiple_nack(Config) ->
+ {_, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ CArgs = ?config(consumer_args, Config),
+ declare_queue(Ch, Config, QName),
+
+ publish(Ch, QName, [<<"msg1">>, <<"msg2">>, <<"msg3">>]),
+ wait_for_messages(Config, [[QName, <<"3">>, <<"3">>, <<"0">>]]),
+ subscribe(Ch, QName, false, CArgs),
+ receive_basic_deliver(false),
+ receive_basic_deliver(false),
+ receive
+ {#'basic.deliver'{delivery_tag = DeliveryTag,
+ redelivered = false}, _} ->
+ wait_for_messages(Config, [[QName, <<"3">>, <<"0">>, <<"3">>]]),
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DeliveryTag,
+ multiple = true,
+ requeue = false}),
+ wait_for_messages(Config, [[QName, <<"0">>, <<"0">>, <<"0">>]])
+ end,
+ rabbit_ct_client_helpers:close_channel(Ch),
+ ok.
+
+%% TODO: test with single active consumer
+basic_cancel(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ {_, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ CArgs = ?config(consumer_args, Config),
+ declare_queue(Ch, Config, QName),
+
+ publish(Ch, QName, [<<"msg1">>]),
+ wait_for_messages(Config, [[QName, <<"1">>, <<"1">>, <<"0">>]]),
+ CTag = atom_to_binary(?FUNCTION_NAME, utf8),
+
+ subscribe(Ch, QName, false, CTag, CArgs),
+ receive
+ {#'basic.deliver'{delivery_tag = DeliveryTag}, _} ->
+ wait_for_messages(Config, [[QName, <<"1">>, <<"0">>, <<"1">>]]),
+ amqp_channel:call(Ch, #'basic.cancel'{consumer_tag = CTag}),
+ Consumers = rpc:call(Server, rabbit_amqqueue, consumers_all, [<<"/">>]),
+ wait_for_messages(Config, [[QName, <<"1">>, <<"0">>, <<"1">>]]),
+ ?assertEqual([], lists:filter(fun(Props) ->
+ Resource = proplists:get_value(queue_name, Props),
+ QName == Resource#resource.name
+ end, Consumers)),
+ publish(Ch, QName, [<<"msg2">>, <<"msg3">>]),
+ wait_for_messages(Config, [[QName, <<"3">>, <<"2">>, <<"1">>]]),
+ amqp_channel:cast(Ch, #'basic.ack'{delivery_tag = DeliveryTag}),
+ wait_for_messages(Config, [[QName, <<"2">>, <<"2">>, <<"0">>]])
+ after 5000 ->
+ exit(basic_deliver_timeout)
+ end,
+ rabbit_ct_client_helpers:close_channel(Ch),
+ ok.
+
+purge(Config) ->
+ {_, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ declare_queue(Ch, Config, QName),
+
+ publish(Ch, QName, [<<"msg1">>, <<"msg2">>]),
+ wait_for_messages(Config, [[QName, <<"2">>, <<"2">>, <<"0">>]]),
+ [_] = consume(Ch, QName, [<<"msg1">>]),
+ wait_for_messages(Config, [[QName, <<"2">>, <<"1">>, <<"1">>]]),
+ {'queue.purge_ok', 1} = amqp_channel:call(Ch, #'queue.purge'{queue = QName}),
+ wait_for_messages(Config, [[QName, <<"1">>, <<"0">>, <<"1">>]]),
+ rabbit_ct_client_helpers:close_channel(Ch),
+ ok.
+
+basic_recover(Config) ->
+ {_, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ declare_queue(Ch, Config, QName),
+
+ publish(Ch, QName, [<<"msg1">>]),
+ wait_for_messages(Config, [[QName, <<"1">>, <<"1">>, <<"0">>]]),
+ [_] = consume(Ch, QName, [<<"msg1">>]),
+ wait_for_messages(Config, [[QName, <<"1">>, <<"0">>, <<"1">>]]),
+ amqp_channel:cast(Ch, #'basic.recover'{requeue = true}),
+ wait_for_messages(Config, [[QName, <<"1">>, <<"1">>, <<"0">>]]),
+ rabbit_ct_client_helpers:close_channel(Ch),
+ ok.
+
+delete_immediately_by_pid_fails(Config) ->
+ {_, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ Args = ?config(queue_args, Config),
+ Durable = ?config(queue_durable, Config),
+ QName = ?config(queue_name, Config),
+ declare_queue(Ch, Config, QName),
+
+ Cmd = ["eval", "{ok, Q} = rabbit_amqqueue:lookup(rabbit_misc:r(<<\"/\">>, queue, <<\"" ++ binary_to_list(QName) ++ "\">>)), Pid = rabbit_amqqueue:pid_of(Q), rabbit_amqqueue:delete_immediately([Pid])."],
+ {ok, Msg} = rabbit_ct_broker_helpers:rabbitmqctl(Config, 0, Cmd),
+ ?assertEqual(match, re:run(Msg, ".*error.*", [{capture, none}])),
+
+ ?assertEqual({'queue.declare_ok', QName, 0, 0},
+ amqp_channel:call(Ch, #'queue.declare'{queue = QName,
+ durable = Durable,
+ passive = true,
+ auto_delete = false,
+ arguments = Args})),
+ rabbit_ct_client_helpers:close_channel(Ch),
+ ok.
+
+delete_immediately_by_pid_succeeds(Config) ->
+ {_, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ Args = ?config(queue_args, Config),
+ Durable = ?config(queue_durable, Config),
+ QName = ?config(queue_name, Config),
+ declare_queue(Ch, Config, QName),
+
+ Cmd = ["eval", "{ok, Q} = rabbit_amqqueue:lookup(rabbit_misc:r(<<\"/\">>, queue, <<\"" ++ binary_to_list(QName) ++ "\">>)), Pid = rabbit_amqqueue:pid_of(Q), rabbit_amqqueue:delete_immediately([Pid])."],
+ {ok, Msg} = rabbit_ct_broker_helpers:rabbitmqctl(Config, 0, Cmd),
+ ?assertEqual(match, re:run(Msg, ".*ok.*", [{capture, none}])),
+
+ ?assertExit(
+ {{shutdown, {server_initiated_close, 404, _}}, _},
+ amqp_channel:call(Ch, #'queue.declare'{queue = QName,
+ durable = Durable,
+ passive = true,
+ auto_delete = false,
+ arguments = Args})),
+ rabbit_ct_client_helpers:close_channel(Ch),
+ ok.
+
+delete_immediately_by_resource(Config) ->
+ {_, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ Args = ?config(queue_args, Config),
+ Durable = ?config(queue_durable, Config),
+ QName = ?config(queue_name, Config),
+ declare_queue(Ch, Config, QName),
+
+ Cmd = ["eval", "rabbit_amqqueue:delete_immediately_by_resource([rabbit_misc:r(<<\"/\">>, queue, <<\"" ++ binary_to_list(QName) ++ "\">>)])."],
+ ?assertEqual({ok, "ok\n"}, rabbit_ct_broker_helpers:rabbitmqctl(Config, 0, Cmd)),
+
+ ?assertExit(
+ {{shutdown, {server_initiated_close, 404, _}}, _},
+ amqp_channel:call(Ch, #'queue.declare'{queue = QName,
+ durable = Durable,
+ passive = true,
+ auto_delete = false,
+ arguments = Args})),
+ rabbit_ct_client_helpers:close_channel(Ch),
+ ok.
+
+%%%%%%%%%%%%%%%%%%%%%%%%
+%% Test helpers
+%%%%%%%%%%%%%%%%%%%%%%%%
+declare_queue(Ch, Config, QName) ->
+ Args = ?config(queue_args, Config),
+ Durable = ?config(queue_durable, Config),
+ #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = QName,
+ arguments = Args,
+ durable = Durable}).
+
+publish(Ch, QName, Payloads) ->
+ [amqp_channel:call(Ch, #'basic.publish'{routing_key = QName}, #amqp_msg{payload = Payload})
+ || Payload <- Payloads].
+
+consume(Ch, QName, Payloads) ->
+ consume(Ch, QName, false, Payloads).
+
+consume(Ch, QName, NoAck, Payloads) ->
+ [begin
+ {#'basic.get_ok'{delivery_tag = DTag}, #amqp_msg{payload = Payload}} =
+ amqp_channel:call(Ch, #'basic.get'{queue = QName,
+ no_ack = NoAck}),
+ DTag
+ end || Payload <- Payloads].
+
+consume_empty(Ch, QName) ->
+ ?assertMatch(#'basic.get_empty'{},
+ amqp_channel:call(Ch, #'basic.get'{queue = QName})).
+
+subscribe(Ch, Queue, NoAck, CArgs) ->
+ subscribe(Ch, Queue, NoAck, <<"ctag">>, CArgs).
+
+subscribe(Ch, Queue, NoAck, Ctag, CArgs) ->
+ amqp_channel:subscribe(Ch, #'basic.consume'{queue = Queue,
+ no_ack = NoAck,
+ consumer_tag = Ctag,
+ arguments = CArgs},
+ self()),
+ receive
+ #'basic.consume_ok'{consumer_tag = Ctag} ->
+ ok
+ after 5000 ->
+ exit(basic_consume_timeout)
+ end.
+
+receive_basic_deliver(Redelivered) ->
+ receive
+ {#'basic.deliver'{redelivered = R}, _} when R == Redelivered ->
+ ok
+ after 5000 ->
+ exit(basic_deliver_timeout)
+ end.
+
+flush(T) ->
+ receive X ->
+ ct:pal("flushed ~w", [X]),
+ flush(T)
+ after T ->
+ ok
+ end.
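+
+%% A minimal usage sketch for the helpers above (hypothetical testcase name,
+%% not wired into any CT group): declare a queue, publish one message, then
+%% fetch and ack it, checking the queue counters at each step.
+publish_get_ack_roundtrip(Config) ->
+ {_, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ QName = ?config(queue_name, Config),
+ declare_queue(Ch, Config, QName),
+ publish(Ch, QName, [<<"msg1">>]),
+ wait_for_messages(Config, [[QName, <<"1">>, <<"1">>, <<"0">>]]),
+ [DTag] = consume(Ch, QName, [<<"msg1">>]),
+ wait_for_messages(Config, [[QName, <<"1">>, <<"0">>, <<"1">>]]),
+ amqp_channel:cast(Ch, #'basic.ack'{delivery_tag = DTag}),
+ wait_for_messages(Config, [[QName, <<"0">>, <<"0">>, <<"0">>]]),
+ rabbit_ct_client_helpers:close_channel(Ch),
+ ok.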
diff --git a/deps/rabbit/test/queue_type_SUITE.erl b/deps/rabbit/test/queue_type_SUITE.erl
new file mode 100644
index 0000000000..aed5ad4ccb
--- /dev/null
+++ b/deps/rabbit/test/queue_type_SUITE.erl
@@ -0,0 +1,275 @@
+-module(queue_type_SUITE).
+
+-compile(export_all).
+
+-export([
+ ]).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+%%%===================================================================
+%%% Common Test callbacks
+%%%===================================================================
+
+all() ->
+ [
+ {group, classic},
+ {group, quorum}
+ ].
+
+all_tests() ->
+ [
+ smoke,
+ ack_after_queue_delete
+ ].
+
+groups() ->
+ [
+ {classic, [], all_tests()},
+ {quorum, [], all_tests()}
+ ].
+
+init_per_suite(Config0) ->
+ rabbit_ct_helpers:log_environment(),
+ Config = rabbit_ct_helpers:merge_app_env(
+ Config0, {rabbit, [{quorum_tick_interval, 1000}]}),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config),
+ ok.
+
+init_per_group(Group, Config) ->
+ ClusterSize = 3,
+ Config1 = rabbit_ct_helpers:set_config(Config,
+ [{rmq_nodes_count, ClusterSize},
+ {rmq_nodename_suffix, Group},
+ {tcp_ports_base}]),
+ Config1b = rabbit_ct_helpers:set_config(Config1,
+ [{queue_type, atom_to_binary(Group, utf8)},
+ {net_ticktime, 10}]),
+ Config2 = rabbit_ct_helpers:run_steps(Config1b,
+ [fun merge_app_env/1 ] ++
+ rabbit_ct_broker_helpers:setup_steps()),
+ case rabbit_ct_broker_helpers:enable_feature_flag(Config2, quorum_queue) of
+ ok ->
+ ok = rabbit_ct_broker_helpers:rpc(
+ Config2, 0, application, set_env,
+ [rabbit, channel_tick_interval, 100]),
+ %% HACK: the larger cluster sizes benefit from a bit more time
+ %% after clustering before running the tests.
+ Config3 = case Group of
+ cluster_size_5 ->
+ timer:sleep(5000),
+ Config2;
+ _ ->
+ Config2
+ end,
+
+ rabbit_ct_broker_helpers:set_policy(
+ Config3, 0,
+ <<"ha-policy">>, <<".*">>, <<"queues">>,
+ [{<<"ha-mode">>, <<"all">>}]),
+ Config3;
+ Skip ->
+ end_per_group(Group, Config2),
+ Skip
+ end.
+
+merge_app_env(Config) ->
+ rabbit_ct_helpers:merge_app_env(
+ rabbit_ct_helpers:merge_app_env(Config,
+ {rabbit,
+ [{core_metrics_gc_interval, 100},
+ {log, [{file, [{level, debug}]}]}]}),
+ {ra, [{min_wal_roll_over_interval, 30000}]}).
+
+end_per_group(_Group, Config) ->
+ rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_testcase(Testcase, Config) ->
+ Config1 = rabbit_ct_helpers:testcase_started(Config, Testcase),
+ rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, delete_queues, []),
+ Q = rabbit_data_coercion:to_binary(Testcase),
+ Config2 = rabbit_ct_helpers:set_config(Config1,
+ [{queue_name, Q},
+ {alt_queue_name, <<Q/binary, "_alt">>}
+ ]),
+ rabbit_ct_helpers:run_steps(Config2,
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_testcase(Testcase, Config) ->
+ catch delete_queues(),
+ Config1 = rabbit_ct_helpers:run_steps(
+ Config,
+ rabbit_ct_client_helpers:teardown_steps()),
+ rabbit_ct_helpers:testcase_finished(Config1, Testcase).
+
+%%%===================================================================
+%%% Test cases
+%%%===================================================================
+
+smoke(Config) ->
+ Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QName = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QName, 0, 0},
+ declare(Ch, QName, [{<<"x-queue-type">>, longstr,
+ ?config(queue_type, Config)}])),
+ #'confirm.select_ok'{} = amqp_channel:call(Ch, #'confirm.select'{}),
+ amqp_channel:register_confirm_handler(Ch, self()),
+ publish(Ch, QName, <<"msg1">>),
+ ct:pal("waiting for confirms from ~s", [QName]),
+ ok = receive
+ #'basic.ack'{} -> ok;
+ #'basic.nack'{} -> fail
+ after 2500 ->
+ flush(),
+ exit(confirm_timeout)
+ end,
+ DTag = basic_get(Ch, QName),
+
+ basic_ack(Ch, DTag),
+ basic_get_empty(Ch, QName),
+
+ %% consume
+ publish(Ch, QName, <<"msg2">>),
+ ConsumerTag1 = <<"ctag1">>,
+ ok = subscribe(Ch, QName, ConsumerTag1),
+ %% receive and ack
+ receive
+ {#'basic.deliver'{delivery_tag = DeliveryTag,
+ redelivered = false},
+ #amqp_msg{}} ->
+ basic_ack(Ch, DeliveryTag)
+ after 5000 ->
+ flush(),
+ exit(basic_deliver_timeout)
+ end,
+ basic_cancel(Ch, ConsumerTag1),
+
+ %% assert empty
+ basic_get_empty(Ch, QName),
+
+ %% consume and nack
+ ConsumerTag2 = <<"ctag2">>,
+ ok = subscribe(Ch, QName, ConsumerTag2),
+ publish(Ch, QName, <<"msg3">>),
+ receive
+ {#'basic.deliver'{delivery_tag = T,
+ redelivered = false},
+ #amqp_msg{}} ->
+ basic_cancel(Ch, ConsumerTag2),
+ basic_nack(Ch, T)
+ after 5000 ->
+ exit(basic_deliver_timeout)
+ end,
+ %% get and ack
+ basic_ack(Ch, basic_get(Ch, QName)),
+ ok.
+
+ack_after_queue_delete(Config) ->
+ Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QName = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QName, 0, 0},
+ declare(Ch, QName, [{<<"x-queue-type">>, longstr,
+ ?config(queue_type, Config)}])),
+ #'confirm.select_ok'{} = amqp_channel:call(Ch, #'confirm.select'{}),
+ amqp_channel:register_confirm_handler(Ch, self()),
+ publish(Ch, QName, <<"msg1">>),
+ ct:pal("waiting for confirms from ~s", [QName]),
+ ok = receive
+ #'basic.ack'{} -> ok;
+ #'basic.nack'{} ->
+ ct:fail("confirm nack - expected ack")
+ after 2500 ->
+ flush(),
+ exit(confirm_timeout)
+ end,
+
+ DTag = basic_get(Ch, QName),
+
+ ChRef = erlang:monitor(process, Ch),
+ #'queue.delete_ok'{} = delete(Ch, QName),
+
+ basic_ack(Ch, DTag),
+ %% assert no channel error
+ receive
+ {'DOWN', ChRef, process, _, _} ->
+ ct:fail("unexpected channel closure")
+ after 1000 ->
+ ok
+ end,
+ flush(),
+ ok.
+
+%% Utility
+%%
+delete_queues() ->
+ [rabbit_amqqueue:delete(Q, false, false, <<"dummy">>)
+ || Q <- rabbit_amqqueue:list()].
+
+declare(Ch, Q, Args) ->
+ amqp_channel:call(Ch, #'queue.declare'{queue = Q,
+ durable = true,
+ auto_delete = false,
+ arguments = Args}).
+
+delete(Ch, Q) ->
+ amqp_channel:call(Ch, #'queue.delete'{queue = Q}).
+
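+%% Publish Msg as a persistent message (delivery_mode = 2) to the default
+%% exchange, routed directly to Queue.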
+publish(Ch, Queue, Msg) ->
+ ok = amqp_channel:cast(Ch,
+ #'basic.publish'{routing_key = Queue},
+ #amqp_msg{props = #'P_basic'{delivery_mode = 2},
+ payload = Msg}).
+
+basic_get(Ch, Queue) ->
+ {GetOk, _} = Reply = amqp_channel:call(Ch, #'basic.get'{queue = Queue,
+ no_ack = false}),
+ ?assertMatch({#'basic.get_ok'{}, #amqp_msg{}}, Reply),
+ GetOk#'basic.get_ok'.delivery_tag.
+
+basic_get_empty(Ch, Queue) ->
+ ?assertMatch(#'basic.get_empty'{},
+ amqp_channel:call(Ch, #'basic.get'{queue = Queue,
+ no_ack = false})).
+
+subscribe(Ch, Queue, CTag) ->
+ amqp_channel:subscribe(Ch, #'basic.consume'{queue = Queue,
+ no_ack = false,
+ consumer_tag = CTag},
+ self()),
+ receive
+ #'basic.consume_ok'{consumer_tag = CTag} ->
+ ok
+ after 5000 ->
+ exit(basic_consume_timeout)
+ end.
+
+basic_ack(Ch, DTag) ->
+ amqp_channel:cast(Ch, #'basic.ack'{delivery_tag = DTag,
+ multiple = false}).
+
+basic_cancel(Ch, CTag) ->
+ #'basic.cancel_ok'{} =
+ amqp_channel:call(Ch, #'basic.cancel'{consumer_tag = CTag}).
+
+basic_nack(Ch, DTag) ->
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DTag,
+ requeue = true,
+ multiple = false}).
+
+flush() ->
+ receive
+ Any ->
+ ct:pal("flush ~p", [Any]),
+ flush()
+ after 0 ->
+ ok
+ end.
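+
+%% The confirm-wait receive block above appears verbatim in smoke/1 and, with
+%% a ct:fail on nack, in ack_after_queue_delete/1; a sketch of a shared
+%% helper both could call instead (hypothetical name, not used above):
+await_confirm() ->
+ receive
+ #'basic.ack'{} -> ok;
+ #'basic.nack'{} -> fail
+ after 2500 ->
+ flush(),
+ exit(confirm_timeout)
+ end.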
diff --git a/deps/rabbit/test/quorum_queue_SUITE.erl b/deps/rabbit/test/quorum_queue_SUITE.erl
new file mode 100644
index 0000000000..36a6d41a61
--- /dev/null
+++ b/deps/rabbit/test/quorum_queue_SUITE.erl
@@ -0,0 +1,2792 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2018-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(quorum_queue_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-import(quorum_queue_utils, [wait_for_messages_ready/3,
+ wait_for_messages_pending_ack/3,
+ wait_for_messages_total/3,
+ wait_for_messages/2,
+ dirty_query/3,
+ ra_name/1,
+ is_mixed_versions/0]).
+
+-compile(export_all).
+
+suite() ->
+ [{timetrap, 5 * 60000}].
+
+all() ->
+ [
+ {group, single_node},
+ {group, unclustered},
+ {group, clustered}
+ ].
+
+groups() ->
+ [
+ {single_node, [], all_tests()},
+ {single_node, [], memory_tests()},
+ {single_node, [], [node_removal_is_quorum_critical]},
+ {unclustered, [], [
+ {cluster_size_2, [], [add_member]}
+ ]},
+ {clustered, [], [
+ {cluster_size_2, [], [cleanup_data_dir]},
+ {cluster_size_2, [], [add_member_not_running,
+ add_member_classic,
+ add_member_already_a_member,
+ add_member_not_found,
+ delete_member_not_running,
+ delete_member_classic,
+ delete_member_queue_not_found,
+ delete_member,
+ delete_member_not_a_member,
+ node_removal_is_quorum_critical]
+ ++ all_tests()},
+ {cluster_size_2, [], memory_tests()},
+ {cluster_size_3, [], [
+ declare_during_node_down,
+ simple_confirm_availability_on_leader_change,
+ publishing_to_unavailable_queue,
+ confirm_availability_on_leader_change,
+ recover_from_single_failure,
+ recover_from_multiple_failures,
+ leadership_takeover,
+ delete_declare,
+ delete_member_during_node_down,
+ metrics_cleanup_on_leadership_takeover,
+ metrics_cleanup_on_leader_crash,
+ consume_in_minority,
+ shrink_all,
+ rebalance,
+ file_handle_reservations,
+ file_handle_reservations_above_limit,
+ node_removal_is_not_quorum_critical
+ ]},
+ {cluster_size_5, [], [start_queue,
+ start_queue_concurrent,
+ quorum_cluster_size_3,
+ quorum_cluster_size_7,
+ node_removal_is_not_quorum_critical
+ ]},
+ {clustered_with_partitions, [], [
+ reconnect_consumer_and_publish,
+ reconnect_consumer_and_wait,
+ reconnect_consumer_and_wait_channel_down
+ ]}
+ ]}
+ ].
+
+all_tests() ->
+ [
+ declare_args,
+ declare_invalid_properties,
+ declare_server_named,
+ start_queue,
+ stop_queue,
+ restart_queue,
+ restart_all_types,
+ stop_start_rabbit_app,
+ publish_and_restart,
+ subscribe_should_fail_when_global_qos_true,
+ dead_letter_to_classic_queue,
+ dead_letter_with_memory_limit,
+ dead_letter_to_quorum_queue,
+ dead_letter_from_classic_to_quorum_queue,
+ dead_letter_policy,
+ cleanup_queue_state_on_channel_after_publish,
+ cleanup_queue_state_on_channel_after_subscribe,
+ sync_queue,
+ cancel_sync_queue,
+ idempotent_recover,
+ vhost_with_quorum_queue_is_deleted,
+ delete_immediately_by_resource,
+ consume_redelivery_count,
+ subscribe_redelivery_count,
+ message_bytes_metrics,
+ queue_length_limit_drop_head,
+ queue_length_limit_reject_publish,
+ subscribe_redelivery_limit,
+ subscribe_redelivery_policy,
+ subscribe_redelivery_limit_with_dead_letter,
+ queue_length_in_memory_limit_basic_get,
+ queue_length_in_memory_limit_subscribe,
+ queue_length_in_memory_limit,
+ queue_length_in_memory_limit_returns,
+ queue_length_in_memory_bytes_limit_basic_get,
+ queue_length_in_memory_bytes_limit_subscribe,
+ queue_length_in_memory_bytes_limit,
+ queue_length_in_memory_purge,
+ in_memory,
+ consumer_metrics,
+ invalid_policy,
+ delete_if_empty,
+ delete_if_unused,
+ queue_ttl,
+ peek,
+ consumer_priorities
+ ].
+
+memory_tests() ->
+ [
+ memory_alarm_rolls_wal
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config0) ->
+ rabbit_ct_helpers:log_environment(),
+ Config1 = rabbit_ct_helpers:merge_app_env(
+ Config0, {rabbit, [{quorum_tick_interval, 1000}]}),
+ Config = rabbit_ct_helpers:merge_app_env(
+ Config1, {aten, [{poll_interval, 1000}]}),
+ rabbit_ct_helpers:run_setup_steps(
+ Config,
+ [fun rabbit_ct_broker_helpers:configure_dist_proxy/1]).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(clustered, Config) ->
+ rabbit_ct_helpers:set_config(Config, [{rmq_nodes_clustered, true}]);
+init_per_group(unclustered, Config) ->
+ rabbit_ct_helpers:set_config(Config, [{rmq_nodes_clustered, false}]);
+init_per_group(clustered_with_partitions, Config) ->
+ case is_mixed_versions() of
+ true ->
+ {skip, "clustered_with_partitions is too unreliable in mixed mode"};
+ false ->
+ rabbit_ct_helpers:set_config(Config, [{net_ticktime, 10}])
+ end;
+init_per_group(Group, Config) ->
+ ClusterSize = case Group of
+ single_node -> 1;
+ cluster_size_2 -> 2;
+ cluster_size_3 -> 3;
+ cluster_size_5 -> 5
+ end,
+ IsMixed = os:getenv("SECONDARY_UMBRELLA") =/= false,
+ case ClusterSize of
+ 2 when IsMixed ->
+ {skip, "cluster size 2 isn't mixed versions compatible"};
+ _ ->
+ Config1 = rabbit_ct_helpers:set_config(Config,
+ [{rmq_nodes_count, ClusterSize},
+ {rmq_nodename_suffix, Group},
+ {tcp_ports_base}]),
+ Config1b = rabbit_ct_helpers:set_config(Config1, [{net_ticktime, 10}]),
+ Ret = rabbit_ct_helpers:run_steps(Config1b,
+ [fun merge_app_env/1 ] ++
+ rabbit_ct_broker_helpers:setup_steps()),
+ case Ret of
+ {skip, _} ->
+ Ret;
+ Config2 ->
+ EnableFF = rabbit_ct_broker_helpers:enable_feature_flag(
+ Config2, quorum_queue),
+ case EnableFF of
+ ok ->
+ ok = rabbit_ct_broker_helpers:rpc(
+ Config2, 0, application, set_env,
+ [rabbit, channel_tick_interval, 100]),
+ %% HACK: the larger cluster sizes benefit from a bit
+ %% more time after clustering before running the
+ %% tests.
+ case Group of
+ cluster_size_5 ->
+ timer:sleep(5000),
+ Config2;
+ _ ->
+ Config2
+ end;
+ Skip ->
+ end_per_group(Group, Config2),
+ Skip
+ end
+ end
+ end.
+
+end_per_group(clustered, Config) ->
+ Config;
+end_per_group(unclustered, Config) ->
+ Config;
+end_per_group(clustered_with_partitions, Config) ->
+ Config;
+end_per_group(_, Config) ->
+ rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_testcase(Testcase, Config) when Testcase == reconnect_consumer_and_publish;
+ Testcase == reconnect_consumer_and_wait;
+ Testcase == reconnect_consumer_and_wait_channel_down ->
+ Config1 = rabbit_ct_helpers:testcase_started(Config, Testcase),
+ Q = rabbit_data_coercion:to_binary(Testcase),
+ Config2 = rabbit_ct_helpers:set_config(Config1,
+ [{rmq_nodes_count, 3},
+ {rmq_nodename_suffix, Testcase},
+ {tcp_ports_base},
+ {queue_name, Q},
+ {alt_queue_name, <<Q/binary, "_alt">>}
+ ]),
+ Ret = rabbit_ct_helpers:run_steps(
+ Config2,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()),
+ case Ret of
+ {skip, _} ->
+ Ret;
+ Config3 ->
+ EnableFF = rabbit_ct_broker_helpers:enable_feature_flag(
+ Config3, quorum_queue),
+ case EnableFF of
+ ok ->
+ Config3;
+ Skip ->
+ end_per_testcase(Testcase, Config3),
+ Skip
+ end
+ end;
+init_per_testcase(Testcase, Config) ->
+ Config1 = rabbit_ct_helpers:testcase_started(Config, Testcase),
+ rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, delete_queues, []),
+ Q = rabbit_data_coercion:to_binary(Testcase),
+ Config2 = rabbit_ct_helpers:set_config(Config1,
+ [{queue_name, Q},
+ {alt_queue_name, <<Q/binary, "_alt">>}
+ ]),
+ rabbit_ct_helpers:run_steps(Config2, rabbit_ct_client_helpers:setup_steps()).
+
+merge_app_env(Config) ->
+ rabbit_ct_helpers:merge_app_env(
+ rabbit_ct_helpers:merge_app_env(Config,
+ {rabbit, [{core_metrics_gc_interval, 100}]}),
+ {ra, [{min_wal_roll_over_interval, 30000}]}).
+
+end_per_testcase(Testcase, Config) when Testcase == reconnect_consumer_and_publish;
+ Testcase == reconnect_consumer_and_wait;
+ Testcase == reconnect_consumer_and_wait_channel_down ->
+ Config1 = rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()),
+ rabbit_ct_helpers:testcase_finished(Config1, Testcase);
+end_per_testcase(Testcase, Config) ->
+ catch delete_queues(),
+ Config1 = rabbit_ct_helpers:run_steps(
+ Config,
+ rabbit_ct_client_helpers:teardown_steps()),
+ rabbit_ct_helpers:testcase_finished(Config1, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+declare_args(Config) ->
+ Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ LQ = ?config(queue_name, Config),
+ declare(Ch, LQ, [{<<"x-queue-type">>, longstr, <<"quorum">>},
+ {<<"x-max-length">>, long, 2000},
+ {<<"x-max-length-bytes">>, long, 2000}]),
+ assert_queue_type(Server, LQ, rabbit_quorum_queue),
+
+ DQ = <<"classic-declare-args-q">>,
+ declare(Ch, DQ, [{<<"x-queue-type">>, longstr, <<"classic">>}]),
+ assert_queue_type(Server, DQ, rabbit_classic_queue),
+
+ DQ2 = <<"classic-q2">>,
+ declare(Ch, DQ2),
+ assert_queue_type(Server, DQ2, rabbit_classic_queue).
+
+declare_invalid_properties(Config) ->
+ Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ LQ = ?config(queue_name, Config),
+
+ ?assertExit(
+ {{shutdown, {server_initiated_close, 406, _}}, _},
+ amqp_channel:call(
+ rabbit_ct_client_helpers:open_channel(Config, Server),
+ #'queue.declare'{queue = LQ,
+ auto_delete = true,
+ durable = true,
+ arguments = [{<<"x-queue-type">>, longstr, <<"quorum">>}]})),
+ ?assertExit(
+ {{shutdown, {server_initiated_close, 406, _}}, _},
+ amqp_channel:call(
+ rabbit_ct_client_helpers:open_channel(Config, Server),
+ #'queue.declare'{queue = LQ,
+ exclusive = true,
+ durable = true,
+ arguments = [{<<"x-queue-type">>, longstr, <<"quorum">>}]})),
+ ?assertExit(
+ {{shutdown, {server_initiated_close, 406, _}}, _},
+ amqp_channel:call(
+ rabbit_ct_client_helpers:open_channel(Config, Server),
+ #'queue.declare'{queue = LQ,
+ durable = false,
+ arguments = [{<<"x-queue-type">>, longstr, <<"quorum">>}]})).
+
+declare_server_named(Config) ->
+ Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+
+ ?assertExit(
+ {{shutdown, {server_initiated_close, 406, _}}, _},
+ declare(rabbit_ct_client_helpers:open_channel(Config, Server),
+ <<"">>, [{<<"x-queue-type">>, longstr, <<"quorum">>}])).
+
+start_queue(Config) ->
+ Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ LQ = ?config(queue_name, Config),
+ %% The stream coordinator is also a ra process; we need to ensure the quorum tests
+ %% are not affected by any other ra cluster that could be added in the future.
+ Children = length(rpc:call(Server, supervisor, which_children, [ra_server_sup_sup])),
+
+ ?assertEqual({'queue.declare_ok', LQ, 0, 0},
+ declare(Ch, LQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+
+ %% Check that the application and one ra node are up
+ ?assertMatch({ra, _, _}, lists:keyfind(ra, 1,
+ rpc:call(Server, application, which_applications, []))),
+ Expected = Children + 1,
+ ?assertMatch(Expected,
+ length(rpc:call(Server, supervisor, which_children, [ra_server_sup_sup]))),
+
+ %% Test declare an existing queue
+ ?assertEqual({'queue.declare_ok', LQ, 0, 0},
+ declare(Ch, LQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+
+ %% Test declare with same arguments
+ ?assertEqual({'queue.declare_ok', LQ, 0, 0},
+ declare(Ch, LQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+
+ %% Test declare an existing queue with different arguments
+ ?assertExit(_, declare(Ch, LQ, [])),
+
+ %% Check that the application and process are still up
+ ?assertMatch({ra, _, _}, lists:keyfind(ra, 1,
+ rpc:call(Server, application, which_applications, []))),
+ ?assertMatch(Expected,
+ length(rpc:call(Server, supervisor, which_children, [ra_server_sup_sup]))).
+
+start_queue_concurrent(Config) ->
+ Servers = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ LQ = ?config(queue_name, Config),
+ Self = self(),
+ [begin
+ _ = spawn_link(fun () ->
+ {_Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, Server),
+ %% Test declare an existing queue
+ ?assertEqual({'queue.declare_ok', LQ, 0, 0},
+ declare(Ch, LQ,
+ [{<<"x-queue-type">>,
+ longstr,
+ <<"quorum">>}])),
+ Self ! {done, Server}
+ end)
+ end || Server <- Servers],
+
+ [begin
+ receive {done, Server} -> ok
+ after 5000 -> exit({await_done_timeout, Server})
+ end
+ end || Server <- Servers],
+
+ ok.
+
+quorum_cluster_size_3(Config) ->
+ case is_mixed_versions() of
+ true ->
+ {skip, "quorum_cluster_size_3 tests isn't mixed version reliable"};
+ false ->
+ quorum_cluster_size_x(Config, 3, 3)
+ end.
+
+quorum_cluster_size_7(Config) ->
+ quorum_cluster_size_x(Config, 7, 5).
+
+quorum_cluster_size_x(Config, Max, Expected) ->
+ Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ RaName = ra_name(QQ),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>},
+ {<<"x-quorum-initial-group-size">>, long, Max}])),
+ {ok, Members, _} = ra:members({RaName, Server}),
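+ %% the requested group size is capped at the number of cluster nodes,
+ %% so asking for 7 members on a 5-node cluster yields 5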
+ ?assertEqual(Expected, length(Members)),
+ Info = rpc:call(Server, rabbit_quorum_queue, infos,
+ [rabbit_misc:r(<<"/">>, queue, QQ)]),
+ MembersQ = proplists:get_value(members, Info),
+ ?assertEqual(Expected, length(MembersQ)).
+
+stop_queue(Config) ->
+ Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+
+ %% The stream coordinator is also a ra process; we need to ensure the quorum tests
+ %% are not affected by any other ra cluster that could be added in the future.
+ Children = length(rpc:call(Server, supervisor, which_children, [ra_server_sup_sup])),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ LQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', LQ, 0, 0},
+ declare(Ch, LQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+
+ %% Check that the application and one ra node are up
+ ?assertMatch({ra, _, _}, lists:keyfind(ra, 1,
+ rpc:call(Server, application, which_applications, []))),
+ Expected = Children + 1,
+ ?assertMatch(Expected,
+ length(rpc:call(Server, supervisor, which_children, [ra_server_sup_sup]))),
+
+ %% Delete the quorum queue
+ ?assertMatch(#'queue.delete_ok'{}, amqp_channel:call(Ch, #'queue.delete'{queue = LQ})),
+ %% Check that the application and process are down
+ wait_until(fun() ->
+ Children == length(rpc:call(Server, supervisor, which_children, [ra_server_sup_sup]))
+ end),
+ ?assertMatch({ra, _, _}, lists:keyfind(ra, 1,
+ rpc:call(Server, application, which_applications, []))).
+
+restart_queue(Config) ->
+ Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+
+ %% The stream coordinator is also a ra process; we need to ensure the quorum tests
+ %% are not affected by any other ra cluster that could be added in the future.
+ Children = length(rpc:call(Server, supervisor, which_children, [ra_server_sup_sup])),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ LQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', LQ, 0, 0},
+ declare(Ch, LQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+
+ ok = rabbit_ct_broker_helpers:stop_node(Config, Server),
+ ok = rabbit_ct_broker_helpers:start_node(Config, Server),
+
+ %% Check that the application and one ra node are up
+ ?assertMatch({ra, _, _}, lists:keyfind(ra, 1,
+ rpc:call(Server, application, which_applications, []))),
+ Expected = Children + 1,
+ ?assertMatch(Expected,
+ length(rpc:call(Server, supervisor, which_children, [ra_server_sup_sup]))),
+ Ch2 = rabbit_ct_client_helpers:open_channel(Config, Server),
+ delete_queues(Ch2, [LQ]).
+
+idempotent_recover(Config) ->
+ Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ LQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', LQ, 0, 0},
+ declare(Ch, LQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+
+ %% kill default vhost to trigger recovery
+ [{_, SupWrapperPid, _, _} | _] = rpc:call(Server, supervisor,
+ which_children,
+ [rabbit_vhost_sup_sup]),
+ [{_, Pid, _, _} | _] = rpc:call(Server, supervisor,
+ which_children,
+ [SupWrapperPid]),
+ %% kill the vhost process to trigger recover
+ rpc:call(Server, erlang, exit, [Pid, kill]),
+
+ timer:sleep(1000),
+ %% validate quorum queue is still functional
+ RaName = ra_name(LQ),
+ {ok, _, _} = ra:members({RaName, Server}),
+ %% validate vhosts are running - or rather validate that at least one
+ %% vhost per cluster is running
+ [begin
+ #{cluster_state := ServerStatuses} = maps:from_list(I),
+ ?assertMatch(#{Server := running}, maps:from_list(ServerStatuses))
+ end || I <- rpc:call(Server, rabbit_vhost, info_all, [])],
+ ok.
+
+vhost_with_quorum_queue_is_deleted(Config) ->
+ Node = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ VHost = <<"vhost2">>,
+ QName = atom_to_binary(?FUNCTION_NAME, utf8),
+ RaName = binary_to_atom(<<VHost/binary, "_", QName/binary>>, utf8),
+ User = ?config(rmq_username, Config),
+ ok = rabbit_ct_broker_helpers:add_vhost(Config, Node, VHost, User),
+ ok = rabbit_ct_broker_helpers:set_full_permissions(Config, User, VHost),
+ Conn = rabbit_ct_client_helpers:open_unmanaged_connection(Config, Node,
+ VHost),
+ {ok, Ch} = amqp_connection:open_channel(Conn),
+ ?assertEqual({'queue.declare_ok', QName, 0, 0},
+ declare(Ch, QName, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+
+ UId = rpc:call(Node, ra_directory, where_is, [RaName]),
+ ?assert(UId =/= undefined),
+ ok = rabbit_ct_broker_helpers:delete_vhost(Config, VHost),
+ %% validate quorum queues got deleted
+ undefined = rpc:call(Node, ra_directory, where_is, [RaName]),
+ ok.
+
+restart_all_types(Config) ->
+ %% Test the node restart with both types of queues (quorum and classic) to
+ %% ensure there are no regressions
+ Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+
+ %% The stream coordinator is also a ra process; we need to ensure the quorum tests
+ %% are not affected by any other ra cluster that could be added in the future.
+ Children = rpc:call(Server, supervisor, which_children, [ra_server_sup_sup]),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ1 = <<"restart_all_types-qq1">>,
+ ?assertEqual({'queue.declare_ok', QQ1, 0, 0},
+ declare(Ch, QQ1, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+ QQ2 = <<"restart_all_types-qq2">>,
+ ?assertEqual({'queue.declare_ok', QQ2, 0, 0},
+ declare(Ch, QQ2, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+
+ CQ1 = <<"restart_all_types-classic1">>,
+ ?assertEqual({'queue.declare_ok', CQ1, 0, 0}, declare(Ch, CQ1, [])),
+ rabbit_ct_client_helpers:publish(Ch, CQ1, 1),
+ CQ2 = <<"restart_all_types-classic2">>,
+ ?assertEqual({'queue.declare_ok', CQ2, 0, 0}, declare(Ch, CQ2, [])),
+ rabbit_ct_client_helpers:publish(Ch, CQ2, 1),
+
+ ok = rabbit_ct_broker_helpers:stop_node(Config, Server),
+ ok = rabbit_ct_broker_helpers:start_node(Config, Server),
+
+ %% Check that the application and two ra nodes are up. Queues are restored
+ %% after the broker is marked as "ready", that's why we need to wait for
+ %% the condition.
+ ?assertMatch({ra, _, _}, lists:keyfind(ra, 1,
+ rpc:call(Server, application, which_applications, []))),
+ Expected = length(Children) + 2,
+ ok = rabbit_ct_helpers:await_condition(
+ fun() ->
+ Expected =:= length(
+ rpc:call(
+ Server,
+ supervisor,
+ which_children,
+ [ra_server_sup_sup]))
+ end, 60000),
+ %% Check the classic queues restarted correctly
+ Ch2 = rabbit_ct_client_helpers:open_channel(Config, Server),
+ {#'basic.get_ok'{}, #amqp_msg{}} =
+ amqp_channel:call(Ch2, #'basic.get'{queue = CQ1, no_ack = false}),
+ {#'basic.get_ok'{}, #amqp_msg{}} =
+ amqp_channel:call(Ch2, #'basic.get'{queue = CQ2, no_ack = false}),
+ delete_queues(Ch2, [QQ1, QQ2, CQ1, CQ2]).
+
+delete_queues(Ch, Queues) ->
+ [amqp_channel:call(Ch, #'queue.delete'{queue = Q}) || Q <- Queues],
+ ok.
+
+stop_start_rabbit_app(Config) ->
+ %% Test start/stop of rabbit app with both types of queues (quorum and
+ %% classic) to ensure there are no regressions
+ Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+
+ %% The stream coordinator is also a ra process; we need to ensure the quorum tests
+ %% are not affected by any other ra cluster that could be added in the future.
+ Children = length(rpc:call(Server, supervisor, which_children, [ra_server_sup_sup])),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ1 = <<"stop_start_rabbit_app-qq">>,
+ ?assertEqual({'queue.declare_ok', QQ1, 0, 0},
+ declare(Ch, QQ1, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+ QQ2 = <<"quorum-q2">>,
+ ?assertEqual({'queue.declare_ok', QQ2, 0, 0},
+ declare(Ch, QQ2, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+
+ CQ1 = <<"stop_start_rabbit_app-classic">>,
+ ?assertEqual({'queue.declare_ok', CQ1, 0, 0}, declare(Ch, CQ1, [])),
+ rabbit_ct_client_helpers:publish(Ch, CQ1, 1),
+ CQ2 = <<"stop_start_rabbit_app-classic2">>,
+ ?assertEqual({'queue.declare_ok', CQ2, 0, 0}, declare(Ch, CQ2, [])),
+ rabbit_ct_client_helpers:publish(Ch, CQ2, 1),
+
+ rabbit_control_helper:command(stop_app, Server),
+ %% Check the ra application has stopped (thus its supervisor and queues)
+ ?assertMatch(false, lists:keyfind(ra, 1,
+ rpc:call(Server, application, which_applications, []))),
+
+ rabbit_control_helper:command(start_app, Server),
+
+ %% Check that the application and two ra nodes are up
+ ?assertMatch({ra, _, _}, lists:keyfind(ra, 1,
+ rpc:call(Server, application, which_applications, []))),
+ Expected = Children + 2,
+ ?assertMatch(Expected,
+ length(rpc:call(Server, supervisor, which_children, [ra_server_sup_sup]))),
+ %% Check the classic queues restarted correctly
+ Ch2 = rabbit_ct_client_helpers:open_channel(Config, Server),
+ {#'basic.get_ok'{}, #amqp_msg{}} =
+ amqp_channel:call(Ch2, #'basic.get'{queue = CQ1, no_ack = false}),
+ {#'basic.get_ok'{}, #amqp_msg{}} =
+ amqp_channel:call(Ch2, #'basic.get'{queue = CQ2, no_ack = false}),
+ delete_queues(Ch2, [QQ1, QQ2, CQ1, CQ2]).
+
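+%% Publish a single message and block until the broker confirms or nacks it;
+%% exits with confirm_timeout if neither arrives within 2.5 seconds.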
+publish_confirm(Ch, QName) ->
+ publish(Ch, QName),
+ amqp_channel:register_confirm_handler(Ch, self()),
+ ct:pal("waiting for confirms from ~s", [QName]),
+ receive
+ #'basic.ack'{} ->
+ ct:pal("CONFIRMED! ~s", [QName]),
+ ok;
+ #'basic.nack'{} ->
+ ct:pal("NOT CONFIRMED! ~s", [QName]),
+ fail
+ after 2500 ->
+ exit(confirm_timeout)
+ end.
+
+publish_and_restart(Config) ->
+ %% Test the node restart with both types of queues (quorum and classic) to
+ %% ensure there are no regressions
+ [Server | _] = Servers = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ RaName = ra_name(QQ),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+ publish(Ch, QQ),
+ wait_for_messages_ready(Servers, RaName, 1),
+ wait_for_messages_pending_ack(Servers, RaName, 0),
+
+ ok = rabbit_ct_broker_helpers:stop_node(Config, Server),
+ ok = rabbit_ct_broker_helpers:start_node(Config, Server),
+
+ wait_for_messages_ready(Servers, RaName, 1),
+ wait_for_messages_pending_ack(Servers, RaName, 0),
+ publish(rabbit_ct_client_helpers:open_channel(Config, Server), QQ),
+ wait_for_messages_ready(Servers, RaName, 2),
+ wait_for_messages_pending_ack(Servers, RaName, 0).
+
+consume_in_minority(Config) ->
+ [Server0, Server1, Server2] =
+ rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server0),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+
+ ok = rabbit_ct_broker_helpers:stop_node(Config, Server1),
+ ok = rabbit_ct_broker_helpers:stop_node(Config, Server2),
+
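+ %% with two of the three members down the queue has no quorum, so the
+ %% broker closes the connection with AMQP error 541 (internal-error)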
+ ?assertExit({{shutdown, {connection_closing, {server_initiated_close, 541, _}}}, _},
+ amqp_channel:call(Ch, #'basic.get'{queue = QQ,
+ no_ack = false})),
+ ok = rabbit_ct_broker_helpers:start_node(Config, Server1),
+ ok = rabbit_ct_broker_helpers:start_node(Config, Server2),
+ ok.
+
+shrink_all(Config) ->
+ [Server0, Server1, Server2] =
+ rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server0),
+ QQ = ?config(queue_name, Config),
+ AQ = ?config(alt_queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+ ?assertEqual({'queue.declare_ok', AQ, 0, 0},
+ declare(Ch, AQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+ timer:sleep(500),
+ Result = rpc:call(Server0, rabbit_quorum_queue, shrink_all, [Server2]),
+ ?assertMatch([{_, {ok, 2}}, {_, {ok, 2}}], Result),
+ Result1 = rpc:call(Server0, rabbit_quorum_queue, shrink_all, [Server1]),
+ ?assertMatch([{_, {ok, 1}}, {_, {ok, 1}}], Result1),
+ Result2 = rpc:call(Server0, rabbit_quorum_queue, shrink_all, [Server0]),
+ ?assertMatch([{_, {error, 1, last_node}},
+ {_, {error, 1, last_node}}], Result2),
+ ok.
+
+rebalance(Config) ->
+ case is_mixed_versions() of
+ true ->
+ {skip, "rebalance tests isn't mixed version compatible"};
+ false ->
+ rebalance0(Config)
+ end.
+
+rebalance0(Config) ->
+ [Server0, _, _] =
+ rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server0),
+
+ Q1 = <<"q1">>,
+ Q2 = <<"q2">>,
+ Q3 = <<"q3">>,
+ Q4 = <<"q4">>,
+ Q5 = <<"q5">>,
+
+ ?assertEqual({'queue.declare_ok', Q1, 0, 0},
+ declare(Ch, Q1, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+ ?assertEqual({'queue.declare_ok', Q2, 0, 0},
+ declare(Ch, Q2, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+ timer:sleep(1000),
+
+ {ok, _, {_, Leader1}} = ra:members({ra_name(Q1), Server0}),
+ {ok, _, {_, Leader2}} = ra:members({ra_name(Q2), Server0}),
+ rabbit_ct_client_helpers:publish(Ch, Q1, 3),
+ rabbit_ct_client_helpers:publish(Ch, Q2, 2),
+
+ ?assertEqual({'queue.declare_ok', Q3, 0, 0},
+ declare(Ch, Q3, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+ ?assertEqual({'queue.declare_ok', Q4, 0, 0},
+ declare(Ch, Q4, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+ ?assertEqual({'queue.declare_ok', Q5, 0, 0},
+ declare(Ch, Q5, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+ timer:sleep(500),
+ {ok, Summary} = rpc:call(Server0, rabbit_amqqueue, rebalance, [quorum, ".*", ".*"]),
+
+ %% Q1 and Q2 should not have moved leader, as these are the queues with more
+ %% log entries and we allow up to two queues per node (3 nodes, 5 queues)
+ ?assertMatch({ok, _, {_, Leader1}}, ra:members({ra_name(Q1), Server0})),
+ ?assertMatch({ok, _, {_, Leader2}}, ra:members({ra_name(Q2), Server0})),
+
+ %% Check that we have at most 2 queues per node
+ ?assert(lists:all(fun(NodeData) ->
+ lists:all(fun({_, V}) when is_integer(V) -> V =< 2;
+ (_) -> true end,
+ NodeData)
+ end, Summary)),
+ ok.
+
+subscribe_should_fail_when_global_qos_true(Config) ->
+ [Server | _] = Servers = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+
+ RaName = ra_name(QQ),
+ qos(Ch, 10, true),
+ publish(Ch, QQ),
+ wait_for_messages_ready(Servers, RaName, 1),
+ wait_for_messages_pending_ack(Servers, RaName, 0),
+ try subscribe(Ch, QQ, false) of
+ _ -> exit(subscribe_should_not_pass)
+ catch
+ _:_ = Err ->
+ ct:pal("subscribe_should_fail_when_global_qos_true caught an error: ~p", [Err])
+ end,
+ ok.
+
+dead_letter_to_classic_queue(Config) ->
+ [Server | _] = Servers = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ CQ = <<"classic-dead_letter_to_classic_queue">>,
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>},
+ {<<"x-dead-letter-exchange">>, longstr, <<>>},
+ {<<"x-dead-letter-routing-key">>, longstr, CQ}
+ ])),
+ ?assertEqual({'queue.declare_ok', CQ, 0, 0}, declare(Ch, CQ, [])),
+ test_dead_lettering(true, Config, Ch, Servers, ra_name(QQ), QQ, CQ).
+
+dead_letter_with_memory_limit(Config) ->
+ [Server | _] = Servers = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ CQ = <<"classic-dead_letter_with_memory_limit">>,
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>},
+ {<<"x-max-in-memory-length">>, long, 0},
+ {<<"x-dead-letter-exchange">>, longstr, <<>>},
+ {<<"x-dead-letter-routing-key">>, longstr, CQ}
+ ])),
+ ?assertEqual({'queue.declare_ok', CQ, 0, 0}, declare(Ch, CQ, [])),
+ test_dead_lettering(true, Config, Ch, Servers, ra_name(QQ), QQ, CQ).
+
+test_dead_lettering(PolicySet, Config, Ch, Servers, RaName, Source, Destination) ->
+ publish(Ch, Source),
+ wait_for_messages_ready(Servers, RaName, 1),
+ wait_for_messages_pending_ack(Servers, RaName, 0),
+ wait_for_messages(Config, [[Destination, <<"0">>, <<"0">>, <<"0">>]]),
+ DeliveryTag = consume(Ch, Source, false),
+ wait_for_messages_ready(Servers, RaName, 0),
+ wait_for_messages_pending_ack(Servers, RaName, 1),
+ wait_for_messages(Config, [[Destination, <<"0">>, <<"0">>, <<"0">>]]),
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DeliveryTag,
+ multiple = false,
+ requeue = false}),
+ wait_for_messages_ready(Servers, RaName, 0),
+ wait_for_messages_pending_ack(Servers, RaName, 0),
+ case PolicySet of
+ true ->
+ wait_for_messages(Config, [[Destination, <<"1">>, <<"1">>, <<"0">>]]),
+ _ = consume(Ch, Destination, true);
+ false ->
+ wait_for_messages(Config, [[Destination, <<"0">>, <<"0">>, <<"0">>]])
+ end.
+
+dead_letter_policy(Config) ->
+ [Server | _] = Servers = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ CQ = <<"classic-dead_letter_policy">>,
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+ ?assertEqual({'queue.declare_ok', CQ, 0, 0}, declare(Ch, CQ, [])),
+ ok = rabbit_ct_broker_helpers:set_policy(
+ Config, 0, <<"dlx">>, <<"dead_letter.*">>, <<"queues">>,
+ [{<<"dead-letter-exchange">>, <<"">>},
+ {<<"dead-letter-routing-key">>, CQ}]),
+ RaName = ra_name(QQ),
+ test_dead_lettering(true, Config, Ch, Servers, RaName, QQ, CQ),
+ ok = rabbit_ct_broker_helpers:clear_policy(Config, 0, <<"dlx">>),
+ test_dead_lettering(false, Config, Ch, Servers, RaName, QQ, CQ).
+
+invalid_policy(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+ ok = rabbit_ct_broker_helpers:set_policy(
+ Config, 0, <<"ha">>, <<"invalid_policy.*">>, <<"queues">>,
+ [{<<"ha-mode">>, <<"all">>}]),
+ ok = rabbit_ct_broker_helpers:set_policy(
+ Config, 0, <<"ttl">>, <<"invalid_policy.*">>, <<"queues">>,
+ [{<<"message-ttl">>, 5}]),
+ Info = rpc:call(Server, rabbit_quorum_queue, infos,
+ [rabbit_misc:r(<<"/">>, queue, QQ)]),
+ ?assertEqual('', proplists:get_value(policy, Info)),
+ ok = rabbit_ct_broker_helpers:clear_policy(Config, 0, <<"ha">>),
+ ok = rabbit_ct_broker_helpers:clear_policy(Config, 0, <<"ttl">>).
+
+dead_letter_to_quorum_queue(Config) ->
+ [Server | _] = Servers = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ QQ2 = <<"dead_letter_to_quorum_queue-q2">>,
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>},
+ {<<"x-dead-letter-exchange">>, longstr, <<>>},
+ {<<"x-dead-letter-routing-key">>, longstr, QQ2}
+ ])),
+ ?assertEqual({'queue.declare_ok', QQ2, 0, 0},
+ declare(Ch, QQ2, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+ RaName = ra_name(QQ),
+ RaName2 = ra_name(QQ2),
+ publish(Ch, QQ),
+ wait_for_messages_ready(Servers, RaName, 1),
+ wait_for_messages_pending_ack(Servers, RaName, 0),
+ wait_for_messages_ready(Servers, RaName2, 0),
+ wait_for_messages_pending_ack(Servers, RaName2, 0),
+ DeliveryTag = consume(Ch, QQ, false),
+ wait_for_messages_ready(Servers, RaName, 0),
+ wait_for_messages_pending_ack(Servers, RaName, 1),
+ wait_for_messages_ready(Servers, RaName2, 0),
+ wait_for_messages_pending_ack(Servers, RaName2, 0),
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DeliveryTag,
+ multiple = false,
+ requeue = false}),
+ wait_for_messages_ready(Servers, RaName, 0),
+ wait_for_messages_pending_ack(Servers, RaName, 0),
+ wait_for_messages_ready(Servers, RaName2, 1),
+ wait_for_messages_pending_ack(Servers, RaName2, 0),
+ _ = consume(Ch, QQ2, false).
+
+dead_letter_from_classic_to_quorum_queue(Config) ->
+ [Server | _] = Servers = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ CQ = <<"classic-q-dead_letter_from_classic_to_quorum_queue">>,
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', CQ, 0, 0},
+ declare(Ch, CQ, [{<<"x-dead-letter-exchange">>, longstr, <<>>},
+ {<<"x-dead-letter-routing-key">>, longstr, QQ}
+ ])),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+ RaName = ra_name(QQ),
+ publish(Ch, CQ),
+ wait_for_messages_ready(Servers, RaName, 0),
+ wait_for_messages_pending_ack(Servers, RaName, 0),
+ wait_for_messages(Config, [[CQ, <<"1">>, <<"1">>, <<"0">>]]),
+ DeliveryTag = consume(Ch, CQ, false),
+ wait_for_messages_ready(Servers, RaName, 0),
+ wait_for_messages_pending_ack(Servers, RaName, 0),
+ wait_for_messages(Config, [[CQ, <<"1">>, <<"0">>, <<"1">>]]),
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DeliveryTag,
+ multiple = false,
+ requeue = false}),
+ wait_for_messages_ready(Servers, RaName, 1),
+ wait_for_messages_pending_ack(Servers, RaName, 0),
+ wait_for_messages(Config, [[CQ, <<"0">>, <<"0">>, <<"0">>]]),
+ _ = consume(Ch, QQ, false),
+ rabbit_ct_client_helpers:close_channel(Ch).
+
+cleanup_queue_state_on_channel_after_publish(Config) ->
+ %% Declare/delete the queue in one channel and publish on a different one,
+ %% to verify that the cleanup is propagated through channels
+ [Server | _] = Servers = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ %% The stream coordinator is also a ra process; we need to ensure the quorum tests
+ %% are not affected by any other ra cluster that could be added in the future.
+ Children = length(rpc:call(Server, supervisor, which_children, [ra_server_sup_sup])),
+
+ Ch1 = rabbit_ct_client_helpers:open_channel(Config, Server),
+ Ch2 = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch1, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+ RaName = ra_name(QQ),
+ publish(Ch2, QQ),
+ Res = dirty_query(Servers, RaName, fun rabbit_fifo:query_consumer_count/1),
+ ct:pal ("Res ~p", [Res]),
+ wait_for_messages_pending_ack(Servers, RaName, 0),
+ wait_for_messages_ready(Servers, RaName, 1),
+ [NCh1, NCh2] = rpc:call(Server, rabbit_channel, list, []),
+ %% Check the channel state contains the state for the quorum queue on
+ %% channel 1 and 2
+ wait_for_cleanup(Server, NCh1, 0),
+ wait_for_cleanup(Server, NCh2, 1),
+ %% then delete the queue and wait for the process to terminate
+ ?assertMatch(#'queue.delete_ok'{},
+ amqp_channel:call(Ch1, #'queue.delete'{queue = QQ})),
+ wait_until(fun() ->
+ Children == length(rpc:call(Server, supervisor, which_children,
+ [ra_server_sup_sup]))
+ end),
+ %% Check that all queue states have been cleaned
+ wait_for_cleanup(Server, NCh2, 0),
+ wait_for_cleanup(Server, NCh1, 0).
+
+cleanup_queue_state_on_channel_after_subscribe(Config) ->
+ %% Declare/delete the queue and publish in one channel, while consuming on a
+ %% different one to verify that the cleanup is propagated through channels
+ [Server | _] = Servers = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ %% The stream coordinator is also a ra process; we need to ensure the quorum tests
+ %% are not affected by any other ra cluster that could be added in the future.
+ Children = length(rpc:call(Server, supervisor, which_children, [ra_server_sup_sup])),
+
+ Ch1 = rabbit_ct_client_helpers:open_channel(Config, Server),
+ Ch2 = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch1, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+ RaName = ra_name(QQ),
+ publish(Ch1, QQ),
+ wait_for_messages_ready(Servers, RaName, 1),
+ wait_for_messages_pending_ack(Servers, RaName, 0),
+ subscribe(Ch2, QQ, false),
+ receive
+ {#'basic.deliver'{delivery_tag = DeliveryTag,
+ redelivered = false}, _} ->
+ wait_for_messages_ready(Servers, RaName, 0),
+ wait_for_messages_pending_ack(Servers, RaName, 1),
+ amqp_channel:cast(Ch2, #'basic.ack'{delivery_tag = DeliveryTag,
+ multiple = true}),
+ wait_for_messages_ready(Servers, RaName, 0),
+ wait_for_messages_pending_ack(Servers, RaName, 0)
+ end,
+ [NCh1, NCh2] = rpc:call(Server, rabbit_channel, list, []),
+ %% Check the channel state contains the state for the quorum queue on channel 1 and 2
+ wait_for_cleanup(Server, NCh1, 1),
+ wait_for_cleanup(Server, NCh2, 1),
+ ?assertMatch(#'queue.delete_ok'{}, amqp_channel:call(Ch1, #'queue.delete'{queue = QQ})),
+ wait_until(fun() ->
+ Children == length(rpc:call(Server, supervisor, which_children, [ra_server_sup_sup]))
+ end),
+ %% Check that all queue states have been cleaned
+ wait_for_cleanup(Server, NCh1, 0),
+ wait_for_cleanup(Server, NCh2, 0).
+
+recover_from_single_failure(Config) ->
+ [Server, Server1, Server2] = Servers = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+
+ ok = rabbit_ct_broker_helpers:stop_node(Config, Server2),
+ RaName = ra_name(QQ),
+
+ publish(Ch, QQ),
+ publish(Ch, QQ),
+ publish(Ch, QQ),
+ wait_for_messages_ready([Server, Server1], RaName, 3),
+ wait_for_messages_pending_ack([Server, Server1], RaName, 0),
+
+ ok = rabbit_ct_broker_helpers:start_node(Config, Server2),
+ wait_for_messages_ready(Servers, RaName, 3),
+ wait_for_messages_pending_ack(Servers, RaName, 0).
+
+recover_from_multiple_failures(Config) ->
+ [Server, Server1, Server2] = Servers = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+
+ ok = rabbit_ct_broker_helpers:stop_node(Config, Server1),
+ RaName = ra_name(QQ),
+
+ publish(Ch, QQ),
+ publish(Ch, QQ),
+ publish(Ch, QQ),
+
+ ok = rabbit_ct_broker_helpers:stop_node(Config, Server2),
+
+ publish(Ch, QQ),
+ publish(Ch, QQ),
+ publish(Ch, QQ),
+
+ wait_for_messages_ready([Server], RaName, 3),
+ wait_for_messages_pending_ack([Server], RaName, 0),
+
+ ok = rabbit_ct_broker_helpers:start_node(Config, Server1),
+ ok = rabbit_ct_broker_helpers:start_node(Config, Server2),
+
+ %% there is an assumption here that the messages were not lost and were
+ %% recovered when a quorum was restored. Not the best test perhaps.
+ wait_for_messages_ready(Servers, RaName, 6),
+ wait_for_messages_pending_ack(Servers, RaName, 0).
+
+publishing_to_unavailable_queue(Config) ->
+ %% publishing to an unavailable queue but with a reachable member should result
+ %% in the initial enqueuer session timing out and the message being nacked
+ [Server, Server1, Server2] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ TCh = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(TCh, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+
+ ok = rabbit_ct_broker_helpers:stop_node(Config, Server1),
+ ok = rabbit_ct_broker_helpers:stop_node(Config, Server2),
+
+ ct:pal("opening channel to ~w", [Server]),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ #'confirm.select_ok'{} = amqp_channel:call(Ch, #'confirm.select'{}),
+ amqp_channel:register_confirm_handler(Ch, self()),
+ publish_many(Ch, QQ, 1),
+ %% this should result in a nack
+ ok = receive
+ #'basic.ack'{} -> fail;
+ #'basic.nack'{} -> ok
+ after 90000 ->
+ exit(confirm_timeout)
+ end,
+ ok = rabbit_ct_broker_helpers:start_node(Config, Server1),
+ timer:sleep(2000),
+ publish_many(Ch, QQ, 1),
+ %% this should now be acked
+ ok = receive
+ #'basic.ack'{} -> ok;
+ #'basic.nack'{} -> fail
+ after 90000 ->
+ exit(confirm_timeout)
+ end,
+ %% check we get at least one ack
+ ok = rabbit_ct_broker_helpers:start_node(Config, Server2),
+ ok.
+
+leadership_takeover(Config) ->
+ %% Kill nodes in succession, forcing leadership takeovers, and verify that
+ %% the messages in the queue survive each transfer.
+ [Server, Server1, Server2] = Servers = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+
+ ok = rabbit_ct_broker_helpers:stop_node(Config, Server1),
+ RaName = ra_name(QQ),
+
+ publish(Ch, QQ),
+ publish(Ch, QQ),
+ publish(Ch, QQ),
+
+ wait_for_messages_ready([Server], RaName, 3),
+ wait_for_messages_pending_ack([Server], RaName, 0),
+
+ ok = rabbit_ct_broker_helpers:stop_node(Config, Server2),
+
+ ok = rabbit_ct_broker_helpers:start_node(Config, Server1),
+ ok = rabbit_ct_broker_helpers:stop_node(Config, Server),
+ ok = rabbit_ct_broker_helpers:start_node(Config, Server2),
+ ok = rabbit_ct_broker_helpers:stop_node(Config, Server1),
+ ok = rabbit_ct_broker_helpers:start_node(Config, Server),
+
+ wait_for_messages_ready([Server2, Server], RaName, 3),
+ wait_for_messages_pending_ack([Server2, Server], RaName, 0),
+
+ ok = rabbit_ct_broker_helpers:start_node(Config, Server1),
+ wait_for_messages_ready(Servers, RaName, 3),
+ wait_for_messages_pending_ack(Servers, RaName, 0).
+
+metrics_cleanup_on_leadership_takeover(Config) ->
+ case is_mixed_versions() of
+ true ->
+ {skip, "metrics_cleanup_on_leadership_takeover tests isn't mixed version compatible"};
+ false ->
+ metrics_cleanup_on_leadership_takeover0(Config)
+ end.
+
+metrics_cleanup_on_leadership_takeover0(Config) ->
+ %% Queue core metrics should be deleted from a node once the leadership is transferred
+ %% to another follower
+ [Server, _, _] = Servers = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+
+ RaName = ra_name(QQ),
+ publish(Ch, QQ),
+ publish(Ch, QQ),
+ publish(Ch, QQ),
+
+ wait_for_messages_ready([Server], RaName, 3),
+ wait_for_messages_pending_ack([Server], RaName, 0),
+ {ok, _, {_, Leader}} = ra:members({RaName, Server}),
+ QRes = rabbit_misc:r(<<"/">>, queue, QQ),
+ wait_until(
+ fun() ->
+ case rpc:call(Leader, ets, lookup, [queue_coarse_metrics, QRes]) of
+ [{QRes, 3, 0, 3, _}] -> true;
+ _ -> false
+ end
+ end),
+ force_leader_change(Servers, QQ),
+ wait_until(fun () ->
+ [] =:= rpc:call(Leader, ets, lookup, [queue_coarse_metrics, QRes]) andalso
+ [] =:= rpc:call(Leader, ets, lookup, [queue_metrics, QRes])
+ end),
+ ok.
+
+metrics_cleanup_on_leader_crash(Config) ->
+ %% Queue core metrics should be deleted from a node once the leadership is transferred
+ %% to another follower
+ [Server | _] = Servers =
+ rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+
+ RaName = ra_name(QQ),
+ publish(Ch, QQ),
+ publish(Ch, QQ),
+ publish(Ch, QQ),
+
+ wait_for_messages_ready([Server], RaName, 3),
+ wait_for_messages_pending_ack([Server], RaName, 0),
+ {ok, _, {Name, Leader}} = ra:members({RaName, Server}),
+ QRes = rabbit_misc:r(<<"/">>, queue, QQ),
+ wait_until(
+ fun() ->
+ case rpc:call(Leader, ets, lookup, [queue_coarse_metrics, QRes]) of
+ [{QRes, 3, 0, 3, _}] -> true;
+ _ -> false
+ end
+ end),
+ Pid = rpc:call(Leader, erlang, whereis, [Name]),
+ rpc:call(Leader, erlang, exit, [Pid, kill]),
+ [Other | _] = lists:delete(Leader, Servers),
+ catch ra:trigger_election(Other),
+ %% kill it again just in case it came straight back up again
+ catch rpc:call(Leader, erlang, exit, [Pid, kill]),
+
+    %% this isn't a reliable test as the leader can be restarted so quickly
+    %% after a crash that it is elected leader of the next term as well.
+ wait_until(
+ fun() ->
+ [] == rpc:call(Leader, ets, lookup, [queue_coarse_metrics, QRes])
+ end),
+ ok.
+
+
+delete_declare(Config) ->
+ case is_mixed_versions() of
+ true ->
+ {skip, "delete_declare isn't mixed version reliable"};
+ false ->
+ delete_declare0(Config)
+ end.
+
+delete_declare0(Config) ->
+    %% Cluster deletion in ra is asynchronous, so we have to ensure that RabbitMQ handles that
+ [Server | _] = Servers = rabbit_ct_broker_helpers:get_node_configs(Config,
+ nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+
+ RaName = ra_name(QQ),
+ publish(Ch, QQ),
+ publish(Ch, QQ),
+ publish(Ch, QQ),
+ wait_for_messages_ready(Servers, RaName, 3),
+
+ ?assertMatch(#'queue.delete_ok'{},
+ amqp_channel:call(Ch, #'queue.delete'{queue = QQ})),
+ %% the actual data deletions happen after the call has returned as a quorum
+ %% queue leader waits for all nodes to confirm they replicated the poison
+ %% pill before terminating itself.
+ case is_mixed_versions() of
+ true ->
+            %% when in mixed versions the QQ may not be able to apply the poison
+            %% pill on all nodes, so we need to wait longer for the forced delete
+            %% to happen
+ timer:sleep(10000);
+ false ->
+ timer:sleep(1000)
+ end,
+
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+
+    %% Ensure that it is a new queue and that it is empty
+ wait_for_messages_ready(Servers, RaName, 0),
+ wait_for_messages_pending_ack(Servers, RaName, 0).
+
+sync_queue(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+ {error, _, _} =
+ rabbit_ct_broker_helpers:rabbitmqctl(Config, 0, [<<"sync_queue">>, QQ]),
+ ok.
+
+cancel_sync_queue(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+ {error, _, _} =
+ rabbit_ct_broker_helpers:rabbitmqctl(Config, 0, [<<"cancel_sync_queue">>, QQ]),
+ ok.
+
+declare_during_node_down(Config) ->
+ [Server, DownServer, _] = Servers = rabbit_ct_broker_helpers:get_node_configs(
+ Config, nodename),
+
+ stop_node(Config, DownServer),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+
+ RaName = ra_name(QQ),
+ timer:sleep(2000),
+ rabbit_ct_broker_helpers:start_node(Config, DownServer),
+ publish(Ch, QQ),
+ wait_for_messages_ready(Servers, RaName, 1),
+ ok.
+
+simple_confirm_availability_on_leader_change(Config) ->
+ [Node1, Node2, _Node3] =
+ rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+    %% declare a queue on node2 - this _should_ place the leader on node 2
+ DCh = rabbit_ct_client_helpers:open_channel(Config, Node2),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(DCh, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+
+ erlang:process_flag(trap_exit, true),
+ %% open a channel to another node
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Node1),
+ #'confirm.select_ok'{} = amqp_channel:call(Ch, #'confirm.select'{}),
+ ok = publish_confirm(Ch, QQ),
+
+ %% stop the node hosting the leader
+ ok = rabbit_ct_broker_helpers:stop_node(Config, Node2),
+    %% this should not fail as the channel should detect the new leader and
+    %% resend to it
+ ok = publish_confirm(Ch, QQ),
+ ok = rabbit_ct_broker_helpers:start_node(Config, Node2),
+ ok.
+
+confirm_availability_on_leader_change(Config) ->
+ [Node1, Node2, _Node3] =
+ rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+    %% declare a queue on node2 - this _should_ place the leader on node 2
+ DCh = rabbit_ct_client_helpers:open_channel(Config, Node2),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(DCh, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+
+ erlang:process_flag(trap_exit, true),
+ Pid = spawn_link(fun () ->
+ %% open a channel to another node
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Node1),
+ #'confirm.select_ok'{} = amqp_channel:call(Ch, #'confirm.select'{}),
+ ConfirmLoop = fun Loop() ->
+ ok = publish_confirm(Ch, QQ),
+ receive {done, P} ->
+ P ! done,
+ ok
+ after 0 -> Loop() end
+ end,
+ ConfirmLoop()
+ end),
+
+ timer:sleep(500),
+ %% stop the node hosting the leader
+ stop_node(Config, Node2),
+    %% this should not fail as the channel should detect the new leader and
+    %% resend to it
+ timer:sleep(500),
+ Pid ! {done, self()},
+ receive
+ done -> ok;
+ {'EXIT', Pid, Err} ->
+ exit(Err)
+ after 5500 ->
+ flush(100),
+ exit(bah)
+ end,
+ ok = rabbit_ct_broker_helpers:start_node(Config, Node2),
+ ok.
+
+flush(T) ->
+ receive X ->
+ ct:pal("flushed ~w", [X]),
+ flush(T)
+ after T ->
+ ok
+ end.
+
+
+add_member_not_running(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ ct:pal("add_member_not_running config ~p", [Config]),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+ ?assertEqual({error, node_not_running},
+ rpc:call(Server, rabbit_quorum_queue, add_member,
+ [<<"/">>, QQ, 'rabbit@burrow', 5000])).
+
+add_member_classic(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ CQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', CQ, 0, 0}, declare(Ch, CQ, [])),
+ ?assertEqual({error, classic_queue_not_supported},
+ rpc:call(Server, rabbit_quorum_queue, add_member,
+ [<<"/">>, CQ, Server, 5000])).
+
+add_member_already_a_member(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+ %% idempotent by design
+ ?assertEqual(ok,
+ rpc:call(Server, rabbit_quorum_queue, add_member,
+ [<<"/">>, QQ, Server, 5000])).
+
+add_member_not_found(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({error, not_found},
+ rpc:call(Server, rabbit_quorum_queue, add_member,
+ [<<"/">>, QQ, Server, 5000])).
+
+add_member(Config) ->
+ [Server0, Server1] = Servers0 =
+ rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server0),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+ ?assertEqual({error, node_not_running},
+ rpc:call(Server0, rabbit_quorum_queue, add_member,
+ [<<"/">>, QQ, Server1, 5000])),
+ ok = rabbit_control_helper:command(stop_app, Server1),
+ ok = rabbit_control_helper:command(join_cluster, Server1, [atom_to_list(Server0)], []),
+ rabbit_control_helper:command(start_app, Server1),
+ ?assertEqual(ok, rpc:call(Server0, rabbit_quorum_queue, add_member,
+ [<<"/">>, QQ, Server1, 5000])),
+ Info = rpc:call(Server0, rabbit_quorum_queue, infos,
+ [rabbit_misc:r(<<"/">>, queue, QQ)]),
+ Servers = lists:sort(Servers0),
+ ?assertEqual(Servers, lists:sort(proplists:get_value(online, Info, []))).
+
+delete_member_not_running(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+    %% it should be possible to delete members that are not online (e.g. decommissioned)
+ ?assertEqual(ok,
+ rpc:call(Server, rabbit_quorum_queue, delete_member,
+ [<<"/">>, QQ, 'rabbit@burrow'])).
+
+delete_member_classic(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ CQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', CQ, 0, 0}, declare(Ch, CQ, [])),
+ ?assertEqual({error, classic_queue_not_supported},
+ rpc:call(Server, rabbit_quorum_queue, delete_member,
+ [<<"/">>, CQ, Server])).
+
+delete_member_queue_not_found(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({error, not_found},
+ rpc:call(Server, rabbit_quorum_queue, delete_member,
+ [<<"/">>, QQ, Server])).
+
+delete_member(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+ timer:sleep(100),
+ ?assertEqual(ok,
+ rpc:call(Server, rabbit_quorum_queue, delete_member,
+ [<<"/">>, QQ, Server])).
+
+delete_member_not_a_member(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+ timer:sleep(100),
+ ?assertEqual(ok,
+ rpc:call(Server, rabbit_quorum_queue, delete_member,
+ [<<"/">>, QQ, Server])),
+ %% idempotent by design
+ ?assertEqual(ok,
+ rpc:call(Server, rabbit_quorum_queue, delete_member,
+ [<<"/">>, QQ, Server])).
+
+delete_member_during_node_down(Config) ->
+ [Server, DownServer, Remove] = rabbit_ct_broker_helpers:get_node_configs(
+ Config, nodename),
+
+ stop_node(Config, DownServer),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+ timer:sleep(200),
+ ?assertEqual(ok, rpc:call(Server, rabbit_quorum_queue, delete_member,
+ [<<"/">>, QQ, Remove])),
+
+ rabbit_ct_broker_helpers:start_node(Config, DownServer),
+ ?assertEqual(ok, rpc:call(Server, rabbit_quorum_queue, repair_amqqueue_nodes,
+ [<<"/">>, QQ])),
+ ok.
+
+%% These tests check if node removal would cause any queues to lose (or not lose)
+%% their quorum. See rabbitmq/rabbitmq-cli#389 for background.
+
+node_removal_is_quorum_critical(Config) ->
+ [Server | _] = Servers = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QName = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QName, 0, 0},
+ declare(Ch, QName, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+ timer:sleep(100),
+ [begin
+ Qs = rpc:call(S, rabbit_quorum_queue, list_with_minimum_quorum, []),
+ ?assertEqual([QName], queue_names(Qs))
+ end || S <- Servers].
+
+node_removal_is_not_quorum_critical(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QName = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QName, 0, 0},
+ declare(Ch, QName, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+ timer:sleep(100),
+ Qs = rpc:call(Server, rabbit_quorum_queue, list_with_minimum_quorum, []),
+ ?assertEqual([], Qs).
+
+
+file_handle_reservations(Config) ->
+ case is_mixed_versions() of
+ true ->
+ {skip, "file_handle_reservations tests isn't mixed version compatible"};
+ false ->
+ file_handle_reservations0(Config)
+ end.
+
+file_handle_reservations0(Config) ->
+ Servers = [Server1 | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server1),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+ RaName = ra_name(QQ),
+ {ok, _, {_, Leader}} = ra:members({RaName, Server1}),
+ [Follower1, Follower2] = Servers -- [Leader],
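+    %% the leader is expected to reserve more file handles than the followers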
+ ?assertEqual([{files_reserved, 5}],
+ rpc:call(Leader, file_handle_cache, info, [[files_reserved]])),
+ ?assertEqual([{files_reserved, 2}],
+ rpc:call(Follower1, file_handle_cache, info, [[files_reserved]])),
+ ?assertEqual([{files_reserved, 2}],
+ rpc:call(Follower2, file_handle_cache, info, [[files_reserved]])),
+ force_leader_change(Servers, QQ),
+ {ok, _, {_, Leader0}} = ra:members({RaName, Server1}),
+ [Follower01, Follower02] = Servers -- [Leader0],
+ ?assertEqual([{files_reserved, 5}],
+ rpc:call(Leader0, file_handle_cache, info, [[files_reserved]])),
+ ?assertEqual([{files_reserved, 2}],
+ rpc:call(Follower01, file_handle_cache, info, [[files_reserved]])),
+ ?assertEqual([{files_reserved, 2}],
+ rpc:call(Follower02, file_handle_cache, info, [[files_reserved]])).
+
+file_handle_reservations_above_limit(Config) ->
+ [S1, S2, S3] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, S1),
+ QQ = ?config(queue_name, Config),
+ QQ2 = ?config(alt_queue_name, Config),
+
+ Limit = rpc:call(S1, file_handle_cache, get_limit, []),
+
+ ok = rpc:call(S1, file_handle_cache, set_limit, [3]),
+ ok = rpc:call(S2, file_handle_cache, set_limit, [3]),
+ ok = rpc:call(S3, file_handle_cache, set_limit, [3]),
+
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+ ?assertEqual({'queue.declare_ok', QQ2, 0, 0},
+ declare(Ch, QQ2, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+
+ ok = rpc:call(S1, file_handle_cache, set_limit, [Limit]),
+ ok = rpc:call(S2, file_handle_cache, set_limit, [Limit]),
+ ok = rpc:call(S3, file_handle_cache, set_limit, [Limit]).
+
+cleanup_data_dir(Config) ->
+    %% This test is slow, but it also checks that we properly handle errors when
+    %% trying to delete a queue in minority. An error in a case clause there had
+    %% previously gone unnoticed.
+
+ [Server1, Server2] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server1),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+ timer:sleep(100),
+
+ UId1 = proplists:get_value(ra_name(QQ), rpc:call(Server1, ra_directory, list_registered, [])),
+ UId2 = proplists:get_value(ra_name(QQ), rpc:call(Server2, ra_directory, list_registered, [])),
+ DataDir1 = rpc:call(Server1, ra_env, server_data_dir, [UId1]),
+ DataDir2 = rpc:call(Server2, ra_env, server_data_dir, [UId2]),
+ ?assert(filelib:is_dir(DataDir1)),
+ ?assert(filelib:is_dir(DataDir2)),
+
+ ok = rabbit_ct_broker_helpers:stop_node(Config, Server2),
+
+ ?assertMatch(#'queue.delete_ok'{},
+ amqp_channel:call(Ch, #'queue.delete'{queue = QQ})),
+ ok = rabbit_ct_broker_helpers:stop_node(Config, Server2),
+    %% data dir 1 should be force-deleted at this point
+ ?assert(not filelib:is_dir(DataDir1)),
+ ?assert(filelib:is_dir(DataDir2)),
+ ok = rabbit_ct_broker_helpers:start_node(Config, Server2),
+ timer:sleep(2000),
+
+ ?assertEqual(ok,
+ rpc:call(Server2, rabbit_quorum_queue, cleanup_data_dir, [])),
+ ?assert(not filelib:is_dir(DataDir2)),
+ ok.
+
+reconnect_consumer_and_publish(Config) ->
+ [Server | _] = Servers =
+ rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+ RaName = ra_name(QQ),
+ {ok, _, {_, Leader}} = ra:members({RaName, Server}),
+ [F1, F2] = lists:delete(Leader, Servers),
+ ChF = rabbit_ct_client_helpers:open_channel(Config, F1),
+ publish(Ch, QQ),
+ wait_for_messages_ready(Servers, RaName, 1),
+ wait_for_messages_pending_ack(Servers, RaName, 0),
+ subscribe(ChF, QQ, false),
+ receive
+ {#'basic.deliver'{redelivered = false}, _} ->
+ wait_for_messages_ready(Servers, RaName, 0),
+ wait_for_messages_pending_ack(Servers, RaName, 1)
+ end,
+ Up = [Leader, F2],
+ rabbit_ct_broker_helpers:block_traffic_between(F1, Leader),
+ rabbit_ct_broker_helpers:block_traffic_between(F1, F2),
+ wait_for_messages_ready(Up, RaName, 1),
+ wait_for_messages_pending_ack(Up, RaName, 0),
+ wait_for_messages_ready([F1], RaName, 0),
+ wait_for_messages_pending_ack([F1], RaName, 1),
+ rabbit_ct_broker_helpers:allow_traffic_between(F1, Leader),
+ rabbit_ct_broker_helpers:allow_traffic_between(F1, F2),
+ publish(Ch, QQ),
+ wait_for_messages_ready(Servers, RaName, 0),
+ wait_for_messages_pending_ack(Servers, RaName, 2),
+ receive
+ {#'basic.deliver'{delivery_tag = DeliveryTag,
+ redelivered = false}, _} ->
+ amqp_channel:cast(ChF, #'basic.ack'{delivery_tag = DeliveryTag,
+ multiple = false}),
+ wait_for_messages_ready(Servers, RaName, 0),
+ wait_for_messages_pending_ack(Servers, RaName, 1)
+ end,
+ receive
+ {#'basic.deliver'{delivery_tag = DeliveryTag2,
+ redelivered = true}, _} ->
+ amqp_channel:cast(ChF, #'basic.ack'{delivery_tag = DeliveryTag2,
+ multiple = false}),
+ wait_for_messages_ready(Servers, RaName, 0),
+ wait_for_messages_pending_ack(Servers, RaName, 0)
+ end.
+
+reconnect_consumer_and_wait(Config) ->
+ [Server | _] = Servers =
+ rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+ RaName = ra_name(QQ),
+ {ok, _, {_, Leader}} = ra:members({RaName, Server}),
+ [F1, F2] = lists:delete(Leader, Servers),
+ ChF = rabbit_ct_client_helpers:open_channel(Config, F1),
+ publish(Ch, QQ),
+ wait_for_messages_ready(Servers, RaName, 1),
+ wait_for_messages_pending_ack(Servers, RaName, 0),
+ subscribe(ChF, QQ, false),
+ receive
+ {#'basic.deliver'{redelivered = false}, _} ->
+ wait_for_messages_ready(Servers, RaName, 0),
+ wait_for_messages_pending_ack(Servers, RaName, 1)
+ end,
+ Up = [Leader, F2],
+ rabbit_ct_broker_helpers:block_traffic_between(F1, Leader),
+ rabbit_ct_broker_helpers:block_traffic_between(F1, F2),
+ wait_for_messages_ready(Up, RaName, 1),
+ wait_for_messages_pending_ack(Up, RaName, 0),
+ wait_for_messages_ready([F1], RaName, 0),
+ wait_for_messages_pending_ack([F1], RaName, 1),
+ rabbit_ct_broker_helpers:allow_traffic_between(F1, Leader),
+ rabbit_ct_broker_helpers:allow_traffic_between(F1, F2),
+ wait_for_messages_ready(Servers, RaName, 0),
+ wait_for_messages_pending_ack(Servers, RaName, 1),
+ receive
+ {#'basic.deliver'{delivery_tag = DeliveryTag,
+ redelivered = true}, _} ->
+ amqp_channel:cast(ChF, #'basic.ack'{delivery_tag = DeliveryTag,
+ multiple = false}),
+ wait_for_messages_ready(Servers, RaName, 0),
+ wait_for_messages_pending_ack(Servers, RaName, 0)
+ end.
+
+reconnect_consumer_and_wait_channel_down(Config) ->
+ [Server | _] = Servers =
+ rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+ RaName = ra_name(QQ),
+ {ok, _, {_, Leader}} = ra:members({RaName, Server}),
+ [F1, F2] = lists:delete(Leader, Servers),
+ ChF = rabbit_ct_client_helpers:open_channel(Config, F1),
+ publish(Ch, QQ),
+ wait_for_messages_ready(Servers, RaName, 1),
+ wait_for_messages_pending_ack(Servers, RaName, 0),
+ subscribe(ChF, QQ, false),
+ receive
+ {#'basic.deliver'{redelivered = false}, _} ->
+ wait_for_messages_ready(Servers, RaName, 0),
+ wait_for_messages_pending_ack(Servers, RaName, 1)
+ end,
+ Up = [Leader, F2],
+ rabbit_ct_broker_helpers:block_traffic_between(F1, Leader),
+ rabbit_ct_broker_helpers:block_traffic_between(F1, F2),
+ wait_for_messages_ready(Up, RaName, 1),
+ wait_for_messages_pending_ack(Up, RaName, 0),
+ wait_for_messages_ready([F1], RaName, 0),
+ wait_for_messages_pending_ack([F1], RaName, 1),
+ rabbit_ct_client_helpers:close_channel(ChF),
+ rabbit_ct_broker_helpers:allow_traffic_between(F1, Leader),
+ rabbit_ct_broker_helpers:allow_traffic_between(F1, F2),
+ %% Let's give it a few seconds to ensure it doesn't attempt to
+ %% deliver to the down channel - it shouldn't be monitored
+ %% at this time!
+ timer:sleep(5000),
+ wait_for_messages_ready(Servers, RaName, 1),
+ wait_for_messages_pending_ack(Servers, RaName, 0).
+
+delete_immediately_by_resource(Config) ->
+ Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+
+    %% The stream coordinator is also a ra process; we need to ensure the quorum tests
+    %% are not affected by any other ra cluster that could be added in the future
+ Children = length(rpc:call(Server, supervisor, which_children, [ra_server_sup_sup])),
+
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+ Cmd2 = ["eval", "rabbit_amqqueue:delete_immediately_by_resource([rabbit_misc:r(<<\"/\">>, queue, <<\"" ++ binary_to_list(QQ) ++ "\">>)])."],
+ ?assertEqual({ok, "ok\n"}, rabbit_ct_broker_helpers:rabbitmqctl(Config, 0, Cmd2)),
+
+ %% Check that the application and process are down
+ wait_until(fun() ->
+ Children == length(rpc:call(Server, supervisor, which_children, [ra_server_sup_sup]))
+ end),
+ ?assertMatch({ra, _, _}, lists:keyfind(ra, 1,
+ rpc:call(Server, application, which_applications, []))).
+
+subscribe_redelivery_count(Config) ->
+ [Server | _] = Servers = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+
+ RaName = ra_name(QQ),
+ publish(Ch, QQ),
+ wait_for_messages_ready(Servers, RaName, 1),
+ wait_for_messages_pending_ack(Servers, RaName, 0),
+ subscribe(Ch, QQ, false),
+
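+    %% quorum queues expose the number of previous delivery attempts via the x-delivery-count header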
+ DCHeader = <<"x-delivery-count">>,
+ receive
+ {#'basic.deliver'{delivery_tag = DeliveryTag,
+ redelivered = false},
+ #amqp_msg{props = #'P_basic'{headers = H0}}} ->
+ ?assertMatch(undefined, rabbit_basic:header(DCHeader, H0)),
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DeliveryTag,
+ multiple = false,
+ requeue = true})
+ after 5000 ->
+ exit(basic_deliver_timeout)
+ end,
+
+ receive
+ {#'basic.deliver'{delivery_tag = DeliveryTag1,
+ redelivered = true},
+ #amqp_msg{props = #'P_basic'{headers = H1}}} ->
+ ?assertMatch({DCHeader, _, 1}, rabbit_basic:header(DCHeader, H1)),
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DeliveryTag1,
+ multiple = false,
+ requeue = true})
+ after 5000 ->
+ exit(basic_deliver_timeout_2)
+ end,
+
+ receive
+ {#'basic.deliver'{delivery_tag = DeliveryTag2,
+ redelivered = true},
+ #amqp_msg{props = #'P_basic'{headers = H2}}} ->
+ ?assertMatch({DCHeader, _, 2}, rabbit_basic:header(DCHeader, H2)),
+ amqp_channel:cast(Ch, #'basic.ack'{delivery_tag = DeliveryTag2,
+ multiple = false}),
+ ct:pal("wait_for_messages_ready", []),
+ wait_for_messages_ready(Servers, RaName, 0),
+ ct:pal("wait_for_messages_pending_ack", []),
+ wait_for_messages_pending_ack(Servers, RaName, 0)
+ after 5000 ->
+ flush(500),
+ exit(basic_deliver_timeout_3)
+ end.
+
+subscribe_redelivery_limit(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>},
+ {<<"x-delivery-limit">>, long, 1}])),
+
+ publish(Ch, QQ),
+ wait_for_messages(Config, [[QQ, <<"1">>, <<"1">>, <<"0">>]]),
+ subscribe(Ch, QQ, false),
+
+ DCHeader = <<"x-delivery-count">>,
+ receive
+ {#'basic.deliver'{delivery_tag = DeliveryTag,
+ redelivered = false},
+ #amqp_msg{props = #'P_basic'{headers = H0}}} ->
+ ?assertMatch(undefined, rabbit_basic:header(DCHeader, H0)),
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DeliveryTag,
+ multiple = false,
+ requeue = true})
+ end,
+
+ wait_for_messages(Config, [[QQ, <<"1">>, <<"0">>, <<"1">>]]),
+ receive
+ {#'basic.deliver'{delivery_tag = DeliveryTag1,
+ redelivered = true},
+ #amqp_msg{props = #'P_basic'{headers = H1}}} ->
+ ?assertMatch({DCHeader, _, 1}, rabbit_basic:header(DCHeader, H1)),
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DeliveryTag1,
+ multiple = false,
+ requeue = true})
+ end,
+
+ wait_for_messages(Config, [[QQ, <<"0">>, <<"0">>, <<"0">>]]),
+ receive
+ {#'basic.deliver'{redelivered = true}, #amqp_msg{}} ->
+ throw(unexpected_redelivery)
+ after 2000 ->
+ ok
+ end.
+
+subscribe_redelivery_policy(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+
+ ok = rabbit_ct_broker_helpers:set_policy(
+ Config, 0, <<"delivery-limit">>, <<".*">>, <<"queues">>,
+ [{<<"delivery-limit">>, 1}]),
+
+ publish(Ch, QQ),
+ wait_for_messages(Config, [[QQ, <<"1">>, <<"1">>, <<"0">>]]),
+ subscribe(Ch, QQ, false),
+
+ DCHeader = <<"x-delivery-count">>,
+ receive
+ {#'basic.deliver'{delivery_tag = DeliveryTag,
+ redelivered = false},
+ #amqp_msg{props = #'P_basic'{headers = H0}}} ->
+ ?assertMatch(undefined, rabbit_basic:header(DCHeader, H0)),
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DeliveryTag,
+ multiple = false,
+ requeue = true})
+ end,
+
+ wait_for_messages(Config, [[QQ, <<"1">>, <<"0">>, <<"1">>]]),
+ receive
+ {#'basic.deliver'{delivery_tag = DeliveryTag1,
+ redelivered = true},
+ #amqp_msg{props = #'P_basic'{headers = H1}}} ->
+ ?assertMatch({DCHeader, _, 1}, rabbit_basic:header(DCHeader, H1)),
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DeliveryTag1,
+ multiple = false,
+ requeue = true})
+ end,
+
+ wait_for_messages(Config, [[QQ, <<"0">>, <<"0">>, <<"0">>]]),
+ receive
+ {#'basic.deliver'{redelivered = true}, #amqp_msg{}} ->
+ throw(unexpected_redelivery)
+ after 2000 ->
+ ok
+ end,
+ ok = rabbit_ct_broker_helpers:clear_policy(Config, 0, <<"delivery-limit">>).
+
+subscribe_redelivery_limit_with_dead_letter(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ DLX = <<"subcribe_redelivery_limit_with_dead_letter_dlx">>,
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>},
+ {<<"x-delivery-limit">>, long, 1},
+ {<<"x-dead-letter-exchange">>, longstr, <<>>},
+ {<<"x-dead-letter-routing-key">>, longstr, DLX}
+ ])),
+ ?assertEqual({'queue.declare_ok', DLX, 0, 0},
+ declare(Ch, DLX, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+
+ publish(Ch, QQ),
+ wait_for_messages(Config, [[QQ, <<"1">>, <<"1">>, <<"0">>]]),
+ subscribe(Ch, QQ, false),
+
+ DCHeader = <<"x-delivery-count">>,
+ receive
+ {#'basic.deliver'{delivery_tag = DeliveryTag,
+ redelivered = false},
+ #amqp_msg{props = #'P_basic'{headers = H0}}} ->
+ ?assertMatch(undefined, rabbit_basic:header(DCHeader, H0)),
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DeliveryTag,
+ multiple = false,
+ requeue = true})
+ end,
+
+ wait_for_messages(Config, [[QQ, <<"1">>, <<"0">>, <<"1">>]]),
+ receive
+ {#'basic.deliver'{delivery_tag = DeliveryTag1,
+ redelivered = true},
+ #amqp_msg{props = #'P_basic'{headers = H1}}} ->
+ ?assertMatch({DCHeader, _, 1}, rabbit_basic:header(DCHeader, H1)),
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DeliveryTag1,
+ multiple = false,
+ requeue = true})
+ end,
+
+ wait_for_messages(Config, [[QQ, <<"0">>, <<"0">>, <<"0">>]]),
+ wait_for_messages(Config, [[DLX, <<"1">>, <<"1">>, <<"0">>]]).
+
+consume_redelivery_count(Config) ->
+ [Server | _] = Servers = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+ RaName = ra_name(QQ),
+ publish(Ch, QQ),
+ wait_for_messages_ready(Servers, RaName, 1),
+ wait_for_messages_pending_ack(Servers, RaName, 0),
+
+ DCHeader = <<"x-delivery-count">>,
+
+ {#'basic.get_ok'{delivery_tag = DeliveryTag,
+ redelivered = false},
+ #amqp_msg{props = #'P_basic'{headers = H0}}} =
+ amqp_channel:call(Ch, #'basic.get'{queue = QQ,
+ no_ack = false}),
+ ?assertMatch({DCHeader, _, 0}, rabbit_basic:header(DCHeader, H0)),
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DeliveryTag,
+ multiple = false,
+ requeue = true}),
+ %% wait for requeuing
+ timer:sleep(500),
+
+ {#'basic.get_ok'{delivery_tag = DeliveryTag1,
+ redelivered = true},
+ #amqp_msg{props = #'P_basic'{headers = H1}}} =
+ amqp_channel:call(Ch, #'basic.get'{queue = QQ,
+ no_ack = false}),
+ ?assertMatch({DCHeader, _, 1}, rabbit_basic:header(DCHeader, H1)),
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DeliveryTag1,
+ multiple = false,
+ requeue = true}),
+
+ {#'basic.get_ok'{delivery_tag = DeliveryTag2,
+ redelivered = true},
+ #amqp_msg{props = #'P_basic'{headers = H2}}} =
+ amqp_channel:call(Ch, #'basic.get'{queue = QQ,
+ no_ack = false}),
+ ?assertMatch({DCHeader, _, 2}, rabbit_basic:header(DCHeader, H2)),
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DeliveryTag2,
+ multiple = false,
+ requeue = true}),
+ ok.
+
+message_bytes_metrics(Config) ->
+ [Server | _] = Servers = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+
+ RaName = ra_name(QQ),
+ {ok, _, {_, Leader}} = ra:members({RaName, Server}),
+ QRes = rabbit_misc:r(<<"/">>, queue, QQ),
+
+ publish(Ch, QQ),
+
+ wait_for_messages_ready(Servers, RaName, 1),
+ wait_for_messages_pending_ack(Servers, RaName, 0),
+ wait_until(fun() ->
+ {3, 3, 0} == get_message_bytes(Leader, QRes)
+ end),
+
+ subscribe(Ch, QQ, false),
+
+ wait_for_messages_ready(Servers, RaName, 0),
+ wait_for_messages_pending_ack(Servers, RaName, 1),
+ wait_until(fun() ->
+ {3, 0, 3} == get_message_bytes(Leader, QRes)
+ end),
+
+ receive
+ {#'basic.deliver'{delivery_tag = DeliveryTag,
+ redelivered = false}, _} ->
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DeliveryTag,
+ multiple = false,
+ requeue = false}),
+ wait_for_messages_ready(Servers, RaName, 0),
+ wait_for_messages_pending_ack(Servers, RaName, 0),
+ wait_until(fun() ->
+ {0, 0, 0} == get_message_bytes(Leader, QRes)
+ end)
+ end,
+
+ %% Let's publish and then close the consumer channel. Messages must be
+ %% returned to the queue
+ publish(Ch, QQ),
+
+ wait_for_messages_ready(Servers, RaName, 0),
+ wait_for_messages_pending_ack(Servers, RaName, 1),
+ wait_until(fun() ->
+ {3, 0, 3} == get_message_bytes(Leader, QRes)
+ end),
+
+ rabbit_ct_client_helpers:close_channel(Ch),
+
+ wait_for_messages_ready(Servers, RaName, 1),
+ wait_for_messages_pending_ack(Servers, RaName, 0),
+ wait_until(fun() ->
+ {3, 3, 0} == get_message_bytes(Leader, QRes)
+ end),
+ ok.
+
+memory_alarm_rolls_wal(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ WalDataDir = rpc:call(Server, ra_env, wal_data_dir, []),
+ [Wal0] = filelib:wildcard(WalDataDir ++ "/*.wal"),
+ rabbit_ct_broker_helpers:set_alarm(Config, Server, memory),
+ rabbit_ct_helpers:await_condition(
+ fun() -> rabbit_ct_broker_helpers:get_alarms(Config, Server) =/= [] end
+ ),
+ timer:sleep(1000),
+ [Wal1] = filelib:wildcard(WalDataDir ++ "/*.wal"),
+ ?assert(Wal0 =/= Wal1),
+    %% a roll-over shouldn't happen if we trigger a new alarm within
+    %% min_wal_roll_over_interval
+ rabbit_ct_broker_helpers:set_alarm(Config, Server, memory),
+ rabbit_ct_helpers:await_condition(
+ fun() -> rabbit_ct_broker_helpers:get_alarms(Config, Server) =/= [] end
+ ),
+ timer:sleep(1000),
+ [Wal2] = filelib:wildcard(WalDataDir ++ "/*.wal"),
+ ?assert(Wal1 == Wal2),
+ ok = rpc:call(Server, rabbit_alarm, clear_alarm,
+ [{{resource_limit, memory, Server}, []}]),
+ timer:sleep(1000),
+ ok.
+
+queue_length_limit_drop_head(Config) ->
+ [Server | _] = Servers = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>},
+ {<<"x-max-length">>, long, 1}])),
+
+ RaName = ra_name(QQ),
+ ok = amqp_channel:cast(Ch,
+ #'basic.publish'{routing_key = QQ},
+ #amqp_msg{props = #'P_basic'{delivery_mode = 2},
+ payload = <<"msg1">>}),
+ ok = amqp_channel:cast(Ch,
+ #'basic.publish'{routing_key = QQ},
+ #amqp_msg{props = #'P_basic'{delivery_mode = 2},
+ payload = <<"msg2">>}),
+ wait_for_consensus(QQ, Config),
+ wait_for_messages_ready(Servers, RaName, 1),
+ wait_for_messages_pending_ack(Servers, RaName, 0),
+ wait_for_messages_total(Servers, RaName, 1),
+ ?assertMatch({#'basic.get_ok'{}, #amqp_msg{payload = <<"msg2">>}},
+ amqp_channel:call(Ch, #'basic.get'{queue = QQ,
+ no_ack = true})).
+
+queue_length_limit_reject_publish(Config) ->
+ [Server | _] = Servers = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ RaName = ra_name(QQ),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>},
+ {<<"x-max-length">>, long, 1},
+ {<<"x-overflow">>, longstr, <<"reject-publish">>}])),
+
+ #'confirm.select_ok'{} = amqp_channel:call(Ch, #'confirm.select'{}),
+ ok = publish_confirm(Ch, QQ),
+ ok = publish_confirm(Ch, QQ),
+    %% give the channel some time to process the async reject_publish notification;
+    %% now that we are over the limit publishing should start failing
+ wait_for_messages_total(Servers, RaName, 2),
+ fail = publish_confirm(Ch, QQ),
+ %% remove all messages
+ ?assertMatch({#'basic.get_ok'{}, #amqp_msg{payload = _}},
+ amqp_channel:call(Ch, #'basic.get'{queue = QQ,
+ no_ack = true})),
+ ?assertMatch({#'basic.get_ok'{}, #amqp_msg{payload = _}},
+ amqp_channel:call(Ch, #'basic.get'{queue = QQ,
+ no_ack = true})),
+ %% publish should be allowed again now
+ ok = publish_confirm(Ch, QQ),
+ ok.
+
+queue_length_in_memory_limit_basic_get(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>},
+ {<<"x-max-in-memory-length">>, long, 1}])),
+
+ RaName = ra_name(QQ),
+ Msg1 = <<"msg1">>,
+ ok = amqp_channel:cast(Ch,
+ #'basic.publish'{routing_key = QQ},
+ #amqp_msg{props = #'P_basic'{delivery_mode = 2},
+ payload = Msg1}),
+ ok = amqp_channel:cast(Ch,
+ #'basic.publish'{routing_key = QQ},
+ #amqp_msg{props = #'P_basic'{delivery_mode = 2},
+ payload = <<"msg2">>}),
+
+ wait_for_messages(Config, [[QQ, <<"2">>, <<"2">>, <<"0">>]]),
+
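+    %% rabbit_fifo:query_in_memory_usage/1 returns the {Count, Bytes} of messages held in memory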
+ ?assertEqual([{1, byte_size(Msg1)}],
+ dirty_query([Server], RaName, fun rabbit_fifo:query_in_memory_usage/1)),
+
+ ?assertMatch({#'basic.get_ok'{}, #amqp_msg{payload = Msg1}},
+ amqp_channel:call(Ch, #'basic.get'{queue = QQ,
+ no_ack = true})),
+ ?assertMatch({#'basic.get_ok'{}, #amqp_msg{payload = <<"msg2">>}},
+ amqp_channel:call(Ch, #'basic.get'{queue = QQ,
+ no_ack = true})).
+
+queue_length_in_memory_limit_subscribe(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>},
+ {<<"x-max-in-memory-length">>, long, 1}])),
+
+ RaName = ra_name(QQ),
+ Msg1 = <<"msg1">>,
+ Msg2 = <<"msg11">>,
+ publish(Ch, QQ, Msg1),
+ publish(Ch, QQ, Msg2),
+ wait_for_messages(Config, [[QQ, <<"2">>, <<"2">>, <<"0">>]]),
+
+ ?assertEqual([{1, byte_size(Msg1)}],
+ dirty_query([Server], RaName, fun rabbit_fifo:query_in_memory_usage/1)),
+
+ subscribe(Ch, QQ, false),
+ receive
+ {#'basic.deliver'{delivery_tag = DeliveryTag1,
+ redelivered = false},
+ #amqp_msg{payload = Msg1}} ->
+ amqp_channel:cast(Ch, #'basic.ack'{delivery_tag = DeliveryTag1,
+ multiple = false})
+ end,
+ ?assertEqual([{0, 0}],
+ dirty_query([Server], RaName, fun rabbit_fifo:query_in_memory_usage/1)),
+ receive
+ {#'basic.deliver'{delivery_tag = DeliveryTag2,
+ redelivered = false},
+ #amqp_msg{payload = Msg2}} ->
+ amqp_channel:cast(Ch, #'basic.ack'{delivery_tag = DeliveryTag2,
+ multiple = false})
+ end.
+
+queue_length_in_memory_limit(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>},
+ {<<"x-max-in-memory-length">>, long, 2}])),
+
+ RaName = ra_name(QQ),
+ Msg1 = <<"msg1">>,
+ Msg2 = <<"msg11">>,
+ Msg3 = <<"msg111">>,
+ Msg4 = <<"msg1111">>,
+
+ publish(Ch, QQ, Msg1),
+ publish(Ch, QQ, Msg2),
+ publish(Ch, QQ, Msg3),
+ wait_for_messages(Config, [[QQ, <<"3">>, <<"3">>, <<"0">>]]),
+
+ ?assertEqual([{2, byte_size(Msg1) + byte_size(Msg2)}],
+ dirty_query([Server], RaName, fun rabbit_fifo:query_in_memory_usage/1)),
+
+ ?assertMatch({#'basic.get_ok'{}, #amqp_msg{payload = Msg1}},
+ amqp_channel:call(Ch, #'basic.get'{queue = QQ,
+ no_ack = true})),
+
+ wait_for_messages(Config, [[QQ, <<"2">>, <<"2">>, <<"0">>]]),
+ publish(Ch, QQ, Msg4),
+ wait_for_messages(Config, [[QQ, <<"3">>, <<"3">>, <<"0">>]]),
+
+ ?assertEqual([{2, byte_size(Msg2) + byte_size(Msg4)}],
+ dirty_query([Server], RaName, fun rabbit_fifo:query_in_memory_usage/1)).
+
+queue_length_in_memory_limit_returns(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>},
+ {<<"x-max-in-memory-length">>, long, 2}])),
+
+ RaName = ra_name(QQ),
+ Msg1 = <<"msg1">>,
+ Msg2 = <<"msg11">>,
+ Msg3 = <<"msg111">>,
+ Msg4 = <<"msg111">>,
+ publish(Ch, QQ, Msg1),
+ publish(Ch, QQ, Msg2),
+ wait_for_messages(Config, [[QQ, <<"2">>, <<"2">>, <<"0">>]]),
+
+ ?assertEqual([{2, byte_size(Msg1) + byte_size(Msg2)}],
+ dirty_query([Server], RaName, fun rabbit_fifo:query_in_memory_usage/1)),
+
+ ?assertMatch({#'basic.get_ok'{}, #amqp_msg{payload = Msg1}},
+ amqp_channel:call(Ch, #'basic.get'{queue = QQ,
+ no_ack = false})),
+
+ {#'basic.get_ok'{delivery_tag = DTag2}, #amqp_msg{payload = Msg2}} =
+ amqp_channel:call(Ch, #'basic.get'{queue = QQ,
+ no_ack = false}),
+
+ publish(Ch, QQ, Msg3),
+ publish(Ch, QQ, Msg4),
+
+    %% Ensure that returns are subject to in-memory limits too
+ wait_for_messages(Config, [[QQ, <<"4">>, <<"2">>, <<"2">>]]),
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DTag2,
+ multiple = true,
+ requeue = true}),
+ wait_for_messages(Config, [[QQ, <<"4">>, <<"4">>, <<"0">>]]),
+
+ ?assertEqual([{2, byte_size(Msg3) + byte_size(Msg4)}],
+ dirty_query([Server], RaName, fun rabbit_fifo:query_in_memory_usage/1)).
+
+queue_length_in_memory_bytes_limit_basic_get(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>},
+ {<<"x-max-in-memory-bytes">>, long, 6}])),
+
+ RaName = ra_name(QQ),
+ Msg1 = <<"msg1">>,
+ ok = amqp_channel:cast(Ch,
+ #'basic.publish'{routing_key = QQ},
+ #amqp_msg{props = #'P_basic'{delivery_mode = 2},
+ payload = Msg1}),
+ ok = amqp_channel:cast(Ch,
+ #'basic.publish'{routing_key = QQ},
+ #amqp_msg{props = #'P_basic'{delivery_mode = 2},
+ payload = <<"msg2">>}),
+
+ wait_for_messages(Config, [[QQ, <<"2">>, <<"2">>, <<"0">>]]),
+
+ ?assertEqual([{1, byte_size(Msg1)}],
+ dirty_query([Server], RaName, fun rabbit_fifo:query_in_memory_usage/1)),
+
+ ?assertMatch({#'basic.get_ok'{}, #amqp_msg{payload = Msg1}},
+ amqp_channel:call(Ch, #'basic.get'{queue = QQ,
+ no_ack = true})),
+ ?assertMatch({#'basic.get_ok'{}, #amqp_msg{payload = <<"msg2">>}},
+ amqp_channel:call(Ch, #'basic.get'{queue = QQ,
+ no_ack = true})).
+
+queue_length_in_memory_bytes_limit_subscribe(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>},
+ {<<"x-max-in-memory-bytes">>, long, 6}])),
+
+ RaName = ra_name(QQ),
+ Msg1 = <<"msg1">>,
+ Msg2 = <<"msg11">>,
+ publish(Ch, QQ, Msg1),
+ publish(Ch, QQ, Msg2),
+ wait_for_messages(Config, [[QQ, <<"2">>, <<"2">>, <<"0">>]]),
+
+ ?assertEqual([{1, byte_size(Msg1)}],
+ dirty_query([Server], RaName, fun rabbit_fifo:query_in_memory_usage/1)),
+
+ subscribe(Ch, QQ, false),
+ receive
+ {#'basic.deliver'{delivery_tag = DeliveryTag1,
+ redelivered = false},
+ #amqp_msg{payload = Msg1}} ->
+ amqp_channel:cast(Ch, #'basic.ack'{delivery_tag = DeliveryTag1,
+ multiple = false})
+ end,
+ ?assertEqual([{0, 0}],
+ dirty_query([Server], RaName, fun rabbit_fifo:query_in_memory_usage/1)),
+ receive
+ {#'basic.deliver'{delivery_tag = DeliveryTag2,
+ redelivered = false},
+ #amqp_msg{payload = Msg2}} ->
+ amqp_channel:cast(Ch, #'basic.ack'{delivery_tag = DeliveryTag2,
+ multiple = false})
+ end.
+
+queue_length_in_memory_bytes_limit(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>},
+ {<<"x-max-in-memory-bytes">>, long, 12}])),
+
+ RaName = ra_name(QQ),
+ Msg1 = <<"msg1">>,
+ Msg2 = <<"msg11">>,
+ Msg3 = <<"msg111">>,
+ Msg4 = <<"msg1111">>,
+
+ publish(Ch, QQ, Msg1),
+ publish(Ch, QQ, Msg2),
+ publish(Ch, QQ, Msg3),
+ wait_for_messages(Config, [[QQ, <<"3">>, <<"3">>, <<"0">>]]),
+
+ ?assertEqual([{2, byte_size(Msg1) + byte_size(Msg2)}],
+ dirty_query([Server], RaName, fun rabbit_fifo:query_in_memory_usage/1)),
+
+ ?assertMatch({#'basic.get_ok'{}, #amqp_msg{payload = Msg1}},
+ amqp_channel:call(Ch, #'basic.get'{queue = QQ,
+ no_ack = true})),
+
+ wait_for_messages(Config, [[QQ, <<"2">>, <<"2">>, <<"0">>]]),
+ publish(Ch, QQ, Msg4),
+ wait_for_messages(Config, [[QQ, <<"3">>, <<"3">>, <<"0">>]]),
+
+ ?assertEqual([{2, byte_size(Msg2) + byte_size(Msg4)}],
+ dirty_query([Server], RaName, fun rabbit_fifo:query_in_memory_usage/1)).
+
+queue_length_in_memory_purge(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>},
+ {<<"x-max-in-memory-length">>, long, 2}])),
+
+ RaName = ra_name(QQ),
+ Msg1 = <<"msg1">>,
+ Msg2 = <<"msg11">>,
+ Msg3 = <<"msg111">>,
+
+ publish(Ch, QQ, Msg1),
+ publish(Ch, QQ, Msg2),
+ publish(Ch, QQ, Msg3),
+ wait_for_messages(Config, [[QQ, <<"3">>, <<"3">>, <<"0">>]]),
+
+ ?assertEqual([{2, byte_size(Msg1) + byte_size(Msg2)}],
+ dirty_query([Server], RaName, fun rabbit_fifo:query_in_memory_usage/1)),
+
+ {'queue.purge_ok', 3} = amqp_channel:call(Ch, #'queue.purge'{queue = QQ}),
+
+ ?assertEqual([{0, 0}],
+ dirty_query([Server], RaName, fun rabbit_fifo:query_in_memory_usage/1)).
+
+peek(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>},
+ {<<"x-max-in-memory-length">>, long, 2}])),
+
+ Msg1 = <<"msg1">>,
+ Msg2 = <<"msg11">>,
+
+ QName = rabbit_misc:r(<<"/">>, queue, QQ),
+ ?assertMatch({error, no_message_at_pos},
+ rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_quorum_queue,
+ peek, [1, QName])),
+ publish(Ch, QQ, Msg1),
+ publish(Ch, QQ, Msg2),
+ wait_for_messages(Config, [[QQ, <<"2">>, <<"2">>, <<"0">>]]),
+
+ ?assertMatch({ok, [_|_]},
+ rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_quorum_queue,
+ peek, [1, QName])),
+ ?assertMatch({ok, [_|_]},
+ rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_quorum_queue,
+ peek, [2, QName])),
+ ?assertMatch({error, no_message_at_pos},
+ rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_quorum_queue,
+ peek, [3, QName])),
+
+ wait_for_messages(Config, [[QQ, <<"2">>, <<"2">>, <<"0">>]]),
+ ok.
+
+in_memory(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+
+ RaName = ra_name(QQ),
+ Msg1 = <<"msg1">>,
+ Msg2 = <<"msg11">>,
+
+ publish(Ch, QQ, Msg1),
+
+ wait_for_messages(Config, [[QQ, <<"1">>, <<"1">>, <<"0">>]]),
+ ?assertEqual([{1, byte_size(Msg1)}],
+ dirty_query([Server], RaName, fun rabbit_fifo:query_in_memory_usage/1)),
+
+ subscribe(Ch, QQ, false),
+
+ wait_for_messages(Config, [[QQ, <<"1">>, <<"0">>, <<"1">>]]),
+ ?assertEqual([{0, 0}],
+ dirty_query([Server], RaName, fun rabbit_fifo:query_in_memory_usage/1)),
+
+ publish(Ch, QQ, Msg2),
+
+ wait_for_messages(Config, [[QQ, <<"2">>, <<"0">>, <<"2">>]]),
+ ?assertEqual([{0, 0}],
+ dirty_query([Server], RaName, fun rabbit_fifo:query_in_memory_usage/1)),
+
+ receive
+ {#'basic.deliver'{delivery_tag = DeliveryTag}, #amqp_msg{}} ->
+ amqp_channel:cast(Ch, #'basic.ack'{delivery_tag = DeliveryTag,
+ multiple = false})
+ end,
+
+ wait_for_messages(Config, [[QQ, <<"1">>, <<"0">>, <<"1">>]]),
+ ?assertEqual([{0, 0}],
+ dirty_query([Server], RaName, fun rabbit_fifo:query_in_memory_usage/1)).
+
+consumer_metrics(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch1 = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch1, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+ subscribe(Ch1, QQ, false),
+
+ RaName = ra_name(QQ),
+ {ok, _, {_, Leader}} = ra:members({RaName, Server}),
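+    %% give the metrics emission a chance to run before querying the table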
+ timer:sleep(5000),
+ QNameRes = rabbit_misc:r(<<"/">>, queue, QQ),
+ [{_, PropList, _}] = rpc:call(Leader, ets, lookup, [queue_metrics, QNameRes]),
+ ?assertMatch([{consumers, 1}], lists:filter(fun({Key, _}) ->
+ Key == consumers
+ end, PropList)).
+
+delete_if_empty(Config) ->
+ Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+ publish(Ch, QQ),
+ wait_for_messages(Config, [[QQ, <<"1">>, <<"1">>, <<"0">>]]),
+ %% Try to delete the quorum queue
+ ?assertExit({{shutdown, {connection_closing, {server_initiated_close, 540, _}}}, _},
+ amqp_channel:call(Ch, #'queue.delete'{queue = QQ,
+ if_empty = true})).
+
+delete_if_unused(Config) ->
+ Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+ publish(Ch, QQ),
+ wait_for_messages(Config, [[QQ, <<"1">>, <<"1">>, <<"0">>]]),
+ %% Try to delete the quorum queue
+ ?assertExit({{shutdown, {connection_closing, {server_initiated_close, 540, _}}}, _},
+ amqp_channel:call(Ch, #'queue.delete'{queue = QQ,
+ if_unused = true})).
+
+queue_ttl(Config) ->
+ Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>},
+ {<<"x-expires">>, long, 1000}])),
+ timer:sleep(5500),
+ %% check queue no longer exists
+ ?assertExit(
+ {{shutdown,
+ {server_initiated_close,404,
+ <<"NOT_FOUND - no queue 'queue_ttl' in vhost '/'">>}},
+ _},
+ amqp_channel:call(Ch, #'queue.declare'{queue = QQ,
+ passive = true,
+ durable = true,
+ auto_delete = false,
+ arguments = [{<<"x-queue-type">>, longstr, <<"quorum">>},
+ {<<"x-expires">>, long, 1000}]})),
+ ok.
+
+consumer_priorities(Config) ->
+ Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ qos(Ch, 2, false),
+ QQ = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', QQ, 0, 0},
+ declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+
+ %% consumer with default priority
+ Tag1 = <<"ctag1">>,
+ amqp_channel:subscribe(Ch, #'basic.consume'{queue = QQ,
+ no_ack = false,
+ consumer_tag = Tag1},
+ self()),
+ receive
+ #'basic.consume_ok'{consumer_tag = Tag1} ->
+ ok
+ end,
+ %% consumer with higher priority
+ Tag2 = <<"ctag2">>,
+ amqp_channel:subscribe(Ch, #'basic.consume'{queue = QQ,
+ arguments = [{"x-priority", long, 10}],
+ no_ack = false,
+ consumer_tag = Tag2},
+ self()),
+ receive
+ #'basic.consume_ok'{consumer_tag = Tag2} ->
+ ok
+ end,
+
+ publish(Ch, QQ),
+ %% Tag2 should receive the message
+ DT1 = receive
+ {#'basic.deliver'{delivery_tag = D1,
+ consumer_tag = Tag2}, _} ->
+ D1
+ after 5000 ->
+ flush(100),
+ ct:fail("basic.deliver timeout")
+ end,
+ publish(Ch, QQ),
+ %% Tag2 should receive the message
+ receive
+ {#'basic.deliver'{delivery_tag = _,
+ consumer_tag = Tag2}, _} ->
+ ok
+ after 5000 ->
+ flush(100),
+ ct:fail("basic.deliver timeout")
+ end,
+
+ publish(Ch, QQ),
+    %% Tag1 should receive the message as Tag2 has maxed out its qos prefetch
+ receive
+ {#'basic.deliver'{delivery_tag = _,
+ consumer_tag = Tag1}, _} ->
+ ok
+ after 5000 ->
+ flush(100),
+ ct:fail("basic.deliver timeout")
+ end,
+
+ ok = amqp_channel:cast(Ch, #'basic.ack'{delivery_tag = DT1,
+ multiple = false}),
+ publish(Ch, QQ),
+ %% Tag2 should receive the message
+ receive
+ {#'basic.deliver'{delivery_tag = _,
+ consumer_tag = Tag2}, _} ->
+ ok
+ after 5000 ->
+ flush(100),
+ ct:fail("basic.deliver timeout")
+ end,
+
+ ok.
+
+%%----------------------------------------------------------------------------
+
+declare(Ch, Q) ->
+ declare(Ch, Q, []).
+
+declare(Ch, Q, Args) ->
+ amqp_channel:call(Ch, #'queue.declare'{queue = Q,
+ durable = true,
+ auto_delete = false,
+ arguments = Args}).
+
+assert_queue_type(Server, Q, Expected) ->
+ Actual = get_queue_type(Server, Q),
+ Expected = Actual.
+
+get_queue_type(Server, Q0) ->
+ QNameRes = rabbit_misc:r(<<"/">>, queue, Q0),
+ {ok, Q1} = rpc:call(Server, rabbit_amqqueue, lookup, [QNameRes]),
+ amqqueue:get_type(Q1).
+
+publish_many(Ch, Queue, Count) ->
+ [publish(Ch, Queue) || _ <- lists:seq(1, Count)].
+
+publish(Ch, Queue) ->
+ publish(Ch, Queue, <<"msg">>).
+
+publish(Ch, Queue, Msg) ->
+ ok = amqp_channel:cast(Ch,
+ #'basic.publish'{routing_key = Queue},
+ #amqp_msg{props = #'P_basic'{delivery_mode = 2},
+ payload = Msg}).
+
+consume(Ch, Queue, NoAck) ->
+ {GetOk, _} = Reply = amqp_channel:call(Ch, #'basic.get'{queue = Queue,
+ no_ack = NoAck}),
+ ?assertMatch({#'basic.get_ok'{}, #amqp_msg{payload = <<"msg">>}}, Reply),
+ GetOk#'basic.get_ok'.delivery_tag.
+
+consume_empty(Ch, Queue, NoAck) ->
+ ?assertMatch(#'basic.get_empty'{},
+ amqp_channel:call(Ch, #'basic.get'{queue = Queue,
+ no_ack = NoAck})).
+
+subscribe(Ch, Queue, NoAck) ->
+ amqp_channel:subscribe(Ch, #'basic.consume'{queue = Queue,
+ no_ack = NoAck,
+ consumer_tag = <<"ctag">>},
+ self()),
+ receive
+ #'basic.consume_ok'{consumer_tag = <<"ctag">>} ->
+ ok
+ end.
+
+qos(Ch, Prefetch, Global) ->
+ ?assertMatch(#'basic.qos_ok'{},
+ amqp_channel:call(Ch, #'basic.qos'{global = Global,
+ prefetch_count = Prefetch})).
+
+receive_basic_deliver(Redelivered) ->
+ receive
+ {#'basic.deliver'{redelivered = R}, _} when R == Redelivered ->
+ ok
+ end.
+
+wait_for_cleanup(Server, Channel, Number) ->
+ wait_for_cleanup(Server, Channel, Number, 60).
+
+wait_for_cleanup(Server, Channel, Number, 0) ->
+ ?assertEqual(length(rpc:call(Server, rabbit_channel, list_queue_states, [Channel])),
+ Number);
+wait_for_cleanup(Server, Channel, Number, N) ->
+ case length(rpc:call(Server, rabbit_channel, list_queue_states, [Channel])) of
+ Length when Number == Length ->
+ ok;
+ _ ->
+ timer:sleep(500),
+ wait_for_cleanup(Server, Channel, Number, N - 1)
+ end.
+
+wait_until(Condition) ->
+ wait_until(Condition, 60).
+
+wait_until(Condition, 0) ->
+ ?assertEqual(true, Condition());
+wait_until(Condition, N) ->
+ case Condition() of
+ true ->
+ ok;
+ _ ->
+ timer:sleep(500),
+ wait_until(Condition, N - 1)
+ end.
+
+
+force_leader_change([Server | _] = Servers, Q) ->
+ RaName = ra_name(Q),
+ {ok, _, {_, Leader}} = ra:members({RaName, Server}),
+ [F1, _] = Servers -- [Leader],
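+    %% trigger an election on a follower; retry if the old leader wins again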
+ ok = rpc:call(F1, ra, trigger_election, [{RaName, F1}]),
+ case ra:members({RaName, Leader}) of
+ {ok, _, {_, Leader}} ->
+ %% Leader has been re-elected
+ force_leader_change(Servers, Q);
+ {ok, _, _} ->
+ %% Leader has changed
+ ok
+ end.
+
+delete_queues() ->
+ [rabbit_amqqueue:delete(Q, false, false, <<"dummy">>)
+ || Q <- rabbit_amqqueue:list()].
+
+stop_node(Config, Server) ->
+ rabbit_ct_broker_helpers:rabbitmqctl(Config, Server, ["stop"]).
+
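+%% returns {TotalBytes, ReadyBytes, UnackedBytes} for the queue on the given node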
+get_message_bytes(Leader, QRes) ->
+ case rpc:call(Leader, ets, lookup, [queue_metrics, QRes]) of
+ [{QRes, Props, _}] ->
+ {proplists:get_value(message_bytes, Props),
+ proplists:get_value(message_bytes_ready, Props),
+ proplists:get_value(message_bytes_unacknowledged, Props)};
+ _ ->
+ []
+ end.
+
+wait_for_consensus(Name, Config) ->
+ Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ RaName = ra_name(Name),
+ {ok, _, _} = ra:members({RaName, Server}).
+
+queue_names(Records) ->
+ [begin
+ #resource{name = Name} = amqqueue:get_name(Q),
+ Name
+ end || Q <- Records].
diff --git a/deps/rabbit/test/quorum_queue_utils.erl b/deps/rabbit/test/quorum_queue_utils.erl
new file mode 100644
index 0000000000..224abeeeeb
--- /dev/null
+++ b/deps/rabbit/test/quorum_queue_utils.erl
@@ -0,0 +1,117 @@
+-module(quorum_queue_utils).
+
+-include_lib("eunit/include/eunit.hrl").
+
+-export([
+ wait_for_messages_ready/3,
+ wait_for_messages_pending_ack/3,
+ wait_for_messages_total/3,
+ wait_for_messages/2,
+ dirty_query/3,
+ ra_name/1,
+ fifo_machines_use_same_version/1,
+ fifo_machines_use_same_version/2,
+ is_mixed_versions/0
+ ]).
+
+wait_for_messages_ready(Servers, QName, Ready) ->
+ wait_for_messages(Servers, QName, Ready,
+ fun rabbit_fifo:query_messages_ready/1, 60).
+
+wait_for_messages_pending_ack(Servers, QName, Ready) ->
+ wait_for_messages(Servers, QName, Ready,
+ fun rabbit_fifo:query_messages_checked_out/1, 60).
+
+wait_for_messages_total(Servers, QName, Total) ->
+ wait_for_messages(Servers, QName, Total,
+ fun rabbit_fifo:query_messages_total/1, 60).
+
+wait_for_messages(Servers, QName, Number, Fun, 0) ->
+ Msgs = dirty_query(Servers, QName, Fun),
+ ?assertEqual([Number || _ <- lists:seq(1, length(Servers))], Msgs);
+wait_for_messages(Servers, QName, Number, Fun, N) ->
+ Msgs = dirty_query(Servers, QName, Fun),
+ ct:pal("Got messages ~p ~p", [QName, Msgs]),
+    %% hack to allow the check to succeed in mixed versions clusters if at
+    %% least one node matches the criteria, rather than requiring all
+    %% nodes to match
+ F = case is_mixed_versions() of
+ true ->
+ any;
+ false ->
+ all
+ end,
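+    %% F is bound to the atom 'any' or 'all', so lists:F(...) below
+    %% dispatches dynamically to lists:any/2 or lists:all/2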
+ case lists:F(fun(C) when is_integer(C) ->
+ C == Number;
+ (_) ->
+ false
+ end, Msgs) of
+ true ->
+ ok;
+ _ ->
+ timer:sleep(500),
+ wait_for_messages(Servers, QName, Number, Fun, N - 1)
+ end.
+
+wait_for_messages(Config, Stats) ->
+ wait_for_messages(Config, lists:sort(Stats), 60).
+
+wait_for_messages(Config, Stats, 0) ->
+ ?assertEqual(Stats,
+ lists:sort(
+ filter_queues(Stats,
+ rabbit_ct_broker_helpers:rabbitmqctl_list(
+ Config, 0, ["list_queues", "name", "messages", "messages_ready",
+ "messages_unacknowledged"]))));
+wait_for_messages(Config, Stats, N) ->
+ case lists:sort(
+ filter_queues(Stats,
+ rabbit_ct_broker_helpers:rabbitmqctl_list(
+ Config, 0, ["list_queues", "name", "messages", "messages_ready",
+ "messages_unacknowledged"]))) of
+ Stats0 when Stats0 == Stats ->
+ ok;
+ _ ->
+ timer:sleep(500),
+ wait_for_messages(Config, Stats, N - 1)
+ end.
+
+dirty_query(Servers, QName, Fun) ->
+ lists:map(
+ fun(N) ->
+ case rpc:call(N, ra, local_query, [{QName, N}, Fun]) of
+ {ok, {_, Msgs}, _} ->
+ Msgs;
+ _E ->
+ undefined
+ end
+ end, Servers).
+
+ra_name(Q) ->
+ binary_to_atom(<<"%2F_", Q/binary>>, utf8).
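+
+%% Note: <<"%2F">> is the percent-encoded default vhost <<"/">>, so e.g.
+%% ra_name(<<"qq1">>) returns the atom '%2F_qq1'.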
+
+filter_queues(Expected, Got) ->
+ Keys = [K || [K, _, _, _] <- Expected],
+ lists:filter(fun([K, _, _, _]) ->
+ lists:member(K, Keys)
+ end, Got).
+
+fifo_machines_use_same_version(Config) ->
+ Nodenames = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ fifo_machines_use_same_version(Config, Nodenames).
+
+fifo_machines_use_same_version(Config, Nodenames)
+ when length(Nodenames) >= 1 ->
+ [MachineAVersion | OtherMachinesVersions] =
+ [(catch rabbit_ct_broker_helpers:rpc(
+ Config, Nodename,
+ rabbit_fifo, version, []))
+ || Nodename <- Nodenames],
+ lists:all(fun(V) -> V =:= MachineAVersion end, OtherMachinesVersions).
+
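+%% SECONDARY_UMBRELLA is presumably set by the test environment when running
+%% against a mixed-versions cluster; os:getenv/1 returns false when the
+%% variable is unset, so any set value means a mixed-versions run.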
+is_mixed_versions() ->
+ not (false == os:getenv("SECONDARY_UMBRELLA")).
diff --git a/deps/rabbit/test/rabbit_auth_backend_context_propagation_mock.erl b/deps/rabbit/test/rabbit_auth_backend_context_propagation_mock.erl
new file mode 100644
index 0000000000..e721f5e0dd
--- /dev/null
+++ b/deps/rabbit/test/rabbit_auth_backend_context_propagation_mock.erl
@@ -0,0 +1,46 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2019-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+%% A mock authn/authz that records information during calls. For testing purposes only.
+
+-module(rabbit_auth_backend_context_propagation_mock).
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-behaviour(rabbit_authn_backend).
+-behaviour(rabbit_authz_backend).
+
+-export([user_login_authentication/2, user_login_authorization/2,
+ check_vhost_access/3, check_resource_access/4, check_topic_access/4,
+ state_can_expire/0,
+ get/1, init/0]).
+
+init() ->
+ ets:new(?MODULE, [set, public, named_table]).
+
+user_login_authentication(_, AuthProps) ->
+ ets:insert(?MODULE, {authentication, AuthProps}),
+ {ok, #auth_user{username = <<"dummy">>,
+ tags = [],
+ impl = none}}.
+
+user_login_authorization(_, _) ->
+ {ok, does_not_matter}.
+
+check_vhost_access(#auth_user{}, _VHostPath, AuthzData) ->
+ ets:insert(?MODULE, {vhost_access, AuthzData}),
+ true.
+check_resource_access(#auth_user{}, #resource{}, _Permission, AuthzContext) ->
+ ets:insert(?MODULE, {resource_access, AuthzContext}),
+ true.
+check_topic_access(#auth_user{}, #resource{}, _Permission, TopicContext) ->
+ ets:insert(?MODULE, {topic_access, TopicContext}),
+ true.
+
+state_can_expire() -> false.
+
+get(K) ->
+ ets:lookup(?MODULE, K).
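+
+%% e.g. after an authentication attempt a test can call
+%% rabbit_auth_backend_context_propagation_mock:get(authentication) to read
+%% back the AuthProps recorded above.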
diff --git a/deps/rabbit/test/rabbit_confirms_SUITE.erl b/deps/rabbit/test/rabbit_confirms_SUITE.erl
new file mode 100644
index 0000000000..331c3ca7c3
--- /dev/null
+++ b/deps/rabbit/test/rabbit_confirms_SUITE.erl
@@ -0,0 +1,154 @@
+-module(rabbit_confirms_SUITE).
+
+-compile(export_all).
+
+-export([
+ ]).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+%%%===================================================================
+%%% Common Test callbacks
+%%%===================================================================
+
+all() ->
+ [
+ {group, tests}
+ ].
+
+
+all_tests() ->
+ [
+ confirm,
+ reject,
+ remove_queue
+ ].
+
+groups() ->
+ [
+ {tests, [], all_tests()}
+ ].
+
+init_per_suite(Config) ->
+ Config.
+
+end_per_suite(_Config) ->
+ ok.
+
+init_per_group(_Group, Config) ->
+ Config.
+
+end_per_group(_Group, _Config) ->
+ ok.
+
+init_per_testcase(_TestCase, Config) ->
+ Config.
+
+end_per_testcase(_TestCase, _Config) ->
+ ok.
+
+%%%===================================================================
+%%% Test cases
+%%%===================================================================
+
+confirm(_Config) ->
+ XName = rabbit_misc:r(<<"/">>, exchange, <<"X">>),
+ QName = rabbit_misc:r(<<"/">>, queue, <<"Q">>),
+ QName2 = rabbit_misc:r(<<"/">>, queue, <<"Q2">>),
+ U0 = rabbit_confirms:init(),
+ ?assertEqual(0, rabbit_confirms:size(U0)),
+ ?assertEqual(undefined, rabbit_confirms:smallest(U0)),
+ ?assertEqual(true, rabbit_confirms:is_empty(U0)),
+
+ U1 = rabbit_confirms:insert(1, [QName], XName, U0),
+ ?assertEqual(1, rabbit_confirms:size(U1)),
+ ?assertEqual(1, rabbit_confirms:smallest(U1)),
+ ?assertEqual(false, rabbit_confirms:is_empty(U1)),
+
+ {[{1, XName}], U2} = rabbit_confirms:confirm([1], QName, U1),
+ ?assertEqual(0, rabbit_confirms:size(U2)),
+ ?assertEqual(undefined, rabbit_confirms:smallest(U2)),
+ ?assertEqual(true, rabbit_confirms:is_empty(U2)),
+
+ U3 = rabbit_confirms:insert(2, [QName], XName, U1),
+ ?assertEqual(2, rabbit_confirms:size(U3)),
+ ?assertEqual(1, rabbit_confirms:smallest(U3)),
+ ?assertEqual(false, rabbit_confirms:is_empty(U3)),
+
+ {[{1, XName}], U4} = rabbit_confirms:confirm([1], QName, U3),
+ ?assertEqual(1, rabbit_confirms:size(U4)),
+ ?assertEqual(2, rabbit_confirms:smallest(U4)),
+ ?assertEqual(false, rabbit_confirms:is_empty(U4)),
+
+ U5 = rabbit_confirms:insert(2, [QName, QName2], XName, U1),
+ ?assertEqual(2, rabbit_confirms:size(U5)),
+ ?assertEqual(1, rabbit_confirms:smallest(U5)),
+ ?assertEqual(false, rabbit_confirms:is_empty(U5)),
+
+ {[{1, XName}], U6} = rabbit_confirms:confirm([1, 2], QName, U5),
+ ?assertEqual(2, rabbit_confirms:smallest(U6)),
+
+ {[{2, XName}], U7} = rabbit_confirms:confirm([2], QName2, U6),
+ ?assertEqual(0, rabbit_confirms:size(U7)),
+ ?assertEqual(undefined, rabbit_confirms:smallest(U7)),
+
+
+ U8 = rabbit_confirms:insert(2, [QName], XName, U1),
+ {[{1, XName}, {2, XName}], _U9} = rabbit_confirms:confirm([1, 2], QName, U8),
+ ok.
+
+
+reject(_Config) ->
+ XName = rabbit_misc:r(<<"/">>, exchange, <<"X">>),
+ QName = rabbit_misc:r(<<"/">>, queue, <<"Q">>),
+ QName2 = rabbit_misc:r(<<"/">>, queue, <<"Q2">>),
+ U0 = rabbit_confirms:init(),
+ ?assertEqual(0, rabbit_confirms:size(U0)),
+ ?assertEqual(undefined, rabbit_confirms:smallest(U0)),
+ ?assertEqual(true, rabbit_confirms:is_empty(U0)),
+
+ U1 = rabbit_confirms:insert(1, [QName], XName, U0),
+
+ {ok, {1, XName}, U2} = rabbit_confirms:reject(1, U1),
+ {error, not_found} = rabbit_confirms:reject(1, U2),
+ ?assertEqual(0, rabbit_confirms:size(U2)),
+ ?assertEqual(undefined, rabbit_confirms:smallest(U2)),
+
+ U3 = rabbit_confirms:insert(2, [QName, QName2], XName, U1),
+
+ {ok, {1, XName}, U4} = rabbit_confirms:reject(1, U3),
+ {error, not_found} = rabbit_confirms:reject(1, U4),
+ ?assertEqual(1, rabbit_confirms:size(U4)),
+ ?assertEqual(2, rabbit_confirms:smallest(U4)),
+
+ {ok, {2, XName}, U5} = rabbit_confirms:reject(2, U3),
+ {error, not_found} = rabbit_confirms:reject(2, U5),
+ ?assertEqual(1, rabbit_confirms:size(U5)),
+ ?assertEqual(1, rabbit_confirms:smallest(U5)),
+
+ ok.
+
+remove_queue(_Config) ->
+ XName = rabbit_misc:r(<<"/">>, exchange, <<"X">>),
+ QName = rabbit_misc:r(<<"/">>, queue, <<"Q">>),
+ QName2 = rabbit_misc:r(<<"/">>, queue, <<"Q2">>),
+ U0 = rabbit_confirms:init(),
+
+ U1 = rabbit_confirms:insert(1, [QName, QName2], XName, U0),
+ U2 = rabbit_confirms:insert(2, [QName2], XName, U1),
+ {[{2, XName}], U3} = rabbit_confirms:remove_queue(QName2, U2),
+ ?assertEqual(1, rabbit_confirms:size(U3)),
+ ?assertEqual(1, rabbit_confirms:smallest(U3)),
+ {[{1, XName}], U4} = rabbit_confirms:remove_queue(QName, U3),
+ ?assertEqual(0, rabbit_confirms:size(U4)),
+ ?assertEqual(undefined, rabbit_confirms:smallest(U4)),
+
+ U5 = rabbit_confirms:insert(1, [QName], XName, U0),
+ U6 = rabbit_confirms:insert(2, [QName], XName, U5),
+ {[{1, XName}, {2, XName}], _U} = rabbit_confirms:remove_queue(QName, U6),
+
+ ok.
+
+
+%% Utility
diff --git a/deps/rabbit/test/rabbit_core_metrics_gc_SUITE.erl b/deps/rabbit/test/rabbit_core_metrics_gc_SUITE.erl
new file mode 100644
index 0000000000..cae5502a0a
--- /dev/null
+++ b/deps/rabbit/test/rabbit_core_metrics_gc_SUITE.erl
@@ -0,0 +1,392 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_core_metrics_gc_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, non_parallel_tests},
+ {group, cluster_tests}
+ ].
+
+groups() ->
+ [
+ {non_parallel_tests, [],
+ [ queue_metrics,
+ connection_metrics,
+ channel_metrics,
+ node_metrics,
+ gen_server2_metrics,
+ consumer_metrics
+ ]
+ },
+ {cluster_tests, [], [cluster_queue_metrics]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+merge_app_env(Config) ->
+ AppEnv = {rabbit, [{core_metrics_gc_interval, 6000000},
+ {collect_statistics_interval, 100},
+ {collect_statistics, fine}]},
+ rabbit_ct_helpers:merge_app_env(Config, AppEnv).
+
+init_per_group(cluster_tests, Config) ->
+ rabbit_ct_helpers:log_environment(),
+ Conf = [{rmq_nodename_suffix, cluster_tests}, {rmq_nodes_count, 2}],
+ Config1 = rabbit_ct_helpers:set_config(Config, Conf),
+ rabbit_ct_helpers:run_setup_steps(Config1, setup_steps());
+init_per_group(non_parallel_tests, Config) ->
+ rabbit_ct_helpers:log_environment(),
+ Conf = [{rmq_nodename_suffix, non_parallel_tests}],
+ Config1 = rabbit_ct_helpers:set_config(Config, Conf),
+ rabbit_ct_helpers:run_setup_steps(Config1, setup_steps()).
+
+end_per_group(_, Config) ->
+ rabbit_ct_helpers:run_teardown_steps(
+ Config,
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase),
+ rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase),
+ rabbit_ct_helpers:run_teardown_steps(
+ Config,
+ rabbit_ct_client_helpers:teardown_steps()).
+
+setup_steps() ->
+ [ fun merge_app_env/1 ] ++ rabbit_ct_broker_helpers:setup_steps().
+
+%% -------------------------------------------------------------------
+%% Single-node Testcases.
+%% -------------------------------------------------------------------
+
+queue_metrics(Config) ->
+ A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, A),
+
+ amqp_channel:call(Ch, #'queue.declare'{queue = <<"queue_metrics">>}),
+ amqp_channel:cast(Ch, #'basic.publish'{routing_key = <<"queue_metrics">>},
+ #amqp_msg{payload = <<"hello">>}),
+ timer:sleep(150),
+
+ Q = q(<<"myqueue">>),
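+    %% <<"myqueue">> is not a real queue, so the metrics seeded for it below
+    %% are expected to be swept by the GC, while the metrics of the live
+    %% <<"queue_metrics">> queue survive (the tab2list checks stay non-empty).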
+
+ rabbit_ct_broker_helpers:rpc(Config, A, rabbit_core_metrics, queue_stats,
+ [Q, infos]),
+ rabbit_ct_broker_helpers:rpc(Config, A, rabbit_core_metrics, queue_stats,
+ [Q, 1, 1, 1, 1]),
+
+ [_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [queue_metrics, Q]),
+ [_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [queue_coarse_metrics, Q]),
+ %% Trigger gc. When the gen_server:call returns, the gc has already finished.
+ rabbit_ct_broker_helpers:rpc(Config, A, erlang, send, [rabbit_core_metrics_gc, start_gc]),
+ rabbit_ct_broker_helpers:rpc(Config, A, gen_server, call, [rabbit_core_metrics_gc, test]),
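+    %% (a gen_server processes its mailbox in order, so the synchronous call
+    %% above cannot return before the earlier start_gc message was handled)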
+
+ [_|_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, tab2list,
+ [queue_metrics]),
+ [_|_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, tab2list,
+ [queue_coarse_metrics]),
+
+ [] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [queue_metrics, Q]),
+ [] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [queue_coarse_metrics, Q]),
+
+ amqp_channel:call(Ch, #'queue.delete'{queue = <<"queue_metrics">>}),
+ rabbit_ct_client_helpers:close_channel(Ch),
+
+ ok.
+
+connection_metrics(Config) ->
+ A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, A),
+
+ amqp_channel:call(Ch, #'queue.declare'{queue = <<"queue_metrics">>}),
+ amqp_channel:cast(Ch, #'basic.publish'{routing_key = <<"queue_metrics">>},
+ #amqp_msg{payload = <<"hello">>}),
+ timer:sleep(200),
+
+ DeadPid = rabbit_ct_broker_helpers:rpc(Config, A, ?MODULE, dead_pid, []),
+
+ rabbit_ct_broker_helpers:rpc(Config, A, rabbit_core_metrics,
+ connection_created, [DeadPid, infos]),
+ rabbit_ct_broker_helpers:rpc(Config, A, rabbit_core_metrics,
+ connection_stats, [DeadPid, infos]),
+ rabbit_ct_broker_helpers:rpc(Config, A, rabbit_core_metrics,
+ connection_stats, [DeadPid, 1, 1, 1]),
+
+ [_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [connection_created, DeadPid]),
+ [_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [connection_metrics, DeadPid]),
+ [_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [connection_coarse_metrics, DeadPid]),
+
+ %% Trigger gc. When the gen_server:call returns, the gc has already finished.
+ rabbit_ct_broker_helpers:rpc(Config, A, erlang, send, [rabbit_core_metrics_gc, start_gc]),
+ rabbit_ct_broker_helpers:rpc(Config, A, gen_server, call, [rabbit_core_metrics_gc, test]),
+
+ [] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [connection_created, DeadPid]),
+ [] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [connection_metrics, DeadPid]),
+ [] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [connection_coarse_metrics, DeadPid]),
+
+ [_|_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, tab2list, [connection_created]),
+ [_|_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, tab2list, [connection_metrics]),
+ [_|_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, tab2list, [connection_coarse_metrics]),
+
+ amqp_channel:call(Ch, #'queue.delete'{queue = <<"queue_metrics">>}),
+ rabbit_ct_client_helpers:close_channel(Ch),
+
+ ok.
+
+channel_metrics(Config) ->
+ A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, A),
+
+ amqp_channel:call(Ch, #'queue.declare'{queue = <<"queue_metrics">>}),
+ amqp_channel:cast(Ch, #'basic.publish'{routing_key = <<"queue_metrics">>},
+ #amqp_msg{payload = <<"hello">>}),
+ amqp_channel:cast(Ch, #'basic.publish'{routing_key = <<"won't route $¢% anywhere">>},
+ #amqp_msg{payload = <<"hello">>}),
+ {#'basic.get_ok'{}, _} = amqp_channel:call(Ch, #'basic.get'{queue = <<"queue_metrics">>,
+ no_ack=true}),
+ timer:sleep(150),
+
+ DeadPid = rabbit_ct_broker_helpers:rpc(Config, A, ?MODULE, dead_pid, []),
+
+ Q = q(<<"myqueue">>),
+ X = x(<<"myexchange">>),
+
+
+ rabbit_ct_broker_helpers:rpc(Config, A, rabbit_core_metrics,
+ channel_created, [DeadPid, infos]),
+ rabbit_ct_broker_helpers:rpc(Config, A, rabbit_core_metrics,
+ channel_stats, [DeadPid, infos]),
+ rabbit_ct_broker_helpers:rpc(Config, A, rabbit_core_metrics,
+ channel_stats, [reductions, DeadPid, 1]),
+ rabbit_ct_broker_helpers:rpc(Config, A, rabbit_core_metrics,
+ channel_stats, [exchange_stats, publish,
+ {DeadPid, X}, 1]),
+ rabbit_ct_broker_helpers:rpc(Config, A, rabbit_core_metrics,
+ channel_stats, [queue_stats, get,
+ {DeadPid, Q}, 1]),
+ rabbit_ct_broker_helpers:rpc(Config, A, rabbit_core_metrics,
+ channel_stats, [queue_exchange_stats, publish,
+ {DeadPid, {Q, X}}, 1]),
+
+ [_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [channel_created, DeadPid]),
+ [_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [channel_metrics, DeadPid]),
+ [_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [channel_process_metrics, DeadPid]),
+ [_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [channel_exchange_metrics, {DeadPid, X}]),
+ [_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [channel_queue_metrics, {DeadPid, Q}]),
+ [_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [channel_queue_exchange_metrics, {DeadPid, {Q, X}}]),
+
+ %% Trigger gc. When the gen_server:call returns, the gc has already finished.
+ rabbit_ct_broker_helpers:rpc(Config, A, erlang, send, [rabbit_core_metrics_gc, start_gc]),
+ rabbit_ct_broker_helpers:rpc(Config, A, gen_server, call, [rabbit_core_metrics_gc, test]),
+
+
+ [_|_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, tab2list, [channel_created]),
+ [_|_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, tab2list, [channel_metrics]),
+ [_|_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, tab2list, [channel_process_metrics]),
+ [_|_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, tab2list, [channel_exchange_metrics]),
+ [_|_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, tab2list, [channel_queue_metrics]),
+ [_|_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, tab2list, [channel_queue_exchange_metrics]),
+
+ [] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [channel_created, DeadPid]),
+ [] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [channel_metrics, DeadPid]),
+ [] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [channel_process_metrics, DeadPid]),
+ [] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [channel_exchange_metrics, {DeadPid, X}]),
+ [] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [channel_queue_metrics, {DeadPid, Q}]),
+ [] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [channel_queue_exchange_metrics, {DeadPid, {Q, X}}]),
+
+ amqp_channel:call(Ch, #'queue.delete'{queue = <<"queue_metrics">>}),
+ rabbit_ct_client_helpers:close_channel(Ch),
+
+ ok.
+
+node_metrics(Config) ->
+ A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+
+ rabbit_ct_broker_helpers:rpc(Config, A, rabbit_core_metrics, node_node_stats,
+ [{node(), 'deer@localhost'}, infos]),
+
+ [_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [node_node_metrics, {node(), 'deer@localhost'}]),
+
+ %% Trigger gc. When the gen_server:call returns, the gc has already finished.
+ rabbit_ct_broker_helpers:rpc(Config, A, erlang, send, [rabbit_core_metrics_gc, start_gc]),
+ rabbit_ct_broker_helpers:rpc(Config, A, gen_server, call, [rabbit_core_metrics_gc, test]),
+
+ [] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [node_node_metrics, {node(), 'deer@localhost'}]),
+
+ ok.
+
+gen_server2_metrics(Config) ->
+ A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+
+ DeadPid = rabbit_ct_broker_helpers:rpc(Config, A, ?MODULE, dead_pid, []),
+
+ rabbit_ct_broker_helpers:rpc(Config, A, rabbit_core_metrics, gen_server2_stats,
+ [DeadPid, 1]),
+
+ [_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [gen_server2_metrics, DeadPid]),
+
+ %% Trigger gc. When the gen_server:call returns, the gc has already finished.
+ rabbit_ct_broker_helpers:rpc(Config, A, erlang, send, [rabbit_core_metrics_gc, start_gc]),
+ rabbit_ct_broker_helpers:rpc(Config, A, gen_server, call, [rabbit_core_metrics_gc, test]),
+
+ [_|_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, tab2list, [gen_server2_metrics]),
+
+ [] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [gen_server2_metrics, DeadPid]),
+
+ ok.
+
+consumer_metrics(Config) ->
+ A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, A),
+
+ amqp_channel:call(Ch, #'queue.declare'{queue = <<"queue_metrics">>}),
+ amqp_channel:call(Ch, #'basic.consume'{queue = <<"queue_metrics">>}),
+ timer:sleep(200),
+
+ DeadPid = rabbit_ct_broker_helpers:rpc(Config, A, ?MODULE, dead_pid, []),
+
+ QName = q(<<"queue_metrics">>),
+ CTag = <<"tag">>,
+ rabbit_ct_broker_helpers:rpc(Config, A, rabbit_core_metrics,
+ consumer_created, [DeadPid, CTag, true, true,
+ QName, 1, false, waiting, []]),
+ Id = {QName, DeadPid, CTag},
+ [_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup, [consumer_created, Id]),
+
+ %% Trigger gc. When the gen_server:call returns, the gc has already finished.
+ rabbit_ct_broker_helpers:rpc(Config, A, erlang, send, [rabbit_core_metrics_gc, start_gc]),
+ rabbit_ct_broker_helpers:rpc(Config, A, gen_server, call, [rabbit_core_metrics_gc, test]),
+
+ [_|_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, tab2list, [consumer_created]),
+ [] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup, [consumer_created, Id]),
+
+ amqp_channel:call(Ch, #'queue.delete'{queue = <<"queue_metrics">>}),
+ rabbit_ct_client_helpers:close_channel(Ch),
+
+ ok.
+
+dead_pid() ->
+ spawn(fun() -> ok end).
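+
+%% dead_pid/0 spawns a process that exits immediately, so the returned pid
+%% (almost certainly) refers to a dead process by the time the tests above
+%% use it as a metrics table key.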
+
+q(Name) ->
+ #resource{ virtual_host = <<"/">>,
+ kind = queue,
+ name = Name }.
+
+x(Name) ->
+ #resource{ virtual_host = <<"/">>,
+ kind = exchange,
+ name = Name }.
+
+%% -------------------------------------------------------------------
+%% Cluster Testcases.
+%% -------------------------------------------------------------------
+
+cluster_queue_metrics(Config) ->
+ VHost = <<"/">>,
+ QueueName = <<"cluster_queue_metrics">>,
+ PolicyName = <<"ha-policy-1">>,
+ PolicyPattern = <<".*">>,
+ PolicyAppliesTo = <<"queues">>,
+
+ Node0 = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ Node1 = rabbit_ct_broker_helpers:get_node_config(Config, 1, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Node0),
+
+ Node0Name = rabbit_data_coercion:to_binary(Node0),
+ Definition0 = [{<<"ha-mode">>, <<"nodes">>}, {<<"ha-params">>, [Node0Name]}],
+ ok = rabbit_ct_broker_helpers:set_policy(Config, 0,
+ PolicyName, PolicyPattern,
+ PolicyAppliesTo, Definition0),
+
+ amqp_channel:call(Ch, #'queue.declare'{queue = QueueName}),
+ amqp_channel:call(Ch, #'basic.publish'{routing_key = QueueName},
+ #amqp_msg{payload = <<"hello">>}),
+
+ % Update policy to point to other node
+ Node1Name = rabbit_data_coercion:to_binary(Node1),
+ Definition1 = [{<<"ha-mode">>, <<"nodes">>}, {<<"ha-params">>, [Node1Name]}],
+ ok = rabbit_ct_broker_helpers:set_policy(Config, 0,
+ PolicyName, PolicyPattern,
+ PolicyAppliesTo, Definition1),
+
+ % Synchronize
+ Name = rabbit_misc:r(VHost, queue, QueueName),
+ [Q] = rabbit_ct_broker_helpers:rpc(Config, Node0, ets, lookup, [rabbit_queue, Name]),
+ QPid = amqqueue:get_pid(Q),
+ ok = rabbit_ct_broker_helpers:rpc(Config, Node0, rabbit_amqqueue, sync_mirrors, [QPid]),
+
+ % Check ETS table for data
+ wait_for(fun () ->
+ [] =:= rabbit_ct_broker_helpers:rpc(
+ Config, Node0, ets, tab2list,
+ [queue_coarse_metrics])
+ end, 60),
+
+ wait_for(fun () ->
+ Ret = rabbit_ct_broker_helpers:rpc(
+ Config, Node1, ets, tab2list,
+ [queue_coarse_metrics]),
+ case Ret of
+ [{Name, 1, 0, 1, _}] -> true;
+ _ -> false
+ end
+ end, 60),
+
+ amqp_channel:call(Ch, #'queue.delete'{queue=QueueName}),
+ rabbit_ct_client_helpers:close_channel(Ch),
+ Config.
+
+wait_for(_Fun, 0) -> false;
+wait_for(Fun, Seconds) ->
+ case Fun() of
+ true -> ok;
+ false ->
+ timer:sleep(1000),
+ wait_for(Fun, Seconds - 1)
+ end.
diff --git a/deps/rabbit/test/rabbit_dummy_protocol_connection_info.erl b/deps/rabbit/test/rabbit_dummy_protocol_connection_info.erl
new file mode 100644
index 0000000000..92c01d2b0e
--- /dev/null
+++ b/deps/rabbit/test/rabbit_dummy_protocol_connection_info.erl
@@ -0,0 +1,19 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2017-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+%% Dummy module to test rabbit_direct:extract_extra_auth_props
+
+-module(rabbit_dummy_protocol_connection_info).
+
+%% API
+-export([additional_authn_params/4]).
+
+additional_authn_params(_Creds, _VHost, Pid, _Infos) ->
+ case Pid of
+ -1 -> throw(error);
+ _ -> [{client_id, <<"DummyClientId">>}]
+ end.
diff --git a/deps/rabbit/test/rabbit_fifo_SUITE.erl b/deps/rabbit/test/rabbit_fifo_SUITE.erl
new file mode 100644
index 0000000000..7b90d91bfa
--- /dev/null
+++ b/deps/rabbit/test/rabbit_fifo_SUITE.erl
@@ -0,0 +1,1667 @@
+-module(rabbit_fifo_SUITE).
+
+%% rabbit_fifo unit test suite
+
+-compile(export_all).
+
+-compile({no_auto_import, [apply/3]}).
+-export([
+ ]).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+-include("src/rabbit_fifo.hrl").
+
+%%%===================================================================
+%%% Common Test callbacks
+%%%===================================================================
+
+all() ->
+ [
+ {group, tests}
+ ].
+
+
+%% replicate eunit-like test resolution (discover functions ending in _test)
+all_tests() ->
+ [F || {F, _} <- ?MODULE:module_info(functions),
+ re:run(atom_to_list(F), "_test$") /= nomatch].
+
+groups() ->
+ [
+ {tests, [], all_tests()}
+ ].
+
+init_per_suite(Config) ->
+ Config.
+
+end_per_suite(_Config) ->
+ ok.
+
+init_per_group(_Group, Config) ->
+ Config.
+
+end_per_group(_Group, _Config) ->
+ ok.
+
+init_per_testcase(_TestCase, Config) ->
+ Config.
+
+end_per_testcase(_TestCase, _Config) ->
+ ok.
+
+%%%===================================================================
+%%% Test cases
+%%%===================================================================
+
+-define(ASSERT_EFF(EfxPat, Effects),
+ ?ASSERT_EFF(EfxPat, true, Effects)).
+
+-define(ASSERT_EFF(EfxPat, Guard, Effects),
+ ?assert(lists:any(fun (EfxPat) when Guard -> true;
+ (_) -> false
+ end, Effects))).
+
+-define(ASSERT_NO_EFF(EfxPat, Effects),
+ ?assert(not lists:any(fun (EfxPat) -> true;
+ (_) -> false
+ end, Effects))).
+
+-define(ASSERT_NO_EFF(EfxPat, Guard, Effects),
+ ?assert(not lists:any(fun (EfxPat) when Guard -> true;
+ (_) -> false
+ end, Effects))).
+
+-define(assertNoEffect(EfxPat, Effects),
+ ?assert(not lists:any(fun (EfxPat) -> true;
+ (_) -> false
+ end, Effects))).
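+
+%% Illustrative usage of the guard form: assert that Effects contains a
+%% monitor effect for one specific pid SomePid:
+%%   ?ASSERT_EFF({monitor, process, P}, P =:= SomePid, Effects)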
+
+test_init(Name) ->
+ init(#{name => Name,
+ queue_resource => rabbit_misc:r("/", queue,
+ atom_to_binary(Name, utf8)),
+ release_cursor_interval => 0}).
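+
+%% (release_cursor_interval => 0 presumably makes the machine emit
+%% release_cursor effects eagerly, which tests such as release_cursor_test
+%% below rely on)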
+
+enq_enq_checkout_test(_) ->
+ Cid = {<<"enq_enq_checkout_test">>, self()},
+ {State1, _} = enq(1, 1, first, test_init(test)),
+ {State2, _} = enq(2, 2, second, State1),
+ {_State3, _, Effects} =
+ apply(meta(3),
+ rabbit_fifo:make_checkout(Cid, {once, 2, simple_prefetch}, #{}),
+ State2),
+ ?ASSERT_EFF({monitor, _, _}, Effects),
+ ?ASSERT_EFF({send_msg, _, {delivery, _, _}, _}, Effects),
+ ok.
+
+credit_enq_enq_checkout_settled_credit_test(_) ->
+ Cid = {?FUNCTION_NAME, self()},
+ {State1, _} = enq(1, 1, first, test_init(test)),
+ {State2, _} = enq(2, 2, second, State1),
+ {State3, _, Effects} =
+ apply(meta(3), rabbit_fifo:make_checkout(Cid, {auto, 1, credited}, #{}), State2),
+ ?ASSERT_EFF({monitor, _, _}, Effects),
+ Deliveries = lists:filter(fun ({send_msg, _, {delivery, _, _}, _}) -> true;
+ (_) -> false
+ end, Effects),
+ ?assertEqual(1, length(Deliveries)),
+    %% settle the delivery; this should _not_ result in further messages
+    %% being delivered
+ {State4, SettledEffects} = settle(Cid, 4, 1, State3),
+ ?assertEqual(false, lists:any(fun ({send_msg, _, {delivery, _, _}, _}) ->
+ true;
+ (_) -> false
+ end, SettledEffects)),
+    %% granting credit (3) should deliver the second msg if the receiver's
+    %% delivery count is (1)
+ {State5, CreditEffects} = credit(Cid, 5, 1, 1, false, State4),
+ % ?debugFmt("CreditEffects ~p ~n~p", [CreditEffects, State4]),
+ ?ASSERT_EFF({send_msg, _, {delivery, _, _}, _}, CreditEffects),
+ {_State6, FinalEffects} = enq(6, 3, third, State5),
+ ?assertEqual(false, lists:any(fun ({send_msg, _, {delivery, _, _}, _}) ->
+ true;
+ (_) -> false
+ end, FinalEffects)),
+ ok.
+
+credit_with_drained_test(_) ->
+ Cid = {?FUNCTION_NAME, self()},
+ State0 = test_init(test),
+ %% checkout with a single credit
+ {State1, _, _} =
+ apply(meta(1), rabbit_fifo:make_checkout(Cid, {auto, 1, credited},#{}),
+ State0),
+ ?assertMatch(#rabbit_fifo{consumers = #{Cid := #consumer{credit = 1,
+ delivery_count = 0}}},
+ State1),
+ {State, Result, _} =
+ apply(meta(3), rabbit_fifo:make_credit(Cid, 0, 5, true), State1),
+ ?assertMatch(#rabbit_fifo{consumers = #{Cid := #consumer{credit = 0,
+ delivery_count = 5}}},
+ State),
+ ?assertEqual({multi, [{send_credit_reply, 0},
+ {send_drained, {?FUNCTION_NAME, 5}}]},
+ Result),
+ ok.
+
+credit_and_drain_test(_) ->
+ Cid = {?FUNCTION_NAME, self()},
+ {State1, _} = enq(1, 1, first, test_init(test)),
+ {State2, _} = enq(2, 2, second, State1),
+ %% checkout without any initial credit (like AMQP 1.0 would)
+ {State3, _, CheckEffs} =
+ apply(meta(3), rabbit_fifo:make_checkout(Cid, {auto, 0, credited}, #{}),
+ State2),
+
+ ?ASSERT_NO_EFF({send_msg, _, {delivery, _, _}}, CheckEffs),
+ {State4, {multi, [{send_credit_reply, 0},
+ {send_drained, {?FUNCTION_NAME, 2}}]},
+ Effects} = apply(meta(4), rabbit_fifo:make_credit(Cid, 4, 0, true), State3),
+ ?assertMatch(#rabbit_fifo{consumers = #{Cid := #consumer{credit = 0,
+ delivery_count = 4}}},
+ State4),
+
+ ?ASSERT_EFF({send_msg, _, {delivery, _, [{_, {_, first}},
+ {_, {_, second}}]}, _}, Effects),
+ {_State5, EnqEffs} = enq(5, 2, third, State4),
+ ?ASSERT_NO_EFF({send_msg, _, {delivery, _, _}}, EnqEffs),
+ ok.
+
+
+
+enq_enq_deq_test(_) ->
+ Cid = {?FUNCTION_NAME, self()},
+ {State1, _} = enq(1, 1, first, test_init(test)),
+ {State2, _} = enq(2, 2, second, State1),
+ % get returns a reply value
+ NumReady = 1,
+ {_State3, {dequeue, {0, {_, first}}, NumReady}, [{monitor, _, _}]} =
+ apply(meta(3), rabbit_fifo:make_checkout(Cid, {dequeue, unsettled}, #{}),
+ State2),
+ ok.
+
+enq_enq_deq_deq_settle_test(_) ->
+ Cid = {?FUNCTION_NAME, self()},
+ {State1, _} = enq(1, 1, first, test_init(test)),
+ {State2, _} = enq(2, 2, second, State1),
+ % get returns a reply value
+ {State3, {dequeue, {0, {_, first}}, 1}, [{monitor, _, _}]} =
+ apply(meta(3), rabbit_fifo:make_checkout(Cid, {dequeue, unsettled}, #{}),
+ State2),
+ {_State4, {dequeue, empty}} =
+ apply(meta(4), rabbit_fifo:make_checkout(Cid, {dequeue, unsettled}, #{}),
+ State3),
+ ok.
+
+enq_enq_checkout_get_settled_test(_) ->
+ Cid = {?FUNCTION_NAME, self()},
+ {State1, _} = enq(1, 1, first, test_init(test)),
+ % get returns a reply value
+ {_State2, {dequeue, {0, {_, first}}, _}, _Effs} =
+ apply(meta(3), rabbit_fifo:make_checkout(Cid, {dequeue, settled}, #{}),
+ State1),
+ ok.
+
+checkout_get_empty_test(_) ->
+ Cid = {?FUNCTION_NAME, self()},
+ State = test_init(test),
+ {_State2, {dequeue, empty}} =
+ apply(meta(1), rabbit_fifo:make_checkout(Cid, {dequeue, unsettled}, #{}), State),
+ ok.
+
+untracked_enq_deq_test(_) ->
+ Cid = {?FUNCTION_NAME, self()},
+ State0 = test_init(test),
+ {State1, _, _} = apply(meta(1),
+ rabbit_fifo:make_enqueue(undefined, undefined, first),
+ State0),
+ {_State2, {dequeue, {0, {_, first}}, _}, _} =
+ apply(meta(3), rabbit_fifo:make_checkout(Cid, {dequeue, settled}, #{}), State1),
+ ok.
+
+release_cursor_test(_) ->
+ Cid = {?FUNCTION_NAME, self()},
+ {State1, _} = enq(1, 1, first, test_init(test)),
+ {State2, _} = enq(2, 2, second, State1),
+ {State3, _} = check(Cid, 3, 10, State2),
+ % no release cursor effect at this point
+ {State4, _} = settle(Cid, 4, 1, State3),
+ {_Final, Effects1} = settle(Cid, 5, 0, State4),
+ % empty queue forwards release cursor all the way
+ ?ASSERT_EFF({release_cursor, 5, _}, Effects1),
+ ok.
+
+checkout_enq_settle_test(_) ->
+ Cid = {?FUNCTION_NAME, self()},
+ {State1, [{monitor, _, _} | _]} = check(Cid, 1, test_init(test)),
+ {State2, Effects0} = enq(2, 1, first, State1),
+ ?ASSERT_EFF({send_msg, _,
+ {delivery, ?FUNCTION_NAME,
+ [{0, {_, first}}]}, _},
+ Effects0),
+ {State3, [_Inactive]} = enq(3, 2, second, State2),
+ {_, _Effects} = settle(Cid, 4, 0, State3),
+ % the release cursor is the smallest raft index that does not
+ % contribute to the state of the application
+ % ?ASSERT_EFF({release_cursor, 2, _}, Effects),
+ ok.
+
+out_of_order_enqueue_test(_) ->
+ Cid = {?FUNCTION_NAME, self()},
+ {State1, [{monitor, _, _} | _]} = check_n(Cid, 5, 5, test_init(test)),
+ {State2, Effects2} = enq(2, 1, first, State1),
+ ?ASSERT_EFF({send_msg, _, {delivery, _, [{_, {_, first}}]}, _}, Effects2),
+ % assert monitor was set up
+ ?ASSERT_EFF({monitor, _, _}, Effects2),
+ % enqueue seq num 3 and 4 before 2
+ {State3, Effects3} = enq(3, 3, third, State2),
+ ?assertNoEffect({send_msg, _, {delivery, _, _}, _}, Effects3),
+ {State4, Effects4} = enq(4, 4, fourth, State3),
+    % assert no further deliveries were made
+ ?assertNoEffect({send_msg, _, {delivery, _, _}, _}, Effects4),
+ {_State5, Effects5} = enq(5, 2, second, State4),
+    % assert the three buffered messages are now delivered in a single delivery
+ ?ASSERT_EFF({send_msg, _, {delivery, _, [{_, {_, second}},
+ {_, {_, third}},
+ {_, {_, fourth}}]}, _},
+ Effects5),
+ ok.
+
+out_of_order_first_enqueue_test(_) ->
+ Cid = {?FUNCTION_NAME, self()},
+ {State1, _} = check_n(Cid, 5, 5, test_init(test)),
+ {_State2, Effects2} = enq(2, 10, first, State1),
+ ?ASSERT_EFF({monitor, process, _}, Effects2),
+ ?assertNoEffect({send_msg, _, {delivery, _, [{_, {_, first}}]}, _},
+ Effects2),
+ ok.
+
+duplicate_enqueue_test(_) ->
+ Cid = {<<"duplicate_enqueue_test">>, self()},
+ {State1, [{monitor, _, _} | _]} = check_n(Cid, 5, 5, test_init(test)),
+ {State2, Effects2} = enq(2, 1, first, State1),
+ ?ASSERT_EFF({send_msg, _, {delivery, _, [{_, {_, first}}]}, _}, Effects2),
+ {_State3, Effects3} = enq(3, 1, first, State2),
+ ?assertNoEffect({send_msg, _, {delivery, _, [{_, {_, first}}]}, _}, Effects3),
+ ok.
+
+return_test(_) ->
+ Cid = {<<"cid">>, self()},
+ Cid2 = {<<"cid2">>, self()},
+ {State0, _} = enq(1, 1, msg, test_init(test)),
+ {State1, _} = check_auto(Cid, 2, State0),
+ {State2, _} = check_auto(Cid2, 3, State1),
+ {State3, _, _} = apply(meta(4), rabbit_fifo:make_return(Cid, [0]), State2),
+ ?assertMatch(#{Cid := #consumer{checked_out = C}} when map_size(C) == 0,
+ State3#rabbit_fifo.consumers),
+ ?assertMatch(#{Cid2 := #consumer{checked_out = C2}} when map_size(C2) == 1,
+ State3#rabbit_fifo.consumers),
+ ok.
+
+return_dequeue_delivery_limit_test(_) ->
+ Init = init(#{name => test,
+ queue_resource => rabbit_misc:r("/", queue,
+ atom_to_binary(test, utf8)),
+ release_cursor_interval => 0,
+ delivery_limit => 1}),
+ {State0, _} = enq(1, 1, msg, Init),
+
+ Cid = {<<"cid">>, self()},
+ Cid2 = {<<"cid2">>, self()},
+
+ {State1, {MsgId1, _}} = deq(2, Cid, unsettled, State0),
+ {State2, _, _} = apply(meta(4), rabbit_fifo:make_return(Cid, [MsgId1]),
+ State1),
+
+ {State3, {MsgId2, _}} = deq(2, Cid2, unsettled, State2),
+ {State4, _, _} = apply(meta(4), rabbit_fifo:make_return(Cid2, [MsgId2]),
+ State3),
+ ?assertMatch(#{num_messages := 0}, rabbit_fifo:overview(State4)),
+ ok.
+
+return_non_existent_test(_) ->
+ Cid = {<<"cid">>, self()},
+ {State0, [_, _Inactive]} = enq(1, 1, second, test_init(test)),
+ % return non-existent
+ {_State2, _} = apply(meta(3), rabbit_fifo:make_return(Cid, [99]), State0),
+ ok.
+
+return_checked_out_test(_) ->
+ Cid = {<<"cid">>, self()},
+ {State0, [_, _]} = enq(1, 1, first, test_init(test)),
+ {State1, [_Monitor,
+ {send_msg, _, {delivery, _, [{MsgId, _}]}, _},
+ {aux, active} | _ ]} = check_auto(Cid, 2, State0),
+ % returning immediately checks out the same message again
+ {_, ok, [{send_msg, _, {delivery, _, [{_, _}]}, _},
+ {aux, active}]} =
+ apply(meta(3), rabbit_fifo:make_return(Cid, [MsgId]), State1),
+ ok.
+
+return_checked_out_limit_test(_) ->
+ Cid = {<<"cid">>, self()},
+ Init = init(#{name => test,
+ queue_resource => rabbit_misc:r("/", queue,
+ atom_to_binary(test, utf8)),
+ release_cursor_interval => 0,
+ delivery_limit => 1}),
+ {State0, [_, _]} = enq(1, 1, first, Init),
+ {State1, [_Monitor,
+ {send_msg, _, {delivery, _, [{MsgId, _}]}, _},
+ {aux, active} | _ ]} = check_auto(Cid, 2, State0),
+ % returning immediately checks out the same message again
+ {State2, ok, [{send_msg, _, {delivery, _, [{MsgId2, _}]}, _},
+ {aux, active}]} =
+ apply(meta(3), rabbit_fifo:make_return(Cid, [MsgId]), State1),
+ {#rabbit_fifo{ra_indexes = RaIdxs}, ok, [_ReleaseEff]} =
+ apply(meta(4), rabbit_fifo:make_return(Cid, [MsgId2]), State2),
+ ?assertEqual(0, rabbit_fifo_index:size(RaIdxs)),
+ ok.
+
+return_auto_checked_out_test(_) ->
+ Cid = {<<"cid">>, self()},
+ {State00, [_, _]} = enq(1, 1, first, test_init(test)),
+ {State0, [_]} = enq(2, 2, second, State00),
+    % it is first active then inactive, as the consumer took a message on
+    % but cannot take any more
+ {State1, [_Monitor,
+ {send_msg, _, {delivery, _, [{MsgId, _}]}, _},
+ {aux, active},
+ {aux, inactive}
+ ]} = check_auto(Cid, 2, State0),
+ % return should include another delivery
+ {_State2, _, Effects} = apply(meta(3), rabbit_fifo:make_return(Cid, [MsgId]), State1),
+ ?ASSERT_EFF({send_msg, _,
+ {delivery, _, [{_, {#{delivery_count := 1}, first}}]}, _},
+ Effects),
+ ok.
+
+cancelled_checkout_out_test(_) ->
+ Cid = {<<"cid">>, self()},
+ {State00, [_, _]} = enq(1, 1, first, test_init(test)),
+ {State0, [_]} = enq(2, 2, second, State00),
+ {State1, _} = check_auto(Cid, 2, State0),
+ % cancelled checkout should not return pending messages to queue
+ {State2, _, _} = apply(meta(3), rabbit_fifo:make_checkout(Cid, cancel, #{}), State1),
+ ?assertEqual(1, lqueue:len(State2#rabbit_fifo.messages)),
+ ?assertEqual(0, lqueue:len(State2#rabbit_fifo.returns)),
+
+ {State3, {dequeue, empty}} =
+ apply(meta(3), rabbit_fifo:make_checkout(Cid, {dequeue, settled}, #{}), State2),
+ %% settle
+ {State4, ok, _} =
+ apply(meta(4), rabbit_fifo:make_settle(Cid, [0]), State3),
+
+ {_State, {dequeue, {_, {_, second}}, _}, _} =
+ apply(meta(5), rabbit_fifo:make_checkout(Cid, {dequeue, settled}, #{}), State4),
+ ok.
+
+down_with_noproc_consumer_returns_unsettled_test(_) ->
+ Cid = {<<"down_consumer_returns_unsettled_test">>, self()},
+ {State0, [_, _]} = enq(1, 1, second, test_init(test)),
+ {State1, [{monitor, process, Pid} | _]} = check(Cid, 2, State0),
+ {State2, _, _} = apply(meta(3), {down, Pid, noproc}, State1),
+ {_State, Effects} = check(Cid, 4, State2),
+ ?ASSERT_EFF({monitor, process, _}, Effects),
+ ok.
+
+down_with_noconnection_marks_suspect_and_node_is_monitored_test(_) ->
+ Pid = spawn(fun() -> ok end),
+ Cid = {<<"down_with_noconnect">>, Pid},
+ Self = self(),
+ Node = node(Pid),
+ {State0, Effects0} = enq(1, 1, second, test_init(test)),
+ ?ASSERT_EFF({monitor, process, P}, P =:= Self, Effects0),
+ {State1, Effects1} = check_auto(Cid, 2, State0),
+ #consumer{credit = 0} = maps:get(Cid, State1#rabbit_fifo.consumers),
+ ?ASSERT_EFF({monitor, process, P}, P =:= Pid, Effects1),
+ % monitor both enqueuer and consumer
+ % because we received a noconnection we now need to monitor the node
+ {State2a, _, _} = apply(meta(3), {down, Pid, noconnection}, State1),
+ #consumer{credit = 1,
+ checked_out = Ch,
+ status = suspected_down} = maps:get(Cid, State2a#rabbit_fifo.consumers),
+ ?assertEqual(#{}, Ch),
+ %% validate consumer has credit
+ {State2, _, Effects2} = apply(meta(3), {down, Self, noconnection}, State2a),
+ ?ASSERT_EFF({monitor, node, _}, Effects2),
+ ?assertNoEffect({demonitor, process, _}, Effects2),
+ % when the node comes up we need to retry the process monitors for the
+ % disconnected processes
+ {State3, _, Effects3} = apply(meta(3), {nodeup, Node}, State2),
+ #consumer{status = up} = maps:get(Cid, State3#rabbit_fifo.consumers),
+ % try to re-monitor the suspect processes
+ ?ASSERT_EFF({monitor, process, P}, P =:= Pid, Effects3),
+ ?ASSERT_EFF({monitor, process, P}, P =:= Self, Effects3),
+ ok.
+
+down_with_noconnection_returns_unack_test(_) ->
+ Pid = spawn(fun() -> ok end),
+ Cid = {<<"down_with_noconnect">>, Pid},
+ {State0, _} = enq(1, 1, second, test_init(test)),
+ ?assertEqual(1, lqueue:len(State0#rabbit_fifo.messages)),
+ ?assertEqual(0, lqueue:len(State0#rabbit_fifo.returns)),
+ {State1, {_, _}} = deq(2, Cid, unsettled, State0),
+ ?assertEqual(0, lqueue:len(State1#rabbit_fifo.messages)),
+ ?assertEqual(0, lqueue:len(State1#rabbit_fifo.returns)),
+ {State2a, _, _} = apply(meta(3), {down, Pid, noconnection}, State1),
+ ?assertEqual(0, lqueue:len(State2a#rabbit_fifo.messages)),
+ ?assertEqual(1, lqueue:len(State2a#rabbit_fifo.returns)),
+ ?assertMatch(#consumer{checked_out = Ch,
+ status = suspected_down}
+ when map_size(Ch) == 0,
+ maps:get(Cid, State2a#rabbit_fifo.consumers)),
+ ok.
+
+down_with_noproc_enqueuer_is_cleaned_up_test(_) ->
+ State00 = test_init(test),
+ Pid = spawn(fun() -> ok end),
+ {State0, _, Effects0} = apply(meta(1), rabbit_fifo:make_enqueue(Pid, 1, first), State00),
+ ?ASSERT_EFF({monitor, process, _}, Effects0),
+ {State1, _, _} = apply(meta(3), {down, Pid, noproc}, State0),
+ % ensure there are no enqueuers
+ ?assert(0 =:= maps:size(State1#rabbit_fifo.enqueuers)),
+ ok.
+
+discarded_message_without_dead_letter_handler_is_removed_test(_) ->
+ Cid = {<<"completed_consumer_yields_demonitor_effect_test">>, self()},
+ {State0, [_, _]} = enq(1, 1, first, test_init(test)),
+ {State1, Effects1} = check_n(Cid, 2, 10, State0),
+ ?ASSERT_EFF({send_msg, _,
+ {delivery, _, [{0, {_, first}}]}, _},
+ Effects1),
+ {_State2, _, Effects2} = apply(meta(1),
+ rabbit_fifo:make_discard(Cid, [0]), State1),
+ ?assertNoEffect({send_msg, _,
+ {delivery, _, [{0, {_, first}}]}, _},
+ Effects2),
+ ok.
+
+discarded_message_with_dead_letter_handler_emits_log_effect_test(_) ->
+ Cid = {<<"completed_consumer_yields_demonitor_effect_test">>, self()},
+ State00 = init(#{name => test,
+ queue_resource => rabbit_misc:r(<<"/">>, queue, <<"test">>),
+ dead_letter_handler =>
+ {somemod, somefun, [somearg]}}),
+ {State0, [_, _]} = enq(1, 1, first, State00),
+ {State1, Effects1} = check_n(Cid, 2, 10, State0),
+ ?ASSERT_EFF({send_msg, _,
+ {delivery, _, [{0, {_, first}}]}, _},
+ Effects1),
+ {_State2, _, Effects2} = apply(meta(1), rabbit_fifo:make_discard(Cid, [0]), State1),
+    % assert a log effect referencing the discarded message's raft index
+ ?ASSERT_EFF({log, _RaftIdxs, _}, Effects2),
+ ok.
+
+tick_test(_) ->
+ Cid = {<<"c">>, self()},
+ Cid2 = {<<"c2">>, self()},
+ {S0, _} = enq(1, 1, <<"fst">>, test_init(?FUNCTION_NAME)),
+ {S1, _} = enq(2, 2, <<"snd">>, S0),
+ {S2, {MsgId, _}} = deq(3, Cid, unsettled, S1),
+ {S3, {_, _}} = deq(4, Cid2, unsettled, S2),
+ {S4, _, _} = apply(meta(5), rabbit_fifo:make_return(Cid, [MsgId]), S3),
+
+ [{mod_call, rabbit_quorum_queue, handle_tick,
+ [#resource{},
+ {?FUNCTION_NAME, 1, 1, 2, 1, 3, 3},
+ [_Node]
+ ]}] = rabbit_fifo:tick(1, S4),
+ ok.
+
+
+delivery_query_returns_deliveries_test(_) ->
+ Tag = atom_to_binary(?FUNCTION_NAME, utf8),
+ Cid = {Tag, self()},
+ Commands = [
+ rabbit_fifo:make_checkout(Cid, {auto, 5, simple_prefetch}, #{}),
+ rabbit_fifo:make_enqueue(self(), 1, one),
+ rabbit_fifo:make_enqueue(self(), 2, two),
+ rabbit_fifo:make_enqueue(self(), 3, tre),
+ rabbit_fifo:make_enqueue(self(), 4, for)
+ ],
+ Indexes = lists:seq(1, length(Commands)),
+ Entries = lists:zip(Indexes, Commands),
+ {State, _Effects} = run_log(test_init(help), Entries),
+    % a single delivery is returned for msg id 0, then 3 deliveries for ids 1-3
+ [{0, {_, one}}] = rabbit_fifo:get_checked_out(Cid, 0, 0, State),
+ [_, _, _] = rabbit_fifo:get_checked_out(Cid, 1, 3, State),
+ ok.
+
+pending_enqueue_is_enqueued_on_down_test(_) ->
+ Cid = {<<"cid">>, self()},
+ Pid = self(),
+ {State0, _} = enq(1, 2, first, test_init(test)),
+ {State1, _, _} = apply(meta(2), {down, Pid, noproc}, State0),
+ {_State2, {dequeue, {0, {_, first}}, 0}, _} =
+ apply(meta(3), rabbit_fifo:make_checkout(Cid, {dequeue, settled}, #{}), State1),
+ ok.
+
+duplicate_delivery_test(_) ->
+ {State0, _} = enq(1, 1, first, test_init(test)),
+ {#rabbit_fifo{ra_indexes = RaIdxs,
+ messages = Messages}, _} = enq(2, 1, first, State0),
+ ?assertEqual(1, rabbit_fifo_index:size(RaIdxs)),
+ ?assertEqual(1, lqueue:len(Messages)),
+ ok.
+
+state_enter_file_handle_leader_reservation_test(_) ->
+ S0 = init(#{name => the_name,
+ queue_resource => rabbit_misc:r(<<"/">>, queue, <<"test">>),
+ become_leader_handler => {m, f, [a]}}),
+
+ Resource = {resource, <<"/">>, queue, <<"test">>},
+ Effects = rabbit_fifo:state_enter(leader, S0),
+ ?assertEqual([
+ {mod_call, m, f, [a, the_name]},
+ {mod_call, rabbit_quorum_queue, file_handle_leader_reservation, [Resource]}
+ ], Effects),
+ ok.
+
+state_enter_file_handle_other_reservation_test(_) ->
+ S0 = init(#{name => the_name,
+ queue_resource => rabbit_misc:r(<<"/">>, queue, <<"test">>)}),
+ Effects = rabbit_fifo:state_enter(other, S0),
+ ?assertEqual([
+ {mod_call, rabbit_quorum_queue, file_handle_other_reservation, []}
+ ],
+ Effects),
+ ok.
+
+state_enter_monitors_and_notifications_test(_) ->
+ Oth = spawn(fun () -> ok end),
+ {State0, _} = enq(1, 1, first, test_init(test)),
+ Cid = {<<"adf">>, self()},
+ OthCid = {<<"oth">>, Oth},
+ {State1, _} = check(Cid, 2, State0),
+ {State, _} = check(OthCid, 3, State1),
+ Self = self(),
+ Effects = rabbit_fifo:state_enter(leader, State),
+
+ %% monitor all enqueuers and consumers
+ [{monitor, process, Self},
+ {monitor, process, Oth}] =
+ lists:filter(fun ({monitor, process, _}) -> true;
+ (_) -> false
+ end, Effects),
+ [{send_msg, Self, leader_change, ra_event},
+ {send_msg, Oth, leader_change, ra_event}] =
+ lists:filter(fun ({send_msg, _, leader_change, ra_event}) -> true;
+ (_) -> false
+ end, Effects),
+ ?ASSERT_EFF({monitor, process, _}, Effects),
+ ok.
+
+purge_test(_) ->
+ Cid = {<<"purge_test">>, self()},
+ {State1, _} = enq(1, 1, first, test_init(test)),
+ {State2, {purge, 1}, _} = apply(meta(2), rabbit_fifo:make_purge(), State1),
+ {State3, _} = enq(3, 2, second, State2),
+ % get returns a reply value
+ {_State4, {dequeue, {0, {_, second}}, _}, [{monitor, _, _}]} =
+ apply(meta(4), rabbit_fifo:make_checkout(Cid, {dequeue, unsettled}, #{}), State3),
+ ok.
+
+purge_with_checkout_test(_) ->
+ Cid = {<<"purge_test">>, self()},
+ {State0, _} = check_auto(Cid, 1, test_init(?FUNCTION_NAME)),
+ {State1, _} = enq(2, 1, <<"first">>, State0),
+ {State2, _} = enq(3, 2, <<"second">>, State1),
+ %% assert message bytes are non zero
+ ?assert(State2#rabbit_fifo.msg_bytes_checkout > 0),
+ ?assert(State2#rabbit_fifo.msg_bytes_enqueue > 0),
+ {State3, {purge, 1}, _} = apply(meta(2), rabbit_fifo:make_purge(), State2),
+ ?assert(State2#rabbit_fifo.msg_bytes_checkout > 0),
+ ?assertEqual(0, State3#rabbit_fifo.msg_bytes_enqueue),
+ ?assertEqual(1, rabbit_fifo_index:size(State3#rabbit_fifo.ra_indexes)),
+ #consumer{checked_out = Checked} = maps:get(Cid, State3#rabbit_fifo.consumers),
+ ?assertEqual(1, maps:size(Checked)),
+ ok.
+
+down_noproc_returns_checked_out_in_order_test(_) ->
+ S0 = test_init(?FUNCTION_NAME),
+ %% enqueue 100
+ S1 = lists:foldl(fun (Num, FS0) ->
+ {FS, _} = enq(Num, Num, Num, FS0),
+ FS
+ end, S0, lists:seq(1, 100)),
+ ?assertEqual(100, lqueue:len(S1#rabbit_fifo.messages)),
+ Cid = {<<"cid">>, self()},
+ {S2, _} = check(Cid, 101, 1000, S1),
+ #consumer{checked_out = Checked} = maps:get(Cid, S2#rabbit_fifo.consumers),
+ ?assertEqual(100, maps:size(Checked)),
+ %% simulate down
+ {S, _, _} = apply(meta(102), {down, self(), noproc}, S2),
+ Returns = lqueue:to_list(S#rabbit_fifo.returns),
+ ?assertEqual(100, length(Returns)),
+ ?assertEqual(0, maps:size(S#rabbit_fifo.consumers)),
+ %% validate returns are in order
+ ?assertEqual(lists:sort(Returns), Returns),
+ ok.
+
+down_noconnection_returns_checked_out_test(_) ->
+ S0 = test_init(?FUNCTION_NAME),
+ NumMsgs = 20,
+ S1 = lists:foldl(fun (Num, FS0) ->
+ {FS, _} = enq(Num, Num, Num, FS0),
+ FS
+ end, S0, lists:seq(1, NumMsgs)),
+ ?assertEqual(NumMsgs, lqueue:len(S1#rabbit_fifo.messages)),
+ Cid = {<<"cid">>, self()},
+ {S2, _} = check(Cid, 101, 1000, S1),
+ #consumer{checked_out = Checked} = maps:get(Cid, S2#rabbit_fifo.consumers),
+ ?assertEqual(NumMsgs, maps:size(Checked)),
+ %% simulate down
+ {S, _, _} = apply(meta(102), {down, self(), noconnection}, S2),
+ Returns = lqueue:to_list(S#rabbit_fifo.returns),
+ ?assertEqual(NumMsgs, length(Returns)),
+ ?assertMatch(#consumer{checked_out = Ch}
+ when map_size(Ch) == 0,
+ maps:get(Cid, S#rabbit_fifo.consumers)),
+ %% validate returns are in order
+ ?assertEqual(lists:sort(Returns), Returns),
+ ok.
+
+single_active_consumer_basic_get_test(_) ->
+ Cid = {?FUNCTION_NAME, self()},
+ State0 = init(#{name => ?FUNCTION_NAME,
+ queue_resource => rabbit_misc:r("/", queue,
+ atom_to_binary(?FUNCTION_NAME, utf8)),
+ release_cursor_interval => 0,
+ single_active_consumer_on => true}),
+ ?assertEqual(single_active, State0#rabbit_fifo.cfg#cfg.consumer_strategy),
+ ?assertEqual(0, map_size(State0#rabbit_fifo.consumers)),
+ {State1, _} = enq(1, 1, first, State0),
+ {_State, {error, {unsupported, single_active_consumer}}} =
+ apply(meta(2), rabbit_fifo:make_checkout(Cid, {dequeue, unsettled}, #{}),
+ State1),
+ ok.
+
+single_active_consumer_test(_) ->
+ State0 = init(#{name => ?FUNCTION_NAME,
+ queue_resource => rabbit_misc:r("/", queue,
+ atom_to_binary(?FUNCTION_NAME, utf8)),
+ release_cursor_interval => 0,
+ single_active_consumer_on => true}),
+ ?assertEqual(single_active, State0#rabbit_fifo.cfg#cfg.consumer_strategy),
+ ?assertEqual(0, map_size(State0#rabbit_fifo.consumers)),
+
+ % adding some consumers
+ AddConsumer = fun(CTag, State) ->
+ {NewState, _, _} = apply(
+ meta(1),
+ make_checkout({CTag, self()},
+ {once, 1, simple_prefetch},
+ #{}),
+ State),
+ NewState
+ end,
+ State1 = lists:foldl(AddConsumer, State0,
+ [<<"ctag1">>, <<"ctag2">>, <<"ctag3">>, <<"ctag4">>]),
+ C1 = {<<"ctag1">>, self()},
+ C2 = {<<"ctag2">>, self()},
+ C3 = {<<"ctag3">>, self()},
+ C4 = {<<"ctag4">>, self()},
+
+ % the first registered consumer is the active one, the others are waiting
+ ?assertEqual(1, map_size(State1#rabbit_fifo.consumers)),
+ ?assertMatch(#{C1 := _}, State1#rabbit_fifo.consumers),
+ ?assertEqual(3, length(State1#rabbit_fifo.waiting_consumers)),
+ ?assertNotEqual(false, lists:keyfind(C2, 1, State1#rabbit_fifo.waiting_consumers)),
+ ?assertNotEqual(false, lists:keyfind(C3, 1, State1#rabbit_fifo.waiting_consumers)),
+ ?assertNotEqual(false, lists:keyfind(C4, 1, State1#rabbit_fifo.waiting_consumers)),
+
+ % cancelling a waiting consumer
+ {State2, _, Effects1} = apply(meta(2),
+ make_checkout(C3, cancel, #{}),
+ State1),
+ % the active consumer should still be in place
+ ?assertEqual(1, map_size(State2#rabbit_fifo.consumers)),
+ ?assertMatch(#{C1 := _}, State2#rabbit_fifo.consumers),
+ % the cancelled consumer has been removed from waiting consumers
+ ?assertEqual(2, length(State2#rabbit_fifo.waiting_consumers)),
+ ?assertNotEqual(false, lists:keyfind(C2, 1, State2#rabbit_fifo.waiting_consumers)),
+ ?assertNotEqual(false, lists:keyfind(C4, 1, State2#rabbit_fifo.waiting_consumers)),
+ % there are some effects to unregister the consumer
+ ?ASSERT_EFF({mod_call, rabbit_quorum_queue,
+ cancel_consumer_handler, [_, C]}, C == C3, Effects1),
+
+ % cancelling the active consumer
+ {State3, _, Effects2} = apply(meta(3),
+ make_checkout(C1, cancel, #{}),
+ State2),
+ % the second registered consumer is now the active one
+ ?assertEqual(1, map_size(State3#rabbit_fifo.consumers)),
+ ?assertMatch(#{C2 := _}, State3#rabbit_fifo.consumers),
+ % the new active consumer is no longer in the waiting list
+ ?assertEqual(1, length(State3#rabbit_fifo.waiting_consumers)),
+ ?assertNotEqual(false, lists:keyfind(C4, 1,
+ State3#rabbit_fifo.waiting_consumers)),
+ %% should have a cancel consumer handler mod_call effect and
+ %% an active new consumer effect
+ ?ASSERT_EFF({mod_call, rabbit_quorum_queue,
+ cancel_consumer_handler, [_, C]}, C == C1, Effects2),
+ ?ASSERT_EFF({mod_call, rabbit_quorum_queue,
+ update_consumer_handler, _}, Effects2),
+
+ % cancelling the active consumer
+ {State4, _, Effects3} = apply(meta(4),
+ make_checkout(C2, cancel, #{}),
+ State3),
+ % the last waiting consumer became the active one
+ ?assertEqual(1, map_size(State4#rabbit_fifo.consumers)),
+ ?assertMatch(#{C4 := _}, State4#rabbit_fifo.consumers),
+ % the waiting consumer list is now empty
+ ?assertEqual(0, length(State4#rabbit_fifo.waiting_consumers)),
+ % there are some effects to unregister the consumer and
+ % to update the new active one (metrics)
+ ?ASSERT_EFF({mod_call, rabbit_quorum_queue,
+ cancel_consumer_handler, [_, C]}, C == C2, Effects3),
+ ?ASSERT_EFF({mod_call, rabbit_quorum_queue,
+ update_consumer_handler, _}, Effects3),
+
+ % cancelling the last consumer
+ {State5, _, Effects4} = apply(meta(5),
+ make_checkout(C4, cancel, #{}),
+ State4),
+ % no active consumer anymore
+ ?assertEqual(0, map_size(State5#rabbit_fifo.consumers)),
+ % still nothing in the waiting list
+ ?assertEqual(0, length(State5#rabbit_fifo.waiting_consumers)),
+ % there is an effect to unregister the consumer + queue inactive effect
+ ?ASSERT_EFF({mod_call, rabbit_quorum_queue,
+ cancel_consumer_handler, _}, Effects4),
+
+ ok.
+
+single_active_consumer_cancel_consumer_when_channel_is_down_test(_) ->
+ State0 = init(#{name => ?FUNCTION_NAME,
+ queue_resource => rabbit_misc:r("/", queue,
+ atom_to_binary(?FUNCTION_NAME, utf8)),
+ release_cursor_interval => 0,
+ single_active_consumer_on => true}),
+
+ DummyFunction = fun() -> ok end,
+ Pid1 = spawn(DummyFunction),
+ Pid2 = spawn(DummyFunction),
+ Pid3 = spawn(DummyFunction),
+
+ [C1, C2, C3, C4] = Consumers =
+ [{<<"ctag1">>, Pid1}, {<<"ctag2">>, Pid2},
+ {<<"ctag3">>, Pid2}, {<<"ctag4">>, Pid3}],
+ % adding some consumers
+ AddConsumer = fun({CTag, ChannelId}, State) ->
+ {NewState, _, _} = apply(
+ #{index => 1},
+ make_checkout({CTag, ChannelId}, {once, 1, simple_prefetch}, #{}),
+ State),
+ NewState
+ end,
+ State1 = lists:foldl(AddConsumer, State0, Consumers),
+
+ % the channel of the active consumer goes down
+ {State2, _, Effects} = apply(meta(2), {down, Pid1, noproc}, State1),
+ % fell back to another consumer
+ ?assertEqual(1, map_size(State2#rabbit_fifo.consumers)),
+ % there are still waiting consumers
+ ?assertEqual(2, length(State2#rabbit_fifo.waiting_consumers)),
+ % effects to unregister the consumer and
+ % to update the new active one (metrics) are there
+ ?ASSERT_EFF({mod_call, rabbit_quorum_queue,
+ cancel_consumer_handler, [_, C]}, C == C1, Effects),
+ ?ASSERT_EFF({mod_call, rabbit_quorum_queue,
+ update_consumer_handler, _}, Effects),
+
+ % the channel of the active consumer and a waiting consumer goes down
+ {State3, _, Effects2} = apply(meta(3), {down, Pid2, noproc}, State2),
+ % fell back to another consumer
+ ?assertEqual(1, map_size(State3#rabbit_fifo.consumers)),
+ % no more waiting consumer
+ ?assertEqual(0, length(State3#rabbit_fifo.waiting_consumers)),
+ % effects to cancel both consumers of this channel + effect to update the new active one (metrics)
+ ?ASSERT_EFF({mod_call, rabbit_quorum_queue,
+ cancel_consumer_handler, [_, C]}, C == C2, Effects2),
+ ?ASSERT_EFF({mod_call, rabbit_quorum_queue,
+ cancel_consumer_handler, [_, C]}, C == C3, Effects2),
+ ?ASSERT_EFF({mod_call, rabbit_quorum_queue,
+ update_consumer_handler, _}, Effects2),
+
+ % the last channel goes down
+ {State4, _, Effects3} = apply(meta(4), {down, Pid3, doesnotmatter}, State3),
+ % no more consumers
+ ?assertEqual(0, map_size(State4#rabbit_fifo.consumers)),
+ ?assertEqual(0, length(State4#rabbit_fifo.waiting_consumers)),
+ % there is an effect to unregister the consumer + queue inactive effect
+ ?ASSERT_EFF({mod_call, rabbit_quorum_queue,
+ cancel_consumer_handler, [_, C]}, C == C4, Effects3),
+
+ ok.
+
+single_active_returns_messages_on_noconnection_test(_) ->
+ R = rabbit_misc:r("/", queue, atom_to_binary(?FUNCTION_NAME, utf8)),
+ State0 = init(#{name => ?FUNCTION_NAME,
+ queue_resource => R,
+ release_cursor_interval => 0,
+ single_active_consumer_on => true}),
+ Meta = #{index => 1},
+ Nodes = [n1],
+ ConsumerIds = [{_, DownPid}] =
+ [begin
+ B = atom_to_binary(N, utf8),
+ {<<"ctag_", B/binary>>,
+ test_util:fake_pid(N)}
+ end || N <- Nodes],
+ % adding some consumers
+ State1 = lists:foldl(
+ fun(CId, Acc0) ->
+ {Acc, _, _} =
+ apply(Meta,
+ make_checkout(CId,
+ {once, 1, simple_prefetch}, #{}),
+ Acc0),
+ Acc
+ end, State0, ConsumerIds),
+ {State2, _} = enq(4, 1, msg1, State1),
+ % simulate node goes down
+ {State3, _, _} = apply(meta(5), {down, DownPid, noconnection}, State2),
+ %% assert the message was returned and the consumer moved to waiting
+ ?assertMatch([_], lqueue:to_list(State3#rabbit_fifo.returns)),
+ ?assertMatch([{_, #consumer{checked_out = Checked}}]
+ when map_size(Checked) == 0,
+ State3#rabbit_fifo.waiting_consumers),
+
+ ok.
+
+single_active_consumer_replaces_consumer_when_down_noconnection_test(_) ->
+ R = rabbit_misc:r("/", queue, atom_to_binary(?FUNCTION_NAME, utf8)),
+ State0 = init(#{name => ?FUNCTION_NAME,
+ queue_resource => R,
+ release_cursor_interval => 0,
+ single_active_consumer_on => true}),
+ Meta = #{index => 1},
+ Nodes = [n1, n2, node()],
+ ConsumerIds = [C1 = {_, DownPid}, C2, _C3] =
+ [begin
+ B = atom_to_binary(N, utf8),
+ {<<"ctag_", B/binary>>,
+ test_util:fake_pid(N)}
+ end || N <- Nodes],
+ % adding some consumers
+ State1a = lists:foldl(
+ fun(CId, Acc0) ->
+ {Acc, _, _} =
+ apply(Meta,
+ make_checkout(CId,
+ {once, 1, simple_prefetch}, #{}),
+ Acc0),
+ Acc
+ end, State0, ConsumerIds),
+
+ %% assert the consumer is up
+ ?assertMatch(#{C1 := #consumer{status = up}},
+ State1a#rabbit_fifo.consumers),
+
+ {State1, _} = enq(10, 1, msg, State1a),
+
+ % simulate node goes down
+ {State2, _, _} = apply(meta(5), {down, DownPid, noconnection}, State1),
+
+ %% assert a new consumer is in place and it is up
+ ?assertMatch([{C2, #consumer{status = up,
+ checked_out = Ch}}]
+ when map_size(Ch) == 1,
+ maps:to_list(State2#rabbit_fifo.consumers)),
+
+ %% the disconnected consumer has been returned to waiting
+ ?assert(lists:any(fun ({C,_}) -> C =:= C1 end,
+ State2#rabbit_fifo.waiting_consumers)),
+ ?assertEqual(2, length(State2#rabbit_fifo.waiting_consumers)),
+
+ % simulate node comes back up
+ {State3, _, _} = apply(#{index => 2}, {nodeup, node(DownPid)}, State2),
+
+ %% the consumer is still active and the same as before
+ ?assertMatch([{C2, #consumer{status = up}}],
+ maps:to_list(State3#rabbit_fifo.consumers)),
+ % the waiting consumers should no longer be suspected down
+ ?assertEqual(2, length(State3#rabbit_fifo.waiting_consumers)),
+ lists:foreach(fun({_, #consumer{status = Status}}) ->
+ ?assert(Status /= suspected_down)
+ end, State3#rabbit_fifo.waiting_consumers),
+ ok.
+
+single_active_consumer_all_disconnected_test(_) ->
+ R = rabbit_misc:r("/", queue, atom_to_binary(?FUNCTION_NAME, utf8)),
+ State0 = init(#{name => ?FUNCTION_NAME,
+ queue_resource => R,
+ release_cursor_interval => 0,
+ single_active_consumer_on => true}),
+ Meta = #{index => 1},
+ Nodes = [n1, n2],
+ ConsumerIds = [C1 = {_, C1Pid}, C2 = {_, C2Pid}] =
+ [begin
+ B = atom_to_binary(N, utf8),
+ {<<"ctag_", B/binary>>,
+ test_util:fake_pid(N)}
+ end || N <- Nodes],
+ % adding some consumers
+ State1 = lists:foldl(
+ fun(CId, Acc0) ->
+ {Acc, _, _} =
+ apply(Meta,
+ make_checkout(CId,
+ {once, 1, simple_prefetch}, #{}),
+ Acc0),
+ Acc
+ end, State0, ConsumerIds),
+
+ %% assert the consumer is up
+ ?assertMatch(#{C1 := #consumer{status = up}}, State1#rabbit_fifo.consumers),
+
+ % simulate node goes down
+ {State2, _, _} = apply(meta(5), {down, C1Pid, noconnection}, State1),
+ %% assert the consumer fails over to the consumer on n2
+ ?assertMatch(#{C2 := #consumer{status = up}}, State2#rabbit_fifo.consumers),
+ {State3, _, _} = apply(meta(6), {down, C2Pid, noconnection}, State2),
+ %% assert there is no active consumer after both nodes are marked as down
+ ?assertMatch([], maps:to_list(State3#rabbit_fifo.consumers)),
+ %% n2 comes back
+ {State4, _, _} = apply(meta(7), {nodeup, node(C2Pid)}, State3),
+ %% ensure the consumer on n2 is active again as its node has been
+ %% registered as up
+ ?assertMatch([{{<<"ctag_n2">>, _}, #consumer{status = up,
+ credit = 1}}],
+ maps:to_list(State4#rabbit_fifo.consumers)),
+ ok.
+
+single_active_consumer_state_enter_leader_include_waiting_consumers_test(_) ->
+ State0 = init(#{name => ?FUNCTION_NAME,
+ queue_resource =>
+ rabbit_misc:r("/", queue,
+ atom_to_binary(?FUNCTION_NAME, utf8)),
+ release_cursor_interval => 0,
+ single_active_consumer_on => true}),
+
+ DummyFunction = fun() -> ok end,
+ Pid1 = spawn(DummyFunction),
+ Pid2 = spawn(DummyFunction),
+ Pid3 = spawn(DummyFunction),
+
+ Meta = #{index => 1},
+ % adding some consumers
+ AddConsumer = fun({CTag, ChannelId}, State) ->
+ {NewState, _, _} = apply(
+ Meta,
+ make_checkout({CTag, ChannelId},
+ {once, 1, simple_prefetch}, #{}),
+ State),
+ NewState
+ end,
+ State1 = lists:foldl(AddConsumer, State0,
+ [{<<"ctag1">>, Pid1}, {<<"ctag2">>, Pid2}, {<<"ctag3">>, Pid2}, {<<"ctag4">>, Pid3}]),
+
+ Effects = rabbit_fifo:state_enter(leader, State1),
+ %% 2 effects for each consumer process (channel process), 1 effect for the node,
+ %% 1 effect for file handle reservation
+ ?assertEqual(2 * 3 + 1 + 1, length(Effects)).
+
+single_active_consumer_state_enter_eol_include_waiting_consumers_test(_) ->
+ Resource = rabbit_misc:r("/", queue, atom_to_binary(?FUNCTION_NAME, utf8)),
+ State0 = init(#{name => ?FUNCTION_NAME,
+ queue_resource => Resource,
+ release_cursor_interval => 0,
+ single_active_consumer_on => true}),
+
+ DummyFunction = fun() -> ok end,
+ Pid1 = spawn(DummyFunction),
+ Pid2 = spawn(DummyFunction),
+ Pid3 = spawn(DummyFunction),
+
+ Meta = #{index => 1},
+ % adding some consumers
+ AddConsumer = fun({CTag, ChannelId}, State) ->
+ {NewState, _, _} = apply(
+ Meta,
+ make_checkout({CTag, ChannelId},
+ {once, 1, simple_prefetch}, #{}),
+ State),
+ NewState
+ end,
+ State1 = lists:foldl(AddConsumer, State0,
+ [{<<"ctag1">>, Pid1}, {<<"ctag2">>, Pid2},
+ {<<"ctag3">>, Pid2}, {<<"ctag4">>, Pid3}]),
+
+ Effects = rabbit_fifo:state_enter(eol, State1),
+ %% 1 effect for each consumer process (channel process),
+ %% 1 effect for file handle reservation
+ ?assertEqual(4, length(Effects)).
+
+query_consumers_test(_) ->
+ State0 = init(#{name => ?FUNCTION_NAME,
+ queue_resource => rabbit_misc:r("/", queue,
+ atom_to_binary(?FUNCTION_NAME, utf8)),
+ release_cursor_interval => 0,
+ single_active_consumer_on => false}),
+
+ % adding some consumers
+ AddConsumer = fun(CTag, State) ->
+ {NewState, _, _} = apply(
+ #{index => 1},
+ make_checkout({CTag, self()},
+ {once, 1, simple_prefetch}, #{}),
+ State),
+ NewState
+ end,
+ State1 = lists:foldl(AddConsumer, State0, [<<"ctag1">>, <<"ctag2">>, <<"ctag3">>, <<"ctag4">>]),
+ Consumers0 = State1#rabbit_fifo.consumers,
+ Consumer = maps:get({<<"ctag2">>, self()}, Consumers0),
+ Consumers1 = maps:put({<<"ctag2">>, self()},
+ Consumer#consumer{status = suspected_down}, Consumers0),
+ State2 = State1#rabbit_fifo{consumers = Consumers1},
+
+ ?assertEqual(3, rabbit_fifo:query_consumer_count(State2)),
+ Consumers2 = rabbit_fifo:query_consumers(State2),
+ ?assertEqual(4, maps:size(Consumers2)),
+ maps:fold(fun(_Key, {Pid, Tag, _, _, Active, ActivityStatus, _, _}, _Acc) ->
+ ?assertEqual(self(), Pid),
+ case Tag of
+ <<"ctag2">> ->
+ ?assertNot(Active),
+ ?assertEqual(suspected_down, ActivityStatus);
+ _ ->
+ ?assert(Active),
+ ?assertEqual(up, ActivityStatus)
+ end
+ end, [], Consumers2).
+
+query_consumers_when_single_active_consumer_is_on_test(_) ->
+ State0 = init(#{name => ?FUNCTION_NAME,
+ queue_resource => rabbit_misc:r("/", queue,
+ atom_to_binary(?FUNCTION_NAME, utf8)),
+ release_cursor_interval => 0,
+ single_active_consumer_on => true}),
+ Meta = #{index => 1},
+ % adding some consumers
+ AddConsumer = fun(CTag, State) ->
+ {NewState, _, _} = apply(
+ Meta,
+ make_checkout({CTag, self()},
+ {once, 1, simple_prefetch}, #{}),
+ State),
+ NewState
+ end,
+ State1 = lists:foldl(AddConsumer, State0, [<<"ctag1">>, <<"ctag2">>, <<"ctag3">>, <<"ctag4">>]),
+
+ ?assertEqual(4, rabbit_fifo:query_consumer_count(State1)),
+ Consumers = rabbit_fifo:query_consumers(State1),
+ ?assertEqual(4, maps:size(Consumers)),
+ maps:fold(fun(_Key, {Pid, Tag, _, _, Active, ActivityStatus, _, _}, _Acc) ->
+ ?assertEqual(self(), Pid),
+ case Tag of
+ <<"ctag1">> ->
+ ?assert(Active),
+ ?assertEqual(single_active, ActivityStatus);
+ _ ->
+ ?assertNot(Active),
+ ?assertEqual(waiting, ActivityStatus)
+ end
+ end, [], Consumers).
+
+active_flag_updated_when_consumer_suspected_unsuspected_test(_) ->
+ State0 = init(#{name => ?FUNCTION_NAME,
+ queue_resource => rabbit_misc:r("/", queue,
+ atom_to_binary(?FUNCTION_NAME, utf8)),
+ release_cursor_interval => 0,
+ single_active_consumer_on => false}),
+
+ DummyFunction = fun() -> ok end,
+ Pid1 = spawn(DummyFunction),
+ Pid2 = spawn(DummyFunction),
+ Pid3 = spawn(DummyFunction),
+
+ % adding some consumers
+ AddConsumer = fun({CTag, ChannelId}, State) ->
+ {NewState, _, _} =
+ apply(
+ #{index => 1},
+ rabbit_fifo:make_checkout({CTag, ChannelId},
+ {once, 1, simple_prefetch},
+ #{}),
+ State),
+ NewState
+ end,
+ State1 = lists:foldl(AddConsumer, State0,
+ [{<<"ctag1">>, Pid1}, {<<"ctag2">>, Pid2}, {<<"ctag3">>, Pid2}, {<<"ctag4">>, Pid3}]),
+
+ {State2, _, Effects2} = apply(#{index => 3,
+ system_time => 1500}, {down, Pid1, noconnection}, State1),
+ % 1 effect to update the metrics of each consumer (they belong to the same node), 1 more effect to monitor the node
+ ?assertEqual(4 + 1, length(Effects2)),
+
+ {_, _, Effects3} = apply(#{index => 4}, {nodeup, node(self())}, State2),
+ % for each consumer: 1 effect to update the metrics, 1 effect to monitor the consumer PID
+ ?assertEqual(4 + 4, length(Effects3)).
+
+active_flag_not_updated_when_consumer_suspected_unsuspected_and_single_active_consumer_is_on_test(_) ->
+ State0 = init(#{name => ?FUNCTION_NAME,
+ queue_resource => rabbit_misc:r("/", queue,
+ atom_to_binary(?FUNCTION_NAME, utf8)),
+ release_cursor_interval => 0,
+ single_active_consumer_on => true}),
+
+ DummyFunction = fun() -> ok end,
+ Pid1 = spawn(DummyFunction),
+ Pid2 = spawn(DummyFunction),
+ Pid3 = spawn(DummyFunction),
+
+ % adding some consumers
+ AddConsumer = fun({CTag, ChannelId}, State) ->
+ {NewState, _, _} = apply(
+ #{index => 1},
+ make_checkout({CTag, ChannelId},
+ {once, 1, simple_prefetch}, #{}),
+ State),
+ NewState
+ end,
+ State1 = lists:foldl(AddConsumer, State0,
+ [{<<"ctag1">>, Pid1}, {<<"ctag2">>, Pid2},
+ {<<"ctag3">>, Pid2}, {<<"ctag4">>, Pid3}]),
+
+ {State2, _, Effects2} = apply(meta(2), {down, Pid1, noconnection}, State1),
+ % one monitor and one consumer status update (deactivated)
+ ?assertEqual(3, length(Effects2)),
+
+ {_, _, Effects3} = apply(meta(3), {nodeup, node(self())}, State2),
+ % for each consumer: 1 effect to monitor the consumer PID
+ ?assertEqual(5, length(Effects3)).
+
+single_active_cancelled_with_unacked_test(_) ->
+ State0 = init(#{name => ?FUNCTION_NAME,
+ queue_resource => rabbit_misc:r("/", queue,
+ atom_to_binary(?FUNCTION_NAME, utf8)),
+ release_cursor_interval => 0,
+ single_active_consumer_on => true}),
+
+ C1 = {<<"ctag1">>, self()},
+ C2 = {<<"ctag2">>, self()},
+ % adding some consumers
+ AddConsumer = fun(C, S0) ->
+ {S, _, _} = apply(
+ meta(1),
+ make_checkout(C,
+ {auto, 1, simple_prefetch},
+ #{}),
+ S0),
+ S
+ end,
+ State1 = lists:foldl(AddConsumer, State0, [C1, C2]),
+
+ %% enqueue 2 messages
+ {State2, _Effects2} = enq(3, 1, msg1, State1),
+ {State3, _Effects3} = enq(4, 2, msg2, State2),
+ %% one message should be checked out to C1
+ %% cancel C1
+ {State4, _, _} = apply(meta(5),
+ make_checkout(C1, cancel, #{}),
+ State3),
+ %% C2 should be the active consumer
+ ?assertMatch(#{C2 := #consumer{status = up,
+ checked_out = #{0 := _}}},
+ State4#rabbit_fifo.consumers),
+ %% C1 should be a cancelled consumer
+ ?assertMatch(#{C1 := #consumer{status = cancelled,
+ lifetime = once,
+ checked_out = #{0 := _}}},
+ State4#rabbit_fifo.consumers),
+ ?assertMatch([], State4#rabbit_fifo.waiting_consumers),
+
+ %% Ack both messages
+ {State5, _Effects5} = settle(C1, 1, 0, State4),
+ %% C1, already cancelled, should be removed once its message is settled
+ {State6, _Effects6} = settle(C2, 2, 0, State5),
+
+ %% C2 should remain
+ ?assertMatch(#{C2 := #consumer{status = up}},
+ State6#rabbit_fifo.consumers),
+ %% C1 should be gone
+ ?assertNotMatch(#{C1 := _},
+ State6#rabbit_fifo.consumers),
+ ?assertMatch([], State6#rabbit_fifo.waiting_consumers),
+ ok.
+
+single_active_with_credited_test(_) ->
+ State0 = init(#{name => ?FUNCTION_NAME,
+ queue_resource => rabbit_misc:r("/", queue,
+ atom_to_binary(?FUNCTION_NAME, utf8)),
+ release_cursor_interval => 0,
+ single_active_consumer_on => true}),
+
+ C1 = {<<"ctag1">>, self()},
+ C2 = {<<"ctag2">>, self()},
+ % adding some consumers
+ AddConsumer = fun(C, S0) ->
+ {S, _, _} = apply(
+ meta(1),
+ make_checkout(C,
+ {auto, 0, credited},
+ #{}),
+ S0),
+ S
+ end,
+ State1 = lists:foldl(AddConsumer, State0, [C1, C2]),
+
+ %% add some credit
+ C1Cred = rabbit_fifo:make_credit(C1, 5, 0, false),
+ {State2, _, _Effects2} = apply(meta(3), C1Cred, State1),
+ C2Cred = rabbit_fifo:make_credit(C2, 4, 0, false),
+ {State3, _} = apply(meta(4), C2Cred, State2),
+ %% both consumers should have credit
+ ?assertMatch(#{C1 := #consumer{credit = 5}},
+ State3#rabbit_fifo.consumers),
+ ?assertMatch([{C2, #consumer{credit = 4}}],
+ State3#rabbit_fifo.waiting_consumers),
+ ok.
+
+
+register_enqueuer_test(_) ->
+ State0 = init(#{name => ?FUNCTION_NAME,
+ queue_resource => rabbit_misc:r("/", queue,
+ atom_to_binary(?FUNCTION_NAME, utf8)),
+ max_length => 2,
+ overflow_strategy => reject_publish}),
+ %% simply registering should be ok when we're below limit
+ Pid1 = test_util:fake_pid(node()),
+ {State1, ok, [_]} = apply(meta(1), make_register_enqueuer(Pid1), State0),
+
+ {State2, ok, _} = apply(meta(2), rabbit_fifo:make_enqueue(Pid1, 1, one), State1),
+ %% registering another enqueuer should be ok
+ Pid2 = test_util:fake_pid(node()),
+ {State3, ok, [_]} = apply(meta(3), make_register_enqueuer(Pid2), State2),
+
+ {State4, ok, _} = apply(meta(4), rabbit_fifo:make_enqueue(Pid1, 2, two), State3),
+ {State5, ok, Efx} = apply(meta(5), rabbit_fifo:make_enqueue(Pid1, 3, three), State4),
+ % ct:pal("Efx ~p", [Efx]),
+ %% validate all registered enqueuers are notified of overflow state
+ ?ASSERT_EFF({send_msg, P, {queue_status, reject_publish}, [ra_event]}, P == Pid1, Efx),
+ ?ASSERT_EFF({send_msg, P, {queue_status, reject_publish}, [ra_event]}, P == Pid2, Efx),
+
+ %% this time, registering should return reject_publish
+ {State6, reject_publish, [_]} = apply(meta(6), make_register_enqueuer(
+ test_util:fake_pid(node())), State5),
+ ?assertMatch(#{num_enqueuers := 3}, rabbit_fifo:overview(State6)),
+
+
+ %% remove two messages; this should make the queue fall below the 0.8 limit
+ {State7, {dequeue, _, _}, _Efx7} =
+ apply(meta(7),
+ rabbit_fifo:make_checkout(<<"a">>, {dequeue, settled}, #{}), State6),
+ ct:pal("Efx7 ~p", [_Efx7]),
+ {State8, {dequeue, _, _}, Efx8} =
+ apply(meta(8),
+ rabbit_fifo:make_checkout(<<"a">>, {dequeue, settled}, #{}), State7),
+ ct:pal("Efx8 ~p", [Efx8]),
+ %% validate all registered enqueuers are notified the queue accepts publishes again
+ ?ASSERT_EFF({send_msg, P, {queue_status, go}, [ra_event]}, P == Pid1, Efx8),
+ ?ASSERT_EFF({send_msg, P, {queue_status, go}, [ra_event]}, P == Pid2, Efx8),
+ {_State9, {dequeue, _, _}, Efx9} =
+ apply(meta(9),
+ rabbit_fifo:make_checkout(<<"a">>, {dequeue, settled}, #{}), State8),
+ ?ASSERT_NO_EFF({send_msg, P, go, [ra_event]}, P == Pid1, Efx9),
+ ?ASSERT_NO_EFF({send_msg, P, go, [ra_event]}, P == Pid2, Efx9),
+ ok.
+
+reject_publish_purge_test(_) ->
+ State0 = init(#{name => ?FUNCTION_NAME,
+ queue_resource => rabbit_misc:r("/", queue,
+ atom_to_binary(?FUNCTION_NAME, utf8)),
+ max_length => 2,
+ overflow_strategy => reject_publish}),
+ %% simply registering should be ok when we're below limit
+ Pid1 = test_util:fake_pid(node()),
+ {State1, ok, [_]} = apply(meta(1), make_register_enqueuer(Pid1), State0),
+ {State2, ok, _} = apply(meta(2), rabbit_fifo:make_enqueue(Pid1, 1, one), State1),
+ {State3, ok, _} = apply(meta(3), rabbit_fifo:make_enqueue(Pid1, 2, two), State2),
+ {State4, ok, Efx} = apply(meta(4), rabbit_fifo:make_enqueue(Pid1, 3, three), State3),
+ % ct:pal("Efx ~p", [Efx]),
+ ?ASSERT_EFF({send_msg, P, {queue_status, reject_publish}, [ra_event]}, P == Pid1, Efx),
+ {_State5, {purge, 3}, Efx1} = apply(meta(5), rabbit_fifo:make_purge(), State4),
+ ?ASSERT_EFF({send_msg, P, {queue_status, go}, [ra_event]}, P == Pid1, Efx1),
+ ok.
+
+reject_publish_applied_after_limit_test(_) ->
+ InitConf = #{name => ?FUNCTION_NAME,
+ queue_resource => rabbit_misc:r("/", queue,
+ atom_to_binary(?FUNCTION_NAME, utf8))
+ },
+ State0 = init(InitConf),
+ %% simply registering should be ok when we're below limit
+ Pid1 = test_util:fake_pid(node()),
+ {State1, ok, [_]} = apply(meta(1), make_register_enqueuer(Pid1), State0),
+ {State2, ok, _} = apply(meta(2), rabbit_fifo:make_enqueue(Pid1, 1, one), State1),
+ {State3, ok, _} = apply(meta(3), rabbit_fifo:make_enqueue(Pid1, 2, two), State2),
+ {State4, ok, Efx} = apply(meta(4), rabbit_fifo:make_enqueue(Pid1, 3, three), State3),
+ % ct:pal("Efx ~p", [Efx]),
+ ?ASSERT_NO_EFF({send_msg, P, {queue_status, reject_publish}, [ra_event]}, P == Pid1, Efx),
+ %% apply new config
+ Conf = #{name => ?FUNCTION_NAME,
+ queue_resource => rabbit_misc:r("/", queue,
+ atom_to_binary(?FUNCTION_NAME, utf8)),
+ max_length => 2,
+ overflow_strategy => reject_publish
+ },
+ {State5, ok, Efx1} = apply(meta(5), rabbit_fifo:make_update_config(Conf), State4),
+ ?ASSERT_EFF({send_msg, P, {queue_status, reject_publish}, [ra_event]}, P == Pid1, Efx1),
+ Pid2 = test_util:fake_pid(node()),
+ {_State6, reject_publish, _} = apply(meta(1), make_register_enqueuer(Pid2), State5),
+ ok.
+
+purge_nodes_test(_) ->
+ Node = purged@node,
+ ThisNode = node(),
+ EnqPid = test_util:fake_pid(Node),
+ EnqPid2 = test_util:fake_pid(node()),
+ ConPid = test_util:fake_pid(Node),
+ Cid = {<<"tag">>, ConPid},
+ % WaitingPid = test_util:fake_pid(Node),
+
+ State0 = init(#{name => ?FUNCTION_NAME,
+ queue_resource => rabbit_misc:r("/", queue,
+ atom_to_binary(?FUNCTION_NAME, utf8)),
+ single_active_consumer_on => false}),
+ {State1, _, _} = apply(meta(1),
+ rabbit_fifo:make_enqueue(EnqPid, 1, msg1),
+ State0),
+ {State2, _, _} = apply(meta(2),
+ rabbit_fifo:make_enqueue(EnqPid2, 1, msg2),
+ State1),
+ {State3, _} = check(Cid, 3, 1000, State2),
+ {State4, _, _} = apply(meta(4),
+ {down, EnqPid, noconnection},
+ State3),
+ ?assertMatch(
+ [{mod_call, rabbit_quorum_queue, handle_tick,
+ [#resource{}, _Metrics,
+ [ThisNode, Node]
+ ]}] , rabbit_fifo:tick(1, State4)),
+ %% at this point there are both enqueuers and consumers
+ {State, _, _} = apply(meta(5),
+ rabbit_fifo:make_purge_nodes([Node]),
+ State4),
+
+ %% assert the enqueuer and consumer on the purged node are gone;
+ %% only the local enqueuer remains
+ ?assertMatch(#rabbit_fifo{enqueuers = Enqs} when map_size(Enqs) == 1,
+ State),
+
+ ?assertMatch(#rabbit_fifo{consumers = Cons} when map_size(Cons) == 0,
+ State),
+ ?assertMatch(
+ [{mod_call, rabbit_quorum_queue, handle_tick,
+ [#resource{}, _Metrics,
+ [ThisNode]
+ ]}] , rabbit_fifo:tick(1, State)),
+ ok.
+
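+%% Test helpers
+
+%% meta/1,2 build the command metadata map that ra passes to
+%% rabbit_fifo:apply/3: the raft index and term, a timestamp and the
+%% calling process.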
+meta(Idx) ->
+ meta(Idx, 0).
+
+meta(Idx, Timestamp) ->
+ #{index => Idx,
+ term => 1,
+ system_time => Timestamp,
+ from => {make_ref(), self()}}.
+
+enq(Idx, MsgSeq, Msg, State) ->
+ strip_reply(
+ apply(meta(Idx), rabbit_fifo:make_enqueue(self(), MsgSeq, Msg), State)).
+
+deq(Idx, Cid, Settlement, State0) ->
+ {State, {dequeue, {MsgId, Msg}, _}, _} =
+ apply(meta(Idx),
+ rabbit_fifo:make_checkout(Cid, {dequeue, Settlement}, #{}),
+ State0),
+ {State, {MsgId, Msg}}.
+
+check_n(Cid, Idx, N, State) ->
+ strip_reply(
+ apply(meta(Idx),
+ rabbit_fifo:make_checkout(Cid, {auto, N, simple_prefetch}, #{}),
+ State)).
+
+check(Cid, Idx, State) ->
+ strip_reply(
+ apply(meta(Idx),
+ rabbit_fifo:make_checkout(Cid, {once, 1, simple_prefetch}, #{}),
+ State)).
+
+check_auto(Cid, Idx, State) ->
+ strip_reply(
+ apply(meta(Idx),
+ rabbit_fifo:make_checkout(Cid, {auto, 1, simple_prefetch}, #{}),
+ State)).
+
+check(Cid, Idx, Num, State) ->
+ strip_reply(
+ apply(meta(Idx),
+ rabbit_fifo:make_checkout(Cid, {auto, Num, simple_prefetch}, #{}),
+ State)).
+
+settle(Cid, Idx, MsgId, State) ->
+ strip_reply(apply(meta(Idx), rabbit_fifo:make_settle(Cid, [MsgId]), State)).
+
+credit(Cid, Idx, Credit, DelCnt, Drain, State) ->
+ strip_reply(apply(meta(Idx), rabbit_fifo:make_credit(Cid, Credit, DelCnt, Drain),
+ State)).
+
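+%% drop the reply element from an apply/3 result, keeping only the
+%% state and effects.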
+strip_reply({State, _, Effects}) ->
+ {State, Effects}.
+
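+%% replay a list of {RaftIndex, Command} entries through the machine,
+%% accumulating all emitted effects.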
+run_log(InitState, Entries) ->
+ lists:foldl(fun ({Idx, E}, {Acc0, Efx0}) ->
+ case apply(meta(Idx), E, Acc0) of
+ {Acc, _, Efx} when is_list(Efx) ->
+ {Acc, Efx0 ++ Efx};
+ {Acc, _, Efx} ->
+ {Acc, Efx0 ++ [Efx]};
+ {Acc, _} ->
+ {Acc, Efx0}
+ end
+ end, {InitState, []}, Entries).
+
+
+%% AUX Tests
+
+aux_test(_) ->
+ _ = ra_machine_ets:start_link(),
+ Aux0 = init_aux(aux_test),
+ MacState = init(#{name => aux_test,
+ queue_resource =>
+ rabbit_misc:r(<<"/">>, queue, <<"test">>)}),
+ ok = meck:new(ra_log, []),
+ Log = mock_log,
+ meck:expect(ra_log, last_index_term, fun (_) -> {0, 0} end),
+ {no_reply, Aux, mock_log} = handle_aux(leader, cast, active, Aux0,
+ Log, MacState),
+ {no_reply, _Aux, mock_log} = handle_aux(leader, cast, tick, Aux,
+ Log, MacState),
+ %% the usage table stores {Name, Use} entries; compare the value,
+ %% not the whole tuple (any tuple compares greater than a float)
+ [{_, Use}] = ets:lookup(rabbit_fifo_usage, aux_test),
+ meck:unload(),
+ ?assert(Use > 0.0),
+ ok.
+
+
+%% machine version conversion test
+
+machine_version_test(_) ->
+ V0 = rabbit_fifo_v0,
+ S0 = V0:init(#{name => ?FUNCTION_NAME,
+ queue_resource => rabbit_misc:r(<<"/">>, queue, <<"test">>)}),
+ Idx = 1,
+ {#rabbit_fifo{}, ok, []} = apply(meta(Idx), {machine_version, 0, 1}, S0),
+
+ Cid = {atom_to_binary(?FUNCTION_NAME, utf8), self()},
+ Entries = [
+ {1, rabbit_fifo_v0:make_enqueue(self(), 1, banana)},
+ {2, rabbit_fifo_v0:make_enqueue(self(), 2, apple)},
+ {3, rabbit_fifo_v0:make_checkout(Cid, {auto, 1, unsettled}, #{})}
+ ],
+ {S1, _Effects} = rabbit_fifo_v0_SUITE:run_log(S0, Entries),
+ Self = self(),
+ {#rabbit_fifo{enqueuers = #{Self := #enqueuer{}},
+ consumers = #{Cid := #consumer{priority = 0}},
+ service_queue = S,
+ messages = Msgs}, ok, []} = apply(meta(Idx),
+ {machine_version, 0, 1}, S1),
+ %% validate message conversion to lqueue
+ ?assertEqual(1, lqueue:len(Msgs)),
+ ?assert(priority_queue:is_queue(S)),
+ ok.
+
+queue_ttl_test(_) ->
+ QName = rabbit_misc:r(<<"/">>, queue, <<"test">>),
+ Conf = #{name => ?FUNCTION_NAME,
+ queue_resource => QName,
+ created => 1000,
+ expires => 1000},
+ S0 = rabbit_fifo:init(Conf),
+ Now = 1500,
+ [{mod_call, _, handle_tick, _}] = rabbit_fifo:tick(Now, S0),
+ %% this should delete the queue
+ [{mod_call, rabbit_quorum_queue, spawn_deleter, [QName]}]
+ = rabbit_fifo:tick(Now + 1000, S0),
+ %% adding a consumer should not ever trigger deletion
+ Cid = {<<"cid1">>, self()},
+ {S1, _} = check_auto(Cid, 1, S0),
+ [{mod_call, _, handle_tick, _}] = rabbit_fifo:tick(Now, S1),
+ [{mod_call, _, handle_tick, _}] = rabbit_fifo:tick(Now + 1000, S1),
+ %% cancelling the consumer should then restart the expiry clock
+ {S2, _, _} = apply(meta(2, Now),
+ rabbit_fifo:make_checkout(Cid, cancel, #{}), S1),
+ %% last_active should have been reset when consumer was cancelled
+ %% last_active = 2500
+ [{mod_call, _, handle_tick, _}] = rabbit_fifo:tick(Now + 1000, S2),
+ %% but now it should be deleted
+ [{mod_call, rabbit_quorum_queue, spawn_deleter, [QName]}]
+ = rabbit_fifo:tick(Now + 2500, S2),
+
+ %% Same for downs
+ {S2D, _, _} = apply(meta(2, Now),
+ {down, self(), noconnection}, S1),
+ %% last_active should have been reset when the down event was handled
+ %% last_active = 2500
+ [{mod_call, _, handle_tick, _}] = rabbit_fifo:tick(Now + 1000, S2D),
+ %% but now it should be deleted
+ [{mod_call, rabbit_quorum_queue, spawn_deleter, [QName]}]
+ = rabbit_fifo:tick(Now + 2500, S2D),
+
+ %% a dequeue attempt should also reset last_active
+ {S1Deq, {dequeue, empty}} =
+ apply(meta(2, Now),
+ rabbit_fifo:make_checkout(Cid, {dequeue, unsettled}, #{}),
+ S0),
+
+ [{mod_call, _, handle_tick, _}] = rabbit_fifo:tick(Now + 1000, S1Deq),
+ %% but now it should be deleted
+ [{mod_call, rabbit_quorum_queue, spawn_deleter, [QName]}]
+ = rabbit_fifo:tick(Now + 2500, S1Deq),
+ %% Enqueue a message
+ {E1, _, _} = apply(meta(2, Now),
+ rabbit_fifo:make_enqueue(self(), 1, msg1), S0),
+ Deq = {<<"deq1">>, self()},
+ {E2, {dequeue, {MsgId, _}, _}, _} =
+ apply(meta(3, Now),
+ rabbit_fifo:make_checkout(Deq, {dequeue, unsettled}, #{}),
+ E1),
+ {E3, _, _} = apply(meta(3, Now + 1000),
+ rabbit_fifo:make_settle(Deq, [MsgId]), E2),
+ [{mod_call, _, handle_tick, _}] = rabbit_fifo:tick(Now + 1500, E3),
+ %% but now it should be deleted
+ [{mod_call, rabbit_quorum_queue, spawn_deleter, [QName]}]
+ = rabbit_fifo:tick(Now + 3000, E3),
+
+ ok.
+
+queue_ttl_with_single_active_consumer_test(_) ->
+ QName = rabbit_misc:r(<<"/">>, queue, <<"test">>),
+ Conf = #{name => ?FUNCTION_NAME,
+ queue_resource => QName,
+ created => 1000,
+ expires => 1000,
+ single_active_consumer_on => true},
+ S0 = rabbit_fifo:init(Conf),
+ Now = 1500,
+ [{mod_call, _, handle_tick, _}] = rabbit_fifo:tick(Now, S0),
+ %% this should delete the queue
+ [{mod_call, rabbit_quorum_queue, spawn_deleter, [QName]}]
+ = rabbit_fifo:tick(Now + 1000, S0),
+ %% adding a consumer should not ever trigger deletion
+ Cid = {<<"cid1">>, self()},
+ {S1, _} = check_auto(Cid, 1, S0),
+ [{mod_call, _, handle_tick, _}] = rabbit_fifo:tick(Now, S1),
+ [{mod_call, _, handle_tick, _}] = rabbit_fifo:tick(Now + 1000, S1),
+ %% cancelling the consumer should then restart the expiry clock
+ {S2, _, _} = apply(meta(2, Now),
+ rabbit_fifo:make_checkout(Cid, cancel, #{}), S1),
+ %% last_active should have been reset when consumer was cancelled
+ %% last_active = 2500
+ [{mod_call, _, handle_tick, _}] = rabbit_fifo:tick(Now + 1000, S2),
+ %% but now it should be deleted
+ [{mod_call, rabbit_quorum_queue, spawn_deleter, [QName]}]
+ = rabbit_fifo:tick(Now + 2500, S2),
+
+ %% Same for downs
+ {S2D, _, _} = apply(meta(2, Now),
+ {down, self(), noconnection}, S1),
+ %% last_active should have been reset when the down event was handled
+ %% last_active = 2500
+ [{mod_call, _, handle_tick, _}] = rabbit_fifo:tick(Now + 1000, S2D),
+ %% but now it should be deleted
+ [{mod_call, rabbit_quorum_queue, spawn_deleter, [QName]}]
+ = rabbit_fifo:tick(Now + 2500, S2D),
+
+ ok.
+
+query_peek_test(_) ->
+ State0 = test_init(test),
+ ?assertEqual({error, no_message_at_pos}, rabbit_fifo:query_peek(1, State0)),
+ {State1, _} = enq(1, 1, first, State0),
+ {State2, _} = enq(2, 2, second, State1),
+ ?assertMatch({ok, {_, {_, first}}}, rabbit_fifo:query_peek(1, State1)),
+ ?assertEqual({error, no_message_at_pos}, rabbit_fifo:query_peek(2, State1)),
+ ?assertMatch({ok, {_, {_, first}}}, rabbit_fifo:query_peek(1, State2)),
+ ?assertMatch({ok, {_, {_, second}}}, rabbit_fifo:query_peek(2, State2)),
+ ?assertEqual({error, no_message_at_pos}, rabbit_fifo:query_peek(3, State2)),
+ ok.
+
+checkout_priority_test(_) ->
+ Cid = {<<"checkout_priority_test">>, self()},
+ Pid = spawn(fun () -> ok end),
+ Cid2 = {<<"checkout_priority_test2">>, Pid},
+ Args = [{<<"x-priority">>, long, 1}],
+ {S1, _, _} =
+ apply(meta(3),
+ rabbit_fifo:make_checkout(Cid, {once, 2, simple_prefetch},
+ #{args => Args}),
+ test_init(test)),
+ {S2, _, _} =
+ apply(meta(3),
+ rabbit_fifo:make_checkout(Cid2, {once, 2, simple_prefetch},
+ #{args => []}),
+ S1),
+ {S3, E3} = enq(1, 1, first, S2),
+ ?ASSERT_EFF({send_msg, P, {delivery, _, _}, _}, P == self(), E3),
+ {S4, E4} = enq(2, 2, second, S3),
+ ?ASSERT_EFF({send_msg, P, {delivery, _, _}, _}, P == self(), E4),
+ {_S5, E5} = enq(3, 3, third, S4),
+ ?ASSERT_EFF({send_msg, P, {delivery, _, _}, _}, P == Pid, E5),
+ ok.
+
+%% Utility
+
+init(Conf) -> rabbit_fifo:init(Conf).
+make_register_enqueuer(Pid) -> rabbit_fifo:make_register_enqueuer(Pid).
+apply(Meta, Entry, State) -> rabbit_fifo:apply(Meta, Entry, State).
+init_aux(Conf) -> rabbit_fifo:init_aux(Conf).
+handle_aux(S, T, C, A, L, M) -> rabbit_fifo:handle_aux(S, T, C, A, L, M).
+make_checkout(C, S, M) -> rabbit_fifo:make_checkout(C, S, M).
diff --git a/deps/rabbit/test/rabbit_fifo_int_SUITE.erl b/deps/rabbit/test/rabbit_fifo_int_SUITE.erl
new file mode 100644
index 0000000000..37f5436dbf
--- /dev/null
+++ b/deps/rabbit/test/rabbit_fifo_int_SUITE.erl
@@ -0,0 +1,661 @@
+-module(rabbit_fifo_int_SUITE).
+
+%% rabbit_fifo and rabbit_fifo_client integration suite
+
+-compile(export_all).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-define(RA_EVENT_TIMEOUT, 5000).
+
+all() ->
+ [
+ {group, tests}
+ ].
+
+all_tests() ->
+ [
+ basics,
+ return,
+ rabbit_fifo_returns_correlation,
+ resends_lost_command,
+ returns_after_down,
+ resends_after_lost_applied,
+ handles_reject_notification,
+ two_quick_enqueues,
+ detects_lost_delivery,
+ dequeue,
+ discard,
+ cancel_checkout,
+ credit,
+ untracked_enqueue,
+ flow,
+ test_queries,
+ duplicate_delivery,
+ usage
+ ].
+
+groups() ->
+ [
+ {tests, [], all_tests()}
+ ].
+
+init_per_group(_, Config) ->
+ PrivDir = ?config(priv_dir, Config),
+ _ = application:load(ra),
+ ok = application:set_env(ra, data_dir, PrivDir),
+ application:ensure_all_started(ra),
+ application:ensure_all_started(lg),
+ Config.
+
+end_per_group(_, Config) ->
+ _ = application:stop(ra),
+ Config.
+
+init_per_testcase(TestCase, Config) ->
+ meck:new(rabbit_quorum_queue, [passthrough]),
+ meck:expect(rabbit_quorum_queue, handle_tick, fun (_, _, _) -> ok end),
+ meck:expect(rabbit_quorum_queue, file_handle_leader_reservation, fun (_) -> ok end),
+ meck:expect(rabbit_quorum_queue, file_handle_other_reservation, fun () -> ok end),
+ meck:expect(rabbit_quorum_queue, cancel_consumer_handler,
+ fun (_, _) -> ok end),
+ ra_server_sup_sup:remove_all(),
+ ServerName2 = list_to_atom(atom_to_list(TestCase) ++ "2"),
+ ServerName3 = list_to_atom(atom_to_list(TestCase) ++ "3"),
+ ClusterName = rabbit_misc:r("/", queue, atom_to_binary(TestCase, utf8)),
+ [
+ {cluster_name, ClusterName},
+ {uid, atom_to_binary(TestCase, utf8)},
+ {node_id, {TestCase, node()}},
+ {uid2, atom_to_binary(ServerName2, utf8)},
+ {node_id2, {ServerName2, node()}},
+ {uid3, atom_to_binary(ServerName3, utf8)},
+ {node_id3, {ServerName3, node()}}
+ | Config].
+
+end_per_testcase(_, Config) ->
+ meck:unload(),
+ Config.
+
+basics(Config) ->
+ ClusterName = ?config(cluster_name, Config),
+ ServerId = ?config(node_id, Config),
+ UId = ?config(uid, Config),
+ CustomerTag = UId,
+ ok = start_cluster(ClusterName, [ServerId]),
+ FState0 = rabbit_fifo_client:init(ClusterName, [ServerId]),
+ {ok, FState1} = rabbit_fifo_client:checkout(CustomerTag, 1, simple_prefetch,
+ #{}, FState0),
+
+ ra_log_wal:force_roll_over(ra_log_wal),
+ % force the creation of a segment; the segment will trigger a snapshot
+ timer:sleep(1000),
+
+ {ok, FState2} = rabbit_fifo_client:enqueue(one, FState1),
+ % process ra events
+ FState3 = process_ra_event(FState2, ?RA_EVENT_TIMEOUT),
+
+ FState5 = receive
+ {ra_event, From, Evt} ->
+ case rabbit_fifo_client:handle_ra_event(From, Evt, FState3) of
+ {ok, FState4,
+ [{deliver, C, true,
+ [{_Qname, _QRef, MsgId, _SomBool, _Msg}]}]} ->
+ {S, _A} = rabbit_fifo_client:settle(C, [MsgId], FState4),
+ S
+ end
+ after 5000 ->
+ exit(await_msg_timeout)
+ end,
+
+ % process settle applied notification
+ FState5b = process_ra_event(FState5, ?RA_EVENT_TIMEOUT),
+ _ = ra:stop_server(ServerId),
+ _ = ra:restart_server(ServerId),
+
+ %% wait for leader change to notice server is up again
+ receive
+ {ra_event, _, {machine, leader_change}} -> ok
+ after 5000 ->
+ exit(leader_change_timeout)
+ end,
+
+ {ok, FState6} = rabbit_fifo_client:enqueue(two, FState5b),
+ % process applied event
+ FState6b = process_ra_event(FState6, ?RA_EVENT_TIMEOUT),
+
+ receive
+ {ra_event, Frm, E} ->
+ case rabbit_fifo_client:handle_ra_event(Frm, E, FState6b) of
+ {ok, FState7, [{deliver, Ctag, true,
+ [{_, _, Mid, _, two}]}]} ->
+ {_, _} = rabbit_fifo_client:return(Ctag, [Mid], FState7),
+ ok
+ end
+ after 2000 ->
+ exit(await_msg_timeout)
+ end,
+ ra:stop_server(ServerId),
+ ok.
+
+return(Config) ->
+ ClusterName = ?config(cluster_name, Config),
+ ServerId = ?config(node_id, Config),
+ ok = start_cluster(ClusterName, [ServerId]),
+
+ F00 = rabbit_fifo_client:init(ClusterName, [ServerId]),
+ {ok, F0} = rabbit_fifo_client:enqueue(1, msg1, F00),
+ {ok, F1} = rabbit_fifo_client:enqueue(2, msg2, F0),
+ {_, _, F2} = process_ra_events(receive_ra_events(2, 0), F1),
+ {ok, _, {_, _, MsgId, _, _}, F} = rabbit_fifo_client:dequeue(<<"tag">>, unsettled, F2),
+ _F2 = rabbit_fifo_client:return(<<"tag">>, [MsgId], F),
+
+ ra:stop_server(ServerId),
+ ok.
+
+rabbit_fifo_returns_correlation(Config) ->
+ ClusterName = ?config(cluster_name, Config),
+ ServerId = ?config(node_id, Config),
+ ok = start_cluster(ClusterName, [ServerId]),
+ F0 = rabbit_fifo_client:init(ClusterName, [ServerId]),
+ {ok, F1} = rabbit_fifo_client:enqueue(corr1, msg1, F0),
+ receive
+ {ra_event, Frm, E} ->
+ case rabbit_fifo_client:handle_ra_event(Frm, E, F1) of
+ {ok, _F2, [{settled, _, _}]} ->
+ ok;
+ Del ->
+ exit({unexpected, Del})
+ end
+ after 2000 ->
+ exit(await_msg_timeout)
+ end,
+ ra:stop_server(ServerId),
+ ok.
+
+duplicate_delivery(Config) ->
+ ClusterName = ?config(cluster_name, Config),
+ ServerId = ?config(node_id, Config),
+ ok = start_cluster(ClusterName, [ServerId]),
+ F0 = rabbit_fifo_client:init(ClusterName, [ServerId]),
+ {ok, F1} = rabbit_fifo_client:checkout(<<"tag">>, 10, simple_prefetch, #{}, F0),
+ {ok, F2} = rabbit_fifo_client:enqueue(corr1, msg1, F1),
+ Fun = fun Loop(S0) ->
+ receive
+ {ra_event, Frm, E} = Evt ->
+ case rabbit_fifo_client:handle_ra_event(Frm, E, S0) of
+ {ok, S1, [{settled, _, _}]} ->
+ Loop(S1);
+ {ok, S1, _} ->
+ %% repeat event delivery
+ self() ! Evt,
+ %% check that then next received delivery doesn't
+ %% repeat or crash
+ receive
+ {ra_event, F, E1} ->
+ case rabbit_fifo_client:handle_ra_event(
+ F, E1, S1) of
+ {ok, S2, _} ->
+ S2
+ end
+ end
+ end
+ after 2000 ->
+ exit(await_msg_timeout)
+ end
+ end,
+ Fun(F2),
+ ra:stop_server(ServerId),
+ ok.
+
+usage(Config) ->
+ ClusterName = ?config(cluster_name, Config),
+ ServerId = ?config(node_id, Config),
+ ok = start_cluster(ClusterName, [ServerId]),
+ F0 = rabbit_fifo_client:init(ClusterName, [ServerId]),
+ {ok, F1} = rabbit_fifo_client:checkout(<<"tag">>, 10, simple_prefetch, #{}, F0),
+ {ok, F2} = rabbit_fifo_client:enqueue(corr1, msg1, F1),
+ {ok, F3} = rabbit_fifo_client:enqueue(corr2, msg2, F2),
+ {_, _, _} = process_ra_events(receive_ra_events(2, 2), F3),
+ % force tick and usage stats emission
+ ServerId ! tick_timeout,
+ timer:sleep(50),
+ Use = rabbit_fifo:usage(element(1, ServerId)),
+ ra:stop_server(ServerId),
+ ?assert(Use > 0.0),
+ ok.
+
+resends_lost_command(Config) ->
+ ClusterName = ?config(cluster_name, Config),
+ ServerId = ?config(node_id, Config),
+ ok = start_cluster(ClusterName, [ServerId]),
+
+ ok = meck:new(ra, [passthrough]),
+
+ F0 = rabbit_fifo_client:init(ClusterName, [ServerId]),
+ {ok, F1} = rabbit_fifo_client:enqueue(msg1, F0),
+ % lose the enqueue
+ meck:expect(ra, pipeline_command, fun (_, _, _) -> ok end),
+ {ok, F2} = rabbit_fifo_client:enqueue(msg2, F1),
+ meck:unload(ra),
+ {ok, F3} = rabbit_fifo_client:enqueue(msg3, F2),
+ {_, _, F4} = process_ra_events(receive_ra_events(2, 0), F3),
+ {ok, _, {_, _, _, _, msg1}, F5} = rabbit_fifo_client:dequeue(<<"tag">>, settled, F4),
+ {ok, _, {_, _, _, _, msg2}, F6} = rabbit_fifo_client:dequeue(<<"tag">>, settled, F5),
+ {ok, _, {_, _, _, _, msg3}, _F7} = rabbit_fifo_client:dequeue(<<"tag">>, settled, F6),
+ ra:stop_server(ServerId),
+ ok.
+
+two_quick_enqueues(Config) ->
+ ClusterName = ?config(cluster_name, Config),
+ ServerId = ?config(node_id, Config),
+ ok = start_cluster(ClusterName, [ServerId]),
+
+ F0 = rabbit_fifo_client:init(ClusterName, [ServerId]),
+ F1 = element(2, rabbit_fifo_client:enqueue(msg1, F0)),
+ {ok, F2} = rabbit_fifo_client:enqueue(msg2, F1),
+ _ = process_ra_events(receive_ra_events(2, 0), F2),
+ ra:stop_server(ServerId),
+ ok.
+
+detects_lost_delivery(Config) ->
+ ClusterName = ?config(cluster_name, Config),
+ ServerId = ?config(node_id, Config),
+ ok = start_cluster(ClusterName, [ServerId]),
+
+ F000 = rabbit_fifo_client:init(ClusterName, [ServerId]),
+ {ok, F00} = rabbit_fifo_client:enqueue(msg1, F000),
+ {_, _, F0} = process_ra_events(receive_ra_events(1, 0), F00),
+ {ok, F1} = rabbit_fifo_client:checkout(<<"tag">>, 10, simple_prefetch, #{}, F0),
+ {ok, F2} = rabbit_fifo_client:enqueue(msg2, F1),
+ {ok, F3} = rabbit_fifo_client:enqueue(msg3, F2),
+ % lose first delivery
+ receive
+ {ra_event, _, {machine, {delivery, _, [{_, {_, msg1}}]}}} ->
+ ok
+ after 5000 ->
+ exit(await_delivery_timeout)
+ end,
+
+ % assert three deliveries were received
+ {[_, _, _], _, _} = process_ra_events(receive_ra_events(2, 2), F3),
+ ra:stop_server(ServerId),
+ ok.
+
+returns_after_down(Config) ->
+ ClusterName = ?config(cluster_name, Config),
+ ServerId = ?config(node_id, Config),
+ ok = start_cluster(ClusterName, [ServerId]),
+
+ F0 = rabbit_fifo_client:init(ClusterName, [ServerId]),
+ {ok, F1} = rabbit_fifo_client:enqueue(msg1, F0),
+ {_, _, F2} = process_ra_events(receive_ra_events(1, 0), F1),
+ % start a consumer in a separate process
+ % that exits after checkout
+ Self = self(),
+ _Pid = spawn(fun () ->
+ F = rabbit_fifo_client:init(ClusterName, [ServerId]),
+ {ok, _} = rabbit_fifo_client:checkout(<<"tag">>, 10,
+ simple_prefetch,
+ #{}, F),
+ Self ! checkout_done
+ end),
+ receive checkout_done -> ok after 1000 -> exit(checkout_done_timeout) end,
+ timer:sleep(1000),
+ % message should be available for dequeue
+ {ok, _, {_, _, _, _, msg1}, _} = rabbit_fifo_client:dequeue(<<"tag">>, settled, F2),
+ ra:stop_server(ServerId),
+ ok.
+
+resends_after_lost_applied(Config) ->
+ ClusterName = ?config(cluster_name, Config),
+ ServerId = ?config(node_id, Config),
+ ok = start_cluster(ClusterName, [ServerId]),
+
+ F0 = rabbit_fifo_client:init(ClusterName, [ServerId]),
+ {ok, F1} = rabbit_fifo_client:enqueue(msg1, F0),
+ {_, _, F2} = process_ra_events(receive_ra_events(1, 0), F1),
+ {ok, F3} = rabbit_fifo_client:enqueue(msg2, F2),
+ % lose an applied event
+ receive
+ {ra_event, _, {applied, _}} ->
+ ok
+ after 500 ->
+ exit(await_ra_event_timeout)
+ end,
+ % send another message
+ {ok, F4} = rabbit_fifo_client:enqueue(msg3, F3),
+ {_, _, F5} = process_ra_events(receive_ra_events(1, 0), F4),
+ {ok, _, {_, _, _, _, msg1}, F6} = rabbit_fifo_client:dequeue(<<"tag">>, settled, F5),
+ {ok, _, {_, _, _, _, msg2}, F7} = rabbit_fifo_client:dequeue(<<"tag">>, settled, F6),
+ {ok, _, {_, _, _, _, msg3}, _F8} = rabbit_fifo_client:dequeue(<<"tag">>, settled, F7),
+ ra:stop_server(ServerId),
+ ok.
+
+handles_reject_notification(Config) ->
+ ClusterName = ?config(cluster_name, Config),
+ ServerId1 = ?config(node_id, Config),
+ ServerId2 = ?config(node_id2, Config),
+ UId1 = ?config(uid, Config),
+ CId = {UId1, self()},
+
+ ok = start_cluster(ClusterName, [ServerId1, ServerId2]),
+ _ = ra:process_command(ServerId1,
+ rabbit_fifo:make_checkout(
+ CId,
+ {auto, 10, simple_prefetch},
+ #{})),
+ % reverse order - should try the first node in the list first
+ F0 = rabbit_fifo_client:init(ClusterName, [ServerId2, ServerId1]),
+ {ok, F1} = rabbit_fifo_client:enqueue(one, F0),
+
+ timer:sleep(500),
+
+ % the applied notification
+ _F2 = process_ra_events(receive_ra_events(1, 0), F1),
+ ra:stop_server(ServerId1),
+ ra:stop_server(ServerId2),
+ ok.
+
+discard(Config) ->
+ PrivDir = ?config(priv_dir, Config),
+ ServerId = ?config(node_id, Config),
+ UId = ?config(uid, Config),
+ ClusterName = ?config(cluster_name, Config),
+ Conf = #{cluster_name => ClusterName#resource.name,
+ id => ServerId,
+ uid => UId,
+ log_init_args => #{data_dir => PrivDir, uid => UId},
+ initial_member => [],
+ machine => {module, rabbit_fifo,
+ #{queue_resource => discard,
+ dead_letter_handler =>
+ {?MODULE, dead_letter_handler, [self()]}}}},
+ _ = ra:start_server(Conf),
+ ok = ra:trigger_election(ServerId),
+ _ = ra:members(ServerId),
+
+ F0 = rabbit_fifo_client:init(ClusterName, [ServerId]),
+ {ok, F1} = rabbit_fifo_client:checkout(<<"tag">>, 10,
+ simple_prefetch, #{}, F0),
+ {ok, F2} = rabbit_fifo_client:enqueue(msg1, F1),
+ F3 = discard_next_delivery(F2, 5000),
+ {empty, _F4} = rabbit_fifo_client:dequeue(<<"tag1">>, settled, F3),
+ receive
+ {dead_letter, Letters} ->
+ [{_, msg1}] = Letters,
+ ok
+ after 500 ->
+ flush(),
+ exit(dead_letter_timeout)
+ end,
+ ra:stop_server(ServerId),
+ ok.
+
+cancel_checkout(Config) ->
+ ClusterName = ?config(cluster_name, Config),
+ ServerId = ?config(node_id, Config),
+ ok = start_cluster(ClusterName, [ServerId]),
+ F0 = rabbit_fifo_client:init(ClusterName, [ServerId], 4),
+ {ok, F1} = rabbit_fifo_client:enqueue(m1, F0),
+ {ok, F2} = rabbit_fifo_client:checkout(<<"tag">>, 10, simple_prefetch, #{}, F1),
+ {_, _, F3} = process_ra_events(receive_ra_events(1, 1), F2, [], [], fun (_, S) -> S end),
+ {ok, F4} = rabbit_fifo_client:cancel_checkout(<<"tag">>, F3),
+ {F5, _} = rabbit_fifo_client:return(<<"tag">>, [0], F4),
+ {ok, _, {_, _, _, _, m1}, F5} = rabbit_fifo_client:dequeue(<<"d1">>, settled, F5),
+ ok.
+
+credit(Config) ->
+ ClusterName = ?config(cluster_name, Config),
+ ServerId = ?config(node_id, Config),
+ ok = start_cluster(ClusterName, [ServerId]),
+ F0 = rabbit_fifo_client:init(ClusterName, [ServerId], 4),
+ {ok, F1} = rabbit_fifo_client:enqueue(m1, F0),
+ {ok, F2} = rabbit_fifo_client:enqueue(m2, F1),
+ {_, _, F3} = process_ra_events(receive_ra_events(2, 0), F2),
+ %% checkout with 0 prefetch
+ {ok, F4} = rabbit_fifo_client:checkout(<<"tag">>, 0, credited, #{}, F3),
+ %% assert no deliveries
+ {_, _, F5} = process_ra_events(receive_ra_events(), F4, [], [],
+ fun
+ (D, _) -> error({unexpected_delivery, D})
+ end),
+ %% provide some credit
+ {F6, []} = rabbit_fifo_client:credit(<<"tag">>, 1, false, F5),
+ {[{_, _, _, _, m1}], [{send_credit_reply, _}], F7} =
+ process_ra_events(receive_ra_events(1, 1), F6),
+
+ %% credit and drain
+ {F8, []} = rabbit_fifo_client:credit(<<"tag">>, 4, true, F7),
+ {[{_, _, _, _, m2}], [{send_credit_reply, _}, {send_drained, _}], F9} =
+ process_ra_events(receive_ra_events(1, 1), F8),
+ flush(),
+
+ %% enqueue another message - at this point the consumer credit should be
+ %% all used up due to the drain
+ {ok, F10} = rabbit_fifo_client:enqueue(m3, F9),
+ %% assert no deliveries
+ {_, _, F11} = process_ra_events(receive_ra_events(), F10, [], [],
+ fun
+ (D, _) -> error({unexpected_delivery, D})
+ end),
+ %% credit again and receive the last message
+ {F12, []} = rabbit_fifo_client:credit(<<"tag">>, 10, false, F11),
+ {[{_, _, _, _, m3}], _, _} = process_ra_events(receive_ra_events(1, 1), F12),
+ ok.
+
+untracked_enqueue(Config) ->
+ ClusterName = ?config(cluster_name, Config),
+ ServerId = ?config(node_id, Config),
+ ok = start_cluster(ClusterName, [ServerId]),
+
+ ok = rabbit_fifo_client:untracked_enqueue([ServerId], msg1),
+ timer:sleep(100),
+ F0 = rabbit_fifo_client:init(ClusterName, [ServerId]),
+ {ok, _, {_, _, _, _, msg1}, _F5} = rabbit_fifo_client:dequeue(<<"tag">>, settled, F0),
+ ra:stop_server(ServerId),
+ ok.
+
+
+flow(Config) ->
+ ClusterName = ?config(cluster_name, Config),
+ ServerId = ?config(node_id, Config),
+ ok = start_cluster(ClusterName, [ServerId]),
+ F0 = rabbit_fifo_client:init(ClusterName, [ServerId], 3),
+ {ok, F1} = rabbit_fifo_client:enqueue(m1, F0),
+ {ok, F2} = rabbit_fifo_client:enqueue(m2, F1),
+ {ok, F3} = rabbit_fifo_client:enqueue(m3, F2),
+ {slow, F4} = rabbit_fifo_client:enqueue(m4, F3),
+ {_, _, F5} = process_ra_events(receive_ra_events(4, 0), F4),
+ {ok, _} = rabbit_fifo_client:enqueue(m5, F5),
+ ra:stop_server(ServerId),
+ ok.
+
+test_queries(Config) ->
+ % ok = logger:set_primary_config(level, all),
+ ClusterName = ?config(cluster_name, Config),
+ ServerId = ?config(node_id, Config),
+ ok = start_cluster(ClusterName, [ServerId]),
+ Self = self(),
+ P = spawn(fun () ->
+ F0 = rabbit_fifo_client:init(ClusterName, [ServerId], 4),
+ {ok, F1} = rabbit_fifo_client:enqueue(m1, F0),
+ {ok, F2} = rabbit_fifo_client:enqueue(m2, F1),
+ process_ra_events(receive_ra_events(2, 0), F2),
+ Self ! ready,
+ receive stop -> ok end
+ end),
+ receive
+ ready -> ok
+ after 5000 ->
+ exit(ready_timeout)
+ end,
+ F0 = rabbit_fifo_client:init(ClusterName, [ServerId], 4),
+ {ok, _} = rabbit_fifo_client:checkout(<<"tag">>, 1, simple_prefetch, #{}, F0),
+ {ok, {_, Ready}, _} = ra:local_query(ServerId,
+ fun rabbit_fifo:query_messages_ready/1),
+ ?assertEqual(1, Ready),
+ {ok, {_, Checked}, _} = ra:local_query(ServerId,
+ fun rabbit_fifo:query_messages_checked_out/1),
+ ?assertEqual(1, Checked),
+ {ok, {_, Processes}, _} = ra:local_query(ServerId,
+ fun rabbit_fifo:query_processes/1),
+ ?assertEqual(2, length(Processes)),
+ P ! stop,
+ ra:stop_server(ServerId),
+ ok.
+
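+%% invoked through the {?MODULE, dead_letter_handler, [self()]} machine
+%% configuration in discard/1; forwards dead-lettered messages to the
+%% test process.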
+dead_letter_handler(Pid, Msgs) ->
+ Pid ! {dead_letter, Msgs}.
+
+dequeue(Config) ->
+ ClusterName = ?config(cluster_name, Config),
+ ServerId = ?config(node_id, Config),
+ UId = ?config(uid, Config),
+ Tag = UId,
+ ok = start_cluster(ClusterName, [ServerId]),
+ F1 = rabbit_fifo_client:init(ClusterName, [ServerId]),
+ {empty, F1b} = rabbit_fifo_client:dequeue(Tag, settled, F1),
+ {ok, F2_} = rabbit_fifo_client:enqueue(msg1, F1b),
+ {_, _, F2} = process_ra_events(receive_ra_events(1, 0), F2_),
+
+ % {ok, {{0, {_, msg1}}, _}, F3} = rabbit_fifo_client:dequeue(Tag, settled, F2),
+ {ok, _, {_, _, 0, _, msg1}, F3} = rabbit_fifo_client:dequeue(Tag, settled, F2),
+ {ok, F4_} = rabbit_fifo_client:enqueue(msg2, F3),
+ {_, _, F4} = process_ra_events(receive_ra_events(1, 0), F4_),
+ {ok, _, {_, _, MsgId, _, msg2}, F5} = rabbit_fifo_client:dequeue(Tag, unsettled, F4),
+ {_F6, _A} = rabbit_fifo_client:settle(Tag, [MsgId], F5),
+ ra:stop_server(ServerId),
+ ok.
+
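+%% build a ra server configuration map, suitable for starting a member
+%% directly with ra:start_server/1.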
+conf(ClusterName, UId, ServerId, _, Peers) ->
+ #{cluster_name => ClusterName,
+ id => ServerId,
+ uid => UId,
+ log_init_args => #{uid => UId},
+ initial_members => Peers,
+ machine => {module, rabbit_fifo, #{}}}.
+
+process_ra_event(State, Wait) ->
+ receive
+ {ra_event, From, Evt} ->
+ {ok, S, _Actions} =
+ rabbit_fifo_client:handle_ra_event(From, Evt, State),
+ S
+ after Wait ->
+ exit(ra_event_timeout)
+ end.
+
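+%% receive ra events until at least the expected number of applied
+%% sequence numbers and delivered message ids have been observed.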
+receive_ra_events(Applied, Deliveries) ->
+ receive_ra_events(Applied, Deliveries, []).
+
+receive_ra_events(Applied, Deliveries, Acc) when Applied =< 0, Deliveries =< 0 ->
+ %% if more events than expected arrive, test cases should check for
+ %% them explicitly
+ lists:reverse(Acc);
+receive_ra_events(Applied, Deliveries, Acc) ->
+ receive
+ {ra_event, _, {applied, Seqs}} = Evt ->
+ receive_ra_events(Applied - length(Seqs), Deliveries, [Evt | Acc]);
+ {ra_event, _, {machine, {delivery, _, MsgIds}}} = Evt ->
+ receive_ra_events(Applied, Deliveries - length(MsgIds), [Evt | Acc]);
+ {ra_event, _, _} = Evt ->
+ receive_ra_events(Applied, Deliveries, [Evt | Acc])
+ after 5000 ->
+ exit({missing_events, Applied, Deliveries, Acc})
+ end.
+
+%% Flush the mailbox so a test can later check that no deliveries have been received
+receive_ra_events() ->
+ receive_ra_events([]).
+
+receive_ra_events(Acc) ->
+ receive
+ {ra_event, _, _} = Evt ->
+ receive_ra_events([Evt | Acc])
+ after 500 ->
+ Acc
+ end.
+
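+%% feed events through rabbit_fifo_client; the default delivery handler
+%% settles every delivered message, while the /5 variant accepts a
+%% custom handler (e.g. to fail on unexpected deliveries).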
+process_ra_events(Events, State) ->
+ DeliveryFun = fun ({deliver, _, Tag, Msgs}, S) ->
+ MsgIds = [element(1, M) || M <- Msgs],
+ {S0, _} = rabbit_fifo_client:settle(Tag, MsgIds, S),
+ S0
+ end,
+ process_ra_events(Events, State, [], [], DeliveryFun).
+
+process_ra_events([], State0, Acc, Actions0, _DeliveryFun) ->
+ {Acc, Actions0, State0};
+process_ra_events([{ra_event, From, Evt} | Events], State0, Acc, Actions0, DeliveryFun) ->
+ case rabbit_fifo_client:handle_ra_event(From, Evt, State0) of
+ {ok, State1, Actions1} ->
+ {Msgs, Actions, State} =
+ lists:foldl(
+ fun ({deliver, _, _, Msgs} = Del, {M, A, S}) ->
+ {M ++ Msgs, A, DeliveryFun(Del, S)};
+ (Ac, {M, A, S}) ->
+ {M, A ++ [Ac], S}
+ end, {Acc, [], State1}, Actions1),
+ process_ra_events(Events, State, Msgs, Actions0 ++ Actions, DeliveryFun);
+ eol ->
+ eol
+ end.
+
+discard_next_delivery(State0, Wait) ->
+ receive
+ {ra_event, _, {machine, {delivery, _, _}}} = Evt ->
+ element(3, process_ra_events([Evt], State0, [], [],
+ fun ({deliver, Tag, _, Msgs}, S) ->
+ MsgIds = [element(3, M) || M <- Msgs],
+ {S0, _} = rabbit_fifo_client:discard(Tag, MsgIds, S),
+ S0
+ end))
+ after Wait ->
+ State0
+ end.
+
+return_next_delivery(State0, Wait) ->
+ receive
+ {ra_event, _, {machine, {delivery, _, _}}} = Evt ->
+ element(3, process_ra_events([Evt], State0, [], [],
+ fun ({deliver, Tag, _, Msgs}, S) ->
+ MsgIds = [element(3, M) || M <- Msgs],
+ {S0, _} = rabbit_fifo_client:return(Tag, MsgIds, S),
+ S0
+ end))
+ after Wait ->
+ State0
+ end.
+
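+%% poll up to Num times (100ms apart) for the registered process to
+%% terminate.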
+validate_process_down(Name, 0) ->
+ exit({process_not_down, Name});
+validate_process_down(Name, Num) ->
+ case whereis(Name) of
+ undefined ->
+ ok;
+ _ ->
+ timer:sleep(100),
+ validate_process_down(Name, Num-1)
+ end.
+
+start_cluster(ClusterName, ServerIds, RaFifoConfig) ->
+ {ok, Started, _} = ra:start_cluster(ClusterName#resource.name,
+ {module, rabbit_fifo, RaFifoConfig},
+ ServerIds),
+ ?assertEqual(length(Started), length(ServerIds)),
+ ok.
+
+start_cluster(ClusterName, ServerIds) ->
+ start_cluster(ClusterName, ServerIds, #{name => some_name,
+ queue_resource => ClusterName}).
+
+flush() ->
+ receive
+ Msg ->
+ ct:pal("flushed: ~w~n", [Msg]),
+ flush()
+ after 10 ->
+ ok
+ end.
diff --git a/deps/rabbit/test/rabbit_fifo_prop_SUITE.erl b/deps/rabbit/test/rabbit_fifo_prop_SUITE.erl
new file mode 100644
index 0000000000..859db2178f
--- /dev/null
+++ b/deps/rabbit/test/rabbit_fifo_prop_SUITE.erl
@@ -0,0 +1,1211 @@
+-module(rabbit_fifo_prop_SUITE).
+
+-compile(export_all).
+
+-export([
+ ]).
+
+-include_lib("proper/include/proper.hrl").
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include("src/rabbit_fifo.hrl").
+
+%%%===================================================================
+%%% Common Test callbacks
+%%%===================================================================
+
+all() ->
+ [
+ {group, tests}
+ ].
+
+
+all_tests() ->
+ [
+ test_run_log,
+ snapshots,
+ scenario1,
+ scenario2,
+ scenario3,
+ scenario4,
+ scenario5,
+ scenario6,
+ scenario7,
+ scenario8,
+ scenario9,
+ scenario10,
+ scenario11,
+ scenario12,
+ scenario13,
+ scenario14,
+ scenario15,
+ scenario16,
+ scenario17,
+ scenario18,
+ scenario19,
+ scenario20,
+ scenario21,
+ scenario22,
+ single_active,
+ single_active_01,
+ single_active_02,
+ single_active_03,
+ single_active_ordering,
+ single_active_ordering_01,
+ single_active_ordering_03,
+ in_memory_limit,
+ max_length
+ % single_active_ordering_02
+ ].
+
+groups() ->
+ [
+ {tests, [], all_tests()}
+ ].
+
+init_per_suite(Config) ->
+ Config.
+
+end_per_suite(_Config) ->
+ ok.
+
+init_per_group(_Group, Config) ->
+ Config.
+
+end_per_group(_Group, _Config) ->
+ ok.
+
+init_per_testcase(_TestCase, Config) ->
+ Config.
+
+end_per_testcase(_TestCase, _Config) ->
+ ok.
+
+%%%===================================================================
+%%% Test cases
+%%%===================================================================
+
+% -type log_op() ::
+% {enqueue, pid(), maybe(msg_seqno()), Msg :: raw_msg()}.
+
+scenario1(_Config) ->
+ C1 = {<<>>, c:pid(0,6723,1)},
+ C2 = {<<0>>,c:pid(0,6723,1)},
+ E = c:pid(0,6720,1),
+
+ Commands = [
+ make_checkout(C1, {auto,2,simple_prefetch}),
+ make_enqueue(E,1,msg1),
+ make_enqueue(E,2,msg2),
+ make_checkout(C1, cancel), %% both on returns queue
+ make_checkout(C2, {auto,1,simple_prefetch}),
+ make_return(C2, [0]), %% E1 in returns, E2 with C2
+ make_return(C2, [1]), %% E2 in returns E1 with C2
+ make_settle(C2, [2]) %% E2 with C2
+ ],
+ run_snapshot_test(#{name => ?FUNCTION_NAME}, Commands),
+ ok.
+
+scenario2(_Config) ->
+ C1 = {<<>>, c:pid(0,346,1)},
+ C2 = {<<>>,c:pid(0,379,1)},
+ E = c:pid(0,327,1),
+ Commands = [make_checkout(C1, {auto,1,simple_prefetch}),
+ make_enqueue(E,1,msg1),
+ make_checkout(C1, cancel),
+ make_enqueue(E,2,msg2),
+ make_checkout(C2, {auto,1,simple_prefetch}),
+ make_settle(C1, [0]),
+ make_settle(C2, [0])
+ ],
+ run_snapshot_test(#{name => ?FUNCTION_NAME}, Commands),
+ ok.
+
+scenario3(_Config) ->
+ C1 = {<<>>, c:pid(0,179,1)},
+ E = c:pid(0,176,1),
+ Commands = [make_checkout(C1, {auto,2,simple_prefetch}),
+ make_enqueue(E,1,msg1),
+ make_return(C1, [0]),
+ make_enqueue(E,2,msg2),
+ make_enqueue(E,3,msg3),
+ make_settle(C1, [1]),
+ make_settle(C1, [2])
+ ],
+ run_snapshot_test(#{name => ?FUNCTION_NAME}, Commands),
+ ok.
+
+scenario4(_Config) ->
+ C1 = {<<>>, c:pid(0,179,1)},
+ E = c:pid(0,176,1),
+ Commands = [make_checkout(C1, {auto,1,simple_prefetch}),
+ make_enqueue(E,1,msg),
+ make_settle(C1, [0])
+ ],
+ run_snapshot_test(#{name => ?FUNCTION_NAME}, Commands),
+ ok.
+
+scenario5(_Config) ->
+ C1 = {<<>>, c:pid(0,505,0)},
+ E = c:pid(0,465,9),
+ Commands = [make_enqueue(E,1,<<0>>),
+ make_checkout(C1, {auto,1,simple_prefetch}),
+ make_enqueue(E,2,<<>>),
+ make_settle(C1,[0])],
+ run_snapshot_test(#{name => ?FUNCTION_NAME}, Commands),
+ ok.
+
+scenario6(_Config) ->
+ E = c:pid(0,465,9),
+ Commands = [make_enqueue(E,1,<<>>), %% 1 msg on queue - snap: prefix 1
+ make_enqueue(E,2,<<>>) %% still 1 msg on queue - snap: prefix 1
+ ],
+ run_snapshot_test(#{name => ?FUNCTION_NAME,
+ max_length => 1}, Commands),
+ ok.
+
+scenario7(_Config) ->
+ C1 = {<<>>, c:pid(0,208,0)},
+ E = c:pid(0,188,0),
+ Commands = [
+ make_enqueue(E,1,<<>>),
+ make_checkout(C1, {auto,1,simple_prefetch}),
+ make_enqueue(E,2,<<>>),
+ make_enqueue(E,3,<<>>),
+ make_settle(C1,[0])],
+ run_snapshot_test(#{name => ?FUNCTION_NAME,
+ max_length => 1}, Commands),
+ ok.
+
+scenario8(_Config) ->
+ C1 = {<<>>, c:pid(0,208,0)},
+ E = c:pid(0,188,0),
+ Commands = [
+ make_enqueue(E,1,<<>>),
+ make_enqueue(E,2,<<>>),
+ make_checkout(C1, {auto,1,simple_prefetch}),
+ % make_checkout(C1, cancel),
+ {down, E, noconnection},
+ make_settle(C1, [0])],
+ run_snapshot_test(#{name => ?FUNCTION_NAME,
+ max_length => 1}, Commands),
+ ok.
+
+scenario9(_Config) ->
+ E = c:pid(0,188,0),
+ Commands = [
+ make_enqueue(E,1,<<>>),
+ make_enqueue(E,2,<<>>),
+ make_enqueue(E,3,<<>>)],
+ run_snapshot_test(#{name => ?FUNCTION_NAME,
+ max_length => 1}, Commands),
+ ok.
+
+scenario10(_Config) ->
+ C1 = {<<>>, c:pid(0,208,0)},
+ E = c:pid(0,188,0),
+ Commands = [
+ make_checkout(C1, {auto,1,simple_prefetch}),
+ make_enqueue(E,1,<<>>),
+ make_settle(C1, [0])
+ ],
+ run_snapshot_test(#{name => ?FUNCTION_NAME,
+ max_length => 1}, Commands),
+ ok.
+
+scenario11(_Config) ->
+ C1 = {<<>>, c:pid(0,215,0)},
+ E = c:pid(0,217,0),
+ Commands = [
+ make_enqueue(E,1,<<>>),
+ make_checkout(C1, {auto,1,simple_prefetch}),
+ make_checkout(C1, cancel),
+ make_enqueue(E,2,<<>>),
+ make_checkout(C1, {auto,1,simple_prefetch}),
+ make_settle(C1, [0]),
+ make_checkout(C1, cancel)
+ ],
+ run_snapshot_test(#{name => ?FUNCTION_NAME,
+ max_length => 2}, Commands),
+ ok.
+
+scenario12(_Config) ->
+ E = c:pid(0,217,0),
+ Commands = [make_enqueue(E,1,<<0>>),
+ make_enqueue(E,2,<<0>>),
+ make_enqueue(E,3,<<0>>)],
+ run_snapshot_test(#{name => ?FUNCTION_NAME,
+ max_bytes => 2}, Commands),
+ ok.
+
+scenario13(_Config) ->
+ E = c:pid(0,217,0),
+ Commands = [make_enqueue(E,1,<<0>>),
+ make_enqueue(E,2,<<>>),
+ make_enqueue(E,3,<<>>),
+ make_enqueue(E,4,<<>>)
+ ],
+ run_snapshot_test(#{name => ?FUNCTION_NAME,
+ max_length => 2}, Commands),
+ ok.
+
+scenario14(_Config) ->
+ E = c:pid(0,217,0),
+ Commands = [make_enqueue(E,1,<<0,0>>)],
+ run_snapshot_test(#{name => ?FUNCTION_NAME,
+ max_bytes => 1}, Commands),
+ ok.
+
+scenario15(_Config) ->
+ C1 = {<<>>, c:pid(0,179,1)},
+ E = c:pid(0,176,1),
+ Commands = [make_checkout(C1, {auto,2,simple_prefetch}),
+ make_enqueue(E, 1, msg1),
+ make_enqueue(E, 2, msg2),
+ make_return(C1, [0]),
+ make_return(C1, [2]),
+ make_settle(C1, [1])
+ ],
+ run_snapshot_test(#{name => ?FUNCTION_NAME,
+ delivery_limit => 1}, Commands),
+ ok.
+
+scenario16(_Config) ->
+ C1Pid = c:pid(0,883,1),
+ C1 = {<<>>, C1Pid},
+ C2 = {<<>>, c:pid(0,882,1)},
+ E = c:pid(0,176,1),
+ Commands = [
+ make_checkout(C1, {auto,1,simple_prefetch}),
+ make_enqueue(E, 1, msg1),
+ make_checkout(C2, {auto,1,simple_prefetch}),
+ {down, C1Pid, noproc}, %% msg1 allocated to C2
+ make_return(C2, [0]), %% msg1 returned
+ make_enqueue(E, 2, <<>>),
+ make_settle(C2, [0])
+ ],
+ run_snapshot_test(#{name => ?FUNCTION_NAME,
+ delivery_limit => 1}, Commands),
+ ok.
+
+scenario17(_Config) ->
+ C1Pid = test_util:fake_pid(rabbit@fake_node1),
+ C1 = {<<0>>, C1Pid},
+ % C2Pid = test_util:fake_pid(fake_node1),
+ C2 = {<<>>, C1Pid},
+ E = test_util:fake_pid(rabbit@fake_node2),
+ Commands = [
+ make_checkout(C1, {auto,1,simple_prefetch}),
+ make_enqueue(E,1,<<"one">>),
+ make_checkout(C2, {auto,1,simple_prefetch}),
+ {down, C1Pid, noconnection},
+ make_checkout(C2, cancel),
+ make_enqueue(E,2,<<"two">>),
+ {nodeup,rabbit@fake_node1},
+ %% this has no effect as the message was returned
+ make_settle(C1, [0]),
+ %% this should settle "one"
+ make_settle(C1, [1])
+ ],
+ run_snapshot_test(#{name => ?FUNCTION_NAME,
+ single_active_consumer_on => true
+ }, Commands),
+ ok.
+
+scenario18(_Config) ->
+ E = c:pid(0,176,1),
+ Commands = [make_enqueue(E,1,<<"1">>),
+ make_enqueue(E,2,<<"2">>),
+ make_enqueue(E,3,<<"3">>),
+ make_enqueue(E,4,<<"4">>),
+ make_enqueue(E,5,<<"5">>)
+ ],
+ run_snapshot_test(#{name => ?FUNCTION_NAME,
+ %% max_length => 3,
+ max_in_memory_length => 1}, Commands),
+ ok.
+
+scenario19(_Config) ->
+ C1Pid = c:pid(0,883,1),
+ C1 = {<<>>, C1Pid},
+ E = c:pid(0,176,1),
+ Commands = [make_enqueue(E,1,<<"1">>),
+ make_enqueue(E,2,<<"2">>),
+ make_checkout(C1, {auto,2,simple_prefetch}),
+ make_enqueue(E,3,<<"3">>),
+ make_settle(C1, [0, 1])
+ ],
+ run_snapshot_test(#{name => ?FUNCTION_NAME,
+ max_in_memory_bytes => 370,
+ max_in_memory_length => 1}, Commands),
+ ok.
+
+scenario20(_Config) ->
+ C1Pid = c:pid(0,883,1),
+ C1 = {<<>>, C1Pid},
+ E = c:pid(0,176,1),
+ Commands = [make_enqueue(E,1,<<>>),
+ make_enqueue(E,2,<<>>),
+ make_checkout(C1, {auto,2,simple_prefetch}),
+ {down, C1Pid, noconnection},
+ make_enqueue(E,3,<<0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0>>),
+ make_enqueue(E,4,<<0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0>>),
+ make_enqueue(E,5,<<0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0>>),
+ make_enqueue(E,6,<<0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0>>),
+ make_enqueue(E,7,<<0,0,0,0,0,0,0,0,0,0,0,0,0,0>>)
+ ],
+ run_snapshot_test(#{name => ?FUNCTION_NAME,
+ max_bytes => 97,
+ max_in_memory_length => 1}, Commands),
+ ok.
+
+scenario21(_Config) ->
+ C1Pid = c:pid(0,883,1),
+ C1 = {<<>>, C1Pid},
+ E = c:pid(0,176,1),
+ Commands = [
+ make_checkout(C1, {auto,2,simple_prefetch}),
+ make_enqueue(E,1,<<"1">>),
+ make_enqueue(E,2,<<"2">>),
+ make_enqueue(E,3,<<"3">>),
+ rabbit_fifo:make_discard(C1, [0]),
+ rabbit_fifo:make_settle(C1, [1])
+ ],
+ run_snapshot_test(#{name => ?FUNCTION_NAME,
+ release_cursor_interval => 1,
+ dead_letter_handler => {?MODULE, banana, []}},
+ Commands),
+ ok.
+
+scenario22(_Config) ->
+ % C1Pid = c:pid(0,883,1),
+ % C1 = {<<>>, C1Pid},
+ E = c:pid(0,176,1),
+ Commands = [
+ make_enqueue(E,1,<<"1">>),
+ make_enqueue(E,2,<<"2">>),
+ make_enqueue(E,3,<<"3">>),
+ make_enqueue(E,4,<<"4">>),
+ make_enqueue(E,5,<<"5">>)
+ ],
+ run_snapshot_test(#{name => ?FUNCTION_NAME,
+ release_cursor_interval => 1,
+ max_length => 3,
+ dead_letter_handler => {?MODULE, banana, []}},
+ Commands),
+ ok.
+
+single_active_01(_Config) ->
+ C1Pid = test_util:fake_pid(rabbit@fake_node1),
+ C1 = {<<0>>, C1Pid},
+ C2Pid = test_util:fake_pid(rabbit@fake_node2),
+ C2 = {<<>>, C2Pid},
+ E = test_util:fake_pid(rabbit@fake_node2),
+ Commands = [
+ make_checkout(C1, {auto,1,simple_prefetch}),
+ make_enqueue(E,1,<<"one">>),
+ make_checkout(C2, {auto,1,simple_prefetch}),
+ make_checkout(C1, cancel),
+ {nodeup,rabbit@fake_node1}
+ ],
+ ?assert(
+ single_active_prop(#{name => ?FUNCTION_NAME,
+ single_active_consumer_on => true
+ }, Commands, false)),
+ ok.
+
+single_active_02(_Config) ->
+ C1Pid = test_util:fake_pid(node()),
+ C1 = {<<0>>, C1Pid},
+ C2Pid = test_util:fake_pid(node()),
+ C2 = {<<>>, C2Pid},
+ E = test_util:fake_pid(node()),
+ Commands = [
+ make_checkout(C1, {auto,1,simple_prefetch}),
+ make_enqueue(E,1,<<"one">>),
+ {down,E,noconnection},
+ make_checkout(C2, {auto,1,simple_prefetch}),
+ make_checkout(C2, cancel),
+ {down,E,noconnection}
+ ],
+ Conf = config(?FUNCTION_NAME, undefined, undefined, true, 1, undefined, undefined),
+ ?assert(single_active_prop(Conf, Commands, false)),
+ ok.
+
+single_active_03(_Config) ->
+ C1Pid = test_util:fake_pid(node()),
+ C1 = {<<0>>, C1Pid},
+ % C2Pid = test_util:fake_pid(rabbit@fake_node2),
+ % C2 = {<<>>, C2Pid},
+ Pid = test_util:fake_pid(node()),
+ E = test_util:fake_pid(rabbit@fake_node2),
+ Commands = [
+ make_checkout(C1, {auto,2,simple_prefetch}),
+ make_enqueue(E, 1, 0),
+ make_enqueue(E, 2, 1),
+ {down, Pid, noconnection},
+ {nodeup, node()}
+ ],
+ Conf = config(?FUNCTION_NAME, 0, 0, true, 0, undefined, undefined),
+ ?assert(single_active_prop(Conf, Commands, true)),
+ ok.
+
+test_run_log(_Config) ->
+ Fun = {-1, fun ({Prev, _}) -> {Prev + 1, Prev + 1} end},
+ run_proper(
+ fun () ->
+ ?FORALL({Length, Bytes, SingleActiveConsumer, DeliveryLimit, InMemoryLength,
+ InMemoryBytes},
+ frequency([{10, {0, 0, false, 0, 0, 0}},
+ {5, {oneof([range(1, 10), undefined]),
+ oneof([range(1, 1000), undefined]),
+ boolean(),
+ oneof([range(1, 3), undefined]),
+ oneof([range(1, 10), undefined]),
+ oneof([range(1, 1000), undefined])
+ }}]),
+ ?FORALL(O, ?LET(Ops, log_gen(100), expand(Ops, Fun)),
+ collect({log_size, length(O)},
+ dump_generated(
+ config(?FUNCTION_NAME,
+ Length,
+ Bytes,
+ SingleActiveConsumer,
+ DeliveryLimit,
+ InMemoryLength,
+ InMemoryBytes), O))))
+ end, [], 10).
+
+snapshots(_Config) ->
+ run_proper(
+ fun () ->
+ ?FORALL({Length, Bytes, SingleActiveConsumer,
+ DeliveryLimit, InMemoryLength, InMemoryBytes,
+ Overflow},
+ frequency([{10, {0, 0, false, 0, 0, 0, drop_head}},
+ {5, {oneof([range(1, 10), undefined]),
+ oneof([range(1, 1000), undefined]),
+ boolean(),
+ oneof([range(1, 3), undefined]),
+ oneof([range(1, 10), undefined]),
+ oneof([range(1, 1000), undefined]),
+ oneof([drop_head, reject_publish])
+ }}]),
+ begin
+ Config = config(?FUNCTION_NAME,
+ Length,
+ Bytes,
+ SingleActiveConsumer,
+ DeliveryLimit,
+ InMemoryLength,
+ InMemoryBytes,
+ Overflow),
+ ?FORALL(O, ?LET(Ops, log_gen(256), expand(Ops, Config)),
+ collect({log_size, length(O)},
+ snapshots_prop(Config, O)))
+ end)
+ end, [], 2500).
+
+single_active(_Config) ->
+ Size = 2000,
+ run_proper(
+ fun () ->
+ ?FORALL({Length, Bytes, DeliveryLimit, InMemoryLength, InMemoryBytes},
+ frequency([{10, {0, 0, 0, 0, 0}},
+ {5, {oneof([range(1, 10), undefined]),
+ oneof([range(1, 1000), undefined]),
+ oneof([range(1, 3), undefined]),
+ oneof([range(1, 10), undefined]),
+ oneof([range(1, 1000), undefined])
+ }}]),
+ begin
+ Config = config(?FUNCTION_NAME,
+ Length,
+ Bytes,
+ true,
+ DeliveryLimit,
+ InMemoryLength,
+ InMemoryBytes),
+ ?FORALL(O, ?LET(Ops, log_gen(Size), expand(Ops, Config)),
+ collect({log_size, length(O)},
+ single_active_prop(Config, O, false)))
+ end)
+ end, [], Size).
+
+single_active_ordering(_Config) ->
+ Size = 2000,
+ Fun = {-1, fun ({Prev, _}) -> {Prev + 1, Prev + 1} end},
+ run_proper(
+ fun () ->
+ ?FORALL(O, ?LET(Ops, log_gen_ordered(Size), expand(Ops, Fun)),
+ collect({log_size, length(O)},
+ single_active_prop(config(?FUNCTION_NAME,
+ undefined,
+ undefined,
+ true,
+ undefined,
+ undefined,
+ undefined), O,
+ true)))
+ end, [], Size).
+
+single_active_ordering_01(_Config) ->
+% [{enqueue,<0.145.0>,1,0},
+% {enqueue,<0.145.0>,1,1},
+% {checkout,{<<>>,<0.148.0>},{auto,1,simple_prefetch},#{ack => true,args => [],prefetch => 1,username => <<117,115,101,114>>}}
+% {enqueue,<0.140.0>,1,2},
+% {settle,{<<>>,<0.148.0>},[0]}]
+ C1Pid = test_util:fake_pid(node()),
+ C1 = {<<0>>, C1Pid},
+ E = test_util:fake_pid(rabbit@fake_node2),
+ E2 = test_util:fake_pid(rabbit@fake_node2),
+ Commands = [
+ make_enqueue(E, 1, 0),
+ make_enqueue(E, 2, 1),
+ make_checkout(C1, {auto,2,simple_prefetch}),
+ make_enqueue(E2, 1, 2),
+ make_settle(C1, [0])
+ ],
+ Conf = config(?FUNCTION_NAME, 0, 0, true, 0, 0, 0),
+ ?assert(single_active_prop(Conf, Commands, true)),
+ ok.
+
+single_active_ordering_02(_Config) ->
+ %% this results in the pending enqueue being enqueued and violating
+ %% ordering
+% [{checkout, % {<<>>,<0.177.0>}, % {auto,1,simple_prefetch},
+% {enqueue,<0.172.0>,2,1},
+% {down,<0.172.0>,noproc},
+% {settle,{<<>>,<0.177.0>},[0]}]
+ C1Pid = test_util:fake_pid(node()),
+ C1 = {<<0>>, C1Pid},
+ E = test_util:fake_pid(node()),
+ Commands = [
+ make_checkout(C1, {auto,1,simple_prefetch}),
+ make_enqueue(E, 2, 1),
+ %% CANNOT HAPPEN
+ {down,E,noproc},
+ make_settle(C1, [0])
+ ],
+ Conf = config(?FUNCTION_NAME, 0, 0, true, 0, 0, 0),
+ ?assert(single_active_prop(Conf, Commands, true)),
+ ok.
+
+single_active_ordering_03(_Config) ->
+ C1Pid = test_util:fake_pid(node()),
+ C1 = {<<1>>, C1Pid},
+ C2Pid = test_util:fake_pid(rabbit@fake_node2),
+ C2 = {<<2>>, C2Pid},
+ E = test_util:fake_pid(rabbit@fake_node2),
+ Commands = [
+ make_enqueue(E, 1, 0),
+ make_enqueue(E, 2, 1),
+ make_enqueue(E, 3, 2),
+ make_checkout(C1, {auto,1,simple_prefetch}),
+ make_checkout(C2, {auto,1,simple_prefetch}),
+ make_settle(C1, [0]),
+ make_checkout(C1, cancel),
+ {down, C1Pid, noconnection}
+ ],
+ Conf0 = config(?FUNCTION_NAME, 0, 0, true, 0, 0, 0),
+ Conf = Conf0#{release_cursor_interval => 100},
+ Indexes = lists:seq(1, length(Commands)),
+ Entries = lists:zip(Indexes, Commands),
+ try run_log(test_init(Conf), Entries) of
+ {State, Effects} ->
+ ct:pal("Effects: ~p~n", [Effects]),
+ ct:pal("State: ~p~n", [State]),
+ %% assert C1 has no messages
+ ?assertNotMatch(#{C1 := _}, State#rabbit_fifo.consumers),
+ true;
+ _ ->
+ true
+ catch
+ Err ->
+ ct:pal("Commands: ~p~nConf~p~n", [Commands, Conf]),
+ ct:pal("Err: ~p~n", [Err]),
+ false
+ end.
+
+in_memory_limit(_Config) ->
+ Size = 2000,
+ run_proper(
+ fun () ->
+ ?FORALL({Length, Bytes, SingleActiveConsumer, DeliveryLimit,
+ InMemoryLength, InMemoryBytes},
+ frequency([{10, {0, 0, false, 0, 0, 0}},
+ {5, {oneof([range(1, 10), undefined]),
+ oneof([range(1, 1000), undefined]),
+ boolean(),
+ oneof([range(1, 3), undefined]),
+ range(1, 10),
+ range(1, 1000)
+ }}]),
+ begin
+ Config = config(?FUNCTION_NAME,
+ Length,
+ Bytes,
+ SingleActiveConsumer,
+ DeliveryLimit,
+ InMemoryLength,
+ InMemoryBytes),
+ ?FORALL(O, ?LET(Ops, log_gen(Size), expand(Ops, Config)),
+ collect({log_size, length(O)},
+ in_memory_limit_prop(Config, O)))
+ end)
+ end, [], Size).
+
+max_length(_Config) ->
+    %% tests that the max_length limit is never exceeded
+ Size = 1000,
+ run_proper(
+ fun () ->
+ ?FORALL({Length, SingleActiveConsumer, DeliveryLimit,
+ InMemoryLength},
+ {oneof([range(1, 100), undefined]),
+ boolean(),
+ range(1, 3),
+ range(1, 10)
+ },
+ begin
+ Config = config(?FUNCTION_NAME,
+ Length,
+ undefined,
+ SingleActiveConsumer,
+ DeliveryLimit,
+ InMemoryLength,
+ undefined),
+ ?FORALL(O, ?LET(Ops, log_gen_config(Size),
+ expand(Ops, Config)),
+ collect({log_size, length(O)},
+ max_length_prop(Config, O)))
+ end)
+ end, [], Size).
+
+config(Name, Length, Bytes, SingleActive, DeliveryLimit,
+ InMemoryLength, InMemoryBytes) ->
+    config(Name, Length, Bytes, SingleActive, DeliveryLimit,
+           InMemoryLength, InMemoryBytes, drop_head).
+
+config(Name, Length, Bytes, SingleActive, DeliveryLimit,
+ InMemoryLength, InMemoryBytes, Overflow) ->
+ #{name => Name,
+ max_length => map_max(Length),
+ max_bytes => map_max(Bytes),
+ dead_letter_handler => {?MODULE, banana, []},
+ single_active_consumer_on => SingleActive,
+ delivery_limit => map_max(DeliveryLimit),
+ max_in_memory_length => map_max(InMemoryLength),
+ max_in_memory_bytes => map_max(InMemoryBytes),
+ overflow_strategy => Overflow}.
+
+map_max(0) -> undefined;
+map_max(N) -> N.
+
+in_memory_limit_prop(Conf0, Commands) ->
+ Conf = Conf0#{release_cursor_interval => 100},
+ Indexes = lists:seq(1, length(Commands)),
+ Entries = lists:zip(Indexes, Commands),
+ try run_log(test_init(Conf), Entries) of
+ {_State, Effects} ->
+            %% validate that log effects never reference indexes below
+            %% the last seen release cursor
+ lists:foldl(fun ({log, Idxs, _}, ReleaseCursorIdx) ->
+ validate_idx_order(Idxs, ReleaseCursorIdx),
+ ReleaseCursorIdx;
+ ({release_cursor, Idx, _}, _) ->
+ Idx;
+ (_, Acc) ->
+ Acc
+ end, 0, Effects),
+ true;
+ _ ->
+ true
+ catch
+ Err ->
+ ct:pal("Commands: ~p~nConf~p~n", [Commands, Conf]),
+ ct:pal("Err: ~p~n", [Err]),
+ false
+ end.
+
+max_length_prop(Conf0, Commands) ->
+ Conf = Conf0#{release_cursor_interval => 100},
+ Indexes = lists:seq(1, length(Commands)),
+ Entries = lists:zip(Indexes, Commands),
+ Invariant = fun (#rabbit_fifo{cfg = #cfg{max_length = MaxLen}} = S) ->
+ #{num_ready_messages := MsgReady} = rabbit_fifo:overview(S),
+ % ct:pal("msg Ready ~w ~w", [MsgReady, MaxLen]),
+ MsgReady =< MaxLen
+ end,
+ try run_log(test_init(Conf), Entries, Invariant) of
+ {_State, _Effects} ->
+ true;
+ _ ->
+ true
+ catch
+ Err ->
+ ct:pal("Commands: ~p~nConf~p~n", [Commands, Conf]),
+ ct:pal("Err: ~p~n", [Err]),
+ false
+ end.
+
+validate_idx_order([], _ReleaseCursorIdx) ->
+ true;
+validate_idx_order(Idxs, ReleaseCursorIdx) ->
+ Min = lists:min(Idxs),
+ case Min < ReleaseCursorIdx of
+ true ->
+ throw({invalid_log_index, Min, ReleaseCursorIdx});
+ false ->
+ ok
+ end.
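+
+%% Illustrative note (not part of the suite): with a release cursor at
+%% index 5, a later {log, [3, 6], _} effect would fail this check because
+%% lists:min([3, 6]) is below the cursor, i.e. the effect references a log
+%% index the snapshot has already made redundant.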
+
+single_active_prop(Conf0, Commands, ValidateOrder) ->
+ Conf = Conf0#{release_cursor_interval => 100},
+ Indexes = lists:seq(1, length(Commands)),
+ Entries = lists:zip(Indexes, Commands),
+    %% invariant: there can only be one active consumer at any one time;
+    %% there can, however, be multiple cancelled consumers
+ Invariant = fun (#rabbit_fifo{consumers = Consumers}) ->
+ Up = maps:filter(fun (_, #consumer{status = S}) ->
+ S == up
+ end, Consumers),
+ map_size(Up) =< 1
+ end,
+ try run_log(test_init(Conf), Entries, Invariant) of
+ {_State, Effects} when ValidateOrder ->
+ % ct:pal("Effects: ~p~n", [Effects]),
+ % ct:pal("State: ~p~n", [State]),
+ %% validate message ordering
+ lists:foldl(fun ({send_msg, Pid, {delivery, Tag, Msgs}, ra_event},
+ Acc) ->
+ validate_msg_order({Tag, Pid}, Msgs, Acc);
+ (_, Acc) ->
+ Acc
+ end, -1, Effects),
+ true;
+ _ ->
+ true
+ catch
+ Err ->
+ ct:pal("Commands: ~p~nConf~p~n", [Commands, Conf]),
+ ct:pal("Err: ~p~n", [Err]),
+ false
+ end.
+
+%% single active consumer ordering invariant:
+%% only redelivered messages can go backwards
+validate_msg_order(_, [], S) ->
+ S;
+validate_msg_order(Cid, [{_, {H, Num}} | Rem], PrevMax) ->
+ Redelivered = is_map(H) andalso maps:is_key(delivery_count, H),
+ case undefined of
+ _ when Num == PrevMax + 1 ->
+ %% forwards case
+ validate_msg_order(Cid, Rem, Num);
+ _ when Redelivered andalso Num =< PrevMax ->
+            %% the seq is lower but this is a redelivery:
+            %% when the consumer changed and the next messages have been
+            %% redelivered we may go backwards, but we keep the highest seen
+ validate_msg_order(Cid, Rem, PrevMax);
+ _ ->
+ ct:pal("out of order ~w Prev ~w Curr ~w Redel ~w",
+ [Cid, PrevMax, Num, Redelivered]),
+ throw({outoforder, Cid, PrevMax, Num})
+ end.
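+
+%% Illustrative example: with PrevMax = 3, Num = 4 advances the window,
+%% while Num = 2 is only tolerated when the message header carries a
+%% delivery_count key (a redelivery); in that case PrevMax stays at 3.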
+
+dump_generated(Conf, Commands) ->
+ ct:pal("Commands: ~p~nConf~p~n", [Commands, Conf]),
+ true.
+
+snapshots_prop(Conf, Commands) ->
+ try run_snapshot_test(Conf, Commands) of
+ _ -> true
+ catch
+ Err ->
+ ct:pal("Commands: ~p~nConf~p~n", [Commands, Conf]),
+ ct:pal("Err: ~p~n", [Err]),
+ false
+ end.
+
+log_gen(Size) ->
+ log_gen(Size, binary()).
+
+log_gen(Size, _Body) ->
+ Nodes = [node(),
+ fakenode@fake,
+ fakenode@fake2
+ ],
+ ?LET(EPids, vector(2, pid_gen(Nodes)),
+ ?LET(CPids, vector(2, pid_gen(Nodes)),
+ resize(Size,
+ list(
+ frequency(
+ [{20, enqueue_gen(oneof(EPids))},
+ {40, {input_event,
+ frequency([{10, settle},
+ {2, return},
+ {2, discard},
+ {2, requeue}])}},
+ {2, checkout_gen(oneof(CPids))},
+ {1, checkout_cancel_gen(oneof(CPids))},
+ {1, down_gen(oneof(EPids ++ CPids))},
+ {1, nodeup_gen(Nodes)},
+ {1, purge}
+ ]))))).
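+
+%% A generated log is a list of abstract ops (values here are illustrative):
+%%   [{enqueue, EPid, delay, <<"m">>}, {input_event, settle}, purge]
+%% expand/2 below turns such a list into concrete rabbit_fifo commands.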
+
+log_gen_config(Size) ->
+ Nodes = [node(),
+ fakenode@fake,
+ fakenode@fake2
+ ],
+ ?LET(EPids, vector(2, pid_gen(Nodes)),
+ ?LET(CPids, vector(2, pid_gen(Nodes)),
+ resize(Size,
+ list(
+ frequency(
+ [{20, enqueue_gen(oneof(EPids))},
+ {40, {input_event,
+ frequency([{5, settle},
+ {5, return},
+ {2, discard},
+ {2, requeue}])}},
+ {2, checkout_gen(oneof(CPids))},
+ {1, checkout_cancel_gen(oneof(CPids))},
+ {1, down_gen(oneof(EPids ++ CPids))},
+ {1, nodeup_gen(Nodes)},
+ {1, purge},
+ {1, ?LET({MaxInMem,
+ MaxLen},
+ {choose(1, 10),
+ choose(1, 10)},
+ {update_config,
+ #{max_in_memory_length => MaxInMem,
+ max_length => MaxLen}})
+ }]))))).
+
+log_gen_ordered(Size) ->
+ Nodes = [node(),
+ fakenode@fake,
+ fakenode@fake2
+ ],
+ ?LET(EPids, vector(1, pid_gen(Nodes)),
+ ?LET(CPids, vector(8, pid_gen(Nodes)),
+ resize(Size,
+ list(
+ frequency(
+ [{20, enqueue_gen(oneof(EPids), 10, 0)},
+ {40, {input_event,
+ frequency([{15, settle},
+ {1, return},
+ {1, discard},
+ {1, requeue}])}},
+ {7, checkout_gen(oneof(CPids))},
+ {2, checkout_cancel_gen(oneof(CPids))},
+ {2, down_gen(oneof(EPids ++ CPids))},
+ {1, nodeup_gen(Nodes)}
+ ]))))).
+
+monotonic_gen() ->
+ ?LET(_, integer(), erlang:unique_integer([positive, monotonic])).
+
+pid_gen(Nodes) ->
+ ?LET(Node, oneof(Nodes),
+ test_util:fake_pid(atom_to_binary(Node, utf8))).
+
+down_gen(Pid) ->
+ ?LET(E, {down, Pid, oneof([noconnection, noproc])}, E).
+
+nodeup_gen(Nodes) ->
+ {nodeup, oneof(Nodes)}.
+
+enqueue_gen(Pid) ->
+ enqueue_gen(Pid, 10, 1).
+
+enqueue_gen(Pid, Enq, Del) ->
+ ?LET(E, {enqueue, Pid,
+ frequency([{Enq, enqueue},
+ {Del, delay}]),
+ binary()}, E).
+
+checkout_cancel_gen(Pid) ->
+ {checkout, Pid, cancel}.
+
+checkout_gen(Pid) ->
+    %% consumer id ({tag, pid}) and prefetch
+ ?LET(C, {checkout, {binary(), Pid}, choose(1, 100)}, C).
+
+
+-record(t, {state = rabbit_fifo:init(#{name => proper,
+ queue_resource => blah,
+ release_cursor_interval => 1})
+ :: rabbit_fifo:state(),
+ index = 1 :: non_neg_integer(), %% raft index
+ enqueuers = #{} :: #{pid() => term()},
+ consumers = #{} :: #{{binary(), pid()} => term()},
+ effects = queue:new() :: queue:queue(),
+ %% to transform the body
+ enq_body_fun = {0, fun ra_lib:id/1},
+ config :: map(),
+ log = [] :: list(),
+ down = #{} :: #{pid() => noproc | noconnection}
+ }).
+
+expand(Ops, Config) ->
+ expand(Ops, Config, {undefined, fun ra_lib:id/1}).
+
+expand(Ops, Config, EnqFun) ->
+ %% execute each command against a rabbit_fifo state and capture all relevant
+ %% effects
+ T = #t{enq_body_fun = EnqFun,
+ config = Config},
+ #t{effects = Effs} = T1 = lists:foldl(fun handle_op/2, T, Ops),
+    %% process the remaining effects
+ #t{log = Log} = lists:foldl(fun do_apply/2,
+ T1#t{effects = queue:new()},
+ queue:to_list(Effs)),
+
+ lists:reverse(Log).
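+
+%% Rough usage sketch (pids and config are illustrative):
+%%   Ops = [{enqueue, EPid, enqueue, <<"m1">>}, {checkout, {<<"tag">>, CPid}, 1}],
+%%   Log = expand(Ops, Config),
+%% Log is the linear list of rabbit_fifo commands that were actually
+%% applied, oldest first, ready to be zipped with raft indexes.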
+
+
+handle_op({enqueue, Pid, When, Data},
+ #t{enqueuers = Enqs0,
+ enq_body_fun = {EnqSt0, Fun},
+ down = Down,
+ effects = Effs} = T) ->
+ case Down of
+ #{Pid := noproc} ->
+            %% a process that went down with noproc cannot exist any more,
+            %% so drop the operation
+ T;
+ _ ->
+ Enqs = maps:update_with(Pid, fun (Seq) -> Seq + 1 end, 1, Enqs0),
+ MsgSeq = maps:get(Pid, Enqs),
+ {EnqSt, Msg} = Fun({EnqSt0, Data}),
+ Cmd = rabbit_fifo:make_enqueue(Pid, MsgSeq, Msg),
+ case When of
+ enqueue ->
+ do_apply(Cmd, T#t{enqueuers = Enqs,
+ enq_body_fun = {EnqSt, Fun}});
+ delay ->
+ %% just put the command on the effects queue
+ T#t{effects = queue:in(Cmd, Effs),
+ enqueuers = Enqs,
+ enq_body_fun = {EnqSt, Fun}}
+ end
+ end;
+handle_op({checkout, Pid, cancel}, #t{consumers = Cons0} = T) ->
+ case maps:keys(
+ maps:filter(fun ({_, P}, _) when P == Pid -> true;
+ (_, _) -> false
+ end, Cons0)) of
+ [CId | _] ->
+ Cons = maps:remove(CId, Cons0),
+ Cmd = rabbit_fifo:make_checkout(CId, cancel, #{}),
+ do_apply(Cmd, T#t{consumers = Cons});
+ _ ->
+ T
+ end;
+handle_op({checkout, CId, Prefetch}, #t{consumers = Cons0} = T) ->
+ case Cons0 of
+ #{CId := _} ->
+ %% ignore if it already exists
+ T;
+ _ ->
+ Cons = maps:put(CId, ok, Cons0),
+ Cmd = rabbit_fifo:make_checkout(CId,
+ {auto, Prefetch, simple_prefetch},
+ #{ack => true,
+ prefetch => Prefetch,
+ username => <<"user">>,
+ args => []}),
+
+ do_apply(Cmd, T#t{consumers = Cons})
+ end;
+handle_op({down, Pid, Reason} = Cmd, #t{down = Down} = T) ->
+ case Down of
+ #{Pid := noproc} ->
+            %% it is permanently down; the down reason cannot be upgraded
+ T;
+ _ ->
+ %% it is either not down or down with noconnection
+ do_apply(Cmd, T#t{down = maps:put(Pid, Reason, Down)})
+ end;
+handle_op({nodeup, _} = Cmd, T) ->
+ do_apply(Cmd, T);
+handle_op({input_event, requeue}, #t{effects = Effs} = T) ->
+ %% this simulates certain settlements arriving out of order
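+    %% e.g. an effects queue [S1, S2] becomes [S2, S1]: the head settlement
+    %% is moved to the back and therefore applied after the later ones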
+ case queue:out(Effs) of
+ {{value, Cmd}, Q} ->
+ T#t{effects = queue:in(Cmd, Q)};
+ _ ->
+ T
+ end;
+handle_op({input_event, Settlement}, #t{effects = Effs,
+ down = Down} = T) ->
+ case queue:out(Effs) of
+ {{value, {settle, MsgIds, CId}}, Q} ->
+ Cmd = case Settlement of
+ settle -> rabbit_fifo:make_settle(CId, MsgIds);
+ return -> rabbit_fifo:make_return(CId, MsgIds);
+ discard -> rabbit_fifo:make_discard(CId, MsgIds)
+ end,
+ do_apply(Cmd, T#t{effects = Q});
+ {{value, {enqueue, Pid, _, _} = Cmd}, Q} ->
+ case maps:is_key(Pid, Down) of
+ true ->
+ %% enqueues cannot arrive after down for the same process
+ %% drop message
+ T#t{effects = Q};
+ false ->
+ do_apply(Cmd, T#t{effects = Q})
+ end;
+ _ ->
+ T
+ end;
+handle_op(purge, T) ->
+ do_apply(rabbit_fifo:make_purge(), T);
+handle_op({update_config, Changes}, #t{config = Conf} = T) ->
+ Config = maps:merge(Conf, Changes),
+ do_apply(rabbit_fifo:make_update_config(Config), T).
+
+
+do_apply(Cmd, #t{effects = Effs,
+ index = Index, state = S0,
+ down = Down,
+ log = Log} = T) ->
+ case Cmd of
+ {enqueue, Pid, _, _} when is_map_key(Pid, Down) ->
+ %% down
+ T;
+ _ ->
+ {St, Effects} = case rabbit_fifo:apply(meta(Index), Cmd, S0) of
+ {S, _, E} when is_list(E) ->
+ {S, E};
+ {S, _, E} ->
+ {S, [E]};
+ {S, _} ->
+ {S, []}
+ end,
+
+ T#t{state = St,
+ index = Index + 1,
+ effects = enq_effs(Effects, Effs),
+ log = [Cmd | Log]}
+ end.
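+
+%% Note: rabbit_fifo:apply/3 returns {State, Reply}, {State, Reply, Effect}
+%% or {State, Reply, [Effect]}; the case above normalizes all three shapes
+%% into a flat effect list before queueing.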
+
+enq_effs([], Q) -> Q;
+enq_effs([{send_msg, P, {delivery, CTag, Msgs}, ra_event} | Rem], Q) ->
+ MsgIds = [I || {I, _} <- Msgs],
+ %% always make settle commands by default
+ %% they can be changed depending on the input event later
+ Cmd = rabbit_fifo:make_settle({CTag, P}, MsgIds),
+ enq_effs(Rem, queue:in(Cmd, Q));
+enq_effs([_ | Rem], Q) ->
+ enq_effs(Rem, Q).
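+
+%% e.g. a {send_msg, Pid, {delivery, <<"tag">>, [{0, _}, {1, _}]}, ra_event}
+%% effect is turned into rabbit_fifo:make_settle({<<"tag">>, Pid}, [0, 1])
+%% and queued for a later input_event.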
+
+
+%% Utility
+run_proper(Fun, Args, NumTests) ->
+ ?assertEqual(
+ true,
+ proper:counterexample(
+ erlang:apply(Fun, Args),
+ [{numtests, NumTests},
+ {on_output, fun(".", _) -> ok; % don't print the '.'s on new lines
+ (F, A) -> ct:pal(?LOW_IMPORTANCE, F, A)
+ end}])).
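+
+%% Note: proper:counterexample/2 returns true when the property holds and
+%% the (shrunk) counterexample otherwise, so the assertion above fails with
+%% the counterexample whenever a property is violated.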
+
+run_snapshot_test(Conf, Commands) ->
+    %% create every incremental prefix of the command list
+    %% and run the snapshot test against each one
+ ct:pal("running snapshot test with ~b commands using config ~p",
+ [length(Commands), Conf]),
+ [begin
+ % ?debugFmt("~w running command to ~w~n", [?FUNCTION_NAME, lists:last(C)]),
+ run_snapshot_test0(Conf, C)
+ end || C <- prefixes(Commands, 1, [])].
+
+run_snapshot_test0(Conf, Commands) ->
+ Indexes = lists:seq(1, length(Commands)),
+ Entries = lists:zip(Indexes, Commands),
+ {State0, Effects} = run_log(test_init(Conf), Entries),
+ State = rabbit_fifo:normalize(State0),
+
+ [begin
+ % ct:pal("release_cursor: ~b~n", [SnapIdx]),
+ %% drop all entries below and including the snapshot
+ Filtered = lists:dropwhile(fun({X, _}) when X =< SnapIdx -> true;
+ (_) -> false
+ end, Entries),
+ {S0, _} = run_log(SnapState, Filtered),
+ S = rabbit_fifo:normalize(S0),
+ % assert log can be restored from any release cursor index
+ case S of
+ State -> ok;
+ _ ->
+ ct:pal("Snapshot tests failed run log:~n"
+ "~p~n from ~n~p~n Entries~n~p~n"
+ "Config: ~p~n",
+ [Filtered, SnapState, Entries, Conf]),
+ ct:pal("Expected~n~p~nGot:~n~p", [State, S]),
+ ?assertEqual(State, S)
+ end
+ end || {release_cursor, SnapIdx, SnapState} <- Effects],
+ ok.
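+
+%% Illustrative walk-through: if Entries = [{1,C1}, ..., {5,C5}] and the run
+%% emitted {release_cursor, 3, SnapState}, the loop above re-applies only
+%% {4,C4} and {5,C5} on top of SnapState and expects to converge on the same
+%% normalized state as the full run, proving the snapshot is sufficient.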
+
+%% transforms [1,2,3] into [[1], [1,2], [1,2,3]]
+prefixes(Source, N, Acc) when N > length(Source) ->
+ lists:reverse(Acc);
+prefixes(Source, N, Acc) ->
+ {X, _} = lists:split(N, Source),
+ prefixes(Source, N+1, [X | Acc]).
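+
+%% e.g. prefixes([a, b, c], 1, []) =:= [[a], [a, b], [a, b, c]].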
+
+run_log(InitState, Entries) ->
+ run_log(InitState, Entries, fun(_) -> true end).
+
+run_log(InitState, Entries, InvariantFun) ->
+ Invariant = fun(E, S) ->
+ case InvariantFun(S) of
+ true -> ok;
+ false ->
+ throw({invariant, E, S})
+ end
+ end,
+
+ lists:foldl(fun ({Idx, E}, {Acc0, Efx0}) ->
+ case rabbit_fifo:apply(meta(Idx), E, Acc0) of
+ {Acc, _, Efx} when is_list(Efx) ->
+ Invariant(E, Acc),
+ {Acc, Efx0 ++ Efx};
+ {Acc, _, Efx} ->
+ Invariant(E, Acc),
+ {Acc, Efx0 ++ [Efx]};
+ {Acc, _} ->
+ Invariant(E, Acc),
+ {Acc, Efx0}
+ end
+ end, {InitState, []}, Entries).
+
+test_init(Conf) ->
+ Default = #{queue_resource => blah,
+ release_cursor_interval => 0,
+ metrics_handler => {?MODULE, metrics_handler, []}},
+ rabbit_fifo:init(maps:merge(Default, Conf)).
+
+meta(Idx) ->
+ #{index => Idx, term => 1, system_time => 0}.
+
+make_checkout(Cid, Spec) ->
+ rabbit_fifo:make_checkout(Cid, Spec, #{}).
+
+make_enqueue(Pid, Seq, Msg) ->
+ rabbit_fifo:make_enqueue(Pid, Seq, Msg).
+
+make_settle(Cid, MsgIds) ->
+ rabbit_fifo:make_settle(Cid, MsgIds).
+
+make_return(Cid, MsgIds) ->
+ rabbit_fifo:make_return(Cid, MsgIds).
diff --git a/deps/rabbit/test/rabbit_fifo_v0_SUITE.erl b/deps/rabbit/test/rabbit_fifo_v0_SUITE.erl
new file mode 100644
index 0000000000..fcb84377de
--- /dev/null
+++ b/deps/rabbit/test/rabbit_fifo_v0_SUITE.erl
@@ -0,0 +1,1392 @@
+-module(rabbit_fifo_v0_SUITE).
+
+%% rabbit_fifo_v0 unit tests suite
+
+-compile(export_all).
+
+-compile({no_auto_import, [apply/3]}).
+-export([
+ ]).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+-include("src/rabbit_fifo_v0.hrl").
+
+%%%===================================================================
+%%% Common Test callbacks
+%%%===================================================================
+
+all() ->
+ [
+ {group, tests}
+ ].
+
+
+%% replicate eunit-like test resolution
+all_tests() ->
+ [F || {F, _} <- ?MODULE:module_info(functions),
+ re:run(atom_to_list(F), "_test$") /= nomatch].
+
+groups() ->
+ [
+ {tests, [], all_tests()}
+ ].
+
+init_per_suite(Config) ->
+ Config.
+
+end_per_suite(_Config) ->
+ ok.
+
+init_per_group(_Group, Config) ->
+ Config.
+
+end_per_group(_Group, _Config) ->
+ ok.
+
+init_per_testcase(_TestCase, Config) ->
+ Config.
+
+end_per_testcase(_TestCase, _Config) ->
+ ok.
+
+%%%===================================================================
+%%% Test cases
+%%%===================================================================
+
+-define(ASSERT_EFF(EfxPat, Effects),
+ ?ASSERT_EFF(EfxPat, true, Effects)).
+
+-define(ASSERT_EFF(EfxPat, Guard, Effects),
+ ?assert(lists:any(fun (EfxPat) when Guard -> true;
+ (_) -> false
+ end, Effects))).
+
+-define(ASSERT_NO_EFF(EfxPat, Effects),
+ ?assert(not lists:any(fun (EfxPat) -> true;
+ (_) -> false
+ end, Effects))).
+
+-define(assertNoEffect(EfxPat, Effects),
+ ?assert(not lists:any(fun (EfxPat) -> true;
+ (_) -> false
+ end, Effects))).
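+
+%% e.g. ?ASSERT_EFF({monitor, process, P}, P =:= Self, Effects) passes when
+%% at least one effect in Effects matches the pattern and the guard holds.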
+
+test_init(Name) ->
+ init(#{name => Name,
+ queue_resource => rabbit_misc:r("/", queue,
+ atom_to_binary(Name, utf8)),
+ release_cursor_interval => 0}).
+
+enq_enq_checkout_test(_) ->
+ Cid = {<<"enq_enq_checkout_test">>, self()},
+ {State1, _} = enq(1, 1, first, test_init(test)),
+ {State2, _} = enq(2, 2, second, State1),
+ {_State3, _, Effects} =
+ apply(meta(3),
+ rabbit_fifo_v0:make_checkout(Cid, {once, 2, simple_prefetch}, #{}),
+ State2),
+ ?ASSERT_EFF({monitor, _, _}, Effects),
+ ?ASSERT_EFF({send_msg, _, {delivery, _, _}, _}, Effects),
+ ok.
+
+credit_enq_enq_checkout_settled_credit_test(_) ->
+ Cid = {?FUNCTION_NAME, self()},
+ {State1, _} = enq(1, 1, first, test_init(test)),
+ {State2, _} = enq(2, 2, second, State1),
+ {State3, _, Effects} =
+ apply(meta(3), rabbit_fifo_v0:make_checkout(Cid, {auto, 1, credited}, #{}), State2),
+ ?ASSERT_EFF({monitor, _, _}, Effects),
+ Deliveries = lists:filter(fun ({send_msg, _, {delivery, _, _}, _}) -> true;
+ (_) -> false
+ end, Effects),
+ ?assertEqual(1, length(Deliveries)),
+    %% settle the delivery; this should _not_ result in further messages
+    %% being delivered
+ {State4, SettledEffects} = settle(Cid, 4, 1, State3),
+ ?assertEqual(false, lists:any(fun ({send_msg, _, {delivery, _, _}, _}) ->
+ true;
+ (_) -> false
+ end, SettledEffects)),
+    %% granting credit (3) should deliver the second msg if the receiver's
+    %% delivery count is (1)
+ {State5, CreditEffects} = credit(Cid, 5, 1, 1, false, State4),
+ % ?debugFmt("CreditEffects ~p ~n~p", [CreditEffects, State4]),
+ ?ASSERT_EFF({send_msg, _, {delivery, _, _}, _}, CreditEffects),
+ {_State6, FinalEffects} = enq(6, 3, third, State5),
+ ?assertEqual(false, lists:any(fun ({send_msg, _, {delivery, _, _}, _}) ->
+ true;
+ (_) -> false
+ end, FinalEffects)),
+ ok.
+
+credit_with_drained_test(_) ->
+ Cid = {?FUNCTION_NAME, self()},
+ State0 = test_init(test),
+ %% checkout with a single credit
+ {State1, _, _} =
+ apply(meta(1), rabbit_fifo_v0:make_checkout(Cid, {auto, 1, credited},#{}),
+ State0),
+ ?assertMatch(#?STATE{consumers = #{Cid := #consumer{credit = 1,
+ delivery_count = 0}}},
+ State1),
+ {State, Result, _} =
+ apply(meta(3), rabbit_fifo_v0:make_credit(Cid, 0, 5, true), State1),
+ ?assertMatch(#?STATE{consumers = #{Cid := #consumer{credit = 0,
+ delivery_count = 5}}},
+ State),
+ ?assertEqual({multi, [{send_credit_reply, 0},
+ {send_drained, {?FUNCTION_NAME, 5}}]},
+ Result),
+ ok.
+
+credit_and_drain_test(_) ->
+ Cid = {?FUNCTION_NAME, self()},
+ {State1, _} = enq(1, 1, first, test_init(test)),
+ {State2, _} = enq(2, 2, second, State1),
+ %% checkout without any initial credit (like AMQP 1.0 would)
+ {State3, _, CheckEffs} =
+ apply(meta(3), rabbit_fifo_v0:make_checkout(Cid, {auto, 0, credited}, #{}),
+ State2),
+
+ ?ASSERT_NO_EFF({send_msg, _, {delivery, _, _}}, CheckEffs),
+ {State4, {multi, [{send_credit_reply, 0},
+ {send_drained, {?FUNCTION_NAME, 2}}]},
+ Effects} = apply(meta(4), rabbit_fifo_v0:make_credit(Cid, 4, 0, true), State3),
+ ?assertMatch(#?STATE{consumers = #{Cid := #consumer{credit = 0,
+ delivery_count = 4}}},
+ State4),
+
+ ?ASSERT_EFF({send_msg, _, {delivery, _, [{_, {_, first}},
+ {_, {_, second}}]}, _}, Effects),
+ {_State5, EnqEffs} = enq(5, 2, third, State4),
+ ?ASSERT_NO_EFF({send_msg, _, {delivery, _, _}}, EnqEffs),
+ ok.
+
+
+
+enq_enq_deq_test(_) ->
+ Cid = {?FUNCTION_NAME, self()},
+ {State1, _} = enq(1, 1, first, test_init(test)),
+ {State2, _} = enq(2, 2, second, State1),
+ % get returns a reply value
+ NumReady = 1,
+ {_State3, {dequeue, {0, {_, first}}, NumReady}, [{monitor, _, _}]} =
+ apply(meta(3), rabbit_fifo_v0:make_checkout(Cid, {dequeue, unsettled}, #{}),
+ State2),
+ ok.
+
+enq_enq_deq_deq_settle_test(_) ->
+ Cid = {?FUNCTION_NAME, self()},
+ {State1, _} = enq(1, 1, first, test_init(test)),
+ {State2, _} = enq(2, 2, second, State1),
+ % get returns a reply value
+ {State3, {dequeue, {0, {_, first}}, 1}, [{monitor, _, _}]} =
+ apply(meta(3), rabbit_fifo_v0:make_checkout(Cid, {dequeue, unsettled}, #{}),
+ State2),
+ {_State4, {dequeue, empty}} =
+ apply(meta(4), rabbit_fifo_v0:make_checkout(Cid, {dequeue, unsettled}, #{}),
+ State3),
+ ok.
+
+enq_enq_checkout_get_settled_test(_) ->
+ Cid = {?FUNCTION_NAME, self()},
+ {State1, _} = enq(1, 1, first, test_init(test)),
+ % get returns a reply value
+ {_State2, {dequeue, {0, {_, first}}, _}, _Effs} =
+ apply(meta(3), rabbit_fifo_v0:make_checkout(Cid, {dequeue, settled}, #{}),
+ State1),
+ ok.
+
+checkout_get_empty_test(_) ->
+ Cid = {?FUNCTION_NAME, self()},
+ State = test_init(test),
+ {_State2, {dequeue, empty}} =
+ apply(meta(1), rabbit_fifo_v0:make_checkout(Cid, {dequeue, unsettled}, #{}), State),
+ ok.
+
+untracked_enq_deq_test(_) ->
+ Cid = {?FUNCTION_NAME, self()},
+ State0 = test_init(test),
+ {State1, _, _} = apply(meta(1),
+ rabbit_fifo_v0:make_enqueue(undefined, undefined, first),
+ State0),
+ {_State2, {dequeue, {0, {_, first}}, _}, _} =
+ apply(meta(3), rabbit_fifo_v0:make_checkout(Cid, {dequeue, settled}, #{}), State1),
+ ok.
+
+release_cursor_test(_) ->
+ Cid = {?FUNCTION_NAME, self()},
+ {State1, _} = enq(1, 1, first, test_init(test)),
+ {State2, _} = enq(2, 2, second, State1),
+ {State3, _} = check(Cid, 3, 10, State2),
+ % no release cursor effect at this point
+ {State4, _} = settle(Cid, 4, 1, State3),
+ {_Final, Effects1} = settle(Cid, 5, 0, State4),
+ % empty queue forwards release cursor all the way
+ ?ASSERT_EFF({release_cursor, 5, _}, Effects1),
+ ok.
+
+checkout_enq_settle_test(_) ->
+ Cid = {?FUNCTION_NAME, self()},
+ {State1, [{monitor, _, _} | _]} = check(Cid, 1, test_init(test)),
+ {State2, Effects0} = enq(2, 1, first, State1),
+ ?ASSERT_EFF({send_msg, _,
+ {delivery, ?FUNCTION_NAME,
+ [{0, {_, first}}]}, _},
+ Effects0),
+ {State3, [_Inactive]} = enq(3, 2, second, State2),
+ {_, _Effects} = settle(Cid, 4, 0, State3),
+ % the release cursor is the smallest raft index that does not
+ % contribute to the state of the application
+ % ?ASSERT_EFF({release_cursor, 2, _}, Effects),
+ ok.
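+
+%% Roughly: once every entry at or below the cursor index is settled,
+%% replaying those entries can no longer change the outcome, so ra may
+%% truncate the log up to that index and keep only the snapshot state.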
+
+out_of_order_enqueue_test(_) ->
+ Cid = {?FUNCTION_NAME, self()},
+ {State1, [{monitor, _, _} | _]} = check_n(Cid, 5, 5, test_init(test)),
+ {State2, Effects2} = enq(2, 1, first, State1),
+ ?ASSERT_EFF({send_msg, _, {delivery, _, [{_, {_, first}}]}, _}, Effects2),
+ % assert monitor was set up
+ ?ASSERT_EFF({monitor, _, _}, Effects2),
+ % enqueue seq num 3 and 4 before 2
+ {State3, Effects3} = enq(3, 3, third, State2),
+ ?assertNoEffect({send_msg, _, {delivery, _, _}, _}, Effects3),
+ {State4, Effects4} = enq(4, 4, fourth, State3),
+    % assert no further deliveries were made
+ ?assertNoEffect({send_msg, _, {delivery, _, _}, _}, Effects4),
+ {_State5, Effects5} = enq(5, 2, second, State4),
+    % assert messages 2-4 are now delivered together in order
+ ?ASSERT_EFF({send_msg, _, {delivery, _, [{_, {_, second}},
+ {_, {_, third}},
+ {_, {_, fourth}}]}, _},
+ Effects5),
+ ok.
+
+out_of_order_first_enqueue_test(_) ->
+ Cid = {?FUNCTION_NAME, self()},
+ {State1, _} = check_n(Cid, 5, 5, test_init(test)),
+ {_State2, Effects2} = enq(2, 10, first, State1),
+ ?ASSERT_EFF({monitor, process, _}, Effects2),
+ ?assertNoEffect({send_msg, _, {delivery, _, [{_, {_, first}}]}, _},
+ Effects2),
+ ok.
+
+duplicate_enqueue_test(_) ->
+ Cid = {<<"duplicate_enqueue_test">>, self()},
+ {State1, [{monitor, _, _} | _]} = check_n(Cid, 5, 5, test_init(test)),
+ {State2, Effects2} = enq(2, 1, first, State1),
+ ?ASSERT_EFF({send_msg, _, {delivery, _, [{_, {_, first}}]}, _}, Effects2),
+ {_State3, Effects3} = enq(3, 1, first, State2),
+ ?assertNoEffect({send_msg, _, {delivery, _, [{_, {_, first}}]}, _}, Effects3),
+ ok.
+
+return_test(_) ->
+ Cid = {<<"cid">>, self()},
+ Cid2 = {<<"cid2">>, self()},
+ {State0, _} = enq(1, 1, msg, test_init(test)),
+ {State1, _} = check_auto(Cid, 2, State0),
+ {State2, _} = check_auto(Cid2, 3, State1),
+ {State3, _, _} = apply(meta(4), rabbit_fifo_v0:make_return(Cid, [0]), State2),
+ ?assertMatch(#{Cid := #consumer{checked_out = C}} when map_size(C) == 0,
+ State3#?STATE.consumers),
+ ?assertMatch(#{Cid2 := #consumer{checked_out = C2}} when map_size(C2) == 1,
+ State3#?STATE.consumers),
+ ok.
+
+return_dequeue_delivery_limit_test(_) ->
+ Init = init(#{name => test,
+ queue_resource => rabbit_misc:r("/", queue,
+ atom_to_binary(test, utf8)),
+ release_cursor_interval => 0,
+ delivery_limit => 1}),
+ {State0, _} = enq(1, 1, msg, Init),
+
+ Cid = {<<"cid">>, self()},
+ Cid2 = {<<"cid2">>, self()},
+
+ {State1, {MsgId1, _}} = deq(2, Cid, unsettled, State0),
+ {State2, _, _} = apply(meta(4), rabbit_fifo_v0:make_return(Cid, [MsgId1]),
+ State1),
+
+ {State3, {MsgId2, _}} = deq(2, Cid2, unsettled, State2),
+ {State4, _, _} = apply(meta(4), rabbit_fifo_v0:make_return(Cid2, [MsgId2]),
+ State3),
+ ?assertMatch(#{num_messages := 0}, rabbit_fifo_v0:overview(State4)),
+ ok.
+
+return_non_existent_test(_) ->
+ Cid = {<<"cid">>, self()},
+ {State0, [_, _Inactive]} = enq(1, 1, second, test_init(test)),
+ % return non-existent
+ {_State2, _} = apply(meta(3), rabbit_fifo_v0:make_return(Cid, [99]), State0),
+ ok.
+
+return_checked_out_test(_) ->
+ Cid = {<<"cid">>, self()},
+ {State0, [_, _]} = enq(1, 1, first, test_init(test)),
+ {State1, [_Monitor,
+ {send_msg, _, {delivery, _, [{MsgId, _}]}, _},
+ {aux, active} | _ ]} = check_auto(Cid, 2, State0),
+ % returning immediately checks out the same message again
+ {_, ok, [{send_msg, _, {delivery, _, [{_, _}]}, _},
+ {aux, active}]} =
+ apply(meta(3), rabbit_fifo_v0:make_return(Cid, [MsgId]), State1),
+ ok.
+
+return_checked_out_limit_test(_) ->
+ Cid = {<<"cid">>, self()},
+ Init = init(#{name => test,
+ queue_resource => rabbit_misc:r("/", queue,
+ atom_to_binary(test, utf8)),
+ release_cursor_interval => 0,
+ delivery_limit => 1}),
+ {State0, [_, _]} = enq(1, 1, first, Init),
+ {State1, [_Monitor,
+ {send_msg, _, {delivery, _, [{MsgId, _}]}, _},
+ {aux, active} | _ ]} = check_auto(Cid, 2, State0),
+ % returning immediately checks out the same message again
+ {State2, ok, [{send_msg, _, {delivery, _, [{MsgId2, _}]}, _},
+ {aux, active}]} =
+ apply(meta(3), rabbit_fifo_v0:make_return(Cid, [MsgId]), State1),
+ {#?STATE{ra_indexes = RaIdxs}, ok, [_ReleaseEff]} =
+ apply(meta(4), rabbit_fifo_v0:make_return(Cid, [MsgId2]), State2),
+ ?assertEqual(0, rabbit_fifo_index:size(RaIdxs)),
+ ok.
+
+return_auto_checked_out_test(_) ->
+ Cid = {<<"cid">>, self()},
+ {State00, [_, _]} = enq(1, 1, first, test_init(test)),
+ {State0, [_]} = enq(2, 2, second, State00),
+    % first active, then inactive, as the consumer took one message on but
+    % cannot take any more
+ {State1, [_Monitor,
+ {send_msg, _, {delivery, _, [{MsgId, _}]}, _},
+ {aux, active},
+ {aux, inactive}
+ ]} = check_auto(Cid, 2, State0),
+ % return should include another delivery
+ {_State2, _, Effects} = apply(meta(3), rabbit_fifo_v0:make_return(Cid, [MsgId]), State1),
+ ?ASSERT_EFF({send_msg, _,
+ {delivery, _, [{_, {#{delivery_count := 1}, first}}]}, _},
+ Effects),
+ ok.
+
+cancelled_checkout_out_test(_) ->
+ Cid = {<<"cid">>, self()},
+ {State00, [_, _]} = enq(1, 1, first, test_init(test)),
+ {State0, [_]} = enq(2, 2, second, State00),
+ {State1, _} = check_auto(Cid, 2, State0),
+ % cancelled checkout should not return pending messages to queue
+ {State2, _, _} = apply(meta(3), rabbit_fifo_v0:make_checkout(Cid, cancel, #{}), State1),
+ ?assertEqual(1, maps:size(State2#?STATE.messages)),
+ ?assertEqual(0, lqueue:len(State2#?STATE.returns)),
+
+ {State3, {dequeue, empty}} =
+ apply(meta(3), rabbit_fifo_v0:make_checkout(Cid, {dequeue, settled}, #{}), State2),
+ %% settle
+ {State4, ok, _} =
+ apply(meta(4), rabbit_fifo_v0:make_settle(Cid, [0]), State3),
+
+ {_State, {dequeue, {_, {_, second}}, _}, _} =
+ apply(meta(5), rabbit_fifo_v0:make_checkout(Cid, {dequeue, settled}, #{}), State4),
+ ok.
+
+down_with_noproc_consumer_returns_unsettled_test(_) ->
+ Cid = {<<"down_consumer_returns_unsettled_test">>, self()},
+ {State0, [_, _]} = enq(1, 1, second, test_init(test)),
+ {State1, [{monitor, process, Pid} | _]} = check(Cid, 2, State0),
+ {State2, _, _} = apply(meta(3), {down, Pid, noproc}, State1),
+ {_State, Effects} = check(Cid, 4, State2),
+ ?ASSERT_EFF({monitor, process, _}, Effects),
+ ok.
+
+down_with_noconnection_marks_suspect_and_node_is_monitored_test(_) ->
+ Pid = spawn(fun() -> ok end),
+ Cid = {<<"down_with_noconnect">>, Pid},
+ Self = self(),
+ Node = node(Pid),
+ {State0, Effects0} = enq(1, 1, second, test_init(test)),
+ ?ASSERT_EFF({monitor, process, P}, P =:= Self, Effects0),
+ {State1, Effects1} = check_auto(Cid, 2, State0),
+ #consumer{credit = 0} = maps:get(Cid, State1#?STATE.consumers),
+ ?ASSERT_EFF({monitor, process, P}, P =:= Pid, Effects1),
+ % monitor both enqueuer and consumer
+ % because we received a noconnection we now need to monitor the node
+ {State2a, _, _} = apply(meta(3), {down, Pid, noconnection}, State1),
+ #consumer{credit = 1,
+ checked_out = Ch,
+ status = suspected_down} = maps:get(Cid, State2a#?STATE.consumers),
+ ?assertEqual(#{}, Ch),
+ %% validate consumer has credit
+ {State2, _, Effects2} = apply(meta(3), {down, Self, noconnection}, State2a),
+ ?ASSERT_EFF({monitor, node, _}, Effects2),
+ ?assertNoEffect({demonitor, process, _}, Effects2),
+ % when the node comes up we need to retry the process monitors for the
+ % disconnected processes
+ {State3, _, Effects3} = apply(meta(3), {nodeup, Node}, State2),
+ #consumer{status = up} = maps:get(Cid, State3#?STATE.consumers),
+ % try to re-monitor the suspect processes
+ ?ASSERT_EFF({monitor, process, P}, P =:= Pid, Effects3),
+ ?ASSERT_EFF({monitor, process, P}, P =:= Self, Effects3),
+ ok.
+
+down_with_noconnection_returns_unack_test(_) ->
+ Pid = spawn(fun() -> ok end),
+ Cid = {<<"down_with_noconnect">>, Pid},
+ {State0, _} = enq(1, 1, second, test_init(test)),
+ ?assertEqual(1, maps:size(State0#?STATE.messages)),
+ ?assertEqual(0, lqueue:len(State0#?STATE.returns)),
+ {State1, {_, _}} = deq(2, Cid, unsettled, State0),
+ ?assertEqual(0, maps:size(State1#?STATE.messages)),
+ ?assertEqual(0, lqueue:len(State1#?STATE.returns)),
+ {State2a, _, _} = apply(meta(3), {down, Pid, noconnection}, State1),
+ ?assertEqual(0, maps:size(State2a#?STATE.messages)),
+ ?assertEqual(1, lqueue:len(State2a#?STATE.returns)),
+ ?assertMatch(#consumer{checked_out = Ch,
+ status = suspected_down}
+ when map_size(Ch) == 0,
+ maps:get(Cid, State2a#?STATE.consumers)),
+ ok.
+
+down_with_noproc_enqueuer_is_cleaned_up_test(_) ->
+ State00 = test_init(test),
+ Pid = spawn(fun() -> ok end),
+ {State0, _, Effects0} = apply(meta(1), rabbit_fifo_v0:make_enqueue(Pid, 1, first), State00),
+ ?ASSERT_EFF({monitor, process, _}, Effects0),
+ {State1, _, _} = apply(meta(3), {down, Pid, noproc}, State0),
+ % ensure there are no enqueuers
+ ?assert(0 =:= maps:size(State1#?STATE.enqueuers)),
+ ok.
+
+discarded_message_without_dead_letter_handler_is_removed_test(_) ->
+ Cid = {<<"completed_consumer_yields_demonitor_effect_test">>, self()},
+ {State0, [_, _]} = enq(1, 1, first, test_init(test)),
+ {State1, Effects1} = check_n(Cid, 2, 10, State0),
+ ?ASSERT_EFF({send_msg, _,
+ {delivery, _, [{0, {_, first}}]}, _},
+ Effects1),
+ {_State2, _, Effects2} = apply(meta(1),
+ rabbit_fifo_v0:make_discard(Cid, [0]), State1),
+ ?assertNoEffect({send_msg, _,
+ {delivery, _, [{0, {_, first}}]}, _},
+ Effects2),
+ ok.
+
+discarded_message_with_dead_letter_handler_emits_log_effect_test(_) ->
+ Cid = {<<"completed_consumer_yields_demonitor_effect_test">>, self()},
+ State00 = init(#{name => test,
+ queue_resource => rabbit_misc:r(<<"/">>, queue, <<"test">>),
+ dead_letter_handler =>
+ {somemod, somefun, [somearg]}}),
+ {State0, [_, _]} = enq(1, 1, first, State00),
+ {State1, Effects1} = check_n(Cid, 2, 10, State0),
+ ?ASSERT_EFF({send_msg, _,
+ {delivery, _, [{0, {_, first}}]}, _},
+ Effects1),
+ {_State2, _, Effects2} = apply(meta(1), rabbit_fifo_v0:make_discard(Cid, [0]), State1),
+    % assert a log effect so the message can be dead-lettered with its reason
+ ?ASSERT_EFF({log, _RaftIdxs, _}, Effects2),
+ ok.
+
+tick_test(_) ->
+ Cid = {<<"c">>, self()},
+ Cid2 = {<<"c2">>, self()},
+ {S0, _} = enq(1, 1, <<"fst">>, test_init(?FUNCTION_NAME)),
+ {S1, _} = enq(2, 2, <<"snd">>, S0),
+ {S2, {MsgId, _}} = deq(3, Cid, unsettled, S1),
+ {S3, {_, _}} = deq(4, Cid2, unsettled, S2),
+ {S4, _, _} = apply(meta(5), rabbit_fifo_v0:make_return(Cid, [MsgId]), S3),
+
+ [{mod_call, rabbit_quorum_queue, handle_tick,
+ [#resource{},
+ {?FUNCTION_NAME, 1, 1, 2, 1, 3, 3},
+ [_Node]
+ ]}] = rabbit_fifo_v0:tick(1, S4),
+ ok.
+
+
+delivery_query_returns_deliveries_test(_) ->
+ Tag = atom_to_binary(?FUNCTION_NAME, utf8),
+ Cid = {Tag, self()},
+ Commands = [
+ rabbit_fifo_v0:make_checkout(Cid, {auto, 5, simple_prefetch}, #{}),
+ rabbit_fifo_v0:make_enqueue(self(), 1, one),
+ rabbit_fifo_v0:make_enqueue(self(), 2, two),
+ rabbit_fifo_v0:make_enqueue(self(), 3, tre),
+ rabbit_fifo_v0:make_enqueue(self(), 4, for)
+ ],
+ Indexes = lists:seq(1, length(Commands)),
+ Entries = lists:zip(Indexes, Commands),
+ {State, _Effects} = run_log(test_init(help), Entries),
+    % one delivery is returned for msg id 0, three for ids 1 to 3
+ [{0, {_, one}}] = rabbit_fifo_v0:get_checked_out(Cid, 0, 0, State),
+ [_, _, _] = rabbit_fifo_v0:get_checked_out(Cid, 1, 3, State),
+ ok.
+
+pending_enqueue_is_enqueued_on_down_test(_) ->
+ Cid = {<<"cid">>, self()},
+ Pid = self(),
+ {State0, _} = enq(1, 2, first, test_init(test)),
+ {State1, _, _} = apply(meta(2), {down, Pid, noproc}, State0),
+ {_State2, {dequeue, {0, {_, first}}, 0}, _} =
+ apply(meta(3), rabbit_fifo_v0:make_checkout(Cid, {dequeue, settled}, #{}), State1),
+ ok.
+
+duplicate_delivery_test(_) ->
+ {State0, _} = enq(1, 1, first, test_init(test)),
+ {#?STATE{ra_indexes = RaIdxs,
+ messages = Messages}, _} = enq(2, 1, first, State0),
+ ?assertEqual(1, rabbit_fifo_index:size(RaIdxs)),
+ ?assertEqual(1, maps:size(Messages)),
+ ok.
+
+state_enter_file_handle_leader_reservation_test(_) ->
+ S0 = init(#{name => the_name,
+ queue_resource => rabbit_misc:r(<<"/">>, queue, <<"test">>),
+ become_leader_handler => {m, f, [a]}}),
+
+ Resource = {resource, <<"/">>, queue, <<"test">>},
+ Effects = rabbit_fifo_v0:state_enter(leader, S0),
+ ?assertEqual([
+ {mod_call, m, f, [a, the_name]},
+ {mod_call, rabbit_quorum_queue, file_handle_leader_reservation, [Resource]}
+ ], Effects),
+ ok.
+
+state_enter_file_handle_other_reservation_test(_) ->
+ S0 = init(#{name => the_name,
+ queue_resource => rabbit_misc:r(<<"/">>, queue, <<"test">>)}),
+ Effects = rabbit_fifo_v0:state_enter(other, S0),
+ ?assertEqual([
+ {mod_call, rabbit_quorum_queue, file_handle_other_reservation, []}
+ ],
+ Effects),
+ ok.
+
+state_enter_monitors_and_notifications_test(_) ->
+ Oth = spawn(fun () -> ok end),
+ {State0, _} = enq(1, 1, first, test_init(test)),
+ Cid = {<<"adf">>, self()},
+ OthCid = {<<"oth">>, Oth},
+ {State1, _} = check(Cid, 2, State0),
+ {State, _} = check(OthCid, 3, State1),
+ Self = self(),
+ Effects = rabbit_fifo_v0:state_enter(leader, State),
+
+ %% monitor all enqueuers and consumers
+ [{monitor, process, Self},
+ {monitor, process, Oth}] =
+ lists:filter(fun ({monitor, process, _}) -> true;
+ (_) -> false
+ end, Effects),
+ [{send_msg, Self, leader_change, ra_event},
+ {send_msg, Oth, leader_change, ra_event}] =
+ lists:filter(fun ({send_msg, _, leader_change, ra_event}) -> true;
+ (_) -> false
+ end, Effects),
+ ?ASSERT_EFF({monitor, process, _}, Effects),
+ ok.
+
+purge_test(_) ->
+ Cid = {<<"purge_test">>, self()},
+ {State1, _} = enq(1, 1, first, test_init(test)),
+ {State2, {purge, 1}, _} = apply(meta(2), rabbit_fifo_v0:make_purge(), State1),
+ {State3, _} = enq(3, 2, second, State2),
+ % get returns a reply value
+ {_State4, {dequeue, {0, {_, second}}, _}, [{monitor, _, _}]} =
+ apply(meta(4), rabbit_fifo_v0:make_checkout(Cid, {dequeue, unsettled}, #{}), State3),
+ ok.
+
+purge_with_checkout_test(_) ->
+ Cid = {<<"purge_test">>, self()},
+ {State0, _} = check_auto(Cid, 1, test_init(?FUNCTION_NAME)),
+ {State1, _} = enq(2, 1, <<"first">>, State0),
+ {State2, _} = enq(3, 2, <<"second">>, State1),
+ %% assert message bytes are non zero
+ ?assert(State2#?STATE.msg_bytes_checkout > 0),
+ ?assert(State2#?STATE.msg_bytes_enqueue > 0),
+ {State3, {purge, 1}, _} = apply(meta(2), rabbit_fifo_v0:make_purge(), State2),
+ ?assert(State2#?STATE.msg_bytes_checkout > 0),
+ ?assertEqual(0, State3#?STATE.msg_bytes_enqueue),
+ ?assertEqual(1, rabbit_fifo_index:size(State3#?STATE.ra_indexes)),
+ #consumer{checked_out = Checked} = maps:get(Cid, State3#?STATE.consumers),
+ ?assertEqual(1, maps:size(Checked)),
+ ok.
+
+down_noproc_returns_checked_out_in_order_test(_) ->
+ S0 = test_init(?FUNCTION_NAME),
+ %% enqueue 100
+ S1 = lists:foldl(fun (Num, FS0) ->
+ {FS, _} = enq(Num, Num, Num, FS0),
+ FS
+ end, S0, lists:seq(1, 100)),
+ ?assertEqual(100, maps:size(S1#?STATE.messages)),
+ Cid = {<<"cid">>, self()},
+ {S2, _} = check(Cid, 101, 1000, S1),
+ #consumer{checked_out = Checked} = maps:get(Cid, S2#?STATE.consumers),
+ ?assertEqual(100, maps:size(Checked)),
+ %% simulate down
+ {S, _, _} = apply(meta(102), {down, self(), noproc}, S2),
+ Returns = lqueue:to_list(S#?STATE.returns),
+ ?assertEqual(100, length(Returns)),
+ ?assertEqual(0, maps:size(S#?STATE.consumers)),
+ %% validate returns are in order
+ ?assertEqual(lists:sort(Returns), Returns),
+ ok.
+
+down_noconnection_returns_checked_out_test(_) ->
+ S0 = test_init(?FUNCTION_NAME),
+ NumMsgs = 20,
+ S1 = lists:foldl(fun (Num, FS0) ->
+ {FS, _} = enq(Num, Num, Num, FS0),
+ FS
+ end, S0, lists:seq(1, NumMsgs)),
+ ?assertEqual(NumMsgs, maps:size(S1#?STATE.messages)),
+ Cid = {<<"cid">>, self()},
+ {S2, _} = check(Cid, 101, 1000, S1),
+ #consumer{checked_out = Checked} = maps:get(Cid, S2#?STATE.consumers),
+ ?assertEqual(NumMsgs, maps:size(Checked)),
+ %% simulate down
+ {S, _, _} = apply(meta(102), {down, self(), noconnection}, S2),
+ Returns = lqueue:to_list(S#?STATE.returns),
+ ?assertEqual(NumMsgs, length(Returns)),
+ ?assertMatch(#consumer{checked_out = Ch}
+ when map_size(Ch) == 0,
+ maps:get(Cid, S#?STATE.consumers)),
+ %% validate returns are in order
+ ?assertEqual(lists:sort(Returns), Returns),
+ ok.
+
+single_active_consumer_basic_get_test(_) ->
+ Cid = {?FUNCTION_NAME, self()},
+ State0 = init(#{name => ?FUNCTION_NAME,
+ queue_resource => rabbit_misc:r("/", queue,
+ atom_to_binary(?FUNCTION_NAME, utf8)),
+ release_cursor_interval => 0,
+ single_active_consumer_on => true}),
+ ?assertEqual(single_active, State0#?STATE.cfg#cfg.consumer_strategy),
+ ?assertEqual(0, map_size(State0#?STATE.consumers)),
+ {State1, _} = enq(1, 1, first, State0),
+ {_State, {error, unsupported}} =
+ apply(meta(2), rabbit_fifo_v0:make_checkout(Cid, {dequeue, unsettled}, #{}),
+ State1),
+ ok.
+
+single_active_consumer_test(_) ->
+ State0 = init(#{name => ?FUNCTION_NAME,
+ queue_resource => rabbit_misc:r("/", queue,
+ atom_to_binary(?FUNCTION_NAME, utf8)),
+ release_cursor_interval => 0,
+ single_active_consumer_on => true}),
+ ?assertEqual(single_active, State0#?STATE.cfg#cfg.consumer_strategy),
+ ?assertEqual(0, map_size(State0#?STATE.consumers)),
+
+ % adding some consumers
+ AddConsumer = fun(CTag, State) ->
+ {NewState, _, _} = apply(
+ meta(1),
+ make_checkout({CTag, self()},
+ {once, 1, simple_prefetch},
+ #{}),
+ State),
+ NewState
+ end,
+ State1 = lists:foldl(AddConsumer, State0,
+ [<<"ctag1">>, <<"ctag2">>, <<"ctag3">>, <<"ctag4">>]),
+ C1 = {<<"ctag1">>, self()},
+ C2 = {<<"ctag2">>, self()},
+ C3 = {<<"ctag3">>, self()},
+ C4 = {<<"ctag4">>, self()},
+
+ % the first registered consumer is the active one, the others are waiting
+ ?assertEqual(1, map_size(State1#?STATE.consumers)),
+ ?assertMatch(#{C1 := _}, State1#?STATE.consumers),
+ ?assertEqual(3, length(State1#?STATE.waiting_consumers)),
+ ?assertNotEqual(false, lists:keyfind(C2, 1, State1#?STATE.waiting_consumers)),
+ ?assertNotEqual(false, lists:keyfind(C3, 1, State1#?STATE.waiting_consumers)),
+ ?assertNotEqual(false, lists:keyfind(C4, 1, State1#?STATE.waiting_consumers)),
+
+ % cancelling a waiting consumer
+ {State2, _, Effects1} = apply(meta(2),
+ make_checkout(C3, cancel, #{}),
+ State1),
+ % the active consumer should still be in place
+ ?assertEqual(1, map_size(State2#?STATE.consumers)),
+ ?assertMatch(#{C1 := _}, State2#?STATE.consumers),
+ % the cancelled consumer has been removed from waiting consumers
+ ?assertEqual(2, length(State2#?STATE.waiting_consumers)),
+ ?assertNotEqual(false, lists:keyfind(C2, 1, State2#?STATE.waiting_consumers)),
+ ?assertNotEqual(false, lists:keyfind(C4, 1, State2#?STATE.waiting_consumers)),
+ % there are some effects to unregister the consumer
+ ?ASSERT_EFF({mod_call, rabbit_quorum_queue,
+ cancel_consumer_handler, [_, C]}, C == C3, Effects1),
+
+ % cancelling the active consumer
+ {State3, _, Effects2} = apply(meta(3),
+ make_checkout(C1, cancel, #{}),
+ State2),
+ % the second registered consumer is now the active one
+ ?assertEqual(1, map_size(State3#?STATE.consumers)),
+ ?assertMatch(#{C2 := _}, State3#?STATE.consumers),
+ % the new active consumer is no longer in the waiting list
+ ?assertEqual(1, length(State3#?STATE.waiting_consumers)),
+ ?assertNotEqual(false, lists:keyfind(C4, 1,
+ State3#?STATE.waiting_consumers)),
+ %% should have a cancel consumer handler mod_call effect and
+ %% an active new consumer effect
+ ?ASSERT_EFF({mod_call, rabbit_quorum_queue,
+ cancel_consumer_handler, [_, C]}, C == C1, Effects2),
+ ?ASSERT_EFF({mod_call, rabbit_quorum_queue,
+ update_consumer_handler, _}, Effects2),
+
+ % cancelling the active consumer
+ {State4, _, Effects3} = apply(meta(4),
+ make_checkout(C2, cancel, #{}),
+ State3),
+ % the last waiting consumer became the active one
+ ?assertEqual(1, map_size(State4#?STATE.consumers)),
+ ?assertMatch(#{C4 := _}, State4#?STATE.consumers),
+ % the waiting consumer list is now empty
+ ?assertEqual(0, length(State4#?STATE.waiting_consumers)),
+ % there are some effects to unregister the consumer and
+ % to update the new active one (metrics)
+ ?ASSERT_EFF({mod_call, rabbit_quorum_queue,
+ cancel_consumer_handler, [_, C]}, C == C2, Effects3),
+ ?ASSERT_EFF({mod_call, rabbit_quorum_queue,
+ update_consumer_handler, _}, Effects3),
+
+ % cancelling the last consumer
+ {State5, _, Effects4} = apply(meta(5),
+ make_checkout(C4, cancel, #{}),
+ State4),
+ % no active consumer anymore
+ ?assertEqual(0, map_size(State5#?STATE.consumers)),
+ % still nothing in the waiting list
+ ?assertEqual(0, length(State5#?STATE.waiting_consumers)),
+ % there is an effect to unregister the consumer + queue inactive effect
+ ?ASSERT_EFF({mod_call, rabbit_quorum_queue,
+ cancel_consumer_handler, _}, Effects4),
+
+ ok.
+
+single_active_consumer_cancel_consumer_when_channel_is_down_test(_) ->
+ State0 = init(#{name => ?FUNCTION_NAME,
+ queue_resource => rabbit_misc:r("/", queue,
+ atom_to_binary(?FUNCTION_NAME, utf8)),
+ release_cursor_interval => 0,
+ single_active_consumer_on => true}),
+
+ DummyFunction = fun() -> ok end,
+ Pid1 = spawn(DummyFunction),
+ Pid2 = spawn(DummyFunction),
+ Pid3 = spawn(DummyFunction),
+
+ [C1, C2, C3, C4] = Consumers =
+ [{<<"ctag1">>, Pid1}, {<<"ctag2">>, Pid2},
+ {<<"ctag3">>, Pid2}, {<<"ctag4">>, Pid3}],
+ % adding some consumers
+ AddConsumer = fun({CTag, ChannelId}, State) ->
+ {NewState, _, _} = apply(
+ #{index => 1},
+ make_checkout({CTag, ChannelId}, {once, 1, simple_prefetch}, #{}),
+ State),
+ NewState
+ end,
+ State1 = lists:foldl(AddConsumer, State0, Consumers),
+
+ % the channel of the active consumer goes down
+ {State2, _, Effects} = apply(#{index => 2}, {down, Pid1, noproc}, State1),
+ % fell back to another consumer
+ ?assertEqual(1, map_size(State2#?STATE.consumers)),
+ % there are still waiting consumers
+ ?assertEqual(2, length(State2#?STATE.waiting_consumers)),
+ % effects to unregister the consumer and
+ % to update the new active one (metrics) are there
+ ?ASSERT_EFF({mod_call, rabbit_quorum_queue,
+ cancel_consumer_handler, [_, C]}, C == C1, Effects),
+ ?ASSERT_EFF({mod_call, rabbit_quorum_queue,
+ update_consumer_handler, _}, Effects),
+
+ % the channel of the active consumer and a waiting consumer goes down
+ {State3, _, Effects2} = apply(#{index => 3}, {down, Pid2, noproc}, State2),
+ % fell back to another consumer
+ ?assertEqual(1, map_size(State3#?STATE.consumers)),
+ % no more waiting consumer
+ ?assertEqual(0, length(State3#?STATE.waiting_consumers)),
+ % effects to cancel both consumers of this channel + effect to update the new active one (metrics)
+ ?ASSERT_EFF({mod_call, rabbit_quorum_queue,
+ cancel_consumer_handler, [_, C]}, C == C2, Effects2),
+ ?ASSERT_EFF({mod_call, rabbit_quorum_queue,
+ cancel_consumer_handler, [_, C]}, C == C3, Effects2),
+ ?ASSERT_EFF({mod_call, rabbit_quorum_queue,
+ update_consumer_handler, _}, Effects2),
+
+ % the last channel goes down
+ {State4, _, Effects3} = apply(#{index => 4}, {down, Pid3, doesnotmatter}, State3),
+ % no more consumers
+ ?assertEqual(0, map_size(State4#?STATE.consumers)),
+ ?assertEqual(0, length(State4#?STATE.waiting_consumers)),
+ % there is an effect to unregister the consumer + queue inactive effect
+ ?ASSERT_EFF({mod_call, rabbit_quorum_queue,
+ cancel_consumer_handler, [_, C]}, C == C4, Effects3),
+
+ ok.
+
+single_active_returns_messages_on_noconnection_test(_) ->
+ R = rabbit_misc:r("/", queue, atom_to_binary(?FUNCTION_NAME, utf8)),
+ State0 = init(#{name => ?FUNCTION_NAME,
+ queue_resource => R,
+ release_cursor_interval => 0,
+ single_active_consumer_on => true}),
+ Meta = #{index => 1},
+ Nodes = [n1],
+ ConsumerIds = [{_, DownPid}] =
+ [begin
+ B = atom_to_binary(N, utf8),
+ {<<"ctag_", B/binary>>,
+ test_util:fake_pid(N)}
+ end || N <- Nodes],
+ % adding some consumers
+ State1 = lists:foldl(
+ fun(CId, Acc0) ->
+ {Acc, _, _} =
+ apply(Meta,
+ make_checkout(CId,
+ {once, 1, simple_prefetch}, #{}),
+ Acc0),
+ Acc
+ end, State0, ConsumerIds),
+ {State2, _} = enq(4, 1, msg1, State1),
+ % simulate node goes down
+ {State3, _, _} = apply(meta(5), {down, DownPid, noconnection}, State2),
+ %% assert the consumer is up
+    %% assert the message was returned and the consumer is now waiting
+ ?assertMatch([{_, #consumer{checked_out = Checked}}]
+ when map_size(Checked) == 0,
+ State3#?STATE.waiting_consumers),
+
+ ok.
+
+single_active_consumer_replaces_consumer_when_down_noconnection_test(_) ->
+ R = rabbit_misc:r("/", queue, atom_to_binary(?FUNCTION_NAME, utf8)),
+ State0 = init(#{name => ?FUNCTION_NAME,
+ queue_resource => R,
+ release_cursor_interval => 0,
+ single_active_consumer_on => true}),
+ Meta = #{index => 1},
+ Nodes = [n1, n2, node()],
+ ConsumerIds = [C1 = {_, DownPid}, C2, _C3] =
+ [begin
+ B = atom_to_binary(N, utf8),
+ {<<"ctag_", B/binary>>,
+ test_util:fake_pid(N)}
+ end || N <- Nodes],
+ % adding some consumers
+ State1a = lists:foldl(
+ fun(CId, Acc0) ->
+ {Acc, _, _} =
+ apply(Meta,
+ make_checkout(CId,
+ {once, 1, simple_prefetch}, #{}),
+ Acc0),
+ Acc
+ end, State0, ConsumerIds),
+
+ %% assert the consumer is up
+ ?assertMatch(#{C1 := #consumer{status = up}},
+ State1a#?STATE.consumers),
+
+ {State1, _} = enq(10, 1, msg, State1a),
+
+ % simulate node goes down
+ {State2, _, _} = apply(meta(5), {down, DownPid, noconnection}, State1),
+
+ %% assert a new consumer is in place and it is up
+ ?assertMatch([{C2, #consumer{status = up,
+ checked_out = Ch}}]
+ when map_size(Ch) == 1,
+ maps:to_list(State2#?STATE.consumers)),
+
+ %% the disconnected consumer has been returned to waiting
+ ?assert(lists:any(fun ({C,_}) -> C =:= C1 end,
+ State2#?STATE.waiting_consumers)),
+ ?assertEqual(2, length(State2#?STATE.waiting_consumers)),
+
+ % simulate node comes back up
+ {State3, _, _} = apply(#{index => 2}, {nodeup, node(DownPid)}, State2),
+
+ %% the consumer is still active and the same as before
+ ?assertMatch([{C2, #consumer{status = up}}],
+ maps:to_list(State3#?STATE.consumers)),
+    % the waiting consumers should no longer be suspected down
+ ?assertEqual(2, length(State3#?STATE.waiting_consumers)),
+ lists:foreach(fun({_, #consumer{status = Status}}) ->
+ ?assert(Status /= suspected_down)
+ end, State3#?STATE.waiting_consumers),
+ ok.
+
+single_active_consumer_all_disconnected_test(_) ->
+ R = rabbit_misc:r("/", queue, atom_to_binary(?FUNCTION_NAME, utf8)),
+ State0 = init(#{name => ?FUNCTION_NAME,
+ queue_resource => R,
+ release_cursor_interval => 0,
+ single_active_consumer_on => true}),
+ Meta = #{index => 1},
+ Nodes = [n1, n2],
+ ConsumerIds = [C1 = {_, C1Pid}, C2 = {_, C2Pid}] =
+ [begin
+ B = atom_to_binary(N, utf8),
+ {<<"ctag_", B/binary>>,
+ test_util:fake_pid(N)}
+ end || N <- Nodes],
+ % adding some consumers
+ State1 = lists:foldl(
+ fun(CId, Acc0) ->
+ {Acc, _, _} =
+ apply(Meta,
+ make_checkout(CId,
+ {once, 1, simple_prefetch}, #{}),
+ Acc0),
+ Acc
+ end, State0, ConsumerIds),
+
+ %% assert the consumer is up
+ ?assertMatch(#{C1 := #consumer{status = up}}, State1#?STATE.consumers),
+
+ % simulate node goes down
+ {State2, _, _} = apply(meta(5), {down, C1Pid, noconnection}, State1),
+ %% assert the consumer fails over to the consumer on n2
+ ?assertMatch(#{C2 := #consumer{status = up}}, State2#?STATE.consumers),
+ {State3, _, _} = apply(meta(6), {down, C2Pid, noconnection}, State2),
+    %% assert there is no active consumer after both nodes are marked as down
+ ?assertMatch([], maps:to_list(State3#?STATE.consumers)),
+ %% n2 comes back
+ {State4, _, _} = apply(meta(7), {nodeup, node(C2Pid)}, State3),
+    %% ensure n2's consumer is active again now that its node has been
+    %% registered as up
+ ?assertMatch([{{<<"ctag_n2">>, _}, #consumer{status = up,
+ credit = 1}}],
+ maps:to_list(State4#?STATE.consumers)),
+ ok.
+
+single_active_consumer_state_enter_leader_include_waiting_consumers_test(_) ->
+ State0 = init(#{name => ?FUNCTION_NAME,
+ queue_resource =>
+ rabbit_misc:r("/", queue,
+ atom_to_binary(?FUNCTION_NAME, utf8)),
+ release_cursor_interval => 0,
+ single_active_consumer_on => true}),
+
+ DummyFunction = fun() -> ok end,
+ Pid1 = spawn(DummyFunction),
+ Pid2 = spawn(DummyFunction),
+ Pid3 = spawn(DummyFunction),
+
+ Meta = #{index => 1},
+ % adding some consumers
+ AddConsumer = fun({CTag, ChannelId}, State) ->
+ {NewState, _, _} = apply(
+ Meta,
+ make_checkout({CTag, ChannelId},
+ {once, 1, simple_prefetch}, #{}),
+ State),
+ NewState
+ end,
+    State1 = lists:foldl(AddConsumer, State0,
+                         [{<<"ctag1">>, Pid1}, {<<"ctag2">>, Pid2},
+                          {<<"ctag3">>, Pid2}, {<<"ctag4">>, Pid3}]),
+
+ Effects = rabbit_fifo_v0:state_enter(leader, State1),
+ %% 2 effects for each consumer process (channel process), 1 effect for the node,
+ %% 1 effect for file handle reservation
+ ?assertEqual(2 * 3 + 1 + 1, length(Effects)).
+
+single_active_consumer_state_enter_eol_include_waiting_consumers_test(_) ->
+ Resource = rabbit_misc:r("/", queue, atom_to_binary(?FUNCTION_NAME, utf8)),
+ State0 = init(#{name => ?FUNCTION_NAME,
+ queue_resource => Resource,
+ release_cursor_interval => 0,
+ single_active_consumer_on => true}),
+
+ DummyFunction = fun() -> ok end,
+ Pid1 = spawn(DummyFunction),
+ Pid2 = spawn(DummyFunction),
+ Pid3 = spawn(DummyFunction),
+
+ Meta = #{index => 1},
+ % adding some consumers
+ AddConsumer = fun({CTag, ChannelId}, State) ->
+ {NewState, _, _} = apply(
+ Meta,
+ make_checkout({CTag, ChannelId},
+ {once, 1, simple_prefetch}, #{}),
+ State),
+ NewState
+ end,
+ State1 = lists:foldl(AddConsumer, State0,
+ [{<<"ctag1">>, Pid1}, {<<"ctag2">>, Pid2},
+ {<<"ctag3">>, Pid2}, {<<"ctag4">>, Pid3}]),
+
+ Effects = rabbit_fifo_v0:state_enter(eol, State1),
+ %% 1 effect for each consumer process (channel process),
+ %% 1 effect for file handle reservation
+ ?assertEqual(4, length(Effects)).
+
+query_consumers_test(_) ->
+ State0 = init(#{name => ?FUNCTION_NAME,
+ queue_resource => rabbit_misc:r("/", queue,
+ atom_to_binary(?FUNCTION_NAME, utf8)),
+ release_cursor_interval => 0,
+ single_active_consumer_on => false}),
+
+ % adding some consumers
+ AddConsumer = fun(CTag, State) ->
+ {NewState, _, _} = apply(
+ #{index => 1},
+ make_checkout({CTag, self()},
+ {once, 1, simple_prefetch}, #{}),
+ State),
+ NewState
+ end,
+    State1 = lists:foldl(AddConsumer, State0,
+                         [<<"ctag1">>, <<"ctag2">>, <<"ctag3">>, <<"ctag4">>]),
+ Consumers0 = State1#?STATE.consumers,
+ Consumer = maps:get({<<"ctag2">>, self()}, Consumers0),
+ Consumers1 = maps:put({<<"ctag2">>, self()},
+ Consumer#consumer{status = suspected_down}, Consumers0),
+ State2 = State1#?STATE{consumers = Consumers1},
+
+ ?assertEqual(4, rabbit_fifo_v0:query_consumer_count(State2)),
+ Consumers2 = rabbit_fifo_v0:query_consumers(State2),
+ ?assertEqual(4, maps:size(Consumers2)),
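+    %% each consumer is rendered as an 8-tuple; only the Pid, Tag, Active
+    %% and ActivityStatus fields are asserted on here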
+ maps:fold(fun(_Key, {Pid, Tag, _, _, Active, ActivityStatus, _, _}, _Acc) ->
+ ?assertEqual(self(), Pid),
+ case Tag of
+ <<"ctag2">> ->
+ ?assertNot(Active),
+ ?assertEqual(suspected_down, ActivityStatus);
+ _ ->
+ ?assert(Active),
+ ?assertEqual(up, ActivityStatus)
+ end
+ end, [], Consumers2).
+
+query_consumers_when_single_active_consumer_is_on_test(_) ->
+ State0 = init(#{name => ?FUNCTION_NAME,
+ queue_resource => rabbit_misc:r("/", queue,
+ atom_to_binary(?FUNCTION_NAME, utf8)),
+ release_cursor_interval => 0,
+ single_active_consumer_on => true}),
+ Meta = #{index => 1},
+ % adding some consumers
+ AddConsumer = fun(CTag, State) ->
+ {NewState, _, _} = apply(
+ Meta,
+ make_checkout({CTag, self()},
+ {once, 1, simple_prefetch}, #{}),
+ State),
+ NewState
+ end,
+    State1 = lists:foldl(AddConsumer, State0,
+                         [<<"ctag1">>, <<"ctag2">>, <<"ctag3">>, <<"ctag4">>]),
+
+ ?assertEqual(4, rabbit_fifo_v0:query_consumer_count(State1)),
+ Consumers = rabbit_fifo_v0:query_consumers(State1),
+ ?assertEqual(4, maps:size(Consumers)),
+ maps:fold(fun(_Key, {Pid, Tag, _, _, Active, ActivityStatus, _, _}, _Acc) ->
+ ?assertEqual(self(), Pid),
+ case Tag of
+ <<"ctag1">> ->
+ ?assert(Active),
+ ?assertEqual(single_active, ActivityStatus);
+ _ ->
+ ?assertNot(Active),
+ ?assertEqual(waiting, ActivityStatus)
+ end
+ end, [], Consumers).
+
+active_flag_updated_when_consumer_suspected_unsuspected_test(_) ->
+ State0 = init(#{name => ?FUNCTION_NAME,
+ queue_resource => rabbit_misc:r("/", queue,
+ atom_to_binary(?FUNCTION_NAME, utf8)),
+ release_cursor_interval => 0,
+ single_active_consumer_on => false}),
+
+ DummyFunction = fun() -> ok end,
+ Pid1 = spawn(DummyFunction),
+ Pid2 = spawn(DummyFunction),
+ Pid3 = spawn(DummyFunction),
+
+ % adding some consumers
+ AddConsumer = fun({CTag, ChannelId}, State) ->
+ {NewState, _, _} =
+ apply(
+ #{index => 1},
+ rabbit_fifo_v0:make_checkout({CTag, ChannelId},
+ {once, 1, simple_prefetch},
+ #{}),
+ State),
+ NewState
+ end,
+    State1 = lists:foldl(AddConsumer, State0,
+                         [{<<"ctag1">>, Pid1}, {<<"ctag2">>, Pid2},
+                          {<<"ctag3">>, Pid2}, {<<"ctag4">>, Pid3}]),
+
+ {State2, _, Effects2} = apply(#{index => 3}, {down, Pid1, noconnection}, State1),
+    % 1 effect to update the metrics of each consumer (they all belong to
+    % the same node), 1 more effect to monitor the node
+ ?assertEqual(4 + 1, length(Effects2)),
+
+ {_, _, Effects3} = apply(#{index => 4}, {nodeup, node(self())}, State2),
+ % for each consumer: 1 effect to update the metrics, 1 effect to monitor the consumer PID
+ ?assertEqual(4 + 4, length(Effects3)).
+
+active_flag_not_updated_when_consumer_suspected_unsuspected_and_single_active_consumer_is_on_test(_) ->
+ State0 = init(#{name => ?FUNCTION_NAME,
+ queue_resource => rabbit_misc:r("/", queue,
+ atom_to_binary(?FUNCTION_NAME, utf8)),
+ release_cursor_interval => 0,
+ single_active_consumer_on => true}),
+
+ DummyFunction = fun() -> ok end,
+ Pid1 = spawn(DummyFunction),
+ Pid2 = spawn(DummyFunction),
+ Pid3 = spawn(DummyFunction),
+
+ % adding some consumers
+ AddConsumer = fun({CTag, ChannelId}, State) ->
+ {NewState, _, _} = apply(
+ #{index => 1},
+ make_checkout({CTag, ChannelId},
+ {once, 1, simple_prefetch}, #{}),
+ State),
+ NewState
+ end,
+ State1 = lists:foldl(AddConsumer, State0,
+ [{<<"ctag1">>, Pid1}, {<<"ctag2">>, Pid2},
+ {<<"ctag3">>, Pid2}, {<<"ctag4">>, Pid3}]),
+
+ {State2, _, Effects2} = apply(#{index => 2}, {down, Pid1, noconnection}, State1),
+ % one monitor and one consumer status update (deactivated)
+ ?assertEqual(3, length(Effects2)),
+
+ {_, _, Effects3} = apply(#{index => 3}, {nodeup, node(self())}, State2),
+ % for each consumer: 1 effect to monitor the consumer PID
+ ?assertEqual(5, length(Effects3)).
+
+single_active_cancelled_with_unacked_test(_) ->
+ State0 = init(#{name => ?FUNCTION_NAME,
+ queue_resource => rabbit_misc:r("/", queue,
+ atom_to_binary(?FUNCTION_NAME, utf8)),
+ release_cursor_interval => 0,
+ single_active_consumer_on => true}),
+
+ C1 = {<<"ctag1">>, self()},
+ C2 = {<<"ctag2">>, self()},
+ % adding some consumers
+ AddConsumer = fun(C, S0) ->
+ {S, _, _} = apply(
+ meta(1),
+ make_checkout(C,
+ {auto, 1, simple_prefetch},
+ #{}),
+ S0),
+ S
+ end,
+ State1 = lists:foldl(AddConsumer, State0, [C1, C2]),
+
+ %% enqueue 2 messages
+ {State2, _Effects2} = enq(3, 1, msg1, State1),
+ {State3, _Effects3} = enq(4, 2, msg2, State2),
+    %% one message should be checked out to C1
+ %% cancel C1
+ {State4, _, _} = apply(meta(5),
+ make_checkout(C1, cancel, #{}),
+ State3),
+ %% C2 should be the active consumer
+ ?assertMatch(#{C2 := #consumer{status = up,
+ checked_out = #{0 := _}}},
+ State4#?STATE.consumers),
+ %% C1 should be a cancelled consumer
+ ?assertMatch(#{C1 := #consumer{status = cancelled,
+ lifetime = once,
+ checked_out = #{0 := _}}},
+ State4#?STATE.consumers),
+ ?assertMatch([], State4#?STATE.waiting_consumers),
+
+ %% Ack both messages
+ {State5, _Effects5} = settle(C1, 1, 0, State4),
+    %% settling C1's message allows the cancelled consumer to be removed
+ {State6, _Effects6} = settle(C2, 2, 0, State5),
+
+ %% C2 should remain
+ ?assertMatch(#{C2 := #consumer{status = up}},
+ State6#?STATE.consumers),
+ %% C1 should be gone
+ ?assertNotMatch(#{C1 := _},
+ State6#?STATE.consumers),
+ ?assertMatch([], State6#?STATE.waiting_consumers),
+ ok.
+
+single_active_with_credited_test(_) ->
+ State0 = init(#{name => ?FUNCTION_NAME,
+ queue_resource => rabbit_misc:r("/", queue,
+ atom_to_binary(?FUNCTION_NAME, utf8)),
+ release_cursor_interval => 0,
+ single_active_consumer_on => true}),
+
+ C1 = {<<"ctag1">>, self()},
+ C2 = {<<"ctag2">>, self()},
+ % adding some consumers
+ AddConsumer = fun(C, S0) ->
+ {S, _, _} = apply(
+ meta(1),
+ make_checkout(C,
+ {auto, 0, credited},
+ #{}),
+ S0),
+ S
+ end,
+ State1 = lists:foldl(AddConsumer, State0, [C1, C2]),
+
+ %% add some credit
+ C1Cred = rabbit_fifo_v0:make_credit(C1, 5, 0, false),
+ {State2, _, _Effects2} = apply(meta(3), C1Cred, State1),
+ C2Cred = rabbit_fifo_v0:make_credit(C2, 4, 0, false),
+ {State3, _} = apply(meta(4), C2Cred, State2),
+ %% both consumers should have credit
+ ?assertMatch(#{C1 := #consumer{credit = 5}},
+ State3#?STATE.consumers),
+ ?assertMatch([{C2, #consumer{credit = 4}}],
+ State3#?STATE.waiting_consumers),
+ ok.
+
+purge_nodes_test(_) ->
+ Node = purged@node,
+ ThisNode = node(),
+ EnqPid = test_util:fake_pid(Node),
+ EnqPid2 = test_util:fake_pid(node()),
+ ConPid = test_util:fake_pid(Node),
+ Cid = {<<"tag">>, ConPid},
+ % WaitingPid = test_util:fake_pid(Node),
+
+ State0 = init(#{name => ?FUNCTION_NAME,
+ queue_resource => rabbit_misc:r("/", queue,
+ atom_to_binary(?FUNCTION_NAME, utf8)),
+ single_active_consumer_on => false}),
+ {State1, _, _} = apply(meta(1),
+ rabbit_fifo_v0:make_enqueue(EnqPid, 1, msg1),
+ State0),
+ {State2, _, _} = apply(meta(2),
+ rabbit_fifo_v0:make_enqueue(EnqPid2, 1, msg2),
+ State1),
+ {State3, _} = check(Cid, 3, 1000, State2),
+ {State4, _, _} = apply(meta(4),
+ {down, EnqPid, noconnection},
+ State3),
+ ?assertMatch(
+ [{mod_call, rabbit_quorum_queue, handle_tick,
+ [#resource{}, _Metrics,
+ [ThisNode, Node]
+ ]}] , rabbit_fifo_v0:tick(1, State4)),
+    %% the tick above confirms both nodes still host enqueuers/consumers
+ {State, _, _} = apply(meta(5),
+ rabbit_fifo_v0:make_purge_nodes([Node]),
+ State4),
+
+    %% assert only the local enqueuer remains and all consumers are gone
+ ?assertMatch(#?STATE{enqueuers = Enqs} when map_size(Enqs) == 1, State),
+ ?assertMatch(#?STATE{consumers = Cons} when map_size(Cons) == 0, State),
+ ?assertMatch(
+ [{mod_call, rabbit_quorum_queue, handle_tick,
+ [#resource{}, _Metrics,
+ [ThisNode]
+ ]}] , rabbit_fifo_v0:tick(1, State)),
+ ok.
+
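+%% Test helpers: thin wrappers around rabbit_fifo_v0 commands.
+%% strip_reply/1 drops the reply element of apply/3 results so assertions
+%% can match on {State, Effects} directly.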
+meta(Idx) ->
+ #{index => Idx, term => 1,
+ from => {make_ref(), self()}}.
+
+enq(Idx, MsgSeq, Msg, State) ->
+ strip_reply(
+ apply(meta(Idx), rabbit_fifo_v0:make_enqueue(self(), MsgSeq, Msg), State)).
+
+deq(Idx, Cid, Settlement, State0) ->
+ {State, {dequeue, {MsgId, Msg}, _}, _} =
+ apply(meta(Idx),
+ rabbit_fifo_v0:make_checkout(Cid, {dequeue, Settlement}, #{}),
+ State0),
+ {State, {MsgId, Msg}}.
+
+check_n(Cid, Idx, N, State) ->
+ strip_reply(
+ apply(meta(Idx),
+ rabbit_fifo_v0:make_checkout(Cid, {auto, N, simple_prefetch}, #{}),
+ State)).
+
+check(Cid, Idx, State) ->
+ strip_reply(
+ apply(meta(Idx),
+ rabbit_fifo_v0:make_checkout(Cid, {once, 1, simple_prefetch}, #{}),
+ State)).
+
+check_auto(Cid, Idx, State) ->
+ strip_reply(
+ apply(meta(Idx),
+ rabbit_fifo_v0:make_checkout(Cid, {auto, 1, simple_prefetch}, #{}),
+ State)).
+
+check(Cid, Idx, Num, State) ->
+ strip_reply(
+ apply(meta(Idx),
+ rabbit_fifo_v0:make_checkout(Cid, {auto, Num, simple_prefetch}, #{}),
+ State)).
+
+settle(Cid, Idx, MsgId, State) ->
+ strip_reply(apply(meta(Idx), rabbit_fifo_v0:make_settle(Cid, [MsgId]), State)).
+
+credit(Cid, Idx, Credit, DelCnt, Drain, State) ->
+ strip_reply(apply(meta(Idx), rabbit_fifo_v0:make_credit(Cid, Credit, DelCnt, Drain),
+ State)).
+
+strip_reply({State, _, Effects}) ->
+ {State, Effects}.
+
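+%% Replays a list of {RaftIndex, Entry} pairs through apply/3,
+%% accumulating effects; a bare (non-list) effect is wrapped so callers
+%% always fold over a flat effect list.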
+run_log(InitState, Entries) ->
+ lists:foldl(fun ({Idx, E}, {Acc0, Efx0}) ->
+ case apply(meta(Idx), E, Acc0) of
+ {Acc, _, Efx} when is_list(Efx) ->
+ {Acc, Efx0 ++ Efx};
+ {Acc, _, Efx} ->
+ {Acc, Efx0 ++ [Efx]};
+ {Acc, _} ->
+ {Acc, Efx0}
+ end
+ end, {InitState, []}, Entries).
+
+
+%% AUX Tests
+
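+%% The aux state tracks queue utilisation: handle_aux/6 is exercised with
+%% a mocked ra_log and the resulting usage sample is read back from the
+%% rabbit_fifo_usage ETS table.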
+aux_test(_) ->
+ _ = ra_machine_ets:start_link(),
+ Aux0 = init_aux(aux_test),
+ MacState = init(#{name => aux_test,
+ queue_resource =>
+ rabbit_misc:r(<<"/">>, queue, <<"test">>)}),
+ ok = meck:new(ra_log, []),
+ Log = mock_log,
+ meck:expect(ra_log, last_index_term, fun (_) -> {0, 0} end),
+ {no_reply, Aux, mock_log} = handle_aux(leader, cast, active, Aux0,
+ Log, MacState),
+ {no_reply, _Aux, mock_log} = handle_aux(leader, cast, tick, Aux,
+ Log, MacState),
+    %% ets:lookup/2 returns [{Key, Usage}]; assert on the usage value
+    %% itself, as any tuple compares greater than a float
+    [{aux_test, Use}] = ets:lookup(rabbit_fifo_usage, aux_test),
+    meck:unload(),
+    ?assert(Use > 0.0),
+ ok.
+
+%% Utility
+
+init(Conf) -> rabbit_fifo_v0:init(Conf).
+apply(Meta, Entry, State) -> rabbit_fifo_v0:apply(Meta, Entry, State).
+init_aux(Conf) -> rabbit_fifo_v0:init_aux(Conf).
+handle_aux(S, T, C, A, L, M) -> rabbit_fifo_v0:handle_aux(S, T, C, A, L, M).
+make_checkout(C, S, M) -> rabbit_fifo_v0:make_checkout(C, S, M).
diff --git a/deps/rabbit/test/rabbit_foo_protocol_connection_info.erl b/deps/rabbit/test/rabbit_foo_protocol_connection_info.erl
new file mode 100644
index 0000000000..937558aba8
--- /dev/null
+++ b/deps/rabbit/test/rabbit_foo_protocol_connection_info.erl
@@ -0,0 +1,25 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2019-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+-module(rabbit_foo_protocol_connection_info).
+
+%% Dummy module to test authentication context propagation
+
+%% API
+-export([additional_authn_params/4]).
+
+additional_authn_params(_Creds, _VHost, _Pid, Infos) ->
+ case proplists:get_value(variable_map, Infos, undefined) of
+ VariableMap when is_map(VariableMap) ->
+ case maps:get(<<"key1">>, VariableMap, []) of
+ Value when is_binary(Value)->
+ [{key1, Value}];
+ [] ->
+ []
+ end;
+ _ ->
+ []
+ end.
diff --git a/deps/rabbit/test/rabbit_ha_test_consumer.erl b/deps/rabbit/test/rabbit_ha_test_consumer.erl
new file mode 100644
index 0000000000..2467e40028
--- /dev/null
+++ b/deps/rabbit/test/rabbit_ha_test_consumer.erl
@@ -0,0 +1,108 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+-module(rabbit_ha_test_consumer).
+
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-export([await_response/1, create/5, start/6]).
+
+await_response(ConsumerPid) ->
+ case receive {ConsumerPid, Response} -> Response end of
+ {error, Reason} -> erlang:error(Reason);
+ ok -> ok
+ end.
+
+create(Channel, Queue, TestPid, CancelOnFailover, ExpectingMsgs) ->
+ ConsumerPid = spawn_link(?MODULE, start,
+ [TestPid, Channel, Queue, CancelOnFailover,
+ ExpectingMsgs + 1, ExpectingMsgs]),
+ amqp_channel:subscribe(
+ Channel, consume_method(Queue, CancelOnFailover), ConsumerPid),
+ ConsumerPid.
+
+start(TestPid, Channel, Queue, CancelOnFailover, LowestSeen, MsgsToConsume) ->
+ error_logger:info_msg("consumer ~p on ~p awaiting ~w messages "
+ "(lowest seen = ~w, cancel-on-failover = ~w)~n",
+ [self(), Channel, MsgsToConsume, LowestSeen,
+ CancelOnFailover]),
+ run(TestPid, Channel, Queue, CancelOnFailover, LowestSeen, MsgsToConsume).
+
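+%% Payloads are numbered in descending order by the producer, so
+%% LowestSeen starts at ExpectingMsgs + 1 and a delivery only decrements
+%% MsgsToConsume the first time it is seen (MsgNum + 1 == LowestSeen).
+%% Redeliveries may arrive in any order but must carry the redelivered
+%% flag.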
+run(TestPid, _Channel, _Queue, _CancelOnFailover, _LowestSeen, 0) ->
+ consumer_reply(TestPid, ok);
+run(TestPid, Channel, Queue, CancelOnFailover, LowestSeen, MsgsToConsume) ->
+ receive
+ #'basic.consume_ok'{} ->
+ run(TestPid, Channel, Queue,
+ CancelOnFailover, LowestSeen, MsgsToConsume);
+ {Delivery = #'basic.deliver'{ redelivered = Redelivered },
+ #amqp_msg{payload = Payload}} ->
+ MsgNum = list_to_integer(binary_to_list(Payload)),
+
+ ack(Delivery, Channel),
+
+ %% we can receive any message we've already seen and,
+ %% because of the possibility of multiple requeuings, we
+ %% might see these messages in any order. If we are seeing
+ %% a message again, we don't decrement the MsgsToConsume
+ %% counter.
+ if
+ MsgNum + 1 == LowestSeen ->
+ error_logger:info_msg("recording ~w left ~w",
+ [MsgNum, MsgsToConsume]),
+ run(TestPid, Channel, Queue,
+ CancelOnFailover, MsgNum, MsgsToConsume - 1);
+ MsgNum >= LowestSeen ->
+ error_logger:info_msg(
+ "consumer ~p on ~p ignoring redelivered msg ~p"
+ "lowest seen ~w~n",
+ [self(), Channel, MsgNum, LowestSeen]),
+ true = Redelivered, %% ASSERTION
+ run(TestPid, Channel, Queue,
+ CancelOnFailover, LowestSeen, MsgsToConsume);
+ true ->
+ %% We received a message we haven't seen before,
+ %% but it is not the next message in the expected
+ %% sequence.
+ consumer_reply(TestPid,
+ {error, {unexpected_message, MsgNum}})
+ end;
+ #'basic.cancel'{} when CancelOnFailover ->
+ error_logger:info_msg("consumer ~p on ~p received basic.cancel: "
+ "resubscribing to ~p on ~p~n",
+ [self(), Channel, Queue, Channel]),
+ resubscribe(TestPid, Channel, Queue, CancelOnFailover,
+ LowestSeen, MsgsToConsume);
+ #'basic.cancel'{} ->
+ exit(cancel_received_without_cancel_on_failover)
+ end.
+
+%%
+%% Private API
+%%
+
+resubscribe(TestPid, Channel, Queue, CancelOnFailover, LowestSeen,
+ MsgsToConsume) ->
+ amqp_channel:subscribe(
+ Channel, consume_method(Queue, CancelOnFailover), self()),
+ ok = receive #'basic.consume_ok'{} -> ok
+ end,
+ error_logger:info_msg("re-subscripting consumer ~p on ~p complete "
+ "(received basic.consume_ok)",
+ [self(), Channel]),
+ start(TestPid, Channel, Queue, CancelOnFailover, LowestSeen, MsgsToConsume).
+
+consume_method(Queue, CancelOnFailover) ->
+ Args = [{<<"x-cancel-on-ha-failover">>, bool, CancelOnFailover}],
+ #'basic.consume'{queue = Queue,
+ arguments = Args}.
+
+ack(#'basic.deliver'{delivery_tag = DeliveryTag}, Channel) ->
+ amqp_channel:call(Channel, #'basic.ack'{delivery_tag = DeliveryTag}),
+ ok.
+
+consumer_reply(TestPid, Reply) ->
+ TestPid ! {self(), Reply}.
diff --git a/deps/rabbit/test/rabbit_ha_test_producer.erl b/deps/rabbit/test/rabbit_ha_test_producer.erl
new file mode 100644
index 0000000000..ed6969debe
--- /dev/null
+++ b/deps/rabbit/test/rabbit_ha_test_producer.erl
@@ -0,0 +1,131 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+-module(rabbit_ha_test_producer).
+
+-export([await_response/1, start/6, create/5, create/6]).
+
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+await_response(ProducerPid) ->
+ error_logger:info_msg("waiting for producer pid ~p~n", [ProducerPid]),
+ case receive {ProducerPid, Response} -> Response end of
+ ok -> ok;
+ {error, _} = Else -> exit(Else);
+ Else -> exit({weird_response, Else})
+ end.
+
+create(Channel, Queue, TestPid, Confirm, MsgsToSend) ->
+ create(Channel, Queue, TestPid, Confirm, MsgsToSend, acks).
+
+create(Channel, Queue, TestPid, Confirm, MsgsToSend, Mode) ->
+ AckNackMsgs = case Mode of
+ acks -> {ok, {error, received_nacks}};
+ nacks -> {{error, received_acks}, ok}
+ end,
+ ProducerPid = spawn_link(?MODULE, start, [Channel, Queue, TestPid,
+ Confirm, MsgsToSend, AckNackMsgs]),
+ receive
+ {ProducerPid, started} -> ProducerPid
+ end.
+
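+%% Publishes payloads MsgsToSend, MsgsToSend - 1, ..., 1. When Confirm is
+%% true each publish is recorded in a gb_tree keyed by its publish
+%% sequence number; the tree is drained as acks and nacks arrive.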
+start(Channel, Queue, TestPid, Confirm, MsgsToSend, AckNackMsgs) ->
+ ConfirmState =
+ case Confirm of
+ true -> amqp_channel:register_confirm_handler(Channel, self()),
+ #'confirm.select_ok'{} =
+ amqp_channel:call(Channel, #'confirm.select'{}),
+ gb_trees:empty();
+ false -> none
+ end,
+ TestPid ! {self(), started},
+ error_logger:info_msg("publishing ~w msgs on ~p~n", [MsgsToSend, Channel]),
+ producer(Channel, Queue, TestPid, ConfirmState, MsgsToSend, AckNackMsgs).
+
+%%
+%% Private API
+%%
+
+producer(_Channel, _Queue, TestPid, none, 0, _AckNackMsgs) ->
+ TestPid ! {self(), ok};
+producer(Channel, _Queue, TestPid, ConfirmState, 0, {AckMsg, NackMsg}) ->
+ error_logger:info_msg("awaiting confirms on channel ~p~n", [Channel]),
+ Msg = case drain_confirms(none, ConfirmState) of
+ %% No acks or nacks
+ acks -> AckMsg;
+ nacks -> NackMsg;
+ mix -> {error, received_both_acks_and_nacks};
+ {Nacks, CS} -> {error, {missing_confirms, Nacks,
+ lists:sort(gb_trees:keys(CS))}}
+ end,
+ TestPid ! {self(), Msg};
+
+producer(Channel, Queue, TestPid, ConfirmState, MsgsToSend, AckNackMsgs) ->
+ Method = #'basic.publish'{exchange = <<"">>,
+ routing_key = Queue,
+ mandatory = false,
+ immediate = false},
+
+ ConfirmState1 = maybe_record_confirm(ConfirmState, Channel, MsgsToSend),
+
+ amqp_channel:call(Channel, Method,
+ #amqp_msg{props = #'P_basic'{delivery_mode = 2},
+ payload = list_to_binary(
+ integer_to_list(MsgsToSend))}),
+
+ producer(Channel, Queue, TestPid, ConfirmState1, MsgsToSend - 1, AckNackMsgs).
+
+maybe_record_confirm(none, _, _) ->
+ none;
+maybe_record_confirm(ConfirmState, Channel, MsgsToSend) ->
+ SeqNo = amqp_channel:next_publish_seqno(Channel),
+ gb_trees:insert(SeqNo, MsgsToSend, ConfirmState).
+
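+%% Collapses the stream of confirms into acks | nacks | mix, or into
+%% {Collected, ConfirmState} if confirms are still outstanding after the
+%% 60 second timeout.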
+drain_confirms(Collected, ConfirmState) ->
+ case gb_trees:is_empty(ConfirmState) of
+ true -> Collected;
+ false -> receive
+ #'basic.ack'{delivery_tag = DeliveryTag,
+ multiple = IsMulti} ->
+ Collected1 = case Collected of
+ none -> acks;
+ acks -> acks;
+ nacks -> mix;
+ mix -> mix
+ end,
+ drain_confirms(Collected1,
+ delete_confirms(DeliveryTag, IsMulti,
+ ConfirmState));
+ #'basic.nack'{delivery_tag = DeliveryTag,
+ multiple = IsMulti} ->
+ Collected1 = case Collected of
+ none -> nacks;
+ nacks -> nacks;
+ acks -> mix;
+ mix -> mix
+ end,
+ drain_confirms(Collected1,
+ delete_confirms(DeliveryTag, IsMulti,
+ ConfirmState))
+ after
+ 60000 -> {Collected, ConfirmState}
+ end
+ end.
+
+delete_confirms(DeliveryTag, false, ConfirmState) ->
+ gb_trees:delete(DeliveryTag, ConfirmState);
+delete_confirms(DeliveryTag, true, ConfirmState) ->
+ multi_confirm(DeliveryTag, ConfirmState).
+
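+%% A confirm with multiple=true covers every outstanding sequence number
+%% up to and including DeliveryTag, so the smallest keys are popped until
+%% the tag is passed.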
+multi_confirm(DeliveryTag, ConfirmState) ->
+ case gb_trees:is_empty(ConfirmState) of
+ true -> ConfirmState;
+ false -> {Key, _, ConfirmState1} = gb_trees:take_smallest(ConfirmState),
+ case Key =< DeliveryTag of
+ true -> multi_confirm(DeliveryTag, ConfirmState1);
+ false -> ConfirmState
+ end
+ end.
diff --git a/deps/rabbit/test/rabbit_msg_record_SUITE.erl b/deps/rabbit/test/rabbit_msg_record_SUITE.erl
new file mode 100644
index 0000000000..a82ba7481d
--- /dev/null
+++ b/deps/rabbit/test/rabbit_msg_record_SUITE.erl
@@ -0,0 +1,213 @@
+-module(rabbit_msg_record_SUITE).
+
+-compile(export_all).
+
+-export([
+ ]).
+
+-include("rabbit.hrl").
+-include("rabbit_framing.hrl").
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("amqp10_common/include/amqp10_framing.hrl").
+
+%%%===================================================================
+%%% Common Test callbacks
+%%%===================================================================
+
+all() ->
+ [
+ {group, tests}
+ ].
+
+
+all_tests() ->
+ [
+ ampq091_roundtrip,
+ message_id_ulong,
+ message_id_uuid,
+ message_id_binary,
+ message_id_large_binary,
+ message_id_large_string
+ ].
+
+groups() ->
+ [
+ {tests, [], all_tests()}
+ ].
+
+init_per_suite(Config) ->
+ Config.
+
+end_per_suite(_Config) ->
+ ok.
+
+init_per_group(_Group, Config) ->
+ Config.
+
+end_per_group(_Group, _Config) ->
+ ok.
+
+init_per_testcase(_TestCase, Config) ->
+ Config.
+
+end_per_testcase(_TestCase, _Config) ->
+ ok.
+
+%%%===================================================================
+%%% Test cases
+%%%===================================================================
+
+ampq091_roundtrip(_Config) ->
+ Props = #'P_basic'{content_type = <<"text/plain">>,
+ content_encoding = <<"gzip">>,
+ headers = [{<<"x-stream-offset">>, long, 99},
+ {<<"x-string">>, longstr, <<"a string">>},
+ {<<"x-bool">>, bool, false},
+ {<<"x-unsignedbyte">>, unsignedbyte, 1},
+ {<<"x-unsignedshort">>, unsignedshort, 1},
+ {<<"x-unsignedint">>, unsignedint, 1},
+ {<<"x-signedint">>, signedint, 1},
+ {<<"x-timestamp">>, timestamp, 1},
+ {<<"x-double">>, double, 1.0},
+ {<<"x-float">>, float, 1.0},
+ {<<"x-binary">>, binary, <<"data">>}
+ ],
+ delivery_mode = 2,
+ priority = 99,
+ correlation_id = <<"corr">> ,
+ reply_to = <<"reply-to">>,
+ expiration = <<"1">>,
+ message_id = <<"msg-id">>,
+ timestamp = 99,
+ type = <<"45">>,
+ user_id = <<"banana">>,
+ app_id = <<"rmq">>
+ % cluster_id = <<"adf">>
+ },
+ Payload = [<<"data">>],
+ test_amqp091_roundtrip(Props, Payload),
+ test_amqp091_roundtrip(#'P_basic'{}, Payload),
+ ok.
+
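+%% AMQP 1.0 allows ulong, uuid, binary and string message-ids and
+%% correlation-ids, whereas AMQP 0.9.1 only offers a shortstr (at most
+%% 255 bytes). The tests below check the conversion: small non-string ids
+%% are turned into strings plus an x-*-id-type header, while values too
+%% large for a shortstr are moved into x-message-id / x-correlation-id
+%% headers and the 0.9.1 fields are left undefined.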
+message_id_ulong(_Config) ->
+ Num = 9876789,
+ ULong = erlang:integer_to_binary(Num),
+ P = #'v1_0.properties'{message_id = {ulong, Num},
+ correlation_id = {ulong, Num}},
+ D = #'v1_0.data'{content = <<"data">>},
+ Bin = [amqp10_framing:encode_bin(P),
+ amqp10_framing:encode_bin(D)],
+ R = rabbit_msg_record:init(iolist_to_binary(Bin)),
+ {Props, _} = rabbit_msg_record:to_amqp091(R),
+ ?assertMatch(#'P_basic'{message_id = ULong,
+ correlation_id = ULong,
+ headers =
+ [
+ %% ordering shouldn't matter
+ {<<"x-correlation-id-type">>, longstr, <<"ulong">>},
+ {<<"x-message-id-type">>, longstr, <<"ulong">>}
+ ]},
+ Props),
+ ok.
+
+message_id_uuid(_Config) ->
+ %% fake a uuid
+ UUId = erlang:md5(term_to_binary(make_ref())),
+ TextUUId = rabbit_data_coercion:to_binary(rabbit_guid:to_string(UUId)),
+ P = #'v1_0.properties'{message_id = {uuid, UUId},
+ correlation_id = {uuid, UUId}},
+ D = #'v1_0.data'{content = <<"data">>},
+ Bin = [amqp10_framing:encode_bin(P),
+ amqp10_framing:encode_bin(D)],
+ R = rabbit_msg_record:init(iolist_to_binary(Bin)),
+ {Props, _} = rabbit_msg_record:to_amqp091(R),
+ ?assertMatch(#'P_basic'{message_id = TextUUId,
+ correlation_id = TextUUId,
+ headers =
+ [
+ %% ordering shouldn't matter
+ {<<"x-correlation-id-type">>, longstr, <<"uuid">>},
+ {<<"x-message-id-type">>, longstr, <<"uuid">>}
+ ]},
+ Props),
+ ok.
+
+message_id_binary(_Config) ->
+    %% binary ids are base64-encoded to fit the 0.9.1 shortstr fields
+ Orig = <<"asdfasdf">>,
+ Text = base64:encode(Orig),
+ P = #'v1_0.properties'{message_id = {binary, Orig},
+ correlation_id = {binary, Orig}},
+ D = #'v1_0.data'{content = <<"data">>},
+ Bin = [amqp10_framing:encode_bin(P),
+ amqp10_framing:encode_bin(D)],
+ R = rabbit_msg_record:init(iolist_to_binary(Bin)),
+ {Props, _} = rabbit_msg_record:to_amqp091(R),
+ ?assertMatch(#'P_basic'{message_id = Text,
+ correlation_id = Text,
+ headers =
+ [
+ %% ordering shouldn't matter
+ {<<"x-correlation-id-type">>, longstr, <<"binary">>},
+ {<<"x-message-id-type">>, longstr, <<"binary">>}
+ ]},
+ Props),
+ ok.
+
+message_id_large_binary(_Config) ->
+ %% cannot fit in a shortstr
+ Orig = crypto:strong_rand_bytes(500),
+ P = #'v1_0.properties'{message_id = {binary, Orig},
+ correlation_id = {binary, Orig}},
+ D = #'v1_0.data'{content = <<"data">>},
+ Bin = [amqp10_framing:encode_bin(P),
+ amqp10_framing:encode_bin(D)],
+ R = rabbit_msg_record:init(iolist_to_binary(Bin)),
+ {Props, _} = rabbit_msg_record:to_amqp091(R),
+ ?assertMatch(#'P_basic'{message_id = undefined,
+ correlation_id = undefined,
+ headers =
+ [
+ %% ordering shouldn't matter
+ {<<"x-correlation-id">>, longstr, Orig},
+ {<<"x-message-id">>, longstr, Orig}
+ ]},
+ Props),
+ ok.
+
+message_id_large_string(_Config) ->
+ %% cannot fit in a shortstr
+ Orig = base64:encode(crypto:strong_rand_bytes(500)),
+ P = #'v1_0.properties'{message_id = {utf8, Orig},
+ correlation_id = {utf8, Orig}},
+ D = #'v1_0.data'{content = <<"data">>},
+ Bin = [amqp10_framing:encode_bin(P),
+ amqp10_framing:encode_bin(D)],
+ R = rabbit_msg_record:init(iolist_to_binary(Bin)),
+ {Props, _} = rabbit_msg_record:to_amqp091(R),
+ ?assertMatch(#'P_basic'{message_id = undefined,
+ correlation_id = undefined,
+ headers =
+ [
+ %% ordering shouldn't matter
+ {<<"x-correlation-id">>, longstr, Orig},
+ {<<"x-message-id">>, longstr, Orig}
+ ]},
+ Props),
+ ok.
+
+%% Utility
+
+test_amqp091_roundtrip(Props, Payload) ->
+ MsgRecord0 = rabbit_msg_record:from_amqp091(Props, Payload),
+ MsgRecord = rabbit_msg_record:init(
+ iolist_to_binary(rabbit_msg_record:to_iodata(MsgRecord0))),
+ % meck:unload(),
+ {PropsOut, PayloadOut} = rabbit_msg_record:to_amqp091(MsgRecord),
+ ?assertEqual(Props, PropsOut),
+ ?assertEqual(iolist_to_binary(Payload),
+ iolist_to_binary(PayloadOut)),
+ ok.
+
+
diff --git a/deps/rabbit/test/rabbit_stream_queue_SUITE.erl b/deps/rabbit/test/rabbit_stream_queue_SUITE.erl
new file mode 100644
index 0000000000..a1055458db
--- /dev/null
+++ b/deps/rabbit/test/rabbit_stream_queue_SUITE.erl
@@ -0,0 +1,1610 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at https://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% Copyright (c) 2012-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_stream_queue_SUITE).
+
+-include_lib("proper/include/proper.hrl").
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-compile(export_all).
+
+suite() ->
+ [{timetrap, 5 * 60000}].
+
+all() ->
+ [
+ {group, single_node},
+ {group, cluster_size_2},
+ {group, cluster_size_3},
+ {group, unclustered_size_3_1},
+ {group, unclustered_size_3_2},
+ {group, unclustered_size_3_3},
+ {group, cluster_size_3_1}
+ ].
+
+groups() ->
+ [
+ {single_node, [], [restart_single_node] ++ all_tests()},
+ {cluster_size_2, [], all_tests()},
+ {cluster_size_3, [], all_tests() ++
+ [delete_replica,
+ delete_down_replica,
+ delete_classic_replica,
+ delete_quorum_replica,
+ consume_from_replica,
+ leader_failover,
+ initial_cluster_size_one,
+ initial_cluster_size_two,
+ initial_cluster_size_one_policy,
+ leader_locator_client_local,
+ leader_locator_random,
+ leader_locator_least_leaders,
+ leader_locator_policy]},
+ {unclustered_size_3_1, [], [add_replica]},
+ {unclustered_size_3_2, [], [consume_without_local_replica]},
+ {unclustered_size_3_3, [], [grow_coordinator_cluster]},
+ {cluster_size_3_1, [], [shrink_coordinator_cluster]}
+ ].
+
+all_tests() ->
+ [
+ declare_args,
+ declare_max_age,
+ declare_invalid_properties,
+ declare_server_named,
+ declare_queue,
+ delete_queue,
+ publish,
+ publish_confirm,
+ recover,
+ consume_without_qos,
+ consume,
+ consume_offset,
+ consume_timestamp_offset,
+ consume_timestamp_last_offset,
+ basic_get,
+ consume_with_autoack,
+ consume_and_nack,
+ consume_and_ack,
+ consume_and_reject,
+ consume_from_last,
+ consume_from_next,
+ consume_from_default,
+ consume_credit,
+ consume_credit_out_of_order_ack,
+ consume_credit_multiple_ack,
+ basic_cancel,
+ max_length_bytes,
+ max_age,
+ invalid_policy,
+ max_age_policy,
+ max_segment_size_policy,
+ purge
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config0) ->
+ rabbit_ct_helpers:log_environment(),
+ Config = rabbit_ct_helpers:merge_app_env(
+ Config0, {rabbit, [{stream_tick_interval, 1000},
+ {log, [{file, [{level, debug}]}]}]}),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(Group, Config) ->
+ ClusterSize = case Group of
+ single_node -> 1;
+ cluster_size_2 -> 2;
+ cluster_size_3 -> 3;
+ cluster_size_3_1 -> 3;
+ unclustered_size_3_1 -> 3;
+ unclustered_size_3_2 -> 3;
+ unclustered_size_3_3 -> 3
+ end,
+ Clustered = case Group of
+ unclustered_size_3_1 -> false;
+ unclustered_size_3_2 -> false;
+ unclustered_size_3_3 -> false;
+ _ -> true
+ end,
+ Config1 = rabbit_ct_helpers:set_config(Config,
+ [{rmq_nodes_count, ClusterSize},
+ {rmq_nodename_suffix, Group},
+ {tcp_ports_base},
+ {rmq_nodes_clustered, Clustered}]),
+ Config1b = rabbit_ct_helpers:set_config(Config1, [{net_ticktime, 10}]),
+ Ret = rabbit_ct_helpers:run_steps(Config1b,
+ [fun merge_app_env/1 ] ++
+ rabbit_ct_broker_helpers:setup_steps()),
+ case Ret of
+ {skip, _} ->
+ Ret;
+ Config2 ->
+ EnableFF = rabbit_ct_broker_helpers:enable_feature_flag(
+ Config2, stream_queue),
+ case EnableFF of
+ ok ->
+ ok = rabbit_ct_broker_helpers:rpc(
+ Config2, 0, application, set_env,
+ [rabbit, channel_tick_interval, 100]),
+ Config2;
+ Skip ->
+ end_per_group(Group, Config2),
+ Skip
+ end
+ end.
+
+end_per_group(_, Config) ->
+ rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_testcase(Testcase, Config) ->
+ Config1 = rabbit_ct_helpers:testcase_started(Config, Testcase),
+ Q = rabbit_data_coercion:to_binary(Testcase),
+ Config2 = rabbit_ct_helpers:set_config(Config1, [{queue_name, Q}]),
+ rabbit_ct_helpers:run_steps(Config2, rabbit_ct_client_helpers:setup_steps()).
+
+merge_app_env(Config) ->
+ rabbit_ct_helpers:merge_app_env(Config,
+ {rabbit, [{core_metrics_gc_interval, 100}]}).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, delete_queues, []),
+ Config1 = rabbit_ct_helpers:run_steps(
+ Config,
+ rabbit_ct_client_helpers:teardown_steps()),
+ rabbit_ct_helpers:testcase_finished(Config1, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+declare_args(Config) ->
+ Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ Q = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"stream">>},
+ {<<"x-max-length">>, long, 2000}])),
+ assert_queue_type(Server, Q, rabbit_stream_queue).
+
+declare_max_age(Config) ->
+ Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ Q = ?config(queue_name, Config),
+
+ ?assertExit(
+ {{shutdown, {server_initiated_close, 406, _}}, _},
+ declare(rabbit_ct_client_helpers:open_channel(Config, Server), Q,
+ [{<<"x-queue-type">>, longstr, <<"stream">>},
+ {<<"x-max-age">>, longstr, <<"1A">>}])),
+
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"stream">>},
+ {<<"x-max-age">>, longstr, <<"1Y">>}])),
+ assert_queue_type(Server, Q, rabbit_stream_queue).
+
+declare_invalid_properties(Config) ->
+ Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ Q = ?config(queue_name, Config),
+
+ ?assertExit(
+ {{shutdown, {server_initiated_close, 406, _}}, _},
+ amqp_channel:call(
+ rabbit_ct_client_helpers:open_channel(Config, Server),
+ #'queue.declare'{queue = Q,
+ auto_delete = true,
+ durable = true,
+ arguments = [{<<"x-queue-type">>, longstr, <<"stream">>}]})),
+ ?assertExit(
+ {{shutdown, {server_initiated_close, 406, _}}, _},
+ amqp_channel:call(
+ rabbit_ct_client_helpers:open_channel(Config, Server),
+ #'queue.declare'{queue = Q,
+ exclusive = true,
+ durable = true,
+ arguments = [{<<"x-queue-type">>, longstr, <<"stream">>}]})),
+ ?assertExit(
+ {{shutdown, {server_initiated_close, 406, _}}, _},
+ amqp_channel:call(
+ rabbit_ct_client_helpers:open_channel(Config, Server),
+ #'queue.declare'{queue = Q,
+ durable = false,
+ arguments = [{<<"x-queue-type">>, longstr, <<"stream">>}]})).
+
+declare_server_named(Config) ->
+ Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+
+ ?assertExit(
+ {{shutdown, {server_initiated_close, 406, _}}, _},
+ declare(rabbit_ct_client_helpers:open_channel(Config, Server),
+ <<"">>, [{<<"x-queue-type">>, longstr, <<"stream">>}])).
+
+declare_queue(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ Q = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}])),
+
+ %% Test declare an existing queue
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}])),
+
+ ?assertMatch([_], rpc:call(Server, supervisor, which_children,
+ [osiris_server_sup])),
+
+ %% Test declare an existing queue with different arguments
+ ?assertExit(_, declare(Ch, Q, [])).
+
+delete_queue(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ Q = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}])),
+ ?assertMatch(#'queue.delete_ok'{},
+ amqp_channel:call(Ch, #'queue.delete'{queue = Q})).
+
+add_replica(Config) ->
+ [Server0, Server1, Server2] =
+ rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server0),
+ Q = ?config(queue_name, Config),
+
+    %% Let's also try the add replica command on other queue types; it should fail.
+    %% We do it in the same test for efficiency; otherwise we would have to
+    %% start a new RabbitMQ cluster for every minor testcase.
+ QClassic = <<Q/binary, "_classic">>,
+ QQuorum = <<Q/binary, "_quorum">>,
+
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}])),
+ ?assertEqual({'queue.declare_ok', QClassic, 0, 0},
+ declare(Ch, QClassic, [{<<"x-queue-type">>, longstr, <<"classic">>}])),
+ ?assertEqual({'queue.declare_ok', QQuorum, 0, 0},
+ declare(Ch, QQuorum, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+
+    %% Server1 is not yet a member of the cluster, so this must fail
+ ?assertEqual({error, node_not_running},
+ rpc:call(Server0, rabbit_stream_queue, add_replica,
+ [<<"/">>, Q, Server1])),
+ ?assertEqual({error, classic_queue_not_supported},
+ rpc:call(Server0, rabbit_stream_queue, add_replica,
+ [<<"/">>, QClassic, Server1])),
+ ?assertEqual({error, quorum_queue_not_supported},
+ rpc:call(Server0, rabbit_stream_queue, add_replica,
+ [<<"/">>, QQuorum, Server1])),
+
+ ok = rabbit_control_helper:command(stop_app, Server1),
+ ok = rabbit_control_helper:command(join_cluster, Server1, [atom_to_list(Server0)], []),
+ rabbit_control_helper:command(start_app, Server1),
+ timer:sleep(1000),
+ ?assertEqual({error, classic_queue_not_supported},
+ rpc:call(Server0, rabbit_stream_queue, add_replica,
+ [<<"/">>, QClassic, Server1])),
+ ?assertEqual({error, quorum_queue_not_supported},
+ rpc:call(Server0, rabbit_stream_queue, add_replica,
+ [<<"/">>, QQuorum, Server1])),
+ ?assertEqual(ok,
+ rpc:call(Server0, rabbit_stream_queue, add_replica,
+ [<<"/">>, Q, Server1])),
+ %% replicas must be recorded on the state, and if we publish messages then they must
+ %% be stored on disk
+ check_leader_and_replicas(Config, Q, Server0, [Server1]),
+ %% And if we try again? Idempotent
+ ?assertEqual(ok, rpc:call(Server0, rabbit_stream_queue, add_replica,
+ [<<"/">>, Q, Server1])),
+ %% Add another node
+ ok = rabbit_control_helper:command(stop_app, Server2),
+ ok = rabbit_control_helper:command(join_cluster, Server2, [atom_to_list(Server0)], []),
+ rabbit_control_helper:command(start_app, Server2),
+ ?assertEqual(ok, rpc:call(Server0, rabbit_stream_queue, add_replica,
+ [<<"/">>, Q, Server2])),
+ check_leader_and_replicas(Config, Q, Server0, [Server1, Server2]).
+
+delete_replica(Config) ->
+ [Server0, Server1, Server2] =
+ rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server0),
+ Q = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}])),
+ check_leader_and_replicas(Config, Q, Server0, [Server1, Server2]),
+    %% 'zen@rabbit' is not a member of the cluster, so this must fail
+ ?assertEqual({error, node_not_running},
+ rpc:call(Server0, rabbit_stream_queue, delete_replica,
+ [<<"/">>, Q, 'zen@rabbit'])),
+ ?assertEqual(ok,
+ rpc:call(Server0, rabbit_stream_queue, delete_replica,
+ [<<"/">>, Q, Server1])),
+ %% check it's gone
+ check_leader_and_replicas(Config, Q, Server0, [Server2]),
+ %% And if we try again? Idempotent
+ ?assertEqual(ok, rpc:call(Server0, rabbit_stream_queue, delete_replica,
+ [<<"/">>, Q, Server1])),
+ %% Delete the last replica
+ ?assertEqual(ok, rpc:call(Server0, rabbit_stream_queue, delete_replica,
+ [<<"/">>, Q, Server2])),
+ check_leader_and_replicas(Config, Q, Server0, []).
+
+grow_coordinator_cluster(Config) ->
+ [Server0, Server1, _Server2] =
+ rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server0),
+ Q = ?config(queue_name, Config),
+
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}])),
+
+ ok = rabbit_control_helper:command(stop_app, Server1),
+ ok = rabbit_control_helper:command(join_cluster, Server1, [atom_to_list(Server0)], []),
+ rabbit_control_helper:command(start_app, Server1),
+
+ rabbit_ct_helpers:await_condition(
+ fun() ->
+ case rpc:call(Server0, ra, members, [{rabbit_stream_coordinator, Server0}]) of
+ {_, Members, _} ->
+ Nodes = lists:sort([N || {_, N} <- Members]),
+ lists:sort([Server0, Server1]) == Nodes;
+ _ ->
+ false
+ end
+ end, 60000).
+
+shrink_coordinator_cluster(Config) ->
+ [Server0, Server1, Server2] =
+ rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server0),
+ Q = ?config(queue_name, Config),
+
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}])),
+
+ ok = rabbit_control_helper:command(stop_app, Server2),
+ ok = rabbit_control_helper:command(forget_cluster_node, Server0, [atom_to_list(Server2)], []),
+
+ rabbit_ct_helpers:await_condition(
+ fun() ->
+ case rpc:call(Server0, ra, members, [{rabbit_stream_coordinator, Server0}]) of
+ {_, Members, _} ->
+ Nodes = lists:sort([N || {_, N} <- Members]),
+ lists:sort([Server0, Server1]) == Nodes;
+ _ ->
+ false
+ end
+ end, 60000).
+
+delete_classic_replica(Config) ->
+ [Server0, Server1, _Server2] =
+ rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server0),
+ Q = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"classic">>}])),
+    %% classic queues are rejected regardless of the target node
+ ?assertEqual({error, classic_queue_not_supported},
+ rpc:call(Server0, rabbit_stream_queue, delete_replica,
+ [<<"/">>, Q, 'zen@rabbit'])),
+ ?assertEqual({error, classic_queue_not_supported},
+ rpc:call(Server0, rabbit_stream_queue, delete_replica,
+ [<<"/">>, Q, Server1])).
+
+delete_quorum_replica(Config) ->
+ [Server0, Server1, _Server2] =
+ rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server0),
+ Q = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"quorum">>}])),
+    %% quorum queues are rejected regardless of the target node
+ ?assertEqual({error, quorum_queue_not_supported},
+ rpc:call(Server0, rabbit_stream_queue, delete_replica,
+ [<<"/">>, Q, 'zen@rabbit'])),
+ ?assertEqual({error, quorum_queue_not_supported},
+ rpc:call(Server0, rabbit_stream_queue, delete_replica,
+ [<<"/">>, Q, Server1])).
+
+delete_down_replica(Config) ->
+ [Server0, Server1, Server2] =
+ rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server0),
+ Q = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}])),
+ check_leader_and_replicas(Config, Q, Server0, [Server1, Server2]),
+ ok = rabbit_ct_broker_helpers:stop_node(Config, Server1),
+ ?assertEqual({error, node_not_running},
+ rpc:call(Server0, rabbit_stream_queue, delete_replica,
+ [<<"/">>, Q, Server1])),
+ %% check it isn't gone
+ check_leader_and_replicas(Config, Q, Server0, [Server1, Server2]),
+ ok = rabbit_ct_broker_helpers:start_node(Config, Server1),
+ ?assertEqual(ok,
+ rpc:call(Server0, rabbit_stream_queue, delete_replica,
+ [<<"/">>, Q, Server1])).
+
+publish(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ Q = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}])),
+
+ publish(Ch, Q),
+ quorum_queue_utils:wait_for_messages(Config, [[Q, <<"1">>, <<"1">>, <<"0">>]]).
+
+publish_confirm(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ Q = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}])),
+
+ #'confirm.select_ok'{} = amqp_channel:call(Ch, #'confirm.select'{}),
+ amqp_channel:register_confirm_handler(Ch, self()),
+ publish(Ch, Q),
+ amqp_channel:wait_for_confirms(Ch, 5),
+ quorum_queue_utils:wait_for_messages(Config, [[Q, <<"1">>, <<"1">>, <<"0">>]]).
+
+restart_single_node(Config) ->
+ [Server] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ Q = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}])),
+ publish(Ch, Q),
+ quorum_queue_utils:wait_for_messages(Config, [[Q, <<"1">>, <<"1">>, <<"0">>]]),
+
+ rabbit_control_helper:command(stop_app, Server),
+ rabbit_control_helper:command(start_app, Server),
+
+ quorum_queue_utils:wait_for_messages(Config, [[Q, <<"1">>, <<"1">>, <<"0">>]]),
+ Ch1 = rabbit_ct_client_helpers:open_channel(Config, Server),
+ publish(Ch1, Q),
+ quorum_queue_utils:wait_for_messages(Config, [[Q, <<"2">>, <<"2">>, <<"0">>]]).
+
+recover(Config) ->
+ [Server | _] = Servers = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ Q = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}])),
+ publish(Ch, Q),
+ quorum_queue_utils:wait_for_messages(Config, [[Q, <<"1">>, <<"1">>, <<"0">>]]),
+
+ [rabbit_ct_broker_helpers:stop_node(Config, S) || S <- Servers],
+ [rabbit_ct_broker_helpers:start_node(Config, S) || S <- lists:reverse(Servers)],
+
+ quorum_queue_utils:wait_for_messages(Config, [[Q, <<"1">>, <<"1">>, <<"0">>]]),
+ Ch1 = rabbit_ct_client_helpers:open_channel(Config, Server),
+ publish(Ch1, Q),
+ quorum_queue_utils:wait_for_messages(Config, [[Q, <<"2">>, <<"2">>, <<"0">>]]).
+
+consume_without_qos(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ Q = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}])),
+
+ ?assertExit({{shutdown, {server_initiated_close, 406, _}}, _},
+ amqp_channel:subscribe(Ch, #'basic.consume'{queue = Q, consumer_tag = <<"ctag">>},
+ self())).
+
+consume_without_local_replica(Config) ->
+ [Server0, Server1 | _] =
+ rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server0),
+ Q = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}])),
+ %% Add another node to the cluster, but it won't have a replica
+ ok = rabbit_control_helper:command(stop_app, Server1),
+ ok = rabbit_control_helper:command(join_cluster, Server1, [atom_to_list(Server0)], []),
+ rabbit_control_helper:command(start_app, Server1),
+ timer:sleep(1000),
+
+ Ch1 = rabbit_ct_client_helpers:open_channel(Config, Server1),
+ qos(Ch1, 10, false),
+ ?assertExit({{shutdown, {server_initiated_close, 406, _}}, _},
+ amqp_channel:subscribe(Ch1, #'basic.consume'{queue = Q, consumer_tag = <<"ctag">>},
+ self())).
+
+consume(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ Q = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}])),
+
+ #'confirm.select_ok'{} = amqp_channel:call(Ch, #'confirm.select'{}),
+ amqp_channel:register_confirm_handler(Ch, self()),
+ publish(Ch, Q),
+ amqp_channel:wait_for_confirms(Ch, 5),
+
+ Ch1 = rabbit_ct_client_helpers:open_channel(Config, Server),
+ qos(Ch1, 10, false),
+ subscribe(Ch1, Q, false, 0),
+ receive
+ {#'basic.deliver'{delivery_tag = DeliveryTag}, _} ->
+ ok = amqp_channel:cast(Ch1, #'basic.ack'{delivery_tag = DeliveryTag,
+ multiple = false}),
+ _ = amqp_channel:call(Ch1, #'basic.cancel'{consumer_tag = <<"ctag">>}),
+ ok = amqp_channel:close(Ch1),
+ ok
+ after 5000 ->
+ exit(timeout)
+ end.
+
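+%% Streams allow consumers to attach at an arbitrary position via the
+%% x-stream-offset consumer argument (a numeric offset or a timestamp).
+%% The property test below subscribes at a random offset and verifies
+%% that nothing older than the requested offset is delivered.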
+consume_offset(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ Q = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}])),
+
+ #'confirm.select_ok'{} = amqp_channel:call(Ch, #'confirm.select'{}),
+ amqp_channel:register_confirm_handler(Ch, self()),
+ Payload = << <<"1">> || _ <- lists:seq(1, 500) >>,
+ [publish(Ch, Q, Payload) || _ <- lists:seq(1, 1000)],
+ amqp_channel:wait_for_confirms(Ch, 5),
+
+ run_proper(
+ fun () ->
+ ?FORALL(Offset, range(0, 999),
+ begin
+ Ch1 = rabbit_ct_client_helpers:open_channel(Config, Server),
+ qos(Ch1, 10, false),
+ subscribe(Ch1, Q, false, Offset),
+ receive_batch(Ch1, Offset, 999),
+ receive
+ {_,
+ #amqp_msg{props = #'P_basic'{headers = [{<<"x-stream-offset">>, long, S}]}}}
+ when S < Offset ->
+ exit({unexpected_offset, S})
+ after 1000 ->
+ ok
+ end,
+ amqp_channel:call(Ch1, #'basic.cancel'{consumer_tag = <<"ctag">>}),
+ true
+ end)
+ end, [], 25).
+
+consume_timestamp_offset(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ Q = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}])),
+
+ #'confirm.select_ok'{} = amqp_channel:call(Ch, #'confirm.select'{}),
+ amqp_channel:register_confirm_handler(Ch, self()),
+ Payload = <<"111">>,
+ [publish(Ch, Q, Payload) || _ <- lists:seq(1, 100)],
+ amqp_channel:wait_for_confirms(Ch, 5),
+
+
+ Ch1 = rabbit_ct_client_helpers:open_channel(Config, Server),
+ qos(Ch1, 10, false),
+
+ Offset = erlang:system_time(millisecond) - 600000,
+ amqp_channel:subscribe(
+ Ch1,
+ #'basic.consume'{queue = Q,
+ no_ack = false,
+ consumer_tag = <<"ctag">>,
+ arguments = [{<<"x-stream-offset">>, timestamp, Offset}]},
+ self()),
+ receive
+ #'basic.consume_ok'{consumer_tag = <<"ctag">>} ->
+ ok
+ end,
+
+ %% It has subscribed to a very old timestamp, so we will receive the whole stream
+ receive_batch(Ch1, 0, 99).
+
+consume_timestamp_last_offset(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ Q = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}])),
+
+ #'confirm.select_ok'{} = amqp_channel:call(Ch, #'confirm.select'{}),
+ amqp_channel:register_confirm_handler(Ch, self()),
+ [publish(Ch, Q, <<"111">>) || _ <- lists:seq(1, 100)],
+ amqp_channel:wait_for_confirms(Ch, 5),
+
+
+ Ch1 = rabbit_ct_client_helpers:open_channel(Config, Server),
+ qos(Ch1, 10, false),
+
+ %% Subscribe from now/future
+ Offset = erlang:system_time(millisecond) + 60000,
+ amqp_channel:subscribe(
+ Ch1,
+ #'basic.consume'{queue = Q,
+ no_ack = false,
+ consumer_tag = <<"ctag">>,
+ arguments = [{<<"x-stream-offset">>, timestamp, Offset}]},
+ self()),
+ receive
+ #'basic.consume_ok'{consumer_tag = <<"ctag">>} ->
+ ok
+ end,
+
+ receive
+ {_,
+ #amqp_msg{props = #'P_basic'{headers = [{<<"x-stream-offset">>, long, S}]}}}
+ when S < 100 ->
+ exit({unexpected_offset, S})
+ after 1000 ->
+ ok
+ end,
+
+ %% Publish a few more
+ [publish(Ch, Q, <<"msg2">>) || _ <- lists:seq(1, 100)],
+ amqp_channel:wait_for_confirms(Ch, 5),
+
+    %% and now we should receive the newly published messages
+ receive_batch(Ch1, 100, 199).
+
+basic_get(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ Q = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}])),
+
+ ?assertExit({{shutdown, {connection_closing, {server_initiated_close, 540, _}}}, _},
+ amqp_channel:call(Ch, #'basic.get'{queue = Q})).
+
+consume_with_autoack(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ Q = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}])),
+
+ Ch1 = rabbit_ct_client_helpers:open_channel(Config, Server),
+ qos(Ch1, 10, false),
+
+ ?assertExit(
+ {{shutdown, {connection_closing, {server_initiated_close, 540, _}}}, _},
+ subscribe(Ch1, Q, true, 0)).
+
+consume_and_nack(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ Q = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}])),
+
+ #'confirm.select_ok'{} = amqp_channel:call(Ch, #'confirm.select'{}),
+ amqp_channel:register_confirm_handler(Ch, self()),
+ publish(Ch, Q),
+ amqp_channel:wait_for_confirms(Ch, 5),
+
+ Ch1 = rabbit_ct_client_helpers:open_channel(Config, Server),
+ qos(Ch1, 10, false),
+ subscribe(Ch1, Q, false, 0),
+ receive
+ {#'basic.deliver'{delivery_tag = DeliveryTag}, _} ->
+ ok = amqp_channel:cast(Ch1, #'basic.nack'{delivery_tag = DeliveryTag,
+ multiple = false,
+ requeue = true}),
+ %% Nack will throw a not implemented exception. As it is a cast operation,
+            %% we'll detect the connection/channel closure on the next call.
+ %% Let's try to redeclare and see what happens
+ ?assertExit({{shutdown, {connection_closing, {server_initiated_close, 540, _}}}, _},
+ declare(Ch1, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}]))
+ after 10000 ->
+ exit(timeout)
+ end.
+
+basic_cancel(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ Q = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}])),
+
+ #'confirm.select_ok'{} = amqp_channel:call(Ch, #'confirm.select'{}),
+ amqp_channel:register_confirm_handler(Ch, self()),
+ publish(Ch, Q),
+ amqp_channel:wait_for_confirms(Ch, 5),
+
+ Ch1 = rabbit_ct_client_helpers:open_channel(Config, Server),
+ qos(Ch1, 10, false),
+ subscribe(Ch1, Q, false, 0),
+ rabbit_ct_helpers:await_condition(
+ fun() ->
+ 1 == length(rabbit_ct_broker_helpers:rpc(Config, Server, ets, tab2list,
+ [consumer_created]))
+ end, 30000),
+ receive
+ {#'basic.deliver'{}, _} ->
+ amqp_channel:call(Ch1, #'basic.cancel'{consumer_tag = <<"ctag">>}),
+ ?assertMatch([], rabbit_ct_broker_helpers:rpc(Config, Server, ets, tab2list, [consumer_created]))
+ after 10000 ->
+ exit(timeout)
+ end.
+
+consume_and_reject(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ Q = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}])),
+
+ #'confirm.select_ok'{} = amqp_channel:call(Ch, #'confirm.select'{}),
+ amqp_channel:register_confirm_handler(Ch, self()),
+ publish(Ch, Q),
+ amqp_channel:wait_for_confirms(Ch, 5),
+
+ Ch1 = rabbit_ct_client_helpers:open_channel(Config, Server),
+ qos(Ch1, 10, false),
+ subscribe(Ch1, Q, false, 0),
+ receive
+ {#'basic.deliver'{delivery_tag = DeliveryTag}, _} ->
+ ok = amqp_channel:cast(Ch1, #'basic.reject'{delivery_tag = DeliveryTag,
+ requeue = true}),
+ %% Reject will throw a not implemented exception. As it is a cast operation,
+            %% we'll detect the connection/channel closure on the next call.
+ %% Let's try to redeclare and see what happens
+ ?assertExit({{shutdown, {connection_closing, {server_initiated_close, 540, _}}}, _},
+ declare(Ch1, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}]))
+ after 10000 ->
+ exit(timeout)
+ end.
+
+consume_and_ack(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ Q = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}])),
+
+ #'confirm.select_ok'{} = amqp_channel:call(Ch, #'confirm.select'{}),
+ amqp_channel:register_confirm_handler(Ch, self()),
+ publish(Ch, Q),
+ amqp_channel:wait_for_confirms(Ch, 5),
+
+ Ch1 = rabbit_ct_client_helpers:open_channel(Config, Server),
+ qos(Ch1, 10, false),
+ subscribe(Ch1, Q, false, 0),
+ receive
+ {#'basic.deliver'{delivery_tag = DeliveryTag}, _} ->
+ ok = amqp_channel:cast(Ch1, #'basic.ack'{delivery_tag = DeliveryTag,
+ multiple = false}),
+ %% It will succeed as ack is now a credit operation. We should be
+ %% able to redeclare a queue (gen_server call op) as the channel
+ %% should still be open and declare is an idempotent operation
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch1, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}])),
+ quorum_queue_utils:wait_for_messages(Config, [[Q, <<"1">>, <<"1">>, <<"0">>]])
+ after 5000 ->
+ exit(timeout)
+ end.
+
+consume_from_last(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ Q = ?config(queue_name, Config),
+
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}])),
+
+ #'confirm.select_ok'{} = amqp_channel:call(Ch, #'confirm.select'{}),
+ amqp_channel:register_confirm_handler(Ch, self()),
+ [publish(Ch, Q, <<"msg1">>) || _ <- lists:seq(1, 100)],
+ amqp_channel:wait_for_confirms(Ch, 5),
+
+ Ch1 = rabbit_ct_client_helpers:open_channel(Config, Server),
+ qos(Ch1, 10, false),
+
+ [Info] = rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_amqqueue,
+ info_all, [<<"/">>, [committed_offset]]),
+
+    %% We'll receive data from the last committed offset; let's check that it
+    %% is not the first offset
+ CommittedOffset = proplists:get_value(committed_offset, Info),
+ ?assert(CommittedOffset > 0),
+
+    %% Subscribing with the "last" offset spec delivers from the last committed chunk
+ amqp_channel:subscribe(
+ Ch1, #'basic.consume'{queue = Q,
+ no_ack = false,
+ consumer_tag = <<"ctag">>,
+ arguments = [{<<"x-stream-offset">>, longstr, <<"last">>}]},
+ self()),
+ receive
+ #'basic.consume_ok'{consumer_tag = <<"ctag">>} ->
+ ok
+ end,
+
+ %% And receive the messages from the last committed offset to the end of the stream
+ receive_batch(Ch1, CommittedOffset, 99),
+
+ %% Publish a few more
+ [publish(Ch, Q, <<"msg2">>) || _ <- lists:seq(1, 100)],
+ amqp_channel:wait_for_confirms(Ch, 5),
+
+ %% Yeah! we got them
+ receive_batch(Ch1, 100, 199).
+
+consume_from_next(Config) ->
+ consume_from_next(Config, [{<<"x-stream-offset">>, longstr, <<"next">>}]).
+
+consume_from_default(Config) ->
+ consume_from_next(Config, []).
+
+consume_from_next(Config, Args) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ Q = ?config(queue_name, Config),
+
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}])),
+
+ #'confirm.select_ok'{} = amqp_channel:call(Ch, #'confirm.select'{}),
+ amqp_channel:register_confirm_handler(Ch, self()),
+ [publish(Ch, Q, <<"msg1">>) || _ <- lists:seq(1, 100)],
+ amqp_channel:wait_for_confirms(Ch, 5),
+
+ Ch1 = rabbit_ct_client_helpers:open_channel(Config, Server),
+ qos(Ch1, 10, false),
+
+ [Info] = rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_amqqueue,
+ info_all, [<<"/">>, [committed_offset]]),
+
+    %% Check that the stream already has committed data, i.e. the committed
+    %% offset is past the first offset
+ CommittedOffset = proplists:get_value(committed_offset, Info),
+ ?assert(CommittedOffset > 0),
+
+    %% "next" - like not providing an offset at all - subscribes to the tail of the stream
+ amqp_channel:subscribe(
+ Ch1, #'basic.consume'{queue = Q,
+ no_ack = false,
+ consumer_tag = <<"ctag">>,
+ arguments = Args},
+ self()),
+ receive
+ #'basic.consume_ok'{consumer_tag = <<"ctag">>} ->
+ ok
+ end,
+
+ %% Publish a few more
+ [publish(Ch, Q, <<"msg2">>) || _ <- lists:seq(1, 100)],
+ amqp_channel:wait_for_confirms(Ch, 5),
+
+ %% Yeah! we got them
+ receive_batch(Ch1, 100, 199).
+
+consume_from_replica(Config) ->
+ [Server1, Server2 | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch1 = rabbit_ct_client_helpers:open_channel(Config, Server1),
+ Q = ?config(queue_name, Config),
+
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch1, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}])),
+
+ #'confirm.select_ok'{} = amqp_channel:call(Ch1, #'confirm.select'{}),
+ amqp_channel:register_confirm_handler(Ch1, self()),
+ [publish(Ch1, Q, <<"msg1">>) || _ <- lists:seq(1, 100)],
+ amqp_channel:wait_for_confirms(Ch1, 5),
+
+ Ch2 = rabbit_ct_client_helpers:open_channel(Config, Server2),
+ qos(Ch2, 10, false),
+
+ subscribe(Ch2, Q, false, 0),
+ receive_batch(Ch2, 0, 99).
+
+consume_credit(Config) ->
+ %% Because osiris provides one chunk on every read and we don't want to buffer
+ %% messages in the broker to avoid memory penalties, the credit value won't
+    %% be strict - we allow it to go negative.
+ %% We can test that after receiving a chunk, no more messages are delivered until
+ %% the credit goes back to a positive value.
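+    %% A sketch of the credit arithmetic this test relies on: with prefetch
+    %% C and T messages delivered in chunks, the credit drops to C - T.
+    %% Acking T - C messages brings it back to 0, which still blocks
+    %% delivery; one further ack makes it positive and the next chunk is
+    %% dispatched.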
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ Q = ?config(queue_name, Config),
+
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}])),
+
+ #'confirm.select_ok'{} = amqp_channel:call(Ch, #'confirm.select'{}),
+ amqp_channel:register_confirm_handler(Ch, self()),
+ %% Let's publish a big batch, to ensure we have more than a chunk available
+ NumMsgs = 100,
+ [publish(Ch, Q, <<"msg1">>) || _ <- lists:seq(1, NumMsgs)],
+ amqp_channel:wait_for_confirms(Ch, 5),
+
+ Ch1 = rabbit_ct_client_helpers:open_channel(Config, Server),
+
+ %% Let's subscribe with a small credit, easier to test
+ Credit = 2,
+ qos(Ch1, Credit, false),
+ subscribe(Ch1, Q, false, 0),
+
+ %% Receive everything
+ DeliveryTags = receive_batch(),
+
+    %% We receive at least as many messages as the given credit; we know there are 100 in the queue
+ ?assert(length(DeliveryTags) >= Credit),
+
+ %% Let's ack as many messages as we can while avoiding a positive credit for new deliveries
+ {ToAck, Pending} = lists:split(length(DeliveryTags) - Credit, DeliveryTags),
+
+ [ok = amqp_channel:cast(Ch1, #'basic.ack'{delivery_tag = DeliveryTag,
+ multiple = false})
+ || DeliveryTag <- ToAck],
+
+ %% Nothing here, this is good
+ receive
+ {#'basic.deliver'{}, _} ->
+ exit(unexpected_delivery)
+ after 1000 ->
+ ok
+ end,
+
+ %% Let's ack one more, we should receive a new chunk
+ ok = amqp_channel:cast(Ch1, #'basic.ack'{delivery_tag = hd(Pending),
+ multiple = false}),
+
+ %% Yeah, here is the new chunk!
+ receive
+ {#'basic.deliver'{}, _} ->
+ ok
+ after 5000 ->
+ exit(timeout)
+ end.
+
+consume_credit_out_of_order_ack(Config) ->
+ %% Like consume_credit but acknowledging the messages out of order.
+    %% We want to ensure it doesn't behave like a multiple ack: if we have
+    %% credit 2 and have received 10 messages, acking message number 10
+    %% should only increase the credit by 1.
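+    %% Example: with credit 2 and tags 1..10 outstanding (credit at -8),
+    %% acking tag 10 with multiple = false must settle exactly one message
+    %% (credit -7), not tags 1..10 as a multiple = true ack would.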
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ Q = ?config(queue_name, Config),
+
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}])),
+
+ #'confirm.select_ok'{} = amqp_channel:call(Ch, #'confirm.select'{}),
+ amqp_channel:register_confirm_handler(Ch, self()),
+ %% Let's publish a big batch, to ensure we have more than a chunk available
+ NumMsgs = 100,
+ [publish(Ch, Q, <<"msg1">>) || _ <- lists:seq(1, NumMsgs)],
+ amqp_channel:wait_for_confirms(Ch, 5),
+
+ Ch1 = rabbit_ct_client_helpers:open_channel(Config, Server),
+
+ %% Let's subscribe with a small credit, easier to test
+ Credit = 2,
+ qos(Ch1, Credit, false),
+ subscribe(Ch1, Q, false, 0),
+
+ %% ******* This is the difference with consume_credit
+ %% Receive everything, let's reverse the delivery tags here so we ack out of order
+ DeliveryTags = lists:reverse(receive_batch()),
+
+    %% We receive at least as many messages as the given credit; we know there are 100 in the queue
+ ?assert(length(DeliveryTags) >= Credit),
+
+ %% Let's ack as many messages as we can while avoiding a positive credit for new deliveries
+ {ToAck, Pending} = lists:split(length(DeliveryTags) - Credit, DeliveryTags),
+
+ [ok = amqp_channel:cast(Ch1, #'basic.ack'{delivery_tag = DeliveryTag,
+ multiple = false})
+ || DeliveryTag <- ToAck],
+
+ %% Nothing here, this is good
+ receive
+ {#'basic.deliver'{}, _} ->
+ exit(unexpected_delivery)
+ after 1000 ->
+ ok
+ end,
+
+ %% Let's ack one more, we should receive a new chunk
+ ok = amqp_channel:cast(Ch1, #'basic.ack'{delivery_tag = hd(Pending),
+ multiple = false}),
+
+ %% Yeah, here is the new chunk!
+ receive
+ {#'basic.deliver'{}, _} ->
+ ok
+ after 5000 ->
+ exit(timeout)
+ end.
+
+consume_credit_multiple_ack(Config) ->
+    %% Like consume_credit but acknowledging with multiple = true.
+    %% Acking the last delivery tag should settle every outstanding message
+    %% at once, replenishing the credit in full so that a new chunk is
+    %% delivered straight away.
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ Q = ?config(queue_name, Config),
+
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}])),
+
+ #'confirm.select_ok'{} = amqp_channel:call(Ch, #'confirm.select'{}),
+ amqp_channel:register_confirm_handler(Ch, self()),
+ %% Let's publish a big batch, to ensure we have more than a chunk available
+ NumMsgs = 100,
+ [publish(Ch, Q, <<"msg1">>) || _ <- lists:seq(1, NumMsgs)],
+ amqp_channel:wait_for_confirms(Ch, 5),
+
+ Ch1 = rabbit_ct_client_helpers:open_channel(Config, Server),
+
+ %% Let's subscribe with a small credit, easier to test
+ Credit = 2,
+ qos(Ch1, Credit, false),
+ subscribe(Ch1, Q, false, 0),
+
+    %% ******* This is the difference with consume_credit
+    %% Receive everything and ack only the last delivery tag, with multiple = true
+ DeliveryTag = lists:last(receive_batch()),
+
+ ok = amqp_channel:cast(Ch1, #'basic.ack'{delivery_tag = DeliveryTag,
+ multiple = true}),
+
+ %% Yeah, here is the new chunk!
+ receive
+ {#'basic.deliver'{}, _} ->
+ ok
+ after 5000 ->
+ exit(timeout)
+ end.
+
+max_length_bytes(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ Q = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"stream">>},
+ {<<"x-max-length-bytes">>, long, 500},
+ {<<"x-max-segment-size">>, long, 250}])),
+
+ Payload = << <<"1">> || _ <- lists:seq(1, 500) >>,
+
+ #'confirm.select_ok'{} = amqp_channel:call(Ch, #'confirm.select'{}),
+ amqp_channel:register_confirm_handler(Ch, self()),
+ [publish(Ch, Q, Payload) || _ <- lists:seq(1, 100)],
+ amqp_channel:wait_for_confirms(Ch, 5),
+
+ %% We don't yet have reliable metrics, as the committed offset doesn't work
+ %% as a counter once we start applying retention policies.
+    %% Let's wait for messages and check we receive fewer than the number published
+ Ch1 = rabbit_ct_client_helpers:open_channel(Config, Server),
+ qos(Ch1, 100, false),
+ subscribe(Ch1, Q, false, 0),
+
+ ?assert(length(receive_batch()) < 100).
+
+max_age(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ Q = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"stream">>},
+ {<<"x-max-age">>, longstr, <<"10s">>},
+ {<<"x-max-segment-size">>, long, 250}])),
+
+ Payload = << <<"1">> || _ <- lists:seq(1, 500) >>,
+
+ #'confirm.select_ok'{} = amqp_channel:call(Ch, #'confirm.select'{}),
+ amqp_channel:register_confirm_handler(Ch, self()),
+ [publish(Ch, Q, Payload) || _ <- lists:seq(1, 100)],
+ amqp_channel:wait_for_confirms(Ch, 5),
+
+ timer:sleep(10000),
+
+ %% Let's publish again so the new segments will trigger the retention policy
+ [publish(Ch, Q, Payload) || _ <- lists:seq(1, 100)],
+ amqp_channel:wait_for_confirms(Ch, 5),
+
+ timer:sleep(5000),
+
+ Ch1 = rabbit_ct_client_helpers:open_channel(Config, Server),
+ qos(Ch1, 200, false),
+ subscribe(Ch1, Q, false, 0),
+ ?assertEqual(100, length(receive_batch())).
+
+leader_failover(Config) ->
+ [Server1, Server2, Server3] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch1 = rabbit_ct_client_helpers:open_channel(Config, Server1),
+ Q = ?config(queue_name, Config),
+
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch1, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}])),
+
+ #'confirm.select_ok'{} = amqp_channel:call(Ch1, #'confirm.select'{}),
+ amqp_channel:register_confirm_handler(Ch1, self()),
+ [publish(Ch1, Q, <<"msg">>) || _ <- lists:seq(1, 100)],
+ amqp_channel:wait_for_confirms(Ch1, 5),
+
+ check_leader_and_replicas(Config, Q, Server1, [Server2, Server3]),
+
+ ok = rabbit_ct_broker_helpers:stop_node(Config, Server1),
+ timer:sleep(30000),
+
+ [Info] = lists:filter(
+ fun(Props) ->
+ QName = rabbit_misc:r(<<"/">>, queue, Q),
+ lists:member({name, QName}, Props)
+ end,
+ rabbit_ct_broker_helpers:rpc(Config, 1, rabbit_amqqueue,
+ info_all, [<<"/">>, [name, leader, members]])),
+ NewLeader = proplists:get_value(leader, Info),
+ ?assert(NewLeader =/= Server1),
+ ok = rabbit_ct_broker_helpers:start_node(Config, Server1).
+
+initial_cluster_size_one(Config) ->
+ [Server1 | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server1),
+ Q = ?config(queue_name, Config),
+
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"stream">>},
+ {<<"x-initial-cluster-size">>, long, 1}])),
+ check_leader_and_replicas(Config, Q, Server1, []),
+
+ ?assertMatch(#'queue.delete_ok'{},
+ amqp_channel:call(Ch, #'queue.delete'{queue = Q})).
+
+initial_cluster_size_two(Config) ->
+ [Server1 | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server1),
+ Q = ?config(queue_name, Config),
+
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"stream">>},
+ {<<"x-initial-cluster-size">>, long, 2}])),
+
+ [Info] = lists:filter(
+ fun(Props) ->
+ lists:member({name, rabbit_misc:r(<<"/">>, queue, Q)}, Props)
+ end,
+ rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_amqqueue,
+ info_all, [<<"/">>, [name, leader, members]])),
+ ?assertEqual(Server1, proplists:get_value(leader, Info)),
+ ?assertEqual(1, length(proplists:get_value(members, Info))),
+
+ ?assertMatch(#'queue.delete_ok'{},
+ amqp_channel:call(Ch, #'queue.delete'{queue = Q})).
+
+initial_cluster_size_one_policy(Config) ->
+ [Server1 | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ ok = rabbit_ct_broker_helpers:set_policy(
+ Config, 0, <<"cluster-size">>, <<"initial_cluster_size_one_policy">>, <<"queues">>,
+ [{<<"initial-cluster-size">>, 1}]),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server1),
+ Q = ?config(queue_name, Config),
+
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"stream">>},
+ {<<"x-initial-cluster-size">>, long, 1}])),
+ check_leader_and_replicas(Config, Q, Server1, []),
+
+ ?assertMatch(#'queue.delete_ok'{},
+ amqp_channel:call(Ch, #'queue.delete'{queue = Q})),
+
+ ok = rabbit_ct_broker_helpers:clear_policy(Config, 0, <<"cluster-size">>).
+
+leader_locator_client_local(Config) ->
+ [Server1, Server2, Server3] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server1),
+ Q = ?config(queue_name, Config),
+
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"stream">>},
+ {<<"x-queue-leader-locator">>, longstr, <<"client-local">>}])),
+
+ [Info] = lists:filter(
+ fun(Props) ->
+ lists:member({name, rabbit_misc:r(<<"/">>, queue, Q)}, Props)
+ end,
+ rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_amqqueue,
+ info_all, [<<"/">>, [name, leader]])),
+ ?assertEqual(Server1, proplists:get_value(leader, Info)),
+
+ ?assertMatch(#'queue.delete_ok'{},
+ amqp_channel:call(Ch, #'queue.delete'{queue = Q})),
+
+ %% Try second node
+ Ch2 = rabbit_ct_client_helpers:open_channel(Config, Server2),
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch2, Q, [{<<"x-queue-type">>, longstr, <<"stream">>},
+ {<<"x-queue-leader-locator">>, longstr, <<"client-local">>}])),
+
+ [Info2] = lists:filter(
+ fun(Props) ->
+ lists:member({name, rabbit_misc:r(<<"/">>, queue, Q)}, Props)
+ end,
+ rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_amqqueue,
+ info_all, [<<"/">>, [name, leader]])),
+ ?assertEqual(Server2, proplists:get_value(leader, Info2)),
+
+ ?assertMatch(#'queue.delete_ok'{},
+ amqp_channel:call(Ch2, #'queue.delete'{queue = Q})),
+
+ %% Try third node
+ Ch3 = rabbit_ct_client_helpers:open_channel(Config, Server3),
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch3, Q, [{<<"x-queue-type">>, longstr, <<"stream">>},
+ {<<"x-queue-leader-locator">>, longstr, <<"client-local">>}])),
+
+ [Info3] = lists:filter(
+ fun(Props) ->
+ lists:member({name, rabbit_misc:r(<<"/">>, queue, Q)}, Props)
+ end,
+ rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_amqqueue,
+ info_all, [<<"/">>, [name, leader]])),
+ ?assertEqual(Server3, proplists:get_value(leader, Info3)),
+
+ ?assertMatch(#'queue.delete_ok'{},
+ amqp_channel:call(Ch3, #'queue.delete'{queue = Q})).
+
+leader_locator_random(Config) ->
+ [Server1 | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server1),
+ Q = ?config(queue_name, Config),
+
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"stream">>},
+ {<<"x-queue-leader-locator">>, longstr, <<"random">>}])),
+
+ [Info] = lists:filter(
+ fun(Props) ->
+ lists:member({name, rabbit_misc:r(<<"/">>, queue, Q)}, Props)
+ end,
+ rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_amqqueue,
+ info_all, [<<"/">>, [name, leader]])),
+ Leader = proplists:get_value(leader, Info),
+
+ ?assertMatch(#'queue.delete_ok'{},
+ amqp_channel:call(Ch, #'queue.delete'{queue = Q})),
+
+ repeat_until(
+ fun() ->
+ ?assertMatch(#'queue.delete_ok'{},
+ amqp_channel:call(Ch, #'queue.delete'{queue = Q})),
+
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"stream">>},
+ {<<"x-queue-leader-locator">>, longstr, <<"random">>}])),
+
+ [Info2] = lists:filter(
+ fun(Props) ->
+ lists:member({name, rabbit_misc:r(<<"/">>, queue, Q)}, Props)
+ end,
+ rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_amqqueue,
+ info_all, [<<"/">>, [name, leader]])),
+ Leader2 = proplists:get_value(leader, Info2),
+
+ Leader =/= Leader2
+ end, 10).
+
+leader_locator_least_leaders(Config) ->
+ [Server1, Server2, Server3] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server1),
+ Q = ?config(queue_name, Config),
+
+ Q1 = <<"q1">>,
+ Q2 = <<"q2">>,
+ ?assertEqual({'queue.declare_ok', Q1, 0, 0},
+ declare(Ch, Q1, [{<<"x-queue-type">>, longstr, <<"stream">>},
+ {<<"x-queue-leader-locator">>, longstr, <<"client-local">>}])),
+ ?assertEqual({'queue.declare_ok', Q2, 0, 0},
+ declare(Ch, Q2, [{<<"x-queue-type">>, longstr, <<"stream">>},
+ {<<"x-queue-leader-locator">>, longstr, <<"client-local">>}])),
+
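+    %% Q1 and Q2 are client-local, so Server1 already hosts two leaders;
+    %% "least-leaders" should place the new queue's leader on Server2 or
+    %% Server3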
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"stream">>},
+ {<<"x-queue-leader-locator">>, longstr, <<"least-leaders">>}])),
+
+ [Info] = lists:filter(
+ fun(Props) ->
+ lists:member({name, rabbit_misc:r(<<"/">>, queue, Q)}, Props)
+ end,
+ rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_amqqueue,
+ info_all, [<<"/">>, [name, leader]])),
+ Leader = proplists:get_value(leader, Info),
+
+ ?assert(lists:member(Leader, [Server2, Server3])).
+
+leader_locator_policy(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+
+ ok = rabbit_ct_broker_helpers:set_policy(
+ Config, 0, <<"leader-locator">>, <<"leader_locator_.*">>, <<"queues">>,
+ [{<<"queue-leader-locator">>, <<"random">>}]),
+
+ Q = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}])),
+
+ [Info] = rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_amqqueue,
+ info_all, [<<"/">>, [policy, operator_policy,
+ effective_policy_definition,
+ name, leader]]),
+
+ ?assertEqual(<<"leader-locator">>, proplists:get_value(policy, Info)),
+ ?assertEqual('', proplists:get_value(operator_policy, Info)),
+ ?assertEqual([{<<"queue-leader-locator">>, <<"random">>}],
+ proplists:get_value(effective_policy_definition, Info)),
+
+ Leader = proplists:get_value(leader, Info),
+
+ repeat_until(
+ fun() ->
+ ?assertMatch(#'queue.delete_ok'{},
+ amqp_channel:call(Ch, #'queue.delete'{queue = Q})),
+
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}])),
+
+ [Info2] = lists:filter(
+ fun(Props) ->
+ lists:member({name, rabbit_misc:r(<<"/">>, queue, Q)}, Props)
+ end,
+ rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_amqqueue,
+ info_all, [<<"/">>, [name, leader]])),
+ Leader2 = proplists:get_value(leader, Info2),
+ Leader =/= Leader2
+ end, 10),
+
+ ok = rabbit_ct_broker_helpers:clear_policy(Config, 0, <<"leader-locator">>).
+
+repeat_until(_, 0) ->
+ ct:fail("Condition did not materialize in the expected amount of attempts");
+repeat_until(Fun, N) ->
+ case Fun() of
+ true -> ok;
+ false -> repeat_until(Fun, N - 1)
+ end.
+
+invalid_policy(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ Q = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}])),
+ ok = rabbit_ct_broker_helpers:set_policy(
+ Config, 0, <<"ha">>, <<"invalid_policy.*">>, <<"queues">>,
+ [{<<"ha-mode">>, <<"all">>}]),
+ ok = rabbit_ct_broker_helpers:set_policy(
+ Config, 0, <<"ttl">>, <<"invalid_policy.*">>, <<"queues">>,
+ [{<<"message-ttl">>, 5}]),
+
+ [Info] = rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_amqqueue,
+ info_all, [<<"/">>, [policy, operator_policy,
+ effective_policy_definition]]),
+
+ ?assertEqual('', proplists:get_value(policy, Info)),
+ ?assertEqual('', proplists:get_value(operator_policy, Info)),
+ ?assertEqual([], proplists:get_value(effective_policy_definition, Info)),
+ ok = rabbit_ct_broker_helpers:clear_policy(Config, 0, <<"ha">>),
+ ok = rabbit_ct_broker_helpers:clear_policy(Config, 0, <<"ttl">>).
+
+max_age_policy(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ Q = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}])),
+ ok = rabbit_ct_broker_helpers:set_policy(
+ Config, 0, <<"age">>, <<"max_age_policy.*">>, <<"queues">>,
+ [{<<"max-age">>, <<"1Y">>}]),
+
+ [Info] = rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_amqqueue,
+ info_all, [<<"/">>, [policy, operator_policy,
+ effective_policy_definition]]),
+
+ ?assertEqual(<<"age">>, proplists:get_value(policy, Info)),
+ ?assertEqual('', proplists:get_value(operator_policy, Info)),
+ ?assertEqual([{<<"max-age">>, <<"1Y">>}],
+ proplists:get_value(effective_policy_definition, Info)),
+ ok = rabbit_ct_broker_helpers:clear_policy(Config, 0, <<"age">>).
+
+max_segment_size_policy(Config) ->
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ Q = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}])),
+ ok = rabbit_ct_broker_helpers:set_policy(
+ Config, 0, <<"segment">>, <<"max_segment_size.*">>, <<"queues">>,
+ [{<<"max-segment-size">>, 5000}]),
+
+ [Info] = rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_amqqueue,
+ info_all, [<<"/">>, [policy, operator_policy,
+ effective_policy_definition]]),
+
+ ?assertEqual(<<"segment">>, proplists:get_value(policy, Info)),
+ ?assertEqual('', proplists:get_value(operator_policy, Info)),
+ ?assertEqual([{<<"max-segment-size">>, 5000}],
+ proplists:get_value(effective_policy_definition, Info)),
+ ok = rabbit_ct_broker_helpers:clear_policy(Config, 0, <<"segment">>).
+
+purge(Config) ->
+ Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ Q = ?config(queue_name, Config),
+ ?assertEqual({'queue.declare_ok', Q, 0, 0},
+ declare(Ch, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}])),
+
+ ?assertExit({{shutdown, {connection_closing, {server_initiated_close, 540, _}}}, _},
+ amqp_channel:call(Ch, #'queue.purge'{queue = Q})).
+
+%%----------------------------------------------------------------------------
+
+delete_queues() ->
+ [{ok, _} = rabbit_amqqueue:delete(Q, false, false, <<"dummy">>)
+ || Q <- rabbit_amqqueue:list()].
+
+declare(Ch, Q) ->
+ declare(Ch, Q, []).
+
+declare(Ch, Q, Args) ->
+ amqp_channel:call(Ch, #'queue.declare'{queue = Q,
+ durable = true,
+ auto_delete = false,
+ arguments = Args}).
+assert_queue_type(Server, Q, Expected) ->
+ Actual = get_queue_type(Server, Q),
+ Expected = Actual.
+
+get_queue_type(Server, Q0) ->
+ QNameRes = rabbit_misc:r(<<"/">>, queue, Q0),
+ {ok, Q1} = rpc:call(Server, rabbit_amqqueue, lookup, [QNameRes]),
+ amqqueue:get_type(Q1).
+
+check_leader_and_replicas(Config, Name, Leader, Replicas0) ->
+ QNameRes = rabbit_misc:r(<<"/">>, queue, Name),
+ [Info] = lists:filter(
+ fun(Props) ->
+ lists:member({name, QNameRes}, Props)
+ end,
+ rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_amqqueue,
+ info_all, [<<"/">>, [name, leader, members]])),
+ ?assertEqual(Leader, proplists:get_value(leader, Info)),
+ Replicas = lists:sort(Replicas0),
+ ?assertEqual(Replicas, lists:sort(proplists:get_value(members, Info))).
+
+publish(Ch, Queue) ->
+ publish(Ch, Queue, <<"msg">>).
+
+publish(Ch, Queue, Msg) ->
+ ok = amqp_channel:cast(Ch,
+ #'basic.publish'{routing_key = Queue},
+ #amqp_msg{props = #'P_basic'{delivery_mode = 2},
+ payload = Msg}).
+
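+%% Subscribes with a numeric x-stream-offset. The suite also exercises the
+%% other offset forms: a timestamp in milliseconds and the longstr specs
+%% <<"last">> and <<"next">>.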
+subscribe(Ch, Queue, NoAck, Offset) ->
+ amqp_channel:subscribe(Ch, #'basic.consume'{queue = Queue,
+ no_ack = NoAck,
+ consumer_tag = <<"ctag">>,
+ arguments = [{<<"x-stream-offset">>, long, Offset}]},
+ self()),
+ receive
+ #'basic.consume_ok'{consumer_tag = <<"ctag">>} ->
+ ok
+ end.
+
+qos(Ch, Prefetch, Global) ->
+ ?assertMatch(#'basic.qos_ok'{},
+ amqp_channel:call(Ch, #'basic.qos'{global = Global,
+ prefetch_count = Prefetch})).
+
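+%% Receives and acks messages with x-stream-offset N..M in order, failing
+%% on any offset lower than expected or on a delivery timeout.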
+receive_batch(Ch, N, N) ->
+ receive
+ {#'basic.deliver'{delivery_tag = DeliveryTag},
+ #amqp_msg{props = #'P_basic'{headers = [{<<"x-stream-offset">>, long, N}]}}} ->
+ ok = amqp_channel:cast(Ch, #'basic.ack'{delivery_tag = DeliveryTag,
+ multiple = false})
+ after 60000 ->
+ exit({missing_offset, N})
+ end;
+receive_batch(Ch, N, M) ->
+ receive
+ {_,
+ #amqp_msg{props = #'P_basic'{headers = [{<<"x-stream-offset">>, long, S}]}}}
+ when S < N ->
+ exit({unexpected_offset, S});
+ {#'basic.deliver'{delivery_tag = DeliveryTag},
+ #amqp_msg{props = #'P_basic'{headers = [{<<"x-stream-offset">>, long, N}]}}} ->
+ ok = amqp_channel:cast(Ch, #'basic.ack'{delivery_tag = DeliveryTag,
+ multiple = false}),
+ receive_batch(Ch, N + 1, M)
+ after 60000 ->
+ exit({missing_offset, N})
+ end.
+
+receive_batch() ->
+ receive_batch([]).
+
+receive_batch(Acc) ->
+ receive
+ {#'basic.deliver'{delivery_tag = DeliveryTag}, _} ->
+ receive_batch([DeliveryTag | Acc])
+ after 5000 ->
+ lists:reverse(Acc)
+ end.
+
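+%% proper:counterexample/2 returns true when the property holds and the
+%% failing counterexample otherwise, so asserting on true makes the test
+%% fail with the counterexample in the output.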
+run_proper(Fun, Args, NumTests) ->
+ ?assertEqual(
+ true,
+ proper:counterexample(
+ erlang:apply(Fun, Args),
+ [{numtests, NumTests},
+ {on_output, fun(".", _) -> ok; % don't print the '.'s on new lines
+ (F, A) -> ct:pal(?LOW_IMPORTANCE, F, A)
+ end}])).
diff --git a/deps/rabbit/test/rabbitmq-env.bats b/deps/rabbit/test/rabbitmq-env.bats
new file mode 100644
index 0000000000..4a016960c5
--- /dev/null
+++ b/deps/rabbit/test/rabbitmq-env.bats
@@ -0,0 +1,128 @@
+#!/usr/bin/env bats
+
+export RABBITMQ_SCRIPTS_DIR="$BATS_TEST_DIRNAME/../scripts"
+
+setup() {
+ export RABBITMQ_CONF_ENV_FILE="$BATS_TMPDIR/rabbitmq-env.$BATS_TEST_NAME.conf"
+}
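+
+# All tests below follow the same pattern: settings may come from the conf
+# file (unprefixed, e.g. SCHEDULER_BIND_TYPE) or from the environment
+# (RABBITMQ_-prefixed), with the environment taking precedence.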
+
+@test "default Erlang scheduler bind type" {
+ source "$RABBITMQ_SCRIPTS_DIR/rabbitmq-env"
+ echo $RABBITMQ_SCHEDULER_BIND_TYPE
+
+ echo "expected RABBITMQ_SERVER_ERL_ARGS to contain ' +stbt db ', but got: $RABBITMQ_SERVER_ERL_ARGS"
+ [[ $RABBITMQ_SERVER_ERL_ARGS == *" +stbt db "* ]]
+}
+
+@test "can configure Erlang scheduler bind type via conf file" {
+ echo 'SCHEDULER_BIND_TYPE=u' > "$RABBITMQ_CONF_ENV_FILE"
+ source "$RABBITMQ_SCRIPTS_DIR/rabbitmq-env"
+
+ echo "expected RABBITMQ_SERVER_ERL_ARGS to contain ' +stbt u ', but got: $RABBITMQ_SERVER_ERL_ARGS"
+ [[ $RABBITMQ_SERVER_ERL_ARGS == *" +stbt u "* ]]
+}
+
+@test "can configure Erlang scheduler bind type via env" {
+ RABBITMQ_SCHEDULER_BIND_TYPE=tnnps source "$RABBITMQ_SCRIPTS_DIR/rabbitmq-env"
+
+ echo "expected RABBITMQ_SERVER_ERL_ARGS to contain ' +stbt tnnps ', but got: $RABBITMQ_SERVER_ERL_ARGS"
+ [[ $RABBITMQ_SERVER_ERL_ARGS == *" +stbt tnnps "* ]]
+}
+
+@test "Erlang scheduler bind type env takes precedence over conf file" {
+ echo 'SCHEDULER_BIND_TYPE=s' > "$RABBITMQ_CONF_ENV_FILE"
+ RABBITMQ_SCHEDULER_BIND_TYPE=nnps source "$RABBITMQ_SCRIPTS_DIR/rabbitmq-env"
+
+ echo "expected RABBITMQ_SERVER_ERL_ARGS to contain ' +stbt nnps ', but got: $RABBITMQ_SERVER_ERL_ARGS"
+ [[ $RABBITMQ_SERVER_ERL_ARGS == *" +stbt nnps "* ]]
+}
+
+@test "default Erlang distribution buffer size" {
+ source "$RABBITMQ_SCRIPTS_DIR/rabbitmq-env"
+
+ echo "expected RABBITMQ_SERVER_ERL_ARGS to contain ' +zdbbl 128000 ', but got: $RABBITMQ_SERVER_ERL_ARGS"
+ [[ $RABBITMQ_SERVER_ERL_ARGS == *" +zdbbl 128000 "* ]]
+}
+
+@test "can configure Erlang distribution buffer size via conf file" {
+ echo 'DISTRIBUTION_BUFFER_SIZE=123123' > "$RABBITMQ_CONF_ENV_FILE"
+ source "$RABBITMQ_SCRIPTS_DIR/rabbitmq-env"
+
+ echo "expected RABBITMQ_SERVER_ERL_ARGS to contain ' +zdbbl 123123 ', but got: $RABBITMQ_SERVER_ERL_ARGS"
+ [[ $RABBITMQ_SERVER_ERL_ARGS == *" +zdbbl 123123 "* ]]
+}
+
+@test "can configure Erlang distribution buffer size via env" {
+ RABBITMQ_DISTRIBUTION_BUFFER_SIZE=2000000 source "$RABBITMQ_SCRIPTS_DIR/rabbitmq-env"
+
+ echo "expected RABBITMQ_SERVER_ERL_ARGS to contain ' +zdbbl 2000000 ', but got: $RABBITMQ_SERVER_ERL_ARGS"
+ [[ $RABBITMQ_SERVER_ERL_ARGS == *" +zdbbl 2000000 "* ]]
+}
+
+@test "Erlang distribution buffer size env takes precedence over conf file" {
+ echo 'DISTRIBUTION_BUFFER_SIZE=3000000' > "$RABBITMQ_CONF_ENV_FILE"
+ RABBITMQ_DISTRIBUTION_BUFFER_SIZE=4000000 source "$RABBITMQ_SCRIPTS_DIR/rabbitmq-env"
+
+ echo "expected RABBITMQ_SERVER_ERL_ARGS to contain ' +zdbbl 4000000 ', but got: $RABBITMQ_SERVER_ERL_ARGS"
+ [[ $RABBITMQ_SERVER_ERL_ARGS == *" +zdbbl 4000000 "* ]]
+}
+
+@test "default Erlang maximum number of processes" {
+ source "$RABBITMQ_SCRIPTS_DIR/rabbitmq-env"
+
+ echo "expected RABBITMQ_SERVER_ERL_ARGS to contain ' +P 1048576 ', but got: $RABBITMQ_SERVER_ERL_ARGS"
+ [[ $RABBITMQ_SERVER_ERL_ARGS == *" +P 1048576 "* ]]
+}
+
+@test "can configure Erlang maximum number of processes via conf file" {
+ echo 'MAX_NUMBER_OF_PROCESSES=2000000' > "$RABBITMQ_CONF_ENV_FILE"
+ source "$RABBITMQ_SCRIPTS_DIR/rabbitmq-env"
+
+ echo "expected RABBITMQ_SERVER_ERL_ARGS to contain ' +P 2000000 ', but got: $RABBITMQ_SERVER_ERL_ARGS"
+ [[ $RABBITMQ_SERVER_ERL_ARGS == *" +P 2000000 "* ]]
+}
+
+@test "can configure Erlang maximum number of processes via env" {
+ RABBITMQ_MAX_NUMBER_OF_PROCESSES=3000000 source "$RABBITMQ_SCRIPTS_DIR/rabbitmq-env"
+
+ echo "expected RABBITMQ_SERVER_ERL_ARGS to contain ' +P 3000000 ', but got: $RABBITMQ_SERVER_ERL_ARGS"
+ [[ $RABBITMQ_SERVER_ERL_ARGS == *" +P 3000000 "* ]]
+}
+
+@test "Erlang maximum number of processes env takes precedence over conf file" {
+ echo 'MAX_NUMBER_OF_PROCESSES=4000000' > "$RABBITMQ_CONF_ENV_FILE"
+ RABBITMQ_MAX_NUMBER_OF_PROCESSES=5000000 source "$RABBITMQ_SCRIPTS_DIR/rabbitmq-env"
+
+ echo "expected RABBITMQ_SERVER_ERL_ARGS to contain ' +P 5000000 ', but got: $RABBITMQ_SERVER_ERL_ARGS"
+ [[ $RABBITMQ_SERVER_ERL_ARGS == *" +P 5000000 "* ]]
+}
+
+@test "default Erlang maximum number of atoms" {
+ source "$RABBITMQ_SCRIPTS_DIR/rabbitmq-env"
+
+ echo "expected RABBITMQ_SERVER_ERL_ARGS to contain ' +t 5000000 ', but got: $RABBITMQ_SERVER_ERL_ARGS"
+ [[ $RABBITMQ_SERVER_ERL_ARGS == *" +t 5000000 "* ]]
+}
+
+@test "can configure Erlang maximum number of atoms via conf file" {
+ echo 'MAX_NUMBER_OF_ATOMS=1000000' > "$RABBITMQ_CONF_ENV_FILE"
+ source "$RABBITMQ_SCRIPTS_DIR/rabbitmq-env"
+
+ echo "expected RABBITMQ_SERVER_ERL_ARGS to contain ' +t 1000000 ', but got: $RABBITMQ_SERVER_ERL_ARGS"
+ [[ $RABBITMQ_SERVER_ERL_ARGS == *" +t 1000000 "* ]]
+}
+
+@test "can configure Erlang maximum number of atoms via env" {
+ RABBITMQ_MAX_NUMBER_OF_ATOMS=2000000 source "$RABBITMQ_SCRIPTS_DIR/rabbitmq-env"
+
+ echo "expected RABBITMQ_SERVER_ERL_ARGS to contain ' +t 2000000 ', but got: $RABBITMQ_SERVER_ERL_ARGS"
+ [[ $RABBITMQ_SERVER_ERL_ARGS == *" +t 2000000 "* ]]
+}
+
+@test "Erlang maximum number of atoms env takes precedence over conf file" {
+ echo 'MAX_NUMBER_OF_ATOMS=3000000' > "$RABBITMQ_CONF_ENV_FILE"
+ RABBITMQ_MAX_NUMBER_OF_ATOMS=4000000 source "$RABBITMQ_SCRIPTS_DIR/rabbitmq-env"
+
+ echo "expected RABBITMQ_SERVER_ERL_ARGS to contain ' +t 4000000 ', but got: $RABBITMQ_SERVER_ERL_ARGS"
+ [[ $RABBITMQ_SERVER_ERL_ARGS == *" +t 4000000 "* ]]
+}
diff --git a/deps/rabbit/test/rabbitmq_queues_cli_integration_SUITE.erl b/deps/rabbit/test/rabbitmq_queues_cli_integration_SUITE.erl
new file mode 100644
index 0000000000..bf5e9ee79e
--- /dev/null
+++ b/deps/rabbit/test/rabbitmq_queues_cli_integration_SUITE.erl
@@ -0,0 +1,139 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2017-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+-module(rabbitmq_queues_cli_integration_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, tests}
+ ].
+
+groups() ->
+ [
+ {tests, [], [
+ shrink,
+ grow,
+ grow_invalid_node_filtered
+ ]}
+ ].
+
+init_per_suite(Config) ->
+ case os:getenv("SECONDARY_UMBRELLA") of
+ false ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config);
+ _ ->
+ {skip, "growing and shrinking cannot be done in mixed mode"}
+ end.
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(tests, Config0) ->
+ NumNodes = 3,
+ Config1 = rabbit_ct_helpers:set_config(
+ Config0, [{rmq_nodes_count, NumNodes},
+ {rmq_nodes_clustered, true}]),
+ Config2 = rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()),
+ case rabbit_ct_broker_helpers:enable_feature_flag(Config2, quorum_queue) of
+ ok ->
+ Config2;
+ Skip ->
+ end_per_group(tests, Config2),
+ Skip
+ end.
+
+end_per_group(tests, Config) ->
+ rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_testcase(Testcase, Config0) ->
+ rabbit_ct_helpers:ensure_rabbitmq_queues_cmd(
+ rabbit_ct_helpers:testcase_started(Config0, Testcase)).
+
+end_per_testcase(Testcase, Config0) ->
+ rabbit_ct_helpers:testcase_finished(Config0, Testcase).
+
+shrink(Config) ->
+ NodeConfig = rabbit_ct_broker_helpers:get_node_config(Config, 2),
+ Nodename2 = ?config(nodename, NodeConfig),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Nodename2),
+ %% declare a quorum queue
+ QName = "shrink1",
+ #'queue.declare_ok'{} = declare_qq(Ch, QName),
+ {ok, Out1} = rabbitmq_queues(Config, 0, ["shrink", Nodename2]),
+ ?assertMatch(#{{"/", "shrink1"} := {2, ok}}, parse_result(Out1)),
+ Nodename1 = rabbit_ct_broker_helpers:get_node_config(Config, 1, nodename),
+ {ok, Out2} = rabbitmq_queues(Config, 0, ["shrink", Nodename1]),
+ ?assertMatch(#{{"/", "shrink1"} := {1, ok}}, parse_result(Out2)),
+ Nodename0 = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ {ok, Out3} = rabbitmq_queues(Config, 0, ["shrink", Nodename0]),
+ ?assertMatch(#{{"/", "shrink1"} := {1, error}}, parse_result(Out3)),
+ ok.
+
+grow(Config) ->
+ NodeConfig = rabbit_ct_broker_helpers:get_node_config(Config, 2),
+ Nodename2 = ?config(nodename, NodeConfig),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Nodename2),
+ %% declare a quorum queue
+ QName = "grow1",
+ Args = [{<<"x-quorum-initial-group-size">>, long, 1}],
+ #'queue.declare_ok'{} = declare_qq(Ch, QName, Args),
+ Nodename0 = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ {ok, Out1} = rabbitmq_queues(Config, 0, ["grow", Nodename0, "all"]),
+ ?assertMatch(#{{"/", "grow1"} := {2, ok}}, parse_result(Out1)),
+ Nodename1 = rabbit_ct_broker_helpers:get_node_config(Config, 1, nodename),
+ {ok, Out2} = rabbitmq_queues(Config, 0, ["grow", Nodename1, "all"]),
+ ?assertMatch(#{{"/", "grow1"} := {3, ok}}, parse_result(Out2)),
+
+ {ok, Out3} = rabbitmq_queues(Config, 0, ["grow", Nodename0, "all"]),
+ ?assertNotMatch(#{{"/", "grow1"} := _}, parse_result(Out3)),
+ ok.
+
+grow_invalid_node_filtered(Config) ->
+ NodeConfig = rabbit_ct_broker_helpers:get_node_config(Config, 2),
+ Nodename2 = ?config(nodename, NodeConfig),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Nodename2),
+ %% declare a quorum queue
+ QName = "grow-err",
+ Args = [{<<"x-quorum-initial-group-size">>, long, 1}],
+ #'queue.declare_ok'{} = declare_qq(Ch, QName, Args),
+ DummyNode = not_really_a_node@nothing,
+ {ok, Out1} = rabbitmq_queues(Config, 0, ["grow", DummyNode, "all"]),
+ ?assertNotMatch(#{{"/", "grow-err"} := _}, parse_result(Out1)),
+ ok.
+
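+%% Parses the tab-separated output of "rabbitmq-queues --silent ...". As an
+%% illustration, a line of the form "/\tshrink1\t2\tok" maps to
+%% #{{"/", "shrink1"} => {2, ok}}.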
+parse_result(S) ->
+ Lines = string:split(S, "\n", all),
+ maps:from_list(
+ [{{Vhost, QName},
+ {erlang:list_to_integer(Size), case Result of
+ "ok" -> ok;
+ _ -> error
+ end}}
+ || [Vhost, QName, Size, Result] <-
+ [string:split(L, "\t", all) || L <- Lines]]).
+
+declare_qq(Ch, Q, Args0) ->
+ Args = [{<<"x-queue-type">>, longstr, <<"quorum">>}] ++ Args0,
+ amqp_channel:call(Ch, #'queue.declare'{queue = list_to_binary(Q),
+ durable = true,
+ auto_delete = false,
+ arguments = Args}).
+declare_qq(Ch, Q) ->
+ declare_qq(Ch, Q, []).
+
+rabbitmq_queues(Config, N, Args) ->
+ rabbit_ct_broker_helpers:rabbitmq_queues(Config, N, ["--silent" | Args]).
diff --git a/deps/rabbit/test/rabbitmqctl_integration_SUITE.erl b/deps/rabbit/test/rabbitmqctl_integration_SUITE.erl
new file mode 100644
index 0000000000..9c689f5667
--- /dev/null
+++ b/deps/rabbit/test/rabbitmqctl_integration_SUITE.erl
@@ -0,0 +1,164 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2016-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+-module(rabbitmqctl_integration_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-export([all/0
+ ,groups/0
+ ,init_per_suite/1
+ ,end_per_suite/1
+ ,init_per_group/2
+ ,end_per_group/2
+ ,init_per_testcase/2
+ ,end_per_testcase/2
+ ]).
+
+-export([list_queues_local/1
+ ,list_queues_offline/1
+ ,list_queues_online/1
+ ,list_queues_stopped/1
+ ]).
+
+all() ->
+ [
+ {group, list_queues}
+ ].
+
+groups() ->
+ [
+ {list_queues, [],
+ [list_queues_local
+ ,list_queues_online
+ ,list_queues_offline
+ ,list_queues_stopped
+ ]}
+ ].
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(list_queues, Config0) ->
+ NumNodes = 3,
+ Config = create_n_node_cluster(Config0, NumNodes),
+ Config1 = declare_some_queues(Config),
+ rabbit_ct_broker_helpers:stop_node(Config1, NumNodes - 1),
+ Config1;
+init_per_group(_, Config) ->
+ Config.
+
+create_n_node_cluster(Config0, NumNodes) ->
+ Config1 = rabbit_ct_helpers:set_config(
+ Config0, [{rmq_nodes_count, NumNodes},
+ {rmq_nodes_clustered, true}]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+declare_some_queues(Config) ->
+ Nodes = rabbit_ct_helpers:get_config(Config, rmq_nodes),
+ PerNodeQueues = [ declare_some_queues(Config, NodeNum)
+ || NodeNum <- lists:seq(0, length(Nodes)-1) ],
+ rabbit_ct_helpers:set_config(Config, {per_node_queues, PerNodeQueues}).
+
+declare_some_queues(Config, NodeNum) ->
+ {Conn, Chan} = rabbit_ct_client_helpers:open_connection_and_channel(Config, NodeNum),
+ NumQueues = 5,
+ Queues = [ list_to_binary(io_lib:format("queue-~b-on-node-~b", [QueueNum, NodeNum]))
+ || QueueNum <- lists:seq(1, NumQueues) ],
+ lists:foreach(fun (QueueName) ->
+ #'queue.declare_ok'{} = amqp_channel:call(Chan, #'queue.declare'{queue = QueueName, durable = true})
+ end, Queues),
+ rabbit_ct_client_helpers:close_connection_and_channel(Conn, Chan),
+ Queues.
+
+end_per_group(list_queues, Config0) ->
+ Config1 = case rabbit_ct_helpers:get_config(Config0, save_config) of
+ undefined -> Config0;
+ C -> C
+ end,
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps());
+end_per_group(_, Config) ->
+ Config.
+
+init_per_testcase(list_queues_stopped, Config0) ->
+    %% Start node 3 so we can crash its queues
+ rabbit_ct_broker_helpers:start_node(Config0, 2),
+ %% Make vhost "down" on nodes 2 and 3
+ rabbit_ct_broker_helpers:force_vhost_failure(Config0, 1, <<"/">>),
+ rabbit_ct_broker_helpers:force_vhost_failure(Config0, 2, <<"/">>),
+
+ rabbit_ct_broker_helpers:stop_node(Config0, 2),
+ rabbit_ct_helpers:testcase_started(Config0, list_queues_stopped);
+
+init_per_testcase(Testcase, Config0) ->
+ rabbit_ct_helpers:testcase_started(Config0, Testcase).
+
+end_per_testcase(Testcase, Config0) ->
+ rabbit_ct_helpers:testcase_finished(Config0, Testcase).
+
+%%----------------------------------------------------------------------------
+%% Test cases
+%%----------------------------------------------------------------------------
+list_queues_local(Config) ->
+ Node1Queues = lists:sort(lists:nth(1, ?config(per_node_queues, Config))),
+ Node2Queues = lists:sort(lists:nth(2, ?config(per_node_queues, Config))),
+ assert_ctl_queues(Config, 0, ["--local"], Node1Queues),
+ assert_ctl_queues(Config, 1, ["--local"], Node2Queues),
+ ok.
+
+list_queues_online(Config) ->
+ Node1Queues = lists:sort(lists:nth(1, ?config(per_node_queues, Config))),
+ Node2Queues = lists:sort(lists:nth(2, ?config(per_node_queues, Config))),
+ OnlineQueues = Node1Queues ++ Node2Queues,
+ assert_ctl_queues(Config, 0, ["--online"], OnlineQueues),
+ assert_ctl_queues(Config, 1, ["--online"], OnlineQueues),
+ ok.
+
+list_queues_offline(Config) ->
+ Node3Queues = lists:sort(lists:nth(3, ?config(per_node_queues, Config))),
+ OfflineQueues = Node3Queues,
+ assert_ctl_queues(Config, 0, ["--offline"], OfflineQueues),
+ assert_ctl_queues(Config, 1, ["--offline"], OfflineQueues),
+ ok.
+
+list_queues_stopped(Config) ->
+ Node1Queues = lists:sort(lists:nth(1, ?config(per_node_queues, Config))),
+ Node2Queues = lists:sort(lists:nth(2, ?config(per_node_queues, Config))),
+ Node3Queues = lists:sort(lists:nth(3, ?config(per_node_queues, Config))),
+
+ %% All queues are listed
+ ListedQueues =
+ [ {Name, State}
+ || [Name, State] <- rabbit_ct_broker_helpers:rabbitmqctl_list(
+ Config, 0, ["list_queues", "name", "state", "--no-table-headers"]) ],
+
+ [ <<"running">> = proplists:get_value(Q, ListedQueues) || Q <- Node1Queues ],
+ %% Node is running. Vhost is down
+ [ <<"stopped">> = proplists:get_value(Q, ListedQueues) || Q <- Node2Queues ],
+ %% Node is not running. Vhost is down
+ [ <<"down">> = proplists:get_value(Q, ListedQueues) || Q <- Node3Queues ].
+
+%%----------------------------------------------------------------------------
+%% Helpers
+%%----------------------------------------------------------------------------
+assert_ctl_queues(Config, Node, Args, Expected0) ->
+ Expected = lists:sort(Expected0),
+ Got0 = run_list_queues(Config, Node, Args),
+ Got = lists:sort(lists:map(fun hd/1, Got0)),
+ ?assertMatch(Expected, Got).
+
+run_list_queues(Config, Node, Args) ->
+ rabbit_ct_broker_helpers:rabbitmqctl_list(Config, Node, ["list_queues"] ++ Args ++ ["name", "--no-table-headers"]).
diff --git a/deps/rabbit/test/rabbitmqctl_shutdown_SUITE.erl b/deps/rabbit/test/rabbitmqctl_shutdown_SUITE.erl
new file mode 100644
index 0000000000..6365f91d47
--- /dev/null
+++ b/deps/rabbit/test/rabbitmqctl_shutdown_SUITE.erl
@@ -0,0 +1,110 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2017-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbitmqctl_shutdown_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, running_node},
+ {group, non_running_node}
+ ].
+
+groups() ->
+ [
+ {running_node, [], [
+ successful_shutdown
+ ]},
+ {non_running_node, [], [
+ nothing_to_shutdown
+ ]}
+ ].
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(running_node, Config) ->
+ rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, ?MODULE},
+ {need_start, true}
+ ]);
+init_per_group(non_running_node, Config) ->
+ rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, ?MODULE}
+ ]).
+
+end_per_group(running_node, Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config, []);
+end_per_group(non_running_node, Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config, []).
+
+init_per_testcase(Testcase, Config0) ->
+ Config1 = case ?config(need_start, Config0) of
+ true ->
+ rabbit_ct_helpers:run_setup_steps(Config0,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ [fun save_node/1]);
+ _ ->
+ rabbit_ct_helpers:set_config(Config0,
+ [{node, non_existent_node@localhost}])
+ end,
+ rabbit_ct_helpers:testcase_started(Config1, Testcase).
+
+end_per_testcase(Testcase, Config0) ->
+ Config1 = case ?config(need_start, Config0) of
+ true ->
+ rabbit_ct_helpers:run_teardown_steps(Config0,
+ rabbit_ct_broker_helpers:teardown_steps());
+ _ -> Config0
+ end,
+ rabbit_ct_helpers:testcase_finished(Config1, Testcase).
+
+save_node(Config) ->
+ Node = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ rabbit_ct_helpers:set_config(Config, [{node, Node}]).
+
+successful_shutdown(Config) ->
+ Node = ?config(node, Config),
+ Pid = node_pid(Node),
+ ok = shutdown_ok(Node),
+ false = erlang_pid_is_running(Pid),
+ false = node_is_running(Node).
+
+
+nothing_to_shutdown(Config) ->
+ Node = ?config(node, Config),
+
+    {error, 69, _} =
+ rabbit_ct_broker_helpers:control_action(shutdown, Node, []).
+
+node_pid(Node) ->
+ Val = rpc:call(Node, os, getpid, []),
+ true = is_list(Val),
+ list_to_integer(Val).
+
+erlang_pid_is_running(Pid) ->
+ rabbit_misc:is_os_process_alive(integer_to_list(Pid)).
+
+node_is_running(Node) ->
+ net_adm:ping(Node) == pong.
+
+shutdown_ok(Node) ->
+ %% Start a command
+ {stream, Stream} = rabbit_ct_broker_helpers:control_action(shutdown, Node, []),
+ %% Execute command steps. Each step will output a binary string
+ Lines = 'Elixir.Enum':to_list(Stream),
+ ct:pal("Command output ~p ~n", [Lines]),
+ [true = is_binary(Line) || Line <- Lines],
+ ok.
diff --git a/deps/rabbit/test/signal_handling_SUITE.erl b/deps/rabbit/test/signal_handling_SUITE.erl
new file mode 100644
index 0000000000..551f456039
--- /dev/null
+++ b/deps/rabbit/test/signal_handling_SUITE.erl
@@ -0,0 +1,160 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(signal_handling_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-export([suite/0,
+ all/0,
+ groups/0,
+ init_per_suite/1,
+ end_per_suite/1,
+ init_per_group/2,
+ end_per_group/2,
+ init_per_testcase/2,
+ end_per_testcase/2,
+
+ send_sighup/1,
+ send_sigterm/1,
+ send_sigtstp/1
+ ]).
+
+suite() ->
+ [{timetrap, {minutes, 5}}].
+
+all() ->
+ [
+ {group, signal_sent_to_pid_in_pidfile},
+ {group, signal_sent_to_pid_from_os_getpid}
+ ].
+
+groups() ->
+ Signals = [sighup,
+ sigterm,
+ sigtstp],
+ Tests = [list_to_existing_atom(rabbit_misc:format("send_~s", [Signal]))
+ || Signal <- Signals],
+ [
+ {signal_sent_to_pid_in_pidfile, [], Tests},
+ {signal_sent_to_pid_from_os_getpid, [], Tests}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ case os:type() of
+ {unix, _} ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config);
+ _ ->
+ {skip, "This testsuite is only relevant on Unix"}
+ end.
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(_, Config) ->
+ Config.
+
+end_per_group(_, Config) ->
+ Config.
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase),
+ ClusterSize = 1,
+ TestNumber = rabbit_ct_helpers:testcase_number(Config, ?MODULE, Testcase),
+ Config1 = rabbit_ct_helpers:set_config(
+ Config,
+ [
+ {rmq_nodename_suffix, Testcase},
+ {tcp_ports_base, {skip_n_nodes, TestNumber * ClusterSize}}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_testcase(Testcase, Config) ->
+ Config1 = rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()),
+ rabbit_ct_helpers:testcase_finished(Config1, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+send_sighup(Config) ->
+ {PidFile, Pid} = get_pidfile_and_pid(Config),
+
+ %% A SIGHUP signal should be ignored and the node should still be
+ %% running.
+ send_signal(Pid, "HUP"),
+ timer:sleep(10000),
+ A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ ?assert(rabbit_ct_broker_helpers:rpc(Config, A, rabbit, is_running, [])),
+ ?assert(filelib:is_regular(PidFile)).
+
+send_sigterm(Config) ->
+ {PidFile, Pid} = get_pidfile_and_pid(Config),
+
+ %% After sending a SIGTERM to the process, we expect the node to
+ %% exit.
+ send_signal(Pid, "TERM"),
+ wait_for_node_exit(Pid),
+
+ %% After a clean exit, the PID file should be removed.
+ ?assertNot(filelib:is_regular(PidFile)).
+
+send_sigtstp(Config) ->
+ {PidFile, Pid} = get_pidfile_and_pid(Config),
+
+    %% A SIGTSTP signal should be ignored and the node should still be
+ %% running.
+ send_signal(Pid, "TSTP"),
+ timer:sleep(10000),
+ A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ ?assert(rabbit_ct_broker_helpers:rpc(Config, A, rabbit, is_running, [])),
+ ?assert(filelib:is_regular(PidFile)).
+
+get_pidfile_and_pid(Config) ->
+ PidFile = rabbit_ct_broker_helpers:get_node_config(Config, 0, pid_file),
+ ?assert(filelib:is_regular(PidFile)),
+
+ %% We send the signal to either the process referenced in the
+    %% PID file or the Erlang VM process. Up to 3.8.x, they can be
+    %% different processes because the PID file may reference the
+ %% rabbitmq-server(8) script wrapper.
+ [{name, Group} | _] = ?config(tc_group_properties, Config),
+ Pid = case Group of
+ signal_sent_to_pid_in_pidfile ->
+ {ok, P} = file:read_file(PidFile),
+ string:trim(P, trailing, [$\r,$\n]);
+ signal_sent_to_pid_from_os_getpid ->
+ A = rabbit_ct_broker_helpers:get_node_config(
+ Config, 0, nodename),
+ rabbit_ct_broker_helpers:rpc(Config, A, os, getpid, [])
+ end,
+ {PidFile, Pid}.
+
+send_signal(Pid, Signal) ->
+ Cmd = ["kill",
+ "-" ++ Signal,
+ Pid],
+ ?assertMatch({ok, _}, rabbit_ct_helpers:exec(Cmd)).
+
+wait_for_node_exit(Pid) ->
+ case rabbit_misc:is_os_process_alive(Pid) of
+ true ->
+ timer:sleep(1000),
+ wait_for_node_exit(Pid);
+ false ->
+ ok
+ end.
diff --git a/deps/rabbit/test/simple_ha_SUITE.erl b/deps/rabbit/test/simple_ha_SUITE.erl
new file mode 100644
index 0000000000..8b2c1d6ebb
--- /dev/null
+++ b/deps/rabbit/test/simple_ha_SUITE.erl
@@ -0,0 +1,371 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(simple_ha_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-compile(export_all).
+
+-define(DELAY, 8000).
+
+all() ->
+ [
+ {group, cluster_size_2},
+ {group, cluster_size_3}
+ ].
+
+groups() ->
+ RejectTests = [
+ rejects_survive_stop,
+ rejects_survive_policy
+ ],
+ [
+ {cluster_size_2, [], [
+ rapid_redeclare,
+ declare_synchrony,
+ clean_up_exclusive_queues,
+ clean_up_and_redeclare_exclusive_queues_on_other_nodes
+ ]},
+ {cluster_size_3, [], [
+ consume_survives_stop,
+ consume_survives_policy,
+ auto_resume,
+ auto_resume_no_ccn_client,
+ confirms_survive_stop,
+ confirms_survive_policy,
+ {overflow_reject_publish, [], RejectTests},
+ {overflow_reject_publish_dlx, [], RejectTests}
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(cluster_size_2, Config) ->
+ rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodes_count, 2}
+ ]);
+init_per_group(cluster_size_3, Config) ->
+ rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodes_count, 3}
+ ]);
+init_per_group(overflow_reject_publish, Config) ->
+ rabbit_ct_helpers:set_config(Config, [
+ {overflow, <<"reject-publish">>}
+ ]);
+init_per_group(overflow_reject_publish_dlx, Config) ->
+ rabbit_ct_helpers:set_config(Config, [
+ {overflow, <<"reject-publish-dlx">>}
+ ]).
+
+end_per_group(_, Config) ->
+ Config.
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase),
+ ClusterSize = ?config(rmq_nodes_count, Config),
+ TestNumber = rabbit_ct_helpers:testcase_number(Config, ?MODULE, Testcase),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodes_clustered, true},
+ {rmq_nodename_suffix, Testcase},
+ {tcp_ports_base, {skip_n_nodes, TestNumber * ClusterSize}}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps() ++ [
+ fun rabbit_ct_broker_helpers:set_ha_policy_all/1
+ ]).
+
+end_per_testcase(Testcase, Config) ->
+ Config1 = rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()),
+ rabbit_ct_helpers:testcase_finished(Config1, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+rapid_redeclare(Config) ->
+ A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, A),
+ Queue = <<"test">>,
+ [begin
+ amqp_channel:call(Ch, #'queue.declare'{queue = Queue,
+ durable = true}),
+ amqp_channel:call(Ch, #'queue.delete'{queue = Queue})
+ end || _I <- lists:seq(1, 20)],
+ ok.
+
+%% Check that by the time we get a declare-ok back, the mirrors are up
+%% and in Mnesia.
+declare_synchrony(Config) ->
+ [Rabbit, Hare] = rabbit_ct_broker_helpers:get_node_configs(Config,
+ nodename),
+ RabbitCh = rabbit_ct_client_helpers:open_channel(Config, Rabbit),
+ HareCh = rabbit_ct_client_helpers:open_channel(Config, Hare),
+ Q = <<"mirrored-queue">>,
+ declare(RabbitCh, Q),
+ amqp_channel:call(RabbitCh, #'confirm.select'{}),
+ amqp_channel:cast(RabbitCh, #'basic.publish'{routing_key = Q},
+ #amqp_msg{props = #'P_basic'{delivery_mode = 2}}),
+ amqp_channel:wait_for_confirms(RabbitCh),
+ rabbit_ct_broker_helpers:kill_node(Config, Rabbit),
+
+ #'queue.declare_ok'{message_count = 1} = declare(HareCh, Q),
+ ok.
+
+declare(Ch, Name) ->
+ amqp_channel:call(Ch, #'queue.declare'{durable = true, queue = Name}).
+
+%% Ensure that exclusive queues are cleaned up when they are part of an
+%% HA cluster and their node is killed abruptly, then restarted.
+clean_up_exclusive_queues(Config) ->
+ QName = <<"excl">>,
+ rabbit_ct_broker_helpers:set_ha_policy(Config, 0, <<".*">>, <<"all">>),
+ [A, B] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ ChA = rabbit_ct_client_helpers:open_channel(Config, A),
+ amqp_channel:call(ChA, #'queue.declare'{queue = QName,
+ exclusive = true}),
+ ok = rabbit_ct_broker_helpers:kill_node(Config, A),
+ timer:sleep(?DELAY),
+ [] = rabbit_ct_broker_helpers:rpc(Config, B, rabbit_amqqueue, list, []),
+ ok = rabbit_ct_broker_helpers:start_node(Config, A),
+ timer:sleep(?DELAY),
+ [[],[]] = rabbit_ct_broker_helpers:rpc_all(Config, rabbit_amqqueue, list, []),
+ ok.
+
+clean_up_and_redeclare_exclusive_queues_on_other_nodes(Config) ->
+ QueueCount = 10,
+ QueueNames = lists:map(fun(N) ->
+ NBin = erlang:integer_to_binary(N),
+ <<"exclusive-q-", NBin/binary>>
+ end, lists:seq(1, QueueCount)),
+ [A, B] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ Conn = rabbit_ct_client_helpers:open_unmanaged_connection(Config, A),
+ {ok, Ch} = amqp_connection:open_channel(Conn),
+
+ LocationMinMasters = [
+ {<<"x-queue-master-locator">>, longstr, <<"min-masters">>}
+ ],
+ lists:foreach(fun(QueueName) ->
+ declare_exclusive(Ch, QueueName, LocationMinMasters),
+ subscribe(Ch, QueueName)
+ end, QueueNames),
+
+ ok = rabbit_ct_broker_helpers:kill_node(Config, B),
+
+ Cancels = receive_cancels([]),
+ ?assert(length(Cancels) > 0),
+
+    RemainingQueues = rabbit_ct_broker_helpers:rpc(Config, A, rabbit_amqqueue, list, []),
+
+    ?assertEqual(length(RemainingQueues), QueueCount - length(Cancels)),
+
+ lists:foreach(fun(QueueName) ->
+ declare_exclusive(Ch, QueueName, LocationMinMasters),
+ true = rabbit_ct_client_helpers:publish(Ch, QueueName, 1),
+ subscribe(Ch, QueueName)
+ end, QueueNames),
+ Messages = receive_messages([]),
+ ?assertEqual(10, length(Messages)),
+ ok = rabbit_ct_client_helpers:close_connection(Conn).
+
+
+consume_survives_stop(Cf) -> consume_survives(Cf, fun stop/2, true).
+consume_survives_sigkill(Cf) -> consume_survives(Cf, fun sigkill/2, true).
+consume_survives_policy(Cf) -> consume_survives(Cf, fun policy/2, true).
+auto_resume(Cf) -> consume_survives(Cf, fun sigkill/2, false).
+auto_resume_no_ccn_client(Cf) -> consume_survives(Cf, fun sigkill/2, false,
+ false).
+
+confirms_survive_stop(Cf) -> confirms_survive(Cf, fun stop/2).
+confirms_survive_policy(Cf) -> confirms_survive(Cf, fun policy/2).
+
+rejects_survive_stop(Cf) -> rejects_survive(Cf, fun stop/2).
+rejects_survive_policy(Cf) -> rejects_survive(Cf, fun policy/2).
+
+%%----------------------------------------------------------------------------
+
+consume_survives(Config, DeathFun, CancelOnFailover) ->
+ consume_survives(Config, DeathFun, CancelOnFailover, true).
+
+consume_survives(Config,
+ DeathFun, CancelOnFailover, CCNSupported) ->
+ [A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ Msgs = rabbit_ct_helpers:cover_work_factor(Config, 20000),
+ Channel1 = rabbit_ct_client_helpers:open_channel(Config, A),
+ Channel2 = rabbit_ct_client_helpers:open_channel(Config, B),
+ Channel3 = rabbit_ct_client_helpers:open_channel(Config, C),
+
+ %% declare the queue on the master, mirrored to the two mirrors
+ Queue = <<"test">>,
+ amqp_channel:call(Channel1, #'queue.declare'{queue = Queue,
+ auto_delete = false}),
+
+ %% start up a consumer
+ ConsCh = case CCNSupported of
+ true -> Channel2;
+ false -> Port = rabbit_ct_broker_helpers:get_node_config(
+ Config, B, tcp_port_amqp),
+ open_incapable_channel(Port)
+ end,
+ ConsumerPid = rabbit_ha_test_consumer:create(
+ ConsCh, Queue, self(), CancelOnFailover, Msgs),
+
+ %% send a bunch of messages from the producer
+ ProducerPid = rabbit_ha_test_producer:create(Channel3, Queue,
+ self(), false, Msgs),
+ DeathFun(Config, A),
+    %% Verify that the consumer got all messages, or die: the
+    %% await_response calls throw an exception if anything goes wrong.
+    ct:pal("awaiting producer ~w", [ProducerPid]),
+ rabbit_ha_test_producer:await_response(ProducerPid),
+ ct:pal("awaiting consumer ~w", [ConsumerPid]),
+ rabbit_ha_test_consumer:await_response(ConsumerPid),
+ ok.
+
+confirms_survive(Config, DeathFun) ->
+ [A, B, _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ Msgs = rabbit_ct_helpers:cover_work_factor(Config, 20000),
+ Node1Channel = rabbit_ct_client_helpers:open_channel(Config, A),
+ Node2Channel = rabbit_ct_client_helpers:open_channel(Config, B),
+
+ %% declare the queue on the master, mirrored to the two mirrors
+ Queue = <<"test">>,
+ amqp_channel:call(Node1Channel,#'queue.declare'{queue = Queue,
+ auto_delete = false,
+ durable = true}),
+
+ %% send one message to ensure the channel is flowing
+ amqp_channel:register_confirm_handler(Node1Channel, self()),
+ #'confirm.select_ok'{} = amqp_channel:call(Node1Channel, #'confirm.select'{}),
+
+ Payload = <<"initial message">>,
+ ok = amqp_channel:call(Node1Channel,
+ #'basic.publish'{routing_key = Queue},
+ #amqp_msg{payload = Payload}),
+
+ ok = receive
+ #'basic.ack'{multiple = false} -> ok;
+ #'basic.nack'{multiple = false} -> message_nacked
+ after
+ 5000 -> confirm_not_received
+ end,
+
+ %% send a bunch of messages from the producer
+ ProducerPid = rabbit_ha_test_producer:create(Node2Channel, Queue,
+ self(), true, Msgs),
+ DeathFun(Config, A),
+ rabbit_ha_test_producer:await_response(ProducerPid),
+ ok.
+
+rejects_survive(Config, DeathFun) ->
+ [A, B, _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ Msgs = rabbit_ct_helpers:cover_work_factor(Config, 20000),
+ Node1Channel = rabbit_ct_client_helpers:open_channel(Config, A),
+ Node2Channel = rabbit_ct_client_helpers:open_channel(Config, B),
+
+ %% declare the queue on the master, mirrored to the two mirrors
+ XOverflow = ?config(overflow, Config),
+ Queue = <<"test_rejects", "_", XOverflow/binary>>,
+ amqp_channel:call(Node1Channel,#'queue.declare'{queue = Queue,
+ auto_delete = false,
+ durable = true,
+ arguments = [{<<"x-max-length">>, long, 1},
+ {<<"x-overflow">>, longstr, XOverflow}]}),
+
+ amqp_channel:register_confirm_handler(Node1Channel, self()),
+ #'confirm.select_ok'{} = amqp_channel:call(Node1Channel, #'confirm.select'{}),
+
+ Payload = <<"there can be only one">>,
+ ok = amqp_channel:call(Node1Channel,
+ #'basic.publish'{routing_key = Queue},
+ #amqp_msg{payload = Payload}),
+
+ ok = receive
+ #'basic.ack'{multiple = false} -> ok;
+ #'basic.nack'{multiple = false} -> message_nacked
+ after
+ 5000 -> confirm_not_received
+ end,
+
+ %% send a bunch of messages from the producer. They should all be nacked, as the queue is full.
+ ProducerPid = rabbit_ha_test_producer:create(Node2Channel, Queue,
+ self(), true, Msgs, nacks),
+ DeathFun(Config, A),
+ rabbit_ha_test_producer:await_response(ProducerPid),
+
+ {#'basic.get_ok'{}, #amqp_msg{payload = Payload}} =
+ amqp_channel:call(Node2Channel, #'basic.get'{queue = Queue}),
+ %% There is only one message.
+ #'basic.get_empty'{} = amqp_channel:call(Node2Channel, #'basic.get'{queue = Queue}),
+ ok.
+
+
+
+stop(Config, Node) ->
+ rabbit_ct_broker_helpers:stop_node_after(Config, Node, 50).
+
+sigkill(Config, Node) ->
+ rabbit_ct_broker_helpers:kill_node_after(Config, Node, 50).
+
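+%% Fail over by policy: restrict mirroring to every node except Node so
+%% that the queue master has to move off it.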
+policy(Config, Node) ->
+ Nodes = [
+ rabbit_misc:atom_to_binary(N)
+ || N <- rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ N =/= Node],
+ rabbit_ct_broker_helpers:set_ha_policy(Config, Node, <<".*">>,
+ {<<"nodes">>, Nodes}).
+
+open_incapable_channel(NodePort) ->
+ Props = [{<<"capabilities">>, table, []}],
+ {ok, ConsConn} =
+ amqp_connection:start(#amqp_params_network{port = NodePort,
+ client_properties = Props}),
+ {ok, Ch} = amqp_connection:open_channel(ConsConn),
+ Ch.
+
+declare_exclusive(Ch, QueueName, Args) ->
+ Declare = #'queue.declare'{queue = QueueName,
+ exclusive = true,
+ arguments = Args
+ },
+ #'queue.declare_ok'{} = amqp_channel:call(Ch, Declare).
+
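+%% amqp_channel:call/2 returns the basic.consume_ok, and the channel
+%% also delivers a copy of it to the subscriber (self()); wait for that
+%% copy before returning.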
+subscribe(Ch, QueueName) ->
+ ConsumeOk = amqp_channel:call(Ch, #'basic.consume'{queue = QueueName,
+ no_ack = true}),
+ #'basic.consume_ok'{} = ConsumeOk,
+ receive ConsumeOk -> ok after ?DELAY -> throw(consume_ok_timeout) end.
+
+receive_cancels(Cancels) ->
+ receive
+ #'basic.cancel'{} = C ->
+ receive_cancels([C|Cancels])
+ after ?DELAY ->
+ Cancels
+ end.
+
+receive_messages(All) ->
+ receive
+ {#'basic.deliver'{}, Msg} ->
+ receive_messages([Msg|All])
+ after ?DELAY ->
+ lists:reverse(All)
+ end.
diff --git a/deps/rabbit/test/single_active_consumer_SUITE.erl b/deps/rabbit/test/single_active_consumer_SUITE.erl
new file mode 100644
index 0000000000..59f2b6e83d
--- /dev/null
+++ b/deps/rabbit/test/single_active_consumer_SUITE.erl
@@ -0,0 +1,376 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2018-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(single_active_consumer_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-compile(export_all).
+
+-define(TIMEOUT, 30000).
+
+all() ->
+ [
+ {group, classic_queue}, {group, quorum_queue}
+ ].
+
+groups() ->
+ [
+ {classic_queue, [], [
+ all_messages_go_to_one_consumer,
+ fallback_to_another_consumer_when_first_one_is_cancelled,
+ fallback_to_another_consumer_when_exclusive_consumer_channel_is_cancelled,
+ fallback_to_another_consumer_when_first_one_is_cancelled_manual_acks,
+ amqp_exclusive_consume_fails_on_exclusive_consumer_queue
+ ]},
+ {quorum_queue, [], [
+ all_messages_go_to_one_consumer,
+ fallback_to_another_consumer_when_first_one_is_cancelled,
+ fallback_to_another_consumer_when_exclusive_consumer_channel_is_cancelled,
+ fallback_to_another_consumer_when_first_one_is_cancelled_manual_acks,
+ basic_get_is_unsupported
+ %% amqp_exclusive_consume_fails_on_exclusive_consumer_queue % Exclusive consume not implemented in QQ
+ ]}
+ ].
+
+init_per_suite(Config0) ->
+ rabbit_ct_helpers:log_environment(),
+ Config1 = rabbit_ct_helpers:set_config(Config0, [
+ {rmq_nodename_suffix, ?MODULE}
+ ]),
+ Config = rabbit_ct_helpers:merge_app_env(
+ Config1, {rabbit, [{quorum_tick_interval, 1000}]}),
+ rabbit_ct_helpers:run_setup_steps(Config,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_group(classic_queue, Config) ->
+ [{single_active_consumer_queue_declare,
+ #'queue.declare'{arguments = [
+ {<<"x-single-active-consumer">>, bool, true},
+ {<<"x-queue-type">>, longstr, <<"classic">>}
+ ],
+ auto_delete = true}
+ } | Config];
+init_per_group(quorum_queue, Config) ->
+ Ret = rabbit_ct_broker_helpers:rpc(
+ Config, 0, rabbit_feature_flags, enable, [quorum_queue]),
+ case Ret of
+ ok ->
+ [{single_active_consumer_queue_declare,
+ #'queue.declare'{
+ arguments = [
+ {<<"x-single-active-consumer">>, bool, true},
+ {<<"x-queue-type">>, longstr, <<"quorum">>}
+ ],
+ durable = true, exclusive = false, auto_delete = false}
+ } | Config];
+ Error ->
+ {skip, {"Quorum queues are unsupported", Error}}
+ end.
+
+end_per_group(_, Config) ->
+ Config.
+
+init_per_testcase(Testcase, Config0) ->
+ Config = [{queue_name, atom_to_binary(Testcase, utf8)} | Config0],
+    rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+all_messages_go_to_one_consumer(Config) ->
+ {C, Ch} = connection_and_channel(Config),
+ Q = queue_declare(Ch, Config),
+ MessageCount = 5,
+ ConsumerPid = spawn(?MODULE, consume, [{self(), {maps:new(), 0}, MessageCount}]),
+ #'basic.consume_ok'{consumer_tag = CTag1} =
+ amqp_channel:subscribe(Ch, #'basic.consume'{queue = Q, no_ack = true}, ConsumerPid),
+ #'basic.consume_ok'{consumer_tag = CTag2} =
+ amqp_channel:subscribe(Ch, #'basic.consume'{queue = Q, no_ack = true}, ConsumerPid),
+
+ Publish = #'basic.publish'{exchange = <<>>, routing_key = Q},
+ [amqp_channel:cast(Ch, Publish, #amqp_msg{payload = <<"foobar">>}) || _X <- lists:seq(1, MessageCount)],
+
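+    %% MessageCount is bound above, so this receive only matches a
+    %% consumer_done report with exactly that count; anything else falls
+    %% through to the timeout below.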
+ receive
+ {consumer_done, {MessagesPerConsumer, MessageCount}} ->
+ ?assertEqual(MessageCount, MessageCount),
+ ?assertEqual(2, maps:size(MessagesPerConsumer)),
+ ?assertEqual(MessageCount, maps:get(CTag1, MessagesPerConsumer)),
+ ?assertEqual(0, maps:get(CTag2, MessagesPerConsumer))
+ after ?TIMEOUT ->
+ flush(),
+ throw(failed)
+ end,
+
+ amqp_connection:close(C),
+ ok.
+
+fallback_to_another_consumer_when_first_one_is_cancelled(Config) ->
+ {C, Ch} = connection_and_channel(Config),
+ Q = queue_declare(Ch, Config),
+ MessageCount = 10,
+ ConsumerPid = spawn(?MODULE, consume, [{self(), {maps:new(), 0}, MessageCount}]),
+ #'basic.consume_ok'{consumer_tag = CTag1} =
+ amqp_channel:subscribe(Ch, #'basic.consume'{queue = Q, no_ack = true}, ConsumerPid),
+ #'basic.consume_ok'{consumer_tag = CTag2} =
+ amqp_channel:subscribe(Ch, #'basic.consume'{queue = Q, no_ack = true}, ConsumerPid),
+ #'basic.consume_ok'{consumer_tag = CTag3} =
+ amqp_channel:subscribe(Ch, #'basic.consume'{queue = Q, no_ack = true}, ConsumerPid),
+
+ Publish = #'basic.publish'{exchange = <<>>, routing_key = Q},
+ [amqp_channel:cast(Ch, Publish, #amqp_msg{payload = <<"foobar">>}) || _X <- lists:seq(1, MessageCount div 2)],
+
+ {ok, {MessagesPerConsumer1, _}} = wait_for_messages(MessageCount div 2),
+ FirstActiveConsumerInList = maps:keys(maps:filter(fun(_CTag, Count) -> Count > 0 end, MessagesPerConsumer1)),
+ ?assertEqual(1, length(FirstActiveConsumerInList)),
+
+ FirstActiveConsumer = lists:nth(1, FirstActiveConsumerInList),
+ #'basic.cancel_ok'{} = amqp_channel:call(Ch, #'basic.cancel'{consumer_tag = FirstActiveConsumer}),
+
+ {cancel_ok, FirstActiveConsumer} = wait_for_cancel_ok(),
+
+ [amqp_channel:cast(Ch, Publish, #amqp_msg{payload = <<"foobar">>}) || _X <- lists:seq(MessageCount div 2 + 1, MessageCount - 1)],
+
+ {ok, {MessagesPerConsumer2, _}} = wait_for_messages(MessageCount div 2 - 1),
+ SecondActiveConsumerInList = maps:keys(maps:filter(
+ fun(CTag, Count) -> Count > 0 andalso CTag /= FirstActiveConsumer end,
+ MessagesPerConsumer2)
+ ),
+ ?assertEqual(1, length(SecondActiveConsumerInList)),
+ SecondActiveConsumer = lists:nth(1, SecondActiveConsumerInList),
+
+ #'basic.cancel_ok'{} = amqp_channel:call(Ch, #'basic.cancel'{consumer_tag = SecondActiveConsumer}),
+
+ amqp_channel:cast(Ch, Publish, #amqp_msg{payload = <<"foobar">>}),
+ ?assertMatch({ok, _}, wait_for_messages(1)),
+
+ LastActiveConsumer = lists:nth(1, lists:delete(FirstActiveConsumer, lists:delete(SecondActiveConsumer, [CTag1, CTag2, CTag3]))),
+
+ receive
+ {consumer_done, {MessagesPerConsumer, MessageCount}} ->
+ ?assertEqual(MessageCount, MessageCount),
+ ?assertEqual(3, maps:size(MessagesPerConsumer)),
+ ?assertEqual(MessageCount div 2, maps:get(FirstActiveConsumer, MessagesPerConsumer)),
+ ?assertEqual(MessageCount div 2 - 1, maps:get(SecondActiveConsumer, MessagesPerConsumer)),
+ ?assertEqual(1, maps:get(LastActiveConsumer, MessagesPerConsumer))
+ after ?TIMEOUT ->
+ flush(),
+ throw(failed)
+ end,
+
+ amqp_connection:close(C),
+ ok.
+
+fallback_to_another_consumer_when_first_one_is_cancelled_manual_acks(Config) ->
+    %% Ensure that, although the consumer is cancelled, we still keep its
+    %% unacked messages and accept acknowledgements for them.
+ [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ {C, Ch} = connection_and_channel(Config),
+ Q = queue_declare(Ch, Config),
+ #'basic.consume_ok'{} =
+ amqp_channel:subscribe(Ch, #'basic.consume'{queue = Q, no_ack = false}, self()),
+ #'basic.consume_ok'{} =
+ amqp_channel:subscribe(Ch, #'basic.consume'{queue = Q, no_ack = false}, self()),
+ #'basic.consume_ok'{} =
+ amqp_channel:subscribe(Ch, #'basic.consume'{queue = Q, no_ack = false}, self()),
+ Consumers0 = rpc:call(Server, rabbit_amqqueue, consumers_all, [<<"/">>]),
+ ?assertMatch([_, _, _], lists:filter(fun(Props) ->
+ Resource = proplists:get_value(queue_name, Props),
+ Q == Resource#resource.name
+ end, Consumers0)),
+
+ Publish = #'basic.publish'{exchange = <<>>, routing_key = Q},
+ [amqp_channel:cast(Ch, Publish, #amqp_msg{payload = P}) || P <- [<<"msg1">>, <<"msg2">>]],
+
+ {CTag, DTag1} = receive_deliver(),
+ {_CTag, DTag2} = receive_deliver(),
+
+ quorum_queue_utils:wait_for_messages(Config, [[Q, <<"2">>, <<"0">>, <<"2">>]]),
+ #'basic.cancel_ok'{} = amqp_channel:call(Ch, #'basic.cancel'{consumer_tag = CTag}),
+
+ receive
+ #'basic.cancel_ok'{consumer_tag = CTag} ->
+ ok
+ end,
+ Consumers1 = rpc:call(Server, rabbit_amqqueue, consumers_all, [<<"/">>]),
+ ?assertMatch([_, _], lists:filter(fun(Props) ->
+ Resource = proplists:get_value(queue_name, Props),
+ Q == Resource#resource.name
+ end, Consumers1)),
+ quorum_queue_utils:wait_for_messages(Config, [[Q, <<"2">>, <<"0">>, <<"2">>]]),
+
+ [amqp_channel:cast(Ch, Publish, #amqp_msg{payload = P}) || P <- [<<"msg3">>, <<"msg4">>]],
+
+ quorum_queue_utils:wait_for_messages(Config, [[Q, <<"4">>, <<"0">>, <<"4">>]]),
+ amqp_channel:cast(Ch, #'basic.ack'{delivery_tag = DTag1}),
+ amqp_channel:cast(Ch, #'basic.ack'{delivery_tag = DTag2}),
+ quorum_queue_utils:wait_for_messages(Config, [[Q, <<"2">>, <<"0">>, <<"2">>]]),
+
+ amqp_connection:close(C),
+ ok.
+
+fallback_to_another_consumer_when_exclusive_consumer_channel_is_cancelled(Config) ->
+ {C, Ch} = connection_and_channel(Config),
+ {C1, Ch1} = connection_and_channel(Config),
+ {C2, Ch2} = connection_and_channel(Config),
+ {C3, Ch3} = connection_and_channel(Config),
+ Q = queue_declare(Ch, Config),
+ MessageCount = 10,
+ Consumer1Pid = spawn(?MODULE, consume, [{self(), {maps:new(), 0}, MessageCount div 2}]),
+ Consumer2Pid = spawn(?MODULE, consume, [{self(), {maps:new(), 0}, MessageCount div 2 - 1}]),
+ Consumer3Pid = spawn(?MODULE, consume, [{self(), {maps:new(), 0}, MessageCount div 2 - 1}]),
+ #'basic.consume_ok'{consumer_tag = CTag1} =
+ amqp_channel:subscribe(Ch1, #'basic.consume'{queue = Q, no_ack = true, consumer_tag = <<"1">>}, Consumer1Pid),
+ #'basic.consume_ok'{} =
+ amqp_channel:subscribe(Ch2, #'basic.consume'{queue = Q, no_ack = true, consumer_tag = <<"2">>}, Consumer2Pid),
+ #'basic.consume_ok'{} =
+ amqp_channel:subscribe(Ch3, #'basic.consume'{queue = Q, no_ack = true, consumer_tag = <<"3">>}, Consumer3Pid),
+
+ Publish = #'basic.publish'{exchange = <<>>, routing_key = Q},
+ [amqp_channel:cast(Ch, Publish, #amqp_msg{payload = <<"foobar">>}) || _X <- lists:seq(1, MessageCount div 2)],
+
+ {MessagesPerConsumer1, MessageCount1} = consume_results(),
+ ?assertEqual(MessageCount div 2, MessageCount1),
+ ?assertEqual(1, maps:size(MessagesPerConsumer1)),
+ ?assertEqual(MessageCount div 2, maps:get(CTag1, MessagesPerConsumer1)),
+
+ ok = amqp_channel:close(Ch1),
+
+ [amqp_channel:cast(Ch, Publish, #amqp_msg{payload = <<"foobar">>}) || _X <- lists:seq(MessageCount div 2 + 1, MessageCount - 1)],
+
+ {MessagesPerConsumer2, MessageCount2} = consume_results(),
+ ?assertEqual(MessageCount div 2 - 1, MessageCount2),
+ ?assertEqual(1, maps:size(MessagesPerConsumer2)),
+
+ ok = amqp_channel:close(Ch2),
+
+ amqp_channel:cast(Ch, Publish, #amqp_msg{payload = <<"poison">>}),
+
+ {MessagesPerConsumer3, MessageCount3} = consume_results(),
+ ?assertEqual(1, MessageCount3),
+ ?assertEqual(1, maps:size(MessagesPerConsumer3)),
+
+ [amqp_connection:close(Conn) || Conn <- [C1, C2, C3, C]],
+ ok.
+
+basic_get_is_unsupported(Config) ->
+ {C, Ch} = connection_and_channel(Config),
+ Q = queue_declare(Ch, Config),
+
+ ?assertExit(
+ {{shutdown, {server_initiated_close, 405, _}}, _},
+ amqp_channel:call(Ch, #'basic.get'{queue = Q, no_ack = false})),
+
+ amqp_connection:close(C),
+ ok.
+
+amqp_exclusive_consume_fails_on_exclusive_consumer_queue(Config) ->
+ {C, Ch} = connection_and_channel(Config),
+ Q = queue_declare(Ch, Config),
+ ?assertExit(
+ {{shutdown, {server_initiated_close, 403, _}}, _},
+ amqp_channel:call(Ch, #'basic.consume'{queue = Q, exclusive = true})
+ ),
+ amqp_connection:close(C),
+ ok.
+
+connection_and_channel(Config) ->
+ C = rabbit_ct_client_helpers:open_unmanaged_connection(Config),
+ {ok, Ch} = amqp_connection:open_channel(C),
+ {C, Ch}.
+
+queue_declare(Channel, Config) ->
+ QueueName = ?config(queue_name, Config),
+ Declare0 = ?config(single_active_consumer_queue_declare, Config),
+ Declare = Declare0#'queue.declare'{queue = QueueName},
+ #'queue.declare_ok'{queue = Q} = amqp_channel:call(Channel, Declare),
+ Q.
+
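+%% Consumer loop: counts deliveries per consumer tag and reports to the
+%% parent after CountDown deliveries, or immediately on a <<"poison">>
+%% payload.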
+consume({Parent, State, 0}) ->
+ Parent ! {consumer_done, State};
+consume({Parent, {MessagesPerConsumer, MessageCount}, CountDown}) ->
+ receive
+ #'basic.consume_ok'{consumer_tag = CTag} ->
+ consume({Parent, {maps:put(CTag, 0, MessagesPerConsumer), MessageCount}, CountDown});
+ {#'basic.deliver'{consumer_tag = CTag}, #amqp_msg{payload = <<"poison">>}} ->
+ Parent ! {consumer_done,
+ {maps:update_with(CTag, fun(V) -> V + 1 end, MessagesPerConsumer),
+ MessageCount + 1}};
+ {#'basic.deliver'{consumer_tag = CTag}, _Content} ->
+ NewState = {maps:update_with(CTag, fun(V) -> V + 1 end, MessagesPerConsumer),
+ MessageCount + 1},
+ Parent ! {message, NewState},
+ consume({Parent, NewState, CountDown - 1});
+ #'basic.cancel_ok'{consumer_tag = CTag} ->
+ Parent ! {cancel_ok, CTag},
+ consume({Parent, {MessagesPerConsumer, MessageCount}, CountDown});
+ _ ->
+ consume({Parent, {MessagesPerConsumer, MessageCount}, CountDown})
+ after ?TIMEOUT ->
+ Parent ! {consumer_timeout, {MessagesPerConsumer, MessageCount}},
+ flush(),
+ exit(consumer_timeout)
+ end.
+
+consume_results() ->
+ receive
+ {consumer_done, {MessagesPerConsumer, MessageCount}} ->
+ {MessagesPerConsumer, MessageCount};
+ {consumer_timeout, {MessagesPerConsumer, MessageCount}} ->
+ {MessagesPerConsumer, MessageCount};
+ _ ->
+ consume_results()
+ after ?TIMEOUT ->
+ flush(),
+ throw(failed)
+ end.
+
+wait_for_messages(ExpectedCount) ->
+ wait_for_messages(ExpectedCount, {}).
+
+wait_for_messages(0, State) ->
+ {ok, State};
+wait_for_messages(ExpectedCount, State) ->
+ receive
+ {message, {MessagesPerConsumer, MessageCount}} ->
+ wait_for_messages(ExpectedCount - 1, {MessagesPerConsumer, MessageCount})
+ after 5000 ->
+ {missing, ExpectedCount, State}
+ end.
+
+wait_for_cancel_ok() ->
+ receive
+ {cancel_ok, CTag} ->
+ {cancel_ok, CTag}
+ after 5000 ->
+ throw(consumer_cancel_ok_timeout)
+ end.
+
+receive_deliver() ->
+ receive
+ {#'basic.deliver'{consumer_tag = CTag,
+ delivery_tag = DTag}, _} ->
+ {CTag, DTag}
+ after 5000 ->
+ exit(deliver_timeout)
+ end.
+
+flush() ->
+ receive
+ Msg ->
+ ct:pal("flushed: ~w~n", [Msg]),
+ flush()
+ after 10 ->
+ ok
+ end.
diff --git a/deps/rabbit/test/sync_detection_SUITE.erl b/deps/rabbit/test/sync_detection_SUITE.erl
new file mode 100644
index 0000000000..55a86b7b3d
--- /dev/null
+++ b/deps/rabbit/test/sync_detection_SUITE.erl
@@ -0,0 +1,243 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(sync_detection_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-compile(export_all).
+
+-define(LOOP_RECURSION_DELAY, 100).
+
+all() ->
+ [
+ {group, cluster_size_2},
+ {group, cluster_size_3}
+ ].
+
+groups() ->
+ [
+ {cluster_size_2, [], [
+ follower_synchronization
+ ]},
+ {cluster_size_3, [], [
+ follower_synchronization_ttl
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(cluster_size_2, Config) ->
+ rabbit_ct_helpers:set_config(Config, [{rmq_nodes_count, 2}]);
+init_per_group(cluster_size_3, Config) ->
+ rabbit_ct_helpers:set_config(Config, [{rmq_nodes_count, 3}]).
+
+end_per_group(_, Config) ->
+ Config.
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase),
+ ClusterSize = ?config(rmq_nodes_count, Config),
+ TestNumber = rabbit_ct_helpers:testcase_number(Config, ?MODULE, Testcase),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodes_count, ClusterSize},
+ {rmq_nodes_clustered, true},
+ {rmq_nodename_suffix, Testcase},
+ {tcp_ports_base, {skip_n_nodes, TestNumber * ClusterSize}}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps() ++ [
+ fun rabbit_ct_broker_helpers:set_ha_policy_two_pos/1,
+ fun rabbit_ct_broker_helpers:set_ha_policy_two_pos_batch_sync/1
+ ]).
+
+end_per_testcase(Testcase, Config) ->
+ Config1 = rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()),
+ rabbit_ct_helpers:testcase_finished(Config1, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+follower_synchronization(Config) ->
+ [Master, Slave] = rabbit_ct_broker_helpers:get_node_configs(Config,
+ nodename),
+ Channel = rabbit_ct_client_helpers:open_channel(Config, Master),
+ Queue = <<"ha.two.test">>,
+ #'queue.declare_ok'{} =
+ amqp_channel:call(Channel, #'queue.declare'{queue = Queue,
+ auto_delete = false}),
+
+ %% The comments on the right are the queue length and the pending acks on
+ %% the master.
+ rabbit_ct_broker_helpers:stop_broker(Config, Slave),
+
+    %% We get and ack one message while the mirror is down, and check
+    %% that when we start the mirror it is not marked as synced until we
+    %% ack the message. We also publish another message while the mirror
+    %% is up.
+ send_dummy_message(Channel, Queue), % 1 - 0
+ {#'basic.get_ok'{delivery_tag = Tag1}, _} =
+ amqp_channel:call(Channel, #'basic.get'{queue = Queue}), % 0 - 1
+
+ rabbit_ct_broker_helpers:start_broker(Config, Slave),
+
+ follower_unsynced(Master, Queue),
+ send_dummy_message(Channel, Queue), % 1 - 1
+ follower_unsynced(Master, Queue),
+
+ amqp_channel:cast(Channel, #'basic.ack'{delivery_tag = Tag1}), % 1 - 0
+
+ follower_synced(Master, Queue),
+
+ %% We restart the mirror and we send a message, so that the mirror will only
+ %% have one of the messages.
+ rabbit_ct_broker_helpers:stop_broker(Config, Slave),
+ rabbit_ct_broker_helpers:start_broker(Config, Slave),
+
+ send_dummy_message(Channel, Queue), % 2 - 0
+
+ follower_unsynced(Master, Queue),
+
+ %% We reject the message that the mirror doesn't have, and verify that it's
+ %% still unsynced
+ {#'basic.get_ok'{delivery_tag = Tag2}, _} =
+ amqp_channel:call(Channel, #'basic.get'{queue = Queue}), % 1 - 1
+ follower_unsynced(Master, Queue),
+ amqp_channel:cast(Channel, #'basic.reject'{ delivery_tag = Tag2,
+ requeue = true }), % 2 - 0
+ follower_unsynced(Master, Queue),
+ {#'basic.get_ok'{delivery_tag = Tag3}, _} =
+ amqp_channel:call(Channel, #'basic.get'{queue = Queue}), % 1 - 1
+ amqp_channel:cast(Channel, #'basic.ack'{delivery_tag = Tag3}), % 1 - 0
+ follower_synced(Master, Queue),
+ {#'basic.get_ok'{delivery_tag = Tag4}, _} =
+ amqp_channel:call(Channel, #'basic.get'{queue = Queue}), % 0 - 1
+ amqp_channel:cast(Channel, #'basic.ack'{delivery_tag = Tag4}), % 0 - 0
+ follower_synced(Master, Queue).
+
+follower_synchronization_ttl(Config) ->
+ [Master, Slave, DLX] = rabbit_ct_broker_helpers:get_node_configs(Config,
+ nodename),
+ Channel = rabbit_ct_client_helpers:open_channel(Config, Master),
+ DLXChannel = rabbit_ct_client_helpers:open_channel(Config, DLX),
+
+ %% We declare a DLX queue to wait for messages to be TTL'ed
+ DLXQueue = <<"dlx-queue">>,
+ #'queue.declare_ok'{} =
+ amqp_channel:call(Channel, #'queue.declare'{queue = DLXQueue,
+ auto_delete = false}),
+
+ TestMsgTTL = 5000,
+ Queue = <<"ha.two.test">>,
+ %% Sadly we need fairly high numbers for the TTL because starting/stopping
+ %% nodes takes a fair amount of time.
+ Args = [{<<"x-message-ttl">>, long, TestMsgTTL},
+ {<<"x-dead-letter-exchange">>, longstr, <<>>},
+ {<<"x-dead-letter-routing-key">>, longstr, DLXQueue}],
+ #'queue.declare_ok'{} =
+ amqp_channel:call(Channel, #'queue.declare'{queue = Queue,
+ auto_delete = false,
+ arguments = Args}),
+
+ follower_synced(Master, Queue),
+
+ %% All unknown
+ rabbit_ct_broker_helpers:stop_broker(Config, Slave),
+ send_dummy_message(Channel, Queue),
+ send_dummy_message(Channel, Queue),
+ rabbit_ct_broker_helpers:start_broker(Config, Slave),
+ follower_unsynced(Master, Queue),
+ wait_for_messages(DLXQueue, DLXChannel, 2),
+ follower_synced(Master, Queue),
+
+ %% 1 unknown, 1 known
+ rabbit_ct_broker_helpers:stop_broker(Config, Slave),
+ send_dummy_message(Channel, Queue),
+ rabbit_ct_broker_helpers:start_broker(Config, Slave),
+ follower_unsynced(Master, Queue),
+ send_dummy_message(Channel, Queue),
+ follower_unsynced(Master, Queue),
+ wait_for_messages(DLXQueue, DLXChannel, 2),
+ follower_synced(Master, Queue),
+
+    %% Both known
+ send_dummy_message(Channel, Queue),
+ send_dummy_message(Channel, Queue),
+ follower_synced(Master, Queue),
+ wait_for_messages(DLXQueue, DLXChannel, 2),
+ follower_synced(Master, Queue),
+
+ ok.
+
+send_dummy_message(Channel, Queue) ->
+ Payload = <<"foo">>,
+ Publish = #'basic.publish'{exchange = <<>>, routing_key = Queue},
+ amqp_channel:cast(Channel, Publish, #amqp_msg{payload = Payload}).
+
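+%% Returns the synchronised mirror pids of Queue on Node; the
+%% synchronised_slave_pids info item is '' rather than a list when none
+%% are synchronised, so normalise it here.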
+follower_pids(Node, Queue) ->
+ {ok, Q} = rpc:call(Node, rabbit_amqqueue, lookup,
+ [rabbit_misc:r(<<"/">>, queue, Queue)]),
+ SSP = synchronised_slave_pids,
+ [{SSP, Pids}] = rpc:call(Node, rabbit_amqqueue, info, [Q, [SSP]]),
+ case Pids of
+ '' -> [];
+ _ -> Pids
+ end.
+
+%% Mnesia synchronization takes a while, but we don't want to wait for
+%% the timetrap to fail the test, since it is set quite high.
+wait_for_sync_status(Status, Node, Queue) ->
+ Max = 90000 / ?LOOP_RECURSION_DELAY,
+ wait_for_sync_status(0, Max, Status, Node, Queue).
+
+wait_for_sync_status(N, Max, Status, Node, Queue) when N >= Max ->
+ erlang:error({sync_status_max_tries_failed,
+ [{queue, Queue},
+ {node, Node},
+ {expected_status, Status},
+ {max_tried, Max}]});
+wait_for_sync_status(N, Max, Status, Node, Queue) ->
+ Synced = length(follower_pids(Node, Queue)) =:= 1,
+ case Synced =:= Status of
+ true -> ok;
+ false -> timer:sleep(?LOOP_RECURSION_DELAY),
+ wait_for_sync_status(N + 1, Max, Status, Node, Queue)
+ end.
+
+follower_synced(Node, Queue) ->
+ wait_for_sync_status(true, Node, Queue).
+
+follower_unsynced(Node, Queue) ->
+ wait_for_sync_status(false, Node, Queue).
+
+wait_for_messages(Queue, Channel, N) ->
+ Sub = #'basic.consume'{queue = Queue},
+ #'basic.consume_ok'{consumer_tag = CTag} = amqp_channel:call(Channel, Sub),
+ receive
+ #'basic.consume_ok'{} -> ok
+ end,
+ lists:foreach(
+ fun (_) -> receive
+ {#'basic.deliver'{delivery_tag = Tag}, _Content} ->
+ amqp_channel:cast(Channel,
+ #'basic.ack'{delivery_tag = Tag})
+ end
+ end, lists:seq(1, N)),
+ amqp_channel:call(Channel, #'basic.cancel'{consumer_tag = CTag}).
diff --git a/deps/rabbit/test/temp/head_message_timestamp_tests.py b/deps/rabbit/test/temp/head_message_timestamp_tests.py
new file mode 100755
index 0000000000..6698b88b7b
--- /dev/null
+++ b/deps/rabbit/test/temp/head_message_timestamp_tests.py
@@ -0,0 +1,131 @@
+#!/usr/bin/python
+#
+# Tests for the SLA patch which adds the head_message_timestamp queue stat.
+# Uses both the management interface via rabbitmqadmin and the AMQP interface via Pika.
+# There's no particular reason to have used rabbitmqadmin other than saving some bulk.
+# Similarly, the separate declaration of exchanges and queues is just a preference
+# following a typical enterprise policy where admin users create these resources.
+
+from datetime import datetime
+import json
+import pika
+import os
+import sys
+from time import clock, mktime, sleep
+import unittest
+
+# Uses the rabbitmqadmin script. To be imported, it must be given a .py
+# suffix and placed on the Python path.
+from rabbitmqadmin import *
+
+TEXCH = 'head-message-timestamp-test'
+TQUEUE = 'head-message-timestamp-test-queue'
+
+TIMEOUT_SECS = 10
+
+TIMESTAMP1 = mktime(datetime(2010,1,1,12,00,01).timetuple())
+TIMESTAMP2 = mktime(datetime(2010,1,1,12,00,02).timetuple())
+
+AMQP_PORT = 99
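+# Placeholder only; setUp() derives the real AMQP port from the
+# management port at runtime.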
+
+DELIVERY_MODE = 2
+DURABLE = False
+
+def log(msg):
+ print("\nINFO: " + msg)
+
+class RabbitTestCase(unittest.TestCase):
+ def setUp(self):
+ parser.set_conflict_handler('resolve')
+ (options, args) = make_configuration()
+ AMQP_PORT = int(options.port) - 10000
+
+ self.mgmt = Management(options, args)
+ self.mgmt.put('/exchanges/%2f/' + TEXCH, '{"type" : "fanout", "durable":' + str(DURABLE).lower() + '}')
+ self.mgmt.put('/queues/%2f/' + TQUEUE, '{"auto_delete":false,"durable":' + str(DURABLE).lower() + ',"arguments":[]}')
+ self.mgmt.post('/bindings/%2f/e/' + TEXCH + '/q/' + TQUEUE, '{"routing_key": ".*", "arguments":[]}')
+ self.credentials = pika.PlainCredentials(options.username, options.password)
+ parameters = pika.ConnectionParameters(options.hostname, port=AMQP_PORT, credentials=self.credentials)
+ self.connection = pika.BlockingConnection(parameters)
+ self.channel = self.connection.channel()
+
+ def tearDown(self):
+ parser.set_conflict_handler('resolve')
+ (options, args) = make_configuration()
+ self.mgmt = Management(options, args)
+ self.mgmt.delete('/queues/%2f/' + TQUEUE)
+ self.mgmt.delete('/exchanges/%2f/' + TEXCH)
+
+class RabbitSlaTestCase(RabbitTestCase):
+ def get_queue_stats(self, queue_name):
+ stats_str = self.mgmt.get('/queues/%2f/' + queue_name)
+ return json.loads(stats_str)
+
+ def get_head_message_timestamp(self, queue_name):
+ return self.get_queue_stats(queue_name)["head_message_timestamp"]
+
+ def send(self, message, timestamp=None):
+ self.channel.basic_publish(TEXCH, '', message,
+ pika.BasicProperties(content_type='text/plain',
+ delivery_mode=DELIVERY_MODE,
+ timestamp=timestamp))
+ log("Sent message with body: " + str(message))
+
+ def receive(self, queue):
+ method_frame, header_frame, body = self.channel.basic_get(queue = queue)
+ log("Received message with body: " + str(body))
+ return method_frame.delivery_tag, body
+
+ def ack(self, delivery_tag):
+ self.channel.basic_ack(delivery_tag)
+
+ def nack(self, delivery_tag):
+ self.channel.basic_nack(delivery_tag)
+
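+    # Poll the management API until head_message_timestamp moves away
+    # from old_timestamp, or give up after TIMEOUT_SECS.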
+ def wait_for_new_timestamp(self, queue, old_timestamp):
+ stats_wait_start = clock()
+ while ((clock() - stats_wait_start) < TIMEOUT_SECS and
+ self.get_head_message_timestamp(queue) == old_timestamp):
+ sleep(0.1)
+ log('Queue stats updated in ' + str(clock() - stats_wait_start) + ' secs.')
+ return self.get_head_message_timestamp(queue)
+
+ # TESTS
+
+ def test_no_timestamp_when_queue_is_empty(self):
+ assert self.get_head_message_timestamp(TQUEUE) == ''
+
+ def test_has_timestamp_when_first_msg_is_added(self):
+ self.send('Msg1', TIMESTAMP1)
+ stats_timestamp = self.wait_for_new_timestamp(TQUEUE, '')
+ assert stats_timestamp == TIMESTAMP1
+
+ def test_no_timestamp_when_last_msg_is_removed(self):
+ self.send('Msg1', TIMESTAMP1)
+ stats_timestamp = self.wait_for_new_timestamp(TQUEUE, '')
+ tag, body = self.receive(TQUEUE)
+ self.ack(tag)
+ stats_timestamp = self.wait_for_new_timestamp(TQUEUE, TIMESTAMP1)
+ assert stats_timestamp == ''
+
+ def test_timestamp_updated_when_msg_is_removed(self):
+ self.send('Msg1', TIMESTAMP1)
+ stats_timestamp = self.wait_for_new_timestamp(TQUEUE, '')
+ self.send('Msg2', TIMESTAMP2)
+ tag, body = self.receive(TQUEUE)
+ self.ack(tag)
+ stats_timestamp = self.wait_for_new_timestamp(TQUEUE, TIMESTAMP1)
+ assert stats_timestamp == TIMESTAMP2
+
+ def test_timestamp_not_updated_before_msg_is_acked(self):
+ self.send('Msg1', TIMESTAMP1)
+ stats_timestamp = self.wait_for_new_timestamp(TQUEUE, '')
+ tag, body = self.receive(TQUEUE)
+ sleep(1) # Allow time for update to appear if it was going to (it shouldn't)
+ assert self.get_head_message_timestamp(TQUEUE) == TIMESTAMP1
+ self.ack(tag)
+
+if __name__ == '__main__':
+ unittest.main(verbosity = 2)
+
+
diff --git a/deps/rabbit/test/temp/rabbitmqadmin.py b/deps/rabbit/test/temp/rabbitmqadmin.py
new file mode 100755
index 0000000000..cdddd56497
--- /dev/null
+++ b/deps/rabbit/test/temp/rabbitmqadmin.py
@@ -0,0 +1,934 @@
+#!/usr/bin/env python
+
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at https://mozilla.org/MPL/2.0/.
+# Copyright (c) 2010-2020 VMware, Inc. or its affiliates. All rights reserved
+
+import sys
+if sys.version_info[0] < 2 or sys.version_info[1] < 6:
+ print "Sorry, rabbitmqadmin requires at least Python 2.6."
+ sys.exit(1)
+
+from ConfigParser import ConfigParser, NoSectionError
+from optparse import OptionParser, TitledHelpFormatter
+import httplib
+import urllib
+import urlparse
+import base64
+import json
+import os
+import socket
+
+VERSION = '0.0.0'
+
+LISTABLE = {'connections': {'vhost': False},
+ 'channels': {'vhost': False},
+ 'consumers': {'vhost': True},
+ 'exchanges': {'vhost': True},
+ 'queues': {'vhost': True},
+ 'bindings': {'vhost': True},
+ 'users': {'vhost': False},
+ 'vhosts': {'vhost': False},
+ 'permissions': {'vhost': False},
+ 'nodes': {'vhost': False},
+ 'parameters': {'vhost': False,
+ 'json': ['value']},
+ 'policies': {'vhost': False,
+ 'json': ['definition']}}
+
+SHOWABLE = {'overview': {'vhost': False}}
+
+PROMOTE_COLUMNS = ['vhost', 'name', 'type',
+ 'source', 'destination', 'destination_type', 'routing_key']
+
+URIS = {
+ 'exchange': '/exchanges/{vhost}/{name}',
+ 'queue': '/queues/{vhost}/{name}',
+ 'binding': '/bindings/{vhost}/e/{source}/{destination_char}/{destination}',
+ 'binding_del':'/bindings/{vhost}/e/{source}/{destination_char}/{destination}/{properties_key}',
+ 'vhost': '/vhosts/{name}',
+ 'user': '/users/{name}',
+ 'permission': '/permissions/{vhost}/{user}',
+ 'parameter': '/parameters/{component}/{vhost}/{name}',
+ 'policy': '/policies/{vhost}/{name}'
+ }
+
+DECLARABLE = {
+ 'exchange': {'mandatory': ['name', 'type'],
+ 'json': ['arguments'],
+ 'optional': {'auto_delete': 'false', 'durable': 'true',
+ 'internal': 'false', 'arguments': {}}},
+ 'queue': {'mandatory': ['name'],
+ 'json': ['arguments'],
+ 'optional': {'auto_delete': 'false', 'durable': 'true',
+ 'arguments': {}, 'node': None}},
+ 'binding': {'mandatory': ['source', 'destination'],
+ 'json': ['arguments'],
+ 'optional': {'destination_type': 'queue',
+ 'routing_key': '', 'arguments': {}}},
+ 'vhost': {'mandatory': ['name'],
+ 'optional': {'tracing': None}},
+ 'user': {'mandatory': ['name', 'password', 'tags'],
+ 'optional': {}},
+ 'permission': {'mandatory': ['vhost', 'user', 'configure', 'write', 'read'],
+ 'optional': {}},
+ 'parameter': {'mandatory': ['component', 'name', 'value'],
+ 'json': ['value'],
+ 'optional': {}},
+ # Priority is 'json' to convert to int
+ 'policy': {'mandatory': ['name', 'pattern', 'definition'],
+ 'json': ['definition', 'priority'],
+ 'optional': {'priority' : 0, 'apply-to': None}}
+ }
+
+DELETABLE = {
+ 'exchange': {'mandatory': ['name']},
+ 'queue': {'mandatory': ['name']},
+ 'binding': {'mandatory': ['source', 'destination_type', 'destination',
+ 'properties_key']},
+ 'vhost': {'mandatory': ['name']},
+ 'user': {'mandatory': ['name']},
+ 'permission': {'mandatory': ['vhost', 'user']},
+ 'parameter': {'mandatory': ['component', 'name']},
+ 'policy': {'mandatory': ['name']}
+ }
+
+CLOSABLE = {
+ 'connection': {'mandatory': ['name'],
+ 'optional': {},
+ 'uri': '/connections/{name}'}
+ }
+
+PURGABLE = {
+ 'queue': {'mandatory': ['name'],
+ 'optional': {},
+ 'uri': '/queues/{vhost}/{name}/contents'}
+ }
+
+EXTRA_VERBS = {
+ 'publish': {'mandatory': ['routing_key'],
+ 'optional': {'payload': None,
+ 'exchange': 'amq.default',
+ 'payload_encoding': 'string'},
+ 'uri': '/exchanges/{vhost}/{exchange}/publish'},
+ 'get': {'mandatory': ['queue'],
+ 'optional': {'count': '1', 'requeue': 'true',
+ 'payload_file': None, 'encoding': 'auto'},
+ 'uri': '/queues/{vhost}/{queue}/get'}
+}
+
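+# Attach each verb's URI template; deleting a binding needs the
+# properties_key variant of the binding URI.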
+for k in DECLARABLE:
+ DECLARABLE[k]['uri'] = URIS[k]
+
+for k in DELETABLE:
+ DELETABLE[k]['uri'] = URIS[k]
+ DELETABLE[k]['optional'] = {}
+DELETABLE['binding']['uri'] = URIS['binding_del']
+
+def short_usage():
+ return "rabbitmqadmin [options] subcommand"
+
+def title(name):
+ return "\n%s\n%s\n\n" % (name, '=' * len(name))
+
+def subcommands_usage():
+ usage = """Usage
+=====
+ """ + short_usage() + """
+
+ where subcommand is one of:
+""" + title("Display")
+
+ for l in LISTABLE:
+ usage += " list {0} [<column>...]\n".format(l)
+ for s in SHOWABLE:
+ usage += " show {0} [<column>...]\n".format(s)
+ usage += title("Object Manipulation")
+ usage += fmt_usage_stanza(DECLARABLE, 'declare')
+ usage += fmt_usage_stanza(DELETABLE, 'delete')
+ usage += fmt_usage_stanza(CLOSABLE, 'close')
+ usage += fmt_usage_stanza(PURGABLE, 'purge')
+ usage += title("Broker Definitions")
+ usage += """ export <file>
+ import <file>
+"""
+ usage += title("Publishing and Consuming")
+ usage += fmt_usage_stanza(EXTRA_VERBS, '')
+ usage += """
+ * If payload is not specified on publish, standard input is used
+
+ * If payload_file is not specified on get, the payload will be shown on
+ standard output along with the message metadata
+
+ * If payload_file is specified on get, count must not be set
+"""
+ return usage
+
+def config_usage():
+ usage = "Usage\n=====\n" + short_usage()
+ usage += "\n" + title("Configuration File")
+ usage += """ It is possible to specify a configuration file from the command line.
+ Hosts can be configured easily in a configuration file and called
+ from the command line.
+"""
+ usage += title("Example")
+ usage += """ # rabbitmqadmin.conf.example START
+
+ [host_normal]
+ hostname = localhost
+ port = 15672
+ username = guest
+ password = guest
+ declare_vhost = / # Used as default for declare / delete only
+ vhost = / # Used as default for declare / delete / list
+
+ [host_ssl]
+ hostname = otherhost
+ port = 15672
+ username = guest
+ password = guest
+ ssl = True
+ ssl_key_file = /path/to/key.pem
+ ssl_cert_file = /path/to/cert.pem
+
+ # rabbitmqadmin.conf.example END
+"""
+ usage += title("Use")
+ usage += """ rabbitmqadmin -c rabbitmqadmin.conf.example -N host_normal ..."""
+ return usage
+
+def more_help():
+ return """
+More Help
+=========
+
+For more help use the help subcommand:
+
+ rabbitmqadmin help subcommands # For a list of available subcommands
+ rabbitmqadmin help config # For help with the configuration file
+"""
+
+def fmt_usage_stanza(root, verb):
+ def fmt_args(args):
+ res = " ".join(["{0}=...".format(a) for a in args['mandatory']])
+ opts = " ".join("{0}=...".format(o) for o in args['optional'].keys())
+ if opts != "":
+ res += " [{0}]".format(opts)
+ return res
+
+ text = ""
+ if verb != "":
+ verb = " " + verb
+ for k in root.keys():
+ text += " {0} {1} {2}\n".format(verb, k, fmt_args(root[k]))
+ return text
+
+default_options = { "hostname" : "localhost",
+ "port" : "15672",
+ "declare_vhost" : "/",
+ "username" : "guest",
+ "password" : "guest",
+ "ssl" : False,
+ "verbose" : True,
+ "format" : "table",
+ "depth" : 1,
+ "bash_completion" : False }
+
+
+class MyFormatter(TitledHelpFormatter):
+ def format_epilog(self, epilog):
+ return epilog
+
+parser = OptionParser(usage=short_usage(),
+ formatter=MyFormatter(),
+ epilog=more_help())
+
+def make_parser():
+ def add(*args, **kwargs):
+ key = kwargs['dest']
+ if key in default_options:
+ default = " [default: %s]" % default_options[key]
+ kwargs['help'] = kwargs['help'] + default
+ parser.add_option(*args, **kwargs)
+
+ add("-c", "--config", dest="config",
+ help="configuration file [default: ~/.rabbitmqadmin.conf]",
+ metavar="CONFIG")
+ add("-N", "--node", dest="node",
+ help="node described in the configuration file [default: 'default'" + \
+ " only if configuration file is specified]",
+ metavar="NODE")
+ add("-H", "--host", dest="hostname",
+ help="connect to host HOST" ,
+ metavar="HOST")
+ add("-P", "--port", dest="port",
+ help="connect to port PORT",
+ metavar="PORT")
+ add("-V", "--vhost", dest="vhost",
+ help="connect to vhost VHOST [default: all vhosts for list, '/' for declare]",
+ metavar="VHOST")
+ add("-u", "--username", dest="username",
+ help="connect using username USERNAME",
+ metavar="USERNAME")
+ add("-p", "--password", dest="password",
+ help="connect using password PASSWORD",
+ metavar="PASSWORD")
+ add("-q", "--quiet", action="store_false", dest="verbose",
+ help="suppress status messages")
+ add("-s", "--ssl", action="store_true", dest="ssl",
+ help="connect with ssl")
+ add("--ssl-key-file", dest="ssl_key_file",
+ help="PEM format key file for SSL")
+ add("--ssl-cert-file", dest="ssl_cert_file",
+ help="PEM format certificate file for SSL")
+ add("-f", "--format", dest="format",
+ help="format for listing commands - one of [" + ", ".join(FORMATS.keys()) + "]")
+ add("-S", "--sort", dest="sort", help="sort key for listing queries")
+ add("-R", "--sort-reverse", action="store_true", dest="sort_reverse",
+ help="reverse the sort order")
+ add("-d", "--depth", dest="depth",
+ help="maximum depth to recurse for listing tables")
+ add("--bash-completion", action="store_true",
+ dest="bash_completion",
+ help="Print bash completion script")
+ add("--version", action="store_true",
+ dest="version",
+ help="Display version and exit")
+
+def default_config():
+ home = os.getenv('USERPROFILE') or os.getenv('HOME')
+ if home is not None:
+ config_file = home + os.sep + ".rabbitmqadmin.conf"
+ if os.path.isfile(config_file):
+ return config_file
+ return None
+
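+# Resolve the effective options: parse the command line, fill unset
+# values with built-in defaults, then overlay the selected config-file
+# section.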
+def make_configuration():
+ make_parser()
+ (options, args) = parser.parse_args()
+ setattr(options, "declare_vhost", None)
+ if options.version:
+ print_version()
+ if options.config is None:
+ config_file = default_config()
+ if config_file is not None:
+ setattr(options, "config", config_file)
+ else:
+ if not os.path.isfile(options.config):
+ assert_usage(False,
+ "Could not read config file '%s'" % options.config)
+
+ if options.node is None and options.config:
+ options.node = "default"
+ else:
+ options.node = options.node
+ for (key, val) in default_options.items():
+ if getattr(options, key) is None:
+ setattr(options, key, val)
+
+ if options.config is not None:
+ config = ConfigParser()
+ try:
+ config.read(options.config)
+ new_conf = dict(config.items(options.node))
+ except NoSectionError, error:
+ if options.node == "default":
+ pass
+ else:
+ assert_usage(False, ("Could not read section '%s' in config file" +
+ " '%s':\n %s") %
+ (options.node, options.config, error))
+ else:
+ for key, val in new_conf.items():
+ setattr(options, key, val)
+
+ return (options, args)
+
+def assert_usage(expr, error):
+ if not expr:
+ output("\nERROR: {0}\n".format(error))
+ output("{0} --help for help\n".format(os.path.basename(sys.argv[0])))
+ sys.exit(1)
+
+def print_version():
+ output("rabbitmqadmin {0}".format(VERSION))
+ sys.exit(0)
+
+def column_sort_key(col):
+ if col in PROMOTE_COLUMNS:
+ return (1, PROMOTE_COLUMNS.index(col))
+ else:
+ return (2, col)
+
+def main():
+ (options, args) = make_configuration()
+ if options.bash_completion:
+ print_bash_completion()
+ exit(0)
+ assert_usage(len(args) > 0, 'Action not specified')
+ mgmt = Management(options, args[1:])
+ mode = "invoke_" + args[0]
+ assert_usage(hasattr(mgmt, mode),
+ 'Action {0} not understood'.format(args[0]))
+ method = getattr(mgmt, "invoke_%s" % args[0])
+ method()
+
+def output(s):
+ print maybe_utf8(s, sys.stdout)
+
+def die(s):
+ sys.stderr.write(maybe_utf8("*** {0}\n".format(s), sys.stderr))
+ exit(1)
+
+def maybe_utf8(s, stream):
+ if stream.isatty():
+ # It will have an encoding, which Python will respect
+ return s
+ else:
+ # It won't have an encoding, and Python will pick ASCII by default
+ return s.encode('utf-8')
+
+class Management:
+ def __init__(self, options, args):
+ self.options = options
+ self.args = args
+
+ def get(self, path):
+ return self.http("GET", "/api%s" % path, "")
+
+ def put(self, path, body):
+ return self.http("PUT", "/api%s" % path, body)
+
+ def post(self, path, body):
+ return self.http("POST", "/api%s" % path, body)
+
+ def delete(self, path):
+ return self.http("DELETE", "/api%s" % path, "")
+
+ def http(self, method, path, body):
+ if self.options.ssl:
+ conn = httplib.HTTPSConnection(self.options.hostname,
+ self.options.port,
+ self.options.ssl_key_file,
+ self.options.ssl_cert_file)
+ else:
+ conn = httplib.HTTPConnection(self.options.hostname,
+ self.options.port)
+ headers = {"Authorization":
+ "Basic " + base64.b64encode(self.options.username + ":" +
+ self.options.password)}
+ if body != "":
+ headers["Content-Type"] = "application/json"
+ try:
+ conn.request(method, path, body, headers)
+ except socket.error, e:
+ die("Could not connect: {0}".format(e))
+ resp = conn.getresponse()
+ if resp.status == 400:
+ die(json.loads(resp.read())['reason'])
+ if resp.status == 401:
+ die("Access refused: {0}".format(path))
+ if resp.status == 404:
+ die("Not found: {0}".format(path))
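+        # On a redirect, re-point at the returned host/port and retry
+        # the request.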
+ if resp.status == 301:
+ url = urlparse.urlparse(resp.getheader('location'))
+ [host, port] = url.netloc.split(':')
+ self.options.hostname = host
+ self.options.port = int(port)
+ return self.http(method, url.path + '?' + url.query, body)
+ if resp.status < 200 or resp.status > 400:
+ raise Exception("Received %d %s for path %s\n%s"
+ % (resp.status, resp.reason, path, resp.read()))
+ return resp.read()
+
+ def verbose(self, string):
+ if self.options.verbose:
+ output(string)
+
+ def get_arg(self):
+ assert_usage(len(self.args) == 1, 'Exactly one argument required')
+ return self.args[0]
+
+ def invoke_help(self):
+ if len(self.args) == 0:
+ parser.print_help()
+ else:
+ help_cmd = self.get_arg()
+ if help_cmd == 'subcommands':
+ usage = subcommands_usage()
+ elif help_cmd == 'config':
+ usage = config_usage()
+ else:
+ assert_usage(False, """help topic must be one of:
+ subcommands
+ config""")
+ print usage
+ exit(0)
+
+ def invoke_publish(self):
+ (uri, upload) = self.parse_args(self.args, EXTRA_VERBS['publish'])
+ upload['properties'] = {} # TODO do we care here?
+ if not 'payload' in upload:
+ data = sys.stdin.read()
+ upload['payload'] = base64.b64encode(data)
+ upload['payload_encoding'] = 'base64'
+ resp = json.loads(self.post(uri, json.dumps(upload)))
+ if resp['routed']:
+ self.verbose("Message published")
+ else:
+ self.verbose("Message published but NOT routed")
+
+ def invoke_get(self):
+ (uri, upload) = self.parse_args(self.args, EXTRA_VERBS['get'])
+ payload_file = 'payload_file' in upload and upload['payload_file'] or None
+ assert_usage(not payload_file or upload['count'] == '1',
+ 'Cannot get multiple messages using payload_file')
+ result = self.post(uri, json.dumps(upload))
+ if payload_file:
+ write_payload_file(payload_file, result)
+ columns = ['routing_key', 'exchange', 'message_count',
+ 'payload_bytes', 'redelivered']
+ format_list(result, columns, {}, self.options)
+ else:
+ format_list(result, [], {}, self.options)
+
+ def invoke_export(self):
+ path = self.get_arg()
+ definitions = self.get("/definitions")
+ f = open(path, 'w')
+ f.write(definitions)
+ f.close()
+ self.verbose("Exported definitions for %s to \"%s\""
+ % (self.options.hostname, path))
+
+ def invoke_import(self):
+ path = self.get_arg()
+ f = open(path, 'r')
+ definitions = f.read()
+ f.close()
+ self.post("/definitions", definitions)
+ self.verbose("Imported definitions for %s from \"%s\""
+ % (self.options.hostname, path))
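+    # Export and import operate on the same definitions document, so a
+    # backup/restore round trip is simply (illustrative):
+    #
+    #   rabbitmqadmin export rabbit.definitions.json
+    #   rabbitmqadmin import rabbit.definitions.json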
+
+ def invoke_list(self):
+ cols = self.args[1:]
+ (uri, obj_info) = self.list_show_uri(LISTABLE, 'list', cols)
+ format_list(self.get(uri), cols, obj_info, self.options)
+
+ def invoke_show(self):
+ cols = self.args[1:]
+ (uri, obj_info) = self.list_show_uri(SHOWABLE, 'show', cols)
+ format_list('[{0}]'.format(self.get(uri)), cols, obj_info, self.options)
+
+ def list_show_uri(self, obj_types, verb, cols):
+ obj_type = self.args[0]
+ assert_usage(obj_type in obj_types,
+ "Don't know how to {0} {1}".format(verb, obj_type))
+ obj_info = obj_types[obj_type]
+ uri = "/%s" % obj_type
+ query = []
+ if obj_info['vhost'] and self.options.vhost:
+ uri += "/%s" % urllib.quote_plus(self.options.vhost)
+ if cols != []:
+ query.append("columns=" + ",".join(cols))
+ sort = self.options.sort
+ if sort:
+ query.append("sort=" + sort)
+ if self.options.sort_reverse:
+ query.append("sort_reverse=true")
+ query = "&".join(query)
+ if query != "":
+ uri += "?" + query
+ return (uri, obj_info)
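+    # For example, `list queues name messages` with --vhost "/", --sort
+    # messages and --sort-reverse builds a URI such as:
+    #
+    #   /queues/%2F?columns=name,messages&sort=messages&sort_reverse=true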
+
+ def invoke_declare(self):
+ (obj_type, uri, upload) = self.declare_delete_parse(DECLARABLE)
+ if obj_type == 'binding':
+ self.post(uri, json.dumps(upload))
+ else:
+ self.put(uri, json.dumps(upload))
+ self.verbose("{0} declared".format(obj_type))
+
+ def invoke_delete(self):
+ (obj_type, uri, upload) = self.declare_delete_parse(DELETABLE)
+ self.delete(uri)
+ self.verbose("{0} deleted".format(obj_type))
+
+ def invoke_close(self):
+ (obj_type, uri, upload) = self.declare_delete_parse(CLOSABLE)
+ self.delete(uri)
+ self.verbose("{0} closed".format(obj_type))
+
+ def invoke_purge(self):
+ (obj_type, uri, upload) = self.declare_delete_parse(PURGABLE)
+ self.delete(uri)
+ self.verbose("{0} purged".format(obj_type))
+
+ def declare_delete_parse(self, root):
+ assert_usage(len(self.args) > 0, 'Type not specified')
+ obj_type = self.args[0]
+ assert_usage(obj_type in root,
+ 'Type {0} not recognised'.format(obj_type))
+ obj = root[obj_type]
+ (uri, upload) = self.parse_args(self.args[1:], obj)
+ return (obj_type, uri, upload)
+
+ def parse_args(self, args, obj):
+ mandatory = obj['mandatory']
+ optional = obj['optional']
+ uri_template = obj['uri']
+ upload = {}
+ for k in optional.keys():
+ if optional[k]:
+ upload[k] = optional[k]
+ for arg in args:
+ assert_usage("=" in arg,
+ 'Argument "{0}" not in format name=value'.format(arg))
+ (name, value) = arg.split("=", 1)
+ assert_usage(name in mandatory or name in optional.keys(),
+ 'Argument "{0}" not recognised'.format(name))
+ if 'json' in obj and name in obj['json']:
+ upload[name] = self.parse_json(value)
+ else:
+ upload[name] = value
+ for m in mandatory:
+ assert_usage(m in upload.keys(),
+ 'mandatory argument "{0}" required'.format(m))
+ if 'vhost' not in mandatory:
+ upload['vhost'] = self.options.vhost or self.options.declare_vhost
+ uri_args = {}
+ for k in upload:
+ v = upload[k]
+ if v and isinstance(v, basestring):
+ uri_args[k] = urllib.quote_plus(v)
+ if k == 'destination_type':
+ uri_args['destination_char'] = v[0]
+ uri = uri_template.format(**uri_args)
+ return (uri, upload)
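+    # Sketch of the result: for `declare binding source=amq.direct
+    # destination=q destination_type=queue`, upload carries the name=value
+    # pairs plus the default vhost, and uri_args additionally gets
+    # destination_char='q' for use by the binding URI template (defined
+    # earlier in this file).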
+
+ def parse_json(self, text):
+ try:
+ return json.loads(text)
+ except ValueError:
+ print "Could not parse JSON:\n {0}".format(text)
+ sys.exit(1)
+
+def format_list(json_list, columns, args, options):
+ format = options.format
+ formatter = None
+ if format == "raw_json":
+ output(json_list)
+ return
+ elif format == "pretty_json":
+        enc = json.JSONEncoder(ensure_ascii=False, sort_keys=True, indent=2)
+ output(enc.encode(json.loads(json_list)))
+ return
+ else:
+        formatter = FORMATS.get(format)
+        assert_usage(formatter is not None,
+                     "Format {0} not recognised".format(format))
+ formatter_instance = formatter(columns, args, options)
+ formatter_instance.display(json_list)
+
+class Lister:
+ def verbose(self, string):
+ if self.options.verbose:
+ output(string)
+
+ def display(self, json_list):
+ depth = sys.maxint
+ if len(self.columns) == 0:
+ depth = int(self.options.depth)
+ (columns, table) = self.list_to_table(json.loads(json_list), depth)
+ if len(table) > 0:
+ self.display_list(columns, table)
+ else:
+ self.verbose("No items")
+
+ def list_to_table(self, items, max_depth):
+ columns = {}
+ column_ix = {}
+ row = None
+ table = []
+
+ def add(prefix, depth, item, fun):
+ for key in item:
+ column = prefix == '' and key or (prefix + '.' + key)
+ subitem = item[key]
+ if type(subitem) == dict:
+                if 'json' in self.obj_info and key in self.obj_info['json']:
+ fun(column, json.dumps(subitem))
+ else:
+ if depth < max_depth:
+ add(column, depth + 1, subitem, fun)
+ elif type(subitem) == list:
+                    # The first branch is aimed at lists of plain strings,
+                    # such as mirror nodes in queues (which come out
+                    # looking decent); the second at structured lists,
+                    # such as applications in nodes (which look less so,
+                    # but what would look good?).
+ if [x for x in subitem if type(x) != unicode] == []:
+ serialised = " ".join(subitem)
+ else:
+ serialised = json.dumps(subitem)
+ fun(column, serialised)
+ else:
+ fun(column, subitem)
+
+ def add_to_columns(col, val):
+ columns[col] = True
+
+ def add_to_row(col, val):
+ if col in column_ix:
+ row[column_ix[col]] = unicode(val)
+
+ if len(self.columns) == 0:
+ for item in items:
+ add('', 1, item, add_to_columns)
+ columns = columns.keys()
+ columns.sort(key=column_sort_key)
+ else:
+ columns = self.columns
+
+ for i in xrange(0, len(columns)):
+ column_ix[columns[i]] = i
+ for item in items:
+ row = len(columns) * ['']
+ add('', 1, item, add_to_row)
+ table.append(row)
+
+ return (columns, table)
+
+class TSVList(Lister):
+ def __init__(self, columns, obj_info, options):
+ self.columns = columns
+ self.obj_info = obj_info
+ self.options = options
+
+ def display_list(self, columns, table):
+ head = "\t".join(columns)
+ self.verbose(head)
+
+ for row in table:
+ line = "\t".join(row)
+ output(line)
+
+class LongList(Lister):
+ def __init__(self, columns, obj_info, options):
+ self.columns = columns
+ self.obj_info = obj_info
+ self.options = options
+
+ def display_list(self, columns, table):
+ sep = "\n" + "-" * 80 + "\n"
+ max_width = 0
+ for col in columns:
+ max_width = max(max_width, len(col))
+ fmt = "{0:>" + unicode(max_width) + "}: {1}"
+ output(sep)
+ for i in xrange(0, len(table)):
+ for j in xrange(0, len(columns)):
+ output(fmt.format(columns[j], table[i][j]))
+ output(sep)
+
+class TableList(Lister):
+ def __init__(self, columns, obj_info, options):
+ self.columns = columns
+ self.obj_info = obj_info
+ self.options = options
+
+ def display_list(self, columns, table):
+ total = [columns]
+ total.extend(table)
+ self.ascii_table(total)
+
+ def ascii_table(self, rows):
+ table = ""
+ col_widths = [0] * len(rows[0])
+ for i in xrange(0, len(rows[0])):
+ for j in xrange(0, len(rows)):
+ col_widths[i] = max(col_widths[i], len(rows[j][i]))
+ self.ascii_bar(col_widths)
+ self.ascii_row(col_widths, rows[0], "^")
+ self.ascii_bar(col_widths)
+ for row in rows[1:]:
+ self.ascii_row(col_widths, row, "<")
+ self.ascii_bar(col_widths)
+
+ def ascii_row(self, col_widths, row, align):
+ txt = "|"
+ for i in xrange(0, len(col_widths)):
+ fmt = " {0:" + align + unicode(col_widths[i]) + "} "
+ txt += fmt.format(row[i]) + "|"
+ output(txt)
+
+ def ascii_bar(self, col_widths):
+ txt = "+"
+ for w in col_widths:
+ txt += ("-" * (w + 2)) + "+"
+ output(txt)
+
+class KeyValueList(Lister):
+ def __init__(self, columns, obj_info, options):
+ self.columns = columns
+ self.obj_info = obj_info
+ self.options = options
+
+ def display_list(self, columns, table):
+ for i in xrange(0, len(table)):
+ row = []
+ for j in xrange(0, len(columns)):
+ row.append("{0}=\"{1}\"".format(columns[j], table[i][j]))
+ output(" ".join(row))
+
+# TODO handle spaces etc in completable names
+class BashList(Lister):
+ def __init__(self, columns, obj_info, options):
+ self.columns = columns
+ self.obj_info = obj_info
+ self.options = options
+
+ def display_list(self, columns, table):
+ ix = None
+ for i in xrange(0, len(columns)):
+ if columns[i] == 'name':
+ ix = i
+ if ix is not None:
+ res = []
+ for row in table:
+ res.append(row[ix])
+ output(" ".join(res))
+
+FORMATS = {
+ 'raw_json' : None, # Special cased
+ 'pretty_json' : None, # Ditto
+ 'tsv' : TSVList,
+ 'long' : LongList,
+ 'table' : TableList,
+ 'kvp' : KeyValueList,
+ 'bash' : BashList
+}
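+# The formatter is selected with -f/--format, e.g. (illustrative):
+#
+#   rabbitmqadmin -f long list queues
+#   rabbitmqadmin -f tsv list exchanges name type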
+
+def write_payload_file(payload_file, json_list):
+ result = json.loads(json_list)[0]
+ payload = result['payload']
+ payload_encoding = result['payload_encoding']
+ f = open(payload_file, 'w')
+ if payload_encoding == 'base64':
+ data = base64.b64decode(payload)
+ else:
+ data = payload
+ f.write(data)
+ f.close()
+
+def print_bash_completion():
+ script = """# This is a bash completion script for rabbitmqadmin.
+# Redirect it to a file, then source it or copy it to /etc/bash_completion.d
+# to get tab completion. rabbitmqadmin must be on your PATH for this to work.
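+# For example, assuming this script was produced by rabbitmqadmin's
+# --bash-completion flag:
+#   rabbitmqadmin --bash-completion > /etc/bash_completion.d/rabbitmqadmin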
+_rabbitmqadmin()
+{
+ local cur prev opts base
+ COMPREPLY=()
+ cur="${COMP_WORDS[COMP_CWORD]}"
+ prev="${COMP_WORDS[COMP_CWORD-1]}"
+
+ opts="list show declare delete close purge import export get publish help"
+ fargs="--help --host --port --vhost --username --password --format --depth --sort --sort-reverse"
+
+ case "${prev}" in
+ list)
+ COMPREPLY=( $(compgen -W '""" + " ".join(LISTABLE) + """' -- ${cur}) )
+ return 0
+ ;;
+ show)
+ COMPREPLY=( $(compgen -W '""" + " ".join(SHOWABLE) + """' -- ${cur}) )
+ return 0
+ ;;
+ declare)
+ COMPREPLY=( $(compgen -W '""" + " ".join(DECLARABLE.keys()) + """' -- ${cur}) )
+ return 0
+ ;;
+ delete)
+ COMPREPLY=( $(compgen -W '""" + " ".join(DELETABLE.keys()) + """' -- ${cur}) )
+ return 0
+ ;;
+ close)
+ COMPREPLY=( $(compgen -W '""" + " ".join(CLOSABLE.keys()) + """' -- ${cur}) )
+ return 0
+ ;;
+ purge)
+ COMPREPLY=( $(compgen -W '""" + " ".join(PURGABLE.keys()) + """' -- ${cur}) )
+ return 0
+ ;;
+ export)
+ COMPREPLY=( $(compgen -f ${cur}) )
+ return 0
+ ;;
+ import)
+ COMPREPLY=( $(compgen -f ${cur}) )
+ return 0
+ ;;
+ help)
+ opts="subcommands config"
+ COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
+ return 0
+ ;;
+ -H)
+ COMPREPLY=( $(compgen -A hostname ${cur}) )
+ return 0
+ ;;
+ --host)
+ COMPREPLY=( $(compgen -A hostname ${cur}) )
+ return 0
+ ;;
+ -V)
+ opts="$(rabbitmqadmin -q -f bash list vhosts)"
+ COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
+ return 0
+ ;;
+ --vhost)
+ opts="$(rabbitmqadmin -q -f bash list vhosts)"
+ COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
+ return 0
+ ;;
+ -u)
+ opts="$(rabbitmqadmin -q -f bash list users)"
+ COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
+ return 0
+ ;;
+ --username)
+ opts="$(rabbitmqadmin -q -f bash list users)"
+ COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
+ return 0
+ ;;
+ -f)
+ COMPREPLY=( $(compgen -W \"""" + " ".join(FORMATS.keys()) + """\" -- ${cur}) )
+ return 0
+ ;;
+ --format)
+ COMPREPLY=( $(compgen -W \"""" + " ".join(FORMATS.keys()) + """\" -- ${cur}) )
+ return 0
+ ;;
+
+"""
+ for l in LISTABLE:
+ key = l[0:len(l) - 1]
+ script += " " + key + """)
+ opts="$(rabbitmqadmin -q -f bash list """ + l + """)"
+ COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
+ return 0
+ ;;
+"""
+ script += """ *)
+ ;;
+ esac
+
+ COMPREPLY=($(compgen -W "${opts} ${fargs}" -- ${cur}))
+ return 0
+}
+complete -F _rabbitmqadmin rabbitmqadmin
+"""
+ output(script)
+
+if __name__ == "__main__":
+ main()
diff --git a/deps/rabbit/test/term_to_binary_compat_prop_SUITE.erl b/deps/rabbit/test/term_to_binary_compat_prop_SUITE.erl
new file mode 100644
index 0000000000..2f56f56189
--- /dev/null
+++ b/deps/rabbit/test/term_to_binary_compat_prop_SUITE.erl
@@ -0,0 +1,105 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2017-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+
+-module(term_to_binary_compat_prop_SUITE).
+
+-compile(export_all).
+
+-include("rabbit.hrl").
+-include_lib("common_test/include/ct.hrl").
+-include_lib("proper/include/proper.hrl").
+
+-define(ITERATIONS_TO_RUN_UNTIL_CONFIDENT, 10000).
+
+all() ->
+ [
+ ensure_term_to_binary_defaults_to_version_1,
+ term_to_binary_latin_atom,
+ queue_name_to_binary
+ ].
+
+erts_gt_8() ->
+ Vsn = erlang:system_info(version),
+ [Maj|_] = string:tokens(Vsn, "."),
+ list_to_integer(Maj) > 8.
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+%% R16B03 defaults term_to_binary version to 0, this test would always fail
+ensure_term_to_binary_defaults_to_version_1(Config) ->
+ CurrentERTS = erlang:system_info(version),
+ MinimumTestedERTS = "6.0",
+ case rabbit_misc:version_compare(CurrentERTS, MinimumTestedERTS, gte) of
+ true ->
+ Property = fun () ->
+ prop_ensure_term_to_binary_defaults_to_version_1(Config)
+ end,
+ rabbit_ct_proper_helpers:run_proper(
+ Property, [],
+ ?ITERATIONS_TO_RUN_UNTIL_CONFIDENT);
+ false ->
+ ct:pal(
+ ?LOW_IMPORTANCE,
+ "This test require ERTS ~p or above, running on ~p~n"
+ "Skipping test...",
+ [MinimumTestedERTS, CurrentERTS])
+ end.
+
+prop_ensure_term_to_binary_defaults_to_version_1(_Config) ->
+ ?FORALL(Term, any(),
+ begin
+ Current = term_to_binary(Term),
+ Compat = term_to_binary_compat:term_to_binary_1(Term),
+ Current =:= Compat
+ end).
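+%% The two encodings differ in general; e.g. minor_version 0 writes floats
+%% as FLOAT_EXT (tag 99, an ASCII string) while minor_version 1 writes
+%% NEW_FLOAT_EXT (tag 70, 8 bytes of IEEE 754), so the property above is a
+%% real constraint rather than a tautology.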
+
+term_to_binary_latin_atom(Config) ->
+ Property = fun () -> prop_term_to_binary_latin_atom(Config) end,
+ rabbit_ct_proper_helpers:run_proper(Property, [],
+ ?ITERATIONS_TO_RUN_UNTIL_CONFIDENT).
+
+prop_term_to_binary_latin_atom(_Config) ->
+ ?FORALL(LatinString, list(integer(0, 255)),
+ begin
+ Length = length(LatinString),
+ Atom = list_to_atom(LatinString),
+ Binary = list_to_binary(LatinString),
+ <<131,100, Length:16, Binary/binary>> =:=
+ term_to_binary_compat:term_to_binary_1(Atom)
+ end).
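+%% Worked example: the atom 'abc' encodes as <<131,100,0,3,"abc">>, i.e.
+%% the version byte 131, ATOM_EXT tag 100, a 16-bit length of 3, then the
+%% Latin-1 bytes of the name.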
+
+queue_name_to_binary(Config) ->
+ Property = fun () -> prop_queue_name_to_binary(Config) end,
+ rabbit_ct_proper_helpers:run_proper(Property, [],
+ ?ITERATIONS_TO_RUN_UNTIL_CONFIDENT).
+
+
+prop_queue_name_to_binary(_Config) ->
+ ?FORALL({VHost, QName}, {binary(), binary()},
+ begin
+ VHostBSize = byte_size(VHost),
+ NameBSize = byte_size(QName),
+ Expected =
+ <<131, %% Binary format "version"
+ 104, 4, %% 4-element tuple
+ 100, 0, 8, "resource", %% `resource` atom
+ 109, VHostBSize:32, VHost/binary, %% Vhost binary
+ 100, 0, 5, "queue", %% `queue` atom
+ 109, NameBSize:32, QName/binary>>, %% Name binary
+ Resource = rabbit_misc:r(VHost, queue, QName),
+ Current = term_to_binary_compat:term_to_binary_1(Resource),
+ Current =:= Expected
+ end).
diff --git a/deps/rabbit/test/test_util.erl b/deps/rabbit/test/test_util.erl
new file mode 100644
index 0000000000..9a82b0ea1c
--- /dev/null
+++ b/deps/rabbit/test/test_util.erl
@@ -0,0 +1,28 @@
+-module(test_util).
+
+-export([
+ fake_pid/1
+ ]).
+
+
+fake_pid(Node) ->
+ NodeBin = rabbit_data_coercion:to_binary(Node),
+ ThisNodeSize = size(term_to_binary(node())) + 1,
+ Pid = spawn(fun () -> ok end),
+ %% drop the local node data from a local pid
+ <<Pre:ThisNodeSize/binary, LocalPidData/binary>> = term_to_binary(Pid),
+ S = size(NodeBin),
+ %% get the encoding type of the pid
+ <<_:8, Type:8/unsigned, _/binary>> = Pre,
+ %% replace it with the incoming node binary
+ Final = <<131, Type, 100, S:16/unsigned, NodeBin/binary, LocalPidData/binary>>,
+ binary_to_term(Final).
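+%% Sketch of the rewrite above (assuming the classic PID_EXT layout): a
+%% local pid serialises as <<131,103,NodeAtom/binary,Id:32,Serial:32,
+%% Creation:8>>, and fake_pid/1 swaps NodeAtom for an ATOM_EXT segment
+%% (tag 100, Len:16, Name) built from the requested node name.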
+
+-ifdef(TEST).
+-include_lib("eunit/include/eunit.hrl").
+
+fake_pid_test() ->
+ _ = fake_pid(banana),
+ ok.
+
+-endif.
diff --git a/deps/rabbit/test/topic_permission_SUITE.erl b/deps/rabbit/test/topic_permission_SUITE.erl
new file mode 100644
index 0000000000..2f123fd7f6
--- /dev/null
+++ b/deps/rabbit/test/topic_permission_SUITE.erl
@@ -0,0 +1,244 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(topic_permission_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, sequential_tests}
+ ].
+
+groups() -> [
+ {sequential_tests, [], [
+ topic_permission_database_access,
+ topic_permission_checks
+ ]}
+ ].
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, ?MODULE}
+ ]),
+ rabbit_ct_helpers:run_setup_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_group(_, Config) -> Config.
+end_per_group(_, Config) -> Config.
+
+init_per_testcase(Testcase, Config) ->
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, clear_tables, []),
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+clear_tables() ->
+ {atomic, ok} = mnesia:clear_table(rabbit_topic_permission),
+ {atomic, ok} = mnesia:clear_table(rabbit_vhost),
+ {atomic, ok} = mnesia:clear_table(rabbit_user),
+ ok.
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+topic_permission_database_access(Config) ->
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, topic_permission_database_access1, [Config]).
+
+topic_permission_database_access1(_Config) ->
+ 0 = length(ets:tab2list(rabbit_topic_permission)),
+ rabbit_vhost:add(<<"/">>, <<"acting-user">>),
+ rabbit_vhost:add(<<"other-vhost">>, <<"acting-user">>),
+ rabbit_auth_backend_internal:add_user(<<"guest">>, <<"guest">>, <<"acting-user">>),
+ rabbit_auth_backend_internal:add_user(<<"dummy">>, <<"dummy">>, <<"acting-user">>),
+
+ rabbit_auth_backend_internal:set_topic_permissions(
+ <<"guest">>, <<"/">>, <<"amq.topic">>, "^a", "^a", <<"acting-user">>
+ ),
+ 1 = length(ets:tab2list(rabbit_topic_permission)),
+ 1 = length(rabbit_auth_backend_internal:list_user_topic_permissions(<<"guest">>)),
+ 0 = length(rabbit_auth_backend_internal:list_user_topic_permissions(<<"dummy">>)),
+ 1 = length(rabbit_auth_backend_internal:list_vhost_topic_permissions(<<"/">>)),
+ 0 = length(rabbit_auth_backend_internal:list_vhost_topic_permissions(<<"other-vhost">>)),
+ 1 = length(rabbit_auth_backend_internal:list_user_vhost_topic_permissions(<<"guest">>,<<"/">>)),
+ 0 = length(rabbit_auth_backend_internal:list_user_vhost_topic_permissions(<<"guest">>,<<"other-vhost">>)),
+ 1 = length(rabbit_auth_backend_internal:list_topic_permissions()),
+
+ rabbit_auth_backend_internal:set_topic_permissions(
+ <<"guest">>, <<"other-vhost">>, <<"amq.topic">>, ".*", ".*", <<"acting-user">>
+ ),
+ 2 = length(ets:tab2list(rabbit_topic_permission)),
+ 2 = length(rabbit_auth_backend_internal:list_user_topic_permissions(<<"guest">>)),
+ 0 = length(rabbit_auth_backend_internal:list_user_topic_permissions(<<"dummy">>)),
+ 1 = length(rabbit_auth_backend_internal:list_vhost_topic_permissions(<<"/">>)),
+ 1 = length(rabbit_auth_backend_internal:list_vhost_topic_permissions(<<"other-vhost">>)),
+ 1 = length(rabbit_auth_backend_internal:list_user_vhost_topic_permissions(<<"guest">>,<<"/">>)),
+ 1 = length(rabbit_auth_backend_internal:list_user_vhost_topic_permissions(<<"guest">>,<<"other-vhost">>)),
+ 2 = length(rabbit_auth_backend_internal:list_topic_permissions()),
+
+ rabbit_auth_backend_internal:set_topic_permissions(
+ <<"guest">>, <<"/">>, <<"topic1">>, "^a", "^a", <<"acting-user">>
+ ),
+ rabbit_auth_backend_internal:set_topic_permissions(
+ <<"guest">>, <<"/">>, <<"topic2">>, "^a", "^a", <<"acting-user">>
+ ),
+
+ 4 = length(rabbit_auth_backend_internal:list_user_topic_permissions(<<"guest">>)),
+ 3 = length(rabbit_auth_backend_internal:list_user_vhost_topic_permissions(<<"guest">>,<<"/">>)),
+ 1 = length(rabbit_auth_backend_internal:list_user_vhost_topic_permissions(<<"guest">>,<<"other-vhost">>)),
+ 4 = length(rabbit_auth_backend_internal:list_topic_permissions()),
+
+ rabbit_auth_backend_internal:clear_topic_permissions(<<"guest">>, <<"other-vhost">>,
+ <<"acting-user">>),
+ 0 = length(rabbit_auth_backend_internal:list_vhost_topic_permissions(<<"other-vhost">>)),
+ 3 = length(rabbit_auth_backend_internal:list_user_topic_permissions(<<"guest">>)),
+ rabbit_auth_backend_internal:clear_topic_permissions(<<"guest">>, <<"/">>, <<"topic1">>,
+ <<"acting-user">>),
+ 2 = length(rabbit_auth_backend_internal:list_user_topic_permissions(<<"guest">>)),
+ rabbit_auth_backend_internal:clear_topic_permissions(<<"guest">>, <<"/">>,
+ <<"acting-user">>),
+ 0 = length(rabbit_auth_backend_internal:list_user_topic_permissions(<<"guest">>)),
+
+
+ {error, {no_such_user, _}} = (catch rabbit_auth_backend_internal:set_topic_permissions(
+ <<"non-existing-user">>, <<"other-vhost">>, <<"amq.topic">>, ".*", ".*", <<"acting-user">>
+ )),
+
+ {error, {no_such_vhost, _}} = (catch rabbit_auth_backend_internal:set_topic_permissions(
+ <<"guest">>, <<"non-existing-vhost">>, <<"amq.topic">>, ".*", ".*", <<"acting-user">>
+ )),
+
+ {error, {no_such_user, _}} = (catch rabbit_auth_backend_internal:set_topic_permissions(
+ <<"non-existing-user">>, <<"non-existing-vhost">>, <<"amq.topic">>, ".*", ".*", <<"acting-user">>
+ )),
+
+ {error, {no_such_user, _}} = (catch rabbit_auth_backend_internal:list_user_topic_permissions(
+ "non-existing-user"
+ )),
+
+ {error, {no_such_vhost, _}} = (catch rabbit_auth_backend_internal:list_vhost_topic_permissions(
+ "non-existing-vhost"
+ )),
+
+ {error, {invalid_regexp, _, _}} = (catch rabbit_auth_backend_internal:set_topic_permissions(
+ <<"guest">>, <<"/">>, <<"amq.topic">>, "[", "^a", <<"acting-user">>
+ )),
+ ok.
+
+topic_permission_checks(Config) ->
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, topic_permission_checks1, [Config]).
+
+topic_permission_checks1(_Config) ->
+ 0 = length(ets:tab2list(rabbit_topic_permission)),
+ rabbit_misc:execute_mnesia_transaction(fun() ->
+ ok = mnesia:write(rabbit_vhost,
+ vhost:new(<<"/">>, []),
+ write),
+ ok = mnesia:write(rabbit_vhost,
+ vhost:new(<<"other-vhost">>, []),
+ write)
+ end),
+ rabbit_auth_backend_internal:add_user(<<"guest">>, <<"guest">>, <<"acting-user">>),
+ rabbit_auth_backend_internal:add_user(<<"dummy">>, <<"dummy">>, <<"acting-user">>),
+
+ rabbit_auth_backend_internal:set_topic_permissions(
+ <<"guest">>, <<"/">>, <<"amq.topic">>, "^a", "^a", <<"acting-user">>
+ ),
+ 1 = length(ets:tab2list(rabbit_topic_permission)),
+ 1 = length(rabbit_auth_backend_internal:list_user_topic_permissions(<<"guest">>)),
+ 0 = length(rabbit_auth_backend_internal:list_user_topic_permissions(<<"dummy">>)),
+ 1 = length(rabbit_auth_backend_internal:list_vhost_topic_permissions(<<"/">>)),
+ 0 = length(rabbit_auth_backend_internal:list_vhost_topic_permissions(<<"other-vhost">>)),
+
+ rabbit_auth_backend_internal:set_topic_permissions(
+ <<"guest">>, <<"other-vhost">>, <<"amq.topic">>, ".*", ".*", <<"acting-user">>
+ ),
+ 2 = length(ets:tab2list(rabbit_topic_permission)),
+ 2 = length(rabbit_auth_backend_internal:list_user_topic_permissions(<<"guest">>)),
+ 0 = length(rabbit_auth_backend_internal:list_user_topic_permissions(<<"dummy">>)),
+ 1 = length(rabbit_auth_backend_internal:list_vhost_topic_permissions(<<"/">>)),
+ 1 = length(rabbit_auth_backend_internal:list_vhost_topic_permissions(<<"other-vhost">>)),
+
+ User = #auth_user{username = <<"guest">>},
+ Topic = #resource{name = <<"amq.topic">>, virtual_host = <<"/">>,
+ kind = topic},
+ Context = #{routing_key => <<"a.b.c">>},
+ Permissions = [write, read],
+ %% user has access to exchange, routing key matches
+ [true = rabbit_auth_backend_internal:check_topic_access(
+ User,
+ Topic,
+ Perm,
+ Context
+ ) || Perm <- Permissions],
+ %% user has access to exchange, routing key does not match
+ [false = rabbit_auth_backend_internal:check_topic_access(
+ User,
+ Topic,
+ Perm,
+ #{routing_key => <<"x.y.z">>}
+ ) || Perm <- Permissions],
+ %% user has access to exchange but not on this vhost
+ %% let pass when there's no match
+ [true = rabbit_auth_backend_internal:check_topic_access(
+ User,
+ Topic#resource{virtual_host = <<"fancyvhost">>},
+ Perm,
+ Context
+ ) || Perm <- Permissions],
+ %% user does not have access to exchange
+ %% let pass when there's no match
+ [true = rabbit_auth_backend_internal:check_topic_access(
+ #auth_user{username = <<"dummy">>},
+ Topic,
+ Perm,
+ Context
+ ) || Perm <- Permissions],
+
+ %% expand variables
+ rabbit_auth_backend_internal:set_topic_permissions(
+ <<"guest">>, <<"other-vhost">>, <<"amq.topic">>,
+ "services.{vhost}.accounts.{username}.notifications",
+ "services.{vhost}.accounts.{username}.notifications", <<"acting-user">>
+ ),
+ %% routing key OK
+ [true = rabbit_auth_backend_internal:check_topic_access(
+ User,
+ Topic#resource{virtual_host = <<"other-vhost">>},
+ Perm,
+ #{routing_key => <<"services.other-vhost.accounts.guest.notifications">>,
+ variable_map => #{
+ <<"username">> => <<"guest">>,
+ <<"vhost">> => <<"other-vhost">>
+ }
+ }
+ ) || Perm <- Permissions],
+ %% routing key KO
+ [false = rabbit_auth_backend_internal:check_topic_access(
+ User,
+ Topic#resource{virtual_host = <<"other-vhost">>},
+ Perm,
+ #{routing_key => <<"services.default.accounts.dummy.notifications">>,
+ variable_map => #{
+ <<"username">> => <<"guest">>,
+ <<"vhost">> => <<"other-vhost">>
+ }
+ }
+ ) || Perm <- Permissions],
+
+ ok.
diff --git a/deps/rabbit/test/unit_access_control_SUITE.erl b/deps/rabbit/test/unit_access_control_SUITE.erl
new file mode 100644
index 0000000000..af8f481083
--- /dev/null
+++ b/deps/rabbit/test/unit_access_control_SUITE.erl
@@ -0,0 +1,445 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(unit_access_control_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("kernel/include/file.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, sequential_tests},
+ {group, parallel_tests}
+ ].
+
+groups() ->
+ [
+ {parallel_tests, [parallel], [
+ password_hashing,
+ unsupported_connection_refusal
+ ]},
+ {sequential_tests, [], [
+ login_with_credentials_but_no_password,
+ login_of_passwordless_user,
+ set_tags_for_passwordless_user,
+ change_password,
+ topic_matching,
+ auth_backend_internal_expand_topic_permission,
+ rabbit_direct_extract_extra_auth_props
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(Group, Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, Group},
+ {rmq_nodes_count, 1}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_group(_Group, Config) ->
+ rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+%% ---------------------------------------------------------------------------
+%% Test Cases
+%% ---------------------------------------------------------------------------
+
+password_hashing(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, password_hashing1, [Config]).
+
+password_hashing1(_Config) ->
+ rabbit_password_hashing_sha256 = rabbit_password:hashing_mod(),
+ application:set_env(rabbit, password_hashing_module,
+ rabbit_password_hashing_md5),
+ rabbit_password_hashing_md5 = rabbit_password:hashing_mod(),
+ application:set_env(rabbit, password_hashing_module,
+ rabbit_password_hashing_sha256),
+ rabbit_password_hashing_sha256 = rabbit_password:hashing_mod(),
+
+ rabbit_password_hashing_sha256 =
+ rabbit_password:hashing_mod(rabbit_password_hashing_sha256),
+ rabbit_password_hashing_md5 =
+ rabbit_password:hashing_mod(rabbit_password_hashing_md5),
+ rabbit_password_hashing_md5 =
+ rabbit_password:hashing_mod(undefined),
+
+ rabbit_password_hashing_md5 =
+ rabbit_auth_backend_internal:hashing_module_for_user(
+ internal_user:new()),
+ rabbit_password_hashing_md5 =
+ rabbit_auth_backend_internal:hashing_module_for_user(
+ internal_user:new({hashing_algorithm, undefined})),
+ rabbit_password_hashing_md5 =
+ rabbit_auth_backend_internal:hashing_module_for_user(
+ internal_user:new({hashing_algorithm, rabbit_password_hashing_md5})),
+
+ rabbit_password_hashing_sha256 =
+ rabbit_auth_backend_internal:hashing_module_for_user(
+ internal_user:new({hashing_algorithm, rabbit_password_hashing_sha256})),
+
+ passed.
+
+change_password(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, change_password1, [Config]).
+
+change_password1(_Config) ->
+ UserName = <<"test_user">>,
+ Password = <<"test_password">>,
+ case rabbit_auth_backend_internal:lookup_user(UserName) of
+ {ok, _} -> rabbit_auth_backend_internal:delete_user(UserName, <<"acting-user">>);
+ _ -> ok
+ end,
+ ok = application:set_env(rabbit, password_hashing_module,
+ rabbit_password_hashing_md5),
+ ok = rabbit_auth_backend_internal:add_user(UserName, Password, <<"acting-user">>),
+ {ok, #auth_user{username = UserName}} =
+ rabbit_auth_backend_internal:user_login_authentication(
+ UserName, [{password, Password}]),
+ ok = application:set_env(rabbit, password_hashing_module,
+ rabbit_password_hashing_sha256),
+ {ok, #auth_user{username = UserName}} =
+ rabbit_auth_backend_internal:user_login_authentication(
+ UserName, [{password, Password}]),
+
+ NewPassword = <<"test_password1">>,
+ ok = rabbit_auth_backend_internal:change_password(UserName, NewPassword,
+ <<"acting-user">>),
+ {ok, #auth_user{username = UserName}} =
+ rabbit_auth_backend_internal:user_login_authentication(
+ UserName, [{password, NewPassword}]),
+
+ {refused, _, [UserName]} =
+ rabbit_auth_backend_internal:user_login_authentication(
+ UserName, [{password, Password}]),
+ passed.
+
+
+login_with_credentials_but_no_password(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, login_with_credentials_but_no_password1, [Config]).
+
+login_with_credentials_but_no_password1(_Config) ->
+ Username = <<"login_with_credentials_but_no_password-user">>,
+ Password = <<"login_with_credentials_but_no_password-password">>,
+ ok = rabbit_auth_backend_internal:add_user(Username, Password, <<"acting-user">>),
+
+ try
+ rabbit_auth_backend_internal:user_login_authentication(Username,
+ [{key, <<"value">>}]),
+ ?assert(false)
+ catch exit:{unknown_auth_props, Username, [{key, <<"value">>}]} ->
+ ok
+ end,
+
+ ok = rabbit_auth_backend_internal:delete_user(Username, <<"acting-user">>),
+
+ passed.
+
+%% passwordless users are not supposed to be used with
+%% this backend (and PLAIN authentication mechanism in general)
+login_of_passwordless_user(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, login_of_passwordless_user1, [Config]).
+
+login_of_passwordless_user1(_Config) ->
+ Username = <<"login_of_passwordless_user-user">>,
+ Password = <<"">>,
+ ok = rabbit_auth_backend_internal:add_user(Username, Password, <<"acting-user">>),
+
+ ?assertMatch(
+ {refused, _Message, [Username]},
+ rabbit_auth_backend_internal:user_login_authentication(Username,
+ [{password, <<"">>}])),
+
+ ?assertMatch(
+ {refused, _Format, [Username]},
+ rabbit_auth_backend_internal:user_login_authentication(Username,
+ [{password, ""}])),
+
+ ok = rabbit_auth_backend_internal:delete_user(Username, <<"acting-user">>),
+
+ passed.
+
+
+set_tags_for_passwordless_user(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, set_tags_for_passwordless_user1, [Config]).
+
+set_tags_for_passwordless_user1(_Config) ->
+ Username = <<"set_tags_for_passwordless_user">>,
+ Password = <<"set_tags_for_passwordless_user">>,
+ ok = rabbit_auth_backend_internal:add_user(Username, Password,
+ <<"acting-user">>),
+ ok = rabbit_auth_backend_internal:clear_password(Username,
+ <<"acting-user">>),
+ ok = rabbit_auth_backend_internal:set_tags(Username, [management],
+ <<"acting-user">>),
+
+ {ok, User1} = rabbit_auth_backend_internal:lookup_user(Username),
+ ?assertEqual([management], internal_user:get_tags(User1)),
+
+ ok = rabbit_auth_backend_internal:set_tags(Username, [management, policymaker],
+ <<"acting-user">>),
+
+ {ok, User2} = rabbit_auth_backend_internal:lookup_user(Username),
+ ?assertEqual([management, policymaker], internal_user:get_tags(User2)),
+
+ ok = rabbit_auth_backend_internal:set_tags(Username, [],
+ <<"acting-user">>),
+
+ {ok, User3} = rabbit_auth_backend_internal:lookup_user(Username),
+ ?assertEqual([], internal_user:get_tags(User3)),
+
+ ok = rabbit_auth_backend_internal:delete_user(Username,
+ <<"acting-user">>),
+
+ passed.
+
+
+rabbit_direct_extract_extra_auth_props(_Config) ->
+ {ok, CSC} = code_server_cache:start_link(),
+ % no protocol to extract
+ [] = rabbit_direct:extract_extra_auth_props(
+ {<<"guest">>, <<"guest">>}, <<"/">>, 1,
+ [{name,<<"127.0.0.1:52366 -> 127.0.0.1:1883">>}]),
+ % protocol to extract, but no module to call
+ [] = rabbit_direct:extract_extra_auth_props(
+ {<<"guest">>, <<"guest">>}, <<"/">>, 1,
+ [{protocol, {'PROTOCOL_WITHOUT_MODULE', "1.0"}}]),
+ % see rabbit_dummy_protocol_connection_info module
+ % protocol to extract, module that returns a client ID
+ [{client_id, <<"DummyClientId">>}] = rabbit_direct:extract_extra_auth_props(
+ {<<"guest">>, <<"guest">>}, <<"/">>, 1,
+ [{protocol, {'DUMMY_PROTOCOL', "1.0"}}]),
+ % protocol to extract, but error thrown in module
+ [] = rabbit_direct:extract_extra_auth_props(
+ {<<"guest">>, <<"guest">>}, <<"/">>, -1,
+ [{protocol, {'DUMMY_PROTOCOL', "1.0"}}]),
+ gen_server:stop(CSC),
+ ok.
+
+auth_backend_internal_expand_topic_permission(_Config) ->
+ ExpandMap = #{<<"username">> => <<"guest">>, <<"vhost">> => <<"default">>},
+ %% simple case
+ <<"services/default/accounts/guest/notifications">> =
+ rabbit_auth_backend_internal:expand_topic_permission(
+ <<"services/{vhost}/accounts/{username}/notifications">>,
+ ExpandMap
+ ),
+ %% replace variable twice
+ <<"services/default/accounts/default/guest/notifications">> =
+ rabbit_auth_backend_internal:expand_topic_permission(
+ <<"services/{vhost}/accounts/{vhost}/{username}/notifications">>,
+ ExpandMap
+ ),
+ %% nothing to replace
+ <<"services/accounts/notifications">> =
+ rabbit_auth_backend_internal:expand_topic_permission(
+ <<"services/accounts/notifications">>,
+ ExpandMap
+ ),
+ %% the expand map isn't defined
+ <<"services/{vhost}/accounts/{username}/notifications">> =
+ rabbit_auth_backend_internal:expand_topic_permission(
+ <<"services/{vhost}/accounts/{username}/notifications">>,
+ undefined
+ ),
+ %% the expand map is empty
+ <<"services/{vhost}/accounts/{username}/notifications">> =
+ rabbit_auth_backend_internal:expand_topic_permission(
+ <<"services/{vhost}/accounts/{username}/notifications">>,
+ #{}
+ ),
+ ok.
+
+unsupported_connection_refusal(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, unsupported_connection_refusal1, [Config]).
+
+unsupported_connection_refusal1(Config) ->
+ H = ?config(rmq_hostname, Config),
+ P = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp),
+ [passed = test_unsupported_connection_refusal(H, P, V) ||
+ V <- [<<"AMQP",9,9,9,9>>, <<"AMQP",0,1,0,0>>, <<"XXXX",0,0,9,1>>]],
+ passed.
+
+test_unsupported_connection_refusal(H, P, Header) ->
+ {ok, C} = gen_tcp:connect(H, P, [binary, {active, false}]),
+ ok = gen_tcp:send(C, Header),
+ {ok, <<"AMQP",0,0,9,1>>} = gen_tcp:recv(C, 8, 100),
+ ok = gen_tcp:close(C),
+ passed.
+
+
+%% -------------------------------------------------------------------
+%% Topic matching.
+%% -------------------------------------------------------------------
+
+topic_matching(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, topic_matching1, [Config]).
+
+topic_matching1(_Config) ->
+ XName = #resource{virtual_host = <<"/">>,
+ kind = exchange,
+ name = <<"topic_matching-exchange">>},
+ X0 = #exchange{name = XName, type = topic, durable = false,
+ auto_delete = false, arguments = []},
+ X = rabbit_exchange_decorator:set(X0),
+ %% create
+ rabbit_exchange_type_topic:validate(X),
+ exchange_op_callback(X, create, []),
+
+ %% add some bindings
+ Bindings = [#binding{source = XName,
+ key = list_to_binary(Key),
+ destination = #resource{virtual_host = <<"/">>,
+ kind = queue,
+ name = list_to_binary(Q)},
+ args = Args} ||
+ {Key, Q, Args} <- [{"a.b.c", "t1", []},
+ {"a.*.c", "t2", []},
+ {"a.#.b", "t3", []},
+ {"a.b.b.c", "t4", []},
+ {"#", "t5", []},
+ {"#.#", "t6", []},
+ {"#.b", "t7", []},
+ {"*.*", "t8", []},
+ {"a.*", "t9", []},
+ {"*.b.c", "t10", []},
+ {"a.#", "t11", []},
+ {"a.#.#", "t12", []},
+ {"b.b.c", "t13", []},
+ {"a.b.b", "t14", []},
+ {"a.b", "t15", []},
+ {"b.c", "t16", []},
+ {"", "t17", []},
+ {"*.*.*", "t18", []},
+ {"vodka.martini", "t19", []},
+ {"a.b.c", "t20", []},
+ {"*.#", "t21", []},
+ {"#.*.#", "t22", []},
+ {"*.#.#", "t23", []},
+ {"#.#.#", "t24", []},
+ {"*", "t25", []},
+ {"#.b.#", "t26", []},
+ {"args-test", "t27",
+ [{<<"foo">>, longstr, <<"bar">>}]},
+ {"args-test", "t27", %% Note aliasing
+ [{<<"foo">>, longstr, <<"baz">>}]}]],
+ lists:foreach(fun (B) -> exchange_op_callback(X, add_binding, [B]) end,
+ Bindings),
+
+ %% test some matches
+ test_topic_expect_match(
+ X, [{"a.b.c", ["t1", "t2", "t5", "t6", "t10", "t11", "t12",
+ "t18", "t20", "t21", "t22", "t23", "t24",
+ "t26"]},
+ {"a.b", ["t3", "t5", "t6", "t7", "t8", "t9", "t11",
+ "t12", "t15", "t21", "t22", "t23", "t24",
+ "t26"]},
+ {"a.b.b", ["t3", "t5", "t6", "t7", "t11", "t12", "t14",
+ "t18", "t21", "t22", "t23", "t24", "t26"]},
+ {"", ["t5", "t6", "t17", "t24"]},
+ {"b.c.c", ["t5", "t6", "t18", "t21", "t22", "t23",
+ "t24", "t26"]},
+ {"a.a.a.a.a", ["t5", "t6", "t11", "t12", "t21", "t22",
+ "t23", "t24"]},
+ {"vodka.gin", ["t5", "t6", "t8", "t21", "t22", "t23",
+ "t24"]},
+ {"vodka.martini", ["t5", "t6", "t8", "t19", "t21", "t22", "t23",
+ "t24"]},
+ {"b.b.c", ["t5", "t6", "t10", "t13", "t18", "t21",
+ "t22", "t23", "t24", "t26"]},
+ {"nothing.here.at.all", ["t5", "t6", "t21", "t22", "t23", "t24"]},
+ {"oneword", ["t5", "t6", "t21", "t22", "t23", "t24",
+ "t25"]},
+ {"args-test", ["t5", "t6", "t21", "t22", "t23", "t24",
+ "t25", "t27"]}]),
+ %% remove some bindings
+ RemovedBindings = [lists:nth(1, Bindings), lists:nth(5, Bindings),
+ lists:nth(11, Bindings), lists:nth(19, Bindings),
+ lists:nth(21, Bindings), lists:nth(28, Bindings)],
+ exchange_op_callback(X, remove_bindings, [RemovedBindings]),
+ RemainingBindings = ordsets:to_list(
+ ordsets:subtract(ordsets:from_list(Bindings),
+ ordsets:from_list(RemovedBindings))),
+
+ %% test some matches
+ test_topic_expect_match(
+ X,
+ [{"a.b.c", ["t2", "t6", "t10", "t12", "t18", "t20", "t22",
+ "t23", "t24", "t26"]},
+ {"a.b", ["t3", "t6", "t7", "t8", "t9", "t12", "t15",
+ "t22", "t23", "t24", "t26"]},
+ {"a.b.b", ["t3", "t6", "t7", "t12", "t14", "t18", "t22",
+ "t23", "t24", "t26"]},
+ {"", ["t6", "t17", "t24"]},
+ {"b.c.c", ["t6", "t18", "t22", "t23", "t24", "t26"]},
+ {"a.a.a.a.a", ["t6", "t12", "t22", "t23", "t24"]},
+ {"vodka.gin", ["t6", "t8", "t22", "t23", "t24"]},
+ {"vodka.martini", ["t6", "t8", "t22", "t23", "t24"]},
+ {"b.b.c", ["t6", "t10", "t13", "t18", "t22", "t23",
+ "t24", "t26"]},
+ {"nothing.here.at.all", ["t6", "t22", "t23", "t24"]},
+ {"oneword", ["t6", "t22", "t23", "t24", "t25"]},
+ {"args-test", ["t6", "t22", "t23", "t24", "t25", "t27"]}]),
+
+ %% remove the entire exchange
+ exchange_op_callback(X, delete, [RemainingBindings]),
+ %% none should match now
+ test_topic_expect_match(X, [{"a.b.c", []}, {"b.b.c", []}, {"", []}]),
+ passed.
+
+exchange_op_callback(X, Fun, Args) ->
+ rabbit_misc:execute_mnesia_transaction(
+ fun () -> rabbit_exchange:callback(X, Fun, transaction, [X] ++ Args) end),
+ rabbit_exchange:callback(X, Fun, none, [X] ++ Args).
+
+test_topic_expect_match(X, List) ->
+ lists:foreach(
+ fun ({Key, Expected}) ->
+ BinKey = list_to_binary(Key),
+ Message = rabbit_basic:message(X#exchange.name, BinKey,
+ #'P_basic'{}, <<>>),
+ Res = rabbit_exchange_type_topic:route(
+ X, #delivery{mandatory = false,
+ sender = self(),
+ message = Message}),
+ ExpectedRes = lists:map(
+ fun (Q) -> #resource{virtual_host = <<"/">>,
+ kind = queue,
+ name = list_to_binary(Q)}
+ end, Expected),
+ true = (lists:usort(ExpectedRes) =:= lists:usort(Res))
+ end, List).
diff --git a/deps/rabbit/test/unit_access_control_authn_authz_context_propagation_SUITE.erl b/deps/rabbit/test/unit_access_control_authn_authz_context_propagation_SUITE.erl
new file mode 100644
index 0000000000..9cb1ad7267
--- /dev/null
+++ b/deps/rabbit/test/unit_access_control_authn_authz_context_propagation_SUITE.erl
@@ -0,0 +1,127 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2019-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(unit_access_control_authn_authz_context_propagation_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, non_parallel_tests}
+ ].
+
+groups() ->
+ [
+ {non_parallel_tests, [], [
+ propagate_context_to_auth_backend
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(_, Config) ->
+ Config.
+
+end_per_group(_, Config) ->
+ Config.
+
+init_per_testcase(Testcase, Config) ->
+ AuthConfig = {rabbit, [
+ {auth_backends, [rabbit_auth_backend_context_propagation_mock]}
+ ]
+ },
+ rabbit_ct_helpers:testcase_started(Config, Testcase),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, Testcase}
+ ]),
+ rabbit_ct_helpers:run_setup_steps(Config1,
+ [ fun(Conf) -> merge_app_env(AuthConfig, Conf) end ] ++
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+merge_app_env(SomeConfig, Config) ->
+ rabbit_ct_helpers:merge_app_env(Config, SomeConfig).
+
+end_per_testcase(Testcase, Config) ->
+ Config1 = rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()),
+ rabbit_ct_helpers:testcase_finished(Config1, Testcase).
+
+%% -------------------------------------------------------------------
+%% Test cases
+%% -------------------------------------------------------------------
+
+propagate_context_to_auth_backend(Config) ->
+ ok = rabbit_ct_broker_helpers:add_code_path_to_all_nodes(Config,
+ rabbit_auth_backend_context_propagation_mock),
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, propagate_context_to_auth_backend1, []).
+
+propagate_context_to_auth_backend1() ->
+ rabbit_auth_backend_context_propagation_mock:init(),
+ AmqpParams = #amqp_params_direct{
+ virtual_host = <<"/">>,
+ username = <<"guest">>,
+ password = <<"guest">>,
+ adapter_info = #amqp_adapter_info{additional_info = [
+ {variable_map, #{<<"key1">> => <<"value1">>}}
+ ],
+ protocol = {'FOO_PROTOCOL', '1.0'} %% this will trigger a call to rabbit_foo_protocol_connection_info
+ }
+ },
+ {ok, Conn} = amqp_connection:start(AmqpParams),
+
+ %% rabbit_direct will call the rabbit_foo_protocol_connection_info module to extract information
+ %% this information will be propagated to the authentication backend
+ [{authentication, AuthProps}] = rabbit_auth_backend_context_propagation_mock:get(authentication),
+ ?assertEqual(<<"value1">>, proplists:get_value(key1, AuthProps)),
+
+ %% variable_map is propagated from rabbit_direct to the authorization backend
+ [{vhost_access, AuthzData}] = rabbit_auth_backend_context_propagation_mock:get(vhost_access),
+ ?assertEqual(<<"value1">>, maps:get(<<"key1">>, AuthzData)),
+
+ %% variable_map is extracted when the channel is created and kept in its state
+ {ok, Ch} = amqp_connection:open_channel(Conn),
+ QName = <<"channel_propagate_context_to_authz_backend-q">>,
+ amqp_channel:call(Ch, #'queue.declare'{queue = QName}),
+
+ check_send_receive(Ch, <<"">>, QName, QName),
+ amqp_channel:call(Ch, #'queue.bind'{queue = QName, exchange = <<"amq.topic">>, routing_key = <<"a.b">>}),
+ %% variable_map content is propagated from rabbit_channel to the authorization backend (resource check)
+ [{resource_access, AuthzContext}] = rabbit_auth_backend_context_propagation_mock:get(resource_access),
+ ?assertEqual(<<"value1">>, maps:get(<<"key1">>, AuthzContext)),
+
+ check_send_receive(Ch, <<"amq.topic">>, <<"a.b">>, QName),
+ %% variable_map is propagated from rabbit_channel to the authorization backend (topic check)
+ [{topic_access, TopicContext}] = rabbit_auth_backend_context_propagation_mock:get(topic_access),
+ VariableMap = maps:get(variable_map, TopicContext),
+ ?assertEqual(<<"value1">>, maps:get(<<"key1">>, VariableMap)),
+
+ passed.
+
+check_send_receive(Ch, Exchange, RoutingKey, QName) ->
+ amqp_channel:call(Ch,
+ #'basic.publish'{exchange = Exchange, routing_key = RoutingKey},
+ #amqp_msg{payload = <<"foo">>}),
+
+ {#'basic.get_ok'{}, #amqp_msg{payload = <<"foo">>}} =
+ amqp_channel:call(Ch, #'basic.get'{queue = QName,
+ no_ack = true}).
diff --git a/deps/rabbit/test/unit_access_control_credential_validation_SUITE.erl b/deps/rabbit/test/unit_access_control_credential_validation_SUITE.erl
new file mode 100644
index 0000000000..6a6a07836c
--- /dev/null
+++ b/deps/rabbit/test/unit_access_control_credential_validation_SUITE.erl
@@ -0,0 +1,269 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(unit_access_control_credential_validation_SUITE).
+
+-compile(export_all).
+-include_lib("proper/include/proper.hrl").
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+all() ->
+ [
+ {group, unit},
+ {group, integration}
+ ].
+
+groups() ->
+ [
+ {integration, [], [
+ min_length_integration_fails
+ , regexp_integration_fails
+ , min_length_integration_succeeds
+ , regexp_integration_succeeds
+ , min_length_change_password_integration_fails
+ , regexp_change_password_integration_fails
+ , min_length_change_password_integration_succeeds
+ , regexp_change_password_integration_succeeds
+ ]},
+ {unit, [parallel], [
+ basic_unconditionally_accepting_succeeds,
+ min_length_fails,
+ min_length_succeeds,
+ min_length_proper_fails,
+ min_length_proper_succeeds,
+ regexp_fails,
+ regexp_succeeds,
+ regexp_proper_fails,
+ regexp_proper_succeeds
+ ]}
+].
+
+suite() ->
+ [
+ {timetrap, {minutes, 4}}
+ ].
+
+%%
+%% Setup/teardown
+%%
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(integration, Config) ->
+ Suffix = rabbit_ct_helpers:testcase_absname(Config, "", "-"),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodes_count, 1},
+ {rmq_nodename_suffix, Suffix}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps());
+
+init_per_group(unit, Config) ->
+ Config.
+
+end_per_group(integration, Config) ->
+ rabbit_ct_broker_helpers:switch_credential_validator(Config, accept_everything),
+ rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_broker_helpers:teardown_steps());
+end_per_group(unit, Config) ->
+ Config.
+
+-define(USERNAME, <<"abc">>).
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+%%
+%% Test Cases
+%%
+
+basic_unconditionally_accepting_succeeds(_Config) ->
+ F = fun rabbit_credential_validator_accept_everything:validate/2,
+
+ Pwd1 = crypto:strong_rand_bytes(1),
+ ?assertEqual(ok, F(?USERNAME, Pwd1)),
+ Pwd2 = crypto:strong_rand_bytes(5),
+ ?assertEqual(ok, F(?USERNAME, Pwd2)),
+ Pwd3 = crypto:strong_rand_bytes(10),
+ ?assertEqual(ok, F(?USERNAME, Pwd3)),
+ Pwd4 = crypto:strong_rand_bytes(50),
+ ?assertEqual(ok, F(?USERNAME, Pwd4)),
+ Pwd5 = crypto:strong_rand_bytes(100),
+ ?assertEqual(ok, F(?USERNAME, Pwd5)),
+ Pwd6 = crypto:strong_rand_bytes(1000),
+ ?assertEqual(ok, F(?USERNAME, Pwd6)).
+
+min_length_fails(_Config) ->
+ F = fun rabbit_credential_validator_min_password_length:validate/3,
+
+ Pwd1 = crypto:strong_rand_bytes(1),
+ ?assertMatch({error, _}, F(?USERNAME, Pwd1, 5)),
+ Pwd2 = crypto:strong_rand_bytes(5),
+ ?assertMatch({error, _}, F(?USERNAME, Pwd2, 6)),
+ Pwd3 = crypto:strong_rand_bytes(10),
+ ?assertMatch({error, _}, F(?USERNAME, Pwd3, 15)),
+ Pwd4 = crypto:strong_rand_bytes(50),
+ ?assertMatch({error, _}, F(?USERNAME, Pwd4, 60)),
+ Pwd5 = undefined,
+ ?assertMatch({error, _}, F(?USERNAME, Pwd5, 60)),
+ Pwd6 = <<"">>,
+ ?assertMatch({error, _}, F(?USERNAME, Pwd6, 60)).
+
+min_length_succeeds(_Config) ->
+ F = fun rabbit_credential_validator_min_password_length:validate/3,
+
+ ?assertEqual(ok, F(?USERNAME, crypto:strong_rand_bytes(1), 1)),
+ ?assertEqual(ok, F(?USERNAME, crypto:strong_rand_bytes(6), 6)),
+ ?assertEqual(ok, F(?USERNAME, crypto:strong_rand_bytes(7), 6)),
+ ?assertEqual(ok, F(?USERNAME, crypto:strong_rand_bytes(20), 20)),
+ ?assertEqual(ok, F(?USERNAME, crypto:strong_rand_bytes(40), 30)),
+ ?assertEqual(ok, F(?USERNAME, crypto:strong_rand_bytes(50), 50)).
+
+min_length_proper_fails(_Config) ->
+ rabbit_ct_proper_helpers:run_proper(fun prop_min_length_fails_validation/0, [], 500).
+
+min_length_proper_succeeds(_Config) ->
+ rabbit_ct_proper_helpers:run_proper(fun prop_min_length_passes_validation/0, [], 500).
+
+regexp_fails(_Config) ->
+ F = fun rabbit_credential_validator_password_regexp:validate/3,
+
+ ?assertMatch({error, _}, F(?USERNAME, <<"abc">>, "^xyz")),
+ ?assertMatch({error, _}, F(?USERNAME, <<"abcdef">>, "^xyz")),
+ ?assertMatch({error, _}, F(?USERNAME, <<"abcxyz">>, "^abc\\d+")).
+
+regexp_succeeds(_Config) ->
+ F = fun rabbit_credential_validator_password_regexp:validate/3,
+
+ ?assertEqual(ok, F(?USERNAME, <<"abc">>, "^abc")),
+ ?assertEqual(ok, F(?USERNAME, <<"abcdef">>, "^abc")),
+ ?assertEqual(ok, F(?USERNAME, <<"abc123">>, "^abc\\d+")).
+
+regexp_proper_fails(_Config) ->
+ rabbit_ct_proper_helpers:run_proper(fun prop_regexp_fails_validation/0, [], 500).
+
+regexp_proper_succeeds(_Config) ->
+ rabbit_ct_proper_helpers:run_proper(fun prop_regexp_passes_validation/0, [], 500).
+
+min_length_integration_fails(Config) ->
+ rabbit_ct_broker_helpers:delete_user(Config, ?USERNAME),
+ rabbit_ct_broker_helpers:switch_credential_validator(Config, min_length, 50),
+ ?assertMatch(rabbit_credential_validator_min_password_length, validator_backend(Config)),
+ ?assertMatch({error, "minimum required password length is 50"},
+ rabbit_ct_broker_helpers:add_user(Config, ?USERNAME, <<"_">>)).
+
+regexp_integration_fails(Config) ->
+ rabbit_ct_broker_helpers:delete_user(Config, ?USERNAME),
+ rabbit_ct_broker_helpers:switch_credential_validator(Config, regexp),
+ ?assertMatch(rabbit_credential_validator_password_regexp, validator_backend(Config)),
+ ?assertMatch({error, _}, rabbit_ct_broker_helpers:add_user(Config, ?USERNAME, <<"_">>)).
+
+min_length_integration_succeeds(Config) ->
+ rabbit_ct_broker_helpers:delete_user(Config, ?USERNAME),
+ rabbit_ct_broker_helpers:switch_credential_validator(Config, min_length, 5),
+ ?assertMatch(rabbit_credential_validator_min_password_length, validator_backend(Config)),
+ ?assertMatch(ok, rabbit_ct_broker_helpers:add_user(Config, ?USERNAME, <<"abcdefghi">>)).
+
+regexp_integration_succeeds(Config) ->
+ rabbit_ct_broker_helpers:delete_user(Config, ?USERNAME),
+ rabbit_ct_broker_helpers:switch_credential_validator(Config, regexp),
+ ?assertMatch(rabbit_credential_validator_password_regexp, validator_backend(Config)),
+ ?assertMatch(ok, rabbit_ct_broker_helpers:add_user(Config, ?USERNAME, <<"xyz12345678901">>)).
+
+min_length_change_password_integration_fails(Config) ->
+ rabbit_ct_broker_helpers:delete_user(Config, ?USERNAME),
+ rabbit_ct_broker_helpers:switch_credential_validator(Config, accept_everything),
+ rabbit_ct_broker_helpers:add_user(Config, ?USERNAME, <<"abcdefghi">>),
+ rabbit_ct_broker_helpers:switch_credential_validator(Config, min_length, 50),
+ ?assertMatch(rabbit_credential_validator_min_password_length, validator_backend(Config)),
+ ?assertMatch({error, "minimum required password length is 50"},
+ rabbit_ct_broker_helpers:change_password(Config, ?USERNAME, <<"_">>)).
+
+regexp_change_password_integration_fails(Config) ->
+ rabbit_ct_broker_helpers:delete_user(Config, ?USERNAME),
+ rabbit_ct_broker_helpers:switch_credential_validator(Config, accept_everything),
+ rabbit_ct_broker_helpers:add_user(Config, ?USERNAME, <<"abcdefghi">>),
+ rabbit_ct_broker_helpers:switch_credential_validator(Config, regexp),
+ ?assertMatch(rabbit_credential_validator_password_regexp, validator_backend(Config)),
+ ?assertMatch({error, _}, rabbit_ct_broker_helpers:change_password(Config, ?USERNAME, <<"_">>)).
+
+min_length_change_password_integration_succeeds(Config) ->
+ rabbit_ct_broker_helpers:delete_user(Config, ?USERNAME),
+ rabbit_ct_broker_helpers:switch_credential_validator(Config, accept_everything),
+ rabbit_ct_broker_helpers:add_user(Config, ?USERNAME, <<"abcdefghi">>),
+ rabbit_ct_broker_helpers:switch_credential_validator(Config, min_length, 5),
+ ?assertMatch(rabbit_credential_validator_min_password_length, validator_backend(Config)),
+ ?assertMatch(ok, rabbit_ct_broker_helpers:change_password(Config, ?USERNAME, <<"abcdefghi">>)).
+
+regexp_change_password_integration_succeeds(Config) ->
+ rabbit_ct_broker_helpers:delete_user(Config, ?USERNAME),
+ rabbit_ct_broker_helpers:switch_credential_validator(Config, accept_everything),
+ rabbit_ct_broker_helpers:add_user(Config, ?USERNAME, <<"abcdefghi">>),
+ rabbit_ct_broker_helpers:switch_credential_validator(Config, regexp),
+ ?assertMatch(rabbit_credential_validator_password_regexp, validator_backend(Config)),
+ ?assertMatch(ok, rabbit_ct_broker_helpers:change_password(Config, ?USERNAME, <<"xyz12345678901">>)).
+
+%%
+%% PropEr
+%%
+
+prop_min_length_fails_validation() ->
+ N = 5,
+ F = fun rabbit_credential_validator_min_password_length:validate/3,
+ ?FORALL(Val, binary(N),
+ ?FORALL(Length, choose(N + 1, 100),
+ failed_validation(F(?USERNAME, Val, Length + 1)))).
+
+prop_min_length_passes_validation() ->
+ N = 20,
+ F = fun rabbit_credential_validator_min_password_length:validate/3,
+ ?FORALL(Val, binary(N),
+ ?FORALL(Length, choose(1, N - 1),
+ passed_validation(F(?USERNAME, Val, Length)))).
+
+prop_regexp_fails_validation() ->
+ N = 5,
+ F = fun rabbit_credential_validator_password_regexp:validate/3,
+ ?FORALL(Val, binary(N),
+ ?FORALL(Length, choose(N + 1, 100),
+ failed_validation(F(?USERNAME, Val, regexp_that_requires_length_of_at_least(Length + 1))))).
+
+prop_regexp_passes_validation() ->
+ N = 5,
+ F = fun rabbit_credential_validator_password_regexp:validate/3,
+ ?FORALL(Val, binary(N),
+ passed_validation(F(?USERNAME, Val, regexp_that_requires_length_of_at_most(size(Val) + 1)))).
+
+%%
+%% Helpers
+%%
+
+passed_validation(ok) ->
+ true;
+passed_validation({error, _}) ->
+ false.
+
+failed_validation(Result) ->
+ not passed_validation(Result).
+
+regexp_that_requires_length_of_at_least(N) when is_integer(N) ->
+ rabbit_misc:format("^[a-zA-Z0-9]{~p,~p}", [N, N + 10]).
+
+regexp_that_requires_length_of_at_most(N) when is_integer(N) ->
+ rabbit_misc:format("^[a-zA-Z0-9]{0,~p}", [N]).
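+
+%% For example, regexp_that_requires_length_of_at_least(7) yields
+%% "^[a-zA-Z0-9]{7,17}" and regexp_that_requires_length_of_at_most(5)
+%% yields "^[a-zA-Z0-9]{0,5}".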
+
+validator_backend(Config) ->
+ rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_credential_validation, backend, []).
diff --git a/deps/rabbit/test/unit_amqp091_content_framing_SUITE.erl b/deps/rabbit/test/unit_amqp091_content_framing_SUITE.erl
new file mode 100644
index 0000000000..d483dbdd06
--- /dev/null
+++ b/deps/rabbit/test/unit_amqp091_content_framing_SUITE.erl
@@ -0,0 +1,231 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(unit_amqp091_content_framing_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+-include_lib("rabbit_common/include/rabbit_framing.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, parallel_tests}
+ ].
+
+groups() ->
+ [
+ {parallel_tests, [parallel], [
+ write_table_with_invalid_existing_type,
+ invalid_existing_headers,
+ disparate_invalid_header_entries_accumulate_separately,
+ corrupt_or_invalid_headers_are_overwritten,
+ invalid_same_header_entry_accumulation,
+ content_framing,
+ content_transcoding,
+ table_codec
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Test Cases
+%% -------------------------------------------------------------------
+
+-define(XDEATH_TABLE,
+ [{<<"reason">>, longstr, <<"blah">>},
+ {<<"queue">>, longstr, <<"foo.bar.baz">>},
+ {<<"exchange">>, longstr, <<"my-exchange">>},
+ {<<"routing-keys">>, array, []}]).
+
+-define(ROUTE_TABLE, [{<<"redelivered">>, bool, <<"true">>}]).
+
+-define(BAD_HEADER(K), {<<K>>, longstr, <<"bad ", K>>}).
+-define(BAD_HEADER2(K, Suf), {<<K>>, longstr, <<"bad ", K, Suf>>}).
+-define(FOUND_BAD_HEADER(K), {<<K>>, array, [{longstr, <<"bad ", K>>}]}).
+
+write_table_with_invalid_existing_type(_Config) ->
+ prepend_check(<<"header1">>, ?XDEATH_TABLE, [?BAD_HEADER("header1")]).
+
+invalid_existing_headers(_Config) ->
+ Headers =
+ prepend_check(<<"header2">>, ?ROUTE_TABLE, [?BAD_HEADER("header2")]),
+ {array, [{table, ?ROUTE_TABLE}]} =
+ rabbit_misc:table_lookup(Headers, <<"header2">>),
+ passed.
+
+disparate_invalid_header_entries_accumulate_separately(_Config) ->
+ BadHeaders = [?BAD_HEADER("header2")],
+ Headers = prepend_check(<<"header2">>, ?ROUTE_TABLE, BadHeaders),
+ Headers2 = prepend_check(<<"header1">>, ?XDEATH_TABLE,
+ [?BAD_HEADER("header1") | Headers]),
+ {table, [?FOUND_BAD_HEADER("header1"),
+ ?FOUND_BAD_HEADER("header2")]} =
+ rabbit_misc:table_lookup(Headers2, ?INVALID_HEADERS_KEY),
+ passed.
+
+corrupt_or_invalid_headers_are_overwritten(_Config) ->
+ Headers0 = [?BAD_HEADER("header1"),
+ ?BAD_HEADER("x-invalid-headers")],
+ Headers1 = prepend_check(<<"header1">>, ?XDEATH_TABLE, Headers0),
+ {table,[?FOUND_BAD_HEADER("header1"),
+ ?FOUND_BAD_HEADER("x-invalid-headers")]} =
+ rabbit_misc:table_lookup(Headers1, ?INVALID_HEADERS_KEY),
+ passed.
+
+invalid_same_header_entry_accumulation(_Config) ->
+ BadHeader1 = ?BAD_HEADER2("header1", "a"),
+ Headers = prepend_check(<<"header1">>, ?ROUTE_TABLE, [BadHeader1]),
+ Headers2 = prepend_check(<<"header1">>, ?ROUTE_TABLE,
+ [?BAD_HEADER2("header1", "b") | Headers]),
+ {table, InvalidHeaders} =
+ rabbit_misc:table_lookup(Headers2, ?INVALID_HEADERS_KEY),
+ {array, [{longstr,<<"bad header1b">>},
+ {longstr,<<"bad header1a">>}]} =
+ rabbit_misc:table_lookup(InvalidHeaders, <<"header1">>),
+ passed.
+
+prepend_check(HeaderKey, HeaderTable, Headers) ->
+ Headers1 = rabbit_basic:prepend_table_header(
+ HeaderKey, HeaderTable, Headers),
+ {table, Invalid} =
+ rabbit_misc:table_lookup(Headers1, ?INVALID_HEADERS_KEY),
+ {Type, Value} = rabbit_misc:table_lookup(Headers, HeaderKey),
+ {array, [{Type, Value} | _]} =
+ rabbit_misc:table_lookup(Invalid, HeaderKey),
+ Headers1.
+
+
+%% Test that content frames don't exceed frame-max
+content_framing(_Config) ->
+ %% no content
+ passed = test_content_framing(4096, <<>>),
+ %% easily fit in one frame
+ passed = test_content_framing(4096, <<"Easy">>),
+ %% exactly one frame (empty frame = 8 bytes)
+ passed = test_content_framing(11, <<"One">>),
+ %% more than one frame
+ passed = test_content_framing(11, <<"More than one frame">>),
+ passed.
+
+test_content_framing(FrameMax, BodyBin) ->
+ [Header | Frames] =
+ rabbit_binary_generator:build_simple_content_frames(
+ 1,
+ rabbit_binary_generator:ensure_content_encoded(
+ rabbit_basic:build_content(#'P_basic'{}, BodyBin),
+ rabbit_framing_amqp_0_9_1),
+ FrameMax,
+ rabbit_framing_amqp_0_9_1),
+ %% header is formatted correctly and the size is the total of the
+ %% fragments
+ <<_FrameHeader:7/binary, _ClassAndWeight:4/binary,
+ BodySize:64/unsigned, _Rest/binary>> = list_to_binary(Header),
+ BodySize = size(BodyBin),
+ true = lists:all(
+ fun (ContentFrame) ->
+ FrameBinary = list_to_binary(ContentFrame),
+ %% assert
+ <<_TypeAndChannel:3/binary,
+ Size:32/unsigned, _Payload:Size/binary, 16#CE>> =
+ FrameBinary,
+ size(FrameBinary) =< FrameMax
+ end, Frames),
+ passed.
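+
+%% Worked example of the split above: with a frame-max of 11, the per-frame
+%% overhead is 8 bytes (7-byte header plus the 16#CE end-of-frame octet), so
+%% each content frame carries at most 3 bytes of body; the 19-byte payload
+%% <<"More than one frame">> is therefore emitted as 7 frames (6 x 3 + 1).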
+
+content_transcoding(_Config) ->
+ %% there are no guarantees provided by 'clear' - it's just a hint
+ ClearDecoded = fun rabbit_binary_parser:clear_decoded_content/1,
+ ClearEncoded = fun rabbit_binary_generator:clear_encoded_content/1,
+ EnsureDecoded =
+ fun (C0) ->
+ C1 = rabbit_binary_parser:ensure_content_decoded(C0),
+ true = C1#content.properties =/= none,
+ C1
+ end,
+ EnsureEncoded =
+ fun (Protocol) ->
+ fun (C0) ->
+ C1 = rabbit_binary_generator:ensure_content_encoded(
+ C0, Protocol),
+ true = C1#content.properties_bin =/= none,
+ C1
+ end
+ end,
+ %% Beyond the assertions in Ensure*, the only testable guarantee
+ %% is that the operations should never fail.
+ %%
+ %% If we were using quickcheck we'd simply stuff all the above
+ %% into a generator for sequences of operations. In the absence of
+ %% quickcheck we pick particularly interesting sequences that:
+ %%
+ %% - execute every op twice since they are idempotent
+ %% - invoke clear_decoded, clear_encoded, decode and transcode
+ %% with one or both of decoded and encoded content present
+ [begin
+ sequence_with_content([Op]),
+ sequence_with_content([ClearEncoded, Op]),
+ sequence_with_content([ClearDecoded, Op])
+ end || Op <- [ClearDecoded, ClearEncoded, EnsureDecoded,
+ EnsureEncoded(rabbit_framing_amqp_0_9_1),
+ EnsureEncoded(rabbit_framing_amqp_0_8)]],
+ passed.
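+
+%% Were this ported to PropEr, a sketch of the generator alluded to above
+%% (hypothetical, not part of this suite) might look like:
+%%   prop_transcode() ->
+%%       Ops = [ClearDecoded, ClearEncoded, EnsureDecoded,
+%%              EnsureEncoded(rabbit_framing_amqp_0_9_1)],
+%%       ?FORALL(Seq, list(oneof(Ops)),
+%%               begin _ = sequence_with_content(Seq), true end).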
+
+sequence_with_content(Sequence) ->
+ lists:foldl(fun (F, V) -> F(F(V)) end,
+ rabbit_binary_generator:ensure_content_encoded(
+ rabbit_basic:build_content(#'P_basic'{}, <<>>),
+ rabbit_framing_amqp_0_9_1),
+ Sequence).
+
+table_codec(_Config) ->
+ %% Note: this does not test inexact numbers (double and float) at the moment.
+ %% They won't pass the equality assertions.
+ Table = [{<<"longstr">>, longstr, <<"Here is a long string">>},
+ {<<"signedint">>, signedint, 12345},
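+             %% decimal is {Scale, Unscaled}: {3, 123456} encodes 123.456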
+ {<<"decimal">>, decimal, {3, 123456}},
+ {<<"timestamp">>, timestamp, 109876543209876},
+ {<<"table">>, table, [{<<"one">>, signedint, 54321},
+ {<<"two">>, longstr,
+ <<"A long string">>}]},
+ {<<"byte">>, byte, -128},
+ {<<"long">>, long, 1234567890},
+ {<<"short">>, short, 655},
+ {<<"bool">>, bool, true},
+ {<<"binary">>, binary, <<"a binary string">>},
+ {<<"unsignedbyte">>, unsignedbyte, 250},
+ {<<"unsignedshort">>, unsignedshort, 65530},
+ {<<"unsignedint">>, unsignedint, 4294967290},
+ {<<"void">>, void, undefined},
+ {<<"array">>, array, [{signedint, 54321},
+ {longstr, <<"A long string">>}]}
+ ],
+ Binary = <<
+ 7,"longstr", "S", 21:32, "Here is a long string",
+ 9,"signedint", "I", 12345:32/signed,
+ 7,"decimal", "D", 3, 123456:32,
+ 9,"timestamp", "T", 109876543209876:64,
+ 5,"table", "F", 31:32, % length of table
+ 3,"one", "I", 54321:32,
+ 3,"two", "S", 13:32, "A long string",
+ 4,"byte", "b", -128:8/signed,
+ 4,"long", "l", 1234567890:64,
+ 5,"short", "s", 655:16,
+ 4,"bool", "t", 1,
+ 6,"binary", "x", 15:32, "a binary string",
+ 12,"unsignedbyte", "B", 250:8/unsigned,
+ 13,"unsignedshort", "u", 65530:16/unsigned,
+ 11,"unsignedint", "i", 4294967290:32/unsigned,
+ 4,"void", "V",
+ 5,"array", "A", 23:32,
+ "I", 54321:32,
+ "S", 13:32, "A long string"
+ >>,
+ Binary = rabbit_binary_generator:generate_table(Table),
+ Table = rabbit_binary_parser:parse_table(Binary),
+ passed.
diff --git a/deps/rabbit/test/unit_amqp091_server_properties_SUITE.erl b/deps/rabbit/test/unit_amqp091_server_properties_SUITE.erl
new file mode 100644
index 0000000000..036fb8ce28
--- /dev/null
+++ b/deps/rabbit/test/unit_amqp091_server_properties_SUITE.erl
@@ -0,0 +1,144 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(unit_amqp091_server_properties_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("kernel/include/file.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-compile(export_all).
+
+-define(TIMEOUT_LIST_OPS_PASS, 5000).
+-define(TIMEOUT, 30000).
+-define(TIMEOUT_CHANNEL_EXCEPTION, 5000).
+
+-define(CLEANUP_QUEUE_NAME, <<"cleanup-queue">>).
+
+all() ->
+ [
+ {group, parallel_tests}
+ ].
+
+groups() ->
+ [
+ {parallel_tests, [parallel], [
+ configurable_server_properties
+ ]}
+ ].
+
+suite() ->
+ [
+ {timetrap, {minutes, 3}}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(Group, Config) ->
+ case lists:member({group, Group}, all()) of
+ true ->
+ ClusterSize = 2,
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, Group},
+ {rmq_nodes_count, ClusterSize}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps());
+ false ->
+ rabbit_ct_helpers:run_steps(Config, [])
+ end.
+
+end_per_group(Group, Config) ->
+ case lists:member({group, Group}, all()) of
+ true ->
+ rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps());
+ false ->
+ Config
+ end.
+
+init_per_testcase(Testcase, Config) ->
+ Group = proplists:get_value(name, ?config(tc_group_properties, Config)),
+ Q = rabbit_data_coercion:to_binary(io_lib:format("~p_~p", [Group, Testcase])),
+ Config1 = rabbit_ct_helpers:set_config(Config, [{queue_name, Q}]),
+ rabbit_ct_helpers:testcase_started(Config1, Testcase).
+
+%% -------------------------------------------------------------------
+%% Test cases
+%% -------------------------------------------------------------------
+
+configurable_server_properties(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, configurable_server_properties1, [Config]).
+
+configurable_server_properties1(_Config) ->
+    %% List of the names of the built-in properties we expect to find
+ BuiltInPropNames = [<<"product">>, <<"version">>, <<"platform">>,
+ <<"copyright">>, <<"information">>],
+
+ Protocol = rabbit_framing_amqp_0_9_1,
+
+ %% Verify that the built-in properties are initially present
+ ActualPropNames = [Key || {Key, longstr, _} <-
+ rabbit_reader:server_properties(Protocol)],
+ true = lists:all(fun (X) -> lists:member(X, ActualPropNames) end,
+ BuiltInPropNames),
+
+ %% Get the initial server properties configured in the environment
+ {ok, ServerProperties} = application:get_env(rabbit, server_properties),
+
+ %% Helper functions
+ ConsProp = fun (X) -> application:set_env(rabbit,
+ server_properties,
+ [X | ServerProperties]) end,
+ IsPropPresent =
+ fun (X) ->
+ lists:member(X, rabbit_reader:server_properties(Protocol))
+ end,
+
+ %% Add a wholly new property of the simplified {KeyAtom, StringValue} form
+ NewSimplifiedProperty = {NewHareKey, NewHareVal} = {hare, "soup"},
+ ConsProp(NewSimplifiedProperty),
+ %% Do we find hare soup, appropriately formatted in the generated properties?
+ ExpectedHareImage = {list_to_binary(atom_to_list(NewHareKey)),
+ longstr,
+ list_to_binary(NewHareVal)},
+ true = IsPropPresent(ExpectedHareImage),
+
+ %% Add a wholly new property of the {BinaryKey, Type, Value} form
+ %% and check for it
+ NewProperty = {<<"new-bin-key">>, signedint, -1},
+ ConsProp(NewProperty),
+ %% Do we find the new property?
+ true = IsPropPresent(NewProperty),
+
+ %% Add a property that clobbers a built-in, and verify correct clobbering
+ {NewVerKey, NewVerVal} = NewVersion = {version, "X.Y.Z."},
+ {BinNewVerKey, BinNewVerVal} = {list_to_binary(atom_to_list(NewVerKey)),
+ list_to_binary(NewVerVal)},
+ ConsProp(NewVersion),
+ ClobberedServerProps = rabbit_reader:server_properties(Protocol),
+ %% Is the clobbering insert present?
+ true = IsPropPresent({BinNewVerKey, longstr, BinNewVerVal}),
+ %% Is the clobbering insert the only thing with the clobbering key?
+ [{BinNewVerKey, longstr, BinNewVerVal}] =
+ [E || {K, longstr, _V} = E <- ClobberedServerProps, K =:= BinNewVerKey],
+
+ application:set_env(rabbit, server_properties, ServerProperties),
+ passed.
diff --git a/deps/rabbit/test/unit_app_management_SUITE.erl b/deps/rabbit/test/unit_app_management_SUITE.erl
new file mode 100644
index 0000000000..e08f151d57
--- /dev/null
+++ b/deps/rabbit/test/unit_app_management_SUITE.erl
@@ -0,0 +1,105 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(unit_app_management_SUITE).
+
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("kernel/include/file.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-compile(export_all).
+
+-define(TIMEOUT, 30000).
+
+all() ->
+ [
+ {group, non_parallel_tests}
+ ].
+
+groups() ->
+ [
+ {non_parallel_tests, [], [
+ app_management
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(Group, Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, Group},
+ {rmq_nodes_count, 2}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_group(_Group, Config) ->
+ rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+%% -------------------------------------------------------------------
+%% Application management.
+%% -------------------------------------------------------------------
+
+app_management(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, app_management1, [Config]).
+
+app_management1(_Config) ->
+ wait_for_application(rabbit),
+ %% Starting, stopping and diagnostics. Note that we don't try
+ %% 'report' when the rabbit app is stopped and that we enable
+ %% tracing for the duration of this function.
+ ok = rabbit_trace:start(<<"/">>),
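+    %% rabbit:stop/0 and rabbit:start/0 are expected to be idempotent,
+    %% hence each is deliberately invoked twice below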
+ ok = rabbit:stop(),
+ ok = rabbit:stop(),
+ ok = no_exceptions(rabbit, status, []),
+ ok = no_exceptions(rabbit, environment, []),
+ ok = rabbit:start(),
+ ok = rabbit:start(),
+ ok = no_exceptions(rabbit, status, []),
+ ok = no_exceptions(rabbit, environment, []),
+ ok = rabbit_trace:stop(<<"/">>),
+ passed.
+
+no_exceptions(Mod, Fun, Args) ->
+ try erlang:apply(Mod, Fun, Args) of _ -> ok
+ catch Type:Ex -> {Type, Ex}
+ end.
+
+wait_for_application(Application) ->
+ wait_for_application(Application, 5000).
+
+wait_for_application(_, Time) when Time =< 0 ->
+ {error, timeout};
+wait_for_application(Application, Time) ->
+ Interval = 100,
+ case lists:keyfind(Application, 1, application:which_applications()) of
+ false ->
+ timer:sleep(Interval),
+ wait_for_application(Application, Time - Interval);
+ _ -> ok
+ end.
diff --git a/deps/rabbit/test/unit_cluster_formation_locking_mocks_SUITE.erl b/deps/rabbit/test/unit_cluster_formation_locking_mocks_SUITE.erl
new file mode 100644
index 0000000000..41dd685694
--- /dev/null
+++ b/deps/rabbit/test/unit_cluster_formation_locking_mocks_SUITE.erl
@@ -0,0 +1,71 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+-module(unit_cluster_formation_locking_mocks_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, non_parallel_tests}
+ ].
+
+groups() ->
+ [
+ {non_parallel_tests, [], [
+ init_with_lock_exits_after_errors,
+ init_with_lock_ignore_after_errors,
+ init_with_lock_not_supported,
+ init_with_lock_supported
+ ]}
+ ].
+
+init_per_testcase(Testcase, Config) when Testcase == init_with_lock_exits_after_errors;
+ Testcase == init_with_lock_not_supported;
+ Testcase == init_with_lock_supported ->
+ application:set_env(rabbit, cluster_formation,
+                        [{peer_discovery_backend, rabbit_peer_discovery_classic_config},
+ {lock_acquisition_failure_mode, fail}]),
+ ok = meck:new(rabbit_peer_discovery_classic_config, [passthrough]),
+ Config;
+init_per_testcase(init_with_lock_ignore_after_errors, Config) ->
+ application:set_env(rabbit, cluster_formation,
+                        [{peer_discovery_backend, rabbit_peer_discovery_classic_config},
+ {lock_acquisition_failure_mode, ignore}]),
+ ok = meck:new(rabbit_peer_discovery_classic_config, [passthrough]),
+ Config.
+
+end_per_testcase(_, _) ->
+ meck:unload(),
+ application:unset_env(rabbit, cluster_formation).
+
+init_with_lock_exits_after_errors(_Config) ->
+ meck:expect(rabbit_peer_discovery_classic_config, lock, fun(_) -> {error, "test error"} end),
+ ?assertExit(cannot_acquire_startup_lock, rabbit_mnesia:init_with_lock(2, 10, fun() -> ok end)),
+ ?assert(meck:validate(rabbit_peer_discovery_classic_config)),
+ passed.
+
+init_with_lock_ignore_after_errors(_Config) ->
+ meck:expect(rabbit_peer_discovery_classic_config, lock, fun(_) -> {error, "test error"} end),
+ ?assertEqual(ok, rabbit_mnesia:init_with_lock(2, 10, fun() -> ok end)),
+ ?assert(meck:validate(rabbit_peer_discovery_classic_config)),
+ passed.
+
+init_with_lock_not_supported(_Config) ->
+ meck:expect(rabbit_peer_discovery_classic_config, lock, fun(_) -> not_supported end),
+ ?assertEqual(ok, rabbit_mnesia:init_with_lock(2, 10, fun() -> ok end)),
+ ?assert(meck:validate(rabbit_peer_discovery_classic_config)),
+ passed.
+
+init_with_lock_supported(_Config) ->
+ meck:expect(rabbit_peer_discovery_classic_config, lock, fun(_) -> {ok, data} end),
+ meck:expect(rabbit_peer_discovery_classic_config, unlock, fun(data) -> ok end),
+ ?assertEqual(ok, rabbit_mnesia:init_with_lock(2, 10, fun() -> ok end)),
+ ?assert(meck:validate(rabbit_peer_discovery_classic_config)),
+ passed.
diff --git a/deps/rabbit/test/unit_collections_SUITE.erl b/deps/rabbit/test/unit_collections_SUITE.erl
new file mode 100644
index 0000000000..1cbf65efce
--- /dev/null
+++ b/deps/rabbit/test/unit_collections_SUITE.erl
@@ -0,0 +1,51 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(unit_collections_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, parallel_tests}
+ ].
+
+groups() ->
+ [
+ {parallel_tests, [parallel], [
+ pmerge,
+ plmerge,
+ unfold
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Test Cases
+%% -------------------------------------------------------------------
+
+pmerge(_Config) ->
+ P = [{a, 1}, {b, 2}],
+ P = rabbit_misc:pmerge(a, 3, P),
+ [{c, 3} | P] = rabbit_misc:pmerge(c, 3, P),
+ passed.
+
+plmerge(_Config) ->
+ P1 = [{a, 1}, {b, 2}, {c, 3}],
+ P2 = [{a, 2}, {d, 4}],
+ [{a, 1}, {b, 2}, {c, 3}, {d, 4}] = rabbit_misc:plmerge(P1, P2),
+ passed.
+
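+%% rabbit_misc:unfold(Fun, Acc0) applies Fun to the accumulator repeatedly:
+%% false stops generation, {true, Element, NextAcc} prepends Element to the
+%% result, so the first element generated ends up last (here: [2,4,...,20]).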
+unfold(_Config) ->
+ {[], test} = rabbit_misc:unfold(fun (_V) -> false end, test),
+ List = lists:seq(2,20,2),
+ {List, 0} = rabbit_misc:unfold(fun (0) -> false;
+ (N) -> {true, N*2, N-1}
+ end, 10),
+ passed.
diff --git a/deps/rabbit/test/unit_config_value_encryption_SUITE.erl b/deps/rabbit/test/unit_config_value_encryption_SUITE.erl
new file mode 100644
index 0000000000..7536005797
--- /dev/null
+++ b/deps/rabbit/test/unit_config_value_encryption_SUITE.erl
@@ -0,0 +1,233 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(unit_config_value_encryption_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, sequential_tests}
+ ].
+
+groups() ->
+ [
+ {sequential_tests, [], [
+ decrypt_start_app,
+ decrypt_start_app_file,
+ decrypt_start_app_undefined,
+ decrypt_start_app_wrong_passphrase,
+ decrypt_config,
+ rabbitmqctl_encode
+ ]}
+ ].
+
+init_per_testcase(TC, Config) when TC =:= decrypt_start_app;
+ TC =:= decrypt_start_app_file;
+ TC =:= decrypt_start_app_undefined;
+ TC =:= decrypt_start_app_wrong_passphrase ->
+ application:set_env(rabbit, feature_flags_file, "", [{persistent, true}]),
+ Config;
+init_per_testcase(_Testcase, Config) ->
+ Config.
+
+end_per_testcase(_TC, _Config) ->
+ ok.
+
+%% -------------------------------------------------------------------
+%% Test Cases
+%% -------------------------------------------------------------------
+
+decrypt_config(_Config) ->
+ %% Take all available block ciphers.
+ Hashes = rabbit_pbe:supported_hashes(),
+ Ciphers = rabbit_pbe:supported_ciphers(),
+ Iterations = [1, 10, 100, 1000],
+ %% Loop through all hashes, ciphers and iterations.
+ _ = [begin
+ PassPhrase = crypto:strong_rand_bytes(16),
+ do_decrypt_config({C, H, I, PassPhrase})
+ end || H <- Hashes, C <- Ciphers, I <- Iterations],
+ ok.
+
+do_decrypt_config(Algo = {C, H, I, P}) ->
+ ok = application:load(rabbit),
+ RabbitConfig = application:get_all_env(rabbit),
+ %% Encrypt a few values in configuration.
+ %% Common cases.
+ _ = [encrypt_value(Key, Algo) || Key <- [
+ tcp_listeners,
+ num_tcp_acceptors,
+ ssl_options,
+ vm_memory_high_watermark,
+ default_pass,
+ default_permissions,
+ cluster_nodes,
+ auth_mechanisms,
+ msg_store_credit_disc_bound]],
+ %% Special case: encrypt a value in a list.
+ {ok, [LoopbackUser]} = application:get_env(rabbit, loopback_users),
+ {encrypted, EncLoopbackUser} = rabbit_pbe:encrypt_term(C, H, I, P, LoopbackUser),
+ application:set_env(rabbit, loopback_users, [{encrypted, EncLoopbackUser}]),
+ %% Special case: encrypt a value in a key/value list.
+ {ok, TCPOpts} = application:get_env(rabbit, tcp_listen_options),
+ {_, Backlog} = lists:keyfind(backlog, 1, TCPOpts),
+ {_, Linger} = lists:keyfind(linger, 1, TCPOpts),
+ {encrypted, EncBacklog} = rabbit_pbe:encrypt_term(C, H, I, P, Backlog),
+ {encrypted, EncLinger} = rabbit_pbe:encrypt_term(C, H, I, P, Linger),
+ TCPOpts1 = lists:keyreplace(backlog, 1, TCPOpts, {backlog, {encrypted, EncBacklog}}),
+ TCPOpts2 = lists:keyreplace(linger, 1, TCPOpts1, {linger, {encrypted, EncLinger}}),
+ application:set_env(rabbit, tcp_listen_options, TCPOpts2),
+ %% Decrypt configuration.
+ rabbit_prelaunch_conf:decrypt_config([rabbit], Algo),
+ %% Check that configuration was decrypted properly.
+ RabbitConfig = application:get_all_env(rabbit),
+ ok = application:unload(rabbit),
+ ok.
+
+encrypt_value(Key, {C, H, I, P}) ->
+ {ok, Value} = application:get_env(rabbit, Key),
+ {encrypted, EncValue} = rabbit_pbe:encrypt_term(C, H, I, P, Value),
+ application:set_env(rabbit, Key, {encrypted, EncValue}).
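+
+%% For reference, the round trip behind these helpers is (a sketch; note
+%% that decrypt_term/5 takes the bare encrypted payload, not the
+%% {encrypted, _} tuple):
+%%   {encrypted, Enc} = rabbit_pbe:encrypt_term(C, H, I, P, Term),
+%%   Term = rabbit_pbe:decrypt_term(C, H, I, P, Enc).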
+
+decrypt_start_app(Config) ->
+ do_decrypt_start_app(Config, "hello").
+
+decrypt_start_app_file(Config) ->
+ do_decrypt_start_app(Config, {file, ?config(data_dir, Config) ++ "/rabbit_shovel_test.passphrase"}).
+
+do_decrypt_start_app(Config, Passphrase) ->
+ %% Configure rabbit for decrypting configuration.
+ application:set_env(rabbit, config_entry_decoder, [
+ {cipher, aes_cbc256},
+ {hash, sha512},
+ {iterations, 1000},
+ {passphrase, Passphrase}
+ ], [{persistent, true}]),
+ %% Add the path to our test application.
+ code:add_path(?config(data_dir, Config) ++ "/lib/rabbit_shovel_test/ebin"),
+ %% Attempt to start our test application.
+ %%
+ %% We expect a failure *after* the decrypting has been done.
+ try
+ rabbit:start_apps([rabbit_shovel_test], #{rabbit => temporary})
+ catch _:_ ->
+ ok
+ end,
+ %% Check if the values have been decrypted.
+ {ok, Shovels} = application:get_env(rabbit_shovel_test, shovels),
+ {_, FirstShovel} = lists:keyfind(my_first_shovel, 1, Shovels),
+ {_, Sources} = lists:keyfind(sources, 1, FirstShovel),
+ {_, Brokers} = lists:keyfind(brokers, 1, Sources),
+ ["amqp://fred:secret@host1.domain/my_vhost",
+ "amqp://john:secret@host2.domain/my_vhost"] = Brokers,
+ ok.
+
+decrypt_start_app_undefined(Config) ->
+ %% Configure rabbit for decrypting configuration.
+ application:set_env(rabbit, config_entry_decoder, [
+ {cipher, aes_cbc256},
+ {hash, sha512},
+ {iterations, 1000}
+ %% No passphrase option!
+ ], [{persistent, true}]),
+ %% Add the path to our test application.
+ code:add_path(?config(data_dir, Config) ++ "/lib/rabbit_shovel_test/ebin"),
+ %% Attempt to start our test application.
+ %%
+ %% We expect a failure during decryption because the passphrase is missing.
+ try
+ rabbit:start_apps([rabbit_shovel_test], #{rabbit => temporary})
+ catch
+ throw:{bad_config_entry_decoder, missing_passphrase} -> ok;
+ _:Exception -> exit({unexpected_exception, Exception})
+ end.
+
+decrypt_start_app_wrong_passphrase(Config) ->
+ %% Configure rabbit for decrypting configuration.
+ application:set_env(rabbit, config_entry_decoder, [
+ {cipher, aes_cbc256},
+ {hash, sha512},
+ {iterations, 1000},
+ {passphrase, "wrong passphrase"}
+ ], [{persistent, true}]),
+ %% Add the path to our test application.
+ code:add_path(?config(data_dir, Config) ++ "/lib/rabbit_shovel_test/ebin"),
+ %% Attempt to start our test application.
+ %%
+ %% We expect a failure during decryption because the passphrase is wrong.
+ try
+ rabbit:start_apps([rabbit_shovel_test], #{rabbit => temporary})
+ catch
+ throw:{config_decryption_error, _, _} -> ok;
+ _:Exception -> exit({unexpected_exception, Exception})
+ end.
+
+rabbitmqctl_encode(_Config) ->
+ % list ciphers and hashes
+ {ok, _} = rabbit_control_pbe:list_ciphers(),
+ {ok, _} = rabbit_control_pbe:list_hashes(),
+ % incorrect ciphers, hashes and iteration number
+ {error, _} = rabbit_control_pbe:encode(funny_cipher, undefined, undefined, undefined),
+ {error, _} = rabbit_control_pbe:encode(undefined, funny_hash, undefined, undefined),
+ {error, _} = rabbit_control_pbe:encode(undefined, undefined, -1, undefined),
+ {error, _} = rabbit_control_pbe:encode(undefined, undefined, 0, undefined),
+ % incorrect number of arguments
+ {error, _} = rabbit_control_pbe:encode(
+ rabbit_pbe:default_cipher(), rabbit_pbe:default_hash(), rabbit_pbe:default_iterations(),
+ []
+ ),
+ {error, _} = rabbit_control_pbe:encode(
+ rabbit_pbe:default_cipher(), rabbit_pbe:default_hash(), rabbit_pbe:default_iterations(),
+ [undefined]
+ ),
+ {error, _} = rabbit_control_pbe:encode(
+ rabbit_pbe:default_cipher(), rabbit_pbe:default_hash(), rabbit_pbe:default_iterations(),
+ [undefined, undefined, undefined]
+ ),
+
+ % encrypt/decrypt
+ % string
+ rabbitmqctl_encode_encrypt_decrypt("foobar"),
+ % binary
+ rabbitmqctl_encode_encrypt_decrypt("<<\"foobar\">>"),
+ % tuple
+ rabbitmqctl_encode_encrypt_decrypt("{password,<<\"secret\">>}"),
+
+ ok.
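+
+%% These calls mirror the CLI, e.g. (a sketch of the equivalent commands):
+%%   rabbitmqctl encode '{password, <<"secret">>}' mypassphrase
+%%   rabbitmqctl decode '{encrypted, <<"...">>}' mypassphrase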
+
+rabbitmqctl_encode_encrypt_decrypt(Secret) ->
+ PassPhrase = "passphrase",
+ {ok, Output} = rabbit_control_pbe:encode(
+ rabbit_pbe:default_cipher(), rabbit_pbe:default_hash(), rabbit_pbe:default_iterations(),
+ [Secret, PassPhrase]
+ ),
+ {encrypted, Encrypted} = rabbit_control_pbe:evaluate_input_as_term(lists:flatten(Output)),
+
+ {ok, Result} = rabbit_control_pbe:decode(
+ rabbit_pbe:default_cipher(), rabbit_pbe:default_hash(), rabbit_pbe:default_iterations(),
+ [lists:flatten(io_lib:format("~p", [Encrypted])), PassPhrase]
+ ),
+ Secret = lists:flatten(Result),
+ % decrypt with {encrypted, ...} form as input
+ {ok, Result} = rabbit_control_pbe:decode(
+ rabbit_pbe:default_cipher(), rabbit_pbe:default_hash(), rabbit_pbe:default_iterations(),
+ [lists:flatten(io_lib:format("~p", [{encrypted, Encrypted}])), PassPhrase]
+ ),
+
+ % wrong passphrase
+ {error, _} = rabbit_control_pbe:decode(
+ rabbit_pbe:default_cipher(), rabbit_pbe:default_hash(), rabbit_pbe:default_iterations(),
+ [lists:flatten(io_lib:format("~p", [Encrypted])), PassPhrase ++ " "]
+ ),
+ {error, _} = rabbit_control_pbe:decode(
+ rabbit_pbe:default_cipher(), rabbit_pbe:default_hash(), rabbit_pbe:default_iterations(),
+ [lists:flatten(io_lib:format("~p", [{encrypted, Encrypted}])), PassPhrase ++ " "]
+ ).
diff --git a/deps/rabbit/test/unit_config_value_encryption_SUITE_data/lib/rabbit_shovel_test/ebin/rabbit_shovel_test.app b/deps/rabbit/test/unit_config_value_encryption_SUITE_data/lib/rabbit_shovel_test/ebin/rabbit_shovel_test.app
new file mode 100644
index 0000000000..a8481c9aa4
--- /dev/null
+++ b/deps/rabbit/test/unit_config_value_encryption_SUITE_data/lib/rabbit_shovel_test/ebin/rabbit_shovel_test.app
@@ -0,0 +1,46 @@
+{application, rabbit_shovel_test,
+ [{description, "Test .app file for tests for encrypting configuration"},
+ {vsn, ""},
+ {modules, []},
+ {env, [ {shovels, [ {my_first_shovel,
+ [ {sources,
+ [ {brokers, [ {encrypted, <<"CfJXuka/uJYsqAtiJnwKpSY4moMPcOBh4sO8XDcdmhXbVYGKCDLKEilWPMfvOAQ2lN1BQneGn6bvDZi2+gDu6iHVKfafQAZSv8zcsVB3uYdBXFzqTCWO8TAsgG6LUMPT">>}
+ , {encrypted, <<"dBO6n+G1OiBwZeLXhvmNYeTE57nhBOmicUBF34zo4nQjerzQaNoEk8GA2Ts5PzMhYeO6U6Y9eEmheqIr9Gzh2duLZic65ZMQtIKNpWcZJllEhGpk7aV1COr23Yur9fWG">>}
+ ]}
+ , {declarations, [ {'exchange.declare',
+ [ {exchange, <<"my_fanout">>}
+ , {type, <<"fanout">>}
+ , durable
+ ]}
+ , {'queue.declare',
+ [{arguments,
+ [{<<"x-message-ttl">>, long, 60000}]}]}
+ , {'queue.bind',
+ [ {exchange, <<"my_direct">>}
+ , {queue, <<>>}
+ ]}
+ ]}
+ ]}
+ , {destinations,
+ [ {broker, "amqp://"}
+ , {declarations, [ {'exchange.declare',
+ [ {exchange, <<"my_direct">>}
+ , {type, <<"direct">>}
+ , durable
+ ]}
+ ]}
+ ]}
+ , {queue, <<>>}
+ , {prefetch_count, 10}
+ , {ack_mode, on_confirm}
+ , {publish_properties, [ {delivery_mode, 2} ]}
+ , {add_forward_headers, true}
+ , {publish_fields, [ {exchange, <<"my_direct">>}
+ , {routing_key, <<"from_shovel">>}
+ ]}
+ , {reconnect_delay, 5}
+ ]}
+ ]}
+ ]},
+
+ {applications, [kernel, stdlib]}]}.
diff --git a/deps/rabbit/test/unit_config_value_encryption_SUITE_data/rabbit_shovel_test.passphrase b/deps/rabbit/test/unit_config_value_encryption_SUITE_data/rabbit_shovel_test.passphrase
new file mode 100644
index 0000000000..ce01362503
--- /dev/null
+++ b/deps/rabbit/test/unit_config_value_encryption_SUITE_data/rabbit_shovel_test.passphrase
@@ -0,0 +1 @@
+hello
diff --git a/deps/rabbit/test/unit_connection_tracking_SUITE.erl b/deps/rabbit/test/unit_connection_tracking_SUITE.erl
new file mode 100644
index 0000000000..4ea1744fa7
--- /dev/null
+++ b/deps/rabbit/test/unit_connection_tracking_SUITE.erl
@@ -0,0 +1,119 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(unit_connection_tracking_SUITE).
+
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("kernel/include/file.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-compile(export_all).
+
+-define(TIMEOUT, 30000).
+
+all() ->
+ [
+ {group, non_parallel_tests}
+ ].
+
+groups() ->
+ [
+ {non_parallel_tests, [], [
+ exchange_count,
+ queue_count,
+ connection_count,
+ connection_lookup
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Test suite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(Group, Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, Group},
+ {rmq_nodes_count, 1}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_group(_Group, Config) ->
+ rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+
+%% ---------------------------------------------------------------------------
+%% Count functions for management only API purposes
+%% ---------------------------------------------------------------------------
+
+exchange_count(Config) ->
+ %% Default exchanges == 7
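+    %% (the nameless default exchange plus amq.direct, amq.fanout,
+    %% amq.headers, amq.match, amq.rabbitmq.trace and amq.topic)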
+ ?assertEqual(7, rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_exchange, count, [])).
+
+queue_count(Config) ->
+ Conn = rabbit_ct_client_helpers:open_connection(Config, 0),
+ {ok, Ch} = amqp_connection:open_channel(Conn),
+ amqp_channel:call(Ch, #'queue.declare'{ queue = <<"my-queue">> }),
+
+ ?assertEqual(1, rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_amqqueue, count, [])),
+
+ amqp_channel:call(Ch, #'queue.delete'{ queue = <<"my-queue">> }),
+ rabbit_ct_client_helpers:close_channel(Ch),
+ rabbit_ct_client_helpers:close_connection(Conn),
+ ok.
+
+%% connection_count/1 has been failing on Travis. This appears to be a legitimate failure, as
+%% registration of connections in the tracker is asynchronous: `rabbit_connection_tracking_handler`
+%% receives a `connection_created` rabbit event and forwards it as a cast to
+%% `rabbit_connection_tracker`, which performs the registration. We should therefore wait a
+%% reasonable amount of time for the counter to increase before failing.
+connection_count(Config) ->
+ Conn = rabbit_ct_client_helpers:open_connection(Config, 0),
+
+ rabbit_ct_helpers:await_condition(
+ fun() ->
+ rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_connection_tracking, count, []) == 1
+ end, 30000),
+
+ rabbit_ct_client_helpers:close_connection(Conn),
+ ok.
+
+connection_lookup(Config) ->
+ Conn = rabbit_ct_client_helpers:open_connection(Config, 0),
+
+ %% Let's wait until the connection is registered, otherwise this test could fail in a slow
+ %% machine as connection tracking is asynchronous
+ rabbit_ct_helpers:await_condition(
+ fun() ->
+ rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_connection_tracking, count, []) == 1
+ end, 30000),
+
+ [Connection] = rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_connection_tracking, list, []),
+ ?assertMatch(Connection, rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_connection_tracking,
+ lookup,
+ [Connection#tracked_connection.name])),
+
+ rabbit_ct_client_helpers:close_connection(Conn),
+ ok.
diff --git a/deps/rabbit/test/unit_credit_flow_SUITE.erl b/deps/rabbit/test/unit_credit_flow_SUITE.erl
new file mode 100644
index 0000000000..ffad444dde
--- /dev/null
+++ b/deps/rabbit/test/unit_credit_flow_SUITE.erl
@@ -0,0 +1,90 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(unit_credit_flow_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, sequential_tests}
+ ].
+
+groups() ->
+ [
+ {sequential_tests, [], [
+ credit_flow_settings
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(Group, Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, Group},
+ {rmq_nodes_count, 1}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_group(_Group, Config) ->
+ rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+
+%% ---------------------------------------------------------------------------
+%% Test Cases
+%% ---------------------------------------------------------------------------
+
+credit_flow_settings(Config) ->
+ rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, credit_flow_settings1, [Config]).
+
+credit_flow_settings1(_Config) ->
+ passed = test_proc(400, 200, {400, 200}),
+ passed = test_proc(600, 300),
+ passed.
+
+test_proc(InitialCredit, MoreCreditAfter) ->
+    test_proc(InitialCredit, MoreCreditAfter, {InitialCredit, MoreCreditAfter}).
+
+test_proc(InitialCredit, MoreCreditAfter, Settings) ->
+ Pid = spawn(?MODULE, dummy, [Settings]),
+ Pid ! {credit, self()},
+ {InitialCredit, MoreCreditAfter} =
+ receive
+ {credit, Val} -> Val
+ end,
+ passed.
+
+dummy(Settings) ->
+ credit_flow:send(self()),
+ receive
+ {credit, From} ->
+ From ! {credit, Settings};
+ _ ->
+ dummy(Settings)
+ end.
diff --git a/deps/rabbit/test/unit_disk_monitor_SUITE.erl b/deps/rabbit/test/unit_disk_monitor_SUITE.erl
new file mode 100644
index 0000000000..bc21114c12
--- /dev/null
+++ b/deps/rabbit/test/unit_disk_monitor_SUITE.erl
@@ -0,0 +1,90 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(unit_disk_monitor_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-compile(export_all).
+
+-define(TIMEOUT, 30000).
+
+all() ->
+ [
+ {group, sequential_tests}
+ ].
+
+groups() ->
+ [
+ {sequential_tests, [], [
+ set_disk_free_limit_command
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(Group, Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, Group},
+ {rmq_nodes_count, 1}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_group(_Group, Config) ->
+ rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+
+%% -------------------------------------------------------------------
+%% Test cases
+%% -------------------------------------------------------------------
+
+set_disk_free_limit_command(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, set_disk_free_limit_command1, [Config]).
+
+set_disk_free_limit_command1(_Config) ->
+ %% Use an integer
+ rabbit_disk_monitor:set_disk_free_limit({mem_relative, 1}),
+ disk_free_limit_to_total_memory_ratio_is(1),
+
+ %% Use a float
+ rabbit_disk_monitor:set_disk_free_limit({mem_relative, 1.5}),
+ disk_free_limit_to_total_memory_ratio_is(1.5),
+
+    %% Use an absolute value
+ rabbit_disk_monitor:set_disk_free_limit("70MiB"),
+ ?assertEqual(73400320, rabbit_disk_monitor:get_disk_free_limit()),
+
+ rabbit_disk_monitor:set_disk_free_limit("50MB"),
+ ?assertEqual(50 * 1000 * 1000, rabbit_disk_monitor:get_disk_free_limit()),
+ passed.
+
+disk_free_limit_to_total_memory_ratio_is(MemRatio) ->
+ ExpectedLimit = MemRatio * vm_memory_monitor:get_total_memory(),
+    % Total memory readings are unstable, so only check that the ratio stays close to 1
+ true = ExpectedLimit/rabbit_disk_monitor:get_disk_free_limit() < 1.2,
+ true = ExpectedLimit/rabbit_disk_monitor:get_disk_free_limit() > 0.98.
diff --git a/deps/rabbit/test/unit_disk_monitor_mocks_SUITE.erl b/deps/rabbit/test/unit_disk_monitor_mocks_SUITE.erl
new file mode 100644
index 0000000000..af78d0d134
--- /dev/null
+++ b/deps/rabbit/test/unit_disk_monitor_mocks_SUITE.erl
@@ -0,0 +1,112 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(unit_disk_monitor_mocks_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-compile(export_all).
+
+-define(TIMEOUT, 30000).
+
+all() ->
+ [
+ {group, sequential_tests}
+ ].
+
+groups() ->
+ [
+ {sequential_tests, [], [
+ disk_monitor,
+ disk_monitor_enable
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(Group, Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, Group},
+ {rmq_nodes_count, 1}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_group(_Group, Config) ->
+ rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+%% -------------------------------------------------------------------
+%% Test cases
+%% -------------------------------------------------------------------
+
+disk_monitor(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, disk_monitor1, [Config]).
+
+disk_monitor1(_Config) ->
+ %% Issue: rabbitmq-server #91
+    %% The os module could be mocked using 'unstick'; however, that may have
+    %% undesired side effects in subsequent tests. Thus, we mock at the
+    %% rabbit_misc level instead.
+ ok = meck:new(rabbit_misc, [passthrough]),
+ ok = meck:expect(rabbit_misc, os_cmd, fun(_) -> "\n" end),
+ ok = rabbit_sup:stop_child(rabbit_disk_monitor_sup),
+ ok = rabbit_sup:start_delayed_restartable_child(rabbit_disk_monitor, [1000]),
+ meck:unload(rabbit_misc),
+ passed.
+
+disk_monitor_enable(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, disk_monitor_enable1, [Config]).
+
+disk_monitor_enable1(_Config) ->
+ ok = meck:new(rabbit_misc, [passthrough]),
+ ok = meck:expect(rabbit_misc, os_cmd, fun(_) -> "\n" end),
+ application:set_env(rabbit, disk_monitor_failure_retries, 20000),
+ application:set_env(rabbit, disk_monitor_failure_retry_interval, 100),
+ ok = rabbit_sup:stop_child(rabbit_disk_monitor_sup),
+ ok = rabbit_sup:start_delayed_restartable_child(rabbit_disk_monitor, [1000]),
+ undefined = rabbit_disk_monitor:get_disk_free(),
+ Cmd = case os:type() of
+ {win32, _} -> " Le volume dans le lecteur C n’a pas de nom.\n"
+ " Le numéro de série du volume est 707D-5BDC\n"
+ "\n"
+                     " Répertoire de C:\\Users\n"
+ "\n"
+ "10/12/2015 11:01 <DIR> .\n"
+ "10/12/2015 11:01 <DIR> ..\n"
+ " 0 fichier(s) 0 octets\n"
+ " 2 Rép(s) 758537121792 octets libres\n";
+ _ -> "Filesystem 1024-blocks Used Available Capacity iused ifree %iused Mounted on\n"
+ "/dev/disk1 975798272 234783364 740758908 25% 58759839 185189727 24% /\n"
+ end,
+ ok = meck:expect(rabbit_misc, os_cmd, fun(_) -> Cmd end),
+ timer:sleep(1000),
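+    %% the monitor reads the 'Available' column of the mocked df(1) output,
+    %% which is reported in 1024-byte blocks, hence the multiplication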
+ Bytes = 740758908 * 1024,
+ Bytes = rabbit_disk_monitor:get_disk_free(),
+ meck:unload(rabbit_misc),
+ application:set_env(rabbit, disk_monitor_failure_retries, 10),
+ application:set_env(rabbit, disk_monitor_failure_retry_interval, 120000),
+ passed.
diff --git a/deps/rabbit/test/unit_file_handle_cache_SUITE.erl b/deps/rabbit/test/unit_file_handle_cache_SUITE.erl
new file mode 100644
index 0000000000..f2252aa2b5
--- /dev/null
+++ b/deps/rabbit/test/unit_file_handle_cache_SUITE.erl
@@ -0,0 +1,278 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(unit_file_handle_cache_SUITE).
+
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("kernel/include/file.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-compile(export_all).
+
+-define(TIMEOUT, 30000).
+
+all() ->
+ [
+ {group, non_parallel_tests}
+ ].
+
+groups() ->
+ [
+ {non_parallel_tests, [], [
+ file_handle_cache, %% Change FHC limit.
+ file_handle_cache_reserve,
+ file_handle_cache_reserve_release,
+ file_handle_cache_reserve_above_limit,
+ file_handle_cache_reserve_monitor,
+ file_handle_cache_reserve_open_file_above_limit
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(Group, Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, Group},
+ {rmq_nodes_count, 2}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps() ++ [
+ fun setup_file_handle_cache/1
+ ]).
+
+setup_file_handle_cache(Config) ->
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, setup_file_handle_cache1, []),
+ Config.
+
+setup_file_handle_cache1() ->
+ %% FIXME: Why are we doing this?
+ application:set_env(rabbit, file_handles_high_watermark, 10),
+ ok = file_handle_cache:set_limit(10),
+ ok.
+
+end_per_group(_Group, Config) ->
+ rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+
+%% ---------------------------------------------------------------------------
+%% file_handle_cache.
+%% ---------------------------------------------------------------------------
+
+file_handle_cache(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, file_handle_cache1, [Config]).
+
+file_handle_cache1(_Config) ->
+ %% test copying when there is just one spare handle
+ Limit = file_handle_cache:get_limit(),
+ ok = file_handle_cache:set_limit(5), %% 1 or 2 sockets, 2 msg_stores
+ TmpDir = filename:join(rabbit_mnesia:dir(), "tmp"),
+ ok = filelib:ensure_dir(filename:join(TmpDir, "nothing")),
+ [Src1, Dst1, Src2, Dst2] = Files =
+ [filename:join(TmpDir, Str) || Str <- ["file1", "file2", "file3", "file4"]],
+ Content = <<"foo">>,
+ CopyFun = fun (Src, Dst) ->
+ {ok, Hdl} = prim_file:open(Src, [binary, write]),
+ ok = prim_file:write(Hdl, Content),
+ ok = prim_file:sync(Hdl),
+ prim_file:close(Hdl),
+
+ {ok, SrcHdl} = file_handle_cache:open(Src, [read], []),
+ {ok, DstHdl} = file_handle_cache:open(Dst, [write], []),
+ Size = size(Content),
+ {ok, Size} = file_handle_cache:copy(SrcHdl, DstHdl, Size),
+ ok = file_handle_cache:delete(SrcHdl),
+ ok = file_handle_cache:delete(DstHdl)
+ end,
+ Pid = spawn(fun () -> {ok, Hdl} = file_handle_cache:open(
+ filename:join(TmpDir, "file5"),
+ [write], []),
+ receive {next, Pid1} -> Pid1 ! {next, self()} end,
+ file_handle_cache:delete(Hdl),
+ %% This will block and never return, so we
+ %% exercise the fhc tidying up the pending
+ %% queue on the death of a process.
+ ok = CopyFun(Src1, Dst1)
+ end),
+ ok = CopyFun(Src1, Dst1),
+ ok = file_handle_cache:set_limit(2),
+ Pid ! {next, self()},
+ receive {next, Pid} -> ok end,
+ timer:sleep(100),
+ Pid1 = spawn(fun () -> CopyFun(Src2, Dst2) end),
+ timer:sleep(100),
+ erlang:monitor(process, Pid),
+ erlang:monitor(process, Pid1),
+ exit(Pid, kill),
+ exit(Pid1, kill),
+ receive {'DOWN', _MRef, process, Pid, _Reason} -> ok end,
+ receive {'DOWN', _MRef1, process, Pid1, _Reason1} -> ok end,
+ [file:delete(File) || File <- Files],
+ ok = file_handle_cache:set_limit(Limit),
+ passed.
+
+file_handle_cache_reserve(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, file_handle_cache_reserve1, [Config]).
+
+file_handle_cache_reserve1(_Config) ->
+ Limit = file_handle_cache:get_limit(),
+ ok = file_handle_cache:set_limit(5),
+ %% Reserves are always accepted, even if above the limit
+ %% These are for special processes such as quorum queues
+ ok = file_handle_cache:set_reservation(7),
+
+ Self = self(),
+ spawn(fun () -> ok = file_handle_cache:obtain(),
+ Self ! obtained
+ end),
+
+ Props = file_handle_cache:info([files_reserved, sockets_used]),
+ ?assertEqual(7, proplists:get_value(files_reserved, Props)),
+ ?assertEqual(0, proplists:get_value(sockets_used, Props)),
+
+ %% The obtain should still be blocked, as there are no file handles
+ %% available
+ receive
+ obtained ->
+ throw(error_file_obtained)
+ after 1000 ->
+ %% Let's release 5 file handles, that should leave
+ %% enough free for the `obtain` to go through
+ file_handle_cache:set_reservation(2),
+ Props0 = file_handle_cache:info([files_reserved, sockets_used]),
+ ?assertEqual(2, proplists:get_value(files_reserved, Props0)),
+ ?assertEqual(1, proplists:get_value(sockets_used, Props0)),
+ receive
+ obtained ->
+ ok = file_handle_cache:set_limit(Limit),
+ passed
+ after 5000 ->
+ throw(error_file_not_released)
+ end
+ end.
+
+file_handle_cache_reserve_release(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, file_handle_cache_reserve_release1, [Config]).
+
+file_handle_cache_reserve_release1(_Config) ->
+ ok = file_handle_cache:set_reservation(7),
+ ?assertEqual([{files_reserved, 7}], file_handle_cache:info([files_reserved])),
+ ok = file_handle_cache:set_reservation(3),
+ ?assertEqual([{files_reserved, 3}], file_handle_cache:info([files_reserved])),
+ ok = file_handle_cache:release_reservation(),
+ ?assertEqual([{files_reserved, 0}], file_handle_cache:info([files_reserved])),
+ passed.
+
+file_handle_cache_reserve_above_limit(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, file_handle_cache_reserve_above_limit1, [Config]).
+
+file_handle_cache_reserve_above_limit1(_Config) ->
+ Limit = file_handle_cache:get_limit(),
+ ok = file_handle_cache:set_limit(5),
+ %% Reserves are always accepted, even if above the limit
+ %% These are for special processes such as quorum queues
+ ok = file_handle_cache:obtain(5),
+ ?assertEqual([{file_descriptor_limit, []}], rabbit_alarm:get_alarms()),
+
+ ok = file_handle_cache:set_reservation(7),
+
+ Props = file_handle_cache:info([files_reserved, sockets_used]),
+ ?assertEqual(7, proplists:get_value(files_reserved, Props)),
+ ?assertEqual(5, proplists:get_value(sockets_used, Props)),
+
+ ok = file_handle_cache:set_limit(Limit),
+ passed.
+
+file_handle_cache_reserve_open_file_above_limit(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, file_handle_cache_reserve_open_file_above_limit1, [Config]).
+
+file_handle_cache_reserve_open_file_above_limit1(_Config) ->
+ Limit = file_handle_cache:get_limit(),
+ ok = file_handle_cache:set_limit(5),
+ %% Reserves are always accepted, even if above the limit
+ %% These are for special processes such as quorum queues
+ ok = file_handle_cache:set_reservation(7),
+
+ Self = self(),
+ TmpDir = filename:join(rabbit_mnesia:dir(), "tmp"),
+ spawn(fun () -> {ok, _} = file_handle_cache:open(
+ filename:join(TmpDir, "file_above_limit"),
+ [write], []),
+ Self ! opened
+ end),
+
+ Props = file_handle_cache:info([files_reserved]),
+ ?assertEqual(7, proplists:get_value(files_reserved, Props)),
+
+ %% The open should still be blocked, as there are no file handles
+ %% available
+ receive
+ opened ->
+ throw(error_file_opened)
+ after 1000 ->
+ %% Let's release 5 file handles, that should leave
+ %% enough free for the `open` to go through
+ file_handle_cache:set_reservation(2),
+ Props0 = file_handle_cache:info([files_reserved, total_used]),
+ ?assertEqual(2, proplists:get_value(files_reserved, Props0)),
+ receive
+ opened ->
+ ok = file_handle_cache:set_limit(Limit),
+ passed
+ after 5000 ->
+ throw(error_file_not_released)
+ end
+ end.
+
+file_handle_cache_reserve_monitor(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, file_handle_cache_reserve_monitor1, [Config]).
+
+file_handle_cache_reserve_monitor1(_Config) ->
+ %% Check that if the process that does the reserve dies, the file handlers are
+ %% released by the cache
+ Self = self(),
+ Pid = spawn(fun () ->
+ ok = file_handle_cache:set_reservation(2),
+ Self ! done,
+ receive
+ stop -> ok
+ end
+ end),
+ receive
+ done -> ok
+ end,
+ ?assertEqual([{files_reserved, 2}], file_handle_cache:info([files_reserved])),
+ Pid ! stop,
+ timer:sleep(500),
+ ?assertEqual([{files_reserved, 0}], file_handle_cache:info([files_reserved])),
+ passed.
diff --git a/deps/rabbit/test/unit_gen_server2_SUITE.erl b/deps/rabbit/test/unit_gen_server2_SUITE.erl
new file mode 100644
index 0000000000..babd340f19
--- /dev/null
+++ b/deps/rabbit/test/unit_gen_server2_SUITE.erl
@@ -0,0 +1,152 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(unit_gen_server2_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, sequential_tests}
+ ].
+
+groups() ->
+ [
+ {sequential_tests, [], [
+ gen_server2_with_state,
+ mcall
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(Group, Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, Group},
+ {rmq_nodes_count, 1}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_group(_Group, Config) ->
+ rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+
+%% -------------------------------------------------------------------
+%% Test cases
+%% -------------------------------------------------------------------
+
+gen_server2_with_state(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, gen_server2_with_state1, [Config]).
+
+gen_server2_with_state1(_Config) ->
+ fhc_state = gen_server2:with_state(file_handle_cache,
+ fun (S) -> element(1, S) end),
+ passed.
+
+
+mcall(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, mcall1, [Config]).
+
+mcall1(_Config) ->
+ P1 = spawn(fun gs2_test_listener/0),
+ register(foo, P1),
+ global:register_name(gfoo, P1),
+
+ P2 = spawn(fun() -> exit(bang) end),
+ %% ensure P2 is dead (ignore the race setting up the monitor)
+ await_exit(P2),
+
+ P3 = spawn(fun gs2_test_crasher/0),
+
+ %% since P2 crashes almost immediately and P3 after receiving its first
+ %% message, we have to spawn a few more processes to handle the additional
+ %% cases we're interested in here
+ register(baz, spawn(fun gs2_test_crasher/0)),
+ register(bog, spawn(fun gs2_test_crasher/0)),
+ global:register_name(gbaz, spawn(fun gs2_test_crasher/0)),
+
+ NoNode = rabbit_nodes:make("nonode"),
+
+ Targets =
+ %% pids
+ [P1, P2, P3]
+ ++
+ %% registered names
+ [foo, bar, baz]
+ ++
+ %% {Name, Node} pairs
+ [{foo, node()}, {bar, node()}, {bog, node()}, {foo, NoNode}]
+ ++
+ %% {global, Name}
+ [{global, gfoo}, {global, gbar}, {global, gbaz}],
+
+ GoodResults = [{D, goodbye} || D <- [P1, foo,
+ {foo, node()},
+ {global, gfoo}]],
+
+ BadResults = [{P2, noproc}, % died before use
+ {P3, boom}, % died on first use
+ {bar, noproc}, % never registered
+ {baz, boom}, % died on first use
+ {{bar, node()}, noproc}, % never registered
+ {{bog, node()}, boom}, % died on first use
+ {{foo, NoNode}, nodedown}, % invalid node
+ {{global, gbar}, noproc}, % never registered globally
+ {{global, gbaz}, boom}], % died on first use
+
+ {Replies, Errors} = gen_server2:mcall([{T, hello} || T <- Targets]),
+ true = lists:sort(Replies) == lists:sort(GoodResults),
+ true = lists:sort(Errors) == lists:sort(BadResults),
+
+ %% cleanup (ignore the race setting up the monitor)
+ P1 ! stop,
+ await_exit(P1),
+ passed.
+
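+%% Shape of gen_server2:mcall/1's result, as asserted above: a pair
+%% {Replies, Errors} where Replies is [{Target, Reply}] for targets that
+%% answered, and Errors is [{Target, Reason}] with reasons such as noproc,
+%% nodedown, or the exit reason of a callee that crashed mid-call.
+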
+await_exit(Pid) ->
+ MRef = erlang:monitor(process, Pid),
+ receive
+ {'DOWN', MRef, _, _, _} -> ok
+ end.
+
+gs2_test_crasher() ->
+ receive
+ {'$gen_call', _From, hello} -> exit(boom)
+ end.
+
+gs2_test_listener() ->
+ receive
+ {'$gen_call', From, hello} ->
+ gen_server2:reply(From, goodbye),
+ gs2_test_listener();
+ stop ->
+ ok
+ end.
diff --git a/deps/rabbit/test/unit_gm_SUITE.erl b/deps/rabbit/test/unit_gm_SUITE.erl
new file mode 100644
index 0000000000..74400ddaa5
--- /dev/null
+++ b/deps/rabbit/test/unit_gm_SUITE.erl
@@ -0,0 +1,242 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(unit_gm_SUITE).
+
+-behaviour(gm).
+
+-include_lib("common_test/include/ct.hrl").
+
+-include("gm_specs.hrl").
+
+-compile(export_all).
+
+-define(RECEIVE_OR_THROW(Body, Bool, Error),
+ receive Body ->
+ true = Bool,
+ passed
+ after 5000 ->
+ throw(Error)
+ end).
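+
+%% ?RECEIVE_OR_THROW(Pattern, Bool, Error) selectively receives a message
+%% matching Pattern, asserts Bool (which may use variables bound by the
+%% match), and throws Error after 5 seconds; receive_birth/3 and friends
+%% below are its concrete instantiations.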
+
+all() ->
+ [
+ join_leave,
+ broadcast,
+ confirmed_broadcast,
+ member_death,
+ receive_in_order,
+ unexpected_msg,
+ down_in_members_change
+ ].
+
+init_per_suite(Config) ->
+ ok = application:set_env(mnesia, dir, ?config(priv_dir, Config)),
+ ok = application:start(mnesia),
+ {ok, FHC} = file_handle_cache:start_link(),
+ unlink(FHC),
+ {ok, WPS} = worker_pool_sup:start_link(),
+ unlink(WPS),
+ rabbit_ct_helpers:set_config(Config, [
+ {file_handle_cache_pid, FHC},
+ {worker_pool_sup_pid, WPS}
+ ]).
+
+end_per_suite(Config) ->
+ exit(?config(worker_pool_sup_pid, Config), shutdown),
+ exit(?config(file_handle_cache_pid, Config), shutdown),
+ ok = application:stop(mnesia),
+ Config.
+
+%% ---------------------------------------------------------------------------
+%% Functional tests
+%% ---------------------------------------------------------------------------
+
+join_leave(_Config) ->
+ passed = with_two_members(fun (_Pid, _Pid2) -> passed end).
+
+broadcast(_Config) ->
+ passed = do_broadcast(fun gm:broadcast/2).
+
+confirmed_broadcast(_Config) ->
+ passed = do_broadcast(fun gm:confirmed_broadcast/2).
+
+member_death(_Config) ->
+ passed = with_two_members(
+ fun (Pid, Pid2) ->
+ {ok, Pid3} = gm:start_link(
+ ?MODULE, ?MODULE, self(),
+ fun rabbit_misc:execute_mnesia_transaction/1),
+ passed = receive_joined(Pid3, [Pid, Pid2, Pid3],
+ timeout_joining_gm_group_3),
+ passed = receive_birth(Pid, Pid3, timeout_waiting_for_birth_3_1),
+ passed = receive_birth(Pid2, Pid3, timeout_waiting_for_birth_3_2),
+
+ unlink(Pid3),
+ exit(Pid3, kill),
+
+ %% Have to do some broadcasts to ensure that all members
+ %% find out about the death.
+ BFun = broadcast_fun(fun gm:confirmed_broadcast/2),
+ passed = BFun(Pid, Pid2),
+ passed = BFun(Pid, Pid2),
+
+ passed = receive_death(Pid, Pid3, timeout_waiting_for_death_3_1),
+ passed = receive_death(Pid2, Pid3, timeout_waiting_for_death_3_2),
+
+ passed
+ end).
+
+receive_in_order(_Config) ->
+ passed = with_two_members(
+ fun (Pid, Pid2) ->
+ Numbers = lists:seq(1,1000),
+ [begin ok = gm:broadcast(Pid, N), ok = gm:broadcast(Pid2, N) end
+ || N <- Numbers],
+ passed = receive_numbers(
+ Pid, Pid, {timeout_for_msgs, Pid, Pid}, Numbers),
+ passed = receive_numbers(
+ Pid, Pid2, {timeout_for_msgs, Pid, Pid2}, Numbers),
+ passed = receive_numbers(
+ Pid2, Pid, {timeout_for_msgs, Pid2, Pid}, Numbers),
+ passed = receive_numbers(
+ Pid2, Pid2, {timeout_for_msgs, Pid2, Pid2}, Numbers),
+ passed
+ end).
+
+unexpected_msg(_Config) ->
+ passed = with_two_members(
+ fun(Pid, _) ->
+ Pid ! {make_ref(), old_gen_server_answer},
+ true = erlang:is_process_alive(Pid),
+ passed
+ end).
+
+down_in_members_change(_Config) ->
+ %% Setup
+ ok = gm:create_tables(),
+ {ok, Pid} = gm:start_link(?MODULE, ?MODULE, self(),
+ fun rabbit_misc:execute_mnesia_transaction/1),
+ passed = receive_joined(Pid, [Pid], timeout_joining_gm_group_1),
+ {ok, Pid2} = gm:start_link(?MODULE, ?MODULE, self(),
+ fun rabbit_misc:execute_mnesia_transaction/1),
+ passed = receive_joined(Pid2, [Pid, Pid2], timeout_joining_gm_group_2),
+ passed = receive_birth(Pid, Pid2, timeout_waiting_for_birth_2),
+
+ %% Test. Simulate that the gm group is deleted (forget_group) while
+ %% processing the 'DOWN' message from the neighbour
+ process_flag(trap_exit, true),
+ ok = meck:new(mnesia, [passthrough]),
+ ok = meck:expect(mnesia, read, fun({gm_group, ?MODULE}) ->
+ [];
+ (Key) ->
+ meck:passthrough([Key])
+ end),
+ gm:leave(Pid2),
+ Passed = receive
+ {'EXIT', Pid, shutdown} ->
+ passed;
+ {'EXIT', Pid, _} ->
+ crashed
+ after 15000 ->
+ timeout
+ end,
+ %% Cleanup
+ meck:unload(mnesia),
+ process_flag(trap_exit, false),
+ passed = Passed.
+
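+%% In the test above, the meck expectation makes mnesia:read/1 return an
+%% empty result for the gm_group record, simulating a concurrent
+%% forget_group; only a clean 'shutdown' exit from the surviving member
+%% counts as a pass.
+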
+
+do_broadcast(Fun) ->
+ with_two_members(broadcast_fun(Fun)).
+
+broadcast_fun(Fun) ->
+ fun (Pid, Pid2) ->
+ ok = Fun(Pid, magic_message),
+ passed = receive_or_throw({msg, Pid, Pid, magic_message},
+ timeout_waiting_for_msg),
+ passed = receive_or_throw({msg, Pid2, Pid, magic_message},
+ timeout_waiting_for_msg)
+ end.
+
+with_two_members(Fun) ->
+ ok = gm:create_tables(),
+
+ {ok, Pid} = gm:start_link(?MODULE, ?MODULE, self(),
+ fun rabbit_misc:execute_mnesia_transaction/1),
+ passed = receive_joined(Pid, [Pid], timeout_joining_gm_group_1),
+
+ {ok, Pid2} = gm:start_link(?MODULE, ?MODULE, self(),
+ fun rabbit_misc:execute_mnesia_transaction/1),
+ passed = receive_joined(Pid2, [Pid, Pid2], timeout_joining_gm_group_2),
+ passed = receive_birth(Pid, Pid2, timeout_waiting_for_birth_2),
+
+ passed = Fun(Pid, Pid2),
+
+ ok = gm:leave(Pid),
+ passed = receive_death(Pid2, Pid, timeout_waiting_for_death_1),
+ passed =
+ receive_termination(Pid, normal, timeout_waiting_for_termination_1),
+
+ ok = gm:leave(Pid2),
+ passed =
+ receive_termination(Pid2, normal, timeout_waiting_for_termination_2),
+
+ receive X -> throw({unexpected_message, X})
+ after 0 -> passed
+ end.
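+
+%% with_two_members/1 is the harness for most tests in this suite: it
+%% creates the gm tables, starts two members, waits for both joins and the
+%% birth notification, runs Fun(Pid, Pid2), tears both members down, and
+%% fails on any message left over in the mailbox.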
+
+receive_or_throw(Pattern, Error) ->
+ ?RECEIVE_OR_THROW(Pattern, true, Error).
+
+receive_birth(From, Born, Error) ->
+ ?RECEIVE_OR_THROW({members_changed, From, Birth, Death},
+ ([Born] == Birth) andalso ([] == Death),
+ Error).
+
+receive_death(From, Died, Error) ->
+ ?RECEIVE_OR_THROW({members_changed, From, Birth, Death},
+ ([] == Birth) andalso ([Died] == Death),
+ Error).
+
+receive_joined(From, Members, Error) ->
+ ?RECEIVE_OR_THROW({joined, From, Members1},
+ lists:usort(Members) == lists:usort(Members1),
+ Error).
+
+receive_termination(From, Reason, Error) ->
+ ?RECEIVE_OR_THROW({termination, From, Reason1},
+ Reason == Reason1,
+ Error).
+
+receive_numbers(_Pid, _Sender, _Error, []) ->
+ passed;
+receive_numbers(Pid, Sender, Error, [N | Numbers]) ->
+ ?RECEIVE_OR_THROW({msg, Pid, Sender, M},
+ M == N,
+ Error),
+ receive_numbers(Pid, Sender, Error, Numbers).
+
+%% -------------------------------------------------------------------
+%% gm behavior callbacks.
+%% -------------------------------------------------------------------
+
+joined(Pid, Members) ->
+ Pid ! {joined, self(), Members},
+ ok.
+
+members_changed(Pid, Births, Deaths) ->
+ Pid ! {members_changed, self(), Births, Deaths},
+ ok.
+
+handle_msg(Pid, From, Msg) ->
+ Pid ! {msg, self(), From, Msg},
+ ok.
+
+handle_terminate(Pid, Reason) ->
+ Pid ! {termination, self(), Reason},
+ ok.
diff --git a/deps/rabbit/test/unit_log_config_SUITE.erl b/deps/rabbit/test/unit_log_config_SUITE.erl
new file mode 100644
index 0000000000..6be403fd3e
--- /dev/null
+++ b/deps/rabbit/test/unit_log_config_SUITE.erl
@@ -0,0 +1,837 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2016-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(unit_log_config_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ default,
+ env_var_tty,
+ config_file_handler,
+ config_file_handler_level,
+ config_file_handler_rotation,
+ config_console_handler,
+ config_exchange_handler,
+ config_syslog_handler,
+ config_syslog_handler_options,
+ config_multiple_handlers,
+
+ env_var_overrides_config,
+ env_var_disable_log,
+
+ config_sinks_level,
+ config_sink_file,
+ config_sink_file_override_config_handler_file,
+
+ config_handlers_merged_with_lager_handlers,
+ sink_handlers_merged_with_lager_extra_sinks_handlers,
+ sink_file_rewrites_file_backends
+ ].
+
+init_per_testcase(_, Config) ->
+ application:load(rabbit),
+ application:load(lager),
+ application:unset_env(rabbit, log),
+ application:unset_env(rabbit, lager_log_root),
+ application:unset_env(rabbit, lager_default_file),
+ application:unset_env(rabbit, lager_upgrade_file),
+ application:unset_env(lager, handlers),
+ application:unset_env(lager, rabbit_handlers),
+ application:unset_env(lager, extra_sinks),
+ unset_logs_var_origin(),
+ Config.
+
+end_per_testcase(_, Config) ->
+ application:unset_env(rabbit, log),
+ application:unset_env(rabbit, lager_log_root),
+ application:unset_env(rabbit, lager_default_file),
+ application:unset_env(rabbit, lager_upgrade_file),
+ application:unset_env(lager, handlers),
+ application:unset_env(lager, rabbit_handlers),
+ application:unset_env(lager, extra_sinks),
+ unset_logs_var_origin(),
+ application:unload(rabbit),
+ application:unload(lager),
+ Config.
+
+sink_file_rewrites_file_backends(_) ->
+ application:set_env(rabbit, log, [
+ %% Disable rabbit file handler
+ {file, [{file, false}]},
+ {categories, [{federation, [{file, "federation.log"}, {level, warning}]}]}
+ ]),
+
+ LagerHandlers = [
+ {lager_file_backend, [{file, "lager_file.log"}, {level, error}]},
+ {lager_file_backend, [{file, "lager_file_1.log"}, {level, error}]},
+ {lager_console_backend, [{level, info}]},
+ {lager_exchange_backend, [{level, info}]}
+ ],
+ application:set_env(lager, handlers, LagerHandlers),
+ rabbit_lager:configure_lager(),
+
+ ExpectedSinks = sort_sinks(sink_rewrite_sinks()),
+ ?assertEqual(ExpectedSinks, sort_sinks(application:get_env(lager, extra_sinks, undefined))).
+
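+%% In the expectation below, {file, false} has removed the default rabbit
+%% file handler, while the category-level file setting for 'federation' is
+%% rewritten into that category's own sink.
+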
+sink_rewrite_sinks() ->
+ [{error_logger_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]},
+ {rabbit_log_channel_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]},
+ {rabbit_log_connection_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]},
+ {rabbit_log_feature_flags_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]},
+ {rabbit_log_federation_lager_event,
+ [{handlers,[
+ {lager_file_backend,
+ [{date, ""},
+ {file, "federation.log"},
+ {formatter_config, formatter_config(file)},
+ {level, warning},
+ {size, 0}]},
+ {lager_console_backend, [{level, warning}]},
+ {lager_exchange_backend, [{level, warning}]}
+ ]},
+ {rabbit_handlers,[
+ {lager_file_backend,
+ [{date, ""},
+ {file, "federation.log"},
+ {formatter_config, formatter_config(file)},
+ {level, warning},
+ {size, 0}]},
+ {lager_console_backend, [{level, warning}]},
+ {lager_exchange_backend, [{level, warning}]}
+ ]}]},
+ {rabbit_log_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]},
+ {rabbit_log_ldap_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,
+ [{lager_forwarder_backend,[lager_event,info]}]}]},
+ {rabbit_log_mirroring_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]},
+ {rabbit_log_osiris_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,
+ [{lager_forwarder_backend,[lager_event,info]}]}]},
+ {rabbit_log_prelaunch_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]},
+ {rabbit_log_queue_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]},
+ {rabbit_log_ra_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,
+ [{lager_forwarder_backend,[lager_event,info]}]}]},
+ {rabbit_log_shovel_lager_event,
+ [{handlers, [{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,
+ [{lager_forwarder_backend,[lager_event,info]}]}]},
+ {rabbit_log_upgrade_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]}
+ ].
+
+sink_handlers_merged_with_lager_extra_sinks_handlers(_) ->
+ DefaultLevel = debug,
+ application:set_env(rabbit, log, [
+ {file, [{file, "rabbit_file.log"}, {level, DefaultLevel}]},
+ {console, [{enabled, true}, {level, error}]},
+ {exchange, [{enabled, true}, {level, error}]},
+ {categories, [
+ {connection, [{level, debug}]},
+ {channel, [{level, warning}, {file, "channel_log.log"}]}
+ ]}
+ ]),
+
+ LagerSinks = [
+ {rabbit_log_connection_lager_event,
+ [{handlers,
+ [{lager_file_backend,
+ [{file, "connection_lager.log"},
+ {level, info}]}]}]},
+ {rabbit_log_channel_lager_event,
+ [{handlers,
+ [{lager_console_backend, [{level, debug}]},
+ {lager_exchange_backend, [{level, debug}]},
+ {lager_file_backend, [{level, error},
+ {file, "channel_lager.log"}]}]}]}],
+
+ application:set_env(lager, extra_sinks, LagerSinks),
+ rabbit_lager:configure_lager(),
+
+ ExpectedSinks = sort_sinks([
+ {error_logger_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]},
+ {rabbit_log_channel_lager_event,
+ [{handlers,[
+ {lager_console_backend, [{level, error},
+ {formatter_config, formatter_config(console)}]},
+ {lager_exchange_backend, [{level, error},
+ {formatter_config, formatter_config(exchange)}]},
+ {lager_file_backend,
+ [{date, ""},
+ {file, "channel_log.log"},
+ {formatter_config, formatter_config(file)},
+ {level, warning},
+ {size, 0}]},
+ {lager_console_backend, [{level, debug}]},
+ {lager_exchange_backend, [{level, debug}]},
+ {lager_file_backend, [{level, error},
+ {file, "channel_lager.log"}]}
+ ]},
+ {rabbit_handlers,[
+ {lager_console_backend, [{level, error},
+ {formatter_config, formatter_config(console)}]},
+ {lager_exchange_backend, [{level, error},
+ {formatter_config, formatter_config(exchange)}]},
+ {lager_file_backend,
+ [{date, ""},
+ {file, "channel_log.log"},
+ {formatter_config, formatter_config(file)},
+ {level, warning},
+ {size, 0}]}]}
+ ]},
+ {rabbit_log_connection_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,debug]},
+ {lager_file_backend, [{file, "connection_lager.log"}, {level, info}]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,debug]}]}]},
+ {rabbit_log_feature_flags_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]},
+ {rabbit_log_federation_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]},
+ {rabbit_log_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]},
+ {rabbit_log_ldap_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]},
+ {rabbit_handlers,
+ [{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]},
+ {rabbit_log_mirroring_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]},
+ {rabbit_log_osiris_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]},
+ {rabbit_handlers,
+ [{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]},
+ {rabbit_log_prelaunch_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]},
+ {rabbit_log_queue_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]},
+ {rabbit_log_ra_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]},
+ {rabbit_handlers,
+ [{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]},
+ {rabbit_log_shovel_lager_event,
+ [{handlers, [{lager_forwarder_backend,[lager_event,DefaultLevel]}]},
+ {rabbit_handlers,
+ [{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]},
+ {rabbit_log_upgrade_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]}]),
+
+ ?assertEqual(ExpectedSinks, sort_sinks(application:get_env(lager, extra_sinks, undefined))).
+
+config_handlers_merged_with_lager_handlers(_) ->
+ application:set_env(rabbit, log, [
+ {file, [{file, "rabbit_file.log"}, {level, debug}]},
+ {console, [{enabled, true}, {level, error}]},
+ {exchange, [{enabled, true}, {level, error}]},
+ {syslog, [{enabled, true}]}
+ ]),
+
+ LagerHandlers = [
+ {lager_file_backend, [{file, "lager_file.log"}, {level, info}]},
+ {lager_console_backend, [{level, info}]},
+ {lager_exchange_backend, [{level, info}]},
+ {lager_exchange_backend, [{level, info}]}
+ ],
+ application:set_env(lager, handlers, LagerHandlers),
+ rabbit_lager:configure_lager(),
+
+ FileHandlers = default_expected_handlers("rabbit_file.log", debug),
+ ConsoleHandlers = expected_console_handler(error),
+ RabbitHandlers = expected_rabbit_handler(error),
+ SyslogHandlers = expected_syslog_handler(),
+
+ ExpectedRabbitHandlers = sort_handlers(FileHandlers ++ ConsoleHandlers ++ RabbitHandlers ++ SyslogHandlers),
+ ExpectedHandlers = sort_handlers(ExpectedRabbitHandlers ++ LagerHandlers),
+
+ ?assertEqual(ExpectedRabbitHandlers, sort_handlers(application:get_env(lager, rabbit_handlers, undefined))),
+ ?assertEqual(ExpectedHandlers, sort_handlers(application:get_env(lager, handlers, undefined))).
+
+config_sinks_level(_) ->
+ DefaultLogFile = "rabbit_default.log",
+ application:set_env(rabbit, lager_default_file, DefaultLogFile),
+
+ application:set_env(rabbit, log, [
+ {categories, [
+ {connection, [{level, warning}]},
+ {channel, [{level, debug}]},
+ {mirroring, [{level, error}]}
+ ]}
+ ]),
+
+ rabbit_lager:configure_lager(),
+
+ ExpectedSinks = sort_sinks(level_sinks()),
+ ?assertEqual(ExpectedSinks, sort_sinks(application:get_env(lager, extra_sinks, undefined))).
+
+level_sinks() ->
+ [{error_logger_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]},
+ {rabbit_log_channel_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,debug]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,debug]}]}]},
+ {rabbit_log_connection_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,warning]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,warning]}]}]},
+ {rabbit_log_feature_flags_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]},
+ {rabbit_log_federation_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]},
+ {rabbit_log_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]},
+ {rabbit_log_ldap_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,
+ [{lager_forwarder_backend,[lager_event,info]}]}]},
+ {rabbit_log_mirroring_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,error]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,error]}]}]},
+ {rabbit_log_osiris_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,
+ [{lager_forwarder_backend,[lager_event,info]}]}]},
+ {rabbit_log_prelaunch_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]},
+ {rabbit_log_queue_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]},
+ {rabbit_log_ra_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,
+ [{lager_forwarder_backend,[lager_event,info]}]}]},
+ {rabbit_log_shovel_lager_event,
+ [{handlers, [{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,
+ [{lager_forwarder_backend,
+ [lager_event,info]}]}]},
+ {rabbit_log_upgrade_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]}
+ ].
+
+config_sink_file(_) ->
+ DefaultLogFile = "rabbit_default.log",
+ application:set_env(rabbit, lager_default_file, DefaultLogFile),
+
+ DefaultLevel = error,
+ application:set_env(rabbit, log, [
+ {console, [{enabled, true}]},
+ {exchange, [{enabled, true}]},
+ {file, [{level, DefaultLevel}]},
+ {categories, [
+ {connection, [{file, "connection.log"}, {level, warning}]}
+ ]}
+ ]),
+
+ rabbit_lager:configure_lager(),
+
+ ExpectedSinks = sort_sinks(file_sinks(DefaultLevel)),
+ ?assertEqual(ExpectedSinks, sort_sinks(application:get_env(lager, extra_sinks, undefined))).
+
+config_sink_file_override_config_handler_file(_) ->
+ DefaultLogFile = "rabbit_default.log",
+ application:set_env(rabbit, lager_default_file, DefaultLogFile),
+
+ NonDefaultLogFile = "rabbit_not_default.log",
+
+ DefaultLevel = error,
+ application:set_env(rabbit, log, [
+ {file, [{file, NonDefaultLogFile}, {level, DefaultLevel}]},
+ {console, [{enabled, true}]},
+ {exchange, [{enabled, true}]},
+ {categories, [
+ {connection, [{file, "connection.log"}, {level, warning}]}
+ ]}
+ ]),
+
+ rabbit_lager:configure_lager(),
+
+ ExpectedSinks = sort_sinks(file_sinks(DefaultLevel)),
+ ?assertEqual(ExpectedSinks, sort_sinks(application:get_env(lager, extra_sinks, undefined))).
+
+file_sinks() ->
+ file_sinks(info).
+
+file_sinks(DefaultLevel) ->
+ [{error_logger_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]},
+ {rabbit_log_channel_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]},
+ {rabbit_log_connection_lager_event,
+ [{handlers,[
+ {lager_console_backend, [{level, warning},
+ {formatter_config, formatter_config(console)}]},
+ {lager_exchange_backend, [{level, warning},
+ {formatter_config, formatter_config(exchange)}]},
+ {lager_file_backend,
+ [{date, ""},
+ {file, "connection.log"},
+ {formatter_config, formatter_config(file)},
+ {level, error},
+ {size, 0}]}]},
+ {rabbit_handlers,[
+ {lager_console_backend, [{level, warning},
+ {formatter_config, formatter_config(console)}]},
+ {lager_exchange_backend, [{level, warning},
+ {formatter_config, formatter_config(exchange)}]},
+ {lager_file_backend,
+ [{date, ""},
+ {file, "connection.log"},
+ {formatter_config, formatter_config(backend)},
+ {level, error},
+ {size, 0}]}]}
+ ]},
+ {rabbit_log_feature_flags_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]},
+ {rabbit_log_federation_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]},
+ {rabbit_log_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]},
+ {rabbit_log_ldap_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]},
+ {rabbit_handlers,
+ [{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]},
+ {rabbit_log_mirroring_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]},
+ {rabbit_log_osiris_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]},
+ {rabbit_handlers,
+ [{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]},
+ {rabbit_log_prelaunch_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]},
+ {rabbit_log_queue_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]},
+ {rabbit_log_ra_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]},
+ {rabbit_handlers,
+ [{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]},
+ {rabbit_log_shovel_lager_event,
+ [{handlers, [{lager_forwarder_backend,[lager_event,DefaultLevel]}]},
+ {rabbit_handlers,
+ [{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]},
+ {rabbit_log_upgrade_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,DefaultLevel]}]}]}
+ ].
+
+config_multiple_handlers(_) ->
+ DefaultLogFile = "rabbit_default.log",
+ application:set_env(rabbit, lager_default_file, DefaultLogFile),
+
+ application:set_env(rabbit, log, [
+ %% Disable file output
+ {file, [{file, false}]},
+ %% Enable console output
+ {console, [{enabled, true}]},
+ %% Enable exchange output
+ {exchange, [{enabled, true}]},
+ %% Enable a syslog output
+ {syslog, [{enabled, true}, {level, error}]}]),
+
+ rabbit_lager:configure_lager(),
+
+ ConsoleHandlers = expected_console_handler(),
+ RabbitHandlers = expected_rabbit_handler(),
+ SyslogHandlers = expected_syslog_handler(error),
+
+ ExpectedHandlers = sort_handlers(SyslogHandlers ++ ConsoleHandlers ++ RabbitHandlers),
+
+ ?assertEqual(ExpectedHandlers, sort_handlers(application:get_env(lager, handlers, undefined))),
+ ?assertEqual(ExpectedHandlers, sort_handlers(application:get_env(lager, rabbit_handlers, undefined))).
+
+config_console_handler(_) ->
+ DefaultLogFile = "rabbit_default.log",
+ application:set_env(rabbit, lager_default_file, DefaultLogFile),
+ application:set_env(rabbit, log, [{console, [{enabled, true}]}]),
+
+ rabbit_lager:configure_lager(),
+
+ FileHandlers = default_expected_handlers(DefaultLogFile),
+ ConsoleHandlers = expected_console_handler(),
+
+ ExpectedHandlers = sort_handlers(FileHandlers ++ ConsoleHandlers),
+
+ ?assertEqual(ExpectedHandlers, sort_handlers(application:get_env(lager, handlers, undefined))),
+ ?assertEqual(ExpectedHandlers, sort_handlers(application:get_env(lager, rabbit_handlers, undefined))).
+
+config_exchange_handler(_) ->
+ DefaultLogFile = "rabbit_default.log",
+ application:set_env(rabbit, lager_default_file, DefaultLogFile),
+ application:set_env(rabbit, log, [{exchange, [{enabled, true}]}]),
+
+ rabbit_lager:configure_lager(),
+
+ FileHandlers = default_expected_handlers(DefaultLogFile),
+ ExchangeHandlers = expected_rabbit_handler(),
+
+ ExpectedHandlers = sort_handlers(FileHandlers ++ ExchangeHandlers),
+
+ ?assertEqual(ExpectedHandlers, sort_handlers(application:get_env(lager, handlers, undefined))),
+ ?assertEqual(ExpectedHandlers, sort_handlers(application:get_env(lager, rabbit_handlers, undefined))).
+
+expected_console_handler() ->
+ expected_console_handler(debug).
+
+expected_console_handler(Level) ->
+ [{lager_console_backend, [{level, Level},
+ {formatter_config, formatter_config(console)}]}].
+
+expected_rabbit_handler() ->
+ expected_rabbit_handler(debug).
+
+expected_rabbit_handler(Level) ->
+ [{lager_exchange_backend, [{level, Level},
+ {formatter_config, formatter_config(exchange)}]}].
+
+config_syslog_handler(_) ->
+ DefaultLogFile = "rabbit_default.log",
+ application:set_env(rabbit, lager_default_file, DefaultLogFile),
+ application:set_env(rabbit, log, [{syslog, [{enabled, true}]}]),
+
+ rabbit_lager:configure_lager(),
+
+ FileHandlers = default_expected_handlers(DefaultLogFile),
+ SyslogHandlers = expected_syslog_handler(),
+
+ ExpectedHandlers = sort_handlers(FileHandlers ++ SyslogHandlers),
+
+ ?assertEqual(ExpectedHandlers, sort_handlers(application:get_env(lager, handlers, undefined))),
+ ?assertEqual(ExpectedHandlers, sort_handlers(application:get_env(lager, rabbit_handlers, undefined))).
+
+config_syslog_handler_options(_) ->
+ DefaultLogFile = "rabbit_default.log",
+ application:set_env(rabbit, lager_default_file, DefaultLogFile),
+ application:set_env(rabbit, log, [{syslog, [{enabled, true},
+ {level, warning}]}]),
+
+ rabbit_lager:configure_lager(),
+
+ FileHandlers = default_expected_handlers(DefaultLogFile),
+ SyslogHandlers = expected_syslog_handler(warning),
+
+ ExpectedHandlers = sort_handlers(FileHandlers ++ SyslogHandlers),
+
+ ?assertEqual(ExpectedHandlers, sort_handlers(application:get_env(lager, handlers, undefined))),
+ ?assertEqual(ExpectedHandlers, sort_handlers(application:get_env(lager, rabbit_handlers, undefined))).
+
+expected_syslog_handler() ->
+ expected_syslog_handler(debug).
+
+expected_syslog_handler(Level) ->
+ [{syslog_lager_backend, [Level,
+ {},
+ {lager_default_formatter, syslog_formatter_config()}]}].
+
+env_var_overrides_config(_) ->
+ EnvLogFile = "rabbit_default.log",
+ application:set_env(rabbit, lager_default_file, EnvLogFile),
+
+ ConfigLogFile = "rabbit_not_default.log",
+ application:set_env(rabbit, log, [{file, [{file, ConfigLogFile}]}]),
+
+ set_logs_var_origin(environment),
+ rabbit_lager:configure_lager(),
+
+ ExpectedHandlers = default_expected_handlers(EnvLogFile),
+ ?assertEqual(ExpectedHandlers, sort_handlers(application:get_env(lager, handlers, undefined))),
+ ?assertEqual(ExpectedHandlers, sort_handlers(application:get_env(lager, rabbit_handlers, undefined))).
+
+env_var_disable_log(_) ->
+ application:set_env(rabbit, lager_default_file, false),
+
+ ConfigLogFile = "rabbit_not_default.log",
+ application:set_env(rabbit, log, [{file, [{file, ConfigLogFile}]}]),
+
+ set_logs_var_origin(environment),
+ rabbit_lager:configure_lager(),
+
+ ExpectedHandlers = [],
+ ?assertEqual(ExpectedHandlers, sort_handlers(application:get_env(lager, handlers, undefined))),
+ ?assertEqual(ExpectedHandlers, sort_handlers(application:get_env(lager, rabbit_handlers, undefined))).
+
+config_file_handler(_) ->
+ DefaultLogFile = "rabbit_default.log",
+ application:set_env(rabbit, lager_default_file, DefaultLogFile),
+
+ NonDefaultLogFile = "rabbit_not_default.log",
+ application:set_env(rabbit, log, [{file, [{file, NonDefaultLogFile}]}]),
+
+ rabbit_lager:configure_lager(),
+
+ ExpectedHandlers = default_expected_handlers(NonDefaultLogFile),
+ ?assertEqual(ExpectedHandlers, sort_handlers(application:get_env(lager, handlers, undefined))),
+ ?assertEqual(ExpectedHandlers, sort_handlers(application:get_env(lager, rabbit_handlers, undefined))).
+
+config_file_handler_level(_) ->
+ DefaultLogFile = "rabbit_default.log",
+ application:set_env(rabbit, lager_default_file, DefaultLogFile),
+
+ application:set_env(rabbit, log, [{file, [{level, warning}]}]),
+ rabbit_lager:configure_lager(),
+
+ ExpectedHandlers = default_expected_handlers(DefaultLogFile, warning),
+ ?assertEqual(ExpectedHandlers, sort_handlers(application:get_env(lager, handlers, undefined))),
+ ?assertEqual(ExpectedHandlers, sort_handlers(application:get_env(lager, rabbit_handlers, undefined))).
+
+config_file_handler_rotation(_) ->
+ DefaultLogFile = "rabbit_default.log",
+ application:set_env(rabbit, lager_default_file, DefaultLogFile),
+
+ application:set_env(rabbit, log, [{file, [{date, "$D0"}, {size, 5000}, {count, 10}]}]),
+ rabbit_lager:configure_lager(),
+
+ ExpectedHandlers = sort_handlers(default_expected_handlers(DefaultLogFile, debug, 5000, "$D0", [{count, 10}])),
+ ?assertEqual(ExpectedHandlers, sort_handlers(application:get_env(lager, handlers, undefined))),
+ ?assertEqual(ExpectedHandlers, sort_handlers(application:get_env(lager, rabbit_handlers, undefined))).
+
+default(_) ->
+ LogRoot = "/tmp/log_base",
+ application:set_env(rabbit, lager_log_root, LogRoot),
+ LogFile = "rabbit_default.log",
+ application:set_env(rabbit, lager_default_file, LogFile),
+ LogUpgradeFile = "rabbit_default_upgrade.log",
+ application:set_env(rabbit, lager_upgrade_file, LogUpgradeFile),
+
+ rabbit_lager:configure_lager(),
+
+ ExpectedHandlers = default_expected_handlers(LogFile),
+ LogRoot = application:get_env(lager, log_root, undefined),
+ ?assertEqual(ExpectedHandlers, sort_handlers(application:get_env(lager, handlers, undefined))),
+ ?assertEqual(ExpectedHandlers, sort_handlers(application:get_env(lager, rabbit_handlers, undefined))),
+
+ ExpectedSinks = default_expected_sinks(LogUpgradeFile),
+ ?assertEqual(ExpectedSinks, sort_sinks(application:get_env(lager, extra_sinks, undefined))).
+
+default_expected_handlers(File) ->
+ default_expected_handlers(File, debug, 0, "").
+default_expected_handlers(File, Level) ->
+ default_expected_handlers(File, Level, 0, "").
+default_expected_handlers(File, Level, RotSize, RotDate) ->
+ default_expected_handlers(File, Level, RotSize, RotDate, []).
+default_expected_handlers(File, Level, RotSize, RotDate, Extra) ->
+ [{lager_file_backend,
+ [{date, RotDate},
+ {file, File},
+ {formatter_config, formatter_config(file)},
+ {level, Level},
+ {size, RotSize}] ++ Extra}].
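+
+%% In these defaults, {size, 0} together with an empty {date, ""} leaves
+%% rotation effectively disabled unless overridden, which is what
+%% config_file_handler_rotation/1 exercises.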
+
+default_expected_sinks(UpgradeFile) ->
+ [{error_logger_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]},
+ {rabbit_log_channel_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]},
+ {rabbit_log_connection_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]},
+ {rabbit_log_feature_flags_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]},
+ {rabbit_log_federation_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]},
+ {rabbit_log_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]},
+ {rabbit_log_ldap_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,
+ [{lager_forwarder_backend,[lager_event,info]}]}]},
+ {rabbit_log_mirroring_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]},
+ {rabbit_log_osiris_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,
+ [{lager_forwarder_backend,[lager_event,info]}]}]},
+ {rabbit_log_prelaunch_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]},
+ {rabbit_log_queue_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]},
+ {rabbit_log_ra_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,
+ [{lager_forwarder_backend,[lager_event,info]}]}]},
+ {rabbit_log_shovel_lager_event,
+ [{handlers, [{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,
+ [{lager_forwarder_backend,[lager_event,info]}]}]},
+ {rabbit_log_upgrade_lager_event,
+ [{handlers,
+ [{lager_file_backend,
+ [{date,[]},
+ {file, UpgradeFile},
+ {formatter_config, formatter_config(file)},
+ {level,info},
+ {size,0}]}]},
+ {rabbit_handlers,
+ [{lager_file_backend,
+ [{date,[]},
+ {file, UpgradeFile},
+ {formatter_config, formatter_config(file)},
+ {level,info},
+ {size,0}]}]}]}].
+
+env_var_tty(_) ->
+ application:set_env(rabbit, lager_log_root, "/tmp/log_base"),
+ application:set_env(rabbit, lager_default_file, tty),
+ application:set_env(rabbit, lager_upgrade_file, tty),
+ %% tty can only be set explicitly
+ set_logs_var_origin(environment),
+
+ rabbit_lager:configure_lager(),
+
+ ExpectedHandlers = tty_expected_handlers(),
+ ?assertEqual(ExpectedHandlers, sort_handlers(application:get_env(lager, handlers, undefined))),
+ ?assertEqual(ExpectedHandlers, sort_handlers(application:get_env(lager, rabbit_handlers, undefined))),
+
+ %% Upgrade sink will be different.
+ ExpectedSinks = tty_expected_sinks(),
+ ?assertEqual(ExpectedSinks, sort_sinks(application:get_env(lager, extra_sinks, undefined))).
+
+set_logs_var_origin(Origin) ->
+ Context = #{var_origins => #{main_log_file => Origin}},
+ rabbit_prelaunch:store_context(Context),
+ ok.
+
+unset_logs_var_origin() ->
+ rabbit_prelaunch:clear_context_cache(),
+ ok.
+
+tty_expected_handlers() ->
+ [{lager_console_backend,
+ [{formatter_config, formatter_config(console)},
+ {level, debug}]}].
+
+tty_expected_sinks() ->
+ [{error_logger_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]},
+ {rabbit_log_channel_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]},
+ {rabbit_log_connection_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]},
+ {rabbit_log_feature_flags_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]},
+ {rabbit_log_federation_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]},
+ {rabbit_log_lager_event,
+ [{handlers, [{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers, [{lager_forwarder_backend,[lager_event,info]}]}]},
+ {rabbit_log_ldap_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,
+ [{lager_forwarder_backend,[lager_event,info]}]}]},
+ {rabbit_log_mirroring_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]},
+ {rabbit_log_osiris_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,
+ [{lager_forwarder_backend,[lager_event,info]}]}]},
+ {rabbit_log_prelaunch_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]},
+ {rabbit_log_queue_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]},
+ {rabbit_log_ra_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,
+ [{lager_forwarder_backend,[lager_event,info]}]}]},
+ {rabbit_log_shovel_lager_event,
+ [{handlers, [{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,
+ [{lager_forwarder_backend,[lager_event,info]}]}]},
+ {rabbit_log_upgrade_lager_event,
+ [{handlers,[{lager_forwarder_backend,[lager_event,info]}]},
+ {rabbit_handlers,[{lager_forwarder_backend,[lager_event,info]}]}]}].
+
+sort_sinks(Sinks) ->
+ lists:ukeysort(1,
+ lists:map(
+ fun({Name, Config}) ->
+ Handlers = proplists:get_value(handlers, Config),
+ RabbitHandlers = proplists:get_value(rabbit_handlers, Config),
+ {Name, lists:ukeymerge(1,
+ [{handlers, sort_handlers(Handlers)},
+ {rabbit_handlers, sort_handlers(RabbitHandlers)}],
+ lists:ukeysort(1, Config))}
+ end,
+ Sinks)).
+
+sort_handlers(Handlers) ->
+ lists:keysort(1,
+ lists:map(
+ fun
+ ({Name, [{Atom, _}|_] = Config}) when is_atom(Atom) ->
+ {Name, lists:ukeysort(1, Config)};
+ %% Non-proplist configuration. forwarder backend
+ (Other) ->
+ Other
+ end,
+ Handlers)).
+
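+%% sort_sinks/1 and sort_handlers/1 only normalise ordering (and, within
+%% each handler's config, deduplicate by key), so the ?assertEqual checks
+%% compare proplists independently of the order rabbit_lager emitted them.
+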
+formatter_config(console) ->
+ [date," ",time," ",color,"[",severity, "] ", {pid,[]}, " ",message,"\r\n"];
+formatter_config(_) ->
+ [date," ",time," ",color,"[",severity, "] ", {pid,[]}, " ",message,"\n"].
+
+syslog_formatter_config() ->
+ [color,"[",severity, "] ", {pid,[]}, " ",message,"\n"].
diff --git a/deps/rabbit/test/unit_log_management_SUITE.erl b/deps/rabbit/test/unit_log_management_SUITE.erl
new file mode 100644
index 0000000000..9fc9c7839d
--- /dev/null
+++ b/deps/rabbit/test/unit_log_management_SUITE.erl
@@ -0,0 +1,413 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(unit_log_management_SUITE).
+
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("common_test/include/ct.hrl").
+-include_lib("kernel/include/file.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-compile(export_all).
+
+-define(TIMEOUT, 30000).
+
+all() ->
+ [
+ {group, non_parallel_tests}
+ ].
+
+groups() ->
+ [
+ {non_parallel_tests, [], [
+ log_management,
+ log_file_initialised_during_startup,
+ log_file_fails_to_initialise_during_startup,
+ externally_rotated_logs_are_automatically_reopened
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(Group, Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, Group},
+ {rmq_nodes_count, 2}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_group(_Group, Config) ->
+ rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+%% -------------------------------------------------------------------
+%% Application management.
+%% -------------------------------------------------------------------
+
+app_management(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, app_management1, [Config]).
+
+app_management1(_Config) ->
+ wait_for_application(rabbit),
+ %% Starting, stopping and diagnostics. Note that we don't try
+ %% 'report' when the rabbit app is stopped and that we enable
+ %% tracing for the duration of this function.
+ ok = rabbit_trace:start(<<"/">>),
+ ok = rabbit:stop(),
+ ok = rabbit:stop(),
+ ok = no_exceptions(rabbit, status, []),
+ ok = no_exceptions(rabbit, environment, []),
+ ok = rabbit:start(),
+ ok = rabbit:start(),
+ ok = no_exceptions(rabbit, status, []),
+ ok = no_exceptions(rabbit, environment, []),
+ ok = rabbit_trace:stop(<<"/">>),
+ passed.
+
+no_exceptions(Mod, Fun, Args) ->
+ try erlang:apply(Mod, Fun, Args) of _ -> ok
+ catch Type:Ex -> {Type, Ex}
+ end.
+
+wait_for_application(Application) ->
+ wait_for_application(Application, 5000).
+
+wait_for_application(_, Time) when Time =< 0 ->
+ {error, timeout};
+wait_for_application(Application, Time) ->
+ Interval = 100,
+ case lists:keyfind(Application, 1, application:which_applications()) of
+ false ->
+ timer:sleep(Interval),
+ wait_for_application(Application, Time - Interval);
+ _ -> ok
+ end.
+
+
+
+%% -------------------------------------------------------------------
+%% Log management.
+%% -------------------------------------------------------------------
+
+log_management(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, log_management1, [Config]).
+
+log_management1(_Config) ->
+ [LogFile|_] = rabbit:log_locations(),
+ Suffix = ".0",
+
+ ok = test_logs_working([LogFile]),
+
+ %% prepare basic logs
+ file:delete(LogFile ++ Suffix),
+ ok = test_logs_working([LogFile]),
+
+ %% simple log rotation
+ ok = rabbit:rotate_logs(),
+ %% rabbit:rotate_logs/0 is asynchronous due to a limitation in
+ %% Lager. Therefore, we have no choice but to wait an arbitrary
+ %% amount of time.
+ ok = rabbit_ct_helpers:await_condition(
+ fun() ->
+ [true, true] =:=
+ non_empty_files([LogFile ++ Suffix, LogFile])
+ end, 5000),
+ ok = test_logs_working([LogFile]),
+
+ %% log rotation on empty files
+ ok = clean_logs([LogFile], Suffix),
+ ok = rabbit:rotate_logs(),
+ ok = rabbit_ct_helpers:await_condition(
+ fun() ->
+ [true, true] =:=
+ non_empty_files([LogFile ++ Suffix, LogFile])
+ end, 5000),
+
+ %% logs with suffix are not writable
+ ok = rabbit:rotate_logs(),
+ ok = rabbit_ct_helpers:await_condition(
+ fun() ->
+ ok =:= make_files_non_writable([LogFile ++ Suffix])
+ end, 5000),
+ ok = rabbit:rotate_logs(),
+ ok = rabbit_ct_helpers:await_condition(
+ fun() ->
+ ok =:= test_logs_working([LogFile])
+ end, 5000),
+
+ %% rotate when original log files are not writable
+ ok = make_files_non_writable([LogFile]),
+ ok = rabbit:rotate_logs(),
+ timer:sleep(2000),
+
+ %% logging directed to tty (first, remove handlers)
+ ok = rabbit:stop(),
+ ok = make_files_writable([LogFile ++ Suffix]),
+ ok = clean_logs([LogFile], Suffix),
+ ok = application:set_env(rabbit, lager_default_file, tty),
+ application:unset_env(rabbit, log),
+ application:unset_env(lager, handlers),
+ application:unset_env(lager, extra_sinks),
+ ok = rabbit:start(),
+ timer:sleep(200),
+ rabbit_log:info("test info"),
+
+ %% rotate logs when logging is turned off
+ ok = rabbit:stop(),
+ ok = clean_logs([LogFile], Suffix),
+ ok = application:set_env(rabbit, lager_default_file, false),
+ application:unset_env(rabbit, log),
+ application:unset_env(lager, handlers),
+ application:unset_env(lager, extra_sinks),
+ ok = rabbit:start(),
+ timer:sleep(200),
+ rabbit_log:error("test error"),
+ timer:sleep(200),
+ ?assertEqual([{error,enoent}], empty_files([LogFile])),
+
+ %% cleanup
+ ok = rabbit:stop(),
+ ok = clean_logs([LogFile], Suffix),
+ ok = application:set_env(rabbit, lager_default_file, LogFile),
+ application:unset_env(rabbit, log),
+ application:unset_env(lager, handlers),
+ application:unset_env(lager, extra_sinks),
+ ok = rabbit:start(),
+ ok = test_logs_working([LogFile]),
+ passed.
+
+log_file_initialised_during_startup(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, log_file_initialised_during_startup1, [Config]).
+
+log_file_initialised_during_startup1(_Config) ->
+ [LogFile|_] = rabbit:log_locations(),
+ Suffix = ".0",
+
+ %% start application with simple tty logging
+ ok = rabbit:stop(),
+ ok = clean_logs([LogFile], Suffix),
+ ok = application:set_env(rabbit, lager_default_file, tty),
+ application:unset_env(rabbit, log),
+ application:unset_env(lager, handlers),
+ application:unset_env(lager, extra_sinks),
+ ok = rabbit:start(),
+
+ %% start application with logging to a non-existent directory
+ NonExistent = rabbit_misc:format(
+ "/tmp/non-existent/~s.log", [?FUNCTION_NAME]),
+ delete_file(NonExistent),
+ delete_file(filename:dirname(NonExistent)),
+ ok = rabbit:stop(),
+ ct:pal("Setting lager_default_file to \"~s\"", [NonExistent]),
+ ok = application:set_env(rabbit, lager_default_file, NonExistent),
+ application:unset_env(rabbit, log),
+ application:unset_env(lager, handlers),
+ application:unset_env(lager, extra_sinks),
+ ok = rabbit:start(),
+
+ %% clean up
+ ok = application:set_env(rabbit, lager_default_file, LogFile),
+ application:unset_env(rabbit, log),
+ application:unset_env(lager, handlers),
+ application:unset_env(lager, extra_sinks),
+ ok = rabbit:start(),
+ passed.
+
+
+log_file_fails_to_initialise_during_startup(Config) ->
+ NonWritableDir = case os:type() of
+ {win32, _} -> "C:/Windows";
+ _ -> "/"
+ end,
+ case file:open(filename:join(NonWritableDir, "test.log"), [write]) of
+ {error, eacces} ->
+ passed = rabbit_ct_broker_helpers:rpc(
+ Config, 0,
+ ?MODULE, log_file_fails_to_initialise_during_startup1,
+ [Config, NonWritableDir]);
+ %% macOS, "read only volume"
+ {error, erofs} ->
+ passed = rabbit_ct_broker_helpers:rpc(
+ Config, 0,
+ ?MODULE, log_file_fails_to_initialise_during_startup1,
+ [Config, NonWritableDir]);
+ {ok, Fd} ->
+ %% If the supposedly non-writable directory is writable
+ %% (e.g. we are running the testsuite on Windows as
+ %% Administrator), we skip this test.
+ file:close(Fd),
+ {skip, "Supposedly non-writable directory is writable"}
+ end.
+
+log_file_fails_to_initialise_during_startup1(_Config, NonWritableDir) ->
+ [LogFile|_] = rabbit:log_locations(),
+ delete_file(LogFile),
+ Fn = rabbit_misc:format("~s.log", [?FUNCTION_NAME]),
+
+ %% start application with logging to directory with no
+ %% write permissions
+ NoPermission1 = filename:join(NonWritableDir, Fn),
+ delete_file(NoPermission1),
+ delete_file(filename:dirname(NoPermission1)),
+
+ ok = rabbit:stop(),
+ ct:pal("Setting lager_default_file to \"~s\"", [NoPermission1]),
+ ok = application:set_env(rabbit, lager_default_file, NoPermission1),
+ application:unset_env(rabbit, log),
+ application:unset_env(lager, handlers),
+ application:unset_env(lager, extra_sinks),
+
+ ct:pal("`rabbit` application env.: ~p", [application:get_all_env(rabbit)]),
+ ?assertThrow(
+ {error, {rabbit, {{cannot_log_to_file, _, _}, _}}},
+ rabbit:start()),
+
+ %% start application with logging to a subdirectory whose
+ %% parent directory has no write permissions
+ NoPermission2 = filename:join([NonWritableDir,
+ "non-existent",
+ Fn]),
+ delete_file(NoPermission2),
+ delete_file(filename:dirname(NoPermission2)),
+
+ ct:pal("Setting lager_default_file to \"~s\"", [NoPermission2]),
+ ok = application:set_env(rabbit, lager_default_file, NoPermission2),
+ application:unset_env(rabbit, log),
+ application:unset_env(lager, handlers),
+ application:unset_env(lager, extra_sinks),
+
+ ct:pal("`rabbit` application env.: ~p", [application:get_all_env(rabbit)]),
+ ?assertThrow(
+ {error, {rabbit, {{cannot_log_to_file, _, _}, _}}},
+ rabbit:start()),
+
+ %% clean up
+ ok = application:set_env(rabbit, lager_default_file, LogFile),
+ application:unset_env(rabbit, log),
+ application:unset_env(lager, handlers),
+ application:unset_env(lager, extra_sinks),
+ ok = rabbit:start(),
+ passed.
+
+externally_rotated_logs_are_automatically_reopened(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, externally_rotated_logs_are_automatically_reopened1, [Config]).
+
+externally_rotated_logs_are_automatically_reopened1(_Config) ->
+ [LogFile|_] = rabbit:log_locations(),
+
+ %% Make sure log file is opened
+ ok = test_logs_working([LogFile]),
+
+ %% Move it away, i.e. simulate external log rotation
+ file:rename(LogFile, [LogFile, ".rotation_test"]),
+
+ %% A new file should be created - test_logs_working/1 verifies that
+ %% LogFile is non-empty after doing some logging, which is exactly
+ %% the check we need here.
+ ok = test_logs_working([LogFile]),
+ passed.
+
+empty_or_nonexist_files(Files) ->
+ [case file:read_file_info(File) of
+ {ok, FInfo} -> FInfo#file_info.size == 0;
+ {error, enoent} -> true;
+ Error -> Error
+ end || File <- Files].
+
+empty_files(Files) ->
+ [case file:read_file_info(File) of
+ {ok, FInfo} -> FInfo#file_info.size == 0;
+ Error -> Error
+ end || File <- Files].
+
+non_empty_files(Files) ->
+ [case EmptyFile of
+ {error, Reason} -> {error, Reason};
+ _ -> not(EmptyFile)
+ end || EmptyFile <- empty_files(Files)].
+
+test_logs_working(LogFiles) ->
+ ok = rabbit_log:error("Log a test message"),
+ %% give the error loggers some time to catch up
+ timer:sleep(1000),
+ true = lists:all(fun(LogFile) -> [true] =:= non_empty_files([LogFile]) end, LogFiles),
+ ok.
+
+set_permissions(Path, Mode) ->
+ case file:read_file_info(Path) of
+ {ok, FInfo} -> file:write_file_info(
+ Path,
+ FInfo#file_info{mode=Mode});
+ Error -> Error
+ end.
+
+clean_logs(Files, Suffix) ->
+ [begin
+ ok = delete_file(File),
+ ok = delete_file([File, Suffix])
+ end || File <- Files],
+ ok.
+
+delete_file(File) ->
+ case file:delete(File) of
+ ok -> ok;
+ {error, enoent} -> ok;
+ Error -> Error
+ end.
+
+make_files_writable(Files) ->
+ [ok = file:write_file_info(File, #file_info{mode=8#644}) ||
+ File <- Files],
+ ok.
+
+make_files_non_writable(Files) ->
+ [ok = file:write_file_info(File, #file_info{mode=8#444}) ||
+ File <- Files],
+ ok.
+
+add_log_handlers(Handlers) ->
+ [ok = error_logger:add_report_handler(Handler, Args) ||
+ {Handler, Args} <- Handlers],
+ ok.
+
+%% sasl_report_file_h returns [] during terminate, while
+%% error_logger_file_h returns ok since OTP 18.1, see:
+%% https://github.com/erlang/otp/blob/maint/lib/stdlib/src/error_logger_file_h.erl#L98
+delete_log_handlers(Handlers) ->
+ [ok_or_empty_list(error_logger:delete_report_handler(Handler))
+ || Handler <- Handlers],
+ ok.
+
+ok_or_empty_list([]) ->
+ [];
+ok_or_empty_list(ok) ->
+ ok.
diff --git a/deps/rabbit/test/unit_operator_policy_SUITE.erl b/deps/rabbit/test/unit_operator_policy_SUITE.erl
new file mode 100644
index 0000000000..ae3285bb55
--- /dev/null
+++ b/deps/rabbit/test/unit_operator_policy_SUITE.erl
@@ -0,0 +1,107 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(unit_operator_policy_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, parallel_tests}
+ ].
+
+groups() ->
+ [
+ {parallel_tests, [parallel], [
+ merge_operator_policy_definitions
+ ]}
+ ].
+
+init_per_testcase(_Testcase, Config) ->
+ Config.
+
+end_per_testcase(_TC, _Config) ->
+ ok.
+
+
+%% -------------------------------------------------------------------
+%% Test Cases
+%% -------------------------------------------------------------------
+
+merge_operator_policy_definitions(_Config) ->
+ P1 = undefined,
+ P2 = [{definition, [{<<"message-ttl">>, 3000}]}],
+ ?assertEqual([{<<"message-ttl">>, 3000}], rabbit_policy:merge_operator_definitions(P1, P2)),
+ ?assertEqual([{<<"message-ttl">>, 3000}], rabbit_policy:merge_operator_definitions(P2, P1)),
+
+ ?assertEqual([{<<"message-ttl">>, 3000}], rabbit_policy:merge_operator_definitions(P1, rabbit_data_coercion:to_map(P2))),
+ ?assertEqual([{<<"message-ttl">>, 3000}], rabbit_policy:merge_operator_definitions(rabbit_data_coercion:to_map(P2), P1)),
+
+ ?assertEqual(undefined, rabbit_policy:merge_operator_definitions(undefined, undefined)),
+
+ ?assertEqual([], rabbit_policy:merge_operator_definitions([], [])),
+ ?assertEqual([], rabbit_policy:merge_operator_definitions(#{}, [])),
+ ?assertEqual([], rabbit_policy:merge_operator_definitions(#{}, #{})),
+ ?assertEqual([], rabbit_policy:merge_operator_definitions([], #{})),
+
+ %% operator policy takes precedence
+ ?assertEqual([{<<"message-ttl">>, 3000}], rabbit_policy:merge_operator_definitions(
+ [{definition, [
+ {<<"message-ttl">>, 5000}
+ ]}],
+ [{definition, [
+ {<<"message-ttl">>, 3000}
+ ]}]
+ )),
+
+ ?assertEqual([{<<"delivery-limit">>, 20},
+ {<<"message-ttl">>, 3000}],
+ rabbit_policy:merge_operator_definitions(
+ [{definition, [
+ {<<"message-ttl">>, 5000},
+ {<<"delivery-limit">>, 20}
+ ]}],
+ [{definition, [
+ {<<"message-ttl">>, 3000}
+ ]}])
+ ),
+
+ ?assertEqual(
+ [{<<"delivery-limit">>, 20},
+ {<<"message-ttl">>, 3000},
+ {<<"unknown">>, <<"value">>}],
+
+ rabbit_policy:merge_operator_definitions(
+ #{definition => #{
+ <<"message-ttl">> => 5000,
+ <<"delivery-limit">> => 20
+ }},
+ #{definition => #{
+ <<"message-ttl">> => 3000,
+ <<"unknown">> => <<"value">>
+ }})
+ ),
+
+ ?assertEqual(
+ [{<<"delivery-limit">>, 20},
+ {<<"message-ttl">>, 3000}],
+
+ rabbit_policy:merge_operator_definitions(
+ #{definition => #{
+ <<"message-ttl">> => 5000,
+ <<"delivery-limit">> => 20
+ }},
+ [{definition, [
+ {<<"message-ttl">>, 3000}
+ ]}])
+ ),
+
+ passed.
diff --git a/deps/rabbit/test/unit_pg_local_SUITE.erl b/deps/rabbit/test/unit_pg_local_SUITE.erl
new file mode 100644
index 0000000000..54fafdd340
--- /dev/null
+++ b/deps/rabbit/test/unit_pg_local_SUITE.erl
@@ -0,0 +1,103 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(unit_pg_local_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, sequential_tests}
+ ].
+
+groups() ->
+ [
+ {sequential_tests, [], [
+ pg_local,
+ pg_local_with_unexpected_deaths1,
+ pg_local_with_unexpected_deaths2
+ ]}
+ ].
+
+
+pg_local(_Config) ->
+ [P, Q] = [spawn(fun () -> receive X -> X end end) || _ <- lists:seq(0, 1)],
+ check_pg_local(ok, [], []),
+ %% P joins group a, then b, then a again
+ check_pg_local(pg_local:join(a, P), [P], []),
+ check_pg_local(pg_local:join(b, P), [P], [P]),
+ check_pg_local(pg_local:join(a, P), [P, P], [P]),
+ %% Q joins group a, then b, then b again
+ check_pg_local(pg_local:join(a, Q), [P, P, Q], [P]),
+ check_pg_local(pg_local:join(b, Q), [P, P, Q], [P, Q]),
+ check_pg_local(pg_local:join(b, Q), [P, P, Q], [P, Q, Q]),
+    %% P leaves groups a and b
+ check_pg_local(pg_local:leave(a, P), [P, Q], [P, Q, Q]),
+ check_pg_local(pg_local:leave(b, P), [P, Q], [Q, Q]),
+ %% leave/2 is idempotent
+ check_pg_local(pg_local:leave(a, P), [Q], [Q, Q]),
+ check_pg_local(pg_local:leave(a, P), [Q], [Q, Q]),
+ %% clean up all processes
+ [begin X ! done,
+ Ref = erlang:monitor(process, X),
+ receive {'DOWN', Ref, process, X, _Info} -> ok end
+ end || X <- [P, Q]],
+ %% ensure the groups are empty
+ check_pg_local(ok, [], []),
+ passed.
+
+pg_local_with_unexpected_deaths1(_Config) ->
+ [A, B] = [spawn(fun () -> receive X -> X end end) || _ <- lists:seq(0, 1)],
+ check_pg_local(ok, [], []),
+ %% A joins groups a and b
+ check_pg_local(pg_local:join(a, A), [A], []),
+ check_pg_local(pg_local:join(b, A), [A], [A]),
+ %% B joins group b
+ check_pg_local(pg_local:join(b, B), [A], [A, B]),
+
+ [begin erlang:exit(X, sleep_now_in_a_fire),
+ Ref = erlang:monitor(process, X),
+ receive {'DOWN', Ref, process, X, _Info} -> ok end
+ end || X <- [A, B]],
+ %% ensure the groups are empty
+ check_pg_local(ok, [], []),
+ ?assertNot(erlang:is_process_alive(A)),
+ ?assertNot(erlang:is_process_alive(B)),
+
+ passed.
+
+pg_local_with_unexpected_deaths2(_Config) ->
+ [A, B] = [spawn(fun () -> receive X -> X end end) || _ <- lists:seq(0, 1)],
+ check_pg_local(ok, [], []),
+ %% A joins groups a and b
+ check_pg_local(pg_local:join(a, A), [A], []),
+ check_pg_local(pg_local:join(b, A), [A], [A]),
+ %% B joins group b
+ check_pg_local(pg_local:join(b, B), [A], [A, B]),
+
+ %% something else yanks a record (or all of them) from the pg_local
+ %% bookkeeping table
+ ok = pg_local:clear(),
+
+ [begin erlang:exit(X, sleep_now_in_a_fire),
+ Ref = erlang:monitor(process, X),
+ receive {'DOWN', Ref, process, X, _Info} -> ok end
+ end || X <- [A, B]],
+ %% ensure the groups are empty
+ check_pg_local(ok, [], []),
+ ?assertNot(erlang:is_process_alive(A)),
+ ?assertNot(erlang:is_process_alive(B)),
+
+ passed.
+
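+%% Syncs pg_local, then asserts that group 'a' has exactly the members
+%% APids and group 'b' the members BPids (order-insensitive).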
+check_pg_local(ok, APids, BPids) ->
+ ok = pg_local:sync(),
+ ?assertEqual([true, true], [lists:sort(Pids) == lists:sort(pg_local:get_members(Key)) ||
+ {Key, Pids} <- [{a, APids}, {b, BPids}]]).
diff --git a/deps/rabbit/test/unit_plugin_directories_SUITE.erl b/deps/rabbit/test/unit_plugin_directories_SUITE.erl
new file mode 100644
index 0000000000..1195434fae
--- /dev/null
+++ b/deps/rabbit/test/unit_plugin_directories_SUITE.erl
@@ -0,0 +1,76 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(unit_plugin_directories_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, parallel_tests}
+ ].
+
+groups() ->
+ [
+ {parallel_tests, [parallel], [
+ listing_plugins_from_multiple_directories
+ ]}
+ ].
+
+
+%% -------------------------------------------------------------------
+%% Test Cases
+%% -------------------------------------------------------------------
+
+listing_plugins_from_multiple_directories(Config) ->
+ %% Generate some fake plugins in .ez files
+ FirstDir = filename:join([?config(priv_dir, Config), "listing_plugins_from_multiple_directories-1"]),
+ SecondDir = filename:join([?config(priv_dir, Config), "listing_plugins_from_multiple_directories-2"]),
+ ok = file:make_dir(FirstDir),
+ ok = file:make_dir(SecondDir),
+ lists:foreach(fun({Dir, AppName, Vsn}) ->
+ EzName = filename:join([Dir, io_lib:format("~s-~s.ez", [AppName, Vsn])]),
+ AppFileName = lists:flatten(io_lib:format("~s-~s/ebin/~s.app", [AppName, Vsn, AppName])),
+ AppFileContents = list_to_binary(
+ io_lib:format(
+ "~p.",
+ [{application, AppName,
+ [{vsn, Vsn},
+ {applications, [kernel, stdlib, rabbit]}]}])),
+ {ok, {_, EzData}} = zip:zip(EzName, [{AppFileName, AppFileContents}], [memory]),
+ ok = file:write_file(EzName, EzData)
+ end,
+ [{FirstDir, plugin_first_dir, "3"},
+ {SecondDir, plugin_second_dir, "4"},
+ {FirstDir, plugin_both, "1"},
+ {SecondDir, plugin_both, "2"}]),
+
+ %% Everything was collected from both directories, plugin with higher
+ %% version should take precedence
+ PathSep = case os:type() of
+ {win32, _} -> ";";
+ _ -> ":"
+ end,
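+    %% e.g. FirstDir ++ ":" ++ SecondDir on Unix; ";" is used on Windows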
+ Path = FirstDir ++ PathSep ++ SecondDir,
+ Got = lists:sort([{Name, Vsn} || #plugin{name = Name, version = Vsn} <- rabbit_plugins:list(Path)]),
+ %% `rabbit` was loaded automatically by `rabbit_plugins:list/1`.
+ %% We want to unload it now so it does not interfere with other
+ %% testcases.
+ application:unload(rabbit),
+ Expected = [{plugin_both, "2"}, {plugin_first_dir, "3"}, {plugin_second_dir, "4"}],
+ case Got of
+ Expected ->
+ ok;
+ _ ->
+ ct:pal("Got ~p~nExpected: ~p", [Got, Expected]),
+ exit({wrong_plugins_list, Got})
+ end,
+ ok.
diff --git a/deps/rabbit/test/unit_plugin_versioning_SUITE.erl b/deps/rabbit/test/unit_plugin_versioning_SUITE.erl
new file mode 100644
index 0000000000..8032becedd
--- /dev/null
+++ b/deps/rabbit/test/unit_plugin_versioning_SUITE.erl
@@ -0,0 +1,170 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2016-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(unit_plugin_versioning_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, parallel_tests}
+ ].
+
+groups() ->
+ [
+ {parallel_tests, [parallel], [
+ version_support,
+ plugin_validation
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(_, Config) ->
+ Config.
+
+end_per_group(_, Config) ->
+ Config.
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+version_support(_Config) ->
+ Examples = [
+ {[], "any version", true} %% anything goes
+ ,{[], "0.0.0", true} %% ditto
+ ,{[], "3.5.6", true} %% ditto
+ ,{["something"], "something", true} %% equal values match
+ ,{["3.5.4"], "something", false}
+ ,{["3.4.5", "3.6.0"], "0.0.0", true} %% zero version always match
+ ,{["3.4.5", "3.6.0"], "", true} %% empty version always match
+ ,{["something", "3.5.6"], "3.5.7", true} %% 3.5.7 matches ~> 3.5.6
+ ,{["3.4.0", "3.5.6"], "3.6.1", false} %% 3.6.x isn't supported
+ ,{["3.5.2", "3.6.1", "3.7.1"], "3.5.2", true} %% 3.5.2 matches ~> 3.5.2
+ ,{["3.5.2", "3.6.1", "3.7.1"], "3.5.1", false} %% lesser than the lower boundary
+ ,{["3.5.2", "3.6.1", "3.7.1"], "3.6.2", true} %% 3.6.2 matches ~> 3.6.1
+ ,{["3.5.2", "3.6.1", "3.6.8"], "3.6.2", true} %% 3.6.2 still matches ~> 3.6.1
+ ,{["3.5", "3.6", "3.7"], "3.5.1", true} %% x.y values equal to x.y.0
+ ,{["3"], "3.5.1", false} %% x values are not supported
+ ,{["3.5.2", "3.6.1"], "3.6.2.999", true} %% x.y.z.p values are supported
+ ,{["3.5.2", "3.6.2.333"], "3.6.2.999", true} %% x.y.z.p values are supported
+ ,{["3.5.2", "3.6.2.333"], "3.6.2.222", false} %% x.y.z.p values are supported
+ ,{["3.6.0", "3.7.0"], "3.6.3-alpha.1", true} %% Pre-release versions handled like semver part
+ ,{["3.6.0", "3.7.0"], "3.7.0-alpha.89", true}
+ ],
+
+ lists:foreach(
+ fun({Versions, RabbitVersion, Expected}) ->
+ {Expected, RabbitVersion, Versions} =
+ {rabbit_plugins:is_version_supported(RabbitVersion, Versions),
+ RabbitVersion, Versions}
+ end,
+ Examples),
+ ok.
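+
+%% For reference, the "~>" matching exercised above is pessimistic version
+%% matching: a requirement X.Y.Z appears to be satisfied by any broker
+%% version with the same X.Y prefix that is >= X.Y.Z. Two illustrative
+%% calls taken directly from the table (argument order as asserted above):
+%%
+%%   true  = rabbit_plugins:is_version_supported("3.6.2", ["3.5.2", "3.6.1", "3.7.1"]),
+%%   false = rabbit_plugins:is_version_supported("3.6.1", ["3.4.0", "3.5.6"]).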
+
+-record(validation_example, {rabbit_version, plugins, errors, valid}).
+
+plugin_validation(_Config) ->
+ Examples = [
+ #validation_example{
+ rabbit_version = "3.7.1",
+ plugins =
+ [{plugin_a, "3.7.2", ["3.5.6", "3.7.1"], []},
+ {plugin_b, "3.7.2", ["3.7.0"], [{plugin_a, ["3.6.3", "3.7.1"]}]}],
+ errors = [],
+ valid = [plugin_a, plugin_b]},
+
+ #validation_example{
+ rabbit_version = "3.7.1",
+ plugins =
+ [{plugin_a, "3.7.1", ["3.7.6"], []},
+ {plugin_b, "3.7.2", ["3.7.0"], [{plugin_a, ["3.6.3", "3.7.0"]}]}],
+ errors =
+ [{plugin_a, [{broker_version_mismatch, "3.7.1", ["3.7.6"]}]},
+ {plugin_b, [{missing_dependency, plugin_a}]}],
+ valid = []
+ },
+
+ #validation_example{
+ rabbit_version = "3.7.1",
+ plugins =
+ [{plugin_a, "3.7.1", ["3.7.6"], []},
+ {plugin_b, "3.7.2", ["3.7.0"], [{plugin_a, ["3.7.0"]}]},
+ {plugin_c, "3.7.2", ["3.7.0"], [{plugin_b, ["3.7.3"]}]}],
+ errors =
+ [{plugin_a, [{broker_version_mismatch, "3.7.1", ["3.7.6"]}]},
+ {plugin_b, [{missing_dependency, plugin_a}]},
+ {plugin_c, [{missing_dependency, plugin_b}]}],
+ valid = []
+ },
+
+ #validation_example{
+ rabbit_version = "3.7.1",
+ plugins =
+ [{plugin_a, "3.7.1", ["3.7.1"], []},
+ {plugin_b, "3.7.2", ["3.7.0"], [{plugin_a, ["3.7.3"]}]},
+ {plugin_d, "3.7.2", ["3.7.0"], [{plugin_c, ["3.7.3"]}]}],
+ errors =
+ [{plugin_b, [{{dependency_version_mismatch, "3.7.1", ["3.7.3"]}, plugin_a}]},
+ {plugin_d, [{missing_dependency, plugin_c}]}],
+ valid = [plugin_a]
+ },
+ #validation_example{
+ rabbit_version = "0.0.0",
+ plugins =
+ [{plugin_a, "", ["3.7.1"], []},
+ {plugin_b, "3.7.2", ["3.7.0"], [{plugin_a, ["3.7.3"]}]}],
+ errors = [],
+ valid = [plugin_a, plugin_b]
+ }],
+ lists:foreach(
+ fun(#validation_example{rabbit_version = RabbitVersion,
+ plugins = PluginsExamples,
+ errors = Errors,
+ valid = ExpectedValid}) ->
+ Plugins = make_plugins(PluginsExamples),
+ {Valid, Invalid} = rabbit_plugins:validate_plugins(Plugins,
+ RabbitVersion),
+ Errors = lists:reverse(Invalid),
+ ExpectedValid = lists:reverse(lists:map(fun(#plugin{name = Name}) ->
+ Name
+ end,
+ Valid))
+ end,
+ Examples),
+ ok.
+
+make_plugins(Plugins) ->
+ lists:map(
+ fun({Name, Version, RabbitVersions, PluginsVersions}) ->
+ Deps = [K || {K,_V} <- PluginsVersions],
+ #plugin{name = Name,
+ version = Version,
+ dependencies = Deps,
+ broker_version_requirements = RabbitVersions,
+ dependency_version_requirements = PluginsVersions}
+ end,
+ Plugins).
diff --git a/deps/rabbit/test/unit_policy_validators_SUITE.erl b/deps/rabbit/test/unit_policy_validators_SUITE.erl
new file mode 100644
index 0000000000..c340d172af
--- /dev/null
+++ b/deps/rabbit/test/unit_policy_validators_SUITE.erl
@@ -0,0 +1,207 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(unit_policy_validators_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, core_validators},
+ {group, classic_queue_mirroring_validators}
+ ].
+
+groups() ->
+ [
+ {core_validators, [parallel], [
+ alternate_exchange,
+ dead_letter_exchange,
+ dead_letter_routing_key,
+ message_ttl,
+ expires,
+ max_length,
+ max_length_bytes,
+ max_in_memory_length,
+ delivery_limit,
+ classic_queue_lazy_mode,
+ length_limit_overflow_mode
+ ]},
+
+ {classic_queue_mirroring_validators, [parallel], [
+ classic_queue_ha_mode,
+ classic_queue_ha_params
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Test suite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(Group = classic_queue_mirroring_validators, Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, Group},
+ {rmq_nodes_count, 1}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps());
+init_per_group(_, Config) ->
+ Config.
+
+end_per_group(classic_queue_mirroring_validators, Config) ->
+ rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_broker_helpers:teardown_steps());
+end_per_group(_, Config) ->
+ Config.
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+%% -------------------------------------------------------------------
+%% Core Validators
+%% -------------------------------------------------------------------
+
+alternate_exchange(_Config) ->
+ requires_binary_value(<<"alternate-exchange">>).
+
+dead_letter_exchange(_Config) ->
+ requires_binary_value(<<"dead-letter-exchange">>).
+
+dead_letter_routing_key(_Config) ->
+ requires_binary_value(<<"dead-letter-routing-key">>).
+
+message_ttl(_Config) ->
+ requires_non_negative_integer_value(<<"message-ttl">>).
+
+expires(_Config) ->
+ requires_positive_integer_value(<<"expires">>).
+
+max_length(_Config) ->
+ requires_non_negative_integer_value(<<"max-length">>).
+
+max_length_bytes(_Config) ->
+ requires_non_negative_integer_value(<<"max-length-bytes">>).
+
+max_in_memory_length(_Config) ->
+ requires_non_negative_integer_value(<<"max-in-memory-bytes">>).
+
+delivery_limit(_Config) ->
+ requires_non_negative_integer_value(<<"delivery-limit">>).
+
+classic_queue_lazy_mode(_Config) ->
+ test_valid_and_invalid_values(<<"queue-mode">>,
+ %% valid values
+ [<<"default">>, <<"lazy">>],
+ %% invalid values
+ [<<"unknown">>, <<"queue">>, <<"mode">>]).
+
+length_limit_overflow_mode(_Config) ->
+ test_valid_and_invalid_values(<<"overflow">>,
+ %% valid values
+ [<<"drop-head">>, <<"reject-publish">>, <<"reject-publish-dlx">>],
+ %% invalid values
+ [<<"unknown">>, <<"publish">>, <<"overflow">>, <<"mode">>]).
+
+
+%% -------------------------------------------------------------------
+%% CMQ Validators
+%% -------------------------------------------------------------------
+
+classic_queue_ha_mode(Config) ->
+ rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, classic_queue_ha_mode1, [Config]).
+
+classic_queue_ha_mode1(_Config) ->
+ ?assertEqual(ok, rabbit_mirror_queue_misc:validate_policy([
+ {<<"ha-mode">>, <<"exactly">>},
+ {<<"ha-params">>, 2}
+ ])),
+
+ ?assertEqual(ok, rabbit_mirror_queue_misc:validate_policy([
+ {<<"ha-mode">>, <<"nodes">>},
+ {<<"ha-params">>, [<<"rabbit@host1">>, <<"rabbit@host2">>]}
+ ])),
+
+ ?assertEqual(ok, rabbit_mirror_queue_misc:validate_policy([
+ {<<"ha-mode">>, <<"all">>}
+ ])),
+
+ ?assertMatch({error, _, _}, rabbit_mirror_queue_misc:validate_policy([
+ {<<"ha-mode">>, <<"lolwut">>},
+ {<<"ha-params">>, 2}
+ ])).
+
+classic_queue_ha_params(Config) ->
+ rabbit_ct_broker_helpers:rpc(Config, 0,
+      ?MODULE, classic_queue_ha_params1, [Config]).
+
+classic_queue_ha_params1(_Config) ->
+ ?assertMatch({error, _, _}, rabbit_mirror_queue_misc:validate_policy([
+ {<<"ha-mode">>, <<"exactly">>},
+ {<<"ha-params">>, <<"2">>}
+ ])),
+
+ ?assertEqual(ok, rabbit_mirror_queue_misc:validate_policy([
+ {<<"ha-mode">>, <<"nodes">>},
+ {<<"ha-params">>, <<"lolwut">>}
+ ])),
+
+ ?assertEqual(ok, rabbit_mirror_queue_misc:validate_policy([
+ {<<"ha-mode">>, <<"all">>},
+ {<<"ha-params">>, <<"lolwut">>}
+ ])),
+
+ ?assertMatch({error, _, _}, rabbit_mirror_queue_misc:validate_policy([
+ {<<"ha-mode">>, <<"lolwut">>},
+ {<<"ha-params">>, 2}
+ ])).
+
+%%
+%% Implementation
+%%
+
+test_valid_and_invalid_values(Mod, Key, ValidValues, InvalidValues) ->
+ [begin
+ ?assertEqual(ok, Mod:validate_policy([
+ {Key, Val}
+ ]))
+ end || Val <- ValidValues],
+ [begin
+ ?assertMatch({error, _, _}, Mod:validate_policy([
+ {Key, Val}
+ ]))
+ end || Val <- InvalidValues].
+
+test_valid_and_invalid_values(Key, ValidValues, InvalidValues) ->
+ test_valid_and_invalid_values(rabbit_policies, Key, ValidValues, InvalidValues).
+
+requires_binary_value(Key) ->
+ test_valid_and_invalid_values(Key,
+ [<<"a.binary">>, <<"b.binary">>],
+ [1, rabbit]).
+
+requires_positive_integer_value(Key) ->
+ test_valid_and_invalid_values(Key,
+ [1, 1000],
+ [0, -1, <<"a.binary">>]).
+
+requires_non_negative_integer_value(Key) ->
+ test_valid_and_invalid_values(Key,
+ [0, 1, 1000],
+ [-1000, -1, <<"a.binary">>]).
diff --git a/deps/rabbit/test/unit_priority_queue_SUITE.erl b/deps/rabbit/test/unit_priority_queue_SUITE.erl
new file mode 100644
index 0000000000..5587e7d61f
--- /dev/null
+++ b/deps/rabbit/test/unit_priority_queue_SUITE.erl
@@ -0,0 +1,184 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(unit_priority_queue_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, sequential_tests}
+ ].
+
+groups() ->
+ [
+ {sequential_tests, [], [
+ priority_queue
+ ]}
+ ].
+
+
+priority_queue(_Config) ->
+
+ false = priority_queue:is_queue(not_a_queue),
+
+ %% empty Q
+ Q = priority_queue:new(),
+ {true, true, 0, [], []} = test_priority_queue(Q),
+
+ %% 1-4 element no-priority Q
+ true = lists:all(fun (X) -> X =:= passed end,
+ lists:map(fun test_simple_n_element_queue/1,
+ lists:seq(1, 4))),
+
+ %% 1-element priority Q
+ Q1 = priority_queue:in(foo, 1, priority_queue:new()),
+ {true, false, 1, [{1, foo}], [foo]} =
+ test_priority_queue(Q1),
+
+ %% 2-element same-priority Q
+ Q2 = priority_queue:in(bar, 1, Q1),
+ {true, false, 2, [{1, foo}, {1, bar}], [foo, bar]} =
+ test_priority_queue(Q2),
+
+ %% 2-element different-priority Q
+ Q3 = priority_queue:in(bar, 2, Q1),
+ {true, false, 2, [{2, bar}, {1, foo}], [bar, foo]} =
+ test_priority_queue(Q3),
+
+ %% 1-element negative priority Q
+ Q4 = priority_queue:in(foo, -1, priority_queue:new()),
+ {true, false, 1, [{-1, foo}], [foo]} = test_priority_queue(Q4),
+
+ %% merge 2 * 1-element no-priority Qs
+ Q5 = priority_queue:join(priority_queue:in(foo, Q),
+ priority_queue:in(bar, Q)),
+ {true, false, 2, [{0, foo}, {0, bar}], [foo, bar]} =
+ test_priority_queue(Q5),
+
+ %% merge 1-element no-priority Q with 1-element priority Q
+ Q6 = priority_queue:join(priority_queue:in(foo, Q),
+ priority_queue:in(bar, 1, Q)),
+ {true, false, 2, [{1, bar}, {0, foo}], [bar, foo]} =
+ test_priority_queue(Q6),
+
+ %% merge 1-element priority Q with 1-element no-priority Q
+ Q7 = priority_queue:join(priority_queue:in(foo, 1, Q),
+ priority_queue:in(bar, Q)),
+ {true, false, 2, [{1, foo}, {0, bar}], [foo, bar]} =
+ test_priority_queue(Q7),
+
+ %% merge 2 * 1-element same-priority Qs
+ Q8 = priority_queue:join(priority_queue:in(foo, 1, Q),
+ priority_queue:in(bar, 1, Q)),
+ {true, false, 2, [{1, foo}, {1, bar}], [foo, bar]} =
+ test_priority_queue(Q8),
+
+ %% merge 2 * 1-element different-priority Qs
+ Q9 = priority_queue:join(priority_queue:in(foo, 1, Q),
+ priority_queue:in(bar, 2, Q)),
+ {true, false, 2, [{2, bar}, {1, foo}], [bar, foo]} =
+ test_priority_queue(Q9),
+
+ %% merge 2 * 1-element different-priority Qs (other way around)
+ Q10 = priority_queue:join(priority_queue:in(bar, 2, Q),
+ priority_queue:in(foo, 1, Q)),
+ {true, false, 2, [{2, bar}, {1, foo}], [bar, foo]} =
+ test_priority_queue(Q10),
+
+ %% merge 2 * 2-element multi-different-priority Qs
+ Q11 = priority_queue:join(Q6, Q5),
+ {true, false, 4, [{1, bar}, {0, foo}, {0, foo}, {0, bar}],
+ [bar, foo, foo, bar]} = test_priority_queue(Q11),
+
+ %% and the other way around
+ Q12 = priority_queue:join(Q5, Q6),
+ {true, false, 4, [{1, bar}, {0, foo}, {0, bar}, {0, foo}],
+ [bar, foo, bar, foo]} = test_priority_queue(Q12),
+
+ %% merge with negative priorities
+ Q13 = priority_queue:join(Q4, Q5),
+ {true, false, 3, [{0, foo}, {0, bar}, {-1, foo}], [foo, bar, foo]} =
+ test_priority_queue(Q13),
+
+ %% and the other way around
+ Q14 = priority_queue:join(Q5, Q4),
+ {true, false, 3, [{0, foo}, {0, bar}, {-1, foo}], [foo, bar, foo]} =
+ test_priority_queue(Q14),
+
+ %% joins with empty queues:
+ Q1 = priority_queue:join(Q, Q1),
+ Q1 = priority_queue:join(Q1, Q),
+
+ %% insert with priority into non-empty zero-priority queue
+ Q15 = priority_queue:in(baz, 1, Q5),
+ {true, false, 3, [{1, baz}, {0, foo}, {0, bar}], [baz, foo, bar]} =
+ test_priority_queue(Q15),
+
+ %% 1-element infinity priority Q
+ Q16 = priority_queue:in(foo, infinity, Q),
+ {true, false, 1, [{infinity, foo}], [foo]} = test_priority_queue(Q16),
+
+ %% add infinity to 0-priority Q
+ Q17 = priority_queue:in(foo, infinity, priority_queue:in(bar, Q)),
+ {true, false, 2, [{infinity, foo}, {0, bar}], [foo, bar]} =
+ test_priority_queue(Q17),
+
+ %% and the other way around
+ Q18 = priority_queue:in(bar, priority_queue:in(foo, infinity, Q)),
+ {true, false, 2, [{infinity, foo}, {0, bar}], [foo, bar]} =
+ test_priority_queue(Q18),
+
+ %% add infinity to mixed-priority Q
+ Q19 = priority_queue:in(qux, infinity, Q3),
+ {true, false, 3, [{infinity, qux}, {2, bar}, {1, foo}], [qux, bar, foo]} =
+ test_priority_queue(Q19),
+
+ %% merge the above with a negative priority Q
+ Q20 = priority_queue:join(Q19, Q4),
+ {true, false, 4, [{infinity, qux}, {2, bar}, {1, foo}, {-1, foo}],
+ [qux, bar, foo, foo]} = test_priority_queue(Q20),
+
+ %% merge two infinity priority queues
+ Q21 = priority_queue:join(priority_queue:in(foo, infinity, Q),
+ priority_queue:in(bar, infinity, Q)),
+ {true, false, 2, [{infinity, foo}, {infinity, bar}], [foo, bar]} =
+ test_priority_queue(Q21),
+
+ %% merge two mixed priority with infinity queues
+ Q22 = priority_queue:join(Q18, Q20),
+ {true, false, 6, [{infinity, foo}, {infinity, qux}, {2, bar}, {1, foo},
+ {0, bar}, {-1, foo}], [foo, qux, bar, foo, bar, foo]} =
+ test_priority_queue(Q22),
+
+ passed.
+
+priority_queue_in_all(Q, L) ->
+ lists:foldl(fun (X, Acc) -> priority_queue:in(X, Acc) end, Q, L).
+
+priority_queue_out_all(Q) ->
+ case priority_queue:out(Q) of
+ {empty, _} -> [];
+ {{value, V}, Q1} -> [V | priority_queue_out_all(Q1)]
+ end.
+
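+%% Returns {IsQueue, IsEmpty, Len, ToList, OutOrder}, the tuple shape the
+%% testcase above matches against.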
+test_priority_queue(Q) ->
+ {priority_queue:is_queue(Q),
+ priority_queue:is_empty(Q),
+ priority_queue:len(Q),
+ priority_queue:to_list(Q),
+ priority_queue_out_all(Q)}.
+
+test_simple_n_element_queue(N) ->
+ Items = lists:seq(1, N),
+ Q = priority_queue_in_all(priority_queue:new(), Items),
+ ToListRes = [{0, X} || X <- Items],
+ {true, false, N, ToListRes, Items} = test_priority_queue(Q),
+ passed.
diff --git a/deps/rabbit/test/unit_queue_consumers_SUITE.erl b/deps/rabbit/test/unit_queue_consumers_SUITE.erl
new file mode 100644
index 0000000000..0f48ea65b4
--- /dev/null
+++ b/deps/rabbit/test/unit_queue_consumers_SUITE.erl
@@ -0,0 +1,121 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2018-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(unit_queue_consumers_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ is_same,
+ get_consumer,
+ get,
+ list_consumers
+ ].
+
+is_same(_Config) ->
+ ?assertEqual(
+ true,
+ rabbit_queue_consumers:is_same(
+ self(), <<"1">>,
+ consumer(self(), <<"1">>)
+ )),
+ ?assertEqual(
+ false,
+ rabbit_queue_consumers:is_same(
+ self(), <<"1">>,
+ consumer(self(), <<"2">>)
+ )),
+ Pid = spawn(?MODULE, function_for_process, []),
+ Pid ! whatever,
+ ?assertEqual(
+ false,
+ rabbit_queue_consumers:is_same(
+ self(), <<"1">>,
+ consumer(Pid, <<"1">>)
+ )),
+ ok.
+
+get(_Config) ->
+ Pid = spawn(?MODULE, function_for_process, []),
+ Pid ! whatever,
+ State = state(consumers([consumer(self(), <<"1">>), consumer(Pid, <<"2">>), consumer(self(), <<"3">>)])),
+ {Pid, {consumer, <<"2">>, _, _, _, _}} =
+ rabbit_queue_consumers:get(Pid, <<"2">>, State),
+ ?assertEqual(
+ undefined,
+ rabbit_queue_consumers:get(self(), <<"2">>, State)
+ ),
+ ?assertEqual(
+ undefined,
+ rabbit_queue_consumers:get(Pid, <<"1">>, State)
+ ),
+ ok.
+
+get_consumer(_Config) ->
+    Pid = spawn(?MODULE, function_for_process, []),
+ Pid ! whatever,
+ State = state(consumers([consumer(self(), <<"1">>), consumer(Pid, <<"2">>), consumer(self(), <<"3">>)])),
+ {_Pid, {consumer, _, _, _, _, _}} =
+ rabbit_queue_consumers:get_consumer(State),
+ ?assertEqual(
+ undefined,
+ rabbit_queue_consumers:get_consumer(state(consumers([])))
+ ),
+ ok.
+
+list_consumers(_Config) ->
+ State = state(consumers([consumer(self(), <<"1">>), consumer(self(), <<"2">>), consumer(self(), <<"3">>)])),
+ Consumer = rabbit_queue_consumers:get_consumer(State),
+ {_Pid, ConsumerRecord} = Consumer,
+ CTag = rabbit_queue_consumers:consumer_tag(ConsumerRecord),
+ ConsumersWithSingleActive = rabbit_queue_consumers:all(State, Consumer, true),
+ ?assertEqual(3, length(ConsumersWithSingleActive)),
+ lists:foldl(fun({Pid, Tag, _, _, Active, ActivityStatus, _, _}, _Acc) ->
+ ?assertEqual(self(), Pid),
+ case Tag of
+ CTag ->
+ ?assert(Active),
+ ?assertEqual(single_active, ActivityStatus);
+ _ ->
+ ?assertNot(Active),
+ ?assertEqual(waiting, ActivityStatus)
+ end
+ end, [], ConsumersWithSingleActive),
+ ConsumersNoSingleActive = rabbit_queue_consumers:all(State, none, false),
+ ?assertEqual(3, length(ConsumersNoSingleActive)),
+ lists:foldl(fun({Pid, _, _, _, Active, ActivityStatus, _, _}, _Acc) ->
+ ?assertEqual(self(), Pid),
+ ?assert(Active),
+ ?assertEqual(up, ActivityStatus)
+ end, [], ConsumersNoSingleActive),
+ ok.
+
+consumers([]) ->
+ priority_queue:new();
+consumers(Consumers) ->
+ consumers(Consumers, priority_queue:new()).
+
+consumers([H], Q) ->
+ priority_queue:in(H, Q);
+consumers([H | T], Q) ->
+ consumers(T, priority_queue:in(H, Q)).
+
+
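+%% consumer/2 and state/1 build minimal stand-ins for the internal
+%% consumer and rabbit_queue_consumers state tuples - only the fields
+%% these tests touch need to be populated.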
+consumer(Pid, ConsumerTag) ->
+ {Pid, {consumer, ConsumerTag, true, 1, [], <<"guest">>}}.
+
+state(Consumers) ->
+ {state, Consumers, {}}.
+
+function_for_process() ->
+ receive
+ _ -> ok
+ end.
diff --git a/deps/rabbit/test/unit_stats_and_metrics_SUITE.erl b/deps/rabbit/test/unit_stats_and_metrics_SUITE.erl
new file mode 100644
index 0000000000..2ffed514e1
--- /dev/null
+++ b/deps/rabbit/test/unit_stats_and_metrics_SUITE.erl
@@ -0,0 +1,266 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(unit_stats_and_metrics_SUITE).
+
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("kernel/include/file.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-compile(export_all).
+
+-define(TIMEOUT, 30000).
+
+all() ->
+ [
+ {group, non_parallel_tests}
+ ].
+
+groups() ->
+ [
+ {non_parallel_tests, [], [
+ channel_statistics,
+ head_message_timestamp_statistics
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(Group, Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, Group},
+ {rmq_nodes_count, 1}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_group(_Group, Config) ->
+ rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+
+%% -------------------------------------------------------------------
+%% Statistics.
+%% -------------------------------------------------------------------
+
+channel_statistics(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, channel_statistics1, [Config]).
+
+channel_statistics1(_Config) ->
+ application:set_env(rabbit, collect_statistics, fine),
+
+    %% At the moment this only tests the queue / exchange stats in
+    %% channels - by far the most complex part of the stats code.
+
+ %% Set up a channel and queue
+ {_Writer, Ch} = test_spawn(),
+ rabbit_channel:do(Ch, #'queue.declare'{}),
+ QName = receive #'queue.declare_ok'{queue = Q0} -> Q0
+ after ?TIMEOUT -> throw(failed_to_receive_queue_declare_ok)
+ end,
+ QRes = rabbit_misc:r(<<"/">>, queue, QName),
+ X = rabbit_misc:r(<<"/">>, exchange, <<"">>),
+
+ dummy_event_receiver:start(self(), [node()], [channel_stats]),
+
+ %% Check stats empty
+ Check1 = fun() ->
+ [] = ets:match(channel_queue_metrics, {Ch, QRes}),
+ [] = ets:match(channel_exchange_metrics, {Ch, X}),
+ [] = ets:match(channel_queue_exchange_metrics,
+ {Ch, {QRes, X}})
+ end,
+ test_ch_metrics(Check1, ?TIMEOUT),
+
+ %% Publish and get a message
+ rabbit_channel:do(Ch, #'basic.publish'{exchange = <<"">>,
+ routing_key = QName},
+ rabbit_basic:build_content(#'P_basic'{}, <<"">>)),
+ rabbit_channel:do(Ch, #'basic.get'{queue = QName}),
+
+ %% Check the stats reflect that
+ Check2 = fun() ->
+ [{{Ch, QRes}, 1, 0, 0, 0, 0, 0, 0, 0}] = ets:lookup(
+ channel_queue_metrics,
+ {Ch, QRes}),
+ [{{Ch, X}, 1, 0, 0, 0, 0}] = ets:lookup(
+ channel_exchange_metrics,
+ {Ch, X}),
+ [{{Ch, {QRes, X}}, 1, 0}] = ets:lookup(
+ channel_queue_exchange_metrics,
+ {Ch, {QRes, X}})
+ end,
+ test_ch_metrics(Check2, ?TIMEOUT),
+
+ %% Check the stats are marked for removal on queue deletion.
+ rabbit_channel:do(Ch, #'queue.delete'{queue = QName}),
+ Check3 = fun() ->
+ [{{Ch, QRes}, 1, 0, 0, 0, 0, 0, 0, 1}] = ets:lookup(
+ channel_queue_metrics,
+ {Ch, QRes}),
+ [{{Ch, X}, 1, 0, 0, 0, 0}] = ets:lookup(
+ channel_exchange_metrics,
+ {Ch, X}),
+ [{{Ch, {QRes, X}}, 1, 1}] = ets:lookup(
+ channel_queue_exchange_metrics,
+ {Ch, {QRes, X}})
+ end,
+ test_ch_metrics(Check3, ?TIMEOUT),
+
+ %% Check the garbage collection removes stuff.
+ force_metric_gc(),
+ Check4 = fun() ->
+ [] = ets:lookup(channel_queue_metrics, {Ch, QRes}),
+ [{{Ch, X}, 1, 0, 0, 0, 0}] = ets:lookup(
+ channel_exchange_metrics,
+ {Ch, X}),
+ [] = ets:lookup(channel_queue_exchange_metrics,
+ {Ch, {QRes, X}})
+ end,
+ test_ch_metrics(Check4, ?TIMEOUT),
+
+ rabbit_channel:shutdown(Ch),
+ dummy_event_receiver:stop(),
+ passed.
+
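+%% Trigger a GC run, then use a synchronous call as a barrier: by the time
+%% the call returns, the start_gc message has already been processed.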
+force_metric_gc() ->
+ timer:sleep(300),
+ rabbit_core_metrics_gc ! start_gc,
+ gen_server:call(rabbit_core_metrics_gc, test).
+
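+%% Metric emission is asynchronous, so the checks are retried once a
+%% second until Timeout is exhausted; the final attempt is allowed to
+%% fail the test with the actual badmatch.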
+test_ch_metrics(Fun, Timeout) when Timeout =< 0 ->
+ Fun();
+test_ch_metrics(Fun, Timeout) ->
+ try
+ Fun()
+ catch
+ _:{badmatch, _} ->
+ timer:sleep(1000),
+ test_ch_metrics(Fun, Timeout - 1000)
+ end.
+
+head_message_timestamp_statistics(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, head_message_timestamp1, [Config]).
+
+head_message_timestamp1(_Config) ->
+ %% there is no convenient rabbit_channel API for confirms
+ %% this test could use, so it relies on tx.* methods
+ %% and gen_server2 flushing
+ application:set_env(rabbit, collect_statistics, fine),
+
+ %% Set up a channel and queue
+ {_Writer, Ch} = test_spawn(),
+ rabbit_channel:do(Ch, #'queue.declare'{}),
+ QName = receive #'queue.declare_ok'{queue = Q0} -> Q0
+ after ?TIMEOUT -> throw(failed_to_receive_queue_declare_ok)
+ end,
+ QRes = rabbit_misc:r(<<"/">>, queue, QName),
+
+ {ok, Q1} = rabbit_amqqueue:lookup(QRes),
+ QPid = amqqueue:get_pid(Q1),
+
+ %% Set up event receiver for queue
+ dummy_event_receiver:start(self(), [node()], [queue_stats]),
+
+ %% the head timestamp field is empty when the queue is empty
+ test_queue_statistics_receive_event(QPid,
+ fun (E) ->
+ (proplists:get_value(name, E) == QRes)
+ and
+ (proplists:get_value(head_message_timestamp, E) == '')
+ end),
+
+ rabbit_channel:do(Ch, #'tx.select'{}),
+ receive #'tx.select_ok'{} -> ok
+ after ?TIMEOUT -> throw(failed_to_receive_tx_select_ok)
+ end,
+
+ %% Publish two messages and check that the timestamp is that of the first message
+ rabbit_channel:do(Ch, #'basic.publish'{exchange = <<"">>,
+ routing_key = QName},
+ rabbit_basic:build_content(#'P_basic'{timestamp = 1}, <<"">>)),
+ rabbit_channel:do(Ch, #'basic.publish'{exchange = <<"">>,
+ routing_key = QName},
+ rabbit_basic:build_content(#'P_basic'{timestamp = 2}, <<"">>)),
+ rabbit_channel:do(Ch, #'tx.commit'{}),
+ rabbit_channel:flush(Ch),
+ receive #'tx.commit_ok'{} -> ok
+ after ?TIMEOUT -> throw(failed_to_receive_tx_commit_ok)
+ end,
+ test_queue_statistics_receive_event(QPid,
+ fun (E) ->
+ (proplists:get_value(name, E) == QRes)
+ and
+ (proplists:get_value(head_message_timestamp, E) == 1)
+ end),
+
+ %% Consume a message and check that the timestamp is now that of the second message
+ rabbit_channel:do(Ch, #'basic.get'{queue = QName, no_ack = true}),
+ test_queue_statistics_receive_event(QPid,
+ fun (E) ->
+ (proplists:get_value(name, E) == QRes)
+ and
+ (proplists:get_value(head_message_timestamp, E) == 2)
+ end),
+
+ %% Consume one more message and check again
+ rabbit_channel:do(Ch, #'basic.get'{queue = QName, no_ack = true}),
+ test_queue_statistics_receive_event(QPid,
+ fun (E) ->
+ (proplists:get_value(name, E) == QRes)
+ and
+ (proplists:get_value(head_message_timestamp, E) == '')
+ end),
+
+ %% Tear down
+ rabbit_channel:do(Ch, #'queue.delete'{queue = QName}),
+ rabbit_channel:shutdown(Ch),
+ dummy_event_receiver:stop(),
+
+ passed.
+
+test_queue_statistics_receive_event(Q, Matcher) ->
+ %% Q ! emit_stats,
+ test_queue_statistics_receive_event1(Q, Matcher).
+
+test_queue_statistics_receive_event1(Q, Matcher) ->
+ receive #event{type = queue_stats, props = Props} ->
+ case Matcher(Props) of
+ true -> Props;
+ _ -> test_queue_statistics_receive_event1(Q, Matcher)
+ end
+ after ?TIMEOUT -> throw(failed_to_receive_event)
+ end.
+
+test_spawn() ->
+ {Writer, _Limiter, Ch} = rabbit_ct_broker_helpers:test_channel(),
+ ok = rabbit_channel:do(Ch, #'channel.open'{}),
+ receive #'channel.open_ok'{} -> ok
+ after ?TIMEOUT -> throw(failed_to_receive_channel_open_ok)
+ end,
+ {Writer, Ch}.
diff --git a/deps/rabbit/test/unit_supervisor2_SUITE.erl b/deps/rabbit/test/unit_supervisor2_SUITE.erl
new file mode 100644
index 0000000000..50633984e2
--- /dev/null
+++ b/deps/rabbit/test/unit_supervisor2_SUITE.erl
@@ -0,0 +1,69 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(unit_supervisor2_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, sequential_tests}
+ ].
+
+groups() ->
+ [
+ {sequential_tests, [], [
+ check_shutdown_stop,
+ check_shutdown_ignored
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Test Cases
+%% -------------------------------------------------------------------
+
+check_shutdown_stop(_Config) ->
+ ok = check_shutdown(stop, 200, 200, 2000).
+
+check_shutdown_ignored(_Config) ->
+ ok = check_shutdown(ignored, 1, 2, 2000).
+
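+%% Starts a dummy supervisor2 tree, then repeatedly spawns ChildCount
+%% children, signals them with SigStop and bounces the test supervisor,
+%% asserting that the supervisor always terminates with reason 'shutdown'.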
+check_shutdown(SigStop, Iterations, ChildCount, SupTimeout) ->
+ {ok, Sup} = supervisor2:start_link(dummy_supervisor2, [SupTimeout]),
+ Res = lists:foldl(
+ fun (I, ok) ->
+ TestSupPid = erlang:whereis(dummy_supervisor2),
+ ChildPids =
+ [begin
+ {ok, ChildPid} =
+ supervisor2:start_child(TestSupPid, []),
+ ChildPid
+ end || _ <- lists:seq(1, ChildCount)],
+ MRef = erlang:monitor(process, TestSupPid),
+ [P ! SigStop || P <- ChildPids],
+ ok = supervisor2:terminate_child(Sup, test_sup),
+ {ok, _} = supervisor2:restart_child(Sup, test_sup),
+ receive
+ {'DOWN', MRef, process, TestSupPid, shutdown} ->
+ ok;
+ {'DOWN', MRef, process, TestSupPid, Reason} ->
+ {error, {I, Reason}}
+ end;
+ (_, R) ->
+ R
+ end, ok, lists:seq(1, Iterations)),
+ unlink(Sup),
+ MSupRef = erlang:monitor(process, Sup),
+ exit(Sup, shutdown),
+ receive
+ {'DOWN', MSupRef, process, Sup, _Reason} ->
+ ok
+ end,
+ Res.
diff --git a/deps/rabbit/test/unit_vm_memory_monitor_SUITE.erl b/deps/rabbit/test/unit_vm_memory_monitor_SUITE.erl
new file mode 100644
index 0000000000..193df1f956
--- /dev/null
+++ b/deps/rabbit/test/unit_vm_memory_monitor_SUITE.erl
@@ -0,0 +1,95 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(unit_vm_memory_monitor_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, sequential_tests}
+ ].
+
+groups() ->
+ [
+ {sequential_tests, [], [
+ parse_line_linux,
+ set_vm_memory_high_watermark_command
+ ]}
+ ].
+
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(Group, Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, Group},
+ {rmq_nodes_count, 1}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_group(_Group, Config) ->
+ rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+
+%% -------------------------------------------------------------------
+%% Test cases
+%% -------------------------------------------------------------------
+
+
+parse_line_linux(_Config) ->
+ lists:foreach(fun ({S, {K, V}}) ->
+ {K, V} = vm_memory_monitor:parse_line_linux(S)
+ end,
+ [{"MemTotal: 0 kB", {'MemTotal', 0}},
+ {"MemTotal: 502968 kB ", {'MemTotal', 515039232}},
+ {"MemFree: 178232 kB", {'MemFree', 182509568}},
+ {"MemTotal: 50296888", {'MemTotal', 50296888}},
+ {"MemTotal 502968 kB", {'MemTotal', 515039232}},
+ {"MemTotal 50296866 ", {'MemTotal', 50296866}}]),
+ ok.
+
+set_vm_memory_high_watermark_command(Config) ->
+ rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, set_vm_memory_high_watermark_command1, [Config]).
+
+set_vm_memory_high_watermark_command1(_Config) ->
+ MemLimitRatio = 1.0,
+ MemTotal = vm_memory_monitor:get_total_memory(),
+
+ vm_memory_monitor:set_vm_memory_high_watermark(MemLimitRatio),
+ MemLimit = vm_memory_monitor:get_memory_limit(),
+ case MemLimit of
+ MemTotal -> ok;
+        _ -> MemTotalToMemLimitRatio = MemLimit / MemTotal,
+             ct:fail(
+                "Expected memory high watermark to be ~p (~p), but it was ~p (~.1f)",
+ [MemTotal, MemLimitRatio, MemLimit, MemTotalToMemLimitRatio]
+ )
+ end.
diff --git a/deps/rabbit/test/upgrade_preparation_SUITE.erl b/deps/rabbit/test/upgrade_preparation_SUITE.erl
new file mode 100644
index 0000000000..880238515a
--- /dev/null
+++ b/deps/rabbit/test/upgrade_preparation_SUITE.erl
@@ -0,0 +1,109 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(upgrade_preparation_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, clustered}
+ ].
+
+groups() ->
+ [
+ {clustered, [], [
+ await_quorum_plus_one
+ ]}
+ ].
+
+
+%% -------------------------------------------------------------------
+%% Test Case
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(Group, Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodes_count, 3},
+ {rmq_nodename_suffix, Group}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_group(_Group, Config) ->
+ rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+
+init_per_testcase(TestCase, Config) ->
+    Config1 = rabbit_ct_helpers:testcase_started(Config, TestCase),
+    case rabbit_ct_broker_helpers:enable_feature_flag(Config1, quorum_queue) of
+        ok   -> Config1;
+        Skip -> Skip
+    end.
+
+end_per_testcase(TestCase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, TestCase).
+
+
+
+%%
+%% Test Cases
+%%
+
+-define(WAITING_INTERVAL, 10000).
+
+await_quorum_plus_one(Config) ->
+ catch delete_queues(),
+ [A, B, _C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, A),
+ declare(Ch, <<"qq.1">>, [{<<"x-queue-type">>, longstr, <<"quorum">>}]),
+ timer:sleep(100),
+ ?assert(await_quorum_plus_one(Config, 0)),
+
+ ok = rabbit_ct_broker_helpers:stop_node(Config, B),
+ ?assertNot(await_quorum_plus_one(Config, 0)),
+
+ ok = rabbit_ct_broker_helpers:start_node(Config, B),
+ ?assert(await_quorum_plus_one(Config, 0)).
+
+%%
+%% Implementation
+%%
+
+declare(Ch, Q) ->
+ declare(Ch, Q, []).
+
+declare(Ch, Q, Args) ->
+ amqp_channel:call(Ch, #'queue.declare'{queue = Q,
+ durable = true,
+ auto_delete = false,
+ arguments = Args}).
+
+delete_queues() ->
+ [rabbit_amqqueue:delete(Q, false, false, <<"tests">>) || Q <- rabbit_amqqueue:list()].
+
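+%% Returns a boolean: whether every quorum queue has a quorum of members
+%% plus one online within Timeout. The rpc timeout is padded so the remote
+%% wait can expire first.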
+await_quorum_plus_one(Config, Node) ->
+ await_quorum_plus_one(Config, Node, ?WAITING_INTERVAL).
+
+await_quorum_plus_one(Config, Node, Timeout) ->
+ rabbit_ct_broker_helpers:rpc(Config, Node,
+ rabbit_upgrade_preparation, await_online_quorum_plus_one, [Timeout], Timeout + 500).
+
diff --git a/deps/rabbit/test/vhost_SUITE.erl b/deps/rabbit/test/vhost_SUITE.erl
new file mode 100644
index 0000000000..4e6ffe0d74
--- /dev/null
+++ b/deps/rabbit/test/vhost_SUITE.erl
@@ -0,0 +1,381 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(vhost_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, cluster_size_1_network},
+ {group, cluster_size_2_network},
+ {group, cluster_size_1_direct},
+ {group, cluster_size_2_direct}
+ ].
+
+groups() ->
+ ClusterSize1Tests = [
+ single_node_vhost_deletion_forces_connection_closure,
+ vhost_failure_forces_connection_closure,
+ vhost_creation_idempotency
+ ],
+ ClusterSize2Tests = [
+ cluster_vhost_deletion_forces_connection_closure,
+ vhost_failure_forces_connection_closure,
+ vhost_failure_forces_connection_closure_on_failure_node,
+ node_starts_with_dead_vhosts,
+ node_starts_with_dead_vhosts_with_mirrors,
+ vhost_creation_idempotency
+ ],
+ [
+ {cluster_size_1_network, [], ClusterSize1Tests},
+ {cluster_size_2_network, [], ClusterSize2Tests},
+ {cluster_size_1_direct, [], ClusterSize1Tests},
+ {cluster_size_2_direct, [], ClusterSize2Tests}
+ ].
+
+suite() ->
+ [
+ %% If a test hangs, no need to wait for 30 minutes.
+ {timetrap, {minutes, 8}}
+ ].
+
+%% see partitions_SUITE
+-define(DELAY, 9000).
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(cluster_size_1_network, Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config, [{connection_type, network}]),
+ init_per_multinode_group(cluster_size_1_network, Config1, 1);
+init_per_group(cluster_size_2_network, Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config, [{connection_type, network}]),
+ init_per_multinode_group(cluster_size_2_network, Config1, 2);
+init_per_group(cluster_size_1_direct, Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config, [{connection_type, direct}]),
+ init_per_multinode_group(cluster_size_1_direct, Config1, 1);
+init_per_group(cluster_size_2_direct, Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config, [{connection_type, direct}]),
+ init_per_multinode_group(cluster_size_2_direct, Config1, 2).
+
+init_per_multinode_group(_Group, Config, NodeCount) ->
+ Suffix = rabbit_ct_helpers:testcase_absname(Config, "", "-"),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodes_count, NodeCount},
+ {rmq_nodename_suffix, Suffix}
+ ]),
+
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_group(_Group, Config) ->
+ rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase),
+ clear_all_connection_tracking_tables(Config),
+ Config.
+
+end_per_testcase(Testcase, Config) ->
+ VHost1 = <<"vhost1">>,
+ VHost2 = <<"vhost2">>,
+ case Testcase of
+ cluster_vhost_deletion_forces_connection_closure -> ok;
+ single_node_vhost_deletion_forces_connection_closure -> ok;
+ _ ->
+ delete_vhost(Config, VHost2)
+ end,
+ delete_vhost(Config, VHost1),
+ clear_all_connection_tracking_tables(Config),
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+delete_vhost(Config, VHost) ->
+ case rabbit_ct_broker_helpers:delete_vhost(Config, VHost) of
+ ok -> ok;
+ {error, {no_such_vhost, _}} -> ok
+ end.
+
+clear_all_connection_tracking_tables(Config) ->
+ [rabbit_ct_broker_helpers:rpc(Config,
+ N,
+ rabbit_connection_tracking,
+ clear_tracked_connection_tables_for_this_node,
+ []) || N <- rabbit_ct_broker_helpers:get_node_configs(Config, nodename)].
+
+%% -------------------------------------------------------------------
+%% Test cases.
+%% -------------------------------------------------------------------
+
+single_node_vhost_deletion_forces_connection_closure(Config) ->
+ VHost1 = <<"vhost1">>,
+ VHost2 = <<"vhost2">>,
+
+ set_up_vhost(Config, VHost1),
+ set_up_vhost(Config, VHost2),
+
+ ?assertEqual(0, count_connections_in(Config, VHost1)),
+ ?assertEqual(0, count_connections_in(Config, VHost2)),
+
+ [Conn1] = open_connections(Config, [{0, VHost1}]),
+ ?assertEqual(1, count_connections_in(Config, VHost1)),
+
+ [_Conn2] = open_connections(Config, [{0, VHost2}]),
+ ?assertEqual(1, count_connections_in(Config, VHost2)),
+
+ rabbit_ct_broker_helpers:delete_vhost(Config, VHost2),
+ timer:sleep(200),
+ ?assertEqual(0, count_connections_in(Config, VHost2)),
+
+ close_connections([Conn1]),
+ ?assertEqual(0, count_connections_in(Config, VHost1)).
+
+vhost_failure_forces_connection_closure(Config) ->
+ VHost1 = <<"vhost1">>,
+ VHost2 = <<"vhost2">>,
+
+ set_up_vhost(Config, VHost1),
+ set_up_vhost(Config, VHost2),
+
+ ?assertEqual(0, count_connections_in(Config, VHost1)),
+ ?assertEqual(0, count_connections_in(Config, VHost2)),
+
+ [Conn1] = open_connections(Config, [{0, VHost1}]),
+ ?assertEqual(1, count_connections_in(Config, VHost1)),
+
+ [_Conn2] = open_connections(Config, [{0, VHost2}]),
+ ?assertEqual(1, count_connections_in(Config, VHost2)),
+
+ rabbit_ct_broker_helpers:force_vhost_failure(Config, VHost2),
+ timer:sleep(200),
+ ?assertEqual(0, count_connections_in(Config, VHost2)),
+
+ close_connections([Conn1]),
+ ?assertEqual(0, count_connections_in(Config, VHost1)).
+
+
+vhost_failure_forces_connection_closure_on_failure_node(Config) ->
+ VHost1 = <<"vhost1">>,
+ VHost2 = <<"vhost2">>,
+
+ set_up_vhost(Config, VHost1),
+ set_up_vhost(Config, VHost2),
+
+ ?assertEqual(0, count_connections_in(Config, VHost1)),
+ ?assertEqual(0, count_connections_in(Config, VHost2)),
+
+ [Conn1] = open_connections(Config, [{0, VHost1}]),
+ ?assertEqual(1, count_connections_in(Config, VHost1)),
+
+ [_Conn20] = open_connections(Config, [{0, VHost2}]),
+ [_Conn21] = open_connections(Config, [{1, VHost2}]),
+ ?assertEqual(2, count_connections_in(Config, VHost2)),
+
+ rabbit_ct_broker_helpers:force_vhost_failure(Config, 0, VHost2),
+ timer:sleep(200),
+ %% Vhost2 connection on node 1 is still alive
+ ?assertEqual(1, count_connections_in(Config, VHost2)),
+ %% Vhost1 connection on node 0 is still alive
+ ?assertEqual(1, count_connections_in(Config, VHost1)),
+
+ close_connections([Conn1]),
+ ?assertEqual(0, count_connections_in(Config, VHost1)).
+
+
+cluster_vhost_deletion_forces_connection_closure(Config) ->
+ VHost1 = <<"vhost1">>,
+ VHost2 = <<"vhost2">>,
+
+ set_up_vhost(Config, VHost1),
+ set_up_vhost(Config, VHost2),
+
+ ?assertEqual(0, count_connections_in(Config, VHost1)),
+ ?assertEqual(0, count_connections_in(Config, VHost2)),
+
+ [Conn1] = open_connections(Config, [{0, VHost1}]),
+ ?assertEqual(1, count_connections_in(Config, VHost1)),
+
+ [_Conn2] = open_connections(Config, [{1, VHost2}]),
+ ?assertEqual(1, count_connections_in(Config, VHost2)),
+
+ rabbit_ct_broker_helpers:delete_vhost(Config, VHost2),
+ timer:sleep(200),
+ ?assertEqual(0, count_connections_in(Config, VHost2)),
+
+ close_connections([Conn1]),
+ ?assertEqual(0, count_connections_in(Config, VHost1)).
+
+node_starts_with_dead_vhosts(Config) ->
+ VHost1 = <<"vhost1">>,
+ VHost2 = <<"vhost2">>,
+
+ set_up_vhost(Config, VHost1),
+ set_up_vhost(Config, VHost2),
+
+ Conn = rabbit_ct_client_helpers:open_unmanaged_connection(Config, 1, VHost1),
+ {ok, Chan} = amqp_connection:open_channel(Conn),
+
+ QName = <<"node_starts_with_dead_vhosts-q-1">>,
+ amqp_channel:call(Chan, #'queue.declare'{queue = QName, durable = true}),
+ rabbit_ct_client_helpers:publish(Chan, QName, 10),
+
+ DataStore1 = rabbit_ct_broker_helpers:rpc(
+ Config, 1, rabbit_vhost, msg_store_dir_path, [VHost1]),
+
+ rabbit_ct_broker_helpers:stop_node(Config, 1),
+
+ file:write_file(filename:join(DataStore1, "recovery.dets"), <<"garbage">>),
+
+ %% The node should start despite the broken vhost
+ ok = rabbit_ct_broker_helpers:start_node(Config, 1),
+
+ timer:sleep(3000),
+
+ ?assertEqual(true, rabbit_ct_broker_helpers:rpc(Config, 1,
+ rabbit_vhost_sup_sup, is_vhost_alive, [VHost2])).
+
+node_starts_with_dead_vhosts_with_mirrors(Config) ->
+ VHost1 = <<"vhost1">>,
+ VHost2 = <<"vhost2">>,
+
+ set_up_vhost(Config, VHost1),
+ set_up_vhost(Config, VHost2),
+
+ true = rabbit_ct_broker_helpers:rpc(Config, 1,
+ rabbit_vhost_sup_sup, is_vhost_alive, [VHost1]),
+ true = rabbit_ct_broker_helpers:rpc(Config, 1,
+ rabbit_vhost_sup_sup, is_vhost_alive, [VHost2]),
+ [] = rabbit_ct_broker_helpers:rpc(Config, 1,
+ rabbit_vhost_sup_sup, check, []),
+
+ Conn = rabbit_ct_client_helpers:open_unmanaged_connection(Config, 0, VHost1),
+ {ok, Chan} = amqp_connection:open_channel(Conn),
+
+ QName = <<"node_starts_with_dead_vhosts_with_mirrors-q-0">>,
+ amqp_channel:call(Chan, #'queue.declare'{queue = QName, durable = true}),
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+ rabbit_policy, set,
+ [VHost1, <<"mirror">>, <<".*">>, [{<<"ha-mode">>, <<"all">>}],
+ 0, <<"queues">>, <<"acting-user">>]),
+
+ %% Wait for the queue to start a mirror
+ timer:sleep(500),
+
+ rabbit_ct_client_helpers:publish(Chan, QName, 10),
+
+ {ok, Q} = rabbit_ct_broker_helpers:rpc(
+ Config, 0,
+ rabbit_amqqueue, lookup,
+ [rabbit_misc:r(VHost1, queue, QName)], infinity),
+
+ Node1 = rabbit_ct_broker_helpers:get_node_config(Config, 1, nodename),
+
+ [Pid] = amqqueue:get_sync_slave_pids(Q),
+
+ Node1 = node(Pid),
+
+ DataStore1 = rabbit_ct_broker_helpers:rpc(
+ Config, 1, rabbit_vhost, msg_store_dir_path, [VHost1]),
+
+ rabbit_ct_broker_helpers:stop_node(Config, 1),
+
+ file:write_file(filename:join(DataStore1, "recovery.dets"), <<"garbage">>),
+
+ %% The node should start despite the broken vhost
+ ok = rabbit_ct_broker_helpers:start_node(Config, 1),
+
+ timer:sleep(3000),
+
+ ?assertEqual(true, rabbit_ct_broker_helpers:rpc(Config, 1,
+ rabbit_vhost_sup_sup, is_vhost_alive, [VHost2])).
+
+vhost_creation_idempotency(Config) ->
+ VHost = <<"idempotency-test">>,
+ try
+ ?assertEqual(ok, rabbit_ct_broker_helpers:add_vhost(Config, VHost)),
+ ?assertEqual(ok, rabbit_ct_broker_helpers:add_vhost(Config, VHost)),
+ ?assertEqual(ok, rabbit_ct_broker_helpers:add_vhost(Config, VHost))
+ after
+ rabbit_ct_broker_helpers:delete_vhost(Config, VHost)
+ end.
+
+%% -------------------------------------------------------------------
+%% Helpers
+%% -------------------------------------------------------------------
+
+open_connections(Config, NodesAndVHosts) ->
+ %% Select the connection type (network or direct) from the group config
+ OpenConnectionFun = case ?config(connection_type, Config) of
+ network -> open_unmanaged_connection;
+ direct -> open_unmanaged_connection_direct
+ end,
+ Conns = lists:map(fun
+ ({Node, VHost}) ->
+ rabbit_ct_client_helpers:OpenConnectionFun(Config, Node,
+ VHost);
+ (Node) ->
+ rabbit_ct_client_helpers:OpenConnectionFun(Config, Node)
+ end, NodesAndVHosts),
+ timer:sleep(500),
+ Conns.
+
+close_connections(Conns) ->
+ lists:foreach(fun
+ (Conn) ->
+ rabbit_ct_client_helpers:close_connection(Conn)
+ end, Conns),
+ timer:sleep(500).
+
+count_connections_in(Config, VHost) ->
+ count_connections_in(Config, VHost, 0).
+count_connections_in(Config, VHost, NodeIndex) ->
+ timer:sleep(200),
+ rabbit_ct_broker_helpers:rpc(Config, NodeIndex,
+ rabbit_connection_tracking,
+ count_tracked_items_in, [{vhost, VHost}]).
+
+set_up_vhost(Config, VHost) ->
+ rabbit_ct_broker_helpers:add_vhost(Config, VHost),
+ rabbit_ct_broker_helpers:set_full_permissions(Config, <<"guest">>, VHost),
+ set_vhost_connection_limit(Config, VHost, -1).
+
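+%% Applies a per-vhost connection limit via the `set_vhost_limits`
+%% control command; -1 (as used in set_up_vhost/2 above) means "no limit".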
+set_vhost_connection_limit(Config, VHost, Count) ->
+ set_vhost_connection_limit(Config, 0, VHost, Count).
+
+set_vhost_connection_limit(Config, NodeIndex, VHost, Count) ->
+ Node = rabbit_ct_broker_helpers:get_node_config(
+ Config, NodeIndex, nodename),
+ ok = rabbit_ct_broker_helpers:control_action(
+ set_vhost_limits, Node,
+ ["{\"max-connections\": " ++ integer_to_list(Count) ++ "}"],
+ [{"-p", binary_to_list(VHost)}]).
+
+expect_that_client_connection_is_rejected(Config) ->
+ expect_that_client_connection_is_rejected(Config, 0).
+
+expect_that_client_connection_is_rejected(Config, NodeIndex) ->
+ {error, _} =
+ rabbit_ct_client_helpers:open_unmanaged_connection(Config, NodeIndex).
+
+expect_that_client_connection_is_rejected(Config, NodeIndex, VHost) ->
+ {error, _} =
+ rabbit_ct_client_helpers:open_unmanaged_connection(Config, NodeIndex, VHost).
diff --git a/deps/rabbit_common/.gitignore b/deps/rabbit_common/.gitignore
new file mode 100644
index 0000000000..f609631433
--- /dev/null
+++ b/deps/rabbit_common/.gitignore
@@ -0,0 +1,31 @@
+*~
+.sw?
+.*.sw?
+.*.plt
+*.beam
+*.coverdata
+/.*.plt
+/.erlang.mk/
+/cover/
+/deps/
+/doc/
+/ebin/
+/escript/
+/escript.lock
+/git-revisions.txt
+/logs/
+/plugins/
+/plugins.lock
+/rebar.config
+/rebar.lock
+/sbin/
+/sbin.lock
+/test/ct.cover.spec
+/xrefr
+
+/rabbit_common.d
+
+# Generated source files.
+/include/rabbit_framing.hrl
+/src/rabbit_framing_amqp_0_8.erl
+/src/rabbit_framing_amqp_0_9_1.erl
diff --git a/deps/rabbit_common/.travis.yml b/deps/rabbit_common/.travis.yml
new file mode 100644
index 0000000000..a9b75d084d
--- /dev/null
+++ b/deps/rabbit_common/.travis.yml
@@ -0,0 +1,61 @@
+# vim:sw=2:et:
+
+os: linux
+dist: xenial
+language: elixir
+notifications:
+ email:
+ recipients:
+ - alerts@rabbitmq.com
+ on_success: never
+ on_failure: always
+addons:
+ apt:
+ packages:
+ - awscli
+cache:
+ apt: true
+env:
+ global:
+ - secure: Tu26VJ9BsXxL20xxwWk4cbCkZyqyxYmNpSSqco5r3FLeU5hk5Vkk+s2BareRvqKhKHFlvyxu8GwsKtajMvsieP6y5J99gSeub6fDOIskPz61bo0aKA9nbDuBFSG1Z5wgXx1XRo0yDatLxXCXe3FbThRsylG7XNjtRaru1/lwuVxfxPtBGQ1opvQX71sST3GYSPoBYR+JlcVpU+uDHMAzsP8J0m5rEpxcl821aTMk3iz90hBQMsoLTBmSQePPcNqOA/1OH75VfjuXR8JBXHvA9njrUBrsyxgHf2uOh3jAXdIrHZwZg/17+y7gNVqByfx/UpGb8XEpVkncg/cRyVIHMk7/gFCZkeVC1QkIN5+EPiGLF7u32x9QaT7Zqz57iLh3IJzED2dj12qWaeX8QypF1K1r5qq4pRrN6iEZx76stpZbyFT4XnExHRdzPuouy7yz1gDHF0HOxbNLowzc/jk7tuTp+qmDSR5tRvegAIH3TONegxXyB7smdbvdI6MCN5/GP2bGK7HiqYWCmTGHtJwgxBKc5XoV8ZjpXfKxG98WbK5RsSP1miRnmxSbxaV0Gai1hfFlanJFFxTA9584O+NVRXNNFMfnnt20Ts6OwoXTcJ/boIPjF5Mcm0eJ4nz4R18TArXE4B5S4pTk3eQkG1ACDigkYZ3fc6ws4cWrt8BZASI=
+ - secure: fNEx9OXi2UisiYu0FiHJpV9+vWLB9DIUAIKG24GfUHVgZqFQOInBf5fEYrjlVgm5zNezSBS3hFNHXd/EXJF8KNgbf6mI0z4h4RyyQY98N+78tWvINoIawEeYpgC6NTI52MdaCfV+fTVWhiL0uP7mqWhLmll2bKXIy6HA6I9PnmiQSloNe64vUPF+UsVZHzzeabK4DR2VdI3h+BGXzOY9FG8Kt2voiXOLd2RFpVeN86FDTp+uVZY/K9e/MsktoK+XaZZ4qMAgm6lB32LVkzl3KA9ki6y6BY7le1m2c90hxAtBJGWZptkMb+VL0Fem39nEBnLjE0a0vIddp32PLJQmv6eopMfLay5BIkwtkRwv3P0uCwYd0bgYQSHF/gdTCcK1nr7fMhkQveBh6vmnbhrca7OeQRHz08+jo6EquUgNQZKmTZPWXQn9lS9mU/0EDLJJhn4KhJezGw6DcAAqB0KqmQedxtHMUT87by7LzhINwKZnm4y5WKA/W/zLI6dNqvIgc5C6UJh0EVgxa13GRmrnGmttV1dtLRQhiMJCbJykaekjPMULUmli0RbFz7bSFqFqEUsF+wwovyD+Y6D8KGOJdvvEYPdPIFpRPnhGUvH86JzsFdVKNJBicGI9LpCtlXlWNRbQIQ8uV5ze2HhxSJhtM6e6dB4d9yzpp6a81uR77bk=
+
+ # $base_rmq_ref is used by rabbitmq-components.mk to select the
+ # appropriate branch for dependencies.
+ - base_rmq_ref=master
+
+elixir:
+ - '1.10'
+otp_release:
+ - '22.3'
+ - '23.0'
+
+install:
+ # This project being an Erlang one (we just set language to Elixir
+ # to ensure it is installed), we don't want Travis to run mix(1)
+ # automatically as it will break.
+ skip
+
+script:
+ # $current_rmq_ref is also used by rabbitmq-components.mk to select
+ # the appropriate branch for dependencies.
+ - make check-rabbitmq-components.mk
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+ - make xref
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+ - make tests
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+
+after_failure:
+ - |
+ cd "$TRAVIS_BUILD_DIR"
+ if test -d logs && test "$AWS_ACCESS_KEY_ID" && test "$AWS_SECRET_ACCESS_KEY"; then
+ archive_name="$(basename "$TRAVIS_REPO_SLUG")-$TRAVIS_JOB_NUMBER"
+
+ tar -c --transform "s/^logs/${archive_name}/" -f - logs | \
+ xz > "${archive_name}.tar.xz"
+
+ aws s3 cp "${archive_name}.tar.xz" s3://server-release-pipeline/travis-ci-logs/ \
+ --region eu-west-1 \
+ --acl public-read
+ fi
diff --git a/deps/rabbit_common/CODE_OF_CONDUCT.md b/deps/rabbit_common/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000000..08697906fd
--- /dev/null
+++ b/deps/rabbit_common/CODE_OF_CONDUCT.md
@@ -0,0 +1,44 @@
+# Contributor Code of Conduct
+
+As contributors and maintainers of this project, and in the interest of fostering an open
+and welcoming community, we pledge to respect all people who contribute through reporting
+issues, posting feature requests, updating documentation, submitting pull requests or
+patches, and other activities.
+
+We are committed to making participation in this project a harassment-free experience for
+everyone, regardless of level of experience, gender, gender identity and expression,
+sexual orientation, disability, personal appearance, body size, race, ethnicity, age,
+religion, or nationality.
+
+Examples of unacceptable behavior by participants include:
+
+ * The use of sexualized language or imagery
+ * Personal attacks
+ * Trolling or insulting/derogatory comments
+ * Public or private harassment
+ * Publishing others' private information, such as physical or electronic addresses,
+ without explicit permission
+ * Other unethical or unprofessional conduct
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments,
+commits, code, wiki edits, issues, and other contributions that are not aligned to this
+Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors
+that they deem inappropriate, threatening, offensive, or harmful.
+
+By adopting this Code of Conduct, project maintainers commit themselves to fairly and
+consistently applying these principles to every aspect of managing this project. Project
+maintainers who do not follow or enforce the Code of Conduct may be permanently removed
+from the project team.
+
+This Code of Conduct applies both within project spaces and in public spaces when an
+individual is representing the project or its community.
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by
+contacting a project maintainer at [info@rabbitmq.com](mailto:info@rabbitmq.com). All complaints will
+be reviewed and investigated and will result in a response that is deemed necessary and
+appropriate to the circumstances. Maintainers are obligated to maintain confidentiality
+with regard to the reporter of an incident.
+
+This Code of Conduct is adapted from the
+[Contributor Covenant](https://contributor-covenant.org), version 1.3.0, available at
+[contributor-covenant.org/version/1/3/0/](https://contributor-covenant.org/version/1/3/0/)
diff --git a/deps/rabbit_common/CONTRIBUTING.md b/deps/rabbit_common/CONTRIBUTING.md
new file mode 100644
index 0000000000..23a92fef9c
--- /dev/null
+++ b/deps/rabbit_common/CONTRIBUTING.md
@@ -0,0 +1,38 @@
+## Overview
+
+RabbitMQ projects use pull requests to discuss, collaborate on and accept code contributions.
+Pull requests are the primary place for discussing code changes.
+
+## How to Contribute
+
+The process is fairly standard:
+
+ * Fork the repository or repositories you plan on contributing to
+ * Clone [RabbitMQ umbrella repository](https://github.com/rabbitmq/rabbitmq-public-umbrella)
+ * `cd umbrella`, `make co`
+ * Create a branch with a descriptive name in the relevant repositories
+ * Make your changes, run tests, commit with a [descriptive message](https://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html), push to your fork
+ * Submit pull requests with an explanation of what has been changed and **why**
+ * Submit a filled out and signed [Contributor Agreement](https://github.com/rabbitmq/ca#how-to-submit) if needed (see below)
+ * Be patient. We will get to your pull request eventually
+
+If what you are going to work on is a substantial change, please first ask the core team
+for their opinion on the [RabbitMQ mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
+
+
+## Code of Conduct
+
+See [CODE_OF_CONDUCT.md](./CODE_OF_CONDUCT.md).
+
+
+## Contributor Agreement
+
+If you want to contribute a non-trivial change, please submit a signed copy of our
+[Contributor Agreement](https://github.com/rabbitmq/ca#how-to-submit) around the time
+you submit your pull request. This will make it much easier (in some cases, possible)
+for the RabbitMQ team at Pivotal to merge your contribution.
+
+
+## Where to Ask Questions
+
+If something isn't clear, feel free to ask on our [mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
diff --git a/deps/rabbit_common/LICENSE b/deps/rabbit_common/LICENSE
new file mode 100644
index 0000000000..bd1046bc1d
--- /dev/null
+++ b/deps/rabbit_common/LICENSE
@@ -0,0 +1,11 @@
+This package, the RabbitMQ commons library, is licensed under the MPL 2.0. For
+the MPL, please see LICENSE-MPL-RabbitMQ.
+
+The files `rabbit_numerical.erl` and `rabbit_http_util.erl` are (c) 2007
+Mochi Media, Inc. and licensed under an MIT license; see LICENSE-MIT-Mochi.
+
+The files `rabbit_semver.erl` and `rabbit_semver_parser.erl` are Copyright (c) 2011
+Erlware, LLC and licensed under an MIT license; see LICENSE-MIT-Erlware-Commons.
+
+If you have any questions regarding licensing, please contact us at
+info@rabbitmq.com.
diff --git a/deps/rabbit_common/LICENSE-BSD-recon b/deps/rabbit_common/LICENSE-BSD-recon
new file mode 100644
index 0000000000..be0aebbaf1
--- /dev/null
+++ b/deps/rabbit_common/LICENSE-BSD-recon
@@ -0,0 +1,27 @@
+Copyright (c) 2012-2017, Frédéric Trottier-Hébert
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+ Redistributions in binary form must reproduce the above copyright notice, this
+ list of conditions and the following disclaimer in the documentation and/or
+ other materials provided with the distribution.
+
+ The names of its contributors may not be used to endorse or promote
+ products derived from this software without specific prior written
+ permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/deps/rabbit_common/LICENSE-MIT-Erlware-Commons b/deps/rabbit_common/LICENSE-MIT-Erlware-Commons
new file mode 100644
index 0000000000..fc89c0272d
--- /dev/null
+++ b/deps/rabbit_common/LICENSE-MIT-Erlware-Commons
@@ -0,0 +1,21 @@
+Copyright (c) 2011 Erlware, LLC
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
diff --git a/deps/rabbit_common/LICENSE-MIT-Mochi b/deps/rabbit_common/LICENSE-MIT-Mochi
new file mode 100644
index 0000000000..c85b65a4d3
--- /dev/null
+++ b/deps/rabbit_common/LICENSE-MIT-Mochi
@@ -0,0 +1,9 @@
+This is the MIT license.
+
+Copyright (c) 2007 Mochi Media, Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/deps/rabbit_common/LICENSE-MPL-RabbitMQ b/deps/rabbit_common/LICENSE-MPL-RabbitMQ
new file mode 100644
index 0000000000..14e2f777f6
--- /dev/null
+++ b/deps/rabbit_common/LICENSE-MPL-RabbitMQ
@@ -0,0 +1,373 @@
+Mozilla Public License Version 2.0
+==================================
+
+1. Definitions
+--------------
+
+1.1. "Contributor"
+ means each individual or legal entity that creates, contributes to
+ the creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+ means the combination of the Contributions of others (if any) used
+ by a Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+ means Source Code Form to which the initial Contributor has attached
+ the notice in Exhibit A, the Executable Form of such Source Code
+ Form, and Modifications of such Source Code Form, in each case
+ including portions thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ (a) that the initial Contributor has attached the notice described
+ in Exhibit B to the Covered Software; or
+
+ (b) that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the
+ terms of a Secondary License.
+
+1.6. "Executable Form"
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+ means a work that combines Covered Software with other material, in
+ a separate file or files, that is not Covered Software.
+
+1.8. "License"
+ means this document.
+
+1.9. "Licensable"
+ means having the right to grant, to the maximum extent possible,
+ whether at the time of the initial grant or subsequently, any and
+ all of the rights conveyed by this License.
+
+1.10. "Modifications"
+ means any of the following:
+
+ (a) any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered
+ Software; or
+
+ (b) any new file in Source Code Form that contains any Covered
+ Software.
+
+1.11. "Patent Claims" of a Contributor
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the
+ License, by the making, using, selling, offering for sale, having
+ made, import, or transfer of either its Contributions or its
+ Contributor Version.
+
+1.12. "Secondary License"
+ means either the GNU General Public License, Version 2.0, the GNU
+ Lesser General Public License, Version 2.1, the GNU Affero General
+ Public License, Version 3.0, or any later versions of those
+ licenses.
+
+1.13. "Source Code Form"
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that
+ controls, is controlled by, or is under common control with You. For
+ purposes of this definition, "control" means (a) the power, direct
+ or indirect, to cause the direction or management of such entity,
+ whether by contract or otherwise, or (b) ownership of more than
+ fifty percent (50%) of the outstanding shares or beneficial
+ ownership of such entity.
+
+2. License Grants and Conditions
+--------------------------------
+
+2.1. Grants
+
+Each Contributor hereby grants You a world-wide, royalty-free,
+non-exclusive license:
+
+(a) under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+(b) under Patent Claims of such Contributor to make, use, sell, offer
+ for sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+The licenses granted in Section 2.1 with respect to any Contribution
+become effective for each Contribution on the date the Contributor first
+distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+The licenses granted in this Section 2 are the only rights granted under
+this License. No additional rights or licenses will be implied from the
+distribution or licensing of Covered Software under this License.
+Notwithstanding Section 2.1(b) above, no patent license is granted by a
+Contributor:
+
+(a) for any code that a Contributor has removed from Covered Software;
+ or
+
+(b) for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+(c) under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+This License does not grant any rights in the trademarks, service marks,
+or logos of any Contributor (except as may be necessary to comply with
+the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+No Contributor makes additional grants as a result of Your choice to
+distribute the Covered Software under a subsequent version of this
+License (see Section 10.2) or under the terms of a Secondary License (if
+permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+Each Contributor represents that the Contributor believes its
+Contributions are its original creation(s) or it has sufficient rights
+to grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+This License is not intended to limit any rights You have under
+applicable copyright doctrines of fair use, fair dealing, or other
+equivalents.
+
+2.7. Conditions
+
+Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
+in Section 2.1.
+
+3. Responsibilities
+-------------------
+
+3.1. Distribution of Source Form
+
+All distribution of Covered Software in Source Code Form, including any
+Modifications that You create or to which You contribute, must be under
+the terms of this License. You must inform recipients that the Source
+Code Form of the Covered Software is governed by the terms of this
+License, and how they can obtain a copy of this License. You may not
+attempt to alter or restrict the recipients' rights in the Source Code
+Form.
+
+3.2. Distribution of Executable Form
+
+If You distribute Covered Software in Executable Form then:
+
+(a) such Covered Software must also be made available in Source Code
+ Form, as described in Section 3.1, and You must inform recipients of
+ the Executable Form how they can obtain a copy of such Source Code
+ Form by reasonable means in a timely manner, at a charge no more
+ than the cost of distribution to the recipient; and
+
+(b) You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter
+ the recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+You may create and distribute a Larger Work under terms of Your choice,
+provided that You also comply with the requirements of this License for
+the Covered Software. If the Larger Work is a combination of Covered
+Software with a work governed by one or more Secondary Licenses, and the
+Covered Software is not Incompatible With Secondary Licenses, this
+License permits You to additionally distribute such Covered Software
+under the terms of such Secondary License(s), so that the recipient of
+the Larger Work may, at their option, further distribute the Covered
+Software under the terms of either this License or such Secondary
+License(s).
+
+3.4. Notices
+
+You may not remove or alter the substance of any license notices
+(including copyright notices, patent notices, disclaimers of warranty,
+or limitations of liability) contained within the Source Code Form of
+the Covered Software, except that You may alter any license notices to
+the extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+You may choose to offer, and to charge a fee for, warranty, support,
+indemnity or liability obligations to one or more recipients of Covered
+Software. However, You may do so only on Your own behalf, and not on
+behalf of any Contributor. You must make it absolutely clear that any
+such warranty, support, indemnity, or liability obligation is offered by
+You alone, and You hereby agree to indemnify every Contributor for any
+liability incurred by such Contributor as a result of warranty, support,
+indemnity or liability terms You offer. You may include additional
+disclaimers of warranty and limitations of liability specific to any
+jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+---------------------------------------------------
+
+If it is impossible for You to comply with any of the terms of this
+License with respect to some or all of the Covered Software due to
+statute, judicial order, or regulation then You must: (a) comply with
+the terms of this License to the maximum extent possible; and (b)
+describe the limitations and the code they affect. Such description must
+be placed in a text file included with all distributions of the Covered
+Software under this License. Except to the extent prohibited by statute
+or regulation, such description must be sufficiently detailed for a
+recipient of ordinary skill to be able to understand it.
+
+5. Termination
+--------------
+
+5.1. The rights granted under this License will terminate automatically
+if You fail to comply with any of its terms. However, if You become
+compliant, then the rights granted under this License from a particular
+Contributor are reinstated (a) provisionally, unless and until such
+Contributor explicitly and finally terminates Your grants, and (b) on an
+ongoing basis, if such Contributor fails to notify You of the
+non-compliance by some reasonable means prior to 60 days after You have
+come back into compliance. Moreover, Your grants from a particular
+Contributor are reinstated on an ongoing basis if such Contributor
+notifies You of the non-compliance by some reasonable means, this is the
+first time You have received notice of non-compliance with this License
+from such Contributor, and You become compliant prior to 30 days after
+Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+infringement claim (excluding declaratory judgment actions,
+counter-claims, and cross-claims) alleging that a Contributor Version
+directly or indirectly infringes any patent, then the rights granted to
+You by any and all Contributors for the Covered Software under Section
+2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all
+end user license agreements (excluding distributors and resellers) which
+have been validly granted by You or Your distributors under this License
+prior to termination shall survive termination.
+
+************************************************************************
+* *
+* 6. Disclaimer of Warranty *
+* ------------------------- *
+* *
+* Covered Software is provided under this License on an "as is" *
+* basis, without warranty of any kind, either expressed, implied, or *
+* statutory, including, without limitation, warranties that the *
+* Covered Software is free of defects, merchantable, fit for a *
+* particular purpose or non-infringing. The entire risk as to the *
+* quality and performance of the Covered Software is with You. *
+* Should any Covered Software prove defective in any respect, You *
+* (not any Contributor) assume the cost of any necessary servicing, *
+* repair, or correction. This disclaimer of warranty constitutes an *
+* essential part of this License. No use of any Covered Software is *
+* authorized under this License except under this disclaimer. *
+* *
+************************************************************************
+
+************************************************************************
+* *
+* 7. Limitation of Liability *
+* -------------------------- *
+* *
+* Under no circumstances and under no legal theory, whether tort *
+* (including negligence), contract, or otherwise, shall any *
+* Contributor, or anyone who distributes Covered Software as *
+* permitted above, be liable to You for any direct, indirect, *
+* special, incidental, or consequential damages of any character *
+* including, without limitation, damages for lost profits, loss of *
+* goodwill, work stoppage, computer failure or malfunction, or any *
+* and all other commercial damages or losses, even if such party *
+* shall have been informed of the possibility of such damages. This *
+* limitation of liability shall not apply to liability for death or *
+* personal injury resulting from such party's negligence to the *
+* extent applicable law prohibits such limitation. Some *
+* jurisdictions do not allow the exclusion or limitation of *
+* incidental or consequential damages, so this exclusion and *
+* limitation may not apply to You. *
+* *
+************************************************************************
+
+8. Litigation
+-------------
+
+Any litigation relating to this License may be brought only in the
+courts of a jurisdiction where the defendant maintains its principal
+place of business and such litigation shall be governed by laws of that
+jurisdiction, without reference to its conflict-of-law provisions.
+Nothing in this Section shall prevent a party's ability to bring
+cross-claims or counter-claims.
+
+9. Miscellaneous
+----------------
+
+This License represents the complete agreement concerning the subject
+matter hereof. If any provision of this License is held to be
+unenforceable, such provision shall be reformed only to the extent
+necessary to make it enforceable. Any law or regulation which provides
+that the language of a contract shall be construed against the drafter
+shall not be used to construe this License against a Contributor.
+
+10. Versions of the License
+---------------------------
+
+10.1. New Versions
+
+Mozilla Foundation is the license steward. Except as provided in Section
+10.3, no one other than the license steward has the right to modify or
+publish new versions of this License. Each version will be given a
+distinguishing version number.
+
+10.2. Effect of New Versions
+
+You may distribute the Covered Software under the terms of the version
+of the License under which You originally received the Covered Software,
+or under the terms of any subsequent version published by the license
+steward.
+
+10.3. Modified Versions
+
+If you create software not governed by this License, and you want to
+create a new license for such software, you may create and use a
+modified version of this License if you rename the license and remove
+any references to the name of the license steward (except to note that
+such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+Licenses
+
+If You choose to distribute Source Code Form that is Incompatible With
+Secondary Licenses under the terms of this version of the License, the
+notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+-------------------------------------------
+
+ This Source Code Form is subject to the terms of the Mozilla Public
+ License, v. 2.0. If a copy of the MPL was not distributed with this
+ file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular
+file, then You may include the notice in a location (such as a LICENSE
+file in a relevant directory) where a recipient would be likely to look
+for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+---------------------------------------------------------
+
+ This Source Code Form is "Incompatible With Secondary Licenses", as
+ defined by the Mozilla Public License, v. 2.0.
diff --git a/deps/rabbit_common/Makefile b/deps/rabbit_common/Makefile
new file mode 100644
index 0000000000..6a31a9ccbf
--- /dev/null
+++ b/deps/rabbit_common/Makefile
@@ -0,0 +1,53 @@
+PROJECT = rabbit_common
+PROJECT_DESCRIPTION = Modules shared by rabbitmq-server and rabbitmq-erlang-client
+
+define PROJECT_APP_EXTRA_KEYS
+%% Hex.pm package information.
+ {licenses, ["MPL-2.0"]},
+ {links, [
+ {"Website", "https://www.rabbitmq.com/"},
+ {"GitHub", "https://github.com/rabbitmq/rabbitmq-common"}
+ ]},
+ {build_tools, ["make", "rebar3"]},
+ {files, [
+ $(RABBITMQ_HEXPM_DEFAULT_FILES),
+ "mk"
+ ]}
+endef
+
+LOCAL_DEPS = compiler crypto public_key sasl ssl syntax_tools tools xmerl
+DEPS = lager jsx ranch recon credentials_obfuscation
+
+dep_credentials_obfuscation = hex 2.2.0
+
+# FIXME: Use erlang.mk patched for RabbitMQ, while waiting for PRs to be
+# reviewed and merged.
+
+ERLANG_MK_REPO = https://github.com/rabbitmq/erlang.mk.git
+ERLANG_MK_COMMIT = rabbitmq-tmp
+
+# Variables and recipes in development.*.mk are meant to be used from
+# any Git clone. They are excluded from the files published to Hex.pm.
+# Generated files, however, are published to Hex.pm, so people using
+# this source won't have to depend on Python and rabbitmq-codegen.
+#
+# That's why those Makefiles are included with `-include`: we ignore any
+# inclusion errors.
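+#
+# (A plain `include` would abort make(1) if the file were missing;
+# `-include` silently skips it.)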
+
+-include development.pre.mk
+
+DEP_EARLY_PLUGINS = $(PROJECT)/mk/rabbitmq-early-test.mk
+DEP_PLUGINS = $(PROJECT)/mk/rabbitmq-build.mk \
+ $(PROJECT)/mk/rabbitmq-hexpm.mk \
+ $(PROJECT)/mk/rabbitmq-dist.mk \
+ $(PROJECT)/mk/rabbitmq-test.mk \
+ $(PROJECT)/mk/rabbitmq-tools.mk
+
+WITHOUT = plugins/proper
+
+PLT_APPS += mnesia crypto ssl
+
+include mk/rabbitmq-components.mk
+include erlang.mk
+
+-include development.post.mk
diff --git a/deps/rabbit_common/README.md b/deps/rabbit_common/README.md
new file mode 100644
index 0000000000..139a0ab018
--- /dev/null
+++ b/deps/rabbit_common/README.md
@@ -0,0 +1,4 @@
+# RabbitMQ Common
+
+This library is shared between [RabbitMQ server](https://github.com/rabbitmq/rabbitmq-server), [RabbitMQ Erlang client](https://github.com/rabbitmq/rabbitmq-erlang-client)
+and other RabbitMQ ecosystem projects.
diff --git a/deps/rabbit_common/codegen.py b/deps/rabbit_common/codegen.py
new file mode 100755
index 0000000000..2e7bad69e9
--- /dev/null
+++ b/deps/rabbit_common/codegen.py
@@ -0,0 +1,582 @@
+#!/usr/bin/env python
+
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+##
+
+from __future__ import nested_scopes
+from __future__ import print_function
+
+import sys
+
+from amqp_codegen import *
+import string
+import re
+
+# Coming up with a proper encoding of AMQP tables in JSON is too much
+# hassle at this stage. Given that the only default value we are
+# interested in is for the empty table, we only support that.
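+# (So convertTable({}) yields "[]", and any non-empty table default
+# raises an exception.)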
+def convertTable(d):
+ if len(d) == 0:
+ return "[]"
+ else:
+ raise Exception('Non-empty table defaults not supported: ' + repr(d))
+
+erlangDefaultValueTypeConvMap = {
+ bool : lambda x: str(x).lower(),
+ int : lambda x: str(x),
+ float : lambda x: str(x),
+ dict: convertTable
+}
+
+try:
+ _checkIfPython2 = unicode
+ erlangDefaultValueTypeConvMap[str] = lambda x: "<<\"" + x + "\">>"
+ erlangDefaultValueTypeConvMap[unicode] = lambda x: "<<\"" + x.encode("utf-8") + "\">>"
+except NameError:
+ erlangDefaultValueTypeConvMap[bytes] = lambda x: "<<\"" + x + "\">>"
+ erlangDefaultValueTypeConvMap[str] = lambda x: "<<\"" + x + "\">>"
+
+def erlangize(s):
+ s = s.replace('-', '_')
+ s = s.replace(' ', '_')
+ return s
+
+AmqpMethod.erlangName = lambda m: "'" + erlangize(m.klass.name) + '.' + erlangize(m.name) + "'"
+
+AmqpClass.erlangName = lambda c: "'" + erlangize(c.name) + "'"
+
+def erlangConstantName(s):
+ return '_'.join(re.split('[- ]', s.upper()))
+
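+# AMQP packs runs of consecutive `bit` fields into octets, eight flags
+# per byte; this accumulator groups up to 8 such fields for (de)coding.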
+class PackedMethodBitField:
+ def __init__(self, index):
+ self.index = index
+ self.domain = 'bit'
+ self.contents = []
+
+ def extend(self, f):
+ self.contents.append(f)
+
+ def count(self):
+ return len(self.contents)
+
+ def full(self):
+ return self.count() == 8
+
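+# Joins `things` with `separator`, switching to `lineSeparator` every
+# `thingsPerLine` items; e.g. multiLineFormat(["a", "b", "c"], "( ",
+# " | ", "\n  | ", " )", thingsPerLine = 2) returns "( a | b\n  | c )".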
+def multiLineFormat(things, prologue, separator, lineSeparator, epilogue, thingsPerLine = 4):
+ r = [prologue]
+ i = 0
+ for t in things:
+ if i != 0:
+ if i % thingsPerLine == 0:
+ r += [lineSeparator]
+ else:
+ r += [separator]
+ r += [t]
+ i += 1
+ r += [epilogue]
+ return "".join(r)
+
+def prettyType(typeName, subTypes, typesPerLine = 4):
+ """Pretty print a type signature made up of many alternative subtypes"""
+ sTs = multiLineFormat(subTypes,
+ "( ", " | ", "\n | ", " )",
+ thingsPerLine = typesPerLine)
+ return "-type %s ::\n %s." % (typeName, sTs)
+
+def printFileHeader():
+ print("""%% Autogenerated code. Do not edit.
+%%
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%""")
+
+def genErl(spec):
+ def erlType(domain):
+ return erlangize(spec.resolveDomain(domain))
+
+ def fieldTypeList(fields):
+ return '[' + ', '.join([erlType(f.domain) for f in fields]) + ']'
+
+ def fieldNameList(fields):
+ return '[' + ', '.join([erlangize(f.name) for f in fields]) + ']'
+
+ def fieldTempList(fields):
+ return '[' + ', '.join(['F' + str(f.index) for f in fields]) + ']'
+
+ def fieldMapList(fields):
+ return ', '.join([erlangize(f.name) + " = F" + str(f.index) for f in fields])
+
+ def genLookupMethodName(m):
+ print("lookup_method_name({%d, %d}) -> %s;" % (m.klass.index, m.index, m.erlangName()))
+
+ def genLookupClassName(c):
+ print("lookup_class_name(%d) -> %s;" % (c.index, c.erlangName()))
+
+ def genMethodId(m):
+ print("method_id(%s) -> {%d, %d};" % (m.erlangName(), m.klass.index, m.index))
+
+ def genMethodHasContent(m):
+ print("method_has_content(%s) -> %s;" % (m.erlangName(), str(m.hasContent).lower()))
+
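+ # A synchronous method that carries a `nowait` field is treated as
+ # synchronous only when that field is false, hence the generated
+ # clause checks it at runtime.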
+ def genMethodIsSynchronous(m):
+ hasNoWait = "nowait" in fieldNameList(m.arguments)
+ if m.isSynchronous and hasNoWait:
+ print("is_method_synchronous(#%s{nowait = NoWait}) -> not(NoWait);" % (m.erlangName()))
+ else:
+ print("is_method_synchronous(#%s{}) -> %s;" % (m.erlangName(), str(m.isSynchronous).lower()))
+
+ def genMethodFieldTypes(m):
+ """Not currently used - may be useful in future?"""
+ print("method_fieldtypes(%s) -> %s;" % (m.erlangName(), fieldTypeList(m.arguments)))
+
+ def genMethodFieldNames(m):
+ print("method_fieldnames(%s) -> %s;" % (m.erlangName(), fieldNameList(m.arguments)))
+
+ def packMethodFields(fields):
+ packed = []
+ bitfield = None
+ for f in fields:
+ if erlType(f.domain) == 'bit':
+ if not(bitfield) or bitfield.full():
+ bitfield = PackedMethodBitField(f.index)
+ packed.append(bitfield)
+ bitfield.extend(f)
+ else:
+ bitfield = None
+ packed.append(f)
+ return packed
+
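+ # Returns an Erlang binary-pattern fragment for one field; e.g. a
+ # 'shortstr' field with index 3 yields
+ # "F3Len:8/unsigned, F3:F3Len/binary".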
+ def methodFieldFragment(f):
+ type = erlType(f.domain)
+ p = 'F' + str(f.index)
+ if type == 'shortstr':
+ return p+'Len:8/unsigned, '+p+':'+p+'Len/binary'
+ elif type == 'longstr':
+ return p+'Len:32/unsigned, '+p+':'+p+'Len/binary'
+ elif type == 'octet':
+ return p+':8/unsigned'
+ elif type == 'short':
+ return p+':16/unsigned'
+ elif type == 'long':
+ return p+':32/unsigned'
+ elif type == 'longlong':
+ return p+':64/unsigned'
+ elif type == 'timestamp':
+ return p+':64/unsigned'
+ elif type == 'bit':
+ return p+'Bits:8'
+ elif type == 'table':
+ return p+'Len:32/unsigned, '+p+'Tab:'+p+'Len/binary'
+
+ def genFieldPostprocessing(packed, hasContent):
+ for f in packed:
+ type = erlType(f.domain)
+ if type == 'bit':
+ for index in range(f.count()):
+ print(" F%d = ((F%dBits band %d) /= 0)," % \
+ (f.index + index,
+ f.index,
+ 1 << index))
+ elif type == 'table':
+ print(" F%d = rabbit_binary_parser:parse_table(F%dTab)," % \
+ (f.index, f.index))
+ # We skip the check on content-bearing methods for
+ # speed. This is a sanity check, not a security thing.
+ elif type == 'shortstr' and not hasContent:
+ print(" rabbit_binary_parser:assert_utf8(F%d)," % (f.index))
+ else:
+ pass
+
+ def genMethodRecord(m):
+ print("method_record(%s) -> #%s{};" % (m.erlangName(), m.erlangName()))
+
+ def genDecodeMethodFields(m):
+ packedFields = packMethodFields(m.arguments)
+ binaryPattern = ', '.join([methodFieldFragment(f) for f in packedFields])
+ if binaryPattern:
+ restSeparator = ', '
+ else:
+ restSeparator = ''
+ recordConstructorExpr = '#%s{%s}' % (m.erlangName(), fieldMapList(m.arguments))
+ print("decode_method_fields(%s, <<%s>>) ->" % (m.erlangName(), binaryPattern))
+ genFieldPostprocessing(packedFields, m.hasContent)
+ print(" %s;" % (recordConstructorExpr,))
+
+ def genDecodeProperties(c):
+ def presentBin(fields):
+ ps = ', '.join(['P' + str(f.index) + ':1' for f in fields])
+ return '<<' + ps + ', _:%d, R0/binary>>' % (16 - len(fields),)
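+ # (Class properties start with a 16-bit property-flags field, one
+ # presence bit per property; presentBin above matches those bits and
+ # treats the remainder as padding.)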
+ def writePropFieldLine(field):
+ i = str(field.index)
+ if field.domain == 'bit':
+ print(" {F%s, R%s} = {P%s =/= 0, R%s}," % \
+ (i, str(field.index + 1), i, i))
+ else:
+ print(" {F%s, R%s} = if P%s =:= 0 -> {undefined, R%s}; true -> ?%s_VAL(R%s, L%s, V%s, X%s) end," % \
+ (i, str(field.index + 1), i, i, erlType(field.domain).upper(), i, i, i, i))
+
+ if len(c.fields) == 0:
+ print("decode_properties(%d, <<>>) ->" % (c.index,))
+ else:
+ print(("decode_properties(%d, %s) ->" %
+ (c.index, presentBin(c.fields))))
+ for field in c.fields:
+ writePropFieldLine(field)
+ print(" <<>> = %s," % ('R' + str(len(c.fields))))
+ print(" #'P_%s'{%s};" % (erlangize(c.name), fieldMapList(c.fields)))
+
+ def genFieldPreprocessing(packed):
+ for f in packed:
+ type = erlType(f.domain)
+ if type == 'bit':
+ print(" F%dBits = (%s)," % \
+ (f.index,
+ ' bor '.join(['(bitvalue(F%d) bsl %d)' % (x.index, x.index - f.index)
+ for x in f.contents])))
+ elif type == 'table':
+ print(" F%dTab = rabbit_binary_generator:generate_table(F%d)," % (f.index, f.index))
+ print(" F%dLen = size(F%dTab)," % (f.index, f.index))
+ elif type == 'shortstr':
+ print(" F%dLen = shortstr_size(F%d)," % (f.index, f.index))
+ elif type == 'longstr':
+ print(" F%dLen = size(F%d)," % (f.index, f.index))
+ else:
+ pass
+
+ def genEncodeMethodFields(m):
+ packedFields = packMethodFields(m.arguments)
+ print("encode_method_fields(#%s{%s}) ->" % (m.erlangName(), fieldMapList(m.arguments)))
+ genFieldPreprocessing(packedFields)
+ print(" <<%s>>;" % (', '.join([methodFieldFragment(f) for f in packedFields])))
+
+ def genEncodeProperties(c):
+ def presentBin(fields):
+ ps = ', '.join(['P' + str(f.index) + ':1' for f in fields])
+ return '<<' + ps + ', 0:%d>>' % (16 - len(fields),)
+ def writePropFieldLine(field):
+ i = str(field.index)
+ if field.domain == 'bit':
+ print(" {P%s, R%s} = {F%s =:= 1, R%s}," % \
+ (i, str(field.index + 1), i, i))
+ else:
+ print(" {P%s, R%s} = if F%s =:= undefined -> {0, R%s}; true -> {1, [?%s_PROP(F%s, L%s) | R%s]} end," % \
+ (i, str(field.index + 1), i, i, erlType(field.domain).upper(), i, i, i))
+
+ print("encode_properties(#'P_%s'{%s}) ->" % (erlangize(c.name), fieldMapList(c.fields)))
+ if len(c.fields) == 0:
+ print(" <<>>;")
+ else:
+ print(" R0 = [<<>>],")
+ for field in c.fields:
+ writePropFieldLine(field)
+ print(" list_to_binary([%s | lists:reverse(R%s)]);" % \
+ (presentBin(c.fields), str(len(c.fields))))
+
+ def messageConstantClass(cls):
+ # We do this because 0.8 uses "soft error" and 0.9.1 uses "soft-error".
+ return erlangConstantName(cls)
+
+ def genLookupException(c,v,cls):
+ mCls = messageConstantClass(cls)
+ if mCls == 'SOFT_ERROR': genLookupException1(c,'false')
+ elif mCls == 'HARD_ERROR': genLookupException1(c, 'true')
+ elif mCls == '': pass
+ else: raise Exception('Unknown constant class: ' + cls)
+
+ def genLookupException1(c,hardErrorBoolStr):
+ n = erlangConstantName(c)
+ print('lookup_amqp_exception(%s) -> {%s, ?%s, <<"%s">>};' % \
+ (n.lower(), hardErrorBoolStr, n, n))
+
+ def genAmqpException(c,v,cls):
+ n = erlangConstantName(c)
+ print('amqp_exception(?%s) -> %s;' % \
+ (n, n.lower()))
+
+ methods = spec.allMethods()
+
+ printFileHeader()
+ module = "rabbit_framing_amqp_%d_%d" % (spec.major, spec.minor)
+ if spec.revision != 0:
+ module = "%s_%d" % (module, spec.revision)
+ if module == "rabbit_framing_amqp_8_0":
+ module = "rabbit_framing_amqp_0_8"
+ print("-module(%s)." % module)
+ print("""-include("rabbit_framing.hrl").
+
+-export([version/0]).
+-export([lookup_method_name/1]).
+-export([lookup_class_name/1]).
+
+-export([method_id/1]).
+-export([method_has_content/1]).
+-export([is_method_synchronous/1]).
+-export([method_record/1]).
+-export([method_fieldnames/1]).
+-export([decode_method_fields/2]).
+-export([decode_properties/2]).
+-export([encode_method_fields/1]).
+-export([encode_properties/1]).
+-export([lookup_amqp_exception/1]).
+-export([amqp_exception/1]).
+
+""")
+ print("%% Various types")
+
+ print("""-export_type([amqp_field_type/0, amqp_property_type/0,
+ amqp_table/0, amqp_array/0, amqp_value/0,
+ amqp_method_name/0, amqp_method/0, amqp_method_record/0,
+ amqp_method_field_name/0, amqp_property_record/0,
+ amqp_exception/0, amqp_exception_code/0, amqp_class_id/0]).
+
+-type amqp_field_type() ::
+ 'longstr' | 'signedint' | 'decimal' | 'timestamp' |
+ 'unsignedbyte' | 'unsignedshort' | 'unsignedint' |
+ 'table' | 'byte' | 'double' | 'float' | 'long' |
+ 'short' | 'bool' | 'binary' | 'void' | 'array'.
+-type amqp_property_type() ::
+ 'shortstr' | 'longstr' | 'octet' | 'short' | 'long' |
+ 'longlong' | 'timestamp' | 'bit' | 'table'.
+
+-type amqp_table() :: [{binary(), amqp_field_type(), amqp_value()}].
+-type amqp_array() :: [{amqp_field_type(), amqp_value()}].
+-type amqp_value() :: binary() | % longstr
+ integer() | % signedint
+ {non_neg_integer(), non_neg_integer()} | % decimal
+ amqp_table() |
+ amqp_array() |
+ byte() | % byte
+ float() | % double
+ integer() | % long
+ integer() | % short
+ boolean() | % bool
+ binary() | % binary
+ 'undefined' | % void
+ non_neg_integer(). % timestamp
+""")
+
+ print(prettyType("amqp_method_name()",
+ [m.erlangName() for m in methods]))
+ print(prettyType("amqp_method()",
+ ["{%s, %s}" % (m.klass.index, m.index) for m in methods],
+ 6))
+ print(prettyType("amqp_method_record()",
+ ["#%s{}" % (m.erlangName()) for m in methods]))
+ fieldNames = set()
+ for m in methods:
+ fieldNames.update([erlangize(f.name) for f in m.arguments])
+ fieldNames = sorted(fieldNames)
+ print(prettyType("amqp_method_field_name()",
+ fieldNames))
+ print(prettyType("amqp_property_record()",
+ ["#'P_%s'{}" % erlangize(c.name) for c in spec.allClasses()]))
+ print(prettyType("amqp_exception()",
+ ["'%s'" % erlangConstantName(c).lower() for (c, v, cls) in spec.constants]))
+ print(prettyType("amqp_exception_code()",
+ ["%i" % v for (c, v, cls) in spec.constants]))
+ classIds = set()
+ for m in spec.allMethods():
+ classIds.add(m.klass.index)
+ print(prettyType("amqp_class_id()",
+ ["%i" % ci for ci in classIds]))
+ print(prettyType("amqp_class_name()",
+ ["%s" % c.erlangName() for c in spec.allClasses()]))
+
+ print("""
+%% Method signatures
+-spec version() -> {non_neg_integer(), non_neg_integer(), non_neg_integer()}.
+-spec lookup_method_name(amqp_method()) -> amqp_method_name().
+-spec lookup_class_name(amqp_class_id()) -> amqp_class_name().
+-spec method_id(amqp_method_name()) -> amqp_method().
+-spec method_has_content(amqp_method_name()) -> boolean().
+-spec is_method_synchronous(amqp_method_record()) -> boolean().
+-spec method_record(amqp_method_name()) -> amqp_method_record().
+-spec method_fieldnames(amqp_method_name()) -> [amqp_method_field_name()].
+-spec decode_method_fields(amqp_method_name(), binary()) ->
+ amqp_method_record() | rabbit_types:connection_exit().
+-spec decode_properties(non_neg_integer(), binary()) -> amqp_property_record().
+-spec encode_method_fields(amqp_method_record()) -> binary().
+-spec encode_properties(amqp_property_record()) -> binary().
+-spec lookup_amqp_exception(amqp_exception()) ->
+ {boolean(), amqp_exception_code(), binary()}.
+-spec amqp_exception(amqp_exception_code()) -> amqp_exception().
+
+bitvalue(true) -> 1;
+bitvalue(false) -> 0;
+bitvalue(undefined) -> 0.
+
+shortstr_size(S) ->
+ case size(S) of
+ Len when Len =< 255 -> Len;
+ _ -> exit(method_field_shortstr_overflow)
+ end.
+
+-define(SHORTSTR_VAL(R, L, V, X),
+ begin
+ <<L:8/unsigned, V:L/binary, X/binary>> = R,
+ {V, X}
+ end).
+
+-define(LONGSTR_VAL(R, L, V, X),
+ begin
+ <<L:32/unsigned, V:L/binary, X/binary>> = R,
+ {V, X}
+ end).
+
+-define(SHORT_VAL(R, L, V, X),
+ begin
+ <<V:16/unsigned, X/binary>> = R,
+ {V, X}
+ end).
+
+-define(LONG_VAL(R, L, V, X),
+ begin
+ <<V:32/unsigned, X/binary>> = R,
+ {V, X}
+ end).
+
+-define(LONGLONG_VAL(R, L, V, X),
+ begin
+ <<V:64/unsigned, X/binary>> = R,
+ {V, X}
+ end).
+
+-define(OCTET_VAL(R, L, V, X),
+ begin
+ <<V:8/unsigned, X/binary>> = R,
+ {V, X}
+ end).
+
+-define(TABLE_VAL(R, L, V, X),
+ begin
+ <<L:32/unsigned, V:L/binary, X/binary>> = R,
+ {rabbit_binary_parser:parse_table(V), X}
+ end).
+
+-define(TIMESTAMP_VAL(R, L, V, X),
+ begin
+ <<V:64/unsigned, X/binary>> = R,
+ {V, X}
+ end).
+
+-define(SHORTSTR_PROP(X, L),
+ begin
+ L = size(X),
+ if L < 256 -> <<L:8, X:L/binary>>;
+ true -> exit(content_properties_shortstr_overflow)
+ end
+ end).
+
+-define(LONGSTR_PROP(X, L),
+ begin
+ L = size(X),
+ <<L:32, X:L/binary>>
+ end).
+
+-define(OCTET_PROP(X, L), <<X:8/unsigned>>).
+-define(SHORT_PROP(X, L), <<X:16/unsigned>>).
+-define(LONG_PROP(X, L), <<X:32/unsigned>>).
+-define(LONGLONG_PROP(X, L), <<X:64/unsigned>>).
+-define(TIMESTAMP_PROP(X, L), <<X:64/unsigned>>).
+
+-define(TABLE_PROP(X, T),
+ begin
+ T = rabbit_binary_generator:generate_table(X),
+ <<(size(T)):32, T/binary>>
+ end).
+""")
+ version = "{%d, %d, %d}" % (spec.major, spec.minor, spec.revision)
+ if version == '{8, 0, 0}': version = '{0, 8, 0}'
+ print("version() -> %s." % (version))
+
+ for m in methods: genLookupMethodName(m)
+ print("lookup_method_name({_ClassId, _MethodId} = Id) -> exit({unknown_method_id, Id}).")
+
+ for c in spec.allClasses(): genLookupClassName(c)
+ print("lookup_class_name(ClassId) -> exit({unknown_class_id, ClassId}).")
+
+ for m in methods: genMethodId(m)
+ print("method_id(Name) -> exit({unknown_method_name, Name}).")
+
+ for m in methods: genMethodHasContent(m)
+ print("method_has_content(Name) -> exit({unknown_method_name, Name}).")
+
+ for m in methods: genMethodIsSynchronous(m)
+ print("is_method_synchronous(Name) -> exit({unknown_method_name, Name}).")
+
+ for m in methods: genMethodRecord(m)
+ print("method_record(Name) -> exit({unknown_method_name, Name}).")
+
+ for m in methods: genMethodFieldNames(m)
+ print("method_fieldnames(Name) -> exit({unknown_method_name, Name}).")
+
+ for m in methods: genDecodeMethodFields(m)
+ print("decode_method_fields(Name, BinaryFields) ->")
+ print(" rabbit_misc:frame_error(Name, BinaryFields).")
+
+ for c in spec.allClasses(): genDecodeProperties(c)
+ print("decode_properties(ClassId, _BinaryFields) -> exit({unknown_class_id, ClassId}).")
+
+ for m in methods: genEncodeMethodFields(m)
+ print("encode_method_fields(Record) -> exit({unknown_method_name, element(1, Record)}).")
+
+ for c in spec.allClasses(): genEncodeProperties(c)
+ print("encode_properties(Record) -> exit({unknown_properties_record, Record}).")
+
+ for (c,v,cls) in spec.constants: genLookupException(c,v,cls)
+ print("lookup_amqp_exception(Code) ->")
+ print(" rabbit_log:warning(\"Unknown AMQP error code '~p'~n\", [Code]),")
+ print(" {true, ?INTERNAL_ERROR, <<\"INTERNAL_ERROR\">>}.")
+
+ for (c,v,cls) in spec.constants: genAmqpException(c,v,cls)
+ print("amqp_exception(_Code) -> undefined.")
+
+def genHrl(spec):
+ def fieldNameList(fields):
+ return ', '.join([erlangize(f.name) for f in fields])
+
+ def fieldNameListDefaults(fields):
+ def fillField(field):
+ result = erlangize(field.name)
+ if field.defaultvalue != None:
+ conv_fn = erlangDefaultValueTypeConvMap[type(field.defaultvalue)]
+ result += ' = ' + conv_fn(field.defaultvalue)
+ return result
+ return ', '.join([fillField(f) for f in fields])
+
+ methods = spec.allMethods()
+
+ printFileHeader()
+ print("-define(PROTOCOL_PORT, %d)." % (spec.port))
+
+ for (c,v,cls) in spec.constants:
+ print("-define(%s, %s)." % (erlangConstantName(c), v))
+
+ print("%% Method field records.")
+ for m in methods:
+ print("-record(%s, {%s})." % (m.erlangName(), fieldNameListDefaults(m.arguments)))
+
+ print("%% Class property records.")
+ for c in spec.allClasses():
+ print("-record('P_%s', {%s})." % (erlangize(c.name), fieldNameList(c.fields)))
+
+
+def generateErl(specPath):
+ genErl(AmqpSpec(specPath))
+
+def generateHrl(specPath):
+ genHrl(AmqpSpec(specPath))
+
+if __name__ == "__main__":
+ do_main_dict({"header": generateHrl,
+ "body": generateErl})
+
diff --git a/deps/rabbit_common/development.post.mk b/deps/rabbit_common/development.post.mk
new file mode 100644
index 0000000000..65708dbcd7
--- /dev/null
+++ b/deps/rabbit_common/development.post.mk
@@ -0,0 +1,33 @@
+# --------------------------------------------------------------------
+# Framing sources generation.
+# --------------------------------------------------------------------
+
+PYTHON ?= python
+CODEGEN = $(CURDIR)/codegen.py
+CODEGEN_DIR ?= $(DEPS_DIR)/rabbitmq_codegen
+CODEGEN_AMQP = $(CODEGEN_DIR)/amqp_codegen.py
+
+AMQP_SPEC_JSON_FILES_0_8 = $(CODEGEN_DIR)/amqp-rabbitmq-0.8.json
+AMQP_SPEC_JSON_FILES_0_9_1 = $(CODEGEN_DIR)/amqp-rabbitmq-0.9.1.json \
+ $(CODEGEN_DIR)/credit_extension.json
+
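+# Each generated source below is a regular make target and can be
+# rebuilt directly (assuming rabbitmq_codegen has been fetched into
+# $(DEPS_DIR)), e.g.:
+#
+#   make include/rabbit_framing.hrl
+#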
+include/rabbit_framing.hrl:: $(CODEGEN) $(CODEGEN_AMQP) \
+ $(AMQP_SPEC_JSON_FILES_0_9_1) $(AMQP_SPEC_JSON_FILES_0_8)
+ $(gen_verbose) env PYTHONPATH=$(CODEGEN_DIR) \
+ $(PYTHON) $(CODEGEN) --ignore-conflicts header \
+ $(AMQP_SPEC_JSON_FILES_0_9_1) $(AMQP_SPEC_JSON_FILES_0_8) $@
+
+src/rabbit_framing_amqp_0_9_1.erl:: $(CODEGEN) $(CODEGEN_AMQP) \
+ $(AMQP_SPEC_JSON_FILES_0_9_1)
+ $(gen_verbose) env PYTHONPATH=$(CODEGEN_DIR) \
+ $(PYTHON) $(CODEGEN) body $(AMQP_SPEC_JSON_FILES_0_9_1) $@
+
+src/rabbit_framing_amqp_0_8.erl:: $(CODEGEN) $(CODEGEN_AMQP) \
+ $(AMQP_SPEC_JSON_FILES_0_8)
+ $(gen_verbose) env PYTHONPATH=$(CODEGEN_DIR) \
+ $(PYTHON) $(CODEGEN) body $(AMQP_SPEC_JSON_FILES_0_8) $@
+
+clean:: clean-extra-sources
+
+clean-extra-sources:
+ $(gen_verbose) rm -f $(EXTRA_SOURCES)
diff --git a/deps/rabbit_common/development.pre.mk b/deps/rabbit_common/development.pre.mk
new file mode 100644
index 0000000000..0b11877df3
--- /dev/null
+++ b/deps/rabbit_common/development.pre.mk
@@ -0,0 +1,14 @@
+# Variables and recipes in development.*.mk are meant to be used from
+# any Git clone. They are excluded from the files published to Hex.pm.
+# Generated files, however, are published to Hex.pm, so people using the
+# published source won't have to depend on Python and rabbitmq-codegen.
+
+BUILD_DEPS = rabbitmq_codegen
+TEST_DEPS = proper
+
+EXTRA_SOURCES += include/rabbit_framing.hrl \
+ src/rabbit_framing_amqp_0_8.erl \
+ src/rabbit_framing_amqp_0_9_1.erl
+
+.DEFAULT_GOAL = all
+$(PROJECT).d:: $(EXTRA_SOURCES)
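+
+# Example: with the default PROJECT (the directory name, rabbit_common),
+# the rule above expands to "rabbit_common.d:: $(EXTRA_SOURCES)", so the
+# generated framing sources are built before erlang.mk's dependency file.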
diff --git a/deps/rabbit_common/erlang.mk b/deps/rabbit_common/erlang.mk
new file mode 100644
index 0000000000..defddc4865
--- /dev/null
+++ b/deps/rabbit_common/erlang.mk
@@ -0,0 +1,7746 @@
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+.PHONY: all app apps deps search rel relup docs install-docs check tests clean distclean help erlang-mk
+
+ERLANG_MK_FILENAME := $(realpath $(lastword $(MAKEFILE_LIST)))
+export ERLANG_MK_FILENAME
+
+ERLANG_MK_VERSION = 2019.07.01-40-geb3e4b0
+ERLANG_MK_WITHOUT = plugins/proper
+
+# Make 3.81 and 3.82 are deprecated.
+
+ifeq ($(MAKELEVEL)$(MAKE_VERSION),03.81)
+$(warning Please upgrade to GNU Make 4 or later: https://erlang.mk/guide/installation.html)
+endif
+
+ifeq ($(MAKELEVEL)$(MAKE_VERSION),03.82)
+$(warning Please upgrade to GNU Make 4 or later: https://erlang.mk/guide/installation.html)
+endif
+
+# Core configuration.
+
+PROJECT ?= $(notdir $(CURDIR))
+PROJECT := $(strip $(PROJECT))
+
+PROJECT_VERSION ?= rolling
+PROJECT_MOD ?= $(PROJECT)_app
+PROJECT_ENV ?= []
+
+# Verbosity.
+
+V ?= 0
+
+verbose_0 = @
+verbose_2 = set -x;
+verbose = $(verbose_$(V))
+
+ifeq ($(V),3)
+SHELL := $(SHELL) -x
+endif
+
+gen_verbose_0 = @echo " GEN " $@;
+gen_verbose_2 = set -x;
+gen_verbose = $(gen_verbose_$(V))
+
+gen_verbose_esc_0 = @echo " GEN " $$@;
+gen_verbose_esc_2 = set -x;
+gen_verbose_esc = $(gen_verbose_esc_$(V))
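+
+# Example: "make V=1" echoes each command (the leading @ is dropped),
+# "make V=2" additionally runs recipes under "set -x", and "make V=3"
+# passes -x to the shell itself.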
+
+# Temporary files directory.
+
+ERLANG_MK_TMP ?= $(CURDIR)/.erlang.mk
+export ERLANG_MK_TMP
+
+# "erl" command.
+
+ERL = erl +A1 -noinput -boot no_dot_erlang
+
+# Platform detection.
+
+ifeq ($(PLATFORM),)
+UNAME_S := $(shell uname -s)
+
+ifeq ($(UNAME_S),Linux)
+PLATFORM = linux
+else ifeq ($(UNAME_S),Darwin)
+PLATFORM = darwin
+else ifeq ($(UNAME_S),SunOS)
+PLATFORM = solaris
+else ifeq ($(UNAME_S),GNU)
+PLATFORM = gnu
+else ifeq ($(UNAME_S),FreeBSD)
+PLATFORM = freebsd
+else ifeq ($(UNAME_S),NetBSD)
+PLATFORM = netbsd
+else ifeq ($(UNAME_S),OpenBSD)
+PLATFORM = openbsd
+else ifeq ($(UNAME_S),DragonFly)
+PLATFORM = dragonfly
+else ifeq ($(shell uname -o),Msys)
+PLATFORM = msys2
+else
+$(error Unable to detect platform. Please open a ticket with the output of uname -a.)
+endif
+
+export PLATFORM
+endif
+
+# Core targets.
+
+all:: deps app rel
+
+# Noop to avoid a Make warning when there's nothing to do.
+rel::
+ $(verbose) :
+
+relup:: deps app
+
+check:: tests
+
+clean:: clean-crashdump
+
+clean-crashdump:
+ifneq ($(wildcard erl_crash.dump),)
+ $(gen_verbose) rm -f erl_crash.dump
+endif
+
+distclean:: clean distclean-tmp
+
+$(ERLANG_MK_TMP):
+ $(verbose) mkdir -p $(ERLANG_MK_TMP)
+
+distclean-tmp:
+ $(gen_verbose) rm -rf $(ERLANG_MK_TMP)
+
+help::
+ $(verbose) printf "%s\n" \
+ "erlang.mk (version $(ERLANG_MK_VERSION)) is distributed under the terms of the ISC License." \
+ "Copyright (c) 2013-2016 Loïc Hoguin <essen@ninenines.eu>" \
+ "" \
+ "Usage: [V=1] $(MAKE) [target]..." \
+ "" \
+ "Core targets:" \
+ " all Run deps, app and rel targets in that order" \
+ " app Compile the project" \
+ " deps Fetch dependencies (if needed) and compile them" \
+ " fetch-deps Fetch dependencies recursively (if needed) without compiling them" \
+ " list-deps List dependencies recursively on stdout" \
+ " search q=... Search for a package in the built-in index" \
+ " rel Build a release for this project, if applicable" \
+ " docs Build the documentation for this project" \
+ " install-docs Install the man pages for this project" \
+ " check Compile and run all tests and analysis for this project" \
+ " tests Run the tests for this project" \
+ " clean Delete temporary and output files from most targets" \
+ " distclean Delete all temporary and output files" \
+ " help Display this help and exit" \
+ " erlang-mk Update erlang.mk to the latest version"
+
+# Core functions.
+
+empty :=
+space := $(empty) $(empty)
+tab := $(empty) $(empty)
+comma := ,
+
+define newline
+
+
+endef
+
+define comma_list
+$(subst $(space),$(comma),$(strip $(1)))
+endef
+
+define escape_dquotes
+$(subst ",\",$1)
+endef
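+
+# Examples: $(call comma_list,a b c) expands to a,b,c and
+# $(call escape_dquotes,say "hi") expands to say \"hi\".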
+
+# Add erlang.mk to the arguments to keep Erlang scripts that call init:get_plain_arguments() happy.
+define erlang
+$(ERL) $2 -pz $(ERLANG_MK_TMP)/rebar/ebin -eval "$(subst $(newline),,$(call escape_dquotes,$1))" -- erlang.mk
+endef
+
+ifeq ($(PLATFORM),msys2)
+core_native_path = $(shell cygpath -m $1)
+else
+core_native_path = $1
+endif
+
+core_http_get = curl -Lf$(if $(filter-out 0,$(V)),,s)o $(call core_native_path,$1) $2
+
+core_eq = $(and $(findstring $(1),$(2)),$(findstring $(2),$(1)))
+
+# We skip files that contain spaces because they end up causing issues.
+core_find = $(if $(wildcard $1),$(shell find $(1:%/=%) \( -type l -o -type f \) -name $(subst *,\*,$2) | grep -v " "))
+
+core_lc = $(subst A,a,$(subst B,b,$(subst C,c,$(subst D,d,$(subst E,e,$(subst F,f,$(subst G,g,$(subst H,h,$(subst I,i,$(subst J,j,$(subst K,k,$(subst L,l,$(subst M,m,$(subst N,n,$(subst O,o,$(subst P,p,$(subst Q,q,$(subst R,r,$(subst S,s,$(subst T,t,$(subst U,u,$(subst V,v,$(subst W,w,$(subst X,x,$(subst Y,y,$(subst Z,z,$(1)))))))))))))))))))))))))))
+
+core_ls = $(filter-out $(1),$(shell echo $(1)))
+
+# @todo Use a solution that does not require using perl.
+core_relpath = $(shell perl -e 'use File::Spec; print File::Spec->abs2rel(@ARGV) . "\n"' $1 $2)
+
+define core_render
+ printf -- '$(subst $(newline),\n,$(subst %,%%,$(subst ','\'',$(subst $(tab),$(WS),$(call $(1))))))\n' > $(2)
+endef
+
+# Automated update.
+
+ERLANG_MK_REPO ?= https://github.com/ninenines/erlang.mk
+ERLANG_MK_COMMIT ?=
+ERLANG_MK_BUILD_CONFIG ?= build.config
+ERLANG_MK_BUILD_DIR ?= .erlang.mk.build
+
+erlang-mk: WITHOUT ?= $(ERLANG_MK_WITHOUT)
+erlang-mk:
+ifdef ERLANG_MK_COMMIT
+ $(verbose) git clone $(ERLANG_MK_REPO) $(ERLANG_MK_BUILD_DIR)
+ $(verbose) cd $(ERLANG_MK_BUILD_DIR) && git checkout $(ERLANG_MK_COMMIT)
+else
+ $(verbose) git clone --depth 1 $(ERLANG_MK_REPO) $(ERLANG_MK_BUILD_DIR)
+endif
+ $(verbose) if [ -f $(ERLANG_MK_BUILD_CONFIG) ]; then cp $(ERLANG_MK_BUILD_CONFIG) $(ERLANG_MK_BUILD_DIR)/build.config; fi
+ $(gen_verbose) $(MAKE) --no-print-directory -C $(ERLANG_MK_BUILD_DIR) WITHOUT='$(strip $(WITHOUT))' UPGRADE=1
+ $(verbose) cp $(ERLANG_MK_BUILD_DIR)/erlang.mk ./erlang.mk
+ $(verbose) rm -rf $(ERLANG_MK_BUILD_DIR)
+ $(verbose) rm -rf $(ERLANG_MK_TMP)
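+
+# Example: update to a specific ref instead of the latest master
+# (2019.07.01 is an illustrative tag):
+#   make erlang-mk ERLANG_MK_COMMIT=2019.07.01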
+
+# The erlang.mk package index is bundled in the default erlang.mk build.
+# Search for the string "copyright" to skip to the rest of the code.
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-kerl
+
+KERL_INSTALL_DIR ?= $(HOME)/erlang
+
+ifeq ($(strip $(KERL)),)
+KERL := $(ERLANG_MK_TMP)/kerl/kerl
+endif
+
+KERL_DIR = $(ERLANG_MK_TMP)/kerl
+
+export KERL
+
+KERL_GIT ?= https://github.com/kerl/kerl
+KERL_COMMIT ?= master
+
+KERL_MAKEFLAGS ?=
+
+OTP_GIT ?= https://github.com/erlang/otp
+
+define kerl_otp_target
+$(KERL_INSTALL_DIR)/$(1): $(KERL)
+ $(verbose) if [ ! -d $$@ ]; then \
+ MAKEFLAGS="$(KERL_MAKEFLAGS)" $(KERL) build git $(OTP_GIT) $(1) $(1); \
+ $(KERL) install $(1) $(KERL_INSTALL_DIR)/$(1); \
+ fi
+endef
+
+define kerl_hipe_target
+$(KERL_INSTALL_DIR)/$1-native: $(KERL)
+ $(verbose) if [ ! -d $$@ ]; then \
+ KERL_CONFIGURE_OPTIONS=--enable-native-libs \
+ MAKEFLAGS="$(KERL_MAKEFLAGS)" $(KERL) build git $(OTP_GIT) $1 $1-native; \
+ $(KERL) install $1-native $(KERL_INSTALL_DIR)/$1-native; \
+ fi
+endef
+
+$(KERL): $(KERL_DIR)
+
+$(KERL_DIR): | $(ERLANG_MK_TMP)
+ $(gen_verbose) git clone --depth 1 $(KERL_GIT) $(ERLANG_MK_TMP)/kerl
+ $(verbose) cd $(ERLANG_MK_TMP)/kerl && git checkout $(KERL_COMMIT)
+ $(verbose) chmod +x $(KERL)
+
+distclean:: distclean-kerl
+
+distclean-kerl:
+ $(gen_verbose) rm -rf $(KERL_DIR)
+
+# Allow users to select which version of Erlang/OTP to use for a project.
+
+ifneq ($(strip $(LATEST_ERLANG_OTP)),)
+# In some environments it is necessary to filter out master.
+ERLANG_OTP := $(notdir $(lastword $(sort\
+ $(filter-out $(KERL_INSTALL_DIR)/master $(KERL_INSTALL_DIR)/OTP_R%,\
+ $(filter-out %-rc1 %-rc2 %-rc3,$(wildcard $(KERL_INSTALL_DIR)/*[^-native]))))))
+endif
+
+ERLANG_OTP ?=
+ERLANG_HIPE ?=
+
+# Use kerl to enforce a specific Erlang/OTP version for a project.
+ifneq ($(strip $(ERLANG_OTP)),)
+export PATH := $(KERL_INSTALL_DIR)/$(ERLANG_OTP)/bin:$(PATH)
+SHELL := env PATH=$(PATH) $(SHELL)
+$(eval $(call kerl_otp_target,$(ERLANG_OTP)))
+
+# Build Erlang/OTP only if it doesn't already exist.
+ifeq ($(wildcard $(KERL_INSTALL_DIR)/$(ERLANG_OTP))$(BUILD_ERLANG_OTP),)
+$(info Building Erlang/OTP $(ERLANG_OTP)... Please wait...)
+$(shell $(MAKE) $(KERL_INSTALL_DIR)/$(ERLANG_OTP) ERLANG_OTP=$(ERLANG_OTP) BUILD_ERLANG_OTP=1 >&2)
+endif
+
+else
+# Same for a HiPE enabled VM.
+ifneq ($(strip $(ERLANG_HIPE)),)
+export PATH := $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native/bin:$(PATH)
+SHELL := env PATH=$(PATH) $(SHELL)
+$(eval $(call kerl_hipe_target,$(ERLANG_HIPE)))
+
+# Build Erlang/OTP only if it doesn't already exist.
+ifeq ($(wildcard $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native)$(BUILD_ERLANG_OTP),)
+$(info Building HiPE-enabled Erlang/OTP $(ERLANG_HIPE)... Please wait...)
+$(shell $(MAKE) $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native ERLANG_HIPE=$(ERLANG_HIPE) BUILD_ERLANG_OTP=1 >&2)
+endif
+
+endif
+endif
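+
+# Example: pin the project to a kerl-built OTP; the name must match a git
+# ref in $(OTP_GIT) (e.g. an illustrative release tag), and kerl builds it
+# on first use:
+#   make ERLANG_OTP=OTP-22.3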
+
+PACKAGES += aberth
+pkg_aberth_name = aberth
+pkg_aberth_description = Generic BERT-RPC server in Erlang
+pkg_aberth_homepage = https://github.com/a13x/aberth
+pkg_aberth_fetch = git
+pkg_aberth_repo = https://github.com/a13x/aberth
+pkg_aberth_commit = master
+
+PACKAGES += active
+pkg_active_name = active
+pkg_active_description = Active development for Erlang: rebuild and reload source/binary files while the VM is running
+pkg_active_homepage = https://github.com/proger/active
+pkg_active_fetch = git
+pkg_active_repo = https://github.com/proger/active
+pkg_active_commit = master
+
+PACKAGES += actordb_core
+pkg_actordb_core_name = actordb_core
+pkg_actordb_core_description = ActorDB main source
+pkg_actordb_core_homepage = http://www.actordb.com/
+pkg_actordb_core_fetch = git
+pkg_actordb_core_repo = https://github.com/biokoda/actordb_core
+pkg_actordb_core_commit = master
+
+PACKAGES += actordb_thrift
+pkg_actordb_thrift_name = actordb_thrift
+pkg_actordb_thrift_description = Thrift API for ActorDB
+pkg_actordb_thrift_homepage = http://www.actordb.com/
+pkg_actordb_thrift_fetch = git
+pkg_actordb_thrift_repo = https://github.com/biokoda/actordb_thrift
+pkg_actordb_thrift_commit = master
+
+PACKAGES += aleppo
+pkg_aleppo_name = aleppo
+pkg_aleppo_description = Alternative Erlang Pre-Processor
+pkg_aleppo_homepage = https://github.com/ErlyORM/aleppo
+pkg_aleppo_fetch = git
+pkg_aleppo_repo = https://github.com/ErlyORM/aleppo
+pkg_aleppo_commit = master
+
+PACKAGES += alog
+pkg_alog_name = alog
+pkg_alog_description = Simply the best logging framework for Erlang
+pkg_alog_homepage = https://github.com/siberian-fast-food/alogger
+pkg_alog_fetch = git
+pkg_alog_repo = https://github.com/siberian-fast-food/alogger
+pkg_alog_commit = master
+
+PACKAGES += amqp_client
+pkg_amqp_client_name = amqp_client
+pkg_amqp_client_description = RabbitMQ Erlang AMQP client
+pkg_amqp_client_homepage = https://www.rabbitmq.com/erlang-client-user-guide.html
+pkg_amqp_client_fetch = git
+pkg_amqp_client_repo = https://github.com/rabbitmq/rabbitmq-erlang-client.git
+pkg_amqp_client_commit = master
+
+PACKAGES += annotations
+pkg_annotations_name = annotations
+pkg_annotations_description = Simple code instrumentation utilities
+pkg_annotations_homepage = https://github.com/hyperthunk/annotations
+pkg_annotations_fetch = git
+pkg_annotations_repo = https://github.com/hyperthunk/annotations
+pkg_annotations_commit = master
+
+PACKAGES += antidote
+pkg_antidote_name = antidote
+pkg_antidote_description = Large-scale computation without synchronisation
+pkg_antidote_homepage = https://syncfree.lip6.fr/
+pkg_antidote_fetch = git
+pkg_antidote_repo = https://github.com/SyncFree/antidote
+pkg_antidote_commit = master
+
+PACKAGES += apns
+pkg_apns_name = apns
+pkg_apns_description = Apple Push Notification Server for Erlang
+pkg_apns_homepage = http://inaka.github.com/apns4erl
+pkg_apns_fetch = git
+pkg_apns_repo = https://github.com/inaka/apns4erl
+pkg_apns_commit = master
+
+PACKAGES += asciideck
+pkg_asciideck_name = asciideck
+pkg_asciideck_description = Asciidoc for Erlang.
+pkg_asciideck_homepage = https://ninenines.eu
+pkg_asciideck_fetch = git
+pkg_asciideck_repo = https://github.com/ninenines/asciideck
+pkg_asciideck_commit = master
+
+PACKAGES += azdht
+pkg_azdht_name = azdht
+pkg_azdht_description = Azureus Distributed Hash Table (DHT) in Erlang
+pkg_azdht_homepage = https://github.com/arcusfelis/azdht
+pkg_azdht_fetch = git
+pkg_azdht_repo = https://github.com/arcusfelis/azdht
+pkg_azdht_commit = master
+
+PACKAGES += backoff
+pkg_backoff_name = backoff
+pkg_backoff_description = Simple exponential backoffs in Erlang
+pkg_backoff_homepage = https://github.com/ferd/backoff
+pkg_backoff_fetch = git
+pkg_backoff_repo = https://github.com/ferd/backoff
+pkg_backoff_commit = master
+
+PACKAGES += barrel_tcp
+pkg_barrel_tcp_name = barrel_tcp
+pkg_barrel_tcp_description = barrel is a generic TCP acceptor pool with low latency in Erlang.
+pkg_barrel_tcp_homepage = https://github.com/benoitc-attic/barrel_tcp
+pkg_barrel_tcp_fetch = git
+pkg_barrel_tcp_repo = https://github.com/benoitc-attic/barrel_tcp
+pkg_barrel_tcp_commit = master
+
+PACKAGES += basho_bench
+pkg_basho_bench_name = basho_bench
+pkg_basho_bench_description = A load-generation and testing tool for basically whatever you can write a returning Erlang function for.
+pkg_basho_bench_homepage = https://github.com/basho/basho_bench
+pkg_basho_bench_fetch = git
+pkg_basho_bench_repo = https://github.com/basho/basho_bench
+pkg_basho_bench_commit = master
+
+PACKAGES += bcrypt
+pkg_bcrypt_name = bcrypt
+pkg_bcrypt_description = Bcrypt Erlang / C library
+pkg_bcrypt_homepage = https://github.com/erlangpack/bcrypt
+pkg_bcrypt_fetch = git
+pkg_bcrypt_repo = https://github.com/erlangpack/bcrypt.git
+pkg_bcrypt_commit = master
+
+PACKAGES += beam
+pkg_beam_name = beam
+pkg_beam_description = BEAM emulator written in Erlang
+pkg_beam_homepage = https://github.com/tonyrog/beam
+pkg_beam_fetch = git
+pkg_beam_repo = https://github.com/tonyrog/beam
+pkg_beam_commit = master
+
+PACKAGES += beanstalk
+pkg_beanstalk_name = beanstalk
+pkg_beanstalk_description = An Erlang client for beanstalkd
+pkg_beanstalk_homepage = https://github.com/tim/erlang-beanstalk
+pkg_beanstalk_fetch = git
+pkg_beanstalk_repo = https://github.com/tim/erlang-beanstalk
+pkg_beanstalk_commit = master
+
+PACKAGES += bear
+pkg_bear_name = bear
+pkg_bear_description = a set of statistics functions for erlang
+pkg_bear_homepage = https://github.com/boundary/bear
+pkg_bear_fetch = git
+pkg_bear_repo = https://github.com/boundary/bear
+pkg_bear_commit = master
+
+PACKAGES += bertconf
+pkg_bertconf_name = bertconf
+pkg_bertconf_description = Make ETS tables out of static BERT files that are auto-reloaded
+pkg_bertconf_homepage = https://github.com/ferd/bertconf
+pkg_bertconf_fetch = git
+pkg_bertconf_repo = https://github.com/ferd/bertconf
+pkg_bertconf_commit = master
+
+PACKAGES += bifrost
+pkg_bifrost_name = bifrost
+pkg_bifrost_description = Erlang FTP Server Framework
+pkg_bifrost_homepage = https://github.com/thorstadt/bifrost
+pkg_bifrost_fetch = git
+pkg_bifrost_repo = https://github.com/thorstadt/bifrost
+pkg_bifrost_commit = master
+
+PACKAGES += binpp
+pkg_binpp_name = binpp
+pkg_binpp_description = Erlang Binary Pretty Printer
+pkg_binpp_homepage = https://github.com/jtendo/binpp
+pkg_binpp_fetch = git
+pkg_binpp_repo = https://github.com/jtendo/binpp
+pkg_binpp_commit = master
+
+PACKAGES += bisect
+pkg_bisect_name = bisect
+pkg_bisect_description = Ordered fixed-size binary dictionary in Erlang
+pkg_bisect_homepage = https://github.com/knutin/bisect
+pkg_bisect_fetch = git
+pkg_bisect_repo = https://github.com/knutin/bisect
+pkg_bisect_commit = master
+
+PACKAGES += bitcask
+pkg_bitcask_name = bitcask
+pkg_bitcask_description = because you need another key/value storage engine
+pkg_bitcask_homepage = https://github.com/basho/bitcask
+pkg_bitcask_fetch = git
+pkg_bitcask_repo = https://github.com/basho/bitcask
+pkg_bitcask_commit = develop
+
+PACKAGES += bitstore
+pkg_bitstore_name = bitstore
+pkg_bitstore_description = A document based ontology development environment
+pkg_bitstore_homepage = https://github.com/bdionne/bitstore
+pkg_bitstore_fetch = git
+pkg_bitstore_repo = https://github.com/bdionne/bitstore
+pkg_bitstore_commit = master
+
+PACKAGES += bootstrap
+pkg_bootstrap_name = bootstrap
+pkg_bootstrap_description = A simple, yet powerful Erlang cluster bootstrapping application.
+pkg_bootstrap_homepage = https://github.com/schlagert/bootstrap
+pkg_bootstrap_fetch = git
+pkg_bootstrap_repo = https://github.com/schlagert/bootstrap
+pkg_bootstrap_commit = master
+
+PACKAGES += boss
+pkg_boss_name = boss
+pkg_boss_description = Erlang web MVC, now featuring Comet
+pkg_boss_homepage = https://github.com/ChicagoBoss/ChicagoBoss
+pkg_boss_fetch = git
+pkg_boss_repo = https://github.com/ChicagoBoss/ChicagoBoss
+pkg_boss_commit = master
+
+PACKAGES += boss_db
+pkg_boss_db_name = boss_db
+pkg_boss_db_description = BossDB: a sharded, caching, pooling, evented ORM for Erlang
+pkg_boss_db_homepage = https://github.com/ErlyORM/boss_db
+pkg_boss_db_fetch = git
+pkg_boss_db_repo = https://github.com/ErlyORM/boss_db
+pkg_boss_db_commit = master
+
+PACKAGES += brod
+pkg_brod_name = brod
+pkg_brod_description = Kafka client in Erlang
+pkg_brod_homepage = https://github.com/klarna/brod
+pkg_brod_fetch = git
+pkg_brod_repo = https://github.com/klarna/brod.git
+pkg_brod_commit = master
+
+PACKAGES += bson
+pkg_bson_name = bson
+pkg_bson_description = BSON documents in Erlang, see bsonspec.org
+pkg_bson_homepage = https://github.com/comtihon/bson-erlang
+pkg_bson_fetch = git
+pkg_bson_repo = https://github.com/comtihon/bson-erlang
+pkg_bson_commit = master
+
+PACKAGES += bullet
+pkg_bullet_name = bullet
+pkg_bullet_description = Simple, reliable, efficient streaming for Cowboy.
+pkg_bullet_homepage = http://ninenines.eu
+pkg_bullet_fetch = git
+pkg_bullet_repo = https://github.com/ninenines/bullet
+pkg_bullet_commit = master
+
+PACKAGES += cache
+pkg_cache_name = cache
+pkg_cache_description = Erlang in-memory cache
+pkg_cache_homepage = https://github.com/fogfish/cache
+pkg_cache_fetch = git
+pkg_cache_repo = https://github.com/fogfish/cache
+pkg_cache_commit = master
+
+PACKAGES += cake
+pkg_cake_name = cake
+pkg_cake_description = Really simple terminal colorization
+pkg_cake_homepage = https://github.com/darach/cake-erl
+pkg_cake_fetch = git
+pkg_cake_repo = https://github.com/darach/cake-erl
+pkg_cake_commit = master
+
+PACKAGES += carotene
+pkg_carotene_name = carotene
+pkg_carotene_description = Real-time server
+pkg_carotene_homepage = https://github.com/carotene/carotene
+pkg_carotene_fetch = git
+pkg_carotene_repo = https://github.com/carotene/carotene
+pkg_carotene_commit = master
+
+PACKAGES += cberl
+pkg_cberl_name = cberl
+pkg_cberl_description = NIF based Erlang bindings for Couchbase
+pkg_cberl_homepage = https://github.com/chitika/cberl
+pkg_cberl_fetch = git
+pkg_cberl_repo = https://github.com/chitika/cberl
+pkg_cberl_commit = master
+
+PACKAGES += cecho
+pkg_cecho_name = cecho
+pkg_cecho_description = An ncurses library for Erlang
+pkg_cecho_homepage = https://github.com/mazenharake/cecho
+pkg_cecho_fetch = git
+pkg_cecho_repo = https://github.com/mazenharake/cecho
+pkg_cecho_commit = master
+
+PACKAGES += cferl
+pkg_cferl_name = cferl
+pkg_cferl_description = Rackspace / Open Stack Cloud Files Erlang Client
+pkg_cferl_homepage = https://github.com/ddossot/cferl
+pkg_cferl_fetch = git
+pkg_cferl_repo = https://github.com/ddossot/cferl
+pkg_cferl_commit = master
+
+PACKAGES += chaos_monkey
+pkg_chaos_monkey_name = chaos_monkey
+pkg_chaos_monkey_description = This is The CHAOS MONKEY. It will kill your processes.
+pkg_chaos_monkey_homepage = https://github.com/dLuna/chaos_monkey
+pkg_chaos_monkey_fetch = git
+pkg_chaos_monkey_repo = https://github.com/dLuna/chaos_monkey
+pkg_chaos_monkey_commit = master
+
+PACKAGES += check_node
+pkg_check_node_name = check_node
+pkg_check_node_description = Nagios Scripts for monitoring Riak
+pkg_check_node_homepage = https://github.com/basho-labs/riak_nagios
+pkg_check_node_fetch = git
+pkg_check_node_repo = https://github.com/basho-labs/riak_nagios
+pkg_check_node_commit = master
+
+PACKAGES += chronos
+pkg_chronos_name = chronos
+pkg_chronos_description = Timer module for Erlang that makes it easy to abstract time out of the tests.
+pkg_chronos_homepage = https://github.com/lehoff/chronos
+pkg_chronos_fetch = git
+pkg_chronos_repo = https://github.com/lehoff/chronos
+pkg_chronos_commit = master
+
+PACKAGES += chumak
+pkg_chumak_name = chumak
+pkg_chumak_description = Pure Erlang implementation of ZeroMQ Message Transport Protocol.
+pkg_chumak_homepage = http://choven.ca
+pkg_chumak_fetch = git
+pkg_chumak_repo = https://github.com/chovencorp/chumak
+pkg_chumak_commit = master
+
+PACKAGES += cl
+pkg_cl_name = cl
+pkg_cl_description = OpenCL binding for Erlang
+pkg_cl_homepage = https://github.com/tonyrog/cl
+pkg_cl_fetch = git
+pkg_cl_repo = https://github.com/tonyrog/cl
+pkg_cl_commit = master
+
+PACKAGES += clique
+pkg_clique_name = clique
+pkg_clique_description = CLI Framework for Erlang
+pkg_clique_homepage = https://github.com/basho/clique
+pkg_clique_fetch = git
+pkg_clique_repo = https://github.com/basho/clique
+pkg_clique_commit = develop
+
+PACKAGES += cloudi_core
+pkg_cloudi_core_name = cloudi_core
+pkg_cloudi_core_description = CloudI internal service runtime
+pkg_cloudi_core_homepage = http://cloudi.org/
+pkg_cloudi_core_fetch = git
+pkg_cloudi_core_repo = https://github.com/CloudI/cloudi_core
+pkg_cloudi_core_commit = master
+
+PACKAGES += cloudi_service_api_requests
+pkg_cloudi_service_api_requests_name = cloudi_service_api_requests
+pkg_cloudi_service_api_requests_description = CloudI Service API requests (JSON-RPC/Erlang-term support)
+pkg_cloudi_service_api_requests_homepage = http://cloudi.org/
+pkg_cloudi_service_api_requests_fetch = git
+pkg_cloudi_service_api_requests_repo = https://github.com/CloudI/cloudi_service_api_requests
+pkg_cloudi_service_api_requests_commit = master
+
+PACKAGES += cloudi_service_db
+pkg_cloudi_service_db_name = cloudi_service_db
+pkg_cloudi_service_db_description = CloudI Database (in-memory/testing/generic)
+pkg_cloudi_service_db_homepage = http://cloudi.org/
+pkg_cloudi_service_db_fetch = git
+pkg_cloudi_service_db_repo = https://github.com/CloudI/cloudi_service_db
+pkg_cloudi_service_db_commit = master
+
+PACKAGES += cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_name = cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_description = Cassandra CloudI Service
+pkg_cloudi_service_db_cassandra_homepage = http://cloudi.org/
+pkg_cloudi_service_db_cassandra_fetch = git
+pkg_cloudi_service_db_cassandra_repo = https://github.com/CloudI/cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_commit = master
+
+PACKAGES += cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_name = cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_description = Cassandra CQL CloudI Service
+pkg_cloudi_service_db_cassandra_cql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_cassandra_cql_fetch = git
+pkg_cloudi_service_db_cassandra_cql_repo = https://github.com/CloudI/cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_commit = master
+
+PACKAGES += cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_name = cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_description = CouchDB CloudI Service
+pkg_cloudi_service_db_couchdb_homepage = http://cloudi.org/
+pkg_cloudi_service_db_couchdb_fetch = git
+pkg_cloudi_service_db_couchdb_repo = https://github.com/CloudI/cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_commit = master
+
+PACKAGES += cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_name = cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_description = elasticsearch CloudI Service
+pkg_cloudi_service_db_elasticsearch_homepage = http://cloudi.org/
+pkg_cloudi_service_db_elasticsearch_fetch = git
+pkg_cloudi_service_db_elasticsearch_repo = https://github.com/CloudI/cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_commit = master
+
+PACKAGES += cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_name = cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_description = memcached CloudI Service
+pkg_cloudi_service_db_memcached_homepage = http://cloudi.org/
+pkg_cloudi_service_db_memcached_fetch = git
+pkg_cloudi_service_db_memcached_repo = https://github.com/CloudI/cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_commit = master
+
+PACKAGES += cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_name = cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_description = MySQL CloudI Service
+pkg_cloudi_service_db_mysql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_mysql_fetch = git
+pkg_cloudi_service_db_mysql_repo = https://github.com/CloudI/cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_commit = master
+
+PACKAGES += cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_name = cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_description = PostgreSQL CloudI Service
+pkg_cloudi_service_db_pgsql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_pgsql_fetch = git
+pkg_cloudi_service_db_pgsql_repo = https://github.com/CloudI/cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_commit = master
+
+PACKAGES += cloudi_service_db_riak
+pkg_cloudi_service_db_riak_name = cloudi_service_db_riak
+pkg_cloudi_service_db_riak_description = Riak CloudI Service
+pkg_cloudi_service_db_riak_homepage = http://cloudi.org/
+pkg_cloudi_service_db_riak_fetch = git
+pkg_cloudi_service_db_riak_repo = https://github.com/CloudI/cloudi_service_db_riak
+pkg_cloudi_service_db_riak_commit = master
+
+PACKAGES += cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_name = cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_description = Tokyo Tyrant CloudI Service
+pkg_cloudi_service_db_tokyotyrant_homepage = http://cloudi.org/
+pkg_cloudi_service_db_tokyotyrant_fetch = git
+pkg_cloudi_service_db_tokyotyrant_repo = https://github.com/CloudI/cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_commit = master
+
+PACKAGES += cloudi_service_filesystem
+pkg_cloudi_service_filesystem_name = cloudi_service_filesystem
+pkg_cloudi_service_filesystem_description = Filesystem CloudI Service
+pkg_cloudi_service_filesystem_homepage = http://cloudi.org/
+pkg_cloudi_service_filesystem_fetch = git
+pkg_cloudi_service_filesystem_repo = https://github.com/CloudI/cloudi_service_filesystem
+pkg_cloudi_service_filesystem_commit = master
+
+PACKAGES += cloudi_service_http_client
+pkg_cloudi_service_http_client_name = cloudi_service_http_client
+pkg_cloudi_service_http_client_description = HTTP client CloudI Service
+pkg_cloudi_service_http_client_homepage = http://cloudi.org/
+pkg_cloudi_service_http_client_fetch = git
+pkg_cloudi_service_http_client_repo = https://github.com/CloudI/cloudi_service_http_client
+pkg_cloudi_service_http_client_commit = master
+
+PACKAGES += cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_name = cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_description = cowboy HTTP/HTTPS CloudI Service
+pkg_cloudi_service_http_cowboy_homepage = http://cloudi.org/
+pkg_cloudi_service_http_cowboy_fetch = git
+pkg_cloudi_service_http_cowboy_repo = https://github.com/CloudI/cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_commit = master
+
+PACKAGES += cloudi_service_http_elli
+pkg_cloudi_service_http_elli_name = cloudi_service_http_elli
+pkg_cloudi_service_http_elli_description = elli HTTP CloudI Service
+pkg_cloudi_service_http_elli_homepage = http://cloudi.org/
+pkg_cloudi_service_http_elli_fetch = git
+pkg_cloudi_service_http_elli_repo = https://github.com/CloudI/cloudi_service_http_elli
+pkg_cloudi_service_http_elli_commit = master
+
+PACKAGES += cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_name = cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_description = Map/Reduce CloudI Service
+pkg_cloudi_service_map_reduce_homepage = http://cloudi.org/
+pkg_cloudi_service_map_reduce_fetch = git
+pkg_cloudi_service_map_reduce_repo = https://github.com/CloudI/cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_commit = master
+
+PACKAGES += cloudi_service_oauth1
+pkg_cloudi_service_oauth1_name = cloudi_service_oauth1
+pkg_cloudi_service_oauth1_description = OAuth v1.0 CloudI Service
+pkg_cloudi_service_oauth1_homepage = http://cloudi.org/
+pkg_cloudi_service_oauth1_fetch = git
+pkg_cloudi_service_oauth1_repo = https://github.com/CloudI/cloudi_service_oauth1
+pkg_cloudi_service_oauth1_commit = master
+
+PACKAGES += cloudi_service_queue
+pkg_cloudi_service_queue_name = cloudi_service_queue
+pkg_cloudi_service_queue_description = Persistent Queue Service
+pkg_cloudi_service_queue_homepage = http://cloudi.org/
+pkg_cloudi_service_queue_fetch = git
+pkg_cloudi_service_queue_repo = https://github.com/CloudI/cloudi_service_queue
+pkg_cloudi_service_queue_commit = master
+
+PACKAGES += cloudi_service_quorum
+pkg_cloudi_service_quorum_name = cloudi_service_quorum
+pkg_cloudi_service_quorum_description = CloudI Quorum Service
+pkg_cloudi_service_quorum_homepage = http://cloudi.org/
+pkg_cloudi_service_quorum_fetch = git
+pkg_cloudi_service_quorum_repo = https://github.com/CloudI/cloudi_service_quorum
+pkg_cloudi_service_quorum_commit = master
+
+PACKAGES += cloudi_service_router
+pkg_cloudi_service_router_name = cloudi_service_router
+pkg_cloudi_service_router_description = CloudI Router Service
+pkg_cloudi_service_router_homepage = http://cloudi.org/
+pkg_cloudi_service_router_fetch = git
+pkg_cloudi_service_router_repo = https://github.com/CloudI/cloudi_service_router
+pkg_cloudi_service_router_commit = master
+
+PACKAGES += cloudi_service_tcp
+pkg_cloudi_service_tcp_name = cloudi_service_tcp
+pkg_cloudi_service_tcp_description = TCP CloudI Service
+pkg_cloudi_service_tcp_homepage = http://cloudi.org/
+pkg_cloudi_service_tcp_fetch = git
+pkg_cloudi_service_tcp_repo = https://github.com/CloudI/cloudi_service_tcp
+pkg_cloudi_service_tcp_commit = master
+
+PACKAGES += cloudi_service_timers
+pkg_cloudi_service_timers_name = cloudi_service_timers
+pkg_cloudi_service_timers_description = Timers CloudI Service
+pkg_cloudi_service_timers_homepage = http://cloudi.org/
+pkg_cloudi_service_timers_fetch = git
+pkg_cloudi_service_timers_repo = https://github.com/CloudI/cloudi_service_timers
+pkg_cloudi_service_timers_commit = master
+
+PACKAGES += cloudi_service_udp
+pkg_cloudi_service_udp_name = cloudi_service_udp
+pkg_cloudi_service_udp_description = UDP CloudI Service
+pkg_cloudi_service_udp_homepage = http://cloudi.org/
+pkg_cloudi_service_udp_fetch = git
+pkg_cloudi_service_udp_repo = https://github.com/CloudI/cloudi_service_udp
+pkg_cloudi_service_udp_commit = master
+
+PACKAGES += cloudi_service_validate
+pkg_cloudi_service_validate_name = cloudi_service_validate
+pkg_cloudi_service_validate_description = CloudI Validate Service
+pkg_cloudi_service_validate_homepage = http://cloudi.org/
+pkg_cloudi_service_validate_fetch = git
+pkg_cloudi_service_validate_repo = https://github.com/CloudI/cloudi_service_validate
+pkg_cloudi_service_validate_commit = master
+
+PACKAGES += cloudi_service_zeromq
+pkg_cloudi_service_zeromq_name = cloudi_service_zeromq
+pkg_cloudi_service_zeromq_description = ZeroMQ CloudI Service
+pkg_cloudi_service_zeromq_homepage = http://cloudi.org/
+pkg_cloudi_service_zeromq_fetch = git
+pkg_cloudi_service_zeromq_repo = https://github.com/CloudI/cloudi_service_zeromq
+pkg_cloudi_service_zeromq_commit = master
+
+PACKAGES += cluster_info
+pkg_cluster_info_name = cluster_info
+pkg_cluster_info_description = Fork of Hibari's nifty cluster_info OTP app
+pkg_cluster_info_homepage = https://github.com/basho/cluster_info
+pkg_cluster_info_fetch = git
+pkg_cluster_info_repo = https://github.com/basho/cluster_info
+pkg_cluster_info_commit = master
+
+PACKAGES += color
+pkg_color_name = color
+pkg_color_description = ANSI colors for your Erlang
+pkg_color_homepage = https://github.com/julianduque/erlang-color
+pkg_color_fetch = git
+pkg_color_repo = https://github.com/julianduque/erlang-color
+pkg_color_commit = master
+
+PACKAGES += confetti
+pkg_confetti_name = confetti
+pkg_confetti_description = Erlang configuration provider / application:get_env/2 on steroids
+pkg_confetti_homepage = https://github.com/jtendo/confetti
+pkg_confetti_fetch = git
+pkg_confetti_repo = https://github.com/jtendo/confetti
+pkg_confetti_commit = master
+
+PACKAGES += couchbeam
+pkg_couchbeam_name = couchbeam
+pkg_couchbeam_description = Apache CouchDB client in Erlang
+pkg_couchbeam_homepage = https://github.com/benoitc/couchbeam
+pkg_couchbeam_fetch = git
+pkg_couchbeam_repo = https://github.com/benoitc/couchbeam
+pkg_couchbeam_commit = master
+
+PACKAGES += covertool
+pkg_covertool_name = covertool
+pkg_covertool_description = Tool to convert Erlang cover data files into Cobertura XML reports
+pkg_covertool_homepage = https://github.com/idubrov/covertool
+pkg_covertool_fetch = git
+pkg_covertool_repo = https://github.com/idubrov/covertool
+pkg_covertool_commit = master
+
+PACKAGES += cowboy
+pkg_cowboy_name = cowboy
+pkg_cowboy_description = Small, fast and modular HTTP server.
+pkg_cowboy_homepage = http://ninenines.eu
+pkg_cowboy_fetch = git
+pkg_cowboy_repo = https://github.com/ninenines/cowboy
+pkg_cowboy_commit = 1.0.4
+
+PACKAGES += cowdb
+pkg_cowdb_name = cowdb
+pkg_cowdb_description = Pure Key/Value database library for Erlang Applications
+pkg_cowdb_homepage = https://github.com/refuge/cowdb
+pkg_cowdb_fetch = git
+pkg_cowdb_repo = https://github.com/refuge/cowdb
+pkg_cowdb_commit = master
+
+PACKAGES += cowlib
+pkg_cowlib_name = cowlib
+pkg_cowlib_description = Support library for manipulating Web protocols.
+pkg_cowlib_homepage = http://ninenines.eu
+pkg_cowlib_fetch = git
+pkg_cowlib_repo = https://github.com/ninenines/cowlib
+pkg_cowlib_commit = 1.0.2
+
+PACKAGES += cpg
+pkg_cpg_name = cpg
+pkg_cpg_description = CloudI Process Groups
+pkg_cpg_homepage = https://github.com/okeuday/cpg
+pkg_cpg_fetch = git
+pkg_cpg_repo = https://github.com/okeuday/cpg
+pkg_cpg_commit = master
+
+PACKAGES += cqerl
+pkg_cqerl_name = cqerl
+pkg_cqerl_description = Native Erlang CQL client for Cassandra
+pkg_cqerl_homepage = https://matehat.github.io/cqerl/
+pkg_cqerl_fetch = git
+pkg_cqerl_repo = https://github.com/matehat/cqerl
+pkg_cqerl_commit = master
+
+PACKAGES += cr
+pkg_cr_name = cr
+pkg_cr_description = Chain Replication
+pkg_cr_homepage = https://synrc.com/apps/cr/doc/cr.htm
+pkg_cr_fetch = git
+pkg_cr_repo = https://github.com/spawnproc/cr
+pkg_cr_commit = master
+
+PACKAGES += cuttlefish
+pkg_cuttlefish_name = cuttlefish
+pkg_cuttlefish_description = never lose your childlike sense of wonder baby cuttlefish, promise me?
+pkg_cuttlefish_homepage = https://github.com/basho/cuttlefish
+pkg_cuttlefish_fetch = git
+pkg_cuttlefish_repo = https://github.com/basho/cuttlefish
+pkg_cuttlefish_commit = master
+
+PACKAGES += damocles
+pkg_damocles_name = damocles
+pkg_damocles_description = Erlang library for generating adversarial network conditions for QAing distributed applications/systems on a single Linux box.
+pkg_damocles_homepage = https://github.com/lostcolony/damocles
+pkg_damocles_fetch = git
+pkg_damocles_repo = https://github.com/lostcolony/damocles
+pkg_damocles_commit = master
+
+PACKAGES += debbie
+pkg_debbie_name = debbie
+pkg_debbie_description = .DEB Built In Erlang
+pkg_debbie_homepage = https://github.com/crownedgrouse/debbie
+pkg_debbie_fetch = git
+pkg_debbie_repo = https://github.com/crownedgrouse/debbie
+pkg_debbie_commit = master
+
+PACKAGES += decimal
+pkg_decimal_name = decimal
+pkg_decimal_description = An Erlang decimal arithmetic library
+pkg_decimal_homepage = https://github.com/tim/erlang-decimal
+pkg_decimal_fetch = git
+pkg_decimal_repo = https://github.com/tim/erlang-decimal
+pkg_decimal_commit = master
+
+PACKAGES += detergent
+pkg_detergent_name = detergent
+pkg_detergent_description = An emulsifying Erlang SOAP library
+pkg_detergent_homepage = https://github.com/devinus/detergent
+pkg_detergent_fetch = git
+pkg_detergent_repo = https://github.com/devinus/detergent
+pkg_detergent_commit = master
+
+PACKAGES += detest
+pkg_detest_name = detest
+pkg_detest_description = Tool for running tests on a cluster of erlang nodes
+pkg_detest_homepage = https://github.com/biokoda/detest
+pkg_detest_fetch = git
+pkg_detest_repo = https://github.com/biokoda/detest
+pkg_detest_commit = master
+
+PACKAGES += dh_date
+pkg_dh_date_name = dh_date
+pkg_dh_date_description = Date formatting / parsing library for erlang
+pkg_dh_date_homepage = https://github.com/daleharvey/dh_date
+pkg_dh_date_fetch = git
+pkg_dh_date_repo = https://github.com/daleharvey/dh_date
+pkg_dh_date_commit = master
+
+PACKAGES += dirbusterl
+pkg_dirbusterl_name = dirbusterl
+pkg_dirbusterl_description = DirBuster successor in Erlang
+pkg_dirbusterl_homepage = https://github.com/silentsignal/DirBustErl
+pkg_dirbusterl_fetch = git
+pkg_dirbusterl_repo = https://github.com/silentsignal/DirBustErl
+pkg_dirbusterl_commit = master
+
+PACKAGES += dispcount
+pkg_dispcount_name = dispcount
+pkg_dispcount_description = Erlang task dispatcher based on ETS counters.
+pkg_dispcount_homepage = https://github.com/ferd/dispcount
+pkg_dispcount_fetch = git
+pkg_dispcount_repo = https://github.com/ferd/dispcount
+pkg_dispcount_commit = master
+
+PACKAGES += dlhttpc
+pkg_dlhttpc_name = dlhttpc
+pkg_dlhttpc_description = dispcount-based lhttpc fork for massive amounts of requests to limited endpoints
+pkg_dlhttpc_homepage = https://github.com/ferd/dlhttpc
+pkg_dlhttpc_fetch = git
+pkg_dlhttpc_repo = https://github.com/ferd/dlhttpc
+pkg_dlhttpc_commit = master
+
+PACKAGES += dns
+pkg_dns_name = dns
+pkg_dns_description = Erlang DNS library
+pkg_dns_homepage = https://github.com/aetrion/dns_erlang
+pkg_dns_fetch = git
+pkg_dns_repo = https://github.com/aetrion/dns_erlang
+pkg_dns_commit = master
+
+PACKAGES += dnssd
+pkg_dnssd_name = dnssd
+pkg_dnssd_description = Erlang interface to Apple's Bonjour DNS Service Discovery implementation
+pkg_dnssd_homepage = https://github.com/benoitc/dnssd_erlang
+pkg_dnssd_fetch = git
+pkg_dnssd_repo = https://github.com/benoitc/dnssd_erlang
+pkg_dnssd_commit = master
+
+PACKAGES += dynamic_compile
+pkg_dynamic_compile_name = dynamic_compile
+pkg_dynamic_compile_description = compile and load erlang modules from string input
+pkg_dynamic_compile_homepage = https://github.com/jkvor/dynamic_compile
+pkg_dynamic_compile_fetch = git
+pkg_dynamic_compile_repo = https://github.com/jkvor/dynamic_compile
+pkg_dynamic_compile_commit = master
+
+PACKAGES += e2
+pkg_e2_name = e2
+pkg_e2_description = Library to simplify writing correct OTP applications.
+pkg_e2_homepage = http://e2project.org
+pkg_e2_fetch = git
+pkg_e2_repo = https://github.com/gar1t/e2
+pkg_e2_commit = master
+
+PACKAGES += eamf
+pkg_eamf_name = eamf
+pkg_eamf_description = eAMF provides Action Message Format (AMF) support for Erlang
+pkg_eamf_homepage = https://github.com/mrinalwadhwa/eamf
+pkg_eamf_fetch = git
+pkg_eamf_repo = https://github.com/mrinalwadhwa/eamf
+pkg_eamf_commit = master
+
+PACKAGES += eavro
+pkg_eavro_name = eavro
+pkg_eavro_description = Apache Avro encoder/decoder
+pkg_eavro_homepage = https://github.com/SIfoxDevTeam/eavro
+pkg_eavro_fetch = git
+pkg_eavro_repo = https://github.com/SIfoxDevTeam/eavro
+pkg_eavro_commit = master
+
+PACKAGES += ecapnp
+pkg_ecapnp_name = ecapnp
+pkg_ecapnp_description = Cap'n Proto library for Erlang
+pkg_ecapnp_homepage = https://github.com/kaos/ecapnp
+pkg_ecapnp_fetch = git
+pkg_ecapnp_repo = https://github.com/kaos/ecapnp
+pkg_ecapnp_commit = master
+
+PACKAGES += econfig
+pkg_econfig_name = econfig
+pkg_econfig_description = simple Erlang config handler using INI files
+pkg_econfig_homepage = https://github.com/benoitc/econfig
+pkg_econfig_fetch = git
+pkg_econfig_repo = https://github.com/benoitc/econfig
+pkg_econfig_commit = master
+
+PACKAGES += edate
+pkg_edate_name = edate
+pkg_edate_description = date manipulation library for erlang
+pkg_edate_homepage = https://github.com/dweldon/edate
+pkg_edate_fetch = git
+pkg_edate_repo = https://github.com/dweldon/edate
+pkg_edate_commit = master
+
+PACKAGES += edgar
+pkg_edgar_name = edgar
+pkg_edgar_description = Erlang Does GNU AR
+pkg_edgar_homepage = https://github.com/crownedgrouse/edgar
+pkg_edgar_fetch = git
+pkg_edgar_repo = https://github.com/crownedgrouse/edgar
+pkg_edgar_commit = master
+
+PACKAGES += edis
+pkg_edis_name = edis
+pkg_edis_description = An Erlang implementation of Redis KV Store
+pkg_edis_homepage = http://inaka.github.com/edis/
+pkg_edis_fetch = git
+pkg_edis_repo = https://github.com/inaka/edis
+pkg_edis_commit = master
+
+PACKAGES += edns
+pkg_edns_name = edns
+pkg_edns_description = Erlang/OTP DNS server
+pkg_edns_homepage = https://github.com/hcvst/erlang-dns
+pkg_edns_fetch = git
+pkg_edns_repo = https://github.com/hcvst/erlang-dns
+pkg_edns_commit = master
+
+PACKAGES += edown
+pkg_edown_name = edown
+pkg_edown_description = EDoc extension for generating Github-flavored Markdown
+pkg_edown_homepage = https://github.com/uwiger/edown
+pkg_edown_fetch = git
+pkg_edown_repo = https://github.com/uwiger/edown
+pkg_edown_commit = master
+
+PACKAGES += eep
+pkg_eep_name = eep
+pkg_eep_description = Erlang Easy Profiling (eep) application provides a way to analyze application performance and call hierarchy
+pkg_eep_homepage = https://github.com/virtan/eep
+pkg_eep_fetch = git
+pkg_eep_repo = https://github.com/virtan/eep
+pkg_eep_commit = master
+
+PACKAGES += eep_app
+pkg_eep_app_name = eep_app
+pkg_eep_app_description = Embedded Event Processing
+pkg_eep_app_homepage = https://github.com/darach/eep-erl
+pkg_eep_app_fetch = git
+pkg_eep_app_repo = https://github.com/darach/eep-erl
+pkg_eep_app_commit = master
+
+PACKAGES += efene
+pkg_efene_name = efene
+pkg_efene_description = Alternative syntax for the Erlang Programming Language focusing on simplicity, ease of use and programmer UX
+pkg_efene_homepage = https://github.com/efene/efene
+pkg_efene_fetch = git
+pkg_efene_repo = https://github.com/efene/efene
+pkg_efene_commit = master
+
+PACKAGES += egeoip
+pkg_egeoip_name = egeoip
+pkg_egeoip_description = Erlang IP Geolocation module, currently supporting the MaxMind GeoLite City Database.
+pkg_egeoip_homepage = https://github.com/mochi/egeoip
+pkg_egeoip_fetch = git
+pkg_egeoip_repo = https://github.com/mochi/egeoip
+pkg_egeoip_commit = master
+
+PACKAGES += ehsa
+pkg_ehsa_name = ehsa
+pkg_ehsa_description = Erlang HTTP server basic and digest authentication modules
+pkg_ehsa_homepage = https://bitbucket.org/a12n/ehsa
+pkg_ehsa_fetch = hg
+pkg_ehsa_repo = https://bitbucket.org/a12n/ehsa
+pkg_ehsa_commit = default
+
+PACKAGES += ej
+pkg_ej_name = ej
+pkg_ej_description = Helper module for working with Erlang terms representing JSON
+pkg_ej_homepage = https://github.com/seth/ej
+pkg_ej_fetch = git
+pkg_ej_repo = https://github.com/seth/ej
+pkg_ej_commit = master
+
+PACKAGES += ejabberd
+pkg_ejabberd_name = ejabberd
+pkg_ejabberd_description = Robust, ubiquitous and massively scalable Jabber / XMPP Instant Messaging platform
+pkg_ejabberd_homepage = https://github.com/processone/ejabberd
+pkg_ejabberd_fetch = git
+pkg_ejabberd_repo = https://github.com/processone/ejabberd
+pkg_ejabberd_commit = master
+
+PACKAGES += ejwt
+pkg_ejwt_name = ejwt
+pkg_ejwt_description = erlang library for JSON Web Token
+pkg_ejwt_homepage = https://github.com/artefactop/ejwt
+pkg_ejwt_fetch = git
+pkg_ejwt_repo = https://github.com/artefactop/ejwt
+pkg_ejwt_commit = master
+
+PACKAGES += ekaf
+pkg_ekaf_name = ekaf
+pkg_ekaf_description = A minimal, high-performance Kafka client in Erlang.
+pkg_ekaf_homepage = https://github.com/helpshift/ekaf
+pkg_ekaf_fetch = git
+pkg_ekaf_repo = https://github.com/helpshift/ekaf
+pkg_ekaf_commit = master
+
+PACKAGES += elarm
+pkg_elarm_name = elarm
+pkg_elarm_description = Alarm Manager for Erlang.
+pkg_elarm_homepage = https://github.com/esl/elarm
+pkg_elarm_fetch = git
+pkg_elarm_repo = https://github.com/esl/elarm
+pkg_elarm_commit = master
+
+PACKAGES += eleveldb
+pkg_eleveldb_name = eleveldb
+pkg_eleveldb_description = Erlang LevelDB API
+pkg_eleveldb_homepage = https://github.com/basho/eleveldb
+pkg_eleveldb_fetch = git
+pkg_eleveldb_repo = https://github.com/basho/eleveldb
+pkg_eleveldb_commit = master
+
+PACKAGES += elixir
+pkg_elixir_name = elixir
+pkg_elixir_description = Elixir is a dynamic, functional language designed for building scalable and maintainable applications
+pkg_elixir_homepage = https://elixir-lang.org/
+pkg_elixir_fetch = git
+pkg_elixir_repo = https://github.com/elixir-lang/elixir
+pkg_elixir_commit = master
+
+PACKAGES += elli
+pkg_elli_name = elli
+pkg_elli_description = Simple, robust and performant Erlang web server
+pkg_elli_homepage = https://github.com/elli-lib/elli
+pkg_elli_fetch = git
+pkg_elli_repo = https://github.com/elli-lib/elli
+pkg_elli_commit = master
+
+PACKAGES += elvis
+pkg_elvis_name = elvis
+pkg_elvis_description = Erlang Style Reviewer
+pkg_elvis_homepage = https://github.com/inaka/elvis
+pkg_elvis_fetch = git
+pkg_elvis_repo = https://github.com/inaka/elvis
+pkg_elvis_commit = master
+
+PACKAGES += emagick
+pkg_emagick_name = emagick
+pkg_emagick_description = Wrapper for Graphics/ImageMagick command line tool.
+pkg_emagick_homepage = https://github.com/kivra/emagick
+pkg_emagick_fetch = git
+pkg_emagick_repo = https://github.com/kivra/emagick
+pkg_emagick_commit = master
+
+PACKAGES += emysql
+pkg_emysql_name = emysql
+pkg_emysql_description = Stable, pure Erlang MySQL driver.
+pkg_emysql_homepage = https://github.com/Eonblast/Emysql
+pkg_emysql_fetch = git
+pkg_emysql_repo = https://github.com/Eonblast/Emysql
+pkg_emysql_commit = master
+
+PACKAGES += enm
+pkg_enm_name = enm
+pkg_enm_description = Erlang driver for nanomsg
+pkg_enm_homepage = https://github.com/basho/enm
+pkg_enm_fetch = git
+pkg_enm_repo = https://github.com/basho/enm
+pkg_enm_commit = master
+
+PACKAGES += entop
+pkg_entop_name = entop
+pkg_entop_description = A top-like tool for monitoring an Erlang node
+pkg_entop_homepage = https://github.com/mazenharake/entop
+pkg_entop_fetch = git
+pkg_entop_repo = https://github.com/mazenharake/entop
+pkg_entop_commit = master
+
+PACKAGES += epcap
+pkg_epcap_name = epcap
+pkg_epcap_description = Erlang packet capture interface using pcap
+pkg_epcap_homepage = https://github.com/msantos/epcap
+pkg_epcap_fetch = git
+pkg_epcap_repo = https://github.com/msantos/epcap
+pkg_epcap_commit = master
+
+PACKAGES += eper
+pkg_eper_name = eper
+pkg_eper_description = Erlang performance and debugging tools.
+pkg_eper_homepage = https://github.com/massemanet/eper
+pkg_eper_fetch = git
+pkg_eper_repo = https://github.com/massemanet/eper
+pkg_eper_commit = master
+
+PACKAGES += epgsql
+pkg_epgsql_name = epgsql
+pkg_epgsql_description = Erlang PostgreSQL client library.
+pkg_epgsql_homepage = https://github.com/epgsql/epgsql
+pkg_epgsql_fetch = git
+pkg_epgsql_repo = https://github.com/epgsql/epgsql
+pkg_epgsql_commit = master
+
+PACKAGES += episcina
+pkg_episcina_name = episcina
+pkg_episcina_description = A simple non intrusive resource pool for connections
+pkg_episcina_homepage = https://github.com/erlware/episcina
+pkg_episcina_fetch = git
+pkg_episcina_repo = https://github.com/erlware/episcina
+pkg_episcina_commit = master
+
+PACKAGES += eplot
+pkg_eplot_name = eplot
+pkg_eplot_description = A plot engine written in erlang.
+pkg_eplot_homepage = https://github.com/psyeugenic/eplot
+pkg_eplot_fetch = git
+pkg_eplot_repo = https://github.com/psyeugenic/eplot
+pkg_eplot_commit = master
+
+PACKAGES += epocxy
+pkg_epocxy_name = epocxy
+pkg_epocxy_description = Erlang Patterns of Concurrency
+pkg_epocxy_homepage = https://github.com/duomark/epocxy
+pkg_epocxy_fetch = git
+pkg_epocxy_repo = https://github.com/duomark/epocxy
+pkg_epocxy_commit = master
+
+PACKAGES += epubnub
+pkg_epubnub_name = epubnub
+pkg_epubnub_description = Erlang PubNub API
+pkg_epubnub_homepage = https://github.com/tsloughter/epubnub
+pkg_epubnub_fetch = git
+pkg_epubnub_repo = https://github.com/tsloughter/epubnub
+pkg_epubnub_commit = master
+
+PACKAGES += eqm
+pkg_eqm_name = eqm
+pkg_eqm_description = Erlang pub sub with supply-demand channels
+pkg_eqm_homepage = https://github.com/loucash/eqm
+pkg_eqm_fetch = git
+pkg_eqm_repo = https://github.com/loucash/eqm
+pkg_eqm_commit = master
+
+PACKAGES += eredis
+pkg_eredis_name = eredis
+pkg_eredis_description = Erlang Redis client
+pkg_eredis_homepage = https://github.com/wooga/eredis
+pkg_eredis_fetch = git
+pkg_eredis_repo = https://github.com/wooga/eredis
+pkg_eredis_commit = master
+
+PACKAGES += eredis_pool
+pkg_eredis_pool_name = eredis_pool
+pkg_eredis_pool_description = eredis_pool is a pool of Redis clients, using eredis and poolboy.
+pkg_eredis_pool_homepage = https://github.com/hiroeorz/eredis_pool
+pkg_eredis_pool_fetch = git
+pkg_eredis_pool_repo = https://github.com/hiroeorz/eredis_pool
+pkg_eredis_pool_commit = master
+
+PACKAGES += erl_streams
+pkg_erl_streams_name = erl_streams
+pkg_erl_streams_description = Streams in Erlang
+pkg_erl_streams_homepage = https://github.com/epappas/erl_streams
+pkg_erl_streams_fetch = git
+pkg_erl_streams_repo = https://github.com/epappas/erl_streams
+pkg_erl_streams_commit = master
+
+PACKAGES += erlang_cep
+pkg_erlang_cep_name = erlang_cep
+pkg_erlang_cep_description = A basic CEP package written in erlang
+pkg_erlang_cep_homepage = https://github.com/danmacklin/erlang_cep
+pkg_erlang_cep_fetch = git
+pkg_erlang_cep_repo = https://github.com/danmacklin/erlang_cep
+pkg_erlang_cep_commit = master
+
+PACKAGES += erlang_js
+pkg_erlang_js_name = erlang_js
+pkg_erlang_js_description = A linked-in driver for Erlang to Mozilla's Spidermonkey Javascript runtime.
+pkg_erlang_js_homepage = https://github.com/basho/erlang_js
+pkg_erlang_js_fetch = git
+pkg_erlang_js_repo = https://github.com/basho/erlang_js
+pkg_erlang_js_commit = master
+
+PACKAGES += erlang_localtime
+pkg_erlang_localtime_name = erlang_localtime
+pkg_erlang_localtime_description = Erlang library for conversion from one local time to another
+pkg_erlang_localtime_homepage = https://github.com/dmitryme/erlang_localtime
+pkg_erlang_localtime_fetch = git
+pkg_erlang_localtime_repo = https://github.com/dmitryme/erlang_localtime
+pkg_erlang_localtime_commit = master
+
+PACKAGES += erlang_smtp
+pkg_erlang_smtp_name = erlang_smtp
+pkg_erlang_smtp_description = Erlang SMTP and POP3 server code.
+pkg_erlang_smtp_homepage = https://github.com/tonyg/erlang-smtp
+pkg_erlang_smtp_fetch = git
+pkg_erlang_smtp_repo = https://github.com/tonyg/erlang-smtp
+pkg_erlang_smtp_commit = master
+
+PACKAGES += erlang_term
+pkg_erlang_term_name = erlang_term
+pkg_erlang_term_description = Erlang Term Info
+pkg_erlang_term_homepage = https://github.com/okeuday/erlang_term
+pkg_erlang_term_fetch = git
+pkg_erlang_term_repo = https://github.com/okeuday/erlang_term
+pkg_erlang_term_commit = master
+
+PACKAGES += erlastic_search
+pkg_erlastic_search_name = erlastic_search
+pkg_erlastic_search_description = An Erlang app for communicating with Elastic Search's REST interface.
+pkg_erlastic_search_homepage = https://github.com/tsloughter/erlastic_search
+pkg_erlastic_search_fetch = git
+pkg_erlastic_search_repo = https://github.com/tsloughter/erlastic_search
+pkg_erlastic_search_commit = master
+
+PACKAGES += erlasticsearch
+pkg_erlasticsearch_name = erlasticsearch
+pkg_erlasticsearch_description = Erlang thrift interface to elastic_search
+pkg_erlasticsearch_homepage = https://github.com/dieswaytoofast/erlasticsearch
+pkg_erlasticsearch_fetch = git
+pkg_erlasticsearch_repo = https://github.com/dieswaytoofast/erlasticsearch
+pkg_erlasticsearch_commit = master
+
+PACKAGES += erlbrake
+pkg_erlbrake_name = erlbrake
+pkg_erlbrake_description = Erlang Airbrake notification client
+pkg_erlbrake_homepage = https://github.com/kenpratt/erlbrake
+pkg_erlbrake_fetch = git
+pkg_erlbrake_repo = https://github.com/kenpratt/erlbrake
+pkg_erlbrake_commit = master
+
+PACKAGES += erlcloud
+pkg_erlcloud_name = erlcloud
+pkg_erlcloud_description = Cloud Computing library for erlang (Amazon EC2, S3, SQS, SimpleDB, Mechanical Turk, ELB)
+pkg_erlcloud_homepage = https://github.com/gleber/erlcloud
+pkg_erlcloud_fetch = git
+pkg_erlcloud_repo = https://github.com/gleber/erlcloud
+pkg_erlcloud_commit = master
+
+PACKAGES += erlcron
+pkg_erlcron_name = erlcron
+pkg_erlcron_description = Erlang cronish system
+pkg_erlcron_homepage = https://github.com/erlware/erlcron
+pkg_erlcron_fetch = git
+pkg_erlcron_repo = https://github.com/erlware/erlcron
+pkg_erlcron_commit = master
+
+PACKAGES += erldb
+pkg_erldb_name = erldb
+pkg_erldb_description = ORM (Object-relational mapping) application implemented in Erlang
+pkg_erldb_homepage = http://erldb.org
+pkg_erldb_fetch = git
+pkg_erldb_repo = https://github.com/erldb/erldb
+pkg_erldb_commit = master
+
+PACKAGES += erldis
+pkg_erldis_name = erldis
+pkg_erldis_description = redis erlang client library
+pkg_erldis_homepage = https://github.com/cstar/erldis
+pkg_erldis_fetch = git
+pkg_erldis_repo = https://github.com/cstar/erldis
+pkg_erldis_commit = master
+
+PACKAGES += erldns
+pkg_erldns_name = erldns
+pkg_erldns_description = DNS server, in erlang.
+pkg_erldns_homepage = https://github.com/aetrion/erl-dns
+pkg_erldns_fetch = git
+pkg_erldns_repo = https://github.com/aetrion/erl-dns
+pkg_erldns_commit = master
+
+PACKAGES += erldocker
+pkg_erldocker_name = erldocker
+pkg_erldocker_description = Docker Remote API client for Erlang
+pkg_erldocker_homepage = https://github.com/proger/erldocker
+pkg_erldocker_fetch = git
+pkg_erldocker_repo = https://github.com/proger/erldocker
+pkg_erldocker_commit = master
+
+PACKAGES += erlfsmon
+pkg_erlfsmon_name = erlfsmon
+pkg_erlfsmon_description = Erlang filesystem event watcher for Linux and OSX
+pkg_erlfsmon_homepage = https://github.com/proger/erlfsmon
+pkg_erlfsmon_fetch = git
+pkg_erlfsmon_repo = https://github.com/proger/erlfsmon
+pkg_erlfsmon_commit = master
+
+PACKAGES += erlgit
+pkg_erlgit_name = erlgit
+pkg_erlgit_description = Erlang convenience wrapper around git executable
+pkg_erlgit_homepage = https://github.com/gleber/erlgit
+pkg_erlgit_fetch = git
+pkg_erlgit_repo = https://github.com/gleber/erlgit
+pkg_erlgit_commit = master
+
+PACKAGES += erlguten
+pkg_erlguten_name = erlguten
+pkg_erlguten_description = ErlGuten is a system for high-quality typesetting, written purely in Erlang.
+pkg_erlguten_homepage = https://github.com/richcarl/erlguten
+pkg_erlguten_fetch = git
+pkg_erlguten_repo = https://github.com/richcarl/erlguten
+pkg_erlguten_commit = master
+
+PACKAGES += erlmc
+pkg_erlmc_name = erlmc
+pkg_erlmc_description = Erlang memcached binary protocol client
+pkg_erlmc_homepage = https://github.com/jkvor/erlmc
+pkg_erlmc_fetch = git
+pkg_erlmc_repo = https://github.com/jkvor/erlmc
+pkg_erlmc_commit = master
+
+PACKAGES += erlmongo
+pkg_erlmongo_name = erlmongo
+pkg_erlmongo_description = Record based Erlang driver for MongoDB with gridfs support
+pkg_erlmongo_homepage = https://github.com/SergejJurecko/erlmongo
+pkg_erlmongo_fetch = git
+pkg_erlmongo_repo = https://github.com/SergejJurecko/erlmongo
+pkg_erlmongo_commit = master
+
+PACKAGES += erlog
+pkg_erlog_name = erlog
+pkg_erlog_description = Prolog interpreter in and for Erlang
+pkg_erlog_homepage = https://github.com/rvirding/erlog
+pkg_erlog_fetch = git
+pkg_erlog_repo = https://github.com/rvirding/erlog
+pkg_erlog_commit = master
+
+PACKAGES += erlpass
+pkg_erlpass_name = erlpass
+pkg_erlpass_description = A library to handle password hashing and changing in a safe manner, independent from any kind of storage whatsoever.
+pkg_erlpass_homepage = https://github.com/ferd/erlpass
+pkg_erlpass_fetch = git
+pkg_erlpass_repo = https://github.com/ferd/erlpass
+pkg_erlpass_commit = master
+
+PACKAGES += erlport
+pkg_erlport_name = erlport
+pkg_erlport_description = ErlPort - connect Erlang to other languages
+pkg_erlport_homepage = https://github.com/hdima/erlport
+pkg_erlport_fetch = git
+pkg_erlport_repo = https://github.com/hdima/erlport
+pkg_erlport_commit = master
+
+PACKAGES += erlsh
+pkg_erlsh_name = erlsh
+pkg_erlsh_description = Erlang shell tools
+pkg_erlsh_homepage = https://github.com/proger/erlsh
+pkg_erlsh_fetch = git
+pkg_erlsh_repo = https://github.com/proger/erlsh
+pkg_erlsh_commit = master
+
+PACKAGES += erlsha2
+pkg_erlsha2_name = erlsha2
+pkg_erlsha2_description = SHA-224, SHA-256, SHA-384, SHA-512 implemented in Erlang NIFs.
+pkg_erlsha2_homepage = https://github.com/vinoski/erlsha2
+pkg_erlsha2_fetch = git
+pkg_erlsha2_repo = https://github.com/vinoski/erlsha2
+pkg_erlsha2_commit = master
+
+PACKAGES += erlsom
+pkg_erlsom_name = erlsom
+pkg_erlsom_description = XML parser for Erlang
+pkg_erlsom_homepage = https://github.com/willemdj/erlsom
+pkg_erlsom_fetch = git
+pkg_erlsom_repo = https://github.com/willemdj/erlsom
+pkg_erlsom_commit = master
+
+PACKAGES += erlubi
+pkg_erlubi_name = erlubi
+pkg_erlubi_description = Ubigraph Erlang Client (and Process Visualizer)
+pkg_erlubi_homepage = https://github.com/krestenkrab/erlubi
+pkg_erlubi_fetch = git
+pkg_erlubi_repo = https://github.com/krestenkrab/erlubi
+pkg_erlubi_commit = master
+
+PACKAGES += erlvolt
+pkg_erlvolt_name = erlvolt
+pkg_erlvolt_description = VoltDB Erlang Client Driver
+pkg_erlvolt_homepage = https://github.com/VoltDB/voltdb-client-erlang
+pkg_erlvolt_fetch = git
+pkg_erlvolt_repo = https://github.com/VoltDB/voltdb-client-erlang
+pkg_erlvolt_commit = master
+
+PACKAGES += erlware_commons
+pkg_erlware_commons_name = erlware_commons
+pkg_erlware_commons_description = Erlware Commons is an Erlware project focused on all aspects of reusable Erlang components.
+pkg_erlware_commons_homepage = https://github.com/erlware/erlware_commons
+pkg_erlware_commons_fetch = git
+pkg_erlware_commons_repo = https://github.com/erlware/erlware_commons
+pkg_erlware_commons_commit = master
+
+PACKAGES += erlydtl
+pkg_erlydtl_name = erlydtl
+pkg_erlydtl_description = Django Template Language for Erlang.
+pkg_erlydtl_homepage = https://github.com/erlydtl/erlydtl
+pkg_erlydtl_fetch = git
+pkg_erlydtl_repo = https://github.com/erlydtl/erlydtl
+pkg_erlydtl_commit = master
+
+PACKAGES += errd
+pkg_errd_name = errd
+pkg_errd_description = Erlang RRDTool library
+pkg_errd_homepage = https://github.com/archaelus/errd
+pkg_errd_fetch = git
+pkg_errd_repo = https://github.com/archaelus/errd
+pkg_errd_commit = master
+
+PACKAGES += erserve
+pkg_erserve_name = erserve
+pkg_erserve_description = Erlang/Rserve communication interface
+pkg_erserve_homepage = https://github.com/del/erserve
+pkg_erserve_fetch = git
+pkg_erserve_repo = https://github.com/del/erserve
+pkg_erserve_commit = master
+
+PACKAGES += erwa
+pkg_erwa_name = erwa
+pkg_erwa_description = A WAMP router and client written in Erlang.
+pkg_erwa_homepage = https://github.com/bwegh/erwa
+pkg_erwa_fetch = git
+pkg_erwa_repo = https://github.com/bwegh/erwa
+pkg_erwa_commit = master
+
+PACKAGES += escalus
+pkg_escalus_name = escalus
+pkg_escalus_description = An XMPP client library in Erlang for conveniently testing XMPP servers
+pkg_escalus_homepage = https://github.com/esl/escalus
+pkg_escalus_fetch = git
+pkg_escalus_repo = https://github.com/esl/escalus
+pkg_escalus_commit = master
+
+PACKAGES += esh_mk
+pkg_esh_mk_name = esh_mk
+pkg_esh_mk_description = esh template engine plugin for erlang.mk
+pkg_esh_mk_homepage = https://github.com/crownedgrouse/esh.mk
+pkg_esh_mk_fetch = git
+pkg_esh_mk_repo = https://github.com/crownedgrouse/esh.mk.git
+pkg_esh_mk_commit = master
+
+PACKAGES += espec
+pkg_espec_name = espec
+pkg_espec_description = ESpec: Behaviour driven development framework for Erlang
+pkg_espec_homepage = https://github.com/lucaspiller/espec
+pkg_espec_fetch = git
+pkg_espec_repo = https://github.com/lucaspiller/espec
+pkg_espec_commit = master
+
+PACKAGES += estatsd
+pkg_estatsd_name = estatsd
+pkg_estatsd_description = Erlang stats aggregation app that periodically flushes data to graphite
+pkg_estatsd_homepage = https://github.com/RJ/estatsd
+pkg_estatsd_fetch = git
+pkg_estatsd_repo = https://github.com/RJ/estatsd
+pkg_estatsd_commit = master
+
+PACKAGES += etap
+pkg_etap_name = etap
+pkg_etap_description = etap is a simple erlang testing library that provides TAP compliant output.
+pkg_etap_homepage = https://github.com/ngerakines/etap
+pkg_etap_fetch = git
+pkg_etap_repo = https://github.com/ngerakines/etap
+pkg_etap_commit = master
+
+PACKAGES += etest
+pkg_etest_name = etest
+pkg_etest_description = A lightweight, convention over configuration test framework for Erlang
+pkg_etest_homepage = https://github.com/wooga/etest
+pkg_etest_fetch = git
+pkg_etest_repo = https://github.com/wooga/etest
+pkg_etest_commit = master
+
+PACKAGES += etest_http
+pkg_etest_http_name = etest_http
+pkg_etest_http_description = etest Assertions around HTTP (client-side)
+pkg_etest_http_homepage = https://github.com/wooga/etest_http
+pkg_etest_http_fetch = git
+pkg_etest_http_repo = https://github.com/wooga/etest_http
+pkg_etest_http_commit = master
+
+PACKAGES += etoml
+pkg_etoml_name = etoml
+pkg_etoml_description = TOML language erlang parser
+pkg_etoml_homepage = https://github.com/kalta/etoml
+pkg_etoml_fetch = git
+pkg_etoml_repo = https://github.com/kalta/etoml
+pkg_etoml_commit = master
+
+PACKAGES += eunit
+pkg_eunit_name = eunit
+pkg_eunit_description = The EUnit lightweight unit testing framework for Erlang - this is the canonical development repository.
+pkg_eunit_homepage = https://github.com/richcarl/eunit
+pkg_eunit_fetch = git
+pkg_eunit_repo = https://github.com/richcarl/eunit
+pkg_eunit_commit = master
+
+PACKAGES += eunit_formatters
+pkg_eunit_formatters_name = eunit_formatters
+pkg_eunit_formatters_description = Because eunit's output sucks. Let's make it better.
+pkg_eunit_formatters_homepage = https://github.com/seancribbs/eunit_formatters
+pkg_eunit_formatters_fetch = git
+pkg_eunit_formatters_repo = https://github.com/seancribbs/eunit_formatters
+pkg_eunit_formatters_commit = master
+
+PACKAGES += euthanasia
+pkg_euthanasia_name = euthanasia
+pkg_euthanasia_description = Merciful killer for your Erlang processes
+pkg_euthanasia_homepage = https://github.com/doubleyou/euthanasia
+pkg_euthanasia_fetch = git
+pkg_euthanasia_repo = https://github.com/doubleyou/euthanasia
+pkg_euthanasia_commit = master
+
+PACKAGES += evum
+pkg_evum_name = evum
+pkg_evum_description = Spawn Linux VMs as Erlang processes in the Erlang VM
+pkg_evum_homepage = https://github.com/msantos/evum
+pkg_evum_fetch = git
+pkg_evum_repo = https://github.com/msantos/evum
+pkg_evum_commit = master
+
+PACKAGES += exec
+pkg_exec_name = erlexec
+pkg_exec_description = Execute and control OS processes from Erlang/OTP.
+pkg_exec_homepage = http://saleyn.github.com/erlexec
+pkg_exec_fetch = git
+pkg_exec_repo = https://github.com/saleyn/erlexec
+pkg_exec_commit = master
+
+PACKAGES += exml
+pkg_exml_name = exml
+pkg_exml_description = XML parsing library in Erlang
+pkg_exml_homepage = https://github.com/paulgray/exml
+pkg_exml_fetch = git
+pkg_exml_repo = https://github.com/paulgray/exml
+pkg_exml_commit = master
+
+PACKAGES += exometer
+pkg_exometer_name = exometer
+pkg_exometer_description = Basic measurement objects and probe behavior
+pkg_exometer_homepage = https://github.com/Feuerlabs/exometer
+pkg_exometer_fetch = git
+pkg_exometer_repo = https://github.com/Feuerlabs/exometer
+pkg_exometer_commit = master
+
+PACKAGES += exs1024
+pkg_exs1024_name = exs1024
+pkg_exs1024_description = Xorshift1024star pseudo random number generator for Erlang.
+pkg_exs1024_homepage = https://github.com/jj1bdx/exs1024
+pkg_exs1024_fetch = git
+pkg_exs1024_repo = https://github.com/jj1bdx/exs1024
+pkg_exs1024_commit = master
+
+PACKAGES += exs64
+pkg_exs64_name = exs64
+pkg_exs64_description = Xorshift64star pseudo random number generator for Erlang.
+pkg_exs64_homepage = https://github.com/jj1bdx/exs64
+pkg_exs64_fetch = git
+pkg_exs64_repo = https://github.com/jj1bdx/exs64
+pkg_exs64_commit = master
+
+PACKAGES += exsplus116
+pkg_exsplus116_name = exsplus116
+pkg_exsplus116_description = Xorshift116plus for Erlang
+pkg_exsplus116_homepage = https://github.com/jj1bdx/exsplus116
+pkg_exsplus116_fetch = git
+pkg_exsplus116_repo = https://github.com/jj1bdx/exsplus116
+pkg_exsplus116_commit = master
+
+PACKAGES += exsplus128
+pkg_exsplus128_name = exsplus128
+pkg_exsplus128_description = Xorshift128plus pseudo random number generator for Erlang.
+pkg_exsplus128_homepage = https://github.com/jj1bdx/exsplus128
+pkg_exsplus128_fetch = git
+pkg_exsplus128_repo = https://github.com/jj1bdx/exsplus128
+pkg_exsplus128_commit = master
+
+PACKAGES += ezmq
+pkg_ezmq_name = ezmq
+pkg_ezmq_description = zMQ implemented in Erlang
+pkg_ezmq_homepage = https://github.com/RoadRunnr/ezmq
+pkg_ezmq_fetch = git
+pkg_ezmq_repo = https://github.com/RoadRunnr/ezmq
+pkg_ezmq_commit = master
+
+PACKAGES += ezmtp
+pkg_ezmtp_name = ezmtp
+pkg_ezmtp_description = ZMTP protocol in pure Erlang.
+pkg_ezmtp_homepage = https://github.com/a13x/ezmtp
+pkg_ezmtp_fetch = git
+pkg_ezmtp_repo = https://github.com/a13x/ezmtp
+pkg_ezmtp_commit = master
+
+PACKAGES += fast_disk_log
+pkg_fast_disk_log_name = fast_disk_log
+pkg_fast_disk_log_description = Pool-based asynchronous Erlang disk logger
+pkg_fast_disk_log_homepage = https://github.com/lpgauth/fast_disk_log
+pkg_fast_disk_log_fetch = git
+pkg_fast_disk_log_repo = https://github.com/lpgauth/fast_disk_log
+pkg_fast_disk_log_commit = master
+
+PACKAGES += feeder
+pkg_feeder_name = feeder
+pkg_feeder_description = Stream parse RSS and Atom formatted XML feeds.
+pkg_feeder_homepage = https://github.com/michaelnisi/feeder
+pkg_feeder_fetch = git
+pkg_feeder_repo = https://github.com/michaelnisi/feeder
+pkg_feeder_commit = master
+
+PACKAGES += find_crate
+pkg_find_crate_name = find_crate
+pkg_find_crate_description = Find Rust libs and exes in Erlang application priv directory
+pkg_find_crate_homepage = https://github.com/goertzenator/find_crate
+pkg_find_crate_fetch = git
+pkg_find_crate_repo = https://github.com/goertzenator/find_crate
+pkg_find_crate_commit = master
+
+PACKAGES += fix
+pkg_fix_name = fix
+pkg_fix_description = http://fixprotocol.org/ implementation.
+pkg_fix_homepage = https://github.com/maxlapshin/fix
+pkg_fix_fetch = git
+pkg_fix_repo = https://github.com/maxlapshin/fix
+pkg_fix_commit = master
+
+PACKAGES += flower
+pkg_flower_name = flower
+pkg_flower_description = FlowER - an Erlang OpenFlow development platform
+pkg_flower_homepage = https://github.com/travelping/flower
+pkg_flower_fetch = git
+pkg_flower_repo = https://github.com/travelping/flower
+pkg_flower_commit = master
+
+PACKAGES += fn
+pkg_fn_name = fn
+pkg_fn_description = Function utilities for Erlang
+pkg_fn_homepage = https://github.com/reiddraper/fn
+pkg_fn_fetch = git
+pkg_fn_repo = https://github.com/reiddraper/fn
+pkg_fn_commit = master
+
+PACKAGES += folsom
+pkg_folsom_name = folsom
+pkg_folsom_description = Expose Erlang Events and Metrics
+pkg_folsom_homepage = https://github.com/boundary/folsom
+pkg_folsom_fetch = git
+pkg_folsom_repo = https://github.com/boundary/folsom
+pkg_folsom_commit = master
+
+PACKAGES += folsom_cowboy
+pkg_folsom_cowboy_name = folsom_cowboy
+pkg_folsom_cowboy_description = A Cowboy based Folsom HTTP Wrapper.
+pkg_folsom_cowboy_homepage = https://github.com/boundary/folsom_cowboy
+pkg_folsom_cowboy_fetch = git
+pkg_folsom_cowboy_repo = https://github.com/boundary/folsom_cowboy
+pkg_folsom_cowboy_commit = master
+
+PACKAGES += folsomite
+pkg_folsomite_name = folsomite
+pkg_folsomite_description = blow up your graphite / riemann server with folsom metrics
+pkg_folsomite_homepage = https://github.com/campanja/folsomite
+pkg_folsomite_fetch = git
+pkg_folsomite_repo = https://github.com/campanja/folsomite
+pkg_folsomite_commit = master
+
+PACKAGES += fs
+pkg_fs_name = fs
+pkg_fs_description = Erlang FileSystem Listener
+pkg_fs_homepage = https://github.com/synrc/fs
+pkg_fs_fetch = git
+pkg_fs_repo = https://github.com/synrc/fs
+pkg_fs_commit = master
+
+PACKAGES += fuse
+pkg_fuse_name = fuse
+pkg_fuse_description = A Circuit Breaker for Erlang
+pkg_fuse_homepage = https://github.com/jlouis/fuse
+pkg_fuse_fetch = git
+pkg_fuse_repo = https://github.com/jlouis/fuse
+pkg_fuse_commit = master
+
+PACKAGES += gcm
+pkg_gcm_name = gcm
+pkg_gcm_description = An Erlang application for Google Cloud Messaging
+pkg_gcm_homepage = https://github.com/pdincau/gcm-erlang
+pkg_gcm_fetch = git
+pkg_gcm_repo = https://github.com/pdincau/gcm-erlang
+pkg_gcm_commit = master
+
+PACKAGES += gcprof
+pkg_gcprof_name = gcprof
+pkg_gcprof_description = Garbage Collection profiler for Erlang
+pkg_gcprof_homepage = https://github.com/knutin/gcprof
+pkg_gcprof_fetch = git
+pkg_gcprof_repo = https://github.com/knutin/gcprof
+pkg_gcprof_commit = master
+
+PACKAGES += geas
+pkg_geas_name = geas
+pkg_geas_description = Guess Erlang Application Scattering
+pkg_geas_homepage = https://github.com/crownedgrouse/geas
+pkg_geas_fetch = git
+pkg_geas_repo = https://github.com/crownedgrouse/geas
+pkg_geas_commit = master
+
+PACKAGES += geef
+pkg_geef_name = geef
+pkg_geef_description = Git NEEEEF (Erlang NIF)
+pkg_geef_homepage = https://github.com/carlosmn/geef
+pkg_geef_fetch = git
+pkg_geef_repo = https://github.com/carlosmn/geef
+pkg_geef_commit = master
+
+PACKAGES += gen_coap
+pkg_gen_coap_name = gen_coap
+pkg_gen_coap_description = Generic Erlang CoAP Client/Server
+pkg_gen_coap_homepage = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_fetch = git
+pkg_gen_coap_repo = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_commit = master
+
+PACKAGES += gen_cycle
+pkg_gen_cycle_name = gen_cycle
+pkg_gen_cycle_description = Simple, generic OTP behaviour for recurring tasks
+pkg_gen_cycle_homepage = https://github.com/aerosol/gen_cycle
+pkg_gen_cycle_fetch = git
+pkg_gen_cycle_repo = https://github.com/aerosol/gen_cycle
+pkg_gen_cycle_commit = develop
+
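+# gen_cycle above defaults to its 'develop' branch rather than 'master'.
+# A sketch of pinning a different ref from a project Makefile, without
+# editing this index (the ref shown is hypothetical):
+#
+#   DEPS = gen_cycle
+#   dep_gen_cycle_commit = master
+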
+PACKAGES += gen_icmp
+pkg_gen_icmp_name = gen_icmp
+pkg_gen_icmp_description = Erlang interface to ICMP sockets
+pkg_gen_icmp_homepage = https://github.com/msantos/gen_icmp
+pkg_gen_icmp_fetch = git
+pkg_gen_icmp_repo = https://github.com/msantos/gen_icmp
+pkg_gen_icmp_commit = master
+
+PACKAGES += gen_leader
+pkg_gen_leader_name = gen_leader
+pkg_gen_leader_description = leader election behavior
+pkg_gen_leader_homepage = https://github.com/garret-smith/gen_leader_revival
+pkg_gen_leader_fetch = git
+pkg_gen_leader_repo = https://github.com/garret-smith/gen_leader_revival
+pkg_gen_leader_commit = master
+
+PACKAGES += gen_nb_server
+pkg_gen_nb_server_name = gen_nb_server
+pkg_gen_nb_server_description = OTP behavior for writing non-blocking servers
+pkg_gen_nb_server_homepage = https://github.com/kevsmith/gen_nb_server
+pkg_gen_nb_server_fetch = git
+pkg_gen_nb_server_repo = https://github.com/kevsmith/gen_nb_server
+pkg_gen_nb_server_commit = master
+
+PACKAGES += gen_paxos
+pkg_gen_paxos_name = gen_paxos
+pkg_gen_paxos_description = An Erlang/OTP-style implementation of the PAXOS distributed consensus protocol
+pkg_gen_paxos_homepage = https://github.com/gburd/gen_paxos
+pkg_gen_paxos_fetch = git
+pkg_gen_paxos_repo = https://github.com/gburd/gen_paxos
+pkg_gen_paxos_commit = master
+
+PACKAGES += gen_rpc
+pkg_gen_rpc_name = gen_rpc
+pkg_gen_rpc_description = A scalable RPC library for Erlang-VM based languages
+pkg_gen_rpc_homepage = https://github.com/priestjim/gen_rpc
+pkg_gen_rpc_fetch = git
+pkg_gen_rpc_repo = https://github.com/priestjim/gen_rpc.git
+pkg_gen_rpc_commit = master
+
+PACKAGES += gen_smtp
+pkg_gen_smtp_name = gen_smtp
+pkg_gen_smtp_description = A generic Erlang SMTP server and client that can be extended via callback modules
+pkg_gen_smtp_homepage = https://github.com/Vagabond/gen_smtp
+pkg_gen_smtp_fetch = git
+pkg_gen_smtp_repo = https://github.com/Vagabond/gen_smtp
+pkg_gen_smtp_commit = master
+
+PACKAGES += gen_tracker
+pkg_gen_tracker_name = gen_tracker
+pkg_gen_tracker_description = supervisor with ets handling of children and their metadata
+pkg_gen_tracker_homepage = https://github.com/erlyvideo/gen_tracker
+pkg_gen_tracker_fetch = git
+pkg_gen_tracker_repo = https://github.com/erlyvideo/gen_tracker
+pkg_gen_tracker_commit = master
+
+PACKAGES += gen_unix
+pkg_gen_unix_name = gen_unix
+pkg_gen_unix_description = Erlang Unix socket interface
+pkg_gen_unix_homepage = https://github.com/msantos/gen_unix
+pkg_gen_unix_fetch = git
+pkg_gen_unix_repo = https://github.com/msantos/gen_unix
+pkg_gen_unix_commit = master
+
+PACKAGES += geode
+pkg_geode_name = geode
+pkg_geode_description = geohash/proximity lookup in pure, uncut erlang.
+pkg_geode_homepage = https://github.com/bradfordw/geode
+pkg_geode_fetch = git
+pkg_geode_repo = https://github.com/bradfordw/geode
+pkg_geode_commit = master
+
+PACKAGES += getopt
+pkg_getopt_name = getopt
+pkg_getopt_description = Module to parse command line arguments using the GNU getopt syntax
+pkg_getopt_homepage = https://github.com/jcomellas/getopt
+pkg_getopt_fetch = git
+pkg_getopt_repo = https://github.com/jcomellas/getopt
+pkg_getopt_commit = master
+
+PACKAGES += gettext
+pkg_gettext_name = gettext
+pkg_gettext_description = Erlang internationalization library.
+pkg_gettext_homepage = https://github.com/etnt/gettext
+pkg_gettext_fetch = git
+pkg_gettext_repo = https://github.com/etnt/gettext
+pkg_gettext_commit = master
+
+PACKAGES += giallo
+pkg_giallo_name = giallo
+pkg_giallo_description = Small and flexible web framework on top of Cowboy
+pkg_giallo_homepage = https://github.com/kivra/giallo
+pkg_giallo_fetch = git
+pkg_giallo_repo = https://github.com/kivra/giallo
+pkg_giallo_commit = master
+
+PACKAGES += gin
+pkg_gin_name = gin
+pkg_gin_description = The guards « and » for Erlang parse_transform
+pkg_gin_homepage = https://github.com/mad-cocktail/gin
+pkg_gin_fetch = git
+pkg_gin_repo = https://github.com/mad-cocktail/gin
+pkg_gin_commit = master
+
+PACKAGES += gitty
+pkg_gitty_name = gitty
+pkg_gitty_description = Git access in erlang
+pkg_gitty_homepage = https://github.com/maxlapshin/gitty
+pkg_gitty_fetch = git
+pkg_gitty_repo = https://github.com/maxlapshin/gitty
+pkg_gitty_commit = master
+
+PACKAGES += gold_fever
+pkg_gold_fever_name = gold_fever
+pkg_gold_fever_description = A Treasure Hunt for Erlangers
+pkg_gold_fever_homepage = https://github.com/inaka/gold_fever
+pkg_gold_fever_fetch = git
+pkg_gold_fever_repo = https://github.com/inaka/gold_fever
+pkg_gold_fever_commit = master
+
+PACKAGES += gpb
+pkg_gpb_name = gpb
+pkg_gpb_description = A Google Protobuf implementation for Erlang
+pkg_gpb_homepage = https://github.com/tomas-abrahamsson/gpb
+pkg_gpb_fetch = git
+pkg_gpb_repo = https://github.com/tomas-abrahamsson/gpb
+pkg_gpb_commit = master
+
+PACKAGES += gproc
+pkg_gproc_name = gproc
+pkg_gproc_description = Extended process registry for Erlang
+pkg_gproc_homepage = https://github.com/uwiger/gproc
+pkg_gproc_fetch = git
+pkg_gproc_repo = https://github.com/uwiger/gproc
+pkg_gproc_commit = master
+
+PACKAGES += grapherl
+pkg_grapherl_name = grapherl
+pkg_grapherl_description = Create graphs of Erlang systems and programs
+pkg_grapherl_homepage = https://github.com/eproxus/grapherl
+pkg_grapherl_fetch = git
+pkg_grapherl_repo = https://github.com/eproxus/grapherl
+pkg_grapherl_commit = master
+
+PACKAGES += grpc
+pkg_grpc_name = grpc
+pkg_grpc_description = gRPC server in Erlang
+pkg_grpc_homepage = https://github.com/Bluehouse-Technology/grpc
+pkg_grpc_fetch = git
+pkg_grpc_repo = https://github.com/Bluehouse-Technology/grpc
+pkg_grpc_commit = master
+
+PACKAGES += grpc_client
+pkg_grpc_client_name = grpc_client
+pkg_grpc_client_description = gRPC client in Erlang
+pkg_grpc_client_homepage = https://github.com/Bluehouse-Technology/grpc_client
+pkg_grpc_client_fetch = git
+pkg_grpc_client_repo = https://github.com/Bluehouse-Technology/grpc_client
+pkg_grpc_client_commit = master
+
+PACKAGES += gun
+pkg_gun_name = gun
+pkg_gun_description = Asynchronous SPDY, HTTP and Websocket client written in Erlang.
+pkg_gun_homepage = http://ninenines.eu
+pkg_gun_fetch = git
+pkg_gun_repo = https://github.com/ninenines/gun
+pkg_gun_commit = master
+
+PACKAGES += gut
+pkg_gut_name = gut
+pkg_gut_description = gut is a template printing, aka scaffolding, tool for Erlang. Like rails generate or yeoman
+pkg_gut_homepage = https://github.com/unbalancedparentheses/gut
+pkg_gut_fetch = git
+pkg_gut_repo = https://github.com/unbalancedparentheses/gut
+pkg_gut_commit = master
+
+PACKAGES += hackney
+pkg_hackney_name = hackney
+pkg_hackney_description = simple HTTP client in Erlang
+pkg_hackney_homepage = https://github.com/benoitc/hackney
+pkg_hackney_fetch = git
+pkg_hackney_repo = https://github.com/benoitc/hackney
+pkg_hackney_commit = master
+
+PACKAGES += hamcrest
+pkg_hamcrest_name = hamcrest
+pkg_hamcrest_description = Erlang port of Hamcrest
+pkg_hamcrest_homepage = https://github.com/hyperthunk/hamcrest-erlang
+pkg_hamcrest_fetch = git
+pkg_hamcrest_repo = https://github.com/hyperthunk/hamcrest-erlang
+pkg_hamcrest_commit = master
+
+PACKAGES += hanoidb
+pkg_hanoidb_name = hanoidb
+pkg_hanoidb_description = Erlang LSM BTree Storage
+pkg_hanoidb_homepage = https://github.com/krestenkrab/hanoidb
+pkg_hanoidb_fetch = git
+pkg_hanoidb_repo = https://github.com/krestenkrab/hanoidb
+pkg_hanoidb_commit = master
+
+PACKAGES += hottub
+pkg_hottub_name = hottub
+pkg_hottub_description = Permanent Erlang Worker Pool
+pkg_hottub_homepage = https://github.com/bfrog/hottub
+pkg_hottub_fetch = git
+pkg_hottub_repo = https://github.com/bfrog/hottub
+pkg_hottub_commit = master
+
+PACKAGES += hpack
+pkg_hpack_name = hpack
+pkg_hpack_description = HPACK Implementation for Erlang
+pkg_hpack_homepage = https://github.com/joedevivo/hpack
+pkg_hpack_fetch = git
+pkg_hpack_repo = https://github.com/joedevivo/hpack
+pkg_hpack_commit = master
+
+PACKAGES += hyper
+pkg_hyper_name = hyper
+pkg_hyper_description = Erlang implementation of HyperLogLog
+pkg_hyper_homepage = https://github.com/GameAnalytics/hyper
+pkg_hyper_fetch = git
+pkg_hyper_repo = https://github.com/GameAnalytics/hyper
+pkg_hyper_commit = master
+
+PACKAGES += i18n
+pkg_i18n_name = i18n
+pkg_i18n_description = International components for unicode from Erlang (unicode, date, string, number, format, locale, localization, transliteration, icu4e)
+pkg_i18n_homepage = https://github.com/erlang-unicode/i18n
+pkg_i18n_fetch = git
+pkg_i18n_repo = https://github.com/erlang-unicode/i18n
+pkg_i18n_commit = master
+
+PACKAGES += ibrowse
+pkg_ibrowse_name = ibrowse
+pkg_ibrowse_description = Erlang HTTP client
+pkg_ibrowse_homepage = https://github.com/cmullaparthi/ibrowse
+pkg_ibrowse_fetch = git
+pkg_ibrowse_repo = https://github.com/cmullaparthi/ibrowse
+pkg_ibrowse_commit = master
+
+PACKAGES += idna
+pkg_idna_name = idna
+pkg_idna_description = Erlang IDNA lib
+pkg_idna_homepage = https://github.com/benoitc/erlang-idna
+pkg_idna_fetch = git
+pkg_idna_repo = https://github.com/benoitc/erlang-idna
+pkg_idna_commit = master
+
+PACKAGES += ierlang
+pkg_ierlang_name = ierlang
+pkg_ierlang_description = An Erlang language kernel for IPython.
+pkg_ierlang_homepage = https://github.com/robbielynch/ierlang
+pkg_ierlang_fetch = git
+pkg_ierlang_repo = https://github.com/robbielynch/ierlang
+pkg_ierlang_commit = master
+
+PACKAGES += iota
+pkg_iota_name = iota
+pkg_iota_description = iota (Inter-dependency Objective Testing Apparatus) - a tool to enforce clean separation of responsibilities in Erlang code
+pkg_iota_homepage = https://github.com/jpgneves/iota
+pkg_iota_fetch = git
+pkg_iota_repo = https://github.com/jpgneves/iota
+pkg_iota_commit = master
+
+PACKAGES += irc_lib
+pkg_irc_lib_name = irc_lib
+pkg_irc_lib_description = Erlang irc client library
+pkg_irc_lib_homepage = https://github.com/OtpChatBot/irc_lib
+pkg_irc_lib_fetch = git
+pkg_irc_lib_repo = https://github.com/OtpChatBot/irc_lib
+pkg_irc_lib_commit = master
+
+PACKAGES += ircd
+pkg_ircd_name = ircd
+pkg_ircd_description = A pluggable IRC daemon application/library for Erlang.
+pkg_ircd_homepage = https://github.com/tonyg/erlang-ircd
+pkg_ircd_fetch = git
+pkg_ircd_repo = https://github.com/tonyg/erlang-ircd
+pkg_ircd_commit = master
+
+PACKAGES += iris
+pkg_iris_name = iris
+pkg_iris_description = Iris Erlang binding
+pkg_iris_homepage = https://github.com/project-iris/iris-erl
+pkg_iris_fetch = git
+pkg_iris_repo = https://github.com/project-iris/iris-erl
+pkg_iris_commit = master
+
+PACKAGES += iso8601
+pkg_iso8601_name = iso8601
+pkg_iso8601_description = Erlang ISO 8601 date formatter/parser
+pkg_iso8601_homepage = https://github.com/seansawyer/erlang_iso8601
+pkg_iso8601_fetch = git
+pkg_iso8601_repo = https://github.com/seansawyer/erlang_iso8601
+pkg_iso8601_commit = master
+
+PACKAGES += jamdb_sybase
+pkg_jamdb_sybase_name = jamdb_sybase
+pkg_jamdb_sybase_description = Erlang driver for SAP Sybase ASE
+pkg_jamdb_sybase_homepage = https://github.com/erlangbureau/jamdb_sybase
+pkg_jamdb_sybase_fetch = git
+pkg_jamdb_sybase_repo = https://github.com/erlangbureau/jamdb_sybase
+pkg_jamdb_sybase_commit = master
+
+PACKAGES += jerg
+pkg_jerg_name = jerg
+pkg_jerg_description = JSON Schema to Erlang Records Generator
+pkg_jerg_homepage = https://github.com/ddossot/jerg
+pkg_jerg_fetch = git
+pkg_jerg_repo = https://github.com/ddossot/jerg
+pkg_jerg_commit = master
+
+PACKAGES += jesse
+pkg_jesse_name = jesse
+pkg_jesse_description = jesse (JSon Schema Erlang) is an implementation of a json schema validator for Erlang.
+pkg_jesse_homepage = https://github.com/for-GET/jesse
+pkg_jesse_fetch = git
+pkg_jesse_repo = https://github.com/for-GET/jesse
+pkg_jesse_commit = master
+
+PACKAGES += jiffy
+pkg_jiffy_name = jiffy
+pkg_jiffy_description = JSON NIFs for Erlang.
+pkg_jiffy_homepage = https://github.com/davisp/jiffy
+pkg_jiffy_fetch = git
+pkg_jiffy_repo = https://github.com/davisp/jiffy
+pkg_jiffy_commit = master
+
+PACKAGES += jiffy_v
+pkg_jiffy_v_name = jiffy_v
+pkg_jiffy_v_description = JSON validation utility
+pkg_jiffy_v_homepage = https://github.com/shizzard/jiffy-v
+pkg_jiffy_v_fetch = git
+pkg_jiffy_v_repo = https://github.com/shizzard/jiffy-v
+pkg_jiffy_v_commit = master
+
+PACKAGES += jobs
+pkg_jobs_name = jobs
+pkg_jobs_description = a Job scheduler for load regulation
+pkg_jobs_homepage = https://github.com/esl/jobs
+pkg_jobs_fetch = git
+pkg_jobs_repo = https://github.com/esl/jobs
+pkg_jobs_commit = master
+
+PACKAGES += joxa
+pkg_joxa_name = joxa
+pkg_joxa_description = A Modern Lisp for the Erlang VM
+pkg_joxa_homepage = https://github.com/joxa/joxa
+pkg_joxa_fetch = git
+pkg_joxa_repo = https://github.com/joxa/joxa
+pkg_joxa_commit = master
+
+PACKAGES += json
+pkg_json_name = json
+pkg_json_description = a high level json library for erlang (17.0+)
+pkg_json_homepage = https://github.com/talentdeficit/json
+pkg_json_fetch = git
+pkg_json_repo = https://github.com/talentdeficit/json
+pkg_json_commit = master
+
+PACKAGES += json_rec
+pkg_json_rec_name = json_rec
+pkg_json_rec_description = JSON to erlang record
+pkg_json_rec_homepage = https://github.com/justinkirby/json_rec
+pkg_json_rec_fetch = git
+pkg_json_rec_repo = https://github.com/justinkirby/json_rec
+pkg_json_rec_commit = master
+
+PACKAGES += jsone
+pkg_jsone_name = jsone
+pkg_jsone_description = An Erlang library for encoding, decoding JSON data.
+pkg_jsone_homepage = https://github.com/sile/jsone
+pkg_jsone_fetch = git
+pkg_jsone_repo = https://github.com/sile/jsone.git
+pkg_jsone_commit = master
+
+PACKAGES += jsonerl
+pkg_jsonerl_name = jsonerl
+pkg_jsonerl_description = yet another but slightly different erlang <-> json encoder/decoder
+pkg_jsonerl_homepage = https://github.com/lambder/jsonerl
+pkg_jsonerl_fetch = git
+pkg_jsonerl_repo = https://github.com/lambder/jsonerl
+pkg_jsonerl_commit = master
+
+PACKAGES += jsonpath
+pkg_jsonpath_name = jsonpath
+pkg_jsonpath_description = Fast Erlang JSON data retrieval and updates via javascript-like notation
+pkg_jsonpath_homepage = https://github.com/GeneStevens/jsonpath
+pkg_jsonpath_fetch = git
+pkg_jsonpath_repo = https://github.com/GeneStevens/jsonpath
+pkg_jsonpath_commit = master
+
+PACKAGES += jsonx
+pkg_jsonx_name = jsonx
+pkg_jsonx_description = JSONX is an Erlang library for efficient JSON decoding and encoding, written in C.
+pkg_jsonx_homepage = https://github.com/iskra/jsonx
+pkg_jsonx_fetch = git
+pkg_jsonx_repo = https://github.com/iskra/jsonx
+pkg_jsonx_commit = master
+
+PACKAGES += jsx
+pkg_jsx_name = jsx
+pkg_jsx_description = An Erlang application for consuming, producing and manipulating JSON.
+pkg_jsx_homepage = https://github.com/talentdeficit/jsx
+pkg_jsx_fetch = git
+pkg_jsx_repo = https://github.com/talentdeficit/jsx
+pkg_jsx_commit = master
+
+PACKAGES += kafka
+pkg_kafka_name = kafka
+pkg_kafka_description = Kafka consumer and producer in Erlang
+pkg_kafka_homepage = https://github.com/wooga/kafka-erlang
+pkg_kafka_fetch = git
+pkg_kafka_repo = https://github.com/wooga/kafka-erlang
+pkg_kafka_commit = master
+
+PACKAGES += kafka_protocol
+pkg_kafka_protocol_name = kafka_protocol
+pkg_kafka_protocol_description = Kafka protocol Erlang library
+pkg_kafka_protocol_homepage = https://github.com/klarna/kafka_protocol
+pkg_kafka_protocol_fetch = git
+pkg_kafka_protocol_repo = https://github.com/klarna/kafka_protocol.git
+pkg_kafka_protocol_commit = master
+
+PACKAGES += kai
+pkg_kai_name = kai
+pkg_kai_description = DHT storage by Takeshi Inoue
+pkg_kai_homepage = https://github.com/synrc/kai
+pkg_kai_fetch = git
+pkg_kai_repo = https://github.com/synrc/kai
+pkg_kai_commit = master
+
+PACKAGES += katja
+pkg_katja_name = katja
+pkg_katja_description = A simple Riemann client written in Erlang.
+pkg_katja_homepage = https://github.com/nifoc/katja
+pkg_katja_fetch = git
+pkg_katja_repo = https://github.com/nifoc/katja
+pkg_katja_commit = master
+
+PACKAGES += kdht
+pkg_kdht_name = kdht
+pkg_kdht_description = kdht is an erlang DHT implementation
+pkg_kdht_homepage = https://github.com/kevinlynx/kdht
+pkg_kdht_fetch = git
+pkg_kdht_repo = https://github.com/kevinlynx/kdht
+pkg_kdht_commit = master
+
+PACKAGES += key2value
+pkg_key2value_name = key2value
+pkg_key2value_description = Erlang 2-way map
+pkg_key2value_homepage = https://github.com/okeuday/key2value
+pkg_key2value_fetch = git
+pkg_key2value_repo = https://github.com/okeuday/key2value
+pkg_key2value_commit = master
+
+PACKAGES += keys1value
+pkg_keys1value_name = keys1value
+pkg_keys1value_description = Erlang set associative map for key lists
+pkg_keys1value_homepage = https://github.com/okeuday/keys1value
+pkg_keys1value_fetch = git
+pkg_keys1value_repo = https://github.com/okeuday/keys1value
+pkg_keys1value_commit = master
+
+PACKAGES += kinetic
+pkg_kinetic_name = kinetic
+pkg_kinetic_description = Erlang Kinesis Client
+pkg_kinetic_homepage = https://github.com/AdRoll/kinetic
+pkg_kinetic_fetch = git
+pkg_kinetic_repo = https://github.com/AdRoll/kinetic
+pkg_kinetic_commit = master
+
+PACKAGES += kjell
+pkg_kjell_name = kjell
+pkg_kjell_description = Erlang Shell
+pkg_kjell_homepage = https://github.com/karlll/kjell
+pkg_kjell_fetch = git
+pkg_kjell_repo = https://github.com/karlll/kjell
+pkg_kjell_commit = master
+
+PACKAGES += kraken
+pkg_kraken_name = kraken
+pkg_kraken_description = Distributed Pubsub Server for Realtime Apps
+pkg_kraken_homepage = https://github.com/Asana/kraken
+pkg_kraken_fetch = git
+pkg_kraken_repo = https://github.com/Asana/kraken
+pkg_kraken_commit = master
+
+PACKAGES += kucumberl
+pkg_kucumberl_name = kucumberl
+pkg_kucumberl_description = A pure-erlang, open-source implementation of Cucumber
+pkg_kucumberl_homepage = https://github.com/openshine/kucumberl
+pkg_kucumberl_fetch = git
+pkg_kucumberl_repo = https://github.com/openshine/kucumberl
+pkg_kucumberl_commit = master
+
+PACKAGES += kvc
+pkg_kvc_name = kvc
+pkg_kvc_description = KVC - Key Value Coding for Erlang data structures
+pkg_kvc_homepage = https://github.com/etrepum/kvc
+pkg_kvc_fetch = git
+pkg_kvc_repo = https://github.com/etrepum/kvc
+pkg_kvc_commit = master
+
+PACKAGES += kvlists
+pkg_kvlists_name = kvlists
+pkg_kvlists_description = Lists of key-value pairs (decoded JSON) in Erlang
+pkg_kvlists_homepage = https://github.com/jcomellas/kvlists
+pkg_kvlists_fetch = git
+pkg_kvlists_repo = https://github.com/jcomellas/kvlists
+pkg_kvlists_commit = master
+
+PACKAGES += kvs
+pkg_kvs_name = kvs
+pkg_kvs_description = Container and Iterator
+pkg_kvs_homepage = https://github.com/synrc/kvs
+pkg_kvs_fetch = git
+pkg_kvs_repo = https://github.com/synrc/kvs
+pkg_kvs_commit = master
+
+PACKAGES += lager
+pkg_lager_name = lager
+pkg_lager_description = A logging framework for Erlang/OTP.
+pkg_lager_homepage = https://github.com/erlang-lager/lager
+pkg_lager_fetch = git
+pkg_lager_repo = https://github.com/erlang-lager/lager
+pkg_lager_commit = master
+
+PACKAGES += lager_amqp_backend
+pkg_lager_amqp_backend_name = lager_amqp_backend
+pkg_lager_amqp_backend_description = AMQP RabbitMQ Lager backend
+pkg_lager_amqp_backend_homepage = https://github.com/jbrisbin/lager_amqp_backend
+pkg_lager_amqp_backend_fetch = git
+pkg_lager_amqp_backend_repo = https://github.com/jbrisbin/lager_amqp_backend
+pkg_lager_amqp_backend_commit = master
+
+PACKAGES += lager_syslog
+pkg_lager_syslog_name = lager_syslog
+pkg_lager_syslog_description = Syslog backend for lager
+pkg_lager_syslog_homepage = https://github.com/erlang-lager/lager_syslog
+pkg_lager_syslog_fetch = git
+pkg_lager_syslog_repo = https://github.com/erlang-lager/lager_syslog
+pkg_lager_syslog_commit = master
+
+PACKAGES += lambdapad
+pkg_lambdapad_name = lambdapad
+pkg_lambdapad_description = Static site generator using Erlang. Yes, Erlang.
+pkg_lambdapad_homepage = https://github.com/gar1t/lambdapad
+pkg_lambdapad_fetch = git
+pkg_lambdapad_repo = https://github.com/gar1t/lambdapad
+pkg_lambdapad_commit = master
+
+PACKAGES += lasp
+pkg_lasp_name = lasp
+pkg_lasp_description = A Language for Distributed, Eventually Consistent Computations
+pkg_lasp_homepage = http://lasp-lang.org/
+pkg_lasp_fetch = git
+pkg_lasp_repo = https://github.com/lasp-lang/lasp
+pkg_lasp_commit = master
+
+PACKAGES += lasse
+pkg_lasse_name = lasse
+pkg_lasse_description = SSE handler for Cowboy
+pkg_lasse_homepage = https://github.com/inaka/lasse
+pkg_lasse_fetch = git
+pkg_lasse_repo = https://github.com/inaka/lasse
+pkg_lasse_commit = master
+
+PACKAGES += ldap
+pkg_ldap_name = ldap
+pkg_ldap_description = LDAP server written in Erlang
+pkg_ldap_homepage = https://github.com/spawnproc/ldap
+pkg_ldap_fetch = git
+pkg_ldap_repo = https://github.com/spawnproc/ldap
+pkg_ldap_commit = master
+
+PACKAGES += lethink
+pkg_lethink_name = lethink
+pkg_lethink_description = erlang driver for rethinkdb
+pkg_lethink_homepage = https://github.com/taybin/lethink
+pkg_lethink_fetch = git
+pkg_lethink_repo = https://github.com/taybin/lethink
+pkg_lethink_commit = master
+
+PACKAGES += lfe
+pkg_lfe_name = lfe
+pkg_lfe_description = Lisp Flavoured Erlang (LFE)
+pkg_lfe_homepage = https://github.com/rvirding/lfe
+pkg_lfe_fetch = git
+pkg_lfe_repo = https://github.com/rvirding/lfe
+pkg_lfe_commit = master
+
+PACKAGES += ling
+pkg_ling_name = ling
+pkg_ling_description = Erlang on Xen
+pkg_ling_homepage = https://github.com/cloudozer/ling
+pkg_ling_fetch = git
+pkg_ling_repo = https://github.com/cloudozer/ling
+pkg_ling_commit = master
+
+PACKAGES += live
+pkg_live_name = live
+pkg_live_description = Automated module and configuration reloader.
+pkg_live_homepage = http://ninenines.eu
+pkg_live_fetch = git
+pkg_live_repo = https://github.com/ninenines/live
+pkg_live_commit = master
+
+PACKAGES += lmq
+pkg_lmq_name = lmq
+pkg_lmq_description = Lightweight Message Queue
+pkg_lmq_homepage = https://github.com/iij/lmq
+pkg_lmq_fetch = git
+pkg_lmq_repo = https://github.com/iij/lmq
+pkg_lmq_commit = master
+
+PACKAGES += locker
+pkg_locker_name = locker
+pkg_locker_description = Atomic distributed 'check and set' for short-lived keys
+pkg_locker_homepage = https://github.com/wooga/locker
+pkg_locker_fetch = git
+pkg_locker_repo = https://github.com/wooga/locker
+pkg_locker_commit = master
+
+PACKAGES += locks
+pkg_locks_name = locks
+pkg_locks_description = A scalable, deadlock-resolving resource locker
+pkg_locks_homepage = https://github.com/uwiger/locks
+pkg_locks_fetch = git
+pkg_locks_repo = https://github.com/uwiger/locks
+pkg_locks_commit = master
+
+PACKAGES += log4erl
+pkg_log4erl_name = log4erl
+pkg_log4erl_description = A logger for erlang in the spirit of Log4J.
+pkg_log4erl_homepage = https://github.com/ahmednawras/log4erl
+pkg_log4erl_fetch = git
+pkg_log4erl_repo = https://github.com/ahmednawras/log4erl
+pkg_log4erl_commit = master
+
+PACKAGES += lol
+pkg_lol_name = lol
+pkg_lol_description = Lisp on erLang, and programming is fun again
+pkg_lol_homepage = https://github.com/b0oh/lol
+pkg_lol_fetch = git
+pkg_lol_repo = https://github.com/b0oh/lol
+pkg_lol_commit = master
+
+PACKAGES += lucid
+pkg_lucid_name = lucid
+pkg_lucid_description = HTTP/2 server written in Erlang
+pkg_lucid_homepage = https://github.com/tatsuhiro-t/lucid
+pkg_lucid_fetch = git
+pkg_lucid_repo = https://github.com/tatsuhiro-t/lucid
+pkg_lucid_commit = master
+
+PACKAGES += luerl
+pkg_luerl_name = luerl
+pkg_luerl_description = Lua in Erlang
+pkg_luerl_homepage = https://github.com/rvirding/luerl
+pkg_luerl_fetch = git
+pkg_luerl_repo = https://github.com/rvirding/luerl
+pkg_luerl_commit = develop
+
+PACKAGES += luwak
+pkg_luwak_name = luwak
+pkg_luwak_description = Large-object storage interface for Riak
+pkg_luwak_homepage = https://github.com/basho/luwak
+pkg_luwak_fetch = git
+pkg_luwak_repo = https://github.com/basho/luwak
+pkg_luwak_commit = master
+
+PACKAGES += lux
+pkg_lux_name = lux
+pkg_lux_description = Lux (LUcid eXpect scripting) simplifies test automation and provides an Expect-style execution of commands
+pkg_lux_homepage = https://github.com/hawk/lux
+pkg_lux_fetch = git
+pkg_lux_repo = https://github.com/hawk/lux
+pkg_lux_commit = master
+
+PACKAGES += machi
+pkg_machi_name = machi
+pkg_machi_description = Machi file store
+pkg_machi_homepage = https://github.com/basho/machi
+pkg_machi_fetch = git
+pkg_machi_repo = https://github.com/basho/machi
+pkg_machi_commit = master
+
+PACKAGES += mad
+pkg_mad_name = mad
+pkg_mad_description = Small and Fast Rebar Replacement
+pkg_mad_homepage = https://github.com/synrc/mad
+pkg_mad_fetch = git
+pkg_mad_repo = https://github.com/synrc/mad
+pkg_mad_commit = master
+
+PACKAGES += marina
+pkg_marina_name = marina
+pkg_marina_description = Non-blocking Erlang Cassandra CQL3 client
+pkg_marina_homepage = https://github.com/lpgauth/marina
+pkg_marina_fetch = git
+pkg_marina_repo = https://github.com/lpgauth/marina
+pkg_marina_commit = master
+
+PACKAGES += mavg
+pkg_mavg_name = mavg
+pkg_mavg_description = Erlang :: Exponential moving average library
+pkg_mavg_homepage = https://github.com/EchoTeam/mavg
+pkg_mavg_fetch = git
+pkg_mavg_repo = https://github.com/EchoTeam/mavg
+pkg_mavg_commit = master
+
+PACKAGES += mc_erl
+pkg_mc_erl_name = mc_erl
+pkg_mc_erl_description = mc-erl is a server for Minecraft 1.4.7 written in Erlang.
+pkg_mc_erl_homepage = https://github.com/clonejo/mc-erl
+pkg_mc_erl_fetch = git
+pkg_mc_erl_repo = https://github.com/clonejo/mc-erl
+pkg_mc_erl_commit = master
+
+PACKAGES += mcd
+pkg_mcd_name = mcd
+pkg_mcd_description = Fast memcached protocol client in pure Erlang
+pkg_mcd_homepage = https://github.com/EchoTeam/mcd
+pkg_mcd_fetch = git
+pkg_mcd_repo = https://github.com/EchoTeam/mcd
+pkg_mcd_commit = master
+
+PACKAGES += mcerlang
+pkg_mcerlang_name = mcerlang
+pkg_mcerlang_description = The McErlang model checker for Erlang
+pkg_mcerlang_homepage = https://github.com/fredlund/McErlang
+pkg_mcerlang_fetch = git
+pkg_mcerlang_repo = https://github.com/fredlund/McErlang
+pkg_mcerlang_commit = master
+
+PACKAGES += meck
+pkg_meck_name = meck
+pkg_meck_description = A mocking library for Erlang
+pkg_meck_homepage = https://github.com/eproxus/meck
+pkg_meck_fetch = git
+pkg_meck_repo = https://github.com/eproxus/meck
+pkg_meck_commit = master
+
+PACKAGES += mekao
+pkg_mekao_name = mekao
+pkg_mekao_description = SQL constructor
+pkg_mekao_homepage = https://github.com/ddosia/mekao
+pkg_mekao_fetch = git
+pkg_mekao_repo = https://github.com/ddosia/mekao
+pkg_mekao_commit = master
+
+PACKAGES += memo
+pkg_memo_name = memo
+pkg_memo_description = Erlang memoization server
+pkg_memo_homepage = https://github.com/tuncer/memo
+pkg_memo_fetch = git
+pkg_memo_repo = https://github.com/tuncer/memo
+pkg_memo_commit = master
+
+PACKAGES += merge_index
+pkg_merge_index_name = merge_index
+pkg_merge_index_description = MergeIndex is an Erlang library for storing ordered sets on disk. It is very similar to an SSTable (in Google's Bigtable) or an HFile (in Hadoop).
+pkg_merge_index_homepage = https://github.com/basho/merge_index
+pkg_merge_index_fetch = git
+pkg_merge_index_repo = https://github.com/basho/merge_index
+pkg_merge_index_commit = master
+
+PACKAGES += merl
+pkg_merl_name = merl
+pkg_merl_description = Metaprogramming in Erlang
+pkg_merl_homepage = https://github.com/richcarl/merl
+pkg_merl_fetch = git
+pkg_merl_repo = https://github.com/richcarl/merl
+pkg_merl_commit = master
+
+PACKAGES += mimerl
+pkg_mimerl_name = mimerl
+pkg_mimerl_description = library to handle mimetypes
+pkg_mimerl_homepage = https://github.com/benoitc/mimerl
+pkg_mimerl_fetch = git
+pkg_mimerl_repo = https://github.com/benoitc/mimerl
+pkg_mimerl_commit = master
+
+PACKAGES += mimetypes
+pkg_mimetypes_name = mimetypes
+pkg_mimetypes_description = Erlang MIME types library
+pkg_mimetypes_homepage = https://github.com/spawngrid/mimetypes
+pkg_mimetypes_fetch = git
+pkg_mimetypes_repo = https://github.com/spawngrid/mimetypes
+pkg_mimetypes_commit = master
+
+PACKAGES += mixer
+pkg_mixer_name = mixer
+pkg_mixer_description = Mix in functions from other modules
+pkg_mixer_homepage = https://github.com/chef/mixer
+pkg_mixer_fetch = git
+pkg_mixer_repo = https://github.com/chef/mixer
+pkg_mixer_commit = master
+
+PACKAGES += mochiweb
+pkg_mochiweb_name = mochiweb
+pkg_mochiweb_description = MochiWeb is an Erlang library for building lightweight HTTP servers.
+pkg_mochiweb_homepage = https://github.com/mochi/mochiweb
+pkg_mochiweb_fetch = git
+pkg_mochiweb_repo = https://github.com/mochi/mochiweb
+pkg_mochiweb_commit = master
+
+PACKAGES += mochiweb_xpath
+pkg_mochiweb_xpath_name = mochiweb_xpath
+pkg_mochiweb_xpath_description = XPath support for mochiweb's html parser
+pkg_mochiweb_xpath_homepage = https://github.com/retnuh/mochiweb_xpath
+pkg_mochiweb_xpath_fetch = git
+pkg_mochiweb_xpath_repo = https://github.com/retnuh/mochiweb_xpath
+pkg_mochiweb_xpath_commit = master
+
+PACKAGES += mockgyver
+pkg_mockgyver_name = mockgyver
+pkg_mockgyver_description = A mocking library for Erlang
+pkg_mockgyver_homepage = https://github.com/klajo/mockgyver
+pkg_mockgyver_fetch = git
+pkg_mockgyver_repo = https://github.com/klajo/mockgyver
+pkg_mockgyver_commit = master
+
+PACKAGES += modlib
+pkg_modlib_name = modlib
+pkg_modlib_description = Web framework based on Erlang's inets httpd
+pkg_modlib_homepage = https://github.com/gar1t/modlib
+pkg_modlib_fetch = git
+pkg_modlib_repo = https://github.com/gar1t/modlib
+pkg_modlib_commit = master
+
+PACKAGES += mongodb
+pkg_mongodb_name = mongodb
+pkg_mongodb_description = MongoDB driver for Erlang
+pkg_mongodb_homepage = https://github.com/comtihon/mongodb-erlang
+pkg_mongodb_fetch = git
+pkg_mongodb_repo = https://github.com/comtihon/mongodb-erlang
+pkg_mongodb_commit = master
+
+PACKAGES += mongooseim
+pkg_mongooseim_name = mongooseim
+pkg_mongooseim_description = Jabber / XMPP server with focus on performance and scalability, by Erlang Solutions
+pkg_mongooseim_homepage = https://www.erlang-solutions.com/products/mongooseim-massively-scalable-ejabberd-platform
+pkg_mongooseim_fetch = git
+pkg_mongooseim_repo = https://github.com/esl/MongooseIM
+pkg_mongooseim_commit = master
+
+PACKAGES += moyo
+pkg_moyo_name = moyo
+pkg_moyo_description = Erlang utility functions library
+pkg_moyo_homepage = https://github.com/dwango/moyo
+pkg_moyo_fetch = git
+pkg_moyo_repo = https://github.com/dwango/moyo
+pkg_moyo_commit = master
+
+PACKAGES += msgpack
+pkg_msgpack_name = msgpack
+pkg_msgpack_description = MessagePack (de)serializer implementation for Erlang
+pkg_msgpack_homepage = https://github.com/msgpack/msgpack-erlang
+pkg_msgpack_fetch = git
+pkg_msgpack_repo = https://github.com/msgpack/msgpack-erlang
+pkg_msgpack_commit = master
+
+PACKAGES += mu2
+pkg_mu2_name = mu2
+pkg_mu2_description = Erlang mutation testing tool
+pkg_mu2_homepage = https://github.com/ramsay-t/mu2
+pkg_mu2_fetch = git
+pkg_mu2_repo = https://github.com/ramsay-t/mu2
+pkg_mu2_commit = master
+
+PACKAGES += mustache
+pkg_mustache_name = mustache
+pkg_mustache_description = Mustache template engine for Erlang.
+pkg_mustache_homepage = https://github.com/mojombo/mustache.erl
+pkg_mustache_fetch = git
+pkg_mustache_repo = https://github.com/mojombo/mustache.erl
+pkg_mustache_commit = master
+
+PACKAGES += myproto
+pkg_myproto_name = myproto
+pkg_myproto_description = MySQL Server Protocol in Erlang
+pkg_myproto_homepage = https://github.com/altenwald/myproto
+pkg_myproto_fetch = git
+pkg_myproto_repo = https://github.com/altenwald/myproto
+pkg_myproto_commit = master
+
+PACKAGES += mysql
+pkg_mysql_name = mysql
+pkg_mysql_description = MySQL client library for Erlang/OTP
+pkg_mysql_homepage = https://github.com/mysql-otp/mysql-otp
+pkg_mysql_fetch = git
+pkg_mysql_repo = https://github.com/mysql-otp/mysql-otp
+pkg_mysql_commit = 1.5.1
+
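+# mysql above is one of the few entries pinned to a release tag (1.5.1)
+# instead of a branch. A sketch of overriding the whole fetch spec per
+# project, should a different tag be needed (the tag is hypothetical):
+#
+#   DEPS = mysql
+#   dep_mysql = git https://github.com/mysql-otp/mysql-otp 1.6.0
+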
+PACKAGES += n2o
+pkg_n2o_name = n2o
+pkg_n2o_description = WebSocket Application Server
+pkg_n2o_homepage = https://github.com/5HT/n2o
+pkg_n2o_fetch = git
+pkg_n2o_repo = https://github.com/5HT/n2o
+pkg_n2o_commit = master
+
+PACKAGES += nat_upnp
+pkg_nat_upnp_name = nat_upnp
+pkg_nat_upnp_description = Erlang library to map your internal port to an external one using UPnP IGD
+pkg_nat_upnp_homepage = https://github.com/benoitc/nat_upnp
+pkg_nat_upnp_fetch = git
+pkg_nat_upnp_repo = https://github.com/benoitc/nat_upnp
+pkg_nat_upnp_commit = master
+
+PACKAGES += neo4j
+pkg_neo4j_name = neo4j
+pkg_neo4j_description = Erlang client library for Neo4J.
+pkg_neo4j_homepage = https://github.com/dmitriid/neo4j-erlang
+pkg_neo4j_fetch = git
+pkg_neo4j_repo = https://github.com/dmitriid/neo4j-erlang
+pkg_neo4j_commit = master
+
+PACKAGES += neotoma
+pkg_neotoma_name = neotoma
+pkg_neotoma_description = Erlang library and packrat parser-generator for parsing expression grammars.
+pkg_neotoma_homepage = https://github.com/seancribbs/neotoma
+pkg_neotoma_fetch = git
+pkg_neotoma_repo = https://github.com/seancribbs/neotoma
+pkg_neotoma_commit = master
+
+PACKAGES += newrelic
+pkg_newrelic_name = newrelic
+pkg_newrelic_description = Erlang library for sending metrics to New Relic
+pkg_newrelic_homepage = https://github.com/wooga/newrelic-erlang
+pkg_newrelic_fetch = git
+pkg_newrelic_repo = https://github.com/wooga/newrelic-erlang
+pkg_newrelic_commit = master
+
+PACKAGES += nifty
+pkg_nifty_name = nifty
+pkg_nifty_description = Erlang NIF wrapper generator
+pkg_nifty_homepage = https://github.com/parapluu/nifty
+pkg_nifty_fetch = git
+pkg_nifty_repo = https://github.com/parapluu/nifty
+pkg_nifty_commit = master
+
+PACKAGES += nitrogen_core
+pkg_nitrogen_core_name = nitrogen_core
+pkg_nitrogen_core_description = The core Nitrogen library.
+pkg_nitrogen_core_homepage = http://nitrogenproject.com/
+pkg_nitrogen_core_fetch = git
+pkg_nitrogen_core_repo = https://github.com/nitrogen/nitrogen_core
+pkg_nitrogen_core_commit = master
+
+PACKAGES += nkbase
+pkg_nkbase_name = nkbase
+pkg_nkbase_description = NkBASE distributed database
+pkg_nkbase_homepage = https://github.com/Nekso/nkbase
+pkg_nkbase_fetch = git
+pkg_nkbase_repo = https://github.com/Nekso/nkbase
+pkg_nkbase_commit = develop
+
+PACKAGES += nkdocker
+pkg_nkdocker_name = nkdocker
+pkg_nkdocker_description = Erlang Docker client
+pkg_nkdocker_homepage = https://github.com/Nekso/nkdocker
+pkg_nkdocker_fetch = git
+pkg_nkdocker_repo = https://github.com/Nekso/nkdocker
+pkg_nkdocker_commit = master
+
+PACKAGES += nkpacket
+pkg_nkpacket_name = nkpacket
+pkg_nkpacket_description = Generic Erlang transport layer
+pkg_nkpacket_homepage = https://github.com/Nekso/nkpacket
+pkg_nkpacket_fetch = git
+pkg_nkpacket_repo = https://github.com/Nekso/nkpacket
+pkg_nkpacket_commit = master
+
+PACKAGES += nksip
+pkg_nksip_name = nksip
+pkg_nksip_description = Erlang SIP application server
+pkg_nksip_homepage = https://github.com/kalta/nksip
+pkg_nksip_fetch = git
+pkg_nksip_repo = https://github.com/kalta/nksip
+pkg_nksip_commit = master
+
+PACKAGES += nodefinder
+pkg_nodefinder_name = nodefinder
+pkg_nodefinder_description = automatic node discovery via UDP multicast
+pkg_nodefinder_homepage = https://github.com/erlanger/nodefinder
+pkg_nodefinder_fetch = git
+pkg_nodefinder_repo = https://github.com/okeuday/nodefinder
+pkg_nodefinder_commit = master
+
+PACKAGES += nprocreg
+pkg_nprocreg_name = nprocreg
+pkg_nprocreg_description = Minimal Distributed Erlang Process Registry
+pkg_nprocreg_homepage = http://nitrogenproject.com/
+pkg_nprocreg_fetch = git
+pkg_nprocreg_repo = https://github.com/nitrogen/nprocreg
+pkg_nprocreg_commit = master
+
+PACKAGES += oauth
+pkg_oauth_name = oauth
+pkg_oauth_description = An Erlang OAuth 1.0 implementation
+pkg_oauth_homepage = https://github.com/tim/erlang-oauth
+pkg_oauth_fetch = git
+pkg_oauth_repo = https://github.com/tim/erlang-oauth
+pkg_oauth_commit = master
+
+PACKAGES += oauth2
+pkg_oauth2_name = oauth2
+pkg_oauth2_description = Erlang Oauth2 implementation
+pkg_oauth2_homepage = https://github.com/kivra/oauth2
+pkg_oauth2_fetch = git
+pkg_oauth2_repo = https://github.com/kivra/oauth2
+pkg_oauth2_commit = master
+
+PACKAGES += observer_cli
+pkg_observer_cli_name = observer_cli
+pkg_observer_cli_description = Visualize Erlang/Elixir Nodes On The Command Line
+pkg_observer_cli_homepage = http://zhongwencool.github.io/observer_cli
+pkg_observer_cli_fetch = git
+pkg_observer_cli_repo = https://github.com/zhongwencool/observer_cli
+pkg_observer_cli_commit = master
+
+PACKAGES += octopus
+pkg_octopus_name = octopus
+pkg_octopus_description = Small and flexible pool manager written in Erlang
+pkg_octopus_homepage = https://github.com/erlangbureau/octopus
+pkg_octopus_fetch = git
+pkg_octopus_repo = https://github.com/erlangbureau/octopus
+pkg_octopus_commit = master
+
+PACKAGES += of_protocol
+pkg_of_protocol_name = of_protocol
+pkg_of_protocol_description = OpenFlow Protocol Library for Erlang
+pkg_of_protocol_homepage = https://github.com/FlowForwarding/of_protocol
+pkg_of_protocol_fetch = git
+pkg_of_protocol_repo = https://github.com/FlowForwarding/of_protocol
+pkg_of_protocol_commit = master
+
+PACKAGES += opencouch
+pkg_opencouch_name = couch
+pkg_opencouch_description = An embeddable document-oriented database compatible with Apache CouchDB
+pkg_opencouch_homepage = https://github.com/benoitc/opencouch
+pkg_opencouch_fetch = git
+pkg_opencouch_repo = https://github.com/benoitc/opencouch
+pkg_opencouch_commit = master
+
+PACKAGES += openflow
+pkg_openflow_name = openflow
+pkg_openflow_description = An OpenFlow controller written in pure erlang
+pkg_openflow_homepage = https://github.com/renatoaguiar/erlang-openflow
+pkg_openflow_fetch = git
+pkg_openflow_repo = https://github.com/renatoaguiar/erlang-openflow
+pkg_openflow_commit = master
+
+PACKAGES += openid
+pkg_openid_name = openid
+pkg_openid_description = Erlang OpenID
+pkg_openid_homepage = https://github.com/brendonh/erl_openid
+pkg_openid_fetch = git
+pkg_openid_repo = https://github.com/brendonh/erl_openid
+pkg_openid_commit = master
+
+PACKAGES += openpoker
+pkg_openpoker_name = openpoker
+pkg_openpoker_description = Genesis Texas hold'em Game Server
+pkg_openpoker_homepage = https://github.com/hpyhacking/openpoker
+pkg_openpoker_fetch = git
+pkg_openpoker_repo = https://github.com/hpyhacking/openpoker
+pkg_openpoker_commit = master
+
+PACKAGES += otpbp
+pkg_otpbp_name = otpbp
+pkg_otpbp_description = Parse transformer for using new OTP functions in old Erlang/OTP releases (R15, R16, 17, 18, 19)
+pkg_otpbp_homepage = https://github.com/Ledest/otpbp
+pkg_otpbp_fetch = git
+pkg_otpbp_repo = https://github.com/Ledest/otpbp
+pkg_otpbp_commit = master
+
+PACKAGES += pal
+pkg_pal_name = pal
+pkg_pal_description = Pragmatic Authentication Library
+pkg_pal_homepage = https://github.com/manifest/pal
+pkg_pal_fetch = git
+pkg_pal_repo = https://github.com/manifest/pal
+pkg_pal_commit = master
+
+PACKAGES += parse_trans
+pkg_parse_trans_name = parse_trans
+pkg_parse_trans_description = Parse transform utilities for Erlang
+pkg_parse_trans_homepage = https://github.com/uwiger/parse_trans
+pkg_parse_trans_fetch = git
+pkg_parse_trans_repo = https://github.com/uwiger/parse_trans
+pkg_parse_trans_commit = master
+
+PACKAGES += parsexml
+pkg_parsexml_name = parsexml
+pkg_parsexml_description = Simple DOM XML parser with a convenient and very simple API
+pkg_parsexml_homepage = https://github.com/maxlapshin/parsexml
+pkg_parsexml_fetch = git
+pkg_parsexml_repo = https://github.com/maxlapshin/parsexml
+pkg_parsexml_commit = master
+
+PACKAGES += partisan
+pkg_partisan_name = partisan
+pkg_partisan_description = High-performance, high-scalability distributed computing with Erlang and Elixir.
+pkg_partisan_homepage = http://partisan.cloud
+pkg_partisan_fetch = git
+pkg_partisan_repo = https://github.com/lasp-lang/partisan
+pkg_partisan_commit = master
+
+PACKAGES += pegjs
+pkg_pegjs_name = pegjs
+pkg_pegjs_description = An implementation of PEG.js grammar for Erlang.
+pkg_pegjs_homepage = https://github.com/dmitriid/pegjs
+pkg_pegjs_fetch = git
+pkg_pegjs_repo = https://github.com/dmitriid/pegjs
+pkg_pegjs_commit = master
+
+PACKAGES += percept2
+pkg_percept2_name = percept2
+pkg_percept2_description = Concurrent profiling tool for Erlang
+pkg_percept2_homepage = https://github.com/huiqing/percept2
+pkg_percept2_fetch = git
+pkg_percept2_repo = https://github.com/huiqing/percept2
+pkg_percept2_commit = master
+
+PACKAGES += pgo
+pkg_pgo_name = pgo
+pkg_pgo_description = Erlang Postgres client and connection pool
+pkg_pgo_homepage = https://github.com/erleans/pgo
+pkg_pgo_fetch = git
+pkg_pgo_repo = https://github.com/erleans/pgo.git
+pkg_pgo_commit = master
+
+PACKAGES += pgsql
+pkg_pgsql_name = pgsql
+pkg_pgsql_description = Erlang PostgreSQL driver
+pkg_pgsql_homepage = https://github.com/semiocast/pgsql
+pkg_pgsql_fetch = git
+pkg_pgsql_repo = https://github.com/semiocast/pgsql
+pkg_pgsql_commit = master
+
+PACKAGES += pkgx
+pkg_pkgx_name = pkgx
+pkg_pkgx_description = Build .deb packages from Erlang releases
+pkg_pkgx_homepage = https://github.com/arjan/pkgx
+pkg_pkgx_fetch = git
+pkg_pkgx_repo = https://github.com/arjan/pkgx
+pkg_pkgx_commit = master
+
+PACKAGES += pkt
+pkg_pkt_name = pkt
+pkg_pkt_description = Erlang network protocol library
+pkg_pkt_homepage = https://github.com/msantos/pkt
+pkg_pkt_fetch = git
+pkg_pkt_repo = https://github.com/msantos/pkt
+pkg_pkt_commit = master
+
+PACKAGES += plain_fsm
+pkg_plain_fsm_name = plain_fsm
+pkg_plain_fsm_description = A behaviour/support library for writing plain Erlang FSMs.
+pkg_plain_fsm_homepage = https://github.com/uwiger/plain_fsm
+pkg_plain_fsm_fetch = git
+pkg_plain_fsm_repo = https://github.com/uwiger/plain_fsm
+pkg_plain_fsm_commit = master
+
+PACKAGES += plumtree
+pkg_plumtree_name = plumtree
+pkg_plumtree_description = Epidemic Broadcast Trees
+pkg_plumtree_homepage = https://github.com/helium/plumtree
+pkg_plumtree_fetch = git
+pkg_plumtree_repo = https://github.com/helium/plumtree
+pkg_plumtree_commit = master
+
+PACKAGES += pmod_transform
+pkg_pmod_transform_name = pmod_transform
+pkg_pmod_transform_description = Parse transform for parameterized modules
+pkg_pmod_transform_homepage = https://github.com/erlang/pmod_transform
+pkg_pmod_transform_fetch = git
+pkg_pmod_transform_repo = https://github.com/erlang/pmod_transform
+pkg_pmod_transform_commit = master
+
+PACKAGES += pobox
+pkg_pobox_name = pobox
+pkg_pobox_description = External buffer processes to protect against mailbox overflow in Erlang
+pkg_pobox_homepage = https://github.com/ferd/pobox
+pkg_pobox_fetch = git
+pkg_pobox_repo = https://github.com/ferd/pobox
+pkg_pobox_commit = master
+
+PACKAGES += ponos
+pkg_ponos_name = ponos
+pkg_ponos_description = ponos is a simple yet powerful load generator written in erlang
+pkg_ponos_homepage = https://github.com/klarna/ponos
+pkg_ponos_fetch = git
+pkg_ponos_repo = https://github.com/klarna/ponos
+pkg_ponos_commit = master
+
+PACKAGES += poolboy
+pkg_poolboy_name = poolboy
+pkg_poolboy_description = A hunky Erlang worker pool factory
+pkg_poolboy_homepage = https://github.com/devinus/poolboy
+pkg_poolboy_fetch = git
+pkg_poolboy_repo = https://github.com/devinus/poolboy
+pkg_poolboy_commit = master
+
+PACKAGES += pooler
+pkg_pooler_name = pooler
+pkg_pooler_description = An OTP Process Pool Application
+pkg_pooler_homepage = https://github.com/seth/pooler
+pkg_pooler_fetch = git
+pkg_pooler_repo = https://github.com/seth/pooler
+pkg_pooler_commit = master
+
+PACKAGES += pqueue
+pkg_pqueue_name = pqueue
+pkg_pqueue_description = Erlang Priority Queues
+pkg_pqueue_homepage = https://github.com/okeuday/pqueue
+pkg_pqueue_fetch = git
+pkg_pqueue_repo = https://github.com/okeuday/pqueue
+pkg_pqueue_commit = master
+
+PACKAGES += procket
+pkg_procket_name = procket
+pkg_procket_description = Erlang interface to low level socket operations
+pkg_procket_homepage = http://blog.listincomprehension.com/search/label/procket
+pkg_procket_fetch = git
+pkg_procket_repo = https://github.com/msantos/procket
+pkg_procket_commit = master
+
+PACKAGES += prometheus
+pkg_prometheus_name = prometheus
+pkg_prometheus_description = Prometheus.io client in Erlang
+pkg_prometheus_homepage = https://github.com/deadtrickster/prometheus.erl
+pkg_prometheus_fetch = git
+pkg_prometheus_repo = https://github.com/deadtrickster/prometheus.erl
+pkg_prometheus_commit = master
+
+PACKAGES += prop
+pkg_prop_name = prop
+pkg_prop_description = An Erlang code scaffolding and generator system.
+pkg_prop_homepage = https://github.com/nuex/prop
+pkg_prop_fetch = git
+pkg_prop_repo = https://github.com/nuex/prop
+pkg_prop_commit = master
+
+PACKAGES += proper
+pkg_proper_name = proper
+pkg_proper_description = PropEr: a QuickCheck-inspired property-based testing tool for Erlang.
+pkg_proper_homepage = http://proper.softlab.ntua.gr
+pkg_proper_fetch = git
+pkg_proper_repo = https://github.com/manopapad/proper
+pkg_proper_commit = master
+
+PACKAGES += props
+pkg_props_name = props
+pkg_props_description = Property structure library
+pkg_props_homepage = https://github.com/greyarea/props
+pkg_props_fetch = git
+pkg_props_repo = https://github.com/greyarea/props
+pkg_props_commit = master
+
+PACKAGES += protobuffs
+pkg_protobuffs_name = protobuffs
+pkg_protobuffs_description = An implementation of Google's Protocol Buffers for Erlang, based on ngerakines/erlang_protobuffs.
+pkg_protobuffs_homepage = https://github.com/basho/erlang_protobuffs
+pkg_protobuffs_fetch = git
+pkg_protobuffs_repo = https://github.com/basho/erlang_protobuffs
+pkg_protobuffs_commit = master
+
+PACKAGES += psycho
+pkg_psycho_name = psycho
+pkg_psycho_description = HTTP server that provides a WSGI-like interface for applications and middleware.
+pkg_psycho_homepage = https://github.com/gar1t/psycho
+pkg_psycho_fetch = git
+pkg_psycho_repo = https://github.com/gar1t/psycho
+pkg_psycho_commit = master
+
+PACKAGES += purity
+pkg_purity_name = purity
+pkg_purity_description = A side-effect analyzer for Erlang
+pkg_purity_homepage = https://github.com/mpitid/purity
+pkg_purity_fetch = git
+pkg_purity_repo = https://github.com/mpitid/purity
+pkg_purity_commit = master
+
+PACKAGES += push_service
+pkg_push_service_name = push_service
+pkg_push_service_description = Push service
+pkg_push_service_homepage = https://github.com/hairyhum/push_service
+pkg_push_service_fetch = git
+pkg_push_service_repo = https://github.com/hairyhum/push_service
+pkg_push_service_commit = master
+
+PACKAGES += qdate
+pkg_qdate_name = qdate
+pkg_qdate_description = Date, time, and timezone parsing, formatting, and conversion for Erlang.
+pkg_qdate_homepage = https://github.com/choptastic/qdate
+pkg_qdate_fetch = git
+pkg_qdate_repo = https://github.com/choptastic/qdate
+pkg_qdate_commit = master
+
+PACKAGES += qrcode
+pkg_qrcode_name = qrcode
+pkg_qrcode_description = QR Code encoder in Erlang
+pkg_qrcode_homepage = https://github.com/komone/qrcode
+pkg_qrcode_fetch = git
+pkg_qrcode_repo = https://github.com/komone/qrcode
+pkg_qrcode_commit = master
+
+PACKAGES += quest
+pkg_quest_name = quest
+pkg_quest_description = Learn Erlang through this set of challenges. An interactive system for getting to know Erlang.
+pkg_quest_homepage = https://github.com/eriksoe/ErlangQuest
+pkg_quest_fetch = git
+pkg_quest_repo = https://github.com/eriksoe/ErlangQuest
+pkg_quest_commit = master
+
+PACKAGES += quickrand
+pkg_quickrand_name = quickrand
+pkg_quickrand_description = Quick Erlang Random Number Generation
+pkg_quickrand_homepage = https://github.com/okeuday/quickrand
+pkg_quickrand_fetch = git
+pkg_quickrand_repo = https://github.com/okeuday/quickrand
+pkg_quickrand_commit = master
+
+PACKAGES += rabbit
+pkg_rabbit_name = rabbit
+pkg_rabbit_description = RabbitMQ Server
+pkg_rabbit_homepage = https://www.rabbitmq.com/
+pkg_rabbit_fetch = git
+pkg_rabbit_repo = https://github.com/rabbitmq/rabbitmq-server.git
+pkg_rabbit_commit = master
+
+PACKAGES += rabbit_exchange_type_riak
+pkg_rabbit_exchange_type_riak_name = rabbit_exchange_type_riak
+pkg_rabbit_exchange_type_riak_description = Custom RabbitMQ exchange type for sticking messages in Riak
+pkg_rabbit_exchange_type_riak_homepage = https://github.com/jbrisbin/riak-exchange
+pkg_rabbit_exchange_type_riak_fetch = git
+pkg_rabbit_exchange_type_riak_repo = https://github.com/jbrisbin/riak-exchange
+pkg_rabbit_exchange_type_riak_commit = master
+
+PACKAGES += rack
+pkg_rack_name = rack
+pkg_rack_description = Rack handler for erlang
+pkg_rack_homepage = https://github.com/erlyvideo/rack
+pkg_rack_fetch = git
+pkg_rack_repo = https://github.com/erlyvideo/rack
+pkg_rack_commit = master
+
+PACKAGES += radierl
+pkg_radierl_name = radierl
+pkg_radierl_description = RADIUS protocol stack implemented in Erlang.
+pkg_radierl_homepage = https://github.com/vances/radierl
+pkg_radierl_fetch = git
+pkg_radierl_repo = https://github.com/vances/radierl
+pkg_radierl_commit = master
+
+PACKAGES += rafter
+pkg_rafter_name = rafter
+pkg_rafter_description = An Erlang library application which implements the Raft consensus protocol
+pkg_rafter_homepage = https://github.com/andrewjstone/rafter
+pkg_rafter_fetch = git
+pkg_rafter_repo = https://github.com/andrewjstone/rafter
+pkg_rafter_commit = master
+
+PACKAGES += ranch
+pkg_ranch_name = ranch
+pkg_ranch_description = Socket acceptor pool for TCP protocols.
+pkg_ranch_homepage = http://ninenines.eu
+pkg_ranch_fetch = git
+pkg_ranch_repo = https://github.com/ninenines/ranch
+pkg_ranch_commit = 1.2.1
+
+PACKAGES += rbeacon
+pkg_rbeacon_name = rbeacon
+pkg_rbeacon_description = LAN discovery and presence in Erlang.
+pkg_rbeacon_homepage = https://github.com/refuge/rbeacon
+pkg_rbeacon_fetch = git
+pkg_rbeacon_repo = https://github.com/refuge/rbeacon
+pkg_rbeacon_commit = master
+
+PACKAGES += rebar
+pkg_rebar_name = rebar
+pkg_rebar_description = Erlang build tool that makes it easy to compile and test Erlang applications, port drivers and releases.
+pkg_rebar_homepage = http://www.rebar3.org
+pkg_rebar_fetch = git
+pkg_rebar_repo = https://github.com/rebar/rebar3
+pkg_rebar_commit = master
+
+PACKAGES += rebus
+pkg_rebus_name = rebus
+pkg_rebus_description = A stupid simple, internal pub/sub event bus written in and for Erlang.
+pkg_rebus_homepage = https://github.com/olle/rebus
+pkg_rebus_fetch = git
+pkg_rebus_repo = https://github.com/olle/rebus
+pkg_rebus_commit = master
+
+PACKAGES += rec2json
+pkg_rec2json_name = rec2json
+pkg_rec2json_description = Compile erlang record definitions into modules to convert them to/from json easily.
+pkg_rec2json_homepage = https://github.com/lordnull/rec2json
+pkg_rec2json_fetch = git
+pkg_rec2json_repo = https://github.com/lordnull/rec2json
+pkg_rec2json_commit = master
+
+PACKAGES += recon
+pkg_recon_name = recon
+pkg_recon_description = Collection of functions and scripts to debug Erlang in production.
+pkg_recon_homepage = https://github.com/ferd/recon
+pkg_recon_fetch = git
+pkg_recon_repo = https://github.com/ferd/recon
+pkg_recon_commit = master
+
+PACKAGES += record_info
+pkg_record_info_name = record_info
+pkg_record_info_description = Convert between record and proplist
+pkg_record_info_homepage = https://github.com/bipthelin/erlang-record_info
+pkg_record_info_fetch = git
+pkg_record_info_repo = https://github.com/bipthelin/erlang-record_info
+pkg_record_info_commit = master
+
+PACKAGES += redgrid
+pkg_redgrid_name = redgrid
+pkg_redgrid_description = automatic Erlang node discovery via redis
+pkg_redgrid_homepage = https://github.com/jkvor/redgrid
+pkg_redgrid_fetch = git
+pkg_redgrid_repo = https://github.com/jkvor/redgrid
+pkg_redgrid_commit = master
+
+PACKAGES += redo
+pkg_redo_name = redo
+pkg_redo_description = pipelined erlang redis client
+pkg_redo_homepage = https://github.com/jkvor/redo
+pkg_redo_fetch = git
+pkg_redo_repo = https://github.com/jkvor/redo
+pkg_redo_commit = master
+
+PACKAGES += reload_mk
+pkg_reload_mk_name = reload_mk
+pkg_reload_mk_description = Live reload plugin for erlang.mk.
+pkg_reload_mk_homepage = https://github.com/bullno1/reload.mk
+pkg_reload_mk_fetch = git
+pkg_reload_mk_repo = https://github.com/bullno1/reload.mk
+pkg_reload_mk_commit = master
+
+PACKAGES += reltool_util
+pkg_reltool_util_name = reltool_util
+pkg_reltool_util_description = Erlang reltool utility functionality application
+pkg_reltool_util_homepage = https://github.com/okeuday/reltool_util
+pkg_reltool_util_fetch = git
+pkg_reltool_util_repo = https://github.com/okeuday/reltool_util
+pkg_reltool_util_commit = master
+
+PACKAGES += relx
+pkg_relx_name = relx
+pkg_relx_description = Sane, simple release creation for Erlang
+pkg_relx_homepage = https://github.com/erlware/relx
+pkg_relx_fetch = git
+pkg_relx_repo = https://github.com/erlware/relx
+pkg_relx_commit = master
+
+PACKAGES += resource_discovery
+pkg_resource_discovery_name = resource_discovery
+pkg_resource_discovery_description = An application used to dynamically discover resources present in an Erlang node cluster.
+pkg_resource_discovery_homepage = http://erlware.org/
+pkg_resource_discovery_fetch = git
+pkg_resource_discovery_repo = https://github.com/erlware/resource_discovery
+pkg_resource_discovery_commit = master
+
+PACKAGES += restc
+pkg_restc_name = restc
+pkg_restc_description = Erlang Rest Client
+pkg_restc_homepage = https://github.com/kivra/restclient
+pkg_restc_fetch = git
+pkg_restc_repo = https://github.com/kivra/restclient
+pkg_restc_commit = master
+
+PACKAGES += rfc4627_jsonrpc
+pkg_rfc4627_jsonrpc_name = rfc4627_jsonrpc
+pkg_rfc4627_jsonrpc_description = Erlang RFC4627 (JSON) codec and JSON-RPC server implementation.
+pkg_rfc4627_jsonrpc_homepage = https://github.com/tonyg/erlang-rfc4627
+pkg_rfc4627_jsonrpc_fetch = git
+pkg_rfc4627_jsonrpc_repo = https://github.com/tonyg/erlang-rfc4627
+pkg_rfc4627_jsonrpc_commit = master
+
+PACKAGES += riak_control
+pkg_riak_control_name = riak_control
+pkg_riak_control_description = Webmachine-based administration interface for Riak.
+pkg_riak_control_homepage = https://github.com/basho/riak_control
+pkg_riak_control_fetch = git
+pkg_riak_control_repo = https://github.com/basho/riak_control
+pkg_riak_control_commit = master
+
+PACKAGES += riak_core
+pkg_riak_core_name = riak_core
+pkg_riak_core_description = Distributed systems infrastructure used by Riak.
+pkg_riak_core_homepage = https://github.com/basho/riak_core
+pkg_riak_core_fetch = git
+pkg_riak_core_repo = https://github.com/basho/riak_core
+pkg_riak_core_commit = master
+
+PACKAGES += riak_dt
+pkg_riak_dt_name = riak_dt
+pkg_riak_dt_description = Convergent replicated datatypes in Erlang
+pkg_riak_dt_homepage = https://github.com/basho/riak_dt
+pkg_riak_dt_fetch = git
+pkg_riak_dt_repo = https://github.com/basho/riak_dt
+pkg_riak_dt_commit = master
+
+PACKAGES += riak_ensemble
+pkg_riak_ensemble_name = riak_ensemble
+pkg_riak_ensemble_description = Multi-Paxos framework in Erlang
+pkg_riak_ensemble_homepage = https://github.com/basho/riak_ensemble
+pkg_riak_ensemble_fetch = git
+pkg_riak_ensemble_repo = https://github.com/basho/riak_ensemble
+pkg_riak_ensemble_commit = master
+
+PACKAGES += riak_kv
+pkg_riak_kv_name = riak_kv
+pkg_riak_kv_description = Riak Key/Value Store
+pkg_riak_kv_homepage = https://github.com/basho/riak_kv
+pkg_riak_kv_fetch = git
+pkg_riak_kv_repo = https://github.com/basho/riak_kv
+pkg_riak_kv_commit = master
+
+PACKAGES += riak_pg
+pkg_riak_pg_name = riak_pg
+pkg_riak_pg_description = Distributed process groups with riak_core.
+pkg_riak_pg_homepage = https://github.com/cmeiklejohn/riak_pg
+pkg_riak_pg_fetch = git
+pkg_riak_pg_repo = https://github.com/cmeiklejohn/riak_pg
+pkg_riak_pg_commit = master
+
+PACKAGES += riak_pipe
+pkg_riak_pipe_name = riak_pipe
+pkg_riak_pipe_description = Riak Pipelines
+pkg_riak_pipe_homepage = https://github.com/basho/riak_pipe
+pkg_riak_pipe_fetch = git
+pkg_riak_pipe_repo = https://github.com/basho/riak_pipe
+pkg_riak_pipe_commit = master
+
+PACKAGES += riak_sysmon
+pkg_riak_sysmon_name = riak_sysmon
+pkg_riak_sysmon_description = Simple OTP app for managing Erlang VM system_monitor event messages
+pkg_riak_sysmon_homepage = https://github.com/basho/riak_sysmon
+pkg_riak_sysmon_fetch = git
+pkg_riak_sysmon_repo = https://github.com/basho/riak_sysmon
+pkg_riak_sysmon_commit = master
+
+PACKAGES += riak_test
+pkg_riak_test_name = riak_test
+pkg_riak_test_description = I'm in your cluster, testing your riaks
+pkg_riak_test_homepage = https://github.com/basho/riak_test
+pkg_riak_test_fetch = git
+pkg_riak_test_repo = https://github.com/basho/riak_test
+pkg_riak_test_commit = master
+
+PACKAGES += riakc
+pkg_riakc_name = riakc
+pkg_riakc_description = Erlang clients for Riak.
+pkg_riakc_homepage = https://github.com/basho/riak-erlang-client
+pkg_riakc_fetch = git
+pkg_riakc_repo = https://github.com/basho/riak-erlang-client
+pkg_riakc_commit = master
+
+PACKAGES += riakhttpc
+pkg_riakhttpc_name = riakhttpc
+pkg_riakhttpc_description = Riak Erlang client using the HTTP interface
+pkg_riakhttpc_homepage = https://github.com/basho/riak-erlang-http-client
+pkg_riakhttpc_fetch = git
+pkg_riakhttpc_repo = https://github.com/basho/riak-erlang-http-client
+pkg_riakhttpc_commit = master
+
+PACKAGES += riaknostic
+pkg_riaknostic_name = riaknostic
+pkg_riaknostic_description = A diagnostic tool for Riak installations, to find common errors asap
+pkg_riaknostic_homepage = https://github.com/basho/riaknostic
+pkg_riaknostic_fetch = git
+pkg_riaknostic_repo = https://github.com/basho/riaknostic
+pkg_riaknostic_commit = master
+
+PACKAGES += riakpool
+pkg_riakpool_name = riakpool
+pkg_riakpool_description = erlang riak client pool
+pkg_riakpool_homepage = https://github.com/dweldon/riakpool
+pkg_riakpool_fetch = git
+pkg_riakpool_repo = https://github.com/dweldon/riakpool
+pkg_riakpool_commit = master
+
+PACKAGES += rivus_cep
+pkg_rivus_cep_name = rivus_cep
+pkg_rivus_cep_description = Complex event processing in Erlang
+pkg_rivus_cep_homepage = https://github.com/vascokk/rivus_cep
+pkg_rivus_cep_fetch = git
+pkg_rivus_cep_repo = https://github.com/vascokk/rivus_cep
+pkg_rivus_cep_commit = master
+
+PACKAGES += rlimit
+pkg_rlimit_name = rlimit
+pkg_rlimit_description = Magnus Klaar's rate limiter code from etorrent
+pkg_rlimit_homepage = https://github.com/jlouis/rlimit
+pkg_rlimit_fetch = git
+pkg_rlimit_repo = https://github.com/jlouis/rlimit
+pkg_rlimit_commit = master
+
+PACKAGES += rust_mk
+pkg_rust_mk_name = rust_mk
+pkg_rust_mk_description = Build Rust crates in an Erlang application
+pkg_rust_mk_homepage = https://github.com/goertzenator/rust.mk
+pkg_rust_mk_fetch = git
+pkg_rust_mk_repo = https://github.com/goertzenator/rust.mk
+pkg_rust_mk_commit = master
+
+PACKAGES += safetyvalve
+pkg_safetyvalve_name = safetyvalve
+pkg_safetyvalve_description = A safety valve for your erlang node
+pkg_safetyvalve_homepage = https://github.com/jlouis/safetyvalve
+pkg_safetyvalve_fetch = git
+pkg_safetyvalve_repo = https://github.com/jlouis/safetyvalve
+pkg_safetyvalve_commit = master
+
+PACKAGES += seestar
+pkg_seestar_name = seestar
+pkg_seestar_description = The Erlang client for Cassandra 1.2+ binary protocol
+pkg_seestar_homepage = https://github.com/iamaleksey/seestar
+pkg_seestar_fetch = git
+pkg_seestar_repo = https://github.com/iamaleksey/seestar
+pkg_seestar_commit = master
+
+PACKAGES += service
+pkg_service_name = service
+pkg_service_description = A minimal Erlang behavior for creating CloudI internal services
+pkg_service_homepage = http://cloudi.org/
+pkg_service_fetch = git
+pkg_service_repo = https://github.com/CloudI/service
+pkg_service_commit = master
+
+PACKAGES += setup
+pkg_setup_name = setup
+pkg_setup_description = Generic setup utility for Erlang-based systems
+pkg_setup_homepage = https://github.com/uwiger/setup
+pkg_setup_fetch = git
+pkg_setup_repo = https://github.com/uwiger/setup
+pkg_setup_commit = master
+
+PACKAGES += sext
+pkg_sext_name = sext
+pkg_sext_description = Sortable Erlang Term Serialization
+pkg_sext_homepage = https://github.com/uwiger/sext
+pkg_sext_fetch = git
+pkg_sext_repo = https://github.com/uwiger/sext
+pkg_sext_commit = master
+
+PACKAGES += sfmt
+pkg_sfmt_name = sfmt
+pkg_sfmt_description = SFMT pseudo random number generator for Erlang.
+pkg_sfmt_homepage = https://github.com/jj1bdx/sfmt-erlang
+pkg_sfmt_fetch = git
+pkg_sfmt_repo = https://github.com/jj1bdx/sfmt-erlang
+pkg_sfmt_commit = master
+
+PACKAGES += sgte
+pkg_sgte_name = sgte
+pkg_sgte_description = A simple Erlang Template Engine
+pkg_sgte_homepage = https://github.com/filippo/sgte
+pkg_sgte_fetch = git
+pkg_sgte_repo = https://github.com/filippo/sgte
+pkg_sgte_commit = master
+
+PACKAGES += sheriff
+pkg_sheriff_name = sheriff
+pkg_sheriff_description = Parse transform for type based validation.
+pkg_sheriff_homepage = http://ninenines.eu
+pkg_sheriff_fetch = git
+pkg_sheriff_repo = https://github.com/extend/sheriff
+pkg_sheriff_commit = master
+
+PACKAGES += shotgun
+pkg_shotgun_name = shotgun
+pkg_shotgun_description = better than just a gun
+pkg_shotgun_homepage = https://github.com/inaka/shotgun
+pkg_shotgun_fetch = git
+pkg_shotgun_repo = https://github.com/inaka/shotgun
+pkg_shotgun_commit = master
+
+PACKAGES += sidejob
+pkg_sidejob_name = sidejob
+pkg_sidejob_description = Parallel worker and capacity limiting library for Erlang
+pkg_sidejob_homepage = https://github.com/basho/sidejob
+pkg_sidejob_fetch = git
+pkg_sidejob_repo = https://github.com/basho/sidejob
+pkg_sidejob_commit = master
+
+PACKAGES += sieve
+pkg_sieve_name = sieve
+pkg_sieve_description = sieve is a simple TCP routing proxy (layer 7) in erlang
+pkg_sieve_homepage = https://github.com/benoitc/sieve
+pkg_sieve_fetch = git
+pkg_sieve_repo = https://github.com/benoitc/sieve
+pkg_sieve_commit = master
+
+PACKAGES += sighandler
+pkg_sighandler_name = sighandler
+pkg_sighandler_description = Handle UNIX signals in Erlang
+pkg_sighandler_homepage = https://github.com/jkingsbery/sighandler
+pkg_sighandler_fetch = git
+pkg_sighandler_repo = https://github.com/jkingsbery/sighandler
+pkg_sighandler_commit = master
+
+PACKAGES += simhash
+pkg_simhash_name = simhash
+pkg_simhash_description = Simhashing for Erlang -- hashing algorithm to find near-duplicates in binary data.
+pkg_simhash_homepage = https://github.com/ferd/simhash
+pkg_simhash_fetch = git
+pkg_simhash_repo = https://github.com/ferd/simhash
+pkg_simhash_commit = master
+
+PACKAGES += simple_bridge
+pkg_simple_bridge_name = simple_bridge
+pkg_simple_bridge_description = A simple, standardized interface library to Erlang HTTP Servers.
+pkg_simple_bridge_homepage = https://github.com/nitrogen/simple_bridge
+pkg_simple_bridge_fetch = git
+pkg_simple_bridge_repo = https://github.com/nitrogen/simple_bridge
+pkg_simple_bridge_commit = master
+
+PACKAGES += simple_oauth2
+pkg_simple_oauth2_name = simple_oauth2
+pkg_simple_oauth2_description = Simple erlang OAuth2 client module for any http server framework (Google, Facebook, Yandex, Vkontakte are preconfigured)
+pkg_simple_oauth2_homepage = https://github.com/virtan/simple_oauth2
+pkg_simple_oauth2_fetch = git
+pkg_simple_oauth2_repo = https://github.com/virtan/simple_oauth2
+pkg_simple_oauth2_commit = master
+
+PACKAGES += skel
+pkg_skel_name = skel
+pkg_skel_description = A Streaming Process-based Skeleton Library for Erlang
+pkg_skel_homepage = https://github.com/ParaPhrase/skel
+pkg_skel_fetch = git
+pkg_skel_repo = https://github.com/ParaPhrase/skel
+pkg_skel_commit = master
+
+PACKAGES += slack
+pkg_slack_name = slack
+pkg_slack_description = Minimal slack notification OTP library.
+pkg_slack_homepage = https://github.com/DonBranson/slack
+pkg_slack_fetch = git
+pkg_slack_repo = https://github.com/DonBranson/slack.git
+pkg_slack_commit = master
+
+PACKAGES += smother
+pkg_smother_name = smother
+pkg_smother_description = Extended code coverage metrics for Erlang.
+pkg_smother_homepage = https://ramsay-t.github.io/Smother/
+pkg_smother_fetch = git
+pkg_smother_repo = https://github.com/ramsay-t/Smother
+pkg_smother_commit = master
+
+PACKAGES += snappyer
+pkg_snappyer_name = snappyer
+pkg_snappyer_description = Snappy as nif for Erlang
+pkg_snappyer_homepage = https://github.com/zmstone/snappyer
+pkg_snappyer_fetch = git
+pkg_snappyer_repo = https://github.com/zmstone/snappyer.git
+pkg_snappyer_commit = master
+
+PACKAGES += social
+pkg_social_name = social
+pkg_social_description = Cowboy handler for social login via OAuth2 providers
+pkg_social_homepage = https://github.com/dvv/social
+pkg_social_fetch = git
+pkg_social_repo = https://github.com/dvv/social
+pkg_social_commit = master
+
+PACKAGES += spapi_router
+pkg_spapi_router_name = spapi_router
+pkg_spapi_router_description = Partially-connected Erlang clustering
+pkg_spapi_router_homepage = https://github.com/spilgames/spapi-router
+pkg_spapi_router_fetch = git
+pkg_spapi_router_repo = https://github.com/spilgames/spapi-router
+pkg_spapi_router_commit = master
+
+PACKAGES += sqerl
+pkg_sqerl_name = sqerl
+pkg_sqerl_description = An Erlang-flavoured SQL DSL
+pkg_sqerl_homepage = https://github.com/hairyhum/sqerl
+pkg_sqerl_fetch = git
+pkg_sqerl_repo = https://github.com/hairyhum/sqerl
+pkg_sqerl_commit = master
+
+PACKAGES += srly
+pkg_srly_name = srly
+pkg_srly_description = Native Erlang Unix serial interface
+pkg_srly_homepage = https://github.com/msantos/srly
+pkg_srly_fetch = git
+pkg_srly_repo = https://github.com/msantos/srly
+pkg_srly_commit = master
+
+PACKAGES += sshrpc
+pkg_sshrpc_name = sshrpc
+pkg_sshrpc_description = Erlang SSH RPC module (experimental)
+pkg_sshrpc_homepage = https://github.com/jj1bdx/sshrpc
+pkg_sshrpc_fetch = git
+pkg_sshrpc_repo = https://github.com/jj1bdx/sshrpc
+pkg_sshrpc_commit = master
+
+PACKAGES += stable
+pkg_stable_name = stable
+pkg_stable_description = Library of assorted helpers for Cowboy web server.
+pkg_stable_homepage = https://github.com/dvv/stable
+pkg_stable_fetch = git
+pkg_stable_repo = https://github.com/dvv/stable
+pkg_stable_commit = master
+
+PACKAGES += statebox
+pkg_statebox_name = statebox
+pkg_statebox_description = Erlang state monad with merge/conflict-resolution capabilities. Useful for Riak.
+pkg_statebox_homepage = https://github.com/mochi/statebox
+pkg_statebox_fetch = git
+pkg_statebox_repo = https://github.com/mochi/statebox
+pkg_statebox_commit = master
+
+PACKAGES += statebox_riak
+pkg_statebox_riak_name = statebox_riak
+pkg_statebox_riak_description = Convenience library that makes it easier to use statebox with riak, extracted from best practices in our production code at Mochi Media.
+pkg_statebox_riak_homepage = https://github.com/mochi/statebox_riak
+pkg_statebox_riak_fetch = git
+pkg_statebox_riak_repo = https://github.com/mochi/statebox_riak
+pkg_statebox_riak_commit = master
+
+PACKAGES += statman
+pkg_statman_name = statman
+pkg_statman_description = Efficiently collect massive volumes of metrics inside the Erlang VM
+pkg_statman_homepage = https://github.com/knutin/statman
+pkg_statman_fetch = git
+pkg_statman_repo = https://github.com/knutin/statman
+pkg_statman_commit = master
+
+PACKAGES += statsderl
+pkg_statsderl_name = statsderl
+pkg_statsderl_description = StatsD client (erlang)
+pkg_statsderl_homepage = https://github.com/lpgauth/statsderl
+pkg_statsderl_fetch = git
+pkg_statsderl_repo = https://github.com/lpgauth/statsderl
+pkg_statsderl_commit = master
+
+PACKAGES += stdinout_pool
+pkg_stdinout_pool_name = stdinout_pool
+pkg_stdinout_pool_description = stdinout_pool : stuff goes in, stuff goes out. there's never any miscommunication.
+pkg_stdinout_pool_homepage = https://github.com/mattsta/erlang-stdinout-pool
+pkg_stdinout_pool_fetch = git
+pkg_stdinout_pool_repo = https://github.com/mattsta/erlang-stdinout-pool
+pkg_stdinout_pool_commit = master
+
+PACKAGES += stockdb
+pkg_stockdb_name = stockdb
+pkg_stockdb_description = Database for storing Stock Exchange quotes in erlang
+pkg_stockdb_homepage = https://github.com/maxlapshin/stockdb
+pkg_stockdb_fetch = git
+pkg_stockdb_repo = https://github.com/maxlapshin/stockdb
+pkg_stockdb_commit = master
+
+PACKAGES += stripe
+pkg_stripe_name = stripe
+pkg_stripe_description = Erlang interface to the stripe.com API
+pkg_stripe_homepage = https://github.com/mattsta/stripe-erlang
+pkg_stripe_fetch = git
+pkg_stripe_repo = https://github.com/mattsta/stripe-erlang
+pkg_stripe_commit = v1
+
+PACKAGES += subproc
+pkg_subproc_name = subproc
+pkg_subproc_description = unix subprocess manager with {active,once|false} modes
+pkg_subproc_homepage = http://dozzie.jarowit.net/trac/wiki/subproc
+pkg_subproc_fetch = git
+pkg_subproc_repo = https://github.com/dozzie/subproc
+pkg_subproc_commit = v0.1.0
+
+PACKAGES += supervisor3
+pkg_supervisor3_name = supervisor3
+pkg_supervisor3_description = OTP supervisor with additional strategies
+pkg_supervisor3_homepage = https://github.com/klarna/supervisor3
+pkg_supervisor3_fetch = git
+pkg_supervisor3_repo = https://github.com/klarna/supervisor3.git
+pkg_supervisor3_commit = master
+
+PACKAGES += surrogate
+pkg_surrogate_name = surrogate
+pkg_surrogate_description = Proxy server written in erlang. Supports reverse proxy load balancing and forward proxy with http (including CONNECT), socks4, socks5, and transparent proxy modes.
+pkg_surrogate_homepage = https://github.com/skruger/Surrogate
+pkg_surrogate_fetch = git
+pkg_surrogate_repo = https://github.com/skruger/Surrogate
+pkg_surrogate_commit = master
+
+PACKAGES += swab
+pkg_swab_name = swab
+pkg_swab_description = General purpose buffer handling module
+pkg_swab_homepage = https://github.com/crownedgrouse/swab
+pkg_swab_fetch = git
+pkg_swab_repo = https://github.com/crownedgrouse/swab
+pkg_swab_commit = master
+
+PACKAGES += swarm
+pkg_swarm_name = swarm
+pkg_swarm_description = Fast and simple acceptor pool for Erlang
+pkg_swarm_homepage = https://github.com/jeremey/swarm
+pkg_swarm_fetch = git
+pkg_swarm_repo = https://github.com/jeremey/swarm
+pkg_swarm_commit = master
+
+PACKAGES += switchboard
+pkg_switchboard_name = switchboard
+pkg_switchboard_description = A framework for processing email using worker plugins.
+pkg_switchboard_homepage = https://github.com/thusfresh/switchboard
+pkg_switchboard_fetch = git
+pkg_switchboard_repo = https://github.com/thusfresh/switchboard
+pkg_switchboard_commit = master
+
+PACKAGES += syn
+pkg_syn_name = syn
+pkg_syn_description = A global Process Registry and Process Group manager for Erlang.
+pkg_syn_homepage = https://github.com/ostinelli/syn
+pkg_syn_fetch = git
+pkg_syn_repo = https://github.com/ostinelli/syn
+pkg_syn_commit = master
+
+PACKAGES += sync
+pkg_sync_name = sync
+pkg_sync_description = On-the-fly recompiling and reloading in Erlang.
+pkg_sync_homepage = https://github.com/rustyio/sync
+pkg_sync_fetch = git
+pkg_sync_repo = https://github.com/rustyio/sync
+pkg_sync_commit = master
+
+PACKAGES += syntaxerl
+pkg_syntaxerl_name = syntaxerl
+pkg_syntaxerl_description = Syntax checker for Erlang
+pkg_syntaxerl_homepage = https://github.com/ten0s/syntaxerl
+pkg_syntaxerl_fetch = git
+pkg_syntaxerl_repo = https://github.com/ten0s/syntaxerl
+pkg_syntaxerl_commit = master
+
+PACKAGES += syslog
+pkg_syslog_name = syslog
+pkg_syslog_description = Erlang port driver for interacting with syslog via syslog(3)
+pkg_syslog_homepage = https://github.com/Vagabond/erlang-syslog
+pkg_syslog_fetch = git
+pkg_syslog_repo = https://github.com/Vagabond/erlang-syslog
+pkg_syslog_commit = master
+
+PACKAGES += taskforce
+pkg_taskforce_name = taskforce
+pkg_taskforce_description = Erlang worker pools for controlled parallelisation of arbitrary tasks.
+pkg_taskforce_homepage = https://github.com/g-andrade/taskforce
+pkg_taskforce_fetch = git
+pkg_taskforce_repo = https://github.com/g-andrade/taskforce
+pkg_taskforce_commit = master
+
+PACKAGES += tddreloader
+pkg_tddreloader_name = tddreloader
+pkg_tddreloader_description = Shell utility for recompiling, reloading, and testing code as it changes
+pkg_tddreloader_homepage = https://github.com/version2beta/tddreloader
+pkg_tddreloader_fetch = git
+pkg_tddreloader_repo = https://github.com/version2beta/tddreloader
+pkg_tddreloader_commit = master
+
+PACKAGES += tempo
+pkg_tempo_name = tempo
+pkg_tempo_description = NIF-based date and time parsing and formatting for Erlang.
+pkg_tempo_homepage = https://github.com/selectel/tempo
+pkg_tempo_fetch = git
+pkg_tempo_repo = https://github.com/selectel/tempo
+pkg_tempo_commit = master
+
+PACKAGES += ticktick
+pkg_ticktick_name = ticktick
+pkg_ticktick_description = Ticktick is an ID generator for messaging services.
+pkg_ticktick_homepage = https://github.com/ericliang/ticktick
+pkg_ticktick_fetch = git
+pkg_ticktick_repo = https://github.com/ericliang/ticktick
+pkg_ticktick_commit = master
+
+PACKAGES += tinymq
+pkg_tinymq_name = tinymq
+pkg_tinymq_description = TinyMQ - a diminutive, in-memory message queue
+pkg_tinymq_homepage = https://github.com/ChicagoBoss/tinymq
+pkg_tinymq_fetch = git
+pkg_tinymq_repo = https://github.com/ChicagoBoss/tinymq
+pkg_tinymq_commit = master
+
+PACKAGES += tinymt
+pkg_tinymt_name = tinymt
+pkg_tinymt_description = TinyMT pseudo random number generator for Erlang.
+pkg_tinymt_homepage = https://github.com/jj1bdx/tinymt-erlang
+pkg_tinymt_fetch = git
+pkg_tinymt_repo = https://github.com/jj1bdx/tinymt-erlang
+pkg_tinymt_commit = master
+
+PACKAGES += tirerl
+pkg_tirerl_name = tirerl
+pkg_tirerl_description = Erlang interface to Elastic Search
+pkg_tirerl_homepage = https://github.com/inaka/tirerl
+pkg_tirerl_fetch = git
+pkg_tirerl_repo = https://github.com/inaka/tirerl
+pkg_tirerl_commit = master
+
+PACKAGES += toml
+pkg_toml_name = toml
+pkg_toml_description = TOML (0.4.0) config parser
+pkg_toml_homepage = http://dozzie.jarowit.net/trac/wiki/TOML
+pkg_toml_fetch = git
+pkg_toml_repo = https://github.com/dozzie/toml
+pkg_toml_commit = v0.2.0
+
+PACKAGES += traffic_tools
+pkg_traffic_tools_name = traffic_tools
+pkg_traffic_tools_description = Simple traffic limiting library
+pkg_traffic_tools_homepage = https://github.com/systra/traffic_tools
+pkg_traffic_tools_fetch = git
+pkg_traffic_tools_repo = https://github.com/systra/traffic_tools
+pkg_traffic_tools_commit = master
+
+PACKAGES += trails
+pkg_trails_name = trails
+pkg_trails_description = A couple of improvements over Cowboy Routes
+pkg_trails_homepage = http://inaka.github.io/cowboy-trails/
+pkg_trails_fetch = git
+pkg_trails_repo = https://github.com/inaka/cowboy-trails
+pkg_trails_commit = master
+
+PACKAGES += trane
+pkg_trane_name = trane
+pkg_trane_description = SAX-style parser for broken HTML, in Erlang
+pkg_trane_homepage = https://github.com/massemanet/trane
+pkg_trane_fetch = git
+pkg_trane_repo = https://github.com/massemanet/trane
+pkg_trane_commit = master
+
+PACKAGES += transit
+pkg_transit_name = transit
+pkg_transit_description = transit format for erlang
+pkg_transit_homepage = https://github.com/isaiah/transit-erlang
+pkg_transit_fetch = git
+pkg_transit_repo = https://github.com/isaiah/transit-erlang
+pkg_transit_commit = master
+
+PACKAGES += trie
+pkg_trie_name = trie
+pkg_trie_description = Erlang Trie Implementation
+pkg_trie_homepage = https://github.com/okeuday/trie
+pkg_trie_fetch = git
+pkg_trie_repo = https://github.com/okeuday/trie
+pkg_trie_commit = master
+
+PACKAGES += triq
+pkg_triq_name = triq
+pkg_triq_description = Trifork QuickCheck
+pkg_triq_homepage = https://triq.gitlab.io
+pkg_triq_fetch = git
+pkg_triq_repo = https://gitlab.com/triq/triq.git
+pkg_triq_commit = master
+
+PACKAGES += tunctl
+pkg_tunctl_name = tunctl
+pkg_tunctl_description = Erlang TUN/TAP interface
+pkg_tunctl_homepage = https://github.com/msantos/tunctl
+pkg_tunctl_fetch = git
+pkg_tunctl_repo = https://github.com/msantos/tunctl
+pkg_tunctl_commit = master
+
+PACKAGES += twerl
+pkg_twerl_name = twerl
+pkg_twerl_description = Erlang client for the Twitter Streaming API
+pkg_twerl_homepage = https://github.com/lucaspiller/twerl
+pkg_twerl_fetch = git
+pkg_twerl_repo = https://github.com/lucaspiller/twerl
+pkg_twerl_commit = oauth
+
+PACKAGES += twitter_erlang
+pkg_twitter_erlang_name = twitter_erlang
+pkg_twitter_erlang_description = An Erlang twitter client
+pkg_twitter_erlang_homepage = https://github.com/ngerakines/erlang_twitter
+pkg_twitter_erlang_fetch = git
+pkg_twitter_erlang_repo = https://github.com/ngerakines/erlang_twitter
+pkg_twitter_erlang_commit = master
+
+PACKAGES += ucol_nif
+pkg_ucol_nif_name = ucol_nif
+pkg_ucol_nif_description = ICU based collation Erlang module
+pkg_ucol_nif_homepage = https://github.com/refuge/ucol_nif
+pkg_ucol_nif_fetch = git
+pkg_ucol_nif_repo = https://github.com/refuge/ucol_nif
+pkg_ucol_nif_commit = master
+
+PACKAGES += unicorn
+pkg_unicorn_name = unicorn
+pkg_unicorn_description = Generic configuration server
+pkg_unicorn_homepage = https://github.com/shizzard/unicorn
+pkg_unicorn_fetch = git
+pkg_unicorn_repo = https://github.com/shizzard/unicorn
+pkg_unicorn_commit = master
+
+PACKAGES += unsplit
+pkg_unsplit_name = unsplit
+pkg_unsplit_description = Resolves conflicts in Mnesia after network splits
+pkg_unsplit_homepage = https://github.com/uwiger/unsplit
+pkg_unsplit_fetch = git
+pkg_unsplit_repo = https://github.com/uwiger/unsplit
+pkg_unsplit_commit = master
+
+PACKAGES += uuid
+pkg_uuid_name = uuid
+pkg_uuid_description = Erlang UUID Implementation
+pkg_uuid_homepage = https://github.com/okeuday/uuid
+pkg_uuid_fetch = git
+pkg_uuid_repo = https://github.com/okeuday/uuid
+pkg_uuid_commit = master
+
+PACKAGES += ux
+pkg_ux_name = ux
+pkg_ux_description = Unicode eXtension for Erlang (Strings, Collation)
+pkg_ux_homepage = https://github.com/erlang-unicode/ux
+pkg_ux_fetch = git
+pkg_ux_repo = https://github.com/erlang-unicode/ux
+pkg_ux_commit = master
+
+PACKAGES += vert
+pkg_vert_name = vert
+pkg_vert_description = Erlang binding to libvirt virtualization API
+pkg_vert_homepage = https://github.com/msantos/erlang-libvirt
+pkg_vert_fetch = git
+pkg_vert_repo = https://github.com/msantos/erlang-libvirt
+pkg_vert_commit = master
+
+PACKAGES += verx
+pkg_verx_name = verx
+pkg_verx_description = Erlang implementation of the libvirtd remote protocol
+pkg_verx_homepage = https://github.com/msantos/verx
+pkg_verx_fetch = git
+pkg_verx_repo = https://github.com/msantos/verx
+pkg_verx_commit = master
+
+PACKAGES += vmq_acl
+pkg_vmq_acl_name = vmq_acl
+pkg_vmq_acl_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_acl_homepage = https://verne.mq/
+pkg_vmq_acl_fetch = git
+pkg_vmq_acl_repo = https://github.com/erlio/vmq_acl
+pkg_vmq_acl_commit = master
+
+PACKAGES += vmq_bridge
+pkg_vmq_bridge_name = vmq_bridge
+pkg_vmq_bridge_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_bridge_homepage = https://verne.mq/
+pkg_vmq_bridge_fetch = git
+pkg_vmq_bridge_repo = https://github.com/erlio/vmq_bridge
+pkg_vmq_bridge_commit = master
+
+PACKAGES += vmq_graphite
+pkg_vmq_graphite_name = vmq_graphite
+pkg_vmq_graphite_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_graphite_homepage = https://verne.mq/
+pkg_vmq_graphite_fetch = git
+pkg_vmq_graphite_repo = https://github.com/erlio/vmq_graphite
+pkg_vmq_graphite_commit = master
+
+PACKAGES += vmq_passwd
+pkg_vmq_passwd_name = vmq_passwd
+pkg_vmq_passwd_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_passwd_homepage = https://verne.mq/
+pkg_vmq_passwd_fetch = git
+pkg_vmq_passwd_repo = https://github.com/erlio/vmq_passwd
+pkg_vmq_passwd_commit = master
+
+PACKAGES += vmq_server
+pkg_vmq_server_name = vmq_server
+pkg_vmq_server_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_server_homepage = https://verne.mq/
+pkg_vmq_server_fetch = git
+pkg_vmq_server_repo = https://github.com/erlio/vmq_server
+pkg_vmq_server_commit = master
+
+PACKAGES += vmq_snmp
+pkg_vmq_snmp_name = vmq_snmp
+pkg_vmq_snmp_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_snmp_homepage = https://verne.mq/
+pkg_vmq_snmp_fetch = git
+pkg_vmq_snmp_repo = https://github.com/erlio/vmq_snmp
+pkg_vmq_snmp_commit = master
+
+PACKAGES += vmq_systree
+pkg_vmq_systree_name = vmq_systree
+pkg_vmq_systree_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_systree_homepage = https://verne.mq/
+pkg_vmq_systree_fetch = git
+pkg_vmq_systree_repo = https://github.com/erlio/vmq_systree
+pkg_vmq_systree_commit = master
+
+PACKAGES += vmstats
+pkg_vmstats_name = vmstats
+pkg_vmstats_description = tiny Erlang app that works in conjunction with statsderl in order to generate information on the Erlang VM for graphite logs.
+pkg_vmstats_homepage = https://github.com/ferd/vmstats
+pkg_vmstats_fetch = git
+pkg_vmstats_repo = https://github.com/ferd/vmstats
+pkg_vmstats_commit = master
+
+PACKAGES += walrus
+pkg_walrus_name = walrus
+pkg_walrus_description = Walrus - Mustache-like Templating
+pkg_walrus_homepage = https://github.com/devinus/walrus
+pkg_walrus_fetch = git
+pkg_walrus_repo = https://github.com/devinus/walrus
+pkg_walrus_commit = master
+
+PACKAGES += webmachine
+pkg_webmachine_name = webmachine
+pkg_webmachine_description = A REST-based system for building web applications.
+pkg_webmachine_homepage = https://github.com/basho/webmachine
+pkg_webmachine_fetch = git
+pkg_webmachine_repo = https://github.com/basho/webmachine
+pkg_webmachine_commit = master
+
+PACKAGES += websocket_client
+pkg_websocket_client_name = websocket_client
+pkg_websocket_client_description = Erlang websocket client (ws and wss supported)
+pkg_websocket_client_homepage = https://github.com/jeremyong/websocket_client
+pkg_websocket_client_fetch = git
+pkg_websocket_client_repo = https://github.com/jeremyong/websocket_client
+pkg_websocket_client_commit = master
+
+PACKAGES += worker_pool
+pkg_worker_pool_name = worker_pool
+pkg_worker_pool_description = a simple erlang worker pool
+pkg_worker_pool_homepage = https://github.com/inaka/worker_pool
+pkg_worker_pool_fetch = git
+pkg_worker_pool_repo = https://github.com/inaka/worker_pool
+pkg_worker_pool_commit = master
+
+PACKAGES += wrangler
+pkg_wrangler_name = wrangler
+pkg_wrangler_description = Import of the Wrangler svn repository.
+pkg_wrangler_homepage = http://www.cs.kent.ac.uk/projects/wrangler/Home.html
+pkg_wrangler_fetch = git
+pkg_wrangler_repo = https://github.com/RefactoringTools/wrangler
+pkg_wrangler_commit = master
+
+PACKAGES += wsock
+pkg_wsock_name = wsock
+pkg_wsock_description = Erlang library to build WebSocket clients and servers
+pkg_wsock_homepage = https://github.com/madtrick/wsock
+pkg_wsock_fetch = git
+pkg_wsock_repo = https://github.com/madtrick/wsock
+pkg_wsock_commit = master
+
+PACKAGES += xhttpc
+pkg_xhttpc_name = xhttpc
+pkg_xhttpc_description = Extensible HTTP Client for Erlang
+pkg_xhttpc_homepage = https://github.com/seriyps/xhttpc
+pkg_xhttpc_fetch = git
+pkg_xhttpc_repo = https://github.com/seriyps/xhttpc
+pkg_xhttpc_commit = master
+
+PACKAGES += xref_runner
+pkg_xref_runner_name = xref_runner
+pkg_xref_runner_description = Erlang Xref Runner (inspired by rebar xref)
+pkg_xref_runner_homepage = https://github.com/inaka/xref_runner
+pkg_xref_runner_fetch = git
+pkg_xref_runner_repo = https://github.com/inaka/xref_runner
+pkg_xref_runner_commit = master
+
+PACKAGES += yamerl
+pkg_yamerl_name = yamerl
+pkg_yamerl_description = YAML 1.2 parser in pure Erlang
+pkg_yamerl_homepage = https://github.com/yakaz/yamerl
+pkg_yamerl_fetch = git
+pkg_yamerl_repo = https://github.com/yakaz/yamerl
+pkg_yamerl_commit = master
+
+PACKAGES += yamler
+pkg_yamler_name = yamler
+pkg_yamler_description = libyaml-based yaml loader for Erlang
+pkg_yamler_homepage = https://github.com/goertzenator/yamler
+pkg_yamler_fetch = git
+pkg_yamler_repo = https://github.com/goertzenator/yamler
+pkg_yamler_commit = master
+
+PACKAGES += yaws
+pkg_yaws_name = yaws
+pkg_yaws_description = Yaws webserver
+pkg_yaws_homepage = http://yaws.hyber.org
+pkg_yaws_fetch = git
+pkg_yaws_repo = https://github.com/klacke/yaws
+pkg_yaws_commit = master
+
+PACKAGES += zab_engine
+pkg_zab_engine_name = zab_engine
+pkg_zab_engine_description = ZAB protocol implementation in Erlang
+pkg_zab_engine_homepage = https://github.com/xinmingyao/zab_engine
+pkg_zab_engine_fetch = git
+pkg_zab_engine_repo = https://github.com/xinmingyao/zab_engine
+pkg_zab_engine_commit = master
+
+PACKAGES += zabbix_sender
+pkg_zabbix_sender_name = zabbix_sender
+pkg_zabbix_sender_description = Zabbix trapper for sending data to Zabbix in pure Erlang
+pkg_zabbix_sender_homepage = https://github.com/stalkermn/zabbix_sender
+pkg_zabbix_sender_fetch = git
+pkg_zabbix_sender_repo = https://github.com/stalkermn/zabbix_sender.git
+pkg_zabbix_sender_commit = master
+
+PACKAGES += zeta
+pkg_zeta_name = zeta
+pkg_zeta_description = HTTP access log parser in Erlang
+pkg_zeta_homepage = https://github.com/s1n4/zeta
+pkg_zeta_fetch = git
+pkg_zeta_repo = https://github.com/s1n4/zeta
+pkg_zeta_commit = master
+
+PACKAGES += zippers
+pkg_zippers_name = zippers
+pkg_zippers_description = A library for functional zipper data structures in Erlang.
+pkg_zippers_homepage = https://github.com/ferd/zippers
+pkg_zippers_fetch = git
+pkg_zippers_repo = https://github.com/ferd/zippers
+pkg_zippers_commit = master
+
+PACKAGES += zlists
+pkg_zlists_name = zlists
+pkg_zlists_description = Erlang lazy lists library.
+pkg_zlists_homepage = https://github.com/vjache/erlang-zlists
+pkg_zlists_fetch = git
+pkg_zlists_repo = https://github.com/vjache/erlang-zlists
+pkg_zlists_commit = master
+
+PACKAGES += zraft_lib
+pkg_zraft_lib_name = zraft_lib
+pkg_zraft_lib_description = Erlang raft consensus protocol implementation
+pkg_zraft_lib_homepage = https://github.com/dreyk/zraft_lib
+pkg_zraft_lib_fetch = git
+pkg_zraft_lib_repo = https://github.com/dreyk/zraft_lib
+pkg_zraft_lib_commit = master
+
+PACKAGES += zucchini
+pkg_zucchini_name = zucchini
+pkg_zucchini_description = An Erlang INI parser
+pkg_zucchini_homepage = https://github.com/devinus/zucchini
+pkg_zucchini_fetch = git
+pkg_zucchini_repo = https://github.com/devinus/zucchini
+pkg_zucchini_commit = master
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: search
+
+define pkg_print
+ $(verbose) printf "%s\n" \
+ $(if $(call core_eq,$(1),$(pkg_$(1)_name)),,"Pkg name: $(1)") \
+ "App name: $(pkg_$(1)_name)" \
+ "Description: $(pkg_$(1)_description)" \
+ "Home page: $(pkg_$(1)_homepage)" \
+ "Fetch with: $(pkg_$(1)_fetch)" \
+ "Repository: $(pkg_$(1)_repo)" \
+ "Commit: $(pkg_$(1)_commit)" \
+ ""
+
+endef
+
+search:
+ifdef q
+ $(foreach p,$(PACKAGES), \
+ $(if $(findstring $(call core_lc,$(q)),$(call core_lc,$(pkg_$(p)_name) $(pkg_$(p)_description))), \
+ $(call pkg_print,$(p))))
+else
+ $(foreach p,$(PACKAGES),$(call pkg_print,$(p)))
+endif
+
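+# Illustrative usage of the search target above (assuming this file is
+# included as erlang.mk by the project Makefile):
+#   make search q=pool   # list packages whose name or description matches "pool"
+#   make search          # without q, print every known package
+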
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-deps clean-tmp-deps.log
+
+# Configuration.
+
+ifdef OTP_DEPS
+$(warning The variable OTP_DEPS is deprecated in favor of LOCAL_DEPS.)
+endif
+
+IGNORE_DEPS ?=
+export IGNORE_DEPS
+
+APPS_DIR ?= $(CURDIR)/apps
+export APPS_DIR
+
+DEPS_DIR ?= $(CURDIR)/deps
+export DEPS_DIR
+
+REBAR_DEPS_DIR = $(DEPS_DIR)
+export REBAR_DEPS_DIR
+
+REBAR_GIT ?= https://github.com/rebar/rebar
+REBAR_COMMIT ?= 576e12171ab8d69b048b827b92aa65d067deea01
+
+# External "early" plugins (see core/plugins.mk for regular plugins).
+# They both use the core_dep_plugin macro.
+
+define core_dep_plugin
+ifeq ($(2),$(PROJECT))
+-include $$(patsubst $(PROJECT)/%,%,$(1))
+else
+-include $(DEPS_DIR)/$(1)
+
+$(DEPS_DIR)/$(1): $(DEPS_DIR)/$(2) ;
+endif
+endef
+
+DEP_EARLY_PLUGINS ?=
+
+$(foreach p,$(DEP_EARLY_PLUGINS),\
+ $(eval $(if $(findstring /,$p),\
+ $(call core_dep_plugin,$p,$(firstword $(subst /, ,$p))),\
+ $(call core_dep_plugin,$p/early-plugins.mk,$p))))
+
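+# Illustrative settings (hypothetical dependency names):
+#   DEP_EARLY_PLUGINS = my_dep             -> includes $(DEPS_DIR)/my_dep/early-plugins.mk
+#   DEP_EARLY_PLUGINS = my_dep/mk/early.mk -> includes $(DEPS_DIR)/my_dep/mk/early.mk
+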
+# Query functions.
+
+query_fetch_method = $(if $(dep_$(1)),$(call _qfm_dep,$(word 1,$(dep_$(1)))),$(call _qfm_pkg,$(1)))
+_qfm_dep = $(if $(dep_fetch_$(1)),$(1),$(if $(IS_DEP),legacy,fail))
+_qfm_pkg = $(if $(pkg_$(1)_fetch),$(pkg_$(1)_fetch),fail)
+
+query_name = $(if $(dep_$(1)),$(1),$(if $(pkg_$(1)_name),$(pkg_$(1)_name),$(1)))
+
+query_repo = $(call _qr,$(1),$(call query_fetch_method,$(1)))
+_qr = $(if $(query_repo_$(2)),$(call query_repo_$(2),$(1)),$(call dep_repo,$(1)))
+
+query_repo_default = $(if $(dep_$(1)),$(word 2,$(dep_$(1))),$(pkg_$(1)_repo))
+query_repo_git = $(patsubst git://github.com/%,https://github.com/%,$(call query_repo_default,$(1)))
+query_repo_git-subfolder = $(call query_repo_git,$(1))
+query_repo_git-submodule = -
+query_repo_hg = $(call query_repo_default,$(1))
+query_repo_svn = $(call query_repo_default,$(1))
+query_repo_cp = $(call query_repo_default,$(1))
+query_repo_ln = $(call query_repo_default,$(1))
+query_repo_hex = https://hex.pm/packages/$(if $(word 3,$(dep_$(1))),$(word 3,$(dep_$(1))),$(1))
+query_repo_fail = -
+query_repo_legacy = -
+
+query_version = $(call _qv,$(1),$(call query_fetch_method,$(1)))
+_qv = $(if $(query_version_$(2)),$(call query_version_$(2),$(1)),$(call dep_commit,$(1)))
+
+query_version_default = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(word 3,$(dep_$(1))),$(pkg_$(1)_commit)))
+query_version_git = $(call query_version_default,$(1))
+query_version_git-subfolder = $(call query_version_git,$(1))
+query_version_git-submodule = -
+query_version_hg = $(call query_version_default,$(1))
+query_version_svn = -
+query_version_cp = -
+query_version_ln = -
+query_version_hex = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(word 2,$(dep_$(1))),$(pkg_$(1)_commit)))
+query_version_fail = -
+query_version_legacy = -
+
+query_extra = $(call _qe,$(1),$(call query_fetch_method,$(1)))
+_qe = $(if $(query_extra_$(2)),$(call query_extra_$(2),$(1)),-)
+
+query_extra_git = -
+query_extra_git-subfolder = $(if $(dep_$(1)),subfolder=$(word 4,$(dep_$(1))),-)
+query_extra_git-submodule = -
+query_extra_hg = -
+query_extra_svn = -
+query_extra_cp = -
+query_extra_ln = -
+query_extra_hex = $(if $(dep_$(1)),package-name=$(word 3,$(dep_$(1))),-)
+query_extra_fail = -
+query_extra_legacy = -
+
+query_absolute_path = $(addprefix $(DEPS_DIR)/,$(call query_name,$(1)))
+
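+# Illustrative resolution, assuming a hypothetical dependency line
+# `dep_cowboy = git https://github.com/ninenines/cowboy 2.9.0`:
+#   $(call query_fetch_method,cowboy) -> git
+#   $(call query_repo,cowboy)         -> https://github.com/ninenines/cowboy
+#   $(call query_version,cowboy)      -> 2.9.0
+# Without a dep_* override, the pkg_*_fetch/_repo/_commit values apply instead.
+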
+# Deprecated legacy query functions.
+dep_fetch = $(call query_fetch_method,$(1))
+dep_name = $(call query_name,$(1))
+dep_repo = $(call query_repo_git,$(1))
+dep_commit = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(if $(filter hex,$(word 1,$(dep_$(1)))),$(word 2,$(dep_$(1))),$(word 3,$(dep_$(1)))),$(pkg_$(1)_commit)))
+
+LOCAL_DEPS_DIRS = $(foreach a,$(LOCAL_DEPS),$(if $(wildcard $(APPS_DIR)/$(a)),$(APPS_DIR)/$(a)))
+ALL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(foreach dep,$(filter-out $(IGNORE_DEPS),$(BUILD_DEPS) $(DEPS)),$(call dep_name,$(dep))))
+
+# When we are calling an app directly we don't want to include it here
+# otherwise it'll be treated both as an app and a top-level project.
+ALL_APPS_DIRS = $(if $(wildcard $(APPS_DIR)/),$(filter-out $(APPS_DIR),$(shell find $(APPS_DIR) -maxdepth 1 -type d)))
+ifdef ROOT_DIR
+ifndef IS_APP
+ALL_APPS_DIRS := $(filter-out $(APPS_DIR)/$(notdir $(CURDIR)),$(ALL_APPS_DIRS))
+endif
+endif
+
+ifeq ($(filter $(APPS_DIR) $(DEPS_DIR),$(subst :, ,$(ERL_LIBS))),)
+ifeq ($(ERL_LIBS),)
+ ERL_LIBS = $(APPS_DIR):$(DEPS_DIR)
+else
+ ERL_LIBS := $(ERL_LIBS):$(APPS_DIR):$(DEPS_DIR)
+endif
+endif
+export ERL_LIBS
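+# Illustrative result of the block above when ERL_LIBS was previously unset:
+#   ERL_LIBS = $(CURDIR)/apps:$(CURDIR)/deps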
+
+export NO_AUTOPATCH
+
+# Verbosity.
+
+dep_verbose_0 = @echo " DEP $1 ($(call dep_commit,$1))";
+dep_verbose_2 = set -x;
+dep_verbose = $(dep_verbose_$(V))
+
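+# Illustrative output per verbosity level (V is the usual erlang.mk knob;
+# the dep name and commit are hypothetical):
+#   V=0 (default): " DEP cowboy (master)"  -- one terse line per dependency
+#   V=2:           every shell command is traced via `set -x`
+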
+# Optimization: don't recompile deps unless truly necessary.
+
+ifndef IS_DEP
+ifneq ($(MAKELEVEL),0)
+$(shell rm -f ebin/dep_built)
+endif
+endif
+
+# Core targets.
+
+ALL_APPS_DIRS_TO_BUILD = $(if $(LOCAL_DEPS_DIRS)$(IS_APP),$(LOCAL_DEPS_DIRS),$(ALL_APPS_DIRS))
+
+apps:: $(ALL_APPS_DIRS) clean-tmp-deps.log | $(ERLANG_MK_TMP)
+# Create ebin directory for all apps to make sure Erlang recognizes them
+# as proper OTP applications when using -include_lib. This is a temporary
+# fix; a proper fix would be to compile apps/* in the right order.
+ifndef IS_APP
+ifneq ($(ALL_APPS_DIRS),)
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ mkdir -p $$dep/ebin; \
+ done
+endif
+endif
+# At the toplevel: if LOCAL_DEPS is defined with at least one local app, only
+# compile that list of apps. Otherwise, compile everything.
+# Within an app: compile all LOCAL_DEPS that are (uncompiled) local apps.
+ifneq ($(ALL_APPS_DIRS_TO_BUILD),)
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS_TO_BUILD); do \
+ if grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/apps.log; then \
+ :; \
+ else \
+ echo $$dep >> $(ERLANG_MK_TMP)/apps.log; \
+ $(MAKE) -C $$dep $(if $(IS_TEST),test-build-app) IS_APP=1; \
+ fi \
+ done
+endif
+
+clean-tmp-deps.log:
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_TMP)/apps.log $(ERLANG_MK_TMP)/deps.log
+endif
+
+# Erlang.mk does not rebuild dependencies once they have been compiled.
+# A developer working on the top-level project and some of its
+# dependencies at the same time may want to change this behavior.
+# There are two solutions:
+# 1. Set `FULL=1` so that all dependencies are visited and
+# recursively recompiled if necessary.
+# 2. Set `FORCE_REBUILD=` to the specific list of dependencies that
+# should be recompiled (instead of the whole set).
+
+FORCE_REBUILD ?=
+
+ifeq ($(origin FULL),undefined)
+ifneq ($(strip $(force_rebuild_dep)$(FORCE_REBUILD)),)
+define force_rebuild_dep
+echo "$(FORCE_REBUILD)" | grep -qw "$$(basename "$1")"
+endef
+endif
+endif
+
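+# Illustrative invocations (hypothetical dependency names):
+#   make FULL=1                        # visit and recompile every dependency
+#   make FORCE_REBUILD="ranch cowlib"  # recompile only these dependencies
+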
+ifneq ($(SKIP_DEPS),)
+deps::
+else
+deps:: $(ALL_DEPS_DIRS) apps clean-tmp-deps.log | $(ERLANG_MK_TMP)
+ifneq ($(ALL_DEPS_DIRS),)
+ $(verbose) set -e; for dep in $(ALL_DEPS_DIRS); do \
+ if grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/deps.log; then \
+ :; \
+ else \
+ echo $$dep >> $(ERLANG_MK_TMP)/deps.log; \
+ if [ -z "$(strip $(FULL))" ] $(if $(force_rebuild_dep),&& ! ($(call force_rebuild_dep,$$dep)),) && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ elif [ -f $$dep/GNUmakefile ] || [ -f $$dep/makefile ] || [ -f $$dep/Makefile ]; then \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ else \
+ echo "Error: No Makefile to build dependency $$dep." >&2; \
+ exit 2; \
+ fi \
+ fi \
+ done
+endif
+endif
+
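+# Illustrative walk of the deps:: loop above for a hypothetical dep "foo":
+# on first visit $(DEPS_DIR)/foo is appended to $(ERLANG_MK_TMP)/deps.log;
+# if foo/ebin/dep_built exists (and neither FULL nor FORCE_REBUILD applies),
+# foo is skipped; otherwise `make -C foo IS_DEP=1` runs and, on success,
+# the dep_built marker is touched.
+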
+# Deps related targets.
+
+# @todo rename GNUmakefile and makefile into Makefile first, if they exist
+# While the Makefile could also be named GNUmakefile or makefile,
+# in practice only Makefile has been needed so far.
+define dep_autopatch
+ if [ -f $(DEPS_DIR)/$(1)/erlang.mk ]; then \
+ rm -rf $(DEPS_DIR)/$1/ebin/; \
+ $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+ $(call dep_autopatch_erlang_mk,$(1)); \
+ elif [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+ if [ -f $(DEPS_DIR)/$1/rebar.lock ]; then \
+ $(call dep_autopatch2,$1); \
+ elif [ 0 != `grep -c "include ../\w*\.mk" $(DEPS_DIR)/$(1)/Makefile` ]; then \
+ $(call dep_autopatch2,$(1)); \
+ elif [ 0 != `grep -ci "^[^#].*rebar" $(DEPS_DIR)/$(1)/Makefile` ]; then \
+ $(call dep_autopatch2,$(1)); \
+ elif [ -n "`find $(DEPS_DIR)/$(1)/ -type f -name \*.mk -not -name erlang.mk -exec grep -i "^[^#].*rebar" '{}' \;`" ]; then \
+ $(call dep_autopatch2,$(1)); \
+ fi \
+ else \
+ if [ ! -d $(DEPS_DIR)/$(1)/src/ ]; then \
+ $(call dep_autopatch_noop,$(1)); \
+ else \
+ $(call dep_autopatch2,$(1)); \
+ fi \
+ fi
+endef
+
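+# In short: deps that ship erlang.mk get their .app.src regenerated and their
+# `include erlang.mk` line patched; Makefile-based deps go through
+# dep_autopatch2 when they appear to use rebar or include parent .mk files;
+# deps with neither a Makefile nor src/ get a no-op Makefile; the remainder
+# also go through dep_autopatch2.
+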
+define dep_autopatch2
+ ! test -f $(DEPS_DIR)/$1/ebin/$1.app || \
+ mv -n $(DEPS_DIR)/$1/ebin/$1.app $(DEPS_DIR)/$1/src/$1.app.src; \
+ rm -f $(DEPS_DIR)/$1/ebin/$1.app; \
+ if [ -f $(DEPS_DIR)/$1/src/$1.app.src.script ]; then \
+ $(call erlang,$(call dep_autopatch_appsrc_script.erl,$(1))); \
+ fi; \
+ $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+ if [ -f $(DEPS_DIR)/$(1)/rebar -o -f $(DEPS_DIR)/$(1)/rebar.config -o -f $(DEPS_DIR)/$(1)/rebar.config.script -o -f $(DEPS_DIR)/$1/rebar.lock ]; then \
+ $(call dep_autopatch_fetch_rebar); \
+ $(call dep_autopatch_rebar,$(1)); \
+ else \
+ $(call dep_autopatch_gen,$(1)); \
+ fi
+endef
+
+define dep_autopatch_noop
+ printf "noop:\n" > $(DEPS_DIR)/$(1)/Makefile
+endef
+
+# Replace "include erlang.mk" with a line that will load the parent Erlang.mk
+# if one is given. Do this for all 3 possible Makefile file names.
+ifeq ($(NO_AUTOPATCH_ERLANG_MK),)
+define dep_autopatch_erlang_mk
+ for f in Makefile makefile GNUmakefile; do \
+ if [ -f $(DEPS_DIR)/$1/$$f ]; then \
+ sed -i.bak s/'include *erlang.mk'/'include $$(if $$(ERLANG_MK_FILENAME),$$(ERLANG_MK_FILENAME),erlang.mk)'/ $(DEPS_DIR)/$1/$$f; \
+ fi \
+ done
+endef
+else
+define dep_autopatch_erlang_mk
+ :
+endef
+endif
+
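+# Illustrative effect of the sed call above on a dependency's Makefile:
+#   before: include erlang.mk
+#   after:  include $(if $(ERLANG_MK_FILENAME),$(ERLANG_MK_FILENAME),erlang.mk)
+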
+define dep_autopatch_gen
+ printf "%s\n" \
+ "ERLC_OPTS = +debug_info" \
+ "include ../../erlang.mk" > $(DEPS_DIR)/$(1)/Makefile
+endef
+
+# We use flock/lockf when available to avoid concurrency issues.
+define dep_autopatch_fetch_rebar
+ if command -v flock >/dev/null; then \
+ flock $(ERLANG_MK_TMP)/rebar.lock sh -c "$(call dep_autopatch_fetch_rebar2)"; \
+ elif command -v lockf >/dev/null; then \
+ lockf $(ERLANG_MK_TMP)/rebar.lock sh -c "$(call dep_autopatch_fetch_rebar2)"; \
+ else \
+ $(call dep_autopatch_fetch_rebar2); \
+ fi
+endef
+
+define dep_autopatch_fetch_rebar2
+ if [ ! -d $(ERLANG_MK_TMP)/rebar ]; then \
+ git clone -q -n -- $(REBAR_GIT) $(ERLANG_MK_TMP)/rebar; \
+ cd $(ERLANG_MK_TMP)/rebar; \
+ git checkout -q $(REBAR_COMMIT); \
+ ./bootstrap; \
+ cd -; \
+ fi
+endef
+
+define dep_autopatch_rebar
+ if [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+ mv $(DEPS_DIR)/$(1)/Makefile $(DEPS_DIR)/$(1)/Makefile.orig.mk; \
+ fi; \
+ $(call erlang,$(call dep_autopatch_rebar.erl,$(1))); \
+ rm -f $(DEPS_DIR)/$(1)/ebin/$(1).app
+endef
+
+define dep_autopatch_rebar.erl
+ application:load(rebar),
+ application:set_env(rebar, log_level, debug),
+ rmemo:start(),
+ Conf1 = case file:consult("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config)") of
+ {ok, Conf0} -> Conf0;
+ _ -> []
+ end,
+ {Conf, OsEnv} = fun() ->
+ case filelib:is_file("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)") of
+ false -> {Conf1, []};
+ true ->
+ Bindings0 = erl_eval:new_bindings(),
+ Bindings1 = erl_eval:add_binding('CONFIG', Conf1, Bindings0),
+ Bindings = erl_eval:add_binding('SCRIPT', "$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings1),
+ Before = os:getenv(),
+ {ok, Conf2} = file:script("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings),
+ {Conf2, lists:foldl(fun(E, Acc) -> lists:delete(E, Acc) end, os:getenv(), Before)}
+ end
+ end(),
+ Write = fun (Text) ->
+ file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/Makefile)", Text, [append])
+ end,
+ Escape = fun (Text) ->
+ re:replace(Text, "\\\\$$", "\$$$$", [global, {return, list}])
+ end,
+ Write("IGNORE_DEPS += edown eper eunit_formatters meck node_package "
+ "rebar_lock_deps_plugin rebar_vsn_plugin reltool_util\n"),
+ Write("C_SRC_DIR = /path/do/not/exist\n"),
+ Write("C_SRC_TYPE = rebar\n"),
+ Write("DRV_CFLAGS = -fPIC\nexport DRV_CFLAGS\n"),
+ Write(["ERLANG_ARCH = ", rebar_utils:wordsize(), "\nexport ERLANG_ARCH\n"]),
+ ToList = fun
+ (V) when is_atom(V) -> atom_to_list(V);
+ (V) when is_list(V) -> "'\\"" ++ V ++ "\\"'"
+ end,
+ fun() ->
+ Write("ERLC_OPTS = +debug_info\nexport ERLC_OPTS\n"),
+ case lists:keyfind(erl_opts, 1, Conf) of
+ false -> ok;
+ {_, ErlOpts} ->
+ lists:foreach(fun
+ ({d, D}) ->
+ Write("ERLC_OPTS += -D" ++ ToList(D) ++ "=1\n");
+ ({d, DKey, DVal}) ->
+ Write("ERLC_OPTS += -D" ++ ToList(DKey) ++ "=" ++ ToList(DVal) ++ "\n");
+ ({i, I}) ->
+ Write(["ERLC_OPTS += -I ", I, "\n"]);
+ ({platform_define, Regex, D}) ->
+ case rebar_utils:is_arch(Regex) of
+ true -> Write("ERLC_OPTS += -D" ++ ToList(D) ++ "=1\n");
+ false -> ok
+ end;
+ ({parse_transform, PT}) ->
+ Write("ERLC_OPTS += +'{parse_transform, " ++ ToList(PT) ++ "}'\n");
+ (_) -> ok
+ end, ErlOpts)
+ end,
+ Write("\n")
+ end(),
+ GetHexVsn = fun(N, NP) ->
+ case file:consult("$(call core_native_path,$(DEPS_DIR)/$1/rebar.lock)") of
+ {ok, Lock} ->
+ io:format("~p~n", [Lock]),
+ case lists:keyfind("1.1.0", 1, Lock) of
+ {_, LockPkgs} ->
+ io:format("~p~n", [LockPkgs]),
+ case lists:keyfind(atom_to_binary(N, latin1), 1, LockPkgs) of
+ {_, {pkg, _, Vsn}, _} ->
+ io:format("~p~n", [Vsn]),
+ {N, {hex, NP, binary_to_list(Vsn)}};
+ _ ->
+ false
+ end;
+ _ ->
+ false
+ end;
+ _ ->
+ false
+ end
+ end,
+ SemVsn = fun
+ ("~>" ++ S0) ->
+ S = case S0 of
+ " " ++ S1 -> S1;
+ _ -> S0
+ end,
+ case length([ok || $$. <- S]) of
+ 0 -> S ++ ".0.0";
+ 1 -> S ++ ".0";
+ _ -> S
+ end;
+ (S) -> S
+ end,
+ fun() ->
+ File = case lists:keyfind(deps, 1, Conf) of
+ false -> [];
+ {_, Deps} ->
+ [begin case case Dep of
+ N when is_atom(N) -> GetHexVsn(N, N);
+ {N, S} when is_atom(N), is_list(S) -> {N, {hex, N, SemVsn(S)}};
+ {N, {pkg, NP}} when is_atom(N) -> GetHexVsn(N, NP);
+ {N, S, {pkg, NP}} -> {N, {hex, NP, S}};
+ {N, S} when is_tuple(S) -> {N, S};
+ {N, _, S} -> {N, S};
+ {N, _, S, _} -> {N, S};
+ _ -> false
+ end of
+ false -> ok;
+ {Name, Source} ->
+ {Method, Repo, Commit} = case Source of
+ {hex, NPV, V} -> {hex, V, NPV};
+ {git, R} -> {git, R, master};
+ {M, R, {branch, C}} -> {M, R, C};
+ {M, R, {ref, C}} -> {M, R, C};
+ {M, R, {tag, C}} -> {M, R, C};
+ {M, R, C} -> {M, R, C}
+ end,
+ Write(io_lib:format("DEPS += ~s\ndep_~s = ~s ~s ~s~n", [Name, Name, Method, Repo, Commit]))
+ end end || Dep <- Deps]
+ end
+ end(),
+ fun() ->
+ case lists:keyfind(erl_first_files, 1, Conf) of
+ false -> ok;
+ {_, Files} ->
+ Names = [[" ", case lists:reverse(F) of
+ "lre." ++ Elif -> lists:reverse(Elif);
+ "lrx." ++ Elif -> lists:reverse(Elif);
+ "lry." ++ Elif -> lists:reverse(Elif);
+ Elif -> lists:reverse(Elif)
+ end] || "src/" ++ F <- Files],
+ Write(io_lib:format("COMPILE_FIRST +=~s\n", [Names]))
+ end
+ end(),
+ Write("\n\nrebar_dep: preprocess pre-deps deps pre-app app\n"),
+ Write("\npreprocess::\n"),
+ Write("\npre-deps::\n"),
+ Write("\npre-app::\n"),
+ PatchHook = fun(Cmd) ->
+ Cmd2 = re:replace(Cmd, "^([g]?make)(.*)( -C.*)", "\\\\1\\\\3\\\\2", [{return, list}]),
+ case Cmd2 of
+ "make -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1);
+ "gmake -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1);
+ "make " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1);
+ "gmake " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1);
+ _ -> Escape(Cmd)
+ end
+ end,
+ fun() ->
+ case lists:keyfind(pre_hooks, 1, Conf) of
+ false -> ok;
+ {_, Hooks} ->
+ [case H of
+ {'get-deps', Cmd} ->
+ Write("\npre-deps::\n\t" ++ PatchHook(Cmd) ++ "\n");
+ {compile, Cmd} ->
+ Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n");
+ {Regex, compile, Cmd} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n");
+ false -> ok
+ end;
+ _ -> ok
+ end || H <- Hooks]
+ end
+ end(),
+ ShellToMk = fun(V0) ->
+ V1 = re:replace(V0, "[$$][(]", "$$\(shell ", [global]),
+ V = re:replace(V1, "([$$])(?![(])(\\\\w*)", "\\\\1(\\\\2)", [global]),
+ re:replace(V, "-Werror\\\\b", "", [{return, list}, global])
+ end,
+ PortSpecs = fun() ->
+ case lists:keyfind(port_specs, 1, Conf) of
+ false ->
+ case filelib:is_dir("$(call core_native_path,$(DEPS_DIR)/$1/c_src)") of
+ false -> [];
+ true ->
+ [{"priv/" ++ proplists:get_value(so_name, Conf, "$(1)_drv.so"),
+ proplists:get_value(port_sources, Conf, ["c_src/*.c"]), []}]
+ end;
+ {_, Specs} ->
+ lists:flatten([case S of
+ {Output, Input} -> {ShellToMk(Output), Input, []};
+ {Regex, Output, Input} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {ShellToMk(Output), Input, []};
+ false -> []
+ end;
+ {Regex, Output, Input, [{env, Env}]} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {ShellToMk(Output), Input, Env};
+ false -> []
+ end
+ end || S <- Specs])
+ end
+ end(),
+ PortSpecWrite = fun (Text) ->
+ file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/c_src/Makefile.erlang.mk)", Text, [append])
+ end,
+ case PortSpecs of
+ [] -> ok;
+ _ ->
+ Write("\npre-app::\n\t@$$\(MAKE) --no-print-directory -f c_src/Makefile.erlang.mk\n"),
+ PortSpecWrite(io_lib:format("ERL_CFLAGS ?= -finline-functions -Wall -fPIC -I \\"~s/erts-~s/include\\" -I \\"~s\\"\n",
+ [code:root_dir(), erlang:system_info(version), code:lib_dir(erl_interface, include)])),
+ PortSpecWrite(io_lib:format("ERL_LDFLAGS ?= -L \\"~s\\" -lei\n",
+ [code:lib_dir(erl_interface, lib)])),
+ [PortSpecWrite(["\n", E, "\n"]) || E <- OsEnv],
+ FilterEnv = fun(Env) ->
+ lists:flatten([case E of
+ {_, _} -> E;
+ {Regex, K, V} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {K, V};
+ false -> []
+ end
+ end || E <- Env])
+ end,
+ MergeEnv = fun(Env) ->
+ lists:foldl(fun ({K, V}, Acc) ->
+ case lists:keyfind(K, 1, Acc) of
+ false -> [{K, rebar_utils:expand_env_variable(V, K, "")}|Acc];
+ {_, V0} -> [{K, rebar_utils:expand_env_variable(V, K, V0)}|Acc]
+ end
+ end, [], Env)
+ end,
+ PortEnv = case lists:keyfind(port_env, 1, Conf) of
+ false -> [];
+ {_, PortEnv0} -> FilterEnv(PortEnv0)
+ end,
+ PortSpec = fun ({Output, Input0, Env}) ->
+ filelib:ensure_dir("$(call core_native_path,$(DEPS_DIR)/$1/)" ++ Output),
+ Input = [[" ", I] || I <- Input0],
+ PortSpecWrite([
+ [["\n", K, " = ", ShellToMk(V)] || {K, V} <- lists:reverse(MergeEnv(PortEnv))],
+ case $(PLATFORM) of
+ darwin -> "\n\nLDFLAGS += -flat_namespace -undefined suppress";
+ _ -> ""
+ end,
+ "\n\nall:: ", Output, "\n\t@:\n\n",
+ "%.o: %.c\n\t$$\(CC) -c -o $$\@ $$\< $$\(CFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.C\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.cc\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.cpp\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ [[Output, ": ", K, " += ", ShellToMk(V), "\n"] || {K, V} <- lists:reverse(MergeEnv(FilterEnv(Env)))],
+ Output, ": $$\(foreach ext,.c .C .cc .cpp,",
+ "$$\(patsubst %$$\(ext),%.o,$$\(filter %$$\(ext),$$\(wildcard", Input, "))))\n",
+ "\t$$\(CC) -o $$\@ $$\? $$\(LDFLAGS) $$\(ERL_LDFLAGS) $$\(DRV_LDFLAGS) $$\(EXE_LDFLAGS)",
+ case {filename:extension(Output), $(PLATFORM)} of
+ {[], _} -> "\n";
+ {_, darwin} -> "\n";
+ _ -> " -shared\n"
+ end])
+ end,
+ [PortSpec(S) || S <- PortSpecs]
+ end,
+ fun() ->
+ case lists:keyfind(plugins, 1, Conf) of
+ false -> ok;
+ {_, Plugins0} ->
+ Plugins = [P || P <- Plugins0, is_tuple(P)],
+ case lists:keyfind('lfe-compile', 1, Plugins) of
+ false -> ok;
+ _ -> Write("\nBUILD_DEPS = lfe lfe.mk\ndep_lfe.mk = git https://github.com/ninenines/lfe.mk master\nDEP_PLUGINS = lfe.mk\n")
+ end
+ end
+ end(),
+ Write("\ninclude $$\(if $$\(ERLANG_MK_FILENAME),$$\(ERLANG_MK_FILENAME),erlang.mk)"),
+ RunPlugin = fun(Plugin, Step) ->
+ case erlang:function_exported(Plugin, Step, 2) of
+ false -> ok;
+ true ->
+ c:cd("$(call core_native_path,$(DEPS_DIR)/$1/)"),
+ Ret = Plugin:Step({config, "", Conf, dict:new(), dict:new(), dict:new(),
+ dict:store(base_dir, "", dict:new())}, undefined),
+ io:format("rebar plugin ~p step ~p ret ~p~n", [Plugin, Step, Ret])
+ end
+ end,
+ fun() ->
+ case lists:keyfind(plugins, 1, Conf) of
+ false -> ok;
+ {_, Plugins0} ->
+ Plugins = [P || P <- Plugins0, is_atom(P)],
+ [begin
+ case lists:keyfind(deps, 1, Conf) of
+ false -> ok;
+ {_, Deps} ->
+ case lists:keyfind(P, 1, Deps) of
+ false -> ok;
+ _ ->
+ Path = "$(call core_native_path,$(DEPS_DIR)/)" ++ atom_to_list(P),
+ io:format("~s", [os:cmd("$(MAKE) -C $(call core_native_path,$(DEPS_DIR)/$1) " ++ Path)]),
+ io:format("~s", [os:cmd("$(MAKE) -C " ++ Path ++ " IS_DEP=1")]),
+ code:add_patha(Path ++ "/ebin")
+ end
+ end
+ end || P <- Plugins],
+ [case code:load_file(P) of
+ {module, P} -> ok;
+ _ ->
+ case lists:keyfind(plugin_dir, 1, Conf) of
+ false -> ok;
+ {_, PluginsDir} ->
+ ErlFile = "$(call core_native_path,$(DEPS_DIR)/$1/)" ++ PluginsDir ++ "/" ++ atom_to_list(P) ++ ".erl",
+ {ok, P, Bin} = compile:file(ErlFile, [binary]),
+ {module, P} = code:load_binary(P, ErlFile, Bin)
+ end
+ end || P <- Plugins],
+ [RunPlugin(P, preprocess) || P <- Plugins],
+ [RunPlugin(P, pre_compile) || P <- Plugins],
+ [RunPlugin(P, compile) || P <- Plugins]
+ end
+ end(),
+ halt()
+endef
+
+define dep_autopatch_appsrc_script.erl
+ AppSrc = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+ AppSrcScript = AppSrc ++ ".script",
+ {ok, Conf0} = file:consult(AppSrc),
+ Bindings0 = erl_eval:new_bindings(),
+ Bindings1 = erl_eval:add_binding('CONFIG', Conf0, Bindings0),
+ Bindings = erl_eval:add_binding('SCRIPT', AppSrcScript, Bindings1),
+ Conf = case file:script(AppSrcScript, Bindings) of
+ {ok, [C]} -> C;
+ {ok, C} -> C
+ end,
+ ok = file:write_file(AppSrc, io_lib:format("~p.~n", [Conf])),
+ halt()
+endef
+
+define dep_autopatch_appsrc.erl
+ AppSrcOut = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+ AppSrcIn = case filelib:is_regular(AppSrcOut) of false -> "$(call core_native_path,$(DEPS_DIR)/$1/ebin/$1.app)"; true -> AppSrcOut end,
+ case filelib:is_regular(AppSrcIn) of
+ false -> ok;
+ true ->
+ {ok, [{application, $(1), L0}]} = file:consult(AppSrcIn),
+ L1 = lists:keystore(modules, 1, L0, {modules, []}),
+ L2 = case lists:keyfind(vsn, 1, L1) of
+ {_, git} -> lists:keyreplace(vsn, 1, L1, {vsn, lists:droplast(os:cmd("git -C $(DEPS_DIR)/$1 describe --dirty --tags --always"))});
+ {_, {cmd, _}} -> lists:keyreplace(vsn, 1, L1, {vsn, "cmd"});
+ _ -> L1
+ end,
+ L3 = case lists:keyfind(registered, 1, L2) of false -> [{registered, []}|L2]; _ -> L2 end,
+ ok = file:write_file(AppSrcOut, io_lib:format("~p.~n", [{application, $(1), L3}])),
+ case AppSrcOut of AppSrcIn -> ok; _ -> ok = file:delete(AppSrcIn) end
+ end,
+ halt()
+endef
+
+define dep_fetch_git
+ git clone -q -n -- $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && git checkout -q $(call dep_commit,$(1));
+endef
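+# For example, a project Makefile declaring
+#   DEPS = cowlib
+#   dep_cowlib = git https://github.com/ninenines/cowlib 2.9.1
+# makes dep_fetch_git clone the repository into $(DEPS_DIR)/cowlib and check
+# out the "2.9.1" commit.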
+
+define dep_fetch_git-subfolder
+ mkdir -p $(ERLANG_MK_TMP)/git-subfolder; \
+ git clone -q -n -- $(call dep_repo,$1) \
+ $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1); \
+ cd $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1) \
+ && git checkout -q $(call dep_commit,$1); \
+ ln -s $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1)/$(word 4,$(dep_$(1))) \
+ $(DEPS_DIR)/$(call dep_name,$1);
+endef
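+# Illustrative (hypothetical repository and path): declaring
+#   dep_myapp = git-subfolder https://example.com/umbrella.git master apps/myapp
+# clones the whole repository under $(ERLANG_MK_TMP)/git-subfolder and only
+# symlinks the apps/myapp subfolder (the 4th word) into $(DEPS_DIR)/myapp.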
+
+define dep_fetch_git-submodule
+ git submodule update --init -- $(DEPS_DIR)/$1;
+endef
+
+define dep_fetch_hg
+ hg clone -q -U $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && hg update -q $(call dep_commit,$(1));
+endef
+
+define dep_fetch_svn
+ svn checkout -q $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+define dep_fetch_cp
+ cp -R $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+define dep_fetch_ln
+ ln -s $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
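+# For example (hypothetical path), dep_mylib = ln /path/to/mylib symlinks a
+# local checkout into $(DEPS_DIR)/mylib, while cp copies it instead.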
+
+# Hex only has a package version. No need to look in the Erlang.mk packages.
+define dep_fetch_hex
+ mkdir -p $(ERLANG_MK_TMP)/hex $(DEPS_DIR)/$1; \
+ $(call core_http_get,$(ERLANG_MK_TMP)/hex/$1.tar,\
+ https://repo.hex.pm/tarballs/$(if $(word 3,$(dep_$1)),$(word 3,$(dep_$1)),$1)-$(strip $(word 2,$(dep_$1))).tar); \
+ tar -xOf $(ERLANG_MK_TMP)/hex/$1.tar contents.tar.gz | tar -C $(DEPS_DIR)/$1 -xzf -;
+endef
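+# For example, dep_jsx = hex 2.9.0 downloads jsx 2.9.0 from repo.hex.pm; an
+# optional third word names the Hex package when it differs from the
+# application name.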
+
+define dep_fetch_fail
+ echo "Error: Unknown or invalid dependency: $(1)." >&2; \
+ exit 78;
+endef
+
+# Kept for compatibility purposes with older Erlang.mk configuration.
+define dep_fetch_legacy
+ $(warning WARNING: '$(1)' dependency configuration uses deprecated format.) \
+ git clone -q -n -- $(word 1,$(dep_$(1))) $(DEPS_DIR)/$(1); \
+ cd $(DEPS_DIR)/$(1) && git checkout -q $(if $(word 2,$(dep_$(1))),$(word 2,$(dep_$(1))),master);
+endef
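+# For example, the deprecated two-word form
+#   dep_cowlib = https://github.com/ninenines/cowlib master
+# (repository then commit, with no fetch method) still works but triggers the
+# warning above; the commit defaults to master when omitted.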
+
+define dep_target
+$(DEPS_DIR)/$(call dep_name,$1): | $(ERLANG_MK_TMP)
+ $(eval DEP_NAME := $(call dep_name,$1))
+ $(eval DEP_STR := $(if $(filter $1,$(DEP_NAME)),$1,"$1 ($(DEP_NAME))"))
+ $(verbose) if test -d $(APPS_DIR)/$(DEP_NAME); then \
+ echo "Error: Dependency" $(DEP_STR) "conflicts with application found in $(APPS_DIR)/$(DEP_NAME)." >&2; \
+ exit 17; \
+ fi
+ $(verbose) mkdir -p $(DEPS_DIR)
+ $(dep_verbose) $(call dep_fetch_$(strip $(call dep_fetch,$(1))),$(1))
+ $(verbose) if [ -f $(DEPS_DIR)/$(1)/configure.ac -o -f $(DEPS_DIR)/$(1)/configure.in ] \
+ && [ ! -f $(DEPS_DIR)/$(1)/configure ]; then \
+ echo " AUTO " $(DEP_STR); \
+ cd $(DEPS_DIR)/$(1) && autoreconf -Wall -vif -I m4; \
+ fi
+ - $(verbose) if [ -f $(DEPS_DIR)/$(DEP_NAME)/configure ]; then \
+ echo " CONF " $(DEP_STR); \
+ cd $(DEPS_DIR)/$(DEP_NAME) && ./configure; \
+ fi
+ifeq ($(filter $(1),$(NO_AUTOPATCH)),)
+ $(verbose) $$(MAKE) --no-print-directory autopatch-$(DEP_NAME)
+endif
+
+.PHONY: autopatch-$(call dep_name,$1)
+
+autopatch-$(call dep_name,$1)::
+ $(verbose) if [ "$(1)" = "amqp_client" -a "$(RABBITMQ_CLIENT_PATCH)" ]; then \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \
+ echo " PATCH Downloading rabbitmq-codegen"; \
+ git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \
+ fi; \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-server ]; then \
+ echo " PATCH Downloading rabbitmq-server"; \
+ git clone https://github.com/rabbitmq/rabbitmq-server.git $(DEPS_DIR)/rabbitmq-server; \
+ fi; \
+ ln -s $(DEPS_DIR)/amqp_client/deps/rabbit_common-0.0.0 $(DEPS_DIR)/rabbit_common; \
+ elif [ "$(1)" = "rabbit" -a "$(RABBITMQ_SERVER_PATCH)" ]; then \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \
+ echo " PATCH Downloading rabbitmq-codegen"; \
+ git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \
+ fi \
+ elif [ "$1" = "elixir" -a "$(ELIXIR_PATCH)" ]; then \
+ ln -s lib/elixir/ebin $(DEPS_DIR)/elixir/; \
+ else \
+ $$(call dep_autopatch,$(call dep_name,$1)) \
+ fi
+endef
+
+$(foreach dep,$(BUILD_DEPS) $(DEPS),$(eval $(call dep_target,$(dep))))
+
+ifndef IS_APP
+clean:: clean-apps
+
+clean-apps:
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ $(MAKE) -C $$dep clean IS_APP=1; \
+ done
+
+distclean:: distclean-apps
+
+distclean-apps:
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ $(MAKE) -C $$dep distclean IS_APP=1; \
+ done
+endif
+
+ifndef SKIP_DEPS
+distclean:: distclean-deps
+
+distclean-deps:
+ $(gen_verbose) rm -rf $(DEPS_DIR)
+endif
+
+# Forward-declare variables used in core/deps-tools.mk. This is required
+# in case plugins use them.
+
+ERLANG_MK_RECURSIVE_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-deps-list.log
+ERLANG_MK_RECURSIVE_DOC_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-doc-deps-list.log
+ERLANG_MK_RECURSIVE_REL_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-rel-deps-list.log
+ERLANG_MK_RECURSIVE_TEST_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-test-deps-list.log
+ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-shell-deps-list.log
+
+ERLANG_MK_QUERY_DEPS_FILE = $(ERLANG_MK_TMP)/query-deps.log
+ERLANG_MK_QUERY_DOC_DEPS_FILE = $(ERLANG_MK_TMP)/query-doc-deps.log
+ERLANG_MK_QUERY_REL_DEPS_FILE = $(ERLANG_MK_TMP)/query-rel-deps.log
+ERLANG_MK_QUERY_TEST_DEPS_FILE = $(ERLANG_MK_TMP)/query-test-deps.log
+ERLANG_MK_QUERY_SHELL_DEPS_FILE = $(ERLANG_MK_TMP)/query-shell-deps.log
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: clean-app
+
+# Configuration.
+
+ERLC_OPTS ?= -Werror +debug_info +warn_export_vars +warn_shadow_vars \
+ +warn_obsolete_guard # +bin_opt_info +warn_export_all +warn_missing_spec
+COMPILE_FIRST ?=
+COMPILE_FIRST_PATHS = $(addprefix src/,$(addsuffix .erl,$(COMPILE_FIRST)))
+ERLC_EXCLUDE ?=
+ERLC_EXCLUDE_PATHS = $(addprefix src/,$(addsuffix .erl,$(ERLC_EXCLUDE)))
+
+ERLC_ASN1_OPTS ?=
+
+ERLC_MIB_OPTS ?=
+COMPILE_MIB_FIRST ?=
+COMPILE_MIB_FIRST_PATHS = $(addprefix mibs/,$(addsuffix .mib,$(COMPILE_MIB_FIRST)))
+
+# Verbosity.
+
+app_verbose_0 = @echo " APP " $(PROJECT);
+app_verbose_2 = set -x;
+app_verbose = $(app_verbose_$(V))
+
+appsrc_verbose_0 = @echo " APP " $(PROJECT).app.src;
+appsrc_verbose_2 = set -x;
+appsrc_verbose = $(appsrc_verbose_$(V))
+
+makedep_verbose_0 = @echo " DEPEND" $(PROJECT).d;
+makedep_verbose_2 = set -x;
+makedep_verbose = $(makedep_verbose_$(V))
+
+erlc_verbose_0 = @echo " ERLC " $(filter-out $(patsubst %,%.erl,$(ERLC_EXCLUDE)),\
+ $(filter %.erl %.core,$(?F)));
+erlc_verbose_2 = set -x;
+erlc_verbose = $(erlc_verbose_$(V))
+
+xyrl_verbose_0 = @echo " XYRL " $(filter %.xrl %.yrl,$(?F));
+xyrl_verbose_2 = set -x;
+xyrl_verbose = $(xyrl_verbose_$(V))
+
+asn1_verbose_0 = @echo " ASN1 " $(filter %.asn1,$(?F));
+asn1_verbose_2 = set -x;
+asn1_verbose = $(asn1_verbose_$(V))
+
+mib_verbose_0 = @echo " MIB " $(filter %.bin %.mib,$(?F));
+mib_verbose_2 = set -x;
+mib_verbose = $(mib_verbose_$(V))
+
+ifneq ($(wildcard src/),)
+
+# Targets.
+
+app:: $(if $(wildcard ebin/test),clean) deps
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d
+ $(verbose) $(MAKE) --no-print-directory app-build
+
+ifeq ($(wildcard src/$(PROJECT_MOD).erl),)
+define app_file
+{application, '$(PROJECT)', [
+ {description, "$(PROJECT_DESCRIPTION)"},
+ {vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP),
+ {id$(comma)$(space)"$(1)"}$(comma))
+ {modules, [$(call comma_list,$(2))]},
+ {registered, []},
+ {applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(foreach dep,$(DEPS),$(call dep_name,$(dep))))]},
+ {env, $(subst \,\\,$(PROJECT_ENV))}$(if $(findstring {,$(PROJECT_APP_EXTRA_KEYS)),$(comma)$(newline)$(tab)$(subst \,\\,$(PROJECT_APP_EXTRA_KEYS)),)
+]}.
+endef
+else
+define app_file
+{application, '$(PROJECT)', [
+ {description, "$(PROJECT_DESCRIPTION)"},
+ {vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP),
+ {id$(comma)$(space)"$(1)"}$(comma))
+ {modules, [$(call comma_list,$(2))]},
+ {registered, [$(call comma_list,$(PROJECT)_sup $(PROJECT_REGISTERED))]},
+ {applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(foreach dep,$(DEPS),$(call dep_name,$(dep))))]},
+ {mod, {$(PROJECT_MOD), []}},
+ {env, $(subst \,\\,$(PROJECT_ENV))}$(if $(findstring {,$(PROJECT_APP_EXTRA_KEYS)),$(comma)$(newline)$(tab)$(subst \,\\,$(PROJECT_APP_EXTRA_KEYS)),)
+]}.
+endef
+endif
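+# For example, with PROJECT = myapp and PROJECT_MOD = myapp_app, the second
+# template above renders ebin/myapp.app roughly as (illustrative values):
+#   {application, 'myapp', [
+#       {description, "..."},
+#       {vsn, "0.1.0"},
+#       {modules, ['myapp_app','myapp_sup']},
+#       {registered, [myapp_sup]},
+#       {applications, [kernel,stdlib]},
+#       {mod, {myapp_app, []}},
+#       {env, []}
+#   ]}.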
+
+app-build: ebin/$(PROJECT).app
+ $(verbose) :
+
+# Source files.
+
+ALL_SRC_FILES := $(sort $(call core_find,src/,*))
+
+ERL_FILES := $(filter %.erl,$(ALL_SRC_FILES))
+CORE_FILES := $(filter %.core,$(ALL_SRC_FILES))
+
+# ASN.1 files.
+
+ifneq ($(wildcard asn1/),)
+ASN1_FILES = $(sort $(call core_find,asn1/,*.asn1))
+ERL_FILES += $(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES))))
+
+define compile_asn1
+ $(verbose) mkdir -p include/
+ $(asn1_verbose) erlc -v -I include/ -o asn1/ +noobj $(ERLC_ASN1_OPTS) $(1)
+ $(verbose) mv asn1/*.erl src/
+ -$(verbose) mv asn1/*.hrl include/
+ $(verbose) mv asn1/*.asn1db include/
+endef
+
+$(PROJECT).d:: $(ASN1_FILES)
+ $(if $(strip $?),$(call compile_asn1,$?))
+endif
+
+# SNMP MIB files.
+
+ifneq ($(wildcard mibs/),)
+MIB_FILES = $(sort $(call core_find,mibs/,*.mib))
+
+$(PROJECT).d:: $(COMPILE_MIB_FIRST_PATHS) $(MIB_FILES)
+ $(verbose) mkdir -p include/ priv/mibs/
+ $(mib_verbose) erlc -v $(ERLC_MIB_OPTS) -o priv/mibs/ -I priv/mibs/ $?
+ $(mib_verbose) erlc -o include/ -- $(addprefix priv/mibs/,$(patsubst %.mib,%.bin,$(notdir $?)))
+endif
+
+# Leex and Yecc files.
+
+XRL_FILES := $(filter %.xrl,$(ALL_SRC_FILES))
+XRL_ERL_FILES = $(addprefix src/,$(patsubst %.xrl,%.erl,$(notdir $(XRL_FILES))))
+ERL_FILES += $(XRL_ERL_FILES)
+
+YRL_FILES := $(filter %.yrl,$(ALL_SRC_FILES))
+YRL_ERL_FILES = $(addprefix src/,$(patsubst %.yrl,%.erl,$(notdir $(YRL_FILES))))
+ERL_FILES += $(YRL_ERL_FILES)
+
+$(PROJECT).d:: $(XRL_FILES) $(YRL_FILES)
+ $(if $(strip $?),$(xyrl_verbose) erlc -v -o src/ $(YRL_ERLC_OPTS) $?)
+
+# Erlang and Core Erlang files.
+
+define makedep.erl
+ E = ets:new(makedep, [bag]),
+ G = digraph:new([acyclic]),
+ ErlFiles = lists:usort(string:tokens("$(ERL_FILES)", " ")),
+ DepsDir = "$(call core_native_path,$(DEPS_DIR))",
+ AppsDir = "$(call core_native_path,$(APPS_DIR))",
+ DepsDirsSrc = "$(if $(wildcard $(DEPS_DIR)/*/src), $(call core_native_path,$(wildcard $(DEPS_DIR)/*/src)))",
+ DepsDirsInc = "$(if $(wildcard $(DEPS_DIR)/*/include), $(call core_native_path,$(wildcard $(DEPS_DIR)/*/include)))",
+ AppsDirsSrc = "$(if $(wildcard $(APPS_DIR)/*/src), $(call core_native_path,$(wildcard $(APPS_DIR)/*/src)))",
+ AppsDirsInc = "$(if $(wildcard $(APPS_DIR)/*/include), $(call core_native_path,$(wildcard $(APPS_DIR)/*/include)))",
+ DepsDirs = lists:usort(string:tokens(DepsDirsSrc++DepsDirsInc, " ")),
+ AppsDirs = lists:usort(string:tokens(AppsDirsSrc++AppsDirsInc, " ")),
+ Modules = [{list_to_atom(filename:basename(F, ".erl")), F} || F <- ErlFiles],
+ Add = fun (Mod, Dep) ->
+ case lists:keyfind(Dep, 1, Modules) of
+ false -> ok;
+ {_, DepFile} ->
+ {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+ ets:insert(E, {ModFile, DepFile}),
+ digraph:add_vertex(G, Mod),
+ digraph:add_vertex(G, Dep),
+ digraph:add_edge(G, Mod, Dep)
+ end
+ end,
+ AddHd = fun (F, Mod, DepFile) ->
+ case file:open(DepFile, [read]) of
+ {error, enoent} ->
+ ok;
+ {ok, Fd} ->
+ {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+ case ets:match(E, {ModFile, DepFile}) of
+ [] ->
+ ets:insert(E, {ModFile, DepFile}),
+ F(F, Fd, Mod, 0);
+ _ -> ok
+ end
+ end
+ end,
+ SearchHrl = fun
+ F(_Hrl, []) -> {error, enoent};
+ F(Hrl, [Dir|Dirs]) ->
+ HrlF = filename:join([Dir, Hrl]),
+ case filelib:is_file(HrlF) of
+ true ->
+ {ok, HrlF};
+ false -> F(Hrl, Dirs)
+ end
+ end,
+ Attr = fun
+ (_F, Mod, behavior, Dep) ->
+ Add(Mod, Dep);
+ (_F, Mod, behaviour, Dep) ->
+ Add(Mod, Dep);
+ (_F, Mod, compile, {parse_transform, Dep}) ->
+ Add(Mod, Dep);
+ (_F, Mod, compile, Opts) when is_list(Opts) ->
+ case proplists:get_value(parse_transform, Opts) of
+ undefined -> ok;
+ Dep -> Add(Mod, Dep)
+ end;
+ (F, Mod, include, Hrl) ->
+ case SearchHrl(Hrl, ["src", "include", AppsDir, DepsDir] ++ AppsDirs ++ DepsDirs) of
+ {ok, FoundHrl} -> AddHd(F, Mod, FoundHrl);
+ {error, _} -> false
+ end;
+ (F, Mod, include_lib, Hrl) ->
+ case SearchHrl(Hrl, ["src", "include", AppsDir, DepsDir] ++ AppsDirs ++ DepsDirs) of
+ {ok, FoundHrl} -> AddHd(F, Mod, FoundHrl);
+ {error, _} -> false
+ end;
+ (F, Mod, import, {Imp, _}) ->
+ IsFile =
+ case lists:keyfind(Imp, 1, Modules) of
+ false -> false;
+ {_, FilePath} -> filelib:is_file(FilePath)
+ end,
+ case IsFile of
+ false -> ok;
+ true -> Add(Mod, Imp)
+ end;
+ (_, _, _, _) -> ok
+ end,
+ MakeDepend = fun
+ (F, Fd, Mod, StartLocation) ->
+ {ok, Filename} = file:pid2name(Fd),
+ case io:parse_erl_form(Fd, undefined, StartLocation) of
+ {ok, AbsData, EndLocation} ->
+ case AbsData of
+ {attribute, _, Key, Value} ->
+ Attr(F, Mod, Key, Value),
+ F(F, Fd, Mod, EndLocation);
+ _ -> F(F, Fd, Mod, EndLocation)
+ end;
+ {eof, _} -> file:close(Fd);
+ {error, _ErrorDescription} ->
+ file:close(Fd);
+ {error, ErrorInfo, ErrorLocation} ->
+ F(F, Fd, Mod, ErrorLocation)
+ end,
+ ok
+ end,
+ [begin
+ Mod = list_to_atom(filename:basename(F, ".erl")),
+ case file:open(F, [read]) of
+ {ok, Fd} -> MakeDepend(MakeDepend, Fd, Mod, 0);
+ {error, enoent} -> ok
+ end
+ end || F <- ErlFiles],
+ Depend = sofs:to_external(sofs:relation_to_family(sofs:relation(ets:tab2list(E)))),
+ CompileFirst = [X || X <- lists:reverse(digraph_utils:topsort(G)), [] =/= digraph:in_neighbours(G, X)],
+ TargetPath = fun(Target) ->
+ case lists:keyfind(Target, 1, Modules) of
+ false -> "";
+ {_, DepFile} ->
+ DirSubname = tl(string:tokens(filename:dirname(DepFile), "/")),
+ string:join(DirSubname ++ [atom_to_list(Target)], "/")
+ end
+ end,
+ Output0 = [
+ "# Generated by Erlang.mk. Edit at your own risk!\n\n",
+ [[F, "::", [[" ", D] || D <- Deps], "; @touch \$$@\n"] || {F, Deps} <- Depend],
+ "\nCOMPILE_FIRST +=", [[" ", TargetPath(CF)] || CF <- CompileFirst], "\n"
+ ],
+ Output = case "é" of
+ [233] -> unicode:characters_to_binary(Output0);
+ _ -> Output0
+ end,
+ ok = file:write_file("$(1)", Output),
+ halt()
+endef
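+# The generated $(PROJECT).d is a plain Makefile fragment, e.g. (illustrative
+# module names):
+#   src/my_mod.erl:: src/my_dep.erl include/my_hdr.hrl; @touch $@
+#   COMPILE_FIRST += my_dep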
+
+ifeq ($(if $(NO_MAKEDEP),$(wildcard $(PROJECT).d),),)
+$(PROJECT).d:: $(ERL_FILES) $(call core_find,include/,*.hrl) $(MAKEFILE_LIST)
+ $(makedep_verbose) $(call erlang,$(call makedep.erl,$@))
+endif
+
+ifeq ($(IS_APP)$(IS_DEP),)
+ifneq ($(words $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES)),0)
+# Rebuild everything when the Makefile changes.
+$(ERLANG_MK_TMP)/last-makefile-change: $(MAKEFILE_LIST) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES); \
+ touch -c $(PROJECT).d; \
+ fi
+ $(verbose) touch $@
+
+$(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES):: $(ERLANG_MK_TMP)/last-makefile-change
+ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change
+endif
+endif
+
+$(PROJECT).d::
+ $(verbose) :
+
+include $(wildcard $(PROJECT).d)
+
+ebin/$(PROJECT).app:: ebin/
+
+ebin/:
+ $(verbose) mkdir -p ebin/
+
+define compile_erl
+ $(erlc_verbose) erlc -v $(if $(IS_DEP),$(filter-out -Werror,$(ERLC_OPTS)),$(ERLC_OPTS)) -o ebin/ \
+ -pa ebin/ -I include/ $(filter-out $(ERLC_EXCLUDE_PATHS),$(COMPILE_FIRST_PATHS) $(1))
+endef
+
+define validate_app_file
+ case file:consult("ebin/$(PROJECT).app") of
+ {ok, _} -> halt();
+ _ -> halt(1)
+ end
+endef
+
+ebin/$(PROJECT).app:: $(ERL_FILES) $(CORE_FILES) $(wildcard src/$(PROJECT).app.src)
+ $(eval FILES_TO_COMPILE := $(filter-out src/$(PROJECT).app.src,$?))
+ $(if $(strip $(FILES_TO_COMPILE)),$(call compile_erl,$(FILES_TO_COMPILE)))
+# Older git versions do not have the --first-parent flag. Do without in that case.
+ $(eval GITDESCRIBE := $(shell git describe --dirty --abbrev=7 --tags --always --first-parent 2>/dev/null \
+ || git describe --dirty --abbrev=7 --tags --always 2>/dev/null || true))
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(filter-out $(ERLC_EXCLUDE_PATHS),$(ERL_FILES) $(CORE_FILES) $(BEAM_FILES)))))))
+ifeq ($(wildcard src/$(PROJECT).app.src),)
+ $(app_verbose) printf '$(subst %,%%,$(subst $(newline),\n,$(subst ','\'',$(call app_file,$(GITDESCRIBE),$(MODULES)))))' \
+ > ebin/$(PROJECT).app
+ $(verbose) if ! $(call erlang,$(call validate_app_file)); then \
+ echo "The .app file produced is invalid. Please verify the value of PROJECT_ENV." >&2; \
+ exit 1; \
+ fi
+else
+ $(verbose) if [ -z "$$(grep -e '^[^%]*{\s*modules\s*,' src/$(PROJECT).app.src)" ]; then \
+ echo "No empty {modules, []} entry found in src/$(PROJECT).app.src. Please consult the erlang.mk documentation for instructions." >&2; \
+ exit 1; \
+ fi
+ $(appsrc_verbose) cat src/$(PROJECT).app.src \
+ | sed "s/{[[:space:]]*modules[[:space:]]*,[[:space:]]*\[\]}/{modules, \[$(call comma_list,$(MODULES))\]}/" \
+ | sed "s/{id,[[:space:]]*\"git\"}/{id, \"$(subst /,\/,$(GITDESCRIBE))\"}/" \
+ > ebin/$(PROJECT).app
+endif
+ifneq ($(wildcard src/$(PROJECT).appup),)
+ $(verbose) cp src/$(PROJECT).appup ebin/
+endif
+
+clean:: clean-app
+
+clean-app:
+ $(gen_verbose) rm -rf $(PROJECT).d ebin/ priv/mibs/ $(XRL_ERL_FILES) $(YRL_ERL_FILES) \
+ $(addprefix include/,$(patsubst %.mib,%.hrl,$(notdir $(MIB_FILES)))) \
+ $(addprefix include/,$(patsubst %.asn1,%.hrl,$(notdir $(ASN1_FILES)))) \
+ $(addprefix include/,$(patsubst %.asn1,%.asn1db,$(notdir $(ASN1_FILES)))) \
+ $(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES))))
+
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Viktor Söderqvist <viktor@zuiderkwast.se>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: docs-deps
+
+# Configuration.
+
+ALL_DOC_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(DOC_DEPS))
+
+# Targets.
+
+$(foreach dep,$(DOC_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+doc-deps:
+else
+doc-deps: $(ALL_DOC_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_DOC_DEPS_DIRS) ; do $(MAKE) -C $$dep IS_DEP=1; done
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: rel-deps
+
+# Configuration.
+
+ALL_REL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(REL_DEPS))
+
+# Targets.
+
+$(foreach dep,$(REL_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+rel-deps:
+else
+rel-deps: $(ALL_REL_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_REL_DEPS_DIRS) ; do $(MAKE) -C $$dep; done
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: test-deps test-dir test-build clean-test-dir
+
+# Configuration.
+
+TEST_DIR ?= $(CURDIR)/test
+
+ALL_TEST_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(TEST_DEPS))
+
+TEST_ERLC_OPTS ?= +debug_info +warn_export_vars +warn_shadow_vars +warn_obsolete_guard
+TEST_ERLC_OPTS += -DTEST=1
+
+# Targets.
+
+$(foreach dep,$(TEST_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+test-deps:
+else
+test-deps: $(ALL_TEST_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_TEST_DEPS_DIRS) ; do \
+ if [ -z "$(strip $(FULL))" ] && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ else \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ fi \
+ done
+endif
+
+ifneq ($(wildcard $(TEST_DIR)),)
+test-dir: $(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build
+ @:
+
+test_erlc_verbose_0 = @echo " ERLC " $(filter-out $(patsubst %,%.erl,$(ERLC_EXCLUDE)),\
+ $(filter %.erl %.core,$(notdir $(FILES_TO_COMPILE))));
+test_erlc_verbose_2 = set -x;
+test_erlc_verbose = $(test_erlc_verbose_$(V))
+
+define compile_test_erl
+ $(test_erlc_verbose) erlc -v $(TEST_ERLC_OPTS) -o $(TEST_DIR) \
+ -pa ebin/ -I include/ $(1)
+endef
+
+ERL_TEST_FILES = $(call core_find,$(TEST_DIR)/,*.erl)
+$(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build: $(ERL_TEST_FILES) $(MAKEFILE_LIST)
+ $(eval FILES_TO_COMPILE := $(if $(filter $(MAKEFILE_LIST),$?),$(filter $(ERL_TEST_FILES),$^),$?))
+ $(if $(strip $(FILES_TO_COMPILE)),$(call compile_test_erl,$(FILES_TO_COMPILE)) && touch $@)
+endif
+
+test-build:: IS_TEST=1
+test-build:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build:: $(if $(wildcard src),$(if $(wildcard ebin/test),,clean)) $(if $(IS_APP),,deps test-deps)
+# We already compiled everything when IS_APP=1.
+ifndef IS_APP
+ifneq ($(wildcard src),)
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(verbose) $(MAKE) --no-print-directory app-build ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(gen_verbose) touch ebin/test
+endif
+ifneq ($(wildcard $(TEST_DIR)),)
+ $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+endif
+endif
+
+# Roughly the same as test-build, but when IS_APP=1.
+# We only care about compiling the current application.
+ifdef IS_APP
+test-build-app:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build-app:: deps test-deps
+ifneq ($(wildcard src),)
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(verbose) $(MAKE) --no-print-directory app-build ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(gen_verbose) touch ebin/test
+endif
+ifneq ($(wildcard $(TEST_DIR)),)
+ $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+endif
+endif
+
+clean:: clean-test-dir
+
+clean-test-dir:
+ifneq ($(wildcard $(TEST_DIR)/*.beam),)
+ $(gen_verbose) rm -f $(TEST_DIR)/*.beam $(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: rebar.config
+
+# We strip out -Werror because we don't want to fail due to
+# warnings when used as a dependency.
+
+compat_prepare_erlc_opts = $(shell echo "$1" | sed 's/, */,/g')
+
+define compat_convert_erlc_opts
+$(if $(filter-out -Werror,$1),\
+ $(if $(findstring +,$1),\
+ $(shell echo $1 | cut -b 2-)))
+endef
+
+define compat_erlc_opts_to_list
+[$(call comma_list,$(foreach o,$(call compat_prepare_erlc_opts,$1),$(call compat_convert_erlc_opts,$o)))]
+endef
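+# For example, ERLC_OPTS = -Werror +debug_info +warn_export_vars converts to
+# [debug_info,warn_export_vars]: -Werror is dropped and the leading + of each
+# remaining option is stripped.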
+
+define compat_rebar_config
+{deps, [
+$(call comma_list,$(foreach d,$(DEPS),\
+ $(if $(filter hex,$(call dep_fetch,$d)),\
+ {$(call dep_name,$d)$(comma)"$(call dep_repo,$d)"},\
+ {$(call dep_name,$d)$(comma)".*"$(comma){git,"$(call dep_repo,$d)"$(comma)"$(call dep_commit,$d)"}})))
+]}.
+{erl_opts, $(call compat_erlc_opts_to_list,$(ERLC_OPTS))}.
+endef
+
+rebar.config:
+ $(gen_verbose) $(call core_render,compat_rebar_config,rebar.config)
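+# For example, with DEPS = cowlib and
+#   dep_cowlib = git https://github.com/ninenines/cowlib 2.9.1
+# "make rebar.config" writes:
+#   {deps, [{cowlib,".*",{git,"https://github.com/ninenines/cowlib","2.9.1"}}]}.
+# followed by the {erl_opts, ...} line rendered from ERLC_OPTS.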
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter asciideck,$(DEPS) $(DOC_DEPS)),asciideck)
+
+.PHONY: asciidoc asciidoc-guide asciidoc-manual install-asciidoc distclean-asciidoc-guide distclean-asciidoc-manual
+
+# Core targets.
+
+docs:: asciidoc
+
+distclean:: distclean-asciidoc-guide distclean-asciidoc-manual
+
+# Plugin-specific targets.
+
+asciidoc: asciidoc-guide asciidoc-manual
+
+# User guide.
+
+ifeq ($(wildcard doc/src/guide/book.asciidoc),)
+asciidoc-guide:
+else
+asciidoc-guide: distclean-asciidoc-guide doc-deps
+ a2x -v -f pdf doc/src/guide/book.asciidoc && mv doc/src/guide/book.pdf doc/guide.pdf
+ a2x -v -f chunked doc/src/guide/book.asciidoc && mv doc/src/guide/book.chunked/ doc/html/
+
+distclean-asciidoc-guide:
+ $(gen_verbose) rm -rf doc/html/ doc/guide.pdf
+endif
+
+# Man pages.
+
+ASCIIDOC_MANUAL_FILES := $(wildcard doc/src/manual/*.asciidoc)
+
+ifeq ($(ASCIIDOC_MANUAL_FILES),)
+asciidoc-manual:
+else
+
+# Configuration.
+
+MAN_INSTALL_PATH ?= /usr/local/share/man
+MAN_SECTIONS ?= 3 7
+MAN_PROJECT ?= $(shell echo $(PROJECT) | sed 's/^./\U&\E/')
+MAN_VERSION ?= $(PROJECT_VERSION)
+
+# Plugin-specific targets.
+
+define asciidoc2man.erl
+try
+ [begin
+ io:format(" ADOC ~s~n", [F]),
+ ok = asciideck:to_manpage(asciideck:parse_file(F), #{
+ compress => gzip,
+ outdir => filename:dirname(F),
+ extra2 => "$(MAN_PROJECT) $(MAN_VERSION)",
+ extra3 => "$(MAN_PROJECT) Function Reference"
+ })
+ end || F <- [$(shell echo $(addprefix $(comma)\",$(addsuffix \",$1)) | sed 's/^.//')]],
+ halt(0)
+catch C:E ->
+ io:format("Exception ~p:~p~nStacktrace: ~p~n", [C, E, erlang:get_stacktrace()]),
+ halt(1)
+end.
+endef
+
+asciidoc-manual:: doc-deps
+
+asciidoc-manual:: $(ASCIIDOC_MANUAL_FILES)
+ $(gen_verbose) $(call erlang,$(call asciidoc2man.erl,$?))
+ $(verbose) $(foreach s,$(MAN_SECTIONS),mkdir -p doc/man$s/ && mv doc/src/manual/*.$s.gz doc/man$s/;)
+
+install-docs:: install-asciidoc
+
+install-asciidoc: asciidoc-manual
+ $(foreach s,$(MAN_SECTIONS),\
+ mkdir -p $(MAN_INSTALL_PATH)/man$s/ && \
+ install -g `id -g` -o `id -u` -m 0644 doc/man$s/*.gz $(MAN_INSTALL_PATH)/man$s/;)
+
+distclean-asciidoc-manual:
+ $(gen_verbose) rm -rf $(addprefix doc/man,$(MAN_SECTIONS))
+endif
+endif
+
+# Copyright (c) 2014-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: bootstrap bootstrap-lib bootstrap-rel new list-templates
+
+# Core targets.
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Bootstrap targets:" \
+ " bootstrap Generate a skeleton of an OTP application" \
+ " bootstrap-lib Generate a skeleton of an OTP library" \
+ " bootstrap-rel Generate the files needed to build a release" \
+ " new-app in=NAME Create a new local OTP application NAME" \
+ " new-lib in=NAME Create a new local OTP library NAME" \
+ " new t=TPL n=NAME Generate a module NAME based on the template TPL" \
+ " new t=T n=N in=APP Generate a module NAME based on the template TPL in APP" \
+ " list-templates List available templates"
+
+# Bootstrap templates.
+
+define bs_appsrc
+{application, $p, [
+ {description, ""},
+ {vsn, "0.1.0"},
+ {id, "git"},
+ {modules, []},
+ {registered, []},
+ {applications, [
+ kernel,
+ stdlib
+ ]},
+ {mod, {$p_app, []}},
+ {env, []}
+]}.
+endef
+
+define bs_appsrc_lib
+{application, $p, [
+ {description, ""},
+ {vsn, "0.1.0"},
+ {id, "git"},
+ {modules, []},
+ {registered, []},
+ {applications, [
+ kernel,
+ stdlib
+ ]}
+]}.
+endef
+
+# To prevent autocompletion issues with ZSH, we add "include erlang.mk"
+# separately during the actual bootstrap.
+define bs_Makefile
+PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.1.0
+$(if $(SP),
+# Whitespace to be used when creating files from templates.
+SP = $(SP)
+)
+endef
+
+define bs_apps_Makefile
+PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.1.0
+$(if $(SP),
+# Whitespace to be used when creating files from templates.
+SP = $(SP)
+)
+# Make sure we know where the applications are located.
+ROOT_DIR ?= $(call core_relpath,$(dir $(ERLANG_MK_FILENAME)),$(APPS_DIR)/app)
+APPS_DIR ?= ..
+DEPS_DIR ?= $(call core_relpath,$(DEPS_DIR),$(APPS_DIR)/app)
+
+include $$(ROOT_DIR)/erlang.mk
+endef
+
+define bs_app
+-module($p_app).
+-behaviour(application).
+
+-export([start/2]).
+-export([stop/1]).
+
+start(_Type, _Args) ->
+ $p_sup:start_link().
+
+stop(_State) ->
+ ok.
+endef
+
+define bs_relx_config
+{release, {$p_release, "1"}, [$p, sasl, runtime_tools]}.
+{extended_start_script, true}.
+{sys_config, "config/sys.config"}.
+{vm_args, "config/vm.args"}.
+endef
+
+define bs_sys_config
+[
+].
+endef
+
+define bs_vm_args
+-name $p@127.0.0.1
+-setcookie $p
+-heart
+endef
+
+# Normal templates.
+
+define tpl_supervisor
+-module($(n)).
+-behaviour(supervisor).
+
+-export([start_link/0]).
+-export([init/1]).
+
+start_link() ->
+ supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+init([]) ->
+ Procs = [],
+ {ok, {{one_for_one, 1, 5}, Procs}}.
+endef
+
+define tpl_gen_server
+-module($(n)).
+-behaviour(gen_server).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_server.
+-export([init/1]).
+-export([handle_call/3]).
+-export([handle_cast/2]).
+-export([handle_info/2]).
+-export([terminate/2]).
+-export([code_change/3]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_server:start_link(?MODULE, [], []).
+
+%% gen_server.
+
+init([]) ->
+ {ok, #state{}}.
+
+handle_call(_Request, _From, State) ->
+ {reply, ignored, State}.
+
+handle_cast(_Msg, State) ->
+ {noreply, State}.
+
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+endef
+
+define tpl_module
+-module($(n)).
+-export([]).
+endef
+
+define tpl_cowboy_http
+-module($(n)).
+-behaviour(cowboy_http_handler).
+
+-export([init/3]).
+-export([handle/2]).
+-export([terminate/3]).
+
+-record(state, {
+}).
+
+init(_, Req, _Opts) ->
+ {ok, Req, #state{}}.
+
+handle(Req, State=#state{}) ->
+ {ok, Req2} = cowboy_req:reply(200, Req),
+ {ok, Req2, State}.
+
+terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_gen_fsm
+-module($(n)).
+-behaviour(gen_fsm).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_fsm.
+-export([init/1]).
+-export([state_name/2]).
+-export([handle_event/3]).
+-export([state_name/3]).
+-export([handle_sync_event/4]).
+-export([handle_info/3]).
+-export([terminate/3]).
+-export([code_change/4]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_fsm:start_link(?MODULE, [], []).
+
+%% gen_fsm.
+
+init([]) ->
+ {ok, state_name, #state{}}.
+
+state_name(_Event, StateData) ->
+ {next_state, state_name, StateData}.
+
+handle_event(_Event, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+state_name(_Event, _From, StateData) ->
+ {reply, ignored, state_name, StateData}.
+
+handle_sync_event(_Event, _From, StateName, StateData) ->
+ {reply, ignored, StateName, StateData}.
+
+handle_info(_Info, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+terminate(_Reason, _StateName, _StateData) ->
+ ok.
+
+code_change(_OldVsn, StateName, StateData, _Extra) ->
+ {ok, StateName, StateData}.
+endef
+
+define tpl_gen_statem
+-module($(n)).
+-behaviour(gen_statem).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_statem.
+-export([callback_mode/0]).
+-export([init/1]).
+-export([state_name/3]).
+-export([handle_event/4]).
+-export([terminate/3]).
+-export([code_change/4]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_statem:start_link(?MODULE, [], []).
+
+%% gen_statem.
+
+callback_mode() ->
+ state_functions.
+
+init([]) ->
+ {ok, state_name, #state{}}.
+
+state_name(_EventType, _EventData, StateData) ->
+ {next_state, state_name, StateData}.
+
+handle_event(_EventType, _EventData, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+terminate(_Reason, _StateName, _StateData) ->
+ ok.
+
+code_change(_OldVsn, StateName, StateData, _Extra) ->
+ {ok, StateName, StateData}.
+endef
+
+define tpl_cowboy_loop
+-module($(n)).
+-behaviour(cowboy_loop_handler).
+
+-export([init/3]).
+-export([info/3]).
+-export([terminate/3]).
+
+-record(state, {
+}).
+
+init(_, Req, _Opts) ->
+ {loop, Req, #state{}, 5000, hibernate}.
+
+info(_Info, Req, State) ->
+ {loop, Req, State, hibernate}.
+
+terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_cowboy_rest
+-module($(n)).
+
+-export([init/3]).
+-export([content_types_provided/2]).
+-export([get_html/2]).
+
+init(_, _Req, _Opts) ->
+ {upgrade, protocol, cowboy_rest}.
+
+content_types_provided(Req, State) ->
+ {[{{<<"text">>, <<"html">>, '*'}, get_html}], Req, State}.
+
+get_html(Req, State) ->
+ {<<"<html><body>This is REST!</body></html>">>, Req, State}.
+endef
+
+define tpl_cowboy_ws
+-module($(n)).
+-behaviour(cowboy_websocket_handler).
+
+-export([init/3]).
+-export([websocket_init/3]).
+-export([websocket_handle/3]).
+-export([websocket_info/3]).
+-export([websocket_terminate/3]).
+
+-record(state, {
+}).
+
+init(_, _, _) ->
+ {upgrade, protocol, cowboy_websocket}.
+
+websocket_init(_, Req, _Opts) ->
+ Req2 = cowboy_req:compact(Req),
+ {ok, Req2, #state{}}.
+
+websocket_handle({text, Data}, Req, State) ->
+ {reply, {text, Data}, Req, State};
+websocket_handle({binary, Data}, Req, State) ->
+ {reply, {binary, Data}, Req, State};
+websocket_handle(_Frame, Req, State) ->
+ {ok, Req, State}.
+
+websocket_info(_Info, Req, State) ->
+ {ok, Req, State}.
+
+websocket_terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_ranch_protocol
+-module($(n)).
+-behaviour(ranch_protocol).
+
+-export([start_link/4]).
+-export([init/4]).
+
+-type opts() :: [].
+-export_type([opts/0]).
+
+-record(state, {
+ socket :: inet:socket(),
+ transport :: module()
+}).
+
+start_link(Ref, Socket, Transport, Opts) ->
+ Pid = spawn_link(?MODULE, init, [Ref, Socket, Transport, Opts]),
+ {ok, Pid}.
+
+-spec init(ranch:ref(), inet:socket(), module(), opts()) -> ok.
+init(Ref, Socket, Transport, _Opts) ->
+ ok = ranch:accept_ack(Ref),
+ loop(#state{socket=Socket, transport=Transport}).
+
+loop(State) ->
+ loop(State).
+endef
+
+# Plugin-specific targets.
+
+ifndef WS
+ifdef SP
+WS = $(subst a,,a $(wordlist 1,$(SP),a a a a a a a a a a a a a a a a a a a a))
+else
+WS = $(tab)
+endif
+endif
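+# For example, "make bootstrap SP=2" renders the generated files with 2-space
+# indentation instead of the default tab: WS above is the whitespace used in
+# place of tabs when files are created from templates.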
+
+bootstrap:
+ifneq ($(wildcard src/),)
+ $(error Error: src/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(eval n := $(PROJECT)_sup)
+ $(verbose) $(call core_render,bs_Makefile,Makefile)
+ $(verbose) echo "include erlang.mk" >> Makefile
+ $(verbose) mkdir src/
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc,src/$(PROJECT).app.src)
+endif
+ $(verbose) $(call core_render,bs_app,src/$(PROJECT)_app.erl)
+ $(verbose) $(call core_render,tpl_supervisor,src/$(PROJECT)_sup.erl)
+
+bootstrap-lib:
+ifneq ($(wildcard src/),)
+ $(error Error: src/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(verbose) $(call core_render,bs_Makefile,Makefile)
+ $(verbose) echo "include erlang.mk" >> Makefile
+ $(verbose) mkdir src/
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc_lib,src/$(PROJECT).app.src)
+endif
+
+bootstrap-rel:
+ifneq ($(wildcard relx.config),)
+ $(error Error: relx.config already exists)
+endif
+ifneq ($(wildcard config/),)
+ $(error Error: config/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(verbose) $(call core_render,bs_relx_config,relx.config)
+ $(verbose) mkdir config/
+ $(verbose) $(call core_render,bs_sys_config,config/sys.config)
+ $(verbose) $(call core_render,bs_vm_args,config/vm.args)
+
+new-app:
+ifndef in
+ $(error Usage: $(MAKE) new-app in=APP)
+endif
+ifneq ($(wildcard $(APPS_DIR)/$in),)
+ $(error Error: Application $in already exists)
+endif
+ $(eval p := $(in))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(eval n := $(in)_sup)
+ $(verbose) mkdir -p $(APPS_DIR)/$p/src/
+ $(verbose) $(call core_render,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile)
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc,$(APPS_DIR)/$p/src/$p.app.src)
+endif
+ $(verbose) $(call core_render,bs_app,$(APPS_DIR)/$p/src/$p_app.erl)
+ $(verbose) $(call core_render,tpl_supervisor,$(APPS_DIR)/$p/src/$p_sup.erl)
+
+new-lib:
+ifndef in
+ $(error Usage: $(MAKE) new-lib in=APP)
+endif
+ifneq ($(wildcard $(APPS_DIR)/$in),)
+ $(error Error: Application $in already exists)
+endif
+ $(eval p := $(in))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(verbose) mkdir -p $(APPS_DIR)/$p/src/
+ $(verbose) $(call core_render,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile)
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc_lib,$(APPS_DIR)/$p/src/$p.app.src)
+endif
+
+new:
+ifeq ($(wildcard src/)$(in),)
+ $(error Error: src/ directory does not exist)
+endif
+ifndef t
+ $(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP])
+endif
+ifndef n
+ $(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP])
+endif
+ifdef in
+ $(verbose) $(call core_render,tpl_$(t),$(APPS_DIR)/$(in)/src/$(n).erl)
+else
+ $(verbose) $(call core_render,tpl_$(t),src/$(n).erl)
+endif
+
+list-templates:
+	$(verbose) echo Available templates:
+ $(verbose) printf " %s\n" $(sort $(patsubst tpl_%,%,$(filter tpl_%,$(.VARIABLES))))
+
+# Copyright (c) 2014-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: clean-c_src distclean-c_src-env
+
+# Configuration.
+
+C_SRC_DIR ?= $(CURDIR)/c_src
+C_SRC_ENV ?= $(C_SRC_DIR)/env.mk
+C_SRC_OUTPUT ?= $(CURDIR)/priv/$(PROJECT)
+C_SRC_TYPE ?= shared
+
+# System type and C compiler/flags.
+
+ifeq ($(PLATFORM),msys2)
+ C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?= .exe
+ C_SRC_OUTPUT_SHARED_EXTENSION ?= .dll
+else
+ C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?=
+ C_SRC_OUTPUT_SHARED_EXTENSION ?= .so
+endif
+
+ifeq ($(C_SRC_TYPE),shared)
+ C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_SHARED_EXTENSION)
+else
+ C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_EXECUTABLE_EXTENSION)
+endif
+
+ifeq ($(PLATFORM),msys2)
+# We hardcode the compiler used on MSYS2. The default CC=cc does
+# not produce working code. The "gcc" MSYS2 package also doesn't.
+ CC = /mingw64/bin/gcc
+ export CC
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),darwin)
+ CC ?= cc
+ CFLAGS ?= -O3 -std=c99 -arch x86_64 -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -arch x86_64 -Wall
+ LDFLAGS ?= -arch x86_64 -flat_namespace -undefined suppress
+else ifeq ($(PLATFORM),freebsd)
+ CC ?= cc
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),linux)
+ CC ?= gcc
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+endif
+
+ifneq ($(PLATFORM),msys2)
+ CFLAGS += -fPIC
+ CXXFLAGS += -fPIC
+endif
+
+CFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
+CXXFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
+
+LDLIBS += -L"$(ERL_INTERFACE_LIB_DIR)" -lei
+
+# Verbosity.
+
+c_verbose_0 = @echo " C " $(filter-out $(notdir $(MAKEFILE_LIST) $(C_SRC_ENV)),$(^F));
+c_verbose = $(c_verbose_$(V))
+
+cpp_verbose_0 = @echo " CPP " $(filter-out $(notdir $(MAKEFILE_LIST) $(C_SRC_ENV)),$(^F));
+cpp_verbose = $(cpp_verbose_$(V))
+
+link_verbose_0 = @echo " LD " $(@F);
+link_verbose = $(link_verbose_$(V))
+
+# Targets.
+
+ifeq ($(wildcard $(C_SRC_DIR)),)
+else ifneq ($(wildcard $(C_SRC_DIR)/Makefile),)
+app:: app-c_src
+
+test-build:: app-c_src
+
+app-c_src:
+ $(MAKE) -C $(C_SRC_DIR)
+
+clean::
+ $(MAKE) -C $(C_SRC_DIR) clean
+
+else
+
+ifeq ($(SOURCES),)
+SOURCES := $(sort $(foreach pat,*.c *.C *.cc *.cpp,$(call core_find,$(C_SRC_DIR)/,$(pat))))
+endif
+OBJECTS = $(addsuffix .o, $(basename $(SOURCES)))
+
+COMPILE_C = $(c_verbose) $(CC) $(CFLAGS) $(CPPFLAGS) -c
+COMPILE_CPP = $(cpp_verbose) $(CXX) $(CXXFLAGS) $(CPPFLAGS) -c
+
+app:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
+
+test-build:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
+
+$(C_SRC_OUTPUT_FILE): $(OBJECTS)
+ $(verbose) mkdir -p $(dir $@)
+ $(link_verbose) $(CC) $(OBJECTS) \
+ $(LDFLAGS) $(if $(filter $(C_SRC_TYPE),shared),-shared) $(LDLIBS) \
+ -o $(C_SRC_OUTPUT_FILE)
+
+$(OBJECTS): $(MAKEFILE_LIST) $(C_SRC_ENV)
+
+%.o: %.c
+ $(COMPILE_C) $(OUTPUT_OPTION) $<
+
+%.o: %.cc
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+%.o: %.C
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+%.o: %.cpp
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+clean:: clean-c_src
+
+clean-c_src:
+ $(gen_verbose) rm -f $(C_SRC_OUTPUT_FILE) $(OBJECTS)
+
+endif
+
+ifneq ($(wildcard $(C_SRC_DIR)),)
+ERL_ERTS_DIR = $(shell $(ERL) -eval 'io:format("~s~n", [code:lib_dir(erts)]), halt().')
+
+$(C_SRC_ENV):
+ $(verbose) $(ERL) -eval "file:write_file(\"$(call core_native_path,$(C_SRC_ENV))\", \
+ io_lib:format( \
+ \"# Generated by Erlang.mk. Edit at your own risk!~n~n\" \
+ \"ERTS_INCLUDE_DIR ?= ~s/erts-~s/include/~n\" \
+ \"ERL_INTERFACE_INCLUDE_DIR ?= ~s~n\" \
+ \"ERL_INTERFACE_LIB_DIR ?= ~s~n\" \
+ \"ERTS_DIR ?= $(ERL_ERTS_DIR)~n\", \
+ [code:root_dir(), erlang:system_info(version), \
+ code:lib_dir(erl_interface, include), \
+ code:lib_dir(erl_interface, lib)])), \
+ halt()."
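+# The generated env.mk looks like the following (paths are illustrative and
+# depend on the local Erlang/OTP installation):
+#   ERTS_INCLUDE_DIR ?= /usr/lib/erlang/erts-11.1/include/
+#   ERL_INTERFACE_INCLUDE_DIR ?= /usr/lib/erlang/lib/erl_interface-4.0.1/include
+#   ERL_INTERFACE_LIB_DIR ?= /usr/lib/erlang/lib/erl_interface-4.0.1/lib
+#   ERTS_DIR ?= /usr/lib/erlang/lib/erts-11.1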
+
+distclean:: distclean-c_src-env
+
+distclean-c_src-env:
+ $(gen_verbose) rm -f $(C_SRC_ENV)
+
+-include $(C_SRC_ENV)
+
+ifneq ($(ERL_ERTS_DIR),$(ERTS_DIR))
+$(shell rm -f $(C_SRC_ENV))
+endif
+endif
+
+# Templates.
+
+define bs_c_nif
+#include "erl_nif.h"
+
+static int loads = 0;
+
+static int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
+{
+ /* Initialize private data. */
+ *priv_data = NULL;
+
+ loads++;
+
+ return 0;
+}
+
+static int upgrade(ErlNifEnv* env, void** priv_data, void** old_priv_data, ERL_NIF_TERM load_info)
+{
+ /* Convert the private data to the new version. */
+ *priv_data = *old_priv_data;
+
+ loads++;
+
+ return 0;
+}
+
+static void unload(ErlNifEnv* env, void* priv_data)
+{
+ if (loads == 1) {
+ /* Destroy the private data. */
+ }
+
+ loads--;
+}
+
+static ERL_NIF_TERM hello(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ if (enif_is_atom(env, argv[0])) {
+ return enif_make_tuple2(env,
+ enif_make_atom(env, "hello"),
+ argv[0]);
+ }
+
+ return enif_make_tuple2(env,
+ enif_make_atom(env, "error"),
+ enif_make_atom(env, "badarg"));
+}
+
+static ErlNifFunc nif_funcs[] = {
+ {"hello", 1, hello}
+};
+
+ERL_NIF_INIT($n, nif_funcs, load, NULL, upgrade, unload)
+endef
+
+define bs_erl_nif
+-module($n).
+
+-export([hello/1]).
+
+-on_load(on_load/0).
+on_load() ->
+ PrivDir = case code:priv_dir(?MODULE) of
+ {error, _} ->
+ AppPath = filename:dirname(filename:dirname(code:which(?MODULE))),
+ filename:join(AppPath, "priv");
+ Path ->
+ Path
+ end,
+ erlang:load_nif(filename:join(PrivDir, atom_to_list(?MODULE)), 0).
+
+hello(_) ->
+ erlang:nif_error({not_loaded, ?MODULE}).
+endef
+
+new-nif:
+ifneq ($(wildcard $(C_SRC_DIR)/$n.c),)
+ $(error Error: $(C_SRC_DIR)/$n.c already exists)
+endif
+ifneq ($(wildcard src/$n.erl),)
+ $(error Error: src/$n.erl already exists)
+endif
+ifndef n
+ $(error Usage: $(MAKE) new-nif n=NAME [in=APP])
+endif
+ifdef in
+ $(verbose) $(MAKE) -C $(APPS_DIR)/$(in)/ new-nif n=$n in=
+else
+ $(verbose) mkdir -p $(C_SRC_DIR) src/
+ $(verbose) $(call core_render,bs_c_nif,$(C_SRC_DIR)/$n.c)
+ $(verbose) $(call core_render,bs_erl_nif,src/$n.erl)
+endif
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: ci ci-prepare ci-setup
+
+CI_OTP ?=
+CI_HIPE ?=
+CI_ERLLVM ?=
+
+ifeq ($(CI_VM),native)
+ERLC_OPTS += +native
+TEST_ERLC_OPTS += +native
+else ifeq ($(CI_VM),erllvm)
+ERLC_OPTS += +native +'{hipe, [to_llvm]}'
+TEST_ERLC_OPTS += +native +'{hipe, [to_llvm]}'
+endif
+
+ifeq ($(strip $(CI_OTP) $(CI_HIPE) $(CI_ERLLVM)),)
+ci::
+else
+
+ci:: $(addprefix ci-,$(CI_OTP) $(addsuffix -native,$(CI_HIPE)) $(addsuffix -erllvm,$(CI_ERLLVM)))
+
+ci-prepare: $(addprefix $(KERL_INSTALL_DIR)/,$(CI_OTP) $(addsuffix -native,$(CI_HIPE)))
+
+ci-setup::
+ $(verbose) :
+
+ci-extra::
+ $(verbose) :
+
+ci_verbose_0 = @echo " CI " $(1);
+ci_verbose = $(ci_verbose_$(V))
+
+define ci_target
+ci-$1: $(KERL_INSTALL_DIR)/$2
+ $(verbose) $(MAKE) --no-print-directory clean
+ $(ci_verbose) \
+ PATH="$(KERL_INSTALL_DIR)/$2/bin:$(PATH)" \
+ CI_OTP_RELEASE="$1" \
+ CT_OPTS="-label $1" \
+ CI_VM="$3" \
+ $(MAKE) ci-setup tests
+ $(verbose) $(MAKE) --no-print-directory ci-extra
+endef
+
+$(foreach otp,$(CI_OTP),$(eval $(call ci_target,$(otp),$(otp),otp)))
+$(foreach otp,$(CI_HIPE),$(eval $(call ci_target,$(otp)-native,$(otp)-native,native)))
+$(foreach otp,$(CI_ERLLVM),$(eval $(call ci_target,$(otp)-erllvm,$(otp)-native,erllvm)))
+
+$(foreach otp,$(filter-out $(ERLANG_OTP),$(CI_OTP)),$(eval $(call kerl_otp_target,$(otp))))
+$(foreach otp,$(filter-out $(ERLANG_HIPE),$(sort $(CI_HIPE) $(CI_ERLLVM))),$(eval $(call kerl_hipe_target,$(otp))))
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Continuous Integration targets:" \
+ " ci Run '$(MAKE) tests' on all configured Erlang versions." \
+ "" \
+ "The CI_OTP variable must be defined with the Erlang versions" \
+ "that must be tested. For example: CI_OTP = OTP-17.3.4 OTP-17.5.3"
+
+endif
+
+# Copyright (c) 2020, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifdef CONCUERROR_TESTS
+
+.PHONY: concuerror distclean-concuerror
+
+# Configuration.
+
+CONCUERROR_LOGS_DIR ?= $(CURDIR)/logs
+CONCUERROR_OPTS ?=
+
+# Core targets.
+
+check:: concuerror
+
+ifndef KEEP_LOGS
+distclean:: distclean-concuerror
+endif
+
+# Plugin-specific targets.
+
+$(ERLANG_MK_TMP)/Concuerror/bin/concuerror: | $(ERLANG_MK_TMP)
+ $(verbose) git clone https://github.com/parapluu/Concuerror $(ERLANG_MK_TMP)/Concuerror
+ $(verbose) $(MAKE) -C $(ERLANG_MK_TMP)/Concuerror
+
+$(CONCUERROR_LOGS_DIR):
+ $(verbose) mkdir -p $(CONCUERROR_LOGS_DIR)
+
+define concuerror_html_report
+<!DOCTYPE html>
+<html lang="en">
+<head>
+<meta charset="utf-8">
+<title>Concuerror HTML report</title>
+</head>
+<body>
+<h1>Concuerror HTML report</h1>
+<p>Generated on $(concuerror_date)</p>
+<ul>
+$(foreach t,$(concuerror_targets),<li><a href="$(t).txt">$(t)</a></li>)
+</ul>
+</body>
+</html>
+endef
+
+concuerror: $(addprefix concuerror-,$(subst :,-,$(CONCUERROR_TESTS)))
+ $(eval concuerror_date := $(shell date))
+ $(eval concuerror_targets := $^)
+ $(verbose) $(call core_render,concuerror_html_report,$(CONCUERROR_LOGS_DIR)/concuerror.html)
+
+define concuerror_target
+.PHONY: concuerror-$1-$2
+
+concuerror-$1-$2: test-build | $(ERLANG_MK_TMP)/Concuerror/bin/concuerror $(CONCUERROR_LOGS_DIR)
+ $(ERLANG_MK_TMP)/Concuerror/bin/concuerror \
+ --pa $(CURDIR)/ebin --pa $(TEST_DIR) \
+ -o $(CONCUERROR_LOGS_DIR)/concuerror-$1-$2.txt \
+ $$(CONCUERROR_OPTS) -m $1 -t $2
+endef
+
+$(foreach test,$(CONCUERROR_TESTS),$(eval $(call concuerror_target,$(firstword $(subst :, ,$(test))),$(lastword $(subst :, ,$(test))))))
+
+distclean-concuerror:
+ $(gen_verbose) rm -rf $(CONCUERROR_LOGS_DIR)
+
+endif
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: ct apps-ct distclean-ct
+
+# Configuration.
+
+CT_OPTS ?=
+
+ifneq ($(wildcard $(TEST_DIR)),)
+ifndef CT_SUITES
+CT_SUITES := $(sort $(subst _SUITE.erl,,$(notdir $(call core_find,$(TEST_DIR)/,*_SUITE.erl))))
+endif
+endif
+CT_SUITES ?=
+CT_LOGS_DIR ?= $(CURDIR)/logs
+
+# Core targets.
+
+tests:: ct
+
+ifndef KEEP_LOGS
+distclean:: distclean-ct
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Common_test targets:" \
+ " ct Run all the common_test suites for this project" \
+ "" \
+ "All your common_test suites have their associated targets." \
+ "A suite named http_SUITE can be ran using the ct-http target."
+
+# Plugin-specific targets.
+
+CT_RUN = ct_run \
+ -no_auto_compile \
+ -noinput \
+ -pa $(CURDIR)/ebin $(TEST_DIR) \
+ -dir $(TEST_DIR) \
+ -logdir $(CT_LOGS_DIR)
+
+ifeq ($(CT_SUITES),)
+ct: $(if $(IS_APP)$(ROOT_DIR),,apps-ct)
+else
+# We do not run tests if we are in an apps/* directory without a test directory.
+ifneq ($(IS_APP)$(wildcard $(TEST_DIR)),1)
+ct: test-build $(if $(IS_APP)$(ROOT_DIR),,apps-ct)
+ $(verbose) mkdir -p $(CT_LOGS_DIR)
+ $(gen_verbose) $(CT_RUN) -sname ct_$(PROJECT) -suite $(addsuffix _SUITE,$(CT_SUITES)) $(CT_OPTS)
+endif
+endif
+
+ifneq ($(ALL_APPS_DIRS),)
+define ct_app_target
+apps-ct-$1: test-build
+ $$(MAKE) -C $1 ct IS_APP=1
+endef
+
+$(foreach app,$(ALL_APPS_DIRS),$(eval $(call ct_app_target,$(app))))
+
+apps-ct: $(addprefix apps-ct-,$(ALL_APPS_DIRS))
+endif
+
+ifdef t
+ifeq (,$(findstring :,$t))
+CT_EXTRA = -group $t
+else
+t_words = $(subst :, ,$t)
+CT_EXTRA = -group $(firstword $(t_words)) -case $(lastword $(t_words))
+endif
+else
+ifdef c
+CT_EXTRA = -case $c
+else
+CT_EXTRA =
+endif
+endif
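+
+# Examples (hypothetical suite/group/case names, per the t and c
+# variables handled above):
+#   make ct-http                     # run all of http_SUITE
+#   make ct-http t=admins            # run only the admins group
+#   make ct-http t=admins:add_user   # run one case within that group
+#   make ct-http c=add_user          # run one case directly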
+
+define ct_suite_target
+ct-$(1): test-build
+ $(verbose) mkdir -p $(CT_LOGS_DIR)
+ $(gen_verbose_esc) $(CT_RUN) -sname ct_$(PROJECT) -suite $(addsuffix _SUITE,$(1)) $(CT_EXTRA) $(CT_OPTS)
+endef
+
+$(foreach test,$(CT_SUITES),$(eval $(call ct_suite_target,$(test))))
+
+distclean-ct:
+ $(gen_verbose) rm -rf $(CT_LOGS_DIR)
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: plt distclean-plt dialyze
+
+# Configuration.
+
+DIALYZER_PLT ?= $(CURDIR)/.$(PROJECT).plt
+export DIALYZER_PLT
+
+PLT_APPS ?=
+DIALYZER_DIRS ?= --src -r $(wildcard src) $(ALL_APPS_DIRS)
+DIALYZER_OPTS ?= -Werror_handling -Wrace_conditions -Wunmatched_returns # -Wunderspecs
+DIALYZER_PLT_OPTS ?=
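+
+# Example (hypothetical application names): add extra OTP applications
+# to the PLT on top of erts, kernel and stdlib:
+#   PLT_APPS = ssl crypto inets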
+
+# Core targets.
+
+check:: dialyze
+
+distclean:: distclean-plt
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Dialyzer targets:" \
+ " plt Build a PLT file for this project" \
+ " dialyze Analyze the project using Dialyzer"
+
+# Plugin-specific targets.
+
+define filter_opts.erl
+ Opts = init:get_plain_arguments(),
+ {Filtered, _} = lists:foldl(fun
+ (O, {Os, true}) -> {[O|Os], false};
+ (O = "-D", {Os, _}) -> {[O|Os], true};
+ (O = [\\$$-, \\$$D, _ | _], {Os, _}) -> {[O|Os], false};
+ (O = "-I", {Os, _}) -> {[O|Os], true};
+ (O = [\\$$-, \\$$I, _ | _], {Os, _}) -> {[O|Os], false};
+ (O = "-pa", {Os, _}) -> {[O|Os], true};
+ (_, Acc) -> Acc
+ end, {[], false}, Opts),
+ io:format("~s~n", [string:join(lists:reverse(Filtered), " ")]),
+ halt().
+endef
+
+# DIALYZER_PLT is a variable understood directly by Dialyzer.
+#
+# We append the path to erts at the end of the PLT. This works
+# because the PLT file is in the external term format and the
+# function binary_to_term/1 ignores any trailing data.
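+# The 'dialyze' target below reads that trailing line back (tail/grep)
+# to detect a PLT built against another Erlang/OTP installation and
+# rebuild it when needed.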
+$(DIALYZER_PLT): deps app
+ $(eval DEPS_LOG := $(shell test -f $(ERLANG_MK_TMP)/deps.log && \
+ while read p; do test -d $$p/ebin && echo $$p/ebin; done <$(ERLANG_MK_TMP)/deps.log))
+ $(verbose) dialyzer --build_plt $(DIALYZER_PLT_OPTS) --apps \
+ erts kernel stdlib $(PLT_APPS) $(OTP_DEPS) $(LOCAL_DEPS) $(DEPS_LOG) || test $$? -eq 2
+ $(verbose) $(ERL) -eval 'io:format("~n~s~n", [code:lib_dir(erts)]), halt().' >> $@
+
+plt: $(DIALYZER_PLT)
+
+distclean-plt:
+ $(gen_verbose) rm -f $(DIALYZER_PLT)
+
+ifneq ($(wildcard $(DIALYZER_PLT)),)
+dialyze: $(if $(filter --src,$(DIALYZER_DIRS)),,deps app)
+ $(verbose) if ! tail -n1 $(DIALYZER_PLT) | \
+ grep -q "^`$(ERL) -eval 'io:format("~s", [code:lib_dir(erts)]), halt().'`$$"; then \
+ rm $(DIALYZER_PLT); \
+ $(MAKE) plt; \
+ fi
+else
+dialyze: $(DIALYZER_PLT)
+endif
+ $(verbose) dialyzer --no_native `$(ERL) \
+ -eval "$(subst $(newline),,$(call escape_dquotes,$(call filter_opts.erl)))" \
+ -extra $(ERLC_OPTS)` $(DIALYZER_DIRS) $(DIALYZER_OPTS) $(if $(wildcard ebin/),-pa ebin/)
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-edoc edoc
+
+# Configuration.
+
+EDOC_OPTS ?=
+EDOC_SRC_DIRS ?=
+EDOC_OUTPUT ?= doc
+
+define edoc.erl
+ SrcPaths = lists:foldl(fun(P, Acc) ->
+ filelib:wildcard(atom_to_list(P) ++ "/{src,c_src}") ++ Acc
+ end, [], [$(call comma_list,$(patsubst %,'%',$(call core_native_path,$(EDOC_SRC_DIRS))))]),
+ DefaultOpts = [{dir, "$(EDOC_OUTPUT)"}, {source_path, SrcPaths}, {subpackages, false}],
+ edoc:application($(1), ".", [$(2)] ++ DefaultOpts),
+ halt(0).
+endef
+
+# Core targets.
+
+ifneq ($(strip $(EDOC_SRC_DIRS)$(wildcard doc/overview.edoc)),)
+docs:: edoc
+endif
+
+distclean:: distclean-edoc
+
+# Plugin-specific targets.
+
+edoc: distclean-edoc doc-deps
+ $(gen_verbose) $(call erlang,$(call edoc.erl,$(PROJECT),$(EDOC_OPTS)))
+
+distclean-edoc:
+ $(gen_verbose) rm -f $(EDOC_OUTPUT)/*.css $(EDOC_OUTPUT)/*.html $(EDOC_OUTPUT)/*.png $(EDOC_OUTPUT)/edoc-info
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Configuration.
+
+DTL_FULL_PATH ?=
+DTL_PATH ?= templates/
+DTL_PREFIX ?=
+DTL_SUFFIX ?= _dtl
+DTL_OPTS ?=
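+
+# Example (hypothetical template names): with the defaults above,
+# templates/hello.dtl compiles to the module hello_dtl; with
+# DTL_FULL_PATH=1, templates/users/hello.dtl becomes users_hello_dtl.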
+
+# Verbosity.
+
+dtl_verbose_0 = @echo " DTL " $(filter %.dtl,$(?F));
+dtl_verbose = $(dtl_verbose_$(V))
+
+# Core targets.
+
+DTL_PATH := $(abspath $(DTL_PATH))
+DTL_FILES := $(sort $(call core_find,$(DTL_PATH),*.dtl))
+
+ifneq ($(DTL_FILES),)
+
+DTL_NAMES = $(addprefix $(DTL_PREFIX),$(addsuffix $(DTL_SUFFIX),$(DTL_FILES:$(DTL_PATH)/%.dtl=%)))
+DTL_MODULES = $(if $(DTL_FULL_PATH),$(subst /,_,$(DTL_NAMES)),$(notdir $(DTL_NAMES)))
+BEAM_FILES += $(addsuffix .beam,$(addprefix ebin/,$(DTL_MODULES)))
+
+ifneq ($(words $(DTL_FILES)),0)
+# Rebuild templates when the Makefile changes.
+$(ERLANG_MK_TMP)/last-makefile-change-erlydtl: $(MAKEFILE_LIST) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(DTL_FILES); \
+ fi
+ $(verbose) touch $@
+
+ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change-erlydtl
+endif
+
+define erlydtl_compile.erl
+ [begin
+ Module0 = case "$(strip $(DTL_FULL_PATH))" of
+ "" ->
+ filename:basename(F, ".dtl");
+ _ ->
+ "$(call core_native_path,$(DTL_PATH))/" ++ F2 = filename:rootname(F, ".dtl"),
+ re:replace(F2, "/", "_", [{return, list}, global])
+ end,
+ Module = list_to_atom("$(DTL_PREFIX)" ++ string:to_lower(Module0) ++ "$(DTL_SUFFIX)"),
+ case erlydtl:compile(F, Module, [$(DTL_OPTS)] ++ [{out_dir, "ebin/"}, return_errors]) of
+ ok -> ok;
+ {ok, _} -> ok
+ end
+ end || F <- string:tokens("$(1)", " ")],
+ halt().
+endef
+
+ebin/$(PROJECT).app:: $(DTL_FILES) | ebin/
+ $(if $(strip $?),\
+ $(dtl_verbose) $(call erlang,$(call erlydtl_compile.erl,$(call core_native_path,$?)),\
+ -pa ebin/))
+
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, Dave Cottlehuber <dch@skunkwerks.at>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-escript escript escript-zip
+
+# Configuration.
+
+ESCRIPT_NAME ?= $(PROJECT)
+ESCRIPT_FILE ?= $(ESCRIPT_NAME)
+
+ESCRIPT_SHEBANG ?= /usr/bin/env escript
+ESCRIPT_COMMENT ?= This is an -*- erlang -*- file
+ESCRIPT_EMU_ARGS ?= -escript main $(ESCRIPT_NAME)
+
+ESCRIPT_ZIP ?= 7z a -tzip -mx=9 -mtc=off $(if $(filter-out 0,$(V)),,> /dev/null)
+ESCRIPT_ZIP_FILE ?= $(ERLANG_MK_TMP)/escript.zip
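+
+# With the defaults above, the generated escript begins with:
+#   #!/usr/bin/env escript
+#   %% This is an -*- erlang -*- file
+#   %%! -escript main $(ESCRIPT_NAME)
+# followed by the zip archive of the project and dependency ebin directories.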
+
+# Core targets.
+
+distclean:: distclean-escript
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Escript targets:" \
+ " escript Build an executable escript archive" \
+
+# Plugin-specific targets.
+
+escript-zip:: FULL=1
+escript-zip:: deps app
+ $(verbose) mkdir -p $(dir $(ESCRIPT_ZIP))
+ $(verbose) rm -f $(ESCRIPT_ZIP_FILE)
+ $(gen_verbose) cd .. && $(ESCRIPT_ZIP) $(ESCRIPT_ZIP_FILE) $(PROJECT)/ebin/*
+ifneq ($(DEPS),)
+ $(verbose) cd $(DEPS_DIR) && $(ESCRIPT_ZIP) $(ESCRIPT_ZIP_FILE) \
+ $(subst $(DEPS_DIR)/,,$(addsuffix /*,$(wildcard \
+ $(addsuffix /ebin,$(shell cat $(ERLANG_MK_TMP)/deps.log)))))
+endif
+
+escript:: escript-zip
+ $(gen_verbose) printf "%s\n" \
+ "#!$(ESCRIPT_SHEBANG)" \
+ "%% $(ESCRIPT_COMMENT)" \
+ "%%! $(ESCRIPT_EMU_ARGS)" > $(ESCRIPT_FILE)
+ $(verbose) cat $(ESCRIPT_ZIP_FILE) >> $(ESCRIPT_FILE)
+ $(verbose) chmod +x $(ESCRIPT_FILE)
+
+distclean-escript:
+ $(gen_verbose) rm -f $(ESCRIPT_FILE)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, Enrique Fernandez <enrique.fernandez@erlang-solutions.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: eunit apps-eunit
+
+# Configuration
+
+EUNIT_OPTS ?=
+EUNIT_ERL_OPTS ?=
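+
+# Examples (hypothetical module/function names, matching the t variable
+# handling below):
+#   make eunit t=my_module          # run one module's EUnit tests
+#   make eunit t=my_module:my_test  # run a single test function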
+
+# Core targets.
+
+tests:: eunit
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "EUnit targets:" \
+ " eunit Run all the EUnit tests for this project"
+
+# Plugin-specific targets.
+
+define eunit.erl
+ $(call cover.erl)
+ CoverSetup(),
+ case eunit:test($1, [$(EUNIT_OPTS)]) of
+ ok -> ok;
+ error -> halt(2)
+ end,
+ CoverExport("$(call core_native_path,$(COVER_DATA_DIR))/eunit.coverdata"),
+ halt()
+endef
+
+EUNIT_ERL_OPTS += -pa $(TEST_DIR) $(CURDIR)/ebin
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+eunit: test-build cover-data-dir
+ $(gen_verbose) $(call erlang,$(call eunit.erl,['$(t)']),$(EUNIT_ERL_OPTS))
+else
+eunit: test-build cover-data-dir
+ $(gen_verbose) $(call erlang,$(call eunit.erl,fun $(t)/0),$(EUNIT_ERL_OPTS))
+endif
+else
+EUNIT_EBIN_MODS = $(notdir $(basename $(ERL_FILES) $(BEAM_FILES)))
+EUNIT_TEST_MODS = $(notdir $(basename $(call core_find,$(TEST_DIR)/,*.erl)))
+
+EUNIT_MODS = $(foreach mod,$(EUNIT_EBIN_MODS) $(filter-out \
+ $(patsubst %,%_tests,$(EUNIT_EBIN_MODS)),$(EUNIT_TEST_MODS)),'$(mod)')
+
+eunit: test-build $(if $(IS_APP)$(ROOT_DIR),,apps-eunit) cover-data-dir
+ifneq ($(wildcard src/ $(TEST_DIR)),)
+ $(gen_verbose) $(call erlang,$(call eunit.erl,[$(call comma_list,$(EUNIT_MODS))]),$(EUNIT_ERL_OPTS))
+endif
+
+ifneq ($(ALL_APPS_DIRS),)
+apps-eunit: test-build
+ $(verbose) eunit_retcode=0 ; for app in $(ALL_APPS_DIRS); do $(MAKE) -C $$app eunit IS_APP=1; \
+ [ $$? -ne 0 ] && eunit_retcode=1 ; done ; \
+ exit $$eunit_retcode
+endif
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Verbosity.
+
+proto_verbose_0 = @echo " PROTO " $(filter %.proto,$(?F));
+proto_verbose = $(proto_verbose_$(V))
+
+# Core targets.
+
+ifneq ($(wildcard src/),)
+ifneq ($(filter gpb protobuffs,$(BUILD_DEPS) $(DEPS)),)
+PROTO_FILES := $(filter %.proto,$(ALL_SRC_FILES))
+ERL_FILES += $(addprefix src/,$(patsubst %.proto,%_pb.erl,$(notdir $(PROTO_FILES))))
+
+ifeq ($(PROTO_FILES),)
+$(ERLANG_MK_TMP)/last-makefile-change-protobuffs:
+ $(verbose) :
+else
+# Rebuild proto files when the Makefile changes.
+# We exclude $(PROJECT).d to avoid a circular dependency.
+$(ERLANG_MK_TMP)/last-makefile-change-protobuffs: $(filter-out $(PROJECT).d,$(MAKEFILE_LIST)) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(PROTO_FILES); \
+ fi
+ $(verbose) touch $@
+
+$(PROJECT).d:: $(ERLANG_MK_TMP)/last-makefile-change-protobuffs
+endif
+
+ifeq ($(filter gpb,$(BUILD_DEPS) $(DEPS)),)
+define compile_proto.erl
+ [begin
+ protobuffs_compile:generate_source(F, [
+ {output_include_dir, "./include"},
+ {output_src_dir, "./src"}])
+ end || F <- string:tokens("$1", " ")],
+ halt().
+endef
+else
+define compile_proto.erl
+ [begin
+ gpb_compile:file(F, [
+ {include_as_lib, true},
+ {module_name_suffix, "_pb"},
+ {o_hrl, "./include"},
+ {o_erl, "./src"}])
+ end || F <- string:tokens("$1", " ")],
+ halt().
+endef
+endif
+
+ifneq ($(PROTO_FILES),)
+$(PROJECT).d:: $(PROTO_FILES)
+ $(verbose) mkdir -p ebin/ include/
+ $(if $(strip $?),$(proto_verbose) $(call erlang,$(call compile_proto.erl,$?)))
+endif
+endif
+endif
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: relx-rel relx-relup distclean-relx-rel run
+
+# Configuration.
+
+RELX ?= $(ERLANG_MK_TMP)/relx
+RELX_CONFIG ?= $(CURDIR)/relx.config
+
+RELX_URL ?= https://erlang.mk/res/relx-v3.27.0
+RELX_OPTS ?=
+RELX_OUTPUT_DIR ?= _rel
+RELX_REL_EXT ?=
+RELX_TAR ?= 1
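+
+# Example (hypothetical values): write releases elsewhere and skip the
+# tarball step:
+#   RELX_OPTS = -o /tmp/my_rel
+#   RELX_TAR = 0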
+
+ifdef SFX
+ RELX_TAR = 1
+endif
+
+ifeq ($(firstword $(RELX_OPTS)),-o)
+ RELX_OUTPUT_DIR = $(word 2,$(RELX_OPTS))
+else
+ RELX_OPTS += -o $(RELX_OUTPUT_DIR)
+endif
+
+# Core targets.
+
+ifeq ($(IS_DEP),)
+ifneq ($(wildcard $(RELX_CONFIG)),)
+rel:: relx-rel
+
+relup:: relx-relup
+endif
+endif
+
+distclean:: distclean-relx-rel
+
+# Plugin-specific targets.
+
+$(RELX): | $(ERLANG_MK_TMP)
+ $(gen_verbose) $(call core_http_get,$(RELX),$(RELX_URL))
+ $(verbose) chmod +x $(RELX)
+
+relx-rel: $(RELX) rel-deps app
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) release
+ $(verbose) $(MAKE) relx-post-rel
+ifeq ($(RELX_TAR),1)
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) tar
+endif
+
+relx-relup: $(RELX) rel-deps app
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) release
+ $(MAKE) relx-post-rel
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) relup $(if $(filter 1,$(RELX_TAR)),tar)
+
+distclean-relx-rel:
+ $(gen_verbose) rm -rf $(RELX_OUTPUT_DIR)
+
+# Default hooks.
+relx-post-rel::
+ $(verbose) :
+
+# Run target.
+
+ifeq ($(wildcard $(RELX_CONFIG)),)
+run::
+else
+
+define get_relx_release.erl
+ {ok, Config} = file:consult("$(call core_native_path,$(RELX_CONFIG))"),
+ {release, {Name, Vsn0}, _} = lists:keyfind(release, 1, Config),
+ Vsn = case Vsn0 of
+ {cmd, Cmd} -> os:cmd(Cmd);
+ semver -> "";
+ {semver, _} -> "";
+ VsnStr -> Vsn0
+ end,
+ Extended = case lists:keyfind(extended_start_script, 1, Config) of
+ {_, true} -> "1";
+ _ -> ""
+ end,
+ io:format("~s ~s ~s", [Name, Vsn, Extended]),
+ halt(0).
+endef
+
+RELX_REL := $(shell $(call erlang,$(get_relx_release.erl)))
+RELX_REL_NAME := $(word 1,$(RELX_REL))
+RELX_REL_VSN := $(word 2,$(RELX_REL))
+RELX_REL_CMD := $(if $(word 3,$(RELX_REL)),console)
+
+ifeq ($(PLATFORM),msys2)
+RELX_REL_EXT := .cmd
+endif
+
+run:: all
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) $(RELX_REL_CMD)
+
+ifdef RELOAD
+rel::
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) ping
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) \
+ eval "io:format(\"~p~n\", [c:lm()])"
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Relx targets:" \
+ " run Compile the project, build the release and run it"
+
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, M Robert Martin <rob@version2beta.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: shell
+
+# Configuration.
+
+SHELL_ERL ?= erl
+SHELL_PATHS ?= $(CURDIR)/ebin $(TEST_DIR)
+SHELL_OPTS ?=
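+
+# Example (hypothetical dependency and application names): start the
+# project's applications automatically when the shell boots:
+#   SHELL_DEPS = sync
+#   SHELL_OPTS = -eval 'application:ensure_all_started(my_app)'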
+
+ALL_SHELL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(SHELL_DEPS))
+
+# Core targets
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Shell targets:" \
+ " shell Run an erlang shell with SHELL_OPTS or reasonable default"
+
+# Plugin-specific targets.
+
+$(foreach dep,$(SHELL_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+build-shell-deps:
+else
+build-shell-deps: $(ALL_SHELL_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_SHELL_DEPS_DIRS) ; do \
+ if [ -z "$(strip $(FULL))" ] && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ else \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ fi \
+ done
+endif
+
+shell:: build-shell-deps
+ $(gen_verbose) $(SHELL_ERL) -pa $(SHELL_PATHS) $(SHELL_OPTS)
+
+# Copyright 2017, Stanislaw Klekot <dozzie@jarowit.net>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-sphinx sphinx
+
+# Configuration.
+
+SPHINX_BUILD ?= sphinx-build
+SPHINX_SOURCE ?= doc
+SPHINX_CONFDIR ?=
+SPHINX_FORMATS ?= html
+SPHINX_DOCTREES ?= $(ERLANG_MK_TMP)/sphinx.doctrees
+SPHINX_OPTS ?=
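+
+# Example (hypothetical output paths): build HTML and man pages into
+# custom directories:
+#   SPHINX_FORMATS = html man
+#   sphinx_html_output = priv/doc/html
+#   sphinx_man_output = priv/doc/man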
+
+#sphinx_html_opts =
+#sphinx_html_output = html
+#sphinx_man_opts =
+#sphinx_man_output = man
+#sphinx_latex_opts =
+#sphinx_latex_output = latex
+
+# Helpers.
+
+sphinx_build_0 = @echo " SPHINX" $1; $(SPHINX_BUILD) -N -q
+sphinx_build_1 = $(SPHINX_BUILD) -N
+sphinx_build_2 = set -x; $(SPHINX_BUILD)
+sphinx_build = $(sphinx_build_$(V))
+
+define sphinx.build
+$(call sphinx_build,$1) -b $1 -d $(SPHINX_DOCTREES) $(if $(SPHINX_CONFDIR),-c $(SPHINX_CONFDIR)) $(SPHINX_OPTS) $(sphinx_$1_opts) -- $(SPHINX_SOURCE) $(call sphinx.output,$1)
+
+endef
+
+define sphinx.output
+$(if $(sphinx_$1_output),$(sphinx_$1_output),$1)
+endef
+
+# Targets.
+
+ifneq ($(wildcard $(if $(SPHINX_CONFDIR),$(SPHINX_CONFDIR),$(SPHINX_SOURCE))/conf.py),)
+docs:: sphinx
+distclean:: distclean-sphinx
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Sphinx targets:" \
+ " sphinx Generate Sphinx documentation." \
+ "" \
+ "ReST sources and 'conf.py' file are expected in directory pointed by" \
+ "SPHINX_SOURCE ('doc' by default). SPHINX_FORMATS lists formats to build (only" \
+ "'html' format is generated by default); target directory can be specified by" \
+ 'setting sphinx_$${format}_output, for example: sphinx_html_output = output/html' \
+ "Additional Sphinx options can be set in SPHINX_OPTS."
+
+# Plugin-specific targets.
+
+sphinx:
+ $(foreach F,$(SPHINX_FORMATS),$(call sphinx.build,$F))
+
+distclean-sphinx:
+ $(gen_verbose) rm -rf $(filter-out $(SPHINX_SOURCE),$(foreach F,$(SPHINX_FORMATS),$(call sphinx.output,$F)))
+
+# Copyright (c) 2017, Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: show-ERL_LIBS show-ERLC_OPTS show-TEST_ERLC_OPTS
+
+show-ERL_LIBS:
+ @echo $(ERL_LIBS)
+
+show-ERLC_OPTS:
+ @$(foreach opt,$(ERLC_OPTS) -pa ebin -I include,echo "$(opt)";)
+
+show-TEST_ERLC_OPTS:
+ @$(foreach opt,$(TEST_ERLC_OPTS) -pa ebin -I include,echo "$(opt)";)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter triq,$(DEPS) $(TEST_DEPS)),triq)
+.PHONY: triq
+
+# Targets.
+
+tests:: triq
+
+define triq_check.erl
+ $(call cover.erl)
+ code:add_pathsa([
+ "$(call core_native_path,$(CURDIR)/ebin)",
+ "$(call core_native_path,$(DEPS_DIR)/*/ebin)",
+ "$(call core_native_path,$(TEST_DIR))"]),
+ try begin
+ CoverSetup(),
+ Res = case $(1) of
+ all -> [true] =:= lists:usort([triq:check(M) || M <- [$(call comma_list,$(3))]]);
+ module -> triq:check($(2));
+ function -> triq:check($(2))
+ end,
+ CoverExport("$(COVER_DATA_DIR)/triq.coverdata"),
+ Res
+ end of
+ true -> halt(0);
+ _ -> halt(1)
+ catch error:undef ->
+ io:format("Undefined property or module?~n~p~n", [erlang:get_stacktrace()]),
+ halt(0)
+ end.
+endef
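+
+# Examples (hypothetical property modules/functions, matching the t
+# variable handling below):
+#   make triq t=prop_codec                 # check one module's properties
+#   make triq t=prop_codec:prop_roundtrip  # check a single property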
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+triq: test-build cover-data-dir
+ $(verbose) $(call erlang,$(call triq_check.erl,module,$(t)))
+else
+triq: test-build cover-data-dir
+ $(verbose) echo Testing $(t)/0
+ $(verbose) $(call erlang,$(call triq_check.erl,function,$(t)()))
+endif
+else
+triq: test-build cover-data-dir
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(wildcard ebin/*.beam) $(call core_find,$(TEST_DIR)/,*.beam))))))
+ $(gen_verbose) $(call erlang,$(call triq_check.erl,all,undefined,$(MODULES)))
+endif
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Erlang Solutions Ltd.
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: xref distclean-xref
+
+# Configuration.
+
+ifeq ($(XREF_CONFIG),)
+ XREFR_ARGS :=
+else
+ XREFR_ARGS := -c $(XREF_CONFIG)
+endif
+
+XREFR ?= $(CURDIR)/xrefr
+export XREFR
+
+XREFR_URL ?= https://github.com/inaka/xref_runner/releases/download/1.1.0/xrefr
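+
+# Example (hypothetical path): point the runner at a custom configuration:
+#   XREF_CONFIG = $(CURDIR)/xref.config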
+
+# Core targets.
+
+help::
+ $(verbose) printf '%s\n' '' \
+ 'Xref targets:' \
+ ' xref Run Xrefr using $$XREF_CONFIG as the config file, if defined'
+
+distclean:: distclean-xref
+
+# Plugin-specific targets.
+
+$(XREFR):
+ $(gen_verbose) $(call core_http_get,$(XREFR),$(XREFR_URL))
+ $(verbose) chmod +x $(XREFR)
+
+xref: deps app $(XREFR)
+ $(gen_verbose) $(XREFR) $(XREFR_ARGS)
+
+distclean-xref:
+ $(gen_verbose) rm -rf $(XREFR)
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Viktor Söderqvist <viktor@zuiderkwast.se>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+COVER_REPORT_DIR ?= cover
+COVER_DATA_DIR ?= $(COVER_REPORT_DIR)
+
+ifdef COVER
+COVER_APPS ?= $(notdir $(ALL_APPS_DIRS))
+COVER_DEPS ?=
+endif
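+
+# Example (hypothetical app and dependency names): collect coverage for
+# the project, one umbrella app and one dependency while running tests:
+#   make tests COVER=1 COVER_APPS=my_app COVER_DEPS=cowlib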
+
+# Code coverage for Common Test.
+
+ifdef COVER
+ifdef CT_RUN
+ifneq ($(wildcard $(TEST_DIR)),)
+test-build:: $(TEST_DIR)/ct.cover.spec
+
+$(TEST_DIR)/ct.cover.spec: cover-data-dir
+ $(gen_verbose) printf "%s\n" \
+ "{incl_app, '$(PROJECT)', details}." \
+ "{incl_dirs, '$(PROJECT)', [\"$(call core_native_path,$(CURDIR)/ebin)\" \
+ $(foreach a,$(COVER_APPS),$(comma) \"$(call core_native_path,$(APPS_DIR)/$a/ebin)\") \
+ $(foreach d,$(COVER_DEPS),$(comma) \"$(call core_native_path,$(DEPS_DIR)/$d/ebin)\")]}." \
+ '{export,"$(call core_native_path,$(abspath $(COVER_DATA_DIR))/ct.coverdata)"}.' > $@
+
+CT_RUN += -cover $(TEST_DIR)/ct.cover.spec
+endif
+endif
+endif
+
+# Code coverage for other tools.
+
+ifdef COVER
+define cover.erl
+ CoverSetup = fun() ->
+ Dirs = ["$(call core_native_path,$(CURDIR)/ebin)"
+ $(foreach a,$(COVER_APPS),$(comma) "$(call core_native_path,$(APPS_DIR)/$a/ebin)")
+ $(foreach d,$(COVER_DEPS),$(comma) "$(call core_native_path,$(DEPS_DIR)/$d/ebin)")],
+ [begin
+ case filelib:is_dir(Dir) of
+ false -> false;
+ true ->
+ case cover:compile_beam_directory(Dir) of
+ {error, _} -> halt(1);
+ _ -> true
+ end
+ end
+ end || Dir <- Dirs]
+ end,
+ CoverExport = fun(Filename) -> cover:export(Filename) end,
+endef
+else
+define cover.erl
+ CoverSetup = fun() -> ok end,
+ CoverExport = fun(_) -> ok end,
+endef
+endif
+
+# Core targets
+
+ifdef COVER
+ifneq ($(COVER_REPORT_DIR),)
+tests::
+ $(verbose) $(MAKE) --no-print-directory cover-report
+endif
+
+cover-data-dir: | $(COVER_DATA_DIR)
+
+$(COVER_DATA_DIR):
+ $(verbose) mkdir -p $(COVER_DATA_DIR)
+else
+cover-data-dir:
+endif
+
+clean:: coverdata-clean
+
+ifneq ($(COVER_REPORT_DIR),)
+distclean:: cover-report-clean
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Cover targets:" \
+ " cover-report Generate a HTML coverage report from previously collected" \
+ " cover data." \
+ " all.coverdata Merge all coverdata files into all.coverdata." \
+ "" \
+ "If COVER=1 is set, coverage data is generated by the targets eunit and ct. The" \
+ "target tests additionally generates a HTML coverage report from the combined" \
+ "coverdata files from each of these testing tools. HTML reports can be disabled" \
+ "by setting COVER_REPORT_DIR to empty."
+
+# Plugin specific targets
+
+COVERDATA = $(filter-out $(COVER_DATA_DIR)/all.coverdata,$(wildcard $(COVER_DATA_DIR)/*.coverdata))
+
+.PHONY: coverdata-clean
+coverdata-clean:
+ $(gen_verbose) rm -f $(COVER_DATA_DIR)/*.coverdata $(TEST_DIR)/ct.cover.spec
+
+# Merge all coverdata files into one.
+define cover_export.erl
+ $(foreach f,$(COVERDATA),cover:import("$(f)") == ok orelse halt(1),)
+ cover:export("$(COVER_DATA_DIR)/$@"), halt(0).
+endef
+
+all.coverdata: $(COVERDATA) cover-data-dir
+ $(gen_verbose) $(call erlang,$(cover_export.erl))
+
+# These are only defined if COVER_REPORT_DIR is non-empty. Set COVER_REPORT_DIR to
+# empty if you want the coverdata files but not the HTML report.
+ifneq ($(COVER_REPORT_DIR),)
+
+.PHONY: cover-report-clean cover-report
+
+cover-report-clean:
+ $(gen_verbose) rm -rf $(COVER_REPORT_DIR)
+ifneq ($(COVER_REPORT_DIR),$(COVER_DATA_DIR))
+ $(if $(shell ls -A $(COVER_DATA_DIR)/),,$(verbose) rmdir $(COVER_DATA_DIR))
+endif
+
+ifeq ($(COVERDATA),)
+cover-report:
+else
+
+# Modules which include eunit.hrl always contain one line without coverage
+# because eunit defines test/0 which is never called. We compensate for this.
+EUNIT_HRL_MODS = $(subst $(space),$(comma),$(shell \
+ grep -H -e '^\s*-include.*include/eunit\.hrl"' src/*.erl \
+ | sed "s/^src\/\(.*\)\.erl:.*/'\1'/" | uniq))
+
+define cover_report.erl
+ $(foreach f,$(COVERDATA),cover:import("$(f)") == ok orelse halt(1),)
+ Ms = cover:imported_modules(),
+ [cover:analyse_to_file(M, "$(COVER_REPORT_DIR)/" ++ atom_to_list(M)
+ ++ ".COVER.html", [html]) || M <- Ms],
+ Report = [begin {ok, R} = cover:analyse(M, module), R end || M <- Ms],
+ EunitHrlMods = [$(EUNIT_HRL_MODS)],
+ Report1 = [{M, {Y, case lists:member(M, EunitHrlMods) of
+ true -> N - 1; false -> N end}} || {M, {Y, N}} <- Report],
+ TotalY = lists:sum([Y || {_, {Y, _}} <- Report1]),
+ TotalN = lists:sum([N || {_, {_, N}} <- Report1]),
+ Perc = fun(Y, N) -> case Y + N of 0 -> 100; S -> round(100 * Y / S) end end,
+ TotalPerc = Perc(TotalY, TotalN),
+ {ok, F} = file:open("$(COVER_REPORT_DIR)/index.html", [write]),
+ io:format(F, "<!DOCTYPE html><html>~n"
+ "<head><meta charset=\"UTF-8\">~n"
+ "<title>Coverage report</title></head>~n"
+ "<body>~n", []),
+ io:format(F, "<h1>Coverage</h1>~n<p>Total: ~p%</p>~n", [TotalPerc]),
+ io:format(F, "<table><tr><th>Module</th><th>Coverage</th></tr>~n", []),
+ [io:format(F, "<tr><td><a href=\"~p.COVER.html\">~p</a></td>"
+ "<td>~p%</td></tr>~n",
+ [M, M, Perc(Y, N)]) || {M, {Y, N}} <- Report1],
+ How = "$(subst $(space),$(comma)$(space),$(basename $(COVERDATA)))",
+ Date = "$(shell date -u "+%Y-%m-%dT%H:%M:%SZ")",
+ io:format(F, "</table>~n"
+ "<p>Generated using ~s and erlang.mk on ~s.</p>~n"
+ "</body></html>", [How, Date]),
+ halt().
+endef
+
+cover-report:
+ $(verbose) mkdir -p $(COVER_REPORT_DIR)
+ $(gen_verbose) $(call erlang,$(cover_report.erl))
+
+endif
+endif # ifneq ($(COVER_REPORT_DIR),)
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: sfx
+
+ifdef RELX_REL
+ifdef SFX
+
+# Configuration.
+
+SFX_ARCHIVE ?= $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/$(RELX_REL_NAME)-$(RELX_REL_VSN).tar.gz
+SFX_OUTPUT_FILE ?= $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME).run
+
+# Core targets.
+
+rel:: sfx
+
+# Plugin-specific targets.
+
+define sfx_stub
+#!/bin/sh
+
+TMPDIR=`mktemp -d`
+ARCHIVE=`awk '/^__ARCHIVE_BELOW__$$/ {print NR + 1; exit 0;}' $$0`
+FILENAME=$$(basename $$0)
+REL=$${FILENAME%.*}
+
+tail -n+$$ARCHIVE $$0 | tar -xzf - -C $$TMPDIR
+
+$$TMPDIR/bin/$$REL console
+RET=$$?
+
+rm -rf $$TMPDIR
+
+exit $$RET
+
+__ARCHIVE_BELOW__
+endef
+
+sfx:
+ $(verbose) $(call core_render,sfx_stub,$(SFX_OUTPUT_FILE))
+ $(gen_verbose) cat $(SFX_ARCHIVE) >> $(SFX_OUTPUT_FILE)
+ $(verbose) chmod +x $(SFX_OUTPUT_FILE)
+
+endif
+endif
+
+# Copyright (c) 2013-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# External plugins.
+
+DEP_PLUGINS ?=
+
+$(foreach p,$(DEP_PLUGINS),\
+ $(eval $(if $(findstring /,$p),\
+ $(call core_dep_plugin,$p,$(firstword $(subst /, ,$p))),\
+ $(call core_dep_plugin,$p/plugins.mk,$p))))
+
+help:: help-plugins
+
+help-plugins::
+ $(verbose) :
+
+# Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015-2016, Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Fetch dependencies recursively (without building them).
+
+.PHONY: fetch-deps fetch-doc-deps fetch-rel-deps fetch-test-deps \
+ fetch-shell-deps
+
+.PHONY: $(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+fetch-deps: $(ERLANG_MK_RECURSIVE_DEPS_LIST)
+fetch-doc-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST)
+fetch-rel-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST)
+fetch-test-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST)
+fetch-shell-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+ifneq ($(SKIP_DEPS),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST):
+ $(verbose) :> $@
+else
+# By default, we fetch "normal" dependencies. They are also included no
+# matter which type of dependencies is requested.
+#
+# $(ALL_DEPS_DIRS) includes $(BUILD_DEPS).
+
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_DOC_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_REL_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_TEST_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_SHELL_DEPS_DIRS)
+
+# Allow fetch-deps to be used with $(DEP_TYPES) to fetch multiple types
+# of dependencies with a single target.
+ifneq ($(filter doc,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_DOC_DEPS_DIRS)
+endif
+ifneq ($(filter rel,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_REL_DEPS_DIRS)
+endif
+ifneq ($(filter test,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_TEST_DEPS_DIRS)
+endif
+ifneq ($(filter shell,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_SHELL_DEPS_DIRS)
+endif
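+
+# Example: fetch regular and test dependencies in a single pass:
+#   make fetch-deps DEP_TYPES=test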
+
+ERLANG_MK_RECURSIVE_TMP_LIST := $(abspath $(ERLANG_MK_TMP)/recursive-tmp-deps-$(shell echo $$PPID).log)
+
+$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): | $(ERLANG_MK_TMP)
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_RECURSIVE_TMP_LIST)
+endif
+ $(verbose) touch $(ERLANG_MK_RECURSIVE_TMP_LIST)
+ $(verbose) set -e; for dep in $^ ; do \
+ if ! grep -qs ^$$dep$$ $(ERLANG_MK_RECURSIVE_TMP_LIST); then \
+ echo $$dep >> $(ERLANG_MK_RECURSIVE_TMP_LIST); \
+ if grep -qs -E "^[[:blank:]]*include[[:blank:]]+(erlang\.mk|.*/erlang\.mk|.*ERLANG_MK_FILENAME.*)$$" \
+ $$dep/GNUmakefile $$dep/makefile $$dep/Makefile; then \
+ $(MAKE) -C $$dep fetch-deps \
+ IS_DEP=1 \
+ ERLANG_MK_RECURSIVE_TMP_LIST=$(ERLANG_MK_RECURSIVE_TMP_LIST); \
+ fi \
+ fi \
+ done
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) sort < $(ERLANG_MK_RECURSIVE_TMP_LIST) | \
+ uniq > $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted
+ $(verbose) cmp -s $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted $@ \
+ || mv $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted $@
+ $(verbose) rm -f $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted
+ $(verbose) rm $(ERLANG_MK_RECURSIVE_TMP_LIST)
+endif
+endif # ifneq ($(SKIP_DEPS),)
+
+# List dependencies recursively.
+
+.PHONY: list-deps list-doc-deps list-rel-deps list-test-deps \
+ list-shell-deps
+
+list-deps: $(ERLANG_MK_RECURSIVE_DEPS_LIST)
+list-doc-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST)
+list-rel-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST)
+list-test-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST)
+list-shell-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+list-deps list-doc-deps list-rel-deps list-test-deps list-shell-deps:
+ $(verbose) cat $^
+
+# Query dependencies recursively.
+
+.PHONY: query-deps query-doc-deps query-rel-deps query-test-deps \
+ query-shell-deps
+
+QUERY ?= name fetch_method repo version
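+
+# Example: list only the name and repository of every dependency:
+#   make query-deps QUERY="name repo"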
+
+define query_target
+$(1): $(2) clean-tmp-query.log
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(4)
+endif
+ $(verbose) $(foreach dep,$(3),\
+ echo $(PROJECT): $(foreach q,$(QUERY),$(call query_$(q),$(dep))) >> $(4) ;)
+ $(if $(filter-out query-deps,$(1)),,\
+ $(verbose) set -e; for dep in $(3) ; do \
+ if grep -qs ^$$$$dep$$$$ $(ERLANG_MK_TMP)/query.log; then \
+ :; \
+ else \
+ echo $$$$dep >> $(ERLANG_MK_TMP)/query.log; \
+ $(MAKE) -C $(DEPS_DIR)/$$$$dep $$@ QUERY="$(QUERY)" IS_DEP=1 || true; \
+ fi \
+ done)
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) touch $(4)
+ $(verbose) cat $(4)
+endif
+endef
+
+clean-tmp-query.log:
+ifeq ($(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_TMP)/query.log
+endif
+
+$(eval $(call query_target,query-deps,$(ERLANG_MK_RECURSIVE_DEPS_LIST),$(BUILD_DEPS) $(DEPS),$(ERLANG_MK_QUERY_DEPS_FILE)))
+$(eval $(call query_target,query-doc-deps,$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST),$(DOC_DEPS),$(ERLANG_MK_QUERY_DOC_DEPS_FILE)))
+$(eval $(call query_target,query-rel-deps,$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST),$(REL_DEPS),$(ERLANG_MK_QUERY_REL_DEPS_FILE)))
+$(eval $(call query_target,query-test-deps,$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST),$(TEST_DEPS),$(ERLANG_MK_QUERY_TEST_DEPS_FILE)))
+$(eval $(call query_target,query-shell-deps,$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST),$(SHELL_DEPS),$(ERLANG_MK_QUERY_SHELL_DEPS_FILE)))
diff --git a/deps/rabbit_common/include/rabbit.hrl b/deps/rabbit_common/include/rabbit.hrl
new file mode 100644
index 0000000000..707f8099e0
--- /dev/null
+++ b/deps/rabbit_common/include/rabbit.hrl
@@ -0,0 +1,267 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-include("resource.hrl").
+
+%% Passed around most places
+-record(user, {username,
+ tags,
+ authz_backends}). %% List of {Module, AuthUserImpl} pairs
+
+%% Passed to auth backends
+-record(auth_user, {username,
+ tags,
+ impl}).
+
+-record(permission, {configure, write, read}).
+-record(user_vhost, {username, virtual_host}).
+-record(user_permission, {user_vhost, permission}).
+-record(topic_permission_key, {user_vhost, exchange}).
+-record(topic_permission, {topic_permission_key, permission}).
+
+%% Client connection, used by rabbit_reader
+%% and related modules.
+-record(connection, {
+ %% e.g. <<"127.0.0.1:55054 -> 127.0.0.1:5672">>
+ name,
+ %% used for logging: same as `name`, but optionally
+ %% augmented with a user-supplied name
+ log_name,
+ %% server host
+ host,
+ %% client host
+ peer_host,
+ %% server port
+ port,
+ %% client port
+ peer_port,
+ %% protocol implementation module,
+ %% e.g. rabbit_framing_amqp_0_9_1
+ protocol,
+ user,
+ %% heartbeat timeout value used, 0 means
+ %% heartbeats are disabled
+ timeout_sec,
+ %% maximum allowed frame size,
+ %% see frame_max in the AMQP 0-9-1 spec
+ frame_max,
+ %% greatest channel number allowed,
+ %% see channel_max in the AMQP 0-9-1 spec
+ channel_max,
+ vhost,
+ %% client name, version, platform, etc
+ client_properties,
+ %% which protocol extensions does
+ %% this client support?
+ capabilities,
+ %% authentication mechanism used
+ %% as a pair of {Name, Module}
+ auth_mechanism,
+ %% authentication mechanism state,
+ %% initialised by rabbit_auth_mechanism:init/1
+ %% implementations
+ auth_state,
+ %% time of connection
+ connected_at}).
+
+-record(content,
+ {class_id,
+ properties, %% either 'none', or a decoded record/tuple
+ properties_bin, %% either 'none', or an encoded properties binary
+ %% Note: at most one of properties and properties_bin can be
+ %% 'none' at once.
+ protocol, %% The protocol under which properties_bin was encoded
+ payload_fragments_rev %% list of binaries, in reverse order (!)
+ }).
+
+%% fields described as 'transient' here are cleared when writing to
+%% rabbit_durable_<thing>
+-record(exchange, {
+ name, type, durable, auto_delete, internal, arguments, %% immutable
+ scratches, %% durable, explicitly updated via update_scratch/3
+ policy, %% durable, implicitly updated when policy changes
+ operator_policy, %% durable, implicitly updated when policy changes
+ decorators,
+ options = #{}}). %% transient, recalculated in store/1 (i.e. recovery)
+
+-record(exchange_serial, {name, next}).
+
+%% mnesia doesn't like unary records, so we add a dummy 'value' field
+-record(route, {binding, value = const}).
+-record(reverse_route, {reverse_binding, value = const}).
+
+-record(binding, {source, key, destination, args = []}).
+-record(reverse_binding, {destination, key, source, args = []}).
+
+-record(topic_trie_node, {trie_node, edge_count, binding_count}).
+-record(topic_trie_edge, {trie_edge, node_id}).
+-record(topic_trie_binding, {trie_binding, value = const}).
+
+-record(trie_node, {exchange_name, node_id}).
+-record(trie_edge, {exchange_name, node_id, word}).
+-record(trie_binding, {exchange_name, node_id, destination, arguments}).
+
+-record(listener, {node, protocol, host, ip_address, port, opts = []}).
+
+-record(runtime_parameters, {key, value}).
+
+-record(basic_message,
+ {exchange_name, %% The exchange where the message was received
+ routing_keys = [], %% Routing keys used during publish
+ content, %% The message content
+ id, %% A `rabbit_guid:gen()` generated id
+ is_persistent}). %% Whether the message was published as persistent
+
+-record(delivery,
+ {mandatory, %% Whether the message was published as mandatory
+ confirm, %% Whether the message needs confirming
+ sender, %% The pid of the process that created the delivery
+ message, %% The #basic_message record
+ msg_seq_no, %% Msg Sequence Number from the channel publish_seqno field
+ flow}). %% Should flow control be used for this delivery
+
+-record(amqp_error, {name, explanation = "", method = none}).
+
+-record(event, {type, props, reference = undefined, timestamp}).
+
+-record(message_properties, {expiry, needs_confirming = false, size}).
+
+-record(plugin, {name, %% atom()
+ version, %% string()
+ description, %% string()
+ type, %% 'ez' or 'dir'
+ dependencies, %% [atom()]
+ location, %% string()
+ %% List of supported broker version ranges,
+ %% e.g. ["3.5.7", "3.6.1"]
+ broker_version_requirements, %% [string()]
+ %% Proplist of supported dependency versions,
+ %% e.g. [{rabbitmq_management, ["3.5.7", "3.6.1"]},
+ %% {rabbitmq_federation, ["3.5.7", "3.6.1"]},
+ %% {rabbitmq_email, ["0.1.0"]}]
+ dependency_version_requirements, %% [{atom(), [string()]}]
+ extra_dependencies %% string()
+ }).
+
+%% used to track connections across virtual hosts
+%% so that limits can be enforced
+-record(tracked_connection_per_vhost, {
+ vhost,
+ connection_count}).
+
+%% Used to track connections per user
+%% so that limits can be enforced
+-record(tracked_connection_per_user, {
+ user,
+ connection_count
+ }).
+
+%% Used to track detailed information
+%% about connections.
+-record(tracked_connection, {
+ %% {Node, ConnectionName}
+ id,
+ node,
+ vhost,
+ name,
+ pid,
+ protocol,
+ %% network or direct
+ type,
+ %% client host
+ peer_host,
+ %% client port
+ peer_port,
+ username,
+ %% time of connection
+ connected_at
+ }).
+
+%% Used to track channels per user
+%% so that limits can be enforced
+-record(tracked_channel_per_user, {
+ user,
+ channel_count
+ }).
+
+%% Used to track detailed information
+%% about channels.
+-record(tracked_channel, {
+ %% {Node, ChannelName}
+ id,
+ node,
+ vhost,
+ name,
+ pid,
+ username,
+ connection}).
+
+%% Indicates maintenance state of a node
+-record(node_maintenance_state, {
+ node,
+ status = regular,
+ context = #{}
+ }).
+%%----------------------------------------------------------------------------
+
+-define(COPYRIGHT_MESSAGE, "Copyright (c) 2007-2020 VMware, Inc. or its affiliates.").
+-define(INFORMATION_MESSAGE, "Licensed under the MPL 2.0. Website: https://rabbitmq.com").
+
+%% EMPTY_FRAME_SIZE, 8 = 1 + 2 + 4 + 1
+%% - 1 byte of frame type
+%% - 2 bytes of channel number
+%% - 4 bytes of frame payload length
+%% - 1 byte of payload trailer FRAME_END byte
+%% See rabbit_binary_generator:check_empty_frame_size/0, an assertion
+%% called at startup.
+-define(EMPTY_FRAME_SIZE, 8).
+
+-define(MAX_WAIT, 16#ffffffff).
+-define(SUPERVISOR_WAIT,
+ rabbit_misc:get_env(rabbit, supervisor_shutdown_timeout, infinity)).
+-define(WORKER_WAIT,
+ rabbit_misc:get_env(rabbit, worker_shutdown_timeout, 30000)).
+-define(MSG_STORE_WORKER_WAIT,
+ rabbit_misc:get_env(rabbit, msg_store_shutdown_timeout, 600000)).
+
+-define(HIBERNATE_AFTER_MIN, 1000).
+-define(DESIRED_HIBERNATE, 10000).
+-define(CREDIT_DISC_BOUND, {4000, 800}).
+%% When we discover that we should write some indices to disk for some
+%% betas, IO_BATCH_SIZE sets the number of betas whose indices must be
+%% due for writing before we do any work at all.
+-define(IO_BATCH_SIZE, 4096). %% next power-of-2 after ?CREDIT_DISC_BOUND
+
+-define(INVALID_HEADERS_KEY, <<"x-invalid-headers">>).
+-define(ROUTING_HEADERS, [<<"CC">>, <<"BCC">>]).
+-define(DELETED_HEADER, <<"BCC">>).
+
+-define(EXCHANGE_DELETE_IN_PROGRESS_COMPONENT, <<"exchange-delete-in-progress">>).
+
+-define(CHANNEL_OPERATION_TIMEOUT, rabbit_misc:get_channel_operation_timeout()).
+
+%% Max supported number of priorities for a priority queue.
+-define(MAX_SUPPORTED_PRIORITY, 255).
+
+%% Max message size is hard-limited to 512 MiB.
+%% If a user configures a greater rabbit.max_message_size,
+%% this value is used instead.
+-define(MAX_MSG_SIZE, 536870912).
+
+-define(store_proc_name(N), rabbit_misc:store_proc_name(?MODULE, N)).
+
+%% For event audit purposes
+-define(INTERNAL_USER, <<"rmq-internal">>).
+-define(UNKNOWN_USER, <<"unknown">>).
+
+%% Store metadata in the trace files when message tracing is enabled.
+-define(LG_INFO(Info), is_pid(whereis(lg)) andalso (lg ! Info)).
+-define(LG_PROCESS_TYPE(Type), ?LG_INFO(#{process_type => Type})).
+
+%% Execution timeout of connection and channel tracking operations
+-define(TRACKING_EXECUTION_TIMEOUT,
+ rabbit_misc:get_env(rabbit, tracking_execution_timeout, 5000)).
diff --git a/deps/rabbit_common/include/rabbit_core_metrics.hrl b/deps/rabbit_common/include/rabbit_core_metrics.hrl
new file mode 100644
index 0000000000..17ffa2535b
--- /dev/null
+++ b/deps/rabbit_common/include/rabbit_core_metrics.hrl
@@ -0,0 +1,52 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+%% These tables contain the raw metrics as stored by RabbitMQ core
+-define(CORE_TABLES, [{connection_created, set},
+ {connection_metrics, set},
+ {connection_coarse_metrics, set},
+ {channel_created, set},
+ {channel_metrics, set},
+ {channel_queue_exchange_metrics, set},
+ {channel_queue_metrics, set},
+ {channel_exchange_metrics, set},
+ {channel_process_metrics, set},
+ {consumer_created, set},
+ {queue_metrics, set},
+ {queue_coarse_metrics, set},
+ {node_persister_metrics, set},
+ {node_coarse_metrics, set},
+ {node_metrics, set},
+ {node_node_metrics, set},
+ {connection_churn_metrics, set}]).
+
+-define(CORE_EXTRA_TABLES, [{gen_server2_metrics, set},
+ {auth_attempt_metrics, set},
+ {auth_attempt_detailed_metrics, set}]).
+
+-define(CONNECTION_CHURN_METRICS, {node(), 0, 0, 0, 0, 0, 0, 0}).
+
+%% connection_created :: {connection_id, proplist}
+%% connection_metrics :: {connection_id, proplist}
+%% connection_coarse_metrics :: {connection_id, recv_oct, send_oct, reductions}
+%% channel_created :: {channel_id, proplist}
+%% channel_metrics :: {channel_id, proplist}
+%% channel_queue_exchange_metrics :: {{channel_id, {queue_id, exchange_id}}, publish}
+%% channel_queue_metrics :: {{channel_id, queue_id}, proplist}
+%% channel_exchange_metrics :: {{channel_id, exchange_id}, proplist}
+%% channel_process_metrics :: {channel_id, reductions}
+%% consumer_created :: {{queue_id, channel_id, consumer_tag}, exclusive_consume,
+%% ack_required, prefetch_count, args}
+%% queue_metrics :: {queue_id, proplist}
+%% queue_coarse_metrics :: {queue_id, messages_ready, messages_unacknowledged,
+%% messages, reductions}
+%% node_persister_metrics :: {node_id, proplist}
+%% node_coarse_metrics :: {node_id, proplist}
+%% node_metrics :: {node_id, proplist}
+%% node_node_metrics :: {{node_id, node_id}, proplist}
+%% gen_server2_metrics :: {pid, buffer_length}
+%% connection_churn_metrics :: {node(), connection_created, connection_closed, channel_created, channel_closed, queue_declared, queue_created, queue_deleted}
diff --git a/deps/rabbit_common/include/rabbit_log.hrl b/deps/rabbit_common/include/rabbit_log.hrl
new file mode 100644
index 0000000000..9ce908e997
--- /dev/null
+++ b/deps/rabbit_common/include/rabbit_log.hrl
@@ -0,0 +1,8 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2017-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-define(LAGER_SINK, rabbit_log_lager_event).
diff --git a/deps/rabbit_common/include/rabbit_memory.hrl b/deps/rabbit_common/include/rabbit_memory.hrl
new file mode 100644
index 0000000000..c9991550fb
--- /dev/null
+++ b/deps/rabbit_common/include/rabbit_memory.hrl
@@ -0,0 +1,16 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-define(DEFAULT_MEMORY_CHECK_INTERVAL, 1000).
+-define(ONE_MiB, 1048576).
+
+%% For an unknown OS, we assume that we have 1GB of memory. It'll be
+%% wrong. Scale by vm_memory_high_watermark in configuration to get a
+%% sensible value.
+-define(MEMORY_SIZE_FOR_UNKNOWN_OS, 1073741824).
+-define(DEFAULT_VM_MEMORY_HIGH_WATERMARK, 0.4).
+-define(MAX_VM_MEMORY_HIGH_WATERMARK, 1.0).
diff --git a/deps/rabbit_common/include/rabbit_misc.hrl b/deps/rabbit_common/include/rabbit_misc.hrl
new file mode 100644
index 0000000000..98d4051a27
--- /dev/null
+++ b/deps/rabbit_common/include/rabbit_misc.hrl
@@ -0,0 +1,9 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-define(RPC_TIMEOUT, 15000).
+-define(RPC_INFINITE_TIMEOUT, infinity).
diff --git a/deps/rabbit_common/include/rabbit_msg_store.hrl b/deps/rabbit_common/include/rabbit_msg_store.hrl
new file mode 100644
index 0000000000..9d184ae153
--- /dev/null
+++ b/deps/rabbit_common/include/rabbit_msg_store.hrl
@@ -0,0 +1,12 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-include("rabbit.hrl").
+
+-type(msg() :: any()).
+
+-record(msg_location, {msg_id, ref_count, file, offset, total_size}).
diff --git a/deps/rabbit_common/include/resource.hrl b/deps/rabbit_common/include/resource.hrl
new file mode 100644
index 0000000000..5b2697f4d0
--- /dev/null
+++ b/deps/rabbit_common/include/resource.hrl
@@ -0,0 +1,14 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-record(resource, {
+ virtual_host,
+ %% exchange, queue, ...
+ kind,
+ %% name as a binary
+ name
+}).
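+
+%% Example (illustrative only): a queue named "orders" in the default
+%% virtual host would be represented as:
+%%   #resource{virtual_host = <<"/">>, kind = queue, name = <<"orders">>}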
diff --git a/deps/rabbit_common/mk/rabbitmq-build.mk b/deps/rabbit_common/mk/rabbitmq-build.mk
new file mode 100644
index 0000000000..2fedcf629b
--- /dev/null
+++ b/deps/rabbit_common/mk/rabbitmq-build.mk
@@ -0,0 +1,42 @@
+# --------------------------------------------------------------------
+# Compiler flags.
+# --------------------------------------------------------------------
+
+ifeq ($(filter rabbitmq-macros.mk,$(notdir $(MAKEFILE_LIST))),)
+include $(dir $(lastword $(MAKEFILE_LIST)))rabbitmq-macros.mk
+endif
+
+# NOTE: This plugin is loaded twice because Erlang.mk recurses. That is
+# why ERL_LIBS may contain the path to the Elixir libraries twice and
+# ERLC_OPTS may contain duplicated flags.
+
+TEST_ERLC_OPTS += +nowarn_export_all
+
+ifneq ($(filter-out rabbit_common amqp_client,$(PROJECT)),)
+# Add the CLI ebin directory to the code path for the compiler: plugin
+# CLI extensions may access behaviour modules defined in this directory.
+RMQ_ERLC_OPTS += -pa $(DEPS_DIR)/rabbitmq_cli/_build/dev/lib/rabbitmqctl/ebin
+endif
+
+# Add Lager parse_transform module and our default Lager extra sinks.
+LAGER_EXTRA_SINKS += rabbit_log \
+ rabbit_log_channel \
+ rabbit_log_connection \
+ rabbit_log_feature_flags \
+ rabbit_log_federation \
+ rabbit_log_ldap \
+ rabbit_log_mirroring \
+ rabbit_log_osiris \
+ rabbit_log_prelaunch \
+ rabbit_log_queue \
+ rabbit_log_ra \
+ rabbit_log_shovel \
+ rabbit_log_upgrade
+lager_extra_sinks = $(subst $(space),$(comma),$(LAGER_EXTRA_SINKS))
+
+RMQ_ERLC_OPTS += +'{parse_transform,lager_transform}' \
+ +'{lager_extra_sinks,[$(lager_extra_sinks)]}'
+
+# Push our compilation options to both the normal and test ERLC_OPTS.
+ERLC_OPTS += $(RMQ_ERLC_OPTS)
+TEST_ERLC_OPTS += $(RMQ_ERLC_OPTS)
diff --git a/deps/rabbit_common/mk/rabbitmq-components.hexpm.mk b/deps/rabbit_common/mk/rabbitmq-components.hexpm.mk
new file mode 100644
index 0000000000..4b110176a7
--- /dev/null
+++ b/deps/rabbit_common/mk/rabbitmq-components.hexpm.mk
@@ -0,0 +1,36 @@
+ifeq ($(.DEFAULT_GOAL),)
+# Define default goal to `all` because this file defines some targets
+# before the inclusion of erlang.mk leading to the wrong target becoming
+# the default.
+.DEFAULT_GOAL = all
+endif
+
+# PROJECT_VERSION defaults to:
+# 1. the version exported by rabbitmq-server-release;
+# 2. the version stored in `git-revisions.txt`, if it exists;
+# 3. a version based on git-describe(1), if it is a Git clone;
+# 4. 0.0.0
+
+PROJECT_VERSION := $(RABBITMQ_VERSION)
+
+ifeq ($(PROJECT_VERSION),)
+PROJECT_VERSION := $(shell \
+if test -f git-revisions.txt; then \
+ head -n1 git-revisions.txt | \
+ awk '{print $$$(words $(PROJECT_DESCRIPTION) version);}'; \
+else \
+ (git describe --dirty --abbrev=7 --tags --always --first-parent \
+ 2>/dev/null || echo rabbitmq_v0_0_0) | \
+ sed -e 's/^rabbitmq_v//' -e 's/^v//' -e 's/_/./g' -e 's/-/+/' \
+ -e 's/-/./g'; \
+fi)
+endif
+
+# --------------------------------------------------------------------
+# RabbitMQ components.
+# --------------------------------------------------------------------
+
+dep_amqp_client = hex $(PROJECT_VERSION)
+dep_rabbit_common = hex $(PROJECT_VERSION)
+
+# Third-party dependencies version pinning.
diff --git a/deps/rabbit_common/mk/rabbitmq-components.mk b/deps/rabbit_common/mk/rabbitmq-components.mk
new file mode 100644
index 0000000000..b2a3be8b35
--- /dev/null
+++ b/deps/rabbit_common/mk/rabbitmq-components.mk
@@ -0,0 +1,359 @@
+ifeq ($(.DEFAULT_GOAL),)
+# Define default goal to `all` because this file defines some targets
+# before the inclusion of erlang.mk leading to the wrong target becoming
+# the default.
+.DEFAULT_GOAL = all
+endif
+
+# PROJECT_VERSION defaults to:
+# 1. the version exported by rabbitmq-server-release;
+# 2. the version stored in `git-revisions.txt`, if it exists;
+# 3. a version based on git-describe(1), if it is a Git clone;
+# 4. 0.0.0
+
+PROJECT_VERSION := $(RABBITMQ_VERSION)
+
+ifeq ($(PROJECT_VERSION),)
+PROJECT_VERSION := $(shell \
+if test -f git-revisions.txt; then \
+ head -n1 git-revisions.txt | \
+ awk '{print $$$(words $(PROJECT_DESCRIPTION) version);}'; \
+else \
+ (git describe --dirty --abbrev=7 --tags --always --first-parent \
+ 2>/dev/null || echo rabbitmq_v0_0_0) | \
+ sed -e 's/^rabbitmq_v//' -e 's/^v//' -e 's/_/./g' -e 's/-/+/' \
+ -e 's/-/./g'; \
+fi)
+endif
+
+# --------------------------------------------------------------------
+# RabbitMQ components.
+# --------------------------------------------------------------------
+
+# For RabbitMQ repositories, we want to check out branches that match
+# the parent project. For instance, if the parent project is on a
+# release tag, dependencies must be on the same release tag. If the
+# parent project is on a topic branch, dependencies must be on the same
+# topic branch, or fall back to `stable` or `master`, whichever was the
+# base of the topic branch.
+
+dep_amqp_client = git_rmq rabbitmq-erlang-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_amqp10_client = git_rmq rabbitmq-amqp1.0-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_amqp10_common = git_rmq rabbitmq-amqp1.0-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbit = git_rmq rabbitmq-server $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbit_common = git_rmq rabbitmq-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_amqp1_0 = git_rmq rabbitmq-amqp1.0 $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_amqp = git_rmq rabbitmq-auth-backend-amqp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_cache = git_rmq rabbitmq-auth-backend-cache $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_http = git_rmq rabbitmq-auth-backend-http $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_ldap = git_rmq rabbitmq-auth-backend-ldap $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_oauth2 = git_rmq rabbitmq-auth-backend-oauth2 $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_mechanism_ssl = git_rmq rabbitmq-auth-mechanism-ssl $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_aws = git_rmq rabbitmq-aws $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_boot_steps_visualiser = git_rmq rabbitmq-boot-steps-visualiser $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_cli = git_rmq rabbitmq-cli $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_codegen = git_rmq rabbitmq-codegen $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_consistent_hash_exchange = git_rmq rabbitmq-consistent-hash-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_ct_client_helpers = git_rmq rabbitmq-ct-client-helpers $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_ct_helpers = git_rmq rabbitmq-ct-helpers $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_delayed_message_exchange = git_rmq rabbitmq-delayed-message-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_dotnet_client = git_rmq rabbitmq-dotnet-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_event_exchange = git_rmq rabbitmq-event-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_federation = git_rmq rabbitmq-federation $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_federation_management = git_rmq rabbitmq-federation-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_java_client = git_rmq rabbitmq-java-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_client = git_rmq rabbitmq-jms-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_cts = git_rmq rabbitmq-jms-cts $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_topic_exchange = git_rmq rabbitmq-jms-topic-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_lvc_exchange = git_rmq rabbitmq-lvc-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management = git_rmq rabbitmq-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_agent = git_rmq rabbitmq-management-agent $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_exchange = git_rmq rabbitmq-management-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_themes = git_rmq rabbitmq-management-themes $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_message_timestamp = git_rmq rabbitmq-message-timestamp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_metronome = git_rmq rabbitmq-metronome $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_mqtt = git_rmq rabbitmq-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_objc_client = git_rmq rabbitmq-objc-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_aws = git_rmq rabbitmq-peer-discovery-aws $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_common = git_rmq rabbitmq-peer-discovery-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_consul = git_rmq rabbitmq-peer-discovery-consul $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_etcd = git_rmq rabbitmq-peer-discovery-etcd $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_k8s = git_rmq rabbitmq-peer-discovery-k8s $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_prometheus = git_rmq rabbitmq-prometheus $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_random_exchange = git_rmq rabbitmq-random-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_recent_history_exchange = git_rmq rabbitmq-recent-history-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_routing_node_stamp = git_rmq rabbitmq-routing-node-stamp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_rtopic_exchange = git_rmq rabbitmq-rtopic-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_server_release = git_rmq rabbitmq-server-release $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_sharding = git_rmq rabbitmq-sharding $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_shovel = git_rmq rabbitmq-shovel $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_shovel_management = git_rmq rabbitmq-shovel-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_stomp = git_rmq rabbitmq-stomp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_stream = git_rmq rabbitmq-stream $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_toke = git_rmq rabbitmq-toke $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_top = git_rmq rabbitmq-top $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_tracing = git_rmq rabbitmq-tracing $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_trust_store = git_rmq rabbitmq-trust-store $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_test = git_rmq rabbitmq-test $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_dispatch = git_rmq rabbitmq-web-dispatch $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_stomp = git_rmq rabbitmq-web-stomp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_stomp_examples = git_rmq rabbitmq-web-stomp-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt = git_rmq rabbitmq-web-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt_examples = git_rmq rabbitmq-web-mqtt-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_website = git_rmq rabbitmq-website $(current_rmq_ref) $(base_rmq_ref) live master
+dep_toke = git_rmq toke $(current_rmq_ref) $(base_rmq_ref) master
+
+dep_rabbitmq_public_umbrella = git_rmq rabbitmq-public-umbrella $(current_rmq_ref) $(base_rmq_ref) master
+
+# Third-party dependency version pinning.
+#
+# We pin them in this file, which is copied to all projects, to ensure
+# they all use the same versions. This avoids conflicts and makes it
+# possible to work with rabbitmq-public-umbrella.
+
+dep_accept = hex 0.3.5
+dep_cowboy = hex 2.8.0
+dep_cowlib = hex 2.9.1
+dep_jsx = hex 2.11.0
+dep_lager = hex 3.8.0
+dep_prometheus = git https://github.com/deadtrickster/prometheus.erl.git master
+dep_ra = git https://github.com/rabbitmq/ra.git master
+dep_ranch = hex 1.7.1
+dep_recon = hex 2.5.1
+dep_observer_cli = hex 1.5.4
+dep_stdout_formatter = hex 0.2.4
+dep_sysmon_handler = hex 1.3.0
+
+RABBITMQ_COMPONENTS = amqp_client \
+ amqp10_common \
+ amqp10_client \
+ rabbit \
+ rabbit_common \
+ rabbitmq_amqp1_0 \
+ rabbitmq_auth_backend_amqp \
+ rabbitmq_auth_backend_cache \
+ rabbitmq_auth_backend_http \
+ rabbitmq_auth_backend_ldap \
+ rabbitmq_auth_backend_oauth2 \
+ rabbitmq_auth_mechanism_ssl \
+ rabbitmq_aws \
+ rabbitmq_boot_steps_visualiser \
+ rabbitmq_cli \
+ rabbitmq_codegen \
+ rabbitmq_consistent_hash_exchange \
+ rabbitmq_ct_client_helpers \
+ rabbitmq_ct_helpers \
+ rabbitmq_delayed_message_exchange \
+ rabbitmq_dotnet_client \
+ rabbitmq_event_exchange \
+ rabbitmq_federation \
+ rabbitmq_federation_management \
+ rabbitmq_java_client \
+ rabbitmq_jms_client \
+ rabbitmq_jms_cts \
+ rabbitmq_jms_topic_exchange \
+ rabbitmq_lvc_exchange \
+ rabbitmq_management \
+ rabbitmq_management_agent \
+ rabbitmq_management_exchange \
+ rabbitmq_management_themes \
+ rabbitmq_message_timestamp \
+ rabbitmq_metronome \
+ rabbitmq_mqtt \
+ rabbitmq_objc_client \
+ rabbitmq_peer_discovery_aws \
+ rabbitmq_peer_discovery_common \
+ rabbitmq_peer_discovery_consul \
+ rabbitmq_peer_discovery_etcd \
+ rabbitmq_peer_discovery_k8s \
+ rabbitmq_prometheus \
+ rabbitmq_random_exchange \
+ rabbitmq_recent_history_exchange \
+ rabbitmq_routing_node_stamp \
+ rabbitmq_rtopic_exchange \
+ rabbitmq_server_release \
+ rabbitmq_sharding \
+ rabbitmq_shovel \
+ rabbitmq_shovel_management \
+ rabbitmq_stomp \
+ rabbitmq_stream \
+ rabbitmq_toke \
+ rabbitmq_top \
+ rabbitmq_tracing \
+ rabbitmq_trust_store \
+ rabbitmq_web_dispatch \
+ rabbitmq_web_mqtt \
+ rabbitmq_web_mqtt_examples \
+ rabbitmq_web_stomp \
+ rabbitmq_web_stomp_examples \
+ rabbitmq_website
+
+# Erlang.mk does not rebuild dependencies after they have been compiled
+# once, except for those listed in the `$(FORCE_REBUILD)` variable.
+#
+# We want all RabbitMQ components to always be rebuilt: this eases
+# working on several components at the same time.
+
+FORCE_REBUILD = $(RABBITMQ_COMPONENTS)
+
+# Several components have a custom erlang.mk/build.config, mainly
+# to disable eunit. Therefore, we can't use the top-level project's
+# erlang.mk copy.
+NO_AUTOPATCH += $(RABBITMQ_COMPONENTS)
+
+ifeq ($(origin current_rmq_ref),undefined)
+ifneq ($(wildcard .git),)
+current_rmq_ref := $(shell (\
+ ref=$$(LANG=C git branch --list | awk '/^\* \(.*detached / {ref=$$0; sub(/.*detached [^ ]+ /, "", ref); sub(/\)$$/, "", ref); print ref; exit;} /^\* / {ref=$$0; sub(/^\* /, "", ref); print ref; exit}');\
+ if test "$$(git rev-parse --short HEAD)" != "$$ref"; then echo "$$ref"; fi))
+else
+current_rmq_ref := master
+endif
+endif
+export current_rmq_ref
+
+ifeq ($(origin base_rmq_ref),undefined)
+ifneq ($(wildcard .git),)
+possible_base_rmq_ref := master
+ifeq ($(possible_base_rmq_ref),$(current_rmq_ref))
+base_rmq_ref := $(current_rmq_ref)
+else
+base_rmq_ref := $(shell \
+ (git rev-parse --verify -q master >/dev/null && \
+ git rev-parse --verify -q $(possible_base_rmq_ref) >/dev/null && \
+ git merge-base --is-ancestor $$(git merge-base master HEAD) $(possible_base_rmq_ref) && \
+ echo $(possible_base_rmq_ref)) || \
+ echo master)
+endif
+else
+base_rmq_ref := master
+endif
+endif
+export base_rmq_ref
+
+# Repository URL selection.
+#
+# First, we infer other components' location from the current project
+# repository URL, if it's a Git repository:
+# - We take the "origin" remote URL as the base
+# - The current project name and repository name are replaced by the
+#   target's properties:
+# eg. rabbitmq-common is replaced by rabbitmq-codegen
+# eg. rabbit_common is replaced by rabbitmq_codegen
+#
+# If cloning from this computed location fails, we fall back to the
+# RabbitMQ upstream, which is hosted on GitHub.
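+#
+# For example (hypothetical fork URL), if rabbit_common was cloned from
+# https://example.com/forks/rabbitmq-common.git, the computed fetch
+# location for rabbitmq-codegen is
+# https://example.com/forks/rabbitmq-codegen.git. If cloning that fails,
+# https://github.com/rabbitmq/rabbitmq-codegen.git is used instead.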
+
+# Macro to transform eg. "rabbit_common" to "rabbitmq-common".
+rmq_cmp_repo_name = $(word 2,$(dep_$(1)))
+
+# Upstream URL for the current project.
+RABBITMQ_COMPONENT_REPO_NAME := $(call rmq_cmp_repo_name,$(PROJECT))
+RABBITMQ_UPSTREAM_FETCH_URL ?= https://github.com/rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git
+RABBITMQ_UPSTREAM_PUSH_URL ?= git@github.com:rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git
+
+# Current URL for the current project. If this is not a Git clone,
+# default to the upstream Git repository.
+ifneq ($(wildcard .git),)
+git_origin_fetch_url := $(shell git config remote.origin.url)
+git_origin_push_url := $(shell git config remote.origin.pushurl || git config remote.origin.url)
+RABBITMQ_CURRENT_FETCH_URL ?= $(git_origin_fetch_url)
+RABBITMQ_CURRENT_PUSH_URL ?= $(git_origin_push_url)
+else
+RABBITMQ_CURRENT_FETCH_URL ?= $(RABBITMQ_UPSTREAM_FETCH_URL)
+RABBITMQ_CURRENT_PUSH_URL ?= $(RABBITMQ_UPSTREAM_PUSH_URL)
+endif
+
+# Macro to replace the following patterns:
+# 1. /foo.git -> /bar.git
+# 2. /foo -> /bar
+# 3. /foo/ -> /bar/
+subst_repo_name = $(patsubst %/$(1)/%,%/$(2)/%,$(patsubst %/$(1),%/$(2),$(patsubst %/$(1).git,%/$(2).git,$(3))))
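+
+# For example (hypothetical URL):
+#   $(call subst_repo_name,rabbitmq-common,rabbitmq-codegen,https://example.com/rabbitmq-common.git)
+# returns https://example.com/rabbitmq-codegen.git.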
+
+# Macro to replace both the project's name (eg. "rabbit_common") and
+# repository name (eg. "rabbitmq-common") with the target's equivalents.
+#
+# This macro is kept on one line because we don't want whitespaces in
+# the returned value, as it's used in $(dep_fetch_git_rmq) in a shell
+# single-quoted string.
+dep_rmq_repo = $(if $(dep_$(2)),$(call subst_repo_name,$(PROJECT),$(2),$(call subst_repo_name,$(RABBITMQ_COMPONENT_REPO_NAME),$(call rmq_cmp_repo_name,$(2)),$(1))),$(pkg_$(1)_repo))
+
+dep_rmq_commits = $(if $(dep_$(1)), \
+ $(wordlist 3,$(words $(dep_$(1))),$(dep_$(1))), \
+ $(pkg_$(1)_commit))
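+
+# For example (hypothetical refs), given
+#   dep_foo = git_rmq rabbitmq-foo branch-a master
+# $(call dep_rmq_commits,foo) returns "branch-a master", ie. everything
+# after the fetch method and the repository name.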
+
+define dep_fetch_git_rmq
+ fetch_url1='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_FETCH_URL),$(1))'; \
+ fetch_url2='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_FETCH_URL),$(1))'; \
+ if test "$$$$fetch_url1" != '$(RABBITMQ_CURRENT_FETCH_URL)' && \
+ git clone -q -n -- "$$$$fetch_url1" $(DEPS_DIR)/$(call dep_name,$(1)); then \
+ fetch_url="$$$$fetch_url1"; \
+ push_url='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_PUSH_URL),$(1))'; \
+ elif git clone -q -n -- "$$$$fetch_url2" $(DEPS_DIR)/$(call dep_name,$(1)); then \
+ fetch_url="$$$$fetch_url2"; \
+ push_url='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_PUSH_URL),$(1))'; \
+ fi; \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && ( \
+ $(foreach ref,$(call dep_rmq_commits,$(1)), \
+ git checkout -q $(ref) >/dev/null 2>&1 || \
+ ) \
+ (echo "error: no valid pathspec among: $(call dep_rmq_commits,$(1))" \
+ 1>&2 && false) ) && \
+ (test "$$$$fetch_url" = "$$$$push_url" || \
+ git remote set-url --push origin "$$$$push_url")
+endef
+
+# --------------------------------------------------------------------
+# Component distribution.
+# --------------------------------------------------------------------
+
+list-dist-deps::
+ @:
+
+prepare-dist::
+ @:
+
+# --------------------------------------------------------------------
+# Umbrella-specific settings.
+# --------------------------------------------------------------------
+
+# If the top-level project is a RabbitMQ component, we override
+# $(DEPS_DIR) for this project to point to the top-level's one.
+#
+# We also verify that the guessed DEPS_DIR is actually named `deps`,
+# to rule out any situation where finding a `rabbitmq-components.mk`
+# in upper directories is just a coincidence.
+
+possible_deps_dir_1 = $(abspath ..)
+possible_deps_dir_2 = $(abspath ../../..)
+
+ifeq ($(notdir $(possible_deps_dir_1)),deps)
+ifneq ($(wildcard $(possible_deps_dir_1)/../rabbitmq-components.mk),)
+deps_dir_overriden = 1
+DEPS_DIR ?= $(possible_deps_dir_1)
+DISABLE_DISTCLEAN = 1
+endif
+endif
+
+ifeq ($(deps_dir_overriden),)
+ifeq ($(notdir $(possible_deps_dir_2)),deps)
+ifneq ($(wildcard $(possible_deps_dir_2)/../rabbitmq-components.mk),)
+deps_dir_overriden = 1
+DEPS_DIR ?= $(possible_deps_dir_2)
+DISABLE_DISTCLEAN = 1
+endif
+endif
+endif
+
+ifneq ($(wildcard UMBRELLA.md),)
+DISABLE_DISTCLEAN = 1
+endif
+
+# We disable `make distclean` so $(DEPS_DIR) is not accidentally removed.
+
+ifeq ($(DISABLE_DISTCLEAN),1)
+ifneq ($(filter distclean distclean-deps,$(MAKECMDGOALS)),)
+SKIP_DEPS = 1
+endif
+endif
diff --git a/deps/rabbit_common/mk/rabbitmq-dist.mk b/deps/rabbit_common/mk/rabbitmq-dist.mk
new file mode 100644
index 0000000000..3e17a27939
--- /dev/null
+++ b/deps/rabbit_common/mk/rabbitmq-dist.mk
@@ -0,0 +1,365 @@
+.PHONY: dist test-dist do-dist cli-scripts cli-escripts clean-dist
+
+DIST_DIR = plugins
+CLI_SCRIPTS_DIR = sbin
+CLI_ESCRIPTS_DIR = escript
+MIX = echo y | mix
+
+# Set $(DIST_AS_EZS) to a non-empty value to enable the packaging of
+# plugins as .ez archives.
+ifeq ($(USE_RABBIT_BOOT_SCRIPT),)
+DIST_AS_EZS ?=
+else
+DIST_AS_EZS =
+endif
+
+dist_verbose_0 = @echo " DIST " $@;
+dist_verbose_2 = set -x;
+dist_verbose = $(dist_verbose_$(V))
+
+MIX_ARCHIVES ?= $(HOME)/.mix/archives
+
+MIX_TASK_ARCHIVE_DEPS_VERSION = 0.5.0
+mix_task_archive_deps = $(MIX_ARCHIVES)/mix_task_archive_deps-$(MIX_TASK_ARCHIVE_DEPS_VERSION)
+
+# We take the version of an Erlang application from the .app file. This
+# macro is called like this:
+#
+# $(call get_app_version,/path/to/name.app.src)
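+#
+# For example, given a (hypothetical) name.app.src containing:
+#
+#   {application, name, [{vsn, "1.2.3"}, ...]}.
+#
+# the macro prints "1.2.3".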
+
+ifeq ($(PLATFORM),msys2)
+core_unix_path = $(shell cygpath $1)
+else
+core_unix_path = $1
+endif
+
+define get_app_version
+$(shell awk '
+/{ *vsn *, *"/ {
+ vsn=$$0;
+ sub(/.*{ *vsn, *"/, "", vsn);
+ sub(/".*/, "", vsn);
+ print vsn;
+ exit;
+}' $(1))
+endef
+
+define get_mix_project_version
+$(shell cd $(1) && \
+ $(MIX) do deps.get, deps.compile, compile >/dev/null && \
+ $(MIX) run --no-start -e "IO.puts(Mix.Project.config[:version])")
+endef
+
+# Define the target to create an .ez plugin archive for an
+# Erlang.mk-based project. This macro is called like this:
+#
+# $(call do_ez_target_erlangmk,app_name,app_version,app_dir)
+
+define do_ez_target_erlangmk
+dist_$(1)_ez_dir = $$(if $(2),$(DIST_DIR)/$(1)-$(2), \
+ $$(if $$(VERSION),$(DIST_DIR)/$(1)-$$(VERSION),$(DIST_DIR)/$(1)))
+ifeq ($(DIST_AS_EZS),)
+dist_$(1)_ez = $$(dist_$(1)_ez_dir)
+else
+dist_$(1)_ez = $$(dist_$(1)_ez_dir).ez
+endif
+
+$$(dist_$(1)_ez): APP = $(1)
+$$(dist_$(1)_ez): VSN = $(2)
+$$(dist_$(1)_ez): SRC_DIR = $(3)
+$$(dist_$(1)_ez): EZ_DIR = $$(abspath $$(dist_$(1)_ez_dir))
+$$(dist_$(1)_ez): EZ = $$(dist_$(1)_ez)
+$$(dist_$(1)_ez): $$(if $$(wildcard $(3)/ebin $(3)/include $(3)/priv),\
+ $$(filter-out %/dep_built %/ebin/test,$$(call core_find,$$(wildcard $(3)/ebin $(3)/include $(3)/priv),*)),)
+
+# If the application's Makefile defines a `list-dist-deps` target, we
+# use it to populate the dependencies list. This is useful when the
+# application also has a `prepare-dist` target to modify the created
+# tree before we make an archive out of it.
+
+ifeq ($$(shell test -f $(3)/rabbitmq-components.mk \
+ && grep -q '^list-dist-deps::' $(3)/Makefile && echo yes),yes)
+$$(dist_$(1)_ez): $$(patsubst %,$(3)/%, \
+ $$(shell $(MAKE) --no-print-directory -C $(3) list-dist-deps \
+ APP=$(1) VSN=$(2) EZ_DIR=$$(abspath $$(dist_$(1)_ez_dir))))
+endif
+
+ERLANGMK_DIST_APPS += $(1)
+
+ERLANGMK_DIST_EZS += $$(dist_$(1)_ez)
+
+endef
+
+# Define the target to create an .ez plugin archive for a Mix-based
+# project. This macro is called like this:
+#
+# $(call do_ez_target_mix,app_name,app_version,app_dir)
+
+define get_mix_project_dep_ezs
+$(shell cd $(1) && \
+ $(MIX) do deps.get, deps.compile, compile >/dev/null && \
+ $(MIX) archive.build.all.list -e -o $(DIST_DIR) --skip "rabbit $(ERLANGMK_DIST_APPS)")
+endef
+
+define do_ez_target_mix
+dist_$(1)_ez_dir = $$(if $(2),$(DIST_DIR)/$(1)-$(2), \
+ $$(if $$(VERSION),$(DIST_DIR)/$(1)-$$(VERSION),$(DIST_DIR)/$(1)))
+dist_$(1)_ez = $$(dist_$(1)_ez_dir).ez
+
+$$(dist_$(1)_ez): APP = $(1)
+$$(dist_$(1)_ez): VSN = $(2)
+$$(dist_$(1)_ez): SRC_DIR = $(3)
+$$(dist_$(1)_ez): EZ_DIR = $$(abspath $$(dist_$(1)_ez_dir))
+$$(dist_$(1)_ez): EZ = $$(dist_$(1)_ez)
+$$(dist_$(1)_ez): $$(if $$(wildcard _build/dev/lib/$(1)/ebin $(3)/priv),\
+ $$(filter-out %/dep_built,$$(call core_find,$$(wildcard _build/dev/lib/$(1)/ebin $(3)/priv),*)),)
+
+MIX_DIST_EZS += $$(dist_$(1)_ez)
+EXTRA_DIST_EZS += $$(call get_mix_project_dep_ezs,$(3))
+
+endef
+
+# Real entry point: it tests for the existence of an .app file to determine
+# if it is an Erlang application (and therefore if it should be provided
+# as an .ez plugin archive) and calls do_ez_target_erlangmk. If instead
+# it finds a Mix configuration file, it calls do_ez_target_mix. It
+# should be called as:
+#
+# $(call ez_target,path_to_app)
+
+define ez_target
+dist_$(1)_appdir = $(2)
+dist_$(1)_appfile = $$(dist_$(1)_appdir)/ebin/$(1).app
+dist_$(1)_mixfile = $$(dist_$(1)_appdir)/mix.exs
+
+$$(if $$(shell test -f $$(dist_$(1)_appfile) && echo OK), \
+ $$(eval $$(call do_ez_target_erlangmk,$(1),$$(call get_app_version,$$(dist_$(1)_appfile)),$$(dist_$(1)_appdir))), \
+ $$(if $$(shell test -f $$(dist_$(1)_mixfile) && [ "x$(1)" != "xrabbitmqctl" ] && [ "x$(1)" != "xrabbitmq_cli" ] && echo OK), \
+ $$(eval $$(call do_ez_target_mix,$(1),$$(call get_mix_project_version,$$(dist_$(1)_appdir)),$$(dist_$(1)_appdir)))))
+
+endef
+
+ifneq ($(filter do-dist,$(MAKECMDGOALS)),)
+# The following code is evaluated only when running "make do-dist",
+# otherwise it would trigger an infinite loop, as this code calls "make
+# list-dist-deps" (see do_ez_target_erlangmk).
+ifdef DIST_PLUGINS_LIST
+# Now, try to create an .ez target for the top-level project and all
+# dependencies.
+
+ifeq ($(wildcard $(DIST_PLUGINS_LIST)),)
+$(error DIST_PLUGINS_LIST ($(DIST_PLUGINS_LIST)) is missing)
+endif
+
+$(eval $(foreach path, \
+ $(filter-out %/looking_glass %/lz4, \
+ $(sort $(shell cat $(DIST_PLUGINS_LIST))) $(CURDIR)), \
+ $(call ez_target,$(if $(filter $(path),$(CURDIR)),$(PROJECT),$(notdir $(path))),$(path))))
+endif
+endif
+
+# The actual recipe to create the .ez plugin archive. Some variables
+# are defined in the do_ez_target_erlangmk and do_ez_target_mix macros
+# above. All .ez archives are also listed in these same macros.
+
+RSYNC ?= rsync
+RSYNC_V_0 =
+RSYNC_V_1 = -v
+RSYNC_V = $(RSYNC_V_$(V))
+
+ZIP ?= zip
+ZIP_V_0 = -q
+ZIP_V_1 =
+ZIP_V = $(ZIP_V_$(V))
+
+$(ERLANGMK_DIST_EZS):
+ $(verbose) rm -rf $(EZ_DIR) $(EZ)
+ $(verbose) mkdir -p $(EZ_DIR)
+ $(dist_verbose) $(RSYNC) -a $(RSYNC_V) \
+ --exclude '/ebin/dep_built' \
+ --exclude '/ebin/test' \
+ --include '/ebin/***' \
+ --include '/include/***' \
+ --include '/priv/***' \
+ --exclude '*' \
+ $(call core_unix_path,$(SRC_DIR))/ $(call core_unix_path,$(EZ_DIR))/
+ @# Give a chance to the application to make any modification it
+ @# wants to the tree before we make an archive.
+ $(verbose) ! (test -f $(SRC_DIR)/rabbitmq-components.mk \
+ && grep -q '^prepare-dist::' $(SRC_DIR)/Makefile) || \
+ $(MAKE) --no-print-directory -C $(SRC_DIR) prepare-dist \
+ APP=$(APP) VSN=$(VSN) EZ_DIR=$(EZ_DIR)
+ifneq ($(DIST_AS_EZS),)
+ $(verbose) (cd $(DIST_DIR) && \
+ find "$(basename $(notdir $@))" | LC_COLLATE=C sort \
+ > "$(basename $(notdir $@)).manifest" && \
+ $(ZIP) $(ZIP_V) --names-stdin "$(notdir $@)" \
+ < "$(basename $(notdir $@)).manifest")
+ $(verbose) rm -rf $(EZ_DIR) $(EZ_DIR).manifest
+endif
+
+$(MIX_DIST_EZS): $(mix_task_archive_deps)
+ $(verbose) cd $(SRC_DIR) && \
+ $(MIX) do deps.get, deps.compile, compile, archive.build.all \
+ -e -o $(abspath $(DIST_DIR)) --skip "rabbit $(ERLANGMK_DIST_APPS)"
+
+MIX_TASK_ARCHIVE_DEPS_URL = https://github.com/rabbitmq/mix_task_archive_deps/releases/download/$(MIX_TASK_ARCHIVE_DEPS_VERSION)/mix_task_archive_deps-$(MIX_TASK_ARCHIVE_DEPS_VERSION).ez
+
+$(mix_task_archive_deps):
+ $(gen_verbose) mix archive.install --force $(MIX_TASK_ARCHIVE_DEPS_URL)
+
+# We need to recurse because the top-level make instance is evaluated
+# before dependencies are downloaded.
+
+MAYBE_APPS_LIST = $(if $(shell test -f $(ERLANG_MK_TMP)/apps.log && echo OK), \
+ $(ERLANG_MK_TMP)/apps.log)
+DIST_LOCK = $(DIST_DIR).lock
+
+dist:: $(ERLANG_MK_RECURSIVE_DEPS_LIST) all
+ $(gen_verbose) \
+ if command -v flock >/dev/null; then \
+ flock $(DIST_LOCK) \
+ sh -c '$(MAKE) do-dist \
+ DIST_PLUGINS_LIST="$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+ $(MAYBE_APPS_LIST)"'; \
+ elif command -v lockf >/dev/null; then \
+ lockf $(DIST_LOCK) \
+ sh -c '$(MAKE) do-dist \
+ DIST_PLUGINS_LIST="$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+ $(MAYBE_APPS_LIST)"'; \
+ else \
+ $(MAKE) do-dist \
+ DIST_PLUGINS_LIST="$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+ $(MAYBE_APPS_LIST)"; \
+ fi
+
+test-dist:: export TEST_DIR=NON-EXISTENT
+test-dist:: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) test-build
+ $(gen_verbose) \
+ if command -v flock >/dev/null; then \
+ flock $(DIST_LOCK) \
+ sh -c '$(MAKE) do-dist \
+ DIST_PLUGINS_LIST="$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+ $(MAYBE_APPS_LIST)"'; \
+ elif command -v lockf >/dev/null; then \
+ lockf $(DIST_LOCK) \
+ sh -c '$(MAKE) do-dist \
+ DIST_PLUGINS_LIST="$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+ $(MAYBE_APPS_LIST)"'; \
+ else \
+ $(MAKE) do-dist \
+ DIST_PLUGINS_LIST="$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+ $(MAYBE_APPS_LIST)"; \
+ fi
+
+DIST_EZS = $(ERLANGMK_DIST_EZS) $(MIX_DIST_EZS)
+
+do-dist:: $(DIST_EZS)
+ $(verbose) unwanted='$(filter-out $(DIST_EZS) $(EXTRA_DIST_EZS), \
+ $(wildcard $(DIST_DIR)/*))'; \
+ test -z "$$unwanted" || (echo " RM $$unwanted" && rm -rf $$unwanted)
+
+CLI_SCRIPTS_LOCK = $(CLI_SCRIPTS_DIR).lock
+CLI_ESCRIPTS_LOCK = $(CLI_ESCRIPTS_DIR).lock
+
+ifneq ($(filter-out rabbit_common amqp10_common,$(PROJECT)),)
+dist:: install-cli
+test-build:: install-cli
+endif
+
+install-cli: install-cli-scripts install-cli-escripts
+ @:
+
+ifeq ($(PROJECT),rabbit)
+install-cli-scripts:
+ $(gen_verbose) \
+ if command -v flock >/dev/null; then \
+ flock $(CLI_SCRIPTS_LOCK) \
+ sh -c 'mkdir -p "$(CLI_SCRIPTS_DIR)" && \
+ for file in scripts/*; do \
+ cmp -s "$$file" "$(CLI_SCRIPTS_DIR)/$$(basename "$$file")" || \
+ cp -a "$$file" "$(CLI_SCRIPTS_DIR)/$$(basename "$$file")"; \
+ done'; \
+ elif command -v lockf >/dev/null; then \
+ lockf $(CLI_SCRIPTS_LOCK) \
+ sh -c 'mkdir -p "$(CLI_SCRIPTS_DIR)" && \
+ for file in scripts/*; do \
+ cmp -s "$$file" "$(CLI_SCRIPTS_DIR)/$$(basename "$$file")" || \
+ cp -a "$$file" "$(CLI_SCRIPTS_DIR)/$$(basename "$$file")"; \
+ done'; \
+ else \
+ mkdir -p "$(CLI_SCRIPTS_DIR)" && \
+ for file in scripts/*; do \
+ cmp -s "$$file" "$(CLI_SCRIPTS_DIR)/$$(basename "$$file")" || \
+ cp -a "$$file" "$(CLI_SCRIPTS_DIR)/$$(basename "$$file")"; \
+ done; \
+ fi
+else
+
+install-cli-scripts:
+ $(gen_verbose) \
+ set -e; \
+ if test -d "$(DEPS_DIR)/rabbit/scripts"; then \
+ rabbit_scripts_dir='$(DEPS_DIR)/rabbit/scripts'; \
+ elif test -d "$(DEPS_DIR)/../scripts"; then \
+ rabbit_scripts_dir='$(DEPS_DIR)/../scripts'; \
+ else \
+ echo 'rabbit/scripts directory not found' 1>&2; \
+ exit 1; \
+ fi; \
+ test -d "$$rabbit_scripts_dir"; \
+ if command -v flock >/dev/null; then \
+ flock $(CLI_SCRIPTS_LOCK) \
+ sh -e -c 'mkdir -p "$(CLI_SCRIPTS_DIR)" && \
+ for file in "'$$rabbit_scripts_dir'"/*; do \
+ test -f "$$file"; \
+ cmp -s "$$file" "$(CLI_SCRIPTS_DIR)/$$(basename "$$file")" || \
+ cp -a "$$file" "$(CLI_SCRIPTS_DIR)/$$(basename "$$file")"; \
+ done'; \
+ elif command -v lockf >/dev/null; then \
+ lockf $(CLI_SCRIPTS_LOCK) \
+ sh -e -c 'mkdir -p "$(CLI_SCRIPTS_DIR)" && \
+ for file in "'$$rabbit_scripts_dir'"/*; do \
+ test -f "$$file"; \
+ cmp -s "$$file" "$(CLI_SCRIPTS_DIR)/$$(basename "$$file")" || \
+ cp -a "$$file" "$(CLI_SCRIPTS_DIR)/$$(basename "$$file")"; \
+ done'; \
+ else \
+ mkdir -p "$(CLI_SCRIPTS_DIR)" && \
+ for file in "$$rabbit_scripts_dir"/*; do \
+ test -f "$$file"; \
+ cmp -s "$$file" "$(CLI_SCRIPTS_DIR)/$$(basename "$$file")" || \
+ cp -a "$$file" "$(CLI_SCRIPTS_DIR)/$$(basename "$$file")"; \
+ done; \
+ fi
+endif
+
+install-cli-escripts:
+ $(gen_verbose) \
+ if command -v flock >/dev/null; then \
+ flock $(CLI_ESCRIPTS_LOCK) \
+ sh -c 'mkdir -p "$(CLI_ESCRIPTS_DIR)" && \
+ $(MAKE) -C "$(DEPS_DIR)/rabbitmq_cli" install \
+ PREFIX="$(abspath $(CLI_ESCRIPTS_DIR))" \
+ DESTDIR='; \
+ elif command -v lockf >/dev/null; then \
+ lockf $(CLI_ESCRIPTS_LOCK) \
+ sh -c 'mkdir -p "$(CLI_ESCRIPTS_DIR)" && \
+ $(MAKE) -C "$(DEPS_DIR)/rabbitmq_cli" install \
+ PREFIX="$(abspath $(CLI_ESCRIPTS_DIR))" \
+ DESTDIR='; \
+ else \
+ mkdir -p "$(CLI_ESCRIPTS_DIR)" && \
+ $(MAKE) -C "$(DEPS_DIR)/rabbitmq_cli" install \
+ PREFIX="$(abspath $(CLI_ESCRIPTS_DIR))" \
+ DESTDIR= ; \
+ fi
+
+clean-dist::
+ $(gen_verbose) rm -rf \
+ "$(DIST_DIR)" \
+ "$(CLI_SCRIPTS_DIR)" \
+ "$(CLI_ESCRIPTS_DIR)"
+
+clean:: clean-dist
diff --git a/deps/rabbit_common/mk/rabbitmq-early-plugin.mk b/deps/rabbit_common/mk/rabbitmq-early-plugin.mk
new file mode 100644
index 0000000000..7b5f14b8f9
--- /dev/null
+++ b/deps/rabbit_common/mk/rabbitmq-early-plugin.mk
@@ -0,0 +1,3 @@
+ifeq ($(filter rabbitmq-early-test.mk,$(notdir $(MAKEFILE_LIST))),)
+include $(dir $(lastword $(MAKEFILE_LIST)))rabbitmq-early-test.mk
+endif
diff --git a/deps/rabbit_common/mk/rabbitmq-early-test.mk b/deps/rabbit_common/mk/rabbitmq-early-test.mk
new file mode 100644
index 0000000000..f4f00173b3
--- /dev/null
+++ b/deps/rabbit_common/mk/rabbitmq-early-test.mk
@@ -0,0 +1,130 @@
+# --------------------------------------------------------------------
+# xref
+# --------------------------------------------------------------------
+
+ifeq ($(filter distclean distclean-xref,$(MAKECMDGOALS)),)
+ifneq ($(PROJECT),rabbit_common)
+XREFR := $(DEPS_DIR)/rabbit_common/mk/xrefr
+else
+XREFR := mk/xrefr
+endif
+endif
+
+# --------------------------------------------------------------------
+# dialyzer
+# --------------------------------------------------------------------
+
+DIALYZER_OPTS ?= -Werror_handling -Wrace_conditions
+
+# --------------------------------------------------------------------
+# %-on-concourse dependencies.
+# --------------------------------------------------------------------
+
+ifneq ($(words $(filter %-on-concourse,$(MAKECMDGOALS))),0)
+TEST_DEPS += ci $(RMQ_CI_CT_HOOKS)
+NO_AUTOPATCH += ci $(RMQ_CI_CT_HOOKS)
+dep_ci = git git@github.com:rabbitmq/rabbitmq-ci master
+endif
+
+# --------------------------------------------------------------------
+# Common Test flags.
+# --------------------------------------------------------------------
+
+# We start the common_test node as a hidden Erlang node. The benefit is
+# that other Erlang nodes which discover the common_test node won't try
+# to connect to each other when they are not meant to.
+#
+# This helps when several unrelated RabbitMQ clusters are started in
+# parallel.
+
+CT_OPTS += -hidden
+
+# Enable the following common_test hooks on Travis and Concourse:
+#
+# cth_fail_fast
+# This hook will make sure the first failure puts an end to the
+# testsuites; ie. all remaining tests are skipped.
+#
+# cth_styledout
+# This hook will change the output of common_test to something more
+# concise and colored.
+#
+# On Jenkins, in addition to those common_test hooks, enable JUnit-like
+# reports. Jenkins parses those reports so the results can be browsed
+# from its UI. Furthermore, it displays a graph showing the evolution of
+# the results over time.
+
+ifndef TRAVIS
+CT_HOOKS ?= cth_styledout
+TEST_DEPS += cth_styledout
+endif
+
+ifdef TRAVIS
+FAIL_FAST = 1
+SKIP_AS_ERROR = 1
+endif
+
+ifdef CONCOURSE
+FAIL_FAST = 1
+SKIP_AS_ERROR = 1
+endif
+
+RMQ_CI_CT_HOOKS = cth_fail_fast
+ifeq ($(FAIL_FAST),1)
+CT_HOOKS += $(RMQ_CI_CT_HOOKS)
+TEST_DEPS += $(RMQ_CI_CT_HOOKS)
+endif
+
+dep_cth_fail_fast = git https://github.com/rabbitmq/cth_fail_fast.git master
+dep_cth_styledout = git https://github.com/rabbitmq/cth_styledout.git master
+
+CT_HOOKS_PARAM_VALUE = $(patsubst %,and %,$(CT_HOOKS))
+CT_OPTS += -ct_hooks $(wordlist 2,$(words $(CT_HOOKS_PARAM_VALUE)),$(CT_HOOKS_PARAM_VALUE))
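+
+# For example, with CT_HOOKS = cth_styledout cth_fail_fast, the patsubst
+# above produces "and cth_styledout and cth_fail_fast" and the wordlist
+# drops the leading "and", so common_test is called with
+# `-ct_hooks cth_styledout and cth_fail_fast`.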
+
+# Disable most messages on Travis because the output might exceed the
+# log size limit set by Travis.
+#
+# CAUTION: All arguments after -erl_args are passed to the emulator and
+# common_test doesn't interpret them! Therefore, all common_test flags
+# *MUST* appear before it.
+
+CT_QUIET_FLAGS = -verbosity 50 \
+ -erl_args \
+ -kernel error_logger silent
+
+ifdef TRAVIS
+CT_OPTS += $(CT_QUIET_FLAGS)
+endif
+
+# On CI, set $RABBITMQ_CT_SKIP_AS_ERROR so that any skipped
+# testsuite/testgroup/testcase is considered an error.
+
+ifeq ($(SKIP_AS_ERROR),1)
+export RABBITMQ_CT_SKIP_AS_ERROR = true
+endif
+
+# --------------------------------------------------------------------
+# Looking Glass rules.
+# --------------------------------------------------------------------
+
+ifneq ("$(RABBITMQ_TRACER)","")
+BUILD_DEPS += looking_glass
+dep_looking_glass = git https://github.com/rabbitmq/looking-glass master
+ERL_LIBS := "$(ERL_LIBS):../looking_glass:../lz4"
+export RABBITMQ_TRACER
+endif
+
+define lg_callgrind.erl
+lg_callgrind:profile_many("traces.lz4.*", "callgrind.out", #{running => true}),
+halt().
+endef
+
+.PHONY: profile clean-profile
+
+profile:
+ $(gen_verbose) $(call erlang,$(call lg_callgrind.erl))
+
+clean:: clean-profile
+
+clean-profile:
+ $(gen_verbose) rm -f traces.lz4.* callgrind.out.*
diff --git a/deps/rabbit_common/mk/rabbitmq-hexpm.mk b/deps/rabbit_common/mk/rabbitmq-hexpm.mk
new file mode 100644
index 0000000000..24281b1321
--- /dev/null
+++ b/deps/rabbit_common/mk/rabbitmq-hexpm.mk
@@ -0,0 +1,67 @@
+# --------------------------------------------------------------------
+# Hex.pm.
+# --------------------------------------------------------------------
+
+.PHONY: hex-publish hex-publish-docs
+
+HEXPM_URL = https://github.com/rabbitmq/hexpm-cli/releases/latest/download/hexpm
+HEXPM_CLI = $(ERLANG_MK_TMP)/hexpm
+
+$(HEXPM_CLI):
+ $(verbose) mkdir -p $(ERLANG_MK_TMP)
+ $(gen_verbose) $(call core_http_get,$@,$(HEXPM_URL))
+ $(verbose) chmod +x $@
+
+RABBIT_COMMON_HEXPM_VERSION = $(PROJECT_VERSION)
+AMQP_CLIENT_HEXPM_VERSION = $(PROJECT_VERSION)
+
+rebar.config: dep_rabbit_common = hex $(RABBIT_COMMON_HEXPM_VERSION)
+rebar.config: dep_amqp_client = hex $(AMQP_CLIENT_HEXPM_VERSION)
+
+define RABBITMQ_HEXPM_DEFAULT_FILES
+ "erlang.mk",
+ "git-revisions.txt",
+ "include",
+ "LICENSE*",
+ "Makefile",
+ "rabbitmq-components.mk",
+ "README",
+ "README.md",
+ "src"
+endef
+
+ifeq ($(PROJECT),rabbit_common)
+RMQ_COMPONENTS_PREFIX = mk
+RMQ_COMPONENTS_HEXPM = mk/rabbitmq-components.hexpm.mk
+else
+RMQ_COMPONENTS_PREFIX = .
+RMQ_COMPONENTS_HEXPM = $(DEPS_DIR)/rabbit_common/mk/rabbitmq-components.hexpm.mk
+endif
+
+hex-publish: $(HEXPM_CLI) app rebar.config
+ $(gen_verbose) echo "$(PROJECT_DESCRIPTION) $(PROJECT_VERSION)" \
+ > git-revisions.txt
+ $(verbose) mv \
+ $(RMQ_COMPONENTS_PREFIX)/rabbitmq-components.mk \
+ rabbitmq-components.mk.not-hexpm
+ $(verbose) cp \
+ $(RMQ_COMPONENTS_HEXPM) \
+ $(RMQ_COMPONENTS_PREFIX)/rabbitmq-components.mk
+ $(verbose) grep -E '^dep.* = hex' \
+ rabbitmq-components.mk.not-hexpm \
+ >> $(RMQ_COMPONENTS_PREFIX)/rabbitmq-components.mk
+ $(verbose) touch -r \
+ rabbitmq-components.mk.not-hexpm \
+ $(RMQ_COMPONENTS_PREFIX)/rabbitmq-components.mk
+ $(verbose) trap '\
+ rm -f git-revisions.txt rebar.lock; \
+ if test -f rabbitmq-components.mk.not-hexpm; then \
+ mv \
+ rabbitmq-components.mk.not-hexpm \
+ $(RMQ_COMPONENTS_PREFIX)/rabbitmq-components.mk; \
+ fi' EXIT INT; \
+ $(HEXPM_CLI) publish
+
+hex-publish-docs: $(HEXPM_CLI) app docs
+ $(gen_verbose) trap 'rm -f rebar.lock' EXIT INT; \
+ $(HEXPM_CLI) docs
diff --git a/deps/rabbit_common/mk/rabbitmq-macros.mk b/deps/rabbit_common/mk/rabbitmq-macros.mk
new file mode 100644
index 0000000000..048745a7f0
--- /dev/null
+++ b/deps/rabbit_common/mk/rabbitmq-macros.mk
@@ -0,0 +1,22 @@
+# Macro to compare two x.y.z versions.
+#
+# Usage:
+# ifeq ($(call compare_version,$(ERTS_VER),$(MAX_ERTS_VER),<),true)
+# # Only evaluated if $(ERTS_VER) < $(MAX_ERTS_VER)
+# endif
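+#
+# Each version is encoded as x*1000000 + y*10000 + z*100 (plus a fourth
+# component, if present). For example, "23.2.1" becomes 23020100 and
+# "22.3.4" becomes 22030400, so $(call compare_version,23.2.1,22.3.4,>)
+# evaluates to "true".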
+
+define compare_version
+$(shell awk 'BEGIN {
+ split("$(1)", v1, ".");
+ version1 = v1[1] * 1000000 + v1[2] * 10000 + v1[3] * 100 + v1[4];
+
+ split("$(2)", v2, ".");
+ version2 = v2[1] * 1000000 + v2[2] * 10000 + v2[3] * 100 + v2[4];
+
+ if (version1 $(3) version2) {
+ print "true";
+ } else {
+ print "false";
+ }
+}')
+endef
diff --git a/deps/rabbit_common/mk/rabbitmq-plugin.mk b/deps/rabbit_common/mk/rabbitmq-plugin.mk
new file mode 100644
index 0000000000..29064a9a4f
--- /dev/null
+++ b/deps/rabbit_common/mk/rabbitmq-plugin.mk
@@ -0,0 +1,23 @@
+ifeq ($(filter rabbitmq-build.mk,$(notdir $(MAKEFILE_LIST))),)
+include $(dir $(lastword $(MAKEFILE_LIST)))rabbitmq-build.mk
+endif
+
+ifeq ($(filter rabbitmq-hexpm.mk,$(notdir $(MAKEFILE_LIST))),)
+include $(dir $(lastword $(MAKEFILE_LIST)))rabbitmq-hexpm.mk
+endif
+
+ifeq ($(filter rabbitmq-dist.mk,$(notdir $(MAKEFILE_LIST))),)
+include $(dir $(lastword $(MAKEFILE_LIST)))rabbitmq-dist.mk
+endif
+
+ifeq ($(filter rabbitmq-run.mk,$(notdir $(MAKEFILE_LIST))),)
+include $(dir $(lastword $(MAKEFILE_LIST)))rabbitmq-run.mk
+endif
+
+ifeq ($(filter rabbitmq-test.mk,$(notdir $(MAKEFILE_LIST))),)
+include $(dir $(lastword $(MAKEFILE_LIST)))rabbitmq-test.mk
+endif
+
+ifeq ($(filter rabbitmq-tools.mk,$(notdir $(MAKEFILE_LIST))),)
+include $(dir $(lastword $(MAKEFILE_LIST)))rabbitmq-tools.mk
+endif
diff --git a/deps/rabbit_common/mk/rabbitmq-run.mk b/deps/rabbit_common/mk/rabbitmq-run.mk
new file mode 100644
index 0000000000..bef62c03f7
--- /dev/null
+++ b/deps/rabbit_common/mk/rabbitmq-run.mk
@@ -0,0 +1,428 @@
+.PHONY: run-broker run-background-broker run-node run-background-node \
+ start-background-node start-rabbit-on-node \
+ stop-rabbit-on-node set-resource-alarm clear-resource-alarm \
+ stop-node
+
+ifeq ($(filter rabbitmq-dist.mk,$(notdir $(MAKEFILE_LIST))),)
+include $(dir $(lastword $(MAKEFILE_LIST)))rabbitmq-dist.mk
+endif
+
+exec_verbose_0 = @echo " EXEC " $@;
+exec_verbose_2 = set -x;
+exec_verbose = $(exec_verbose_$(V))
+
+ifeq ($(PLATFORM),msys2)
+TEST_TMPDIR ?= $(TEMP)/rabbitmq-test-instances
+else
+TMPDIR ?= /tmp
+TEST_TMPDIR ?= $(TMPDIR)/rabbitmq-test-instances
+endif
+
+# Location of the scripts controlling the broker.
+RABBITMQ_SCRIPTS_DIR ?= $(CURDIR)/sbin
+
+ifeq ($(PLATFORM),msys2)
+RABBITMQ_PLUGINS ?= $(RABBITMQ_SCRIPTS_DIR)/rabbitmq-plugins.bat
+RABBITMQ_SERVER ?= $(RABBITMQ_SCRIPTS_DIR)/rabbitmq-server.bat
+RABBITMQCTL ?= $(RABBITMQ_SCRIPTS_DIR)/rabbitmqctl.bat
+else
+RABBITMQ_PLUGINS ?= $(RABBITMQ_SCRIPTS_DIR)/rabbitmq-plugins
+RABBITMQ_SERVER ?= $(RABBITMQ_SCRIPTS_DIR)/rabbitmq-server
+RABBITMQCTL ?= $(RABBITMQ_SCRIPTS_DIR)/rabbitmqctl
+endif
+
+export RABBITMQ_SCRIPTS_DIR RABBITMQCTL RABBITMQ_PLUGINS RABBITMQ_SERVER
+
+# We export MAKE to be sure scripts and tests use the proper command.
+export MAKE
+
+# We need to pass the location of codegen to the Java client ant
+# process.
+CODEGEN_DIR = $(DEPS_DIR)/rabbitmq_codegen
+PYTHONPATH = $(CODEGEN_DIR)
+export PYTHONPATH
+
+ANT ?= ant
+ANT_FLAGS += -Dmake.bin=$(MAKE) \
+ -DUMBRELLA_AVAILABLE=true \
+ -Drabbitmqctl.bin=$(RABBITMQCTL) \
+ -Dsibling.codegen.dir=$(CODEGEN_DIR)
+ifeq ($(PROJECT),rabbitmq_test)
+ANT_FLAGS += -Dsibling.rabbitmq_test.dir=$(CURDIR)
+else
+ANT_FLAGS += -Dsibling.rabbitmq_test.dir=$(DEPS_DIR)/rabbitmq_test
+endif
+export ANT ANT_FLAGS
+
+node_tmpdir = $(TEST_TMPDIR)/$(1)
+node_pid_file = $(call node_tmpdir,$(1))/$(1).pid
+node_log_base = $(call node_tmpdir,$(1))/log
+node_mnesia_base = $(call node_tmpdir,$(1))/mnesia
+node_mnesia_dir = $(call node_mnesia_base,$(1))/$(1)
+node_quorum_dir = $(call node_mnesia_dir,$(1))/quorum
+node_stream_dir = $(call node_mnesia_dir,$(1))/stream
+node_plugins_expand_dir = $(call node_tmpdir,$(1))/plugins
+node_feature_flags_file = $(call node_tmpdir,$(1))/feature_flags
+node_enabled_plugins_file = $(call node_tmpdir,$(1))/enabled_plugins
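+
+# For example (hypothetical node name), $(call node_pid_file,rabbit-1@myhost)
+# expands to $(TEST_TMPDIR)/rabbit-1@myhost/rabbit-1@myhost.pid: each node
+# gets its own subdirectory under $(TEST_TMPDIR).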
+
+# Broker startup variables for the test environment.
+ifeq ($(PLATFORM),msys2)
+HOSTNAME := $(COMPUTERNAME)
+else
+HOSTNAME := $(shell hostname -s)
+endif
+
+RABBITMQ_NODENAME ?= rabbit@$(HOSTNAME)
+RABBITMQ_NODENAME_FOR_PATHS ?= $(RABBITMQ_NODENAME)
+NODE_TMPDIR ?= $(call node_tmpdir,$(RABBITMQ_NODENAME_FOR_PATHS))
+
+RABBITMQ_BASE ?= $(NODE_TMPDIR)
+RABBITMQ_PID_FILE ?= $(call node_pid_file,$(RABBITMQ_NODENAME_FOR_PATHS))
+RABBITMQ_LOG_BASE ?= $(call node_log_base,$(RABBITMQ_NODENAME_FOR_PATHS))
+RABBITMQ_MNESIA_BASE ?= $(call node_mnesia_base,$(RABBITMQ_NODENAME_FOR_PATHS))
+RABBITMQ_MNESIA_DIR ?= $(call node_mnesia_dir,$(RABBITMQ_NODENAME_FOR_PATHS))
+RABBITMQ_QUORUM_DIR ?= $(call node_quorum_dir,$(RABBITMQ_NODENAME_FOR_PATHS))
+RABBITMQ_STREAM_DIR ?= $(call node_stream_dir,$(RABBITMQ_NODENAME_FOR_PATHS))
+RABBITMQ_PLUGINS_EXPAND_DIR ?= $(call node_plugins_expand_dir,$(RABBITMQ_NODENAME_FOR_PATHS))
+RABBITMQ_FEATURE_FLAGS_FILE ?= $(call node_feature_flags_file,$(RABBITMQ_NODENAME_FOR_PATHS))
+RABBITMQ_ENABLED_PLUGINS_FILE ?= $(call node_enabled_plugins_file,$(RABBITMQ_NODENAME_FOR_PATHS))
+
+# Enable colourful debug logging by default.
+# To change this, set RABBITMQ_LOG to info, notice, warning, etc.
+RABBITMQ_LOG ?= debug,+color
+export RABBITMQ_LOG
+
+# erlang.mk adds dependencies' ebin directories to ERL_LIBS. This is
+# a sane default, but we prefer to rely on the .ez archives in the
+# `plugins` directory so the plugin code is executed. The `plugins`
+# directory is added to ERL_LIBS by rabbitmq-env.
+DIST_ERL_LIBS = $(patsubst :%,%,$(patsubst %:,%,$(subst :$(APPS_DIR):,:,$(subst :$(DEPS_DIR):,:,:$(ERL_LIBS):))))
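+
+# For example (hypothetical paths), with ERL_LIBS set to
+# "/extra/libs:$(DEPS_DIR)", the substitutions above first wrap the value
+# in colons, drop ":$(DEPS_DIR):" and ":$(APPS_DIR):", then strip the
+# remaining leading/trailing colons, leaving "/extra/libs".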
+
+ifdef PLUGINS_FROM_DEPS_DIR
+RMQ_PLUGINS_DIR=$(DEPS_DIR)
+else
+RMQ_PLUGINS_DIR=$(CURDIR)/$(DIST_DIR)
+endif
+
+define basic_script_env_settings
+MAKE="$(MAKE)" \
+ERL_LIBS="$(DIST_ERL_LIBS)" \
+RABBITMQ_NODENAME="$(1)" \
+RABBITMQ_NODE_IP_ADDRESS="$(RABBITMQ_NODE_IP_ADDRESS)" \
+RABBITMQ_NODE_PORT="$(3)" \
+RABBITMQ_BASE="$(call node_tmpdir,$(2))" \
+RABBITMQ_PID_FILE="$(call node_pid_file,$(2))" \
+RABBITMQ_LOG_BASE="$(call node_log_base,$(2))" \
+RABBITMQ_MNESIA_BASE="$(call node_mnesia_base,$(2))" \
+RABBITMQ_MNESIA_DIR="$(call node_mnesia_dir,$(2))" \
+RABBITMQ_QUORUM_DIR="$(call node_quorum_dir,$(2))" \
+RABBITMQ_STREAM_DIR="$(call node_stream_dir,$(2))" \
+RABBITMQ_FEATURE_FLAGS_FILE="$(call node_feature_flags_file,$(2))" \
+RABBITMQ_PLUGINS_DIR="$(if $(RABBITMQ_PLUGINS_DIR),$(RABBITMQ_PLUGINS_DIR),$(RMQ_PLUGINS_DIR))" \
+RABBITMQ_PLUGINS_EXPAND_DIR="$(call node_plugins_expand_dir,$(2))" \
+RABBITMQ_SERVER_START_ARGS="-ra wal_sync_method sync $(RABBITMQ_SERVER_START_ARGS)" \
+RABBITMQ_ENABLED_PLUGINS="$(RABBITMQ_ENABLED_PLUGINS)"
+endef
+
+BASIC_SCRIPT_ENV_SETTINGS = \
+ $(call basic_script_env_settings,$(RABBITMQ_NODENAME),$(RABBITMQ_NODENAME_FOR_PATHS),$(RABBITMQ_NODE_PORT)) \
+ RABBITMQ_ENABLED_PLUGINS_FILE="$(RABBITMQ_ENABLED_PLUGINS_FILE)"
+
+test-tmpdir:
+ $(verbose) mkdir -p $(TEST_TMPDIR)
+
+virgin-test-tmpdir:
+ $(gen_verbose) rm -rf $(TEST_TMPDIR)
+ $(verbose) mkdir -p $(TEST_TMPDIR)
+
+node-tmpdir:
+ $(verbose) mkdir -p $(RABBITMQ_LOG_BASE) \
+ $(RABBITMQ_MNESIA_BASE) \
+ $(RABBITMQ_PLUGINS_EXPAND_DIR)
+
+virgin-node-tmpdir:
+ $(gen_verbose) rm -rf $(NODE_TMPDIR)
+ $(verbose) mkdir -p $(RABBITMQ_LOG_BASE) \
+ $(RABBITMQ_MNESIA_BASE) \
+ $(RABBITMQ_PLUGINS_EXPAND_DIR)
+
+.PHONY: test-tmpdir virgin-test-tmpdir node-tmpdir virgin-node-tmpdir
+
+ifdef LEAVE_PLUGINS_DISABLED
+RABBITMQ_ENABLED_PLUGINS ?=
+else
+RABBITMQ_ENABLED_PLUGINS ?= ALL
+endif
+
+# --------------------------------------------------------------------
+# Run a full RabbitMQ.
+# --------------------------------------------------------------------
+
+define test_rabbitmq_config
+%% vim:ft=erlang:
+
+[
+ {rabbit, [
+$(if $(RABBITMQ_NODE_PORT), {tcp_listeners$(comma) [$(RABBITMQ_NODE_PORT)]}$(comma),)
+ {loopback_users, []},
+ {log, [{file, [{level, debug}]},
+ {console, [{level, debug}]}]}
+ ]},
+ {rabbitmq_management, [
+$(if $(RABBITMQ_NODE_PORT), {listener$(comma) [{port$(comma) $(shell echo "$$(($(RABBITMQ_NODE_PORT) + 10000))")}]},)
+ ]},
+ {rabbitmq_mqtt, [
+$(if $(RABBITMQ_NODE_PORT), {tcp_listeners$(comma) [$(shell echo "$$((1883 + $(RABBITMQ_NODE_PORT) - 5672))")]},)
+ ]},
+ {rabbitmq_stomp, [
+$(if $(RABBITMQ_NODE_PORT), {tcp_listeners$(comma) [$(shell echo "$$((61613 + $(RABBITMQ_NODE_PORT) - 5672))")]},)
+ ]},
+ {ra, [
+ {data_dir, "$(RABBITMQ_QUORUM_DIR)"},
+ {wal_sync_method, sync}
+ ]},
+ {lager, [
+ {colors, [
+ %% https://misc.flogisoft.com/bash/tip_colors_and_formatting
+ {debug, "\\\e[0;34m" },
+ {info, "\\\e[1;37m" },
+ {notice, "\\\e[1;36m" },
+ {warning, "\\\e[1;33m" },
+ {error, "\\\e[1;31m" },
+ {critical, "\\\e[1;35m" },
+ {alert, "\\\e[1;44m" },
+ {emergency, "\\\e[1;41m" }
+ ]}
+ ]},
+ {osiris, [
+ {data_dir, "$(RABBITMQ_STREAM_DIR)"}
+ ]}
+].
+endef
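+
+# For example, with RABBITMQ_NODE_PORT=5673, the template above derives
+# the management port as 15673 (5673 + 10000), the MQTT port as 1884
+# (1883 + 5673 - 5672) and the STOMP port as 61614 (61613 + 5673 - 5672).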
+
+define test_rabbitmq_config_with_tls
+%% vim:ft=erlang:
+
+[
+ {rabbit, [
+ {loopback_users, []},
+ {log, [{file, [{level, debug}]},
+ {console, [{level, debug}]}]},
+ {ssl_listeners, [5671]},
+ {ssl_options, [
+ {cacertfile, "$(TEST_TLS_CERTS_DIR_in_config)/testca/cacert.pem"},
+ {certfile, "$(TEST_TLS_CERTS_DIR_in_config)/server/cert.pem"},
+ {keyfile, "$(TEST_TLS_CERTS_DIR_in_config)/server/key.pem"},
+ {verify, verify_peer},
+ {fail_if_no_peer_cert, false},
+ {honor_cipher_order, true}]}
+ ]},
+ {rabbitmq_management, [
+ {listener, [
+ {port, 15671},
+ {ssl, true},
+ {ssl_opts, [
+ {cacertfile, "$(TEST_TLS_CERTS_DIR_in_config)/testca/cacert.pem"},
+ {certfile, "$(TEST_TLS_CERTS_DIR_in_config)/server/cert.pem"},
+ {keyfile, "$(TEST_TLS_CERTS_DIR_in_config)/server/key.pem"},
+ {verify, verify_peer},
+ {fail_if_no_peer_cert, false},
+ {honor_cipher_order, true}]}
+ ]}
+ ]},
+ {ra, [
+ {data_dir, "$(RABBITMQ_QUORUM_DIR)"},
+ {wal_sync_method, sync}
+ ]},
+ {lager, [
+ {colors, [
+ %% https://misc.flogisoft.com/bash/tip_colors_and_formatting
+ {debug, "\\\e[0;34m" },
+ {info, "\\\e[1;37m" },
+ {notice, "\\\e[1;36m" },
+ {warning, "\\\e[1;33m" },
+ {error, "\\\e[1;31m" },
+ {critical, "\\\e[1;35m" },
+ {alert, "\\\e[1;44m" },
+ {emergency, "\\\e[1;41m" }
+ ]}
+ ]},
+ {osiris, [
+ {data_dir, "$(RABBITMQ_STREAM_DIR)"}
+ ]}
+].
+endef
+
+TEST_CONFIG_FILE ?= $(TEST_TMPDIR)/test.config
+TEST_TLS_CERTS_DIR := $(TEST_TMPDIR)/tls-certs
+ifeq ($(origin TEST_TLS_CERTS_DIR_in_config),undefined)
+ifeq ($(PLATFORM),msys2)
+TEST_TLS_CERTS_DIR_in_config := $(shell echo $(TEST_TLS_CERTS_DIR) | sed -E "s,^/([^/]+),\1:,")
+else
+TEST_TLS_CERTS_DIR_in_config := $(TEST_TLS_CERTS_DIR)
+endif
+export TEST_TLS_CERTS_DIR_in_config
+endif
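+
+# On msys2, the sed expression above rewrites an MSYS-style path such as
+# (hypothetically) /c/Users/me/tmp into c:/Users/me/tmp, a form Erlang
+# can open from the generated config file.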
+
+.PHONY: $(TEST_CONFIG_FILE)
+$(TEST_CONFIG_FILE): node-tmpdir
+ $(gen_verbose) printf "$(subst $(newline),\n,$(subst ",\",$(config)))" > $@
+
+$(TEST_TLS_CERTS_DIR): node-tmpdir
+ $(gen_verbose) $(MAKE) -C $(DEPS_DIR)/rabbitmq_ct_helpers/tools/tls-certs \
+ DIR=$(TEST_TLS_CERTS_DIR) all
+
+show-test-tls-certs-dir: $(TEST_TLS_CERTS_DIR)
+ @echo $(TEST_TLS_CERTS_DIR)
+
+ifdef NOBUILD
+DIST_TARGET ?=
+else
+ifeq ($(wildcard ebin/test),)
+DIST_TARGET ?= dist
+else
+DIST_TARGET ?= test-dist
+endif
+endif
+
+run-broker run-tls-broker: RABBITMQ_CONFIG_FILE := $(basename $(TEST_CONFIG_FILE))
+run-broker: config := $(test_rabbitmq_config)
+run-tls-broker: config := $(test_rabbitmq_config_with_tls)
+run-tls-broker: $(TEST_TLS_CERTS_DIR)
+
+run-broker run-tls-broker: node-tmpdir $(DIST_TARGET) $(TEST_CONFIG_FILE)
+ $(BASIC_SCRIPT_ENV_SETTINGS) \
+ RABBITMQ_ALLOW_INPUT=true \
+ RABBITMQ_CONFIG_FILE=$(RABBITMQ_CONFIG_FILE) \
+ $(RABBITMQ_SERVER)
+
+run-background-broker: node-tmpdir $(DIST_TARGET)
+ $(BASIC_SCRIPT_ENV_SETTINGS) \
+ $(RABBITMQ_SERVER) -detached
+
+# --------------------------------------------------------------------
+# Run a bare Erlang node.
+# --------------------------------------------------------------------
+
+run-node: node-tmpdir $(DIST_TARGET)
+ $(BASIC_SCRIPT_ENV_SETTINGS) \
+ RABBITMQ_NODE_ONLY=true \
+ RABBITMQ_ALLOW_INPUT=true \
+ $(RABBITMQ_SERVER)
+
+run-background-node: virgin-node-tmpdir $(DIST_TARGET)
+ $(BASIC_SCRIPT_ENV_SETTINGS) \
+ RABBITMQ_NODE_ONLY=true \
+ $(RABBITMQ_SERVER) -detached
+
+# --------------------------------------------------------------------
+# Start RabbitMQ in the background.
+# --------------------------------------------------------------------
+
+ifneq ($(LOG_TO_STDIO),yes)
+REDIRECT_STDIO = > $(RABBITMQ_LOG_BASE)/startup_log \
+ 2> $(RABBITMQ_LOG_BASE)/startup_err
+endif
+
+RMQCTL_WAIT_TIMEOUT ?= 60
+
+define rmq_started
+true = rpc:call('$(1)', rabbit, is_running, []),
+halt().
+endef
+
+start-background-node: node-tmpdir $(DIST_TARGET)
+ $(BASIC_SCRIPT_ENV_SETTINGS) \
+ RABBITMQ_NODE_ONLY=true \
+ $(RABBITMQ_SERVER) \
+ $(REDIRECT_STDIO) &
+ ERL_LIBS="$(DIST_ERL_LIBS)" \
+ $(RABBITMQCTL) -n $(RABBITMQ_NODENAME) wait --timeout $(RMQCTL_WAIT_TIMEOUT) $(RABBITMQ_PID_FILE) kernel
+
+start-background-broker: node-tmpdir $(DIST_TARGET)
+ $(BASIC_SCRIPT_ENV_SETTINGS) \
+ $(RABBITMQ_SERVER) \
+ $(REDIRECT_STDIO) &
+ ERL_LIBS="$(DIST_ERL_LIBS)" \
+ $(RABBITMQCTL) -n $(RABBITMQ_NODENAME) wait --timeout $(RMQCTL_WAIT_TIMEOUT) $(RABBITMQ_PID_FILE) && \
+ ERL_LIBS="$(DIST_ERL_LIBS)" \
+ $(call erlang,$(call rmq_started,$(RABBITMQ_NODENAME)),-sname sbb-$$$$ -hidden)
+
+start-rabbit-on-node:
+ $(exec_verbose) ERL_LIBS="$(DIST_ERL_LIBS)" \
+ $(RABBITMQCTL) -n $(RABBITMQ_NODENAME) \
+ eval 'rabbit:start().' | \
+ sed -E -e '/^ completed with .* plugins\.$$/d' -e '/^ok$$/d'
+ $(verbose) ERL_LIBS="$(DIST_ERL_LIBS)" \
+ $(RABBITMQCTL) -n $(RABBITMQ_NODENAME) wait --timeout $(RMQCTL_WAIT_TIMEOUT) $(RABBITMQ_PID_FILE)
+
+stop-rabbit-on-node:
+ $(exec_verbose) ERL_LIBS="$(DIST_ERL_LIBS)" \
+ $(RABBITMQCTL) -n $(RABBITMQ_NODENAME) \
+ eval 'rabbit:stop().' | \
+ sed -E -e '/^ok$$/d'
+
+stop-node:
+ $(exec_verbose) ( \
+ pid=$$(test -f $(RABBITMQ_PID_FILE) && cat $(RABBITMQ_PID_FILE)); \
+ test "$$pid" && \
+ kill -TERM "$$pid" && \
+ echo waiting for process to exit && \
+ while ps -p "$$pid" >/dev/null 2>&1; do sleep 1; done \
+ ) || :
+
+# " <-- To please Vim syntax hilighting.
+
+# --------------------------------------------------------------------
+# Start a RabbitMQ cluster in the background.
+# --------------------------------------------------------------------
+
+NODES ?= 2
+
+start-brokers start-cluster: $(DIST_TARGET)
+ @for n in $$(seq $(NODES)); do \
+ nodename="rabbit-$$n@$(HOSTNAME)"; \
+ $(MAKE) start-background-broker \
+ NOBUILD=1 \
+ RABBITMQ_NODENAME="$$nodename" \
+ RABBITMQ_NODE_PORT="$$((5672 + $$n - 1))" \
+ RABBITMQ_SERVER_START_ARGS=" \
+ -rabbit loopback_users [] \
+ -rabbitmq_management listener [{port,$$((15672 + $$n - 1))}] \
+ -rabbitmq_mqtt tcp_listeners [$$((1883 + $$n - 1))] \
+ -rabbitmq_stomp tcp_listeners [$$((61613 + $$n - 1))] \
+ -rabbitmq_prometheus tcp_config [{port,$$((15692 + $$n - 1))}] \
+ -rabbitmq_stream tcp_listeners [$$((5555 + $$n - 1))] \
+ "; \
+ if test '$@' = 'start-cluster' && test "$$nodename1"; then \
+ ERL_LIBS="$(DIST_ERL_LIBS)" \
+ $(RABBITMQCTL) -n "$$nodename" stop_app; \
+ ERL_LIBS="$(DIST_ERL_LIBS)" \
+ $(RABBITMQCTL) -n "$$nodename" join_cluster "$$nodename1"; \
+ ERL_LIBS="$(DIST_ERL_LIBS)" \
+ $(RABBITMQCTL) -n "$$nodename" start_app; \
+ else \
+ nodename1=$$nodename; \
+ fi; \
+ done
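+
+# For example, `make start-cluster NODES=3` starts rabbit-1, rabbit-2 and
+# rabbit-3 with AMQP ports 5672 to 5674 (management, MQTT, STOMP,
+# Prometheus and stream ports are offset the same way) and joins nodes 2
+# and 3 to rabbit-1's cluster.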
+
+stop-brokers stop-cluster:
+ @for n in $$(seq $(NODES) -1 1); do \
+ nodename="rabbit-$$n@$(HOSTNAME)"; \
+ $(MAKE) stop-node \
+ RABBITMQ_NODENAME="$$nodename"; \
+ done
+
+# --------------------------------------------------------------------
+# Used by testsuites.
+# --------------------------------------------------------------------
+
+set-resource-alarm:
+ $(exec_verbose) ERL_LIBS="$(DIST_ERL_LIBS)" \
+ $(RABBITMQCTL) -n $(RABBITMQ_NODENAME) \
+ eval 'rabbit_alarm:set_alarm({{resource_limit, $(SOURCE), node()}, []}).'
+
+clear-resource-alarm:
+ $(exec_verbose) ERL_LIBS="$(DIST_ERL_LIBS)" \
+ $(RABBITMQCTL) -n $(RABBITMQ_NODENAME) \
+ eval 'rabbit_alarm:clear_alarm({resource_limit, $(SOURCE), node()}).'
diff --git a/deps/rabbit_common/mk/rabbitmq-test.mk b/deps/rabbit_common/mk/rabbitmq-test.mk
new file mode 100644
index 0000000000..931f072125
--- /dev/null
+++ b/deps/rabbit_common/mk/rabbitmq-test.mk
@@ -0,0 +1,80 @@
+.PHONY: ct-slow ct-fast
+
+ct-slow ct-fast:
+ $(MAKE) ct CT_SUITES='$(CT_SUITES)'
+
+# --------------------------------------------------------------------
+# xref
+# --------------------------------------------------------------------
+
+# We need the list of dependencies of the current project. We use it in
+# xrefr(1) to scan for Elixir-based projects. For those, we need to add
+# the path inside `_build` to the xref code path.
+
+ifneq ($(filter xref,$(MAKECMDGOALS)),)
+export ERLANG_MK_RECURSIVE_DEPS_LIST
+endif
+
+xref: $(ERLANG_MK_RECURSIVE_DEPS_LIST)
+
+# --------------------------------------------------------------------
+# Helpers to run Make targets on Concourse.
+# --------------------------------------------------------------------
+
+FLY ?= fly
+FLY_TARGET ?= $(shell $(FLY) targets | awk '/ci\.rabbitmq\.com/ { print $$1; }')
+
+CONCOURSE_TASK = $(ERLANG_MK_TMP)/concourse-task.yaml
+
+CI_DIR ?= $(DEPS_DIR)/ci
+PIPELINE_DIR = $(CI_DIR)/server-release
+BRANCH_RELEASE = $(shell "$(PIPELINE_DIR)/scripts/map-branch-to-release.sh" "$(base_rmq_ref)")
+PIPELINE_DATA = $(PIPELINE_DIR)/release-data-$(BRANCH_RELEASE).yaml
+REPOSITORY_NAME = $(shell "$(PIPELINE_DIR)/scripts/map-erlang-app-and-repository-name.sh" "$(PIPELINE_DATA)" "$(PROJECT)")
+
+CONCOURSE_PLATFORM ?= linux
+ERLANG_VERSION ?= $(shell "$(PIPELINE_DIR)/scripts/list-erlang-versions.sh" "$(PIPELINE_DATA)" | head -n 1)
+TASK_INPUTS = $(shell "$(PIPELINE_DIR)/scripts/list-task-inputs.sh" "$(CONCOURSE_TASK)")
+
+.PHONY: $(CONCOURSE_TASK)
+$(CONCOURSE_TASK): $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST)
+ $(gen_verbose) echo 'platform: $(CONCOURSE_PLATFORM)' > "$@"
+ $(verbose) echo 'inputs:' >> "$@"
+ $(verbose) echo ' - name: $(PROJECT)' >> "$@"
+ $(verbose) cat $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) | while read -r file; do \
+ echo " - name: $$(basename "$$file")" >> "$@"; \
+ done
+ $(verbose) echo 'outputs:' >> "$@"
+ $(verbose) echo ' - name: test-output' >> "$@"
+ifeq ($(CONCOURSE_PLATFORM),linux)
+ $(verbose) echo 'image_resource:' >> "$@"
+ $(verbose) echo ' type: docker-image' >> "$@"
+ $(verbose) echo ' source:' >> "$@"
+ $(verbose) echo ' repository: pivotalrabbitmq/rabbitmq-server-buildenv' >> "$@"
+ $(verbose) echo ' tag: linux-erlang-$(ERLANG_VERSION)' >> "$@"
+endif
+ $(verbose) echo 'run:' >> "$@"
+ $(verbose) echo ' path: ci/server-release/scripts/test-erlang-app.sh' >> "$@"
+ $(verbose) echo ' args:' >> "$@"
+ $(verbose) echo " - $(PROJECT)" >> "$@"
+# This section must come last because the `%-on-concourse` target
+# appends other variables.
+ $(verbose) echo 'params:' >> "$@"
+ifdef V
+ $(verbose) echo ' V: "$(V)"' >> "$@"
+endif
+ifdef t
+ $(verbose) echo ' t: "$(t)"' >> "$@"
+endif
+
+%-on-concourse: $(CONCOURSE_TASK)
+ $(verbose) test -d "$(PIPELINE_DIR)"
+ $(verbose) echo ' MAKE_TARGET: "$*"' >> "$(CONCOURSE_TASK)"
+ $(FLY) -t $(FLY_TARGET) execute \
+ --config="$(CONCOURSE_TASK)" \
+ $(foreach input,$(TASK_INPUTS), \
+ $(if $(filter $(PROJECT),$(input)), \
+ --input="$(input)=.", \
+ --input="$(input)=$(DEPS_DIR)/$(input)")) \
+ --output="test-output=$(CT_LOGS_DIR)/on-concourse"
+ $(verbose) rm -f "$(CT_LOGS_DIR)/on-concourse/filename"
diff --git a/deps/rabbit_common/mk/rabbitmq-tools.mk b/deps/rabbit_common/mk/rabbitmq-tools.mk
new file mode 100644
index 0000000000..6672153cb0
--- /dev/null
+++ b/deps/rabbit_common/mk/rabbitmq-tools.mk
@@ -0,0 +1,429 @@
+ifeq ($(PLATFORM),msys2)
+HOSTNAME := $(COMPUTERNAME)
+else
+HOSTNAME := $(shell hostname -s)
+endif
+
+READY_DEPS = $(foreach DEP,\
+ $(filter $(RABBITMQ_COMPONENTS),$(DEPS) $(BUILD_DEPS) $(TEST_DEPS)), \
+ $(if $(wildcard $(DEPS_DIR)/$(DEP)),$(DEP),))
+
+RELEASED_RMQ_DEPS = $(filter $(RABBITMQ_COMPONENTS),$(DEPS) $(BUILD_DEPS))
+
+.PHONY: show-upstream-git-fetch-url show-upstream-git-push-url \
+ show-current-git-fetch-url show-current-git-push-url
+
+show-upstream-git-fetch-url:
+ @echo $(RABBITMQ_UPSTREAM_FETCH_URL)
+
+show-upstream-git-push-url:
+ @echo $(RABBITMQ_UPSTREAM_PUSH_URL)
+
+show-current-git-fetch-url:
+ @echo $(RABBITMQ_CURRENT_FETCH_URL)
+
+show-current-git-push-url:
+ @echo $(RABBITMQ_CURRENT_PUSH_URL)
+
+.PHONY: update-erlang-mk update-rabbitmq-components.mk
+
+update-erlang-mk: erlang-mk
+ $(verbose) if test "$(DO_COMMIT)" = 'yes'; then \
+ git diff --quiet -- erlang.mk \
+ || git commit -m 'Update erlang.mk' -- erlang.mk; \
+ fi
+ $(verbose) for repo in $(READY_DEPS:%=$(DEPS_DIR)/%); do \
+ ! test -f $$repo/erlang.mk \
+ || $(MAKE) -C $$repo erlang-mk; \
+ if test "$(DO_COMMIT)" = 'yes'; then \
+ (cd $$repo; \
+ git diff --quiet -- erlang.mk \
+ || git commit -m 'Update erlang.mk' -- erlang.mk); \
+ fi; \
+ done
+
+# --------------------------------------------------------------------
+# rabbitmq-components.mk checks.
+# --------------------------------------------------------------------
+
+UPSTREAM_RMQ_COMPONENTS_MK = $(DEPS_DIR)/rabbit_common/mk/rabbitmq-components.mk
+
+ifeq ($(PROJECT),rabbit_common)
+check-rabbitmq-components.mk:
+ @:
+else
+check-rabbitmq-components.mk:
+ $(verbose) cmp -s rabbitmq-components.mk \
+ $(UPSTREAM_RMQ_COMPONENTS_MK) || \
+ (echo "error: rabbitmq-components.mk must be updated!" 1>&2; \
+ false)
+endif
+
+ifeq ($(PROJECT),rabbit_common)
+rabbitmq-components-mk:
+ @:
+else
+rabbitmq-components-mk:
+ifeq ($(FORCE),yes)
+ $(gen_verbose) cp -a $(UPSTREAM_RMQ_COMPONENTS_MK) .
+else
+ $(gen_verbose) if test -d .git && test -d $(DEPS_DIR)/rabbit_common/.git; then \
+ upstream_branch=$$(LANG=C git -C $(DEPS_DIR)/rabbit_common branch --list | awk '/^\* \(.*detached / {ref=$$0; sub(/.*detached [^ ]+ /, "", ref); sub(/\)$$/, "", ref); print ref; exit;} /^\* / {ref=$$0; sub(/^\* /, "", ref); print ref; exit}'); \
+ local_branch=$$(LANG=C git branch --list | awk '/^\* \(.*detached / {ref=$$0; sub(/.*detached [^ ]+ /, "", ref); sub(/\)$$/, "", ref); print ref; exit;} /^\* / {ref=$$0; sub(/^\* /, "", ref); print ref; exit}'); \
+ test "$$local_branch" = "$$upstream_branch" || exit 0; \
+ fi; \
+ cp -a $(UPSTREAM_RMQ_COMPONENTS_MK) .
+endif
+ifeq ($(DO_COMMIT),yes)
+ $(verbose) git diff --quiet rabbitmq-components.mk \
+ || git commit -m 'Update rabbitmq-components.mk' rabbitmq-components.mk
+endif
+endif
+
+update-rabbitmq-components-mk: rabbitmq-components-mk
+ $(verbose) for repo in $(READY_DEPS:%=$(DEPS_DIR)/%); do \
+ ! test -f $$repo/rabbitmq-components.mk \
+ || $(MAKE) -C $$repo rabbitmq-components-mk; \
+ done
+
+update-contributor-code-of-conduct:
+ $(verbose) for repo in $(READY_DEPS:%=$(DEPS_DIR)/%); do \
+ cp $(DEPS_DIR)/rabbit_common/CODE_OF_CONDUCT.md $$repo/CODE_OF_CONDUCT.md; \
+ cp $(DEPS_DIR)/rabbit_common/CONTRIBUTING.md $$repo/CONTRIBUTING.md; \
+ done
+
+ifdef CREDS
+define replace_aws_creds
+ set -e; \
+ if test -f "$(CREDS)"; then \
+ key_id=$(shell travis encrypt --no-interactive \
+ "AWS_ACCESS_KEY_ID=$$(awk '/^rabbitmq-s3-access-key-id/ { print $$2; }' < "$(CREDS)")"); \
+ access_key=$(shell travis encrypt --no-interactive \
+ "AWS_SECRET_ACCESS_KEY=$$(awk '/^rabbitmq-s3-secret-access-key/ { print $$2; }' < "$(CREDS)")"); \
+ mv .travis.yml .travis.yml.orig; \
+ awk "\
+ /^ global:/ { \
+ print; \
+ print \" - secure: $$key_id\"; \
+ print \" - secure: $$access_key\"; \
+ next; \
+ } \
+ /- secure:/ { next; } \
+ { print; }" < .travis.yml.orig > .travis.yml; \
+ rm -f .travis.yml.orig; \
+ else \
+ echo " INFO: CREDS file missing; not setting/updating AWS credentials"; \
+ fi
+endef
+else
+define replace_aws_creds
+ echo " INFO: CREDS not set; not setting/updating AWS credentials"
+endef
+endif
+
+ifeq ($(PROJECT),rabbit_common)
+travis-yml:
+ $(gen_verbose) $(replace_aws_creds)
+else
+travis-yml:
+ $(gen_verbose) \
+ set -e; \
+ if test -d .git && test -d $(DEPS_DIR)/rabbit_common/.git; then \
+ upstream_branch=$$(LANG=C git -C $(DEPS_DIR)/rabbit_common branch --list | awk '/^\* \(.*detached / {ref=$$0; sub(/.*detached [^ ]+ /, "", ref); sub(/\)$$/, "", ref); print ref; exit;} /^\* / {ref=$$0; sub(/^\* /, "", ref); print ref; exit}'); \
+ local_branch=$$(LANG=C git branch --list | awk '/^\* \(.*detached / {ref=$$0; sub(/.*detached [^ ]+ /, "", ref); sub(/\)$$/, "", ref); print ref; exit;} /^\* / {ref=$$0; sub(/^\* /, "", ref); print ref; exit}'); \
+ test "$$local_branch" = "$$upstream_branch" || exit 0; \
+ fi; \
+ test -f .travis.yml || exit 0; \
+ (grep -E -- '- secure:' .travis.yml || :) > .travis.yml.creds; \
+ cp -a $(DEPS_DIR)/rabbit_common/.travis.yml .travis.yml.orig; \
+ awk ' \
+ /^ global:/ { \
+ print; \
+ system("test -f .travis.yml.creds && cat .travis.yml.creds"); \
+ next; \
+ } \
+ /- secure:/ { next; } \
+ { print; } \
+ ' < .travis.yml.orig > .travis.yml; \
+ rm -f .travis.yml.orig .travis.yml.creds; \
+ if test -f .travis.yml.patch; then \
+ patch -p0 < .travis.yml.patch; \
+ rm -f .travis.yml.orig; \
+ fi; \
+ $(replace_aws_creds)
+ifeq ($(DO_COMMIT),yes)
+ $(verbose) ! test -f .travis.yml || \
+ git diff --quiet .travis.yml \
+ || git commit -m 'Travis CI: Update config from rabbitmq-common' .travis.yml
+endif
+endif
+
+update-travis-yml: travis-yml
+ $(verbose) for repo in $(READY_DEPS:%=$(DEPS_DIR)/%); do \
+ ! test -f $$repo/rabbitmq-components.mk \
+ || $(MAKE) -C $$repo travis-yml; \
+ done
+
+ifneq ($(wildcard .git),)
+
+.PHONY: sync-gitremote sync-gituser
+
+sync-gitremote: $(READY_DEPS:%=$(DEPS_DIR)/%+sync-gitremote)
+ @:
+
+%+sync-gitremote:
+ $(exec_verbose) cd $* && \
+ git remote set-url origin \
+ '$(call dep_rmq_repo,$(RABBITMQ_CURRENT_FETCH_URL),$(notdir $*))'
+ $(verbose) cd $* && \
+ git remote set-url --push origin \
+ '$(call dep_rmq_repo,$(RABBITMQ_CURRENT_PUSH_URL),$(notdir $*))'
+
+ifeq ($(origin RMQ_GIT_GLOBAL_USER_NAME),undefined)
+RMQ_GIT_GLOBAL_USER_NAME := $(shell git config --global user.name)
+export RMQ_GIT_GLOBAL_USER_NAME
+endif
+ifeq ($(origin RMQ_GIT_GLOBAL_USER_EMAIL),undefined)
+RMQ_GIT_GLOBAL_USER_EMAIL := $(shell git config --global user.email)
+export RMQ_GIT_GLOBAL_USER_EMAIL
+endif
+ifeq ($(origin RMQ_GIT_USER_NAME),undefined)
+RMQ_GIT_USER_NAME := $(shell git config user.name)
+export RMQ_GIT_USER_NAME
+endif
+ifeq ($(origin RMQ_GIT_USER_EMAIL),undefined)
+RMQ_GIT_USER_EMAIL := $(shell git config user.email)
+export RMQ_GIT_USER_EMAIL
+endif
+
+sync-gituser: $(READY_DEPS:%=$(DEPS_DIR)/%+sync-gituser)
+ @:
+
+%+sync-gituser:
+ifeq ($(RMQ_GIT_USER_NAME),$(RMQ_GIT_GLOBAL_USER_NAME))
+ $(exec_verbose) cd $* && git config --unset user.name || :
+else
+ $(exec_verbose) cd $* && git config user.name "$(RMQ_GIT_USER_NAME)"
+endif
+ifeq ($(RMQ_GIT_USER_EMAIL),$(RMQ_GIT_GLOBAL_USER_EMAIL))
+ $(verbose) cd $* && git config --unset user.email || :
+else
+ $(verbose) cd $* && git config user.email "$(RMQ_GIT_USER_EMAIL)"
+endif
+
+.PHONY: sync-gitignore-from-master
+sync-gitignore-from-master: $(READY_DEPS:%=$(DEPS_DIR)/%+sync-gitignore-from-master)
+
+%+sync-gitignore-from-master:
+ $(gen_verbose) cd $* && \
+ if test -d .git; then \
+ branch=$$(LANG=C git branch --list | awk '/^\* \(.*detached / {ref=$$0; sub(/.*detached [^ ]+ /, "", ref); sub(/\)$$/, "", ref); print ref; exit;} /^\* / {ref=$$0; sub(/^\* /, "", ref); print ref; exit}'); \
+ ! test "$$branch" = 'master' || exit 0; \
+ git show origin/master:.gitignore > .gitignore; \
+ fi
+ifeq ($(DO_COMMIT),yes)
+ $(verbose) cd $* && \
+ if test -d .git; then \
+ git diff --quiet .gitignore \
+ || git commit -m 'Git: Sync .gitignore from master' .gitignore; \
+ fi
+endif
+
+.PHONY: show-branch
+
+show-branch: $(READY_DEPS:%=$(DEPS_DIR)/%+show-branch)
+ $(verbose) printf '%-34s %s\n' $(PROJECT): "$$(git symbolic-ref -q --short HEAD || git describe --tags --exact-match)"
+
+%+show-branch:
+ $(verbose) printf '%-34s %s\n' $(notdir $*): "$$(cd $* && (git symbolic-ref -q --short HEAD || git describe --tags --exact-match))"
+
+SINCE_TAG ?= last-release
+COMMITS_LOG_OPTS ?= --oneline --decorate --no-merges
+MARKDOWN ?= no
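+
+# Illustrative invocation of the targets below (the tag name is an
+# example; see the version rewriting in commits-since-release-title):
+#   make commits-since-release SINCE_TAG=rabbitmq_v3_8_0 MARKDOWN=yes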
+
+define show_commits_since_tag
+set -e; \
+if test "$1"; then \
+ erlang_app=$(notdir $1); \
+ repository=$(call rmq_cmp_repo_name,$(notdir $1)); \
+ git_dir=-C\ "$1"; \
+else \
+ erlang_app=$(PROJECT); \
+ repository=$(call rmq_cmp_repo_name,$(PROJECT)); \
+fi; \
+case "$(SINCE_TAG)" in \
+last-release) \
+ tags_count=$$(git $$git_dir tag -l 2>/dev/null | grep -E -v '(-beta|_milestone|[-_]rc)' | wc -l); \
+ ;; \
+*) \
+ tags_count=$$(git $$git_dir tag -l 2>/dev/null | wc -l); \
+ ;; \
+esac; \
+if test "$$tags_count" -gt 0; then \
+ case "$(SINCE_TAG)" in \
+ last-release) \
+ ref=$$(git $$git_dir describe --abbrev=0 --tags \
+ --exclude "*-beta*" \
+ --exclude "*_milestone*" \
+ --exclude "*[-_]rc*"); \
+ ;; \
+ last-prerelease) \
+ ref=$$(git $$git_dir describe --abbrev=0 --tags); \
+ ;; \
+ *) \
+ git $$git_dir rev-parse "$(SINCE_TAG)" -- >/dev/null; \
+ ref=$(SINCE_TAG); \
+ ;; \
+ esac; \
+ commits_count=$$(git $$git_dir log --oneline "$$ref.." | wc -l); \
+ if test "$$commits_count" -gt 0; then \
+ if test "$(MARKDOWN)" = yes; then \
+ printf "\n## [\`$$repository\`](https://github.com/rabbitmq/$$repository)\n\nCommits since \`$$ref\`:\n\n"; \
+ git $$git_dir --no-pager log $(COMMITS_LOG_OPTS) \
+ --format="format:* %s ([\`%h\`](https://github.com/rabbitmq/$$repository/commit/%H))" \
+ "$$ref.."; \
+ echo; \
+ else \
+ echo; \
+ echo "# $$repository - Commits since $$ref"; \
+ git $$git_dir log $(COMMITS_LOG_OPTS) "$$ref.."; \
+ fi; \
+ fi; \
+else \
+ if test "$(MARKDOWN)" = yes; then \
+ printf "\n## [\`$$repository\`](https://github.com/rabbitmq/$$repository)\n\n**New** since the last release!\n"; \
+ else \
+ echo; \
+ echo "# $$repository - New since the last release!"; \
+ fi; \
+fi
+endef
+
+.PHONY: commits-since-release
+
+commits-since-release: commits-since-release-title \
+ $(RELEASED_RMQ_DEPS:%=$(DEPS_DIR)/%+commits-since-release)
+ $(verbose) $(call show_commits_since_tag)
+
+commits-since-release-title:
+ $(verbose) set -e; \
+ case "$(SINCE_TAG)" in \
+ last-release) \
+ tags_count=$$(git $$git_dir tag -l 2>/dev/null | grep -E -v '(-beta|_milestone|[-_]rc)' | wc -l); \
+ ;; \
+ *) \
+ tags_count=$$(git $$git_dir tag -l 2>/dev/null | wc -l); \
+ ;; \
+ esac; \
+ if test "$$tags_count" -gt 0; then \
+ case "$(SINCE_TAG)" in \
+ last-release) \
+ ref=$$(git $$git_dir describe --abbrev=0 --tags \
+ --exclude "*-beta*" \
+ --exclude "*_milestone*" \
+ --exclude "*[-_]rc*"); \
+ ;; \
+ last-prerelease) \
+ ref=$$(git $$git_dir describe --abbrev=0 --tags); \
+ ;; \
+ *) \
+ ref=$(SINCE_TAG); \
+ ;; \
+ esac; \
+ version=$$(echo "$$ref" | sed -E \
+ -e 's/rabbitmq_v([0-9]+)_([0-9]+)_([0-9]+)/v\1.\2.\3/' \
+ -e 's/_milestone/-beta./' \
+ -e 's/_rc/-rc./' \
+ -e 's/^v//'); \
+ echo "# Changes since RabbitMQ $$version"; \
+ else \
+ echo "# Changes since the beginning of time"; \
+ fi
+
+%+commits-since-release:
+ $(verbose) $(call show_commits_since_tag,$*)
+
+endif # ($(wildcard .git),)
+
+# --------------------------------------------------------------------
+# erlang.mk query-deps* formatting.
+# --------------------------------------------------------------------
+
+# We need to provide a repo mapping for deps resolved via git_rmq fetch method
+query_repo_git_rmq = https://github.com/rabbitmq/$(call rmq_cmp_repo_name,$(1))
+
+# --------------------------------------------------------------------
+# Common test logs compression.
+# --------------------------------------------------------------------
+
+.PHONY: ct-logs-archive clean-ct-logs-archive
+
+ifneq ($(wildcard logs/*),)
+TAR := tar
+ifeq ($(PLATFORM),freebsd)
+TAR := gtar
+endif
+ifeq ($(PLATFORM),darwin)
+TAR := gtar
+endif
+
+CT_LOGS_ARCHIVE ?= $(PROJECT)-ct-logs-$(subst _,-,$(subst -,,$(subst .,,$(patsubst ct_run.ct_$(PROJECT)@$(HOSTNAME).%,%,$(notdir $(lastword $(wildcard logs/ct_run.*))))))).tar.xz
+
+ifeq ($(patsubst %.tar.xz,%,$(CT_LOGS_ARCHIVE)),$(CT_LOGS_ARCHIVE))
+$(error CT_LOGS_ARCHIVE file must use '.tar.xz' as its filename extension)
+endif
+
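+# Illustrative override (a sketch; the name must keep the .tar.xz
+# extension enforced above):
+#   make ct-logs-archive CT_LOGS_ARCHIVE=my-run-ct-logs.tar.xz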
+ct-logs-archive: $(CT_LOGS_ARCHIVE)
+ @:
+
+$(CT_LOGS_ARCHIVE):
+ $(gen_verbose) \
+ for file in logs/*; do \
+ ! test -L "$$file" || rm "$$file"; \
+ done
+ $(verbose) \
+ $(TAR) -c \
+ --exclude "*/mnesia" \
+ --transform "s/^logs/$(patsubst %.tar.xz,%,$(notdir $(CT_LOGS_ARCHIVE)))/" \
+ -f - logs | \
+ xz > "$@"
+else
+ct-logs-archive:
+ @:
+endif
+
+clean-ct-logs-archive::
+ $(gen_verbose) rm -f $(PROJECT)-ct-logs-*.tar.xz
+
+clean:: clean-ct-logs-archive
+
+# --------------------------------------------------------------------
+# Generate a file listing RabbitMQ component dependencies and their
+# Git commit hash.
+# --------------------------------------------------------------------
+
+.PHONY: rabbitmq-deps.mk clean-rabbitmq-deps.mk
+
+rabbitmq-deps.mk: $(PROJECT)-rabbitmq-deps.mk
+ @:
+
+closing_paren := )
+
+define rmq_deps_mk_line
+dep_$(1) := git $(dir $(RABBITMQ_UPSTREAM_FETCH_URL))$(call rmq_cmp_repo_name,$(1)).git $$(git -C "$(2)" rev-parse HEAD)
+endef
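+
+# Each generated line pins a component to the exact commit currently in
+# use; illustrative output (URL and hash depend on the checkout):
+#   dep_rabbit_common := git https://github.com/rabbitmq/rabbitmq-common.git 0123abc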
+
+$(PROJECT)-rabbitmq-deps.mk: $(ERLANG_MK_RECURSIVE_DEPS_LIST)
+ $(gen_verbose) echo "# In $(PROJECT) - commit $$(git rev-parse HEAD)" > $@
+ $(verbose) cat $(ERLANG_MK_RECURSIVE_DEPS_LIST) | \
+ while read -r dir; do \
+ component=$$(basename "$$dir"); \
+ case "$$component" in \
+ $(foreach component,$(RABBITMQ_COMPONENTS),$(component)$(closing_paren) echo "$(call rmq_deps_mk_line,$(component),$$dir)" ;;) \
+ esac; \
+ done >> $@
+
+clean:: clean-rabbitmq-deps.mk
+
+clean-rabbitmq-deps.mk:
+ $(gen_verbose) rm -f $(PROJECT)-rabbitmq-deps.mk
diff --git a/deps/rabbit_common/mk/xrefr b/deps/rabbit_common/mk/xrefr
new file mode 100755
index 0000000000..03c408fcb4
--- /dev/null
+++ b/deps/rabbit_common/mk/xrefr
@@ -0,0 +1,338 @@
+#!/usr/bin/env escript
+%% vim:ft=erlang:
+
+%% The code is copied from xref_runner.
+%% https://github.com/inaka/xref_runner
+%%
+%% The only change is the support of our erlang_version_support
+%% attribute: we don't want any warnings about functions which will be
+%% dropped at load time.
+%%
+%% It's also a plain-text escript instead of a compiled one because we
+%% want to support Erlang R16B03, while the version of xref_runner it is
+%% based on uses maps and is built with something like Erlang 18.
+
+%% This mode allows us to reference local functions. For instance:
+%% lists:map(fun generate_comment/1, Comments)
+-mode(compile).
+
+-define(DIRS, ["ebin", "test"]).
+
+-define(CHECKS, [undefined_function_calls,
+ undefined_functions,
+ locals_not_used]).
+
+main(_) ->
+ Checks = ?CHECKS,
+ ElixirDeps = get_elixir_deps_paths(),
+ [true = code:add_path(P) || P <- ElixirDeps],
+ XrefWarnings = lists:append([check(Check) || Check <- Checks]),
+ warnings_prn(XrefWarnings),
+ case XrefWarnings of
+ [] -> ok;
+ _ -> halt(1)
+ end.
+
+get_elixir_deps_paths() ->
+ case os:getenv("ERLANG_MK_RECURSIVE_DEPS_LIST") of
+ false ->
+ [];
+ Filename ->
+ {ok, Fd} = file:open(Filename, [read]),
+ get_elixir_deps_paths1(Fd, [])
+ end.
+
+get_elixir_deps_paths1(Fd, Paths) ->
+ case file:read_line(Fd) of
+ {ok, Line0} ->
+ Line = Line0 -- [$\r, $\n],
+ RootPath = case os:type() of
+ {unix, _} ->
+ Line;
+ {win32, _} ->
+ case os:find_executable("cygpath.exe") of
+ false ->
+ Line;
+ Cygpath ->
+ os:cmd(
+ io_lib:format("~s --windows \"~s\"",
+ [Cygpath, Line]))
+ -- [$\r, $\n]
+ end
+ end,
+ Glob = filename:join([RootPath, "_build", "dev", "lib", "*", "ebin"]),
+ NewPaths = filelib:wildcard(Glob),
+ get_elixir_deps_paths1(Fd, Paths ++ NewPaths);
+ eof ->
+ add_elixir_stdlib_path(Paths)
+ end.
+
+add_elixir_stdlib_path(Paths) ->
+ case find_elixir_home() of
+ false -> Paths;
+ ElixirLibDir -> [ElixirLibDir | Paths]
+ end.
+
+find_elixir_home() ->
+ ElixirExe = case os:type() of
+ {unix, _} -> "elixir";
+ {win32, _} -> "elixir.bat"
+ end,
+ case os:find_executable(ElixirExe) of
+ false -> false;
+ ExePath -> resolve_symlink(ExePath)
+ end.
+
+resolve_symlink(ExePath) ->
+ case file:read_link_all(ExePath) of
+ {error, einval} ->
+ determine_elixir_home(ExePath);
+ {ok, ResolvedLink} ->
+ ExePath1 = filename:absname(ResolvedLink,
+ filename:dirname(ExePath)),
+ resolve_symlink(ExePath1);
+ {error, _} ->
+ false
+ end.
+
+determine_elixir_home(ExePath) ->
+    LibPath = filename:join([filename:dirname(filename:dirname(ExePath)),
+                             "lib",
+                             "elixir",
+                             "ebin"]),
+    case filelib:is_dir(LibPath) of
+        true  -> LibPath;
+        %% Return `false` rather than an error tuple so that
+        %% add_elixir_stdlib_path/1 skips the path instead of passing
+        %% a non-path term to code:add_path/1.
+        false -> false
+    end.
+
+check(Check) ->
+ Dirs = ?DIRS,
+ lists:foreach(fun code:add_path/1, Dirs),
+
+ {ok, Xref} = xref:start([]),
+ try
+ ok = xref:set_library_path(Xref, code:get_path()),
+
+ lists:foreach(
+ fun(Dir) ->
+ case filelib:is_dir(Dir) of
+ true -> {ok, _} = xref:add_directory(Xref, Dir);
+ false -> ok
+ end
+ end, Dirs),
+
+ {ok, Results} = xref:analyze(Xref, Check),
+
+ FilteredResults = filter_xref_results(Check, Results),
+
+ [result_to_warning(Check, Result) || Result <- FilteredResults]
+ after
+ stopped = xref:stop(Xref)
+ end.
+
+%% -------------------------------------------------------------------
+%% Filtering results.
+%% -------------------------------------------------------------------
+
+filter_xref_results(Check, Results) ->
+ SourceModules =
+ lists:usort([source_module(Result) || Result <- Results]),
+
+ Ignores = lists:flatmap(
+ fun(Module) -> get_ignorelist(Module, Check) end, SourceModules),
+
+ UnusedFunctions = lists:flatmap(
+ fun(Mod) -> get_unused_compat_functions(Mod) end,
+ SourceModules),
+
+ ToIgnore = case get(results_to_ignore) of
+ undefined -> [];
+ RTI -> RTI
+ end,
+ NewToIgnore = [parse_xref_target(Result)
+ || Result <- Results,
+ lists:member(parse_xref_source(Result), UnusedFunctions)],
+ AllToIgnore = ToIgnore ++ NewToIgnore ++ [mfa(M, {F, A})
+ || {_, {M, F, A}} <- Ignores],
+ put(results_to_ignore, AllToIgnore),
+
+ [Result || Result <- Results,
+ not lists:member(parse_xref_result(Result), Ignores) andalso
+ not lists:member(parse_xref_result(Result), AllToIgnore) andalso
+ not lists:member(parse_xref_source(Result), UnusedFunctions)].
+
+source_module({Mt, _Ft, _At}) -> Mt;
+source_module({{Ms, _Fs, _As}, _Target}) -> Ms.
+
+%%
+%% Ignore behaviour functions, and explicitly marked functions
+%%
+%% Functions can be ignored by using
+%% -ignore_xref([{F, A}, {M, F, A}...]).
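+%%
+%% e.g. (illustrative):
+%%   -ignore_xref([{my_local_fun, 1}, {some_mod, some_fun, 2}]).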
+get_ignorelist(Mod, Check) ->
+ %% Get ignore_xref attribute and combine them in one list
+ Attributes =
+ try
+ Mod:module_info(attributes)
+ catch
+ _Class:_Error -> []
+ end,
+
+ IgnoreXref =
+ [mfa(Mod, Value) || {ignore_xref, Values} <- Attributes, Value <- Values],
+
+ BehaviourCallbacks = get_behaviour_callbacks(Check, Mod, Attributes),
+
+ %% And create a flat {M, F, A} list
+ IgnoreXref ++ BehaviourCallbacks.
+
+get_behaviour_callbacks(exports_not_used, Mod, Attributes) ->
+ Behaviours = [Value || {behaviour, Values} <- Attributes, Value <- Values],
+ [{Mod, {Mod, F, A}}
+ || B <- Behaviours, {F, A} <- B:behaviour_info(callbacks)];
+get_behaviour_callbacks(_Check, _Mod, _Attributes) ->
+ [].
+
+get_unused_compat_functions(Module) ->
+ OTPVersion = code_version:get_otp_version(),
+ Attributes = try
+ Module:module_info(attributes)
+ catch
+ _Class:_Error -> []
+ end,
+ CompatTuples = [Tuple
+ || {erlang_version_support, Tuples} <- Attributes,
+ Tuple <- Tuples],
+ get_unused_compat_functions(Module, OTPVersion, CompatTuples, []).
+
+get_unused_compat_functions(_, _, [], Result) ->
+ Result;
+get_unused_compat_functions(Module,
+ OTPVersion,
+ [{MinOTPVersion, Choices} | Rest],
+ Result) ->
+ Functions = lists:map(
+ fun({_, Arity, Pre, Post}) ->
+ if
+ OTPVersion >= MinOTPVersion ->
+ %% We ignore the "pre" function.
+ mfa(Module, {Pre, Arity});
+ true ->
+ %% We ignore the "post" function.
+ mfa(Module, {Post, Arity})
+ end
+ end, Choices),
+ get_unused_compat_functions(Module, OTPVersion, Rest,
+ Result ++ Functions).
+
+mfa(M, {F, A}) -> {M, {M, F, A}};
+mfa(M, MFA) -> {M, MFA}.
+
+parse_xref_result({{SM, _, _}, MFAt}) -> {SM, MFAt};
+parse_xref_result({TM, _, _} = MFAt) -> {TM, MFAt}.
+
+parse_xref_source({{SM, _, _} = MFAt, _}) -> {SM, MFAt};
+parse_xref_source({TM, _, _} = MFAt) -> {TM, MFAt}.
+
+parse_xref_target({_, {TM, _, _} = MFAt}) -> {TM, MFAt};
+parse_xref_target({TM, _, _} = MFAt) -> {TM, MFAt}.
+
+%% -------------------------------------------------------------------
+%% Preparing results.
+%% -------------------------------------------------------------------
+
+result_to_warning(Check, {MFASource, MFATarget}) ->
+ {Filename, Line} = get_source(MFASource),
+ [{filename, Filename},
+ {line, Line},
+ {source, MFASource},
+ {target, MFATarget},
+ {check, Check}];
+result_to_warning(Check, MFA) ->
+ {Filename, Line} = get_source(MFA),
+ [{filename, Filename},
+ {line, Line},
+ {source, MFA},
+ {check, Check}].
+
+%%
+%% Given a MFA, find the file and LOC where it's defined. Note that
+%% xref doesn't work if there is no abstract_code, so we can avoid
+%% being too paranoid here.
+%%
+get_source({M, F, A}) ->
+ case code:get_object_code(M) of
+ error -> {"", 0};
+ {M, Bin, _} -> find_function_source(M, F, A, Bin)
+ end.
+
+find_function_source(M, F, A, Bin) ->
+ AbstractCode = beam_lib:chunks(Bin, [abstract_code]),
+ {ok, {M, [{abstract_code, {raw_abstract_v1, Code}}]}} = AbstractCode,
+
+ %% Extract the original source filename from the abstract code
+ [Source|_] = [S || {attribute, _, file, {S, _}} <- Code],
+
+ %% Extract the line number for a given function def
+ Fn = [E || E <- Code,
+ element(1, E) == function,
+ element(3, E) == F,
+ element(4, E) == A],
+
+ case Fn of
+ [{function, Line, F, _, _}] when is_integer(Line) ->
+ {Source, Line};
+ [{function, Line, F, _, _}] ->
+ {Source, erl_anno:line(Line)};
+ %% do not crash if functions are exported, even though they
+ %% are not in the source.
+ %% parameterized modules add new/1 and instance/1 for example.
+ [] -> {Source, 0}
+ end.
+
+%% -------------------------------------------------------------------
+%% Reporting results.
+%% -------------------------------------------------------------------
+
+warnings_prn([]) ->
+ ok;
+warnings_prn(Comments) ->
+ Messages = lists:map(fun generate_comment/1, Comments),
+ lists:foreach(fun warning_prn/1, Messages).
+
+warning_prn(Message) ->
+ FullMessage = Message ++ "~n",
+ io:format(FullMessage, []).
+
+generate_comment(XrefWarning) ->
+ Filename = proplists:get_value(filename, XrefWarning),
+ Line = proplists:get_value(line, XrefWarning),
+ Source = proplists:get_value(source, XrefWarning),
+ Check = proplists:get_value(check, XrefWarning),
+ Target = proplists:get_value(target, XrefWarning),
+ Position = case {Filename, Line} of
+ {"", _} -> "";
+ {Filename, 0} -> [Filename, " "];
+ {Filename, Line} -> [Filename, ":",
+ integer_to_list(Line), " "]
+ end,
+ [Position, generate_comment_text(Check, Source, Target)].
+
+generate_comment_text(Check, {SM, SF, SA}, TMFA) ->
+ SMFA = io_lib:format("`~p:~p/~p`", [SM, SF, SA]),
+ generate_comment_text(Check, SMFA, TMFA);
+generate_comment_text(Check, SMFA, {TM, TF, TA}) ->
+ TMFA = io_lib:format("`~p:~p/~p`", [TM, TF, TA]),
+ generate_comment_text(Check, SMFA, TMFA);
+
+generate_comment_text(undefined_function_calls, SMFA, TMFA) ->
+ io_lib:format("~s calls undefined function ~s", [SMFA, TMFA]);
+generate_comment_text(undefined_functions, SMFA, _TMFA) ->
+ io_lib:format("~s is not defined as a function", [SMFA]);
+generate_comment_text(locals_not_used, SMFA, _TMFA) ->
+ io_lib:format("~s is an unused local function", [SMFA]);
+generate_comment_text(exports_not_used, SMFA, _TMFA) ->
+ io_lib:format("~s is an unused export", [SMFA]);
+generate_comment_text(deprecated_function_calls, SMFA, TMFA) ->
+ io_lib:format("~s calls deprecated function ~s", [SMFA, TMFA]);
+generate_comment_text(deprecated_functions, SMFA, _TMFA) ->
+ io_lib:format("~s is deprecated", [SMFA]).
diff --git a/deps/rabbit_common/src/app_utils.erl b/deps/rabbit_common/src/app_utils.erl
new file mode 100644
index 0000000000..df965575be
--- /dev/null
+++ b/deps/rabbit_common/src/app_utils.erl
@@ -0,0 +1,167 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(app_utils).
+
+-export([load_applications/1,
+ start_applications/1, start_applications/2, start_applications/3,
+ stop_applications/1, stop_applications/2, app_dependency_order/2,
+ app_dependencies/1]).
+
+-type error_handler() :: fun((atom(), any()) -> 'ok' | no_return()).
+-type restart_type() :: 'permanent' | 'transient' | 'temporary'.
+
+-spec load_applications([atom()]) -> 'ok'.
+-spec start_applications([atom()]) -> 'ok'.
+-spec stop_applications([atom()]) -> 'ok'.
+-spec start_applications([atom()], error_handler()) -> 'ok'.
+-spec start_applications([atom()], error_handler(), #{atom() => restart_type()}) -> 'ok'.
+-spec stop_applications([atom()], error_handler()) -> 'ok'.
+-spec app_dependency_order([atom()], boolean()) -> [digraph:vertex()].
+-spec app_dependencies(atom()) -> [atom()].
+-spec failed_to_start_app(atom(), any()) -> no_return().
+-spec failed_to_stop_app(atom(), any()) -> no_return().
+
+%%---------------------------------------------------------------------------
+%% Public API
+
+load_applications(Apps) ->
+ load_applications(queue:from_list(Apps), sets:new()),
+ ok.
+
+start_applications(Apps) ->
+ start_applications(
+ Apps, fun failed_to_start_app/2).
+
+stop_applications(Apps) ->
+ stop_applications(
+ Apps, fun failed_to_stop_app/2).
+
+failed_to_start_app(App, Reason) ->
+ throw({error, {cannot_start_application, App, Reason}}).
+
+failed_to_stop_app(App, Reason) ->
+ throw({error, {cannot_stop_application, App, Reason}}).
+
+start_applications(Apps, ErrorHandler) ->
+ start_applications(Apps, ErrorHandler, #{}).
+
+start_applications(Apps, ErrorHandler, RestartTypes) ->
+ manage_applications(fun lists:foldl/3,
+ fun(App) -> ensure_all_started(App, RestartTypes) end,
+ fun application:stop/1,
+ already_started,
+ ErrorHandler,
+ Apps).
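+
+%% A minimal usage sketch (application name and error handler are
+%% illustrative):
+%%
+%%   ok = app_utils:start_applications(
+%%          [my_app],
+%%          fun(App, Reason) -> throw({cannot_start, App, Reason}) end,
+%%          #{my_app => transient}).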
+
+stop_applications(Apps, ErrorHandler) ->
+ manage_applications(fun lists:foldr/3,
+ fun(App) ->
+ rabbit_log:info("Stopping application '~s'", [App]),
+ application:stop(App)
+ end,
+ fun(App) -> ensure_all_started(App, #{}) end,
+ not_started,
+ ErrorHandler,
+ Apps).
+
+app_dependency_order(RootApps, StripUnreachable) ->
+ {ok, G} = rabbit_misc:build_acyclic_graph(
+ fun ({App, _Deps}) -> [{App, App}] end,
+ fun ({App, Deps}) -> [{Dep, App} || Dep <- Deps] end,
+ [{App, app_dependencies(App)} ||
+ {App, _Desc, _Vsn} <- application:loaded_applications()]),
+ try
+ case StripUnreachable of
+ true -> digraph:del_vertices(G, digraph:vertices(G) --
+ digraph_utils:reachable(RootApps, G));
+ false -> ok
+ end,
+ digraph_utils:topsort(G)
+ after
+ true = digraph:delete(G)
+ end.
+
+%%---------------------------------------------------------------------------
+%% Private API
+
+load_applications(Worklist, Loaded) ->
+ case queue:out(Worklist) of
+ {empty, _WorkList} ->
+ ok;
+ {{value, App}, Worklist1} ->
+ case sets:is_element(App, Loaded) of
+ true -> load_applications(Worklist1, Loaded);
+ false -> case application:load(App) of
+ ok -> ok;
+ {error, {already_loaded, App}} -> ok;
+ Error -> throw(Error)
+ end,
+ load_applications(
+ queue:join(Worklist1,
+ queue:from_list(app_dependencies(App))),
+ sets:add_element(App, Loaded))
+ end
+ end.
+
+app_dependencies(App) ->
+ case application:get_key(App, applications) of
+ undefined -> [];
+ {ok, Lst} -> Lst
+ end.
+
+manage_applications(Iterate, Do, Undo, SkipError, ErrorHandler, Apps) ->
+ Iterate(fun (App, Acc) ->
+ case Do(App) of
+ ok -> [App | Acc];
+ {ok, []} -> Acc;
+ {ok, [App]} -> [App | Acc];
+ {ok, StartedApps} -> StartedApps ++ Acc;
+ {error, {SkipError, _}} -> Acc;
+ {error, Reason} ->
+ lists:foreach(Undo, Acc),
+ ErrorHandler(App, Reason)
+ end
+ end, [], Apps),
+ ok.
+
+%% Stop the Erlang VM when the rabbit application stops abnormally,
+%% e.g. when the message store reaches its restart limit.
+default_restart_type(rabbit) -> transient;
+default_restart_type(_) -> temporary.
+
+%% Copyright Ericsson AB 1996-2016. All Rights Reserved.
+%%
+%% Code originally from Erlang/OTP source lib/kernel/src/application.erl
+%% and modified to use RestartTypes map
+%%
+ensure_all_started(Application, RestartTypes) ->
+ case ensure_all_started(Application, RestartTypes, []) of
+ {ok, Started} ->
+ {ok, lists:reverse(Started)};
+ {error, Reason, Started} ->
+ _ = [application:stop(App) || App <- Started],
+ {error, Reason}
+ end.
+
+ensure_all_started(Application, RestartTypes, Started) ->
+ RestartType = maps:get(Application, RestartTypes, default_restart_type(Application)),
+ case application:start(Application, RestartType) of
+ ok ->
+ {ok, [Application | Started]};
+ {error, {already_started, Application}} ->
+ {ok, Started};
+ {error, {not_started, Dependency}} ->
+ case ensure_all_started(Dependency, RestartTypes, Started) of
+ {ok, NewStarted} ->
+ ensure_all_started(Application, RestartTypes, NewStarted);
+ Error ->
+ Error
+ end;
+ {error, Reason} ->
+ {error, {Application, Reason}, Started}
+ end.
diff --git a/deps/rabbit_common/src/code_version.erl b/deps/rabbit_common/src/code_version.erl
new file mode 100644
index 0000000000..76e9c75c7f
--- /dev/null
+++ b/deps/rabbit_common/src/code_version.erl
@@ -0,0 +1,348 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+-module(code_version).
+
+-export([update/1, get_otp_version/0]).
+
+%%----------------------------------------------------------------------------
+%% API
+%%----------------------------------------------------------------------------
+
+%%----------------------------------------------------------------------------
+%% @doc Reads the abstract code of the given `Module`, modifies it to adapt to
+%% the current Erlang version, compiles and loads the result.
+%% This function finds the current Erlang version and then selects the function
+%% call for that version, removing all other versions declared in the original
+%% beam file. `code_version:update/1` is triggered by the module itself the
+%% first time an affected function is called.
+%%
+%% The purpose of this functionality is to support the new time API introduced
+%% in ERTS 7.0, while providing compatibility with previous versions.
+%%
+%% `Module` must contain an attribute `erlang_version_support` containing a list of
+%% tuples:
+%%
+%% {ErlangVersion, [{OriginalFunction, Arity, PreErlangVersionFunction,
+%% PostErlangVersionFunction}]}
+%%
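+%% For instance (hypothetical module and function names):
+%%
+%% -erlang_version_support(
+%%     [{18, [{get_time, 0, get_time_pre_18, get_time_post_18}]}]).
+%%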
+%% All these new functions may be exported, and implemented as follows:
+%%
+%% OriginalFunction() ->
+%% code_version:update(?MODULE),
+%% ?MODULE:OriginalFunction().
+%%
+%% PostErlangVersionFunction() ->
+%% %% implementation using new time API
+%% ..
+%%
+%% PreErlangVersionFunction() ->
+%% %% implementation using fallback solution
+%% ..
+%%
+%% CAUTION: Make sure that all functions in the module are patched this
+%% way! If you have "regular" functions, you might hit a race condition
+%% between the unload of the old module and the load of the patched
+%% module. If all functions are patched, loading will be serialized,
+%% thanks to a lock acquired by `code_version`. However, if you have
+%% regular functions, any call to them will bypass that lock and the old
+%% code will be reloaded from disk. This will kill the process trying to
+%% patch the module.
+%%
+%% end
+%%----------------------------------------------------------------------------
+-spec update(atom()) -> ok | no_return().
+update(Module) ->
+ AbsCode = get_abs_code(Module),
+ Forms = replace_forms(Module, get_otp_version(), AbsCode),
+ Code = compile_forms(Forms),
+ load_code(Module, Code).
+
+%%----------------------------------------------------------------------------
+%% Internal functions
+%%----------------------------------------------------------------------------
+load_code(Module, Code) ->
+ LockId = {{?MODULE, Module}, self()},
+ FakeFilename = "Loaded by rabbit_common",
+ global:set_lock(LockId, [node()]),
+ case code:which(Module) of
+ FakeFilename ->
+ ok;
+ _ ->
+ unload(Module),
+ case code:load_binary(Module, FakeFilename, Code) of
+ {module, _} -> ok;
+ {error, Reason} -> throw({cannot_load, Module, Reason})
+ end
+ end,
+ global:del_lock(LockId, [node()]),
+ ok.
+
+unload(Module) ->
+ code:soft_purge(Module),
+ code:delete(Module).
+
+compile_forms(Forms) ->
+ case compile:forms(Forms, [debug_info, return_errors]) of
+ {ok, _ModName, Code} ->
+ Code;
+ {ok, _ModName, Code, _Warnings} ->
+ Code;
+ Error ->
+ throw({cannot_compile_forms, Error})
+ end.
+
+get_abs_code(Module) ->
+ get_forms(get_object_code(Module)).
+
+get_object_code(Module) ->
+ case code:get_object_code(Module) of
+ {_Mod, Code, _File} ->
+ Code;
+ error ->
+ throw({not_found, Module})
+ end.
+
+get_forms(Code) ->
+ case beam_lib:chunks(Code, [abstract_code]) of
+ {ok, {_, [{abstract_code, {raw_abstract_v1, Forms}}]}} ->
+ Forms;
+ {ok, {Module, [{abstract_code, no_abstract_code}]}} ->
+ throw({no_abstract_code, Module});
+ {error, beam_lib, Reason} ->
+ throw({no_abstract_code, Reason})
+ end.
+
+get_otp_version() ->
+ Version = erlang:system_info(otp_release),
+ case re:run(Version, "^[0-9][0-9]", [{capture, first, list}]) of
+ {match, [V]} ->
+ list_to_integer(V);
+ _ ->
+ %% Could be anything below R17, we are not interested
+ 0
+ end.
+
+get_original_pairs(VersionSupport) ->
+ [{Orig, Arity} || {Orig, Arity, _Pre, _Post} <- VersionSupport].
+
+get_delete_pairs(true, VersionSupport) ->
+ [{Pre, Arity} || {_Orig, Arity, Pre, _Post} <- VersionSupport];
+get_delete_pairs(false, VersionSupport) ->
+ [{Post, Arity} || {_Orig, Arity, _Pre, Post} <- VersionSupport].
+
+get_rename_pairs(true, VersionSupport) ->
+ [{Post, Arity} || {_Orig, Arity, _Pre, Post} <- VersionSupport];
+get_rename_pairs(false, VersionSupport) ->
+ [{Pre, Arity} || {_Orig, Arity, Pre, _Post} <- VersionSupport].
+
+%% Pairs of {Renamed, OriginalName} functions
+get_name_pairs(true, VersionSupport) ->
+ [{{Post, Arity}, Orig} || {Orig, Arity, _Pre, Post} <- VersionSupport];
+get_name_pairs(false, VersionSupport) ->
+ [{{Pre, Arity}, Orig} || {Orig, Arity, Pre, _Post} <- VersionSupport].
+
+delete_abstract_functions(ToDelete) ->
+ fun(Tree, Function) ->
+ case lists:member(Function, ToDelete) of
+ true ->
+ erl_syntax:comment(["Deleted unused function"]);
+ false ->
+ Tree
+ end
+ end.
+
+rename_abstract_functions(ToRename, ToName) ->
+ fun(Tree, Function) ->
+ case lists:member(Function, ToRename) of
+ true ->
+ FunctionName = proplists:get_value(Function, ToName),
+ erl_syntax:function(
+ erl_syntax:atom(FunctionName),
+ erl_syntax:function_clauses(Tree));
+ false ->
+ Tree
+ end
+ end.
+
+replace_forms(Module, ErlangVersion, AbsCode) ->
+ %% Obtain attribute containing the list of functions that must be updated
+ Attr = Module:module_info(attributes),
+ VersionSupport = proplists:get_value(erlang_version_support, Attr),
+ {Pre, Post} = lists:splitwith(fun({Version, _Pairs}) ->
+ Version > ErlangVersion
+ end, VersionSupport),
+ %% Replace functions in two passes: replace for Erlang versions > current
+ %% first, Erlang versions =< current afterwards.
+ replace_version_forms(
+ true, replace_version_forms(false, AbsCode, get_version_functions(Pre)),
+ get_version_functions(Post)).
+
+get_version_functions(List) ->
+ lists:append([Pairs || {_Version, Pairs} <- List]).
+
+replace_version_forms(IsPost, AbsCode, VersionSupport) ->
+ %% Get pairs of {Function, Arity} for the triggering functions, which
+ %% are also the final function names.
+ Original = get_original_pairs(VersionSupport),
+ %% Get pairs of {Function, Arity} for the unused version
+ ToDelete = get_delete_pairs(IsPost, VersionSupport),
+ %% Delete original functions (those that trigger the code update) and
+ %% the unused version ones
+ DeleteFun = delete_abstract_functions(ToDelete ++ Original),
+ AbsCode0 = replace_function_forms(AbsCode, DeleteFun),
+ %% Get pairs of {Function, Arity} for the current version which must be
+ %% renamed
+ ToRename = get_rename_pairs(IsPost, VersionSupport),
+    %% Get pairs of {Renamed, OriginalName} functions
+ ToName = get_name_pairs(IsPost, VersionSupport),
+ %% Rename versioned functions with their final name
+ RenameFun = rename_abstract_functions(ToRename, ToName),
+ AbsCode1 = replace_function_forms(AbsCode0, RenameFun),
+ %% Adjust `-dialyzer` attribute.
+ AbsCode2 = fix_dialyzer_attribute(AbsCode1, ToDelete, ToName),
+ %% Remove exports of all versioned functions
+ remove_exports(AbsCode2, ToDelete ++ ToRename).
+
+replace_function_forms(AbsCode, Fun) ->
+ ReplaceFunction =
+ fun(Tree) ->
+ Function = erl_syntax_lib:analyze_function(Tree),
+ Fun(Tree, Function)
+ end,
+ Filter = fun(Tree) ->
+ case erl_syntax:type(Tree) of
+ function -> ReplaceFunction(Tree);
+ _Other -> Tree
+ end
+ end,
+ fold_syntax_tree(Filter, AbsCode).
+
+fix_dialyzer_attribute(AbsCode, ToDelete, ToName) ->
+ FixDialyzer =
+ fun(Tree) ->
+ case erl_syntax_lib:analyze_attribute(Tree) of
+ {dialyzer, {_, Value}} ->
+ FixedValue = fix_dialyzer_attribute_value(Value,
+ ToDelete,
+ ToName),
+ rebuild_dialyzer({dialyzer, FixedValue});
+ _ ->
+ Tree
+ end
+ end,
+ Filter = fun(Tree) ->
+ case erl_syntax:type(Tree) of
+ attribute -> FixDialyzer(Tree);
+ _ -> Tree
+ end
+ end,
+ fold_syntax_tree(Filter, AbsCode).
+
+fix_dialyzer_attribute_value(Info, ToDelete, ToName)
+ when is_list(Info) ->
+ lists:map(
+ fun(I) ->
+ fix_dialyzer_attribute_value(I, ToDelete, ToName)
+ end,
+ Info);
+fix_dialyzer_attribute_value({Warn, FunList}, ToDelete, ToName) ->
+ FixedFunList = fix_dialyzer_attribute_funlist(FunList, ToDelete, ToName),
+ {Warn, FixedFunList};
+fix_dialyzer_attribute_value(Info, _, _)
+ when is_atom(Info) ->
+ Info.
+
+fix_dialyzer_attribute_funlist(FunList, ToDelete, ToName)
+ when is_list(FunList) ->
+ lists:filtermap(
+ fun(I) ->
+ case fix_dialyzer_attribute_funlist(I, ToDelete, ToName) of
+ [] -> false;
+ R -> {true, R}
+ end
+ end,
+ FunList);
+fix_dialyzer_attribute_funlist({FunName, Arity} = Fun,
+ ToDelete, ToName)
+ when is_atom(FunName) andalso is_integer(Arity) andalso Arity >= 0 ->
+ remove_or_rename(Fun, ToDelete, ToName);
+fix_dialyzer_attribute_funlist(FunList, _, _) ->
+ FunList.
+
+remove_or_rename(Fun, ToDelete, ToName) ->
+ case lists:member(Fun, ToDelete) of
+ true ->
+ [];
+ false ->
+ case proplists:get_value(Fun, ToName) of
+ undefined -> Fun;
+ NewName -> setelement(1, Fun, NewName)
+ end
+ end.
+
+rebuild_dialyzer({dialyzer, Value}) ->
+ erl_syntax:attribute(
+ erl_syntax:atom(dialyzer),
+ [rebuild_dialyzer_value(Value)]).
+
+rebuild_dialyzer_value(Value) when is_list(Value) ->
+ erl_syntax:list(
+ [rebuild_dialyzer_value(V) || V <- Value]);
+rebuild_dialyzer_value({Warn, FunList}) ->
+ erl_syntax:tuple(
+ [rebuild_dialyzer_warn(Warn),
+ rebuild_dialyzer_funlist(FunList)]);
+rebuild_dialyzer_value(Warn) when is_atom(Warn) ->
+ rebuild_dialyzer_warn(Warn).
+
+rebuild_dialyzer_warn(Warn) when is_list(Warn) ->
+ erl_syntax:list(
+ [rebuild_dialyzer_warn(W) || W <- Warn]);
+rebuild_dialyzer_warn(Warn) when is_atom(Warn) ->
+ erl_syntax:atom(Warn).
+
+rebuild_dialyzer_funlist(FunList) when is_list(FunList) ->
+ erl_syntax:list(
+ [rebuild_dialyzer_funlist({N, A}) || {N, A} <- FunList]);
+rebuild_dialyzer_funlist({FunName, Arity}) ->
+ erl_syntax:tuple([erl_syntax:atom(FunName), erl_syntax:integer(Arity)]).
+
+filter_export_pairs(Info, ToDelete) ->
+ lists:filter(fun(Pair) ->
+ not lists:member(Pair, ToDelete)
+ end, Info).
+
+remove_exports(AbsCode, ToDelete) ->
+ RemoveExports =
+ fun(Tree) ->
+ case erl_syntax_lib:analyze_attribute(Tree) of
+ {export, Info} ->
+ Remaining = filter_export_pairs(Info, ToDelete),
+ rebuild_export(Remaining);
+ _Other -> Tree
+ end
+ end,
+ Filter = fun(Tree) ->
+ case erl_syntax:type(Tree) of
+ attribute -> RemoveExports(Tree);
+ _Other -> Tree
+ end
+ end,
+ fold_syntax_tree(Filter, AbsCode).
+
+rebuild_export(Args) ->
+ erl_syntax:attribute(
+ erl_syntax:atom(export),
+ [erl_syntax:list(
+ [erl_syntax:arity_qualifier(erl_syntax:atom(N),
+ erl_syntax:integer(A))
+ || {N, A} <- Args])]).
+
+fold_syntax_tree(Filter, Forms) ->
+ Tree = erl_syntax:form_list(Forms),
+ NewTree = erl_syntax_lib:map(Filter, Tree),
+ erl_syntax:revert_forms(NewTree).
diff --git a/deps/rabbit_common/src/credit_flow.erl b/deps/rabbit_common/src/credit_flow.erl
new file mode 100644
index 0000000000..da1d9606c1
--- /dev/null
+++ b/deps/rabbit_common/src/credit_flow.erl
@@ -0,0 +1,210 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(credit_flow).
+
+%% Credit flow is controlled by a credit specification - a
+%% {InitialCredit, MoreCreditAfter} tuple. For the message sender,
+%% credit starts at InitialCredit and is decremented with every
+%% message sent. The message receiver grants more credit to the sender
+%% by sending it a {bump_credit, ...} control message after receiving
+%% MoreCreditAfter messages. The sender should pass this message in to
+%% handle_bump_msg/1. The sender should block when it goes below 0
+%% (check by invoking blocked/0). If a process is both a sender and a
+%% receiver it will not grant any more credit to its senders when it
+%% is itself blocked - thus the only processes that need to check
+%% blocked/0 are ones that read from network sockets.
+%%
+%% Credit flows left to right when processes send messages down the
+%% chain, starting at the rabbit_reader, ending at the msg_store:
+%% reader -> channel -> queue_process -> msg_store.
+%%
+%% If the message store has a backlog, then it will block the
+%% queue_process, which will block the channel, and finally the reader
+%% will be blocked, throttling down publishers.
+%%
+%% Once a process is unblocked, it will grant credits up the chain,
+%% possibly unblocking other processes:
+%% reader <--grant channel <--grant queue_process <--grant msg_store.
+%%
+%% Grepping the project files for `credit_flow` will reveal the places
+%% where this module is currently used, with extra comments on what's
+%% going on at each instance. Note that credit flow during mirror
+%% synchronisation has not been documented, since it doesn't affect
+%% client publishes.
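+%%
+%% A minimal sketch of the calling pattern (pids illustrative):
+%%
+%%   %% message sender, before each send to receiver QPid:
+%%   credit_flow:send(QPid),
+%%
+%%   %% message receiver, after handling a message from sender ChPid:
+%%   credit_flow:ack(ChPid),
+%%
+%%   %% back on the sender, when {bump_credit, Msg} arrives:
+%%   credit_flow:handle_bump_msg(Msg),
+%%   %% socket readers should then consult credit_flow:blocked().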
+
+-define(DEFAULT_INITIAL_CREDIT, 200).
+-define(DEFAULT_MORE_CREDIT_AFTER, 100).
+
+-define(DEFAULT_CREDIT,
+ case get(credit_flow_default_credit) of
+ undefined ->
+ Val = rabbit_misc:get_env(rabbit, credit_flow_default_credit,
+ {?DEFAULT_INITIAL_CREDIT,
+ ?DEFAULT_MORE_CREDIT_AFTER}),
+ put(credit_flow_default_credit, Val),
+ Val;
+ Val -> Val
+ end).
+
+-export([send/1, send/2, ack/1, ack/2, handle_bump_msg/1, blocked/0, state/0]).
+-export([peer_down/1]).
+-export([block/1, unblock/1]).
+
+%%----------------------------------------------------------------------------
+
+-export_type([bump_msg/0]).
+
+-opaque(bump_msg() :: {pid(), non_neg_integer()}).
+-type(credit_spec() :: {non_neg_integer(), non_neg_integer()}).
+
+-spec send
+ (pid()) -> 'ok';
+ (credit_spec()) -> 'ok'.
+-spec ack(pid()) -> 'ok'.
+-spec ack(pid(), credit_spec()) -> 'ok'.
+-spec handle_bump_msg(bump_msg()) -> 'ok'.
+-spec blocked() -> boolean().
+-spec peer_down(pid()) -> 'ok'.
+
+%%----------------------------------------------------------------------------
+
+%% process dict update macro - eliminates the performance-hurting
+%% closure creation a HOF would introduce
+-define(UPDATE(Key, Default, Var, Expr),
+ begin
+ %% We deliberately allow Var to escape from the case here
+ %% to be used in Expr. Any temporary var we introduced
+ %% would also escape, and might conflict.
+ Var = case get(Key) of
+ undefined -> Default;
+ V -> V
+ end,
+ put(Key, Expr)
+ end).
+
+%% If the current process was blocked by credit flow in the last
+%% STATE_CHANGE_INTERVAL microseconds (i.e. within the last second),
+%% state/0 will report it as "in flow".
+-define(STATE_CHANGE_INTERVAL, 1000000).
+
+-ifdef(CREDIT_FLOW_TRACING).
+-define(TRACE_BLOCKED(SELF, FROM), rabbit_event:notify(credit_flow_blocked,
+ [{process, SELF},
+ {process_info, erlang:process_info(SELF)},
+ {from, FROM},
+ {from_info, erlang:process_info(FROM)},
+ {timestamp,
+ os:system_time(
+ milliseconds)}])).
+-define(TRACE_UNBLOCKED(SELF, FROM), rabbit_event:notify(credit_flow_unblocked,
+ [{process, SELF},
+ {from, FROM},
+ {timestamp,
+ os:system_time(
+ milliseconds)}])).
+-else.
+-define(TRACE_BLOCKED(SELF, FROM), ok).
+-define(TRACE_UNBLOCKED(SELF, FROM), ok).
+-endif.
+
+%%----------------------------------------------------------------------------
+
+%% There are two "flows" here; of messages and of credit, going in
+%% opposite directions. The variable names "From" and "To" refer to
+%% the flow of credit, but the function names refer to the flow of
+%% messages. This is the clearest I can make it (since the function
+%% names form the API and want to make sense externally, while the
+%% variable names are used in credit bookkeeping and want to make
+%% sense internally).
+
+%% For any given pair of processes, ack/2 and send/2 must always be
+%% called with the same credit_spec().
+
+send(From) -> send(From, ?DEFAULT_CREDIT).
+
+send(From, {InitialCredit, _MoreCreditAfter}) ->
+ ?UPDATE({credit_from, From}, InitialCredit, C,
+ if C == 1 -> block(From),
+ 0;
+ true -> C - 1
+ end).
+
+ack(To) -> ack(To, ?DEFAULT_CREDIT).
+
+ack(To, {_InitialCredit, MoreCreditAfter}) ->
+ ?UPDATE({credit_to, To}, MoreCreditAfter, C,
+ if C == 1 -> grant(To, MoreCreditAfter),
+ MoreCreditAfter;
+ true -> C - 1
+ end).
+
+handle_bump_msg({From, MoreCredit}) ->
+ ?UPDATE({credit_from, From}, 0, C,
+ if C =< 0 andalso C + MoreCredit > 0 -> unblock(From),
+ C + MoreCredit;
+ true -> C + MoreCredit
+ end).
+
+blocked() -> case get(credit_blocked) of
+ undefined -> false;
+ [] -> false;
+ _ -> true
+ end.
+
+state() -> case blocked() of
+ true -> flow;
+ false -> case get(credit_blocked_at) of
+ undefined -> running;
+ B -> Now = erlang:monotonic_time(),
+ Diff = erlang:convert_time_unit(Now - B,
+ native,
+ micro_seconds),
+ case Diff < ?STATE_CHANGE_INTERVAL of
+ true -> flow;
+ false -> running
+ end
+ end
+ end.
+
+peer_down(Peer) ->
+ %% In theory we could also remove it from credit_deferred here, but it
+ %% doesn't really matter; at some point later we will drain
+ %% credit_deferred and thus send messages into the void...
+ unblock(Peer),
+ erase({credit_from, Peer}),
+ erase({credit_to, Peer}),
+ ok.
+
+%% --------------------------------------------------------------------------
+
+grant(To, Quantity) ->
+ Msg = {bump_credit, {self(), Quantity}},
+ case blocked() of
+ false -> To ! Msg;
+ true -> ?UPDATE(credit_deferred, [], Deferred, [{To, Msg} | Deferred])
+ end.
+
+block(From) ->
+ ?TRACE_BLOCKED(self(), From),
+ case blocked() of
+ false -> put(credit_blocked_at, erlang:monotonic_time());
+ true -> ok
+ end,
+ ?UPDATE(credit_blocked, [], Blocks, [From | Blocks]).
+
+unblock(From) ->
+ ?TRACE_UNBLOCKED(self(), From),
+ ?UPDATE(credit_blocked, [], Blocks, Blocks -- [From]),
+ case blocked() of
+ false -> case erase(credit_deferred) of
+ undefined -> ok;
+ Credits -> _ = [To ! Msg || {To, Msg} <- Credits],
+ ok
+ end;
+ true -> ok
+ end.
diff --git a/deps/rabbit_common/src/delegate.erl b/deps/rabbit_common/src/delegate.erl
new file mode 100644
index 0000000000..a73d5e64b1
--- /dev/null
+++ b/deps/rabbit_common/src/delegate.erl
@@ -0,0 +1,277 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(delegate).
+
+%% delegate is an alternative way of doing remote calls. Compared to
+%% the rpc module, it reduces inter-node communication. For example,
+%% if a message is routed to 1,000 queues on node A and needs to be
+%% propagated to nodes B and C, it would be nice to avoid doing 2,000
+%% remote casts to queue processes.
+%%
+%% An important issue here is preserving order - we need to make sure
+%% that messages from a certain channel to a certain queue take a
+%% consistent route, to prevent them being reordered. In fact all
+%% AMQP-ish things (such as queue declaration results and basic.get)
+%% must take the same route as well, to ensure that clients see causal
+%% ordering correctly. Therefore we have a rather generic mechanism
+%% here rather than just a message-reflector. That's also why we pick
+%% the delegate process to use based on a hash of the source pid.
+%%
+%% When a function is invoked using delegate:invoke/2,
+%% or delegate:invoke_no_result/2 on a group of pids, the pids are first split
+%% into local and remote ones. Remote processes are then grouped by
+%% node. The function is then invoked locally and on every node (using
+%% gen_server2:multi_call/4) as many times as there are processes on that
+%% node, sequentially.
+%%
+%% Errors returned when executing functions on remote nodes are re-raised
+%% in the caller.
+%%
+%% RabbitMQ starts a pool of delegate processes on boot. The size of
+%% the pool is configurable; the aim is to make sure we don't have so
+%% few delegates that they limit performance on many-CPU machines.
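+%%
+%% A small illustrative call, fanning a check out over a mixed list of
+%% local and remote pids (each pid is prepended to the MFA's argument
+%% list, and the check runs on the node hosting that pid):
+%%
+%%   {Results, Errors} = delegate:invoke(Pids, {erlang, is_process_alive, []}),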
+
+-behaviour(gen_server2).
+
+-export([start_link/1, start_link/2, invoke_no_result/2,
+ invoke/2, invoke/3, monitor/2, monitor/3, demonitor/1]).
+
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
+ terminate/2, code_change/3]).
+
+-record(state, {node, monitors, name}).
+
+%%----------------------------------------------------------------------------
+
+-export_type([monitor_ref/0]).
+
+-type monitor_ref() :: reference() | {atom(), pid()}.
+-type fun_or_mfa(A) :: fun ((pid()) -> A) | {atom(), atom(), [any()]}.
+
+-spec start_link
+ (non_neg_integer()) -> {'ok', pid()} | ignore | {'error', any()}.
+-spec invoke
+ ( pid(), fun_or_mfa(A)) -> A;
+ ([pid()], fun_or_mfa(A)) -> {[{pid(), A}], [{pid(), term()}]}.
+-spec invoke_no_result(pid() | [pid()], fun_or_mfa(any())) -> 'ok'.
+-spec monitor('process', pid()) -> monitor_ref().
+-spec demonitor(monitor_ref()) -> 'true'.
+
+%%----------------------------------------------------------------------------
+
+-define(HIBERNATE_AFTER_MIN, 1000).
+-define(DESIRED_HIBERNATE, 10000).
+-define(DEFAULT_NAME, "delegate_").
+
+%%----------------------------------------------------------------------------
+
+start_link(Num) ->
+ start_link(?DEFAULT_NAME, Num).
+
+start_link(Name, Num) ->
+ Name1 = delegate_name(Name, Num),
+ gen_server2:start_link({local, Name1}, ?MODULE, [Name1], []).
+
+invoke(Pid, FunOrMFA) ->
+ invoke(Pid, ?DEFAULT_NAME, FunOrMFA).
+
+invoke(Pid, _Name, FunOrMFA) when is_pid(Pid) andalso node(Pid) =:= node() ->
+ apply1(FunOrMFA, Pid);
+invoke(Pid, Name, FunOrMFA) when is_pid(Pid) ->
+ case invoke([Pid], Name, FunOrMFA) of
+ {[{Pid, Result}], []} ->
+ Result;
+ {[], [{Pid, {Class, Reason, StackTrace}}]} ->
+ erlang:raise(Class, Reason, StackTrace)
+ end;
+
+invoke([], _Name, _FunOrMFA) -> %% optimisation
+ {[], []};
+invoke([Pid], _Name, FunOrMFA) when node(Pid) =:= node() -> %% optimisation
+ case safe_invoke(Pid, FunOrMFA) of
+ {ok, _, Result} -> {[{Pid, Result}], []};
+ {error, _, Error} -> {[], [{Pid, Error}]}
+ end;
+invoke(Pids, Name, FunOrMFA) when is_list(Pids) ->
+ {LocalPids, Grouped} = group_pids_by_node(Pids),
+ %% The use of multi_call is only safe because the timeout is
+ %% infinity, and thus there is no process spawned in order to do
+ %% the sending. Thus calls can't overtake preceding calls/casts.
+ {Replies, BadNodes} =
+ case maps:keys(Grouped) of
+ [] -> {[], []};
+ RemoteNodes -> gen_server2:multi_call(
+ RemoteNodes, delegate(self(), Name, RemoteNodes),
+ {invoke, FunOrMFA, Grouped}, infinity)
+ end,
+ BadPids = [{Pid, {exit, {nodedown, BadNode}, []}} ||
+ BadNode <- BadNodes,
+ Pid <- maps:get(BadNode, Grouped)],
+ ResultsNoNode = lists:append([safe_invoke(LocalPids, FunOrMFA) |
+ [Results || {_Node, Results} <- Replies]]),
+ lists:foldl(
+ fun ({ok, Pid, Result}, {Good, Bad}) -> {[{Pid, Result} | Good], Bad};
+ ({error, Pid, Error}, {Good, Bad}) -> {Good, [{Pid, Error} | Bad]}
+ end, {[], BadPids}, ResultsNoNode).
+
+monitor(process, Pid) ->
+ ?MODULE:monitor(process, Pid, ?DEFAULT_NAME).
+
+monitor(process, Pid, _Prefix) when node(Pid) =:= node() ->
+ erlang:monitor(process, Pid);
+monitor(process, Pid, Prefix) ->
+ Name = delegate(Pid, Prefix, [node(Pid)]),
+ gen_server2:cast(Name, {monitor, self(), Pid}),
+ {Name, Pid}.
+
+demonitor(Ref) when is_reference(Ref) ->
+ erlang:demonitor(Ref);
+demonitor({Name, Pid}) ->
+ gen_server2:cast(Name, {demonitor, self(), Pid}).
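+
+%% Illustrative use (handle/1 is hypothetical): the 'DOWN' message
+%% carries the monitor_ref() returned here, for both local and remote
+%% pids:
+%%
+%%   MRef = delegate:monitor(process, Pid),
+%%   receive {'DOWN', MRef, process, Pid, Reason} -> handle(Reason) end.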
+
+invoke_no_result(Pid, FunOrMFA) when is_pid(Pid) andalso node(Pid) =:= node() ->
+ %% Optimization, avoids calling invoke_no_result/3.
+ %%
+ %% This may seem like a cosmetic change at first but it actually massively reduces the memory usage in mirrored
+ %% queues when ack/nack are sent to the node that hosts a mirror.
+ %% This way binary references are not kept around unnecessarily.
+ %%
+ %% See https://github.com/rabbitmq/rabbitmq-common/issues/208#issuecomment-311308583 for a before/after
+ %% comparison.
+ _ = safe_invoke(Pid, FunOrMFA), %% we don't care about any error
+ ok;
+invoke_no_result(Pid, FunOrMFA) when is_pid(Pid) ->
+ %% Optimization, avoids calling invoke_no_result/3
+ RemoteNode = node(Pid),
+ gen_server2:abcast([RemoteNode], delegate(self(), ?DEFAULT_NAME, [RemoteNode]),
+ {invoke, FunOrMFA,
+ maps:from_list([{RemoteNode, [Pid]}])}),
+ ok;
+invoke_no_result([], _FunOrMFA) -> %% optimisation
+ ok;
+invoke_no_result([Pid], FunOrMFA) when node(Pid) =:= node() -> %% optimisation
+ _ = safe_invoke(Pid, FunOrMFA), %% must not die
+ ok;
+invoke_no_result([Pid], FunOrMFA) ->
+ RemoteNode = node(Pid),
+ gen_server2:abcast([RemoteNode], delegate(self(), ?DEFAULT_NAME, [RemoteNode]),
+ {invoke, FunOrMFA,
+ maps:from_list([{RemoteNode, [Pid]}])}),
+ ok;
+invoke_no_result(Pids, FunOrMFA) when is_list(Pids) ->
+ {LocalPids, Grouped} = group_pids_by_node(Pids),
+ case maps:keys(Grouped) of
+ [] -> ok;
+ RemoteNodes -> gen_server2:abcast(
+ RemoteNodes, delegate(self(), ?DEFAULT_NAME, RemoteNodes),
+ {invoke, FunOrMFA, Grouped})
+ end,
+ _ = safe_invoke(LocalPids, FunOrMFA), %% must not die
+ ok.
+
+%%----------------------------------------------------------------------------
+
+group_pids_by_node(Pids) ->
+ LocalNode = node(),
+ lists:foldl(
+ fun (Pid, {Local, Remote}) when node(Pid) =:= LocalNode ->
+ {[Pid | Local], Remote};
+ (Pid, {Local, Remote}) ->
+ {Local,
+ maps:update_with(
+ node(Pid), fun (List) -> [Pid | List] end, [Pid], Remote)}
+ end, {[], maps:new()}, Pids).
+
+delegate_name(Name, Hash) ->
+ list_to_atom(Name ++ integer_to_list(Hash)).
+
+delegate(Pid, Prefix, RemoteNodes) ->
+ case get(delegate) of
+ undefined -> Name = delegate_name(Prefix,
+ erlang:phash2(Pid,
+ delegate_sup:count(RemoteNodes, Prefix))),
+ put(delegate, Name),
+ Name;
+ Name -> Name
+ end.
+
+safe_invoke(Pids, FunOrMFA) when is_list(Pids) ->
+ [safe_invoke(Pid, FunOrMFA) || Pid <- Pids];
+safe_invoke(Pid, FunOrMFA) when is_pid(Pid) ->
+ try
+ {ok, Pid, apply1(FunOrMFA, Pid)}
+ catch Class:Reason:Stacktrace ->
+ {error, Pid, {Class, Reason, Stacktrace}}
+ end.
+
+apply1({M, F, A}, Arg) -> apply(M, F, [Arg | A]);
+apply1(Fun, Arg) -> Fun(Arg).
+
+%%----------------------------------------------------------------------------
+
+init([Name]) ->
+ {ok, #state{node = node(), monitors = dict:new(), name = Name}, hibernate,
+ {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, ?DESIRED_HIBERNATE}}.
+
+handle_call({invoke, FunOrMFA, Grouped}, _From, State = #state{node = Node}) ->
+ {reply, safe_invoke(maps:get(Node, Grouped), FunOrMFA), State,
+ hibernate}.
+
+handle_cast({monitor, MonitoringPid, Pid},
+ State = #state{monitors = Monitors}) ->
+ Monitors1 = case dict:find(Pid, Monitors) of
+ {ok, {Ref, Pids}} ->
+ Pids1 = gb_sets:add_element(MonitoringPid, Pids),
+ dict:store(Pid, {Ref, Pids1}, Monitors);
+ error ->
+ Ref = erlang:monitor(process, Pid),
+ Pids = gb_sets:singleton(MonitoringPid),
+ dict:store(Pid, {Ref, Pids}, Monitors)
+ end,
+ {noreply, State#state{monitors = Monitors1}, hibernate};
+
+handle_cast({demonitor, MonitoringPid, Pid},
+ State = #state{monitors = Monitors}) ->
+ Monitors1 = case dict:find(Pid, Monitors) of
+ {ok, {Ref, Pids}} ->
+ Pids1 = gb_sets:del_element(MonitoringPid, Pids),
+ case gb_sets:is_empty(Pids1) of
+ true -> erlang:demonitor(Ref),
+ dict:erase(Pid, Monitors);
+ false -> dict:store(Pid, {Ref, Pids1}, Monitors)
+ end;
+ error ->
+ Monitors
+ end,
+ {noreply, State#state{monitors = Monitors1}, hibernate};
+
+handle_cast({invoke, FunOrMFA, Grouped}, State = #state{node = Node}) ->
+ _ = safe_invoke(maps:get(Node, Grouped), FunOrMFA),
+ {noreply, State, hibernate}.
+
+handle_info({'DOWN', Ref, process, Pid, Info},
+ State = #state{monitors = Monitors, name = Name}) ->
+ {noreply,
+ case dict:find(Pid, Monitors) of
+ {ok, {Ref, Pids}} ->
+ Msg = {'DOWN', {Name, Pid}, process, Pid, Info},
+ gb_sets:fold(fun (MonitoringPid, _) -> MonitoringPid ! Msg end,
+ none, Pids),
+ State#state{monitors = dict:erase(Pid, Monitors)};
+ error ->
+ State
+ end, hibernate};
+
+handle_info(_Info, State) ->
+ {noreply, State, hibernate}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
diff --git a/deps/rabbit_common/src/delegate_sup.erl b/deps/rabbit_common/src/delegate_sup.erl
new file mode 100644
index 0000000000..b92e1eaa46
--- /dev/null
+++ b/deps/rabbit_common/src/delegate_sup.erl
@@ -0,0 +1,55 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(delegate_sup).
+
+-behaviour(supervisor).
+
+-export([start_link/1, start_link/2, count/1, count/2, sup_name/1]).
+
+-export([init/1]).
+
+-define(SERVER, "delegate_").
+
+%%----------------------------------------------------------------------------
+
+-spec start_link(integer()) -> rabbit_types:ok_pid_or_error().
+-spec start_link(integer(), string()) -> rabbit_types:ok_pid_or_error().
+-spec count([node()]) -> integer().
+
+%%----------------------------------------------------------------------------
+
+sup_name(Prefix) ->
+ list_to_atom(Prefix ++ "sup").
+
+start_link(Count, Prefix) ->
+    supervisor:start_link({local, sup_name(Prefix)}, ?MODULE, [Count, Prefix]).
+
+start_link(Count) ->
+ start_link(Count, ?SERVER).
+
+count(Nodes) ->
+ count(Nodes, ?SERVER).
+
+count([], _) ->
+ 1;
+count([Node | Nodes], Prefix) ->
+ try
+ length(supervisor:which_children({sup_name(Prefix), Node}))
+ catch exit:{{R, _}, _} when R =:= nodedown; R =:= shutdown ->
+ count(Nodes, Prefix);
+ exit:{R, _} when R =:= noproc; R =:= normal; R =:= shutdown;
+ R =:= nodedown ->
+ count(Nodes, Prefix)
+ end.
+
+%%----------------------------------------------------------------------------
+
+init([Count, Name]) ->
+ {ok, {{one_for_one, 10, 10},
+ [{Num, {delegate, start_link, [Name, Num]},
+ transient, 16#ffffffff, worker, [delegate]} ||
+ Num <- lists:seq(0, Count - 1)]}}.
diff --git a/deps/rabbit_common/src/file_handle_cache.erl b/deps/rabbit_common/src/file_handle_cache.erl
new file mode 100644
index 0000000000..9220f40ce4
--- /dev/null
+++ b/deps/rabbit_common/src/file_handle_cache.erl
@@ -0,0 +1,1564 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(file_handle_cache).
+
+%% A File Handle Cache
+%%
+%% This extends a subset of the functionality of the Erlang file
+%% module. In the below, we use "file handle" to specifically refer to
+%% file handles, and "file descriptor" to refer to descriptors which
+%% are not file handles, e.g. sockets.
+%%
+%% Some constraints
+%% 1) This supports one writer, multiple readers per file. Nothing
+%% else.
+%% 2) Do not open the same file from different processes. Bad things
+%% may happen, especially for writes.
+%% 3) Writes are all appends. You cannot write to the middle of a
+%% file, although you can truncate and then append if you want.
+%% 4) There are read and write buffers. Feel free to use the read_ahead
+%% mode, but beware of the interaction between that buffer and the write
+%% buffer.
+%%
+%% Some benefits
+%% 1) You do not have to remember to call sync before close
+%% 2) Buffering is much more flexible than with the plain file module,
+%% and you can control when the buffer gets flushed out. This means
+%% that you can rely on reads-after-writes working, without having to
+%% call the expensive sync.
+%% 3) Unnecessary calls to position and sync get optimised out.
+%% 4) You can find out what your 'real' offset is, and what your
+%% 'virtual' offset is (i.e. where the hdl really is, and where it
+%% would be after the write buffer is written out).
+%%
+%% There is also a server component which serves to limit the number
+%% of open file descriptors. This is a hard limit: the server
+%% component will ensure that clients do not have more file
+%% descriptors open than it's configured to allow.
+%%
+%% On open, the client requests permission from the server to open the
+%% required number of file handles. The server may ask the client to
+%% close other file handles that it has open, or it may queue the
+%% request and ask other clients to close file handles they have open
+%% in order to satisfy the request. Requests are always satisfied in
+%% the order they arrive, even if a later request (for a small number
+%% of file handles) can be satisfied before an earlier request (for a
+%% larger number of file handles). On close, the client sends a
+%% message to the server. These messages allow the server to keep
+%% track of the number of open handles. The client also keeps a
+%% gb_tree which is updated on every use of a file handle, mapping the
+%% time at which the file handle was last used (timestamp) to the
+%% handle. Thus the smallest key in this tree maps to the file handle
+%% that has not been used for the longest amount of time. This
+%% smallest key is included in the messages to the server. As such,
+%% the server keeps track of when the least recently used file handle
+%% was used *at the point of the most recent open or close* by each
+%% client.
+%%
+%% Note that this data can become very stale, because the client may
+%% keep using (and so refreshing) its least recently used handle without
+%% informing the server.
+%%
+%% When the limit is exceeded (i.e. the number of open file handles is
+%% at the limit and there are pending 'open' requests), the server
+%% calculates the average age of the last reported least recently used
+%% file handle of all the clients. It then tells all the clients to
+%% close any handles not used for longer than this average, by
+%% invoking the callback the client registered. The client should
+%% receive this message and pass it into
+%% set_maximum_since_use/1. However, it is entirely possible this age
+%% will be greater than the ages of all the handles the client knows
+%% of, because the client has used its file handles in the
+%% meantime. Thus at this point the client reports to the server the
+%% current timestamp at which its least recently used file handle was
+%% last used. The server will check two seconds later that either it
+%% is back under the limit, in which case all is well again, or if
+%% not, it will calculate a new average age. Its data will be much
+%% more recent now, and so it is very likely that when this is
+%% communicated to the clients, the clients will close file handles.
+%% (In extreme cases, where it's very likely that all clients have
+%% used their open handles since they last sent in an update, which
+%% would mean that the average will never cause any file handles to
+%% be closed, the server can send out an average age of 0, resulting
+%% in all available clients closing all their file handles.)
+%%
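+%% For example (a sketch; `my_owner' and `fhc_age_out' are hypothetical
+%% names), a handle-owning gen_server2 process might register:
+%%
+%%   ok = file_handle_cache:register_callback(my_owner, fhc_age_out,
+%%                                            [self()]),
+%%
+%% with the callback forwarding the age back into the owning process,
+%% since set_maximum_since_use/1 acts on the process dictionary of the
+%% process that owns the handles:
+%%
+%%   fhc_age_out(Pid, Age) -> gen_server2:cast(Pid, {fhc_age_out, Age}).
+%%
+%%   handle_cast({fhc_age_out, Age}, State) ->
+%%       ok = file_handle_cache:set_maximum_since_use(Age),
+%%       {noreply, State};
+%%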
+%% Care is taken to ensure that (a) processes which are blocked
+%% waiting for file descriptors to become available are not sent
+%% requests to close file handles; and (b) given it is known how many
+%% file handles a process has open, when the average age is forced to
+%% 0, close messages are only sent to enough processes to release the
+%% correct number of file handles and the list of processes is
+%% randomly shuffled. This ensures we don't cause processes to
+%% needlessly close file handles, and ensures that we don't always
+%% make such requests of the same processes.
+%%
+%% The advantage of this scheme is that there is only communication
+%% from the client to the server on open, close, and when in the
+%% process of trying to reduce file handle usage. There is no
+%% communication from the client to the server on normal file handle
+%% operations. This scheme forms a feedback loop - the server does
+%% not care which file handles are closed, just that some are, and it
+%% checks this repeatedly when over the limit.
+%%
+%% Handles which are closed as a result of the server are put into a
+%% "soft-closed" state in which the handle is closed (data flushed out
+%% and sync'd first) but the state is maintained. The handle will be
+%% fully reopened again as soon as needed, thus users of this library
+%% do not need to worry about their handles being closed by the server
+%% - reopening them when necessary is handled transparently.
+%%
+%% The server also supports obtain, release and transfer. obtain/{0,1}
+%% blocks until a file descriptor is available, at which point the
+%% requesting process is considered to 'own' more descriptor(s).
+%% release/{0,1} is the inverse operation and releases previously obtained
+%% descriptor(s). transfer/{1,2} transfers ownership of file descriptor(s)
+%% between processes. It is non-blocking. Obtain has a lower limit
+%% than the overall one, set by the ?OBTAIN_LIMIT/1 macro. File handles
+%% can use the entire limit, but will be evicted by obtain calls up to
+%% the point at which no more obtain calls can be satisfied by the
+%% obtain limit. Thus there will always be some capacity available for file
+%% handles. Processes that use obtain are never asked to return them,
+%% and they are not managed in any way by the server. It is simply a
+%% mechanism to ensure that processes that need file descriptors such
+%% as sockets can do so in such a way that the overall number of open
+%% file descriptors is managed.
+%%
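+%% For example (a sketch; the listener socket and connection process
+%% are illustrative), a TCP acceptor might do:
+%%
+%%   ok = file_handle_cache:obtain(),          %% may block on the limit
+%%   {ok, Sock} = gen_tcp:accept(LSock),
+%%   ok = gen_tcp:controlling_process(Sock, ConnPid),
+%%   ok = file_handle_cache:transfer(ConnPid), %% accounting follows the socket
+%%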
+%% The callers of register_callback/3, obtain, and the argument of
+%% transfer are monitored, reducing the count of handles in use
+%% appropriately when the processes terminate.
+
+-behaviour(gen_server2).
+
+-export([register_callback/3]).
+-export([open/3, close/1, read/2, append/2, needs_sync/1, sync/1, position/2,
+ truncate/1, current_virtual_offset/1, current_raw_offset/1, flush/1,
+ copy/3, set_maximum_since_use/1, delete/1, clear/1,
+ open_with_absolute_path/3]).
+-export([obtain/0, obtain/1, release/0, release/1, transfer/1, transfer/2,
+ set_limit/1, get_limit/0, info_keys/0, with_handle/1, with_handle/2,
+ info/0, info/1, clear_read_cache/0, clear_process_read_cache/0]).
+-export([set_reservation/0, set_reservation/1, release_reservation/0]).
+-export([ulimit/0]).
+
+-export([start_link/0, start_link/2, init/1, handle_call/3, handle_cast/2,
+ handle_info/2, terminate/2, code_change/3, prioritise_cast/3]).
+
+-define(SERVER, ?MODULE).
+%% Reserve 3 handles for ra usage: wal, segment writer and a dets table
+-define(RESERVED_FOR_OTHERS, 100 + 3).
+
+-define(FILE_HANDLES_LIMIT_OTHER, 1024).
+-define(FILE_HANDLES_CHECK_INTERVAL, 2000).
+
+-define(OBTAIN_LIMIT(LIMIT), trunc((LIMIT * 0.9) - 2)).
+-define(CLIENT_ETS_TABLE, file_handle_cache_client).
+-define(ELDERS_ETS_TABLE, file_handle_cache_elders).
+
+%%----------------------------------------------------------------------------
+
+-record(file,
+ { reader_count,
+ has_writer
+ }).
+
+-record(handle,
+ { hdl,
+ ref,
+ offset,
+ is_dirty,
+ write_buffer_size,
+ write_buffer_size_limit,
+ write_buffer,
+ read_buffer,
+ read_buffer_pos,
+ read_buffer_rem, %% Num of bytes from pos to end
+ read_buffer_size, %% Next size of read buffer to use
+ read_buffer_size_limit, %% Max size of read buffer to use
+ read_buffer_usage, %% Bytes we have read from it, for tuning
+ at_eof,
+ path,
+ mode,
+ options,
+ is_write,
+ is_read,
+ last_used_at
+ }).
+
+-record(fhc_state,
+ { elders,
+ limit,
+ open_count,
+ open_pending,
+ obtain_limit, %% applies to sockets only
+ obtain_count_socket,
+ obtain_count_file,
+ obtain_pending_socket,
+ obtain_pending_file,
+ clients,
+ timer_ref,
+ alarm_set,
+ alarm_clear,
+ reserve_count_socket,
+ reserve_count_file
+ }).
+
+-record(cstate,
+ { pid,
+ callback,
+ opened,
+ obtained_socket,
+ obtained_file,
+ blocked,
+ pending_closes,
+ reserved_socket,
+ reserved_file
+ }).
+
+-record(pending,
+ { kind,
+ pid,
+ requested,
+ from
+ }).
+
+%%----------------------------------------------------------------------------
+%% Specs
+%%----------------------------------------------------------------------------
+
+-type ref() :: any().
+-type ok_or_error() :: 'ok' | {'error', any()}.
+-type val_or_error(T) :: {'ok', T} | {'error', any()}.
+-type position() :: ('bof' | 'eof' | non_neg_integer() |
+ {('bof' |'eof'), non_neg_integer()} |
+ {'cur', integer()}).
+-type offset() :: non_neg_integer().
+
+-spec register_callback(atom(), atom(), [any()]) -> 'ok'.
+-spec open
+ (file:filename(), [any()],
+ [{'write_buffer', (non_neg_integer() | 'infinity' | 'unbuffered')} |
+ {'read_buffer', (non_neg_integer() | 'unbuffered')}]) ->
+ val_or_error(ref()).
+-spec open_with_absolute_path
+ (file:filename(), [any()],
+ [{'write_buffer', (non_neg_integer() | 'infinity' | 'unbuffered')} |
+ {'read_buffer', (non_neg_integer() | 'unbuffered')}]) ->
+ val_or_error(ref()).
+-spec close(ref()) -> ok_or_error().
+-spec read
+ (ref(), non_neg_integer()) -> val_or_error([char()] | binary()) | 'eof'.
+-spec append(ref(), iodata()) -> ok_or_error().
+-spec sync(ref()) -> ok_or_error().
+-spec position(ref(), position()) -> val_or_error(offset()).
+-spec truncate(ref()) -> ok_or_error().
+-spec current_virtual_offset(ref()) -> val_or_error(offset()).
+-spec current_raw_offset(ref()) -> val_or_error(offset()).
+-spec flush(ref()) -> ok_or_error().
+-spec copy(ref(), ref(), non_neg_integer()) -> val_or_error(non_neg_integer()).
+-spec delete(ref()) -> ok_or_error().
+-spec clear(ref()) -> ok_or_error().
+-spec set_maximum_since_use(non_neg_integer()) -> 'ok'.
+-spec obtain() -> 'ok'.
+-spec obtain(non_neg_integer()) -> 'ok'.
+-spec release() -> 'ok'.
+-spec release(non_neg_integer()) -> 'ok'.
+-spec transfer(pid()) -> 'ok'.
+-spec transfer(pid(), non_neg_integer()) -> 'ok'.
+-spec with_handle(fun(() -> A)) -> A.
+-spec with_handle(non_neg_integer(), fun(() -> A)) -> A.
+-spec set_limit(non_neg_integer()) -> 'ok'.
+-spec get_limit() -> non_neg_integer().
+-spec info_keys() -> rabbit_types:info_keys().
+-spec info() -> rabbit_types:infos().
+-spec info([atom()]) -> rabbit_types:infos().
+-spec ulimit() -> 'unknown' | non_neg_integer().
+
+%%----------------------------------------------------------------------------
+-define(INFO_KEYS, [total_limit, total_used, sockets_limit, sockets_used]).
+
+%%----------------------------------------------------------------------------
+%% Public API
+%%----------------------------------------------------------------------------
+
+start_link() ->
+ start_link(fun alarm_handler:set_alarm/1, fun alarm_handler:clear_alarm/1).
+
+start_link(AlarmSet, AlarmClear) ->
+ gen_server2:start_link({local, ?SERVER}, ?MODULE, [AlarmSet, AlarmClear],
+ [{timeout, infinity}]).
+
+register_callback(M, F, A)
+ when is_atom(M) andalso is_atom(F) andalso is_list(A) ->
+ gen_server2:cast(?SERVER, {register_callback, self(), {M, F, A}}).
+
+open(Path, Mode, Options) ->
+ open_with_absolute_path(filename:absname(Path), Mode, Options).
+
+open_with_absolute_path(Path, Mode, Options) ->
+ File1 = #file { reader_count = RCount, has_writer = HasWriter } =
+ case get({Path, fhc_file}) of
+ File = #file {} -> File;
+ undefined -> #file { reader_count = 0,
+ has_writer = false }
+ end,
+ Mode1 = append_to_write(Mode),
+ IsWriter = is_writer(Mode1),
+ case IsWriter andalso HasWriter of
+ true -> {error, writer_exists};
+ false -> {ok, Ref} = new_closed_handle(Path, Mode1, Options),
+ case get_or_reopen_timed([{Ref, new}]) of
+ {ok, [_Handle1]} ->
+ RCount1 = case is_reader(Mode1) of
+ true -> RCount + 1;
+ false -> RCount
+ end,
+ HasWriter1 = HasWriter orelse IsWriter,
+ put({Path, fhc_file},
+ File1 #file { reader_count = RCount1,
+ has_writer = HasWriter1 }),
+ {ok, Ref};
+ Error ->
+ erase({Ref, fhc_handle}),
+ Error
+ end
+ end.
+
+close(Ref) ->
+ case erase({Ref, fhc_handle}) of
+ undefined -> ok;
+ Handle -> case hard_close(Handle) of
+ ok -> ok;
+ {Error, Handle1} -> put_handle(Ref, Handle1),
+ Error
+ end
+ end.
+
+read(Ref, Count) ->
+ with_flushed_handles(
+ [Ref], keep,
+ fun ([#handle { is_read = false }]) ->
+ {error, not_open_for_reading};
+ ([#handle{read_buffer_size_limit = 0,
+ hdl = Hdl, offset = Offset} = Handle]) ->
+ %% The read buffer is disabled. This is just an
+ %% optimization: the clauses below can handle this case.
+ case prim_file_read(Hdl, Count) of
+ {ok, Data} -> {{ok, Data},
+ [Handle#handle{offset = Offset+size(Data)}]};
+ eof -> {eof, [Handle #handle { at_eof = true }]};
+ Error -> {Error, [Handle]}
+ end;
+ ([Handle = #handle{read_buffer = Buf,
+ read_buffer_pos = BufPos,
+ read_buffer_rem = BufRem,
+ read_buffer_usage = BufUsg,
+ offset = Offset}])
+ when BufRem >= Count ->
+ <<_:BufPos/binary, Res:Count/binary, _/binary>> = Buf,
+ {{ok, Res}, [Handle#handle{offset = Offset + Count,
+ read_buffer_pos = BufPos + Count,
+ read_buffer_rem = BufRem - Count,
+ read_buffer_usage = BufUsg + Count }]};
+ ([Handle0]) ->
+ maybe_reduce_read_cache([Ref]),
+ Handle = #handle{read_buffer = Buf,
+ read_buffer_pos = BufPos,
+ read_buffer_rem = BufRem,
+ read_buffer_size = BufSz,
+ hdl = Hdl,
+ offset = Offset}
+ = tune_read_buffer_limit(Handle0, Count),
+ WantedCount = Count - BufRem,
+ case prim_file_read(Hdl, max(BufSz, WantedCount)) of
+ {ok, Data} ->
+ <<_:BufPos/binary, BufTl/binary>> = Buf,
+ ReadCount = size(Data),
+ case ReadCount < WantedCount of
+ true ->
+ OffSet1 = Offset + BufRem + ReadCount,
+ {{ok, <<BufTl/binary, Data/binary>>},
+ [reset_read_buffer(
+ Handle#handle{offset = OffSet1})]};
+ false ->
+ <<Hd:WantedCount/binary, _/binary>> = Data,
+ OffSet1 = Offset + BufRem + WantedCount,
+ BufRem1 = ReadCount - WantedCount,
+ {{ok, <<BufTl/binary, Hd/binary>>},
+ [Handle#handle{offset = OffSet1,
+ read_buffer = Data,
+ read_buffer_pos = WantedCount,
+ read_buffer_rem = BufRem1,
+ read_buffer_usage = WantedCount}]}
+ end;
+ eof ->
+ {eof, [Handle #handle { at_eof = true }]};
+ Error ->
+ {Error, [reset_read_buffer(Handle)]}
+ end
+ end).
+
+append(Ref, Data) ->
+ with_handles(
+ [Ref],
+ fun ([#handle { is_write = false }]) ->
+ {error, not_open_for_writing};
+ ([Handle]) ->
+ case maybe_seek(eof, Handle) of
+ {{ok, _Offset}, #handle { hdl = Hdl, offset = Offset,
+ write_buffer_size_limit = 0,
+ at_eof = true } = Handle1} ->
+ Offset1 = Offset + iolist_size(Data),
+ {prim_file_write(Hdl, Data),
+ [Handle1 #handle { is_dirty = true, offset = Offset1 }]};
+ {{ok, _Offset}, #handle { write_buffer = WriteBuffer,
+ write_buffer_size = Size,
+ write_buffer_size_limit = Limit,
+ at_eof = true } = Handle1} ->
+ WriteBuffer1 = [Data | WriteBuffer],
+ Size1 = Size + iolist_size(Data),
+ Handle2 = Handle1 #handle { write_buffer = WriteBuffer1,
+ write_buffer_size = Size1 },
+ case Limit =/= infinity andalso Size1 > Limit of
+ true -> {Result, Handle3} = write_buffer(Handle2),
+ {Result, [Handle3]};
+ false -> {ok, [Handle2]}
+ end;
+ {{error, _} = Error, Handle1} ->
+ {Error, [Handle1]}
+ end
+ end).
+
+sync(Ref) ->
+ with_flushed_handles(
+ [Ref], keep,
+ fun ([#handle { is_dirty = false, write_buffer = [] }]) ->
+ ok;
+ ([Handle = #handle { hdl = Hdl,
+ is_dirty = true, write_buffer = [] }]) ->
+ case prim_file_sync(Hdl) of
+ ok -> {ok, [Handle #handle { is_dirty = false }]};
+ Error -> {Error, [Handle]}
+ end
+ end).
+
+needs_sync(Ref) ->
+ %% This must *not* use with_handles/2; see bug 25052
+ case get({Ref, fhc_handle}) of
+ #handle { is_dirty = false, write_buffer = [] } -> false;
+ #handle {} -> true
+ end.
+
+position(Ref, NewOffset) ->
+ with_flushed_handles(
+ [Ref], keep,
+ fun ([Handle]) -> {Result, Handle1} = maybe_seek(NewOffset, Handle),
+ {Result, [Handle1]}
+ end).
+
+truncate(Ref) ->
+ with_flushed_handles(
+ [Ref],
+ fun ([Handle1 = #handle { hdl = Hdl }]) ->
+ case prim_file:truncate(Hdl) of
+ ok -> {ok, [Handle1 #handle { at_eof = true }]};
+ Error -> {Error, [Handle1]}
+ end
+ end).
+
+current_virtual_offset(Ref) ->
+ with_handles([Ref], fun ([#handle { at_eof = true, is_write = true,
+ offset = Offset,
+ write_buffer_size = Size }]) ->
+ {ok, Offset + Size};
+ ([#handle { offset = Offset }]) ->
+ {ok, Offset}
+ end).
+
+current_raw_offset(Ref) ->
+ with_handles([Ref], fun ([Handle]) -> {ok, Handle #handle.offset} end).
+
+flush(Ref) ->
+ with_flushed_handles([Ref], fun ([Handle]) -> {ok, [Handle]} end).
+
+copy(Src, Dest, Count) ->
+ with_flushed_handles(
+ [Src, Dest],
+ fun ([SHandle = #handle { is_read = true, hdl = SHdl, offset = SOffset },
+ DHandle = #handle { is_write = true, hdl = DHdl, offset = DOffset }]
+ ) ->
+ case prim_file:copy(SHdl, DHdl, Count) of
+ {ok, Count1} = Result1 ->
+ {Result1,
+ [SHandle #handle { offset = SOffset + Count1 },
+ DHandle #handle { offset = DOffset + Count1,
+ is_dirty = true }]};
+ Error ->
+ {Error, [SHandle, DHandle]}
+ end;
+ (_Handles) ->
+ {error, incorrect_handle_modes}
+ end).
+
+delete(Ref) ->
+ case erase({Ref, fhc_handle}) of
+ undefined ->
+ ok;
+ Handle = #handle { path = Path } ->
+ case hard_close(Handle #handle { is_dirty = false,
+ write_buffer = [] }) of
+ ok -> prim_file:delete(Path);
+ {Error, Handle1} -> put_handle(Ref, Handle1),
+ Error
+ end
+ end.
+
+clear(Ref) ->
+ with_handles(
+ [Ref],
+ fun ([#handle { at_eof = true, write_buffer_size = 0, offset = 0 }]) ->
+ ok;
+ ([Handle]) ->
+ case maybe_seek(bof, Handle#handle{write_buffer = [],
+ write_buffer_size = 0}) of
+ {{ok, 0}, Handle1 = #handle { hdl = Hdl }} ->
+ case prim_file:truncate(Hdl) of
+ ok -> {ok, [Handle1 #handle { at_eof = true }]};
+ Error -> {Error, [Handle1]}
+ end;
+ {{error, _} = Error, Handle1} ->
+ {Error, [Handle1]}
+ end
+ end).
+
+set_maximum_since_use(MaximumAge) ->
+ Now = erlang:monotonic_time(),
+ case lists:foldl(
+ fun ({{Ref, fhc_handle},
+ Handle = #handle { hdl = Hdl, last_used_at = Then }}, Rep) ->
+ case Hdl =/= closed andalso
+ erlang:convert_time_unit(Now - Then,
+ native,
+ micro_seconds)
+ >= MaximumAge of
+ true -> soft_close(Ref, Handle) orelse Rep;
+ false -> Rep
+ end;
+ (_KeyValuePair, Rep) ->
+ Rep
+ end, false, get()) of
+ false -> age_tree_change(), ok;
+ true -> ok
+ end.
+
+obtain() -> obtain(1).
+set_reservation() -> set_reservation(1).
+release() -> release(1).
+release_reservation() -> release_reservation(file).
+transfer(Pid) -> transfer(Pid, 1).
+
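+%% Note: obtain/1 and release/1 count against the socket limit by
+%% default, whereas set_reservation/1 counts against the file limit;
+%% the typed variants make the choice explicit.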
+obtain(Count) -> obtain(Count, socket).
+set_reservation(Count) -> set_reservation(Count, file).
+release(Count) -> release(Count, socket).
+
+with_handle(Fun) ->
+ with_handle(1, Fun).
+
+with_handle(N, Fun) ->
+ ok = obtain(N, file),
+ try Fun()
+ after ok = release(N, file)
+ end.
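+
+%% Example (a sketch; Path is illustrative): run a one-off filesystem
+%% operation while holding a file-handle slot, keeping the server's
+%% accounting accurate:
+%%
+%%   Res = file_handle_cache:with_handle(
+%%           fun () -> file:read_file_info(Path) end)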
+
+obtain(Count, Type) when Count > 0 ->
+ %% If the FHC isn't running, obtains succeed immediately.
+ case whereis(?SERVER) of
+ undefined -> ok;
+ _ -> gen_server2:call(
+ ?SERVER, {obtain, Count, Type, self()}, infinity)
+ end.
+
+set_reservation(Count, Type) when Count > 0 ->
+ %% If the FHC isn't running, reservations succeed immediately.
+ case whereis(?SERVER) of
+ undefined -> ok;
+ _ -> gen_server2:cast(?SERVER, {set_reservation, Count, Type, self()})
+ end.
+
+release(Count, Type) when Count > 0 ->
+ gen_server2:cast(?SERVER, {release, Count, Type, self()}).
+
+release_reservation(Type) ->
+ gen_server2:cast(?SERVER, {release_reservation, Type, self()}).
+
+transfer(Pid, Count) when Count > 0 ->
+ gen_server2:cast(?SERVER, {transfer, Count, self(), Pid}).
+
+set_limit(Limit) ->
+ gen_server2:call(?SERVER, {set_limit, Limit}, infinity).
+
+get_limit() ->
+ gen_server2:call(?SERVER, get_limit, infinity).
+
+info_keys() -> ?INFO_KEYS.
+
+info() -> info(?INFO_KEYS).
+info(Items) -> gen_server2:call(?SERVER, {info, Items}, infinity).
+
+clear_read_cache() ->
+ gen_server2:cast(?SERVER, clear_read_cache).
+
+clear_process_read_cache() ->
+ [
+ begin
+ Handle1 = reset_read_buffer(Handle),
+ put({Ref, fhc_handle}, Handle1)
+ end ||
+ {{Ref, fhc_handle}, Handle} <- get(),
+ size(Handle#handle.read_buffer) > 0
+ ].
+
+%%----------------------------------------------------------------------------
+%% Internal functions
+%%----------------------------------------------------------------------------
+
+prim_file_read(Hdl, Size) ->
+ file_handle_cache_stats:update(
+ io_read, Size, fun() -> prim_file:read(Hdl, Size) end).
+
+prim_file_write(Hdl, Bytes) ->
+ file_handle_cache_stats:update(
+ io_write, iolist_size(Bytes), fun() -> prim_file:write(Hdl, Bytes) end).
+
+prim_file_sync(Hdl) ->
+ file_handle_cache_stats:update(io_sync, fun() -> prim_file:sync(Hdl) end).
+
+prim_file_position(Hdl, NewOffset) ->
+ file_handle_cache_stats:update(
+ io_seek, fun() -> prim_file:position(Hdl, NewOffset) end).
+
+is_reader(Mode) -> lists:member(read, Mode).
+
+is_writer(Mode) -> lists:member(write, Mode).
+
+append_to_write(Mode) ->
+ case lists:member(append, Mode) of
+ true -> [write | Mode -- [append, write]];
+ false -> Mode
+ end.
+
+with_handles(Refs, Fun) ->
+ with_handles(Refs, reset, Fun).
+
+with_handles(Refs, ReadBuffer, Fun) ->
+ case get_or_reopen_timed([{Ref, reopen} || Ref <- Refs]) of
+ {ok, Handles0} ->
+ Handles = case ReadBuffer of
+ reset -> [reset_read_buffer(H) || H <- Handles0];
+ keep -> Handles0
+ end,
+ case Fun(Handles) of
+ {Result, Handles1} when is_list(Handles1) ->
+ _ = lists:zipwith(fun put_handle/2, Refs, Handles1),
+ Result;
+ Result ->
+ Result
+ end;
+ Error ->
+ Error
+ end.
+
+with_flushed_handles(Refs, Fun) ->
+ with_flushed_handles(Refs, reset, Fun).
+
+with_flushed_handles(Refs, ReadBuffer, Fun) ->
+ with_handles(
+ Refs, ReadBuffer,
+ fun (Handles) ->
+ case lists:foldl(
+ fun (Handle, {ok, HandlesAcc}) ->
+ {Res, Handle1} = write_buffer(Handle),
+ {Res, [Handle1 | HandlesAcc]};
+ (Handle, {Error, HandlesAcc}) ->
+ {Error, [Handle | HandlesAcc]}
+ end, {ok, []}, Handles) of
+ {ok, Handles1} ->
+ Fun(lists:reverse(Handles1));
+ {Error, Handles1} ->
+ {Error, lists:reverse(Handles1)}
+ end
+ end).
+
+get_or_reopen_timed(RefNewOrReopens) ->
+ file_handle_cache_stats:update(
+ io_file_handle_open_attempt, fun() -> get_or_reopen(RefNewOrReopens) end).
+
+get_or_reopen(RefNewOrReopens) ->
+ case partition_handles(RefNewOrReopens) of
+ {OpenHdls, []} ->
+ {ok, [Handle || {_Ref, Handle} <- OpenHdls]};
+ {OpenHdls, ClosedHdls} ->
+ Oldest = oldest(get_age_tree(),
+ fun () -> erlang:monotonic_time() end),
+ case gen_server2:call(?SERVER, {open, self(), length(ClosedHdls),
+ Oldest}, infinity) of
+ ok ->
+ case reopen(ClosedHdls) of
+ {ok, RefHdls} -> sort_handles(RefNewOrReopens,
+ OpenHdls, RefHdls, []);
+ Error -> Error
+ end;
+ close ->
+ [soft_close(Ref, Handle) ||
+ {{Ref, fhc_handle}, Handle = #handle { hdl = Hdl }} <-
+ get(),
+ Hdl =/= closed],
+ get_or_reopen(RefNewOrReopens)
+ end
+ end.
+
+reopen(ClosedHdls) -> reopen(ClosedHdls, get_age_tree(), []).
+
+reopen([], Tree, RefHdls) ->
+ put_age_tree(Tree),
+ {ok, lists:reverse(RefHdls)};
+reopen([{Ref, NewOrReopen, Handle = #handle { hdl = closed,
+ path = Path,
+ mode = Mode0,
+ offset = Offset,
+ last_used_at = undefined }} |
+ RefNewOrReopenHdls] = ToOpen, Tree, RefHdls) ->
+ Mode = case NewOrReopen of
+ new -> Mode0;
+ reopen -> file_handle_cache_stats:update(io_reopen),
+ [read | Mode0]
+ end,
+ case prim_file:open(Path, Mode) of
+ {ok, Hdl} ->
+ Now = erlang:monotonic_time(),
+ {{ok, _Offset}, Handle1} =
+ maybe_seek(Offset, reset_read_buffer(
+ Handle#handle{hdl = Hdl,
+ offset = 0,
+ last_used_at = Now})),
+ put({Ref, fhc_handle}, Handle1),
+ reopen(RefNewOrReopenHdls, gb_trees:insert({Now, Ref}, true, Tree),
+ [{Ref, Handle1} | RefHdls]);
+ Error ->
+ %% NB: none of the handles in ToOpen are in the age tree
+ Oldest = oldest(Tree, fun () -> undefined end),
+ [gen_server2:cast(?SERVER, {close, self(), Oldest}) || _ <- ToOpen],
+ put_age_tree(Tree),
+ Error
+ end.
+
+partition_handles(RefNewOrReopens) ->
+ lists:foldr(
+ fun ({Ref, NewOrReopen}, {Open, Closed}) ->
+ case get({Ref, fhc_handle}) of
+ #handle { hdl = closed } = Handle ->
+ {Open, [{Ref, NewOrReopen, Handle} | Closed]};
+ #handle {} = Handle ->
+ {[{Ref, Handle} | Open], Closed}
+ end
+ end, {[], []}, RefNewOrReopens).
+
+sort_handles([], [], [], Acc) ->
+ {ok, lists:reverse(Acc)};
+sort_handles([{Ref, _} | RefHdls], [{Ref, Handle} | RefHdlsA], RefHdlsB, Acc) ->
+ sort_handles(RefHdls, RefHdlsA, RefHdlsB, [Handle | Acc]);
+sort_handles([{Ref, _} | RefHdls], RefHdlsA, [{Ref, Handle} | RefHdlsB], Acc) ->
+ sort_handles(RefHdls, RefHdlsA, RefHdlsB, [Handle | Acc]).
+
+put_handle(Ref, Handle = #handle { last_used_at = Then }) ->
+ Now = erlang:monotonic_time(),
+ age_tree_update(Then, Now, Ref),
+ put({Ref, fhc_handle}, Handle #handle { last_used_at = Now }).
+
+with_age_tree(Fun) -> put_age_tree(Fun(get_age_tree())).
+
+get_age_tree() ->
+ case get(fhc_age_tree) of
+ undefined -> gb_trees:empty();
+ AgeTree -> AgeTree
+ end.
+
+put_age_tree(Tree) -> put(fhc_age_tree, Tree).
+
+age_tree_update(Then, Now, Ref) ->
+ with_age_tree(
+ fun (Tree) ->
+ gb_trees:insert({Now, Ref}, true,
+ gb_trees:delete_any({Then, Ref}, Tree))
+ end).
+
+age_tree_delete(Then, Ref) ->
+ with_age_tree(
+ fun (Tree) ->
+ Tree1 = gb_trees:delete_any({Then, Ref}, Tree),
+ Oldest = oldest(Tree1, fun () -> undefined end),
+ gen_server2:cast(?SERVER, {close, self(), Oldest}),
+ Tree1
+ end).
+
+age_tree_change() ->
+ with_age_tree(
+ fun (Tree) ->
+ case gb_trees:is_empty(Tree) of
+ true -> Tree;
+ false -> {{Oldest, _Ref}, _} = gb_trees:smallest(Tree),
+ gen_server2:cast(?SERVER, {update, self(), Oldest}),
+ Tree
+ end
+ end).
+
+oldest(Tree, DefaultFun) ->
+ case gb_trees:is_empty(Tree) of
+ true -> DefaultFun();
+ false -> {{Oldest, _Ref}, _} = gb_trees:smallest(Tree),
+ Oldest
+ end.
+
+new_closed_handle(Path, Mode, Options) ->
+ WriteBufferSize =
+ case application:get_env(rabbit, fhc_write_buffering) of
+ {ok, false} -> 0;
+ {ok, true} ->
+ case proplists:get_value(write_buffer, Options, unbuffered) of
+ unbuffered -> 0;
+ infinity -> infinity;
+ N when is_integer(N) -> N
+ end
+ end,
+ ReadBufferSize =
+ case application:get_env(rabbit, fhc_read_buffering) of
+ {ok, false} -> 0;
+ {ok, true} ->
+ case proplists:get_value(read_buffer, Options, unbuffered) of
+ unbuffered -> 0;
+ N2 when is_integer(N2) -> N2
+ end
+ end,
+ Ref = make_ref(),
+ put({Ref, fhc_handle}, #handle { hdl = closed,
+ ref = Ref,
+ offset = 0,
+ is_dirty = false,
+ write_buffer_size = 0,
+ write_buffer_size_limit = WriteBufferSize,
+ write_buffer = [],
+ read_buffer = <<>>,
+ read_buffer_pos = 0,
+ read_buffer_rem = 0,
+ read_buffer_size = ReadBufferSize,
+ read_buffer_size_limit = ReadBufferSize,
+ read_buffer_usage = 0,
+ at_eof = false,
+ path = Path,
+ mode = Mode,
+ options = Options,
+ is_write = is_writer(Mode),
+ is_read = is_reader(Mode),
+ last_used_at = undefined }),
+ {ok, Ref}.
+
+soft_close(Ref, Handle) ->
+ {Res, Handle1} = soft_close(Handle),
+ case Res of
+ ok -> put({Ref, fhc_handle}, Handle1),
+ true;
+ _ -> put_handle(Ref, Handle1),
+ false
+ end.
+
+soft_close(Handle = #handle { hdl = closed }) ->
+ {ok, Handle};
+soft_close(Handle) ->
+ case write_buffer(Handle) of
+ {ok, #handle { hdl = Hdl,
+ ref = Ref,
+ is_dirty = IsDirty,
+ last_used_at = Then } = Handle1 } ->
+ ok = case IsDirty of
+ true -> prim_file_sync(Hdl);
+ false -> ok
+ end,
+ ok = prim_file:close(Hdl),
+ age_tree_delete(Then, Ref),
+ {ok, Handle1 #handle { hdl = closed,
+ is_dirty = false,
+ last_used_at = undefined }};
+ {_Error, _Handle} = Result ->
+ Result
+ end.
+
+hard_close(Handle) ->
+ case soft_close(Handle) of
+ {ok, #handle { path = Path,
+ is_read = IsReader, is_write = IsWriter }} ->
+ #file { reader_count = RCount, has_writer = HasWriter } = File =
+ get({Path, fhc_file}),
+ RCount1 = case IsReader of
+ true -> RCount - 1;
+ false -> RCount
+ end,
+ HasWriter1 = HasWriter andalso not IsWriter,
+ case RCount1 =:= 0 andalso not HasWriter1 of
+ true -> erase({Path, fhc_file});
+ false -> put({Path, fhc_file},
+ File #file { reader_count = RCount1,
+ has_writer = HasWriter1 })
+ end,
+ ok;
+ {_Error, _Handle} = Result ->
+ Result
+ end.
+
+maybe_seek(New, Handle = #handle{hdl = Hdl,
+ offset = Old,
+ read_buffer_pos = BufPos,
+ read_buffer_rem = BufRem,
+ at_eof = AtEoF}) ->
+ {AtEoF1, NeedsSeek} = needs_seek(AtEoF, Old, New),
+ case NeedsSeek of
+ true when is_number(New) andalso
+ ((New >= Old andalso New =< BufRem + Old)
+ orelse (New < Old andalso Old - New =< BufPos)) ->
+ Diff = New - Old,
+ {{ok, New}, Handle#handle{offset = New,
+ at_eof = AtEoF1,
+ read_buffer_pos = BufPos + Diff,
+ read_buffer_rem = BufRem - Diff}};
+ true ->
+ case prim_file_position(Hdl, New) of
+ {ok, Offset1} = Result ->
+ {Result, reset_read_buffer(Handle#handle{offset = Offset1,
+ at_eof = AtEoF1})};
+ {error, _} = Error ->
+ {Error, Handle}
+ end;
+ false ->
+ {{ok, Old}, Handle}
+ end.
+
+needs_seek( AtEoF, _CurOffset, cur ) -> {AtEoF, false};
+needs_seek( AtEoF, _CurOffset, {cur, 0}) -> {AtEoF, false};
+needs_seek( true, _CurOffset, eof ) -> {true , false};
+needs_seek( true, _CurOffset, {eof, 0}) -> {true , false};
+needs_seek( false, _CurOffset, eof ) -> {true , true };
+needs_seek( false, _CurOffset, {eof, 0}) -> {true , true };
+needs_seek( AtEoF, 0, bof ) -> {AtEoF, false};
+needs_seek( AtEoF, 0, {bof, 0}) -> {AtEoF, false};
+needs_seek( AtEoF, CurOffset, CurOffset) -> {AtEoF, false};
+needs_seek( true, CurOffset, {bof, DesiredOffset})
+ when DesiredOffset >= CurOffset ->
+ {true, true};
+needs_seek( true, _CurOffset, {cur, DesiredOffset})
+ when DesiredOffset > 0 ->
+ {true, true};
+needs_seek( true, CurOffset, DesiredOffset) %% same as {bof, DO}
+ when is_integer(DesiredOffset) andalso DesiredOffset >= CurOffset ->
+ {true, true};
+%% because we can't really track size, we could well end up at EoF and not know
+needs_seek(_AtEoF, _CurOffset, _DesiredOffset) ->
+ {false, true}.
+
+write_buffer(Handle = #handle { write_buffer = [] }) ->
+ {ok, Handle};
+write_buffer(Handle = #handle { hdl = Hdl, offset = Offset,
+ write_buffer = WriteBuffer,
+ write_buffer_size = DataSize,
+ at_eof = true }) ->
+ case prim_file_write(Hdl, lists:reverse(WriteBuffer)) of
+ ok ->
+ Offset1 = Offset + DataSize,
+ {ok, Handle #handle { offset = Offset1, is_dirty = true,
+ write_buffer = [], write_buffer_size = 0 }};
+ {error, _} = Error ->
+ {Error, Handle}
+ end.
+
+reset_read_buffer(Handle) ->
+ Handle#handle{read_buffer = <<>>,
+ read_buffer_pos = 0,
+ read_buffer_rem = 0}.
+
+%% We come into this function whenever there's been a miss while
+%% reading from the buffer - but note that when we first start with a
+%% new handle the usage will be 0. Therefore in that case don't take
+%% it as meaning the buffer was useless, we just haven't done anything
+%% yet!
+tune_read_buffer_limit(Handle = #handle{read_buffer_usage = 0}, _Count) ->
+ Handle;
+%% In this head we have been using the buffer but now tried to read
+%% outside it. So how did we do? If we used less than the size of the
+%% buffer, make the new buffer the size of what we used before, but
+%% add one byte (so that next time we can distinguish between getting
+%% the buffer size exactly right and actually wanting more). If we
+%% read 100% of what we had, then double it for next time, up to the
+%% limit that was set when we were created.
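+%%
+%% For example: with a 4096-byte buffer that was used in full before
+%% this miss, TotalUsg >= Sz, so the buffer size is doubled for the
+%% next read, capped at the read_buffer_size_limit fixed at open time.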
+tune_read_buffer_limit(Handle = #handle{read_buffer = Buf,
+ read_buffer_usage = Usg,
+ read_buffer_size = Sz,
+ read_buffer_size_limit = Lim}, Count) ->
+ %% If the buffer is <<>> then we are in the first read after a
+ %% reset, the read_buffer_usage is the total usage from before the
+ %% reset. But otherwise we are in a read which read off the end of
+ %% the buffer, so really the size of this read should be included
+ %% in the usage.
+ TotalUsg = case Buf of
+ <<>> -> Usg;
+ _ -> Usg + Count
+ end,
+ Handle#handle{read_buffer_usage = 0,
+ read_buffer_size = erlang:min(case TotalUsg < Sz of
+ true -> Usg + 1;
+ false -> Usg * 2
+ end, Lim)}.
+
+maybe_reduce_read_cache(SparedRefs) ->
+ case vm_memory_monitor:get_memory_use(bytes) of
+ {_, infinity} -> ok;
+ {MemUse, MemLimit} when MemUse < MemLimit -> ok;
+ {MemUse, MemLimit} -> reduce_read_cache(
+ (MemUse - MemLimit) * 2,
+ SparedRefs)
+ end.
+
+reduce_read_cache(MemToFree, SparedRefs) ->
+ Handles = lists:sort(
+ fun({_, H1}, {_, H2}) -> H1 < H2 end,
+ [{R, H} || {{R, fhc_handle}, H} <- get(),
+ not lists:member(R, SparedRefs)
+ andalso size(H#handle.read_buffer) > 0]),
+ FreedMem = lists:foldl(
+ fun
+ (_, Freed) when Freed >= MemToFree ->
+ Freed;
+ ({Ref, #handle{read_buffer = Buf} = Handle}, Freed) ->
+ Handle1 = reset_read_buffer(Handle),
+ put({Ref, fhc_handle}, Handle1),
+ Freed + size(Buf)
+ end, 0, Handles),
+ if
+ FreedMem < MemToFree andalso SparedRefs =/= [] ->
+ reduce_read_cache(MemToFree - FreedMem, []);
+ true ->
+ ok
+ end.
+
+infos(Items, State) -> [{Item, i(Item, State)} || Item <- Items].
+
+i(total_limit, #fhc_state{limit = Limit}) -> Limit;
+i(total_used, State) -> used(State);
+i(sockets_limit, #fhc_state{obtain_limit = Limit}) -> Limit;
+i(sockets_used, #fhc_state{obtain_count_socket = Count,
+ reserve_count_socket = RCount}) -> Count + RCount;
+i(files_reserved, #fhc_state{reserve_count_file = RCount}) -> RCount;
+i(Item, _) -> throw({bad_argument, Item}).
+
+used(#fhc_state{open_count = C1,
+ obtain_count_socket = C2,
+ obtain_count_file = C3,
+ reserve_count_socket = C4,
+ reserve_count_file = C5}) -> C1 + C2 + C3 + C4 + C5.
+
+%%----------------------------------------------------------------------------
+%% gen_server2 callbacks
+%%----------------------------------------------------------------------------
+
+init([AlarmSet, AlarmClear]) ->
+ _ = file_handle_cache_stats:init(),
+ Limit = case application:get_env(file_handles_high_watermark) of
+ {ok, Watermark} when (is_integer(Watermark) andalso
+ Watermark > 0) ->
+ Watermark;
+ _ ->
+ case ulimit() of
+ unknown -> ?FILE_HANDLES_LIMIT_OTHER;
+ Lim -> lists:max([2, Lim - ?RESERVED_FOR_OTHERS])
+ end
+ end,
+ ObtainLimit = obtain_limit(Limit),
+ error_logger:info_msg("Limiting to approx ~p file handles (~p sockets)~n",
+ [Limit, ObtainLimit]),
+ Clients = ets:new(?CLIENT_ETS_TABLE, [set, private, {keypos, #cstate.pid}]),
+ Elders = ets:new(?ELDERS_ETS_TABLE, [set, private]),
+ {ok, #fhc_state { elders = Elders,
+ limit = Limit,
+ open_count = 0,
+ open_pending = pending_new(),
+ obtain_limit = ObtainLimit,
+ obtain_count_file = 0,
+ obtain_pending_file = pending_new(),
+ obtain_count_socket = 0,
+ obtain_pending_socket = pending_new(),
+ clients = Clients,
+ timer_ref = undefined,
+ alarm_set = AlarmSet,
+ alarm_clear = AlarmClear,
+ reserve_count_file = 0,
+ reserve_count_socket = 0 }}.
+
+prioritise_cast(Msg, _Len, _State) ->
+ case Msg of
+ {release, _, _, _} -> 5;
+ {release_reservation, _, _} -> 5;
+ _ -> 0
+ end.
+
+handle_call({open, Pid, Requested, EldestUnusedSince}, From,
+ State = #fhc_state { open_count = Count,
+ open_pending = Pending,
+ elders = Elders,
+ clients = Clients })
+ when EldestUnusedSince =/= undefined ->
+ true = ets:insert(Elders, {Pid, EldestUnusedSince}),
+ Item = #pending { kind = open,
+ pid = Pid,
+ requested = Requested,
+ from = From },
+ ok = track_client(Pid, Clients),
+ case needs_reduce(State #fhc_state { open_count = Count + Requested }) of
+ true -> case ets:lookup(Clients, Pid) of
+ [#cstate { opened = 0 }] ->
+ true = ets:update_element(
+ Clients, Pid, {#cstate.blocked, true}),
+ {noreply,
+ reduce(State #fhc_state {
+ open_pending = pending_in(Item, Pending) })};
+ [#cstate { opened = Opened }] ->
+ true = ets:update_element(
+ Clients, Pid,
+ {#cstate.pending_closes, Opened}),
+ {reply, close, State}
+ end;
+ false -> {noreply, run_pending_item(Item, State)}
+ end;
+
+handle_call({obtain, N, Type, Pid}, From,
+ State = #fhc_state { clients = Clients }) ->
+ Count = obtain_state(Type, count, State),
+ Pending = obtain_state(Type, pending, State),
+ ok = track_client(Pid, Clients),
+ Item = #pending { kind = {obtain, Type}, pid = Pid,
+ requested = N, from = From },
+ Enqueue = fun () ->
+ true = ets:update_element(Clients, Pid,
+ {#cstate.blocked, true}),
+ set_obtain_state(Type, pending,
+ pending_in(Item, Pending), State)
+ end,
+ {noreply,
+ case obtain_limit_reached(Type, State) of
+ true -> Enqueue();
+ false -> case needs_reduce(
+ set_obtain_state(Type, count, Count + 1, State)) of
+ true -> reduce(Enqueue());
+ false -> adjust_alarm(
+ State, run_pending_item(Item, State))
+ end
+ end};
+
+handle_call({set_limit, Limit}, _From, State) ->
+ {reply, ok, adjust_alarm(
+ State, maybe_reduce(
+ process_pending(
+ State #fhc_state {
+ limit = Limit,
+ obtain_limit = obtain_limit(Limit) })))};
+
+handle_call(get_limit, _From, State = #fhc_state { limit = Limit }) ->
+ {reply, Limit, State};
+
+handle_call({info, Items}, _From, State) ->
+ {reply, infos(Items, State), State}.
+
+handle_cast({register_callback, Pid, MFA},
+ State = #fhc_state { clients = Clients }) ->
+ ok = track_client(Pid, Clients),
+ true = ets:update_element(Clients, Pid, {#cstate.callback, MFA}),
+ {noreply, State};
+
+handle_cast({update, Pid, EldestUnusedSince},
+ State = #fhc_state { elders = Elders })
+ when EldestUnusedSince =/= undefined ->
+ true = ets:insert(Elders, {Pid, EldestUnusedSince}),
+ %% don't call maybe_reduce from here otherwise we can create a
+ %% storm of messages
+ {noreply, State};
+
+handle_cast({release, N, Type, Pid}, State) ->
+ State1 = process_pending(update_counts({obtain, Type}, Pid, -N, State)),
+ {noreply, adjust_alarm(State, State1)};
+
+handle_cast({close, Pid, EldestUnusedSince},
+ State = #fhc_state { elders = Elders, clients = Clients }) ->
+ true = case EldestUnusedSince of
+ undefined -> ets:delete(Elders, Pid);
+ _ -> ets:insert(Elders, {Pid, EldestUnusedSince})
+ end,
+ ets:update_counter(Clients, Pid, {#cstate.pending_closes, -1, 0, 0}),
+ {noreply, adjust_alarm(State, process_pending(
+ update_counts(open, Pid, -1, State)))};
+
+handle_cast({transfer, N, FromPid, ToPid}, State) ->
+ ok = track_client(ToPid, State#fhc_state.clients),
+ {noreply, process_pending(
+ update_counts({obtain, socket}, ToPid, +N,
+ update_counts({obtain, socket}, FromPid, -N,
+ State)))};
+
+handle_cast(clear_read_cache, State) ->
+ _ = clear_process_read_cache(),
+ {noreply, State};
+
+handle_cast({release_reservation, Type, Pid}, State) ->
+ State1 = process_pending(update_counts({reserve, Type}, Pid, 0, State)),
+ {noreply, adjust_alarm(State, State1)};
+
+handle_cast({set_reservation, N, Type, Pid},
+ State = #fhc_state { clients = Clients }) ->
+ ok = track_client(Pid, Clients),
+ NewState = process_pending(update_counts({reserve, Type}, Pid, N, State)),
+ {noreply, case needs_reduce(NewState) of
+ true -> reduce(NewState);
+ false -> adjust_alarm(State, NewState)
+ end}.
+
+handle_info(check_counts, State) ->
+ {noreply, maybe_reduce(State #fhc_state { timer_ref = undefined })};
+
+handle_info({'DOWN', _MRef, process, Pid, _Reason},
+ State = #fhc_state { elders = Elders,
+ open_count = OpenCount,
+ open_pending = OpenPending,
+ obtain_count_file = ObtainCountF,
+ obtain_count_socket = ObtainCountS,
+ obtain_pending_file = ObtainPendingF,
+ obtain_pending_socket = ObtainPendingS,
+ reserve_count_file = ReserveCountF,
+ reserve_count_socket = ReserveCountS,
+ clients = Clients }) ->
+ [#cstate { opened = Opened,
+ obtained_file = ObtainedFile,
+ obtained_socket = ObtainedSocket,
+ reserved_file = ReservedFile,
+ reserved_socket = ReservedSocket }] =
+ ets:lookup(Clients, Pid),
+ true = ets:delete(Clients, Pid),
+ true = ets:delete(Elders, Pid),
+ Fun = fun (#pending { pid = Pid1 }) -> Pid1 =/= Pid end,
+ State1 = process_pending(
+ State #fhc_state {
+ open_count = OpenCount - Opened,
+ open_pending = filter_pending(Fun, OpenPending),
+ obtain_count_file = ObtainCountF - ObtainedFile,
+ obtain_count_socket = ObtainCountS - ObtainedSocket,
+ obtain_pending_file = filter_pending(Fun, ObtainPendingF),
+ obtain_pending_socket = filter_pending(Fun, ObtainPendingS),
+ reserve_count_file = ReserveCountF - ReservedFile,
+ reserve_count_socket = ReserveCountS - ReservedSocket}),
+ {noreply, adjust_alarm(State, State1)}.
+
+terminate(_Reason, State = #fhc_state { clients = Clients,
+ elders = Elders }) ->
+ ets:delete(Clients),
+ ets:delete(Elders),
+ State.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+%%----------------------------------------------------------------------------
+%% pending queue abstraction helpers
+%%----------------------------------------------------------------------------
+
+queue_fold(Fun, Init, Q) ->
+ case queue:out(Q) of
+ {empty, _Q} -> Init;
+ {{value, V}, Q1} -> queue_fold(Fun, Fun(V, Init), Q1)
+ end.
+
+filter_pending(Fun, {Count, Queue}) ->
+ {Delta, Queue1} =
+ queue_fold(
+ fun (Item = #pending { requested = Requested }, {DeltaN, QueueN}) ->
+ case Fun(Item) of
+ true -> {DeltaN, queue:in(Item, QueueN)};
+ false -> {DeltaN - Requested, QueueN}
+ end
+ end, {0, queue:new()}, Queue),
+ {Count + Delta, Queue1}.
+
+pending_new() ->
+ {0, queue:new()}.
+
+pending_in(Item = #pending { requested = Requested }, {Count, Queue}) ->
+ {Count + Requested, queue:in(Item, Queue)}.
+
+pending_out({0, _Queue} = Pending) ->
+ {empty, Pending};
+pending_out({N, Queue}) ->
+ {{value, #pending { requested = Requested }} = Result, Queue1} =
+ queue:out(Queue),
+ {Result, {N - Requested, Queue1}}.
+
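+%% Note: the count tracked by this abstraction is the sum of the
+%% 'requested' fields of the queued items, not the queue length; a
+%% queue holding a single #pending{requested = 3} item counts as 3.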
+pending_count({Count, _Queue}) ->
+ Count.
+
+%%----------------------------------------------------------------------------
+%% server helpers
+%%----------------------------------------------------------------------------
+
+obtain_limit(infinity) -> infinity;
+obtain_limit(Limit) -> case ?OBTAIN_LIMIT(Limit) of
+ OLimit when OLimit < 0 -> 0;
+ OLimit -> OLimit
+ end.
+
+obtain_limit_reached(socket, State) -> obtain_limit_reached(State);
+obtain_limit_reached(file, State) -> needs_reduce(State).
+
+obtain_limit_reached(#fhc_state{obtain_limit = Limit,
+ obtain_count_socket = Count,
+ reserve_count_socket = RCount}) ->
+ Limit =/= infinity andalso (RCount + Count) >= Limit.
+
+obtain_state(file, count, #fhc_state{obtain_count_file = N}) -> N;
+obtain_state(socket, count, #fhc_state{obtain_count_socket = N}) -> N;
+obtain_state(file, pending, #fhc_state{obtain_pending_file = N}) -> N;
+obtain_state(socket, pending, #fhc_state{obtain_pending_socket = N}) -> N.
+
+set_obtain_state(file, count, N, S) -> S#fhc_state{obtain_count_file = N};
+set_obtain_state(socket, count, N, S) -> S#fhc_state{obtain_count_socket = N};
+set_obtain_state(file, pending, N, S) -> S#fhc_state{obtain_pending_file = N};
+set_obtain_state(socket, pending, N, S) -> S#fhc_state{obtain_pending_socket = N}.
+
+adjust_alarm(OldState = #fhc_state { alarm_set = AlarmSet,
+ alarm_clear = AlarmClear }, NewState) ->
+ case {obtain_limit_reached(OldState), obtain_limit_reached(NewState)} of
+ {false, true} -> AlarmSet({file_descriptor_limit, []});
+ {true, false} -> AlarmClear(file_descriptor_limit);
+ _ -> ok
+ end,
+ NewState.
+
+process_pending(State = #fhc_state { limit = infinity }) ->
+ State;
+process_pending(State) ->
+ process_open(process_obtain(socket, process_obtain(file, State))).
+
+process_open(State = #fhc_state { limit = Limit,
+ open_pending = Pending}) ->
+ {Pending1, State1} = process_pending(Pending, Limit - used(State), State),
+ State1 #fhc_state { open_pending = Pending1 }.
+
+process_obtain(socket, State = #fhc_state { limit = Limit,
+ obtain_limit = ObtainLimit,
+ open_count = OpenCount,
+ obtain_count_socket = ObtainCount,
+ obtain_pending_socket = Pending,
+ obtain_count_file = ObtainCountF,
+ reserve_count_file = ReserveCountF,
+ reserve_count_socket = ReserveCount}) ->
+ Quota = min(ObtainLimit - ObtainCount,
+ Limit - (OpenCount + ObtainCount + ObtainCountF + ReserveCount + ReserveCountF)),
+ {Pending1, State1} = process_pending(Pending, Quota, State),
+ State1#fhc_state{obtain_pending_socket = Pending1};
+process_obtain(file, State = #fhc_state { limit = Limit,
+ open_count = OpenCount,
+ obtain_count_socket = ObtainCountS,
+ obtain_count_file = ObtainCountF,
+ obtain_pending_file = Pending,
+ reserve_count_file = ReserveCountF,
+ reserve_count_socket = ReserveCountS}) ->
+ Quota = Limit - (OpenCount + ObtainCountS + ObtainCountF + ReserveCountF + ReserveCountS),
+ {Pending1, State1} = process_pending(Pending, Quota, State),
+ State1#fhc_state{obtain_pending_file = Pending1}.
+
+process_pending(Pending, Quota, State) when Quota =< 0 ->
+ {Pending, State};
+process_pending(Pending, Quota, State) ->
+ case pending_out(Pending) of
+ {empty, _Pending} ->
+ {Pending, State};
+ {{value, #pending { requested = Requested }}, _Pending1}
+ when Requested > Quota ->
+ {Pending, State};
+ {{value, #pending { requested = Requested } = Item}, Pending1} ->
+ process_pending(Pending1, Quota - Requested,
+ run_pending_item(Item, State))
+ end.
+
+run_pending_item(#pending { kind = Kind,
+ pid = Pid,
+ requested = Requested,
+ from = From },
+ State = #fhc_state { clients = Clients }) ->
+ gen_server2:reply(From, ok),
+ true = ets:update_element(Clients, Pid, {#cstate.blocked, false}),
+ update_counts(Kind, Pid, Requested, State).
+
+update_counts(open, Pid, Delta,
+ State = #fhc_state { open_count = OpenCount,
+ clients = Clients }) ->
+ ets:update_counter(Clients, Pid, {#cstate.opened, Delta}),
+ State #fhc_state { open_count = OpenCount + Delta};
+update_counts({obtain, file}, Pid, Delta,
+ State = #fhc_state {obtain_count_file = ObtainCountF,
+ clients = Clients }) ->
+ ets:update_counter(Clients, Pid, {#cstate.obtained_file, Delta}),
+ State #fhc_state { obtain_count_file = ObtainCountF + Delta};
+update_counts({obtain, socket}, Pid, Delta,
+ State = #fhc_state {obtain_count_socket = ObtainCountS,
+ clients = Clients }) ->
+ ets:update_counter(Clients, Pid, {#cstate.obtained_socket, Delta}),
+ State #fhc_state { obtain_count_socket = ObtainCountS + Delta};
+update_counts({reserve, file}, Pid, NewReservation,
+ State = #fhc_state {reserve_count_file = ReserveCountF,
+ clients = Clients }) ->
+ [#cstate{reserved_file = R}] = ets:lookup(Clients, Pid),
+ Delta = NewReservation - R,
+ ets:update_counter(Clients, Pid, {#cstate.reserved_file, Delta}),
+ State #fhc_state { reserve_count_file = ReserveCountF + Delta};
+update_counts({reserve, socket}, Pid, NewReservation,
+ State = #fhc_state {reserve_count_socket = ReserveCountS,
+ clients = Clients }) ->
+ [#cstate{reserved_socket = R}] = ets:lookup(Clients, Pid),
+ Delta = NewReservation - R,
+ ets:update_counter(Clients, Pid, {#cstate.reserved_socket, Delta}),
+ State #fhc_state { reserve_count_socket = ReserveCountS + Delta}.
+
+maybe_reduce(State) ->
+ case needs_reduce(State) of
+ true -> reduce(State);
+ false -> State
+ end.
+
+needs_reduce(#fhc_state { limit = Limit,
+ open_count = OpenCount,
+ open_pending = {OpenPending, _},
+ obtain_limit = ObtainLimit,
+ obtain_count_socket = ObtainCountS,
+ obtain_count_file = ObtainCountF,
+ obtain_pending_file = {ObtainPendingF, _},
+ obtain_pending_socket = {ObtainPendingS, _},
+ reserve_count_socket = ReserveCountS,
+ reserve_count_file = ReserveCountF}) ->
+ Limit =/= infinity
+ andalso (((OpenCount + ObtainCountS + ObtainCountF + ReserveCountS + ReserveCountF) > Limit)
+ orelse (OpenPending =/= 0)
+ orelse (ObtainPendingF =/= 0)
+ orelse (ObtainCountS < ObtainLimit
+ andalso (ObtainPendingS =/= 0))).
+
+reduce(State = #fhc_state { open_pending = OpenPending,
+ obtain_pending_file = ObtainPendingFile,
+ obtain_pending_socket = ObtainPendingSocket,
+ elders = Elders,
+ clients = Clients,
+ timer_ref = TRef }) ->
+ Now = erlang:monotonic_time(),
+ {CStates, Sum, ClientCount} =
+ ets:foldl(fun ({Pid, Eldest}, {CStatesAcc, SumAcc, CountAcc} = Accs) ->
+ [#cstate { pending_closes = PendingCloses,
+ opened = Opened,
+ blocked = Blocked } = CState] =
+ ets:lookup(Clients, Pid),
+ TimeDiff = erlang:convert_time_unit(
+ Now - Eldest, native, micro_seconds),
+ case Blocked orelse PendingCloses =:= Opened of
+ true -> Accs;
+ false -> {[CState | CStatesAcc],
+ SumAcc + TimeDiff,
+ CountAcc + 1}
+ end
+ end, {[], 0, 0}, Elders),
+ case CStates of
+ [] -> ok;
+ _ -> case (Sum / ClientCount) -
+ (1000 * ?FILE_HANDLES_CHECK_INTERVAL) of
+ AverageAge when AverageAge > 0 ->
+ notify_age(CStates, AverageAge);
+ _ ->
+ notify_age0(Clients, CStates,
+ pending_count(OpenPending) +
+ pending_count(ObtainPendingFile) +
+ pending_count(ObtainPendingSocket))
+ end
+ end,
+ case TRef of
+ undefined -> TRef1 = erlang:send_after(
+ ?FILE_HANDLES_CHECK_INTERVAL, ?SERVER,
+ check_counts),
+ State #fhc_state { timer_ref = TRef1 };
+ _ -> State
+ end.
+
+notify_age(CStates, AverageAge) ->
+ lists:foreach(
+ fun (#cstate { callback = undefined }) -> ok;
+ (#cstate { callback = {M, F, A} }) -> apply(M, F, A ++ [AverageAge])
+ end, CStates).
+
+notify_age0(Clients, CStates, Required) ->
+ case [CState || CState <- CStates, CState#cstate.callback =/= undefined] of
+ [] -> ok;
+ Notifications -> S = rand:uniform(length(Notifications)),
+ {L1, L2} = lists:split(S, Notifications),
+ notify(Clients, Required, L2 ++ L1)
+ end.
+
+notify(_Clients, _Required, []) ->
+ ok;
+notify(_Clients, Required, _Notifications) when Required =< 0 ->
+ ok;
+notify(Clients, Required, [#cstate{ pid = Pid,
+ callback = {M, F, A},
+ opened = Opened } | Notifications]) ->
+ apply(M, F, A ++ [0]),
+ ets:update_element(Clients, Pid, {#cstate.pending_closes, Opened}),
+ notify(Clients, Required - Opened, Notifications).
+
+track_client(Pid, Clients) ->
+ case ets:insert_new(Clients, #cstate { pid = Pid,
+ callback = undefined,
+ opened = 0,
+ obtained_file = 0,
+ obtained_socket = 0,
+ blocked = false,
+ pending_closes = 0,
+ reserved_file = 0,
+ reserved_socket = 0 }) of
+ true -> _MRef = erlang:monitor(process, Pid),
+ ok;
+ false -> ok
+ end.
+
+
+%% To increase the number of file descriptors: on Windows set ERL_MAX_PORTS
+%% environment variable, on Linux set `ulimit -n`.
+ulimit() ->
+ IOStats = case erlang:system_info(check_io) of
+ [Val | _] when is_list(Val) -> Val;
+ Val when is_list(Val) -> Val;
+ _Other -> []
+ end,
+ case proplists:get_value(max_fds, IOStats) of
+ MaxFds when is_integer(MaxFds) andalso MaxFds > 1 ->
+ case os:type() of
+ {win32, _OsName} ->
+ %% On Windows max_fds is twice the number of open files:
+ %% https://github.com/yrashk/erlang/blob/e1282325ed75e52a98d5/erts/emulator/sys/win32/sys.c#L2459-2466
+ MaxFds div 2;
+ _Any ->
+ %% For other operating systems trust Erlang.
+ MaxFds
+ end;
+ _ ->
+ unknown
+ end.
diff --git a/deps/rabbit_common/src/file_handle_cache_stats.erl b/deps/rabbit_common/src/file_handle_cache_stats.erl
new file mode 100644
index 0000000000..e36a4b38dc
--- /dev/null
+++ b/deps/rabbit_common/src/file_handle_cache_stats.erl
@@ -0,0 +1,57 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(file_handle_cache_stats).
+
+%% stats about read / write operations that go through the fhc.
+
+-export([init/0, update/3, update/2, update/1, get/0]).
+
+-define(TABLE, ?MODULE).
+
+-define(COUNT,
+ [io_reopen, mnesia_ram_tx, mnesia_disk_tx,
+ msg_store_read, msg_store_write,
+ queue_index_journal_write, queue_index_write, queue_index_read]).
+-define(COUNT_TIME, [io_sync, io_seek, io_file_handle_open_attempt]).
+-define(COUNT_TIME_BYTES, [io_read, io_write]).
+
+init() ->
+ _ = ets:new(?TABLE, [public, named_table]),
+ [ets:insert(?TABLE, {{Op, Counter}, 0}) || Op <- ?COUNT_TIME_BYTES,
+ Counter <- [count, bytes, time]],
+ [ets:insert(?TABLE, {{Op, Counter}, 0}) || Op <- ?COUNT_TIME,
+ Counter <- [count, time]],
+ [ets:insert(?TABLE, {{Op, Counter}, 0}) || Op <- ?COUNT,
+ Counter <- [count]].
+
+update(Op, Bytes, Thunk) ->
+ {Time, Res} = timer_tc(Thunk),
+ _ = ets:update_counter(?TABLE, {Op, count}, 1),
+ _ = ets:update_counter(?TABLE, {Op, bytes}, Bytes),
+ _ = ets:update_counter(?TABLE, {Op, time}, Time),
+ Res.
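+
+%% For example, file_handle_cache wraps a timed, byte-counted read as:
+%%
+%%   file_handle_cache_stats:update(
+%%     io_read, Size, fun() -> prim_file:read(Hdl, Size) end)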
+
+update(Op, Thunk) ->
+ {Time, Res} = timer_tc(Thunk),
+ _ = ets:update_counter(?TABLE, {Op, count}, 1),
+ _ = ets:update_counter(?TABLE, {Op, time}, Time),
+ Res.
+
+update(Op) ->
+ ets:update_counter(?TABLE, {Op, count}, 1),
+ ok.
+
+get() ->
+ lists:sort(ets:tab2list(?TABLE)).
+
+timer_tc(Thunk) ->
+ T1 = erlang:monotonic_time(),
+ Res = Thunk(),
+ T2 = erlang:monotonic_time(),
+ Diff = erlang:convert_time_unit(T2 - T1, native, micro_seconds),
+ {Diff, Res}.
diff --git a/deps/rabbit_common/src/gen_server2.erl b/deps/rabbit_common/src/gen_server2.erl
new file mode 100644
index 0000000000..b80e921a89
--- /dev/null
+++ b/deps/rabbit_common/src/gen_server2.erl
@@ -0,0 +1,1419 @@
+%% This file is a copy of gen_server.erl from the R13B-1 Erlang/OTP
+%% distribution, with the following modifications:
+%%
+%% 1) the module name is gen_server2
+%%
+%% 2) more efficient handling of selective receives in callbacks
+%% gen_server2 processes drain their message queue into an internal
+%% buffer before invoking any callback module functions. Messages are
+%% dequeued from the buffer for processing. Thus the effective message
+%% queue of a gen_server2 process is the concatenation of the internal
+%% buffer and the real message queue.
+%% As a result of the draining, any selective receive invoked inside a
+%% callback is less likely to have to scan a large message queue.
+%%
+%% 3) gen_server2:cast is guaranteed to be order-preserving
+%% The original code could reorder messages when communicating with a
+%% process on a remote node that was not currently connected.
+%%
+%% 4) The callback module can optionally implement prioritise_call/4,
+%% prioritise_cast/3 and prioritise_info/3. These functions take
+%% Message, From, Length and State or just Message, Length and State
+%% (where Length is the current number of messages waiting to be
+%% processed) and return a single integer representing the priority
+%% attached to the message, or 'drop' to ignore it (for
+%% prioritise_cast/3 and prioritise_info/3 only). Messages with
+%% higher priorities are processed before requests with lower
+%% priorities. The default priority is 0.
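+%%
+%% e.g. (a sketch; the message names are illustrative):
+%%
+%% prioritise_cast({set_maximum_since_use, _}, _Len, _State) -> 8;
+%% prioritise_cast(emit_stats, _Len, _State) -> drop;
+%% prioritise_cast(_Msg, _Len, _State) -> 0.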
+%%
+%% 5) The callback module can optionally implement
+%% handle_pre_hibernate/1 and handle_post_hibernate/1. These will be
+%% called immediately prior to and post hibernation, respectively. If
+%% handle_pre_hibernate returns {hibernate, NewState} then the process
+%% will hibernate. If the module does not implement
+%% handle_pre_hibernate/1 then the default action is to hibernate.
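+%%
+%% e.g. (a sketch; flush_buffers/1 is a hypothetical helper):
+%%
+%% handle_pre_hibernate(State) -> {hibernate, flush_buffers(State)}.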
+%%
+%% 6) init can return a 4th arg, {backoff, InitialTimeout,
+%% MinimumTimeout, DesiredHibernatePeriod} (all in milliseconds,
+%% 'infinity' does not make sense here). Then, on all callbacks which
+%% can return a timeout (including init), timeout can be
+%% 'hibernate'. When this is the case, the current timeout value will
+%% be used (initially, the InitialTimeout supplied from init). After
+%% this timeout has occurred, hibernation will occur as normal. Upon
+%% awaking, a new current timeout value will be calculated.
+%%
+%% The purpose is that the gen_server2 takes care of adjusting the
+%% current timeout value such that the process will increase the
+%% timeout value repeatedly if it is unable to sleep for the
+%% DesiredHibernatePeriod. If it is able to sleep for the
+%% DesiredHibernatePeriod it will decrease the current timeout down to
+%% the MinimumTimeout, so that the process is put to sleep sooner (and
+%% hopefully stays asleep for longer). In short, should a process
+%% using this receive a burst of messages, it should not hibernate
+%% between those messages, but as the messages become less frequent,
+%% the process will not only hibernate, it will do so sooner after
+%% each message.
+%%
+%% When using this backoff mechanism, normal timeout values (i.e. not
+%% 'hibernate') can still be used, and if they are used then the
+%% handle_info(timeout, State) will be called as normal. In this case,
+%% returning 'hibernate' from handle_info(timeout, State) will not
+%% hibernate the process immediately, as it would if backoff wasn't
+%% being used. Instead it'll wait for the current timeout as described
+%% above.
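+%%
+%% e.g. (a sketch) init/1 might return
+%%
+%% {ok, State, hibernate, {backoff, 1000, 1000, 10000}}
+%%
+%% to start with a 1 second timeout, never shrink it below 1 second,
+%% and raise it whenever the process fails to stay asleep for the
+%% desired 10 seconds.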
+%%
+%% 7) The callback module can return from any of the handle_*
+%% functions, a {become, Module, State} triple, or a {become, Module,
+%% State, Timeout} quadruple. This allows the gen_server to
+%% dynamically change the callback module. The State is the new state
+%% which will be passed into any of the callback functions in the new
+%% module. Note there is no form that also encompasses a reply; thus if
+%% you wish to reply in handle_call/3 and change the callback module,
+%% you need to use gen_server2:reply/2 to issue the reply
+%% manually. The init function can similarly return a 5th argument,
+%% Module, in order to dynamically decide the callback module on init.
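+%%
+%% e.g. (a sketch; other_module and convert_state/1 are hypothetical):
+%%
+%% handle_cast(upgrade, State) ->
+%%     {become, other_module, convert_state(State)};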
+%%
+%% 8) The callback module can optionally implement
+%% format_message_queue/2 which is the equivalent of format_status/2
+%% but where the second argument is specifically the priority_queue
+%% which contains the prioritised message_queue.
+%%
+%% 9) The function with_state/2 can be used to debug a process with
+%% heavyweight state (without needing to copy the entire state out of
+%% process as sys:get_status/1 would). Pass through a function which
+%% can be invoked on the state, get back the result. The state is not
+%% modified.
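+%%
+%% E.g. (a sketch; the #state.count field is assumed purely for
+%% illustration):
+%%
+%%   Count = gen_server2:with_state(Pid, fun (S) -> S#state.count end).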
+%%
+%% 10) an mcall/1 function has been added for performing multiple
+%% call/3 requests in parallel. Unlike multi_call, which sends the same request
+%% to same-named processes residing on a supplied list of nodes, it
+%% operates on name/request pairs, where name is anything accepted by
+%% call/3, i.e. a pid, global name, local name, or local name on a
+%% particular node.
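+%%
+%% For example (PidA and the registered name server_b are
+%% hypothetical):
+%%
+%%   {Replies, Errors} =
+%%       gen_server2:mcall([{PidA, get_status},
+%%                          {{server_b, node()}, get_status}]).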
+%%
+%% 11) Internal buffer length is emitted as a core [RabbitMQ] metric.
+
+%% All modifications are (C) 2009-2020 VMware, Inc. or its affiliates.
+
+%% ``The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved via the world wide web at https://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Initial Developer of the Original Code is Ericsson Utvecklings AB.
+%% Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings
+%% AB. All Rights Reserved.''
+%%
+%% $Id$
+%%
+-module(gen_server2).
+
+-ifdef(OTP_RELEASE).
+-if(?OTP_RELEASE >= 22).
+-compile(nowarn_deprecated_function).
+-endif.
+-endif.
+
+%%% ---------------------------------------------------
+%%%
+%%% The idea behind THIS server is that the user module
+%%% provides (different) functions to handle different
+%%% kinds of input.
+%%% If the Parent process terminates the Module:terminate/2
+%%% function is called.
+%%%
+%%% The user module should export:
+%%%
+%%% init(Args)
+%%% ==> {ok, State}
+%%% {ok, State, Timeout}
+%%% {ok, State, Timeout, Backoff}
+%%% {ok, State, Timeout, Backoff, Module}
+%%% ignore
+%%% {stop, Reason}
+%%%
+%%% handle_call(Msg, {From, Tag}, State)
+%%%
+%%% ==> {reply, Reply, State}
+%%% {reply, Reply, State, Timeout}
+%%% {noreply, State}
+%%% {noreply, State, Timeout}
+%%% {stop, Reason, Reply, State}
+%%% Reason = normal | shutdown | Term, terminate(State) is called
+%%%
+%%% handle_cast(Msg, State)
+%%%
+%%% ==> {noreply, State}
+%%% {noreply, State, Timeout}
+%%% {stop, Reason, State}
+%%% Reason = normal | shutdown | Term, terminate(State) is called
+%%%
+%%% handle_info(Info, State) Info is e.g. {'EXIT', P, R}, {nodedown, N}, ...
+%%%
+%%% ==> {noreply, State}
+%%% {noreply, State, Timeout}
+%%% {stop, Reason, State}
+%%% Reason = normal | shutdown | Term, terminate(State) is called
+%%%
+%%% terminate(Reason, State) Let the user module clean up
+%%% Reason = normal | shutdown | {shutdown, Term} | Term
+%%% always called when server terminates
+%%%
+%%% ==> ok | Term
+%%%
+%%% handle_pre_hibernate(State)
+%%%
+%%% ==> {hibernate, State}
+%%% {stop, Reason, State}
+%%% Reason = normal | shutdown | Term, terminate(State) is called
+%%%
+%%% handle_post_hibernate(State)
+%%%
+%%% ==> {noreply, State}
+%%% {stop, Reason, State}
+%%% Reason = normal | shutdown | Term, terminate(State) is called
+%%%
+%%% The work flow (of the server) can be described as follows:
+%%%
+%%% User module Generic
+%%% ----------- -------
+%%% start -----> start
+%%% init <----- .
+%%%
+%%% loop
+%%% handle_call <----- .
+%%% -----> reply
+%%%
+%%% handle_cast <----- .
+%%%
+%%% handle_info <----- .
+%%%
+%%% terminate <----- .
+%%%
+%%% -----> reply
+%%%
+%%%
+%%% ---------------------------------------------------
+
+%% API
+-export([start/3, start/4,
+ start_link/3, start_link/4,
+ stop/1, stop/3,
+ call/2, call/3,
+ cast/2, reply/2,
+ abcast/2, abcast/3,
+ multi_call/2, multi_call/3, multi_call/4,
+ mcall/1,
+ with_state/2,
+ enter_loop/3, enter_loop/4, enter_loop/5, enter_loop/6, wake_hib/1]).
+
+%% System exports
+-export([system_continue/3,
+ system_terminate/4,
+ system_code_change/4,
+ format_status/2]).
+
+%% Internal exports
+-export([init_it/6]).
+
+-import(error_logger, [format/2]).
+
+%% State record
+-record(gs2_state, {parent, name, state, mod, time,
+ timeout_state, queue, debug, prioritisers,
+ timer, emit_stats_fun, stop_stats_fun}).
+
+%%%=========================================================================
+%%% Specs. These exist only to shut up dialyzer's warnings
+%%%=========================================================================
+
+-type gs2_state() :: #gs2_state{}.
+
+-spec handle_common_termination(any(), atom(), gs2_state()) -> no_return().
+-spec hibernate(gs2_state()) -> no_return().
+-spec pre_hibernate(gs2_state()) -> no_return().
+-spec system_terminate(_, _, _, gs2_state()) -> no_return().
+
+-type millis() :: non_neg_integer().
+
+-dialyzer({nowarn_function, do_multi_call/4}).
+
+%%%=========================================================================
+%%% API
+%%%=========================================================================
+
+-callback init(Args :: term()) ->
+ {ok, State :: term()} |
+ {ok, State :: term(), timeout() | hibernate} |
+ {ok, State :: term(), timeout() | hibernate,
+ {backoff, millis(), millis(), millis()}} |
+ {ok, State :: term(), timeout() | hibernate,
+ {backoff, millis(), millis(), millis()}, atom()} |
+ ignore |
+ {stop, Reason :: term()}.
+-callback handle_call(Request :: term(), From :: {pid(), Tag :: term()},
+ State :: term()) ->
+ {reply, Reply :: term(), NewState :: term()} |
+ {reply, Reply :: term(), NewState :: term(), timeout() | hibernate} |
+ {noreply, NewState :: term()} |
+ {noreply, NewState :: term(), timeout() | hibernate} |
+ {stop, Reason :: term(),
+ Reply :: term(), NewState :: term()}.
+-callback handle_cast(Request :: term(), State :: term()) ->
+ {noreply, NewState :: term()} |
+ {noreply, NewState :: term(), timeout() | hibernate} |
+ {stop, Reason :: term(), NewState :: term()}.
+-callback handle_info(Info :: term(), State :: term()) ->
+ {noreply, NewState :: term()} |
+ {noreply, NewState :: term(), timeout() | hibernate} |
+ {stop, Reason :: term(), NewState :: term()}.
+-callback terminate(Reason :: (normal | shutdown | {shutdown, term()} | term()),
+ State :: term()) ->
+ ok | term().
+-callback code_change(OldVsn :: (term() | {down, term()}), State :: term(),
+ Extra :: term()) ->
+ {ok, NewState :: term()} | {error, Reason :: term()}.
+
+%% It's not possible to define "optional" -callbacks, so putting specs
+%% for handle_pre_hibernate/1 and handle_post_hibernate/1 will result
+%% in warnings (the same applied to behaviour_info before).
+
+%%% -----------------------------------------------------------------
+%%% Starts a generic server.
+%%% start(Mod, Args, Options)
+%%% start(Name, Mod, Args, Options)
+%%% start_link(Mod, Args, Options)
+%%% start_link(Name, Mod, Args, Options) where:
+%%% Name ::= {local, atom()} | {global, atom()}
+%%% Mod ::= atom(), callback module implementing the 'real' server
+%%% Args ::= term(), init arguments (to Mod:init/1)
+%%% Options ::= [{timeout, Timeout} | {debug, [Flag]}]
+%%% Flag ::= trace | log | {logfile, File} | statistics | debug
+%%% (debug == log && statistics)
+%%% Returns: {ok, Pid} |
+%%% {error, {already_started, Pid}} |
+%%% {error, Reason}
+%%% -----------------------------------------------------------------
+start(Mod, Args, Options) ->
+ gen:start(?MODULE, nolink, Mod, Args, Options).
+
+start(Name, Mod, Args, Options) ->
+ gen:start(?MODULE, nolink, Name, Mod, Args, Options).
+
+start_link(Mod, Args, Options) ->
+ gen:start(?MODULE, link, Mod, Args, Options).
+
+start_link(Name, Mod, Args, Options) ->
+ gen:start(?MODULE, link, Name, Mod, Args, Options).
+
+%% -----------------------------------------------------------------
+%% Stop a generic server and wait for it to terminate.
+%% If the server is located at another node, that node will
+%% be monitored.
+%% -----------------------------------------------------------------
+stop(Name) ->
+ gen:stop(Name).
+
+stop(Name, Reason, Timeout) ->
+ gen:stop(Name, Reason, Timeout).
+
+%% -----------------------------------------------------------------
+%% Make a call to a generic server.
+%% If the server is located at another node, that node will
+%% be monitored.
+%% If the client is trapping exits and is linked, server termination
+%% is handled here (should we do that here, or rely on timeouts?).
+%% -----------------------------------------------------------------
+call(Name, Request) ->
+ case catch gen:call(Name, '$gen_call', Request) of
+ {ok,Res} ->
+ Res;
+ {'EXIT',Reason} ->
+ exit({Reason, {?MODULE, call, [Name, Request]}})
+ end.
+
+call(Name, Request, Timeout) ->
+ case catch gen:call(Name, '$gen_call', Request, Timeout) of
+ {ok,Res} ->
+ Res;
+ {'EXIT',Reason} ->
+ exit({Reason, {?MODULE, call, [Name, Request, Timeout]}})
+ end.
+
+%% -----------------------------------------------------------------
+%% Make a cast to a generic server.
+%% -----------------------------------------------------------------
+cast({global,Name}, Request) ->
+ catch global:send(Name, {'$gen_cast', Request}),
+ ok;
+cast({Name,Node}=Dest, Request) when is_atom(Name), is_atom(Node) ->
+ catch (Dest ! {'$gen_cast', Request}),
+ ok;
+cast(Dest, Request) when is_atom(Dest); is_pid(Dest) ->
+ catch (Dest ! {'$gen_cast', Request}),
+ ok.
+
+%% -----------------------------------------------------------------
+%% Send a reply to the client.
+%% -----------------------------------------------------------------
+reply({To, Tag}, Reply) ->
+ catch To ! {Tag, Reply}.
+
+%% -----------------------------------------------------------------
+%% Asynchronous broadcast, returns nothing, it's just send'n pray
+%% -----------------------------------------------------------------
+abcast(Name, Request) when is_atom(Name) ->
+ do_abcast([node() | nodes()], Name, {'$gen_cast', Request}).
+
+abcast(Nodes, Name, Request) when is_list(Nodes), is_atom(Name) ->
+ do_abcast(Nodes, Name, {'$gen_cast', Request}).
+
+do_abcast([Node|Nodes], Name, Msg) when is_atom(Node) ->
+ catch ({Name, Node} ! Msg),
+ do_abcast(Nodes, Name, Msg);
+do_abcast([], _,_) -> abcast.
+
+%%% -----------------------------------------------------------------
+%%% Make a call to servers at several nodes.
+%%% Returns: {[Replies],[BadNodes]}
+%%% A Timeout can be given
+%%%
+%%% A middleman process is used in case late answers arrive after
+%%% the timeout. If they were allowed to clog the caller's message
+%%% queue, it would probably become confused. Late answers will
+%%% now arrive at the terminated middleman and so be discarded.
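+%%%
+%%% For example (a sketch; my_server is a hypothetical locally
+%%% registered name):
+%%%
+%%%   {Replies, BadNodes} =
+%%%       gen_server2:multi_call([node() | nodes()], my_server,
+%%%                              ping, 5000).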
+%%% -----------------------------------------------------------------
+multi_call(Name, Req)
+ when is_atom(Name) ->
+ do_multi_call([node() | nodes()], Name, Req, infinity).
+
+multi_call(Nodes, Name, Req)
+ when is_list(Nodes), is_atom(Name) ->
+ do_multi_call(Nodes, Name, Req, infinity).
+
+multi_call(Nodes, Name, Req, infinity) ->
+ do_multi_call(Nodes, Name, Req, infinity);
+multi_call(Nodes, Name, Req, Timeout)
+ when is_list(Nodes), is_atom(Name), is_integer(Timeout), Timeout >= 0 ->
+ do_multi_call(Nodes, Name, Req, Timeout).
+
+%%% -----------------------------------------------------------------
+%%% Make multiple calls to multiple servers, given pairs of servers
+%%% and messages.
+%%% Returns: {[{Dest, Reply}], [{Dest, Error}]}
+%%%
+%%% Dest can be pid() | RegName :: atom() |
+%%% {Name :: atom(), Node :: atom()} | {global, Name :: atom()}
+%%%
+%%% A middleman process is used to avoid clogging up the caller's
+%%% message queue.
+%%% -----------------------------------------------------------------
+mcall(CallSpecs) ->
+ Tag = make_ref(),
+ {_, MRef} = spawn_monitor(
+ fun() ->
+ Refs = lists:foldl(
+ fun ({Dest, _Request}=S, Dict) ->
+ dict:store(do_mcall(S), Dest, Dict)
+ end, dict:new(), CallSpecs),
+ collect_replies(Tag, Refs, [], [])
+ end),
+ receive
+ {'DOWN', MRef, _, _, {Tag, Result}} -> Result;
+ {'DOWN', MRef, _, _, Reason} -> exit(Reason)
+ end.
+
+do_mcall({{global,Name}=Dest, Request}) ->
+ %% whereis_name is simply an ets lookup, and is precisely what
+ %% global:send/2 does, yet we need a Ref to put in the call to the
+ %% server, so invoking whereis_name makes a lot more sense here.
+ case global:whereis_name(Name) of
+ Pid when is_pid(Pid) ->
+ MRef = erlang:monitor(process, Pid),
+ catch msend(Pid, MRef, Request),
+ MRef;
+ undefined ->
+ Ref = make_ref(),
+ self() ! {'DOWN', Ref, process, Dest, noproc},
+ Ref
+ end;
+do_mcall({{Name,Node}=Dest, Request}) when is_atom(Name), is_atom(Node) ->
+ {_Node, MRef} = start_monitor(Node, Name), %% NB: we don't handle R6
+ catch msend(Dest, MRef, Request),
+ MRef;
+do_mcall({Dest, Request}) when is_atom(Dest); is_pid(Dest) ->
+ MRef = erlang:monitor(process, Dest),
+ catch msend(Dest, MRef, Request),
+ MRef.
+
+msend(Dest, MRef, Request) ->
+ erlang:send(Dest, {'$gen_call', {self(), MRef}, Request}, [noconnect]).
+
+collect_replies(Tag, Refs, Replies, Errors) ->
+ case dict:size(Refs) of
+ 0 -> exit({Tag, {Replies, Errors}});
+ _ -> receive
+ {MRef, Reply} ->
+ {Refs1, Replies1} = handle_call_result(MRef, Reply,
+ Refs, Replies),
+ collect_replies(Tag, Refs1, Replies1, Errors);
+ {'DOWN', MRef, _, _, Reason} ->
+ Reason1 = case Reason of
+ noconnection -> nodedown;
+ _ -> Reason
+ end,
+ {Refs1, Errors1} = handle_call_result(MRef, Reason1,
+ Refs, Errors),
+ collect_replies(Tag, Refs1, Replies, Errors1)
+ end
+ end.
+
+handle_call_result(MRef, Result, Refs, AccList) ->
+ %% we avoid the mailbox scanning cost of a call to erlang:demonitor/{1,2}
+ %% here, so we must cope with MRefs that we've already seen and erased
+ case dict:find(MRef, Refs) of
+ {ok, Pid} -> {dict:erase(MRef, Refs), [{Pid, Result}|AccList]};
+ _ -> {Refs, AccList}
+ end.
+
+%% -----------------------------------------------------------------
+%% Apply a function to a generic server's state.
+%% -----------------------------------------------------------------
+with_state(Name, Fun) ->
+ case catch gen:call(Name, '$with_state', Fun, infinity) of
+ {ok,Res} ->
+ Res;
+ {'EXIT',Reason} ->
+ exit({Reason, {?MODULE, with_state, [Name, Fun]}})
+ end.
+
+%%-----------------------------------------------------------------
+%% enter_loop(Mod, Options, State, <ServerName>, <TimeOut>, <Backoff>)
+%%
+%% Description: Makes an existing process into a gen_server.
+%% The calling process will enter the gen_server receive
+%% loop and become a gen_server process.
+%% The process *must* have been started using one of the
+%% start functions in proc_lib, see proc_lib(3).
+%% The user is responsible for any initialization of the
+%% process, including registering a name for it.
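+%%
+%% A minimal sketch (my_mod and my_name are hypothetical), for a
+%% process already started via proc_lib:
+%%
+%%   gen_server2:enter_loop(my_mod, [], State0, {local, my_name}).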
+%%-----------------------------------------------------------------
+enter_loop(Mod, Options, State) ->
+ enter_loop(Mod, Options, State, self(), infinity, undefined).
+
+enter_loop(Mod, Options, State, Backoff = {backoff, _, _ , _}) ->
+ enter_loop(Mod, Options, State, self(), infinity, Backoff);
+
+enter_loop(Mod, Options, State, ServerName = {_, _}) ->
+ enter_loop(Mod, Options, State, ServerName, infinity, undefined);
+
+enter_loop(Mod, Options, State, Timeout) ->
+ enter_loop(Mod, Options, State, self(), Timeout, undefined).
+
+enter_loop(Mod, Options, State, ServerName, Backoff = {backoff, _, _, _}) ->
+ enter_loop(Mod, Options, State, ServerName, infinity, Backoff);
+
+enter_loop(Mod, Options, State, ServerName, Timeout) ->
+ enter_loop(Mod, Options, State, ServerName, Timeout, undefined).
+
+enter_loop(Mod, Options, State, ServerName, Timeout, Backoff) ->
+ Name = get_proc_name(ServerName),
+ Parent = get_parent(),
+ Debug = debug_options(Name, Options),
+ Queue = priority_queue:new(),
+ Backoff1 = extend_backoff(Backoff),
+ {EmitStatsFun, StopStatsFun} = stats_funs(),
+ loop(init_stats(find_prioritisers(
+ #gs2_state { parent = Parent, name = Name, state = State,
+ mod = Mod, time = Timeout, timeout_state = Backoff1,
+ queue = Queue, debug = Debug,
+ emit_stats_fun = EmitStatsFun,
+ stop_stats_fun = StopStatsFun }))).
+
+%%%========================================================================
+%%% Gen-callback functions
+%%%========================================================================
+
+%%% ---------------------------------------------------
+%%% Initiate the new process.
+%%% Register the name (if any).
+%%% Call the Mod:init(Args) function.
+%%% Finally an acknowledgement is sent to Parent and the main
+%%% loop is entered.
+%%% ---------------------------------------------------
+init_it(Starter, self, Name, Mod, Args, Options) ->
+ init_it(Starter, self(), Name, Mod, Args, Options);
+init_it(Starter, Parent, Name0, Mod, Args, Options) ->
+ Name = name(Name0),
+ Debug = debug_options(Name, Options),
+ Queue = priority_queue:new(),
+ {EmitStatsFun, StopStatsFun} = stats_funs(),
+ GS2State = find_prioritisers(
+ #gs2_state { parent = Parent,
+ name = Name,
+ mod = Mod,
+ queue = Queue,
+ debug = Debug,
+ emit_stats_fun = EmitStatsFun,
+ stop_stats_fun = StopStatsFun }),
+ case catch Mod:init(Args) of
+ {ok, State} ->
+ proc_lib:init_ack(Starter, {ok, self()}),
+ loop(init_stats(GS2State#gs2_state { state = State,
+ time = infinity,
+ timeout_state = undefined }));
+ {ok, State, Timeout} ->
+ proc_lib:init_ack(Starter, {ok, self()}),
+ loop(init_stats(
+ GS2State#gs2_state { state = State,
+ time = Timeout,
+ timeout_state = undefined }));
+ {ok, State, Timeout, Backoff = {backoff, _, _, _}} ->
+ Backoff1 = extend_backoff(Backoff),
+ proc_lib:init_ack(Starter, {ok, self()}),
+ loop(init_stats(GS2State#gs2_state { state = State,
+ time = Timeout,
+ timeout_state = Backoff1 }));
+ {ok, State, Timeout, Backoff = {backoff, _, _, _}, Mod1} ->
+ Backoff1 = extend_backoff(Backoff),
+ proc_lib:init_ack(Starter, {ok, self()}),
+ loop(init_stats(find_prioritisers(
+ GS2State#gs2_state { mod = Mod1,
+ state = State,
+ time = Timeout,
+ timeout_state = Backoff1 })));
+ {stop, Reason} ->
+ %% For consistency, we must make sure that the
+ %% registered name (if any) is unregistered before
+ %% the parent process is notified about the failure.
+ %% (Otherwise, the parent process could get
+ %% an 'already_started' error if it immediately
+ %% tried starting the process again.)
+ unregister_name(Name0),
+ proc_lib:init_ack(Starter, {error, Reason}),
+ exit(Reason);
+ ignore ->
+ unregister_name(Name0),
+ proc_lib:init_ack(Starter, ignore),
+ exit(normal);
+ {'EXIT', Reason} ->
+ unregister_name(Name0),
+ proc_lib:init_ack(Starter, {error, Reason}),
+ exit(Reason);
+ Else ->
+ Error = {bad_return_value, Else},
+ proc_lib:init_ack(Starter, {error, Error}),
+ exit(Error)
+ end.
+
+name({local,Name}) -> Name;
+name({global,Name}) -> Name;
+%% name(Pid) when is_pid(Pid) -> Pid;
+%% when R12 goes away, drop the line beneath and uncomment the line above
+name(Name) -> Name.
+
+unregister_name({local,Name}) ->
+ _ = (catch unregister(Name));
+unregister_name({global,Name}) ->
+ _ = global:unregister_name(Name);
+unregister_name(Pid) when is_pid(Pid) ->
+ Pid;
+%% Under R12 let's just ignore it, as we have a single term as Name.
+%% On R13 it will never get here, as we get a tuple with a 'local'/'global' atom.
+unregister_name(_Name) -> ok.
+
+extend_backoff(undefined) ->
+ undefined;
+extend_backoff({backoff, InitialTimeout, MinimumTimeout, DesiredHibPeriod}) ->
+ {backoff, InitialTimeout, MinimumTimeout, DesiredHibPeriod,
+ rand:seed(exsplus)}.
+
+%%%========================================================================
+%%% Internal functions
+%%%========================================================================
+%%% ---------------------------------------------------
+%%% The MAIN loop.
+%%% ---------------------------------------------------
+loop(GS2State = #gs2_state { time = hibernate,
+ timeout_state = undefined,
+ queue = Queue }) ->
+ case priority_queue:is_empty(Queue) of
+ true ->
+ pre_hibernate(GS2State);
+ false ->
+ process_next_msg(GS2State)
+ end;
+
+loop(GS2State) ->
+ process_next_msg(drain(GS2State)).
+
+drain(GS2State) ->
+ receive
+ Input -> drain(in(Input, GS2State))
+ after 0 -> GS2State
+ end.
+
+process_next_msg(GS2State0 = #gs2_state { time = Time,
+ timeout_state = TimeoutState,
+ queue = Queue }) ->
+ case priority_queue:out(Queue) of
+ {{value, Msg}, Queue1} ->
+ GS2State = ensure_stats_timer(GS2State0),
+ process_msg(Msg, GS2State#gs2_state { queue = Queue1 });
+ {empty, Queue1} ->
+ {Time1, HibOnTimeout, GS2State}
+ = case {Time, TimeoutState} of
+ {hibernate, {backoff, Current, _Min, _Desired, _RSt}} ->
+ {Current, true, stop_stats_timer(GS2State0)};
+ {hibernate, _} ->
+ %% wake_hib/1 will set Time to hibernate. If
+ %% we were woken and didn't receive a msg
+ %% then we will get here and need a sensible
+ %% value for Time1, otherwise we crash.
+ %% R13B1 always waits infinitely when waking
+ %% from hibernation, so that's what we do
+ %% here too.
+ {infinity, false, GS2State0};
+ _ -> {Time, false, GS2State0}
+ end,
+ receive
+ Input ->
+ %% Time could be 'hibernate' here, so *don't* call loop
+ process_next_msg(
+ drain(in(Input, GS2State #gs2_state { queue = Queue1 })))
+ after Time1 ->
+ case HibOnTimeout of
+ true ->
+ pre_hibernate(
+ GS2State #gs2_state { queue = Queue1 });
+ false ->
+ process_msg(timeout,
+ GS2State #gs2_state { queue = Queue1 })
+ end
+ end
+ end.
+
+wake_hib(GS2State = #gs2_state { timeout_state = TS }) ->
+ TimeoutState1 = case TS of
+ undefined ->
+ undefined;
+ {SleptAt, TimeoutState} ->
+ adjust_timeout_state(SleptAt,
+ erlang:monotonic_time(),
+ TimeoutState)
+ end,
+ post_hibernate(
+ drain(GS2State #gs2_state { timeout_state = TimeoutState1 })).
+
+hibernate(GS2State = #gs2_state { timeout_state = TimeoutState }) ->
+ TS = case TimeoutState of
+ undefined -> undefined;
+ {backoff, _, _, _, _} -> {erlang:monotonic_time(),
+ TimeoutState}
+ end,
+ proc_lib:hibernate(?MODULE, wake_hib,
+ [GS2State #gs2_state { timeout_state = TS }]).
+
+pre_hibernate(GS2State0 = #gs2_state { state = State,
+ mod = Mod,
+ emit_stats_fun = EmitStatsFun }) ->
+ GS2State = EmitStatsFun(stop_stats_timer(GS2State0)),
+ case erlang:function_exported(Mod, handle_pre_hibernate, 1) of
+ true ->
+ case catch Mod:handle_pre_hibernate(State) of
+ {hibernate, NState} ->
+ hibernate(GS2State #gs2_state { state = NState } );
+ Reply ->
+ handle_common_termination(Reply, pre_hibernate, GS2State)
+ end;
+ false ->
+ hibernate(GS2State)
+ end.
+
+post_hibernate(GS2State0 = #gs2_state { state = State,
+ mod = Mod }) ->
+ GS2State = ensure_stats_timer(GS2State0),
+ case erlang:function_exported(Mod, handle_post_hibernate, 1) of
+ true ->
+ case catch Mod:handle_post_hibernate(State) of
+ {noreply, NState} ->
+ process_next_msg(GS2State #gs2_state { state = NState,
+ time = infinity });
+ {noreply, NState, Time} ->
+ process_next_msg(GS2State #gs2_state { state = NState,
+ time = Time });
+ Reply ->
+ handle_common_termination(Reply, post_hibernate, GS2State)
+ end;
+ false ->
+ %% use hibernate here, not infinity. This matches
+ %% R13B. The key is that we should be able to get through
+ %% to process_msg calling sys:handle_system_msg with Time
+ %% still set to hibernate, iff that msg is the very msg
+ %% that woke us up (or the first msg we receive after
+ %% waking up).
+ process_next_msg(GS2State #gs2_state { time = hibernate })
+ end.
+
+adjust_timeout_state(SleptAt, AwokeAt, {backoff, CurrentTO, MinimumTO,
+ DesiredHibPeriod, RandomState}) ->
+ NapLengthMicros = erlang:convert_time_unit(AwokeAt - SleptAt,
+ native, micro_seconds),
+ CurrentMicros = CurrentTO * 1000,
+ MinimumMicros = MinimumTO * 1000,
+ DesiredHibMicros = DesiredHibPeriod * 1000,
+ GapBetweenMessagesMicros = NapLengthMicros + CurrentMicros,
+ Base =
+ %% If enough time has passed between the last two messages then we
+ %% should consider sleeping sooner. Otherwise stay awake longer.
+ case GapBetweenMessagesMicros > (MinimumMicros + DesiredHibMicros) of
+ true -> lists:max([MinimumTO, CurrentTO div 2]);
+ false -> CurrentTO
+ end,
+ {Extra, RandomState1} = rand:uniform_s(Base, RandomState),
+ CurrentTO1 = Base + Extra,
+ {backoff, CurrentTO1, MinimumTO, DesiredHibPeriod, RandomState1}.
+
+in({'$gen_cast', Msg} = Input,
+ GS2State = #gs2_state { prioritisers = {_, F, _} }) ->
+ in(Input, F(Msg, GS2State), GS2State);
+in({'$gen_call', From, Msg} = Input,
+ GS2State = #gs2_state { prioritisers = {F, _, _} }) ->
+ in(Input, F(Msg, From, GS2State), GS2State);
+in({'$with_state', _From, _Fun} = Input, GS2State) ->
+ in(Input, 0, GS2State);
+in({'EXIT', Parent, _R} = Input, GS2State = #gs2_state { parent = Parent }) ->
+ in(Input, infinity, GS2State);
+in({system, _From, _Req} = Input, GS2State) ->
+ in(Input, infinity, GS2State);
+in(emit_gen_server2_stats, GS2State = #gs2_state{ emit_stats_fun = EmitStatsFun}) ->
+ next_stats_timer(EmitStatsFun(GS2State));
+in(Input, GS2State = #gs2_state { prioritisers = {_, _, F} }) ->
+ in(Input, F(Input, GS2State), GS2State).
+
+in(_Input, drop, GS2State) ->
+ GS2State;
+
+in(Input, Priority, GS2State = #gs2_state { queue = Queue }) ->
+ GS2State # gs2_state { queue = priority_queue:in(Input, Priority, Queue) }.
+
+process_msg({system, From, Req},
+ GS2State = #gs2_state { parent = Parent, debug = Debug }) ->
+ case Req of
+ %% This clause will match only in R16B03.
+ %% Since 17.0 replace_state is not a system message.
+ {replace_state, StateFun} ->
+ GS2State1 = StateFun(GS2State),
+ _ = gen:reply(From, GS2State1),
+ system_continue(Parent, Debug, GS2State1);
+ _ ->
+ %% gen_server puts Hib on the end as the 7th arg, but that version
+ %% of the fun seems not to be documented so leaving out for now.
+ sys:handle_system_msg(Req, From, Parent, ?MODULE, Debug, GS2State)
+ end;
+process_msg({'$with_state', From, Fun},
+ GS2State = #gs2_state{state = State}) ->
+ reply(From, catch Fun(State)),
+ loop(GS2State);
+process_msg({'EXIT', Parent, Reason} = Msg,
+ GS2State = #gs2_state { parent = Parent }) ->
+ terminate(Reason, Msg, GS2State);
+process_msg(Msg, GS2State = #gs2_state { debug = [] }) ->
+ handle_msg(Msg, GS2State);
+process_msg(Msg, GS2State = #gs2_state { name = Name, debug = Debug }) ->
+ Debug1 = sys:handle_debug(Debug, fun print_event/3, Name, {in, Msg}),
+ handle_msg(Msg, GS2State #gs2_state { debug = Debug1 }).
+
+%%% ---------------------------------------------------
+%%% Send/receive functions
+%%% ---------------------------------------------------
+
+do_multi_call(Nodes, Name, Req, infinity) ->
+ Tag = make_ref(),
+ Monitors = send_nodes(Nodes, Name, Tag, Req),
+ rec_nodes(Tag, Monitors, Name, undefined);
+do_multi_call(Nodes, Name, Req, Timeout) ->
+ Tag = make_ref(),
+ Caller = self(),
+ Receiver =
+ spawn(
+ fun () ->
+ %% Middleman process. Should be insensitive to regular
+ %% exit signals. The synchronization is needed in case
+ %% the receiver would exit before the caller started
+ %% the monitor.
+ process_flag(trap_exit, true),
+ Mref = erlang:monitor(process, Caller),
+ receive
+ {Caller,Tag} ->
+ Monitors = send_nodes(Nodes, Name, Tag, Req),
+ TimerId = erlang:start_timer(Timeout, self(), ok),
+ Result = rec_nodes(Tag, Monitors, Name, TimerId),
+ exit({self(),Tag,Result});
+ {'DOWN',Mref,_,_,_} ->
+ %% Caller died before sending us the go-ahead.
+ %% Give up silently.
+ exit(normal)
+ end
+ end),
+ Mref = erlang:monitor(process, Receiver),
+ Receiver ! {self(),Tag},
+ receive
+ {'DOWN',Mref,_,_,{Receiver,Tag,Result}} ->
+ Result;
+ {'DOWN',Mref,_,_,Reason} ->
+ %% The middleman code failed. Or someone did
+ %% exit(_, kill) on the middleman process => Reason==killed
+ exit(Reason)
+ end.
+
+send_nodes(Nodes, Name, Tag, Req) ->
+ send_nodes(Nodes, Name, Tag, Req, []).
+
+send_nodes([Node|Tail], Name, Tag, Req, Monitors)
+ when is_atom(Node) ->
+ Monitor = start_monitor(Node, Name),
+ %% Handle non-existing names in rec_nodes.
+ catch {Name, Node} ! {'$gen_call', {self(), {Tag, Node}}, Req},
+ send_nodes(Tail, Name, Tag, Req, [Monitor | Monitors]);
+send_nodes([_Node|Tail], Name, Tag, Req, Monitors) ->
+ %% Skip non-atom Node
+ send_nodes(Tail, Name, Tag, Req, Monitors);
+send_nodes([], _Name, _Tag, _Req, Monitors) ->
+ Monitors.
+
+%% Against old nodes:
+%% If no reply has been delivered within 2 seconds (per node), check that
+%% the server really exists and wait forever for the answer.
+%%
+%% Against contemporary nodes:
+%% Wait for reply, server 'DOWN', or timeout from TimerId.
+
+rec_nodes(Tag, Nodes, Name, TimerId) ->
+ rec_nodes(Tag, Nodes, Name, [], [], 2000, TimerId).
+
+rec_nodes(Tag, [{N,R}|Tail], Name, Badnodes, Replies, Time, TimerId ) ->
+ receive
+ {'DOWN', R, _, _, _} ->
+ rec_nodes(Tag, Tail, Name, [N|Badnodes], Replies, Time, TimerId);
+ {{Tag, N}, Reply} -> %% Tag is bound !!!
+ unmonitor(R),
+ rec_nodes(Tag, Tail, Name, Badnodes,
+ [{N,Reply}|Replies], Time, TimerId);
+ {timeout, TimerId, _} ->
+ unmonitor(R),
+ %% Collect all replies that already have arrived
+ rec_nodes_rest(Tag, Tail, Name, [N|Badnodes], Replies)
+ end;
+rec_nodes(Tag, [N|Tail], Name, Badnodes, Replies, Time, TimerId) ->
+ %% R6 node
+ receive
+ {nodedown, N} ->
+ monitor_node(N, false),
+ rec_nodes(Tag, Tail, Name, [N|Badnodes], Replies, 2000, TimerId);
+ {{Tag, N}, Reply} -> %% Tag is bound !!!
+ receive {nodedown, N} -> ok after 0 -> ok end,
+ monitor_node(N, false),
+ rec_nodes(Tag, Tail, Name, Badnodes,
+ [{N,Reply}|Replies], 2000, TimerId);
+ {timeout, TimerId, _} ->
+ receive {nodedown, N} -> ok after 0 -> ok end,
+ monitor_node(N, false),
+ %% Collect all replies that already have arrived
+ rec_nodes_rest(Tag, Tail, Name, [N | Badnodes], Replies)
+ after Time ->
+ case rpc:call(N, erlang, whereis, [Name]) of
+ Pid when is_pid(Pid) -> % It exists try again.
+ rec_nodes(Tag, [N|Tail], Name, Badnodes,
+ Replies, infinity, TimerId);
+ _ -> % badnode
+ receive {nodedown, N} -> ok after 0 -> ok end,
+ monitor_node(N, false),
+ rec_nodes(Tag, Tail, Name, [N|Badnodes],
+ Replies, 2000, TimerId)
+ end
+ end;
+rec_nodes(_, [], _, Badnodes, Replies, _, TimerId) ->
+ case catch erlang:cancel_timer(TimerId) of
+ false -> % It has already sent its message
+ receive
+ {timeout, TimerId, _} -> ok
+ after 0 ->
+ ok
+ end;
+ _ -> % Timer was cancelled, or TimerId was 'undefined'
+ ok
+ end,
+ {Replies, Badnodes}.
+
+%% Collect all replies that already have arrived
+rec_nodes_rest(Tag, [{N,R}|Tail], Name, Badnodes, Replies) ->
+ receive
+ {'DOWN', R, _, _, _} ->
+ rec_nodes_rest(Tag, Tail, Name, [N|Badnodes], Replies);
+ {{Tag, N}, Reply} -> %% Tag is bound !!!
+ unmonitor(R),
+ rec_nodes_rest(Tag, Tail, Name, Badnodes, [{N,Reply}|Replies])
+ after 0 ->
+ unmonitor(R),
+ rec_nodes_rest(Tag, Tail, Name, [N|Badnodes], Replies)
+ end;
+rec_nodes_rest(Tag, [N|Tail], Name, Badnodes, Replies) ->
+ %% R6 node
+ receive
+ {nodedown, N} ->
+ monitor_node(N, false),
+ rec_nodes_rest(Tag, Tail, Name, [N|Badnodes], Replies);
+ {{Tag, N}, Reply} -> %% Tag is bound !!!
+ receive {nodedown, N} -> ok after 0 -> ok end,
+ monitor_node(N, false),
+ rec_nodes_rest(Tag, Tail, Name, Badnodes, [{N,Reply}|Replies])
+ after 0 ->
+ receive {nodedown, N} -> ok after 0 -> ok end,
+ monitor_node(N, false),
+ rec_nodes_rest(Tag, Tail, Name, [N|Badnodes], Replies)
+ end;
+rec_nodes_rest(_Tag, [], _Name, Badnodes, Replies) ->
+ {Replies, Badnodes}.
+
+
+%%% ---------------------------------------------------
+%%% Monitor functions
+%%% ---------------------------------------------------
+
+start_monitor(Node, Name) when is_atom(Node), is_atom(Name) ->
+ if node() =:= nonode@nohost, Node =/= nonode@nohost ->
+ Ref = make_ref(),
+ self() ! {'DOWN', Ref, process, {Name, Node}, noconnection},
+ {Node, Ref};
+ true ->
+ case catch erlang:monitor(process, {Name, Node}) of
+ {'EXIT', _} ->
+ %% Remote node is R6
+ monitor_node(Node, true),
+ Node;
+ Ref when is_reference(Ref) ->
+ {Node, Ref}
+ end
+ end.
+
+%% Cancels a monitor started with Ref=erlang:monitor(_, _).
+unmonitor(Ref) when is_reference(Ref) ->
+ erlang:demonitor(Ref),
+ receive
+ {'DOWN', Ref, _, _, _} ->
+ true
+ after 0 ->
+ true
+ end.
+
+%%% ---------------------------------------------------
+%%% Message handling functions
+%%% ---------------------------------------------------
+
+dispatch({'$gen_cast', Msg}, Mod, State) ->
+ Mod:handle_cast(Msg, State);
+dispatch(Info, Mod, State) ->
+ Mod:handle_info(Info, State).
+
+common_reply(_Name, From, Reply, _NState, [] = _Debug) ->
+ reply(From, Reply),
+ [];
+common_reply(Name, {To, _Tag} = From, Reply, NState, Debug) ->
+ reply(From, Reply),
+ sys:handle_debug(Debug, fun print_event/3, Name, {out, Reply, To, NState}).
+
+common_noreply(_Name, _NState, [] = _Debug) ->
+ [];
+common_noreply(Name, NState, Debug) ->
+ sys:handle_debug(Debug, fun print_event/3, Name, {noreply, NState}).
+
+common_become(_Name, _Mod, _NState, [] = _Debug) ->
+ [];
+common_become(Name, Mod, NState, Debug) ->
+ sys:handle_debug(Debug, fun print_event/3, Name, {become, Mod, NState}).
+
+handle_msg({'$gen_call', From, Msg}, GS2State = #gs2_state { mod = Mod,
+ state = State,
+ name = Name,
+ debug = Debug }) ->
+ case catch Mod:handle_call(Msg, From, State) of
+ {reply, Reply, NState} ->
+ Debug1 = common_reply(Name, From, Reply, NState, Debug),
+ loop(GS2State #gs2_state { state = NState,
+ time = infinity,
+ debug = Debug1 });
+ {reply, Reply, NState, Time1} ->
+ Debug1 = common_reply(Name, From, Reply, NState, Debug),
+ loop(GS2State #gs2_state { state = NState,
+ time = Time1,
+ debug = Debug1});
+ {stop, Reason, Reply, NState} ->
+ {'EXIT', R} =
+ (catch terminate(Reason, Msg,
+ GS2State #gs2_state { state = NState })),
+ _ = common_reply(Name, From, Reply, NState, Debug),
+ exit(R);
+ Other ->
+ handle_common_reply(Other, Msg, GS2State)
+ end;
+handle_msg(Msg, GS2State = #gs2_state { mod = Mod, state = State }) ->
+ Reply = (catch dispatch(Msg, Mod, State)),
+ handle_common_reply(Reply, Msg, GS2State).
+
+handle_common_reply(Reply, Msg, GS2State = #gs2_state { name = Name,
+ debug = Debug}) ->
+ case Reply of
+ {noreply, NState} ->
+ Debug1 = common_noreply(Name, NState, Debug),
+ loop(GS2State #gs2_state {state = NState,
+ time = infinity,
+ debug = Debug1});
+ {noreply, NState, Time1} ->
+ Debug1 = common_noreply(Name, NState, Debug),
+ loop(GS2State #gs2_state {state = NState,
+ time = Time1,
+ debug = Debug1});
+ {become, Mod, NState} ->
+ Debug1 = common_become(Name, Mod, NState, Debug),
+ loop(find_prioritisers(
+ GS2State #gs2_state { mod = Mod,
+ state = NState,
+ time = infinity,
+ debug = Debug1 }));
+ {become, Mod, NState, Time1} ->
+ Debug1 = common_become(Name, Mod, NState, Debug),
+ loop(find_prioritisers(
+ GS2State #gs2_state { mod = Mod,
+ state = NState,
+ time = Time1,
+ debug = Debug1 }));
+ _ ->
+ handle_common_termination(Reply, Msg, GS2State)
+ end.
+
+handle_common_termination(Reply, Msg, GS2State) ->
+ case Reply of
+ {stop, Reason, NState} ->
+ terminate(Reason, Msg, GS2State #gs2_state { state = NState });
+ {'EXIT', What} ->
+ terminate(What, Msg, GS2State);
+ _ ->
+ terminate({bad_return_value, Reply}, Msg, GS2State)
+ end.
+
+%%-----------------------------------------------------------------
+%% Callback functions for system messages handling.
+%%-----------------------------------------------------------------
+system_continue(Parent, Debug, GS2State) ->
+ loop(GS2State #gs2_state { parent = Parent, debug = Debug }).
+
+system_terminate(Reason, _Parent, Debug, GS2State) ->
+ terminate(Reason, [], GS2State #gs2_state { debug = Debug }).
+
+system_code_change(GS2State = #gs2_state { mod = Mod,
+ state = State },
+ _Module, OldVsn, Extra) ->
+ case catch Mod:code_change(OldVsn, State, Extra) of
+ {ok, NewState} ->
+ NewGS2State = find_prioritisers(
+ GS2State #gs2_state { state = NewState }),
+ {ok, [NewGS2State]};
+ Else ->
+ Else
+ end.
+
+%%-----------------------------------------------------------------
+%% Format debug messages. Print them as the call-back module sees
+%% them, not as the real erlang messages. Use trace for that.
+%%-----------------------------------------------------------------
+print_event(Dev, {in, Msg}, Name) ->
+ case Msg of
+ {'$gen_call', {From, _Tag}, Call} ->
+ io:format(Dev, "*DBG* ~p got call ~p from ~w~n",
+ [Name, Call, From]);
+ {'$gen_cast', Cast} ->
+ io:format(Dev, "*DBG* ~p got cast ~p~n",
+ [Name, Cast]);
+ _ ->
+ io:format(Dev, "*DBG* ~p got ~p~n", [Name, Msg])
+ end;
+print_event(Dev, {out, Msg, To, State}, Name) ->
+ io:format(Dev, "*DBG* ~p sent ~p to ~w, new state ~w~n",
+ [Name, Msg, To, State]);
+print_event(Dev, {noreply, State}, Name) ->
+ io:format(Dev, "*DBG* ~p new state ~w~n", [Name, State]);
+print_event(Dev, Event, Name) ->
+ io:format(Dev, "*DBG* ~p dbg ~p~n", [Name, Event]).
+
+
+%%% ---------------------------------------------------
+%%% Terminate the server.
+%%% ---------------------------------------------------
+
+-spec terminate(_, _, _) -> no_return().
+
+terminate(Reason, Msg, #gs2_state { name = Name,
+ mod = Mod,
+ state = State,
+ debug = Debug,
+ stop_stats_fun = StopStatsFun
+ } = GS2State) ->
+ StopStatsFun(stop_stats_timer(GS2State)),
+ case catch Mod:terminate(Reason, State) of
+ {'EXIT', R} ->
+ error_info(R, Reason, Name, Msg, State, Debug),
+ exit(R);
+ _ ->
+ case Reason of
+ normal ->
+ exit(normal);
+ shutdown ->
+ exit(shutdown);
+ {shutdown,_}=Shutdown ->
+ exit(Shutdown);
+ _ ->
+ error_info(Reason, undefined, Name, Msg, State, Debug),
+ exit(Reason)
+ end
+ end.
+
+error_info(_Reason, _RootCause, application_controller, _Msg, _State, _Debug) ->
+ %% OTP-5811 Don't send an error report if it's the system process
+ %% application_controller which is terminating - let init take care
+ %% of it instead
+ ok;
+error_info(Reason, RootCause, Name, Msg, State, Debug) ->
+ Reason1 = error_reason(Reason),
+ Fmt =
+ "** Generic server ~p terminating~n"
+ "** Last message in was ~p~n"
+ "** When Server state == ~p~n"
+ "** Reason for termination == ~n** ~p~n",
+ case RootCause of
+ undefined -> format(Fmt, [Name, Msg, State, Reason1]);
+ _ -> format(Fmt ++ "** In 'terminate' callback "
+ "with reason ==~n** ~p~n",
+ [Name, Msg, State, Reason1,
+ error_reason(RootCause)])
+ end,
+ sys:print_log(Debug),
+ ok.
+
+error_reason({undef,[{M,F,A}|MFAs]} = Reason) ->
+ case code:is_loaded(M) of
+ false -> {'module could not be loaded',[{M,F,A}|MFAs]};
+ _ -> case erlang:function_exported(M, F, length(A)) of
+ true -> Reason;
+ false -> {'function not exported',[{M,F,A}|MFAs]}
+ end
+ end;
+error_reason(Reason) ->
+ Reason.
+
+%%% ---------------------------------------------------
+%%% Misc. functions.
+%%% ---------------------------------------------------
+
+opt(Op, [{Op, Value}|_]) ->
+ {ok, Value};
+opt(Op, [_|Options]) ->
+ opt(Op, Options);
+opt(_, []) ->
+ false.
+
+debug_options(Name, Opts) ->
+ case opt(debug, Opts) of
+ {ok, Options} -> dbg_options(Name, Options);
+ _ -> dbg_options(Name, [])
+ end.
+
+dbg_options(Name, []) ->
+ Opts =
+ case init:get_argument(generic_debug) of
+ error ->
+ [];
+ _ ->
+ [log, statistics]
+ end,
+ dbg_opts(Name, Opts);
+dbg_options(Name, Opts) ->
+ dbg_opts(Name, Opts).
+
+dbg_opts(Name, Opts) ->
+ case catch sys:debug_options(Opts) of
+ {'EXIT',_} ->
+ format("~p: ignoring erroneous debug options - ~p~n",
+ [Name, Opts]),
+ [];
+ Dbg ->
+ Dbg
+ end.
+
+get_proc_name(Pid) when is_pid(Pid) ->
+ Pid;
+get_proc_name({local, Name}) ->
+ case process_info(self(), registered_name) of
+ {registered_name, Name} ->
+ Name;
+ {registered_name, _Name} ->
+ exit(process_not_registered);
+ [] ->
+ exit(process_not_registered)
+ end;
+get_proc_name({global, Name}) ->
+ case whereis_name(Name) of
+ undefined ->
+ exit(process_not_registered_globally);
+ Pid when Pid =:= self() ->
+ Name;
+ _Pid ->
+ exit(process_not_registered_globally)
+ end.
+
+get_parent() ->
+ case get('$ancestors') of
+ [Parent | _] when is_pid(Parent)->
+ Parent;
+ [Parent | _] when is_atom(Parent)->
+ name_to_pid(Parent);
+ _ ->
+ exit(process_was_not_started_by_proc_lib)
+ end.
+
+name_to_pid(Name) ->
+ case whereis(Name) of
+ undefined ->
+ case whereis_name(Name) of
+ undefined ->
+ exit(could_not_find_registered_name);
+ Pid ->
+ Pid
+ end;
+ Pid ->
+ Pid
+ end.
+
+whereis_name(Name) ->
+ case ets:lookup(global_names, Name) of
+ [{_Name, Pid, _Method, _RPid, _Ref}] ->
+ if node(Pid) == node() ->
+ case is_process_alive(Pid) of
+ true -> Pid;
+ false -> undefined
+ end;
+ true ->
+ Pid
+ end;
+ [] -> undefined
+ end.
+
+find_prioritisers(GS2State = #gs2_state { mod = Mod }) ->
+ PCall = function_exported_or_default(Mod, 'prioritise_call', 4,
+ fun (_Msg, _From, _State) -> 0 end),
+ PCast = function_exported_or_default(Mod, 'prioritise_cast', 3,
+ fun (_Msg, _State) -> 0 end),
+ PInfo = function_exported_or_default(Mod, 'prioritise_info', 3,
+ fun (_Msg, _State) -> 0 end),
+ GS2State #gs2_state { prioritisers = {PCall, PCast, PInfo} }.
+
+function_exported_or_default(Mod, Fun, Arity, Default) ->
+ case erlang:function_exported(Mod, Fun, Arity) of
+ true -> case Arity of
+ 3 -> fun (Msg, GS2State = #gs2_state { queue = Queue,
+ state = State }) ->
+ Length = priority_queue:len(Queue),
+ case catch Mod:Fun(Msg, Length, State) of
+ drop ->
+ drop;
+ Res when is_integer(Res) ->
+ Res;
+ Err ->
+ handle_common_termination(Err, Msg, GS2State)
+ end
+ end;
+ 4 -> fun (Msg, From, GS2State = #gs2_state { queue = Queue,
+ state = State }) ->
+ Length = priority_queue:len(Queue),
+ case catch Mod:Fun(Msg, From, Length, State) of
+ Res when is_integer(Res) ->
+ Res;
+ Err ->
+ handle_common_termination(Err, Msg, GS2State)
+ end
+ end
+ end;
+ false -> Default
+ end.
+
+%%-----------------------------------------------------------------
+%% Status information
+%%-----------------------------------------------------------------
+format_status(Opt, StatusData) ->
+ [PDict, SysState, Parent, Debug,
+ #gs2_state{name = Name, state = State, mod = Mod, queue = Queue}] =
+ StatusData,
+ NameTag = if is_pid(Name) ->
+ pid_to_list(Name);
+ is_atom(Name) ->
+ Name
+ end,
+ Header = lists:concat(["Status for generic server ", NameTag]),
+ Log = sys:get_debug(log, Debug, []),
+ Specific = callback(Mod, format_status, [Opt, [PDict, State]],
+ fun () -> [{data, [{"State", State}]}] end),
+ Messages = callback(Mod, format_message_queue, [Opt, Queue],
+ fun () -> priority_queue:to_list(Queue) end),
+ [{header, Header},
+ {data, [{"Status", SysState},
+ {"Parent", Parent},
+ {"Logged events", Log},
+ {"Queued messages", Messages}]} |
+ Specific].
+
+callback(Mod, FunName, Args, DefaultThunk) ->
+ case erlang:function_exported(Mod, FunName, length(Args)) of
+ true -> case catch apply(Mod, FunName, Args) of
+ {'EXIT', _} -> DefaultThunk();
+ Success -> Success
+ end;
+ false -> DefaultThunk()
+ end.
+
+stats_funs() ->
+ case ets:info(gen_server2_metrics) of
+ undefined ->
+ {fun(GS2State) -> GS2State end,
+ fun(GS2State) -> GS2State end};
+ _ ->
+ {fun emit_stats/1, fun stop_stats/1}
+ end.
+
+init_stats(State = #gs2_state{ emit_stats_fun = EmitStatsFun }) ->
+ StateWithInitTimer = rabbit_event:init_stats_timer(State, #gs2_state.timer),
+ next_stats_timer(EmitStatsFun(StateWithInitTimer)).
+
+next_stats_timer(State) ->
+ ensure_stats_timer(rabbit_event:reset_stats_timer(State, #gs2_state.timer)).
+
+ensure_stats_timer(State) ->
+ rabbit_event:ensure_stats_timer(State,
+ #gs2_state.timer,
+ emit_gen_server2_stats).
+
+stop_stats_timer(State) ->
+ rabbit_event:stop_stats_timer(State, #gs2_state.timer).
+
+emit_stats(State = #gs2_state{queue = Queue}) ->
+ rabbit_core_metrics:gen_server2_stats(self(), priority_queue:len(Queue)),
+ State.
+
+stop_stats(State) ->
+ rabbit_core_metrics:gen_server2_deleted(self()),
+ State.
diff --git a/deps/rabbit_common/src/lager_forwarder_backend.erl b/deps/rabbit_common/src/lager_forwarder_backend.erl
new file mode 100644
index 0000000000..936a1259ce
--- /dev/null
+++ b/deps/rabbit_common/src/lager_forwarder_backend.erl
@@ -0,0 +1,120 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(lager_forwarder_backend).
+
+-behaviour(gen_event).
+
+-export([init/1, handle_call/2, handle_event/2, handle_info/2, terminate/2,
+ code_change/3]).
+
+-record(state, {
+ next_sink :: atom(),
+ level :: {'mask', integer()} | inherit
+ }).
+
+%% @private
+init(Sink) when is_atom(Sink) ->
+ init([Sink]);
+init([Sink]) when is_atom(Sink) ->
+ init([Sink, inherit]);
+init([Sink, inherit]) when is_atom(Sink) ->
+ {ok, #state{
+ next_sink = Sink,
+ level = inherit
+ }};
+init([Sink, Level]) when is_atom(Sink) ->
+ try
+ Mask = lager_util:config_to_mask(Level),
+ {ok, #state{
+ next_sink = Sink,
+ level = Mask
+ }}
+ catch
+ _:_ ->
+ {error, {fatal, bad_log_level}}
+ end;
+init(_) ->
+ {error, {fatal, bad_config}}.
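+
+%% Example (a sketch; my_sink_lager_event is a hypothetical sink
+%% name): a lager handler entry such as
+%%   {lager_forwarder_backend, [my_sink_lager_event, info]}
+%% matches init/1 above and forwards events at 'info' and above to
+%% that sink, while {lager_forwarder_backend, [my_sink_lager_event]}
+%% inherits the effective level of the next sink's own handlers.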
+
+%% @private
+handle_call(get_loglevel, #state{next_sink = Sink, level = inherit} = State) ->
+ SinkPid = whereis(Sink),
+ Mask = case self() of
+ SinkPid ->
+ %% Avoid direct loops, defaults to 'info'.
+ 127;
+ _ ->
+ try
+ Levels = [gen_event:call(SinkPid, Handler, get_loglevel,
+ infinity)
+ || Handler <- gen_event:which_handlers(SinkPid)],
+ lists:foldl(fun
+ ({mask, Mask}, Acc) ->
+ Mask bor Acc;
+ (Level, Acc) when is_integer(Level) ->
+ {mask, Mask} = lager_util:config_to_mask(
+ lager_util:num_to_level(Level)),
+ Mask bor Acc;
+ (_, Acc) ->
+ Acc
+ end, 0, Levels)
+ catch
+ exit:noproc ->
+ 127
+ end
+ end,
+ {ok, {mask, Mask}, State};
+handle_call(get_loglevel, #state{level = Mask} = State) ->
+ {ok, Mask, State};
+handle_call({set_loglevel, inherit}, State) ->
+ {ok, ok, State#state{level = inherit}};
+handle_call({set_loglevel, Level}, State) ->
+ try lager_util:config_to_mask(Level) of
+ Mask ->
+ {ok, ok, State#state{level = Mask}}
+ catch
+ _:_ ->
+ {ok, {error, bad_log_level}, State}
+ end;
+handle_call(_Request, State) ->
+ {ok, ok, State}.
+
+%% @private
+handle_event({log, LagerMsg}, #state{next_sink = Sink, level = Mask} = State) ->
+ SinkPid = whereis(Sink),
+ case self() of
+ SinkPid ->
+ %% Avoid direct loops.
+ ok;
+ _ ->
+ case Mask =:= inherit orelse
+ lager_util:is_loggable(LagerMsg, Mask, ?MODULE) of
+ true ->
+ case lager_config:get({Sink, async}, false) of
+ true -> gen_event:notify(SinkPid, {log, LagerMsg});
+ false -> gen_event:sync_notify(SinkPid, {log, LagerMsg})
+ end;
+ false ->
+ ok
+ end
+ end,
+ {ok, State};
+handle_event(_Event, State) ->
+ {ok, State}.
+
+%% @private
+handle_info(_Info, State) ->
+ {ok, State}.
+
+%% @private
+terminate(_Reason, _State) ->
+ ok.
+
+%% @private
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
diff --git a/deps/rabbit_common/src/mirrored_supervisor.erl b/deps/rabbit_common/src/mirrored_supervisor.erl
new file mode 100644
index 0000000000..61ddc068b6
--- /dev/null
+++ b/deps/rabbit_common/src/mirrored_supervisor.erl
@@ -0,0 +1,513 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(mirrored_supervisor).
+
+%% pg2 is deprecated in OTP 23.
+-compile(nowarn_deprecated_function).
+
+%% Mirrored Supervisor
+%% ===================
+%%
+%% This module implements a new type of supervisor. It acts like a
+%% normal supervisor, but at creation time you also provide the name
+%% of a process group to join. All the supervisors within the
+%% process group act like a single large distributed supervisor:
+%%
+%% * A process with a given child_id will only exist on one
+%% supervisor within the group.
+%%
+%% * If one supervisor fails, children may migrate to surviving
+%% supervisors within the group.
+%%
+%% In almost all cases you will want to use the module name for the
+%% process group. Using multiple process groups with the same module
+%% name is supported. Having multiple module names for the same
+%% process group will lead to undefined behaviour.
+%%
+%% Motivation
+%% ----------
+%%
+%% Sometimes you have processes which:
+%%
+%% * Only need to exist once per cluster.
+%%
+%% * Do not contain much state (or can reconstruct their state easily).
+%%
+%% * Need to be restarted elsewhere should they be running on a node
+%% which fails.
+%%
+%% By creating a mirrored supervisor group with one supervisor on
+%% each node, that's what you get.
+%%
+%%
+%% API use
+%% -------
+%%
+%% This is basically the same as for supervisor, except that:
+%%
+%% 1) start_link(Module, Args) becomes
+%% start_link(Group, TxFun, Module, Args).
+%%
+%% 2) start_link({local, Name}, Module, Args) becomes
+%% start_link({local, Name}, Group, TxFun, Module, Args).
+%%
+%% 3) start_link({global, Name}, Module, Args) is not available.
+%%
+%% 4) The restart strategy simple_one_for_one is not available.
+%%
+%% 5) Mnesia is used to hold global state. At some point your
+%% application should invoke create_tables() (or table_definitions()
+%% if it wants to manage table creation itself).
+%%
+%% The TxFun parameter to start_link/{4,5} is a function which the
+%% mirrored supervisor can use to execute Mnesia transactions. In the
+%% RabbitMQ server this goes via a worker pool; in other cases a
+%% function like:
+%%
+%% tx_fun(Fun) ->
+%% case mnesia:sync_transaction(Fun) of
+%% {atomic, Result} -> Result;
+%% {aborted, Reason} -> throw({error, Reason})
+%% end.
+%%
+%% could be used.
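+%%
+%% A mirrored supervisor joining group my_group could then be started
+%% with (a sketch; all names are hypothetical):
+%%
+%%   mirrored_supervisor:start_link({local, my_sup}, my_group,
+%%                                  fun tx_fun/1, my_sup_callback, []),
+%%
+%% where my_sup_callback implements init/1 as for a regular
+%% supervisor.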
+%%
+%% Internals
+%% ---------
+%%
+%% Each mirrored_supervisor consists of three processes - the overall
+%% supervisor, the delegate supervisor and the mirroring server. The
+%% overall supervisor supervises the other two processes. Its pid is
+%% the one returned from start_link; the pids of the other two
+%% processes are effectively hidden in the API.
+%%
+%% The delegate supervisor is in charge of supervising all the child
+%% processes that are added to the supervisor as usual.
+%%
+%% The mirroring server intercepts calls to the supervisor API
+%% (directed at the overall supervisor), does any special handling,
+%% and forwards everything to the delegate supervisor.
+%%
+%% This module implements all three, hence init/1 is somewhat overloaded.
+%%
+%% The mirroring server creates and joins a process group on
+%% startup. It monitors all the existing members of this group, and
+%% broadcasts a "hello" message to them so that they can monitor it in
+%% turn. When it receives a 'DOWN' message, it checks to see if it's
+%% the "first" server in the group and restarts all the child
+%% processes from the dead supervisor if so.
+%%
+%% In the future we might load balance this.
+%%
+%% Startup is slightly fiddly. The mirroring server needs to know the
+%% Pid of the overall supervisor, but we don't have that until it has
+%% started. Therefore we set this after the fact. We also start any
+%% children we found in Module:init() at this point, since starting
+%% children requires knowing the overall supervisor pid.
+
+-define(SUPERVISOR, supervisor2).
+-define(GEN_SERVER, gen_server2).
+-define(SUP_MODULE, mirrored_supervisor_sups).
+
+-define(TABLE, mirrored_sup_childspec).
+-define(TABLE_DEF,
+ {?TABLE,
+ [{record_name, mirrored_sup_childspec},
+ {type, ordered_set},
+ {attributes, record_info(fields, mirrored_sup_childspec)}]}).
+-define(TABLE_MATCH, {match, #mirrored_sup_childspec{ _ = '_' }}).
+
+-export([start_link/4, start_link/5,
+ start_child/2, restart_child/2,
+ delete_child/2, terminate_child/2,
+ which_children/1, count_children/1, check_childspecs/1]).
+
+-behaviour(?GEN_SERVER).
+
+-export([init/1, handle_call/3, handle_info/2, terminate/2, code_change/3,
+ handle_cast/2]).
+
+-export([start_internal/3]).
+-export([create_tables/0, table_definitions/0]).
+
+-record(mirrored_sup_childspec, {key, mirroring_pid, childspec}).
+
+-record(state, {overall,
+ delegate,
+ group,
+ tx_fun,
+ initial_childspecs,
+ child_order}).
+
+%%--------------------------------------------------------------------------
+%% Callback behaviour
+%%--------------------------------------------------------------------------
+
+-callback init(Args :: term()) ->
+ {ok, {{RestartStrategy :: ?SUPERVISOR:strategy(),
+ MaxR :: non_neg_integer(),
+ MaxT :: non_neg_integer()},
+ [ChildSpec :: ?SUPERVISOR:child_spec()]}}
+ | ignore.
+
+%%--------------------------------------------------------------------------
+%% Specs
+%%--------------------------------------------------------------------------
+
+-type startlink_err() :: {'already_started', pid()} | 'shutdown' | term().
+-type startlink_ret() :: {'ok', pid()} | 'ignore' | {'error', startlink_err()}.
+
+-type group_name() :: any().
+
+-type(tx_fun() :: fun((fun(() -> A)) -> A)).
+
+-spec start_link(GroupName, TxFun, Module, Args) -> startlink_ret() when
+ GroupName :: group_name(),
+ TxFun :: tx_fun(),
+ Module :: module(),
+ Args :: term().
+
+-spec start_link(SupName, GroupName, TxFun, Module, Args) ->
+ startlink_ret() when
+ SupName :: ?SUPERVISOR:sup_name(),
+ GroupName :: group_name(),
+ TxFun :: tx_fun(),
+ Module :: module(),
+ Args :: term().
+
+-spec start_internal(Group, TxFun, ChildSpecs) -> Result when
+ Group :: group_name(),
+ TxFun :: tx_fun(),
+ ChildSpecs :: [?SUPERVISOR:child_spec()],
+ Result :: {'ok', pid()} | {'error', term()}.
+
+-spec create_tables() -> Result when
+ Result :: 'ok'.
+
+%%----------------------------------------------------------------------------
+
+start_link(Group, TxFun, Mod, Args) ->
+ start_link0([], Group, TxFun, init(Mod, Args)).
+
+start_link({local, SupName}, Group, TxFun, Mod, Args) ->
+ start_link0([{local, SupName}], Group, TxFun, init(Mod, Args));
+
+start_link({global, _SupName}, _Group, _TxFun, _Mod, _Args) ->
+ erlang:error(badarg).
+
+start_link0(Prefix, Group, TxFun, Init) ->
+ case apply(?SUPERVISOR, start_link,
+ Prefix ++ [?SUP_MODULE, {overall, Group, TxFun, Init}]) of
+ {ok, Pid} -> case catch call(Pid, {init, Pid}) of
+ ok -> {ok, Pid};
+ E -> E
+ end;
+ Other -> Other
+ end.
+
+init(Mod, Args) ->
+ case Mod:init(Args) of
+ {ok, {{Bad, _, _}, _ChildSpecs}} when
+ Bad =:= simple_one_for_one -> erlang:error(badarg);
+ Init -> Init
+ end.
+
+start_child(Sup, ChildSpec) -> call(Sup, {start_child, ChildSpec}).
+delete_child(Sup, Id) -> find_call(Sup, Id, {delete_child, Id}).
+restart_child(Sup, Id) -> find_call(Sup, Id, {msg, restart_child, [Id]}).
+terminate_child(Sup, Id) -> find_call(Sup, Id, {msg, terminate_child, [Id]}).
+which_children(Sup) -> fold(which_children, Sup, fun lists:append/2).
+count_children(Sup) -> fold(count_children, Sup, fun add_proplists/2).
+check_childspecs(Specs) -> ?SUPERVISOR:check_childspecs(Specs).
+
+call(Sup, Msg) -> ?GEN_SERVER:call(mirroring(Sup), Msg, infinity).
+cast(Sup, Msg) -> with_exit_handler(
+ fun() -> ok end,
+ fun() -> ?GEN_SERVER:cast(mirroring(Sup), Msg) end).
+
+find_call(Sup, Id, Msg) ->
+ Group = call(Sup, group),
+ MatchHead = #mirrored_sup_childspec{mirroring_pid = '$1',
+ key = {Group, Id},
+ _ = '_'},
+ %% If we did this inside a tx we could still have failover
+ %% immediately after the tx - we can't be 100% sure here. So we may
+ %% as well dirty_select.
+ case mnesia:dirty_select(?TABLE, [{MatchHead, [], ['$1']}]) of
+ [Mirror] -> call(Mirror, Msg);
+ [] -> {error, not_found}
+ end.
+
+fold(FunAtom, Sup, AggFun) ->
+ Group = call(Sup, group),
+ lists:foldl(AggFun, [],
+ [apply(?SUPERVISOR, FunAtom, [D]) ||
+ M <- pg2:get_members(Group),
+ D <- [delegate(M)]]).
+
+child(Sup, Id) ->
+ [Pid] = [Pid || {Id1, Pid, _, _} <- ?SUPERVISOR:which_children(Sup),
+ Id1 =:= Id],
+ Pid.
+
+delegate(Sup) -> child(Sup, delegate).
+mirroring(Sup) -> child(Sup, mirroring).
+
+%%----------------------------------------------------------------------------
+
+start_internal(Group, TxFun, ChildSpecs) ->
+ ?GEN_SERVER:start_link(?MODULE, {Group, TxFun, ChildSpecs},
+ [{timeout, infinity}]).
+
+%%----------------------------------------------------------------------------
+
+init({Group, TxFun, ChildSpecs}) ->
+ {ok, #state{group = Group,
+ tx_fun = TxFun,
+ initial_childspecs = ChildSpecs,
+ child_order = child_order_from(ChildSpecs)}}.
+
+handle_call({init, Overall}, _From,
+ State = #state{overall = undefined,
+ delegate = undefined,
+ group = Group,
+ tx_fun = TxFun,
+ initial_childspecs = ChildSpecs}) ->
+ process_flag(trap_exit, true),
+ pg2:create(Group),
+ ok = pg2:join(Group, Overall),
+ Rest = pg2:get_members(Group) -- [Overall],
+ case Rest of
+ [] -> TxFun(fun() -> delete_all(Group) end);
+ _ -> ok
+ end,
+ [begin
+ ?GEN_SERVER:cast(mirroring(Pid), {ensure_monitoring, Overall}),
+ erlang:monitor(process, Pid)
+ end || Pid <- Rest],
+ Delegate = delegate(Overall),
+ erlang:monitor(process, Delegate),
+ State1 = State#state{overall = Overall, delegate = Delegate},
+ case errors([maybe_start(Group, TxFun, Overall, Delegate, S)
+ || S <- ChildSpecs]) of
+ [] -> {reply, ok, State1};
+ Errors -> {stop, {shutdown, Errors}, State1}
+ end;
+
+handle_call({start_child, ChildSpec}, _From,
+ State = #state{overall = Overall,
+ delegate = Delegate,
+ group = Group,
+ tx_fun = TxFun}) ->
+ {reply, case maybe_start(Group, TxFun, Overall, Delegate, ChildSpec) of
+ already_in_mnesia -> {error, already_present};
+ {already_in_mnesia, Pid} -> {error, {already_started, Pid}};
+ Else -> Else
+ end, State};
+
+handle_call({delete_child, Id}, _From, State = #state{delegate = Delegate,
+ group = Group,
+ tx_fun = TxFun}) ->
+ {reply, stop(Group, TxFun, Delegate, Id), State};
+
+handle_call({msg, F, A}, _From, State = #state{delegate = Delegate}) ->
+ {reply, apply(?SUPERVISOR, F, [Delegate | A]), State};
+
+handle_call(group, _From, State = #state{group = Group}) ->
+ {reply, Group, State};
+
+handle_call(Msg, _From, State) ->
+ {stop, {unexpected_call, Msg}, State}.
+
+handle_cast({ensure_monitoring, Pid}, State) ->
+ erlang:monitor(process, Pid),
+ {noreply, State};
+
+handle_cast({die, Reason}, State = #state{group = Group}) ->
+ _ = tell_all_peers_to_die(Group, Reason),
+ {stop, Reason, State};
+
+handle_cast(Msg, State) ->
+ {stop, {unexpected_cast, Msg}, State}.
+
+handle_info({'DOWN', _Ref, process, Pid, Reason},
+ State = #state{delegate = Pid, group = Group}) ->
+ %% Since the delegate is temporary, its death won't cause us to
+ %% die. Since the overall supervisor kills processes in reverse
+ %% order when shutting down "from above" and we started after the
+ %% delegate, if we see the delegate die then that means it died
+ %% "from below" i.e. due to the behaviour of its children, not
+ %% because the whole app was being torn down.
+ %%
+ %% Therefore if we get here we know we need to cause the entire
+ %% mirrored sup to shut down, not just fail over.
+ _ = tell_all_peers_to_die(Group, Reason),
+ {stop, Reason, State};
+
+handle_info({'DOWN', _Ref, process, Pid, _Reason},
+ State = #state{delegate = Delegate,
+ group = Group,
+ tx_fun = TxFun,
+ overall = O,
+ child_order = ChildOrder}) ->
+ %% TODO load balance this
+ %% No guarantee pg2 will have received the DOWN before us.
+ R = case lists:sort(pg2:get_members(Group)) -- [Pid] of
+ [O | _] -> ChildSpecs =
+ TxFun(fun() -> update_all(O, Pid) end),
+ [start(Delegate, ChildSpec)
+ || ChildSpec <- restore_child_order(ChildSpecs,
+ ChildOrder)];
+ _ -> []
+ end,
+ case errors(R) of
+ [] -> {noreply, State};
+ Errors -> {stop, {shutdown, Errors}, State}
+ end;
+
+handle_info(Info, State) ->
+ {stop, {unexpected_info, Info}, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+%%----------------------------------------------------------------------------
+
+tell_all_peers_to_die(Group, Reason) ->
+ [cast(P, {die, Reason}) || P <- pg2:get_members(Group) -- [self()]].
+
+maybe_start(Group, TxFun, Overall, Delegate, ChildSpec) ->
+ try TxFun(fun() -> check_start(Group, Overall, Delegate, ChildSpec) end) of
+ start -> start(Delegate, ChildSpec);
+ undefined -> already_in_mnesia;
+ Pid -> {already_in_mnesia, Pid}
+ catch
+ %% If we are torn down while in the transaction...
+ {error, E} -> {error, E}
+ end.
+
+check_start(Group, Overall, Delegate, ChildSpec) ->
+ case mnesia:wread({?TABLE, {Group, id(ChildSpec)}}) of
+ [] -> _ = write(Group, Overall, ChildSpec),
+ start;
+ [S] -> #mirrored_sup_childspec{key = {Group, Id},
+ mirroring_pid = Pid} = S,
+ case Overall of
+ Pid -> child(Delegate, Id);
+ _ -> case supervisor(Pid) of
+ dead -> _ = write(Group, Overall, ChildSpec),
+ start;
+ Delegate0 -> child(Delegate0, Id)
+ end
+ end
+ end.
+
+supervisor(Pid) -> with_exit_handler(fun() -> dead end,
+ fun() -> delegate(Pid) end).
+
+write(Group, Overall, ChildSpec) ->
+ S = #mirrored_sup_childspec{key = {Group, id(ChildSpec)},
+ mirroring_pid = Overall,
+ childspec = ChildSpec},
+ ok = mnesia:write(?TABLE, S, write),
+ ChildSpec.
+
+delete(Group, Id) ->
+ ok = mnesia:delete({?TABLE, {Group, Id}}).
+
+start(Delegate, ChildSpec) ->
+ apply(?SUPERVISOR, start_child, [Delegate, ChildSpec]).
+
+stop(Group, TxFun, Delegate, Id) ->
+ try TxFun(fun() -> check_stop(Group, Delegate, Id) end) of
+ deleted -> apply(?SUPERVISOR, delete_child, [Delegate, Id]);
+ running -> {error, running}
+ catch
+ {error, E} -> {error, E}
+ end.
+
+check_stop(Group, Delegate, Id) ->
+ case child(Delegate, Id) of
+ undefined -> delete(Group, Id),
+ deleted;
+ _ -> running
+ end.
+
+id({Id, _, _, _, _, _}) -> Id.
+
+update_all(Overall, OldOverall) ->
+ MatchHead = #mirrored_sup_childspec{mirroring_pid = OldOverall,
+ key = '$1',
+ childspec = '$2',
+ _ = '_'},
+ [write(Group, Overall, C) ||
+ [{Group, _Id}, C] <- mnesia:select(?TABLE, [{MatchHead, [], ['$$']}])].
+
+delete_all(Group) ->
+ MatchHead = #mirrored_sup_childspec{key = {Group, '_'},
+ childspec = '$1',
+ _ = '_'},
+ [delete(Group, id(C)) ||
+ C <- mnesia:select(?TABLE, [{MatchHead, [], ['$1']}])].
+
+errors(Results) -> [E || {error, E} <- Results].
+
+%%----------------------------------------------------------------------------
+
+create_tables() -> create_tables([?TABLE_DEF]).
+
+create_tables([]) ->
+ ok;
+create_tables([{Table, Attributes} | Ts]) ->
+ case mnesia:create_table(Table, Attributes) of
+ {atomic, ok} -> create_tables(Ts);
+ {aborted, {already_exists, ?TABLE}} -> create_tables(Ts);
+ Err -> Err
+ end.
+
+table_definitions() ->
+ {Name, Attributes} = ?TABLE_DEF,
+ [{Name, [?TABLE_MATCH | Attributes]}].
+
+%%----------------------------------------------------------------------------
+
+with_exit_handler(Handler, Thunk) ->
+ try
+ Thunk()
+ catch
+ exit:{R, _} when R =:= noproc; R =:= nodedown;
+ R =:= normal; R =:= shutdown ->
+ Handler();
+ exit:{{R, _}, _} when R =:= nodedown; R =:= shutdown ->
+ Handler()
+ end.
+
+add_proplists(P1, P2) ->
+ add_proplists(lists:keysort(1, P1), lists:keysort(1, P2), []).
+add_proplists([], P2, Acc) -> P2 ++ Acc;
+add_proplists(P1, [], Acc) -> P1 ++ Acc;
+add_proplists([{K, V1} | P1], [{K, V2} | P2], Acc) ->
+ add_proplists(P1, P2, [{K, V1 + V2} | Acc]);
+add_proplists([{K1, _} = KV | P1], [{K2, _} | _] = P2, Acc) when K1 < K2 ->
+ add_proplists(P1, P2, [KV | Acc]);
+add_proplists(P1, [KV | P2], Acc) ->
+ add_proplists(P1, P2, [KV | Acc]).
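+
+%% For instance, add_proplists([{workers, 2}], [{specs, 1}, {workers, 3}])
+%% sums values under shared keys, yielding [{workers, 5}, {specs, 1}];
+%% count_children/1 above uses this to aggregate counts across members.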
+
+child_order_from(ChildSpecs) ->
+ lists:zipwith(fun(C, N) ->
+ {id(C), N}
+ end, ChildSpecs, lists:seq(1, length(ChildSpecs))).
+
+restore_child_order(ChildSpecs, ChildOrder) ->
+ lists:sort(fun(A, B) ->
+ proplists:get_value(id(A), ChildOrder)
+ < proplists:get_value(id(B), ChildOrder)
+ end, ChildSpecs).
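+
+%% e.g. for child ids [a, b], child_order_from/1 yields [{a, 1}, {b, 2}];
+%% restore_child_order/2 then re-sorts the childspecs recovered from
+%% mnesia so a failed-over mirror starts them in their original order.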
diff --git a/deps/rabbit_common/src/mnesia_sync.erl b/deps/rabbit_common/src/mnesia_sync.erl
new file mode 100644
index 0000000000..2287436849
--- /dev/null
+++ b/deps/rabbit_common/src/mnesia_sync.erl
@@ -0,0 +1,64 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(mnesia_sync).
+
+%% mnesia:sync_transaction/3 fails to guarantee that the log is flushed to disk
+%% at commit. This module is an attempt to minimise the risk of data loss by
+%% performing a coalesced log fsync. Unfortunately this is performed regardless
+%% of whether or not the log was appended to.
+
+-behaviour(gen_server).
+
+-export([sync/0]).
+
+-export([start_link/0, init/1, handle_call/3, handle_cast/2, handle_info/2,
+ terminate/2, code_change/3]).
+
+-define(SERVER, ?MODULE).
+
+-record(state, {waiting, disc_node}).
+
+%%----------------------------------------------------------------------------
+
+-spec sync() -> 'ok'.
+
+%%----------------------------------------------------------------------------
+
+start_link() ->
+ gen_server:start_link({local, ?SERVER}, ?MODULE, [], []).
+
+sync() ->
+ gen_server:call(?SERVER, sync, infinity).
+
+%%----------------------------------------------------------------------------
+
+init([]) ->
+ {ok, #state{disc_node = mnesia:system_info(use_dir), waiting = []}}.
+
+handle_call(sync, _From, #state{disc_node = false} = State) ->
+ {reply, ok, State};
+handle_call(sync, From, #state{waiting = Waiting} = State) ->
+ {noreply, State#state{waiting = [From | Waiting]}, 0};
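+%% Replying is deferred: the zero timeout fires once the mailbox has
+%% drained, so all sync requests queued in the meantime are serviced
+%% by the single disk_log:sync/1 call in handle_info/2.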
+handle_call(Request, _From, State) ->
+ {stop, {unhandled_call, Request}, State}.
+
+handle_cast(Request, State) ->
+ {stop, {unhandled_cast, Request}, State}.
+
+handle_info(timeout, #state{waiting = Waiting} = State) ->
+ ok = disk_log:sync(latest_log),
+ _ = [gen_server:reply(From, ok) || From <- Waiting],
+ {noreply, State#state{waiting = []}};
+handle_info(Message, State) ->
+ {stop, {unhandled_info, Message}, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
diff --git a/deps/rabbit_common/src/pmon.erl b/deps/rabbit_common/src/pmon.erl
new file mode 100644
index 0000000000..f44168dfcf
--- /dev/null
+++ b/deps/rabbit_common/src/pmon.erl
@@ -0,0 +1,96 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(pmon).
+
+%% Process Monitor
+%% ================
+%%
+%% This module monitors processes so that each process has at most
+%% one monitor. Monitored processes can be dynamically added and
+%% removed.
+%%
+%% Unlike erlang:[de]monitor* functions, this module
+%% provides basic querying capability and avoids contacting down nodes.
+%%
+%% It is used to monitor nodes, queue mirrors, and by
+%% the queue collector, among other things.
+
+-export([new/0, new/1, monitor/2, monitor_all/2, demonitor/2,
+ is_monitored/2, erase/2, monitored/1, is_empty/1]).
+
+-compile({no_auto_import, [monitor/2]}).
+
+-record(state, {monitors = #{} :: #{item() => reference()},
+ module = erlang :: module()}).
+
+%%----------------------------------------------------------------------------
+
+-export_type([?MODULE/0]).
+
+-opaque(?MODULE() :: #state{}).
+
+-type(item() :: pid() | {atom(), node()}).
+
+
+-spec new() -> ?MODULE().
+new() -> new(erlang).
+
+-spec new('erlang' | 'delegate') -> ?MODULE().
+new(Module) -> #state{module = Module}.
+
+-spec monitor(item(), ?MODULE()) -> ?MODULE().
+monitor(Item, S = #state{monitors = M, module = Module}) ->
+ case maps:is_key(Item, M) of
+ true -> S;
+ false -> case node_alive_shortcut(Item) of
+ true -> Ref = Module:monitor(process, Item),
+ S#state{monitors = maps:put(Item, Ref, M)};
+ false -> self() ! {'DOWN', fake_ref, process, Item,
+ nodedown},
+ S
+ end
+ end.
+
+-spec monitor_all([item()], ?MODULE()) -> ?MODULE().
+monitor_all([], S) -> S; %% optimisation
+monitor_all([Item], S) -> monitor(Item, S); %% optimisation
+monitor_all(Items, S) -> lists:foldl(fun monitor/2, S, Items).
+
+-spec demonitor(item(), ?MODULE()) -> ?MODULE().
+demonitor(Item, S = #state{monitors = M0, module = Module}) ->
+ case maps:take(Item, M0) of
+ {MRef, M} -> Module:demonitor(MRef),
+ S#state{monitors = M};
+ error -> S
+ end.
+
+-spec is_monitored(item(), ?MODULE()) -> boolean().
+is_monitored(Item, #state{monitors = M}) -> maps:is_key(Item, M).
+
+-spec erase(item(), ?MODULE()) -> ?MODULE().
+erase(Item, S = #state{monitors = M}) ->
+ S#state{monitors = maps:remove(Item, M)}.
+
+-spec monitored(?MODULE()) -> [item()].
+monitored(#state{monitors = M}) -> maps:keys(M).
+
+-spec is_empty(?MODULE()) -> boolean().
+is_empty(#state{monitors = M}) -> maps:size(M) == 0.
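+
+%% A usage sketch (Pid being any process of interest):
+%%   M0 = pmon:new(),
+%%   M1 = pmon:monitor(Pid, M0),
+%%   true = pmon:is_monitored(Pid, M1),
+%%   M2 = pmon:demonitor(Pid, M1),
+%%   false = pmon:is_monitored(Pid, M2).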
+
+%%----------------------------------------------------------------------------
+
+%% We check here to see if the node is alive in order to avoid trying
+%% to connect to it if it isn't - this can cause substantial
+%% slowdowns. We can't perform this shortcut if passed {Name, Node}
+%% since we would need to convert that into a pid for the fake 'DOWN'
+%% message, so we always return true here - but that's OK, it's just
+%% an optimisation.
+node_alive_shortcut(P) when is_pid(P) ->
+ lists:member(node(P), [node() | nodes()]);
+node_alive_shortcut({_Name, _Node}) ->
+ true.
diff --git a/deps/rabbit_common/src/priority_queue.erl b/deps/rabbit_common/src/priority_queue.erl
new file mode 100644
index 0000000000..4a7867129d
--- /dev/null
+++ b/deps/rabbit_common/src/priority_queue.erl
@@ -0,0 +1,234 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+%% Priority queues have essentially the same interface as ordinary
+%% queues, except that a) there is an in/3 that takes a priority, and
+%% b) we have only implemented the core API we need.
+%%
+%% Priorities should be integers - the higher the value the higher the
+%% priority - but we don't actually check that.
+%%
+%% in/2 inserts items with priority 0.
+%%
+%% We optimise the case where a priority queue is being used just like
+%% an ordinary queue. When that is the case we represent the priority
+%% queue as an ordinary queue. We could just call into the 'queue'
+%% module for that, but for efficiency we implement the relevant
+%% functions directly in here, thus saving on inter-module calls and
+%% eliminating a level of boxing.
+%%
+%% When the queue contains items with non-zero priorities, it is
+%% represented as a sorted kv list with the inverted Priority as the
+%% key and an ordinary queue as the value. Here again we use our own
+%% ordinary queue implementation for efficiency, often making recursive
+%% calls into the same function knowing that ordinary queues represent
+%% a base case.
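+%%
+%% For example:
+%%   Q0 = priority_queue:new(),
+%%   Q1 = priority_queue:in(low, Q0),         %% priority 0
+%%   Q2 = priority_queue:in(high, 5, Q1),
+%%   {{value, high, 5}, _Q3} = priority_queue:out_p(Q2).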
+
+
+-module(priority_queue).
+
+-export([new/0, is_queue/1, is_empty/1, len/1, to_list/1, from_list/1,
+ in/2, in/3, out/1, out_p/1, join/2, filter/2, fold/3, highest/1,
+ member/2]).
+
+%%----------------------------------------------------------------------------
+
+-export_type([q/0]).
+
+-type(q() :: pqueue()).
+-type(priority() :: integer() | 'infinity').
+-type(squeue() :: {queue, [any()], [any()], non_neg_integer()}).
+-type(pqueue() :: squeue() | {pqueue, [{priority(), squeue()}]}).
+
+-spec new() -> pqueue().
+-spec is_queue(any()) -> boolean().
+-spec is_empty(pqueue()) -> boolean().
+-spec len(pqueue()) -> non_neg_integer().
+-spec to_list(pqueue()) -> [{priority(), any()}].
+-spec from_list([{priority(), any()}]) -> pqueue().
+-spec in(any(), pqueue()) -> pqueue().
+-spec in(any(), priority(), pqueue()) -> pqueue().
+-spec out(pqueue()) -> {empty | {value, any()}, pqueue()}.
+-spec out_p(pqueue()) -> {empty | {value, any(), priority()}, pqueue()}.
+-spec join(pqueue(), pqueue()) -> pqueue().
+-spec filter(fun ((any()) -> boolean()), pqueue()) -> pqueue().
+-spec fold
+ (fun ((any(), priority(), A) -> A), A, pqueue()) -> A.
+-spec highest(pqueue()) -> priority() | 'empty'.
+-spec member(any(), pqueue()) -> boolean().
+
+%%----------------------------------------------------------------------------
+
+new() ->
+ {queue, [], [], 0}.
+
+is_queue({queue, R, F, L}) when is_list(R), is_list(F), is_integer(L) ->
+ true;
+is_queue({pqueue, Queues}) when is_list(Queues) ->
+ lists:all(fun ({infinity, Q}) -> is_queue(Q);
+ ({P, Q}) -> is_integer(P) andalso is_queue(Q)
+ end, Queues);
+is_queue(_) ->
+ false.
+
+is_empty({queue, [], [], 0}) ->
+ true;
+is_empty(_) ->
+ false.
+
+len({queue, _R, _F, L}) ->
+ L;
+len({pqueue, Queues}) ->
+ lists:sum([len(Q) || {_, Q} <- Queues]).
+
+to_list({queue, In, Out, _Len}) when is_list(In), is_list(Out) ->
+ [{0, V} || V <- Out ++ lists:reverse(In, [])];
+to_list({pqueue, Queues}) ->
+ [{maybe_negate_priority(P), V} || {P, Q} <- Queues,
+ {0, V} <- to_list(Q)].
+
+from_list(L) ->
+ lists:foldl(fun ({P, E}, Q) -> in(E, P, Q) end, new(), L).
+
+in(Item, Q) ->
+ in(Item, 0, Q).
+
+in(X, 0, {queue, [_] = In, [], 1}) ->
+ {queue, [X], In, 2};
+in(X, 0, {queue, In, Out, Len}) when is_list(In), is_list(Out) ->
+ {queue, [X|In], Out, Len + 1};
+in(X, Priority, _Q = {queue, [], [], 0}) ->
+ in(X, Priority, {pqueue, []});
+in(X, Priority, Q = {queue, _, _, _}) ->
+ in(X, Priority, {pqueue, [{0, Q}]});
+in(X, Priority, {pqueue, Queues}) ->
+ P = maybe_negate_priority(Priority),
+ {pqueue, case lists:keysearch(P, 1, Queues) of
+ {value, {_, Q}} ->
+ lists:keyreplace(P, 1, Queues, {P, in(X, Q)});
+ false when P == infinity ->
+ [{P, {queue, [X], [], 1}} | Queues];
+ false ->
+ case Queues of
+ [{infinity, InfQueue} | Queues1] ->
+ [{infinity, InfQueue} |
+ lists:keysort(1, [{P, {queue, [X], [], 1}} | Queues1])];
+ _ ->
+ lists:keysort(1, [{P, {queue, [X], [], 1}} | Queues])
+ end
+ end}.
+
+out({queue, [], [], 0} = Q) ->
+ {empty, Q};
+out({queue, [V], [], 1}) ->
+ {{value, V}, {queue, [], [], 0}};
+out({queue, [Y|In], [], Len}) ->
+ [V|Out] = lists:reverse(In, []),
+ {{value, V}, {queue, [Y], Out, Len - 1}};
+out({queue, In, [V], Len}) when is_list(In) ->
+ {{value,V}, r2f(In, Len - 1)};
+out({queue, In,[V|Out], Len}) when is_list(In) ->
+ {{value, V}, {queue, In, Out, Len - 1}};
+out({pqueue, [{P, Q} | Queues]}) ->
+ {R, Q1} = out(Q),
+ NewQ = case is_empty(Q1) of
+ true -> case Queues of
+ [] -> {queue, [], [], 0};
+ [{0, OnlyQ}] -> OnlyQ;
+ [_|_] -> {pqueue, Queues}
+ end;
+ false -> {pqueue, [{P, Q1} | Queues]}
+ end,
+ {R, NewQ}.
+
+out_p({queue, _, _, _} = Q) -> add_p(out(Q), 0);
+out_p({pqueue, [{P, _} | _]} = Q) -> add_p(out(Q), maybe_negate_priority(P)).
+
+add_p(R, P) -> case R of
+ {empty, Q} -> {empty, Q};
+ {{value, V}, Q} -> {{value, V, P}, Q}
+ end.
+
+join(A, {queue, [], [], 0}) ->
+ A;
+join({queue, [], [], 0}, B) ->
+ B;
+join({queue, AIn, AOut, ALen}, {queue, BIn, BOut, BLen}) ->
+ {queue, BIn, AOut ++ lists:reverse(AIn, BOut), ALen + BLen};
+join(A = {queue, _, _, _}, {pqueue, BPQ}) ->
+ {Pre, Post} =
+ lists:splitwith(fun ({P, _}) -> P < 0 orelse P == infinity end, BPQ),
+ Post1 = case Post of
+ [] -> [ {0, A} ];
+ [ {0, ZeroQueue} | Rest ] -> [ {0, join(A, ZeroQueue)} | Rest ];
+ _ -> [ {0, A} | Post ]
+ end,
+ {pqueue, Pre ++ Post1};
+join({pqueue, APQ}, B = {queue, _, _, _}) ->
+ {Pre, Post} =
+ lists:splitwith(fun ({P, _}) -> P < 0 orelse P == infinity end, APQ),
+ Post1 = case Post of
+ [] -> [ {0, B} ];
+ [ {0, ZeroQueue} | Rest ] -> [ {0, join(ZeroQueue, B)} | Rest ];
+ _ -> [ {0, B} | Post ]
+ end,
+ {pqueue, Pre ++ Post1};
+join({pqueue, APQ}, {pqueue, BPQ}) ->
+ {pqueue, merge(APQ, BPQ, [])}.
+
+merge([], BPQ, Acc) ->
+ lists:reverse(Acc, BPQ);
+merge(APQ, [], Acc) ->
+ lists:reverse(Acc, APQ);
+merge([{P, A}|As], [{P, B}|Bs], Acc) ->
+ merge(As, Bs, [ {P, join(A, B)} | Acc ]);
+merge([{PA, A}|As], Bs = [{PB, _}|_], Acc) when PA < PB orelse PA == infinity ->
+ merge(As, Bs, [ {PA, A} | Acc ]);
+merge(As = [{_, _}|_], [{PB, B}|Bs], Acc) ->
+ merge(As, Bs, [ {PB, B} | Acc ]).
+
+filter(Pred, Q) -> fold(fun(V, P, Acc) ->
+ case Pred(V) of
+ true -> in(V, P, Acc);
+ false -> Acc
+ end
+ end, new(), Q).
+
+fold(Fun, Init, Q) -> case out_p(Q) of
+ {empty, _Q} -> Init;
+ {{value, V, P}, Q1} -> fold(Fun, Fun(V, P, Init), Q1)
+ end.
+
+highest({queue, [], [], 0}) -> empty;
+highest({queue, _, _, _}) -> 0;
+highest({pqueue, [{P, _} | _]}) -> maybe_negate_priority(P).
+
+member(_X, {queue, [], [], 0}) ->
+ false;
+member(X, {queue, R, F, _Size}) ->
+ lists:member(X, R) orelse lists:member(X, F);
+member(_X, {pqueue, []}) ->
+ false;
+member(X, {pqueue, [{_P, Q}]}) ->
+ member(X, Q);
+member(X, {pqueue, [{_P, Q} | T]}) ->
+ case member(X, Q) of
+ true ->
+ true;
+ false ->
+ member(X, {pqueue, T})
+ end;
+member(X, Q) ->
+ erlang:error(badarg, [X,Q]).
+
+r2f([], 0) -> {queue, [], [], 0};
+r2f([_] = R, 1) -> {queue, [], R, 1};
+r2f([X,Y], 2) -> {queue, [X], [Y], 2};
+r2f([X,Y|R], L) -> {queue, [X,Y], lists:reverse(R, []), L}.
+
+maybe_negate_priority(infinity) -> infinity;
+maybe_negate_priority(P) -> -P.
diff --git a/deps/rabbit_common/src/rabbit_amqp_connection.erl b/deps/rabbit_common/src/rabbit_amqp_connection.erl
new file mode 100644
index 0000000000..58486bd239
--- /dev/null
+++ b/deps/rabbit_common/src/rabbit_amqp_connection.erl
@@ -0,0 +1,34 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_amqp_connection).
+
+-export([amqp_params/2]).
+
+-spec amqp_params(pid(), timeout()) -> [{atom(), term()}].
+amqp_params(ConnPid, Timeout) ->
+ P = try
+ gen_server:call(ConnPid, {info, [amqp_params]}, Timeout)
+ catch exit:{noproc, Error} ->
+ rabbit_log:debug("file ~p, line ~p - connection process ~p not alive: ~p~n",
+ [?FILE, ?LINE, ConnPid, Error]),
+ [];
+ _:Error ->
+ rabbit_log:debug("file ~p, line ~p - failed to get amqp_params from connection process ~p: ~p~n",
+ [?FILE, ?LINE, ConnPid, Error]),
+ []
+ end,
+ process_amqp_params_result(P).
+
+process_amqp_params_result({error, {bad_argument, amqp_params}}) ->
+ %% Some connection process modules do not handle the {info, [amqp_params]}
+ %% message (like rabbit_reader) and throw a bad_argument error
+ [];
+process_amqp_params_result({ok, AmqpParams}) ->
+ AmqpParams;
+process_amqp_params_result(AmqpParams) ->
+ AmqpParams.
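+
+%% Usage sketch (pid and timeout are illustrative):
+%%   Params = rabbit_amqp_connection:amqp_params(ConnPid, 5000).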
diff --git a/deps/rabbit_common/src/rabbit_amqqueue_common.erl b/deps/rabbit_common/src/rabbit_amqqueue_common.erl
new file mode 100644
index 0000000000..a45356de78
--- /dev/null
+++ b/deps/rabbit_common/src/rabbit_amqqueue_common.erl
@@ -0,0 +1,39 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_amqqueue_common).
+
+-export([notify_sent/2, notify_sent_queue_down/1, delete_exclusive/2]).
+
+-define(MORE_CONSUMER_CREDIT_AFTER, 50).
+
+-spec notify_sent(pid(), pid()) -> 'ok'.
+
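+%% Counts down, in the calling channel process's dictionary, the number
+%% of messages sent to QPid since credit was last granted; once the
+%% counter reaches 1 a fresh credit grant is cast to the queue.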
+notify_sent(QPid, ChPid) ->
+ Key = {consumer_credit_to, QPid},
+ put(Key, case get(Key) of
+ 1 -> gen_server2:cast(
+ QPid, {notify_sent, ChPid,
+ ?MORE_CONSUMER_CREDIT_AFTER}),
+ ?MORE_CONSUMER_CREDIT_AFTER;
+ undefined -> erlang:monitor(process, QPid),
+ ?MORE_CONSUMER_CREDIT_AFTER - 1;
+ C -> C - 1
+ end),
+ ok.
+
+-spec notify_sent_queue_down(pid()) -> 'ok'.
+
+notify_sent_queue_down(QPid) ->
+ erase({consumer_credit_to, QPid}),
+ ok.
+
+-spec delete_exclusive([pid()], pid()) -> 'ok'.
+
+delete_exclusive(QPids, ConnId) ->
+ [gen_server2:cast(QPid, {delete_exclusive, ConnId}) || QPid <- QPids],
+ ok.
diff --git a/deps/rabbit_common/src/rabbit_auth_backend_dummy.erl b/deps/rabbit_common/src/rabbit_auth_backend_dummy.erl
new file mode 100644
index 0000000000..8d30fdca1b
--- /dev/null
+++ b/deps/rabbit_common/src/rabbit_auth_backend_dummy.erl
@@ -0,0 +1,39 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_auth_backend_dummy).
+-include("rabbit.hrl").
+
+-behaviour(rabbit_authn_backend).
+-behaviour(rabbit_authz_backend).
+
+-export([user/0]).
+-export([user_login_authentication/2, user_login_authorization/2,
+ check_vhost_access/3, check_resource_access/4, check_topic_access/4]).
+-export([state_can_expire/0]).
+
+-spec user() -> rabbit_types:user().
+
+%% A user to be used by the direct client when permission checks are
+%% not needed. This user can do anything AMQPish.
+user() -> #user{username = <<"none">>,
+ tags = [],
+ authz_backends = [{?MODULE, none}]}.
+
+%% Implementation of rabbit_auth_backend
+
+user_login_authentication(_, _) ->
+ {refused, "cannot log in conventionally as dummy user", []}.
+
+user_login_authorization(_, _) ->
+ {refused, "cannot log in conventionally as dummy user", []}.
+
+check_vhost_access(#auth_user{}, _VHostPath, _AuthzData) -> true.
+check_resource_access(#auth_user{}, #resource{}, _Permission, _Context) -> true.
+check_topic_access(#auth_user{}, #resource{}, _Permission, _Context) -> true.
+
+state_can_expire() -> false.
diff --git a/deps/rabbit_common/src/rabbit_auth_mechanism.erl b/deps/rabbit_common/src/rabbit_auth_mechanism.erl
new file mode 100644
index 0000000000..38d21f3a5a
--- /dev/null
+++ b/deps/rabbit_common/src/rabbit_auth_mechanism.erl
@@ -0,0 +1,41 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_auth_mechanism).
+
+-behaviour(rabbit_registry_class).
+
+-export([added_to_rabbit_registry/2, removed_from_rabbit_registry/1]).
+
+%% A description.
+-callback description() -> [proplists:property()].
+
+%% If this mechanism is enabled, should it be offered for a given socket?
+%% (primarily so EXTERNAL can be TLS-only)
+-callback should_offer(rabbit_net:socket()) -> boolean().
+
+%% Called before authentication starts. Should create a state
+%% object to be passed through all the stages of authentication.
+-callback init(rabbit_net:socket()) -> any().
+
+%% Handle a stage of authentication. Possible responses:
+%% {ok, User}
+%% Authentication succeeded, and here's the user record.
+%% {challenge, Challenge, NextState}
+%% Another round is needed. Here's the state I want next time.
+%% {protocol_error, Msg, Args}
+%% Client got the protocol wrong. Log and die.
+%% {refused, Username, Msg, Args}
+%% Client failed authentication. Log and die.
+-callback handle_response(binary(), any()) ->
+ {'ok', rabbit_types:user()} |
+ {'challenge', binary(), any()} |
+ {'protocol_error', string(), [any()]} |
+ {'refused', rabbit_types:username() | none, string(), [any()]}.
+
+added_to_rabbit_registry(_Type, _ModuleName) -> ok.
+removed_from_rabbit_registry(_Type) -> ok.
diff --git a/deps/rabbit_common/src/rabbit_authn_backend.erl b/deps/rabbit_common/src/rabbit_authn_backend.erl
new file mode 100644
index 0000000000..e600ec884f
--- /dev/null
+++ b/deps/rabbit_common/src/rabbit_authn_backend.erl
@@ -0,0 +1,27 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_authn_backend).
+
+-include("rabbit.hrl").
+
+%% Check that a user can log in, given a username and a proplist of
+%% authentication information (e.g. [{password, Password}]). If your
+%% backend is not to be used for authentication, this should always
+%% refuse access.
+%%
+%% Possible responses:
+%% {ok, User}
+%% Authentication succeeded, and here's the user record.
+%% {error, Error}
+%% Something went wrong. Log and die.
+%% {refused, Msg, Args}
+%% Client failed authentication. Log and die.
+-callback user_login_authentication(rabbit_types:username(), [term()] | map()) ->
+ {'ok', rabbit_types:auth_user()} |
+ {'refused', string(), [any()]} |
+ {'error', any()}.
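+
+%% A minimal backend sketch (the module name and the unconditional
+%% accept are invented for illustration only):
+%%
+%% -module(my_authn_backend).
+%% -behaviour(rabbit_authn_backend).
+%% -export([user_login_authentication/2]).
+%%
+%% user_login_authentication(Username, _AuthProps) ->
+%%     {ok, #auth_user{username = Username, tags = [], impl = none}}.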
diff --git a/deps/rabbit_common/src/rabbit_authz_backend.erl b/deps/rabbit_common/src/rabbit_authz_backend.erl
new file mode 100644
index 0000000000..367aa8d1ef
--- /dev/null
+++ b/deps/rabbit_common/src/rabbit_authz_backend.erl
@@ -0,0 +1,88 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_authz_backend).
+
+-include("rabbit.hrl").
+
+%% Check that a user can log in, when this backend is being used for
+%% authorisation only. Authentication has already taken place
+%% successfully, but we need to check that the user exists in this
+%% backend, and initialise any impl field we will want to have passed
+%% back in future calls to check_vhost_access/3 and
+%% check_resource_access/4.
+%%
+%% Possible responses:
+%% {ok, Impl}
+%% {ok, Impl, Tags}
+%%     User authorisation succeeded, and here are the impl and (optional) extra tags fields.
+%% {error, Error}
+%% Something went wrong. Log and die.
+%% {refused, Msg, Args}
+%% User authorisation failed. Log and die.
+-callback user_login_authorization(rabbit_types:username(), [term()] | map()) ->
+ {'ok', any()} |
+ {'ok', any(), any()} |
+ {'refused', string(), [any()]} |
+ {'error', any()}.
+
+%% Given #auth_user, vhost and data (client IP for now), can a user log in to a vhost?
+%% Possible responses:
+%% true
+%% false
+%% {error, Error}
+%% Something went wrong. Log and die.
+-callback check_vhost_access(AuthUser :: rabbit_types:auth_user(),
+ VHost :: rabbit_types:vhost(),
+ AuthzData :: rabbit_types:authz_data()) ->
+ boolean() | {'error', any()}.
+
+%% Given #auth_user, resource and permission, can a user access a resource?
+%%
+%% Possible responses:
+%% true
+%% false
+%% {error, Error}
+%% Something went wrong. Log and die.
+-callback check_resource_access(rabbit_types:auth_user(),
+ rabbit_types:r(atom()),
+ rabbit_access_control:permission_atom(),
+ rabbit_types:authz_context()) ->
+ boolean() | {'error', any()}.
+
+%% Given #auth_user, topic as resource, permission, and context, can a user access the topic?
+%%
+%% Possible responses:
+%% true
+%% false
+%% {error, Error}
+%% Something went wrong. Log and die.
+-callback check_topic_access(rabbit_types:auth_user(),
+ rabbit_types:r(atom()),
+ rabbit_access_control:permission_atom(),
+ rabbit_types:topic_access_context()) ->
+ boolean() | {'error', any()}.
+
+%% Returns true for backends that support state or credential expiration (e.g. use JWTs).
+-callback state_can_expire() -> boolean().
+
+%% Updates backend state that has expired.
+%%
+%% Possible responses:
+%% {ok, User}
+%% Secret updated successfully, and here's the user record.
+%% {error, Error}
+%% Something went wrong.
+%% {refused, Msg, Args}
+%% New secret is not valid or the user cannot authenticate with it.
+-callback update_state(AuthUser :: rabbit_types:auth_user(),
+ NewState :: term()) ->
+ {'ok', rabbit_types:auth_user()} |
+ {'refused', string(), [any()]} |
+ {'error', any()}.
+
+-optional_callbacks([update_state/2]).
diff --git a/deps/rabbit_common/src/rabbit_basic_common.erl b/deps/rabbit_common/src/rabbit_basic_common.erl
new file mode 100644
index 0000000000..e88f1172af
--- /dev/null
+++ b/deps/rabbit_common/src/rabbit_basic_common.erl
@@ -0,0 +1,41 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_basic_common).
+-include("rabbit.hrl").
+
+-export([build_content/2, from_content/1]).
+
+-spec build_content
+ (rabbit_framing:amqp_property_record(), binary() | [binary()]) ->
+ rabbit_types:content().
+-spec from_content
+ (rabbit_types:content()) ->
+ {rabbit_framing:amqp_property_record(), binary()}.
+
+build_content(Properties, BodyBin) when is_binary(BodyBin) ->
+ build_content(Properties, [BodyBin]);
+
+build_content(Properties, PFR) ->
+ %% basic.publish hasn't changed so we can just hard-code amqp_0_9_1
+ {ClassId, _MethodId} =
+ rabbit_framing_amqp_0_9_1:method_id('basic.publish'),
+ #content{class_id = ClassId,
+ properties = Properties,
+ properties_bin = none,
+ protocol = none,
+ payload_fragments_rev = PFR}.
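+
+%% e.g. build_content(#'P_basic'{}, <<"hi">>) returns a #content{} with
+%% payload_fragments_rev = [<<"hi">>] and properties_bin left as 'none'.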
+
+from_content(Content) ->
+ #content{class_id = ClassId,
+ properties = Props,
+ payload_fragments_rev = FragmentsRev} =
+ rabbit_binary_parser:ensure_content_decoded(Content),
+ %% basic.publish hasn't changed so we can just hard-code amqp_0_9_1
+ {ClassId, _MethodId} =
+ rabbit_framing_amqp_0_9_1:method_id('basic.publish'),
+ {Props, list_to_binary(lists:reverse(FragmentsRev))}.
diff --git a/deps/rabbit_common/src/rabbit_binary_generator.erl b/deps/rabbit_common/src/rabbit_binary_generator.erl
new file mode 100644
index 0000000000..7a56cb92b6
--- /dev/null
+++ b/deps/rabbit_common/src/rabbit_binary_generator.erl
@@ -0,0 +1,235 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_binary_generator).
+-include("rabbit_framing.hrl").
+-include("rabbit.hrl").
+
+-export([build_simple_method_frame/3,
+ build_simple_content_frames/4,
+ build_heartbeat_frame/0]).
+-export([generate_table/1]).
+-export([check_empty_frame_size/0]).
+-export([ensure_content_encoded/2, clear_encoded_content/1]).
+-export([map_exception/3]).
+
+%%----------------------------------------------------------------------------
+
+-type frame() :: [binary()].
+
+-spec build_simple_method_frame
+ (rabbit_channel:channel_number(), rabbit_framing:amqp_method_record(),
+ rabbit_types:protocol()) ->
+ frame().
+-spec build_simple_content_frames
+ (rabbit_channel:channel_number(), rabbit_types:content(),
+ non_neg_integer(), rabbit_types:protocol()) ->
+ [frame()].
+-spec build_heartbeat_frame() -> frame().
+-spec generate_table(rabbit_framing:amqp_table()) -> binary().
+-spec check_empty_frame_size() -> 'ok'.
+-spec ensure_content_encoded
+ (rabbit_types:content(), rabbit_types:protocol()) ->
+ rabbit_types:encoded_content().
+-spec clear_encoded_content
+ (rabbit_types:content()) ->
+ rabbit_types:unencoded_content().
+-spec map_exception
+ (rabbit_channel:channel_number(), rabbit_types:amqp_error() | any(),
+ rabbit_types:protocol()) ->
+ {rabbit_channel:channel_number(),
+ rabbit_framing:amqp_method_record()}.
+
+%%----------------------------------------------------------------------------
+
+build_simple_method_frame(ChannelInt, MethodRecord, Protocol) ->
+ MethodFields = Protocol:encode_method_fields(MethodRecord),
+ MethodName = rabbit_misc:method_record_type(MethodRecord),
+ {ClassId, MethodId} = Protocol:method_id(MethodName),
+ create_frame(1, ChannelInt, [<<ClassId:16, MethodId:16>>, MethodFields]).
+
+build_simple_content_frames(ChannelInt, Content, FrameMax, Protocol) ->
+ #content{class_id = ClassId,
+ properties_bin = ContentPropertiesBin,
+ payload_fragments_rev = PayloadFragmentsRev} =
+ ensure_content_encoded(Content, Protocol),
+ {BodySize, ContentFrames} =
+ build_content_frames(PayloadFragmentsRev, FrameMax, ChannelInt),
+ HeaderFrame = create_frame(2, ChannelInt,
+ [<<ClassId:16, 0:16, BodySize:64>>,
+ ContentPropertiesBin]),
+ [HeaderFrame | ContentFrames].
+
+build_content_frames(FragsRev, FrameMax, ChannelInt) ->
+ BodyPayloadMax = if FrameMax == 0 -> iolist_size(FragsRev);
+ true -> FrameMax - ?EMPTY_FRAME_SIZE
+ end,
+ build_content_frames(0, [], BodyPayloadMax, [],
+ lists:reverse(FragsRev), BodyPayloadMax, ChannelInt).
+
+build_content_frames(SizeAcc, FramesAcc, _FragSizeRem, [],
+ [], _BodyPayloadMax, _ChannelInt) ->
+ {SizeAcc, lists:reverse(FramesAcc)};
+build_content_frames(SizeAcc, FramesAcc, _FragSizeRem, [],
+ [<<>>], _BodyPayloadMax, _ChannelInt) ->
+ {SizeAcc, lists:reverse(FramesAcc)};
+build_content_frames(SizeAcc, FramesAcc, FragSizeRem, FragAcc,
+ Frags, BodyPayloadMax, ChannelInt)
+ when FragSizeRem == 0 orelse Frags == [] ->
+ Frame = create_frame(3, ChannelInt, lists:reverse(FragAcc)),
+ FrameSize = BodyPayloadMax - FragSizeRem,
+ build_content_frames(SizeAcc + FrameSize, [Frame | FramesAcc],
+ BodyPayloadMax, [], Frags, BodyPayloadMax, ChannelInt);
+build_content_frames(SizeAcc, FramesAcc, FragSizeRem, FragAcc,
+ [Frag | Frags], BodyPayloadMax, ChannelInt) ->
+ Size = size(Frag),
+ {NewFragSizeRem, NewFragAcc, NewFrags} =
+ if Size == 0 -> {FragSizeRem, FragAcc, Frags};
+ Size =< FragSizeRem -> {FragSizeRem - Size, [Frag | FragAcc], Frags};
+ true -> <<Head:FragSizeRem/binary, Tail/binary>> =
+ Frag,
+ {0, [Head | FragAcc], [Tail | Frags]}
+ end,
+ build_content_frames(SizeAcc, FramesAcc, NewFragSizeRem, NewFragAcc,
+ NewFrags, BodyPayloadMax, ChannelInt).
+
+build_heartbeat_frame() ->
+ create_frame(?FRAME_HEARTBEAT, 0, <<>>).
+
+create_frame(TypeInt, ChannelInt, Payload) ->
+ [<<TypeInt:8, ChannelInt:16, (iolist_size(Payload)):32>>, Payload,
+ ?FRAME_END].
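+
+%% e.g. create_frame(1, 0, <<"payload">>) yields the iolist
+%% [<<1:8, 0:16, 7:32>>, <<"payload">>, ?FRAME_END].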
+
+%% table_field_to_binary supports the AMQP 0-8/0-9 standard types, S,
+%% I, D, T and F, as well as the QPid extensions b, d, f, l, s, t, x,
+%% and V.
+table_field_to_binary({FName, T, V}) ->
+ [short_string_to_binary(FName) | field_value_to_binary(T, V)].
+
+field_value_to_binary(longstr, V) -> [$S | long_string_to_binary(V)];
+field_value_to_binary(signedint, V) -> [$I, <<V:32/signed>>];
+field_value_to_binary(decimal, V) -> {Before, After} = V,
+ [$D, Before, <<After:32>>];
+field_value_to_binary(timestamp, V) -> [$T, <<V:64>>];
+field_value_to_binary(table, V) -> [$F | table_to_binary(V)];
+field_value_to_binary(array, V) -> [$A | array_to_binary(V)];
+field_value_to_binary(byte, V) -> [$b, <<V:8/signed>>];
+field_value_to_binary(double, V) -> [$d, <<V:64/float>>];
+field_value_to_binary(float, V) -> [$f, <<V:32/float>>];
+field_value_to_binary(long, V) -> [$l, <<V:64/signed>>];
+field_value_to_binary(short, V) -> [$s, <<V:16/signed>>];
+field_value_to_binary(bool, V) -> [$t, if V -> 1; true -> 0 end];
+field_value_to_binary(binary, V) -> [$x | long_string_to_binary(V)];
+field_value_to_binary(unsignedbyte, V) -> [$B, <<V:8/unsigned>>];
+field_value_to_binary(unsignedshort, V) -> [$u, <<V:16/unsigned>>];
+field_value_to_binary(unsignedint, V) -> [$i, <<V:32/unsigned>>];
+field_value_to_binary(void, _V) -> [$V].
+
+table_to_binary(Table) when is_list(Table) ->
+ BinTable = generate_table_iolist(Table),
+ [<<(iolist_size(BinTable)):32>> | BinTable].
+
+array_to_binary(Array) when is_list(Array) ->
+ BinArray = generate_array_iolist(Array),
+ [<<(iolist_size(BinArray)):32>> | BinArray].
+
+generate_table(Table) when is_list(Table) ->
+ list_to_binary(generate_table_iolist(Table)).
+
+generate_table_iolist(Table) ->
+ lists:map(fun table_field_to_binary/1, Table).
+
+generate_array_iolist(Array) ->
+ lists:map(fun ({T, V}) -> field_value_to_binary(T, V) end, Array).
+
+short_string_to_binary(String) ->
+ Len = string_length(String),
+ if Len < 256 -> [<<Len:8>>, String];
+ true -> exit(content_properties_shortstr_overflow)
+ end.
+
+long_string_to_binary(String) ->
+ Len = string_length(String),
+ [<<Len:32>>, String].
+
+string_length(String) when is_binary(String) -> size(String);
+string_length(String) -> length(String).
+
+check_empty_frame_size() ->
+ %% Intended to ensure that EMPTY_FRAME_SIZE is defined correctly.
+ case iolist_size(create_frame(?FRAME_BODY, 0, <<>>)) of
+ ?EMPTY_FRAME_SIZE -> ok;
+ ComputedSize -> exit({incorrect_empty_frame_size,
+ ComputedSize, ?EMPTY_FRAME_SIZE})
+ end.
+
+ensure_content_encoded(Content = #content{properties_bin = PropBin,
+ protocol = Protocol}, Protocol)
+ when PropBin =/= none ->
+ Content;
+ensure_content_encoded(Content = #content{properties = none,
+ properties_bin = PropBin,
+ protocol = Protocol}, Protocol1)
+ when PropBin =/= none ->
+ Props = Protocol:decode_properties(Content#content.class_id, PropBin),
+ Content#content{properties = Props,
+ properties_bin = Protocol1:encode_properties(Props),
+ protocol = Protocol1};
+ensure_content_encoded(Content = #content{properties = Props}, Protocol)
+ when Props =/= none ->
+ Content#content{properties_bin = Protocol:encode_properties(Props),
+ protocol = Protocol}.
+
+clear_encoded_content(Content = #content{properties_bin = none,
+ protocol = none}) ->
+ Content;
+clear_encoded_content(Content = #content{properties = none}) ->
+    %% Only clear when we can rebuild the properties_bin later, in
+    %% accordance with the content record definition comment - at most
+    %% one of properties and properties_bin can be 'none'
+ Content;
+clear_encoded_content(Content = #content{}) ->
+ Content#content{properties_bin = none, protocol = none}.
+
+%% NB: this function is also used by the Erlang client
+map_exception(Channel, Reason, Protocol) ->
+ {SuggestedClose, ReplyCode, ReplyText, FailedMethod} =
+ lookup_amqp_exception(Reason, Protocol),
+ {ClassId, MethodId} = case FailedMethod of
+ {_, _} -> FailedMethod;
+ none -> {0, 0};
+ _ -> Protocol:method_id(FailedMethod)
+ end,
+ case SuggestedClose orelse (Channel == 0) of
+ true -> {0, #'connection.close'{reply_code = ReplyCode,
+ reply_text = ReplyText,
+ class_id = ClassId,
+ method_id = MethodId}};
+ false -> {Channel, #'channel.close'{reply_code = ReplyCode,
+ reply_text = ReplyText,
+ class_id = ClassId,
+ method_id = MethodId}}
+ end.
+
+lookup_amqp_exception(#amqp_error{name = Name,
+ explanation = Expl,
+ method = Method},
+ Protocol) ->
+ {ShouldClose, Code, Text} = Protocol:lookup_amqp_exception(Name),
+ ExplBin = amqp_exception_explanation(Text, Expl),
+ {ShouldClose, Code, ExplBin, Method};
+lookup_amqp_exception(Other, Protocol) ->
+ rabbit_log:warning("Non-AMQP exit reason '~p'~n", [Other]),
+ {ShouldClose, Code, Text} = Protocol:lookup_amqp_exception(internal_error),
+ {ShouldClose, Code, Text, none}.
+
+amqp_exception_explanation(Text, Expl) ->
+ ExplBin = list_to_binary(Expl),
+ CompleteTextBin = <<Text/binary, " - ", ExplBin/binary>>,
+ if size(CompleteTextBin) > 255 -> <<CompleteTextBin:252/binary, "...">>;
+ true -> CompleteTextBin
+ end.
diff --git a/deps/rabbit_common/src/rabbit_binary_parser.erl b/deps/rabbit_common/src/rabbit_binary_parser.erl
new file mode 100644
index 0000000000..478b0f0cd2
--- /dev/null
+++ b/deps/rabbit_common/src/rabbit_binary_parser.erl
@@ -0,0 +1,172 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_binary_parser).
+
+-include("rabbit.hrl").
+
+-export([parse_table/1]).
+-export([ensure_content_decoded/1, clear_decoded_content/1]).
+-export([validate_utf8/1, assert_utf8/1]).
+
+%%----------------------------------------------------------------------------
+
+-spec parse_table(binary()) -> rabbit_framing:amqp_table().
+-spec ensure_content_decoded
+ (rabbit_types:content()) ->
+ rabbit_types:decoded_content().
+-spec clear_decoded_content
+ (rabbit_types:content()) ->
+ rabbit_types:undecoded_content().
+-spec validate_utf8(binary()) -> 'ok' | 'error'.
+-spec assert_utf8(binary()) -> 'ok'.
+
+%%----------------------------------------------------------------------------
+
+%% parse_table supports the AMQP 0-8/0-9 standard types, S, I, D, T
+%% and F, as well as the QPid extensions b, d, f, l, s, t, x, and V.
+
+-define(SIMPLE_PARSE_TABLE(BType, Pattern, RType),
+ parse_table(<<NLen:8/unsigned, NameString:NLen/binary,
+ BType, Pattern, Rest/binary>>) ->
+ [{NameString, RType, Value} | parse_table(Rest)]).
+
+%% Note that we try to put these in approximately the order we expect
+%% to hit them; that's why the empty binary is halfway through.
+
+parse_table(<<NLen:8/unsigned, NameString:NLen/binary,
+ $S, VLen:32/unsigned, Value:VLen/binary, Rest/binary>>) ->
+ [{NameString, longstr, Value} | parse_table(Rest)];
+
+?SIMPLE_PARSE_TABLE($T, Value:64/unsigned, timestamp);
+
+parse_table(<<>>) ->
+ [];
+
+?SIMPLE_PARSE_TABLE($b, Value:8/signed, byte);
+?SIMPLE_PARSE_TABLE($B, Value:8/unsigned, unsignedbyte);
+
+?SIMPLE_PARSE_TABLE($s, Value:16/signed, short);
+?SIMPLE_PARSE_TABLE($u, Value:16/unsigned, unsignedshort);
+
+?SIMPLE_PARSE_TABLE($I, Value:32/signed, signedint);
+?SIMPLE_PARSE_TABLE($i, Value:32/unsigned, unsignedint);
+
+?SIMPLE_PARSE_TABLE($d, Value:64/float, double);
+?SIMPLE_PARSE_TABLE($f, Value:32/float, float);
+
+%% yes, both 'l' and 'L' fields are decoded to 64-bit signed values;
+%% see https://github.com/rabbitmq/rabbitmq-server/issues/1093#issuecomment-276351183,
+%% https://www.rabbitmq.com/amqp-0-9-1-errata.html, and section
+%% 4.2.1 of the spec for details.
+?SIMPLE_PARSE_TABLE($l, Value:64/signed, long);
+?SIMPLE_PARSE_TABLE($L, Value:64/signed, long);
+
+
+parse_table(<<NLen:8/unsigned, NameString:NLen/binary,
+ $t, Value:8/unsigned, Rest/binary>>) ->
+ [{NameString, bool, (Value /= 0)} | parse_table(Rest)];
+
+parse_table(<<NLen:8/unsigned, NameString:NLen/binary,
+ $D, Before:8/unsigned, After:32/unsigned, Rest/binary>>) ->
+ [{NameString, decimal, {Before, After}} | parse_table(Rest)];
+
+parse_table(<<NLen:8/unsigned, NameString:NLen/binary,
+ $F, VLen:32/unsigned, Value:VLen/binary, Rest/binary>>) ->
+ [{NameString, table, parse_table(Value)} | parse_table(Rest)];
+
+parse_table(<<NLen:8/unsigned, NameString:NLen/binary,
+ $A, VLen:32/unsigned, Value:VLen/binary, Rest/binary>>) ->
+ [{NameString, array, parse_array(Value)} | parse_table(Rest)];
+
+parse_table(<<NLen:8/unsigned, NameString:NLen/binary,
+ $x, VLen:32/unsigned, Value:VLen/binary, Rest/binary>>) ->
+ [{NameString, binary, Value} | parse_table(Rest)];
+
+parse_table(<<NLen:8/unsigned, NameString:NLen/binary,
+ $V, Rest/binary>>) ->
+ [{NameString, void, undefined} | parse_table(Rest)].
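+
+%% e.g. parse_table(<<3, "foo", $t, 1>>) returns [{<<"foo">>, bool, true}].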
+
+-define(SIMPLE_PARSE_ARRAY(BType, Pattern, RType),
+ parse_array(<<BType, Pattern, Rest/binary>>) ->
+ [{RType, Value} | parse_array(Rest)]).
+
+parse_array(<<$S, VLen:32/unsigned, Value:VLen/binary, Rest/binary>>) ->
+ [{longstr, Value} | parse_array(Rest)];
+
+?SIMPLE_PARSE_ARRAY($T, Value:64/unsigned, timestamp);
+
+parse_array(<<>>) ->
+ [];
+
+?SIMPLE_PARSE_ARRAY($b, Value:8/signed, byte);
+?SIMPLE_PARSE_ARRAY($B, Value:8/unsigned, unsignedbyte);
+
+?SIMPLE_PARSE_ARRAY($s, Value:16/signed, short);
+?SIMPLE_PARSE_ARRAY($u, Value:16/unsigned, unsignedshort);
+
+?SIMPLE_PARSE_ARRAY($I, Value:32/signed, signedint);
+?SIMPLE_PARSE_ARRAY($i, Value:32/unsigned, unsignedint);
+
+?SIMPLE_PARSE_ARRAY($d, Value:64/float, double);
+?SIMPLE_PARSE_ARRAY($f, Value:32/float, float);
+
+?SIMPLE_PARSE_ARRAY($l, Value:64/signed, long);
+?SIMPLE_PARSE_ARRAY($L, Value:64/signed, long);
+
+
+parse_array(<<$t, Value:8/unsigned, Rest/binary>>) ->
+ [{bool, (Value /= 0)} | parse_array(Rest)];
+
+parse_array(<<$D, Before:8/unsigned, After:32/unsigned, Rest/binary>>) ->
+ [{decimal, {Before, After}} | parse_array(Rest)];
+
+parse_array(<<$F, VLen:32/unsigned, Value:VLen/binary, Rest/binary>>) ->
+ [{table, parse_table(Value)} | parse_array(Rest)];
+
+parse_array(<<$A, VLen:32/unsigned, Value:VLen/binary, Rest/binary>>) ->
+ [{array, parse_array(Value)} | parse_array(Rest)];
+
+parse_array(<<$x, VLen:32/unsigned, Value:VLen/binary, Rest/binary>>) ->
+ [{binary, Value} | parse_array(Rest)];
+
+parse_array(<<$V, Rest/binary>>) ->
+ [{void, undefined} | parse_array(Rest)].
+
+ensure_content_decoded(Content = #content{properties = Props})
+ when Props =/= none ->
+ Content;
+ensure_content_decoded(Content = #content{properties_bin = PropBin,
+ protocol = Protocol})
+ when PropBin =/= none ->
+ Content#content{properties = Protocol:decode_properties(
+ Content#content.class_id, PropBin)}.
+
+clear_decoded_content(Content = #content{properties = none}) ->
+ Content;
+clear_decoded_content(Content = #content{properties_bin = none}) ->
+    %% Only clear when we can rebuild the properties later, in
+    %% accordance with the content record definition comment - at most
+    %% one of properties and properties_bin can be 'none'
+ Content;
+clear_decoded_content(Content = #content{}) ->
+ Content#content{properties = none}.
+
+assert_utf8(B) ->
+ case validate_utf8(B) of
+ ok -> ok;
+ error -> rabbit_misc:protocol_error(
+ frame_error, "Malformed UTF-8 in shortstr", [])
+ end.
+
+validate_utf8(Bin) ->
+ try
+ _ = xmerl_ucs:from_utf8(Bin),
+ ok
+ catch exit:{ucs, _} ->
+ error
+ end.
diff --git a/deps/rabbit_common/src/rabbit_cert_info.erl b/deps/rabbit_common/src/rabbit_cert_info.erl
new file mode 100644
index 0000000000..08e6f03c6c
--- /dev/null
+++ b/deps/rabbit_common/src/rabbit_cert_info.erl
@@ -0,0 +1,270 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_cert_info).
+
+-include_lib("public_key/include/public_key.hrl").
+
+-export([issuer/1,
+ subject/1,
+ subject_alternative_names/1,
+ validity/1,
+ subject_items/2,
+ extensions/1
+]).
+
+%%--------------------------------------------------------------------------
+
+-export_type([certificate/0]).
+
+-type certificate() :: binary().
+
+%%--------------------------------------------------------------------------
+%% High-level functions used by reader
+%%--------------------------------------------------------------------------
+
+%% Return a string describing the certificate's issuer.
+-spec issuer(certificate()) -> string().
+
+issuer(Cert) ->
+ cert_info(fun(#'OTPCertificate' {
+ tbsCertificate = #'OTPTBSCertificate' {
+ issuer = Issuer }}) ->
+ format_rdn_sequence(Issuer)
+ end, Cert).
+
+%% Return a string describing the certificate's subject, as per RFC4514.
+-spec subject(certificate()) -> string().
+
+subject(Cert) ->
+ cert_info(fun(#'OTPCertificate' {
+ tbsCertificate = #'OTPTBSCertificate' {
+ subject = Subject }}) ->
+ format_rdn_sequence(Subject)
+ end, Cert).
+
+%% Return the parts of the certificate's subject.
+-spec subject_items
+ (certificate(), tuple()) -> [string()] | 'not_found'.
+
+subject_items(Cert, Type) ->
+ cert_info(fun(#'OTPCertificate' {
+ tbsCertificate = #'OTPTBSCertificate' {
+ subject = Subject }}) ->
+ find_by_type(Type, Subject)
+ end, Cert).
+
+-spec extensions(certificate()) -> [#'Extension'{}].
+extensions(Cert) ->
+ cert_info(fun(#'OTPCertificate' {
+ tbsCertificate = #'OTPTBSCertificate' {
+ extensions = Extensions }}) ->
+ Extensions
+ end, Cert).
+
+-spec subject_alternative_names(certificate()) -> [{atom(), string()}].
+subject_alternative_names(Cert) ->
+ Extensions = extensions(Cert),
+ try lists:keyfind(?'id-ce-subjectAltName', #'Extension'.extnID, Extensions) of
+ false -> [];
+ #'Extension'{extnValue = Val} -> Val
+ catch _:_ -> []
+ end.
+
+%% Return a string describing the certificate's validity.
+-spec validity(certificate()) -> string().
+
+validity(Cert) ->
+ cert_info(fun(#'OTPCertificate' {
+ tbsCertificate = #'OTPTBSCertificate' {
+ validity = {'Validity', Start, End} }}) ->
+ rabbit_misc:format("~s - ~s", [format_asn1_value(Start),
+ format_asn1_value(End)])
+ end, Cert).
+
+%%--------------------------------------------------------------------------
+
+cert_info(F, Cert) ->
+ F(public_key:pkix_decode_cert(Cert, otp)).
+
+find_by_type(Type, {rdnSequence, RDNs}) ->
+ case [V || #'AttributeTypeAndValue'{type = T, value = V}
+ <- lists:flatten(RDNs),
+ T == Type] of
+ [] -> not_found;
+ L -> [format_asn1_value(V) || V <- L]
+ end.
+
+%%--------------------------------------------------------------------------
+%% Formatting functions
+%%--------------------------------------------------------------------------
+
+%% Format and rdnSequence as a RFC4514 subject string.
+format_rdn_sequence({rdnSequence, Seq}) ->
+ string:join(lists:reverse([format_complex_rdn(RDN) || RDN <- Seq]), ",").
+
+%% Format an RDN set.
+format_complex_rdn(RDNs) ->
+ string:join([format_rdn(RDN) || RDN <- RDNs], "+").
+
+%% Format an RDN. If the type name is unknown, use the dotted decimal
+%% representation. See RFC4514, section 2.3.
+format_rdn(#'AttributeTypeAndValue'{type = T, value = V}) ->
+ FV = escape_rdn_value(format_asn1_value(V)),
+ Fmts = [{?'id-at-surname' , "SN"},
+ {?'id-at-givenName' , "GIVENNAME"},
+ {?'id-at-initials' , "INITIALS"},
+ {?'id-at-generationQualifier' , "GENERATIONQUALIFIER"},
+ {?'id-at-commonName' , "CN"},
+ {?'id-at-localityName' , "L"},
+ {?'id-at-stateOrProvinceName' , "ST"},
+ {?'id-at-organizationName' , "O"},
+ {?'id-at-organizationalUnitName' , "OU"},
+ {?'id-at-title' , "TITLE"},
+ {?'id-at-countryName' , "C"},
+ {?'id-at-serialNumber' , "SERIALNUMBER"},
+ {?'id-at-pseudonym' , "PSEUDONYM"},
+ {?'id-domainComponent' , "DC"},
+ {?'id-emailAddress' , "EMAILADDRESS"},
+ {?'street-address' , "STREET"},
+ {{0,9,2342,19200300,100,1,1} , "UID"}], %% Not in public_key.hrl
+ case proplists:lookup(T, Fmts) of
+ {_, Fmt} ->
+ rabbit_misc:format(Fmt ++ "=~s", [FV]);
+ none when is_tuple(T) ->
+ TypeL = [rabbit_misc:format("~w", [X]) || X <- tuple_to_list(T)],
+ rabbit_misc:format("~s=~s", [string:join(TypeL, "."), FV]);
+ none ->
+ rabbit_misc:format("~p=~s", [T, FV])
+ end.
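+
+%% e.g. an #'AttributeTypeAndValue'{} with type ?'id-at-commonName' and
+%% value {utf8String, <<"host">>} formats as "CN=host".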
+
+%% Escape a string as per RFC4514.
+escape_rdn_value(V) ->
+ escape_rdn_value(V, start).
+
+escape_rdn_value([], _) ->
+ [];
+escape_rdn_value([C | S], start) when C =:= $ ; C =:= $# ->
+ [$\\, C | escape_rdn_value(S, middle)];
+escape_rdn_value(S, start) ->
+ escape_rdn_value(S, middle);
+escape_rdn_value([$ ], middle) ->
+ [$\\, $ ];
+escape_rdn_value([C | S], middle) when C =:= $"; C =:= $+; C =:= $,; C =:= $;;
+ C =:= $<; C =:= $>; C =:= $\\ ->
+ [$\\, C | escape_rdn_value(S, middle)];
+escape_rdn_value([C | S], middle) when C < 32 ; C >= 126 ->
+ %% Of ASCII characters only U+0000 needs escaping, but for display
+ %% purposes it's handy to escape all non-printable chars. All non-ASCII
+ %% characters get converted to UTF-8 sequences and then escaped. We've
+ %% already got a UTF-8 sequence here, so just escape it.
+ rabbit_misc:format("\\~2.16.0B", [C]) ++ escape_rdn_value(S, middle);
+escape_rdn_value([C | S], middle) ->
+ [C | escape_rdn_value(S, middle)].
+
+%% Get the string representation of an OTPCertificate field.
+format_asn1_value({ST, S}) when ST =:= teletexString; ST =:= printableString;
+ ST =:= universalString; ST =:= utf8String;
+ ST =:= bmpString ->
+ format_directory_string(ST, S);
+format_asn1_value({utcTime, [Y1, Y2, M1, M2, D1, D2, H1, H2,
+ Min1, Min2, S1, S2, $Z]}) ->
+ rabbit_misc:format("20~c~c-~c~c-~c~cT~c~c:~c~c:~c~cZ",
+ [Y1, Y2, M1, M2, D1, D2, H1, H2, Min1, Min2, S1, S2]);
+%% We appear to get an untagged value back for an ia5string
+%% (e.g. domainComponent).
+format_asn1_value(V) when is_list(V) ->
+ V;
+format_asn1_value(V) when is_binary(V) ->
+ %% OTP does not decode some values when combined with an unknown
+    %% type. That's probably wrong, so as a last-ditch effort let's
+    %% try decoding it manually. 'DirectoryString' is semi-arbitrary -
+ %% but it is the type which covers the various string types we
+ %% handle below.
+ try
+ {ST, S} = public_key:der_decode('DirectoryString', V),
+ format_directory_string(ST, S)
+ catch _:_ ->
+ rabbit_misc:format("~p", [V])
+ end;
+format_asn1_value(V) ->
+ rabbit_misc:format("~p", [V]).
+
+%% DirectoryString { INTEGER : maxSize } ::= CHOICE {
+%% teletexString TeletexString (SIZE (1..maxSize)),
+%% printableString PrintableString (SIZE (1..maxSize)),
+%% bmpString BMPString (SIZE (1..maxSize)),
+%% universalString UniversalString (SIZE (1..maxSize)),
+%% uTF8String UTF8String (SIZE (1..maxSize)) }
+%%
+%% Precise definitions of printable / teletexString are hard to come
+%% by. This is what I reconstructed:
+%%
+%% printableString:
+%% "intended to represent the limited character sets available to
+%% mainframe input terminals"
+%% A-Z a-z 0-9 ' ( ) + , - . / : = ? [space]
+%% https://msdn.microsoft.com/en-us/library/bb540814(v=vs.85).aspx
+%%
+%% teletexString:
+%% "a sizable volume of software in the world treats TeletexString
+%% (T61String) as a simple 8-bit string with mostly Windows Latin 1
+%% (superset of iso-8859-1) encoding"
+%% https://www.mail-archive.com/asn1@asn1.org/msg00460.html
+%%
+%% (However according to that link X.680 actually defines
+%% TeletexString in some much more involved and crazy way. I suggest
+%% we treat it as ISO-8859-1 since Erlang does not support Windows
+%% Latin 1).
+%%
+%% bmpString:
+%% UCS-2 according to RFC 3641. Hence cannot represent Unicode
+%% characters above 65535 (outside the "Basic Multilingual Plane").
+%%
+%% universalString:
+%% UCS-4 according to RFC 3641.
+%%
+%% utf8String:
+%% UTF-8 according to RFC 3641.
+%%
+%% Within Rabbit we assume UTF-8 encoding. Since printableString is a
+%% subset of ASCII it is also a subset of UTF-8. The others need
+%% converting. Fortunately since the Erlang SSL library does the
+%% decoding for us (albeit into a weird format, see below), we just
+%% need to handle encoding into UTF-8. Note also that utf8Strings come
+%% back as binary.
+%%
+%% Note for testing: the default Ubuntu configuration for openssl will
+%% only create printableString or teletexString types no matter what
+%% you do. Edit string_mask in the [req] section of
+%% /etc/ssl/openssl.cnf to change this (see comments there). You
+%% probably also need to set utf8 = yes to get it to accept UTF-8 on
+%% the command line. Also note I could not get openssl to generate a
+%% universalString.
+
+format_directory_string(printableString, S) -> S;
+format_directory_string(teletexString, S) -> utf8_list_from(S);
+format_directory_string(bmpString, S) -> utf8_list_from(S);
+format_directory_string(universalString, S) -> utf8_list_from(S);
+format_directory_string(utf8String, S) -> binary_to_list(S).
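+%% Illustrative (not part of the original source): a bmpString arrives
+%% as a list of {0,0,B3,B4} tuples (see flatten_ssl_list/1 below), so
+%% for GREEK SMALL LETTER LAMDA (U+03BB):
+%% format_directory_string(bmpString, [{0,0,3,187}]) =:= [206,187]
+%% i.e. the UTF-8 byte sequence for that code point.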
+
+utf8_list_from(S) ->
+ binary_to_list(
+ unicode:characters_to_binary(flatten_ssl_list(S), utf32, utf8)).
+
+%% The Erlang SSL implementation invents its own representation for
+%% non-ascii strings - looking like [97,{0,0,3,187}] (that's LATIN
+%% SMALL LETTER A followed by GREEK SMALL LETTER LAMDA). We convert
+%% this into a list of unicode characters, which we can tell
+%% unicode:characters_to_binary is utf32.
+
+flatten_ssl_list(L) -> [flatten_ssl_list_item(I) || I <- L].
+
+flatten_ssl_list_item({A, B, C, D}) ->
+ A * (1 bsl 24) + B * (1 bsl 16) + C * (1 bsl 8) + D;
+flatten_ssl_list_item(N) when is_number(N) ->
+ N.
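+%% Illustrative: flatten_ssl_list([97, {0,0,3,187}]) =:= [97, 955],
+%% i.e. $a followed by the code point of GREEK SMALL LETTER LAMDA.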
diff --git a/deps/rabbit_common/src/rabbit_channel_common.erl b/deps/rabbit_common/src/rabbit_channel_common.erl
new file mode 100644
index 0000000000..a21e17b2e7
--- /dev/null
+++ b/deps/rabbit_common/src/rabbit_channel_common.erl
@@ -0,0 +1,25 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_channel_common).
+
+-export([do/2, do/3, do_flow/3, ready_for_close/1]).
+
+do(Pid, Method) ->
+ do(Pid, Method, none).
+
+do(Pid, Method, Content) ->
+ gen_server2:cast(Pid, {method, Method, Content, noflow}).
+
+do_flow(Pid, Method, Content) ->
+ %% Here we are tracking messages sent by the rabbit_reader
+ %% process. We are accessing the rabbit_reader process dictionary.
+ credit_flow:send(Pid),
+ gen_server2:cast(Pid, {method, Method, Content, flow}).
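+%% Illustrative call from a reader-like process subject to flow control
+%% (the method record is an example only):
+%% do_flow(ChPid, #'basic.publish'{exchange = <<"x">>}, Content)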
+
+ready_for_close(Pid) ->
+ gen_server2:cast(Pid, ready_for_close).
diff --git a/deps/rabbit_common/src/rabbit_command_assembler.erl b/deps/rabbit_common/src/rabbit_command_assembler.erl
new file mode 100644
index 0000000000..ea6b19d083
--- /dev/null
+++ b/deps/rabbit_common/src/rabbit_command_assembler.erl
@@ -0,0 +1,124 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_command_assembler).
+-include("rabbit_framing.hrl").
+-include("rabbit.hrl").
+
+-export([analyze_frame/3, init/1, process/2]).
+
+%%----------------------------------------------------------------------------
+
+%%----------------------------------------------------------------------------
+
+-export_type([frame/0]).
+
+-type frame_type() :: ?FRAME_METHOD | ?FRAME_HEADER | ?FRAME_BODY |
+ ?FRAME_OOB_METHOD | ?FRAME_OOB_HEADER | ?FRAME_OOB_BODY |
+ ?FRAME_TRACE | ?FRAME_HEARTBEAT.
+-type protocol() :: rabbit_framing:protocol().
+-type method() :: rabbit_framing:amqp_method_record().
+-type class_id() :: rabbit_framing:amqp_class_id().
+-type weight() :: non_neg_integer().
+-type body_size() :: non_neg_integer().
+-type content() :: rabbit_types:undecoded_content().
+
+-type frame() ::
+ {'method', rabbit_framing:amqp_method_name(), binary()} |
+ {'content_header', class_id(), weight(), body_size(), binary()} |
+ {'content_body', binary()}.
+
+-type state() ::
+ {'method', protocol()} |
+ {'content_header', method(), class_id(), protocol()} |
+ {'content_body', method(), body_size(), class_id(), protocol()}.
+
+-spec analyze_frame(frame_type(), binary(), protocol()) ->
+ frame() | 'heartbeat' | 'error'.
+
+-spec init(protocol()) -> {ok, state()}.
+-spec process(frame(), state()) ->
+ {ok, state()} |
+ {ok, method(), state()} |
+ {ok, method(), content(), state()} |
+ {error, rabbit_types:amqp_error()}.
+
+%%--------------------------------------------------------------------
+
+analyze_frame(?FRAME_METHOD,
+ <<ClassId:16, MethodId:16, MethodFields/binary>>,
+ Protocol) ->
+ MethodName = Protocol:lookup_method_name({ClassId, MethodId}),
+ {method, MethodName, MethodFields};
+analyze_frame(?FRAME_HEADER,
+ <<ClassId:16, Weight:16, BodySize:64, Properties/binary>>,
+ _Protocol) ->
+ {content_header, ClassId, Weight, BodySize, Properties};
+analyze_frame(?FRAME_BODY, Body, _Protocol) ->
+ {content_body, Body};
+analyze_frame(?FRAME_HEARTBEAT, <<>>, _Protocol) ->
+ heartbeat;
+analyze_frame(_Type, _Body, _Protocol) ->
+ error.
+
+init(Protocol) -> {ok, {method, Protocol}}.
+
+process({method, MethodName, FieldsBin}, {method, Protocol}) ->
+ try
+ Method = Protocol:decode_method_fields(MethodName, FieldsBin),
+ case Protocol:method_has_content(MethodName) of
+ true -> {ClassId, _MethodId} = Protocol:method_id(MethodName),
+ {ok, {content_header, Method, ClassId, Protocol}};
+ false -> {ok, Method, {method, Protocol}}
+ end
+ catch exit:#amqp_error{} = Reason -> {error, Reason}
+ end;
+process(_Frame, {method, _Protocol}) ->
+ unexpected_frame("expected method frame, "
+ "got non method frame instead", [], none);
+process({content_header, ClassId, 0, 0, PropertiesBin},
+ {content_header, Method, ClassId, Protocol}) ->
+ Content = empty_content(ClassId, PropertiesBin, Protocol),
+ {ok, Method, Content, {method, Protocol}};
+process({content_header, ClassId, 0, BodySize, PropertiesBin},
+ {content_header, Method, ClassId, Protocol}) ->
+ Content = empty_content(ClassId, PropertiesBin, Protocol),
+ {ok, {content_body, Method, BodySize, Content, Protocol}};
+process({content_header, HeaderClassId, 0, _BodySize, _PropertiesBin},
+ {content_header, Method, ClassId, _Protocol}) ->
+ unexpected_frame("expected content header for class ~w, "
+ "got one for class ~w instead",
+ [ClassId, HeaderClassId], Method);
+process(_Frame, {content_header, Method, ClassId, _Protocol}) ->
+ unexpected_frame("expected content header for class ~w, "
+ "got non content header frame instead", [ClassId], Method);
+process({content_body, FragmentBin},
+ {content_body, Method, RemainingSize,
+ Content = #content{payload_fragments_rev = Fragments}, Protocol}) ->
+ NewContent = Content#content{
+ payload_fragments_rev = [FragmentBin | Fragments]},
+ case RemainingSize - size(FragmentBin) of
+ 0 -> {ok, Method, NewContent, {method, Protocol}};
+ Sz -> {ok, {content_body, Method, Sz, NewContent, Protocol}}
+ end;
+process(_Frame, {content_body, Method, _RemainingSize, _Content, _Protocol}) ->
+ unexpected_frame("expected content body, "
+ "got non content body frame instead", [], Method).
+
+%%--------------------------------------------------------------------
+
+empty_content(ClassId, PropertiesBin, Protocol) ->
+ #content{class_id = ClassId,
+ properties = none,
+ properties_bin = PropertiesBin,
+ protocol = Protocol,
+ payload_fragments_rev = []}.
+
+unexpected_frame(Format, Params, Method) when is_atom(Method) ->
+ {error, rabbit_misc:amqp_error(unexpected_frame, Format, Params, Method)};
+unexpected_frame(Format, Params, Method) ->
+ unexpected_frame(Format, Params, rabbit_misc:method_record_type(Method)).
diff --git a/deps/rabbit_common/src/rabbit_control_misc.erl b/deps/rabbit_common/src/rabbit_control_misc.erl
new file mode 100644
index 0000000000..0fff88a2cd
--- /dev/null
+++ b/deps/rabbit_common/src/rabbit_control_misc.erl
@@ -0,0 +1,179 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_control_misc).
+
+-export([emitting_map/4, emitting_map/5, emitting_map_with_exit_handler/4,
+ emitting_map_with_exit_handler/5, wait_for_info_messages/6,
+ spawn_emitter_caller/7, await_emitters_termination/1,
+ print_cmd_result/2]).
+
+-spec emitting_map(pid(), reference(), fun(), list()) -> 'ok'.
+-spec emitting_map(pid(), reference(), fun(), list(), atom()) -> 'ok'.
+-spec emitting_map_with_exit_handler
+ (pid(), reference(), fun(), list()) -> 'ok'.
+-spec emitting_map_with_exit_handler
+ (pid(), reference(), fun(), list(), 'continue') -> 'ok'.
+
+-type fold_fun() :: fun((Item :: term(), AccIn :: term()) -> AccOut :: term()).
+
+-spec wait_for_info_messages(pid(), reference(), fold_fun(), InitialAcc, timeout(), non_neg_integer()) -> OK | Err when
+ InitialAcc :: term(), Acc :: term(), OK :: {ok, Acc}, Err :: {error, term()}.
+-spec spawn_emitter_caller(node(), module(), atom(), [term()], reference(), pid(), timeout()) -> 'ok'.
+-spec await_emitters_termination([pid()]) -> 'ok'.
+
+-spec print_cmd_result(atom(), term()) -> 'ok'.
+
+emitting_map(AggregatorPid, Ref, Fun, List) ->
+ emitting_map(AggregatorPid, Ref, Fun, List, continue),
+ AggregatorPid ! {Ref, finished},
+ ok.
+
+emitting_map(AggregatorPid, Ref, Fun, List, continue) ->
+ _ = emitting_map0(AggregatorPid, Ref, Fun, List, fun step/4),
+ ok.
+
+emitting_map_with_exit_handler(AggregatorPid, Ref, Fun, List) ->
+ emitting_map_with_exit_handler(AggregatorPid, Ref, Fun, List, continue),
+ AggregatorPid ! {Ref, finished},
+ ok.
+
+emitting_map_with_exit_handler(AggregatorPid, Ref, Fun, List, continue) ->
+ _ = emitting_map0(AggregatorPid, Ref, Fun, List, fun step_with_exit_handler/4),
+ ok.
+
+emitting_map0(AggregatorPid, Ref, Fun, List, StepFun) ->
+ [StepFun(AggregatorPid, Ref, Fun, Item) || Item <- List].
+
+step(AggregatorPid, Ref, Fun, Item) ->
+ AggregatorPid ! {Ref, Fun(Item), continue},
+ ok.
+
+step_with_exit_handler(AggregatorPid, Ref, Fun, Item) ->
+ Noop = make_ref(),
+ case rabbit_misc:with_exit_handler(
+ fun () -> Noop end,
+ fun () -> Fun(Item) end) of
+ Noop ->
+ ok;
+ Res ->
+ AggregatorPid ! {Ref, Res, continue},
+ ok
+ end.
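+%% Illustrative: emitting_map(Agg, Ref, fun (X) -> X * 2 end, [1, 2])
+%% makes Agg receive {Ref, 2, continue}, {Ref, 4, continue} and finally
+%% {Ref, finished}.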
+
+%% Invokes an RPC for async info collection in a separate process
+%% (linked to the caller). That process waits for the RPC to finish
+%% and, in case of errors, sends them in a
+%% wait_for_info_messages/6-compatible form to the aggregator process.
+%% The calling process is then expected to do a blocking call of
+%% wait_for_info_messages/6.
+%%
+%% The remote function MUST use calls to emitting_map/4 (and the other
+%% emitting_map variants) to properly deliver the requested information
+%% to the aggregator process.
+%%
+%% If for performance reasons several parallel emitting_map calls need
+%% to be run, the remote function MUST NOT return until all of them are
+%% done, and during all this time the remote RPC process MUST remain
+%% linked to the emitting processes. The await_emitters_termination/1
+%% helper can be used as the last statement of the remote function to
+%% ensure this behaviour.
+spawn_emitter_caller(Node, Mod, Fun, Args, Ref, Pid, Timeout) ->
+ _ = spawn_monitor(
+ fun () ->
+ case rpc_call_emitter(Node, Mod, Fun, Args, Ref, Pid, Timeout) of
+ {error, _} = Error ->
+ Pid ! {Ref, error, Error};
+ {bad_argument, _} = Error ->
+ Pid ! {Ref, error, Error};
+ {badrpc, _} = Error ->
+ Pid ! {Ref, error, Error};
+ _ ->
+ ok
+ end
+ end),
+ ok.
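+%% Caller-side sketch (module and function names are hypothetical):
+%% Ref = make_ref(),
+%% ok = spawn_emitter_caller(Node, some_mod, emit_info, [], Ref,
+%% self(), Timeout),
+%% {ok, Items} = wait_for_info_messages(
+%% self(), Ref, fun (X, Acc) -> [X | Acc] end, [],
+%% Timeout, 1).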
+
+rpc_call_emitter(Node, Mod, Fun, Args, Ref, Pid, Timeout) ->
+ rabbit_misc:rpc_call(Node, Mod, Fun, Args++[Ref, Pid], Timeout).
+
+%% The aggregator process expects the correct number of explicit ACKs
+%% about finished emission processes. While everything is linked, we
+%% still need to somehow wait for the termination of all emitters
+%% before returning from the RPC call - otherwise the links would just
+%% be broken with reason 'normal' and we could miss some errors, and
+%% subsequently hang.
+await_emitters_termination(Pids) ->
+ Monitors = [erlang:monitor(process, Pid) || Pid <- Pids],
+ collect_monitors(Monitors).
+
+collect_monitors([]) ->
+ ok;
+collect_monitors([Monitor|Rest]) ->
+ receive
+ {'DOWN', Monitor, process, _Pid, normal} ->
+ collect_monitors(Rest);
+ {'DOWN', Monitor, process, _Pid, noproc} ->
+ %% There is both a link and a monitor to the process.
+ %% Matching this clause means that the process
+ %% terminated gracefully even before we started
+ %% monitoring it.
+ collect_monitors(Rest);
+ {'DOWN', _, process, Pid, Reason} when Reason =/= normal,
+ Reason =/= noproc ->
+ exit({emitter_exit, Pid, Reason})
+ end.
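+%% Remote-side sketch (all names illustrative): run several emitters in
+%% parallel and only return once every one of them is done:
+%% Pids = [spawn_link(fun () -> emitting_map(Agg, Ref, Fun, Part) end)
+%% || Part <- Partitions],
+%% await_emitters_termination(Pids).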
+
+%% Wait for result of one or more calls to emitting_map-family
+%% functions.
+%%
+%% The number of expected acknowledgments is specified by the
+%% ChunkCount argument. The most common usage will be with ChunkCount
+%% equal to the number of live nodes, but that is not mandatory - thus
+%% the more generic name 'ChunkCount' was chosen.
+wait_for_info_messages(Pid, Ref, Fun, Acc0, Timeout, ChunkCount) ->
+ _ = notify_if_timeout(Pid, Ref, Timeout),
+ wait_for_info_messages(Ref, Fun, Acc0, ChunkCount).
+
+wait_for_info_messages(Ref, Fun, Acc0, ChunksLeft) ->
+ receive
+ {Ref, finished} when ChunksLeft =:= 1 ->
+ {ok, Acc0};
+ {Ref, finished} ->
+ wait_for_info_messages(Ref, Fun, Acc0, ChunksLeft - 1);
+ {Ref, {timeout, T}} ->
+ exit({error, {timeout, (T / 1000)}});
+ {Ref, []} ->
+ wait_for_info_messages(Ref, Fun, Acc0, ChunksLeft);
+ {Ref, Result, continue} ->
+ wait_for_info_messages(Ref, Fun, Fun(Result, Acc0), ChunksLeft);
+ {Ref, error, Error} ->
+ {error, simplify_emission_error(Error)};
+ {'DOWN', _MRef, process, _Pid, normal} ->
+ wait_for_info_messages(Ref, Fun, Acc0, ChunksLeft);
+ {'DOWN', _MRef, process, _Pid, Reason} ->
+ {error, simplify_emission_error(Reason)};
+ _Msg ->
+ wait_for_info_messages(Ref, Fun, Acc0, ChunksLeft)
+ end.
+
+simplify_emission_error({badrpc, {'EXIT', {{nocatch, EmissionError}, _Stacktrace}}}) ->
+ EmissionError;
+simplify_emission_error({{nocatch, EmissionError}, _Stacktrace}) ->
+ EmissionError;
+simplify_emission_error({error, _} = Error) ->
+ Error;
+simplify_emission_error({bad_argument, _} = Error) ->
+ Error;
+simplify_emission_error(Anything) ->
+ {error, Anything}.
+
+notify_if_timeout(_, _, infinity) ->
+ ok;
+notify_if_timeout(Pid, Ref, Timeout) ->
+ erlang:send_after(Timeout, Pid, {Ref, {timeout, Timeout}}).
+
+print_cmd_result(authenticate_user, _Result) -> io:format("Success~n");
+print_cmd_result(join_cluster, already_member) -> io:format("The node is already a member of this cluster~n").
diff --git a/deps/rabbit_common/src/rabbit_core_metrics.erl b/deps/rabbit_common/src/rabbit_core_metrics.erl
new file mode 100644
index 0000000000..3a6732c0d2
--- /dev/null
+++ b/deps/rabbit_common/src/rabbit_core_metrics.erl
@@ -0,0 +1,437 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_core_metrics).
+
+-include("rabbit_core_metrics.hrl").
+
+-export([init/0]).
+-export([terminate/0]).
+
+-export([connection_created/2,
+ connection_closed/1,
+ connection_stats/2,
+ connection_stats/4]).
+
+-export([channel_created/2,
+ channel_closed/1,
+ channel_stats/2,
+ channel_stats/3,
+ channel_stats/4,
+ channel_queue_down/1,
+ channel_queue_exchange_down/1,
+ channel_exchange_down/1]).
+
+-export([consumer_created/9,
+ consumer_updated/9,
+ consumer_deleted/3]).
+
+-export([queue_stats/2,
+ queue_stats/5,
+ queue_declared/1,
+ queue_created/1,
+ queue_deleted/1,
+ queues_deleted/1]).
+
+-export([node_stats/2]).
+
+-export([node_node_stats/2]).
+
+-export([gen_server2_stats/2,
+ gen_server2_deleted/1,
+ get_gen_server2_stats/1]).
+
+-export([delete/2]).
+
+-export([auth_attempt_failed/3,
+ auth_attempt_succeeded/3,
+ reset_auth_attempt_metrics/0,
+ get_auth_attempts/0,
+ get_auth_attempts_by_source/0]).
+
+%%----------------------------------------------------------------------------
+%% Types
+%%----------------------------------------------------------------------------
+-type(channel_stats_id() :: pid() |
+ {pid(),
+ {rabbit_amqqueue:name(), rabbit_exchange:name()}} |
+ {pid(), rabbit_amqqueue:name()} |
+ {pid(), rabbit_exchange:name()}).
+
+-type(channel_stats_type() :: queue_exchange_stats | queue_stats |
+ exchange_stats | reductions).
+
+-type(activity_status() :: up | single_active | waiting | suspected_down).
+%%----------------------------------------------------------------------------
+%% Specs
+%%----------------------------------------------------------------------------
+-spec init() -> ok.
+-spec connection_created(pid(), rabbit_types:infos()) -> ok.
+-spec connection_closed(pid()) -> ok.
+-spec connection_stats(pid(), rabbit_types:infos()) -> ok.
+-spec connection_stats(pid(), integer(), integer(), integer()) -> ok.
+-spec channel_created(pid(), rabbit_types:infos()) -> ok.
+-spec channel_closed(pid()) -> ok.
+-spec channel_stats(pid(), rabbit_types:infos()) -> ok.
+-spec channel_stats(channel_stats_type(), channel_stats_id(),
+ rabbit_types:infos() | integer()) -> ok.
+-spec channel_queue_down({pid(), rabbit_amqqueue:name()}) -> ok.
+-spec channel_queue_exchange_down({pid(), {rabbit_amqqueue:name(),
+ rabbit_exchange:name()}}) -> ok.
+-spec channel_exchange_down({pid(), rabbit_exchange:name()}) -> ok.
+-spec consumer_created(pid(), binary(), boolean(), boolean(),
+ rabbit_amqqueue:name(), integer(), boolean(), activity_status(), list()) -> ok.
+-spec consumer_updated(pid(), binary(), boolean(), boolean(),
+ rabbit_amqqueue:name(), integer(), boolean(), activity_status(), list()) -> ok.
+-spec consumer_deleted(pid(), binary(), rabbit_amqqueue:name()) -> ok.
+-spec queue_stats(rabbit_amqqueue:name(), rabbit_types:infos()) -> ok.
+-spec queue_stats(rabbit_amqqueue:name(), integer(), integer(), integer(),
+ integer()) -> ok.
+-spec node_stats(atom(), rabbit_types:infos()) -> ok.
+-spec node_node_stats({node(), node()}, rabbit_types:infos()) -> ok.
+-spec gen_server2_stats(pid(), integer()) -> ok.
+-spec gen_server2_deleted(pid()) -> ok.
+-spec get_gen_server2_stats(pid()) -> integer() | 'not_found'.
+-spec delete(atom(), any()) -> ok.
+%%----------------------------------------------------------------------------
+%% Storage of the raw metrics in RabbitMQ core. All the processing of stats
+%% is done by the management plugin.
+%%----------------------------------------------------------------------------
+%%----------------------------------------------------------------------------
+%% API
+%%----------------------------------------------------------------------------
+init() ->
+ _ = [ets:new(Table, [Type, public, named_table, {write_concurrency, true},
+ {read_concurrency, true}])
+ || {Table, Type} <- ?CORE_TABLES ++ ?CORE_EXTRA_TABLES],
+ ok.
+
+terminate() ->
+ [ets:delete(Table)
+ || {Table, _Type} <- ?CORE_TABLES ++ ?CORE_EXTRA_TABLES],
+ ok.
+
+connection_created(Pid, Infos) ->
+ ets:insert(connection_created, {Pid, Infos}),
+ ets:update_counter(connection_churn_metrics, node(), {2, 1},
+ ?CONNECTION_CHURN_METRICS),
+ ok.
+
+connection_closed(Pid) ->
+ ets:delete(connection_created, Pid),
+ ets:delete(connection_metrics, Pid),
+ %% Delete marker
+ ets:update_element(connection_coarse_metrics, Pid, {5, 1}),
+ ets:update_counter(connection_churn_metrics, node(), {3, 1},
+ ?CONNECTION_CHURN_METRICS),
+ ok.
+
+connection_stats(Pid, Infos) ->
+ ets:insert(connection_metrics, {Pid, Infos}),
+ ok.
+
+connection_stats(Pid, Recv_oct, Send_oct, Reductions) ->
+ %% Includes delete marker
+ ets:insert(connection_coarse_metrics, {Pid, Recv_oct, Send_oct, Reductions, 0}),
+ ok.
+
+channel_created(Pid, Infos) ->
+ ets:insert(channel_created, {Pid, Infos}),
+ ets:update_counter(connection_churn_metrics, node(), {4, 1},
+ ?CONNECTION_CHURN_METRICS),
+ ok.
+
+channel_closed(Pid) ->
+ ets:delete(channel_created, Pid),
+ ets:delete(channel_metrics, Pid),
+ ets:delete(channel_process_metrics, Pid),
+ ets:update_counter(connection_churn_metrics, node(), {5, 1},
+ ?CONNECTION_CHURN_METRICS),
+ ok.
+
+channel_stats(Pid, Infos) ->
+ ets:insert(channel_metrics, {Pid, Infos}),
+ ok.
+
+channel_stats(reductions, Id, Value) ->
+ ets:insert(channel_process_metrics, {Id, Value}),
+ ok.
+
+channel_stats(exchange_stats, publish, Id, Value) ->
+ %% Includes delete marker
+ _ = ets:update_counter(channel_exchange_metrics, Id, {2, Value}, {Id, 0, 0, 0, 0, 0}),
+ ok;
+channel_stats(exchange_stats, confirm, Id, Value) ->
+ %% Includes delete marker
+ _ = ets:update_counter(channel_exchange_metrics, Id, {3, Value}, {Id, 0, 0, 0, 0, 0}),
+ ok;
+channel_stats(exchange_stats, return_unroutable, Id, Value) ->
+ %% Includes delete marker
+ _ = ets:update_counter(channel_exchange_metrics, Id, {4, Value}, {Id, 0, 0, 0, 0, 0}),
+ ok;
+channel_stats(exchange_stats, drop_unroutable, Id, Value) ->
+ %% Includes delete marker
+ _ = ets:update_counter(channel_exchange_metrics, Id, {5, Value}, {Id, 0, 0, 0, 0, 0}),
+ ok;
+channel_stats(queue_exchange_stats, publish, Id, Value) ->
+ %% Includes delete marker
+ _ = ets:update_counter(channel_queue_exchange_metrics, Id, Value, {Id, 0, 0}),
+ ok;
+channel_stats(queue_stats, get, Id, Value) ->
+ %% Includes delete marker
+ _ = ets:update_counter(channel_queue_metrics, Id, {2, Value}, {Id, 0, 0, 0, 0, 0, 0, 0, 0}),
+ ok;
+channel_stats(queue_stats, get_no_ack, Id, Value) ->
+ %% Includes delete marker
+ _ = ets:update_counter(channel_queue_metrics, Id, {3, Value}, {Id, 0, 0, 0, 0, 0, 0, 0, 0}),
+ ok;
+channel_stats(queue_stats, deliver, Id, Value) ->
+ %% Includes delete marker
+ _ = ets:update_counter(channel_queue_metrics, Id, {4, Value}, {Id, 0, 0, 0, 0, 0, 0, 0, 0}),
+ ok;
+channel_stats(queue_stats, deliver_no_ack, Id, Value) ->
+ %% Includes delete marker
+ _ = ets:update_counter(channel_queue_metrics, Id, {5, Value}, {Id, 0, 0, 0, 0, 0, 0, 0, 0}),
+ ok;
+channel_stats(queue_stats, redeliver, Id, Value) ->
+ %% Includes delete marker
+ _ = ets:update_counter(channel_queue_metrics, Id, {6, Value}, {Id, 0, 0, 0, 0, 0, 0, 0, 0}),
+ ok;
+channel_stats(queue_stats, ack, Id, Value) ->
+ %% Includes delete marker
+ _ = ets:update_counter(channel_queue_metrics, Id, {7, Value}, {Id, 0, 0, 0, 0, 0, 0, 0, 0}),
+ ok;
+channel_stats(queue_stats, get_empty, Id, Value) ->
+ %% Includes delete marker
+ _ = ets:update_counter(channel_queue_metrics, Id, {8, Value}, {Id, 0, 0, 0, 0, 0, 0, 0, 0}),
+ ok.
+
+delete(Table, Key) ->
+ ets:delete(Table, Key),
+ ok.
+
+channel_queue_down(Id) ->
+ %% Delete marker
+ ets:update_element(channel_queue_metrics, Id, {9, 1}),
+ ok.
+
+channel_queue_exchange_down(Id) ->
+ %% Delete marker
+ ets:update_element(channel_queue_exchange_metrics, Id, {3, 1}),
+ ok.
+
+channel_exchange_down(Id) ->
+ %% Delete marker
+ ets:update_element(channel_exchange_metrics, Id, {6, 1}),
+ ok.
+
+consumer_created(ChPid, ConsumerTag, ExclusiveConsume, AckRequired, QName,
+ PrefetchCount, Active, ActivityStatus, Args) ->
+ ets:insert(consumer_created, {{QName, ChPid, ConsumerTag}, ExclusiveConsume,
+ AckRequired, PrefetchCount, Active, ActivityStatus, Args}),
+ ok.
+
+consumer_updated(ChPid, ConsumerTag, ExclusiveConsume, AckRequired, QName,
+ PrefetchCount, Active, ActivityStatus, Args) ->
+ ets:insert(consumer_created, {{QName, ChPid, ConsumerTag}, ExclusiveConsume,
+ AckRequired, PrefetchCount, Active, ActivityStatus, Args}),
+ ok.
+
+consumer_deleted(ChPid, ConsumerTag, QName) ->
+ ets:delete(consumer_created, {QName, ChPid, ConsumerTag}),
+ ok.
+
+queue_stats(Name, Infos) ->
+ %% Includes delete marker
+ ets:insert(queue_metrics, {Name, Infos, 0}),
+ ok.
+
+queue_stats(Name, MessagesReady, MessagesUnacknowledge, Messages, Reductions) ->
+ ets:insert(queue_coarse_metrics, {Name, MessagesReady, MessagesUnacknowledge,
+ Messages, Reductions}),
+ ok.
+
+queue_declared(_Name) ->
+ %% Name is not needed, but might be useful in the future.
+ ets:update_counter(connection_churn_metrics, node(), {6, 1},
+ ?CONNECTION_CHURN_METRICS),
+ ok.
+
+queue_created(_Name) ->
+ %% Name is not needed, but might be useful in the future.
+ ets:update_counter(connection_churn_metrics, node(), {7, 1},
+ ?CONNECTION_CHURN_METRICS),
+ ok.
+
+queue_deleted(Name) ->
+ ets:delete(queue_coarse_metrics, Name),
+ ets:update_counter(connection_churn_metrics, node(), {8, 1},
+ ?CONNECTION_CHURN_METRICS),
+ %% Delete markers
+ ets:update_element(queue_metrics, Name, {3, 1}),
+ CQX = ets:select(channel_queue_exchange_metrics, match_spec_cqx(Name)),
+ lists:foreach(fun(Key) ->
+ ets:update_element(channel_queue_exchange_metrics, Key, {3, 1})
+ end, CQX),
+ CQ = ets:select(channel_queue_metrics, match_spec_cq(Name)),
+ lists:foreach(fun(Key) ->
+ ets:update_element(channel_queue_metrics, Key, {9, 1})
+ end, CQ).
+
+queues_deleted(Queues) ->
+ ets:update_counter(connection_churn_metrics, node(), {8, length(Queues)},
+ ?CONNECTION_CHURN_METRICS),
+ [ delete_queue_metrics(Queue) || Queue <- Queues ],
+ [
+ begin
+ MatchSpecCondition = build_match_spec_conditions_to_delete_all_queues(QueuesPartition),
+ delete_channel_queue_exchange_metrics(MatchSpecCondition),
+ delete_channel_queue_metrics(MatchSpecCondition)
+ end || QueuesPartition <- partition_queues(Queues)
+ ],
+ ok.
+
+partition_queues(Queues) when length(Queues) >= 1000 ->
+ {Partition, Rest} = lists:split(1000, Queues),
+ [Partition | partition_queues(Rest)];
+partition_queues(Queues) ->
+ [Queues].
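+%% Illustrative: partition_queues(lists:seq(1, 2500)) returns chunks of
+%% 1000, 1000 and 500 elements, keeping the match specs built below
+%% reasonably small.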
+
+delete_queue_metrics(Queue) ->
+ ets:delete(queue_coarse_metrics, Queue),
+ ets:update_element(queue_metrics, Queue, {3, 1}),
+ ok.
+
+delete_channel_queue_exchange_metrics(MatchSpecCondition) ->
+ ChannelQueueExchangeMetricsToUpdate = ets:select(
+ channel_queue_exchange_metrics,
+ [
+ {
+ {{'$2', {'$1', '$3'}}, '_', '_'},
+ [MatchSpecCondition],
+ [{{'$2', {{'$1', '$3'}}}}]
+ }
+ ]
+ ),
+ lists:foreach(fun(Key) ->
+ ets:update_element(channel_queue_exchange_metrics, Key, {3, 1})
+ end, ChannelQueueExchangeMetricsToUpdate).
+
+delete_channel_queue_metrics(MatchSpecCondition) ->
+ ChannelQueueMetricsToUpdate = ets:select(
+ channel_queue_metrics,
+ [
+ {
+ {{'$2', '$1'}, '_', '_', '_', '_', '_', '_', '_', '_'},
+ [MatchSpecCondition],
+ [{{'$2', '$1'}}]
+ }
+ ]
+ ),
+ lists:foreach(fun(Key) ->
+ ets:update_element(channel_queue_metrics, Key, {9, 1})
+ end, ChannelQueueMetricsToUpdate).
+
+% [{'orelse',
+% {'==', {Queue}, '$1'},
+% {'orelse',
+% {'==', {Queue}, '$1'},
+% % ...
+% {'orelse',
+% {'==', {Queue}, '$1'},
+% {'==', true, true}
+% }
+% }
+% }],
+build_match_spec_conditions_to_delete_all_queues([Queue|Queues]) ->
+ {'orelse',
+ {'==', {Queue}, '$1'},
+ build_match_spec_conditions_to_delete_all_queues(Queues)
+ };
+build_match_spec_conditions_to_delete_all_queues([]) ->
+ true.
+
+node_stats(persister_metrics, Infos) ->
+ ets:insert(node_persister_metrics, {node(), Infos}),
+ ok;
+node_stats(coarse_metrics, Infos) ->
+ ets:insert(node_coarse_metrics, {node(), Infos}),
+ ok;
+node_stats(node_metrics, Infos) ->
+ ets:insert(node_metrics, {node(), Infos}),
+ ok.
+
+node_node_stats(Id, Infos) ->
+ ets:insert(node_node_metrics, {Id, Infos}),
+ ok.
+
+match_spec_cqx(Id) ->
+ [{{{'$2', {'$1', '$3'}}, '_', '_'}, [{'==', {Id}, '$1'}], [{{'$2', {{'$1', '$3'}}}}]}].
+
+match_spec_cq(Id) ->
+ [{{{'$2', '$1'}, '_', '_', '_', '_', '_', '_', '_', '_'}, [{'==', {Id}, '$1'}], [{{'$2', '$1'}}]}].
+
+gen_server2_stats(Pid, BufferLength) ->
+ ets:insert(gen_server2_metrics, {Pid, BufferLength}),
+ ok.
+
+gen_server2_deleted(Pid) ->
+ ets:delete(gen_server2_metrics, Pid),
+ ok.
+
+get_gen_server2_stats(Pid) ->
+ case ets:lookup(gen_server2_metrics, Pid) of
+ [{Pid, BufferLength}] ->
+ BufferLength;
+ [] ->
+ not_found
+ end.
+
+auth_attempt_succeeded(RemoteAddress, Username, Protocol) ->
+ %% ETS entry is {Key = {RemoteAddress, Username}, Total, Succeeded, Failed}
+ update_auth_attempt(RemoteAddress, Username, Protocol, [{2, 1}, {3, 1}]).
+
+auth_attempt_failed(RemoteAddress, Username, Protocol) ->
+ %% ETS entry is {Key = {RemoteAddress, Username}, Total, Succeeded, Failed}
+ update_auth_attempt(RemoteAddress, Username, Protocol, [{2, 1}, {4, 1}]).
+
+update_auth_attempt(RemoteAddress, Username, Protocol, Incr) ->
+ %% This defaults to false, as per-IP/user metrics could keep
+ %% growing indefinitely. It's up to the operator to enable them
+ %% and reset them if required.
+ case application:get_env(rabbit, track_auth_attempt_source) of
+ {ok, true} ->
+ case {RemoteAddress, Username} of
+ {<<>>, <<>>} ->
+ ok;
+ _ ->
+ Key = {RemoteAddress, Username, Protocol},
+ _ = ets:update_counter(auth_attempt_detailed_metrics, Key, Incr, {Key, 0, 0, 0})
+ end;
+ {ok, false} ->
+ ok
+ end,
+ _ = ets:update_counter(auth_attempt_metrics, Protocol, Incr, {Protocol, 0, 0, 0}),
+ ok.
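+%% Per-source tracking is enabled via the application environment, e.g.:
+%% application:set_env(rabbit, track_auth_attempt_source, true).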
+
+reset_auth_attempt_metrics() ->
+ ets:delete_all_objects(auth_attempt_metrics),
+ ets:delete_all_objects(auth_attempt_detailed_metrics),
+ ok.
+
+get_auth_attempts() ->
+ [format_auth_attempt(A) || A <- ets:tab2list(auth_attempt_metrics)].
+
+get_auth_attempts_by_source() ->
+ [format_auth_attempt(A) || A <- ets:tab2list(auth_attempt_detailed_metrics)].
+
+format_auth_attempt({{RemoteAddress, Username, Protocol}, Total, Succeeded, Failed}) ->
+ [{remote_address, RemoteAddress}, {username, Username},
+ {protocol, atom_to_binary(Protocol, utf8)}, {auth_attempts, Total},
+ {auth_attempts_failed, Failed}, {auth_attempts_succeeded, Succeeded}];
+format_auth_attempt({Protocol, Total, Succeeded, Failed}) ->
+ [{protocol, atom_to_binary(Protocol, utf8)}, {auth_attempts, Total},
+ {auth_attempts_failed, Failed}, {auth_attempts_succeeded, Succeeded}].
diff --git a/deps/rabbit_common/src/rabbit_data_coercion.erl b/deps/rabbit_common/src/rabbit_data_coercion.erl
new file mode 100644
index 0000000000..9d2b39da94
--- /dev/null
+++ b/deps/rabbit_common/src/rabbit_data_coercion.erl
@@ -0,0 +1,47 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_data_coercion).
+
+-export([to_binary/1, to_list/1, to_atom/1, to_integer/1, to_proplist/1, to_map/1]).
+-export([to_atom/2]).
+
+-spec to_binary(Val :: binary() | list() | atom() | integer()) -> binary().
+to_binary(Val) when is_list(Val) -> list_to_binary(Val);
+to_binary(Val) when is_atom(Val) -> atom_to_binary(Val, utf8);
+to_binary(Val) when is_integer(Val) -> integer_to_binary(Val);
+to_binary(Val) -> Val.
+
+-spec to_list(Val :: integer() | list() | binary() | atom() | map()) -> list().
+to_list(Val) when is_list(Val) -> Val;
+to_list(Val) when is_map(Val) -> maps:to_list(Val);
+to_list(Val) when is_atom(Val) -> atom_to_list(Val);
+to_list(Val) when is_binary(Val) -> binary_to_list(Val);
+to_list(Val) when is_integer(Val) -> integer_to_list(Val).
+
+-spec to_atom(Val :: atom() | list() | binary()) -> atom().
+to_atom(Val) when is_atom(Val) -> Val;
+to_atom(Val) when is_list(Val) -> list_to_atom(Val);
+to_atom(Val) when is_binary(Val) -> binary_to_atom(Val, utf8).
+
+-spec to_atom(Val :: atom() | list() | binary(), Encoding :: atom()) -> atom().
+to_atom(Val, _Encoding) when is_atom(Val) -> Val;
+to_atom(Val, _Encoding) when is_list(Val) -> list_to_atom(Val);
+to_atom(Val, Encoding) when is_binary(Val) -> binary_to_atom(Val, Encoding).
+
+-spec to_integer(Val :: integer() | list() | binary()) -> integer().
+to_integer(Val) when is_integer(Val) -> Val;
+to_integer(Val) when is_list(Val) -> list_to_integer(Val);
+to_integer(Val) when is_binary(Val) -> binary_to_integer(Val).
+
+-spec to_proplist(Val :: map() | list()) -> list().
+to_proplist(Val) when is_list(Val) -> Val;
+to_proplist(Val) when is_map(Val) -> maps:to_list(Val).
+
+-spec to_map(Val :: map() | list()) -> map().
+to_map(Val) when is_map(Val) -> Val;
+to_map(Val) when is_list(Val) -> maps:from_list(Val).
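+
+%% Illustrative conversions:
+%% to_binary(42) =:= <<"42">>,
+%% to_atom(<<"queue">>) =:= queue,
+%% to_map([{ttl, 60}]) =:= #{ttl => 60}.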
diff --git a/deps/rabbit_common/src/rabbit_env.erl b/deps/rabbit_common/src/rabbit_env.erl
new file mode 100644
index 0000000000..8817103e81
--- /dev/null
+++ b/deps/rabbit_common/src/rabbit_env.erl
@@ -0,0 +1,1850 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2019-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_env).
+
+-include_lib("kernel/include/file.hrl").
+
+-export([get_context/0,
+ get_context/1,
+ get_context_before_logging_init/0,
+ get_context_before_logging_init/1,
+ get_context_after_logging_init/1,
+ get_context_after_reloading_env/1,
+ dbg_config/0,
+ get_used_env_vars/0,
+ log_process_env/0,
+ log_context/1,
+ context_to_app_env_vars/1,
+ context_to_app_env_vars_no_logging/1,
+ context_to_code_path/1]).
+
+-ifdef(TEST).
+-export([parse_conf_env_file_output2/2,
+ value_is_yes/1]).
+-endif.
+
+-define(USED_ENV_VARS,
+ [
+ "RABBITMQ_ALLOW_INPUT",
+ "RABBITMQ_ADVANCED_CONFIG_FILE",
+ "RABBITMQ_BASE",
+ "RABBITMQ_CONF_ENV_FILE",
+ "RABBITMQ_CONFIG_FILE",
+ "RABBITMQ_CONFIG_FILES",
+ "RABBITMQ_DBG",
+ "RABBITMQ_DIST_PORT",
+ "RABBITMQ_ENABLED_PLUGINS",
+ "RABBITMQ_ENABLED_PLUGINS_FILE",
+ "RABBITMQ_FEATURE_FLAGS",
+ "RABBITMQ_FEATURE_FLAGS_FILE",
+ "RABBITMQ_HOME",
+ "RABBITMQ_KEEP_PID_FILE_ON_EXIT",
+ "RABBITMQ_LOG",
+ "RABBITMQ_LOG_BASE",
+ "RABBITMQ_LOG_FF_REGISTRY",
+ "RABBITMQ_LOGS",
+ "RABBITMQ_MNESIA_BASE",
+ "RABBITMQ_MNESIA_DIR",
+ "RABBITMQ_MOTD_FILE",
+ "RABBITMQ_NODE_IP_ADDRESS",
+ "RABBITMQ_NODE_PORT",
+ "RABBITMQ_NODENAME",
+ "RABBITMQ_PID_FILE",
+ "RABBITMQ_PLUGINS_DIR",
+ "RABBITMQ_PLUGINS_EXPAND_DIR",
+ "RABBITMQ_PRODUCT_NAME",
+ "RABBITMQ_PRODUCT_VERSION",
+ "RABBITMQ_QUORUM_DIR",
+ "RABBITMQ_STREAM_DIR",
+ "RABBITMQ_UPGRADE_LOG",
+ "RABBITMQ_USE_LONGNAME",
+ "SYS_PREFIX"
+ ]).
+
+get_context() ->
+ Context0 = get_context_before_logging_init(),
+ Context1 = get_context_after_logging_init(Context0),
+ get_context_after_reloading_env(Context1).
+
+get_context(TakeFromRemoteNode) ->
+ Context0 = get_context_before_logging_init(TakeFromRemoteNode),
+ Context1 = get_context_after_logging_init(Context0),
+ get_context_after_reloading_env(Context1).
+
+get_context_before_logging_init() ->
+ get_context_before_logging_init(false).
+
+get_context_before_logging_init(TakeFromRemoteNode) ->
+ %% The order of steps below is important because some of them
+ %% depend on previous steps.
+ Steps = [
+ fun os_type/1,
+ fun log_levels/1,
+ fun interactive_shell/1,
+ fun output_supports_colors/1
+ ],
+
+ run_context_steps(context_base(TakeFromRemoteNode), Steps).
+
+get_context_after_logging_init(Context) ->
+ %% The order of steps below is important because some of them
+ %% depend on previous steps.
+ Steps = [
+ fun sys_prefix/1,
+ fun rabbitmq_base/1,
+ fun data_dir/1,
+ fun rabbitmq_home/1,
+ fun config_base_dir/1,
+ fun load_conf_env_file/1,
+ fun log_levels/1
+ ],
+
+ run_context_steps(Context, Steps).
+
+get_context_after_reloading_env(Context) ->
+ %% The order of steps below is important because some of them
+ %% depend on previous steps.
+ Steps = [
+ fun nodename_type/1,
+ fun nodename/1,
+ fun split_nodename/1,
+ fun maybe_setup_dist_for_remote_query/1,
+ fun dbg_config/1,
+ fun main_config_file/1,
+ fun additional_config_files/1,
+ fun advanced_config_file/1,
+ fun log_base_dir/1,
+ fun main_log_file/1,
+ fun upgrade_log_file/1,
+ fun mnesia_base_dir/1,
+ fun mnesia_dir/1,
+ fun quorum_queue_dir/1,
+ fun stream_queue_dir/1,
+ fun pid_file/1,
+ fun keep_pid_file_on_exit/1,
+ fun feature_flags_file/1,
+ fun forced_feature_flags_on_init/1,
+ fun log_feature_flags_registry/1,
+ fun plugins_path/1,
+ fun plugins_expand_dir/1,
+ fun enabled_plugins_file/1,
+ fun enabled_plugins/1,
+ fun maybe_stop_dist_for_remote_query/1,
+ fun amqp_ipaddr/1,
+ fun amqp_tcp_port/1,
+ fun erlang_dist_tcp_port/1,
+ fun product_name/1,
+ fun product_version/1,
+ fun motd_file/1
+ ],
+
+ run_context_steps(Context, Steps).
+
+context_base(TakeFromRemoteNode) ->
+ Context = #{},
+ case TakeFromRemoteNode of
+ false ->
+ Context;
+ offline ->
+ update_context(Context,
+ from_remote_node,
+ offline);
+ _ when is_atom(TakeFromRemoteNode) ->
+ update_context(Context,
+ from_remote_node,
+ {TakeFromRemoteNode, 10000});
+ {RemoteNode, infinity}
+ when is_atom(RemoteNode) ->
+ update_context(Context,
+ from_remote_node,
+ TakeFromRemoteNode);
+ {RemoteNode, Timeout}
+ when is_atom(RemoteNode) andalso
+ is_integer(Timeout) andalso
+ Timeout >= 0 ->
+ update_context(Context,
+ from_remote_node,
+ {TakeFromRemoteNode, Timeout})
+ end.
+
+-ifdef(TEST).
+os_type(Context) ->
+ {OSType, Origin} =
+ try
+ {persistent_term:get({?MODULE, os_type}), environment}
+ catch
+ _:badarg ->
+ {os:type(), default}
+ end,
+ update_context(Context, os_type, OSType, Origin).
+-else.
+os_type(Context) ->
+ update_context(Context, os_type, os:type(), default).
+-endif.
+
+run_context_steps(Context, Steps) ->
+ lists:foldl(
+ fun(Step, Context1) -> Step(Context1) end,
+ Context,
+ Steps).
+
+update_context(Context, Key, Value) ->
+ Context#{Key => Value}.
+
+-define(origin_is_valid(O),
+ O =:= default orelse
+ O =:= environment orelse
+ O =:= remote_node).
+
+update_context(#{var_origins := Origins} = Context, Key, Value, Origin)
+ when ?origin_is_valid(Origin) ->
+ Context#{Key => Value,
+ var_origins => Origins#{Key => Origin}};
+update_context(Context, Key, Value, Origin)
+ when ?origin_is_valid(Origin) ->
+ Context#{Key => Value,
+ var_origins => #{Key => Origin}}.
+
+get_used_env_vars() ->
+ lists:filter(
+ fun({Var, _}) -> var_is_used(Var) end,
+ lists:sort(os:list_env_vars())).
+
+log_process_env() ->
+ rabbit_log_prelaunch:debug("Process environment:"),
+ lists:foreach(
+ fun({Var, Value}) ->
+ rabbit_log_prelaunch:debug(" - ~s = ~ts", [Var, Value])
+ end, lists:sort(os:list_env_vars())).
+
+log_context(Context) ->
+ rabbit_log_prelaunch:debug("Context (based on environment variables):"),
+ lists:foreach(
+ fun(Key) ->
+ Value = maps:get(Key, Context),
+ rabbit_log_prelaunch:debug(" - ~s: ~p", [Key, Value])
+ end,
+ lists:sort(maps:keys(Context))).
+
+context_to_app_env_vars(Context) ->
+ rabbit_log_prelaunch:debug(
+ "Setting default application environment variables:"),
+ Fun = fun({App, Param, Value}) ->
+ rabbit_log_prelaunch:debug(
+ " - ~s:~s = ~p", [App, Param, Value]),
+ ok = application:set_env(
+ App, Param, Value, [{persistent, true}])
+ end,
+ context_to_app_env_vars1(Context, Fun).
+
+context_to_app_env_vars_no_logging(Context) ->
+ Fun = fun({App, Param, Value}) ->
+ ok = application:set_env(
+ App, Param, Value, [{persistent, true}])
+ end,
+ context_to_app_env_vars1(Context, Fun).
+
+context_to_app_env_vars1(
+ #{mnesia_dir := MnesiaDir,
+ feature_flags_file := FFFile,
+ quorum_queue_dir := QuorumQueueDir,
+ stream_queue_dir := StreamQueueDir,
+ plugins_path := PluginsPath,
+ plugins_expand_dir := PluginsExpandDir,
+ enabled_plugins_file := EnabledPluginsFile} = Context,
+ Fun) ->
+ lists:foreach(
+ Fun,
+ %% Those are all the application environment variables which
+ %% were historically set on the erl(1) command line in
+ %% rabbitmq-server(8).
+ [{kernel, inet_default_connect_options, [{nodelay, true}]},
+ {sasl, errlog_type, error},
+ {os_mon, start_cpu_sup, false},
+ {os_mon, start_disksup, false},
+ {os_mon, start_memsup, false},
+ {mnesia, dir, MnesiaDir},
+ {ra, data_dir, QuorumQueueDir},
+ {osiris, data_dir, StreamQueueDir},
+ {rabbit, feature_flags_file, FFFile},
+ {rabbit, plugins_dir, PluginsPath},
+ {rabbit, plugins_expand_dir, PluginsExpandDir},
+ {rabbit, enabled_plugins_file, EnabledPluginsFile}]),
+
+ case Context of
+ #{erlang_dist_tcp_port := DistTcpPort} ->
+ lists:foreach(
+ Fun,
+ [{kernel, inet_dist_listen_min, DistTcpPort},
+ {kernel, inet_dist_listen_max, DistTcpPort}]);
+ _ ->
+ ok
+ end,
+ case Context of
+ #{amqp_ipaddr := IpAddr,
+ amqp_tcp_port := TcpPort}
+ when IpAddr /= undefined andalso TcpPort /= undefined ->
+ Fun({rabbit, tcp_listeners, [{IpAddr, TcpPort}]});
+ _ ->
+ ok
+ end,
+ ok.
+
+context_to_code_path(#{os_type := OSType, plugins_path := PluginsPath}) ->
+ Dirs = get_user_lib_dirs(OSType, PluginsPath),
+ code:add_pathsa(lists:reverse(Dirs)).
+
+%% -------------------------------------------------------------------
+%% Code copied from `kernel/src/code_server.erl`.
+%%
+%% The goal is to mimic the behavior of the `$ERL_LIBS` environment
+%% variable.
+
+get_user_lib_dirs(OSType, Path) ->
+ Sep = case OSType of
+ {win32, _} -> ";";
+ _ -> ":"
+ end,
+ SplitPath = string:lexemes(Path, Sep),
+ get_user_lib_dirs_1(SplitPath).
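+%% Illustrative (Unix): get_user_lib_dirs({unix, linux}, "/a:/b")
+%% returns every "<app>/ebin" directory found directly under /a and /b,
+%% much like $ERL_LIBS would.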
+
+get_user_lib_dirs_1([Dir|DirList]) ->
+ case erl_prim_loader:list_dir(Dir) of
+ {ok, Dirs} ->
+ Paths = make_path(Dir, Dirs),
+ %% Only add paths ending with ./ebin.
+ [P || P <- Paths, filename:basename(P) =:= "ebin"] ++
+ get_user_lib_dirs_1(DirList);
+ error ->
+ get_user_lib_dirs_1(DirList)
+ end;
+get_user_lib_dirs_1([]) -> [].
+
+%%
+%% Create the initial path.
+%%
+make_path(BundleDir, Bundles0) ->
+ Bundles = choose_bundles(Bundles0),
+ make_path(BundleDir, Bundles, []).
+
+choose_bundles(Bundles) ->
+ ArchiveExt = archive_extension(),
+ Bs = lists:sort([create_bundle(B, ArchiveExt) || B <- Bundles]),
+ [FullName || {_Name,_NumVsn,FullName} <-
+ choose(lists:reverse(Bs), [], ArchiveExt)].
+
+create_bundle(FullName, ArchiveExt) ->
+ BaseName = filename:basename(FullName, ArchiveExt),
+ case split_base(BaseName) of
+ {Name, VsnStr} ->
+ case vsn_to_num(VsnStr) of
+ {ok, VsnNum} ->
+ {Name,VsnNum,FullName};
+ false ->
+ {FullName,[0],FullName}
+ end;
+ _ ->
+ {FullName,[0],FullName}
+ end.
+
+%% Convert "X.Y.Z. ..." to [K, L, M| ...]
+vsn_to_num(Vsn) ->
+ case is_vsn(Vsn) of
+ true ->
+ {ok, [list_to_integer(S) || S <- string:lexemes(Vsn, ".")]};
+ _ ->
+ false
+ end.
+
+is_vsn(Str) when is_list(Str) ->
+ Vsns = string:lexemes(Str, "."),
+ lists:all(fun is_numstr/1, Vsns).
+
+is_numstr(Cs) ->
+ lists:all(fun (C) when $0 =< C, C =< $9 -> true;
+ (_) -> false
+ end, Cs).
+
+choose([{Name,NumVsn,NewFullName}=New|Bs], Acc, ArchiveExt) ->
+ case lists:keyfind(Name, 1, Acc) of
+ {_, NV, OldFullName} when NV =:= NumVsn ->
+ case filename:extension(OldFullName) =:= ArchiveExt of
+ false ->
+ choose(Bs,Acc, ArchiveExt);
+ true ->
+ Acc2 = lists:keystore(Name, 1, Acc, New),
+ choose(Bs,Acc2, ArchiveExt)
+ end;
+ {_, _, _} ->
+ choose(Bs,Acc, ArchiveExt);
+ false ->
+ choose(Bs,[{Name,NumVsn,NewFullName}|Acc], ArchiveExt)
+ end;
+choose([],Acc, _ArchiveExt) ->
+ Acc.
+
+make_path(_, [], Res) ->
+ Res;
+make_path(BundleDir, [Bundle|Tail], Res) ->
+ Dir = filename:append(BundleDir, Bundle),
+ Ebin = filename:append(Dir, "ebin"),
+ %% First try with /ebin
+ case is_dir(Ebin) of
+ true ->
+ make_path(BundleDir, Tail, [Ebin|Res]);
+ false ->
+ %% Second try with archive
+ Ext = archive_extension(),
+ Base = filename:basename(Bundle, Ext),
+ Ebin2 = filename:join([BundleDir, Base ++ Ext, Base, "ebin"]),
+ Ebins =
+ case split_base(Base) of
+ {AppName,_} ->
+ Ebin3 = filename:join([BundleDir, Base ++ Ext,
+ AppName, "ebin"]),
+ [Ebin3, Ebin2, Dir];
+ _ ->
+ [Ebin2, Dir]
+ end,
+ case try_ebin_dirs(Ebins) of
+ {ok,FoundEbin} ->
+ make_path(BundleDir, Tail, [FoundEbin|Res]);
+ error ->
+ make_path(BundleDir, Tail, Res)
+ end
+ end.
+
+try_ebin_dirs([Ebin|Ebins]) ->
+ case is_dir(Ebin) of
+ true -> {ok,Ebin};
+ false -> try_ebin_dirs(Ebins)
+ end;
+try_ebin_dirs([]) ->
+ error.
+
+split_base(BaseName) ->
+ case string:lexemes(BaseName, "-") of
+ [_, _|_] = Toks ->
+ Vsn = lists:last(Toks),
+ AllButLast = lists:droplast(Toks),
+ {string:join(AllButLast, "-"),Vsn};
+ [_|_] ->
+ BaseName
+ end.
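+%% Illustrative: split_base("rabbit_common-3.8.9") =:=
+%% {"rabbit_common", "3.8.9"}.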
+
+is_dir(Path) ->
+ case erl_prim_loader:read_file_info(Path) of
+ {ok,#file_info{type=directory}} -> true;
+ _ -> false
+ end.
+
+archive_extension() ->
+ init:archive_extension().
+
+%% -------------------------------------------------------------------
+%%
+%% RABBITMQ_NODENAME
+%% Erlang node name.
+%% Default: rabbit@<hostname>
+%%
+%% RABBITMQ_USE_LONGNAME
+%% Flag indicating if long Erlang node names should be used instead
+%% of short ones.
+%% Default: unset (use short names)
+
+nodename_type(Context) ->
+ case get_prefixed_env_var("RABBITMQ_USE_LONGNAME") of
+ false ->
+ update_context(Context, nodename_type, shortnames, default);
+ Value ->
+ NameType = case value_is_yes(Value) of
+ true -> longnames;
+ false -> shortnames
+ end,
+ update_context(Context, nodename_type, NameType, environment)
+ end.
+
+nodename(#{nodename_type := NameType} = Context) ->
+ LongHostname = net_adm:localhost(),
+ ShortHostname = re:replace(LongHostname, "\\..*$", "", [{return, list}]),
+ case get_prefixed_env_var("RABBITMQ_NODENAME") of
+ false when NameType =:= shortnames ->
+ Nodename = rabbit_nodes_common:make({"rabbit", ShortHostname}),
+ update_context(Context, nodename, Nodename, default);
+ false when NameType =:= longnames ->
+ Nodename = rabbit_nodes_common:make({"rabbit", LongHostname}),
+ update_context(Context, nodename, Nodename, default);
+ Value ->
+ Nodename = case string:find(Value, "@") of
+ nomatch when NameType =:= shortnames ->
+ rabbit_nodes_common:make({Value, ShortHostname});
+ nomatch when NameType =:= longnames ->
+ rabbit_nodes_common:make({Value, LongHostname});
+ _ ->
+ rabbit_nodes_common:make(Value)
+ end,
+ update_context(Context, nodename, Nodename, environment)
+ end.
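+%% Illustrative: with RABBITMQ_USE_LONGNAME unset and
+%% RABBITMQ_NODENAME=bunny on host "myhost.example.com", the resulting
+%% nodename is bunny@myhost.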
+
+split_nodename(#{nodename := Nodename} = Context) ->
+ update_context(Context,
+ split_nodename, rabbit_nodes_common:parts(Nodename)).
+
+%% -------------------------------------------------------------------
+%%
+%% RABBITMQ_CONFIG_FILE
+%% Main configuration file.
+%% Extension is optional. `.config` for the old erlang-term-based
+%% format, `.conf` for the new Cuttlefish-based format.
+%% Default: (Unix) ${SYS_PREFIX}/etc/rabbitmq/rabbitmq
+%% (Windows) ${RABBITMQ_BASE}\rabbitmq
+%%
+%% RABBITMQ_CONFIG_FILES
+%% Additional configuration files.
+%% If a directory, all files directly inside it are loaded.
+%% If a glob pattern, all matching files are loaded.
+%% Only considered if the main configuration file is Cuttlefish-based.
+%% Default: (Unix) ${SYS_PREFIX}/etc/rabbitmq/conf.d/*.conf
+%% (Windows) ${RABBITMQ_BASE}\conf.d\*.conf
+%%
+%% RABBITMQ_ADVANCED_CONFIG_FILE
+%% Advanced configuration file.
+%% Erlang-term-based format with a `.config` extension.
+%% Default: (Unix) ${SYS_PREFIX}/etc/rabbitmq/advanced.config
+%% (Windows) ${RABBITMQ_BASE}\advanced.config
+
+config_base_dir(#{os_type := {unix, _},
+ sys_prefix := SysPrefix} = Context) ->
+ Dir = filename:join([SysPrefix, "etc", "rabbitmq"]),
+ update_context(Context, config_base_dir, Dir);
+config_base_dir(#{os_type := {win32, _},
+ rabbitmq_base := Dir} = Context) ->
+ update_context(Context, config_base_dir, Dir).
+
+main_config_file(Context) ->
+ case get_prefixed_env_var("RABBITMQ_CONFIG_FILE") of
+ false ->
+ File = get_default_main_config_file(Context),
+ update_context(Context, main_config_file, File, default);
+ Value ->
+ File = normalize_path(Value),
+ update_context(Context, main_config_file, File, environment)
+ end.
+
+get_default_main_config_file(#{config_base_dir := ConfigBaseDir}) ->
+ filename:join(ConfigBaseDir, "rabbitmq").
+
+additional_config_files(Context) ->
+ case get_prefixed_env_var("RABBITMQ_CONFIG_FILES") of
+ false ->
+ Pattern = get_default_additional_config_files(Context),
+ update_context(
+ Context, additional_config_files, Pattern, default);
+ Value ->
+ Pattern = normalize_path(Value),
+ update_context(
+ Context, additional_config_files, Pattern, environment)
+ end.
+
+get_default_additional_config_files(#{config_base_dir := ConfigBaseDir}) ->
+ filename:join([ConfigBaseDir, "conf.d", "*.conf"]).
+
+advanced_config_file(Context) ->
+ case get_prefixed_env_var("RABBITMQ_ADVANCED_CONFIG_FILE") of
+ false ->
+ File = get_default_advanced_config_file(Context),
+ update_context(Context, advanced_config_file, File, default);
+ Value ->
+ File = normalize_path(Value),
+ update_context(Context, advanced_config_file, File, environment)
+ end.
+
+get_default_advanced_config_file(#{config_base_dir := ConfigBaseDir}) ->
+ filename:join(ConfigBaseDir, "advanced.config").
+
+%% -------------------------------------------------------------------
+%%
+%% RABBITMQ_LOG_BASE
+%% Directory to write log files
+%% Default: (Unix) ${SYS_PREFIX}/var/log/rabbitmq
+%% (Windows) ${RABBITMQ_BASE}\log
+%%
+%% RABBITMQ_LOGS
+%% Main log file
+%% Default: ${RABBITMQ_LOG_BASE}/${RABBITMQ_NODENAME}.log
+%%
+%% RABBITMQ_UPGRADE_LOG
+%% Upgrade-procedure-specific log file
+%% Default: ${RABBITMQ_LOG_BASE}/${RABBITMQ_NODENAME}_upgrade.log
+%%
+%% RABBITMQ_LOG
+%% Log level; overrides the configuration file value
+%% Default: (undefined)
+%%
+%% RABBITMQ_DBG
+%% List of `module`, `module:function` or `module:function/arity`
+%% to watch with dbg.
+%% Default: (undefined)
+
+log_levels(Context) ->
+ case get_prefixed_env_var("RABBITMQ_LOG") of
+ false ->
+ update_context(Context, log_levels, undefined, default);
+ Value ->
+ LogLevels = parse_log_levels(string:lexemes(Value, ","), #{}),
+ update_context(Context, log_levels, LogLevels, environment)
+ end.
+
+parse_log_levels([CategoryValue | Rest], Result) ->
+ case string:lexemes(CategoryValue, "=") of
+ ["+color"] ->
+ Result1 = Result#{color => true},
+ parse_log_levels(Rest, Result1);
+ ["-color"] ->
+ Result1 = Result#{color => false},
+ parse_log_levels(Rest, Result1);
+ [CategoryOrLevel] ->
+ case parse_level(CategoryOrLevel) of
+ undefined ->
+ Result1 = Result#{CategoryOrLevel => info},
+ parse_log_levels(Rest, Result1);
+ Level ->
+ Result1 = Result#{global => Level},
+ parse_log_levels(Rest, Result1)
+ end;
+ [Category, Level0] ->
+ case parse_level(Level0) of
+ undefined ->
+ parse_log_levels(Rest, Result);
+ Level ->
+ Result1 = Result#{Category => Level},
+ parse_log_levels(Rest, Result1)
+ end
+ end;
+parse_log_levels([], Result) ->
+ Result.
+
+parse_level("debug") -> debug;
+parse_level("info") -> info;
+parse_level("notice") -> notice;
+parse_level("warning") -> warning;
+parse_level("error") -> error;
+parse_level("critical") -> critical;
+parse_level("alert") -> alert;
+parse_level("emergency") -> emergency;
+parse_level("none") -> none;
+parse_level(_) -> undefined.
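+%% Illustrative: RABBITMQ_LOG="debug,queue=info,+color" parses to
+%% #{global => debug, "queue" => info, color => true}.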
+
+log_base_dir(#{os_type := OSType} = Context) ->
+ case {get_prefixed_env_var("RABBITMQ_LOG_BASE"), OSType} of
+ {false, {unix, _}} ->
+ #{sys_prefix := SysPrefix} = Context,
+ Dir = filename:join([SysPrefix, "var", "log", "rabbitmq"]),
+ update_context(Context, log_base_dir, Dir, default);
+ {false, {win32, _}} ->
+ #{rabbitmq_base := RabbitmqBase} = Context,
+ Dir = filename:join([RabbitmqBase, "log"]),
+ update_context(Context, log_base_dir, Dir, default);
+ {Value, _} ->
+ Dir = normalize_path(Value),
+ update_context(Context, log_base_dir, Dir, environment)
+ end.
+
+main_log_file(#{nodename := Nodename,
+ log_base_dir := LogBaseDir} = Context) ->
+ case get_prefixed_env_var("RABBITMQ_LOGS") of
+ false ->
+ File = filename:join(LogBaseDir,
+ atom_to_list(Nodename) ++ ".log"),
+ update_context(Context, main_log_file, File, default);
+ "-" ->
+ update_context(Context, main_log_file, "-", environment);
+ Value ->
+ File = normalize_path(Value),
+ update_context(Context, main_log_file, File, environment)
+ end.
+
+upgrade_log_file(#{nodename := Nodename,
+ log_base_dir := LogBaseDir} = Context) ->
+ case get_prefixed_env_var("RABBITMQ_UPGRADE_LOG") of
+ false ->
+ File = filename:join(LogBaseDir,
+ atom_to_list(Nodename) ++ "_upgrade.log"),
+ update_context(Context, upgrade_log_file, File, default);
+ Value ->
+ File = normalize_path(Value),
+ update_context(Context, upgrade_log_file, File, environment)
+ end.
+
+dbg_config() ->
+ {Mods, Output} = get_dbg_config(),
+ #{dbg_output => Output,
+ dbg_mods => Mods}.
+
+dbg_config(Context) ->
+ DbgContext = dbg_config(),
+ maps:merge(Context, DbgContext).
+
+get_dbg_config() ->
+ Output = stdout,
+ DbgValue = get_prefixed_env_var("RABBITMQ_DBG"),
+ case DbgValue of
+ false -> {[], Output};
+ _ -> get_dbg_config1(string:lexemes(DbgValue, ","), [], Output)
+ end.
+
+get_dbg_config1(["=" ++ Filename | Rest], Mods, _) ->
+ get_dbg_config1(Rest, Mods, Filename);
+get_dbg_config1([SpecValue | Rest], Mods, Output) ->
+ Pattern = "([^:]+)(?::([^/]+)(?:/([0-9]+))?)?",
+ Options = [{capture, all_but_first, list}],
+ Mods1 = case re:run(SpecValue, Pattern, Options) of
+ {match, [M, F, A]} ->
+ Entry = {list_to_atom(M),
+ list_to_atom(F),
+ list_to_integer(A)},
+ [Entry | Mods];
+ {match, [M, F]} ->
+ Entry = {list_to_atom(M),
+ list_to_atom(F),
+ '_'},
+ [Entry | Mods];
+ {match, [M]} ->
+ Entry = {list_to_atom(M),
+ '_',
+ '_'},
+ [Entry | Mods];
+ nomatch ->
+ Mods
+ end,
+ get_dbg_config1(Rest, Mods1, Output);
+get_dbg_config1([], Mods, Output) ->
+ {lists:reverse(Mods), Output}.
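+%% Illustrative: RABBITMQ_DBG="rabbit_misc:format/2,=dbg.out" yields
+%% Mods = [{rabbit_misc, format, 2}] and Output = "dbg.out".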
+
+%% -------------------------------------------------------------------
+%%
+%% RABBITMQ_MNESIA_BASE
+%% Directory in which to create the Mnesia directory.
+%% Default: (Unix) ${SYS_PREFIX}/var/lib/rabbitmq/mnesia
+%% (Windows) ${RABBITMQ_BASE}/db
+%%
+%% RABBITMQ_MNESIA_DIR
+%% Directory in which to put Mnesia data.
+%% Default: (Unix) ${RABBITMQ_MNESIA_BASE}/${RABBITMQ_NODENAME}
+%% (Windows) ${RABBITMQ_MNESIA_BASE}\${RABBITMQ_NODENAME}-mnesia
+
+mnesia_base_dir(#{from_remote_node := Remote} = Context) ->
+ case get_prefixed_env_var("RABBITMQ_MNESIA_BASE") of
+ false when Remote =:= offline ->
+ update_context(Context, mnesia_base_dir, undefined, default);
+ false ->
+ mnesia_base_dir_from_node(Context);
+ Value ->
+ Dir = normalize_path(Value),
+ update_context(Context, mnesia_base_dir, Dir, environment)
+ end;
+mnesia_base_dir(Context) ->
+ mnesia_base_dir_from_env(Context).
+
+mnesia_base_dir_from_env(Context) ->
+ case get_prefixed_env_var("RABBITMQ_MNESIA_BASE") of
+ false ->
+ Dir = get_default_mnesia_base_dir(Context),
+ update_context(Context, mnesia_base_dir, Dir, default);
+ Value ->
+ Dir = normalize_path(Value),
+ update_context(Context, mnesia_base_dir, Dir, environment)
+ end.
+
+mnesia_base_dir_from_node(Context) ->
+ %% This variable is used to compute other variables only, we
+ %% don't need to know what a remote node used initially. Only the
+ %% variables based on it are relevant.
+ update_context(Context, mnesia_base_dir, undefined, default).
+
+get_default_mnesia_base_dir(#{data_dir := DataDir} = Context) ->
+ Basename = case Context of
+ #{os_type := {unix, _}} -> "mnesia";
+ #{os_type := {win32, _}} -> "db"
+ end,
+ filename:join(DataDir, Basename).
+
+mnesia_dir(#{from_remote_node := Remote} = Context) ->
+ case get_prefixed_env_var("RABBITMQ_MNESIA_DIR") of
+ false when Remote =:= offline ->
+ update_context(Context, mnesia_dir, undefined, default);
+ false ->
+ mnesia_dir_from_node(Context);
+ Value ->
+ Dir = normalize_path(Value),
+ update_context(Context, mnesia_dir, Dir, environment)
+ end;
+mnesia_dir(Context) ->
+ mnesia_dir_from_env(Context).
+
+mnesia_dir_from_env(Context) ->
+ case get_prefixed_env_var("RABBITMQ_MNESIA_DIR") of
+ false ->
+ Dir = get_default_mnesia_dir(Context),
+ update_context(Context, mnesia_dir, Dir, default);
+ Value ->
+ Dir = normalize_path(Value),
+ update_context(Context, mnesia_dir, Dir, environment)
+ end.
+
+mnesia_dir_from_node(#{from_remote_node := Remote} = Context) ->
+ Ret = query_remote(Remote, application, get_env, [mnesia, dir]),
+ case Ret of
+ {ok, undefined} ->
+ throw({query, Remote, {mnesia, dir, undefined}});
+ {ok, {ok, Value}} ->
+ Dir = normalize_path(Value),
+ update_context(Context, mnesia_dir, Dir, remote_node);
+ {badrpc, nodedown} ->
+ update_context(Context, mnesia_dir, undefined, default)
+ end.
+
+get_default_mnesia_dir(#{os_type := {unix, _},
+ nodename := Nodename,
+ mnesia_base_dir := MnesiaBaseDir})
+ when MnesiaBaseDir =/= undefined ->
+ filename:join(MnesiaBaseDir, atom_to_list(Nodename));
+get_default_mnesia_dir(#{os_type := {win32, _},
+ nodename := Nodename,
+ mnesia_base_dir := MnesiaBaseDir})
+ when MnesiaBaseDir =/= undefined ->
+ filename:join(MnesiaBaseDir, atom_to_list(Nodename) ++ "-mnesia").
+
+%% -------------------------------------------------------------------
+%%
+%% RABBITMQ_QUORUM_DIR
+%% Directory where to store Ra state for quorum queues.
+%% Default: ${RABBITMQ_MNESIA_DIR}/quorum
+
+quorum_queue_dir(#{mnesia_dir := MnesiaDir} = Context) ->
+ case get_prefixed_env_var("RABBITMQ_QUORUM_DIR") of
+ false when MnesiaDir =/= undefined ->
+ Dir = filename:join(MnesiaDir, "quorum"),
+ update_context(Context, quorum_queue_dir, Dir, default);
+ false when MnesiaDir =:= undefined ->
+ update_context(Context, quorum_queue_dir, undefined, default);
+ Value ->
+ Dir = normalize_path(Value),
+ update_context(Context, quorum_queue_dir, Dir, environment)
+ end.
+
+%% -------------------------------------------------------------------
+%%
+%% RABBITMQ_STREAM_DIR
+%% Directory where to store Ra state for stream queues.
+%% Default: ${RABBITMQ_MNESIA_DIR}/stream
+
+stream_queue_dir(#{mnesia_dir := MnesiaDir} = Context) ->
+ case get_prefixed_env_var("RABBITMQ_STREAM_DIR") of
+ false when MnesiaDir =/= undefined ->
+ Dir = filename:join(MnesiaDir, "stream"),
+ update_context(Context, stream_queue_dir, Dir, default);
+ false when MnesiaDir =:= undefined ->
+ update_context(Context, stream_queue_dir, undefined, default);
+ Value ->
+ Dir = normalize_path(Value),
+ update_context(Context, stream_queue_dir, Dir, environment)
+ end.
+
+%% -------------------------------------------------------------------
+%%
+%% RABBITMQ_PID_FILE
+%% File used to write the Erlang VM OS PID.
+%% Default: ${RABBITMQ_MNESIA_DIR}.pid
+%%
+%% RABBITMQ_KEEP_PID_FILE_ON_EXIT
+%% Whether to keep or remove the PID file on Erlang VM exit.
+%%   Default: false
+
+pid_file(#{mnesia_base_dir := MnesiaBaseDir,
+ nodename := Nodename} = Context) ->
+ case get_prefixed_env_var("RABBITMQ_PID_FILE") of
+ false when MnesiaBaseDir =/= undefined ->
+ File = filename:join(MnesiaBaseDir,
+ atom_to_list(Nodename) ++ ".pid"),
+ update_context(Context, pid_file, File, default);
+ false when MnesiaBaseDir =:= undefined ->
+ update_context(Context, pid_file, undefined, default);
+ Value ->
+ File = normalize_path(Value),
+ update_context(Context, pid_file, File, environment)
+ end.
+
+keep_pid_file_on_exit(Context) ->
+ case get_prefixed_env_var("RABBITMQ_KEEP_PID_FILE_ON_EXIT") of
+ false ->
+ update_context(Context, keep_pid_file_on_exit, false, default);
+ Value ->
+ Keep = value_is_yes(Value),
+ update_context(Context, keep_pid_file_on_exit, Keep, environment)
+ end.
+
+%% -------------------------------------------------------------------
+%%
+%% RABBITMQ_FEATURE_FLAGS_FILE
+%% File used to store enabled feature flags.
+%% Default: ${RABBITMQ_MNESIA_BASE}/${RABBITMQ_NODENAME}-feature_flags
+
+feature_flags_file(#{from_remote_node := Remote} = Context) ->
+ case get_prefixed_env_var("RABBITMQ_FEATURE_FLAGS_FILE") of
+ false when Remote =:= offline ->
+ update_context(Context, feature_flags_file, undefined, default);
+ false ->
+ feature_flags_file_from_node(Context);
+ Value ->
+ File = normalize_path(Value),
+ update_context(Context, feature_flags_file, File, environment)
+ end;
+feature_flags_file(Context) ->
+ feature_flags_file_from_env(Context).
+
+feature_flags_file_from_env(#{mnesia_base_dir := MnesiaBaseDir,
+ nodename := Nodename} = Context) ->
+ case get_env_var("RABBITMQ_FEATURE_FLAGS_FILE") of
+ false ->
+ File = filename:join(MnesiaBaseDir,
+ atom_to_list(Nodename) ++ "-feature_flags"),
+ update_context(Context, feature_flags_file, File, default);
+ Value ->
+ File = normalize_path(Value),
+ update_context(Context, feature_flags_file, File, environment)
+ end.
+
+feature_flags_file_from_node(#{from_remote_node := Remote} = Context) ->
+ Ret = query_remote(Remote,
+ application, get_env, [rabbit, feature_flags_file]),
+ case Ret of
+ {ok, undefined} ->
+ throw({query, Remote, {rabbit, feature_flags_file, undefined}});
+ {ok, {ok, Value}} ->
+ File = normalize_path(Value),
+ update_context(Context, feature_flags_file, File, remote_node);
+ {badrpc, nodedown} ->
+ update_context(Context, feature_flags_file, undefined, default)
+ end.
+
+forced_feature_flags_on_init(Context) ->
+ Value = get_prefixed_env_var("RABBITMQ_FEATURE_FLAGS",
+ [keep_empty_string_as_is]),
+ case Value of
+ false ->
+            %% get_prefixed_env_var() treats an empty string
+            %% the same as an undefined environment variable.
+ update_context(Context,
+ forced_feature_flags_on_init, undefined, default);
+ _ ->
+ Flags = [list_to_atom(V) || V <- string:lexemes(Value, ",")],
+ update_context(Context,
+ forced_feature_flags_on_init, Flags, environment)
+ end.
+
+log_feature_flags_registry(Context) ->
+ case get_prefixed_env_var("RABBITMQ_LOG_FF_REGISTRY") of
+ false ->
+ update_context(Context,
+ log_feature_flags_registry, false, default);
+ Value ->
+ Log = value_is_yes(Value),
+ update_context(Context,
+ log_feature_flags_registry, Log, environment)
+ end.
+
+%% -------------------------------------------------------------------
+%%
+%% RABBITMQ_PLUGINS_DIR
+%% List of directories where to look for plugins.
+%% Directories are separated by:
+%% ':' on Unix
+%% ';' on Windows
+%% Default: ${RABBITMQ_HOME}/plugins
+%%
+%% RABBITMQ_PLUGINS_EXPAND_DIR
+%% Directory where to expand plugin archives.
+%% Default: ${RABBITMQ_MNESIA_BASE}/${RABBITMQ_NODENAME}-plugins-expand
+%%
+%% RABBITMQ_ENABLED_PLUGINS_FILE
+%% File where the list of enabled plugins is stored.
+%% Default: (Unix) ${SYS_PREFIX}/etc/rabbitmq/enabled_plugins
+%% (Windows) ${RABBITMQ_BASE}\enabled_plugins
+%%
+%% RABBITMQ_ENABLED_PLUGINS
+%% List of plugins to enable on startup.
+%% Values are:
+%% "ALL" to enable all plugins
+%% "" to enable no plugin
+%%     a list of plugin names, separated by a comma (',')
+%% Default: Empty (i.e. use ${RABBITMQ_ENABLED_PLUGINS_FILE})
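+%%
+%% For example (illustrative plugin names), setting
+%%   RABBITMQ_ENABLED_PLUGINS="rabbitmq_management,rabbitmq_shovel"
+%% yields the plugin list [rabbitmq_management, rabbitmq_shovel],
+%% while "" yields the empty list and "ALL" enables all plugins.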
+
+plugins_path(#{from_remote_node := Remote} = Context) ->
+ case get_prefixed_env_var("RABBITMQ_PLUGINS_DIR") of
+ false when Remote =:= offline ->
+ update_context(Context, plugins_path, undefined, default);
+ false ->
+ plugins_path_from_node(Context);
+ Path ->
+ update_context(Context, plugins_path, Path, environment)
+ end;
+plugins_path(Context) ->
+ plugins_path_from_env(Context).
+
+plugins_path_from_env(Context) ->
+ case get_prefixed_env_var("RABBITMQ_PLUGINS_DIR") of
+ false ->
+ Path = get_default_plugins_path_from_env(Context),
+ update_context(Context, plugins_path, Path, default);
+ Path ->
+ update_context(Context, plugins_path, Path, environment)
+ end.
+
+plugins_path_from_node(#{from_remote_node := Remote} = Context) ->
+ Ret = query_remote(Remote, application, get_env, [rabbit, plugins_dir]),
+ case Ret of
+ {ok, undefined} ->
+ throw({query, Remote, {rabbit, plugins_dir, undefined}});
+ {ok, {ok, Path}} ->
+ update_context(Context, plugins_path, Path, remote_node);
+ {badrpc, nodedown} ->
+ update_context(Context, plugins_path, undefined, default)
+ end.
+
+get_default_plugins_path(#{from_remote_node := offline}) ->
+ undefined;
+get_default_plugins_path(#{from_remote_node := Remote}) ->
+ get_default_plugins_path_from_node(Remote);
+get_default_plugins_path(Context) ->
+ get_default_plugins_path_from_env(Context).
+
+get_default_plugins_path_from_env(#{os_type := OSType}) ->
+ ThisModDir = this_module_dir(),
+ PluginsDir = rabbit_common_mod_location_to_plugins_dir(ThisModDir),
+ case {OSType, PluginsDir} of
+ {{unix, _}, "/usr/lib/rabbitmq/" ++ _} ->
+ UserPluginsDir = filename:join(
+ ["/", "usr", "lib", "rabbitmq", "plugins"]),
+ UserPluginsDir ++ ":" ++ PluginsDir;
+ _ ->
+ PluginsDir
+ end.
+
+get_default_plugins_path_from_node(Remote) ->
+ Ret = query_remote(Remote, code, where_is_file, ["rabbit_common.app"]),
+ case Ret of
+ {ok, non_existing = Error} ->
+ throw({query, Remote, {code, where_is_file, Error}});
+ {ok, Path} ->
+ rabbit_common_mod_location_to_plugins_dir(filename:dirname(Path));
+ {badrpc, nodedown} ->
+ undefined
+ end.
+
+rabbit_common_mod_location_to_plugins_dir(ModDir) ->
+ case filename:basename(ModDir) of
+ "ebin" ->
+ case filelib:is_dir(ModDir) of
+ false ->
+ %% rabbit_common in the plugin's .ez archive.
+ filename:dirname(
+ filename:dirname(
+ filename:dirname(ModDir)));
+ true ->
+ %% rabbit_common in the plugin's directory.
+ filename:dirname(
+ filename:dirname(ModDir))
+ end;
+ _ ->
+ %% rabbit_common in the CLI escript.
+ filename:join(
+ filename:dirname(
+ filename:dirname(ModDir)),
+ "plugins")
+ end.
+
+plugins_expand_dir(#{mnesia_base_dir := MnesiaBaseDir,
+ nodename := Nodename} = Context) ->
+ case get_prefixed_env_var("RABBITMQ_PLUGINS_EXPAND_DIR") of
+ false when MnesiaBaseDir =/= undefined ->
+ Dir = filename:join(
+ MnesiaBaseDir,
+ atom_to_list(Nodename) ++ "-plugins-expand"),
+ update_context(Context, plugins_expand_dir, Dir, default);
+ false when MnesiaBaseDir =:= undefined ->
+ update_context(Context, plugins_expand_dir, undefined, default);
+ Value ->
+ Dir = normalize_path(Value),
+ update_context(Context, plugins_expand_dir, Dir, environment)
+ end.
+
+enabled_plugins_file(#{from_remote_node := Remote} = Context) ->
+ case get_prefixed_env_var("RABBITMQ_ENABLED_PLUGINS_FILE") of
+ false when Remote =:= offline ->
+ update_context(Context, enabled_plugins_file, undefined, default);
+ false ->
+ enabled_plugins_file_from_node(Context);
+ Value ->
+ File = normalize_path(Value),
+ update_context(Context, enabled_plugins_file, File, environment)
+ end;
+enabled_plugins_file(Context) ->
+ enabled_plugins_file_from_env(Context).
+
+enabled_plugins_file_from_env(Context) ->
+ case get_prefixed_env_var("RABBITMQ_ENABLED_PLUGINS_FILE") of
+ false ->
+ File = get_default_enabled_plugins_file(Context),
+ update_context(Context, enabled_plugins_file, File, default);
+ Value ->
+ File = normalize_path(Value),
+ update_context(Context, enabled_plugins_file, File, environment)
+ end.
+
+get_default_enabled_plugins_file(#{config_base_dir := ConfigBaseDir}) ->
+ filename:join(ConfigBaseDir, "enabled_plugins").
+
+enabled_plugins_file_from_node(#{from_remote_node := Remote} = Context) ->
+ Ret = query_remote(Remote,
+ application, get_env, [rabbit, enabled_plugins_file]),
+ case Ret of
+ {ok, undefined} ->
+ throw({query, Remote, {rabbit, enabled_plugins_file, undefined}});
+ {ok, {ok, Value}} ->
+ File = normalize_path(Value),
+ update_context(Context, enabled_plugins_file, File, remote_node);
+ {badrpc, nodedown} ->
+ update_context(Context, enabled_plugins_file, undefined, default)
+ end.
+
+enabled_plugins(Context) ->
+ Value = get_prefixed_env_var(
+ "RABBITMQ_ENABLED_PLUGINS",
+ [keep_empty_string_as_is]),
+ case Value of
+ false ->
+ update_context(Context, enabled_plugins, undefined, default);
+ "ALL" ->
+ update_context(Context, enabled_plugins, all, environment);
+ "" ->
+ update_context(Context, enabled_plugins, [], environment);
+ _ ->
+ Plugins = [list_to_atom(P) || P <- string:lexemes(Value, ",")],
+ update_context(Context, enabled_plugins, Plugins, environment)
+ end.
+
+%% -------------------------------------------------------------------
+%%
+%% RABBITMQ_NODE_IP_ADDRESS
+%% AMQP TCP IP address to listen on
+%% Default: unset (i.e. listen on all interfaces)
+%%
+%% RABBITMQ_NODE_PORT
+%% AMQP TCP port.
+%% Default: 5672
+%%
+%% RABBITMQ_DIST_PORT
+%% Erlang distribution TCP port.
+%% Default: ${RABBITMQ_NODE_PORT} + 20000
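+%%
+%% For example, with the default RABBITMQ_NODE_PORT of 5672, the
+%% distribution port defaults to 5672 + 20000 = 25672.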
+
+amqp_ipaddr(Context) ->
+ case get_prefixed_env_var("RABBITMQ_NODE_IP_ADDRESS") of
+ false ->
+ update_context(Context, amqp_ipaddr, "auto", default);
+ Value ->
+ update_context(Context, amqp_ipaddr, Value, environment)
+ end.
+
+amqp_tcp_port(Context) ->
+ case get_prefixed_env_var("RABBITMQ_NODE_PORT") of
+ false ->
+ update_context(Context, amqp_tcp_port, 5672, default);
+ TcpPortStr ->
+ try
+ TcpPort = erlang:list_to_integer(TcpPortStr),
+ update_context(Context, amqp_tcp_port, TcpPort, environment)
+ catch
+ _:badarg ->
+ rabbit_log_prelaunch:error(
+ "Invalid value for $RABBITMQ_NODE_PORT: ~p",
+ [TcpPortStr]),
+ throw({exit, ex_config})
+ end
+ end.
+
+erlang_dist_tcp_port(#{amqp_tcp_port := AmqpTcpPort} = Context) ->
+ case get_prefixed_env_var("RABBITMQ_DIST_PORT") of
+ false ->
+ TcpPort = AmqpTcpPort + 20000,
+ update_context(Context, erlang_dist_tcp_port, TcpPort, default);
+ TcpPortStr ->
+ try
+ TcpPort = erlang:list_to_integer(TcpPortStr),
+ update_context(Context,
+ erlang_dist_tcp_port, TcpPort, environment)
+ catch
+ _:badarg ->
+ rabbit_log_prelaunch:error(
+ "Invalid value for $RABBITMQ_DIST_PORT: ~p",
+ [TcpPortStr]),
+ throw({exit, ex_config})
+ end
+ end.
+
+%% -------------------------------------------------------------------
+%%
+%% SYS_PREFIX [Unix only]
+%% Default: ""
+%%
+%% RABBITMQ_BASE [Windows only]
+%% Directory where to put RabbitMQ data.
+%% Default: !APPDATA!\RabbitMQ
+
+sys_prefix(#{os_type := {unix, _}} = Context) ->
+ case get_env_var("SYS_PREFIX") of
+ false ->
+ update_context(Context, sys_prefix, "", default);
+ Value ->
+ Dir = normalize_path(Value),
+ update_context(Context, sys_prefix, Dir, environment)
+ end;
+sys_prefix(Context) ->
+ Context.
+
+rabbitmq_base(#{os_type := {win32, _}} = Context) ->
+ case get_env_var("RABBITMQ_BASE") of
+ false ->
+ AppData = normalize_path(get_env_var("APPDATA")),
+ Dir = filename:join(AppData, "RabbitMQ"),
+ update_context(Context, rabbitmq_base, Dir, default);
+ Value ->
+ Dir = normalize_path(Value),
+ update_context(Context, rabbitmq_base, Dir, environment)
+ end;
+rabbitmq_base(Context) ->
+ Context.
+
+data_dir(#{os_type := {unix, _},
+ sys_prefix := SysPrefix} = Context) ->
+ Dir = filename:join([SysPrefix, "var", "lib", "rabbitmq"]),
+ update_context(Context, data_dir, Dir);
+data_dir(#{os_type := {win32, _},
+ rabbitmq_base := RabbitmqBase} = Context) ->
+ update_context(Context, data_dir, RabbitmqBase).
+
+rabbitmq_home(Context) ->
+ case get_env_var("RABBITMQ_HOME") of
+ false ->
+ Dir = filename:dirname(get_default_plugins_path(Context)),
+ update_context(Context, rabbitmq_home, Dir, default);
+ Value ->
+ Dir = normalize_path(Value),
+ update_context(Context, rabbitmq_home, Dir, environment)
+ end.
+
+%% -------------------------------------------------------------------
+%%
+%% RABBITMQ_ALLOW_INPUT
+%%   Indicate whether an Erlang shell is started.
+%% Default: false
+
+interactive_shell(Context) ->
+ case get_env_var("RABBITMQ_ALLOW_INPUT") of
+ false ->
+ update_context(Context,
+ interactive_shell, false, default);
+ Value ->
+ update_context(Context,
+ interactive_shell, value_is_yes(Value), environment)
+ end.
+
+%% FIXME: We would need a way to call isatty(3) to make sure the output
+%% is a terminal.
+output_supports_colors(#{os_type := {unix, _}} = Context) ->
+ update_context(Context, output_supports_colors, true, default);
+output_supports_colors(#{os_type := {win32, _}} = Context) ->
+ update_context(Context, output_supports_colors, false, default).
+
+%% -------------------------------------------------------------------
+%%
+%% RABBITMQ_PRODUCT_NAME
+%% Override the product name
+%% Default: unset (i.e. "RabbitMQ")
+%%
+%% RABBITMQ_PRODUCT_VERSION
+%% Override the product version
+%% Default: unset (i.e. `rabbit` application version).
+%%
+%% RABBITMQ_MOTD_FILE
+%% Indicate a filename containing a "message of the day" to add to
+%% the banners, both the logged and the printed ones.
+%% Default: (Unix) ${SYS_PREFIX}/etc/rabbitmq/motd
+%% (Windows) ${RABBITMQ_BASE}\motd.txt
+
+product_name(#{from_remote_node := Remote} = Context) ->
+ case get_prefixed_env_var("RABBITMQ_PRODUCT_NAME") of
+ false when Remote =:= offline ->
+ update_context(Context, product_name, undefined, default);
+ false ->
+ product_name_from_node(Context);
+ Value ->
+ update_context(Context, product_name, Value, environment)
+ end;
+product_name(Context) ->
+ product_name_from_env(Context).
+
+product_name_from_env(Context) ->
+ case get_prefixed_env_var("RABBITMQ_PRODUCT_NAME") of
+ false ->
+ update_context(Context, product_name, undefined, default);
+ Value ->
+ update_context(Context, product_name, Value, environment)
+ end.
+
+product_name_from_node(#{from_remote_node := Remote} = Context) ->
+ Ret = (catch query_remote(Remote, rabbit, product_name, [])),
+ case Ret of
+ {badrpc, nodedown} ->
+ update_context(Context, product_name, undefined, default);
+ {query, _, _} ->
+ update_context(Context, product_name, undefined, default);
+ Value ->
+ update_context(Context, product_name, Value, remote_node)
+ end.
+
+product_version(#{from_remote_node := Remote} = Context) ->
+ case get_prefixed_env_var("RABBITMQ_PRODUCT_VERSION") of
+ false when Remote =:= offline ->
+ update_context(Context, product_version, undefined, default);
+ false ->
+ product_version_from_node(Context);
+ Value ->
+ update_context(Context, product_version, Value, environment)
+ end;
+product_version(Context) ->
+ product_version_from_env(Context).
+
+product_version_from_env(Context) ->
+ case get_prefixed_env_var("RABBITMQ_PRODUCT_VERSION") of
+ false ->
+ update_context(Context, product_version, undefined, default);
+ Value ->
+ update_context(Context, product_version, Value, environment)
+ end.
+
+product_version_from_node(#{from_remote_node := Remote} = Context) ->
+ Ret = (catch query_remote(Remote, rabbit, product_version, [])),
+ case Ret of
+ {badrpc, _} ->
+ update_context(Context, product_version, undefined, default);
+ {query, _, _} ->
+ update_context(Context, product_version, undefined, default);
+ Value ->
+ update_context(Context, product_version, Value, remote_node)
+ end.
+
+motd_file(#{from_remote_node := Remote} = Context) ->
+ case get_prefixed_env_var("RABBITMQ_MOTD_FILE") of
+ false when Remote =:= offline ->
+ update_context(Context, motd_file, undefined, default);
+ false ->
+ motd_file_from_node(Context);
+ Value ->
+ File = normalize_path(Value),
+ update_context(Context, motd_file, File, environment)
+ end;
+motd_file(Context) ->
+ motd_file_from_env(Context).
+
+motd_file_from_env(Context) ->
+ case get_prefixed_env_var("RABBITMQ_MOTD_FILE") of
+ false ->
+ File = get_default_motd_file(Context),
+ update_context(Context, motd_file, File, default);
+ Value ->
+ File = normalize_path(Value),
+ update_context(Context, motd_file, File, environment)
+ end.
+
+get_default_motd_file(#{os_type := {unix, _},
+ config_base_dir := ConfigBaseDir}) ->
+ filename:join(ConfigBaseDir, "motd");
+get_default_motd_file(#{os_type := {win32, _},
+ config_base_dir := ConfigBaseDir}) ->
+ filename:join(ConfigBaseDir, "motd.txt").
+
+motd_file_from_node(#{from_remote_node := Remote} = Context) ->
+ Ret = (catch query_remote(Remote, rabbit, motd_file, [])),
+ case Ret of
+ {badrpc, _} ->
+ update_context(Context, motd_file, undefined, default);
+ {query, _, _} ->
+ update_context(Context, motd_file, undefined, default);
+ File ->
+ update_context(Context, motd_file, File, remote_node)
+ end.
+
+%% -------------------------------------------------------------------
+%% Loading of rabbitmq-env.conf.
+%% -------------------------------------------------------------------
+
+load_conf_env_file(#{os_type := {unix, _},
+ sys_prefix := SysPrefix} = Context) ->
+ {ConfEnvFile, Origin} =
+ case get_prefixed_env_var("RABBITMQ_CONF_ENV_FILE") of
+ false ->
+ File = filename:join(
+ [SysPrefix, "etc", "rabbitmq", "rabbitmq-env.conf"]),
+ {File, default};
+ Value ->
+ {normalize_path(Value), environment}
+ end,
+ Context1 = update_context(Context, conf_env_file, ConfEnvFile, Origin),
+ case loading_conf_env_file_enabled(Context1) of
+ true ->
+ case filelib:is_regular(ConfEnvFile) of
+ false ->
+ rabbit_log_prelaunch:debug(
+ "No $RABBITMQ_CONF_ENV_FILE (~ts)", [ConfEnvFile]),
+ Context1;
+ true ->
+ case os:find_executable("sh") of
+ false -> Context1;
+ Sh -> do_load_conf_env_file(Context1,
+ Sh,
+ ConfEnvFile)
+ end
+ end;
+ false ->
+ rabbit_log_prelaunch:debug(
+ "Loading of $RABBITMQ_CONF_ENV_FILE (~ts) is disabled",
+ [ConfEnvFile]),
+ Context1
+ end;
+load_conf_env_file(#{os_type := {win32, _},
+ rabbitmq_base := RabbitmqBase} = Context) ->
+ {ConfEnvFile, Origin} =
+ case get_prefixed_env_var("RABBITMQ_CONF_ENV_FILE") of
+ false ->
+ File = filename:join([RabbitmqBase, "rabbitmq-env-conf.bat"]),
+ {File, default};
+ Value ->
+ {normalize_path(Value), environment}
+ end,
+ Context1 = update_context(Context, conf_env_file, ConfEnvFile, Origin),
+ case loading_conf_env_file_enabled(Context1) of
+ true ->
+ case filelib:is_regular(ConfEnvFile) of
+ false ->
+ rabbit_log_prelaunch:debug(
+ "No $RABBITMQ_CONF_ENV_FILE (~ts)", [ConfEnvFile]),
+ Context1;
+ true ->
+ case os:find_executable("cmd.exe") of
+ false ->
+ Cmd = os:getenv("ComSpec"),
+ CmdExists =
+ Cmd =/= false andalso
+ filelib:is_regular(Cmd),
+ case CmdExists of
+ false -> Context1;
+ true -> do_load_conf_env_file(Context1,
+ Cmd,
+ ConfEnvFile)
+ end;
+ Cmd ->
+ do_load_conf_env_file(Context1, Cmd, ConfEnvFile)
+ end
+ end;
+ false ->
+ rabbit_log_prelaunch:debug(
+ "Loading of $RABBITMQ_CONF_ENV_FILE (~ts) is disabled",
+ [ConfEnvFile]),
+ Context1
+ end;
+load_conf_env_file(Context) ->
+ Context.
+
+-spec loading_conf_env_file_enabled(map()) -> boolean().
+
+-ifdef(TEST).
+loading_conf_env_file_enabled(_) ->
+ persistent_term:get({?MODULE, load_conf_env_file}, true).
+-else.
+loading_conf_env_file_enabled(_) ->
+ %% When this module is built without `TEST` defined, we want this
+ %% function to always return true. However, this makes Dialyzer
+ %% think it can only return true: this is not the case when the
+ %% module is compiled with `TEST` defined. The following line is
+ %% here to trick Dialyzer.
+ erlang:get({?MODULE, always_undefined}) =:= undefined.
+-endif.
+
+do_load_conf_env_file(#{os_type := {unix, _}} = Context, Sh, ConfEnvFile) ->
+ rabbit_log_prelaunch:debug(
+ "Sourcing $RABBITMQ_CONF_ENV_FILE: ~ts", [ConfEnvFile]),
+
+ %% The script below sources the `CONF_ENV_FILE` file, then it shows a
+ %% marker line and all environment variables.
+ %%
+ %% The marker line is useful to distinguish any output from the sourced
+ %% script from the variables we are interested in.
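+    %%
+    %% For example (with an illustrative PID of 12345 and the default
+    %% file location), the generated script is:
+    %%   . "/etc/rabbitmq/rabbitmq-env.conf" &&
+    %%   echo "-----BEGIN VARS LIST FOR PID 12345-----" && set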
+ Marker = vars_list_marker(),
+ Script = rabbit_misc:format(
+ ". \"~ts\" && "
+ "echo \"~s\" && "
+ "set", [ConfEnvFile, Marker]),
+
+ #{sys_prefix := SysPrefix,
+ rabbitmq_home := RabbitmqHome} = Context,
+ MainConfigFile = re:replace(
+ get_default_main_config_file(Context),
+ "\\.(conf|config)$", "", [{return, list}]),
+
+ %% The variables below are those the `CONF_ENV_FILE` file can expect.
+ Env = [
+ {"SYS_PREFIX", SysPrefix},
+ {"RABBITMQ_HOME", RabbitmqHome},
+ {"CONFIG_FILE", MainConfigFile},
+ {"ADVANCED_CONFIG_FILE", get_default_advanced_config_file(Context)},
+ {"MNESIA_BASE", get_default_mnesia_base_dir(Context)},
+ {"ENABLED_PLUGINS_FILE", get_default_enabled_plugins_file(Context)},
+ {"PLUGINS_DIR", get_default_plugins_path_from_env(Context)},
+ {"CONF_ENV_FILE_PHASE", "rabbtimq-prelaunch"}
+ ],
+
+ Args = ["-ex", "-c", Script],
+ Opts = [{args, Args},
+ {env, Env},
+ binary,
+ use_stdio,
+ stderr_to_stdout,
+ exit_status],
+ Port = erlang:open_port({spawn_executable, Sh}, Opts),
+ collect_conf_env_file_output(Context, Port, Marker, <<>>);
+do_load_conf_env_file(#{os_type := {win32, _}} = Context, Cmd, ConfEnvFile) ->
+ %% rabbitmq/rabbitmq-common#392
+ rabbit_log_prelaunch:debug(
+ "Executing $RABBITMQ_CONF_ENV_FILE: ~ts", [ConfEnvFile]),
+
+ %% The script below executes the `CONF_ENV_FILE` file, then it shows a
+ %% marker line and all environment variables.
+ %%
+ %% The marker line is useful to distinguish any output from the sourced
+ %% script from the variables we are interested in.
+ %%
+ %% Arguments are split into a list of strings to support a filename with
+ %% whitespaces in the path.
+ Marker = vars_list_marker(),
+ Script = [ConfEnvFile, "&&",
+ "echo", Marker, "&&",
+ "set"],
+
+ #{rabbitmq_base := RabbitmqBase,
+ rabbitmq_home := RabbitmqHome} = Context,
+ MainConfigFile = re:replace(
+ get_default_main_config_file(Context),
+ "\\.(conf|config)$", "", [{return, list}]),
+
+ %% The variables below are those the `CONF_ENV_FILE` file can expect.
+ Env = [
+ {"RABBITMQ_BASE", RabbitmqBase},
+ {"RABBITMQ_HOME", RabbitmqHome},
+ {"CONFIG_FILE", MainConfigFile},
+ {"ADVANCED_CONFIG_FILE", get_default_advanced_config_file(Context)},
+ {"MNESIA_BASE", get_default_mnesia_base_dir(Context)},
+ {"ENABLED_PLUGINS_FILE", get_default_enabled_plugins_file(Context)},
+ {"PLUGINS_DIR", get_default_plugins_path_from_env(Context)},
+ {"CONF_ENV_FILE_PHASE", "rabbtimq-prelaunch"}
+ ],
+
+ Args = ["/Q", "/C" | Script],
+ Opts = [{args, Args},
+ {env, Env},
+ hide,
+ binary,
+ stderr_to_stdout,
+ exit_status],
+ Port = erlang:open_port({spawn_executable, Cmd}, Opts),
+ collect_conf_env_file_output(Context, Port, "\"" ++ Marker ++ "\" ", <<>>).
+
+vars_list_marker() ->
+ rabbit_misc:format(
+ "-----BEGIN VARS LIST FOR PID ~s-----", [os:getpid()]).
+
+collect_conf_env_file_output(Context, Port, Marker, Output) ->
+ receive
+ {Port, {exit_status, ExitStatus}} ->
+ Lines = post_port_cmd_output(Context, Output, ExitStatus),
+ case ExitStatus of
+ 0 -> parse_conf_env_file_output(Context, Marker, Lines);
+ _ -> Context
+ end;
+ {Port, {data, Chunk}} ->
+ collect_conf_env_file_output(
+ Context, Port, Marker, [Output, Chunk])
+ end.
+
+post_port_cmd_output(#{os_type := {OSType, _}}, Output, ExitStatus) ->
+ rabbit_log_prelaunch:debug(
+ "$RABBITMQ_CONF_ENV_FILE exit status: ~b",
+ [ExitStatus]),
+ DecodedOutput = unicode:characters_to_list(Output),
+ LineSep = case OSType of
+ win32 -> "\r\n";
+ _ -> "\n"
+ end,
+ Lines = string:split(string:trim(DecodedOutput), LineSep, all),
+ rabbit_log_prelaunch:debug("$RABBITMQ_CONF_ENV_FILE output:"),
+ [rabbit_log_prelaunch:debug(" ~ts", [Line]) || Line <- Lines],
+ Lines.
+
+parse_conf_env_file_output(Context, _, []) ->
+ Context;
+parse_conf_env_file_output(Context, Marker, [Marker | Lines]) ->
+ %% Found our marker, let's parse variables.
+ parse_conf_env_file_output1(Context, Lines);
+parse_conf_env_file_output(Context, Marker, [_ | Lines]) ->
+ parse_conf_env_file_output(Context, Marker, Lines).
+
+parse_conf_env_file_output1(Context, Lines) ->
+ Vars = parse_conf_env_file_output2(Lines, #{}),
+ %% Re-export variables.
+ lists:foreach(
+ fun(Var) ->
+ IsUsed = var_is_used(Var),
+ IsSet = var_is_set(Var),
+ case IsUsed andalso not IsSet of
+ true ->
+ rabbit_log_prelaunch:debug(
+ "$RABBITMQ_CONF_ENV_FILE: re-exporting variable $~s",
+ [Var]),
+ os:putenv(Var, maps:get(Var, Vars));
+ false ->
+ ok
+ end
+ end, lists:sort(maps:keys(Vars))),
+ Context.
+
+parse_conf_env_file_output2([], Vars) ->
+ Vars;
+parse_conf_env_file_output2([Line | Lines], Vars) ->
+ SetXOutput = is_sh_set_x_output(Line),
+ ShFunction = is_sh_function(Line, Lines),
+ if
+ SetXOutput ->
+ parse_conf_env_file_output2(Lines, Vars);
+ ShFunction ->
+ skip_sh_function(Lines, Vars);
+ true ->
+ case string:split(Line, "=") of
+ [Var, IncompleteValue] ->
+ {Value, Lines1} = parse_sh_literal(IncompleteValue, Lines, ""),
+ Vars1 = Vars#{Var => Value},
+ parse_conf_env_file_output2(Lines1, Vars1);
+ _ ->
+ %% Parsing failed somehow.
+ rabbit_log_prelaunch:warning(
+ "Failed to parse $RABBITMQ_CONF_ENV_FILE output: ~p",
+ [Line]),
+ #{}
+ end
+ end.
+
+is_sh_set_x_output(Line) ->
+ re:run(Line, "^\\++ ", [{capture, none}]) =:= match.
+
+is_sh_function(_, []) ->
+ false;
+is_sh_function(Line, Lines) ->
+ re:run(Line, "\\s\\(\\)\\s*$", [{capture, none}]) =:= match
+ andalso
+ re:run(hd(Lines), "^\\s*\\{\\s*$", [{capture, none}]) =:= match.
+
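+%% Parse the value part of a `VAR=value' line of the `set' output.
+%% Quoted values may span several lines. For example (illustrative),
+%% the line "VAR='hello" followed by the line "world'" parses to the
+%% value "hello\nworld".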
+parse_sh_literal("'" ++ SingleQuoted, Lines, Literal) ->
+ parse_single_quoted_literal(SingleQuoted, Lines, Literal);
+parse_sh_literal("\"" ++ DoubleQuoted, Lines, Literal) ->
+ parse_double_quoted_literal(DoubleQuoted, Lines, Literal);
+parse_sh_literal("$'" ++ DollarSingleQuoted, Lines, Literal) ->
+ parse_dollar_single_quoted_literal(DollarSingleQuoted, Lines, Literal);
+parse_sh_literal(Unquoted, Lines, Literal) ->
+ {lists:reverse(Literal) ++ Unquoted, Lines}.
+
+parse_single_quoted_literal([$' | Rest], Lines, Literal) ->
+ %% We reached the closing single quote.
+ parse_sh_literal(Rest, Lines, Literal);
+parse_single_quoted_literal([], [Line | Lines], Literal) ->
+ %% We reached the end of line before finding the closing single
+ %% quote. The literal continues on the next line and includes that
+ %% newline character.
+ parse_single_quoted_literal(Line, Lines, [$\n | Literal]);
+parse_single_quoted_literal([C | Rest], Lines, Literal) ->
+ parse_single_quoted_literal(Rest, Lines, [C | Literal]).
+
+parse_double_quoted_literal([$" | Rest], Lines, Literal) ->
+ %% We reached the closing double quote.
+ parse_sh_literal(Rest, Lines, Literal);
+parse_double_quoted_literal([], [Line | Lines], Literal) ->
+ %% We reached the end of line before finding the closing double
+ %% quote. The literal continues on the next line and includes that
+ %% newline character.
+ parse_double_quoted_literal(Line, Lines, [$\n | Literal]);
+parse_double_quoted_literal([C | Rest], Lines, Literal) ->
+ parse_double_quoted_literal(Rest, Lines, [C | Literal]).
+
+parse_dollar_single_quoted_literal([$'], Lines, Literal) ->
+ %% We reached the closing single quote.
+ {lists:reverse(Literal), Lines};
+parse_dollar_single_quoted_literal([], [Line | Lines], Literal) ->
+ %% We reached the end of line before finding the closing single
+ %% quote. The literal continues on the next line and includes that
+ %% newline character.
+ parse_dollar_single_quoted_literal(Line, Lines, [$\n | Literal]);
+parse_dollar_single_quoted_literal([C | Rest], Lines, Literal) ->
+ parse_dollar_single_quoted_literal(Rest, Lines, [C | Literal]).
+
+skip_sh_function(["}" | Lines], Vars) ->
+ parse_conf_env_file_output2(Lines, Vars);
+skip_sh_function([_ | Lines], Vars) ->
+ skip_sh_function(Lines, Vars).
+
+%% -------------------------------------------------------------------
+%% Helpers.
+%% -------------------------------------------------------------------
+
+get_env_var(VarName) ->
+ get_env_var(VarName, []).
+
+get_env_var(VarName, Options) ->
+ KeepEmptyString = lists:member(keep_empty_string_as_is, Options),
+ case os:getenv(VarName) of
+ false -> false;
+ "" when not KeepEmptyString -> false;
+ Value -> Value
+ end.
+
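+%% Return the value of $RABBITMQ_<Suffix>, falling back to the
+%% un-prefixed $<Suffix>. For example, get_prefixed_env_var(
+%% "RABBITMQ_NODENAME") returns $RABBITMQ_NODENAME if set (and
+%% non-empty, unless keep_empty_string_as_is is passed), otherwise
+%% $NODENAME, otherwise false.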
+get_prefixed_env_var(VarName) ->
+ get_prefixed_env_var(VarName, []).
+
+get_prefixed_env_var("RABBITMQ_" ++ Suffix = VarName,
+ Options) ->
+ case get_env_var(VarName, Options) of
+ false -> get_env_var(Suffix, Options);
+ Value -> Value
+ end.
+
+var_is_used("RABBITMQ_" ++ _ = PrefixedVar) ->
+ lists:member(PrefixedVar, ?USED_ENV_VARS);
+var_is_used("HOME") ->
+ false;
+var_is_used(Var) ->
+ lists:member("RABBITMQ_" ++ Var, ?USED_ENV_VARS).
+
+%% The $RABBITMQ_* variables have precedence over their un-prefixed equivalent.
+%% Therefore, when we check if $RABBITMQ_* is set, we only look at this
+%% variable. However, when we check if an un-prefixed variable is set, we first
+%% look at its $RABBITMQ_* variant.
+var_is_set("RABBITMQ_" ++ _ = PrefixedVar) ->
+ os:getenv(PrefixedVar) /= false;
+var_is_set(Var) ->
+ os:getenv("RABBITMQ_" ++ Var) /= false orelse
+ os:getenv(Var) /= false.
+
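+%% Return true if the value is "1", "yes" or "true" (case-insensitive,
+%% surrounding whitespace ignored); e.g. value_is_yes(" Yes ") = true.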
+value_is_yes(Value) when is_list(Value) orelse is_binary(Value) ->
+ Options = [{capture, none}, caseless],
+ re:run(string:trim(Value), "^(1|yes|true)$", Options) =:= match;
+value_is_yes(_) ->
+ false.
+
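+%% Collapse duplicate and trailing separators; e.g.
+%% normalize_path("/var//lib/rabbitmq/") = "/var/lib/rabbitmq".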
+normalize_path("" = Path) ->
+ Path;
+normalize_path(Path) ->
+ filename:join(filename:split(Path)).
+
+this_module_dir() ->
+ File = code:which(?MODULE),
+ %% Possible locations:
+ %% - the rabbit_common plugin (as an .ez archive):
+ %% .../plugins/rabbit_common-$version.ez/rabbit_common-$version/ebin
+ %% - the rabbit_common plugin (as a directory):
+ %% .../plugins/rabbit_common-$version/ebin
+ %% - the CLI:
+ %% .../escript/$cli
+ filename:dirname(File).
+
+maybe_setup_dist_for_remote_query(
+ #{from_remote_node := offline} = Context) ->
+ Context;
+maybe_setup_dist_for_remote_query(
+ #{from_remote_node := {RemoteNode, _}} = Context) ->
+ {NamePart, HostPart} = rabbit_nodes_common:parts(RemoteNode),
+ NameType = rabbit_nodes_common:name_type(RemoteNode),
+ ok = rabbit_nodes_common:ensure_epmd(),
+ Context1 = setup_dist_for_remote_query(
+ Context, NamePart, HostPart, NameType, 50),
+ case is_rabbitmq_loaded_on_remote_node(Context1) of
+ true -> Context1;
+ false -> maybe_stop_dist_for_remote_query(
+ update_context(Context, from_remote_node, offline))
+ end;
+maybe_setup_dist_for_remote_query(Context) ->
+ Context.
+
+setup_dist_for_remote_query(
+ #{dist_started_for_remote_query := true} = Context,
+ _, _, _, _) ->
+ Context;
+setup_dist_for_remote_query(Context, _, _, _, 0) ->
+ Context;
+setup_dist_for_remote_query(#{from_remote_node := {Remote, _}} = Context,
+ NamePart, HostPart, NameType,
+ Attempts) ->
+ RndNamePart = NamePart ++ "_ctl_" ++ integer_to_list(rand:uniform(100)),
+ Nodename = rabbit_nodes_common:make({RndNamePart, HostPart}),
+ case net_kernel:start([Nodename, NameType]) of
+ {ok, _} ->
+ update_context(Context, dist_started_for_remote_query, true);
+ {error, {already_started, _}} ->
+ Context;
+ {error, {{already_started, _}, _}} ->
+ Context;
+ Error ->
+ logger:error(
+ "rabbit_env: Failed to setup distribution (as ~s) to "
+ "query node ~s: ~p",
+ [Nodename, Remote, Error]),
+ setup_dist_for_remote_query(Context,
+ NamePart, HostPart, NameType,
+ Attempts - 1)
+ end.
+
+is_rabbitmq_loaded_on_remote_node(
+ #{from_remote_node := Remote}) ->
+ case query_remote(Remote, application, loaded_applications, []) of
+ {ok, Apps} ->
+ lists:keymember(mnesia, 1, Apps) andalso
+ lists:keymember(rabbit, 1, Apps);
+ _ ->
+ false
+ end.
+
+maybe_stop_dist_for_remote_query(
+ #{dist_started_for_remote_query := true} = Context) ->
+ net_kernel:stop(),
+ maps:remove(dist_started_for_remote_query, Context);
+maybe_stop_dist_for_remote_query(Context) ->
+ Context.
+
+query_remote({RemoteNode, Timeout}, Mod, Func, Args) ->
+ Ret = rpc:call(RemoteNode, Mod, Func, Args, Timeout),
+ case Ret of
+ {badrpc, nodedown} = Error -> Error;
+ {badrpc, _} = Error -> throw({query, RemoteNode, Error});
+ _ -> {ok, Ret}
+ end.
diff --git a/deps/rabbit_common/src/rabbit_error_logger_handler.erl b/deps/rabbit_common/src/rabbit_error_logger_handler.erl
new file mode 100644
index 0000000000..714790a449
--- /dev/null
+++ b/deps/rabbit_common/src/rabbit_error_logger_handler.erl
@@ -0,0 +1,169 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_error_logger_handler).
+
+-behaviour(gen_event).
+
+%% API
+-export([start_link/0, add_handler/0]).
+
+%% gen_event callbacks
+-export([init/1, handle_event/2, handle_call/2,
+ handle_info/2, terminate/2, code_change/3]).
+
+-define(SERVER, ?MODULE).
+
+-record(state, {report = []}).
+
+%%%===================================================================
+%%% API
+%%%===================================================================
+
+%%--------------------------------------------------------------------
+%% @doc
+%% Creates an event manager
+%%
+%% @spec start_link() -> {ok, Pid} | {error, Error}
+%% @end
+%%--------------------------------------------------------------------
+start_link() ->
+ gen_event:start_link({local, ?SERVER}).
+
+%%--------------------------------------------------------------------
+%% @doc
+%% Adds an event handler
+%%
+%% @spec add_handler() -> ok | {'EXIT', Reason} | term()
+%% @end
+%%--------------------------------------------------------------------
+add_handler() ->
+ gen_event:add_handler(?SERVER, ?MODULE, []).
+
+%%%===================================================================
+%%% gen_event callbacks
+%%%===================================================================
+
+%%--------------------------------------------------------------------
+%% @private
+%% @doc
+%% Whenever a new event handler is added to an event manager,
+%% this function is called to initialize the event handler.
+%%
+%% @spec init(Args) -> {ok, State}
+%% @end
+%%--------------------------------------------------------------------
+init([]) ->
+ {ok, #state{}}.
+
+%%--------------------------------------------------------------------
+%% @private
+%% @doc
+%% Whenever an event manager receives an event sent using
+%% gen_event:notify/2 or gen_event:sync_notify/2, this function is
+%% called for each installed event handler to handle the event.
+%%
+%% @spec handle_event(Event, State) ->
+%% {ok, State} |
+%% {swap_handler, Args1, State1, Mod2, Args2} |
+%% remove_handler
+%% @end
+%%--------------------------------------------------------------------
+
+handle_event({info_report, _Gleader, {_Pid, _Type,
+ {net_kernel, {'EXIT', _, Reason}}}},
+ #state{report = Report} = State) ->
+ NewReport = case format(Reason) of
+ [] -> Report;
+ Formatted -> [Formatted | Report]
+ end,
+ {ok, State#state{report = NewReport}};
+handle_event(_Event, State) ->
+ {ok, State}.
+
+%%--------------------------------------------------------------------
+%% @private
+%% @doc
+%% Whenever an event manager receives a request sent using
+%% gen_event:call/3,4, this function is called for the specified
+%% event handler to handle the request.
+%%
+%% @spec handle_call(Request, State) ->
+%% {ok, Reply, State} |
+%% {swap_handler, Reply, Args1, State1, Mod2, Args2} |
+%% {remove_handler, Reply}
+%% @end
+%%--------------------------------------------------------------------
+handle_call(get_connection_report, State) ->
+ {ok, lists:reverse(State#state.report), State#state{report = []}};
+handle_call(_Request, State) ->
+ Reply = ok,
+ {ok, Reply, State}.
+
+%%--------------------------------------------------------------------
+%% @private
+%% @doc
+%% This function is called for each installed event handler when
+%% an event manager receives any other message than an event or a
+%% synchronous request (or a system message).
+%%
+%% @spec handle_info(Info, State) ->
+%% {ok, State} |
+%% {swap_handler, Args1, State1, Mod2, Args2} |
+%% remove_handler
+%% @end
+%%--------------------------------------------------------------------
+handle_info(_Info, State) ->
+ {ok, State}.
+
+%%--------------------------------------------------------------------
+%% @private
+%% @doc
+%% Whenever an event handler is deleted from an event manager, this
+%% function is called. It should be the opposite of Module:init/1 and
+%% do any necessary cleaning up.
+%%
+%% @spec terminate(Reason, State) -> void()
+%% @end
+%%--------------------------------------------------------------------
+terminate(_Reason, _State) ->
+ ok.
+
+%%--------------------------------------------------------------------
+%% @private
+%% @doc
+%% Convert process state when code is changed
+%%
+%% @spec code_change(OldVsn, State, Extra) -> {ok, NewState}
+%% @end
+%%--------------------------------------------------------------------
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+%%%===================================================================
+%%% Internal functions
+%%%===================================================================
+format({check_dflag_xnc_failed, _What}) ->
+ {" * Remote node uses an incompatible Erlang version ~n", []};
+format({recv_challenge_failed, no_node, Node}) ->
+ {" * Node name (or hostname) mismatch: node ~p believes its node name is not ~p but something else.~n"
+ " All nodes and CLI tools must refer to node ~p using the same name the node itself uses (see its logs to find out what it is)~n",
+ [Node, Node, Node]};
+format({recv_challenge_failed, Error}) ->
+ {" * Distribution failed unexpectedly while waiting for challenge: ~p~n", [Error]};
+format({recv_challenge_ack_failed, bad_cookie}) ->
+ {" * Authentication failed (rejected by the local node), please check the Erlang cookie~n", []};
+format({recv_challenge_ack_failed, {error, closed}}) ->
+ {" * Authentication failed (rejected by the remote node), please check the Erlang cookie~n", []};
+format({recv_status_failed, not_allowed}) ->
+ {" * This node is not on the list of nodes authorised by remote node (see net_kernel:allow/1)~n", []};
+format({recv_status_failed, {error, closed}}) ->
+ {" * Remote host closed TCP connection before completing authentication. Is the Erlang distribution using TLS?~n", []};
+format(setup_timer_timeout) ->
+ {" * TCP connection to remote host has timed out. Is the Erlang distribution using TLS?~n", []};
+format(_) ->
+ [].
diff --git a/deps/rabbit_common/src/rabbit_event.erl b/deps/rabbit_common/src/rabbit_event.erl
new file mode 100644
index 0000000000..152335958a
--- /dev/null
+++ b/deps/rabbit_common/src/rabbit_event.erl
@@ -0,0 +1,164 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_event).
+
+-include("rabbit.hrl").
+
+-export([start_link/0]).
+-export([init_stats_timer/2, init_disabled_stats_timer/2,
+ ensure_stats_timer/3, stop_stats_timer/2, reset_stats_timer/2]).
+-export([stats_level/2, if_enabled/3]).
+-export([notify/2, notify/3, notify_if/3]).
+-export([sync_notify/2, sync_notify/3]).
+
+-ignore_xref([{gen_event, start_link, 2}]).
+-dialyzer([{no_missing_calls, start_link/0}]).
+
+%%----------------------------------------------------------------------------
+
+-record(state, {level, interval, timer}).
+
+%%----------------------------------------------------------------------------
+
+-export_type([event_type/0, event_props/0, event_timestamp/0, event/0]).
+
+-type event_type() :: atom().
+-type event_props() :: term().
+-type event_timestamp() :: non_neg_integer().
+
+-type event() :: #event { type :: event_type(),
+ props :: event_props(),
+ reference :: 'none' | reference(),
+ timestamp :: event_timestamp() }.
+
+-type level() :: 'none' | 'coarse' | 'fine'.
+
+-type timer_fun() :: fun (() -> 'ok').
+-type container() :: tuple().
+-type pos() :: non_neg_integer().
+
+-spec start_link() -> rabbit_types:ok_pid_or_error().
+-spec init_stats_timer(container(), pos()) -> container().
+-spec init_disabled_stats_timer(container(), pos()) -> container().
+-spec ensure_stats_timer(container(), pos(), term()) -> container().
+-spec stop_stats_timer(container(), pos()) -> container().
+-spec reset_stats_timer(container(), pos()) -> container().
+-spec stats_level(container(), pos()) -> level().
+-spec if_enabled(container(), pos(), timer_fun()) -> 'ok'.
+-spec notify(event_type(), event_props()) -> 'ok'.
+-spec notify(event_type(), event_props(), reference() | 'none') -> 'ok'.
+-spec notify_if(boolean(), event_type(), event_props()) -> 'ok'.
+-spec sync_notify(event_type(), event_props()) -> 'ok'.
+-spec sync_notify(event_type(), event_props(), reference() | 'none') -> 'ok'.
+
+%%----------------------------------------------------------------------------
+
+start_link() ->
+ %% gen_event:start_link/2 is not available before OTP 20
+ %% RabbitMQ 3.7 supports OTP >= 19.3
+ case erlang:function_exported(gen_event, start_link, 2) of
+ true ->
+ gen_event:start_link(
+ {local, ?MODULE},
+ [{spawn_opt, [{fullsweep_after, 0}]}]
+ );
+ false ->
+ gen_event:start_link({local, ?MODULE})
+ end.
+
+%% The idea is, for each stat-emitting object:
+%%
+%% On startup:
+%% init_stats_timer(State)
+%% notify(created event)
+%% if_enabled(internal_emit_stats) - so we immediately send something
+%%
+%% On wakeup:
+%% ensure_stats_timer(State, emit_stats)
+%%   (Note we can't emit stats immediately: the timer may have fired 1ms ago.)
+%%
+%% emit_stats:
+%% if_enabled(internal_emit_stats)
+%% reset_stats_timer(State) - just bookkeeping
+%%
+%% Pre-hibernation:
+%% if_enabled(internal_emit_stats)
+%% stop_stats_timer(State)
+%%
+%% internal_emit_stats:
+%% notify(stats)
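+%%
+%% A minimal sketch of this pattern in a gen_server (illustrative only:
+%% the record, its field position and the `emit_stats' message are
+%% assumptions, not part of this module):
+%%
+%%   -record(st, {stats_timer}).
+%%
+%%   init([]) ->
+%%       State = rabbit_event:init_stats_timer(#st{}, #st.stats_timer),
+%%       rabbit_event:notify(widget_created, [{pid, self()}]),
+%%       {ok, State}.
+%%
+%%   handle_cast(Event, State) ->
+%%       State1 = rabbit_event:ensure_stats_timer(
+%%                  State, #st.stats_timer, emit_stats),
+%%       {noreply, State1}.
+%%
+%%   handle_info(emit_stats, State) ->
+%%       ok = rabbit_event:if_enabled(
+%%              State, #st.stats_timer,
+%%              fun () -> rabbit_event:notify(widget_stats, []) end),
+%%       {noreply, rabbit_event:reset_stats_timer(State, #st.stats_timer)}.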
+
+init_stats_timer(C, P) ->
+    %% If the rabbit app is not loaded, fall back to the defaults:
+    %% level `none', interval 5000 ms.
+ StatsLevel = application:get_env(rabbit, collect_statistics, none),
+ Interval = application:get_env(rabbit, collect_statistics_interval, 5000),
+ setelement(P, C, #state{level = StatsLevel, interval = Interval,
+ timer = undefined}).
+
+init_disabled_stats_timer(C, P) ->
+ setelement(P, C, #state{level = none, interval = 0, timer = undefined}).
+
+ensure_stats_timer(C, P, Msg) ->
+ case element(P, C) of
+ #state{level = Level, interval = Interval, timer = undefined} = State
+ when Level =/= none ->
+ TRef = erlang:send_after(Interval, self(), Msg),
+ setelement(P, C, State#state{timer = TRef});
+ #state{} ->
+ C
+ end.
+
+stop_stats_timer(C, P) ->
+ case element(P, C) of
+ #state{timer = TRef} = State when TRef =/= undefined ->
+ case erlang:cancel_timer(TRef) of
+ false -> C;
+ _ -> setelement(P, C, State#state{timer = undefined})
+ end;
+ #state{} ->
+ C
+ end.
+
+reset_stats_timer(C, P) ->
+ case element(P, C) of
+ #state{timer = TRef} = State when TRef =/= undefined ->
+ setelement(P, C, State#state{timer = undefined});
+ #state{} ->
+ C
+ end.
+
+stats_level(C, P) ->
+ #state{level = Level} = element(P, C),
+ Level.
+
+if_enabled(C, P, Fun) ->
+ case element(P, C) of
+ #state{level = none} -> ok;
+ #state{} -> Fun(), ok
+ end.
+
+notify_if(true, Type, Props) -> notify(Type, Props);
+notify_if(false, _Type, _Props) -> ok.
+
+notify(Type, Props) -> notify(Type, rabbit_data_coercion:to_proplist(Props), none).
+
+notify(Type, Props, Ref) ->
+ %% Using {Name, node()} here to not fail if the event handler is not started
+ gen_event:notify({?MODULE, node()}, event_cons(Type, rabbit_data_coercion:to_proplist(Props), Ref)).
+
+sync_notify(Type, Props) -> sync_notify(Type, Props, none).
+
+sync_notify(Type, Props, Ref) ->
+ gen_event:sync_notify(?MODULE, event_cons(Type, Props, Ref)).
+
+event_cons(Type, Props, Ref) ->
+ #event{type = Type,
+ props = Props,
+ reference = Ref,
+ timestamp = os:system_time(milli_seconds)}.
+
diff --git a/deps/rabbit_common/src/rabbit_exchange_type.erl b/deps/rabbit_common/src/rabbit_exchange_type.erl
new file mode 100644
index 0000000000..ebd5cadbdb
--- /dev/null
+++ b/deps/rabbit_common/src/rabbit_exchange_type.erl
@@ -0,0 +1,68 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_exchange_type).
+
+-behaviour(rabbit_registry_class).
+
+-export([added_to_rabbit_registry/2, removed_from_rabbit_registry/1]).
+
+-type(tx() :: 'transaction' | 'none').
+-type(serial() :: pos_integer() | tx()).
+
+-callback description() -> [proplists:property()].
+
+%% Should Rabbit ensure that all binding events that are
+%% delivered to an individual exchange can be serialised? (they
+%% might still be delivered out of order, but there'll be a
+%% serial number).
+-callback serialise_events() -> boolean().
+
+%% An implementation may also never return from this callback; that is
+%% how the "invalid" exchange type works (see rabbit_exchange_type_invalid).
+-callback route(rabbit_types:exchange(), rabbit_types:delivery()) ->
+ rabbit_router:match_result().
+
+%% called BEFORE declaration, to check args etc; may exit with #amqp_error{}
+-callback validate(rabbit_types:exchange()) -> 'ok'.
+
+%% called BEFORE declaration, to check args etc
+-callback validate_binding(rabbit_types:exchange(), rabbit_types:binding()) ->
+ rabbit_types:ok_or_error({'binding_invalid', string(), [any()]}).
+
+%% called after declaration and recovery
+-callback create(tx(), rabbit_types:exchange()) -> 'ok'.
+
+%% called after exchange (auto)deletion.
+-callback delete(tx(), rabbit_types:exchange(), [rabbit_types:binding()]) ->
+ 'ok'.
+
+%% called when the policy attached to this exchange changes.
+-callback policy_changed(rabbit_types:exchange(), rabbit_types:exchange()) ->
+ 'ok'.
+
+%% called after a binding has been added or recovered
+-callback add_binding(serial(), rabbit_types:exchange(),
+ rabbit_types:binding()) -> 'ok'.
+
+%% called after bindings have been deleted.
+-callback remove_bindings(serial(), rabbit_types:exchange(),
+ [rabbit_types:binding()]) -> 'ok'.
+
+%% called when comparing exchanges for equivalence - should return ok or
+%% exit with #amqp_error{}
+-callback assert_args_equivalence(rabbit_types:exchange(),
+ rabbit_framing:amqp_table()) ->
+ 'ok' | rabbit_types:connection_exit().
+
+%% Exchange type specific info keys
+-callback info(rabbit_types:exchange()) -> [{atom(), term()}].
+
+-callback info(rabbit_types:exchange(), [atom()]) -> [{atom(), term()}].
+
+added_to_rabbit_registry(_Type, _ModuleName) -> ok.
+removed_from_rabbit_registry(_Type) -> ok.
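+
+%% A minimal, illustrative implementation of this behaviour (the module
+%% name, type name and values below are made up, not part of RabbitMQ):
+%%
+%%   -module(rabbit_exchange_type_noop).
+%%   -behaviour(rabbit_exchange_type).
+%%   -export([description/0, serialise_events/0, route/2, validate/1,
+%%            validate_binding/2, create/2, delete/3, policy_changed/2,
+%%            add_binding/3, remove_bindings/3,
+%%            assert_args_equivalence/2, info/1, info/2]).
+%%
+%%   description() -> [{name, <<"x-noop">>},
+%%                     {description, <<"routes to no queues">>}].
+%%   serialise_events() -> false.
+%%   route(_X, _Delivery) -> [].
+%%   validate(_X) -> ok.
+%%   validate_binding(_X, _Binding) -> ok.
+%%   create(_Tx, _X) -> ok.
+%%   delete(_Tx, _X, _Bindings) -> ok.
+%%   policy_changed(_OldX, _NewX) -> ok.
+%%   add_binding(_Serial, _X, _Binding) -> ok.
+%%   remove_bindings(_Serial, _X, _Bindings) -> ok.
+%%   assert_args_equivalence(_X, _Args) -> ok.
+%%   info(_X) -> [].
+%%   info(_X, _Items) -> [].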
diff --git a/deps/rabbit_common/src/rabbit_heartbeat.erl b/deps/rabbit_common/src/rabbit_heartbeat.erl
new file mode 100644
index 0000000000..8dbc7f3887
--- /dev/null
+++ b/deps/rabbit_common/src/rabbit_heartbeat.erl
@@ -0,0 +1,184 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_heartbeat).
+
+-export([start/6, start/7]).
+-export([start_heartbeat_sender/4, start_heartbeat_receiver/4,
+ pause_monitor/1, resume_monitor/1]).
+
+-export([system_continue/3, system_terminate/4, system_code_change/4]).
+
+-include("rabbit.hrl").
+
+%%----------------------------------------------------------------------------
+
+-export_type([heartbeaters/0]).
+
+-type heartbeaters() :: {rabbit_types:maybe(pid()), rabbit_types:maybe(pid())}.
+
+-type heartbeat_callback() :: fun (() -> any()).
+
+-export_type([heartbeat_timeout/0]).
+-type heartbeat_timeout() :: non_neg_integer().
+
+-spec start
+ (pid(), rabbit_net:socket(), heartbeat_timeout(), heartbeat_callback(),
+ heartbeat_timeout(), heartbeat_callback()) ->
+ heartbeaters().
+
+-spec start
+ (pid(), rabbit_net:socket(), rabbit_types:proc_name(),
+ heartbeat_timeout(), heartbeat_callback(), heartbeat_timeout(),
+ heartbeat_callback()) ->
+ heartbeaters().
+
+-spec start_heartbeat_sender
+ (rabbit_net:socket(), heartbeat_timeout(), heartbeat_callback(),
+ rabbit_types:proc_type_and_name()) ->
+ rabbit_types:ok(pid()).
+-spec start_heartbeat_receiver
+ (rabbit_net:socket(), heartbeat_timeout(), heartbeat_callback(),
+ rabbit_types:proc_type_and_name()) ->
+ rabbit_types:ok(pid()).
+
+-spec pause_monitor(heartbeaters()) -> 'ok'.
+-spec resume_monitor(heartbeaters()) -> 'ok'.
+
+-spec system_code_change(_,_,_,_) -> {'ok',_}.
+-spec system_continue(_,_,{_, _}) -> any().
+-spec system_terminate(_,_,_,_) -> no_return().
+
+%%----------------------------------------------------------------------------
+start(SupPid, Sock, SendTimeoutSec, SendFun, ReceiveTimeoutSec, ReceiveFun) ->
+ start(SupPid, Sock, unknown,
+ SendTimeoutSec, SendFun, ReceiveTimeoutSec, ReceiveFun).
+
+start(SupPid, Sock, Identity,
+ SendTimeoutSec, SendFun, ReceiveTimeoutSec, ReceiveFun) ->
+ {ok, Sender} =
+ start_heartbeater(SendTimeoutSec, SupPid, Sock,
+ SendFun, heartbeat_sender,
+ start_heartbeat_sender, Identity),
+ {ok, Receiver} =
+ start_heartbeater(ReceiveTimeoutSec, SupPid, Sock,
+ ReceiveFun, heartbeat_receiver,
+ start_heartbeat_receiver, Identity),
+ {Sender, Receiver}.
+
+start_heartbeat_sender(Sock, TimeoutSec, SendFun, Identity) ->
+ %% the 'div 2' is there so that we don't end up waiting for nearly
+ %% 2 * TimeoutSec before sending a heartbeat in the boundary case
+ %% where the last message was sent just after a heartbeat.
+ heartbeater({Sock, TimeoutSec * 1000 div 2, send_oct, 0,
+ fun () -> SendFun(), continue end}, Identity).
+
+start_heartbeat_receiver(Sock, TimeoutSec, ReceiveFun, Identity) ->
+ %% we check for incoming data every interval, and time out after
+ %% two checks with no change. As a result we will time out between
+ %% 2 and 3 intervals after the last data has been received.
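+    %% For example, with TimeoutSec = 60 the socket's recv_oct counter
+    %% is sampled every 60s, so the receive callback fires between
+    %% 120s and 180s after the last data arrived.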
+ heartbeater({Sock, TimeoutSec * 1000, recv_oct, 1,
+ fun () -> ReceiveFun(), stop end}, Identity).
+
+pause_monitor({_Sender, none}) -> ok;
+pause_monitor({_Sender, Receiver}) -> Receiver ! pause, ok.
+
+resume_monitor({_Sender, none}) -> ok;
+resume_monitor({_Sender, Receiver}) -> Receiver ! resume, ok.
+
+system_continue(_Parent, Deb, {Params, State}) ->
+ heartbeater(Params, Deb, State).
+
+system_terminate(Reason, _Parent, _Deb, _State) ->
+ exit(Reason).
+
+system_code_change(Misc, _Module, _OldVsn, _Extra) ->
+ {ok, Misc}.
+
+%%----------------------------------------------------------------------------
+start_heartbeater(0, _SupPid, _Sock, _TimeoutFun, _Name, _Callback,
+ _Identity) ->
+ {ok, none};
+start_heartbeater(TimeoutSec, SupPid, Sock, TimeoutFun, Name, Callback,
+ Identity) ->
+ supervisor2:start_child(
+ SupPid, {Name,
+ {rabbit_heartbeat, Callback,
+ [Sock, TimeoutSec, TimeoutFun, {Name, Identity}]},
+ transient, ?WORKER_WAIT, worker, [rabbit_heartbeat]}).
+
+heartbeater(Params, Identity) ->
+ Deb = sys:debug_options([]),
+ {ok, proc_lib:spawn_link(fun () ->
+ rabbit_misc:store_proc_name(Identity),
+ heartbeater(Params, Deb, {0, 0})
+ end)}.
+
+heartbeater({Sock, TimeoutMillisec, StatName, Threshold, Handler} = Params,
+ Deb, {StatVal0, SameCount} = State) ->
+ Recurse = fun (State1) -> heartbeater(Params, Deb, State1) end,
+ System = fun (From, Req) ->
+ sys:handle_system_msg(
+ Req, From, self(), ?MODULE, Deb, {Params, State})
+ end,
+ receive
+ pause ->
+ receive
+ resume -> Recurse({0, 0});
+ {system, From, Req} -> System(From, Req);
+ Other -> exit({unexpected_message, Other})
+ end;
+ {system, From, Req} ->
+ System(From, Req);
+ Other ->
+ exit({unexpected_message, Other})
+ after TimeoutMillisec ->
+ OkFun = fun(StatVal1) ->
+ if StatVal0 =:= 0 andalso StatName =:= send_oct ->
+ % Note: this clause is necessary to ensure the
+ % first RMQ -> client heartbeat is sent at the
+ % first interval, instead of waiting the first
+ % two intervals
+ {run_handler, {StatVal1, 0}};
+ StatVal1 =/= StatVal0 ->
+ {recurse, {StatVal1, 0}};
+ SameCount < Threshold ->
+                            {recurse, {StatVal1, SameCount + 1}};
+ true ->
+ {run_handler, {StatVal1, 0}}
+ end
+ end,
+ SSResult = get_sock_stats(Sock, StatName, OkFun),
+ handle_get_sock_stats(SSResult, Sock, StatName, Recurse, Handler)
+ end.
+
+handle_get_sock_stats(stop, _Sock, _StatName, _Recurse, _Handler) ->
+ ok;
+handle_get_sock_stats({recurse, RecurseArg}, _Sock, _StatName, Recurse, _Handler) ->
+ Recurse(RecurseArg);
+handle_get_sock_stats({run_handler, {_, SameCount}}, Sock, StatName, Recurse, Handler) ->
+ case Handler() of
+ stop -> ok;
+ continue ->
+ OkFun = fun(StatVal) ->
+ {recurse, {StatVal, SameCount}}
+ end,
+ SSResult = get_sock_stats(Sock, StatName, OkFun),
+ handle_get_sock_stats(SSResult, Sock, StatName, Recurse, Handler)
+ end.
+
+get_sock_stats(Sock, StatName, OkFun) ->
+ case rabbit_net:getstat(Sock, [StatName]) of
+ {ok, [{StatName, StatVal}]} ->
+ OkFun(StatVal);
+ {error, einval} ->
+ %% the socket is dead, most likely because the
+ %% connection is being shut down -> terminate
+ stop;
+ {error, Reason} ->
+ exit({cannot_get_socket_stats, Reason})
+ end.
diff --git a/deps/rabbit_common/src/rabbit_http_util.erl b/deps/rabbit_common/src/rabbit_http_util.erl
new file mode 100644
index 0000000000..d0ff498110
--- /dev/null
+++ b/deps/rabbit_common/src/rabbit_http_util.erl
@@ -0,0 +1,967 @@
+%% @author Bob Ippolito <bob@mochimedia.com>
+%% @copyright 2007 Mochi Media, Inc.
+
+%% @doc Utilities for parsing and quoting.
+
+-module(rabbit_http_util).
+-author('bob@mochimedia.com').
+-export([join/2, quote_plus/1, urlencode/1, parse_qs/1, unquote/1]).
+-export([path_split/1]).
+-export([urlsplit/1, urlsplit_path/1, urlunsplit/1, urlunsplit_path/1]).
+-export([parse_header/1]).
+-export([shell_quote/1, cmd/1, cmd_string/1, cmd_port/2, cmd_status/1, cmd_status/2]).
+-export([record_to_proplist/2, record_to_proplist/3]).
+-export([safe_relative_path/1, partition/2]).
+-export([parse_qvalues/1, pick_accepted_encodings/3]).
+-export([make_io/1]).
+
+-define(PERCENT, 37). % $\%
+-define(FULLSTOP, 46). % $\.
+-define(IS_HEX(C), ((C >= $0 andalso C =< $9) orelse
+ (C >= $a andalso C =< $f) orelse
+ (C >= $A andalso C =< $F))).
+-define(QS_SAFE(C), ((C >= $a andalso C =< $z) orelse
+ (C >= $A andalso C =< $Z) orelse
+ (C >= $0 andalso C =< $9) orelse
+ (C =:= ?FULLSTOP orelse C =:= $- orelse C =:= $~ orelse
+ C =:= $_))).
+
+hexdigit(C) when C < 10 -> $0 + C;
+hexdigit(C) when C < 16 -> $A + (C - 10).
+
+unhexdigit(C) when C >= $0, C =< $9 -> C - $0;
+unhexdigit(C) when C >= $a, C =< $f -> C - $a + 10;
+unhexdigit(C) when C >= $A, C =< $F -> C - $A + 10.
+
+%% @spec partition(String, Sep) -> {String, [], []} | {Prefix, Sep, Postfix}
+%% @doc Inspired by Python 2.5's str.partition:
+%% partition("foo/bar", "/") = {"foo", "/", "bar"},
+%% partition("foo", "/") = {"foo", "", ""}.
+partition(String, Sep) ->
+ case partition(String, Sep, []) of
+ undefined ->
+ {String, "", ""};
+ Result ->
+ Result
+ end.
+
+partition("", _Sep, _Acc) ->
+ undefined;
+partition(S, Sep, Acc) ->
+ case partition2(S, Sep) of
+ undefined ->
+ [C | Rest] = S,
+ partition(Rest, Sep, [C | Acc]);
+ Rest ->
+ {lists:reverse(Acc), Sep, Rest}
+ end.
+
+partition2(Rest, "") ->
+ Rest;
+partition2([C | R1], [C | R2]) ->
+ partition2(R1, R2);
+partition2(_S, _Sep) ->
+ undefined.
+
+
+
+%% @spec safe_relative_path(string()) -> string() | undefined
+%% @doc Return the reduced version of a relative path or undefined if it
+%% is not safe. Safe relative paths can be joined with an absolute path
+%% and will result in a subdirectory of the absolute path. Safe paths
+%% never contain a backslash character.
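+%% Examples (illustrative): safe_relative_path("foo/../bar") = "bar",
+%% safe_relative_path("../foo") = undefined.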
+safe_relative_path("/" ++ _) ->
+ undefined;
+safe_relative_path(P) ->
+ case string:chr(P, $\\) of
+ 0 ->
+ safe_relative_path(P, []);
+ _ ->
+ undefined
+ end.
+
+safe_relative_path("", Acc) ->
+ case Acc of
+ [] ->
+ "";
+ _ ->
+ string:join(lists:reverse(Acc), "/")
+ end;
+safe_relative_path(P, Acc) ->
+ case partition(P, "/") of
+ {"", "/", _} ->
+ %% /foo or foo//bar
+ undefined;
+ {"..", _, _} when Acc =:= [] ->
+ undefined;
+ {"..", _, Rest} ->
+ safe_relative_path(Rest, tl(Acc));
+ {Part, "/", ""} ->
+ safe_relative_path("", ["", Part | Acc]);
+ {Part, _, Rest} ->
+ safe_relative_path(Rest, [Part | Acc])
+ end.
+
+%% @spec shell_quote(string()) -> string()
+%% @doc Quote a string according to UNIX shell quoting rules; returns a string
+%% surrounded by double quotes.
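+%% Example (illustrative): shell_quote("a$b") = "\"a\\$b\"".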
+shell_quote(L) ->
+ shell_quote(L, [$\"]).
+
+%% @spec cmd_port([string()], Options) -> port()
+%% @doc open_port({spawn, cmd_string(Argv)}, Options).
+cmd_port(Argv, Options) ->
+ open_port({spawn, cmd_string(Argv)}, Options).
+
+%% @spec cmd([string()]) -> string()
+%% @doc os:cmd(cmd_string(Argv)).
+cmd(Argv) ->
+ os:cmd(cmd_string(Argv)).
+
+%% @spec cmd_string([string()]) -> string()
+%% @doc Create a shell quoted command string from a list of arguments.
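+%% Example (illustrative): cmd_string(["echo", "ok"]) = "\"echo\" \"ok\"".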
+cmd_string(Argv) ->
+ string:join([shell_quote(X) || X <- Argv], " ").
+
+%% @spec cmd_status([string()]) -> {ExitStatus::integer(), Stdout::binary()}
+%% @doc Accumulate the output and exit status from the given application,
+%% which will be spawned via cmd_port/2.
+cmd_status(Argv) ->
+ cmd_status(Argv, []).
+
+%% @spec cmd_status([string()], [atom()]) -> {ExitStatus::integer(), Stdout::binary()}
+%% @doc Accumulate the output and exit status from the given application,
+%% which will be spawned via cmd_port/2.
+cmd_status(Argv, Options) ->
+ Port = cmd_port(Argv, [exit_status, stderr_to_stdout,
+ use_stdio, binary | Options]),
+ try cmd_loop(Port, [])
+ after catch port_close(Port)
+ end.
+
+%% @spec cmd_loop(port(), list()) -> {ExitStatus::integer(), Stdout::binary()}
+%% @doc Accumulate the output and exit status from a port.
+cmd_loop(Port, Acc) ->
+ receive
+ {Port, {exit_status, Status}} ->
+ {Status, iolist_to_binary(lists:reverse(Acc))};
+ {Port, {data, Data}} ->
+ cmd_loop(Port, [Data | Acc])
+ end.
+
+%% @spec join([iolist()], iolist()) -> iolist()
+%% @doc Join a list of strings or binaries together with the given separator
+%% string, char, or binary. The output is flattened, but may be an
+%% iolist() instead of a string() if any of the inputs are binary().
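+%% Example (illustrative): join(["a", "b", "c"], ", ") = "a, b, c".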
+join([], _Separator) ->
+ [];
+join([S], _Separator) ->
+ lists:flatten(S);
+join(Strings, Separator) ->
+ lists:flatten(revjoin(lists:reverse(Strings), Separator, [])).
+
+revjoin([], _Separator, Acc) ->
+ Acc;
+revjoin([S | Rest], Separator, []) ->
+ revjoin(Rest, Separator, [S]);
+revjoin([S | Rest], Separator, Acc) ->
+ revjoin(Rest, Separator, [S, Separator | Acc]).
+
+%% @spec quote_plus(atom() | integer() | float() | string() | binary()) -> string()
+%% @doc URL-safe encoding of the given term.
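+%% Example (illustrative): quote_plus("foo bar;") = "foo+bar%3B".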
+quote_plus(Atom) when is_atom(Atom) ->
+ quote_plus(atom_to_list(Atom));
+quote_plus(Int) when is_integer(Int) ->
+ quote_plus(integer_to_list(Int));
+quote_plus(Binary) when is_binary(Binary) ->
+ quote_plus(binary_to_list(Binary));
+quote_plus(Float) when is_float(Float) ->
+ quote_plus(rabbit_numerical:digits(Float));
+quote_plus(String) ->
+ quote_plus(String, []).
+
+quote_plus([], Acc) ->
+ lists:reverse(Acc);
+quote_plus([C | Rest], Acc) when ?QS_SAFE(C) ->
+ quote_plus(Rest, [C | Acc]);
+quote_plus([$\s | Rest], Acc) ->
+ quote_plus(Rest, [$+ | Acc]);
+quote_plus([C | Rest], Acc) ->
+ <<Hi:4, Lo:4>> = <<C>>,
+ quote_plus(Rest, [hexdigit(Lo), hexdigit(Hi), ?PERCENT | Acc]).
+
+%% @spec urlencode([{Key, Value}]) -> string()
+%% @doc URL encode the property list.
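+%% Example (illustrative): urlencode([{"q", "a b"}, {n, 1}]) = "q=a+b&n=1".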
+urlencode(Props) ->
+ Pairs = lists:foldr(
+ fun ({K, V}, Acc) ->
+ [quote_plus(K) ++ "=" ++ quote_plus(V) | Acc]
+ end, [], Props),
+ string:join(Pairs, "&").
+
+%% @spec parse_qs(string() | binary()) -> [{Key, Value}]
+%% @doc Parse a query string or an application/x-www-form-urlencoded body.
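+%% Example (illustrative): parse_qs("q=a+b&n=1") = [{"q", "a b"}, {"n", "1"}].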
+parse_qs(Binary) when is_binary(Binary) ->
+ parse_qs(binary_to_list(Binary));
+parse_qs(String) ->
+ parse_qs(String, []).
+
+parse_qs([], Acc) ->
+ lists:reverse(Acc);
+parse_qs(String, Acc) ->
+ {Key, Rest} = parse_qs_key(String),
+ {Value, Rest1} = parse_qs_value(Rest),
+ parse_qs(Rest1, [{Key, Value} | Acc]).
+
+parse_qs_key(String) ->
+ parse_qs_key(String, []).
+
+parse_qs_key([], Acc) ->
+ {qs_revdecode(Acc), ""};
+parse_qs_key([$= | Rest], Acc) ->
+ {qs_revdecode(Acc), Rest};
+parse_qs_key(Rest=[$; | _], Acc) ->
+ {qs_revdecode(Acc), Rest};
+parse_qs_key(Rest=[$& | _], Acc) ->
+ {qs_revdecode(Acc), Rest};
+parse_qs_key([C | Rest], Acc) ->
+ parse_qs_key(Rest, [C | Acc]).
+
+parse_qs_value(String) ->
+ parse_qs_value(String, []).
+
+parse_qs_value([], Acc) ->
+ {qs_revdecode(Acc), ""};
+parse_qs_value([$; | Rest], Acc) ->
+ {qs_revdecode(Acc), Rest};
+parse_qs_value([$& | Rest], Acc) ->
+ {qs_revdecode(Acc), Rest};
+parse_qs_value([C | Rest], Acc) ->
+ parse_qs_value(Rest, [C | Acc]).
+
+%% @spec unquote(string() | binary()) -> string()
+%% @doc Unquote a URL-encoded string.
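+%% Example (illustrative): unquote("foo+bar%20baz") = "foo bar baz".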
+unquote(Binary) when is_binary(Binary) ->
+ unquote(binary_to_list(Binary));
+unquote(String) ->
+ qs_revdecode(lists:reverse(String)).
+
+qs_revdecode(S) ->
+ qs_revdecode(S, []).
+
+qs_revdecode([], Acc) ->
+ Acc;
+qs_revdecode([$+ | Rest], Acc) ->
+ qs_revdecode(Rest, [$\s | Acc]);
+qs_revdecode([Lo, Hi, ?PERCENT | Rest], Acc) when ?IS_HEX(Lo), ?IS_HEX(Hi) ->
+ qs_revdecode(Rest, [(unhexdigit(Lo) bor (unhexdigit(Hi) bsl 4)) | Acc]);
+qs_revdecode([C | Rest], Acc) ->
+ qs_revdecode(Rest, [C | Acc]).
+
+%% @spec urlsplit(Url) -> {Scheme, Netloc, Path, Query, Fragment}
+%% @doc Return a 5-tuple; does not expand % escapes. Only supports HTTP-style
+%% URLs.
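+%% Example (illustrative):
+%% urlsplit("https://host:8080/foo?q=1#frag") =
+%%   {"https", "host:8080", "/foo", "q=1", "frag"}.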
+urlsplit(Url) ->
+ {Scheme, Url1} = urlsplit_scheme(Url),
+ {Netloc, Url2} = urlsplit_netloc(Url1),
+ {Path, Query, Fragment} = urlsplit_path(Url2),
+ {Scheme, Netloc, Path, Query, Fragment}.
+
+urlsplit_scheme(Url) ->
+ case urlsplit_scheme(Url, []) of
+ no_scheme ->
+ {"", Url};
+ Res ->
+ Res
+ end.
+
+urlsplit_scheme([C | Rest], Acc) when ((C >= $a andalso C =< $z) orelse
+ (C >= $A andalso C =< $Z) orelse
+ (C >= $0 andalso C =< $9) orelse
+ C =:= $+ orelse C =:= $- orelse
+ C =:= $.) ->
+ urlsplit_scheme(Rest, [C | Acc]);
+urlsplit_scheme([$: | Rest], Acc=[_ | _]) ->
+ {string:to_lower(lists:reverse(Acc)), Rest};
+urlsplit_scheme(_Rest, _Acc) ->
+ no_scheme.
+
+urlsplit_netloc("//" ++ Rest) ->
+ urlsplit_netloc(Rest, []);
+urlsplit_netloc(Path) ->
+ {"", Path}.
+
+urlsplit_netloc("", Acc) ->
+ {lists:reverse(Acc), ""};
+urlsplit_netloc(Rest=[C | _], Acc) when C =:= $/; C =:= $?; C =:= $# ->
+ {lists:reverse(Acc), Rest};
+urlsplit_netloc([C | Rest], Acc) ->
+ urlsplit_netloc(Rest, [C | Acc]).
+
+
+%% @spec path_split(string()) -> {Part, Rest}
+%% @doc Split a path starting from the left, as in URL traversal.
+%% path_split("foo/bar") = {"foo", "bar"},
+%% path_split("/foo/bar") = {"", "foo/bar"}.
+path_split(S) ->
+ path_split(S, []).
+
+path_split("", Acc) ->
+ {lists:reverse(Acc), ""};
+path_split("/" ++ Rest, Acc) ->
+ {lists:reverse(Acc), Rest};
+path_split([C | Rest], Acc) ->
+ path_split(Rest, [C | Acc]).
+
+
+%% @spec urlunsplit({Scheme, Netloc, Path, Query, Fragment}) -> string()
+%% @doc Assemble a URL from the 5-tuple. Path must be absolute.
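+%% Example (illustrative):
+%% urlunsplit({"https", "host", "/foo", "q=1", "frag"}) =
+%%   "https://host/foo?q=1#frag".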
+urlunsplit({Scheme, Netloc, Path, Query, Fragment}) ->
+ lists:flatten([case Scheme of "" -> ""; _ -> [Scheme, "://"] end,
+ Netloc,
+ urlunsplit_path({Path, Query, Fragment})]).
+
+%% @spec urlunsplit_path({Path, Query, Fragment}) -> string()
+%% @doc Assemble a URL path from the 3-tuple.
+urlunsplit_path({Path, Query, Fragment}) ->
+ lists:flatten([Path,
+ case Query of "" -> ""; _ -> [$? | Query] end,
+ case Fragment of "" -> ""; _ -> [$# | Fragment] end]).
+
+%% @spec urlsplit_path(Url) -> {Path, Query, Fragment}
+%% @doc Return a 3-tuple; does not expand % escapes. Only supports HTTP-style
+%% paths.
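+%% Example (illustrative): urlsplit_path("/foo?bar#baz") = {"/foo", "bar", "baz"}.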
+urlsplit_path(Path) ->
+ urlsplit_path(Path, []).
+
+urlsplit_path("", Acc) ->
+ {lists:reverse(Acc), "", ""};
+urlsplit_path("?" ++ Rest, Acc) ->
+ {Query, Fragment} = urlsplit_query(Rest),
+ {lists:reverse(Acc), Query, Fragment};
+urlsplit_path("#" ++ Rest, Acc) ->
+ {lists:reverse(Acc), "", Rest};
+urlsplit_path([C | Rest], Acc) ->
+ urlsplit_path(Rest, [C | Acc]).
+
+urlsplit_query(Query) ->
+ urlsplit_query(Query, []).
+
+urlsplit_query("", Acc) ->
+ {lists:reverse(Acc), ""};
+urlsplit_query("#" ++ Rest, Acc) ->
+ {lists:reverse(Acc), Rest};
+urlsplit_query([C | Rest], Acc) ->
+ urlsplit_query(Rest, [C | Acc]).
+
+%% @spec parse_header(string()) -> {Type, [{K, V}]}
+%% @doc Parse a Content-Type-like header; return the main Content-Type
+%% and a property list of options.
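+%% Example (illustrative):
+%% parse_header("multipart/form-data; boundary=AaB03x") =
+%%   {"multipart/form-data", [{"boundary", "AaB03x"}]}.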
+parse_header(String) ->
+ %% TODO: This is exactly as broken as Python's cgi module.
+ %% Should parse properly like mochiweb_cookies.
+ [Type | Parts] = [string:strip(S) || S <- string:tokens(String, ";")],
+ F = fun (S, Acc) ->
+ case lists:splitwith(fun (C) -> C =/= $= end, S) of
+ {"", _} ->
+ %% Skip anything with no name
+ Acc;
+ {_, ""} ->
+ %% Skip anything with no value
+ Acc;
+ {Name, [$\= | Value]} ->
+ [{string:to_lower(string:strip(Name)),
+ unquote_header(string:strip(Value))} | Acc]
+ end
+ end,
+ {string:to_lower(Type),
+ lists:foldr(F, [], Parts)}.
+
+unquote_header("\"" ++ Rest) ->
+ unquote_header(Rest, []);
+unquote_header(S) ->
+ S.
+
+unquote_header("", Acc) ->
+ lists:reverse(Acc);
+unquote_header("\"", Acc) ->
+ lists:reverse(Acc);
+unquote_header([$\\, C | Rest], Acc) ->
+ unquote_header(Rest, [C | Acc]);
+unquote_header([C | Rest], Acc) ->
+ unquote_header(Rest, [C | Acc]).
+
+%% @spec record_to_proplist(Record, Fields) -> proplist()
+%% @doc Calls record_to_proplist/3 with a default TypeKey of '__record'.
+record_to_proplist(Record, Fields) ->
+ record_to_proplist(Record, Fields, '__record').
+
+%% @spec record_to_proplist(Record, Fields, TypeKey) -> proplist()
+%% @doc Return a proplist of the given Record with each field in the
+%% Fields list set as a key with the corresponding value in the Record.
+%% TypeKey is the key used to store the record type.
+%% Fields should be obtained by calling record_info(fields, record_type),
+%% where record_type is the record type of Record.
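+%% Example (illustrative), given -record(r, {a = 1, b = 2}):
+%% record_to_proplist(#r{}, record_info(fields, r)) =
+%%   [{'__record', r}, {a, 1}, {b, 2}].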
+record_to_proplist(Record, Fields, TypeKey)
+ when tuple_size(Record) - 1 =:= length(Fields) ->
+ lists:zip([TypeKey | Fields], tuple_to_list(Record)).
+
+
+shell_quote([], Acc) ->
+ lists:reverse([$\" | Acc]);
+shell_quote([C | Rest], Acc) when C =:= $\" orelse C =:= $\` orelse
+ C =:= $\\ orelse C =:= $\$ ->
+ shell_quote(Rest, [C, $\\ | Acc]);
+shell_quote([C | Rest], Acc) ->
+ shell_quote(Rest, [C | Acc]).
+
+%% @spec parse_qvalues(string()) -> [qvalue()] | invalid_qvalue_string
+%% @type qvalue() = {media_type() | encoding() , float()}.
+%% @type media_type() = string().
+%% @type encoding() = string().
+%%
+%% @doc Parses a list (given as a string) of elements with Q values associated
+%% with them. Elements are separated by commas and each element is separated
+%% from its Q value by a semicolon. Q values are optional; when missing,
+%% the value of an element defaults to 1.0. A Q value is always in the
+%% range [0.0, 1.0]. A Q value list is used, for example, as the value of the
+%% HTTP "Accept" and "Accept-Encoding" headers.
+%%
+%% Q values are described in section 3.9 of RFC 2616 (HTTP/1.1).
+%%
+%% Example:
+%%
+%% parse_qvalues("gzip; q=0.5, deflate, identity;q=0.0") ->
+%% [{"gzip", 0.5}, {"deflate", 1.0}, {"identity", 0.0}]
+%%
+parse_qvalues(QValuesStr) ->
+ try
+ lists:map(
+ fun(Pair) ->
+ [Type | Params] = string:tokens(Pair, ";"),
+ NormParams = normalize_media_params(Params),
+ {Q, NonQParams} = extract_q(NormParams),
+ {string:join([string:strip(Type) | NonQParams], ";"), Q}
+ end,
+ string:tokens(string:to_lower(QValuesStr), ",")
+ )
+ catch
+ _Type:_Error ->
+ invalid_qvalue_string
+ end.
+
+normalize_media_params(Params) ->
+ {ok, Re} = re:compile("\\s"),
+ normalize_media_params(Re, Params, []).
+
+normalize_media_params(_Re, [], Acc) ->
+ lists:reverse(Acc);
+normalize_media_params(Re, [Param | Rest], Acc) ->
+ NormParam = re:replace(Param, Re, "", [global, {return, list}]),
+ normalize_media_params(Re, Rest, [NormParam | Acc]).
+
+extract_q(NormParams) ->
+ {ok, KVRe} = re:compile("^([^=]+)=([^=]+)$"),
+ {ok, QRe} = re:compile("^((?:0|1)(?:\\.\\d{1,3})?)$"),
+ extract_q(KVRe, QRe, NormParams, []).
+
+extract_q(_KVRe, _QRe, [], Acc) ->
+ {1.0, lists:reverse(Acc)};
+extract_q(KVRe, QRe, [Param | Rest], Acc) ->
+ case re:run(Param, KVRe, [{capture, [1, 2], list}]) of
+ {match, [Name, Value]} ->
+ case Name of
+ "q" ->
+ {match, [Q]} = re:run(Value, QRe, [{capture, [1], list}]),
+ QVal = case Q of
+ "0" ->
+ 0.0;
+ "1" ->
+ 1.0;
+ Else ->
+ list_to_float(Else)
+ end,
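+                            %% a Q value outside [0.0, 1.0] has no matching
+                            %% clause below; the resulting case_clause error
+                            %% is caught in parse_qvalues/1 and reported as
+                            %% invalid_qvalue_string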
+ case QVal < 0.0 orelse QVal > 1.0 of
+ false ->
+ {QVal, lists:reverse(Acc) ++ Rest}
+ end;
+ _ ->
+ extract_q(KVRe, QRe, Rest, [Param | Acc])
+ end
+ end.
+
+%% @spec pick_accepted_encodings([qvalue()], [encoding()], encoding()) ->
+%% [encoding()]
+%%
+%% @doc Determines which encodings specified in the given Q values list are
+%% valid according to a list of supported encodings and a default encoding.
+%%
+%% The returned list of encodings is sorted in descending order of the
+%% Q values in the given list. The last element of this list is the given
+%% default encoding unless this encoding is explicitly or implicitly
+%% marked with a Q value of 0.0 in the given Q values list.
+%% Note: encodings with the same Q value are kept in the same order as
+%% found in the input Q values list.
+%%
+%% This encoding picking process is described in section 14.3 of
+%% RFC 2616 (HTTP/1.1).
+%%
+%% Example:
+%%
+%% pick_accepted_encodings(
+%% [{"gzip", 0.5}, {"deflate", 1.0}],
+%% ["gzip", "identity"],
+%% "identity"
+%% ) ->
+%% ["gzip", "identity"]
+%%
+pick_accepted_encodings(AcceptedEncs, SupportedEncs, DefaultEnc) ->
+ SortedQList = lists:reverse(
+ lists:sort(fun({_, Q1}, {_, Q2}) -> Q1 < Q2 end, AcceptedEncs)
+ ),
+ {Accepted, Refused} = lists:foldr(
+ fun({E, Q}, {A, R}) ->
+ case Q > 0.0 of
+ true ->
+ {[E | A], R};
+ false ->
+ {A, [E | R]}
+ end
+ end,
+ {[], []},
+ SortedQList
+ ),
+ Refused1 = lists:foldr(
+ fun(Enc, Acc) ->
+ case Enc of
+ "*" ->
+ lists:subtract(SupportedEncs, Accepted) ++ Acc;
+ _ ->
+ [Enc | Acc]
+ end
+ end,
+ [],
+ Refused
+ ),
+ Accepted1 = lists:foldr(
+ fun(Enc, Acc) ->
+ case Enc of
+ "*" ->
+ lists:subtract(SupportedEncs, Accepted ++ Refused1) ++ Acc;
+ _ ->
+ [Enc | Acc]
+ end
+ end,
+ [],
+ Accepted
+ ),
+ Accepted2 = case lists:member(DefaultEnc, Accepted1) of
+ true ->
+ Accepted1;
+ false ->
+ Accepted1 ++ [DefaultEnc]
+ end,
+ [E || E <- Accepted2, lists:member(E, SupportedEncs),
+ not lists:member(E, Refused1)].
+
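+%% Coerce an atom, integer, list, or binary to iodata. Illustrative:
+%% iolist_to_binary(make_io(20)) = <<"20">>.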
+make_io(Atom) when is_atom(Atom) ->
+ atom_to_list(Atom);
+make_io(Integer) when is_integer(Integer) ->
+ integer_to_list(Integer);
+make_io(Io) when is_list(Io); is_binary(Io) ->
+ Io.
+
+%%
+%% Tests
+%%
+-ifdef(TEST).
+-include_lib("eunit/include/eunit.hrl").
+
+make_io_test() ->
+ ?assertEqual(
+ <<"atom">>,
+ iolist_to_binary(make_io(atom))),
+ ?assertEqual(
+ <<"20">>,
+ iolist_to_binary(make_io(20))),
+ ?assertEqual(
+ <<"list">>,
+ iolist_to_binary(make_io("list"))),
+ ?assertEqual(
+ <<"binary">>,
+ iolist_to_binary(make_io(<<"binary">>))),
+ ok.
+
+-record(test_record, {field1=f1, field2=f2}).
+record_to_proplist_test() ->
+ ?assertEqual(
+ [{'__record', test_record},
+ {field1, f1},
+ {field2, f2}],
+ record_to_proplist(#test_record{}, record_info(fields, test_record))),
+ ?assertEqual(
+ [{'typekey', test_record},
+ {field1, f1},
+ {field2, f2}],
+ record_to_proplist(#test_record{},
+ record_info(fields, test_record),
+ typekey)),
+ ok.
+
+shell_quote_test() ->
+ ?assertEqual(
+ "\"foo \\$bar\\\"\\`' baz\"",
+ shell_quote("foo $bar\"`' baz")),
+ ok.
+
+cmd_port_test_spool(Port, Acc) ->
+ receive
+ {Port, eof} ->
+ Acc;
+ {Port, {data, {eol, Data}}} ->
+ cmd_port_test_spool(Port, ["\n", Data | Acc]);
+ {Port, Unknown} ->
+ throw({unknown, Unknown})
+ after 1000 ->
+ throw(timeout)
+ end.
+
+cmd_port_test() ->
+ Port = cmd_port(["echo", "$bling$ `word`!"],
+ [eof, stream, {line, 4096}]),
+ Res = try lists:append(lists:reverse(cmd_port_test_spool(Port, [])))
+ after catch port_close(Port)
+ end,
+ self() ! {Port, wtf},
+ try cmd_port_test_spool(Port, [])
+ catch throw:{unknown, wtf} -> ok
+ end,
+ try cmd_port_test_spool(Port, [])
+ catch throw:timeout -> ok
+ end,
+ ?assertEqual(
+ "$bling$ `word`!\n",
+ Res).
+
+cmd_test() ->
+ ?assertEqual(
+ "$bling$ `word`!\n",
+ cmd(["echo", "$bling$ `word`!"])),
+ ok.
+
+cmd_string_test() ->
+ ?assertEqual(
+ "\"echo\" \"\\$bling\\$ \\`word\\`!\"",
+ cmd_string(["echo", "$bling$ `word`!"])),
+ ok.
+
+cmd_status_test() ->
+ ?assertEqual(
+ {0, <<"$bling$ `word`!\n">>},
+ cmd_status(["echo", "$bling$ `word`!"])),
+ ok.
+
+
+parse_header_test() ->
+ ?assertEqual(
+ {"multipart/form-data", [{"boundary", "AaB03x"}]},
+ parse_header("multipart/form-data; boundary=AaB03x")),
+ %% This tests (currently) intentionally broken behavior
+ ?assertEqual(
+ {"multipart/form-data",
+ [{"b", ""},
+ {"cgi", "is"},
+ {"broken", "true\"e"}]},
+ parse_header("multipart/form-data;b=;cgi=\"i\\s;broken=true\"e;=z;z")),
+ ok.
+
+path_split_test() ->
+ {"", "foo/bar"} = path_split("/foo/bar"),
+ {"foo", "bar"} = path_split("foo/bar"),
+ {"bar", ""} = path_split("bar"),
+ ok.
+
+urlsplit_test() ->
+ {"", "", "/foo", "", "bar?baz"} = urlsplit("/foo#bar?baz"),
+ {"https", "host:port", "/foo", "", "bar?baz"} =
+ urlsplit("https://host:port/foo#bar?baz"),
+ {"https", "host", "", "", ""} = urlsplit("https://host"),
+ {"", "", "/wiki/Category:Fruit", "", ""} =
+ urlsplit("/wiki/Category:Fruit"),
+ ok.
+
+urlsplit_path_test() ->
+ {"/foo/bar", "", ""} = urlsplit_path("/foo/bar"),
+ {"/foo", "baz", ""} = urlsplit_path("/foo?baz"),
+ {"/foo", "", "bar?baz"} = urlsplit_path("/foo#bar?baz"),
+ {"/foo", "", "bar?baz#wibble"} = urlsplit_path("/foo#bar?baz#wibble"),
+ {"/foo", "bar", "baz"} = urlsplit_path("/foo?bar#baz"),
+ {"/foo", "bar?baz", "baz"} = urlsplit_path("/foo?bar?baz#baz"),
+ ok.
+
+urlunsplit_test() ->
+ "/foo#bar?baz" = urlunsplit({"", "", "/foo", "", "bar?baz"}),
+ "https://host:port/foo#bar?baz" =
+ urlunsplit({"https", "host:port", "/foo", "", "bar?baz"}),
+ ok.
+
+urlunsplit_path_test() ->
+ "/foo/bar" = urlunsplit_path({"/foo/bar", "", ""}),
+ "/foo?baz" = urlunsplit_path({"/foo", "baz", ""}),
+ "/foo#bar?baz" = urlunsplit_path({"/foo", "", "bar?baz"}),
+ "/foo#bar?baz#wibble" = urlunsplit_path({"/foo", "", "bar?baz#wibble"}),
+ "/foo?bar#baz" = urlunsplit_path({"/foo", "bar", "baz"}),
+ "/foo?bar?baz#baz" = urlunsplit_path({"/foo", "bar?baz", "baz"}),
+ ok.
+
+join_test() ->
+ ?assertEqual("foo,bar,baz",
+ join(["foo", "bar", "baz"], $,)),
+ ?assertEqual("foo,bar,baz",
+ join(["foo", "bar", "baz"], ",")),
+ ?assertEqual("foo bar",
+ join([["foo", " bar"]], ",")),
+ ?assertEqual("foo bar,baz",
+ join([["foo", " bar"], "baz"], ",")),
+ ?assertEqual("foo",
+ join(["foo"], ",")),
+ ?assertEqual("foobarbaz",
+ join(["foo", "bar", "baz"], "")),
+ ?assertEqual("foo" ++ [<<>>] ++ "bar" ++ [<<>>] ++ "baz",
+ join(["foo", "bar", "baz"], <<>>)),
+ ?assertEqual("foobar" ++ [<<"baz">>],
+ join(["foo", "bar", <<"baz">>], "")),
+ ?assertEqual("",
+ join([], "any")),
+ ok.
+
+quote_plus_test() ->
+ "foo" = quote_plus(foo),
+ "1" = quote_plus(1),
+ "1.1" = quote_plus(1.1),
+ "foo" = quote_plus("foo"),
+ "foo+bar" = quote_plus("foo bar"),
+ "foo%0A" = quote_plus("foo\n"),
+ "foo%0A" = quote_plus("foo\n"),
+ "foo%3B%26%3D" = quote_plus("foo;&="),
+ "foo%3B%26%3D" = quote_plus(<<"foo;&=">>),
+ ok.
+
+unquote_test() ->
+ ?assertEqual("foo bar",
+ unquote("foo+bar")),
+ ?assertEqual("foo bar",
+ unquote("foo%20bar")),
+ ?assertEqual("foo\r\n",
+ unquote("foo%0D%0A")),
+ ?assertEqual("foo\r\n",
+ unquote(<<"foo%0D%0A">>)),
+ ok.
+
+urlencode_test() ->
+ "foo=bar&baz=wibble+%0D%0A&z=1" = urlencode([{foo, "bar"},
+ {"baz", "wibble \r\n"},
+ {z, 1}]),
+ ok.
+
+parse_qs_test() ->
+ ?assertEqual(
+ [{"foo", "bar"}, {"baz", "wibble \r\n"}, {"z", "1"}],
+ parse_qs("foo=bar&baz=wibble+%0D%0a&z=1")),
+ ?assertEqual(
+ [{"", "bar"}, {"baz", "wibble \r\n"}, {"z", ""}],
+ parse_qs("=bar&baz=wibble+%0D%0a&z=")),
+ ?assertEqual(
+ [{"foo", "bar"}, {"baz", "wibble \r\n"}, {"z", "1"}],
+ parse_qs(<<"foo=bar&baz=wibble+%0D%0a&z=1">>)),
+ ?assertEqual(
+ [],
+ parse_qs("")),
+ ?assertEqual(
+ [{"foo", ""}, {"bar", ""}, {"baz", ""}],
+ parse_qs("foo;bar&baz")),
+ ok.
+
+partition_test() ->
+ {"foo", "", ""} = partition("foo", "/"),
+ {"foo", "/", "bar"} = partition("foo/bar", "/"),
+ {"foo", "/", ""} = partition("foo/", "/"),
+ {"", "/", "bar"} = partition("/bar", "/"),
+ {"f", "oo/ba", "r"} = partition("foo/bar", "oo/ba"),
+ ok.
+
+safe_relative_path_test() ->
+ "foo" = safe_relative_path("foo"),
+ "foo/" = safe_relative_path("foo/"),
+ "foo" = safe_relative_path("foo/bar/.."),
+ "bar" = safe_relative_path("foo/../bar"),
+ "bar/" = safe_relative_path("foo/../bar/"),
+ "" = safe_relative_path("foo/.."),
+ "" = safe_relative_path("foo/../"),
+ undefined = safe_relative_path("/foo"),
+ undefined = safe_relative_path("../foo"),
+ undefined = safe_relative_path("foo/../.."),
+ undefined = safe_relative_path("foo//"),
+ undefined = safe_relative_path("foo\\bar"),
+ ok.
+
+parse_qvalues_test() ->
+ [] = parse_qvalues(""),
+ [{"identity", 0.0}] = parse_qvalues("identity;q=0"),
+ [{"identity", 0.0}] = parse_qvalues("identity ;q=0"),
+ [{"identity", 0.0}] = parse_qvalues(" identity; q =0 "),
+ [{"identity", 0.0}] = parse_qvalues("identity ; q = 0"),
+ [{"identity", 0.0}] = parse_qvalues("identity ; q= 0.0"),
+ [{"gzip", 1.0}, {"deflate", 1.0}, {"identity", 0.0}] = parse_qvalues(
+ "gzip,deflate,identity;q=0.0"
+ ),
+ [{"deflate", 1.0}, {"gzip", 1.0}, {"identity", 0.0}] = parse_qvalues(
+ "deflate,gzip,identity;q=0.0"
+ ),
+ [{"gzip", 1.0}, {"deflate", 1.0}, {"gzip", 1.0}, {"identity", 0.0}] =
+ parse_qvalues("gzip,deflate,gzip,identity;q=0"),
+ [{"gzip", 1.0}, {"deflate", 1.0}, {"identity", 0.0}] = parse_qvalues(
+ "gzip, deflate , identity; q=0.0"
+ ),
+ [{"gzip", 1.0}, {"deflate", 1.0}, {"identity", 0.0}] = parse_qvalues(
+ "gzip; q=1, deflate;q=1.0, identity;q=0.0"
+ ),
+ [{"gzip", 0.5}, {"deflate", 1.0}, {"identity", 0.0}] = parse_qvalues(
+ "gzip; q=0.5, deflate;q=1.0, identity;q=0"
+ ),
+ [{"gzip", 0.5}, {"deflate", 1.0}, {"identity", 0.0}] = parse_qvalues(
+ "gzip; q=0.5, deflate , identity;q=0.0"
+ ),
+ [{"gzip", 0.5}, {"deflate", 0.8}, {"identity", 0.0}] = parse_qvalues(
+ "gzip; q=0.5, deflate;q=0.8, identity;q=0.0"
+ ),
+ [{"gzip", 0.5}, {"deflate", 1.0}, {"identity", 1.0}] = parse_qvalues(
+ "gzip; q=0.5,deflate,identity"
+ ),
+ [{"gzip", 0.5}, {"deflate", 1.0}, {"identity", 1.0}, {"identity", 1.0}] =
+ parse_qvalues("gzip; q=0.5,deflate,identity, identity "),
+ [{"text/html;level=1", 1.0}, {"text/plain", 0.5}] =
+ parse_qvalues("text/html;level=1, text/plain;q=0.5"),
+ [{"text/html;level=1", 0.3}, {"text/plain", 1.0}] =
+ parse_qvalues("text/html;level=1;q=0.3, text/plain"),
+ [{"text/html;level=1", 0.3}, {"text/plain", 1.0}] =
+ parse_qvalues("text/html; level = 1; q = 0.3, text/plain"),
+ [{"text/html;level=1", 0.3}, {"text/plain", 1.0}] =
+ parse_qvalues("text/html;q=0.3;level=1, text/plain"),
+ invalid_qvalue_string = parse_qvalues("gzip; q=1.1, deflate"),
+ invalid_qvalue_string = parse_qvalues("gzip; q=0.5, deflate;q=2"),
+ invalid_qvalue_string = parse_qvalues("gzip, deflate;q=AB"),
+ invalid_qvalue_string = parse_qvalues("gzip; q=2.1, deflate"),
+ invalid_qvalue_string = parse_qvalues("gzip; q=0.1234, deflate"),
+ invalid_qvalue_string = parse_qvalues("text/html;level=1;q=0.3, text/html;level"),
+ ok.
+
+pick_accepted_encodings_test() ->
+ ["identity"] = pick_accepted_encodings(
+ [],
+ ["gzip", "identity"],
+ "identity"
+ ),
+ ["gzip", "identity"] = pick_accepted_encodings(
+ [{"gzip", 1.0}],
+ ["gzip", "identity"],
+ "identity"
+ ),
+ ["identity"] = pick_accepted_encodings(
+ [{"gzip", 0.0}],
+ ["gzip", "identity"],
+ "identity"
+ ),
+ ["gzip", "identity"] = pick_accepted_encodings(
+ [{"gzip", 1.0}, {"deflate", 1.0}],
+ ["gzip", "identity"],
+ "identity"
+ ),
+ ["gzip", "identity"] = pick_accepted_encodings(
+ [{"gzip", 0.5}, {"deflate", 1.0}],
+ ["gzip", "identity"],
+ "identity"
+ ),
+ ["identity"] = pick_accepted_encodings(
+ [{"gzip", 0.0}, {"deflate", 0.0}],
+ ["gzip", "identity"],
+ "identity"
+ ),
+ ["gzip"] = pick_accepted_encodings(
+ [{"gzip", 1.0}, {"deflate", 1.0}, {"identity", 0.0}],
+ ["gzip", "identity"],
+ "identity"
+ ),
+ ["gzip", "deflate", "identity"] = pick_accepted_encodings(
+ [{"gzip", 1.0}, {"deflate", 1.0}],
+ ["gzip", "deflate", "identity"],
+ "identity"
+ ),
+ ["gzip", "deflate"] = pick_accepted_encodings(
+ [{"gzip", 1.0}, {"deflate", 1.0}, {"identity", 0.0}],
+ ["gzip", "deflate", "identity"],
+ "identity"
+ ),
+ ["deflate", "gzip", "identity"] = pick_accepted_encodings(
+ [{"gzip", 0.2}, {"deflate", 1.0}],
+ ["gzip", "deflate", "identity"],
+ "identity"
+ ),
+ ["deflate", "deflate", "gzip", "identity"] = pick_accepted_encodings(
+ [{"gzip", 0.2}, {"deflate", 1.0}, {"deflate", 1.0}],
+ ["gzip", "deflate", "identity"],
+ "identity"
+ ),
+ ["deflate", "gzip", "gzip", "identity"] = pick_accepted_encodings(
+ [{"gzip", 0.2}, {"deflate", 1.0}, {"gzip", 1.0}],
+ ["gzip", "deflate", "identity"],
+ "identity"
+ ),
+ ["gzip", "deflate", "gzip", "identity"] = pick_accepted_encodings(
+ [{"gzip", 0.2}, {"deflate", 0.9}, {"gzip", 1.0}],
+ ["gzip", "deflate", "identity"],
+ "identity"
+ ),
+ [] = pick_accepted_encodings(
+ [{"*", 0.0}],
+ ["gzip", "deflate", "identity"],
+ "identity"
+ ),
+ ["gzip", "deflate", "identity"] = pick_accepted_encodings(
+ [{"*", 1.0}],
+ ["gzip", "deflate", "identity"],
+ "identity"
+ ),
+ ["gzip", "deflate", "identity"] = pick_accepted_encodings(
+ [{"*", 0.6}],
+ ["gzip", "deflate", "identity"],
+ "identity"
+ ),
+ ["gzip"] = pick_accepted_encodings(
+ [{"gzip", 1.0}, {"*", 0.0}],
+ ["gzip", "deflate", "identity"],
+ "identity"
+ ),
+ ["gzip", "deflate"] = pick_accepted_encodings(
+ [{"gzip", 1.0}, {"deflate", 0.6}, {"*", 0.0}],
+ ["gzip", "deflate", "identity"],
+ "identity"
+ ),
+ ["deflate", "gzip"] = pick_accepted_encodings(
+ [{"gzip", 0.5}, {"deflate", 1.0}, {"*", 0.0}],
+ ["gzip", "deflate", "identity"],
+ "identity"
+ ),
+ ["gzip", "identity"] = pick_accepted_encodings(
+ [{"deflate", 0.0}, {"*", 1.0}],
+ ["gzip", "deflate", "identity"],
+ "identity"
+ ),
+ ["gzip", "identity"] = pick_accepted_encodings(
+ [{"*", 1.0}, {"deflate", 0.0}],
+ ["gzip", "deflate", "identity"],
+ "identity"
+ ),
+ ok.
+
+-endif.
diff --git a/deps/rabbit_common/src/rabbit_json.erl b/deps/rabbit_common/src/rabbit_json.erl
new file mode 100644
index 0000000000..a10569135b
--- /dev/null
+++ b/deps/rabbit_common/src/rabbit_json.erl
@@ -0,0 +1,63 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_json).
+
+-export([decode/1, decode/2, try_decode/1, try_decode/2,
+ encode/1, encode/2, try_encode/1, try_encode/2]).
+
+-define(DEFAULT_DECODE_OPTIONS, [return_maps]).
+
+
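+%% Illustrative usage (jsx-backed; return_maps is the default):
+%%   decode(<<"{\"a\":1}">>) = #{<<"a">> => 1}
+%%   encode(#{<<"a">> => 1}) = <<"{\"a\":1}">>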
+-spec decode(jsx:json_text()) -> jsx:json_term().
+decode(JSON) ->
+ decode(JSON, ?DEFAULT_DECODE_OPTIONS).
+
+
+-spec decode(jsx:json_text(), jsx_to_term:config()) -> jsx:json_term().
+decode(JSON, Opts) ->
+ jsx:decode(JSON, Opts).
+
+
+-spec try_decode(jsx:json_text()) -> {ok, jsx:json_term()} |
+ {error, Reason :: term()}.
+try_decode(JSON) ->
+ try_decode(JSON, ?DEFAULT_DECODE_OPTIONS).
+
+
+-spec try_decode(jsx:json_text(), jsx_to_term:config()) ->
+ {ok, jsx:json_term()} | {error, Reason :: term()}.
+try_decode(JSON, Opts) ->
+ try
+ {ok, decode(JSON, Opts)}
+ catch error: Reason ->
+ {error, Reason}
+ end.
+
+-spec encode(jsx:json_term()) -> jsx:json_text().
+encode(Term) ->
+ encode(Term, []).
+
+-spec encode(jsx:json_term(), jsx_to_json:config()) -> jsx:json_text().
+encode(Term, Opts) ->
+ jsx:encode(Term, Opts).
+
+
+-spec try_encode(jsx:json_term()) -> {ok, jsx:json_text()} |
+ {error, Reason :: term()}.
+try_encode(Term) ->
+ try_encode(Term, []).
+
+
+-spec try_encode(jsx:json_term(), jsx_to_term:config()) ->
+ {ok, jsx:json_text()} | {error, Reason :: term()}.
+try_encode(Term, Opts) ->
+ try
+ {ok, encode(Term, Opts)}
+ catch error: Reason ->
+ {error, Reason}
+ end.
diff --git a/deps/rabbit_common/src/rabbit_log.erl b/deps/rabbit_common/src/rabbit_log.erl
new file mode 100644
index 0000000000..22b4619d1c
--- /dev/null
+++ b/deps/rabbit_common/src/rabbit_log.erl
@@ -0,0 +1,164 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_log).
+
+-export([log/2, log/3, log/4]).
+-export([debug/1, debug/2, debug/3,
+ info/1, info/2, info/3,
+ notice/1, notice/2, notice/3,
+ warning/1, warning/2, warning/3,
+ error/1, error/2, error/3,
+ critical/1, critical/2, critical/3,
+ alert/1, alert/2, alert/3,
+ emergency/1, emergency/2, emergency/3,
+ none/1, none/2, none/3]).
+-export([make_internal_sink_name/1]).
+
+-include("rabbit_log.hrl").
+%%----------------------------------------------------------------------------
+
+-type category() :: channel |
+ connection |
+ federation |
+ feature_flags |
+ ldap |
+ mirroring |
+ osiris |
+ prelaunch |
+ queue |
+ ra |
+ shovel |
+ upgrade.
+
+-spec debug(string()) -> 'ok'.
+-spec debug(string(), [any()]) -> 'ok'.
+-spec debug(pid() | [tuple()], string(), [any()]) -> 'ok'.
+-spec info(string()) -> 'ok'.
+-spec info(string(), [any()]) -> 'ok'.
+-spec info(pid() | [tuple()], string(), [any()]) -> 'ok'.
+-spec notice(string()) -> 'ok'.
+-spec notice(string(), [any()]) -> 'ok'.
+-spec notice(pid() | [tuple()], string(), [any()]) -> 'ok'.
+-spec warning(string()) -> 'ok'.
+-spec warning(string(), [any()]) -> 'ok'.
+-spec warning(pid() | [tuple()], string(), [any()]) -> 'ok'.
+-spec error(string()) -> 'ok'.
+-spec error(string(), [any()]) -> 'ok'.
+-spec error(pid() | [tuple()], string(), [any()]) -> 'ok'.
+-spec critical(string()) -> 'ok'.
+-spec critical(string(), [any()]) -> 'ok'.
+-spec critical(pid() | [tuple()], string(), [any()]) -> 'ok'.
+-spec alert(string()) -> 'ok'.
+-spec alert(string(), [any()]) -> 'ok'.
+-spec alert(pid() | [tuple()], string(), [any()]) -> 'ok'.
+-spec emergency(string()) -> 'ok'.
+-spec emergency(string(), [any()]) -> 'ok'.
+-spec emergency(pid() | [tuple()], string(), [any()]) -> 'ok'.
+-spec none(string()) -> 'ok'.
+-spec none(string(), [any()]) -> 'ok'.
+-spec none(pid() | [tuple()], string(), [any()]) -> 'ok'.
+
+%%----------------------------------------------------------------------------
+
+-spec log(category(), lager:log_level(), string()) -> 'ok'.
+log(Category, Level, Fmt) -> log(Category, Level, Fmt, []).
+
+-spec log(category(), lager:log_level(), string(), [any()]) -> 'ok'.
+log(Category, Level, Fmt, Args) when is_list(Args) ->
+ Sink = case Category of
+ default -> ?LAGER_SINK;
+ _ -> make_internal_sink_name(Category)
+ end,
+ lager:log(Sink, Level, self(), Fmt, Args).
+
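+%% Illustrative usage: rabbit_log:log(queue, info, "queue '~s' declared",
+%% [QName]) routes the message to the rabbit_log_queue_lager_event sink.
+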
+%% logger(3) handler.
+log(#{level := Level,
+ msg := Msg,
+ meta := #{pid := Pid}} = _LogEvent,
+ _Config) ->
+ case Msg of
+ {report, #{label := {error_logger, _}}} ->
+ %% Avoid recursive loop.
+ ok;
+ {report, #{label := {application_controller, progress}}} ->
+ %% Already logged by Lager.
+ ok;
+ {report, #{label := {supervisor, progress}}} ->
+ %% Already logged by Lager.
+ ok;
+ {report, #{report := Report}} ->
+ %% FIXME: Is this code reached?
+ error_logger:info_report(Report);
+ {report, #{format := Format, args := Args}} when is_list(Format) ->
+ lager:log(?LAGER_SINK, Level, Pid, Format, Args);
+ {string, String} ->
+ lager:log(?LAGER_SINK, Level, Pid, "~ts", [String]);
+ {Format, Args} when is_list(Format) ->
+ lager:log(?LAGER_SINK, Level, Pid, Format, Args)
+ end.
+
+make_internal_sink_name(channel) -> rabbit_log_channel_lager_event;
+make_internal_sink_name(connection) -> rabbit_log_connection_lager_event;
+make_internal_sink_name(default) -> rabbit_log_lager_event;
+make_internal_sink_name(feature_flags) -> rabbit_log_feature_flags_lager_event;
+make_internal_sink_name(federation) -> rabbit_log_federation_lager_event;
+make_internal_sink_name(ldap) -> rabbit_log_ldap_lager_event;
+make_internal_sink_name(mirroring) -> rabbit_log_mirroring_lager_event;
+make_internal_sink_name(osiris) -> rabbit_log_osiris_lager_event;
+make_internal_sink_name(prelaunch) -> rabbit_log_prelaunch_lager_event;
+make_internal_sink_name(queue) -> rabbit_log_queue_lager_event;
+make_internal_sink_name(ra) -> rabbit_log_ra_lager_event;
+make_internal_sink_name(shovel) -> rabbit_log_shovel_lager_event;
+make_internal_sink_name(upgrade) -> rabbit_log_upgrade_lager_event;
+make_internal_sink_name(Category) ->
+ erlang:error({unknown_category, Category}).
+
+debug(Format) -> debug(Format, []).
+debug(Format, Args) -> debug(self(), Format, Args).
+debug(Metadata, Format, Args) ->
+ lager:log(?LAGER_SINK, debug, Metadata, Format, Args).
+
+info(Format) -> info(Format, []).
+info(Format, Args) -> info(self(), Format, Args).
+info(Metadata, Format, Args) ->
+ lager:log(?LAGER_SINK, info, Metadata, Format, Args).
+
+notice(Format) -> notice(Format, []).
+notice(Format, Args) -> notice(self(), Format, Args).
+notice(Metadata, Format, Args) ->
+ lager:log(?LAGER_SINK, notice, Metadata, Format, Args).
+
+warning(Format) -> warning(Format, []).
+warning(Format, Args) -> warning(self(), Format, Args).
+warning(Metadata, Format, Args) ->
+ lager:log(?LAGER_SINK, warning, Metadata, Format, Args).
+
+error(Format) -> ?MODULE:error(Format, []).
+error(Format, Args) -> ?MODULE:error(self(), Format, Args).
+error(Metadata, Format, Args) ->
+ lager:log(?LAGER_SINK, error, Metadata, Format, Args).
+
+critical(Format) -> critical(Format, []).
+critical(Format, Args) -> critical(self(), Format, Args).
+critical(Metadata, Format, Args) ->
+ lager:log(?LAGER_SINK, critical, Metadata, Format, Args).
+
+alert(Format) -> alert(Format, []).
+alert(Format, Args) -> alert(self(), Format, Args).
+alert(Metadata, Format, Args) ->
+ lager:log(?LAGER_SINK, alert, Metadata, Format, Args).
+
+emergency(Format) -> emergency(Format, []).
+emergency(Format, Args) -> emergency(self(), Format, Args).
+emergency(Metadata, Format, Args) ->
+ lager:log(?LAGER_SINK, emergency, Metadata, Format, Args).
+
+none(Format) -> none(Format, []).
+none(Format, Args) -> none(self(), Format, Args).
+none(Metadata, Format, Args) ->
+ lager:log(?LAGER_SINK, none, Metadata, Format, Args).
diff --git a/deps/rabbit_common/src/rabbit_log_osiris_shim.erl b/deps/rabbit_common/src/rabbit_log_osiris_shim.erl
new file mode 100644
index 0000000000..09d6a63431
--- /dev/null
+++ b/deps/rabbit_common/src/rabbit_log_osiris_shim.erl
@@ -0,0 +1,26 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at https://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_log_osiris_shim).
+
+%% just a shim to redirect logs from osiris to rabbit_log
+
+-export([log/4]).
+
+-spec log(lager:log_level(), string(), [any()], _) -> ok.
+log(Level, Format, Args, _Meta) ->
+ rabbit_log:log(osiris, Level, Format, Args),
+ ok.
diff --git a/deps/rabbit_common/src/rabbit_log_ra_shim.erl b/deps/rabbit_common/src/rabbit_log_ra_shim.erl
new file mode 100644
index 0000000000..3d35ff6a07
--- /dev/null
+++ b/deps/rabbit_common/src/rabbit_log_ra_shim.erl
@@ -0,0 +1,16 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_log_ra_shim).
+
+%% just a shim to redirect logs from ra to rabbit_log
+
+-export([log/4]).
+
+log(Level, Format, Args, _Meta) ->
+ rabbit_log:log(ra, Level, Format, Args),
+ ok.
diff --git a/deps/rabbit_common/src/rabbit_misc.erl b/deps/rabbit_common/src/rabbit_misc.erl
new file mode 100644
index 0000000000..c5fd86dcbb
--- /dev/null
+++ b/deps/rabbit_common/src/rabbit_misc.erl
@@ -0,0 +1,1434 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_misc).
+
+-ignore_xref([{maps, get, 2}]).
+
+-include("rabbit.hrl").
+-include("rabbit_framing.hrl").
+-include("rabbit_misc.hrl").
+
+-ifdef(TEST).
+-export([decompose_pid/1, compose_pid/4]).
+-endif.
+
+-export([method_record_type/1, polite_pause/0, polite_pause/1]).
+-export([die/1, frame_error/2, amqp_error/4, quit/1,
+ protocol_error/3, protocol_error/4, protocol_error/1]).
+-export([type_class/1, assert_args_equivalence/4, assert_field_equivalence/4]).
+-export([dirty_read/1]).
+-export([table_lookup/2, set_table_value/4, amqp_table/1, to_amqp_table/1]).
+-export([r/3, r/2, r_arg/4, rs/1]).
+-export([enable_cover/0, report_cover/0]).
+-export([enable_cover/1, report_cover/1]).
+-export([start_cover/1]).
+-export([throw_on_error/2, with_exit_handler/2, is_abnormal_exit/1,
+ filter_exit_map/2]).
+-export([with_user/2]).
+-export([execute_mnesia_transaction/1]).
+-export([execute_mnesia_transaction/2]).
+-export([execute_mnesia_tx_with_tail/1]).
+-export([ensure_ok/2]).
+-export([tcp_name/3, format_inet_error/1]).
+-export([upmap/2, map_in_order/2, utf8_safe/1]).
+-export([table_filter/3]).
+-export([dirty_read_all/1, dirty_foreach_key/2, dirty_dump_log/1]).
+-export([format/2, format_many/1, format_stderr/2]).
+-export([unfold/2, ceil/1, queue_fold/3]).
+-export([sort_field_table/1]).
+-export([atom_to_binary/1, parse_bool/1, parse_int/1]).
+-export([pid_to_string/1, string_to_pid/1,
+ pid_change_node/2, node_to_fake_pid/1]).
+-export([version_compare/2, version_compare/3]).
+-export([version_minor_equivalent/2, strict_version_minor_equivalent/2]).
+-export([dict_cons/3, orddict_cons/3, maps_cons/3, gb_trees_cons/3]).
+-export([gb_trees_fold/3, gb_trees_foreach/2]).
+-export([all_module_attributes/1,
+ rabbitmq_related_module_attributes/1,
+ module_attributes_from_apps/2,
+ build_acyclic_graph/3]).
+-export([const/1]).
+-export([ntoa/1, ntoab/1]).
+-export([is_process_alive/1]).
+-export([pget/2, pget/3, pupdate/3, pget_or_die/2, pmerge/3, pset/3, plmerge/2]).
+-export([format_message_queue/2]).
+-export([append_rpc_all_nodes/4, append_rpc_all_nodes/5]).
+-export([os_cmd/1]).
+-export([is_os_process_alive/1]).
+-export([gb_sets_difference/2]).
+-export([version/0, otp_release/0, platform_and_version/0, otp_system_version/0,
+ rabbitmq_and_erlang_versions/0, which_applications/0]).
+-export([sequence_error/1]).
+-export([check_expiry/1]).
+-export([base64url/1]).
+-export([interval_operation/5]).
+-export([ensure_timer/4, stop_timer/2, send_after/3, cancel_timer/1]).
+-export([get_parent/0]).
+-export([store_proc_name/1, store_proc_name/2, get_proc_name/0]).
+-export([moving_average/4]).
+-export([escape_html_tags/1, b64decode_or_throw/1]).
+-export([get_env/3]).
+-export([get_channel_operation_timeout/0]).
+-export([random/1]).
+-export([rpc_call/4, rpc_call/5]).
+-export([get_gc_info/1]).
+-export([group_proplists_by/2]).
+
+%% Horrible macro to use in guards
+-define(IS_BENIGN_EXIT(R),
+ R =:= noproc; R =:= noconnection; R =:= nodedown; R =:= normal;
+ R =:= shutdown).
+
+%%----------------------------------------------------------------------------
+
+-export_type([resource_name/0, thunk/1, channel_or_connection_exit/0]).
+
+-type ok_or_error() :: rabbit_types:ok_or_error(any()).
+-type thunk(T) :: fun(() -> T).
+-type resource_name() :: binary().
+-type channel_or_connection_exit()
+ :: rabbit_types:channel_exit() | rabbit_types:connection_exit().
+-type digraph_label() :: term().
+-type graph_vertex_fun() ::
+ fun (({atom(), [term()]}) -> [{digraph:vertex(), digraph_label()}]).
+-type graph_edge_fun() ::
+ fun (({atom(), [term()]}) -> [{digraph:vertex(), digraph:vertex()}]).
+-type tref() :: {'erlang', reference()} | {timer, timer:tref()}.
+
+-spec method_record_type(rabbit_framing:amqp_method_record()) ->
+ rabbit_framing:amqp_method_name().
+-spec polite_pause() -> 'done'.
+-spec polite_pause(non_neg_integer()) -> 'done'.
+-spec die(rabbit_framing:amqp_exception()) -> channel_or_connection_exit().
+
+-spec quit(integer()) -> no_return().
+
+-spec frame_error(rabbit_framing:amqp_method_name(), binary()) ->
+ rabbit_types:connection_exit().
+-spec amqp_error
+ (rabbit_framing:amqp_exception(), string(), [any()],
+ rabbit_framing:amqp_method_name()) ->
+ rabbit_types:amqp_error().
+-spec protocol_error(rabbit_framing:amqp_exception(), string(), [any()]) ->
+ channel_or_connection_exit().
+-spec protocol_error
+ (rabbit_framing:amqp_exception(), string(), [any()],
+ rabbit_framing:amqp_method_name()) ->
+ channel_or_connection_exit().
+-spec protocol_error(rabbit_types:amqp_error()) ->
+ channel_or_connection_exit().
+-spec type_class(rabbit_framing:amqp_field_type()) -> atom().
+-spec assert_args_equivalence
+ (rabbit_framing:amqp_table(), rabbit_framing:amqp_table(),
+ rabbit_types:r(any()), [binary()]) ->
+ 'ok' | rabbit_types:connection_exit().
+-spec assert_field_equivalence
+ (any(), any(), rabbit_types:r(any()), atom() | binary()) ->
+ 'ok' | rabbit_types:connection_exit().
+-spec equivalence_fail
+ (any(), any(), rabbit_types:r(any()), atom() | binary()) ->
+ rabbit_types:connection_exit().
+-spec dirty_read({atom(), any()}) ->
+ rabbit_types:ok_or_error2(any(), 'not_found').
+-spec table_lookup(rabbit_framing:amqp_table(), binary()) ->
+ 'undefined' | {rabbit_framing:amqp_field_type(), any()}.
+-spec set_table_value
+ (rabbit_framing:amqp_table(), binary(), rabbit_framing:amqp_field_type(),
+ rabbit_framing:amqp_value()) ->
+ rabbit_framing:amqp_table().
+-spec r(rabbit_types:vhost(), K) ->
+ rabbit_types:r3(rabbit_types:vhost(), K, '_')
+ when is_subtype(K, atom()).
+-spec r(rabbit_types:vhost() | rabbit_types:r(atom()), K, resource_name()) ->
+ rabbit_types:r3(rabbit_types:vhost(), K, resource_name())
+ when is_subtype(K, atom()).
+-spec r_arg
+ (rabbit_types:vhost() | rabbit_types:r(atom()), K,
+ rabbit_framing:amqp_table(), binary()) ->
+ undefined |
+ rabbit_types:error(
+ {invalid_type, rabbit_framing:amqp_field_type()}) |
+ rabbit_types:r(K) when is_subtype(K, atom()).
+-spec rs(rabbit_types:r(atom())) -> string().
+-spec enable_cover() -> ok_or_error().
+-spec start_cover([{string(), string()} | string()]) -> 'ok'.
+-spec report_cover() -> 'ok'.
+-spec enable_cover([file:filename() | atom()]) -> ok_or_error().
+-spec report_cover([file:filename() | atom()]) -> 'ok'.
+-spec throw_on_error
+ (atom(), thunk(rabbit_types:error(any()) | {ok, A} | A)) -> A.
+-spec with_exit_handler(thunk(A), thunk(A)) -> A.
+-spec is_abnormal_exit(any()) -> boolean().
+-spec filter_exit_map(fun ((A) -> B), [A]) -> [B].
+-spec with_user(rabbit_types:username(), thunk(A)) -> A.
+-spec execute_mnesia_transaction(thunk(A)) -> A.
+-spec execute_mnesia_transaction(thunk(A), fun ((A, boolean()) -> B)) -> B.
+-spec execute_mnesia_tx_with_tail
+ (thunk(fun ((boolean()) -> B))) -> B | (fun ((boolean()) -> B)).
+-spec ensure_ok(ok_or_error(), atom()) -> 'ok'.
+-spec tcp_name(atom(), inet:ip_address(), rabbit_net:ip_port()) ->
+ atom().
+-spec format_inet_error(atom()) -> string().
+-spec upmap(fun ((A) -> B), [A]) -> [B].
+-spec map_in_order(fun ((A) -> B), [A]) -> [B].
+-spec table_filter
+ (fun ((A) -> boolean()), fun ((A, boolean()) -> 'ok'), atom()) -> [A].
+-spec dirty_read_all(atom()) -> [any()].
+-spec dirty_foreach_key(fun ((any()) -> any()), atom()) ->
+ 'ok' | 'aborted'.
+-spec dirty_dump_log(file:filename()) -> ok_or_error().
+-spec format(string(), [any()]) -> string().
+-spec format_many([{string(), [any()]}]) -> string().
+-spec format_stderr(string(), [any()]) -> 'ok'.
+-spec unfold (fun ((A) -> ({'true', B, A} | 'false')), A) -> {[B], A}.
+-spec ceil(number()) -> integer().
+-spec queue_fold(fun ((any(), B) -> B), B, queue:queue()) -> B.
+-spec sort_field_table(rabbit_framing:amqp_table()) ->
+ rabbit_framing:amqp_table().
+-spec pid_to_string(pid()) -> string().
+-spec string_to_pid(string()) -> pid().
+-spec pid_change_node(pid(), node()) -> pid().
+-spec node_to_fake_pid(atom()) -> pid().
+-spec version_compare(string(), string()) -> 'lt' | 'eq' | 'gt'.
+-spec version_compare
+ (rabbit_semver:version_string(), rabbit_semver:version_string(),
+ ('lt' | 'lte' | 'eq' | 'gte' | 'gt')) -> boolean().
+-spec version_minor_equivalent(rabbit_semver:version_string(), rabbit_semver:version_string()) -> boolean().
+-spec dict_cons(any(), any(), dict:dict()) -> dict:dict().
+-spec orddict_cons(any(), any(), orddict:orddict()) -> orddict:orddict().
+-spec gb_trees_cons(any(), any(), gb_trees:tree()) -> gb_trees:tree().
+-spec gb_trees_fold(fun ((any(), any(), A) -> A), A, gb_trees:tree()) -> A.
+-spec gb_trees_foreach(fun ((any(), any()) -> any()), gb_trees:tree()) ->
+ 'ok'.
+-spec all_module_attributes(atom()) -> [{atom(), atom(), [term()]}].
+-spec build_acyclic_graph
+ (graph_vertex_fun(), graph_edge_fun(), [{atom(), [term()]}]) ->
+ rabbit_types:ok_or_error2(
+ digraph:graph(),
+ {'vertex', 'duplicate', digraph:vertex()} |
+ {'edge',
+ ({bad_vertex, digraph:vertex()} |
+ {bad_edge, [digraph:vertex()]}),
+ digraph:vertex(), digraph:vertex()}).
+-spec const(A) -> thunk(A).
+-spec ntoa(inet:ip_address()) -> string().
+-spec ntoab(inet:ip_address()) -> string().
+-spec is_process_alive(pid()) -> boolean().
+
+-spec pmerge(term(), term(), [term()]) -> [term()].
+-spec plmerge([term()], [term()]) -> [term()].
+-spec pset(term(), term(), [term()]) -> [term()].
+-spec format_message_queue(any(), priority_queue:q()) -> term().
+-spec os_cmd(string()) -> string().
+-spec is_os_process_alive(non_neg_integer()) -> boolean().
+-spec gb_sets_difference(gb_sets:set(), gb_sets:set()) -> gb_sets:set().
+-spec version() -> string().
+-spec otp_release() -> string().
+-spec otp_system_version() -> string().
+-spec platform_and_version() -> string().
+-spec rabbitmq_and_erlang_versions() -> {string(), string()}.
+-spec which_applications() -> [{atom(), string(), string()}].
+-spec sequence_error([({'error', any()} | any())]) ->
+ {'error', any()} | any().
+-spec check_expiry(integer()) -> rabbit_types:ok_or_error(any()).
+-spec base64url(binary()) -> string().
+-spec interval_operation
+ ({atom(), atom(), any()}, float(), non_neg_integer(), non_neg_integer(),
+ non_neg_integer()) ->
+ {any(), non_neg_integer()}.
+-spec ensure_timer(A, non_neg_integer(), non_neg_integer(), any()) -> A.
+-spec stop_timer(A, non_neg_integer()) -> A.
+-spec send_after(non_neg_integer(), pid(), any()) -> tref().
+-spec cancel_timer(tref()) -> 'ok'.
+-spec get_parent() -> pid().
+-spec store_proc_name(atom(), rabbit_types:proc_name()) -> ok.
+-spec store_proc_name(rabbit_types:proc_type_and_name()) -> ok.
+-spec get_proc_name() -> rabbit_types:proc_name().
+-spec moving_average(float(), float(), float(), float() | 'undefined') ->
+ float().
+-spec get_env(atom(), atom(), term()) -> term().
+-spec get_channel_operation_timeout() -> non_neg_integer().
+-spec random(non_neg_integer()) -> non_neg_integer().
+-spec get_gc_info(pid()) -> [any()].
+-spec group_proplists_by(fun((proplists:proplist()) -> any()),
+ list(proplists:proplist())) -> list(list(proplists:proplist())).
+
+
+%%----------------------------------------------------------------------------
+
+method_record_type(Record) ->
+ element(1, Record).
+
+polite_pause() ->
+ polite_pause(3000).
+
+polite_pause(N) ->
+ receive
+ after N -> done
+ end.
+
+die(Error) ->
+ protocol_error(Error, "~w", [Error]).
+
+frame_error(MethodName, BinaryFields) ->
+ protocol_error(frame_error, "cannot decode ~w", [BinaryFields], MethodName).
+
+amqp_error(Name, ExplanationFormat, Params, Method) ->
+ Explanation = format(ExplanationFormat, Params),
+ #amqp_error{name = Name, explanation = Explanation, method = Method}.
+
+protocol_error(Name, ExplanationFormat, Params) ->
+ protocol_error(Name, ExplanationFormat, Params, none).
+
+protocol_error(Name, ExplanationFormat, Params, Method) ->
+ protocol_error(amqp_error(Name, ExplanationFormat, Params, Method)).
+
+protocol_error(#amqp_error{} = Error) ->
+ exit(Error).
+
+type_class(byte) -> int;
+type_class(short) -> int;
+type_class(signedint) -> int;
+type_class(long) -> int;
+type_class(decimal) -> int;
+type_class(unsignedbyte) -> int;
+type_class(unsignedshort) -> int;
+type_class(unsignedint) -> int;
+type_class(float) -> float;
+type_class(double) -> float;
+type_class(Other) -> Other.
+
+assert_args_equivalence(Orig, New, Name, Keys) ->
+ [assert_args_equivalence1(Orig, New, Name, Key) || Key <- Keys],
+ ok.
+
+assert_args_equivalence1(Orig, New, Name, Key) ->
+ {Orig1, New1} = {table_lookup(Orig, Key), table_lookup(New, Key)},
+ case {Orig1, New1} of
+ {Same, Same} ->
+ ok;
+ {{OrigType, OrigVal}, {NewType, NewVal}} ->
+ case type_class(OrigType) == type_class(NewType) andalso
+ OrigVal == NewVal of
+ true -> ok;
+ false -> assert_field_equivalence(OrigVal, NewVal, Name, Key)
+ end;
+ {OrigTypeVal, NewTypeVal} ->
+ assert_field_equivalence(OrigTypeVal, NewTypeVal, Name, Key)
+ end.
+
+%% Classic queues do not necessarily have an x-queue-type field associated with them
+%% so we special-case that scenario here
+%%
+%% Fixes rabbitmq/rabbitmq-common#341
+%%
+assert_field_equivalence(_Orig, _Orig, _Name, _Key) ->
+ ok;
+assert_field_equivalence(undefined, {longstr, <<"classic">>}, _Name, <<"x-queue-type">>) ->
+ ok;
+assert_field_equivalence({longstr, <<"classic">>}, undefined, _Name, <<"x-queue-type">>) ->
+ ok;
+assert_field_equivalence(Orig, New, Name, Key) ->
+ equivalence_fail(Orig, New, Name, Key).
+
+equivalence_fail(Orig, New, Name, Key) ->
+ protocol_error(precondition_failed, "inequivalent arg '~s' "
+ "for ~s: received ~s but current is ~s",
+ [Key, rs(Name), val(New), val(Orig)]).
+
+val(undefined) ->
+ "none";
+val({Type, Value}) ->
+ ValFmt = case is_binary(Value) of
+ true -> "~s";
+ false -> "~p"
+ end,
+ format("the value '" ++ ValFmt ++ "' of type '~s'", [Value, Type]);
+val(Value) ->
+ format(case is_binary(Value) of
+ true -> "'~s'";
+ false -> "'~p'"
+ end, [Value]).
+
+%% Normally we'd call mnesia:dirty_read/1 here, but that is quite
+%% expensive due to general mnesia overheads (figuring out table types
+%% and locations, etc.). We get away with bypassing these because we
+%% know that the tables we are looking at here
+%% - are not the schema table
+%% - have a local ram copy
+%% - do not have any indices
+dirty_read({Table, Key}) ->
+ case ets:lookup(Table, Key) of
+ [Result] -> {ok, Result};
+ [] -> {error, not_found}
+ end.
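+
+%% Illustrative usage: dirty_read({rabbit_user, Username}) returns
+%% {ok, UserRecord} when present and {error, not_found} otherwise.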
+
+%%
+%% Attribute Tables
+%%
+
+table_lookup(Table, Key) ->
+ case lists:keysearch(Key, 1, Table) of
+ {value, {_, TypeBin, ValueBin}} -> {TypeBin, ValueBin};
+ false -> undefined
+ end.
+
+set_table_value(Table, Key, Type, Value) ->
+ sort_field_table(
+ lists:keystore(Key, 1, Table, {Key, Type, Value})).
+
+to_amqp_table(M) when is_map(M) ->
+ lists:reverse(maps:fold(fun(K, V, Acc) -> [to_amqp_table_row(K, V)|Acc] end,
+ [], M));
+to_amqp_table(L) when is_list(L) ->
+ L.
+
+to_amqp_table_row(K, V) ->
+ {T, V2} = type_val(V),
+ {K, T, V2}.
+
+to_amqp_array(L) ->
+ [type_val(I) || I <- L].
+
+type_val(M) when is_map(M) -> {table, to_amqp_table(M)};
+type_val(L) when is_list(L) -> {array, to_amqp_array(L)};
+type_val(X) when is_binary(X) -> {longstr, X};
+type_val(X) when is_integer(X) -> {long, X};
+type_val(X) when is_number(X) -> {double, X};
+type_val(true) -> {bool, true};
+type_val(false) -> {bool, false};
+type_val(null) -> throw({error, null_not_allowed});
+type_val(X) -> throw({error, {unhandled_type, X}}).
+
+amqp_table(unknown) -> unknown;
+amqp_table(undefined) -> amqp_table([]);
+amqp_table([]) -> #{};
+amqp_table(#{}) -> #{};
+amqp_table(Table) -> maps:from_list([{Name, amqp_value(Type, Value)} ||
+ {Name, Type, Value} <- Table]).
+
+amqp_value(array, Vs) -> [amqp_value(T, V) || {T, V} <- Vs];
+amqp_value(table, V) -> amqp_table(V);
+amqp_value(decimal, {Before, After}) ->
+ erlang:list_to_float(
+ lists:flatten(io_lib:format("~p.~p", [Before, After])));
+amqp_value(_Type, V) when is_binary(V) -> utf8_safe(V);
+amqp_value(_Type, V) -> V.
+
+
+%%
+%% Resources
+%%
+
+r(#resource{virtual_host = VHostPath}, Kind, Name) ->
+ #resource{virtual_host = VHostPath, kind = Kind, name = Name};
+r(VHostPath, Kind, Name) ->
+ #resource{virtual_host = VHostPath, kind = Kind, name = Name}.
+
+r(VHostPath, Kind) ->
+ #resource{virtual_host = VHostPath, kind = Kind, name = '_'}.
+
+r_arg(#resource{virtual_host = VHostPath}, Kind, Table, Key) ->
+ r_arg(VHostPath, Kind, Table, Key);
+r_arg(VHostPath, Kind, Table, Key) ->
+ case table_lookup(Table, Key) of
+ {longstr, NameBin} -> r(VHostPath, Kind, NameBin);
+ undefined -> undefined;
+ {Type, _} -> {error, {invalid_type, Type}}
+ end.
+
+rs(#resource{virtual_host = VHostPath, kind = topic, name = Name}) ->
+ format("'~s' in vhost '~s'", [Name, VHostPath]);
+rs(#resource{virtual_host = VHostPath, kind = Kind, name = Name}) ->
+ format("~s '~s' in vhost '~s'", [Kind, Name, VHostPath]).
+
+enable_cover() -> enable_cover(["."]).
+
+enable_cover(Dirs) ->
+ lists:foldl(fun (Dir, ok) ->
+ case cover:compile_beam_directory(
+ filename:join(lists:concat([Dir]),"ebin")) of
+ {error, _} = Err -> Err;
+ _ -> ok
+ end;
+ (_Dir, Err) ->
+ Err
+ end, ok, Dirs).
+
+start_cover(NodesS) ->
+ {ok, _} = cover:start([rabbit_nodes_common:make(N) || N <- NodesS]),
+ ok.
+
+report_cover() -> report_cover(["."]).
+
+report_cover(Dirs) -> [report_cover1(lists:concat([Dir])) || Dir <- Dirs], ok.
+
+report_cover1(Root) ->
+ Dir = filename:join(Root, "cover"),
+ ok = filelib:ensure_dir(filename:join(Dir, "junk")),
+ lists:foreach(fun (F) -> file:delete(F) end,
+ filelib:wildcard(filename:join(Dir, "*.html"))),
+ {ok, SummaryFile} = file:open(filename:join(Dir, "summary.txt"), [write]),
+ {CT, NCT} =
+ lists:foldl(
+ fun (M,{CovTot, NotCovTot}) ->
+ {ok, {M, {Cov, NotCov}}} = cover:analyze(M, module),
+ ok = report_coverage_percentage(SummaryFile,
+ Cov, NotCov, M),
+ {ok,_} = cover:analyze_to_file(
+ M,
+ filename:join(Dir, atom_to_list(M) ++ ".html"),
+ [html]),
+ {CovTot+Cov, NotCovTot+NotCov}
+ end,
+ {0, 0},
+ lists:sort(cover:modules())),
+ ok = report_coverage_percentage(SummaryFile, CT, NCT, 'TOTAL'),
+ ok = file:close(SummaryFile),
+ ok.
+
+report_coverage_percentage(File, Cov, NotCov, Mod) ->
+ io:fwrite(File, "~6.2f ~p~n",
+ [if
+ Cov+NotCov > 0 -> 100.0*Cov/(Cov+NotCov);
+ true -> 100.0
+ end,
+ Mod]).
+
+%% @doc Halts the emulator, returning the given status code to the OS.
+%% On Windows this function will block indefinitely so as to give the I/O
+%% subsystem time to flush stdout completely.
+quit(Status) ->
+ case os:type() of
+ {unix, _} -> halt(Status);
+ {win32, _} -> init:stop(Status),
+ receive
+ after infinity -> ok
+ end
+ end.
+
+throw_on_error(E, Thunk) ->
+ case Thunk() of
+ {error, Reason} -> throw({E, Reason});
+ {ok, Res} -> Res;
+ Res -> Res
+ end.
+
+with_exit_handler(Handler, Thunk) ->
+ try
+ Thunk()
+ catch
+ exit:{R, _} when ?IS_BENIGN_EXIT(R) -> Handler();
+ exit:{{R, _}, _} when ?IS_BENIGN_EXIT(R) -> Handler()
+ end.
+
+is_abnormal_exit(R) when ?IS_BENIGN_EXIT(R) -> false;
+is_abnormal_exit({R, _}) when ?IS_BENIGN_EXIT(R) -> false;
+is_abnormal_exit(_) -> true.
+
+filter_exit_map(F, L) ->
+ Ref = make_ref(),
+ lists:filter(fun (R) -> R =/= Ref end,
+ [with_exit_handler(
+ fun () -> Ref end,
+ fun () -> F(I) end) || I <- L]).
+
+
+with_user(Username, Thunk) ->
+ fun () ->
+ case mnesia:read({rabbit_user, Username}) of
+ [] ->
+ mnesia:abort({no_such_user, Username});
+ [_U] ->
+ Thunk()
+ end
+ end.
+
+execute_mnesia_transaction(TxFun) ->
+ %% Making this a sync_transaction allows us to use dirty_read
+ %% elsewhere and get a consistent result even when that read
+ %% executes on a different node.
+ case worker_pool:submit(
+ fun () ->
+ case mnesia:is_transaction() of
+ false -> DiskLogBefore = mnesia_dumper:get_log_writes(),
+ Res = mnesia:sync_transaction(TxFun),
+ DiskLogAfter = mnesia_dumper:get_log_writes(),
+ case DiskLogAfter == DiskLogBefore of
+ true -> file_handle_cache_stats:update(
+ mnesia_ram_tx),
+ Res;
+ false -> file_handle_cache_stats:update(
+ mnesia_disk_tx),
+ {sync, Res}
+ end;
+ true -> mnesia:sync_transaction(TxFun)
+ end
+ end, single) of
+ {sync, {atomic, Result}} -> mnesia_sync:sync(), Result;
+ {sync, {aborted, Reason}} -> throw({error, Reason});
+ {atomic, Result} -> Result;
+ {aborted, Reason} -> throw({error, Reason})
+ end.
+
+%% Like execute_mnesia_transaction/1, but with an additional pre- and
+%% post-commit function
+execute_mnesia_transaction(TxFun, PrePostCommitFun) ->
+ case mnesia:is_transaction() of
+ true -> throw(unexpected_transaction);
+ false -> ok
+ end,
+ PrePostCommitFun(execute_mnesia_transaction(
+ fun () ->
+ Result = TxFun(),
+ PrePostCommitFun(Result, true),
+ Result
+ end), false).
+
+%% Like execute_mnesia_transaction/2, but TxFun is expected to return a
+%% TailFun which gets called (only) immediately after the tx commit
+execute_mnesia_tx_with_tail(TxFun) ->
+ case mnesia:is_transaction() of
+ true -> execute_mnesia_transaction(TxFun);
+ false -> TailFun = execute_mnesia_transaction(TxFun),
+ TailFun()
+ end.
+
+ensure_ok(ok, _) -> ok;
+ensure_ok({error, Reason}, ErrorTag) -> throw({error, {ErrorTag, Reason}}).
+
+tcp_name(Prefix, IPAddress, Port)
+ when is_atom(Prefix) andalso is_number(Port) ->
+ list_to_atom(
+ format("~w_~s:~w", [Prefix, inet_parse:ntoa(IPAddress), Port])).
+
+format_inet_error(E) -> format("~w (~s)", [E, format_inet_error0(E)]).
+
+format_inet_error0(address) -> "cannot connect to host/port";
+format_inet_error0(timeout) -> "timed out";
+format_inet_error0(Error) -> inet:format_error(Error).
+
+%% base64:decode throws lots of weird errors. Catch and convert to one
+%% that will cause a bad_request.
+b64decode_or_throw(B64) ->
+ try
+ base64:decode(B64)
+ catch error:_ ->
+ throw({error, {not_base64, B64}})
+ end.
+
+utf8_safe(V) ->
+ try
+ _ = xmerl_ucs:from_utf8(V),
+ V
+ catch exit:{ucs, _} ->
+ Enc = split_lines(base64:encode(V)),
+ <<"Not UTF-8, base64 is: ", Enc/binary>>
+ end.
+
+%% MIME limits lines of base64-encoded data to 76 characters.
+split_lines(<<Text:76/binary, Rest/binary>>) ->
+ <<Text/binary, $\n, (split_lines(Rest))/binary>>;
+split_lines(Text) ->
+ Text.
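+%% For example, a 100-byte base64 binary becomes a 76-byte line, a
+%% newline, and the remaining 24 bytes.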
+
+
+%% This is a modified version of Luke Gorrie's pmap -
+%% https://lukego.livejournal.com/6753.html - that doesn't care about
+%% the order in which results are received.
+%%
+%% WARNING: This is deliberately lightweight rather than robust -- if F
+%% throws, upmap will hang forever, so make sure F doesn't throw!
+upmap(F, L) ->
+ Parent = self(),
+ Ref = make_ref(),
+ [receive {Ref, Result} -> Result end
+ || _ <- [spawn(fun () -> Parent ! {Ref, F(X)} end) || X <- L]].
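+%% For example (results may arrive in any order):
+%%   lists:sort(upmap(fun (X) -> X * X end, [1, 2, 3])) =:= [1, 4, 9]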
+
+map_in_order(F, L) ->
+ lists:reverse(
+ lists:foldl(fun (E, Acc) -> [F(E) | Acc] end, [], L)).
+
+%% Apply a pre-post-commit function to all entries in a table that
+%% satisfy a predicate, and return those entries.
+%%
+%% We ignore entries that have been modified or removed.
+table_filter(Pred, PrePostCommitFun, TableName) ->
+ lists:foldl(
+ fun (E, Acc) ->
+ case execute_mnesia_transaction(
+ fun () -> mnesia:match_object(TableName, E, read) =/= []
+ andalso Pred(E) end,
+ fun (false, _Tx) -> false;
+ (true, Tx) -> PrePostCommitFun(E, Tx), true
+ end) of
+ false -> Acc;
+ true -> [E | Acc]
+ end
+ end, [], dirty_read_all(TableName)).
+
+dirty_read_all(TableName) ->
+ mnesia:dirty_select(TableName, [{'$1',[],['$1']}]).
+
+dirty_foreach_key(F, TableName) ->
+ dirty_foreach_key1(F, TableName, mnesia:dirty_first(TableName)).
+
+dirty_foreach_key1(_F, _TableName, '$end_of_table') ->
+ ok;
+dirty_foreach_key1(F, TableName, K) ->
+ case catch mnesia:dirty_next(TableName, K) of
+ {'EXIT', _} ->
+ aborted;
+ NextKey ->
+ F(K),
+ dirty_foreach_key1(F, TableName, NextKey)
+ end.
+
+dirty_dump_log(FileName) ->
+ {ok, LH} = disk_log:open([{name, dirty_dump_log},
+ {mode, read_only},
+ {file, FileName}]),
+ dirty_dump_log1(LH, disk_log:chunk(LH, start)),
+ disk_log:close(LH).
+
+dirty_dump_log1(_LH, eof) ->
+ io:format("Done.~n");
+dirty_dump_log1(LH, {K, Terms}) ->
+ io:format("Chunk: ~p~n", [Terms]),
+ dirty_dump_log1(LH, disk_log:chunk(LH, K));
+dirty_dump_log1(LH, {K, Terms, BadBytes}) ->
+ io:format("Bad Chunk, ~p: ~p~n", [BadBytes, Terms]),
+ dirty_dump_log1(LH, disk_log:chunk(LH, K)).
+
+format(Fmt, Args) -> lists:flatten(io_lib:format(Fmt, Args)).
+
+format_many(List) ->
+ lists:flatten([io_lib:format(F ++ "~n", A) || {F, A} <- List]).
+
+format_stderr(Fmt, Args) ->
+ io:format(standard_error, Fmt, Args),
+ ok.
+
+unfold(Fun, Init) ->
+ unfold(Fun, [], Init).
+
+unfold(Fun, Acc, Init) ->
+ case Fun(Init) of
+ {true, E, I} -> unfold(Fun, [E|Acc], I);
+ false -> {Acc, Init}
+ end.
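+%% A small illustration: counting down from 5 accumulates in reverse, so
+%%   unfold(fun (0) -> false; (N) -> {true, N, N - 1} end, 5)
+%% returns {[1, 2, 3, 4, 5], 0}.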
+
+ceil(N) ->
+ T = trunc(N),
+ case N == T of
+ true -> T;
+ false -> 1 + T
+ end.
+
+parse_bool(<<"true">>) -> true;
+parse_bool(<<"false">>) -> false;
+parse_bool(true) -> true;
+parse_bool(false) -> false;
+parse_bool(undefined) -> undefined;
+parse_bool(V) -> throw({error, {not_boolean, V}}).
+
+parse_int(I) when is_integer(I) -> I;
+parse_int(F) when is_number(F) -> trunc(F);
+parse_int(S) -> try
+ list_to_integer(binary_to_list(S))
+ catch error:badarg ->
+ throw({error, {not_integer, S}})
+ end.
+
+
+queue_fold(Fun, Init, Q) ->
+ case queue:out(Q) of
+ {empty, _Q} -> Init;
+ {{value, V}, Q1} -> queue_fold(Fun, Fun(V, Init), Q1)
+ end.
+
+%% Sorts a list of AMQP 0-9-1 table fields as per the AMQP 0-9-1 spec
+sort_field_table([]) ->
+ [];
+sort_field_table(M) when is_map(M) andalso map_size(M) =:= 0 ->
+ [];
+sort_field_table(Arguments) when is_map(Arguments) ->
+ sort_field_table(maps:to_list(Arguments));
+sort_field_table(Arguments) ->
+ lists:keysort(1, Arguments).
+
+atom_to_binary(A) ->
+ list_to_binary(atom_to_list(A)).
+
+%% This provides a string representation of a pid that is the same
+%% regardless of what node we are running on. The representation also
+%% permits easy identification of the pid's node.
+pid_to_string(Pid) when is_pid(Pid) ->
+ {Node, Cre, Id, Ser} = decompose_pid(Pid),
+ format("<~s.~B.~B.~B>", [Node, Cre, Id, Ser]).
+
+%% inverse of above
+string_to_pid(Str) ->
+ Err = {error, {invalid_pid_syntax, Str}},
+ %% The \ before the trailing $ is only there to keep emacs
+ %% font-lock from getting confused.
+ case re:run(Str, "^<(.*)\\.(\\d+)\\.(\\d+)\\.(\\d+)>\$",
+ [{capture,all_but_first,list}]) of
+ {match, [NodeStr, CreStr, IdStr, SerStr]} ->
+ [Cre, Id, Ser] = lists:map(fun list_to_integer/1,
+ [CreStr, IdStr, SerStr]),
+ compose_pid(list_to_atom(NodeStr), Cre, Id, Ser);
+ nomatch ->
+ throw(Err)
+ end.
+
+pid_change_node(Pid, NewNode) ->
+ {_OldNode, Cre, Id, Ser} = decompose_pid(Pid),
+ compose_pid(NewNode, Cre, Id, Ser).
+
+%% node(node_to_fake_pid(Node)) =:= Node.
+node_to_fake_pid(Node) ->
+ compose_pid(Node, 0, 0, 0).
+
+decompose_pid(Pid) when is_pid(Pid) ->
+ %% see http://erlang.org/doc/apps/erts/erl_ext_dist.html (8.10 and
+ %% 8.7)
+ Node = node(Pid),
+ BinPid0 = term_to_binary(Pid),
+ case BinPid0 of
+ %% NEW_PID_EXT
+ <<131, 88, BinPid/bits>> ->
+ NodeByteSize = byte_size(BinPid0) - 14,
+ <<_NodePrefix:NodeByteSize/binary, Id:32, Ser:32, Cre:32>> = BinPid,
+ {Node, Cre, Id, Ser};
+ %% PID_EXT
+ <<131, 103, BinPid/bits>> ->
+ NodeByteSize = byte_size(BinPid0) - 11,
+ <<_NodePrefix:NodeByteSize/binary, Id:32, Ser:32, Cre:8>> = BinPid,
+ {Node, Cre, Id, Ser}
+ end.
+
+compose_pid(Node, Cre, Id, Ser) ->
+ <<131,NodeEnc/binary>> = term_to_binary(Node),
+ binary_to_term(<<131,88,NodeEnc/binary,Id:32,Ser:32,Cre:32>>).
+
+version_compare(A, B, eq) -> rabbit_semver:eql(A, B);
+version_compare(A, B, lt) -> rabbit_semver:lt(A, B);
+version_compare(A, B, lte) -> rabbit_semver:lte(A, B);
+version_compare(A, B, gt) -> rabbit_semver:gt(A, B);
+version_compare(A, B, gte) -> rabbit_semver:gte(A, B).
+
+version_compare(A, B) ->
+ case version_compare(A, B, lt) of
+ true -> lt;
+ false -> case version_compare(A, B, gt) of
+ true -> gt;
+ false -> eq
+ end
+ end.
+
+%% For versions starting from 3.7.x:
+%% Versions are considered compatible (except for special cases; see
+%% below). The feature flags will determine if they are actually
+%% compatible.
+%%
+%% For versions up to 3.7.x:
+%% a.b.c and a.b.d match, but a.b.c and a.d.e don't. If
+%% versions do not match that pattern, just compare them.
+%%
+%% Special case for 3.6.6 because it introduced a change to the schema.
+%% For example, 3.6.6 is not compatible with 3.6.5.
+%% This special case can be removed once 3.6.x reaches EOL
+version_minor_equivalent(A, B) ->
+ {{MajA, MinA, PatchA, _}, _} = rabbit_semver:normalize(rabbit_semver:parse(A)),
+ {{MajB, MinB, PatchB, _}, _} = rabbit_semver:normalize(rabbit_semver:parse(B)),
+
+ case {MajA, MinA, MajB, MinB} of
+ {3, 6, 3, 6} ->
+ if
+ PatchA >= 6 -> PatchB >= 6;
+ PatchA < 6 -> PatchB < 6;
+ true -> false
+ end;
+ _
+ when (MajA < 3 orelse (MajA =:= 3 andalso MinA =< 6))
+ orelse
+ (MajB < 3 orelse (MajB =:= 3 andalso MinB =< 6)) ->
+ MajA =:= MajB andalso MinA =:= MinB;
+ _ ->
+ %% Starting with RabbitMQ 3.7.x, we consider this
+ %% minor release series and all subsequent series to
+ %% be possibly compatible, based on just the version.
+ %% The real compatibility check is deferred to the
+ %% rabbit_feature_flags module in rabbitmq-server.
+ true
+ end.
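+%% For example: version_minor_equivalent("3.6.5", "3.6.6") is false (the
+%% schema-change special case), while version_minor_equivalent("3.7.0",
+%% "3.8.1") is true (compatibility is deferred to feature flags).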
+
+%% This is the same as above except that e.g. 3.7.x and 3.8.x are
+%% considered incompatible (as if there were no feature flags). This is
+%% useful to check plugin compatibility (`broker_versions_requirement`
+%% field in plugins).
+
+strict_version_minor_equivalent(A, B) ->
+ {{MajA, MinA, PatchA, _}, _} = rabbit_semver:normalize(rabbit_semver:parse(A)),
+ {{MajB, MinB, PatchB, _}, _} = rabbit_semver:normalize(rabbit_semver:parse(B)),
+
+ case {MajA, MinA, MajB, MinB} of
+ {3, 6, 3, 6} -> if
+ PatchA >= 6 -> PatchB >= 6;
+ PatchA < 6 -> PatchB < 6;
+ true -> false
+ end;
+ _ -> MajA =:= MajB andalso MinA =:= MinB
+ end.
+
+dict_cons(Key, Value, Dict) ->
+ dict:update(Key, fun (List) -> [Value | List] end, [Value], Dict).
+
+orddict_cons(Key, Value, Dict) ->
+ orddict:update(Key, fun (List) -> [Value | List] end, [Value], Dict).
+
+maps_cons(Key, Value, Map) ->
+ maps:update_with(Key, fun (List) -> [Value | List] end, [Value], Map).
+
+gb_trees_cons(Key, Value, Tree) ->
+ case gb_trees:lookup(Key, Tree) of
+ {value, Values} -> gb_trees:update(Key, [Value | Values], Tree);
+ none -> gb_trees:insert(Key, [Value], Tree)
+ end.
+
+gb_trees_fold(Fun, Acc, Tree) ->
+ gb_trees_fold1(Fun, Acc, gb_trees:next(gb_trees:iterator(Tree))).
+
+gb_trees_fold1(_Fun, Acc, none) ->
+ Acc;
+gb_trees_fold1(Fun, Acc, {Key, Val, It}) ->
+ gb_trees_fold1(Fun, Fun(Key, Val, Acc), gb_trees:next(It)).
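+%% e.g. gb_trees_fold(fun (_K, V, Acc) -> Acc + V end, 0, Tree) sums the
+%% values in Tree.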
+
+gb_trees_foreach(Fun, Tree) ->
+ gb_trees_fold(fun (Key, Val, Acc) -> Fun(Key, Val), Acc end, ok, Tree).
+
+module_attributes(Module) ->
+ try
+ Module:module_info(attributes)
+ catch
+ _:undef ->
+ io:format("WARNING: module ~p not found, so not scanned for boot steps.~n",
+ [Module]),
+ []
+ end.
+
+all_module_attributes(Name) ->
+ Apps = [App || {App, _, _} <- application:loaded_applications()],
+ module_attributes_from_apps(Name, Apps).
+
+rabbitmq_related_module_attributes(Name) ->
+ Apps = rabbitmq_related_apps(),
+ module_attributes_from_apps(Name, Apps).
+
+rabbitmq_related_apps() ->
+ [App
+ || {App, _, _} <- application:loaded_applications(),
+ %% Only select RabbitMQ-related applications.
+ App =:= rabbit_common orelse
+ App =:= rabbitmq_prelaunch orelse
+ App =:= rabbit orelse
+ lists:member(
+ rabbit,
+ element(2, application:get_key(App, applications)))].
+
+module_attributes_from_apps(Name, Apps) ->
+ Targets =
+ lists:usort(
+ lists:append(
+ [[{App, Module} || Module <- Modules] ||
+ App <- Apps,
+ {ok, Modules} <- [application:get_key(App, modules)]])),
+ lists:foldl(
+ fun ({App, Module}, Acc) ->
+ case lists:append([Atts || {N, Atts} <- module_attributes(Module),
+ N =:= Name]) of
+ [] -> Acc;
+ Atts -> [{App, Module, Atts} | Acc]
+ end
+ end, [], Targets).
+
+build_acyclic_graph(VertexFun, EdgeFun, Graph) ->
+ G = digraph:new([acyclic]),
+ try
+ _ = [case digraph:vertex(G, Vertex) of
+ false -> digraph:add_vertex(G, Vertex, Label);
+ _ -> ok = throw({graph_error, {vertex, duplicate, Vertex}})
+ end || GraphElem <- Graph,
+ {Vertex, Label} <- VertexFun(GraphElem)],
+ [case digraph:add_edge(G, From, To) of
+ {error, E} -> throw({graph_error, {edge, E, From, To}});
+ _ -> ok
+ end || GraphElem <- Graph,
+ {From, To} <- EdgeFun(GraphElem)],
+ {ok, G}
+ catch {graph_error, Reason} ->
+ true = digraph:delete(G),
+ {error, Reason}
+ end.
+
+const(X) -> fun () -> X end.
+
+%% Format IPv4-mapped IPv6 addresses as IPv4, since they're what we see
+%% when IPv6 is enabled but not used (i.e. 99% of the time).
+ntoa({0,0,0,0,0,16#ffff,AB,CD}) ->
+ inet_parse:ntoa({AB bsr 8, AB rem 256, CD bsr 8, CD rem 256});
+ntoa(IP) ->
+ inet_parse:ntoa(IP).
+
+ntoab(IP) ->
+ Str = ntoa(IP),
+ case string:str(Str, ":") of
+ 0 -> Str;
+ _ -> "[" ++ Str ++ "]"
+ end.
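+%% e.g. ntoab({127,0,0,1}) is "127.0.0.1", while ntoab({0,0,0,0,0,0,0,1})
+%% is "[::1]".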
+
+%% We try to avoid reconnecting to down nodes here; this is used in a
+%% loop in rabbit_amqqueue:on_node_down/1 and any delays we incur
+%% would be bad news.
+%%
+%% See also rabbit_mnesia:is_process_alive/1 which also requires the
+%% process be in the same running cluster as us (i.e. not partitioned
+%% or some random node).
+is_process_alive(Pid) when node(Pid) =:= node() ->
+ erlang:is_process_alive(Pid);
+is_process_alive(Pid) ->
+ Node = node(Pid),
+ lists:member(Node, [node() | nodes(connected)]) andalso
+ rpc:call(Node, erlang, is_process_alive, [Pid]) =:= true.
+
+-spec pget(term(), list() | map()) -> term().
+pget(K, M) when is_map(M) ->
+ case maps:find(K, M) of
+ {ok, V} ->
+ V;
+ _ ->
+ undefined
+ end;
+
+pget(K, P) ->
+ case lists:keyfind(K, 1, P) of
+ {K, V} ->
+ V;
+ _ ->
+ undefined
+ end.
+
+-spec pget(term(), list() | map(), term()) -> term().
+pget(K, M, D) when is_map(M) ->
+ case maps:find(K, M) of
+ {ok, V} ->
+ V;
+ _ ->
+ D
+ end;
+
+pget(K, P, D) ->
+ case lists:keyfind(K, 1, P) of
+ {K, V} ->
+ V;
+ _ ->
+ D
+ end.
+
+-spec pget_or_die(term(), list() | map()) -> term() | no_return().
+pget_or_die(K, M) when is_map(M) ->
+ case maps:find(K, M) of
+ error -> exit({error, key_missing, K});
+ {ok, V} -> V
+ end;
+
+pget_or_die(K, P) ->
+ case proplists:get_value(K, P) of
+ undefined -> exit({error, key_missing, K});
+ V -> V
+ end.
+
+pupdate(K, UpdateFun, P) ->
+ case lists:keyfind(K, 1, P) of
+ {K, V} ->
+ pset(K, UpdateFun(V), P);
+ _ ->
+ undefined
+ end.
+
+%% property merge
+pmerge(Key, Val, List) ->
+ case proplists:is_defined(Key, List) of
+ true -> List;
+ _ -> [{Key, Val} | List]
+ end.
+
+%% proplists merge
+plmerge(P1, P2) ->
+ %% Value from P1 suppresses value from P2
+ maps:to_list(maps:merge(maps:from_list(P2),
+ maps:from_list(P1))).
+
+%% groups a list of proplists by a key function
+group_proplists_by(KeyFun, ListOfPropLists) ->
+ Res = lists:foldl(fun(P, Agg) ->
+ Key = KeyFun(P),
+ Val = case maps:find(Key, Agg) of
+ {ok, O} -> [P|O];
+ error -> [P]
+ end,
+ maps:put(Key, Val, Agg)
+ end, #{}, ListOfPropLists),
+ [ X || {_, X} <- maps:to_list(Res)].
+
+pset(Key, Value, List) -> [{Key, Value} | proplists:delete(Key, List)].
+
+format_message_queue(_Opt, MQ) ->
+ Len = priority_queue:len(MQ),
+ {Len,
+ case Len > 100 of
+ false -> priority_queue:to_list(MQ);
+ true -> {summary,
+ maps:to_list(
+ lists:foldl(
+ fun ({P, V}, Counts) ->
+ maps:update_with(
+ {P, format_message_queue_entry(V)},
+ fun(Old) -> Old + 1 end, 1, Counts)
+ end, maps:new(), priority_queue:to_list(MQ)))}
+ end}.
+
+format_message_queue_entry(V) when is_atom(V) ->
+ V;
+format_message_queue_entry(V) when is_tuple(V) ->
+ list_to_tuple([format_message_queue_entry(E) || E <- tuple_to_list(V)]);
+format_message_queue_entry(_V) ->
+ '_'.
+
+%% Same as rpc:multicall/4 but concatenates all results.
+%% M, F, A is expected to return a list. If it does not,
+%% its return value will be wrapped in a list.
+-spec append_rpc_all_nodes([node()], atom(), atom(), [any()]) -> [any()].
+append_rpc_all_nodes(Nodes, M, F, A) ->
+ do_append_rpc_all_nodes(Nodes, M, F, A, ?RPC_INFINITE_TIMEOUT).
+
+-spec append_rpc_all_nodes([node()], atom(), atom(), [any()], timeout()) -> [any()].
+append_rpc_all_nodes(Nodes, M, F, A, Timeout) ->
+ do_append_rpc_all_nodes(Nodes, M, F, A, Timeout).
+
+do_append_rpc_all_nodes(Nodes, M, F, A, ?RPC_INFINITE_TIMEOUT) ->
+ {ResL, _} = rpc:multicall(Nodes, M, F, A, ?RPC_INFINITE_TIMEOUT),
+ process_rpc_multicall_result(ResL);
+do_append_rpc_all_nodes(Nodes, M, F, A, Timeout) ->
+ {ResL, _} = try
+ rpc:multicall(Nodes, M, F, A, Timeout)
+ catch
+ error:internal_error -> {[], Nodes}
+ end,
+ process_rpc_multicall_result(ResL).
+
+process_rpc_multicall_result(ResL) ->
+ lists:append([case Res of
+ {badrpc, _} -> [];
+ Xs when is_list(Xs) -> Xs;
+ %% wrap it in a list
+ Other -> [Other]
+ end || Res <- ResL]).
+
+os_cmd(Command) ->
+ case os:type() of
+ {win32, _} ->
+ %% Clink workaround; see
+ %% https://code.google.com/p/clink/issues/detail?id=141
+ os:cmd(" " ++ Command);
+ _ ->
+ %% Don't just return "/bin/sh: <cmd>: not found" if not found
+ Exec = hd(string:tokens(Command, " ")),
+ case os:find_executable(Exec) of
+ false -> throw({command_not_found, Exec});
+ _ -> os:cmd(Command)
+ end
+ end.
+
+is_os_process_alive(Pid) ->
+ with_os([{unix, fun () ->
+ run_ps(Pid) =:= 0
+ end},
+ {win32, fun () ->
+ PidS = rabbit_data_coercion:to_list(Pid),
+ case os:find_executable("tasklist.exe") of
+ false ->
+ Cmd =
+ format(
+ "PowerShell -Command "
+ "\"(Get-Process -Id ~s).ProcessName\"",
+ [PidS]),
+ Res =
+ os_cmd(Cmd ++ " 2>&1") -- [$\r, $\n],
+ case Res of
+ "erl" -> true;
+ "werl" -> true;
+ _ -> false
+ end;
+ _ ->
+ Cmd =
+ "tasklist /nh /fi "
+ "\"pid eq " ++ PidS ++ "\"",
+ Res = os_cmd(Cmd ++ " 2>&1"),
+ match =:= re:run(Res,
+ "erl\\.exe",
+ [{capture, none}])
+ end
+ end}]).
+
+with_os(Handlers) ->
+ {OsFamily, _} = os:type(),
+ case proplists:get_value(OsFamily, Handlers) of
+ undefined -> throw({unsupported_os, OsFamily});
+ Handler -> Handler()
+ end.
+
+run_ps(Pid) ->
+ Cmd = "ps -p " ++ rabbit_data_coercion:to_list(Pid),
+ Port = erlang:open_port({spawn, Cmd},
+ [exit_status, {line, 16384},
+ use_stdio, stderr_to_stdout]),
+ exit_loop(Port).
+
+exit_loop(Port) ->
+ receive
+ {Port, {exit_status, Rc}} -> Rc;
+ {Port, _} -> exit_loop(Port)
+ end.
+
+gb_sets_difference(S1, S2) ->
+ gb_sets:fold(fun gb_sets:delete_any/2, S1, S2).
+
+version() ->
+ {ok, VSN} = application:get_key(rabbit, vsn),
+ VSN.
+
+%% See https://www.erlang.org/doc/system_principles/versions.html
+otp_release() ->
+ File = filename:join([code:root_dir(), "releases",
+ erlang:system_info(otp_release), "OTP_VERSION"]),
+ case file:read_file(File) of
+ {ok, VerBin} ->
+ %% 17.0 or later, we need the file for the minor version
+ string:strip(binary_to_list(VerBin), both, $\n);
+ {error, _} ->
+ %% R16B03 or earlier (no file, otp_release is correct)
+ %% or we couldn't read the file (so this is best we can do)
+ erlang:system_info(otp_release)
+ end.
+
+platform_and_version() ->
+ string:join(["Erlang/OTP", otp_release()], " ").
+
+otp_system_version() ->
+ string:strip(erlang:system_info(system_version), both, $\n).
+
+rabbitmq_and_erlang_versions() ->
+ {version(), otp_release()}.
+
+%% application:which_applications(infinity) is dangerous, since it can
+%% cause deadlocks on shutdown. So we have to use a timeout variant,
+%% but w/o creating spurious timeout errors. The timeout value is twice
+%% that of gen_server:call/2.
+which_applications() ->
+ try
+ application:which_applications(10000)
+ catch
+ exit:{timeout, _} -> []
+ end.
+
+sequence_error([T]) -> T;
+sequence_error([{error, _} = Error | _]) -> Error;
+sequence_error([_ | Rest]) -> sequence_error(Rest).
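+%% e.g. sequence_error([ok, {error, enoent}, ok]) is {error, enoent},
+%% and sequence_error([ok, ok]) is ok.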
+
+check_expiry(N) when N < 0 -> {error, {value_negative, N}};
+check_expiry(_N) -> ok.
+
+base64url(In) ->
+ lists:reverse(lists:foldl(fun ($\+, Acc) -> [$\- | Acc];
+ ($\/, Acc) -> [$\_ | Acc];
+ ($\=, Acc) -> Acc;
+ (Chr, Acc) -> [Chr | Acc]
+ end, [], base64:encode_to_string(In))).
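+%% This is the URL-safe base64 variant: '+' becomes '-', '/' becomes '_'
+%% and padding is dropped, e.g. base64url(<<251>>) is "-w" (plain base64
+%% would be "+w==").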
+
+%% Ideally, you'd want Fun to run every IdealInterval, but you don't
+%% want it to take more than MaxRatio of IdealInterval. So if it takes
+%% longer, you want to run it less often. We therefore time how long it
+%% takes to run, and then suggest how long you should wait before
+%% running it again, up to a user-specified maximum interval. Times are in millis.
+interval_operation({M, F, A}, MaxRatio, MaxInterval, IdealInterval, LastInterval) ->
+ {Micros, Res} = timer:tc(M, F, A),
+ {Res, case {Micros > 1000 * (MaxRatio * IdealInterval),
+ Micros > 1000 * (MaxRatio * LastInterval)} of
+ {true, true} -> lists:min([MaxInterval,
+ round(LastInterval * 1.5)]);
+ {true, false} -> LastInterval;
+ {false, false} -> lists:max([IdealInterval,
+ round(LastInterval / 1.5)])
+ end}.
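+%% A worked example (hypothetical numbers): with MaxRatio = 0.02,
+%% IdealInterval = 1000 and LastInterval = 1000, a run taking 50ms
+%% (50000us) exceeds both thresholds (20000us), so the suggested next
+%% interval is min(MaxInterval, 1500); a run taking 10ms exceeds neither,
+%% so it is max(1000, round(1000 / 1.5)) = 1000.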
+
+ensure_timer(State, Idx, After, Msg) ->
+ case element(Idx, State) of
+ undefined -> TRef = send_after(After, self(), Msg),
+ setelement(Idx, State, TRef);
+ _ -> State
+ end.
+
+stop_timer(State, Idx) ->
+ case element(Idx, State) of
+ undefined -> State;
+ TRef -> cancel_timer(TRef),
+ setelement(Idx, State, undefined)
+ end.
+
+%% timer:send_after/3 goes through a single timer process but allows
+%% long delays. erlang:send_after/3 does not have a bottleneck but
+%% only allows max 2^32-1 millis.
+-define(MAX_ERLANG_SEND_AFTER, 4294967295).
+send_after(Millis, Pid, Msg) when Millis > ?MAX_ERLANG_SEND_AFTER ->
+ {ok, Ref} = timer:send_after(Millis, Pid, Msg),
+ {timer, Ref};
+send_after(Millis, Pid, Msg) ->
+ {erlang, erlang:send_after(Millis, Pid, Msg)}.
+
+cancel_timer({erlang, Ref}) -> _ = erlang:cancel_timer(Ref),
+ ok;
+cancel_timer({timer, Ref}) -> {ok, cancel} = timer:cancel(Ref),
+ ok.
+
+store_proc_name(Type, ProcName) -> store_proc_name({Type, ProcName}).
+store_proc_name(TypeProcName) -> put(process_name, TypeProcName).
+
+get_proc_name() ->
+ case get(process_name) of
+ undefined ->
+ undefined;
+ {_Type, Name} ->
+ {ok, Name}
+ end.
+
+%% application:get_env/3 is only available in R16B01 or later.
+get_env(Application, Key, Def) ->
+ case application:get_env(Application, Key) of
+ {ok, Val} -> Val;
+ undefined -> Def
+ end.
+
+get_channel_operation_timeout() ->
+ %% Default channel_operation_timeout set to net_ticktime + 10s to
+ %% give allowance for any down messages to be received first,
+ %% whenever it is used for cross-node calls with timeouts.
+ Default = (net_kernel:get_net_ticktime() + 10) * 1000,
+ application:get_env(rabbit, channel_operation_timeout, Default).
+
+moving_average(_Time, _HalfLife, Next, undefined) ->
+ Next;
+%% We want the Weight to decrease as Time goes up (since Weight is the
+%% weight for the current sample, not the new one), so that the moving
+%% average decays at the same speed regardless of how long the time is
+%% between samplings. So we want Weight = math:exp(Something), where
+%% Something turns out to be negative.
+%%
+%% We want to determine Something here in terms of the Time taken
+%% since the last measurement, and a HalfLife. So we want Weight =
+%% math:exp(Time * Constant / HalfLife). What should Constant be? We
+%% want Weight to be 0.5 when Time = HalfLife.
+%%
+%% Plug those numbers in and you get 0.5 = math:exp(Constant). Take
+%% the log of each side and you get math:log(0.5) = Constant.
+moving_average(Time, HalfLife, Next, Current) ->
+ Weight = math:exp(Time * math:log(0.5) / HalfLife),
+ Next * (1 - Weight) + Current * Weight.
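+%% Sanity check: with Time =:= HalfLife, Weight is 0.5, so e.g.
+%% moving_average(1000, 1000, 10.0, 20.0) is 15.0.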
+
+random(N) ->
+ rand:uniform(N).
+
+-spec escape_html_tags(string()) -> binary().
+
+escape_html_tags(S) ->
+ escape_html_tags(rabbit_data_coercion:to_list(S), []).
+
+
+-spec escape_html_tags(string(), string()) -> binary().
+
+escape_html_tags([], Acc) ->
+ rabbit_data_coercion:to_binary(lists:reverse(Acc));
+escape_html_tags("<" ++ Rest, Acc) ->
+ escape_html_tags(Rest, lists:reverse("&lt;", Acc));
+escape_html_tags(">" ++ Rest, Acc) ->
+ escape_html_tags(Rest, lists:reverse("&gt;", Acc));
+escape_html_tags("&" ++ Rest, Acc) ->
+ escape_html_tags(Rest, lists:reverse("&amp;", Acc));
+escape_html_tags([C | Rest], Acc) ->
+ escape_html_tags(Rest, [C | Acc]).
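+%% e.g. escape_html_tags("<b> & </b>") is <<"&lt;b&gt; &amp; &lt;/b&gt;">>.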
+
+%% If the server we are talking to has non-standard net_ticktime, and
+%% our connection lasts a while, we could get disconnected because of
+%% a timeout unless we set our ticktime to be the same. So let's do
+%% that.
+%% TODO: do not use an infinite timeout!
+-spec rpc_call(node(), atom(), atom(), [any()]) -> any() | {badrpc, term()}.
+rpc_call(Node, Mod, Fun, Args) ->
+ rpc_call(Node, Mod, Fun, Args, ?RPC_INFINITE_TIMEOUT).
+
+-spec rpc_call(node(), atom(), atom(), [any()], infinity | non_neg_integer()) -> any() | {badrpc, term()}.
+rpc_call(Node, Mod, Fun, Args, Timeout) ->
+ case rpc:call(Node, net_kernel, get_net_ticktime, [], Timeout) of
+ {badrpc, _} = E -> E;
+ ignored ->
+ rpc:call(Node, Mod, Fun, Args, Timeout);
+ {ongoing_change_to, NewValue} ->
+ _ = net_kernel:set_net_ticktime(NewValue, 0),
+ rpc:call(Node, Mod, Fun, Args, Timeout);
+ Time ->
+ _ = net_kernel:set_net_ticktime(Time, 0),
+ rpc:call(Node, Mod, Fun, Args, Timeout)
+ end.
+
+get_gc_info(Pid) ->
+ rabbit_runtime:get_gc_info(Pid).
+
+%% -------------------------------------------------------------------------
+%% Begin copypasta from gen_server2.erl
+
+get_parent() ->
+ case get('$ancestors') of
+ [Parent | _] when is_pid (Parent) -> Parent;
+ [Parent | _] when is_atom(Parent) -> name_to_pid(Parent);
+ _ -> exit(process_was_not_started_by_proc_lib)
+ end.
+
+name_to_pid(Name) ->
+ case whereis(Name) of
+ undefined -> case whereis_name(Name) of
+ undefined -> exit(could_not_find_registered_name);
+ Pid -> Pid
+ end;
+ Pid -> Pid
+ end.
+
+whereis_name(Name) ->
+ case ets:lookup(global_names, Name) of
+ [{_Name, Pid, _Method, _RPid, _Ref}] ->
+ if node(Pid) == node() -> case erlang:is_process_alive(Pid) of
+ true -> Pid;
+ false -> undefined
+ end;
+ true -> Pid
+ end;
+ [] -> undefined
+ end.
+
+%% End copypasta from gen_server2.erl
+%% -------------------------------------------------------------------------
diff --git a/deps/rabbit_common/src/rabbit_msg_store_index.erl b/deps/rabbit_common/src/rabbit_msg_store_index.erl
new file mode 100644
index 0000000000..ce9abe97a6
--- /dev/null
+++ b/deps/rabbit_common/src/rabbit_msg_store_index.erl
@@ -0,0 +1,89 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_msg_store_index).
+
+-include("rabbit_msg_store.hrl").
+
+%% Behaviour module to provide pluggable message store index.
+%% The index is used to locate messages on disk and for reference-counting.
+
+%% The message store makes several additional assumptions about the performance
+%% and atomicity of some operations. See the comments for each callback.
+
+-type(dir() :: string()).
+-type(index_state() :: any()).
+-type(fieldpos() :: non_neg_integer()).
+-type(fieldvalue() :: any()).
+-type(msg_location() :: #msg_location{}).
+
+
+%% There are two ways of starting an index:
+%% - `new` - starts a clean index
+%% - `recover` - attempts to read a saved index
+%% In both cases the old saved state should be deleted from the directory.
+
+%% Initialize a fresh index state for the message store directory.
+-callback new(dir()) -> index_state().
+%% Try to recover a gracefully stopped index state.
+-callback recover(dir()) -> rabbit_types:ok_or_error2(index_state(), any()).
+%% Gracefully shutdown the index.
+%% Should save the index state, which will be loaded by the 'recover' function.
+-callback terminate(index_state()) -> any().
+
+%% Lookup an entry in the index.
+%% Is called concurrently by msg_store, its clients and GC processes.
+%% This function is called multiple times for each message store operation.
+%% The message store tries to avoid writing messages to disk if consumers can
+%% process them fast enough, so there will be a lot of lookups for non-existent
+%% entries, which should be as fast as possible.
+-callback lookup(rabbit_types:msg_id(), index_state()) -> ('not_found' | msg_location()).
+
+%% Insert an entry into the index.
+%% Is called by a msg_store process only.
+%% This function can exit if there is already an entry with the same ID.
+-callback insert(msg_location(), index_state()) -> 'ok'.
+
+%% Update an entry in the index.
+%% Is called by a msg_store process only.
+%% The function is called during message store recovery after a crash.
+%% The difference between the update and insert functions is that update
+%% must not fail if the entry already exists, and it should be atomic.
+-callback update(msg_location(), index_state()) -> 'ok'.
+
+%% Update positional fields in the entry tuple.
+%% Is called by msg_store and GC processes concurrently.
+%% This function can exit if there is no entry with the specified ID.
+%% This function is called to update reference-counters and file locations.
+%% File locations are updated from a GC process, reference-counters are
+%% updated from a message store process.
+%% This function should be atomic.
+-callback update_fields(rabbit_types:msg_id(), ({fieldpos(), fieldvalue()} |
+ [{fieldpos(), fieldvalue()}]),
+ index_state()) -> 'ok'.
+
+%% Delete an entry from the index by ID.
+%% Is called from a msg_store process only.
+%% This function should be atomic.
+-callback delete(rabbit_types:msg_id(), index_state()) -> 'ok'.
+
+%% Delete an exactly matching entry from the index.
+%% Is called by GC process only.
+%% This function should match the exact object to avoid deleting a
+%% zero-reference object whose reference counter is being concurrently updated.
+%% This function should be atomic.
+-callback delete_object(msg_location(), index_state()) -> 'ok'.
+
+%% Delete temporary reference count entries with the 'file' record field equal to 'undefined'.
+%% Is called during an index rebuild from scratch (e.g. after a non-clean stop).
+%% During recovery after a non-clean stop or file corruption, reference counters
+%% are added to the index with an `undefined` value for the `file` field.
+%% If a message is found in a message store file, its file field is updated.
+%% Any reference counters still missing a message location after recovery
+%% should be deleted.
+-callback clean_up_temporary_reference_count_entries_without_file(index_state()) -> 'ok'.
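+
+%% For illustration only, a minimal sketch of an implementation of this
+%% behaviour backed by ETS (not part of this module; RabbitMQ's stock
+%% ETS-based index follows this shape):
+%%
+%%   new(_Dir) ->
+%%       ets:new(msg_store_index, [set, public,
+%%                                 {keypos, #msg_location.msg_id}]).
+%%
+%%   lookup(MsgId, Tid) ->
+%%       case ets:lookup(Tid, MsgId) of
+%%           []    -> not_found;
+%%           [Loc] -> Loc
+%%       end.
+%%
+%%   insert(Loc, Tid) ->
+%%       %% insert_new/2 returns false on a duplicate, so the badmatch
+%%       %% exits, as the insert/2 contract above allows.
+%%       true = ets:insert_new(Tid, Loc),
+%%       ok.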
+
diff --git a/deps/rabbit_common/src/rabbit_net.erl b/deps/rabbit_common/src/rabbit_net.erl
new file mode 100644
index 0000000000..7685687ff0
--- /dev/null
+++ b/deps/rabbit_common/src/rabbit_net.erl
@@ -0,0 +1,321 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_net).
+-include("rabbit.hrl").
+
+-include_lib("kernel/include/inet.hrl").
+
+-export([is_ssl/1, ssl_info/1, controlling_process/2, getstat/2,
+ recv/1, sync_recv/2, async_recv/3, port_command/2, getopts/2,
+ setopts/2, send/2, close/1, fast_close/1, sockname/1, peername/1,
+ peercert/1, connection_string/2, socket_ends/2, is_loopback/1,
+ tcp_host/1, unwrap_socket/1, maybe_get_proxy_socket/1,
+ hostname/0, getifaddrs/0]).
+
+%%---------------------------------------------------------------------------
+
+-export_type([socket/0, ip_port/0, hostname/0]).
+
+-type stat_option() ::
+ 'recv_cnt' | 'recv_max' | 'recv_avg' | 'recv_oct' | 'recv_dvi' |
+ 'send_cnt' | 'send_max' | 'send_avg' | 'send_oct' | 'send_pend'.
+-type ok_val_or_error(A) :: rabbit_types:ok_or_error2(A, any()).
+-type ok_or_any_error() :: rabbit_types:ok_or_error(any()).
+-type socket() :: port() | ssl:sslsocket().
+-type opts() :: [{atom(), any()} |
+ {raw, non_neg_integer(), non_neg_integer(), binary()}].
+-type hostname() :: inet:hostname().
+-type ip_port() :: inet:port_number().
+% -type host_or_ip() :: binary() | inet:ip_address().
+-spec is_ssl(socket()) -> boolean().
+-spec ssl_info(socket()) -> 'nossl' | ok_val_or_error([{atom(), any()}]).
+-spec controlling_process(socket(), pid()) -> ok_or_any_error().
+-spec getstat(socket(), [stat_option()]) ->
+ ok_val_or_error([{stat_option(), integer()}]).
+-spec recv(socket()) ->
+ {'data', [char()] | binary()} |
+ 'closed' |
+ rabbit_types:error(any()) |
+ {'other', any()}.
+-spec sync_recv(socket(), integer()) ->
+ rabbit_types:ok(binary()) |
+ rabbit_types:error(any()).
+-spec async_recv(socket(), integer(), timeout()) ->
+ rabbit_types:ok(any()).
+-spec port_command(socket(), iolist()) -> 'true'.
+-spec getopts
+ (socket(),
+ [atom() |
+ {raw, non_neg_integer(), non_neg_integer(),
+ non_neg_integer() | binary()}]) ->
+ ok_val_or_error(opts()).
+-spec setopts(socket(), opts()) -> ok_or_any_error().
+-spec send(socket(), binary() | iolist()) -> ok_or_any_error().
+-spec close(socket()) -> ok_or_any_error().
+-spec fast_close(socket()) -> ok_or_any_error().
+-spec sockname(socket()) ->
+ ok_val_or_error({inet:ip_address(), ip_port()}).
+-spec peername(socket()) ->
+ ok_val_or_error({inet:ip_address(), ip_port()}).
+-spec peercert(socket()) ->
+ 'nossl' | ok_val_or_error(rabbit_ssl:certificate()).
+-spec connection_string(socket(), 'inbound' | 'outbound') ->
+ ok_val_or_error(string()).
+% -spec socket_ends(socket() | ranch_proxy:proxy_socket() | ranch_proxy_ssl:ssl_socket(),
+% 'inbound' | 'outbound') ->
+% ok_val_or_error({host_or_ip(), ip_port(),
+% host_or_ip(), ip_port()}).
+-spec is_loopback(socket() | inet:ip_address()) -> boolean().
+% -spec unwrap_socket(socket() | ranch_proxy:proxy_socket() | ranch_proxy_ssl:ssl_socket()) -> socket().
+
+-dialyzer({nowarn_function, [socket_ends/2, unwrap_socket/1]}).
+
+%%---------------------------------------------------------------------------
+
+-define(SSL_CLOSE_TIMEOUT, 5000).
+
+-define(IS_SSL(Sock), is_tuple(Sock)
+ andalso (tuple_size(Sock) =:= 3)
+ andalso (element(1, Sock) =:= sslsocket)).
+
+is_ssl(Sock) -> ?IS_SSL(Sock).
+
+%% Seems hackish. Is hackish. But the structure is stable and
+%% kept this way for backward compatibility reasons. We need
+%% it for two reasons: there is no ssl:getstat(Sock) function,
+%% and no ssl:close(Timeout) function. Both of them are being
+%% worked on as we speak.
+ssl_get_socket(Sock) ->
+ element(2, element(2, Sock)).
+
+ssl_info(Sock) when ?IS_SSL(Sock) ->
+ ssl:connection_information(Sock);
+ssl_info(_Sock) ->
+ nossl.
+
+controlling_process(Sock, Pid) when ?IS_SSL(Sock) ->
+ ssl:controlling_process(Sock, Pid);
+controlling_process(Sock, Pid) when is_port(Sock) ->
+ gen_tcp:controlling_process(Sock, Pid).
+
+getstat(Sock, Stats) when ?IS_SSL(Sock) ->
+ inet:getstat(ssl_get_socket(Sock), Stats);
+getstat(Sock, Stats) when is_port(Sock) ->
+ inet:getstat(Sock, Stats);
+%% Used by Proxy protocol support in plugins
+getstat({rabbit_proxy_socket, Sock, _}, Stats) when ?IS_SSL(Sock) ->
+ inet:getstat(ssl_get_socket(Sock), Stats);
+getstat({rabbit_proxy_socket, Sock, _}, Stats) when is_port(Sock) ->
+ inet:getstat(Sock, Stats).
+
+recv(Sock) when ?IS_SSL(Sock) ->
+ recv(Sock, {ssl, ssl_closed, ssl_error});
+recv(Sock) when is_port(Sock) ->
+ recv(Sock, {tcp, tcp_closed, tcp_error}).
+
+recv(S, {DataTag, ClosedTag, ErrorTag}) ->
+ receive
+ {DataTag, S, Data} -> {data, Data};
+ {ClosedTag, S} -> closed;
+ {ErrorTag, S, Reason} -> {error, Reason};
+ Other -> {other, Other}
+ end.
+
+sync_recv(Sock, Length) when ?IS_SSL(Sock) ->
+ ssl:recv(Sock, Length);
+sync_recv(Sock, Length) ->
+ gen_tcp:recv(Sock, Length).
+
+async_recv(Sock, Length, Timeout) when ?IS_SSL(Sock) ->
+ Pid = self(),
+ Ref = make_ref(),
+
+ spawn(fun () -> Pid ! {inet_async, Sock, Ref,
+ ssl:recv(Sock, Length, Timeout)}
+ end),
+
+ {ok, Ref};
+async_recv(Sock, Length, infinity) when is_port(Sock) ->
+ prim_inet:async_recv(Sock, Length, -1);
+async_recv(Sock, Length, Timeout) when is_port(Sock) ->
+ prim_inet:async_recv(Sock, Length, Timeout).
+
+port_command(Sock, Data) when ?IS_SSL(Sock) ->
+ case ssl:send(Sock, Data) of
+ ok -> self() ! {inet_reply, Sock, ok},
+ true;
+ {error, Reason} -> erlang:error(Reason)
+ end;
+port_command(Sock, Data) when is_port(Sock) ->
+ erlang:port_command(Sock, Data).
+
+getopts(Sock, Options) when ?IS_SSL(Sock) ->
+ ssl:getopts(Sock, Options);
+getopts(Sock, Options) when is_port(Sock) ->
+ inet:getopts(Sock, Options).
+
+setopts(Sock, Options) when ?IS_SSL(Sock) ->
+ ssl:setopts(Sock, Options);
+setopts(Sock, Options) when is_port(Sock) ->
+ inet:setopts(Sock, Options).
+
+send(Sock, Data) when ?IS_SSL(Sock) -> ssl:send(Sock, Data);
+send(Sock, Data) when is_port(Sock) -> gen_tcp:send(Sock, Data).
+
+close(Sock) when ?IS_SSL(Sock) -> ssl:close(Sock);
+close(Sock) when is_port(Sock) -> gen_tcp:close(Sock).
+
+fast_close(Sock) when ?IS_SSL(Sock) ->
+ %% We cannot simply port_close the underlying tcp socket since the
+ %% TLS protocol is quite insistent that a proper closing handshake
+ %% should take place (see RFC 5246 s7.2.1). So we call ssl:close
+ %% instead, but that can block for a very long time, e.g. when
+ %% there is lots of pending output and there is tcp backpressure,
+ %% or the ssl_connection process has entered the
+ %% workaround_transport_delivery_problems function during
+ %% termination, which, inexplicably, does a gen_tcp:recv(Socket,
+ %% 0), which may never return if the client doesn't send a FIN or
+ %% that gets swallowed by the network. Since there is no timeout
+ %% variant of ssl:close, we construct our own.
+ {Pid, MRef} = spawn_monitor(fun () -> ssl:close(Sock) end),
+ erlang:send_after(?SSL_CLOSE_TIMEOUT, self(), {Pid, ssl_close_timeout}),
+ receive
+ {Pid, ssl_close_timeout} ->
+ erlang:demonitor(MRef, [flush]),
+ exit(Pid, kill);
+ {'DOWN', MRef, process, Pid, _Reason} ->
+ ok
+ end,
+ catch port_close(ssl_get_socket(Sock)),
+ ok;
+fast_close(Sock) when is_port(Sock) ->
+ catch port_close(Sock), ok.
+
+sockname(Sock) when ?IS_SSL(Sock) -> ssl:sockname(Sock);
+sockname(Sock) when is_port(Sock) -> inet:sockname(Sock).
+
+peername(Sock) when ?IS_SSL(Sock) -> ssl:peername(Sock);
+peername(Sock) when is_port(Sock) -> inet:peername(Sock).
+
+peercert(Sock) when ?IS_SSL(Sock) -> ssl:peercert(Sock);
+peercert(Sock) when is_port(Sock) -> nossl.
+
+connection_string(Sock, Direction) ->
+ case socket_ends(Sock, Direction) of
+ {ok, {FromAddress, FromPort, ToAddress, ToPort}} ->
+ {ok, rabbit_misc:format(
+ "~s:~p -> ~s:~p",
+ [maybe_ntoab(FromAddress), FromPort,
+ maybe_ntoab(ToAddress), ToPort])};
+ Error ->
+ Error
+ end.
+
+socket_ends(Sock, Direction) when ?IS_SSL(Sock);
+ is_port(Sock) ->
+ {From, To} = sock_funs(Direction),
+ case {From(Sock), To(Sock)} of
+ {{ok, {FromAddress, FromPort}}, {ok, {ToAddress, ToPort}}} ->
+ {ok, {rdns(FromAddress), FromPort,
+ rdns(ToAddress), ToPort}};
+ {{error, _Reason} = Error, _} ->
+ Error;
+ {_, {error, _Reason} = Error} ->
+ Error
+ end;
+socket_ends({rabbit_proxy_socket, CSocket, ProxyInfo}, Direction = inbound) ->
+ #{
+ src_address := FromAddress,
+ src_port := FromPort
+ } = ProxyInfo,
+ {_From, To} = sock_funs(Direction),
+ case To(CSocket) of
+ {ok, {ToAddress, ToPort}} ->
+ {ok, {rdns(FromAddress), FromPort,
+ rdns(ToAddress), ToPort}};
+ {error, _Reason} = Error ->
+ Error
+ end.
+
+maybe_ntoab(Addr) when is_tuple(Addr) -> rabbit_misc:ntoab(Addr);
+maybe_ntoab(Host) -> Host.
+
+tcp_host({0,0,0,0}) ->
+ hostname();
+
+tcp_host({0,0,0,0,0,0,0,0}) ->
+ hostname();
+
+tcp_host(IPAddress) ->
+ case inet:gethostbyaddr(IPAddress) of
+ {ok, #hostent{h_name = Name}} -> Name;
+ {error, _Reason} -> rabbit_misc:ntoa(IPAddress)
+ end.
+
+hostname() ->
+ {ok, Hostname} = inet:gethostname(),
+ case inet:gethostbyname(Hostname) of
+ {ok, #hostent{h_name = Name}} -> Name;
+ {error, _Reason} -> Hostname
+ end.
+
+format_nic_attribute({Key, undefined}) ->
+ {Key, undefined};
+format_nic_attribute({Key = flags, List}) when is_list(List) ->
+ Val = string:join(lists:map(fun rabbit_data_coercion:to_list/1, List), ", "),
+ {Key, rabbit_data_coercion:to_binary(Val)};
+format_nic_attribute({Key, Tuple}) when is_tuple(Tuple) and (Key =:= addr orelse
+ Key =:= broadaddr orelse
+ Key =:= netmask orelse
+ Key =:= dstaddr) ->
+ Val = inet_parse:ntoa(Tuple),
+ {Key, rabbit_data_coercion:to_binary(Val)};
+format_nic_attribute({Key = hwaddr, List}) when is_list(List) ->
+ %% [140, 133, 144, 28, 241, 121] => 8C:85:90:1C:F1:79
+ Val = string:join(lists:map(fun(N) -> integer_to_list(N, 16) end, List), ":"),
+ {Key, rabbit_data_coercion:to_binary(Val)}.
+
+getifaddrs() ->
+ {ok, AddrList} = inet:getifaddrs(),
+ Addrs0 = maps:from_list(AddrList),
+ maps:map(fun (_Key, Proplist) ->
+ lists:map(fun format_nic_attribute/1, Proplist)
+ end, Addrs0).
+
+rdns(Addr) ->
+ case application:get_env(rabbit, reverse_dns_lookups) of
+ {ok, true} -> list_to_binary(tcp_host(Addr));
+ _ -> Addr
+ end.
+
+sock_funs(inbound) -> {fun peername/1, fun sockname/1};
+sock_funs(outbound) -> {fun sockname/1, fun peername/1}.
+
+is_loopback(Sock) when is_port(Sock) ; ?IS_SSL(Sock) ->
+ case sockname(Sock) of
+ {ok, {Addr, _Port}} -> is_loopback(Addr);
+ {error, _} -> false
+ end;
+%% We could parse the results of inet:getifaddrs() instead. But that
+%% would be more complex and maybe less Windows-compatible...
+is_loopback({127,_,_,_}) -> true;
+is_loopback({0,0,0,0,0,0,0,1}) -> true;
+is_loopback({0,0,0,0,0,65535,AB,CD}) -> is_loopback(ipv4(AB, CD));
+is_loopback(_) -> false.
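+%% e.g. is_loopback({127,0,0,1}) and is_loopback({0,0,0,0,0,65535,32512,1})
+%% (the IPv4-mapped form of 127.0.0.1) are both true.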
+
+ipv4(AB, CD) -> {AB bsr 8, AB band 255, CD bsr 8, CD band 255}.
+
+unwrap_socket({rabbit_proxy_socket, Sock, _}) ->
+ Sock;
+unwrap_socket(Sock) ->
+ Sock.
+
+maybe_get_proxy_socket(Sock={rabbit_proxy_socket, _, _}) ->
+ Sock;
+maybe_get_proxy_socket(_Sock) ->
+ undefined.
diff --git a/deps/rabbit_common/src/rabbit_nodes_common.erl b/deps/rabbit_common/src/rabbit_nodes_common.erl
new file mode 100644
index 0000000000..7e87ce2ea4
--- /dev/null
+++ b/deps/rabbit_common/src/rabbit_nodes_common.erl
@@ -0,0 +1,227 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_nodes_common).
+
+-define(EPMD_TIMEOUT, 30000).
+-define(TCP_DIAGNOSTIC_TIMEOUT, 5000).
+-define(ERROR_LOGGER_HANDLER, rabbit_error_logger_handler).
+
+-include_lib("kernel/include/inet.hrl").
+
+%%
+%% API
+%%
+
+-export([make/1, parts/1, names/1, name_type/1, ensure_epmd/0, is_running/2, is_process_running/2]).
+-export([cookie_hash/0, epmd_port/0, diagnostics/1]).
+
+-spec make({string(), string()} | string()) -> node().
+-spec parts(node() | string()) -> {string(), string()}.
+-spec ensure_epmd() -> 'ok'.
+-spec epmd_port() -> string().
+
+-spec names(string()) ->
+ rabbit_types:ok_or_error2([{string(), integer()}], term()).
+-spec diagnostics([node()]) -> string().
+-spec cookie_hash() -> string().
+
+%% net_adm:names/1 returns a new value, 'noport', in Erlang 24. This value being
+%% absent from the function spec in previous versions of Erlang, we get a warning
+%% from Dialyzer until we start testing against the yet-to-be-released Erlang 24
+%% in CI. Therefore we disable this specific warning.
+-dialyzer({nowarn_function, diagnostics_node/1}).
+
+names(Hostname) ->
+ Self = self(),
+ Ref = make_ref(),
+ {Pid, MRef} = spawn_monitor(
+ fun () -> Self ! {Ref, net_adm:names(Hostname)} end),
+ _ = timer:exit_after(?EPMD_TIMEOUT, Pid, timeout),
+ receive
+ {Ref, Names} -> erlang:demonitor(MRef, [flush]),
+ Names;
+ {'DOWN', MRef, process, Pid, Reason} -> {error, Reason}
+ end.
+
+make({Prefix, Suffix}) -> rabbit_data_coercion:to_atom(
+ lists:append([rabbit_data_coercion:to_list(Prefix),
+ "@",
+ rabbit_data_coercion:to_list(Suffix)]));
+make(NodeStr) -> make(parts(NodeStr)).
+
+parts(Node) when is_atom(Node) ->
+ parts(atom_to_list(Node));
+parts(NodeStr) ->
+ case lists:splitwith(fun (E) -> E =/= $@ end, NodeStr) of
+ {Prefix, []} -> {_, Suffix} = parts(node()),
+ {Prefix, Suffix};
+ {Prefix, Suffix} -> {Prefix, tl(Suffix)}
+ end.
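+%% e.g. parts('rabbit@hostname') is {"rabbit", "hostname"}, and
+%% make({"rabbit", "hostname"}) is 'rabbit@hostname'.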
+
+name_type(Node) ->
+ {_, HostPart} = parts(Node),
+ case lists:member($., HostPart) of
+ false -> shortnames;
+ true -> longnames
+ end.
+
+epmd_port() ->
+ case init:get_argument(epmd_port) of
+ {ok, [[Port | _] | _]} when is_list(Port) -> Port;
+ error -> "4369"
+ end.
+
+ensure_epmd() ->
+ Exe = rabbit_runtime:get_erl_path(),
+ ID = rabbit_misc:random(1000000000),
+ Port = open_port(
+ {spawn_executable, Exe},
+ [{args, ["-boot", "no_dot_erlang",
+ "-sname", rabbit_misc:format("epmd-starter-~b", [ID]),
+ "-noinput", "-s", "erlang", "halt"]},
+ exit_status, stderr_to_stdout, use_stdio]),
+ port_shutdown_loop(Port).
+
+port_shutdown_loop(Port) ->
+ receive
+ {Port, {exit_status, _Rc}} -> ok;
+ {Port, _} -> port_shutdown_loop(Port)
+ end.
+
+cookie_hash() ->
+ base64:encode_to_string(erlang:md5(atom_to_list(erlang:get_cookie()))).
+
+diagnostics(Nodes) ->
+ verbose_erlang_distribution(true),
+ NodeDiags = [{"~nDIAGNOSTICS~n===========~n~n"
+ "attempted to contact: ~p~n", [Nodes]}] ++
+ [diagnostics_node(Node) || Node <- Nodes] ++
+ current_node_details(),
+ verbose_erlang_distribution(false),
+ rabbit_misc:format_many(lists:flatten(NodeDiags)).
+
+verbose_erlang_distribution(true) ->
+ net_kernel:verbose(1),
+ error_logger:add_report_handler(?ERROR_LOGGER_HANDLER);
+verbose_erlang_distribution(false) ->
+ net_kernel:verbose(0),
+ error_logger:delete_report_handler(?ERROR_LOGGER_HANDLER).
+
+current_node_details() ->
+ [{"~nCurrent node details:~n * node name: ~w", [node()]},
+ case init:get_argument(home) of
+ {ok, [[Home]]} -> {" * effective user's home directory: ~s", [Home]};
+ Other -> {" * effective user has no home directory: ~p", [Other]}
+ end,
+ {" * Erlang cookie hash: ~s", [cookie_hash()]}].
+
+diagnostics_node(Node) ->
+ {Name, Host} = parts(Node),
+ [{"~s:", [Node]} |
+ case names(Host) of
+ {error, Reason} ->
+ [{" * unable to connect to epmd (port ~s) on ~s: ~s~n",
+ [epmd_port(), Host, rabbit_misc:format_inet_error(Reason)]}];
+ noport ->
+ [{" * unable to connect to epmd (port ~s) on ~s: "
+ "couldn't resolve hostname~n",
+ [epmd_port(), Host]}];
+ {ok, NamePorts} ->
+ [{" * connected to epmd (port ~s) on ~s",
+ [epmd_port(), Host]}] ++
+ case net_adm:ping(Node) of
+ pong -> dist_working_diagnostics(Node);
+ pang -> dist_broken_diagnostics(Name, Host, NamePorts)
+ end
+ end].
+
+dist_working_diagnostics(Node) ->
+ case is_process_running(Node, rabbit) of
+ true -> [{" * node ~s up, 'rabbit' application running", [Node]}];
+ false -> [{" * node ~s up, 'rabbit' application not running~n"
+ " * running applications on ~s: ~p~n"
+ " * suggestion: use rabbitmqctl start_app on ~s",
+ [Node, Node, remote_apps(Node), Node]}]
+ end.
+
+remote_apps(Node) ->
+ %% We want a timeout here because, really, we don't trust the node;
+ %% the last thing we want to do is hang.
+ case rpc:call(Node, application, which_applications, [5000]) of
+ {badrpc, _} = E -> E;
+ Apps -> [App || {App, _, _} <- Apps]
+ end.
+
+dist_broken_diagnostics(Name, Host, NamePorts) ->
+ case [{N, P} || {N, P} <- NamePorts, N =:= Name] of
+ [] ->
+ {SelfName, SelfHost} = parts(node()),
+ Others = [list_to_atom(N) || {N, _} <- NamePorts,
+ N =/= case SelfHost of
+ Host -> SelfName;
+ _ -> never_matches
+ end],
+ OthersDiag = case Others of
+ [] -> [{" no other nodes on ~s",
+ [Host]}];
+ _ -> [{" other nodes on ~s: ~p",
+ [Host, Others]}]
+ end,
+ [{" * epmd reports: node '~s' not running at all", [Name]},
+ OthersDiag, {" * suggestion: start the node", []}];
+ [{Name, Port}] ->
+ [{" * epmd reports node '~s' uses port ~b for inter-node and CLI tool traffic ", [Name, Port]} |
+ case diagnose_connect(Host, Port) of
+ ok ->
+ connection_succeeded_diagnostics();
+ {error, Reason} ->
+ [{" * can't establish TCP connection to the target node, reason: ~s~n"
+ " * suggestion: check if host '~s' resolves, is reachable and ports ~b, 4369 are not blocked by firewall",
+ [rabbit_misc:format_inet_error(Reason), Host, Port]}]
+ end]
+ end.
+
+connection_succeeded_diagnostics() ->
+ case gen_event:call(error_logger, ?ERROR_LOGGER_HANDLER, get_connection_report) of
+ [] ->
+ [{" * TCP connection succeeded but Erlang distribution failed ~n"
+ " * suggestion: check if the Erlang cookie identical for all server nodes and CLI tools~n"
+ " * suggestion: check if all server nodes and CLI tools use consistent hostnames when addressing each other~n"
+ " * suggestion: check if inter-node connections may be configured to use TLS. If so, all nodes and CLI tools must do that~n"
+ " * suggestion: see the CLI, clustering and networking guides on https://rabbitmq.com/documentation.html to learn more~n", []}];
+ Report ->
+ [{" * TCP connection succeeded but Erlang distribution "
+ "failed ~n", []}]
+ ++ Report
+ end.
+
+diagnose_connect(Host, Port) ->
+ case inet:gethostbyname(Host) of
+ {ok, #hostent{h_addrtype = Family}} ->
+ case gen_tcp:connect(Host, Port, [Family],
+ ?TCP_DIAGNOSTIC_TIMEOUT) of
+ {ok, Socket} -> gen_tcp:close(Socket),
+ ok;
+ {error, _} = E -> E
+ end;
+ {error, _} = E ->
+ E
+ end.
+
+is_running(Node, Application) ->
+ case rpc:call(Node, rabbit_misc, which_applications, []) of
+ {badrpc, _} -> false;
+ Apps -> proplists:is_defined(Application, Apps)
+ end.
+
+is_process_running(Node, Process) ->
+ case rpc:call(Node, erlang, whereis, [Process]) of
+ {badrpc, _} -> false;
+ undefined -> false;
+ P when is_pid(P) -> true
+ end.
diff --git a/deps/rabbit_common/src/rabbit_numerical.erl b/deps/rabbit_common/src/rabbit_numerical.erl
new file mode 100644
index 0000000000..45cc67fda6
--- /dev/null
+++ b/deps/rabbit_common/src/rabbit_numerical.erl
@@ -0,0 +1,358 @@
+%% This file is a copy of `mochinum.erl' from mochiweb, revision
+%% d541e9a0f36c00dcadc2e589f20e47fbf46fc76f. For the license, see
+%% `LICENSE-MIT-Mochi'.
+
+%% @copyright 2007 Mochi Media, Inc.
+%% @author Bob Ippolito <bob@mochimedia.com>
+
+%% @doc Useful numeric algorithms for floats that cover some deficiencies
+%% in the math module. More interesting is digits/1, which implements
+%% the algorithm from:
+%% https://cs.indiana.edu/~burger/fp/index.html
+%% See also "Printing Floating-Point Numbers Quickly and Accurately"
+%% in Proceedings of the SIGPLAN '96 Conference on Programming Language
+%% Design and Implementation.
+
+-module(rabbit_numerical).
+-author("Bob Ippolito <bob@mochimedia.com>").
+-export([digits/1, frexp/1, int_pow/2, int_ceil/1]).
+
+%% IEEE 754 Float exponent bias
+-define(FLOAT_BIAS, 1022).
+-define(MIN_EXP, -1074).
+-define(BIG_POW, 4503599627370496).
+
+%% External API
+
+%% @spec digits(number()) -> string()
+%% @doc Returns a string that accurately represents the given integer or float
+%% using a conservative amount of digits. Great for generating
+%% human-readable output, or compact ASCII serializations for floats.
+digits(N) when is_integer(N) ->
+ integer_to_list(N);
+digits(0.0) ->
+ "0.0";
+digits(Float) ->
+ {Frac1, Exp1} = frexp_int(Float),
+ [Place0 | Digits0] = digits1(Float, Exp1, Frac1),
+ {Place, Digits} = transform_digits(Place0, Digits0),
+ R = insert_decimal(Place, Digits),
+ case Float < 0 of
+ true ->
+ [$- | R];
+ _ ->
+ R
+ end.
+
+%% @spec frexp(F::float()) -> {Frac::float(), Exp::float()}
+%% @doc Return the fractional and exponent part of an IEEE 754 double,
+%% equivalent to the libc function of the same name.
+%% F = Frac * pow(2, Exp).
+frexp(F) ->
+ frexp1(unpack(F)).
+
+%% @spec int_pow(X::integer(), N::integer()) -> Y::integer()
+%% @doc Moderately efficient way to exponentiate integers.
+%% int_pow(10, 2) = 100.
+int_pow(_X, 0) ->
+ 1;
+int_pow(X, N) when N > 0 ->
+ int_pow(X, N, 1).
+
+%% @spec int_ceil(F::float()) -> integer()
+%% @doc Return the ceiling of F as an integer. The ceiling is defined as
+%% F when F == trunc(F);
+%% trunc(F) when F &lt; 0;
+%% trunc(F) + 1 when F &gt; 0.
+int_ceil(X) ->
+ T = trunc(X),
+ case (X - T) of
+ Pos when Pos > 0 -> T + 1;
+ _ -> T
+ end.
+
+
+%% Internal API
+
+int_pow(X, N, R) when N < 2 ->
+ R * X;
+int_pow(X, N, R) ->
+ int_pow(X * X, N bsr 1, case N band 1 of 1 -> R * X; 0 -> R end).
+
+insert_decimal(0, S) ->
+ "0." ++ S;
+insert_decimal(Place, S) when Place > 0 ->
+ L = length(S),
+ case Place - L of
+ 0 ->
+ S ++ ".0";
+ N when N < 0 ->
+ {S0, S1} = lists:split(L + N, S),
+ S0 ++ "." ++ S1;
+ N when N < 6 ->
+ %% More places than digits
+ S ++ lists:duplicate(N, $0) ++ ".0";
+ _ ->
+ insert_decimal_exp(Place, S)
+ end;
+insert_decimal(Place, S) when Place > -6 ->
+ "0." ++ lists:duplicate(abs(Place), $0) ++ S;
+insert_decimal(Place, S) ->
+ insert_decimal_exp(Place, S).
+
+insert_decimal_exp(Place, S) ->
+ [C | S0] = S,
+ S1 = case S0 of
+ [] ->
+ "0";
+ _ ->
+ S0
+ end,
+ Exp = case Place < 0 of
+ true ->
+ "e-";
+ false ->
+ "e+"
+ end,
+ [C] ++ "." ++ S1 ++ Exp ++ integer_to_list(abs(Place - 1)).
+
+
+digits1(Float, Exp, Frac) ->
+ Round = ((Frac band 1) =:= 0),
+ case Exp >= 0 of
+ true ->
+ BExp = 1 bsl Exp,
+ case (Frac =/= ?BIG_POW) of
+ true ->
+ scale((Frac * BExp * 2), 2, BExp, BExp,
+ Round, Round, Float);
+ false ->
+ scale((Frac * BExp * 4), 4, (BExp * 2), BExp,
+ Round, Round, Float)
+ end;
+ false ->
+ case (Exp =:= ?MIN_EXP) orelse (Frac =/= ?BIG_POW) of
+ true ->
+ scale((Frac * 2), 1 bsl (1 - Exp), 1, 1,
+ Round, Round, Float);
+ false ->
+ scale((Frac * 4), 1 bsl (2 - Exp), 2, 1,
+ Round, Round, Float)
+ end
+ end.
+
+scale(R, S, MPlus, MMinus, LowOk, HighOk, Float) ->
+ Est = int_ceil(math:log10(abs(Float)) - 1.0e-10),
+ %% Note that the Scheme reference implementation uses a 326-element
+ %% look-up table for int_pow(10, N), where we do not.
+ case Est >= 0 of
+ true ->
+ fixup(R, S * int_pow(10, Est), MPlus, MMinus, Est,
+ LowOk, HighOk);
+ false ->
+ Scale = int_pow(10, -Est),
+ fixup(R * Scale, S, MPlus * Scale, MMinus * Scale, Est,
+ LowOk, HighOk)
+ end.
+
+fixup(R, S, MPlus, MMinus, K, LowOk, HighOk) ->
+ TooLow = case HighOk of
+ true ->
+ (R + MPlus) >= S;
+ false ->
+ (R + MPlus) > S
+ end,
+ case TooLow of
+ true ->
+ [(K + 1) | generate(R, S, MPlus, MMinus, LowOk, HighOk)];
+ false ->
+ [K | generate(R * 10, S, MPlus * 10, MMinus * 10, LowOk, HighOk)]
+ end.
+
+generate(R0, S, MPlus, MMinus, LowOk, HighOk) ->
+ D = R0 div S,
+ R = R0 rem S,
+ TC1 = case LowOk of
+ true ->
+ R =< MMinus;
+ false ->
+ R < MMinus
+ end,
+ TC2 = case HighOk of
+ true ->
+ (R + MPlus) >= S;
+ false ->
+ (R + MPlus) > S
+ end,
+ case TC1 of
+ false ->
+ case TC2 of
+ false ->
+ [D | generate(R * 10, S, MPlus * 10, MMinus * 10,
+ LowOk, HighOk)];
+ true ->
+ [D + 1]
+ end;
+ true ->
+ case TC2 of
+ false ->
+ [D];
+ true ->
+ case R * 2 < S of
+ true ->
+ [D];
+ false ->
+ [D + 1]
+ end
+ end
+ end.
+
+unpack(Float) ->
+ <<Sign:1, Exp:11, Frac:52>> = <<Float:64/float>>,
+ {Sign, Exp, Frac}.
+
+frexp1({_Sign, 0, 0}) ->
+ {0.0, 0};
+frexp1({Sign, 0, Frac}) ->
+ Exp = log2floor(Frac),
+ <<Frac1:64/float>> = <<Sign:1, ?FLOAT_BIAS:11, (Frac-1):52>>,
+ {Frac1, -(?FLOAT_BIAS) - 52 + Exp};
+frexp1({Sign, Exp, Frac}) ->
+ <<Frac1:64/float>> = <<Sign:1, ?FLOAT_BIAS:11, Frac:52>>,
+ {Frac1, Exp - ?FLOAT_BIAS}.
+
+log2floor(Int) ->
+ log2floor(Int, 0).
+
+log2floor(0, N) ->
+ N;
+log2floor(Int, N) ->
+ log2floor(Int bsr 1, 1 + N).
+
+
+transform_digits(Place, [0 | Rest]) ->
+ transform_digits(Place, Rest);
+transform_digits(Place, Digits) ->
+ {Place, [$0 + D || D <- Digits]}.
+
+
+frexp_int(F) ->
+ case unpack(F) of
+ {_Sign, 0, Frac} ->
+ {Frac, ?MIN_EXP};
+ {_Sign, Exp, Frac} ->
+ {Frac + (1 bsl 52), Exp - 53 - ?FLOAT_BIAS}
+ end.
+
+%%
+%% Tests
+%%
+-ifdef(TEST).
+-include_lib("eunit/include/eunit.hrl").
+
+int_ceil_test() ->
+ ?assertEqual(1, int_ceil(0.0001)),
+ ?assertEqual(0, int_ceil(0.0)),
+ ?assertEqual(1, int_ceil(0.99)),
+ ?assertEqual(1, int_ceil(1.0)),
+ ?assertEqual(-1, int_ceil(-1.5)),
+ ?assertEqual(-2, int_ceil(-2.0)),
+ ok.
+
+int_pow_test() ->
+ ?assertEqual(1, int_pow(1, 1)),
+ ?assertEqual(1, int_pow(1, 0)),
+ ?assertEqual(1, int_pow(10, 0)),
+ ?assertEqual(10, int_pow(10, 1)),
+ ?assertEqual(100, int_pow(10, 2)),
+ ?assertEqual(1000, int_pow(10, 3)),
+ ok.
+
+digits_test() ->
+ ?assertEqual("0",
+ digits(0)),
+ ?assertEqual("0.0",
+ digits(0.0)),
+ ?assertEqual("1.0",
+ digits(1.0)),
+ ?assertEqual("-1.0",
+ digits(-1.0)),
+ ?assertEqual("0.1",
+ digits(0.1)),
+ ?assertEqual("0.01",
+ digits(0.01)),
+ ?assertEqual("0.001",
+ digits(0.001)),
+ ?assertEqual("1.0e+6",
+ digits(1000000.0)),
+ ?assertEqual("0.5",
+ digits(0.5)),
+ ?assertEqual("4503599627370496.0",
+ digits(4503599627370496.0)),
+ %% small denormalized number
+ %% 4.94065645841246544177e-324 =:= 5.0e-324
+ <<SmallDenorm/float>> = <<0,0,0,0,0,0,0,1>>,
+ ?assertEqual("5.0e-324",
+ digits(SmallDenorm)),
+ ?assertEqual(SmallDenorm,
+ list_to_float(digits(SmallDenorm))),
+ %% large denormalized number
+ %% 2.22507385850720088902e-308
+ <<BigDenorm/float>> = <<0,15,255,255,255,255,255,255>>,
+ ?assertEqual("2.225073858507201e-308",
+ digits(BigDenorm)),
+ ?assertEqual(BigDenorm,
+ list_to_float(digits(BigDenorm))),
+ %% small normalized number
+ %% 2.22507385850720138309e-308
+ <<SmallNorm/float>> = <<0,16,0,0,0,0,0,0>>,
+ ?assertEqual("2.2250738585072014e-308",
+ digits(SmallNorm)),
+ ?assertEqual(SmallNorm,
+ list_to_float(digits(SmallNorm))),
+ %% large normalized number
+ %% 1.79769313486231570815e+308
+ <<LargeNorm/float>> = <<127,239,255,255,255,255,255,255>>,
+ ?assertEqual("1.7976931348623157e+308",
+ digits(LargeNorm)),
+ ?assertEqual(LargeNorm,
+ list_to_float(digits(LargeNorm))),
+ %% issue #10 - mochinum:frexp(math:pow(2, -1074)).
+ ?assertEqual("5.0e-324",
+ digits(math:pow(2, -1074))),
+ ok.
+
+frexp_test() ->
+ %% zero
+ ?assertEqual({0.0, 0}, frexp(0.0)),
+ %% one
+ ?assertEqual({0.5, 1}, frexp(1.0)),
+ %% negative one
+ ?assertEqual({-0.5, 1}, frexp(-1.0)),
+ %% small denormalized number
+ %% 4.94065645841246544177e-324
+ <<SmallDenorm/float>> = <<0,0,0,0,0,0,0,1>>,
+ ?assertEqual({0.5, -1073}, frexp(SmallDenorm)),
+ %% large denormalized number
+ %% 2.22507385850720088902e-308
+ <<BigDenorm/float>> = <<0,15,255,255,255,255,255,255>>,
+ ?assertEqual(
+ {0.99999999999999978, -1022},
+ frexp(BigDenorm)),
+ %% small normalized number
+ %% 2.22507385850720138309e-308
+ <<SmallNorm/float>> = <<0,16,0,0,0,0,0,0>>,
+ ?assertEqual({0.5, -1021}, frexp(SmallNorm)),
+ %% large normalized number
+ %% 1.79769313486231570815e+308
+ <<LargeNorm/float>> = <<127,239,255,255,255,255,255,255>>,
+ ?assertEqual(
+ {0.99999999999999989, 1024},
+ frexp(LargeNorm)),
+ %% issue #10 - mochinum:frexp(math:pow(2, -1074)).
+ ?assertEqual(
+ {0.5, -1073},
+ frexp(math:pow(2, -1074))),
+ ok.
+
+-endif.
diff --git a/deps/rabbit_common/src/rabbit_password_hashing.erl b/deps/rabbit_common/src/rabbit_password_hashing.erl
new file mode 100644
index 0000000000..53d4d04e10
--- /dev/null
+++ b/deps/rabbit_common/src/rabbit_password_hashing.erl
@@ -0,0 +1,11 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_password_hashing).
+-include("rabbit.hrl").
+
+-callback hash(rabbit_types:password()) -> rabbit_types:password_hash().
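+
+%% A hypothetical implementation of this behaviour (the module name and
+%% hash choice are illustrative, not part of this interface):
+%%
+%%   -module(my_password_hashing_sha256).
+%%   -behaviour(rabbit_password_hashing).
+%%   -export([hash/1]).
+%%
+%%   hash(Password) -> crypto:hash(sha256, Password).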
diff --git a/deps/rabbit_common/src/rabbit_pbe.erl b/deps/rabbit_common/src/rabbit_pbe.erl
new file mode 100644
index 0000000000..d999d520a4
--- /dev/null
+++ b/deps/rabbit_common/src/rabbit_pbe.erl
@@ -0,0 +1,54 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_pbe).
+
+-export([supported_ciphers/0, supported_hashes/0, default_cipher/0, default_hash/0, default_iterations/0]).
+-export([encrypt_term/5, decrypt_term/5]).
+-export([encrypt/5, decrypt/5]).
+
+-export_type([encryption_result/0]).
+
+supported_ciphers() ->
+ credentials_obfuscation_pbe:supported_ciphers().
+
+supported_hashes() ->
+ credentials_obfuscation_pbe:supported_hashes().
+
+%% Default encryption parameters.
+default_cipher() ->
+ credentials_obfuscation_pbe:default_cipher().
+
+default_hash() ->
+ credentials_obfuscation_pbe:default_hash().
+
+default_iterations() ->
+ credentials_obfuscation_pbe:default_iterations().
+
+%% Encryption/decryption of arbitrary Erlang terms.
+
+encrypt_term(Cipher, Hash, Iterations, PassPhrase, Term) ->
+ credentials_obfuscation_pbe:encrypt_term(Cipher, Hash, Iterations, PassPhrase, Term).
+
+decrypt_term(_Cipher, _Hash, _Iterations, _PassPhrase, {plaintext, Term}) ->
+ Term;
+decrypt_term(Cipher, Hash, Iterations, PassPhrase, {encrypted, _Base64Binary}=Encrypted) ->
+ credentials_obfuscation_pbe:decrypt_term(Cipher, Hash, Iterations, PassPhrase, Encrypted).
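+
+%% A minimal round-trip sketch using this module's own defaults (the
+%% passphrase and term values are illustrative):
+%%
+%%   Enc = rabbit_pbe:encrypt_term(rabbit_pbe:default_cipher(),
+%%                                 rabbit_pbe:default_hash(),
+%%                                 rabbit_pbe:default_iterations(),
+%%                                 <<"passphrase">>, {some, term}),
+%%   {some, term} = rabbit_pbe:decrypt_term(rabbit_pbe:default_cipher(),
+%%                                          rabbit_pbe:default_hash(),
+%%                                          rabbit_pbe:default_iterations(),
+%%                                          <<"passphrase">>, Enc).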
+
+-type encryption_result() :: {'encrypted', binary()} | {'plaintext', binary()}.
+
+-spec encrypt(crypto:block_cipher(), crypto:hash_algorithms(),
+ pos_integer(), iodata() | '$pending-secret', binary()) -> encryption_result().
+encrypt(Cipher, Hash, Iterations, PassPhrase, ClearText) ->
+ credentials_obfuscation_pbe:encrypt(Cipher, Hash, Iterations, PassPhrase, ClearText).
+
+-spec decrypt(crypto:block_cipher(), crypto:hash_algorithms(),
+ pos_integer(), iodata(), encryption_result()) -> any().
+decrypt(_Cipher, _Hash, _Iterations, _PassPhrase, {plaintext, Term}) ->
+ Term;
+decrypt(Cipher, Hash, Iterations, PassPhrase, {encrypted, _Base64Binary}=Encrypted) ->
+ credentials_obfuscation_pbe:decrypt(Cipher, Hash, Iterations, PassPhrase, Encrypted).
diff --git a/deps/rabbit_common/src/rabbit_peer_discovery_backend.erl b/deps/rabbit_common/src/rabbit_peer_discovery_backend.erl
new file mode 100644
index 0000000000..af3683e72b
--- /dev/null
+++ b/deps/rabbit_common/src/rabbit_peer_discovery_backend.erl
@@ -0,0 +1,59 @@
+%% This module is based on the autocluster_backend module
+%% from rabbitmq-autocluster by Gavin Roy.
+%%
+%% Copyright (c) 2014-2015 AWeber Communications
+%% Copyright (c) 2016-2020 VMware, Inc. or its affiliates
+%% All rights reserved.
+%%
+%% Redistribution and use in source and binary forms, with or without modification,
+%% are permitted provided that the following conditions are met:
+%%
+%% * Redistributions of source code must retain the above copyright notice, this
+%% list of conditions and the following disclaimer.
+%% * Redistributions in binary form must reproduce the above copyright notice,
+%% this list of conditions and the following disclaimer in the documentation
+%% and/or other materials provided with the distribution.
+%% * Neither the name of the project nor the names of its
+%% contributors may be used to endorse or promote products derived from this
+%% software without specific prior written permission.
+%%
+%% THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+%% ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+%% WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+%% IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+%% INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+%% BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+%% DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+%% LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+%% OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+%% ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+%%
+%% The Original Code is rabbitmq-autocluster.
+%%
+%% The Initial Developer of the Original Code is AWeber Communications.
+%% Copyright (c) 2014-2015 AWeber Communications
+%% Copyright (c) 2016-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_peer_discovery_backend).
+
+-include("rabbit.hrl").
+
+-callback init() -> ok | {error, Reason :: string()}.
+
+-callback list_nodes() -> {ok, {Nodes :: list(), NodeType :: rabbit_types:node_type()}} |
+ {error, Reason :: string()}.
+
+-callback supports_registration() -> boolean().
+
+-callback register() -> ok | {error, Reason :: string()}.
+
+-callback unregister() -> ok | {error, Reason :: string()}.
+
+-callback post_registration() -> ok | {error, Reason :: string()}.
+
+-callback lock(Node :: atom()) -> {ok, Data :: term()} | not_supported | {error, Reason :: string()}.
+
+-callback unlock(Data :: term()) -> ok | {error, Reason :: string()}.
+
+-optional_callbacks([init/0]).
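+
+%% A hypothetical minimal backend returning a static node list (the
+%% module name and node names are illustrative):
+%%
+%%   -module(my_static_peer_discovery).
+%%   -behaviour(rabbit_peer_discovery_backend).
+%%   -export([list_nodes/0, supports_registration/0, register/0,
+%%            unregister/0, post_registration/0, lock/1, unlock/1]).
+%%
+%%   list_nodes() -> {ok, {['rabbit@host-a', 'rabbit@host-b'], disc}}.
+%%   supports_registration() -> false.
+%%   register() -> ok.
+%%   unregister() -> ok.
+%%   post_registration() -> ok.
+%%   lock(_Node) -> not_supported.
+%%   unlock(_Data) -> ok.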
diff --git a/deps/rabbit_common/src/rabbit_policy_validator.erl b/deps/rabbit_common/src/rabbit_policy_validator.erl
new file mode 100644
index 0000000000..32b7a44fd9
--- /dev/null
+++ b/deps/rabbit_common/src/rabbit_policy_validator.erl
@@ -0,0 +1,22 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_policy_validator).
+
+-behaviour(rabbit_registry_class).
+
+-export([added_to_rabbit_registry/2, removed_from_rabbit_registry/1]).
+
+-export_type([validate_results/0]).
+
+-type(validate_results() ::
+ 'ok' | {error, string(), [term()]} | [validate_results()]).
+
+-callback validate_policy([{binary(), term()}]) -> validate_results().
+
+added_to_rabbit_registry(_Type, _ModuleName) -> ok.
+removed_from_rabbit_registry(_Type) -> ok.
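+
+%% A hypothetical validator for a single numeric policy key (the key
+%% name is illustrative):
+%%
+%%   validate_policy([{<<"max-widgets">>, N}]) when is_integer(N), N > 0 ->
+%%       ok;
+%%   validate_policy([{<<"max-widgets">>, V}]) ->
+%%       {error, "max-widgets must be a positive integer, got ~p", [V]};
+%%   validate_policy(_) ->
+%%       ok.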
diff --git a/deps/rabbit_common/src/rabbit_queue_collector.erl b/deps/rabbit_common/src/rabbit_queue_collector.erl
new file mode 100644
index 0000000000..ffc94ba6fb
--- /dev/null
+++ b/deps/rabbit_common/src/rabbit_queue_collector.erl
@@ -0,0 +1,80 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_queue_collector).
+
+%% The queue collector keeps track of exclusive queues and cleans them
+%% up, e.g. when their connection is closed.
+
+-behaviour(gen_server).
+
+-export([start_link/1, register/2, delete_all/1]).
+
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
+ terminate/2, code_change/3]).
+
+-record(state, {monitors, delete_from}).
+
+-include("rabbit.hrl").
+
+%%----------------------------------------------------------------------------
+
+-spec start_link(rabbit_types:proc_name()) -> rabbit_types:ok_pid_or_error().
+
+start_link(ProcName) ->
+ gen_server:start_link(?MODULE, [ProcName], []).
+
+-spec register(pid(), pid()) -> 'ok'.
+
+register(CollectorPid, Q) ->
+ gen_server:call(CollectorPid, {register, Q}, infinity).
+
+delete_all(CollectorPid) ->
+ gen_server:call(CollectorPid, delete_all, infinity).
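+
+%% Typical lifecycle (variable names are illustrative): the owning
+%% connection starts a collector, registers each exclusive queue pid it
+%% declares, and calls delete_all/1 when the connection closes:
+%%
+%%   {ok, Collector} = rabbit_queue_collector:start_link(ProcName),
+%%   ok = rabbit_queue_collector:register(Collector, QPid),
+%%   ok = rabbit_queue_collector:delete_all(Collector).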
+
+%%----------------------------------------------------------------------------
+
+init([ProcName]) ->
+ ?LG_PROCESS_TYPE(queue_collector),
+ ?store_proc_name(ProcName),
+ {ok, #state{monitors = pmon:new(), delete_from = undefined}}.
+
+%%--------------------------------------------------------------------------
+
+handle_call({register, QPid}, _From,
+ State = #state{monitors = QMons, delete_from = Deleting}) ->
+ case Deleting of
+ undefined -> ok;
+ _ -> ok = rabbit_amqqueue_common:delete_exclusive([QPid], Deleting)
+ end,
+ {reply, ok, State#state{monitors = pmon:monitor(QPid, QMons)}};
+
+handle_call(delete_all, From, State = #state{monitors = QMons,
+ delete_from = undefined}) ->
+ case pmon:monitored(QMons) of
+ [] -> {reply, ok, State#state{delete_from = From}};
+ QPids -> ok = rabbit_amqqueue_common:delete_exclusive(QPids, From),
+ {noreply, State#state{delete_from = From}}
+ end.
+
+handle_cast(Msg, State) ->
+ {stop, {unhandled_cast, Msg}, State}.
+
+handle_info({'DOWN', _MRef, process, DownPid, _Reason},
+ State = #state{monitors = QMons, delete_from = Deleting}) ->
+ QMons1 = pmon:erase(DownPid, QMons),
+ case Deleting =/= undefined andalso pmon:is_empty(QMons1) of
+ true -> gen_server:reply(Deleting, ok);
+ false -> ok
+ end,
+ {noreply, State#state{monitors = QMons1}}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
diff --git a/deps/rabbit_common/src/rabbit_registry.erl b/deps/rabbit_common/src/rabbit_registry.erl
new file mode 100644
index 0000000000..e68574828c
--- /dev/null
+++ b/deps/rabbit_common/src/rabbit_registry.erl
@@ -0,0 +1,165 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_registry).
+
+-behaviour(gen_server).
+
+-export([start_link/0]).
+
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
+ code_change/3]).
+
+-export([register/3, unregister/2,
+ binary_to_type/1, lookup_module/2, lookup_all/1]).
+
+-define(SERVER, ?MODULE).
+-define(ETS_NAME, ?MODULE).
+
+-spec start_link() -> rabbit_types:ok_pid_or_error().
+-spec register(atom(), binary(), atom()) -> 'ok'.
+-spec unregister(atom(), binary()) -> 'ok'.
+-spec binary_to_type(binary()) -> atom() | rabbit_types:error('not_found').
+-spec lookup_module(atom(), atom()) ->
+ rabbit_types:ok_or_error2(atom(), 'not_found').
+-spec lookup_all(atom()) -> [{atom(), atom()}].
+
+%%---------------------------------------------------------------------------
+
+start_link() ->
+ gen_server:start_link({local, ?SERVER}, ?MODULE, [], []).
+
+%%---------------------------------------------------------------------------
+
+register(Class, TypeName, ModuleName) ->
+ gen_server:call(?SERVER, {register, Class, TypeName, ModuleName}, infinity).
+
+unregister(Class, TypeName) ->
+ gen_server:call(?SERVER, {unregister, Class, TypeName}, infinity).
+
+%% This is used with user-supplied arguments (e.g., on exchange
+%% declare), so we restrict it to existing atoms only. If the atom does
+%% not already exist, the type cannot have been registered, and we
+%% return {error, not_found} rather than create a new atom.
+binary_to_type(TypeBin) when is_binary(TypeBin) ->
+ case catch list_to_existing_atom(binary_to_list(TypeBin)) of
+ {'EXIT', {badarg, _}} -> {error, not_found};
+ TypeAtom -> TypeAtom
+ end.
+
+lookup_module(Class, T) when is_atom(T) ->
+ case ets:lookup(?ETS_NAME, {Class, T}) of
+ [{_, Module}] ->
+ {ok, Module};
+ [] ->
+ {error, not_found}
+ end.
+
+lookup_all(Class) ->
+ [{K, V} || [K, V] <- ets:match(?ETS_NAME, {{Class, '$1'}, '$2'})].
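+
+%% Typical usage (the type name and module are illustrative; register/3
+%% sanity-checks that the module implements the class behaviour):
+%%
+%%   ok = rabbit_registry:register(exchange, <<"x-custom">>, my_exchange_type),
+%%   {ok, my_exchange_type} = rabbit_registry:lookup_module(exchange, 'x-custom'),
+%%   ok = rabbit_registry:unregister(exchange, <<"x-custom">>).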
+
+%%---------------------------------------------------------------------------
+
+internal_binary_to_type(TypeBin) when is_binary(TypeBin) ->
+ list_to_atom(binary_to_list(TypeBin)).
+
+internal_register(Class, TypeName, ModuleName)
+ when is_atom(Class), is_binary(TypeName), is_atom(ModuleName) ->
+ ClassModule = class_module(Class),
+ Type = internal_binary_to_type(TypeName),
+ RegArg = {{Class, Type}, ModuleName},
+ ok = sanity_check_module(ClassModule, ModuleName),
+ true = ets:insert(?ETS_NAME, RegArg),
+ conditional_register(RegArg),
+ ok = ClassModule:added_to_rabbit_registry(Type, ModuleName),
+ ok.
+
+internal_unregister(Class, TypeName) ->
+ ClassModule = class_module(Class),
+ Type = internal_binary_to_type(TypeName),
+ UnregArg = {Class, Type},
+ conditional_unregister(UnregArg),
+ true = ets:delete(?ETS_NAME, UnregArg),
+ ok = ClassModule:removed_from_rabbit_registry(Type),
+ ok.
+
+%% register exchange decorator route callback only when implemented,
+%% in order to avoid unnecessary decorator calls on the fast
+%% publishing path
+conditional_register({{exchange_decorator, Type}, ModuleName}) ->
+ case erlang:function_exported(ModuleName, route, 2) of
+ true -> true = ets:insert(?ETS_NAME,
+ {{exchange_decorator_route, Type},
+ ModuleName});
+ false -> ok
+ end;
+conditional_register(_) ->
+ ok.
+
+conditional_unregister({exchange_decorator, Type}) ->
+ true = ets:delete(?ETS_NAME, {exchange_decorator_route, Type}),
+ ok;
+conditional_unregister(_) ->
+ ok.
+
+sanity_check_module(ClassModule, Module) ->
+ case catch lists:member(ClassModule,
+ lists:flatten(
+ [Bs || {Attr, Bs} <-
+ Module:module_info(attributes),
+ Attr =:= behavior orelse
+ Attr =:= behaviour])) of
+ {'EXIT', {undef, _}} -> {error, not_module};
+ false -> {error, {not_type, ClassModule}};
+ true -> ok
+ end.
+
+
+% Registry class modules. There should exist a module for each registry
+% class. A class module must be a behaviour (export behaviour_info/1) and
+% itself implement the rabbit_registry_class behaviour: export the
+% added_to_rabbit_registry/2 and removed_from_rabbit_registry/1 functions.
+class_module(exchange) -> rabbit_exchange_type;
+class_module(auth_mechanism) -> rabbit_auth_mechanism;
+class_module(runtime_parameter) -> rabbit_runtime_parameter;
+class_module(exchange_decorator) -> rabbit_exchange_decorator;
+class_module(queue_decorator) -> rabbit_queue_decorator;
+class_module(policy_validator) -> rabbit_policy_validator;
+class_module(operator_policy_validator) -> rabbit_policy_validator;
+class_module(policy_merge_strategy) -> rabbit_policy_merge_strategy;
+class_module(ha_mode) -> rabbit_mirror_queue_mode;
+class_module(channel_interceptor) -> rabbit_channel_interceptor;
+class_module(queue_master_locator) -> rabbit_queue_master_locator.
+
+%%---------------------------------------------------------------------------
+
+init([]) ->
+ ?ETS_NAME = ets:new(?ETS_NAME, [protected, set, named_table]),
+ {ok, none}.
+
+handle_call({register, Class, TypeName, ModuleName}, _From, State) ->
+ ok = internal_register(Class, TypeName, ModuleName),
+ {reply, ok, State};
+
+handle_call({unregister, Class, TypeName}, _From, State) ->
+ ok = internal_unregister(Class, TypeName),
+ {reply, ok, State};
+
+handle_call(Request, _From, State) ->
+ {stop, {unhandled_call, Request}, State}.
+
+handle_cast(Request, State) ->
+ {stop, {unhandled_cast, Request}, State}.
+
+handle_info(Message, State) ->
+ {stop, {unhandled_info, Message}, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
diff --git a/deps/rabbit_common/src/rabbit_registry_class.erl b/deps/rabbit_common/src/rabbit_registry_class.erl
new file mode 100644
index 0000000000..c302dc2311
--- /dev/null
+++ b/deps/rabbit_common/src/rabbit_registry_class.erl
@@ -0,0 +1,12 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2016-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_registry_class).
+
+-callback added_to_rabbit_registry(atom(), atom()) -> ok.
+
+-callback removed_from_rabbit_registry(atom()) -> ok.
diff --git a/deps/rabbit_common/src/rabbit_resource_monitor_misc.erl b/deps/rabbit_common/src/rabbit_resource_monitor_misc.erl
new file mode 100644
index 0000000000..6661706998
--- /dev/null
+++ b/deps/rabbit_common/src/rabbit_resource_monitor_misc.erl
@@ -0,0 +1,39 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+
+-module(rabbit_resource_monitor_misc).
+
+-export([parse_information_unit/1]).
+
+-spec parse_information_unit(integer() | string()) ->
+ {ok, integer()} | {error, parse_error}.
+
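+%% Examples (multipliers follow the unit table below; power-of-two for
+%% k/K/KiB-style units, power-of-ten for kB/KB-style units):
+%%   parse_information_unit(1048576) -> {ok, 1048576}
+%%   parse_information_unit("1024")  -> {ok, 1024}
+%%   parse_information_unit("1MiB")  -> {ok, 1048576}
+%%   parse_information_unit("1MB")   -> {ok, 1000000}
+%%   parse_information_unit("oops")  -> {error, parse_error}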
+parse_information_unit(Value) when is_integer(Value) -> {ok, Value};
+parse_information_unit(Value0) ->
+ Value = rabbit_data_coercion:to_list(Value0),
+ case re:run(Value,
+ "^(?<VAL>[0-9]+)(?<UNIT>kB|KB|MB|GB|kb|mb|gb|Kb|Mb|Gb|kiB|KiB|MiB|GiB|kib|mib|gib|KIB|MIB|GIB|k|K|m|M|g|G)?$",
+ [{capture, all_but_first, list}]) of
+ {match, [[], _]} ->
+ {ok, list_to_integer(Value)};
+ {match, [Num]} ->
+ {ok, list_to_integer(Num)};
+ {match, [Num, Unit]} ->
+ Multiplier = case Unit of
+ KiB when KiB =:= "k"; KiB =:= "kiB"; KiB =:= "KiB"; KiB =:= "K"; KiB =:= "KIB"; KiB =:= "kib" -> 1024;
+ MiB when MiB =:= "m"; MiB =:= "MiB"; MiB =:= "M"; MiB =:= "MIB"; MiB =:= "mib" -> 1024*1024;
+ GiB when GiB =:= "g"; GiB =:= "GiB"; GiB =:= "G"; GiB =:= "GIB"; GiB =:= "gib" -> 1024*1024*1024;
+ KB when KB =:= "KB"; KB =:= "kB"; KB =:= "kb"; KB =:= "Kb" -> 1000;
+ MB when MB =:= "MB"; MB =:= "mB"; MB =:= "mb"; MB =:= "Mb" -> 1000000;
+ GB when GB =:= "GB"; GB =:= "gB"; GB =:= "gb"; GB =:= "Gb" -> 1000000000
+ end,
+ {ok, list_to_integer(Num) * Multiplier};
+ nomatch ->
+ % not a valid number with an optional information unit
+ {error, parse_error}
+ end.
diff --git a/deps/rabbit_common/src/rabbit_runtime.erl b/deps/rabbit_common/src/rabbit_runtime.erl
new file mode 100644
index 0000000000..94a5a5fcfe
--- /dev/null
+++ b/deps/rabbit_common/src/rabbit_runtime.erl
@@ -0,0 +1,66 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+%% This module provides access to runtime metrics that are exposed
+%% via CLI tools, management UI or otherwise used by the broker.
+
+-module(rabbit_runtime).
+
+%%
+%% API
+%%
+
+-export([guess_number_of_cpu_cores/0, msacc_stats/1]).
+-export([get_gc_info/1, gc_all_processes/0]).
+-export([get_erl_path/0]).
+
+-spec guess_number_of_cpu_cores() -> pos_integer().
+guess_number_of_cpu_cores() ->
+ case erlang:system_info(logical_processors_available) of
+ unknown -> % Happens on Mac OS X.
+ erlang:system_info(schedulers);
+ N -> N
+ end.
+
+-spec gc_all_processes() -> ok.
+gc_all_processes() ->
+ %% Run GC asynchronously: spawn a process that sweeps all others. We
+ %% don't care about completion notifications, so we don't use the
+ %% asynchronous execution option of erlang:garbage_collect/2.
+ spawn(fun() -> [erlang:garbage_collect(P, []) || P <- erlang:processes()] end),
+ ok.
+
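+%% process_info(Pid, garbage_collection) returns max_heap_size as a
+%% plain integer on older OTP releases and as a map (with a size key)
+%% on more recent ones; get_gc_info/1 flattens the map back to its size
+%% so that callers always see an integer.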
+-spec get_gc_info(pid()) -> nonempty_list(tuple()).
+get_gc_info(Pid) ->
+ {garbage_collection, GC} = erlang:process_info(Pid, garbage_collection),
+ case proplists:get_value(max_heap_size, GC) of
+ I when is_integer(I) ->
+ GC;
+ undefined ->
+ GC;
+ Map ->
+ lists:keyreplace(max_heap_size, 1, GC,
+ {max_heap_size, maps:get(size, Map)})
+ end.
+
+-spec msacc_stats(integer()) -> nonempty_list(#{atom() => any()}).
+msacc_stats(TimeInMs) ->
+ msacc:start(TimeInMs),
+ S = msacc:stats(),
+ msacc:stop(),
+ S.
+
+% get the full path to the erl executable used to start this VM
+-spec get_erl_path() -> file:filename_all().
+get_erl_path() ->
+ {ok, [[Root]]} = init:get_argument(root),
+ Bin = filename:join(Root, "bin"),
+ case os:type() of
+ {win32, _} ->
+ filename:join(Bin, "erl.exe");
+ _ ->
+ filename:join(Bin, "erl")
+ end.
diff --git a/deps/rabbit_common/src/rabbit_runtime_parameter.erl b/deps/rabbit_common/src/rabbit_runtime_parameter.erl
new file mode 100644
index 0000000000..5f9970d25d
--- /dev/null
+++ b/deps/rabbit_common/src/rabbit_runtime_parameter.erl
@@ -0,0 +1,25 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_runtime_parameter).
+
+-behaviour(rabbit_registry_class).
+
+-export([added_to_rabbit_registry/2, removed_from_rabbit_registry/1]).
+
+-type(validate_results() ::
+ 'ok' | {error, string(), [term()]} | [validate_results()]).
+
+-callback validate(rabbit_types:vhost(), binary(), binary(),
+ term(), rabbit_types:user()) -> validate_results().
+-callback notify(rabbit_types:vhost(), binary(), binary(), term(),
+ rabbit_types:username()) -> 'ok'.
+-callback notify_clear(rabbit_types:vhost(), binary(), binary(),
+ rabbit_types:username()) -> 'ok'.
+
+added_to_rabbit_registry(_Type, _ModuleName) -> ok.
+removed_from_rabbit_registry(_Type) -> ok.
diff --git a/deps/rabbit_common/src/rabbit_semver.erl b/deps/rabbit_common/src/rabbit_semver.erl
new file mode 100644
index 0000000000..c80db0c27a
--- /dev/null
+++ b/deps/rabbit_common/src/rabbit_semver.erl
@@ -0,0 +1,730 @@
+%%% vi:ts=4 sw=4 et
+
+%%% Imported from https://github.com/erlware/erlware_commons.git
+%%% Commit 09168347525916e291c8aa6e3073e260e5f4a116
+%%% - We export normalize/1.
+%%% - We add a few more testcases around string/binary comparison.
+
+%%%-------------------------------------------------------------------
+%%% @copyright (C) 2011, Erlware LLC
+%%% @doc
+%%% Helper functions for working with semver versioning strings.
+%%% See https://semver.org/ for the spec.
+%%% @end
+%%%-------------------------------------------------------------------
+-module(rabbit_semver).
+
+-export([parse/1,
+ format/1,
+ eql/2,
+ gt/2,
+ gte/2,
+ lt/2,
+ lte/2,
+ pes/2,
+ normalize/1,
+ between/3]).
+
+%% For internal use by the rabbit_semver_parser peg
+-export([internal_parse_version/1]).
+
+-export_type([semver/0,
+ version_string/0,
+ any_version/0]).
+
+%%%===================================================================
+%%% Public Types
+%%%===================================================================
+
+-type version_element() :: non_neg_integer() | binary().
+
+-type major_minor_patch_minpatch() ::
+ version_element()
+ | {version_element(), version_element()}
+ | {version_element(), version_element(), version_element()}
+ | {version_element(), version_element(),
+ version_element(), version_element()}.
+
+-type alpha_part() :: integer() | binary() | string().
+-type alpha_info() :: {PreRelease::[alpha_part()],
+ BuildVersion::[alpha_part()]}.
+
+-type semver() :: {major_minor_patch_minpatch(), alpha_info()}.
+
+-type version_string() :: string() | binary().
+
+-type any_version() :: version_string() | semver().
+
+%%%===================================================================
+%%% API
+%%%===================================================================
+
+%% @doc parse a string or binary into a valid semver representation
+-spec parse(any_version()) -> semver().
+parse(Version) when erlang:is_list(Version) ->
+ case rabbit_semver_parser:parse(Version) of
+ {fail, _} ->
+ {erlang:iolist_to_binary(Version), {[],[]}};
+ Good ->
+ Good
+ end;
+parse(Version) when erlang:is_binary(Version) ->
+ case rabbit_semver_parser:parse(Version) of
+ {fail, _} ->
+ {Version, {[],[]}};
+ Good ->
+ Good
+ end;
+parse(Version) ->
+ Version.
+
+-spec format(semver()) -> iolist().
+format({Maj, {AlphaPart, BuildPart}})
+ when erlang:is_integer(Maj);
+ erlang:is_binary(Maj) ->
+ [format_version_part(Maj),
+ format_vsn_rest(<<"-">>, AlphaPart),
+ format_vsn_rest(<<"+">>, BuildPart)];
+format({{Maj, Min}, {AlphaPart, BuildPart}}) ->
+ [format_version_part(Maj), ".",
+ format_version_part(Min),
+ format_vsn_rest(<<"-">>, AlphaPart),
+ format_vsn_rest(<<"+">>, BuildPart)];
+format({{Maj, Min, Patch}, {AlphaPart, BuildPart}}) ->
+ [format_version_part(Maj), ".",
+ format_version_part(Min), ".",
+ format_version_part(Patch),
+ format_vsn_rest(<<"-">>, AlphaPart),
+ format_vsn_rest(<<"+">>, BuildPart)];
+format({{Maj, Min, Patch, MinPatch}, {AlphaPart, BuildPart}}) ->
+ [format_version_part(Maj), ".",
+ format_version_part(Min), ".",
+ format_version_part(Patch), ".",
+ format_version_part(MinPatch),
+ format_vsn_rest(<<"-">>, AlphaPart),
+ format_vsn_rest(<<"+">>, BuildPart)].
+
+-spec format_version_part(integer() | binary()) -> iolist().
+format_version_part(Vsn)
+ when erlang:is_integer(Vsn) ->
+ erlang:integer_to_list(Vsn);
+format_version_part(Vsn)
+ when erlang:is_binary(Vsn) ->
+ Vsn.
+
+
+
+%% @doc test for equality between semver versions
+-spec eql(any_version(), any_version()) -> boolean().
+eql(VsnA, VsnB) ->
+ NVsnA = normalize(parse(VsnA)),
+ NVsnB = normalize(parse(VsnB)),
+ NVsnA =:= NVsnB.
+
+%% @doc Test that VsnA is greater than VsnB
+-spec gt(any_version(), any_version()) -> boolean().
+gt(VsnA, VsnB) ->
+ {MMPA, {AlphaA, PatchA}} = normalize(parse(VsnA)),
+ {MMPB, {AlphaB, PatchB}} = normalize(parse(VsnB)),
+ ((MMPA > MMPB)
+ orelse
+ ((MMPA =:= MMPB)
+ andalso
+ ((AlphaA =:= [] andalso AlphaB =/= [])
+ orelse
+ ((not (AlphaB =:= [] andalso AlphaA =/= []))
+ andalso
+ (AlphaA > AlphaB))))
+ orelse
+ ((MMPA =:= MMPB)
+ andalso
+ (AlphaA =:= AlphaB)
+ andalso
+ ((PatchB =:= [] andalso PatchA =/= [])
+ orelse
+ PatchA > PatchB))).
+
+%% @doc Test that VsnA is greater than or equal to VsnB
+-spec gte(any_version(), any_version()) -> boolean().
+gte(VsnA, VsnB) ->
+ NVsnA = normalize(parse(VsnA)),
+ NVsnB = normalize(parse(VsnB)),
+ gt(NVsnA, NVsnB) orelse eql(NVsnA, NVsnB).
+
+%% @doc Test that VsnA is less than VsnB
+-spec lt(any_version(), any_version()) -> boolean().
+lt(VsnA, VsnB) ->
+ {MMPA, {AlphaA, PatchA}} = normalize(parse(VsnA)),
+ {MMPB, {AlphaB, PatchB}} = normalize(parse(VsnB)),
+ ((MMPA < MMPB)
+ orelse
+ ((MMPA =:= MMPB)
+ andalso
+ ((AlphaB =:= [] andalso AlphaA =/= [])
+ orelse
+ ((not (AlphaA =:= [] andalso AlphaB =/= []))
+ andalso
+ (AlphaA < AlphaB))))
+ orelse
+ ((MMPA =:= MMPB)
+ andalso
+ (AlphaA =:= AlphaB)
+ andalso
+ ((PatchA =:= [] andalso PatchB =/= [])
+ orelse
+ PatchA < PatchB))).
+
+%% @doc Test that VsnA is less than or equal to VsnB
+-spec lte(any_version(), any_version()) -> boolean().
+lte(VsnA, VsnB) ->
+ NVsnA = normalize(parse(VsnA)),
+ NVsnB = normalize(parse(VsnB)),
+ lt(NVsnA, NVsnB) orelse eql(NVsnA, NVsnB).
+
+%% @doc Test that VsnMatch is greater than or equal to Vsn1 and
+%% less than or equal to Vsn2
+-spec between(any_version(), any_version(), any_version()) -> boolean().
+between(Vsn1, Vsn2, VsnMatch) ->
+ NVsnA = normalize(parse(Vsn1)),
+ NVsnB = normalize(parse(Vsn2)),
+ NVsnMatch = normalize(parse(VsnMatch)),
+ gte(NVsnMatch, NVsnA) andalso
+ lte(NVsnMatch, NVsnB).
+
+%% @doc check that VsnA is approximately greater than VsnB
+%%
+%% Specifying ">= 2.6.5" is an optimistic version constraint. All
+%% versions greater than the one specified, including major releases
+%% (e.g. 3.0.0) are allowed.
+%%
+%% Conversely, specifying "~> 2.6" is pessimistic about future major
+%% revisions and "~> 2.6.5" is pessimistic about future minor
+%% revisions.
+%%
+%% "~> 2.6" matches cookbooks >= 2.6.0 AND &lt; 3.0.0
+%% "~> 2.6.5" matches cookbooks >= 2.6.5 AND &lt; 2.7.0
+pes(VsnA, VsnB) ->
+ internal_pes(parse(VsnA), parse(VsnB)).
+
+%%%===================================================================
+%%% Friend Functions
+%%%===================================================================
+%% @doc helper function for the peg grammar to parse the iolist into a semver
+-spec internal_parse_version(iolist()) -> semver().
+internal_parse_version([MMP, AlphaPart, BuildPart, _]) ->
+ {parse_major_minor_patch_minpatch(MMP), {parse_alpha_part(AlphaPart),
+ parse_alpha_part(BuildPart)}}.
+
+%% @doc helper function for the peg grammar to parse the iolist into a major_minor_patch
+-spec parse_major_minor_patch_minpatch(iolist()) -> major_minor_patch_minpatch().
+parse_major_minor_patch_minpatch([MajVsn, [], [], []]) ->
+ strip_maj_version(MajVsn);
+parse_major_minor_patch_minpatch([MajVsn, [<<".">>, MinVsn], [], []]) ->
+ {strip_maj_version(MajVsn), MinVsn};
+parse_major_minor_patch_minpatch([MajVsn,
+ [<<".">>, MinVsn],
+ [<<".">>, PatchVsn], []]) ->
+ {strip_maj_version(MajVsn), MinVsn, PatchVsn};
+parse_major_minor_patch_minpatch([MajVsn,
+ [<<".">>, MinVsn],
+ [<<".">>, PatchVsn],
+ [<<".">>, MinPatch]]) ->
+ {strip_maj_version(MajVsn), MinVsn, PatchVsn, MinPatch}.
+
+%% @doc helper function for the peg grammar to parse the iolist into an alpha part
+-spec parse_alpha_part(iolist()) -> [alpha_part()].
+parse_alpha_part([]) ->
+ [];
+parse_alpha_part([_, AV1, Rest]) ->
+ [erlang:iolist_to_binary(AV1) |
+ [format_alpha_part(Part) || Part <- Rest]].
+
+%% @doc according to semver, alpha parts that can be treated as
+%% numbers must be. We implement that here by trying to convert the
+%% alpha part to a number; if the conversion succeeds we use the
+%% integer, otherwise we keep the binary as-is.
+-spec format_alpha_part(iolist()) -> integer() | binary().
+format_alpha_part([<<".">>, AlphaPart]) ->
+ Bin = erlang:iolist_to_binary(AlphaPart),
+ try
+ erlang:list_to_integer(erlang:binary_to_list(Bin))
+ catch
+ error:badarg ->
+ Bin
+ end.
+
+%%%===================================================================
+%%% Internal Functions
+%%%===================================================================
+-spec strip_maj_version(iolist()) -> version_element().
+strip_maj_version([<<"v">>, MajVsn]) ->
+ MajVsn;
+strip_maj_version([[], MajVsn]) ->
+ MajVsn;
+strip_maj_version(MajVsn) ->
+ MajVsn.
+
+-spec to_list(integer() | binary() | string()) -> string() | binary().
+to_list(Detail) when erlang:is_integer(Detail) ->
+ erlang:integer_to_list(Detail);
+to_list(Detail) when erlang:is_list(Detail); erlang:is_binary(Detail) ->
+ Detail.
+
+-spec format_vsn_rest(binary() | string(), [integer() | binary()]) -> iolist().
+format_vsn_rest(_TypeMark, []) ->
+ [];
+format_vsn_rest(TypeMark, [Head | Rest]) ->
+ [TypeMark, Head |
+ [[".", to_list(Detail)] || Detail <- Rest]].
+
+%% @doc normalize a semver so that versions can be compared
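+%%
+%% For example, a two-element version gains zero patch components:
+%%   normalize({{1, 2}, {[], []}}) -> {{1, 2, 0, 0}, {[], []}}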
+-spec normalize(semver()) -> semver().
+normalize({Vsn, Rest})
+ when erlang:is_binary(Vsn);
+ erlang:is_integer(Vsn) ->
+ {{Vsn, 0, 0, 0}, Rest};
+normalize({{Maj, Min}, Rest}) ->
+ {{Maj, Min, 0, 0}, Rest};
+normalize({{Maj, Min, Patch}, Rest}) ->
+ {{Maj, Min, Patch, 0}, Rest};
+normalize(Other = {{_, _, _, _}, {_,_}}) ->
+ Other.
+
+%% @doc to do the pessimistic compare we need a parsed semver. This is
+%% the internal implementation of the pessimistic comparison; the
+%% external function just ensures that versions are parsed.
+-spec internal_pes(semver(), semver()) -> boolean().
+internal_pes(VsnA, {{LM, LMI}, _})
+ when erlang:is_integer(LM),
+ erlang:is_integer(LMI) ->
+ gte(VsnA, {{LM, LMI, 0}, {[], []}}) andalso
+ lt(VsnA, {{LM + 1, 0, 0, 0}, {[], []}});
+internal_pes(VsnA, {{LM, LMI, LP}, _})
+ when erlang:is_integer(LM),
+ erlang:is_integer(LMI),
+ erlang:is_integer(LP) ->
+ gte(VsnA, {{LM, LMI, LP}, {[], []}})
+ andalso
+ lt(VsnA, {{LM, LMI + 1, 0, 0}, {[], []}});
+internal_pes(VsnA, {{LM, LMI, LP, LMP}, _})
+ when erlang:is_integer(LM),
+ erlang:is_integer(LMI),
+ erlang:is_integer(LP),
+ erlang:is_integer(LMP) ->
+ gte(VsnA, {{LM, LMI, LP, LMP}, {[], []}})
+ andalso
+ lt(VsnA, {{LM, LMI, LP + 1, 0}, {[], []}});
+internal_pes(Vsn, LVsn) ->
+ gte(Vsn, LVsn).
+
+%%%===================================================================
+%%% Test Functions
+%%%===================================================================
+
+-ifdef(TEST).
+-include_lib("eunit/include/eunit.hrl").
+
+eql_test() ->
+ ?assertMatch(true, eql("1.0.0-alpha",
+ "1.0.0-alpha")),
+ ?assertMatch(true, eql(<<"1.0.0-alpha">>,
+ "1.0.0-alpha")),
+ ?assertMatch(true, eql("1.0.0-alpha",
+ <<"1.0.0-alpha">>)),
+ ?assertMatch(true, eql(<<"1.0.0-alpha">>,
+ <<"1.0.0-alpha">>)),
+ ?assertMatch(true, eql("v1.0.0-alpha",
+ "1.0.0-alpha")),
+ ?assertMatch(true, eql("1",
+ "1.0.0")),
+ ?assertMatch(true, eql("v1",
+ "v1.0.0")),
+ ?assertMatch(true, eql("1.0",
+ "1.0.0")),
+ ?assertMatch(true, eql("1.0.0",
+ "1")),
+ ?assertMatch(true, eql("1.0.0.0",
+ "1")),
+ ?assertMatch(true, eql("1.0+alpha.1",
+ "1.0.0+alpha.1")),
+ ?assertMatch(true, eql("1.0-alpha.1+build.1",
+ "1.0.0-alpha.1+build.1")),
+ ?assertMatch(true, eql("1.0-alpha.1+build.1",
+ "1.0.0.0-alpha.1+build.1")),
+ ?assertMatch(true, eql("1.0-alpha.1+build.1",
+ "v1.0.0.0-alpha.1+build.1")),
+ ?assertMatch(true, eql("aa", "aa")),
+ ?assertMatch(true, eql("AA.BB", "AA.BB")),
+ ?assertMatch(true, eql("BBB-super", "BBB-super")),
+ ?assertMatch(true, not eql("1.0.0",
+ "1.0.1")),
+ ?assertMatch(true, not eql(<<"1.0.0">>,
+ "1.0.1")),
+ ?assertMatch(true, not eql("1.0.0",
+ <<"1.0.1">>)),
+ ?assertMatch(true, not eql(<<"1.0.0">>,
+ <<"1.0.1">>)),
+ ?assertMatch(true, not eql("1.0.0-alpha",
+ "1.0.1+alpha")),
+ ?assertMatch(true, not eql("1.0.0+build.1",
+ "1.0.1+build.2")),
+ ?assertMatch(true, not eql("1.0.0.0+build.1",
+ "1.0.0.1+build.2")),
+ ?assertMatch(true, not eql("FFF", "BBB")),
+ ?assertMatch(true, not eql("1", "1BBBB")).
+
+gt_test() ->
+ ?assertMatch(true, gt("1.0.0-alpha.1",
+ "1.0.0-alpha")),
+ ?assertMatch(true, gt("1.0.0.1-alpha.1",
+ "1.0.0.1-alpha")),
+ ?assertMatch(true, gt("1.0.0.4-alpha.1",
+ "1.0.0.2-alpha")),
+ ?assertMatch(true, gt("1.0.0.0-alpha.1",
+ "1.0.0-alpha")),
+ ?assertMatch(true, gt("1.0.0-beta.2",
+ "1.0.0-alpha.1")),
+ ?assertMatch(true, gt("1.0.0-beta.11",
+ "1.0.0-beta.2")),
+ ?assertMatch(true, gt("1.0.0-beta.11",
+ "1.0.0.0-beta.2")),
+ ?assertMatch(true, gt("1.0.0-rc.1", "1.0.0-beta.11")),
+ ?assertMatch(true, gt("1.0.0-rc.1+build.1", "1.0.0-rc.1")),
+ ?assertMatch(true, gt("1.0.0", "1.0.0-rc.1+build.1")),
+ ?assertMatch(true, gt("1.0.0+0.3.7", "1.0.0")),
+ ?assertMatch(true, gt("1.3.7+build", "1.0.0+0.3.7")),
+ ?assertMatch(true, gt("1.3.7+build.2.b8f12d7",
+ "1.3.7+build")),
+ ?assertMatch(true, gt("1.3.7+build.2.b8f12d7",
+ "1.3.7.0+build")),
+ ?assertMatch(true, gt("1.3.7+build.11.e0f985a",
+ "1.3.7+build.2.b8f12d7")),
+ ?assertMatch(true, gt("aa.cc",
+ "aa.bb")),
+ ?assertMatch(true, not gt("1.0.0-alpha",
+ "1.0.0-alpha.1")),
+ ?assertMatch(true, not gt("1.0.0-alpha",
+ "1.0.0.0-alpha.1")),
+ ?assertMatch(true, not gt("1.0.0-alpha.1",
+ "1.0.0-beta.2")),
+ ?assertMatch(true, not gt("1.0.0-beta.2",
+ "1.0.0-beta.11")),
+ ?assertMatch(true, not gt("1.0.0-beta.11",
+ "1.0.0-rc.1")),
+ ?assertMatch(true, not gt("1.0.0-rc.1",
+ "1.0.0-rc.1+build.1")),
+ ?assertMatch(true, not gt("1.0.0-rc.1+build.1",
+ "1.0.0")),
+ ?assertMatch(true, not gt("1.0.0",
+ "1.0.0+0.3.7")),
+ ?assertMatch(true, not gt("1.0.0+0.3.7",
+ "1.3.7+build")),
+ ?assertMatch(true, not gt("1.3.7+build",
+ "1.3.7+build.2.b8f12d7")),
+ ?assertMatch(true, not gt("1.3.7+build.2.b8f12d7",
+ "1.3.7+build.11.e0f985a")),
+ ?assertMatch(true, not gt("1.0.0-alpha",
+ "1.0.0-alpha")),
+ ?assertMatch(true, not gt("1",
+ "1.0.0")),
+ ?assertMatch(true, not gt("aa.bb",
+ "aa.bb")),
+ ?assertMatch(true, not gt("aa.cc",
+ "aa.dd")),
+ ?assertMatch(true, not gt("1.0",
+ "1.0.0")),
+ ?assertMatch(true, not gt("1.0.0",
+ "1")),
+ ?assertMatch(true, not gt("1.0+alpha.1",
+ "1.0.0+alpha.1")),
+ ?assertMatch(true, not gt("1.0-alpha.1+build.1",
+ "1.0.0-alpha.1+build.1")).
+
+lt_test() ->
+ ?assertMatch(true, lt("1.0.0-alpha",
+ "1.0.0-alpha.1")),
+ ?assertMatch(true, lt("1.0.0-alpha",
+ "1.0.0.0-alpha.1")),
+ ?assertMatch(true, lt("1.0.0-alpha.1",
+ "1.0.0-beta.2")),
+ ?assertMatch(true, lt("1.0.0-beta.2",
+ "1.0.0-beta.11")),
+ ?assertMatch(true, lt("1.0.0-beta.11",
+ "1.0.0-rc.1")),
+ ?assertMatch(true, lt("1.0.0.1-beta.11",
+ "1.0.0.1-rc.1")),
+ ?assertMatch(true, lt("1.0.0-rc.1",
+ "1.0.0-rc.1+build.1")),
+ ?assertMatch(true, lt("1.0.0-rc.1+build.1",
+ "1.0.0")),
+ ?assertMatch(true, lt("1.0.0",
+ "1.0.0+0.3.7")),
+ ?assertMatch(true, lt("1.0.0+0.3.7",
+ "1.3.7+build")),
+ ?assertMatch(true, lt("1.3.7+build",
+ "1.3.7+build.2.b8f12d7")),
+ ?assertMatch(true, lt("1.3.7+build.2.b8f12d7",
+ "1.3.7+build.11.e0f985a")),
+ ?assertMatch(true, not lt("1.0.0-alpha",
+ "1.0.0-alpha")),
+ ?assertMatch(true, not lt("1",
+ "1.0.0")),
+ ?assertMatch(true, lt("1",
+ "1.0.0.1")),
+ ?assertMatch(true, lt("AA.DD",
+ "AA.EE")),
+ ?assertMatch(true, not lt("1.0",
+ "1.0.0")),
+ ?assertMatch(true, not lt("1.0.0.0",
+ "1")),
+ ?assertMatch(true, not lt("1.0+alpha.1",
+ "1.0.0+alpha.1")),
+ ?assertMatch(true, not lt("AA.DD", "AA.CC")),
+ ?assertMatch(true, not lt("1.0-alpha.1+build.1",
+ "1.0.0-alpha.1+build.1")),
+ ?assertMatch(true, not lt("1.0.0-alpha.1",
+ "1.0.0-alpha")),
+ ?assertMatch(true, not lt("1.0.0-beta.2",
+ "1.0.0-alpha.1")),
+ ?assertMatch(true, not lt("1.0.0-beta.11",
+ "1.0.0-beta.2")),
+ ?assertMatch(true, not lt("1.0.0-rc.1", "1.0.0-beta.11")),
+ ?assertMatch(true, not lt("1.0.0-rc.1+build.1", "1.0.0-rc.1")),
+ ?assertMatch(true, not lt("1.0.0", "1.0.0-rc.1+build.1")),
+ ?assertMatch(true, not lt("1.0.0+0.3.7", "1.0.0")),
+ ?assertMatch(true, not lt("1.3.7+build", "1.0.0+0.3.7")),
+ ?assertMatch(true, not lt("1.3.7+build.2.b8f12d7",
+ "1.3.7+build")),
+ ?assertMatch(true, not lt("1.3.7+build.11.e0f985a",
+ "1.3.7+build.2.b8f12d7")).
+
+gte_test() ->
+ ?assertMatch(true, gte("1.0.0-alpha",
+ "1.0.0-alpha")),
+
+ ?assertMatch(true, gte("1",
+ "1.0.0")),
+
+ ?assertMatch(true, gte("1.0",
+ "1.0.0")),
+
+ ?assertMatch(true, gte("1.0.0",
+ "1")),
+
+ ?assertMatch(true, gte("1.0.0.0",
+ "1")),
+
+ ?assertMatch(true, gte("1.0+alpha.1",
+ "1.0.0+alpha.1")),
+
+ ?assertMatch(true, gte("1.0-alpha.1+build.1",
+ "1.0.0-alpha.1+build.1")),
+
+ ?assertMatch(true, gte("1.0.0-alpha.1+build.1",
+ "1.0.0.0-alpha.1+build.1")),
+ ?assertMatch(true, gte("1.0.0-alpha.1",
+ "1.0.0-alpha")),
+ ?assertMatch(true, gte("1.0.0-beta.2",
+ "1.0.0-alpha.1")),
+ ?assertMatch(true, gte("1.0.0-beta.11",
+ "1.0.0-beta.2")),
+ ?assertMatch(true, gte("aa.bb", "aa.bb")),
+ ?assertMatch(true, gte("dd", "aa")),
+ ?assertMatch(true, gte("1.0.0-rc.1", "1.0.0-beta.11")),
+ ?assertMatch(true, gte("1.0.0-rc.1+build.1", "1.0.0-rc.1")),
+ ?assertMatch(true, gte("1.0.0", "1.0.0-rc.1+build.1")),
+ ?assertMatch(true, gte("1.0.0+0.3.7", "1.0.0")),
+ ?assertMatch(true, gte("1.3.7+build", "1.0.0+0.3.7")),
+ ?assertMatch(true, gte("1.3.7+build.2.b8f12d7",
+ "1.3.7+build")),
+ ?assertMatch(true, gte("1.3.7+build.11.e0f985a",
+ "1.3.7+build.2.b8f12d7")),
+ ?assertMatch(true, not gte("1.0.0-alpha",
+ "1.0.0-alpha.1")),
+ ?assertMatch(true, not gte("CC", "DD")),
+ ?assertMatch(true, not gte("1.0.0-alpha.1",
+ "1.0.0-beta.2")),
+ ?assertMatch(true, not gte("1.0.0-beta.2",
+ "1.0.0-beta.11")),
+ ?assertMatch(true, not gte("1.0.0-beta.11",
+ "1.0.0-rc.1")),
+ ?assertMatch(true, not gte("1.0.0-rc.1",
+ "1.0.0-rc.1+build.1")),
+ ?assertMatch(true, not gte("1.0.0-rc.1+build.1",
+ "1.0.0")),
+ ?assertMatch(true, not gte("1.0.0",
+ "1.0.0+0.3.7")),
+ ?assertMatch(true, not gte("1.0.0+0.3.7",
+ "1.3.7+build")),
+ ?assertMatch(true, not gte("1.0.0",
+ "1.0.0+build.1")),
+ ?assertMatch(true, not gte("1.3.7+build",
+ "1.3.7+build.2.b8f12d7")),
+ ?assertMatch(true, not gte("1.3.7+build.2.b8f12d7",
+ "1.3.7+build.11.e0f985a")).
+
+lte_test() ->
+ ?assertMatch(true, lte("1.0.0-alpha",
+ "1.0.0-alpha.1")),
+ ?assertMatch(true, lte("1.0.0-alpha.1",
+ "1.0.0-beta.2")),
+ ?assertMatch(true, lte("1.0.0-beta.2",
+ "1.0.0-beta.11")),
+ ?assertMatch(true, lte("1.0.0-beta.11",
+ "1.0.0-rc.1")),
+ ?assertMatch(true, lte("1.0.0-rc.1",
+ "1.0.0-rc.1+build.1")),
+ ?assertMatch(true, lte("1.0.0-rc.1+build.1",
+ "1.0.0")),
+ ?assertMatch(true, lte("1.0.0",
+ "1.0.0+0.3.7")),
+ ?assertMatch(true, lte("1.0.0+0.3.7",
+ "1.3.7+build")),
+ ?assertMatch(true, lte("1.3.7+build",
+ "1.3.7+build.2.b8f12d7")),
+ ?assertMatch(true, lte("1.3.7+build.2.b8f12d7",
+ "1.3.7+build.11.e0f985a")),
+ ?assertMatch(true, lte("1.0.0-alpha",
+ "1.0.0-alpha")),
+ ?assertMatch(true, lte("1",
+ "1.0.0")),
+ ?assertMatch(true, lte("1.0",
+ "1.0.0")),
+ ?assertMatch(true, lte("1.0.0",
+ "1")),
+ ?assertMatch(true, lte("1.0+alpha.1",
+ "1.0.0+alpha.1")),
+ ?assertMatch(true, lte("1.0.0.0+alpha.1",
+ "1.0.0+alpha.1")),
+ ?assertMatch(true, lte("1.0-alpha.1+build.1",
+ "1.0.0-alpha.1+build.1")),
+ ?assertMatch(true, lte("aa","cc")),
+ ?assertMatch(true, lte("cc","cc")),
+ ?assertMatch(true, not lte("1.0.0-alpha.1",
+ "1.0.0-alpha")),
+ ?assertMatch(true, not lte("cc", "aa")),
+ ?assertMatch(true, not lte("1.0.0-beta.2",
+ "1.0.0-alpha.1")),
+ ?assertMatch(true, not lte("1.0.0-beta.11",
+ "1.0.0-beta.2")),
+ ?assertMatch(true, not lte("1.0.0-rc.1", "1.0.0-beta.11")),
+ ?assertMatch(true, not lte("1.0.0-rc.1+build.1", "1.0.0-rc.1")),
+ ?assertMatch(true, not lte("1.0.0", "1.0.0-rc.1+build.1")),
+ ?assertMatch(true, not lte("1.0.0+0.3.7", "1.0.0")),
+ ?assertMatch(true, not lte("1.3.7+build", "1.0.0+0.3.7")),
+ ?assertMatch(true, not lte("1.3.7+build.2.b8f12d7",
+ "1.3.7+build")),
+ ?assertMatch(true, not lte("1.3.7+build.11.e0f985a",
+ "1.3.7+build.2.b8f12d7")).
+
+between_test() ->
+ ?assertMatch(true, between("1.0.0-alpha",
+ "1.0.0-alpha.3",
+ "1.0.0-alpha.2")),
+ ?assertMatch(true, between("1.0.0-alpha.1",
+ "1.0.0-beta.2",
+ "1.0.0-alpha.25")),
+ ?assertMatch(true, between("1.0.0-beta.2",
+ "1.0.0-beta.11",
+ "1.0.0-beta.7")),
+ ?assertMatch(true, between("1.0.0-beta.11",
+ "1.0.0-rc.3",
+ "1.0.0-rc.1")),
+ ?assertMatch(true, between("1.0.0-rc.1",
+ "1.0.0-rc.1+build.3",
+ "1.0.0-rc.1+build.1")),
+
+ ?assertMatch(true, between("1.0.0.0-rc.1",
+ "1.0.0-rc.1+build.3",
+ "1.0.0-rc.1+build.1")),
+ ?assertMatch(true, between("1.0.0-rc.1+build.1",
+ "1.0.0",
+ "1.0.0-rc.33")),
+ ?assertMatch(true, between("1.0.0",
+ "1.0.0+0.3.7",
+ "1.0.0+0.2")),
+ ?assertMatch(true, between("1.0.0+0.3.7",
+ "1.3.7+build",
+ "1.2")),
+ ?assertMatch(true, between("1.3.7+build",
+ "1.3.7+build.2.b8f12d7",
+ "1.3.7+build.1")),
+ ?assertMatch(true, between("1.3.7+build.2.b8f12d7",
+ "1.3.7+build.11.e0f985a",
+ "1.3.7+build.10.a36faa")),
+ ?assertMatch(true, between("1.0.0-alpha",
+ "1.0.0-alpha",
+ "1.0.0-alpha")),
+ ?assertMatch(true, between("1",
+ "1.0.0",
+ "1.0.0")),
+ ?assertMatch(true, between("1.0",
+ "1.0.0",
+ "1.0.0")),
+
+ ?assertMatch(true, between("1.0",
+ "1.0.0.0",
+ "1.0.0.0")),
+ ?assertMatch(true, between("1.0.0",
+ "1",
+ "1")),
+ ?assertMatch(true, between("1.0+alpha.1",
+ "1.0.0+alpha.1",
+ "1.0.0+alpha.1")),
+ ?assertMatch(true, between("1.0-alpha.1+build.1",
+ "1.0.0-alpha.1+build.1",
+ "1.0.0-alpha.1+build.1")),
+ ?assertMatch(true, between("aaa",
+ "ddd",
+ "cc")),
+ ?assertMatch(true, not between("1.0.0-alpha.1",
+ "1.0.0-alpha.22",
+ "1.0.0")),
+ ?assertMatch(true, not between("1.0.0",
+ "1.0.0-alpha.1",
+ "2.0")),
+ ?assertMatch(true, not between("1.0.0-beta.1",
+ "1.0.0-beta.11",
+ "1.0.0-alpha")),
+ ?assertMatch(true, not between("1.0.0-beta.11", "1.0.0-rc.1",
+ "1.0.0-rc.22")),
+ ?assertMatch(true, not between("aaa", "ddd", "zzz")).
+
+pes_test() ->
+ ?assertMatch(true, pes("2.6.0", "2.6")),
+ ?assertMatch(true, pes("2.7", "2.6")),
+ ?assertMatch(true, pes("2.8", "2.6")),
+ ?assertMatch(true, pes("2.9", "2.6")),
+ ?assertMatch(true, pes("A.B", "A.A")),
+ ?assertMatch(true, not pes("3.0.0", "2.6")),
+ ?assertMatch(true, not pes("2.5", "2.6")),
+ ?assertMatch(true, pes("2.6.5", "2.6.5")),
+ ?assertMatch(true, pes("2.6.6", "2.6.5")),
+ ?assertMatch(true, pes("2.6.7", "2.6.5")),
+ ?assertMatch(true, pes("2.6.8", "2.6.5")),
+ ?assertMatch(true, pes("2.6.9", "2.6.5")),
+ ?assertMatch(true, pes("2.6.0.9", "2.6.0.5")),
+ ?assertMatch(true, not pes("2.7", "2.6.5")),
+ ?assertMatch(true, not pes("2.1.7", "2.1.6.5")),
+ ?assertMatch(true, not pes("A.A", "A.B")),
+ ?assertMatch(true, not pes("2.5", "2.6.5")).
+
+version_format_test() ->
+ ?assertEqual(["1", [], []], format({1, {[],[]}})),
+ ?assertEqual(["1", ".", "2", ".", "34", [], []], format({{1,2,34},{[],[]}})),
+ ?assertEqual(<<"a">>, erlang:iolist_to_binary(format({<<"a">>, {[],[]}}))),
+ ?assertEqual(<<"a.b">>, erlang:iolist_to_binary(format({{<<"a">>,<<"b">>}, {[],[]}}))),
+ ?assertEqual(<<"1">>, erlang:iolist_to_binary(format({1, {[],[]}}))),
+ ?assertEqual(<<"1.2">>, erlang:iolist_to_binary(format({{1,2}, {[],[]}}))),
+ ?assertEqual(<<"1.2.2">>, erlang:iolist_to_binary(format({{1,2,2}, {[],[]}}))),
+ ?assertEqual(<<"1.99.2">>, erlang:iolist_to_binary(format({{1,99,2}, {[],[]}}))),
+ ?assertEqual(<<"1.99.2-alpha">>, erlang:iolist_to_binary(format({{1,99,2}, {[<<"alpha">>],[]}}))),
+ ?assertEqual(<<"1.99.2-alpha.1">>, erlang:iolist_to_binary(format({{1,99,2}, {[<<"alpha">>,1], []}}))),
+ ?assertEqual(<<"1.99.2+build.1.a36">>,
+ erlang:iolist_to_binary(format({{1,99,2}, {[], [<<"build">>, 1, <<"a36">>]}}))),
+ ?assertEqual(<<"1.99.2.44+build.1.a36">>,
+ erlang:iolist_to_binary(format({{1,99,2,44}, {[], [<<"build">>, 1, <<"a36">>]}}))),
+ ?assertEqual(<<"1.99.2-alpha.1+build.1.a36">>,
+ erlang:iolist_to_binary(format({{1,99,2}, {[<<"alpha">>, 1], [<<"build">>, 1, <<"a36">>]}}))),
+ ?assertEqual(<<"1">>, erlang:iolist_to_binary(format({1, {[],[]}}))).
+
+-endif.
diff --git a/deps/rabbit_common/src/rabbit_semver_parser.erl b/deps/rabbit_common/src/rabbit_semver_parser.erl
new file mode 100644
index 0000000000..3a036021f7
--- /dev/null
+++ b/deps/rabbit_common/src/rabbit_semver_parser.erl
@@ -0,0 +1,306 @@
+%%% Imported from https://github.com/erlware/erlware_commons.git
+%%% Commit 09168347525916e291c8aa6e3073e260e5f4a116
+
+-module(rabbit_semver_parser).
+-export([parse/1,file/1]).
+-define(p_anything,true).
+-define(p_charclass,true).
+-define(p_choose,true).
+-define(p_not,true).
+-define(p_one_or_more,true).
+-define(p_optional,true).
+-define(p_scan,true).
+-define(p_seq,true).
+-define(p_string,true).
+-define(p_zero_or_more,true).
+
+
+
+-spec file(file:name()) -> any().
+file(Filename) -> case file:read_file(Filename) of {ok,Bin} -> parse(Bin); Err -> Err end.
+
+-spec parse(binary() | list()) -> any().
+parse(List) when is_list(List) -> parse(unicode:characters_to_binary(List));
+parse(Input) when is_binary(Input) ->
+ _ = setup_memo(),
+ Result = case 'semver'(Input,{{line,1},{column,1}}) of
+ {AST, <<>>, _Index} -> AST;
+ Any -> Any
+ end,
+ release_memo(), Result.
+
+-spec 'semver'(input(), index()) -> parse_result().
+'semver'(Input, Index) ->
+ p(Input, Index, 'semver', fun(I,D) -> (p_seq([fun 'major_minor_patch_min_patch'/2, p_optional(p_seq([p_string(<<"-">>), fun 'alpha_part'/2, p_zero_or_more(p_seq([p_string(<<".">>), fun 'alpha_part'/2]))])), p_optional(p_seq([p_string(<<"+">>), fun 'alpha_part'/2, p_zero_or_more(p_seq([p_string(<<".">>), fun 'alpha_part'/2]))])), p_not(p_anything())]))(I,D) end, fun(Node, _Idx) -> rabbit_semver:internal_parse_version(Node) end).
+
+-spec 'major_minor_patch_min_patch'(input(), index()) -> parse_result().
+'major_minor_patch_min_patch'(Input, Index) ->
+ p(Input, Index, 'major_minor_patch_min_patch', fun(I,D) -> (p_seq([p_choose([p_seq([p_optional(p_string(<<"v">>)), fun 'numeric_part'/2]), fun 'alpha_part'/2]), p_optional(p_seq([p_string(<<".">>), fun 'version_part'/2])), p_optional(p_seq([p_string(<<".">>), fun 'version_part'/2])), p_optional(p_seq([p_string(<<".">>), fun 'version_part'/2]))]))(I,D) end, fun(Node, Idx) ->transform('major_minor_patch_min_patch', Node, Idx) end).
+
+-spec 'version_part'(input(), index()) -> parse_result().
+'version_part'(Input, Index) ->
+ p(Input, Index, 'version_part', fun(I,D) -> (p_choose([fun 'numeric_part'/2, fun 'alpha_part'/2]))(I,D) end, fun(Node, Idx) ->transform('version_part', Node, Idx) end).
+
+-spec 'numeric_part'(input(), index()) -> parse_result().
+'numeric_part'(Input, Index) ->
+ p(Input, Index, 'numeric_part', fun(I,D) -> (p_one_or_more(p_charclass(<<"[0-9]">>)))(I,D) end, fun(Node, _Idx) ->erlang:list_to_integer(erlang:binary_to_list(erlang:iolist_to_binary(Node))) end).
+
+-spec 'alpha_part'(input(), index()) -> parse_result().
+'alpha_part'(Input, Index) ->
+ p(Input, Index, 'alpha_part', fun(I,D) -> (p_one_or_more(p_charclass(<<"[A-Za-z0-9]">>)))(I,D) end, fun(Node, _Idx) ->erlang:iolist_to_binary(Node) end).
+
+
+transform(_,Node,_Index) -> Node.
+-file("peg_includes.hrl", 1).
+-type index() :: {{line, pos_integer()}, {column, pos_integer()}}.
+-type input() :: binary().
+-type parse_failure() :: {fail, term()}.
+-type parse_success() :: {term(), input(), index()}.
+-type parse_result() :: parse_failure() | parse_success().
+-type parse_fun() :: fun((input(), index()) -> parse_result()).
+-type xform_fun() :: fun((input(), index()) -> term()).
+
+-spec p(input(), index(), atom(), parse_fun(), xform_fun()) -> parse_result().
+p(Inp, StartIndex, Name, ParseFun, TransformFun) ->
+ case get_memo(StartIndex, Name) of % See if the current reduction is memoized
+ {ok, Memo} -> % If it is, return the stored result
+ Memo;
+ _ -> % If not, attempt to parse
+ Result = case ParseFun(Inp, StartIndex) of
+ {fail,_} = Failure -> % If it fails, memoize the failure
+ Failure;
+ {Match, InpRem, NewIndex} -> % If it passes, transform and memoize the result.
+ Transformed = TransformFun(Match, StartIndex),
+ {Transformed, InpRem, NewIndex}
+ end,
+ memoize(StartIndex, Name, Result),
+ Result
+ end.
+
+-spec setup_memo() -> ets:tid().
+setup_memo() ->
+ put({parse_memo_table, ?MODULE}, ets:new(?MODULE, [set])).
+
+-spec release_memo() -> true.
+release_memo() ->
+ ets:delete(memo_table_name()).
+
+-spec memoize(index(), atom(), parse_result()) -> true.
+memoize(Index, Name, Result) ->
+ Memo = case ets:lookup(memo_table_name(), Index) of
+ [] -> [];
+ [{Index, Plist}] -> Plist
+ end,
+ ets:insert(memo_table_name(), {Index, [{Name, Result}|Memo]}).
+
+-spec get_memo(index(), atom()) -> {ok, term()} | {error, not_found}.
+get_memo(Index, Name) ->
+ case ets:lookup(memo_table_name(), Index) of
+ [] -> {error, not_found};
+ [{Index, Plist}] ->
+ case proplists:lookup(Name, Plist) of
+ {Name, Result} -> {ok, Result};
+ _ -> {error, not_found}
+ end
+ end.
+
+-spec memo_table_name() -> ets:tid().
+memo_table_name() ->
+ get({parse_memo_table, ?MODULE}).
+
+-ifdef(p_eof).
+-spec p_eof() -> parse_fun().
+p_eof() ->
+ fun(<<>>, Index) -> {eof, [], Index};
+ (_, Index) -> {fail, {expected, eof, Index}} end.
+-endif.
+
+-ifdef(p_optional).
+-spec p_optional(parse_fun()) -> parse_fun().
+p_optional(P) ->
+ fun(Input, Index) ->
+ case P(Input, Index) of
+ {fail,_} -> {[], Input, Index};
+ {_, _, _} = Success -> Success
+ end
+ end.
+-endif.
+
+-ifdef(p_not).
+-spec p_not(parse_fun()) -> parse_fun().
+p_not(P) ->
+ fun(Input, Index)->
+ case P(Input,Index) of
+ {fail,_} ->
+ {[], Input, Index};
+ {Result, _, _} -> {fail, {expected, {no_match, Result},Index}}
+ end
+ end.
+-endif.
+
+-ifdef(p_assert).
+-spec p_assert(parse_fun()) -> parse_fun().
+p_assert(P) ->
+ fun(Input,Index) ->
+ case P(Input,Index) of
+ {fail,_} = Failure-> Failure;
+ _ -> {[], Input, Index}
+ end
+ end.
+-endif.
+
+-ifdef(p_seq).
+-spec p_seq([parse_fun()]) -> parse_fun().
+p_seq(P) ->
+ fun(Input, Index) ->
+ p_all(P, Input, Index, [])
+ end.
+
+-spec p_all([parse_fun()], input(), index(), [term()]) -> parse_result().
+p_all([], Inp, Index, Accum ) -> {lists:reverse( Accum ), Inp, Index};
+p_all([P|Parsers], Inp, Index, Accum) ->
+ case P(Inp, Index) of
+ {fail, _} = Failure -> Failure;
+ {Result, InpRem, NewIndex} -> p_all(Parsers, InpRem, NewIndex, [Result|Accum])
+ end.
+-endif.
+
+-ifdef(p_choose).
+-spec p_choose([parse_fun()]) -> parse_fun().
+p_choose(Parsers) ->
+ fun(Input, Index) ->
+ p_attempt(Parsers, Input, Index, none)
+ end.
+
+-spec p_attempt([parse_fun()], input(), index(), none | parse_failure()) -> parse_result().
+p_attempt([], _Input, _Index, Failure) -> Failure;
+p_attempt([P|Parsers], Input, Index, FirstFailure)->
+ case P(Input, Index) of
+ {fail, _} = Failure ->
+ case FirstFailure of
+ none -> p_attempt(Parsers, Input, Index, Failure);
+ _ -> p_attempt(Parsers, Input, Index, FirstFailure)
+ end;
+ Result -> Result
+ end.
+-endif.
+
+-ifdef(p_zero_or_more).
+-spec p_zero_or_more(parse_fun()) -> parse_fun().
+p_zero_or_more(P) ->
+ fun(Input, Index) ->
+ p_scan(P, Input, Index, [])
+ end.
+-endif.
+
+-ifdef(p_one_or_more).
+-spec p_one_or_more(parse_fun()) -> parse_fun().
+p_one_or_more(P) ->
+ fun(Input, Index)->
+ Result = p_scan(P, Input, Index, []),
+ case Result of
+ {[_|_], _, _} ->
+ Result;
+ _ ->
+ {fail, {expected, Failure, _}} = P(Input,Index),
+ {fail, {expected, {at_least_one, Failure}, Index}}
+ end
+ end.
+-endif.
+
+-ifdef(p_label).
+-spec p_label(atom(), parse_fun()) -> parse_fun().
+p_label(Tag, P) ->
+ fun(Input, Index) ->
+ case P(Input, Index) of
+ {fail,_} = Failure ->
+ Failure;
+ {Result, InpRem, NewIndex} ->
+ {{Tag, Result}, InpRem, NewIndex}
+ end
+ end.
+-endif.
+
+-ifdef(p_scan).
+-spec p_scan(parse_fun(), input(), index(), [term()]) -> {[term()], input(), index()}.
+p_scan(_, <<>>, Index, Accum) -> {lists:reverse(Accum), <<>>, Index};
+p_scan(P, Inp, Index, Accum) ->
+ case P(Inp, Index) of
+ {fail,_} -> {lists:reverse(Accum), Inp, Index};
+ {Result, InpRem, NewIndex} -> p_scan(P, InpRem, NewIndex, [Result | Accum])
+ end.
+-endif.
+
+-ifdef(p_string).
+-spec p_string(binary()) -> parse_fun().
+p_string(S) ->
+ Length = erlang:byte_size(S),
+ fun(Input, Index) ->
+ try
+ <<S:Length/binary, Rest/binary>> = Input,
+ {S, Rest, p_advance_index(S, Index)}
+ catch
+ error:{badmatch,_} -> {fail, {expected, {string, S}, Index}}
+ end
+ end.
+-endif.
+
+-ifdef(p_anything).
+-spec p_anything() -> parse_fun().
+p_anything() ->
+ fun(<<>>, Index) -> {fail, {expected, any_character, Index}};
+ (Input, Index) when is_binary(Input) ->
+ <<C/utf8, Rest/binary>> = Input,
+ {<<C/utf8>>, Rest, p_advance_index(<<C/utf8>>, Index)}
+ end.
+-endif.
+
+-ifdef(p_charclass).
+-spec p_charclass(string() | binary()) -> parse_fun().
+p_charclass(Class) ->
+ {ok, RE} = re:compile(Class, [unicode, dotall]),
+ fun(Inp, Index) ->
+ case re:run(Inp, RE, [anchored]) of
+ {match, [{0, Length}|_]} ->
+ {Head, Tail} = erlang:split_binary(Inp, Length),
+ {Head, Tail, p_advance_index(Head, Index)};
+ _ -> {fail, {expected, {character_class, binary_to_list(Class)}, Index}}
+ end
+ end.
+-endif.
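+
+%% Illustrative example of composing the combinators above (assuming
+%% the relevant p_* sections are compiled in): a parser for an integer
+%% with an optional leading minus sign, applied to a fresh index:
+%%
+%%   Digit = p_charclass(<<"[0-9]">>),
+%%   Int   = p_seq([p_optional(p_string(<<"-">>)),
+%%                  p_one_or_more(Digit)]),
+%%   {[<<"-">>, [<<"4">>, <<"2">>]], <<>>, _} =
+%%       Int(<<"-42">>, {{line,1},{column,1}}).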
+
+-ifdef(p_regexp).
+-spec p_regexp(binary()) -> parse_fun().
+p_regexp(Regexp) ->
+ {ok, RE} = re:compile(Regexp, [unicode, dotall, anchored]),
+ fun(Inp, Index) ->
+ case re:run(Inp, RE) of
+ {match, [{0, Length}|_]} ->
+ {Head, Tail} = erlang:split_binary(Inp, Length),
+ {Head, Tail, p_advance_index(Head, Index)};
+ _ -> {fail, {expected, {regexp, binary_to_list(Regexp)}, Index}}
+ end
+ end.
+-endif.
+
+-ifdef(line).
+-spec line(index() | term()) -> pos_integer() | undefined.
+line({{line,L},_}) -> L;
+line(_) -> undefined.
+-endif.
+
+-ifdef(column).
+-spec column(index() | term()) -> pos_integer() | undefined.
+column({_,{column,C}}) -> C;
+column(_) -> undefined.
+-endif.
+
+-spec p_advance_index(input() | unicode:charlist() | pos_integer(), index()) -> index().
+p_advance_index(MatchedInput, Index) when is_list(MatchedInput) orelse is_binary(MatchedInput)-> % strings
+ lists:foldl(fun p_advance_index/2, Index, unicode:characters_to_list(MatchedInput));
+p_advance_index(MatchedInput, Index) when is_integer(MatchedInput) -> % single characters
+ {{line, Line}, {column, Col}} = Index,
+ case MatchedInput of
+ $\n -> {{line, Line+1}, {column, 1}};
+ _ -> {{line, Line}, {column, Col+1}}
+ end.
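+
+%% Worked example (illustrative): folding the initial index over
+%% <<"a\nb">> lands on line 2, column 2, because the newline bumps the
+%% line and resets the column before the final character advances it:
+%%
+%%   {{line,2},{column,2}} =
+%%       p_advance_index(<<"a\nb">>, {{line,1},{column,1}}).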
diff --git a/deps/rabbit_common/src/rabbit_ssl_options.erl b/deps/rabbit_common/src/rabbit_ssl_options.erl
new file mode 100644
index 0000000000..4c2967df97
--- /dev/null
+++ b/deps/rabbit_common/src/rabbit_ssl_options.erl
@@ -0,0 +1,86 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_ssl_options).
+
+-export([fix/1]).
+
+
+-define(BAD_SSL_PROTOCOL_VERSIONS, [
+ %% POODLE
+ sslv3
+ ]).
+
+-spec fix(rabbit_types:infos()) -> rabbit_types:infos().
+
+fix(Config) ->
+ fix_verify_fun(fix_ssl_protocol_versions(Config)).
+
+fix_verify_fun(SslOptsConfig) ->
+ %% Starting with ssl 4.0.1 in Erlang R14B, the verify_fun function
+ %% takes 3 arguments and returns a tuple.
+ case rabbit_misc:pget(verify_fun, SslOptsConfig) of
+ {Module, Function, InitialUserState} ->
+ Fun = make_verify_fun(Module, Function, InitialUserState),
+ rabbit_misc:pset(verify_fun, Fun, SslOptsConfig);
+ {Module, Function} when is_atom(Module) ->
+ Fun = make_verify_fun(Module, Function, none),
+ rabbit_misc:pset(verify_fun, Fun, SslOptsConfig);
+ {Verifyfun, _InitialUserState} when is_function(Verifyfun, 3) ->
+ SslOptsConfig;
+ undefined ->
+ SslOptsConfig
+ end.
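+
+%% Illustrative sketch: an MFA-style verify_fun from the config is
+%% rewritten into the {fun/3, InitialUserState} shape that modern ssl
+%% expects (my_cert_checker is a hypothetical module exporting check/3):
+%%
+%%   Opts = fix_verify_fun([{verify_fun, {my_cert_checker, check, []}}]),
+%%   {Fun, []} = proplists:get_value(verify_fun, Opts),
+%%   true = is_function(Fun, 3).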
+
+make_verify_fun(Module, Function, InitialUserState) ->
+ try
+ %% Preload the module: it is required to use
+ %% erlang:function_exported/3.
+ Module:module_info()
+ catch
+ _:Exception ->
+ rabbit_log:error("SSL verify_fun: module ~s missing: ~p~n",
+ [Module, Exception]),
+ throw({error, {invalid_verify_fun, missing_module}})
+ end,
+ NewForm = erlang:function_exported(Module, Function, 3),
+ OldForm = erlang:function_exported(Module, Function, 1),
+ case {NewForm, OldForm} of
+ {true, _} ->
+ %% This verify_fun is supported by Erlang R14B+ (ssl
+ %% 4.0.1 and later).
+ Fun = fun(OtpCert, Event, UserState) ->
+ Module:Function(OtpCert, Event, UserState)
+ end,
+ {Fun, InitialUserState};
+ {_, true} ->
+ %% This verify_fun is supported by Erlang R14B+ for
+ %% undocumented backward compatibility.
+ %%
+ %% InitialUserState is ignored in this case.
+ fun(Args) ->
+ Module:Function(Args)
+ end;
+ _ ->
+ rabbit_log:error("SSL verify_fun: no ~s:~s/3 exported~n",
+ [Module, Function]),
+ throw({error, {invalid_verify_fun, function_not_exported}})
+ end.
+
+fix_ssl_protocol_versions(Config) ->
+ case application:get_env(rabbit, ssl_allow_poodle_attack) of
+ {ok, true} ->
+ Config;
+ _ ->
+ Configured = case rabbit_misc:pget(versions, Config) of
+ undefined -> rabbit_misc:pget(available,
+ ssl:versions(),
+ []);
+ Vs -> Vs
+ end,
+ rabbit_misc:pset(versions, Configured -- ?BAD_SSL_PROTOCOL_VERSIONS, Config)
+ end.
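+
+%% Illustrative sketch: unless ssl_allow_poodle_attack is set to true,
+%% the configured (or default) version list is filtered against
+%% ?BAD_SSL_PROTOCOL_VERSIONS, so sslv3 is dropped:
+%%
+%%   Opts = fix([{versions, [sslv3, 'tlsv1.2']}]),
+%%   ['tlsv1.2'] = proplists:get_value(versions, Opts).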
diff --git a/deps/rabbit_common/src/rabbit_types.erl b/deps/rabbit_common/src/rabbit_types.erl
new file mode 100644
index 0000000000..c11004fdf4
--- /dev/null
+++ b/deps/rabbit_common/src/rabbit_types.erl
@@ -0,0 +1,196 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_types).
+
+-include("rabbit.hrl").
+
+-export_type([maybe/1, info/0, infos/0, info_key/0, info_keys/0,
+ message/0, msg_id/0, basic_message/0,
+ delivery/0, content/0, decoded_content/0, undecoded_content/0,
+ unencoded_content/0, encoded_content/0, message_properties/0,
+ vhost/0, ctag/0, amqp_error/0, r/1, r2/2, r3/3, listener/0,
+ binding/0, binding_source/0, binding_destination/0,
+ exchange/0,
+ connection/0, connection_name/0, channel/0, channel_name/0,
+ protocol/0, auth_user/0, user/0,
+ username/0, password/0, password_hash/0,
+ ok/1, error/1, error/2, ok_or_error/1, ok_or_error2/2, ok_pid_or_error/0,
+ channel_exit/0, connection_exit/0, mfargs/0, proc_name/0,
+ proc_type_and_name/0, timestamp/0, tracked_connection_id/0,
+ tracked_connection/0, tracked_channel_id/0, tracked_channel/0,
+ node_type/0, topic_access_context/0,
+ authz_data/0, authz_context/0]).
+
+-type(maybe(T) :: T | 'none').
+-type(timestamp() :: {non_neg_integer(), non_neg_integer(), non_neg_integer()}).
+
+-type(vhost() :: vhost:name()).
+-type(ctag() :: binary()).
+
+%% TODO: make this more precise by tying specific class_ids to
+%% specific properties
+-type(undecoded_content() ::
+ #content{class_id :: rabbit_framing:amqp_class_id(),
+ properties :: 'none',
+ properties_bin :: binary(),
+ payload_fragments_rev :: [binary()]} |
+ #content{class_id :: rabbit_framing:amqp_class_id(),
+ properties :: rabbit_framing:amqp_property_record(),
+ properties_bin :: 'none',
+ payload_fragments_rev :: [binary()]}).
+-type(unencoded_content() :: undecoded_content()).
+-type(decoded_content() ::
+ #content{class_id :: rabbit_framing:amqp_class_id(),
+ properties :: rabbit_framing:amqp_property_record(),
+ properties_bin :: maybe(binary()),
+ payload_fragments_rev :: [binary()]}).
+-type(encoded_content() ::
+ #content{class_id :: rabbit_framing:amqp_class_id(),
+ properties :: maybe(rabbit_framing:amqp_property_record()),
+ properties_bin :: binary(),
+ payload_fragments_rev :: [binary()]}).
+-type(content() :: undecoded_content() | decoded_content()).
+-type(msg_id() :: rabbit_guid:guid()).
+-type(basic_message() ::
+ #basic_message{exchange_name :: rabbit_exchange:name(),
+ routing_keys :: [rabbit_router:routing_key()],
+ content :: content(),
+ id :: msg_id(),
+ is_persistent :: boolean()}).
+-type(message() :: basic_message()).
+-type(delivery() ::
+ #delivery{mandatory :: boolean(),
+ sender :: pid(),
+ message :: message()}).
+-type(message_properties() ::
+ #message_properties{expiry :: pos_integer() | 'undefined',
+ needs_confirming :: boolean()}).
+
+-type(info_key() :: atom()).
+-type(info_keys() :: [info_key()]).
+
+-type(info() :: {info_key(), any()}).
+-type(infos() :: [info()]).
+
+-type(amqp_error() ::
+ #amqp_error{name :: rabbit_framing:amqp_exception(),
+ explanation :: string(),
+ method :: rabbit_framing:amqp_method_name()}).
+
+-type(r(Kind) ::
+ r2(vhost(), Kind)).
+-type(r2(VirtualHost, Kind) ::
+ r3(VirtualHost, Kind, rabbit_misc:resource_name())).
+-type(r3(VirtualHost, Kind, Name) ::
+ #resource{virtual_host :: VirtualHost,
+ kind :: Kind,
+ name :: Name}).
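+
+%% For example (illustrative), a queue named <<"q1">> in the default
+%% virtual host is represented as the resource record
+%%
+%%   #resource{virtual_host = <<"/">>, kind = queue, name = <<"q1">>}
+%%
+%% which satisfies r(queue).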
+
+-type(listener() ::
+ #listener{node :: node(),
+ protocol :: atom(),
+ host :: rabbit_net:hostname(),
+ port :: rabbit_net:ip_port()}).
+
+-type(binding_source() :: rabbit_exchange:name()).
+-type(binding_destination() :: rabbit_amqqueue:name() | rabbit_exchange:name()).
+
+-type(binding() ::
+ #binding{source :: rabbit_exchange:name(),
+ destination :: binding_destination(),
+ key :: rabbit_binding:key(),
+ args :: rabbit_framing:amqp_table()}).
+
+-type(exchange() ::
+ #exchange{name :: rabbit_exchange:name(),
+ type :: rabbit_exchange:type(),
+ durable :: boolean(),
+ auto_delete :: boolean(),
+ arguments :: rabbit_framing:amqp_table()}).
+
+-type(connection_name() :: binary()).
+
+%% used e.g. by rabbit_networking
+-type(connection() :: pid()).
+
+%% used e.g. by rabbit_connection_tracking
+-type(tracked_connection_id() :: {node(), connection_name()}).
+
+-type(tracked_connection() ::
+ #tracked_connection{id :: tracked_connection_id(),
+ node :: node(),
+ vhost :: vhost(),
+ name :: connection_name(),
+ pid :: connection(),
+ protocol :: protocol_name(),
+ peer_host :: rabbit_networking:hostname(),
+ peer_port :: rabbit_networking:ip_port(),
+ username :: username(),
+ connected_at :: integer()}).
+
+-type(channel_name() :: binary()).
+
+-type(channel() :: pid()).
+
+%% used e.g. by rabbit_channel_tracking
+-type(tracked_channel_id() :: {node(), channel_name()}).
+
+-type(tracked_channel() ::
+ #tracked_channel{ id :: tracked_channel_id(),
+ node :: node(),
+ vhost :: vhost(),
+ name :: channel_name(),
+ pid :: channel(),
+ username :: username(),
+ connection :: connection()}).
+
+%% old AMQP 0-9-1-centric type, avoid when possible
+-type(protocol() :: rabbit_framing:protocol()).
+
+-type(protocol_name() :: 'amqp0_8' | 'amqp0_9_1' | 'amqp1_0' | 'mqtt' | 'stomp' | any()).
+
+-type(node_type() :: 'disc' | 'ram').
+
+-type(auth_user() ::
+ #auth_user{username :: username(),
+ tags :: [atom()],
+ impl :: any()}).
+
+-type(authz_data() ::
+ #{peeraddr := inet:ip_address() | binary(),
+ _ => _ } | undefined).
+
+-type(user() ::
+ #user{username :: username(),
+ tags :: [atom()],
+ authz_backends :: [{atom(), any()}]}).
+
+-type(username() :: binary()).
+-type(password() :: binary()).
+-type(password_hash() :: binary()).
+
+-type(ok(A) :: {'ok', A}).
+-type(error(A) :: {'error', A}).
+-type(error(A, B) :: {'error', A, B}).
+-type(ok_or_error(A) :: 'ok' | error(A)).
+-type(ok_or_error2(A, B) :: ok(A) | error(B)).
+-type(ok_pid_or_error() :: ok_or_error2(pid(), any())).
+
+-type(channel_exit() :: no_return()).
+-type(connection_exit() :: no_return()).
+
+-type(mfargs() :: {atom(), atom(), [any()]}).
+
+-type(proc_name() :: term()).
+-type(proc_type_and_name() :: {atom(), proc_name()}).
+
+-type(topic_access_context() :: #{routing_key => rabbit_router:routing_key(),
+ variable_map => map(),
+ _ => _}).
+
+-type(authz_context() :: map()).
diff --git a/deps/rabbit_common/src/rabbit_writer.erl b/deps/rabbit_common/src/rabbit_writer.erl
new file mode 100644
index 0000000000..5bce50c87a
--- /dev/null
+++ b/deps/rabbit_common/src/rabbit_writer.erl
@@ -0,0 +1,437 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_writer).
+
+%% This module backs writer processes ("writers"). The responsibility of
+%% a writer is to serialise protocol methods and write them to the socket.
+%% Every writer is associated with a channel and normally it's the channel
+%% that delegates method delivery to it. However, rabbit_reader
+%% (connection process) can use this module's functions to send data
+%% on channel 0, which is only used for connection negotiation and
+%% other "special" purposes.
+%%
+%% This module provides multiple functions that send protocol commands,
+%% including some that are credit flow-aware.
+%%
+%% Writers perform internal buffering. When the amount of data
+%% buffered exceeds a threshold, a socket flush is performed.
+%% See FLUSH_THRESHOLD for details.
+%%
+%% When a socket write fails, the writer process exits.
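+%%
+%% Minimal usage sketch (hypothetical caller; Sock, FrameMax, Protocol,
+%% ReaderPid and MethodRecord come from the surrounding connection and
+%% channel code):
+%%
+%%   {ok, W} = rabbit_writer:start_link(Sock, 0, FrameMax, Protocol,
+%%                                      ReaderPid, {writer, example}),
+%%   ok = rabbit_writer:send_command(W, MethodRecord),
+%%   ok = rabbit_writer:flush(W).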
+
+-include("rabbit.hrl").
+-include("rabbit_framing.hrl").
+
+-export([start/6, start_link/6, start/7, start_link/7, start/8, start_link/8]).
+
+-export([system_continue/3, system_terminate/4, system_code_change/4]).
+
+-export([send_command/2, send_command/3,
+ send_command_sync/2, send_command_sync/3,
+ send_command_and_notify/4, send_command_and_notify/5,
+ send_command_flow/2, send_command_flow/3,
+ flush/1]).
+-export([internal_send_command/4, internal_send_command/6]).
+-export([msg_size/1, maybe_gc_large_msg/1, maybe_gc_large_msg/2]).
+
+%% internal
+-export([enter_mainloop/2, mainloop/2, mainloop1/2]).
+
+-record(wstate, {
+ %% socket (port)
+ sock,
+ %% channel number
+ channel,
+ %% connection-negotiated frame_max setting
+ frame_max,
+ %% see #connection.protocol in rabbit_reader
+ protocol,
+ %% connection (rabbit_reader) process
+ reader,
+ %% statistics emission timer
+ stats_timer,
+ %% data pending delivery (between socket
+ %% flushes)
+ pending,
+ %% defines how often GC will be executed
+ writer_gc_threshold
+}).
+
+-define(HIBERNATE_AFTER, 5000).
+%% 1GB
+-define(DEFAULT_GC_THRESHOLD, 1000000000).
+
+%%---------------------------------------------------------------------------
+
+-spec start
+ (rabbit_net:socket(), rabbit_channel:channel_number(),
+ non_neg_integer(), rabbit_types:protocol(), pid(),
+ rabbit_types:proc_name()) ->
+ rabbit_types:ok(pid()).
+-spec start_link
+ (rabbit_net:socket(), rabbit_channel:channel_number(),
+ non_neg_integer(), rabbit_types:protocol(), pid(),
+ rabbit_types:proc_name()) ->
+ rabbit_types:ok(pid()).
+-spec start
+ (rabbit_net:socket(), rabbit_channel:channel_number(),
+ non_neg_integer(), rabbit_types:protocol(), pid(),
+ rabbit_types:proc_name(), boolean()) ->
+ rabbit_types:ok(pid()).
+-spec start_link
+ (rabbit_net:socket(), rabbit_channel:channel_number(),
+ non_neg_integer(), rabbit_types:protocol(), pid(),
+ rabbit_types:proc_name(), boolean()) ->
+ rabbit_types:ok(pid()).
+-spec start
+ (rabbit_net:socket(), rabbit_channel:channel_number(),
+ non_neg_integer(), rabbit_types:protocol(), pid(),
+ rabbit_types:proc_name(), boolean(), undefined|non_neg_integer()) ->
+ rabbit_types:ok(pid()).
+-spec start_link
+ (rabbit_net:socket(), rabbit_channel:channel_number(),
+ non_neg_integer(), rabbit_types:protocol(), pid(),
+ rabbit_types:proc_name(), boolean(), undefined|non_neg_integer()) ->
+ rabbit_types:ok(pid()).
+
+-spec system_code_change(_,_,_,_) -> {'ok',_}.
+-spec system_continue(_,_,#wstate{}) -> any().
+-spec system_terminate(_,_,_,_) -> no_return().
+
+-spec send_command(pid(), rabbit_framing:amqp_method_record()) -> 'ok'.
+-spec send_command
+ (pid(), rabbit_framing:amqp_method_record(), rabbit_types:content()) ->
+ 'ok'.
+-spec send_command_sync(pid(), rabbit_framing:amqp_method_record()) -> 'ok'.
+-spec send_command_sync
+ (pid(), rabbit_framing:amqp_method_record(), rabbit_types:content()) ->
+ 'ok'.
+-spec send_command_and_notify
+ (pid(), pid(), pid(), rabbit_framing:amqp_method_record()) -> 'ok'.
+-spec send_command_and_notify
+ (pid(), pid(), pid(), rabbit_framing:amqp_method_record(),
+ rabbit_types:content()) ->
+ 'ok'.
+-spec send_command_flow(pid(), rabbit_framing:amqp_method_record()) -> 'ok'.
+-spec send_command_flow
+ (pid(), rabbit_framing:amqp_method_record(), rabbit_types:content()) ->
+ 'ok'.
+-spec flush(pid()) -> 'ok'.
+-spec internal_send_command
+ (rabbit_net:socket(), rabbit_channel:channel_number(),
+ rabbit_framing:amqp_method_record(), rabbit_types:protocol()) ->
+ 'ok'.
+-spec internal_send_command
+ (rabbit_net:socket(), rabbit_channel:channel_number(),
+ rabbit_framing:amqp_method_record(), rabbit_types:content(),
+ non_neg_integer(), rabbit_types:protocol()) ->
+ 'ok'.
+
+-spec msg_size
+ (rabbit_types:content() | rabbit_types:message()) -> non_neg_integer().
+
+-spec maybe_gc_large_msg
+ (rabbit_types:content() | rabbit_types:message()) -> non_neg_integer().
+-spec maybe_gc_large_msg
+ (rabbit_types:content() | rabbit_types:message(),
+ undefined | non_neg_integer()) -> undefined | non_neg_integer().
+
+%%---------------------------------------------------------------------------
+
+start(Sock, Channel, FrameMax, Protocol, ReaderPid, Identity) ->
+ start(Sock, Channel, FrameMax, Protocol, ReaderPid, Identity, false).
+
+start_link(Sock, Channel, FrameMax, Protocol, ReaderPid, Identity) ->
+ start_link(Sock, Channel, FrameMax, Protocol, ReaderPid, Identity, false).
+
+start(Sock, Channel, FrameMax, Protocol, ReaderPid, Identity,
+ ReaderWantsStats) ->
+ start(Sock, Channel, FrameMax, Protocol, ReaderPid, Identity,
+ ReaderWantsStats, ?DEFAULT_GC_THRESHOLD).
+
+start_link(Sock, Channel, FrameMax, Protocol, ReaderPid, Identity,
+ ReaderWantsStats) ->
+ start_link(Sock, Channel, FrameMax, Protocol, ReaderPid, Identity,
+ ReaderWantsStats, ?DEFAULT_GC_THRESHOLD).
+
+start(Sock, Channel, FrameMax, Protocol, ReaderPid, Identity,
+ ReaderWantsStats, GCThreshold) ->
+ State = initial_state(Sock, Channel, FrameMax, Protocol, ReaderPid,
+ ReaderWantsStats, GCThreshold),
+ {ok, proc_lib:spawn(?MODULE, enter_mainloop, [Identity, State])}.
+
+start_link(Sock, Channel, FrameMax, Protocol, ReaderPid, Identity,
+ ReaderWantsStats, GCThreshold) ->
+ State = initial_state(Sock, Channel, FrameMax, Protocol, ReaderPid,
+ ReaderWantsStats, GCThreshold),
+ {ok, proc_lib:spawn_link(?MODULE, enter_mainloop, [Identity, State])}.
+
+initial_state(Sock, Channel, FrameMax, Protocol, ReaderPid, ReaderWantsStats, GCThreshold) ->
+ (case ReaderWantsStats of
+ true -> fun rabbit_event:init_stats_timer/2;
+ false -> fun rabbit_event:init_disabled_stats_timer/2
+ end)(#wstate{sock = Sock,
+ channel = Channel,
+ frame_max = FrameMax,
+ protocol = Protocol,
+ reader = ReaderPid,
+ pending = [],
+ writer_gc_threshold = GCThreshold},
+ #wstate.stats_timer).
+
+system_continue(Parent, Deb, State) ->
+ mainloop(Deb, State#wstate{reader = Parent}).
+
+system_terminate(Reason, _Parent, _Deb, _State) ->
+ exit(Reason).
+
+system_code_change(Misc, _Module, _OldVsn, _Extra) ->
+ {ok, Misc}.
+
+enter_mainloop(Identity, State) ->
+ ?LG_PROCESS_TYPE(writer),
+ Deb = sys:debug_options([]),
+ ?store_proc_name(Identity),
+ mainloop(Deb, State).
+
+mainloop(Deb, State) ->
+ try
+ mainloop1(Deb, State)
+ catch
+ exit:Error -> #wstate{reader = ReaderPid, channel = Channel} = State,
+ ReaderPid ! {channel_exit, Channel, Error}
+ end,
+ done.
+
+mainloop1(Deb, State = #wstate{pending = []}) ->
+ receive
+ Message -> {Deb1, State1} = handle_message(Deb, Message, State),
+ ?MODULE:mainloop1(Deb1, State1)
+ after ?HIBERNATE_AFTER ->
+ erlang:hibernate(?MODULE, mainloop, [Deb, State])
+ end;
+mainloop1(Deb, State) ->
+ receive
+ Message -> {Deb1, State1} = handle_message(Deb, Message, State),
+ ?MODULE:mainloop1(Deb1, State1)
+ after 0 ->
+ ?MODULE:mainloop1(Deb, internal_flush(State))
+ end.
+
+handle_message(Deb, {system, From, Req}, State = #wstate{reader = Parent}) ->
+ sys:handle_system_msg(Req, From, Parent, ?MODULE, Deb, State);
+handle_message(Deb, Message, State) ->
+ {Deb, handle_message(Message, State)}.
+
+handle_message({send_command, MethodRecord}, State) ->
+ internal_send_command_async(MethodRecord, State);
+handle_message({send_command, MethodRecord, Content}, State) ->
+ internal_send_command_async(MethodRecord, Content, State);
+handle_message({send_command_flow, MethodRecord, Sender}, State) ->
+ credit_flow:ack(Sender),
+ internal_send_command_async(MethodRecord, State);
+handle_message({send_command_flow, MethodRecord, Content, Sender}, State) ->
+ credit_flow:ack(Sender),
+ internal_send_command_async(MethodRecord, Content, State);
+handle_message({'$gen_call', From, {send_command_sync, MethodRecord}}, State) ->
+ State1 = internal_flush(
+ internal_send_command_async(MethodRecord, State)),
+ gen_server:reply(From, ok),
+ State1;
+handle_message({'$gen_call', From, {send_command_sync, MethodRecord, Content}},
+ State) ->
+ State1 = internal_flush(
+ internal_send_command_async(MethodRecord, Content, State)),
+ gen_server:reply(From, ok),
+ State1;
+handle_message({'$gen_call', From, flush}, State) ->
+ State1 = internal_flush(State),
+ gen_server:reply(From, ok),
+ State1;
+handle_message({send_command_and_notify, QPid, ChPid, MethodRecord}, State) ->
+ State1 = internal_send_command_async(MethodRecord, State),
+ rabbit_amqqueue_common:notify_sent(QPid, ChPid),
+ State1;
+handle_message({send_command_and_notify, QPid, ChPid, MethodRecord, Content},
+ State) ->
+ State1 = internal_send_command_async(MethodRecord, Content, State),
+ rabbit_amqqueue_common:notify_sent(QPid, ChPid),
+ State1;
+handle_message({'DOWN', _MRef, process, QPid, _Reason}, State) ->
+ rabbit_amqqueue_common:notify_sent_queue_down(QPid),
+ State;
+handle_message({inet_reply, _, ok}, State) ->
+ rabbit_event:ensure_stats_timer(State, #wstate.stats_timer, emit_stats);
+handle_message({inet_reply, _, Status}, _State) ->
+ exit({writer, send_failed, Status});
+handle_message(emit_stats, State = #wstate{reader = ReaderPid}) ->
+ ReaderPid ! ensure_stats,
+ rabbit_event:reset_stats_timer(State, #wstate.stats_timer);
+handle_message(Message, _State) ->
+ exit({writer, message_not_understood, Message}).
+
+%%---------------------------------------------------------------------------
+
+send_command(W, MethodRecord) ->
+ W ! {send_command, MethodRecord},
+ ok.
+
+send_command(W, MethodRecord, Content) ->
+ W ! {send_command, MethodRecord, Content},
+ ok.
+
+send_command_flow(W, MethodRecord) ->
+ credit_flow:send(W),
+ W ! {send_command_flow, MethodRecord, self()},
+ ok.
+
+send_command_flow(W, MethodRecord, Content) ->
+ credit_flow:send(W),
+ W ! {send_command_flow, MethodRecord, Content, self()},
+ ok.
+
+send_command_sync(W, MethodRecord) ->
+ call(W, {send_command_sync, MethodRecord}).
+
+send_command_sync(W, MethodRecord, Content) ->
+ call(W, {send_command_sync, MethodRecord, Content}).
+
+send_command_and_notify(W, Q, ChPid, MethodRecord) ->
+ W ! {send_command_and_notify, Q, ChPid, MethodRecord},
+ ok.
+
+send_command_and_notify(W, Q, ChPid, MethodRecord, Content) ->
+ W ! {send_command_and_notify, Q, ChPid, MethodRecord, Content},
+ ok.
+
+flush(W) -> call(W, flush).
+
+%%---------------------------------------------------------------------------
+
+call(Pid, Msg) ->
+ {ok, Res} = gen:call(Pid, '$gen_call', Msg, infinity),
+ Res.
+
+%%---------------------------------------------------------------------------
+
+assemble_frame(Channel, MethodRecord, Protocol) ->
+ rabbit_binary_generator:build_simple_method_frame(
+ Channel, MethodRecord, Protocol).
+
+assemble_frames(Channel, MethodRecord, Content, FrameMax, Protocol) ->
+ MethodName = rabbit_misc:method_record_type(MethodRecord),
+ true = Protocol:method_has_content(MethodName), % assertion
+ MethodFrame = rabbit_binary_generator:build_simple_method_frame(
+ Channel, MethodRecord, Protocol),
+ ContentFrames = rabbit_binary_generator:build_simple_content_frames(
+ Channel, Content, FrameMax, Protocol),
+ [MethodFrame | ContentFrames].
+
+tcp_send(Sock, Data) ->
+ rabbit_misc:throw_on_error(inet_error,
+ fun () -> rabbit_net:send(Sock, Data) end).
+
+internal_send_command(Sock, Channel, MethodRecord, Protocol) ->
+ ok = tcp_send(Sock, assemble_frame(Channel, MethodRecord, Protocol)).
+
+internal_send_command(Sock, Channel, MethodRecord, Content, FrameMax,
+ Protocol) ->
+ ok = lists:foldl(fun (Frame, ok) -> tcp_send(Sock, Frame);
+ (_Frame, Other) -> Other
+ end, ok, assemble_frames(Channel, MethodRecord,
+ Content, FrameMax, Protocol)).
+
+internal_send_command_async(MethodRecord,
+ State = #wstate{channel = Channel,
+ protocol = Protocol,
+ pending = Pending}) ->
+ Frame = assemble_frame(Channel, MethodRecord, Protocol),
+ maybe_flush(State#wstate{pending = [Frame | Pending]}).
+
+internal_send_command_async(MethodRecord, Content,
+ State = #wstate{channel = Channel,
+ frame_max = FrameMax,
+ protocol = Protocol,
+ pending = Pending,
+ writer_gc_threshold = GCThreshold}) ->
+ Frames = assemble_frames(Channel, MethodRecord, Content, FrameMax,
+ Protocol),
+ maybe_gc_large_msg(Content, GCThreshold),
+ maybe_flush(State#wstate{pending = [Frames | Pending]}).
+
+%% When the amount of protocol method data buffered exceeds
+%% this threshold, a socket flush is performed.
+%%
+%% This magic number is the TCP-over-Ethernet MSS (1460) minus the
+%% minimum size of an AMQP 0-9-1 basic.deliver method frame (24) and a
+%% minimal basic content header (22): 1460 - 24 - 22 = 1414. The idea
+%% is that we want to flush just before exceeding the MSS.
+-define(FLUSH_THRESHOLD, 1414).
+
+maybe_flush(State = #wstate{pending = Pending}) ->
+ case iolist_size(Pending) >= ?FLUSH_THRESHOLD of
+ true -> internal_flush(State);
+ false -> State
+ end.
+
+internal_flush(State = #wstate{pending = []}) ->
+ State;
+internal_flush(State = #wstate{sock = Sock, pending = Pending}) ->
+ ok = port_cmd(Sock, lists:reverse(Pending)),
+ State#wstate{pending = []}.
+
+%% gen_tcp:send/2 does a selective receive of {inet_reply, Sock,
+%% Status} to obtain the result. That is bad when it is called from
+%% the writer, since it requires scanning of the writer's possibly quite
+%% large message queue.
+%%
+%% So instead we lift the code from prim_inet:send/2, which is what
+%% gen_tcp:send/2 calls, do the first half here and then just process
+%% the result code in handle_message/2 as and when it arrives.
+%%
+%% This means we may end up happily sending data down a closed/broken
+%% socket, but that's ok since a) data in the buffers will be lost in
+%% any case (so qualitatively we are no worse off than if we used
+%% gen_tcp:send/2), and b) we do detect the changed socket status
+%% eventually, i.e. when we get round to handling the result code.
+%%
+%% Also note that the port has bounded buffers and port_command blocks
+%% when these are full. So the fact that we process the result
+%% asynchronously does not impact flow control.
+port_cmd(Sock, Data) ->
+ true = try rabbit_net:port_command(Sock, Data)
+ catch error:Error -> exit({writer, send_failed, Error})
+ end,
+ ok.
+
+%% Some processes (channel, writer) can get huge amounts of binary
+%% garbage when processing huge messages at high speed (since we only
+%% do enough reductions to GC every few hundred messages, and if each
+%% message is 1MB then that's ugly). So count how many bytes of
+%% message we have processed, and force a GC every so often.
+maybe_gc_large_msg(Content) ->
+ maybe_gc_large_msg(Content, ?DEFAULT_GC_THRESHOLD).
+
+maybe_gc_large_msg(_Content, undefined) ->
+ undefined;
+maybe_gc_large_msg(Content, GCThreshold) ->
+ Size = msg_size(Content),
+ Current = case get(msg_size_for_gc) of
+ undefined -> 0;
+ C -> C
+ end,
+ New = Current + Size,
+ put(msg_size_for_gc, case New > GCThreshold of
+ true -> erlang:garbage_collect(),
+ 0;
+ false -> New
+ end),
+ Size.
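+
+%% Worked example (illustrative; SixByteContent stands for a
+%% hypothetical #content{} whose payload totals 6 bytes): with a
+%% 10-byte threshold the second call trips the forced GC, because
+%% 6 =< 10 but 6 + 6 > 10, after which the running counter resets:
+%%
+%%   6 = maybe_gc_large_msg(SixByteContent, 10),  %% counter is now 6
+%%   6 = maybe_gc_large_msg(SixByteContent, 10).  %% 12 > 10: GC, counter 0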
+
+msg_size(#content{payload_fragments_rev = PFR}) -> iolist_size(PFR);
+msg_size(#basic_message{content = Content}) -> msg_size(Content).
diff --git a/deps/rabbit_common/src/supervisor2.erl b/deps/rabbit_common/src/supervisor2.erl
new file mode 100644
index 0000000000..08c764d0d8
--- /dev/null
+++ b/deps/rabbit_common/src/supervisor2.erl
@@ -0,0 +1,1651 @@
+%% This file is a copy of supervisor.erl from the Erlang/OTP
+%% distribution, with the following modifications:
+%%
+%% 1) the module name is supervisor2
+%%
+%% 2) a find_child/2 utility function has been added
+%%
+%% 3) Added an 'intrinsic' restart type. Like the transient type, this
+%% type means the child should only be restarted if the child exits
+%% abnormally. Unlike the transient type, if the child exits
+%% normally, the supervisor itself also exits normally. If the
+%% child is a supervisor and it exits normally (i.e. with reason of
+%% 'shutdown') then the child's parent also exits normally.
+%%
+%% 4) child specifications can contain, as the restart type, a tuple
+%% {permanent, Delay} | {transient, Delay} | {intrinsic, Delay}
+%%    where Delay >= 0 (see point (3) above for intrinsic; an example
+%%    spec is given after this list). The delay, in seconds, indicates
+%%    what should happen if a child, upon being
+%% restarted, exceeds the MaxT and MaxR parameters. Thus, if a
+%% child exits, it is restarted as normal. If it exits sufficiently
+%% quickly and often to exceed the boundaries set by the MaxT and
+%% MaxR parameters, and a Delay is specified, then rather than
+%% stopping the supervisor, the supervisor instead continues and
+%% tries to start up the child again, Delay seconds later.
+%%
+%% Note that if a child is delay-restarted this will reset the
+%%    count of restarts towards MaxR and MaxT. This matters if MaxT >
+%% Delay, since otherwise we would fail to restart after the delay.
+%%
+%% Sometimes, you may wish for a transient or intrinsic child to
+%% exit abnormally so that it gets restarted, but still log
+%% nothing. gen_server will log any exit reason other than
+%% 'normal', 'shutdown' or {'shutdown', _}. Thus the exit reason of
+%% {'shutdown', 'restart'} is interpreted to mean you wish the
+%% child to be restarted according to the delay parameters, but
+%% gen_server will not log the error. Thus from gen_server's
+%% perspective it's a normal exit, whilst from supervisor's
+%% perspective, it's an abnormal exit.
+%%
+%% 5) normal, and {shutdown, _} exit reasons are all treated the same
+%% (i.e. are regarded as normal exits)
+%%
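+%% For example (illustrative; my_worker is a hypothetical module), a
+%% child spec in the old-style tuple form that restarts on abnormal
+%% exit and, once MaxR/MaxT is exceeded, retries 10 seconds later
+%% instead of taking the supervisor down:
+%%
+%%   {my_worker, {my_worker, start_link, []},
+%%    {transient, 10}, 5000, worker, [my_worker]}
+%%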
+%% All modifications are (C) 2010-2020 VMware, Inc. or its affiliates.
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1996-2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(supervisor2).
+
+-behaviour(gen_server).
+
+%% External exports
+-export([start_link/2, start_link/3,
+ start_child/2, restart_child/2,
+ delete_child/2, terminate_child/2,
+ which_children/1, count_children/1,
+ check_childspecs/1, get_childspec/2,
+ find_child/2]).
+
+%% Internal exports
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
+ terminate/2, code_change/3, format_status/2]).
+
+%% For release_handler only
+-export([get_callback_module/1]).
+
+-include_lib("kernel/include/logger.hrl").
+
+-define(report_error(Error, Reason, Child, SupName),
+ ?LOG_ERROR(#{label=>{supervisor,Error},
+ report=>[{supervisor,SupName},
+ {errorContext,Error},
+ {reason,Reason},
+ {offender,extract_child(Child)}]},
+ #{domain=>[otp,sasl],
+ report_cb=>fun logger:format_otp_report/1,
+ logger_formatter=>#{title=>"SUPERVISOR REPORT"},
+ error_logger=>#{tag=>error_report,
+ type=>supervisor_report}})).
+
+%%--------------------------------------------------------------------------
+
+-export_type([sup_flags/0, child_spec/0, startchild_ret/0, strategy/0]).
+
+%%--------------------------------------------------------------------------
+
+-type child() :: 'undefined' | pid().
+-type child_id() :: term().
+-type mfargs() :: {M :: module(), F :: atom(), A :: [term()] | undefined}.
+-type modules() :: [module()] | 'dynamic'.
+-type delay() :: non_neg_integer().
+-type restart() :: 'permanent' | 'transient' | 'temporary' | 'intrinsic' | {'permanent', delay()} | {'transient', delay()} | {'intrinsic', delay()}.
+-type shutdown() :: 'brutal_kill' | timeout().
+-type worker() :: 'worker' | 'supervisor'.
+-type sup_name() :: {'local', Name :: atom()}
+ | {'global', Name :: atom()}
+ | {'via', Module :: module(), Name :: any()}.
+-type sup_ref() :: (Name :: atom())
+ | {Name :: atom(), Node :: node()}
+ | {'global', Name :: atom()}
+ | {'via', Module :: module(), Name :: any()}
+ | pid().
+-type child_spec() :: #{id := child_id(), % mandatory
+ start := mfargs(), % mandatory
+ restart => restart(), % optional
+ shutdown => shutdown(), % optional
+ type => worker(), % optional
+ modules => modules()} % optional
+ | {Id :: child_id(),
+ StartFunc :: mfargs(),
+ Restart :: restart(),
+ Shutdown :: shutdown(),
+ Type :: worker(),
+ Modules :: modules()}.
+
+-type strategy() :: 'one_for_all' | 'one_for_one'
+ | 'rest_for_one' | 'simple_one_for_one'.
+
+-type sup_flags() :: #{strategy => strategy(), % optional
+ intensity => non_neg_integer(), % optional
+ period => pos_integer()} % optional
+ | {RestartStrategy :: strategy(),
+ Intensity :: non_neg_integer(),
+ Period :: pos_integer()}.
+-type children() :: {Ids :: [child_id()], Db :: #{child_id() => child_rec()}}.
+
+%%--------------------------------------------------------------------------
+%% Defaults
+-define(default_flags, #{strategy => one_for_one,
+ intensity => 1,
+ period => 5}).
+-define(default_child_spec, #{restart => permanent,
+ type => worker}).
+%% Default 'shutdown' is 5000 for workers and infinity for supervisors.
+%% Default 'modules' is [M], where M comes from the child's start {M,F,A}.
+
+%%--------------------------------------------------------------------------
+
+-record(child, {% pid is undefined when child is not running
+ pid = undefined :: child()
+ | {restarting, pid() | undefined}
+ | [pid()],
+ id :: child_id(),
+ mfargs :: mfargs(),
+ restart_type :: restart(),
+ shutdown :: shutdown(),
+ child_type :: worker(),
+ modules = [] :: modules()}).
+-type child_rec() :: #child{}.
+
+-record(state, {name,
+ strategy :: strategy() | 'undefined',
+ children = {[],#{}} :: children(), % Ids in start order
+ dynamics :: {'maps', #{pid() => list()}}
+ | {'sets', sets:set(pid())}
+ | 'undefined',
+ intensity :: non_neg_integer() | 'undefined',
+ period :: pos_integer() | 'undefined',
+ restarts = [],
+ dynamic_restarts = 0 :: non_neg_integer(),
+ module,
+ args}).
+-type state() :: #state{}.
+
+-define(is_simple(State), State#state.strategy=:=simple_one_for_one).
+-define(is_temporary(_Child_), _Child_#child.restart_type=:=temporary).
+-define(is_permanent(_Child_), ((_Child_#child.restart_type=:=permanent) orelse
+ (is_tuple(_Child_#child.restart_type) andalso
+ tuple_size(_Child_#child.restart_type) =:= 2 andalso
+ element(1, _Child_#child.restart_type) =:= permanent))).
+
+-define(is_explicit_restart(R),
+ R == {shutdown, restart}).
+
+-callback init(Args :: term()) ->
+ {ok, {SupFlags :: sup_flags(), [ChildSpec :: child_spec()]}}
+ | ignore.
+
+-define(restarting(_Pid_), {restarting,_Pid_}).
+
+%%% ---------------------------------------------------
+%%% This is a general process supervisor built upon gen_server.erl.
+%%% Servers/processes should/could also be built using gen_server.erl.
+%%% SupName = {local, atom()} | {global, atom()}.
+%%% ---------------------------------------------------
+
+-type startlink_err() :: {'already_started', pid()}
+ | {'shutdown', term()}
+ | term().
+-type startlink_ret() :: {'ok', pid()} | 'ignore' | {'error', startlink_err()}.
+
+-spec start_link(Module, Args) -> startlink_ret() when
+ Module :: module(),
+ Args :: term().
+start_link(Mod, Args) ->
+ gen_server:start_link(?MODULE, {self, Mod, Args}, []).
+
+-spec start_link(SupName, Module, Args) -> startlink_ret() when
+ SupName :: sup_name(),
+ Module :: module(),
+ Args :: term().
+start_link(SupName, Mod, Args) ->
+ gen_server:start_link(SupName, ?MODULE, {SupName, Mod, Args}, []).
+
+%%% ---------------------------------------------------
+%%% Interface functions.
+%%% ---------------------------------------------------
+
+-type startchild_err() :: 'already_present' | {'already_started', Child :: child()} | term().
+-type startchild_ret() :: {'ok', Child :: child()} | {'ok', Child :: child(), Info :: term()} | {'error', startchild_err()}.
+
+-spec start_child(SupRef, ChildSpec) -> startchild_ret() when
+ SupRef :: sup_ref(),
+ ChildSpec :: child_spec() | (List :: [term()]).
+start_child(Supervisor, ChildSpec) ->
+ call(Supervisor, {start_child, ChildSpec}).
+
+-spec restart_child(SupRef, Id) -> Result when
+ SupRef :: sup_ref(),
+ Id :: child_id(),
+ Result :: {'ok', Child :: child()}
+ | {'ok', Child :: child(), Info :: term()}
+ | {'error', Error},
+ Error :: 'running' | 'restarting' | 'not_found' | 'simple_one_for_one' | term().
+restart_child(Supervisor, Id) ->
+ call(Supervisor, {restart_child, Id}).
+
+-spec delete_child(SupRef, Id) -> Result when
+ SupRef :: sup_ref(),
+ Id :: child_id(),
+ Result :: 'ok' | {'error', Error},
+ Error :: 'running' | 'restarting' | 'not_found' | 'simple_one_for_one'.
+delete_child(Supervisor, Id) ->
+ call(Supervisor, {delete_child, Id}).
+
+%%-----------------------------------------------------------------
+%% Func: terminate_child/2
+%% Returns: ok | {error, Reason}
+%% Note that the child is *always* terminated in some
+%% way (maybe killed).
+%%-----------------------------------------------------------------
+
+-spec terminate_child(SupRef, Id) -> Result when
+ SupRef :: sup_ref(),
+ Id :: pid() | child_id(),
+ Result :: 'ok' | {'error', Error},
+ Error :: 'not_found' | 'simple_one_for_one'.
+terminate_child(Supervisor, Id) ->
+ call(Supervisor, {terminate_child, Id}).
+
+-spec get_childspec(SupRef, Id) -> Result when
+ SupRef :: sup_ref(),
+ Id :: pid() | child_id(),
+ Result :: {'ok', child_spec()} | {'error', Error},
+ Error :: 'not_found'.
+get_childspec(Supervisor, Id) ->
+ call(Supervisor, {get_childspec, Id}).
+
+-spec which_children(SupRef) -> [{Id,Child,Type,Modules}] when
+ SupRef :: sup_ref(),
+ Id :: child_id() | undefined,
+ Child :: child() | 'restarting',
+ Type :: worker(),
+ Modules :: modules().
+which_children(Supervisor) ->
+ call(Supervisor, which_children).
+
+-spec count_children(SupRef) -> PropListOfCounts when
+ SupRef :: sup_ref(),
+ PropListOfCounts :: [Count],
+ Count :: {specs, ChildSpecCount :: non_neg_integer()}
+ | {active, ActiveProcessCount :: non_neg_integer()}
+ | {supervisors, ChildSupervisorCount :: non_neg_integer()}
+ | {workers, ChildWorkerCount :: non_neg_integer()}.
+count_children(Supervisor) ->
+ call(Supervisor, count_children).
+
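+%% find_child/2 is a supervisor2 addition (point (2) in the header
+%% comment). For example (illustrative; MySup and my_worker_id are
+%% hypothetical):
+%%
+%%   [Pid] = supervisor2:find_child(MySup, my_worker_id).
+%%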
+-spec find_child(Supervisor, Name) -> [pid()] when
+ Supervisor :: sup_ref(),
+ Name :: child_id().
+find_child(Supervisor, Name) ->
+ [Pid || {Name1, Pid, _Type, _Modules} <- which_children(Supervisor),
+ Name1 =:= Name].
+
+call(Supervisor, Req) ->
+ gen_server:call(Supervisor, Req, infinity).
+
+-spec check_childspecs(ChildSpecs) -> Result when
+ ChildSpecs :: [child_spec()],
+ Result :: 'ok' | {'error', Error :: term()}.
+check_childspecs(ChildSpecs) when is_list(ChildSpecs) ->
+ case check_startspec(ChildSpecs) of
+ {ok, _} -> ok;
+ Error -> {error, Error}
+ end;
+check_childspecs(X) -> {error, {badarg, X}}.
+
+%%%-----------------------------------------------------------------
+%%% Called by release_handler during upgrade
+-spec get_callback_module(Pid) -> Module when
+ Pid :: pid(),
+ Module :: atom().
+get_callback_module(Pid) ->
+ {status, _Pid, {module, _Mod},
+ [_PDict, _SysState, _Parent, _Dbg, Misc]} = sys:get_status(Pid),
+ case lists:keyfind(?MODULE, 1, Misc) of
+ {?MODULE, [{"Callback", Mod}]} ->
+ Mod;
+ _ ->
+ [_Header, _Data, {data, [{"State", State}]} | _] = Misc,
+ State#state.module
+ end.
+
+%%% ---------------------------------------------------
+%%%
+%%% Initialize the supervisor.
+%%%
+%%% ---------------------------------------------------
+
+-type init_sup_name() :: sup_name() | 'self'.
+
+-type stop_rsn() :: {'shutdown', term()}
+ | {'bad_return', {module(),'init', term()}}
+ | {'bad_start_spec', term()}
+ | {'start_spec', term()}
+ | {'supervisor_data', term()}.
+
+-spec init({init_sup_name(), module(), [term()]}) ->
+ {'ok', state()} | 'ignore' | {'stop', stop_rsn()}.
+
+init({SupName, Mod, Args}) ->
+ process_flag(trap_exit, true),
+ case Mod:init(Args) of
+ {ok, {SupFlags, StartSpec}} ->
+ case init_state(SupName, SupFlags, Mod, Args) of
+ {ok, State} when ?is_simple(State) ->
+ init_dynamic(State, StartSpec);
+ {ok, State} ->
+ init_children(State, StartSpec);
+ Error ->
+ {stop, {supervisor_data, Error}}
+ end;
+ ignore ->
+ ignore;
+ Error ->
+ {stop, {bad_return, {Mod, init, Error}}}
+ end.
+
+init_children(State, StartSpec) ->
+ SupName = State#state.name,
+ case check_startspec(StartSpec) of
+ {ok, Children} ->
+ case start_children(Children, SupName) of
+ {ok, NChildren} ->
+ {ok, State#state{children = NChildren}};
+ {error, NChildren, Reason} ->
+ _ = terminate_children(NChildren, SupName),
+ {stop, {shutdown, Reason}}
+ end;
+ Error ->
+ {stop, {start_spec, Error}}
+ end.
+
+init_dynamic(State, [StartSpec]) ->
+ case check_startspec([StartSpec]) of
+ {ok, Children} ->
+ {ok, dyn_init(State#state{children = Children})};
+ Error ->
+ {stop, {start_spec, Error}}
+ end;
+init_dynamic(_State, StartSpec) ->
+ {stop, {bad_start_spec, StartSpec}}.
+
+%%-----------------------------------------------------------------
+%% Func: start_children/2
+%% Args: Children = children() % Ids in start order
+%% SupName = {local, atom()} | {global, atom()} | {pid(), Mod}
+%% Purpose: Start all children. The new map contains #child's
+%% with pids.
+%% Returns: {ok, NChildren} | {error, NChildren, Reason}
+%% NChildren = children() % Ids in termination order
+%% (reversed start order)
+%%-----------------------------------------------------------------
+start_children(Children, SupName) ->
+ Start =
+ fun(Id,Child) ->
+ case do_start_child(SupName, Child) of
+ {ok, undefined} when ?is_temporary(Child) ->
+ remove;
+ {ok, Pid} ->
+ {update,Child#child{pid = Pid}};
+ {ok, Pid, _Extra} ->
+ {update,Child#child{pid = Pid}};
+ {error, Reason} ->
+ ?report_error(start_error, Reason, Child, SupName),
+ {abort,{failed_to_start_child,Id,Reason}}
+ end
+ end,
+ children_map(Start,Children).
+
+do_start_child(SupName, Child) ->
+ #child{mfargs = {M, F, Args}} = Child,
+ case do_start_child_i(M, F, Args) of
+ {ok, Pid} when is_pid(Pid) ->
+ NChild = Child#child{pid = Pid},
+ report_progress(NChild, SupName),
+ {ok, Pid};
+ {ok, Pid, Extra} when is_pid(Pid) ->
+ NChild = Child#child{pid = Pid},
+ report_progress(NChild, SupName),
+ {ok, Pid, Extra};
+ Other ->
+ Other
+ end.
+
+do_start_child_i(M, F, A) ->
+ case catch apply(M, F, A) of
+ {ok, Pid} when is_pid(Pid) ->
+ {ok, Pid};
+ {ok, Pid, Extra} when is_pid(Pid) ->
+ {ok, Pid, Extra};
+ ignore ->
+ {ok, undefined};
+ {error, Error} ->
+ {error, Error};
+ What ->
+ {error, What}
+ end.
+
+%%% ---------------------------------------------------
+%%%
+%%% Callback functions.
+%%%
+%%% ---------------------------------------------------
+-type call() :: 'which_children' | 'count_children' | {_, _}. % XXX: refine
+-spec handle_call(call(), term(), state()) -> {'reply', term(), state()}.
+
+handle_call({start_child, EArgs}, _From, State) when ?is_simple(State) ->
+ Child = get_dynamic_child(State),
+ #child{mfargs = {M, F, A}} = Child,
+ Args = A ++ EArgs,
+ case do_start_child_i(M, F, Args) of
+ {ok, undefined} ->
+ {reply, {ok, undefined}, State};
+ {ok, Pid} ->
+ NState = dyn_store(Pid, Args, State),
+ {reply, {ok, Pid}, NState};
+ {ok, Pid, Extra} ->
+ NState = dyn_store(Pid, Args, State),
+ {reply, {ok, Pid, Extra}, NState};
+ What ->
+ {reply, What, State}
+ end;
+
+handle_call({start_child, ChildSpec}, _From, State) ->
+ case check_childspec(ChildSpec) of
+ {ok, Child} ->
+ {Resp, NState} = handle_start_child(Child, State),
+ {reply, Resp, NState};
+ What ->
+ {reply, {error, What}, State}
+ end;
+
+%% terminate_child for simple_one_for_one can only be done with pid
+handle_call({terminate_child, Id}, _From, State) when not is_pid(Id),
+ ?is_simple(State) ->
+ {reply, {error, simple_one_for_one}, State};
+
+handle_call({terminate_child, Id}, _From, State) ->
+ case internal_find_child(Id, State) of
+ {ok, Child} ->
+ do_terminate(Child, State#state.name),
+ {reply, ok, del_child(Child, State)};
+ error ->
+ {reply, {error, not_found}, State}
+ end;
+
+%% restart_child request is invalid for simple_one_for_one supervisors
+handle_call({restart_child, _Id}, _From, State) when ?is_simple(State) ->
+ {reply, {error, simple_one_for_one}, State};
+
+handle_call({restart_child, Id}, _From, State) ->
+ case internal_find_child(Id, State) of
+ {ok, Child} when Child#child.pid =:= undefined ->
+ case do_start_child(State#state.name, Child) of
+ {ok, Pid} ->
+ NState = set_pid(Pid, Id, State),
+ {reply, {ok, Pid}, NState};
+ {ok, Pid, Extra} ->
+ NState = set_pid(Pid, Id, State),
+ {reply, {ok, Pid, Extra}, NState};
+ Error ->
+ {reply, Error, State}
+ end;
+ {ok, #child{pid=?restarting(_)}} ->
+ {reply, {error, restarting}, State};
+ {ok, _} ->
+ {reply, {error, running}, State};
+ _ ->
+ {reply, {error, not_found}, State}
+ end;
+
+%% delete_child request is invalid for simple_one_for_one supervisors
+handle_call({delete_child, _Id}, _From, State) when ?is_simple(State) ->
+ {reply, {error, simple_one_for_one}, State};
+
+handle_call({delete_child, Id}, _From, State) ->
+ case internal_find_child(Id, State) of
+ {ok, Child} when Child#child.pid =:= undefined ->
+ NState = remove_child(Id, State),
+ {reply, ok, NState};
+ {ok, #child{pid=?restarting(_)}} ->
+ {reply, {error, restarting}, State};
+ {ok, _} ->
+ {reply, {error, running}, State};
+ _ ->
+ {reply, {error, not_found}, State}
+ end;
+
+handle_call({get_childspec, Id}, _From, State) ->
+ case internal_find_child(Id, State) of
+ {ok, Child} ->
+ {reply, {ok, child_to_spec(Child)}, State};
+ error ->
+ {reply, {error, not_found}, State}
+ end;
+
+handle_call(which_children, _From, State) when ?is_simple(State) ->
+ #child{child_type = CT,modules = Mods} = get_dynamic_child(State),
+ Reply = dyn_map(fun(?restarting(_)) -> {undefined, restarting, CT, Mods};
+ (Pid) -> {undefined, Pid, CT, Mods}
+ end, State),
+ {reply, Reply, State};
+
+handle_call(which_children, _From, State) ->
+ Resp =
+ children_to_list(
+ fun(Id,#child{pid = ?restarting(_),
+ child_type = ChildType, modules = Mods}) ->
+ {Id, restarting, ChildType, Mods};
+ (Id,#child{pid = Pid,
+ child_type = ChildType, modules = Mods}) ->
+ {Id, Pid, ChildType, Mods}
+ end,
+ State#state.children),
+ {reply, Resp, State};
+
+handle_call(count_children, _From, #state{dynamic_restarts = Restarts} = State)
+ when ?is_simple(State) ->
+ #child{child_type = CT} = get_dynamic_child(State),
+ Sz = dyn_size(State),
+ Active = Sz - Restarts, % Restarts is always 0 for temporary children
+ Reply = case CT of
+ supervisor -> [{specs, 1}, {active, Active},
+ {supervisors, Sz}, {workers, 0}];
+ worker -> [{specs, 1}, {active, Active},
+ {supervisors, 0}, {workers, Sz}]
+ end,
+ {reply, Reply, State};
+
+handle_call(count_children, _From, State) ->
+ %% Specs and children are together on the children list...
+ {Specs, Active, Supers, Workers} =
+ children_fold(fun(_Id, Child, Counts) ->
+ count_child(Child, Counts)
+ end, {0,0,0,0}, State#state.children),
+
+ %% Reformat counts to a property list.
+ Reply = [{specs, Specs}, {active, Active},
+ {supervisors, Supers}, {workers, Workers}],
+ {reply, Reply, State}.
+
+count_child(#child{pid = Pid, child_type = worker},
+ {Specs, Active, Supers, Workers}) ->
+ case is_pid(Pid) andalso is_process_alive(Pid) of
+ true -> {Specs+1, Active+1, Supers, Workers+1};
+ false -> {Specs+1, Active, Supers, Workers+1}
+ end;
+count_child(#child{pid = Pid, child_type = supervisor},
+ {Specs, Active, Supers, Workers}) ->
+ case is_pid(Pid) andalso is_process_alive(Pid) of
+ true -> {Specs+1, Active+1, Supers+1, Workers};
+ false -> {Specs+1, Active, Supers+1, Workers}
+ end.
+
+%%% If a restart attempt fails, this message is cast
+%%% from restart/2 in order to give gen_server the chance to
+%%% check its inbox before trying again.
+-spec handle_cast({try_again_restart, child_id() | {'restarting',pid()}}, state()) ->
+ {'noreply', state()} | {stop, shutdown, state()}.
+
+handle_cast({try_again_restart,TryAgainId}, State) ->
+ case find_child_and_args(TryAgainId, State) of
+ {ok, Child = #child{pid=?restarting(_)}} ->
+ case restart(Child,State) of
+ {ok, State1} ->
+ {noreply, State1};
+ {shutdown, State1} ->
+ {stop, shutdown, State1}
+ end;
+ _ ->
+ {noreply,State}
+ end.
+
+%%
+%% Take care of terminated children.
+%%
+-spec handle_info(term(), state()) ->
+ {'noreply', state()} | {'stop', 'shutdown', state()}.
+
+handle_info({'EXIT', Pid, Reason}, State) ->
+ case restart_child(Pid, Reason, State) of
+ {ok, State1} ->
+ {noreply, State1};
+ {shutdown, State1} ->
+ {stop, shutdown, State1}
+ end;
+
+handle_info({delayed_restart, {Reason, Child}}, State) when ?is_simple(State) ->
+ try_restart(Reason, Child, State#state{restarts = []}); %% [1]
+handle_info({delayed_restart, {Reason, Child}}, State) ->
+ ChildId = Child#child.id,
+ case internal_find_child(ChildId, State) of
+ {ok, Child1} ->
+ try_restart(Reason, Child1, State#state{restarts = []}); %% [1]
+ _What ->
+ {noreply, State}
+ end;
+%% [1] When we receive a delayed_restart message we want to reset the
+%% restarts field since otherwise the MaxT might not have elapsed and
+%% we would just delay again and again. Since a common use of the
+%% delayed restart feature is for MaxR = 1, MaxT = some huge number
+%% (so that we don't end up bouncing around in non-delayed restarts)
+%% this is important.
+
+handle_info(Msg, State) ->
+ ?LOG_ERROR("Supervisor received unexpected message: ~tp~n",[Msg],
+ #{domain=>[otp],
+ error_logger=>#{tag=>error}}),
+ {noreply, State}.
+
+%%
+%% Terminate this server.
+%%
+-spec terminate(term(), state()) -> 'ok'.
+
+terminate(_Reason, State) when ?is_simple(State) ->
+ terminate_dynamic_children(State);
+terminate(_Reason, State) ->
+ terminate_children(State#state.children, State#state.name).
+
+%%
+%% Change code for the supervisor.
+%% Call the new call-back module and fetch the new start specification.
+%% Combine the new spec. with the old. If the new start spec. is
+%% not valid the code change will not succeed.
+%% Use the old Args as argument to Module:init/1.
+%% NOTE: This requires that the init function of the call-back module
+%% does not have any side effects.
+%%
+-spec code_change(term(), state(), term()) ->
+ {'ok', state()} | {'error', term()}.
+
+code_change(_, State, _) ->
+ case (State#state.module):init(State#state.args) of
+ {ok, {SupFlags, StartSpec}} ->
+ case set_flags(SupFlags, State) of
+ {ok, State1} ->
+ update_childspec(State1, StartSpec);
+ {invalid_type, SupFlags} ->
+ {error, {bad_flags, SupFlags}}; % backwards compatibility
+ Error ->
+ {error, Error}
+ end;
+ ignore ->
+ {ok, State};
+ Error ->
+ Error
+ end.
+
+update_childspec(State, StartSpec) when ?is_simple(State) ->
+ case check_startspec(StartSpec) of
+ {ok, {[_],_}=Children} ->
+ {ok, State#state{children = Children}};
+ Error ->
+ {error, Error}
+ end;
+update_childspec(State, StartSpec) ->
+ case check_startspec(StartSpec) of
+ {ok, Children} ->
+ OldC = State#state.children, % In reverse start order!
+ NewC = update_childspec1(OldC, Children, []),
+ {ok, State#state{children = NewC}};
+ Error ->
+ {error, Error}
+ end.
+
+update_childspec1({[Id|OldIds], OldDb}, {Ids,Db}, KeepOld) ->
+ case update_chsp(maps:get(Id,OldDb), Db) of
+ {ok,NewDb} ->
+ update_childspec1({OldIds,OldDb}, {Ids,NewDb}, KeepOld);
+ false ->
+ update_childspec1({OldIds,OldDb}, {Ids,Db}, [Id|KeepOld])
+ end;
+update_childspec1({[],OldDb}, {Ids,Db}, KeepOld) ->
+ KeepOldDb = maps:with(KeepOld,OldDb),
+ %% Return them in (kept) reverse start order.
+ {lists:reverse(Ids ++ KeepOld),maps:merge(KeepOldDb,Db)}.
+
+update_chsp(#child{id=Id}=OldChild, NewDb) ->
+ case maps:find(Id, NewDb) of
+ {ok,Child} ->
+ {ok,NewDb#{Id => Child#child{pid = OldChild#child.pid}}};
+ error -> % Id not found in new spec.
+ false
+ end.
+
+
+%%% ---------------------------------------------------
+%%% Start a new child.
+%%% ---------------------------------------------------
+
+handle_start_child(Child, State) ->
+ case internal_find_child(Child#child.id, State) of
+ error ->
+ case do_start_child(State#state.name, Child) of
+ {ok, undefined} when ?is_temporary(Child) ->
+ {{ok, undefined}, State};
+ {ok, Pid} ->
+ {{ok, Pid}, save_child(Child#child{pid = Pid}, State)};
+ {ok, Pid, Extra} ->
+ {{ok, Pid, Extra}, save_child(Child#child{pid = Pid}, State)};
+ {error, What} ->
+ {{error, {What, Child}}, State}
+ end;
+ {ok, OldChild} when is_pid(OldChild#child.pid) ->
+ {{error, {already_started, OldChild#child.pid}}, State};
+ {ok, _OldChild} ->
+ {{error, already_present}, State}
+ end.
+
+%%% ---------------------------------------------------
+%%% Restart. A process has terminated.
+%%% Returns: {ok, state()} | {shutdown, state()}
+%%% ---------------------------------------------------
+
+restart_child(Pid, Reason, State) ->
+ case find_child_and_args(Pid, State) of
+ {ok, Child} ->
+ do_restart(Reason, Child, State);
+ error ->
+ {ok, State}
+ end.
+
+try_restart(Reason, Child, State) ->
+ case do_restart(Reason, Child, State) of
+ {ok, NState} -> {noreply, NState};
+ {shutdown, State2} -> {stop, shutdown, State2}
+ end.
+
+do_restart(Reason, Child=#child{restart_type=permanent}, State) -> % is_permanent
+ ?report_error(child_terminated, Reason, Child, State#state.name),
+ restart(Child, State);
+do_restart(Reason, Child=#child{restart_type={permanent,_Delay}}, State) -> % is_permanent_delay
+ ?report_error(child_terminated, Reason, Child, State#state.name),
+ do_restart_delay(Reason, Child, State);
+do_restart(Reason, Child=#child{restart_type=transient}, State) -> % is_transient
+ maybe_report_error(Reason, Child, State),
+ restart_if_explicit_or_abnormal(fun restart/2,
+ fun delete_child_and_continue/2,
+ Reason, Child, State);
+do_restart(Reason, Child=#child{restart_type={transient,_Delay}}, State) -> % is_transient_delay
+ maybe_report_error(Reason, Child, State),
+ restart_if_explicit_or_abnormal(defer_to_restart_delay(Reason),
+ fun delete_child_and_continue/2,
+ Reason, Child, State);
+do_restart(Reason, Child=#child{restart_type=intrinsic}, State) -> % is_intrinsic
+ maybe_report_error(Reason, Child, State),
+ restart_if_explicit_or_abnormal(fun restart/2,
+ fun delete_child_and_stop/2,
+ Reason, Child, State);
+do_restart(Reason, Child=#child{restart_type={intrinsic,_Delay}}, State) -> % is_intrinsic_delay
+ maybe_report_error(Reason, Child, State),
+ restart_if_explicit_or_abnormal(defer_to_restart_delay(Reason),
+ fun delete_child_and_stop/2,
+ Reason, Child, State);
+do_restart(normal, Child, State) ->
+ NState = del_child(Child, State),
+ {ok, NState};
+do_restart(shutdown, Child, State) ->
+ NState = del_child(Child, State),
+ {ok, NState};
+do_restart({shutdown, _Term}, Child, State) ->
+ NState = del_child(Child, State),
+ {ok, NState};
+do_restart(Reason, Child, State) when ?is_temporary(Child) ->
+ ?report_error(child_terminated, Reason, Child, State#state.name),
+ NState = del_child(Child, State),
+ {ok, NState}.
+
+maybe_report_error(Reason, Child, State) ->
+ case is_abnormal_termination(Reason) of
+ true ->
+ ?report_error(child_terminated, Reason, Child, State#state.name);
+ false ->
+ ok
+ end.
+
+restart_if_explicit_or_abnormal(RestartHow, Otherwise, Reason, Child, State) ->
+ case ?is_explicit_restart(Reason) orelse is_abnormal_termination(Reason) of
+ true -> RestartHow(Child, State);
+ false -> Otherwise(Child, State)
+ end.
+
+defer_to_restart_delay(Reason) ->
+ fun(Child, State) -> do_restart_delay(Reason, Child, State) end.
+
+delete_child_and_continue(Child, State) ->
+ {ok, del_child(Child, State)}.
+
+delete_child_and_stop(Child, State) ->
+ NState = del_child(Child, State),
+ {shutdown, NState}.
+
+is_abnormal_termination(normal) -> false;
+is_abnormal_termination(shutdown) -> false;
+is_abnormal_termination({shutdown, _}) -> false;
+is_abnormal_termination(_Other) -> true.
+
+do_restart_delay(Reason,
+ Child = #child{id = ChildId,
+ pid = ChildPid0,
+ restart_type = {_RestartType, Delay}},
+ State0) ->
+ case add_restart(State0) of
+ {ok, State1} ->
+ Strategy = State1#state.strategy,
+ maybe_restart(Strategy, Child, State1);
+ {terminate, State1} ->
+ %% we've reached the max restart intensity, but
+ %% add_restart will have added to the restarts
+ %% field. Since we don't want to die here, we need to go
+ %% back to the old restarts field; otherwise we'll never
+ %% attempt to restart later, which is why we ignore
+ %% NState for this clause.
+ Msg = {delayed_restart, {Reason, Child}},
+ _TRef = erlang:send_after(trunc(Delay*1000), self(), Msg),
+ ChildPid1 = restarting(ChildPid0),
+ % Note: State0 is intentionally used here
+ % TODO LRB
+ State2 = set_pid(ChildPid1, ChildId, State1),
+ {ok, State2}
+ end.
+
+maybe_restart(Strategy, Child, State) ->
+ case restart(Strategy, Child, State) of
+ {{try_again, Reason}, NState2} ->
+ %% Handing control back to gen_server before
+ %% trying again. This way other incoming requests
+ %% for the supervisor can be handled - e.g. a
+ %% shutdown request for the supervisor or the
+ %% child.
+ Id = if ?is_simple(State) -> Child#child.pid;
+ true -> Child#child.id
+ end,
+ Args = [self(), Id, Reason],
+ {ok, _TRef} = timer:apply_after(0, ?MODULE, try_again_restart, Args),
+ {ok, NState2};
+ Other ->
+ Other
+ end.
+
+restart(Child, State) ->
+ case add_restart(State) of
+ {ok, NState} ->
+ case restart(NState#state.strategy, Child, NState) of
+ {{try_again, TryAgainId}, NState2} ->
+ %% Handing control back to gen_server before
+ %% trying again. This way other incoming requests
+ %% for the supervisor can be handled - e.g. a
+ %% shutdown request for the supervisor or the
+ %% child.
+ try_again_restart(TryAgainId),
+ {ok,NState2};
+ Other ->
+ Other
+ end;
+ {terminate, NState} ->
+ ?report_error(shutdown, reached_max_restart_intensity,
+ Child, State#state.name),
+ {shutdown, del_child(Child, NState)}
+ end.
+
+restart(simple_one_for_one, Child, State0) ->
+ #child{pid = OldPid, mfargs = {M, F, A}} = Child,
+ State1 = case OldPid of
+ ?restarting(_) ->
+ NRes = State0#state.dynamic_restarts - 1,
+ State0#state{dynamic_restarts = NRes};
+ _ ->
+ State0
+ end,
+ State2 = dyn_erase(OldPid, State1),
+ case do_start_child_i(M, F, A) of
+ {ok, Pid} ->
+ NState = dyn_store(Pid, A, State2),
+ {ok, NState};
+ {ok, Pid, _Extra} ->
+ NState = dyn_store(Pid, A, State2),
+ {ok, NState};
+ {error, Error} ->
+ ROldPid = restarting(OldPid),
+ NRestarts = State2#state.dynamic_restarts + 1,
+ State3 = State2#state{dynamic_restarts = NRestarts},
+ NState = dyn_store(ROldPid, A, State3),
+ ?report_error(start_error, Error, Child, NState#state.name),
+ {{try_again, ROldPid}, NState}
+ end;
+restart(one_for_one, #child{id=Id} = Child, State) ->
+ OldPid = Child#child.pid,
+ case do_start_child(State#state.name, Child) of
+ {ok, Pid} ->
+ NState = set_pid(Pid, Id, State),
+ {ok, NState};
+ {ok, Pid, _Extra} ->
+ NState = set_pid(Pid, Id, State),
+ {ok, NState};
+ {error, Reason} ->
+ NState = set_pid(restarting(OldPid), Id, State),
+ ?report_error(start_error, Reason, Child, State#state.name),
+ {{try_again,Id}, NState}
+ end;
+restart(rest_for_one, #child{id=Id} = Child, #state{name=SupName} = State) ->
+ {ChAfter, ChBefore} = split_child(Id, State#state.children),
+ {Return, ChAfter2} = restart_multiple_children(Child, ChAfter, SupName),
+ {Return, State#state{children = append(ChAfter2,ChBefore)}};
+restart(one_for_all, Child, #state{name=SupName} = State) ->
+ Children1 = del_child(Child#child.id, State#state.children),
+ {Return, NChildren} = restart_multiple_children(Child, Children1, SupName),
+ {Return, State#state{children = NChildren}}.
+
+restart_multiple_children(Child, Children, SupName) ->
+ Children1 = terminate_children(Children, SupName),
+ case start_children(Children1, SupName) of
+ {ok, NChildren} ->
+ {ok, NChildren};
+ {error, NChildren, {failed_to_start_child, FailedId, _Reason}} ->
+ NewPid = if FailedId =:= Child#child.id ->
+ restarting(Child#child.pid);
+ true ->
+ ?restarting(undefined)
+ end,
+ {{try_again, FailedId}, set_pid(NewPid,FailedId,NChildren)}
+ end.
+
+restarting(Pid) when is_pid(Pid) -> ?restarting(Pid);
+restarting(RPid) -> RPid.
+
+-spec try_again_restart(child_id() | {'restarting',pid()}) -> 'ok'.
+try_again_restart(TryAgainId) ->
+ gen_server:cast(self(), {try_again_restart, TryAgainId}).
+
+%%-----------------------------------------------------------------
+%% Func: terminate_children/2
+%% Args: Children = children() % Ids in termination order
+%% SupName = {local, atom()} | {global, atom()} | {pid(),Mod}
+%% Returns: NChildren = children() % Ids in startup order
+%% % (reversed termination order)
+%%-----------------------------------------------------------------
+terminate_children(Children, SupName) ->
+ Terminate =
+ fun(_Id,Child) when ?is_temporary(Child) ->
+ %% Temporary children should not be restarted and thus should
+ %% be skipped when building the list of terminated children.
+ do_terminate(Child, SupName),
+ remove;
+ (_Id,Child) ->
+ do_terminate(Child, SupName),
+ {update,Child#child{pid=undefined}}
+ end,
+ {ok,NChildren} = children_map(Terminate, Children),
+ NChildren.
+
+do_terminate(Child, SupName) when is_pid(Child#child.pid) ->
+ case shutdown(Child#child.pid, Child#child.shutdown) of
+ ok ->
+ ok;
+ {error, normal} when not (?is_permanent(Child)) ->
+ ok;
+ {error, OtherReason} ->
+ ?report_error(shutdown_error, OtherReason, Child, SupName)
+ end,
+ ok;
+do_terminate(_Child, _SupName) ->
+ ok.
+
+%%-----------------------------------------------------------------
+%% Shuts down a child. We must check the EXIT value
+%% of the child, because it might have died with a reason other than
+%% the expected one. In that case we want to report the error. We put a
+%% monitor on the child and check for the 'DOWN' message instead of
+%% checking for the 'EXIT' message, because if we check the 'EXIT'
+%% message a "naughty" child that does unlink(Sup) could hang the
+%% supervisor.
+%% Returns: ok | {error, OtherReason} (this should be reported)
+%%-----------------------------------------------------------------
+shutdown(Pid, brutal_kill) ->
+ case monitor_child(Pid) of
+ ok ->
+ exit(Pid, kill),
+ receive
+ {'DOWN', _MRef, process, Pid, killed} ->
+ ok;
+ {'DOWN', _MRef, process, Pid, OtherReason} ->
+ {error, OtherReason}
+ end;
+ {error, Reason} ->
+ {error, Reason}
+ end;
+shutdown(Pid, Time) ->
+ case monitor_child(Pid) of
+ ok ->
+ exit(Pid, shutdown), %% Try to shutdown gracefully
+ receive
+ {'DOWN', _MRef, process, Pid, shutdown} ->
+ ok;
+ {'DOWN', _MRef, process, Pid, OtherReason} ->
+ {error, OtherReason}
+ after Time ->
+ exit(Pid, kill), %% Force termination.
+ receive
+ {'DOWN', _MRef, process, Pid, OtherReason} ->
+ {error, OtherReason}
+ end
+ end;
+ {error, Reason} ->
+ {error, Reason}
+ end.
+
+%% Helper function for shutdown/2; switches from the link to the monitor approach
+monitor_child(Pid) ->
+
+ %% Do the monitor operation first so that if the child dies
+ %% before the monitoring is done causing a 'DOWN'-message with
+ %% reason noproc, we will get the real reason in the 'EXIT'-message
+ %% unless a naughty child has already done unlink...
+ erlang:monitor(process, Pid),
+ unlink(Pid),
+
+ receive
+ %% If the child dies before the unlink we must empty
+ %% the mail-box of the 'EXIT'-message and the 'DOWN'-message.
+ {'EXIT', Pid, Reason} ->
+ receive
+ {'DOWN', _, process, Pid, _} ->
+ {error, Reason}
+ end
+ after 0 ->
+ %% If a naughty child did unlink and the child died before
+ %% the monitor was set up, shutdown/2 will receive a
+ %% 'DOWN'-message with reason noproc.
+ %% If the child dies after the unlink there
+ %% will be a 'DOWN'-message with the correct reason,
+ %% which will be handled in shutdown/2.
+ ok
+ end.
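+
+%% Illustrative timeline (not in the original source), assuming the
+%% (trapping) supervisor is linked to a child that exits with reason
+%% bad_thing just before monitor_child/1 runs:
+%%
+%%   erlang:monitor(process, Pid), %% queues {'DOWN', ..., noproc}
+%%   unlink(Pid),                  %% {'EXIT', Pid, bad_thing} already queued
+%%   %% the receive ... after 0 above consumes both messages and
+%%   %% returns {error, bad_thing}, so shutdown/2 reports the real
+%%   %% exit reason instead of noproc.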
+
+%%-----------------------------------------------------------------
+%% Func: terminate_dynamic_children/1
+%% Args: State
+%% Returns: ok
+%%
+%% Shutdown all dynamic children. This happens when the supervisor is
+%% stopped. Because the supervisor can have millions of dynamic children, we
+%% can incur significant overhead here.
+%%-----------------------------------------------------------------
+terminate_dynamic_children(State) ->
+ Child = get_dynamic_child(State),
+ {Pids, EStack0} = monitor_dynamic_children(Child,State),
+ Sz = sets:size(Pids),
+ EStack = case Child#child.shutdown of
+ brutal_kill ->
+ sets:fold(fun(P, _) -> exit(P, kill) end, ok, Pids),
+ wait_dynamic_children(Child, Pids, Sz, undefined, EStack0);
+ infinity ->
+ sets:fold(fun(P, _) -> exit(P, shutdown) end, ok, Pids),
+ wait_dynamic_children(Child, Pids, Sz, undefined, EStack0);
+ Time ->
+ sets:fold(fun(P, _) -> exit(P, shutdown) end, ok, Pids),
+ TRef = erlang:start_timer(Time, self(), kill),
+ wait_dynamic_children(Child, Pids, Sz, TRef, EStack0)
+ end,
+ %% Unroll stacked errors and report them
+ dict:fold(fun(Reason, Ls, _) ->
+ ?report_error(shutdown_error, Reason,
+ Child#child{pid=Ls}, State#state.name)
+ end, ok, EStack).
+
+monitor_dynamic_children(Child,State) ->
+ dyn_fold(fun(P,{Pids, EStack}) when is_pid(P) ->
+ case monitor_child(P) of
+ ok ->
+ {sets:add_element(P, Pids), EStack};
+ {error, normal} when not (?is_permanent(Child)) ->
+ {Pids, EStack};
+ {error, Reason} ->
+ {Pids, dict:append(Reason, P, EStack)}
+ end;
+ (?restarting(_), {Pids, EStack}) ->
+ {Pids, EStack}
+ end, {sets:new(), dict:new()}, State).
+
+wait_dynamic_children(_Child, _Pids, 0, undefined, EStack) ->
+ EStack;
+wait_dynamic_children(_Child, _Pids, 0, TRef, EStack) ->
+ %% If the timer has expired before its cancellation, we must empty the
+ %% mail-box of the 'timeout'-message.
+ _ = erlang:cancel_timer(TRef),
+ receive
+ {timeout, TRef, kill} ->
+ EStack
+ after 0 ->
+ EStack
+ end;
+wait_dynamic_children(#child{shutdown=brutal_kill} = Child, Pids, Sz,
+ TRef, EStack) ->
+ receive
+ {'DOWN', _MRef, process, Pid, killed} ->
+ wait_dynamic_children(Child, sets:del_element(Pid, Pids), Sz-1,
+ TRef, EStack);
+
+ {'DOWN', _MRef, process, Pid, Reason} ->
+ wait_dynamic_children(Child, sets:del_element(Pid, Pids), Sz-1,
+ TRef, dict:append(Reason, Pid, EStack))
+ end;
+wait_dynamic_children(Child, Pids, Sz, TRef, EStack) ->
+ receive
+ {'DOWN', _MRef, process, Pid, shutdown} ->
+ wait_dynamic_children(Child, sets:del_element(Pid, Pids), Sz-1,
+ TRef, EStack);
+
+ {'DOWN', _MRef, process, Pid, {shutdown, _}} ->
+ wait_dynamic_children(Child, sets:del_element(Pid, Pids), Sz-1,
+ TRef, EStack);
+
+ {'DOWN', _MRef, process, Pid, normal} when not (?is_permanent(Child)) ->
+ wait_dynamic_children(Child, sets:del_element(Pid, Pids), Sz-1,
+ TRef, EStack);
+
+ {'DOWN', _MRef, process, Pid, Reason} ->
+ wait_dynamic_children(Child, sets:del_element(Pid, Pids), Sz-1,
+ TRef, dict:append(Reason, Pid, EStack));
+
+ {timeout, TRef, kill} ->
+ sets:fold(fun(P, _) -> exit(P, kill) end, ok, Pids),
+ wait_dynamic_children(Child, Pids, Sz, undefined, EStack)
+ end.
+
+%%-----------------------------------------------------------------
+%% Access #state.children
+%%-----------------------------------------------------------------
+
+%% Note we do not want to save the parameter list for temporary processes as
+%% they will not be restarted, and hence we do not need this information.
+%% Especially for dynamic children of simple_one_for_one supervisors
+%% it could become very costly as it is not uncommon to spawn
+%% very many such processes.
+-spec save_child(child_rec(), state()) -> state().
+save_child(#child{mfargs = {M, F, _}} = Child, State) when ?is_temporary(Child) ->
+ do_save_child(Child#child{mfargs = {M, F, undefined}}, State);
+save_child(Child, State) ->
+ do_save_child(Child, State).
+
+-spec do_save_child(child_rec(), state()) -> state().
+do_save_child(#child{id = Id} = Child, #state{children = {Ids,Db}} = State) ->
+ State#state{children = {[Id|Ids],Db#{Id => Child}}}.
+
+-spec del_child(child_rec(), state()) -> state();
+ (child_id(), children()) -> children().
+del_child(#child{pid = Pid}, State) when ?is_simple(State) ->
+ dyn_erase(Pid,State);
+del_child(Child, State) when is_record(Child,child), is_record(State,state) ->
+ NChildren = del_child(Child#child.id, State#state.children),
+ State#state{children = NChildren};
+del_child(Id, {Ids,Db}) ->
+ case maps:get(Id, Db) of
+ Child when Child#child.restart_type =:= temporary ->
+ {lists:delete(Id, Ids), maps:remove(Id, Db)};
+ Child ->
+ {Ids, Db#{Id=>Child#child{pid=undefined}}}
+ end.
+
+%% In: {[S4, S3, Ch, S1, S0],Db}
+%% Ret: {{[S4, S3, Ch],Db1}, {[S1, S0],Db2}}
+%% Db1 and Db2 contain the keys in the lists they are associated with.
+-spec split_child(child_id(), children()) -> {children(), children()}.
+split_child(Id, {Ids,Db}) ->
+ {IdsAfter,IdsBefore} = split_ids(Id, Ids, []),
+ DbBefore = maps:with(IdsBefore,Db),
+ #{Id:=Ch} = DbAfter = maps:with(IdsAfter,Db),
+ {{IdsAfter,DbAfter#{Id=>Ch#child{pid=undefined}}},{IdsBefore,DbBefore}}.
+
+split_ids(Id, [Id|Ids], After) ->
+ {lists:reverse([Id|After]), Ids};
+split_ids(Id, [Other|Ids], After) ->
+ split_ids(Id, Ids, [Other | After]).
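+
+%% Worked example (illustrative ids): with children stored newest first,
+%% Ids = [s4, s3, ch, s1, s0] and Id = ch give
+%%
+%%   split_ids(ch, [s4, s3, ch, s1, s0], []) %% => {[s4, s3, ch], [s1, s0]}
+%%
+%% so split_child/2 returns ch and the children started after it (with
+%% ch's pid reset to undefined) separately from those started before it.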
+
+%% Find the child record for a given Pid (dynamic child) or Id
+%% (non-dynamic child). This is called from the API functions.
+-spec internal_find_child(pid() | child_id(), state()) -> {ok,child_rec()} | error.
+internal_find_child(Pid, State) when is_pid(Pid), ?is_simple(State) ->
+ case find_dynamic_child(Pid, State) of
+ error ->
+ case find_dynamic_child(restarting(Pid), State) of
+ error ->
+ case erlang:is_process_alive(Pid) of
+ true -> error;
+ false -> {ok, get_dynamic_child(State)}
+ end;
+ Other ->
+ Other
+ end;
+ Other ->
+ Other
+ end;
+internal_find_child(Id, #state{children = {_Ids,Db}}) ->
+ maps:find(Id, Db).
+
+%% Get the child record - either by child id or by pid. If
+%% simple_one_for_one, then insert the pid and args into the returned
+%% child record. This is called when trying to restart the child.
+-spec find_child_and_args(IdOrPid, state()) -> {ok, child_rec()} | error when
+ IdOrPid :: pid() | {restarting,pid()} | child_id().
+find_child_and_args(Pid, State) when ?is_simple(State) ->
+ case find_dynamic_child(Pid, State) of
+ {ok,#child{mfargs={M,F,_}} = Child} ->
+ {ok, Args} = dyn_args(Pid, State),
+ {ok, Child#child{mfargs = {M, F, Args}}};
+ error ->
+ error
+ end;
+find_child_and_args(Pid, State) when is_pid(Pid) ->
+ find_child_by_pid(Pid, State);
+find_child_and_args(Id, #state{children={_Ids,Db}}) ->
+ maps:find(Id, Db).
+
+%% Given the pid, find the child record for a dynamic child, and
+%% include the pid in the returned record.
+-spec find_dynamic_child(IdOrPid, state()) -> {ok, child_rec()} | error when
+ IdOrPid :: pid() | {restarting,pid()} | child_id().
+find_dynamic_child(Pid, State) ->
+ case dyn_exists(Pid, State) of
+ true ->
+ Child = get_dynamic_child(State),
+ {ok, Child#child{pid=Pid}};
+ false ->
+ error
+ end.
+
+%% Given the pid, find the child record for a non-dynamic child.
+-spec find_child_by_pid(IdOrPid, state()) -> {ok,child_rec()} | error when
+ IdOrPid :: pid() | {restarting,pid()}.
+find_child_by_pid(Pid,#state{children={_Ids,Db}}) ->
+ Fun = fun(_Id,#child{pid=P}=Ch,_) when P =:= Pid ->
+ throw(Ch);
+ (_,_,error) ->
+ error
+ end,
+ try maps:fold(Fun,error,Db)
+ catch throw:Child -> {ok,Child}
+ end.
+
+%% Get the child record from a simple_one_for_one supervisor - no pid.
+%% It is assumed that the child can always be found.
+-spec get_dynamic_child(state()) -> child_rec().
+get_dynamic_child(#state{children={[Id],Db}}) ->
+ #{Id := Child} = Db,
+ Child.
+
+%% Update pid in the given child record and store it in the process state
+-spec set_pid(term(), child_id(), state()) -> state();
+ (term(), child_id(), children()) -> children().
+set_pid(Pid, Id, #state{children=Children} = State) ->
+ State#state{children = set_pid(Pid, Id, Children)};
+set_pid(Pid, Id, {Ids, Db}) ->
+ NewDb = maps:update_with(Id, fun(Child) -> Child#child{pid=Pid} end, Db),
+ {Ids,NewDb}.
+
+%% Remove the Id and the child record from the process state
+-spec remove_child(child_id(), state()) -> state().
+remove_child(Id, #state{children={Ids,Db}} = State) ->
+ NewIds = lists:delete(Id,Ids),
+ NewDb = maps:remove(Id,Db),
+ State#state{children = {NewIds,NewDb}}.
+
+%% In the order of Ids, traverse the children and update each child
+%% according to the return value of the Fun.
+%% On error, abort and return the merge of the old and the updated map.
+%% NOTE: The returned list of Ids is reversed compared to the input.
+-spec children_map(Fun, children()) -> {ok, children()} |
+ {error,children(),Reason} when
+ Fun :: fun((child_id(),child_rec()) -> {update,child_rec()} |
+ remove |
+ {abort, Reason}),
+ Reason :: term().
+children_map(Fun,{Ids,Db}) ->
+ children_map(Fun, Ids, Db, []).
+
+children_map(Fun,[Id|Ids],Db,Acc) ->
+ case Fun(Id,maps:get(Id,Db)) of
+ {update,Child} ->
+ children_map(Fun,Ids,Db#{Id => Child},[Id|Acc]);
+ remove ->
+ children_map(Fun,Ids,maps:remove(Id,Db),Acc);
+ {abort,Reason} ->
+ {error,{lists:reverse(Ids)++[Id|Acc],Db},Reason}
+ end;
+children_map(_Fun,[],Db,Acc) ->
+ {ok,{Acc,Db}}.
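+
+%% Usage sketch (illustrative): terminate_children/2 above uses this to
+%% drop temporary children and mark the rest as not running:
+%%
+%%   Fun = fun(_Id, Child) when ?is_temporary(Child) -> remove;
+%%            (_Id, Child) -> {update, Child#child{pid = undefined}}
+%%         end,
+%%   {ok, NChildren} = children_map(Fun, Children).
+%%
+%% The Id list in NChildren comes back reversed relative to the input.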
+
+%% In the order of Ids, map over all children and return the list
+-spec children_to_list(Fun, children()) -> List when
+ Fun :: fun((child_id(), child_rec()) -> Elem),
+ List :: list(Elem),
+ Elem :: term().
+children_to_list(Fun,{Ids,Db}) ->
+ children_to_list(Fun, Ids, Db, []).
+children_to_list(Fun,[Id|Ids],Db,Acc) ->
+ children_to_list(Fun,Ids,Db,[Fun(Id,maps:get(Id,Db))|Acc]);
+children_to_list(_Fun,[],_Db,Acc) ->
+ lists:reverse(Acc).
+
+%% The order is not important - so ignore Ids
+-spec children_fold(Fun, Acc0, children()) -> Acc1 when
+ Fun :: fun((child_id(), child_rec(), AccIn) -> AccOut),
+ Acc0 :: term(),
+ Acc1 :: term(),
+ AccIn :: term(),
+ AccOut :: term().
+children_fold(Fun,Init,{_Ids,Db}) ->
+ maps:fold(Fun, Init, Db).
+
+-spec append(children(), children()) -> children().
+append({Ids1,Db1},{Ids2,Db2}) ->
+ {Ids1++Ids2,maps:merge(Db1,Db2)}.
+
+%%-----------------------------------------------------------------
+%% Func: init_state/4
+%% Args: SupName = {local, atom()} | {global, atom()} | self
+%% Type = {Strategy, MaxIntensity, Period}
+%% Strategy = one_for_one | one_for_all | simple_one_for_one |
+%% rest_for_one
+%% MaxIntensity = integer() >= 0
+%% Period = integer() > 0
+%% Mod :== atom()
+%% Args :== term()
+%% Purpose: Check that Type is of correct type (!)
+%% Returns: {ok, state()} | Error
+%%-----------------------------------------------------------------
+init_state(SupName, Type, Mod, Args) ->
+ set_flags(Type, #state{name = supname(SupName,Mod),
+ module = Mod,
+ args = Args}).
+
+set_flags(Flags, State) ->
+ try check_flags(Flags) of
+ #{strategy := Strategy, intensity := MaxIntensity, period := Period} ->
+ {ok, State#state{strategy = Strategy,
+ intensity = MaxIntensity,
+ period = Period}}
+ catch
+ Thrown -> Thrown
+ end.
+
+check_flags(SupFlags) when is_map(SupFlags) ->
+ do_check_flags(maps:merge(?default_flags,SupFlags));
+check_flags({Strategy, MaxIntensity, Period}) ->
+ check_flags(#{strategy => Strategy,
+ intensity => MaxIntensity,
+ period => Period});
+check_flags(What) ->
+ throw({invalid_type, What}).
+
+do_check_flags(#{strategy := Strategy,
+ intensity := MaxIntensity,
+ period := Period} = Flags) ->
+ validStrategy(Strategy),
+ validIntensity(MaxIntensity),
+ validPeriod(Period),
+ Flags.
+
+validStrategy(simple_one_for_one) -> true;
+validStrategy(one_for_one) -> true;
+validStrategy(one_for_all) -> true;
+validStrategy(rest_for_one) -> true;
+validStrategy(What) -> throw({invalid_strategy, What}).
+
+validIntensity(Max) when is_integer(Max),
+ Max >= 0 -> true;
+validIntensity(What) -> throw({invalid_intensity, What}).
+
+validPeriod(Period) when is_integer(Period),
+ Period > 0 -> true;
+validPeriod(What) -> throw({invalid_period, What}).
+
+supname(self, Mod) -> {self(), Mod};
+supname(N, _) -> N.
+
+%%% ------------------------------------------------------
+%%% Check that the children start specification is valid.
+%%% Input: [child_spec()]
+%%% Returns: {ok, [child_rec()]} | Error
+%%% ------------------------------------------------------
+
+check_startspec(Children) -> check_startspec(Children, [], #{}).
+
+check_startspec([ChildSpec|T], Ids, Db) ->
+ case check_childspec(ChildSpec) of
+ {ok, #child{id=Id}=Child} ->
+ case maps:is_key(Id, Db) of
+ %% The error message duplicate_child_name is kept for
+ %% backwards compatibility, although
+ %% duplicate_child_id would be more correct.
+ true -> {duplicate_child_name, Id};
+ false -> check_startspec(T, [Id | Ids], Db#{Id=>Child})
+ end;
+ Error -> Error
+ end;
+check_startspec([], Ids, Db) ->
+ {ok, {lists:reverse(Ids),Db}}.
+
+check_childspec(ChildSpec) when is_map(ChildSpec) ->
+ catch do_check_childspec(maps:merge(?default_child_spec,ChildSpec));
+check_childspec({Id, Func, RestartType, Shutdown, ChildType, Mods}) ->
+ check_childspec(#{id => Id,
+ start => Func,
+ restart => RestartType,
+ shutdown => Shutdown,
+ type => ChildType,
+ modules => Mods});
+check_childspec(X) -> {invalid_child_spec, X}.
+
+do_check_childspec(#{restart := RestartType,
+ type := ChildType} = ChildSpec)->
+ Id = case ChildSpec of
+ #{id := I} -> I;
+ _ -> throw(missing_id)
+ end,
+ Func = case ChildSpec of
+ #{start := F} -> F;
+ _ -> throw(missing_start)
+ end,
+ validId(Id),
+ validFunc(Func),
+ validRestartType(RestartType),
+ validChildType(ChildType),
+ Shutdown = case ChildSpec of
+ #{shutdown := S} -> S;
+ #{type := worker} -> 5000;
+ #{type := supervisor} -> infinity
+ end,
+ validShutdown(Shutdown),
+ Mods = case ChildSpec of
+ #{modules := Ms} -> Ms;
+ _ -> {M,_,_} = Func, [M]
+ end,
+ validMods(Mods),
+ {ok, #child{id = Id, mfargs = Func, restart_type = RestartType,
+ shutdown = Shutdown, child_type = ChildType, modules = Mods}}.
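+
+%% Example (illustrative; my_worker is a hypothetical module): given the
+%% defaults merged in by check_childspec/1, the minimal map spec
+%%
+%%   #{id => my_worker, start => {my_worker, start_link, []}}
+%%
+%% passes validation with shutdown defaulted to 5000 (worker) and
+%% modules defaulted to [my_worker]; restart and type come from
+%% ?default_child_spec.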
+
+validChildType(supervisor) -> true;
+validChildType(worker) -> true;
+validChildType(What) -> throw({invalid_child_type, What}).
+
+validId(_Id) -> true.
+
+validFunc({M, F, A}) when is_atom(M),
+ is_atom(F),
+ is_list(A) -> true;
+validFunc(Func) -> throw({invalid_mfa, Func}).
+
+validRestartType(permanent) -> true;
+validRestartType({permanent, Delay}) -> validDelay(Delay);
+validRestartType(temporary) -> true;
+validRestartType(transient) -> true;
+validRestartType({transient, Delay}) -> validDelay(Delay);
+validRestartType(intrinsic) -> true;
+validRestartType({intrinsic, Delay}) -> validDelay(Delay);
+validRestartType(RestartType) -> throw({invalid_restart_type, RestartType}).
+
+validDelay(Delay) when is_number(Delay), Delay >= 0 ->
+ true;
+validDelay(What) ->
+ throw({invalid_delay, What}).
+
+validShutdown(Shutdown)
+ when is_integer(Shutdown), Shutdown > 0 -> true;
+validShutdown(infinity) -> true;
+validShutdown(brutal_kill) -> true;
+validShutdown(Shutdown) -> throw({invalid_shutdown, Shutdown}).
+
+validMods(dynamic) -> true;
+validMods(Mods) when is_list(Mods) ->
+ lists:foreach(fun(Mod) ->
+ if
+ is_atom(Mod) -> ok;
+ true -> throw({invalid_module, Mod})
+ end
+ end,
+ Mods);
+validMods(Mods) -> throw({invalid_modules, Mods}).
+
+child_to_spec(#child{id = Id,
+ mfargs = Func,
+ restart_type = RestartType,
+ shutdown = Shutdown,
+ child_type = ChildType,
+ modules = Mods}) ->
+ #{id => Id,
+ start => Func,
+ restart => RestartType,
+ shutdown => Shutdown,
+ type => ChildType,
+ modules => Mods}.
+
+%%% ------------------------------------------------------
+%%% Add a new restart and calculate if the max restart
+%%% intensity has been reached (in that case the supervisor
+%%% shall terminate).
+%%% All restarts that occurred within the last Period seconds
+%%% are kept in the #state.restarts list.
+%%% Returns: {ok, State'} | {terminate, State'}
+%%% ------------------------------------------------------
+
+add_restart(State) ->
+ I = State#state.intensity,
+ P = State#state.period,
+ R = State#state.restarts,
+ Now = erlang:monotonic_time(1),
+ R1 = add_restart([Now|R], Now, P),
+ State1 = State#state{restarts = R1},
+ case length(R1) of
+ CurI when CurI =< I ->
+ {ok, State1};
+ _ ->
+ {terminate, State1}
+ end.
+
+add_restart([R|Restarts], Now, Period) ->
+ case inPeriod(R, Now, Period) of
+ true ->
+ [R|add_restart(Restarts, Now, Period)];
+ _ ->
+ []
+ end;
+add_restart([], _, _) ->
+ [].
+
+inPeriod(Then, Now, Period) ->
+ Now =< Then + Period.
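+
+%% Worked example (illustrative): with intensity = 3 and period = 5,
+%% restarts at monotonic seconds 10, 11 and 12 leave restarts = [12,11,10]
+%% (3 =< 3, so {ok, _}). A fourth restart at second 13 yields
+%% [13,12,11,10], all within the 5 second period, so length > intensity
+%% and add_restart/1 returns {terminate, _}.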
+
+%%% ------------------------------------------------------
+%%% Error and progress reporting.
+%%% ------------------------------------------------------
+extract_child(Child) when is_list(Child#child.pid) ->
+ [{nb_children, length(Child#child.pid)},
+ {id, Child#child.id},
+ {mfargs, Child#child.mfargs},
+ {restart_type, Child#child.restart_type},
+ {shutdown, Child#child.shutdown},
+ {child_type, Child#child.child_type}];
+extract_child(Child) ->
+ [{pid, Child#child.pid},
+ {id, Child#child.id},
+ {mfargs, Child#child.mfargs},
+ {restart_type, Child#child.restart_type},
+ {shutdown, Child#child.shutdown},
+ {child_type, Child#child.child_type}].
+
+report_progress(Child, SupName) ->
+ ?LOG_INFO(#{label=>{supervisor,progress},
+ report=>[{supervisor,SupName},
+ {started,extract_child(Child)}]},
+ #{domain=>[otp,sasl],
+ report_cb=>fun logger:format_otp_report/1,
+ logger_formatter=>#{title=>"PROGRESS REPORT"},
+ error_logger=>#{tag=>info_report,type=>progress}}).
+
+format_status(terminate, [_PDict, State]) ->
+ State;
+format_status(_, [_PDict, State]) ->
+ [{data, [{"State", State}]},
+ {supervisor, [{"Callback", State#state.module}]}].
+
+%%%-----------------------------------------------------------------
+%%% Dynamics database access
+dyn_size(#state{dynamics = {Mod,Db}}) ->
+ Mod:size(Db).
+
+dyn_erase(Pid,#state{dynamics={sets,Db}}=State) ->
+ State#state{dynamics={sets,sets:del_element(Pid,Db)}};
+dyn_erase(Pid,#state{dynamics={maps,Db}}=State) ->
+ State#state{dynamics={maps,maps:remove(Pid,Db)}}.
+
+dyn_store(Pid,_,#state{dynamics={sets,Db}}=State) ->
+ State#state{dynamics={sets,sets:add_element(Pid,Db)}};
+dyn_store(Pid,Args,#state{dynamics={maps,Db}}=State) ->
+ State#state{dynamics={maps,Db#{Pid => Args}}}.
+
+dyn_fold(Fun,Init,#state{dynamics={sets,Db}}) ->
+ sets:fold(Fun,Init,Db);
+dyn_fold(Fun,Init,#state{dynamics={maps,Db}}) ->
+ maps:fold(fun(Pid,_,Acc) -> Fun(Pid,Acc) end, Init, Db).
+
+dyn_map(Fun, #state{dynamics={sets,Db}}) ->
+ lists:map(Fun, sets:to_list(Db));
+dyn_map(Fun, #state{dynamics={maps,Db}}) ->
+ lists:map(Fun, maps:keys(Db)).
+
+dyn_exists(Pid, #state{dynamics={sets, Db}}) ->
+ sets:is_element(Pid, Db);
+dyn_exists(Pid, #state{dynamics={maps, Db}}) ->
+ maps:is_key(Pid, Db).
+
+dyn_args(_Pid, #state{dynamics={sets, _Db}}) ->
+ {ok,undefined};
+dyn_args(Pid, #state{dynamics={maps, Db}}) ->
+ maps:find(Pid, Db).
+
+dyn_init(State) ->
+ dyn_init(get_dynamic_child(State),State).
+
+dyn_init(Child,State) when ?is_temporary(Child) ->
+ State#state{dynamics={sets,sets:new()}};
+dyn_init(_Child,State) ->
+ State#state{dynamics={maps,maps:new()}}.
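+
+%% Illustrative note (not in the original source): temporary dynamic
+%% children are tracked as a plain set of pids, since they are never
+%% restarted and their start arguments are not needed again; restartable
+%% dynamic children use a Pid => Args map so the arguments can be
+%% replayed on restart:
+%%
+%%   dyn_args(Pid, State) %% => {ok, undefined} (sets) | {ok, Args} (maps)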
diff --git a/deps/rabbit_common/src/vm_memory_monitor.erl b/deps/rabbit_common/src/vm_memory_monitor.erl
new file mode 100644
index 0000000000..73b5a23b78
--- /dev/null
+++ b/deps/rabbit_common/src/vm_memory_monitor.erl
@@ -0,0 +1,576 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+%% In practice Erlang shouldn't be allowed to grow to more than half
+%% of the available memory. The pessimistic scenario is when the Erlang VM
+%% has a single process that's consuming all memory. In such a case,
+%% during garbage collection, Erlang tries to allocate a huge chunk of
+%% contiguous memory, which can result in a crash or heavy swapping.
+%%
+%% This module tries to warn Rabbit before such situations occur, so
+%% that it has a higher chance to avoid running out of memory.
+
+-module(vm_memory_monitor).
+
+-behaviour(gen_server).
+
+-export([start_link/1, start_link/3]).
+
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
+ terminate/2, code_change/3]).
+
+-export([get_total_memory/0, get_vm_limit/0,
+ get_check_interval/0, set_check_interval/1,
+ get_vm_memory_high_watermark/0, set_vm_memory_high_watermark/1,
+ get_memory_limit/0,
+ %% TODO: refactor in master
+ get_memory_use/1,
+ get_process_memory/0,
+ get_process_memory/1,
+ get_memory_calculation_strategy/0,
+ get_rss_memory/0]).
+
+%% for tests
+-export([parse_line_linux/1, parse_mem_limit/1]).
+
+-define(SERVER, ?MODULE).
+
+-record(state, {total_memory,
+ memory_limit,
+ process_memory,
+ memory_config_limit,
+ timeout,
+ timer,
+ alarmed,
+ alarm_funs,
+ os_type = undefined,
+ os_pid = undefined,
+ page_size = undefined,
+ proc_file = undefined}).
+
+-include("rabbit_memory.hrl").
+
+%%----------------------------------------------------------------------------
+
+-type memory_calculation_strategy() :: rss | erlang | allocated.
+-type vm_memory_high_watermark() :: (float() | {'absolute', integer() | string()}).
+-spec start_link(float()) -> rabbit_types:ok_pid_or_error().
+-spec start_link(float(), fun ((any()) -> 'ok'),
+ fun ((any()) -> 'ok')) -> rabbit_types:ok_pid_or_error().
+-spec get_total_memory() -> (non_neg_integer() | 'unknown').
+-spec get_vm_limit() -> non_neg_integer().
+-spec get_check_interval() -> non_neg_integer().
+-spec set_check_interval(non_neg_integer()) -> 'ok'.
+-spec get_vm_memory_high_watermark() -> vm_memory_high_watermark().
+-spec set_vm_memory_high_watermark(vm_memory_high_watermark()) -> 'ok'.
+-spec get_memory_limit() -> non_neg_integer().
+-spec get_memory_use(bytes) -> {non_neg_integer(), float() | infinity};
+ (ratio) -> float() | infinity.
+-spec get_cached_process_memory_and_limit() -> {non_neg_integer(),
+ float() | infinity}.
+-spec get_rss_memory() -> non_neg_integer().
+
+-export_type([memory_calculation_strategy/0]).
+%%----------------------------------------------------------------------------
+%% Public API
+%%----------------------------------------------------------------------------
+
+get_total_memory() ->
+ case application:get_env(rabbit, total_memory_available_override_value) of
+ {ok, Value} ->
+ case rabbit_resource_monitor_misc:parse_information_unit(Value) of
+ {ok, ParsedTotal} ->
+ ParsedTotal;
+ {error, parse_error} ->
+ rabbit_log:warning(
+ "The override value for the total memory available is "
+ "not a valid value: ~p, getting total from the system.~n",
+ [Value]),
+ get_total_memory_from_os()
+ end;
+ undefined ->
+ get_total_memory_from_os()
+ end.
+
+get_vm_limit() -> get_vm_limit(os:type()).
+
+get_check_interval() ->
+ gen_server:call(?MODULE, get_check_interval, infinity).
+
+set_check_interval(Fraction) ->
+ gen_server:call(?MODULE, {set_check_interval, Fraction}, infinity).
+
+get_vm_memory_high_watermark() ->
+ gen_server:call(?MODULE, get_vm_memory_high_watermark, infinity).
+
+set_vm_memory_high_watermark(Fraction) ->
+ gen_server:call(?MODULE, {set_vm_memory_high_watermark, Fraction},
+ infinity).
+
+get_memory_limit() ->
+ gen_server:call(?MODULE, get_memory_limit, infinity).
+
+get_memory_use(bytes) ->
+ {ProcessMemory, MemoryLimit} = get_cached_process_memory_and_limit(),
+ {ProcessMemory, case MemoryLimit > 0.0 of
+ true -> MemoryLimit;
+ false -> infinity
+ end};
+get_memory_use(ratio) ->
+ {ProcessMemory, MemoryLimit} = get_cached_process_memory_and_limit(),
+ case MemoryLimit > 0.0 of
+ true -> ProcessMemory / MemoryLimit;
+ false -> infinity
+ end.
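+
+%% Usage sketch (illustrative values):
+%%
+%%   vm_memory_monitor:get_memory_use(bytes). %% {1073741824, 3221225472}
+%%   vm_memory_monitor:get_memory_use(ratio). %% 0.3333333333333333
+%%
+%% A memory limit of zero is reported as infinity so callers never
+%% divide by zero.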
+
+%% Memory reported by erlang:memory(total) is not supposed to
+%% be equal to the total size of all pages mapped to the emulator,
+%% according to http://erlang.org/doc/man/erlang.html#memory-0
+%% erlang:memory(total) under-reports memory usage by around 20%
+%%
+%% Win32 Note: 3.6.12 shipped with code that used wmic.exe to get the
+%% WorkingSetSize value for the running erl.exe process. Unfortunately
+%% even with a moderate invocation rate of 1 ops/second that uses more
+%% CPU resources than some Windows users are willing to tolerate.
+%% See rabbitmq/rabbitmq-server#1343 and rabbitmq/rabbitmq-common#224
+%% for details.
+-spec get_process_memory() -> Bytes :: integer().
+get_process_memory() ->
+ {ProcMem, _} = get_memory_use(bytes),
+ ProcMem.
+
+-spec get_process_memory(cached | current) -> Bytes :: integer().
+get_process_memory(cached) ->
+ {ProcMem, _} = get_memory_use(bytes),
+ ProcMem;
+get_process_memory(current) ->
+ get_process_memory_uncached().
+
+-spec get_memory_calculation_strategy() -> memory_calculation_strategy().
+get_memory_calculation_strategy() ->
+ case rabbit_misc:get_env(rabbit, vm_memory_calculation_strategy, rss) of
+ allocated -> allocated;
+ erlang -> erlang;
+ legacy -> erlang; %% backwards compatibility
+ rss -> rss;
+ UnsupportedValue ->
+ rabbit_log:warning(
+ "Unsupported value '~p' for vm_memory_calculation_strategy. "
+ "Supported values: (allocated|erlang|legacy|rss). "
+ "Defaulting to 'rss'",
+ [UnsupportedValue]
+ ),
+ rss
+ end.
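+
+%% Configuration sketch (illustrative): the strategy is read from the
+%% 'rabbit' application environment and could be set in advanced.config:
+%%
+%%   [{rabbit, [{vm_memory_calculation_strategy, allocated}]}].
+%%
+%% 'legacy' is kept as a synonym for 'erlang'; unknown values fall back
+%% to 'rss' with a warning.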
+
+%%----------------------------------------------------------------------------
+%% gen_server callbacks
+%%----------------------------------------------------------------------------
+
+start_link(MemFraction) ->
+ start_link(MemFraction,
+ fun alarm_handler:set_alarm/1, fun alarm_handler:clear_alarm/1).
+
+start_link(MemFraction, AlarmSet, AlarmClear) ->
+ gen_server:start_link({local, ?SERVER}, ?MODULE,
+ [MemFraction, {AlarmSet, AlarmClear}], []).
+
+init([MemFraction, AlarmFuns]) ->
+ TRef = erlang:send_after(?DEFAULT_MEMORY_CHECK_INTERVAL, self(), update),
+ State0 = #state{timeout = ?DEFAULT_MEMORY_CHECK_INTERVAL,
+ timer = TRef,
+ alarmed = false,
+ alarm_funs = AlarmFuns},
+ State1 = update_process_memory(init_state_by_os(State0)),
+ {ok, set_mem_limits(State1, MemFraction)}.
+
+handle_call(get_vm_memory_high_watermark, _From,
+ #state{memory_config_limit = MemLimit} = State) ->
+ {reply, MemLimit, State};
+
+handle_call({set_vm_memory_high_watermark, MemLimit}, _From, State) ->
+ {reply, ok, set_mem_limits(State, MemLimit)};
+
+handle_call(get_check_interval, _From, State) ->
+ {reply, State#state.timeout, State};
+
+handle_call({set_check_interval, Timeout}, _From, State) ->
+ State1 = case erlang:cancel_timer(State#state.timer) of
+ false ->
+ State#state{timeout = Timeout};
+ _ ->
+ State#state{timeout = Timeout,
+ timer = erlang:send_after(Timeout, self(), update)}
+ end,
+ {reply, ok, State1};
+
+handle_call(get_memory_limit, _From, State) ->
+ {reply, State#state.memory_limit, State};
+
+handle_call(get_cached_process_memory_and_limit, _From, State) ->
+ {reply, {State#state.process_memory, State#state.memory_limit}, State};
+
+handle_call(_Request, _From, State) ->
+ {noreply, State}.
+
+handle_cast(_Request, State) ->
+ {noreply, State}.
+
+handle_info(update, State) ->
+ _ = erlang:cancel_timer(State#state.timer),
+ State1 = internal_update(State),
+ TRef = erlang:send_after(State1#state.timeout, self(), update),
+ {noreply, State1#state{ timer = TRef }};
+
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+%%----------------------------------------------------------------------------
+%% Server Internals
+%%----------------------------------------------------------------------------
+get_rss_memory() ->
+ TmpState = init_state_by_os(#state{}),
+ {ok, ProcMem} = get_process_memory_using_strategy(rss, TmpState),
+ ProcMem.
+
+get_cached_process_memory_and_limit() ->
+ try
+ gen_server:call(?MODULE, get_cached_process_memory_and_limit, infinity)
+ catch exit:{noproc, Error} ->
+ rabbit_log:warning("Memory monitor process not yet started: ~p~n", [Error]),
+ ProcessMemory = get_process_memory_uncached(),
+ {ProcessMemory, infinity}
+ end.
+
+get_process_memory_uncached() ->
+ TmpState = update_process_memory(init_state_by_os(#state{})),
+ TmpState#state.process_memory.
+
+update_process_memory(State) ->
+ Strategy = get_memory_calculation_strategy(),
+ {ok, ProcMem} = get_process_memory_using_strategy(Strategy, State),
+ State#state{process_memory = ProcMem}.
+
+init_state_by_os(State = #state{os_type = undefined}) ->
+ OsType = os:type(),
+ OsPid = os:getpid(),
+ init_state_by_os(State#state{os_type = OsType, os_pid = OsPid});
+init_state_by_os(State0 = #state{os_type = {unix, linux}, os_pid = OsPid}) ->
+ PageSize = get_linux_pagesize(),
+ ProcFile = io_lib:format("/proc/~s/statm", [OsPid]),
+ State0#state{page_size = PageSize, proc_file = ProcFile};
+init_state_by_os(State) ->
+ State.
+
+get_process_memory_using_strategy(rss, #state{os_type = {unix, linux},
+ page_size = PageSize,
+ proc_file = ProcFile}) ->
+ Data = read_proc_file(ProcFile),
+ [_|[RssPagesStr|_]] = string:tokens(Data, " "),
+ ProcMem = list_to_integer(RssPagesStr) * PageSize,
+ {ok, ProcMem};
+get_process_memory_using_strategy(rss, #state{os_type = {unix, _},
+ os_pid = OsPid}) ->
+ Cmd = "ps -p " ++ OsPid ++ " -o rss=",
+ CmdOutput = os:cmd(Cmd),
+ case re:run(CmdOutput, "[0-9]+", [{capture, first, list}]) of
+ {match, [Match]} ->
+ ProcMem = list_to_integer(Match) * 1024,
+ {ok, ProcMem};
+ _ ->
+ {error, {unexpected_output_from_command, Cmd, CmdOutput}}
+ end;
+get_process_memory_using_strategy(rss, _State) ->
+ {ok, recon_alloc:memory(allocated)};
+get_process_memory_using_strategy(allocated, _State) ->
+ {ok, recon_alloc:memory(allocated)};
+get_process_memory_using_strategy(erlang, _State) ->
+ {ok, erlang:memory(total)}.
+
+get_total_memory_from_os() ->
+ try
+ get_total_memory(os:type())
+ catch _:Error:Stacktrace ->
+ rabbit_log:warning(
+ "Failed to get total system memory: ~n~p~n~p~n",
+ [Error, Stacktrace]),
+ unknown
+ end.
+
+set_mem_limits(State, MemLimit) ->
+ case erlang:system_info(wordsize) of
+ 4 ->
+ rabbit_log:warning(
+ "You are using a 32-bit version of Erlang: you may run into "
+ "memory address~n"
+ "space exhaustion or statistic counters overflow.~n");
+ _ ->
+ ok
+ end,
+ TotalMemory =
+ case get_total_memory() of
+ unknown ->
+ case State of
+ #state { total_memory = undefined,
+ memory_limit = undefined } ->
+ rabbit_log:warning(
+ "Unknown total memory size for your OS ~p. "
+ "Assuming memory size is ~p MiB (~p bytes).~n",
+ [os:type(),
+ trunc(?MEMORY_SIZE_FOR_UNKNOWN_OS/?ONE_MiB),
+ ?MEMORY_SIZE_FOR_UNKNOWN_OS]);
+ _ ->
+ ok
+ end,
+ ?MEMORY_SIZE_FOR_UNKNOWN_OS;
+ Memory -> Memory
+ end,
+ UsableMemory =
+ case get_vm_limit() of
+ Limit when Limit < TotalMemory ->
+ rabbit_log:warning(
+ "Only ~p MiB (~p bytes) of ~p MiB (~p bytes) memory usable due to "
+ "limited address space.~n"
+ "Crashes due to memory exhaustion are possible - see~n"
+ "https://www.rabbitmq.com/memory.html#address-space~n",
+ [trunc(Limit/?ONE_MiB), Limit, trunc(TotalMemory/?ONE_MiB),
+ TotalMemory]),
+ Limit;
+ _ ->
+ TotalMemory
+ end,
+ MemLim = interpret_limit(parse_mem_limit(MemLimit), UsableMemory),
+ rabbit_log:info(
+ "Memory high watermark set to ~p MiB (~p bytes)"
+ " of ~p MiB (~p bytes) total~n",
+ [trunc(MemLim/?ONE_MiB), MemLim,
+ trunc(TotalMemory/?ONE_MiB), TotalMemory]
+ ),
+ internal_update(State #state { total_memory = TotalMemory,
+ memory_limit = MemLim,
+ memory_config_limit = MemLimit}).
+
+interpret_limit({'absolute', MemLim}, UsableMemory) ->
+ erlang:min(MemLim, UsableMemory);
+interpret_limit(MemFraction, UsableMemory) ->
+ trunc(MemFraction * UsableMemory).
+
+parse_mem_limit({absolute, Limit}) ->
+ case rabbit_resource_monitor_misc:parse_information_unit(Limit) of
+ {ok, ParsedLimit} -> {absolute, ParsedLimit};
+ {error, parse_error} ->
+ rabbit_log:error("Unable to parse vm_memory_high_watermark value ~p", [Limit]),
+ ?DEFAULT_VM_MEMORY_HIGH_WATERMARK
+ end;
+parse_mem_limit(MemLimit) when is_integer(MemLimit) ->
+ parse_mem_limit(float(MemLimit));
+parse_mem_limit(MemLimit) when is_float(MemLimit), MemLimit =< ?MAX_VM_MEMORY_HIGH_WATERMARK ->
+ MemLimit;
+parse_mem_limit(MemLimit) when is_float(MemLimit), MemLimit > ?MAX_VM_MEMORY_HIGH_WATERMARK ->
+ rabbit_log:warning(
+ "Memory high watermark of ~p is above the allowed maximum, falling back to ~p~n",
+ [MemLimit, ?MAX_VM_MEMORY_HIGH_WATERMARK]
+ ),
+ ?MAX_VM_MEMORY_HIGH_WATERMARK;
+parse_mem_limit(MemLimit) ->
+ rabbit_log:warning(
+ "Memory high watermark of ~p is invalid, defaulting to ~p~n",
+ [MemLimit, ?DEFAULT_VM_MEMORY_HIGH_WATERMARK]
+ ),
+ ?DEFAULT_VM_MEMORY_HIGH_WATERMARK.
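+
+%% Worked examples (illustrative):
+%%
+%%   parse_mem_limit(0.4) %% => 0.4
+%%   parse_mem_limit(2)   %% => warning, then ?MAX_VM_MEMORY_HIGH_WATERMARK
+%%   parse_mem_limit({absolute, "1073741824"})
+%%     %% => {absolute, 1073741824}, assuming the unit parser accepts
+%%     %% a plain byte count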
+
+internal_update(State0 = #state{memory_limit = MemLimit,
+ alarmed = Alarmed,
+ alarm_funs = {AlarmSet, AlarmClear}}) ->
+ State1 = update_process_memory(State0),
+ ProcMem = State1#state.process_memory,
+ NewAlarmed = ProcMem > MemLimit,
+ case {Alarmed, NewAlarmed} of
+ {false, true} -> emit_update_info(set, ProcMem, MemLimit),
+ AlarmSet({{resource_limit, memory, node()}, []});
+ {true, false} -> emit_update_info(clear, ProcMem, MemLimit),
+ AlarmClear({resource_limit, memory, node()});
+ _ -> ok
+ end,
+ State1#state{alarmed = NewAlarmed}.
+
+emit_update_info(AlarmState, MemUsed, MemLimit) ->
+ rabbit_log:info(
+ "vm_memory_high_watermark ~p. Memory used:~p allowed:~p~n",
+ [AlarmState, MemUsed, MemLimit]).
+
+%% According to https://msdn.microsoft.com/en-us/library/aa366778(VS.85).aspx
+%% Windows has 2GB and 8TB of address space for 32 and 64 bit processes respectively.
+get_vm_limit({win32,_OSname}) ->
+ case erlang:system_info(wordsize) of
+ 4 -> 2*1024*1024*1024; %% 2 GB for 32 bits 2^31
+ 8 -> 8*1024*1024*1024*1024 %% 8 TB for 64 bits 2^42
+ end;
+
+%% On a 32-bit machine, if you're using more than 2 gigs of RAM you're
+%% in big trouble anyway.
+get_vm_limit(_OsType) ->
+ case erlang:system_info(wordsize) of
+ 4 -> 2*1024*1024*1024; %% 2 GB for 32 bits 2^31
+ 8 -> 256*1024*1024*1024*1024 %% 256 TB for 64 bits 2^48
+ %%https://en.wikipedia.org/wiki/X86-64#Virtual_address_space_details
+ end.
+
+%%----------------------------------------------------------------------------
+%% Internal Helpers
+%%----------------------------------------------------------------------------
+cmd(Command) ->
+ cmd(Command, true).
+
+cmd(Command, ThrowIfMissing) ->
+ Exec = hd(string:tokens(Command, " ")),
+ case {ThrowIfMissing, os:find_executable(Exec)} of
+ {true, false} ->
+ throw({command_not_found, Exec});
+ {false, false} ->
+ {error, command_not_found};
+ {_, _Filename} ->
+ os:cmd(Command)
+ end.
+
+default_linux_pagesize(CmdOutput) ->
+ rabbit_log:warning(
+ "Failed to get memory page size, using 4096. Reason: ~s",
+ [CmdOutput]),
+ 4096.
+
+get_linux_pagesize() ->
+ case cmd("getconf PAGESIZE", false) of
+ {error, command_not_found} ->
+ default_linux_pagesize("getconf not found in PATH");
+ CmdOutput ->
+ case re:run(CmdOutput, "^[0-9]+", [{capture, first, list}]) of
+ {match, [Match]} -> list_to_integer(Match);
+ _ ->
+ default_linux_pagesize(CmdOutput)
+ end
+ end.
+
+%% get_total_memory(OS) -> Total
+%% Windows and Freebsd code based on: memsup:get_memory_usage/1
+%% Original code was part of OTP and released under "Erlang Public License".
+
+get_total_memory({unix, darwin}) ->
+ sysctl("hw.memsize");
+
+get_total_memory({unix, freebsd}) ->
+ PageSize = sysctl("vm.stats.vm.v_page_size"),
+ PageCount = sysctl("vm.stats.vm.v_page_count"),
+ PageCount * PageSize;
+
+get_total_memory({unix, openbsd}) ->
+ sysctl("hw.usermem");
+
+get_total_memory({win32, _OSname}) ->
+ [Result|_] = os_mon_sysinfo:get_mem_info(),
+ {ok, [_MemLoad, TotPhys, _AvailPhys, _TotPage, _AvailPage, _TotV, _AvailV],
+ _RestStr} =
+ io_lib:fread("~d~d~d~d~d~d~d", Result),
+ TotPhys;
+
+get_total_memory({unix, linux}) ->
+ File = read_proc_file("/proc/meminfo"),
+ Lines = string:tokens(File, "\n"),
+ Dict = dict:from_list(lists:map(fun parse_line_linux/1, Lines)),
+ dict:fetch('MemTotal', Dict);
+
+get_total_memory({unix, sunos}) ->
+ File = cmd("/usr/sbin/prtconf"),
+ Lines = string:tokens(File, "\n"),
+ Dict = dict:from_list(lists:map(fun parse_line_sunos/1, Lines)),
+ dict:fetch('Memory size', Dict);
+
+get_total_memory({unix, aix}) ->
+ File = cmd("/usr/bin/vmstat -v"),
+ Lines = string:tokens(File, "\n"),
+ Dict = dict:from_list(lists:map(fun parse_line_aix/1, Lines)),
+ dict:fetch('memory pages', Dict) * 4096;
+
+get_total_memory(_OsType) ->
+ unknown.
+
+%% A line looks like "MemTotal: 502968 kB"
+%% or (with broken OS/modules) "Readahead 123456 kB"
+parse_line_linux(Line) ->
+ {Name, Value, UnitRest} =
+ case string:tokens(Line, ":") of
+ %% no colon in the line
+ [S] ->
+ [K, RHS] = re:split(S, "\s", [{parts, 2}, {return, list}]),
+ [V | Unit] = string:tokens(RHS, " "),
+ {K, V, Unit};
+ [K, RHS | _Rest] ->
+ [V | Unit] = string:tokens(RHS, " "),
+ {K, V, Unit}
+ end,
+ Value1 = case UnitRest of
+ [] -> list_to_integer(Value); %% no units
+ ["kB"] -> list_to_integer(Value) * 1024;
+ ["KB"] -> list_to_integer(Value) * 1024
+ end,
+ {list_to_atom(Name), Value1}.
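+
+%% Worked example (illustrative):
+%%
+%%   parse_line_linux("MemTotal:       502968 kB")
+%%     %% => {'MemTotal', 515039232}   (502968 * 1024)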
+
+%% A line looks like "Memory size: 1024 Megabytes"
+parse_line_sunos(Line) ->
+ case string:tokens(Line, ":") of
+ [Name, RHS | _Rest] ->
+ [Value1 | UnitsRest] = string:tokens(RHS, " "),
+ Value2 = case UnitsRest of
+ ["Gigabytes"] ->
+ list_to_integer(Value1) * ?ONE_MiB * 1024;
+ ["Megabytes"] ->
+ list_to_integer(Value1) * ?ONE_MiB;
+ ["Kilobytes"] ->
+ list_to_integer(Value1) * 1024;
+ _ ->
+ Value1 ++ UnitsRest %% no known units
+ end,
+ {list_to_atom(Name), Value2};
+ [Name] -> {list_to_atom(Name), none}
+ end.
+
+%% Lines look like " 12345 memory pages"
+%% or " 80.1 maxpin percentage"
+parse_line_aix(Line) ->
+ [Value | NameWords] = string:tokens(Line, " "),
+ Name = string:join(NameWords, " "),
+ {list_to_atom(Name),
+ case lists:member($., Value) of
+ true -> trunc(list_to_float(Value));
+ false -> list_to_integer(Value)
+ end}.
+
+sysctl(Def) ->
+ list_to_integer(cmd("/usr/bin/env sysctl -n " ++ Def) -- "\n").
+
+%% file:read_file does not work on files in /proc as it seems to get
+%% the size of the file first and then read that many bytes. But files
+%% in /proc always have length 0, so we just have to read until we get
+%% eof.
+read_proc_file(File) ->
+ {ok, IoDevice} = file:open(File, [read, raw]),
+ Res = read_proc_file(IoDevice, []),
+ _ = file:close(IoDevice),
+ lists:flatten(lists:reverse(Res)).
+
+-define(BUFFER_SIZE, 1024).
+read_proc_file(IoDevice, Acc) ->
+ case file:read(IoDevice, ?BUFFER_SIZE) of
+ {ok, Res} -> read_proc_file(IoDevice, [Res | Acc]);
+ eof -> Acc
+ end.
diff --git a/deps/rabbit_common/src/worker_pool.erl b/deps/rabbit_common/src/worker_pool.erl
new file mode 100644
index 0000000000..f81e924653
--- /dev/null
+++ b/deps/rabbit_common/src/worker_pool.erl
@@ -0,0 +1,172 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(worker_pool).
+
+%% Generic worker pool manager.
+%%
+%% Submitted jobs are functions. They can be executed synchronously
+%% (using worker_pool:submit/1, worker_pool:submit/2) or asynchronously
+%% (using worker_pool:submit_async/1).
+%%
+%% We typically use the worker pool if we want to limit the maximum
+%% parallelism of some job. We are not trying to dodge the cost of
+%% creating Erlang processes.
+%%
+%% Supports nested submission of jobs and two execution modes:
+%% 'single' and 'reuse'. Jobs executed in 'single' mode are invoked in
+%% a one-off process. Those executed in 'reuse' mode are invoked in a
+%% worker process out of the pool. Nested jobs are always executed
+%% immediately in the current worker process.
+%%
+%% 'single' mode is offered to work around a bug in Mnesia: after
+%% network partitions, reply messages for prior failed requests can be
+%% sent to Mnesia clients - a reused worker pool process can crash on
+%% receiving one.
+%%
+%% Caller submissions are enqueued internally. When a worker
+%% process becomes available, it notifies the pool and is
+%% assigned a job to execute. If job execution fails with an error, no
+%% response is returned to the caller.
+%%
+%% Worker processes prioritise certain command-and-control messages
+%% from the pool.
+%%
+%% Future improvement points: job prioritisation.
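+%%
+%% Usage sketch (illustrative; the submitted funs are hypothetical):
+%%
+%%   worker_pool:submit(fun () -> heavy_computation() end),
+%%   worker_pool:submit(fun () -> mnesia_sensitive_job() end, single),
+%%   worker_pool:submit_async(fun () -> fire_and_forget() end).
+%%
+%% submit/1,2 block the caller until the job returns its result;
+%% submit_async/1 enqueues the job and returns 'ok' immediately.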
+
+-behaviour(gen_server2).
+
+-export([start_link/1,
+ submit/1, submit/2, submit/3,
+ submit_async/1, submit_async/2,
+ dispatch_sync/1, dispatch_sync/2,
+ ready/2,
+ idle/2,
+ default_pool/0]).
+
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
+ terminate/2, code_change/3]).
+
+%%----------------------------------------------------------------------------
+
+-type mfargs() :: {atom(), atom(), [any()]}.
+
+-spec start_link(atom()) -> {'ok', pid()} | {'error', any()}.
+-spec submit(fun (() -> A) | mfargs()) -> A.
+-spec submit(fun (() -> A) | mfargs(), 'reuse' | 'single') -> A.
+-spec submit(atom(), fun (() -> A) | mfargs(), 'reuse' | 'single') -> A.
+-spec submit_async(fun (() -> any()) | mfargs()) -> 'ok'.
+-spec dispatch_sync(fun(() -> any()) | mfargs()) -> 'ok'.
+-spec ready(atom(), pid()) -> 'ok'.
+-spec idle(atom(), pid()) -> 'ok'.
+-spec default_pool() -> atom().
+
+%%----------------------------------------------------------------------------
+
+-define(DEFAULT_POOL, ?MODULE).
+-define(HIBERNATE_AFTER_MIN, 1000).
+-define(DESIRED_HIBERNATE, 10000).
+
+-record(state, { available, pending }).
+
+%%----------------------------------------------------------------------------
+
+start_link(Name) -> gen_server2:start_link({local, Name}, ?MODULE, [],
+ [{timeout, infinity}]).
+
+submit(Fun) ->
+ submit(?DEFAULT_POOL, Fun, reuse).
+
+%% ProcessModel =:= single is for working around the mnesia_locker bug.
+submit(Fun, ProcessModel) ->
+ submit(?DEFAULT_POOL, Fun, ProcessModel).
+
+submit(Server, Fun, ProcessModel) ->
+ case get(worker_pool_worker) of
+ true -> worker_pool_worker:run(Fun);
+ _ -> Pid = gen_server2:call(Server, {next_free, self()}, infinity),
+ worker_pool_worker:submit(Pid, Fun, ProcessModel)
+ end.
+
+submit_async(Fun) -> submit_async(?DEFAULT_POOL, Fun).
+
+submit_async(Server, Fun) -> gen_server2:cast(Server, {run_async, Fun}).
+
+dispatch_sync(Fun) ->
+ dispatch_sync(?DEFAULT_POOL, Fun).
+
+dispatch_sync(Server, Fun) ->
+ Pid = gen_server2:call(Server, {next_free, self()}, infinity),
+ worker_pool_worker:submit_async(Pid, Fun).
+
+ready(Server, WPid) -> gen_server2:cast(Server, {ready, WPid}).
+
+idle(Server, WPid) -> gen_server2:cast(Server, {idle, WPid}).
+
+default_pool() -> ?DEFAULT_POOL.
+
+%%----------------------------------------------------------------------------
+
+init([]) ->
+ {ok, #state { pending = queue:new(), available = ordsets:new() }, hibernate,
+ {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, ?DESIRED_HIBERNATE}}.
+
+handle_call({next_free, CPid}, From, State = #state { available = [],
+ pending = Pending }) ->
+ {noreply, State#state{pending = queue:in({next_free, From, CPid}, Pending)},
+ hibernate};
+handle_call({next_free, CPid}, _From, State = #state { available =
+ [WPid | Avail1] }) ->
+ worker_pool_worker:next_job_from(WPid, CPid),
+ {reply, WPid, State #state { available = Avail1 }, hibernate};
+
+handle_call(Msg, _From, State) ->
+ {stop, {unexpected_call, Msg}, State}.
+
+handle_cast({ready, WPid}, State) ->
+ erlang:monitor(process, WPid),
+ handle_cast({idle, WPid}, State);
+
+handle_cast({idle, WPid}, State = #state { available = Avail,
+ pending = Pending }) ->
+ {noreply,
+ case queue:out(Pending) of
+ {empty, _Pending} ->
+ State #state { available = ordsets:add_element(WPid, Avail) };
+ {{value, {next_free, From, CPid}}, Pending1} ->
+ worker_pool_worker:next_job_from(WPid, CPid),
+ gen_server2:reply(From, WPid),
+ State #state { pending = Pending1 };
+ {{value, {run_async, Fun}}, Pending1} ->
+ worker_pool_worker:submit_async(WPid, Fun),
+ State #state { pending = Pending1 }
+ end, hibernate};
+
+handle_cast({run_async, Fun}, State = #state { available = [],
+ pending = Pending }) ->
+ {noreply, State #state { pending = queue:in({run_async, Fun}, Pending)},
+ hibernate};
+handle_cast({run_async, Fun}, State = #state { available = [WPid | Avail1] }) ->
+ worker_pool_worker:submit_async(WPid, Fun),
+ {noreply, State #state { available = Avail1 }, hibernate};
+
+handle_cast(Msg, State) ->
+ {stop, {unexpected_cast, Msg}, State}.
+
+handle_info({'DOWN', _MRef, process, WPid, _Reason},
+ State = #state { available = Avail }) ->
+ {noreply, State #state { available = ordsets:del_element(WPid, Avail) },
+ hibernate};
+
+handle_info(Msg, State) ->
+ {stop, {unexpected_info, Msg}, State}.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+terminate(_Reason, State) ->
+ State.
diff --git a/deps/rabbit_common/src/worker_pool_sup.erl b/deps/rabbit_common/src/worker_pool_sup.erl
new file mode 100644
index 0000000000..96dbbb2357
--- /dev/null
+++ b/deps/rabbit_common/src/worker_pool_sup.erl
@@ -0,0 +1,69 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(worker_pool_sup).
+
+-behaviour(supervisor).
+
+-export([start_link/0, start_link/1, start_link/2]).
+
+-export([init/1]).
+
+-export([default_pool_size/0]).
+
+%%----------------------------------------------------------------------------
+
+-spec start_link() -> rabbit_types:ok_pid_or_error().
+-spec start_link(non_neg_integer()) -> rabbit_types:ok_pid_or_error().
+-spec start_link(non_neg_integer(), atom())
+ -> rabbit_types:ok_pid_or_error().
+
+%%----------------------------------------------------------------------------
+
+start_link() ->
+ Size = default_pool_size(),
+ start_link(Size).
+
+start_link(PoolSize) ->
+ rabbit_log:info("Will use ~p processes for default worker pool", [PoolSize]),
+ start_link(PoolSize, worker_pool:default_pool()).
+
+start_link(PoolSize, PoolName) ->
+ rabbit_log:info("Starting worker pool '~p' with ~p processes in it", [PoolName, PoolSize]),
+ SupName = list_to_atom(atom_to_list(PoolName) ++ "_sup"),
+ supervisor:start_link({local, SupName}, ?MODULE, [PoolSize, PoolName]).
+
+%%----------------------------------------------------------------------------
+
+init([PoolSize, PoolName]) ->
+ %% we want to survive up to 1K of worker restarts per second,
+ %% e.g. when a large worker pool used for network connections
+ %% encounters a network failure. This is the case in the LDAP authentication
+ %% backend plugin.
+ {ok, {{one_for_one, 1000, 1},
+ [{worker_pool, {worker_pool, start_link, [PoolName]}, transient,
+ 16#ffffffff, worker, [worker_pool]} |
+ [{N, {worker_pool_worker, start_link, [PoolName]}, transient,
+ 16#ffffffff, worker, [worker_pool_worker]}
+ || N <- lists:seq(1, PoolSize)]]}}.
+
+%%
+%% Implementation
+%%
+
+-spec default_pool_size() -> integer().
+
+default_pool_size() ->
+ case rabbit_misc:get_env(rabbit, default_worker_pool_size, undefined) of
+ N when is_integer(N) -> N;
+ _ -> guess_default_pool_size()
+ end.
+
+-spec guess_default_pool_size() -> integer().
+
+guess_default_pool_size() ->
+ erlang:system_info(schedulers).
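+
+%% Configuration sketch (illustrative): the default of one worker per
+%% scheduler can be overridden via the 'rabbit' application environment,
+%% e.g. in advanced.config:
+%%
+%%   [{rabbit, [{default_worker_pool_size, 16}]}].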
diff --git a/deps/rabbit_common/src/worker_pool_worker.erl b/deps/rabbit_common/src/worker_pool_worker.erl
new file mode 100644
index 0000000000..79436e0773
--- /dev/null
+++ b/deps/rabbit_common/src/worker_pool_worker.erl
@@ -0,0 +1,192 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(worker_pool_worker).
+
+%% Executes jobs (functions) submitted to a worker pool with worker_pool:submit/1,
+%% worker_pool:submit/2 or worker_pool:submit_async/1.
+%%
+%% See worker_pool for an overview.
+
+-behaviour(gen_server2).
+
+-export([start_link/1, next_job_from/2, submit/3, submit_async/2,
+ run/1]).
+
+-export([set_maximum_since_use/2]).
+-export([set_timeout/2, set_timeout/3, clear_timeout/1]).
+
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
+ terminate/2, code_change/3, prioritise_cast/3]).
+
+%%----------------------------------------------------------------------------
+
+-type mfargs() :: {atom(), atom(), [any()]}.
+
+-spec start_link(atom()) -> {'ok', pid()} | {'error', any()}.
+-spec next_job_from(pid(), pid()) -> 'ok'.
+-spec submit(pid(), fun (() -> A) | mfargs(), 'reuse' | 'single') -> A.
+-spec submit_async(pid(), fun (() -> any()) | mfargs()) -> 'ok'.
+-spec run(fun (() -> A)) -> A; (mfargs()) -> any().
+-spec set_maximum_since_use(pid(), non_neg_integer()) -> 'ok'.
+
+%%----------------------------------------------------------------------------
+
+-define(HIBERNATE_AFTER_MIN, 1000).
+-define(DESIRED_HIBERNATE, 10000).
+
+%%----------------------------------------------------------------------------
+
+start_link(PoolName) ->
+ gen_server2:start_link(?MODULE, [PoolName], [{timeout, infinity}]).
+
+next_job_from(Pid, CPid) ->
+ gen_server2:cast(Pid, {next_job_from, CPid}).
+
+submit(Pid, Fun, ProcessModel) ->
+ gen_server2:call(Pid, {submit, Fun, self(), ProcessModel}, infinity).
+
+submit_async(Pid, Fun) ->
+ gen_server2:cast(Pid, {submit_async, Fun, self()}).
+
+set_maximum_since_use(Pid, Age) ->
+ gen_server2:cast(Pid, {set_maximum_since_use, Age}).
+
+run({M, F, A}) -> apply(M, F, A);
+run(Fun) -> Fun().
+
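+%% 'reuse' runs the job inside this long-lived worker process; 'single' runs
+%% it in a freshly spawned, linked process, so whatever the job leaves behind
+%% (process dictionary entries, links, etc.) dies with that process.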
+run(Fun, reuse) ->
+ run(Fun);
+run(Fun, single) ->
+ Self = self(),
+ Ref = make_ref(),
+ spawn_link(fun () ->
+ put(worker_pool_worker, true),
+ Self ! {Ref, run(Fun)},
+ unlink(Self)
+ end),
+ receive
+ {Ref, Res} -> Res
+ end.
+
+%%----------------------------------------------------------------------------
+
+init([PoolName]) ->
+ ok = file_handle_cache:register_callback(?MODULE, set_maximum_since_use,
+ [self()]),
+ ok = worker_pool:ready(PoolName, self()),
+ put(worker_pool_worker, true),
+ put(worker_pool_name, PoolName),
+ {ok, undefined, hibernate,
+ {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, ?DESIRED_HIBERNATE}}.
+
+prioritise_cast({set_maximum_since_use, _Age}, _Len, _State) -> 8;
+prioritise_cast({next_job_from, _CPid}, _Len, _State) -> 7;
+prioritise_cast(_Msg, _Len, _State) -> 0.
+
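+%% Hand-off protocol with the pool: the pool casts {next_job_from, CPid} to
+%% reserve this worker for client CPid (which is then monitored), while the
+%% job itself arrives as a submit call or a submit_async cast. Whichever of
+%% the two messages lands first is parked in the state until its counterpart
+%% arrives.
+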
+handle_call({submit, Fun, CPid, ProcessModel}, From, undefined) ->
+ {noreply, {job, CPid, From, Fun, ProcessModel}, hibernate};
+
+handle_call({submit, Fun, CPid, ProcessModel}, From, {from, CPid, MRef}) ->
+ erlang:demonitor(MRef),
+ gen_server2:reply(From, run(Fun, ProcessModel)),
+ ok = worker_pool:idle(get(worker_pool_name), self()),
+ {noreply, undefined, hibernate};
+
+handle_call(Msg, _From, State) ->
+ {stop, {unexpected_call, Msg}, State}.
+
+handle_cast({next_job_from, CPid}, undefined) ->
+ MRef = erlang:monitor(process, CPid),
+ {noreply, {from, CPid, MRef}, hibernate};
+
+handle_cast({next_job_from, CPid}, {job, CPid, From, Fun, ProcessModel}) ->
+ gen_server2:reply(From, run(Fun, ProcessModel)),
+ ok = worker_pool:idle(get(worker_pool_name), self()),
+ {noreply, undefined, hibernate};
+
+handle_cast({submit_async, Fun, _CPid}, undefined) ->
+ run(Fun),
+ ok = worker_pool:idle(get(worker_pool_name), self()),
+ {noreply, undefined, hibernate};
+
+handle_cast({submit_async, Fun, CPid}, {from, CPid, MRef}) ->
+ erlang:demonitor(MRef),
+ run(Fun),
+ ok = worker_pool:idle(get(worker_pool_name), self()),
+ {noreply, undefined, hibernate};
+
+handle_cast({set_maximum_since_use, Age}, State) ->
+ ok = file_handle_cache:set_maximum_since_use(Age),
+ {noreply, State, hibernate};
+
+handle_cast(Msg, State) ->
+ {stop, {unexpected_cast, Msg}, State}.
+
+handle_info({'DOWN', MRef, process, CPid, _Reason}, {from, CPid, MRef}) ->
+ ok = worker_pool:idle(get(worker_pool_name), self()),
+ {noreply, undefined, hibernate};
+
+handle_info({'DOWN', _MRef, process, _Pid, _Reason}, State) ->
+ {noreply, State, hibernate};
+
+handle_info({timeout, Key, Fun}, State) ->
+ clear_timeout(Key),
+ Fun(),
+ {noreply, State, hibernate};
+
+handle_info(Msg, State) ->
+ {stop, {unexpected_info, Msg}, State}.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+terminate(_Reason, State) ->
+ State.
+
+-spec set_timeout(non_neg_integer(), fun(() -> any())) ->
+ {ok, reference()}.
+set_timeout(Time, Fun) ->
+ Key = make_ref(),
+ set_timeout(Key, Time, Fun).
+
+-spec set_timeout(Key, non_neg_integer(), fun(() -> any())) ->
+ {ok, Key} when Key :: any().
+set_timeout(Key, Time, Fun) ->
+ Timeouts = get_timeouts(),
+ set_timeout(Key, Time, Fun, Timeouts).
+
+-spec clear_timeout(any()) -> ok.
+clear_timeout(Key) ->
+ NewTimeouts = cancel_timeout(Key, get_timeouts()),
+ put(timeouts, NewTimeouts),
+ ok.
+
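+%% These helpers keep state in the worker's process dictionary and send
+%% {timeout, Key, Fun} to self(), so they are meant to be called from code
+%% already running inside the worker (e.g. a submitted job). Sketch:
+%%
+%%   {ok, Key} = worker_pool_worker:set_timeout(5000, fun () -> ok end),
+%%   ok = worker_pool_worker:clear_timeout(Key).
+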
+get_timeouts() ->
+ case get(timeouts) of
+ undefined -> dict:new();
+ Dict -> Dict
+ end.
+
+set_timeout(Key, Time, Fun, Timeouts) ->
+ _ = cancel_timeout(Key, Timeouts),
+ TRef = erlang:send_after(Time, self(), {timeout, Key, Fun}),
+ NewTimeouts = dict:store(Key, TRef, Timeouts),
+ put(timeouts, NewTimeouts),
+ {ok, Key}.
+
+cancel_timeout(Key, Timeouts) ->
+ case dict:find(Key, Timeouts) of
+ {ok, TRef} ->
+ _ = erlang:cancel_timer(TRef),
+ receive {timeout, Key, _} -> ok
+ after 0 -> ok
+ end,
+ dict:erase(Key, Timeouts);
+ error ->
+ Timeouts
+ end.
diff --git a/deps/rabbit_common/test/gen_server2_test_server.erl b/deps/rabbit_common/test/gen_server2_test_server.erl
new file mode 100644
index 0000000000..0d68df8f7e
--- /dev/null
+++ b/deps/rabbit_common/test/gen_server2_test_server.erl
@@ -0,0 +1,72 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2017-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(gen_server2_test_server).
+-behaviour(gen_server2).
+-record(gs2_state, {parent, name, state, mod, time,
+ timeout_state, queue, debug, prioritisers,
+ timer, emit_stats_fun, stop_stats_fun}).
+
+-export([start_link/0, start_link/1, start_link/2, stats_count/1]).
+
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
+ terminate/2, code_change/3, handle_post_hibernate/1]).
+
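+%% start_link(count_stats[, Time]) starts the server, then uses
+%% sys:replace_state/2 to install an emit_stats_fun that bumps a public ETS
+%% counter, letting tests observe how often gen_server2 emits stats.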
+start_link(count_stats) ->
+ start_link(count_stats, infinity).
+
+start_link(count_stats, Time) ->
+ {ok, Server} = gen_server2:start_link(gen_server2_test_server, [Time], []),
+ Counter = gen_server2:call(Server, get_counter),
+ sys:replace_state(Server,
+ fun(GSState) ->
+ GSState#gs2_state{
+ emit_stats_fun = fun(State) -> count_stats(Counter), State end
+ }
+ end),
+ {ok, Server}.
+
+start_link() ->
+ gen_server2:start_link(gen_server2_test_server, [], []).
+
+stats_count(Server) ->
+ Counter = gen_server2:call(Server, get_counter),
+ [{count, Count}] = ets:lookup(Counter, count),
+ Count.
+
+init([]) ->
+ init([infinity]);
+init([Time]) ->
+ Counter = ets:new(stats_count, [public]),
+ ets:insert(Counter, {count, 0}),
+ case Time of
+ {backoff, _, _, _} ->
+ {ok, {counter, Counter}, hibernate, Time};
+ _ ->
+ {ok, {counter, Counter}, Time}
+ end.
+
+count_stats(Counter) ->
+ ets:update_counter(Counter, count, {2, 1}).
+
+handle_call(get_counter,_, {counter, Counter} = State) ->
+ {reply, Counter, State};
+handle_call(hibernate, _, State) ->
+ {reply, ok, State, hibernate};
+handle_call(_,_,State) ->
+ {reply, ok, State}.
+
+handle_cast({sleep, Time}, State) -> timer:sleep(Time), {noreply, State};
+handle_cast(_,State) -> {noreply, State}.
+
+handle_post_hibernate(State) -> {noreply, State}.
+
+handle_info(_,State) -> {noreply, State}.
+
+terminate(_,_State) -> ok.
+
+code_change(_,State,_) -> {ok, State}.
diff --git a/deps/rabbit_common/test/rabbit_env_SUITE.erl b/deps/rabbit_common/test/rabbit_env_SUITE.erl
new file mode 100644
index 0000000000..a881097e6b
--- /dev/null
+++ b/deps/rabbit_common/test/rabbit_env_SUITE.erl
@@ -0,0 +1,1098 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2019-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_env_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-export([all/0,
+ suite/0,
+ groups/0,
+ init_per_suite/1,
+ end_per_suite/1,
+ init_per_group/2,
+ end_per_group/2,
+ init_per_testcase/2,
+ end_per_testcase/2,
+ check_data_dir/1,
+ check_default_values/1,
+ check_values_from_reachable_remote_node/1,
+ check_values_from_offline_remote_node/1,
+ check_context_to_app_env_vars/1,
+ check_context_to_code_path/1,
+ check_RABBITMQ_ADVANCED_CONFIG_FILE/1,
+ check_RABBITMQ_CONFIG_FILE/1,
+ check_RABBITMQ_CONFIG_FILES/1,
+ check_RABBITMQ_DIST_PORT/1,
+ check_RABBITMQ_ENABLED_PLUGINS/1,
+ check_RABBITMQ_ENABLED_PLUGINS_FILE/1,
+ check_RABBITMQ_FEATURE_FLAGS_FILE/1,
+ check_RABBITMQ_KEEP_PID_FILE_ON_EXIT/1,
+ check_RABBITMQ_LOG/1,
+ check_RABBITMQ_LOG_BASE/1,
+ check_RABBITMQ_LOGS/1,
+ check_RABBITMQ_MNESIA_BASE/1,
+ check_RABBITMQ_MNESIA_DIR/1,
+ check_RABBITMQ_MOTD_FILE/1,
+ check_RABBITMQ_NODE_IP_ADDRESS/1,
+ check_RABBITMQ_NODE_PORT/1,
+ check_RABBITMQ_NODENAME/1,
+ check_RABBITMQ_PID_FILE/1,
+ check_RABBITMQ_PLUGINS_DIR/1,
+ check_RABBITMQ_PLUGINS_EXPAND_DIR/1,
+ check_RABBITMQ_PRODUCT_NAME/1,
+ check_RABBITMQ_PRODUCT_VERSION/1,
+ check_RABBITMQ_QUORUM_DIR/1,
+ check_RABBITMQ_STREAM_DIR/1,
+ check_RABBITMQ_UPGRADE_LOG/1,
+ check_RABBITMQ_USE_LONGNAME/1,
+ check_value_is_yes/1,
+ check_log_process_env/1,
+ check_log_context/1,
+ check_get_used_env_vars/1,
+ check_parse_conf_env_file_output/1
+ ]).
+
+all() ->
+ [
+ check_data_dir,
+ check_default_values,
+ check_values_from_reachable_remote_node,
+ check_values_from_offline_remote_node,
+ check_context_to_app_env_vars,
+ check_context_to_code_path,
+ check_RABBITMQ_ADVANCED_CONFIG_FILE,
+ check_RABBITMQ_CONFIG_FILE,
+ check_RABBITMQ_CONFIG_FILES,
+ check_RABBITMQ_DIST_PORT,
+ check_RABBITMQ_ENABLED_PLUGINS,
+ check_RABBITMQ_ENABLED_PLUGINS_FILE,
+ check_RABBITMQ_FEATURE_FLAGS_FILE,
+ check_RABBITMQ_KEEP_PID_FILE_ON_EXIT,
+ check_RABBITMQ_LOG,
+ check_RABBITMQ_LOG_BASE,
+ check_RABBITMQ_LOGS,
+ check_RABBITMQ_MNESIA_BASE,
+ check_RABBITMQ_MNESIA_DIR,
+ check_RABBITMQ_MOTD_FILE,
+ check_RABBITMQ_NODE_IP_ADDRESS,
+ check_RABBITMQ_NODE_PORT,
+ check_RABBITMQ_NODENAME,
+ check_RABBITMQ_PID_FILE,
+ check_RABBITMQ_PLUGINS_DIR,
+ check_RABBITMQ_PLUGINS_EXPAND_DIR,
+ check_RABBITMQ_PRODUCT_NAME,
+ check_RABBITMQ_PRODUCT_VERSION,
+ check_RABBITMQ_QUORUM_DIR,
+ check_RABBITMQ_STREAM_DIR,
+ check_RABBITMQ_UPGRADE_LOG,
+ check_RABBITMQ_USE_LONGNAME,
+ check_value_is_yes,
+ check_log_process_env,
+ check_log_context,
+ check_get_used_env_vars,
+ check_parse_conf_env_file_output
+ ].
+
+suite() ->
+ [{timetrap, {seconds, 10}}].
+
+groups() ->
+ [
+ {parallel_tests, [parallel], all()}
+ ].
+
+init_per_suite(Config) ->
+ persistent_term:put({rabbit_env, load_conf_env_file}, false),
+ Config.
+
+end_per_suite(Config) ->
+ persistent_term:erase({rabbit_env, load_conf_env_file}),
+ Config.
+
+init_per_group(_, Config) -> Config.
+end_per_group(_, Config) -> Config.
+
+init_per_testcase(_, Config) -> Config.
+end_per_testcase(_, Config) -> Config.
+
+check_data_dir(_) ->
+ {Variable, ExpValue} = case os:type() of
+ {win32, _} ->
+ {"RABBITMQ_BASE",
+ "value of RABBITMQ_BASE"};
+ {unix, _} ->
+ {"SYS_PREFIX",
+ "value of SYS_PREFIX/var/lib/rabbitmq"}
+ end,
+ Value = "value of " ++ Variable,
+ os:putenv(Variable, Value),
+ ?assertMatch(#{data_dir := ExpValue}, rabbit_env:get_context()),
+
+ os:unsetenv(Variable),
+ ?assertNotMatch(#{data_dir := ExpValue}, rabbit_env:get_context()),
+ ?assertMatch(#{data_dir := _}, rabbit_env:get_context()),
+
+ os:unsetenv(Variable).
+
+check_default_values(_) ->
+ %% When `rabbit_env` is built with `TEST` defined, we can override
+ %% the OS type.
+ persistent_term:put({rabbit_env, os_type}, {unix, undefined}),
+ UnixContext = rabbit_env:get_context(),
+
+ persistent_term:put({rabbit_env, os_type}, {win32, undefined}),
+ SavedAppData = os:getenv("APPDATA"),
+ os:putenv("APPDATA", "%APPDATA%"),
+ Win32Context = rabbit_env:get_context(),
+ case SavedAppData of
+ false -> os:unsetenv("APPDATA");
+ _ -> os:putenv("APPDATA", SavedAppData)
+ end,
+
+ persistent_term:erase({rabbit_env, os_type}),
+
+ {RFFValue, RFFOrigin} = forced_feature_flags_on_init_expect(),
+
+ Node = get_default_nodename(),
+ NodeS = atom_to_list(Node),
+
+ Origins = #{
+ additional_config_files => default,
+ advanced_config_file => default,
+ amqp_ipaddr => default,
+ amqp_tcp_port => default,
+ conf_env_file => default,
+ enabled_plugins => default,
+ enabled_plugins_file => default,
+ erlang_dist_tcp_port => default,
+ feature_flags_file => default,
+ forced_feature_flags_on_init => RFFOrigin,
+ interactive_shell => default,
+ keep_pid_file_on_exit => default,
+ log_base_dir => default,
+ log_feature_flags_registry => default,
+ log_levels => default,
+ main_config_file => default,
+ main_log_file => default,
+ mnesia_base_dir => default,
+ mnesia_dir => default,
+ motd_file => default,
+ nodename => default,
+ nodename_type => default,
+ os_type => environment,
+ output_supports_colors => default,
+ pid_file => default,
+ plugins_expand_dir => default,
+ plugins_path => default,
+ product_name => default,
+ product_version => default,
+ quorum_queue_dir => default,
+ rabbitmq_home => default,
+ stream_queue_dir => default,
+ upgrade_log_file => default
+ },
+
+ ?assertEqual(
+ #{additional_config_files => "/etc/rabbitmq/conf.d/*.conf",
+ advanced_config_file => "/etc/rabbitmq/advanced.config",
+ amqp_ipaddr => "auto",
+ amqp_tcp_port => 5672,
+ conf_env_file => "/etc/rabbitmq/rabbitmq-env.conf",
+ config_base_dir => "/etc/rabbitmq",
+ data_dir => "/var/lib/rabbitmq",
+ dbg_mods => [],
+ dbg_output => stdout,
+ enabled_plugins => undefined,
+ enabled_plugins_file => "/etc/rabbitmq/enabled_plugins",
+ erlang_dist_tcp_port => 25672,
+ feature_flags_file =>
+ "/var/lib/rabbitmq/mnesia/" ++ NodeS ++ "-feature_flags",
+ forced_feature_flags_on_init => RFFValue,
+ interactive_shell => false,
+ keep_pid_file_on_exit => false,
+ log_base_dir => "/var/log/rabbitmq",
+ log_feature_flags_registry => false,
+ log_levels => undefined,
+ main_config_file => "/etc/rabbitmq/rabbitmq",
+ main_log_file => "/var/log/rabbitmq/" ++ NodeS ++ ".log",
+ mnesia_base_dir => "/var/lib/rabbitmq/mnesia",
+ mnesia_dir => "/var/lib/rabbitmq/mnesia/" ++ NodeS,
+ motd_file => "/etc/rabbitmq/motd",
+ nodename => Node,
+ nodename_type => shortnames,
+ os_type => {unix, undefined},
+ output_supports_colors => true,
+ pid_file => "/var/lib/rabbitmq/mnesia/" ++ NodeS ++ ".pid",
+ plugins_expand_dir =>
+ "/var/lib/rabbitmq/mnesia/" ++ NodeS ++ "-plugins-expand",
+ plugins_path => maps:get(plugins_path, UnixContext),
+ product_name => undefined,
+ product_version => undefined,
+ quorum_queue_dir =>
+ "/var/lib/rabbitmq/mnesia/" ++ NodeS ++ "/quorum",
+ rabbitmq_home => maps:get(rabbitmq_home, UnixContext),
+ stream_queue_dir =>
+ "/var/lib/rabbitmq/mnesia/" ++ NodeS ++ "/stream",
+ split_nodename => rabbit_nodes_common:parts(Node),
+ sys_prefix => "",
+ upgrade_log_file =>
+ "/var/log/rabbitmq/" ++ NodeS ++ "_upgrade.log",
+
+ var_origins => Origins#{sys_prefix => default}},
+ UnixContext),
+
+ ?assertEqual(
+ #{additional_config_files => "%APPDATA%/RabbitMQ/conf.d/*.conf",
+ advanced_config_file => "%APPDATA%/RabbitMQ/advanced.config",
+ amqp_ipaddr => "auto",
+ amqp_tcp_port => 5672,
+ conf_env_file => "%APPDATA%/RabbitMQ/rabbitmq-env-conf.bat",
+ config_base_dir => "%APPDATA%/RabbitMQ",
+ data_dir => "%APPDATA%/RabbitMQ",
+ dbg_mods => [],
+ dbg_output => stdout,
+ enabled_plugins => undefined,
+ enabled_plugins_file => "%APPDATA%/RabbitMQ/enabled_plugins",
+ erlang_dist_tcp_port => 25672,
+ feature_flags_file =>
+ "%APPDATA%/RabbitMQ/db/" ++ NodeS ++ "-feature_flags",
+ forced_feature_flags_on_init => RFFValue,
+ interactive_shell => false,
+ keep_pid_file_on_exit => false,
+ log_base_dir => "%APPDATA%/RabbitMQ/log",
+ log_feature_flags_registry => false,
+ log_levels => undefined,
+ main_config_file => "%APPDATA%/RabbitMQ/rabbitmq",
+ main_log_file => "%APPDATA%/RabbitMQ/log/" ++ NodeS ++ ".log",
+ mnesia_base_dir => "%APPDATA%/RabbitMQ/db",
+ mnesia_dir => "%APPDATA%/RabbitMQ/db/" ++ NodeS ++ "-mnesia",
+ motd_file => "%APPDATA%/RabbitMQ/motd.txt",
+ nodename => Node,
+ nodename_type => shortnames,
+ os_type => {win32, undefined},
+ output_supports_colors => false,
+ pid_file => "%APPDATA%/RabbitMQ/db/" ++ NodeS ++ ".pid",
+ plugins_expand_dir =>
+ "%APPDATA%/RabbitMQ/db/" ++ NodeS ++ "-plugins-expand",
+ plugins_path => maps:get(plugins_path, Win32Context),
+ product_name => undefined,
+ product_version => undefined,
+ quorum_queue_dir =>
+ "%APPDATA%/RabbitMQ/db/" ++ NodeS ++ "-mnesia/quorum",
+ rabbitmq_base => "%APPDATA%/RabbitMQ",
+ rabbitmq_home => maps:get(rabbitmq_home, Win32Context),
+ stream_queue_dir =>
+ "%APPDATA%/RabbitMQ/db/" ++ NodeS ++ "-mnesia/stream",
+ split_nodename => rabbit_nodes_common:parts(Node),
+ upgrade_log_file =>
+ "%APPDATA%/RabbitMQ/log/" ++ NodeS ++ "_upgrade.log",
+
+ var_origins => Origins#{rabbitmq_base => default}},
+ Win32Context).
+
+forced_feature_flags_on_init_expect() ->
+ %% In the case of mixed-versions-cluster testing in CI, the test
+ %% sets $RABBITMQ_FEATURE_FLAGS to an empty string. This obviously
+ %% changes the context returned by rabbit_env.
+ case os:getenv("RABBITMQ_FEATURE_FLAGS") of
+ false -> {undefined, default};
+ "" -> {[], environment}
+ end.
+
+check_values_from_reachable_remote_node(Config) ->
+ PrivDir = ?config(priv_dir, Config),
+
+ MnesiaDir = filename:join(PrivDir, "mnesia"),
+ RabbitAppDir = filename:join(PrivDir, "rabbit"),
+ RabbitEbinDir = filename:join(RabbitAppDir, "ebin"),
+
+ FeatureFlagsFile = filename:join(PrivDir, "feature_flags"),
+ PluginsDir = filename:join(PrivDir, "plugins"),
+ EnabledPluginsFile = filename:join(PrivDir, "enabled_plugins"),
+
+ ok = file:make_dir(MnesiaDir),
+ ok = file:make_dir(RabbitAppDir),
+ ok = file:make_dir(RabbitEbinDir),
+
+ %% Create a fake `rabbit` application.
+ App = {application,
+ rabbit,
+ [{vsn, "fake-rabbit"}]},
+ AppFile = filename:join(RabbitEbinDir, "rabbit.app"),
+ AppContent = io_lib:format("~p.~n", [App]),
+ ok = file:write_file(AppFile, AppContent),
+
+ %% Start a fake RabbitMQ node.
+ Node = rabbit_nodes_common:make(
+ {atom_to_list(?FUNCTION_NAME), "localhost"}),
+ NodeS = atom_to_list(Node),
+ true = os:putenv("RABBITMQ_NODENAME", NodeS),
+ RabbitCommonEbinDir = filename:dirname(code:which(rabbit_env)),
+ Args = ["-noinput",
+ "-sname", atom_to_list(Node),
+ "-pa", RabbitCommonEbinDir,
+ "-pa", RabbitEbinDir,
+ "-mnesia", "dir",
+ rabbit_misc:format("~p", [MnesiaDir]),
+ "-rabbit", "feature_flags_file",
+ rabbit_misc:format("~p", [FeatureFlagsFile]),
+ "-rabbit", "plugins_dir",
+ rabbit_misc:format("~p", [PluginsDir]),
+ "-rabbit", "enabled_plugins_file",
+ rabbit_misc:format("~p", [EnabledPluginsFile]),
+ "-eval",
+ "ok = application:load(mnesia),"
+ "ok = application:load(rabbit)."],
+ PortName = {spawn_executable, os:find_executable("erl")},
+ PortSettings = [{cd, PrivDir},
+ {args, Args},
+ {env, [{"ERL_LIBS", false}]},
+ {line, 512},
+ exit_status,
+ stderr_to_stdout],
+ ct:pal(
+ "Starting fake RabbitMQ node with the following settings:~n~p",
+ [PortSettings]),
+ Pid = spawn_link(
+ fun() ->
+ Port = erlang:open_port(PortName, PortSettings),
+ consume_stdout(Port, Node)
+ end),
+ wait_for_remote_node(Node),
+
+ try
+ persistent_term:put({rabbit_env, os_type}, {unix, undefined}),
+ UnixContext = rabbit_env:get_context(Node),
+
+ persistent_term:erase({rabbit_env, os_type}),
+
+ {RFFValue, RFFOrigin} = forced_feature_flags_on_init_expect(),
+
+ Origins = #{
+ additional_config_files => default,
+ advanced_config_file => default,
+ amqp_ipaddr => default,
+ amqp_tcp_port => default,
+ conf_env_file => default,
+ enabled_plugins => default,
+ enabled_plugins_file => remote_node,
+ erlang_dist_tcp_port => default,
+ feature_flags_file => remote_node,
+ forced_feature_flags_on_init => RFFOrigin,
+ interactive_shell => default,
+ keep_pid_file_on_exit => default,
+ log_base_dir => default,
+ log_feature_flags_registry => default,
+ log_levels => default,
+ main_config_file => default,
+ main_log_file => default,
+ mnesia_base_dir => default,
+ mnesia_dir => remote_node,
+ motd_file => default,
+ nodename => environment,
+ nodename_type => default,
+ os_type => environment,
+ output_supports_colors => default,
+ pid_file => default,
+ plugins_expand_dir => default,
+ plugins_path => remote_node,
+ product_name => default,
+ product_version => default,
+ quorum_queue_dir => default,
+ rabbitmq_home => default,
+ stream_queue_dir => default,
+ upgrade_log_file => default
+ },
+
+ ?assertEqual(
+ #{additional_config_files => "/etc/rabbitmq/conf.d/*.conf",
+ advanced_config_file => "/etc/rabbitmq/advanced.config",
+ amqp_ipaddr => "auto",
+ amqp_tcp_port => 5672,
+ conf_env_file => "/etc/rabbitmq/rabbitmq-env.conf",
+ config_base_dir => "/etc/rabbitmq",
+ data_dir => "/var/lib/rabbitmq",
+ dbg_mods => [],
+ dbg_output => stdout,
+ enabled_plugins => undefined,
+ enabled_plugins_file => EnabledPluginsFile,
+ erlang_dist_tcp_port => 25672,
+ feature_flags_file => FeatureFlagsFile,
+ forced_feature_flags_on_init => RFFValue,
+ from_remote_node => {Node, 10000},
+ interactive_shell => false,
+ keep_pid_file_on_exit => false,
+ log_base_dir => "/var/log/rabbitmq",
+ log_feature_flags_registry => false,
+ log_levels => undefined,
+ main_config_file => "/etc/rabbitmq/rabbitmq",
+ main_log_file => "/var/log/rabbitmq/" ++ NodeS ++ ".log",
+ mnesia_base_dir => undefined,
+ mnesia_dir => MnesiaDir,
+ motd_file => undefined,
+ nodename => Node,
+ nodename_type => shortnames,
+ os_type => {unix, undefined},
+ output_supports_colors => true,
+ pid_file => undefined,
+ plugins_expand_dir => undefined,
+ plugins_path => PluginsDir,
+ product_name => undefined,
+ product_version => undefined,
+ quorum_queue_dir => MnesiaDir ++ "/quorum",
+ rabbitmq_home => maps:get(rabbitmq_home, UnixContext),
+ stream_queue_dir => MnesiaDir ++ "/stream",
+ split_nodename => rabbit_nodes_common:parts(Node),
+ sys_prefix => "",
+ upgrade_log_file =>
+ "/var/log/rabbitmq/" ++ NodeS ++ "_upgrade.log",
+
+ var_origins => Origins#{sys_prefix => default}},
+ UnixContext)
+ after
+ os:unsetenv("RABBITMQ_NODENAME"),
+ unlink(Pid),
+ rpc:call(Node, erlang, halt, [])
+ end.
+
+consume_stdout(Port, Nodename) ->
+ receive
+ {Port, {exit_status, X}} ->
+ ?assertEqual(0, X);
+ {Port, {data, Out}} ->
+ ct:pal("stdout: ~p", [Out]),
+ consume_stdout(Port, Nodename)
+ end.
+
+wait_for_remote_node(Nodename) ->
+ case net_adm:ping(Nodename) of
+ pong -> ok;
+ pang -> timer:sleep(200),
+ wait_for_remote_node(Nodename)
+ end.
+
+check_values_from_offline_remote_node(_) ->
+ Node = rabbit_nodes_common:make(
+ {atom_to_list(?FUNCTION_NAME), "localhost"}),
+ NodeS = atom_to_list(Node),
+ true = os:putenv("RABBITMQ_NODENAME", NodeS),
+
+ persistent_term:put({rabbit_env, os_type}, {unix, undefined}),
+ UnixContext = rabbit_env:get_context(offline),
+
+ persistent_term:erase({rabbit_env, os_type}),
+ os:unsetenv("RABBITMQ_NODENAME"),
+
+ {RFFValue, RFFOrigin} = forced_feature_flags_on_init_expect(),
+
+ Origins = #{
+ additional_config_files => default,
+ advanced_config_file => default,
+ amqp_ipaddr => default,
+ amqp_tcp_port => default,
+ conf_env_file => default,
+ enabled_plugins => default,
+ enabled_plugins_file => default,
+ erlang_dist_tcp_port => default,
+ feature_flags_file => default,
+ forced_feature_flags_on_init => RFFOrigin,
+ interactive_shell => default,
+ keep_pid_file_on_exit => default,
+ log_base_dir => default,
+ log_feature_flags_registry => default,
+ log_levels => default,
+ main_config_file => default,
+ main_log_file => default,
+ mnesia_base_dir => default,
+ mnesia_dir => default,
+ motd_file => default,
+ nodename => environment,
+ nodename_type => default,
+ os_type => environment,
+ output_supports_colors => default,
+ pid_file => default,
+ plugins_expand_dir => default,
+ plugins_path => default,
+ product_name => default,
+ product_version => default,
+ quorum_queue_dir => default,
+ rabbitmq_home => default,
+ stream_queue_dir => default,
+ upgrade_log_file => default
+ },
+
+ ?assertEqual(
+ #{additional_config_files => "/etc/rabbitmq/conf.d/*.conf",
+ advanced_config_file => "/etc/rabbitmq/advanced.config",
+ amqp_ipaddr => "auto",
+ amqp_tcp_port => 5672,
+ conf_env_file => "/etc/rabbitmq/rabbitmq-env.conf",
+ config_base_dir => "/etc/rabbitmq",
+ data_dir => "/var/lib/rabbitmq",
+ dbg_mods => [],
+ dbg_output => stdout,
+ enabled_plugins => undefined,
+ enabled_plugins_file => undefined,
+ erlang_dist_tcp_port => 25672,
+ feature_flags_file => undefined,
+ forced_feature_flags_on_init => RFFValue,
+ from_remote_node => offline,
+ interactive_shell => false,
+ keep_pid_file_on_exit => false,
+ log_base_dir => "/var/log/rabbitmq",
+ log_feature_flags_registry => false,
+ log_levels => undefined,
+ main_config_file => "/etc/rabbitmq/rabbitmq",
+ main_log_file => "/var/log/rabbitmq/" ++ NodeS ++ ".log",
+ mnesia_base_dir => undefined,
+ mnesia_dir => undefined,
+ motd_file => undefined,
+ nodename => Node,
+ nodename_type => shortnames,
+ os_type => {unix, undefined},
+ output_supports_colors => true,
+ pid_file => undefined,
+ plugins_expand_dir => undefined,
+ plugins_path => undefined,
+ product_name => undefined,
+ product_version => undefined,
+ quorum_queue_dir => undefined,
+ rabbitmq_home => maps:get(rabbitmq_home, UnixContext),
+ stream_queue_dir => undefined,
+ split_nodename => rabbit_nodes_common:parts(Node),
+ sys_prefix => "",
+ upgrade_log_file =>
+ "/var/log/rabbitmq/" ++ NodeS ++ "_upgrade.log",
+
+ var_origins => Origins#{sys_prefix => default}},
+ UnixContext).
+
+check_context_to_app_env_vars(_) ->
+ %% When `rabbit_env` is built with `TEST` defined, we can override
+ %% the OS type.
+ persistent_term:put({rabbit_env, os_type}, {unix, undefined}),
+ UnixContext = rabbit_env:get_context(),
+
+ persistent_term:erase({rabbit_env, os_type}),
+
+ Vars = [{mnesia, dir, maps:get(mnesia_dir, UnixContext)},
+ {ra, data_dir, maps:get(quorum_queue_dir, UnixContext)},
+ {osiris, data_dir, maps:get(stream_queue_dir, UnixContext)},
+ {rabbit, feature_flags_file,
+ maps:get(feature_flags_file, UnixContext)},
+ {rabbit, plugins_dir, maps:get(plugins_path, UnixContext)},
+ {rabbit, plugins_expand_dir,
+ maps:get(plugins_expand_dir, UnixContext)},
+ {rabbit, enabled_plugins_file,
+ maps:get(enabled_plugins_file, UnixContext)}],
+
+ lists:foreach(
+ fun({App, Param, _}) ->
+ ?assertEqual(undefined, application:get_env(App, Param))
+ end,
+ Vars),
+
+ rabbit_env:context_to_app_env_vars(UnixContext),
+ lists:foreach(
+ fun({App, Param, Value}) ->
+ ?assertEqual({ok, Value}, application:get_env(App, Param))
+ end,
+ Vars),
+
+ lists:foreach(
+ fun({App, Param, _}) ->
+ application:unset_env(App, Param),
+ ?assertEqual(undefined, application:get_env(App, Param))
+ end,
+ Vars),
+
+ rabbit_env:context_to_app_env_vars_no_logging(UnixContext),
+ lists:foreach(
+ fun({App, Param, Value}) ->
+ ?assertEqual({ok, Value}, application:get_env(App, Param))
+ end,
+ Vars).
+
+check_context_to_code_path(Config) ->
+ PrivDir = ?config(priv_dir, Config),
+ PluginsDir1 = filename:join(
+ PrivDir, rabbit_misc:format("~s-1", [?FUNCTION_NAME])),
+ MyPlugin1Dir = filename:join(PluginsDir1, "my_plugin1"),
+ MyPlugin1EbinDir = filename:join(MyPlugin1Dir, "ebin"),
+ PluginsDir2 = filename:join(
+ PrivDir, rabbit_misc:format("~s-2", [?FUNCTION_NAME])),
+ MyPlugin2Dir = filename:join(PluginsDir2, "my_plugin2"),
+ MyPlugin2EbinDir = filename:join(MyPlugin2Dir, "ebin"),
+
+ ok = file:make_dir(PluginsDir1),
+ ok = file:make_dir(MyPlugin1Dir),
+ ok = file:make_dir(MyPlugin1EbinDir),
+ ok = file:make_dir(PluginsDir2),
+ ok = file:make_dir(MyPlugin2Dir),
+ ok = file:make_dir(MyPlugin2EbinDir),
+
+ %% On Unix.
+ %%
+ %% We can't test the Unix codepath on Windows because the drive letter
+ %% separator conflicts with the path separator (they are both ':').
+ %% However, the Windows codepath can be tested on both Unix and Windows.
+ case os:type() of
+ {unix, _} ->
+ UnixPluginsPath = PluginsDir1 ++ ":" ++ PluginsDir2,
+ true = os:putenv("RABBITMQ_PLUGINS_DIR", UnixPluginsPath),
+ persistent_term:put({rabbit_env, os_type}, {unix, undefined}),
+ UnixContext = rabbit_env:get_context(),
+
+ persistent_term:erase({rabbit_env, os_type}),
+ os:unsetenv("RABBITMQ_PLUGINS_DIR"),
+
+ ?assertEqual(UnixPluginsPath, maps:get(plugins_path, UnixContext)),
+
+ OldCodePath1 = code:get_path(),
+ ?assertNot(lists:member(MyPlugin1EbinDir, OldCodePath1)),
+ ?assertNot(lists:member(MyPlugin2EbinDir, OldCodePath1)),
+
+ rabbit_env:context_to_code_path(UnixContext),
+
+ NewCodePath1 = code:get_path(),
+ ?assert(lists:member(MyPlugin1EbinDir, NewCodePath1)),
+ ?assert(lists:member(MyPlugin2EbinDir, NewCodePath1)),
+ ?assertEqual(
+ [MyPlugin1EbinDir, MyPlugin2EbinDir],
+ lists:filter(
+ fun(Dir) ->
+ Dir =:= MyPlugin1EbinDir orelse
+ Dir =:= MyPlugin2EbinDir
+ end, NewCodePath1)),
+
+ true = code:del_path(MyPlugin1EbinDir),
+ true = code:del_path(MyPlugin2EbinDir);
+ _ ->
+ ok
+ end,
+
+ %% On Windows.
+ Win32PluginsPath = PluginsDir1 ++ ";" ++ PluginsDir2,
+ true = os:putenv("RABBITMQ_PLUGINS_DIR", Win32PluginsPath),
+ persistent_term:put({rabbit_env, os_type}, {win32, undefined}),
+ Win32Context = rabbit_env:get_context(),
+
+ persistent_term:erase({rabbit_env, os_type}),
+ os:unsetenv("RABBITMQ_PLUGINS_DIR"),
+
+ ?assertEqual(Win32PluginsPath, maps:get(plugins_path, Win32Context)),
+
+ OldCodePath2 = code:get_path(),
+ ?assertNot(lists:member(MyPlugin1EbinDir, OldCodePath2)),
+ ?assertNot(lists:member(MyPlugin2EbinDir, OldCodePath2)),
+
+ rabbit_env:context_to_code_path(Win32Context),
+
+ NewCodePath2 = code:get_path(),
+ ?assert(lists:member(MyPlugin1EbinDir, NewCodePath2)),
+ ?assert(lists:member(MyPlugin2EbinDir, NewCodePath2)),
+ ?assertEqual(
+ [MyPlugin1EbinDir, MyPlugin2EbinDir],
+ lists:filter(
+ fun(Dir) ->
+ Dir =:= MyPlugin1EbinDir orelse
+ Dir =:= MyPlugin2EbinDir
+ end, NewCodePath2)),
+
+ true = code:del_path(MyPlugin1EbinDir),
+ true = code:del_path(MyPlugin2EbinDir).
+
+check_RABBITMQ_ADVANCED_CONFIG_FILE(_) ->
+ Value1 = random_string(),
+ Value2 = random_string(),
+ check_prefixed_variable("RABBITMQ_ADVANCED_CONFIG_FILE",
+ advanced_config_file,
+ '_',
+ Value1, Value1,
+ Value2, Value2).
+
+check_RABBITMQ_CONFIG_FILE(_) ->
+ Value1 = random_string(),
+ Value2 = random_string(),
+ check_prefixed_variable("RABBITMQ_CONFIG_FILE",
+ main_config_file,
+ '_',
+ Value1, Value1,
+ Value2, Value2).
+
+check_RABBITMQ_CONFIG_FILES(_) ->
+ Value1 = random_string(),
+ Value2 = random_string(),
+ check_prefixed_variable("RABBITMQ_CONFIG_FILES",
+ additional_config_files,
+ '_',
+ Value1, Value1,
+ Value2, Value2).
+
+check_RABBITMQ_DIST_PORT(_) ->
+ Value1 = random_int(),
+ Value2 = random_int(),
+ check_prefixed_variable("RABBITMQ_DIST_PORT",
+ erlang_dist_tcp_port,
+ 25672,
+ integer_to_list(Value1), Value1,
+ integer_to_list(Value2), Value2).
+
+check_RABBITMQ_ENABLED_PLUGINS(_) ->
+ Value1 = [random_atom(), random_atom()],
+ Value2 = [random_atom(), random_atom()],
+ check_prefixed_variable("RABBITMQ_ENABLED_PLUGINS",
+ enabled_plugins,
+ '_',
+ "", [],
+ "", []),
+ check_prefixed_variable("RABBITMQ_ENABLED_PLUGINS",
+ enabled_plugins,
+ '_',
+ rabbit_misc:format("~s,~s", Value1), Value1,
+ rabbit_misc:format("~s,~s", Value2), Value2).
+
+check_RABBITMQ_ENABLED_PLUGINS_FILE(_) ->
+ Value1 = random_string(),
+ Value2 = random_string(),
+ check_prefixed_variable("RABBITMQ_ENABLED_PLUGINS_FILE",
+ enabled_plugins_file,
+ '_',
+ Value1, Value1,
+ Value2, Value2).
+
+check_RABBITMQ_FEATURE_FLAGS_FILE(_) ->
+ Value1 = random_string(),
+ check_variable("RABBITMQ_FEATURE_FLAGS_FILE",
+ feature_flags_file,
+ Value1, Value1).
+
+check_RABBITMQ_KEEP_PID_FILE_ON_EXIT(_) ->
+ Value1 = true,
+ Value2 = false,
+ check_prefixed_variable("RABBITMQ_KEEP_PID_FILE_ON_EXIT",
+ keep_pid_file_on_exit,
+ false,
+ atom_to_list(Value1), Value1,
+ atom_to_list(Value2), Value2).
+
+check_RABBITMQ_LOG(_) ->
+ Value1 = random_string(),
+ Value2 = random_string(),
+ check_prefixed_variable("RABBITMQ_LOG",
+ log_levels,
+ '_',
+ "critical", #{global => critical},
+ "emergency", #{global => emergency}),
+ check_prefixed_variable("RABBITMQ_LOG",
+ log_levels,
+ '_',
+ Value1, #{Value1 => info},
+ Value2, #{Value2 => info}),
+ check_prefixed_variable("RABBITMQ_LOG",
+ log_levels,
+ '_',
+ Value1 ++ ",none", #{global => none,
+ Value1 => info},
+ Value2 ++ ",none", #{global => none,
+ Value2 => info}),
+ check_prefixed_variable("RABBITMQ_LOG",
+ log_levels,
+ '_',
+ Value1 ++ "=debug", #{Value1 => debug},
+ Value2 ++ "=info", #{Value2 => info}),
+ check_prefixed_variable("RABBITMQ_LOG",
+ log_levels,
+ '_',
+ Value1 ++ ",-color", #{Value1 => info,
+ color => false},
+ Value2 ++ ",+color", #{Value2 => info,
+ color => true}),
+ check_prefixed_variable("RABBITMQ_LOG",
+ log_levels,
+ '_',
+ Value1 ++ "=notice,-color", #{Value1 => notice,
+ color => false},
+ Value2 ++ "=warning,+color", #{Value2 => warning,
+ color => true}),
+ check_prefixed_variable("RABBITMQ_LOG",
+ log_levels,
+ '_',
+ Value1 ++ "=error," ++ Value2, #{Value1 => error,
+ Value2 => info},
+ Value2 ++ "=alert," ++ Value1, #{Value1 => info,
+ Value2 => alert}).
+
+check_RABBITMQ_LOG_BASE(_) ->
+ Value1 = random_string(),
+ Value2 = random_string(),
+ check_prefixed_variable("RABBITMQ_LOG_BASE",
+ log_base_dir,
+ '_',
+ Value1, Value1,
+ Value2, Value2).
+
+check_RABBITMQ_LOGS(_) ->
+ Value1 = random_string(),
+ Value2 = random_string(),
+ check_prefixed_variable("RABBITMQ_LOGS",
+ main_log_file,
+ '_',
+ Value1, Value1,
+ Value2, Value2).
+
+check_RABBITMQ_UPGRADE_LOG(_) ->
+ Value1 = random_string(),
+ Value2 = random_string(),
+ check_prefixed_variable("RABBITMQ_UPGRADE_LOG",
+ upgrade_log_file,
+ '_',
+ Value1, Value1,
+ Value2, Value2).
+
+check_RABBITMQ_MNESIA_BASE(_) ->
+ Value1 = random_string(),
+ Value2 = random_string(),
+ check_prefixed_variable("RABBITMQ_MNESIA_BASE",
+ mnesia_base_dir,
+ '_',
+ Value1, Value1,
+ Value2, Value2).
+
+check_RABBITMQ_MNESIA_DIR(_) ->
+ Value1 = random_string(),
+ Value2 = random_string(),
+ check_prefixed_variable("RABBITMQ_MNESIA_DIR",
+ mnesia_dir,
+ '_',
+ Value1, Value1,
+ Value2, Value2).
+
+check_RABBITMQ_NODE_IP_ADDRESS(_) ->
+ Value1 = random_string(),
+ Value2 = random_string(),
+ check_prefixed_variable("RABBITMQ_NODE_IP_ADDRESS",
+ amqp_ipaddr,
+ "auto",
+ Value1, Value1,
+ Value2, Value2).
+
+check_RABBITMQ_NODE_PORT(_) ->
+ Value1 = random_int(),
+ Value2 = random_int(),
+ check_prefixed_variable("RABBITMQ_NODE_PORT",
+ amqp_tcp_port,
+ 5672,
+ integer_to_list(Value1), Value1,
+ integer_to_list(Value2), Value2).
+
+check_RABBITMQ_NODENAME(_) ->
+ DefaultNodename = get_default_nodename(),
+ {_, DefaultHostname} = rabbit_nodes_common:parts(DefaultNodename),
+
+ Value1 = random_atom(),
+ Value2 = random_atom(),
+ check_prefixed_variable("RABBITMQ_NODENAME",
+ nodename,
+ DefaultNodename,
+ atom_to_list(Value1),
+ list_to_atom(
+ atom_to_list(Value1) ++ "@" ++ DefaultHostname),
+ atom_to_list(Value2),
+ list_to_atom(
+ atom_to_list(Value2) ++ "@" ++ DefaultHostname)),
+
+ Value3 = list_to_atom(random_string() ++ "@" ++ random_string()),
+ Value4 = list_to_atom(random_string() ++ "@" ++ random_string()),
+ check_prefixed_variable("RABBITMQ_NODENAME",
+ nodename,
+ DefaultNodename,
+ atom_to_list(Value3), Value3,
+ atom_to_list(Value4), Value4).
+
+check_RABBITMQ_PID_FILE(_) ->
+ Value1 = random_string(),
+ Value2 = random_string(),
+ check_prefixed_variable("RABBITMQ_PID_FILE",
+ pid_file,
+ '_',
+ Value1, Value1,
+ Value2, Value2).
+
+check_RABBITMQ_PLUGINS_DIR(_) ->
+ Value1 = random_string(),
+ Value2 = random_string(),
+ check_prefixed_variable("RABBITMQ_PLUGINS_DIR",
+ plugins_path,
+ '_',
+ Value1, Value1,
+ Value2, Value2).
+
+check_RABBITMQ_PLUGINS_EXPAND_DIR(_) ->
+ Value1 = random_string(),
+ Value2 = random_string(),
+ check_prefixed_variable("RABBITMQ_PLUGINS_EXPAND_DIR",
+ plugins_expand_dir,
+ '_',
+ Value1, Value1,
+ Value2, Value2).
+
+check_RABBITMQ_PRODUCT_NAME(_) ->
+ Value1 = random_string(),
+ Value2 = random_string(),
+ check_prefixed_variable("RABBITMQ_PRODUCT_NAME",
+ product_name,
+ '_',
+ Value1, Value1,
+ Value2, Value2).
+
+check_RABBITMQ_PRODUCT_VERSION(_) ->
+ Value1 = random_string(),
+ Value2 = random_string(),
+ check_prefixed_variable("RABBITMQ_PRODUCT_VERSION",
+ product_version,
+ '_',
+ Value1, Value1,
+ Value2, Value2).
+
+check_RABBITMQ_MOTD_FILE(_) ->
+ Value1 = random_string(),
+ Value2 = random_string(),
+ check_prefixed_variable("RABBITMQ_MOTD_FILE",
+ motd_file,
+ '_',
+ Value1, Value1,
+ Value2, Value2).
+
+check_RABBITMQ_QUORUM_DIR(_) ->
+ Value1 = random_string(),
+ Value2 = random_string(),
+ check_prefixed_variable("RABBITMQ_QUORUM_DIR",
+ quorum_queue_dir,
+ '_',
+ Value1, Value1,
+ Value2, Value2).
+
+check_RABBITMQ_STREAM_DIR(_) ->
+ Value1 = random_string(),
+ Value2 = random_string(),
+ check_prefixed_variable("RABBITMQ_STREAM_DIR",
+ stream_queue_dir,
+ '_',
+ Value1, Value1,
+ Value2, Value2).
+
+check_RABBITMQ_USE_LONGNAME(_) ->
+ check_prefixed_variable("RABBITMQ_USE_LONGNAME",
+ nodename_type,
+ shortnames,
+ "true", longnames,
+ "false", shortnames).
+
+check_value_is_yes(_) ->
+ ?assert(rabbit_env:value_is_yes("1")),
+ ?assert(rabbit_env:value_is_yes("yes")),
+ ?assert(rabbit_env:value_is_yes("true")),
+ ?assertNot(rabbit_env:value_is_yes("0")),
+ ?assertNot(rabbit_env:value_is_yes("no")),
+ ?assertNot(rabbit_env:value_is_yes("false")),
+ ?assertNot(rabbit_env:value_is_yes(random_string() ++ ".")).
+
+check_log_process_env(_) ->
+ ok = rabbit_env:log_process_env().
+
+check_log_context(_) ->
+ Context = rabbit_env:get_context(),
+ ok = rabbit_env:log_context(Context).
+
+check_get_used_env_vars(_) ->
+ os:putenv("RABBITMQ_LOGS", "-"),
+ os:putenv("CONFIG_FILE", "filename"),
+ Vars = rabbit_env:get_used_env_vars(),
+ ?assert(lists:keymember("RABBITMQ_LOGS", 1, Vars)),
+ ?assert(lists:keymember("CONFIG_FILE", 1, Vars)),
+ ?assertNot(lists:keymember("HOME", 1, Vars)),
+ ?assertNot(lists:keymember("PATH", 1, Vars)),
+ os:unsetenv("RABBITMQ_LOGS"),
+ os:unsetenv("CONFIG_FILE").
+
+check_variable(Variable, Key, ValueToSet, Comparison) ->
+ os:putenv(Variable, ValueToSet),
+ ?assertMatch(#{Key := Comparison}, rabbit_env:get_context()),
+
+ os:unsetenv(Variable),
+ Context = rabbit_env:get_context(),
+ ?assertNotMatch(#{Key := Comparison}, Context),
+ ?assertMatch(#{Key := _}, Context).
+
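+%% Exercises a variable that exists in both plain and RABBITMQ_-prefixed
+%% form: the plain form must be picked up on its own, the prefixed form must
+%% win when both are set, and unsetting both must fall back to the default.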
+check_prefixed_variable("RABBITMQ_" ++ Variable = PrefixedVariable,
+ Key,
+ DefaultValue,
+ Value1ToSet, Comparison1,
+ Value2ToSet, Comparison2) ->
+ os:putenv(Variable, Value1ToSet),
+ os:unsetenv(PrefixedVariable),
+ ?assertMatch(#{Key := Comparison1}, rabbit_env:get_context()),
+
+ os:putenv(PrefixedVariable, Value2ToSet),
+ ?assertMatch(#{Key := Comparison2}, rabbit_env:get_context()),
+
+ os:unsetenv(Variable),
+ os:unsetenv(PrefixedVariable),
+ Context = rabbit_env:get_context(),
+ case DefaultValue of
+ '_' ->
+ ?assertNotMatch(#{Key := Comparison1}, Context),
+ ?assertNotMatch(#{Key := Comparison2}, Context),
+ ?assertMatch(#{Key := _}, Context);
+ _ ->
+ ?assertMatch(#{Key := DefaultValue}, Context)
+ end.
+
+random_int() -> rand:uniform(50000).
+random_string() -> integer_to_list(random_int()).
+random_atom() -> list_to_atom(random_string()).
+
+get_default_nodename() ->
+ CTNode = node(),
+ NodeS = re:replace(
+ atom_to_list(CTNode),
+ "^[^@]+@(.*)$",
+ "rabbit@\\1",
+ [{return, list}]),
+ list_to_atom(NodeS).
+
+check_parse_conf_env_file_output(_) ->
+ ?assertEqual(
+ #{},
+ rabbit_env:parse_conf_env_file_output2(
+ [],
+ #{}
+ )),
+ ?assertEqual(
+ #{"UNQUOTED" => "a",
+ "SINGLE_QUOTED" => "b",
+ "DOUBLE_QUOTED" => "c",
+ "SINGLE_DOLLAR" => "d"},
+ rabbit_env:parse_conf_env_file_output2(
+ ["UNQUOTED=a",
+ "SINGLE_QUOTED='b'",
+ "DOUBLE_QUOTED=\"c\"",
+ "SINGLE_DOLLAR=$'d'"],
+ #{}
+ )),
+ ?assertEqual(
+ #{"A" => "a",
+ "B" => "b",
+ "MULTI_LINE" => "\n'foobar'"},
+ rabbit_env:parse_conf_env_file_output2(
+ ["A=a",
+ "MULTI_LINE='",
+ "'\"'\"'foobar'\"'\"",
+ "B=b"],
+ #{}
+ )).
diff --git a/deps/rabbit_common/test/supervisor2_SUITE.erl b/deps/rabbit_common/test/supervisor2_SUITE.erl
new file mode 100644
index 0000000000..7b89363999
--- /dev/null
+++ b/deps/rabbit_common/test/supervisor2_SUITE.erl
@@ -0,0 +1,128 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(supervisor2_SUITE).
+
+-behaviour(supervisor2).
+
+-include_lib("common_test/include/ct.hrl").
+
+-compile(export_all).
+
+all() -> [intrinsic, delayed_restart].
+
+intrinsic(_Config) ->
+ false = process_flag(trap_exit, true),
+ Intensity = 5,
+ Args = {one_for_one, intrinsic, Intensity},
+ {passed, SupPid} = with_sup(Args, fun test_supervisor_intrinsic/1),
+ receive
+ {'EXIT', SupPid, shutdown} -> ok
+ end,
+ false = is_process_alive(SupPid).
+
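+%% With a {permanent, Delay} restart spec, supervisor2 does not give up once
+%% the restart intensity is exceeded: it waits Delay seconds and then
+%% restarts the child. The test drives the child past the intensity limit,
+%% checks it is unreachable during the delay, and reachable again afterwards.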
+delayed_restart(_Config) ->
+ DelayInSeconds = 1,
+ Intensity = 1,
+ Args0 = {simple_one_for_one, {permanent, DelayInSeconds}, Intensity},
+ F = fun(SupPid) ->
+ {ok, _ChildPid} =
+ supervisor2:start_child(SupPid, []),
+ test_supervisor_delayed_restart(SupPid)
+ end,
+ {passed, _} = with_sup(Args0, F),
+
+ Args1 = {one_for_one, {permanent, DelayInSeconds}, Intensity},
+ {passed, _} = with_sup(Args1, fun test_supervisor_delayed_restart/1).
+
+test_supervisor_intrinsic(SupPid) ->
+ ok = ping_child(SupPid),
+
+ ok = exit_child(SupPid, abnormal),
+ ok = timer:sleep(100),
+ ok = ping_child(SupPid),
+
+ ok = exit_child(SupPid, {shutdown, restart}),
+ ok = timer:sleep(100),
+ ok = ping_child(SupPid),
+
+ ok = exit_child(SupPid, shutdown),
+ ok = timer:sleep(100),
+ passed.
+
+test_supervisor_delayed_restart(SupPid) ->
+ ok = ping_child(SupPid),
+
+ ok = exit_child(SupPid, abnormal),
+ ok = timer:sleep(100),
+ ok = ping_child(SupPid),
+
+ ok = exit_child(SupPid, abnormal),
+ ok = timer:sleep(100),
+ timeout = ping_child(SupPid),
+
+ ok = timer:sleep(1010),
+ ok = ping_child(SupPid),
+ passed.
+
+with_sup({RestartStrategy, Restart, Intensity}, Fun) ->
+ {ok, SupPid} = supervisor2:start_link(?MODULE, [RestartStrategy, Restart, Intensity]),
+ Res = Fun(SupPid),
+ true = unlink(SupPid),
+ {Res, SupPid}.
+
+init([RestartStrategy, Restart, Intensity]) ->
+ SupFlags = #{
+ strategy => RestartStrategy,
+ intensity => Intensity,
+ period => 1
+ },
+ ChildSpec = #{
+ id => test,
+ start => {?MODULE, start_child, []},
+ restart => Restart,
+ shutdown => 16#ffffffff,
+ type => worker,
+ modules => [?MODULE]
+ },
+ {ok, {SupFlags, [ChildSpec]}}.
+
+start_child() ->
+ {ok, proc_lib:spawn_link(fun run_child/0)}.
+
+ping_child(SupPid) ->
+ Ref = make_ref(),
+ F = fun(ChildPid) ->
+ ChildPid ! {ping, Ref, self()}
+ end,
+ with_child_pid(SupPid, F),
+ receive
+ {pong, Ref} -> ok
+ after 1000 -> timeout
+ end.
+
+exit_child(SupPid, ExitType) ->
+ F = fun(ChildPid) ->
+ exit(ChildPid, ExitType)
+ end,
+ with_child_pid(SupPid, F),
+ ok.
+
+with_child_pid(SupPid, Fun) ->
+ case supervisor2:which_children(SupPid) of
+ [{_Id, undefined, worker, [?MODULE]}] -> ok;
+ [{_Id, restarting, worker, [?MODULE]}] -> ok;
+ [{_Id, ChildPid, worker, [?MODULE]}] -> Fun(ChildPid);
+ [] -> ok
+ end.
+
+run_child() ->
+ receive
+ {ping, Ref, Pid} ->
+ Pid ! {pong, Ref},
+ run_child()
+ end.
diff --git a/deps/rabbit_common/test/unit_SUITE.erl b/deps/rabbit_common/test/unit_SUITE.erl
new file mode 100644
index 0000000000..925155211f
--- /dev/null
+++ b/deps/rabbit_common/test/unit_SUITE.erl
@@ -0,0 +1,446 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2016-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(unit_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("proper/include/proper.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-include("rabbit_memory.hrl").
+-include("rabbit.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, parallel_tests},
+ {group, parse_mem_limit},
+ {group, gen_server2}
+ ].
+
+groups() ->
+ [
+ {parallel_tests, [parallel], [
+ data_coercion_to_proplist,
+ data_coercion_to_list,
+ data_coercion_to_map,
+ pget,
+ encrypt_decrypt,
+ encrypt_decrypt_term,
+ version_equivalence,
+ pid_decompose_compose,
+ platform_and_version,
+ frame_encoding_does_not_fail_with_empty_binary_payload,
+ amqp_table_conversion,
+ name_type,
+ get_erl_path
+ ]},
+ {parse_mem_limit, [parallel], [
+ parse_mem_limit_relative_exactly_max,
+ parse_mem_relative_above_max,
+ parse_mem_relative_integer,
+ parse_mem_relative_invalid
+ ]},
+ {gen_server2, [parallel], [
+ stats_timer_is_working,
+ stats_timer_writes_gen_server2_metrics_if_core_metrics_ets_exists,
+ stop_stats_timer_on_hibernation,
+ stop_stats_timer_on_backoff,
+ stop_stats_timer_on_backoff_when_backoff_less_than_stats_timeout,
+ gen_server2_stop
+ ]}
+ ].
+
+init_per_group(_, Config) -> Config.
+end_per_group(_, Config) -> Config.
+
+init_per_testcase(_, Config) -> Config.
+
+end_per_testcase(stats_timer_is_working, Config) ->
+ reset_stats_interval(),
+ Config;
+end_per_testcase(stop_stats_timer_on_hibernation, Config) ->
+ reset_stats_interval(),
+ Config;
+end_per_testcase(stop_stats_timer_on_backoff, Config) ->
+ reset_stats_interval(),
+ Config;
+end_per_testcase(stop_stats_timer_on_backoff_when_backoff_less_than_stats_timeout, Config) ->
+ reset_stats_interval(),
+ Config;
+end_per_testcase(stats_timer_writes_gen_server2_metrics_if_core_metrics_ets_exists, Config) ->
+ rabbit_core_metrics:terminate(),
+ reset_stats_interval(),
+ Config;
+end_per_testcase(_, Config) -> Config.
+
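+%% Background for the stats-timer tests: with collect_statistics set, an
+%% awake gen_server2 emits stats once per collect_statistics_interval;
+%% hibernating (directly or via a {backoff, ...} timeout) stops the periodic
+%% timer and emits one final stats event on the way down.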
+stats_timer_is_working(_) ->
+ StatsInterval = 300,
+ set_stats_interval(StatsInterval),
+
+ {ok, TestServer} = gen_server2_test_server:start_link(count_stats),
+ %% The stats timer starts emitting on its own once collect_statistics is
+ %% configured; no explicit kick-off message is needed.
+
+ timer:sleep(StatsInterval * 4 + 100),
+ StatsCount = gen_server2_test_server:stats_count(TestServer),
+ ?assertEqual(4, StatsCount).
+
+stats_timer_writes_gen_server2_metrics_if_core_metrics_ets_exists(_) ->
+ rabbit_core_metrics:init(),
+
+ StatsInterval = 300,
+ set_stats_interval(StatsInterval),
+
+ {ok, TestServer} = gen_server2_test_server:start_link(),
+ timer:sleep(StatsInterval * 4),
+
+ %% No messages in the buffer
+ ?assertEqual(0, rabbit_core_metrics:get_gen_server2_stats(TestServer)),
+
+ %% Sleep to accumulate messages
+ gen_server2:cast(TestServer, {sleep, StatsInterval + 100}),
+
+ %% Sleep to get results
+ gen_server2:cast(TestServer, {sleep, 1000}),
+ gen_server2:cast(TestServer, ignore),
+ gen_server2:cast(TestServer, ignore),
+ gen_server2:cast(TestServer, ignore),
+
+ timer:sleep(StatsInterval + 150),
+ ?assertEqual(4, rabbit_core_metrics:get_gen_server2_stats(TestServer)).
+
+stop_stats_timer_on_hibernation(_) ->
+ StatsInterval = 300,
+ set_stats_interval(StatsInterval),
+
+ %% No backoff configured
+ {ok, TestServer} = gen_server2_test_server:start_link(count_stats),
+
+ ?assertEqual(ok, gen_server2:call(TestServer, hibernate)),
+
+ timer:sleep(50),
+
+ ?assertEqual({current_function,{erlang, hibernate, 3}},
+ erlang:process_info(TestServer, current_function)),
+
+ timer:sleep(StatsInterval * 6 + 100),
+ StatsCount1 = gen_server2_test_server:stats_count(TestServer),
+ %% The timer was stopped. No stats collected
+ %% The count is 1 because hibernation emits stats
+ ?assertEqual(1, StatsCount1),
+
+ %% A message will wake up the process
+ gen_server2:call(TestServer, wake_up),
+ gen_server2:call(TestServer, wake_up),
+ gen_server2:call(TestServer, wake_up),
+
+ timer:sleep(StatsInterval * 4 + 100),
+ StatsCount5 = gen_server2_test_server:stats_count(TestServer),
+ ?assertEqual(5, StatsCount5),
+ ?assertEqual(ok, gen_server2:call(TestServer, hibernate)),
+
+ timer:sleep(50),
+
+ ?assertEqual({current_function, {erlang, hibernate, 3}},
+ erlang:process_info(TestServer, current_function)),
+
+ timer:sleep(StatsInterval * 4 + 100),
+ StatsCount6 = gen_server2_test_server:stats_count(TestServer),
+ %% The timer was stopped again, so no periodic stats were collected;
+ %% the extra count comes from the stats event hibernation emits (5 -> 6)
+ ?assertEqual(6, StatsCount6).
+
+stop_stats_timer_on_backoff(_) ->
+ StatsInterval = 300,
+ set_stats_interval(StatsInterval),
+
+ Backoff = 1000,
+ {ok, TestServer} =
+ gen_server2_test_server:start_link(
+ count_stats,
+ {backoff, Backoff, Backoff, 10000}),
+
+ ok = gen_server2:call(TestServer, hibernate),
+
+ {current_function,{gen_server2,process_next_msg,1}} =
+ erlang:process_info(TestServer, current_function),
+
+ %% Receiving messages during backoff period does not emit stats
+ timer:sleep(Backoff div 2),
+ ok = gen_server2:call(TestServer, hibernate),
+
+ timer:sleep(Backoff div 2 + 50),
+ ?assertEqual({current_function,{gen_server2,process_next_msg,1}},
+ erlang:process_info(TestServer, current_function)),
+
+ %% Hibernate after backoff time after last message
+ timer:sleep(Backoff div 2),
+ ?assertEqual({current_function,{erlang,hibernate,3}},
+ erlang:process_info(TestServer, current_function)),
+
+ timer:sleep(StatsInterval * 4 + 100),
+ StatsCount = gen_server2_test_server:stats_count(TestServer),
+ %% The timer was stopped. No stats collected
+ %% The count is 1 because hibernation emits stats
+ ?assertEqual(1, StatsCount),
+
+ %% A message will wake up the process
+ gen_server2:call(TestServer, wake_up),
+
+ timer:sleep(StatsInterval * 4 + 100),
+ StatsCount5 = gen_server2_test_server:stats_count(TestServer),
+ ?assertEqual(5, StatsCount5).
+
+stop_stats_timer_on_backoff_when_backoff_less_than_stats_timeout(_) ->
+ StatsInterval = 300,
+ set_stats_interval(StatsInterval),
+
+ Backoff = 200,
+ {ok, TestServer} =
+ gen_server2_test_server:start_link(
+ count_stats,
+ {backoff, Backoff, Backoff, 10000}),
+
+ ?assertEqual(ok, gen_server2:call(TestServer, hibernate)),
+
+ ?assertEqual({current_function, {gen_server2, process_next_msg, 1}},
+ erlang:process_info(TestServer, current_function)),
+
+ timer:sleep(Backoff + 50),
+
+ ?assertEqual({current_function, {erlang, hibernate, 3}},
+ erlang:process_info(TestServer, current_function)),
+
+ timer:sleep(StatsInterval * 4 + 100),
+ StatsCount = gen_server2_test_server:stats_count(TestServer),
+ %% The timer was stopped. No stats collected
+ %% The count is 1 because hibernation emits stats
+ ?assertEqual(1, StatsCount),
+
+ %% A message will wake up the process
+ gen_server2:call(TestServer, wake_up),
+
+ timer:sleep(StatsInterval * 4 + 100),
+ StatsCount5 = gen_server2_test_server:stats_count(TestServer),
+ ?assertEqual(5, StatsCount5).
+
+gen_server2_stop(_) ->
+ {ok, TestServer} = gen_server2_test_server:start_link(),
+ ?assertEqual(ok, gen_server2:stop(TestServer)),
+ ?assertEqual(false, erlang:is_process_alive(TestServer)),
+ ?assertEqual({'EXIT', noproc}, (catch gen_server2:stop(TestServer))),
+ ok.
+
+parse_mem_limit_relative_exactly_max(_Config) ->
+ MemLimit = vm_memory_monitor:parse_mem_limit(1.0),
+ case MemLimit of
+ ?MAX_VM_MEMORY_HIGH_WATERMARK -> ok;
+ _ -> ct:fail(
+ "Expected memory limit to be ~p, but it was ~p",
+ [?MAX_VM_MEMORY_HIGH_WATERMARK, MemLimit]
+ )
+ end.
+
+parse_mem_relative_above_max(_Config) ->
+ MemLimit = vm_memory_monitor:parse_mem_limit(1.01),
+ case MemLimit of
+ ?MAX_VM_MEMORY_HIGH_WATERMARK -> ok;
+ _ -> ct:fail(
+ "Expected memory limit to be ~p, but it was ~p",
+ [?MAX_VM_MEMORY_HIGH_WATERMARK, MemLimit]
+ )
+ end.
+
+parse_mem_relative_integer(_Config) ->
+ MemLimit = vm_memory_monitor:parse_mem_limit(1),
+ case MemLimit of
+ ?MAX_VM_MEMORY_HIGH_WATERMARK -> ok;
+ _ -> ct:fail(
+ "Expected memory limit to be ~p, but it was ~p",
+ [?MAX_VM_MEMORY_HIGH_WATERMARK, MemLimit]
+ )
+ end.
+
+parse_mem_relative_invalid(_Config) ->
+ MemLimit = vm_memory_monitor:parse_mem_limit([255]),
+ case MemLimit of
+ ?DEFAULT_VM_MEMORY_HIGH_WATERMARK -> ok;
+ _ -> ct:fail(
+ "Expected memory limit to be ~p, but it was ~p",
+ [?DEFAULT_VM_MEMORY_HIGH_WATERMARK, MemLimit]
+ )
+ end.
+
+platform_and_version(_Config) ->
+ MajorVersion = erlang:system_info(otp_release),
+ Result = rabbit_misc:platform_and_version(),
+ RegExp = "^Erlang/OTP\s" ++ MajorVersion,
+ case re:run(Result, RegExp) of
+ nomatch -> ct:fail("~p does not match ~p", [Result, RegExp]);
+ {error, ErrType} -> ct:fail("~p", [ErrType]);
+ _ -> ok
+ end.
+
+data_coercion_to_map(_Config) ->
+ ?assertEqual(#{a => 1}, rabbit_data_coercion:to_map([{a, 1}])),
+ ?assertEqual(#{a => 1}, rabbit_data_coercion:to_map(#{a => 1})).
+
+data_coercion_to_proplist(_Config) ->
+ ?assertEqual([{a, 1}], rabbit_data_coercion:to_proplist([{a, 1}])),
+ ?assertEqual([{a, 1}], rabbit_data_coercion:to_proplist(#{a => 1})).
+
+data_coercion_to_list(_Config) ->
+ ?assertEqual([{a, 1}], rabbit_data_coercion:to_list([{a, 1}])),
+ ?assertEqual([{a, 1}], rabbit_data_coercion:to_list(#{a => 1})).
+
+pget(_Config) ->
+ ?assertEqual(1, rabbit_misc:pget(a, [{a, 1}])),
+ ?assertEqual(undefined, rabbit_misc:pget(b, [{a, 1}])),
+
+ ?assertEqual(1, rabbit_misc:pget(a, #{a => 1})),
+ ?assertEqual(undefined, rabbit_misc:pget(b, #{a => 1})).
+
+pid_decompose_compose(_Config) ->
+ Pid = self(),
+ {Node, Cre, Id, Ser} = rabbit_misc:decompose_pid(Pid),
+ Node = node(Pid),
+ Pid = rabbit_misc:compose_pid(Node, Cre, Id, Ser),
+ OtherNode = 'some_node@localhost',
+ PidOnOtherNode = rabbit_misc:pid_change_node(Pid, OtherNode),
+ {OtherNode, Cre, Id, Ser} = rabbit_misc:decompose_pid(PidOnOtherNode).
+
+encrypt_decrypt(_Config) ->
+ %% Take all available block ciphers.
+ Hashes = rabbit_pbe:supported_hashes(),
+ Ciphers = rabbit_pbe:supported_ciphers(),
+ %% For each cipher, try to encrypt and decrypt data sizes from 0 to 64 bytes
+ %% with a random passphrase.
+ _ = [begin
+ PassPhrase = crypto:strong_rand_bytes(16),
+ Iterations = rand:uniform(100),
+ Data = crypto:strong_rand_bytes(64),
+ [begin
+ Expected = binary:part(Data, 0, Len),
+ Enc = rabbit_pbe:encrypt(C, H, Iterations, PassPhrase, Expected),
+ Expected = iolist_to_binary(rabbit_pbe:decrypt(C, H, Iterations, PassPhrase, Enc))
+ end || Len <- lists:seq(0, byte_size(Data))]
+ end || H <- Hashes, C <- Ciphers],
+ ok.
+
+encrypt_decrypt_term(_Config) ->
+ %% Take all available block ciphers.
+ Hashes = rabbit_pbe:supported_hashes(),
+ Ciphers = rabbit_pbe:supported_ciphers(),
+ %% Different Erlang terms to try encrypting.
+ DataSet = [
+ 10000,
+ [5672],
+ [{"127.0.0.1", 5672},
+ {"::1", 5672}],
+ [{connection, info}, {channel, info}],
+ [{cacertfile, "/path/to/testca/cacert.pem"},
+ {certfile, "/path/to/server/cert.pem"},
+ {keyfile, "/path/to/server/key.pem"},
+ {verify, verify_peer},
+ {fail_if_no_peer_cert, false}],
+ [<<".*">>, <<".*">>, <<".*">>]
+ ],
+ _ = [begin
+ PassPhrase = crypto:strong_rand_bytes(16),
+ Iterations = rand:uniform(100),
+ Enc = rabbit_pbe:encrypt_term(C, H, Iterations, PassPhrase, Data),
+ Data = rabbit_pbe:decrypt_term(C, H, Iterations, PassPhrase, Enc)
+ end || H <- Hashes, C <- Ciphers, Data <- DataSet],
+ ok.
+
+version_equivalence(_Config) ->
+ true = rabbit_misc:version_minor_equivalent("3.0.0", "3.0.0"),
+ true = rabbit_misc:version_minor_equivalent("3.0.0", "3.0.1"),
+ true = rabbit_misc:version_minor_equivalent("%%VSN%%", "%%VSN%%"),
+ true = rabbit_misc:version_minor_equivalent("3.0.0", "3.0"),
+ true = rabbit_misc:version_minor_equivalent("3.0.0", "3.0.0.1"),
+ true = rabbit_misc:version_minor_equivalent("3.0.0.1", "3.0.0.3"),
+ true = rabbit_misc:version_minor_equivalent("3.0.0.1", "3.0.1.3"),
+ true = rabbit_misc:version_minor_equivalent("3.0.0", "3.0.foo"),
+ false = rabbit_misc:version_minor_equivalent("3.0.0", "3.1.0"),
+ false = rabbit_misc:version_minor_equivalent("3.0.0.1", "3.1.0.1"),
+
+ false = rabbit_misc:version_minor_equivalent("3.5.7", "3.6.7"),
+ false = rabbit_misc:version_minor_equivalent("3.6.5", "3.6.6"),
+ false = rabbit_misc:version_minor_equivalent("3.6.6", "3.7.0"),
+ true = rabbit_misc:version_minor_equivalent("3.6.7", "3.6.6"),
+
+ %% Starting with RabbitMQ 3.7.x and feature flags introduced in
+ %% RabbitMQ 3.8.x, versions are considered equivalent and the actual
+ %% check is deferred to the feature flags module.
+ false = rabbit_misc:version_minor_equivalent("3.6.0", "3.8.0"),
+ true = rabbit_misc:version_minor_equivalent("3.7.0", "3.8.0"),
+ true = rabbit_misc:version_minor_equivalent("3.7.0", "3.10.0"),
+
+ true = rabbit_misc:version_minor_equivalent(<<"3.0.0">>, <<"3.0.0">>),
+ true = rabbit_misc:version_minor_equivalent(<<"3.0.0">>, <<"3.0.1">>),
+ true = rabbit_misc:version_minor_equivalent(<<"%%VSN%%">>, <<"%%VSN%%">>),
+ true = rabbit_misc:version_minor_equivalent(<<"3.0.0">>, <<"3.0">>),
+ true = rabbit_misc:version_minor_equivalent(<<"3.0.0">>, <<"3.0.0.1">>),
+ false = rabbit_misc:version_minor_equivalent(<<"3.0.0">>, <<"3.1.0">>),
+ false = rabbit_misc:version_minor_equivalent(<<"3.0.0.1">>, <<"3.1.0.1">>).
+
+frame_encoding_does_not_fail_with_empty_binary_payload(_Config) ->
+ [begin
+ Content = #content{
+ class_id = 60, properties = none, properties_bin = <<0,0>>, protocol = rabbit_framing_amqp_0_9_1,
+ payload_fragments_rev = P
+ },
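+        %% ExpectedFrames is already bound by the generator below, so
+        %% this match asserts that the generated frames are as expected.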
+ ExpectedFrames = rabbit_binary_generator:build_simple_content_frames(1, Content, 0, rabbit_framing_amqp_0_9_1)
+ end || {P, ExpectedFrames} <- [
+ {[], [[<<2,0,1,0,0,0,14>>,[<<0,60,0,0,0,0,0,0,0,0,0,0>>,<<0,0>>],206]]},
+ {[<<>>], [[<<2,0,1,0,0,0,14>>,[<<0,60,0,0,0,0,0,0,0,0,0,0>>,<<0,0>>],206]]},
+ {[<<"payload">>], [[<<2,0,1,0,0,0,14>>,[<<0,60,0,0,0,0,0,0,0,0,0,7>>,<<0,0>>],206],
+ [<<3,0,1,0,0,0,7>>,[<<"payload">>],206]]}
+ ]],
+ ok.
+
+amqp_table_conversion(_Config) ->
+ assert_table(#{}, []),
+ assert_table(#{<<"x-expires">> => 1000},
+ [{<<"x-expires">>, long, 1000}]),
+ assert_table(#{<<"x-forwarding">> =>
+ [#{<<"uri">> => <<"amqp://localhost/%2F/upstream">>}]},
+ [{<<"x-forwarding">>, array,
+ [{table, [{<<"uri">>, longstr,
+ <<"amqp://localhost/%2F/upstream">>}]}]}]).
+
+assert_table(JSON, AMQP) ->
+ ?assertEqual(JSON, rabbit_misc:amqp_table(AMQP)),
+ ?assertEqual(AMQP, rabbit_misc:to_amqp_table(JSON)).
+
+
+set_stats_interval(Interval) ->
+ application:set_env(rabbit, collect_statistics, coarse),
+ application:set_env(rabbit, collect_statistics_interval, Interval).
+
+reset_stats_interval() ->
+ application:unset_env(rabbit, collect_statistics),
+ application:unset_env(rabbit, collect_statistics_interval).
+
+name_type(_) ->
+ ?assertEqual(shortnames, rabbit_nodes_common:name_type(rabbit)),
+ ?assertEqual(shortnames, rabbit_nodes_common:name_type(rabbit@localhost)),
+ ?assertEqual(longnames, rabbit_nodes_common:name_type('rabbit@localhost.example.com')),
+ ok.
+
+get_erl_path(_) ->
+ Exe = rabbit_runtime:get_erl_path(),
+ case os:type() of
+ {win32, _} ->
+ ?assertNotMatch(nomatch, string:find(Exe, "erl.exe"));
+ _ ->
+ ?assertNotMatch(nomatch, string:find(Exe, "erl"))
+ end,
+ ok.
diff --git a/deps/rabbit_common/test/unit_priority_queue_SUITE.erl b/deps/rabbit_common/test/unit_priority_queue_SUITE.erl
new file mode 100644
index 0000000000..8d58c72f10
--- /dev/null
+++ b/deps/rabbit_common/test/unit_priority_queue_SUITE.erl
@@ -0,0 +1,35 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2018-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(unit_priority_queue_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ member,
+ member_priority_queue
+ ].
+
+member(_Config) ->
+ Q = lists:foldl(fun(V, Acc) -> priority_queue:in(V, Acc) end, priority_queue:new(), lists:seq(1, 10)),
+ ?assert(priority_queue:member(1, Q)),
+ ?assert(priority_queue:member(2, Q)),
+ ?assertNot(priority_queue:member(100, Q)),
+ ?assertNot(priority_queue:member(1, priority_queue:new())),
+ ok.
+
+member_priority_queue(_Config) ->
+ Q = lists:foldl(fun(V, Acc) -> priority_queue:in(V, V rem 4, Acc) end, priority_queue:new(),
+ lists:seq(1, 100)),
+ ?assert(priority_queue:member(1, Q)),
+ ?assert(priority_queue:member(50, Q)),
+ ?assertNot(priority_queue:member(200, Q)),
+ ok.
diff --git a/deps/rabbit_common/test/worker_pool_SUITE.erl b/deps/rabbit_common/test/worker_pool_SUITE.erl
new file mode 100644
index 0000000000..a50104f6c7
--- /dev/null
+++ b/deps/rabbit_common/test/worker_pool_SUITE.erl
@@ -0,0 +1,220 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(worker_pool_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-compile(export_all).
+
+-define(POOL_SIZE, 1).
+-define(POOL_NAME, test_pool).
+
+all() ->
+ [
+ run_code_synchronously,
+ run_code_asynchronously,
+ set_timeout,
+ cancel_timeout,
+ cancel_timeout_by_setting,
+ dispatch_async_blocks_until_task_begins
+ ].
+
+init_per_testcase(_, Config) ->
+ {ok, Pool} = worker_pool_sup:start_link(?POOL_SIZE, ?POOL_NAME),
+ [{pool_sup, Pool} | Config].
+
+end_per_testcase(_, Config) ->
+ Pool = ?config(pool_sup, Config),
+ unlink(Pool),
+ exit(Pool, kill).
+
+run_code_synchronously(_) ->
+ Self = self(),
+ Test = make_ref(),
+ Sleep = 200,
+ {Time, Result} = timer:tc(fun() ->
+ worker_pool:submit(?POOL_NAME,
+ fun() ->
+ timer:sleep(Sleep),
+ Self ! {hi, Test},
+ self()
+ end,
+ reuse)
+ end),
+    % The worker ran synchronously: submit/3 blocked while the task ran
+ true = Time > Sleep,
+    % The worker has sent the message
+ receive {hi, Test} -> ok
+ after 0 -> error(no_message_from_worker)
+ end,
+ % Worker is a separate process
+ true = (Self /= Result).
+
+run_code_asynchronously(_) ->
+ Self = self(),
+ Test = make_ref(),
+ Sleep = 200,
+ {Time, Result} = timer:tc(fun() ->
+ worker_pool:submit_async(?POOL_NAME,
+ fun() ->
+ timer:sleep(Sleep),
+ Self ! {hi, Test},
+ self()
+ end)
+ end),
+    % The call returned before the task finished, i.e. it ran asynchronously
+ true = Time < Sleep,
+    % The worker has sent the message
+ receive {hi, Test} -> ok
+ after Sleep + 100 -> error(no_message_from_worker)
+ end,
+    % submit_async/2 returns 'ok' immediately, not the worker's result
+ true = (Self /= Result).
+
+set_timeout(_) ->
+ Self = self(),
+ Test = make_ref(),
+ Worker = worker_pool:submit(?POOL_NAME,
+ fun() ->
+ Worker = self(),
+ timer:sleep(100),
+ worker_pool_worker:set_timeout(
+ my_timeout, 1000,
+ fun() ->
+ Self ! {hello, self(), Test}
+ end),
+ Worker
+ end,
+ reuse),
+
+    % The timeout must not have fired yet; it is due after 1000 ms
+ receive {hello, Worker, Test} -> exit(timeout_should_wait)
+ after 0 -> ok
+ end,
+
+ timer:sleep(1000),
+
+ receive {hello, Worker, Test} -> ok
+ after 1000 -> exit(timeout_is_late)
+ end.
+
+
+cancel_timeout(_) ->
+ Self = self(),
+ Test = make_ref(),
+ Worker = worker_pool:submit(?POOL_NAME,
+ fun() ->
+ Worker = self(),
+ timer:sleep(100),
+ worker_pool_worker:set_timeout(
+ my_timeout, 1000,
+ fun() ->
+ Self ! {hello, self(), Test}
+ end),
+ Worker
+ end,
+ reuse),
+
+    % The timeout must not have fired yet; it is due after 1000 ms
+ receive {hello, Worker, Test} -> exit(timeout_should_wait)
+ after 0 -> ok
+ end,
+
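+    % Route the next job to the same worker so that the timeout is
+    % cleared on the worker process that set it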
+ worker_pool_worker:next_job_from(Worker, Self),
+ Worker = worker_pool_worker:submit(Worker,
+ fun() ->
+ worker_pool_worker:clear_timeout(my_timeout),
+ Worker
+ end,
+ reuse),
+
+ timer:sleep(1000),
+ receive {hello, Worker, Test} -> exit(timeout_is_not_cancelled)
+ after 0 -> ok
+ end.
+
+cancel_timeout_by_setting(_) ->
+ Self = self(),
+ Test = make_ref(),
+ Worker = worker_pool:submit(?POOL_NAME,
+ fun() ->
+ Worker = self(),
+ timer:sleep(100),
+ worker_pool_worker:set_timeout(
+ my_timeout, 1000,
+ fun() ->
+ Self ! {hello, self(), Test}
+ end),
+ Worker
+ end,
+ reuse),
+
+    % The timeout must not have fired yet; it is due after 1000 ms
+ receive {hello, Worker, Test} -> exit(timeout_should_wait)
+ after 0 -> ok
+ end,
+
+ worker_pool_worker:next_job_from(Worker, Self),
+ Worker = worker_pool_worker:submit(Worker,
+ fun() ->
+ worker_pool_worker:set_timeout(my_timeout, 1000,
+ fun() ->
+ Self ! {hello_reset, self(), Test}
+ end),
+ Worker
+ end,
+ reuse),
+
+ timer:sleep(1000),
+ receive {hello, Worker, Test} -> exit(timeout_is_not_cancelled)
+ after 0 -> ok
+ end,
+
+ receive {hello_reset, Worker, Test} -> ok
+ after 1000 -> exit(timeout_is_late)
+ end.
+
+dispatch_async_blocks_until_task_begins(_) ->
+ Self = self(),
+
+ Waiter = fun() ->
+ Self ! {register, self()},
+ receive
+ go -> ok
+ end
+ end,
+
+ ok = worker_pool:dispatch_sync(?POOL_NAME, Waiter),
+ SomeWorker = receive
+ {register, WPid} -> WPid
+ after 250 ->
+ none
+ end,
+ ?assert(is_process_alive(SomeWorker), "Dispatched tasks should be running"),
+ spawn(fun() ->
+ ok = worker_pool:dispatch_sync(?POOL_NAME,
+ Waiter),
+ Self ! done_waiting,
+ exit(normal)
+ end),
+ DidWait = receive
+ done_waiting ->
+ false
+ after 250 ->
+ true
+ end,
+ ?assert(DidWait, "dispatch_sync should block until there is a free worker"),
+ SomeWorker ! go,
+ DidFinish = receive
+ done_waiting ->
+ true
+ after 250 ->
+ false
+ end,
+ ?assert(DidFinish, "appearance of a free worker should unblock the dispatcher").
diff --git a/deps/rabbitmq_amqp1_0/.gitignore b/deps/rabbitmq_amqp1_0/.gitignore
new file mode 100644
index 0000000000..faa711d4f6
--- /dev/null
+++ b/deps/rabbitmq_amqp1_0/.gitignore
@@ -0,0 +1,35 @@
+.sw?
+.*.sw?
+*.beam
+*.plt
+/.erlang.mk/
+/cover/
+/deps/
+/doc/
+/ebin/
+/escript/
+/escript.lock
+/logs/
+/plugins/
+/plugins.lock
+/sbin/
+/sbin.lock
+/xrefr
+
+rabbitmq_amqp1_0.d
+
+[Dd]ebug/
+[Rr]elease/
+x64/
+build/
+[Bb]in/
+[Oo]bj/
+*.lock.json
diff --git a/deps/rabbitmq_amqp1_0/.travis.yml b/deps/rabbitmq_amqp1_0/.travis.yml
new file mode 100644
index 0000000000..c189375aeb
--- /dev/null
+++ b/deps/rabbitmq_amqp1_0/.travis.yml
@@ -0,0 +1,66 @@
+# vim:sw=2:et:
+
+os: linux
+dist: xenial
+language: elixir
+notifications:
+ email:
+ recipients:
+ - alerts@rabbitmq.com
+ on_success: never
+ on_failure: always
+addons:
+ apt:
+ sources:
+ - sourceline: deb [arch=amd64] https://packages.microsoft.com/ubuntu/16.04/prod xenial main
+ key_url: https://packages.microsoft.com/keys/microsoft.asc
+ packages:
+ - awscli
+ - dotnet-sdk-2.0.0
+ - maven
+cache:
+ apt: true
+env:
+ global:
+ - secure: hsWxOPcSAXQU9CKQSh829aPLt7oFim8lYVPMSfR0GH0JsnxWgayk2w3tyAblOJqpXTKPqYb+TfyeTOc+QQM8pcEtvi5jtr+GAqovI3UkXpR08J5ImVZVf3sfD4s92uoRjw0u8ift/qZxpDLpdDCKJibe3hRMxlvo5LQ5kCkNGFs=
+ - secure: D2xypqCxIxyhX2/PtXhiWa8kZF9itwhvDK3mxEI+67AM8zJBtrIlIcHifnFMM1RDjFzuBWq5t0BAbyuAGUw0EcjOnc3xW01N/ZG4DaHgXjurDSY0rCfkYLHltFjxJTbx2bH6nmgjdKYRb4W9hQJVcB7vtd1g7kVlzYkNevz4Sfk=
+
+ # $base_rmq_ref is used by rabbitmq-components.mk to select the
+ # appropriate branch for dependencies.
+ - base_rmq_ref=master
+
+elixir:
+ - '1.9'
+otp_release:
+ - '21.3'
+ - '22.2'
+
+install:
+ # This project being an Erlang one (we just set language to Elixir
+ # to ensure it is installed), we don't want Travis to run mix(1)
+ # automatically as it will break.
+ skip
+
+script:
+ # $current_rmq_ref is also used by rabbitmq-components.mk to select
+ # the appropriate branch for dependencies.
+ - make check-rabbitmq-components.mk
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+ - make xref
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+ - make tests
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+
+after_failure:
+ - |
+ cd "$TRAVIS_BUILD_DIR"
+ if test -d logs && test "$AWS_ACCESS_KEY_ID" && test "$AWS_SECRET_ACCESS_KEY"; then
+ archive_name="$(basename "$TRAVIS_REPO_SLUG")-$TRAVIS_JOB_NUMBER"
+
+ tar -c --transform "s/^logs/${archive_name}/" -f - logs | \
+ xz > "${archive_name}.tar.xz"
+
+ aws s3 cp "${archive_name}.tar.xz" s3://server-release-pipeline/travis-ci-logs/ \
+ --region eu-west-1 \
+ --acl public-read
+ fi
diff --git a/deps/rabbitmq_amqp1_0/.travis.yml.patch b/deps/rabbitmq_amqp1_0/.travis.yml.patch
new file mode 100644
index 0000000000..299b3f1605
--- /dev/null
+++ b/deps/rabbitmq_amqp1_0/.travis.yml.patch
@@ -0,0 +1,16 @@
+--- ../rabbit_common/.travis.yml 2019-09-13 13:48:46.258483000 +0200
++++ .travis.yml 2020-03-03 13:02:21.836228000 +0100
+@@ -11,8 +11,13 @@
+ on_failure: always
+ addons:
+ apt:
++ sources:
++ - sourceline: deb [arch=amd64] https://packages.microsoft.com/ubuntu/16.04/prod xenial main
++ key_url: https://packages.microsoft.com/keys/microsoft.asc
+ packages:
+ - awscli
++ - dotnet-sdk-2.0.0
++ - maven
+ cache:
+ apt: true
+ env:
diff --git a/deps/rabbitmq_amqp1_0/CODE_OF_CONDUCT.md b/deps/rabbitmq_amqp1_0/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000000..1f6ef1c576
--- /dev/null
+++ b/deps/rabbitmq_amqp1_0/CODE_OF_CONDUCT.md
@@ -0,0 +1,44 @@
+# Contributor Code of Conduct
+
+As contributors and maintainers of this project, and in the interest of fostering an open
+and welcoming community, we pledge to respect all people who contribute through reporting
+issues, posting feature requests, updating documentation, submitting pull requests or
+patches, and other activities.
+
+We are committed to making participation in this project a harassment-free experience for
+everyone, regardless of level of experience, gender, gender identity and expression,
+sexual orientation, disability, personal appearance, body size, race, ethnicity, age,
+religion, or nationality.
+
+Examples of unacceptable behavior by participants include:
+
+ * The use of sexualized language or imagery
+ * Personal attacks
+ * Trolling or insulting/derogatory comments
+ * Public or private harassment
+ * Publishing others' private information, such as physical or electronic addresses,
+ without explicit permission
+ * Other unethical or unprofessional conduct
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments,
+commits, code, wiki edits, issues, and other contributions that are not aligned to this
+Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors
+that they deem inappropriate, threatening, offensive, or harmful.
+
+By adopting this Code of Conduct, project maintainers commit themselves to fairly and
+consistently applying these principles to every aspect of managing this project. Project
+maintainers who do not follow or enforce the Code of Conduct may be permanently removed
+from the project team.
+
+This Code of Conduct applies both within project spaces and in public spaces when an
+individual is representing the project or its community.
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by
+contacting a project maintainer at [info@rabbitmq.com](mailto:info@rabbitmq.com). All complaints will
+be reviewed and investigated and will result in a response that is deemed necessary and
+appropriate to the circumstances. Maintainers are obligated to maintain confidentiality
+with regard to the reporter of an incident.
+
+This Code of Conduct is adapted from the
+[Contributor Covenant](http://contributor-covenant.org), version 1.3.0, available at
+[contributor-covenant.org/version/1/3/0/](http://contributor-covenant.org/version/1/3/0/)
diff --git a/deps/rabbitmq_amqp1_0/CONTRIBUTING.md b/deps/rabbitmq_amqp1_0/CONTRIBUTING.md
new file mode 100644
index 0000000000..45bbcbe62e
--- /dev/null
+++ b/deps/rabbitmq_amqp1_0/CONTRIBUTING.md
@@ -0,0 +1,38 @@
+## Overview
+
+RabbitMQ projects use pull requests to discuss, collaborate on and accept code contributions.
+Pull requests are the primary place for discussing code changes.
+
+## How to Contribute
+
+The process is fairly standard:
+
+ * Fork the repository or repositories you plan on contributing to
+ * Clone [RabbitMQ umbrella repository](https://github.com/rabbitmq/rabbitmq-public-umbrella)
+ * `cd umbrella`, `make co`
+ * Create a branch with a descriptive name in the relevant repositories
+ * Make your changes, run tests, commit with a [descriptive message](http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html), push to your fork
+ * Submit pull requests with an explanation of what has been changed and **why**
+ * Submit a filled out and signed [Contributor Agreement](https://github.com/rabbitmq/ca#how-to-submit) if needed (see below)
+ * Be patient. We will get to your pull request eventually
+
+If what you are going to work on is a substantial change, please first ask the core team
+for their opinion on the [RabbitMQ mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
+
+
+## Code of Conduct
+
+See [CODE_OF_CONDUCT.md](./CODE_OF_CONDUCT.md).
+
+
+## Contributor Agreement
+
+If you want to contribute a non-trivial change, please submit a signed copy of our
+[Contributor Agreement](https://github.com/rabbitmq/ca#how-to-submit) around the time
+you submit your pull request. This will make it much easier (in some cases, possible)
+for the RabbitMQ team at Pivotal to merge your contribution.
+
+
+## Where to Ask Questions
+
+If something isn't clear, feel free to ask on our [mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
diff --git a/deps/rabbitmq_amqp1_0/LICENSE b/deps/rabbitmq_amqp1_0/LICENSE
new file mode 100644
index 0000000000..f2da65d175
--- /dev/null
+++ b/deps/rabbitmq_amqp1_0/LICENSE
@@ -0,0 +1,4 @@
+This package is licensed under the MPL 2.0. For the MPL 2.0, please see LICENSE-MPL-RabbitMQ.
+
+If you have any questions regarding licensing, please contact us at
+info@rabbitmq.com.
diff --git a/deps/rabbitmq_amqp1_0/LICENSE-MPL-RabbitMQ b/deps/rabbitmq_amqp1_0/LICENSE-MPL-RabbitMQ
new file mode 100644
index 0000000000..14e2f777f6
--- /dev/null
+++ b/deps/rabbitmq_amqp1_0/LICENSE-MPL-RabbitMQ
@@ -0,0 +1,373 @@
+Mozilla Public License Version 2.0
+==================================
+
+1. Definitions
+--------------
+
+1.1. "Contributor"
+ means each individual or legal entity that creates, contributes to
+ the creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+ means the combination of the Contributions of others (if any) used
+ by a Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+ means Source Code Form to which the initial Contributor has attached
+ the notice in Exhibit A, the Executable Form of such Source Code
+ Form, and Modifications of such Source Code Form, in each case
+ including portions thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ (a) that the initial Contributor has attached the notice described
+ in Exhibit B to the Covered Software; or
+
+ (b) that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the
+ terms of a Secondary License.
+
+1.6. "Executable Form"
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+ means a work that combines Covered Software with other material, in
+ a separate file or files, that is not Covered Software.
+
+1.8. "License"
+ means this document.
+
+1.9. "Licensable"
+ means having the right to grant, to the maximum extent possible,
+ whether at the time of the initial grant or subsequently, any and
+ all of the rights conveyed by this License.
+
+1.10. "Modifications"
+ means any of the following:
+
+ (a) any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered
+ Software; or
+
+ (b) any new file in Source Code Form that contains any Covered
+ Software.
+
+1.11. "Patent Claims" of a Contributor
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the
+ License, by the making, using, selling, offering for sale, having
+ made, import, or transfer of either its Contributions or its
+ Contributor Version.
+
+1.12. "Secondary License"
+ means either the GNU General Public License, Version 2.0, the GNU
+ Lesser General Public License, Version 2.1, the GNU Affero General
+ Public License, Version 3.0, or any later versions of those
+ licenses.
+
+1.13. "Source Code Form"
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that
+ controls, is controlled by, or is under common control with You. For
+ purposes of this definition, "control" means (a) the power, direct
+ or indirect, to cause the direction or management of such entity,
+ whether by contract or otherwise, or (b) ownership of more than
+ fifty percent (50%) of the outstanding shares or beneficial
+ ownership of such entity.
+
+2. License Grants and Conditions
+--------------------------------
+
+2.1. Grants
+
+Each Contributor hereby grants You a world-wide, royalty-free,
+non-exclusive license:
+
+(a) under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+(b) under Patent Claims of such Contributor to make, use, sell, offer
+ for sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+The licenses granted in Section 2.1 with respect to any Contribution
+become effective for each Contribution on the date the Contributor first
+distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+The licenses granted in this Section 2 are the only rights granted under
+this License. No additional rights or licenses will be implied from the
+distribution or licensing of Covered Software under this License.
+Notwithstanding Section 2.1(b) above, no patent license is granted by a
+Contributor:
+
+(a) for any code that a Contributor has removed from Covered Software;
+ or
+
+(b) for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+(c) under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+This License does not grant any rights in the trademarks, service marks,
+or logos of any Contributor (except as may be necessary to comply with
+the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+No Contributor makes additional grants as a result of Your choice to
+distribute the Covered Software under a subsequent version of this
+License (see Section 10.2) or under the terms of a Secondary License (if
+permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+Each Contributor represents that the Contributor believes its
+Contributions are its original creation(s) or it has sufficient rights
+to grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+This License is not intended to limit any rights You have under
+applicable copyright doctrines of fair use, fair dealing, or other
+equivalents.
+
+2.7. Conditions
+
+Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
+in Section 2.1.
+
+3. Responsibilities
+-------------------
+
+3.1. Distribution of Source Form
+
+All distribution of Covered Software in Source Code Form, including any
+Modifications that You create or to which You contribute, must be under
+the terms of this License. You must inform recipients that the Source
+Code Form of the Covered Software is governed by the terms of this
+License, and how they can obtain a copy of this License. You may not
+attempt to alter or restrict the recipients' rights in the Source Code
+Form.
+
+3.2. Distribution of Executable Form
+
+If You distribute Covered Software in Executable Form then:
+
+(a) such Covered Software must also be made available in Source Code
+ Form, as described in Section 3.1, and You must inform recipients of
+ the Executable Form how they can obtain a copy of such Source Code
+ Form by reasonable means in a timely manner, at a charge no more
+ than the cost of distribution to the recipient; and
+
+(b) You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter
+ the recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+You may create and distribute a Larger Work under terms of Your choice,
+provided that You also comply with the requirements of this License for
+the Covered Software. If the Larger Work is a combination of Covered
+Software with a work governed by one or more Secondary Licenses, and the
+Covered Software is not Incompatible With Secondary Licenses, this
+License permits You to additionally distribute such Covered Software
+under the terms of such Secondary License(s), so that the recipient of
+the Larger Work may, at their option, further distribute the Covered
+Software under the terms of either this License or such Secondary
+License(s).
+
+3.4. Notices
+
+You may not remove or alter the substance of any license notices
+(including copyright notices, patent notices, disclaimers of warranty,
+or limitations of liability) contained within the Source Code Form of
+the Covered Software, except that You may alter any license notices to
+the extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+You may choose to offer, and to charge a fee for, warranty, support,
+indemnity or liability obligations to one or more recipients of Covered
+Software. However, You may do so only on Your own behalf, and not on
+behalf of any Contributor. You must make it absolutely clear that any
+such warranty, support, indemnity, or liability obligation is offered by
+You alone, and You hereby agree to indemnify every Contributor for any
+liability incurred by such Contributor as a result of warranty, support,
+indemnity or liability terms You offer. You may include additional
+disclaimers of warranty and limitations of liability specific to any
+jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+---------------------------------------------------
+
+If it is impossible for You to comply with any of the terms of this
+License with respect to some or all of the Covered Software due to
+statute, judicial order, or regulation then You must: (a) comply with
+the terms of this License to the maximum extent possible; and (b)
+describe the limitations and the code they affect. Such description must
+be placed in a text file included with all distributions of the Covered
+Software under this License. Except to the extent prohibited by statute
+or regulation, such description must be sufficiently detailed for a
+recipient of ordinary skill to be able to understand it.
+
+5. Termination
+--------------
+
+5.1. The rights granted under this License will terminate automatically
+if You fail to comply with any of its terms. However, if You become
+compliant, then the rights granted under this License from a particular
+Contributor are reinstated (a) provisionally, unless and until such
+Contributor explicitly and finally terminates Your grants, and (b) on an
+ongoing basis, if such Contributor fails to notify You of the
+non-compliance by some reasonable means prior to 60 days after You have
+come back into compliance. Moreover, Your grants from a particular
+Contributor are reinstated on an ongoing basis if such Contributor
+notifies You of the non-compliance by some reasonable means, this is the
+first time You have received notice of non-compliance with this License
+from such Contributor, and You become compliant prior to 30 days after
+Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+infringement claim (excluding declaratory judgment actions,
+counter-claims, and cross-claims) alleging that a Contributor Version
+directly or indirectly infringes any patent, then the rights granted to
+You by any and all Contributors for the Covered Software under Section
+2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all
+end user license agreements (excluding distributors and resellers) which
+have been validly granted by You or Your distributors under this License
+prior to termination shall survive termination.
+
+************************************************************************
+* *
+* 6. Disclaimer of Warranty *
+* ------------------------- *
+* *
+* Covered Software is provided under this License on an "as is" *
+* basis, without warranty of any kind, either expressed, implied, or *
+* statutory, including, without limitation, warranties that the *
+* Covered Software is free of defects, merchantable, fit for a *
+* particular purpose or non-infringing. The entire risk as to the *
+* quality and performance of the Covered Software is with You. *
+* Should any Covered Software prove defective in any respect, You *
+* (not any Contributor) assume the cost of any necessary servicing, *
+* repair, or correction. This disclaimer of warranty constitutes an *
+* essential part of this License. No use of any Covered Software is *
+* authorized under this License except under this disclaimer. *
+* *
+************************************************************************
+
+************************************************************************
+* *
+* 7. Limitation of Liability *
+* -------------------------- *
+* *
+* Under no circumstances and under no legal theory, whether tort *
+* (including negligence), contract, or otherwise, shall any *
+* Contributor, or anyone who distributes Covered Software as *
+* permitted above, be liable to You for any direct, indirect, *
+* special, incidental, or consequential damages of any character *
+* including, without limitation, damages for lost profits, loss of *
+* goodwill, work stoppage, computer failure or malfunction, or any *
+* and all other commercial damages or losses, even if such party *
+* shall have been informed of the possibility of such damages. This *
+* limitation of liability shall not apply to liability for death or *
+* personal injury resulting from such party's negligence to the *
+* extent applicable law prohibits such limitation. Some *
+* jurisdictions do not allow the exclusion or limitation of *
+* incidental or consequential damages, so this exclusion and *
+* limitation may not apply to You. *
+* *
+************************************************************************
+
+8. Litigation
+-------------
+
+Any litigation relating to this License may be brought only in the
+courts of a jurisdiction where the defendant maintains its principal
+place of business and such litigation shall be governed by laws of that
+jurisdiction, without reference to its conflict-of-law provisions.
+Nothing in this Section shall prevent a party's ability to bring
+cross-claims or counter-claims.
+
+9. Miscellaneous
+----------------
+
+This License represents the complete agreement concerning the subject
+matter hereof. If any provision of this License is held to be
+unenforceable, such provision shall be reformed only to the extent
+necessary to make it enforceable. Any law or regulation which provides
+that the language of a contract shall be construed against the drafter
+shall not be used to construe this License against a Contributor.
+
+10. Versions of the License
+---------------------------
+
+10.1. New Versions
+
+Mozilla Foundation is the license steward. Except as provided in Section
+10.3, no one other than the license steward has the right to modify or
+publish new versions of this License. Each version will be given a
+distinguishing version number.
+
+10.2. Effect of New Versions
+
+You may distribute the Covered Software under the terms of the version
+of the License under which You originally received the Covered Software,
+or under the terms of any subsequent version published by the license
+steward.
+
+10.3. Modified Versions
+
+If you create software not governed by this License, and you want to
+create a new license for such software, you may create and use a
+modified version of this License if you rename the license and remove
+any references to the name of the license steward (except to note that
+such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+Licenses
+
+If You choose to distribute Source Code Form that is Incompatible With
+Secondary Licenses under the terms of this version of the License, the
+notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+-------------------------------------------
+
+ This Source Code Form is subject to the terms of the Mozilla Public
+ License, v. 2.0. If a copy of the MPL was not distributed with this
+ file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular
+file, then You may include the notice in a location (such as a LICENSE
+file in a relevant directory) where a recipient would be likely to look
+for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+---------------------------------------------------------
+
+ This Source Code Form is "Incompatible With Secondary Licenses", as
+ defined by the Mozilla Public License, v. 2.0.
diff --git a/deps/rabbitmq_amqp1_0/Makefile b/deps/rabbitmq_amqp1_0/Makefile
new file mode 100644
index 0000000000..5d9eddbb24
--- /dev/null
+++ b/deps/rabbitmq_amqp1_0/Makefile
@@ -0,0 +1,52 @@
+PROJECT = rabbitmq_amqp1_0
+PROJECT_DESCRIPTION = AMQP 1.0 support for RabbitMQ
+
+define PROJECT_ENV
+[
+ {default_user, "guest"},
+ {default_vhost, <<"/">>},
+ {protocol_strict_mode, false}
+ ]
+endef
+
+define PROJECT_APP_EXTRA_KEYS
+ {broker_version_requirements, []}
+endef
+
+BUILD_DEPS = rabbitmq_codegen
+DEPS = rabbit_common rabbit amqp_client amqp10_common
+TEST_DEPS = rabbitmq_ct_helpers rabbitmq_ct_client_helpers amqp10_client
+
+DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-plugin.mk
+DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk
+
+.DEFAULT_GOAL = all
+$(PROJECT).d:: $(EXTRA_SOURCES)
+
+# FIXME: Use erlang.mk patched for RabbitMQ, while waiting for PRs to be
+# reviewed and merged.
+
+ERLANG_MK_REPO = https://github.com/rabbitmq/erlang.mk.git
+ERLANG_MK_COMMIT = rabbitmq-tmp
+
+include rabbitmq-components.mk
+include erlang.mk
+
+# --------------------------------------------------------------------
+# Framing sources generation.
+# --------------------------------------------------------------------
+
+clean:: clean-extra-sources
+
+clean-extra-sources:
+ $(gen_verbose) rm -f $(EXTRA_SOURCES)
+
+distclean:: distclean-dotnet-tests distclean-java-tests
+
+distclean-dotnet-tests:
+ $(gen_verbose) cd test/system_SUITE_data/dotnet-tests && \
+ rm -rf bin obj && \
+ rm -f project.lock.json TestResult.xml
+
+distclean-java-tests:
+ $(gen_verbose) cd test/system_SUITE_data/java-tests && mvn clean
diff --git a/deps/rabbitmq_amqp1_0/README.md b/deps/rabbitmq_amqp1_0/README.md
new file mode 100644
index 0000000000..10dd5a7df0
--- /dev/null
+++ b/deps/rabbitmq_amqp1_0/README.md
@@ -0,0 +1,224 @@
+# AMQP 1.0 support for RabbitMQ
+
+This plugin adds AMQP 1.0 support to RabbitMQ.
+
+Despite the shared name, AMQP 0-9-1 and 1.0 are very different protocols;
+1.0 is therefore treated as a separate protocol supported by RabbitMQ,
+not as a revision of the original that will eventually supersede it.
+
+This plugin is several years old and is moderately mature. It may have certain
+limitations with its current architecture but most major AMQP 1.0 features should be in place.
+
+This plugin supports 0-9-1 and 1.0 client interoperability with certain limitations.
+
+# Configuration
+
+This plugin ships with modern versions of RabbitMQ.
+
+It will listen on the standard AMQP port, 5672. To reconfigure this,
+do so [as you would for 0-9-1](http://www.rabbitmq.com/configure.html). Clients connecting with 0-9-1
+will continue to work on the same port.
+
+The following configuration options (which are specific to the AMQP 1.0 adapter)
+are accepted in the `rabbitmq_amqp1_0` section of the configuration file.
+
+AMQP 1.0 conceptually allows connections that are not authenticated
+with SASL (i.e. where no username and password is supplied). By
+default these will connect as the "guest" user. To change this, set
+`default_user` to a string with the name of the user to use, or the
+atom `none` to prevent unauthenticated connections.
+
+ {default_user, "guest"}
+
+The default virtual host can be specified using the `default_vhost` setting.
+See the "Virtual Hosts" section below for a description.
+
+ {default_vhost, <<"/">>}
+
+The `protocol_strict_mode` setting controls how strictly peers must conform
+to the specification. The default is not to enforce strictness: incorrect
+byte counts in frames and inaccuracies in flow control from peers are
+tolerated rather than treated as fatal.
+
+ {protocol_strict_mode, false}
+
+
+Configuration example using [sysctl config format](https://next.rabbitmq.com/configure.html#config-file-formats)
+(currently only available in RabbitMQ master):
+
+ amqp1_0.default_user = guest
+ amqp1_0.default_vhost = /
+ amqp1_0.protocol_strict_mode = false
+
+
+## Clients we have tested
+
+The current field of AMQP 1.0 clients is somewhat limited. Therefore
+we have not achieved as much interoperability as we might like.
+
+We have tested against:
+
+ * SwiftMQ Java client [1]
+ We have done most of our testing against this client and things seem
+ to work.
+
+ * QPid / Proton C client [2]
+ We have successfully tested against the "proton" command line tool
+ this client ships with.
+
+ * QPid / Proton Java client [2]
+ We have not been able to get this client to get as far as opening a
+ network connection (tested against 0.2 and 0.4).
+
+ * Windows Azure Service Bus [3]
+ It seems that the URI scheme used by this client assumes that it is
+ connecting to Azure; it does not seem to be possible to get it to
+ connect to another server.
+
+[1] http://www.swiftmq.com/products/router/swiftlets/sys_amqp/client/index.html
+
+[2] http://qpid.apache.org/proton/
+
+[3] http://www.windowsazure.com/en-us/develop/net/how-to-guides/service-bus-amqp/
+
+As new clients appear we will of course work on interoperability with them.
+
+# Interoperability with AMQP 0-9-1
+
+## Message payloads
+
+This implementation as a plugin aims for useful interoperability with
+AMQP 0-9-1 clients. AMQP 1.0 messages can be far more structured than
+AMQP 0-9-1 messages, which simply have a payload of bytes.
+
+The way we deal with this is that an AMQP 1.0 message with a single
+data section will be transcoded to an AMQP 0-9-1 message with just the
+bytes from that section, and vice versa. An AMQP 1.0 message with any other
+payload will keep exactly that payload (i.e., encoded AMQP 1.0
+sections, concatenated), and for AMQP 0-9-1 clients the `type` field
+of the `basic.properties` will contain the value `"amqp-1.0"`.
+
+Thus, AMQP 0-9-1 clients may receive messages that they cannot
+understand (if they don't have an AMQP 1.0 codec handy, anyway);
+however, these will at least be labelled. AMQP 1.0 clients shall
+receive exactly what they expect.
+
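+For example, an AMQP 0-9-1 consumer can tell the two cases apart by
+inspecting the `type` property. A minimal sketch using the Erlang
+AMQP 0-9-1 client (records are from `amqp_client.hrl`; the function
+name is illustrative):
+
+    classify_payload(#amqp_msg{props = #'P_basic'{type = Type},
+                               payload = Payload}) ->
+        case Type of
+            %% Concatenated, encoded AMQP 1.0 sections; an AMQP 1.0
+            %% codec is needed to decode them.
+            <<"amqp-1.0">> -> {amqp_1_0_sections, Payload};
+            %% Plain bytes from a single AMQP 1.0 data section (or a
+            %% native 0-9-1 publish).
+            _              -> {bytes, Payload}
+        end.
+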
+## Message properties, annotations, headers, etc.
+
+The headers and properties map as follows:
+
+ AMQP 1.0 AMQP 0-9-1
+ Header Properties
+ durable <---------------> delivery-mode [1]
+ priority <---------------> priority
+ ttl <---------------> expiration [2]
+ first-acquirer [3]
+ delivery-count [4]
+ Properties
+ message-id <---------------> message-id [5]
+ user-id <---------------> user-id
+ to [6]
+ subject [6]
+ reply-to <---------------> reply-to [6]
+ correlation-id <---------------> correlation-id
+ content-type <---------------> content-type
+ content-encoding <---------------> content-encoding
+ absolute-expiry-time [7]
+ creation-time <---------------> timestamp
+ Application headers <-------/-------> headers [8]
+
+[1] `durable` is `true` if and only if `delivery-mode` is `2`.
+
+[2] `expiration` is a shortstr; since RabbitMQ will expect this to be
+an encoded string, we translate a `ttl` to the string representation
+of its integer value.
+
+[3] `first-acquirer` is true if and only if the `basic.deliver` field
+`redelivered` is false.
+
+[4] `delivery-count` is left null.
+
+[5] AMQP 0-9-1 expects this to be a shortstr.
+
+[6] See Routing and Addressing below.
+
+[7] `absolute-expiry-time` has no corresponding field in AMQP 0-9-1,
+and is not supported in RabbitMQ in any case.
+
+[8] The application headers section and the `basic.properties` field
+`headers` are natural analogues. However, rather than try to transcode
+an AMQP 1.0 map to an AMQP 0-9-1 field-table, currently we discard
+application headers (of AMQP 1.0 messages) and headers (of AMQP 0-9-1
+messages sent through to AMQP 1.0). In other words, the (AMQP 1.0)
+application headers section is only available to AMQP 1.0 clients, and
+the (AMQP 0-9-1) headers field is only available to AMQP 0-9-1
+clients.
+
+Note that properties (in both AMQP 1.0 and AMQP 0-9-1) and application
+properties (in AMQP 1.0) are immutable; however, this can only apply
+when the sending and receiving clients are using the same protocol.
+
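+As an illustration of the mapping, a durable AMQP 1.0 message with a
+`ttl` should reach AMQP 0-9-1 consumers with `delivery-mode` 2 and a
+string-valued `expiration`. A minimal sketch using the `amqp10_client`
+library from this repository (`Sender` is assumed to be an already
+attached sender link):
+
+    Msg0 = amqp10_msg:new(<<"dtag-1">>, <<"payload">>),
+    %% durable <-> delivery-mode 2; ttl (ms) <-> expiration (string)
+    Msg1 = amqp10_msg:set_headers(#{durable => true, ttl => 60000}, Msg0),
+    Msg2 = amqp10_msg:set_properties(#{correlation_id => <<"corr-1">>}, Msg1),
+    ok = amqp10_client:send_msg(Sender, Msg2).
+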
+## Routing and Addressing
+
+In AMQP 1.0, source and destination addresses are opaque values, and
+each message may have a `subject` field value.
+
+For targets, addresses are:
+
+ = "/exchange/" X "/" RK Publish to exchange X with routing key RK
+ | "/exchange/" X Publish to exchange X with message subject as routing key
+ | "/topic/" RK Publish to amq.topic with routing key RK
+ | "/amq/queue/" Q Publish to default exchange with routing key Q
+ | "/queue/" Q Publish to default exchange with routing key Q
+ | Q (no leading slash) Publish to default exchange with routing key Q
+ | "/queue" Publish to default exchange with message subj as routing key
+
+For sources, addresses are:
+
+ = "/exchange/" X "/" RK Consume from temp queue bound to X with routing key RK
+ | "/topic/" RK Consume from temp queue bound to amq.topic with routing key RK
+ | "/amq/queue/" Q Consume from Q
+ | "/queue/" Q Consume from Q
+ | Q (no leading slash) Consume from Q
+
+The intent is that the source and destination address formats should be
+mostly the same as those supported by the STOMP plugin, to the extent
+permitted by AMQP 1.0 semantics.
+
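+For example, links can be attached to these addresses with the
+`amqp10_client` library from this repository (session setup omitted;
+link names and addresses are illustrative):
+
+    %% Publish to exchange "amq.direct" with routing key "my-key"
+    {ok, Sender} = amqp10_client:attach_sender_link(
+                     Session, <<"my-sender">>,
+                     <<"/exchange/amq.direct/my-key">>),
+    %% Consume from queue "my-queue"
+    {ok, Receiver} = amqp10_client:attach_receiver_link(
+                       Session, <<"my-receiver">>,
+                       <<"/queue/my-queue">>).
+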
+## Virtual Hosts
+
+AMQP 1.0 has no equivalent of AMQP 0-9-1 virtual hosts. A virtual host
+on the broker may be addressed when opening an AMQP 1.0 connection by
+setting the `hostname` field to the virtual host name prefixed with
+"vhost:". Setting the `hostname` field to "vhost:/" addresses the
+default virtual host. If the `hostname` field does not start with
+"vhost:", the `default_vhost` configuration setting is consulted.
+
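+For example, opening a connection to a virtual host named `/test` with
+the `amqp10_client` library from this repository (address and
+credentials are illustrative):
+
+    {ok, Conn} = amqp10_client:open_connection(
+                   #{address => "localhost",
+                     port => 5672,
+                     hostname => <<"vhost:/test">>,
+                     sasl => {plain, <<"guest">>, <<"guest">>}}).
+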
+# Limitations and unsupported features
+
+At the moment, the RabbitMQ AMQP 1.0 adapter does not support:
+
+ - "Exactly once" delivery [9]
+ - Link recovery [9]
+ - Full message fragmentation [10]
+ - Resuming messages
+ - "Modified" outcome
+ - Filters [11]
+ - Transactions
+ - Source/target expiry-policy other than link-detach and timeout
+ other than 0
+ - Max message size for links
+ - Aborted transfers
+ - TLS negotiation via the AMQP2100 handshake (although SSL is supported)
+
+[9] We do not deduplicate as a target, though we may resend as a
+source (messages that have no settled outcome when an outgoing link is
+detached will be requeued).
+
+[10] We do fragment messages over multiple frames; however, if this
+would overflow the session window we may discard or requeue messages.
+
+[11] In principle, filters for consuming from an exchange could
+translate to AMQP 0-9-1 bindings. This is not implemented, so
+effectively only consuming from fanout exchanges and queues is useful
+currently.
diff --git a/deps/rabbitmq_amqp1_0/erlang.mk b/deps/rabbitmq_amqp1_0/erlang.mk
new file mode 100644
index 0000000000..fce4be0b0a
--- /dev/null
+++ b/deps/rabbitmq_amqp1_0/erlang.mk
@@ -0,0 +1,7808 @@
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+.PHONY: all app apps deps search rel relup docs install-docs check tests clean distclean help erlang-mk
+
+ERLANG_MK_FILENAME := $(realpath $(lastword $(MAKEFILE_LIST)))
+export ERLANG_MK_FILENAME
+
+ERLANG_MK_VERSION = 2019.07.01-40-geb3e4b0
+ERLANG_MK_WITHOUT =
+
+# Make 3.81 and 3.82 are deprecated.
+
+ifeq ($(MAKELEVEL)$(MAKE_VERSION),03.81)
+$(warning Please upgrade to GNU Make 4 or later: https://erlang.mk/guide/installation.html)
+endif
+
+ifeq ($(MAKELEVEL)$(MAKE_VERSION),03.82)
+$(warning Please upgrade to GNU Make 4 or later: https://erlang.mk/guide/installation.html)
+endif
+
+# Core configuration.
+
+PROJECT ?= $(notdir $(CURDIR))
+PROJECT := $(strip $(PROJECT))
+
+PROJECT_VERSION ?= rolling
+PROJECT_MOD ?= $(PROJECT)_app
+PROJECT_ENV ?= []
+
+# Verbosity.
+
+V ?= 0
+
+verbose_0 = @
+verbose_2 = set -x;
+verbose = $(verbose_$(V))
+
+ifeq ($(V),3)
+SHELL := $(SHELL) -x
+endif
+
+gen_verbose_0 = @echo " GEN " $@;
+gen_verbose_2 = set -x;
+gen_verbose = $(gen_verbose_$(V))
+
+gen_verbose_esc_0 = @echo " GEN " $$@;
+gen_verbose_esc_2 = set -x;
+gen_verbose_esc = $(gen_verbose_esc_$(V))
+
+# Temporary files directory.
+
+ERLANG_MK_TMP ?= $(CURDIR)/.erlang.mk
+export ERLANG_MK_TMP
+
+# "erl" command.
+
+ERL = erl +A1 -noinput -boot no_dot_erlang
+
+# Platform detection.
+
+ifeq ($(PLATFORM),)
+UNAME_S := $(shell uname -s)
+
+ifeq ($(UNAME_S),Linux)
+PLATFORM = linux
+else ifeq ($(UNAME_S),Darwin)
+PLATFORM = darwin
+else ifeq ($(UNAME_S),SunOS)
+PLATFORM = solaris
+else ifeq ($(UNAME_S),GNU)
+PLATFORM = gnu
+else ifeq ($(UNAME_S),FreeBSD)
+PLATFORM = freebsd
+else ifeq ($(UNAME_S),NetBSD)
+PLATFORM = netbsd
+else ifeq ($(UNAME_S),OpenBSD)
+PLATFORM = openbsd
+else ifeq ($(UNAME_S),DragonFly)
+PLATFORM = dragonfly
+else ifeq ($(shell uname -o),Msys)
+PLATFORM = msys2
+else
+$(error Unable to detect platform. Please open a ticket with the output of uname -a.)
+endif
+
+export PLATFORM
+endif
+
+# Core targets.
+
+all:: deps app rel
+
+# Noop to avoid a Make warning when there's nothing to do.
+rel::
+ $(verbose) :
+
+relup:: deps app
+
+check:: tests
+
+clean:: clean-crashdump
+
+clean-crashdump:
+ifneq ($(wildcard erl_crash.dump),)
+ $(gen_verbose) rm -f erl_crash.dump
+endif
+
+distclean:: clean distclean-tmp
+
+$(ERLANG_MK_TMP):
+ $(verbose) mkdir -p $(ERLANG_MK_TMP)
+
+distclean-tmp:
+ $(gen_verbose) rm -rf $(ERLANG_MK_TMP)
+
+help::
+ $(verbose) printf "%s\n" \
+ "erlang.mk (version $(ERLANG_MK_VERSION)) is distributed under the terms of the ISC License." \
+ "Copyright (c) 2013-2016 Loïc Hoguin <essen@ninenines.eu>" \
+ "" \
+ "Usage: [V=1] $(MAKE) [target]..." \
+ "" \
+ "Core targets:" \
+ " all Run deps, app and rel targets in that order" \
+ " app Compile the project" \
+ " deps Fetch dependencies (if needed) and compile them" \
+ " fetch-deps Fetch dependencies recursively (if needed) without compiling them" \
+ " list-deps List dependencies recursively on stdout" \
+ " search q=... Search for a package in the built-in index" \
+ " rel Build a release for this project, if applicable" \
+ " docs Build the documentation for this project" \
+ " install-docs Install the man pages for this project" \
+ " check Compile and run all tests and analysis for this project" \
+ " tests Run the tests for this project" \
+ " clean Delete temporary and output files from most targets" \
+ " distclean Delete all temporary and output files" \
+ " help Display this help and exit" \
+ " erlang-mk Update erlang.mk to the latest version"
+
+# Core functions.
+
+empty :=
+space := $(empty) $(empty)
+tab := $(empty) $(empty)
+comma := ,
+
+define newline
+
+
+endef
+
+define comma_list
+$(subst $(space),$(comma),$(strip $(1)))
+endef
+
+define escape_dquotes
+$(subst ",\",$1)
+endef
+
+# Adding erlang.mk to make Erlang scripts who call init:get_plain_arguments() happy.
+define erlang
+$(ERL) $2 -pz $(ERLANG_MK_TMP)/rebar/ebin -eval "$(subst $(newline),,$(call escape_dquotes,$1))" -- erlang.mk
+endef
+
+ifeq ($(PLATFORM),msys2)
+core_native_path = $(shell cygpath -m $1)
+else
+core_native_path = $1
+endif
+
+core_http_get = curl -Lf$(if $(filter-out 0,$(V)),,s)o $(call core_native_path,$1) $2
+
+core_eq = $(and $(findstring $(1),$(2)),$(findstring $(2),$(1)))
+
+# We skip files that contain spaces because they end up causing issues.
+core_find = $(if $(wildcard $1),$(shell find $(1:%/=%) \( -type l -o -type f \) -name $(subst *,\*,$2) | grep -v " "))
+
+core_lc = $(subst A,a,$(subst B,b,$(subst C,c,$(subst D,d,$(subst E,e,$(subst F,f,$(subst G,g,$(subst H,h,$(subst I,i,$(subst J,j,$(subst K,k,$(subst L,l,$(subst M,m,$(subst N,n,$(subst O,o,$(subst P,p,$(subst Q,q,$(subst R,r,$(subst S,s,$(subst T,t,$(subst U,u,$(subst V,v,$(subst W,w,$(subst X,x,$(subst Y,y,$(subst Z,z,$(1)))))))))))))))))))))))))))
+
+core_ls = $(filter-out $(1),$(shell echo $(1)))
+
+# @todo Use a solution that does not require using perl.
+core_relpath = $(shell perl -e 'use File::Spec; print File::Spec->abs2rel(@ARGV) . "\n"' $1 $2)
+
+define core_render
+ printf -- '$(subst $(newline),\n,$(subst %,%%,$(subst ','\'',$(subst $(tab),$(WS),$(call $(1))))))\n' > $(2)
+endef
+
+# Automated update.
+
+ERLANG_MK_REPO ?= https://github.com/ninenines/erlang.mk
+ERLANG_MK_COMMIT ?=
+ERLANG_MK_BUILD_CONFIG ?= build.config
+ERLANG_MK_BUILD_DIR ?= .erlang.mk.build
+
+erlang-mk: WITHOUT ?= $(ERLANG_MK_WITHOUT)
+erlang-mk:
+ifdef ERLANG_MK_COMMIT
+ $(verbose) git clone $(ERLANG_MK_REPO) $(ERLANG_MK_BUILD_DIR)
+ $(verbose) cd $(ERLANG_MK_BUILD_DIR) && git checkout $(ERLANG_MK_COMMIT)
+else
+ $(verbose) git clone --depth 1 $(ERLANG_MK_REPO) $(ERLANG_MK_BUILD_DIR)
+endif
+ $(verbose) if [ -f $(ERLANG_MK_BUILD_CONFIG) ]; then cp $(ERLANG_MK_BUILD_CONFIG) $(ERLANG_MK_BUILD_DIR)/build.config; fi
+ $(gen_verbose) $(MAKE) --no-print-directory -C $(ERLANG_MK_BUILD_DIR) WITHOUT='$(strip $(WITHOUT))' UPGRADE=1
+ $(verbose) cp $(ERLANG_MK_BUILD_DIR)/erlang.mk ./erlang.mk
+ $(verbose) rm -rf $(ERLANG_MK_BUILD_DIR)
+ $(verbose) rm -rf $(ERLANG_MK_TMP)
+
+# The erlang.mk package index is bundled in the default erlang.mk build.
+# Search for the string "copyright" to skip to the rest of the code.
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-kerl
+
+KERL_INSTALL_DIR ?= $(HOME)/erlang
+
+ifeq ($(strip $(KERL)),)
+KERL := $(ERLANG_MK_TMP)/kerl/kerl
+endif
+
+KERL_DIR = $(ERLANG_MK_TMP)/kerl
+
+export KERL
+
+KERL_GIT ?= https://github.com/kerl/kerl
+KERL_COMMIT ?= master
+
+KERL_MAKEFLAGS ?=
+
+OTP_GIT ?= https://github.com/erlang/otp
+
+define kerl_otp_target
+$(KERL_INSTALL_DIR)/$(1): $(KERL)
+ $(verbose) if [ ! -d $$@ ]; then \
+ MAKEFLAGS="$(KERL_MAKEFLAGS)" $(KERL) build git $(OTP_GIT) $(1) $(1); \
+ $(KERL) install $(1) $(KERL_INSTALL_DIR)/$(1); \
+ fi
+endef
+
+define kerl_hipe_target
+$(KERL_INSTALL_DIR)/$1-native: $(KERL)
+ $(verbose) if [ ! -d $$@ ]; then \
+ KERL_CONFIGURE_OPTIONS=--enable-native-libs \
+ MAKEFLAGS="$(KERL_MAKEFLAGS)" $(KERL) build git $(OTP_GIT) $1 $1-native; \
+ $(KERL) install $1-native $(KERL_INSTALL_DIR)/$1-native; \
+ fi
+endef
+
+$(KERL): $(KERL_DIR)
+
+$(KERL_DIR): | $(ERLANG_MK_TMP)
+ $(gen_verbose) git clone --depth 1 $(KERL_GIT) $(ERLANG_MK_TMP)/kerl
+ $(verbose) cd $(ERLANG_MK_TMP)/kerl && git checkout $(KERL_COMMIT)
+ $(verbose) chmod +x $(KERL)
+
+distclean:: distclean-kerl
+
+distclean-kerl:
+ $(gen_verbose) rm -rf $(KERL_DIR)
+
+# Allow users to select which version of Erlang/OTP to use for a project.
+
+ifneq ($(strip $(LATEST_ERLANG_OTP)),)
+# In some environments it is necessary to filter out master.
+ERLANG_OTP := $(notdir $(lastword $(sort\
+ $(filter-out $(KERL_INSTALL_DIR)/master $(KERL_INSTALL_DIR)/OTP_R%,\
+ $(filter-out %-rc1 %-rc2 %-rc3,$(wildcard $(KERL_INSTALL_DIR)/*[^-native]))))))
+endif
+
+ERLANG_OTP ?=
+ERLANG_HIPE ?=
+
+# Use kerl to enforce a specific Erlang/OTP version for a project.
+ifneq ($(strip $(ERLANG_OTP)),)
+export PATH := $(KERL_INSTALL_DIR)/$(ERLANG_OTP)/bin:$(PATH)
+SHELL := env PATH=$(PATH) $(SHELL)
+$(eval $(call kerl_otp_target,$(ERLANG_OTP)))
+
+# Build Erlang/OTP only if it doesn't already exist.
+ifeq ($(wildcard $(KERL_INSTALL_DIR)/$(ERLANG_OTP))$(BUILD_ERLANG_OTP),)
+$(info Building Erlang/OTP $(ERLANG_OTP)... Please wait...)
+$(shell $(MAKE) $(KERL_INSTALL_DIR)/$(ERLANG_OTP) ERLANG_OTP=$(ERLANG_OTP) BUILD_ERLANG_OTP=1 >&2)
+endif
+
+else
+# Same for a HiPE enabled VM.
+ifneq ($(strip $(ERLANG_HIPE)),)
+export PATH := $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native/bin:$(PATH)
+SHELL := env PATH=$(PATH) $(SHELL)
+$(eval $(call kerl_hipe_target,$(ERLANG_HIPE)))
+
+# Build Erlang/OTP only if it doesn't already exist.
+ifeq ($(wildcard $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native)$(BUILD_ERLANG_OTP),)
+$(info Building HiPE-enabled Erlang/OTP $(ERLANG_OTP)... Please wait...)
+$(shell $(MAKE) $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native ERLANG_HIPE=$(ERLANG_HIPE) BUILD_ERLANG_OTP=1 >&2)
+endif
+
+endif
+endif
+
+PACKAGES += aberth
+pkg_aberth_name = aberth
+pkg_aberth_description = Generic BERT-RPC server in Erlang
+pkg_aberth_homepage = https://github.com/a13x/aberth
+pkg_aberth_fetch = git
+pkg_aberth_repo = https://github.com/a13x/aberth
+pkg_aberth_commit = master
+
+PACKAGES += active
+pkg_active_name = active
+pkg_active_description = Active development for Erlang: rebuild and reload source/binary files while the VM is running
+pkg_active_homepage = https://github.com/proger/active
+pkg_active_fetch = git
+pkg_active_repo = https://github.com/proger/active
+pkg_active_commit = master
+
+PACKAGES += actordb_core
+pkg_actordb_core_name = actordb_core
+pkg_actordb_core_description = ActorDB main source
+pkg_actordb_core_homepage = http://www.actordb.com/
+pkg_actordb_core_fetch = git
+pkg_actordb_core_repo = https://github.com/biokoda/actordb_core
+pkg_actordb_core_commit = master
+
+PACKAGES += actordb_thrift
+pkg_actordb_thrift_name = actordb_thrift
+pkg_actordb_thrift_description = Thrift API for ActorDB
+pkg_actordb_thrift_homepage = http://www.actordb.com/
+pkg_actordb_thrift_fetch = git
+pkg_actordb_thrift_repo = https://github.com/biokoda/actordb_thrift
+pkg_actordb_thrift_commit = master
+
+PACKAGES += aleppo
+pkg_aleppo_name = aleppo
+pkg_aleppo_description = Alternative Erlang Pre-Processor
+pkg_aleppo_homepage = https://github.com/ErlyORM/aleppo
+pkg_aleppo_fetch = git
+pkg_aleppo_repo = https://github.com/ErlyORM/aleppo
+pkg_aleppo_commit = master
+
+PACKAGES += alog
+pkg_alog_name = alog
+pkg_alog_description = Simply the best logging framework for Erlang
+pkg_alog_homepage = https://github.com/siberian-fast-food/alogger
+pkg_alog_fetch = git
+pkg_alog_repo = https://github.com/siberian-fast-food/alogger
+pkg_alog_commit = master
+
+PACKAGES += amqp_client
+pkg_amqp_client_name = amqp_client
+pkg_amqp_client_description = RabbitMQ Erlang AMQP client
+pkg_amqp_client_homepage = https://www.rabbitmq.com/erlang-client-user-guide.html
+pkg_amqp_client_fetch = git
+pkg_amqp_client_repo = https://github.com/rabbitmq/rabbitmq-erlang-client.git
+pkg_amqp_client_commit = master
+
+PACKAGES += annotations
+pkg_annotations_name = annotations
+pkg_annotations_description = Simple code instrumentation utilities
+pkg_annotations_homepage = https://github.com/hyperthunk/annotations
+pkg_annotations_fetch = git
+pkg_annotations_repo = https://github.com/hyperthunk/annotations
+pkg_annotations_commit = master
+
+PACKAGES += antidote
+pkg_antidote_name = antidote
+pkg_antidote_description = Large-scale computation without synchronisation
+pkg_antidote_homepage = https://syncfree.lip6.fr/
+pkg_antidote_fetch = git
+pkg_antidote_repo = https://github.com/SyncFree/antidote
+pkg_antidote_commit = master
+
+PACKAGES += apns
+pkg_apns_name = apns
+pkg_apns_description = Apple Push Notification Server for Erlang
+pkg_apns_homepage = http://inaka.github.com/apns4erl
+pkg_apns_fetch = git
+pkg_apns_repo = https://github.com/inaka/apns4erl
+pkg_apns_commit = master
+
+PACKAGES += asciideck
+pkg_asciideck_name = asciideck
+pkg_asciideck_description = Asciidoc for Erlang.
+pkg_asciideck_homepage = https://ninenines.eu
+pkg_asciideck_fetch = git
+pkg_asciideck_repo = https://github.com/ninenines/asciideck
+pkg_asciideck_commit = master
+
+PACKAGES += azdht
+pkg_azdht_name = azdht
+pkg_azdht_description = Azureus Distributed Hash Table (DHT) in Erlang
+pkg_azdht_homepage = https://github.com/arcusfelis/azdht
+pkg_azdht_fetch = git
+pkg_azdht_repo = https://github.com/arcusfelis/azdht
+pkg_azdht_commit = master
+
+PACKAGES += backoff
+pkg_backoff_name = backoff
+pkg_backoff_description = Simple exponential backoffs in Erlang
+pkg_backoff_homepage = https://github.com/ferd/backoff
+pkg_backoff_fetch = git
+pkg_backoff_repo = https://github.com/ferd/backoff
+pkg_backoff_commit = master
+
+PACKAGES += barrel_tcp
+pkg_barrel_tcp_name = barrel_tcp
+pkg_barrel_tcp_description = barrel is a generic TCP acceptor pool with low latency in Erlang.
+pkg_barrel_tcp_homepage = https://github.com/benoitc-attic/barrel_tcp
+pkg_barrel_tcp_fetch = git
+pkg_barrel_tcp_repo = https://github.com/benoitc-attic/barrel_tcp
+pkg_barrel_tcp_commit = master
+
+PACKAGES += basho_bench
+pkg_basho_bench_name = basho_bench
+pkg_basho_bench_description = A load-generation and testing tool for basically whatever you can write a returning Erlang function for.
+pkg_basho_bench_homepage = https://github.com/basho/basho_bench
+pkg_basho_bench_fetch = git
+pkg_basho_bench_repo = https://github.com/basho/basho_bench
+pkg_basho_bench_commit = master
+
+PACKAGES += bcrypt
+pkg_bcrypt_name = bcrypt
+pkg_bcrypt_description = Bcrypt Erlang / C library
+pkg_bcrypt_homepage = https://github.com/erlangpack/bcrypt
+pkg_bcrypt_fetch = git
+pkg_bcrypt_repo = https://github.com/erlangpack/bcrypt.git
+pkg_bcrypt_commit = master
+
+PACKAGES += beam
+pkg_beam_name = beam
+pkg_beam_description = BEAM emulator written in Erlang
+pkg_beam_homepage = https://github.com/tonyrog/beam
+pkg_beam_fetch = git
+pkg_beam_repo = https://github.com/tonyrog/beam
+pkg_beam_commit = master
+
+PACKAGES += beanstalk
+pkg_beanstalk_name = beanstalk
+pkg_beanstalk_description = An Erlang client for beanstalkd
+pkg_beanstalk_homepage = https://github.com/tim/erlang-beanstalk
+pkg_beanstalk_fetch = git
+pkg_beanstalk_repo = https://github.com/tim/erlang-beanstalk
+pkg_beanstalk_commit = master
+
+PACKAGES += bear
+pkg_bear_name = bear
+pkg_bear_description = a set of statistics functions for erlang
+pkg_bear_homepage = https://github.com/boundary/bear
+pkg_bear_fetch = git
+pkg_bear_repo = https://github.com/boundary/bear
+pkg_bear_commit = master
+
+PACKAGES += bertconf
+pkg_bertconf_name = bertconf
+pkg_bertconf_description = Make ETS tables out of static BERT files that are auto-reloaded
+pkg_bertconf_homepage = https://github.com/ferd/bertconf
+pkg_bertconf_fetch = git
+pkg_bertconf_repo = https://github.com/ferd/bertconf
+pkg_bertconf_commit = master
+
+PACKAGES += bifrost
+pkg_bifrost_name = bifrost
+pkg_bifrost_description = Erlang FTP Server Framework
+pkg_bifrost_homepage = https://github.com/thorstadt/bifrost
+pkg_bifrost_fetch = git
+pkg_bifrost_repo = https://github.com/thorstadt/bifrost
+pkg_bifrost_commit = master
+
+PACKAGES += binpp
+pkg_binpp_name = binpp
+pkg_binpp_description = Erlang Binary Pretty Printer
+pkg_binpp_homepage = https://github.com/jtendo/binpp
+pkg_binpp_fetch = git
+pkg_binpp_repo = https://github.com/jtendo/binpp
+pkg_binpp_commit = master
+
+PACKAGES += bisect
+pkg_bisect_name = bisect
+pkg_bisect_description = Ordered fixed-size binary dictionary in Erlang
+pkg_bisect_homepage = https://github.com/knutin/bisect
+pkg_bisect_fetch = git
+pkg_bisect_repo = https://github.com/knutin/bisect
+pkg_bisect_commit = master
+
+PACKAGES += bitcask
+pkg_bitcask_name = bitcask
+pkg_bitcask_description = because you need another key/value storage engine
+pkg_bitcask_homepage = https://github.com/basho/bitcask
+pkg_bitcask_fetch = git
+pkg_bitcask_repo = https://github.com/basho/bitcask
+pkg_bitcask_commit = develop
+
+PACKAGES += bitstore
+pkg_bitstore_name = bitstore
+pkg_bitstore_description = A document based ontology development environment
+pkg_bitstore_homepage = https://github.com/bdionne/bitstore
+pkg_bitstore_fetch = git
+pkg_bitstore_repo = https://github.com/bdionne/bitstore
+pkg_bitstore_commit = master
+
+PACKAGES += bootstrap
+pkg_bootstrap_name = bootstrap
+pkg_bootstrap_description = A simple, yet powerful Erlang cluster bootstrapping application.
+pkg_bootstrap_homepage = https://github.com/schlagert/bootstrap
+pkg_bootstrap_fetch = git
+pkg_bootstrap_repo = https://github.com/schlagert/bootstrap
+pkg_bootstrap_commit = master
+
+PACKAGES += boss
+pkg_boss_name = boss
+pkg_boss_description = Erlang web MVC, now featuring Comet
+pkg_boss_homepage = https://github.com/ChicagoBoss/ChicagoBoss
+pkg_boss_fetch = git
+pkg_boss_repo = https://github.com/ChicagoBoss/ChicagoBoss
+pkg_boss_commit = master
+
+PACKAGES += boss_db
+pkg_boss_db_name = boss_db
+pkg_boss_db_description = BossDB: a sharded, caching, pooling, evented ORM for Erlang
+pkg_boss_db_homepage = https://github.com/ErlyORM/boss_db
+pkg_boss_db_fetch = git
+pkg_boss_db_repo = https://github.com/ErlyORM/boss_db
+pkg_boss_db_commit = master
+
+PACKAGES += brod
+pkg_brod_name = brod
+pkg_brod_description = Kafka client in Erlang
+pkg_brod_homepage = https://github.com/klarna/brod
+pkg_brod_fetch = git
+pkg_brod_repo = https://github.com/klarna/brod.git
+pkg_brod_commit = master
+
+PACKAGES += bson
+pkg_bson_name = bson
+pkg_bson_description = BSON documents in Erlang, see bsonspec.org
+pkg_bson_homepage = https://github.com/comtihon/bson-erlang
+pkg_bson_fetch = git
+pkg_bson_repo = https://github.com/comtihon/bson-erlang
+pkg_bson_commit = master
+
+PACKAGES += bullet
+pkg_bullet_name = bullet
+pkg_bullet_description = Simple, reliable, efficient streaming for Cowboy.
+pkg_bullet_homepage = http://ninenines.eu
+pkg_bullet_fetch = git
+pkg_bullet_repo = https://github.com/ninenines/bullet
+pkg_bullet_commit = master
+
+PACKAGES += cache
+pkg_cache_name = cache
+pkg_cache_description = Erlang in-memory cache
+pkg_cache_homepage = https://github.com/fogfish/cache
+pkg_cache_fetch = git
+pkg_cache_repo = https://github.com/fogfish/cache
+pkg_cache_commit = master
+
+PACKAGES += cake
+pkg_cake_name = cake
+pkg_cake_description = Really simple terminal colorization
+pkg_cake_homepage = https://github.com/darach/cake-erl
+pkg_cake_fetch = git
+pkg_cake_repo = https://github.com/darach/cake-erl
+pkg_cake_commit = master
+
+PACKAGES += carotene
+pkg_carotene_name = carotene
+pkg_carotene_description = Real-time server
+pkg_carotene_homepage = https://github.com/carotene/carotene
+pkg_carotene_fetch = git
+pkg_carotene_repo = https://github.com/carotene/carotene
+pkg_carotene_commit = master
+
+PACKAGES += cberl
+pkg_cberl_name = cberl
+pkg_cberl_description = NIF based Erlang bindings for Couchbase
+pkg_cberl_homepage = https://github.com/chitika/cberl
+pkg_cberl_fetch = git
+pkg_cberl_repo = https://github.com/chitika/cberl
+pkg_cberl_commit = master
+
+PACKAGES += cecho
+pkg_cecho_name = cecho
+pkg_cecho_description = An ncurses library for Erlang
+pkg_cecho_homepage = https://github.com/mazenharake/cecho
+pkg_cecho_fetch = git
+pkg_cecho_repo = https://github.com/mazenharake/cecho
+pkg_cecho_commit = master
+
+PACKAGES += cferl
+pkg_cferl_name = cferl
+pkg_cferl_description = Rackspace / OpenStack Cloud Files Erlang Client
+pkg_cferl_homepage = https://github.com/ddossot/cferl
+pkg_cferl_fetch = git
+pkg_cferl_repo = https://github.com/ddossot/cferl
+pkg_cferl_commit = master
+
+PACKAGES += chaos_monkey
+pkg_chaos_monkey_name = chaos_monkey
+pkg_chaos_monkey_description = This is The CHAOS MONKEY. It will kill your processes.
+pkg_chaos_monkey_homepage = https://github.com/dLuna/chaos_monkey
+pkg_chaos_monkey_fetch = git
+pkg_chaos_monkey_repo = https://github.com/dLuna/chaos_monkey
+pkg_chaos_monkey_commit = master
+
+PACKAGES += check_node
+pkg_check_node_name = check_node
+pkg_check_node_description = Nagios Scripts for monitoring Riak
+pkg_check_node_homepage = https://github.com/basho-labs/riak_nagios
+pkg_check_node_fetch = git
+pkg_check_node_repo = https://github.com/basho-labs/riak_nagios
+pkg_check_node_commit = master
+
+PACKAGES += chronos
+pkg_chronos_name = chronos
+pkg_chronos_description = Timer module for Erlang that makes it easy to abstract time out of the tests.
+pkg_chronos_homepage = https://github.com/lehoff/chronos
+pkg_chronos_fetch = git
+pkg_chronos_repo = https://github.com/lehoff/chronos
+pkg_chronos_commit = master
+
+PACKAGES += chumak
+pkg_chumak_name = chumak
+pkg_chumak_description = Pure Erlang implementation of ZeroMQ Message Transport Protocol.
+pkg_chumak_homepage = http://choven.ca
+pkg_chumak_fetch = git
+pkg_chumak_repo = https://github.com/chovencorp/chumak
+pkg_chumak_commit = master
+
+PACKAGES += cl
+pkg_cl_name = cl
+pkg_cl_description = OpenCL binding for Erlang
+pkg_cl_homepage = https://github.com/tonyrog/cl
+pkg_cl_fetch = git
+pkg_cl_repo = https://github.com/tonyrog/cl
+pkg_cl_commit = master
+
+PACKAGES += clique
+pkg_clique_name = clique
+pkg_clique_description = CLI Framework for Erlang
+pkg_clique_homepage = https://github.com/basho/clique
+pkg_clique_fetch = git
+pkg_clique_repo = https://github.com/basho/clique
+pkg_clique_commit = develop
+
+PACKAGES += cloudi_core
+pkg_cloudi_core_name = cloudi_core
+pkg_cloudi_core_description = CloudI internal service runtime
+pkg_cloudi_core_homepage = http://cloudi.org/
+pkg_cloudi_core_fetch = git
+pkg_cloudi_core_repo = https://github.com/CloudI/cloudi_core
+pkg_cloudi_core_commit = master
+
+PACKAGES += cloudi_service_api_requests
+pkg_cloudi_service_api_requests_name = cloudi_service_api_requests
+pkg_cloudi_service_api_requests_description = CloudI Service API requests (JSON-RPC/Erlang-term support)
+pkg_cloudi_service_api_requests_homepage = http://cloudi.org/
+pkg_cloudi_service_api_requests_fetch = git
+pkg_cloudi_service_api_requests_repo = https://github.com/CloudI/cloudi_service_api_requests
+pkg_cloudi_service_api_requests_commit = master
+
+PACKAGES += cloudi_service_db
+pkg_cloudi_service_db_name = cloudi_service_db
+pkg_cloudi_service_db_description = CloudI Database (in-memory/testing/generic)
+pkg_cloudi_service_db_homepage = http://cloudi.org/
+pkg_cloudi_service_db_fetch = git
+pkg_cloudi_service_db_repo = https://github.com/CloudI/cloudi_service_db
+pkg_cloudi_service_db_commit = master
+
+PACKAGES += cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_name = cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_description = Cassandra CloudI Service
+pkg_cloudi_service_db_cassandra_homepage = http://cloudi.org/
+pkg_cloudi_service_db_cassandra_fetch = git
+pkg_cloudi_service_db_cassandra_repo = https://github.com/CloudI/cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_commit = master
+
+PACKAGES += cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_name = cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_description = Cassandra CQL CloudI Service
+pkg_cloudi_service_db_cassandra_cql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_cassandra_cql_fetch = git
+pkg_cloudi_service_db_cassandra_cql_repo = https://github.com/CloudI/cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_commit = master
+
+PACKAGES += cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_name = cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_description = CouchDB CloudI Service
+pkg_cloudi_service_db_couchdb_homepage = http://cloudi.org/
+pkg_cloudi_service_db_couchdb_fetch = git
+pkg_cloudi_service_db_couchdb_repo = https://github.com/CloudI/cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_commit = master
+
+PACKAGES += cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_name = cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_description = elasticsearch CloudI Service
+pkg_cloudi_service_db_elasticsearch_homepage = http://cloudi.org/
+pkg_cloudi_service_db_elasticsearch_fetch = git
+pkg_cloudi_service_db_elasticsearch_repo = https://github.com/CloudI/cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_commit = master
+
+PACKAGES += cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_name = cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_description = memcached CloudI Service
+pkg_cloudi_service_db_memcached_homepage = http://cloudi.org/
+pkg_cloudi_service_db_memcached_fetch = git
+pkg_cloudi_service_db_memcached_repo = https://github.com/CloudI/cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_commit = master
+
+PACKAGES += cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_name = cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_description = MySQL CloudI Service
+pkg_cloudi_service_db_mysql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_mysql_fetch = git
+pkg_cloudi_service_db_mysql_repo = https://github.com/CloudI/cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_commit = master
+
+PACKAGES += cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_name = cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_description = PostgreSQL CloudI Service
+pkg_cloudi_service_db_pgsql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_pgsql_fetch = git
+pkg_cloudi_service_db_pgsql_repo = https://github.com/CloudI/cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_commit = master
+
+PACKAGES += cloudi_service_db_riak
+pkg_cloudi_service_db_riak_name = cloudi_service_db_riak
+pkg_cloudi_service_db_riak_description = Riak CloudI Service
+pkg_cloudi_service_db_riak_homepage = http://cloudi.org/
+pkg_cloudi_service_db_riak_fetch = git
+pkg_cloudi_service_db_riak_repo = https://github.com/CloudI/cloudi_service_db_riak
+pkg_cloudi_service_db_riak_commit = master
+
+PACKAGES += cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_name = cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_description = Tokyo Tyrant CloudI Service
+pkg_cloudi_service_db_tokyotyrant_homepage = http://cloudi.org/
+pkg_cloudi_service_db_tokyotyrant_fetch = git
+pkg_cloudi_service_db_tokyotyrant_repo = https://github.com/CloudI/cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_commit = master
+
+PACKAGES += cloudi_service_filesystem
+pkg_cloudi_service_filesystem_name = cloudi_service_filesystem
+pkg_cloudi_service_filesystem_description = Filesystem CloudI Service
+pkg_cloudi_service_filesystem_homepage = http://cloudi.org/
+pkg_cloudi_service_filesystem_fetch = git
+pkg_cloudi_service_filesystem_repo = https://github.com/CloudI/cloudi_service_filesystem
+pkg_cloudi_service_filesystem_commit = master
+
+PACKAGES += cloudi_service_http_client
+pkg_cloudi_service_http_client_name = cloudi_service_http_client
+pkg_cloudi_service_http_client_description = HTTP client CloudI Service
+pkg_cloudi_service_http_client_homepage = http://cloudi.org/
+pkg_cloudi_service_http_client_fetch = git
+pkg_cloudi_service_http_client_repo = https://github.com/CloudI/cloudi_service_http_client
+pkg_cloudi_service_http_client_commit = master
+
+PACKAGES += cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_name = cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_description = cowboy HTTP/HTTPS CloudI Service
+pkg_cloudi_service_http_cowboy_homepage = http://cloudi.org/
+pkg_cloudi_service_http_cowboy_fetch = git
+pkg_cloudi_service_http_cowboy_repo = https://github.com/CloudI/cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_commit = master
+
+PACKAGES += cloudi_service_http_elli
+pkg_cloudi_service_http_elli_name = cloudi_service_http_elli
+pkg_cloudi_service_http_elli_description = elli HTTP CloudI Service
+pkg_cloudi_service_http_elli_homepage = http://cloudi.org/
+pkg_cloudi_service_http_elli_fetch = git
+pkg_cloudi_service_http_elli_repo = https://github.com/CloudI/cloudi_service_http_elli
+pkg_cloudi_service_http_elli_commit = master
+
+PACKAGES += cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_name = cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_description = Map/Reduce CloudI Service
+pkg_cloudi_service_map_reduce_homepage = http://cloudi.org/
+pkg_cloudi_service_map_reduce_fetch = git
+pkg_cloudi_service_map_reduce_repo = https://github.com/CloudI/cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_commit = master
+
+PACKAGES += cloudi_service_oauth1
+pkg_cloudi_service_oauth1_name = cloudi_service_oauth1
+pkg_cloudi_service_oauth1_description = OAuth v1.0 CloudI Service
+pkg_cloudi_service_oauth1_homepage = http://cloudi.org/
+pkg_cloudi_service_oauth1_fetch = git
+pkg_cloudi_service_oauth1_repo = https://github.com/CloudI/cloudi_service_oauth1
+pkg_cloudi_service_oauth1_commit = master
+
+PACKAGES += cloudi_service_queue
+pkg_cloudi_service_queue_name = cloudi_service_queue
+pkg_cloudi_service_queue_description = Persistent Queue Service
+pkg_cloudi_service_queue_homepage = http://cloudi.org/
+pkg_cloudi_service_queue_fetch = git
+pkg_cloudi_service_queue_repo = https://github.com/CloudI/cloudi_service_queue
+pkg_cloudi_service_queue_commit = master
+
+PACKAGES += cloudi_service_quorum
+pkg_cloudi_service_quorum_name = cloudi_service_quorum
+pkg_cloudi_service_quorum_description = CloudI Quorum Service
+pkg_cloudi_service_quorum_homepage = http://cloudi.org/
+pkg_cloudi_service_quorum_fetch = git
+pkg_cloudi_service_quorum_repo = https://github.com/CloudI/cloudi_service_quorum
+pkg_cloudi_service_quorum_commit = master
+
+PACKAGES += cloudi_service_router
+pkg_cloudi_service_router_name = cloudi_service_router
+pkg_cloudi_service_router_description = CloudI Router Service
+pkg_cloudi_service_router_homepage = http://cloudi.org/
+pkg_cloudi_service_router_fetch = git
+pkg_cloudi_service_router_repo = https://github.com/CloudI/cloudi_service_router
+pkg_cloudi_service_router_commit = master
+
+PACKAGES += cloudi_service_tcp
+pkg_cloudi_service_tcp_name = cloudi_service_tcp
+pkg_cloudi_service_tcp_description = TCP CloudI Service
+pkg_cloudi_service_tcp_homepage = http://cloudi.org/
+pkg_cloudi_service_tcp_fetch = git
+pkg_cloudi_service_tcp_repo = https://github.com/CloudI/cloudi_service_tcp
+pkg_cloudi_service_tcp_commit = master
+
+PACKAGES += cloudi_service_timers
+pkg_cloudi_service_timers_name = cloudi_service_timers
+pkg_cloudi_service_timers_description = Timers CloudI Service
+pkg_cloudi_service_timers_homepage = http://cloudi.org/
+pkg_cloudi_service_timers_fetch = git
+pkg_cloudi_service_timers_repo = https://github.com/CloudI/cloudi_service_timers
+pkg_cloudi_service_timers_commit = master
+
+PACKAGES += cloudi_service_udp
+pkg_cloudi_service_udp_name = cloudi_service_udp
+pkg_cloudi_service_udp_description = UDP CloudI Service
+pkg_cloudi_service_udp_homepage = http://cloudi.org/
+pkg_cloudi_service_udp_fetch = git
+pkg_cloudi_service_udp_repo = https://github.com/CloudI/cloudi_service_udp
+pkg_cloudi_service_udp_commit = master
+
+PACKAGES += cloudi_service_validate
+pkg_cloudi_service_validate_name = cloudi_service_validate
+pkg_cloudi_service_validate_description = CloudI Validate Service
+pkg_cloudi_service_validate_homepage = http://cloudi.org/
+pkg_cloudi_service_validate_fetch = git
+pkg_cloudi_service_validate_repo = https://github.com/CloudI/cloudi_service_validate
+pkg_cloudi_service_validate_commit = master
+
+PACKAGES += cloudi_service_zeromq
+pkg_cloudi_service_zeromq_name = cloudi_service_zeromq
+pkg_cloudi_service_zeromq_description = ZeroMQ CloudI Service
+pkg_cloudi_service_zeromq_homepage = http://cloudi.org/
+pkg_cloudi_service_zeromq_fetch = git
+pkg_cloudi_service_zeromq_repo = https://github.com/CloudI/cloudi_service_zeromq
+pkg_cloudi_service_zeromq_commit = master
+
+PACKAGES += cluster_info
+pkg_cluster_info_name = cluster_info
+pkg_cluster_info_description = Fork of Hibari's nifty cluster_info OTP app
+pkg_cluster_info_homepage = https://github.com/basho/cluster_info
+pkg_cluster_info_fetch = git
+pkg_cluster_info_repo = https://github.com/basho/cluster_info
+pkg_cluster_info_commit = master
+
+PACKAGES += color
+pkg_color_name = color
+pkg_color_description = ANSI colors for your Erlang
+pkg_color_homepage = https://github.com/julianduque/erlang-color
+pkg_color_fetch = git
+pkg_color_repo = https://github.com/julianduque/erlang-color
+pkg_color_commit = master
+
+PACKAGES += confetti
+pkg_confetti_name = confetti
+pkg_confetti_description = Erlang configuration provider / application:get_env/2 on steroids
+pkg_confetti_homepage = https://github.com/jtendo/confetti
+pkg_confetti_fetch = git
+pkg_confetti_repo = https://github.com/jtendo/confetti
+pkg_confetti_commit = master
+
+PACKAGES += couchbeam
+pkg_couchbeam_name = couchbeam
+pkg_couchbeam_description = Apache CouchDB client in Erlang
+pkg_couchbeam_homepage = https://github.com/benoitc/couchbeam
+pkg_couchbeam_fetch = git
+pkg_couchbeam_repo = https://github.com/benoitc/couchbeam
+pkg_couchbeam_commit = master
+
+PACKAGES += covertool
+pkg_covertool_name = covertool
+pkg_covertool_description = Tool to convert Erlang cover data files into Cobertura XML reports
+pkg_covertool_homepage = https://github.com/idubrov/covertool
+pkg_covertool_fetch = git
+pkg_covertool_repo = https://github.com/idubrov/covertool
+pkg_covertool_commit = master
+
+PACKAGES += cowboy
+pkg_cowboy_name = cowboy
+pkg_cowboy_description = Small, fast and modular HTTP server.
+pkg_cowboy_homepage = http://ninenines.eu
+pkg_cowboy_fetch = git
+pkg_cowboy_repo = https://github.com/ninenines/cowboy
+pkg_cowboy_commit = 1.0.4
+
+PACKAGES += cowdb
+pkg_cowdb_name = cowdb
+pkg_cowdb_description = Pure Key/Value database library for Erlang Applications
+pkg_cowdb_homepage = https://github.com/refuge/cowdb
+pkg_cowdb_fetch = git
+pkg_cowdb_repo = https://github.com/refuge/cowdb
+pkg_cowdb_commit = master
+
+PACKAGES += cowlib
+pkg_cowlib_name = cowlib
+pkg_cowlib_description = Support library for manipulating Web protocols.
+pkg_cowlib_homepage = http://ninenines.eu
+pkg_cowlib_fetch = git
+pkg_cowlib_repo = https://github.com/ninenines/cowlib
+pkg_cowlib_commit = 1.0.2
+
+PACKAGES += cpg
+pkg_cpg_name = cpg
+pkg_cpg_description = CloudI Process Groups
+pkg_cpg_homepage = https://github.com/okeuday/cpg
+pkg_cpg_fetch = git
+pkg_cpg_repo = https://github.com/okeuday/cpg
+pkg_cpg_commit = master
+
+PACKAGES += cqerl
+pkg_cqerl_name = cqerl
+pkg_cqerl_description = Native Erlang CQL client for Cassandra
+pkg_cqerl_homepage = https://matehat.github.io/cqerl/
+pkg_cqerl_fetch = git
+pkg_cqerl_repo = https://github.com/matehat/cqerl
+pkg_cqerl_commit = master
+
+PACKAGES += cr
+pkg_cr_name = cr
+pkg_cr_description = Chain Replication
+pkg_cr_homepage = https://synrc.com/apps/cr/doc/cr.htm
+pkg_cr_fetch = git
+pkg_cr_repo = https://github.com/spawnproc/cr
+pkg_cr_commit = master
+
+PACKAGES += cuttlefish
+pkg_cuttlefish_name = cuttlefish
+pkg_cuttlefish_description = never lose your childlike sense of wonder baby cuttlefish, promise me?
+pkg_cuttlefish_homepage = https://github.com/basho/cuttlefish
+pkg_cuttlefish_fetch = git
+pkg_cuttlefish_repo = https://github.com/basho/cuttlefish
+pkg_cuttlefish_commit = master
+
+PACKAGES += damocles
+pkg_damocles_name = damocles
+pkg_damocles_description = Erlang library for generating adversarial network conditions for QAing distributed applications/systems on a single Linux box.
+pkg_damocles_homepage = https://github.com/lostcolony/damocles
+pkg_damocles_fetch = git
+pkg_damocles_repo = https://github.com/lostcolony/damocles
+pkg_damocles_commit = master
+
+PACKAGES += debbie
+pkg_debbie_name = debbie
+pkg_debbie_description = .DEB Built In Erlang
+pkg_debbie_homepage = https://github.com/crownedgrouse/debbie
+pkg_debbie_fetch = git
+pkg_debbie_repo = https://github.com/crownedgrouse/debbie
+pkg_debbie_commit = master
+
+PACKAGES += decimal
+pkg_decimal_name = decimal
+pkg_decimal_description = An Erlang decimal arithmetic library
+pkg_decimal_homepage = https://github.com/tim/erlang-decimal
+pkg_decimal_fetch = git
+pkg_decimal_repo = https://github.com/tim/erlang-decimal
+pkg_decimal_commit = master
+
+PACKAGES += detergent
+pkg_detergent_name = detergent
+pkg_detergent_description = An emulsifying Erlang SOAP library
+pkg_detergent_homepage = https://github.com/devinus/detergent
+pkg_detergent_fetch = git
+pkg_detergent_repo = https://github.com/devinus/detergent
+pkg_detergent_commit = master
+
+PACKAGES += detest
+pkg_detest_name = detest
+pkg_detest_description = Tool for running tests on a cluster of erlang nodes
+pkg_detest_homepage = https://github.com/biokoda/detest
+pkg_detest_fetch = git
+pkg_detest_repo = https://github.com/biokoda/detest
+pkg_detest_commit = master
+
+PACKAGES += dh_date
+pkg_dh_date_name = dh_date
+pkg_dh_date_description = Date formatting / parsing library for erlang
+pkg_dh_date_homepage = https://github.com/daleharvey/dh_date
+pkg_dh_date_fetch = git
+pkg_dh_date_repo = https://github.com/daleharvey/dh_date
+pkg_dh_date_commit = master
+
+PACKAGES += dirbusterl
+pkg_dirbusterl_name = dirbusterl
+pkg_dirbusterl_description = DirBuster successor in Erlang
+pkg_dirbusterl_homepage = https://github.com/silentsignal/DirBustErl
+pkg_dirbusterl_fetch = git
+pkg_dirbusterl_repo = https://github.com/silentsignal/DirBustErl
+pkg_dirbusterl_commit = master
+
+PACKAGES += dispcount
+pkg_dispcount_name = dispcount
+pkg_dispcount_description = Erlang task dispatcher based on ETS counters.
+pkg_dispcount_homepage = https://github.com/ferd/dispcount
+pkg_dispcount_fetch = git
+pkg_dispcount_repo = https://github.com/ferd/dispcount
+pkg_dispcount_commit = master
+
+PACKAGES += dlhttpc
+pkg_dlhttpc_name = dlhttpc
+pkg_dlhttpc_description = dispcount-based lhttpc fork for massive amounts of requests to limited endpoints
+pkg_dlhttpc_homepage = https://github.com/ferd/dlhttpc
+pkg_dlhttpc_fetch = git
+pkg_dlhttpc_repo = https://github.com/ferd/dlhttpc
+pkg_dlhttpc_commit = master
+
+PACKAGES += dns
+pkg_dns_name = dns
+pkg_dns_description = Erlang DNS library
+pkg_dns_homepage = https://github.com/aetrion/dns_erlang
+pkg_dns_fetch = git
+pkg_dns_repo = https://github.com/aetrion/dns_erlang
+pkg_dns_commit = master
+
+PACKAGES += dnssd
+pkg_dnssd_name = dnssd
+pkg_dnssd_description = Erlang interface to Apple's Bonjour DNS Service Discovery implementation
+pkg_dnssd_homepage = https://github.com/benoitc/dnssd_erlang
+pkg_dnssd_fetch = git
+pkg_dnssd_repo = https://github.com/benoitc/dnssd_erlang
+pkg_dnssd_commit = master
+
+PACKAGES += dynamic_compile
+pkg_dynamic_compile_name = dynamic_compile
+pkg_dynamic_compile_description = compile and load erlang modules from string input
+pkg_dynamic_compile_homepage = https://github.com/jkvor/dynamic_compile
+pkg_dynamic_compile_fetch = git
+pkg_dynamic_compile_repo = https://github.com/jkvor/dynamic_compile
+pkg_dynamic_compile_commit = master
+
+PACKAGES += e2
+pkg_e2_name = e2
+pkg_e2_description = Library to simplify writing correct OTP applications.
+pkg_e2_homepage = http://e2project.org
+pkg_e2_fetch = git
+pkg_e2_repo = https://github.com/gar1t/e2
+pkg_e2_commit = master
+
+PACKAGES += eamf
+pkg_eamf_name = eamf
+pkg_eamf_description = eAMF provides Action Message Format (AMF) support for Erlang
+pkg_eamf_homepage = https://github.com/mrinalwadhwa/eamf
+pkg_eamf_fetch = git
+pkg_eamf_repo = https://github.com/mrinalwadhwa/eamf
+pkg_eamf_commit = master
+
+PACKAGES += eavro
+pkg_eavro_name = eavro
+pkg_eavro_description = Apache Avro encoder/decoder
+pkg_eavro_homepage = https://github.com/SIfoxDevTeam/eavro
+pkg_eavro_fetch = git
+pkg_eavro_repo = https://github.com/SIfoxDevTeam/eavro
+pkg_eavro_commit = master
+
+PACKAGES += ecapnp
+pkg_ecapnp_name = ecapnp
+pkg_ecapnp_description = Cap'n Proto library for Erlang
+pkg_ecapnp_homepage = https://github.com/kaos/ecapnp
+pkg_ecapnp_fetch = git
+pkg_ecapnp_repo = https://github.com/kaos/ecapnp
+pkg_ecapnp_commit = master
+
+PACKAGES += econfig
+pkg_econfig_name = econfig
+pkg_econfig_description = simple Erlang config handler using INI files
+pkg_econfig_homepage = https://github.com/benoitc/econfig
+pkg_econfig_fetch = git
+pkg_econfig_repo = https://github.com/benoitc/econfig
+pkg_econfig_commit = master
+
+PACKAGES += edate
+pkg_edate_name = edate
+pkg_edate_description = date manipulation library for erlang
+pkg_edate_homepage = https://github.com/dweldon/edate
+pkg_edate_fetch = git
+pkg_edate_repo = https://github.com/dweldon/edate
+pkg_edate_commit = master
+
+PACKAGES += edgar
+pkg_edgar_name = edgar
+pkg_edgar_description = Erlang Does GNU AR
+pkg_edgar_homepage = https://github.com/crownedgrouse/edgar
+pkg_edgar_fetch = git
+pkg_edgar_repo = https://github.com/crownedgrouse/edgar
+pkg_edgar_commit = master
+
+PACKAGES += edis
+pkg_edis_name = edis
+pkg_edis_description = An Erlang implementation of Redis KV Store
+pkg_edis_homepage = http://inaka.github.com/edis/
+pkg_edis_fetch = git
+pkg_edis_repo = https://github.com/inaka/edis
+pkg_edis_commit = master
+
+PACKAGES += edns
+pkg_edns_name = edns
+pkg_edns_description = Erlang/OTP DNS server
+pkg_edns_homepage = https://github.com/hcvst/erlang-dns
+pkg_edns_fetch = git
+pkg_edns_repo = https://github.com/hcvst/erlang-dns
+pkg_edns_commit = master
+
+PACKAGES += edown
+pkg_edown_name = edown
+pkg_edown_description = EDoc extension for generating GitHub-flavored Markdown
+pkg_edown_homepage = https://github.com/uwiger/edown
+pkg_edown_fetch = git
+pkg_edown_repo = https://github.com/uwiger/edown
+pkg_edown_commit = master
+
+PACKAGES += eep
+pkg_eep_name = eep
+pkg_eep_description = Erlang Easy Profiling (eep) application provides a way to analyze application performance and call hierarchy
+pkg_eep_homepage = https://github.com/virtan/eep
+pkg_eep_fetch = git
+pkg_eep_repo = https://github.com/virtan/eep
+pkg_eep_commit = master
+
+PACKAGES += eep_app
+pkg_eep_app_name = eep_app
+pkg_eep_app_description = Embedded Event Processing
+pkg_eep_app_homepage = https://github.com/darach/eep-erl
+pkg_eep_app_fetch = git
+pkg_eep_app_repo = https://github.com/darach/eep-erl
+pkg_eep_app_commit = master
+
+PACKAGES += efene
+pkg_efene_name = efene
+pkg_efene_description = Alternative syntax for the Erlang Programming Language focusing on simplicity, ease of use and programmer UX
+pkg_efene_homepage = https://github.com/efene/efene
+pkg_efene_fetch = git
+pkg_efene_repo = https://github.com/efene/efene
+pkg_efene_commit = master
+
+PACKAGES += egeoip
+pkg_egeoip_name = egeoip
+pkg_egeoip_description = Erlang IP Geolocation module, currently supporting the MaxMind GeoLite City Database.
+pkg_egeoip_homepage = https://github.com/mochi/egeoip
+pkg_egeoip_fetch = git
+pkg_egeoip_repo = https://github.com/mochi/egeoip
+pkg_egeoip_commit = master
+
+PACKAGES += ehsa
+pkg_ehsa_name = ehsa
+pkg_ehsa_description = Erlang HTTP server basic and digest authentication modules
+pkg_ehsa_homepage = https://bitbucket.org/a12n/ehsa
+pkg_ehsa_fetch = hg
+pkg_ehsa_repo = https://bitbucket.org/a12n/ehsa
+pkg_ehsa_commit = default
+
+PACKAGES += ej
+pkg_ej_name = ej
+pkg_ej_description = Helper module for working with Erlang terms representing JSON
+pkg_ej_homepage = https://github.com/seth/ej
+pkg_ej_fetch = git
+pkg_ej_repo = https://github.com/seth/ej
+pkg_ej_commit = master
+
+PACKAGES += ejabberd
+pkg_ejabberd_name = ejabberd
+pkg_ejabberd_description = Robust, ubiquitous and massively scalable Jabber / XMPP Instant Messaging platform
+pkg_ejabberd_homepage = https://github.com/processone/ejabberd
+pkg_ejabberd_fetch = git
+pkg_ejabberd_repo = https://github.com/processone/ejabberd
+pkg_ejabberd_commit = master
+
+PACKAGES += ejwt
+pkg_ejwt_name = ejwt
+pkg_ejwt_description = erlang library for JSON Web Token
+pkg_ejwt_homepage = https://github.com/artefactop/ejwt
+pkg_ejwt_fetch = git
+pkg_ejwt_repo = https://github.com/artefactop/ejwt
+pkg_ejwt_commit = master
+
+PACKAGES += ekaf
+pkg_ekaf_name = ekaf
+pkg_ekaf_description = A minimal, high-performance Kafka client in Erlang.
+pkg_ekaf_homepage = https://github.com/helpshift/ekaf
+pkg_ekaf_fetch = git
+pkg_ekaf_repo = https://github.com/helpshift/ekaf
+pkg_ekaf_commit = master
+
+PACKAGES += elarm
+pkg_elarm_name = elarm
+pkg_elarm_description = Alarm Manager for Erlang.
+pkg_elarm_homepage = https://github.com/esl/elarm
+pkg_elarm_fetch = git
+pkg_elarm_repo = https://github.com/esl/elarm
+pkg_elarm_commit = master
+
+PACKAGES += eleveldb
+pkg_eleveldb_name = eleveldb
+pkg_eleveldb_description = Erlang LevelDB API
+pkg_eleveldb_homepage = https://github.com/basho/eleveldb
+pkg_eleveldb_fetch = git
+pkg_eleveldb_repo = https://github.com/basho/eleveldb
+pkg_eleveldb_commit = master
+
+PACKAGES += elixir
+pkg_elixir_name = elixir
+pkg_elixir_description = Elixir is a dynamic, functional language designed for building scalable and maintainable applications
+pkg_elixir_homepage = https://elixir-lang.org/
+pkg_elixir_fetch = git
+pkg_elixir_repo = https://github.com/elixir-lang/elixir
+pkg_elixir_commit = master
+
+PACKAGES += elli
+pkg_elli_name = elli
+pkg_elli_description = Simple, robust and performant Erlang web server
+pkg_elli_homepage = https://github.com/elli-lib/elli
+pkg_elli_fetch = git
+pkg_elli_repo = https://github.com/elli-lib/elli
+pkg_elli_commit = master
+
+PACKAGES += elvis
+pkg_elvis_name = elvis
+pkg_elvis_description = Erlang Style Reviewer
+pkg_elvis_homepage = https://github.com/inaka/elvis
+pkg_elvis_fetch = git
+pkg_elvis_repo = https://github.com/inaka/elvis
+pkg_elvis_commit = master
+
+PACKAGES += emagick
+pkg_emagick_name = emagick
+pkg_emagick_description = Wrapper for Graphics/ImageMagick command line tool.
+pkg_emagick_homepage = https://github.com/kivra/emagick
+pkg_emagick_fetch = git
+pkg_emagick_repo = https://github.com/kivra/emagick
+pkg_emagick_commit = master
+
+PACKAGES += emysql
+pkg_emysql_name = emysql
+pkg_emysql_description = Stable, pure Erlang MySQL driver.
+pkg_emysql_homepage = https://github.com/Eonblast/Emysql
+pkg_emysql_fetch = git
+pkg_emysql_repo = https://github.com/Eonblast/Emysql
+pkg_emysql_commit = master
+
+PACKAGES += enm
+pkg_enm_name = enm
+pkg_enm_description = Erlang driver for nanomsg
+pkg_enm_homepage = https://github.com/basho/enm
+pkg_enm_fetch = git
+pkg_enm_repo = https://github.com/basho/enm
+pkg_enm_commit = master
+
+PACKAGES += entop
+pkg_entop_name = entop
+pkg_entop_description = A top-like tool for monitoring an Erlang node
+pkg_entop_homepage = https://github.com/mazenharake/entop
+pkg_entop_fetch = git
+pkg_entop_repo = https://github.com/mazenharake/entop
+pkg_entop_commit = master
+
+PACKAGES += epcap
+pkg_epcap_name = epcap
+pkg_epcap_description = Erlang packet capture interface using pcap
+pkg_epcap_homepage = https://github.com/msantos/epcap
+pkg_epcap_fetch = git
+pkg_epcap_repo = https://github.com/msantos/epcap
+pkg_epcap_commit = master
+
+PACKAGES += eper
+pkg_eper_name = eper
+pkg_eper_description = Erlang performance and debugging tools.
+pkg_eper_homepage = https://github.com/massemanet/eper
+pkg_eper_fetch = git
+pkg_eper_repo = https://github.com/massemanet/eper
+pkg_eper_commit = master
+
+PACKAGES += epgsql
+pkg_epgsql_name = epgsql
+pkg_epgsql_description = Erlang PostgreSQL client library.
+pkg_epgsql_homepage = https://github.com/epgsql/epgsql
+pkg_epgsql_fetch = git
+pkg_epgsql_repo = https://github.com/epgsql/epgsql
+pkg_epgsql_commit = master
+
+PACKAGES += episcina
+pkg_episcina_name = episcina
+pkg_episcina_description = A simple, non-intrusive resource pool for connections
+pkg_episcina_homepage = https://github.com/erlware/episcina
+pkg_episcina_fetch = git
+pkg_episcina_repo = https://github.com/erlware/episcina
+pkg_episcina_commit = master
+
+PACKAGES += eplot
+pkg_eplot_name = eplot
+pkg_eplot_description = A plot engine written in erlang.
+pkg_eplot_homepage = https://github.com/psyeugenic/eplot
+pkg_eplot_fetch = git
+pkg_eplot_repo = https://github.com/psyeugenic/eplot
+pkg_eplot_commit = master
+
+PACKAGES += epocxy
+pkg_epocxy_name = epocxy
+pkg_epocxy_description = Erlang Patterns of Concurrency
+pkg_epocxy_homepage = https://github.com/duomark/epocxy
+pkg_epocxy_fetch = git
+pkg_epocxy_repo = https://github.com/duomark/epocxy
+pkg_epocxy_commit = master
+
+PACKAGES += epubnub
+pkg_epubnub_name = epubnub
+pkg_epubnub_description = Erlang PubNub API
+pkg_epubnub_homepage = https://github.com/tsloughter/epubnub
+pkg_epubnub_fetch = git
+pkg_epubnub_repo = https://github.com/tsloughter/epubnub
+pkg_epubnub_commit = master
+
+PACKAGES += eqm
+pkg_eqm_name = eqm
+pkg_eqm_description = Erlang pub sub with supply-demand channels
+pkg_eqm_homepage = https://github.com/loucash/eqm
+pkg_eqm_fetch = git
+pkg_eqm_repo = https://github.com/loucash/eqm
+pkg_eqm_commit = master
+
+PACKAGES += eredis
+pkg_eredis_name = eredis
+pkg_eredis_description = Erlang Redis client
+pkg_eredis_homepage = https://github.com/wooga/eredis
+pkg_eredis_fetch = git
+pkg_eredis_repo = https://github.com/wooga/eredis
+pkg_eredis_commit = master
+
+PACKAGES += eredis_pool
+pkg_eredis_pool_name = eredis_pool
+pkg_eredis_pool_description = eredis_pool is a pool of Redis clients, using eredis and poolboy.
+pkg_eredis_pool_homepage = https://github.com/hiroeorz/eredis_pool
+pkg_eredis_pool_fetch = git
+pkg_eredis_pool_repo = https://github.com/hiroeorz/eredis_pool
+pkg_eredis_pool_commit = master
+
+PACKAGES += erl_streams
+pkg_erl_streams_name = erl_streams
+pkg_erl_streams_description = Streams in Erlang
+pkg_erl_streams_homepage = https://github.com/epappas/erl_streams
+pkg_erl_streams_fetch = git
+pkg_erl_streams_repo = https://github.com/epappas/erl_streams
+pkg_erl_streams_commit = master
+
+PACKAGES += erlang_cep
+pkg_erlang_cep_name = erlang_cep
+pkg_erlang_cep_description = A basic CEP package written in erlang
+pkg_erlang_cep_homepage = https://github.com/danmacklin/erlang_cep
+pkg_erlang_cep_fetch = git
+pkg_erlang_cep_repo = https://github.com/danmacklin/erlang_cep
+pkg_erlang_cep_commit = master
+
+PACKAGES += erlang_js
+pkg_erlang_js_name = erlang_js
+pkg_erlang_js_description = A linked-in driver for Erlang to Mozilla's Spidermonkey Javascript runtime.
+pkg_erlang_js_homepage = https://github.com/basho/erlang_js
+pkg_erlang_js_fetch = git
+pkg_erlang_js_repo = https://github.com/basho/erlang_js
+pkg_erlang_js_commit = master
+
+PACKAGES += erlang_localtime
+pkg_erlang_localtime_name = erlang_localtime
+pkg_erlang_localtime_description = Erlang library for conversion from one local time to another
+pkg_erlang_localtime_homepage = https://github.com/dmitryme/erlang_localtime
+pkg_erlang_localtime_fetch = git
+pkg_erlang_localtime_repo = https://github.com/dmitryme/erlang_localtime
+pkg_erlang_localtime_commit = master
+
+PACKAGES += erlang_smtp
+pkg_erlang_smtp_name = erlang_smtp
+pkg_erlang_smtp_description = Erlang SMTP and POP3 server code.
+pkg_erlang_smtp_homepage = https://github.com/tonyg/erlang-smtp
+pkg_erlang_smtp_fetch = git
+pkg_erlang_smtp_repo = https://github.com/tonyg/erlang-smtp
+pkg_erlang_smtp_commit = master
+
+PACKAGES += erlang_term
+pkg_erlang_term_name = erlang_term
+pkg_erlang_term_description = Erlang Term Info
+pkg_erlang_term_homepage = https://github.com/okeuday/erlang_term
+pkg_erlang_term_fetch = git
+pkg_erlang_term_repo = https://github.com/okeuday/erlang_term
+pkg_erlang_term_commit = master
+
+PACKAGES += erlastic_search
+pkg_erlastic_search_name = erlastic_search
+pkg_erlastic_search_description = An Erlang app for communicating with Elasticsearch's REST interface.
+pkg_erlastic_search_homepage = https://github.com/tsloughter/erlastic_search
+pkg_erlastic_search_fetch = git
+pkg_erlastic_search_repo = https://github.com/tsloughter/erlastic_search
+pkg_erlastic_search_commit = master
+
+PACKAGES += erlasticsearch
+pkg_erlasticsearch_name = erlasticsearch
+pkg_erlasticsearch_description = Erlang Thrift interface to Elasticsearch
+pkg_erlasticsearch_homepage = https://github.com/dieswaytoofast/erlasticsearch
+pkg_erlasticsearch_fetch = git
+pkg_erlasticsearch_repo = https://github.com/dieswaytoofast/erlasticsearch
+pkg_erlasticsearch_commit = master
+
+PACKAGES += erlbrake
+pkg_erlbrake_name = erlbrake
+pkg_erlbrake_description = Erlang Airbrake notification client
+pkg_erlbrake_homepage = https://github.com/kenpratt/erlbrake
+pkg_erlbrake_fetch = git
+pkg_erlbrake_repo = https://github.com/kenpratt/erlbrake
+pkg_erlbrake_commit = master
+
+PACKAGES += erlcloud
+pkg_erlcloud_name = erlcloud
+pkg_erlcloud_description = Cloud Computing library for erlang (Amazon EC2, S3, SQS, SimpleDB, Mechanical Turk, ELB)
+pkg_erlcloud_homepage = https://github.com/gleber/erlcloud
+pkg_erlcloud_fetch = git
+pkg_erlcloud_repo = https://github.com/gleber/erlcloud
+pkg_erlcloud_commit = master
+
+PACKAGES += erlcron
+pkg_erlcron_name = erlcron
+pkg_erlcron_description = Erlang cronish system
+pkg_erlcron_homepage = https://github.com/erlware/erlcron
+pkg_erlcron_fetch = git
+pkg_erlcron_repo = https://github.com/erlware/erlcron
+pkg_erlcron_commit = master
+
+PACKAGES += erldb
+pkg_erldb_name = erldb
+pkg_erldb_description = ORM (Object-relational mapping) application implemented in Erlang
+pkg_erldb_homepage = http://erldb.org
+pkg_erldb_fetch = git
+pkg_erldb_repo = https://github.com/erldb/erldb
+pkg_erldb_commit = master
+
+PACKAGES += erldis
+pkg_erldis_name = erldis
+pkg_erldis_description = redis erlang client library
+pkg_erldis_homepage = https://github.com/cstar/erldis
+pkg_erldis_fetch = git
+pkg_erldis_repo = https://github.com/cstar/erldis
+pkg_erldis_commit = master
+
+PACKAGES += erldns
+pkg_erldns_name = erldns
+pkg_erldns_description = DNS server, in erlang.
+pkg_erldns_homepage = https://github.com/aetrion/erl-dns
+pkg_erldns_fetch = git
+pkg_erldns_repo = https://github.com/aetrion/erl-dns
+pkg_erldns_commit = master
+
+PACKAGES += erldocker
+pkg_erldocker_name = erldocker
+pkg_erldocker_description = Docker Remote API client for Erlang
+pkg_erldocker_homepage = https://github.com/proger/erldocker
+pkg_erldocker_fetch = git
+pkg_erldocker_repo = https://github.com/proger/erldocker
+pkg_erldocker_commit = master
+
+PACKAGES += erlfsmon
+pkg_erlfsmon_name = erlfsmon
+pkg_erlfsmon_description = Erlang filesystem event watcher for Linux and OSX
+pkg_erlfsmon_homepage = https://github.com/proger/erlfsmon
+pkg_erlfsmon_fetch = git
+pkg_erlfsmon_repo = https://github.com/proger/erlfsmon
+pkg_erlfsmon_commit = master
+
+PACKAGES += erlgit
+pkg_erlgit_name = erlgit
+pkg_erlgit_description = Erlang convenience wrapper around git executable
+pkg_erlgit_homepage = https://github.com/gleber/erlgit
+pkg_erlgit_fetch = git
+pkg_erlgit_repo = https://github.com/gleber/erlgit
+pkg_erlgit_commit = master
+
+PACKAGES += erlguten
+pkg_erlguten_name = erlguten
+pkg_erlguten_description = ErlGuten is a system for high-quality typesetting, written purely in Erlang.
+pkg_erlguten_homepage = https://github.com/richcarl/erlguten
+pkg_erlguten_fetch = git
+pkg_erlguten_repo = https://github.com/richcarl/erlguten
+pkg_erlguten_commit = master
+
+PACKAGES += erlmc
+pkg_erlmc_name = erlmc
+pkg_erlmc_description = Erlang memcached binary protocol client
+pkg_erlmc_homepage = https://github.com/jkvor/erlmc
+pkg_erlmc_fetch = git
+pkg_erlmc_repo = https://github.com/jkvor/erlmc
+pkg_erlmc_commit = master
+
+PACKAGES += erlmongo
+pkg_erlmongo_name = erlmongo
+pkg_erlmongo_description = Record based Erlang driver for MongoDB with gridfs support
+pkg_erlmongo_homepage = https://github.com/SergejJurecko/erlmongo
+pkg_erlmongo_fetch = git
+pkg_erlmongo_repo = https://github.com/SergejJurecko/erlmongo
+pkg_erlmongo_commit = master
+
+PACKAGES += erlog
+pkg_erlog_name = erlog
+pkg_erlog_description = Prolog interpreter in and for Erlang
+pkg_erlog_homepage = https://github.com/rvirding/erlog
+pkg_erlog_fetch = git
+pkg_erlog_repo = https://github.com/rvirding/erlog
+pkg_erlog_commit = master
+
+PACKAGES += erlpass
+pkg_erlpass_name = erlpass
+pkg_erlpass_description = A library to handle password hashing and changing in a safe manner, independent from any kind of storage whatsoever.
+pkg_erlpass_homepage = https://github.com/ferd/erlpass
+pkg_erlpass_fetch = git
+pkg_erlpass_repo = https://github.com/ferd/erlpass
+pkg_erlpass_commit = master
+
+PACKAGES += erlport
+pkg_erlport_name = erlport
+pkg_erlport_description = ErlPort - connect Erlang to other languages
+pkg_erlport_homepage = https://github.com/hdima/erlport
+pkg_erlport_fetch = git
+pkg_erlport_repo = https://github.com/hdima/erlport
+pkg_erlport_commit = master
+
+PACKAGES += erlsh
+pkg_erlsh_name = erlsh
+pkg_erlsh_description = Erlang shell tools
+pkg_erlsh_homepage = https://github.com/proger/erlsh
+pkg_erlsh_fetch = git
+pkg_erlsh_repo = https://github.com/proger/erlsh
+pkg_erlsh_commit = master
+
+PACKAGES += erlsha2
+pkg_erlsha2_name = erlsha2
+pkg_erlsha2_description = SHA-224, SHA-256, SHA-384, SHA-512 implemented in Erlang NIFs.
+pkg_erlsha2_homepage = https://github.com/vinoski/erlsha2
+pkg_erlsha2_fetch = git
+pkg_erlsha2_repo = https://github.com/vinoski/erlsha2
+pkg_erlsha2_commit = master
+
+PACKAGES += erlsom
+pkg_erlsom_name = erlsom
+pkg_erlsom_description = XML parser for Erlang
+pkg_erlsom_homepage = https://github.com/willemdj/erlsom
+pkg_erlsom_fetch = git
+pkg_erlsom_repo = https://github.com/willemdj/erlsom
+pkg_erlsom_commit = master
+
+PACKAGES += erlubi
+pkg_erlubi_name = erlubi
+pkg_erlubi_description = Ubigraph Erlang Client (and Process Visualizer)
+pkg_erlubi_homepage = https://github.com/krestenkrab/erlubi
+pkg_erlubi_fetch = git
+pkg_erlubi_repo = https://github.com/krestenkrab/erlubi
+pkg_erlubi_commit = master
+
+PACKAGES += erlvolt
+pkg_erlvolt_name = erlvolt
+pkg_erlvolt_description = VoltDB Erlang Client Driver
+pkg_erlvolt_homepage = https://github.com/VoltDB/voltdb-client-erlang
+pkg_erlvolt_fetch = git
+pkg_erlvolt_repo = https://github.com/VoltDB/voltdb-client-erlang
+pkg_erlvolt_commit = master
+
+PACKAGES += erlware_commons
+pkg_erlware_commons_name = erlware_commons
+pkg_erlware_commons_description = Erlware Commons is an Erlware project focused on all aspects of reusable Erlang components.
+pkg_erlware_commons_homepage = https://github.com/erlware/erlware_commons
+pkg_erlware_commons_fetch = git
+pkg_erlware_commons_repo = https://github.com/erlware/erlware_commons
+pkg_erlware_commons_commit = master
+
+PACKAGES += erlydtl
+pkg_erlydtl_name = erlydtl
+pkg_erlydtl_description = Django Template Language for Erlang.
+pkg_erlydtl_homepage = https://github.com/erlydtl/erlydtl
+pkg_erlydtl_fetch = git
+pkg_erlydtl_repo = https://github.com/erlydtl/erlydtl
+pkg_erlydtl_commit = master
+
+PACKAGES += errd
+pkg_errd_name = errd
+pkg_errd_description = Erlang RRDTool library
+pkg_errd_homepage = https://github.com/archaelus/errd
+pkg_errd_fetch = git
+pkg_errd_repo = https://github.com/archaelus/errd
+pkg_errd_commit = master
+
+PACKAGES += erserve
+pkg_erserve_name = erserve
+pkg_erserve_description = Erlang/Rserve communication interface
+pkg_erserve_homepage = https://github.com/del/erserve
+pkg_erserve_fetch = git
+pkg_erserve_repo = https://github.com/del/erserve
+pkg_erserve_commit = master
+
+PACKAGES += erwa
+pkg_erwa_name = erwa
+pkg_erwa_description = A WAMP router and client written in Erlang.
+pkg_erwa_homepage = https://github.com/bwegh/erwa
+pkg_erwa_fetch = git
+pkg_erwa_repo = https://github.com/bwegh/erwa
+pkg_erwa_commit = master
+
+PACKAGES += escalus
+pkg_escalus_name = escalus
+pkg_escalus_description = An XMPP client library in Erlang for conveniently testing XMPP servers
+pkg_escalus_homepage = https://github.com/esl/escalus
+pkg_escalus_fetch = git
+pkg_escalus_repo = https://github.com/esl/escalus
+pkg_escalus_commit = master
+
+PACKAGES += esh_mk
+pkg_esh_mk_name = esh_mk
+pkg_esh_mk_description = esh template engine plugin for erlang.mk
+pkg_esh_mk_homepage = https://github.com/crownedgrouse/esh.mk
+pkg_esh_mk_fetch = git
+pkg_esh_mk_repo = https://github.com/crownedgrouse/esh.mk.git
+pkg_esh_mk_commit = master
+
+PACKAGES += espec
+pkg_espec_name = espec
+pkg_espec_description = ESpec: Behaviour driven development framework for Erlang
+pkg_espec_homepage = https://github.com/lucaspiller/espec
+pkg_espec_fetch = git
+pkg_espec_repo = https://github.com/lucaspiller/espec
+pkg_espec_commit = master
+
+PACKAGES += estatsd
+pkg_estatsd_name = estatsd
+pkg_estatsd_description = Erlang stats aggregation app that periodically flushes data to graphite
+pkg_estatsd_homepage = https://github.com/RJ/estatsd
+pkg_estatsd_fetch = git
+pkg_estatsd_repo = https://github.com/RJ/estatsd
+pkg_estatsd_commit = master
+
+PACKAGES += etap
+pkg_etap_name = etap
+pkg_etap_description = etap is a simple erlang testing library that provides TAP compliant output.
+pkg_etap_homepage = https://github.com/ngerakines/etap
+pkg_etap_fetch = git
+pkg_etap_repo = https://github.com/ngerakines/etap
+pkg_etap_commit = master
+
+PACKAGES += etest
+pkg_etest_name = etest
+pkg_etest_description = A lightweight, convention over configuration test framework for Erlang
+pkg_etest_homepage = https://github.com/wooga/etest
+pkg_etest_fetch = git
+pkg_etest_repo = https://github.com/wooga/etest
+pkg_etest_commit = master
+
+PACKAGES += etest_http
+pkg_etest_http_name = etest_http
+pkg_etest_http_description = etest Assertions around HTTP (client-side)
+pkg_etest_http_homepage = https://github.com/wooga/etest_http
+pkg_etest_http_fetch = git
+pkg_etest_http_repo = https://github.com/wooga/etest_http
+pkg_etest_http_commit = master
+
+PACKAGES += etoml
+pkg_etoml_name = etoml
+pkg_etoml_description = Erlang parser for the TOML language
+pkg_etoml_homepage = https://github.com/kalta/etoml
+pkg_etoml_fetch = git
+pkg_etoml_repo = https://github.com/kalta/etoml
+pkg_etoml_commit = master
+
+PACKAGES += eunit
+pkg_eunit_name = eunit
+pkg_eunit_description = The EUnit lightweight unit testing framework for Erlang - this is the canonical development repository.
+pkg_eunit_homepage = https://github.com/richcarl/eunit
+pkg_eunit_fetch = git
+pkg_eunit_repo = https://github.com/richcarl/eunit
+pkg_eunit_commit = master
+
+PACKAGES += eunit_formatters
+pkg_eunit_formatters_name = eunit_formatters
+pkg_eunit_formatters_description = Because eunit's output sucks. Let's make it better.
+pkg_eunit_formatters_homepage = https://github.com/seancribbs/eunit_formatters
+pkg_eunit_formatters_fetch = git
+pkg_eunit_formatters_repo = https://github.com/seancribbs/eunit_formatters
+pkg_eunit_formatters_commit = master
+
+PACKAGES += euthanasia
+pkg_euthanasia_name = euthanasia
+pkg_euthanasia_description = Merciful killer for your Erlang processes
+pkg_euthanasia_homepage = https://github.com/doubleyou/euthanasia
+pkg_euthanasia_fetch = git
+pkg_euthanasia_repo = https://github.com/doubleyou/euthanasia
+pkg_euthanasia_commit = master
+
+PACKAGES += evum
+pkg_evum_name = evum
+pkg_evum_description = Spawn Linux VMs as Erlang processes in the Erlang VM
+pkg_evum_homepage = https://github.com/msantos/evum
+pkg_evum_fetch = git
+pkg_evum_repo = https://github.com/msantos/evum
+pkg_evum_commit = master
+
+PACKAGES += exec
+pkg_exec_name = erlexec
+pkg_exec_description = Execute and control OS processes from Erlang/OTP.
+pkg_exec_homepage = http://saleyn.github.com/erlexec
+pkg_exec_fetch = git
+pkg_exec_repo = https://github.com/saleyn/erlexec
+pkg_exec_commit = master
+
+PACKAGES += exml
+pkg_exml_name = exml
+pkg_exml_description = XML parsing library in Erlang
+pkg_exml_homepage = https://github.com/paulgray/exml
+pkg_exml_fetch = git
+pkg_exml_repo = https://github.com/paulgray/exml
+pkg_exml_commit = master
+
+PACKAGES += exometer
+pkg_exometer_name = exometer
+pkg_exometer_description = Basic measurement objects and probe behavior
+pkg_exometer_homepage = https://github.com/Feuerlabs/exometer
+pkg_exometer_fetch = git
+pkg_exometer_repo = https://github.com/Feuerlabs/exometer
+pkg_exometer_commit = master
+
+PACKAGES += exs1024
+pkg_exs1024_name = exs1024
+pkg_exs1024_description = Xorshift1024star pseudo random number generator for Erlang.
+pkg_exs1024_homepage = https://github.com/jj1bdx/exs1024
+pkg_exs1024_fetch = git
+pkg_exs1024_repo = https://github.com/jj1bdx/exs1024
+pkg_exs1024_commit = master
+
+PACKAGES += exs64
+pkg_exs64_name = exs64
+pkg_exs64_description = Xorshift64star pseudo random number generator for Erlang.
+pkg_exs64_homepage = https://github.com/jj1bdx/exs64
+pkg_exs64_fetch = git
+pkg_exs64_repo = https://github.com/jj1bdx/exs64
+pkg_exs64_commit = master
+
+PACKAGES += exsplus116
+pkg_exsplus116_name = exsplus116
+pkg_exsplus116_description = Xorshift116plus for Erlang
+pkg_exsplus116_homepage = https://github.com/jj1bdx/exsplus116
+pkg_exsplus116_fetch = git
+pkg_exsplus116_repo = https://github.com/jj1bdx/exsplus116
+pkg_exsplus116_commit = master
+
+PACKAGES += exsplus128
+pkg_exsplus128_name = exsplus128
+pkg_exsplus128_description = Xorshift128plus pseudo random number generator for Erlang.
+pkg_exsplus128_homepage = https://github.com/jj1bdx/exsplus128
+pkg_exsplus128_fetch = git
+pkg_exsplus128_repo = https://github.com/jj1bdx/exsplus128
+pkg_exsplus128_commit = master
+
+PACKAGES += ezmq
+pkg_ezmq_name = ezmq
+pkg_ezmq_description = ZeroMQ implemented in Erlang
+pkg_ezmq_homepage = https://github.com/RoadRunnr/ezmq
+pkg_ezmq_fetch = git
+pkg_ezmq_repo = https://github.com/RoadRunnr/ezmq
+pkg_ezmq_commit = master
+
+PACKAGES += ezmtp
+pkg_ezmtp_name = ezmtp
+pkg_ezmtp_description = ZMTP protocol in pure Erlang.
+pkg_ezmtp_homepage = https://github.com/a13x/ezmtp
+pkg_ezmtp_fetch = git
+pkg_ezmtp_repo = https://github.com/a13x/ezmtp
+pkg_ezmtp_commit = master
+
+PACKAGES += fast_disk_log
+pkg_fast_disk_log_name = fast_disk_log
+pkg_fast_disk_log_description = Pool-based asynchronous Erlang disk logger
+pkg_fast_disk_log_homepage = https://github.com/lpgauth/fast_disk_log
+pkg_fast_disk_log_fetch = git
+pkg_fast_disk_log_repo = https://github.com/lpgauth/fast_disk_log
+pkg_fast_disk_log_commit = master
+
+PACKAGES += feeder
+pkg_feeder_name = feeder
+pkg_feeder_description = Stream parse RSS and Atom formatted XML feeds.
+pkg_feeder_homepage = https://github.com/michaelnisi/feeder
+pkg_feeder_fetch = git
+pkg_feeder_repo = https://github.com/michaelnisi/feeder
+pkg_feeder_commit = master
+
+PACKAGES += find_crate
+pkg_find_crate_name = find_crate
+pkg_find_crate_description = Find Rust libs and exes in Erlang application priv directory
+pkg_find_crate_homepage = https://github.com/goertzenator/find_crate
+pkg_find_crate_fetch = git
+pkg_find_crate_repo = https://github.com/goertzenator/find_crate
+pkg_find_crate_commit = master
+
+PACKAGES += fix
+pkg_fix_name = fix
+pkg_fix_description = http://fixprotocol.org/ implementation.
+pkg_fix_homepage = https://github.com/maxlapshin/fix
+pkg_fix_fetch = git
+pkg_fix_repo = https://github.com/maxlapshin/fix
+pkg_fix_commit = master
+
+PACKAGES += flower
+pkg_flower_name = flower
+pkg_flower_description = FlowER - an Erlang OpenFlow development platform
+pkg_flower_homepage = https://github.com/travelping/flower
+pkg_flower_fetch = git
+pkg_flower_repo = https://github.com/travelping/flower
+pkg_flower_commit = master
+
+PACKAGES += fn
+pkg_fn_name = fn
+pkg_fn_description = Function utilities for Erlang
+pkg_fn_homepage = https://github.com/reiddraper/fn
+pkg_fn_fetch = git
+pkg_fn_repo = https://github.com/reiddraper/fn
+pkg_fn_commit = master
+
+PACKAGES += folsom
+pkg_folsom_name = folsom
+pkg_folsom_description = Expose Erlang Events and Metrics
+pkg_folsom_homepage = https://github.com/boundary/folsom
+pkg_folsom_fetch = git
+pkg_folsom_repo = https://github.com/boundary/folsom
+pkg_folsom_commit = master
+
+PACKAGES += folsom_cowboy
+pkg_folsom_cowboy_name = folsom_cowboy
+pkg_folsom_cowboy_description = A Cowboy based Folsom HTTP Wrapper.
+pkg_folsom_cowboy_homepage = https://github.com/boundary/folsom_cowboy
+pkg_folsom_cowboy_fetch = git
+pkg_folsom_cowboy_repo = https://github.com/boundary/folsom_cowboy
+pkg_folsom_cowboy_commit = master
+
+PACKAGES += folsomite
+pkg_folsomite_name = folsomite
+pkg_folsomite_description = blow up your graphite / riemann server with folsom metrics
+pkg_folsomite_homepage = https://github.com/campanja/folsomite
+pkg_folsomite_fetch = git
+pkg_folsomite_repo = https://github.com/campanja/folsomite
+pkg_folsomite_commit = master
+
+PACKAGES += fs
+pkg_fs_name = fs
+pkg_fs_description = Erlang FileSystem Listener
+pkg_fs_homepage = https://github.com/synrc/fs
+pkg_fs_fetch = git
+pkg_fs_repo = https://github.com/synrc/fs
+pkg_fs_commit = master
+
+PACKAGES += fuse
+pkg_fuse_name = fuse
+pkg_fuse_description = A Circuit Breaker for Erlang
+pkg_fuse_homepage = https://github.com/jlouis/fuse
+pkg_fuse_fetch = git
+pkg_fuse_repo = https://github.com/jlouis/fuse
+pkg_fuse_commit = master
+
+PACKAGES += gcm
+pkg_gcm_name = gcm
+pkg_gcm_description = An Erlang application for Google Cloud Messaging
+pkg_gcm_homepage = https://github.com/pdincau/gcm-erlang
+pkg_gcm_fetch = git
+pkg_gcm_repo = https://github.com/pdincau/gcm-erlang
+pkg_gcm_commit = master
+
+PACKAGES += gcprof
+pkg_gcprof_name = gcprof
+pkg_gcprof_description = Garbage Collection profiler for Erlang
+pkg_gcprof_homepage = https://github.com/knutin/gcprof
+pkg_gcprof_fetch = git
+pkg_gcprof_repo = https://github.com/knutin/gcprof
+pkg_gcprof_commit = master
+
+PACKAGES += geas
+pkg_geas_name = geas
+pkg_geas_description = Guess Erlang Application Scattering
+pkg_geas_homepage = https://github.com/crownedgrouse/geas
+pkg_geas_fetch = git
+pkg_geas_repo = https://github.com/crownedgrouse/geas
+pkg_geas_commit = master
+
+PACKAGES += geef
+pkg_geef_name = geef
+pkg_geef_description = Git NEEEEF (Erlang NIF)
+pkg_geef_homepage = https://github.com/carlosmn/geef
+pkg_geef_fetch = git
+pkg_geef_repo = https://github.com/carlosmn/geef
+pkg_geef_commit = master
+
+PACKAGES += gen_coap
+pkg_gen_coap_name = gen_coap
+pkg_gen_coap_description = Generic Erlang CoAP Client/Server
+pkg_gen_coap_homepage = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_fetch = git
+pkg_gen_coap_repo = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_commit = master
+
+PACKAGES += gen_cycle
+pkg_gen_cycle_name = gen_cycle
+pkg_gen_cycle_description = Simple, generic OTP behaviour for recurring tasks
+pkg_gen_cycle_homepage = https://github.com/aerosol/gen_cycle
+pkg_gen_cycle_fetch = git
+pkg_gen_cycle_repo = https://github.com/aerosol/gen_cycle
+pkg_gen_cycle_commit = develop
+
+PACKAGES += gen_icmp
+pkg_gen_icmp_name = gen_icmp
+pkg_gen_icmp_description = Erlang interface to ICMP sockets
+pkg_gen_icmp_homepage = https://github.com/msantos/gen_icmp
+pkg_gen_icmp_fetch = git
+pkg_gen_icmp_repo = https://github.com/msantos/gen_icmp
+pkg_gen_icmp_commit = master
+
+PACKAGES += gen_leader
+pkg_gen_leader_name = gen_leader
+pkg_gen_leader_description = leader election behavior
+pkg_gen_leader_homepage = https://github.com/garret-smith/gen_leader_revival
+pkg_gen_leader_fetch = git
+pkg_gen_leader_repo = https://github.com/garret-smith/gen_leader_revival
+pkg_gen_leader_commit = master
+
+PACKAGES += gen_nb_server
+pkg_gen_nb_server_name = gen_nb_server
+pkg_gen_nb_server_description = OTP behavior for writing non-blocking servers
+pkg_gen_nb_server_homepage = https://github.com/kevsmith/gen_nb_server
+pkg_gen_nb_server_fetch = git
+pkg_gen_nb_server_repo = https://github.com/kevsmith/gen_nb_server
+pkg_gen_nb_server_commit = master
+
+PACKAGES += gen_paxos
+pkg_gen_paxos_name = gen_paxos
+pkg_gen_paxos_description = An Erlang/OTP-style implementation of the Paxos distributed consensus protocol
+pkg_gen_paxos_homepage = https://github.com/gburd/gen_paxos
+pkg_gen_paxos_fetch = git
+pkg_gen_paxos_repo = https://github.com/gburd/gen_paxos
+pkg_gen_paxos_commit = master
+
+PACKAGES += gen_rpc
+pkg_gen_rpc_name = gen_rpc
+pkg_gen_rpc_description = A scalable RPC library for Erlang-VM based languages
+pkg_gen_rpc_homepage = https://github.com/priestjim/gen_rpc
+pkg_gen_rpc_fetch = git
+pkg_gen_rpc_repo = https://github.com/priestjim/gen_rpc.git
+pkg_gen_rpc_commit = master
+
+PACKAGES += gen_smtp
+pkg_gen_smtp_name = gen_smtp
+pkg_gen_smtp_description = A generic Erlang SMTP server and client that can be extended via callback modules
+pkg_gen_smtp_homepage = https://github.com/Vagabond/gen_smtp
+pkg_gen_smtp_fetch = git
+pkg_gen_smtp_repo = https://github.com/Vagabond/gen_smtp
+pkg_gen_smtp_commit = master
+
+PACKAGES += gen_tracker
+pkg_gen_tracker_name = gen_tracker
+pkg_gen_tracker_description = Supervisor with ETS-based handling of children and their metadata
+pkg_gen_tracker_homepage = https://github.com/erlyvideo/gen_tracker
+pkg_gen_tracker_fetch = git
+pkg_gen_tracker_repo = https://github.com/erlyvideo/gen_tracker
+pkg_gen_tracker_commit = master
+
+PACKAGES += gen_unix
+pkg_gen_unix_name = gen_unix
+pkg_gen_unix_description = Erlang Unix socket interface
+pkg_gen_unix_homepage = https://github.com/msantos/gen_unix
+pkg_gen_unix_fetch = git
+pkg_gen_unix_repo = https://github.com/msantos/gen_unix
+pkg_gen_unix_commit = master
+
+PACKAGES += geode
+pkg_geode_name = geode
+pkg_geode_description = geohash/proximity lookup in pure, uncut erlang.
+pkg_geode_homepage = https://github.com/bradfordw/geode
+pkg_geode_fetch = git
+pkg_geode_repo = https://github.com/bradfordw/geode
+pkg_geode_commit = master
+
+PACKAGES += getopt
+pkg_getopt_name = getopt
+pkg_getopt_description = Module to parse command line arguments using the GNU getopt syntax
+pkg_getopt_homepage = https://github.com/jcomellas/getopt
+pkg_getopt_fetch = git
+pkg_getopt_repo = https://github.com/jcomellas/getopt
+pkg_getopt_commit = master
+
+PACKAGES += gettext
+pkg_gettext_name = gettext
+pkg_gettext_description = Erlang internationalization library.
+pkg_gettext_homepage = https://github.com/etnt/gettext
+pkg_gettext_fetch = git
+pkg_gettext_repo = https://github.com/etnt/gettext
+pkg_gettext_commit = master
+
+PACKAGES += giallo
+pkg_giallo_name = giallo
+pkg_giallo_description = Small and flexible web framework on top of Cowboy
+pkg_giallo_homepage = https://github.com/kivra/giallo
+pkg_giallo_fetch = git
+pkg_giallo_repo = https://github.com/kivra/giallo
+pkg_giallo_commit = master
+
+PACKAGES += gin
+pkg_gin_name = gin
+pkg_gin_description = The guards and for Erlang parse_transform
+pkg_gin_homepage = https://github.com/mad-cocktail/gin
+pkg_gin_fetch = git
+pkg_gin_repo = https://github.com/mad-cocktail/gin
+pkg_gin_commit = master
+
+PACKAGES += gitty
+pkg_gitty_name = gitty
+pkg_gitty_description = Git access in erlang
+pkg_gitty_homepage = https://github.com/maxlapshin/gitty
+pkg_gitty_fetch = git
+pkg_gitty_repo = https://github.com/maxlapshin/gitty
+pkg_gitty_commit = master
+
+PACKAGES += gold_fever
+pkg_gold_fever_name = gold_fever
+pkg_gold_fever_description = A Treasure Hunt for Erlangers
+pkg_gold_fever_homepage = https://github.com/inaka/gold_fever
+pkg_gold_fever_fetch = git
+pkg_gold_fever_repo = https://github.com/inaka/gold_fever
+pkg_gold_fever_commit = master
+
+PACKAGES += gpb
+pkg_gpb_name = gpb
+pkg_gpb_description = A Google Protobuf implementation for Erlang
+pkg_gpb_homepage = https://github.com/tomas-abrahamsson/gpb
+pkg_gpb_fetch = git
+pkg_gpb_repo = https://github.com/tomas-abrahamsson/gpb
+pkg_gpb_commit = master
+
+PACKAGES += gproc
+pkg_gproc_name = gproc
+pkg_gproc_description = Extended process registry for Erlang
+pkg_gproc_homepage = https://github.com/uwiger/gproc
+pkg_gproc_fetch = git
+pkg_gproc_repo = https://github.com/uwiger/gproc
+pkg_gproc_commit = master
+
+PACKAGES += grapherl
+pkg_grapherl_name = grapherl
+pkg_grapherl_description = Create graphs of Erlang systems and programs
+pkg_grapherl_homepage = https://github.com/eproxus/grapherl
+pkg_grapherl_fetch = git
+pkg_grapherl_repo = https://github.com/eproxus/grapherl
+pkg_grapherl_commit = master
+
+PACKAGES += grpc
+pkg_grpc_name = grpc
+pkg_grpc_description = gRPC server in Erlang
+pkg_grpc_homepage = https://github.com/Bluehouse-Technology/grpc
+pkg_grpc_fetch = git
+pkg_grpc_repo = https://github.com/Bluehouse-Technology/grpc
+pkg_grpc_commit = master
+
+PACKAGES += grpc_client
+pkg_grpc_client_name = grpc_client
+pkg_grpc_client_description = gRPC client in Erlang
+pkg_grpc_client_homepage = https://github.com/Bluehouse-Technology/grpc_client
+pkg_grpc_client_fetch = git
+pkg_grpc_client_repo = https://github.com/Bluehouse-Technology/grpc_client
+pkg_grpc_client_commit = master
+
+PACKAGES += gun
+pkg_gun_name = gun
+pkg_gun_description = Asynchronous SPDY, HTTP and Websocket client written in Erlang.
+pkg_gun_homepage = http://ninenines.eu
+pkg_gun_fetch = git
+pkg_gun_repo = https://github.com/ninenines/gun
+pkg_gun_commit = master
+
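+# A minimal usage sketch (hypothetical project Makefile, not part of this
+# index): naming a package in DEPS makes erlang.mk fetch it using the
+# pkg_*_repo and pkg_*_commit defaults above, e.g. for gun:
+#
+#     DEPS = gun
+#     include erlang.mk
+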
+PACKAGES += gut
+pkg_gut_name = gut
+pkg_gut_description = gut is a template-printing (aka scaffolding) tool for Erlang, like rails generate or yeoman
+pkg_gut_homepage = https://github.com/unbalancedparentheses/gut
+pkg_gut_fetch = git
+pkg_gut_repo = https://github.com/unbalancedparentheses/gut
+pkg_gut_commit = master
+
+PACKAGES += hackney
+pkg_hackney_name = hackney
+pkg_hackney_description = simple HTTP client in Erlang
+pkg_hackney_homepage = https://github.com/benoitc/hackney
+pkg_hackney_fetch = git
+pkg_hackney_repo = https://github.com/benoitc/hackney
+pkg_hackney_commit = master
+
+PACKAGES += hamcrest
+pkg_hamcrest_name = hamcrest
+pkg_hamcrest_description = Erlang port of Hamcrest
+pkg_hamcrest_homepage = https://github.com/hyperthunk/hamcrest-erlang
+pkg_hamcrest_fetch = git
+pkg_hamcrest_repo = https://github.com/hyperthunk/hamcrest-erlang
+pkg_hamcrest_commit = master
+
+PACKAGES += hanoidb
+pkg_hanoidb_name = hanoidb
+pkg_hanoidb_description = Erlang LSM BTree Storage
+pkg_hanoidb_homepage = https://github.com/krestenkrab/hanoidb
+pkg_hanoidb_fetch = git
+pkg_hanoidb_repo = https://github.com/krestenkrab/hanoidb
+pkg_hanoidb_commit = master
+
+PACKAGES += hottub
+pkg_hottub_name = hottub
+pkg_hottub_description = Permanent Erlang Worker Pool
+pkg_hottub_homepage = https://github.com/bfrog/hottub
+pkg_hottub_fetch = git
+pkg_hottub_repo = https://github.com/bfrog/hottub
+pkg_hottub_commit = master
+
+PACKAGES += hpack
+pkg_hpack_name = hpack
+pkg_hpack_description = HPACK Implementation for Erlang
+pkg_hpack_homepage = https://github.com/joedevivo/hpack
+pkg_hpack_fetch = git
+pkg_hpack_repo = https://github.com/joedevivo/hpack
+pkg_hpack_commit = master
+
+PACKAGES += hyper
+pkg_hyper_name = hyper
+pkg_hyper_description = Erlang implementation of HyperLogLog
+pkg_hyper_homepage = https://github.com/GameAnalytics/hyper
+pkg_hyper_fetch = git
+pkg_hyper_repo = https://github.com/GameAnalytics/hyper
+pkg_hyper_commit = master
+
+PACKAGES += i18n
+pkg_i18n_name = i18n
+pkg_i18n_description = International components for unicode from Erlang (unicode, date, string, number, format, locale, localization, transliteration, icu4e)
+pkg_i18n_homepage = https://github.com/erlang-unicode/i18n
+pkg_i18n_fetch = git
+pkg_i18n_repo = https://github.com/erlang-unicode/i18n
+pkg_i18n_commit = master
+
+PACKAGES += ibrowse
+pkg_ibrowse_name = ibrowse
+pkg_ibrowse_description = Erlang HTTP client
+pkg_ibrowse_homepage = https://github.com/cmullaparthi/ibrowse
+pkg_ibrowse_fetch = git
+pkg_ibrowse_repo = https://github.com/cmullaparthi/ibrowse
+pkg_ibrowse_commit = master
+
+PACKAGES += idna
+pkg_idna_name = idna
+pkg_idna_description = Erlang IDNA lib
+pkg_idna_homepage = https://github.com/benoitc/erlang-idna
+pkg_idna_fetch = git
+pkg_idna_repo = https://github.com/benoitc/erlang-idna
+pkg_idna_commit = master
+
+PACKAGES += ierlang
+pkg_ierlang_name = ierlang
+pkg_ierlang_description = An Erlang language kernel for IPython.
+pkg_ierlang_homepage = https://github.com/robbielynch/ierlang
+pkg_ierlang_fetch = git
+pkg_ierlang_repo = https://github.com/robbielynch/ierlang
+pkg_ierlang_commit = master
+
+PACKAGES += iota
+pkg_iota_name = iota
+pkg_iota_description = iota (Inter-dependency Objective Testing Apparatus) - a tool to enforce clean separation of responsibilities in Erlang code
+pkg_iota_homepage = https://github.com/jpgneves/iota
+pkg_iota_fetch = git
+pkg_iota_repo = https://github.com/jpgneves/iota
+pkg_iota_commit = master
+
+PACKAGES += irc_lib
+pkg_irc_lib_name = irc_lib
+pkg_irc_lib_description = Erlang IRC client library
+pkg_irc_lib_homepage = https://github.com/OtpChatBot/irc_lib
+pkg_irc_lib_fetch = git
+pkg_irc_lib_repo = https://github.com/OtpChatBot/irc_lib
+pkg_irc_lib_commit = master
+
+PACKAGES += ircd
+pkg_ircd_name = ircd
+pkg_ircd_description = A pluggable IRC daemon application/library for Erlang.
+pkg_ircd_homepage = https://github.com/tonyg/erlang-ircd
+pkg_ircd_fetch = git
+pkg_ircd_repo = https://github.com/tonyg/erlang-ircd
+pkg_ircd_commit = master
+
+PACKAGES += iris
+pkg_iris_name = iris
+pkg_iris_description = Iris Erlang binding
+pkg_iris_homepage = https://github.com/project-iris/iris-erl
+pkg_iris_fetch = git
+pkg_iris_repo = https://github.com/project-iris/iris-erl
+pkg_iris_commit = master
+
+PACKAGES += iso8601
+pkg_iso8601_name = iso8601
+pkg_iso8601_description = Erlang ISO 8601 date formatter/parser
+pkg_iso8601_homepage = https://github.com/seansawyer/erlang_iso8601
+pkg_iso8601_fetch = git
+pkg_iso8601_repo = https://github.com/seansawyer/erlang_iso8601
+pkg_iso8601_commit = master
+
+PACKAGES += jamdb_sybase
+pkg_jamdb_sybase_name = jamdb_sybase
+pkg_jamdb_sybase_description = Erlang driver for SAP Sybase ASE
+pkg_jamdb_sybase_homepage = https://github.com/erlangbureau/jamdb_sybase
+pkg_jamdb_sybase_fetch = git
+pkg_jamdb_sybase_repo = https://github.com/erlangbureau/jamdb_sybase
+pkg_jamdb_sybase_commit = master
+
+PACKAGES += jerg
+pkg_jerg_name = jerg
+pkg_jerg_description = JSON Schema to Erlang Records Generator
+pkg_jerg_homepage = https://github.com/ddossot/jerg
+pkg_jerg_fetch = git
+pkg_jerg_repo = https://github.com/ddossot/jerg
+pkg_jerg_commit = master
+
+PACKAGES += jesse
+pkg_jesse_name = jesse
+pkg_jesse_description = jesse (JSon Schema Erlang) is an implementation of a JSON Schema validator for Erlang.
+pkg_jesse_homepage = https://github.com/for-GET/jesse
+pkg_jesse_fetch = git
+pkg_jesse_repo = https://github.com/for-GET/jesse
+pkg_jesse_commit = master
+
+PACKAGES += jiffy
+pkg_jiffy_name = jiffy
+pkg_jiffy_description = JSON NIFs for Erlang.
+pkg_jiffy_homepage = https://github.com/davisp/jiffy
+pkg_jiffy_fetch = git
+pkg_jiffy_repo = https://github.com/davisp/jiffy
+pkg_jiffy_commit = master
+
+PACKAGES += jiffy_v
+pkg_jiffy_v_name = jiffy_v
+pkg_jiffy_v_description = JSON validation utility
+pkg_jiffy_v_homepage = https://github.com/shizzard/jiffy-v
+pkg_jiffy_v_fetch = git
+pkg_jiffy_v_repo = https://github.com/shizzard/jiffy-v
+pkg_jiffy_v_commit = master
+
+PACKAGES += jobs
+pkg_jobs_name = jobs
+pkg_jobs_description = a Job scheduler for load regulation
+pkg_jobs_homepage = https://github.com/esl/jobs
+pkg_jobs_fetch = git
+pkg_jobs_repo = https://github.com/esl/jobs
+pkg_jobs_commit = master
+
+PACKAGES += joxa
+pkg_joxa_name = joxa
+pkg_joxa_description = A Modern Lisp for the Erlang VM
+pkg_joxa_homepage = https://github.com/joxa/joxa
+pkg_joxa_fetch = git
+pkg_joxa_repo = https://github.com/joxa/joxa
+pkg_joxa_commit = master
+
+PACKAGES += json
+pkg_json_name = json
+pkg_json_description = a high level json library for erlang (17.0+)
+pkg_json_homepage = https://github.com/talentdeficit/json
+pkg_json_fetch = git
+pkg_json_repo = https://github.com/talentdeficit/json
+pkg_json_commit = master
+
+PACKAGES += json_rec
+pkg_json_rec_name = json_rec
+pkg_json_rec_description = JSON to erlang record
+pkg_json_rec_homepage = https://github.com/justinkirby/json_rec
+pkg_json_rec_fetch = git
+pkg_json_rec_repo = https://github.com/justinkirby/json_rec
+pkg_json_rec_commit = master
+
+PACKAGES += jsone
+pkg_jsone_name = jsone
+pkg_jsone_description = An Erlang library for encoding and decoding JSON data.
+pkg_jsone_homepage = https://github.com/sile/jsone
+pkg_jsone_fetch = git
+pkg_jsone_repo = https://github.com/sile/jsone.git
+pkg_jsone_commit = master
+
+PACKAGES += jsonerl
+pkg_jsonerl_name = jsonerl
+pkg_jsonerl_description = yet another but slightly different erlang <-> json encoder/decoder
+pkg_jsonerl_homepage = https://github.com/lambder/jsonerl
+pkg_jsonerl_fetch = git
+pkg_jsonerl_repo = https://github.com/lambder/jsonerl
+pkg_jsonerl_commit = master
+
+PACKAGES += jsonpath
+pkg_jsonpath_name = jsonpath
+pkg_jsonpath_description = Fast Erlang JSON data retrieval and updates via javascript-like notation
+pkg_jsonpath_homepage = https://github.com/GeneStevens/jsonpath
+pkg_jsonpath_fetch = git
+pkg_jsonpath_repo = https://github.com/GeneStevens/jsonpath
+pkg_jsonpath_commit = master
+
+PACKAGES += jsonx
+pkg_jsonx_name = jsonx
+pkg_jsonx_description = JSONX is an Erlang library, written in C, for efficiently decoding and encoding JSON.
+pkg_jsonx_homepage = https://github.com/iskra/jsonx
+pkg_jsonx_fetch = git
+pkg_jsonx_repo = https://github.com/iskra/jsonx
+pkg_jsonx_commit = master
+
+PACKAGES += jsx
+pkg_jsx_name = jsx
+pkg_jsx_description = An Erlang application for consuming, producing and manipulating JSON.
+pkg_jsx_homepage = https://github.com/talentdeficit/jsx
+pkg_jsx_fetch = git
+pkg_jsx_repo = https://github.com/talentdeficit/jsx
+pkg_jsx_commit = master
+
+PACKAGES += kafka
+pkg_kafka_name = kafka
+pkg_kafka_description = Kafka consumer and producer in Erlang
+pkg_kafka_homepage = https://github.com/wooga/kafka-erlang
+pkg_kafka_fetch = git
+pkg_kafka_repo = https://github.com/wooga/kafka-erlang
+pkg_kafka_commit = master
+
+PACKAGES += kafka_protocol
+pkg_kafka_protocol_name = kafka_protocol
+pkg_kafka_protocol_description = Kafka protocol Erlang library
+pkg_kafka_protocol_homepage = https://github.com/klarna/kafka_protocol
+pkg_kafka_protocol_fetch = git
+pkg_kafka_protocol_repo = https://github.com/klarna/kafka_protocol.git
+pkg_kafka_protocol_commit = master
+
+PACKAGES += kai
+pkg_kai_name = kai
+pkg_kai_description = DHT storage by Takeshi Inoue
+pkg_kai_homepage = https://github.com/synrc/kai
+pkg_kai_fetch = git
+pkg_kai_repo = https://github.com/synrc/kai
+pkg_kai_commit = master
+
+PACKAGES += katja
+pkg_katja_name = katja
+pkg_katja_description = A simple Riemann client written in Erlang.
+pkg_katja_homepage = https://github.com/nifoc/katja
+pkg_katja_fetch = git
+pkg_katja_repo = https://github.com/nifoc/katja
+pkg_katja_commit = master
+
+PACKAGES += kdht
+pkg_kdht_name = kdht
+pkg_kdht_description = kdht is an erlang DHT implementation
+pkg_kdht_homepage = https://github.com/kevinlynx/kdht
+pkg_kdht_fetch = git
+pkg_kdht_repo = https://github.com/kevinlynx/kdht
+pkg_kdht_commit = master
+
+PACKAGES += key2value
+pkg_key2value_name = key2value
+pkg_key2value_description = Erlang 2-way map
+pkg_key2value_homepage = https://github.com/okeuday/key2value
+pkg_key2value_fetch = git
+pkg_key2value_repo = https://github.com/okeuday/key2value
+pkg_key2value_commit = master
+
+PACKAGES += keys1value
+pkg_keys1value_name = keys1value
+pkg_keys1value_description = Erlang set associative map for key lists
+pkg_keys1value_homepage = https://github.com/okeuday/keys1value
+pkg_keys1value_fetch = git
+pkg_keys1value_repo = https://github.com/okeuday/keys1value
+pkg_keys1value_commit = master
+
+PACKAGES += kinetic
+pkg_kinetic_name = kinetic
+pkg_kinetic_description = Erlang Kinesis Client
+pkg_kinetic_homepage = https://github.com/AdRoll/kinetic
+pkg_kinetic_fetch = git
+pkg_kinetic_repo = https://github.com/AdRoll/kinetic
+pkg_kinetic_commit = master
+
+PACKAGES += kjell
+pkg_kjell_name = kjell
+pkg_kjell_description = Erlang Shell
+pkg_kjell_homepage = https://github.com/karlll/kjell
+pkg_kjell_fetch = git
+pkg_kjell_repo = https://github.com/karlll/kjell
+pkg_kjell_commit = master
+
+PACKAGES += kraken
+pkg_kraken_name = kraken
+pkg_kraken_description = Distributed Pubsub Server for Realtime Apps
+pkg_kraken_homepage = https://github.com/Asana/kraken
+pkg_kraken_fetch = git
+pkg_kraken_repo = https://github.com/Asana/kraken
+pkg_kraken_commit = master
+
+PACKAGES += kucumberl
+pkg_kucumberl_name = kucumberl
+pkg_kucumberl_description = A pure-Erlang, open-source implementation of Cucumber
+pkg_kucumberl_homepage = https://github.com/openshine/kucumberl
+pkg_kucumberl_fetch = git
+pkg_kucumberl_repo = https://github.com/openshine/kucumberl
+pkg_kucumberl_commit = master
+
+PACKAGES += kvc
+pkg_kvc_name = kvc
+pkg_kvc_description = KVC - Key Value Coding for Erlang data structures
+pkg_kvc_homepage = https://github.com/etrepum/kvc
+pkg_kvc_fetch = git
+pkg_kvc_repo = https://github.com/etrepum/kvc
+pkg_kvc_commit = master
+
+PACKAGES += kvlists
+pkg_kvlists_name = kvlists
+pkg_kvlists_description = Lists of key-value pairs (decoded JSON) in Erlang
+pkg_kvlists_homepage = https://github.com/jcomellas/kvlists
+pkg_kvlists_fetch = git
+pkg_kvlists_repo = https://github.com/jcomellas/kvlists
+pkg_kvlists_commit = master
+
+PACKAGES += kvs
+pkg_kvs_name = kvs
+pkg_kvs_description = Container and Iterator
+pkg_kvs_homepage = https://github.com/synrc/kvs
+pkg_kvs_fetch = git
+pkg_kvs_repo = https://github.com/synrc/kvs
+pkg_kvs_commit = master
+
+PACKAGES += lager
+pkg_lager_name = lager
+pkg_lager_description = A logging framework for Erlang/OTP.
+pkg_lager_homepage = https://github.com/erlang-lager/lager
+pkg_lager_fetch = git
+pkg_lager_repo = https://github.com/erlang-lager/lager
+pkg_lager_commit = master
+
+PACKAGES += lager_amqp_backend
+pkg_lager_amqp_backend_name = lager_amqp_backend
+pkg_lager_amqp_backend_description = AMQP RabbitMQ Lager backend
+pkg_lager_amqp_backend_homepage = https://github.com/jbrisbin/lager_amqp_backend
+pkg_lager_amqp_backend_fetch = git
+pkg_lager_amqp_backend_repo = https://github.com/jbrisbin/lager_amqp_backend
+pkg_lager_amqp_backend_commit = master
+
+PACKAGES += lager_syslog
+pkg_lager_syslog_name = lager_syslog
+pkg_lager_syslog_description = Syslog backend for lager
+pkg_lager_syslog_homepage = https://github.com/erlang-lager/lager_syslog
+pkg_lager_syslog_fetch = git
+pkg_lager_syslog_repo = https://github.com/erlang-lager/lager_syslog
+pkg_lager_syslog_commit = master
+
+PACKAGES += lambdapad
+pkg_lambdapad_name = lambdapad
+pkg_lambdapad_description = Static site generator using Erlang. Yes, Erlang.
+pkg_lambdapad_homepage = https://github.com/gar1t/lambdapad
+pkg_lambdapad_fetch = git
+pkg_lambdapad_repo = https://github.com/gar1t/lambdapad
+pkg_lambdapad_commit = master
+
+PACKAGES += lasp
+pkg_lasp_name = lasp
+pkg_lasp_description = A Language for Distributed, Eventually Consistent Computations
+pkg_lasp_homepage = http://lasp-lang.org/
+pkg_lasp_fetch = git
+pkg_lasp_repo = https://github.com/lasp-lang/lasp
+pkg_lasp_commit = master
+
+PACKAGES += lasse
+pkg_lasse_name = lasse
+pkg_lasse_description = SSE handler for Cowboy
+pkg_lasse_homepage = https://github.com/inaka/lasse
+pkg_lasse_fetch = git
+pkg_lasse_repo = https://github.com/inaka/lasse
+pkg_lasse_commit = master
+
+PACKAGES += ldap
+pkg_ldap_name = ldap
+pkg_ldap_description = LDAP server written in Erlang
+pkg_ldap_homepage = https://github.com/spawnproc/ldap
+pkg_ldap_fetch = git
+pkg_ldap_repo = https://github.com/spawnproc/ldap
+pkg_ldap_commit = master
+
+PACKAGES += lethink
+pkg_lethink_name = lethink
+pkg_lethink_description = erlang driver for rethinkdb
+pkg_lethink_homepage = https://github.com/taybin/lethink
+pkg_lethink_fetch = git
+pkg_lethink_repo = https://github.com/taybin/lethink
+pkg_lethink_commit = master
+
+PACKAGES += lfe
+pkg_lfe_name = lfe
+pkg_lfe_description = Lisp Flavoured Erlang (LFE)
+pkg_lfe_homepage = https://github.com/rvirding/lfe
+pkg_lfe_fetch = git
+pkg_lfe_repo = https://github.com/rvirding/lfe
+pkg_lfe_commit = master
+
+PACKAGES += ling
+pkg_ling_name = ling
+pkg_ling_description = Erlang on Xen
+pkg_ling_homepage = https://github.com/cloudozer/ling
+pkg_ling_fetch = git
+pkg_ling_repo = https://github.com/cloudozer/ling
+pkg_ling_commit = master
+
+PACKAGES += live
+pkg_live_name = live
+pkg_live_description = Automated module and configuration reloader.
+pkg_live_homepage = http://ninenines.eu
+pkg_live_fetch = git
+pkg_live_repo = https://github.com/ninenines/live
+pkg_live_commit = master
+
+PACKAGES += lmq
+pkg_lmq_name = lmq
+pkg_lmq_description = Lightweight Message Queue
+pkg_lmq_homepage = https://github.com/iij/lmq
+pkg_lmq_fetch = git
+pkg_lmq_repo = https://github.com/iij/lmq
+pkg_lmq_commit = master
+
+PACKAGES += locker
+pkg_locker_name = locker
+pkg_locker_description = Atomic distributed 'check and set' for short-lived keys
+pkg_locker_homepage = https://github.com/wooga/locker
+pkg_locker_fetch = git
+pkg_locker_repo = https://github.com/wooga/locker
+pkg_locker_commit = master
+
+PACKAGES += locks
+pkg_locks_name = locks
+pkg_locks_description = A scalable, deadlock-resolving resource locker
+pkg_locks_homepage = https://github.com/uwiger/locks
+pkg_locks_fetch = git
+pkg_locks_repo = https://github.com/uwiger/locks
+pkg_locks_commit = master
+
+PACKAGES += log4erl
+pkg_log4erl_name = log4erl
+pkg_log4erl_description = A logger for erlang in the spirit of Log4J.
+pkg_log4erl_homepage = https://github.com/ahmednawras/log4erl
+pkg_log4erl_fetch = git
+pkg_log4erl_repo = https://github.com/ahmednawras/log4erl
+pkg_log4erl_commit = master
+
+PACKAGES += lol
+pkg_lol_name = lol
+pkg_lol_description = Lisp on erLang, and programming is fun again
+pkg_lol_homepage = https://github.com/b0oh/lol
+pkg_lol_fetch = git
+pkg_lol_repo = https://github.com/b0oh/lol
+pkg_lol_commit = master
+
+PACKAGES += lucid
+pkg_lucid_name = lucid
+pkg_lucid_description = HTTP/2 server written in Erlang
+pkg_lucid_homepage = https://github.com/tatsuhiro-t/lucid
+pkg_lucid_fetch = git
+pkg_lucid_repo = https://github.com/tatsuhiro-t/lucid
+pkg_lucid_commit = master
+
+PACKAGES += luerl
+pkg_luerl_name = luerl
+pkg_luerl_description = Lua in Erlang
+pkg_luerl_homepage = https://github.com/rvirding/luerl
+pkg_luerl_fetch = git
+pkg_luerl_repo = https://github.com/rvirding/luerl
+pkg_luerl_commit = develop
+
+PACKAGES += luwak
+pkg_luwak_name = luwak
+pkg_luwak_description = Large-object storage interface for Riak
+pkg_luwak_homepage = https://github.com/basho/luwak
+pkg_luwak_fetch = git
+pkg_luwak_repo = https://github.com/basho/luwak
+pkg_luwak_commit = master
+
+PACKAGES += lux
+pkg_lux_name = lux
+pkg_lux_description = Lux (LUcid eXpect scripting) simplifies test automation and provides an Expect-style execution of commands
+pkg_lux_homepage = https://github.com/hawk/lux
+pkg_lux_fetch = git
+pkg_lux_repo = https://github.com/hawk/lux
+pkg_lux_commit = master
+
+PACKAGES += machi
+pkg_machi_name = machi
+pkg_machi_description = Machi file store
+pkg_machi_homepage = https://github.com/basho/machi
+pkg_machi_fetch = git
+pkg_machi_repo = https://github.com/basho/machi
+pkg_machi_commit = master
+
+PACKAGES += mad
+pkg_mad_name = mad
+pkg_mad_description = Small and Fast Rebar Replacement
+pkg_mad_homepage = https://github.com/synrc/mad
+pkg_mad_fetch = git
+pkg_mad_repo = https://github.com/synrc/mad
+pkg_mad_commit = master
+
+PACKAGES += marina
+pkg_marina_name = marina
+pkg_marina_description = Non-blocking Erlang Cassandra CQL3 client
+pkg_marina_homepage = https://github.com/lpgauth/marina
+pkg_marina_fetch = git
+pkg_marina_repo = https://github.com/lpgauth/marina
+pkg_marina_commit = master
+
+PACKAGES += mavg
+pkg_mavg_name = mavg
+pkg_mavg_description = Erlang :: Exponential moving average library
+pkg_mavg_homepage = https://github.com/EchoTeam/mavg
+pkg_mavg_fetch = git
+pkg_mavg_repo = https://github.com/EchoTeam/mavg
+pkg_mavg_commit = master
+
+PACKAGES += mc_erl
+pkg_mc_erl_name = mc_erl
+pkg_mc_erl_description = mc-erl is a server for Minecraft 1.4.7 written in Erlang.
+pkg_mc_erl_homepage = https://github.com/clonejo/mc-erl
+pkg_mc_erl_fetch = git
+pkg_mc_erl_repo = https://github.com/clonejo/mc-erl
+pkg_mc_erl_commit = master
+
+PACKAGES += mcd
+pkg_mcd_name = mcd
+pkg_mcd_description = Fast memcached protocol client in pure Erlang
+pkg_mcd_homepage = https://github.com/EchoTeam/mcd
+pkg_mcd_fetch = git
+pkg_mcd_repo = https://github.com/EchoTeam/mcd
+pkg_mcd_commit = master
+
+PACKAGES += mcerlang
+pkg_mcerlang_name = mcerlang
+pkg_mcerlang_description = The McErlang model checker for Erlang
+pkg_mcerlang_homepage = https://github.com/fredlund/McErlang
+pkg_mcerlang_fetch = git
+pkg_mcerlang_repo = https://github.com/fredlund/McErlang
+pkg_mcerlang_commit = master
+
+PACKAGES += meck
+pkg_meck_name = meck
+pkg_meck_description = A mocking library for Erlang
+pkg_meck_homepage = https://github.com/eproxus/meck
+pkg_meck_fetch = git
+pkg_meck_repo = https://github.com/eproxus/meck
+pkg_meck_commit = master
+
+PACKAGES += mekao
+pkg_mekao_name = mekao
+pkg_mekao_description = SQL constructor
+pkg_mekao_homepage = https://github.com/ddosia/mekao
+pkg_mekao_fetch = git
+pkg_mekao_repo = https://github.com/ddosia/mekao
+pkg_mekao_commit = master
+
+PACKAGES += memo
+pkg_memo_name = memo
+pkg_memo_description = Erlang memoization server
+pkg_memo_homepage = https://github.com/tuncer/memo
+pkg_memo_fetch = git
+pkg_memo_repo = https://github.com/tuncer/memo
+pkg_memo_commit = master
+
+PACKAGES += merge_index
+pkg_merge_index_name = merge_index
+pkg_merge_index_description = MergeIndex is an Erlang library for storing ordered sets on disk. It is very similar to an SSTable (in Google's Bigtable) or an HFile (in Hadoop).
+pkg_merge_index_homepage = https://github.com/basho/merge_index
+pkg_merge_index_fetch = git
+pkg_merge_index_repo = https://github.com/basho/merge_index
+pkg_merge_index_commit = master
+
+PACKAGES += merl
+pkg_merl_name = merl
+pkg_merl_description = Metaprogramming in Erlang
+pkg_merl_homepage = https://github.com/richcarl/merl
+pkg_merl_fetch = git
+pkg_merl_repo = https://github.com/richcarl/merl
+pkg_merl_commit = master
+
+PACKAGES += mimerl
+pkg_mimerl_name = mimerl
+pkg_mimerl_description = library to handle mimetypes
+pkg_mimerl_homepage = https://github.com/benoitc/mimerl
+pkg_mimerl_fetch = git
+pkg_mimerl_repo = https://github.com/benoitc/mimerl
+pkg_mimerl_commit = master
+
+PACKAGES += mimetypes
+pkg_mimetypes_name = mimetypes
+pkg_mimetypes_description = Erlang MIME types library
+pkg_mimetypes_homepage = https://github.com/spawngrid/mimetypes
+pkg_mimetypes_fetch = git
+pkg_mimetypes_repo = https://github.com/spawngrid/mimetypes
+pkg_mimetypes_commit = master
+
+PACKAGES += mixer
+pkg_mixer_name = mixer
+pkg_mixer_description = Mix in functions from other modules
+pkg_mixer_homepage = https://github.com/chef/mixer
+pkg_mixer_fetch = git
+pkg_mixer_repo = https://github.com/chef/mixer
+pkg_mixer_commit = master
+
+PACKAGES += mochiweb
+pkg_mochiweb_name = mochiweb
+pkg_mochiweb_description = MochiWeb is an Erlang library for building lightweight HTTP servers.
+pkg_mochiweb_homepage = https://github.com/mochi/mochiweb
+pkg_mochiweb_fetch = git
+pkg_mochiweb_repo = https://github.com/mochi/mochiweb
+pkg_mochiweb_commit = master
+
+PACKAGES += mochiweb_xpath
+pkg_mochiweb_xpath_name = mochiweb_xpath
+pkg_mochiweb_xpath_description = XPath support for mochiweb's html parser
+pkg_mochiweb_xpath_homepage = https://github.com/retnuh/mochiweb_xpath
+pkg_mochiweb_xpath_fetch = git
+pkg_mochiweb_xpath_repo = https://github.com/retnuh/mochiweb_xpath
+pkg_mochiweb_xpath_commit = master
+
+PACKAGES += mockgyver
+pkg_mockgyver_name = mockgyver
+pkg_mockgyver_description = A mocking library for Erlang
+pkg_mockgyver_homepage = https://github.com/klajo/mockgyver
+pkg_mockgyver_fetch = git
+pkg_mockgyver_repo = https://github.com/klajo/mockgyver
+pkg_mockgyver_commit = master
+
+PACKAGES += modlib
+pkg_modlib_name = modlib
+pkg_modlib_description = Web framework based on Erlang's inets httpd
+pkg_modlib_homepage = https://github.com/gar1t/modlib
+pkg_modlib_fetch = git
+pkg_modlib_repo = https://github.com/gar1t/modlib
+pkg_modlib_commit = master
+
+PACKAGES += mongodb
+pkg_mongodb_name = mongodb
+pkg_mongodb_description = MongoDB driver for Erlang
+pkg_mongodb_homepage = https://github.com/comtihon/mongodb-erlang
+pkg_mongodb_fetch = git
+pkg_mongodb_repo = https://github.com/comtihon/mongodb-erlang
+pkg_mongodb_commit = master
+
+PACKAGES += mongooseim
+pkg_mongooseim_name = mongooseim
+pkg_mongooseim_description = Jabber / XMPP server with focus on performance and scalability, by Erlang Solutions
+pkg_mongooseim_homepage = https://www.erlang-solutions.com/products/mongooseim-massively-scalable-ejabberd-platform
+pkg_mongooseim_fetch = git
+pkg_mongooseim_repo = https://github.com/esl/MongooseIM
+pkg_mongooseim_commit = master
+
+PACKAGES += moyo
+pkg_moyo_name = moyo
+pkg_moyo_description = Erlang utility functions library
+pkg_moyo_homepage = https://github.com/dwango/moyo
+pkg_moyo_fetch = git
+pkg_moyo_repo = https://github.com/dwango/moyo
+pkg_moyo_commit = master
+
+PACKAGES += msgpack
+pkg_msgpack_name = msgpack
+pkg_msgpack_description = MessagePack (de)serializer implementation for Erlang
+pkg_msgpack_homepage = https://github.com/msgpack/msgpack-erlang
+pkg_msgpack_fetch = git
+pkg_msgpack_repo = https://github.com/msgpack/msgpack-erlang
+pkg_msgpack_commit = master
+
+PACKAGES += mu2
+pkg_mu2_name = mu2
+pkg_mu2_description = Erlang mutation testing tool
+pkg_mu2_homepage = https://github.com/ramsay-t/mu2
+pkg_mu2_fetch = git
+pkg_mu2_repo = https://github.com/ramsay-t/mu2
+pkg_mu2_commit = master
+
+PACKAGES += mustache
+pkg_mustache_name = mustache
+pkg_mustache_description = Mustache template engine for Erlang.
+pkg_mustache_homepage = https://github.com/mojombo/mustache.erl
+pkg_mustache_fetch = git
+pkg_mustache_repo = https://github.com/mojombo/mustache.erl
+pkg_mustache_commit = master
+
+PACKAGES += myproto
+pkg_myproto_name = myproto
+pkg_myproto_description = MySQL Server Protocol in Erlang
+pkg_myproto_homepage = https://github.com/altenwald/myproto
+pkg_myproto_fetch = git
+pkg_myproto_repo = https://github.com/altenwald/myproto
+pkg_myproto_commit = master
+
+PACKAGES += mysql
+pkg_mysql_name = mysql
+pkg_mysql_description = MySQL client library for Erlang/OTP
+pkg_mysql_homepage = https://github.com/mysql-otp/mysql-otp
+pkg_mysql_fetch = git
+pkg_mysql_repo = https://github.com/mysql-otp/mysql-otp
+pkg_mysql_commit = 1.5.1
+
+PACKAGES += n2o
+pkg_n2o_name = n2o
+pkg_n2o_description = WebSocket Application Server
+pkg_n2o_homepage = https://github.com/5HT/n2o
+pkg_n2o_fetch = git
+pkg_n2o_repo = https://github.com/5HT/n2o
+pkg_n2o_commit = master
+
+PACKAGES += nat_upnp
+pkg_nat_upnp_name = nat_upnp
+pkg_nat_upnp_description = Erlang library to map an internal port to an external one using UPnP IGD
+pkg_nat_upnp_homepage = https://github.com/benoitc/nat_upnp
+pkg_nat_upnp_fetch = git
+pkg_nat_upnp_repo = https://github.com/benoitc/nat_upnp
+pkg_nat_upnp_commit = master
+
+PACKAGES += neo4j
+pkg_neo4j_name = neo4j
+pkg_neo4j_description = Erlang client library for Neo4J.
+pkg_neo4j_homepage = https://github.com/dmitriid/neo4j-erlang
+pkg_neo4j_fetch = git
+pkg_neo4j_repo = https://github.com/dmitriid/neo4j-erlang
+pkg_neo4j_commit = master
+
+PACKAGES += neotoma
+pkg_neotoma_name = neotoma
+pkg_neotoma_description = Erlang library and packrat parser-generator for parsing expression grammars.
+pkg_neotoma_homepage = https://github.com/seancribbs/neotoma
+pkg_neotoma_fetch = git
+pkg_neotoma_repo = https://github.com/seancribbs/neotoma
+pkg_neotoma_commit = master
+
+PACKAGES += newrelic
+pkg_newrelic_name = newrelic
+pkg_newrelic_description = Erlang library for sending metrics to New Relic
+pkg_newrelic_homepage = https://github.com/wooga/newrelic-erlang
+pkg_newrelic_fetch = git
+pkg_newrelic_repo = https://github.com/wooga/newrelic-erlang
+pkg_newrelic_commit = master
+
+PACKAGES += nifty
+pkg_nifty_name = nifty
+pkg_nifty_description = Erlang NIF wrapper generator
+pkg_nifty_homepage = https://github.com/parapluu/nifty
+pkg_nifty_fetch = git
+pkg_nifty_repo = https://github.com/parapluu/nifty
+pkg_nifty_commit = master
+
+PACKAGES += nitrogen_core
+pkg_nitrogen_core_name = nitrogen_core
+pkg_nitrogen_core_description = The core Nitrogen library.
+pkg_nitrogen_core_homepage = http://nitrogenproject.com/
+pkg_nitrogen_core_fetch = git
+pkg_nitrogen_core_repo = https://github.com/nitrogen/nitrogen_core
+pkg_nitrogen_core_commit = master
+
+PACKAGES += nkbase
+pkg_nkbase_name = nkbase
+pkg_nkbase_description = NkBASE distributed database
+pkg_nkbase_homepage = https://github.com/Nekso/nkbase
+pkg_nkbase_fetch = git
+pkg_nkbase_repo = https://github.com/Nekso/nkbase
+pkg_nkbase_commit = develop
+
+PACKAGES += nkdocker
+pkg_nkdocker_name = nkdocker
+pkg_nkdocker_description = Erlang Docker client
+pkg_nkdocker_homepage = https://github.com/Nekso/nkdocker
+pkg_nkdocker_fetch = git
+pkg_nkdocker_repo = https://github.com/Nekso/nkdocker
+pkg_nkdocker_commit = master
+
+PACKAGES += nkpacket
+pkg_nkpacket_name = nkpacket
+pkg_nkpacket_description = Generic Erlang transport layer
+pkg_nkpacket_homepage = https://github.com/Nekso/nkpacket
+pkg_nkpacket_fetch = git
+pkg_nkpacket_repo = https://github.com/Nekso/nkpacket
+pkg_nkpacket_commit = master
+
+PACKAGES += nksip
+pkg_nksip_name = nksip
+pkg_nksip_description = Erlang SIP application server
+pkg_nksip_homepage = https://github.com/kalta/nksip
+pkg_nksip_fetch = git
+pkg_nksip_repo = https://github.com/kalta/nksip
+pkg_nksip_commit = master
+
+PACKAGES += nodefinder
+pkg_nodefinder_name = nodefinder
+pkg_nodefinder_description = automatic node discovery via UDP multicast
+pkg_nodefinder_homepage = https://github.com/erlanger/nodefinder
+pkg_nodefinder_fetch = git
+pkg_nodefinder_repo = https://github.com/okeuday/nodefinder
+pkg_nodefinder_commit = master
+
+PACKAGES += nprocreg
+pkg_nprocreg_name = nprocreg
+pkg_nprocreg_description = Minimal Distributed Erlang Process Registry
+pkg_nprocreg_homepage = http://nitrogenproject.com/
+pkg_nprocreg_fetch = git
+pkg_nprocreg_repo = https://github.com/nitrogen/nprocreg
+pkg_nprocreg_commit = master
+
+PACKAGES += oauth
+pkg_oauth_name = oauth
+pkg_oauth_description = An Erlang OAuth 1.0 implementation
+pkg_oauth_homepage = https://github.com/tim/erlang-oauth
+pkg_oauth_fetch = git
+pkg_oauth_repo = https://github.com/tim/erlang-oauth
+pkg_oauth_commit = master
+
+PACKAGES += oauth2
+pkg_oauth2_name = oauth2
+pkg_oauth2_description = Erlang OAuth2 implementation
+pkg_oauth2_homepage = https://github.com/kivra/oauth2
+pkg_oauth2_fetch = git
+pkg_oauth2_repo = https://github.com/kivra/oauth2
+pkg_oauth2_commit = master
+
+PACKAGES += observer_cli
+pkg_observer_cli_name = observer_cli
+pkg_observer_cli_description = Visualize Erlang/Elixir Nodes On The Command Line
+pkg_observer_cli_homepage = http://zhongwencool.github.io/observer_cli
+pkg_observer_cli_fetch = git
+pkg_observer_cli_repo = https://github.com/zhongwencool/observer_cli
+pkg_observer_cli_commit = master
+
+PACKAGES += octopus
+pkg_octopus_name = octopus
+pkg_octopus_description = Small and flexible pool manager written in Erlang
+pkg_octopus_homepage = https://github.com/erlangbureau/octopus
+pkg_octopus_fetch = git
+pkg_octopus_repo = https://github.com/erlangbureau/octopus
+pkg_octopus_commit = master
+
+PACKAGES += of_protocol
+pkg_of_protocol_name = of_protocol
+pkg_of_protocol_description = OpenFlow Protocol Library for Erlang
+pkg_of_protocol_homepage = https://github.com/FlowForwarding/of_protocol
+pkg_of_protocol_fetch = git
+pkg_of_protocol_repo = https://github.com/FlowForwarding/of_protocol
+pkg_of_protocol_commit = master
+
+PACKAGES += opencouch
+pkg_opencouch_name = couch
+pkg_opencouch_description = An embeddable document-oriented database compatible with Apache CouchDB
+pkg_opencouch_homepage = https://github.com/benoitc/opencouch
+pkg_opencouch_fetch = git
+pkg_opencouch_repo = https://github.com/benoitc/opencouch
+pkg_opencouch_commit = master
+
+PACKAGES += openflow
+pkg_openflow_name = openflow
+pkg_openflow_description = An OpenFlow controller written in pure erlang
+pkg_openflow_homepage = https://github.com/renatoaguiar/erlang-openflow
+pkg_openflow_fetch = git
+pkg_openflow_repo = https://github.com/renatoaguiar/erlang-openflow
+pkg_openflow_commit = master
+
+PACKAGES += openid
+pkg_openid_name = openid
+pkg_openid_description = Erlang OpenID
+pkg_openid_homepage = https://github.com/brendonh/erl_openid
+pkg_openid_fetch = git
+pkg_openid_repo = https://github.com/brendonh/erl_openid
+pkg_openid_commit = master
+
+PACKAGES += openpoker
+pkg_openpoker_name = openpoker
+pkg_openpoker_description = Genesis Texas hold'em Game Server
+pkg_openpoker_homepage = https://github.com/hpyhacking/openpoker
+pkg_openpoker_fetch = git
+pkg_openpoker_repo = https://github.com/hpyhacking/openpoker
+pkg_openpoker_commit = master
+
+PACKAGES += otpbp
+pkg_otpbp_name = otpbp
+pkg_otpbp_description = Parse transform for using new OTP functions in old Erlang/OTP releases (R15, R16, 17, 18, 19)
+pkg_otpbp_homepage = https://github.com/Ledest/otpbp
+pkg_otpbp_fetch = git
+pkg_otpbp_repo = https://github.com/Ledest/otpbp
+pkg_otpbp_commit = master
+
+PACKAGES += pal
+pkg_pal_name = pal
+pkg_pal_description = Pragmatic Authentication Library
+pkg_pal_homepage = https://github.com/manifest/pal
+pkg_pal_fetch = git
+pkg_pal_repo = https://github.com/manifest/pal
+pkg_pal_commit = master
+
+PACKAGES += parse_trans
+pkg_parse_trans_name = parse_trans
+pkg_parse_trans_description = Parse transform utilities for Erlang
+pkg_parse_trans_homepage = https://github.com/uwiger/parse_trans
+pkg_parse_trans_fetch = git
+pkg_parse_trans_repo = https://github.com/uwiger/parse_trans
+pkg_parse_trans_commit = master
+
+PACKAGES += parsexml
+pkg_parsexml_name = parsexml
+pkg_parsexml_description = Simple DOM XML parser with a convenient and very simple API
+pkg_parsexml_homepage = https://github.com/maxlapshin/parsexml
+pkg_parsexml_fetch = git
+pkg_parsexml_repo = https://github.com/maxlapshin/parsexml
+pkg_parsexml_commit = master
+
+PACKAGES += partisan
+pkg_partisan_name = partisan
+pkg_partisan_description = High-performance, high-scalability distributed computing with Erlang and Elixir.
+pkg_partisan_homepage = http://partisan.cloud
+pkg_partisan_fetch = git
+pkg_partisan_repo = https://github.com/lasp-lang/partisan
+pkg_partisan_commit = master
+
+PACKAGES += pegjs
+pkg_pegjs_name = pegjs
+pkg_pegjs_description = An implementation of PEG.js grammar for Erlang.
+pkg_pegjs_homepage = https://github.com/dmitriid/pegjs
+pkg_pegjs_fetch = git
+pkg_pegjs_repo = https://github.com/dmitriid/pegjs
+pkg_pegjs_commit = master
+
+PACKAGES += percept2
+pkg_percept2_name = percept2
+pkg_percept2_description = Concurrent profiling tool for Erlang
+pkg_percept2_homepage = https://github.com/huiqing/percept2
+pkg_percept2_fetch = git
+pkg_percept2_repo = https://github.com/huiqing/percept2
+pkg_percept2_commit = master
+
+PACKAGES += pgo
+pkg_pgo_name = pgo
+pkg_pgo_description = Erlang Postgres client and connection pool
+pkg_pgo_homepage = https://github.com/erleans/pgo
+pkg_pgo_fetch = git
+pkg_pgo_repo = https://github.com/erleans/pgo.git
+pkg_pgo_commit = master
+
+PACKAGES += pgsql
+pkg_pgsql_name = pgsql
+pkg_pgsql_description = Erlang PostgreSQL driver
+pkg_pgsql_homepage = https://github.com/semiocast/pgsql
+pkg_pgsql_fetch = git
+pkg_pgsql_repo = https://github.com/semiocast/pgsql
+pkg_pgsql_commit = master
+
+PACKAGES += pkgx
+pkg_pkgx_name = pkgx
+pkg_pkgx_description = Build .deb packages from Erlang releases
+pkg_pkgx_homepage = https://github.com/arjan/pkgx
+pkg_pkgx_fetch = git
+pkg_pkgx_repo = https://github.com/arjan/pkgx
+pkg_pkgx_commit = master
+
+PACKAGES += pkt
+pkg_pkt_name = pkt
+pkg_pkt_description = Erlang network protocol library
+pkg_pkt_homepage = https://github.com/msantos/pkt
+pkg_pkt_fetch = git
+pkg_pkt_repo = https://github.com/msantos/pkt
+pkg_pkt_commit = master
+
+PACKAGES += plain_fsm
+pkg_plain_fsm_name = plain_fsm
+pkg_plain_fsm_description = A behaviour/support library for writing plain Erlang FSMs.
+pkg_plain_fsm_homepage = https://github.com/uwiger/plain_fsm
+pkg_plain_fsm_fetch = git
+pkg_plain_fsm_repo = https://github.com/uwiger/plain_fsm
+pkg_plain_fsm_commit = master
+
+PACKAGES += plumtree
+pkg_plumtree_name = plumtree
+pkg_plumtree_description = Epidemic Broadcast Trees
+pkg_plumtree_homepage = https://github.com/helium/plumtree
+pkg_plumtree_fetch = git
+pkg_plumtree_repo = https://github.com/helium/plumtree
+pkg_plumtree_commit = master
+
+PACKAGES += pmod_transform
+pkg_pmod_transform_name = pmod_transform
+pkg_pmod_transform_description = Parse transform for parameterized modules
+pkg_pmod_transform_homepage = https://github.com/erlang/pmod_transform
+pkg_pmod_transform_fetch = git
+pkg_pmod_transform_repo = https://github.com/erlang/pmod_transform
+pkg_pmod_transform_commit = master
+
+PACKAGES += pobox
+pkg_pobox_name = pobox
+pkg_pobox_description = External buffer processes to protect against mailbox overflow in Erlang
+pkg_pobox_homepage = https://github.com/ferd/pobox
+pkg_pobox_fetch = git
+pkg_pobox_repo = https://github.com/ferd/pobox
+pkg_pobox_commit = master
+
+PACKAGES += ponos
+pkg_ponos_name = ponos
+pkg_ponos_description = ponos is a simple yet powerful load generator written in erlang
+pkg_ponos_homepage = https://github.com/klarna/ponos
+pkg_ponos_fetch = git
+pkg_ponos_repo = https://github.com/klarna/ponos
+pkg_ponos_commit = master
+
+PACKAGES += poolboy
+pkg_poolboy_name = poolboy
+pkg_poolboy_description = A hunky Erlang worker pool factory
+pkg_poolboy_homepage = https://github.com/devinus/poolboy
+pkg_poolboy_fetch = git
+pkg_poolboy_repo = https://github.com/devinus/poolboy
+pkg_poolboy_commit = master
+
+PACKAGES += pooler
+pkg_pooler_name = pooler
+pkg_pooler_description = An OTP Process Pool Application
+pkg_pooler_homepage = https://github.com/seth/pooler
+pkg_pooler_fetch = git
+pkg_pooler_repo = https://github.com/seth/pooler
+pkg_pooler_commit = master
+
+PACKAGES += pqueue
+pkg_pqueue_name = pqueue
+pkg_pqueue_description = Erlang Priority Queues
+pkg_pqueue_homepage = https://github.com/okeuday/pqueue
+pkg_pqueue_fetch = git
+pkg_pqueue_repo = https://github.com/okeuday/pqueue
+pkg_pqueue_commit = master
+
+PACKAGES += procket
+pkg_procket_name = procket
+pkg_procket_description = Erlang interface to low level socket operations
+pkg_procket_homepage = http://blog.listincomprehension.com/search/label/procket
+pkg_procket_fetch = git
+pkg_procket_repo = https://github.com/msantos/procket
+pkg_procket_commit = master
+
+PACKAGES += prometheus
+pkg_prometheus_name = prometheus
+pkg_prometheus_description = Prometheus.io client in Erlang
+pkg_prometheus_homepage = https://github.com/deadtrickster/prometheus.erl
+pkg_prometheus_fetch = git
+pkg_prometheus_repo = https://github.com/deadtrickster/prometheus.erl
+pkg_prometheus_commit = master
+
+PACKAGES += prop
+pkg_prop_name = prop
+pkg_prop_description = An Erlang code scaffolding and generator system.
+pkg_prop_homepage = https://github.com/nuex/prop
+pkg_prop_fetch = git
+pkg_prop_repo = https://github.com/nuex/prop
+pkg_prop_commit = master
+
+PACKAGES += proper
+pkg_proper_name = proper
+pkg_proper_description = PropEr: a QuickCheck-inspired property-based testing tool for Erlang.
+pkg_proper_homepage = http://proper.softlab.ntua.gr
+pkg_proper_fetch = git
+pkg_proper_repo = https://github.com/manopapad/proper
+pkg_proper_commit = master
+
+PACKAGES += props
+pkg_props_name = props
+pkg_props_description = Property structure library
+pkg_props_homepage = https://github.com/greyarea/props
+pkg_props_fetch = git
+pkg_props_repo = https://github.com/greyarea/props
+pkg_props_commit = master
+
+PACKAGES += protobuffs
+pkg_protobuffs_name = protobuffs
+pkg_protobuffs_description = An implementation of Google's Protocol Buffers for Erlang, based on ngerakines/erlang_protobuffs.
+pkg_protobuffs_homepage = https://github.com/basho/erlang_protobuffs
+pkg_protobuffs_fetch = git
+pkg_protobuffs_repo = https://github.com/basho/erlang_protobuffs
+pkg_protobuffs_commit = master
+
+PACKAGES += psycho
+pkg_psycho_name = psycho
+pkg_psycho_description = HTTP server that provides a WSGI-like interface for applications and middleware.
+pkg_psycho_homepage = https://github.com/gar1t/psycho
+pkg_psycho_fetch = git
+pkg_psycho_repo = https://github.com/gar1t/psycho
+pkg_psycho_commit = master
+
+PACKAGES += purity
+pkg_purity_name = purity
+pkg_purity_description = A side-effect analyzer for Erlang
+pkg_purity_homepage = https://github.com/mpitid/purity
+pkg_purity_fetch = git
+pkg_purity_repo = https://github.com/mpitid/purity
+pkg_purity_commit = master
+
+PACKAGES += push_service
+pkg_push_service_name = push_service
+pkg_push_service_description = Push service
+pkg_push_service_homepage = https://github.com/hairyhum/push_service
+pkg_push_service_fetch = git
+pkg_push_service_repo = https://github.com/hairyhum/push_service
+pkg_push_service_commit = master
+
+PACKAGES += qdate
+pkg_qdate_name = qdate
+pkg_qdate_description = Date, time, and timezone parsing, formatting, and conversion for Erlang.
+pkg_qdate_homepage = https://github.com/choptastic/qdate
+pkg_qdate_fetch = git
+pkg_qdate_repo = https://github.com/choptastic/qdate
+pkg_qdate_commit = master
+
+PACKAGES += qrcode
+pkg_qrcode_name = qrcode
+pkg_qrcode_description = QR Code encoder in Erlang
+pkg_qrcode_homepage = https://github.com/komone/qrcode
+pkg_qrcode_fetch = git
+pkg_qrcode_repo = https://github.com/komone/qrcode
+pkg_qrcode_commit = master
+
+PACKAGES += quest
+pkg_quest_name = quest
+pkg_quest_description = Learn Erlang through this set of challenges. An interactive system for getting to know Erlang.
+pkg_quest_homepage = https://github.com/eriksoe/ErlangQuest
+pkg_quest_fetch = git
+pkg_quest_repo = https://github.com/eriksoe/ErlangQuest
+pkg_quest_commit = master
+
+PACKAGES += quickrand
+pkg_quickrand_name = quickrand
+pkg_quickrand_description = Quick Erlang Random Number Generation
+pkg_quickrand_homepage = https://github.com/okeuday/quickrand
+pkg_quickrand_fetch = git
+pkg_quickrand_repo = https://github.com/okeuday/quickrand
+pkg_quickrand_commit = master
+
+PACKAGES += rabbit
+pkg_rabbit_name = rabbit
+pkg_rabbit_description = RabbitMQ Server
+pkg_rabbit_homepage = https://www.rabbitmq.com/
+pkg_rabbit_fetch = git
+pkg_rabbit_repo = https://github.com/rabbitmq/rabbitmq-server.git
+pkg_rabbit_commit = master
+
+PACKAGES += rabbit_exchange_type_riak
+pkg_rabbit_exchange_type_riak_name = rabbit_exchange_type_riak
+pkg_rabbit_exchange_type_riak_description = Custom RabbitMQ exchange type for sticking messages in Riak
+pkg_rabbit_exchange_type_riak_homepage = https://github.com/jbrisbin/riak-exchange
+pkg_rabbit_exchange_type_riak_fetch = git
+pkg_rabbit_exchange_type_riak_repo = https://github.com/jbrisbin/riak-exchange
+pkg_rabbit_exchange_type_riak_commit = master
+
+PACKAGES += rack
+pkg_rack_name = rack
+pkg_rack_description = Rack handler for erlang
+pkg_rack_homepage = https://github.com/erlyvideo/rack
+pkg_rack_fetch = git
+pkg_rack_repo = https://github.com/erlyvideo/rack
+pkg_rack_commit = master
+
+PACKAGES += radierl
+pkg_radierl_name = radierl
+pkg_radierl_description = RADIUS protocol stack implemented in Erlang.
+pkg_radierl_homepage = https://github.com/vances/radierl
+pkg_radierl_fetch = git
+pkg_radierl_repo = https://github.com/vances/radierl
+pkg_radierl_commit = master
+
+PACKAGES += rafter
+pkg_rafter_name = rafter
+pkg_rafter_description = An Erlang library application which implements the Raft consensus protocol
+pkg_rafter_homepage = https://github.com/andrewjstone/rafter
+pkg_rafter_fetch = git
+pkg_rafter_repo = https://github.com/andrewjstone/rafter
+pkg_rafter_commit = master
+
+PACKAGES += ranch
+pkg_ranch_name = ranch
+pkg_ranch_description = Socket acceptor pool for TCP protocols.
+pkg_ranch_homepage = http://ninenines.eu
+pkg_ranch_fetch = git
+pkg_ranch_repo = https://github.com/ninenines/ranch
+pkg_ranch_commit = 1.2.1
+
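+# Pinned defaults such as pkg_ranch_commit above can be overridden from a
+# project Makefile with dep_* variables set before including erlang.mk, e.g.
+# (version numbers and the fork URL here are illustrative):
+#
+#     DEPS = ranch
+#     dep_ranch_commit = 1.7.1
+#     # or fetch from a fork entirely:
+#     dep_ranch = git https://github.com/example/ranch 1.7.1
+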
+PACKAGES += rbeacon
+pkg_rbeacon_name = rbeacon
+pkg_rbeacon_description = LAN discovery and presence in Erlang.
+pkg_rbeacon_homepage = https://github.com/refuge/rbeacon
+pkg_rbeacon_fetch = git
+pkg_rbeacon_repo = https://github.com/refuge/rbeacon
+pkg_rbeacon_commit = master
+
+PACKAGES += rebar
+pkg_rebar_name = rebar
+pkg_rebar_description = Erlang build tool that makes it easy to compile and test Erlang applications, port drivers and releases.
+pkg_rebar_homepage = http://www.rebar3.org
+pkg_rebar_fetch = git
+pkg_rebar_repo = https://github.com/rebar/rebar3
+pkg_rebar_commit = master
+
+PACKAGES += rebus
+pkg_rebus_name = rebus
+pkg_rebus_description = A stupid-simple, internal pub/sub event bus written in and for Erlang.
+pkg_rebus_homepage = https://github.com/olle/rebus
+pkg_rebus_fetch = git
+pkg_rebus_repo = https://github.com/olle/rebus
+pkg_rebus_commit = master
+
+PACKAGES += rec2json
+pkg_rec2json_name = rec2json
+pkg_rec2json_description = Compile erlang record definitions into modules to convert them to/from json easily.
+pkg_rec2json_homepage = https://github.com/lordnull/rec2json
+pkg_rec2json_fetch = git
+pkg_rec2json_repo = https://github.com/lordnull/rec2json
+pkg_rec2json_commit = master
+
+PACKAGES += recon
+pkg_recon_name = recon
+pkg_recon_description = Collection of functions and scripts to debug Erlang in production.
+pkg_recon_homepage = https://github.com/ferd/recon
+pkg_recon_fetch = git
+pkg_recon_repo = https://github.com/ferd/recon
+pkg_recon_commit = master
+
+PACKAGES += record_info
+pkg_record_info_name = record_info
+pkg_record_info_description = Convert between record and proplist
+pkg_record_info_homepage = https://github.com/bipthelin/erlang-record_info
+pkg_record_info_fetch = git
+pkg_record_info_repo = https://github.com/bipthelin/erlang-record_info
+pkg_record_info_commit = master
+
+PACKAGES += redgrid
+pkg_redgrid_name = redgrid
+pkg_redgrid_description = automatic Erlang node discovery via redis
+pkg_redgrid_homepage = https://github.com/jkvor/redgrid
+pkg_redgrid_fetch = git
+pkg_redgrid_repo = https://github.com/jkvor/redgrid
+pkg_redgrid_commit = master
+
+PACKAGES += redo
+pkg_redo_name = redo
+pkg_redo_description = pipelined erlang redis client
+pkg_redo_homepage = https://github.com/jkvor/redo
+pkg_redo_fetch = git
+pkg_redo_repo = https://github.com/jkvor/redo
+pkg_redo_commit = master
+
+PACKAGES += reload_mk
+pkg_reload_mk_name = reload_mk
+pkg_reload_mk_description = Live reload plugin for erlang.mk.
+pkg_reload_mk_homepage = https://github.com/bullno1/reload.mk
+pkg_reload_mk_fetch = git
+pkg_reload_mk_repo = https://github.com/bullno1/reload.mk
+pkg_reload_mk_commit = master
+
+PACKAGES += reltool_util
+pkg_reltool_util_name = reltool_util
+pkg_reltool_util_description = Erlang reltool utility application
+pkg_reltool_util_homepage = https://github.com/okeuday/reltool_util
+pkg_reltool_util_fetch = git
+pkg_reltool_util_repo = https://github.com/okeuday/reltool_util
+pkg_reltool_util_commit = master
+
+PACKAGES += relx
+pkg_relx_name = relx
+pkg_relx_description = Sane, simple release creation for Erlang
+pkg_relx_homepage = https://github.com/erlware/relx
+pkg_relx_fetch = git
+pkg_relx_repo = https://github.com/erlware/relx
+pkg_relx_commit = master
+
+PACKAGES += resource_discovery
+pkg_resource_discovery_name = resource_discovery
+pkg_resource_discovery_description = An application used to dynamically discover resources present in an Erlang node cluster.
+pkg_resource_discovery_homepage = http://erlware.org/
+pkg_resource_discovery_fetch = git
+pkg_resource_discovery_repo = https://github.com/erlware/resource_discovery
+pkg_resource_discovery_commit = master
+
+PACKAGES += restc
+pkg_restc_name = restc
+pkg_restc_description = Erlang REST client
+pkg_restc_homepage = https://github.com/kivra/restclient
+pkg_restc_fetch = git
+pkg_restc_repo = https://github.com/kivra/restclient
+pkg_restc_commit = master
+
+PACKAGES += rfc4627_jsonrpc
+pkg_rfc4627_jsonrpc_name = rfc4627_jsonrpc
+pkg_rfc4627_jsonrpc_description = Erlang RFC4627 (JSON) codec and JSON-RPC server implementation.
+pkg_rfc4627_jsonrpc_homepage = https://github.com/tonyg/erlang-rfc4627
+pkg_rfc4627_jsonrpc_fetch = git
+pkg_rfc4627_jsonrpc_repo = https://github.com/tonyg/erlang-rfc4627
+pkg_rfc4627_jsonrpc_commit = master
+
+PACKAGES += riak_control
+pkg_riak_control_name = riak_control
+pkg_riak_control_description = Webmachine-based administration interface for Riak.
+pkg_riak_control_homepage = https://github.com/basho/riak_control
+pkg_riak_control_fetch = git
+pkg_riak_control_repo = https://github.com/basho/riak_control
+pkg_riak_control_commit = master
+
+PACKAGES += riak_core
+pkg_riak_core_name = riak_core
+pkg_riak_core_description = Distributed systems infrastructure used by Riak.
+pkg_riak_core_homepage = https://github.com/basho/riak_core
+pkg_riak_core_fetch = git
+pkg_riak_core_repo = https://github.com/basho/riak_core
+pkg_riak_core_commit = master
+
+PACKAGES += riak_dt
+pkg_riak_dt_name = riak_dt
+pkg_riak_dt_description = Convergent replicated datatypes in Erlang
+pkg_riak_dt_homepage = https://github.com/basho/riak_dt
+pkg_riak_dt_fetch = git
+pkg_riak_dt_repo = https://github.com/basho/riak_dt
+pkg_riak_dt_commit = master
+
+PACKAGES += riak_ensemble
+pkg_riak_ensemble_name = riak_ensemble
+pkg_riak_ensemble_description = Multi-Paxos framework in Erlang
+pkg_riak_ensemble_homepage = https://github.com/basho/riak_ensemble
+pkg_riak_ensemble_fetch = git
+pkg_riak_ensemble_repo = https://github.com/basho/riak_ensemble
+pkg_riak_ensemble_commit = master
+
+PACKAGES += riak_kv
+pkg_riak_kv_name = riak_kv
+pkg_riak_kv_description = Riak Key/Value Store
+pkg_riak_kv_homepage = https://github.com/basho/riak_kv
+pkg_riak_kv_fetch = git
+pkg_riak_kv_repo = https://github.com/basho/riak_kv
+pkg_riak_kv_commit = master
+
+PACKAGES += riak_pg
+pkg_riak_pg_name = riak_pg
+pkg_riak_pg_description = Distributed process groups with riak_core.
+pkg_riak_pg_homepage = https://github.com/cmeiklejohn/riak_pg
+pkg_riak_pg_fetch = git
+pkg_riak_pg_repo = https://github.com/cmeiklejohn/riak_pg
+pkg_riak_pg_commit = master
+
+PACKAGES += riak_pipe
+pkg_riak_pipe_name = riak_pipe
+pkg_riak_pipe_description = Riak Pipelines
+pkg_riak_pipe_homepage = https://github.com/basho/riak_pipe
+pkg_riak_pipe_fetch = git
+pkg_riak_pipe_repo = https://github.com/basho/riak_pipe
+pkg_riak_pipe_commit = master
+
+PACKAGES += riak_sysmon
+pkg_riak_sysmon_name = riak_sysmon
+pkg_riak_sysmon_description = Simple OTP app for managing Erlang VM system_monitor event messages
+pkg_riak_sysmon_homepage = https://github.com/basho/riak_sysmon
+pkg_riak_sysmon_fetch = git
+pkg_riak_sysmon_repo = https://github.com/basho/riak_sysmon
+pkg_riak_sysmon_commit = master
+
+PACKAGES += riak_test
+pkg_riak_test_name = riak_test
+pkg_riak_test_description = I'm in your cluster, testing your riaks
+pkg_riak_test_homepage = https://github.com/basho/riak_test
+pkg_riak_test_fetch = git
+pkg_riak_test_repo = https://github.com/basho/riak_test
+pkg_riak_test_commit = master
+
+PACKAGES += riakc
+pkg_riakc_name = riakc
+pkg_riakc_description = Erlang clients for Riak.
+pkg_riakc_homepage = https://github.com/basho/riak-erlang-client
+pkg_riakc_fetch = git
+pkg_riakc_repo = https://github.com/basho/riak-erlang-client
+pkg_riakc_commit = master
+
+PACKAGES += riakhttpc
+pkg_riakhttpc_name = riakhttpc
+pkg_riakhttpc_description = Riak Erlang client using the HTTP interface
+pkg_riakhttpc_homepage = https://github.com/basho/riak-erlang-http-client
+pkg_riakhttpc_fetch = git
+pkg_riakhttpc_repo = https://github.com/basho/riak-erlang-http-client
+pkg_riakhttpc_commit = master
+
+PACKAGES += riaknostic
+pkg_riaknostic_name = riaknostic
+pkg_riaknostic_description = A diagnostic tool for Riak installations, to find common errors asap
+pkg_riaknostic_homepage = https://github.com/basho/riaknostic
+pkg_riaknostic_fetch = git
+pkg_riaknostic_repo = https://github.com/basho/riaknostic
+pkg_riaknostic_commit = master
+
+PACKAGES += riakpool
+pkg_riakpool_name = riakpool
+pkg_riakpool_description = Erlang Riak client pool
+pkg_riakpool_homepage = https://github.com/dweldon/riakpool
+pkg_riakpool_fetch = git
+pkg_riakpool_repo = https://github.com/dweldon/riakpool
+pkg_riakpool_commit = master
+
+PACKAGES += rivus_cep
+pkg_rivus_cep_name = rivus_cep
+pkg_rivus_cep_description = Complex event processing in Erlang
+pkg_rivus_cep_homepage = https://github.com/vascokk/rivus_cep
+pkg_rivus_cep_fetch = git
+pkg_rivus_cep_repo = https://github.com/vascokk/rivus_cep
+pkg_rivus_cep_commit = master
+
+PACKAGES += rlimit
+pkg_rlimit_name = rlimit
+pkg_rlimit_description = Magnus Klaar's rate limiter code from etorrent
+pkg_rlimit_homepage = https://github.com/jlouis/rlimit
+pkg_rlimit_fetch = git
+pkg_rlimit_repo = https://github.com/jlouis/rlimit
+pkg_rlimit_commit = master
+
+PACKAGES += rust_mk
+pkg_rust_mk_name = rust_mk
+pkg_rust_mk_description = Build Rust crates in an Erlang application
+pkg_rust_mk_homepage = https://github.com/goertzenator/rust.mk
+pkg_rust_mk_fetch = git
+pkg_rust_mk_repo = https://github.com/goertzenator/rust.mk
+pkg_rust_mk_commit = master
+
+PACKAGES += safetyvalve
+pkg_safetyvalve_name = safetyvalve
+pkg_safetyvalve_description = A safety valve for your Erlang node
+pkg_safetyvalve_homepage = https://github.com/jlouis/safetyvalve
+pkg_safetyvalve_fetch = git
+pkg_safetyvalve_repo = https://github.com/jlouis/safetyvalve
+pkg_safetyvalve_commit = master
+
+PACKAGES += seestar
+pkg_seestar_name = seestar
+pkg_seestar_description = The Erlang client for Cassandra 1.2+ binary protocol
+pkg_seestar_homepage = https://github.com/iamaleksey/seestar
+pkg_seestar_fetch = git
+pkg_seestar_repo = https://github.com/iamaleksey/seestar
+pkg_seestar_commit = master
+
+PACKAGES += service
+pkg_service_name = service
+pkg_service_description = A minimal Erlang behavior for creating CloudI internal services
+pkg_service_homepage = http://cloudi.org/
+pkg_service_fetch = git
+pkg_service_repo = https://github.com/CloudI/service
+pkg_service_commit = master
+
+PACKAGES += setup
+pkg_setup_name = setup
+pkg_setup_description = Generic setup utility for Erlang-based systems
+pkg_setup_homepage = https://github.com/uwiger/setup
+pkg_setup_fetch = git
+pkg_setup_repo = https://github.com/uwiger/setup
+pkg_setup_commit = master
+
+PACKAGES += sext
+pkg_sext_name = sext
+pkg_sext_description = Sortable Erlang Term Serialization
+pkg_sext_homepage = https://github.com/uwiger/sext
+pkg_sext_fetch = git
+pkg_sext_repo = https://github.com/uwiger/sext
+pkg_sext_commit = master
+
+PACKAGES += sfmt
+pkg_sfmt_name = sfmt
+pkg_sfmt_description = SFMT pseudo random number generator for Erlang.
+pkg_sfmt_homepage = https://github.com/jj1bdx/sfmt-erlang
+pkg_sfmt_fetch = git
+pkg_sfmt_repo = https://github.com/jj1bdx/sfmt-erlang
+pkg_sfmt_commit = master
+
+PACKAGES += sgte
+pkg_sgte_name = sgte
+pkg_sgte_description = A simple Erlang Template Engine
+pkg_sgte_homepage = https://github.com/filippo/sgte
+pkg_sgte_fetch = git
+pkg_sgte_repo = https://github.com/filippo/sgte
+pkg_sgte_commit = master
+
+PACKAGES += sheriff
+pkg_sheriff_name = sheriff
+pkg_sheriff_description = Parse transform for type based validation.
+pkg_sheriff_homepage = http://ninenines.eu
+pkg_sheriff_fetch = git
+pkg_sheriff_repo = https://github.com/extend/sheriff
+pkg_sheriff_commit = master
+
+PACKAGES += shotgun
+pkg_shotgun_name = shotgun
+pkg_shotgun_description = better than just a gun
+pkg_shotgun_homepage = https://github.com/inaka/shotgun
+pkg_shotgun_fetch = git
+pkg_shotgun_repo = https://github.com/inaka/shotgun
+pkg_shotgun_commit = master
+
+PACKAGES += sidejob
+pkg_sidejob_name = sidejob
+pkg_sidejob_description = Parallel worker and capacity limiting library for Erlang
+pkg_sidejob_homepage = https://github.com/basho/sidejob
+pkg_sidejob_fetch = git
+pkg_sidejob_repo = https://github.com/basho/sidejob
+pkg_sidejob_commit = master
+
+PACKAGES += sieve
+pkg_sieve_name = sieve
+pkg_sieve_description = Sieve is a simple TCP routing proxy (layer 7) in Erlang
+pkg_sieve_homepage = https://github.com/benoitc/sieve
+pkg_sieve_fetch = git
+pkg_sieve_repo = https://github.com/benoitc/sieve
+pkg_sieve_commit = master
+
+PACKAGES += sighandler
+pkg_sighandler_name = sighandler
+pkg_sighandler_description = Handle UNIX signals in Erlang
+pkg_sighandler_homepage = https://github.com/jkingsbery/sighandler
+pkg_sighandler_fetch = git
+pkg_sighandler_repo = https://github.com/jkingsbery/sighandler
+pkg_sighandler_commit = master
+
+PACKAGES += simhash
+pkg_simhash_name = simhash
+pkg_simhash_description = Simhashing for Erlang -- hashing algorithm to find near-duplicates in binary data.
+pkg_simhash_homepage = https://github.com/ferd/simhash
+pkg_simhash_fetch = git
+pkg_simhash_repo = https://github.com/ferd/simhash
+pkg_simhash_commit = master
+
+PACKAGES += simple_bridge
+pkg_simple_bridge_name = simple_bridge
+pkg_simple_bridge_description = A simple, standardized interface library to Erlang HTTP Servers.
+pkg_simple_bridge_homepage = https://github.com/nitrogen/simple_bridge
+pkg_simple_bridge_fetch = git
+pkg_simple_bridge_repo = https://github.com/nitrogen/simple_bridge
+pkg_simple_bridge_commit = master
+
+PACKAGES += simple_oauth2
+pkg_simple_oauth2_name = simple_oauth2
+pkg_simple_oauth2_description = Simple Erlang OAuth2 client module for any HTTP server framework (Google, Facebook, Yandex, Vkontakte are preconfigured)
+pkg_simple_oauth2_homepage = https://github.com/virtan/simple_oauth2
+pkg_simple_oauth2_fetch = git
+pkg_simple_oauth2_repo = https://github.com/virtan/simple_oauth2
+pkg_simple_oauth2_commit = master
+
+PACKAGES += skel
+pkg_skel_name = skel
+pkg_skel_description = A Streaming Process-based Skeleton Library for Erlang
+pkg_skel_homepage = https://github.com/ParaPhrase/skel
+pkg_skel_fetch = git
+pkg_skel_repo = https://github.com/ParaPhrase/skel
+pkg_skel_commit = master
+
+PACKAGES += slack
+pkg_slack_name = slack
+pkg_slack_description = Minimal slack notification OTP library.
+pkg_slack_homepage = https://github.com/DonBranson/slack
+pkg_slack_fetch = git
+pkg_slack_repo = https://github.com/DonBranson/slack.git
+pkg_slack_commit = master
+
+PACKAGES += smother
+pkg_smother_name = smother
+pkg_smother_description = Extended code coverage metrics for Erlang.
+pkg_smother_homepage = https://ramsay-t.github.io/Smother/
+pkg_smother_fetch = git
+pkg_smother_repo = https://github.com/ramsay-t/Smother
+pkg_smother_commit = master
+
+PACKAGES += snappyer
+pkg_snappyer_name = snappyer
+pkg_snappyer_description = Snappy as a NIF for Erlang
+pkg_snappyer_homepage = https://github.com/zmstone/snappyer
+pkg_snappyer_fetch = git
+pkg_snappyer_repo = https://github.com/zmstone/snappyer.git
+pkg_snappyer_commit = master
+
+PACKAGES += social
+pkg_social_name = social
+pkg_social_description = Cowboy handler for social login via OAuth2 providers
+pkg_social_homepage = https://github.com/dvv/social
+pkg_social_fetch = git
+pkg_social_repo = https://github.com/dvv/social
+pkg_social_commit = master
+
+PACKAGES += spapi_router
+pkg_spapi_router_name = spapi_router
+pkg_spapi_router_description = Partially-connected Erlang clustering
+pkg_spapi_router_homepage = https://github.com/spilgames/spapi-router
+pkg_spapi_router_fetch = git
+pkg_spapi_router_repo = https://github.com/spilgames/spapi-router
+pkg_spapi_router_commit = master
+
+PACKAGES += sqerl
+pkg_sqerl_name = sqerl
+pkg_sqerl_description = An Erlang-flavoured SQL DSL
+pkg_sqerl_homepage = https://github.com/hairyhum/sqerl
+pkg_sqerl_fetch = git
+pkg_sqerl_repo = https://github.com/hairyhum/sqerl
+pkg_sqerl_commit = master
+
+PACKAGES += srly
+pkg_srly_name = srly
+pkg_srly_description = Native Erlang Unix serial interface
+pkg_srly_homepage = https://github.com/msantos/srly
+pkg_srly_fetch = git
+pkg_srly_repo = https://github.com/msantos/srly
+pkg_srly_commit = master
+
+PACKAGES += sshrpc
+pkg_sshrpc_name = sshrpc
+pkg_sshrpc_description = Erlang SSH RPC module (experimental)
+pkg_sshrpc_homepage = https://github.com/jj1bdx/sshrpc
+pkg_sshrpc_fetch = git
+pkg_sshrpc_repo = https://github.com/jj1bdx/sshrpc
+pkg_sshrpc_commit = master
+
+PACKAGES += stable
+pkg_stable_name = stable
+pkg_stable_description = Library of assorted helpers for Cowboy web server.
+pkg_stable_homepage = https://github.com/dvv/stable
+pkg_stable_fetch = git
+pkg_stable_repo = https://github.com/dvv/stable
+pkg_stable_commit = master
+
+PACKAGES += statebox
+pkg_statebox_name = statebox
+pkg_statebox_description = Erlang state monad with merge/conflict-resolution capabilities. Useful for Riak.
+pkg_statebox_homepage = https://github.com/mochi/statebox
+pkg_statebox_fetch = git
+pkg_statebox_repo = https://github.com/mochi/statebox
+pkg_statebox_commit = master
+
+PACKAGES += statebox_riak
+pkg_statebox_riak_name = statebox_riak
+pkg_statebox_riak_description = Convenience library that makes it easier to use statebox with riak, extracted from best practices in our production code at Mochi Media.
+pkg_statebox_riak_homepage = https://github.com/mochi/statebox_riak
+pkg_statebox_riak_fetch = git
+pkg_statebox_riak_repo = https://github.com/mochi/statebox_riak
+pkg_statebox_riak_commit = master
+
+PACKAGES += statman
+pkg_statman_name = statman
+pkg_statman_description = Efficiently collect massive volumes of metrics inside the Erlang VM
+pkg_statman_homepage = https://github.com/knutin/statman
+pkg_statman_fetch = git
+pkg_statman_repo = https://github.com/knutin/statman
+pkg_statman_commit = master
+
+PACKAGES += statsderl
+pkg_statsderl_name = statsderl
+pkg_statsderl_description = StatsD client (Erlang)
+pkg_statsderl_homepage = https://github.com/lpgauth/statsderl
+pkg_statsderl_fetch = git
+pkg_statsderl_repo = https://github.com/lpgauth/statsderl
+pkg_statsderl_commit = master
+
+PACKAGES += stdinout_pool
+pkg_stdinout_pool_name = stdinout_pool
+pkg_stdinout_pool_description = stdinout_pool : stuff goes in, stuff goes out. there's never any miscommunication.
+pkg_stdinout_pool_homepage = https://github.com/mattsta/erlang-stdinout-pool
+pkg_stdinout_pool_fetch = git
+pkg_stdinout_pool_repo = https://github.com/mattsta/erlang-stdinout-pool
+pkg_stdinout_pool_commit = master
+
+PACKAGES += stockdb
+pkg_stockdb_name = stockdb
+pkg_stockdb_description = Database for storing stock exchange quotes in Erlang
+pkg_stockdb_homepage = https://github.com/maxlapshin/stockdb
+pkg_stockdb_fetch = git
+pkg_stockdb_repo = https://github.com/maxlapshin/stockdb
+pkg_stockdb_commit = master
+
+PACKAGES += stripe
+pkg_stripe_name = stripe
+pkg_stripe_description = Erlang interface to the stripe.com API
+pkg_stripe_homepage = https://github.com/mattsta/stripe-erlang
+pkg_stripe_fetch = git
+pkg_stripe_repo = https://github.com/mattsta/stripe-erlang
+pkg_stripe_commit = v1
+
+PACKAGES += subproc
+pkg_subproc_name = subproc
+pkg_subproc_description = Unix subprocess manager with {active,once|false} modes
+pkg_subproc_homepage = http://dozzie.jarowit.net/trac/wiki/subproc
+pkg_subproc_fetch = git
+pkg_subproc_repo = https://github.com/dozzie/subproc
+pkg_subproc_commit = v0.1.0
+
+PACKAGES += supervisor3
+pkg_supervisor3_name = supervisor3
+pkg_supervisor3_description = OTP supervisor with additional strategies
+pkg_supervisor3_homepage = https://github.com/klarna/supervisor3
+pkg_supervisor3_fetch = git
+pkg_supervisor3_repo = https://github.com/klarna/supervisor3.git
+pkg_supervisor3_commit = master
+
+PACKAGES += surrogate
+pkg_surrogate_name = surrogate
+pkg_surrogate_description = Proxy server written in Erlang. Supports reverse proxy load balancing and forward proxy with HTTP (including CONNECT), SOCKS4, SOCKS5, and transparent proxy modes.
+pkg_surrogate_homepage = https://github.com/skruger/Surrogate
+pkg_surrogate_fetch = git
+pkg_surrogate_repo = https://github.com/skruger/Surrogate
+pkg_surrogate_commit = master
+
+PACKAGES += swab
+pkg_swab_name = swab
+pkg_swab_description = General purpose buffer handling module
+pkg_swab_homepage = https://github.com/crownedgrouse/swab
+pkg_swab_fetch = git
+pkg_swab_repo = https://github.com/crownedgrouse/swab
+pkg_swab_commit = master
+
+PACKAGES += swarm
+pkg_swarm_name = swarm
+pkg_swarm_description = Fast and simple acceptor pool for Erlang
+pkg_swarm_homepage = https://github.com/jeremey/swarm
+pkg_swarm_fetch = git
+pkg_swarm_repo = https://github.com/jeremey/swarm
+pkg_swarm_commit = master
+
+PACKAGES += switchboard
+pkg_switchboard_name = switchboard
+pkg_switchboard_description = A framework for processing email using worker plugins.
+pkg_switchboard_homepage = https://github.com/thusfresh/switchboard
+pkg_switchboard_fetch = git
+pkg_switchboard_repo = https://github.com/thusfresh/switchboard
+pkg_switchboard_commit = master
+
+PACKAGES += syn
+pkg_syn_name = syn
+pkg_syn_description = A global Process Registry and Process Group manager for Erlang.
+pkg_syn_homepage = https://github.com/ostinelli/syn
+pkg_syn_fetch = git
+pkg_syn_repo = https://github.com/ostinelli/syn
+pkg_syn_commit = master
+
+PACKAGES += sync
+pkg_sync_name = sync
+pkg_sync_description = On-the-fly recompiling and reloading in Erlang.
+pkg_sync_homepage = https://github.com/rustyio/sync
+pkg_sync_fetch = git
+pkg_sync_repo = https://github.com/rustyio/sync
+pkg_sync_commit = master
+
+PACKAGES += syntaxerl
+pkg_syntaxerl_name = syntaxerl
+pkg_syntaxerl_description = Syntax checker for Erlang
+pkg_syntaxerl_homepage = https://github.com/ten0s/syntaxerl
+pkg_syntaxerl_fetch = git
+pkg_syntaxerl_repo = https://github.com/ten0s/syntaxerl
+pkg_syntaxerl_commit = master
+
+PACKAGES += syslog
+pkg_syslog_name = syslog
+pkg_syslog_description = Erlang port driver for interacting with syslog via syslog(3)
+pkg_syslog_homepage = https://github.com/Vagabond/erlang-syslog
+pkg_syslog_fetch = git
+pkg_syslog_repo = https://github.com/Vagabond/erlang-syslog
+pkg_syslog_commit = master
+
+PACKAGES += taskforce
+pkg_taskforce_name = taskforce
+pkg_taskforce_description = Erlang worker pools for controlled parallelisation of arbitrary tasks.
+pkg_taskforce_homepage = https://github.com/g-andrade/taskforce
+pkg_taskforce_fetch = git
+pkg_taskforce_repo = https://github.com/g-andrade/taskforce
+pkg_taskforce_commit = master
+
+PACKAGES += tddreloader
+pkg_tddreloader_name = tddreloader
+pkg_tddreloader_description = Shell utility for recompiling, reloading, and testing code as it changes
+pkg_tddreloader_homepage = https://github.com/version2beta/tddreloader
+pkg_tddreloader_fetch = git
+pkg_tddreloader_repo = https://github.com/version2beta/tddreloader
+pkg_tddreloader_commit = master
+
+PACKAGES += tempo
+pkg_tempo_name = tempo
+pkg_tempo_description = NIF-based date and time parsing and formatting for Erlang.
+pkg_tempo_homepage = https://github.com/selectel/tempo
+pkg_tempo_fetch = git
+pkg_tempo_repo = https://github.com/selectel/tempo
+pkg_tempo_commit = master
+
+PACKAGES += ticktick
+pkg_ticktick_name = ticktick
+pkg_ticktick_description = Ticktick is an ID generator for messaging services.
+pkg_ticktick_homepage = https://github.com/ericliang/ticktick
+pkg_ticktick_fetch = git
+pkg_ticktick_repo = https://github.com/ericliang/ticktick
+pkg_ticktick_commit = master
+
+PACKAGES += tinymq
+pkg_tinymq_name = tinymq
+pkg_tinymq_description = TinyMQ - a diminutive, in-memory message queue
+pkg_tinymq_homepage = https://github.com/ChicagoBoss/tinymq
+pkg_tinymq_fetch = git
+pkg_tinymq_repo = https://github.com/ChicagoBoss/tinymq
+pkg_tinymq_commit = master
+
+PACKAGES += tinymt
+pkg_tinymt_name = tinymt
+pkg_tinymt_description = TinyMT pseudo random number generator for Erlang.
+pkg_tinymt_homepage = https://github.com/jj1bdx/tinymt-erlang
+pkg_tinymt_fetch = git
+pkg_tinymt_repo = https://github.com/jj1bdx/tinymt-erlang
+pkg_tinymt_commit = master
+
+PACKAGES += tirerl
+pkg_tirerl_name = tirerl
+pkg_tirerl_description = Erlang interface to Elasticsearch
+pkg_tirerl_homepage = https://github.com/inaka/tirerl
+pkg_tirerl_fetch = git
+pkg_tirerl_repo = https://github.com/inaka/tirerl
+pkg_tirerl_commit = master
+
+PACKAGES += toml
+pkg_toml_name = toml
+pkg_toml_description = TOML (0.4.0) config parser
+pkg_toml_homepage = http://dozzie.jarowit.net/trac/wiki/TOML
+pkg_toml_fetch = git
+pkg_toml_repo = https://github.com/dozzie/toml
+pkg_toml_commit = v0.2.0
+
+PACKAGES += traffic_tools
+pkg_traffic_tools_name = traffic_tools
+pkg_traffic_tools_description = Simple traffic limiting library
+pkg_traffic_tools_homepage = https://github.com/systra/traffic_tools
+pkg_traffic_tools_fetch = git
+pkg_traffic_tools_repo = https://github.com/systra/traffic_tools
+pkg_traffic_tools_commit = master
+
+PACKAGES += trails
+pkg_trails_name = trails
+pkg_trails_description = A couple of improvements over Cowboy Routes
+pkg_trails_homepage = http://inaka.github.io/cowboy-trails/
+pkg_trails_fetch = git
+pkg_trails_repo = https://github.com/inaka/cowboy-trails
+pkg_trails_commit = master
+
+PACKAGES += trane
+pkg_trane_name = trane
+pkg_trane_description = SAX-style parser for broken HTML, in Erlang
+pkg_trane_homepage = https://github.com/massemanet/trane
+pkg_trane_fetch = git
+pkg_trane_repo = https://github.com/massemanet/trane
+pkg_trane_commit = master
+
+PACKAGES += transit
+pkg_transit_name = transit
+pkg_transit_description = Transit format for Erlang
+pkg_transit_homepage = https://github.com/isaiah/transit-erlang
+pkg_transit_fetch = git
+pkg_transit_repo = https://github.com/isaiah/transit-erlang
+pkg_transit_commit = master
+
+PACKAGES += trie
+pkg_trie_name = trie
+pkg_trie_description = Erlang Trie Implementation
+pkg_trie_homepage = https://github.com/okeuday/trie
+pkg_trie_fetch = git
+pkg_trie_repo = https://github.com/okeuday/trie
+pkg_trie_commit = master
+
+PACKAGES += triq
+pkg_triq_name = triq
+pkg_triq_description = Trifork QuickCheck
+pkg_triq_homepage = https://triq.gitlab.io
+pkg_triq_fetch = git
+pkg_triq_repo = https://gitlab.com/triq/triq.git
+pkg_triq_commit = master
+
+PACKAGES += tunctl
+pkg_tunctl_name = tunctl
+pkg_tunctl_description = Erlang TUN/TAP interface
+pkg_tunctl_homepage = https://github.com/msantos/tunctl
+pkg_tunctl_fetch = git
+pkg_tunctl_repo = https://github.com/msantos/tunctl
+pkg_tunctl_commit = master
+
+PACKAGES += twerl
+pkg_twerl_name = twerl
+pkg_twerl_description = Erlang client for the Twitter Streaming API
+pkg_twerl_homepage = https://github.com/lucaspiller/twerl
+pkg_twerl_fetch = git
+pkg_twerl_repo = https://github.com/lucaspiller/twerl
+pkg_twerl_commit = oauth
+
+PACKAGES += twitter_erlang
+pkg_twitter_erlang_name = twitter_erlang
+pkg_twitter_erlang_description = An Erlang Twitter client
+pkg_twitter_erlang_homepage = https://github.com/ngerakines/erlang_twitter
+pkg_twitter_erlang_fetch = git
+pkg_twitter_erlang_repo = https://github.com/ngerakines/erlang_twitter
+pkg_twitter_erlang_commit = master
+
+PACKAGES += ucol_nif
+pkg_ucol_nif_name = ucol_nif
+pkg_ucol_nif_description = ICU based collation Erlang module
+pkg_ucol_nif_homepage = https://github.com/refuge/ucol_nif
+pkg_ucol_nif_fetch = git
+pkg_ucol_nif_repo = https://github.com/refuge/ucol_nif
+pkg_ucol_nif_commit = master
+
+PACKAGES += unicorn
+pkg_unicorn_name = unicorn
+pkg_unicorn_description = Generic configuration server
+pkg_unicorn_homepage = https://github.com/shizzard/unicorn
+pkg_unicorn_fetch = git
+pkg_unicorn_repo = https://github.com/shizzard/unicorn
+pkg_unicorn_commit = master
+
+PACKAGES += unsplit
+pkg_unsplit_name = unsplit
+pkg_unsplit_description = Resolves conflicts in Mnesia after network splits
+pkg_unsplit_homepage = https://github.com/uwiger/unsplit
+pkg_unsplit_fetch = git
+pkg_unsplit_repo = https://github.com/uwiger/unsplit
+pkg_unsplit_commit = master
+
+PACKAGES += uuid
+pkg_uuid_name = uuid
+pkg_uuid_description = Erlang UUID Implementation
+pkg_uuid_homepage = https://github.com/okeuday/uuid
+pkg_uuid_fetch = git
+pkg_uuid_repo = https://github.com/okeuday/uuid
+pkg_uuid_commit = master
+
+PACKAGES += ux
+pkg_ux_name = ux
+pkg_ux_description = Unicode eXtension for Erlang (Strings, Collation)
+pkg_ux_homepage = https://github.com/erlang-unicode/ux
+pkg_ux_fetch = git
+pkg_ux_repo = https://github.com/erlang-unicode/ux
+pkg_ux_commit = master
+
+PACKAGES += vert
+pkg_vert_name = vert
+pkg_vert_description = Erlang binding to libvirt virtualization API
+pkg_vert_homepage = https://github.com/msantos/erlang-libvirt
+pkg_vert_fetch = git
+pkg_vert_repo = https://github.com/msantos/erlang-libvirt
+pkg_vert_commit = master
+
+PACKAGES += verx
+pkg_verx_name = verx
+pkg_verx_description = Erlang implementation of the libvirtd remote protocol
+pkg_verx_homepage = https://github.com/msantos/verx
+pkg_verx_fetch = git
+pkg_verx_repo = https://github.com/msantos/verx
+pkg_verx_commit = master
+
+PACKAGES += vmq_acl
+pkg_vmq_acl_name = vmq_acl
+pkg_vmq_acl_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_acl_homepage = https://verne.mq/
+pkg_vmq_acl_fetch = git
+pkg_vmq_acl_repo = https://github.com/erlio/vmq_acl
+pkg_vmq_acl_commit = master
+
+PACKAGES += vmq_bridge
+pkg_vmq_bridge_name = vmq_bridge
+pkg_vmq_bridge_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_bridge_homepage = https://verne.mq/
+pkg_vmq_bridge_fetch = git
+pkg_vmq_bridge_repo = https://github.com/erlio/vmq_bridge
+pkg_vmq_bridge_commit = master
+
+PACKAGES += vmq_graphite
+pkg_vmq_graphite_name = vmq_graphite
+pkg_vmq_graphite_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_graphite_homepage = https://verne.mq/
+pkg_vmq_graphite_fetch = git
+pkg_vmq_graphite_repo = https://github.com/erlio/vmq_graphite
+pkg_vmq_graphite_commit = master
+
+PACKAGES += vmq_passwd
+pkg_vmq_passwd_name = vmq_passwd
+pkg_vmq_passwd_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_passwd_homepage = https://verne.mq/
+pkg_vmq_passwd_fetch = git
+pkg_vmq_passwd_repo = https://github.com/erlio/vmq_passwd
+pkg_vmq_passwd_commit = master
+
+PACKAGES += vmq_server
+pkg_vmq_server_name = vmq_server
+pkg_vmq_server_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_server_homepage = https://verne.mq/
+pkg_vmq_server_fetch = git
+pkg_vmq_server_repo = https://github.com/erlio/vmq_server
+pkg_vmq_server_commit = master
+
+PACKAGES += vmq_snmp
+pkg_vmq_snmp_name = vmq_snmp
+pkg_vmq_snmp_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_snmp_homepage = https://verne.mq/
+pkg_vmq_snmp_fetch = git
+pkg_vmq_snmp_repo = https://github.com/erlio/vmq_snmp
+pkg_vmq_snmp_commit = master
+
+PACKAGES += vmq_systree
+pkg_vmq_systree_name = vmq_systree
+pkg_vmq_systree_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_systree_homepage = https://verne.mq/
+pkg_vmq_systree_fetch = git
+pkg_vmq_systree_repo = https://github.com/erlio/vmq_systree
+pkg_vmq_systree_commit = master
+
+PACKAGES += vmstats
+pkg_vmstats_name = vmstats
+pkg_vmstats_description = Tiny Erlang app that works in conjunction with statsderl to report Erlang VM metrics to Graphite.
+pkg_vmstats_homepage = https://github.com/ferd/vmstats
+pkg_vmstats_fetch = git
+pkg_vmstats_repo = https://github.com/ferd/vmstats
+pkg_vmstats_commit = master
+
+PACKAGES += walrus
+pkg_walrus_name = walrus
+pkg_walrus_description = Walrus - Mustache-like Templating
+pkg_walrus_homepage = https://github.com/devinus/walrus
+pkg_walrus_fetch = git
+pkg_walrus_repo = https://github.com/devinus/walrus
+pkg_walrus_commit = master
+
+PACKAGES += webmachine
+pkg_webmachine_name = webmachine
+pkg_webmachine_description = A REST-based system for building web applications.
+pkg_webmachine_homepage = https://github.com/basho/webmachine
+pkg_webmachine_fetch = git
+pkg_webmachine_repo = https://github.com/basho/webmachine
+pkg_webmachine_commit = master
+
+PACKAGES += websocket_client
+pkg_websocket_client_name = websocket_client
+pkg_websocket_client_description = Erlang websocket client (ws and wss supported)
+pkg_websocket_client_homepage = https://github.com/jeremyong/websocket_client
+pkg_websocket_client_fetch = git
+pkg_websocket_client_repo = https://github.com/jeremyong/websocket_client
+pkg_websocket_client_commit = master
+
+PACKAGES += worker_pool
+pkg_worker_pool_name = worker_pool
+pkg_worker_pool_description = A simple Erlang worker pool
+pkg_worker_pool_homepage = https://github.com/inaka/worker_pool
+pkg_worker_pool_fetch = git
+pkg_worker_pool_repo = https://github.com/inaka/worker_pool
+pkg_worker_pool_commit = master
+
+PACKAGES += wrangler
+pkg_wrangler_name = wrangler
+pkg_wrangler_description = Import of the Wrangler svn repository.
+pkg_wrangler_homepage = http://www.cs.kent.ac.uk/projects/wrangler/Home.html
+pkg_wrangler_fetch = git
+pkg_wrangler_repo = https://github.com/RefactoringTools/wrangler
+pkg_wrangler_commit = master
+
+PACKAGES += wsock
+pkg_wsock_name = wsock
+pkg_wsock_description = Erlang library to build WebSocket clients and servers
+pkg_wsock_homepage = https://github.com/madtrick/wsock
+pkg_wsock_fetch = git
+pkg_wsock_repo = https://github.com/madtrick/wsock
+pkg_wsock_commit = master
+
+PACKAGES += xhttpc
+pkg_xhttpc_name = xhttpc
+pkg_xhttpc_description = Extensible HTTP Client for Erlang
+pkg_xhttpc_homepage = https://github.com/seriyps/xhttpc
+pkg_xhttpc_fetch = git
+pkg_xhttpc_repo = https://github.com/seriyps/xhttpc
+pkg_xhttpc_commit = master
+
+PACKAGES += xref_runner
+pkg_xref_runner_name = xref_runner
+pkg_xref_runner_description = Erlang Xref Runner (inspired by rebar xref)
+pkg_xref_runner_homepage = https://github.com/inaka/xref_runner
+pkg_xref_runner_fetch = git
+pkg_xref_runner_repo = https://github.com/inaka/xref_runner
+pkg_xref_runner_commit = master
+
+PACKAGES += yamerl
+pkg_yamerl_name = yamerl
+pkg_yamerl_description = YAML 1.2 parser in pure Erlang
+pkg_yamerl_homepage = https://github.com/yakaz/yamerl
+pkg_yamerl_fetch = git
+pkg_yamerl_repo = https://github.com/yakaz/yamerl
+pkg_yamerl_commit = master
+
+PACKAGES += yamler
+pkg_yamler_name = yamler
+pkg_yamler_description = libyaml-based yaml loader for Erlang
+pkg_yamler_homepage = https://github.com/goertzenator/yamler
+pkg_yamler_fetch = git
+pkg_yamler_repo = https://github.com/goertzenator/yamler
+pkg_yamler_commit = master
+
+PACKAGES += yaws
+pkg_yaws_name = yaws
+pkg_yaws_description = Yaws webserver
+pkg_yaws_homepage = http://yaws.hyber.org
+pkg_yaws_fetch = git
+pkg_yaws_repo = https://github.com/klacke/yaws
+pkg_yaws_commit = master
+
+PACKAGES += zab_engine
+pkg_zab_engine_name = zab_engine
+pkg_zab_engine_description = ZAB protocol implemented in Erlang
+pkg_zab_engine_homepage = https://github.com/xinmingyao/zab_engine
+pkg_zab_engine_fetch = git
+pkg_zab_engine_repo = https://github.com/xinmingyao/zab_engine
+pkg_zab_engine_commit = master
+
+PACKAGES += zabbix_sender
+pkg_zabbix_sender_name = zabbix_sender
+pkg_zabbix_sender_description = Zabbix trapper for sending data to Zabbix in pure Erlang
+pkg_zabbix_sender_homepage = https://github.com/stalkermn/zabbix_sender
+pkg_zabbix_sender_fetch = git
+pkg_zabbix_sender_repo = https://github.com/stalkermn/zabbix_sender.git
+pkg_zabbix_sender_commit = master
+
+PACKAGES += zeta
+pkg_zeta_name = zeta
+pkg_zeta_description = HTTP access log parser in Erlang
+pkg_zeta_homepage = https://github.com/s1n4/zeta
+pkg_zeta_fetch = git
+pkg_zeta_repo = https://github.com/s1n4/zeta
+pkg_zeta_commit = master
+
+PACKAGES += zippers
+pkg_zippers_name = zippers
+pkg_zippers_description = A library for functional zipper data structures in Erlang.
+pkg_zippers_homepage = https://github.com/ferd/zippers
+pkg_zippers_fetch = git
+pkg_zippers_repo = https://github.com/ferd/zippers
+pkg_zippers_commit = master
+
+PACKAGES += zlists
+pkg_zlists_name = zlists
+pkg_zlists_description = Erlang lazy lists library.
+pkg_zlists_homepage = https://github.com/vjache/erlang-zlists
+pkg_zlists_fetch = git
+pkg_zlists_repo = https://github.com/vjache/erlang-zlists
+pkg_zlists_commit = master
+
+PACKAGES += zraft_lib
+pkg_zraft_lib_name = zraft_lib
+pkg_zraft_lib_description = Erlang Raft consensus protocol implementation
+pkg_zraft_lib_homepage = https://github.com/dreyk/zraft_lib
+pkg_zraft_lib_fetch = git
+pkg_zraft_lib_repo = https://github.com/dreyk/zraft_lib
+pkg_zraft_lib_commit = master
+
+PACKAGES += zucchini
+pkg_zucchini_name = zucchini
+pkg_zucchini_description = An Erlang INI parser
+pkg_zucchini_homepage = https://github.com/devinus/zucchini
+pkg_zucchini_fetch = git
+pkg_zucchini_repo = https://github.com/devinus/zucchini
+pkg_zucchini_commit = master
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: search
+
+define pkg_print
+ $(verbose) printf "%s\n" \
+ $(if $(call core_eq,$(1),$(pkg_$(1)_name)),,"Pkg name: $(1)") \
+ "App name: $(pkg_$(1)_name)" \
+ "Description: $(pkg_$(1)_description)" \
+ "Home page: $(pkg_$(1)_homepage)" \
+ "Fetch with: $(pkg_$(1)_fetch)" \
+ "Repository: $(pkg_$(1)_repo)" \
+ "Commit: $(pkg_$(1)_commit)" \
+ ""
+
+endef
+
+search:
+ifdef q
+ $(foreach p,$(PACKAGES), \
+ $(if $(findstring $(call core_lc,$(q)),$(call core_lc,$(pkg_$(p)_name) $(pkg_$(p)_description))), \
+ $(call pkg_print,$(p))))
+else
+ $(foreach p,$(PACKAGES),$(call pkg_print,$(p)))
+endif
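+
+# Usage sketch (assuming a project that includes this erlang.mk): pass the
+# search term via the q variable; matching is case-insensitive over both
+# package names and descriptions.
+#   make search q=riak   # list packages matching "riak"
+#   make search          # list every known package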
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-deps clean-tmp-deps.log
+
+# Configuration.
+
+ifdef OTP_DEPS
+$(warning The variable OTP_DEPS is deprecated in favor of LOCAL_DEPS.)
+endif
+
+IGNORE_DEPS ?=
+export IGNORE_DEPS
+
+APPS_DIR ?= $(CURDIR)/apps
+export APPS_DIR
+
+DEPS_DIR ?= $(CURDIR)/deps
+export DEPS_DIR
+
+REBAR_DEPS_DIR = $(DEPS_DIR)
+export REBAR_DEPS_DIR
+
+REBAR_GIT ?= https://github.com/rebar/rebar
+REBAR_COMMIT ?= 576e12171ab8d69b048b827b92aa65d067deea01
+
+# External "early" plugins (see core/plugins.mk for regular plugins).
+# They both use the core_dep_plugin macro.
+
+define core_dep_plugin
+ifeq ($(2),$(PROJECT))
+-include $$(patsubst $(PROJECT)/%,%,$(1))
+else
+-include $(DEPS_DIR)/$(1)
+
+$(DEPS_DIR)/$(1): $(DEPS_DIR)/$(2) ;
+endif
+endef
+
+DEP_EARLY_PLUGINS ?=
+
+$(foreach p,$(DEP_EARLY_PLUGINS),\
+ $(eval $(if $(findstring /,$p),\
+ $(call core_dep_plugin,$p,$(firstword $(subst /, ,$p))),\
+ $(call core_dep_plugin,$p/early-plugins.mk,$p))))
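+
+# Illustrative sketch (hypothetical dep "my_plugins"): a bare dependency name
+# loads $(DEPS_DIR)/my_plugins/early-plugins.mk, while a name containing a
+# slash is treated as a path to the plugin file inside that dependency.
+#   DEP_EARLY_PLUGINS = my_plugins
+#   DEP_EARLY_PLUGINS = my_plugins/mk/early.mk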
+
+# Query functions.
+
+query_fetch_method = $(if $(dep_$(1)),$(call _qfm_dep,$(word 1,$(dep_$(1)))),$(call _qfm_pkg,$(1)))
+_qfm_dep = $(if $(dep_fetch_$(1)),$(1),$(if $(IS_DEP),legacy,fail))
+_qfm_pkg = $(if $(pkg_$(1)_fetch),$(pkg_$(1)_fetch),fail)
+
+query_name = $(if $(dep_$(1)),$(1),$(if $(pkg_$(1)_name),$(pkg_$(1)_name),$(1)))
+
+query_repo = $(call _qr,$(1),$(call query_fetch_method,$(1)))
+_qr = $(if $(query_repo_$(2)),$(call query_repo_$(2),$(1)),$(call dep_repo,$(1)))
+
+query_repo_default = $(if $(dep_$(1)),$(word 2,$(dep_$(1))),$(pkg_$(1)_repo))
+query_repo_git = $(patsubst git://github.com/%,https://github.com/%,$(call query_repo_default,$(1)))
+query_repo_git-subfolder = $(call query_repo_git,$(1))
+query_repo_git-submodule = -
+query_repo_hg = $(call query_repo_default,$(1))
+query_repo_svn = $(call query_repo_default,$(1))
+query_repo_cp = $(call query_repo_default,$(1))
+query_repo_ln = $(call query_repo_default,$(1))
+query_repo_hex = https://hex.pm/packages/$(if $(word 3,$(dep_$(1))),$(word 3,$(dep_$(1))),$(1))
+query_repo_fail = -
+query_repo_legacy = -
+
+query_version = $(call _qv,$(1),$(call query_fetch_method,$(1)))
+_qv = $(if $(query_version_$(2)),$(call query_version_$(2),$(1)),$(call dep_commit,$(1)))
+
+query_version_default = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(word 3,$(dep_$(1))),$(pkg_$(1)_commit)))
+query_version_git = $(call query_version_default,$(1))
+query_version_git-subfolder = $(call query_version_git,$(1))
+query_version_git-submodule = -
+query_version_hg = $(call query_version_default,$(1))
+query_version_svn = -
+query_version_cp = -
+query_version_ln = -
+query_version_hex = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(word 2,$(dep_$(1))),$(pkg_$(1)_commit)))
+query_version_fail = -
+query_version_legacy = -
+
+query_extra = $(call _qe,$(1),$(call query_fetch_method,$(1)))
+_qe = $(if $(query_extra_$(2)),$(call query_extra_$(2),$(1)),-)
+
+query_extra_git = -
+query_extra_git-subfolder = $(if $(dep_$(1)),subfolder=$(word 4,$(dep_$(1))),-)
+query_extra_git-submodule = -
+query_extra_hg = -
+query_extra_svn = -
+query_extra_cp = -
+query_extra_ln = -
+query_extra_hex = $(if $(dep_$(1)),package-name=$(word 3,$(dep_$(1))),-)
+query_extra_fail = -
+query_extra_legacy = -
+
+query_absolute_path = $(addprefix $(DEPS_DIR)/,$(call query_name,$(1)))
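+
+# Illustrative sketch: with a hypothetical dependency declared as
+#   dep_cowboy = git https://github.com/ninenines/cowboy 2.9.0
+# the query functions resolve roughly as follows:
+#   $(call query_fetch_method,cowboy) -> git
+#   $(call query_repo,cowboy)         -> https://github.com/ninenines/cowboy
+#   $(call query_version,cowboy)      -> 2.9.0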
+
+# Deprecated legacy query functions.
+dep_fetch = $(call query_fetch_method,$(1))
+dep_name = $(call query_name,$(1))
+dep_repo = $(call query_repo_git,$(1))
+dep_commit = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(if $(filter hex,$(word 1,$(dep_$(1)))),$(word 2,$(dep_$(1))),$(word 3,$(dep_$(1)))),$(pkg_$(1)_commit)))
+
+LOCAL_DEPS_DIRS = $(foreach a,$(LOCAL_DEPS),$(if $(wildcard $(APPS_DIR)/$(a)),$(APPS_DIR)/$(a)))
+ALL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(foreach dep,$(filter-out $(IGNORE_DEPS),$(BUILD_DEPS) $(DEPS)),$(call dep_name,$(dep))))
+
+# When we are calling an app directly we don't want to include it here;
+# otherwise it would be treated both as an app and as the top-level project.
+ALL_APPS_DIRS = $(if $(wildcard $(APPS_DIR)/),$(filter-out $(APPS_DIR),$(shell find $(APPS_DIR) -maxdepth 1 -type d)))
+ifdef ROOT_DIR
+ifndef IS_APP
+ALL_APPS_DIRS := $(filter-out $(APPS_DIR)/$(notdir $(CURDIR)),$(ALL_APPS_DIRS))
+endif
+endif
+
+ifeq ($(filter $(APPS_DIR) $(DEPS_DIR),$(subst :, ,$(ERL_LIBS))),)
+ifeq ($(ERL_LIBS),)
+ ERL_LIBS = $(APPS_DIR):$(DEPS_DIR)
+else
+ ERL_LIBS := $(ERL_LIBS):$(APPS_DIR):$(DEPS_DIR)
+endif
+endif
+export ERL_LIBS
+
+export NO_AUTOPATCH
+
+# Verbosity.
+
+dep_verbose_0 = @echo " DEP $1 ($(call dep_commit,$1))";
+dep_verbose_2 = set -x;
+dep_verbose = $(dep_verbose_$(V))
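+
+# Following the usual erlang.mk V convention: at the default verbosity a
+# short "DEP" line is printed per dependency, while "make V=2" traces the
+# underlying shell commands (set -x).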
+
+# Optimization: don't recompile deps unless truly necessary.
+
+ifndef IS_DEP
+ifneq ($(MAKELEVEL),0)
+$(shell rm -f ebin/dep_built)
+endif
+endif
+
+# Core targets.
+
+ALL_APPS_DIRS_TO_BUILD = $(if $(LOCAL_DEPS_DIRS)$(IS_APP),$(LOCAL_DEPS_DIRS),$(ALL_APPS_DIRS))
+
+apps:: $(ALL_APPS_DIRS) clean-tmp-deps.log | $(ERLANG_MK_TMP)
+# Create the ebin directory for all apps to make sure Erlang recognizes them
+# as proper OTP applications when using -include_lib. This is a temporary
+# fix; a proper fix would be to compile apps/* in the right order.
+ifndef IS_APP
+ifneq ($(ALL_APPS_DIRS),)
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ mkdir -p $$dep/ebin; \
+ done
+endif
+endif
+# At the toplevel: if LOCAL_DEPS is defined with at least one local app, only
+# compile that list of apps. Otherwise, compile everything.
+# Within an app: compile all LOCAL_DEPS that are (uncompiled) local apps.
+ifneq ($(ALL_APPS_DIRS_TO_BUILD),)
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS_TO_BUILD); do \
+ if grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/apps.log; then \
+ :; \
+ else \
+ echo $$dep >> $(ERLANG_MK_TMP)/apps.log; \
+ $(MAKE) -C $$dep $(if $(IS_TEST),test-build-app) IS_APP=1; \
+ fi \
+ done
+endif
+
+clean-tmp-deps.log:
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_TMP)/apps.log $(ERLANG_MK_TMP)/deps.log
+endif
+
+# Erlang.mk does not rebuild dependencies after they have been compiled
+# once. Developers working on the top-level project and on some of its
+# dependencies at the same time may want to change this behavior. There
+# are two ways to do so (see the example invocations further below):
+# 1. Set `FULL=1` so that all dependencies are visited and
+#    recursively recompiled if necessary.
+# 2. Set `FORCE_REBUILD=` to the specific list of dependencies that
+#    should be recompiled (instead of the whole set).
+
+FORCE_REBUILD ?=
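+
+# Example invocations (dependency names are illustrative):
+#   make FULL=1                        # visit and recompile all deps
+#   make FORCE_REBUILD="cowlib ranch"  # rebuild these deps even if built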
+
+ifeq ($(origin FULL),undefined)
+ifneq ($(strip $(force_rebuild_dep)$(FORCE_REBUILD)),)
+define force_rebuild_dep
+echo "$(FORCE_REBUILD)" | grep -qw "$$(basename "$1")"
+endef
+endif
+endif
+
+ifneq ($(SKIP_DEPS),)
+deps::
+else
+deps:: $(ALL_DEPS_DIRS) apps clean-tmp-deps.log | $(ERLANG_MK_TMP)
+ifneq ($(ALL_DEPS_DIRS),)
+ $(verbose) set -e; for dep in $(ALL_DEPS_DIRS); do \
+ if grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/deps.log; then \
+ :; \
+ else \
+ echo $$dep >> $(ERLANG_MK_TMP)/deps.log; \
+ if [ -z "$(strip $(FULL))" ] $(if $(force_rebuild_dep),&& ! ($(call force_rebuild_dep,$$dep)),) && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ elif [ -f $$dep/GNUmakefile ] || [ -f $$dep/makefile ] || [ -f $$dep/Makefile ]; then \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ else \
+ echo "Error: No Makefile to build dependency $$dep." >&2; \
+ exit 2; \
+ fi \
+ fi \
+ done
+endif
+endif
+
+# Deps related targets.
+
+# @todo rename GNUmakefile and makefile into Makefile first, if they exist
+# While the Makefile could also be named GNUmakefile or makefile,
+# in practice only Makefile has been needed so far.
+define dep_autopatch
+ if [ -f $(DEPS_DIR)/$(1)/erlang.mk ]; then \
+ rm -rf $(DEPS_DIR)/$1/ebin/; \
+ $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+ $(call dep_autopatch_erlang_mk,$(1)); \
+ elif [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+ if [ -f $(DEPS_DIR)/$1/rebar.lock ]; then \
+ $(call dep_autopatch2,$1); \
+ elif [ 0 != `grep -c "include ../\w*\.mk" $(DEPS_DIR)/$(1)/Makefile` ]; then \
+ $(call dep_autopatch2,$(1)); \
+ elif [ 0 != `grep -ci "^[^#].*rebar" $(DEPS_DIR)/$(1)/Makefile` ]; then \
+ $(call dep_autopatch2,$(1)); \
+ elif [ -n "`find $(DEPS_DIR)/$(1)/ -type f -name \*.mk -not -name erlang.mk -exec grep -i "^[^#].*rebar" '{}' \;`" ]; then \
+ $(call dep_autopatch2,$(1)); \
+ fi \
+ else \
+ if [ ! -d $(DEPS_DIR)/$(1)/src/ ]; then \
+ $(call dep_autopatch_noop,$(1)); \
+ else \
+ $(call dep_autopatch2,$(1)); \
+ fi \
+ fi
+endef
+
+define dep_autopatch2
+ ! test -f $(DEPS_DIR)/$1/ebin/$1.app || \
+ mv -n $(DEPS_DIR)/$1/ebin/$1.app $(DEPS_DIR)/$1/src/$1.app.src; \
+ rm -f $(DEPS_DIR)/$1/ebin/$1.app; \
+ if [ -f $(DEPS_DIR)/$1/src/$1.app.src.script ]; then \
+ $(call erlang,$(call dep_autopatch_appsrc_script.erl,$(1))); \
+ fi; \
+ $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+ if [ -f $(DEPS_DIR)/$(1)/rebar -o -f $(DEPS_DIR)/$(1)/rebar.config -o -f $(DEPS_DIR)/$(1)/rebar.config.script -o -f $(DEPS_DIR)/$1/rebar.lock ]; then \
+ $(call dep_autopatch_fetch_rebar); \
+ $(call dep_autopatch_rebar,$(1)); \
+ else \
+ $(call dep_autopatch_gen,$(1)); \
+ fi
+endef
+
+define dep_autopatch_noop
+ printf "noop:\n" > $(DEPS_DIR)/$(1)/Makefile
+endef
+
+# Replace "include erlang.mk" with a line that will load the parent Erlang.mk
+# if given. Do it for all 3 possible Makefile file names.
+ifeq ($(NO_AUTOPATCH_ERLANG_MK),)
+define dep_autopatch_erlang_mk
+ for f in Makefile makefile GNUmakefile; do \
+ if [ -f $(DEPS_DIR)/$1/$$f ]; then \
+ sed -i.bak s/'include *erlang.mk'/'include $$(if $$(ERLANG_MK_FILENAME),$$(ERLANG_MK_FILENAME),erlang.mk)'/ $(DEPS_DIR)/$1/$$f; \
+ fi \
+ done
+endef
+else
+define dep_autopatch_erlang_mk
+ :
+endef
+endif
+
+define dep_autopatch_gen
+ printf "%s\n" \
+ "ERLC_OPTS = +debug_info" \
+ "include ../../erlang.mk" > $(DEPS_DIR)/$(1)/Makefile
+endef
+
+# We use flock/lockf when available to avoid concurrency issues.
+define dep_autopatch_fetch_rebar
+ if command -v flock >/dev/null; then \
+ flock $(ERLANG_MK_TMP)/rebar.lock sh -c "$(call dep_autopatch_fetch_rebar2)"; \
+ elif command -v lockf >/dev/null; then \
+ lockf $(ERLANG_MK_TMP)/rebar.lock sh -c "$(call dep_autopatch_fetch_rebar2)"; \
+ else \
+ $(call dep_autopatch_fetch_rebar2); \
+ fi
+endef
+
+define dep_autopatch_fetch_rebar2
+ if [ ! -d $(ERLANG_MK_TMP)/rebar ]; then \
+ git clone -q -n -- $(REBAR_GIT) $(ERLANG_MK_TMP)/rebar; \
+ cd $(ERLANG_MK_TMP)/rebar; \
+ git checkout -q $(REBAR_COMMIT); \
+ ./bootstrap; \
+ cd -; \
+ fi
+endef
+
+define dep_autopatch_rebar
+ if [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+ mv $(DEPS_DIR)/$(1)/Makefile $(DEPS_DIR)/$(1)/Makefile.orig.mk; \
+ fi; \
+ $(call erlang,$(call dep_autopatch_rebar.erl,$(1))); \
+ rm -f $(DEPS_DIR)/$(1)/ebin/$(1).app
+endef
+
+define dep_autopatch_rebar.erl
+ application:load(rebar),
+ application:set_env(rebar, log_level, debug),
+ rmemo:start(),
+ Conf1 = case file:consult("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config)") of
+ {ok, Conf0} -> Conf0;
+ _ -> []
+ end,
+ {Conf, OsEnv} = fun() ->
+ case filelib:is_file("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)") of
+ false -> {Conf1, []};
+ true ->
+ Bindings0 = erl_eval:new_bindings(),
+ Bindings1 = erl_eval:add_binding('CONFIG', Conf1, Bindings0),
+ Bindings = erl_eval:add_binding('SCRIPT', "$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings1),
+ Before = os:getenv(),
+ {ok, Conf2} = file:script("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings),
+ {Conf2, lists:foldl(fun(E, Acc) -> lists:delete(E, Acc) end, os:getenv(), Before)}
+ end
+ end(),
+ Write = fun (Text) ->
+ file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/Makefile)", Text, [append])
+ end,
+ Escape = fun (Text) ->
+ re:replace(Text, "\\\\$$", "\$$$$", [global, {return, list}])
+ end,
+ Write("IGNORE_DEPS += edown eper eunit_formatters meck node_package "
+ "rebar_lock_deps_plugin rebar_vsn_plugin reltool_util\n"),
+ Write("C_SRC_DIR = /path/do/not/exist\n"),
+ Write("C_SRC_TYPE = rebar\n"),
+ Write("DRV_CFLAGS = -fPIC\nexport DRV_CFLAGS\n"),
+ Write(["ERLANG_ARCH = ", rebar_utils:wordsize(), "\nexport ERLANG_ARCH\n"]),
+ ToList = fun
+ (V) when is_atom(V) -> atom_to_list(V);
+ (V) when is_list(V) -> "'\\"" ++ V ++ "\\"'"
+ end,
+ fun() ->
+ Write("ERLC_OPTS = +debug_info\nexport ERLC_OPTS\n"),
+ case lists:keyfind(erl_opts, 1, Conf) of
+ false -> ok;
+ {_, ErlOpts} ->
+ lists:foreach(fun
+ ({d, D}) ->
+ Write("ERLC_OPTS += -D" ++ ToList(D) ++ "=1\n");
+ ({d, DKey, DVal}) ->
+ Write("ERLC_OPTS += -D" ++ ToList(DKey) ++ "=" ++ ToList(DVal) ++ "\n");
+ ({i, I}) ->
+ Write(["ERLC_OPTS += -I ", I, "\n"]);
+ ({platform_define, Regex, D}) ->
+ case rebar_utils:is_arch(Regex) of
+ true -> Write("ERLC_OPTS += -D" ++ ToList(D) ++ "=1\n");
+ false -> ok
+ end;
+ ({parse_transform, PT}) ->
+ Write("ERLC_OPTS += +'{parse_transform, " ++ ToList(PT) ++ "}'\n");
+ (_) -> ok
+ end, ErlOpts)
+ end,
+ Write("\n")
+ end(),
+ GetHexVsn = fun(N, NP) ->
+ case file:consult("$(call core_native_path,$(DEPS_DIR)/$1/rebar.lock)") of
+ {ok, Lock} ->
+ io:format("~p~n", [Lock]),
+ case lists:keyfind("1.1.0", 1, Lock) of
+ {_, LockPkgs} ->
+ io:format("~p~n", [LockPkgs]),
+ case lists:keyfind(atom_to_binary(N, latin1), 1, LockPkgs) of
+ {_, {pkg, _, Vsn}, _} ->
+ io:format("~p~n", [Vsn]),
+ {N, {hex, NP, binary_to_list(Vsn)}};
+ _ ->
+ false
+ end;
+ _ ->
+ false
+ end;
+ _ ->
+ false
+ end
+ end,
+ SemVsn = fun
+ ("~>" ++ S0) ->
+ S = case S0 of
+ " " ++ S1 -> S1;
+ _ -> S0
+ end,
+ case length([ok || $$. <- S]) of
+ 0 -> S ++ ".0.0";
+ 1 -> S ++ ".0";
+ _ -> S
+ end;
+ (S) -> S
+ end,
+ fun() ->
+ File = case lists:keyfind(deps, 1, Conf) of
+ false -> [];
+ {_, Deps} ->
+ [begin case case Dep of
+ N when is_atom(N) -> GetHexVsn(N, N);
+ {N, S} when is_atom(N), is_list(S) -> {N, {hex, N, SemVsn(S)}};
+ {N, {pkg, NP}} when is_atom(N) -> GetHexVsn(N, NP);
+ {N, S, {pkg, NP}} -> {N, {hex, NP, S}};
+ {N, S} when is_tuple(S) -> {N, S};
+ {N, _, S} -> {N, S};
+ {N, _, S, _} -> {N, S};
+ _ -> false
+ end of
+ false -> ok;
+ {Name, Source} ->
+ {Method, Repo, Commit} = case Source of
+ {hex, NPV, V} -> {hex, V, NPV};
+ {git, R} -> {git, R, master};
+ {M, R, {branch, C}} -> {M, R, C};
+ {M, R, {ref, C}} -> {M, R, C};
+ {M, R, {tag, C}} -> {M, R, C};
+ {M, R, C} -> {M, R, C}
+ end,
+ Write(io_lib:format("DEPS += ~s\ndep_~s = ~s ~s ~s~n", [Name, Name, Method, Repo, Commit]))
+ end end || Dep <- Deps]
+ end
+ end(),
+ fun() ->
+ case lists:keyfind(erl_first_files, 1, Conf) of
+ false -> ok;
+ {_, Files} ->
+ Names = [[" ", case lists:reverse(F) of
+ "lre." ++ Elif -> lists:reverse(Elif);
+ "lrx." ++ Elif -> lists:reverse(Elif);
+ "lry." ++ Elif -> lists:reverse(Elif);
+ Elif -> lists:reverse(Elif)
+ end] || "src/" ++ F <- Files],
+ Write(io_lib:format("COMPILE_FIRST +=~s\n", [Names]))
+ end
+ end(),
+ Write("\n\nrebar_dep: preprocess pre-deps deps pre-app app\n"),
+ Write("\npreprocess::\n"),
+ Write("\npre-deps::\n"),
+ Write("\npre-app::\n"),
+ PatchHook = fun(Cmd) ->
+ Cmd2 = re:replace(Cmd, "^([g]?make)(.*)( -C.*)", "\\\\1\\\\3\\\\2", [{return, list}]),
+ case Cmd2 of
+ "make -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1);
+ "gmake -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1);
+ "make " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1);
+ "gmake " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1);
+ _ -> Escape(Cmd)
+ end
+ end,
+ fun() ->
+ case lists:keyfind(pre_hooks, 1, Conf) of
+ false -> ok;
+ {_, Hooks} ->
+ [case H of
+ {'get-deps', Cmd} ->
+ Write("\npre-deps::\n\t" ++ PatchHook(Cmd) ++ "\n");
+ {compile, Cmd} ->
+ Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n");
+ {Regex, compile, Cmd} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n");
+ false -> ok
+ end;
+ _ -> ok
+ end || H <- Hooks]
+ end
+ end(),
+ ShellToMk = fun(V0) ->
+ V1 = re:replace(V0, "[$$][(]", "$$\(shell ", [global]),
+ V = re:replace(V1, "([$$])(?![(])(\\\\w*)", "\\\\1(\\\\2)", [global]),
+ re:replace(V, "-Werror\\\\b", "", [{return, list}, global])
+ end,
+ PortSpecs = fun() ->
+ case lists:keyfind(port_specs, 1, Conf) of
+ false ->
+ case filelib:is_dir("$(call core_native_path,$(DEPS_DIR)/$1/c_src)") of
+ false -> [];
+ true ->
+ [{"priv/" ++ proplists:get_value(so_name, Conf, "$(1)_drv.so"),
+ proplists:get_value(port_sources, Conf, ["c_src/*.c"]), []}]
+ end;
+ {_, Specs} ->
+ lists:flatten([case S of
+ {Output, Input} -> {ShellToMk(Output), Input, []};
+ {Regex, Output, Input} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {ShellToMk(Output), Input, []};
+ false -> []
+ end;
+ {Regex, Output, Input, [{env, Env}]} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {ShellToMk(Output), Input, Env};
+ false -> []
+ end
+ end || S <- Specs])
+ end
+ end(),
+ PortSpecWrite = fun (Text) ->
+ file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/c_src/Makefile.erlang.mk)", Text, [append])
+ end,
+ case PortSpecs of
+ [] -> ok;
+ _ ->
+ Write("\npre-app::\n\t@$$\(MAKE) --no-print-directory -f c_src/Makefile.erlang.mk\n"),
+ PortSpecWrite(io_lib:format("ERL_CFLAGS ?= -finline-functions -Wall -fPIC -I \\"~s/erts-~s/include\\" -I \\"~s\\"\n",
+ [code:root_dir(), erlang:system_info(version), code:lib_dir(erl_interface, include)])),
+ PortSpecWrite(io_lib:format("ERL_LDFLAGS ?= -L \\"~s\\" -lei\n",
+ [code:lib_dir(erl_interface, lib)])),
+ [PortSpecWrite(["\n", E, "\n"]) || E <- OsEnv],
+ FilterEnv = fun(Env) ->
+ lists:flatten([case E of
+ {_, _} -> E;
+ {Regex, K, V} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {K, V};
+ false -> []
+ end
+ end || E <- Env])
+ end,
+ MergeEnv = fun(Env) ->
+ lists:foldl(fun ({K, V}, Acc) ->
+ case lists:keyfind(K, 1, Acc) of
+ false -> [{K, rebar_utils:expand_env_variable(V, K, "")}|Acc];
+ {_, V0} -> [{K, rebar_utils:expand_env_variable(V, K, V0)}|Acc]
+ end
+ end, [], Env)
+ end,
+ PortEnv = case lists:keyfind(port_env, 1, Conf) of
+ false -> [];
+ {_, PortEnv0} -> FilterEnv(PortEnv0)
+ end,
+ PortSpec = fun ({Output, Input0, Env}) ->
+ filelib:ensure_dir("$(call core_native_path,$(DEPS_DIR)/$1/)" ++ Output),
+ Input = [[" ", I] || I <- Input0],
+ PortSpecWrite([
+ [["\n", K, " = ", ShellToMk(V)] || {K, V} <- lists:reverse(MergeEnv(PortEnv))],
+ case $(PLATFORM) of
+ darwin -> "\n\nLDFLAGS += -flat_namespace -undefined suppress";
+ _ -> ""
+ end,
+ "\n\nall:: ", Output, "\n\t@:\n\n",
+ "%.o: %.c\n\t$$\(CC) -c -o $$\@ $$\< $$\(CFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.C\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.cc\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.cpp\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ [[Output, ": ", K, " += ", ShellToMk(V), "\n"] || {K, V} <- lists:reverse(MergeEnv(FilterEnv(Env)))],
+ Output, ": $$\(foreach ext,.c .C .cc .cpp,",
+ "$$\(patsubst %$$\(ext),%.o,$$\(filter %$$\(ext),$$\(wildcard", Input, "))))\n",
+ "\t$$\(CC) -o $$\@ $$\? $$\(LDFLAGS) $$\(ERL_LDFLAGS) $$\(DRV_LDFLAGS) $$\(EXE_LDFLAGS)",
+ case {filename:extension(Output), $(PLATFORM)} of
+ {[], _} -> "\n";
+ {_, darwin} -> "\n";
+ _ -> " -shared\n"
+ end])
+ end,
+ [PortSpec(S) || S <- PortSpecs]
+ end,
+ fun() ->
+ case lists:keyfind(plugins, 1, Conf) of
+ false -> ok;
+ {_, Plugins0} ->
+ Plugins = [P || P <- Plugins0, is_tuple(P)],
+ case lists:keyfind('lfe-compile', 1, Plugins) of
+ false -> ok;
+ _ -> Write("\nBUILD_DEPS = lfe lfe.mk\ndep_lfe.mk = git https://github.com/ninenines/lfe.mk master\nDEP_PLUGINS = lfe.mk\n")
+ end
+ end
+ end(),
+ Write("\ninclude $$\(if $$\(ERLANG_MK_FILENAME),$$\(ERLANG_MK_FILENAME),erlang.mk)"),
+ RunPlugin = fun(Plugin, Step) ->
+ case erlang:function_exported(Plugin, Step, 2) of
+ false -> ok;
+ true ->
+ c:cd("$(call core_native_path,$(DEPS_DIR)/$1/)"),
+ Ret = Plugin:Step({config, "", Conf, dict:new(), dict:new(), dict:new(),
+ dict:store(base_dir, "", dict:new())}, undefined),
+ io:format("rebar plugin ~p step ~p ret ~p~n", [Plugin, Step, Ret])
+ end
+ end,
+ fun() ->
+ case lists:keyfind(plugins, 1, Conf) of
+ false -> ok;
+ {_, Plugins0} ->
+ Plugins = [P || P <- Plugins0, is_atom(P)],
+ [begin
+ case lists:keyfind(deps, 1, Conf) of
+ false -> ok;
+ {_, Deps} ->
+ case lists:keyfind(P, 1, Deps) of
+ false -> ok;
+ _ ->
+ Path = "$(call core_native_path,$(DEPS_DIR)/)" ++ atom_to_list(P),
+ io:format("~s", [os:cmd("$(MAKE) -C $(call core_native_path,$(DEPS_DIR)/$1) " ++ Path)]),
+ io:format("~s", [os:cmd("$(MAKE) -C " ++ Path ++ " IS_DEP=1")]),
+ code:add_patha(Path ++ "/ebin")
+ end
+ end
+ end || P <- Plugins],
+ [case code:load_file(P) of
+ {module, P} -> ok;
+ _ ->
+ case lists:keyfind(plugin_dir, 1, Conf) of
+ false -> ok;
+ {_, PluginsDir} ->
+ ErlFile = "$(call core_native_path,$(DEPS_DIR)/$1/)" ++ PluginsDir ++ "/" ++ atom_to_list(P) ++ ".erl",
+ {ok, P, Bin} = compile:file(ErlFile, [binary]),
+ {module, P} = code:load_binary(P, ErlFile, Bin)
+ end
+ end || P <- Plugins],
+ [RunPlugin(P, preprocess) || P <- Plugins],
+ [RunPlugin(P, pre_compile) || P <- Plugins],
+ [RunPlugin(P, compile) || P <- Plugins]
+ end
+ end(),
+ halt()
+endef
+
+define dep_autopatch_appsrc_script.erl
+ AppSrc = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+ AppSrcScript = AppSrc ++ ".script",
+ {ok, Conf0} = file:consult(AppSrc),
+ Bindings0 = erl_eval:new_bindings(),
+ Bindings1 = erl_eval:add_binding('CONFIG', Conf0, Bindings0),
+ Bindings = erl_eval:add_binding('SCRIPT', AppSrcScript, Bindings1),
+ Conf = case file:script(AppSrcScript, Bindings) of
+ {ok, [C]} -> C;
+ {ok, C} -> C
+ end,
+ ok = file:write_file(AppSrc, io_lib:format("~p.~n", [Conf])),
+ halt()
+endef
+
+define dep_autopatch_appsrc.erl
+ AppSrcOut = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+ AppSrcIn = case filelib:is_regular(AppSrcOut) of false -> "$(call core_native_path,$(DEPS_DIR)/$1/ebin/$1.app)"; true -> AppSrcOut end,
+ case filelib:is_regular(AppSrcIn) of
+ false -> ok;
+ true ->
+ {ok, [{application, $(1), L0}]} = file:consult(AppSrcIn),
+ L1 = lists:keystore(modules, 1, L0, {modules, []}),
+ L2 = case lists:keyfind(vsn, 1, L1) of
+ {_, git} -> lists:keyreplace(vsn, 1, L1, {vsn, lists:droplast(os:cmd("git -C $(DEPS_DIR)/$1 describe --dirty --tags --always"))});
+ {_, {cmd, _}} -> lists:keyreplace(vsn, 1, L1, {vsn, "cmd"});
+ _ -> L1
+ end,
+ L3 = case lists:keyfind(registered, 1, L2) of false -> [{registered, []}|L2]; _ -> L2 end,
+ ok = file:write_file(AppSrcOut, io_lib:format("~p.~n", [{application, $(1), L3}])),
+ case AppSrcOut of AppSrcIn -> ok; _ -> ok = file:delete(AppSrcIn) end
+ end,
+ halt()
+endef
+
+define dep_fetch_git
+ git clone -q -n -- $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && git checkout -q $(call dep_commit,$(1));
+endef
+
+define dep_fetch_git-subfolder
+ mkdir -p $(ERLANG_MK_TMP)/git-subfolder; \
+ git clone -q -n -- $(call dep_repo,$1) \
+ $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1); \
+ cd $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1) \
+ && git checkout -q $(call dep_commit,$1); \
+ ln -s $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1)/$(word 4,$(dep_$(1))) \
+ $(DEPS_DIR)/$(call dep_name,$1);
+endef
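+
+# Sketch of a git-subfolder declaration (hypothetical monorepo): the 4th
+# word selects the folder that gets linked into $(DEPS_DIR):
+#   dep_myapp = git-subfolder https://github.com/example/monorepo master apps/myapp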
+
+define dep_fetch_git-submodule
+ git submodule update --init -- $(DEPS_DIR)/$1;
+endef
+
+define dep_fetch_hg
+ hg clone -q -U $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && hg update -q $(call dep_commit,$(1));
+endef
+
+define dep_fetch_svn
+ svn checkout -q $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+define dep_fetch_cp
+ cp -R $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+define dep_fetch_ln
+ ln -s $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+# Hex deps are identified by package name and version only, so there is no
+# need to look them up in the Erlang.mk package index.
+define dep_fetch_hex
+ mkdir -p $(ERLANG_MK_TMP)/hex $(DEPS_DIR)/$1; \
+ $(call core_http_get,$(ERLANG_MK_TMP)/hex/$1.tar,\
+ https://repo.hex.pm/tarballs/$(if $(word 3,$(dep_$1)),$(word 3,$(dep_$1)),$1)-$(strip $(word 2,$(dep_$1))).tar); \
+ tar -xOf $(ERLANG_MK_TMP)/hex/$1.tar contents.tar.gz | tar -C $(DEPS_DIR)/$1 -xzf -;
+endef
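+
+# A Hex dependency is declared with its version, plus an optional package
+# name when it differs from the app name (versions here are illustrative):
+#   dep_jsx = hex 2.9.0
+#   dep_myapp = hex 1.0.0 my_hex_package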
+
+define dep_fetch_fail
+ echo "Error: Unknown or invalid dependency: $(1)." >&2; \
+ exit 78;
+endef
+
+# Kept for compatibility with older Erlang.mk configurations.
+define dep_fetch_legacy
+ $(warning WARNING: '$(1)' dependency configuration uses deprecated format.) \
+ git clone -q -n -- $(word 1,$(dep_$(1))) $(DEPS_DIR)/$(1); \
+ cd $(DEPS_DIR)/$(1) && git checkout -q $(if $(word 2,$(dep_$(1))),$(word 2,$(dep_$(1))),master);
+endef
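+
+# Legacy format sketch: the repository URL comes first, followed by an
+# optional commit (defaulting to master):
+#   dep_cowboy = https://github.com/ninenines/cowboy 1.0.0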
+
+define dep_target
+$(DEPS_DIR)/$(call dep_name,$1): | $(ERLANG_MK_TMP)
+ $(eval DEP_NAME := $(call dep_name,$1))
+ $(eval DEP_STR := $(if $(filter $1,$(DEP_NAME)),$1,"$1 ($(DEP_NAME))"))
+ $(verbose) if test -d $(APPS_DIR)/$(DEP_NAME); then \
+ echo "Error: Dependency" $(DEP_STR) "conflicts with application found in $(APPS_DIR)/$(DEP_NAME)." >&2; \
+ exit 17; \
+ fi
+ $(verbose) mkdir -p $(DEPS_DIR)
+ $(dep_verbose) $(call dep_fetch_$(strip $(call dep_fetch,$(1))),$(1))
+ $(verbose) if [ -f $(DEPS_DIR)/$(1)/configure.ac -o -f $(DEPS_DIR)/$(1)/configure.in ] \
+ && [ ! -f $(DEPS_DIR)/$(1)/configure ]; then \
+ echo " AUTO " $(DEP_STR); \
+ cd $(DEPS_DIR)/$(1) && autoreconf -Wall -vif -I m4; \
+ fi
+ - $(verbose) if [ -f $(DEPS_DIR)/$(DEP_NAME)/configure ]; then \
+ echo " CONF " $(DEP_STR); \
+ cd $(DEPS_DIR)/$(DEP_NAME) && ./configure; \
+ fi
+ifeq ($(filter $(1),$(NO_AUTOPATCH)),)
+ $(verbose) $$(MAKE) --no-print-directory autopatch-$(DEP_NAME)
+endif
+
+.PHONY: autopatch-$(call dep_name,$1)
+
+autopatch-$(call dep_name,$1)::
+ $(verbose) if [ "$(1)" = "amqp_client" -a "$(RABBITMQ_CLIENT_PATCH)" ]; then \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \
+ echo " PATCH Downloading rabbitmq-codegen"; \
+ git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \
+ fi; \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-server ]; then \
+ echo " PATCH Downloading rabbitmq-server"; \
+ git clone https://github.com/rabbitmq/rabbitmq-server.git $(DEPS_DIR)/rabbitmq-server; \
+ fi; \
+ ln -s $(DEPS_DIR)/amqp_client/deps/rabbit_common-0.0.0 $(DEPS_DIR)/rabbit_common; \
+ elif [ "$(1)" = "rabbit" -a "$(RABBITMQ_SERVER_PATCH)" ]; then \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \
+ echo " PATCH Downloading rabbitmq-codegen"; \
+ git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \
+		fi; \
+ elif [ "$1" = "elixir" -a "$(ELIXIR_PATCH)" ]; then \
+ ln -s lib/elixir/ebin $(DEPS_DIR)/elixir/; \
+ else \
+ $$(call dep_autopatch,$(call dep_name,$1)) \
+ fi
+endef
+
+$(foreach dep,$(BUILD_DEPS) $(DEPS),$(eval $(call dep_target,$(dep))))
+
+ifndef IS_APP
+clean:: clean-apps
+
+clean-apps:
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ $(MAKE) -C $$dep clean IS_APP=1; \
+ done
+
+distclean:: distclean-apps
+
+distclean-apps:
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ $(MAKE) -C $$dep distclean IS_APP=1; \
+ done
+endif
+
+ifndef SKIP_DEPS
+distclean:: distclean-deps
+
+distclean-deps:
+ $(gen_verbose) rm -rf $(DEPS_DIR)
+endif
+
+# Forward-declare variables used in core/deps-tools.mk. This is required
+# in case plugins use them.
+
+ERLANG_MK_RECURSIVE_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-deps-list.log
+ERLANG_MK_RECURSIVE_DOC_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-doc-deps-list.log
+ERLANG_MK_RECURSIVE_REL_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-rel-deps-list.log
+ERLANG_MK_RECURSIVE_TEST_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-test-deps-list.log
+ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-shell-deps-list.log
+
+ERLANG_MK_QUERY_DEPS_FILE = $(ERLANG_MK_TMP)/query-deps.log
+ERLANG_MK_QUERY_DOC_DEPS_FILE = $(ERLANG_MK_TMP)/query-doc-deps.log
+ERLANG_MK_QUERY_REL_DEPS_FILE = $(ERLANG_MK_TMP)/query-rel-deps.log
+ERLANG_MK_QUERY_TEST_DEPS_FILE = $(ERLANG_MK_TMP)/query-test-deps.log
+ERLANG_MK_QUERY_SHELL_DEPS_FILE = $(ERLANG_MK_TMP)/query-shell-deps.log
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: clean-app
+
+# Configuration.
+
+ERLC_OPTS ?= -Werror +debug_info +warn_export_vars +warn_shadow_vars \
+ +warn_obsolete_guard # +bin_opt_info +warn_export_all +warn_missing_spec
+COMPILE_FIRST ?=
+COMPILE_FIRST_PATHS = $(addprefix src/,$(addsuffix .erl,$(COMPILE_FIRST)))
+ERLC_EXCLUDE ?=
+ERLC_EXCLUDE_PATHS = $(addprefix src/,$(addsuffix .erl,$(ERLC_EXCLUDE)))
+
+ERLC_ASN1_OPTS ?=
+
+ERLC_MIB_OPTS ?=
+COMPILE_MIB_FIRST ?=
+COMPILE_MIB_FIRST_PATHS = $(addprefix mibs/,$(addsuffix .mib,$(COMPILE_MIB_FIRST)))
+
+# Verbosity.
+
+app_verbose_0 = @echo " APP " $(PROJECT);
+app_verbose_2 = set -x;
+app_verbose = $(app_verbose_$(V))
+
+appsrc_verbose_0 = @echo " APP " $(PROJECT).app.src;
+appsrc_verbose_2 = set -x;
+appsrc_verbose = $(appsrc_verbose_$(V))
+
+makedep_verbose_0 = @echo " DEPEND" $(PROJECT).d;
+makedep_verbose_2 = set -x;
+makedep_verbose = $(makedep_verbose_$(V))
+
+erlc_verbose_0 = @echo " ERLC " $(filter-out $(patsubst %,%.erl,$(ERLC_EXCLUDE)),\
+ $(filter %.erl %.core,$(?F)));
+erlc_verbose_2 = set -x;
+erlc_verbose = $(erlc_verbose_$(V))
+
+xyrl_verbose_0 = @echo " XYRL " $(filter %.xrl %.yrl,$(?F));
+xyrl_verbose_2 = set -x;
+xyrl_verbose = $(xyrl_verbose_$(V))
+
+asn1_verbose_0 = @echo " ASN1 " $(filter %.asn1,$(?F));
+asn1_verbose_2 = set -x;
+asn1_verbose = $(asn1_verbose_$(V))
+
+mib_verbose_0 = @echo " MIB " $(filter %.bin %.mib,$(?F));
+mib_verbose_2 = set -x;
+mib_verbose = $(mib_verbose_$(V))
+
+ifneq ($(wildcard src/),)
+
+# Targets.
+
+app:: $(if $(wildcard ebin/test),clean) deps
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d
+ $(verbose) $(MAKE) --no-print-directory app-build
+
+ifeq ($(wildcard src/$(PROJECT_MOD).erl),)
+define app_file
+{application, '$(PROJECT)', [
+ {description, "$(PROJECT_DESCRIPTION)"},
+ {vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP),
+ {id$(comma)$(space)"$(1)"}$(comma))
+ {modules, [$(call comma_list,$(2))]},
+ {registered, []},
+ {applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(foreach dep,$(DEPS),$(call dep_name,$(dep))))]},
+ {env, $(subst \,\\,$(PROJECT_ENV))}$(if $(findstring {,$(PROJECT_APP_EXTRA_KEYS)),$(comma)$(newline)$(tab)$(subst \,\\,$(PROJECT_APP_EXTRA_KEYS)),)
+]}.
+endef
+else
+define app_file
+{application, '$(PROJECT)', [
+ {description, "$(PROJECT_DESCRIPTION)"},
+ {vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP),
+ {id$(comma)$(space)"$(1)"}$(comma))
+ {modules, [$(call comma_list,$(2))]},
+ {registered, [$(call comma_list,$(PROJECT)_sup $(PROJECT_REGISTERED))]},
+ {applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(foreach dep,$(DEPS),$(call dep_name,$(dep))))]},
+ {mod, {$(PROJECT_MOD), []}},
+ {env, $(subst \,\\,$(PROJECT_ENV))}$(if $(findstring {,$(PROJECT_APP_EXTRA_KEYS)),$(comma)$(newline)$(tab)$(subst \,\\,$(PROJECT_APP_EXTRA_KEYS)),)
+]}.
+endef
+endif
+
+app-build: ebin/$(PROJECT).app
+ $(verbose) :
+
+# Source files.
+
+ALL_SRC_FILES := $(sort $(call core_find,src/,*))
+
+ERL_FILES := $(filter %.erl,$(ALL_SRC_FILES))
+CORE_FILES := $(filter %.core,$(ALL_SRC_FILES))
+
+# ASN.1 files.
+
+ifneq ($(wildcard asn1/),)
+ASN1_FILES = $(sort $(call core_find,asn1/,*.asn1))
+ERL_FILES += $(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES))))
+
+define compile_asn1
+ $(verbose) mkdir -p include/
+ $(asn1_verbose) erlc -v -I include/ -o asn1/ +noobj $(ERLC_ASN1_OPTS) $(1)
+ $(verbose) mv asn1/*.erl src/
+ -$(verbose) mv asn1/*.hrl include/
+ $(verbose) mv asn1/*.asn1db include/
+endef
+
+$(PROJECT).d:: $(ASN1_FILES)
+ $(if $(strip $?),$(call compile_asn1,$?))
+endif
+
+# SNMP MIB files.
+
+ifneq ($(wildcard mibs/),)
+MIB_FILES = $(sort $(call core_find,mibs/,*.mib))
+
+$(PROJECT).d:: $(COMPILE_MIB_FIRST_PATHS) $(MIB_FILES)
+ $(verbose) mkdir -p include/ priv/mibs/
+ $(mib_verbose) erlc -v $(ERLC_MIB_OPTS) -o priv/mibs/ -I priv/mibs/ $?
+ $(mib_verbose) erlc -o include/ -- $(addprefix priv/mibs/,$(patsubst %.mib,%.bin,$(notdir $?)))
+endif
+
+# Leex and Yecc files.
+
+XRL_FILES := $(filter %.xrl,$(ALL_SRC_FILES))
+XRL_ERL_FILES = $(addprefix src/,$(patsubst %.xrl,%.erl,$(notdir $(XRL_FILES))))
+ERL_FILES += $(XRL_ERL_FILES)
+
+YRL_FILES := $(filter %.yrl,$(ALL_SRC_FILES))
+YRL_ERL_FILES = $(addprefix src/,$(patsubst %.yrl,%.erl,$(notdir $(YRL_FILES))))
+ERL_FILES += $(YRL_ERL_FILES)
+
+$(PROJECT).d:: $(XRL_FILES) $(YRL_FILES)
+ $(if $(strip $?),$(xyrl_verbose) erlc -v -o src/ $(YRL_ERLC_OPTS) $?)
+
+# Erlang and Core Erlang files.
+
+define makedep.erl
+ E = ets:new(makedep, [bag]),
+ G = digraph:new([acyclic]),
+ ErlFiles = lists:usort(string:tokens("$(ERL_FILES)", " ")),
+ DepsDir = "$(call core_native_path,$(DEPS_DIR))",
+ AppsDir = "$(call core_native_path,$(APPS_DIR))",
+ DepsDirsSrc = "$(if $(wildcard $(DEPS_DIR)/*/src), $(call core_native_path,$(wildcard $(DEPS_DIR)/*/src)))",
+ DepsDirsInc = "$(if $(wildcard $(DEPS_DIR)/*/include), $(call core_native_path,$(wildcard $(DEPS_DIR)/*/include)))",
+ AppsDirsSrc = "$(if $(wildcard $(APPS_DIR)/*/src), $(call core_native_path,$(wildcard $(APPS_DIR)/*/src)))",
+ AppsDirsInc = "$(if $(wildcard $(APPS_DIR)/*/include), $(call core_native_path,$(wildcard $(APPS_DIR)/*/include)))",
+ DepsDirs = lists:usort(string:tokens(DepsDirsSrc++DepsDirsInc, " ")),
+ AppsDirs = lists:usort(string:tokens(AppsDirsSrc++AppsDirsInc, " ")),
+ Modules = [{list_to_atom(filename:basename(F, ".erl")), F} || F <- ErlFiles],
+ Add = fun (Mod, Dep) ->
+ case lists:keyfind(Dep, 1, Modules) of
+ false -> ok;
+ {_, DepFile} ->
+ {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+ ets:insert(E, {ModFile, DepFile}),
+ digraph:add_vertex(G, Mod),
+ digraph:add_vertex(G, Dep),
+ digraph:add_edge(G, Mod, Dep)
+ end
+ end,
+ AddHd = fun (F, Mod, DepFile) ->
+ case file:open(DepFile, [read]) of
+ {error, enoent} ->
+ ok;
+ {ok, Fd} ->
+ {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+ case ets:match(E, {ModFile, DepFile}) of
+ [] ->
+ ets:insert(E, {ModFile, DepFile}),
+						F(F, Fd, Mod, 0);
+ _ -> ok
+ end
+ end
+ end,
+	SearchHrl = fun
+		F(_Hrl, []) -> {error, enoent};
+		F(Hrl, [Dir|Dirs]) ->
+			HrlF = filename:join([Dir, Hrl]),
+			case filelib:is_file(HrlF) of
+				true ->
+					{ok, HrlF};
+				false -> F(Hrl, Dirs)
+			end
+	end,
+ Attr = fun
+ (_F, Mod, behavior, Dep) ->
+ Add(Mod, Dep);
+ (_F, Mod, behaviour, Dep) ->
+ Add(Mod, Dep);
+ (_F, Mod, compile, {parse_transform, Dep}) ->
+ Add(Mod, Dep);
+ (_F, Mod, compile, Opts) when is_list(Opts) ->
+ case proplists:get_value(parse_transform, Opts) of
+ undefined -> ok;
+ Dep -> Add(Mod, Dep)
+ end;
+		(F, Mod, include, Hrl) ->
+			case SearchHrl(Hrl, ["src", "include", AppsDir, DepsDir] ++ AppsDirs ++ DepsDirs) of
+				{ok, FoundHrl} -> AddHd(F, Mod, FoundHrl);
+				{error, _} -> false
+			end;
+		(F, Mod, include_lib, Hrl) ->
+			case SearchHrl(Hrl, ["src", "include", AppsDir, DepsDir] ++ AppsDirs ++ DepsDirs) of
+				{ok, FoundHrl} -> AddHd(F, Mod, FoundHrl);
+				{error, _} -> false
+			end;
+ (F, Mod, import, {Imp, _}) ->
+ IsFile =
+ case lists:keyfind(Imp, 1, Modules) of
+ false -> false;
+ {_, FilePath} -> filelib:is_file(FilePath)
+ end,
+ case IsFile of
+ false -> ok;
+ true -> Add(Mod, Imp)
+ end;
+ (_, _, _, _) -> ok
+ end,
+ MakeDepend = fun
+ (F, Fd, Mod, StartLocation) ->
+			{ok, _Filename} = file:pid2name(Fd),
+ case io:parse_erl_form(Fd, undefined, StartLocation) of
+ {ok, AbsData, EndLocation} ->
+ case AbsData of
+ {attribute, _, Key, Value} ->
+ Attr(F, Mod, Key, Value),
+ F(F, Fd, Mod, EndLocation);
+ _ -> F(F, Fd, Mod, EndLocation)
+ end;
+				{eof, _} -> file:close(Fd);
+				{error, _ErrorDescription} ->
+					file:close(Fd);
+				{error, _ErrorInfo, ErrorLocation} ->
+ F(F, Fd, Mod, ErrorLocation)
+ end,
+ ok
+ end,
+ [begin
+ Mod = list_to_atom(filename:basename(F, ".erl")),
+ case file:open(F, [read]) of
+			{ok, Fd} -> MakeDepend(MakeDepend, Fd, Mod, 0);
+ {error, enoent} -> ok
+ end
+ end || F <- ErlFiles],
+ Depend = sofs:to_external(sofs:relation_to_family(sofs:relation(ets:tab2list(E)))),
+ CompileFirst = [X || X <- lists:reverse(digraph_utils:topsort(G)), [] =/= digraph:in_neighbours(G, X)],
+ TargetPath = fun(Target) ->
+ case lists:keyfind(Target, 1, Modules) of
+ false -> "";
+ {_, DepFile} ->
+ DirSubname = tl(string:tokens(filename:dirname(DepFile), "/")),
+ string:join(DirSubname ++ [atom_to_list(Target)], "/")
+ end
+ end,
+ Output0 = [
+ "# Generated by Erlang.mk. Edit at your own risk!\n\n",
+ [[F, "::", [[" ", D] || D <- Deps], "; @touch \$$@\n"] || {F, Deps} <- Depend],
+ "\nCOMPILE_FIRST +=", [[" ", TargetPath(CF)] || CF <- CompileFirst], "\n"
+ ],
+ Output = case "é" of
+ [233] -> unicode:characters_to_binary(Output0);
+ _ -> Output0
+ end,
+ ok = file:write_file("$(1)", Output),
+ halt()
+endef
+
+ifeq ($(if $(NO_MAKEDEP),$(wildcard $(PROJECT).d),),)
+$(PROJECT).d:: $(ERL_FILES) $(call core_find,include/,*.hrl) $(MAKEFILE_LIST)
+ $(makedep_verbose) $(call erlang,$(call makedep.erl,$@))
+endif
+
+ifeq ($(IS_APP)$(IS_DEP),)
+ifneq ($(words $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES)),0)
+# Rebuild everything when the Makefile changes.
+$(ERLANG_MK_TMP)/last-makefile-change: $(MAKEFILE_LIST) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES); \
+ touch -c $(PROJECT).d; \
+ fi
+ $(verbose) touch $@
+
+$(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES):: $(ERLANG_MK_TMP)/last-makefile-change
+ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change
+endif
+endif
+
+$(PROJECT).d::
+ $(verbose) :
+
+include $(wildcard $(PROJECT).d)
+
+ebin/$(PROJECT).app:: ebin/
+
+ebin/:
+ $(verbose) mkdir -p ebin/
+
+define compile_erl
+ $(erlc_verbose) erlc -v $(if $(IS_DEP),$(filter-out -Werror,$(ERLC_OPTS)),$(ERLC_OPTS)) -o ebin/ \
+ -pa ebin/ -I include/ $(filter-out $(ERLC_EXCLUDE_PATHS),$(COMPILE_FIRST_PATHS) $(1))
+endef
+
+define validate_app_file
+ case file:consult("ebin/$(PROJECT).app") of
+ {ok, _} -> halt();
+ _ -> halt(1)
+ end
+endef
+
+ebin/$(PROJECT).app:: $(ERL_FILES) $(CORE_FILES) $(wildcard src/$(PROJECT).app.src)
+ $(eval FILES_TO_COMPILE := $(filter-out src/$(PROJECT).app.src,$?))
+ $(if $(strip $(FILES_TO_COMPILE)),$(call compile_erl,$(FILES_TO_COMPILE)))
+# Older git versions do not have the --first-parent flag. Do without in that case.
+ $(eval GITDESCRIBE := $(shell git describe --dirty --abbrev=7 --tags --always --first-parent 2>/dev/null \
+ || git describe --dirty --abbrev=7 --tags --always 2>/dev/null || true))
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(filter-out $(ERLC_EXCLUDE_PATHS),$(ERL_FILES) $(CORE_FILES) $(BEAM_FILES)))))))
+ifeq ($(wildcard src/$(PROJECT).app.src),)
+ $(app_verbose) printf '$(subst %,%%,$(subst $(newline),\n,$(subst ','\'',$(call app_file,$(GITDESCRIBE),$(MODULES)))))' \
+ > ebin/$(PROJECT).app
+ $(verbose) if ! $(call erlang,$(call validate_app_file)); then \
+ echo "The .app file produced is invalid. Please verify the value of PROJECT_ENV." >&2; \
+ exit 1; \
+ fi
+else
+ $(verbose) if [ -z "$$(grep -e '^[^%]*{\s*modules\s*,' src/$(PROJECT).app.src)" ]; then \
+		echo "No {modules, []} entry found in $(PROJECT).app.src. Please consult the erlang.mk documentation for instructions." >&2; \
+ exit 1; \
+ fi
+ $(appsrc_verbose) cat src/$(PROJECT).app.src \
+ | sed "s/{[[:space:]]*modules[[:space:]]*,[[:space:]]*\[\]}/{modules, \[$(call comma_list,$(MODULES))\]}/" \
+ | sed "s/{id,[[:space:]]*\"git\"}/{id, \"$(subst /,\/,$(GITDESCRIBE))\"}/" \
+ > ebin/$(PROJECT).app
+endif
+ifneq ($(wildcard src/$(PROJECT).appup),)
+ $(verbose) cp src/$(PROJECT).appup ebin/
+endif
+
+clean:: clean-app
+
+clean-app:
+ $(gen_verbose) rm -rf $(PROJECT).d ebin/ priv/mibs/ $(XRL_ERL_FILES) $(YRL_ERL_FILES) \
+ $(addprefix include/,$(patsubst %.mib,%.hrl,$(notdir $(MIB_FILES)))) \
+ $(addprefix include/,$(patsubst %.asn1,%.hrl,$(notdir $(ASN1_FILES)))) \
+ $(addprefix include/,$(patsubst %.asn1,%.asn1db,$(notdir $(ASN1_FILES)))) \
+ $(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES))))
+
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Viktor Söderqvist <viktor@zuiderkwast.se>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: docs-deps
+
+# Configuration.
+
+ALL_DOC_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(DOC_DEPS))
+
+# Targets.
+
+$(foreach dep,$(DOC_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+doc-deps:
+else
+doc-deps: $(ALL_DOC_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_DOC_DEPS_DIRS) ; do $(MAKE) -C $$dep IS_DEP=1; done
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: rel-deps
+
+# Configuration.
+
+ALL_REL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(REL_DEPS))
+
+# Targets.
+
+$(foreach dep,$(REL_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+rel-deps:
+else
+rel-deps: $(ALL_REL_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_REL_DEPS_DIRS) ; do $(MAKE) -C $$dep; done
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: test-deps test-dir test-build clean-test-dir
+
+# Configuration.
+
+TEST_DIR ?= $(CURDIR)/test
+
+ALL_TEST_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(TEST_DEPS))
+
+TEST_ERLC_OPTS ?= +debug_info +warn_export_vars +warn_shadow_vars +warn_obsolete_guard
+TEST_ERLC_OPTS += -DTEST=1
+
+# Targets.
+
+$(foreach dep,$(TEST_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+test-deps:
+else
+test-deps: $(ALL_TEST_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_TEST_DEPS_DIRS) ; do \
+ if [ -z "$(strip $(FULL))" ] && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ else \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ fi \
+ done
+endif
+
+ifneq ($(wildcard $(TEST_DIR)),)
+test-dir: $(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build
+ @:
+
+test_erlc_verbose_0 = @echo " ERLC " $(filter-out $(patsubst %,%.erl,$(ERLC_EXCLUDE)),\
+ $(filter %.erl %.core,$(notdir $(FILES_TO_COMPILE))));
+test_erlc_verbose_2 = set -x;
+test_erlc_verbose = $(test_erlc_verbose_$(V))
+
+define compile_test_erl
+ $(test_erlc_verbose) erlc -v $(TEST_ERLC_OPTS) -o $(TEST_DIR) \
+ -pa ebin/ -I include/ $(1)
+endef
+
+ERL_TEST_FILES = $(call core_find,$(TEST_DIR)/,*.erl)
+$(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build: $(ERL_TEST_FILES) $(MAKEFILE_LIST)
+ $(eval FILES_TO_COMPILE := $(if $(filter $(MAKEFILE_LIST),$?),$(filter $(ERL_TEST_FILES),$^),$?))
+ $(if $(strip $(FILES_TO_COMPILE)),$(call compile_test_erl,$(FILES_TO_COMPILE)) && touch $@)
+endif
+
+test-build:: IS_TEST=1
+test-build:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build:: $(if $(wildcard src),$(if $(wildcard ebin/test),,clean)) $(if $(IS_APP),,deps test-deps)
+# We already compiled everything when IS_APP=1.
+ifndef IS_APP
+ifneq ($(wildcard src),)
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(verbose) $(MAKE) --no-print-directory app-build ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(gen_verbose) touch ebin/test
+endif
+ifneq ($(wildcard $(TEST_DIR)),)
+ $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+endif
+endif
+
+# Roughly the same as test-build, but when IS_APP=1.
+# We only care about compiling the current application.
+ifdef IS_APP
+test-build-app:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build-app:: deps test-deps
+ifneq ($(wildcard src),)
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(verbose) $(MAKE) --no-print-directory app-build ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(gen_verbose) touch ebin/test
+endif
+ifneq ($(wildcard $(TEST_DIR)),)
+ $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+endif
+endif
+
+clean:: clean-test-dir
+
+clean-test-dir:
+ifneq ($(wildcard $(TEST_DIR)/*.beam),)
+ $(gen_verbose) rm -f $(TEST_DIR)/*.beam $(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: rebar.config
+
+# We strip out -Werror because we don't want to fail due to
+# warnings when used as a dependency.
+
+compat_prepare_erlc_opts = $(shell echo "$1" | sed 's/, */,/g')
+
+define compat_convert_erlc_opts
+$(if $(filter-out -Werror,$1),\
+ $(if $(findstring +,$1),\
+ $(shell echo $1 | cut -b 2-)))
+endef
+
+define compat_erlc_opts_to_list
+[$(call comma_list,$(foreach o,$(call compat_prepare_erlc_opts,$1),$(call compat_convert_erlc_opts,$o)))]
+endef
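+
+# As a sketch of the conversion: ERLC_OPTS = -Werror +debug_info +warn_export_vars
+# becomes {erl_opts, [debug_info,warn_export_vars]} in the generated rebar.config.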
+
+define compat_rebar_config
+{deps, [
+$(call comma_list,$(foreach d,$(DEPS),\
+ $(if $(filter hex,$(call dep_fetch,$d)),\
+ {$(call dep_name,$d)$(comma)"$(call dep_repo,$d)"},\
+ {$(call dep_name,$d)$(comma)".*"$(comma){git,"$(call dep_repo,$d)"$(comma)"$(call dep_commit,$d)"}})))
+]}.
+{erl_opts, $(call compat_erlc_opts_to_list,$(ERLC_OPTS))}.
+endef
+
+rebar.config:
+ $(gen_verbose) $(call core_render,compat_rebar_config,rebar.config)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter asciideck,$(DEPS) $(DOC_DEPS)),asciideck)
+
+.PHONY: asciidoc asciidoc-guide asciidoc-manual install-asciidoc distclean-asciidoc-guide distclean-asciidoc-manual
+
+# Core targets.
+
+docs:: asciidoc
+
+distclean:: distclean-asciidoc-guide distclean-asciidoc-manual
+
+# Plugin-specific targets.
+
+asciidoc: asciidoc-guide asciidoc-manual
+
+# User guide.
+
+ifeq ($(wildcard doc/src/guide/book.asciidoc),)
+asciidoc-guide:
+else
+asciidoc-guide: distclean-asciidoc-guide doc-deps
+ a2x -v -f pdf doc/src/guide/book.asciidoc && mv doc/src/guide/book.pdf doc/guide.pdf
+ a2x -v -f chunked doc/src/guide/book.asciidoc && mv doc/src/guide/book.chunked/ doc/html/
+
+distclean-asciidoc-guide:
+ $(gen_verbose) rm -rf doc/html/ doc/guide.pdf
+endif
+
+# Man pages.
+
+ASCIIDOC_MANUAL_FILES := $(wildcard doc/src/manual/*.asciidoc)
+
+ifeq ($(ASCIIDOC_MANUAL_FILES),)
+asciidoc-manual:
+else
+
+# Configuration.
+
+MAN_INSTALL_PATH ?= /usr/local/share/man
+MAN_SECTIONS ?= 3 7
+MAN_PROJECT ?= $(shell echo $(PROJECT) | sed 's/^./\U&\E/')
+MAN_VERSION ?= $(PROJECT_VERSION)
+
+# Plugin-specific targets.
+
+define asciidoc2man.erl
+try
+ [begin
+ io:format(" ADOC ~s~n", [F]),
+ ok = asciideck:to_manpage(asciideck:parse_file(F), #{
+ compress => gzip,
+ outdir => filename:dirname(F),
+ extra2 => "$(MAN_PROJECT) $(MAN_VERSION)",
+ extra3 => "$(MAN_PROJECT) Function Reference"
+ })
+ end || F <- [$(shell echo $(addprefix $(comma)\",$(addsuffix \",$1)) | sed 's/^.//')]],
+ halt(0)
+catch C:E ->
+ io:format("Exception ~p:~p~nStacktrace: ~p~n", [C, E, erlang:get_stacktrace()]),
+ halt(1)
+end.
+endef
+
+asciidoc-manual:: doc-deps
+
+asciidoc-manual:: $(ASCIIDOC_MANUAL_FILES)
+ $(gen_verbose) $(call erlang,$(call asciidoc2man.erl,$?))
+ $(verbose) $(foreach s,$(MAN_SECTIONS),mkdir -p doc/man$s/ && mv doc/src/manual/*.$s.gz doc/man$s/;)
+
+install-docs:: install-asciidoc
+
+install-asciidoc: asciidoc-manual
+ $(foreach s,$(MAN_SECTIONS),\
+ mkdir -p $(MAN_INSTALL_PATH)/man$s/ && \
+ install -g `id -g` -o `id -u` -m 0644 doc/man$s/*.gz $(MAN_INSTALL_PATH)/man$s/;)
+
+distclean-asciidoc-manual:
+ $(gen_verbose) rm -rf $(addprefix doc/man,$(MAN_SECTIONS))
+endif
+endif
+
+# Copyright (c) 2014-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: bootstrap bootstrap-lib bootstrap-rel new list-templates
+
+# Core targets.
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Bootstrap targets:" \
+ " bootstrap Generate a skeleton of an OTP application" \
+ " bootstrap-lib Generate a skeleton of an OTP library" \
+ " bootstrap-rel Generate the files needed to build a release" \
+ " new-app in=NAME Create a new local OTP application NAME" \
+ " new-lib in=NAME Create a new local OTP library NAME" \
+ " new t=TPL n=NAME Generate a module NAME based on the template TPL" \
+ " new t=T n=N in=APP Generate a module NAME based on the template TPL in APP" \
+ " list-templates List available templates"
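+
+# For example, "make new t=gen_server n=my_server" renders the tpl_gen_server
+# template below into src/my_server.erl; the module name is hypothetical.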
+
+# Bootstrap templates.
+
+define bs_appsrc
+{application, $p, [
+ {description, ""},
+ {vsn, "0.1.0"},
+ {id, "git"},
+ {modules, []},
+ {registered, []},
+ {applications, [
+ kernel,
+ stdlib
+ ]},
+ {mod, {$p_app, []}},
+ {env, []}
+]}.
+endef
+
+define bs_appsrc_lib
+{application, $p, [
+ {description, ""},
+ {vsn, "0.1.0"},
+ {id, "git"},
+ {modules, []},
+ {registered, []},
+ {applications, [
+ kernel,
+ stdlib
+ ]}
+]}.
+endef
+
+# To prevent autocompletion issues with ZSH, we add "include erlang.mk"
+# separately during the actual bootstrap.
+define bs_Makefile
+PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.1.0
+$(if $(SP),
+# Whitespace to be used when creating files from templates.
+SP = $(SP)
+)
+endef
+
+define bs_apps_Makefile
+PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.1.0
+$(if $(SP),
+# Whitespace to be used when creating files from templates.
+SP = $(SP)
+)
+# Make sure we know where the applications are located.
+ROOT_DIR ?= $(call core_relpath,$(dir $(ERLANG_MK_FILENAME)),$(APPS_DIR)/app)
+APPS_DIR ?= ..
+DEPS_DIR ?= $(call core_relpath,$(DEPS_DIR),$(APPS_DIR)/app)
+
+include $$(ROOT_DIR)/erlang.mk
+endef
+
+define bs_app
+-module($p_app).
+-behaviour(application).
+
+-export([start/2]).
+-export([stop/1]).
+
+start(_Type, _Args) ->
+ $p_sup:start_link().
+
+stop(_State) ->
+ ok.
+endef
+
+define bs_relx_config
+{release, {$p_release, "1"}, [$p, sasl, runtime_tools]}.
+{extended_start_script, true}.
+{sys_config, "config/sys.config"}.
+{vm_args, "config/vm.args"}.
+endef
+
+define bs_sys_config
+[
+].
+endef
+
+define bs_vm_args
+-name $p@127.0.0.1
+-setcookie $p
+-heart
+endef
+
+# Normal templates.
+
+define tpl_supervisor
+-module($(n)).
+-behaviour(supervisor).
+
+-export([start_link/0]).
+-export([init/1]).
+
+start_link() ->
+ supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+init([]) ->
+ Procs = [],
+ {ok, {{one_for_one, 1, 5}, Procs}}.
+endef
+
+define tpl_gen_server
+-module($(n)).
+-behaviour(gen_server).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_server.
+-export([init/1]).
+-export([handle_call/3]).
+-export([handle_cast/2]).
+-export([handle_info/2]).
+-export([terminate/2]).
+-export([code_change/3]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_server:start_link(?MODULE, [], []).
+
+%% gen_server.
+
+init([]) ->
+ {ok, #state{}}.
+
+handle_call(_Request, _From, State) ->
+ {reply, ignored, State}.
+
+handle_cast(_Msg, State) ->
+ {noreply, State}.
+
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+endef
+
+define tpl_module
+-module($(n)).
+-export([]).
+endef
+
+define tpl_cowboy_http
+-module($(n)).
+-behaviour(cowboy_http_handler).
+
+-export([init/3]).
+-export([handle/2]).
+-export([terminate/3]).
+
+-record(state, {
+}).
+
+init(_, Req, _Opts) ->
+ {ok, Req, #state{}}.
+
+handle(Req, State=#state{}) ->
+ {ok, Req2} = cowboy_req:reply(200, Req),
+ {ok, Req2, State}.
+
+terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_gen_fsm
+-module($(n)).
+-behaviour(gen_fsm).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_fsm.
+-export([init/1]).
+-export([state_name/2]).
+-export([handle_event/3]).
+-export([state_name/3]).
+-export([handle_sync_event/4]).
+-export([handle_info/3]).
+-export([terminate/3]).
+-export([code_change/4]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_fsm:start_link(?MODULE, [], []).
+
+%% gen_fsm.
+
+init([]) ->
+ {ok, state_name, #state{}}.
+
+state_name(_Event, StateData) ->
+ {next_state, state_name, StateData}.
+
+handle_event(_Event, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+state_name(_Event, _From, StateData) ->
+ {reply, ignored, state_name, StateData}.
+
+handle_sync_event(_Event, _From, StateName, StateData) ->
+ {reply, ignored, StateName, StateData}.
+
+handle_info(_Info, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+terminate(_Reason, _StateName, _StateData) ->
+ ok.
+
+code_change(_OldVsn, StateName, StateData, _Extra) ->
+ {ok, StateName, StateData}.
+endef
+
+define tpl_gen_statem
+-module($(n)).
+-behaviour(gen_statem).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_statem.
+-export([callback_mode/0]).
+-export([init/1]).
+-export([state_name/3]).
+-export([handle_event/4]).
+-export([terminate/3]).
+-export([code_change/4]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_statem:start_link(?MODULE, [], []).
+
+%% gen_statem.
+
+callback_mode() ->
+ state_functions.
+
+init([]) ->
+ {ok, state_name, #state{}}.
+
+state_name(_EventType, _EventData, StateData) ->
+ {next_state, state_name, StateData}.
+
+handle_event(_EventType, _EventData, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+terminate(_Reason, _StateName, _StateData) ->
+ ok.
+
+code_change(_OldVsn, StateName, StateData, _Extra) ->
+ {ok, StateName, StateData}.
+endef
+
+define tpl_cowboy_loop
+-module($(n)).
+-behaviour(cowboy_loop_handler).
+
+-export([init/3]).
+-export([info/3]).
+-export([terminate/3]).
+
+-record(state, {
+}).
+
+init(_, Req, _Opts) ->
+ {loop, Req, #state{}, 5000, hibernate}.
+
+info(_Info, Req, State) ->
+ {loop, Req, State, hibernate}.
+
+terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_cowboy_rest
+-module($(n)).
+
+-export([init/3]).
+-export([content_types_provided/2]).
+-export([get_html/2]).
+
+init(_, _Req, _Opts) ->
+ {upgrade, protocol, cowboy_rest}.
+
+content_types_provided(Req, State) ->
+ {[{{<<"text">>, <<"html">>, '*'}, get_html}], Req, State}.
+
+get_html(Req, State) ->
+ {<<"<html><body>This is REST!</body></html>">>, Req, State}.
+endef
+
+define tpl_cowboy_ws
+-module($(n)).
+-behaviour(cowboy_websocket_handler).
+
+-export([init/3]).
+-export([websocket_init/3]).
+-export([websocket_handle/3]).
+-export([websocket_info/3]).
+-export([websocket_terminate/3]).
+
+-record(state, {
+}).
+
+init(_, _, _) ->
+ {upgrade, protocol, cowboy_websocket}.
+
+websocket_init(_, Req, _Opts) ->
+ Req2 = cowboy_req:compact(Req),
+ {ok, Req2, #state{}}.
+
+websocket_handle({text, Data}, Req, State) ->
+ {reply, {text, Data}, Req, State};
+websocket_handle({binary, Data}, Req, State) ->
+ {reply, {binary, Data}, Req, State};
+websocket_handle(_Frame, Req, State) ->
+ {ok, Req, State}.
+
+websocket_info(_Info, Req, State) ->
+ {ok, Req, State}.
+
+websocket_terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_ranch_protocol
+-module($(n)).
+-behaviour(ranch_protocol).
+
+-export([start_link/4]).
+-export([init/4]).
+
+-type opts() :: [].
+-export_type([opts/0]).
+
+-record(state, {
+ socket :: inet:socket(),
+ transport :: module()
+}).
+
+start_link(Ref, Socket, Transport, Opts) ->
+ Pid = spawn_link(?MODULE, init, [Ref, Socket, Transport, Opts]),
+ {ok, Pid}.
+
+-spec init(ranch:ref(), inet:socket(), module(), opts()) -> ok.
+init(Ref, Socket, Transport, _Opts) ->
+ ok = ranch:accept_ack(Ref),
+ loop(#state{socket=Socket, transport=Transport}).
+
+loop(State) ->
+ loop(State).
+endef
+
+# Plugin-specific targets.
+
+ifndef WS
+ifdef SP
+WS = $(subst a,,a $(wordlist 1,$(SP),a a a a a a a a a a a a a a a a a a a a))
+else
+WS = $(tab)
+endif
+endif
+
+bootstrap:
+ifneq ($(wildcard src/),)
+ $(error Error: src/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(eval n := $(PROJECT)_sup)
+ $(verbose) $(call core_render,bs_Makefile,Makefile)
+ $(verbose) echo "include erlang.mk" >> Makefile
+ $(verbose) mkdir src/
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc,src/$(PROJECT).app.src)
+endif
+ $(verbose) $(call core_render,bs_app,src/$(PROJECT)_app.erl)
+ $(verbose) $(call core_render,tpl_supervisor,src/$(PROJECT)_sup.erl)
+
+bootstrap-lib:
+ifneq ($(wildcard src/),)
+ $(error Error: src/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(verbose) $(call core_render,bs_Makefile,Makefile)
+ $(verbose) echo "include erlang.mk" >> Makefile
+ $(verbose) mkdir src/
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc_lib,src/$(PROJECT).app.src)
+endif
+
+bootstrap-rel:
+ifneq ($(wildcard relx.config),)
+ $(error Error: relx.config already exists)
+endif
+ifneq ($(wildcard config/),)
+ $(error Error: config/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(verbose) $(call core_render,bs_relx_config,relx.config)
+ $(verbose) mkdir config/
+ $(verbose) $(call core_render,bs_sys_config,config/sys.config)
+ $(verbose) $(call core_render,bs_vm_args,config/vm.args)
+
+new-app:
+ifndef in
+ $(error Usage: $(MAKE) new-app in=APP)
+endif
+ifneq ($(wildcard $(APPS_DIR)/$in),)
+ $(error Error: Application $in already exists)
+endif
+ $(eval p := $(in))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(eval n := $(in)_sup)
+ $(verbose) mkdir -p $(APPS_DIR)/$p/src/
+ $(verbose) $(call core_render,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile)
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc,$(APPS_DIR)/$p/src/$p.app.src)
+endif
+ $(verbose) $(call core_render,bs_app,$(APPS_DIR)/$p/src/$p_app.erl)
+ $(verbose) $(call core_render,tpl_supervisor,$(APPS_DIR)/$p/src/$p_sup.erl)
+
+new-lib:
+ifndef in
+ $(error Usage: $(MAKE) new-lib in=APP)
+endif
+ifneq ($(wildcard $(APPS_DIR)/$in),)
+ $(error Error: Application $in already exists)
+endif
+ $(eval p := $(in))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(verbose) mkdir -p $(APPS_DIR)/$p/src/
+ $(verbose) $(call core_render,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile)
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc_lib,$(APPS_DIR)/$p/src/$p.app.src)
+endif
+
+new:
+ifeq ($(wildcard src/)$(in),)
+ $(error Error: src/ directory does not exist)
+endif
+ifndef t
+ $(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP])
+endif
+ifndef n
+ $(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP])
+endif
+ifdef in
+ $(verbose) $(call core_render,tpl_$(t),$(APPS_DIR)/$(in)/src/$(n).erl)
+else
+ $(verbose) $(call core_render,tpl_$(t),src/$(n).erl)
+endif
+
+list-templates:
+	$(verbose) echo Available templates:
+ $(verbose) printf " %s\n" $(sort $(patsubst tpl_%,%,$(filter tpl_%,$(.VARIABLES))))
+
+# Copyright (c) 2014-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: clean-c_src distclean-c_src-env
+
+# Configuration.
+
+C_SRC_DIR ?= $(CURDIR)/c_src
+C_SRC_ENV ?= $(C_SRC_DIR)/env.mk
+C_SRC_OUTPUT ?= $(CURDIR)/priv/$(PROJECT)
+C_SRC_TYPE ?= shared
+
+# System type and C compiler/flags.
+
+ifeq ($(PLATFORM),msys2)
+ C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?= .exe
+ C_SRC_OUTPUT_SHARED_EXTENSION ?= .dll
+else
+ C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?=
+ C_SRC_OUTPUT_SHARED_EXTENSION ?= .so
+endif
+
+ifeq ($(C_SRC_TYPE),shared)
+ C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_SHARED_EXTENSION)
+else
+ C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_EXECUTABLE_EXTENSION)
+endif
+
+ifeq ($(PLATFORM),msys2)
+# We hardcode the compiler used on MSYS2. The default CC=cc does
+# not produce working code. The "gcc" MSYS2 package also doesn't.
+ CC = /mingw64/bin/gcc
+ export CC
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),darwin)
+ CC ?= cc
+ CFLAGS ?= -O3 -std=c99 -arch x86_64 -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -arch x86_64 -Wall
+ LDFLAGS ?= -arch x86_64 -flat_namespace -undefined suppress
+else ifeq ($(PLATFORM),freebsd)
+ CC ?= cc
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),linux)
+ CC ?= gcc
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+endif
+
+ifneq ($(PLATFORM),msys2)
+ CFLAGS += -fPIC
+ CXXFLAGS += -fPIC
+endif
+
+CFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
+CXXFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
+
+LDLIBS += -L"$(ERL_INTERFACE_LIB_DIR)" -lei
+
+# Verbosity.
+
+c_verbose_0 = @echo " C " $(filter-out $(notdir $(MAKEFILE_LIST) $(C_SRC_ENV)),$(^F));
+c_verbose = $(c_verbose_$(V))
+
+cpp_verbose_0 = @echo " CPP " $(filter-out $(notdir $(MAKEFILE_LIST) $(C_SRC_ENV)),$(^F));
+cpp_verbose = $(cpp_verbose_$(V))
+
+link_verbose_0 = @echo " LD " $(@F);
+link_verbose = $(link_verbose_$(V))
+
+# Targets.
+
+ifeq ($(wildcard $(C_SRC_DIR)),)
+else ifneq ($(wildcard $(C_SRC_DIR)/Makefile),)
+app:: app-c_src
+
+test-build:: app-c_src
+
+app-c_src:
+ $(MAKE) -C $(C_SRC_DIR)
+
+clean::
+ $(MAKE) -C $(C_SRC_DIR) clean
+
+else
+
+ifeq ($(SOURCES),)
+SOURCES := $(sort $(foreach pat,*.c *.C *.cc *.cpp,$(call core_find,$(C_SRC_DIR)/,$(pat))))
+endif
+OBJECTS = $(addsuffix .o, $(basename $(SOURCES)))
+
+COMPILE_C = $(c_verbose) $(CC) $(CFLAGS) $(CPPFLAGS) -c
+COMPILE_CPP = $(cpp_verbose) $(CXX) $(CXXFLAGS) $(CPPFLAGS) -c
+
+app:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
+
+test-build:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
+
+$(C_SRC_OUTPUT_FILE): $(OBJECTS)
+ $(verbose) mkdir -p $(dir $@)
+ $(link_verbose) $(CC) $(OBJECTS) \
+ $(LDFLAGS) $(if $(filter $(C_SRC_TYPE),shared),-shared) $(LDLIBS) \
+ -o $(C_SRC_OUTPUT_FILE)
+
+$(OBJECTS): $(MAKEFILE_LIST) $(C_SRC_ENV)
+
+%.o: %.c
+ $(COMPILE_C) $(OUTPUT_OPTION) $<
+
+%.o: %.cc
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+%.o: %.C
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+%.o: %.cpp
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+clean:: clean-c_src
+
+clean-c_src:
+ $(gen_verbose) rm -f $(C_SRC_OUTPUT_FILE) $(OBJECTS)
+
+endif
+
+ifneq ($(wildcard $(C_SRC_DIR)),)
+ERL_ERTS_DIR = $(shell $(ERL) -eval 'io:format("~s~n", [code:lib_dir(erts)]), halt().')
+
+$(C_SRC_ENV):
+ $(verbose) $(ERL) -eval "file:write_file(\"$(call core_native_path,$(C_SRC_ENV))\", \
+ io_lib:format( \
+ \"# Generated by Erlang.mk. Edit at your own risk!~n~n\" \
+ \"ERTS_INCLUDE_DIR ?= ~s/erts-~s/include/~n\" \
+ \"ERL_INTERFACE_INCLUDE_DIR ?= ~s~n\" \
+ \"ERL_INTERFACE_LIB_DIR ?= ~s~n\" \
+ \"ERTS_DIR ?= $(ERL_ERTS_DIR)~n\", \
+ [code:root_dir(), erlang:system_info(version), \
+ code:lib_dir(erl_interface, include), \
+ code:lib_dir(erl_interface, lib)])), \
+ halt()."
+
+distclean:: distclean-c_src-env
+
+distclean-c_src-env:
+ $(gen_verbose) rm -f $(C_SRC_ENV)
+
+-include $(C_SRC_ENV)
+
+ifneq ($(ERL_ERTS_DIR),$(ERTS_DIR))
+$(shell rm -f $(C_SRC_ENV))
+endif
+endif
+
+# Templates.
+
+define bs_c_nif
+#include "erl_nif.h"
+
+static int loads = 0;
+
+static int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
+{
+ /* Initialize private data. */
+ *priv_data = NULL;
+
+ loads++;
+
+ return 0;
+}
+
+static int upgrade(ErlNifEnv* env, void** priv_data, void** old_priv_data, ERL_NIF_TERM load_info)
+{
+ /* Convert the private data to the new version. */
+ *priv_data = *old_priv_data;
+
+ loads++;
+
+ return 0;
+}
+
+static void unload(ErlNifEnv* env, void* priv_data)
+{
+ if (loads == 1) {
+ /* Destroy the private data. */
+ }
+
+ loads--;
+}
+
+static ERL_NIF_TERM hello(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ if (enif_is_atom(env, argv[0])) {
+ return enif_make_tuple2(env,
+ enif_make_atom(env, "hello"),
+ argv[0]);
+ }
+
+ return enif_make_tuple2(env,
+ enif_make_atom(env, "error"),
+ enif_make_atom(env, "badarg"));
+}
+
+static ErlNifFunc nif_funcs[] = {
+ {"hello", 1, hello}
+};
+
+ERL_NIF_INIT($n, nif_funcs, load, NULL, upgrade, unload)
+endef
+
+define bs_erl_nif
+-module($n).
+
+-export([hello/1]).
+
+-on_load(on_load/0).
+on_load() ->
+ PrivDir = case code:priv_dir(?MODULE) of
+ {error, _} ->
+ AppPath = filename:dirname(filename:dirname(code:which(?MODULE))),
+ filename:join(AppPath, "priv");
+ Path ->
+ Path
+ end,
+ erlang:load_nif(filename:join(PrivDir, atom_to_list(?MODULE)), 0).
+
+hello(_) ->
+ erlang:nif_error({not_loaded, ?MODULE}).
+endef
+
+new-nif:
+ifneq ($(wildcard $(C_SRC_DIR)/$n.c),)
+ $(error Error: $(C_SRC_DIR)/$n.c already exists)
+endif
+ifneq ($(wildcard src/$n.erl),)
+ $(error Error: src/$n.erl already exists)
+endif
+ifndef n
+ $(error Usage: $(MAKE) new-nif n=NAME [in=APP])
+endif
+ifdef in
+ $(verbose) $(MAKE) -C $(APPS_DIR)/$(in)/ new-nif n=$n in=
+else
+ $(verbose) mkdir -p $(C_SRC_DIR) src/
+ $(verbose) $(call core_render,bs_c_nif,$(C_SRC_DIR)/$n.c)
+ $(verbose) $(call core_render,bs_erl_nif,src/$n.erl)
+endif
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: ci ci-prepare ci-setup
+
+CI_OTP ?=
+CI_HIPE ?=
+CI_ERLLVM ?=
+
+ifeq ($(CI_VM),native)
+ERLC_OPTS += +native
+TEST_ERLC_OPTS += +native
+else ifeq ($(CI_VM),erllvm)
+ERLC_OPTS += +native +'{hipe, [to_llvm]}'
+TEST_ERLC_OPTS += +native +'{hipe, [to_llvm]}'
+endif
+
+ifeq ($(strip $(CI_OTP) $(CI_HIPE) $(CI_ERLLVM)),)
+ci::
+else
+
+ci:: $(addprefix ci-,$(CI_OTP) $(addsuffix -native,$(CI_HIPE)) $(addsuffix -erllvm,$(CI_ERLLVM)))
+
+ci-prepare: $(addprefix $(KERL_INSTALL_DIR)/,$(CI_OTP) $(addsuffix -native,$(CI_HIPE)))
+
+ci-setup::
+ $(verbose) :
+
+ci-extra::
+ $(verbose) :
+
+ci_verbose_0 = @echo " CI " $(1);
+ci_verbose = $(ci_verbose_$(V))
+
+define ci_target
+ci-$1: $(KERL_INSTALL_DIR)/$2
+ $(verbose) $(MAKE) --no-print-directory clean
+ $(ci_verbose) \
+ PATH="$(KERL_INSTALL_DIR)/$2/bin:$(PATH)" \
+ CI_OTP_RELEASE="$1" \
+ CT_OPTS="-label $1" \
+ CI_VM="$3" \
+ $(MAKE) ci-setup tests
+ $(verbose) $(MAKE) --no-print-directory ci-extra
+endef
+
+$(foreach otp,$(CI_OTP),$(eval $(call ci_target,$(otp),$(otp),otp)))
+$(foreach otp,$(CI_HIPE),$(eval $(call ci_target,$(otp)-native,$(otp)-native,native)))
+$(foreach otp,$(CI_ERLLVM),$(eval $(call ci_target,$(otp)-erllvm,$(otp)-native,erllvm)))
+
+$(foreach otp,$(filter-out $(ERLANG_OTP),$(CI_OTP)),$(eval $(call kerl_otp_target,$(otp))))
+$(foreach otp,$(filter-out $(ERLANG_HIPE),$(sort $(CI_HIPE) $(CI_ERLLVM))),$(eval $(call kerl_hipe_target,$(otp))))
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Continuous Integration targets:" \
+ " ci Run '$(MAKE) tests' on all configured Erlang versions." \
+ "" \
+ "The CI_OTP variable must be defined with the Erlang versions" \
+ "that must be tested. For example: CI_OTP = OTP-17.3.4 OTP-17.5.3"
+
+endif
+
+# Copyright (c) 2020, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifdef CONCUERROR_TESTS
+
+.PHONY: concuerror distclean-concuerror
+
+# Configuration
+
+CONCUERROR_LOGS_DIR ?= $(CURDIR)/logs
+CONCUERROR_OPTS ?=
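+
+# CONCUERROR_TESTS takes module:function pairs; a hypothetical example:
+#
+#   CONCUERROR_TESTS = my_tests:race_test my_tests:deadlock_test
+#
+# Each pair maps to a concuerror-my_tests-race_test style target below.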
+
+# Core targets.
+
+check:: concuerror
+
+ifndef KEEP_LOGS
+distclean:: distclean-concuerror
+endif
+
+# Plugin-specific targets.
+
+$(ERLANG_MK_TMP)/Concuerror/bin/concuerror: | $(ERLANG_MK_TMP)
+ $(verbose) git clone https://github.com/parapluu/Concuerror $(ERLANG_MK_TMP)/Concuerror
+ $(verbose) $(MAKE) -C $(ERLANG_MK_TMP)/Concuerror
+
+$(CONCUERROR_LOGS_DIR):
+ $(verbose) mkdir -p $(CONCUERROR_LOGS_DIR)
+
+define concuerror_html_report
+<!DOCTYPE html>
+<html lang="en">
+<head>
+<meta charset="utf-8">
+<title>Concuerror HTML report</title>
+</head>
+<body>
+<h1>Concuerror HTML report</h1>
+<p>Generated on $(concuerror_date)</p>
+<ul>
+$(foreach t,$(concuerror_targets),<li><a href="$(t).txt">$(t)</a></li>)
+</ul>
+</body>
+</html>
+endef
+
+concuerror: $(addprefix concuerror-,$(subst :,-,$(CONCUERROR_TESTS)))
+ $(eval concuerror_date := $(shell date))
+ $(eval concuerror_targets := $^)
+ $(verbose) $(call core_render,concuerror_html_report,$(CONCUERROR_LOGS_DIR)/concuerror.html)
+
+define concuerror_target
+.PHONY: concuerror-$1-$2
+
+concuerror-$1-$2: test-build | $(ERLANG_MK_TMP)/Concuerror/bin/concuerror $(CONCUERROR_LOGS_DIR)
+ $(ERLANG_MK_TMP)/Concuerror/bin/concuerror \
+ --pa $(CURDIR)/ebin --pa $(TEST_DIR) \
+ -o $(CONCUERROR_LOGS_DIR)/concuerror-$1-$2.txt \
+ $$(CONCUERROR_OPTS) -m $1 -t $2
+endef
+
+$(foreach test,$(CONCUERROR_TESTS),$(eval $(call concuerror_target,$(firstword $(subst :, ,$(test))),$(lastword $(subst :, ,$(test))))))
+
+distclean-concuerror:
+ $(gen_verbose) rm -rf $(CONCUERROR_LOGS_DIR)
+
+endif
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: ct apps-ct distclean-ct
+
+# Configuration.
+
+CT_OPTS ?=
+
+ifneq ($(wildcard $(TEST_DIR)),)
+ifndef CT_SUITES
+CT_SUITES := $(sort $(subst _SUITE.erl,,$(notdir $(call core_find,$(TEST_DIR)/,*_SUITE.erl))))
+endif
+endif
+CT_SUITES ?=
+CT_LOGS_DIR ?= $(CURDIR)/logs
+
+# Core targets.
+
+tests:: ct
+
+ifndef KEEP_LOGS
+distclean:: distclean-ct
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Common_test targets:" \
+ " ct Run all the common_test suites for this project" \
+ "" \
+ "All your common_test suites have their associated targets." \
+		"A suite named http_SUITE can be run using the ct-http target."
+
+# Plugin-specific targets.
+
+CT_RUN = ct_run \
+ -no_auto_compile \
+ -noinput \
+ -pa $(CURDIR)/ebin $(TEST_DIR) \
+ -dir $(TEST_DIR) \
+ -logdir $(CT_LOGS_DIR)
+
+ifeq ($(CT_SUITES),)
+ct: $(if $(IS_APP)$(ROOT_DIR),,apps-ct)
+else
+# We do not run tests if we are in an apps/* with no test directory.
+ifneq ($(IS_APP)$(wildcard $(TEST_DIR)),1)
+ct: test-build $(if $(IS_APP)$(ROOT_DIR),,apps-ct)
+ $(verbose) mkdir -p $(CT_LOGS_DIR)
+ $(gen_verbose) $(CT_RUN) -sname ct_$(PROJECT) -suite $(addsuffix _SUITE,$(CT_SUITES)) $(CT_OPTS)
+endif
+endif
+
+ifneq ($(ALL_APPS_DIRS),)
+define ct_app_target
+apps-ct-$1: test-build
+ $$(MAKE) -C $1 ct IS_APP=1
+endef
+
+$(foreach app,$(ALL_APPS_DIRS),$(eval $(call ct_app_target,$(app))))
+
+apps-ct: $(addprefix apps-ct-,$(ALL_APPS_DIRS))
+endif
+
+ifdef t
+ifeq (,$(findstring :,$t))
+CT_EXTRA = -group $t
+else
+t_words = $(subst :, ,$t)
+CT_EXTRA = -group $(firstword $(t_words)) -case $(lastword $(t_words))
+endif
+else
+ifdef c
+CT_EXTRA = -case $c
+else
+CT_EXTRA =
+endif
+endif
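+
+# For example, with a suite named http_SUITE (group and case names are
+# hypothetical):
+#
+#   make ct-http t=chunked          # run one group
+#   make ct-http t=chunked:close    # run one case within a group
+#   make ct-http c=close            # run one case directly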
+
+define ct_suite_target
+ct-$(1): test-build
+ $(verbose) mkdir -p $(CT_LOGS_DIR)
+ $(gen_verbose_esc) $(CT_RUN) -sname ct_$(PROJECT) -suite $(addsuffix _SUITE,$(1)) $(CT_EXTRA) $(CT_OPTS)
+endef
+
+$(foreach test,$(CT_SUITES),$(eval $(call ct_suite_target,$(test))))
+
+distclean-ct:
+ $(gen_verbose) rm -rf $(CT_LOGS_DIR)
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: plt distclean-plt dialyze
+
+# Configuration.
+
+DIALYZER_PLT ?= $(CURDIR)/.$(PROJECT).plt
+export DIALYZER_PLT
+
+PLT_APPS ?=
+DIALYZER_DIRS ?= --src -r $(wildcard src) $(ALL_APPS_DIRS)
+DIALYZER_OPTS ?= -Werror_handling -Wrace_conditions -Wunmatched_returns # -Wunderspecs
+DIALYZER_PLT_OPTS ?=
+
+# Core targets.
+
+check:: dialyze
+
+distclean:: distclean-plt
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Dialyzer targets:" \
+ " plt Build a PLT file for this project" \
+ " dialyze Analyze the project using Dialyzer"
+
+# Plugin-specific targets.
+
+define filter_opts.erl
+ Opts = init:get_plain_arguments(),
+ {Filtered, _} = lists:foldl(fun
+ (O, {Os, true}) -> {[O|Os], false};
+ (O = "-D", {Os, _}) -> {[O|Os], true};
+ (O = [\\$$-, \\$$D, _ | _], {Os, _}) -> {[O|Os], false};
+ (O = "-I", {Os, _}) -> {[O|Os], true};
+ (O = [\\$$-, \\$$I, _ | _], {Os, _}) -> {[O|Os], false};
+ (O = "-pa", {Os, _}) -> {[O|Os], true};
+ (_, Acc) -> Acc
+ end, {[], false}, Opts),
+ io:format("~s~n", [string:join(lists:reverse(Filtered), " ")]),
+ halt().
+endef
+
+# DIALYZER_PLT is a variable understood directly by Dialyzer.
+#
+# We append the path to erts at the end of the PLT. This works
+# because the PLT file is in the external term format and the
+# function binary_to_term/1 ignores any trailing data.
+$(DIALYZER_PLT): deps app
+ $(eval DEPS_LOG := $(shell test -f $(ERLANG_MK_TMP)/deps.log && \
+ while read p; do test -d $$p/ebin && echo $$p/ebin; done <$(ERLANG_MK_TMP)/deps.log))
+ $(verbose) dialyzer --build_plt $(DIALYZER_PLT_OPTS) --apps \
+ erts kernel stdlib $(PLT_APPS) $(OTP_DEPS) $(LOCAL_DEPS) $(DEPS_LOG) || test $$? -eq 2
+ $(verbose) $(ERL) -eval 'io:format("~n~s~n", [code:lib_dir(erts)]), halt().' >> $@
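+
+# An illustrative Erlang shell session (not part of the build) showing why
+# the appended path is harmless; binary_to_term/1 stops after one full term:
+#
+#   1> B = term_to_binary(plt_contents).
+#   2> binary_to_term(<<B/binary, "/path/to/erts">>).
+#   plt_contents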
+
+plt: $(DIALYZER_PLT)
+
+distclean-plt:
+ $(gen_verbose) rm -f $(DIALYZER_PLT)
+
+ifneq ($(wildcard $(DIALYZER_PLT)),)
+dialyze: $(if $(filter --src,$(DIALYZER_DIRS)),,deps app)
+ $(verbose) if ! tail -n1 $(DIALYZER_PLT) | \
+ grep -q "^`$(ERL) -eval 'io:format("~s", [code:lib_dir(erts)]), halt().'`$$"; then \
+ rm $(DIALYZER_PLT); \
+ $(MAKE) plt; \
+ fi
+else
+dialyze: $(DIALYZER_PLT)
+endif
+ $(verbose) dialyzer --no_native `$(ERL) \
+ -eval "$(subst $(newline),,$(call escape_dquotes,$(call filter_opts.erl)))" \
+ -extra $(ERLC_OPTS)` $(DIALYZER_DIRS) $(DIALYZER_OPTS) $(if $(wildcard ebin/),-pa ebin/)
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-edoc edoc
+
+# Configuration.
+
+EDOC_OPTS ?=
+EDOC_SRC_DIRS ?=
+EDOC_OUTPUT ?= doc
+
+define edoc.erl
+ SrcPaths = lists:foldl(fun(P, Acc) ->
+ filelib:wildcard(atom_to_list(P) ++ "/{src,c_src}") ++ Acc
+ end, [], [$(call comma_list,$(patsubst %,'%',$(call core_native_path,$(EDOC_SRC_DIRS))))]),
+ DefaultOpts = [{dir, "$(EDOC_OUTPUT)"}, {source_path, SrcPaths}, {subpackages, false}],
+ edoc:application($(1), ".", [$(2)] ++ DefaultOpts),
+ halt(0).
+endef
+
+# Core targets.
+
+ifneq ($(strip $(EDOC_SRC_DIRS)$(wildcard doc/overview.edoc)),)
+docs:: edoc
+endif
+
+distclean:: distclean-edoc
+
+# Plugin-specific targets.
+
+edoc: distclean-edoc doc-deps
+ $(gen_verbose) $(call erlang,$(call edoc.erl,$(PROJECT),$(EDOC_OPTS)))
+
+distclean-edoc:
+ $(gen_verbose) rm -f $(EDOC_OUTPUT)/*.css $(EDOC_OUTPUT)/*.html $(EDOC_OUTPUT)/*.png $(EDOC_OUTPUT)/edoc-info
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Configuration.
+
+DTL_FULL_PATH ?=
+DTL_PATH ?= templates/
+DTL_PREFIX ?=
+DTL_SUFFIX ?= _dtl
+DTL_OPTS ?=
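+
+# With the defaults above, a template such as templates/my_page.dtl (a
+# hypothetical name) compiles to the module my_page_dtl in ebin/.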
+
+# Verbosity.
+
+dtl_verbose_0 = @echo " DTL " $(filter %.dtl,$(?F));
+dtl_verbose = $(dtl_verbose_$(V))
+
+# Core targets.
+
+DTL_PATH := $(abspath $(DTL_PATH))
+DTL_FILES := $(sort $(call core_find,$(DTL_PATH),*.dtl))
+
+ifneq ($(DTL_FILES),)
+
+DTL_NAMES = $(addprefix $(DTL_PREFIX),$(addsuffix $(DTL_SUFFIX),$(DTL_FILES:$(DTL_PATH)/%.dtl=%)))
+DTL_MODULES = $(if $(DTL_FULL_PATH),$(subst /,_,$(DTL_NAMES)),$(notdir $(DTL_NAMES)))
+BEAM_FILES += $(addsuffix .beam,$(addprefix ebin/,$(DTL_MODULES)))
+
+ifneq ($(words $(DTL_FILES)),0)
+# Rebuild templates when the Makefile changes.
+$(ERLANG_MK_TMP)/last-makefile-change-erlydtl: $(MAKEFILE_LIST) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(DTL_FILES); \
+ fi
+ $(verbose) touch $@
+
+ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change-erlydtl
+endif
+
+define erlydtl_compile.erl
+ [begin
+ Module0 = case "$(strip $(DTL_FULL_PATH))" of
+ "" ->
+ filename:basename(F, ".dtl");
+ _ ->
+ "$(call core_native_path,$(DTL_PATH))/" ++ F2 = filename:rootname(F, ".dtl"),
+ re:replace(F2, "/", "_", [{return, list}, global])
+ end,
+ Module = list_to_atom("$(DTL_PREFIX)" ++ string:to_lower(Module0) ++ "$(DTL_SUFFIX)"),
+ case erlydtl:compile(F, Module, [$(DTL_OPTS)] ++ [{out_dir, "ebin/"}, return_errors]) of
+ ok -> ok;
+ {ok, _} -> ok
+ end
+ end || F <- string:tokens("$(1)", " ")],
+ halt().
+endef
+
+ebin/$(PROJECT).app:: $(DTL_FILES) | ebin/
+ $(if $(strip $?),\
+ $(dtl_verbose) $(call erlang,$(call erlydtl_compile.erl,$(call core_native_path,$?)),\
+ -pa ebin/))
+
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, Dave Cottlehuber <dch@skunkwerks.at>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-escript escript escript-zip
+
+# Configuration.
+
+ESCRIPT_NAME ?= $(PROJECT)
+ESCRIPT_FILE ?= $(ESCRIPT_NAME)
+
+ESCRIPT_SHEBANG ?= /usr/bin/env escript
+ESCRIPT_COMMENT ?= This is an -*- erlang -*- file
+ESCRIPT_EMU_ARGS ?= -escript main $(ESCRIPT_NAME)
+
+ESCRIPT_ZIP ?= 7z a -tzip -mx=9 -mtc=off $(if $(filter-out 0,$(V)),,> /dev/null)
+ESCRIPT_ZIP_FILE ?= $(ERLANG_MK_TMP)/escript.zip
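+
+# With the defaults above, the escript target below emits this header
+# before appending the zip archive:
+#
+#   #!/usr/bin/env escript
+#   %% This is an -*- erlang -*- file
+#   %%! -escript main $(PROJECT)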
+
+# Core targets.
+
+distclean:: distclean-escript
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Escript targets:" \
+		"  escript     Build an executable escript archive"
+
+# Plugin-specific targets.
+
+escript-zip:: FULL=1
+escript-zip:: deps app
+	$(verbose) mkdir -p $(dir $(ESCRIPT_ZIP_FILE))
+ $(verbose) rm -f $(ESCRIPT_ZIP_FILE)
+ $(gen_verbose) cd .. && $(ESCRIPT_ZIP) $(ESCRIPT_ZIP_FILE) $(PROJECT)/ebin/*
+ifneq ($(DEPS),)
+ $(verbose) cd $(DEPS_DIR) && $(ESCRIPT_ZIP) $(ESCRIPT_ZIP_FILE) \
+ $(subst $(DEPS_DIR)/,,$(addsuffix /*,$(wildcard \
+ $(addsuffix /ebin,$(shell cat $(ERLANG_MK_TMP)/deps.log)))))
+endif
+
+escript:: escript-zip
+ $(gen_verbose) printf "%s\n" \
+ "#!$(ESCRIPT_SHEBANG)" \
+ "%% $(ESCRIPT_COMMENT)" \
+ "%%! $(ESCRIPT_EMU_ARGS)" > $(ESCRIPT_FILE)
+ $(verbose) cat $(ESCRIPT_ZIP_FILE) >> $(ESCRIPT_FILE)
+ $(verbose) chmod +x $(ESCRIPT_FILE)
+
+distclean-escript:
+ $(gen_verbose) rm -f $(ESCRIPT_FILE)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, Enrique Fernandez <enrique.fernandez@erlang-solutions.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: eunit apps-eunit
+
+# Configuration
+
+EUNIT_OPTS ?=
+EUNIT_ERL_OPTS ?=
+
+# Core targets.
+
+tests:: eunit
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "EUnit targets:" \
+ " eunit Run all the EUnit tests for this project"
+
+# Plugin-specific targets.
+
+define eunit.erl
+ $(call cover.erl)
+ CoverSetup(),
+ case eunit:test($1, [$(EUNIT_OPTS)]) of
+ ok -> ok;
+ error -> halt(2)
+ end,
+ CoverExport("$(call core_native_path,$(COVER_DATA_DIR))/eunit.coverdata"),
+ halt()
+endef
+
+EUNIT_ERL_OPTS += -pa $(TEST_DIR) $(CURDIR)/ebin
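+
+# The t variable narrows the run; hypothetical examples:
+#
+#   make eunit t=my_module          # all EUnit tests in one module
+#   make eunit t=my_module:my_test  # a single zero-arity test function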
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+eunit: test-build cover-data-dir
+ $(gen_verbose) $(call erlang,$(call eunit.erl,['$(t)']),$(EUNIT_ERL_OPTS))
+else
+eunit: test-build cover-data-dir
+ $(gen_verbose) $(call erlang,$(call eunit.erl,fun $(t)/0),$(EUNIT_ERL_OPTS))
+endif
+else
+EUNIT_EBIN_MODS = $(notdir $(basename $(ERL_FILES) $(BEAM_FILES)))
+EUNIT_TEST_MODS = $(notdir $(basename $(call core_find,$(TEST_DIR)/,*.erl)))
+
+EUNIT_MODS = $(foreach mod,$(EUNIT_EBIN_MODS) $(filter-out \
+ $(patsubst %,%_tests,$(EUNIT_EBIN_MODS)),$(EUNIT_TEST_MODS)),'$(mod)')
+
+eunit: test-build $(if $(IS_APP)$(ROOT_DIR),,apps-eunit) cover-data-dir
+ifneq ($(wildcard src/ $(TEST_DIR)),)
+ $(gen_verbose) $(call erlang,$(call eunit.erl,[$(call comma_list,$(EUNIT_MODS))]),$(EUNIT_ERL_OPTS))
+endif
+
+ifneq ($(ALL_APPS_DIRS),)
+apps-eunit: test-build
+ $(verbose) eunit_retcode=0 ; for app in $(ALL_APPS_DIRS); do $(MAKE) -C $$app eunit IS_APP=1; \
+ [ $$? -ne 0 ] && eunit_retcode=1 ; done ; \
+ exit $$eunit_retcode
+endif
+endif
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter proper,$(DEPS) $(TEST_DEPS)),proper)
+.PHONY: proper
+
+# Targets.
+
+tests:: proper
+
+define proper_check.erl
+ $(call cover.erl)
+ code:add_pathsa([
+ "$(call core_native_path,$(CURDIR)/ebin)",
+ "$(call core_native_path,$(DEPS_DIR)/*/ebin)",
+ "$(call core_native_path,$(TEST_DIR))"]),
+ Module = fun(M) ->
+ [true] =:= lists:usort([
+ case atom_to_list(F) of
+ "prop_" ++ _ ->
+ io:format("Testing ~p:~p/0~n", [M, F]),
+ proper:quickcheck(M:F(), nocolors);
+ _ ->
+ true
+ end
+ || {F, 0} <- M:module_info(exports)])
+ end,
+ try begin
+ CoverSetup(),
+ Res = case $(1) of
+ all -> [true] =:= lists:usort([Module(M) || M <- [$(call comma_list,$(3))]]);
+ module -> Module($(2));
+ function -> proper:quickcheck($(2), nocolors)
+ end,
+ CoverExport("$(COVER_DATA_DIR)/proper.coverdata"),
+ Res
+ end of
+ true -> halt(0);
+ _ -> halt(1)
+ catch error:undef ->
+ io:format("Undefined property or module?~n~p~n", [erlang:get_stacktrace()]),
+ halt(0)
+ end.
+endef
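+
+# As with EUnit, t narrows the run; hypothetical examples:
+#
+#   make proper t=prop_mymod             # check all properties in one module
+#   make proper t=prop_mymod:prop_sort   # check a single property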
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+proper: test-build cover-data-dir
+ $(verbose) $(call erlang,$(call proper_check.erl,module,$(t)))
+else
+proper: test-build cover-data-dir
+ $(verbose) echo Testing $(t)/0
+ $(verbose) $(call erlang,$(call proper_check.erl,function,$(t)()))
+endif
+else
+proper: test-build cover-data-dir
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(wildcard ebin/*.beam) $(call core_find,$(TEST_DIR)/,*.beam))))))
+ $(gen_verbose) $(call erlang,$(call proper_check.erl,all,undefined,$(MODULES)))
+endif
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Verbosity.
+
+proto_verbose_0 = @echo " PROTO " $(filter %.proto,$(?F));
+proto_verbose = $(proto_verbose_$(V))
+
+# Core targets.
+
+ifneq ($(wildcard src/),)
+ifneq ($(filter gpb protobuffs,$(BUILD_DEPS) $(DEPS)),)
+PROTO_FILES := $(filter %.proto,$(ALL_SRC_FILES))
+ERL_FILES += $(addprefix src/,$(patsubst %.proto,%_pb.erl,$(notdir $(PROTO_FILES))))
+
+ifeq ($(PROTO_FILES),)
+$(ERLANG_MK_TMP)/last-makefile-change-protobuffs:
+ $(verbose) :
+else
+# Rebuild proto files when the Makefile changes.
+# We exclude $(PROJECT).d to avoid a circular dependency.
+$(ERLANG_MK_TMP)/last-makefile-change-protobuffs: $(filter-out $(PROJECT).d,$(MAKEFILE_LIST)) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(PROTO_FILES); \
+ fi
+ $(verbose) touch $@
+
+$(PROJECT).d:: $(ERLANG_MK_TMP)/last-makefile-change-protobuffs
+endif
+
+ifeq ($(filter gpb,$(BUILD_DEPS) $(DEPS)),)
+define compile_proto.erl
+ [begin
+ protobuffs_compile:generate_source(F, [
+ {output_include_dir, "./include"},
+ {output_src_dir, "./src"}])
+ end || F <- string:tokens("$1", " ")],
+ halt().
+endef
+else
+define compile_proto.erl
+ [begin
+ gpb_compile:file(F, [
+ {include_as_lib, true},
+ {module_name_suffix, "_pb"},
+ {o_hrl, "./include"},
+ {o_erl, "./src"}])
+ end || F <- string:tokens("$1", " ")],
+ halt().
+endef
+endif
+
+ifneq ($(PROTO_FILES),)
+$(PROJECT).d:: $(PROTO_FILES)
+ $(verbose) mkdir -p ebin/ include/
+ $(if $(strip $?),$(proto_verbose) $(call erlang,$(call compile_proto.erl,$?)))
+endif
+endif
+endif
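+
+# Example configuration (a sketch): enable gpb-based generation by listing
+# gpb as a dependency before including erlang.mk:
+#
+#   DEPS += gpb
+#
+# Each foo.proto found among the source files then yields src/foo_pb.erl
+# and a matching header under include/.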
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: relx-rel relx-relup distclean-relx-rel run
+
+# Configuration.
+
+RELX ?= $(ERLANG_MK_TMP)/relx
+RELX_CONFIG ?= $(CURDIR)/relx.config
+
+RELX_URL ?= https://erlang.mk/res/relx-v3.27.0
+RELX_OPTS ?=
+RELX_OUTPUT_DIR ?= _rel
+RELX_REL_EXT ?=
+RELX_TAR ?= 1
+
+ifdef SFX
+ RELX_TAR = 1
+endif
+
+ifeq ($(firstword $(RELX_OPTS)),-o)
+ RELX_OUTPUT_DIR = $(word 2,$(RELX_OPTS))
+else
+ RELX_OPTS += -o $(RELX_OUTPUT_DIR)
+endif
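+
+# Example configuration (a sketch with hypothetical values), set before
+# including erlang.mk:
+#
+#   RELX_OUTPUT_DIR = /tmp/my_rel  # same effect as RELX_OPTS = -o /tmp/my_rel
+#   RELX_TAR = 0                   # skip building the release tarball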
+
+# Core targets.
+
+ifeq ($(IS_DEP),)
+ifneq ($(wildcard $(RELX_CONFIG)),)
+rel:: relx-rel
+
+relup:: relx-relup
+endif
+endif
+
+distclean:: distclean-relx-rel
+
+# Plugin-specific targets.
+
+$(RELX): | $(ERLANG_MK_TMP)
+ $(gen_verbose) $(call core_http_get,$(RELX),$(RELX_URL))
+ $(verbose) chmod +x $(RELX)
+
+relx-rel: $(RELX) rel-deps app
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) release
+ $(verbose) $(MAKE) relx-post-rel
+ifeq ($(RELX_TAR),1)
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) tar
+endif
+
+relx-relup: $(RELX) rel-deps app
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) release
+ $(MAKE) relx-post-rel
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) relup $(if $(filter 1,$(RELX_TAR)),tar)
+
+distclean-relx-rel:
+ $(gen_verbose) rm -rf $(RELX_OUTPUT_DIR)
+
+# Default hooks.
+relx-post-rel::
+ $(verbose) :
+
+# Run target.
+
+ifeq ($(wildcard $(RELX_CONFIG)),)
+run::
+else
+
+define get_relx_release.erl
+ {ok, Config} = file:consult("$(call core_native_path,$(RELX_CONFIG))"),
+ {release, {Name, Vsn0}, _} = lists:keyfind(release, 1, Config),
+ Vsn = case Vsn0 of
+ {cmd, Cmd} -> os:cmd(Cmd);
+ semver -> "";
+ {semver, _} -> "";
+ VsnStr -> Vsn0
+ end,
+ Extended = case lists:keyfind(extended_start_script, 1, Config) of
+ {_, true} -> "1";
+ _ -> ""
+ end,
+ io:format("~s ~s ~s", [Name, Vsn, Extended]),
+ halt(0).
+endef
+
+RELX_REL := $(shell $(call erlang,$(get_relx_release.erl)))
+RELX_REL_NAME := $(word 1,$(RELX_REL))
+RELX_REL_VSN := $(word 2,$(RELX_REL))
+RELX_REL_CMD := $(if $(word 3,$(RELX_REL)),console)
+
+ifeq ($(PLATFORM),msys2)
+RELX_REL_EXT := .cmd
+endif
+
+run:: all
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) $(RELX_REL_CMD)
+
+ifdef RELOAD
+rel::
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) ping
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) \
+ eval "io:format(\"~p~n\", [c:lm()])"
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Relx targets:" \
+ " run Compile the project, build the release and run it"
+
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, M Robert Martin <rob@version2beta.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: shell
+
+# Configuration.
+
+SHELL_ERL ?= erl
+SHELL_PATHS ?= $(CURDIR)/ebin $(TEST_DIR)
+SHELL_OPTS ?=
+
+ALL_SHELL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(SHELL_DEPS))
+
+# Core targets.
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Shell targets:" \
+ " shell Run an erlang shell with SHELL_OPTS or reasonable default"
+
+# Plugin-specific targets.
+
+$(foreach dep,$(SHELL_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+build-shell-deps:
+else
+build-shell-deps: $(ALL_SHELL_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_SHELL_DEPS_DIRS) ; do \
+ if [ -z "$(strip $(FULL))" ] && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ else \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ fi \
+ done
+endif
+
+shell:: build-shell-deps
+ $(gen_verbose) $(SHELL_ERL) -pa $(SHELL_PATHS) $(SHELL_OPTS)
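+
+# Example configuration (a sketch; the dependency and application names
+# are hypothetical):
+#
+#   SHELL_DEPS = tddreloader
+#   SHELL_OPTS = -eval 'application:ensure_all_started(my_app)'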
+
+# Copyright 2017, Stanislaw Klekot <dozzie@jarowit.net>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-sphinx sphinx
+
+# Configuration.
+
+SPHINX_BUILD ?= sphinx-build
+SPHINX_SOURCE ?= doc
+SPHINX_CONFDIR ?=
+SPHINX_FORMATS ?= html
+SPHINX_DOCTREES ?= $(ERLANG_MK_TMP)/sphinx.doctrees
+SPHINX_OPTS ?=
+
+#sphinx_html_opts =
+#sphinx_html_output = html
+#sphinx_man_opts =
+#sphinx_man_output = man
+#sphinx_latex_opts =
+#sphinx_latex_output = latex
+
+# Helpers.
+
+sphinx_build_0 = @echo " SPHINX" $1; $(SPHINX_BUILD) -N -q
+sphinx_build_1 = $(SPHINX_BUILD) -N
+sphinx_build_2 = set -x; $(SPHINX_BUILD)
+sphinx_build = $(sphinx_build_$(V))
+
+define sphinx.build
+$(call sphinx_build,$1) -b $1 -d $(SPHINX_DOCTREES) $(if $(SPHINX_CONFDIR),-c $(SPHINX_CONFDIR)) $(SPHINX_OPTS) $(sphinx_$1_opts) -- $(SPHINX_SOURCE) $(call sphinx.output,$1)
+
+endef
+
+define sphinx.output
+$(if $(sphinx_$1_output),$(sphinx_$1_output),$1)
+endef
+
+# Targets.
+
+ifneq ($(wildcard $(if $(SPHINX_CONFDIR),$(SPHINX_CONFDIR),$(SPHINX_SOURCE))/conf.py),)
+docs:: sphinx
+distclean:: distclean-sphinx
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Sphinx targets:" \
+ " sphinx Generate Sphinx documentation." \
+ "" \
+ "ReST sources and 'conf.py' file are expected in directory pointed by" \
+ "SPHINX_SOURCE ('doc' by default). SPHINX_FORMATS lists formats to build (only" \
+ "'html' format is generated by default); target directory can be specified by" \
+ 'setting sphinx_$${format}_output, for example: sphinx_html_output = output/html' \
+ "Additional Sphinx options can be set in SPHINX_OPTS."
+
+# Plugin-specific targets.
+
+sphinx:
+ $(foreach F,$(SPHINX_FORMATS),$(call sphinx.build,$F))
+
+distclean-sphinx:
+ $(gen_verbose) rm -rf $(filter-out $(SPHINX_SOURCE),$(foreach F,$(SPHINX_FORMATS),$(call sphinx.output,$F)))
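+
+# Example configuration (a sketch): build HTML and man pages, writing the
+# man pages to a custom directory:
+#
+#   SPHINX_FORMATS = html man
+#   sphinx_man_output = doc/man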
+
+# Copyright (c) 2017, Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: show-ERL_LIBS show-ERLC_OPTS show-TEST_ERLC_OPTS
+
+show-ERL_LIBS:
+ @echo $(ERL_LIBS)
+
+show-ERLC_OPTS:
+ @$(foreach opt,$(ERLC_OPTS) -pa ebin -I include,echo "$(opt)";)
+
+show-TEST_ERLC_OPTS:
+ @$(foreach opt,$(TEST_ERLC_OPTS) -pa ebin -I include,echo "$(opt)";)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter triq,$(DEPS) $(TEST_DEPS)),triq)
+.PHONY: triq
+
+# Targets.
+
+tests:: triq
+
+define triq_check.erl
+ $(call cover.erl)
+ code:add_pathsa([
+ "$(call core_native_path,$(CURDIR)/ebin)",
+ "$(call core_native_path,$(DEPS_DIR)/*/ebin)",
+ "$(call core_native_path,$(TEST_DIR))"]),
+ try begin
+ CoverSetup(),
+ Res = case $(1) of
+ all -> [true] =:= lists:usort([triq:check(M) || M <- [$(call comma_list,$(3))]]);
+ module -> triq:check($(2));
+ function -> triq:check($(2))
+ end,
+ CoverExport("$(COVER_DATA_DIR)/triq.coverdata"),
+ Res
+ end of
+ true -> halt(0);
+ _ -> halt(1)
+ catch error:undef ->
+ io:format("Undefined property or module?~n~p~n", [erlang:get_stacktrace()]),
+ halt(0)
+ end.
+endef
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+triq: test-build cover-data-dir
+ $(verbose) $(call erlang,$(call triq_check.erl,module,$(t)))
+else
+triq: test-build cover-data-dir
+ $(verbose) echo Testing $(t)/0
+ $(verbose) $(call erlang,$(call triq_check.erl,function,$(t)()))
+endif
+else
+triq: test-build cover-data-dir
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(wildcard ebin/*.beam) $(call core_find,$(TEST_DIR)/,*.beam))))))
+ $(gen_verbose) $(call erlang,$(call triq_check.erl,all,undefined,$(MODULES)))
+endif
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Erlang Solutions Ltd.
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: xref distclean-xref
+
+# Configuration.
+
+ifeq ($(XREF_CONFIG),)
+ XREFR_ARGS :=
+else
+ XREFR_ARGS := -c $(XREF_CONFIG)
+endif
+
+XREFR ?= $(CURDIR)/xrefr
+export XREFR
+
+XREFR_URL ?= https://github.com/inaka/xref_runner/releases/download/1.1.0/xrefr
+
+# Core targets.
+
+help::
+ $(verbose) printf '%s\n' '' \
+ 'Xref targets:' \
+ ' xref Run Xrefr using $$XREF_CONFIG as the config file, if defined'
+
+distclean:: distclean-xref
+
+# Plugin-specific targets.
+
+$(XREFR):
+ $(gen_verbose) $(call core_http_get,$(XREFR),$(XREFR_URL))
+ $(verbose) chmod +x $(XREFR)
+
+xref: deps app $(XREFR)
+ $(gen_verbose) $(XREFR) $(XREFR_ARGS)
+
+distclean-xref:
+ $(gen_verbose) rm -rf $(XREFR)
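+
+# Example configuration (a sketch; the file name is hypothetical):
+#
+#   XREF_CONFIG = $(CURDIR)/xref.config
+#
+# `make xref` then downloads xrefr if needed and runs it with that
+# configuration file.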
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Viktor Söderqvist <viktor@zuiderkwast.se>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+COVER_REPORT_DIR ?= cover
+COVER_DATA_DIR ?= $(COVER_REPORT_DIR)
+
+ifdef COVER
+COVER_APPS ?= $(notdir $(ALL_APPS_DIRS))
+COVER_DEPS ?=
+endif
+
+# Code coverage for Common Test.
+
+ifdef COVER
+ifdef CT_RUN
+ifneq ($(wildcard $(TEST_DIR)),)
+test-build:: $(TEST_DIR)/ct.cover.spec
+
+$(TEST_DIR)/ct.cover.spec: cover-data-dir
+ $(gen_verbose) printf "%s\n" \
+ "{incl_app, '$(PROJECT)', details}." \
+ "{incl_dirs, '$(PROJECT)', [\"$(call core_native_path,$(CURDIR)/ebin)\" \
+ $(foreach a,$(COVER_APPS),$(comma) \"$(call core_native_path,$(APPS_DIR)/$a/ebin)\") \
+ $(foreach d,$(COVER_DEPS),$(comma) \"$(call core_native_path,$(DEPS_DIR)/$d/ebin)\")]}." \
+ '{export,"$(call core_native_path,$(abspath $(COVER_DATA_DIR))/ct.coverdata)"}.' > $@
+
+CT_RUN += -cover $(TEST_DIR)/ct.cover.spec
+endif
+endif
+endif
+
+# Code coverage for other tools.
+
+ifdef COVER
+define cover.erl
+ CoverSetup = fun() ->
+ Dirs = ["$(call core_native_path,$(CURDIR)/ebin)"
+ $(foreach a,$(COVER_APPS),$(comma) "$(call core_native_path,$(APPS_DIR)/$a/ebin)")
+ $(foreach d,$(COVER_DEPS),$(comma) "$(call core_native_path,$(DEPS_DIR)/$d/ebin)")],
+ [begin
+ case filelib:is_dir(Dir) of
+ false -> false;
+ true ->
+ case cover:compile_beam_directory(Dir) of
+ {error, _} -> halt(1);
+ _ -> true
+ end
+ end
+ end || Dir <- Dirs]
+ end,
+ CoverExport = fun(Filename) -> cover:export(Filename) end,
+endef
+else
+define cover.erl
+ CoverSetup = fun() -> ok end,
+ CoverExport = fun(_) -> ok end,
+endef
+endif
+
+# Core targets.
+
+ifdef COVER
+ifneq ($(COVER_REPORT_DIR),)
+tests::
+ $(verbose) $(MAKE) --no-print-directory cover-report
+endif
+
+cover-data-dir: | $(COVER_DATA_DIR)
+
+$(COVER_DATA_DIR):
+ $(verbose) mkdir -p $(COVER_DATA_DIR)
+else
+cover-data-dir:
+endif
+
+clean:: coverdata-clean
+
+ifneq ($(COVER_REPORT_DIR),)
+distclean:: cover-report-clean
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Cover targets:" \
+ " cover-report Generate a HTML coverage report from previously collected" \
+ " cover data." \
+ " all.coverdata Merge all coverdata files into all.coverdata." \
+ "" \
+ "If COVER=1 is set, coverage data is generated by the targets eunit and ct. The" \
+ "target tests additionally generates a HTML coverage report from the combined" \
+ "coverdata files from each of these testing tools. HTML reports can be disabled" \
+ "by setting COVER_REPORT_DIR to empty."
+
+# Plugin-specific targets.
+
+COVERDATA = $(filter-out $(COVER_DATA_DIR)/all.coverdata,$(wildcard $(COVER_DATA_DIR)/*.coverdata))
+
+.PHONY: coverdata-clean
+coverdata-clean:
+ $(gen_verbose) rm -f $(COVER_DATA_DIR)/*.coverdata $(TEST_DIR)/ct.cover.spec
+
+# Merge all coverdata files into one.
+define cover_export.erl
+ $(foreach f,$(COVERDATA),cover:import("$(f)") == ok orelse halt(1),)
+ cover:export("$(COVER_DATA_DIR)/$@"), halt(0).
+endef
+
+all.coverdata: $(COVERDATA) cover-data-dir
+ $(gen_verbose) $(call erlang,$(cover_export.erl))
+
+# These are only defined if COVER_REPORT_DIR is non-empty. Set COVER_REPORT_DIR to
+# empty if you want the coverdata files but not the HTML report.
+ifneq ($(COVER_REPORT_DIR),)
+
+.PHONY: cover-report-clean cover-report
+
+cover-report-clean:
+ $(gen_verbose) rm -rf $(COVER_REPORT_DIR)
+ifneq ($(COVER_REPORT_DIR),$(COVER_DATA_DIR))
+ $(if $(shell ls -A $(COVER_DATA_DIR)/),,$(verbose) rmdir $(COVER_DATA_DIR))
+endif
+
+ifeq ($(COVERDATA),)
+cover-report:
+else
+
+# Modules which include eunit.hrl always contain one line without coverage
+# because eunit defines test/0 which is never called. We compensate for this.
+EUNIT_HRL_MODS = $(subst $(space),$(comma),$(shell \
+ grep -H -e '^\s*-include.*include/eunit\.hrl"' src/*.erl \
+ | sed "s/^src\/\(.*\)\.erl:.*/'\1'/" | uniq))
+
+define cover_report.erl
+ $(foreach f,$(COVERDATA),cover:import("$(f)") == ok orelse halt(1),)
+ Ms = cover:imported_modules(),
+ [cover:analyse_to_file(M, "$(COVER_REPORT_DIR)/" ++ atom_to_list(M)
+ ++ ".COVER.html", [html]) || M <- Ms],
+ Report = [begin {ok, R} = cover:analyse(M, module), R end || M <- Ms],
+ EunitHrlMods = [$(EUNIT_HRL_MODS)],
+ Report1 = [{M, {Y, case lists:member(M, EunitHrlMods) of
+ true -> N - 1; false -> N end}} || {M, {Y, N}} <- Report],
+ TotalY = lists:sum([Y || {_, {Y, _}} <- Report1]),
+ TotalN = lists:sum([N || {_, {_, N}} <- Report1]),
+ Perc = fun(Y, N) -> case Y + N of 0 -> 100; S -> round(100 * Y / S) end end,
+ TotalPerc = Perc(TotalY, TotalN),
+ {ok, F} = file:open("$(COVER_REPORT_DIR)/index.html", [write]),
+ io:format(F, "<!DOCTYPE html><html>~n"
+ "<head><meta charset=\"UTF-8\">~n"
+ "<title>Coverage report</title></head>~n"
+ "<body>~n", []),
+ io:format(F, "<h1>Coverage</h1>~n<p>Total: ~p%</p>~n", [TotalPerc]),
+ io:format(F, "<table><tr><th>Module</th><th>Coverage</th></tr>~n", []),
+ [io:format(F, "<tr><td><a href=\"~p.COVER.html\">~p</a></td>"
+ "<td>~p%</td></tr>~n",
+ [M, M, Perc(Y, N)]) || {M, {Y, N}} <- Report1],
+ How = "$(subst $(space),$(comma)$(space),$(basename $(COVERDATA)))",
+ Date = "$(shell date -u "+%Y-%m-%dT%H:%M:%SZ")",
+ io:format(F, "</table>~n"
+ "<p>Generated using ~s and erlang.mk on ~s.</p>~n"
+ "</body></html>", [How, Date]),
+ halt().
+endef
+
+cover-report:
+ $(verbose) mkdir -p $(COVER_REPORT_DIR)
+ $(gen_verbose) $(call erlang,$(cover_report.erl))
+
+endif
+endif # ifneq ($(COVER_REPORT_DIR),)
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: sfx
+
+ifdef RELX_REL
+ifdef SFX
+
+# Configuration.
+
+SFX_ARCHIVE ?= $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/$(RELX_REL_NAME)-$(RELX_REL_VSN).tar.gz
+SFX_OUTPUT_FILE ?= $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME).run
+
+# Core targets.
+
+rel:: sfx
+
+# Plugin-specific targets.
+
+define sfx_stub
+#!/bin/sh
+
+TMPDIR=`mktemp -d`
+ARCHIVE=`awk '/^__ARCHIVE_BELOW__$$/ {print NR + 1; exit 0;}' $$0`
+FILENAME=$$(basename $$0)
+REL=$${FILENAME%.*}
+
+tail -n+$$ARCHIVE $$0 | tar -xzf - -C $$TMPDIR
+
+$$TMPDIR/bin/$$REL console
+RET=$$?
+
+rm -rf $$TMPDIR
+
+exit $$RET
+
+__ARCHIVE_BELOW__
+endef
+
+sfx:
+ $(verbose) $(call core_render,sfx_stub,$(SFX_OUTPUT_FILE))
+ $(gen_verbose) cat $(SFX_ARCHIVE) >> $(SFX_OUTPUT_FILE)
+ $(verbose) chmod +x $(SFX_OUTPUT_FILE)
+
+endif
+endif
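+
+# Example usage (a sketch): build the release and wrap it in a
+# self-extracting archive:
+#
+#   make rel SFX=1
+#
+# The resulting $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME).run script unpacks the
+# release to a temporary directory and starts its console.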
+
+# Copyright (c) 2013-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# External plugins.
+
+DEP_PLUGINS ?=
+
+$(foreach p,$(DEP_PLUGINS),\
+ $(eval $(if $(findstring /,$p),\
+ $(call core_dep_plugin,$p,$(firstword $(subst /, ,$p))),\
+ $(call core_dep_plugin,$p/plugins.mk,$p))))
+
+help:: help-plugins
+
+help-plugins::
+ $(verbose) :
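+
+# Example (a sketch; the dependency and file names are hypothetical):
+#
+#   DEP_PLUGINS = cowboy             # loads cowboy/plugins.mk
+#   DEP_PLUGINS = cowboy/mk/dist.mk  # loads one specific file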
+
+# Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015-2016, Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Fetch dependencies recursively (without building them).
+
+.PHONY: fetch-deps fetch-doc-deps fetch-rel-deps fetch-test-deps \
+ fetch-shell-deps
+
+.PHONY: $(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+fetch-deps: $(ERLANG_MK_RECURSIVE_DEPS_LIST)
+fetch-doc-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST)
+fetch-rel-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST)
+fetch-test-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST)
+fetch-shell-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+ifneq ($(SKIP_DEPS),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST):
+ $(verbose) :> $@
+else
+# By default, we fetch "normal" dependencies. They are also included
+# regardless of which type of dependencies is requested.
+#
+# $(ALL_DEPS_DIRS) includes $(BUILD_DEPS).
+
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_DOC_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_REL_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_TEST_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_SHELL_DEPS_DIRS)
+
+# Allow fetch-deps to be used with $(DEP_TYPES) to fetch multiple types
+# of dependencies with a single target.
+ifneq ($(filter doc,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_DOC_DEPS_DIRS)
+endif
+ifneq ($(filter rel,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_REL_DEPS_DIRS)
+endif
+ifneq ($(filter test,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_TEST_DEPS_DIRS)
+endif
+ifneq ($(filter shell,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_SHELL_DEPS_DIRS)
+endif
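+
+# Example usage (a sketch): fetch normal plus documentation and test
+# dependencies in a single invocation:
+#
+#   make fetch-deps DEP_TYPES='doc test'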
+
+ERLANG_MK_RECURSIVE_TMP_LIST := $(abspath $(ERLANG_MK_TMP)/recursive-tmp-deps-$(shell echo $$PPID).log)
+
+$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): | $(ERLANG_MK_TMP)
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_RECURSIVE_TMP_LIST)
+endif
+ $(verbose) touch $(ERLANG_MK_RECURSIVE_TMP_LIST)
+ $(verbose) set -e; for dep in $^ ; do \
+ if ! grep -qs ^$$dep$$ $(ERLANG_MK_RECURSIVE_TMP_LIST); then \
+ echo $$dep >> $(ERLANG_MK_RECURSIVE_TMP_LIST); \
+ if grep -qs -E "^[[:blank:]]*include[[:blank:]]+(erlang\.mk|.*/erlang\.mk|.*ERLANG_MK_FILENAME.*)$$" \
+ $$dep/GNUmakefile $$dep/makefile $$dep/Makefile; then \
+ $(MAKE) -C $$dep fetch-deps \
+ IS_DEP=1 \
+ ERLANG_MK_RECURSIVE_TMP_LIST=$(ERLANG_MK_RECURSIVE_TMP_LIST); \
+ fi \
+ fi \
+ done
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) sort < $(ERLANG_MK_RECURSIVE_TMP_LIST) | \
+ uniq > $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted
+ $(verbose) cmp -s $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted $@ \
+ || mv $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted $@
+ $(verbose) rm -f $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted
+ $(verbose) rm $(ERLANG_MK_RECURSIVE_TMP_LIST)
+endif
+endif # ifneq ($(SKIP_DEPS),)
+
+# List dependencies recursively.
+
+.PHONY: list-deps list-doc-deps list-rel-deps list-test-deps \
+ list-shell-deps
+
+list-deps: $(ERLANG_MK_RECURSIVE_DEPS_LIST)
+list-doc-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST)
+list-rel-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST)
+list-test-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST)
+list-shell-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+list-deps list-doc-deps list-rel-deps list-test-deps list-shell-deps:
+ $(verbose) cat $^
+
+# Query dependencies recursively.
+
+.PHONY: query-deps query-doc-deps query-rel-deps query-test-deps \
+ query-shell-deps
+
+QUERY ?= name fetch_method repo version
+
+define query_target
+$(1): $(2) clean-tmp-query.log
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(4)
+endif
+ $(verbose) $(foreach dep,$(3),\
+ echo $(PROJECT): $(foreach q,$(QUERY),$(call query_$(q),$(dep))) >> $(4) ;)
+ $(if $(filter-out query-deps,$(1)),,\
+ $(verbose) set -e; for dep in $(3) ; do \
+ if grep -qs ^$$$$dep$$$$ $(ERLANG_MK_TMP)/query.log; then \
+ :; \
+ else \
+ echo $$$$dep >> $(ERLANG_MK_TMP)/query.log; \
+ $(MAKE) -C $(DEPS_DIR)/$$$$dep $$@ QUERY="$(QUERY)" IS_DEP=1 || true; \
+ fi \
+ done)
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) touch $(4)
+ $(verbose) cat $(4)
+endif
+endef
+
+clean-tmp-query.log:
+ifeq ($(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_TMP)/query.log
+endif
+
+$(eval $(call query_target,query-deps,$(ERLANG_MK_RECURSIVE_DEPS_LIST),$(BUILD_DEPS) $(DEPS),$(ERLANG_MK_QUERY_DEPS_FILE)))
+$(eval $(call query_target,query-doc-deps,$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST),$(DOC_DEPS),$(ERLANG_MK_QUERY_DOC_DEPS_FILE)))
+$(eval $(call query_target,query-rel-deps,$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST),$(REL_DEPS),$(ERLANG_MK_QUERY_REL_DEPS_FILE)))
+$(eval $(call query_target,query-test-deps,$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST),$(TEST_DEPS),$(ERLANG_MK_QUERY_TEST_DEPS_FILE)))
+$(eval $(call query_target,query-shell-deps,$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST),$(SHELL_DEPS),$(ERLANG_MK_QUERY_SHELL_DEPS_FILE)))
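+
+# Example usage (a sketch): restrict the query output to specific fields
+# from the default list above:
+#
+#   make query-deps QUERY='name version'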
diff --git a/deps/rabbitmq_amqp1_0/include/rabbit_amqp1_0.hrl b/deps/rabbitmq_amqp1_0/include/rabbit_amqp1_0.hrl
new file mode 100644
index 0000000000..ef3e8ea722
--- /dev/null
+++ b/deps/rabbitmq_amqp1_0/include/rabbit_amqp1_0.hrl
@@ -0,0 +1,52 @@
+%%-define(debug, true).
+
+-ifdef(debug).
+-define(DEBUG0(F), ?SAFE(io:format(F, []))).
+-define(DEBUG(F, A), ?SAFE(io:format(F, A))).
+-else.
+-define(DEBUG0(F), ok).
+-define(DEBUG(F, A), ok).
+-endif.
+
+-define(pprint(F), io:format("~p~n", [amqp10_framing:pprint(F)])).
+
+-define(SAFE(F),
+ ((fun() ->
+ try F
+ catch __T:__E ->
+ io:format("~p:~p thrown debugging~n~p~n",
+ [__T, __E, erlang:get_stacktrace()])
+ end
+ end)())).
+
+%% General consts
+
+-define(FRAME_1_0_MIN_SIZE, 512).
+
+-define(SEND_ROLE, false).
+-define(RECV_ROLE, true).
+
+%% Encoding
+
+-include_lib("amqp10_common/include/amqp10_framing.hrl").
+
+-define(INFO_ITEMS, [pid,
+ auth_mechanism,
+ host,
+ frame_max,
+ timeout,
+ user,
+ state,
+ recv_oct,
+ recv_cnt,
+ send_oct,
+ send_cnt,
+ ssl,
+ ssl_protocol,
+ ssl_key_exchange,
+ ssl_cipher,
+ ssl_hash,
+ peer_cert_issuer,
+ peer_cert_subject,
+ peer_cert_validity,
+ node]).
diff --git a/deps/rabbitmq_amqp1_0/priv/schema/rabbitmq_amqp1_0.schema b/deps/rabbitmq_amqp1_0/priv/schema/rabbitmq_amqp1_0.schema
new file mode 100644
index 0000000000..e6cfb68262
--- /dev/null
+++ b/deps/rabbitmq_amqp1_0/priv/schema/rabbitmq_amqp1_0.schema
@@ -0,0 +1,31 @@
+%% ----------------------------------------------------------------------------
+%% RabbitMQ AMQP 1.0 Support
+%%
+%% See https://github.com/rabbitmq/rabbitmq-amqp1.0/blob/stable/README.md
+%% for details
+%% ----------------------------------------------------------------------------
+
+% {rabbitmq_amqp1_0,[
+%% Connections that are not authenticated with SASL will connect as this
+%% account. See the README for more information.
+%%
+%% Please note that setting this will allow clients to connect without
+%% authenticating!
+%%
+%% {default_user, "guest"},
+{mapping, "amqp1_0.default_user", "rabbitmq_amqp1_0.default_user",
+ [{datatype, [{enum, [none]}, string]}]}.
+%% Enable protocol strict mode. See the README for more information.
+%%
+%% {protocol_strict_mode, false}
+% ]},
+{mapping, "amqp1_0.protocol_strict_mode", "rabbitmq_amqp1_0.protocol_strict_mode",
+ [{datatype, {enum, [true, false]}}]}.
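+
+%% Example rabbitmq.conf values accepted by the mappings above (a sketch;
+%% note the warning: setting default_user permits unauthenticated access):
+%%
+%%   amqp1_0.default_user = guest
+%%   amqp1_0.protocol_strict_mode = false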
+
+{mapping, "amqp1_0.default_vhost", "rabbitmq_amqp1_0.default_vhost",
+ [{datatype, string}]}.
+
+{translation , "rabbitmq_amqp1_0.default_vhost",
+fun(Conf) ->
+ list_to_binary(cuttlefish:conf_get("amqp1_0.default_vhost", Conf))
+end}.
\ No newline at end of file
diff --git a/deps/rabbitmq_amqp1_0/rabbitmq-components.mk b/deps/rabbitmq_amqp1_0/rabbitmq-components.mk
new file mode 100644
index 0000000000..b2a3be8b35
--- /dev/null
+++ b/deps/rabbitmq_amqp1_0/rabbitmq-components.mk
@@ -0,0 +1,359 @@
+ifeq ($(.DEFAULT_GOAL),)
+# Set the default goal to `all` because this file defines some targets
+# before the inclusion of erlang.mk, which would otherwise cause the
+# wrong target to become the default.
+.DEFAULT_GOAL = all
+endif
+
+# PROJECT_VERSION defaults to:
+# 1. the version exported by rabbitmq-server-release;
+# 2. the version stored in `git-revisions.txt`, if it exists;
+# 3. a version based on git-describe(1), if it is a Git clone;
+# 4. 0.0.0
+
+PROJECT_VERSION := $(RABBITMQ_VERSION)
+
+ifeq ($(PROJECT_VERSION),)
+PROJECT_VERSION := $(shell \
+if test -f git-revisions.txt; then \
+ head -n1 git-revisions.txt | \
+ awk '{print $$$(words $(PROJECT_DESCRIPTION) version);}'; \
+else \
+ (git describe --dirty --abbrev=7 --tags --always --first-parent \
+ 2>/dev/null || echo rabbitmq_v0_0_0) | \
+ sed -e 's/^rabbitmq_v//' -e 's/^v//' -e 's/_/./g' -e 's/-/+/' \
+ -e 's/-/./g'; \
+fi)
+endif
+
+# --------------------------------------------------------------------
+# RabbitMQ components.
+# --------------------------------------------------------------------
+
+# For RabbitMQ repositories, we want to check out branches which match
+# the parent project. For instance, if the parent project is on a
+# release tag, dependencies must be on the same release tag. If the
+# parent project is on a topic branch, dependencies must be on the same
+# topic branch, or fall back to `stable` or `master`, whichever was the
+# base of the topic branch.
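+#
+# For example (a sketch with hypothetical refs): building from release tag
+# v3.8.9 makes dep_rabbit resolve to tag v3.8.9; on topic branch my-feature,
+# the branch my-feature is tried first, then the listed fallbacks.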
+
+dep_amqp_client = git_rmq rabbitmq-erlang-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_amqp10_client = git_rmq rabbitmq-amqp1.0-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_amqp10_common = git_rmq rabbitmq-amqp1.0-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbit = git_rmq rabbitmq-server $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbit_common = git_rmq rabbitmq-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_amqp1_0 = git_rmq rabbitmq-amqp1.0 $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_amqp = git_rmq rabbitmq-auth-backend-amqp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_cache = git_rmq rabbitmq-auth-backend-cache $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_http = git_rmq rabbitmq-auth-backend-http $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_ldap = git_rmq rabbitmq-auth-backend-ldap $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_oauth2 = git_rmq rabbitmq-auth-backend-oauth2 $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_mechanism_ssl = git_rmq rabbitmq-auth-mechanism-ssl $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_aws = git_rmq rabbitmq-aws $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_boot_steps_visualiser = git_rmq rabbitmq-boot-steps-visualiser $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_cli = git_rmq rabbitmq-cli $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_codegen = git_rmq rabbitmq-codegen $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_consistent_hash_exchange = git_rmq rabbitmq-consistent-hash-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_ct_client_helpers = git_rmq rabbitmq-ct-client-helpers $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_ct_helpers = git_rmq rabbitmq-ct-helpers $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_delayed_message_exchange = git_rmq rabbitmq-delayed-message-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_dotnet_client = git_rmq rabbitmq-dotnet-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_event_exchange = git_rmq rabbitmq-event-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_federation = git_rmq rabbitmq-federation $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_federation_management = git_rmq rabbitmq-federation-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_java_client = git_rmq rabbitmq-java-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_client = git_rmq rabbitmq-jms-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_cts = git_rmq rabbitmq-jms-cts $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_topic_exchange = git_rmq rabbitmq-jms-topic-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_lvc_exchange = git_rmq rabbitmq-lvc-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management = git_rmq rabbitmq-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_agent = git_rmq rabbitmq-management-agent $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_exchange = git_rmq rabbitmq-management-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_themes = git_rmq rabbitmq-management-themes $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_message_timestamp = git_rmq rabbitmq-message-timestamp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_metronome = git_rmq rabbitmq-metronome $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_mqtt = git_rmq rabbitmq-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_objc_client = git_rmq rabbitmq-objc-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_aws = git_rmq rabbitmq-peer-discovery-aws $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_common = git_rmq rabbitmq-peer-discovery-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_consul = git_rmq rabbitmq-peer-discovery-consul $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_etcd = git_rmq rabbitmq-peer-discovery-etcd $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_k8s = git_rmq rabbitmq-peer-discovery-k8s $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_prometheus = git_rmq rabbitmq-prometheus $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_random_exchange = git_rmq rabbitmq-random-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_recent_history_exchange = git_rmq rabbitmq-recent-history-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_routing_node_stamp = git_rmq rabbitmq-routing-node-stamp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_rtopic_exchange = git_rmq rabbitmq-rtopic-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_server_release = git_rmq rabbitmq-server-release $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_sharding = git_rmq rabbitmq-sharding $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_shovel = git_rmq rabbitmq-shovel $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_shovel_management = git_rmq rabbitmq-shovel-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_stomp = git_rmq rabbitmq-stomp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_stream = git_rmq rabbitmq-stream $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_toke = git_rmq rabbitmq-toke $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_top = git_rmq rabbitmq-top $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_tracing = git_rmq rabbitmq-tracing $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_trust_store = git_rmq rabbitmq-trust-store $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_test = git_rmq rabbitmq-test $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_dispatch = git_rmq rabbitmq-web-dispatch $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_stomp = git_rmq rabbitmq-web-stomp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_stomp_examples = git_rmq rabbitmq-web-stomp-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt = git_rmq rabbitmq-web-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt_examples = git_rmq rabbitmq-web-mqtt-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_website = git_rmq rabbitmq-website $(current_rmq_ref) $(base_rmq_ref) live master
+dep_toke = git_rmq toke $(current_rmq_ref) $(base_rmq_ref) master
+
+dep_rabbitmq_public_umbrella = git_rmq rabbitmq-public-umbrella $(current_rmq_ref) $(base_rmq_ref) master
+
+# Third-party dependency version pinning.
+#
+# We do this in this file, which is copied into all projects, to ensure
+# they all use the same versions. It avoids conflicts and makes it
+# possible to work with rabbitmq-public-umbrella.
+
+dep_accept = hex 0.3.5
+dep_cowboy = hex 2.8.0
+dep_cowlib = hex 2.9.1
+dep_jsx = hex 2.11.0
+dep_lager = hex 3.8.0
+dep_prometheus = git https://github.com/deadtrickster/prometheus.erl.git master
+dep_ra = git https://github.com/rabbitmq/ra.git master
+dep_ranch = hex 1.7.1
+dep_recon = hex 2.5.1
+dep_observer_cli = hex 1.5.4
+dep_stdout_formatter = hex 0.2.4
+dep_sysmon_handler = hex 1.3.0
+
+RABBITMQ_COMPONENTS = amqp_client \
+ amqp10_common \
+ amqp10_client \
+ rabbit \
+ rabbit_common \
+ rabbitmq_amqp1_0 \
+ rabbitmq_auth_backend_amqp \
+ rabbitmq_auth_backend_cache \
+ rabbitmq_auth_backend_http \
+ rabbitmq_auth_backend_ldap \
+ rabbitmq_auth_backend_oauth2 \
+ rabbitmq_auth_mechanism_ssl \
+ rabbitmq_aws \
+ rabbitmq_boot_steps_visualiser \
+ rabbitmq_cli \
+ rabbitmq_codegen \
+ rabbitmq_consistent_hash_exchange \
+ rabbitmq_ct_client_helpers \
+ rabbitmq_ct_helpers \
+ rabbitmq_delayed_message_exchange \
+ rabbitmq_dotnet_client \
+ rabbitmq_event_exchange \
+ rabbitmq_federation \
+ rabbitmq_federation_management \
+ rabbitmq_java_client \
+ rabbitmq_jms_client \
+ rabbitmq_jms_cts \
+ rabbitmq_jms_topic_exchange \
+ rabbitmq_lvc_exchange \
+ rabbitmq_management \
+ rabbitmq_management_agent \
+ rabbitmq_management_exchange \
+ rabbitmq_management_themes \
+ rabbitmq_message_timestamp \
+ rabbitmq_metronome \
+ rabbitmq_mqtt \
+ rabbitmq_objc_client \
+ rabbitmq_peer_discovery_aws \
+ rabbitmq_peer_discovery_common \
+ rabbitmq_peer_discovery_consul \
+ rabbitmq_peer_discovery_etcd \
+ rabbitmq_peer_discovery_k8s \
+ rabbitmq_prometheus \
+ rabbitmq_random_exchange \
+ rabbitmq_recent_history_exchange \
+ rabbitmq_routing_node_stamp \
+ rabbitmq_rtopic_exchange \
+ rabbitmq_server_release \
+ rabbitmq_sharding \
+ rabbitmq_shovel \
+ rabbitmq_shovel_management \
+ rabbitmq_stomp \
+ rabbitmq_stream \
+ rabbitmq_toke \
+ rabbitmq_top \
+ rabbitmq_tracing \
+ rabbitmq_trust_store \
+ rabbitmq_web_dispatch \
+ rabbitmq_web_mqtt \
+ rabbitmq_web_mqtt_examples \
+ rabbitmq_web_stomp \
+ rabbitmq_web_stomp_examples \
+ rabbitmq_website
+
+# Erlang.mk does not rebuild dependencies once they have been compiled,
+# except for those listed in the `$(FORCE_REBUILD)` variable.
+#
+# We want all RabbitMQ components to always be rebuilt: this makes it
+# easier to work on several components at the same time.
+
+FORCE_REBUILD = $(RABBITMQ_COMPONENTS)
+
+# Several components have a custom erlang.mk/build.config, mainly
+# to disable eunit. Therefore, we can't use the top-level project's
+# erlang.mk copy.
+NO_AUTOPATCH += $(RABBITMQ_COMPONENTS)
+
+ifeq ($(origin current_rmq_ref),undefined)
+ifneq ($(wildcard .git),)
+current_rmq_ref := $(shell (\
+ ref=$$(LANG=C git branch --list | awk '/^\* \(.*detached / {ref=$$0; sub(/.*detached [^ ]+ /, "", ref); sub(/\)$$/, "", ref); print ref; exit;} /^\* / {ref=$$0; sub(/^\* /, "", ref); print ref; exit}');\
+ if test "$$(git rev-parse --short HEAD)" != "$$ref"; then echo "$$ref"; fi))
+else
+current_rmq_ref := master
+endif
+endif
+export current_rmq_ref
+
+ifeq ($(origin base_rmq_ref),undefined)
+ifneq ($(wildcard .git),)
+possible_base_rmq_ref := master
+ifeq ($(possible_base_rmq_ref),$(current_rmq_ref))
+base_rmq_ref := $(current_rmq_ref)
+else
+base_rmq_ref := $(shell \
+ (git rev-parse --verify -q master >/dev/null && \
+ git rev-parse --verify -q $(possible_base_rmq_ref) >/dev/null && \
+ git merge-base --is-ancestor $$(git merge-base master HEAD) $(possible_base_rmq_ref) && \
+ echo $(possible_base_rmq_ref)) || \
+ echo master)
+endif
+else
+base_rmq_ref := master
+endif
+endif
+export base_rmq_ref
+
+# Repository URL selection.
+#
+# First, we infer other components' location from the current project
+# repository URL, if it's a Git repository:
+# - We take the "origin" remote URL as the base
+# - The current project name and repository name are replaced by the
+# target's properties:
+# eg. rabbitmq-common is replaced by rabbitmq-codegen
+# eg. rabbit_common is replaced by rabbitmq_codegen
+#
+# If cloning from this computed location fails, we fall back to the
+# RabbitMQ upstream, which is GitHub.
+
+# Macro to transform eg. "rabbit_common" to "rabbitmq-common".
+rmq_cmp_repo_name = $(word 2,$(dep_$(1)))
+
+# Upstream URL for the current project.
+RABBITMQ_COMPONENT_REPO_NAME := $(call rmq_cmp_repo_name,$(PROJECT))
+RABBITMQ_UPSTREAM_FETCH_URL ?= https://github.com/rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git
+RABBITMQ_UPSTREAM_PUSH_URL ?= git@github.com:rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git
+
+# Current URL for the current project. If this is not a Git clone,
+# default to the upstream Git repository.
+ifneq ($(wildcard .git),)
+git_origin_fetch_url := $(shell git config remote.origin.url)
+git_origin_push_url := $(shell git config remote.origin.pushurl || git config remote.origin.url)
+RABBITMQ_CURRENT_FETCH_URL ?= $(git_origin_fetch_url)
+RABBITMQ_CURRENT_PUSH_URL ?= $(git_origin_push_url)
+else
+RABBITMQ_CURRENT_FETCH_URL ?= $(RABBITMQ_UPSTREAM_FETCH_URL)
+RABBITMQ_CURRENT_PUSH_URL ?= $(RABBITMQ_UPSTREAM_PUSH_URL)
+endif
+
+# Macro to replace the following pattern:
+# 1. /foo.git -> /bar.git
+# 2. /foo -> /bar
+# 3. /foo/ -> /bar/
+subst_repo_name = $(patsubst %/$(1)/%,%/$(2)/%,$(patsubst %/$(1),%/$(2),$(patsubst %/$(1).git,%/$(2).git,$(3))))
+
+# Macro to replace both the project's name (eg. "rabbit_common") and
+# repository name (eg. "rabbitmq-common") by the target's equivalent.
+#
+# This macro is kept on one line because we don't want whitespace in
+# the returned value, as it's used in $(dep_fetch_git_rmq) in a shell
+# single-quoted string.
+dep_rmq_repo = $(if $(dep_$(2)),$(call subst_repo_name,$(PROJECT),$(2),$(call subst_repo_name,$(RABBITMQ_COMPONENT_REPO_NAME),$(call rmq_cmp_repo_name,$(2)),$(1))),$(pkg_$(1)_repo))
+
+dep_rmq_commits = $(if $(dep_$(1)), \
+ $(wordlist 3,$(words $(dep_$(1))),$(dep_$(1))), \
+ $(pkg_$(1)_commit))
+
+define dep_fetch_git_rmq
+ fetch_url1='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_FETCH_URL),$(1))'; \
+ fetch_url2='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_FETCH_URL),$(1))'; \
+ if test "$$$$fetch_url1" != '$(RABBITMQ_CURRENT_FETCH_URL)' && \
+ git clone -q -n -- "$$$$fetch_url1" $(DEPS_DIR)/$(call dep_name,$(1)); then \
+ fetch_url="$$$$fetch_url1"; \
+ push_url='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_PUSH_URL),$(1))'; \
+ elif git clone -q -n -- "$$$$fetch_url2" $(DEPS_DIR)/$(call dep_name,$(1)); then \
+ fetch_url="$$$$fetch_url2"; \
+ push_url='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_PUSH_URL),$(1))'; \
+ fi; \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && ( \
+ $(foreach ref,$(call dep_rmq_commits,$(1)), \
+ git checkout -q $(ref) >/dev/null 2>&1 || \
+ ) \
+ (echo "error: no valid pathspec among: $(call dep_rmq_commits,$(1))" \
+ 1>&2 && false) ) && \
+ (test "$$$$fetch_url" = "$$$$push_url" || \
+ git remote set-url --push origin "$$$$push_url")
+endef
+
+# --------------------------------------------------------------------
+# Component distribution.
+# --------------------------------------------------------------------
+
+list-dist-deps::
+ @:
+
+prepare-dist::
+ @:
+
+# --------------------------------------------------------------------
+# Umbrella-specific settings.
+# --------------------------------------------------------------------
+
+# If the top-level project is a RabbitMQ component, we override
+# $(DEPS_DIR) for this project to point to the top-level one.
+#
+# We also verify that the guessed DEPS_DIR is actually named `deps`,
+# to rule out any situation where it is a mere coincidence that we
+# found a `rabbitmq-components.mk` in upper directories.
+
+possible_deps_dir_1 = $(abspath ..)
+possible_deps_dir_2 = $(abspath ../../..)
+
+ifeq ($(notdir $(possible_deps_dir_1)),deps)
+ifneq ($(wildcard $(possible_deps_dir_1)/../rabbitmq-components.mk),)
+deps_dir_overriden = 1
+DEPS_DIR ?= $(possible_deps_dir_1)
+DISABLE_DISTCLEAN = 1
+endif
+endif
+
+ifeq ($(deps_dir_overriden),)
+ifeq ($(notdir $(possible_deps_dir_2)),deps)
+ifneq ($(wildcard $(possible_deps_dir_2)/../rabbitmq-components.mk),)
+deps_dir_overriden = 1
+DEPS_DIR ?= $(possible_deps_dir_2)
+DISABLE_DISTCLEAN = 1
+endif
+endif
+endif
+
+ifneq ($(wildcard UMBRELLA.md),)
+DISABLE_DISTCLEAN = 1
+endif
+
+# We disable `make distclean` so $(DEPS_DIR) is not accidentally removed.
+
+ifeq ($(DISABLE_DISTCLEAN),1)
+ifneq ($(filter distclean distclean-deps,$(MAKECMDGOALS)),)
+SKIP_DEPS = 1
+endif
+endif
diff --git a/deps/rabbitmq_amqp1_0/src/Elixir.RabbitMQ.CLI.Ctl.Commands.ListAmqp10ConnectionsCommand.erl b/deps/rabbitmq_amqp1_0/src/Elixir.RabbitMQ.CLI.Ctl.Commands.ListAmqp10ConnectionsCommand.erl
new file mode 100644
index 0000000000..77c9c46d5f
--- /dev/null
+++ b/deps/rabbitmq_amqp1_0/src/Elixir.RabbitMQ.CLI.Ctl.Commands.ListAmqp10ConnectionsCommand.erl
@@ -0,0 +1,79 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+-module('Elixir.RabbitMQ.CLI.Ctl.Commands.ListAmqp10ConnectionsCommand').
+
+-behaviour('Elixir.RabbitMQ.CLI.CommandBehaviour').
+-include("rabbit_amqp1_0.hrl").
+
+-export([formatter/0,
+ scopes/0,
+ switches/0,
+ aliases/0,
+ usage/0,
+ usage_additional/0,
+ banner/2,
+ validate/2,
+ merge_defaults/2,
+ run/2,
+ output/2,
+ help_section/0,
+ description/0]).
+
+formatter() -> 'Elixir.RabbitMQ.CLI.Formatters.Table'.
+scopes() -> [ctl, diagnostics].
+switches() -> [{verbose, boolean}].
+aliases() -> [{'V', verbose}].
+
+validate(Args, _) ->
+ case 'Elixir.RabbitMQ.CLI.Ctl.InfoKeys':validate_info_keys(Args,
+ ?INFO_ITEMS) of
+ {ok, _} -> ok;
+ Error -> Error
+ end.
+
+merge_defaults([], Opts) ->
+ merge_defaults([<<"pid">>], Opts);
+merge_defaults(Args, Opts) ->
+ {Args, maps:merge(#{verbose => false}, Opts)}.
+
+usage() ->
+ <<"list_amqp10_connections [<column> ...]">>.
+
+usage_additional() ->
+ Prefix = <<" must be one of ">>,
+ InfoItems = 'Elixir.Enum':join(lists:usort(?INFO_ITEMS), <<", ">>),
+ [
+ {<<"<column>">>, <<Prefix/binary, InfoItems/binary>>}
+ ].
+
+description() -> <<"Lists AMQP 1.0 connections on the target node">>.
+
+help_section() ->
+ {plugin, 'amqp1.0'}.
+
+run(Args, #{node := NodeName,
+ timeout := Timeout,
+ verbose := Verbose}) ->
+ InfoKeys = case Verbose of
+ true -> ?INFO_ITEMS;
+ false -> 'Elixir.RabbitMQ.CLI.Ctl.InfoKeys':prepare_info_keys(Args)
+ end,
+ Nodes = 'Elixir.RabbitMQ.CLI.Core.Helpers':nodes_in_cluster(NodeName),
+
+ 'Elixir.RabbitMQ.CLI.Ctl.RpcStream':receive_list_items(
+ NodeName,
+ rabbit_amqp1_0,
+ emit_connection_info_all,
+ [Nodes, InfoKeys],
+ Timeout,
+ InfoKeys,
+ length(Nodes)).
+
+banner(_, _) -> <<"Listing AMQP 1.0 connections ...">>.
+
+output(Result, _Opts) ->
+ 'Elixir.RabbitMQ.CLI.DefaultOutput':output(Result).
diff --git a/deps/rabbitmq_amqp1_0/src/rabbit_amqp1_0.erl b/deps/rabbitmq_amqp1_0/src/rabbit_amqp1_0.erl
new file mode 100644
index 0000000000..c130e1a33f
--- /dev/null
+++ b/deps/rabbitmq_amqp1_0/src/rabbit_amqp1_0.erl
@@ -0,0 +1,40 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+-module(rabbit_amqp1_0).
+
+-export([connection_info_local/1,
+ emit_connection_info_local/3,
+ emit_connection_info_all/4,
+ list/0]).
+
+emit_connection_info_all(Nodes, Items, Ref, AggregatorPid) ->
+ Pids = [spawn_link(Node, rabbit_amqp1_0, emit_connection_info_local,
+ [Items, Ref, AggregatorPid])
+ || Node <- Nodes],
+ rabbit_control_misc:await_emitters_termination(Pids),
+ ok.
+
+emit_connection_info_local(Items, Ref, AggregatorPid) ->
+ rabbit_control_misc:emitting_map_with_exit_handler(
+ AggregatorPid, Ref,
+ fun(Pid) ->
+ rabbit_amqp1_0_reader:info(Pid, Items)
+ end,
+ list()).
+
+connection_info_local(Items) ->
+ Connections = list(),
+ [rabbit_amqp1_0_reader:info(Pid, Items) || Pid <- Connections].
+
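+%% Walk the supervision tree from rabbit_sup through the Ranch listener
+%% and connection supervisors down to each connection's reader process.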
+list() ->
+ [ReaderPid
+ || {_, TcpPid, _, [tcp_listener_sup]} <- supervisor:which_children(rabbit_sup),
+ {_, RanchLPid, _, [ranch_listener_sup]} <- supervisor:which_children(TcpPid),
+ {_, RanchCPid, _, [ranch_conns_sup]} <- supervisor:which_children(RanchLPid),
+ {rabbit_connection_sup, ConnPid, _, _} <- supervisor:which_children(RanchCPid),
+ {reader, ReaderPid, _, _} <- supervisor:which_children(ConnPid)
+ ].
diff --git a/deps/rabbitmq_amqp1_0/src/rabbit_amqp1_0_channel.erl b/deps/rabbitmq_amqp1_0/src/rabbit_amqp1_0_channel.erl
new file mode 100644
index 0000000000..de896b44ab
--- /dev/null
+++ b/deps/rabbitmq_amqp1_0/src/rabbit_amqp1_0_channel.erl
@@ -0,0 +1,61 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_amqp1_0_channel).
+
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include("rabbit_amqp1_0.hrl").
+
+-export([call/2, call/3, cast/2, cast/3, cast_flow/3, subscribe/3]).
+-export([convert_code/1, convert_error/1]).
+
+-import(rabbit_amqp1_0_util, [protocol_error/3]).
+
+call(Ch, Method) ->
+ convert_error(fun () -> amqp_channel:call(Ch, Method) end).
+
+call(Ch, Method, Content) ->
+ convert_error(fun () -> amqp_channel:call(Ch, Method, Content) end).
+
+cast(Ch, Method) ->
+ convert_error(fun () -> amqp_channel:cast(Ch, Method) end).
+
+cast(Ch, Method, Content) ->
+ convert_error(fun () -> amqp_channel:cast(Ch, Method, Content) end).
+
+cast_flow(Ch, Method, Content) ->
+ convert_error(fun () -> amqp_channel:cast_flow(Ch, Method, Content) end).
+
+subscribe(Ch, Method, Subscriber) ->
+ convert_error(fun () -> amqp_channel:subscribe(Ch, Method, Subscriber) end).
+
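+%% Run Fun(), translating an AMQP 0-9-1 server-initiated channel close
+%% into the nearest AMQP 1.0 protocol error.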
+convert_error(Fun) ->
+ try
+ Fun()
+ catch exit:{{shutdown, {server_initiated_close, Code, Msg}}, _} ->
+ protocol_error(convert_code(Code), Msg, [])
+ end.
+
+%% TODO this was completely off the top of my head. Check these make sense.
+convert_code(?CONTENT_TOO_LARGE) -> ?V_1_0_AMQP_ERROR_FRAME_SIZE_TOO_SMALL;
+convert_code(?NO_ROUTE) -> ?V_1_0_AMQP_ERROR_PRECONDITION_FAILED;
+convert_code(?NO_CONSUMERS) -> ?V_1_0_AMQP_ERROR_PRECONDITION_FAILED;
+convert_code(?ACCESS_REFUSED) -> ?V_1_0_AMQP_ERROR_UNAUTHORIZED_ACCESS;
+convert_code(?NOT_FOUND) -> ?V_1_0_AMQP_ERROR_NOT_FOUND;
+convert_code(?RESOURCE_LOCKED) -> ?V_1_0_AMQP_ERROR_RESOURCE_LOCKED;
+convert_code(?PRECONDITION_FAILED) -> ?V_1_0_AMQP_ERROR_PRECONDITION_FAILED;
+convert_code(?CONNECTION_FORCED) -> ?V_1_0_CONNECTION_ERROR_CONNECTION_FORCED;
+convert_code(?INVALID_PATH) -> ?V_1_0_AMQP_ERROR_INVALID_FIELD;
+convert_code(?FRAME_ERROR) -> ?V_1_0_CONNECTION_ERROR_FRAMING_ERROR;
+convert_code(?SYNTAX_ERROR) -> ?V_1_0_CONNECTION_ERROR_FRAMING_ERROR;
+convert_code(?COMMAND_INVALID) -> ?V_1_0_CONNECTION_ERROR_FRAMING_ERROR;
+convert_code(?CHANNEL_ERROR) -> ?V_1_0_CONNECTION_ERROR_FRAMING_ERROR;
+convert_code(?UNEXPECTED_FRAME) -> ?V_1_0_CONNECTION_ERROR_FRAMING_ERROR;
+convert_code(?RESOURCE_ERROR) -> ?V_1_0_AMQP_ERROR_RESOURCE_LIMIT_EXCEEDED;
+convert_code(?NOT_ALLOWED) -> ?V_1_0_AMQP_ERROR_UNAUTHORIZED_ACCESS;
+convert_code(?NOT_IMPLEMENTED) -> ?V_1_0_AMQP_ERROR_NOT_IMPLEMENTED;
+convert_code(?INTERNAL_ERROR) -> ?V_1_0_AMQP_ERROR_INTERNAL_ERROR.
diff --git a/deps/rabbitmq_amqp1_0/src/rabbit_amqp1_0_incoming_link.erl b/deps/rabbitmq_amqp1_0/src/rabbit_amqp1_0_incoming_link.erl
new file mode 100644
index 0000000000..6ae8615589
--- /dev/null
+++ b/deps/rabbitmq_amqp1_0/src/rabbit_amqp1_0_incoming_link.erl
@@ -0,0 +1,228 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_amqp1_0_incoming_link).
+
+-export([attach/3, transfer/4]).
+
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include("rabbit_amqp1_0.hrl").
+
+-import(rabbit_amqp1_0_util, [protocol_error/3]).
+
+%% Just make these constant for the time being.
+-define(INCOMING_CREDIT, 65536).
+
+-record(incoming_link, {name, exchange, routing_key,
+ delivery_id = undefined,
+ delivery_count = 0,
+ send_settle_mode = undefined,
+ recv_settle_mode = undefined,
+ credit_used = ?INCOMING_CREDIT div 2,
+ msg_acc = [],
+ route_state}).
+
+attach(#'v1_0.attach'{name = Name,
+ handle = Handle,
+ source = Source,
+ snd_settle_mode = SndSettleMode,
+ rcv_settle_mode = RcvSettleMode,
+ target = Target,
+ initial_delivery_count = {uint, InitTransfer}},
+ BCh, DCh) ->
+ %% TODO associate link name with target
+ case ensure_target(Target,
+ #incoming_link{
+ name = Name,
+ route_state = rabbit_routing_util:init_state(),
+ delivery_count = InitTransfer },
+ DCh) of
+ {ok, ServerTarget, IncomingLink} ->
+ {_, _Outcomes} = rabbit_amqp1_0_link_util:outcomes(Source),
+ %% Default is mixed
+ Confirm =
+ case SndSettleMode of
+ ?V_1_0_SENDER_SETTLE_MODE_SETTLED ->
+ false;
+ _ when SndSettleMode == undefined;
+ SndSettleMode == ?V_1_0_SENDER_SETTLE_MODE_UNSETTLED;
+ SndSettleMode == ?V_1_0_SENDER_SETTLE_MODE_MIXED ->
+ amqp_channel:register_confirm_handler(BCh, self()),
+ rabbit_amqp1_0_channel:call(BCh, #'confirm.select'{}),
+ true
+ end,
+ Flow = #'v1_0.flow'{ handle = Handle,
+ link_credit = {uint, ?INCOMING_CREDIT},
+ drain = false,
+ echo = false },
+ Attach = #'v1_0.attach'{
+ name = Name,
+ handle = Handle,
+ source = Source,
+ snd_settle_mode = SndSettleMode,
+ rcv_settle_mode = RcvSettleMode,
+ target = ServerTarget,
+ initial_delivery_count = undefined, % must be undefined; we are the receiver
+ role = ?RECV_ROLE}, %% server is receiver
+ IncomingLink1 =
+ IncomingLink#incoming_link{recv_settle_mode = RcvSettleMode},
+ {ok, [Attach, Flow], IncomingLink1, Confirm};
+ {error, Reason} ->
+ %% TODO proper link establishment protocol here?
+ protocol_error(?V_1_0_AMQP_ERROR_INVALID_FIELD,
+ "Attach rejected: ~p", [Reason])
+ end.
+
+set_delivery_id({uint, D},
+ #incoming_link{delivery_id = undefined} = Link) ->
+ Link#incoming_link{delivery_id = D};
+set_delivery_id(DeliveryId,
+ #incoming_link{delivery_id = D} = Link)
+ when DeliveryId == {uint, D} orelse DeliveryId == undefined ->
+ Link.
+
+effective_send_settle_mode(undefined, undefined) ->
+ false;
+effective_send_settle_mode(undefined, SettleMode)
+ when is_boolean(SettleMode) ->
+ SettleMode;
+effective_send_settle_mode(SettleMode, undefined)
+ when is_boolean(SettleMode) ->
+ SettleMode;
+effective_send_settle_mode(SettleMode, SettleMode)
+ when is_boolean(SettleMode) ->
+ SettleMode.
+
+effective_recv_settle_mode(undefined, undefined) ->
+ ?V_1_0_RECEIVER_SETTLE_MODE_FIRST;
+effective_recv_settle_mode(undefined, Mode) ->
+ Mode;
+effective_recv_settle_mode(Mode, _) ->
+ Mode.
+
+% TODO: validate effective settle modes against
+% those declared during attach
+
+% TODO: handle aborted transfers
+
+transfer(#'v1_0.transfer'{delivery_id = DeliveryId,
+ more = true,
+ settled = Settled}, MsgPart,
+ #incoming_link{msg_acc = MsgAcc,
+ send_settle_mode = SSM} = Link, _BCh) ->
+ {ok, set_delivery_id(
+ DeliveryId,
+ Link#incoming_link{msg_acc = [MsgPart | MsgAcc],
+ send_settle_mode =
+ effective_send_settle_mode(Settled, SSM)})};
+transfer(#'v1_0.transfer'{delivery_id = DeliveryId0,
+ settled = Settled,
+ rcv_settle_mode = RcvSettleMode,
+ handle = Handle},
+ MsgPart,
+ #incoming_link{exchange = X,
+ routing_key = LinkRKey,
+ delivery_count = Count,
+ credit_used = CreditUsed,
+ msg_acc = MsgAcc,
+ send_settle_mode = SSM,
+ recv_settle_mode = RSM} = Link, BCh) ->
+ MsgBin = iolist_to_binary(lists:reverse([MsgPart | MsgAcc])),
+ ?DEBUG("Inbound content:~n ~p~n",
+ [[amqp10_framing:pprint(Section) ||
+ Section <- amqp10_framing:decode_bin(MsgBin)]]),
+ {MsgRKey, Msg} = rabbit_amqp1_0_message:assemble(MsgBin),
+ RKey = case LinkRKey of
+ undefined -> MsgRKey;
+ _ -> LinkRKey
+ end,
+ rabbit_amqp1_0_channel:cast_flow(
+ BCh, #'basic.publish'{exchange = X,
+ routing_key = RKey}, Msg),
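+ %% Top up the sender's link credit once half of ?INCOMING_CREDIT has
+ %% been used, so the sender is never left waiting for a flow frame.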
+ {SendFlow, CreditUsed1} = case CreditUsed - 1 of
+ C when C =< 0 ->
+ {true, ?INCOMING_CREDIT div 2};
+ D ->
+ {false, D}
+ end,
+ #incoming_link{delivery_id = DeliveryId} =
+ set_delivery_id(DeliveryId0, Link),
+ NewLink = Link#incoming_link{
+ delivery_id = undefined,
+ send_settle_mode = undefined,
+ delivery_count = rabbit_amqp1_0_util:serial_add(Count, 1),
+ credit_used = CreditUsed1,
+ msg_acc = []},
+ Reply = case SendFlow of
+ true -> ?DEBUG("sending flow for incoming ~p", [NewLink]),
+ [incoming_flow(NewLink, Handle)];
+ false -> []
+ end,
+ EffectiveSendSettleMode = effective_send_settle_mode(Settled, SSM),
+ EffectiveRecvSettleMode = effective_recv_settle_mode(RcvSettleMode, RSM),
+ case not EffectiveSendSettleMode andalso
+ EffectiveRecvSettleMode =:= ?V_1_0_RECEIVER_SETTLE_MODE_SECOND of
+ false -> ok;
+ true -> protocol_error(?V_1_0_AMQP_ERROR_NOT_IMPLEMENTED,
+ "rcv-settle-mode second not supported", [])
+ end,
+ {message, Reply, NewLink, DeliveryId,
+ EffectiveSendSettleMode}.
+
+%% TODO default-outcome and outcomes, dynamic lifetimes
+
+ensure_target(Target = #'v1_0.target'{address = Address,
+ dynamic = Dynamic,
+ durable = Durable,
+ %% TODO
+ expiry_policy = _ExpiryPolicy,
+ %% TODO
+ timeout = _Timeout},
+ Link = #incoming_link{ route_state = RouteState }, DCh) ->
+ DeclareParams = [{durable, rabbit_amqp1_0_link_util:durable(Durable)},
+ {exclusive, false},
+ {auto_delete, false},
+ {check_exchange, true},
+ {nowait, false}],
+ case Dynamic of
+ true ->
+ protocol_error(?V_1_0_AMQP_ERROR_NOT_IMPLEMENTED,
+ "Dynamic targets not supported", []);
+ _ ->
+ ok
+ end,
+ case Address of
+ {utf8, Destination} ->
+ case rabbit_routing_util:parse_endpoint(Destination, true) of
+ {ok, Dest} ->
+ {ok, _Queue, RouteState1} =
+ rabbit_amqp1_0_channel:convert_error(
+ fun () ->
+ rabbit_routing_util:ensure_endpoint(
+ dest, DCh, Dest, DeclareParams,
+ RouteState)
+ end),
+ {XName, RK} = rabbit_routing_util:parse_routing(Dest),
+ {ok, Target, Link#incoming_link{
+ route_state = RouteState1,
+ exchange = list_to_binary(XName),
+ routing_key = case RK of
+ undefined -> undefined;
+ [] -> undefined;
+ _ -> list_to_binary(RK)
+ end}};
+ {error, _} = E ->
+ E
+ end;
+ _Else ->
+ {error, {address_not_utf8_string, Address}}
+ end.
+
+incoming_flow(#incoming_link{ delivery_count = Count }, Handle) ->
+ #'v1_0.flow'{handle = Handle,
+ delivery_count = {uint, Count},
+ link_credit = {uint, ?INCOMING_CREDIT}}.
diff --git a/deps/rabbitmq_amqp1_0/src/rabbit_amqp1_0_link_util.erl b/deps/rabbitmq_amqp1_0/src/rabbit_amqp1_0_link_util.erl
new file mode 100644
index 0000000000..2ddfec4dbf
--- /dev/null
+++ b/deps/rabbitmq_amqp1_0/src/rabbit_amqp1_0_link_util.erl
@@ -0,0 +1,67 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_amqp1_0_link_util).
+
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include("rabbit_amqp1_0.hrl").
+
+-export([outcomes/1, ctag_to_handle/1, handle_to_ctag/1, durable/1]).
+
+-define(EXCHANGE_SUB_LIFETIME, "delete-on-close").
+-define(DEFAULT_OUTCOME, #'v1_0.released'{}).
+-define(SUPPORTED_OUTCOMES, [?V_1_0_SYMBOL_ACCEPTED,
+ ?V_1_0_SYMBOL_REJECTED,
+ ?V_1_0_SYMBOL_RELEASED]).
+
+-define(OUTCOMES, [?V_1_0_SYMBOL_ACCEPTED,
+ ?V_1_0_SYMBOL_REJECTED,
+ ?V_1_0_SYMBOL_RELEASED,
+ ?V_1_0_SYMBOL_MODIFIED]).
+
+outcomes(Source) ->
+ {DefaultOutcome, Outcomes} =
+ case Source of
+ #'v1_0.source' {
+ default_outcome = DO,
+ outcomes = Os
+ } ->
+ DO1 = case DO of
+ undefined -> ?DEFAULT_OUTCOME;
+ _ -> DO
+ end,
+ Os1 = case Os of
+ undefined -> ?SUPPORTED_OUTCOMES;
+ {array, symbol, Syms} -> Syms;
+ Bad1 -> rabbit_amqp1_0_util:protocol_error(
+ ?V_1_0_AMQP_ERROR_NOT_IMPLEMENTED,
+ "Outcomes not supported: ~p",
+ [Bad1])
+ end,
+ {DO1, Os1};
+ _ ->
+ {?DEFAULT_OUTCOME, ?SUPPORTED_OUTCOMES}
+ end,
+ case [O || O <- Outcomes, not lists:member(O, ?OUTCOMES)] of
+ [] -> {DefaultOutcome, {array, symbol, Outcomes}};
+ Bad -> rabbit_amqp1_0_util:protocol_error(
+ ?V_1_0_AMQP_ERROR_NOT_IMPLEMENTED,
+ "Outcomes not supported: ~p", [Bad])
+ end.
+
+handle_to_ctag({uint, H}) ->
+ <<"ctag-", H:32/integer>>.
+
+ctag_to_handle(<<"ctag-", H:32/integer>>) ->
+ {uint, H}.
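+
+%% By way of illustration (shell transcript, not part of the module):
+%% the handle <-> consumer-tag mapping is a fixed binary prefix plus
+%% the 32-bit handle, so the two functions invert each other:
+%%
+%%   1> rabbit_amqp1_0_link_util:handle_to_ctag({uint, 5}).
+%%   <<99,116,97,103,45,0,0,0,5>>        % i.e. <<"ctag-",0,0,0,5>>
+%%   2> rabbit_amqp1_0_link_util:ctag_to_handle(v(1)).
+%%   {uint,5}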
+
+durable(undefined) -> false; %% default: none
+durable(?V_1_0_TERMINUS_DURABILITY_NONE) -> false;
+%% This one means "existence of the thing is durable, but unacked msgs
+%% aren't". We choose to upgrade that.
+durable(?V_1_0_TERMINUS_DURABILITY_CONFIGURATION) -> true;
+durable(?V_1_0_TERMINUS_DURABILITY_UNSETTLED_STATE) -> true.
diff --git a/deps/rabbitmq_amqp1_0/src/rabbit_amqp1_0_message.erl b/deps/rabbitmq_amqp1_0/src/rabbit_amqp1_0_message.erl
new file mode 100644
index 0000000000..d3425bfa17
--- /dev/null
+++ b/deps/rabbitmq_amqp1_0/src/rabbit_amqp1_0_message.erl
@@ -0,0 +1,261 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_amqp1_0_message).
+
+-export([assemble/1, annotated_message/3]).
+
+-define(PROPERTIES_HEADER, <<"x-amqp-1.0-properties">>).
+-define(APP_PROPERTIES_HEADER, <<"x-amqp-1.0-app-properties">>).
+-define(MESSAGE_ANNOTATIONS_HEADER, <<"x-amqp-1.0-message-annotations">>).
+-define(FOOTER, <<"x-amqp-1.0-footer">>).
+
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include("rabbit_amqp1_0.hrl").
+
+assemble(MsgBin) ->
+ {RKey, Props, Content} = assemble(header, {<<"">>, #'P_basic'{}, []},
+ decode_section(MsgBin), MsgBin),
+ {RKey, #amqp_msg{props = Props, payload = Content}}.
+
+assemble(header, {R, P, C}, {H = #'v1_0.header'{}, Rest}, _Uneaten) ->
+ assemble(message_annotations, {R, translate_header(H, P), C},
+ decode_section(Rest), Rest);
+assemble(header, {R, P, C}, Else, Uneaten) ->
+ assemble(message_annotations, {R, P, C}, Else, Uneaten);
+
+assemble(delivery_annotations, RPC, {#'v1_0.delivery_annotations'{}, Rest},
+ Uneaten) ->
+ %% ignore delivery annotations for now
+ %% TODO: handle "rejected" error
+ assemble(message_annotations, RPC, Rest, Uneaten);
+assemble(delivery_annotations, RPC, Else, Uneaten) ->
+ assemble(message_annotations, RPC, Else, Uneaten);
+
+assemble(message_annotations, {R, P = #'P_basic'{headers = Headers}, C},
+ {#'v1_0.message_annotations'{}, Rest}, Uneaten) ->
+ MsgAnnoBin = chunk(Rest, Uneaten),
+ assemble(properties, {R, P#'P_basic'{
+ headers = set_header(?MESSAGE_ANNOTATIONS_HEADER,
+ MsgAnnoBin, Headers)}, C},
+ decode_section(Rest), Rest);
+assemble(message_annotations, {R, P, C}, Else, Uneaten) ->
+ assemble(properties, {R, P, C}, Else, Uneaten);
+
+assemble(properties, {_R, P, C}, {X = #'v1_0.properties'{}, Rest}, Uneaten) ->
+ PropsBin = chunk(Rest, Uneaten),
+ assemble(app_properties, {routing_key(X),
+ translate_properties(X, PropsBin, P), C},
+ decode_section(Rest), Rest);
+assemble(properties, {R, P, C}, Else, Uneaten) ->
+ assemble(app_properties, {R, P, C}, Else, Uneaten);
+
+assemble(app_properties, {R, P = #'P_basic'{headers = Headers}, C},
+ {#'v1_0.application_properties'{}, Rest}, Uneaten) ->
+ AppPropsBin = chunk(Rest, Uneaten),
+ assemble(body, {R, P#'P_basic'{
+ headers = set_header(?APP_PROPERTIES_HEADER,
+ AppPropsBin, Headers)}, C},
+ decode_section(Rest), Rest);
+assemble(app_properties, {R, P, C}, Else, Uneaten) ->
+ assemble(body, {R, P, C}, Else, Uneaten);
+
+%% The only 'interoperable' content is a single amqp-data section.
+%% Everything else we will leave as-is. We still have to parse the
+%% sections one-by-one, however, to see when we hit the footer or
+%% whatever comes next.
+
+%% NB we do not strictly enforce the (slightly random) rules
+%% pertaining to body sections, that is:
+%% - one amqp-value; OR
+%% - one or more amqp-sequence; OR
+%% - one or more amqp-data.
+%% We allow any number of each kind, in any permutation.
+
+assemble(body, {R, P, _}, {#'v1_0.data'{content = Content}, Rest}, Uneaten) ->
+ Chunk = chunk(Rest, Uneaten),
+ assemble(amqp10body, {R, set_1_0_type(<<"binary">>, P),
+ {data, Content, Chunk}},
+ decode_section(Rest), Rest);
+assemble(body, {R, P, C}, Else, Uneaten) ->
+ assemble(amqp10body, {R, P, C}, Else, Uneaten);
+
+assemble(amqp10body, {R, P, C}, {{Type, _}, Rest}, Uneaten)
+ when Type =:= 'v1_0.data' orelse
+ Type =:= 'v1_0.amqp_sequence' orelse
+ Type =:= 'v1_0.amqp_value' ->
+ Encoded = chunk(Rest, Uneaten),
+ assemble(amqp10body,
+ {R, set_1_0_type(<<"amqp-1.0">>, P), add_body_section(Encoded, C)},
+ decode_section(Rest), Rest);
+assemble(amqp10body, {R, P, C}, Else, Uneaten) ->
+ assemble(footer, {R, P, compile_body(C)}, Else, Uneaten);
+
+assemble(footer, {R, P = #'P_basic'{headers = Headers}, C},
+ {#'v1_0.footer'{}, <<>>}, Uneaten) ->
+ {R, P#'P_basic'{headers = set_header(?FOOTER, Uneaten, Headers)}, C};
+assemble(footer, {R, P, C}, none, _) ->
+ {R, P, C};
+assemble(footer, _, Else, _) ->
+ exit({unexpected_trailing_sections, Else});
+
+assemble(Expected, _, Actual, _) ->
+ exit({expected_section, Expected, Actual}).
+
+decode_section(<<>>) ->
+ none;
+decode_section(MsgBin) ->
+ {AmqpValue, Rest} = amqp10_binary_parser:parse(MsgBin),
+ {amqp10_framing:decode(AmqpValue), Rest}.
+
+chunk(Rest, Uneaten) ->
+ ChunkLen = size(Uneaten) - size(Rest),
+ <<Chunk:ChunkLen/binary, _ActuallyRest/binary>> = Uneaten,
+ Chunk.
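+
+%% For example (illustrative sizes): if Uneaten is 100 bytes and the
+%% parser left 60 bytes in Rest, chunk/2 returns the 40 bytes just
+%% consumed; this is how each 1.0 section is preserved verbatim for
+%% round-tripping through the 0-9-1 message headers.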
+
+add_body_section(C, {data, _, Bin}) ->
+ [C, Bin];
+add_body_section(C, Cs) ->
+ [C | Cs].
+
+compile_body({data, Content, _}) ->
+ Content;
+compile_body(Sections) ->
+ lists:reverse(Sections).
+
+translate_header(Header10, Props) ->
+ Props#'P_basic'{
+ delivery_mode = case Header10#'v1_0.header'.durable of
+ true -> 2;
+ _ -> 1
+ end,
+ priority = unwrap(Header10#'v1_0.header'.priority),
+ expiration = to_expiration(Header10#'v1_0.header'.ttl),
+ type = undefined,
+ app_id = undefined,
+ cluster_id = undefined}.
+
+translate_properties(Props10, Props10Bin,
+ Props = #'P_basic'{headers = Headers}) ->
+ Props#'P_basic'{
+ headers = set_header(?PROPERTIES_HEADER, Props10Bin,
+ Headers),
+ content_type = unwrap(Props10#'v1_0.properties'.content_type),
+ content_encoding = unwrap(Props10#'v1_0.properties'.content_encoding),
+ correlation_id = unwrap(Props10#'v1_0.properties'.correlation_id),
+ reply_to = case unwrap(Props10#'v1_0.properties'.reply_to) of
+ <<"/queue/", Q/binary>> -> Q;
+ Else -> Else
+ end,
+ message_id = unwrap(Props10#'v1_0.properties'.message_id),
+ user_id = unwrap(Props10#'v1_0.properties'.user_id),
+ timestamp = unwrap(Props10#'v1_0.properties'.creation_time)}.
+
+routing_key(Props10) ->
+ unwrap(Props10#'v1_0.properties'.subject).
+
+unwrap(undefined) -> undefined;
+unwrap({_Type, Thing}) -> Thing.
+
+to_expiration(undefined) ->
+ undefined;
+to_expiration({uint, Num}) ->
+ list_to_binary(integer_to_list(Num)).
+
+from_expiration(undefined) ->
+ undefined;
+from_expiration(PBasic) ->
+ case rabbit_basic:parse_expiration(PBasic) of
+ {ok, undefined} -> undefined;
+ {ok, N} -> {uint, N};
+ _ -> undefined
+ end.
+
+set_header(Header, Value, undefined) ->
+ set_header(Header, Value, []);
+set_header(Header, Value, Headers) ->
+ rabbit_misc:set_table_value(Headers, Header, longstr, Value).
+
+set_1_0_type(Type, Props = #'P_basic'{}) ->
+ Props#'P_basic'{type = Type}.
+
+%%--------------------------------------------------------------------
+
+%% TODO create delivery-annotations
+
+annotated_message(RKey, #'basic.deliver'{redelivered = Redelivered},
+ #amqp_msg{props = Props,
+ payload = Content}) ->
+ #'P_basic'{ headers = Headers } = Props,
+ Header10 = #'v1_0.header'
+ {durable = case Props#'P_basic'.delivery_mode of
+ 2 -> true;
+ _ -> false
+ end,
+ priority = wrap(ubyte, Props#'P_basic'.priority),
+ ttl = from_expiration(Props),
+ first_acquirer = not Redelivered,
+ delivery_count = undefined},
+ HeadersBin = amqp10_framing:encode_bin(Header10),
+ MsgAnnoBin =
+ case table_lookup(Headers, ?MESSAGE_ANNOTATIONS_HEADER) of
+ undefined -> <<>>;
+ {_, MABin} -> MABin
+ end,
+ PropsBin =
+ case table_lookup(Headers, ?PROPERTIES_HEADER) of
+ {_, Props10Bin} ->
+ Props10Bin;
+ undefined ->
+ Props10 = #'v1_0.properties'{
+ message_id = wrap(utf8, Props#'P_basic'.message_id),
+ user_id = wrap(utf8, Props#'P_basic'.user_id),
+ to = undefined,
+ subject = wrap(utf8, RKey),
+ reply_to = case Props#'P_basic'.reply_to of
+ undefined ->
+ undefined;
+ _ ->
+ wrap(utf8,
+ <<"/queue/",
+ (Props#'P_basic'.reply_to)/binary>>)
+ end,
+ correlation_id = wrap(utf8, Props#'P_basic'.correlation_id),
+ content_type = wrap(symbol, Props#'P_basic'.content_type),
+ content_encoding = wrap(symbol, Props#'P_basic'.content_encoding),
+ creation_time = wrap(timestamp, Props#'P_basic'.timestamp)},
+ amqp10_framing:encode_bin(Props10)
+ end,
+ AppPropsBin =
+ case table_lookup(Headers, ?APP_PROPERTIES_HEADER) of
+ {_, AppProps10Bin} ->
+ AppProps10Bin;
+ undefined ->
+ []
+ end,
+ DataBin = case Props#'P_basic'.type of
+ <<"amqp-1.0">> ->
+ Content;
+ _Else -> % e.g., <<"binary">> if originally from 1.0
+ amqp10_framing:encode_bin(
+ #'v1_0.data'{content = Content})
+ end,
+ FooterBin =
+ case table_lookup(Headers, ?FOOTER) of
+ undefined -> <<>>;
+ {_, FBin} -> FBin
+ end,
+ [HeadersBin, MsgAnnoBin, PropsBin, AppPropsBin, DataBin, FooterBin].
+
+wrap(_Type, undefined) ->
+ undefined;
+wrap(Type, Val) ->
+ {Type, Val}.
+
+table_lookup(undefined, _) -> undefined;
+table_lookup(Headers, Header) -> rabbit_misc:table_lookup(Headers, Header).
+
diff --git a/deps/rabbitmq_amqp1_0/src/rabbit_amqp1_0_outgoing_link.erl b/deps/rabbitmq_amqp1_0/src/rabbit_amqp1_0_outgoing_link.erl
new file mode 100644
index 0000000000..c83a152f40
--- /dev/null
+++ b/deps/rabbitmq_amqp1_0/src/rabbit_amqp1_0_outgoing_link.erl
@@ -0,0 +1,240 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_amqp1_0_outgoing_link).
+
+-export([attach/3, delivery/6, transferred/3, credit_drained/3, flow/3]).
+
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include("rabbit_amqp1_0.hrl").
+
+-import(rabbit_amqp1_0_util, [protocol_error/3, serial_add/2]).
+-import(rabbit_amqp1_0_link_util, [handle_to_ctag/1]).
+
+-define(INIT_TXFR_COUNT, 0).
+-define(DEFAULT_SEND_SETTLED, false).
+
+-record(outgoing_link, {queue,
+ delivery_count = 0,
+ send_settled,
+ default_outcome,
+ route_state}).
+
+attach(#'v1_0.attach'{name = Name,
+ handle = Handle,
+ source = Source,
+ snd_settle_mode = SndSettleMode,
+ rcv_settle_mode = RcvSettleMode}, BCh, DCh) ->
+ {DefaultOutcome, Outcomes} = rabbit_amqp1_0_link_util:outcomes(Source),
+ SndSettled =
+ case SndSettleMode of
+ ?V_1_0_SENDER_SETTLE_MODE_SETTLED -> true;
+ ?V_1_0_SENDER_SETTLE_MODE_UNSETTLED -> false;
+ _ -> ?DEFAULT_SEND_SETTLED
+ end,
+ DOSym = amqp10_framing:symbol_for(DefaultOutcome),
+ case ensure_source(Source,
+ #outgoing_link{delivery_count = ?INIT_TXFR_COUNT,
+ send_settled = SndSettled,
+ default_outcome = DOSym,
+ route_state =
+ rabbit_routing_util:init_state()},
+ DCh) of
+ {ok, Source1, OutgoingLink = #outgoing_link{queue = QueueName}} ->
+ CTag = handle_to_ctag(Handle),
+ case rabbit_amqp1_0_channel:subscribe(
+ BCh, #'basic.consume'{
+ queue = QueueName,
+ consumer_tag = CTag,
+ %% we will ack when we've transferred
+ %% a message, or when we get an ack
+ %% from the client.
+ no_ack = false,
+ %% TODO exclusive?
+ exclusive = false,
+ arguments = [{<<"x-credit">>, table,
+ [{<<"credit">>, long, 0},
+ {<<"drain">>, bool, false}]}]},
+ self()) of
+ #'basic.consume_ok'{} ->
+ %% TODO we should avoid the race by getting the queue to send
+ %% attach back, but a.t.m. it would use the wrong codec.
+ {ok, [#'v1_0.attach'{
+ name = Name,
+ handle = Handle,
+ initial_delivery_count = {uint, ?INIT_TXFR_COUNT},
+ snd_settle_mode =
+ case SndSettled of
+ true -> ?V_1_0_SENDER_SETTLE_MODE_SETTLED;
+ false -> ?V_1_0_SENDER_SETTLE_MODE_UNSETTLED
+ end,
+ rcv_settle_mode = RcvSettleMode,
+ source = Source1#'v1_0.source'{
+ default_outcome = DefaultOutcome,
+ outcomes = Outcomes
+ },
+ role = ?SEND_ROLE}], OutgoingLink};
+ Fail ->
+ protocol_error(?V_1_0_AMQP_ERROR_INTERNAL_ERROR,
+ "Consume failed: ~p", [Fail])
+ end;
+ {error, Reason} ->
+ %% TODO proper link establishment protocol here?
+ protocol_error(?V_1_0_AMQP_ERROR_INVALID_FIELD,
+ "Attach rejected: ~p", [Reason])
+ end.
+
+credit_drained(#'basic.credit_drained'{credit_drained = CreditDrained},
+ Handle, Link = #outgoing_link{delivery_count = Count0}) ->
+ Count = Count0 + CreditDrained,
+ %% The transfer count given by the queue should be at least the
+ %% count we hold locally: either we have received and transferred
+ %% all the deliveries, or the queue has advanced the count due to
+ %% drain. Either way we adopt the queue's idea of the count.
+ %% TODO account for it not being there any more
+ F = #'v1_0.flow'{ handle = Handle,
+ delivery_count = {uint, Count},
+ link_credit = {uint, 0},
+ available = {uint, 0},
+ drain = true },
+ {F, Link#outgoing_link{delivery_count = Count}}.
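+
+%% For example (illustrative numbers): with a local delivery_count of
+%% 10 and the queue reporting credit_drained = 4, the flow above
+%% advertises delivery_count = 14 and link_credit = 0, telling the
+%% peer that the drained credit can no longer be used.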
+
+flow(#outgoing_link{delivery_count = LocalCount},
+ #'v1_0.flow'{handle = Handle,
+ delivery_count = Count0,
+ link_credit = {uint, RemoteCredit},
+ drain = Drain0}, BCh) ->
+ {uint, RemoteCount} = default(Count0, {uint, LocalCount}),
+ Drain = default(Drain0, false),
+ %% See section 2.6.7
+ LocalCredit = RemoteCount + RemoteCredit - LocalCount,
+ CTag = handle_to_ctag(Handle),
+ #'basic.credit_ok'{available = Available} =
+ rabbit_amqp1_0_channel:call(
+ BCh, #'basic.credit'{consumer_tag = CTag,
+ credit = LocalCredit,
+ drain = Drain}),
+ case Available of
+ -1 ->
+ {ok, []};
+ %% We don't know - probably because this flow relates
+ %% to a handle that does not yet exist
+ %% TODO is this an error?
+ _ ->
+ {ok, [#'v1_0.flow'{
+ handle = Handle,
+ delivery_count = {uint, LocalCount},
+ link_credit = {uint, LocalCredit},
+ available = {uint, Available},
+ drain = Drain}]}
+ end.
+
+default(undefined, Default) -> Default;
+default(Thing, _Default) -> Thing.
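+
+%% A worked example of the section 2.6.7 computation above
+%% (illustrative numbers): if our delivery_count is 10 while the
+%% receiver's flow carries delivery_count = 8 (two transfers still in
+%% flight) and link_credit = 5, then LocalCredit = 8 + 5 - 10 = 3:
+%% the receiver will accept 13 transfers in total and we have already
+%% sent 10 of them.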
+
+ensure_source(Source = #'v1_0.source'{address = Address,
+ dynamic = Dynamic,
+ durable = Durable,
+ %% TODO
+ expiry_policy = _ExpiryPolicy,
+ %% TODO
+ timeout = _Timeout},
+ Link = #outgoing_link{ route_state = RouteState }, DCh) ->
+ DeclareParams = [{durable, rabbit_amqp1_0_link_util:durable(Durable)},
+ {exclusive, false},
+ {auto_delete, false},
+ {check_exchange, true},
+ {nowait, false}],
+ case Dynamic of
+ true -> protocol_error(?V_1_0_AMQP_ERROR_NOT_IMPLEMENTED,
+ "Dynamic sources not supported", []);
+ _ -> ok
+ end,
+ case Address of
+ {utf8, Destination} ->
+ case rabbit_routing_util:parse_endpoint(Destination, false) of
+ {ok, Dest} ->
+ {ok, Queue, RouteState1} =
+ rabbit_amqp1_0_channel:convert_error(
+ fun() ->
+ rabbit_routing_util:ensure_endpoint(
+ source, DCh, Dest, DeclareParams,
+ RouteState)
+ end),
+ ER = rabbit_routing_util:parse_routing(Dest),
+ ok = rabbit_routing_util:ensure_binding(Queue, ER, DCh),
+ {ok, Source, Link#outgoing_link{route_state = RouteState1,
+ queue = Queue}};
+ {error, _} = E ->
+ E
+ end;
+ _ ->
+ {error, {address_not_utf8_string, Address}}
+ end.
+
+delivery(Deliver = #'basic.deliver'{delivery_tag = DeliveryTag,
+ routing_key = RKey},
+ Msg, FrameMax, Handle, Session,
+ #outgoing_link{send_settled = SendSettled,
+ default_outcome = DefaultOutcome}) ->
+ DeliveryId = rabbit_amqp1_0_session:next_delivery_id(Session),
+ Session1 = rabbit_amqp1_0_session:record_outgoing(
+ DeliveryTag, SendSettled, DefaultOutcome, Session),
+ Txfr = #'v1_0.transfer'{handle = Handle,
+ delivery_tag = {binary, <<DeliveryTag:64>>},
+ delivery_id = {uint, DeliveryId},
+ %% message-format 0 is the only one defined in AMQP 1.0
+ message_format = {uint, 0},
+ settled = SendSettled,
+ resume = false,
+ more = false,
+ aborted = false,
+ %% TODO: actually batchable would be fine,
+ %% but in any case it's only a hint
+ batchable = false},
+ Msg1_0 = rabbit_amqp1_0_message:annotated_message(
+ RKey, Deliver, Msg),
+ ?DEBUG("Outbound content:~n ~p~n",
+ [[amqp10_framing:pprint(Section) ||
+ Section <- amqp10_framing:decode_bin(
+ iolist_to_binary(Msg1_0))]]),
+ %% TODO Ugh
+ TLen = iolist_size(amqp10_framing:encode_bin(Txfr)),
+ Frames = case FrameMax of
+ unlimited ->
+ [[Txfr, Msg1_0]];
+ _ ->
+ encode_frames(Txfr, Msg1_0, FrameMax - TLen, [])
+ end,
+ {ok, Frames, Session1}.
+
+encode_frames(_T, _Msg, MaxContentLen, _Transfers) when MaxContentLen =< 0 ->
+ protocol_error(?V_1_0_AMQP_ERROR_FRAME_SIZE_TOO_SMALL,
+ "Frame size is too small by ~p bytes", [-MaxContentLen]);
+encode_frames(T, Msg, MaxContentLen, Transfers) ->
+ case iolist_size(Msg) > MaxContentLen of
+ true ->
+ <<Chunk:MaxContentLen/binary, Rest/binary>> =
+ iolist_to_binary(Msg),
+ T1 = T#'v1_0.transfer'{more = true},
+ encode_frames(T, Rest, MaxContentLen,
+ [[T1, Chunk] | Transfers]);
+ false ->
+ lists:reverse([[T, Msg] | Transfers])
+ end.
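+
+%% For example (illustrative sizes): with FrameMax = 512 and a
+%% transfer performative encoding to 50 bytes, MaxContentLen is 462;
+%% a 1000-byte message is then emitted as three transfer frames
+%% carrying 462, 462 and 76 bytes of content, the first two flagged
+%% more = true and the last reusing the original (more = false)
+%% transfer.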
+
+transferred(DeliveryTag, Channel,
+ Link = #outgoing_link{ delivery_count = Count,
+ send_settled = SendSettled }) ->
+ if SendSettled ->
+ rabbit_amqp1_0_channel:cast(
+ Channel, #'basic.ack'{ delivery_tag = DeliveryTag });
+ true ->
+ ok
+ end,
+ Link#outgoing_link{delivery_count = serial_add(Count, 1)}.
diff --git a/deps/rabbitmq_amqp1_0/src/rabbit_amqp1_0_reader.erl b/deps/rabbitmq_amqp1_0/src/rabbit_amqp1_0_reader.erl
new file mode 100644
index 0000000000..1d4a5050e1
--- /dev/null
+++ b/deps/rabbitmq_amqp1_0/src/rabbit_amqp1_0_reader.erl
@@ -0,0 +1,809 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_amqp1_0_reader).
+
+%% Transitional step until we can require Erlang/OTP 21 and
+%% use the now recommended try/catch syntax for obtaining the stack trace.
+-compile(nowarn_deprecated_function).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+-include_lib("rabbit_common/include/rabbit_framing.hrl").
+-include_lib("kernel/include/inet.hrl").
+-include("rabbit_amqp1_0.hrl").
+
+-export([init/2, mainloop/2]).
+-export([info/2]).
+
+%% TODO which of these are needed?
+-export([shutdown/2]).
+-export([system_continue/3, system_terminate/4, system_code_change/4]).
+-export([conserve_resources/3]).
+
+-import(rabbit_amqp1_0_util, [protocol_error/3]).
+
+-define(HANDSHAKE_TIMEOUT, 10). %% seconds
+-define(NORMAL_TIMEOUT, 3). %% seconds
+-define(CLOSING_TIMEOUT, 30). %% seconds
+-define(SILENT_CLOSE_DELAY, 3). %% seconds
+
+%%--------------------------------------------------------------------------
+
+-record(v1, {parent, sock, connection, callback, recv_len, pending_recv,
+ connection_state, queue_collector, heartbeater, helper_sup,
+ channel_sup_sup_pid, buf, buf_len, throttle, proxy_socket}).
+
+-record(v1_connection, {user, timeout_sec, frame_max, auth_mechanism, auth_state,
+ hostname}).
+
+-record(throttle, {alarmed_by, last_blocked_by, last_blocked_at}).
+
+-define(IS_RUNNING(State),
+ (State#v1.connection_state =:= running orelse
+ State#v1.connection_state =:= blocking orelse
+ State#v1.connection_state =:= blocked)).
+
+%%--------------------------------------------------------------------------
+
+unpack_from_0_9_1({Parent, Sock, RecvLen, PendingRecv,
+ HelperSupPid, Buf, BufLen, ProxySocket}) ->
+ #v1{parent = Parent,
+ sock = Sock,
+ callback = handshake,
+ recv_len = RecvLen,
+ pending_recv = PendingRecv,
+ connection_state = pre_init,
+ queue_collector = undefined,
+ heartbeater = none,
+ helper_sup = HelperSupPid,
+ buf = Buf,
+ buf_len = BufLen,
+ throttle = #throttle{alarmed_by = [],
+ last_blocked_by = none,
+ last_blocked_at = never},
+ connection = #v1_connection{user = none,
+ timeout_sec = ?HANDSHAKE_TIMEOUT,
+ frame_max = ?FRAME_MIN_SIZE,
+ auth_mechanism = none,
+ auth_state = none},
+ proxy_socket = ProxySocket}.
+
+shutdown(Pid, Explanation) ->
+ gen_server:call(Pid, {shutdown, Explanation}, infinity).
+
+system_continue(Parent, Deb, State) ->
+ ?MODULE:mainloop(Deb, State#v1{parent = Parent}).
+
+system_terminate(Reason, _Parent, _Deb, _State) ->
+ exit(Reason).
+
+system_code_change(Misc, _Module, _OldVsn, _Extra) ->
+ {ok, Misc}.
+
+conserve_resources(Pid, Source, Conserve) ->
+ Pid ! {conserve_resources, Source, Conserve},
+ ok.
+
+server_properties() ->
+ %% The atom doesn't match anything, it's just "not 0-9-1".
+ Raw = lists:keydelete(
+ <<"capabilities">>, 1, rabbit_reader:server_properties(amqp_1_0)),
+ {map, [{{symbol, K}, {utf8, V}} || {K, longstr, V} <- Raw]}.
+
+%%--------------------------------------------------------------------------
+
+inet_op(F) -> rabbit_misc:throw_on_error(inet_error, F).
+
+recvloop(Deb, State = #v1{pending_recv = true}) ->
+ mainloop(Deb, State);
+recvloop(Deb, State = #v1{connection_state = blocked}) ->
+ mainloop(Deb, State);
+recvloop(Deb, State = #v1{sock = Sock, recv_len = RecvLen, buf_len = BufLen})
+ when BufLen < RecvLen ->
+ ok = rabbit_net:setopts(Sock, [{active, once}]),
+ mainloop(Deb, State#v1{pending_recv = true});
+recvloop(Deb, State = #v1{recv_len = RecvLen, buf = Buf, buf_len = BufLen}) ->
+ {Data, Rest} = split_binary(case Buf of
+ [B] -> B;
+ _ -> list_to_binary(lists:reverse(Buf))
+ end, RecvLen),
+ recvloop(Deb, handle_input(State#v1.callback, Data,
+ State#v1{buf = [Rest],
+ buf_len = BufLen - RecvLen})).
+
+mainloop(Deb, State = #v1{sock = Sock, buf = Buf, buf_len = BufLen}) ->
+ case rabbit_net:recv(Sock) of
+ {data, Data} ->
+ recvloop(Deb, State#v1{buf = [Data | Buf],
+ buf_len = BufLen + size(Data),
+ pending_recv = false});
+ closed when State#v1.connection_state =:= closed ->
+ ok;
+ closed ->
+ throw(connection_closed_abruptly);
+ {error, Reason} ->
+ throw({inet_error, Reason});
+ {other, {system, From, Request}} ->
+ sys:handle_system_msg(Request, From, State#v1.parent,
+ ?MODULE, Deb, State);
+ {other, Other} ->
+ case handle_other(Other, State) of
+ stop -> ok;
+ NewState -> recvloop(Deb, NewState)
+ end
+ end.
+
+handle_other({conserve_resources, Source, Conserve},
+ State = #v1{throttle = Throttle =
+ #throttle{alarmed_by = CR}}) ->
+ CR1 = case Conserve of
+ true -> lists:usort([Source | CR]);
+ false -> CR -- [Source]
+ end,
+ Throttle1 = Throttle#throttle{alarmed_by = CR1},
+ control_throttle(State#v1{throttle = Throttle1});
+handle_other({'EXIT', Parent, Reason}, State = #v1{parent = Parent}) ->
+ terminate(io_lib:format("broker forced connection closure "
+ "with reason '~w'", [Reason]), State),
+ %% this is what we are expected to do according to
+ %% http://www.erlang.org/doc/man/sys.html
+ %%
+ %% If we wanted to be *really* nice we should wait for a while for
+ %% clients to close the socket at their end, just as we do in the
+ %% ordinary error case. However, since this termination is
+ %% initiated by our parent it is probably more important to exit
+ %% quickly.
+ exit(Reason);
+handle_other({'DOWN', _MRef, process, ChPid, Reason}, State) ->
+ handle_dependent_exit(ChPid, Reason, State);
+handle_other(handshake_timeout, State)
+ when ?IS_RUNNING(State) orelse
+ State#v1.connection_state =:= closing orelse
+ State#v1.connection_state =:= closed ->
+ State;
+handle_other(handshake_timeout, State) ->
+ throw({handshake_timeout, State#v1.callback});
+handle_other(heartbeat_timeout, State = #v1{connection_state = closed}) ->
+ State;
+handle_other(heartbeat_timeout, #v1{connection_state = S}) ->
+ throw({heartbeat_timeout, S});
+handle_other({'$gen_call', From, {shutdown, Explanation}}, State) ->
+ {ForceTermination, NewState} = terminate(Explanation, State),
+ gen_server:reply(From, ok),
+ case ForceTermination of
+ force -> stop;
+ normal -> NewState
+ end;
+handle_other({'$gen_cast', force_event_refresh}, State) ->
+ %% Ignore, the broker sent us this as it thinks we are a 0-9-1 connection
+ State;
+handle_other({bump_credit, Msg}, State) ->
+ credit_flow:handle_bump_msg(Msg),
+ control_throttle(State);
+handle_other(terminate_connection, State) ->
+ State;
+handle_other({info, InfoItems, Pid}, State) ->
+ Infos = lists:map(
+ fun(InfoItem) ->
+ {InfoItem, info_internal(InfoItem, State)}
+ end,
+ InfoItems),
+ Pid ! {info_reply, Infos},
+ State;
+handle_other(Other, _State) ->
+ %% internal error -> something worth dying for
+ exit({unexpected_message, Other}).
+
+switch_callback(State, Callback, Length) ->
+ State#v1{callback = Callback, recv_len = Length}.
+
+terminate(Reason, State) when ?IS_RUNNING(State) ->
+ {normal, handle_exception(State, 0,
+ {?V_1_0_AMQP_ERROR_INTERNAL_ERROR,
+ "Connection forced: ~p~n", [Reason]})};
+terminate(_Reason, State) ->
+ {force, State}.
+
+control_throttle(State = #v1{connection_state = CS, throttle = Throttle}) ->
+ IsThrottled = ((Throttle#throttle.alarmed_by =/= []) orelse
+ credit_flow:blocked()),
+ case {CS, IsThrottled} of
+ {running, true} -> State#v1{connection_state = blocking};
+ {blocking, false} -> State#v1{connection_state = running};
+ {blocked, false} -> ok = rabbit_heartbeat:resume_monitor(
+ State#v1.heartbeater),
+ State#v1{connection_state = running};
+ {blocked, true} -> State#v1{throttle = update_last_blocked_by(
+ Throttle)};
+ {_, _} -> State
+ end.
+
+update_last_blocked_by(Throttle = #throttle{alarmed_by = []}) ->
+ Throttle#throttle{last_blocked_by = flow};
+update_last_blocked_by(Throttle) ->
+ Throttle#throttle{last_blocked_by = resource}.
+
+%%--------------------------------------------------------------------------
+%% error handling / termination
+
+close_connection(State = #v1{connection = #v1_connection{
+ timeout_sec = TimeoutSec}}) ->
+ erlang:send_after((if TimeoutSec > 0 andalso
+ TimeoutSec < ?CLOSING_TIMEOUT -> TimeoutSec;
+ true -> ?CLOSING_TIMEOUT
+ end) * 1000, self(), terminate_connection),
+ State#v1{connection_state = closed}.
+
+handle_dependent_exit(ChPid, Reason, State) ->
+ case {ChPid, termination_kind(Reason)} of
+ {undefined, uncontrolled} ->
+ exit({abnormal_dependent_exit, ChPid, Reason});
+ {_Channel, controlled} ->
+ maybe_close(control_throttle(State));
+ {Channel, uncontrolled} ->
+ {RealReason, Trace} = Reason,
+ R = {?V_1_0_AMQP_ERROR_INTERNAL_ERROR,
+ "Session error: ~p~n~p~n", [RealReason, Trace]},
+ maybe_close(handle_exception(control_throttle(State), Channel, R))
+ end.
+
+termination_kind(normal) -> controlled;
+termination_kind(_) -> uncontrolled.
+
+maybe_close(State = #v1{connection_state = closing,
+ sock = Sock}) ->
+ NewState = close_connection(State),
+ ok = send_on_channel0(Sock, #'v1_0.close'{}),
+ % Perform an RPC call to each session process to give it time to
+ % process its internal message buffer before the supervision tree
+ % shuts everything down, so that in-flight messages such as
+ % dispositions are not lost
+ [ _ = rabbit_amqp1_0_session:get_info(SessionPid)
+ || {{channel, _}, {ch_fr_pid, SessionPid}} <- get()],
+ NewState;
+maybe_close(State) ->
+ State.
+
+error_frame(Condition, Fmt, Args) ->
+ #'v1_0.error'{condition = Condition,
+ description = {utf8, list_to_binary(
+ rabbit_misc:format(Fmt, Args))}}.
+
+handle_exception(State = #v1{connection_state = closed}, Channel,
+ #'v1_0.error'{description = {utf8, Desc}}) ->
+ rabbit_log_connection:error("AMQP 1.0 connection ~p (~p), channel ~p - error:~n~p~n",
+ [self(), closed, Channel, Desc]),
+ State;
+handle_exception(State = #v1{connection_state = CS}, Channel,
+ ErrorFrame = #'v1_0.error'{description = {utf8, Desc}})
+ when ?IS_RUNNING(State) orelse CS =:= closing ->
+ rabbit_log_connection:error("AMQP 1.0 connection ~p (~p), channel ~p - error:~n~p~n",
+ [self(), CS, Channel, Desc]),
+ %% TODO: session errors shouldn't force the connection to close
+ State1 = close_connection(State),
+ ok = send_on_channel0(State#v1.sock, #'v1_0.close'{error = ErrorFrame}),
+ State1;
+handle_exception(State, Channel, Error) ->
+ %% We don't trust the client at this point - force them to wait
+ %% for a bit so they can't DOS us with repeated failed logins etc.
+ timer:sleep(?SILENT_CLOSE_DELAY * 1000),
+ throw({handshake_error, State#v1.connection_state, Channel, Error}).
+
+%%--------------------------------------------------------------------------
+
+%% Begin 1-0
+
+%% ----------------------------------------
+%% AMQP 1.0 frame handlers
+
+is_connection_frame(#'v1_0.open'{}) -> true;
+is_connection_frame(#'v1_0.close'{}) -> true;
+is_connection_frame(_) -> false.
+
+%% TODO Handle depending on connection state
+%% TODO It'd be nice to only decode up to the descriptor
+
+handle_1_0_frame(Mode, Channel, Payload, State) ->
+ try
+ handle_1_0_frame0(Mode, Channel, Payload, State)
+ catch
+ _:#'v1_0.error'{} = Reason ->
+ handle_exception(State, 0, Reason);
+ _:{error, {not_allowed, Username}} ->
+ %% section 2.8.15 in http://docs.oasis-open.org/amqp/core/v1.0/os/amqp-core-complete-v1.0-os.pdf
+ handle_exception(State, 0, error_frame(
+ ?V_1_0_AMQP_ERROR_UNAUTHORIZED_ACCESS,
+ "Access for user '~s' was refused: insufficient permissions~n", [Username]));
+ _:Reason:Trace ->
+ handle_exception(State, 0, error_frame(
+ ?V_1_0_AMQP_ERROR_INTERNAL_ERROR,
+ "Reader error: ~p~n~p~n",
+ [Reason, Trace]))
+ end.
+
+%% Nothing specifies that connection methods have to be on a
+%% particular channel.
+handle_1_0_frame0(_Mode, Channel, Payload,
+ State = #v1{ connection_state = CS}) when
+ CS =:= closing; CS =:= closed ->
+ Sections = parse_1_0_frame(Payload, Channel),
+ case is_connection_frame(Sections) of
+ true -> handle_1_0_connection_frame(Sections, State);
+ false -> State
+ end;
+handle_1_0_frame0(Mode, Channel, Payload, State) ->
+ Sections = parse_1_0_frame(Payload, Channel),
+ case {Mode, is_connection_frame(Sections)} of
+ {amqp, true} -> handle_1_0_connection_frame(Sections, State);
+ {amqp, false} -> handle_1_0_session_frame(Channel, Sections, State);
+ {sasl, false} -> handle_1_0_sasl_frame(Sections, State)
+ end.
+
+parse_1_0_frame(Payload, _Channel) ->
+ {PerfDesc, Rest} = amqp10_binary_parser:parse(Payload),
+ Perf = amqp10_framing:decode(PerfDesc),
+ ?DEBUG("Channel ~p ->~n~p~n~s~n",
+ [_Channel, amqp10_framing:pprint(Perf),
+ case Rest of
+ <<>> -> <<>>;
+ _ -> rabbit_misc:format(
+ " followed by ~p bytes of content~n", [size(Rest)])
+ end]),
+ case Rest of
+ <<>> -> Perf;
+ _ -> {Perf, Rest}
+ end.
+
+handle_1_0_connection_frame(#'v1_0.open'{ max_frame_size = ClientFrameMax,
+ channel_max = ClientChannelMax,
+ idle_time_out = IdleTimeout,
+ hostname = Hostname },
+ State = #v1{
+ connection_state = starting,
+ connection = Connection,
+ throttle = Throttle,
+ helper_sup = HelperSupPid,
+ sock = Sock}) ->
+ ClientHeartbeatSec = case IdleTimeout of
+ undefined -> 0;
+ {uint, Interval} -> Interval div 1000
+ end,
+ FrameMax = case ClientFrameMax of
+ undefined -> unlimited;
+ {_, FM} -> FM
+ end,
+ {ok, HeartbeatSec} = application:get_env(rabbit, heartbeat),
+ State1 =
+ if (FrameMax =/= unlimited) and (FrameMax < ?FRAME_1_0_MIN_SIZE) ->
+ protocol_error(?V_1_0_AMQP_ERROR_FRAME_SIZE_TOO_SMALL,
+ "frame_max=~w < ~w min size",
+ [FrameMax, ?FRAME_1_0_MIN_SIZE]);
+ true ->
+ {ok, Collector} =
+ rabbit_connection_helper_sup:start_queue_collector(
+ HelperSupPid, <<"AMQP 1.0">>), %% TODO describe the connection
+ SendFun =
+ fun() ->
+ Frame =
+ amqp10_binary_generator:build_heartbeat_frame(),
+ catch rabbit_net:send(Sock, Frame)
+ end,
+
+ Parent = self(),
+ ReceiveFun =
+ fun() ->
+ Parent ! heartbeat_timeout
+ end,
+ %% [2.4.5] the value in idle-time-out SHOULD be half the peer's
+ %% actual timeout threshold
+ ReceiverHeartbeatSec = lists:min([HeartbeatSec * 2, 4294967]),
+ %% TODO: only start the heartbeat receive timer at the next frame
+ Heartbeater =
+ rabbit_heartbeat:start(HelperSupPid, Sock,
+ ClientHeartbeatSec, SendFun,
+ ReceiverHeartbeatSec, ReceiveFun),
+ State#v1{connection_state = running,
+ connection = Connection#v1_connection{
+ frame_max = FrameMax,
+ hostname = Hostname},
+ heartbeater = Heartbeater,
+ queue_collector = Collector}
+ end,
+ HostnameVal = case Hostname of
+ undefined -> undefined;
+ {utf8, Val} -> Val
+ end,
+ rabbit_log:debug("AMQP 1.0 connection.open frame: hostname = ~s, extracted vhost = ~s, idle_timeout = ~p",
+ [HostnameVal, vhost(Hostname), HeartbeatSec * 1000]),
+ %% TODO enforce channel_max
+ ok = send_on_channel0(
+ Sock,
+ #'v1_0.open'{channel_max = ClientChannelMax,
+ max_frame_size = ClientFrameMax,
+ idle_time_out = {uint, HeartbeatSec * 1000},
+ container_id = {utf8, rabbit_nodes:cluster_name()},
+ properties = server_properties()}),
+ Conserve = rabbit_alarm:register(self(), {?MODULE, conserve_resources, []}),
+ control_throttle(
+ State1#v1{throttle = Throttle#throttle{alarmed_by = Conserve}});
+
+handle_1_0_connection_frame(_Frame, State) ->
+ maybe_close(State#v1{connection_state = closing}).
+
+handle_1_0_session_frame(Channel, Frame, State) ->
+ case get({channel, Channel}) of
+ {ch_fr_pid, SessionPid} ->
+ ok = rabbit_amqp1_0_session:process_frame(SessionPid, Frame),
+ case Frame of
+ #'v1_0.end'{} ->
+ erase({channel, Channel}),
+ State;
+ #'v1_0.transfer'{} ->
+ case (State#v1.connection_state =:= blocking) of
+ true ->
+ ok = rabbit_heartbeat:pause_monitor(
+ State#v1.heartbeater),
+ State#v1{connection_state = blocked};
+ false ->
+ State
+ end;
+ _ ->
+ State
+ end;
+ closing ->
+ case Frame of
+ #'v1_0.end'{} ->
+ erase({channel, Channel});
+ _Else ->
+ ok
+ end,
+ State;
+ undefined ->
+ case ?IS_RUNNING(State) of
+ true ->
+ ok = send_to_new_1_0_session(Channel, Frame, State),
+ State;
+ false ->
+ throw({channel_frame_while_starting,
+ Channel, State#v1.connection_state,
+ Frame})
+ end
+ end.
+
+%% TODO: write a proper ANONYMOUS plugin and unify with STOMP
+handle_1_0_sasl_frame(#'v1_0.sasl_init'{mechanism = {symbol, <<"ANONYMOUS">>},
+ hostname = _Hostname},
+ State = #v1{connection_state = starting,
+ sock = Sock}) ->
+ case application:get_env(rabbitmq_amqp1_0, default_user) of
+ {ok, none} ->
+ %% No need to do anything, we will blow up in start_connection
+ ok;
+ {ok, _} ->
+ %% We only need to send the frame, again start_connection
+ %% will set up the default user.
+ Outcome = #'v1_0.sasl_outcome'{code = {ubyte, 0}},
+ ok = send_on_channel0(Sock, Outcome, rabbit_amqp1_0_sasl),
+ switch_callback(State#v1{connection_state = waiting_amqp0100},
+ handshake, 8)
+ end;
+handle_1_0_sasl_frame(#'v1_0.sasl_init'{mechanism = {symbol, Mechanism},
+ initial_response = {binary, Response},
+ hostname = _Hostname},
+ State0 = #v1{connection_state = starting,
+ connection = Connection,
+ sock = Sock}) ->
+ AuthMechanism = auth_mechanism_to_module(Mechanism, Sock),
+ State = State0#v1{connection =
+ Connection#v1_connection{
+ auth_mechanism = {Mechanism, AuthMechanism},
+ auth_state = AuthMechanism:init(Sock)},
+ connection_state = securing},
+ auth_phase_1_0(Response, State);
+handle_1_0_sasl_frame(#'v1_0.sasl_response'{response = {binary, Response}},
+ State = #v1{connection_state = securing}) ->
+ auth_phase_1_0(Response, State);
+handle_1_0_sasl_frame(Frame, State) ->
+ throw({unexpected_1_0_sasl_frame, Frame, State}).
+
+%% We need to handle restarts...
+handle_input(handshake, <<"AMQP", 0, 1, 0, 0>>, State) ->
+ start_1_0_connection(amqp, State);
+
+%% 3 stands for "SASL" (keeping this here for when we do TLS)
+handle_input(handshake, <<"AMQP", 3, 1, 0, 0>>, State) ->
+ start_1_0_connection(sasl, State);
+
+handle_input({frame_header_1_0, Mode},
+ Header = <<Size:32, DOff:8, Type:8, Channel:16>>,
+ State) when DOff >= 2 ->
+ case {Mode, Type} of
+ {amqp, 0} -> ok;
+ {sasl, 1} -> ok;
+ _ -> throw({bad_1_0_header_type, Header, Mode})
+ end,
+ case Size of
+ 8 -> % length inclusive
+ State; %% heartbeat
+ _ ->
+ switch_callback(State, {frame_payload_1_0, Mode, DOff, Channel}, Size - 8)
+ end;
+handle_input({frame_header_1_0, _Mode}, Malformed, _State) ->
+ throw({bad_1_0_header, Malformed});
+handle_input({frame_payload_1_0, Mode, DOff, Channel},
+ FrameBin, State) ->
+ SkipBits = (DOff * 32 - 64), % DOff counts 4-byte words; the 8-byte header is already read
+ <<Skip:SkipBits, FramePayload/binary>> = FrameBin,
+ Skip = Skip, %% hide warning when debug is off
+ handle_1_0_frame(Mode, Channel, FramePayload,
+ switch_callback(State, {frame_header_1_0, Mode}, 8));
+
+handle_input(Callback, Data, _State) ->
+ throw({bad_input, Callback, Data}).
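+
+%% Frame-header arithmetic, by way of example: in the fixed header
+%% <<Size:32, DOff:8, Type:8, Channel:16>>, Size counts the whole
+%% frame including these 8 bytes (so Size = 8 is an empty heartbeat
+%% frame) and DOff counts 4-byte words from the start of the frame to
+%% the body. DOff = 2 means the body follows the header directly;
+%% DOff = 3 gives SkipBits = 3 * 32 - 64 = 32, skipping 4 bytes of
+%% extended header.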
+
+init(Mode, PackedState) ->
+ %% By invoking recvloop here we become 1.0.
+ recvloop(sys:debug_options([]),
+ start_1_0_connection(Mode, unpack_from_0_9_1(PackedState))).
+
+start_1_0_connection(sasl, State = #v1{sock = Sock}) ->
+ send_1_0_handshake(Sock, <<"AMQP",3,1,0,0>>),
+ Ms = {array, symbol,
+ case application:get_env(rabbitmq_amqp1_0, default_user) of
+ {ok, none} -> [];
+ {ok, _} -> [{symbol, <<"ANONYMOUS">>}]
+ end ++
+ [{symbol, list_to_binary(atom_to_list(M))} || M <- auth_mechanisms(Sock)]},
+ Mechanisms = #'v1_0.sasl_mechanisms'{sasl_server_mechanisms = Ms},
+ ok = send_on_channel0(Sock, Mechanisms, rabbit_amqp1_0_sasl),
+ start_1_0_connection0(sasl, State);
+
+start_1_0_connection(amqp,
+ State = #v1{sock = Sock,
+ connection = C = #v1_connection{user = User}}) ->
+ {ok, NoAuthUsername} = application:get_env(rabbitmq_amqp1_0, default_user),
+ case {User, NoAuthUsername} of
+ {none, none} ->
+ send_1_0_handshake(Sock, <<"AMQP",3,1,0,0>>),
+ throw(banned_unauthenticated_connection);
+ {none, Username} ->
+ case rabbit_access_control:check_user_login(
+ list_to_binary(Username), []) of
+ {ok, NoAuthUser} ->
+ State1 = State#v1{
+ connection = C#v1_connection{user = NoAuthUser}},
+ send_1_0_handshake(Sock, <<"AMQP",0,1,0,0>>),
+ start_1_0_connection0(amqp, State1);
+ _ ->
+ send_1_0_handshake(Sock, <<"AMQP",3,1,0,0>>),
+ throw(default_user_missing)
+ end;
+ _ ->
+ send_1_0_handshake(Sock, <<"AMQP",0,1,0,0>>),
+ start_1_0_connection0(amqp, State)
+ end.
+
+start_1_0_connection0(Mode, State = #v1{connection = Connection,
+ helper_sup = HelperSup}) ->
+ ChannelSupSupPid =
+ case Mode of
+ sasl -> undefined;
+ amqp -> {ok, Pid} =
+ supervisor2:start_child(
+ HelperSup,
+ {channel_sup_sup,
+ {rabbit_amqp1_0_session_sup_sup, start_link, []},
+ intrinsic, infinity, supervisor,
+ [rabbit_amqp1_0_session_sup_sup]}),
+ Pid
+ end,
+ switch_callback(State#v1{connection = Connection#v1_connection{
+ timeout_sec = ?NORMAL_TIMEOUT},
+ channel_sup_sup_pid = ChannelSupSupPid,
+ connection_state = starting},
+ {frame_header_1_0, Mode}, 8).
+
+send_1_0_handshake(Sock, Handshake) ->
+ ok = inet_op(fun () -> rabbit_net:send(Sock, Handshake) end).
+
+send_on_channel0(Sock, Method) ->
+ send_on_channel0(Sock, Method, amqp10_framing).
+
+send_on_channel0(Sock, Method, Framing) ->
+ ok = rabbit_amqp1_0_writer:internal_send_command(
+ Sock, 0, Method, Framing).
+
+%% End 1-0
+
+auth_mechanism_to_module(TypeBin, Sock) ->
+ case rabbit_registry:binary_to_type(TypeBin) of
+ {error, not_found} ->
+ protocol_error(?V_1_0_AMQP_ERROR_NOT_FOUND,
+ "unknown authentication mechanism '~s'", [TypeBin]);
+ T ->
+ case {lists:member(T, auth_mechanisms(Sock)),
+ rabbit_registry:lookup_module(auth_mechanism, T)} of
+ {true, {ok, Module}} ->
+ Module;
+ _ ->
+ protocol_error(?V_1_0_AMQP_ERROR_NOT_FOUND,
+ "invalid authentication mechanism '~s'", [T])
+ end
+ end.
+
+auth_mechanisms(Sock) ->
+ {ok, Configured} = application:get_env(rabbit, auth_mechanisms),
+ [Name || {Name, Module} <- rabbit_registry:lookup_all(auth_mechanism),
+ Module:should_offer(Sock), lists:member(Name, Configured)].
+
+%% Begin 1-0
+
+auth_phase_1_0(Response,
+ State = #v1{connection = Connection =
+ #v1_connection{auth_mechanism = {Name, AuthMechanism},
+ auth_state = AuthState},
+ sock = Sock}) ->
+ case AuthMechanism:handle_response(Response, AuthState) of
+ {refused, User, Msg, Args} ->
+ %% We don't trust the client at this point - force them to wait
+ %% for a bit before sending the sasl outcome frame
+ %% so they can't DOS us with repeated failed logins etc.
+ rabbit_core_metrics:auth_attempt_failed(<<>>, User, amqp10),
+ timer:sleep(?SILENT_CLOSE_DELAY * 1000),
+ Outcome = #'v1_0.sasl_outcome'{code = {ubyte, 1}},
+ ok = send_on_channel0(Sock, Outcome, rabbit_amqp1_0_sasl),
+ protocol_error(
+ ?V_1_0_AMQP_ERROR_UNAUTHORIZED_ACCESS, "~s login refused: ~s",
+ [Name, io_lib:format(Msg, Args)]);
+ {protocol_error, Msg, Args} ->
+ rabbit_core_metrics:auth_attempt_failed(<<>>, <<>>, amqp10),
+ protocol_error(?V_1_0_AMQP_ERROR_DECODE_ERROR, Msg, Args);
+ {challenge, Challenge, AuthState1} ->
+ rabbit_core_metrics:auth_attempt_succeeded(<<>>, <<>>, amqp10),
+ Secure = #'v1_0.sasl_challenge'{challenge = {binary, Challenge}},
+ ok = send_on_channel0(Sock, Secure, rabbit_amqp1_0_sasl),
+ State#v1{connection = Connection#v1_connection{
+ auth_state = AuthState1}};
+ {ok, User = #user{username = Username}} ->
+ case rabbit_access_control:check_user_loopback(Username, Sock) of
+ ok ->
+ rabbit_core_metrics:auth_attempt_succeeded(<<>>, Username, amqp10),
+ ok;
+ not_allowed ->
+ rabbit_core_metrics:auth_attempt_failed(<<>>, Username, amqp10),
+ protocol_error(
+ ?V_1_0_AMQP_ERROR_UNAUTHORIZED_ACCESS,
+ "user '~s' can only connect via localhost",
+ [Username])
+ end,
+ Outcome = #'v1_0.sasl_outcome'{code = {ubyte, 0}},
+ ok = send_on_channel0(Sock, Outcome, rabbit_amqp1_0_sasl),
+ switch_callback(
+ State#v1{connection_state = waiting_amqp0100,
+ connection = Connection#v1_connection{user = User}},
+ handshake, 8)
+ end.
+
+send_to_new_1_0_session(Channel, Frame, State) ->
+ #v1{sock = Sock, queue_collector = Collector,
+ channel_sup_sup_pid = ChanSupSup,
+ connection = #v1_connection{frame_max = FrameMax,
+ hostname = Hostname,
+ user = User},
+ proxy_socket = ProxySocket} = State,
+ %% Note: the equivalent, start_channel is in channel_sup_sup
+ case rabbit_amqp1_0_session_sup_sup:start_session(
+ %% NB subtract fixed frame header size
+ ChanSupSup, {amqp10_framing, Sock, Channel,
+ case FrameMax of
+ unlimited -> unlimited;
+ _ -> FrameMax - 8
+ end,
+ self(), User, vhost(Hostname), Collector, ProxySocket}) of
+ {ok, ChSupPid, ChFrPid} ->
+ erlang:monitor(process, ChFrPid),
+ put({channel, Channel}, {ch_fr_pid, ChFrPid}),
+ put({ch_sup_pid, ChSupPid}, {{channel, Channel}, {ch_fr_pid, ChFrPid}}),
+ put({ch_fr_pid, ChFrPid}, {channel, Channel}),
+ ok = rabbit_amqp1_0_session:process_frame(ChFrPid, Frame);
+ {error, {not_allowed, _}} ->
+ rabbit_log:error("AMQP 1.0: user '~s' is not allowed to access virtual host '~s'",
+ [User#user.username, vhost(Hostname)]),
+ %% Let's skip the supervisor trace, this is an expected error
+ throw({error, {not_allowed, User#user.username}});
+ {error, _} = E ->
+ throw(E)
+ end.
+
+vhost({utf8, <<"vhost:", VHost/binary>>}) ->
+ VHost;
+vhost(_) ->
+ application:get_env(rabbitmq_amqp1_0, default_vhost,
+ application:get_env(rabbit, default_vhost, <<"/">>)).
+
+%% End 1-0
+
+info(Pid, InfoItems) ->
+ case InfoItems -- ?INFO_ITEMS of
+ [] ->
+ Ref = erlang:monitor(process, Pid),
+ Pid ! {info, InfoItems, self()},
+ receive
+ {info_reply, Items} ->
+ erlang:demonitor(Ref),
+ Items;
+ {'DOWN', _, process, Pid, _} ->
+ []
+ end;
+ UnknownItems -> throw({bad_argument, UnknownItems})
+ end.
+
+info_internal(node, #v1{}) -> node();
+info_internal(auth_mechanism, #v1{connection = #v1_connection{auth_mechanism = none}}) ->
+ none;
+info_internal(auth_mechanism, #v1{connection = #v1_connection{auth_mechanism = {Name, _Mod}}}) ->
+ Name;
+info_internal(host, #v1{connection = #v1_connection{hostname = {utf8, Val}}}) ->
+ Val;
+info_internal(host, #v1{connection = #v1_connection{hostname = Val}}) ->
+ Val;
+info_internal(frame_max, #v1{connection = #v1_connection{frame_max = Val}}) ->
+ Val;
+info_internal(timeout, #v1{connection = #v1_connection{timeout_sec = Val}}) ->
+ Val;
+info_internal(user,
+ #v1{connection = #v1_connection{user = #user{username = none}}}) ->
+ '';
+info_internal(username,
+ #v1{connection = #v1_connection{user = #user{username = Val}}}) ->
+ Val;
+info_internal(state, #v1{connection_state = Val}) ->
+ Val;
+info_internal(SockStat, S) when SockStat =:= recv_oct;
+ SockStat =:= recv_cnt;
+ SockStat =:= send_oct;
+ SockStat =:= send_cnt;
+ SockStat =:= send_pend ->
+ socket_info(fun (Sock) -> rabbit_net:getstat(Sock, [SockStat]) end,
+ fun ([{_, I}]) -> I end, S);
+info_internal(ssl, #v1{sock = Sock}) -> rabbit_net:is_ssl(Sock);
+info_internal(ssl_protocol, S) -> ssl_info(fun ({P, _}) -> P end, S);
+info_internal(ssl_key_exchange, S) -> ssl_info(fun ({_, {K, _, _}}) -> K end, S);
+info_internal(ssl_cipher, S) -> ssl_info(fun ({_, {_, C, _}}) -> C end, S);
+info_internal(ssl_hash, S) -> ssl_info(fun ({_, {_, _, H}}) -> H end, S);
+info_internal(peer_cert_issuer, S) ->
+ cert_info(fun rabbit_ssl:peer_cert_issuer/1, S);
+info_internal(peer_cert_subject, S) ->
+ cert_info(fun rabbit_ssl:peer_cert_subject/1, S);
+info_internal(peer_cert_validity, S) ->
+ cert_info(fun rabbit_ssl:peer_cert_validity/1, S).
+
+%% From rabbit_reader
+socket_info(Get, Select, #v1{sock = Sock}) ->
+ case Get(Sock) of
+ {ok, T} -> Select(T);
+ {error, _} -> ''
+ end.
+
+ssl_info(F, #v1{sock = Sock}) ->
+ case rabbit_net:ssl_info(Sock) of
+ nossl -> '';
+ {error, _} -> '';
+ {ok, Items} ->
+ P = proplists:get_value(protocol, Items),
+ #{cipher := C,
+ key_exchange := K,
+ mac := H} = proplists:get_value(selected_cipher_suite, Items),
+ F({P, {K, C, H}})
+ end.
+
+cert_info(F, #v1{sock = Sock}) ->
+ case rabbit_net:peercert(Sock) of
+ nossl -> '';
+ {error, _} -> '';
+ {ok, Cert} -> list_to_binary(F(Cert))
+ end.
diff --git a/deps/rabbitmq_amqp1_0/src/rabbit_amqp1_0_session.erl b/deps/rabbitmq_amqp1_0/src/rabbit_amqp1_0_session.erl
new file mode 100644
index 0000000000..6fbd5bb4d1
--- /dev/null
+++ b/deps/rabbitmq_amqp1_0/src/rabbit_amqp1_0_session.erl
@@ -0,0 +1,398 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_amqp1_0_session).
+
+-export([process_frame/2,
+ get_info/1]).
+
+-export([init/1, begin_/2, maybe_init_publish_id/2, record_delivery/3,
+ incr_incoming_id/1, next_delivery_id/1, transfers_left/1,
+ record_transfers/2, bump_outgoing_window/1,
+ record_outgoing/4, settle/3, flow_fields/2, channel/1,
+ flow/2, ack/2, validate_attach/1]).
+
+-import(rabbit_amqp1_0_util, [protocol_error/3,
+ serial_add/2, serial_diff/2, serial_compare/2]).
+
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include("rabbit_amqp1_0.hrl").
+
+-define(MAX_SESSION_WINDOW_SIZE, 65535).
+-define(DEFAULT_MAX_HANDLE, 16#ffffffff).
+-define(CALL_TIMEOUT, 30000). % 30s - matches CLOSE_TIMEOUT
+
+-record(session, {channel_num, %% we just use the incoming (AMQP 1.0) channel number
+ remote_incoming_window, % keep track of the window until we're told
+ remote_outgoing_window,
+ next_incoming_id, % just to keep a check
+ incoming_window_max, % )
+ incoming_window, % ) so we know when to open the session window
+ next_outgoing_id = 0, % arbitrary count of outgoing transfers
+ outgoing_window,
+ outgoing_window_max,
+ next_publish_id, %% the 0-9-1-side counter for confirms
+ next_delivery_id = 0,
+ incoming_unsettled_map,
+ outgoing_unsettled_map }).
+
+%% We record delivery_id -> #outgoing_delivery{}, so that we can
+%% respond to dispositions about messages we've sent. NB the
+%% delivery-tag doubles as the id we use when acking the rabbit
+%% delivery.
+-record(outgoing_delivery, {delivery_tag, expected_outcome}).
+
+%% We record confirm_id -> #incoming_delivery{} so we can relay
+%% confirms from the broker back to the sending client. NB we have
+%% only one possible outcome, so there's no need to record it here.
+-record(incoming_delivery, {delivery_id}).
+
+get_info(Pid) ->
+ gen_server2:call(Pid, info, ?CALL_TIMEOUT).
+
+process_frame(Pid, Frame) ->
+ credit_flow:send(Pid),
+ gen_server2:cast(Pid, {frame, Frame, self()}).
+
+init(Channel) ->
+ #session{channel_num = Channel,
+ next_publish_id = 0,
+ incoming_unsettled_map = gb_trees:empty(),
+ outgoing_unsettled_map = gb_trees:empty()}.
+
+%% Session window:
+%%
+%% Each session has two abstract[1] buffers, one to record the
+%% unsettled state of incoming messages, one to record the unsettled
+%% state of outgoing messages. In general we want to bound these
+%% buffers; but if we bound them, and don't tell the other side, we
+%% may end up deadlocking the other party.
+%%
+%% Hence the flow frame contains a session window, expressed as the
+%% next-id and the window size for each of the buffers. The frame
+%% refers to the window of the sender of the frame, of course.
+%%
+%% The numbers work this way: for the outgoing window, the next-id
+%% counts the next transfer the session will send, and it will stop
+%% sending at next-id + window. For the incoming window, the next-id
+%% counts the next transfer id expected, and it will not accept
+%% messages beyond next-id + window (in fact it will probably close
+%% the session, since sending outside the window is a transgression of
+%% the protocol).
+%%
+%% We may as well just pick a value for the incoming and outgoing
+%% windows; choosing based on what the client says may just stop
+%% things dead, if the value is zero for instance.
+%%
+%% [1] Abstract because there probably won't be a data structure with
+%% a size directly related to transfers; settlement is done with
+%% delivery-id, which may refer to one or more transfers.
+begin_(#'v1_0.begin'{next_outgoing_id = {uint, RemoteNextOut},
+ incoming_window = {uint, RemoteInWindow},
+ outgoing_window = {uint, RemoteOutWindow},
+ handle_max = HandleMax0},
+ Session = #session{next_outgoing_id = LocalNextOut,
+ channel_num = Channel}) ->
+ InWindow = ?MAX_SESSION_WINDOW_SIZE,
+ OutWindow = ?MAX_SESSION_WINDOW_SIZE,
+ HandleMax = case HandleMax0 of
+ {uint, Max} -> Max;
+ _ -> ?DEFAULT_MAX_HANDLE
+ end,
+ {ok, #'v1_0.begin'{remote_channel = {ushort, Channel},
+ handle_max = {uint, HandleMax},
+ next_outgoing_id = {uint, LocalNextOut},
+ incoming_window = {uint, InWindow},
+ outgoing_window = {uint, OutWindow}},
+ Session#session{
+ outgoing_window = OutWindow,
+ outgoing_window_max = OutWindow,
+ next_incoming_id = RemoteNextOut,
+ remote_incoming_window = RemoteInWindow,
+ remote_outgoing_window = RemoteOutWindow,
+ incoming_window = InWindow,
+ incoming_window_max = InWindow},
+ OutWindow}.
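+
+%% By way of example (illustrative values): on receiving a begin with
+%% next_outgoing_id = 0, incoming_window = 100 and outgoing_window =
+%% 500, we reply advertising our fixed 65535/65535 windows and record
+%% remote_incoming_window = 100, meaning we may emit at most 100
+%% transfers before a further flow from the peer reopens its window.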
+
+validate_attach(#'v1_0.attach'{target = #'v1_0.coordinator'{}}) ->
+ protocol_error(?V_1_0_AMQP_ERROR_NOT_IMPLEMENTED,
+ "Transactions not supported", []);
+validate_attach(#'v1_0.attach'{unsettled = Unsettled,
+ incomplete_unsettled = IncompleteSettled})
+ when Unsettled =/= undefined andalso Unsettled =/= {map, []} orelse
+ IncompleteSettled =:= true ->
+ protocol_error(?V_1_0_AMQP_ERROR_NOT_IMPLEMENTED,
+ "Link recovery not supported", []);
+validate_attach(
+ #'v1_0.attach'{snd_settle_mode = SndSettleMode,
+ rcv_settle_mode = ?V_1_0_RECEIVER_SETTLE_MODE_SECOND})
+ when SndSettleMode =/= ?V_1_0_SENDER_SETTLE_MODE_SETTLED ->
+ protocol_error(?V_1_0_AMQP_ERROR_NOT_IMPLEMENTED,
+ "rcv-settle-mode second not supported", []);
+validate_attach(#'v1_0.attach'{}) ->
+ ok.
+
+maybe_init_publish_id(false, Session) ->
+ Session;
+maybe_init_publish_id(true, Session = #session{next_publish_id = Id}) ->
+ Session#session{next_publish_id = erlang:max(1, Id)}.
+
+record_delivery(DeliveryId, Settled,
+ Session = #session{next_publish_id = Id,
+ incoming_unsettled_map = Unsettled}) ->
+ Id1 = case Id of
+ 0 -> 0;
+ _ -> Id + 1 % this ought to be a serial number in the broker, but isn't
+ end,
+ Unsettled1 = case Settled of
+ true ->
+ Unsettled;
+ false ->
+ gb_trees:insert(Id,
+ #incoming_delivery{
+ delivery_id = DeliveryId },
+ Unsettled)
+ end,
+ Session#session{
+ next_publish_id = Id1,
+ incoming_unsettled_map = Unsettled1}.
+
+incr_incoming_id(Session = #session{ next_incoming_id = NextIn,
+ incoming_window = InWindow,
+ incoming_window_max = InWindowMax,
+ remote_outgoing_window = RemoteOut }) ->
+ NewOutWindow = RemoteOut - 1,
+ InWindow1 = InWindow - 1,
+ NewNextIn = serial_add(NextIn, 1),
+ %% If we've reached halfway, open the window
+ {Flows, NewInWindow} =
+ if InWindow1 =< (InWindowMax div 2) ->
+ {[#'v1_0.flow'{}], InWindowMax};
+ true ->
+ {[], InWindow1}
+ end,
+ {Flows, Session#session{ next_incoming_id = NewNextIn,
+ incoming_window = NewInWindow,
+ remote_outgoing_window = NewOutWindow}}.
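+
+%% E.g. with incoming_window_max = 65535 the window may drain to
+%% 32767 before a single flow frame resets it to the maximum, halving
+%% the flow traffic compared with topping the window up per transfer.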
+
+next_delivery_id(#session{next_delivery_id = Num}) -> Num.
+
+transfers_left(#session{remote_incoming_window = RemoteWindow,
+ outgoing_window = LocalWindow}) ->
+ {LocalWindow, RemoteWindow}.
+
+record_outgoing(DeliveryTag, SendSettled, DefaultOutcome,
+ Session = #session{next_delivery_id = DeliveryId,
+ outgoing_unsettled_map = Unsettled}) ->
+ Unsettled1 = case SendSettled of
+ true ->
+ Unsettled;
+ false ->
+ gb_trees:insert(DeliveryId,
+ #outgoing_delivery{
+ delivery_tag = DeliveryTag,
+ expected_outcome = DefaultOutcome },
+ Unsettled)
+ end,
+ Session#session{outgoing_unsettled_map = Unsettled1,
+ next_delivery_id = serial_add(DeliveryId, 1)}.
+
+record_transfers(NumTransfers,
+ Session = #session{ remote_incoming_window = RemoteInWindow,
+ outgoing_window = OutWindow,
+ next_outgoing_id = NextOutId }) ->
+ Session#session{ remote_incoming_window = RemoteInWindow - NumTransfers,
+ outgoing_window = OutWindow - NumTransfers,
+ next_outgoing_id = serial_add(NextOutId, NumTransfers) }.
+
+%% Make sure we have "room" in our outgoing window by bumping the
+%% window if necessary. TODO this *could* be based on how much
+%% notional "room" there is in outgoing_unsettled.
+bump_outgoing_window(Session = #session{ outgoing_window_max = OutMax }) ->
+ {#'v1_0.flow'{}, Session#session{ outgoing_window = OutMax }}.
+
+%% We've been told that the fate of a delivery has been determined.
+%% Generally if the other side has not settled it, we will do so. If
+%% the other side /has/ settled it, we don't need to reply -- it's
+%% already forgotten its state for the delivery anyway.
+settle(Disp = #'v1_0.disposition'{first = First0,
+ last = Last0,
+ state = _Outcome,
+ settled = Settled},
+ Session = #session{outgoing_unsettled_map = Unsettled},
+ UpstreamAckFun) ->
+ {uint, First} = First0,
+ %% Last may be omitted, in which case it's the same as first
+ Last = case Last0 of
+ {uint, L} -> L;
+ undefined -> First
+ end,
+ %% The other party may be talking about something we've already
+ %% forgotten; this isn't a crime, we can just ignore it.
+ case gb_trees:is_empty(Unsettled) of
+ true ->
+ {none, Session};
+ false ->
+ {LWM, _} = gb_trees:smallest(Unsettled),
+ {HWM, _} = gb_trees:largest(Unsettled),
+ if Last < LWM ->
+ {none, Session};
+ %% TODO this should probably be an error, rather than ignored.
+ First > HWM ->
+ {none, Session};
+ true ->
+ Unsettled1 =
+ lists:foldl(
+ fun (Delivery, Map) ->
+ case gb_trees:lookup(Delivery, Map) of
+ none ->
+ Map;
+ {value, Entry} ->
+ #outgoing_delivery{delivery_tag = DeliveryTag } = Entry,
+ ?DEBUG("Settling ~p with ~p~n", [Delivery, _Outcome]),
+ UpstreamAckFun(DeliveryTag),
+ gb_trees:delete(Delivery, Map)
+ end
+ end,
+ Unsettled, lists:seq(erlang:max(LWM, First),
+ erlang:min(HWM, Last))),
+ {case Settled of
+ true -> none;
+ false -> Disp#'v1_0.disposition'{ settled = true,
+ role = ?SEND_ROLE }
+ end,
+ Session#session{outgoing_unsettled_map = Unsettled1}}
+ end
+ end.
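+
+%% For example (illustrative ids): a disposition with first = 3 and
+%% last omitted settles exactly delivery-id 3; if the unsettled map
+%% holds ids 2..5, the matching delivery-tag is acked upstream, id 3
+%% is deleted from the map and, unless the peer already settled, the
+%% disposition is echoed back with settled = true.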
+
+flow_fields(Frames, Session) when is_list(Frames) ->
+ [flow_fields(F, Session) || F <- Frames];
+
+flow_fields(Flow = #'v1_0.flow'{},
+ #session{next_outgoing_id = NextOut,
+ next_incoming_id = NextIn,
+ outgoing_window = OutWindow,
+ incoming_window = InWindow}) ->
+ Flow#'v1_0.flow'{
+ next_outgoing_id = {uint, NextOut},
+ outgoing_window = {uint, OutWindow},
+ next_incoming_id = {uint, NextIn},
+ incoming_window = {uint, InWindow}};
+
+flow_fields(Frame, _Session) ->
+ Frame.
+
+channel(#session{channel_num = Channel}) -> Channel.
+
+%% We should already know the next outgoing transfer sequence number,
+%% because it's one more than the last transfer we saw; and, we don't
+%% need to know the next incoming transfer sequence number (although
+%% we might use it to detect congestion -- e.g., if it's lagging far
+%% behind our outgoing sequence number). We probably care about the
+%% outgoing window, since we want to keep it open by sending back
+%% settlements, but there's not much we can do to hurry things along.
+%%
+%% We do care about the incoming window, because we must not send
+%% beyond it. This may cause us problems, even in normal operation,
+%% since we want our unsettled transfers to be exactly those that are
+%% held as unacked by the backing channel; however, the far side may
+%% close the window while we still have messages pending transfer, and
+%% indeed, an individual message may take more than one 'slot'.
+%%
+%% Note that this isn't a race so far as AMQP 1.0 is concerned; it's
+%% only because AMQP 0-9-1 defines QoS in terms of the total number of
+%% unacked messages, whereas 1.0 has an explicit window.
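+%%
+%% For illustration: if transfers_left/1 yields {LocalWindow = 10,
+%% RemoteWindow = 3}, the session process may emit at most
+%% min(10, 3) = 3 transfer frames before a window must be replenished.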
+flow(#'v1_0.flow'{next_incoming_id = FlowNextIn0,
+ incoming_window = {uint, FlowInWindow},
+ next_outgoing_id = {uint, FlowNextOut},
+ outgoing_window = {uint, FlowOutWindow}},
+ Session = #session{next_incoming_id = LocalNextIn,
+ next_outgoing_id = LocalNextOut}) ->
+ %% The far side may not have our begin{} with our next-transfer-id
+ FlowNextIn = case FlowNextIn0 of
+ {uint, Id} -> Id;
+ undefined -> LocalNextOut
+ end,
+ case serial_compare(FlowNextOut, LocalNextIn) of
+ equal ->
+ case serial_compare(FlowNextIn, LocalNextOut) of
+ greater ->
+ protocol_error(?V_1_0_SESSION_ERROR_WINDOW_VIOLATION,
+ "Remote incoming id (~p) leads "
+ "local outgoing id (~p)",
+ [FlowNextIn, LocalNextOut]);
+ equal ->
+ Session#session{
+ remote_outgoing_window = FlowOutWindow,
+ remote_incoming_window = FlowInWindow};
+ less ->
+ Session#session{
+ remote_outgoing_window = FlowOutWindow,
+ remote_incoming_window =
+ serial_diff(serial_add(FlowNextIn, FlowInWindow),
+ LocalNextOut)}
+ end;
+ _ ->
+ case application:get_env(rabbitmq_amqp1_0, protocol_strict_mode) of
+ {ok, false} ->
+ Session#session{next_incoming_id = FlowNextOut};
+ {ok, true} ->
+ protocol_error(?V_1_0_SESSION_ERROR_WINDOW_VIOLATION,
+ "Remote outgoing id (~p) not equal to "
+ "local incoming id (~p)",
+ [FlowNextOut, LocalNextIn])
+ end
+ end.
+
+%% An acknowledgement from the queue, which we'll get if we are
+%% using confirms.
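+%% For example, #'basic.ack'{delivery_tag = 7, multiple = true} settles
+%% every incoming delivery with tag =< 7 and is answered with a single
+%% disposition spanning their delivery-ids.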
+ack(#'basic.ack'{delivery_tag = DTag, multiple = Multiple},
+ Session = #session{incoming_unsettled_map = Unsettled}) ->
+ {DeliveryIds, Unsettled1} =
+ case Multiple of
+ true -> acknowledgement_range(DTag, Unsettled);
+ false -> case gb_trees:lookup(DTag, Unsettled) of
+ {value, #incoming_delivery{ delivery_id = Id }} ->
+ {[Id], gb_trees:delete(DTag, Unsettled)};
+ none ->
+ {[], Unsettled}
+ end
+ end,
+ Disposition = case DeliveryIds of
+ [] -> [];
+ _ -> [acknowledgement(
+ DeliveryIds,
+ #'v1_0.disposition'{role = ?RECV_ROLE})]
+ end,
+ {Disposition,
+ Session#session{incoming_unsettled_map = Unsettled1}}.
+
+acknowledgement_range(DTag, Unsettled) ->
+ acknowledgement_range(DTag, Unsettled, []).
+
+acknowledgement_range(DTag, Unsettled, Acc) ->
+ case gb_trees:is_empty(Unsettled) of
+ true ->
+ {lists:reverse(Acc), Unsettled};
+ false ->
+ {DTag1, #incoming_delivery{ delivery_id = Id}} =
+ gb_trees:smallest(Unsettled),
+ case DTag1 =< DTag of
+ true ->
+ {_K, _V, Unsettled1} = gb_trees:take_smallest(Unsettled),
+ acknowledgement_range(DTag, Unsettled1,
+ [Id|Acc]);
+ false ->
+ {lists:reverse(Acc), Unsettled}
+ end
+ end.
+
+acknowledgement(DeliveryIds, Disposition) ->
+ Disposition#'v1_0.disposition'{ first = {uint, hd(DeliveryIds)},
+ last = {uint, lists:last(DeliveryIds)},
+ settled = true,
+ state = #'v1_0.accepted'{} }.
diff --git a/deps/rabbitmq_amqp1_0/src/rabbit_amqp1_0_session_process.erl b/deps/rabbitmq_amqp1_0/src/rabbit_amqp1_0_session_process.erl
new file mode 100644
index 0000000000..5fcb720d3d
--- /dev/null
+++ b/deps/rabbitmq_amqp1_0/src/rabbit_amqp1_0_session_process.erl
@@ -0,0 +1,419 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_amqp1_0_session_process).
+
+%% Transitional step until we can require Erlang/OTP 21 and
+%% use the now recommended try/catch syntax for obtaining the stack trace.
+-compile(nowarn_deprecated_function).
+
+-behaviour(gen_server2).
+
+-export([init/1, terminate/2, code_change/3,
+ handle_call/3, handle_cast/2, handle_info/2]).
+
+-export([start_link/1]).
+-export([info/1]).
+
+-record(state, {backing_connection, backing_channel, frame_max,
+ reader_pid, writer_pid, buffer, session}).
+
+-record(pending, {delivery_tag, frames, link_handle }).
+
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include("rabbit_amqp1_0.hrl").
+
+-import(rabbit_amqp1_0_util, [protocol_error/3]).
+-import(rabbit_amqp1_0_link_util, [ctag_to_handle/1]).
+
+start_link(Args) ->
+ gen_server2:start_link(?MODULE, Args, []).
+
+info(Pid) ->
+ gen_server2:call(Pid, info, infinity).
+%% ---------
+
+init({Channel, ReaderPid, WriterPid, #user{username = Username}, VHost,
+ FrameMax, AdapterInfo, _Collector}) ->
+ process_flag(trap_exit, true),
+ case amqp_connection:start(
+ #amqp_params_direct{username = Username,
+ virtual_host = VHost,
+ adapter_info = AdapterInfo}) of
+ {ok, Conn} ->
+ case amqp_connection:open_channel(Conn) of
+ {ok, Ch} ->
+ monitor(process, Ch),
+ {ok, #state{backing_connection = Conn,
+ backing_channel = Ch,
+ reader_pid = ReaderPid,
+ writer_pid = WriterPid,
+ frame_max = FrameMax,
+ buffer = queue:new(),
+ session = rabbit_amqp1_0_session:init(Channel)
+ }};
+ {error, Reason} ->
+ rabbit_log:warning("Closing session for connection ~p:~n~p~n",
+ [ReaderPid, Reason]),
+ {stop, Reason}
+ end;
+ {error, Reason} ->
+ rabbit_log:warning("Closing session for connection ~p:~n~p~n",
+ [ReaderPid, Reason]),
+ {stop, Reason}
+ end.
+
+terminate(_Reason, _State = #state{backing_connection = Conn}) ->
+ rabbit_misc:with_exit_handler(fun () -> ok end,
+ fun () -> amqp_connection:close(Conn) end).
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+handle_call(info, _From, #state{reader_pid = ReaderPid,
+ backing_connection = Conn} = State) ->
+ Info = [{reader, ReaderPid}, {connection, Conn}],
+ {reply, Info, State};
+handle_call(Msg, _From, State) ->
+ {reply, {error, not_understood, Msg}, State}.
+
+handle_info(#'basic.consume_ok'{}, State) ->
+ %% Nothing to do here
+ {noreply, State};
+
+handle_info({#'basic.deliver'{ consumer_tag = ConsumerTag,
+ delivery_tag = DeliveryTag } = Deliver, Msg},
+ State = #state{frame_max = FrameMax,
+ buffer = Buffer,
+ session = Session}) ->
+ Handle = ctag_to_handle(ConsumerTag),
+ case get({out, Handle}) of
+ undefined ->
+ %% TODO handle missing link -- why does the queue think it's there?
+ rabbit_log:warning("Delivery to non-existent consumer ~p",
+ [ConsumerTag]),
+ {noreply, State};
+ Link ->
+ {ok, Frames, Session1} =
+ rabbit_amqp1_0_outgoing_link:delivery(
+ Deliver, Msg, FrameMax, Handle, Session, Link),
+ Pending = #pending{ delivery_tag = DeliveryTag,
+ frames = Frames,
+ link_handle = Handle },
+ Buffer1 = queue:in(Pending, Buffer),
+ {noreply, run_buffer(
+ state(Session1, State#state{ buffer = Buffer1 }))}
+ end;
+
+%% A message from the queue saying that there are no more messages
+handle_info(#'basic.credit_drained'{consumer_tag = CTag} = CreditDrained,
+ State = #state{writer_pid = WriterPid,
+ session = Session}) ->
+ Handle = ctag_to_handle(CTag),
+ Link = get({out, Handle}),
+ {Flow0, Link1} = rabbit_amqp1_0_outgoing_link:credit_drained(
+ CreditDrained, Handle, Link),
+ Flow = rabbit_amqp1_0_session:flow_fields(Flow0, Session),
+ rabbit_amqp1_0_writer:send_command(WriterPid, Flow),
+ put({out, Handle}, Link1),
+ {noreply, State};
+
+handle_info(#'basic.ack'{} = Ack, State = #state{writer_pid = WriterPid,
+ session = Session}) ->
+ {Reply, Session1} = rabbit_amqp1_0_session:ack(Ack, Session),
+ [rabbit_amqp1_0_writer:send_command(WriterPid, F) ||
+ F <- rabbit_amqp1_0_session:flow_fields(Reply, Session)],
+ {noreply, state(Session1, State)};
+
+handle_info({bump_credit, Msg}, State) ->
+ credit_flow:handle_bump_msg(Msg),
+ {noreply, State};
+
+%% TODO these pretty much copied wholesale from rabbit_channel
+handle_info({'EXIT', WriterPid, Reason = {writer, send_failed, _Error}},
+ State = #state{writer_pid = WriterPid}) ->
+ State#state.reader_pid !
+ {channel_exit, rabbit_amqp1_0_session:channel(session(State)), Reason},
+ {stop, normal, State};
+handle_info({'EXIT', _Pid, Reason}, State) ->
+ {stop, Reason, State};
+handle_info({'DOWN', _MRef, process, Ch, Reason},
+ #state{reader_pid = ReaderPid,
+ writer_pid = Sock,
+ backing_channel = Ch} = State) ->
+ Error =
+ case Reason of
+ {shutdown, {server_initiated_close, Code, Msg}} ->
+ #'v1_0.error'{condition = rabbit_amqp1_0_channel:convert_code(Code),
+ description = {utf8, Msg}};
+ _ ->
+ #'v1_0.error'{condition = ?V_1_0_AMQP_ERROR_INTERNAL_ERROR,
+ description = {utf8,
+ list_to_binary(
+ lists:flatten(
+ io_lib:format("~w", [Reason])))}}
+ end,
+ End = #'v1_0.end'{ error = Error },
+ rabbit_log:warning("Closing session for connection ~p:~n~p~n",
+ [ReaderPid, Reason]),
+ ok = rabbit_amqp1_0_writer:send_command_sync(Sock, End),
+ {stop, normal, State};
+handle_info({'DOWN', _MRef, process, _QPid, _Reason}, State) ->
+ %% TODO do we care any more since we're using direct client?
+ {noreply, State}. % TODO rabbit_channel uses queue_blocked?
+
+handle_cast({frame, Frame, FlowPid},
+ State = #state{ reader_pid = ReaderPid,
+ writer_pid = Sock }) ->
+ credit_flow:ack(FlowPid),
+ try handle_control(Frame, State) of
+ {reply, Replies, NewState} when is_list(Replies) ->
+ lists:foreach(fun (Reply) ->
+ rabbit_amqp1_0_writer:send_command(Sock, Reply)
+ end, Replies),
+ noreply(NewState);
+ {reply, Reply, NewState} ->
+ rabbit_amqp1_0_writer:send_command(Sock, Reply),
+ noreply(NewState);
+ {noreply, NewState} ->
+ noreply(NewState);
+ stop ->
+ {stop, normal, State}
+ catch exit:Reason = #'v1_0.error'{} ->
+ %% TODO shut down nicely like rabbit_channel
+ End = #'v1_0.end'{ error = Reason },
+ rabbit_log:warning("Closing session for connection ~p:~n~p~n",
+ [ReaderPid, Reason]),
+ ok = rabbit_amqp1_0_writer:send_command_sync(Sock, End),
+ {stop, normal, State};
+ exit:normal ->
+ {stop, normal, State};
+ _:Reason:Stacktrace ->
+ {stop, {Reason, Stacktrace}, State}
+ end.
+
+%% TODO rabbit_channel returns {noreply, State, hibernate}, but that
+%% appears to break things here (it stops the session responding to
+%% frames).
+noreply(State) ->
+ {noreply, State}.
+
+%% ------
+
+handle_control(#'v1_0.begin'{} = Begin,
+ State = #state{backing_channel = Ch,
+ session = Session}) ->
+ {ok, Reply, Session1, Prefetch} =
+ rabbit_amqp1_0_session:begin_(Begin, Session),
+ %% Attempt to limit the number of "at risk" messages we can have.
+ rabbit_amqp1_0_channel:cast(Ch, #'basic.qos'{prefetch_count = Prefetch}),
+ reply(Reply, state(Session1, State));
+
+handle_control(#'v1_0.attach'{handle = Handle,
+ role = ?SEND_ROLE} = Attach,
+ State = #state{backing_channel = BCh,
+ backing_connection = Conn}) ->
+ ok = rabbit_amqp1_0_session:validate_attach(Attach),
+ {ok, Reply, Link, Confirm} =
+ with_disposable_channel(
+ Conn, fun (DCh) ->
+ rabbit_amqp1_0_incoming_link:attach(Attach, BCh, DCh)
+ end),
+ put({in, Handle}, Link),
+ reply(Reply, state(rabbit_amqp1_0_session:maybe_init_publish_id(
+ Confirm, session(State)), State));
+
+handle_control(#'v1_0.attach'{handle = Handle,
+ role = ?RECV_ROLE} = Attach,
+ State = #state{backing_channel = BCh,
+ backing_connection = Conn}) ->
+ ok = rabbit_amqp1_0_session:validate_attach(Attach),
+ {ok, Reply, Link} =
+ with_disposable_channel(
+ Conn, fun (DCh) ->
+ rabbit_amqp1_0_outgoing_link:attach(Attach, BCh, DCh)
+ end),
+ put({out, Handle}, Link),
+ reply(Reply, State);
+
+handle_control({Txfr = #'v1_0.transfer'{handle = Handle},
+ MsgPart},
+ State = #state{backing_channel = BCh,
+ session = Session}) ->
+ case get({in, Handle}) of
+ undefined ->
+ protocol_error(?V_1_0_AMQP_ERROR_ILLEGAL_STATE,
+ "Unknown link handle ~p", [Handle]);
+ Link ->
+ {Flows, Session1} = rabbit_amqp1_0_session:incr_incoming_id(Session),
+ case rabbit_amqp1_0_incoming_link:transfer(
+ Txfr, MsgPart, Link, BCh) of
+ {message, Reply, Link1, DeliveryId, Settled} ->
+ put({in, Handle}, Link1),
+ Session2 = rabbit_amqp1_0_session:record_delivery(
+ DeliveryId, Settled, Session1),
+ reply(Reply ++ Flows, state(Session2, State));
+ {ok, Link1} ->
+ put({in, Handle}, Link1),
+ reply(Flows, state(Session1, State))
+ end
+ end;
+
+%% Disposition: multiple deliveries may be settled at a time.
+%% TODO: should we send a flow after this, to indicate the state
+%% of the session window?
+handle_control(#'v1_0.disposition'{state = Outcome,
+ role = ?RECV_ROLE} = Disp,
+ State = #state{backing_channel = Ch}) ->
+ AckFun =
+ fun (DeliveryTag) ->
+ ok = rabbit_amqp1_0_channel:call(
+ Ch, case Outcome of
+ #'v1_0.accepted'{} ->
+ #'basic.ack'{delivery_tag = DeliveryTag,
+ multiple = false};
+ #'v1_0.rejected'{} ->
+ #'basic.reject'{delivery_tag = DeliveryTag,
+ requeue = false};
+ #'v1_0.released'{} ->
+ #'basic.reject'{delivery_tag = DeliveryTag,
+ requeue = true};
+ _ ->
+ protocol_error(
+ ?V_1_0_AMQP_ERROR_INVALID_FIELD,
+ "Unrecognised state: ~p~n"
+ "Disposition was: ~p~n", [Outcome, Disp])
+ end)
+ end,
+ case rabbit_amqp1_0_session:settle(Disp, session(State), AckFun) of
+ {none, Session1} -> {noreply, state(Session1, State)};
+ {Reply, Session1} -> {reply, Reply, state(Session1, State)}
+ end;
+
+handle_control(#'v1_0.detach'{ handle = Handle }, State) ->
+ %% TODO keep the state around depending on the lifetime
+ %% TODO outgoing links?
+ erase({in, Handle}),
+ {reply, #'v1_0.detach'{ handle = Handle }, State};
+
+handle_control(#'v1_0.end'{}, _State = #state{ writer_pid = Sock }) ->
+ ok = rabbit_amqp1_0_writer:send_command(Sock, #'v1_0.end'{}),
+ stop;
+
+%% Flow control. These frames come with two pieces of information:
+%% the session window, and optionally, credit for a particular link.
+%% We'll deal with each of them separately.
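+%% (e.g. a flow carrying handle 0 plus link-credit first updates the
+%% session windows via rabbit_amqp1_0_session:flow/2, then grants the
+%% credit on the outgoing link registered under handle 0)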
+handle_control(Flow = #'v1_0.flow'{},
+ State = #state{backing_channel = BCh,
+ session = Session}) ->
+ State1 = state(rabbit_amqp1_0_session:flow(Flow, Session), State),
+ State2 = run_buffer(State1),
+ case Flow#'v1_0.flow'.handle of
+ undefined ->
+ {noreply, State2};
+ Handle ->
+ case get({in, Handle}) of
+ undefined ->
+ case get({out, Handle}) of
+ undefined ->
+ rabbit_log:warning("Flow for unknown link handle ~p", [Flow]),
+ protocol_error(?V_1_0_AMQP_ERROR_INVALID_FIELD,
+ "Unattached handle: ~p", [Handle]);
+ Out ->
+ {ok, Reply} = rabbit_amqp1_0_outgoing_link:flow(
+ Out, Flow, BCh),
+ reply(Reply, State2)
+ end;
+ _In ->
+ %% We're being told about available messages at
+ %% the sender. Yawn.
+ %% TODO at least check transfer-count?
+ {noreply, State2}
+ end
+ end;
+
+handle_control(Frame, _State) ->
+ protocol_error(?V_1_0_AMQP_ERROR_INTERNAL_ERROR,
+ "Unexpected frame ~p",
+ [amqp10_framing:pprint(Frame)]).
+
+run_buffer(State = #state{ writer_pid = WriterPid,
+ session = Session,
+ backing_channel = BCh,
+ buffer = Buffer }) ->
+ {Session1, Buffer1} =
+ run_buffer1(WriterPid, BCh, Session, Buffer),
+ State#state{ buffer = Buffer1, session = Session1 }.
+
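+%% Drain as many buffered deliveries as both session windows permit;
+%% a partially sent frame list is re-queued at the front of the buffer.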
+run_buffer1(WriterPid, BCh, Session, Buffer) ->
+ case rabbit_amqp1_0_session:transfers_left(Session) of
+ {LocalSpace, RemoteSpace} when RemoteSpace > 0 andalso LocalSpace > 0 ->
+ Space = erlang:min(LocalSpace, RemoteSpace),
+ case queue:out(Buffer) of
+ {empty, Buffer} ->
+ {Session, Buffer};
+ {{value, #pending{ delivery_tag = DeliveryTag,
+ frames = Frames,
+ link_handle = Handle } = Pending},
+ BufferTail} ->
+ Link = get({out, Handle}),
+ case send_frames(WriterPid, Frames, Space) of
+ {all, SpaceLeft} ->
+ NewLink =
+ rabbit_amqp1_0_outgoing_link:transferred(
+ DeliveryTag, BCh, Link),
+ put({out, Handle}, NewLink),
+ Session1 = rabbit_amqp1_0_session:record_transfers(
+ Space - SpaceLeft, Session),
+ run_buffer1(WriterPid, BCh, Session1, BufferTail);
+ {some, Rest} ->
+ Session1 = rabbit_amqp1_0_session:record_transfers(
+ Space, Session),
+ Buffer1 = queue:in_r(Pending#pending{ frames = Rest },
+ BufferTail),
+ run_buffer1(WriterPid, BCh, Session1, Buffer1)
+ end
+ end;
+ {_, RemoteSpace} when RemoteSpace > 0 ->
+ case rabbit_amqp1_0_session:bump_outgoing_window(Session) of
+ {Flow = #'v1_0.flow'{}, Session1} ->
+ rabbit_amqp1_0_writer:send_command(
+ WriterPid,
+ rabbit_amqp1_0_session:flow_fields(Flow, Session1)),
+ run_buffer1(WriterPid, BCh, Session1, Buffer);
+ {none, Session1} ->
+ {Session1, Buffer}
+ end;
+ _ ->
+ {Session, Buffer}
+ end.
+
+send_frames(_WriterPid, [], Left) ->
+ {all, Left};
+send_frames(_WriterPid, Rest, 0) ->
+ {some, Rest};
+send_frames(WriterPid, [[T, C] | Rest], Left) ->
+ rabbit_amqp1_0_writer:send_command(WriterPid, T, C),
+ send_frames(WriterPid, Rest, Left - 1).
+
+%% ------
+
+reply([], State) ->
+ {noreply, State};
+reply(Reply, State) ->
+ {reply, rabbit_amqp1_0_session:flow_fields(Reply, session(State)), State}.
+
+session(#state{session = Session}) -> Session.
+state(Session, State) -> State#state{session = Session}.
+
+with_disposable_channel(Conn, Fun) ->
+ {ok, Ch} = amqp_connection:open_channel(Conn),
+ try
+ Fun(Ch)
+ after
+ catch amqp_channel:close(Ch)
+ end.
diff --git a/deps/rabbitmq_amqp1_0/src/rabbit_amqp1_0_session_sup.erl b/deps/rabbitmq_amqp1_0/src/rabbit_amqp1_0_session_sup.erl
new file mode 100644
index 0000000000..5f2462e8a0
--- /dev/null
+++ b/deps/rabbitmq_amqp1_0/src/rabbit_amqp1_0_session_sup.erl
@@ -0,0 +1,62 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_amqp1_0_session_sup).
+
+-behaviour(supervisor2).
+
+-export([start_link/1]).
+
+-export([init/1]).
+
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+%%----------------------------------------------------------------------------
+
+-export_type([start_link_args/0]).
+
+-type start_link_args() ::
+ {rabbit_types:protocol(), rabbit_net:socket(),
+ rabbit_channel:channel_number(), non_neg_integer(), pid(),
+ rabbit_access_control:username(), rabbit_types:vhost(), pid()}.
+
+-spec start_link(start_link_args()) -> {'ok', pid(), pid()}.
+
+%%----------------------------------------------------------------------------
+start_link({amqp10_framing, Sock, Channel, FrameMax, ReaderPid,
+ Username, VHost, Collector, ProxySocket}) ->
+ {ok, SupPid} = supervisor2:start_link(?MODULE, []),
+ {ok, WriterPid} =
+ supervisor2:start_child(
+ SupPid,
+ {writer, {rabbit_amqp1_0_writer, start_link,
+ [Sock, Channel, FrameMax, amqp10_framing,
+ ReaderPid]},
+ intrinsic, ?WORKER_WAIT, worker, [rabbit_amqp1_0_writer]}),
+ SocketForAdapterInfo = case ProxySocket of
+ undefined -> Sock;
+ _ -> ProxySocket
+ end,
+ case supervisor2:start_child(
+ SupPid,
+ {channel, {rabbit_amqp1_0_session_process, start_link,
+ [{Channel, ReaderPid, WriterPid, Username, VHost, FrameMax,
+ adapter_info(SocketForAdapterInfo), Collector}]},
+ intrinsic, ?WORKER_WAIT, worker, [rabbit_amqp1_0_session_process]}) of
+ {ok, ChannelPid} ->
+ {ok, SupPid, ChannelPid};
+ {error, Reason} ->
+ {error, Reason}
+ end.
+
+%%----------------------------------------------------------------------------
+
+init([]) ->
+ {ok, {{one_for_all, 0, 1}, []}}.
+
+adapter_info(Sock) ->
+ amqp_connection:socket_adapter_info(Sock, {'AMQP', "1.0"}).
diff --git a/deps/rabbitmq_amqp1_0/src/rabbit_amqp1_0_session_sup_sup.erl b/deps/rabbitmq_amqp1_0/src/rabbit_amqp1_0_session_sup_sup.erl
new file mode 100644
index 0000000000..e427064af0
--- /dev/null
+++ b/deps/rabbitmq_amqp1_0/src/rabbit_amqp1_0_session_sup_sup.erl
@@ -0,0 +1,38 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_amqp1_0_session_sup_sup).
+
+-behaviour(supervisor2).
+
+-export([start_link/0, start_session/2]).
+
+-export([init/1]).
+
+%% It would be much nicer if rabbit_channel_sup_sup was parameterised
+%% on the module.
+
+%%----------------------------------------------------------------------------
+
+-spec start_link() -> rabbit_types:ok_pid_or_error().
+-spec start_session(pid(), rabbit_amqp1_0_session_sup:start_link_args()) ->
+ {'ok', pid(), pid()}.
+
+%%----------------------------------------------------------------------------
+
+start_link() ->
+ supervisor2:start_link(?MODULE, []).
+
+start_session(Pid, Args) ->
+ supervisor2:start_child(Pid, [Args]).
+
+%%----------------------------------------------------------------------------
+
+init([]) ->
+ {ok, {{simple_one_for_one, 0, 1},
+ [{session_sup, {rabbit_amqp1_0_session_sup, start_link, []},
+ temporary, infinity, supervisor, [rabbit_amqp1_0_session_sup]}]}}.
diff --git a/deps/rabbitmq_amqp1_0/src/rabbit_amqp1_0_util.erl b/deps/rabbitmq_amqp1_0/src/rabbit_amqp1_0_util.erl
new file mode 100644
index 0000000000..9d9ad5044d
--- /dev/null
+++ b/deps/rabbitmq_amqp1_0/src/rabbit_amqp1_0_util.erl
@@ -0,0 +1,72 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_amqp1_0_util).
+
+-include("rabbit_amqp1_0.hrl").
+
+-export([protocol_error/3]).
+-export([serial_add/2, serial_compare/2, serial_diff/2]).
+
+-export_type([serial_number/0]).
+-type serial_number() :: non_neg_integer().
+-type serial_compare_result() :: 'equal' | 'less' | 'greater'.
+
+-spec serial_add(serial_number(), non_neg_integer()) ->
+ serial_number().
+-spec serial_compare(serial_number(), serial_number()) ->
+ serial_compare_result().
+-spec serial_diff(serial_number(), serial_number()) ->
+ integer().
+
+protocol_error(Condition, Msg, Args) ->
+ exit(#'v1_0.error'{
+ condition = Condition,
+ description = {utf8, list_to_binary(
+ lists:flatten(io_lib:format(Msg, Args)))}
+ }).
+
+%% Serial arithmetic for unsigned ints.
+%% http://www.faqs.org/rfcs/rfc1982.html
+%% SERIAL_BITS = 32
+
+%% 2 ^ SERIAL_BITS
+-define(SERIAL_MAX, 16#100000000).
+%% 2 ^ (SERIAL_BITS - 1) - 1
+-define(SERIAL_MAX_ADDEND, 16#7fffffff).
+
+serial_add(S, N) when N =< ?SERIAL_MAX_ADDEND ->
+ (S + N) rem ?SERIAL_MAX;
+serial_add(S, N) ->
+ exit({out_of_bound_serial_addition, S, N}).
+
+serial_compare(A, B) ->
+ if A =:= B ->
+ equal;
+ (A < B andalso B - A < ?SERIAL_MAX_ADDEND) orelse
+ (A > B andalso A - B > ?SERIAL_MAX_ADDEND) ->
+ less;
+ (A < B andalso B - A > ?SERIAL_MAX_ADDEND) orelse
+ (A > B andalso B - A < ?SERIAL_MAX_ADDEND) ->
+ greater;
+ true -> exit({indeterminate_serial_comparison, A, B})
+ end.
+
+-define(SERIAL_DIFF_BOUND, 16#80000000).
+
+serial_diff(A, B) ->
+ Diff = A - B,
+ if Diff > (?SERIAL_DIFF_BOUND) ->
+ %% B is actually greater than A
+ - (?SERIAL_MAX - Diff);
+ Diff < - (?SERIAL_DIFF_BOUND) ->
+ ?SERIAL_MAX + Diff;
+ Diff < ?SERIAL_DIFF_BOUND andalso Diff > -?SERIAL_DIFF_BOUND ->
+ Diff;
+ true ->
+ exit({indeterminate_serial_diff, A, B})
+ end.
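+
+%% Worked examples (illustrative only):
+%%   serial_add(16#ffffffff, 1)      => 0        (wraps around 2^32)
+%%   serial_compare(0, 16#ffffffff)  => greater  (0 is one past the wrap)
+%%   serial_diff(0, 16#ffffffff)     => 1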
diff --git a/deps/rabbitmq_amqp1_0/src/rabbit_amqp1_0_writer.erl b/deps/rabbitmq_amqp1_0/src/rabbit_amqp1_0_writer.erl
new file mode 100644
index 0000000000..2921f929db
--- /dev/null
+++ b/deps/rabbitmq_amqp1_0/src/rabbit_amqp1_0_writer.erl
@@ -0,0 +1,292 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_amqp1_0_writer).
+-include_lib("rabbit_common/include/rabbit.hrl").
+-include_lib("rabbit_common/include/rabbit_framing.hrl").
+-include("rabbit_amqp1_0.hrl").
+
+-export([start/5, start_link/5, start/6, start_link/6]).
+-export([send_command/2, send_command/3,
+ send_command_sync/2, send_command_sync/3,
+ send_command_and_notify/4, send_command_and_notify/5]).
+-export([internal_send_command/4, internal_send_command/6]).
+
+%% internal
+-export([mainloop/1, mainloop1/1]).
+
+-record(wstate, {sock, channel, frame_max, protocol, reader,
+ stats_timer, pending}).
+
+-define(HIBERNATE_AFTER, 5000).
+-define(AMQP_SASL_FRAME_TYPE, 1).
+
+%%---------------------------------------------------------------------------
+
+-spec start
+ (rabbit_net:socket(), rabbit_channel:channel_number(),
+ non_neg_integer(), rabbit_types:protocol(), pid())
+ -> rabbit_types:ok(pid()).
+-spec start_link
+ (rabbit_net:socket(), rabbit_channel:channel_number(),
+ non_neg_integer(), rabbit_types:protocol(), pid())
+ -> rabbit_types:ok(pid()).
+-spec start
+ (rabbit_net:socket(), rabbit_channel:channel_number(),
+ non_neg_integer(), rabbit_types:protocol(), pid(), boolean())
+ -> rabbit_types:ok(pid()).
+-spec start_link
+ (rabbit_net:socket(), rabbit_channel:channel_number(),
+ non_neg_integer(), rabbit_types:protocol(), pid(), boolean())
+ -> rabbit_types:ok(pid()).
+-spec send_command
+ (pid(), rabbit_framing:amqp_method_record()) -> 'ok'.
+-spec send_command
+ (pid(), rabbit_framing:amqp_method_record(), rabbit_types:content())
+ -> 'ok'.
+-spec send_command_sync
+ (pid(), rabbit_framing:amqp_method_record()) -> 'ok'.
+-spec send_command_sync
+ (pid(), rabbit_framing:amqp_method_record(), rabbit_types:content())
+ -> 'ok'.
+-spec send_command_and_notify
+ (pid(), pid(), pid(), rabbit_framing:amqp_method_record())
+ -> 'ok'.
+-spec send_command_and_notify
+ (pid(), pid(), pid(), rabbit_framing:amqp_method_record(),
+ rabbit_types:content())
+ -> 'ok'.
+-spec internal_send_command
+ (rabbit_net:socket(), rabbit_channel:channel_number(),
+ rabbit_framing:amqp_method_record(), rabbit_types:protocol())
+ -> 'ok'.
+-spec internal_send_command
+ (rabbit_net:socket(), rabbit_channel:channel_number(),
+ rabbit_framing:amqp_method_record(), rabbit_types:content(),
+ non_neg_integer(), rabbit_types:protocol())
+ -> 'ok'.
+
+%%---------------------------------------------------------------------------
+
+start(Sock, Channel, FrameMax, Protocol, ReaderPid) ->
+ start(Sock, Channel, FrameMax, Protocol, ReaderPid, false).
+
+start_link(Sock, Channel, FrameMax, Protocol, ReaderPid) ->
+ start_link(Sock, Channel, FrameMax, Protocol, ReaderPid, false).
+
+start(Sock, Channel, FrameMax, Protocol, ReaderPid, ReaderWantsStats) ->
+ State = initial_state(Sock, Channel, FrameMax, Protocol, ReaderPid,
+ ReaderWantsStats),
+ {ok, proc_lib:spawn(?MODULE, mainloop, [State])}.
+
+start_link(Sock, Channel, FrameMax, Protocol, ReaderPid, ReaderWantsStats) ->
+ State = initial_state(Sock, Channel, FrameMax, Protocol, ReaderPid,
+ ReaderWantsStats),
+ {ok, proc_lib:spawn_link(?MODULE, mainloop, [State])}.
+
+initial_state(Sock, Channel, FrameMax, Protocol, ReaderPid, ReaderWantsStats) ->
+ (case ReaderWantsStats of
+ true -> fun rabbit_event:init_stats_timer/2;
+ false -> fun rabbit_event:init_disabled_stats_timer/2
+ end)(#wstate{sock = Sock,
+ channel = Channel,
+ frame_max = FrameMax,
+ protocol = Protocol,
+ reader = ReaderPid,
+ pending = []},
+ #wstate.stats_timer).
+
+mainloop(State) ->
+ try
+ mainloop1(State)
+ catch
+ exit:Error -> #wstate{reader = ReaderPid, channel = Channel} = State,
+ ReaderPid ! {channel_exit, Channel, Error}
+ end,
+ done.
+
+mainloop1(State = #wstate{pending = []}) ->
+ receive
+ Message -> ?MODULE:mainloop1(handle_message(Message, State))
+ after ?HIBERNATE_AFTER ->
+ erlang:hibernate(?MODULE, mainloop, [State])
+ end;
+mainloop1(State) ->
+ receive
+ Message -> ?MODULE:mainloop1(handle_message(Message, State))
+ after 0 ->
+ ?MODULE:mainloop1(flush(State))
+ end.
+
+handle_message({send_command, MethodRecord}, State) ->
+ internal_send_command_async(MethodRecord, State);
+handle_message({send_command, MethodRecord, Content}, State) ->
+ internal_send_command_async(MethodRecord, Content, State);
+handle_message({'$gen_call', From, {send_command_sync, MethodRecord}}, State) ->
+ State1 = flush(internal_send_command_async(MethodRecord, State)),
+ gen_server:reply(From, ok),
+ State1;
+handle_message({'$gen_call', From, {send_command_sync, MethodRecord, Content}},
+ State) ->
+ State1 = flush(internal_send_command_async(MethodRecord, Content, State)),
+ gen_server:reply(From, ok),
+ State1;
+handle_message({send_command_and_notify, QPid, ChPid, MethodRecord}, State) ->
+ State1 = internal_send_command_async(MethodRecord, State),
+ rabbit_amqqueue:notify_sent(QPid, ChPid),
+ State1;
+handle_message({send_command_and_notify, QPid, ChPid, MethodRecord, Content},
+ State) ->
+ State1 = internal_send_command_async(MethodRecord, Content, State),
+ rabbit_amqqueue:notify_sent(QPid, ChPid),
+ State1;
+handle_message({'DOWN', _MRef, process, QPid, _Reason}, State) ->
+ rabbit_amqqueue:notify_sent_queue_down(QPid),
+ State;
+handle_message({inet_reply, _, ok}, State) ->
+ rabbit_event:ensure_stats_timer(State, #wstate.stats_timer, emit_stats);
+handle_message({inet_reply, _, Status}, _State) ->
+ exit({writer, send_failed, Status});
+handle_message(emit_stats, State = #wstate{reader = ReaderPid}) ->
+ ReaderPid ! ensure_stats,
+ rabbit_event:reset_stats_timer(State, #wstate.stats_timer);
+handle_message(Message, _State) ->
+ exit({writer, message_not_understood, Message}).
+
+%%---------------------------------------------------------------------------
+
+send_command(W, MethodRecord) ->
+ W ! {send_command, MethodRecord},
+ ok.
+
+send_command(W, MethodRecord, Content) ->
+ W ! {send_command, MethodRecord, Content},
+ ok.
+
+send_command_sync(W, MethodRecord) ->
+ call(W, {send_command_sync, MethodRecord}).
+
+send_command_sync(W, MethodRecord, Content) ->
+ call(W, {send_command_sync, MethodRecord, Content}).
+
+send_command_and_notify(W, Q, ChPid, MethodRecord) ->
+ W ! {send_command_and_notify, Q, ChPid, MethodRecord},
+ ok.
+
+send_command_and_notify(W, Q, ChPid, MethodRecord, Content) ->
+ W ! {send_command_and_notify, Q, ChPid, MethodRecord, Content},
+ ok.
+
+%%---------------------------------------------------------------------------
+
+call(Pid, Msg) ->
+ {ok, Res} = gen:call(Pid, '$gen_call', Msg, infinity),
+ Res.
+
+%%---------------------------------------------------------------------------
+
+%% Begin 1-0
+
+assemble_frame(Channel, Performative, amqp10_framing) ->
+ ?DEBUG("Channel ~p <-~n~p~n~n",
+ [Channel, amqp10_framing:pprint(Performative)]),
+ PerfBin = amqp10_framing:encode_bin(Performative),
+ amqp10_binary_generator:build_frame(Channel, PerfBin);
+
+assemble_frame(Channel, Performative, rabbit_amqp1_0_sasl) ->
+ ?DEBUG("Channel ~p <-~n~p~n~n",
+ [Channel, amqp10_framing:pprint(Performative)]),
+ PerfBin = amqp10_framing:encode_bin(Performative),
+ amqp10_binary_generator:build_frame(Channel,
+ ?AMQP_SASL_FRAME_TYPE, PerfBin).
+
+%% Note: a transfer record can be followed by a number of other
+%% records to make a complete frame; unlike 0-9-1 we may have many
+%% content records. However, that's already been handled for us: we're
+%% just sending a chunk, so from this perspective it's just a binary.
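+%%
+%% For reference, a 1.0 frame as built by amqp10_binary_generator is an
+%% 8-byte header (size, doff, type, channel) followed by the encoded
+%% performative and, optionally, the payload bytes.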
+
+assemble_frames(Channel, Performative, Content, _FrameMax,
+ amqp10_framing) ->
+ ?DEBUG("Channel ~p <-~n~p~n followed by ~p bytes of content~n~n",
+ [Channel, amqp10_framing:pprint(Performative),
+ iolist_size(Content)]),
+ PerfBin = amqp10_framing:encode_bin(Performative),
+ amqp10_binary_generator:build_frame(Channel, [PerfBin, Content]).
+
+%% End 1-0
+
+tcp_send(Sock, Data) ->
+ rabbit_misc:throw_on_error(inet_error,
+ fun () -> rabbit_net:send(Sock, Data) end).
+
+internal_send_command(Sock, Channel, MethodRecord, Protocol) ->
+ ok = tcp_send(Sock, assemble_frame(Channel, MethodRecord, Protocol)).
+
+internal_send_command(Sock, Channel, MethodRecord, Content, FrameMax,
+ Protocol) ->
+ ok = lists:foldl(fun (Frame, ok) -> tcp_send(Sock, Frame);
+ (_Frame, Other) -> Other
+ end, ok, assemble_frames(Channel, MethodRecord,
+ Content, FrameMax, Protocol)).
+
+internal_send_command_async(MethodRecord,
+ State = #wstate{channel = Channel,
+ protocol = Protocol,
+ pending = Pending}) ->
+ Frame = assemble_frame(Channel, MethodRecord, Protocol),
+ maybe_flush(State#wstate{pending = [Frame | Pending]}).
+
+internal_send_command_async(MethodRecord, Content,
+ State = #wstate{channel = Channel,
+ frame_max = FrameMax,
+ protocol = Protocol,
+ pending = Pending}) ->
+ Frames = assemble_frames(Channel, MethodRecord, Content, FrameMax,
+ Protocol),
+ maybe_flush(State#wstate{pending = [Frames | Pending]}).
+
+%% This magic number is the TCP-over-Ethernet MSS (1460) minus the
+%% minimum size of an AMQP basic.deliver method frame (24) and a basic
+%% content header (22): 1460 - (24 + 22) = 1414. The idea is that we
+%% want to flush just before exceeding the MSS.
+-define(FLUSH_THRESHOLD, 1414).
+
+maybe_flush(State = #wstate{pending = Pending}) ->
+ case iolist_size(Pending) >= ?FLUSH_THRESHOLD of
+ true -> flush(State);
+ false -> State
+ end.
+
+flush(State = #wstate{pending = []}) ->
+ State;
+flush(State = #wstate{sock = Sock, pending = Pending}) ->
+ ok = port_cmd(Sock, lists:reverse(Pending)),
+ State#wstate{pending = []}.
+
+%% gen_tcp:send/2 does a selective receive of {inet_reply, Sock,
+%% Status} to obtain the result. That is bad when it is called from
+%% the writer since it requires scanning the writer's possibly quite
+%% large message queue.
+%%
+%% So instead we lift the code from prim_inet:send/2, which is what
+%% gen_tcp:send/2 calls, do the first half here and then just process
+%% the result code in handle_message/2 as and when it arrives.
+%%
+%% This means we may end up happily sending data down a closed/broken
+%% socket, but that's ok since a) data in the buffers will be lost in
+%% any case (so qualitatively we are no worse off than if we used
+%% gen_tcp:send/2), and b) we do detect the changed socket status
+%% eventually, i.e. when we get round to handling the result code.
+%%
+%% Also note that the port has bounded buffers and port_command blocks
+%% when these are full. So the fact that we process the result
+%% asynchronously does not impact flow control.
+port_cmd(Sock, Data) ->
+ true = try rabbit_net:port_command(Sock, Data)
+ catch error:Error -> exit({writer, send_failed, Error})
+ end,
+ ok.
diff --git a/deps/rabbitmq_amqp1_0/test/amqp10_client_SUITE.erl b/deps/rabbitmq_amqp1_0/test/amqp10_client_SUITE.erl
new file mode 100644
index 0000000000..922be93a85
--- /dev/null
+++ b/deps/rabbitmq_amqp1_0/test/amqp10_client_SUITE.erl
@@ -0,0 +1,203 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(amqp10_client_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("rabbit_common/include/rabbit_framing.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, tests},
+ {group, metrics}
+ ].
+
+groups() ->
+ [
+ {tests, [], [
+ roundtrip_quorum_queue_with_drain
+ ]},
+ {metrics, [], [
+ auth_attempt_metrics
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ application:ensure_all_started(amqp10_client),
+ rabbit_ct_helpers:log_environment(),
+ Config.
+
+end_per_suite(Config) ->
+ Config.
+
+init_per_group(Group, Config) ->
+ Suffix = rabbit_ct_helpers:testcase_absname(Config, "", "-"),
+ Config1 = rabbit_ct_helpers:set_config(
+ Config, [
+ {rmq_nodename_suffix, Suffix},
+ {amqp10_client_library, Group}
+ ]),
+ Config2 = rabbit_ct_helpers:run_setup_steps(
+ Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()),
+ Nodes = rabbit_ct_broker_helpers:get_node_configs(
+ Config2, nodename),
+ Ret = rabbit_ct_broker_helpers:rpc(
+ Config2, 0,
+ rabbit_feature_flags,
+ is_supported_remotely,
+ [Nodes, [quorum_queue], 60000]),
+ case Ret of
+ true ->
+ ok = rabbit_ct_broker_helpers:rpc(
+ Config2, 0, rabbit_feature_flags, enable, [quorum_queue]),
+ Config2;
+ false ->
+ end_per_group(Group, Config2),
+ {skip, "Quorum queues are unsupported"}
+ end.
+
+end_per_group(_, Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+%%% TESTS
+%%%
+
+roundtrip_quorum_queue_with_drain(Config) ->
+ Host = ?config(rmq_hostname, Config),
+ Port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp),
+ QName = atom_to_binary(?FUNCTION_NAME, utf8),
+ Address = <<"/amq/queue/", QName/binary>>,
+ %% declare a quorum queue
+ Ch = rabbit_ct_client_helpers:open_channel(Config, 0),
+ amqp_channel:call(Ch, #'queue.declare'{queue = QName,
+ durable = true,
+ arguments = [{<<"x-queue-type">>, longstr, <<"quorum">>}]}),
+ % create a configuration map
+ OpnConf = #{address => Host,
+ port => Port,
+ container_id => atom_to_binary(?FUNCTION_NAME, utf8),
+ sasl => {plain, <<"guest">>, <<"guest">>}},
+ % ct:pal("opening connection with ~p", [OpnConf]),
+ {ok, Connection} = amqp10_client:open_connection(OpnConf),
+ {ok, Session} = amqp10_client:begin_session(Connection),
+ SenderLinkName = <<"test-sender">>,
+ {ok, Sender} = amqp10_client:attach_sender_link(Session,
+ SenderLinkName,
+ Address),
+
+ % wait for credit to be received
+ receive
+ {amqp10_event, {link, Sender, credited}} -> ok
+ after 2000 ->
+ exit(credited_timeout)
+ end,
+
+ % create a new message using a delivery-tag and body, and indicate
+ % its settlement status (true meaning no disposition confirmation
+ % will be sent by the receiver).
+ OutMsg = amqp10_msg:new(<<"my-tag">>, <<"my-body">>, true),
+ ok = amqp10_client:send_msg(Sender, OutMsg),
+
+ flush("pre-receive"),
+ % create a receiver link
+ {ok, Receiver} = amqp10_client:attach_receiver_link(Session,
+ <<"test-receiver">>,
+ Address),
+
+ % grant credit and drain
+ ok = amqp10_client:flow_link_credit(Receiver, 1, never, true),
+
+ % wait for a delivery
+ receive
+ {amqp10_msg, Receiver, _InMsg} -> ok
+ after 2000 ->
+ exit(delivery_timeout)
+ end,
+ OutMsg2 = amqp10_msg:new(<<"my-tag">>, <<"my-body2">>, true),
+ ok = amqp10_client:send_msg(Sender, OutMsg2),
+
+ %% no delivery should be made at this point
+ receive
+ {amqp10_msg, _, _} ->
+ exit(unexpected_delivery)
+ after 500 ->
+ ok
+ end,
+
+ flush("final"),
+ ok = amqp10_client:detach_link(Sender),
+
+ ok = amqp10_client:close_connection(Connection),
+ ok.
+
+auth_attempt_metrics(Config) ->
+ Host = ?config(rmq_hostname, Config),
+ Port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp),
+ % create a configuration map
+ OpnConf = #{address => Host,
+ port => Port,
+ container_id => atom_to_binary(?FUNCTION_NAME, utf8),
+ sasl => {plain, <<"guest">>, <<"guest">>}},
+ open_and_close_connection(OpnConf),
+ [Attempt] =
+ rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_core_metrics, get_auth_attempts, []),
+ ?assertEqual(false, proplists:is_defined(remote_address, Attempt)),
+ ?assertEqual(false, proplists:is_defined(username, Attempt)),
+ ?assertEqual(proplists:get_value(protocol, Attempt), <<"amqp10">>),
+ ?assertEqual(proplists:get_value(auth_attempts, Attempt), 1),
+ ?assertEqual(proplists:get_value(auth_attempts_failed, Attempt), 0),
+ ?assertEqual(proplists:get_value(auth_attempts_succeeded, Attempt), 1),
+ rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_core_metrics, reset_auth_attempt_metrics, []),
+ rabbit_ct_broker_helpers:rpc(Config, 0, application, set_env,
+ [rabbit, track_auth_attempt_source, true]),
+ open_and_close_connection(OpnConf),
+ Attempts =
+ rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_core_metrics, get_auth_attempts_by_source, []),
+ [Attempt1] = lists:filter(fun(Props) ->
+ proplists:is_defined(remote_address, Props)
+ end, Attempts),
+ ?assertEqual(proplists:get_value(remote_address, Attempt1), <<>>),
+ ?assertEqual(proplists:get_value(username, Attempt1), <<"guest">>),
+ ?assertEqual(proplists:get_value(protocol, Attempt), <<"amqp10">>),
+ ?assertEqual(proplists:get_value(auth_attempts, Attempt1), 1),
+ ?assertEqual(proplists:get_value(auth_attempts_failed, Attempt1), 0),
+ ?assertEqual(proplists:get_value(auth_attempts_succeeded, Attempt1), 1),
+ ok.
+
+%% internal
+%%
+
+flush(Prefix) ->
+ receive
+ Msg ->
+ ct:pal("~s flushed: ~w~n", [Prefix, Msg]),
+ flush(Prefix)
+ after 1 ->
+ ok
+ end.
+
+open_and_close_connection(OpnConf) ->
+ {ok, Connection} = amqp10_client:open_connection(OpnConf),
+ {ok, _} = amqp10_client:begin_session(Connection),
+ ok = amqp10_client:close_connection(Connection).
diff --git a/deps/rabbitmq_amqp1_0/test/command_SUITE.erl b/deps/rabbitmq_amqp1_0/test/command_SUITE.erl
new file mode 100644
index 0000000000..67528393e7
--- /dev/null
+++ b/deps/rabbitmq_amqp1_0/test/command_SUITE.erl
@@ -0,0 +1,69 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+
+-module(command_SUITE).
+-compile([export_all]).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include("rabbit_amqp1_0.hrl").
+
+-define(COMMAND, 'Elixir.RabbitMQ.CLI.Ctl.Commands.ListAmqp10ConnectionsCommand').
+
+all() ->
+ [
+ {group, non_parallel_tests}
+ ].
+
+groups() ->
+ [
+ {non_parallel_tests, [], [
+ merge_defaults,
+ validate
+ ]}
+ ].
+
+init_per_suite(Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config,
+ [{rmq_nodename_suffix, ?MODULE}]),
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps()).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config,
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_group(_, Config) ->
+ Config.
+
+end_per_group(_, Config) ->
+ Config.
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+merge_defaults(_Config) ->
+ {[<<"pid">>], #{verbose := false}} =
+ ?COMMAND:merge_defaults([], #{}),
+
+ {[<<"other_key">>], #{verbose := true}} =
+ ?COMMAND:merge_defaults([<<"other_key">>], #{verbose => true}),
+
+ {[<<"other_key">>], #{verbose := false}} =
+ ?COMMAND:merge_defaults([<<"other_key">>], #{verbose => false}).
+
+validate(_Config) ->
+ ok = ?COMMAND:validate([], #{}),
+ ok = ?COMMAND:validate([<<"recv_oct">>, <<"ssl">>], #{}),
+ ok = ?COMMAND:validate([atom_to_binary(K, utf8) || K <- ?INFO_ITEMS], #{}),
+ {validation_failure,{bad_info_key,[other]}} =
+ ?COMMAND:validate([<<"other">>], #{}).
diff --git a/deps/rabbitmq_amqp1_0/test/proxy_protocol_SUITE.erl b/deps/rabbitmq_amqp1_0/test/proxy_protocol_SUITE.erl
new file mode 100644
index 0000000000..fd0a927bcb
--- /dev/null
+++ b/deps/rabbitmq_amqp1_0/test/proxy_protocol_SUITE.erl
@@ -0,0 +1,139 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2017-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(proxy_protocol_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+
+-compile(export_all).
+
+-define(TIMEOUT, 5000).
+
+all() ->
+ [
+ {group, sequential_tests}
+ ].
+
+groups() -> [
+ {sequential_tests, [], [
+ proxy_protocol,
+ proxy_protocol_tls
+ ]}
+ ].
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, ?MODULE}
+ ]),
+ Config2 = rabbit_ct_helpers:merge_app_env(Config1, [
+ {rabbit, [
+ {proxy_protocol, true}
+ ]}
+ ]),
+ Config3 = rabbit_ct_helpers:set_config(Config2, {rabbitmq_ct_tls_verify, verify_none}),
+ rabbit_ct_helpers:run_setup_steps(Config3,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_group(_, Config) -> Config.
+end_per_group(_, Config) -> Config.
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+proxy_protocol(Config) ->
+ Port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp),
+ {ok, Socket} = gen_tcp:connect({127,0,0,1}, Port,
+ [binary, {active, false}, {packet, raw}]),
+ ok = inet:send(Socket, "PROXY TCP4 192.168.1.1 192.168.1.2 80 81\r\n"),
+ [ok = inet:send(Socket, amqp_1_0_frame(FrameType))
+ || FrameType <- [header_sasl, sasl_init, header_amqp, open, 'begin']],
+ {ok, _Packet} = gen_tcp:recv(Socket, 0, ?TIMEOUT),
+ ConnectionName = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, connection_name, []),
+ match = re:run(ConnectionName, <<"^192.168.1.1:80 ">>, [{capture, none}]),
+ gen_tcp:close(Socket),
+ ok.
+
+proxy_protocol_tls(Config) ->
+ app_utils:start_applications([asn1, crypto, public_key, ssl]),
+ Port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp_tls),
+ {ok, Socket} = gen_tcp:connect({127,0,0,1}, Port,
+ [binary, {active, false}, {packet, raw}]),
+ ok = inet:send(Socket, "PROXY TCP4 192.168.1.1 192.168.1.2 80 81\r\n"),
+ {ok, SslSocket} = ssl:connect(Socket, [], ?TIMEOUT),
+ [ok = ssl:send(SslSocket, amqp_1_0_frame(FrameType))
+ || FrameType <- [header_sasl, sasl_init, header_amqp, open, 'begin']],
+ {ok, _Packet} = ssl:recv(SslSocket, 0, ?TIMEOUT),
+ timer:sleep(1000),
+ ConnectionName = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, connection_name, []),
+ match = re:run(ConnectionName, <<"^192.168.1.1:80 ">>, [{capture, none}]),
+ gen_tcp:close(Socket),
+ ok.
+
+%% Hex-encoded frames to send so that the connection gets registered in
+%% RabbitMQ; captured with Wireshark while running one of the Java tests.
+amqp_1_0_frame(header_sasl) ->
+ hex_frame_to_binary("414d515003010000");
+amqp_1_0_frame(header_amqp) ->
+ hex_frame_to_binary("414d515000010000");
+amqp_1_0_frame(sasl_init) ->
+ hex_frame_to_binary("0000001902010000005341c00c01a309414e4f4e594d4f5553");
+amqp_1_0_frame(open) ->
+ hex_frame_to_binary("0000003f02000000005310c03202a12438306335323662332d653530662d343835352d613564302d336466643738623537633730a1096c6f63616c686f7374");
+amqp_1_0_frame('begin') ->
+ hex_frame_to_binary("0000002002000000005311c01305405201707fffffff707fffffff700000ffff").
+
+hex_frame_to_binary(HexString) ->
+    Hexs = split(HexString, []),
+    Ints = [list_to_integer(Hex, 16) || Hex <- Hexs],
+    list_to_binary(Ints).
+
+split([X1, X2 | T], Acc) ->
+ Byte = [[X1, X2]],
+ split(T, Acc ++ Byte);
+split([], Acc) ->
+ Acc.
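+
+%% An equivalent using a binary comprehension (a hypothetical
+%% alternative, shown for illustration; not used above):
+%% hex_to_bin(S) ->
+%%     << <<(list_to_integer([A, B], 16))>> || <<A, B>> <= list_to_binary(S) >>.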
+
+connection_name() ->
+    %% the connection can take some time to show up in the ETS table,
+    %% hence the retry
+ case retry(fun connection_registered/0, 20) of
+ true ->
+ Connections = ets:tab2list(connection_created),
+ {_Key, Values} = lists:nth(1, Connections),
+ {_, Name} = lists:keyfind(name, 1, Values),
+ Name;
+ false ->
+ error
+ end.
+
+connection_registered() ->
+ length(ets:tab2list(connection_created)) > 0.
+
+retry(_Function, 0) ->
+ false;
+retry(Function, Count) ->
+ Result = Function(),
+ case Result of
+ true ->
+ true;
+ false ->
+ timer:sleep(100),
+ retry(Function, Count - 1)
+ end.
diff --git a/deps/rabbitmq_amqp1_0/test/system_SUITE.erl b/deps/rabbitmq_amqp1_0/test/system_SUITE.erl
new file mode 100644
index 0000000000..b5c92c26ad
--- /dev/null
+++ b/deps/rabbitmq_amqp1_0/test/system_SUITE.erl
@@ -0,0 +1,243 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(system_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("rabbit_common/include/rabbit_framing.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, dotnet},
+ {group, java}
+ ].
+
+groups() ->
+ [
+ {dotnet, [], [
+ roundtrip,
+ roundtrip_to_amqp_091,
+ default_outcome,
+ outcomes,
+ fragmentation,
+ message_annotations,
+ footer,
+ data_types,
+ %% TODO at_most_once,
+ reject,
+ redelivery,
+ routing,
+ invalid_routes,
+ auth_failure,
+ access_failure,
+ access_failure_not_allowed,
+ access_failure_send
+ ]},
+ {java, [], [
+ roundtrip
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ Config.
+
+end_per_suite(Config) ->
+ Config.
+
+init_per_group(Group, Config) ->
+ Suffix = rabbit_ct_helpers:testcase_absname(Config, "", "-"),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, Suffix},
+ {amqp10_client_library, Group}
+ ]),
+ GroupSetupStep = case Group of
+ dotnet -> fun build_dotnet_test_project/1;
+ java -> fun build_maven_test_project/1
+ end,
+ rabbit_ct_helpers:run_setup_steps(Config1, [
+ GroupSetupStep
+ ] ++
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_group(_, Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+build_dotnet_test_project(Config) ->
+ TestProjectDir = filename:join(
+ [?config(data_dir, Config), "fsharp-tests"]),
+ Ret = rabbit_ct_helpers:exec(["dotnet", "restore"],
+ [{cd, TestProjectDir}]),
+ case Ret of
+ {ok, _} ->
+ rabbit_ct_helpers:set_config(Config,
+ {dotnet_test_project_dir, TestProjectDir});
+ _ ->
+ {skip, "Failed to fetch .NET Core test project dependencies"}
+ end.
+
+build_maven_test_project(Config) ->
+ TestProjectDir = filename:join([?config(data_dir, Config), "java-tests"]),
+ Ret = rabbit_ct_helpers:exec([TestProjectDir ++ "/mvnw", "test-compile"],
+ [{cd, TestProjectDir}]),
+ case Ret of
+ {ok, _} ->
+ rabbit_ct_helpers:set_config(Config,
+ {maven_test_project_dir, TestProjectDir});
+ _ ->
+ {skip, "Failed to build Maven test project"}
+ end.
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+roundtrip(Config) ->
+ run(Config, [
+ {dotnet, "roundtrip"},
+ {java, "RoundTripTest"}
+ ]).
+
+roundtrip_to_amqp_091(Config) ->
+ run(Config, [
+ {dotnet, "roundtrip_to_amqp_091"}
+ ]).
+
+default_outcome(Config) ->
+ run(Config, [
+ {dotnet, "default_outcome"}
+ ]).
+
+outcomes(Config) ->
+ run(Config, [
+ {dotnet, "outcomes"}
+ ]).
+
+fragmentation(Config) ->
+ run(Config, [
+ {dotnet, "fragmentation"}
+ ]).
+
+message_annotations(Config) ->
+ run(Config, [
+ {dotnet, "message_annotations"}
+ ]).
+
+footer(Config) ->
+ run(Config, [
+ {dotnet, "footer"}
+ ]).
+
+data_types(Config) ->
+ run(Config, [
+ {dotnet, "data_types"}
+ ]).
+
+%% at_most_once(Config) ->
+%% run(Config, [
+%% ]).
+
+reject(Config) ->
+ run(Config, [
+ {dotnet, "reject"}
+ ]).
+
+redelivery(Config) ->
+ run(Config, [
+ {dotnet, "redelivery"}
+ ]).
+
+routing(Config) ->
+ Ch = rabbit_ct_client_helpers:open_channel(Config, 0),
+ amqp_channel:call(Ch, #'queue.declare'{queue = <<"transient_q">>,
+ durable = false}),
+ amqp_channel:call(Ch, #'queue.declare'{queue = <<"durable_q">>,
+ durable = true}),
+ amqp_channel:call(Ch, #'queue.declare'{queue = <<"autodel_q">>,
+ auto_delete = true}),
+ run(Config, [
+ {dotnet, "routing"}
+ ]).
+
+invalid_routes(Config) ->
+ run(Config, [
+ {dotnet, "invalid_routes"}
+ ]).
+
+auth_failure(Config) ->
+ run(Config, [ {dotnet, "auth_failure"} ]).
+
+access_failure(Config) ->
+ User = <<"access_failure">>,
+ rabbit_ct_broker_helpers:add_user(Config, User, <<"boo">>),
+ rabbit_ct_broker_helpers:set_permissions(Config, User, <<"/">>,
+ <<".*">>, %% configure
+ <<"^banana.*">>, %% write
+ <<"^banana.*">> %% read
+ ),
+ run(Config, [ {dotnet, "access_failure"} ]).
+
+access_failure_not_allowed(Config) ->
+ User = <<"access_failure_not_allowed">>,
+ rabbit_ct_broker_helpers:add_user(Config, User, <<"boo">>),
+ run(Config, [ {dotnet, "access_failure_not_allowed"} ]).
+
+access_failure_send(Config) ->
+ User = <<"access_failure_send">>,
+ rabbit_ct_broker_helpers:add_user(Config, User, <<"boo">>),
+ rabbit_ct_broker_helpers:set_permissions(Config, User, <<"/">>,
+ <<".*">>, %% configure
+ <<"^banana.*">>, %% write
+ <<"^banana.*">> %% read
+ ),
+ run(Config, [ {dotnet, "access_failure_send"} ]).
+
+run(Config, Flavors) ->
+ ClientLibrary = ?config(amqp10_client_library, Config),
+ Fun = case ClientLibrary of
+ dotnet -> fun run_dotnet_test/2;
+ java -> fun run_java_test/2
+ end,
+ case proplists:get_value(ClientLibrary, Flavors) of
+        undefined -> ok;
+ TestName -> Fun(Config, TestName)
+ end.
+
+run_dotnet_test(Config, Method) ->
+ TestProjectDir = ?config(dotnet_test_project_dir, Config),
+ Uri = rabbit_ct_broker_helpers:node_uri(Config, 0),
+ Ret = rabbit_ct_helpers:exec(["dotnet", "run", "--", Method, Uri ],
+ [
+ {cd, TestProjectDir}
+ ]),
+ {ok, _} = Ret.
+
+run_java_test(Config, Class) ->
+ TestProjectDir = ?config(maven_test_project_dir, Config),
+ Ret = rabbit_ct_helpers:exec([
+ TestProjectDir ++ "/mvnw",
+ "test",
+ {"-Dtest=~s", [Class]},
+ {"-Drmq_broker_uri=~s", [rabbit_ct_broker_helpers:node_uri(Config, 0)]}
+ ],
+ [{cd, TestProjectDir}]),
+ {ok, _} = Ret.
diff --git a/deps/rabbitmq_amqp1_0/test/system_SUITE_data/fsharp-tests/Program.fs b/deps/rabbitmq_amqp1_0/test/system_SUITE_data/fsharp-tests/Program.fs
new file mode 100755
index 0000000000..19aa1009dd
--- /dev/null
+++ b/deps/rabbitmq_amqp1_0/test/system_SUITE_data/fsharp-tests/Program.fs
@@ -0,0 +1,481 @@
+// Learn more about F# at http://fsharp.org
+
+open System
+open System.Threading
+open Amqp
+open Amqp.Sasl
+open Amqp.Framing
+open Amqp.Types
+
+let sleep (i: int) = System.Threading.Thread.Sleep i
+
+[<AutoOpen>]
+module RabbitClient =
+ open RabbitMQ.Client
+
+ let consume (m: IModel) queue autoAck f =
+ let consumer =
+ { new DefaultBasicConsumer(m) with
+ member x.HandleBasicDeliver(consumerTag,
+ deliveryTag,
+ redelivered,
+ exchange,
+ routingKey,
+ props,
+ body) =
+ f deliveryTag props body }
+ m.ContinuationTimeout <- (TimeSpan.FromSeconds 115.)
+ (* m.BasicQos(0u, 100us, false) |> ignore *)
+ let consumerTag = m.BasicConsume(queue, autoAck, consumer)
+ { new System.IDisposable with
+ member __.Dispose () =
+ m.BasicCancel(consumerTag) }
+
+[<AutoOpen>]
+module AmqpClient =
+ type AmqpConnection =
+ { Conn : Connection
+ Session: Session }
+ interface IDisposable with
+ member x.Dispose() =
+ try
+ x.Conn.Close()
+ x.Session.Close()
+ with _ -> ()
+
+ let connect uri =
+ let c = Address uri |> Connection
+ let s = Session c
+ { Conn = c; Session = s }
+
+ let connectWithOpen uri opn =
+ let c = Connection(Address uri, null, opn, null)
+ let s = Session c
+ { Conn = c; Session = s }
+
+ let senderReceiver ac name address =
+        let s = SenderLink(ac.Session, name + "-sender", address)
+ let r = ReceiverLink(ac.Session, name + "-receiver", address)
+ r.SetCredit(100, true)
+ s, r
+
+ let receive (receiver: ReceiverLink) =
+ let rtd = receiver.Receive()
+ receiver.Accept rtd
+ rtd
+
+ let amqpSequence xs =
+ let l = Amqp.Types.List();
+ xs |> List.iter (l.Add >> ignore)
+ AmqpSequence(List = l)
+
+[<AutoOpen>]
+module Test =
+ let assertEqual a b =
+ if a <> b then
+ failwith (sprintf "Expected: %A\r\nGot: %A" a b)
+
+ let assertTrue b =
+ if not b then
+            failwith "Expected true, got false!"
+
+ let sampleTypes =
+ ["hi" :> obj
+ "" :> obj
+ "hi"B :> obj
+ ""B :> obj
+ Array.create 1000 50uy :> obj
+ true :> obj
+ 0y :> obj
+ 0uy :> obj
+ Byte.MaxValue :> obj
+ 0s :> obj
+ Int16.MaxValue :> obj
+ 0 :> obj
+ Int32.MaxValue :> obj
+ 0L :> obj
+ Int64.MaxValue :> obj
+ 0us :> obj
+ UInt16.MaxValue :> obj
+ 0u :> obj
+ UInt32.MaxValue :> obj
+ 0ul :> obj
+ UInt64.MaxValue :> obj
+ null :> obj
+ "\uFFF9" :> obj
+ Amqp.Types.Symbol("Symbol") :> obj
+ DateTime.Parse("2008-11-01T19:35:00.0000000Z").ToUniversalTime() :> obj
+ Guid("f275ea5e-0c57-4ad7-b11a-b20c563d3b71") :> obj
+ ]
+
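+    // Attach a receiver built from the supplied attach frame and wait for
+    // the broker's verdict: a session close records its error condition, a
+    // clean attach clears it. cond = null means the attach should succeed.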
+ let testOutcome uri (attach: Attach) (cond: string) =
+ use ac = connect uri
+ let trySet (mre: AutoResetEvent) =
+ try mre.Set() |> ignore with _ -> ()
+
+ use mre = new System.Threading.AutoResetEvent(false)
+ let mutable errorName = null
+ ac.Session.add_Closed (
+ new ClosedCallback (fun o err -> errorName <- string err.Condition; trySet mre))
+
+ let attached = new OnAttached (
+ fun l attach -> errorName <- null; trySet mre)
+
+ let receiver = ReceiverLink(ac.Session, "test-receiver", attach, attached)
+ mre.WaitOne(1000) |> ignore
+ if cond = null then
+ receiver.Close()
+ assertEqual cond errorName
+
+ let roundtrip uri =
+ use c = connect uri
+ let sender, receiver = senderReceiver c "test" "roundtrip-q"
+ for body in sampleTypes do
+ let corr = "correlation"
+ new Message(body,
+ Header = Header(Ttl = 500u),
+ Properties = new Properties(CorrelationId = corr))
+ |> sender.Send
+ let rtd = receive receiver
+ assertEqual body rtd.Body
+ assertTrue (rtd.Header.Ttl <= 500u)
+ assertEqual rtd.Properties.CorrelationId corr
+ ()
+
+ open RabbitMQ.Client
+
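+    // Interop check: publish over AMQP 1.0, then consume the same queue over
+    // AMQP 0-9-1 and verify the correlation id survives the translation.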
+ let roundtrip_to_amqp_091 uri =
+ use c = connect uri
+ let q = "roundtrip-091-q"
+        let corr = "correlation"
+        let sender = SenderLink(c.Session, q + "-sender", q)
+ new Message("hi"B, Header = Header(),
+ Properties = new Properties(CorrelationId = corr))
+ |> sender.Send
+ System.Threading.Thread.Sleep 500
+
+ let cf = ConnectionFactory()
+ cf.Uri <- Uri uri
+ use c = cf.CreateConnection()
+ use m = c.CreateModel()
+ use h = new AutoResetEvent(false)
+ let mutable id : string = null
+ let con = consume m q false (fun deliveryTag props body ->
+ printfn "got %A" props.CorrelationId
+ id <- props.CorrelationId
+ h.Set() |> ignore
+ m.BasicAck(deliveryTag, false) |> ignore)
+
+ h.WaitOne() |> ignore
+ assertEqual id corr
+ ()
+
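+    // Attach sources declaring each standard outcome as the default; the
+    // broker is expected to accept all of them.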
+ let defaultOutcome uri =
+ for (defOut, cond, defObj) in
+ ["amqp:accepted:list", null, Accepted() :> Outcome
+ "amqp:rejected:list", null, Rejected() :> Outcome
+ "amqp:released:list", null, Released() :> Outcome] do
+
+ let source = new Source(Address = "default_outcome_q",
+ DefaultOutcome = defObj)
+ let attach = new Attach (Source = source,
+ Target = Target())
+
+ testOutcome uri attach cond
+
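+    // Declare the outcomes supported by the source: the standard set
+    // attaches cleanly, an unknown outcome fails with amqp:not-implemented.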
+ let outcomes uri =
+ for (outcome, cond) in
+ ["amqp:accepted:list", null
+ "amqp:rejected:list", null
+ "amqp:released:list", null
+ "amqp:modified:list", null
+ "amqp:madeup:list", "amqp:not-implemented"] do
+
+ let source = new Source(Address = "outcomes_q",
+ Outcomes = [| Symbol outcome |])
+ let attach = new Attach (Source = source,
+ Target = Target())
+
+ testOutcome uri attach cond
+
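+    // Negotiate a small max-frame-size in the open performative so bodies
+    // larger than one frame have to be split across multiple transfers.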
+ let fragmentation uri =
+ for frameSize, size in
+ [512u, 512
+ 512u, 600
+ 512u, 1024
+ 1024u, 1024] do
+ let addr = Address uri
+ let opn = Open(ContainerId = Guid.NewGuid().ToString(),
+ HostName = addr.Host, ChannelMax = 256us,
+ MaxFrameSize = frameSize)
+ use c = connectWithOpen uri opn
+        let sender, receiver = senderReceiver c "test" "fragmentation-q"
+ let m = new Message(String.replicate size "a")
+ sender.Send m
+ let m' = receive receiver
+ assertEqual (m.Body) (m'.Body)
+
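+    // Round-trip a message-annotations section and check it arrives intact.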
+ let messageAnnotations uri =
+ use c = connect uri
+ let sender, receiver = senderReceiver c "test" "annotations-q"
+ let ann = MessageAnnotations()
+ let k1 = Symbol "key1"
+ let k2 = Symbol "key2"
+        ann.[k1] <- "value1"
+        ann.[k2] <- "value2"
+ let m = new Message("testing annotations", MessageAnnotations = ann)
+ sender.Send m
+ let m' = receive receiver
+
+ assertEqual m.Body m'.Body
+ assertEqual (m.MessageAnnotations.Descriptor) (m'.MessageAnnotations.Descriptor)
+ assertEqual 2 (m'.MessageAnnotations.Map.Count)
+ assertTrue (m.MessageAnnotations.[k1] = m'.MessageAnnotations.[k1])
+ assertTrue (m.MessageAnnotations.[k2] = m'.MessageAnnotations.[k2])
+
+ let footer uri =
+ use c = connect uri
+ let sender, receiver = senderReceiver c "test" "footer-q"
+ let footer = Footer()
+ let k1 = Symbol "key1"
+ let k2 = Symbol "key2"
+        footer.[k1] <- "value1"
+        footer.[k2] <- "value2"
+        let m = new Message("testing footer", Footer = footer)
+ sender.Send m
+ let m' = receive receiver
+
+ assertEqual m.Body m'.Body
+ assertEqual (m.Footer.Descriptor) (m'.Footer.Descriptor)
+ assertEqual 2 (m'.Footer.Map.Count)
+ assertTrue (m.Footer.[k1] = m'.Footer.[k1])
+ assertTrue (m.Footer.[k2] = m'.Footer.[k2])
+
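+    // Round-trip all sample AMQP types inside a single amqp-sequence body.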
+ let datatypes uri =
+ use c = connect uri
+ let sender, receiver = senderReceiver c "test" "datatypes-q"
+ let aSeq = amqpSequence sampleTypes
+ (new Message(aSeq)) |> sender.Send
+ let rtd = receive receiver
+ let amqpSeq = rtd.Body :?> AmqpSequence
+ for a in amqpSeq.List do
+ List.exists ((=) a) sampleTypes |> assertTrue
+
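+    // A rejected delivery is settled and must not be delivered again, so
+    // the follow-up receive is expected to time out.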
+ let reject uri =
+ use c = connect uri
+ let sender, receiver = senderReceiver c "test" "reject-q"
+ new Message "testing reject" |> sender.Send
+ let m = receiver.Receive()
+ receiver.Reject(m)
+ assertEqual null (receiver.Receive(TimeSpan.FromMilliseconds 100.))
+
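+    // Receive without settling, tear down the link and session, then
+    // re-attach: the message must come back with first-acquirer cleared.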
+ let redelivery uri =
+ use c = connect uri
+ let sender, receiver = senderReceiver c "test" "redelivery-q"
+ new Message "testing redelivery" |> sender.Send
+ let m = receiver.Receive()
+ assertTrue (m.Header.FirstAcquirer)
+ receiver.Close()
+ c.Session.Close()
+ let session = Session(c.Conn)
+ let receiver = ReceiverLink(session, "test-receiver", "redelivery-q")
+
+ let m' = receive receiver
+ assertEqual (m.Body :?> string) (m'.Body :?> string)
+ assertTrue (not m'.Header.FirstAcquirer)
+ assertEqual null (receiver.Receive(TimeSpan.FromMilliseconds 100.))
+ session.Close()
+
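+    // Each tuple is (target address, source address, subject used as the
+    // routing key, whether the message is expected to arrive).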
+ let routing uri =
+ for target, source, routingKey, succeed in
+ ["/queue/test", "test", "", true
+ "test", "/queue/test", "", true
+ "test", "test", "", true
+
+ "/topic/a.b.c.d", "/topic/#.c.*", "", true
+ "/exchange/amq.topic", "/topic/#.c.*", "a.b.c.d", true
+ "/topic/w.x.y.z", "/exchange/amq.topic/#.y.*", "", true
+ "/exchange/amq.topic", "/exchange/amq.topic/#.y.*", "w.x.y.z", true
+
+ "/exchange/amq.fanout", "/exchange/amq.fanout/", "", true
+ "/exchange/amq.direct", "/exchange/amq.direct/", "", true
+ "/exchange/amq.direct", "/exchange/amq.direct/a", "a", true
+
+ (* FIXME: The following three tests rely on the queue "test"
+ * created by previous tests in this function. *)
+ "/queue/test", "/amq/queue/test", "", true
+ "/amq/queue/test", "/queue/test", "", true
+ "/amq/queue/test", "/amq/queue/test", "", true
+
+ (* The following tests verify that a queue created out-of-band
+ * in AMQP is reachable from the AMQP 1.0 world. Queues are created
+ * from the common_test suite. *)
+ "/amq/queue/transient_q", "/amq/queue/transient_q", "", true
+ "/amq/queue/durable_q", "/amq/queue/durable_q", "", true
+ "/amq/queue/autodel_q", "/amq/queue/autodel_q", "", true] do
+
+ let rnd = Random()
+ use c = connect uri
+ let sender = SenderLink(c.Session, "test-sender", target)
+ let receiver = ReceiverLink(c.Session, "test-receiver", source)
+ receiver.SetCredit(100, true)
+ use m = new Message(rnd.Next(10000), Properties = Properties(Subject = routingKey))
+ sender.Send m
+ (* printfn "%s %s %s %A" target source routingKey succeed *)
+
+ if succeed then
+ let m' = receiver.Receive(TimeSpan.FromMilliseconds 3000.)
+ receiver.Accept m'
+ assertTrue (m' <> null)
+ assertEqual (m.Body :?> int) (m'.Body :?> int)
+ else
+ use m' = receiver.Receive(TimeSpan.FromMilliseconds 100.)
+ assertEqual null m'
+
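+    // Attaching to a missing exchange must fail with amqp:not-found; an
+    // address that does not parse must fail with amqp:invalid-field.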
+ let invalidRoutes uri =
+
+ for dest, cond in
+ ["/exchange/missing", "amqp:not-found"
+ "/fruit/orange", "amqp:invalid-field"] do
+ use ac = connect uri
+ let trySet (mre: AutoResetEvent) =
+ try mre.Set() |> ignore with _ -> ()
+
+ let mutable errorName = null
+ use mre = new System.Threading.AutoResetEvent(false)
+ ac.Session.add_Closed (
+ new ClosedCallback (fun _ err -> errorName <- err.Condition; trySet mre))
+
+ let attached = new OnAttached (fun _ _ -> trySet mre)
+
+ let sender = new SenderLink(ac.Session, "test-sender",
+ Target(Address = dest), attached);
+ mre.WaitOne() |> ignore
+
+ try
+ let receiver = ReceiverLink(ac.Session, "test-receiver", dest)
+ receiver.Close()
+ with
+ | :? Amqp.AmqpException as ae ->
+ assertEqual (ae.Error.Condition) (Symbol cond)
+        | _ -> failwith "unexpected exception thrown"
+
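+    // Bogus credentials: opening the connection must raise an AmqpException.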
+ let authFailure uri =
+ try
+ let u = Uri uri
+ let uri = sprintf "amqp://blah:blah@%s:%i" u.Host u.Port
+ let c = Connection(Address uri)
+ failwith "expected exception not received"
+ with
+ | :? Amqp.AmqpException ->
+ ()
+
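+    // The access_failure_send user may only use ^banana.* resources (set up
+    // by the common_test suite), so sending to an existing queue must fail.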
+ let accessFailureSend uri =
+ try
+ let u = Uri uri
+            let uri = sprintf "amqp://access_failure_send:boo@%s:%i" u.Host u.Port
+ use ac = connect uri
+ let dest = "/amq/queue/test"
+ ac.Session.add_Closed (
+                new ClosedCallback (fun _ err -> printfn "session err %A" err.Condition))
+ let sender = new SenderLink(ac.Session, "test-sender", dest)
+ sender.Send(new Message "hi", TimeSpan.FromSeconds 15.)
+
+ failwith "expected exception not received"
+ with
+ | :? Amqp.AmqpException as ex ->
+ printfn "Exception %A" ex
+ ()
+
+ let accessFailure uri =
+ try
+ let u = Uri uri
+ let uri = sprintf "amqp://access_failure:boo@%s:%i" u.Host u.Port
+ use ac = connect uri
+ let dest = "/amq/queue/test"
+ let receiver = ReceiverLink(ac.Session, "test-receiver", dest)
+ receiver.Close()
+ failwith "expected exception not received"
+ with
+ | :? Amqp.AmqpException as ex ->
+ printfn "Exception %A" ex
+ ()
+
+ let accessFailureNotAllowed uri =
+ try
+ let u = Uri uri
+ let uri = sprintf "amqp://access_failure_not_allowed:boo@%s:%i" u.Host u.Port
+ use ac = connect uri
+ let dest = "/amq/queue/test"
+ let receiver = ReceiverLink(ac.Session, "test-receiver", dest)
+ receiver.Close()
+ failwith "expected exception not received"
+ with
+ | :? Amqp.AmqpException as ex ->
+ printfn "Exception %A" ex
+ ()
+
+let (|AsLower|) (s: string) =
+ match s with
+ | null -> null
+ | _ -> s.ToLowerInvariant()
+
+[<EntryPoint>]
+let main argv =
+ match List.ofArray argv with
+ | [AsLower "auth_failure"; uri] ->
+ authFailure uri
+ 0
+ | [AsLower "access_failure"; uri] ->
+ accessFailure uri
+ 0
+ | [AsLower "access_failure_not_allowed"; uri] ->
+ accessFailureNotAllowed uri
+ 0
+ | [AsLower "access_failure_send"; uri] ->
+ accessFailureSend uri
+ 0
+ | [AsLower "roundtrip"; uri] ->
+ roundtrip uri
+ 0
+ | [AsLower "roundtrip_to_amqp_091"; uri] ->
+ roundtrip_to_amqp_091 uri
+ 0
+ | [AsLower "data_types"; uri] ->
+ datatypes uri
+ 0
+ | [AsLower "default_outcome"; uri] ->
+ defaultOutcome uri
+ 0
+ | [AsLower "outcomes"; uri] ->
+ outcomes uri
+ 0
+ | [AsLower "fragmentation"; uri] ->
+ fragmentation uri
+ 0
+ | [AsLower "message_annotations"; uri] ->
+ messageAnnotations uri
+ 0
+ | [AsLower "footer"; uri] ->
+ footer uri
+ 0
+ | [AsLower "reject"; uri] ->
+ reject uri
+ 0
+ | [AsLower "redelivery"; uri] ->
+ redelivery uri
+ 0
+ | [AsLower "routing"; uri] ->
+ routing uri
+ 0
+ | [AsLower "invalid_routes"; uri] ->
+ invalidRoutes uri
+ 0
+ | _ ->
+ printfn "test %A not found. usage: <test> <uri>" argv
+ 1
diff --git a/deps/rabbitmq_amqp1_0/test/system_SUITE_data/fsharp-tests/fsharp-tests.fsproj b/deps/rabbitmq_amqp1_0/test/system_SUITE_data/fsharp-tests/fsharp-tests.fsproj
new file mode 100755
index 0000000000..85b60b9945
--- /dev/null
+++ b/deps/rabbitmq_amqp1_0/test/system_SUITE_data/fsharp-tests/fsharp-tests.fsproj
@@ -0,0 +1,14 @@
+<Project Sdk="Microsoft.NET.Sdk">
+ <PropertyGroup>
+ <OutputType>Exe</OutputType>
+ <TargetFramework>netcoreapp2.0</TargetFramework>
+ </PropertyGroup>
+ <ItemGroup>
+ <Compile Include="Program.fs" />
+ </ItemGroup>
+ <ItemGroup>
+ <PackageReference Include="RabbitMQ.Client" Version="5.*" />
+ <PackageReference Include="AmqpNetLite" Version="2.1.2" />
+ <PackageReference Include="AmqpNetLite.Serialization" Version="2.1.2" />
+ </ItemGroup>
+</Project>
diff --git a/deps/rabbitmq_amqp1_0/test/system_SUITE_data/java-tests/.gitignore b/deps/rabbitmq_amqp1_0/test/system_SUITE_data/java-tests/.gitignore
new file mode 100644
index 0000000000..b83d22266a
--- /dev/null
+++ b/deps/rabbitmq_amqp1_0/test/system_SUITE_data/java-tests/.gitignore
@@ -0,0 +1 @@
+/target/
diff --git a/deps/rabbitmq_amqp1_0/test/system_SUITE_data/java-tests/.mvn/wrapper/MavenWrapperDownloader.java b/deps/rabbitmq_amqp1_0/test/system_SUITE_data/java-tests/.mvn/wrapper/MavenWrapperDownloader.java
new file mode 100755
index 0000000000..2e394d5b34
--- /dev/null
+++ b/deps/rabbitmq_amqp1_0/test/system_SUITE_data/java-tests/.mvn/wrapper/MavenWrapperDownloader.java
@@ -0,0 +1,110 @@
+/*
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ https://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an
+"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied. See the License for the
+specific language governing permissions and limitations
+under the License.
+*/
+
+import java.net.*;
+import java.io.*;
+import java.nio.channels.*;
+import java.util.Properties;
+
+public class MavenWrapperDownloader {
+
+ /**
+ * Default URL to download the maven-wrapper.jar from, if no 'downloadUrl' is provided.
+ */
+ private static final String DEFAULT_DOWNLOAD_URL =
+ "https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.4.2/maven-wrapper-0.4.2.jar";
+
+ /**
+ * Path to the maven-wrapper.properties file, which might contain a downloadUrl property to
+ * use instead of the default one.
+ */
+ private static final String MAVEN_WRAPPER_PROPERTIES_PATH =
+ ".mvn/wrapper/maven-wrapper.properties";
+
+ /**
+ * Path where the maven-wrapper.jar will be saved to.
+ */
+ private static final String MAVEN_WRAPPER_JAR_PATH =
+ ".mvn/wrapper/maven-wrapper.jar";
+
+ /**
+ * Name of the property which should be used to override the default download url for the wrapper.
+ */
+ private static final String PROPERTY_NAME_WRAPPER_URL = "wrapperUrl";
+
+ public static void main(String args[]) {
+ System.out.println("- Downloader started");
+ File baseDirectory = new File(args[0]);
+ System.out.println("- Using base directory: " + baseDirectory.getAbsolutePath());
+
+ // If the maven-wrapper.properties exists, read it and check if it contains a custom
+ // wrapperUrl parameter.
+ File mavenWrapperPropertyFile = new File(baseDirectory, MAVEN_WRAPPER_PROPERTIES_PATH);
+ String url = DEFAULT_DOWNLOAD_URL;
+ if(mavenWrapperPropertyFile.exists()) {
+ FileInputStream mavenWrapperPropertyFileInputStream = null;
+ try {
+ mavenWrapperPropertyFileInputStream = new FileInputStream(mavenWrapperPropertyFile);
+ Properties mavenWrapperProperties = new Properties();
+ mavenWrapperProperties.load(mavenWrapperPropertyFileInputStream);
+ url = mavenWrapperProperties.getProperty(PROPERTY_NAME_WRAPPER_URL, url);
+ } catch (IOException e) {
+ System.out.println("- ERROR loading '" + MAVEN_WRAPPER_PROPERTIES_PATH + "'");
+ } finally {
+ try {
+ if(mavenWrapperPropertyFileInputStream != null) {
+ mavenWrapperPropertyFileInputStream.close();
+ }
+ } catch (IOException e) {
+ // Ignore ...
+ }
+ }
+ }
+        System.out.println("- Downloading from: " + url);
+
+ File outputFile = new File(baseDirectory.getAbsolutePath(), MAVEN_WRAPPER_JAR_PATH);
+ if(!outputFile.getParentFile().exists()) {
+ if(!outputFile.getParentFile().mkdirs()) {
+ System.out.println(
+                "- ERROR creating output directory '" + outputFile.getParentFile().getAbsolutePath() + "'");
+ }
+ }
+ System.out.println("- Downloading to: " + outputFile.getAbsolutePath());
+ try {
+ downloadFileFromURL(url, outputFile);
+ System.out.println("Done");
+ System.exit(0);
+ } catch (Throwable e) {
+ System.out.println("- Error downloading");
+ e.printStackTrace();
+ System.exit(1);
+ }
+ }
+
+ private static void downloadFileFromURL(String urlString, File destination) throws Exception {
+ URL website = new URL(urlString);
+ ReadableByteChannel rbc;
+ rbc = Channels.newChannel(website.openStream());
+ FileOutputStream fos = new FileOutputStream(destination);
+ fos.getChannel().transferFrom(rbc, 0, Long.MAX_VALUE);
+ fos.close();
+ rbc.close();
+ }
+
+}
diff --git a/deps/rabbitmq_amqp1_0/test/system_SUITE_data/java-tests/.mvn/wrapper/maven-wrapper.jar b/deps/rabbitmq_amqp1_0/test/system_SUITE_data/java-tests/.mvn/wrapper/maven-wrapper.jar
new file mode 100755
index 0000000000..01e6799737
--- /dev/null
+++ b/deps/rabbitmq_amqp1_0/test/system_SUITE_data/java-tests/.mvn/wrapper/maven-wrapper.jar
Binary files differ
diff --git a/deps/rabbitmq_amqp1_0/test/system_SUITE_data/java-tests/.mvn/wrapper/maven-wrapper.properties b/deps/rabbitmq_amqp1_0/test/system_SUITE_data/java-tests/.mvn/wrapper/maven-wrapper.properties
new file mode 100755
index 0000000000..00d32aab1d
--- /dev/null
+++ b/deps/rabbitmq_amqp1_0/test/system_SUITE_data/java-tests/.mvn/wrapper/maven-wrapper.properties
@@ -0,0 +1 @@
+distributionUrl=https://repo.maven.apache.org/maven2/org/apache/maven/apache-maven/3.5.4/apache-maven-3.5.4-bin.zip \ No newline at end of file
diff --git a/deps/rabbitmq_amqp1_0/test/system_SUITE_data/java-tests/mvnw b/deps/rabbitmq_amqp1_0/test/system_SUITE_data/java-tests/mvnw
new file mode 100755
index 0000000000..8b9da3b8b6
--- /dev/null
+++ b/deps/rabbitmq_amqp1_0/test/system_SUITE_data/java-tests/mvnw
@@ -0,0 +1,286 @@
+#!/bin/sh
+# ----------------------------------------------------------------------------
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+# ----------------------------------------------------------------------------
+
+# ----------------------------------------------------------------------------
+# Maven2 Start Up Batch script
+#
+# Required ENV vars:
+# ------------------
+# JAVA_HOME - location of a JDK home dir
+#
+# Optional ENV vars
+# -----------------
+# M2_HOME - location of maven2's installed home dir
+# MAVEN_OPTS - parameters passed to the Java VM when running Maven
+# e.g. to debug Maven itself, use
+# set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000
+# MAVEN_SKIP_RC - flag to disable loading of mavenrc files
+# ----------------------------------------------------------------------------
+
+if [ -z "$MAVEN_SKIP_RC" ] ; then
+
+ if [ -f /etc/mavenrc ] ; then
+ . /etc/mavenrc
+ fi
+
+ if [ -f "$HOME/.mavenrc" ] ; then
+ . "$HOME/.mavenrc"
+ fi
+
+fi
+
+# OS specific support. $var _must_ be set to either true or false.
+cygwin=false;
+darwin=false;
+mingw=false
+case "`uname`" in
+ CYGWIN*) cygwin=true ;;
+ MINGW*) mingw=true;;
+ Darwin*) darwin=true
+ # Use /usr/libexec/java_home if available, otherwise fall back to /Library/Java/Home
+ # See https://developer.apple.com/library/mac/qa/qa1170/_index.html
+ if [ -z "$JAVA_HOME" ]; then
+ if [ -x "/usr/libexec/java_home" ]; then
+ export JAVA_HOME="`/usr/libexec/java_home`"
+ else
+ export JAVA_HOME="/Library/Java/Home"
+ fi
+ fi
+ ;;
+esac
+
+if [ -z "$JAVA_HOME" ] ; then
+ if [ -r /etc/gentoo-release ] ; then
+ JAVA_HOME=`java-config --jre-home`
+ fi
+fi
+
+if [ -z "$M2_HOME" ] ; then
+ ## resolve links - $0 may be a link to maven's home
+ PRG="$0"
+
+ # need this for relative symlinks
+ while [ -h "$PRG" ] ; do
+ ls=`ls -ld "$PRG"`
+ link=`expr "$ls" : '.*-> \(.*\)$'`
+ if expr "$link" : '/.*' > /dev/null; then
+ PRG="$link"
+ else
+ PRG="`dirname "$PRG"`/$link"
+ fi
+ done
+
+ saveddir=`pwd`
+
+ M2_HOME=`dirname "$PRG"`/..
+
+ # make it fully qualified
+ M2_HOME=`cd "$M2_HOME" && pwd`
+
+ cd "$saveddir"
+ # echo Using m2 at $M2_HOME
+fi
+
+# For Cygwin, ensure paths are in UNIX format before anything is touched
+if $cygwin ; then
+ [ -n "$M2_HOME" ] &&
+ M2_HOME=`cygpath --unix "$M2_HOME"`
+ [ -n "$JAVA_HOME" ] &&
+ JAVA_HOME=`cygpath --unix "$JAVA_HOME"`
+ [ -n "$CLASSPATH" ] &&
+ CLASSPATH=`cygpath --path --unix "$CLASSPATH"`
+fi
+
+# For Mingw, ensure paths are in UNIX format before anything is touched
+if $mingw ; then
+ [ -n "$M2_HOME" ] &&
+ M2_HOME="`(cd "$M2_HOME"; pwd)`"
+ [ -n "$JAVA_HOME" ] &&
+ JAVA_HOME="`(cd "$JAVA_HOME"; pwd)`"
+ # TODO classpath?
+fi
+
+if [ -z "$JAVA_HOME" ]; then
+ javaExecutable="`which javac`"
+ if [ -n "$javaExecutable" ] && ! [ "`expr \"$javaExecutable\" : '\([^ ]*\)'`" = "no" ]; then
+ # readlink(1) is not available as standard on Solaris 10.
+ readLink=`which readlink`
+ if [ ! `expr "$readLink" : '\([^ ]*\)'` = "no" ]; then
+ if $darwin ; then
+ javaHome="`dirname \"$javaExecutable\"`"
+ javaExecutable="`cd \"$javaHome\" && pwd -P`/javac"
+ else
+ javaExecutable="`readlink -f \"$javaExecutable\"`"
+ fi
+ javaHome="`dirname \"$javaExecutable\"`"
+ javaHome=`expr "$javaHome" : '\(.*\)/bin'`
+ JAVA_HOME="$javaHome"
+ export JAVA_HOME
+ fi
+ fi
+fi
+
+if [ -z "$JAVACMD" ] ; then
+ if [ -n "$JAVA_HOME" ] ; then
+ if [ -x "$JAVA_HOME/jre/sh/java" ] ; then
+ # IBM's JDK on AIX uses strange locations for the executables
+ JAVACMD="$JAVA_HOME/jre/sh/java"
+ else
+ JAVACMD="$JAVA_HOME/bin/java"
+ fi
+ else
+ JAVACMD="`which java`"
+ fi
+fi
+
+if [ ! -x "$JAVACMD" ] ; then
+ echo "Error: JAVA_HOME is not defined correctly." >&2
+ echo " We cannot execute $JAVACMD" >&2
+ exit 1
+fi
+
+if [ -z "$JAVA_HOME" ] ; then
+ echo "Warning: JAVA_HOME environment variable is not set."
+fi
+
+CLASSWORLDS_LAUNCHER=org.codehaus.plexus.classworlds.launcher.Launcher
+
+# traverses directory structure from process work directory to filesystem root
+# first directory with .mvn subdirectory is considered project base directory
+find_maven_basedir() {
+
+ if [ -z "$1" ]
+ then
+ echo "Path not specified to find_maven_basedir"
+ return 1
+ fi
+
+ basedir="$1"
+ wdir="$1"
+ while [ "$wdir" != '/' ] ; do
+ if [ -d "$wdir"/.mvn ] ; then
+ basedir=$wdir
+ break
+ fi
+ # workaround for JBEAP-8937 (on Solaris 10/Sparc)
+ if [ -d "${wdir}" ]; then
+ wdir=`cd "$wdir/.."; pwd`
+ fi
+ # end of workaround
+ done
+ echo "${basedir}"
+}
+
+# concatenates all lines of a file
+concat_lines() {
+ if [ -f "$1" ]; then
+ echo "$(tr -s '\n' ' ' < "$1")"
+ fi
+}
+
+BASE_DIR=`find_maven_basedir "$(pwd)"`
+if [ -z "$BASE_DIR" ]; then
+ exit 1;
+fi
+
+##########################################################################################
+# Extension to allow automatically downloading the maven-wrapper.jar from Maven-central
+# This allows using the maven wrapper in projects that prohibit checking in binary data.
+##########################################################################################
+if [ -r "$BASE_DIR/.mvn/wrapper/maven-wrapper.jar" ]; then
+ if [ "$MVNW_VERBOSE" = true ]; then
+ echo "Found .mvn/wrapper/maven-wrapper.jar"
+ fi
+else
+ if [ "$MVNW_VERBOSE" = true ]; then
+ echo "Couldn't find .mvn/wrapper/maven-wrapper.jar, downloading it ..."
+ fi
+ jarUrl="https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.4.2/maven-wrapper-0.4.2.jar"
+ while IFS="=" read key value; do
+ case "$key" in (wrapperUrl) jarUrl="$value"; break ;;
+ esac
+ done < "$BASE_DIR/.mvn/wrapper/maven-wrapper.properties"
+ if [ "$MVNW_VERBOSE" = true ]; then
+ echo "Downloading from: $jarUrl"
+ fi
+ wrapperJarPath="$BASE_DIR/.mvn/wrapper/maven-wrapper.jar"
+
+ if command -v wget > /dev/null; then
+ if [ "$MVNW_VERBOSE" = true ]; then
+ echo "Found wget ... using wget"
+ fi
+ wget "$jarUrl" -O "$wrapperJarPath"
+ elif command -v curl > /dev/null; then
+ if [ "$MVNW_VERBOSE" = true ]; then
+ echo "Found curl ... using curl"
+ fi
+ curl -o "$wrapperJarPath" "$jarUrl"
+ else
+ if [ "$MVNW_VERBOSE" = true ]; then
+ echo "Falling back to using Java to download"
+ fi
+ javaClass="$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.java"
+ if [ -e "$javaClass" ]; then
+ if [ ! -e "$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.class" ]; then
+ if [ "$MVNW_VERBOSE" = true ]; then
+ echo " - Compiling MavenWrapperDownloader.java ..."
+ fi
+ # Compiling the Java class
+ ("$JAVA_HOME/bin/javac" "$javaClass")
+ fi
+ if [ -e "$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.class" ]; then
+ # Running the downloader
+ if [ "$MVNW_VERBOSE" = true ]; then
+ echo " - Running MavenWrapperDownloader.java ..."
+ fi
+ ("$JAVA_HOME/bin/java" -cp .mvn/wrapper MavenWrapperDownloader "$MAVEN_PROJECTBASEDIR")
+ fi
+ fi
+ fi
+fi
+##########################################################################################
+# End of extension
+##########################################################################################
+
+export MAVEN_PROJECTBASEDIR=${MAVEN_BASEDIR:-"$BASE_DIR"}
+if [ "$MVNW_VERBOSE" = true ]; then
+ echo $MAVEN_PROJECTBASEDIR
+fi
+MAVEN_OPTS="$(concat_lines "$MAVEN_PROJECTBASEDIR/.mvn/jvm.config") $MAVEN_OPTS"
+
+# For Cygwin, switch paths to Windows format before running java
+if $cygwin; then
+ [ -n "$M2_HOME" ] &&
+ M2_HOME=`cygpath --path --windows "$M2_HOME"`
+ [ -n "$JAVA_HOME" ] &&
+ JAVA_HOME=`cygpath --path --windows "$JAVA_HOME"`
+ [ -n "$CLASSPATH" ] &&
+ CLASSPATH=`cygpath --path --windows "$CLASSPATH"`
+ [ -n "$MAVEN_PROJECTBASEDIR" ] &&
+ MAVEN_PROJECTBASEDIR=`cygpath --path --windows "$MAVEN_PROJECTBASEDIR"`
+fi
+
+WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain
+
+exec "$JAVACMD" \
+ $MAVEN_OPTS \
+ -classpath "$MAVEN_PROJECTBASEDIR/.mvn/wrapper/maven-wrapper.jar" \
+ "-Dmaven.home=${M2_HOME}" "-Dmaven.multiModuleProjectDirectory=${MAVEN_PROJECTBASEDIR}" \
+ ${WRAPPER_LAUNCHER} $MAVEN_CONFIG "$@"
diff --git a/deps/rabbitmq_amqp1_0/test/system_SUITE_data/java-tests/mvnw.cmd b/deps/rabbitmq_amqp1_0/test/system_SUITE_data/java-tests/mvnw.cmd
new file mode 100755
index 0000000000..a5284c7939
--- /dev/null
+++ b/deps/rabbitmq_amqp1_0/test/system_SUITE_data/java-tests/mvnw.cmd
@@ -0,0 +1,161 @@
+@REM ----------------------------------------------------------------------------
+@REM Licensed to the Apache Software Foundation (ASF) under one
+@REM or more contributor license agreements. See the NOTICE file
+@REM distributed with this work for additional information
+@REM regarding copyright ownership. The ASF licenses this file
+@REM to you under the Apache License, Version 2.0 (the
+@REM "License"); you may not use this file except in compliance
+@REM with the License. You may obtain a copy of the License at
+@REM
+@REM https://www.apache.org/licenses/LICENSE-2.0
+@REM
+@REM Unless required by applicable law or agreed to in writing,
+@REM software distributed under the License is distributed on an
+@REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+@REM KIND, either express or implied. See the License for the
+@REM specific language governing permissions and limitations
+@REM under the License.
+@REM ----------------------------------------------------------------------------
+
+@REM ----------------------------------------------------------------------------
+@REM Maven2 Start Up Batch script
+@REM
+@REM Required ENV vars:
+@REM JAVA_HOME - location of a JDK home dir
+@REM
+@REM Optional ENV vars
+@REM M2_HOME - location of maven2's installed home dir
+@REM MAVEN_BATCH_ECHO - set to 'on' to enable the echoing of the batch commands
+@REM MAVEN_BATCH_PAUSE - set to 'on' to wait for a key stroke before ending
+@REM MAVEN_OPTS - parameters passed to the Java VM when running Maven
+@REM e.g. to debug Maven itself, use
+@REM set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000
+@REM MAVEN_SKIP_RC - flag to disable loading of mavenrc files
+@REM ----------------------------------------------------------------------------
+
+@REM Begin all REM lines with '@' in case MAVEN_BATCH_ECHO is 'on'
+@echo off
+@REM set title of command window
+title %0
+@REM enable echoing by setting MAVEN_BATCH_ECHO to 'on'
+@if "%MAVEN_BATCH_ECHO%" == "on" echo %MAVEN_BATCH_ECHO%
+
+@REM set %HOME% to equivalent of $HOME
+if "%HOME%" == "" (set "HOME=%HOMEDRIVE%%HOMEPATH%")
+
+@REM Execute a user defined script before this one
+if not "%MAVEN_SKIP_RC%" == "" goto skipRcPre
+@REM check for pre script, once with legacy .bat ending and once with .cmd ending
+if exist "%HOME%\mavenrc_pre.bat" call "%HOME%\mavenrc_pre.bat"
+if exist "%HOME%\mavenrc_pre.cmd" call "%HOME%\mavenrc_pre.cmd"
+:skipRcPre
+
+@setlocal
+
+set ERROR_CODE=0
+
+@REM To isolate internal variables from possible post scripts, we use another setlocal
+@setlocal
+
+@REM ==== START VALIDATION ====
+if not "%JAVA_HOME%" == "" goto OkJHome
+
+echo.
+echo Error: JAVA_HOME not found in your environment. >&2
+echo Please set the JAVA_HOME variable in your environment to match the >&2
+echo location of your Java installation. >&2
+echo.
+goto error
+
+:OkJHome
+if exist "%JAVA_HOME%\bin\java.exe" goto init
+
+echo.
+echo Error: JAVA_HOME is set to an invalid directory. >&2
+echo JAVA_HOME = "%JAVA_HOME%" >&2
+echo Please set the JAVA_HOME variable in your environment to match the >&2
+echo location of your Java installation. >&2
+echo.
+goto error
+
+@REM ==== END VALIDATION ====
+
+:init
+
+@REM Find the project base dir, i.e. the directory that contains the folder ".mvn".
+@REM Fallback to current working directory if not found.
+
+set MAVEN_PROJECTBASEDIR=%MAVEN_BASEDIR%
+IF NOT "%MAVEN_PROJECTBASEDIR%"=="" goto endDetectBaseDir
+
+set EXEC_DIR=%CD%
+set WDIR=%EXEC_DIR%
+:findBaseDir
+IF EXIST "%WDIR%"\.mvn goto baseDirFound
+cd ..
+IF "%WDIR%"=="%CD%" goto baseDirNotFound
+set WDIR=%CD%
+goto findBaseDir
+
+:baseDirFound
+set MAVEN_PROJECTBASEDIR=%WDIR%
+cd "%EXEC_DIR%"
+goto endDetectBaseDir
+
+:baseDirNotFound
+set MAVEN_PROJECTBASEDIR=%EXEC_DIR%
+cd "%EXEC_DIR%"
+
+:endDetectBaseDir
+
+IF NOT EXIST "%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config" goto endReadAdditionalConfig
+
+@setlocal EnableExtensions EnableDelayedExpansion
+for /F "usebackq delims=" %%a in ("%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config") do set JVM_CONFIG_MAVEN_PROPS=!JVM_CONFIG_MAVEN_PROPS! %%a
+@endlocal & set JVM_CONFIG_MAVEN_PROPS=%JVM_CONFIG_MAVEN_PROPS%
+
+:endReadAdditionalConfig
+
+SET MAVEN_JAVA_EXE="%JAVA_HOME%\bin\java.exe"
+set WRAPPER_JAR="%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.jar"
+set WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain
+
+set DOWNLOAD_URL="https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.4.2/maven-wrapper-0.4.2.jar"
+FOR /F "tokens=1,2 delims==" %%A IN (%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.properties) DO (
+ IF "%%A"=="wrapperUrl" SET DOWNLOAD_URL=%%B
+)
+
+@REM Extension to allow automatically downloading the maven-wrapper.jar from Maven-central
+@REM This allows using the maven wrapper in projects that prohibit checking in binary data.
+if exist %WRAPPER_JAR% (
+ echo Found %WRAPPER_JAR%
+) else (
+ echo Couldn't find %WRAPPER_JAR%, downloading it ...
+ echo Downloading from: %DOWNLOAD_URL%
+ powershell -Command "(New-Object Net.WebClient).DownloadFile('%DOWNLOAD_URL%', '%WRAPPER_JAR%')"
+ echo Finished downloading %WRAPPER_JAR%
+)
+@REM End of extension
+
+%MAVEN_JAVA_EXE% %JVM_CONFIG_MAVEN_PROPS% %MAVEN_OPTS% %MAVEN_DEBUG_OPTS% -classpath %WRAPPER_JAR% "-Dmaven.multiModuleProjectDirectory=%MAVEN_PROJECTBASEDIR%" %WRAPPER_LAUNCHER% %MAVEN_CONFIG% %*
+if ERRORLEVEL 1 goto error
+goto end
+
+:error
+set ERROR_CODE=1
+
+:end
+@endlocal & set ERROR_CODE=%ERROR_CODE%
+
+if not "%MAVEN_SKIP_RC%" == "" goto skipRcPost
+@REM check for post script, once with legacy .bat ending and once with .cmd ending
+if exist "%HOME%\mavenrc_post.bat" call "%HOME%\mavenrc_post.bat"
+if exist "%HOME%\mavenrc_post.cmd" call "%HOME%\mavenrc_post.cmd"
+:skipRcPost
+
+@REM pause the script if MAVEN_BATCH_PAUSE is set to 'on'
+if "%MAVEN_BATCH_PAUSE%" == "on" pause
+
+if "%MAVEN_TERMINATE_CMD%" == "on" exit %ERROR_CODE%
+
+exit /B %ERROR_CODE%
diff --git a/deps/rabbitmq_amqp1_0/test/system_SUITE_data/java-tests/pom.xml b/deps/rabbitmq_amqp1_0/test/system_SUITE_data/java-tests/pom.xml
new file mode 100644
index 0000000000..d8f01408e6
--- /dev/null
+++ b/deps/rabbitmq_amqp1_0/test/system_SUITE_data/java-tests/pom.xml
@@ -0,0 +1,58 @@
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/maven-v4_0_0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+ <groupId>com.rabbitmq.amqp1_0.tests.proton</groupId>
+ <artifactId>rabbitmq-amqp1.0-java-tests</artifactId>
+ <packaging>jar</packaging>
+ <version>1.0-SNAPSHOT</version>
+ <name>rabbitmq-amqp1.0-java-tests</name>
+ <url>https://www.rabbitmq.com</url>
+ <dependencies>
+ <dependency>
+ <groupId>junit</groupId>
+ <artifactId>junit</artifactId>
+ <version>4.13.1</version>
+ <scope>test</scope>
+ </dependency>
+ <!-- <dependency> -->
+ <!-- <groupId>org.apache.qpid</groupId> -->
+ <!-- <artifactId>proton-j</artifactId> -->
+ <!-- <version>0.13.0</version> -->
+ <!-- <scope>test</scope> -->
+ <!-- </dependency> -->
+ <dependency>
+ <groupId>org.apache.qpid</groupId>
+ <artifactId>qpid-jms-client</artifactId>
+ <version>0.31.0</version>
+ <scope>test</scope>
+ </dependency>
+ </dependencies>
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-compiler-plugin</artifactId>
+ <version>3.8.0</version>
+ <configuration>
+ <source>1.6</source>
+ <target>1.6</target>
+ </configuration>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-surefire-plugin</artifactId>
+ <version>2.22.1</version>
+ <configuration>
+ <systemPropertyVariables>
+ <rmq_broker_uri>${rmq_broker_uri}</rmq_broker_uri>
+ </systemPropertyVariables>
+ <!--
+ needed because of bug in OpenJDK 8 u181 on Debian distros
+ see https://stackoverflow.com/questions/53010200/maven-surefire-could-not-find-forkedbooter-class
+ -->
+ <argLine>-Djdk.net.URLClassPath.disableClassPathURLCheck=true</argLine>
+ </configuration>
+ </plugin>
+ </plugins>
+ </build>
+</project>
diff --git a/deps/rabbitmq_amqp1_0/test/system_SUITE_data/java-tests/src/test/java/com/rabbitmq/amqp1_0/tests/jms/RoundTripTest.java b/deps/rabbitmq_amqp1_0/test/system_SUITE_data/java-tests/src/test/java/com/rabbitmq/amqp1_0/tests/jms/RoundTripTest.java
new file mode 100644
index 0000000000..db5850f8bf
--- /dev/null
+++ b/deps/rabbitmq_amqp1_0/test/system_SUITE_data/java-tests/src/test/java/com/rabbitmq/amqp1_0/tests/jms/RoundTripTest.java
@@ -0,0 +1,84 @@
+// vim:sw=4:et:
+
+package com.rabbitmq.amqp1_0.tests.jms;
+
+import java.util.*;
+
+import junit.framework.Test;
+import junit.framework.TestCase;
+import junit.framework.TestSuite;
+
+import javax.jms.Connection;
+import javax.jms.ConnectionFactory;
+import javax.jms.DeliveryMode;
+import javax.jms.Destination;
+import javax.jms.ExceptionListener;
+import javax.jms.JMSException;
+import javax.jms.Message;
+import javax.jms.MessageConsumer;
+import javax.jms.MessageProducer;
+import javax.jms.Session;
+import javax.jms.TextMessage;
+import javax.naming.Context;
+import javax.naming.InitialContext;
+
+/**
+ * JMS round-trip test against the broker's AMQP 1.0 support.
+ */
+public class RoundTripTest
+ extends TestCase
+{
+ public static final String ADDRESS = "/jms-roundtrip-q";
+ public static final String PAYLOAD = "Payload";
+
+ /**
+ * Create the test case
+ *
+ * @param testName name of the test case
+ */
+ public RoundTripTest(String testName)
+ {
+ super(testName);
+ }
+
+ /**
+ * @return the suite of tests being tested
+ */
+ public static Test suite()
+ {
+ return new TestSuite(RoundTripTest.class);
+ }
+
+ public void test_roundtrip () throws Exception
+ {
+        String uri = System.getProperty("rmq_broker_uri");
+        assertNotNull(uri);
+        String address = uri + ADDRESS;
+ Hashtable<Object, Object> env = new Hashtable<Object, Object>();
+ env.put(Context.INITIAL_CONTEXT_FACTORY, "org.apache.qpid.jms.jndi.JmsInitialContextFactory");
+ env.put("connectionfactory.myFactoryLookup", uri);
+ env.put("queue.myQueueLookup", "my-queue");
+ env.put("jms.sendTimeout", 5);
+ env.put("jms.requestTimeout", 5);
+ javax.naming.Context context = new javax.naming.InitialContext(env);
+
+ ConnectionFactory factory = (ConnectionFactory) context.lookup("myFactoryLookup");
+ Destination queue = (Destination) context.lookup("myQueueLookup");
+
+ Connection connection = factory.createConnection("guest", "guest");
+ connection.start();
+
+ Session session = connection.createSession(false, Session.AUTO_ACKNOWLEDGE);
+
+ MessageProducer messageProducer = session.createProducer(queue);
+ MessageConsumer messageConsumer = session.createConsumer(queue);
+
+        TextMessage message = session.createTextMessage(PAYLOAD);
+ messageProducer.send(message, DeliveryMode.NON_PERSISTENT,
+ Message.DEFAULT_PRIORITY, Message.DEFAULT_TIME_TO_LIVE);
+ TextMessage receivedMessage = (TextMessage) messageConsumer.receive(2000L);
+
+        assertNotNull(receivedMessage);
+        assertEquals(message.getText(), receivedMessage.getText());
+        connection.close();
+    }
+}
diff --git a/deps/rabbitmq_amqp1_0/test/unit_SUITE.erl b/deps/rabbitmq_amqp1_0/test/unit_SUITE.erl
new file mode 100644
index 0000000000..0b10141a13
--- /dev/null
+++ b/deps/rabbitmq_amqp1_0/test/unit_SUITE.erl
@@ -0,0 +1,39 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(unit_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-include("rabbit_amqp1_0.hrl").
+
+-import(rabbit_amqp1_0_util, [serial_add/2, serial_diff/2, serial_compare/2]).
+
+-compile(export_all).
+
+all() ->
+ [
+ serial_arithmetic
+ ].
+
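+%% Transfer numbers are 32-bit serial numbers (cf. RFC 1982): addition
+%% wraps at 2^32, addends above 2^31 - 1 are out of bounds, and diffs of
+%% values exactly 2^31 apart are indeterminate.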
+serial_arithmetic(_Config) ->
+ ?assertEqual(1, serial_add(0, 1)),
+ ?assertEqual(16#7fffffff, serial_add(0, 16#7fffffff)),
+ ?assertEqual(0, serial_add(16#ffffffff, 1)),
+ %% Cannot add more than 2 ^ 31 - 1
+ ?assertExit({out_of_bound_serial_addition, _, _},
+ serial_add(200, 16#80000000)),
+ ?assertEqual(1, serial_diff(1, 0)),
+ ?assertEqual(2, serial_diff(1, 16#ffffffff)),
+ ?assertEqual(-2, serial_diff(16#ffffffff, 1)),
+ ?assertExit({indeterminate_serial_diff, _, _},
+ serial_diff(0, 16#80000000)),
+ ?assertExit({indeterminate_serial_diff, _, _},
+ serial_diff(16#ffffffff, 16#7fffffff)).
diff --git a/deps/rabbitmq_auth_backend_cache/.gitignore b/deps/rabbitmq_auth_backend_cache/.gitignore
new file mode 100644
index 0000000000..78a981fec8
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_cache/.gitignore
@@ -0,0 +1,19 @@
+.sw?
+.*.sw?
+*.beam
+/.erlang.mk/
+/cover/
+/deps/
+/doc/
+/ebin/
+/escript/
+/escript.lock
+/logs/
+/plugins/
+/plugins.lock
+/sbin/
+/sbin.lock
+
+test/config_schema_SUITE_data/schema/
+
+/rabbitmq_auth_backend_cache.d
diff --git a/deps/rabbitmq_auth_backend_cache/.travis.yml b/deps/rabbitmq_auth_backend_cache/.travis.yml
new file mode 100644
index 0000000000..980d3de4e1
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_cache/.travis.yml
@@ -0,0 +1,61 @@
+# vim:sw=2:et:
+
+os: linux
+dist: xenial
+language: elixir
+notifications:
+ email:
+ recipients:
+ - alerts@rabbitmq.com
+ on_success: never
+ on_failure: always
+addons:
+ apt:
+ packages:
+ - awscli
+cache:
+ apt: true
+env:
+ global:
+ - secure: gWlkVT8wmpOTTJtPYHpX7Lqq7RKDPS555K9nIR+l3wUTgMS8ocef5ZQ7PfpAACZ7APLa3kKK2DtlfHVYXJ/jHbOV+PAKgRfA4UXAZm2EYdhAxLqPL/FHe1GEfxdlSQQacdZ0yYl9EPQAk/8YQrMEPqg0ulhCWrAiL5/VQWlDg03pOKidL+T2JmJBmcVl/3pSf1xyV4HByacyqUzseMG4WhFU1cygqAZlbawPy44N0zZHiBzfZ0pz+e3LCvLFTD9NRgrRK3kAL/zcBYMAS/CXcz05Rh0uuNF2yHke8f9GxYPiVpP6KGFVlvVWEtzMWt1bopWLEzoHcM3MKEQQ3UGMfpWDxLlTZ3T318GTUcOGsEIEGtNHk+AbFPWA2rCW9G6/SEq5KPtEmfi7H5e7BpWoTpzwXuKPOCsLQZAxbgtACGL0HTlCtjEfr90h9t96V32rn7Wp3U+7p674zJPXap4u9Rtv6Z9zrn1aBbiotu7WwyAyRL+xklAYJ/Vuxqsv2eoovPza8JCgWPpyGH8DeOOPzuuH2D3SbjaDfqNvZBcXNhg16FnNh1LSCGSWspjdQZRIkaHFC44RBmQcsg+Kk9stFPz+b7CS6WKyrHk6IWv0rqxndBnSh5rGWsCkYhz38ZOzjog6fMae8L2diEKhbsnCQxuUcsRy8Rc68mc4oHMOlcg=
+ - secure: nPG6EWK7WkazGEpHkAOEahe3ijI21Y51OGgsVO+LEE1IxLGypAgiuQmwwYVfZhBeVsERtSvKrhShSOStoN44zellR4B32WnA4PYJzhELtw//68HxyDX8NTfJ0I0soRFSyNrbLh31QtvcwXYgAkZAsSlwwHhFOidc+V7g8dJeB5qNTl8YB7DUGqhXEBQqyM6UXwR2o6+fyRU9HwHCvnii+xJ1wf2QE1xJ/W/tE5T8ahuukQAiirlXm7nJCV2fr9R0nHyvxIgygW54aOIhBpaOxcTPWqqjO9E/pQiTztWweVpPrJJVUDV/zW3WrIYwHc5K+bFRKH+jt9fFjRZ+GHfUjkVXN7EeQ2A+HIcLxaJjv7ZqAZNeGC/OQUISCcvi+a9OgWbmWtf8xaofRT7MTEIXpFW4UhQAo1rm7AvXRP5I5mtS6vDo4ToyZQGsn3oscp/eVBcue8Of7hSA6dMSax+8RCFs2kFoJcTy+orBRdx7JEvk8CkRIIBVeQqZRtTlJJkJAojJ8urMR2Wf4UCG2Oiqhur5G2SZft2gBZ9Wrpplt7Z7G/PyDlD7y6uUCtCvI8BBiYS5jW8gBYTwpLqaNgMs0nWVtGNLsJcXZVDZlk2bKCdbywRVb5DlQHBvbetoEpMFU1X6esv2HfXpGz13ODNBvlAFPBMTaZ/21OCArzpD1Vc=
+
+ # $base_rmq_ref is used by rabbitmq-components.mk to select the
+ # appropriate branch for dependencies.
+ - base_rmq_ref=master
+
+elixir:
+ - '1.9'
+otp_release:
+ - '21.3'
+ - '22.2'
+
+install:
+ # This project being an Erlang one (we just set language to Elixir
+ # to ensure it is installed), we don't want Travis to run mix(1)
+ # automatically as it will break.
+ skip
+
+script:
+ # $current_rmq_ref is also used by rabbitmq-components.mk to select
+ # the appropriate branch for dependencies.
+ - make check-rabbitmq-components.mk
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+ - make xref
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+ - make tests
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+
+after_failure:
+ - |
+ cd "$TRAVIS_BUILD_DIR"
+ if test -d logs && test "$AWS_ACCESS_KEY_ID" && test "$AWS_SECRET_ACCESS_KEY"; then
+ archive_name="$(basename "$TRAVIS_REPO_SLUG")-$TRAVIS_JOB_NUMBER"
+
+ tar -c --transform "s/^logs/${archive_name}/" -f - logs | \
+ xz > "${archive_name}.tar.xz"
+
+ aws s3 cp "${archive_name}.tar.xz" s3://server-release-pipeline/travis-ci-logs/ \
+ --region eu-west-1 \
+ --acl public-read
+ fi
diff --git a/deps/rabbitmq_auth_backend_cache/CODE_OF_CONDUCT.md b/deps/rabbitmq_auth_backend_cache/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000000..08697906fd
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_cache/CODE_OF_CONDUCT.md
@@ -0,0 +1,44 @@
+# Contributor Code of Conduct
+
+As contributors and maintainers of this project, and in the interest of fostering an open
+and welcoming community, we pledge to respect all people who contribute through reporting
+issues, posting feature requests, updating documentation, submitting pull requests or
+patches, and other activities.
+
+We are committed to making participation in this project a harassment-free experience for
+everyone, regardless of level of experience, gender, gender identity and expression,
+sexual orientation, disability, personal appearance, body size, race, ethnicity, age,
+religion, or nationality.
+
+Examples of unacceptable behavior by participants include:
+
+ * The use of sexualized language or imagery
+ * Personal attacks
+ * Trolling or insulting/derogatory comments
+ * Public or private harassment
+ * Publishing others' private information, such as physical or electronic addresses,
+ without explicit permission
+ * Other unethical or unprofessional conduct
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments,
+commits, code, wiki edits, issues, and other contributions that are not aligned to this
+Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors
+that they deem inappropriate, threatening, offensive, or harmful.
+
+By adopting this Code of Conduct, project maintainers commit themselves to fairly and
+consistently applying these principles to every aspect of managing this project. Project
+maintainers who do not follow or enforce the Code of Conduct may be permanently removed
+from the project team.
+
+This Code of Conduct applies both within project spaces and in public spaces when an
+individual is representing the project or its community.
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by
+contacting a project maintainer at [info@rabbitmq.com](mailto:info@rabbitmq.com). All complaints will
+be reviewed and investigated and will result in a response that is deemed necessary and
+appropriate to the circumstances. Maintainers are obligated to maintain confidentiality
+with regard to the reporter of an incident.
+
+This Code of Conduct is adapted from the
+[Contributor Covenant](https://contributor-covenant.org), version 1.3.0, available at
+[contributor-covenant.org/version/1/3/0/](https://contributor-covenant.org/version/1/3/0/)
diff --git a/deps/rabbitmq_auth_backend_cache/CONTRIBUTING.md b/deps/rabbitmq_auth_backend_cache/CONTRIBUTING.md
new file mode 100644
index 0000000000..b50bd82900
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_cache/CONTRIBUTING.md
@@ -0,0 +1,103 @@
+Thank you for using RabbitMQ and for taking the time to contribute to the project.
+This document has two main parts:
+
+ * when and how to file GitHub issues for RabbitMQ projects
+ * how to submit pull requests
+
+They are intended to save you and RabbitMQ maintainers some time, so please
+take a moment to read through them.
+
+## Overview
+
+### GitHub issues
+
+The RabbitMQ team uses GitHub issues for _specific actionable items_ that
+engineers can work on. This assumes the following:
+
+* GitHub issues are not used for questions, investigations, root cause
+  analysis, discussions of potential issues, etc. (as defined by this team)
+* Enough information is provided by the reporter for maintainers to work with
+
+The team receives many questions through various venues every single
+day. Frequently, these questions do not include the necessary details
+the team needs to begin useful work. GitHub issues can very quickly
+turn into something impossible to navigate and make sense
+of. Because of this, questions, investigations, root cause analysis,
+and discussions of potential features are all considered to be
+[mailing list][rmq-users] material. If you are unsure where to begin,
+the [RabbitMQ users mailing list][rmq-users] is the right place.
+
+Getting all the details necessary to reproduce an issue, reach a
+conclusion or even form a hypothesis about what's happening can take a
+fair amount of time. Please help others help you by providing a way to
+reproduce the behavior you're observing, or at least sharing as much
+relevant information as possible on the [RabbitMQ users mailing
+list][rmq-users].
+
+Please provide versions of the software used:
+
+ * RabbitMQ server
+ * Erlang
+ * Operating system version (and distribution, if applicable)
+ * All client libraries used
+ * RabbitMQ plugins (if applicable)
+
+The following information greatly helps in investigating and reproducing issues:
+
+ * RabbitMQ server logs
+ * A code example or terminal transcript that can be used to reproduce
+ * Full exception stack traces (a single line message is not enough!)
+ * `rabbitmqctl report` and `rabbitmqctl environment` output
+ * Other relevant details about the environment and workload, e.g. a traffic capture
+ * Feel free to edit out hostnames and other potentially sensitive information.
+
+To make collecting much of this and other environment information easier, use
+the [`rabbitmq-collect-env`][rmq-collect-env] script. It will produce an archive with
+server logs, operating system logs, output of certain diagnostics commands and so on.
+Please note that **no effort is made to scrub any information that may be sensitive**.
+
+### Pull Requests
+
+RabbitMQ projects use pull requests to discuss, collaborate on and accept code contributions.
+Pull requests are the primary place for discussing code changes.
+
+Here's the recommended workflow:
+
+ * [Fork the repository][github-fork] or repositories you plan on contributing to. If multiple
+ repositories are involved in addressing the same issue, please use the same branch name
+ in each repository
+ * Create a branch with a descriptive name in the relevant repositories
+ * Make your changes, run tests (usually with `make tests`), commit with a
+ [descriptive message][git-commit-msgs], push to your fork
+ * Submit pull requests with an explanation what has been changed and **why**
+ * Submit a filled out and signed [Contributor Agreement][ca-agreement] if needed (see below)
+ * Be patient. We will get to your pull request eventually
+
+If what you are going to work on is a substantial change, please first
+ask the core team for their opinion on the [RabbitMQ users mailing list][rmq-users].
+
+## Running Tests
+
+ make tests
+
+## Code of Conduct
+
+See [CODE_OF_CONDUCT.md](./CODE_OF_CONDUCT.md).
+
+## Contributor Agreement
+
+If you want to contribute a non-trivial change, please submit a signed
+copy of our [Contributor Agreement][ca-agreement] around the time you
+submit your pull request. This will make it much easier (in some
+cases, possible) for the RabbitMQ team at Pivotal to merge your
+contribution.
+
+## Where to Ask Questions
+
+If something isn't clear, feel free to ask on our [mailing list][rmq-users].
+
+[rmq-collect-env]: https://github.com/rabbitmq/support-tools/blob/master/scripts/rabbitmq-collect-env
+[git-commit-msgs]: https://chris.beams.io/posts/git-commit/
+[rmq-users]: https://groups.google.com/forum/#!forum/rabbitmq-users
+[ca-agreement]: https://cla.pivotal.io/sign/rabbitmq
+[github-fork]: https://help.github.com/articles/fork-a-repo/
diff --git a/deps/rabbitmq_auth_backend_cache/LICENSE b/deps/rabbitmq_auth_backend_cache/LICENSE
new file mode 100644
index 0000000000..f2da65d175
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_cache/LICENSE
@@ -0,0 +1,4 @@
+This package is licensed under the MPL 2.0. For the MPL 2.0, please see LICENSE-MPL-RabbitMQ.
+
+If you have any questions regarding licensing, please contact us at
+info@rabbitmq.com.
diff --git a/deps/rabbitmq_auth_backend_cache/LICENSE-MPL-RabbitMQ b/deps/rabbitmq_auth_backend_cache/LICENSE-MPL-RabbitMQ
new file mode 100644
index 0000000000..14e2f777f6
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_cache/LICENSE-MPL-RabbitMQ
@@ -0,0 +1,373 @@
+Mozilla Public License Version 2.0
+==================================
+
+1. Definitions
+--------------
+
+1.1. "Contributor"
+ means each individual or legal entity that creates, contributes to
+ the creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+ means the combination of the Contributions of others (if any) used
+ by a Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+ means Source Code Form to which the initial Contributor has attached
+ the notice in Exhibit A, the Executable Form of such Source Code
+ Form, and Modifications of such Source Code Form, in each case
+ including portions thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ (a) that the initial Contributor has attached the notice described
+ in Exhibit B to the Covered Software; or
+
+ (b) that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the
+ terms of a Secondary License.
+
+1.6. "Executable Form"
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+ means a work that combines Covered Software with other material, in
+ a separate file or files, that is not Covered Software.
+
+1.8. "License"
+ means this document.
+
+1.9. "Licensable"
+ means having the right to grant, to the maximum extent possible,
+ whether at the time of the initial grant or subsequently, any and
+ all of the rights conveyed by this License.
+
+1.10. "Modifications"
+ means any of the following:
+
+ (a) any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered
+ Software; or
+
+ (b) any new file in Source Code Form that contains any Covered
+ Software.
+
+1.11. "Patent Claims" of a Contributor
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the
+ License, by the making, using, selling, offering for sale, having
+ made, import, or transfer of either its Contributions or its
+ Contributor Version.
+
+1.12. "Secondary License"
+ means either the GNU General Public License, Version 2.0, the GNU
+ Lesser General Public License, Version 2.1, the GNU Affero General
+ Public License, Version 3.0, or any later versions of those
+ licenses.
+
+1.13. "Source Code Form"
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that
+ controls, is controlled by, or is under common control with You. For
+ purposes of this definition, "control" means (a) the power, direct
+ or indirect, to cause the direction or management of such entity,
+ whether by contract or otherwise, or (b) ownership of more than
+ fifty percent (50%) of the outstanding shares or beneficial
+ ownership of such entity.
+
+2. License Grants and Conditions
+--------------------------------
+
+2.1. Grants
+
+Each Contributor hereby grants You a world-wide, royalty-free,
+non-exclusive license:
+
+(a) under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+(b) under Patent Claims of such Contributor to make, use, sell, offer
+ for sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+The licenses granted in Section 2.1 with respect to any Contribution
+become effective for each Contribution on the date the Contributor first
+distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+The licenses granted in this Section 2 are the only rights granted under
+this License. No additional rights or licenses will be implied from the
+distribution or licensing of Covered Software under this License.
+Notwithstanding Section 2.1(b) above, no patent license is granted by a
+Contributor:
+
+(a) for any code that a Contributor has removed from Covered Software;
+ or
+
+(b) for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+(c) under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+This License does not grant any rights in the trademarks, service marks,
+or logos of any Contributor (except as may be necessary to comply with
+the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+No Contributor makes additional grants as a result of Your choice to
+distribute the Covered Software under a subsequent version of this
+License (see Section 10.2) or under the terms of a Secondary License (if
+permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+Each Contributor represents that the Contributor believes its
+Contributions are its original creation(s) or it has sufficient rights
+to grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+This License is not intended to limit any rights You have under
+applicable copyright doctrines of fair use, fair dealing, or other
+equivalents.
+
+2.7. Conditions
+
+Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
+in Section 2.1.
+
+3. Responsibilities
+-------------------
+
+3.1. Distribution of Source Form
+
+All distribution of Covered Software in Source Code Form, including any
+Modifications that You create or to which You contribute, must be under
+the terms of this License. You must inform recipients that the Source
+Code Form of the Covered Software is governed by the terms of this
+License, and how they can obtain a copy of this License. You may not
+attempt to alter or restrict the recipients' rights in the Source Code
+Form.
+
+3.2. Distribution of Executable Form
+
+If You distribute Covered Software in Executable Form then:
+
+(a) such Covered Software must also be made available in Source Code
+ Form, as described in Section 3.1, and You must inform recipients of
+ the Executable Form how they can obtain a copy of such Source Code
+ Form by reasonable means in a timely manner, at a charge no more
+ than the cost of distribution to the recipient; and
+
+(b) You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter
+ the recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+You may create and distribute a Larger Work under terms of Your choice,
+provided that You also comply with the requirements of this License for
+the Covered Software. If the Larger Work is a combination of Covered
+Software with a work governed by one or more Secondary Licenses, and the
+Covered Software is not Incompatible With Secondary Licenses, this
+License permits You to additionally distribute such Covered Software
+under the terms of such Secondary License(s), so that the recipient of
+the Larger Work may, at their option, further distribute the Covered
+Software under the terms of either this License or such Secondary
+License(s).
+
+3.4. Notices
+
+You may not remove or alter the substance of any license notices
+(including copyright notices, patent notices, disclaimers of warranty,
+or limitations of liability) contained within the Source Code Form of
+the Covered Software, except that You may alter any license notices to
+the extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+You may choose to offer, and to charge a fee for, warranty, support,
+indemnity or liability obligations to one or more recipients of Covered
+Software. However, You may do so only on Your own behalf, and not on
+behalf of any Contributor. You must make it absolutely clear that any
+such warranty, support, indemnity, or liability obligation is offered by
+You alone, and You hereby agree to indemnify every Contributor for any
+liability incurred by such Contributor as a result of warranty, support,
+indemnity or liability terms You offer. You may include additional
+disclaimers of warranty and limitations of liability specific to any
+jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+---------------------------------------------------
+
+If it is impossible for You to comply with any of the terms of this
+License with respect to some or all of the Covered Software due to
+statute, judicial order, or regulation then You must: (a) comply with
+the terms of this License to the maximum extent possible; and (b)
+describe the limitations and the code they affect. Such description must
+be placed in a text file included with all distributions of the Covered
+Software under this License. Except to the extent prohibited by statute
+or regulation, such description must be sufficiently detailed for a
+recipient of ordinary skill to be able to understand it.
+
+5. Termination
+--------------
+
+5.1. The rights granted under this License will terminate automatically
+if You fail to comply with any of its terms. However, if You become
+compliant, then the rights granted under this License from a particular
+Contributor are reinstated (a) provisionally, unless and until such
+Contributor explicitly and finally terminates Your grants, and (b) on an
+ongoing basis, if such Contributor fails to notify You of the
+non-compliance by some reasonable means prior to 60 days after You have
+come back into compliance. Moreover, Your grants from a particular
+Contributor are reinstated on an ongoing basis if such Contributor
+notifies You of the non-compliance by some reasonable means, this is the
+first time You have received notice of non-compliance with this License
+from such Contributor, and You become compliant prior to 30 days after
+Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+infringement claim (excluding declaratory judgment actions,
+counter-claims, and cross-claims) alleging that a Contributor Version
+directly or indirectly infringes any patent, then the rights granted to
+You by any and all Contributors for the Covered Software under Section
+2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all
+end user license agreements (excluding distributors and resellers) which
+have been validly granted by You or Your distributors under this License
+prior to termination shall survive termination.
+
+************************************************************************
+* *
+* 6. Disclaimer of Warranty *
+* ------------------------- *
+* *
+* Covered Software is provided under this License on an "as is" *
+* basis, without warranty of any kind, either expressed, implied, or *
+* statutory, including, without limitation, warranties that the *
+* Covered Software is free of defects, merchantable, fit for a *
+* particular purpose or non-infringing. The entire risk as to the *
+* quality and performance of the Covered Software is with You. *
+* Should any Covered Software prove defective in any respect, You *
+* (not any Contributor) assume the cost of any necessary servicing, *
+* repair, or correction. This disclaimer of warranty constitutes an *
+* essential part of this License. No use of any Covered Software is *
+* authorized under this License except under this disclaimer. *
+* *
+************************************************************************
+
+************************************************************************
+* *
+* 7. Limitation of Liability *
+* -------------------------- *
+* *
+* Under no circumstances and under no legal theory, whether tort *
+* (including negligence), contract, or otherwise, shall any *
+* Contributor, or anyone who distributes Covered Software as *
+* permitted above, be liable to You for any direct, indirect, *
+* special, incidental, or consequential damages of any character *
+* including, without limitation, damages for lost profits, loss of *
+* goodwill, work stoppage, computer failure or malfunction, or any *
+* and all other commercial damages or losses, even if such party *
+* shall have been informed of the possibility of such damages. This *
+* limitation of liability shall not apply to liability for death or *
+* personal injury resulting from such party's negligence to the *
+* extent applicable law prohibits such limitation. Some *
+* jurisdictions do not allow the exclusion or limitation of *
+* incidental or consequential damages, so this exclusion and *
+* limitation may not apply to You. *
+* *
+************************************************************************
+
+8. Litigation
+-------------
+
+Any litigation relating to this License may be brought only in the
+courts of a jurisdiction where the defendant maintains its principal
+place of business and such litigation shall be governed by laws of that
+jurisdiction, without reference to its conflict-of-law provisions.
+Nothing in this Section shall prevent a party's ability to bring
+cross-claims or counter-claims.
+
+9. Miscellaneous
+----------------
+
+This License represents the complete agreement concerning the subject
+matter hereof. If any provision of this License is held to be
+unenforceable, such provision shall be reformed only to the extent
+necessary to make it enforceable. Any law or regulation which provides
+that the language of a contract shall be construed against the drafter
+shall not be used to construe this License against a Contributor.
+
+10. Versions of the License
+---------------------------
+
+10.1. New Versions
+
+Mozilla Foundation is the license steward. Except as provided in Section
+10.3, no one other than the license steward has the right to modify or
+publish new versions of this License. Each version will be given a
+distinguishing version number.
+
+10.2. Effect of New Versions
+
+You may distribute the Covered Software under the terms of the version
+of the License under which You originally received the Covered Software,
+or under the terms of any subsequent version published by the license
+steward.
+
+10.3. Modified Versions
+
+If you create software not governed by this License, and you want to
+create a new license for such software, you may create and use a
+modified version of this License if you rename the license and remove
+any references to the name of the license steward (except to note that
+such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+Licenses
+
+If You choose to distribute Source Code Form that is Incompatible With
+Secondary Licenses under the terms of this version of the License, the
+notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+-------------------------------------------
+
+ This Source Code Form is subject to the terms of the Mozilla Public
+ License, v. 2.0. If a copy of the MPL was not distributed with this
+ file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular
+file, then You may include the notice in a location (such as a LICENSE
+file in a relevant directory) where a recipient would be likely to look
+for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+---------------------------------------------------------
+
+ This Source Code Form is "Incompatible With Secondary Licenses", as
+ defined by the Mozilla Public License, v. 2.0.
diff --git a/deps/rabbitmq_auth_backend_cache/Makefile b/deps/rabbitmq_auth_backend_cache/Makefile
new file mode 100644
index 0000000000..1b7d375828
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_cache/Makefile
@@ -0,0 +1,32 @@
+PROJECT = rabbitmq_auth_backend_cache
+PROJECT_DESCRIPTION = RabbitMQ Authentication Backend cache
+PROJECT_MOD = rabbit_auth_backend_cache_app
+
+define PROJECT_ENV
+[
+ {cache_ttl, 15000},
+ {cache_module, rabbit_auth_cache_ets},
+ {cache_module_args, []},
+ {cached_backend, rabbit_auth_backend_internal},
+ {cache_refusals, false}
+ ]
+endef
+
+define PROJECT_APP_EXTRA_KEYS
+ {broker_version_requirements, []}
+endef
+
+DEPS = rabbit_common rabbit
+TEST_DEPS = rabbitmq_ct_helpers rabbitmq_ct_client_helpers
+
+DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-plugin.mk
+DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk
+
+# FIXME: Use erlang.mk patched for RabbitMQ, while waiting for PRs to be
+# reviewed and merged.
+
+ERLANG_MK_REPO = https://github.com/rabbitmq/erlang.mk.git
+ERLANG_MK_COMMIT = rabbitmq-tmp
+
+include rabbitmq-components.mk
+include erlang.mk
diff --git a/deps/rabbitmq_auth_backend_cache/README.md b/deps/rabbitmq_auth_backend_cache/README.md
new file mode 100644
index 0000000000..af3bad9c7f
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_cache/README.md
@@ -0,0 +1,225 @@
+# RabbitMQ Access Control Cache Plugin
+
+This plugin provides a caching layer for [access control operations](https://rabbitmq.com/access-control.html)
+performed by RabbitMQ nodes.
+
+## Project Maturity
+
+This plugin has been distributed with RabbitMQ since version 3.7.0.
+
+## Overview
+
+This plugin provides a way to cache [authentication and authorization backend](https://rabbitmq.com/access-control.html)
+results for a configurable amount of time.
+It's not an independent auth backend but a caching layer for existing backends
+such as the built-in, [LDAP](https://github.com/rabbitmq/rabbitmq-auth-backend-ldap), or [HTTP](https://github.com/rabbitmq/rabbitmq-auth-backend-http)
+ones.
+
+Cache expiration is currently time-based. Caching adds little value with the
+built-in (internal) [authn/authz backends](https://rabbitmq.com/access-control.html)
+but can substantially reduce load for LDAP, HTTP, or other backends that
+perform network requests.
+
+## RabbitMQ Version Requirements
+
+As of 3.7.0, this plugin is distributed with RabbitMQ. Like any other plugin, it must
+be [enabled](https://www.rabbitmq.com/plugins.html#ways-to-enable-plugins) before it can be used.
+
+
+## Installation
+
+This plugin ships with reasonably recent RabbitMQ versions
+(e.g. `3.7.0` or later). Enable it with
+
+``` shell
+rabbitmq-plugins enable rabbitmq_auth_backend_cache
+```
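+
+To confirm the plugin is active on a running node (a quick check; in the
+output, an `[E*]` marker means explicitly enabled and `[e*]` means enabled
+implicitly as a dependency):
+
+``` shell
+# filter the plugin list down to this plugin
+rabbitmq-plugins list | grep auth_backend_cache
+```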
+
+## Binary Builds
+
+Binary builds can be obtained [from project releases](https://github.com/rabbitmq/rabbitmq-auth-backend-cache/releases/) on GitHub.
+
+## Building
+
+You can build and install it like any other plugin (see
+[the plugin development guide](https://www.rabbitmq.com/plugin-development.html)).
+
+## Authentication and Authorization Backend Configuration
+
+To enable the plugin, set the value of the `auth_backends` configuration item
+for the `rabbit` application to include `rabbit_auth_backend_cache`.
+`auth_backends` is a list of authentication providers to try in order.
+
+
+A configuration fragment that enables this plugin *only* (the example is **intentionally incomplete**) would look like:
+
+``` ini
+auth_backends.1 = cache
+```
+
+In the [classic config format](https://www.rabbitmq.com/configure.html#config-file-formats):
+
+``` erlang
+[
+ {rabbit, [
+ {auth_backends, [rabbit_auth_backend_cache]}
+ ]
+ }
+].
+```
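+
+Since `auth_backends` entries are consulted in order, the cache can be
+combined with a fallback backend. A hypothetical fragment, assuming logins
+refused by the cached backend should also be checked against the internal
+user database:
+
+``` ini
+auth_backends.1 = cache
+auth_backends.2 = internal
+```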
+
+This plugin wraps another auth backend (an "upstream" one) to reduce load on it.
+
+To configure the upstream auth backend, use the `auth_cache.cached_backend` configuration key
+(`rabbitmq_auth_backend_cache.cached_backend` in the classic config format).
+
+The following configuration uses the [LDAP backend](https://rabbitmq.com/ldap.html) for both authentication and authorization
+and wraps it with caching:
+
+ auth_backends.1 = cache
+
+ auth_cache.cached_backend = ldap
+
+In the classic config format:
+
+``` erlang
+[
+ {rabbit, [
+ %% ...
+ ]},
+ {rabbitmq_auth_backend_cache, [
+ {cached_backend, rabbit_auth_backend_ldap}
+ ]},
+ {rabbitmq_auth_backend_ldap, [
+  %% ...
+ ]}
+].
+```
+
+The following example combines this backend with the [HTTP backend](https://github.com/rabbitmq/rabbitmq-auth-backend-http/tree/master) and its [example Spring Boot application](https://github.com/rabbitmq/rabbitmq-auth-backend-http/tree/master/examples):
+
+
+ auth_backends.1 = cache
+ auth_cache.cached_backend = http
+
+ auth_http.http_method = post
+ auth_http.user_path = http://localhost:8080/auth/user
+ auth_http.vhost_path = http://localhost:8080/auth/vhost
+ auth_http.resource_path = http://localhost:8080/auth/resource
+ auth_http.topic_path = http://localhost:8080/auth/topic
+
+In the classic config format:
+
+``` erlang
+[
+ {rabbit, [
+ {auth_backends, [rabbit_auth_backend_cache]}
+ ]
+ },
+ {rabbitmq_auth_backend_cache, [
+ {cached_backend, rabbit_auth_backend_http}
+ ]
+ },
+ {rabbitmq_auth_backend_http, [{http_method, post},
+ {user_path, "http://127.0.0.1:8080/auth/user"},
+ {vhost_path, "http://127.0.0.1:8080/auth/vhost"},
+ {resource_path, "http://127.0.0.1:8080/auth/resource"},
+                                {topic_path, "http://127.0.0.1:8080/auth/topic"}
+ ]
+ }
+].
+```
+
+It is still possible to [use different backends for authorization and authentication](https://www.rabbitmq.com/access-control.html).
+
+The following example configures the plugin to use the LDAP backend for
+authentication and the internal backend for authorization:
+
+ auth_backends.1 = cache
+
+ auth_cache.cached_backend.authn = ldap
+ auth_cache.cached_backend.authz = internal
+
+In the classic config format, the two backends are given as an `{AuthnBackend, AuthzBackend}` tuple:
+
+``` erlang
+[
+ {rabbit, [
+  %% ...
+ ]},
+ {rabbitmq_auth_backend_cache, [
+  {cached_backend, {rabbit_auth_backend_ldap, rabbit_auth_backend_internal}}
+ ]}
+].
+```
+
+
+
+## Cache Configuration
+
+You can configure the TTL for cache items using the `cache_ttl` configuration item, which is specified in **milliseconds**:
+
+ auth_cache.cached_backend = ldap
+ auth_cache.cache_ttl = 5000
+
+Or using the classic config for both parameters:
+
+``` erlang
+[
+ {rabbit, [
+ %% ...
+ ]},
+ {rabbitmq_auth_backend_cache, [{cached_backend, rabbit_auth_backend_ldap},
+ {cache_ttl, 5000}]}].
+```
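+
+The plugin also has a `cache_refusals` setting (it appears in the
+`PROJECT_ENV` defaults of the plugin's Makefile and defaults to `false`)
+that controls whether refused authentication attempts are cached as well.
+A classic-config sketch, assuming it is set through the same application
+environment as the other keys:
+
+``` erlang
+[
+  {rabbitmq_auth_backend_cache, [
+    %% also cache negative (refused) results for the configured TTL;
+    %% defaults to false
+    {cache_refusals, true}
+  ]}
+].
+```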
+
+You can also use a custom cache module to store cached requests. The module
+should be an Erlang module that implements the `rabbit_auth_cache` behaviour
+and (optionally) defines a `start_link` function to start the cache process.
+
+This repository provides several implementations:
+
+ * `rabbit_auth_cache_dict` stores cache entries in the internal process dictionary. **This module is for demonstration only and should not be used in production**.
+ * `rabbit_auth_cache_ets` stores cache entries in an [ETS](https://learnyousomeerlang.com/ets) table and uses timers for cache invalidation. **This is the default implementation**.
+ * `rabbit_auth_cache_ets_segmented` stores cache entries in multiple ETS tables and does not delete individual cache items but rather
+ uses a separate process for garbage collection.
+ * `rabbit_auth_cache_ets_segmented_stateless` is the same as the previous
+   one, but with minimal use of `gen_server` state; ETS tables store the
+   segment metadata.
+
+To specify the caching module, use the `cache_module` configuration item;
+its start arguments are set with `cache_module_args`, a list of arguments
+passed to the module's `start_link` function.
+
+The cache module can be set via the sysctl config format:
+
+    auth_cache.cache_module = rabbit_auth_cache_ets_segmented
+
+Additional cache module arguments can only be defined via the [advanced config](https://www.rabbitmq.com/configure.html#advanced-config-file) or classic config format:
+
+``` erlang
+[
+ {rabbit, [
+ %% ...
+ ]},
+
+ {rabbitmq_auth_backend_cache, [{cache_module_args, [10000]}]}
+].
+```
+
+The above two snippets combined in the classic config format:
+
+``` erlang
+[
+ {rabbit, [
+ %% ...
+ ]},
+
+ {rabbitmq_auth_backend_cache, [{cache_module, rabbit_auth_cache_ets_segmented},
+ {cache_module_args, [10000]}]}
+].
+```
+
+The default values are `rabbit_auth_cache_ets` and `[]`, respectively.
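+
+As an illustration of how the two settings fit together, the fragment below
+wires in a hypothetical custom module `my_auth_cache` whose `start_link/1`
+takes a single options list; both the module name and the argument shape are
+invented for this sketch:
+
+``` erlang
+[
+  {rabbitmq_auth_backend_cache, [
+    %% my_auth_cache is hypothetical; it must implement the
+    %% rabbit_auth_cache behaviour provided by this plugin
+    {cache_module, my_auth_cache},
+    %% cache_module_args is the argument list passed to
+    %% my_auth_cache:start_link/1, here a single options proplist
+    {cache_module_args, [[{max_size, 10000}]]}
+  ]}
+].
+```
+
+On a running node, `rabbitmqctl eval 'application:get_env(rabbitmq_auth_backend_cache, cache_module).'` can be used to confirm which module is in effect.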
+
+
+## License and Copyright
+
+(c) 2016-2020 VMware, Inc. or its affiliates.
+
+Released under the Mozilla Public License 2.0, same as RabbitMQ.
diff --git a/deps/rabbitmq_auth_backend_cache/erlang.mk b/deps/rabbitmq_auth_backend_cache/erlang.mk
new file mode 100644
index 0000000000..fce4be0b0a
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_cache/erlang.mk
@@ -0,0 +1,7808 @@
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+.PHONY: all app apps deps search rel relup docs install-docs check tests clean distclean help erlang-mk
+
+ERLANG_MK_FILENAME := $(realpath $(lastword $(MAKEFILE_LIST)))
+export ERLANG_MK_FILENAME
+
+ERLANG_MK_VERSION = 2019.07.01-40-geb3e4b0
+ERLANG_MK_WITHOUT =
+
+# Make 3.81 and 3.82 are deprecated.
+
+ifeq ($(MAKELEVEL)$(MAKE_VERSION),03.81)
+$(warning Please upgrade to GNU Make 4 or later: https://erlang.mk/guide/installation.html)
+endif
+
+ifeq ($(MAKELEVEL)$(MAKE_VERSION),03.82)
+$(warning Please upgrade to GNU Make 4 or later: https://erlang.mk/guide/installation.html)
+endif
+
+# Core configuration.
+
+PROJECT ?= $(notdir $(CURDIR))
+PROJECT := $(strip $(PROJECT))
+
+PROJECT_VERSION ?= rolling
+PROJECT_MOD ?= $(PROJECT)_app
+PROJECT_ENV ?= []
+
+# Verbosity.
+
+V ?= 0
+
+verbose_0 = @
+verbose_2 = set -x;
+verbose = $(verbose_$(V))
+
+ifeq ($(V),3)
+SHELL := $(SHELL) -x
+endif
+
+gen_verbose_0 = @echo " GEN " $@;
+gen_verbose_2 = set -x;
+gen_verbose = $(gen_verbose_$(V))
+
+gen_verbose_esc_0 = @echo " GEN " $$@;
+gen_verbose_esc_2 = set -x;
+gen_verbose_esc = $(gen_verbose_esc_$(V))
+
+# Temporary files directory.
+
+ERLANG_MK_TMP ?= $(CURDIR)/.erlang.mk
+export ERLANG_MK_TMP
+
+# "erl" command.
+
+ERL = erl +A1 -noinput -boot no_dot_erlang
+
+# Platform detection.
+
+ifeq ($(PLATFORM),)
+UNAME_S := $(shell uname -s)
+
+ifeq ($(UNAME_S),Linux)
+PLATFORM = linux
+else ifeq ($(UNAME_S),Darwin)
+PLATFORM = darwin
+else ifeq ($(UNAME_S),SunOS)
+PLATFORM = solaris
+else ifeq ($(UNAME_S),GNU)
+PLATFORM = gnu
+else ifeq ($(UNAME_S),FreeBSD)
+PLATFORM = freebsd
+else ifeq ($(UNAME_S),NetBSD)
+PLATFORM = netbsd
+else ifeq ($(UNAME_S),OpenBSD)
+PLATFORM = openbsd
+else ifeq ($(UNAME_S),DragonFly)
+PLATFORM = dragonfly
+else ifeq ($(shell uname -o),Msys)
+PLATFORM = msys2
+else
+$(error Unable to detect platform. Please open a ticket with the output of uname -a.)
+endif
+
+export PLATFORM
+endif
+
+# Core targets.
+
+all:: deps app rel
+
+# Noop to avoid a Make warning when there's nothing to do.
+rel::
+ $(verbose) :
+
+relup:: deps app
+
+check:: tests
+
+clean:: clean-crashdump
+
+clean-crashdump:
+ifneq ($(wildcard erl_crash.dump),)
+ $(gen_verbose) rm -f erl_crash.dump
+endif
+
+distclean:: clean distclean-tmp
+
+$(ERLANG_MK_TMP):
+ $(verbose) mkdir -p $(ERLANG_MK_TMP)
+
+distclean-tmp:
+ $(gen_verbose) rm -rf $(ERLANG_MK_TMP)
+
+help::
+ $(verbose) printf "%s\n" \
+ "erlang.mk (version $(ERLANG_MK_VERSION)) is distributed under the terms of the ISC License." \
+ "Copyright (c) 2013-2016 Loïc Hoguin <essen@ninenines.eu>" \
+ "" \
+ "Usage: [V=1] $(MAKE) [target]..." \
+ "" \
+ "Core targets:" \
+ " all Run deps, app and rel targets in that order" \
+ " app Compile the project" \
+ " deps Fetch dependencies (if needed) and compile them" \
+ " fetch-deps Fetch dependencies recursively (if needed) without compiling them" \
+ " list-deps List dependencies recursively on stdout" \
+ " search q=... Search for a package in the built-in index" \
+ " rel Build a release for this project, if applicable" \
+ " docs Build the documentation for this project" \
+ " install-docs Install the man pages for this project" \
+ " check Compile and run all tests and analysis for this project" \
+ " tests Run the tests for this project" \
+ " clean Delete temporary and output files from most targets" \
+ " distclean Delete all temporary and output files" \
+ " help Display this help and exit" \
+ " erlang-mk Update erlang.mk to the latest version"
+
+# Core functions.
+
+empty :=
+space := $(empty) $(empty)
+tab := $(empty) $(empty)
+comma := ,
+
+define newline
+
+
+endef
+
+define comma_list
+$(subst $(space),$(comma),$(strip $(1)))
+endef
+
+define escape_dquotes
+$(subst ",\",$1)
+endef
+
+# Adding erlang.mk to make Erlang scripts that call init:get_plain_arguments() happy.
+define erlang
+$(ERL) $2 -pz $(ERLANG_MK_TMP)/rebar/ebin -eval "$(subst $(newline),,$(call escape_dquotes,$1))" -- erlang.mk
+endef
+
+ifeq ($(PLATFORM),msys2)
+core_native_path = $(shell cygpath -m $1)
+else
+core_native_path = $1
+endif
+
+core_http_get = curl -Lf$(if $(filter-out 0,$(V)),,s)o $(call core_native_path,$1) $2
+
+core_eq = $(and $(findstring $(1),$(2)),$(findstring $(2),$(1)))
+
+# We skip files that contain spaces because they end up causing issues.
+core_find = $(if $(wildcard $1),$(shell find $(1:%/=%) \( -type l -o -type f \) -name $(subst *,\*,$2) | grep -v " "))
+
+core_lc = $(subst A,a,$(subst B,b,$(subst C,c,$(subst D,d,$(subst E,e,$(subst F,f,$(subst G,g,$(subst H,h,$(subst I,i,$(subst J,j,$(subst K,k,$(subst L,l,$(subst M,m,$(subst N,n,$(subst O,o,$(subst P,p,$(subst Q,q,$(subst R,r,$(subst S,s,$(subst T,t,$(subst U,u,$(subst V,v,$(subst W,w,$(subst X,x,$(subst Y,y,$(subst Z,z,$(1)))))))))))))))))))))))))))
+
+core_ls = $(filter-out $(1),$(shell echo $(1)))
+
+# @todo Use a solution that does not require using perl.
+core_relpath = $(shell perl -e 'use File::Spec; print File::Spec->abs2rel(@ARGV) . "\n"' $1 $2)
+
+define core_render
+ printf -- '$(subst $(newline),\n,$(subst %,%%,$(subst ','\'',$(subst $(tab),$(WS),$(call $(1))))))\n' > $(2)
+endef
+
+# Automated update.
+
+ERLANG_MK_REPO ?= https://github.com/ninenines/erlang.mk
+ERLANG_MK_COMMIT ?=
+ERLANG_MK_BUILD_CONFIG ?= build.config
+ERLANG_MK_BUILD_DIR ?= .erlang.mk.build
+
+erlang-mk: WITHOUT ?= $(ERLANG_MK_WITHOUT)
+erlang-mk:
+ifdef ERLANG_MK_COMMIT
+ $(verbose) git clone $(ERLANG_MK_REPO) $(ERLANG_MK_BUILD_DIR)
+ $(verbose) cd $(ERLANG_MK_BUILD_DIR) && git checkout $(ERLANG_MK_COMMIT)
+else
+ $(verbose) git clone --depth 1 $(ERLANG_MK_REPO) $(ERLANG_MK_BUILD_DIR)
+endif
+ $(verbose) if [ -f $(ERLANG_MK_BUILD_CONFIG) ]; then cp $(ERLANG_MK_BUILD_CONFIG) $(ERLANG_MK_BUILD_DIR)/build.config; fi
+ $(gen_verbose) $(MAKE) --no-print-directory -C $(ERLANG_MK_BUILD_DIR) WITHOUT='$(strip $(WITHOUT))' UPGRADE=1
+ $(verbose) cp $(ERLANG_MK_BUILD_DIR)/erlang.mk ./erlang.mk
+ $(verbose) rm -rf $(ERLANG_MK_BUILD_DIR)
+ $(verbose) rm -rf $(ERLANG_MK_TMP)
+
+# The erlang.mk package index is bundled in the default erlang.mk build.
+# Search for the string "copyright" to skip to the rest of the code.
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-kerl
+
+KERL_INSTALL_DIR ?= $(HOME)/erlang
+
+ifeq ($(strip $(KERL)),)
+KERL := $(ERLANG_MK_TMP)/kerl/kerl
+endif
+
+KERL_DIR = $(ERLANG_MK_TMP)/kerl
+
+export KERL
+
+KERL_GIT ?= https://github.com/kerl/kerl
+KERL_COMMIT ?= master
+
+KERL_MAKEFLAGS ?=
+
+OTP_GIT ?= https://github.com/erlang/otp
+
+define kerl_otp_target
+$(KERL_INSTALL_DIR)/$(1): $(KERL)
+ $(verbose) if [ ! -d $$@ ]; then \
+ MAKEFLAGS="$(KERL_MAKEFLAGS)" $(KERL) build git $(OTP_GIT) $(1) $(1); \
+ $(KERL) install $(1) $(KERL_INSTALL_DIR)/$(1); \
+ fi
+endef
+
+define kerl_hipe_target
+$(KERL_INSTALL_DIR)/$1-native: $(KERL)
+ $(verbose) if [ ! -d $$@ ]; then \
+ KERL_CONFIGURE_OPTIONS=--enable-native-libs \
+ MAKEFLAGS="$(KERL_MAKEFLAGS)" $(KERL) build git $(OTP_GIT) $1 $1-native; \
+ $(KERL) install $1-native $(KERL_INSTALL_DIR)/$1-native; \
+ fi
+endef
+
+$(KERL): $(KERL_DIR)
+
+$(KERL_DIR): | $(ERLANG_MK_TMP)
+ $(gen_verbose) git clone --depth 1 $(KERL_GIT) $(ERLANG_MK_TMP)/kerl
+ $(verbose) cd $(ERLANG_MK_TMP)/kerl && git checkout $(KERL_COMMIT)
+ $(verbose) chmod +x $(KERL)
+
+distclean:: distclean-kerl
+
+distclean-kerl:
+ $(gen_verbose) rm -rf $(KERL_DIR)
+
+# Allow users to select which version of Erlang/OTP to use for a project.
+
+ifneq ($(strip $(LATEST_ERLANG_OTP)),)
+# In some environments it is necessary to filter out master.
+ERLANG_OTP := $(notdir $(lastword $(sort\
+ $(filter-out $(KERL_INSTALL_DIR)/master $(KERL_INSTALL_DIR)/OTP_R%,\
+ $(filter-out %-rc1 %-rc2 %-rc3,$(wildcard $(KERL_INSTALL_DIR)/*[^-native]))))))
+endif
+
+ERLANG_OTP ?=
+ERLANG_HIPE ?=
+
+# Use kerl to enforce a specific Erlang/OTP version for a project.
+ifneq ($(strip $(ERLANG_OTP)),)
+export PATH := $(KERL_INSTALL_DIR)/$(ERLANG_OTP)/bin:$(PATH)
+SHELL := env PATH=$(PATH) $(SHELL)
+$(eval $(call kerl_otp_target,$(ERLANG_OTP)))
+
+# Build Erlang/OTP only if it doesn't already exist.
+ifeq ($(wildcard $(KERL_INSTALL_DIR)/$(ERLANG_OTP))$(BUILD_ERLANG_OTP),)
+$(info Building Erlang/OTP $(ERLANG_OTP)... Please wait...)
+$(shell $(MAKE) $(KERL_INSTALL_DIR)/$(ERLANG_OTP) ERLANG_OTP=$(ERLANG_OTP) BUILD_ERLANG_OTP=1 >&2)
+endif
+
+else
+# Same for a HiPE enabled VM.
+ifneq ($(strip $(ERLANG_HIPE)),)
+export PATH := $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native/bin:$(PATH)
+SHELL := env PATH=$(PATH) $(SHELL)
+$(eval $(call kerl_hipe_target,$(ERLANG_HIPE)))
+
+# Build Erlang/OTP only if it doesn't already exist.
+ifeq ($(wildcard $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native)$(BUILD_ERLANG_OTP),)
+$(info Building HiPE-enabled Erlang/OTP $(ERLANG_OTP)... Please wait...)
+$(shell $(MAKE) $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native ERLANG_HIPE=$(ERLANG_HIPE) BUILD_ERLANG_OTP=1 >&2)
+endif
+
+endif
+endif
+
+PACKAGES += aberth
+pkg_aberth_name = aberth
+pkg_aberth_description = Generic BERT-RPC server in Erlang
+pkg_aberth_homepage = https://github.com/a13x/aberth
+pkg_aberth_fetch = git
+pkg_aberth_repo = https://github.com/a13x/aberth
+pkg_aberth_commit = master
+
+PACKAGES += active
+pkg_active_name = active
+pkg_active_description = Active development for Erlang: rebuild and reload source/binary files while the VM is running
+pkg_active_homepage = https://github.com/proger/active
+pkg_active_fetch = git
+pkg_active_repo = https://github.com/proger/active
+pkg_active_commit = master
+
+PACKAGES += actordb_core
+pkg_actordb_core_name = actordb_core
+pkg_actordb_core_description = ActorDB main source
+pkg_actordb_core_homepage = http://www.actordb.com/
+pkg_actordb_core_fetch = git
+pkg_actordb_core_repo = https://github.com/biokoda/actordb_core
+pkg_actordb_core_commit = master
+
+PACKAGES += actordb_thrift
+pkg_actordb_thrift_name = actordb_thrift
+pkg_actordb_thrift_description = Thrift API for ActorDB
+pkg_actordb_thrift_homepage = http://www.actordb.com/
+pkg_actordb_thrift_fetch = git
+pkg_actordb_thrift_repo = https://github.com/biokoda/actordb_thrift
+pkg_actordb_thrift_commit = master
+
+PACKAGES += aleppo
+pkg_aleppo_name = aleppo
+pkg_aleppo_description = Alternative Erlang Pre-Processor
+pkg_aleppo_homepage = https://github.com/ErlyORM/aleppo
+pkg_aleppo_fetch = git
+pkg_aleppo_repo = https://github.com/ErlyORM/aleppo
+pkg_aleppo_commit = master
+
+PACKAGES += alog
+pkg_alog_name = alog
+pkg_alog_description = Simply the best logging framework for Erlang
+pkg_alog_homepage = https://github.com/siberian-fast-food/alogger
+pkg_alog_fetch = git
+pkg_alog_repo = https://github.com/siberian-fast-food/alogger
+pkg_alog_commit = master
+
+PACKAGES += amqp_client
+pkg_amqp_client_name = amqp_client
+pkg_amqp_client_description = RabbitMQ Erlang AMQP client
+pkg_amqp_client_homepage = https://www.rabbitmq.com/erlang-client-user-guide.html
+pkg_amqp_client_fetch = git
+pkg_amqp_client_repo = https://github.com/rabbitmq/rabbitmq-erlang-client.git
+pkg_amqp_client_commit = master
+
+PACKAGES += annotations
+pkg_annotations_name = annotations
+pkg_annotations_description = Simple code instrumentation utilities
+pkg_annotations_homepage = https://github.com/hyperthunk/annotations
+pkg_annotations_fetch = git
+pkg_annotations_repo = https://github.com/hyperthunk/annotations
+pkg_annotations_commit = master
+
+PACKAGES += antidote
+pkg_antidote_name = antidote
+pkg_antidote_description = Large-scale computation without synchronisation
+pkg_antidote_homepage = https://syncfree.lip6.fr/
+pkg_antidote_fetch = git
+pkg_antidote_repo = https://github.com/SyncFree/antidote
+pkg_antidote_commit = master
+
+PACKAGES += apns
+pkg_apns_name = apns
+pkg_apns_description = Apple Push Notification Server for Erlang
+pkg_apns_homepage = http://inaka.github.com/apns4erl
+pkg_apns_fetch = git
+pkg_apns_repo = https://github.com/inaka/apns4erl
+pkg_apns_commit = master
+
+PACKAGES += asciideck
+pkg_asciideck_name = asciideck
+pkg_asciideck_description = Asciidoc for Erlang.
+pkg_asciideck_homepage = https://ninenines.eu
+pkg_asciideck_fetch = git
+pkg_asciideck_repo = https://github.com/ninenines/asciideck
+pkg_asciideck_commit = master
+
+PACKAGES += azdht
+pkg_azdht_name = azdht
+pkg_azdht_description = Azureus Distributed Hash Table (DHT) in Erlang
+pkg_azdht_homepage = https://github.com/arcusfelis/azdht
+pkg_azdht_fetch = git
+pkg_azdht_repo = https://github.com/arcusfelis/azdht
+pkg_azdht_commit = master
+
+PACKAGES += backoff
+pkg_backoff_name = backoff
+pkg_backoff_description = Simple exponential backoffs in Erlang
+pkg_backoff_homepage = https://github.com/ferd/backoff
+pkg_backoff_fetch = git
+pkg_backoff_repo = https://github.com/ferd/backoff
+pkg_backoff_commit = master
+
+PACKAGES += barrel_tcp
+pkg_barrel_tcp_name = barrel_tcp
+pkg_barrel_tcp_description = barrel is a generic TCP acceptor pool with low latency in Erlang.
+pkg_barrel_tcp_homepage = https://github.com/benoitc-attic/barrel_tcp
+pkg_barrel_tcp_fetch = git
+pkg_barrel_tcp_repo = https://github.com/benoitc-attic/barrel_tcp
+pkg_barrel_tcp_commit = master
+
+PACKAGES += basho_bench
+pkg_basho_bench_name = basho_bench
+pkg_basho_bench_description = A load-generation and testing tool for basically whatever you can write a returning Erlang function for.
+pkg_basho_bench_homepage = https://github.com/basho/basho_bench
+pkg_basho_bench_fetch = git
+pkg_basho_bench_repo = https://github.com/basho/basho_bench
+pkg_basho_bench_commit = master
+
+PACKAGES += bcrypt
+pkg_bcrypt_name = bcrypt
+pkg_bcrypt_description = Bcrypt Erlang / C library
+pkg_bcrypt_homepage = https://github.com/erlangpack/bcrypt
+pkg_bcrypt_fetch = git
+pkg_bcrypt_repo = https://github.com/erlangpack/bcrypt.git
+pkg_bcrypt_commit = master
+
+PACKAGES += beam
+pkg_beam_name = beam
+pkg_beam_description = BEAM emulator written in Erlang
+pkg_beam_homepage = https://github.com/tonyrog/beam
+pkg_beam_fetch = git
+pkg_beam_repo = https://github.com/tonyrog/beam
+pkg_beam_commit = master
+
+PACKAGES += beanstalk
+pkg_beanstalk_name = beanstalk
+pkg_beanstalk_description = An Erlang client for beanstalkd
+pkg_beanstalk_homepage = https://github.com/tim/erlang-beanstalk
+pkg_beanstalk_fetch = git
+pkg_beanstalk_repo = https://github.com/tim/erlang-beanstalk
+pkg_beanstalk_commit = master
+
+PACKAGES += bear
+pkg_bear_name = bear
+pkg_bear_description = a set of statistics functions for erlang
+pkg_bear_homepage = https://github.com/boundary/bear
+pkg_bear_fetch = git
+pkg_bear_repo = https://github.com/boundary/bear
+pkg_bear_commit = master
+
+PACKAGES += bertconf
+pkg_bertconf_name = bertconf
+pkg_bertconf_description = Make ETS tables out of static BERT files that are auto-reloaded
+pkg_bertconf_homepage = https://github.com/ferd/bertconf
+pkg_bertconf_fetch = git
+pkg_bertconf_repo = https://github.com/ferd/bertconf
+pkg_bertconf_commit = master
+
+PACKAGES += bifrost
+pkg_bifrost_name = bifrost
+pkg_bifrost_description = Erlang FTP Server Framework
+pkg_bifrost_homepage = https://github.com/thorstadt/bifrost
+pkg_bifrost_fetch = git
+pkg_bifrost_repo = https://github.com/thorstadt/bifrost
+pkg_bifrost_commit = master
+
+PACKAGES += binpp
+pkg_binpp_name = binpp
+pkg_binpp_description = Erlang Binary Pretty Printer
+pkg_binpp_homepage = https://github.com/jtendo/binpp
+pkg_binpp_fetch = git
+pkg_binpp_repo = https://github.com/jtendo/binpp
+pkg_binpp_commit = master
+
+PACKAGES += bisect
+pkg_bisect_name = bisect
+pkg_bisect_description = Ordered fixed-size binary dictionary in Erlang
+pkg_bisect_homepage = https://github.com/knutin/bisect
+pkg_bisect_fetch = git
+pkg_bisect_repo = https://github.com/knutin/bisect
+pkg_bisect_commit = master
+
+PACKAGES += bitcask
+pkg_bitcask_name = bitcask
+pkg_bitcask_description = because you need another a key/value storage engine
+pkg_bitcask_homepage = https://github.com/basho/bitcask
+pkg_bitcask_fetch = git
+pkg_bitcask_repo = https://github.com/basho/bitcask
+pkg_bitcask_commit = develop
+
+PACKAGES += bitstore
+pkg_bitstore_name = bitstore
+pkg_bitstore_description = A document based ontology development environment
+pkg_bitstore_homepage = https://github.com/bdionne/bitstore
+pkg_bitstore_fetch = git
+pkg_bitstore_repo = https://github.com/bdionne/bitstore
+pkg_bitstore_commit = master
+
+PACKAGES += bootstrap
+pkg_bootstrap_name = bootstrap
+pkg_bootstrap_description = A simple, yet powerful Erlang cluster bootstrapping application.
+pkg_bootstrap_homepage = https://github.com/schlagert/bootstrap
+pkg_bootstrap_fetch = git
+pkg_bootstrap_repo = https://github.com/schlagert/bootstrap
+pkg_bootstrap_commit = master
+
+PACKAGES += boss
+pkg_boss_name = boss
+pkg_boss_description = Erlang web MVC, now featuring Comet
+pkg_boss_homepage = https://github.com/ChicagoBoss/ChicagoBoss
+pkg_boss_fetch = git
+pkg_boss_repo = https://github.com/ChicagoBoss/ChicagoBoss
+pkg_boss_commit = master
+
+PACKAGES += boss_db
+pkg_boss_db_name = boss_db
+pkg_boss_db_description = BossDB: a sharded, caching, pooling, evented ORM for Erlang
+pkg_boss_db_homepage = https://github.com/ErlyORM/boss_db
+pkg_boss_db_fetch = git
+pkg_boss_db_repo = https://github.com/ErlyORM/boss_db
+pkg_boss_db_commit = master
+
+PACKAGES += brod
+pkg_brod_name = brod
+pkg_brod_description = Kafka client in Erlang
+pkg_brod_homepage = https://github.com/klarna/brod
+pkg_brod_fetch = git
+pkg_brod_repo = https://github.com/klarna/brod.git
+pkg_brod_commit = master
+
+PACKAGES += bson
+pkg_bson_name = bson
+pkg_bson_description = BSON documents in Erlang, see bsonspec.org
+pkg_bson_homepage = https://github.com/comtihon/bson-erlang
+pkg_bson_fetch = git
+pkg_bson_repo = https://github.com/comtihon/bson-erlang
+pkg_bson_commit = master
+
+PACKAGES += bullet
+pkg_bullet_name = bullet
+pkg_bullet_description = Simple, reliable, efficient streaming for Cowboy.
+pkg_bullet_homepage = http://ninenines.eu
+pkg_bullet_fetch = git
+pkg_bullet_repo = https://github.com/ninenines/bullet
+pkg_bullet_commit = master
+
+PACKAGES += cache
+pkg_cache_name = cache
+pkg_cache_description = Erlang in-memory cache
+pkg_cache_homepage = https://github.com/fogfish/cache
+pkg_cache_fetch = git
+pkg_cache_repo = https://github.com/fogfish/cache
+pkg_cache_commit = master
+
+PACKAGES += cake
+pkg_cake_name = cake
+pkg_cake_description = Really simple terminal colorization
+pkg_cake_homepage = https://github.com/darach/cake-erl
+pkg_cake_fetch = git
+pkg_cake_repo = https://github.com/darach/cake-erl
+pkg_cake_commit = master
+
+PACKAGES += carotene
+pkg_carotene_name = carotene
+pkg_carotene_description = Real-time server
+pkg_carotene_homepage = https://github.com/carotene/carotene
+pkg_carotene_fetch = git
+pkg_carotene_repo = https://github.com/carotene/carotene
+pkg_carotene_commit = master
+
+PACKAGES += cberl
+pkg_cberl_name = cberl
+pkg_cberl_description = NIF based Erlang bindings for Couchbase
+pkg_cberl_homepage = https://github.com/chitika/cberl
+pkg_cberl_fetch = git
+pkg_cberl_repo = https://github.com/chitika/cberl
+pkg_cberl_commit = master
+
+PACKAGES += cecho
+pkg_cecho_name = cecho
+pkg_cecho_description = An ncurses library for Erlang
+pkg_cecho_homepage = https://github.com/mazenharake/cecho
+pkg_cecho_fetch = git
+pkg_cecho_repo = https://github.com/mazenharake/cecho
+pkg_cecho_commit = master
+
+PACKAGES += cferl
+pkg_cferl_name = cferl
+pkg_cferl_description = Rackspace / Open Stack Cloud Files Erlang Client
+pkg_cferl_homepage = https://github.com/ddossot/cferl
+pkg_cferl_fetch = git
+pkg_cferl_repo = https://github.com/ddossot/cferl
+pkg_cferl_commit = master
+
+PACKAGES += chaos_monkey
+pkg_chaos_monkey_name = chaos_monkey
+pkg_chaos_monkey_description = This is The CHAOS MONKEY. It will kill your processes.
+pkg_chaos_monkey_homepage = https://github.com/dLuna/chaos_monkey
+pkg_chaos_monkey_fetch = git
+pkg_chaos_monkey_repo = https://github.com/dLuna/chaos_monkey
+pkg_chaos_monkey_commit = master
+
+PACKAGES += check_node
+pkg_check_node_name = check_node
+pkg_check_node_description = Nagios Scripts for monitoring Riak
+pkg_check_node_homepage = https://github.com/basho-labs/riak_nagios
+pkg_check_node_fetch = git
+pkg_check_node_repo = https://github.com/basho-labs/riak_nagios
+pkg_check_node_commit = master
+
+PACKAGES += chronos
+pkg_chronos_name = chronos
+pkg_chronos_description = Timer module for Erlang that makes it easy to abstract time out of the tests.
+pkg_chronos_homepage = https://github.com/lehoff/chronos
+pkg_chronos_fetch = git
+pkg_chronos_repo = https://github.com/lehoff/chronos
+pkg_chronos_commit = master
+
+PACKAGES += chumak
+pkg_chumak_name = chumak
+pkg_chumak_description = Pure Erlang implementation of ZeroMQ Message Transport Protocol.
+pkg_chumak_homepage = http://choven.ca
+pkg_chumak_fetch = git
+pkg_chumak_repo = https://github.com/chovencorp/chumak
+pkg_chumak_commit = master
+
+PACKAGES += cl
+pkg_cl_name = cl
+pkg_cl_description = OpenCL binding for Erlang
+pkg_cl_homepage = https://github.com/tonyrog/cl
+pkg_cl_fetch = git
+pkg_cl_repo = https://github.com/tonyrog/cl
+pkg_cl_commit = master
+
+PACKAGES += clique
+pkg_clique_name = clique
+pkg_clique_description = CLI Framework for Erlang
+pkg_clique_homepage = https://github.com/basho/clique
+pkg_clique_fetch = git
+pkg_clique_repo = https://github.com/basho/clique
+pkg_clique_commit = develop
+
+PACKAGES += cloudi_core
+pkg_cloudi_core_name = cloudi_core
+pkg_cloudi_core_description = CloudI internal service runtime
+pkg_cloudi_core_homepage = http://cloudi.org/
+pkg_cloudi_core_fetch = git
+pkg_cloudi_core_repo = https://github.com/CloudI/cloudi_core
+pkg_cloudi_core_commit = master
+
+PACKAGES += cloudi_service_api_requests
+pkg_cloudi_service_api_requests_name = cloudi_service_api_requests
+pkg_cloudi_service_api_requests_description = CloudI Service API requests (JSON-RPC/Erlang-term support)
+pkg_cloudi_service_api_requests_homepage = http://cloudi.org/
+pkg_cloudi_service_api_requests_fetch = git
+pkg_cloudi_service_api_requests_repo = https://github.com/CloudI/cloudi_service_api_requests
+pkg_cloudi_service_api_requests_commit = master
+
+PACKAGES += cloudi_service_db
+pkg_cloudi_service_db_name = cloudi_service_db
+pkg_cloudi_service_db_description = CloudI Database (in-memory/testing/generic)
+pkg_cloudi_service_db_homepage = http://cloudi.org/
+pkg_cloudi_service_db_fetch = git
+pkg_cloudi_service_db_repo = https://github.com/CloudI/cloudi_service_db
+pkg_cloudi_service_db_commit = master
+
+PACKAGES += cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_name = cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_description = Cassandra CloudI Service
+pkg_cloudi_service_db_cassandra_homepage = http://cloudi.org/
+pkg_cloudi_service_db_cassandra_fetch = git
+pkg_cloudi_service_db_cassandra_repo = https://github.com/CloudI/cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_commit = master
+
+PACKAGES += cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_name = cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_description = Cassandra CQL CloudI Service
+pkg_cloudi_service_db_cassandra_cql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_cassandra_cql_fetch = git
+pkg_cloudi_service_db_cassandra_cql_repo = https://github.com/CloudI/cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_commit = master
+
+PACKAGES += cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_name = cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_description = CouchDB CloudI Service
+pkg_cloudi_service_db_couchdb_homepage = http://cloudi.org/
+pkg_cloudi_service_db_couchdb_fetch = git
+pkg_cloudi_service_db_couchdb_repo = https://github.com/CloudI/cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_commit = master
+
+PACKAGES += cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_name = cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_description = elasticsearch CloudI Service
+pkg_cloudi_service_db_elasticsearch_homepage = http://cloudi.org/
+pkg_cloudi_service_db_elasticsearch_fetch = git
+pkg_cloudi_service_db_elasticsearch_repo = https://github.com/CloudI/cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_commit = master
+
+PACKAGES += cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_name = cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_description = memcached CloudI Service
+pkg_cloudi_service_db_memcached_homepage = http://cloudi.org/
+pkg_cloudi_service_db_memcached_fetch = git
+pkg_cloudi_service_db_memcached_repo = https://github.com/CloudI/cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_commit = master
+
+PACKAGES += cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_name = cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_description = MySQL CloudI Service
+pkg_cloudi_service_db_mysql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_mysql_fetch = git
+pkg_cloudi_service_db_mysql_repo = https://github.com/CloudI/cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_commit = master
+
+PACKAGES += cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_name = cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_description = PostgreSQL CloudI Service
+pkg_cloudi_service_db_pgsql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_pgsql_fetch = git
+pkg_cloudi_service_db_pgsql_repo = https://github.com/CloudI/cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_commit = master
+
+PACKAGES += cloudi_service_db_riak
+pkg_cloudi_service_db_riak_name = cloudi_service_db_riak
+pkg_cloudi_service_db_riak_description = Riak CloudI Service
+pkg_cloudi_service_db_riak_homepage = http://cloudi.org/
+pkg_cloudi_service_db_riak_fetch = git
+pkg_cloudi_service_db_riak_repo = https://github.com/CloudI/cloudi_service_db_riak
+pkg_cloudi_service_db_riak_commit = master
+
+PACKAGES += cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_name = cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_description = Tokyo Tyrant CloudI Service
+pkg_cloudi_service_db_tokyotyrant_homepage = http://cloudi.org/
+pkg_cloudi_service_db_tokyotyrant_fetch = git
+pkg_cloudi_service_db_tokyotyrant_repo = https://github.com/CloudI/cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_commit = master
+
+PACKAGES += cloudi_service_filesystem
+pkg_cloudi_service_filesystem_name = cloudi_service_filesystem
+pkg_cloudi_service_filesystem_description = Filesystem CloudI Service
+pkg_cloudi_service_filesystem_homepage = http://cloudi.org/
+pkg_cloudi_service_filesystem_fetch = git
+pkg_cloudi_service_filesystem_repo = https://github.com/CloudI/cloudi_service_filesystem
+pkg_cloudi_service_filesystem_commit = master
+
+PACKAGES += cloudi_service_http_client
+pkg_cloudi_service_http_client_name = cloudi_service_http_client
+pkg_cloudi_service_http_client_description = HTTP client CloudI Service
+pkg_cloudi_service_http_client_homepage = http://cloudi.org/
+pkg_cloudi_service_http_client_fetch = git
+pkg_cloudi_service_http_client_repo = https://github.com/CloudI/cloudi_service_http_client
+pkg_cloudi_service_http_client_commit = master
+
+PACKAGES += cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_name = cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_description = cowboy HTTP/HTTPS CloudI Service
+pkg_cloudi_service_http_cowboy_homepage = http://cloudi.org/
+pkg_cloudi_service_http_cowboy_fetch = git
+pkg_cloudi_service_http_cowboy_repo = https://github.com/CloudI/cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_commit = master
+
+PACKAGES += cloudi_service_http_elli
+pkg_cloudi_service_http_elli_name = cloudi_service_http_elli
+pkg_cloudi_service_http_elli_description = elli HTTP CloudI Service
+pkg_cloudi_service_http_elli_homepage = http://cloudi.org/
+pkg_cloudi_service_http_elli_fetch = git
+pkg_cloudi_service_http_elli_repo = https://github.com/CloudI/cloudi_service_http_elli
+pkg_cloudi_service_http_elli_commit = master
+
+PACKAGES += cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_name = cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_description = Map/Reduce CloudI Service
+pkg_cloudi_service_map_reduce_homepage = http://cloudi.org/
+pkg_cloudi_service_map_reduce_fetch = git
+pkg_cloudi_service_map_reduce_repo = https://github.com/CloudI/cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_commit = master
+
+PACKAGES += cloudi_service_oauth1
+pkg_cloudi_service_oauth1_name = cloudi_service_oauth1
+pkg_cloudi_service_oauth1_description = OAuth v1.0 CloudI Service
+pkg_cloudi_service_oauth1_homepage = http://cloudi.org/
+pkg_cloudi_service_oauth1_fetch = git
+pkg_cloudi_service_oauth1_repo = https://github.com/CloudI/cloudi_service_oauth1
+pkg_cloudi_service_oauth1_commit = master
+
+PACKAGES += cloudi_service_queue
+pkg_cloudi_service_queue_name = cloudi_service_queue
+pkg_cloudi_service_queue_description = Persistent Queue Service
+pkg_cloudi_service_queue_homepage = http://cloudi.org/
+pkg_cloudi_service_queue_fetch = git
+pkg_cloudi_service_queue_repo = https://github.com/CloudI/cloudi_service_queue
+pkg_cloudi_service_queue_commit = master
+
+PACKAGES += cloudi_service_quorum
+pkg_cloudi_service_quorum_name = cloudi_service_quorum
+pkg_cloudi_service_quorum_description = CloudI Quorum Service
+pkg_cloudi_service_quorum_homepage = http://cloudi.org/
+pkg_cloudi_service_quorum_fetch = git
+pkg_cloudi_service_quorum_repo = https://github.com/CloudI/cloudi_service_quorum
+pkg_cloudi_service_quorum_commit = master
+
+PACKAGES += cloudi_service_router
+pkg_cloudi_service_router_name = cloudi_service_router
+pkg_cloudi_service_router_description = CloudI Router Service
+pkg_cloudi_service_router_homepage = http://cloudi.org/
+pkg_cloudi_service_router_fetch = git
+pkg_cloudi_service_router_repo = https://github.com/CloudI/cloudi_service_router
+pkg_cloudi_service_router_commit = master
+
+PACKAGES += cloudi_service_tcp
+pkg_cloudi_service_tcp_name = cloudi_service_tcp
+pkg_cloudi_service_tcp_description = TCP CloudI Service
+pkg_cloudi_service_tcp_homepage = http://cloudi.org/
+pkg_cloudi_service_tcp_fetch = git
+pkg_cloudi_service_tcp_repo = https://github.com/CloudI/cloudi_service_tcp
+pkg_cloudi_service_tcp_commit = master
+
+PACKAGES += cloudi_service_timers
+pkg_cloudi_service_timers_name = cloudi_service_timers
+pkg_cloudi_service_timers_description = Timers CloudI Service
+pkg_cloudi_service_timers_homepage = http://cloudi.org/
+pkg_cloudi_service_timers_fetch = git
+pkg_cloudi_service_timers_repo = https://github.com/CloudI/cloudi_service_timers
+pkg_cloudi_service_timers_commit = master
+
+PACKAGES += cloudi_service_udp
+pkg_cloudi_service_udp_name = cloudi_service_udp
+pkg_cloudi_service_udp_description = UDP CloudI Service
+pkg_cloudi_service_udp_homepage = http://cloudi.org/
+pkg_cloudi_service_udp_fetch = git
+pkg_cloudi_service_udp_repo = https://github.com/CloudI/cloudi_service_udp
+pkg_cloudi_service_udp_commit = master
+
+PACKAGES += cloudi_service_validate
+pkg_cloudi_service_validate_name = cloudi_service_validate
+pkg_cloudi_service_validate_description = CloudI Validate Service
+pkg_cloudi_service_validate_homepage = http://cloudi.org/
+pkg_cloudi_service_validate_fetch = git
+pkg_cloudi_service_validate_repo = https://github.com/CloudI/cloudi_service_validate
+pkg_cloudi_service_validate_commit = master
+
+PACKAGES += cloudi_service_zeromq
+pkg_cloudi_service_zeromq_name = cloudi_service_zeromq
+pkg_cloudi_service_zeromq_description = ZeroMQ CloudI Service
+pkg_cloudi_service_zeromq_homepage = http://cloudi.org/
+pkg_cloudi_service_zeromq_fetch = git
+pkg_cloudi_service_zeromq_repo = https://github.com/CloudI/cloudi_service_zeromq
+pkg_cloudi_service_zeromq_commit = master
+
+PACKAGES += cluster_info
+pkg_cluster_info_name = cluster_info
+pkg_cluster_info_description = Fork of Hibari's nifty cluster_info OTP app
+pkg_cluster_info_homepage = https://github.com/basho/cluster_info
+pkg_cluster_info_fetch = git
+pkg_cluster_info_repo = https://github.com/basho/cluster_info
+pkg_cluster_info_commit = master
+
+PACKAGES += color
+pkg_color_name = color
+pkg_color_description = ANSI colors for your Erlang
+pkg_color_homepage = https://github.com/julianduque/erlang-color
+pkg_color_fetch = git
+pkg_color_repo = https://github.com/julianduque/erlang-color
+pkg_color_commit = master
+
+PACKAGES += confetti
+pkg_confetti_name = confetti
+pkg_confetti_description = Erlang configuration provider / application:get_env/2 on steroids
+pkg_confetti_homepage = https://github.com/jtendo/confetti
+pkg_confetti_fetch = git
+pkg_confetti_repo = https://github.com/jtendo/confetti
+pkg_confetti_commit = master
+
+PACKAGES += couchbeam
+pkg_couchbeam_name = couchbeam
+pkg_couchbeam_description = Apache CouchDB client in Erlang
+pkg_couchbeam_homepage = https://github.com/benoitc/couchbeam
+pkg_couchbeam_fetch = git
+pkg_couchbeam_repo = https://github.com/benoitc/couchbeam
+pkg_couchbeam_commit = master
+
+PACKAGES += covertool
+pkg_covertool_name = covertool
+pkg_covertool_description = Tool to convert Erlang cover data files into Cobertura XML reports
+pkg_covertool_homepage = https://github.com/idubrov/covertool
+pkg_covertool_fetch = git
+pkg_covertool_repo = https://github.com/idubrov/covertool
+pkg_covertool_commit = master
+
+PACKAGES += cowboy
+pkg_cowboy_name = cowboy
+pkg_cowboy_description = Small, fast and modular HTTP server.
+pkg_cowboy_homepage = http://ninenines.eu
+pkg_cowboy_fetch = git
+pkg_cowboy_repo = https://github.com/ninenines/cowboy
+pkg_cowboy_commit = 1.0.4
+
+PACKAGES += cowdb
+pkg_cowdb_name = cowdb
+pkg_cowdb_description = Pure Key/Value database library for Erlang Applications
+pkg_cowdb_homepage = https://github.com/refuge/cowdb
+pkg_cowdb_fetch = git
+pkg_cowdb_repo = https://github.com/refuge/cowdb
+pkg_cowdb_commit = master
+
+PACKAGES += cowlib
+pkg_cowlib_name = cowlib
+pkg_cowlib_description = Support library for manipulating Web protocols.
+pkg_cowlib_homepage = http://ninenines.eu
+pkg_cowlib_fetch = git
+pkg_cowlib_repo = https://github.com/ninenines/cowlib
+pkg_cowlib_commit = 1.0.2
+
+PACKAGES += cpg
+pkg_cpg_name = cpg
+pkg_cpg_description = CloudI Process Groups
+pkg_cpg_homepage = https://github.com/okeuday/cpg
+pkg_cpg_fetch = git
+pkg_cpg_repo = https://github.com/okeuday/cpg
+pkg_cpg_commit = master
+
+PACKAGES += cqerl
+pkg_cqerl_name = cqerl
+pkg_cqerl_description = Native Erlang CQL client for Cassandra
+pkg_cqerl_homepage = https://matehat.github.io/cqerl/
+pkg_cqerl_fetch = git
+pkg_cqerl_repo = https://github.com/matehat/cqerl
+pkg_cqerl_commit = master
+
+PACKAGES += cr
+pkg_cr_name = cr
+pkg_cr_description = Chain Replication
+pkg_cr_homepage = https://synrc.com/apps/cr/doc/cr.htm
+pkg_cr_fetch = git
+pkg_cr_repo = https://github.com/spawnproc/cr
+pkg_cr_commit = master
+
+PACKAGES += cuttlefish
+pkg_cuttlefish_name = cuttlefish
+pkg_cuttlefish_description = never lose your childlike sense of wonder baby cuttlefish, promise me?
+pkg_cuttlefish_homepage = https://github.com/basho/cuttlefish
+pkg_cuttlefish_fetch = git
+pkg_cuttlefish_repo = https://github.com/basho/cuttlefish
+pkg_cuttlefish_commit = master
+
+PACKAGES += damocles
+pkg_damocles_name = damocles
+pkg_damocles_description = Erlang library for generating adversarial network conditions for QAing distributed applications/systems on a single Linux box.
+pkg_damocles_homepage = https://github.com/lostcolony/damocles
+pkg_damocles_fetch = git
+pkg_damocles_repo = https://github.com/lostcolony/damocles
+pkg_damocles_commit = master
+
+PACKAGES += debbie
+pkg_debbie_name = debbie
+pkg_debbie_description = .DEB Built In Erlang
+pkg_debbie_homepage = https://github.com/crownedgrouse/debbie
+pkg_debbie_fetch = git
+pkg_debbie_repo = https://github.com/crownedgrouse/debbie
+pkg_debbie_commit = master
+
+PACKAGES += decimal
+pkg_decimal_name = decimal
+pkg_decimal_description = An Erlang decimal arithmetic library
+pkg_decimal_homepage = https://github.com/tim/erlang-decimal
+pkg_decimal_fetch = git
+pkg_decimal_repo = https://github.com/tim/erlang-decimal
+pkg_decimal_commit = master
+
+PACKAGES += detergent
+pkg_detergent_name = detergent
+pkg_detergent_description = An emulsifying Erlang SOAP library
+pkg_detergent_homepage = https://github.com/devinus/detergent
+pkg_detergent_fetch = git
+pkg_detergent_repo = https://github.com/devinus/detergent
+pkg_detergent_commit = master
+
+PACKAGES += detest
+pkg_detest_name = detest
+pkg_detest_description = Tool for running tests on a cluster of erlang nodes
+pkg_detest_homepage = https://github.com/biokoda/detest
+pkg_detest_fetch = git
+pkg_detest_repo = https://github.com/biokoda/detest
+pkg_detest_commit = master
+
+PACKAGES += dh_date
+pkg_dh_date_name = dh_date
+pkg_dh_date_description = Date formatting / parsing library for erlang
+pkg_dh_date_homepage = https://github.com/daleharvey/dh_date
+pkg_dh_date_fetch = git
+pkg_dh_date_repo = https://github.com/daleharvey/dh_date
+pkg_dh_date_commit = master
+
+PACKAGES += dirbusterl
+pkg_dirbusterl_name = dirbusterl
+pkg_dirbusterl_description = DirBuster successor in Erlang
+pkg_dirbusterl_homepage = https://github.com/silentsignal/DirBustErl
+pkg_dirbusterl_fetch = git
+pkg_dirbusterl_repo = https://github.com/silentsignal/DirBustErl
+pkg_dirbusterl_commit = master
+
+PACKAGES += dispcount
+pkg_dispcount_name = dispcount
+pkg_dispcount_description = Erlang task dispatcher based on ETS counters.
+pkg_dispcount_homepage = https://github.com/ferd/dispcount
+pkg_dispcount_fetch = git
+pkg_dispcount_repo = https://github.com/ferd/dispcount
+pkg_dispcount_commit = master
+
+PACKAGES += dlhttpc
+pkg_dlhttpc_name = dlhttpc
+pkg_dlhttpc_description = dispcount-based lhttpc fork for massive amounts of requests to limited endpoints
+pkg_dlhttpc_homepage = https://github.com/ferd/dlhttpc
+pkg_dlhttpc_fetch = git
+pkg_dlhttpc_repo = https://github.com/ferd/dlhttpc
+pkg_dlhttpc_commit = master
+
+PACKAGES += dns
+pkg_dns_name = dns
+pkg_dns_description = Erlang DNS library
+pkg_dns_homepage = https://github.com/aetrion/dns_erlang
+pkg_dns_fetch = git
+pkg_dns_repo = https://github.com/aetrion/dns_erlang
+pkg_dns_commit = master
+
+PACKAGES += dnssd
+pkg_dnssd_name = dnssd
+pkg_dnssd_description = Erlang interface to Apple's Bonjour DNS Service Discovery implementation
+pkg_dnssd_homepage = https://github.com/benoitc/dnssd_erlang
+pkg_dnssd_fetch = git
+pkg_dnssd_repo = https://github.com/benoitc/dnssd_erlang
+pkg_dnssd_commit = master
+
+PACKAGES += dynamic_compile
+pkg_dynamic_compile_name = dynamic_compile
+pkg_dynamic_compile_description = compile and load erlang modules from string input
+pkg_dynamic_compile_homepage = https://github.com/jkvor/dynamic_compile
+pkg_dynamic_compile_fetch = git
+pkg_dynamic_compile_repo = https://github.com/jkvor/dynamic_compile
+pkg_dynamic_compile_commit = master
+
+PACKAGES += e2
+pkg_e2_name = e2
+pkg_e2_description = Library to simplify writing correct OTP applications.
+pkg_e2_homepage = http://e2project.org
+pkg_e2_fetch = git
+pkg_e2_repo = https://github.com/gar1t/e2
+pkg_e2_commit = master
+
+PACKAGES += eamf
+pkg_eamf_name = eamf
+pkg_eamf_description = eAMF provides Action Message Format (AMF) support for Erlang
+pkg_eamf_homepage = https://github.com/mrinalwadhwa/eamf
+pkg_eamf_fetch = git
+pkg_eamf_repo = https://github.com/mrinalwadhwa/eamf
+pkg_eamf_commit = master
+
+PACKAGES += eavro
+pkg_eavro_name = eavro
+pkg_eavro_description = Apache Avro encoder/decoder
+pkg_eavro_homepage = https://github.com/SIfoxDevTeam/eavro
+pkg_eavro_fetch = git
+pkg_eavro_repo = https://github.com/SIfoxDevTeam/eavro
+pkg_eavro_commit = master
+
+PACKAGES += ecapnp
+pkg_ecapnp_name = ecapnp
+pkg_ecapnp_description = Cap'n Proto library for Erlang
+pkg_ecapnp_homepage = https://github.com/kaos/ecapnp
+pkg_ecapnp_fetch = git
+pkg_ecapnp_repo = https://github.com/kaos/ecapnp
+pkg_ecapnp_commit = master
+
+PACKAGES += econfig
+pkg_econfig_name = econfig
+pkg_econfig_description = simple Erlang config handler using INI files
+pkg_econfig_homepage = https://github.com/benoitc/econfig
+pkg_econfig_fetch = git
+pkg_econfig_repo = https://github.com/benoitc/econfig
+pkg_econfig_commit = master
+
+PACKAGES += edate
+pkg_edate_name = edate
+pkg_edate_description = date manipulation library for erlang
+pkg_edate_homepage = https://github.com/dweldon/edate
+pkg_edate_fetch = git
+pkg_edate_repo = https://github.com/dweldon/edate
+pkg_edate_commit = master
+
+PACKAGES += edgar
+pkg_edgar_name = edgar
+pkg_edgar_description = Erlang Does GNU AR
+pkg_edgar_homepage = https://github.com/crownedgrouse/edgar
+pkg_edgar_fetch = git
+pkg_edgar_repo = https://github.com/crownedgrouse/edgar
+pkg_edgar_commit = master
+
+PACKAGES += edis
+pkg_edis_name = edis
+pkg_edis_description = An Erlang implementation of Redis KV Store
+pkg_edis_homepage = http://inaka.github.com/edis/
+pkg_edis_fetch = git
+pkg_edis_repo = https://github.com/inaka/edis
+pkg_edis_commit = master
+
+PACKAGES += edns
+pkg_edns_name = edns
+pkg_edns_description = Erlang/OTP DNS server
+pkg_edns_homepage = https://github.com/hcvst/erlang-dns
+pkg_edns_fetch = git
+pkg_edns_repo = https://github.com/hcvst/erlang-dns
+pkg_edns_commit = master
+
+PACKAGES += edown
+pkg_edown_name = edown
+pkg_edown_description = EDoc extension for generating Github-flavored Markdown
+pkg_edown_homepage = https://github.com/uwiger/edown
+pkg_edown_fetch = git
+pkg_edown_repo = https://github.com/uwiger/edown
+pkg_edown_commit = master
+
+PACKAGES += eep
+pkg_eep_name = eep
+pkg_eep_description = Erlang Easy Profiling (eep) application provides a way to analyze application performance and call hierarchy
+pkg_eep_homepage = https://github.com/virtan/eep
+pkg_eep_fetch = git
+pkg_eep_repo = https://github.com/virtan/eep
+pkg_eep_commit = master
+
+PACKAGES += eep_app
+pkg_eep_app_name = eep_app
+pkg_eep_app_description = Embedded Event Processing
+pkg_eep_app_homepage = https://github.com/darach/eep-erl
+pkg_eep_app_fetch = git
+pkg_eep_app_repo = https://github.com/darach/eep-erl
+pkg_eep_app_commit = master
+
+PACKAGES += efene
+pkg_efene_name = efene
+pkg_efene_description = Alternative syntax for the Erlang Programming Language focusing on simplicity, ease of use and programmer UX
+pkg_efene_homepage = https://github.com/efene/efene
+pkg_efene_fetch = git
+pkg_efene_repo = https://github.com/efene/efene
+pkg_efene_commit = master
+
+PACKAGES += egeoip
+pkg_egeoip_name = egeoip
+pkg_egeoip_description = Erlang IP Geolocation module, currently supporting the MaxMind GeoLite City Database.
+pkg_egeoip_homepage = https://github.com/mochi/egeoip
+pkg_egeoip_fetch = git
+pkg_egeoip_repo = https://github.com/mochi/egeoip
+pkg_egeoip_commit = master
+
+PACKAGES += ehsa
+pkg_ehsa_name = ehsa
+pkg_ehsa_description = Erlang HTTP server basic and digest authentication modules
+pkg_ehsa_homepage = https://bitbucket.org/a12n/ehsa
+pkg_ehsa_fetch = hg
+pkg_ehsa_repo = https://bitbucket.org/a12n/ehsa
+pkg_ehsa_commit = default
+
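+# Fetch methods other than git are supported, as the ehsa entry above shows
+# (Mercurial, with the "default" branch in place of a commit). The same
+# method/repo/commit triplet can be spelled out manually in a project
+# Makefile; a sketch mirroring the entry above:
+#
+#     dep_ehsa = hg https://bitbucket.org/a12n/ehsa default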
+PACKAGES += ej
+pkg_ej_name = ej
+pkg_ej_description = Helper module for working with Erlang terms representing JSON
+pkg_ej_homepage = https://github.com/seth/ej
+pkg_ej_fetch = git
+pkg_ej_repo = https://github.com/seth/ej
+pkg_ej_commit = master
+
+PACKAGES += ejabberd
+pkg_ejabberd_name = ejabberd
+pkg_ejabberd_description = Robust, ubiquitous and massively scalable Jabber / XMPP Instant Messaging platform
+pkg_ejabberd_homepage = https://github.com/processone/ejabberd
+pkg_ejabberd_fetch = git
+pkg_ejabberd_repo = https://github.com/processone/ejabberd
+pkg_ejabberd_commit = master
+
+PACKAGES += ejwt
+pkg_ejwt_name = ejwt
+pkg_ejwt_description = erlang library for JSON Web Token
+pkg_ejwt_homepage = https://github.com/artefactop/ejwt
+pkg_ejwt_fetch = git
+pkg_ejwt_repo = https://github.com/artefactop/ejwt
+pkg_ejwt_commit = master
+
+PACKAGES += ekaf
+pkg_ekaf_name = ekaf
+pkg_ekaf_description = A minimal, high-performance Kafka client in Erlang.
+pkg_ekaf_homepage = https://github.com/helpshift/ekaf
+pkg_ekaf_fetch = git
+pkg_ekaf_repo = https://github.com/helpshift/ekaf
+pkg_ekaf_commit = master
+
+PACKAGES += elarm
+pkg_elarm_name = elarm
+pkg_elarm_description = Alarm Manager for Erlang.
+pkg_elarm_homepage = https://github.com/esl/elarm
+pkg_elarm_fetch = git
+pkg_elarm_repo = https://github.com/esl/elarm
+pkg_elarm_commit = master
+
+PACKAGES += eleveldb
+pkg_eleveldb_name = eleveldb
+pkg_eleveldb_description = Erlang LevelDB API
+pkg_eleveldb_homepage = https://github.com/basho/eleveldb
+pkg_eleveldb_fetch = git
+pkg_eleveldb_repo = https://github.com/basho/eleveldb
+pkg_eleveldb_commit = master
+
+PACKAGES += elixir
+pkg_elixir_name = elixir
+pkg_elixir_description = Elixir is a dynamic, functional language designed for building scalable and maintainable applications
+pkg_elixir_homepage = https://elixir-lang.org/
+pkg_elixir_fetch = git
+pkg_elixir_repo = https://github.com/elixir-lang/elixir
+pkg_elixir_commit = master
+
+PACKAGES += elli
+pkg_elli_name = elli
+pkg_elli_description = Simple, robust and performant Erlang web server
+pkg_elli_homepage = https://github.com/elli-lib/elli
+pkg_elli_fetch = git
+pkg_elli_repo = https://github.com/elli-lib/elli
+pkg_elli_commit = master
+
+PACKAGES += elvis
+pkg_elvis_name = elvis
+pkg_elvis_description = Erlang Style Reviewer
+pkg_elvis_homepage = https://github.com/inaka/elvis
+pkg_elvis_fetch = git
+pkg_elvis_repo = https://github.com/inaka/elvis
+pkg_elvis_commit = master
+
+PACKAGES += emagick
+pkg_emagick_name = emagick
+pkg_emagick_description = Wrapper for Graphics/ImageMagick command line tool.
+pkg_emagick_homepage = https://github.com/kivra/emagick
+pkg_emagick_fetch = git
+pkg_emagick_repo = https://github.com/kivra/emagick
+pkg_emagick_commit = master
+
+PACKAGES += emysql
+pkg_emysql_name = emysql
+pkg_emysql_description = Stable, pure Erlang MySQL driver.
+pkg_emysql_homepage = https://github.com/Eonblast/Emysql
+pkg_emysql_fetch = git
+pkg_emysql_repo = https://github.com/Eonblast/Emysql
+pkg_emysql_commit = master
+
+PACKAGES += enm
+pkg_enm_name = enm
+pkg_enm_description = Erlang driver for nanomsg
+pkg_enm_homepage = https://github.com/basho/enm
+pkg_enm_fetch = git
+pkg_enm_repo = https://github.com/basho/enm
+pkg_enm_commit = master
+
+PACKAGES += entop
+pkg_entop_name = entop
+pkg_entop_description = A top-like tool for monitoring an Erlang node
+pkg_entop_homepage = https://github.com/mazenharake/entop
+pkg_entop_fetch = git
+pkg_entop_repo = https://github.com/mazenharake/entop
+pkg_entop_commit = master
+
+PACKAGES += epcap
+pkg_epcap_name = epcap
+pkg_epcap_description = Erlang packet capture interface using pcap
+pkg_epcap_homepage = https://github.com/msantos/epcap
+pkg_epcap_fetch = git
+pkg_epcap_repo = https://github.com/msantos/epcap
+pkg_epcap_commit = master
+
+PACKAGES += eper
+pkg_eper_name = eper
+pkg_eper_description = Erlang performance and debugging tools.
+pkg_eper_homepage = https://github.com/massemanet/eper
+pkg_eper_fetch = git
+pkg_eper_repo = https://github.com/massemanet/eper
+pkg_eper_commit = master
+
+PACKAGES += epgsql
+pkg_epgsql_name = epgsql
+pkg_epgsql_description = Erlang PostgreSQL client library.
+pkg_epgsql_homepage = https://github.com/epgsql/epgsql
+pkg_epgsql_fetch = git
+pkg_epgsql_repo = https://github.com/epgsql/epgsql
+pkg_epgsql_commit = master
+
+PACKAGES += episcina
+pkg_episcina_name = episcina
+pkg_episcina_description = A simple, non-intrusive resource pool for connections
+pkg_episcina_homepage = https://github.com/erlware/episcina
+pkg_episcina_fetch = git
+pkg_episcina_repo = https://github.com/erlware/episcina
+pkg_episcina_commit = master
+
+PACKAGES += eplot
+pkg_eplot_name = eplot
+pkg_eplot_description = A plot engine written in erlang.
+pkg_eplot_homepage = https://github.com/psyeugenic/eplot
+pkg_eplot_fetch = git
+pkg_eplot_repo = https://github.com/psyeugenic/eplot
+pkg_eplot_commit = master
+
+PACKAGES += epocxy
+pkg_epocxy_name = epocxy
+pkg_epocxy_description = Erlang Patterns of Concurrency
+pkg_epocxy_homepage = https://github.com/duomark/epocxy
+pkg_epocxy_fetch = git
+pkg_epocxy_repo = https://github.com/duomark/epocxy
+pkg_epocxy_commit = master
+
+PACKAGES += epubnub
+pkg_epubnub_name = epubnub
+pkg_epubnub_description = Erlang PubNub API
+pkg_epubnub_homepage = https://github.com/tsloughter/epubnub
+pkg_epubnub_fetch = git
+pkg_epubnub_repo = https://github.com/tsloughter/epubnub
+pkg_epubnub_commit = master
+
+PACKAGES += eqm
+pkg_eqm_name = eqm
+pkg_eqm_description = Erlang pub sub with supply-demand channels
+pkg_eqm_homepage = https://github.com/loucash/eqm
+pkg_eqm_fetch = git
+pkg_eqm_repo = https://github.com/loucash/eqm
+pkg_eqm_commit = master
+
+PACKAGES += eredis
+pkg_eredis_name = eredis
+pkg_eredis_description = Erlang Redis client
+pkg_eredis_homepage = https://github.com/wooga/eredis
+pkg_eredis_fetch = git
+pkg_eredis_repo = https://github.com/wooga/eredis
+pkg_eredis_commit = master
+
+PACKAGES += eredis_pool
+pkg_eredis_pool_name = eredis_pool
+pkg_eredis_pool_description = eredis_pool is a pool of Redis clients, using eredis and poolboy.
+pkg_eredis_pool_homepage = https://github.com/hiroeorz/eredis_pool
+pkg_eredis_pool_fetch = git
+pkg_eredis_pool_repo = https://github.com/hiroeorz/eredis_pool
+pkg_eredis_pool_commit = master
+
+PACKAGES += erl_streams
+pkg_erl_streams_name = erl_streams
+pkg_erl_streams_description = Streams in Erlang
+pkg_erl_streams_homepage = https://github.com/epappas/erl_streams
+pkg_erl_streams_fetch = git
+pkg_erl_streams_repo = https://github.com/epappas/erl_streams
+pkg_erl_streams_commit = master
+
+PACKAGES += erlang_cep
+pkg_erlang_cep_name = erlang_cep
+pkg_erlang_cep_description = A basic CEP package written in erlang
+pkg_erlang_cep_homepage = https://github.com/danmacklin/erlang_cep
+pkg_erlang_cep_fetch = git
+pkg_erlang_cep_repo = https://github.com/danmacklin/erlang_cep
+pkg_erlang_cep_commit = master
+
+PACKAGES += erlang_js
+pkg_erlang_js_name = erlang_js
+pkg_erlang_js_description = A linked-in driver for Erlang to Mozilla's Spidermonkey Javascript runtime.
+pkg_erlang_js_homepage = https://github.com/basho/erlang_js
+pkg_erlang_js_fetch = git
+pkg_erlang_js_repo = https://github.com/basho/erlang_js
+pkg_erlang_js_commit = master
+
+PACKAGES += erlang_localtime
+pkg_erlang_localtime_name = erlang_localtime
+pkg_erlang_localtime_description = Erlang library for conversion from one local time to another
+pkg_erlang_localtime_homepage = https://github.com/dmitryme/erlang_localtime
+pkg_erlang_localtime_fetch = git
+pkg_erlang_localtime_repo = https://github.com/dmitryme/erlang_localtime
+pkg_erlang_localtime_commit = master
+
+PACKAGES += erlang_smtp
+pkg_erlang_smtp_name = erlang_smtp
+pkg_erlang_smtp_description = Erlang SMTP and POP3 server code.
+pkg_erlang_smtp_homepage = https://github.com/tonyg/erlang-smtp
+pkg_erlang_smtp_fetch = git
+pkg_erlang_smtp_repo = https://github.com/tonyg/erlang-smtp
+pkg_erlang_smtp_commit = master
+
+PACKAGES += erlang_term
+pkg_erlang_term_name = erlang_term
+pkg_erlang_term_description = Erlang Term Info
+pkg_erlang_term_homepage = https://github.com/okeuday/erlang_term
+pkg_erlang_term_fetch = git
+pkg_erlang_term_repo = https://github.com/okeuday/erlang_term
+pkg_erlang_term_commit = master
+
+PACKAGES += erlastic_search
+pkg_erlastic_search_name = erlastic_search
+pkg_erlastic_search_description = An Erlang app for communicating with Elastic Search's REST interface.
+pkg_erlastic_search_homepage = https://github.com/tsloughter/erlastic_search
+pkg_erlastic_search_fetch = git
+pkg_erlastic_search_repo = https://github.com/tsloughter/erlastic_search
+pkg_erlastic_search_commit = master
+
+PACKAGES += erlasticsearch
+pkg_erlasticsearch_name = erlasticsearch
+pkg_erlasticsearch_description = Erlang thrift interface to elastic_search
+pkg_erlasticsearch_homepage = https://github.com/dieswaytoofast/erlasticsearch
+pkg_erlasticsearch_fetch = git
+pkg_erlasticsearch_repo = https://github.com/dieswaytoofast/erlasticsearch
+pkg_erlasticsearch_commit = master
+
+PACKAGES += erlbrake
+pkg_erlbrake_name = erlbrake
+pkg_erlbrake_description = Erlang Airbrake notification client
+pkg_erlbrake_homepage = https://github.com/kenpratt/erlbrake
+pkg_erlbrake_fetch = git
+pkg_erlbrake_repo = https://github.com/kenpratt/erlbrake
+pkg_erlbrake_commit = master
+
+PACKAGES += erlcloud
+pkg_erlcloud_name = erlcloud
+pkg_erlcloud_description = Cloud Computing library for erlang (Amazon EC2, S3, SQS, SimpleDB, Mechanical Turk, ELB)
+pkg_erlcloud_homepage = https://github.com/gleber/erlcloud
+pkg_erlcloud_fetch = git
+pkg_erlcloud_repo = https://github.com/gleber/erlcloud
+pkg_erlcloud_commit = master
+
+PACKAGES += erlcron
+pkg_erlcron_name = erlcron
+pkg_erlcron_description = Erlang cronish system
+pkg_erlcron_homepage = https://github.com/erlware/erlcron
+pkg_erlcron_fetch = git
+pkg_erlcron_repo = https://github.com/erlware/erlcron
+pkg_erlcron_commit = master
+
+PACKAGES += erldb
+pkg_erldb_name = erldb
+pkg_erldb_description = ORM (Object-relational mapping) application implemented in Erlang
+pkg_erldb_homepage = http://erldb.org
+pkg_erldb_fetch = git
+pkg_erldb_repo = https://github.com/erldb/erldb
+pkg_erldb_commit = master
+
+PACKAGES += erldis
+pkg_erldis_name = erldis
+pkg_erldis_description = redis erlang client library
+pkg_erldis_homepage = https://github.com/cstar/erldis
+pkg_erldis_fetch = git
+pkg_erldis_repo = https://github.com/cstar/erldis
+pkg_erldis_commit = master
+
+PACKAGES += erldns
+pkg_erldns_name = erldns
+pkg_erldns_description = DNS server, in erlang.
+pkg_erldns_homepage = https://github.com/aetrion/erl-dns
+pkg_erldns_fetch = git
+pkg_erldns_repo = https://github.com/aetrion/erl-dns
+pkg_erldns_commit = master
+
+PACKAGES += erldocker
+pkg_erldocker_name = erldocker
+pkg_erldocker_description = Docker Remote API client for Erlang
+pkg_erldocker_homepage = https://github.com/proger/erldocker
+pkg_erldocker_fetch = git
+pkg_erldocker_repo = https://github.com/proger/erldocker
+pkg_erldocker_commit = master
+
+PACKAGES += erlfsmon
+pkg_erlfsmon_name = erlfsmon
+pkg_erlfsmon_description = Erlang filesystem event watcher for Linux and OSX
+pkg_erlfsmon_homepage = https://github.com/proger/erlfsmon
+pkg_erlfsmon_fetch = git
+pkg_erlfsmon_repo = https://github.com/proger/erlfsmon
+pkg_erlfsmon_commit = master
+
+PACKAGES += erlgit
+pkg_erlgit_name = erlgit
+pkg_erlgit_description = Erlang convenience wrapper around git executable
+pkg_erlgit_homepage = https://github.com/gleber/erlgit
+pkg_erlgit_fetch = git
+pkg_erlgit_repo = https://github.com/gleber/erlgit
+pkg_erlgit_commit = master
+
+PACKAGES += erlguten
+pkg_erlguten_name = erlguten
+pkg_erlguten_description = ErlGuten is a system for high-quality typesetting, written purely in Erlang.
+pkg_erlguten_homepage = https://github.com/richcarl/erlguten
+pkg_erlguten_fetch = git
+pkg_erlguten_repo = https://github.com/richcarl/erlguten
+pkg_erlguten_commit = master
+
+PACKAGES += erlmc
+pkg_erlmc_name = erlmc
+pkg_erlmc_description = Erlang memcached binary protocol client
+pkg_erlmc_homepage = https://github.com/jkvor/erlmc
+pkg_erlmc_fetch = git
+pkg_erlmc_repo = https://github.com/jkvor/erlmc
+pkg_erlmc_commit = master
+
+PACKAGES += erlmongo
+pkg_erlmongo_name = erlmongo
+pkg_erlmongo_description = Record based Erlang driver for MongoDB with gridfs support
+pkg_erlmongo_homepage = https://github.com/SergejJurecko/erlmongo
+pkg_erlmongo_fetch = git
+pkg_erlmongo_repo = https://github.com/SergejJurecko/erlmongo
+pkg_erlmongo_commit = master
+
+PACKAGES += erlog
+pkg_erlog_name = erlog
+pkg_erlog_description = Prolog interpreter in and for Erlang
+pkg_erlog_homepage = https://github.com/rvirding/erlog
+pkg_erlog_fetch = git
+pkg_erlog_repo = https://github.com/rvirding/erlog
+pkg_erlog_commit = master
+
+PACKAGES += erlpass
+pkg_erlpass_name = erlpass
+pkg_erlpass_description = A library to handle password hashing and changing in a safe manner, independent from any kind of storage whatsoever.
+pkg_erlpass_homepage = https://github.com/ferd/erlpass
+pkg_erlpass_fetch = git
+pkg_erlpass_repo = https://github.com/ferd/erlpass
+pkg_erlpass_commit = master
+
+PACKAGES += erlport
+pkg_erlport_name = erlport
+pkg_erlport_description = ErlPort - connect Erlang to other languages
+pkg_erlport_homepage = https://github.com/hdima/erlport
+pkg_erlport_fetch = git
+pkg_erlport_repo = https://github.com/hdima/erlport
+pkg_erlport_commit = master
+
+PACKAGES += erlsh
+pkg_erlsh_name = erlsh
+pkg_erlsh_description = Erlang shell tools
+pkg_erlsh_homepage = https://github.com/proger/erlsh
+pkg_erlsh_fetch = git
+pkg_erlsh_repo = https://github.com/proger/erlsh
+pkg_erlsh_commit = master
+
+PACKAGES += erlsha2
+pkg_erlsha2_name = erlsha2
+pkg_erlsha2_description = SHA-224, SHA-256, SHA-384, SHA-512 implemented in Erlang NIFs.
+pkg_erlsha2_homepage = https://github.com/vinoski/erlsha2
+pkg_erlsha2_fetch = git
+pkg_erlsha2_repo = https://github.com/vinoski/erlsha2
+pkg_erlsha2_commit = master
+
+PACKAGES += erlsom
+pkg_erlsom_name = erlsom
+pkg_erlsom_description = XML parser for Erlang
+pkg_erlsom_homepage = https://github.com/willemdj/erlsom
+pkg_erlsom_fetch = git
+pkg_erlsom_repo = https://github.com/willemdj/erlsom
+pkg_erlsom_commit = master
+
+PACKAGES += erlubi
+pkg_erlubi_name = erlubi
+pkg_erlubi_description = Ubigraph Erlang Client (and Process Visualizer)
+pkg_erlubi_homepage = https://github.com/krestenkrab/erlubi
+pkg_erlubi_fetch = git
+pkg_erlubi_repo = https://github.com/krestenkrab/erlubi
+pkg_erlubi_commit = master
+
+PACKAGES += erlvolt
+pkg_erlvolt_name = erlvolt
+pkg_erlvolt_description = VoltDB Erlang Client Driver
+pkg_erlvolt_homepage = https://github.com/VoltDB/voltdb-client-erlang
+pkg_erlvolt_fetch = git
+pkg_erlvolt_repo = https://github.com/VoltDB/voltdb-client-erlang
+pkg_erlvolt_commit = master
+
+PACKAGES += erlware_commons
+pkg_erlware_commons_name = erlware_commons
+pkg_erlware_commons_description = Erlware Commons is an Erlware project focused on all aspects of reusable Erlang components.
+pkg_erlware_commons_homepage = https://github.com/erlware/erlware_commons
+pkg_erlware_commons_fetch = git
+pkg_erlware_commons_repo = https://github.com/erlware/erlware_commons
+pkg_erlware_commons_commit = master
+
+PACKAGES += erlydtl
+pkg_erlydtl_name = erlydtl
+pkg_erlydtl_description = Django Template Language for Erlang.
+pkg_erlydtl_homepage = https://github.com/erlydtl/erlydtl
+pkg_erlydtl_fetch = git
+pkg_erlydtl_repo = https://github.com/erlydtl/erlydtl
+pkg_erlydtl_commit = master
+
+PACKAGES += errd
+pkg_errd_name = errd
+pkg_errd_description = Erlang RRDTool library
+pkg_errd_homepage = https://github.com/archaelus/errd
+pkg_errd_fetch = git
+pkg_errd_repo = https://github.com/archaelus/errd
+pkg_errd_commit = master
+
+PACKAGES += erserve
+pkg_erserve_name = erserve
+pkg_erserve_description = Erlang/Rserve communication interface
+pkg_erserve_homepage = https://github.com/del/erserve
+pkg_erserve_fetch = git
+pkg_erserve_repo = https://github.com/del/erserve
+pkg_erserve_commit = master
+
+PACKAGES += erwa
+pkg_erwa_name = erwa
+pkg_erwa_description = A WAMP router and client written in Erlang.
+pkg_erwa_homepage = https://github.com/bwegh/erwa
+pkg_erwa_fetch = git
+pkg_erwa_repo = https://github.com/bwegh/erwa
+pkg_erwa_commit = master
+
+PACKAGES += escalus
+pkg_escalus_name = escalus
+pkg_escalus_description = An XMPP client library in Erlang for conveniently testing XMPP servers
+pkg_escalus_homepage = https://github.com/esl/escalus
+pkg_escalus_fetch = git
+pkg_escalus_repo = https://github.com/esl/escalus
+pkg_escalus_commit = master
+
+PACKAGES += esh_mk
+pkg_esh_mk_name = esh_mk
+pkg_esh_mk_description = esh template engine plugin for erlang.mk
+pkg_esh_mk_homepage = https://github.com/crownedgrouse/esh.mk
+pkg_esh_mk_fetch = git
+pkg_esh_mk_repo = https://github.com/crownedgrouse/esh.mk.git
+pkg_esh_mk_commit = master
+
+PACKAGES += espec
+pkg_espec_name = espec
+pkg_espec_description = ESpec: Behaviour driven development framework for Erlang
+pkg_espec_homepage = https://github.com/lucaspiller/espec
+pkg_espec_fetch = git
+pkg_espec_repo = https://github.com/lucaspiller/espec
+pkg_espec_commit = master
+
+PACKAGES += estatsd
+pkg_estatsd_name = estatsd
+pkg_estatsd_description = Erlang stats aggregation app that periodically flushes data to graphite
+pkg_estatsd_homepage = https://github.com/RJ/estatsd
+pkg_estatsd_fetch = git
+pkg_estatsd_repo = https://github.com/RJ/estatsd
+pkg_estatsd_commit = master
+
+PACKAGES += etap
+pkg_etap_name = etap
+pkg_etap_description = etap is a simple erlang testing library that provides TAP compliant output.
+pkg_etap_homepage = https://github.com/ngerakines/etap
+pkg_etap_fetch = git
+pkg_etap_repo = https://github.com/ngerakines/etap
+pkg_etap_commit = master
+
+PACKAGES += etest
+pkg_etest_name = etest
+pkg_etest_description = A lightweight, convention over configuration test framework for Erlang
+pkg_etest_homepage = https://github.com/wooga/etest
+pkg_etest_fetch = git
+pkg_etest_repo = https://github.com/wooga/etest
+pkg_etest_commit = master
+
+PACKAGES += etest_http
+pkg_etest_http_name = etest_http
+pkg_etest_http_description = etest Assertions around HTTP (client-side)
+pkg_etest_http_homepage = https://github.com/wooga/etest_http
+pkg_etest_http_fetch = git
+pkg_etest_http_repo = https://github.com/wooga/etest_http
+pkg_etest_http_commit = master
+
+PACKAGES += etoml
+pkg_etoml_name = etoml
+pkg_etoml_description = TOML language erlang parser
+pkg_etoml_homepage = https://github.com/kalta/etoml
+pkg_etoml_fetch = git
+pkg_etoml_repo = https://github.com/kalta/etoml
+pkg_etoml_commit = master
+
+PACKAGES += eunit
+pkg_eunit_name = eunit
+pkg_eunit_description = The EUnit lightweight unit testing framework for Erlang - this is the canonical development repository.
+pkg_eunit_homepage = https://github.com/richcarl/eunit
+pkg_eunit_fetch = git
+pkg_eunit_repo = https://github.com/richcarl/eunit
+pkg_eunit_commit = master
+
+PACKAGES += eunit_formatters
+pkg_eunit_formatters_name = eunit_formatters
+pkg_eunit_formatters_description = Because eunit's output sucks. Let's make it better.
+pkg_eunit_formatters_homepage = https://github.com/seancribbs/eunit_formatters
+pkg_eunit_formatters_fetch = git
+pkg_eunit_formatters_repo = https://github.com/seancribbs/eunit_formatters
+pkg_eunit_formatters_commit = master
+
+PACKAGES += euthanasia
+pkg_euthanasia_name = euthanasia
+pkg_euthanasia_description = Merciful killer for your Erlang processes
+pkg_euthanasia_homepage = https://github.com/doubleyou/euthanasia
+pkg_euthanasia_fetch = git
+pkg_euthanasia_repo = https://github.com/doubleyou/euthanasia
+pkg_euthanasia_commit = master
+
+PACKAGES += evum
+pkg_evum_name = evum
+pkg_evum_description = Spawn Linux VMs as Erlang processes in the Erlang VM
+pkg_evum_homepage = https://github.com/msantos/evum
+pkg_evum_fetch = git
+pkg_evum_repo = https://github.com/msantos/evum
+pkg_evum_commit = master
+
+PACKAGES += exec
+pkg_exec_name = erlexec
+pkg_exec_description = Execute and control OS processes from Erlang/OTP.
+pkg_exec_homepage = http://saleyn.github.com/erlexec
+pkg_exec_fetch = git
+pkg_exec_repo = https://github.com/saleyn/erlexec
+pkg_exec_commit = master
+
+PACKAGES += exml
+pkg_exml_name = exml
+pkg_exml_description = XML parsing library in Erlang
+pkg_exml_homepage = https://github.com/paulgray/exml
+pkg_exml_fetch = git
+pkg_exml_repo = https://github.com/paulgray/exml
+pkg_exml_commit = master
+
+PACKAGES += exometer
+pkg_exometer_name = exometer
+pkg_exometer_description = Basic measurement objects and probe behavior
+pkg_exometer_homepage = https://github.com/Feuerlabs/exometer
+pkg_exometer_fetch = git
+pkg_exometer_repo = https://github.com/Feuerlabs/exometer
+pkg_exometer_commit = master
+
+PACKAGES += exs1024
+pkg_exs1024_name = exs1024
+pkg_exs1024_description = Xorshift1024star pseudo random number generator for Erlang.
+pkg_exs1024_homepage = https://github.com/jj1bdx/exs1024
+pkg_exs1024_fetch = git
+pkg_exs1024_repo = https://github.com/jj1bdx/exs1024
+pkg_exs1024_commit = master
+
+PACKAGES += exs64
+pkg_exs64_name = exs64
+pkg_exs64_description = Xorshift64star pseudo random number generator for Erlang.
+pkg_exs64_homepage = https://github.com/jj1bdx/exs64
+pkg_exs64_fetch = git
+pkg_exs64_repo = https://github.com/jj1bdx/exs64
+pkg_exs64_commit = master
+
+PACKAGES += exsplus116
+pkg_exsplus116_name = exsplus116
+pkg_exsplus116_description = Xorshift116plus for Erlang
+pkg_exsplus116_homepage = https://github.com/jj1bdx/exsplus116
+pkg_exsplus116_fetch = git
+pkg_exsplus116_repo = https://github.com/jj1bdx/exsplus116
+pkg_exsplus116_commit = master
+
+PACKAGES += exsplus128
+pkg_exsplus128_name = exsplus128
+pkg_exsplus128_description = Xorshift128plus pseudo random number generator for Erlang.
+pkg_exsplus128_homepage = https://github.com/jj1bdx/exsplus128
+pkg_exsplus128_fetch = git
+pkg_exsplus128_repo = https://github.com/jj1bdx/exsplus128
+pkg_exsplus128_commit = master
+
+PACKAGES += ezmq
+pkg_ezmq_name = ezmq
+pkg_ezmq_description = zMQ implemented in Erlang
+pkg_ezmq_homepage = https://github.com/RoadRunnr/ezmq
+pkg_ezmq_fetch = git
+pkg_ezmq_repo = https://github.com/RoadRunnr/ezmq
+pkg_ezmq_commit = master
+
+PACKAGES += ezmtp
+pkg_ezmtp_name = ezmtp
+pkg_ezmtp_description = ZMTP protocol in pure Erlang.
+pkg_ezmtp_homepage = https://github.com/a13x/ezmtp
+pkg_ezmtp_fetch = git
+pkg_ezmtp_repo = https://github.com/a13x/ezmtp
+pkg_ezmtp_commit = master
+
+PACKAGES += fast_disk_log
+pkg_fast_disk_log_name = fast_disk_log
+pkg_fast_disk_log_description = Pool-based asynchronous Erlang disk logger
+pkg_fast_disk_log_homepage = https://github.com/lpgauth/fast_disk_log
+pkg_fast_disk_log_fetch = git
+pkg_fast_disk_log_repo = https://github.com/lpgauth/fast_disk_log
+pkg_fast_disk_log_commit = master
+
+PACKAGES += feeder
+pkg_feeder_name = feeder
+pkg_feeder_description = Stream parse RSS and Atom formatted XML feeds.
+pkg_feeder_homepage = https://github.com/michaelnisi/feeder
+pkg_feeder_fetch = git
+pkg_feeder_repo = https://github.com/michaelnisi/feeder
+pkg_feeder_commit = master
+
+PACKAGES += find_crate
+pkg_find_crate_name = find_crate
+pkg_find_crate_description = Find Rust libs and exes in Erlang application priv directory
+pkg_find_crate_homepage = https://github.com/goertzenator/find_crate
+pkg_find_crate_fetch = git
+pkg_find_crate_repo = https://github.com/goertzenator/find_crate
+pkg_find_crate_commit = master
+
+PACKAGES += fix
+pkg_fix_name = fix
+pkg_fix_description = Implementation of the FIX protocol (http://fixprotocol.org/).
+pkg_fix_homepage = https://github.com/maxlapshin/fix
+pkg_fix_fetch = git
+pkg_fix_repo = https://github.com/maxlapshin/fix
+pkg_fix_commit = master
+
+PACKAGES += flower
+pkg_flower_name = flower
+pkg_flower_description = FlowER - an Erlang OpenFlow development platform
+pkg_flower_homepage = https://github.com/travelping/flower
+pkg_flower_fetch = git
+pkg_flower_repo = https://github.com/travelping/flower
+pkg_flower_commit = master
+
+PACKAGES += fn
+pkg_fn_name = fn
+pkg_fn_description = Function utilities for Erlang
+pkg_fn_homepage = https://github.com/reiddraper/fn
+pkg_fn_fetch = git
+pkg_fn_repo = https://github.com/reiddraper/fn
+pkg_fn_commit = master
+
+PACKAGES += folsom
+pkg_folsom_name = folsom
+pkg_folsom_description = Expose Erlang Events and Metrics
+pkg_folsom_homepage = https://github.com/boundary/folsom
+pkg_folsom_fetch = git
+pkg_folsom_repo = https://github.com/boundary/folsom
+pkg_folsom_commit = master
+
+PACKAGES += folsom_cowboy
+pkg_folsom_cowboy_name = folsom_cowboy
+pkg_folsom_cowboy_description = A Cowboy based Folsom HTTP Wrapper.
+pkg_folsom_cowboy_homepage = https://github.com/boundary/folsom_cowboy
+pkg_folsom_cowboy_fetch = git
+pkg_folsom_cowboy_repo = https://github.com/boundary/folsom_cowboy
+pkg_folsom_cowboy_commit = master
+
+PACKAGES += folsomite
+pkg_folsomite_name = folsomite
+pkg_folsomite_description = blow up your graphite / riemann server with folsom metrics
+pkg_folsomite_homepage = https://github.com/campanja/folsomite
+pkg_folsomite_fetch = git
+pkg_folsomite_repo = https://github.com/campanja/folsomite
+pkg_folsomite_commit = master
+
+PACKAGES += fs
+pkg_fs_name = fs
+pkg_fs_description = Erlang FileSystem Listener
+pkg_fs_homepage = https://github.com/synrc/fs
+pkg_fs_fetch = git
+pkg_fs_repo = https://github.com/synrc/fs
+pkg_fs_commit = master
+
+PACKAGES += fuse
+pkg_fuse_name = fuse
+pkg_fuse_description = A Circuit Breaker for Erlang
+pkg_fuse_homepage = https://github.com/jlouis/fuse
+pkg_fuse_fetch = git
+pkg_fuse_repo = https://github.com/jlouis/fuse
+pkg_fuse_commit = master
+
+PACKAGES += gcm
+pkg_gcm_name = gcm
+pkg_gcm_description = An Erlang application for Google Cloud Messaging
+pkg_gcm_homepage = https://github.com/pdincau/gcm-erlang
+pkg_gcm_fetch = git
+pkg_gcm_repo = https://github.com/pdincau/gcm-erlang
+pkg_gcm_commit = master
+
+PACKAGES += gcprof
+pkg_gcprof_name = gcprof
+pkg_gcprof_description = Garbage Collection profiler for Erlang
+pkg_gcprof_homepage = https://github.com/knutin/gcprof
+pkg_gcprof_fetch = git
+pkg_gcprof_repo = https://github.com/knutin/gcprof
+pkg_gcprof_commit = master
+
+PACKAGES += geas
+pkg_geas_name = geas
+pkg_geas_description = Guess Erlang Application Scattering
+pkg_geas_homepage = https://github.com/crownedgrouse/geas
+pkg_geas_fetch = git
+pkg_geas_repo = https://github.com/crownedgrouse/geas
+pkg_geas_commit = master
+
+PACKAGES += geef
+pkg_geef_name = geef
+pkg_geef_description = Git NEEEEF (Erlang NIF)
+pkg_geef_homepage = https://github.com/carlosmn/geef
+pkg_geef_fetch = git
+pkg_geef_repo = https://github.com/carlosmn/geef
+pkg_geef_commit = master
+
+PACKAGES += gen_coap
+pkg_gen_coap_name = gen_coap
+pkg_gen_coap_description = Generic Erlang CoAP Client/Server
+pkg_gen_coap_homepage = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_fetch = git
+pkg_gen_coap_repo = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_commit = master
+
+PACKAGES += gen_cycle
+pkg_gen_cycle_name = gen_cycle
+pkg_gen_cycle_description = Simple, generic OTP behaviour for recurring tasks
+pkg_gen_cycle_homepage = https://github.com/aerosol/gen_cycle
+pkg_gen_cycle_fetch = git
+pkg_gen_cycle_repo = https://github.com/aerosol/gen_cycle
+pkg_gen_cycle_commit = develop
+
+PACKAGES += gen_icmp
+pkg_gen_icmp_name = gen_icmp
+pkg_gen_icmp_description = Erlang interface to ICMP sockets
+pkg_gen_icmp_homepage = https://github.com/msantos/gen_icmp
+pkg_gen_icmp_fetch = git
+pkg_gen_icmp_repo = https://github.com/msantos/gen_icmp
+pkg_gen_icmp_commit = master
+
+PACKAGES += gen_leader
+pkg_gen_leader_name = gen_leader
+pkg_gen_leader_description = leader election behavior
+pkg_gen_leader_homepage = https://github.com/garret-smith/gen_leader_revival
+pkg_gen_leader_fetch = git
+pkg_gen_leader_repo = https://github.com/garret-smith/gen_leader_revival
+pkg_gen_leader_commit = master
+
+PACKAGES += gen_nb_server
+pkg_gen_nb_server_name = gen_nb_server
+pkg_gen_nb_server_description = OTP behavior for writing non-blocking servers
+pkg_gen_nb_server_homepage = https://github.com/kevsmith/gen_nb_server
+pkg_gen_nb_server_fetch = git
+pkg_gen_nb_server_repo = https://github.com/kevsmith/gen_nb_server
+pkg_gen_nb_server_commit = master
+
+PACKAGES += gen_paxos
+pkg_gen_paxos_name = gen_paxos
+pkg_gen_paxos_description = An Erlang/OTP-style implementation of the PAXOS distributed consensus protocol
+pkg_gen_paxos_homepage = https://github.com/gburd/gen_paxos
+pkg_gen_paxos_fetch = git
+pkg_gen_paxos_repo = https://github.com/gburd/gen_paxos
+pkg_gen_paxos_commit = master
+
+PACKAGES += gen_rpc
+pkg_gen_rpc_name = gen_rpc
+pkg_gen_rpc_description = A scalable RPC library for Erlang-VM based languages
+pkg_gen_rpc_homepage = https://github.com/priestjim/gen_rpc.git
+pkg_gen_rpc_fetch = git
+pkg_gen_rpc_repo = https://github.com/priestjim/gen_rpc.git
+pkg_gen_rpc_commit = master
+
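+# The fetch/repo/commit fields also map onto the generic one-line dep_*
+# override, which bypasses this index entirely for a given dependency.
+# A sketch (the version shown is illustrative, not a recommendation):
+#
+#     DEPS = gen_rpc
+#     dep_gen_rpc = git https://github.com/priestjim/gen_rpc 2.0.0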
+PACKAGES += gen_smtp
+pkg_gen_smtp_name = gen_smtp
+pkg_gen_smtp_description = A generic Erlang SMTP server and client that can be extended via callback modules
+pkg_gen_smtp_homepage = https://github.com/Vagabond/gen_smtp
+pkg_gen_smtp_fetch = git
+pkg_gen_smtp_repo = https://github.com/Vagabond/gen_smtp
+pkg_gen_smtp_commit = master
+
+PACKAGES += gen_tracker
+pkg_gen_tracker_name = gen_tracker
+pkg_gen_tracker_description = supervisor with ets handling of children and their metadata
+pkg_gen_tracker_homepage = https://github.com/erlyvideo/gen_tracker
+pkg_gen_tracker_fetch = git
+pkg_gen_tracker_repo = https://github.com/erlyvideo/gen_tracker
+pkg_gen_tracker_commit = master
+
+PACKAGES += gen_unix
+pkg_gen_unix_name = gen_unix
+pkg_gen_unix_description = Erlang Unix socket interface
+pkg_gen_unix_homepage = https://github.com/msantos/gen_unix
+pkg_gen_unix_fetch = git
+pkg_gen_unix_repo = https://github.com/msantos/gen_unix
+pkg_gen_unix_commit = master
+
+PACKAGES += geode
+pkg_geode_name = geode
+pkg_geode_description = geohash/proximity lookup in pure, uncut erlang.
+pkg_geode_homepage = https://github.com/bradfordw/geode
+pkg_geode_fetch = git
+pkg_geode_repo = https://github.com/bradfordw/geode
+pkg_geode_commit = master
+
+PACKAGES += getopt
+pkg_getopt_name = getopt
+pkg_getopt_description = Module to parse command line arguments using the GNU getopt syntax
+pkg_getopt_homepage = https://github.com/jcomellas/getopt
+pkg_getopt_fetch = git
+pkg_getopt_repo = https://github.com/jcomellas/getopt
+pkg_getopt_commit = master
+
+PACKAGES += gettext
+pkg_gettext_name = gettext
+pkg_gettext_description = Erlang internationalization library.
+pkg_gettext_homepage = https://github.com/etnt/gettext
+pkg_gettext_fetch = git
+pkg_gettext_repo = https://github.com/etnt/gettext
+pkg_gettext_commit = master
+
+PACKAGES += giallo
+pkg_giallo_name = giallo
+pkg_giallo_description = Small and flexible web framework on top of Cowboy
+pkg_giallo_homepage = https://github.com/kivra/giallo
+pkg_giallo_fetch = git
+pkg_giallo_repo = https://github.com/kivra/giallo
+pkg_giallo_commit = master
+
+PACKAGES += gin
+pkg_gin_name = gin
+pkg_gin_description = Guards parse_transform for Erlang
+pkg_gin_homepage = https://github.com/mad-cocktail/gin
+pkg_gin_fetch = git
+pkg_gin_repo = https://github.com/mad-cocktail/gin
+pkg_gin_commit = master
+
+PACKAGES += gitty
+pkg_gitty_name = gitty
+pkg_gitty_description = Git access in erlang
+pkg_gitty_homepage = https://github.com/maxlapshin/gitty
+pkg_gitty_fetch = git
+pkg_gitty_repo = https://github.com/maxlapshin/gitty
+pkg_gitty_commit = master
+
+PACKAGES += gold_fever
+pkg_gold_fever_name = gold_fever
+pkg_gold_fever_description = A Treasure Hunt for Erlangers
+pkg_gold_fever_homepage = https://github.com/inaka/gold_fever
+pkg_gold_fever_fetch = git
+pkg_gold_fever_repo = https://github.com/inaka/gold_fever
+pkg_gold_fever_commit = master
+
+PACKAGES += gpb
+pkg_gpb_name = gpb
+pkg_gpb_description = A Google Protobuf implementation for Erlang
+pkg_gpb_homepage = https://github.com/tomas-abrahamsson/gpb
+pkg_gpb_fetch = git
+pkg_gpb_repo = https://github.com/tomas-abrahamsson/gpb
+pkg_gpb_commit = master
+
+PACKAGES += gproc
+pkg_gproc_name = gproc
+pkg_gproc_description = Extended process registry for Erlang
+pkg_gproc_homepage = https://github.com/uwiger/gproc
+pkg_gproc_fetch = git
+pkg_gproc_repo = https://github.com/uwiger/gproc
+pkg_gproc_commit = master
+
+PACKAGES += grapherl
+pkg_grapherl_name = grapherl
+pkg_grapherl_description = Create graphs of Erlang systems and programs
+pkg_grapherl_homepage = https://github.com/eproxus/grapherl
+pkg_grapherl_fetch = git
+pkg_grapherl_repo = https://github.com/eproxus/grapherl
+pkg_grapherl_commit = master
+
+PACKAGES += grpc
+pkg_grpc_name = grpc
+pkg_grpc_description = gRPC server in Erlang
+pkg_grpc_homepage = https://github.com/Bluehouse-Technology/grpc
+pkg_grpc_fetch = git
+pkg_grpc_repo = https://github.com/Bluehouse-Technology/grpc
+pkg_grpc_commit = master
+
+PACKAGES += grpc_client
+pkg_grpc_client_name = grpc_client
+pkg_grpc_client_description = gRPC client in Erlang
+pkg_grpc_client_homepage = https://github.com/Bluehouse-Technology/grpc_client
+pkg_grpc_client_fetch = git
+pkg_grpc_client_repo = https://github.com/Bluehouse-Technology/grpc_client
+pkg_grpc_client_commit = master
+
+PACKAGES += gun
+pkg_gun_name = gun
+pkg_gun_description = Asynchronous SPDY, HTTP and Websocket client written in Erlang.
+pkg_gun_homepage = http://ninenines.eu
+pkg_gun_fetch = git
+pkg_gun_repo = https://github.com/ninenines/gun
+pkg_gun_commit = master
+
+PACKAGES += gut
+pkg_gut_name = gut
+pkg_gut_description = gut is a template printing, aka scaffolding, tool for Erlang. Like rails generate or yeoman
+pkg_gut_homepage = https://github.com/unbalancedparentheses/gut
+pkg_gut_fetch = git
+pkg_gut_repo = https://github.com/unbalancedparentheses/gut
+pkg_gut_commit = master
+
+PACKAGES += hackney
+pkg_hackney_name = hackney
+pkg_hackney_description = simple HTTP client in Erlang
+pkg_hackney_homepage = https://github.com/benoitc/hackney
+pkg_hackney_fetch = git
+pkg_hackney_repo = https://github.com/benoitc/hackney
+pkg_hackney_commit = master
+
+PACKAGES += hamcrest
+pkg_hamcrest_name = hamcrest
+pkg_hamcrest_description = Erlang port of Hamcrest
+pkg_hamcrest_homepage = https://github.com/hyperthunk/hamcrest-erlang
+pkg_hamcrest_fetch = git
+pkg_hamcrest_repo = https://github.com/hyperthunk/hamcrest-erlang
+pkg_hamcrest_commit = master
+
+PACKAGES += hanoidb
+pkg_hanoidb_name = hanoidb
+pkg_hanoidb_description = Erlang LSM BTree Storage
+pkg_hanoidb_homepage = https://github.com/krestenkrab/hanoidb
+pkg_hanoidb_fetch = git
+pkg_hanoidb_repo = https://github.com/krestenkrab/hanoidb
+pkg_hanoidb_commit = master
+
+PACKAGES += hottub
+pkg_hottub_name = hottub
+pkg_hottub_description = Permanent Erlang Worker Pool
+pkg_hottub_homepage = https://github.com/bfrog/hottub
+pkg_hottub_fetch = git
+pkg_hottub_repo = https://github.com/bfrog/hottub
+pkg_hottub_commit = master
+
+PACKAGES += hpack
+pkg_hpack_name = hpack
+pkg_hpack_description = HPACK Implementation for Erlang
+pkg_hpack_homepage = https://github.com/joedevivo/hpack
+pkg_hpack_fetch = git
+pkg_hpack_repo = https://github.com/joedevivo/hpack
+pkg_hpack_commit = master
+
+PACKAGES += hyper
+pkg_hyper_name = hyper
+pkg_hyper_description = Erlang implementation of HyperLogLog
+pkg_hyper_homepage = https://github.com/GameAnalytics/hyper
+pkg_hyper_fetch = git
+pkg_hyper_repo = https://github.com/GameAnalytics/hyper
+pkg_hyper_commit = master
+
+PACKAGES += i18n
+pkg_i18n_name = i18n
+pkg_i18n_description = International components for unicode from Erlang (unicode, date, string, number, format, locale, localization, transliteration, icu4e)
+pkg_i18n_homepage = https://github.com/erlang-unicode/i18n
+pkg_i18n_fetch = git
+pkg_i18n_repo = https://github.com/erlang-unicode/i18n
+pkg_i18n_commit = master
+
+PACKAGES += ibrowse
+pkg_ibrowse_name = ibrowse
+pkg_ibrowse_description = Erlang HTTP client
+pkg_ibrowse_homepage = https://github.com/cmullaparthi/ibrowse
+pkg_ibrowse_fetch = git
+pkg_ibrowse_repo = https://github.com/cmullaparthi/ibrowse
+pkg_ibrowse_commit = master
+
+PACKAGES += idna
+pkg_idna_name = idna
+pkg_idna_description = Erlang IDNA lib
+pkg_idna_homepage = https://github.com/benoitc/erlang-idna
+pkg_idna_fetch = git
+pkg_idna_repo = https://github.com/benoitc/erlang-idna
+pkg_idna_commit = master
+
+PACKAGES += ierlang
+pkg_ierlang_name = ierlang
+pkg_ierlang_description = An Erlang language kernel for IPython.
+pkg_ierlang_homepage = https://github.com/robbielynch/ierlang
+pkg_ierlang_fetch = git
+pkg_ierlang_repo = https://github.com/robbielynch/ierlang
+pkg_ierlang_commit = master
+
+PACKAGES += iota
+pkg_iota_name = iota
+pkg_iota_description = iota (Inter-dependency Objective Testing Apparatus) - a tool to enforce clean separation of responsibilities in Erlang code
+pkg_iota_homepage = https://github.com/jpgneves/iota
+pkg_iota_fetch = git
+pkg_iota_repo = https://github.com/jpgneves/iota
+pkg_iota_commit = master
+
+PACKAGES += irc_lib
+pkg_irc_lib_name = irc_lib
+pkg_irc_lib_description = Erlang irc client library
+pkg_irc_lib_homepage = https://github.com/OtpChatBot/irc_lib
+pkg_irc_lib_fetch = git
+pkg_irc_lib_repo = https://github.com/OtpChatBot/irc_lib
+pkg_irc_lib_commit = master
+
+PACKAGES += ircd
+pkg_ircd_name = ircd
+pkg_ircd_description = A pluggable IRC daemon application/library for Erlang.
+pkg_ircd_homepage = https://github.com/tonyg/erlang-ircd
+pkg_ircd_fetch = git
+pkg_ircd_repo = https://github.com/tonyg/erlang-ircd
+pkg_ircd_commit = master
+
+PACKAGES += iris
+pkg_iris_name = iris
+pkg_iris_description = Iris Erlang binding
+pkg_iris_homepage = https://github.com/project-iris/iris-erl
+pkg_iris_fetch = git
+pkg_iris_repo = https://github.com/project-iris/iris-erl
+pkg_iris_commit = master
+
+PACKAGES += iso8601
+pkg_iso8601_name = iso8601
+pkg_iso8601_description = Erlang ISO 8601 date formatter/parser
+pkg_iso8601_homepage = https://github.com/seansawyer/erlang_iso8601
+pkg_iso8601_fetch = git
+pkg_iso8601_repo = https://github.com/seansawyer/erlang_iso8601
+pkg_iso8601_commit = master
+
+PACKAGES += jamdb_sybase
+pkg_jamdb_sybase_name = jamdb_sybase
+pkg_jamdb_sybase_description = Erlang driver for SAP Sybase ASE
+pkg_jamdb_sybase_homepage = https://github.com/erlangbureau/jamdb_sybase
+pkg_jamdb_sybase_fetch = git
+pkg_jamdb_sybase_repo = https://github.com/erlangbureau/jamdb_sybase
+pkg_jamdb_sybase_commit = master
+
+PACKAGES += jerg
+pkg_jerg_name = jerg
+pkg_jerg_description = JSON Schema to Erlang Records Generator
+pkg_jerg_homepage = https://github.com/ddossot/jerg
+pkg_jerg_fetch = git
+pkg_jerg_repo = https://github.com/ddossot/jerg
+pkg_jerg_commit = master
+
+PACKAGES += jesse
+pkg_jesse_name = jesse
+pkg_jesse_description = jesse (JSon Schema Erlang) is an implementation of a json schema validator for Erlang.
+pkg_jesse_homepage = https://github.com/for-GET/jesse
+pkg_jesse_fetch = git
+pkg_jesse_repo = https://github.com/for-GET/jesse
+pkg_jesse_commit = master
+
+PACKAGES += jiffy
+pkg_jiffy_name = jiffy
+pkg_jiffy_description = JSON NIFs for Erlang.
+pkg_jiffy_homepage = https://github.com/davisp/jiffy
+pkg_jiffy_fetch = git
+pkg_jiffy_repo = https://github.com/davisp/jiffy
+pkg_jiffy_commit = master
+
+PACKAGES += jiffy_v
+pkg_jiffy_v_name = jiffy_v
+pkg_jiffy_v_description = JSON validation utility
+pkg_jiffy_v_homepage = https://github.com/shizzard/jiffy-v
+pkg_jiffy_v_fetch = git
+pkg_jiffy_v_repo = https://github.com/shizzard/jiffy-v
+pkg_jiffy_v_commit = master
+
+PACKAGES += jobs
+pkg_jobs_name = jobs
+pkg_jobs_description = A job scheduler for load regulation
+pkg_jobs_homepage = https://github.com/esl/jobs
+pkg_jobs_fetch = git
+pkg_jobs_repo = https://github.com/esl/jobs
+pkg_jobs_commit = master
+
+PACKAGES += joxa
+pkg_joxa_name = joxa
+pkg_joxa_description = A Modern Lisp for the Erlang VM
+pkg_joxa_homepage = https://github.com/joxa/joxa
+pkg_joxa_fetch = git
+pkg_joxa_repo = https://github.com/joxa/joxa
+pkg_joxa_commit = master
+
+PACKAGES += json
+pkg_json_name = json
+pkg_json_description = a high level json library for erlang (17.0+)
+pkg_json_homepage = https://github.com/talentdeficit/json
+pkg_json_fetch = git
+pkg_json_repo = https://github.com/talentdeficit/json
+pkg_json_commit = master
+
+PACKAGES += json_rec
+pkg_json_rec_name = json_rec
+pkg_json_rec_description = JSON to erlang record
+pkg_json_rec_homepage = https://github.com/justinkirby/json_rec
+pkg_json_rec_fetch = git
+pkg_json_rec_repo = https://github.com/justinkirby/json_rec
+pkg_json_rec_commit = master
+
+PACKAGES += jsone
+pkg_jsone_name = jsone
+pkg_jsone_description = An Erlang library for encoding and decoding JSON data.
+pkg_jsone_homepage = https://github.com/sile/jsone.git
+pkg_jsone_fetch = git
+pkg_jsone_repo = https://github.com/sile/jsone.git
+pkg_jsone_commit = master
+
+PACKAGES += jsonerl
+pkg_jsonerl_name = jsonerl
+pkg_jsonerl_description = yet another but slightly different erlang <-> json encoder/decoder
+pkg_jsonerl_homepage = https://github.com/lambder/jsonerl
+pkg_jsonerl_fetch = git
+pkg_jsonerl_repo = https://github.com/lambder/jsonerl
+pkg_jsonerl_commit = master
+
+PACKAGES += jsonpath
+pkg_jsonpath_name = jsonpath
+pkg_jsonpath_description = Fast Erlang JSON data retrieval and updates via javascript-like notation
+pkg_jsonpath_homepage = https://github.com/GeneStevens/jsonpath
+pkg_jsonpath_fetch = git
+pkg_jsonpath_repo = https://github.com/GeneStevens/jsonpath
+pkg_jsonpath_commit = master
+
+PACKAGES += jsonx
+pkg_jsonx_name = jsonx
+pkg_jsonx_description = JSONX is an Erlang library for efficiently decoding and encoding JSON, written in C.
+pkg_jsonx_homepage = https://github.com/iskra/jsonx
+pkg_jsonx_fetch = git
+pkg_jsonx_repo = https://github.com/iskra/jsonx
+pkg_jsonx_commit = master
+
+PACKAGES += jsx
+pkg_jsx_name = jsx
+pkg_jsx_description = An Erlang application for consuming, producing and manipulating JSON.
+pkg_jsx_homepage = https://github.com/talentdeficit/jsx
+pkg_jsx_fetch = git
+pkg_jsx_repo = https://github.com/talentdeficit/jsx
+pkg_jsx_commit = master
+
+PACKAGES += kafka
+pkg_kafka_name = kafka
+pkg_kafka_description = Kafka consumer and producer in Erlang
+pkg_kafka_homepage = https://github.com/wooga/kafka-erlang
+pkg_kafka_fetch = git
+pkg_kafka_repo = https://github.com/wooga/kafka-erlang
+pkg_kafka_commit = master
+
+PACKAGES += kafka_protocol
+pkg_kafka_protocol_name = kafka_protocol
+pkg_kafka_protocol_description = Kafka protocol Erlang library
+pkg_kafka_protocol_homepage = https://github.com/klarna/kafka_protocol
+pkg_kafka_protocol_fetch = git
+pkg_kafka_protocol_repo = https://github.com/klarna/kafka_protocol.git
+pkg_kafka_protocol_commit = master
+
+PACKAGES += kai
+pkg_kai_name = kai
+pkg_kai_description = DHT storage by Takeshi Inoue
+pkg_kai_homepage = https://github.com/synrc/kai
+pkg_kai_fetch = git
+pkg_kai_repo = https://github.com/synrc/kai
+pkg_kai_commit = master
+
+PACKAGES += katja
+pkg_katja_name = katja
+pkg_katja_description = A simple Riemann client written in Erlang.
+pkg_katja_homepage = https://github.com/nifoc/katja
+pkg_katja_fetch = git
+pkg_katja_repo = https://github.com/nifoc/katja
+pkg_katja_commit = master
+
+PACKAGES += kdht
+pkg_kdht_name = kdht
+pkg_kdht_description = kdht is an erlang DHT implementation
+pkg_kdht_homepage = https://github.com/kevinlynx/kdht
+pkg_kdht_fetch = git
+pkg_kdht_repo = https://github.com/kevinlynx/kdht
+pkg_kdht_commit = master
+
+PACKAGES += key2value
+pkg_key2value_name = key2value
+pkg_key2value_description = Erlang 2-way map
+pkg_key2value_homepage = https://github.com/okeuday/key2value
+pkg_key2value_fetch = git
+pkg_key2value_repo = https://github.com/okeuday/key2value
+pkg_key2value_commit = master
+
+PACKAGES += keys1value
+pkg_keys1value_name = keys1value
+pkg_keys1value_description = Erlang set associative map for key lists
+pkg_keys1value_homepage = https://github.com/okeuday/keys1value
+pkg_keys1value_fetch = git
+pkg_keys1value_repo = https://github.com/okeuday/keys1value
+pkg_keys1value_commit = master
+
+PACKAGES += kinetic
+pkg_kinetic_name = kinetic
+pkg_kinetic_description = Erlang Kinesis Client
+pkg_kinetic_homepage = https://github.com/AdRoll/kinetic
+pkg_kinetic_fetch = git
+pkg_kinetic_repo = https://github.com/AdRoll/kinetic
+pkg_kinetic_commit = master
+
+PACKAGES += kjell
+pkg_kjell_name = kjell
+pkg_kjell_description = Erlang Shell
+pkg_kjell_homepage = https://github.com/karlll/kjell
+pkg_kjell_fetch = git
+pkg_kjell_repo = https://github.com/karlll/kjell
+pkg_kjell_commit = master
+
+PACKAGES += kraken
+pkg_kraken_name = kraken
+pkg_kraken_description = Distributed Pubsub Server for Realtime Apps
+pkg_kraken_homepage = https://github.com/Asana/kraken
+pkg_kraken_fetch = git
+pkg_kraken_repo = https://github.com/Asana/kraken
+pkg_kraken_commit = master
+
+PACKAGES += kucumberl
+pkg_kucumberl_name = kucumberl
+pkg_kucumberl_description = A pure-Erlang, open-source implementation of Cucumber
+pkg_kucumberl_homepage = https://github.com/openshine/kucumberl
+pkg_kucumberl_fetch = git
+pkg_kucumberl_repo = https://github.com/openshine/kucumberl
+pkg_kucumberl_commit = master
+
+PACKAGES += kvc
+pkg_kvc_name = kvc
+pkg_kvc_description = KVC - Key Value Coding for Erlang data structures
+pkg_kvc_homepage = https://github.com/etrepum/kvc
+pkg_kvc_fetch = git
+pkg_kvc_repo = https://github.com/etrepum/kvc
+pkg_kvc_commit = master
+
+PACKAGES += kvlists
+pkg_kvlists_name = kvlists
+pkg_kvlists_description = Lists of key-value pairs (decoded JSON) in Erlang
+pkg_kvlists_homepage = https://github.com/jcomellas/kvlists
+pkg_kvlists_fetch = git
+pkg_kvlists_repo = https://github.com/jcomellas/kvlists
+pkg_kvlists_commit = master
+
+PACKAGES += kvs
+pkg_kvs_name = kvs
+pkg_kvs_description = Container and Iterator
+pkg_kvs_homepage = https://github.com/synrc/kvs
+pkg_kvs_fetch = git
+pkg_kvs_repo = https://github.com/synrc/kvs
+pkg_kvs_commit = master
+
+PACKAGES += lager
+pkg_lager_name = lager
+pkg_lager_description = A logging framework for Erlang/OTP.
+pkg_lager_homepage = https://github.com/erlang-lager/lager
+pkg_lager_fetch = git
+pkg_lager_repo = https://github.com/erlang-lager/lager
+pkg_lager_commit = master
+
+PACKAGES += lager_amqp_backend
+pkg_lager_amqp_backend_name = lager_amqp_backend
+pkg_lager_amqp_backend_description = AMQP RabbitMQ Lager backend
+pkg_lager_amqp_backend_homepage = https://github.com/jbrisbin/lager_amqp_backend
+pkg_lager_amqp_backend_fetch = git
+pkg_lager_amqp_backend_repo = https://github.com/jbrisbin/lager_amqp_backend
+pkg_lager_amqp_backend_commit = master
+
+PACKAGES += lager_syslog
+pkg_lager_syslog_name = lager_syslog
+pkg_lager_syslog_description = Syslog backend for lager
+pkg_lager_syslog_homepage = https://github.com/erlang-lager/lager_syslog
+pkg_lager_syslog_fetch = git
+pkg_lager_syslog_repo = https://github.com/erlang-lager/lager_syslog
+pkg_lager_syslog_commit = master
+
+PACKAGES += lambdapad
+pkg_lambdapad_name = lambdapad
+pkg_lambdapad_description = Static site generator using Erlang. Yes, Erlang.
+pkg_lambdapad_homepage = https://github.com/gar1t/lambdapad
+pkg_lambdapad_fetch = git
+pkg_lambdapad_repo = https://github.com/gar1t/lambdapad
+pkg_lambdapad_commit = master
+
+PACKAGES += lasp
+pkg_lasp_name = lasp
+pkg_lasp_description = A Language for Distributed, Eventually Consistent Computations
+pkg_lasp_homepage = http://lasp-lang.org/
+pkg_lasp_fetch = git
+pkg_lasp_repo = https://github.com/lasp-lang/lasp
+pkg_lasp_commit = master
+
+PACKAGES += lasse
+pkg_lasse_name = lasse
+pkg_lasse_description = SSE handler for Cowboy
+pkg_lasse_homepage = https://github.com/inaka/lasse
+pkg_lasse_fetch = git
+pkg_lasse_repo = https://github.com/inaka/lasse
+pkg_lasse_commit = master
+
+PACKAGES += ldap
+pkg_ldap_name = ldap
+pkg_ldap_description = LDAP server written in Erlang
+pkg_ldap_homepage = https://github.com/spawnproc/ldap
+pkg_ldap_fetch = git
+pkg_ldap_repo = https://github.com/spawnproc/ldap
+pkg_ldap_commit = master
+
+PACKAGES += lethink
+pkg_lethink_name = lethink
+pkg_lethink_description = erlang driver for rethinkdb
+pkg_lethink_homepage = https://github.com/taybin/lethink
+pkg_lethink_fetch = git
+pkg_lethink_repo = https://github.com/taybin/lethink
+pkg_lethink_commit = master
+
+PACKAGES += lfe
+pkg_lfe_name = lfe
+pkg_lfe_description = Lisp Flavoured Erlang (LFE)
+pkg_lfe_homepage = https://github.com/rvirding/lfe
+pkg_lfe_fetch = git
+pkg_lfe_repo = https://github.com/rvirding/lfe
+pkg_lfe_commit = master
+
+PACKAGES += ling
+pkg_ling_name = ling
+pkg_ling_description = Erlang on Xen
+pkg_ling_homepage = https://github.com/cloudozer/ling
+pkg_ling_fetch = git
+pkg_ling_repo = https://github.com/cloudozer/ling
+pkg_ling_commit = master
+
+PACKAGES += live
+pkg_live_name = live
+pkg_live_description = Automated module and configuration reloader.
+pkg_live_homepage = http://ninenines.eu
+pkg_live_fetch = git
+pkg_live_repo = https://github.com/ninenines/live
+pkg_live_commit = master
+
+PACKAGES += lmq
+pkg_lmq_name = lmq
+pkg_lmq_description = Lightweight Message Queue
+pkg_lmq_homepage = https://github.com/iij/lmq
+pkg_lmq_fetch = git
+pkg_lmq_repo = https://github.com/iij/lmq
+pkg_lmq_commit = master
+
+PACKAGES += locker
+pkg_locker_name = locker
+pkg_locker_description = Atomic distributed 'check and set' for short-lived keys
+pkg_locker_homepage = https://github.com/wooga/locker
+pkg_locker_fetch = git
+pkg_locker_repo = https://github.com/wooga/locker
+pkg_locker_commit = master
+
+PACKAGES += locks
+pkg_locks_name = locks
+pkg_locks_description = A scalable, deadlock-resolving resource locker
+pkg_locks_homepage = https://github.com/uwiger/locks
+pkg_locks_fetch = git
+pkg_locks_repo = https://github.com/uwiger/locks
+pkg_locks_commit = master
+
+PACKAGES += log4erl
+pkg_log4erl_name = log4erl
+pkg_log4erl_description = A logger for Erlang in the spirit of Log4j.
+pkg_log4erl_homepage = https://github.com/ahmednawras/log4erl
+pkg_log4erl_fetch = git
+pkg_log4erl_repo = https://github.com/ahmednawras/log4erl
+pkg_log4erl_commit = master
+
+PACKAGES += lol
+pkg_lol_name = lol
+pkg_lol_description = Lisp on erLang, and programming is fun again
+pkg_lol_homepage = https://github.com/b0oh/lol
+pkg_lol_fetch = git
+pkg_lol_repo = https://github.com/b0oh/lol
+pkg_lol_commit = master
+
+PACKAGES += lucid
+pkg_lucid_name = lucid
+pkg_lucid_description = HTTP/2 server written in Erlang
+pkg_lucid_homepage = https://github.com/tatsuhiro-t/lucid
+pkg_lucid_fetch = git
+pkg_lucid_repo = https://github.com/tatsuhiro-t/lucid
+pkg_lucid_commit = master
+
+PACKAGES += luerl
+pkg_luerl_name = luerl
+pkg_luerl_description = Lua in Erlang
+pkg_luerl_homepage = https://github.com/rvirding/luerl
+pkg_luerl_fetch = git
+pkg_luerl_repo = https://github.com/rvirding/luerl
+pkg_luerl_commit = develop
+
+PACKAGES += luwak
+pkg_luwak_name = luwak
+pkg_luwak_description = Large-object storage interface for Riak
+pkg_luwak_homepage = https://github.com/basho/luwak
+pkg_luwak_fetch = git
+pkg_luwak_repo = https://github.com/basho/luwak
+pkg_luwak_commit = master
+
+PACKAGES += lux
+pkg_lux_name = lux
+pkg_lux_description = Lux (LUcid eXpect scripting) simplifies test automation and provides an Expect-style execution of commands
+pkg_lux_homepage = https://github.com/hawk/lux
+pkg_lux_fetch = git
+pkg_lux_repo = https://github.com/hawk/lux
+pkg_lux_commit = master
+
+PACKAGES += machi
+pkg_machi_name = machi
+pkg_machi_description = Machi file store
+pkg_machi_homepage = https://github.com/basho/machi
+pkg_machi_fetch = git
+pkg_machi_repo = https://github.com/basho/machi
+pkg_machi_commit = master
+
+PACKAGES += mad
+pkg_mad_name = mad
+pkg_mad_description = Small and Fast Rebar Replacement
+pkg_mad_homepage = https://github.com/synrc/mad
+pkg_mad_fetch = git
+pkg_mad_repo = https://github.com/synrc/mad
+pkg_mad_commit = master
+
+PACKAGES += marina
+pkg_marina_name = marina
+pkg_marina_description = Non-blocking Erlang Cassandra CQL3 client
+pkg_marina_homepage = https://github.com/lpgauth/marina
+pkg_marina_fetch = git
+pkg_marina_repo = https://github.com/lpgauth/marina
+pkg_marina_commit = master
+
+PACKAGES += mavg
+pkg_mavg_name = mavg
+pkg_mavg_description = Erlang :: Exponential moving average library
+pkg_mavg_homepage = https://github.com/EchoTeam/mavg
+pkg_mavg_fetch = git
+pkg_mavg_repo = https://github.com/EchoTeam/mavg
+pkg_mavg_commit = master
+
+PACKAGES += mc_erl
+pkg_mc_erl_name = mc_erl
+pkg_mc_erl_description = mc-erl is a server for Minecraft 1.4.7 written in Erlang.
+pkg_mc_erl_homepage = https://github.com/clonejo/mc-erl
+pkg_mc_erl_fetch = git
+pkg_mc_erl_repo = https://github.com/clonejo/mc-erl
+pkg_mc_erl_commit = master
+
+PACKAGES += mcd
+pkg_mcd_name = mcd
+pkg_mcd_description = Fast memcached protocol client in pure Erlang
+pkg_mcd_homepage = https://github.com/EchoTeam/mcd
+pkg_mcd_fetch = git
+pkg_mcd_repo = https://github.com/EchoTeam/mcd
+pkg_mcd_commit = master
+
+PACKAGES += mcerlang
+pkg_mcerlang_name = mcerlang
+pkg_mcerlang_description = The McErlang model checker for Erlang
+pkg_mcerlang_homepage = https://github.com/fredlund/McErlang
+pkg_mcerlang_fetch = git
+pkg_mcerlang_repo = https://github.com/fredlund/McErlang
+pkg_mcerlang_commit = master
+
+PACKAGES += meck
+pkg_meck_name = meck
+pkg_meck_description = A mocking library for Erlang
+pkg_meck_homepage = https://github.com/eproxus/meck
+pkg_meck_fetch = git
+pkg_meck_repo = https://github.com/eproxus/meck
+pkg_meck_commit = master
+
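+# Test-only packages such as meck above are typically listed in TEST_DEPS,
+# which erlang.mk fetches only when running test targets; a minimal sketch
+# assuming that documented convention:
+#
+#     TEST_DEPS = meck
+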
+PACKAGES += mekao
+pkg_mekao_name = mekao
+pkg_mekao_description = SQL constructor
+pkg_mekao_homepage = https://github.com/ddosia/mekao
+pkg_mekao_fetch = git
+pkg_mekao_repo = https://github.com/ddosia/mekao
+pkg_mekao_commit = master
+
+PACKAGES += memo
+pkg_memo_name = memo
+pkg_memo_description = Erlang memoization server
+pkg_memo_homepage = https://github.com/tuncer/memo
+pkg_memo_fetch = git
+pkg_memo_repo = https://github.com/tuncer/memo
+pkg_memo_commit = master
+
+PACKAGES += merge_index
+pkg_merge_index_name = merge_index
+pkg_merge_index_description = MergeIndex is an Erlang library for storing ordered sets on disk. It is very similar to an SSTable (in Google's Bigtable) or an HFile (in Hadoop).
+pkg_merge_index_homepage = https://github.com/basho/merge_index
+pkg_merge_index_fetch = git
+pkg_merge_index_repo = https://github.com/basho/merge_index
+pkg_merge_index_commit = master
+
+PACKAGES += merl
+pkg_merl_name = merl
+pkg_merl_description = Metaprogramming in Erlang
+pkg_merl_homepage = https://github.com/richcarl/merl
+pkg_merl_fetch = git
+pkg_merl_repo = https://github.com/richcarl/merl
+pkg_merl_commit = master
+
+PACKAGES += mimerl
+pkg_mimerl_name = mimerl
+pkg_mimerl_description = Library to handle MIME types
+pkg_mimerl_homepage = https://github.com/benoitc/mimerl
+pkg_mimerl_fetch = git
+pkg_mimerl_repo = https://github.com/benoitc/mimerl
+pkg_mimerl_commit = master
+
+PACKAGES += mimetypes
+pkg_mimetypes_name = mimetypes
+pkg_mimetypes_description = Erlang MIME types library
+pkg_mimetypes_homepage = https://github.com/spawngrid/mimetypes
+pkg_mimetypes_fetch = git
+pkg_mimetypes_repo = https://github.com/spawngrid/mimetypes
+pkg_mimetypes_commit = master
+
+PACKAGES += mixer
+pkg_mixer_name = mixer
+pkg_mixer_description = Mix in functions from other modules
+pkg_mixer_homepage = https://github.com/chef/mixer
+pkg_mixer_fetch = git
+pkg_mixer_repo = https://github.com/chef/mixer
+pkg_mixer_commit = master
+
+PACKAGES += mochiweb
+pkg_mochiweb_name = mochiweb
+pkg_mochiweb_description = MochiWeb is an Erlang library for building lightweight HTTP servers.
+pkg_mochiweb_homepage = https://github.com/mochi/mochiweb
+pkg_mochiweb_fetch = git
+pkg_mochiweb_repo = https://github.com/mochi/mochiweb
+pkg_mochiweb_commit = master
+
+PACKAGES += mochiweb_xpath
+pkg_mochiweb_xpath_name = mochiweb_xpath
+pkg_mochiweb_xpath_description = XPath support for mochiweb's HTML parser
+pkg_mochiweb_xpath_homepage = https://github.com/retnuh/mochiweb_xpath
+pkg_mochiweb_xpath_fetch = git
+pkg_mochiweb_xpath_repo = https://github.com/retnuh/mochiweb_xpath
+pkg_mochiweb_xpath_commit = master
+
+PACKAGES += mockgyver
+pkg_mockgyver_name = mockgyver
+pkg_mockgyver_description = A mocking library for Erlang
+pkg_mockgyver_homepage = https://github.com/klajo/mockgyver
+pkg_mockgyver_fetch = git
+pkg_mockgyver_repo = https://github.com/klajo/mockgyver
+pkg_mockgyver_commit = master
+
+PACKAGES += modlib
+pkg_modlib_name = modlib
+pkg_modlib_description = Web framework based on Erlang's inets httpd
+pkg_modlib_homepage = https://github.com/gar1t/modlib
+pkg_modlib_fetch = git
+pkg_modlib_repo = https://github.com/gar1t/modlib
+pkg_modlib_commit = master
+
+PACKAGES += mongodb
+pkg_mongodb_name = mongodb
+pkg_mongodb_description = MongoDB driver for Erlang
+pkg_mongodb_homepage = https://github.com/comtihon/mongodb-erlang
+pkg_mongodb_fetch = git
+pkg_mongodb_repo = https://github.com/comtihon/mongodb-erlang
+pkg_mongodb_commit = master
+
+PACKAGES += mongooseim
+pkg_mongooseim_name = mongooseim
+pkg_mongooseim_description = Jabber / XMPP server with focus on performance and scalability, by Erlang Solutions
+pkg_mongooseim_homepage = https://www.erlang-solutions.com/products/mongooseim-massively-scalable-ejabberd-platform
+pkg_mongooseim_fetch = git
+pkg_mongooseim_repo = https://github.com/esl/MongooseIM
+pkg_mongooseim_commit = master
+
+PACKAGES += moyo
+pkg_moyo_name = moyo
+pkg_moyo_description = Erlang utility functions library
+pkg_moyo_homepage = https://github.com/dwango/moyo
+pkg_moyo_fetch = git
+pkg_moyo_repo = https://github.com/dwango/moyo
+pkg_moyo_commit = master
+
+PACKAGES += msgpack
+pkg_msgpack_name = msgpack
+pkg_msgpack_description = MessagePack (de)serializer implementation for Erlang
+pkg_msgpack_homepage = https://github.com/msgpack/msgpack-erlang
+pkg_msgpack_fetch = git
+pkg_msgpack_repo = https://github.com/msgpack/msgpack-erlang
+pkg_msgpack_commit = master
+
+PACKAGES += mu2
+pkg_mu2_name = mu2
+pkg_mu2_description = Erlang mutation testing tool
+pkg_mu2_homepage = https://github.com/ramsay-t/mu2
+pkg_mu2_fetch = git
+pkg_mu2_repo = https://github.com/ramsay-t/mu2
+pkg_mu2_commit = master
+
+PACKAGES += mustache
+pkg_mustache_name = mustache
+pkg_mustache_description = Mustache template engine for Erlang.
+pkg_mustache_homepage = https://github.com/mojombo/mustache.erl
+pkg_mustache_fetch = git
+pkg_mustache_repo = https://github.com/mojombo/mustache.erl
+pkg_mustache_commit = master
+
+PACKAGES += myproto
+pkg_myproto_name = myproto
+pkg_myproto_description = MySQL Server Protocol in Erlang
+pkg_myproto_homepage = https://github.com/altenwald/myproto
+pkg_myproto_fetch = git
+pkg_myproto_repo = https://github.com/altenwald/myproto
+pkg_myproto_commit = master
+
+PACKAGES += mysql
+pkg_mysql_name = mysql
+pkg_mysql_description = MySQL client library for Erlang/OTP
+pkg_mysql_homepage = https://github.com/mysql-otp/mysql-otp
+pkg_mysql_fetch = git
+pkg_mysql_repo = https://github.com/mysql-otp/mysql-otp
+pkg_mysql_commit = 1.5.1
+
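+# Entries may pin a tag rather than a branch, as mysql does above with 1.5.1.
+# A sketch of overriding such a pin from a project Makefile, assuming the
+# documented dep_*_commit override (the 1.5.1 tag shown is simply the index
+# default; any replacement tag is the user's choice):
+#
+#     DEPS = mysql
+#     dep_mysql_commit = 1.5.1
+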
+PACKAGES += n2o
+pkg_n2o_name = n2o
+pkg_n2o_description = WebSocket Application Server
+pkg_n2o_homepage = https://github.com/5HT/n2o
+pkg_n2o_fetch = git
+pkg_n2o_repo = https://github.com/5HT/n2o
+pkg_n2o_commit = master
+
+PACKAGES += nat_upnp
+pkg_nat_upnp_name = nat_upnp
+pkg_nat_upnp_description = Erlang library to map your internal port to an external one using UPnP IGD
+pkg_nat_upnp_homepage = https://github.com/benoitc/nat_upnp
+pkg_nat_upnp_fetch = git
+pkg_nat_upnp_repo = https://github.com/benoitc/nat_upnp
+pkg_nat_upnp_commit = master
+
+PACKAGES += neo4j
+pkg_neo4j_name = neo4j
+pkg_neo4j_description = Erlang client library for Neo4j.
+pkg_neo4j_homepage = https://github.com/dmitriid/neo4j-erlang
+pkg_neo4j_fetch = git
+pkg_neo4j_repo = https://github.com/dmitriid/neo4j-erlang
+pkg_neo4j_commit = master
+
+PACKAGES += neotoma
+pkg_neotoma_name = neotoma
+pkg_neotoma_description = Erlang library and packrat parser-generator for parsing expression grammars.
+pkg_neotoma_homepage = https://github.com/seancribbs/neotoma
+pkg_neotoma_fetch = git
+pkg_neotoma_repo = https://github.com/seancribbs/neotoma
+pkg_neotoma_commit = master
+
+PACKAGES += newrelic
+pkg_newrelic_name = newrelic
+pkg_newrelic_description = Erlang library for sending metrics to New Relic
+pkg_newrelic_homepage = https://github.com/wooga/newrelic-erlang
+pkg_newrelic_fetch = git
+pkg_newrelic_repo = https://github.com/wooga/newrelic-erlang
+pkg_newrelic_commit = master
+
+PACKAGES += nifty
+pkg_nifty_name = nifty
+pkg_nifty_description = Erlang NIF wrapper generator
+pkg_nifty_homepage = https://github.com/parapluu/nifty
+pkg_nifty_fetch = git
+pkg_nifty_repo = https://github.com/parapluu/nifty
+pkg_nifty_commit = master
+
+PACKAGES += nitrogen_core
+pkg_nitrogen_core_name = nitrogen_core
+pkg_nitrogen_core_description = The core Nitrogen library.
+pkg_nitrogen_core_homepage = http://nitrogenproject.com/
+pkg_nitrogen_core_fetch = git
+pkg_nitrogen_core_repo = https://github.com/nitrogen/nitrogen_core
+pkg_nitrogen_core_commit = master
+
+PACKAGES += nkbase
+pkg_nkbase_name = nkbase
+pkg_nkbase_description = NkBASE distributed database
+pkg_nkbase_homepage = https://github.com/Nekso/nkbase
+pkg_nkbase_fetch = git
+pkg_nkbase_repo = https://github.com/Nekso/nkbase
+pkg_nkbase_commit = develop
+
+PACKAGES += nkdocker
+pkg_nkdocker_name = nkdocker
+pkg_nkdocker_description = Erlang Docker client
+pkg_nkdocker_homepage = https://github.com/Nekso/nkdocker
+pkg_nkdocker_fetch = git
+pkg_nkdocker_repo = https://github.com/Nekso/nkdocker
+pkg_nkdocker_commit = master
+
+PACKAGES += nkpacket
+pkg_nkpacket_name = nkpacket
+pkg_nkpacket_description = Generic Erlang transport layer
+pkg_nkpacket_homepage = https://github.com/Nekso/nkpacket
+pkg_nkpacket_fetch = git
+pkg_nkpacket_repo = https://github.com/Nekso/nkpacket
+pkg_nkpacket_commit = master
+
+PACKAGES += nksip
+pkg_nksip_name = nksip
+pkg_nksip_description = Erlang SIP application server
+pkg_nksip_homepage = https://github.com/kalta/nksip
+pkg_nksip_fetch = git
+pkg_nksip_repo = https://github.com/kalta/nksip
+pkg_nksip_commit = master
+
+PACKAGES += nodefinder
+pkg_nodefinder_name = nodefinder
+pkg_nodefinder_description = Automatic node discovery via UDP multicast
+pkg_nodefinder_homepage = https://github.com/erlanger/nodefinder
+pkg_nodefinder_fetch = git
+pkg_nodefinder_repo = https://github.com/okeuday/nodefinder
+pkg_nodefinder_commit = master
+
+PACKAGES += nprocreg
+pkg_nprocreg_name = nprocreg
+pkg_nprocreg_description = Minimal Distributed Erlang Process Registry
+pkg_nprocreg_homepage = http://nitrogenproject.com/
+pkg_nprocreg_fetch = git
+pkg_nprocreg_repo = https://github.com/nitrogen/nprocreg
+pkg_nprocreg_commit = master
+
+PACKAGES += oauth
+pkg_oauth_name = oauth
+pkg_oauth_description = An Erlang OAuth 1.0 implementation
+pkg_oauth_homepage = https://github.com/tim/erlang-oauth
+pkg_oauth_fetch = git
+pkg_oauth_repo = https://github.com/tim/erlang-oauth
+pkg_oauth_commit = master
+
+PACKAGES += oauth2
+pkg_oauth2_name = oauth2
+pkg_oauth2_description = Erlang OAuth2 implementation
+pkg_oauth2_homepage = https://github.com/kivra/oauth2
+pkg_oauth2_fetch = git
+pkg_oauth2_repo = https://github.com/kivra/oauth2
+pkg_oauth2_commit = master
+
+PACKAGES += observer_cli
+pkg_observer_cli_name = observer_cli
+pkg_observer_cli_description = Visualize Erlang/Elixir Nodes On The Command Line
+pkg_observer_cli_homepage = http://zhongwencool.github.io/observer_cli
+pkg_observer_cli_fetch = git
+pkg_observer_cli_repo = https://github.com/zhongwencool/observer_cli
+pkg_observer_cli_commit = master
+
+PACKAGES += octopus
+pkg_octopus_name = octopus
+pkg_octopus_description = Small and flexible pool manager written in Erlang
+pkg_octopus_homepage = https://github.com/erlangbureau/octopus
+pkg_octopus_fetch = git
+pkg_octopus_repo = https://github.com/erlangbureau/octopus
+pkg_octopus_commit = master
+
+PACKAGES += of_protocol
+pkg_of_protocol_name = of_protocol
+pkg_of_protocol_description = OpenFlow Protocol Library for Erlang
+pkg_of_protocol_homepage = https://github.com/FlowForwarding/of_protocol
+pkg_of_protocol_fetch = git
+pkg_of_protocol_repo = https://github.com/FlowForwarding/of_protocol
+pkg_of_protocol_commit = master
+
+PACKAGES += opencouch
+pkg_opencouch_name = couch
+pkg_opencouch_description = An embeddable document-oriented database compatible with Apache CouchDB
+pkg_opencouch_homepage = https://github.com/benoitc/opencouch
+pkg_opencouch_fetch = git
+pkg_opencouch_repo = https://github.com/benoitc/opencouch
+pkg_opencouch_commit = master
+
+PACKAGES += openflow
+pkg_openflow_name = openflow
+pkg_openflow_description = An OpenFlow controller written in pure Erlang
+pkg_openflow_homepage = https://github.com/renatoaguiar/erlang-openflow
+pkg_openflow_fetch = git
+pkg_openflow_repo = https://github.com/renatoaguiar/erlang-openflow
+pkg_openflow_commit = master
+
+PACKAGES += openid
+pkg_openid_name = openid
+pkg_openid_description = Erlang OpenID
+pkg_openid_homepage = https://github.com/brendonh/erl_openid
+pkg_openid_fetch = git
+pkg_openid_repo = https://github.com/brendonh/erl_openid
+pkg_openid_commit = master
+
+PACKAGES += openpoker
+pkg_openpoker_name = openpoker
+pkg_openpoker_description = Genesis Texas hold'em Game Server
+pkg_openpoker_homepage = https://github.com/hpyhacking/openpoker
+pkg_openpoker_fetch = git
+pkg_openpoker_repo = https://github.com/hpyhacking/openpoker
+pkg_openpoker_commit = master
+
+PACKAGES += otpbp
+pkg_otpbp_name = otpbp
+pkg_otpbp_description = Parse transform for using new OTP functions in old Erlang/OTP releases (R15, R16, 17, 18, 19)
+pkg_otpbp_homepage = https://github.com/Ledest/otpbp
+pkg_otpbp_fetch = git
+pkg_otpbp_repo = https://github.com/Ledest/otpbp
+pkg_otpbp_commit = master
+
+PACKAGES += pal
+pkg_pal_name = pal
+pkg_pal_description = Pragmatic Authentication Library
+pkg_pal_homepage = https://github.com/manifest/pal
+pkg_pal_fetch = git
+pkg_pal_repo = https://github.com/manifest/pal
+pkg_pal_commit = master
+
+PACKAGES += parse_trans
+pkg_parse_trans_name = parse_trans
+pkg_parse_trans_description = Parse transform utilities for Erlang
+pkg_parse_trans_homepage = https://github.com/uwiger/parse_trans
+pkg_parse_trans_fetch = git
+pkg_parse_trans_repo = https://github.com/uwiger/parse_trans
+pkg_parse_trans_commit = master
+
+PACKAGES += parsexml
+pkg_parsexml_name = parsexml
+pkg_parsexml_description = Simple DOM XML parser with convenient and very simple API
+pkg_parsexml_homepage = https://github.com/maxlapshin/parsexml
+pkg_parsexml_fetch = git
+pkg_parsexml_repo = https://github.com/maxlapshin/parsexml
+pkg_parsexml_commit = master
+
+PACKAGES += partisan
+pkg_partisan_name = partisan
+pkg_partisan_description = High-performance, high-scalability distributed computing with Erlang and Elixir.
+pkg_partisan_homepage = http://partisan.cloud
+pkg_partisan_fetch = git
+pkg_partisan_repo = https://github.com/lasp-lang/partisan
+pkg_partisan_commit = master
+
+PACKAGES += pegjs
+pkg_pegjs_name = pegjs
+pkg_pegjs_description = An implementation of PEG.js grammar for Erlang.
+pkg_pegjs_homepage = https://github.com/dmitriid/pegjs
+pkg_pegjs_fetch = git
+pkg_pegjs_repo = https://github.com/dmitriid/pegjs
+pkg_pegjs_commit = master
+
+PACKAGES += percept2
+pkg_percept2_name = percept2
+pkg_percept2_description = Concurrent profiling tool for Erlang
+pkg_percept2_homepage = https://github.com/huiqing/percept2
+pkg_percept2_fetch = git
+pkg_percept2_repo = https://github.com/huiqing/percept2
+pkg_percept2_commit = master
+
+PACKAGES += pgo
+pkg_pgo_name = pgo
+pkg_pgo_description = Erlang Postgres client and connection pool
+pkg_pgo_homepage = https://github.com/erleans/pgo.git
+pkg_pgo_fetch = git
+pkg_pgo_repo = https://github.com/erleans/pgo.git
+pkg_pgo_commit = master
+
+PACKAGES += pgsql
+pkg_pgsql_name = pgsql
+pkg_pgsql_description = Erlang PostgreSQL driver
+pkg_pgsql_homepage = https://github.com/semiocast/pgsql
+pkg_pgsql_fetch = git
+pkg_pgsql_repo = https://github.com/semiocast/pgsql
+pkg_pgsql_commit = master
+
+PACKAGES += pkgx
+pkg_pkgx_name = pkgx
+pkg_pkgx_description = Build .deb packages from Erlang releases
+pkg_pkgx_homepage = https://github.com/arjan/pkgx
+pkg_pkgx_fetch = git
+pkg_pkgx_repo = https://github.com/arjan/pkgx
+pkg_pkgx_commit = master
+
+PACKAGES += pkt
+pkg_pkt_name = pkt
+pkg_pkt_description = Erlang network protocol library
+pkg_pkt_homepage = https://github.com/msantos/pkt
+pkg_pkt_fetch = git
+pkg_pkt_repo = https://github.com/msantos/pkt
+pkg_pkt_commit = master
+
+PACKAGES += plain_fsm
+pkg_plain_fsm_name = plain_fsm
+pkg_plain_fsm_description = A behaviour/support library for writing plain Erlang FSMs.
+pkg_plain_fsm_homepage = https://github.com/uwiger/plain_fsm
+pkg_plain_fsm_fetch = git
+pkg_plain_fsm_repo = https://github.com/uwiger/plain_fsm
+pkg_plain_fsm_commit = master
+
+PACKAGES += plumtree
+pkg_plumtree_name = plumtree
+pkg_plumtree_description = Epidemic Broadcast Trees
+pkg_plumtree_homepage = https://github.com/helium/plumtree
+pkg_plumtree_fetch = git
+pkg_plumtree_repo = https://github.com/helium/plumtree
+pkg_plumtree_commit = master
+
+PACKAGES += pmod_transform
+pkg_pmod_transform_name = pmod_transform
+pkg_pmod_transform_description = Parse transform for parameterized modules
+pkg_pmod_transform_homepage = https://github.com/erlang/pmod_transform
+pkg_pmod_transform_fetch = git
+pkg_pmod_transform_repo = https://github.com/erlang/pmod_transform
+pkg_pmod_transform_commit = master
+
+PACKAGES += pobox
+pkg_pobox_name = pobox
+pkg_pobox_description = External buffer processes to protect against mailbox overflow in Erlang
+pkg_pobox_homepage = https://github.com/ferd/pobox
+pkg_pobox_fetch = git
+pkg_pobox_repo = https://github.com/ferd/pobox
+pkg_pobox_commit = master
+
+PACKAGES += ponos
+pkg_ponos_name = ponos
+pkg_ponos_description = ponos is a simple yet powerful load generator written in Erlang
+pkg_ponos_homepage = https://github.com/klarna/ponos
+pkg_ponos_fetch = git
+pkg_ponos_repo = https://github.com/klarna/ponos
+pkg_ponos_commit = master
+
+PACKAGES += poolboy
+pkg_poolboy_name = poolboy
+pkg_poolboy_description = A hunky Erlang worker pool factory
+pkg_poolboy_homepage = https://github.com/devinus/poolboy
+pkg_poolboy_fetch = git
+pkg_poolboy_repo = https://github.com/devinus/poolboy
+pkg_poolboy_commit = master
+
+PACKAGES += pooler
+pkg_pooler_name = pooler
+pkg_pooler_description = An OTP Process Pool Application
+pkg_pooler_homepage = https://github.com/seth/pooler
+pkg_pooler_fetch = git
+pkg_pooler_repo = https://github.com/seth/pooler
+pkg_pooler_commit = master
+
+PACKAGES += pqueue
+pkg_pqueue_name = pqueue
+pkg_pqueue_description = Erlang Priority Queues
+pkg_pqueue_homepage = https://github.com/okeuday/pqueue
+pkg_pqueue_fetch = git
+pkg_pqueue_repo = https://github.com/okeuday/pqueue
+pkg_pqueue_commit = master
+
+PACKAGES += procket
+pkg_procket_name = procket
+pkg_procket_description = Erlang interface to low-level socket operations
+pkg_procket_homepage = http://blog.listincomprehension.com/search/label/procket
+pkg_procket_fetch = git
+pkg_procket_repo = https://github.com/msantos/procket
+pkg_procket_commit = master
+
+PACKAGES += prometheus
+pkg_prometheus_name = prometheus
+pkg_prometheus_description = Prometheus.io client in Erlang
+pkg_prometheus_homepage = https://github.com/deadtrickster/prometheus.erl
+pkg_prometheus_fetch = git
+pkg_prometheus_repo = https://github.com/deadtrickster/prometheus.erl
+pkg_prometheus_commit = master
+
+PACKAGES += prop
+pkg_prop_name = prop
+pkg_prop_description = An Erlang code scaffolding and generator system.
+pkg_prop_homepage = https://github.com/nuex/prop
+pkg_prop_fetch = git
+pkg_prop_repo = https://github.com/nuex/prop
+pkg_prop_commit = master
+
+PACKAGES += proper
+pkg_proper_name = proper
+pkg_proper_description = PropEr: a QuickCheck-inspired property-based testing tool for Erlang.
+pkg_proper_homepage = http://proper.softlab.ntua.gr
+pkg_proper_fetch = git
+pkg_proper_repo = https://github.com/manopapad/proper
+pkg_proper_commit = master
+
+PACKAGES += props
+pkg_props_name = props
+pkg_props_description = Property structure library
+pkg_props_homepage = https://github.com/greyarea/props
+pkg_props_fetch = git
+pkg_props_repo = https://github.com/greyarea/props
+pkg_props_commit = master
+
+PACKAGES += protobuffs
+pkg_protobuffs_name = protobuffs
+pkg_protobuffs_description = An implementation of Google's Protocol Buffers for Erlang, based on ngerakines/erlang_protobuffs.
+pkg_protobuffs_homepage = https://github.com/basho/erlang_protobuffs
+pkg_protobuffs_fetch = git
+pkg_protobuffs_repo = https://github.com/basho/erlang_protobuffs
+pkg_protobuffs_commit = master
+
+PACKAGES += psycho
+pkg_psycho_name = psycho
+pkg_psycho_description = HTTP server that provides a WSGI-like interface for applications and middleware.
+pkg_psycho_homepage = https://github.com/gar1t/psycho
+pkg_psycho_fetch = git
+pkg_psycho_repo = https://github.com/gar1t/psycho
+pkg_psycho_commit = master
+
+PACKAGES += purity
+pkg_purity_name = purity
+pkg_purity_description = A side-effect analyzer for Erlang
+pkg_purity_homepage = https://github.com/mpitid/purity
+pkg_purity_fetch = git
+pkg_purity_repo = https://github.com/mpitid/purity
+pkg_purity_commit = master
+
+PACKAGES += push_service
+pkg_push_service_name = push_service
+pkg_push_service_description = Push service
+pkg_push_service_homepage = https://github.com/hairyhum/push_service
+pkg_push_service_fetch = git
+pkg_push_service_repo = https://github.com/hairyhum/push_service
+pkg_push_service_commit = master
+
+PACKAGES += qdate
+pkg_qdate_name = qdate
+pkg_qdate_description = Date, time, and timezone parsing, formatting, and conversion for Erlang.
+pkg_qdate_homepage = https://github.com/choptastic/qdate
+pkg_qdate_fetch = git
+pkg_qdate_repo = https://github.com/choptastic/qdate
+pkg_qdate_commit = master
+
+PACKAGES += qrcode
+pkg_qrcode_name = qrcode
+pkg_qrcode_description = QR Code encoder in Erlang
+pkg_qrcode_homepage = https://github.com/komone/qrcode
+pkg_qrcode_fetch = git
+pkg_qrcode_repo = https://github.com/komone/qrcode
+pkg_qrcode_commit = master
+
+PACKAGES += quest
+pkg_quest_name = quest
+pkg_quest_description = Learn Erlang through this set of challenges. An interactive system for getting to know Erlang.
+pkg_quest_homepage = https://github.com/eriksoe/ErlangQuest
+pkg_quest_fetch = git
+pkg_quest_repo = https://github.com/eriksoe/ErlangQuest
+pkg_quest_commit = master
+
+PACKAGES += quickrand
+pkg_quickrand_name = quickrand
+pkg_quickrand_description = Quick Erlang Random Number Generation
+pkg_quickrand_homepage = https://github.com/okeuday/quickrand
+pkg_quickrand_fetch = git
+pkg_quickrand_repo = https://github.com/okeuday/quickrand
+pkg_quickrand_commit = master
+
+PACKAGES += rabbit
+pkg_rabbit_name = rabbit
+pkg_rabbit_description = RabbitMQ Server
+pkg_rabbit_homepage = https://www.rabbitmq.com/
+pkg_rabbit_fetch = git
+pkg_rabbit_repo = https://github.com/rabbitmq/rabbitmq-server.git
+pkg_rabbit_commit = master
+
+PACKAGES += rabbit_exchange_type_riak
+pkg_rabbit_exchange_type_riak_name = rabbit_exchange_type_riak
+pkg_rabbit_exchange_type_riak_description = Custom RabbitMQ exchange type for sticking messages in Riak
+pkg_rabbit_exchange_type_riak_homepage = https://github.com/jbrisbin/riak-exchange
+pkg_rabbit_exchange_type_riak_fetch = git
+pkg_rabbit_exchange_type_riak_repo = https://github.com/jbrisbin/riak-exchange
+pkg_rabbit_exchange_type_riak_commit = master
+
+PACKAGES += rack
+pkg_rack_name = rack
+pkg_rack_description = Rack handler for Erlang
+pkg_rack_homepage = https://github.com/erlyvideo/rack
+pkg_rack_fetch = git
+pkg_rack_repo = https://github.com/erlyvideo/rack
+pkg_rack_commit = master
+
+PACKAGES += radierl
+pkg_radierl_name = radierl
+pkg_radierl_description = RADIUS protocol stack implemented in Erlang.
+pkg_radierl_homepage = https://github.com/vances/radierl
+pkg_radierl_fetch = git
+pkg_radierl_repo = https://github.com/vances/radierl
+pkg_radierl_commit = master
+
+PACKAGES += rafter
+pkg_rafter_name = rafter
+pkg_rafter_description = An Erlang library application which implements the Raft consensus protocol
+pkg_rafter_homepage = https://github.com/andrewjstone/rafter
+pkg_rafter_fetch = git
+pkg_rafter_repo = https://github.com/andrewjstone/rafter
+pkg_rafter_commit = master
+
+PACKAGES += ranch
+pkg_ranch_name = ranch
+pkg_ranch_description = Socket acceptor pool for TCP protocols.
+pkg_ranch_homepage = http://ninenines.eu
+pkg_ranch_fetch = git
+pkg_ranch_repo = https://github.com/ninenines/ranch
+pkg_ranch_commit = 1.2.1
+
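+# Packages outside this index, or entries whose source should be repointed,
+# can be specified in full from a project Makefile; a sketch assuming
+# erlang.mk's documented "dep_NAME = fetch-method repository version" form,
+# reusing the ranch entry above as the example:
+#
+#     DEPS = ranch
+#     dep_ranch = git https://github.com/ninenines/ranch 1.2.1
+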
+PACKAGES += rbeacon
+pkg_rbeacon_name = rbeacon
+pkg_rbeacon_description = LAN discovery and presence in Erlang.
+pkg_rbeacon_homepage = https://github.com/refuge/rbeacon
+pkg_rbeacon_fetch = git
+pkg_rbeacon_repo = https://github.com/refuge/rbeacon
+pkg_rbeacon_commit = master
+
+PACKAGES += rebar
+pkg_rebar_name = rebar
+pkg_rebar_description = Erlang build tool that makes it easy to compile and test Erlang applications, port drivers and releases.
+pkg_rebar_homepage = http://www.rebar3.org
+pkg_rebar_fetch = git
+pkg_rebar_repo = https://github.com/rebar/rebar3
+pkg_rebar_commit = master
+
+PACKAGES += rebus
+pkg_rebus_name = rebus
+pkg_rebus_description = A stupid simple, internal, pub/sub event bus written in and for Erlang.
+pkg_rebus_homepage = https://github.com/olle/rebus
+pkg_rebus_fetch = git
+pkg_rebus_repo = https://github.com/olle/rebus
+pkg_rebus_commit = master
+
+PACKAGES += rec2json
+pkg_rec2json_name = rec2json
+pkg_rec2json_description = Compile Erlang record definitions into modules to convert them to/from JSON easily.
+pkg_rec2json_homepage = https://github.com/lordnull/rec2json
+pkg_rec2json_fetch = git
+pkg_rec2json_repo = https://github.com/lordnull/rec2json
+pkg_rec2json_commit = master
+
+PACKAGES += recon
+pkg_recon_name = recon
+pkg_recon_description = Collection of functions and scripts to debug Erlang in production.
+pkg_recon_homepage = https://github.com/ferd/recon
+pkg_recon_fetch = git
+pkg_recon_repo = https://github.com/ferd/recon
+pkg_recon_commit = master
+
+PACKAGES += record_info
+pkg_record_info_name = record_info
+pkg_record_info_description = Convert between records and proplists
+pkg_record_info_homepage = https://github.com/bipthelin/erlang-record_info
+pkg_record_info_fetch = git
+pkg_record_info_repo = https://github.com/bipthelin/erlang-record_info
+pkg_record_info_commit = master
+
+PACKAGES += redgrid
+pkg_redgrid_name = redgrid
+pkg_redgrid_description = Automatic Erlang node discovery via Redis
+pkg_redgrid_homepage = https://github.com/jkvor/redgrid
+pkg_redgrid_fetch = git
+pkg_redgrid_repo = https://github.com/jkvor/redgrid
+pkg_redgrid_commit = master
+
+PACKAGES += redo
+pkg_redo_name = redo
+pkg_redo_description = Pipelined Erlang Redis client
+pkg_redo_homepage = https://github.com/jkvor/redo
+pkg_redo_fetch = git
+pkg_redo_repo = https://github.com/jkvor/redo
+pkg_redo_commit = master
+
+PACKAGES += reload_mk
+pkg_reload_mk_name = reload_mk
+pkg_reload_mk_description = Live reload plugin for erlang.mk.
+pkg_reload_mk_homepage = https://github.com/bullno1/reload.mk
+pkg_reload_mk_fetch = git
+pkg_reload_mk_repo = https://github.com/bullno1/reload.mk
+pkg_reload_mk_commit = master
+
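+# Some index entries, such as reload_mk above, are erlang.mk plugins rather
+# than runtime applications; a sketch assuming the usual plugin wiring, where
+# the package is fetched as a build-time dependency and its plugin file is
+# then loaded:
+#
+#     BUILD_DEPS = reload_mk
+#     DEP_PLUGINS = reload_mk
+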
+PACKAGES += reltool_util
+pkg_reltool_util_name = reltool_util
+pkg_reltool_util_description = Erlang reltool utility functionality application
+pkg_reltool_util_homepage = https://github.com/okeuday/reltool_util
+pkg_reltool_util_fetch = git
+pkg_reltool_util_repo = https://github.com/okeuday/reltool_util
+pkg_reltool_util_commit = master
+
+PACKAGES += relx
+pkg_relx_name = relx
+pkg_relx_description = Sane, simple release creation for Erlang
+pkg_relx_homepage = https://github.com/erlware/relx
+pkg_relx_fetch = git
+pkg_relx_repo = https://github.com/erlware/relx
+pkg_relx_commit = master
+
+PACKAGES += resource_discovery
+pkg_resource_discovery_name = resource_discovery
+pkg_resource_discovery_description = An application used to dynamically discover resources present in an Erlang node cluster.
+pkg_resource_discovery_homepage = http://erlware.org/
+pkg_resource_discovery_fetch = git
+pkg_resource_discovery_repo = https://github.com/erlware/resource_discovery
+pkg_resource_discovery_commit = master
+
+PACKAGES += restc
+pkg_restc_name = restc
+pkg_restc_description = Erlang REST client
+pkg_restc_homepage = https://github.com/kivra/restclient
+pkg_restc_fetch = git
+pkg_restc_repo = https://github.com/kivra/restclient
+pkg_restc_commit = master
+
+PACKAGES += rfc4627_jsonrpc
+pkg_rfc4627_jsonrpc_name = rfc4627_jsonrpc
+pkg_rfc4627_jsonrpc_description = Erlang RFC4627 (JSON) codec and JSON-RPC server implementation.
+pkg_rfc4627_jsonrpc_homepage = https://github.com/tonyg/erlang-rfc4627
+pkg_rfc4627_jsonrpc_fetch = git
+pkg_rfc4627_jsonrpc_repo = https://github.com/tonyg/erlang-rfc4627
+pkg_rfc4627_jsonrpc_commit = master
+
+PACKAGES += riak_control
+pkg_riak_control_name = riak_control
+pkg_riak_control_description = Webmachine-based administration interface for Riak.
+pkg_riak_control_homepage = https://github.com/basho/riak_control
+pkg_riak_control_fetch = git
+pkg_riak_control_repo = https://github.com/basho/riak_control
+pkg_riak_control_commit = master
+
+PACKAGES += riak_core
+pkg_riak_core_name = riak_core
+pkg_riak_core_description = Distributed systems infrastructure used by Riak.
+pkg_riak_core_homepage = https://github.com/basho/riak_core
+pkg_riak_core_fetch = git
+pkg_riak_core_repo = https://github.com/basho/riak_core
+pkg_riak_core_commit = master
+
+PACKAGES += riak_dt
+pkg_riak_dt_name = riak_dt
+pkg_riak_dt_description = Convergent replicated datatypes in Erlang
+pkg_riak_dt_homepage = https://github.com/basho/riak_dt
+pkg_riak_dt_fetch = git
+pkg_riak_dt_repo = https://github.com/basho/riak_dt
+pkg_riak_dt_commit = master
+
+PACKAGES += riak_ensemble
+pkg_riak_ensemble_name = riak_ensemble
+pkg_riak_ensemble_description = Multi-Paxos framework in Erlang
+pkg_riak_ensemble_homepage = https://github.com/basho/riak_ensemble
+pkg_riak_ensemble_fetch = git
+pkg_riak_ensemble_repo = https://github.com/basho/riak_ensemble
+pkg_riak_ensemble_commit = master
+
+PACKAGES += riak_kv
+pkg_riak_kv_name = riak_kv
+pkg_riak_kv_description = Riak Key/Value Store
+pkg_riak_kv_homepage = https://github.com/basho/riak_kv
+pkg_riak_kv_fetch = git
+pkg_riak_kv_repo = https://github.com/basho/riak_kv
+pkg_riak_kv_commit = master
+
+PACKAGES += riak_pg
+pkg_riak_pg_name = riak_pg
+pkg_riak_pg_description = Distributed process groups with riak_core.
+pkg_riak_pg_homepage = https://github.com/cmeiklejohn/riak_pg
+pkg_riak_pg_fetch = git
+pkg_riak_pg_repo = https://github.com/cmeiklejohn/riak_pg
+pkg_riak_pg_commit = master
+
+PACKAGES += riak_pipe
+pkg_riak_pipe_name = riak_pipe
+pkg_riak_pipe_description = Riak Pipelines
+pkg_riak_pipe_homepage = https://github.com/basho/riak_pipe
+pkg_riak_pipe_fetch = git
+pkg_riak_pipe_repo = https://github.com/basho/riak_pipe
+pkg_riak_pipe_commit = master
+
+PACKAGES += riak_sysmon
+pkg_riak_sysmon_name = riak_sysmon
+pkg_riak_sysmon_description = Simple OTP app for managing Erlang VM system_monitor event messages
+pkg_riak_sysmon_homepage = https://github.com/basho/riak_sysmon
+pkg_riak_sysmon_fetch = git
+pkg_riak_sysmon_repo = https://github.com/basho/riak_sysmon
+pkg_riak_sysmon_commit = master
+
+PACKAGES += riak_test
+pkg_riak_test_name = riak_test
+pkg_riak_test_description = I'm in your cluster, testing your riaks
+pkg_riak_test_homepage = https://github.com/basho/riak_test
+pkg_riak_test_fetch = git
+pkg_riak_test_repo = https://github.com/basho/riak_test
+pkg_riak_test_commit = master
+
+PACKAGES += riakc
+pkg_riakc_name = riakc
+pkg_riakc_description = Erlang clients for Riak.
+pkg_riakc_homepage = https://github.com/basho/riak-erlang-client
+pkg_riakc_fetch = git
+pkg_riakc_repo = https://github.com/basho/riak-erlang-client
+pkg_riakc_commit = master
+
+PACKAGES += riakhttpc
+pkg_riakhttpc_name = riakhttpc
+pkg_riakhttpc_description = Riak Erlang client using the HTTP interface
+pkg_riakhttpc_homepage = https://github.com/basho/riak-erlang-http-client
+pkg_riakhttpc_fetch = git
+pkg_riakhttpc_repo = https://github.com/basho/riak-erlang-http-client
+pkg_riakhttpc_commit = master
+
+PACKAGES += riaknostic
+pkg_riaknostic_name = riaknostic
+pkg_riaknostic_description = A diagnostic tool for Riak installations, to find common errors asap
+pkg_riaknostic_homepage = https://github.com/basho/riaknostic
+pkg_riaknostic_fetch = git
+pkg_riaknostic_repo = https://github.com/basho/riaknostic
+pkg_riaknostic_commit = master
+
+PACKAGES += riakpool
+pkg_riakpool_name = riakpool
+pkg_riakpool_description = Erlang Riak client pool
+pkg_riakpool_homepage = https://github.com/dweldon/riakpool
+pkg_riakpool_fetch = git
+pkg_riakpool_repo = https://github.com/dweldon/riakpool
+pkg_riakpool_commit = master
+
+PACKAGES += rivus_cep
+pkg_rivus_cep_name = rivus_cep
+pkg_rivus_cep_description = Complex event processing in Erlang
+pkg_rivus_cep_homepage = https://github.com/vascokk/rivus_cep
+pkg_rivus_cep_fetch = git
+pkg_rivus_cep_repo = https://github.com/vascokk/rivus_cep
+pkg_rivus_cep_commit = master
+
+PACKAGES += rlimit
+pkg_rlimit_name = rlimit
+pkg_rlimit_description = Magnus Klaar's rate limiter code from etorrent
+pkg_rlimit_homepage = https://github.com/jlouis/rlimit
+pkg_rlimit_fetch = git
+pkg_rlimit_repo = https://github.com/jlouis/rlimit
+pkg_rlimit_commit = master
+
+PACKAGES += rust_mk
+pkg_rust_mk_name = rust_mk
+pkg_rust_mk_description = Build Rust crates in an Erlang application
+pkg_rust_mk_homepage = https://github.com/goertzenator/rust.mk
+pkg_rust_mk_fetch = git
+pkg_rust_mk_repo = https://github.com/goertzenator/rust.mk
+pkg_rust_mk_commit = master
+
+PACKAGES += safetyvalve
+pkg_safetyvalve_name = safetyvalve
+pkg_safetyvalve_description = A safety valve for your Erlang node
+pkg_safetyvalve_homepage = https://github.com/jlouis/safetyvalve
+pkg_safetyvalve_fetch = git
+pkg_safetyvalve_repo = https://github.com/jlouis/safetyvalve
+pkg_safetyvalve_commit = master
+
+PACKAGES += seestar
+pkg_seestar_name = seestar
+pkg_seestar_description = The Erlang client for Cassandra 1.2+ binary protocol
+pkg_seestar_homepage = https://github.com/iamaleksey/seestar
+pkg_seestar_fetch = git
+pkg_seestar_repo = https://github.com/iamaleksey/seestar
+pkg_seestar_commit = master
+
+PACKAGES += service
+pkg_service_name = service
+pkg_service_description = A minimal Erlang behavior for creating CloudI internal services
+pkg_service_homepage = http://cloudi.org/
+pkg_service_fetch = git
+pkg_service_repo = https://github.com/CloudI/service
+pkg_service_commit = master
+
+PACKAGES += setup
+pkg_setup_name = setup
+pkg_setup_description = Generic setup utility for Erlang-based systems
+pkg_setup_homepage = https://github.com/uwiger/setup
+pkg_setup_fetch = git
+pkg_setup_repo = https://github.com/uwiger/setup
+pkg_setup_commit = master
+
+PACKAGES += sext
+pkg_sext_name = sext
+pkg_sext_description = Sortable Erlang Term Serialization
+pkg_sext_homepage = https://github.com/uwiger/sext
+pkg_sext_fetch = git
+pkg_sext_repo = https://github.com/uwiger/sext
+pkg_sext_commit = master
+
+PACKAGES += sfmt
+pkg_sfmt_name = sfmt
+pkg_sfmt_description = SFMT pseudo random number generator for Erlang.
+pkg_sfmt_homepage = https://github.com/jj1bdx/sfmt-erlang
+pkg_sfmt_fetch = git
+pkg_sfmt_repo = https://github.com/jj1bdx/sfmt-erlang
+pkg_sfmt_commit = master
+
+PACKAGES += sgte
+pkg_sgte_name = sgte
+pkg_sgte_description = A simple Erlang Template Engine
+pkg_sgte_homepage = https://github.com/filippo/sgte
+pkg_sgte_fetch = git
+pkg_sgte_repo = https://github.com/filippo/sgte
+pkg_sgte_commit = master
+
+PACKAGES += sheriff
+pkg_sheriff_name = sheriff
+pkg_sheriff_description = Parse transform for type-based validation.
+pkg_sheriff_homepage = http://ninenines.eu
+pkg_sheriff_fetch = git
+pkg_sheriff_repo = https://github.com/extend/sheriff
+pkg_sheriff_commit = master
+
+PACKAGES += shotgun
+pkg_shotgun_name = shotgun
+pkg_shotgun_description = better than just a gun
+pkg_shotgun_homepage = https://github.com/inaka/shotgun
+pkg_shotgun_fetch = git
+pkg_shotgun_repo = https://github.com/inaka/shotgun
+pkg_shotgun_commit = master
+
+PACKAGES += sidejob
+pkg_sidejob_name = sidejob
+pkg_sidejob_description = Parallel worker and capacity limiting library for Erlang
+pkg_sidejob_homepage = https://github.com/basho/sidejob
+pkg_sidejob_fetch = git
+pkg_sidejob_repo = https://github.com/basho/sidejob
+pkg_sidejob_commit = master
+
+PACKAGES += sieve
+pkg_sieve_name = sieve
+pkg_sieve_description = sieve is a simple TCP routing proxy (layer 7) in Erlang
+pkg_sieve_homepage = https://github.com/benoitc/sieve
+pkg_sieve_fetch = git
+pkg_sieve_repo = https://github.com/benoitc/sieve
+pkg_sieve_commit = master
+
+PACKAGES += sighandler
+pkg_sighandler_name = sighandler
+pkg_sighandler_description = Handle UNIX signals in Erlang
+pkg_sighandler_homepage = https://github.com/jkingsbery/sighandler
+pkg_sighandler_fetch = git
+pkg_sighandler_repo = https://github.com/jkingsbery/sighandler
+pkg_sighandler_commit = master
+
+PACKAGES += simhash
+pkg_simhash_name = simhash
+pkg_simhash_description = Simhashing for Erlang -- hashing algorithm to find near-duplicates in binary data.
+pkg_simhash_homepage = https://github.com/ferd/simhash
+pkg_simhash_fetch = git
+pkg_simhash_repo = https://github.com/ferd/simhash
+pkg_simhash_commit = master
+
+PACKAGES += simple_bridge
+pkg_simple_bridge_name = simple_bridge
+pkg_simple_bridge_description = A simple, standardized interface library to Erlang HTTP Servers.
+pkg_simple_bridge_homepage = https://github.com/nitrogen/simple_bridge
+pkg_simple_bridge_fetch = git
+pkg_simple_bridge_repo = https://github.com/nitrogen/simple_bridge
+pkg_simple_bridge_commit = master
+
+PACKAGES += simple_oauth2
+pkg_simple_oauth2_name = simple_oauth2
+pkg_simple_oauth2_description = Simple Erlang OAuth2 client module for any HTTP server framework (Google, Facebook, Yandex, Vkontakte are preconfigured)
+pkg_simple_oauth2_homepage = https://github.com/virtan/simple_oauth2
+pkg_simple_oauth2_fetch = git
+pkg_simple_oauth2_repo = https://github.com/virtan/simple_oauth2
+pkg_simple_oauth2_commit = master
+
+PACKAGES += skel
+pkg_skel_name = skel
+pkg_skel_description = A Streaming Process-based Skeleton Library for Erlang
+pkg_skel_homepage = https://github.com/ParaPhrase/skel
+pkg_skel_fetch = git
+pkg_skel_repo = https://github.com/ParaPhrase/skel
+pkg_skel_commit = master
+
+PACKAGES += slack
+pkg_slack_name = slack
+pkg_slack_description = Minimal Slack notification OTP library.
+pkg_slack_homepage = https://github.com/DonBranson/slack
+pkg_slack_fetch = git
+pkg_slack_repo = https://github.com/DonBranson/slack.git
+pkg_slack_commit = master
+
+PACKAGES += smother
+pkg_smother_name = smother
+pkg_smother_description = Extended code coverage metrics for Erlang.
+pkg_smother_homepage = https://ramsay-t.github.io/Smother/
+pkg_smother_fetch = git
+pkg_smother_repo = https://github.com/ramsay-t/Smother
+pkg_smother_commit = master
+
+PACKAGES += snappyer
+pkg_snappyer_name = snappyer
+pkg_snappyer_description = Snappy as a NIF for Erlang
+pkg_snappyer_homepage = https://github.com/zmstone/snappyer
+pkg_snappyer_fetch = git
+pkg_snappyer_repo = https://github.com/zmstone/snappyer.git
+pkg_snappyer_commit = master
+
+PACKAGES += social
+pkg_social_name = social
+pkg_social_description = Cowboy handler for social login via OAuth2 providers
+pkg_social_homepage = https://github.com/dvv/social
+pkg_social_fetch = git
+pkg_social_repo = https://github.com/dvv/social
+pkg_social_commit = master
+
+PACKAGES += spapi_router
+pkg_spapi_router_name = spapi_router
+pkg_spapi_router_description = Partially-connected Erlang clustering
+pkg_spapi_router_homepage = https://github.com/spilgames/spapi-router
+pkg_spapi_router_fetch = git
+pkg_spapi_router_repo = https://github.com/spilgames/spapi-router
+pkg_spapi_router_commit = master
+
+PACKAGES += sqerl
+pkg_sqerl_name = sqerl
+pkg_sqerl_description = An Erlang-flavoured SQL DSL
+pkg_sqerl_homepage = https://github.com/hairyhum/sqerl
+pkg_sqerl_fetch = git
+pkg_sqerl_repo = https://github.com/hairyhum/sqerl
+pkg_sqerl_commit = master
+
+PACKAGES += srly
+pkg_srly_name = srly
+pkg_srly_description = Native Erlang Unix serial interface
+pkg_srly_homepage = https://github.com/msantos/srly
+pkg_srly_fetch = git
+pkg_srly_repo = https://github.com/msantos/srly
+pkg_srly_commit = master
+
+PACKAGES += sshrpc
+pkg_sshrpc_name = sshrpc
+pkg_sshrpc_description = Erlang SSH RPC module (experimental)
+pkg_sshrpc_homepage = https://github.com/jj1bdx/sshrpc
+pkg_sshrpc_fetch = git
+pkg_sshrpc_repo = https://github.com/jj1bdx/sshrpc
+pkg_sshrpc_commit = master
+
+PACKAGES += stable
+pkg_stable_name = stable
+pkg_stable_description = Library of assorted helpers for Cowboy web server.
+pkg_stable_homepage = https://github.com/dvv/stable
+pkg_stable_fetch = git
+pkg_stable_repo = https://github.com/dvv/stable
+pkg_stable_commit = master
+
+PACKAGES += statebox
+pkg_statebox_name = statebox
+pkg_statebox_description = Erlang state monad with merge/conflict-resolution capabilities. Useful for Riak.
+pkg_statebox_homepage = https://github.com/mochi/statebox
+pkg_statebox_fetch = git
+pkg_statebox_repo = https://github.com/mochi/statebox
+pkg_statebox_commit = master
+
+PACKAGES += statebox_riak
+pkg_statebox_riak_name = statebox_riak
+pkg_statebox_riak_description = Convenience library that makes it easier to use statebox with riak, extracted from best practices in our production code at Mochi Media.
+pkg_statebox_riak_homepage = https://github.com/mochi/statebox_riak
+pkg_statebox_riak_fetch = git
+pkg_statebox_riak_repo = https://github.com/mochi/statebox_riak
+pkg_statebox_riak_commit = master
+
+PACKAGES += statman
+pkg_statman_name = statman
+pkg_statman_description = Efficiently collect massive volumes of metrics inside the Erlang VM
+pkg_statman_homepage = https://github.com/knutin/statman
+pkg_statman_fetch = git
+pkg_statman_repo = https://github.com/knutin/statman
+pkg_statman_commit = master
+
+PACKAGES += statsderl
+pkg_statsderl_name = statsderl
+pkg_statsderl_description = StatsD client (Erlang)
+pkg_statsderl_homepage = https://github.com/lpgauth/statsderl
+pkg_statsderl_fetch = git
+pkg_statsderl_repo = https://github.com/lpgauth/statsderl
+pkg_statsderl_commit = master
+
+PACKAGES += stdinout_pool
+pkg_stdinout_pool_name = stdinout_pool
+pkg_stdinout_pool_description = stdinout_pool: stuff goes in, stuff goes out. There's never any miscommunication.
+pkg_stdinout_pool_homepage = https://github.com/mattsta/erlang-stdinout-pool
+pkg_stdinout_pool_fetch = git
+pkg_stdinout_pool_repo = https://github.com/mattsta/erlang-stdinout-pool
+pkg_stdinout_pool_commit = master
+
+PACKAGES += stockdb
+pkg_stockdb_name = stockdb
+pkg_stockdb_description = Database for storing stock exchange quotes in Erlang
+pkg_stockdb_homepage = https://github.com/maxlapshin/stockdb
+pkg_stockdb_fetch = git
+pkg_stockdb_repo = https://github.com/maxlapshin/stockdb
+pkg_stockdb_commit = master
+
+PACKAGES += stripe
+pkg_stripe_name = stripe
+pkg_stripe_description = Erlang interface to the stripe.com API
+pkg_stripe_homepage = https://github.com/mattsta/stripe-erlang
+pkg_stripe_fetch = git
+pkg_stripe_repo = https://github.com/mattsta/stripe-erlang
+pkg_stripe_commit = v1
+
+PACKAGES += subproc
+pkg_subproc_name = subproc
+pkg_subproc_description = Unix subprocess manager with {active,once|false} modes
+pkg_subproc_homepage = http://dozzie.jarowit.net/trac/wiki/subproc
+pkg_subproc_fetch = git
+pkg_subproc_repo = https://github.com/dozzie/subproc
+pkg_subproc_commit = v0.1.0
+
+PACKAGES += supervisor3
+pkg_supervisor3_name = supervisor3
+pkg_supervisor3_description = OTP supervisor with additional strategies
+pkg_supervisor3_homepage = https://github.com/klarna/supervisor3
+pkg_supervisor3_fetch = git
+pkg_supervisor3_repo = https://github.com/klarna/supervisor3.git
+pkg_supervisor3_commit = master
+
+PACKAGES += surrogate
+pkg_surrogate_name = surrogate
+pkg_surrogate_description = Proxy server written in Erlang. Supports reverse proxy load balancing and forward proxy with HTTP (including CONNECT), SOCKS4, SOCKS5, and transparent proxy modes.
+pkg_surrogate_homepage = https://github.com/skruger/Surrogate
+pkg_surrogate_fetch = git
+pkg_surrogate_repo = https://github.com/skruger/Surrogate
+pkg_surrogate_commit = master
+
+PACKAGES += swab
+pkg_swab_name = swab
+pkg_swab_description = General purpose buffer handling module
+pkg_swab_homepage = https://github.com/crownedgrouse/swab
+pkg_swab_fetch = git
+pkg_swab_repo = https://github.com/crownedgrouse/swab
+pkg_swab_commit = master
+
+PACKAGES += swarm
+pkg_swarm_name = swarm
+pkg_swarm_description = Fast and simple acceptor pool for Erlang
+pkg_swarm_homepage = https://github.com/jeremey/swarm
+pkg_swarm_fetch = git
+pkg_swarm_repo = https://github.com/jeremey/swarm
+pkg_swarm_commit = master
+
+PACKAGES += switchboard
+pkg_switchboard_name = switchboard
+pkg_switchboard_description = A framework for processing email using worker plugins.
+pkg_switchboard_homepage = https://github.com/thusfresh/switchboard
+pkg_switchboard_fetch = git
+pkg_switchboard_repo = https://github.com/thusfresh/switchboard
+pkg_switchboard_commit = master
+
+PACKAGES += syn
+pkg_syn_name = syn
+pkg_syn_description = A global Process Registry and Process Group manager for Erlang.
+pkg_syn_homepage = https://github.com/ostinelli/syn
+pkg_syn_fetch = git
+pkg_syn_repo = https://github.com/ostinelli/syn
+pkg_syn_commit = master
+
+PACKAGES += sync
+pkg_sync_name = sync
+pkg_sync_description = On-the-fly recompiling and reloading in Erlang.
+pkg_sync_homepage = https://github.com/rustyio/sync
+pkg_sync_fetch = git
+pkg_sync_repo = https://github.com/rustyio/sync
+pkg_sync_commit = master
+
+PACKAGES += syntaxerl
+pkg_syntaxerl_name = syntaxerl
+pkg_syntaxerl_description = Syntax checker for Erlang
+pkg_syntaxerl_homepage = https://github.com/ten0s/syntaxerl
+pkg_syntaxerl_fetch = git
+pkg_syntaxerl_repo = https://github.com/ten0s/syntaxerl
+pkg_syntaxerl_commit = master
+
+PACKAGES += syslog
+pkg_syslog_name = syslog
+pkg_syslog_description = Erlang port driver for interacting with syslog via syslog(3)
+pkg_syslog_homepage = https://github.com/Vagabond/erlang-syslog
+pkg_syslog_fetch = git
+pkg_syslog_repo = https://github.com/Vagabond/erlang-syslog
+pkg_syslog_commit = master
+
+PACKAGES += taskforce
+pkg_taskforce_name = taskforce
+pkg_taskforce_description = Erlang worker pools for controlled parallelisation of arbitrary tasks.
+pkg_taskforce_homepage = https://github.com/g-andrade/taskforce
+pkg_taskforce_fetch = git
+pkg_taskforce_repo = https://github.com/g-andrade/taskforce
+pkg_taskforce_commit = master
+
+PACKAGES += tddreloader
+pkg_tddreloader_name = tddreloader
+pkg_tddreloader_description = Shell utility for recompiling, reloading, and testing code as it changes
+pkg_tddreloader_homepage = https://github.com/version2beta/tddreloader
+pkg_tddreloader_fetch = git
+pkg_tddreloader_repo = https://github.com/version2beta/tddreloader
+pkg_tddreloader_commit = master
+
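+# Packages meant for interactive development, such as tddreloader above, can
+# be fetched only for the erlang.mk shell target; a sketch assuming the
+# documented SHELL_DEPS variable:
+#
+#     SHELL_DEPS = tddreloader
+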
+PACKAGES += tempo
+pkg_tempo_name = tempo
+pkg_tempo_description = NIF-based date and time parsing and formatting for Erlang.
+pkg_tempo_homepage = https://github.com/selectel/tempo
+pkg_tempo_fetch = git
+pkg_tempo_repo = https://github.com/selectel/tempo
+pkg_tempo_commit = master
+
+PACKAGES += ticktick
+pkg_ticktick_name = ticktick
+pkg_ticktick_description = Ticktick is an ID generator for message services.
+pkg_ticktick_homepage = https://github.com/ericliang/ticktick
+pkg_ticktick_fetch = git
+pkg_ticktick_repo = https://github.com/ericliang/ticktick
+pkg_ticktick_commit = master
+
+PACKAGES += tinymq
+pkg_tinymq_name = tinymq
+pkg_tinymq_description = TinyMQ - a diminutive, in-memory message queue
+pkg_tinymq_homepage = https://github.com/ChicagoBoss/tinymq
+pkg_tinymq_fetch = git
+pkg_tinymq_repo = https://github.com/ChicagoBoss/tinymq
+pkg_tinymq_commit = master
+
+PACKAGES += tinymt
+pkg_tinymt_name = tinymt
+pkg_tinymt_description = TinyMT pseudo random number generator for Erlang.
+pkg_tinymt_homepage = https://github.com/jj1bdx/tinymt-erlang
+pkg_tinymt_fetch = git
+pkg_tinymt_repo = https://github.com/jj1bdx/tinymt-erlang
+pkg_tinymt_commit = master
+
+PACKAGES += tirerl
+pkg_tirerl_name = tirerl
+pkg_tirerl_description = Erlang interface to Elasticsearch
+pkg_tirerl_homepage = https://github.com/inaka/tirerl
+pkg_tirerl_fetch = git
+pkg_tirerl_repo = https://github.com/inaka/tirerl
+pkg_tirerl_commit = master
+
+PACKAGES += toml
+pkg_toml_name = toml
+pkg_toml_description = TOML (0.4.0) config parser
+pkg_toml_homepage = http://dozzie.jarowit.net/trac/wiki/TOML
+pkg_toml_fetch = git
+pkg_toml_repo = https://github.com/dozzie/toml
+pkg_toml_commit = v0.2.0
+
+PACKAGES += traffic_tools
+pkg_traffic_tools_name = traffic_tools
+pkg_traffic_tools_description = Simple traffic limiting library
+pkg_traffic_tools_homepage = https://github.com/systra/traffic_tools
+pkg_traffic_tools_fetch = git
+pkg_traffic_tools_repo = https://github.com/systra/traffic_tools
+pkg_traffic_tools_commit = master
+
+PACKAGES += trails
+pkg_trails_name = trails
+pkg_trails_description = A couple of improvements over Cowboy Routes
+pkg_trails_homepage = http://inaka.github.io/cowboy-trails/
+pkg_trails_fetch = git
+pkg_trails_repo = https://github.com/inaka/cowboy-trails
+pkg_trails_commit = master
+
+PACKAGES += trane
+pkg_trane_name = trane
+pkg_trane_description = SAX-style parser for broken HTML, in Erlang
+pkg_trane_homepage = https://github.com/massemanet/trane
+pkg_trane_fetch = git
+pkg_trane_repo = https://github.com/massemanet/trane
+pkg_trane_commit = master
+
+PACKAGES += transit
+pkg_transit_name = transit
+pkg_transit_description = Transit format for Erlang
+pkg_transit_homepage = https://github.com/isaiah/transit-erlang
+pkg_transit_fetch = git
+pkg_transit_repo = https://github.com/isaiah/transit-erlang
+pkg_transit_commit = master
+
+PACKAGES += trie
+pkg_trie_name = trie
+pkg_trie_description = Erlang Trie Implementation
+pkg_trie_homepage = https://github.com/okeuday/trie
+pkg_trie_fetch = git
+pkg_trie_repo = https://github.com/okeuday/trie
+pkg_trie_commit = master
+
+PACKAGES += triq
+pkg_triq_name = triq
+pkg_triq_description = Trifork QuickCheck
+pkg_triq_homepage = https://triq.gitlab.io
+pkg_triq_fetch = git
+pkg_triq_repo = https://gitlab.com/triq/triq.git
+pkg_triq_commit = master
+
+PACKAGES += tunctl
+pkg_tunctl_name = tunctl
+pkg_tunctl_description = Erlang TUN/TAP interface
+pkg_tunctl_homepage = https://github.com/msantos/tunctl
+pkg_tunctl_fetch = git
+pkg_tunctl_repo = https://github.com/msantos/tunctl
+pkg_tunctl_commit = master
+
+PACKAGES += twerl
+pkg_twerl_name = twerl
+pkg_twerl_description = Erlang client for the Twitter Streaming API
+pkg_twerl_homepage = https://github.com/lucaspiller/twerl
+pkg_twerl_fetch = git
+pkg_twerl_repo = https://github.com/lucaspiller/twerl
+pkg_twerl_commit = oauth
+
+PACKAGES += twitter_erlang
+pkg_twitter_erlang_name = twitter_erlang
+pkg_twitter_erlang_description = An Erlang Twitter client
+pkg_twitter_erlang_homepage = https://github.com/ngerakines/erlang_twitter
+pkg_twitter_erlang_fetch = git
+pkg_twitter_erlang_repo = https://github.com/ngerakines/erlang_twitter
+pkg_twitter_erlang_commit = master
+
+PACKAGES += ucol_nif
+pkg_ucol_nif_name = ucol_nif
+pkg_ucol_nif_description = ICU-based collation Erlang module
+pkg_ucol_nif_homepage = https://github.com/refuge/ucol_nif
+pkg_ucol_nif_fetch = git
+pkg_ucol_nif_repo = https://github.com/refuge/ucol_nif
+pkg_ucol_nif_commit = master
+
+PACKAGES += unicorn
+pkg_unicorn_name = unicorn
+pkg_unicorn_description = Generic configuration server
+pkg_unicorn_homepage = https://github.com/shizzard/unicorn
+pkg_unicorn_fetch = git
+pkg_unicorn_repo = https://github.com/shizzard/unicorn
+pkg_unicorn_commit = master
+
+PACKAGES += unsplit
+pkg_unsplit_name = unsplit
+pkg_unsplit_description = Resolves conflicts in Mnesia after network splits
+pkg_unsplit_homepage = https://github.com/uwiger/unsplit
+pkg_unsplit_fetch = git
+pkg_unsplit_repo = https://github.com/uwiger/unsplit
+pkg_unsplit_commit = master
+
+PACKAGES += uuid
+pkg_uuid_name = uuid
+pkg_uuid_description = Erlang UUID Implementation
+pkg_uuid_homepage = https://github.com/okeuday/uuid
+pkg_uuid_fetch = git
+pkg_uuid_repo = https://github.com/okeuday/uuid
+pkg_uuid_commit = master
+
+PACKAGES += ux
+pkg_ux_name = ux
+pkg_ux_description = Unicode eXtension for Erlang (Strings, Collation)
+pkg_ux_homepage = https://github.com/erlang-unicode/ux
+pkg_ux_fetch = git
+pkg_ux_repo = https://github.com/erlang-unicode/ux
+pkg_ux_commit = master
+
+PACKAGES += vert
+pkg_vert_name = vert
+pkg_vert_description = Erlang binding to libvirt virtualization API
+pkg_vert_homepage = https://github.com/msantos/erlang-libvirt
+pkg_vert_fetch = git
+pkg_vert_repo = https://github.com/msantos/erlang-libvirt
+pkg_vert_commit = master
+
+PACKAGES += verx
+pkg_verx_name = verx
+pkg_verx_description = Erlang implementation of the libvirtd remote protocol
+pkg_verx_homepage = https://github.com/msantos/verx
+pkg_verx_fetch = git
+pkg_verx_repo = https://github.com/msantos/verx
+pkg_verx_commit = master
+
+PACKAGES += vmq_acl
+pkg_vmq_acl_name = vmq_acl
+pkg_vmq_acl_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_acl_homepage = https://verne.mq/
+pkg_vmq_acl_fetch = git
+pkg_vmq_acl_repo = https://github.com/erlio/vmq_acl
+pkg_vmq_acl_commit = master
+
+PACKAGES += vmq_bridge
+pkg_vmq_bridge_name = vmq_bridge
+pkg_vmq_bridge_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_bridge_homepage = https://verne.mq/
+pkg_vmq_bridge_fetch = git
+pkg_vmq_bridge_repo = https://github.com/erlio/vmq_bridge
+pkg_vmq_bridge_commit = master
+
+PACKAGES += vmq_graphite
+pkg_vmq_graphite_name = vmq_graphite
+pkg_vmq_graphite_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_graphite_homepage = https://verne.mq/
+pkg_vmq_graphite_fetch = git
+pkg_vmq_graphite_repo = https://github.com/erlio/vmq_graphite
+pkg_vmq_graphite_commit = master
+
+PACKAGES += vmq_passwd
+pkg_vmq_passwd_name = vmq_passwd
+pkg_vmq_passwd_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_passwd_homepage = https://verne.mq/
+pkg_vmq_passwd_fetch = git
+pkg_vmq_passwd_repo = https://github.com/erlio/vmq_passwd
+pkg_vmq_passwd_commit = master
+
+PACKAGES += vmq_server
+pkg_vmq_server_name = vmq_server
+pkg_vmq_server_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_server_homepage = https://verne.mq/
+pkg_vmq_server_fetch = git
+pkg_vmq_server_repo = https://github.com/erlio/vmq_server
+pkg_vmq_server_commit = master
+
+PACKAGES += vmq_snmp
+pkg_vmq_snmp_name = vmq_snmp
+pkg_vmq_snmp_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_snmp_homepage = https://verne.mq/
+pkg_vmq_snmp_fetch = git
+pkg_vmq_snmp_repo = https://github.com/erlio/vmq_snmp
+pkg_vmq_snmp_commit = master
+
+PACKAGES += vmq_systree
+pkg_vmq_systree_name = vmq_systree
+pkg_vmq_systree_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_systree_homepage = https://verne.mq/
+pkg_vmq_systree_fetch = git
+pkg_vmq_systree_repo = https://github.com/erlio/vmq_systree
+pkg_vmq_systree_commit = master
+
+PACKAGES += vmstats
+pkg_vmstats_name = vmstats
+pkg_vmstats_description = Tiny Erlang app that works in conjunction with statsderl to report Erlang VM metrics to graphite
+pkg_vmstats_homepage = https://github.com/ferd/vmstats
+pkg_vmstats_fetch = git
+pkg_vmstats_repo = https://github.com/ferd/vmstats
+pkg_vmstats_commit = master
+
+PACKAGES += walrus
+pkg_walrus_name = walrus
+pkg_walrus_description = Walrus - Mustache-like Templating
+pkg_walrus_homepage = https://github.com/devinus/walrus
+pkg_walrus_fetch = git
+pkg_walrus_repo = https://github.com/devinus/walrus
+pkg_walrus_commit = master
+
+PACKAGES += webmachine
+pkg_webmachine_name = webmachine
+pkg_webmachine_description = A REST-based system for building web applications.
+pkg_webmachine_homepage = https://github.com/basho/webmachine
+pkg_webmachine_fetch = git
+pkg_webmachine_repo = https://github.com/basho/webmachine
+pkg_webmachine_commit = master
+
+PACKAGES += websocket_client
+pkg_websocket_client_name = websocket_client
+pkg_websocket_client_description = Erlang websocket client (ws and wss supported)
+pkg_websocket_client_homepage = https://github.com/jeremyong/websocket_client
+pkg_websocket_client_fetch = git
+pkg_websocket_client_repo = https://github.com/jeremyong/websocket_client
+pkg_websocket_client_commit = master
+
+PACKAGES += worker_pool
+pkg_worker_pool_name = worker_pool
+pkg_worker_pool_description = A simple Erlang worker pool
+pkg_worker_pool_homepage = https://github.com/inaka/worker_pool
+pkg_worker_pool_fetch = git
+pkg_worker_pool_repo = https://github.com/inaka/worker_pool
+pkg_worker_pool_commit = master
+
+PACKAGES += wrangler
+pkg_wrangler_name = wrangler
+pkg_wrangler_description = Import of the Wrangler svn repository.
+pkg_wrangler_homepage = http://www.cs.kent.ac.uk/projects/wrangler/Home.html
+pkg_wrangler_fetch = git
+pkg_wrangler_repo = https://github.com/RefactoringTools/wrangler
+pkg_wrangler_commit = master
+
+PACKAGES += wsock
+pkg_wsock_name = wsock
+pkg_wsock_description = Erlang library to build WebSocket clients and servers
+pkg_wsock_homepage = https://github.com/madtrick/wsock
+pkg_wsock_fetch = git
+pkg_wsock_repo = https://github.com/madtrick/wsock
+pkg_wsock_commit = master
+
+PACKAGES += xhttpc
+pkg_xhttpc_name = xhttpc
+pkg_xhttpc_description = Extensible HTTP Client for Erlang
+pkg_xhttpc_homepage = https://github.com/seriyps/xhttpc
+pkg_xhttpc_fetch = git
+pkg_xhttpc_repo = https://github.com/seriyps/xhttpc
+pkg_xhttpc_commit = master
+
+PACKAGES += xref_runner
+pkg_xref_runner_name = xref_runner
+pkg_xref_runner_description = Erlang Xref Runner (inspired by rebar xref)
+pkg_xref_runner_homepage = https://github.com/inaka/xref_runner
+pkg_xref_runner_fetch = git
+pkg_xref_runner_repo = https://github.com/inaka/xref_runner
+pkg_xref_runner_commit = master
+
+PACKAGES += yamerl
+pkg_yamerl_name = yamerl
+pkg_yamerl_description = YAML 1.2 parser in pure Erlang
+pkg_yamerl_homepage = https://github.com/yakaz/yamerl
+pkg_yamerl_fetch = git
+pkg_yamerl_repo = https://github.com/yakaz/yamerl
+pkg_yamerl_commit = master
+
+PACKAGES += yamler
+pkg_yamler_name = yamler
+pkg_yamler_description = libyaml-based YAML loader for Erlang
+pkg_yamler_homepage = https://github.com/goertzenator/yamler
+pkg_yamler_fetch = git
+pkg_yamler_repo = https://github.com/goertzenator/yamler
+pkg_yamler_commit = master
+
+PACKAGES += yaws
+pkg_yaws_name = yaws
+pkg_yaws_description = Yaws webserver
+pkg_yaws_homepage = http://yaws.hyber.org
+pkg_yaws_fetch = git
+pkg_yaws_repo = https://github.com/klacke/yaws
+pkg_yaws_commit = master
+
+PACKAGES += zab_engine
+pkg_zab_engine_name = zab_engine
+pkg_zab_engine_description = ZAB protocol implementation in Erlang
+pkg_zab_engine_homepage = https://github.com/xinmingyao/zab_engine
+pkg_zab_engine_fetch = git
+pkg_zab_engine_repo = https://github.com/xinmingyao/zab_engine
+pkg_zab_engine_commit = master
+
+PACKAGES += zabbix_sender
+pkg_zabbix_sender_name = zabbix_sender
+pkg_zabbix_sender_description = Zabbix trapper for sending data to Zabbix in pure Erlang
+pkg_zabbix_sender_homepage = https://github.com/stalkermn/zabbix_sender
+pkg_zabbix_sender_fetch = git
+pkg_zabbix_sender_repo = https://github.com/stalkermn/zabbix_sender.git
+pkg_zabbix_sender_commit = master
+
+PACKAGES += zeta
+pkg_zeta_name = zeta
+pkg_zeta_description = HTTP access log parser in Erlang
+pkg_zeta_homepage = https://github.com/s1n4/zeta
+pkg_zeta_fetch = git
+pkg_zeta_repo = https://github.com/s1n4/zeta
+pkg_zeta_commit = master
+
+PACKAGES += zippers
+pkg_zippers_name = zippers
+pkg_zippers_description = A library for functional zipper data structures in Erlang
+pkg_zippers_homepage = https://github.com/ferd/zippers
+pkg_zippers_fetch = git
+pkg_zippers_repo = https://github.com/ferd/zippers
+pkg_zippers_commit = master
+
+PACKAGES += zlists
+pkg_zlists_name = zlists
+pkg_zlists_description = Erlang lazy lists library.
+pkg_zlists_homepage = https://github.com/vjache/erlang-zlists
+pkg_zlists_fetch = git
+pkg_zlists_repo = https://github.com/vjache/erlang-zlists
+pkg_zlists_commit = master
+
+PACKAGES += zraft_lib
+pkg_zraft_lib_name = zraft_lib
+pkg_zraft_lib_description = Erlang Raft consensus protocol implementation
+pkg_zraft_lib_homepage = https://github.com/dreyk/zraft_lib
+pkg_zraft_lib_fetch = git
+pkg_zraft_lib_repo = https://github.com/dreyk/zraft_lib
+pkg_zraft_lib_commit = master
+
+PACKAGES += zucchini
+pkg_zucchini_name = zucchini
+pkg_zucchini_description = An Erlang INI parser
+pkg_zucchini_homepage = https://github.com/devinus/zucchini
+pkg_zucchini_fetch = git
+pkg_zucchini_repo = https://github.com/devinus/zucchini
+pkg_zucchini_commit = master
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: search
+
+define pkg_print
+ $(verbose) printf "%s\n" \
+ $(if $(call core_eq,$(1),$(pkg_$(1)_name)),,"Pkg name: $(1)") \
+ "App name: $(pkg_$(1)_name)" \
+ "Description: $(pkg_$(1)_description)" \
+ "Home page: $(pkg_$(1)_homepage)" \
+ "Fetch with: $(pkg_$(1)_fetch)" \
+ "Repository: $(pkg_$(1)_repo)" \
+ "Commit: $(pkg_$(1)_commit)" \
+ ""
+
+endef
+
+search:
+ifdef q
+ $(foreach p,$(PACKAGES), \
+ $(if $(findstring $(call core_lc,$(q)),$(call core_lc,$(pkg_$(p)_name) $(pkg_$(p)_description))), \
+ $(call pkg_print,$(p))))
+else
+ $(foreach p,$(PACKAGES),$(call pkg_print,$(p)))
+endif
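+
+# Example invocation (illustrative, not part of the file): the search is
+# case-insensitive and matches both package names and descriptions.
+#
+#     $ make search q=websocket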
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-deps clean-tmp-deps.log
+
+# Configuration.
+
+ifdef OTP_DEPS
+$(warning The variable OTP_DEPS is deprecated in favor of LOCAL_DEPS.)
+endif
+
+IGNORE_DEPS ?=
+export IGNORE_DEPS
+
+APPS_DIR ?= $(CURDIR)/apps
+export APPS_DIR
+
+DEPS_DIR ?= $(CURDIR)/deps
+export DEPS_DIR
+
+REBAR_DEPS_DIR = $(DEPS_DIR)
+export REBAR_DEPS_DIR
+
+REBAR_GIT ?= https://github.com/rebar/rebar
+REBAR_COMMIT ?= 576e12171ab8d69b048b827b92aa65d067deea01
+
+# External "early" plugins (see core/plugins.mk for regular plugins).
+# They both use the core_dep_plugin macro.
+
+define core_dep_plugin
+ifeq ($(2),$(PROJECT))
+-include $$(patsubst $(PROJECT)/%,%,$(1))
+else
+-include $(DEPS_DIR)/$(1)
+
+$(DEPS_DIR)/$(1): $(DEPS_DIR)/$(2) ;
+endif
+endef
+
+DEP_EARLY_PLUGINS ?=
+
+$(foreach p,$(DEP_EARLY_PLUGINS),\
+ $(eval $(if $(findstring /,$p),\
+ $(call core_dep_plugin,$p,$(firstword $(subst /, ,$p))),\
+ $(call core_dep_plugin,$p/early-plugins.mk,$p))))
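+
+# Illustrative configuration (hypothetical dependency "mydep"): a bare name
+# loads $(DEPS_DIR)/mydep/early-plugins.mk, while an entry containing a
+# slash is included as-is relative to $(DEPS_DIR).
+#
+#     DEP_EARLY_PLUGINS = mydep
+#     DEP_EARLY_PLUGINS = mydep/mk/custom-early.mk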
+
+# Query functions.
+
+query_fetch_method = $(if $(dep_$(1)),$(call _qfm_dep,$(word 1,$(dep_$(1)))),$(call _qfm_pkg,$(1)))
+_qfm_dep = $(if $(dep_fetch_$(1)),$(1),$(if $(IS_DEP),legacy,fail))
+_qfm_pkg = $(if $(pkg_$(1)_fetch),$(pkg_$(1)_fetch),fail)
+
+query_name = $(if $(dep_$(1)),$(1),$(if $(pkg_$(1)_name),$(pkg_$(1)_name),$(1)))
+
+query_repo = $(call _qr,$(1),$(call query_fetch_method,$(1)))
+_qr = $(if $(query_repo_$(2)),$(call query_repo_$(2),$(1)),$(call dep_repo,$(1)))
+
+query_repo_default = $(if $(dep_$(1)),$(word 2,$(dep_$(1))),$(pkg_$(1)_repo))
+query_repo_git = $(patsubst git://github.com/%,https://github.com/%,$(call query_repo_default,$(1)))
+query_repo_git-subfolder = $(call query_repo_git,$(1))
+query_repo_git-submodule = -
+query_repo_hg = $(call query_repo_default,$(1))
+query_repo_svn = $(call query_repo_default,$(1))
+query_repo_cp = $(call query_repo_default,$(1))
+query_repo_ln = $(call query_repo_default,$(1))
+query_repo_hex = https://hex.pm/packages/$(if $(word 3,$(dep_$(1))),$(word 3,$(dep_$(1))),$(1))
+query_repo_fail = -
+query_repo_legacy = -
+
+query_version = $(call _qv,$(1),$(call query_fetch_method,$(1)))
+_qv = $(if $(query_version_$(2)),$(call query_version_$(2),$(1)),$(call dep_commit,$(1)))
+
+query_version_default = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(word 3,$(dep_$(1))),$(pkg_$(1)_commit)))
+query_version_git = $(call query_version_default,$(1))
+query_version_git-subfolder = $(call query_version_git,$(1))
+query_version_git-submodule = -
+query_version_hg = $(call query_version_default,$(1))
+query_version_svn = -
+query_version_cp = -
+query_version_ln = -
+query_version_hex = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(word 2,$(dep_$(1))),$(pkg_$(1)_commit)))
+query_version_fail = -
+query_version_legacy = -
+
+query_extra = $(call _qe,$(1),$(call query_fetch_method,$(1)))
+_qe = $(if $(query_extra_$(2)),$(call query_extra_$(2),$(1)),-)
+
+query_extra_git = -
+query_extra_git-subfolder = $(if $(dep_$(1)),subfolder=$(word 4,$(dep_$(1))),-)
+query_extra_git-submodule = -
+query_extra_hg = -
+query_extra_svn = -
+query_extra_cp = -
+query_extra_ln = -
+query_extra_hex = $(if $(dep_$(1)),package-name=$(word 3,$(dep_$(1))),-)
+query_extra_fail = -
+query_extra_legacy = -
+
+query_absolute_path = $(addprefix $(DEPS_DIR)/,$(call query_name,$(1)))
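+
+# For a hypothetical dependency declared as
+# "dep_cowboy = git https://github.com/ninenines/cowboy 2.9.0", these
+# functions resolve as follows:
+#
+#     $(call query_fetch_method,cowboy) -> git
+#     $(call query_repo,cowboy)         -> https://github.com/ninenines/cowboy
+#     $(call query_version,cowboy)      -> 2.9.0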
+
+# Deprecated legacy query functions.
+dep_fetch = $(call query_fetch_method,$(1))
+dep_name = $(call query_name,$(1))
+dep_repo = $(call query_repo_git,$(1))
+dep_commit = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(if $(filter hex,$(word 1,$(dep_$(1)))),$(word 2,$(dep_$(1))),$(word 3,$(dep_$(1)))),$(pkg_$(1)_commit)))
+
+LOCAL_DEPS_DIRS = $(foreach a,$(LOCAL_DEPS),$(if $(wildcard $(APPS_DIR)/$(a)),$(APPS_DIR)/$(a)))
+ALL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(foreach dep,$(filter-out $(IGNORE_DEPS),$(BUILD_DEPS) $(DEPS)),$(call dep_name,$(dep))))
+
+# When we are calling an app directly we don't want to include it here;
+# otherwise it'll be treated both as an app and as the top-level project.
+ALL_APPS_DIRS = $(if $(wildcard $(APPS_DIR)/),$(filter-out $(APPS_DIR),$(shell find $(APPS_DIR) -maxdepth 1 -type d)))
+ifdef ROOT_DIR
+ifndef IS_APP
+ALL_APPS_DIRS := $(filter-out $(APPS_DIR)/$(notdir $(CURDIR)),$(ALL_APPS_DIRS))
+endif
+endif
+
+ifeq ($(filter $(APPS_DIR) $(DEPS_DIR),$(subst :, ,$(ERL_LIBS))),)
+ifeq ($(ERL_LIBS),)
+ ERL_LIBS = $(APPS_DIR):$(DEPS_DIR)
+else
+ ERL_LIBS := $(ERL_LIBS):$(APPS_DIR):$(DEPS_DIR)
+endif
+endif
+export ERL_LIBS
+
+export NO_AUTOPATCH
+
+# Verbosity.
+
+dep_verbose_0 = @echo " DEP $1 ($(call dep_commit,$1))";
+dep_verbose_2 = set -x;
+dep_verbose = $(dep_verbose_$(V))
+
+# Optimization: don't recompile deps unless truly necessary.
+
+ifndef IS_DEP
+ifneq ($(MAKELEVEL),0)
+$(shell rm -f ebin/dep_built)
+endif
+endif
+
+# Core targets.
+
+ALL_APPS_DIRS_TO_BUILD = $(if $(LOCAL_DEPS_DIRS)$(IS_APP),$(LOCAL_DEPS_DIRS),$(ALL_APPS_DIRS))
+
+apps:: $(ALL_APPS_DIRS) clean-tmp-deps.log | $(ERLANG_MK_TMP)
+# Create ebin directory for all apps to make sure Erlang recognizes them
+# as proper OTP applications when using -include_lib. This is a temporary
+# fix; a proper fix would be to compile apps/* in the right order.
+ifndef IS_APP
+ifneq ($(ALL_APPS_DIRS),)
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ mkdir -p $$dep/ebin; \
+ done
+endif
+endif
+# At the toplevel: if LOCAL_DEPS is defined with at least one local app, only
+# compile that list of apps. Otherwise, compile everything.
+# Within an app: compile all LOCAL_DEPS that are (uncompiled) local apps.
+ifneq ($(ALL_APPS_DIRS_TO_BUILD),)
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS_TO_BUILD); do \
+ if grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/apps.log; then \
+ :; \
+ else \
+ echo $$dep >> $(ERLANG_MK_TMP)/apps.log; \
+ $(MAKE) -C $$dep $(if $(IS_TEST),test-build-app) IS_APP=1; \
+ fi \
+ done
+endif
+
+clean-tmp-deps.log:
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_TMP)/apps.log $(ERLANG_MK_TMP)/deps.log
+endif
+
+# Erlang.mk does not rebuild dependencies after they have been compiled
+# once. If a developer is working on the top-level project and on some
+# dependencies at the same time, they may want to change this behavior.
+# There are two solutions, illustrated by the example invocations below:
+# 1. Set `FULL=1` so that all dependencies are visited and
+#    recursively recompiled if necessary.
+# 2. Set `FORCE_REBUILD=` to the specific list of dependencies that
+#    should be recompiled (instead of the whole set).
+
+FORCE_REBUILD ?=
+
+ifeq ($(origin FULL),undefined)
+ifneq ($(strip $(force_rebuild_dep)$(FORCE_REBUILD)),)
+define force_rebuild_dep
+echo "$(FORCE_REBUILD)" | grep -qw "$$(basename "$1")"
+endef
+endif
+endif
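+
+# Example invocations (illustrative):
+#
+#     $ make FULL=1                    # revisit and rebuild all dependencies
+#     $ make FORCE_REBUILD="cowboy"    # force-rebuild only the cowboy dep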
+
+ifneq ($(SKIP_DEPS),)
+deps::
+else
+deps:: $(ALL_DEPS_DIRS) apps clean-tmp-deps.log | $(ERLANG_MK_TMP)
+ifneq ($(ALL_DEPS_DIRS),)
+ $(verbose) set -e; for dep in $(ALL_DEPS_DIRS); do \
+ if grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/deps.log; then \
+ :; \
+ else \
+ echo $$dep >> $(ERLANG_MK_TMP)/deps.log; \
+ if [ -z "$(strip $(FULL))" ] $(if $(force_rebuild_dep),&& ! ($(call force_rebuild_dep,$$dep)),) && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ elif [ -f $$dep/GNUmakefile ] || [ -f $$dep/makefile ] || [ -f $$dep/Makefile ]; then \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ else \
+ echo "Error: No Makefile to build dependency $$dep." >&2; \
+ exit 2; \
+ fi \
+ fi \
+ done
+endif
+endif
+
+# Deps related targets.
+
+# @todo rename GNUmakefile and makefile into Makefile first, if they exist
+# While the Makefile could also be named GNUmakefile or makefile,
+# in practice only Makefile has been needed so far.
+define dep_autopatch
+ if [ -f $(DEPS_DIR)/$(1)/erlang.mk ]; then \
+ rm -rf $(DEPS_DIR)/$1/ebin/; \
+ $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+ $(call dep_autopatch_erlang_mk,$(1)); \
+ elif [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+ if [ -f $(DEPS_DIR)/$1/rebar.lock ]; then \
+ $(call dep_autopatch2,$1); \
+ elif [ 0 != `grep -c "include ../\w*\.mk" $(DEPS_DIR)/$(1)/Makefile` ]; then \
+ $(call dep_autopatch2,$(1)); \
+ elif [ 0 != `grep -ci "^[^#].*rebar" $(DEPS_DIR)/$(1)/Makefile` ]; then \
+ $(call dep_autopatch2,$(1)); \
+ elif [ -n "`find $(DEPS_DIR)/$(1)/ -type f -name \*.mk -not -name erlang.mk -exec grep -i "^[^#].*rebar" '{}' \;`" ]; then \
+ $(call dep_autopatch2,$(1)); \
+ fi \
+ else \
+ if [ ! -d $(DEPS_DIR)/$(1)/src/ ]; then \
+ $(call dep_autopatch_noop,$(1)); \
+ else \
+ $(call dep_autopatch2,$(1)); \
+ fi \
+ fi
+endef
+
+define dep_autopatch2
+ ! test -f $(DEPS_DIR)/$1/ebin/$1.app || \
+ mv -n $(DEPS_DIR)/$1/ebin/$1.app $(DEPS_DIR)/$1/src/$1.app.src; \
+ rm -f $(DEPS_DIR)/$1/ebin/$1.app; \
+ if [ -f $(DEPS_DIR)/$1/src/$1.app.src.script ]; then \
+ $(call erlang,$(call dep_autopatch_appsrc_script.erl,$(1))); \
+ fi; \
+ $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+ if [ -f $(DEPS_DIR)/$(1)/rebar -o -f $(DEPS_DIR)/$(1)/rebar.config -o -f $(DEPS_DIR)/$(1)/rebar.config.script -o -f $(DEPS_DIR)/$1/rebar.lock ]; then \
+ $(call dep_autopatch_fetch_rebar); \
+ $(call dep_autopatch_rebar,$(1)); \
+ else \
+ $(call dep_autopatch_gen,$(1)); \
+ fi
+endef
+
+define dep_autopatch_noop
+ printf "noop:\n" > $(DEPS_DIR)/$(1)/Makefile
+endef
+
+# Replace "include erlang.mk" with a line that will load the parent Erlang.mk
+# if given. Do it for all 3 possible Makefile file names.
+ifeq ($(NO_AUTOPATCH_ERLANG_MK),)
+define dep_autopatch_erlang_mk
+ for f in Makefile makefile GNUmakefile; do \
+ if [ -f $(DEPS_DIR)/$1/$$f ]; then \
+ sed -i.bak s/'include *erlang.mk'/'include $$(if $$(ERLANG_MK_FILENAME),$$(ERLANG_MK_FILENAME),erlang.mk)'/ $(DEPS_DIR)/$1/$$f; \
+ fi \
+ done
+endef
+else
+define dep_autopatch_erlang_mk
+ :
+endef
+endif
+
+define dep_autopatch_gen
+ printf "%s\n" \
+ "ERLC_OPTS = +debug_info" \
+ "include ../../erlang.mk" > $(DEPS_DIR)/$(1)/Makefile
+endef
+
+# We use flock/lockf when available to avoid concurrency issues.
+define dep_autopatch_fetch_rebar
+ if command -v flock >/dev/null; then \
+ flock $(ERLANG_MK_TMP)/rebar.lock sh -c "$(call dep_autopatch_fetch_rebar2)"; \
+ elif command -v lockf >/dev/null; then \
+ lockf $(ERLANG_MK_TMP)/rebar.lock sh -c "$(call dep_autopatch_fetch_rebar2)"; \
+ else \
+ $(call dep_autopatch_fetch_rebar2); \
+ fi
+endef
+
+define dep_autopatch_fetch_rebar2
+ if [ ! -d $(ERLANG_MK_TMP)/rebar ]; then \
+ git clone -q -n -- $(REBAR_GIT) $(ERLANG_MK_TMP)/rebar; \
+ cd $(ERLANG_MK_TMP)/rebar; \
+ git checkout -q $(REBAR_COMMIT); \
+ ./bootstrap; \
+ cd -; \
+ fi
+endef
+
+define dep_autopatch_rebar
+ if [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+ mv $(DEPS_DIR)/$(1)/Makefile $(DEPS_DIR)/$(1)/Makefile.orig.mk; \
+ fi; \
+ $(call erlang,$(call dep_autopatch_rebar.erl,$(1))); \
+ rm -f $(DEPS_DIR)/$(1)/ebin/$(1).app
+endef
+
+define dep_autopatch_rebar.erl
+ application:load(rebar),
+ application:set_env(rebar, log_level, debug),
+ rmemo:start(),
+ Conf1 = case file:consult("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config)") of
+ {ok, Conf0} -> Conf0;
+ _ -> []
+ end,
+ {Conf, OsEnv} = fun() ->
+ case filelib:is_file("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)") of
+ false -> {Conf1, []};
+ true ->
+ Bindings0 = erl_eval:new_bindings(),
+ Bindings1 = erl_eval:add_binding('CONFIG', Conf1, Bindings0),
+ Bindings = erl_eval:add_binding('SCRIPT', "$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings1),
+ Before = os:getenv(),
+ {ok, Conf2} = file:script("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings),
+ {Conf2, lists:foldl(fun(E, Acc) -> lists:delete(E, Acc) end, os:getenv(), Before)}
+ end
+ end(),
+ Write = fun (Text) ->
+ file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/Makefile)", Text, [append])
+ end,
+ Escape = fun (Text) ->
+ re:replace(Text, "\\\\$$", "\$$$$", [global, {return, list}])
+ end,
+ Write("IGNORE_DEPS += edown eper eunit_formatters meck node_package "
+ "rebar_lock_deps_plugin rebar_vsn_plugin reltool_util\n"),
+ Write("C_SRC_DIR = /path/do/not/exist\n"),
+ Write("C_SRC_TYPE = rebar\n"),
+ Write("DRV_CFLAGS = -fPIC\nexport DRV_CFLAGS\n"),
+ Write(["ERLANG_ARCH = ", rebar_utils:wordsize(), "\nexport ERLANG_ARCH\n"]),
+ ToList = fun
+ (V) when is_atom(V) -> atom_to_list(V);
+ (V) when is_list(V) -> "'\\"" ++ V ++ "\\"'"
+ end,
+ fun() ->
+ Write("ERLC_OPTS = +debug_info\nexport ERLC_OPTS\n"),
+ case lists:keyfind(erl_opts, 1, Conf) of
+ false -> ok;
+ {_, ErlOpts} ->
+ lists:foreach(fun
+ ({d, D}) ->
+ Write("ERLC_OPTS += -D" ++ ToList(D) ++ "=1\n");
+ ({d, DKey, DVal}) ->
+ Write("ERLC_OPTS += -D" ++ ToList(DKey) ++ "=" ++ ToList(DVal) ++ "\n");
+ ({i, I}) ->
+ Write(["ERLC_OPTS += -I ", I, "\n"]);
+ ({platform_define, Regex, D}) ->
+ case rebar_utils:is_arch(Regex) of
+ true -> Write("ERLC_OPTS += -D" ++ ToList(D) ++ "=1\n");
+ false -> ok
+ end;
+ ({parse_transform, PT}) ->
+ Write("ERLC_OPTS += +'{parse_transform, " ++ ToList(PT) ++ "}'\n");
+ (_) -> ok
+ end, ErlOpts)
+ end,
+ Write("\n")
+ end(),
+ GetHexVsn = fun(N, NP) ->
+ case file:consult("$(call core_native_path,$(DEPS_DIR)/$1/rebar.lock)") of
+ {ok, Lock} ->
+ io:format("~p~n", [Lock]),
+ case lists:keyfind("1.1.0", 1, Lock) of
+ {_, LockPkgs} ->
+ io:format("~p~n", [LockPkgs]),
+ case lists:keyfind(atom_to_binary(N, latin1), 1, LockPkgs) of
+ {_, {pkg, _, Vsn}, _} ->
+ io:format("~p~n", [Vsn]),
+ {N, {hex, NP, binary_to_list(Vsn)}};
+ _ ->
+ false
+ end;
+ _ ->
+ false
+ end;
+ _ ->
+ false
+ end
+ end,
+ SemVsn = fun
+ ("~>" ++ S0) ->
+ S = case S0 of
+ " " ++ S1 -> S1;
+ _ -> S0
+ end,
+ case length([ok || $$. <- S]) of
+ 0 -> S ++ ".0.0";
+ 1 -> S ++ ".0";
+ _ -> S
+ end;
+ (S) -> S
+ end,
+ fun() ->
+ File = case lists:keyfind(deps, 1, Conf) of
+ false -> [];
+ {_, Deps} ->
+ [begin case case Dep of
+ N when is_atom(N) -> GetHexVsn(N, N);
+ {N, S} when is_atom(N), is_list(S) -> {N, {hex, N, SemVsn(S)}};
+ {N, {pkg, NP}} when is_atom(N) -> GetHexVsn(N, NP);
+ {N, S, {pkg, NP}} -> {N, {hex, NP, S}};
+ {N, S} when is_tuple(S) -> {N, S};
+ {N, _, S} -> {N, S};
+ {N, _, S, _} -> {N, S};
+ _ -> false
+ end of
+ false -> ok;
+ {Name, Source} ->
+ {Method, Repo, Commit} = case Source of
+ {hex, NPV, V} -> {hex, V, NPV};
+ {git, R} -> {git, R, master};
+ {M, R, {branch, C}} -> {M, R, C};
+ {M, R, {ref, C}} -> {M, R, C};
+ {M, R, {tag, C}} -> {M, R, C};
+ {M, R, C} -> {M, R, C}
+ end,
+ Write(io_lib:format("DEPS += ~s\ndep_~s = ~s ~s ~s~n", [Name, Name, Method, Repo, Commit]))
+ end end || Dep <- Deps]
+ end
+ end(),
+ fun() ->
+ case lists:keyfind(erl_first_files, 1, Conf) of
+ false -> ok;
+ {_, Files} ->
+ Names = [[" ", case lists:reverse(F) of
+ "lre." ++ Elif -> lists:reverse(Elif);
+ "lrx." ++ Elif -> lists:reverse(Elif);
+ "lry." ++ Elif -> lists:reverse(Elif);
+ Elif -> lists:reverse(Elif)
+ end] || "src/" ++ F <- Files],
+ Write(io_lib:format("COMPILE_FIRST +=~s\n", [Names]))
+ end
+ end(),
+ Write("\n\nrebar_dep: preprocess pre-deps deps pre-app app\n"),
+ Write("\npreprocess::\n"),
+ Write("\npre-deps::\n"),
+ Write("\npre-app::\n"),
+ PatchHook = fun(Cmd) ->
+ Cmd2 = re:replace(Cmd, "^([g]?make)(.*)( -C.*)", "\\\\1\\\\3\\\\2", [{return, list}]),
+ case Cmd2 of
+ "make -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1);
+ "gmake -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1);
+ "make " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1);
+ "gmake " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1);
+ _ -> Escape(Cmd)
+ end
+ end,
+ fun() ->
+ case lists:keyfind(pre_hooks, 1, Conf) of
+ false -> ok;
+ {_, Hooks} ->
+ [case H of
+ {'get-deps', Cmd} ->
+ Write("\npre-deps::\n\t" ++ PatchHook(Cmd) ++ "\n");
+ {compile, Cmd} ->
+ Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n");
+ {Regex, compile, Cmd} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n");
+ false -> ok
+ end;
+ _ -> ok
+ end || H <- Hooks]
+ end
+ end(),
+ ShellToMk = fun(V0) ->
+ V1 = re:replace(V0, "[$$][(]", "$$\(shell ", [global]),
+ V = re:replace(V1, "([$$])(?![(])(\\\\w*)", "\\\\1(\\\\2)", [global]),
+ re:replace(V, "-Werror\\\\b", "", [{return, list}, global])
+ end,
+ PortSpecs = fun() ->
+ case lists:keyfind(port_specs, 1, Conf) of
+ false ->
+ case filelib:is_dir("$(call core_native_path,$(DEPS_DIR)/$1/c_src)") of
+ false -> [];
+ true ->
+ [{"priv/" ++ proplists:get_value(so_name, Conf, "$(1)_drv.so"),
+ proplists:get_value(port_sources, Conf, ["c_src/*.c"]), []}]
+ end;
+ {_, Specs} ->
+ lists:flatten([case S of
+ {Output, Input} -> {ShellToMk(Output), Input, []};
+ {Regex, Output, Input} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {ShellToMk(Output), Input, []};
+ false -> []
+ end;
+ {Regex, Output, Input, [{env, Env}]} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {ShellToMk(Output), Input, Env};
+ false -> []
+ end
+ end || S <- Specs])
+ end
+ end(),
+ PortSpecWrite = fun (Text) ->
+ file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/c_src/Makefile.erlang.mk)", Text, [append])
+ end,
+ case PortSpecs of
+ [] -> ok;
+ _ ->
+ Write("\npre-app::\n\t@$$\(MAKE) --no-print-directory -f c_src/Makefile.erlang.mk\n"),
+ PortSpecWrite(io_lib:format("ERL_CFLAGS ?= -finline-functions -Wall -fPIC -I \\"~s/erts-~s/include\\" -I \\"~s\\"\n",
+ [code:root_dir(), erlang:system_info(version), code:lib_dir(erl_interface, include)])),
+ PortSpecWrite(io_lib:format("ERL_LDFLAGS ?= -L \\"~s\\" -lei\n",
+ [code:lib_dir(erl_interface, lib)])),
+ [PortSpecWrite(["\n", E, "\n"]) || E <- OsEnv],
+ FilterEnv = fun(Env) ->
+ lists:flatten([case E of
+ {_, _} -> E;
+ {Regex, K, V} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {K, V};
+ false -> []
+ end
+ end || E <- Env])
+ end,
+ MergeEnv = fun(Env) ->
+ lists:foldl(fun ({K, V}, Acc) ->
+ case lists:keyfind(K, 1, Acc) of
+ false -> [{K, rebar_utils:expand_env_variable(V, K, "")}|Acc];
+ {_, V0} -> [{K, rebar_utils:expand_env_variable(V, K, V0)}|Acc]
+ end
+ end, [], Env)
+ end,
+ PortEnv = case lists:keyfind(port_env, 1, Conf) of
+ false -> [];
+ {_, PortEnv0} -> FilterEnv(PortEnv0)
+ end,
+ PortSpec = fun ({Output, Input0, Env}) ->
+ filelib:ensure_dir("$(call core_native_path,$(DEPS_DIR)/$1/)" ++ Output),
+ Input = [[" ", I] || I <- Input0],
+ PortSpecWrite([
+ [["\n", K, " = ", ShellToMk(V)] || {K, V} <- lists:reverse(MergeEnv(PortEnv))],
+ case $(PLATFORM) of
+ darwin -> "\n\nLDFLAGS += -flat_namespace -undefined suppress";
+ _ -> ""
+ end,
+ "\n\nall:: ", Output, "\n\t@:\n\n",
+ "%.o: %.c\n\t$$\(CC) -c -o $$\@ $$\< $$\(CFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.C\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.cc\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.cpp\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ [[Output, ": ", K, " += ", ShellToMk(V), "\n"] || {K, V} <- lists:reverse(MergeEnv(FilterEnv(Env)))],
+ Output, ": $$\(foreach ext,.c .C .cc .cpp,",
+ "$$\(patsubst %$$\(ext),%.o,$$\(filter %$$\(ext),$$\(wildcard", Input, "))))\n",
+ "\t$$\(CC) -o $$\@ $$\? $$\(LDFLAGS) $$\(ERL_LDFLAGS) $$\(DRV_LDFLAGS) $$\(EXE_LDFLAGS)",
+ case {filename:extension(Output), $(PLATFORM)} of
+ {[], _} -> "\n";
+ {_, darwin} -> "\n";
+ _ -> " -shared\n"
+ end])
+ end,
+ [PortSpec(S) || S <- PortSpecs]
+ end,
+ fun() ->
+ case lists:keyfind(plugins, 1, Conf) of
+ false -> ok;
+ {_, Plugins0} ->
+ Plugins = [P || P <- Plugins0, is_tuple(P)],
+ case lists:keyfind('lfe-compile', 1, Plugins) of
+ false -> ok;
+ _ -> Write("\nBUILD_DEPS = lfe lfe.mk\ndep_lfe.mk = git https://github.com/ninenines/lfe.mk master\nDEP_PLUGINS = lfe.mk\n")
+ end
+ end
+ end(),
+ Write("\ninclude $$\(if $$\(ERLANG_MK_FILENAME),$$\(ERLANG_MK_FILENAME),erlang.mk)"),
+ RunPlugin = fun(Plugin, Step) ->
+ case erlang:function_exported(Plugin, Step, 2) of
+ false -> ok;
+ true ->
+ c:cd("$(call core_native_path,$(DEPS_DIR)/$1/)"),
+ Ret = Plugin:Step({config, "", Conf, dict:new(), dict:new(), dict:new(),
+ dict:store(base_dir, "", dict:new())}, undefined),
+ io:format("rebar plugin ~p step ~p ret ~p~n", [Plugin, Step, Ret])
+ end
+ end,
+ fun() ->
+ case lists:keyfind(plugins, 1, Conf) of
+ false -> ok;
+ {_, Plugins0} ->
+ Plugins = [P || P <- Plugins0, is_atom(P)],
+ [begin
+ case lists:keyfind(deps, 1, Conf) of
+ false -> ok;
+ {_, Deps} ->
+ case lists:keyfind(P, 1, Deps) of
+ false -> ok;
+ _ ->
+ Path = "$(call core_native_path,$(DEPS_DIR)/)" ++ atom_to_list(P),
+ io:format("~s", [os:cmd("$(MAKE) -C $(call core_native_path,$(DEPS_DIR)/$1) " ++ Path)]),
+ io:format("~s", [os:cmd("$(MAKE) -C " ++ Path ++ " IS_DEP=1")]),
+ code:add_patha(Path ++ "/ebin")
+ end
+ end
+ end || P <- Plugins],
+ [case code:load_file(P) of
+ {module, P} -> ok;
+ _ ->
+ case lists:keyfind(plugin_dir, 1, Conf) of
+ false -> ok;
+ {_, PluginsDir} ->
+ ErlFile = "$(call core_native_path,$(DEPS_DIR)/$1/)" ++ PluginsDir ++ "/" ++ atom_to_list(P) ++ ".erl",
+ {ok, P, Bin} = compile:file(ErlFile, [binary]),
+ {module, P} = code:load_binary(P, ErlFile, Bin)
+ end
+ end || P <- Plugins],
+ [RunPlugin(P, preprocess) || P <- Plugins],
+ [RunPlugin(P, pre_compile) || P <- Plugins],
+ [RunPlugin(P, compile) || P <- Plugins]
+ end
+ end(),
+ halt()
+endef
+
+define dep_autopatch_appsrc_script.erl
+ AppSrc = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+ AppSrcScript = AppSrc ++ ".script",
+ {ok, Conf0} = file:consult(AppSrc),
+ Bindings0 = erl_eval:new_bindings(),
+ Bindings1 = erl_eval:add_binding('CONFIG', Conf0, Bindings0),
+ Bindings = erl_eval:add_binding('SCRIPT', AppSrcScript, Bindings1),
+ Conf = case file:script(AppSrcScript, Bindings) of
+ {ok, [C]} -> C;
+ {ok, C} -> C
+ end,
+ ok = file:write_file(AppSrc, io_lib:format("~p.~n", [Conf])),
+ halt()
+endef
+
+define dep_autopatch_appsrc.erl
+ AppSrcOut = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+ AppSrcIn = case filelib:is_regular(AppSrcOut) of false -> "$(call core_native_path,$(DEPS_DIR)/$1/ebin/$1.app)"; true -> AppSrcOut end,
+ case filelib:is_regular(AppSrcIn) of
+ false -> ok;
+ true ->
+ {ok, [{application, $(1), L0}]} = file:consult(AppSrcIn),
+ L1 = lists:keystore(modules, 1, L0, {modules, []}),
+ L2 = case lists:keyfind(vsn, 1, L1) of
+ {_, git} -> lists:keyreplace(vsn, 1, L1, {vsn, lists:droplast(os:cmd("git -C $(DEPS_DIR)/$1 describe --dirty --tags --always"))});
+ {_, {cmd, _}} -> lists:keyreplace(vsn, 1, L1, {vsn, "cmd"});
+ _ -> L1
+ end,
+ L3 = case lists:keyfind(registered, 1, L2) of false -> [{registered, []}|L2]; _ -> L2 end,
+ ok = file:write_file(AppSrcOut, io_lib:format("~p.~n", [{application, $(1), L3}])),
+ case AppSrcOut of AppSrcIn -> ok; _ -> ok = file:delete(AppSrcIn) end
+ end,
+ halt()
+endef
+
+define dep_fetch_git
+ git clone -q -n -- $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && git checkout -q $(call dep_commit,$(1));
+endef
+
+define dep_fetch_git-subfolder
+ mkdir -p $(ERLANG_MK_TMP)/git-subfolder; \
+ git clone -q -n -- $(call dep_repo,$1) \
+ $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1); \
+ cd $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1) \
+ && git checkout -q $(call dep_commit,$1); \
+ ln -s $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1)/$(word 4,$(dep_$(1))) \
+ $(DEPS_DIR)/$(call dep_name,$1);
+endef
+
+define dep_fetch_git-submodule
+ git submodule update --init -- $(DEPS_DIR)/$1;
+endef
+
+define dep_fetch_hg
+ hg clone -q -U $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && hg update -q $(call dep_commit,$(1));
+endef
+
+define dep_fetch_svn
+ svn checkout -q $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+define dep_fetch_cp
+ cp -R $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+define dep_fetch_ln
+ ln -s $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+# Hex dependencies specify a package version directly. There is no need to
+# look them up in the Erlang.mk package index.
+define dep_fetch_hex
+ mkdir -p $(ERLANG_MK_TMP)/hex $(DEPS_DIR)/$1; \
+ $(call core_http_get,$(ERLANG_MK_TMP)/hex/$1.tar,\
+ https://repo.hex.pm/tarballs/$(if $(word 3,$(dep_$1)),$(word 3,$(dep_$1)),$1)-$(strip $(word 2,$(dep_$1))).tar); \
+ tar -xOf $(ERLANG_MK_TMP)/hex/$1.tar contents.tar.gz | tar -C $(DEPS_DIR)/$1 -xzf -;
+endef
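+
+# Example declaration (illustrative; assumes the jsx package exists on Hex
+# at this version):
+#
+#     DEPS = jsx
+#     dep_jsx = hex 3.1.0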
+
+define dep_fetch_fail
+ echo "Error: Unknown or invalid dependency: $(1)." >&2; \
+ exit 78;
+endef
+
+# Kept for compatibility with older Erlang.mk configurations.
+define dep_fetch_legacy
+ $(warning WARNING: '$(1)' dependency configuration uses deprecated format.) \
+ git clone -q -n -- $(word 1,$(dep_$(1))) $(DEPS_DIR)/$(1); \
+ cd $(DEPS_DIR)/$(1) && git checkout -q $(if $(word 2,$(dep_$(1))),$(word 2,$(dep_$(1))),master);
+endef
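+
+# For reference, the deprecated format is a bare repository URL followed by
+# an optional commit, with no fetch method (hypothetical example):
+#
+#     dep_mydep = https://github.com/user/mydep master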
+
+define dep_target
+$(DEPS_DIR)/$(call dep_name,$1): | $(ERLANG_MK_TMP)
+ $(eval DEP_NAME := $(call dep_name,$1))
+ $(eval DEP_STR := $(if $(filter $1,$(DEP_NAME)),$1,"$1 ($(DEP_NAME))"))
+ $(verbose) if test -d $(APPS_DIR)/$(DEP_NAME); then \
+ echo "Error: Dependency" $(DEP_STR) "conflicts with application found in $(APPS_DIR)/$(DEP_NAME)." >&2; \
+ exit 17; \
+ fi
+ $(verbose) mkdir -p $(DEPS_DIR)
+ $(dep_verbose) $(call dep_fetch_$(strip $(call dep_fetch,$(1))),$(1))
+ $(verbose) if [ -f $(DEPS_DIR)/$(1)/configure.ac -o -f $(DEPS_DIR)/$(1)/configure.in ] \
+ && [ ! -f $(DEPS_DIR)/$(1)/configure ]; then \
+ echo " AUTO " $(DEP_STR); \
+ cd $(DEPS_DIR)/$(1) && autoreconf -Wall -vif -I m4; \
+ fi
+ - $(verbose) if [ -f $(DEPS_DIR)/$(DEP_NAME)/configure ]; then \
+ echo " CONF " $(DEP_STR); \
+ cd $(DEPS_DIR)/$(DEP_NAME) && ./configure; \
+ fi
+ifeq ($(filter $(1),$(NO_AUTOPATCH)),)
+ $(verbose) $$(MAKE) --no-print-directory autopatch-$(DEP_NAME)
+endif
+
+.PHONY: autopatch-$(call dep_name,$1)
+
+autopatch-$(call dep_name,$1)::
+ $(verbose) if [ "$(1)" = "amqp_client" -a "$(RABBITMQ_CLIENT_PATCH)" ]; then \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \
+ echo " PATCH Downloading rabbitmq-codegen"; \
+ git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \
+ fi; \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-server ]; then \
+ echo " PATCH Downloading rabbitmq-server"; \
+ git clone https://github.com/rabbitmq/rabbitmq-server.git $(DEPS_DIR)/rabbitmq-server; \
+ fi; \
+ ln -s $(DEPS_DIR)/amqp_client/deps/rabbit_common-0.0.0 $(DEPS_DIR)/rabbit_common; \
+ elif [ "$(1)" = "rabbit" -a "$(RABBITMQ_SERVER_PATCH)" ]; then \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \
+ echo " PATCH Downloading rabbitmq-codegen"; \
+ git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \
+ fi \
+ elif [ "$1" = "elixir" -a "$(ELIXIR_PATCH)" ]; then \
+ ln -s lib/elixir/ebin $(DEPS_DIR)/elixir/; \
+ else \
+ $$(call dep_autopatch,$(call dep_name,$1)) \
+ fi
+endef
+
+$(foreach dep,$(BUILD_DEPS) $(DEPS),$(eval $(call dep_target,$(dep))))
+
+ifndef IS_APP
+clean:: clean-apps
+
+clean-apps:
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ $(MAKE) -C $$dep clean IS_APP=1; \
+ done
+
+distclean:: distclean-apps
+
+distclean-apps:
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ $(MAKE) -C $$dep distclean IS_APP=1; \
+ done
+endif
+
+ifndef SKIP_DEPS
+distclean:: distclean-deps
+
+distclean-deps:
+ $(gen_verbose) rm -rf $(DEPS_DIR)
+endif
+
+# Forward-declare variables used in core/deps-tools.mk. This is required
+# in case plugins use them.
+
+ERLANG_MK_RECURSIVE_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-deps-list.log
+ERLANG_MK_RECURSIVE_DOC_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-doc-deps-list.log
+ERLANG_MK_RECURSIVE_REL_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-rel-deps-list.log
+ERLANG_MK_RECURSIVE_TEST_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-test-deps-list.log
+ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-shell-deps-list.log
+
+ERLANG_MK_QUERY_DEPS_FILE = $(ERLANG_MK_TMP)/query-deps.log
+ERLANG_MK_QUERY_DOC_DEPS_FILE = $(ERLANG_MK_TMP)/query-doc-deps.log
+ERLANG_MK_QUERY_REL_DEPS_FILE = $(ERLANG_MK_TMP)/query-rel-deps.log
+ERLANG_MK_QUERY_TEST_DEPS_FILE = $(ERLANG_MK_TMP)/query-test-deps.log
+ERLANG_MK_QUERY_SHELL_DEPS_FILE = $(ERLANG_MK_TMP)/query-shell-deps.log
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: clean-app
+
+# Configuration.
+
+ERLC_OPTS ?= -Werror +debug_info +warn_export_vars +warn_shadow_vars \
+ +warn_obsolete_guard # +bin_opt_info +warn_export_all +warn_missing_spec
+COMPILE_FIRST ?=
+COMPILE_FIRST_PATHS = $(addprefix src/,$(addsuffix .erl,$(COMPILE_FIRST)))
+ERLC_EXCLUDE ?=
+ERLC_EXCLUDE_PATHS = $(addprefix src/,$(addsuffix .erl,$(ERLC_EXCLUDE)))
+
+ERLC_ASN1_OPTS ?=
+
+ERLC_MIB_OPTS ?=
+COMPILE_MIB_FIRST ?=
+COMPILE_MIB_FIRST_PATHS = $(addprefix mibs/,$(addsuffix .mib,$(COMPILE_MIB_FIRST)))
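+
+# Projects typically override these defaults in their own Makefile, for
+# example (illustrative; my_behaviour is a hypothetical module name):
+#
+#     ERLC_OPTS += +warn_missing_spec
+#     COMPILE_FIRST = my_behaviour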
+
+# Verbosity.
+
+app_verbose_0 = @echo " APP " $(PROJECT);
+app_verbose_2 = set -x;
+app_verbose = $(app_verbose_$(V))
+
+appsrc_verbose_0 = @echo " APP " $(PROJECT).app.src;
+appsrc_verbose_2 = set -x;
+appsrc_verbose = $(appsrc_verbose_$(V))
+
+makedep_verbose_0 = @echo " DEPEND" $(PROJECT).d;
+makedep_verbose_2 = set -x;
+makedep_verbose = $(makedep_verbose_$(V))
+
+erlc_verbose_0 = @echo " ERLC " $(filter-out $(patsubst %,%.erl,$(ERLC_EXCLUDE)),\
+ $(filter %.erl %.core,$(?F)));
+erlc_verbose_2 = set -x;
+erlc_verbose = $(erlc_verbose_$(V))
+
+xyrl_verbose_0 = @echo " XYRL " $(filter %.xrl %.yrl,$(?F));
+xyrl_verbose_2 = set -x;
+xyrl_verbose = $(xyrl_verbose_$(V))
+
+asn1_verbose_0 = @echo " ASN1 " $(filter %.asn1,$(?F));
+asn1_verbose_2 = set -x;
+asn1_verbose = $(asn1_verbose_$(V))
+
+mib_verbose_0 = @echo " MIB " $(filter %.bin %.mib,$(?F));
+mib_verbose_2 = set -x;
+mib_verbose = $(mib_verbose_$(V))
+
+ifneq ($(wildcard src/),)
+
+# Targets.
+
+app:: $(if $(wildcard ebin/test),clean) deps
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d
+ $(verbose) $(MAKE) --no-print-directory app-build
+
+ifeq ($(wildcard src/$(PROJECT_MOD).erl),)
+define app_file
+{application, '$(PROJECT)', [
+ {description, "$(PROJECT_DESCRIPTION)"},
+ {vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP),
+ {id$(comma)$(space)"$(1)"}$(comma))
+ {modules, [$(call comma_list,$(2))]},
+ {registered, []},
+ {applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(foreach dep,$(DEPS),$(call dep_name,$(dep))))]},
+ {env, $(subst \,\\,$(PROJECT_ENV))}$(if $(findstring {,$(PROJECT_APP_EXTRA_KEYS)),$(comma)$(newline)$(tab)$(subst \,\\,$(PROJECT_APP_EXTRA_KEYS)),)
+]}.
+endef
+else
+define app_file
+{application, '$(PROJECT)', [
+ {description, "$(PROJECT_DESCRIPTION)"},
+ {vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP),
+ {id$(comma)$(space)"$(1)"}$(comma))
+ {modules, [$(call comma_list,$(2))]},
+ {registered, [$(call comma_list,$(PROJECT)_sup $(PROJECT_REGISTERED))]},
+ {applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(foreach dep,$(DEPS),$(call dep_name,$(dep))))]},
+ {mod, {$(PROJECT_MOD), []}},
+ {env, $(subst \,\\,$(PROJECT_ENV))}$(if $(findstring {,$(PROJECT_APP_EXTRA_KEYS)),$(comma)$(newline)$(tab)$(subst \,\\,$(PROJECT_APP_EXTRA_KEYS)),)
+]}.
+endef
+endif
+
+app-build: ebin/$(PROJECT).app
+ $(verbose) :
+
+# Source files.
+
+ALL_SRC_FILES := $(sort $(call core_find,src/,*))
+
+ERL_FILES := $(filter %.erl,$(ALL_SRC_FILES))
+CORE_FILES := $(filter %.core,$(ALL_SRC_FILES))
+
+# ASN.1 files.
+
+ifneq ($(wildcard asn1/),)
+ASN1_FILES = $(sort $(call core_find,asn1/,*.asn1))
+ERL_FILES += $(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES))))
+
+define compile_asn1
+ $(verbose) mkdir -p include/
+ $(asn1_verbose) erlc -v -I include/ -o asn1/ +noobj $(ERLC_ASN1_OPTS) $(1)
+ $(verbose) mv asn1/*.erl src/
+ -$(verbose) mv asn1/*.hrl include/
+ $(verbose) mv asn1/*.asn1db include/
+endef
+
+$(PROJECT).d:: $(ASN1_FILES)
+ $(if $(strip $?),$(call compile_asn1,$?))
+endif
+
+# SNMP MIB files.
+
+ifneq ($(wildcard mibs/),)
+MIB_FILES = $(sort $(call core_find,mibs/,*.mib))
+
+$(PROJECT).d:: $(COMPILE_MIB_FIRST_PATHS) $(MIB_FILES)
+ $(verbose) mkdir -p include/ priv/mibs/
+ $(mib_verbose) erlc -v $(ERLC_MIB_OPTS) -o priv/mibs/ -I priv/mibs/ $?
+ $(mib_verbose) erlc -o include/ -- $(addprefix priv/mibs/,$(patsubst %.mib,%.bin,$(notdir $?)))
+endif
+
+# Leex and Yecc files.
+
+XRL_FILES := $(filter %.xrl,$(ALL_SRC_FILES))
+XRL_ERL_FILES = $(addprefix src/,$(patsubst %.xrl,%.erl,$(notdir $(XRL_FILES))))
+ERL_FILES += $(XRL_ERL_FILES)
+
+YRL_FILES := $(filter %.yrl,$(ALL_SRC_FILES))
+YRL_ERL_FILES = $(addprefix src/,$(patsubst %.yrl,%.erl,$(notdir $(YRL_FILES))))
+ERL_FILES += $(YRL_ERL_FILES)
+
+$(PROJECT).d:: $(XRL_FILES) $(YRL_FILES)
+ $(if $(strip $?),$(xyrl_verbose) erlc -v -o src/ $(YRL_ERLC_OPTS) $?)
+
+# Erlang and Core Erlang files.
+
+define makedep.erl
+ E = ets:new(makedep, [bag]),
+ G = digraph:new([acyclic]),
+ ErlFiles = lists:usort(string:tokens("$(ERL_FILES)", " ")),
+ DepsDir = "$(call core_native_path,$(DEPS_DIR))",
+ AppsDir = "$(call core_native_path,$(APPS_DIR))",
+ DepsDirsSrc = "$(if $(wildcard $(DEPS_DIR)/*/src), $(call core_native_path,$(wildcard $(DEPS_DIR)/*/src)))",
+ DepsDirsInc = "$(if $(wildcard $(DEPS_DIR)/*/include), $(call core_native_path,$(wildcard $(DEPS_DIR)/*/include)))",
+ AppsDirsSrc = "$(if $(wildcard $(APPS_DIR)/*/src), $(call core_native_path,$(wildcard $(APPS_DIR)/*/src)))",
+ AppsDirsInc = "$(if $(wildcard $(APPS_DIR)/*/include), $(call core_native_path,$(wildcard $(APPS_DIR)/*/include)))",
+ DepsDirs = lists:usort(string:tokens(DepsDirsSrc++DepsDirsInc, " ")),
+ AppsDirs = lists:usort(string:tokens(AppsDirsSrc++AppsDirsInc, " ")),
+ Modules = [{list_to_atom(filename:basename(F, ".erl")), F} || F <- ErlFiles],
+ Add = fun (Mod, Dep) ->
+ case lists:keyfind(Dep, 1, Modules) of
+ false -> ok;
+ {_, DepFile} ->
+ {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+ ets:insert(E, {ModFile, DepFile}),
+ digraph:add_vertex(G, Mod),
+ digraph:add_vertex(G, Dep),
+ digraph:add_edge(G, Mod, Dep)
+ end
+ end,
+ AddHd = fun (F, Mod, DepFile) ->
+ case file:open(DepFile, [read]) of
+ {error, enoent} ->
+ ok;
+ {ok, Fd} ->
+ {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+ case ets:match(E, {ModFile, DepFile}) of
+ [] ->
+ ets:insert(E, {ModFile, DepFile}),
+ F(F, Fd, Mod,0);
+ _ -> ok
+ end
+ end
+ end,
+ SearchHrl = fun
+ F(_Hrl, []) -> {error,enoent};
+ F(Hrl, [Dir|Dirs]) ->
+ HrlF = filename:join([Dir,Hrl]),
+ case filelib:is_file(HrlF) of
+ true ->
+ {ok, HrlF};
+ false -> F(Hrl,Dirs)
+ end
+ end,
+ Attr = fun
+ (_F, Mod, behavior, Dep) ->
+ Add(Mod, Dep);
+ (_F, Mod, behaviour, Dep) ->
+ Add(Mod, Dep);
+ (_F, Mod, compile, {parse_transform, Dep}) ->
+ Add(Mod, Dep);
+ (_F, Mod, compile, Opts) when is_list(Opts) ->
+ case proplists:get_value(parse_transform, Opts) of
+ undefined -> ok;
+ Dep -> Add(Mod, Dep)
+ end;
+ (F, Mod, include, Hrl) ->
+ case SearchHrl(Hrl, ["src", "include",AppsDir,DepsDir]++AppsDirs++DepsDirs) of
+ {ok, FoundHrl} -> AddHd(F, Mod, FoundHrl);
+ {error, _} -> false
+ end;
+ (F, Mod, include_lib, Hrl) ->
+ case SearchHrl(Hrl, ["src", "include",AppsDir,DepsDir]++AppsDirs++DepsDirs) of
+ {ok, FoundHrl} -> AddHd(F, Mod, FoundHrl);
+ {error, _} -> false
+ end;
+ (F, Mod, import, {Imp, _}) ->
+ IsFile =
+ case lists:keyfind(Imp, 1, Modules) of
+ false -> false;
+ {_, FilePath} -> filelib:is_file(FilePath)
+ end,
+ case IsFile of
+ false -> ok;
+ true -> Add(Mod, Imp)
+ end;
+ (_, _, _, _) -> ok
+ end,
+ MakeDepend = fun
+ (F, Fd, Mod, StartLocation) ->
+ {ok, Filename} = file:pid2name(Fd),
+ case io:parse_erl_form(Fd, undefined, StartLocation) of
+ {ok, AbsData, EndLocation} ->
+ case AbsData of
+ {attribute, _, Key, Value} ->
+ Attr(F, Mod, Key, Value),
+ F(F, Fd, Mod, EndLocation);
+ _ -> F(F, Fd, Mod, EndLocation)
+ end;
+ {eof, _ } -> file:close(Fd);
+ {error, ErrorDescription } ->
+ file:close(Fd);
+ {error, ErrorInfo, ErrorLocation} ->
+ F(F, Fd, Mod, ErrorLocation)
+ end,
+ ok
+ end,
+ [begin
+ Mod = list_to_atom(filename:basename(F, ".erl")),
+ case file:open(F, [read]) of
+ {ok, Fd} -> MakeDepend(MakeDepend, Fd, Mod,0);
+ {error, enoent} -> ok
+ end
+ end || F <- ErlFiles],
+ Depend = sofs:to_external(sofs:relation_to_family(sofs:relation(ets:tab2list(E)))),
+ CompileFirst = [X || X <- lists:reverse(digraph_utils:topsort(G)), [] =/= digraph:in_neighbours(G, X)],
+ TargetPath = fun(Target) ->
+ case lists:keyfind(Target, 1, Modules) of
+ false -> "";
+ {_, DepFile} ->
+ DirSubname = tl(string:tokens(filename:dirname(DepFile), "/")),
+ string:join(DirSubname ++ [atom_to_list(Target)], "/")
+ end
+ end,
+ Output0 = [
+ "# Generated by Erlang.mk. Edit at your own risk!\n\n",
+ [[F, "::", [[" ", D] || D <- Deps], "; @touch \$$@\n"] || {F, Deps} <- Depend],
+ "\nCOMPILE_FIRST +=", [[" ", TargetPath(CF)] || CF <- CompileFirst], "\n"
+ ],
+ Output = case "é" of
+ [233] -> unicode:characters_to_binary(Output0);
+ _ -> Output0
+ end,
+ ok = file:write_file("$(1)", Output),
+ halt()
+endef
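+
+# The generated $(PROJECT).d contains one rule per module that has local
+# dependencies, plus a COMPILE_FIRST line, along these lines (illustrative;
+# module names are hypothetical):
+#
+#     src/my_mod.erl:: src/my_behaviour.erl; @touch $@
+#     COMPILE_FIRST += my_behaviour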
+
+ifeq ($(if $(NO_MAKEDEP),$(wildcard $(PROJECT).d),),)
+$(PROJECT).d:: $(ERL_FILES) $(call core_find,include/,*.hrl) $(MAKEFILE_LIST)
+ $(makedep_verbose) $(call erlang,$(call makedep.erl,$@))
+endif
+
+ifeq ($(IS_APP)$(IS_DEP),)
+ifneq ($(words $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES)),0)
+# Rebuild everything when the Makefile changes.
+$(ERLANG_MK_TMP)/last-makefile-change: $(MAKEFILE_LIST) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES); \
+ touch -c $(PROJECT).d; \
+ fi
+ $(verbose) touch $@
+
+$(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES):: $(ERLANG_MK_TMP)/last-makefile-change
+ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change
+endif
+endif
+
+$(PROJECT).d::
+ $(verbose) :
+
+include $(wildcard $(PROJECT).d)
+
+ebin/$(PROJECT).app:: ebin/
+
+ebin/:
+ $(verbose) mkdir -p ebin/
+
+define compile_erl
+ $(erlc_verbose) erlc -v $(if $(IS_DEP),$(filter-out -Werror,$(ERLC_OPTS)),$(ERLC_OPTS)) -o ebin/ \
+ -pa ebin/ -I include/ $(filter-out $(ERLC_EXCLUDE_PATHS),$(COMPILE_FIRST_PATHS) $(1))
+endef
+
+define validate_app_file
+ case file:consult("ebin/$(PROJECT).app") of
+ {ok, _} -> halt();
+ _ -> halt(1)
+ end
+endef
+
+ebin/$(PROJECT).app:: $(ERL_FILES) $(CORE_FILES) $(wildcard src/$(PROJECT).app.src)
+ $(eval FILES_TO_COMPILE := $(filter-out src/$(PROJECT).app.src,$?))
+ $(if $(strip $(FILES_TO_COMPILE)),$(call compile_erl,$(FILES_TO_COMPILE)))
+# Older git versions do not have the --first-parent flag. Do without in that case.
+ $(eval GITDESCRIBE := $(shell git describe --dirty --abbrev=7 --tags --always --first-parent 2>/dev/null \
+ || git describe --dirty --abbrev=7 --tags --always 2>/dev/null || true))
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(filter-out $(ERLC_EXCLUDE_PATHS),$(ERL_FILES) $(CORE_FILES) $(BEAM_FILES)))))))
+ifeq ($(wildcard src/$(PROJECT).app.src),)
+ $(app_verbose) printf '$(subst %,%%,$(subst $(newline),\n,$(subst ','\'',$(call app_file,$(GITDESCRIBE),$(MODULES)))))' \
+ > ebin/$(PROJECT).app
+ $(verbose) if ! $(call erlang,$(call validate_app_file)); then \
+ echo "The .app file produced is invalid. Please verify the value of PROJECT_ENV." >&2; \
+ exit 1; \
+ fi
+else
+ $(verbose) if [ -z "$$(grep -e '^[^%]*{\s*modules\s*,' src/$(PROJECT).app.src)" ]; then \
+ echo "Empty modules entry not found in $(PROJECT).app.src. Please consult the erlang.mk documentation for instructions." >&2; \
+ exit 1; \
+ fi
+ $(appsrc_verbose) cat src/$(PROJECT).app.src \
+ | sed "s/{[[:space:]]*modules[[:space:]]*,[[:space:]]*\[\]}/{modules, \[$(call comma_list,$(MODULES))\]}/" \
+ | sed "s/{id,[[:space:]]*\"git\"}/{id, \"$(subst /,\/,$(GITDESCRIBE))\"}/" \
+ > ebin/$(PROJECT).app
+endif
+ifneq ($(wildcard src/$(PROJECT).appup),)
+ $(verbose) cp src/$(PROJECT).appup ebin/
+endif
+
+clean:: clean-app
+
+clean-app:
+ $(gen_verbose) rm -rf $(PROJECT).d ebin/ priv/mibs/ $(XRL_ERL_FILES) $(YRL_ERL_FILES) \
+ $(addprefix include/,$(patsubst %.mib,%.hrl,$(notdir $(MIB_FILES)))) \
+ $(addprefix include/,$(patsubst %.asn1,%.hrl,$(notdir $(ASN1_FILES)))) \
+ $(addprefix include/,$(patsubst %.asn1,%.asn1db,$(notdir $(ASN1_FILES)))) \
+ $(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES))))
+
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Viktor Söderqvist <viktor@zuiderkwast.se>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: docs-deps
+
+# Configuration.
+
+ALL_DOC_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(DOC_DEPS))
+
+# Targets.
+
+$(foreach dep,$(DOC_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+doc-deps:
+else
+doc-deps: $(ALL_DOC_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_DOC_DEPS_DIRS) ; do $(MAKE) -C $$dep IS_DEP=1; done
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: rel-deps
+
+# Configuration.
+
+ALL_REL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(REL_DEPS))
+
+# Targets.
+
+$(foreach dep,$(REL_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+rel-deps:
+else
+rel-deps: $(ALL_REL_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_REL_DEPS_DIRS) ; do $(MAKE) -C $$dep; done
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: test-deps test-dir test-build clean-test-dir
+
+# Configuration.
+
+TEST_DIR ?= $(CURDIR)/test
+
+ALL_TEST_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(TEST_DEPS))
+
+TEST_ERLC_OPTS ?= +debug_info +warn_export_vars +warn_shadow_vars +warn_obsolete_guard
+TEST_ERLC_OPTS += -DTEST=1
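+
+# Test-only dependencies are declared like regular ones, for example
+# (illustrative):
+#
+#     TEST_DEPS = meck
+#     dep_meck = git https://github.com/eproxus/meck 0.9.2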
+
+# Targets.
+
+$(foreach dep,$(TEST_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+test-deps:
+else
+test-deps: $(ALL_TEST_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_TEST_DEPS_DIRS) ; do \
+ if [ -z "$(strip $(FULL))" ] && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ else \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ fi \
+ done
+endif
+
+ifneq ($(wildcard $(TEST_DIR)),)
+test-dir: $(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build
+ @:
+
+test_erlc_verbose_0 = @echo " ERLC " $(filter-out $(patsubst %,%.erl,$(ERLC_EXCLUDE)),\
+ $(filter %.erl %.core,$(notdir $(FILES_TO_COMPILE))));
+test_erlc_verbose_2 = set -x;
+test_erlc_verbose = $(test_erlc_verbose_$(V))
+
+define compile_test_erl
+ $(test_erlc_verbose) erlc -v $(TEST_ERLC_OPTS) -o $(TEST_DIR) \
+ -pa ebin/ -I include/ $(1)
+endef
+
+ERL_TEST_FILES = $(call core_find,$(TEST_DIR)/,*.erl)
+$(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build: $(ERL_TEST_FILES) $(MAKEFILE_LIST)
+ $(eval FILES_TO_COMPILE := $(if $(filter $(MAKEFILE_LIST),$?),$(filter $(ERL_TEST_FILES),$^),$?))
+ $(if $(strip $(FILES_TO_COMPILE)),$(call compile_test_erl,$(FILES_TO_COMPILE)) && touch $@)
+endif
+
+test-build:: IS_TEST=1
+test-build:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build:: $(if $(wildcard src),$(if $(wildcard ebin/test),,clean)) $(if $(IS_APP),,deps test-deps)
+# We already compiled everything when IS_APP=1.
+ifndef IS_APP
+ifneq ($(wildcard src),)
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(verbose) $(MAKE) --no-print-directory app-build ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(gen_verbose) touch ebin/test
+endif
+ifneq ($(wildcard $(TEST_DIR)),)
+ $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+endif
+endif
+
+# Roughly the same as test-build, but when IS_APP=1.
+# We only care about compiling the current application.
+ifdef IS_APP
+test-build-app:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build-app:: deps test-deps
+ifneq ($(wildcard src),)
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(verbose) $(MAKE) --no-print-directory app-build ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(gen_verbose) touch ebin/test
+endif
+ifneq ($(wildcard $(TEST_DIR)),)
+ $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+endif
+endif
+
+clean:: clean-test-dir
+
+clean-test-dir:
+ifneq ($(wildcard $(TEST_DIR)/*.beam),)
+ $(gen_verbose) rm -f $(TEST_DIR)/*.beam $(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: rebar.config
+
+# We strip out -Werror because we don't want to fail due to
+# warnings when used as a dependency.
+
+compat_prepare_erlc_opts = $(shell echo "$1" | sed 's/, */,/g')
+
+define compat_convert_erlc_opts
+$(if $(filter-out -Werror,$1),\
+ $(if $(findstring +,$1),\
+ $(shell echo $1 | cut -b 2-)))
+endef
+
+define compat_erlc_opts_to_list
+[$(call comma_list,$(foreach o,$(call compat_prepare_erlc_opts,$1),$(call compat_convert_erlc_opts,$o)))]
+endef
+
+define compat_rebar_config
+{deps, [
+$(call comma_list,$(foreach d,$(DEPS),\
+ $(if $(filter hex,$(call dep_fetch,$d)),\
+ {$(call dep_name,$d)$(comma)"$(call dep_repo,$d)"},\
+ {$(call dep_name,$d)$(comma)".*"$(comma){git,"$(call dep_repo,$d)"$(comma)"$(call dep_commit,$d)"}})))
+]}.
+{erl_opts, $(call compat_erlc_opts_to_list,$(ERLC_OPTS))}.
+endef
+
+rebar.config:
+ $(gen_verbose) $(call core_render,compat_rebar_config,rebar.config)
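+
+# Example invocation (illustrative): generate a rebar-compatible rebar.config
+# from the current project's DEPS and ERLC_OPTS.
+#
+#     $ make rebar.config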
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter asciideck,$(DEPS) $(DOC_DEPS)),asciideck)
+
+.PHONY: asciidoc asciidoc-guide asciidoc-manual install-asciidoc distclean-asciidoc-guide distclean-asciidoc-manual
+
+# Core targets.
+
+docs:: asciidoc
+
+distclean:: distclean-asciidoc-guide distclean-asciidoc-manual
+
+# Plugin-specific targets.
+
+asciidoc: asciidoc-guide asciidoc-manual
+
+# User guide.
+
+ifeq ($(wildcard doc/src/guide/book.asciidoc),)
+asciidoc-guide:
+else
+asciidoc-guide: distclean-asciidoc-guide doc-deps
+ a2x -v -f pdf doc/src/guide/book.asciidoc && mv doc/src/guide/book.pdf doc/guide.pdf
+ a2x -v -f chunked doc/src/guide/book.asciidoc && mv doc/src/guide/book.chunked/ doc/html/
+
+distclean-asciidoc-guide:
+ $(gen_verbose) rm -rf doc/html/ doc/guide.pdf
+endif
+
+# Man pages.
+
+ASCIIDOC_MANUAL_FILES := $(wildcard doc/src/manual/*.asciidoc)
+
+ifeq ($(ASCIIDOC_MANUAL_FILES),)
+asciidoc-manual:
+else
+
+# Configuration.
+
+MAN_INSTALL_PATH ?= /usr/local/share/man
+MAN_SECTIONS ?= 3 7
+MAN_PROJECT ?= $(shell echo $(PROJECT) | sed 's/^./\U&\E/')
+MAN_VERSION ?= $(PROJECT_VERSION)
+
+# Plugin-specific targets.
+
+define asciidoc2man.erl
+try
+ [begin
+ io:format(" ADOC ~s~n", [F]),
+ ok = asciideck:to_manpage(asciideck:parse_file(F), #{
+ compress => gzip,
+ outdir => filename:dirname(F),
+ extra2 => "$(MAN_PROJECT) $(MAN_VERSION)",
+ extra3 => "$(MAN_PROJECT) Function Reference"
+ })
+ end || F <- [$(shell echo $(addprefix $(comma)\",$(addsuffix \",$1)) | sed 's/^.//')]],
+ halt(0)
+catch C:E ->
+ io:format("Exception ~p:~p~nStacktrace: ~p~n", [C, E, erlang:get_stacktrace()]),
+ halt(1)
+end.
+endef
+
+asciidoc-manual:: doc-deps
+
+asciidoc-manual:: $(ASCIIDOC_MANUAL_FILES)
+ $(gen_verbose) $(call erlang,$(call asciidoc2man.erl,$?))
+ $(verbose) $(foreach s,$(MAN_SECTIONS),mkdir -p doc/man$s/ && mv doc/src/manual/*.$s.gz doc/man$s/;)
+
+install-docs:: install-asciidoc
+
+install-asciidoc: asciidoc-manual
+ $(foreach s,$(MAN_SECTIONS),\
+ mkdir -p $(MAN_INSTALL_PATH)/man$s/ && \
+ install -g `id -g` -o `id -u` -m 0644 doc/man$s/*.gz $(MAN_INSTALL_PATH)/man$s/;)
+
+distclean-asciidoc-manual:
+ $(gen_verbose) rm -rf $(addprefix doc/man,$(MAN_SECTIONS))
+endif
+endif
+
+# Copyright (c) 2014-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: bootstrap bootstrap-lib bootstrap-rel new list-templates
+
+# Core targets.
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Bootstrap targets:" \
+ " bootstrap Generate a skeleton of an OTP application" \
+ " bootstrap-lib Generate a skeleton of an OTP library" \
+ " bootstrap-rel Generate the files needed to build a release" \
+ " new-app in=NAME Create a new local OTP application NAME" \
+ " new-lib in=NAME Create a new local OTP library NAME" \
+ " new t=TPL n=NAME Generate a module NAME based on the template TPL" \
+ " new t=T n=N in=APP Generate a module NAME based on the template TPL in APP" \
+ " list-templates List available templates"
+
+# Bootstrap templates.
+
+define bs_appsrc
+{application, $p, [
+ {description, ""},
+ {vsn, "0.1.0"},
+ {id, "git"},
+ {modules, []},
+ {registered, []},
+ {applications, [
+ kernel,
+ stdlib
+ ]},
+ {mod, {$p_app, []}},
+ {env, []}
+]}.
+endef
+
+define bs_appsrc_lib
+{application, $p, [
+ {description, ""},
+ {vsn, "0.1.0"},
+ {id, "git"},
+ {modules, []},
+ {registered, []},
+ {applications, [
+ kernel,
+ stdlib
+ ]}
+]}.
+endef
+
+# To prevent autocompletion issues with ZSH, we add "include erlang.mk"
+# separately during the actual bootstrap.
+define bs_Makefile
+PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.1.0
+$(if $(SP),
+# Whitespace to be used when creating files from templates.
+SP = $(SP)
+)
+endef
+
+define bs_apps_Makefile
+PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.1.0
+$(if $(SP),
+# Whitespace to be used when creating files from templates.
+SP = $(SP)
+)
+# Make sure we know where the applications are located.
+ROOT_DIR ?= $(call core_relpath,$(dir $(ERLANG_MK_FILENAME)),$(APPS_DIR)/app)
+APPS_DIR ?= ..
+DEPS_DIR ?= $(call core_relpath,$(DEPS_DIR),$(APPS_DIR)/app)
+
+include $$(ROOT_DIR)/erlang.mk
+endef
+
+define bs_app
+-module($p_app).
+-behaviour(application).
+
+-export([start/2]).
+-export([stop/1]).
+
+start(_Type, _Args) ->
+ $p_sup:start_link().
+
+stop(_State) ->
+ ok.
+endef
+
+define bs_relx_config
+{release, {$p_release, "1"}, [$p, sasl, runtime_tools]}.
+{extended_start_script, true}.
+{sys_config, "config/sys.config"}.
+{vm_args, "config/vm.args"}.
+endef
+
+define bs_sys_config
+[
+].
+endef
+
+define bs_vm_args
+-name $p@127.0.0.1
+-setcookie $p
+-heart
+endef
+
+# Normal templates.
+
+define tpl_supervisor
+-module($(n)).
+-behaviour(supervisor).
+
+-export([start_link/0]).
+-export([init/1]).
+
+start_link() ->
+ supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+init([]) ->
+ Procs = [],
+ {ok, {{one_for_one, 1, 5}, Procs}}.
+endef
+
+define tpl_gen_server
+-module($(n)).
+-behaviour(gen_server).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_server.
+-export([init/1]).
+-export([handle_call/3]).
+-export([handle_cast/2]).
+-export([handle_info/2]).
+-export([terminate/2]).
+-export([code_change/3]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_server:start_link(?MODULE, [], []).
+
+%% gen_server.
+
+init([]) ->
+ {ok, #state{}}.
+
+handle_call(_Request, _From, State) ->
+ {reply, ignored, State}.
+
+handle_cast(_Msg, State) ->
+ {noreply, State}.
+
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+endef
+
+define tpl_module
+-module($(n)).
+-export([]).
+endef
+
+define tpl_cowboy_http
+-module($(n)).
+-behaviour(cowboy_http_handler).
+
+-export([init/3]).
+-export([handle/2]).
+-export([terminate/3]).
+
+-record(state, {
+}).
+
+init(_, Req, _Opts) ->
+ {ok, Req, #state{}}.
+
+handle(Req, State=#state{}) ->
+ {ok, Req2} = cowboy_req:reply(200, Req),
+ {ok, Req2, State}.
+
+terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_gen_fsm
+-module($(n)).
+-behaviour(gen_fsm).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_fsm.
+-export([init/1]).
+-export([state_name/2]).
+-export([handle_event/3]).
+-export([state_name/3]).
+-export([handle_sync_event/4]).
+-export([handle_info/3]).
+-export([terminate/3]).
+-export([code_change/4]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_fsm:start_link(?MODULE, [], []).
+
+%% gen_fsm.
+
+init([]) ->
+ {ok, state_name, #state{}}.
+
+state_name(_Event, StateData) ->
+ {next_state, state_name, StateData}.
+
+handle_event(_Event, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+state_name(_Event, _From, StateData) ->
+ {reply, ignored, state_name, StateData}.
+
+handle_sync_event(_Event, _From, StateName, StateData) ->
+ {reply, ignored, StateName, StateData}.
+
+handle_info(_Info, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+terminate(_Reason, _StateName, _StateData) ->
+ ok.
+
+code_change(_OldVsn, StateName, StateData, _Extra) ->
+ {ok, StateName, StateData}.
+endef
+
+define tpl_gen_statem
+-module($(n)).
+-behaviour(gen_statem).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_statem.
+-export([callback_mode/0]).
+-export([init/1]).
+-export([state_name/3]).
+-export([handle_event/4]).
+-export([terminate/3]).
+-export([code_change/4]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_statem:start_link(?MODULE, [], []).
+
+%% gen_statem.
+
+callback_mode() ->
+ state_functions.
+
+init([]) ->
+ {ok, state_name, #state{}}.
+
+state_name(_EventType, _EventData, StateData) ->
+ {next_state, state_name, StateData}.
+
+handle_event(_EventType, _EventData, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+terminate(_Reason, _StateName, _StateData) ->
+ ok.
+
+code_change(_OldVsn, StateName, StateData, _Extra) ->
+ {ok, StateName, StateData}.
+endef
+
+define tpl_cowboy_loop
+-module($(n)).
+-behaviour(cowboy_loop_handler).
+
+-export([init/3]).
+-export([info/3]).
+-export([terminate/3]).
+
+-record(state, {
+}).
+
+init(_, Req, _Opts) ->
+ {loop, Req, #state{}, 5000, hibernate}.
+
+info(_Info, Req, State) ->
+ {loop, Req, State, hibernate}.
+
+terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_cowboy_rest
+-module($(n)).
+
+-export([init/3]).
+-export([content_types_provided/2]).
+-export([get_html/2]).
+
+init(_, _Req, _Opts) ->
+ {upgrade, protocol, cowboy_rest}.
+
+content_types_provided(Req, State) ->
+ {[{{<<"text">>, <<"html">>, '*'}, get_html}], Req, State}.
+
+get_html(Req, State) ->
+ {<<"<html><body>This is REST!</body></html>">>, Req, State}.
+endef
+
+define tpl_cowboy_ws
+-module($(n)).
+-behaviour(cowboy_websocket_handler).
+
+-export([init/3]).
+-export([websocket_init/3]).
+-export([websocket_handle/3]).
+-export([websocket_info/3]).
+-export([websocket_terminate/3]).
+
+-record(state, {
+}).
+
+init(_, _, _) ->
+ {upgrade, protocol, cowboy_websocket}.
+
+websocket_init(_, Req, _Opts) ->
+ Req2 = cowboy_req:compact(Req),
+ {ok, Req2, #state{}}.
+
+websocket_handle({text, Data}, Req, State) ->
+ {reply, {text, Data}, Req, State};
+websocket_handle({binary, Data}, Req, State) ->
+ {reply, {binary, Data}, Req, State};
+websocket_handle(_Frame, Req, State) ->
+ {ok, Req, State}.
+
+websocket_info(_Info, Req, State) ->
+ {ok, Req, State}.
+
+websocket_terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_ranch_protocol
+-module($(n)).
+-behaviour(ranch_protocol).
+
+-export([start_link/4]).
+-export([init/4]).
+
+-type opts() :: [].
+-export_type([opts/0]).
+
+-record(state, {
+ socket :: inet:socket(),
+ transport :: module()
+}).
+
+start_link(Ref, Socket, Transport, Opts) ->
+ Pid = spawn_link(?MODULE, init, [Ref, Socket, Transport, Opts]),
+ {ok, Pid}.
+
+-spec init(ranch:ref(), inet:socket(), module(), opts()) -> ok.
+init(Ref, Socket, Transport, _Opts) ->
+ ok = ranch:accept_ack(Ref),
+ loop(#state{socket=Socket, transport=Transport}).
+
+loop(State) ->
+ loop(State).
+endef
+
+# Plugin-specific targets.
+
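+# How WS is computed: $(wordlist 1,$(SP),a a ...) yields SP copies of "a"
+# (capped at 20); with the leading "a" that makes SP+1 words, and
+# $(subst a,,...) then deletes the "a"s, leaving exactly SP spaces. For
+# example, SP=2 turns "a a a" into two spaces. Without SP, WS is a tab.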
+ifndef WS
+ifdef SP
+WS = $(subst a,,a $(wordlist 1,$(SP),a a a a a a a a a a a a a a a a a a a a))
+else
+WS = $(tab)
+endif
+endif
+
+bootstrap:
+ifneq ($(wildcard src/),)
+ $(error Error: src/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(eval n := $(PROJECT)_sup)
+ $(verbose) $(call core_render,bs_Makefile,Makefile)
+ $(verbose) echo "include erlang.mk" >> Makefile
+ $(verbose) mkdir src/
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc,src/$(PROJECT).app.src)
+endif
+ $(verbose) $(call core_render,bs_app,src/$(PROJECT)_app.erl)
+ $(verbose) $(call core_render,tpl_supervisor,src/$(PROJECT)_sup.erl)
+
+bootstrap-lib:
+ifneq ($(wildcard src/),)
+ $(error Error: src/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(verbose) $(call core_render,bs_Makefile,Makefile)
+ $(verbose) echo "include erlang.mk" >> Makefile
+ $(verbose) mkdir src/
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc_lib,src/$(PROJECT).app.src)
+endif
+
+bootstrap-rel:
+ifneq ($(wildcard relx.config),)
+ $(error Error: relx.config already exists)
+endif
+ifneq ($(wildcard config/),)
+ $(error Error: config/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(verbose) $(call core_render,bs_relx_config,relx.config)
+ $(verbose) mkdir config/
+ $(verbose) $(call core_render,bs_sys_config,config/sys.config)
+ $(verbose) $(call core_render,bs_vm_args,config/vm.args)
+
+new-app:
+ifndef in
+ $(error Usage: $(MAKE) new-app in=APP)
+endif
+ifneq ($(wildcard $(APPS_DIR)/$in),)
+ $(error Error: Application $in already exists)
+endif
+ $(eval p := $(in))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(eval n := $(in)_sup)
+ $(verbose) mkdir -p $(APPS_DIR)/$p/src/
+ $(verbose) $(call core_render,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile)
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc,$(APPS_DIR)/$p/src/$p.app.src)
+endif
+ $(verbose) $(call core_render,bs_app,$(APPS_DIR)/$p/src/$p_app.erl)
+ $(verbose) $(call core_render,tpl_supervisor,$(APPS_DIR)/$p/src/$p_sup.erl)
+
+new-lib:
+ifndef in
+ $(error Usage: $(MAKE) new-lib in=APP)
+endif
+ifneq ($(wildcard $(APPS_DIR)/$in),)
+ $(error Error: Application $in already exists)
+endif
+ $(eval p := $(in))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(verbose) mkdir -p $(APPS_DIR)/$p/src/
+ $(verbose) $(call core_render,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile)
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc_lib,$(APPS_DIR)/$p/src/$p.app.src)
+endif
+
+new:
+ifeq ($(wildcard src/)$(in),)
+ $(error Error: src/ directory does not exist)
+endif
+ifndef t
+ $(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP])
+endif
+ifndef n
+ $(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP])
+endif
+ifdef in
+ $(verbose) $(call core_render,tpl_$(t),$(APPS_DIR)/$(in)/src/$(n).erl)
+else
+ $(verbose) $(call core_render,tpl_$(t),src/$(n).erl)
+endif
+
+list-templates:
+ $(verbose) echo Available templates:
+ $(verbose) printf " %s\n" $(sort $(patsubst tpl_%,%,$(filter tpl_%,$(.VARIABLES))))
+
+# Copyright (c) 2014-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: clean-c_src distclean-c_src-env
+
+# Configuration.
+
+C_SRC_DIR ?= $(CURDIR)/c_src
+C_SRC_ENV ?= $(C_SRC_DIR)/env.mk
+C_SRC_OUTPUT ?= $(CURDIR)/priv/$(PROJECT)
+C_SRC_TYPE ?= shared
+
+# System type and C compiler/flags.
+
+ifeq ($(PLATFORM),msys2)
+ C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?= .exe
+ C_SRC_OUTPUT_SHARED_EXTENSION ?= .dll
+else
+ C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?=
+ C_SRC_OUTPUT_SHARED_EXTENSION ?= .so
+endif
+
+ifeq ($(C_SRC_TYPE),shared)
+ C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_SHARED_EXTENSION)
+else
+ C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_EXECUTABLE_EXTENSION)
+endif
+
+ifeq ($(PLATFORM),msys2)
+# We hardcode the compiler used on MSYS2. The default CC=cc does
+# not produce working code. The "gcc" MSYS2 package also doesn't.
+ CC = /mingw64/bin/gcc
+ export CC
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),darwin)
+ CC ?= cc
+ CFLAGS ?= -O3 -std=c99 -arch x86_64 -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -arch x86_64 -Wall
+ LDFLAGS ?= -arch x86_64 -flat_namespace -undefined suppress
+else ifeq ($(PLATFORM),freebsd)
+ CC ?= cc
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),linux)
+ CC ?= gcc
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+endif
+
+ifneq ($(PLATFORM),msys2)
+ CFLAGS += -fPIC
+ CXXFLAGS += -fPIC
+endif
+
+CFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
+CXXFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
+
+LDLIBS += -L"$(ERL_INTERFACE_LIB_DIR)" -lei
+
+# Verbosity.
+
+c_verbose_0 = @echo " C " $(filter-out $(notdir $(MAKEFILE_LIST) $(C_SRC_ENV)),$(^F));
+c_verbose = $(c_verbose_$(V))
+
+cpp_verbose_0 = @echo " CPP " $(filter-out $(notdir $(MAKEFILE_LIST) $(C_SRC_ENV)),$(^F));
+cpp_verbose = $(cpp_verbose_$(V))
+
+link_verbose_0 = @echo " LD " $(@F);
+link_verbose = $(link_verbose_$(V))
+
+# Targets.
+
+ifeq ($(wildcard $(C_SRC_DIR)),)
+else ifneq ($(wildcard $(C_SRC_DIR)/Makefile),)
+app:: app-c_src
+
+test-build:: app-c_src
+
+app-c_src:
+ $(MAKE) -C $(C_SRC_DIR)
+
+clean::
+ $(MAKE) -C $(C_SRC_DIR) clean
+
+else
+
+ifeq ($(SOURCES),)
+SOURCES := $(sort $(foreach pat,*.c *.C *.cc *.cpp,$(call core_find,$(C_SRC_DIR)/,$(pat))))
+endif
+OBJECTS = $(addsuffix .o, $(basename $(SOURCES)))
+
+COMPILE_C = $(c_verbose) $(CC) $(CFLAGS) $(CPPFLAGS) -c
+COMPILE_CPP = $(cpp_verbose) $(CXX) $(CXXFLAGS) $(CPPFLAGS) -c
+
+app:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
+
+test-build:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
+
+$(C_SRC_OUTPUT_FILE): $(OBJECTS)
+ $(verbose) mkdir -p $(dir $@)
+ $(link_verbose) $(CC) $(OBJECTS) \
+ $(LDFLAGS) $(if $(filter $(C_SRC_TYPE),shared),-shared) $(LDLIBS) \
+ -o $(C_SRC_OUTPUT_FILE)
+
+$(OBJECTS): $(MAKEFILE_LIST) $(C_SRC_ENV)
+
+%.o: %.c
+ $(COMPILE_C) $(OUTPUT_OPTION) $<
+
+%.o: %.cc
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+%.o: %.C
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+%.o: %.cpp
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+clean:: clean-c_src
+
+clean-c_src:
+ $(gen_verbose) rm -f $(C_SRC_OUTPUT_FILE) $(OBJECTS)
+
+endif
+
+ifneq ($(wildcard $(C_SRC_DIR)),)
+ERL_ERTS_DIR = $(shell $(ERL) -eval 'io:format("~s~n", [code:lib_dir(erts)]), halt().')
+
+$(C_SRC_ENV):
+ $(verbose) $(ERL) -eval "file:write_file(\"$(call core_native_path,$(C_SRC_ENV))\", \
+ io_lib:format( \
+ \"# Generated by Erlang.mk. Edit at your own risk!~n~n\" \
+ \"ERTS_INCLUDE_DIR ?= ~s/erts-~s/include/~n\" \
+ \"ERL_INTERFACE_INCLUDE_DIR ?= ~s~n\" \
+ \"ERL_INTERFACE_LIB_DIR ?= ~s~n\" \
+ \"ERTS_DIR ?= $(ERL_ERTS_DIR)~n\", \
+ [code:root_dir(), erlang:system_info(version), \
+ code:lib_dir(erl_interface, include), \
+ code:lib_dir(erl_interface, lib)])), \
+ halt()."
+
+distclean:: distclean-c_src-env
+
+distclean-c_src-env:
+ $(gen_verbose) rm -f $(C_SRC_ENV)
+
+-include $(C_SRC_ENV)
+
+ifneq ($(ERL_ERTS_DIR),$(ERTS_DIR))
+$(shell rm -f $(C_SRC_ENV))
+endif
+endif
+
+# Templates.
+
+define bs_c_nif
+#include "erl_nif.h"
+
+static int loads = 0;
+
+static int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
+{
+ /* Initialize private data. */
+ *priv_data = NULL;
+
+ loads++;
+
+ return 0;
+}
+
+static int upgrade(ErlNifEnv* env, void** priv_data, void** old_priv_data, ERL_NIF_TERM load_info)
+{
+ /* Convert the private data to the new version. */
+ *priv_data = *old_priv_data;
+
+ loads++;
+
+ return 0;
+}
+
+static void unload(ErlNifEnv* env, void* priv_data)
+{
+ if (loads == 1) {
+ /* Destroy the private data. */
+ }
+
+ loads--;
+}
+
+static ERL_NIF_TERM hello(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ if (enif_is_atom(env, argv[0])) {
+ return enif_make_tuple2(env,
+ enif_make_atom(env, "hello"),
+ argv[0]);
+ }
+
+ return enif_make_tuple2(env,
+ enif_make_atom(env, "error"),
+ enif_make_atom(env, "badarg"));
+}
+
+static ErlNifFunc nif_funcs[] = {
+ {"hello", 1, hello}
+};
+
+ERL_NIF_INIT($n, nif_funcs, load, NULL, upgrade, unload)
+endef
+
+define bs_erl_nif
+-module($n).
+
+-export([hello/1]).
+
+-on_load(on_load/0).
+on_load() ->
+ PrivDir = case code:priv_dir(?MODULE) of
+ {error, _} ->
+ AppPath = filename:dirname(filename:dirname(code:which(?MODULE))),
+ filename:join(AppPath, "priv");
+ Path ->
+ Path
+ end,
+ erlang:load_nif(filename:join(PrivDir, atom_to_list(?MODULE)), 0).
+
+hello(_) ->
+ erlang:nif_error({not_loaded, ?MODULE}).
+endef
+
+new-nif:
+ifneq ($(wildcard $(C_SRC_DIR)/$n.c),)
+ $(error Error: $(C_SRC_DIR)/$n.c already exists)
+endif
+ifneq ($(wildcard src/$n.erl),)
+ $(error Error: src/$n.erl already exists)
+endif
+ifndef n
+ $(error Usage: $(MAKE) new-nif n=NAME [in=APP])
+endif
+ifdef in
+ $(verbose) $(MAKE) -C $(APPS_DIR)/$(in)/ new-nif n=$n in=
+else
+ $(verbose) mkdir -p $(C_SRC_DIR) src/
+ $(verbose) $(call core_render,bs_c_nif,$(C_SRC_DIR)/$n.c)
+ $(verbose) $(call core_render,bs_erl_nif,src/$n.erl)
+endif
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: ci ci-prepare ci-setup
+
+CI_OTP ?=
+CI_HIPE ?=
+CI_ERLLVM ?=
+
+ifeq ($(CI_VM),native)
+ERLC_OPTS += +native
+TEST_ERLC_OPTS += +native
+else ifeq ($(CI_VM),erllvm)
+ERLC_OPTS += +native +'{hipe, [to_llvm]}'
+TEST_ERLC_OPTS += +native +'{hipe, [to_llvm]}'
+endif
+
+ifeq ($(strip $(CI_OTP) $(CI_HIPE) $(CI_ERLLVM)),)
+ci::
+else
+
+ci:: $(addprefix ci-,$(CI_OTP) $(addsuffix -native,$(CI_HIPE)) $(addsuffix -erllvm,$(CI_ERLLVM)))
+
+ci-prepare: $(addprefix $(KERL_INSTALL_DIR)/,$(CI_OTP) $(addsuffix -native,$(CI_HIPE)))
+
+ci-setup::
+ $(verbose) :
+
+ci-extra::
+ $(verbose) :
+
+ci_verbose_0 = @echo " CI " $(1);
+ci_verbose = $(ci_verbose_$(V))
+
+define ci_target
+ci-$1: $(KERL_INSTALL_DIR)/$2
+ $(verbose) $(MAKE) --no-print-directory clean
+ $(ci_verbose) \
+ PATH="$(KERL_INSTALL_DIR)/$2/bin:$(PATH)" \
+ CI_OTP_RELEASE="$1" \
+ CT_OPTS="-label $1" \
+ CI_VM="$3" \
+ $(MAKE) ci-setup tests
+ $(verbose) $(MAKE) --no-print-directory ci-extra
+endef
+
+$(foreach otp,$(CI_OTP),$(eval $(call ci_target,$(otp),$(otp),otp)))
+$(foreach otp,$(CI_HIPE),$(eval $(call ci_target,$(otp)-native,$(otp)-native,native)))
+$(foreach otp,$(CI_ERLLVM),$(eval $(call ci_target,$(otp)-erllvm,$(otp)-native,erllvm)))
+
+$(foreach otp,$(filter-out $(ERLANG_OTP),$(CI_OTP)),$(eval $(call kerl_otp_target,$(otp))))
+$(foreach otp,$(filter-out $(ERLANG_HIPE),$(sort $(CI_HIPE) $(CI_ERLLVM))),$(eval $(call kerl_hipe_target,$(otp))))
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Continuous Integration targets:" \
+ " ci Run '$(MAKE) tests' on all configured Erlang versions." \
+ "" \
+ "The CI_OTP variable must be defined with the Erlang versions" \
+ "that must be tested. For example: CI_OTP = OTP-17.3.4 OTP-17.5.3"
+
+endif
+
+# Copyright (c) 2020, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifdef CONCUERROR_TESTS
+
+.PHONY: concuerror distclean-concuerror
+
+# Configuration
+
+CONCUERROR_LOGS_DIR ?= $(CURDIR)/logs
+CONCUERROR_OPTS ?=
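+
+# CONCUERROR_TESTS is a space-separated list of module:function pairs, as
+# consumed by the concuerror_target macro below. A hypothetical example:
+#
+#   make concuerror CONCUERROR_TESTS="my_mod:my_test other_mod:other_test"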
+
+# Core targets.
+
+check:: concuerror
+
+ifndef KEEP_LOGS
+distclean:: distclean-concuerror
+endif
+
+# Plugin-specific targets.
+
+$(ERLANG_MK_TMP)/Concuerror/bin/concuerror: | $(ERLANG_MK_TMP)
+ $(verbose) git clone https://github.com/parapluu/Concuerror $(ERLANG_MK_TMP)/Concuerror
+ $(verbose) $(MAKE) -C $(ERLANG_MK_TMP)/Concuerror
+
+$(CONCUERROR_LOGS_DIR):
+ $(verbose) mkdir -p $(CONCUERROR_LOGS_DIR)
+
+define concuerror_html_report
+<!DOCTYPE html>
+<html lang="en">
+<head>
+<meta charset="utf-8">
+<title>Concuerror HTML report</title>
+</head>
+<body>
+<h1>Concuerror HTML report</h1>
+<p>Generated on $(concuerror_date)</p>
+<ul>
+$(foreach t,$(concuerror_targets),<li><a href="$(t).txt">$(t)</a></li>)
+</ul>
+</body>
+</html>
+endef
+
+concuerror: $(addprefix concuerror-,$(subst :,-,$(CONCUERROR_TESTS)))
+ $(eval concuerror_date := $(shell date))
+ $(eval concuerror_targets := $^)
+ $(verbose) $(call core_render,concuerror_html_report,$(CONCUERROR_LOGS_DIR)/concuerror.html)
+
+define concuerror_target
+.PHONY: concuerror-$1-$2
+
+concuerror-$1-$2: test-build | $(ERLANG_MK_TMP)/Concuerror/bin/concuerror $(CONCUERROR_LOGS_DIR)
+ $(ERLANG_MK_TMP)/Concuerror/bin/concuerror \
+ --pa $(CURDIR)/ebin --pa $(TEST_DIR) \
+ -o $(CONCUERROR_LOGS_DIR)/concuerror-$1-$2.txt \
+ $$(CONCUERROR_OPTS) -m $1 -t $2
+endef
+
+$(foreach test,$(CONCUERROR_TESTS),$(eval $(call concuerror_target,$(firstword $(subst :, ,$(test))),$(lastword $(subst :, ,$(test))))))
+
+distclean-concuerror:
+ $(gen_verbose) rm -rf $(CONCUERROR_LOGS_DIR)
+
+endif
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: ct apps-ct distclean-ct
+
+# Configuration.
+
+CT_OPTS ?=
+
+ifneq ($(wildcard $(TEST_DIR)),)
+ifndef CT_SUITES
+CT_SUITES := $(sort $(subst _SUITE.erl,,$(notdir $(call core_find,$(TEST_DIR)/,*_SUITE.erl))))
+endif
+endif
+CT_SUITES ?=
+CT_LOGS_DIR ?= $(CURDIR)/logs
+
+# Core targets.
+
+tests:: ct
+
+ifndef KEEP_LOGS
+distclean:: distclean-ct
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Common_test targets:" \
+ " ct Run all the common_test suites for this project" \
+ "" \
+ "All your common_test suites have their associated targets." \
+ "A suite named http_SUITE can be ran using the ct-http target."
+
+# Plugin-specific targets.
+
+CT_RUN = ct_run \
+ -no_auto_compile \
+ -noinput \
+ -pa $(CURDIR)/ebin $(TEST_DIR) \
+ -dir $(TEST_DIR) \
+ -logdir $(CT_LOGS_DIR)
+
+ifeq ($(CT_SUITES),)
+ct: $(if $(IS_APP)$(ROOT_DIR),,apps-ct)
+else
+# We do not run tests when inside an apps/* application that has no test directory.
+ifneq ($(IS_APP)$(wildcard $(TEST_DIR)),1)
+ct: test-build $(if $(IS_APP)$(ROOT_DIR),,apps-ct)
+ $(verbose) mkdir -p $(CT_LOGS_DIR)
+ $(gen_verbose) $(CT_RUN) -sname ct_$(PROJECT) -suite $(addsuffix _SUITE,$(CT_SUITES)) $(CT_OPTS)
+endif
+endif
+
+ifneq ($(ALL_APPS_DIRS),)
+define ct_app_target
+apps-ct-$1: test-build
+ $$(MAKE) -C $1 ct IS_APP=1
+endef
+
+$(foreach app,$(ALL_APPS_DIRS),$(eval $(call ct_app_target,$(app))))
+
+apps-ct: $(addprefix apps-ct-,$(ALL_APPS_DIRS))
+endif
+
+ifdef t
+ifeq (,$(findstring :,$t))
+CT_EXTRA = -group $t
+else
+t_words = $(subst :, ,$t)
+CT_EXTRA = -group $(firstword $(t_words)) -case $(lastword $(t_words))
+endif
+else
+ifdef c
+CT_EXTRA = -case $c
+else
+CT_EXTRA =
+endif
+endif
+
+define ct_suite_target
+ct-$(1): test-build
+ $(verbose) mkdir -p $(CT_LOGS_DIR)
+ $(gen_verbose_esc) $(CT_RUN) -sname ct_$(PROJECT) -suite $(addsuffix _SUITE,$(1)) $(CT_EXTRA) $(CT_OPTS)
+endef
+
+$(foreach test,$(CT_SUITES),$(eval $(call ct_suite_target,$(test))))
+
+distclean-ct:
+ $(gen_verbose) rm -rf $(CT_LOGS_DIR)
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: plt distclean-plt dialyze
+
+# Configuration.
+
+DIALYZER_PLT ?= $(CURDIR)/.$(PROJECT).plt
+export DIALYZER_PLT
+
+PLT_APPS ?=
+DIALYZER_DIRS ?= --src -r $(wildcard src) $(ALL_APPS_DIRS)
+DIALYZER_OPTS ?= -Werror_handling -Wrace_conditions -Wunmatched_returns # -Wunderspecs
+DIALYZER_PLT_OPTS ?=
+
+# Core targets.
+
+check:: dialyze
+
+distclean:: distclean-plt
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Dialyzer targets:" \
+ " plt Build a PLT file for this project" \
+ " dialyze Analyze the project using Dialyzer"
+
+# Plugin-specific targets.
+
+define filter_opts.erl
+ Opts = init:get_plain_arguments(),
+ {Filtered, _} = lists:foldl(fun
+ (O, {Os, true}) -> {[O|Os], false};
+ (O = "-D", {Os, _}) -> {[O|Os], true};
+ (O = [\\$$-, \\$$D, _ | _], {Os, _}) -> {[O|Os], false};
+ (O = "-I", {Os, _}) -> {[O|Os], true};
+ (O = [\\$$-, \\$$I, _ | _], {Os, _}) -> {[O|Os], false};
+ (O = "-pa", {Os, _}) -> {[O|Os], true};
+ (_, Acc) -> Acc
+ end, {[], false}, Opts),
+ io:format("~s~n", [string:join(lists:reverse(Filtered), " ")]),
+ halt().
+endef
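+
+# filter_opts.erl keeps only the erlc options Dialyzer understands: -D and -I
+# (attached or as a separate following argument) and -pa. A sketch with
+# hypothetical input:
+#
+#   input:  +debug_info -DTEST -I include -pa ebin +warn_export_vars
+#   output: -DTEST -I include -pa ebin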
+
+# DIALYZER_PLT is a variable understood directly by Dialyzer.
+#
+# We append the path to erts at the end of the PLT. This works
+# because the PLT file is in the external term format and the
+# function binary_to_term/1 ignores any trailing data.
+$(DIALYZER_PLT): deps app
+ $(eval DEPS_LOG := $(shell test -f $(ERLANG_MK_TMP)/deps.log && \
+ while read p; do test -d $$p/ebin && echo $$p/ebin; done <$(ERLANG_MK_TMP)/deps.log))
+ $(verbose) dialyzer --build_plt $(DIALYZER_PLT_OPTS) --apps \
+ erts kernel stdlib $(PLT_APPS) $(OTP_DEPS) $(LOCAL_DEPS) $(DEPS_LOG) || test $$? -eq 2
+ $(verbose) $(ERL) -eval 'io:format("~n~s~n", [code:lib_dir(erts)]), halt().' >> $@
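+
+# The trailing-data property noted above can be observed in an Erlang shell
+# (a sketch for illustration only):
+#
+#   Bin = term_to_binary(plt_contents),
+#   plt_contents = binary_to_term(<<Bin/binary, "\nsome/erts/path">>).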
+
+plt: $(DIALYZER_PLT)
+
+distclean-plt:
+ $(gen_verbose) rm -f $(DIALYZER_PLT)
+
+ifneq ($(wildcard $(DIALYZER_PLT)),)
+dialyze: $(if $(filter --src,$(DIALYZER_DIRS)),,deps app)
+ $(verbose) if ! tail -n1 $(DIALYZER_PLT) | \
+ grep -q "^`$(ERL) -eval 'io:format("~s", [code:lib_dir(erts)]), halt().'`$$"; then \
+ rm $(DIALYZER_PLT); \
+ $(MAKE) plt; \
+ fi
+else
+dialyze: $(DIALYZER_PLT)
+endif
+ $(verbose) dialyzer --no_native `$(ERL) \
+ -eval "$(subst $(newline),,$(call escape_dquotes,$(call filter_opts.erl)))" \
+ -extra $(ERLC_OPTS)` $(DIALYZER_DIRS) $(DIALYZER_OPTS) $(if $(wildcard ebin/),-pa ebin/)
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-edoc edoc
+
+# Configuration.
+
+EDOC_OPTS ?=
+EDOC_SRC_DIRS ?=
+EDOC_OUTPUT ?= doc
+
+define edoc.erl
+ SrcPaths = lists:foldl(fun(P, Acc) ->
+ filelib:wildcard(atom_to_list(P) ++ "/{src,c_src}") ++ Acc
+ end, [], [$(call comma_list,$(patsubst %,'%',$(call core_native_path,$(EDOC_SRC_DIRS))))]),
+ DefaultOpts = [{dir, "$(EDOC_OUTPUT)"}, {source_path, SrcPaths}, {subpackages, false}],
+ edoc:application($(1), ".", [$(2)] ++ DefaultOpts),
+ halt(0).
+endef
+
+# Core targets.
+
+ifneq ($(strip $(EDOC_SRC_DIRS)$(wildcard doc/overview.edoc)),)
+docs:: edoc
+endif
+
+distclean:: distclean-edoc
+
+# Plugin-specific targets.
+
+edoc: distclean-edoc doc-deps
+ $(gen_verbose) $(call erlang,$(call edoc.erl,$(PROJECT),$(EDOC_OPTS)))
+
+distclean-edoc:
+ $(gen_verbose) rm -f $(EDOC_OUTPUT)/*.css $(EDOC_OUTPUT)/*.html $(EDOC_OUTPUT)/*.png $(EDOC_OUTPUT)/edoc-info
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Configuration.
+
+DTL_FULL_PATH ?=
+DTL_PATH ?= templates/
+DTL_PREFIX ?=
+DTL_SUFFIX ?= _dtl
+DTL_OPTS ?=
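+
+# Module naming, as implemented below: a hypothetical templates/users/list.dtl
+# compiles to users_list_dtl when DTL_FULL_PATH is set, and to list_dtl
+# otherwise (with the default empty DTL_PREFIX and the default _dtl
+# DTL_SUFFIX; names are lowercased at compile time).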
+
+# Verbosity.
+
+dtl_verbose_0 = @echo " DTL " $(filter %.dtl,$(?F));
+dtl_verbose = $(dtl_verbose_$(V))
+
+# Core targets.
+
+DTL_PATH := $(abspath $(DTL_PATH))
+DTL_FILES := $(sort $(call core_find,$(DTL_PATH),*.dtl))
+
+ifneq ($(DTL_FILES),)
+
+DTL_NAMES = $(addprefix $(DTL_PREFIX),$(addsuffix $(DTL_SUFFIX),$(DTL_FILES:$(DTL_PATH)/%.dtl=%)))
+DTL_MODULES = $(if $(DTL_FULL_PATH),$(subst /,_,$(DTL_NAMES)),$(notdir $(DTL_NAMES)))
+BEAM_FILES += $(addsuffix .beam,$(addprefix ebin/,$(DTL_MODULES)))
+
+ifneq ($(words $(DTL_FILES)),0)
+# Rebuild templates when the Makefile changes.
+$(ERLANG_MK_TMP)/last-makefile-change-erlydtl: $(MAKEFILE_LIST) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(DTL_FILES); \
+ fi
+ $(verbose) touch $@
+
+ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change-erlydtl
+endif
+
+define erlydtl_compile.erl
+ [begin
+ Module0 = case "$(strip $(DTL_FULL_PATH))" of
+ "" ->
+ filename:basename(F, ".dtl");
+ _ ->
+ "$(call core_native_path,$(DTL_PATH))/" ++ F2 = filename:rootname(F, ".dtl"),
+ re:replace(F2, "/", "_", [{return, list}, global])
+ end,
+ Module = list_to_atom("$(DTL_PREFIX)" ++ string:to_lower(Module0) ++ "$(DTL_SUFFIX)"),
+ case erlydtl:compile(F, Module, [$(DTL_OPTS)] ++ [{out_dir, "ebin/"}, return_errors]) of
+ ok -> ok;
+ {ok, _} -> ok
+ end
+ end || F <- string:tokens("$(1)", " ")],
+ halt().
+endef
+
+ebin/$(PROJECT).app:: $(DTL_FILES) | ebin/
+ $(if $(strip $?),\
+ $(dtl_verbose) $(call erlang,$(call erlydtl_compile.erl,$(call core_native_path,$?)),\
+ -pa ebin/))
+
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, Dave Cottlehuber <dch@skunkwerks.at>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-escript escript escript-zip
+
+# Configuration.
+
+ESCRIPT_NAME ?= $(PROJECT)
+ESCRIPT_FILE ?= $(ESCRIPT_NAME)
+
+ESCRIPT_SHEBANG ?= /usr/bin/env escript
+ESCRIPT_COMMENT ?= This is an -*- erlang -*- file
+ESCRIPT_EMU_ARGS ?= -escript main $(ESCRIPT_NAME)
+
+ESCRIPT_ZIP ?= 7z a -tzip -mx=9 -mtc=off $(if $(filter-out 0,$(V)),,> /dev/null)
+ESCRIPT_ZIP_FILE ?= $(ERLANG_MK_TMP)/escript.zip
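+
+# The escript produced below is a standard escript archive: a shebang line,
+# an Erlang comment line, an emulator-arguments line (%%!), then the zip
+# archive itself; the -escript main flag names the entry point module.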
+
+# Core targets.
+
+distclean:: distclean-escript
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Escript targets:" \
+ " escript Build an executable escript archive" \
+
+# Plugin-specific targets.
+
+escript-zip:: FULL=1
+escript-zip:: deps app
+ $(verbose) mkdir -p $(dir $(ESCRIPT_ZIP_FILE))
+ $(verbose) rm -f $(ESCRIPT_ZIP_FILE)
+ $(gen_verbose) cd .. && $(ESCRIPT_ZIP) $(ESCRIPT_ZIP_FILE) $(PROJECT)/ebin/*
+ifneq ($(DEPS),)
+ $(verbose) cd $(DEPS_DIR) && $(ESCRIPT_ZIP) $(ESCRIPT_ZIP_FILE) \
+ $(subst $(DEPS_DIR)/,,$(addsuffix /*,$(wildcard \
+ $(addsuffix /ebin,$(shell cat $(ERLANG_MK_TMP)/deps.log)))))
+endif
+
+escript:: escript-zip
+ $(gen_verbose) printf "%s\n" \
+ "#!$(ESCRIPT_SHEBANG)" \
+ "%% $(ESCRIPT_COMMENT)" \
+ "%%! $(ESCRIPT_EMU_ARGS)" > $(ESCRIPT_FILE)
+ $(verbose) cat $(ESCRIPT_ZIP_FILE) >> $(ESCRIPT_FILE)
+ $(verbose) chmod +x $(ESCRIPT_FILE)
+
+distclean-escript:
+ $(gen_verbose) rm -f $(ESCRIPT_FILE)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, Enrique Fernandez <enrique.fernandez@erlang-solutions.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: eunit apps-eunit
+
+# Configuration
+
+EUNIT_OPTS ?=
+EUNIT_ERL_OPTS ?=
+
+# Core targets.
+
+tests:: eunit
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "EUnit targets:" \
+ " eunit Run all the EUnit tests for this project"
+
+# Plugin-specific targets.
+
+define eunit.erl
+ $(call cover.erl)
+ CoverSetup(),
+ case eunit:test($1, [$(EUNIT_OPTS)]) of
+ ok -> ok;
+ error -> halt(2)
+ end,
+ CoverExport("$(call core_native_path,$(COVER_DATA_DIR))/eunit.coverdata"),
+ halt()
+endef
+
+EUNIT_ERL_OPTS += -pa $(TEST_DIR) $(CURDIR)/ebin
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+eunit: test-build cover-data-dir
+ $(gen_verbose) $(call erlang,$(call eunit.erl,['$(t)']),$(EUNIT_ERL_OPTS))
+else
+eunit: test-build cover-data-dir
+ $(gen_verbose) $(call erlang,$(call eunit.erl,fun $(t)/0),$(EUNIT_ERL_OPTS))
+endif
+else
+EUNIT_EBIN_MODS = $(notdir $(basename $(ERL_FILES) $(BEAM_FILES)))
+EUNIT_TEST_MODS = $(notdir $(basename $(call core_find,$(TEST_DIR)/,*.erl)))
+
+EUNIT_MODS = $(foreach mod,$(EUNIT_EBIN_MODS) $(filter-out \
+ $(patsubst %,%_tests,$(EUNIT_EBIN_MODS)),$(EUNIT_TEST_MODS)),'$(mod)')
+
+eunit: test-build $(if $(IS_APP)$(ROOT_DIR),,apps-eunit) cover-data-dir
+ifneq ($(wildcard src/ $(TEST_DIR)),)
+ $(gen_verbose) $(call erlang,$(call eunit.erl,[$(call comma_list,$(EUNIT_MODS))]),$(EUNIT_ERL_OPTS))
+endif
+
+ifneq ($(ALL_APPS_DIRS),)
+apps-eunit: test-build
+ $(verbose) eunit_retcode=0 ; for app in $(ALL_APPS_DIRS); do $(MAKE) -C $$app eunit IS_APP=1; \
+ [ $$? -ne 0 ] && eunit_retcode=1 ; done ; \
+ exit $$eunit_retcode
+endif
+endif
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter proper,$(DEPS) $(TEST_DEPS)),proper)
+.PHONY: proper
+
+# Targets.
+
+tests:: proper
+
+define proper_check.erl
+ $(call cover.erl)
+ code:add_pathsa([
+ "$(call core_native_path,$(CURDIR)/ebin)",
+ "$(call core_native_path,$(DEPS_DIR)/*/ebin)",
+ "$(call core_native_path,$(TEST_DIR))"]),
+ Module = fun(M) ->
+ [true] =:= lists:usort([
+ case atom_to_list(F) of
+ "prop_" ++ _ ->
+ io:format("Testing ~p:~p/0~n", [M, F]),
+ proper:quickcheck(M:F(), nocolors);
+ _ ->
+ true
+ end
+ || {F, 0} <- M:module_info(exports)])
+ end,
+ try begin
+ CoverSetup(),
+ Res = case $(1) of
+ all -> [true] =:= lists:usort([Module(M) || M <- [$(call comma_list,$(3))]]);
+ module -> Module($(2));
+ function -> proper:quickcheck($(2), nocolors)
+ end,
+ CoverExport("$(COVER_DATA_DIR)/proper.coverdata"),
+ Res
+ end of
+ true -> halt(0);
+ _ -> halt(1)
+ catch error:undef ->
+ io:format("Undefined property or module?~n~p~n", [erlang:get_stacktrace()]),
+ halt(0)
+ end.
+endef
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+proper: test-build cover-data-dir
+ $(verbose) $(call erlang,$(call proper_check.erl,module,$(t)))
+else
+proper: test-build cover-data-dir
+ $(verbose) echo Testing $(t)/0
+ $(verbose) $(call erlang,$(call proper_check.erl,function,$(t)()))
+endif
+else
+proper: test-build cover-data-dir
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(wildcard ebin/*.beam) $(call core_find,$(TEST_DIR)/,*.beam))))))
+ $(gen_verbose) $(call erlang,$(call proper_check.erl,all,undefined,$(MODULES)))
+endif
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Verbosity.
+
+proto_verbose_0 = @echo " PROTO " $(filter %.proto,$(?F));
+proto_verbose = $(proto_verbose_$(V))
+
+# Core targets.
+
+ifneq ($(wildcard src/),)
+ifneq ($(filter gpb protobuffs,$(BUILD_DEPS) $(DEPS)),)
+PROTO_FILES := $(filter %.proto,$(ALL_SRC_FILES))
+ERL_FILES += $(addprefix src/,$(patsubst %.proto,%_pb.erl,$(notdir $(PROTO_FILES))))
+
+ifeq ($(PROTO_FILES),)
+$(ERLANG_MK_TMP)/last-makefile-change-protobuffs:
+ $(verbose) :
+else
+# Rebuild proto files when the Makefile changes.
+# We exclude $(PROJECT).d to avoid a circular dependency.
+$(ERLANG_MK_TMP)/last-makefile-change-protobuffs: $(filter-out $(PROJECT).d,$(MAKEFILE_LIST)) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(PROTO_FILES); \
+ fi
+ $(verbose) touch $@
+
+$(PROJECT).d:: $(ERLANG_MK_TMP)/last-makefile-change-protobuffs
+endif
+
+ifeq ($(filter gpb,$(BUILD_DEPS) $(DEPS)),)
+define compile_proto.erl
+ [begin
+ protobuffs_compile:generate_source(F, [
+ {output_include_dir, "./include"},
+ {output_src_dir, "./src"}])
+ end || F <- string:tokens("$1", " ")],
+ halt().
+endef
+else
+define compile_proto.erl
+ [begin
+ gpb_compile:file(F, [
+ {include_as_lib, true},
+ {module_name_suffix, "_pb"},
+ {o_hrl, "./include"},
+ {o_erl, "./src"}])
+ end || F <- string:tokens("$1", " ")],
+ halt().
+endef
+endif
+
+ifneq ($(PROTO_FILES),)
+$(PROJECT).d:: $(PROTO_FILES)
+ $(verbose) mkdir -p ebin/ include/
+ $(if $(strip $?),$(proto_verbose) $(call erlang,$(call compile_proto.erl,$?)))
+endif
+endif
+endif
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: relx-rel relx-relup distclean-relx-rel run
+
+# Configuration.
+
+RELX ?= $(ERLANG_MK_TMP)/relx
+RELX_CONFIG ?= $(CURDIR)/relx.config
+
+RELX_URL ?= https://erlang.mk/res/relx-v3.27.0
+RELX_OPTS ?=
+RELX_OUTPUT_DIR ?= _rel
+RELX_REL_EXT ?=
+RELX_TAR ?= 1
+
+ifdef SFX
+ RELX_TAR = 1
+endif
+
+ifeq ($(firstword $(RELX_OPTS)),-o)
+ RELX_OUTPUT_DIR = $(word 2,$(RELX_OPTS))
+else
+ RELX_OPTS += -o $(RELX_OUTPUT_DIR)
+endif
+
+# Core targets.
+
+ifeq ($(IS_DEP),)
+ifneq ($(wildcard $(RELX_CONFIG)),)
+rel:: relx-rel
+
+relup:: relx-relup
+endif
+endif
+
+distclean:: distclean-relx-rel
+
+# Plugin-specific targets.
+
+$(RELX): | $(ERLANG_MK_TMP)
+ $(gen_verbose) $(call core_http_get,$(RELX),$(RELX_URL))
+ $(verbose) chmod +x $(RELX)
+
+relx-rel: $(RELX) rel-deps app
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) release
+ $(verbose) $(MAKE) relx-post-rel
+ifeq ($(RELX_TAR),1)
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) tar
+endif
+
+relx-relup: $(RELX) rel-deps app
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) release
+ $(MAKE) relx-post-rel
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) relup $(if $(filter 1,$(RELX_TAR)),tar)
+
+distclean-relx-rel:
+ $(gen_verbose) rm -rf $(RELX_OUTPUT_DIR)
+
+# Default hooks.
+relx-post-rel::
+ $(verbose) :
+
+# Run target.
+
+ifeq ($(wildcard $(RELX_CONFIG)),)
+run::
+else
+
+define get_relx_release.erl
+ {ok, Config} = file:consult("$(call core_native_path,$(RELX_CONFIG))"),
+ {release, {Name, Vsn0}, _} = lists:keyfind(release, 1, Config),
+ Vsn = case Vsn0 of
+ {cmd, Cmd} -> os:cmd(Cmd);
+ semver -> "";
+ {semver, _} -> "";
+ VsnStr -> Vsn0
+ end,
+ Extended = case lists:keyfind(extended_start_script, 1, Config) of
+ {_, true} -> "1";
+ _ -> ""
+ end,
+ io:format("~s ~s ~s", [Name, Vsn, Extended]),
+ halt(0).
+endef
+
+RELX_REL := $(shell $(call erlang,$(get_relx_release.erl)))
+RELX_REL_NAME := $(word 1,$(RELX_REL))
+RELX_REL_VSN := $(word 2,$(RELX_REL))
+RELX_REL_CMD := $(if $(word 3,$(RELX_REL)),console)
+
+ifeq ($(PLATFORM),msys2)
+RELX_REL_EXT := .cmd
+endif
+
+run:: all
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) $(RELX_REL_CMD)
+
+ifdef RELOAD
+rel::
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) ping
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) \
+ eval "io:format(\"~p~n\", [c:lm()])"
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Relx targets:" \
+ " run Compile the project, build the release and run it"
+
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, M Robert Martin <rob@version2beta.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: shell
+
+# Configuration.
+
+SHELL_ERL ?= erl
+SHELL_PATHS ?= $(CURDIR)/ebin $(TEST_DIR)
+SHELL_OPTS ?=
+
+ALL_SHELL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(SHELL_DEPS))
+
+# Core targets
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Shell targets:" \
+ " shell Run an erlang shell with SHELL_OPTS or reasonable default"
+
+# Plugin-specific targets.
+
+$(foreach dep,$(SHELL_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+build-shell-deps:
+else
+build-shell-deps: $(ALL_SHELL_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_SHELL_DEPS_DIRS) ; do \
+ if [ -z "$(strip $(FULL))" ] && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ else \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ fi \
+ done
+endif
+
+shell:: build-shell-deps
+ $(gen_verbose) $(SHELL_ERL) -pa $(SHELL_PATHS) $(SHELL_OPTS)
+
+# Copyright 2017, Stanislaw Klekot <dozzie@jarowit.net>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-sphinx sphinx
+
+# Configuration.
+
+SPHINX_BUILD ?= sphinx-build
+SPHINX_SOURCE ?= doc
+SPHINX_CONFDIR ?=
+SPHINX_FORMATS ?= html
+SPHINX_DOCTREES ?= $(ERLANG_MK_TMP)/sphinx.doctrees
+SPHINX_OPTS ?=
+
+#sphinx_html_opts =
+#sphinx_html_output = html
+#sphinx_man_opts =
+#sphinx_man_output = man
+#sphinx_latex_opts =
+#sphinx_latex_output = latex
+
+# Helpers.
+
+sphinx_build_0 = @echo " SPHINX" $1; $(SPHINX_BUILD) -N -q
+sphinx_build_1 = $(SPHINX_BUILD) -N
+sphinx_build_2 = set -x; $(SPHINX_BUILD)
+sphinx_build = $(sphinx_build_$(V))
+
+define sphinx.build
+$(call sphinx_build,$1) -b $1 -d $(SPHINX_DOCTREES) $(if $(SPHINX_CONFDIR),-c $(SPHINX_CONFDIR)) $(SPHINX_OPTS) $(sphinx_$1_opts) -- $(SPHINX_SOURCE) $(call sphinx.output,$1)
+
+endef
+
+define sphinx.output
+$(if $(sphinx_$1_output),$(sphinx_$1_output),$1)
+endef
+
+# Targets.
+
+ifneq ($(wildcard $(if $(SPHINX_CONFDIR),$(SPHINX_CONFDIR),$(SPHINX_SOURCE))/conf.py),)
+docs:: sphinx
+distclean:: distclean-sphinx
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Sphinx targets:" \
+ " sphinx Generate Sphinx documentation." \
+ "" \
+ "ReST sources and 'conf.py' file are expected in directory pointed by" \
+ "SPHINX_SOURCE ('doc' by default). SPHINX_FORMATS lists formats to build (only" \
+ "'html' format is generated by default); target directory can be specified by" \
+ 'setting sphinx_$${format}_output, for example: sphinx_html_output = output/html' \
+ "Additional Sphinx options can be set in SPHINX_OPTS."
+
+# Plugin-specific targets.
+
+sphinx:
+ $(foreach F,$(SPHINX_FORMATS),$(call sphinx.build,$F))
+
+distclean-sphinx:
+ $(gen_verbose) rm -rf $(filter-out $(SPHINX_SOURCE),$(foreach F,$(SPHINX_FORMATS),$(call sphinx.output,$F)))
+
+# Copyright (c) 2017, Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: show-ERL_LIBS show-ERLC_OPTS show-TEST_ERLC_OPTS
+
+show-ERL_LIBS:
+ @echo $(ERL_LIBS)
+
+show-ERLC_OPTS:
+ @$(foreach opt,$(ERLC_OPTS) -pa ebin -I include,echo "$(opt)";)
+
+show-TEST_ERLC_OPTS:
+ @$(foreach opt,$(TEST_ERLC_OPTS) -pa ebin -I include,echo "$(opt)";)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter triq,$(DEPS) $(TEST_DEPS)),triq)
+.PHONY: triq
+
+# Targets.
+
+tests:: triq
+
+define triq_check.erl
+ $(call cover.erl)
+ code:add_pathsa([
+ "$(call core_native_path,$(CURDIR)/ebin)",
+ "$(call core_native_path,$(DEPS_DIR)/*/ebin)",
+ "$(call core_native_path,$(TEST_DIR))"]),
+ try begin
+ CoverSetup(),
+ Res = case $(1) of
+ all -> [true] =:= lists:usort([triq:check(M) || M <- [$(call comma_list,$(3))]]);
+ module -> triq:check($(2));
+ function -> triq:check($(2))
+ end,
+ CoverExport("$(COVER_DATA_DIR)/triq.coverdata"),
+ Res
+ end of
+ true -> halt(0);
+ _ -> halt(1)
+ catch error:undef ->
+ io:format("Undefined property or module?~n~p~n", [erlang:get_stacktrace()]),
+ halt(0)
+ end.
+endef
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+triq: test-build cover-data-dir
+ $(verbose) $(call erlang,$(call triq_check.erl,module,$(t)))
+else
+triq: test-build cover-data-dir
+ $(verbose) echo Testing $(t)/0
+ $(verbose) $(call erlang,$(call triq_check.erl,function,$(t)()))
+endif
+else
+triq: test-build cover-data-dir
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(wildcard ebin/*.beam) $(call core_find,$(TEST_DIR)/,*.beam))))))
+ $(gen_verbose) $(call erlang,$(call triq_check.erl,all,undefined,$(MODULES)))
+endif
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Erlang Solutions Ltd.
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: xref distclean-xref
+
+# Configuration.
+
+ifeq ($(XREF_CONFIG),)
+ XREFR_ARGS :=
+else
+ XREFR_ARGS := -c $(XREF_CONFIG)
+endif
+
+XREFR ?= $(CURDIR)/xrefr
+export XREFR
+
+XREFR_URL ?= https://github.com/inaka/xref_runner/releases/download/1.1.0/xrefr
+
+# Core targets.
+
+help::
+ $(verbose) printf '%s\n' '' \
+ 'Xref targets:' \
+ ' xref Run Xrefr using $$XREF_CONFIG as the config file, if defined'
+
+distclean:: distclean-xref
+
+# Plugin-specific targets.
+
+$(XREFR):
+ $(gen_verbose) $(call core_http_get,$(XREFR),$(XREFR_URL))
+ $(verbose) chmod +x $(XREFR)
+
+xref: deps app $(XREFR)
+ $(gen_verbose) $(XREFR) $(XREFR_ARGS)
+
+distclean-xref:
+ $(gen_verbose) rm -rf $(XREFR)
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Viktor Söderqvist <viktor@zuiderkwast.se>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+COVER_REPORT_DIR ?= cover
+COVER_DATA_DIR ?= $(COVER_REPORT_DIR)
+
+ifdef COVER
+COVER_APPS ?= $(notdir $(ALL_APPS_DIRS))
+COVER_DEPS ?=
+endif
+
+# Code coverage for Common Test.
+
+ifdef COVER
+ifdef CT_RUN
+ifneq ($(wildcard $(TEST_DIR)),)
+test-build:: $(TEST_DIR)/ct.cover.spec
+
+$(TEST_DIR)/ct.cover.spec: cover-data-dir
+ $(gen_verbose) printf "%s\n" \
+ "{incl_app, '$(PROJECT)', details}." \
+ "{incl_dirs, '$(PROJECT)', [\"$(call core_native_path,$(CURDIR)/ebin)\" \
+ $(foreach a,$(COVER_APPS),$(comma) \"$(call core_native_path,$(APPS_DIR)/$a/ebin)\") \
+ $(foreach d,$(COVER_DEPS),$(comma) \"$(call core_native_path,$(DEPS_DIR)/$d/ebin)\")]}." \
+ '{export,"$(call core_native_path,$(abspath $(COVER_DATA_DIR))/ct.coverdata)"}.' > $@
+
+CT_RUN += -cover $(TEST_DIR)/ct.cover.spec
+endif
+endif
+endif
+
+# Code coverage for other tools.
+
+ifdef COVER
+define cover.erl
+ CoverSetup = fun() ->
+ Dirs = ["$(call core_native_path,$(CURDIR)/ebin)"
+ $(foreach a,$(COVER_APPS),$(comma) "$(call core_native_path,$(APPS_DIR)/$a/ebin)")
+ $(foreach d,$(COVER_DEPS),$(comma) "$(call core_native_path,$(DEPS_DIR)/$d/ebin)")],
+ [begin
+ case filelib:is_dir(Dir) of
+ false -> false;
+ true ->
+ case cover:compile_beam_directory(Dir) of
+ {error, _} -> halt(1);
+ _ -> true
+ end
+ end
+ end || Dir <- Dirs]
+ end,
+ CoverExport = fun(Filename) -> cover:export(Filename) end,
+endef
+else
+define cover.erl
+ CoverSetup = fun() -> ok end,
+ CoverExport = fun(_) -> ok end,
+endef
+endif
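+
+# Note the trailing commas: cover.erl is spliced at the start of the
+# eunit.erl, proper_check.erl and triq_check.erl snippets above, so it must
+# end mid-expression.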
+
+# Core targets
+
+ifdef COVER
+ifneq ($(COVER_REPORT_DIR),)
+tests::
+ $(verbose) $(MAKE) --no-print-directory cover-report
+endif
+
+cover-data-dir: | $(COVER_DATA_DIR)
+
+$(COVER_DATA_DIR):
+ $(verbose) mkdir -p $(COVER_DATA_DIR)
+else
+cover-data-dir:
+endif
+
+clean:: coverdata-clean
+
+ifneq ($(COVER_REPORT_DIR),)
+distclean:: cover-report-clean
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Cover targets:" \
+ " cover-report Generate a HTML coverage report from previously collected" \
+ " cover data." \
+ " all.coverdata Merge all coverdata files into all.coverdata." \
+ "" \
+ "If COVER=1 is set, coverage data is generated by the targets eunit and ct. The" \
+ "target tests additionally generates a HTML coverage report from the combined" \
+ "coverdata files from each of these testing tools. HTML reports can be disabled" \
+ "by setting COVER_REPORT_DIR to empty."
+
+# Plugin specific targets
+
+COVERDATA = $(filter-out $(COVER_DATA_DIR)/all.coverdata,$(wildcard $(COVER_DATA_DIR)/*.coverdata))
+
+.PHONY: coverdata-clean
+coverdata-clean:
+ $(gen_verbose) rm -f $(COVER_DATA_DIR)/*.coverdata $(TEST_DIR)/ct.cover.spec
+
+# Merge all coverdata files into one.
+define cover_export.erl
+ $(foreach f,$(COVERDATA),cover:import("$(f)") == ok orelse halt(1),)
+ cover:export("$(COVER_DATA_DIR)/$@"), halt(0).
+endef
+
+all.coverdata: $(COVERDATA) cover-data-dir
+ $(gen_verbose) $(call erlang,$(cover_export.erl))
+
+# These are only defined if COVER_REPORT_DIR is non-empty. Set COVER_REPORT_DIR to
+# empty if you want the coverdata files but not the HTML report.
+ifneq ($(COVER_REPORT_DIR),)
+
+.PHONY: cover-report-clean cover-report
+
+cover-report-clean:
+ $(gen_verbose) rm -rf $(COVER_REPORT_DIR)
+ifneq ($(COVER_REPORT_DIR),$(COVER_DATA_DIR))
+ $(if $(shell ls -A $(COVER_DATA_DIR)/),,$(verbose) rmdir $(COVER_DATA_DIR))
+endif
+
+ifeq ($(COVERDATA),)
+cover-report:
+else
+
+# Modules which include eunit.hrl always contain one line without coverage
+# because eunit defines test/0 which is never called. We compensate for this.
+EUNIT_HRL_MODS = $(subst $(space),$(comma),$(shell \
+ grep -H -e '^\s*-include.*include/eunit\.hrl"' src/*.erl \
+ | sed "s/^src\/\(.*\)\.erl:.*/'\1'/" | uniq))
+
+define cover_report.erl
+ $(foreach f,$(COVERDATA),cover:import("$(f)") == ok orelse halt(1),)
+ Ms = cover:imported_modules(),
+ [cover:analyse_to_file(M, "$(COVER_REPORT_DIR)/" ++ atom_to_list(M)
+ ++ ".COVER.html", [html]) || M <- Ms],
+ Report = [begin {ok, R} = cover:analyse(M, module), R end || M <- Ms],
+ EunitHrlMods = [$(EUNIT_HRL_MODS)],
+ Report1 = [{M, {Y, case lists:member(M, EunitHrlMods) of
+ true -> N - 1; false -> N end}} || {M, {Y, N}} <- Report],
+ TotalY = lists:sum([Y || {_, {Y, _}} <- Report1]),
+ TotalN = lists:sum([N || {_, {_, N}} <- Report1]),
+ Perc = fun(Y, N) -> case Y + N of 0 -> 100; S -> round(100 * Y / S) end end,
+ TotalPerc = Perc(TotalY, TotalN),
+ {ok, F} = file:open("$(COVER_REPORT_DIR)/index.html", [write]),
+ io:format(F, "<!DOCTYPE html><html>~n"
+ "<head><meta charset=\"UTF-8\">~n"
+ "<title>Coverage report</title></head>~n"
+ "<body>~n", []),
+ io:format(F, "<h1>Coverage</h1>~n<p>Total: ~p%</p>~n", [TotalPerc]),
+ io:format(F, "<table><tr><th>Module</th><th>Coverage</th></tr>~n", []),
+ [io:format(F, "<tr><td><a href=\"~p.COVER.html\">~p</a></td>"
+ "<td>~p%</td></tr>~n",
+ [M, M, Perc(Y, N)]) || {M, {Y, N}} <- Report1],
+ How = "$(subst $(space),$(comma)$(space),$(basename $(COVERDATA)))",
+ Date = "$(shell date -u "+%Y-%m-%dT%H:%M:%SZ")",
+ io:format(F, "</table>~n"
+ "<p>Generated using ~s and erlang.mk on ~s.</p>~n"
+ "</body></html>", [How, Date]),
+ halt().
+endef
+
+cover-report:
+ $(verbose) mkdir -p $(COVER_REPORT_DIR)
+ $(gen_verbose) $(call erlang,$(cover_report.erl))
+
+endif
+endif # ifneq ($(COVER_REPORT_DIR),)
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: sfx
+
+ifdef RELX_REL
+ifdef SFX
+
+# Configuration.
+
+SFX_ARCHIVE ?= $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/$(RELX_REL_NAME)-$(RELX_REL_VSN).tar.gz
+SFX_OUTPUT_FILE ?= $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME).run
+
+# Core targets.
+
+rel:: sfx
+
+# Plugin-specific targets.
+
+define sfx_stub
+#!/bin/sh
+
+TMPDIR=`mktemp -d`
+ARCHIVE=`awk '/^__ARCHIVE_BELOW__$$/ {print NR + 1; exit 0;}' $$0`
+FILENAME=$$(basename $$0)
+REL=$${FILENAME%.*}
+
+tail -n+$$ARCHIVE $$0 | tar -xzf - -C $$TMPDIR
+
+$$TMPDIR/bin/$$REL console
+RET=$$?
+
+rm -rf $$TMPDIR
+
+exit $$RET
+
+__ARCHIVE_BELOW__
+endef
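+
+# How the stub works: awk prints the line number just after the
+# __ARCHIVE_BELOW__ marker, and tail -n+$ARCHIVE streams everything from
+# that line on (the release tarball appended by the sfx target below)
+# straight into tar.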
+
+sfx:
+ $(verbose) $(call core_render,sfx_stub,$(SFX_OUTPUT_FILE))
+ $(gen_verbose) cat $(SFX_ARCHIVE) >> $(SFX_OUTPUT_FILE)
+ $(verbose) chmod +x $(SFX_OUTPUT_FILE)
+
+endif
+endif
+
+# Copyright (c) 2013-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# External plugins.
+
+DEP_PLUGINS ?=
+
+$(foreach p,$(DEP_PLUGINS),\
+ $(eval $(if $(findstring /,$p),\
+ $(call core_dep_plugin,$p,$(firstword $(subst /, ,$p))),\
+ $(call core_dep_plugin,$p/plugins.mk,$p))))
+
+help:: help-plugins
+
+help-plugins::
+ $(verbose) :
+
+# Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015-2016, Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Fetch dependencies recursively (without building them).
+
+.PHONY: fetch-deps fetch-doc-deps fetch-rel-deps fetch-test-deps \
+ fetch-shell-deps
+
+.PHONY: $(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+fetch-deps: $(ERLANG_MK_RECURSIVE_DEPS_LIST)
+fetch-doc-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST)
+fetch-rel-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST)
+fetch-test-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST)
+fetch-shell-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+ifneq ($(SKIP_DEPS),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST):
+ $(verbose) :> $@
+else
+# By default, we fetch "normal" dependencies. They are always included,
+# whatever type of dependencies is requested.
+#
+# $(ALL_DEPS_DIRS) includes $(BUILD_DEPS).
+
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_DOC_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_REL_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_TEST_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_SHELL_DEPS_DIRS)
+
+# Allow fetch-deps to be used with $(DEP_TYPES) to fetch multiple types of
+# dependencies with a single target.
+ifneq ($(filter doc,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_DOC_DEPS_DIRS)
+endif
+ifneq ($(filter rel,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_REL_DEPS_DIRS)
+endif
+ifneq ($(filter test,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_TEST_DEPS_DIRS)
+endif
+ifneq ($(filter shell,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_SHELL_DEPS_DIRS)
+endif
+
+ERLANG_MK_RECURSIVE_TMP_LIST := $(abspath $(ERLANG_MK_TMP)/recursive-tmp-deps-$(shell echo $$PPID).log)
+
+$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): | $(ERLANG_MK_TMP)
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_RECURSIVE_TMP_LIST)
+endif
+ $(verbose) touch $(ERLANG_MK_RECURSIVE_TMP_LIST)
+ $(verbose) set -e; for dep in $^ ; do \
+ if ! grep -qs ^$$dep$$ $(ERLANG_MK_RECURSIVE_TMP_LIST); then \
+ echo $$dep >> $(ERLANG_MK_RECURSIVE_TMP_LIST); \
+ if grep -qs -E "^[[:blank:]]*include[[:blank:]]+(erlang\.mk|.*/erlang\.mk|.*ERLANG_MK_FILENAME.*)$$" \
+ $$dep/GNUmakefile $$dep/makefile $$dep/Makefile; then \
+ $(MAKE) -C $$dep fetch-deps \
+ IS_DEP=1 \
+ ERLANG_MK_RECURSIVE_TMP_LIST=$(ERLANG_MK_RECURSIVE_TMP_LIST); \
+ fi \
+ fi \
+ done
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) sort < $(ERLANG_MK_RECURSIVE_TMP_LIST) | \
+ uniq > $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted
+ $(verbose) cmp -s $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted $@ \
+ || mv $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted $@
+ $(verbose) rm -f $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted
+ $(verbose) rm $(ERLANG_MK_RECURSIVE_TMP_LIST)
+endif
+endif # ifneq ($(SKIP_DEPS),)
+
+# List dependencies recursively.
+
+.PHONY: list-deps list-doc-deps list-rel-deps list-test-deps \
+ list-shell-deps
+
+list-deps: $(ERLANG_MK_RECURSIVE_DEPS_LIST)
+list-doc-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST)
+list-rel-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST)
+list-test-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST)
+list-shell-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+list-deps list-doc-deps list-rel-deps list-test-deps list-shell-deps:
+ $(verbose) cat $^
+
+# Query dependencies recursively.
+
+.PHONY: query-deps query-doc-deps query-rel-deps query-test-deps \
+ query-shell-deps
+
+QUERY ?= name fetch_method repo version
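+
+# Example: make query-deps QUERY="name version" prints one
+# "$(PROJECT): <name> <version>" line per dependency, recursively.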
+
+define query_target
+$(1): $(2) clean-tmp-query.log
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(4)
+endif
+ $(verbose) $(foreach dep,$(3),\
+ echo $(PROJECT): $(foreach q,$(QUERY),$(call query_$(q),$(dep))) >> $(4) ;)
+ $(if $(filter-out query-deps,$(1)),,\
+ $(verbose) set -e; for dep in $(3) ; do \
+ if grep -qs ^$$$$dep$$$$ $(ERLANG_MK_TMP)/query.log; then \
+ :; \
+ else \
+ echo $$$$dep >> $(ERLANG_MK_TMP)/query.log; \
+ $(MAKE) -C $(DEPS_DIR)/$$$$dep $$@ QUERY="$(QUERY)" IS_DEP=1 || true; \
+ fi \
+ done)
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) touch $(4)
+ $(verbose) cat $(4)
+endif
+endef
+
+clean-tmp-query.log:
+ifeq ($(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_TMP)/query.log
+endif
+
+$(eval $(call query_target,query-deps,$(ERLANG_MK_RECURSIVE_DEPS_LIST),$(BUILD_DEPS) $(DEPS),$(ERLANG_MK_QUERY_DEPS_FILE)))
+$(eval $(call query_target,query-doc-deps,$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST),$(DOC_DEPS),$(ERLANG_MK_QUERY_DOC_DEPS_FILE)))
+$(eval $(call query_target,query-rel-deps,$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST),$(REL_DEPS),$(ERLANG_MK_QUERY_REL_DEPS_FILE)))
+$(eval $(call query_target,query-test-deps,$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST),$(TEST_DEPS),$(ERLANG_MK_QUERY_TEST_DEPS_FILE)))
+$(eval $(call query_target,query-shell-deps,$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST),$(SHELL_DEPS),$(ERLANG_MK_QUERY_SHELL_DEPS_FILE)))
diff --git a/deps/rabbitmq_auth_backend_cache/priv/schema/rabbitmq_auth_backend_cache.schema b/deps/rabbitmq_auth_backend_cache/priv/schema/rabbitmq_auth_backend_cache.schema
new file mode 100644
index 0000000000..383931f20a
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_cache/priv/schema/rabbitmq_auth_backend_cache.schema
@@ -0,0 +1,62 @@
+
+%% ----------------------------------------------------------------------------
+%% RabbitMQ Authorization cache
+%% ----------------------------------------------------------------------------
+
+{mapping, "auth_cache.cached_backend", "rabbitmq_auth_backend_cache.cached_backend",[
+ {datatype, atom}
+]}.
+
+{mapping, "auth_cache.cached_backend.authn", "rabbitmq_auth_backend_cache.cached_backend",[
+ {datatype, atom}
+]}.
+
+{mapping, "auth_cache.cached_backend.authz", "rabbitmq_auth_backend_cache.cached_backend",[
+ {datatype, atom}
+]}.
+
+
+{translation, "rabbitmq_auth_backend_cache.cached_backend",
+fun(Conf) ->
+ BackendModule = fun
+ (internal) -> rabbit_auth_backend_internal;
+ (ldap) -> rabbit_auth_backend_ldap;
+ (http) -> rabbit_auth_backend_http;
+ (amqp) -> rabbit_auth_backend_amqp;
+ (dummy) -> rabbit_auth_backend_dummy;
+ (undefined) -> undefined;
+ (Other) when is_atom(Other) -> Other;
+ (_) -> cuttlefish:invalid("Unknown/unsupported auth backend")
+ end,
+ AuthN = BackendModule(cuttlefish:conf_get("auth_cache.cached_backend.authn", Conf, undefined)),
+ AuthZ = BackendModule(cuttlefish:conf_get("auth_cache.cached_backend.authz", Conf, undefined)),
+ Common = BackendModule(cuttlefish:conf_get("auth_cache.cached_backend", Conf, undefined)),
+ case {Common, AuthN, AuthZ} of
+ {undefined, V, undefined} when V =/= undefined ->
+ cuttlefish:warn(io_lib:format("Cached authZ backend undefined. Using ~p", [AuthN])),
+ {AuthN, AuthN};
+ {undefined, undefined, V} when V =/= undefined ->
+ cuttlefish:warn(io_lib:format("Cached authN backend undefined. Using ~p", [AuthZ])),
+ {AuthZ, AuthZ};
+ {V, undefined, undefined} when V =/= undefined ->
+ Common;
+ {undefined, V, V1} when V =/= undefined, V1 =/= undefined ->
+ {AuthN, AuthZ};
+        _ ->
+            cuttlefish:invalid("Cached auth backend already defined")
+ end
+end}.
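+
+%% Eg. the following rabbitmq.conf lines (illustrative) select different
+%% modules for authentication and authorisation:
+%%
+%%   auth_cache.cached_backend.authn = ldap
+%%   auth_cache.cached_backend.authz = http
+%%
+%% and the translation above resolves them to
+%% {rabbit_auth_backend_ldap, rabbit_auth_backend_http}.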
+
+{mapping, "auth_cache.cache_ttl", "rabbitmq_auth_backend_cache.cache_ttl", [
+ {datatype, integer}, {validators, ["non_negative_integer"]}
+]}.
+
+{mapping, "auth_cache.cache_module", "rabbitmq_auth_backend_cache.cache_module", [
+ {datatype, atom}
+]}.
+
+{mapping, "auth_cache.cache_refusals", "rabbitmq_auth_backend_cache.cache_refusals", [
+ {datatype, {enum, [true, false]}}
+]}.
diff --git a/deps/rabbitmq_auth_backend_cache/rabbitmq-components.mk b/deps/rabbitmq_auth_backend_cache/rabbitmq-components.mk
new file mode 100644
index 0000000000..b2a3be8b35
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_cache/rabbitmq-components.mk
@@ -0,0 +1,359 @@
+ifeq ($(.DEFAULT_GOAL),)
+# Define default goal to `all` because this file defines some targets
+# before the inclusion of erlang.mk leading to the wrong target becoming
+# the default.
+.DEFAULT_GOAL = all
+endif
+
+# PROJECT_VERSION defaults to:
+# 1. the version exported by rabbitmq-server-release;
+# 2. the version stored in `git-revisions.txt`, if it exists;
+# 3. a version based on git-describe(1), if it is a Git clone;
+# 4. 0.0.0
+
+PROJECT_VERSION := $(RABBITMQ_VERSION)
+
+ifeq ($(PROJECT_VERSION),)
+PROJECT_VERSION := $(shell \
+if test -f git-revisions.txt; then \
+ head -n1 git-revisions.txt | \
+ awk '{print $$$(words $(PROJECT_DESCRIPTION) version);}'; \
+else \
+ (git describe --dirty --abbrev=7 --tags --always --first-parent \
+ 2>/dev/null || echo rabbitmq_v0_0_0) | \
+ sed -e 's/^rabbitmq_v//' -e 's/^v//' -e 's/_/./g' -e 's/-/+/' \
+ -e 's/-/./g'; \
+fi)
+endif
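+
+# Eg. a hypothetical git-describe(1) output of `rabbitmq_v3_8_9-10-gabc1234`
+# becomes `3.8.9+10.gabc1234` after the sed(1) substitutions above.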
+
+# --------------------------------------------------------------------
+# RabbitMQ components.
+# --------------------------------------------------------------------
+
+# For RabbitMQ repositories, we want to check out branches which match
+# the parent project. For instance, if the parent project is on a
+# release tag, dependencies must be on the same release tag. If the
+# parent project is on a topic branch, dependencies must be on the same
+# topic branch, or fall back to `stable` or `master`, whichever was the
+# base of the topic branch.
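+#
+# Eg. if the parent project is checked out on a hypothetical topic branch
+# `my-feature`, each `git_rmq` dependency below is tried at `my-feature`
+# first, then at $(base_rmq_ref), then at `master`.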
+
+dep_amqp_client = git_rmq rabbitmq-erlang-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_amqp10_client = git_rmq rabbitmq-amqp1.0-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_amqp10_common = git_rmq rabbitmq-amqp1.0-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbit = git_rmq rabbitmq-server $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbit_common = git_rmq rabbitmq-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_amqp1_0 = git_rmq rabbitmq-amqp1.0 $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_amqp = git_rmq rabbitmq-auth-backend-amqp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_cache = git_rmq rabbitmq-auth-backend-cache $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_http = git_rmq rabbitmq-auth-backend-http $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_ldap = git_rmq rabbitmq-auth-backend-ldap $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_oauth2 = git_rmq rabbitmq-auth-backend-oauth2 $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_mechanism_ssl = git_rmq rabbitmq-auth-mechanism-ssl $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_aws = git_rmq rabbitmq-aws $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_boot_steps_visualiser = git_rmq rabbitmq-boot-steps-visualiser $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_cli = git_rmq rabbitmq-cli $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_codegen = git_rmq rabbitmq-codegen $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_consistent_hash_exchange = git_rmq rabbitmq-consistent-hash-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_ct_client_helpers = git_rmq rabbitmq-ct-client-helpers $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_ct_helpers = git_rmq rabbitmq-ct-helpers $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_delayed_message_exchange = git_rmq rabbitmq-delayed-message-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_dotnet_client = git_rmq rabbitmq-dotnet-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_event_exchange = git_rmq rabbitmq-event-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_federation = git_rmq rabbitmq-federation $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_federation_management = git_rmq rabbitmq-federation-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_java_client = git_rmq rabbitmq-java-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_client = git_rmq rabbitmq-jms-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_cts = git_rmq rabbitmq-jms-cts $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_topic_exchange = git_rmq rabbitmq-jms-topic-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_lvc_exchange = git_rmq rabbitmq-lvc-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management = git_rmq rabbitmq-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_agent = git_rmq rabbitmq-management-agent $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_exchange = git_rmq rabbitmq-management-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_themes = git_rmq rabbitmq-management-themes $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_message_timestamp = git_rmq rabbitmq-message-timestamp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_metronome = git_rmq rabbitmq-metronome $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_mqtt = git_rmq rabbitmq-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_objc_client = git_rmq rabbitmq-objc-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_aws = git_rmq rabbitmq-peer-discovery-aws $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_common = git_rmq rabbitmq-peer-discovery-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_consul = git_rmq rabbitmq-peer-discovery-consul $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_etcd = git_rmq rabbitmq-peer-discovery-etcd $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_k8s = git_rmq rabbitmq-peer-discovery-k8s $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_prometheus = git_rmq rabbitmq-prometheus $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_random_exchange = git_rmq rabbitmq-random-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_recent_history_exchange = git_rmq rabbitmq-recent-history-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_routing_node_stamp = git_rmq rabbitmq-routing-node-stamp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_rtopic_exchange = git_rmq rabbitmq-rtopic-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_server_release = git_rmq rabbitmq-server-release $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_sharding = git_rmq rabbitmq-sharding $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_shovel = git_rmq rabbitmq-shovel $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_shovel_management = git_rmq rabbitmq-shovel-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_stomp = git_rmq rabbitmq-stomp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_stream = git_rmq rabbitmq-stream $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_toke = git_rmq rabbitmq-toke $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_top = git_rmq rabbitmq-top $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_tracing = git_rmq rabbitmq-tracing $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_trust_store = git_rmq rabbitmq-trust-store $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_test = git_rmq rabbitmq-test $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_dispatch = git_rmq rabbitmq-web-dispatch $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_stomp = git_rmq rabbitmq-web-stomp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_stomp_examples = git_rmq rabbitmq-web-stomp-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt = git_rmq rabbitmq-web-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt_examples = git_rmq rabbitmq-web-mqtt-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_website = git_rmq rabbitmq-website $(current_rmq_ref) $(base_rmq_ref) live master
+dep_toke = git_rmq toke $(current_rmq_ref) $(base_rmq_ref) master
+
+dep_rabbitmq_public_umbrella = git_rmq rabbitmq-public-umbrella $(current_rmq_ref) $(base_rmq_ref) master
+
+# Third-party dependencies version pinning.
+#
+# We do that in this file, which is copied in all projects, to ensure
+# all projects use the same versions. It avoids conflicts and makes it
+# possible to work with rabbitmq-public-umbrella.
+
+dep_accept = hex 0.3.5
+dep_cowboy = hex 2.8.0
+dep_cowlib = hex 2.9.1
+dep_jsx = hex 2.11.0
+dep_lager = hex 3.8.0
+dep_prometheus = git https://github.com/deadtrickster/prometheus.erl.git master
+dep_ra = git https://github.com/rabbitmq/ra.git master
+dep_ranch = hex 1.7.1
+dep_recon = hex 2.5.1
+dep_observer_cli = hex 1.5.4
+dep_stdout_formatter = hex 0.2.4
+dep_sysmon_handler = hex 1.3.0
+
+RABBITMQ_COMPONENTS = amqp_client \
+ amqp10_common \
+ amqp10_client \
+ rabbit \
+ rabbit_common \
+ rabbitmq_amqp1_0 \
+ rabbitmq_auth_backend_amqp \
+ rabbitmq_auth_backend_cache \
+ rabbitmq_auth_backend_http \
+ rabbitmq_auth_backend_ldap \
+ rabbitmq_auth_backend_oauth2 \
+ rabbitmq_auth_mechanism_ssl \
+ rabbitmq_aws \
+ rabbitmq_boot_steps_visualiser \
+ rabbitmq_cli \
+ rabbitmq_codegen \
+ rabbitmq_consistent_hash_exchange \
+ rabbitmq_ct_client_helpers \
+ rabbitmq_ct_helpers \
+ rabbitmq_delayed_message_exchange \
+ rabbitmq_dotnet_client \
+ rabbitmq_event_exchange \
+ rabbitmq_federation \
+ rabbitmq_federation_management \
+ rabbitmq_java_client \
+ rabbitmq_jms_client \
+ rabbitmq_jms_cts \
+ rabbitmq_jms_topic_exchange \
+ rabbitmq_lvc_exchange \
+ rabbitmq_management \
+ rabbitmq_management_agent \
+ rabbitmq_management_exchange \
+ rabbitmq_management_themes \
+ rabbitmq_message_timestamp \
+ rabbitmq_metronome \
+ rabbitmq_mqtt \
+ rabbitmq_objc_client \
+ rabbitmq_peer_discovery_aws \
+ rabbitmq_peer_discovery_common \
+ rabbitmq_peer_discovery_consul \
+ rabbitmq_peer_discovery_etcd \
+ rabbitmq_peer_discovery_k8s \
+ rabbitmq_prometheus \
+ rabbitmq_random_exchange \
+ rabbitmq_recent_history_exchange \
+ rabbitmq_routing_node_stamp \
+ rabbitmq_rtopic_exchange \
+ rabbitmq_server_release \
+ rabbitmq_sharding \
+ rabbitmq_shovel \
+ rabbitmq_shovel_management \
+ rabbitmq_stomp \
+ rabbitmq_stream \
+ rabbitmq_toke \
+ rabbitmq_top \
+ rabbitmq_tracing \
+ rabbitmq_trust_store \
+ rabbitmq_web_dispatch \
+ rabbitmq_web_mqtt \
+ rabbitmq_web_mqtt_examples \
+ rabbitmq_web_stomp \
+ rabbitmq_web_stomp_examples \
+ rabbitmq_website
+
+# Erlang.mk does not rebuild dependencies by default once they have been
+# compiled, except for those listed in the `$(FORCE_REBUILD)` variable.
+#
+# We want all RabbitMQ components to always be rebuilt: this makes it
+# easier to work on several components at the same time.
+
+FORCE_REBUILD = $(RABBITMQ_COMPONENTS)
+
+# Several components have a custom erlang.mk/build.config, mainly
+# to disable eunit. Therefore, we can't use the top-level project's
+# erlang.mk copy.
+NO_AUTOPATCH += $(RABBITMQ_COMPONENTS)
+
+ifeq ($(origin current_rmq_ref),undefined)
+ifneq ($(wildcard .git),)
+current_rmq_ref := $(shell (\
+ ref=$$(LANG=C git branch --list | awk '/^\* \(.*detached / {ref=$$0; sub(/.*detached [^ ]+ /, "", ref); sub(/\)$$/, "", ref); print ref; exit;} /^\* / {ref=$$0; sub(/^\* /, "", ref); print ref; exit}');\
+ if test "$$(git rev-parse --short HEAD)" != "$$ref"; then echo "$$ref"; fi))
+else
+current_rmq_ref := master
+endif
+endif
+export current_rmq_ref
+
+ifeq ($(origin base_rmq_ref),undefined)
+ifneq ($(wildcard .git),)
+possible_base_rmq_ref := master
+ifeq ($(possible_base_rmq_ref),$(current_rmq_ref))
+base_rmq_ref := $(current_rmq_ref)
+else
+base_rmq_ref := $(shell \
+ (git rev-parse --verify -q master >/dev/null && \
+ git rev-parse --verify -q $(possible_base_rmq_ref) >/dev/null && \
+ git merge-base --is-ancestor $$(git merge-base master HEAD) $(possible_base_rmq_ref) && \
+ echo $(possible_base_rmq_ref)) || \
+ echo master)
+endif
+else
+base_rmq_ref := master
+endif
+endif
+export base_rmq_ref
+
+# Repository URL selection.
+#
+# First, we infer other components' location from the current project
+# repository URL, if it's a Git repository:
+# - We take the "origin" remote URL as the base
+# - The current project name and repository name are replaced by the
+#   target's properties:
+#   eg. rabbitmq-common is replaced by rabbitmq-codegen
+#   eg. rabbit_common is replaced by rabbitmq_codegen
+#
+# If cloning from this computed location fails, we fall back to the
+# RabbitMQ upstream, which is on GitHub.
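+#
+# Eg. if `origin` points at git@gitlab.example.com:team/rabbitmq-server.git
+# (a hypothetical mirror), rabbitmq-codegen is first tried at
+# git@gitlab.example.com:team/rabbitmq-codegen.git before GitHub is used.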
+
+# Macro to transform eg. "rabbit_common" to "rabbitmq-common".
+rmq_cmp_repo_name = $(word 2,$(dep_$(1)))
+
+# Upstream URL for the current project.
+RABBITMQ_COMPONENT_REPO_NAME := $(call rmq_cmp_repo_name,$(PROJECT))
+RABBITMQ_UPSTREAM_FETCH_URL ?= https://github.com/rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git
+RABBITMQ_UPSTREAM_PUSH_URL ?= git@github.com:rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git
+
+# Current URL for the current project. If this is not a Git clone,
+# default to the upstream Git repository.
+ifneq ($(wildcard .git),)
+git_origin_fetch_url := $(shell git config remote.origin.url)
+git_origin_push_url := $(shell git config remote.origin.pushurl || git config remote.origin.url)
+RABBITMQ_CURRENT_FETCH_URL ?= $(git_origin_fetch_url)
+RABBITMQ_CURRENT_PUSH_URL ?= $(git_origin_push_url)
+else
+RABBITMQ_CURRENT_FETCH_URL ?= $(RABBITMQ_UPSTREAM_FETCH_URL)
+RABBITMQ_CURRENT_PUSH_URL ?= $(RABBITMQ_UPSTREAM_PUSH_URL)
+endif
+
+# Macro to replace the following pattern:
+# 1. /foo.git -> /bar.git
+# 2. /foo -> /bar
+# 3. /foo/ -> /bar/
+subst_repo_name = $(patsubst %/$(1)/%,%/$(2)/%,$(patsubst %/$(1),%/$(2),$(patsubst %/$(1).git,%/$(2).git,$(3))))
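+
+# Eg. with this macro, https://github.com/rabbitmq/rabbitmq-common.git
+# becomes https://github.com/rabbitmq/rabbitmq-codegen.git when $(1) is
+# rabbitmq-common and $(2) is rabbitmq-codegen.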
+
+# Macro to replace both the project's name (eg. "rabbit_common") and
+# repository name (eg. "rabbitmq-common") by the target's equivalent.
+#
+# This macro is kept on one line because we don't want whitespaces in
+# the returned value, as it's used in $(dep_fetch_git_rmq) in a shell
+# single-quoted string.
+dep_rmq_repo = $(if $(dep_$(2)),$(call subst_repo_name,$(PROJECT),$(2),$(call subst_repo_name,$(RABBITMQ_COMPONENT_REPO_NAME),$(call rmq_cmp_repo_name,$(2)),$(1))),$(pkg_$(1)_repo))
+
+dep_rmq_commits = $(if $(dep_$(1)), \
+ $(wordlist 3,$(words $(dep_$(1))),$(dep_$(1))), \
+ $(pkg_$(1)_commit))
+
+define dep_fetch_git_rmq
+ fetch_url1='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_FETCH_URL),$(1))'; \
+ fetch_url2='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_FETCH_URL),$(1))'; \
+ if test "$$$$fetch_url1" != '$(RABBITMQ_CURRENT_FETCH_URL)' && \
+ git clone -q -n -- "$$$$fetch_url1" $(DEPS_DIR)/$(call dep_name,$(1)); then \
+ fetch_url="$$$$fetch_url1"; \
+ push_url='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_PUSH_URL),$(1))'; \
+ elif git clone -q -n -- "$$$$fetch_url2" $(DEPS_DIR)/$(call dep_name,$(1)); then \
+ fetch_url="$$$$fetch_url2"; \
+ push_url='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_PUSH_URL),$(1))'; \
+ fi; \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && ( \
+ $(foreach ref,$(call dep_rmq_commits,$(1)), \
+ git checkout -q $(ref) >/dev/null 2>&1 || \
+ ) \
+ (echo "error: no valid pathspec among: $(call dep_rmq_commits,$(1))" \
+ 1>&2 && false) ) && \
+ (test "$$$$fetch_url" = "$$$$push_url" || \
+ git remote set-url --push origin "$$$$push_url")
+endef
+
+# --------------------------------------------------------------------
+# Component distribution.
+# --------------------------------------------------------------------
+
+list-dist-deps::
+ @:
+
+prepare-dist::
+ @:
+
+# --------------------------------------------------------------------
+# Umbrella-specific settings.
+# --------------------------------------------------------------------
+
+# If the top-level project is a RabbitMQ component, we override
+# $(DEPS_DIR) for this project to point to the top-level's one.
+#
+# We also verify that the guessed DEPS_DIR is actually named `deps`, to
+# rule out any situation where finding a `rabbitmq-components.mk` in an
+# upper directory is merely a coincidence.
+
+possible_deps_dir_1 = $(abspath ..)
+possible_deps_dir_2 = $(abspath ../../..)
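+
+# Eg. when this plugin is checked out at `deps/rabbitmq_auth_backend_cache`
+# inside the umbrella, $(possible_deps_dir_1) resolves to that `deps`
+# directory and the checks below succeed.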
+
+ifeq ($(notdir $(possible_deps_dir_1)),deps)
+ifneq ($(wildcard $(possible_deps_dir_1)/../rabbitmq-components.mk),)
+deps_dir_overriden = 1
+DEPS_DIR ?= $(possible_deps_dir_1)
+DISABLE_DISTCLEAN = 1
+endif
+endif
+
+ifeq ($(deps_dir_overriden),)
+ifeq ($(notdir $(possible_deps_dir_2)),deps)
+ifneq ($(wildcard $(possible_deps_dir_2)/../rabbitmq-components.mk),)
+deps_dir_overriden = 1
+DEPS_DIR ?= $(possible_deps_dir_2)
+DISABLE_DISTCLEAN = 1
+endif
+endif
+endif
+
+ifneq ($(wildcard UMBRELLA.md),)
+DISABLE_DISTCLEAN = 1
+endif
+
+# We disable `make distclean` so $(DEPS_DIR) is not accidentally removed.
+
+ifeq ($(DISABLE_DISTCLEAN),1)
+ifneq ($(filter distclean distclean-deps,$(MAKECMDGOALS)),)
+SKIP_DEPS = 1
+endif
+endif
diff --git a/deps/rabbitmq_auth_backend_cache/src/rabbit_auth_backend_cache.erl b/deps/rabbitmq_auth_backend_cache/src/rabbit_auth_backend_cache.erl
new file mode 100644
index 0000000000..8a556723b5
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_cache/src/rabbit_auth_backend_cache.erl
@@ -0,0 +1,107 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_auth_backend_cache).
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-behaviour(rabbit_authn_backend).
+-behaviour(rabbit_authz_backend).
+
+-export([user_login_authentication/2, user_login_authorization/2,
+ check_vhost_access/3, check_resource_access/4, check_topic_access/4,
+ state_can_expire/0]).
+
+%% API
+
+user_login_authentication(Username, AuthProps) ->
+ with_cache(authn, {user_login_authentication, [Username, AuthProps]},
+ fun({ok, _}) -> success;
+ ({refused, _, _}) -> refusal;
+ ({error, _} = Err) -> Err;
+ (_) -> unknown
+ end).
+
+user_login_authorization(Username, AuthProps) ->
+ with_cache(authz, {user_login_authorization, [Username, AuthProps]},
+ fun({ok, _}) -> success;
+ ({ok, _, _}) -> success;
+ ({refused, _, _}) -> refusal;
+ ({error, _} = Err) -> Err;
+ (_) -> unknown
+ end).
+
+check_vhost_access(#auth_user{} = AuthUser, VHostPath, AuthzData) ->
+ with_cache(authz, {check_vhost_access, [AuthUser, VHostPath, AuthzData]},
+ fun(true) -> success;
+ (false) -> refusal;
+ ({error, _} = Err) -> Err;
+ (_) -> unknown
+ end).
+
+check_resource_access(#auth_user{} = AuthUser,
+ #resource{} = Resource, Permission, AuthzContext) ->
+ with_cache(authz, {check_resource_access, [AuthUser, Resource, Permission, AuthzContext]},
+ fun(true) -> success;
+ (false) -> refusal;
+ ({error, _} = Err) -> Err;
+ (_) -> unknown
+ end).
+
+check_topic_access(#auth_user{} = AuthUser,
+ #resource{} = Resource, Permission, Context) ->
+ with_cache(authz, {check_topic_access, [AuthUser, Resource, Permission, Context]},
+ fun(true) -> success;
+ (false) -> refusal;
+ ({error, _} = Err) -> Err;
+ (_) -> unknown
+ end).
+
+state_can_expire() -> false.
+
+%%
+%% Implementation
+%%
+
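+%% Look the requested operation up in the configured cache module; on a
+%% miss, invoke the underlying backend and cache its result for
+%% `cache_ttl` milliseconds (successes are always cached, refusals only
+%% when `cache_refusals` is true).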
+with_cache(BackendType, {F, A}, Fun) ->
+ {ok, AuthCache} = application:get_env(rabbitmq_auth_backend_cache,
+ cache_module),
+ case AuthCache:get({F, A}) of
+ {ok, Result} ->
+ Result;
+ {error, not_found} ->
+ Backend = get_cached_backend(BackendType),
+ {ok, TTL} = application:get_env(rabbitmq_auth_backend_cache,
+ cache_ttl),
+ BackendResult = apply(Backend, F, A),
+ case should_cache(BackendResult, Fun) of
+ true -> ok = AuthCache:put({F, A}, BackendResult, TTL);
+ false -> ok
+ end,
+ BackendResult
+ end.
+
+get_cached_backend(Type) ->
+ {ok, BackendConfig} = application:get_env(rabbitmq_auth_backend_cache,
+ cached_backend),
+ case BackendConfig of
+ Mod when is_atom(Mod) ->
+ Mod;
+ {N, Z} ->
+ case Type of
+ authn -> N;
+ authz -> Z
+ end
+ end.
+
+should_cache(Result, Fun) ->
+ {ok, CacheRefusals} = application:get_env(rabbitmq_auth_backend_cache,
+ cache_refusals),
+ case {Fun(Result), CacheRefusals} of
+ {success, _} -> true;
+ {refusal, true} -> true;
+ _ -> false
+ end.
diff --git a/deps/rabbitmq_auth_backend_cache/src/rabbit_auth_backend_cache_app.erl b/deps/rabbitmq_auth_backend_cache/src/rabbit_auth_backend_cache_app.erl
new file mode 100644
index 0000000000..c54f95393f
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_cache/src/rabbit_auth_backend_cache_app.erl
@@ -0,0 +1,37 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_auth_backend_cache_app).
+
+-behaviour(application).
+-export([start/2, stop/1]).
+
+-behaviour(supervisor).
+-export([init/1]).
+
+start(_Type, _StartArgs) ->
+ supervisor:start_link({local,?MODULE},?MODULE,[]).
+
+stop(_State) ->
+ ok.
+
+%%----------------------------------------------------------------------------
+
+init([]) ->
+ {ok, AuthCache} = application:get_env(rabbitmq_auth_backend_cache,
+ cache_module),
+
+ {ok, AuthCacheArgs} = application:get_env(rabbitmq_auth_backend_cache, cache_module_args),
+    % Load the module so that we can check its exported functions.
+ code:load_file(AuthCache),
+ ChildSpecs = case erlang:function_exported(AuthCache, start_link,
+ length(AuthCacheArgs)) of
+ true -> [{auth_cache, {AuthCache, start_link, AuthCacheArgs},
+ permanent, 5000, worker, [AuthCache]}];
+ false -> []
+ end,
+ {ok, {{one_for_one,3,10}, ChildSpecs}}.
diff --git a/deps/rabbitmq_auth_backend_cache/src/rabbit_auth_cache.erl b/deps/rabbitmq_auth_backend_cache/src/rabbit_auth_cache.erl
new file mode 100644
index 0000000000..e1b7418d15
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_cache/src/rabbit_auth_cache.erl
@@ -0,0 +1,35 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_auth_cache).
+
+-export([expiration/1, expired/1]).
+
+-ifdef(use_specs).
+
+-callback get(term()) -> term().
+
+-callback put(term(), term(), integer()) -> ok.
+
+-callback delete(term()) -> ok.
+
+-else.
+
+-export([behaviour_info/1]).
+
+behaviour_info(callbacks) ->
+ [{get, 1}, {put, 3}, {delete, 1}];
+behaviour_info(_Other) ->
+ undefined.
+
+-endif.
+
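+%% `expiration/1` turns a TTL in milliseconds into an absolute
+%% system-time deadline; `expired/1` tells whether that deadline has
+%% passed.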
+expiration(TTL) ->
+ erlang:system_time(milli_seconds) + TTL.
+
+expired(Exp) ->
+ erlang:system_time(milli_seconds) > Exp.
diff --git a/deps/rabbitmq_auth_backend_cache/src/rabbit_auth_cache_dict.erl b/deps/rabbitmq_auth_backend_cache/src/rabbit_auth_cache_dict.erl
new file mode 100644
index 0000000000..ce800a886e
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_cache/src/rabbit_auth_cache_dict.erl
@@ -0,0 +1,61 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_auth_cache_dict).
+-behaviour(gen_server).
+-compile({no_auto_import,[get/1]}).
+-compile({no_auto_import,[put/2]}).
+
+-behaviour(rabbit_auth_cache).
+
+-export([start_link/0,
+ get/1, put/3, delete/1]).
+
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
+ terminate/2, code_change/3]).
+
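+%% A minimal cache that keeps entries, and one expiry timer per key, in
+%% the gen_server's process dictionary.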
+start_link() -> gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
+
+get(Key) -> gen_server:call(?MODULE, {get, Key}).
+put(Key, Value, TTL) -> gen_server:cast(?MODULE, {put, Key, Value, TTL}).
+delete(Key) -> gen_server:call(?MODULE, {delete, Key}).
+
+init(_Args) -> {ok, nostate}.
+
+handle_call({get, Key}, _From, nostate) ->
+ Result = case erlang:get({items, Key}) of
+ undefined -> {error, not_found};
+ Val -> {ok, Val}
+ end,
+ {reply, Result, nostate};
+handle_call({delete, Key}, _From, nostate) ->
+ do_delete(Key),
+ {reply, ok, nostate}.
+
+handle_cast({put, Key, Value, TTL}, nostate) ->
+ erlang:put({items, Key}, Value),
+ {ok, TRef} = timer:apply_after(TTL, rabbit_auth_cache_dict, delete, [Key]),
+ erlang:put({timers, Key}, TRef),
+ {noreply, nostate}.
+
+handle_info(_Msg, nostate) ->
+ {noreply, nostate}.
+
+code_change(_OldVsn, nostate, _Extra) ->
+ {ok, nostate}.
+
+terminate(_Reason, nostate) ->
+ nostate.
+
+do_delete(Key) ->
+ erase({items, Key}),
+ case erlang:get({timers, Key}) of
+ undefined -> ok;
+ Tref -> timer:cancel(Tref),
+ erase({timers, Key})
+ end.
diff --git a/deps/rabbitmq_auth_backend_cache/src/rabbit_auth_cache_ets.erl b/deps/rabbitmq_auth_backend_cache/src/rabbit_auth_cache_ets.erl
new file mode 100644
index 0000000000..4cd36c2b3a
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_cache/src/rabbit_auth_cache_ets.erl
@@ -0,0 +1,71 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_auth_cache_ets).
+-behaviour(gen_server).
+-compile({no_auto_import,[get/1]}).
+-compile({no_auto_import,[put/2]}).
+
+-behaviour(rabbit_auth_cache).
+
+-export([start_link/0,
+ get/1, put/3, delete/1]).
+
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
+ terminate/2, code_change/3]).
+
+-record(state, {cache, timers, ttl}).
+
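+%% Cache backed by a private ETS table, with one expiry timer per key.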
+start_link() -> gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
+
+get(Key) -> gen_server:call(?MODULE, {get, Key}).
+put(Key, Value, TTL) ->
+ Expiration = rabbit_auth_cache:expiration(TTL),
+ gen_server:cast(?MODULE, {put, Key, Value, TTL, Expiration}).
+delete(Key) -> gen_server:call(?MODULE, {delete, Key}).
+
+init(_Args) ->
+ {ok, #state{cache = ets:new(?MODULE, [set, private]),
+ timers = ets:new(auth_cache_ets_timers, [set, private])}}.
+
+handle_call({get, Key}, _From, State = #state{cache = Table}) ->
+ Result = case ets:lookup(Table, Key) of
+ [{Key, {Exp, Val}}] -> case rabbit_auth_cache:expired(Exp) of
+ true -> {error, not_found};
+ false -> {ok, Val}
+ end;
+ [] -> {error, not_found}
+ end,
+ {reply, Result, State};
+handle_call({delete, Key}, _From, State = #state{cache = Table, timers = Timers}) ->
+ do_delete(Key, Table, Timers),
+ {reply, ok, State}.
+
+handle_cast({put, Key, Value, TTL, Expiration},
+ State = #state{cache = Table, timers = Timers}) ->
+ do_delete(Key, Table, Timers),
+ ets:insert(Table, {Key, {Expiration, Value}}),
+ {ok, TRef} = timer:apply_after(TTL, rabbit_auth_cache_ets, delete, [Key]),
+ ets:insert(Timers, {Key, TRef}),
+ {noreply, State}.
+
+handle_info(_Msg, State) ->
+ {noreply, State}.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+terminate(_Reason, State = #state{}) ->
+ State.
+
+do_delete(Key, Table, Timers) ->
+ true = ets:delete(Table, Key),
+ case ets:lookup(Timers, Key) of
+ [{Key, Tref}] -> timer:cancel(Tref),
+ true = ets:delete(Timers, Key);
+ [] -> ok
+ end.
diff --git a/deps/rabbitmq_auth_backend_cache/src/rabbit_auth_cache_ets_segmented.erl b/deps/rabbitmq_auth_backend_cache/src/rabbit_auth_cache_ets_segmented.erl
new file mode 100644
index 0000000000..cc7bcbfc02
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_cache/src/rabbit_auth_cache_ets_segmented.erl
@@ -0,0 +1,116 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_auth_cache_ets_segmented).
+-behaviour(gen_server).
+-behaviour(rabbit_auth_cache).
+
+-export([start_link/1,
+ get/1, put/3, delete/1]).
+-export([gc/0]).
+
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
+ terminate/2, code_change/3]).
+
+-record(state, {
+ segments = [],
+ gc_timer,
+ segment_size}).
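+
+%% Values are stored in time-boxed ETS segments. The periodic `gc`
+%% message drops whole segments whose time boundary has expired, instead
+%% of scanning and deleting individual keys.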
+
+start_link(SegmentSize) ->
+ gen_server:start_link({local, ?MODULE}, ?MODULE, [SegmentSize], []).
+
+get(Key) ->
+ case get_from_segments(Key) of
+ [] -> {error, not_found};
+ [V|_] -> {ok, V}
+ end.
+
+put(Key, Value, TTL) ->
+ Expiration = rabbit_auth_cache:expiration(TTL),
+ Segment = gen_server:call(?MODULE, {get_write_segment, Expiration}),
+ ets:insert(Segment, {Key, {Expiration, Value}}),
+ ok.
+
+delete(Key) ->
+ [ets:delete(Table, Key)
+ || Table <- gen_server:call(?MODULE, get_segment_tables)].
+
+gc() ->
+ case whereis(?MODULE) of
+ undefined -> ok;
+ Pid -> Pid ! gc
+ end.
+
+init([SegmentSize]) ->
+ InitSegment = ets:new(segment, [set, public]),
+ InitBoundary = rabbit_auth_cache:expiration(SegmentSize),
+ {ok, GCTimer} = timer:send_interval(SegmentSize * 2, gc),
+ {ok, #state{gc_timer = GCTimer, segment_size = SegmentSize,
+ segments = [{InitBoundary, InitSegment}]}}.
+
+handle_call({get_write_segment, Expiration}, _From,
+ State = #state{segments = Segments,
+ segment_size = SegmentSize}) ->
+ [{_, Segment} | _] = NewSegments = maybe_add_segment(Expiration, SegmentSize, Segments),
+ {reply, Segment, State#state{segments = NewSegments}};
+handle_call(get_segment_tables, _From, State = #state{segments = Segments}) ->
+ {_, Valid} = partition_expired_segments(Segments),
+ {_,Tables} = lists:unzip(Valid),
+ {reply, Tables, State}.
+
+handle_cast(_, State = #state{}) ->
+ {noreply, State}.
+
+handle_info(gc, State = #state{ segments = Segments }) ->
+ {Expired, Valid} = partition_expired_segments(Segments),
+ [ets:delete(Table) || {_, Table} <- Expired],
+ {noreply, State#state{ segments = Valid }};
+handle_info(_Msg, State) ->
+ {noreply, State}.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+terminate(_Reason, State = #state{gc_timer = Timer}) ->
+ timer:cancel(Timer),
+ State.
+
+partition_expired_segments(Segments) ->
+ lists:partition(
+ fun({Boundary, _}) -> rabbit_auth_cache:expired(Boundary) end,
+ Segments).
+
+maybe_add_segment(Expiration, SegmentSize, OldSegments) ->
+ case OldSegments of
+ [{OldBoundary, _}|_] when OldBoundary > Expiration ->
+ OldSegments;
+ _ ->
+ NewBoundary = Expiration + SegmentSize,
+ Segment = ets:new(segment, [set, public]),
+ [{NewBoundary, Segment} | OldSegments]
+ end.
+
+get_from_segments(Key) ->
+ Tables = gen_server:call(?MODULE, get_segment_tables),
+ lists:flatmap(
+ fun(undefined) -> [];
+ (T) ->
+ try ets:lookup(T, Key) of
+ [{Key, {Exp, Val}}] ->
+ case rabbit_auth_cache:expired(Exp) of
+ true -> [];
+ false -> [Val]
+ end;
+ [] -> []
+ % ETS table can be deleted concurrently.
+ catch
+ error:badarg -> []
+ end
+ end,
+ Tables).
+
diff --git a/deps/rabbitmq_auth_backend_cache/src/rabbit_auth_cache_ets_segmented_stateless.erl b/deps/rabbitmq_auth_backend_cache/src/rabbit_auth_cache_ets_segmented_stateless.erl
new file mode 100644
index 0000000000..fb959d2031
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_cache/src/rabbit_auth_cache_ets_segmented_stateless.erl
@@ -0,0 +1,129 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_auth_cache_ets_segmented_stateless).
+-behaviour(gen_server).
+-behaviour(rabbit_auth_cache).
+
+-export([start_link/1,
+ get/1, put/3, delete/1]).
+-export([gc/0]).
+
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
+ terminate/2, code_change/3]).
+
+-define(SEGMENT_TABLE, rabbit_auth_cache_ets_segmented_stateless_segment_table).
+
+-record(state, {gc_timer}).
+
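+%% Like rabbit_auth_cache_ets_segmented, but segments are tracked in a
+%% public named ETS table rather than in the gen_server state, so gets
+%% and puts only hit the server when a new segment must be created.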
+start_link(SegmentSize) ->
+ gen_server:start_link({local, ?MODULE}, ?MODULE, [SegmentSize], []).
+
+get(Key) ->
+ case get_from_segments(Key) of
+ [] -> {error, not_found};
+ [V|_] -> {ok, V}
+ end.
+
+put(Key, Value, TTL) ->
+ Expiration = rabbit_auth_cache:expiration(TTL),
+ [{_, SegmentSize}] = ets:lookup(?SEGMENT_TABLE, segment_size),
+ Segment = segment(Expiration, SegmentSize),
+ Table = case ets:lookup(?SEGMENT_TABLE, Segment) of
+ [{Segment, T}] -> T;
+ [] -> add_segment(Segment)
+ end,
+ ets:insert(Table, {Key, {Expiration, Value}}),
+ ok.
+
+delete(Key) ->
+ [ets:delete(Table, Key)
+ || Table <- get_all_segment_tables()].
+
+gc() ->
+ case whereis(?MODULE) of
+ undefined -> ok;
+ Pid -> Pid ! gc
+ end.
+
+init([SegmentSize]) ->
+ ets:new(?SEGMENT_TABLE, [ordered_set, named_table, public]),
+ ets:insert(?SEGMENT_TABLE, {segment_size, SegmentSize}),
+
+ InitSegment = segment(rabbit_auth_cache:expiration(SegmentSize), SegmentSize),
+ do_add_segment(InitSegment),
+
+ {ok, GCTimer} = timer:send_interval(SegmentSize * 2, gc),
+ {ok, #state{gc_timer = GCTimer}}.
+
+handle_call({add_segment, Segment}, _From, State) ->
+    %% Double-check: another process may have created the segment already.
+ Table = do_add_segment(Segment),
+ {reply, Table, State}.
+
+handle_cast(_, State = #state{}) ->
+ {noreply, State}.
+
+handle_info(gc, State = #state{}) ->
+ Now = erlang:system_time(milli_seconds),
+ MatchSpec = [{{'$1', '$2'}, [{'<', '$1', {const, Now}}], ['$2']}],
+ Expired = ets:select(?SEGMENT_TABLE, MatchSpec),
+ [ets:delete(Table) || Table <- Expired],
+ {noreply, State};
+handle_info(_Msg, State) ->
+ {noreply, State}.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+terminate(_Reason, State = #state{gc_timer = Timer}) ->
+ timer:cancel(Timer),
+ State.
+
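+%% Map an absolute expiration time to its segment's upper boundary.
+%% Eg. with a 1000 ms segment size, an expiration of 1234 maps to 2000.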
+segment(Expiration, SegmentSize) ->
+ Begin = ((Expiration div SegmentSize) * SegmentSize),
+ End = Begin + SegmentSize,
+ End.
+
+add_segment(Segment) ->
+ gen_server:call(?MODULE, {add_segment, Segment}).
+
+do_add_segment(Segment) ->
+ case ets:lookup(?SEGMENT_TABLE, Segment) of
+ [{Segment, Table}] -> Table;
+ [] -> Table = ets:new(segment, [set, public]),
+ ets:insert(?SEGMENT_TABLE, {Segment, Table}),
+ Table
+ end.
+
+get_segment_tables() ->
+ Now = erlang:system_time(milli_seconds),
+ MatchSpec = [{{'$1', '$2'}, [{'>', '$1', {const, Now}}], ['$_']}],
+ [V || {K, V} <- ets:select(?SEGMENT_TABLE, MatchSpec), K =/= segment_size].
+
+get_all_segment_tables() ->
+ [V || {K, V} <- ets:tab2list(?SEGMENT_TABLE), K =/= segment_size].
+
+get_from_segments(Key) ->
+ Tables = get_segment_tables(),
+ lists:flatmap(
+ fun(undefined) -> [];
+ (T) ->
+ try ets:lookup(T, Key) of
+ [{Key, {Exp, Val}}] ->
+ case rabbit_auth_cache:expired(Exp) of
+ true -> [];
+ false -> [Val]
+ end;
+ [] -> []
+ % ETS table can be deleted concurrently.
+ catch
+ error:badarg -> []
+ end
+ end,
+ Tables).
+
diff --git a/deps/rabbitmq_auth_backend_cache/test/config_schema_SUITE.erl b/deps/rabbitmq_auth_backend_cache/test/config_schema_SUITE.erl
new file mode 100644
index 0000000000..bff0f60de0
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_cache/test/config_schema_SUITE.erl
@@ -0,0 +1,55 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2016-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(config_schema_SUITE).
+
+-compile(export_all).
+
+all() ->
+ [
+ run_snippets
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ Config1 = rabbit_ct_helpers:run_setup_steps(Config),
+ rabbit_ct_config_schema:init_schemas(rabbitmq_auth_backend_cache, Config1).
+
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, Testcase}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_testcase(Testcase, Config) ->
+ Config1 = rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()),
+ rabbit_ct_helpers:testcase_finished(Config1, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+run_snippets(Config) ->
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, run_snippets1, [Config]).
+
+run_snippets1(Config) ->
+ rabbit_ct_config_schema:run_snippets(Config).
+
diff --git a/deps/rabbitmq_auth_backend_cache/test/config_schema_SUITE_data/rabbitmq_auth_backend_cache.snippets b/deps/rabbitmq_auth_backend_cache/test/config_schema_SUITE_data/rabbitmq_auth_backend_cache.snippets
new file mode 100644
index 0000000000..bb45fecddd
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_cache/test/config_schema_SUITE_data/rabbitmq_auth_backend_cache.snippets
@@ -0,0 +1,55 @@
+[{enable_backend,
+ "auth_backends.1 = cache
+ auth_cache.cached_backend = ldap",
+ [{rabbit,[{auth_backends,[rabbit_auth_backend_cache]}]},
+ {rabbitmq_auth_backend_cache,[{cached_backend,rabbit_auth_backend_ldap}]}],
+ [rabbitmq_auth_backend_cache]},
+ {auth_backend_cache,
+ "auth_backends.1 = cache",
+ [{rabbit,[{auth_backends,[rabbit_auth_backend_cache]}]}],
+ [rabbitmq_auth_backend_cache]},
+ {cached_backend,
+ "auth_backends.1 = cache
+ auth_cache.cached_backend = ldap",
+ [{rabbit,[{auth_backends,[rabbit_auth_backend_cache]}]},
+ {rabbitmq_auth_backend_cache,
+ [{cached_backend,rabbit_auth_backend_ldap}]}],
+ [rabbitmq_auth_backend_cache]},
+ {cached_authn_authz,
+ "auth_backends.1 = cache
+ auth_cache.cached_backend.authn = ldap
+ auth_cache.cached_backend.authz = http",
+ [{rabbit,[{auth_backends,[rabbit_auth_backend_cache]}]},
+ {rabbitmq_auth_backend_cache,
+ [{cached_backend,
+ {rabbit_auth_backend_ldap,rabbit_auth_backend_http}}]}],
+ [rabbitmq_auth_backend_cache]},
+ {cached_authn,
+ "auth_backends.1 = cache
+ auth_cache.cached_backend.authn = ldap",
+ [{rabbit,[{auth_backends,[rabbit_auth_backend_cache]}]},
+ {rabbitmq_auth_backend_cache,
+ [{cached_backend,
+ {rabbit_auth_backend_ldap,rabbit_auth_backend_ldap}}]}],
+ [rabbitmq_auth_backend_cache]},
+ {cache_ttl,
+ "auth_backends.1 = cache
+ auth_cache.cache_ttl = 200",
+ [{rabbit,[{auth_backends,[rabbit_auth_backend_cache]}]},
+ {rabbitmq_auth_backend_cache,[{cache_ttl,200}]}],
+ [rabbitmq_auth_backend_cache]},
+ {cache_module,
+ "auth_backends.1 = cache
+ auth_cache.cache_module = rabbit_auth_backend_ets_segmented",
+ [{rabbit,[{auth_backends,[rabbit_auth_backend_cache]}]},
+ {rabbitmq_auth_backend_cache,
+ [{cache_module,rabbit_auth_backend_ets_segmented}]}],
+ [rabbitmq_auth_backend_cache]},
+ {cache_refusals,
+ "auth_backends.1 = cache
+ auth_cache.cache_refusals = true",
+ [{rabbit,[{auth_backends,[rabbit_auth_backend_cache]}]},
+ {rabbitmq_auth_backend_cache,
+ [{cache_refusals,true}]}],
+ [rabbitmq_auth_backend_cache]}
+].
diff --git a/deps/rabbitmq_auth_backend_cache/test/rabbit_auth_backend_cache_SUITE.erl b/deps/rabbitmq_auth_backend_cache/test/rabbit_auth_backend_cache_SUITE.erl
new file mode 100644
index 0000000000..64752bf70b
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_cache/test/rabbit_auth_backend_cache_SUITE.erl
@@ -0,0 +1,169 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+-module(rabbit_auth_backend_cache_SUITE).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ authentication_response,
+ authorization_response,
+ access_response,
+ cache_expiration,
+ cache_expiration_topic
+ ].
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config, rabbit_ct_broker_helpers:setup_steps() ++
+ [ fun setup_env/1 ]).
+
+setup_env(Config) ->
+ true = lists:member(rabbitmq_auth_backend_cache,
+ rpc(Config, rabbit_plugins, active, [])),
+ application:set_env(rabbit, auth_backends, [rabbit_auth_backend_cache]),
+
+ Config.
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config, rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_testcase(access_response, Config) ->
+ ok = rpc(Config, rabbit_auth_backend_internal, set_topic_permissions, [
+ <<"guest">>, <<"/">>, <<"amq.topic">>, <<"^a">>, <<"^b">>, <<"acting-user">>
+ ]),
+ Config;
+init_per_testcase(_TestCase, Config) ->
+ Config.
+
+end_per_testcase(TestCase, Config) when TestCase == access_response;
+ TestCase == cache_expiration_topic ->
+ ok = rpc(Config, rabbit_auth_backend_internal, clear_topic_permissions, [
+ <<"guest">>, <<"/">>, <<"acting-user">>
+ ]),
+ Config;
+end_per_testcase(cache_expiration, Config) ->
+ rabbit_ct_broker_helpers:add_user(Config, <<"guest">>),
+ rabbit_ct_broker_helpers:set_full_permissions(Config, <<"/">>),
+ Config;
+end_per_testcase(_TestCase, Config) ->
+ Config.
+
+authentication_response(Config) ->
+ {ok, AuthRespOk} = rpc(Config,rabbit_auth_backend_internal, user_login_authentication, [<<"guest">>, [{password, <<"guest">>}]]),
+ {ok, AuthRespOk} = rpc(Config,rabbit_auth_backend_cache, user_login_authentication, [<<"guest">>, [{password, <<"guest">>}]]),
+ {refused, FailErr, FailArgs} = rpc(Config,rabbit_auth_backend_internal, user_login_authentication, [<<"guest">>, [{password, <<"notguest">>}]]),
+ {refused, FailErr, FailArgs} = rpc(Config,rabbit_auth_backend_cache, user_login_authentication, [<<"guest">>, [{password, <<"notguest">>}]]).
+
+authorization_response(Config) ->
+ AuthProps = [{password, <<"guest">>}],
+ {ok, #auth_user{impl = Impl, tags = Tags}} = rpc(Config,rabbit_auth_backend_internal, user_login_authentication, [<<"guest">>, AuthProps]),
+ {ok, Impl, Tags} = rpc(Config,rabbit_auth_backend_internal, user_login_authorization, [<<"guest">>, AuthProps]),
+ {ok, Impl, Tags} = rpc(Config,rabbit_auth_backend_cache, user_login_authorization, [<<"guest">>, AuthProps]),
+ {refused, FailErr, FailArgs} = rpc(Config,rabbit_auth_backend_internal, user_login_authorization, [<<"nonguest">>, AuthProps]),
+ {refused, FailErr, FailArgs} = rpc(Config,rabbit_auth_backend_cache, user_login_authorization, [<<"nonguest">>, AuthProps]).
+
+access_response(Config) ->
+ AvailableVhost = <<"/">>,
+ RestrictedVhost = <<"restricted">>,
+ AvailableResource = #resource{virtual_host = AvailableVhost, kind = exchange, name = <<"some">>},
+ RestrictedResource = #resource{virtual_host = RestrictedVhost, kind = exchange, name = <<"some">>},
+ TopicResource = #resource{virtual_host = AvailableVhost, kind = topic, name = <<"amq.topic">>},
+ AuthorisedTopicContext = #{routing_key => <<"a.b">>},
+ RestrictedTopicContext = #{routing_key => <<"b.b">>},
+
+ {ok, Auth} = rpc(Config,rabbit_auth_backend_internal, user_login_authentication, [<<"guest">>, [{password, <<"guest">>}]]),
+ true = rpc(Config,rabbit_auth_backend_internal, check_vhost_access, [Auth, AvailableVhost, undefined]),
+ true = rpc(Config,rabbit_auth_backend_cache, check_vhost_access, [Auth, AvailableVhost, undefined]),
+
+ false = rpc(Config,rabbit_auth_backend_internal, check_vhost_access, [Auth, RestrictedVhost, undefined]),
+ false = rpc(Config,rabbit_auth_backend_cache, check_vhost_access, [Auth, RestrictedVhost, undefined]),
+
+ true = rpc(Config,rabbit_auth_backend_internal, check_resource_access, [Auth, AvailableResource, configure, #{}]),
+ true = rpc(Config,rabbit_auth_backend_cache, check_resource_access, [Auth, AvailableResource, configure, #{}]),
+
+ false = rpc(Config,rabbit_auth_backend_internal, check_resource_access, [Auth, RestrictedResource, configure, #{}]),
+ false = rpc(Config,rabbit_auth_backend_cache, check_resource_access, [Auth, RestrictedResource, configure, #{}]),
+
+ true = rpc(Config,rabbit_auth_backend_internal, check_topic_access, [Auth, TopicResource, write, AuthorisedTopicContext]),
+ true = rpc(Config,rabbit_auth_backend_cache, check_topic_access, [Auth, TopicResource, write, AuthorisedTopicContext]),
+
+ false = rpc(Config,rabbit_auth_backend_internal, check_topic_access, [Auth, TopicResource, write, RestrictedTopicContext]),
+ false = rpc(Config,rabbit_auth_backend_cache, check_topic_access, [Auth, TopicResource, write, RestrictedTopicContext]).
+
+cache_expiration(Config) ->
+ AvailableVhost = <<"/">>,
+    AvailableResource = #resource{virtual_host = AvailableVhost, kind = exchange, name = <<"some">>},
+ {ok, Auth} = rpc(Config,rabbit_auth_backend_internal, user_login_authentication, [<<"guest">>, [{password, <<"guest">>}]]),
+ {ok, Auth} = rpc(Config,rabbit_auth_backend_cache, user_login_authentication, [<<"guest">>, [{password, <<"guest">>}]]),
+ true = rpc(Config,rabbit_auth_backend_internal, check_vhost_access, [Auth, AvailableVhost, undefined]),
+ true = rpc(Config,rabbit_auth_backend_cache, check_vhost_access, [Auth, AvailableVhost, undefined]),
+
+ true = rpc(Config,rabbit_auth_backend_cache, check_resource_access, [Auth, AvailableResource, configure, #{}]),
+ true = rpc(Config,rabbit_auth_backend_cache, check_resource_access, [Auth, AvailableResource, configure, #{}]),
+
+ rpc(Config,rabbit_auth_backend_internal, change_password, [<<"guest">>, <<"newpass">>, <<"acting-user">>]),
+
+ {refused, _, _} = rpc(Config,rabbit_auth_backend_internal, user_login_authentication, [<<"guest">>, [{password, <<"guest">>}]]),
+ {ok, Auth} = rpc(Config,rabbit_auth_backend_cache, user_login_authentication, [<<"guest">>, [{password, <<"guest">>}]]),
+ true = rpc(Config,rabbit_auth_backend_internal, check_vhost_access, [Auth, AvailableVhost, undefined]),
+ true = rpc(Config,rabbit_auth_backend_cache, check_vhost_access, [Auth, AvailableVhost, undefined]),
+
+ true = rpc(Config,rabbit_auth_backend_internal, check_resource_access, [Auth, AvailableResource, configure, #{}]),
+ true = rpc(Config,rabbit_auth_backend_cache, check_resource_access, [Auth, AvailableResource, configure, #{}]),
+
+ rpc(Config,rabbit_auth_backend_internal, delete_user, [<<"guest">>, <<"acting-user">>]),
+
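+    % The user is now gone from the internal backend, but the cache keeps
+    % answering from its (stale) entries until the TTL elapses: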
+ false = rpc(Config,rabbit_auth_backend_internal, check_vhost_access, [Auth, AvailableVhost, undefined]),
+ true = rpc(Config,rabbit_auth_backend_cache, check_vhost_access, [Auth, AvailableVhost, undefined]),
+
+ false = rpc(Config,rabbit_auth_backend_internal, check_resource_access, [Auth, AvailableResource, configure, #{}]),
+ true = rpc(Config,rabbit_auth_backend_cache, check_resource_access, [Auth, AvailableResource, configure, #{}]),
+
+ {ok, TTL} = rpc(Config, application, get_env, [rabbitmq_auth_backend_cache, cache_ttl]),
+ timer:sleep(TTL),
+
+ {refused, _, _} = rpc(Config,rabbit_auth_backend_cache, user_login_authentication, [<<"guest">>, [{password, <<"guest">>}]]),
+
+ false = rpc(Config,rabbit_auth_backend_internal, check_vhost_access, [Auth, AvailableVhost, undefined]),
+ false = rpc(Config,rabbit_auth_backend_cache, check_vhost_access, [Auth, AvailableVhost, undefined]),
+
+ false = rpc(Config,rabbit_auth_backend_internal, check_resource_access, [Auth, AvailableResource, configure, #{}]),
+ false = rpc(Config,rabbit_auth_backend_cache, check_resource_access, [Auth, AvailableResource, configure, #{}]).
+
+cache_expiration_topic(Config) ->
+ AvailableVhost = <<"/">>,
+ TopicResource = #resource{virtual_host = AvailableVhost, kind = topic, name = <<"amq.topic">>},
+ RestrictedTopicContext = #{routing_key => <<"b.b">>},
+
+ {ok, Auth} = rpc(Config,rabbit_auth_backend_internal, user_login_authentication, [<<"guest">>, [{password, <<"guest">>}]]),
+
+ % topic access is authorised if no permission is found
+ true = rpc(Config,rabbit_auth_backend_internal, check_topic_access, [Auth, TopicResource, write, RestrictedTopicContext]),
+ true = rpc(Config,rabbit_auth_backend_cache, check_topic_access, [Auth, TopicResource, write, RestrictedTopicContext]),
+
+ ok = rpc(Config, rabbit_auth_backend_internal, set_topic_permissions, [
+ <<"guest">>, <<"/">>, <<"amq.topic">>, <<"^a">>, <<"^b">>, <<"acting-user">>
+ ]),
+
+ false = rpc(Config,rabbit_auth_backend_internal, check_topic_access, [Auth, TopicResource, write, RestrictedTopicContext]),
+ true = rpc(Config,rabbit_auth_backend_cache, check_topic_access, [Auth, TopicResource, write, RestrictedTopicContext]),
+
+ {ok, TTL} = rpc(Config, application, get_env, [rabbitmq_auth_backend_cache, cache_ttl]),
+ timer:sleep(TTL),
+
+ false = rpc(Config,rabbit_auth_backend_internal, check_topic_access, [Auth, TopicResource, write, RestrictedTopicContext]),
+ false = rpc(Config,rabbit_auth_backend_cache, check_topic_access, [Auth, TopicResource, write, RestrictedTopicContext]).
+
+rpc(Config, M, F, A) ->
+ rabbit_ct_broker_helpers:rpc(Config, 0, M, F, A).
+
diff --git a/deps/rabbitmq_auth_backend_cache/test/rabbit_auth_cache_SUITE.erl b/deps/rabbitmq_auth_backend_cache/test/rabbit_auth_cache_SUITE.erl
new file mode 100644
index 0000000000..5acb5f5a13
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_cache/test/rabbit_auth_cache_SUITE.erl
@@ -0,0 +1,200 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+-module(rabbit_auth_cache_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, rabbit_auth_cache_dict},
+ {group, rabbit_auth_cache_ets},
+ {group, rabbit_auth_cache_ets_segmented},
+ {group, rabbit_auth_cache_ets_segmented_stateless}
+ ].
+
+groups() ->
+ CommonTests = [get_empty, get_put, get_expired, put_replace, get_deleted, random_timing],
+ [
+ {rabbit_auth_cache_dict, [sequence], CommonTests},
+ {rabbit_auth_cache_ets, [sequence], CommonTests},
+ {rabbit_auth_cache_ets_segmented, [sequence], CommonTests},
+ {rabbit_auth_cache_ets_segmented_stateless, [sequence], CommonTests}
+ ].
+
+init_per_suite(Config) ->
+ application:load(rabbitmq_auth_backend_cache),
+ {ok, TTL} = application:get_env(rabbitmq_auth_backend_cache, cache_ttl),
+ rabbit_ct_helpers:set_config(Config, {current_ttl, TTL}).
+
+end_per_suite(Config) ->
+ Config.
+
+init_per_group(Group, Config)
+ when Group =:= rabbit_auth_cache_dict; Group =:= rabbit_auth_cache_ets ->
+ set_auth_cache_module(Group, [], Config);
+init_per_group(Group, Config)
+ when Group =:= rabbit_auth_cache_ets_segmented;
+ Group =:= rabbit_auth_cache_ets_segmented_stateless ->
+ TTL = ?config(current_ttl, Config),
+ set_auth_cache_module(Group, [TTL * 2], Config);
+init_per_group(_, Config) -> Config.
+
+set_auth_cache_module(Module, Args, Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config, {auth_cache_module, Module}),
+ rabbit_ct_helpers:set_config(Config1, {auth_cache_module_args, Args}).
+
+end_per_group(_, Config) ->
+ Config.
+
+init_per_testcase(Test, Config) ->
+ Config1 = init_per_testcase0(Test, Config),
+ AuthCacheModule = ?config(auth_cache_module, Config1),
+ AuthCacheModuleArgs = ?config(auth_cache_module_args, Config1),
+ apply(AuthCacheModule, start_link, AuthCacheModuleArgs),
+ Config1.
+
+init_per_testcase0(get_expired, Config) ->
+ TTL = ?config(current_ttl, Config),
+ TempTTL = 500,
+ application:set_env(rabbitmq_auth_backend_cache, cache_ttl, TempTTL),
+ Config1 = rabbit_ct_helpers:set_config(Config, {saved_ttl, TTL}),
+ Config2 = rabbit_ct_helpers:set_config(Config1, {current_ttl, TempTTL}),
+ rabbit_ct_helpers:set_config(Config2,
+ {auth_cache_module_args,
+ new_auth_cache_module_args(TTL, Config2)});
+init_per_testcase0(random_timing, Config) ->
+ TTL = ?config(current_ttl, Config),
+ TempTTL = 500,
+ application:set_env(rabbitmq_auth_backend_cache, cache_ttl, TempTTL),
+ Config1 = rabbit_ct_helpers:set_config(Config, {saved_ttl, TTL}),
+ Config2 = rabbit_ct_helpers:set_config(Config1, {current_ttl, TempTTL}),
+ rabbit_ct_helpers:set_config(Config2,
+ {auth_cache_module_args,
+ new_auth_cache_module_args(TTL, Config2)});
+init_per_testcase0(_, Config) -> Config.
+
+end_per_testcase(Test, Config) ->
+ AuthCacheModule = ?config(auth_cache_module, Config),
+ % gen_server:stop(AuthCacheModule),
+ Pid = whereis(AuthCacheModule),
+ exit(Pid, normal),
+ end_per_testcase0(Test, Config).
+
+end_per_testcase0(get_expired, Config) ->
+ TTL = ?config(saved_ttl, Config),
+ application:set_env(rabbitmq_auth_backend_cache, cache_ttl, TTL),
+ Config1 = rabbit_ct_helpers:set_config(Config, {current_ttl, TTL}),
+ rabbit_ct_helpers:set_config(Config,
+ {auth_cache_module_args,
+ new_auth_cache_module_args(TTL, Config1)});
+end_per_testcase0(random_timing, Config) ->
+ TTL = ?config(saved_ttl, Config),
+ application:set_env(rabbitmq_auth_backend_cache, cache_ttl, TTL),
+ Config1 = rabbit_ct_helpers:set_config(Config, {current_ttl, TTL}),
+ rabbit_ct_helpers:set_config(Config,
+ {auth_cache_module_args,
+ new_auth_cache_module_args(TTL, Config1)});
+end_per_testcase0(_, Config) -> Config.
+
+new_auth_cache_module_args(TTL, Config) ->
+ case ?config(auth_cache_module_args, Config) of
+ [] -> [];
+ [_] -> [TTL * 2]
+ end.
+
+get_empty(Config) ->
+ AuthCacheModule = ?config(auth_cache_module, Config),
+ {error, not_found} = AuthCacheModule:get(some_key),
+ {error, not_found} = AuthCacheModule:get(other_key).
+
+get_put(Config) ->
+ AuthCacheModule = ?config(auth_cache_module, Config),
+ Key = some_key,
+ TTL = ?config(current_ttl, Config),
+ {error, not_found} = AuthCacheModule:get(Key),
+ ok = AuthCacheModule:put(Key, some_value, TTL),
+ {ok, some_value} = AuthCacheModule:get(Key).
+
+get_expired(Config) ->
+ TTL = ?config(current_ttl, Config),
+ AuthCacheModule = ?config(auth_cache_module, Config),
+ Key = some_key,
+ {error, not_found} = AuthCacheModule:get(Key),
+ ok = AuthCacheModule:put(Key, some_value, TTL),
+ {ok, some_value} = AuthCacheModule:get(Key),
+ timer:sleep(TTL div 2),
+ {ok, some_value} = AuthCacheModule:get(Key),
+ timer:sleep(TTL),
+ {error, not_found} = AuthCacheModule:get(Key).
+
+put_replace(Config) ->
+ AuthCacheModule = ?config(auth_cache_module, Config),
+ Key = some_key,
+ TTL = ?config(current_ttl, Config),
+ {error, not_found} = AuthCacheModule:get(Key),
+ ok = AuthCacheModule:put(Key, some_value, TTL),
+ {ok, some_value} = AuthCacheModule:get(Key),
+ ok = AuthCacheModule:put(Key, other_value, TTL),
+ {ok, other_value} = AuthCacheModule:get(Key).
+
+get_deleted(Config) ->
+ AuthCacheModule = ?config(auth_cache_module, Config),
+ Key = some_key,
+ TTL = ?config(current_ttl, Config),
+ {error, not_found} = AuthCacheModule:get(Key),
+ ok = AuthCacheModule:put(Key, some_value, TTL),
+ {ok, some_value} = AuthCacheModule:get(Key),
+ AuthCacheModule:delete(Key),
+ {error, not_found} = AuthCacheModule:get(Key).
+
+
+random_timing(Config) ->
+ random_timing(Config, 30000, 1000).
+
+random_timing(Config, MaxTTL, Parallel) ->
+ AuthCacheModule = ?config(auth_cache_module, Config),
+ RandomTTls = [{N, rabbit_misc:random(MaxTTL) + 1000} || N <- lists:seq(1, Parallel)],
+ Pid = self(),
+ Ref = make_ref(),
+ Pids = lists:map(
+ fun({N, TTL}) ->
+ spawn_link(
+ fun() ->
+ Key = N,
+ Value = {tuple_with, N, TTL},
+ {error, not_found} = AuthCacheModule:get(Key),
+ PutTime = erlang:system_time(milli_seconds),
+ ok = AuthCacheModule:put(Key, Value, TTL),
+ case AuthCacheModule:get(Key) of
+ {ok, Value} -> ok;
+ Other ->
+ case AuthCacheModule of
+ rabbit_auth_cache_ets_segmented ->
+ State = sys:get_state(AuthCacheModule),
+ Data = case State of
+ {state, Segments, _, _} when is_list(Segments) ->
+ [ets:tab2list(Segment) || {_, Segment} <- Segments];
+ _ -> []
+ end,
+ error({Other, Value, PutTime, erlang:system_time(milli_seconds), State, Data});
+ _ ->
+ error({Other, Value, PutTime, erlang:system_time(milli_seconds)})
+ end
+ end,
+ % the entry must be gone once its TTL (plus a safety margin) has elapsed
+ timer:sleep(TTL + 200),
+ {error, not_found} = AuthCacheModule:get(Key),
+ Pid ! {ok, self(), Ref}
+ end)
+ end,
+ RandomTTls),
+ [receive {ok, P, Ref} -> ok after MaxTTL * 2 -> error(timeout) end || P <- Pids].
+
+
+
diff --git a/deps/rabbitmq_auth_backend_http/.gitignore b/deps/rabbitmq_auth_backend_http/.gitignore
new file mode 100644
index 0000000000..cc013357c8
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/.gitignore
@@ -0,0 +1,22 @@
+.sw?
+.*.sw?
+*.beam
+/.erlang.mk/
+/cover/
+/deps/
+/doc/
+/ebin/
+/escript/
+/escript.lock
+/logs/
+/plugins/
+/plugins.lock
+/sbin/
+/sbin.lock
+
+test/config_schema_SUITE_data/schema/
+
+/rabbitmq_auth_backend_http.d
+
+.idea
+
diff --git a/deps/rabbitmq_auth_backend_http/.travis.yml b/deps/rabbitmq_auth_backend_http/.travis.yml
new file mode 100644
index 0000000000..546d553b18
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/.travis.yml
@@ -0,0 +1,61 @@
+# vim:sw=2:et:
+
+os: linux
+dist: xenial
+language: elixir
+notifications:
+ email:
+ recipients:
+ - alerts@rabbitmq.com
+ on_success: never
+ on_failure: always
+addons:
+ apt:
+ packages:
+ - awscli
+cache:
+ apt: true
+env:
+ global:
+ - secure: IRKRVDsvD0owEKM2GDKvuk5zSQJa+Z0Iu4JQuK78BBLTcmjVmWhHsaE1H9hwzCpWwC2yaMz/2C5GEEaK/ET1XnY48UvuihGWxpMnzbzhYuEx6CRSa9Ka7ik2LCP1lZ+t6OxBXT07PdJ2Ib3hPXcxgRHgOEAHfJ9gmMHgG9vrbiI=
+ - secure: SffTvt5GWsNq/Nc5epFn5ODIbnn17AkEUsrf3LQc8huKdN92x2v5rSrGgJAPN7WCRW1VPmlgwMF0tj6uY+oSKI+sotWb+dPdCyuXh/z4JhxX+S/ui4ItgbfD6DAui+eLXZxkvzGyv4NJ85aKhj5y8ztSqD7Fgeg0jErZwbNg6y0=
+
+ # $base_rmq_ref is used by rabbitmq-components.mk to select the
+ # appropriate branch for dependencies.
+ - base_rmq_ref=master
+
+elixir:
+ - '1.9'
+otp_release:
+ - '21.3'
+ - '22.2'
+
+install:
+ # This project being an Erlang one (we just set language to Elixir
+ # to ensure it is installed), we don't want Travis to run mix(1)
+ # automatically as it will break.
+ skip
+
+script:
+ # $current_rmq_ref is also used by rabbitmq-components.mk to select
+ # the appropriate branch for dependencies.
+ - make check-rabbitmq-components.mk
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+ - make xref
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+ - make tests
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+
+after_failure:
+ - |
+ cd "$TRAVIS_BUILD_DIR"
+ if test -d logs && test "$AWS_ACCESS_KEY_ID" && test "$AWS_SECRET_ACCESS_KEY"; then
+ archive_name="$(basename "$TRAVIS_REPO_SLUG")-$TRAVIS_JOB_NUMBER"
+
+ tar -c --transform "s/^logs/${archive_name}/" -f - logs | \
+ xz > "${archive_name}.tar.xz"
+
+ aws s3 cp "${archive_name}.tar.xz" s3://server-release-pipeline/travis-ci-logs/ \
+ --region eu-west-1 \
+ --acl public-read
+ fi
diff --git a/deps/rabbitmq_auth_backend_http/CODE_OF_CONDUCT.md b/deps/rabbitmq_auth_backend_http/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000000..08697906fd
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/CODE_OF_CONDUCT.md
@@ -0,0 +1,44 @@
+# Contributor Code of Conduct
+
+As contributors and maintainers of this project, and in the interest of fostering an open
+and welcoming community, we pledge to respect all people who contribute through reporting
+issues, posting feature requests, updating documentation, submitting pull requests or
+patches, and other activities.
+
+We are committed to making participation in this project a harassment-free experience for
+everyone, regardless of level of experience, gender, gender identity and expression,
+sexual orientation, disability, personal appearance, body size, race, ethnicity, age,
+religion, or nationality.
+
+Examples of unacceptable behavior by participants include:
+
+ * The use of sexualized language or imagery
+ * Personal attacks
+ * Trolling or insulting/derogatory comments
+ * Public or private harassment
+ * Publishing others' private information, such as physical or electronic addresses,
+ without explicit permission
+ * Other unethical or unprofessional conduct
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments,
+commits, code, wiki edits, issues, and other contributions that are not aligned to this
+Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors
+that they deem inappropriate, threatening, offensive, or harmful.
+
+By adopting this Code of Conduct, project maintainers commit themselves to fairly and
+consistently applying these principles to every aspect of managing this project. Project
+maintainers who do not follow or enforce the Code of Conduct may be permanently removed
+from the project team.
+
+This Code of Conduct applies both within project spaces and in public spaces when an
+individual is representing the project or its community.
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by
+contacting a project maintainer at [info@rabbitmq.com](mailto:info@rabbitmq.com). All complaints will
+be reviewed and investigated and will result in a response that is deemed necessary and
+appropriate to the circumstances. Maintainers are obligated to maintain confidentiality
+with regard to the reporter of an incident.
+
+This Code of Conduct is adapted from the
+[Contributor Covenant](https://contributor-covenant.org), version 1.3.0, available at
+[contributor-covenant.org/version/1/3/0/](https://contributor-covenant.org/version/1/3/0/)
diff --git a/deps/rabbitmq_auth_backend_http/CONTRIBUTING.md b/deps/rabbitmq_auth_backend_http/CONTRIBUTING.md
new file mode 100644
index 0000000000..b50bd82900
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/CONTRIBUTING.md
@@ -0,0 +1,103 @@
+Thank you for using RabbitMQ and for taking the time to contribute to the project.
+This document has two main parts:
+
+ * when and how to file GitHub issues for RabbitMQ projects
+ * how to submit pull requests
+
+They are intended to save you and the RabbitMQ maintainers some time, so please
+take a moment to read through them.
+
+## Overview
+
+### GitHub issues
+
+The RabbitMQ team uses GitHub issues for _specific actionable items_ that
+engineers can work on. This assumes the following:
+
+* GitHub issues are not used for questions, investigations, root cause
+ analysis, discussions of potential issues, etc (as defined by this team)
+* Enough information is provided by the reporter for maintainers to work with
+
+The team receives many questions through various venues every single
+day. Frequently, these questions do not include the necessary details
+the team needs to begin useful work. GitHub issues can very quickly
+turn into something impossible to navigate and make sense
+of. Because of this, questions, investigations, root cause analysis,
+and discussions of potential features are all considered to be
+[mailing list][rmq-users] material. If you are unsure where to begin,
+the [RabbitMQ users mailing list][rmq-users] is the right place.
+
+Getting all the details necessary to reproduce an issue, make a
+conclusion or even form a hypothesis about what's happening can take a
+fair amount of time. Please help others help you by providing a way to
+reproduce the behavior you're observing, or at least sharing as much
+relevant information as possible on the [RabbitMQ users mailing
+list][rmq-users].
+
+Please provide versions of the software used:
+
+ * RabbitMQ server
+ * Erlang
+ * Operating system version (and distribution, if applicable)
+ * All client libraries used
+ * RabbitMQ plugins (if applicable)
+
+The following information greatly helps in investigating and reproducing issues:
+
+ * RabbitMQ server logs
+ * A code example or terminal transcript that can be used to reproduce
+ * Full exception stack traces (a single line message is not enough!)
+ * `rabbitmqctl report` and `rabbitmqctl environment` output
+ * Other relevant details about the environment and workload, e.g. a traffic capture
+ * Feel free to edit out hostnames and other potentially sensitive information.
+
+To make collecting much of this and other environment information easier, use
+the [`rabbitmq-collect-env`][rmq-collect-env] script. It will produce an archive with
+server logs, operating system logs, output of certain diagnostics commands and so on.
+Please note that **no effort is made to scrub any information that may be sensitive**.
+
+### Pull Requests
+
+RabbitMQ projects use pull requests to discuss, collaborate on and accept code contributions.
+Pull requests are the primary place for discussing code changes.
+
+Here's the recommended workflow:
+
+ * [Fork the repository][github-fork] or repositories you plan on contributing to. If multiple
+ repositories are involved in addressing the same issue, please use the same branch name
+ in each repository
+ * Create a branch with a descriptive name in the relevant repositories
+ * Make your changes, run tests (usually with `make tests`), commit with a
+ [descriptive message][git-commit-msgs], push to your fork
+ * Submit pull requests with an explanation of what has been changed and **why**
+ * Submit a filled out and signed [Contributor Agreement][ca-agreement] if needed (see below)
+ * Be patient. We will get to your pull request eventually
+
+If what you are going to work on is a substantial change, please first
+ask the core team for their opinion on the [RabbitMQ users mailing list][rmq-users].
+
+## Running Tests
+
+ make tests
+
+## Code of Conduct
+
+See [CODE_OF_CONDUCT.md](./CODE_OF_CONDUCT.md).
+
+## Contributor Agreement
+
+If you want to contribute a non-trivial change, please submit a signed
+copy of our [Contributor Agreement][ca-agreement] around the time you
+submit your pull request. This will make it much easier (in some
+cases, possible) for the RabbitMQ team at Pivotal to merge your
+contribution.
+
+## Where to Ask Questions
+
+If something isn't clear, feel free to ask on our [mailing list][rmq-users].
+
+[rmq-collect-env]: https://github.com/rabbitmq/support-tools/blob/master/scripts/rabbitmq-collect-env
+[git-commit-msgs]: https://chris.beams.io/posts/git-commit/
+[rmq-users]: https://groups.google.com/forum/#!forum/rabbitmq-users
+[ca-agreement]: https://cla.pivotal.io/sign/rabbitmq
+[github-fork]: https://help.github.com/articles/fork-a-repo/
diff --git a/deps/rabbitmq_auth_backend_http/LICENSE b/deps/rabbitmq_auth_backend_http/LICENSE
new file mode 100644
index 0000000000..f2da65d175
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/LICENSE
@@ -0,0 +1,4 @@
+This package is licensed under the MPL 2.0. For the MPL 2.0, please see LICENSE-MPL-RabbitMQ.
+
+If you have any questions regarding licensing, please contact us at
+info@rabbitmq.com.
diff --git a/deps/rabbitmq_auth_backend_http/LICENSE-MPL-RabbitMQ b/deps/rabbitmq_auth_backend_http/LICENSE-MPL-RabbitMQ
new file mode 100644
index 0000000000..14e2f777f6
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/LICENSE-MPL-RabbitMQ
@@ -0,0 +1,373 @@
+Mozilla Public License Version 2.0
+==================================
+
+1. Definitions
+--------------
+
+1.1. "Contributor"
+ means each individual or legal entity that creates, contributes to
+ the creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+ means the combination of the Contributions of others (if any) used
+ by a Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+ means Source Code Form to which the initial Contributor has attached
+ the notice in Exhibit A, the Executable Form of such Source Code
+ Form, and Modifications of such Source Code Form, in each case
+ including portions thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ (a) that the initial Contributor has attached the notice described
+ in Exhibit B to the Covered Software; or
+
+ (b) that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the
+ terms of a Secondary License.
+
+1.6. "Executable Form"
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+ means a work that combines Covered Software with other material, in
+ a separate file or files, that is not Covered Software.
+
+1.8. "License"
+ means this document.
+
+1.9. "Licensable"
+ means having the right to grant, to the maximum extent possible,
+ whether at the time of the initial grant or subsequently, any and
+ all of the rights conveyed by this License.
+
+1.10. "Modifications"
+ means any of the following:
+
+ (a) any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered
+ Software; or
+
+ (b) any new file in Source Code Form that contains any Covered
+ Software.
+
+1.11. "Patent Claims" of a Contributor
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the
+ License, by the making, using, selling, offering for sale, having
+ made, import, or transfer of either its Contributions or its
+ Contributor Version.
+
+1.12. "Secondary License"
+ means either the GNU General Public License, Version 2.0, the GNU
+ Lesser General Public License, Version 2.1, the GNU Affero General
+ Public License, Version 3.0, or any later versions of those
+ licenses.
+
+1.13. "Source Code Form"
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that
+ controls, is controlled by, or is under common control with You. For
+ purposes of this definition, "control" means (a) the power, direct
+ or indirect, to cause the direction or management of such entity,
+ whether by contract or otherwise, or (b) ownership of more than
+ fifty percent (50%) of the outstanding shares or beneficial
+ ownership of such entity.
+
+2. License Grants and Conditions
+--------------------------------
+
+2.1. Grants
+
+Each Contributor hereby grants You a world-wide, royalty-free,
+non-exclusive license:
+
+(a) under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+(b) under Patent Claims of such Contributor to make, use, sell, offer
+ for sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+The licenses granted in Section 2.1 with respect to any Contribution
+become effective for each Contribution on the date the Contributor first
+distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+The licenses granted in this Section 2 are the only rights granted under
+this License. No additional rights or licenses will be implied from the
+distribution or licensing of Covered Software under this License.
+Notwithstanding Section 2.1(b) above, no patent license is granted by a
+Contributor:
+
+(a) for any code that a Contributor has removed from Covered Software;
+ or
+
+(b) for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+(c) under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+This License does not grant any rights in the trademarks, service marks,
+or logos of any Contributor (except as may be necessary to comply with
+the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+No Contributor makes additional grants as a result of Your choice to
+distribute the Covered Software under a subsequent version of this
+License (see Section 10.2) or under the terms of a Secondary License (if
+permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+Each Contributor represents that the Contributor believes its
+Contributions are its original creation(s) or it has sufficient rights
+to grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+This License is not intended to limit any rights You have under
+applicable copyright doctrines of fair use, fair dealing, or other
+equivalents.
+
+2.7. Conditions
+
+Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
+in Section 2.1.
+
+3. Responsibilities
+-------------------
+
+3.1. Distribution of Source Form
+
+All distribution of Covered Software in Source Code Form, including any
+Modifications that You create or to which You contribute, must be under
+the terms of this License. You must inform recipients that the Source
+Code Form of the Covered Software is governed by the terms of this
+License, and how they can obtain a copy of this License. You may not
+attempt to alter or restrict the recipients' rights in the Source Code
+Form.
+
+3.2. Distribution of Executable Form
+
+If You distribute Covered Software in Executable Form then:
+
+(a) such Covered Software must also be made available in Source Code
+ Form, as described in Section 3.1, and You must inform recipients of
+ the Executable Form how they can obtain a copy of such Source Code
+ Form by reasonable means in a timely manner, at a charge no more
+ than the cost of distribution to the recipient; and
+
+(b) You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter
+ the recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+You may create and distribute a Larger Work under terms of Your choice,
+provided that You also comply with the requirements of this License for
+the Covered Software. If the Larger Work is a combination of Covered
+Software with a work governed by one or more Secondary Licenses, and the
+Covered Software is not Incompatible With Secondary Licenses, this
+License permits You to additionally distribute such Covered Software
+under the terms of such Secondary License(s), so that the recipient of
+the Larger Work may, at their option, further distribute the Covered
+Software under the terms of either this License or such Secondary
+License(s).
+
+3.4. Notices
+
+You may not remove or alter the substance of any license notices
+(including copyright notices, patent notices, disclaimers of warranty,
+or limitations of liability) contained within the Source Code Form of
+the Covered Software, except that You may alter any license notices to
+the extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+You may choose to offer, and to charge a fee for, warranty, support,
+indemnity or liability obligations to one or more recipients of Covered
+Software. However, You may do so only on Your own behalf, and not on
+behalf of any Contributor. You must make it absolutely clear that any
+such warranty, support, indemnity, or liability obligation is offered by
+You alone, and You hereby agree to indemnify every Contributor for any
+liability incurred by such Contributor as a result of warranty, support,
+indemnity or liability terms You offer. You may include additional
+disclaimers of warranty and limitations of liability specific to any
+jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+---------------------------------------------------
+
+If it is impossible for You to comply with any of the terms of this
+License with respect to some or all of the Covered Software due to
+statute, judicial order, or regulation then You must: (a) comply with
+the terms of this License to the maximum extent possible; and (b)
+describe the limitations and the code they affect. Such description must
+be placed in a text file included with all distributions of the Covered
+Software under this License. Except to the extent prohibited by statute
+or regulation, such description must be sufficiently detailed for a
+recipient of ordinary skill to be able to understand it.
+
+5. Termination
+--------------
+
+5.1. The rights granted under this License will terminate automatically
+if You fail to comply with any of its terms. However, if You become
+compliant, then the rights granted under this License from a particular
+Contributor are reinstated (a) provisionally, unless and until such
+Contributor explicitly and finally terminates Your grants, and (b) on an
+ongoing basis, if such Contributor fails to notify You of the
+non-compliance by some reasonable means prior to 60 days after You have
+come back into compliance. Moreover, Your grants from a particular
+Contributor are reinstated on an ongoing basis if such Contributor
+notifies You of the non-compliance by some reasonable means, this is the
+first time You have received notice of non-compliance with this License
+from such Contributor, and You become compliant prior to 30 days after
+Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+infringement claim (excluding declaratory judgment actions,
+counter-claims, and cross-claims) alleging that a Contributor Version
+directly or indirectly infringes any patent, then the rights granted to
+You by any and all Contributors for the Covered Software under Section
+2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all
+end user license agreements (excluding distributors and resellers) which
+have been validly granted by You or Your distributors under this License
+prior to termination shall survive termination.
+
+************************************************************************
+* *
+* 6. Disclaimer of Warranty *
+* ------------------------- *
+* *
+* Covered Software is provided under this License on an "as is" *
+* basis, without warranty of any kind, either expressed, implied, or *
+* statutory, including, without limitation, warranties that the *
+* Covered Software is free of defects, merchantable, fit for a *
+* particular purpose or non-infringing. The entire risk as to the *
+* quality and performance of the Covered Software is with You. *
+* Should any Covered Software prove defective in any respect, You *
+* (not any Contributor) assume the cost of any necessary servicing, *
+* repair, or correction. This disclaimer of warranty constitutes an *
+* essential part of this License. No use of any Covered Software is *
+* authorized under this License except under this disclaimer. *
+* *
+************************************************************************
+
+************************************************************************
+* *
+* 7. Limitation of Liability *
+* -------------------------- *
+* *
+* Under no circumstances and under no legal theory, whether tort *
+* (including negligence), contract, or otherwise, shall any *
+* Contributor, or anyone who distributes Covered Software as *
+* permitted above, be liable to You for any direct, indirect, *
+* special, incidental, or consequential damages of any character *
+* including, without limitation, damages for lost profits, loss of *
+* goodwill, work stoppage, computer failure or malfunction, or any *
+* and all other commercial damages or losses, even if such party *
+* shall have been informed of the possibility of such damages. This *
+* limitation of liability shall not apply to liability for death or *
+* personal injury resulting from such party's negligence to the *
+* extent applicable law prohibits such limitation. Some *
+* jurisdictions do not allow the exclusion or limitation of *
+* incidental or consequential damages, so this exclusion and *
+* limitation may not apply to You. *
+* *
+************************************************************************
+
+8. Litigation
+-------------
+
+Any litigation relating to this License may be brought only in the
+courts of a jurisdiction where the defendant maintains its principal
+place of business and such litigation shall be governed by laws of that
+jurisdiction, without reference to its conflict-of-law provisions.
+Nothing in this Section shall prevent a party's ability to bring
+cross-claims or counter-claims.
+
+9. Miscellaneous
+----------------
+
+This License represents the complete agreement concerning the subject
+matter hereof. If any provision of this License is held to be
+unenforceable, such provision shall be reformed only to the extent
+necessary to make it enforceable. Any law or regulation which provides
+that the language of a contract shall be construed against the drafter
+shall not be used to construe this License against a Contributor.
+
+10. Versions of the License
+---------------------------
+
+10.1. New Versions
+
+Mozilla Foundation is the license steward. Except as provided in Section
+10.3, no one other than the license steward has the right to modify or
+publish new versions of this License. Each version will be given a
+distinguishing version number.
+
+10.2. Effect of New Versions
+
+You may distribute the Covered Software under the terms of the version
+of the License under which You originally received the Covered Software,
+or under the terms of any subsequent version published by the license
+steward.
+
+10.3. Modified Versions
+
+If you create software not governed by this License, and you want to
+create a new license for such software, you may create and use a
+modified version of this License if you rename the license and remove
+any references to the name of the license steward (except to note that
+such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+Licenses
+
+If You choose to distribute Source Code Form that is Incompatible With
+Secondary Licenses under the terms of this version of the License, the
+notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+-------------------------------------------
+
+ This Source Code Form is subject to the terms of the Mozilla Public
+ License, v. 2.0. If a copy of the MPL was not distributed with this
+ file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular
+file, then You may include the notice in a location (such as a LICENSE
+file in a relevant directory) where a recipient would be likely to look
+for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+---------------------------------------------------------
+
+ This Source Code Form is "Incompatible With Secondary Licenses", as
+ defined by the Mozilla Public License, v. 2.0.
diff --git a/deps/rabbitmq_auth_backend_http/Makefile b/deps/rabbitmq_auth_backend_http/Makefile
new file mode 100644
index 0000000000..56480b4aa9
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/Makefile
@@ -0,0 +1,33 @@
+PROJECT = rabbitmq_auth_backend_http
+PROJECT_DESCRIPTION = RabbitMQ HTTP Authentication Backend
+PROJECT_MOD = rabbit_auth_backend_http_app
+
+define PROJECT_ENV
+[
+ {http_method, get},
+ {user_path, "http://localhost:8000/auth/user"},
+ {vhost_path, "http://localhost:8000/auth/vhost"},
+ {resource_path, "http://localhost:8000/auth/resource"},
+ {topic_path, "http://localhost:8000/auth/topic"}
+ ]
+endef
+
+define PROJECT_APP_EXTRA_KEYS
+ {broker_version_requirements, []}
+endef
+
+LOCAL_DEPS = inets
+DEPS = rabbit_common rabbit amqp_client
+TEST_DEPS = rabbitmq_ct_helpers rabbitmq_ct_client_helpers cowboy
+
+DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-plugin.mk
+DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk
+
+# FIXME: Use erlang.mk patched for RabbitMQ, while waiting for PRs to be
+# reviewed and merged.
+
+ERLANG_MK_REPO = https://github.com/rabbitmq/erlang.mk.git
+ERLANG_MK_COMMIT = rabbitmq-tmp
+
+include rabbitmq-components.mk
+include erlang.mk
diff --git a/deps/rabbitmq_auth_backend_http/README.md b/deps/rabbitmq_auth_backend_http/README.md
new file mode 100644
index 0000000000..651e32d7d6
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/README.md
@@ -0,0 +1,170 @@
+# Overview
+
+This plugin provides the ability for your RabbitMQ server to perform
+authentication (determining who can log in) and authorisation
+(determining what permissions they have) by making requests to an HTTP
+server.
+
+This plugin can put a significant amount of load on its backing service.
+We recommend using it together with [rabbitmq_auth_backend_cache](http://github.com/rabbitmq/rabbitmq-auth-backend-cache)
+with a reasonable caching interval (e.g. 1-3 minutes).
+
+
+## Project Maturity
+
+As of 3.7.0, this plugin is distributed with RabbitMQ.
+
+
+## RabbitMQ Version Requirements
+
+As noted above, this plugin is distributed with RabbitMQ as of 3.7.0.
+
+As with all [authentication plugins](http://rabbitmq.com/access-control.html), this one requires RabbitMQ server
+2.3.1 or later.
+
+## Using with RabbitMQ 3.6.x
+
+Install the corresponding .ez files from our
+[Community Plugins page](http://www.rabbitmq.com/community-plugins.html). Note that different
+releases of this plugin support different versions of RabbitMQ.
+
+## Enabling the Plugin
+
+First enable the plugin using `rabbitmq-plugins`:
+
+ rabbitmq-plugins enable rabbitmq_auth_backend_http
+
+
+## Configuring the Plugin
+
+To use this backend exclusively, use the following snippet in `rabbitmq.conf`
+(the new-style configuration format introduced in RabbitMQ 3.7.0)
+
+ auth_backends.1 = http
+
+Or, in the classic config format (`rabbitmq.config`, prior to 3.7.0) or `advanced.config`:
+
+ [{rabbit, [{auth_backends, [rabbit_auth_backend_http]}]}].
+
+See [RabbitMQ Configuration guide](http://www.rabbitmq.com/configure.html) and
+[Access Control guide](http://rabbitmq.com/access-control.html) for more information.
+
+You need to configure the plugin to know which URIs to point at
+and which HTTP method to use.
+
+Below is a minimal configuration file example.
+
+In `rabbitmq.conf`:
+
+ auth_backends.1 = http
+ auth_http.http_method = post
+ auth_http.user_path = http://some-server/auth/user
+ auth_http.vhost_path = http://some-server/auth/vhost
+ auth_http.resource_path = http://some-server/auth/resource
+ auth_http.topic_path = http://some-server/auth/topic
+
+In the [classic config format](http://www.rabbitmq.com/configure.html) (`rabbitmq.config` prior to 3.7.0 or `advanced.config`):
+
+ [
+ {rabbit, [{auth_backends, [rabbit_auth_backend_http]}]},
+ {rabbitmq_auth_backend_http,
+ [{http_method, post},
+ {user_path, "http(s)://some-server/auth/user"},
+ {vhost_path, "http(s)://some-server/auth/vhost"},
+ {resource_path, "http(s)://some-server/auth/resource"},
+ {topic_path, "http(s)://some-server/auth/topic"}]}
+ ].
+
+By default the `http_method` configuration is `GET`, for backwards compatibility. Using
+`POST` requests is recommended so that credentials do not end up in query strings or request logs.
+
+## What Must My Web Server Do?
+
+This plugin requires that your web server respond to requests in a
+certain predefined format. It will make GET (by default) or POST requests
+against the URIs listed in the configuration file, passing parameters in the query string
+(for `GET` requests) or as a URL-encoded request body (for `POST` requests), as follows:
+
+### user_path
+
+* `username` - the name of the user
+* `password` - the password provided (may be missing if e.g. rabbitmq-auth-mechanism-ssl is used)
+
+### vhost_path
+
+* `username` - the name of the user
+* `vhost` - the name of the virtual host being accessed
+* `ip` - the client IP address
+
+Note that you cannot create arbitrary virtual hosts using this plugin; you can only determine whether your users can see / access the ones that exist.
+
+### resource_path
+
+* `username` - the name of the user
+* `vhost` - the name of the virtual host containing the resource
+* `resource` - the type of resource (`exchange`, `queue`, `topic`)
+* `name` - the name of the resource
+* `permission` - the access level to the resource (`configure`, `write`, `read`) - see [the Access Control guide](http://www.rabbitmq.com/access-control.html) for their meaning
+
+### topic_path
+
+* `username` - the name of the user
+* `vhost` - the name of the virtual host containing the resource
+* `resource` - the type of resource (`topic` in this case)
+* `name` - the name of the exchange
+* `permission` - the access level to the resource (`write` or `read`)
+* `routing_key` - the routing key of a published message (when the permission is `write`)
+or routing key of the queue binding (when the permission is `read`)
+
+See [topic authorisation](http://www.rabbitmq.com/access-control.html#topic-authorisation) for more information
+about topic authorisation.
+
+Your web server should always return HTTP 200 OK, with a body
+containing:
+
+* `deny` - deny access to the user / vhost / resource
+* `allow` - allow access to the user / vhost / resource
+* `allow [list of tags]` - (for `user_path` only) - allow access, and mark the user as having the tags listed
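+
+For illustration, below is a minimal sketch of such a backend using only the
+Python 3 standard library. The `/auth/...` paths and the hard-coded
+`guest`/`guest` credentials are assumptions chosen to match the configuration
+snippets above; a real backend would consult an actual user database:
+
+    # minimal_auth_backend.py - toy HTTP auth backend for this plugin
+    from http.server import BaseHTTPRequestHandler, HTTPServer
+    from urllib.parse import parse_qs, urlparse
+
+    class AuthHandler(BaseHTTPRequestHandler):
+        def _respond(self, body):
+            # The plugin expects HTTP 200 with a body of "deny", "allow"
+            # or "allow <list of tags>".
+            data = body.encode("utf-8")
+            self.send_response(200)
+            self.send_header("Content-Type", "text/plain")
+            self.send_header("Content-Length", str(len(data)))
+            self.end_headers()
+            self.wfile.write(data)
+
+        def do_GET(self):
+            # GET requests carry the parameters in the query string.
+            parsed = urlparse(self.path)
+            params = {k: v[0] for k, v in parse_qs(parsed.query).items()}
+            self._handle(parsed.path, params)
+
+        def do_POST(self):
+            # POST requests carry the parameters as a URL-encoded body.
+            length = int(self.headers.get("Content-Length", 0))
+            body = self.rfile.read(length).decode("utf-8")
+            params = {k: v[0] for k, v in parse_qs(body).items()}
+            self._handle(urlparse(self.path).path, params)
+
+        def _handle(self, path, params):
+            if path == "/auth/user":
+                if (params.get("username") == "guest"
+                        and params.get("password") == "guest"):
+                    self._respond("allow management")  # allow, plus one tag
+                else:
+                    self._respond("deny")
+            else:
+                # vhost, resource and topic checks: permit everything here.
+                self._respond("allow")
+
+    if __name__ == "__main__":
+        HTTPServer(("127.0.0.1", 8000), AuthHandler).serve_forever()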
+
+## Using TLS/HTTPS
+
+If your web server uses HTTPS and certificate verification, you need to
+configure the plugin with a CA certificate and a client certificate/key pair using the `rabbitmq_auth_backend_http.ssl_options` config variable:
+
+ [
+ {rabbit, [{auth_backends, [rabbit_auth_backend_http]}]},
+ {rabbitmq_auth_backend_http,
+ [{http_method, post},
+ {user_path, "https://some-server/auth/user"},
+ {vhost_path, "https://some-server/auth/vhost"},
+ {resource_path, "https://some-server/auth/resource"},
+ {topic_path, "https://some-server/auth/topic"},
+ {ssl_options,
+ [{cacertfile, "/path/to/cacert.pem"},
+ {certfile, "/path/to/client/cert.pem"},
+ {keyfile, "/path/to/client/key.pem"},
+ {verify, verify_peer},
+ {fail_if_no_peer_cert, true}]}]}
+ ].
+
+It is recommended to use TLS for authentication and enable peer verification.
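+
+Continuing the hypothetical Python sketch above, the server side of such a
+setup could terminate TLS with the standard library `ssl` module (the
+certificate and key paths are placeholders):
+
+    import ssl
+
+    context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
+    context.load_cert_chain("/path/to/server/cert.pem", "/path/to/server/key.pem")
+    server = HTTPServer(("0.0.0.0", 8443), AuthHandler)
+    server.socket = context.wrap_socket(server.socket, server_side=True)
+    server.serve_forever()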
+
+
+## Debugging
+
+Check the RabbitMQ logs if things don't seem to be working
+properly. Look for log messages containing "rabbit_auth_backend_http
+failed".
+
+## Example Apps
+
+There are [example backend services](./examples) available in Python, PHP, Spring Boot, and ASP.NET Web API.
+
+See [examples README](./examples/README.md) for more information.
+
+## Building from Source
+
+You can build and install it like any other plugin (see
+[the plugin development guide](http://www.rabbitmq.com/plugin-development.html)).
+
+This plugin depends on the Erlang client (just to grab a URI parser).
diff --git a/deps/rabbitmq_auth_backend_http/TODO b/deps/rabbitmq_auth_backend_http/TODO
new file mode 100644
index 0000000000..5d8bf24512
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/TODO
@@ -0,0 +1 @@
+* Start ssl application when SSL URIs are used.
diff --git a/deps/rabbitmq_auth_backend_http/erlang.mk b/deps/rabbitmq_auth_backend_http/erlang.mk
new file mode 100644
index 0000000000..fce4be0b0a
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/erlang.mk
@@ -0,0 +1,7808 @@
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+.PHONY: all app apps deps search rel relup docs install-docs check tests clean distclean help erlang-mk
+
+ERLANG_MK_FILENAME := $(realpath $(lastword $(MAKEFILE_LIST)))
+export ERLANG_MK_FILENAME
+
+ERLANG_MK_VERSION = 2019.07.01-40-geb3e4b0
+ERLANG_MK_WITHOUT =
+
+# Make 3.81 and 3.82 are deprecated.
+
+ifeq ($(MAKELEVEL)$(MAKE_VERSION),03.81)
+$(warning Please upgrade to GNU Make 4 or later: https://erlang.mk/guide/installation.html)
+endif
+
+ifeq ($(MAKELEVEL)$(MAKE_VERSION),03.82)
+$(warning Please upgrade to GNU Make 4 or later: https://erlang.mk/guide/installation.html)
+endif
+
+# Core configuration.
+
+PROJECT ?= $(notdir $(CURDIR))
+PROJECT := $(strip $(PROJECT))
+
+PROJECT_VERSION ?= rolling
+PROJECT_MOD ?= $(PROJECT)_app
+PROJECT_ENV ?= []
+
+# Verbosity.
+
+V ?= 0
+
+verbose_0 = @
+verbose_2 = set -x;
+verbose = $(verbose_$(V))
+
+ifeq ($(V),3)
+SHELL := $(SHELL) -x
+endif
+
+gen_verbose_0 = @echo " GEN " $@;
+gen_verbose_2 = set -x;
+gen_verbose = $(gen_verbose_$(V))
+
+gen_verbose_esc_0 = @echo " GEN " $$@;
+gen_verbose_esc_2 = set -x;
+gen_verbose_esc = $(gen_verbose_esc_$(V))
+
+# Temporary files directory.
+
+ERLANG_MK_TMP ?= $(CURDIR)/.erlang.mk
+export ERLANG_MK_TMP
+
+# "erl" command.
+
+ERL = erl +A1 -noinput -boot no_dot_erlang
+
+# Platform detection.
+
+ifeq ($(PLATFORM),)
+UNAME_S := $(shell uname -s)
+
+ifeq ($(UNAME_S),Linux)
+PLATFORM = linux
+else ifeq ($(UNAME_S),Darwin)
+PLATFORM = darwin
+else ifeq ($(UNAME_S),SunOS)
+PLATFORM = solaris
+else ifeq ($(UNAME_S),GNU)
+PLATFORM = gnu
+else ifeq ($(UNAME_S),FreeBSD)
+PLATFORM = freebsd
+else ifeq ($(UNAME_S),NetBSD)
+PLATFORM = netbsd
+else ifeq ($(UNAME_S),OpenBSD)
+PLATFORM = openbsd
+else ifeq ($(UNAME_S),DragonFly)
+PLATFORM = dragonfly
+else ifeq ($(shell uname -o),Msys)
+PLATFORM = msys2
+else
+$(error Unable to detect platform. Please open a ticket with the output of uname -a.)
+endif
+
+export PLATFORM
+endif
+
+# Core targets.
+
+all:: deps app rel
+
+# Noop to avoid a Make warning when there's nothing to do.
+rel::
+ $(verbose) :
+
+relup:: deps app
+
+check:: tests
+
+clean:: clean-crashdump
+
+clean-crashdump:
+ifneq ($(wildcard erl_crash.dump),)
+ $(gen_verbose) rm -f erl_crash.dump
+endif
+
+distclean:: clean distclean-tmp
+
+$(ERLANG_MK_TMP):
+ $(verbose) mkdir -p $(ERLANG_MK_TMP)
+
+distclean-tmp:
+ $(gen_verbose) rm -rf $(ERLANG_MK_TMP)
+
+help::
+ $(verbose) printf "%s\n" \
+ "erlang.mk (version $(ERLANG_MK_VERSION)) is distributed under the terms of the ISC License." \
+ "Copyright (c) 2013-2016 Loïc Hoguin <essen@ninenines.eu>" \
+ "" \
+ "Usage: [V=1] $(MAKE) [target]..." \
+ "" \
+ "Core targets:" \
+ " all Run deps, app and rel targets in that order" \
+ " app Compile the project" \
+ " deps Fetch dependencies (if needed) and compile them" \
+ " fetch-deps Fetch dependencies recursively (if needed) without compiling them" \
+ " list-deps List dependencies recursively on stdout" \
+ " search q=... Search for a package in the built-in index" \
+ " rel Build a release for this project, if applicable" \
+ " docs Build the documentation for this project" \
+ " install-docs Install the man pages for this project" \
+ " check Compile and run all tests and analysis for this project" \
+ " tests Run the tests for this project" \
+ " clean Delete temporary and output files from most targets" \
+ " distclean Delete all temporary and output files" \
+ " help Display this help and exit" \
+ " erlang-mk Update erlang.mk to the latest version"
+
+# Core functions.
+
+empty :=
+space := $(empty) $(empty)
+tab := $(empty) $(empty)
+comma := ,
+
+define newline
+
+
+endef
+
+define comma_list
+$(subst $(space),$(comma),$(strip $(1)))
+endef
+
+define escape_dquotes
+$(subst ",\",$1)
+endef
+
+# Adding erlang.mk to make Erlang scripts that call init:get_plain_arguments() happy.
+define erlang
+$(ERL) $2 -pz $(ERLANG_MK_TMP)/rebar/ebin -eval "$(subst $(newline),,$(call escape_dquotes,$1))" -- erlang.mk
+endef
+
+ifeq ($(PLATFORM),msys2)
+core_native_path = $(shell cygpath -m $1)
+else
+core_native_path = $1
+endif
+
+core_http_get = curl -Lf$(if $(filter-out 0,$(V)),,s)o $(call core_native_path,$1) $2
+
+core_eq = $(and $(findstring $(1),$(2)),$(findstring $(2),$(1)))
+
+# We skip files that contain spaces because they end up causing issues.
+core_find = $(if $(wildcard $1),$(shell find $(1:%/=%) \( -type l -o -type f \) -name $(subst *,\*,$2) | grep -v " "))
+
+core_lc = $(subst A,a,$(subst B,b,$(subst C,c,$(subst D,d,$(subst E,e,$(subst F,f,$(subst G,g,$(subst H,h,$(subst I,i,$(subst J,j,$(subst K,k,$(subst L,l,$(subst M,m,$(subst N,n,$(subst O,o,$(subst P,p,$(subst Q,q,$(subst R,r,$(subst S,s,$(subst T,t,$(subst U,u,$(subst V,v,$(subst W,w,$(subst X,x,$(subst Y,y,$(subst Z,z,$(1)))))))))))))))))))))))))))
+
+core_ls = $(filter-out $(1),$(shell echo $(1)))
+
+# @todo Use a solution that does not require using perl.
+core_relpath = $(shell perl -e 'use File::Spec; print File::Spec->abs2rel(@ARGV) . "\n"' $1 $2)
+
+define core_render
+ printf -- '$(subst $(newline),\n,$(subst %,%%,$(subst ','\'',$(subst $(tab),$(WS),$(call $(1))))))\n' > $(2)
+endef
+
+# Automated update.
+
+ERLANG_MK_REPO ?= https://github.com/ninenines/erlang.mk
+ERLANG_MK_COMMIT ?=
+ERLANG_MK_BUILD_CONFIG ?= build.config
+ERLANG_MK_BUILD_DIR ?= .erlang.mk.build
+
+erlang-mk: WITHOUT ?= $(ERLANG_MK_WITHOUT)
+erlang-mk:
+ifdef ERLANG_MK_COMMIT
+ $(verbose) git clone $(ERLANG_MK_REPO) $(ERLANG_MK_BUILD_DIR)
+ $(verbose) cd $(ERLANG_MK_BUILD_DIR) && git checkout $(ERLANG_MK_COMMIT)
+else
+ $(verbose) git clone --depth 1 $(ERLANG_MK_REPO) $(ERLANG_MK_BUILD_DIR)
+endif
+ $(verbose) if [ -f $(ERLANG_MK_BUILD_CONFIG) ]; then cp $(ERLANG_MK_BUILD_CONFIG) $(ERLANG_MK_BUILD_DIR)/build.config; fi
+ $(gen_verbose) $(MAKE) --no-print-directory -C $(ERLANG_MK_BUILD_DIR) WITHOUT='$(strip $(WITHOUT))' UPGRADE=1
+ $(verbose) cp $(ERLANG_MK_BUILD_DIR)/erlang.mk ./erlang.mk
+ $(verbose) rm -rf $(ERLANG_MK_BUILD_DIR)
+ $(verbose) rm -rf $(ERLANG_MK_TMP)
+
+# The erlang.mk package index is bundled in the default erlang.mk build.
+# Search for the string "copyright" to skip to the rest of the code.
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-kerl
+
+KERL_INSTALL_DIR ?= $(HOME)/erlang
+
+ifeq ($(strip $(KERL)),)
+KERL := $(ERLANG_MK_TMP)/kerl/kerl
+endif
+
+KERL_DIR = $(ERLANG_MK_TMP)/kerl
+
+export KERL
+
+KERL_GIT ?= https://github.com/kerl/kerl
+KERL_COMMIT ?= master
+
+KERL_MAKEFLAGS ?=
+
+OTP_GIT ?= https://github.com/erlang/otp
+
+define kerl_otp_target
+$(KERL_INSTALL_DIR)/$(1): $(KERL)
+ $(verbose) if [ ! -d $$@ ]; then \
+ MAKEFLAGS="$(KERL_MAKEFLAGS)" $(KERL) build git $(OTP_GIT) $(1) $(1); \
+ $(KERL) install $(1) $(KERL_INSTALL_DIR)/$(1); \
+ fi
+endef
+
+define kerl_hipe_target
+$(KERL_INSTALL_DIR)/$1-native: $(KERL)
+ $(verbose) if [ ! -d $$@ ]; then \
+ KERL_CONFIGURE_OPTIONS=--enable-native-libs \
+ MAKEFLAGS="$(KERL_MAKEFLAGS)" $(KERL) build git $(OTP_GIT) $1 $1-native; \
+ $(KERL) install $1-native $(KERL_INSTALL_DIR)/$1-native; \
+ fi
+endef
+
+$(KERL): $(KERL_DIR)
+
+$(KERL_DIR): | $(ERLANG_MK_TMP)
+ $(gen_verbose) git clone --depth 1 $(KERL_GIT) $(ERLANG_MK_TMP)/kerl
+ $(verbose) cd $(ERLANG_MK_TMP)/kerl && git checkout $(KERL_COMMIT)
+ $(verbose) chmod +x $(KERL)
+
+distclean:: distclean-kerl
+
+distclean-kerl:
+ $(gen_verbose) rm -rf $(KERL_DIR)
+
+# Allow users to select which version of Erlang/OTP to use for a project.
+
+ifneq ($(strip $(LATEST_ERLANG_OTP)),)
+# In some environments it is necessary to filter out master.
+ERLANG_OTP := $(notdir $(lastword $(sort\
+ $(filter-out $(KERL_INSTALL_DIR)/master $(KERL_INSTALL_DIR)/OTP_R%,\
+ $(filter-out %-rc1 %-rc2 %-rc3,$(wildcard $(KERL_INSTALL_DIR)/*[^-native]))))))
+endif
+
+ERLANG_OTP ?=
+ERLANG_HIPE ?=
+
+# Use kerl to enforce a specific Erlang/OTP version for a project.
+ifneq ($(strip $(ERLANG_OTP)),)
+export PATH := $(KERL_INSTALL_DIR)/$(ERLANG_OTP)/bin:$(PATH)
+SHELL := env PATH=$(PATH) $(SHELL)
+$(eval $(call kerl_otp_target,$(ERLANG_OTP)))
+
+# Build Erlang/OTP only if it doesn't already exist.
+ifeq ($(wildcard $(KERL_INSTALL_DIR)/$(ERLANG_OTP))$(BUILD_ERLANG_OTP),)
+$(info Building Erlang/OTP $(ERLANG_OTP)... Please wait...)
+$(shell $(MAKE) $(KERL_INSTALL_DIR)/$(ERLANG_OTP) ERLANG_OTP=$(ERLANG_OTP) BUILD_ERLANG_OTP=1 >&2)
+endif
+
+else
+# Same for a HiPE enabled VM.
+ifneq ($(strip $(ERLANG_HIPE)),)
+export PATH := $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native/bin:$(PATH)
+SHELL := env PATH=$(PATH) $(SHELL)
+$(eval $(call kerl_hipe_target,$(ERLANG_HIPE)))
+
+# Build Erlang/OTP only if it doesn't already exist.
+ifeq ($(wildcard $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native)$(BUILD_ERLANG_OTP),)
+$(info Building HiPE-enabled Erlang/OTP $(ERLANG_OTP)... Please wait...)
+$(shell $(MAKE) $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native ERLANG_HIPE=$(ERLANG_HIPE) BUILD_ERLANG_OTP=1 >&2)
+endif
+
+endif
+endif
+
+PACKAGES += aberth
+pkg_aberth_name = aberth
+pkg_aberth_description = Generic BERT-RPC server in Erlang
+pkg_aberth_homepage = https://github.com/a13x/aberth
+pkg_aberth_fetch = git
+pkg_aberth_repo = https://github.com/a13x/aberth
+pkg_aberth_commit = master
+
+PACKAGES += active
+pkg_active_name = active
+pkg_active_description = Active development for Erlang: rebuild and reload source/binary files while the VM is running
+pkg_active_homepage = https://github.com/proger/active
+pkg_active_fetch = git
+pkg_active_repo = https://github.com/proger/active
+pkg_active_commit = master
+
+PACKAGES += actordb_core
+pkg_actordb_core_name = actordb_core
+pkg_actordb_core_description = ActorDB main source
+pkg_actordb_core_homepage = http://www.actordb.com/
+pkg_actordb_core_fetch = git
+pkg_actordb_core_repo = https://github.com/biokoda/actordb_core
+pkg_actordb_core_commit = master
+
+PACKAGES += actordb_thrift
+pkg_actordb_thrift_name = actordb_thrift
+pkg_actordb_thrift_description = Thrift API for ActorDB
+pkg_actordb_thrift_homepage = http://www.actordb.com/
+pkg_actordb_thrift_fetch = git
+pkg_actordb_thrift_repo = https://github.com/biokoda/actordb_thrift
+pkg_actordb_thrift_commit = master
+
+PACKAGES += aleppo
+pkg_aleppo_name = aleppo
+pkg_aleppo_description = Alternative Erlang Pre-Processor
+pkg_aleppo_homepage = https://github.com/ErlyORM/aleppo
+pkg_aleppo_fetch = git
+pkg_aleppo_repo = https://github.com/ErlyORM/aleppo
+pkg_aleppo_commit = master
+
+PACKAGES += alog
+pkg_alog_name = alog
+pkg_alog_description = Simply the best logging framework for Erlang
+pkg_alog_homepage = https://github.com/siberian-fast-food/alogger
+pkg_alog_fetch = git
+pkg_alog_repo = https://github.com/siberian-fast-food/alogger
+pkg_alog_commit = master
+
+PACKAGES += amqp_client
+pkg_amqp_client_name = amqp_client
+pkg_amqp_client_description = RabbitMQ Erlang AMQP client
+pkg_amqp_client_homepage = https://www.rabbitmq.com/erlang-client-user-guide.html
+pkg_amqp_client_fetch = git
+pkg_amqp_client_repo = https://github.com/rabbitmq/rabbitmq-erlang-client.git
+pkg_amqp_client_commit = master
+
+PACKAGES += annotations
+pkg_annotations_name = annotations
+pkg_annotations_description = Simple code instrumentation utilities
+pkg_annotations_homepage = https://github.com/hyperthunk/annotations
+pkg_annotations_fetch = git
+pkg_annotations_repo = https://github.com/hyperthunk/annotations
+pkg_annotations_commit = master
+
+PACKAGES += antidote
+pkg_antidote_name = antidote
+pkg_antidote_description = Large-scale computation without synchronisation
+pkg_antidote_homepage = https://syncfree.lip6.fr/
+pkg_antidote_fetch = git
+pkg_antidote_repo = https://github.com/SyncFree/antidote
+pkg_antidote_commit = master
+
+PACKAGES += apns
+pkg_apns_name = apns
+pkg_apns_description = Apple Push Notification Server for Erlang
+pkg_apns_homepage = http://inaka.github.com/apns4erl
+pkg_apns_fetch = git
+pkg_apns_repo = https://github.com/inaka/apns4erl
+pkg_apns_commit = master
+
+PACKAGES += asciideck
+pkg_asciideck_name = asciideck
+pkg_asciideck_description = Asciidoc for Erlang.
+pkg_asciideck_homepage = https://ninenines.eu
+pkg_asciideck_fetch = git
+pkg_asciideck_repo = https://github.com/ninenines/asciideck
+pkg_asciideck_commit = master
+
+PACKAGES += azdht
+pkg_azdht_name = azdht
+pkg_azdht_description = Azureus Distributed Hash Table (DHT) in Erlang
+pkg_azdht_homepage = https://github.com/arcusfelis/azdht
+pkg_azdht_fetch = git
+pkg_azdht_repo = https://github.com/arcusfelis/azdht
+pkg_azdht_commit = master
+
+PACKAGES += backoff
+pkg_backoff_name = backoff
+pkg_backoff_description = Simple exponential backoffs in Erlang
+pkg_backoff_homepage = https://github.com/ferd/backoff
+pkg_backoff_fetch = git
+pkg_backoff_repo = https://github.com/ferd/backoff
+pkg_backoff_commit = master
+
+PACKAGES += barrel_tcp
+pkg_barrel_tcp_name = barrel_tcp
+pkg_barrel_tcp_description = barrel is a generic TCP acceptor pool with low latency in Erlang.
+pkg_barrel_tcp_homepage = https://github.com/benoitc-attic/barrel_tcp
+pkg_barrel_tcp_fetch = git
+pkg_barrel_tcp_repo = https://github.com/benoitc-attic/barrel_tcp
+pkg_barrel_tcp_commit = master
+
+PACKAGES += basho_bench
+pkg_basho_bench_name = basho_bench
+pkg_basho_bench_description = A load-generation and testing tool for basically whatever you can write a returning Erlang function for.
+pkg_basho_bench_homepage = https://github.com/basho/basho_bench
+pkg_basho_bench_fetch = git
+pkg_basho_bench_repo = https://github.com/basho/basho_bench
+pkg_basho_bench_commit = master
+
+PACKAGES += bcrypt
+pkg_bcrypt_name = bcrypt
+pkg_bcrypt_description = Bcrypt Erlang / C library
+pkg_bcrypt_homepage = https://github.com/erlangpack/bcrypt
+pkg_bcrypt_fetch = git
+pkg_bcrypt_repo = https://github.com/erlangpack/bcrypt.git
+pkg_bcrypt_commit = master
+
+PACKAGES += beam
+pkg_beam_name = beam
+pkg_beam_description = BEAM emulator written in Erlang
+pkg_beam_homepage = https://github.com/tonyrog/beam
+pkg_beam_fetch = git
+pkg_beam_repo = https://github.com/tonyrog/beam
+pkg_beam_commit = master
+
+PACKAGES += beanstalk
+pkg_beanstalk_name = beanstalk
+pkg_beanstalk_description = An Erlang client for beanstalkd
+pkg_beanstalk_homepage = https://github.com/tim/erlang-beanstalk
+pkg_beanstalk_fetch = git
+pkg_beanstalk_repo = https://github.com/tim/erlang-beanstalk
+pkg_beanstalk_commit = master
+
+PACKAGES += bear
+pkg_bear_name = bear
+pkg_bear_description = a set of statistics functions for erlang
+pkg_bear_homepage = https://github.com/boundary/bear
+pkg_bear_fetch = git
+pkg_bear_repo = https://github.com/boundary/bear
+pkg_bear_commit = master
+
+PACKAGES += bertconf
+pkg_bertconf_name = bertconf
+pkg_bertconf_description = Make ETS tables out of static BERT files that are auto-reloaded
+pkg_bertconf_homepage = https://github.com/ferd/bertconf
+pkg_bertconf_fetch = git
+pkg_bertconf_repo = https://github.com/ferd/bertconf
+pkg_bertconf_commit = master
+
+PACKAGES += bifrost
+pkg_bifrost_name = bifrost
+pkg_bifrost_description = Erlang FTP Server Framework
+pkg_bifrost_homepage = https://github.com/thorstadt/bifrost
+pkg_bifrost_fetch = git
+pkg_bifrost_repo = https://github.com/thorstadt/bifrost
+pkg_bifrost_commit = master
+
+PACKAGES += binpp
+pkg_binpp_name = binpp
+pkg_binpp_description = Erlang Binary Pretty Printer
+pkg_binpp_homepage = https://github.com/jtendo/binpp
+pkg_binpp_fetch = git
+pkg_binpp_repo = https://github.com/jtendo/binpp
+pkg_binpp_commit = master
+
+PACKAGES += bisect
+pkg_bisect_name = bisect
+pkg_bisect_description = Ordered fixed-size binary dictionary in Erlang
+pkg_bisect_homepage = https://github.com/knutin/bisect
+pkg_bisect_fetch = git
+pkg_bisect_repo = https://github.com/knutin/bisect
+pkg_bisect_commit = master
+
+PACKAGES += bitcask
+pkg_bitcask_name = bitcask
+pkg_bitcask_description = because you need another a key/value storage engine
+pkg_bitcask_homepage = https://github.com/basho/bitcask
+pkg_bitcask_fetch = git
+pkg_bitcask_repo = https://github.com/basho/bitcask
+pkg_bitcask_commit = develop
+
+PACKAGES += bitstore
+pkg_bitstore_name = bitstore
+pkg_bitstore_description = A document based ontology development environment
+pkg_bitstore_homepage = https://github.com/bdionne/bitstore
+pkg_bitstore_fetch = git
+pkg_bitstore_repo = https://github.com/bdionne/bitstore
+pkg_bitstore_commit = master
+
+PACKAGES += bootstrap
+pkg_bootstrap_name = bootstrap
+pkg_bootstrap_description = A simple, yet powerful Erlang cluster bootstrapping application.
+pkg_bootstrap_homepage = https://github.com/schlagert/bootstrap
+pkg_bootstrap_fetch = git
+pkg_bootstrap_repo = https://github.com/schlagert/bootstrap
+pkg_bootstrap_commit = master
+
+PACKAGES += boss
+pkg_boss_name = boss
+pkg_boss_description = Erlang web MVC, now featuring Comet
+pkg_boss_homepage = https://github.com/ChicagoBoss/ChicagoBoss
+pkg_boss_fetch = git
+pkg_boss_repo = https://github.com/ChicagoBoss/ChicagoBoss
+pkg_boss_commit = master
+
+PACKAGES += boss_db
+pkg_boss_db_name = boss_db
+pkg_boss_db_description = BossDB: a sharded, caching, pooling, evented ORM for Erlang
+pkg_boss_db_homepage = https://github.com/ErlyORM/boss_db
+pkg_boss_db_fetch = git
+pkg_boss_db_repo = https://github.com/ErlyORM/boss_db
+pkg_boss_db_commit = master
+
+PACKAGES += brod
+pkg_brod_name = brod
+pkg_brod_description = Kafka client in Erlang
+pkg_brod_homepage = https://github.com/klarna/brod
+pkg_brod_fetch = git
+pkg_brod_repo = https://github.com/klarna/brod.git
+pkg_brod_commit = master
+
+PACKAGES += bson
+pkg_bson_name = bson
+pkg_bson_description = BSON documents in Erlang, see bsonspec.org
+pkg_bson_homepage = https://github.com/comtihon/bson-erlang
+pkg_bson_fetch = git
+pkg_bson_repo = https://github.com/comtihon/bson-erlang
+pkg_bson_commit = master
+
+PACKAGES += bullet
+pkg_bullet_name = bullet
+pkg_bullet_description = Simple, reliable, efficient streaming for Cowboy.
+pkg_bullet_homepage = http://ninenines.eu
+pkg_bullet_fetch = git
+pkg_bullet_repo = https://github.com/ninenines/bullet
+pkg_bullet_commit = master
+
+PACKAGES += cache
+pkg_cache_name = cache
+pkg_cache_description = Erlang in-memory cache
+pkg_cache_homepage = https://github.com/fogfish/cache
+pkg_cache_fetch = git
+pkg_cache_repo = https://github.com/fogfish/cache
+pkg_cache_commit = master
+
+PACKAGES += cake
+pkg_cake_name = cake
+pkg_cake_description = Really simple terminal colorization
+pkg_cake_homepage = https://github.com/darach/cake-erl
+pkg_cake_fetch = git
+pkg_cake_repo = https://github.com/darach/cake-erl
+pkg_cake_commit = master
+
+PACKAGES += carotene
+pkg_carotene_name = carotene
+pkg_carotene_description = Real-time server
+pkg_carotene_homepage = https://github.com/carotene/carotene
+pkg_carotene_fetch = git
+pkg_carotene_repo = https://github.com/carotene/carotene
+pkg_carotene_commit = master
+
+PACKAGES += cberl
+pkg_cberl_name = cberl
+pkg_cberl_description = NIF based Erlang bindings for Couchbase
+pkg_cberl_homepage = https://github.com/chitika/cberl
+pkg_cberl_fetch = git
+pkg_cberl_repo = https://github.com/chitika/cberl
+pkg_cberl_commit = master
+
+PACKAGES += cecho
+pkg_cecho_name = cecho
+pkg_cecho_description = An ncurses library for Erlang
+pkg_cecho_homepage = https://github.com/mazenharake/cecho
+pkg_cecho_fetch = git
+pkg_cecho_repo = https://github.com/mazenharake/cecho
+pkg_cecho_commit = master
+
+PACKAGES += cferl
+pkg_cferl_name = cferl
+pkg_cferl_description = Rackspace / Open Stack Cloud Files Erlang Client
+pkg_cferl_homepage = https://github.com/ddossot/cferl
+pkg_cferl_fetch = git
+pkg_cferl_repo = https://github.com/ddossot/cferl
+pkg_cferl_commit = master
+
+PACKAGES += chaos_monkey
+pkg_chaos_monkey_name = chaos_monkey
+pkg_chaos_monkey_description = This is The CHAOS MONKEY. It will kill your processes.
+pkg_chaos_monkey_homepage = https://github.com/dLuna/chaos_monkey
+pkg_chaos_monkey_fetch = git
+pkg_chaos_monkey_repo = https://github.com/dLuna/chaos_monkey
+pkg_chaos_monkey_commit = master
+
+PACKAGES += check_node
+pkg_check_node_name = check_node
+pkg_check_node_description = Nagios Scripts for monitoring Riak
+pkg_check_node_homepage = https://github.com/basho-labs/riak_nagios
+pkg_check_node_fetch = git
+pkg_check_node_repo = https://github.com/basho-labs/riak_nagios
+pkg_check_node_commit = master
+
+PACKAGES += chronos
+pkg_chronos_name = chronos
+pkg_chronos_description = Timer module for Erlang that makes it easy to abstract time out of the tests.
+pkg_chronos_homepage = https://github.com/lehoff/chronos
+pkg_chronos_fetch = git
+pkg_chronos_repo = https://github.com/lehoff/chronos
+pkg_chronos_commit = master
+
+PACKAGES += chumak
+pkg_chumak_name = chumak
+pkg_chumak_description = Pure Erlang implementation of ZeroMQ Message Transport Protocol.
+pkg_chumak_homepage = http://choven.ca
+pkg_chumak_fetch = git
+pkg_chumak_repo = https://github.com/chovencorp/chumak
+pkg_chumak_commit = master
+
+PACKAGES += cl
+pkg_cl_name = cl
+pkg_cl_description = OpenCL binding for Erlang
+pkg_cl_homepage = https://github.com/tonyrog/cl
+pkg_cl_fetch = git
+pkg_cl_repo = https://github.com/tonyrog/cl
+pkg_cl_commit = master
+
+PACKAGES += clique
+pkg_clique_name = clique
+pkg_clique_description = CLI Framework for Erlang
+pkg_clique_homepage = https://github.com/basho/clique
+pkg_clique_fetch = git
+pkg_clique_repo = https://github.com/basho/clique
+pkg_clique_commit = develop
+
+PACKAGES += cloudi_core
+pkg_cloudi_core_name = cloudi_core
+pkg_cloudi_core_description = CloudI internal service runtime
+pkg_cloudi_core_homepage = http://cloudi.org/
+pkg_cloudi_core_fetch = git
+pkg_cloudi_core_repo = https://github.com/CloudI/cloudi_core
+pkg_cloudi_core_commit = master
+
+PACKAGES += cloudi_service_api_requests
+pkg_cloudi_service_api_requests_name = cloudi_service_api_requests
+pkg_cloudi_service_api_requests_description = CloudI Service API requests (JSON-RPC/Erlang-term support)
+pkg_cloudi_service_api_requests_homepage = http://cloudi.org/
+pkg_cloudi_service_api_requests_fetch = git
+pkg_cloudi_service_api_requests_repo = https://github.com/CloudI/cloudi_service_api_requests
+pkg_cloudi_service_api_requests_commit = master
+
+PACKAGES += cloudi_service_db
+pkg_cloudi_service_db_name = cloudi_service_db
+pkg_cloudi_service_db_description = CloudI Database (in-memory/testing/generic)
+pkg_cloudi_service_db_homepage = http://cloudi.org/
+pkg_cloudi_service_db_fetch = git
+pkg_cloudi_service_db_repo = https://github.com/CloudI/cloudi_service_db
+pkg_cloudi_service_db_commit = master
+
+PACKAGES += cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_name = cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_description = Cassandra CloudI Service
+pkg_cloudi_service_db_cassandra_homepage = http://cloudi.org/
+pkg_cloudi_service_db_cassandra_fetch = git
+pkg_cloudi_service_db_cassandra_repo = https://github.com/CloudI/cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_commit = master
+
+PACKAGES += cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_name = cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_description = Cassandra CQL CloudI Service
+pkg_cloudi_service_db_cassandra_cql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_cassandra_cql_fetch = git
+pkg_cloudi_service_db_cassandra_cql_repo = https://github.com/CloudI/cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_commit = master
+
+PACKAGES += cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_name = cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_description = CouchDB CloudI Service
+pkg_cloudi_service_db_couchdb_homepage = http://cloudi.org/
+pkg_cloudi_service_db_couchdb_fetch = git
+pkg_cloudi_service_db_couchdb_repo = https://github.com/CloudI/cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_commit = master
+
+PACKAGES += cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_name = cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_description = elasticsearch CloudI Service
+pkg_cloudi_service_db_elasticsearch_homepage = http://cloudi.org/
+pkg_cloudi_service_db_elasticsearch_fetch = git
+pkg_cloudi_service_db_elasticsearch_repo = https://github.com/CloudI/cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_commit = master
+
+PACKAGES += cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_name = cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_description = memcached CloudI Service
+pkg_cloudi_service_db_memcached_homepage = http://cloudi.org/
+pkg_cloudi_service_db_memcached_fetch = git
+pkg_cloudi_service_db_memcached_repo = https://github.com/CloudI/cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_commit = master
+
+PACKAGES += cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_name = cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_description = MySQL CloudI Service
+pkg_cloudi_service_db_mysql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_mysql_fetch = git
+pkg_cloudi_service_db_mysql_repo = https://github.com/CloudI/cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_commit = master
+
+PACKAGES += cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_name = cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_description = PostgreSQL CloudI Service
+pkg_cloudi_service_db_pgsql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_pgsql_fetch = git
+pkg_cloudi_service_db_pgsql_repo = https://github.com/CloudI/cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_commit = master
+
+PACKAGES += cloudi_service_db_riak
+pkg_cloudi_service_db_riak_name = cloudi_service_db_riak
+pkg_cloudi_service_db_riak_description = Riak CloudI Service
+pkg_cloudi_service_db_riak_homepage = http://cloudi.org/
+pkg_cloudi_service_db_riak_fetch = git
+pkg_cloudi_service_db_riak_repo = https://github.com/CloudI/cloudi_service_db_riak
+pkg_cloudi_service_db_riak_commit = master
+
+PACKAGES += cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_name = cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_description = Tokyo Tyrant CloudI Service
+pkg_cloudi_service_db_tokyotyrant_homepage = http://cloudi.org/
+pkg_cloudi_service_db_tokyotyrant_fetch = git
+pkg_cloudi_service_db_tokyotyrant_repo = https://github.com/CloudI/cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_commit = master
+
+PACKAGES += cloudi_service_filesystem
+pkg_cloudi_service_filesystem_name = cloudi_service_filesystem
+pkg_cloudi_service_filesystem_description = Filesystem CloudI Service
+pkg_cloudi_service_filesystem_homepage = http://cloudi.org/
+pkg_cloudi_service_filesystem_fetch = git
+pkg_cloudi_service_filesystem_repo = https://github.com/CloudI/cloudi_service_filesystem
+pkg_cloudi_service_filesystem_commit = master
+
+PACKAGES += cloudi_service_http_client
+pkg_cloudi_service_http_client_name = cloudi_service_http_client
+pkg_cloudi_service_http_client_description = HTTP client CloudI Service
+pkg_cloudi_service_http_client_homepage = http://cloudi.org/
+pkg_cloudi_service_http_client_fetch = git
+pkg_cloudi_service_http_client_repo = https://github.com/CloudI/cloudi_service_http_client
+pkg_cloudi_service_http_client_commit = master
+
+PACKAGES += cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_name = cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_description = cowboy HTTP/HTTPS CloudI Service
+pkg_cloudi_service_http_cowboy_homepage = http://cloudi.org/
+pkg_cloudi_service_http_cowboy_fetch = git
+pkg_cloudi_service_http_cowboy_repo = https://github.com/CloudI/cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_commit = master
+
+PACKAGES += cloudi_service_http_elli
+pkg_cloudi_service_http_elli_name = cloudi_service_http_elli
+pkg_cloudi_service_http_elli_description = elli HTTP CloudI Service
+pkg_cloudi_service_http_elli_homepage = http://cloudi.org/
+pkg_cloudi_service_http_elli_fetch = git
+pkg_cloudi_service_http_elli_repo = https://github.com/CloudI/cloudi_service_http_elli
+pkg_cloudi_service_http_elli_commit = master
+
+PACKAGES += cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_name = cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_description = Map/Reduce CloudI Service
+pkg_cloudi_service_map_reduce_homepage = http://cloudi.org/
+pkg_cloudi_service_map_reduce_fetch = git
+pkg_cloudi_service_map_reduce_repo = https://github.com/CloudI/cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_commit = master
+
+PACKAGES += cloudi_service_oauth1
+pkg_cloudi_service_oauth1_name = cloudi_service_oauth1
+pkg_cloudi_service_oauth1_description = OAuth v1.0 CloudI Service
+pkg_cloudi_service_oauth1_homepage = http://cloudi.org/
+pkg_cloudi_service_oauth1_fetch = git
+pkg_cloudi_service_oauth1_repo = https://github.com/CloudI/cloudi_service_oauth1
+pkg_cloudi_service_oauth1_commit = master
+
+PACKAGES += cloudi_service_queue
+pkg_cloudi_service_queue_name = cloudi_service_queue
+pkg_cloudi_service_queue_description = Persistent Queue Service
+pkg_cloudi_service_queue_homepage = http://cloudi.org/
+pkg_cloudi_service_queue_fetch = git
+pkg_cloudi_service_queue_repo = https://github.com/CloudI/cloudi_service_queue
+pkg_cloudi_service_queue_commit = master
+
+PACKAGES += cloudi_service_quorum
+pkg_cloudi_service_quorum_name = cloudi_service_quorum
+pkg_cloudi_service_quorum_description = CloudI Quorum Service
+pkg_cloudi_service_quorum_homepage = http://cloudi.org/
+pkg_cloudi_service_quorum_fetch = git
+pkg_cloudi_service_quorum_repo = https://github.com/CloudI/cloudi_service_quorum
+pkg_cloudi_service_quorum_commit = master
+
+PACKAGES += cloudi_service_router
+pkg_cloudi_service_router_name = cloudi_service_router
+pkg_cloudi_service_router_description = CloudI Router Service
+pkg_cloudi_service_router_homepage = http://cloudi.org/
+pkg_cloudi_service_router_fetch = git
+pkg_cloudi_service_router_repo = https://github.com/CloudI/cloudi_service_router
+pkg_cloudi_service_router_commit = master
+
+PACKAGES += cloudi_service_tcp
+pkg_cloudi_service_tcp_name = cloudi_service_tcp
+pkg_cloudi_service_tcp_description = TCP CloudI Service
+pkg_cloudi_service_tcp_homepage = http://cloudi.org/
+pkg_cloudi_service_tcp_fetch = git
+pkg_cloudi_service_tcp_repo = https://github.com/CloudI/cloudi_service_tcp
+pkg_cloudi_service_tcp_commit = master
+
+PACKAGES += cloudi_service_timers
+pkg_cloudi_service_timers_name = cloudi_service_timers
+pkg_cloudi_service_timers_description = Timers CloudI Service
+pkg_cloudi_service_timers_homepage = http://cloudi.org/
+pkg_cloudi_service_timers_fetch = git
+pkg_cloudi_service_timers_repo = https://github.com/CloudI/cloudi_service_timers
+pkg_cloudi_service_timers_commit = master
+
+PACKAGES += cloudi_service_udp
+pkg_cloudi_service_udp_name = cloudi_service_udp
+pkg_cloudi_service_udp_description = UDP CloudI Service
+pkg_cloudi_service_udp_homepage = http://cloudi.org/
+pkg_cloudi_service_udp_fetch = git
+pkg_cloudi_service_udp_repo = https://github.com/CloudI/cloudi_service_udp
+pkg_cloudi_service_udp_commit = master
+
+PACKAGES += cloudi_service_validate
+pkg_cloudi_service_validate_name = cloudi_service_validate
+pkg_cloudi_service_validate_description = CloudI Validate Service
+pkg_cloudi_service_validate_homepage = http://cloudi.org/
+pkg_cloudi_service_validate_fetch = git
+pkg_cloudi_service_validate_repo = https://github.com/CloudI/cloudi_service_validate
+pkg_cloudi_service_validate_commit = master
+
+PACKAGES += cloudi_service_zeromq
+pkg_cloudi_service_zeromq_name = cloudi_service_zeromq
+pkg_cloudi_service_zeromq_description = ZeroMQ CloudI Service
+pkg_cloudi_service_zeromq_homepage = http://cloudi.org/
+pkg_cloudi_service_zeromq_fetch = git
+pkg_cloudi_service_zeromq_repo = https://github.com/CloudI/cloudi_service_zeromq
+pkg_cloudi_service_zeromq_commit = master
+
+PACKAGES += cluster_info
+pkg_cluster_info_name = cluster_info
+pkg_cluster_info_description = Fork of Hibari's nifty cluster_info OTP app
+pkg_cluster_info_homepage = https://github.com/basho/cluster_info
+pkg_cluster_info_fetch = git
+pkg_cluster_info_repo = https://github.com/basho/cluster_info
+pkg_cluster_info_commit = master
+
+PACKAGES += color
+pkg_color_name = color
+pkg_color_description = ANSI colors for your Erlang
+pkg_color_homepage = https://github.com/julianduque/erlang-color
+pkg_color_fetch = git
+pkg_color_repo = https://github.com/julianduque/erlang-color
+pkg_color_commit = master
+
+PACKAGES += confetti
+pkg_confetti_name = confetti
+pkg_confetti_description = Erlang configuration provider / application:get_env/2 on steroids
+pkg_confetti_homepage = https://github.com/jtendo/confetti
+pkg_confetti_fetch = git
+pkg_confetti_repo = https://github.com/jtendo/confetti
+pkg_confetti_commit = master
+
+PACKAGES += couchbeam
+pkg_couchbeam_name = couchbeam
+pkg_couchbeam_description = Apache CouchDB client in Erlang
+pkg_couchbeam_homepage = https://github.com/benoitc/couchbeam
+pkg_couchbeam_fetch = git
+pkg_couchbeam_repo = https://github.com/benoitc/couchbeam
+pkg_couchbeam_commit = master
+
+PACKAGES += covertool
+pkg_covertool_name = covertool
+pkg_covertool_description = Tool to convert Erlang cover data files into Cobertura XML reports
+pkg_covertool_homepage = https://github.com/idubrov/covertool
+pkg_covertool_fetch = git
+pkg_covertool_repo = https://github.com/idubrov/covertool
+pkg_covertool_commit = master
+
+PACKAGES += cowboy
+pkg_cowboy_name = cowboy
+pkg_cowboy_description = Small, fast and modular HTTP server.
+pkg_cowboy_homepage = http://ninenines.eu
+pkg_cowboy_fetch = git
+pkg_cowboy_repo = https://github.com/ninenines/cowboy
+pkg_cowboy_commit = 1.0.4
+
+PACKAGES += cowdb
+pkg_cowdb_name = cowdb
+pkg_cowdb_description = Pure Key/Value database library for Erlang Applications
+pkg_cowdb_homepage = https://github.com/refuge/cowdb
+pkg_cowdb_fetch = git
+pkg_cowdb_repo = https://github.com/refuge/cowdb
+pkg_cowdb_commit = master
+
+PACKAGES += cowlib
+pkg_cowlib_name = cowlib
+pkg_cowlib_description = Support library for manipulating Web protocols.
+pkg_cowlib_homepage = http://ninenines.eu
+pkg_cowlib_fetch = git
+pkg_cowlib_repo = https://github.com/ninenines/cowlib
+pkg_cowlib_commit = 1.0.2
+
+PACKAGES += cpg
+pkg_cpg_name = cpg
+pkg_cpg_description = CloudI Process Groups
+pkg_cpg_homepage = https://github.com/okeuday/cpg
+pkg_cpg_fetch = git
+pkg_cpg_repo = https://github.com/okeuday/cpg
+pkg_cpg_commit = master
+
+PACKAGES += cqerl
+pkg_cqerl_name = cqerl
+pkg_cqerl_description = Native Erlang CQL client for Cassandra
+pkg_cqerl_homepage = https://matehat.github.io/cqerl/
+pkg_cqerl_fetch = git
+pkg_cqerl_repo = https://github.com/matehat/cqerl
+pkg_cqerl_commit = master
+
+PACKAGES += cr
+pkg_cr_name = cr
+pkg_cr_description = Chain Replication
+pkg_cr_homepage = https://synrc.com/apps/cr/doc/cr.htm
+pkg_cr_fetch = git
+pkg_cr_repo = https://github.com/spawnproc/cr
+pkg_cr_commit = master
+
+PACKAGES += cuttlefish
+pkg_cuttlefish_name = cuttlefish
+pkg_cuttlefish_description = never lose your childlike sense of wonder baby cuttlefish, promise me?
+pkg_cuttlefish_homepage = https://github.com/basho/cuttlefish
+pkg_cuttlefish_fetch = git
+pkg_cuttlefish_repo = https://github.com/basho/cuttlefish
+pkg_cuttlefish_commit = master
+
+PACKAGES += damocles
+pkg_damocles_name = damocles
+pkg_damocles_description = Erlang library for generating adversarial network conditions for QAing distributed applications/systems on a single Linux box.
+pkg_damocles_homepage = https://github.com/lostcolony/damocles
+pkg_damocles_fetch = git
+pkg_damocles_repo = https://github.com/lostcolony/damocles
+pkg_damocles_commit = master
+
+PACKAGES += debbie
+pkg_debbie_name = debbie
+pkg_debbie_description = .DEB Built In Erlang
+pkg_debbie_homepage = https://github.com/crownedgrouse/debbie
+pkg_debbie_fetch = git
+pkg_debbie_repo = https://github.com/crownedgrouse/debbie
+pkg_debbie_commit = master
+
+PACKAGES += decimal
+pkg_decimal_name = decimal
+pkg_decimal_description = An Erlang decimal arithmetic library
+pkg_decimal_homepage = https://github.com/tim/erlang-decimal
+pkg_decimal_fetch = git
+pkg_decimal_repo = https://github.com/tim/erlang-decimal
+pkg_decimal_commit = master
+
+PACKAGES += detergent
+pkg_detergent_name = detergent
+pkg_detergent_description = An emulsifying Erlang SOAP library
+pkg_detergent_homepage = https://github.com/devinus/detergent
+pkg_detergent_fetch = git
+pkg_detergent_repo = https://github.com/devinus/detergent
+pkg_detergent_commit = master
+
+PACKAGES += detest
+pkg_detest_name = detest
+pkg_detest_description = Tool for running tests on a cluster of erlang nodes
+pkg_detest_homepage = https://github.com/biokoda/detest
+pkg_detest_fetch = git
+pkg_detest_repo = https://github.com/biokoda/detest
+pkg_detest_commit = master
+
+PACKAGES += dh_date
+pkg_dh_date_name = dh_date
+pkg_dh_date_description = Date formatting / parsing library for erlang
+pkg_dh_date_homepage = https://github.com/daleharvey/dh_date
+pkg_dh_date_fetch = git
+pkg_dh_date_repo = https://github.com/daleharvey/dh_date
+pkg_dh_date_commit = master
+
+PACKAGES += dirbusterl
+pkg_dirbusterl_name = dirbusterl
+pkg_dirbusterl_description = DirBuster successor in Erlang
+pkg_dirbusterl_homepage = https://github.com/silentsignal/DirBustErl
+pkg_dirbusterl_fetch = git
+pkg_dirbusterl_repo = https://github.com/silentsignal/DirBustErl
+pkg_dirbusterl_commit = master
+
+PACKAGES += dispcount
+pkg_dispcount_name = dispcount
+pkg_dispcount_description = Erlang task dispatcher based on ETS counters.
+pkg_dispcount_homepage = https://github.com/ferd/dispcount
+pkg_dispcount_fetch = git
+pkg_dispcount_repo = https://github.com/ferd/dispcount
+pkg_dispcount_commit = master
+
+PACKAGES += dlhttpc
+pkg_dlhttpc_name = dlhttpc
+pkg_dlhttpc_description = dispcount-based lhttpc fork for massive amounts of requests to limited endpoints
+pkg_dlhttpc_homepage = https://github.com/ferd/dlhttpc
+pkg_dlhttpc_fetch = git
+pkg_dlhttpc_repo = https://github.com/ferd/dlhttpc
+pkg_dlhttpc_commit = master
+
+PACKAGES += dns
+pkg_dns_name = dns
+pkg_dns_description = Erlang DNS library
+pkg_dns_homepage = https://github.com/aetrion/dns_erlang
+pkg_dns_fetch = git
+pkg_dns_repo = https://github.com/aetrion/dns_erlang
+pkg_dns_commit = master
+
+PACKAGES += dnssd
+pkg_dnssd_name = dnssd
+pkg_dnssd_description = Erlang interface to Apple's Bonjour DNS Service Discovery implementation
+pkg_dnssd_homepage = https://github.com/benoitc/dnssd_erlang
+pkg_dnssd_fetch = git
+pkg_dnssd_repo = https://github.com/benoitc/dnssd_erlang
+pkg_dnssd_commit = master
+
+PACKAGES += dynamic_compile
+pkg_dynamic_compile_name = dynamic_compile
+pkg_dynamic_compile_description = compile and load erlang modules from string input
+pkg_dynamic_compile_homepage = https://github.com/jkvor/dynamic_compile
+pkg_dynamic_compile_fetch = git
+pkg_dynamic_compile_repo = https://github.com/jkvor/dynamic_compile
+pkg_dynamic_compile_commit = master
+
+PACKAGES += e2
+pkg_e2_name = e2
+pkg_e2_description = Library to simplify writing correct OTP applications.
+pkg_e2_homepage = http://e2project.org
+pkg_e2_fetch = git
+pkg_e2_repo = https://github.com/gar1t/e2
+pkg_e2_commit = master
+
+PACKAGES += eamf
+pkg_eamf_name = eamf
+pkg_eamf_description = eAMF provides Action Message Format (AMF) support for Erlang
+pkg_eamf_homepage = https://github.com/mrinalwadhwa/eamf
+pkg_eamf_fetch = git
+pkg_eamf_repo = https://github.com/mrinalwadhwa/eamf
+pkg_eamf_commit = master
+
+PACKAGES += eavro
+pkg_eavro_name = eavro
+pkg_eavro_description = Apache Avro encoder/decoder
+pkg_eavro_homepage = https://github.com/SIfoxDevTeam/eavro
+pkg_eavro_fetch = git
+pkg_eavro_repo = https://github.com/SIfoxDevTeam/eavro
+pkg_eavro_commit = master
+
+PACKAGES += ecapnp
+pkg_ecapnp_name = ecapnp
+pkg_ecapnp_description = Cap'n Proto library for Erlang
+pkg_ecapnp_homepage = https://github.com/kaos/ecapnp
+pkg_ecapnp_fetch = git
+pkg_ecapnp_repo = https://github.com/kaos/ecapnp
+pkg_ecapnp_commit = master
+
+PACKAGES += econfig
+pkg_econfig_name = econfig
+pkg_econfig_description = simple Erlang config handler using INI files
+pkg_econfig_homepage = https://github.com/benoitc/econfig
+pkg_econfig_fetch = git
+pkg_econfig_repo = https://github.com/benoitc/econfig
+pkg_econfig_commit = master
+
+PACKAGES += edate
+pkg_edate_name = edate
+pkg_edate_description = date manipulation library for erlang
+pkg_edate_homepage = https://github.com/dweldon/edate
+pkg_edate_fetch = git
+pkg_edate_repo = https://github.com/dweldon/edate
+pkg_edate_commit = master
+
+PACKAGES += edgar
+pkg_edgar_name = edgar
+pkg_edgar_description = Erlang Does GNU AR
+pkg_edgar_homepage = https://github.com/crownedgrouse/edgar
+pkg_edgar_fetch = git
+pkg_edgar_repo = https://github.com/crownedgrouse/edgar
+pkg_edgar_commit = master
+
+PACKAGES += edis
+pkg_edis_name = edis
+pkg_edis_description = An Erlang implementation of Redis KV Store
+pkg_edis_homepage = http://inaka.github.com/edis/
+pkg_edis_fetch = git
+pkg_edis_repo = https://github.com/inaka/edis
+pkg_edis_commit = master
+
+PACKAGES += edns
+pkg_edns_name = edns
+pkg_edns_description = Erlang/OTP DNS server
+pkg_edns_homepage = https://github.com/hcvst/erlang-dns
+pkg_edns_fetch = git
+pkg_edns_repo = https://github.com/hcvst/erlang-dns
+pkg_edns_commit = master
+
+PACKAGES += edown
+pkg_edown_name = edown
+pkg_edown_description = EDoc extension for generating Github-flavored Markdown
+pkg_edown_homepage = https://github.com/uwiger/edown
+pkg_edown_fetch = git
+pkg_edown_repo = https://github.com/uwiger/edown
+pkg_edown_commit = master
+
+PACKAGES += eep
+pkg_eep_name = eep
+pkg_eep_description = Erlang Easy Profiling (eep) application provides a way to analyze application performance and call hierarchy
+pkg_eep_homepage = https://github.com/virtan/eep
+pkg_eep_fetch = git
+pkg_eep_repo = https://github.com/virtan/eep
+pkg_eep_commit = master
+
+PACKAGES += eep_app
+pkg_eep_app_name = eep_app
+pkg_eep_app_description = Embedded Event Processing
+pkg_eep_app_homepage = https://github.com/darach/eep-erl
+pkg_eep_app_fetch = git
+pkg_eep_app_repo = https://github.com/darach/eep-erl
+pkg_eep_app_commit = master
+
+PACKAGES += efene
+pkg_efene_name = efene
+pkg_efene_description = Alternative syntax for the Erlang Programming Language focusing on simplicity, ease of use and programmer UX
+pkg_efene_homepage = https://github.com/efene/efene
+pkg_efene_fetch = git
+pkg_efene_repo = https://github.com/efene/efene
+pkg_efene_commit = master
+
+PACKAGES += egeoip
+pkg_egeoip_name = egeoip
+pkg_egeoip_description = Erlang IP Geolocation module, currently supporting the MaxMind GeoLite City Database.
+pkg_egeoip_homepage = https://github.com/mochi/egeoip
+pkg_egeoip_fetch = git
+pkg_egeoip_repo = https://github.com/mochi/egeoip
+pkg_egeoip_commit = master
+
+PACKAGES += ehsa
+pkg_ehsa_name = ehsa
+pkg_ehsa_description = Erlang HTTP server basic and digest authentication modules
+pkg_ehsa_homepage = https://bitbucket.org/a12n/ehsa
+pkg_ehsa_fetch = hg
+pkg_ehsa_repo = https://bitbucket.org/a12n/ehsa
+pkg_ehsa_commit = default
+
+PACKAGES += ej
+pkg_ej_name = ej
+pkg_ej_description = Helper module for working with Erlang terms representing JSON
+pkg_ej_homepage = https://github.com/seth/ej
+pkg_ej_fetch = git
+pkg_ej_repo = https://github.com/seth/ej
+pkg_ej_commit = master
+
+PACKAGES += ejabberd
+pkg_ejabberd_name = ejabberd
+pkg_ejabberd_description = Robust, ubiquitous and massively scalable Jabber / XMPP Instant Messaging platform
+pkg_ejabberd_homepage = https://github.com/processone/ejabberd
+pkg_ejabberd_fetch = git
+pkg_ejabberd_repo = https://github.com/processone/ejabberd
+pkg_ejabberd_commit = master
+
+PACKAGES += ejwt
+pkg_ejwt_name = ejwt
+pkg_ejwt_description = erlang library for JSON Web Token
+pkg_ejwt_homepage = https://github.com/artefactop/ejwt
+pkg_ejwt_fetch = git
+pkg_ejwt_repo = https://github.com/artefactop/ejwt
+pkg_ejwt_commit = master
+
+PACKAGES += ekaf
+pkg_ekaf_name = ekaf
+pkg_ekaf_description = A minimal, high-performance Kafka client in Erlang.
+pkg_ekaf_homepage = https://github.com/helpshift/ekaf
+pkg_ekaf_fetch = git
+pkg_ekaf_repo = https://github.com/helpshift/ekaf
+pkg_ekaf_commit = master
+
+PACKAGES += elarm
+pkg_elarm_name = elarm
+pkg_elarm_description = Alarm Manager for Erlang.
+pkg_elarm_homepage = https://github.com/esl/elarm
+pkg_elarm_fetch = git
+pkg_elarm_repo = https://github.com/esl/elarm
+pkg_elarm_commit = master
+
+PACKAGES += eleveldb
+pkg_eleveldb_name = eleveldb
+pkg_eleveldb_description = Erlang LevelDB API
+pkg_eleveldb_homepage = https://github.com/basho/eleveldb
+pkg_eleveldb_fetch = git
+pkg_eleveldb_repo = https://github.com/basho/eleveldb
+pkg_eleveldb_commit = master
+
+PACKAGES += elixir
+pkg_elixir_name = elixir
+pkg_elixir_description = Elixir is a dynamic, functional language designed for building scalable and maintainable applications
+pkg_elixir_homepage = https://elixir-lang.org/
+pkg_elixir_fetch = git
+pkg_elixir_repo = https://github.com/elixir-lang/elixir
+pkg_elixir_commit = master
+
+PACKAGES += elli
+pkg_elli_name = elli
+pkg_elli_description = Simple, robust and performant Erlang web server
+pkg_elli_homepage = https://github.com/elli-lib/elli
+pkg_elli_fetch = git
+pkg_elli_repo = https://github.com/elli-lib/elli
+pkg_elli_commit = master
+
+PACKAGES += elvis
+pkg_elvis_name = elvis
+pkg_elvis_description = Erlang Style Reviewer
+pkg_elvis_homepage = https://github.com/inaka/elvis
+pkg_elvis_fetch = git
+pkg_elvis_repo = https://github.com/inaka/elvis
+pkg_elvis_commit = master
+
+PACKAGES += emagick
+pkg_emagick_name = emagick
+pkg_emagick_description = Wrapper for Graphics/ImageMagick command line tool.
+pkg_emagick_homepage = https://github.com/kivra/emagick
+pkg_emagick_fetch = git
+pkg_emagick_repo = https://github.com/kivra/emagick
+pkg_emagick_commit = master
+
+PACKAGES += emysql
+pkg_emysql_name = emysql
+pkg_emysql_description = Stable, pure Erlang MySQL driver.
+pkg_emysql_homepage = https://github.com/Eonblast/Emysql
+pkg_emysql_fetch = git
+pkg_emysql_repo = https://github.com/Eonblast/Emysql
+pkg_emysql_commit = master
+
+PACKAGES += enm
+pkg_enm_name = enm
+pkg_enm_description = Erlang driver for nanomsg
+pkg_enm_homepage = https://github.com/basho/enm
+pkg_enm_fetch = git
+pkg_enm_repo = https://github.com/basho/enm
+pkg_enm_commit = master
+
+PACKAGES += entop
+pkg_entop_name = entop
+pkg_entop_description = A top-like tool for monitoring an Erlang node
+pkg_entop_homepage = https://github.com/mazenharake/entop
+pkg_entop_fetch = git
+pkg_entop_repo = https://github.com/mazenharake/entop
+pkg_entop_commit = master
+
+PACKAGES += epcap
+pkg_epcap_name = epcap
+pkg_epcap_description = Erlang packet capture interface using pcap
+pkg_epcap_homepage = https://github.com/msantos/epcap
+pkg_epcap_fetch = git
+pkg_epcap_repo = https://github.com/msantos/epcap
+pkg_epcap_commit = master
+
+PACKAGES += eper
+pkg_eper_name = eper
+pkg_eper_description = Erlang performance and debugging tools.
+pkg_eper_homepage = https://github.com/massemanet/eper
+pkg_eper_fetch = git
+pkg_eper_repo = https://github.com/massemanet/eper
+pkg_eper_commit = master
+
+PACKAGES += epgsql
+pkg_epgsql_name = epgsql
+pkg_epgsql_description = Erlang PostgreSQL client library.
+pkg_epgsql_homepage = https://github.com/epgsql/epgsql
+pkg_epgsql_fetch = git
+pkg_epgsql_repo = https://github.com/epgsql/epgsql
+pkg_epgsql_commit = master
+
+PACKAGES += episcina
+pkg_episcina_name = episcina
+pkg_episcina_description = A simple non-intrusive resource pool for connections
+pkg_episcina_homepage = https://github.com/erlware/episcina
+pkg_episcina_fetch = git
+pkg_episcina_repo = https://github.com/erlware/episcina
+pkg_episcina_commit = master
+
+PACKAGES += eplot
+pkg_eplot_name = eplot
+pkg_eplot_description = A plot engine written in erlang.
+pkg_eplot_homepage = https://github.com/psyeugenic/eplot
+pkg_eplot_fetch = git
+pkg_eplot_repo = https://github.com/psyeugenic/eplot
+pkg_eplot_commit = master
+
+PACKAGES += epocxy
+pkg_epocxy_name = epocxy
+pkg_epocxy_description = Erlang Patterns of Concurrency
+pkg_epocxy_homepage = https://github.com/duomark/epocxy
+pkg_epocxy_fetch = git
+pkg_epocxy_repo = https://github.com/duomark/epocxy
+pkg_epocxy_commit = master
+
+PACKAGES += epubnub
+pkg_epubnub_name = epubnub
+pkg_epubnub_description = Erlang PubNub API
+pkg_epubnub_homepage = https://github.com/tsloughter/epubnub
+pkg_epubnub_fetch = git
+pkg_epubnub_repo = https://github.com/tsloughter/epubnub
+pkg_epubnub_commit = master
+
+PACKAGES += eqm
+pkg_eqm_name = eqm
+pkg_eqm_description = Erlang pub sub with supply-demand channels
+pkg_eqm_homepage = https://github.com/loucash/eqm
+pkg_eqm_fetch = git
+pkg_eqm_repo = https://github.com/loucash/eqm
+pkg_eqm_commit = master
+
+PACKAGES += eredis
+pkg_eredis_name = eredis
+pkg_eredis_description = Erlang Redis client
+pkg_eredis_homepage = https://github.com/wooga/eredis
+pkg_eredis_fetch = git
+pkg_eredis_repo = https://github.com/wooga/eredis
+pkg_eredis_commit = master
+
+PACKAGES += eredis_pool
+pkg_eredis_pool_name = eredis_pool
+pkg_eredis_pool_description = eredis_pool is a pool of Redis clients, using eredis and poolboy.
+pkg_eredis_pool_homepage = https://github.com/hiroeorz/eredis_pool
+pkg_eredis_pool_fetch = git
+pkg_eredis_pool_repo = https://github.com/hiroeorz/eredis_pool
+pkg_eredis_pool_commit = master
+
+PACKAGES += erl_streams
+pkg_erl_streams_name = erl_streams
+pkg_erl_streams_description = Streams in Erlang
+pkg_erl_streams_homepage = https://github.com/epappas/erl_streams
+pkg_erl_streams_fetch = git
+pkg_erl_streams_repo = https://github.com/epappas/erl_streams
+pkg_erl_streams_commit = master
+
+PACKAGES += erlang_cep
+pkg_erlang_cep_name = erlang_cep
+pkg_erlang_cep_description = A basic CEP package written in erlang
+pkg_erlang_cep_homepage = https://github.com/danmacklin/erlang_cep
+pkg_erlang_cep_fetch = git
+pkg_erlang_cep_repo = https://github.com/danmacklin/erlang_cep
+pkg_erlang_cep_commit = master
+
+PACKAGES += erlang_js
+pkg_erlang_js_name = erlang_js
+pkg_erlang_js_description = A linked-in driver for Erlang to Mozilla's Spidermonkey Javascript runtime.
+pkg_erlang_js_homepage = https://github.com/basho/erlang_js
+pkg_erlang_js_fetch = git
+pkg_erlang_js_repo = https://github.com/basho/erlang_js
+pkg_erlang_js_commit = master
+
+PACKAGES += erlang_localtime
+pkg_erlang_localtime_name = erlang_localtime
+pkg_erlang_localtime_description = Erlang library for conversion from one local time to another
+pkg_erlang_localtime_homepage = https://github.com/dmitryme/erlang_localtime
+pkg_erlang_localtime_fetch = git
+pkg_erlang_localtime_repo = https://github.com/dmitryme/erlang_localtime
+pkg_erlang_localtime_commit = master
+
+PACKAGES += erlang_smtp
+pkg_erlang_smtp_name = erlang_smtp
+pkg_erlang_smtp_description = Erlang SMTP and POP3 server code.
+pkg_erlang_smtp_homepage = https://github.com/tonyg/erlang-smtp
+pkg_erlang_smtp_fetch = git
+pkg_erlang_smtp_repo = https://github.com/tonyg/erlang-smtp
+pkg_erlang_smtp_commit = master
+
+PACKAGES += erlang_term
+pkg_erlang_term_name = erlang_term
+pkg_erlang_term_description = Erlang Term Info
+pkg_erlang_term_homepage = https://github.com/okeuday/erlang_term
+pkg_erlang_term_fetch = git
+pkg_erlang_term_repo = https://github.com/okeuday/erlang_term
+pkg_erlang_term_commit = master
+
+PACKAGES += erlastic_search
+pkg_erlastic_search_name = erlastic_search
+pkg_erlastic_search_description = An Erlang app for communicating with Elastic Search's REST interface.
+pkg_erlastic_search_homepage = https://github.com/tsloughter/erlastic_search
+pkg_erlastic_search_fetch = git
+pkg_erlastic_search_repo = https://github.com/tsloughter/erlastic_search
+pkg_erlastic_search_commit = master
+
+PACKAGES += erlasticsearch
+pkg_erlasticsearch_name = erlasticsearch
+pkg_erlasticsearch_description = Erlang thrift interface to elastic_search
+pkg_erlasticsearch_homepage = https://github.com/dieswaytoofast/erlasticsearch
+pkg_erlasticsearch_fetch = git
+pkg_erlasticsearch_repo = https://github.com/dieswaytoofast/erlasticsearch
+pkg_erlasticsearch_commit = master
+
+PACKAGES += erlbrake
+pkg_erlbrake_name = erlbrake
+pkg_erlbrake_description = Erlang Airbrake notification client
+pkg_erlbrake_homepage = https://github.com/kenpratt/erlbrake
+pkg_erlbrake_fetch = git
+pkg_erlbrake_repo = https://github.com/kenpratt/erlbrake
+pkg_erlbrake_commit = master
+
+PACKAGES += erlcloud
+pkg_erlcloud_name = erlcloud
+pkg_erlcloud_description = Cloud Computing library for erlang (Amazon EC2, S3, SQS, SimpleDB, Mechanical Turk, ELB)
+pkg_erlcloud_homepage = https://github.com/gleber/erlcloud
+pkg_erlcloud_fetch = git
+pkg_erlcloud_repo = https://github.com/gleber/erlcloud
+pkg_erlcloud_commit = master
+
+PACKAGES += erlcron
+pkg_erlcron_name = erlcron
+pkg_erlcron_description = Erlang cronish system
+pkg_erlcron_homepage = https://github.com/erlware/erlcron
+pkg_erlcron_fetch = git
+pkg_erlcron_repo = https://github.com/erlware/erlcron
+pkg_erlcron_commit = master
+
+PACKAGES += erldb
+pkg_erldb_name = erldb
+pkg_erldb_description = ORM (Object-relational mapping) application implemented in Erlang
+pkg_erldb_homepage = http://erldb.org
+pkg_erldb_fetch = git
+pkg_erldb_repo = https://github.com/erldb/erldb
+pkg_erldb_commit = master
+
+PACKAGES += erldis
+pkg_erldis_name = erldis
+pkg_erldis_description = redis erlang client library
+pkg_erldis_homepage = https://github.com/cstar/erldis
+pkg_erldis_fetch = git
+pkg_erldis_repo = https://github.com/cstar/erldis
+pkg_erldis_commit = master
+
+PACKAGES += erldns
+pkg_erldns_name = erldns
+pkg_erldns_description = DNS server, in erlang.
+pkg_erldns_homepage = https://github.com/aetrion/erl-dns
+pkg_erldns_fetch = git
+pkg_erldns_repo = https://github.com/aetrion/erl-dns
+pkg_erldns_commit = master
+
+PACKAGES += erldocker
+pkg_erldocker_name = erldocker
+pkg_erldocker_description = Docker Remote API client for Erlang
+pkg_erldocker_homepage = https://github.com/proger/erldocker
+pkg_erldocker_fetch = git
+pkg_erldocker_repo = https://github.com/proger/erldocker
+pkg_erldocker_commit = master
+
+PACKAGES += erlfsmon
+pkg_erlfsmon_name = erlfsmon
+pkg_erlfsmon_description = Erlang filesystem event watcher for Linux and OSX
+pkg_erlfsmon_homepage = https://github.com/proger/erlfsmon
+pkg_erlfsmon_fetch = git
+pkg_erlfsmon_repo = https://github.com/proger/erlfsmon
+pkg_erlfsmon_commit = master
+
+PACKAGES += erlgit
+pkg_erlgit_name = erlgit
+pkg_erlgit_description = Erlang convenience wrapper around git executable
+pkg_erlgit_homepage = https://github.com/gleber/erlgit
+pkg_erlgit_fetch = git
+pkg_erlgit_repo = https://github.com/gleber/erlgit
+pkg_erlgit_commit = master
+
+PACKAGES += erlguten
+pkg_erlguten_name = erlguten
+pkg_erlguten_description = ErlGuten is a system for high-quality typesetting, written purely in Erlang.
+pkg_erlguten_homepage = https://github.com/richcarl/erlguten
+pkg_erlguten_fetch = git
+pkg_erlguten_repo = https://github.com/richcarl/erlguten
+pkg_erlguten_commit = master
+
+PACKAGES += erlmc
+pkg_erlmc_name = erlmc
+pkg_erlmc_description = Erlang memcached binary protocol client
+pkg_erlmc_homepage = https://github.com/jkvor/erlmc
+pkg_erlmc_fetch = git
+pkg_erlmc_repo = https://github.com/jkvor/erlmc
+pkg_erlmc_commit = master
+
+PACKAGES += erlmongo
+pkg_erlmongo_name = erlmongo
+pkg_erlmongo_description = Record based Erlang driver for MongoDB with gridfs support
+pkg_erlmongo_homepage = https://github.com/SergejJurecko/erlmongo
+pkg_erlmongo_fetch = git
+pkg_erlmongo_repo = https://github.com/SergejJurecko/erlmongo
+pkg_erlmongo_commit = master
+
+PACKAGES += erlog
+pkg_erlog_name = erlog
+pkg_erlog_description = Prolog interpreter in and for Erlang
+pkg_erlog_homepage = https://github.com/rvirding/erlog
+pkg_erlog_fetch = git
+pkg_erlog_repo = https://github.com/rvirding/erlog
+pkg_erlog_commit = master
+
+PACKAGES += erlpass
+pkg_erlpass_name = erlpass
+pkg_erlpass_description = A library to handle password hashing and changing in a safe manner, independent from any kind of storage whatsoever.
+pkg_erlpass_homepage = https://github.com/ferd/erlpass
+pkg_erlpass_fetch = git
+pkg_erlpass_repo = https://github.com/ferd/erlpass
+pkg_erlpass_commit = master
+
+PACKAGES += erlport
+pkg_erlport_name = erlport
+pkg_erlport_description = ErlPort - connect Erlang to other languages
+pkg_erlport_homepage = https://github.com/hdima/erlport
+pkg_erlport_fetch = git
+pkg_erlport_repo = https://github.com/hdima/erlport
+pkg_erlport_commit = master
+
+PACKAGES += erlsh
+pkg_erlsh_name = erlsh
+pkg_erlsh_description = Erlang shell tools
+pkg_erlsh_homepage = https://github.com/proger/erlsh
+pkg_erlsh_fetch = git
+pkg_erlsh_repo = https://github.com/proger/erlsh
+pkg_erlsh_commit = master
+
+PACKAGES += erlsha2
+pkg_erlsha2_name = erlsha2
+pkg_erlsha2_description = SHA-224, SHA-256, SHA-384, SHA-512 implemented in Erlang NIFs.
+pkg_erlsha2_homepage = https://github.com/vinoski/erlsha2
+pkg_erlsha2_fetch = git
+pkg_erlsha2_repo = https://github.com/vinoski/erlsha2
+pkg_erlsha2_commit = master
+
+PACKAGES += erlsom
+pkg_erlsom_name = erlsom
+pkg_erlsom_description = XML parser for Erlang
+pkg_erlsom_homepage = https://github.com/willemdj/erlsom
+pkg_erlsom_fetch = git
+pkg_erlsom_repo = https://github.com/willemdj/erlsom
+pkg_erlsom_commit = master
+
+PACKAGES += erlubi
+pkg_erlubi_name = erlubi
+pkg_erlubi_description = Ubigraph Erlang Client (and Process Visualizer)
+pkg_erlubi_homepage = https://github.com/krestenkrab/erlubi
+pkg_erlubi_fetch = git
+pkg_erlubi_repo = https://github.com/krestenkrab/erlubi
+pkg_erlubi_commit = master
+
+PACKAGES += erlvolt
+pkg_erlvolt_name = erlvolt
+pkg_erlvolt_description = VoltDB Erlang Client Driver
+pkg_erlvolt_homepage = https://github.com/VoltDB/voltdb-client-erlang
+pkg_erlvolt_fetch = git
+pkg_erlvolt_repo = https://github.com/VoltDB/voltdb-client-erlang
+pkg_erlvolt_commit = master
+
+PACKAGES += erlware_commons
+pkg_erlware_commons_name = erlware_commons
+pkg_erlware_commons_description = Erlware Commons is an Erlware project focused on all aspects of reusable Erlang components.
+pkg_erlware_commons_homepage = https://github.com/erlware/erlware_commons
+pkg_erlware_commons_fetch = git
+pkg_erlware_commons_repo = https://github.com/erlware/erlware_commons
+pkg_erlware_commons_commit = master
+
+PACKAGES += erlydtl
+pkg_erlydtl_name = erlydtl
+pkg_erlydtl_description = Django Template Language for Erlang.
+pkg_erlydtl_homepage = https://github.com/erlydtl/erlydtl
+pkg_erlydtl_fetch = git
+pkg_erlydtl_repo = https://github.com/erlydtl/erlydtl
+pkg_erlydtl_commit = master
+
+PACKAGES += errd
+pkg_errd_name = errd
+pkg_errd_description = Erlang RRDTool library
+pkg_errd_homepage = https://github.com/archaelus/errd
+pkg_errd_fetch = git
+pkg_errd_repo = https://github.com/archaelus/errd
+pkg_errd_commit = master
+
+PACKAGES += erserve
+pkg_erserve_name = erserve
+pkg_erserve_description = Erlang/Rserve communication interface
+pkg_erserve_homepage = https://github.com/del/erserve
+pkg_erserve_fetch = git
+pkg_erserve_repo = https://github.com/del/erserve
+pkg_erserve_commit = master
+
+PACKAGES += erwa
+pkg_erwa_name = erwa
+pkg_erwa_description = A WAMP router and client written in Erlang.
+pkg_erwa_homepage = https://github.com/bwegh/erwa
+pkg_erwa_fetch = git
+pkg_erwa_repo = https://github.com/bwegh/erwa
+pkg_erwa_commit = master
+
+PACKAGES += escalus
+pkg_escalus_name = escalus
+pkg_escalus_description = An XMPP client library in Erlang for conveniently testing XMPP servers
+pkg_escalus_homepage = https://github.com/esl/escalus
+pkg_escalus_fetch = git
+pkg_escalus_repo = https://github.com/esl/escalus
+pkg_escalus_commit = master
+
+PACKAGES += esh_mk
+pkg_esh_mk_name = esh_mk
+pkg_esh_mk_description = esh template engine plugin for erlang.mk
+pkg_esh_mk_homepage = https://github.com/crownedgrouse/esh.mk
+pkg_esh_mk_fetch = git
+pkg_esh_mk_repo = https://github.com/crownedgrouse/esh.mk.git
+pkg_esh_mk_commit = master
+
+PACKAGES += espec
+pkg_espec_name = espec
+pkg_espec_description = ESpec: Behaviour driven development framework for Erlang
+pkg_espec_homepage = https://github.com/lucaspiller/espec
+pkg_espec_fetch = git
+pkg_espec_repo = https://github.com/lucaspiller/espec
+pkg_espec_commit = master
+
+PACKAGES += estatsd
+pkg_estatsd_name = estatsd
+pkg_estatsd_description = Erlang stats aggregation app that periodically flushes data to graphite
+pkg_estatsd_homepage = https://github.com/RJ/estatsd
+pkg_estatsd_fetch = git
+pkg_estatsd_repo = https://github.com/RJ/estatsd
+pkg_estatsd_commit = master
+
+PACKAGES += etap
+pkg_etap_name = etap
+pkg_etap_description = etap is a simple erlang testing library that provides TAP compliant output.
+pkg_etap_homepage = https://github.com/ngerakines/etap
+pkg_etap_fetch = git
+pkg_etap_repo = https://github.com/ngerakines/etap
+pkg_etap_commit = master
+
+PACKAGES += etest
+pkg_etest_name = etest
+pkg_etest_description = A lightweight, convention over configuration test framework for Erlang
+pkg_etest_homepage = https://github.com/wooga/etest
+pkg_etest_fetch = git
+pkg_etest_repo = https://github.com/wooga/etest
+pkg_etest_commit = master
+
+PACKAGES += etest_http
+pkg_etest_http_name = etest_http
+pkg_etest_http_description = etest Assertions around HTTP (client-side)
+pkg_etest_http_homepage = https://github.com/wooga/etest_http
+pkg_etest_http_fetch = git
+pkg_etest_http_repo = https://github.com/wooga/etest_http
+pkg_etest_http_commit = master
+
+PACKAGES += etoml
+pkg_etoml_name = etoml
+pkg_etoml_description = TOML language erlang parser
+pkg_etoml_homepage = https://github.com/kalta/etoml
+pkg_etoml_fetch = git
+pkg_etoml_repo = https://github.com/kalta/etoml
+pkg_etoml_commit = master
+
+PACKAGES += eunit
+pkg_eunit_name = eunit
+pkg_eunit_description = The EUnit lightweight unit testing framework for Erlang - this is the canonical development repository.
+pkg_eunit_homepage = https://github.com/richcarl/eunit
+pkg_eunit_fetch = git
+pkg_eunit_repo = https://github.com/richcarl/eunit
+pkg_eunit_commit = master
+
+PACKAGES += eunit_formatters
+pkg_eunit_formatters_name = eunit_formatters
+pkg_eunit_formatters_description = Because eunit's output sucks. Let's make it better.
+pkg_eunit_formatters_homepage = https://github.com/seancribbs/eunit_formatters
+pkg_eunit_formatters_fetch = git
+pkg_eunit_formatters_repo = https://github.com/seancribbs/eunit_formatters
+pkg_eunit_formatters_commit = master
+
+PACKAGES += euthanasia
+pkg_euthanasia_name = euthanasia
+pkg_euthanasia_description = Merciful killer for your Erlang processes
+pkg_euthanasia_homepage = https://github.com/doubleyou/euthanasia
+pkg_euthanasia_fetch = git
+pkg_euthanasia_repo = https://github.com/doubleyou/euthanasia
+pkg_euthanasia_commit = master
+
+PACKAGES += evum
+pkg_evum_name = evum
+pkg_evum_description = Spawn Linux VMs as Erlang processes in the Erlang VM
+pkg_evum_homepage = https://github.com/msantos/evum
+pkg_evum_fetch = git
+pkg_evum_repo = https://github.com/msantos/evum
+pkg_evum_commit = master
+
+PACKAGES += exec
+pkg_exec_name = erlexec
+pkg_exec_description = Execute and control OS processes from Erlang/OTP.
+pkg_exec_homepage = http://saleyn.github.com/erlexec
+pkg_exec_fetch = git
+pkg_exec_repo = https://github.com/saleyn/erlexec
+pkg_exec_commit = master
+
+PACKAGES += exml
+pkg_exml_name = exml
+pkg_exml_description = XML parsing library in Erlang
+pkg_exml_homepage = https://github.com/paulgray/exml
+pkg_exml_fetch = git
+pkg_exml_repo = https://github.com/paulgray/exml
+pkg_exml_commit = master
+
+PACKAGES += exometer
+pkg_exometer_name = exometer
+pkg_exometer_description = Basic measurement objects and probe behavior
+pkg_exometer_homepage = https://github.com/Feuerlabs/exometer
+pkg_exometer_fetch = git
+pkg_exometer_repo = https://github.com/Feuerlabs/exometer
+pkg_exometer_commit = master
+
+PACKAGES += exs1024
+pkg_exs1024_name = exs1024
+pkg_exs1024_description = Xorshift1024star pseudo random number generator for Erlang.
+pkg_exs1024_homepage = https://github.com/jj1bdx/exs1024
+pkg_exs1024_fetch = git
+pkg_exs1024_repo = https://github.com/jj1bdx/exs1024
+pkg_exs1024_commit = master
+
+PACKAGES += exs64
+pkg_exs64_name = exs64
+pkg_exs64_description = Xorshift64star pseudo random number generator for Erlang.
+pkg_exs64_homepage = https://github.com/jj1bdx/exs64
+pkg_exs64_fetch = git
+pkg_exs64_repo = https://github.com/jj1bdx/exs64
+pkg_exs64_commit = master
+
+PACKAGES += exsplus116
+pkg_exsplus116_name = exsplus116
+pkg_exsplus116_description = Xorshift116plus for Erlang
+pkg_exsplus116_homepage = https://github.com/jj1bdx/exsplus116
+pkg_exsplus116_fetch = git
+pkg_exsplus116_repo = https://github.com/jj1bdx/exsplus116
+pkg_exsplus116_commit = master
+
+PACKAGES += exsplus128
+pkg_exsplus128_name = exsplus128
+pkg_exsplus128_description = Xorshift128plus pseudo random number generator for Erlang.
+pkg_exsplus128_homepage = https://github.com/jj1bdx/exsplus128
+pkg_exsplus128_fetch = git
+pkg_exsplus128_repo = https://github.com/jj1bdx/exsplus128
+pkg_exsplus128_commit = master
+
+PACKAGES += ezmq
+pkg_ezmq_name = ezmq
+pkg_ezmq_description = zMQ implemented in Erlang
+pkg_ezmq_homepage = https://github.com/RoadRunnr/ezmq
+pkg_ezmq_fetch = git
+pkg_ezmq_repo = https://github.com/RoadRunnr/ezmq
+pkg_ezmq_commit = master
+
+PACKAGES += ezmtp
+pkg_ezmtp_name = ezmtp
+pkg_ezmtp_description = ZMTP protocol in pure Erlang.
+pkg_ezmtp_homepage = https://github.com/a13x/ezmtp
+pkg_ezmtp_fetch = git
+pkg_ezmtp_repo = https://github.com/a13x/ezmtp
+pkg_ezmtp_commit = master
+
+PACKAGES += fast_disk_log
+pkg_fast_disk_log_name = fast_disk_log
+pkg_fast_disk_log_description = Pool-based asynchronous Erlang disk logger
+pkg_fast_disk_log_homepage = https://github.com/lpgauth/fast_disk_log
+pkg_fast_disk_log_fetch = git
+pkg_fast_disk_log_repo = https://github.com/lpgauth/fast_disk_log
+pkg_fast_disk_log_commit = master
+
+PACKAGES += feeder
+pkg_feeder_name = feeder
+pkg_feeder_description = Stream parse RSS and Atom formatted XML feeds.
+pkg_feeder_homepage = https://github.com/michaelnisi/feeder
+pkg_feeder_fetch = git
+pkg_feeder_repo = https://github.com/michaelnisi/feeder
+pkg_feeder_commit = master
+
+PACKAGES += find_crate
+pkg_find_crate_name = find_crate
+pkg_find_crate_description = Find Rust libs and exes in Erlang application priv directory
+pkg_find_crate_homepage = https://github.com/goertzenator/find_crate
+pkg_find_crate_fetch = git
+pkg_find_crate_repo = https://github.com/goertzenator/find_crate
+pkg_find_crate_commit = master
+
+PACKAGES += fix
+pkg_fix_name = fix
+pkg_fix_description = http://fixprotocol.org/ implementation.
+pkg_fix_homepage = https://github.com/maxlapshin/fix
+pkg_fix_fetch = git
+pkg_fix_repo = https://github.com/maxlapshin/fix
+pkg_fix_commit = master
+
+PACKAGES += flower
+pkg_flower_name = flower
+pkg_flower_description = FlowER - an Erlang OpenFlow development platform
+pkg_flower_homepage = https://github.com/travelping/flower
+pkg_flower_fetch = git
+pkg_flower_repo = https://github.com/travelping/flower
+pkg_flower_commit = master
+
+PACKAGES += fn
+pkg_fn_name = fn
+pkg_fn_description = Function utilities for Erlang
+pkg_fn_homepage = https://github.com/reiddraper/fn
+pkg_fn_fetch = git
+pkg_fn_repo = https://github.com/reiddraper/fn
+pkg_fn_commit = master
+
+PACKAGES += folsom
+pkg_folsom_name = folsom
+pkg_folsom_description = Expose Erlang Events and Metrics
+pkg_folsom_homepage = https://github.com/boundary/folsom
+pkg_folsom_fetch = git
+pkg_folsom_repo = https://github.com/boundary/folsom
+pkg_folsom_commit = master
+
+PACKAGES += folsom_cowboy
+pkg_folsom_cowboy_name = folsom_cowboy
+pkg_folsom_cowboy_description = A Cowboy based Folsom HTTP Wrapper.
+pkg_folsom_cowboy_homepage = https://github.com/boundary/folsom_cowboy
+pkg_folsom_cowboy_fetch = git
+pkg_folsom_cowboy_repo = https://github.com/boundary/folsom_cowboy
+pkg_folsom_cowboy_commit = master
+
+PACKAGES += folsomite
+pkg_folsomite_name = folsomite
+pkg_folsomite_description = blow up your graphite / riemann server with folsom metrics
+pkg_folsomite_homepage = https://github.com/campanja/folsomite
+pkg_folsomite_fetch = git
+pkg_folsomite_repo = https://github.com/campanja/folsomite
+pkg_folsomite_commit = master
+
+PACKAGES += fs
+pkg_fs_name = fs
+pkg_fs_description = Erlang FileSystem Listener
+pkg_fs_homepage = https://github.com/synrc/fs
+pkg_fs_fetch = git
+pkg_fs_repo = https://github.com/synrc/fs
+pkg_fs_commit = master
+
+PACKAGES += fuse
+pkg_fuse_name = fuse
+pkg_fuse_description = A Circuit Breaker for Erlang
+pkg_fuse_homepage = https://github.com/jlouis/fuse
+pkg_fuse_fetch = git
+pkg_fuse_repo = https://github.com/jlouis/fuse
+pkg_fuse_commit = master
+
+PACKAGES += gcm
+pkg_gcm_name = gcm
+pkg_gcm_description = An Erlang application for Google Cloud Messaging
+pkg_gcm_homepage = https://github.com/pdincau/gcm-erlang
+pkg_gcm_fetch = git
+pkg_gcm_repo = https://github.com/pdincau/gcm-erlang
+pkg_gcm_commit = master
+
+PACKAGES += gcprof
+pkg_gcprof_name = gcprof
+pkg_gcprof_description = Garbage Collection profiler for Erlang
+pkg_gcprof_homepage = https://github.com/knutin/gcprof
+pkg_gcprof_fetch = git
+pkg_gcprof_repo = https://github.com/knutin/gcprof
+pkg_gcprof_commit = master
+
+PACKAGES += geas
+pkg_geas_name = geas
+pkg_geas_description = Guess Erlang Application Scattering
+pkg_geas_homepage = https://github.com/crownedgrouse/geas
+pkg_geas_fetch = git
+pkg_geas_repo = https://github.com/crownedgrouse/geas
+pkg_geas_commit = master
+
+PACKAGES += geef
+pkg_geef_name = geef
+pkg_geef_description = Git NEEEEF (Erlang NIF)
+pkg_geef_homepage = https://github.com/carlosmn/geef
+pkg_geef_fetch = git
+pkg_geef_repo = https://github.com/carlosmn/geef
+pkg_geef_commit = master
+
+PACKAGES += gen_coap
+pkg_gen_coap_name = gen_coap
+pkg_gen_coap_description = Generic Erlang CoAP Client/Server
+pkg_gen_coap_homepage = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_fetch = git
+pkg_gen_coap_repo = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_commit = master
+
+PACKAGES += gen_cycle
+pkg_gen_cycle_name = gen_cycle
+pkg_gen_cycle_description = Simple, generic OTP behaviour for recurring tasks
+pkg_gen_cycle_homepage = https://github.com/aerosol/gen_cycle
+pkg_gen_cycle_fetch = git
+pkg_gen_cycle_repo = https://github.com/aerosol/gen_cycle
+pkg_gen_cycle_commit = develop
+
+PACKAGES += gen_icmp
+pkg_gen_icmp_name = gen_icmp
+pkg_gen_icmp_description = Erlang interface to ICMP sockets
+pkg_gen_icmp_homepage = https://github.com/msantos/gen_icmp
+pkg_gen_icmp_fetch = git
+pkg_gen_icmp_repo = https://github.com/msantos/gen_icmp
+pkg_gen_icmp_commit = master
+
+PACKAGES += gen_leader
+pkg_gen_leader_name = gen_leader
+pkg_gen_leader_description = leader election behavior
+pkg_gen_leader_homepage = https://github.com/garret-smith/gen_leader_revival
+pkg_gen_leader_fetch = git
+pkg_gen_leader_repo = https://github.com/garret-smith/gen_leader_revival
+pkg_gen_leader_commit = master
+
+PACKAGES += gen_nb_server
+pkg_gen_nb_server_name = gen_nb_server
+pkg_gen_nb_server_description = OTP behavior for writing non-blocking servers
+pkg_gen_nb_server_homepage = https://github.com/kevsmith/gen_nb_server
+pkg_gen_nb_server_fetch = git
+pkg_gen_nb_server_repo = https://github.com/kevsmith/gen_nb_server
+pkg_gen_nb_server_commit = master
+
+PACKAGES += gen_paxos
+pkg_gen_paxos_name = gen_paxos
+pkg_gen_paxos_description = An Erlang/OTP-style implementation of the PAXOS distributed consensus protocol
+pkg_gen_paxos_homepage = https://github.com/gburd/gen_paxos
+pkg_gen_paxos_fetch = git
+pkg_gen_paxos_repo = https://github.com/gburd/gen_paxos
+pkg_gen_paxos_commit = master
+
+PACKAGES += gen_rpc
+pkg_gen_rpc_name = gen_rpc
+pkg_gen_rpc_description = A scalable RPC library for Erlang-VM based languages
+pkg_gen_rpc_homepage = https://github.com/priestjim/gen_rpc.git
+pkg_gen_rpc_fetch = git
+pkg_gen_rpc_repo = https://github.com/priestjim/gen_rpc.git
+pkg_gen_rpc_commit = master
+
+PACKAGES += gen_smtp
+pkg_gen_smtp_name = gen_smtp
+pkg_gen_smtp_description = A generic Erlang SMTP server and client that can be extended via callback modules
+pkg_gen_smtp_homepage = https://github.com/Vagabond/gen_smtp
+pkg_gen_smtp_fetch = git
+pkg_gen_smtp_repo = https://github.com/Vagabond/gen_smtp
+pkg_gen_smtp_commit = master
+
+PACKAGES += gen_tracker
+pkg_gen_tracker_name = gen_tracker
+pkg_gen_tracker_description = supervisor with ets handling of children and their metadata
+pkg_gen_tracker_homepage = https://github.com/erlyvideo/gen_tracker
+pkg_gen_tracker_fetch = git
+pkg_gen_tracker_repo = https://github.com/erlyvideo/gen_tracker
+pkg_gen_tracker_commit = master
+
+PACKAGES += gen_unix
+pkg_gen_unix_name = gen_unix
+pkg_gen_unix_description = Erlang Unix socket interface
+pkg_gen_unix_homepage = https://github.com/msantos/gen_unix
+pkg_gen_unix_fetch = git
+pkg_gen_unix_repo = https://github.com/msantos/gen_unix
+pkg_gen_unix_commit = master
+
+PACKAGES += geode
+pkg_geode_name = geode
+pkg_geode_description = geohash/proximity lookup in pure, uncut erlang.
+pkg_geode_homepage = https://github.com/bradfordw/geode
+pkg_geode_fetch = git
+pkg_geode_repo = https://github.com/bradfordw/geode
+pkg_geode_commit = master
+
+PACKAGES += getopt
+pkg_getopt_name = getopt
+pkg_getopt_description = Module to parse command line arguments using the GNU getopt syntax
+pkg_getopt_homepage = https://github.com/jcomellas/getopt
+pkg_getopt_fetch = git
+pkg_getopt_repo = https://github.com/jcomellas/getopt
+pkg_getopt_commit = master
+
+PACKAGES += gettext
+pkg_gettext_name = gettext
+pkg_gettext_description = Erlang internationalization library.
+pkg_gettext_homepage = https://github.com/etnt/gettext
+pkg_gettext_fetch = git
+pkg_gettext_repo = https://github.com/etnt/gettext
+pkg_gettext_commit = master
+
+PACKAGES += giallo
+pkg_giallo_name = giallo
+pkg_giallo_description = Small and flexible web framework on top of Cowboy
+pkg_giallo_homepage = https://github.com/kivra/giallo
+pkg_giallo_fetch = git
+pkg_giallo_repo = https://github.com/kivra/giallo
+pkg_giallo_commit = master
+
+PACKAGES += gin
+pkg_gin_name = gin
+pkg_gin_description = The guards and for Erlang parse_transform
+pkg_gin_homepage = https://github.com/mad-cocktail/gin
+pkg_gin_fetch = git
+pkg_gin_repo = https://github.com/mad-cocktail/gin
+pkg_gin_commit = master
+
+PACKAGES += gitty
+pkg_gitty_name = gitty
+pkg_gitty_description = Git access in erlang
+pkg_gitty_homepage = https://github.com/maxlapshin/gitty
+pkg_gitty_fetch = git
+pkg_gitty_repo = https://github.com/maxlapshin/gitty
+pkg_gitty_commit = master
+
+PACKAGES += gold_fever
+pkg_gold_fever_name = gold_fever
+pkg_gold_fever_description = A Treasure Hunt for Erlangers
+pkg_gold_fever_homepage = https://github.com/inaka/gold_fever
+pkg_gold_fever_fetch = git
+pkg_gold_fever_repo = https://github.com/inaka/gold_fever
+pkg_gold_fever_commit = master
+
+PACKAGES += gpb
+pkg_gpb_name = gpb
+pkg_gpb_description = A Google Protobuf implementation for Erlang
+pkg_gpb_homepage = https://github.com/tomas-abrahamsson/gpb
+pkg_gpb_fetch = git
+pkg_gpb_repo = https://github.com/tomas-abrahamsson/gpb
+pkg_gpb_commit = master
+
+PACKAGES += gproc
+pkg_gproc_name = gproc
+pkg_gproc_description = Extended process registry for Erlang
+pkg_gproc_homepage = https://github.com/uwiger/gproc
+pkg_gproc_fetch = git
+pkg_gproc_repo = https://github.com/uwiger/gproc
+pkg_gproc_commit = master
+
+PACKAGES += grapherl
+pkg_grapherl_name = grapherl
+pkg_grapherl_description = Create graphs of Erlang systems and programs
+pkg_grapherl_homepage = https://github.com/eproxus/grapherl
+pkg_grapherl_fetch = git
+pkg_grapherl_repo = https://github.com/eproxus/grapherl
+pkg_grapherl_commit = master
+
+PACKAGES += grpc
+pkg_grpc_name = grpc
+pkg_grpc_description = gRPC server in Erlang
+pkg_grpc_homepage = https://github.com/Bluehouse-Technology/grpc
+pkg_grpc_fetch = git
+pkg_grpc_repo = https://github.com/Bluehouse-Technology/grpc
+pkg_grpc_commit = master
+
+PACKAGES += grpc_client
+pkg_grpc_client_name = grpc_client
+pkg_grpc_client_description = gRPC client in Erlang
+pkg_grpc_client_homepage = https://github.com/Bluehouse-Technology/grpc_client
+pkg_grpc_client_fetch = git
+pkg_grpc_client_repo = https://github.com/Bluehouse-Technology/grpc_client
+pkg_grpc_client_commit = master
+
+PACKAGES += gun
+pkg_gun_name = gun
+pkg_gun_description = Asynchronous SPDY, HTTP and Websocket client written in Erlang.
+pkg_gun_homepage = http://ninenines.eu
+pkg_gun_fetch = git
+pkg_gun_repo = https://github.com/ninenines/gun
+pkg_gun_commit = master
+
+PACKAGES += gut
+pkg_gut_name = gut
+pkg_gut_description = gut is a template printing, aka scaffolding, tool for Erlang. Like rails generate or yeoman
+pkg_gut_homepage = https://github.com/unbalancedparentheses/gut
+pkg_gut_fetch = git
+pkg_gut_repo = https://github.com/unbalancedparentheses/gut
+pkg_gut_commit = master
+
+PACKAGES += hackney
+pkg_hackney_name = hackney
+pkg_hackney_description = simple HTTP client in Erlang
+pkg_hackney_homepage = https://github.com/benoitc/hackney
+pkg_hackney_fetch = git
+pkg_hackney_repo = https://github.com/benoitc/hackney
+pkg_hackney_commit = master
+
+PACKAGES += hamcrest
+pkg_hamcrest_name = hamcrest
+pkg_hamcrest_description = Erlang port of Hamcrest
+pkg_hamcrest_homepage = https://github.com/hyperthunk/hamcrest-erlang
+pkg_hamcrest_fetch = git
+pkg_hamcrest_repo = https://github.com/hyperthunk/hamcrest-erlang
+pkg_hamcrest_commit = master
+
+PACKAGES += hanoidb
+pkg_hanoidb_name = hanoidb
+pkg_hanoidb_description = Erlang LSM BTree Storage
+pkg_hanoidb_homepage = https://github.com/krestenkrab/hanoidb
+pkg_hanoidb_fetch = git
+pkg_hanoidb_repo = https://github.com/krestenkrab/hanoidb
+pkg_hanoidb_commit = master
+
+PACKAGES += hottub
+pkg_hottub_name = hottub
+pkg_hottub_description = Permanent Erlang Worker Pool
+pkg_hottub_homepage = https://github.com/bfrog/hottub
+pkg_hottub_fetch = git
+pkg_hottub_repo = https://github.com/bfrog/hottub
+pkg_hottub_commit = master
+
+PACKAGES += hpack
+pkg_hpack_name = hpack
+pkg_hpack_description = HPACK Implementation for Erlang
+pkg_hpack_homepage = https://github.com/joedevivo/hpack
+pkg_hpack_fetch = git
+pkg_hpack_repo = https://github.com/joedevivo/hpack
+pkg_hpack_commit = master
+
+PACKAGES += hyper
+pkg_hyper_name = hyper
+pkg_hyper_description = Erlang implementation of HyperLogLog
+pkg_hyper_homepage = https://github.com/GameAnalytics/hyper
+pkg_hyper_fetch = git
+pkg_hyper_repo = https://github.com/GameAnalytics/hyper
+pkg_hyper_commit = master
+
+PACKAGES += i18n
+pkg_i18n_name = i18n
+pkg_i18n_description = International components for unicode from Erlang (unicode, date, string, number, format, locale, localization, transliteration, icu4e)
+pkg_i18n_homepage = https://github.com/erlang-unicode/i18n
+pkg_i18n_fetch = git
+pkg_i18n_repo = https://github.com/erlang-unicode/i18n
+pkg_i18n_commit = master
+
+PACKAGES += ibrowse
+pkg_ibrowse_name = ibrowse
+pkg_ibrowse_description = Erlang HTTP client
+pkg_ibrowse_homepage = https://github.com/cmullaparthi/ibrowse
+pkg_ibrowse_fetch = git
+pkg_ibrowse_repo = https://github.com/cmullaparthi/ibrowse
+pkg_ibrowse_commit = master
+
+PACKAGES += idna
+pkg_idna_name = idna
+pkg_idna_description = Erlang IDNA lib
+pkg_idna_homepage = https://github.com/benoitc/erlang-idna
+pkg_idna_fetch = git
+pkg_idna_repo = https://github.com/benoitc/erlang-idna
+pkg_idna_commit = master
+
+PACKAGES += ierlang
+pkg_ierlang_name = ierlang
+pkg_ierlang_description = An Erlang language kernel for IPython.
+pkg_ierlang_homepage = https://github.com/robbielynch/ierlang
+pkg_ierlang_fetch = git
+pkg_ierlang_repo = https://github.com/robbielynch/ierlang
+pkg_ierlang_commit = master
+
+PACKAGES += iota
+pkg_iota_name = iota
+pkg_iota_description = iota (Inter-dependency Objective Testing Apparatus) - a tool to enforce clean separation of responsibilities in Erlang code
+pkg_iota_homepage = https://github.com/jpgneves/iota
+pkg_iota_fetch = git
+pkg_iota_repo = https://github.com/jpgneves/iota
+pkg_iota_commit = master
+
+PACKAGES += irc_lib
+pkg_irc_lib_name = irc_lib
+pkg_irc_lib_description = Erlang IRC client library
+pkg_irc_lib_homepage = https://github.com/OtpChatBot/irc_lib
+pkg_irc_lib_fetch = git
+pkg_irc_lib_repo = https://github.com/OtpChatBot/irc_lib
+pkg_irc_lib_commit = master
+
+PACKAGES += ircd
+pkg_ircd_name = ircd
+pkg_ircd_description = A pluggable IRC daemon application/library for Erlang.
+pkg_ircd_homepage = https://github.com/tonyg/erlang-ircd
+pkg_ircd_fetch = git
+pkg_ircd_repo = https://github.com/tonyg/erlang-ircd
+pkg_ircd_commit = master
+
+PACKAGES += iris
+pkg_iris_name = iris
+pkg_iris_description = Iris Erlang binding
+pkg_iris_homepage = https://github.com/project-iris/iris-erl
+pkg_iris_fetch = git
+pkg_iris_repo = https://github.com/project-iris/iris-erl
+pkg_iris_commit = master
+
+PACKAGES += iso8601
+pkg_iso8601_name = iso8601
+pkg_iso8601_description = Erlang ISO 8601 date formatter/parser
+pkg_iso8601_homepage = https://github.com/seansawyer/erlang_iso8601
+pkg_iso8601_fetch = git
+pkg_iso8601_repo = https://github.com/seansawyer/erlang_iso8601
+pkg_iso8601_commit = master
+
+PACKAGES += jamdb_sybase
+pkg_jamdb_sybase_name = jamdb_sybase
+pkg_jamdb_sybase_description = Erlang driver for SAP Sybase ASE
+pkg_jamdb_sybase_homepage = https://github.com/erlangbureau/jamdb_sybase
+pkg_jamdb_sybase_fetch = git
+pkg_jamdb_sybase_repo = https://github.com/erlangbureau/jamdb_sybase
+pkg_jamdb_sybase_commit = master
+
+PACKAGES += jerg
+pkg_jerg_name = jerg
+pkg_jerg_description = JSON Schema to Erlang Records Generator
+pkg_jerg_homepage = https://github.com/ddossot/jerg
+pkg_jerg_fetch = git
+pkg_jerg_repo = https://github.com/ddossot/jerg
+pkg_jerg_commit = master
+
+PACKAGES += jesse
+pkg_jesse_name = jesse
+pkg_jesse_description = jesse (JSON Schema Erlang) is an implementation of a JSON Schema validator for Erlang.
+pkg_jesse_homepage = https://github.com/for-GET/jesse
+pkg_jesse_fetch = git
+pkg_jesse_repo = https://github.com/for-GET/jesse
+pkg_jesse_commit = master
+
+PACKAGES += jiffy
+pkg_jiffy_name = jiffy
+pkg_jiffy_description = JSON NIFs for Erlang.
+pkg_jiffy_homepage = https://github.com/davisp/jiffy
+pkg_jiffy_fetch = git
+pkg_jiffy_repo = https://github.com/davisp/jiffy
+pkg_jiffy_commit = master
+
+PACKAGES += jiffy_v
+pkg_jiffy_v_name = jiffy_v
+pkg_jiffy_v_description = JSON validation utility
+pkg_jiffy_v_homepage = https://github.com/shizzard/jiffy-v
+pkg_jiffy_v_fetch = git
+pkg_jiffy_v_repo = https://github.com/shizzard/jiffy-v
+pkg_jiffy_v_commit = master
+
+PACKAGES += jobs
+pkg_jobs_name = jobs
+pkg_jobs_description = A job scheduler for load regulation
+pkg_jobs_homepage = https://github.com/esl/jobs
+pkg_jobs_fetch = git
+pkg_jobs_repo = https://github.com/esl/jobs
+pkg_jobs_commit = master
+
+PACKAGES += joxa
+pkg_joxa_name = joxa
+pkg_joxa_description = A Modern Lisp for the Erlang VM
+pkg_joxa_homepage = https://github.com/joxa/joxa
+pkg_joxa_fetch = git
+pkg_joxa_repo = https://github.com/joxa/joxa
+pkg_joxa_commit = master
+
+PACKAGES += json
+pkg_json_name = json
+pkg_json_description = A high-level JSON library for Erlang (17.0+)
+pkg_json_homepage = https://github.com/talentdeficit/json
+pkg_json_fetch = git
+pkg_json_repo = https://github.com/talentdeficit/json
+pkg_json_commit = master
+
+PACKAGES += json_rec
+pkg_json_rec_name = json_rec
+pkg_json_rec_description = JSON to Erlang record
+pkg_json_rec_homepage = https://github.com/justinkirby/json_rec
+pkg_json_rec_fetch = git
+pkg_json_rec_repo = https://github.com/justinkirby/json_rec
+pkg_json_rec_commit = master
+
+PACKAGES += jsone
+pkg_jsone_name = jsone
+pkg_jsone_description = An Erlang library for encoding and decoding JSON data.
+pkg_jsone_homepage = https://github.com/sile/jsone.git
+pkg_jsone_fetch = git
+pkg_jsone_repo = https://github.com/sile/jsone.git
+pkg_jsone_commit = master
+
+PACKAGES += jsonerl
+pkg_jsonerl_name = jsonerl
+pkg_jsonerl_description = Yet another, but slightly different, Erlang <-> JSON encoder/decoder
+pkg_jsonerl_homepage = https://github.com/lambder/jsonerl
+pkg_jsonerl_fetch = git
+pkg_jsonerl_repo = https://github.com/lambder/jsonerl
+pkg_jsonerl_commit = master
+
+PACKAGES += jsonpath
+pkg_jsonpath_name = jsonpath
+pkg_jsonpath_description = Fast Erlang JSON data retrieval and updates via javascript-like notation
+pkg_jsonpath_homepage = https://github.com/GeneStevens/jsonpath
+pkg_jsonpath_fetch = git
+pkg_jsonpath_repo = https://github.com/GeneStevens/jsonpath
+pkg_jsonpath_commit = master
+
+PACKAGES += jsonx
+pkg_jsonx_name = jsonx
+pkg_jsonx_description = JSONX is an Erlang library for efficiently decoding and encoding JSON, written in C.
+pkg_jsonx_homepage = https://github.com/iskra/jsonx
+pkg_jsonx_fetch = git
+pkg_jsonx_repo = https://github.com/iskra/jsonx
+pkg_jsonx_commit = master
+
+PACKAGES += jsx
+pkg_jsx_name = jsx
+pkg_jsx_description = An Erlang application for consuming, producing and manipulating JSON.
+pkg_jsx_homepage = https://github.com/talentdeficit/jsx
+pkg_jsx_fetch = git
+pkg_jsx_repo = https://github.com/talentdeficit/jsx
+pkg_jsx_commit = master
+
+PACKAGES += kafka
+pkg_kafka_name = kafka
+pkg_kafka_description = Kafka consumer and producer in Erlang
+pkg_kafka_homepage = https://github.com/wooga/kafka-erlang
+pkg_kafka_fetch = git
+pkg_kafka_repo = https://github.com/wooga/kafka-erlang
+pkg_kafka_commit = master
+
+PACKAGES += kafka_protocol
+pkg_kafka_protocol_name = kafka_protocol
+pkg_kafka_protocol_description = Kafka protocol Erlang library
+pkg_kafka_protocol_homepage = https://github.com/klarna/kafka_protocol
+pkg_kafka_protocol_fetch = git
+pkg_kafka_protocol_repo = https://github.com/klarna/kafka_protocol.git
+pkg_kafka_protocol_commit = master
+
+PACKAGES += kai
+pkg_kai_name = kai
+pkg_kai_description = DHT storage by Takeshi Inoue
+pkg_kai_homepage = https://github.com/synrc/kai
+pkg_kai_fetch = git
+pkg_kai_repo = https://github.com/synrc/kai
+pkg_kai_commit = master
+
+PACKAGES += katja
+pkg_katja_name = katja
+pkg_katja_description = A simple Riemann client written in Erlang.
+pkg_katja_homepage = https://github.com/nifoc/katja
+pkg_katja_fetch = git
+pkg_katja_repo = https://github.com/nifoc/katja
+pkg_katja_commit = master
+
+PACKAGES += kdht
+pkg_kdht_name = kdht
+pkg_kdht_description = kdht is an Erlang DHT implementation
+pkg_kdht_homepage = https://github.com/kevinlynx/kdht
+pkg_kdht_fetch = git
+pkg_kdht_repo = https://github.com/kevinlynx/kdht
+pkg_kdht_commit = master
+
+PACKAGES += key2value
+pkg_key2value_name = key2value
+pkg_key2value_description = Erlang 2-way map
+pkg_key2value_homepage = https://github.com/okeuday/key2value
+pkg_key2value_fetch = git
+pkg_key2value_repo = https://github.com/okeuday/key2value
+pkg_key2value_commit = master
+
+PACKAGES += keys1value
+pkg_keys1value_name = keys1value
+pkg_keys1value_description = Erlang set associative map for key lists
+pkg_keys1value_homepage = https://github.com/okeuday/keys1value
+pkg_keys1value_fetch = git
+pkg_keys1value_repo = https://github.com/okeuday/keys1value
+pkg_keys1value_commit = master
+
+PACKAGES += kinetic
+pkg_kinetic_name = kinetic
+pkg_kinetic_description = Erlang Kinesis Client
+pkg_kinetic_homepage = https://github.com/AdRoll/kinetic
+pkg_kinetic_fetch = git
+pkg_kinetic_repo = https://github.com/AdRoll/kinetic
+pkg_kinetic_commit = master
+
+PACKAGES += kjell
+pkg_kjell_name = kjell
+pkg_kjell_description = Erlang Shell
+pkg_kjell_homepage = https://github.com/karlll/kjell
+pkg_kjell_fetch = git
+pkg_kjell_repo = https://github.com/karlll/kjell
+pkg_kjell_commit = master
+
+PACKAGES += kraken
+pkg_kraken_name = kraken
+pkg_kraken_description = Distributed Pubsub Server for Realtime Apps
+pkg_kraken_homepage = https://github.com/Asana/kraken
+pkg_kraken_fetch = git
+pkg_kraken_repo = https://github.com/Asana/kraken
+pkg_kraken_commit = master
+
+PACKAGES += kucumberl
+pkg_kucumberl_name = kucumberl
+pkg_kucumberl_description = A pure-Erlang, open-source implementation of Cucumber
+pkg_kucumberl_homepage = https://github.com/openshine/kucumberl
+pkg_kucumberl_fetch = git
+pkg_kucumberl_repo = https://github.com/openshine/kucumberl
+pkg_kucumberl_commit = master
+
+PACKAGES += kvc
+pkg_kvc_name = kvc
+pkg_kvc_description = KVC - Key Value Coding for Erlang data structures
+pkg_kvc_homepage = https://github.com/etrepum/kvc
+pkg_kvc_fetch = git
+pkg_kvc_repo = https://github.com/etrepum/kvc
+pkg_kvc_commit = master
+
+PACKAGES += kvlists
+pkg_kvlists_name = kvlists
+pkg_kvlists_description = Lists of key-value pairs (decoded JSON) in Erlang
+pkg_kvlists_homepage = https://github.com/jcomellas/kvlists
+pkg_kvlists_fetch = git
+pkg_kvlists_repo = https://github.com/jcomellas/kvlists
+pkg_kvlists_commit = master
+
+PACKAGES += kvs
+pkg_kvs_name = kvs
+pkg_kvs_description = Container and Iterator
+pkg_kvs_homepage = https://github.com/synrc/kvs
+pkg_kvs_fetch = git
+pkg_kvs_repo = https://github.com/synrc/kvs
+pkg_kvs_commit = master
+
+PACKAGES += lager
+pkg_lager_name = lager
+pkg_lager_description = A logging framework for Erlang/OTP.
+pkg_lager_homepage = https://github.com/erlang-lager/lager
+pkg_lager_fetch = git
+pkg_lager_repo = https://github.com/erlang-lager/lager
+pkg_lager_commit = master
+
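+# Override note (a sketch based on the standard erlang.mk dep_* convention):
+# a project Makefile may repoint or pin any indexed package by defining
+# dep_NAME = <fetch method> <repo> <version> before including erlang.mk;
+# dep_NAME_commit pins only the version. For example:
+#
+#   DEPS = lager
+#   dep_lager = git https://github.com/erlang-lager/lager master
+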
+PACKAGES += lager_amqp_backend
+pkg_lager_amqp_backend_name = lager_amqp_backend
+pkg_lager_amqp_backend_description = AMQP RabbitMQ Lager backend
+pkg_lager_amqp_backend_homepage = https://github.com/jbrisbin/lager_amqp_backend
+pkg_lager_amqp_backend_fetch = git
+pkg_lager_amqp_backend_repo = https://github.com/jbrisbin/lager_amqp_backend
+pkg_lager_amqp_backend_commit = master
+
+PACKAGES += lager_syslog
+pkg_lager_syslog_name = lager_syslog
+pkg_lager_syslog_description = Syslog backend for lager
+pkg_lager_syslog_homepage = https://github.com/erlang-lager/lager_syslog
+pkg_lager_syslog_fetch = git
+pkg_lager_syslog_repo = https://github.com/erlang-lager/lager_syslog
+pkg_lager_syslog_commit = master
+
+PACKAGES += lambdapad
+pkg_lambdapad_name = lambdapad
+pkg_lambdapad_description = Static site generator using Erlang. Yes, Erlang.
+pkg_lambdapad_homepage = https://github.com/gar1t/lambdapad
+pkg_lambdapad_fetch = git
+pkg_lambdapad_repo = https://github.com/gar1t/lambdapad
+pkg_lambdapad_commit = master
+
+PACKAGES += lasp
+pkg_lasp_name = lasp
+pkg_lasp_description = A Language for Distributed, Eventually Consistent Computations
+pkg_lasp_homepage = http://lasp-lang.org/
+pkg_lasp_fetch = git
+pkg_lasp_repo = https://github.com/lasp-lang/lasp
+pkg_lasp_commit = master
+
+PACKAGES += lasse
+pkg_lasse_name = lasse
+pkg_lasse_description = SSE handler for Cowboy
+pkg_lasse_homepage = https://github.com/inaka/lasse
+pkg_lasse_fetch = git
+pkg_lasse_repo = https://github.com/inaka/lasse
+pkg_lasse_commit = master
+
+PACKAGES += ldap
+pkg_ldap_name = ldap
+pkg_ldap_description = LDAP server written in Erlang
+pkg_ldap_homepage = https://github.com/spawnproc/ldap
+pkg_ldap_fetch = git
+pkg_ldap_repo = https://github.com/spawnproc/ldap
+pkg_ldap_commit = master
+
+PACKAGES += lethink
+pkg_lethink_name = lethink
+pkg_lethink_description = Erlang driver for RethinkDB
+pkg_lethink_homepage = https://github.com/taybin/lethink
+pkg_lethink_fetch = git
+pkg_lethink_repo = https://github.com/taybin/lethink
+pkg_lethink_commit = master
+
+PACKAGES += lfe
+pkg_lfe_name = lfe
+pkg_lfe_description = Lisp Flavoured Erlang (LFE)
+pkg_lfe_homepage = https://github.com/rvirding/lfe
+pkg_lfe_fetch = git
+pkg_lfe_repo = https://github.com/rvirding/lfe
+pkg_lfe_commit = master
+
+PACKAGES += ling
+pkg_ling_name = ling
+pkg_ling_description = Erlang on Xen
+pkg_ling_homepage = https://github.com/cloudozer/ling
+pkg_ling_fetch = git
+pkg_ling_repo = https://github.com/cloudozer/ling
+pkg_ling_commit = master
+
+PACKAGES += live
+pkg_live_name = live
+pkg_live_description = Automated module and configuration reloader.
+pkg_live_homepage = http://ninenines.eu
+pkg_live_fetch = git
+pkg_live_repo = https://github.com/ninenines/live
+pkg_live_commit = master
+
+PACKAGES += lmq
+pkg_lmq_name = lmq
+pkg_lmq_description = Lightweight Message Queue
+pkg_lmq_homepage = https://github.com/iij/lmq
+pkg_lmq_fetch = git
+pkg_lmq_repo = https://github.com/iij/lmq
+pkg_lmq_commit = master
+
+PACKAGES += locker
+pkg_locker_name = locker
+pkg_locker_description = Atomic distributed 'check and set' for short-lived keys
+pkg_locker_homepage = https://github.com/wooga/locker
+pkg_locker_fetch = git
+pkg_locker_repo = https://github.com/wooga/locker
+pkg_locker_commit = master
+
+PACKAGES += locks
+pkg_locks_name = locks
+pkg_locks_description = A scalable, deadlock-resolving resource locker
+pkg_locks_homepage = https://github.com/uwiger/locks
+pkg_locks_fetch = git
+pkg_locks_repo = https://github.com/uwiger/locks
+pkg_locks_commit = master
+
+PACKAGES += log4erl
+pkg_log4erl_name = log4erl
+pkg_log4erl_description = A logger for erlang in the spirit of Log4J.
+pkg_log4erl_homepage = https://github.com/ahmednawras/log4erl
+pkg_log4erl_fetch = git
+pkg_log4erl_repo = https://github.com/ahmednawras/log4erl
+pkg_log4erl_commit = master
+
+PACKAGES += lol
+pkg_lol_name = lol
+pkg_lol_description = Lisp on erLang, and programming is fun again
+pkg_lol_homepage = https://github.com/b0oh/lol
+pkg_lol_fetch = git
+pkg_lol_repo = https://github.com/b0oh/lol
+pkg_lol_commit = master
+
+PACKAGES += lucid
+pkg_lucid_name = lucid
+pkg_lucid_description = HTTP/2 server written in Erlang
+pkg_lucid_homepage = https://github.com/tatsuhiro-t/lucid
+pkg_lucid_fetch = git
+pkg_lucid_repo = https://github.com/tatsuhiro-t/lucid
+pkg_lucid_commit = master
+
+PACKAGES += luerl
+pkg_luerl_name = luerl
+pkg_luerl_description = Lua in Erlang
+pkg_luerl_homepage = https://github.com/rvirding/luerl
+pkg_luerl_fetch = git
+pkg_luerl_repo = https://github.com/rvirding/luerl
+pkg_luerl_commit = develop
+
+PACKAGES += luwak
+pkg_luwak_name = luwak
+pkg_luwak_description = Large-object storage interface for Riak
+pkg_luwak_homepage = https://github.com/basho/luwak
+pkg_luwak_fetch = git
+pkg_luwak_repo = https://github.com/basho/luwak
+pkg_luwak_commit = master
+
+PACKAGES += lux
+pkg_lux_name = lux
+pkg_lux_description = Lux (LUcid eXpect scripting) simplifies test automation and provides an Expect-style execution of commands
+pkg_lux_homepage = https://github.com/hawk/lux
+pkg_lux_fetch = git
+pkg_lux_repo = https://github.com/hawk/lux
+pkg_lux_commit = master
+
+PACKAGES += machi
+pkg_machi_name = machi
+pkg_machi_description = Machi file store
+pkg_machi_homepage = https://github.com/basho/machi
+pkg_machi_fetch = git
+pkg_machi_repo = https://github.com/basho/machi
+pkg_machi_commit = master
+
+PACKAGES += mad
+pkg_mad_name = mad
+pkg_mad_description = Small and Fast Rebar Replacement
+pkg_mad_homepage = https://github.com/synrc/mad
+pkg_mad_fetch = git
+pkg_mad_repo = https://github.com/synrc/mad
+pkg_mad_commit = master
+
+PACKAGES += marina
+pkg_marina_name = marina
+pkg_marina_description = Non-blocking Erlang Cassandra CQL3 client
+pkg_marina_homepage = https://github.com/lpgauth/marina
+pkg_marina_fetch = git
+pkg_marina_repo = https://github.com/lpgauth/marina
+pkg_marina_commit = master
+
+PACKAGES += mavg
+pkg_mavg_name = mavg
+pkg_mavg_description = Erlang :: Exponential moving average library
+pkg_mavg_homepage = https://github.com/EchoTeam/mavg
+pkg_mavg_fetch = git
+pkg_mavg_repo = https://github.com/EchoTeam/mavg
+pkg_mavg_commit = master
+
+PACKAGES += mc_erl
+pkg_mc_erl_name = mc_erl
+pkg_mc_erl_description = mc-erl is a server for Minecraft 1.4.7 written in Erlang.
+pkg_mc_erl_homepage = https://github.com/clonejo/mc-erl
+pkg_mc_erl_fetch = git
+pkg_mc_erl_repo = https://github.com/clonejo/mc-erl
+pkg_mc_erl_commit = master
+
+PACKAGES += mcd
+pkg_mcd_name = mcd
+pkg_mcd_description = Fast memcached protocol client in pure Erlang
+pkg_mcd_homepage = https://github.com/EchoTeam/mcd
+pkg_mcd_fetch = git
+pkg_mcd_repo = https://github.com/EchoTeam/mcd
+pkg_mcd_commit = master
+
+PACKAGES += mcerlang
+pkg_mcerlang_name = mcerlang
+pkg_mcerlang_description = The McErlang model checker for Erlang
+pkg_mcerlang_homepage = https://github.com/fredlund/McErlang
+pkg_mcerlang_fetch = git
+pkg_mcerlang_repo = https://github.com/fredlund/McErlang
+pkg_mcerlang_commit = master
+
+PACKAGES += meck
+pkg_meck_name = meck
+pkg_meck_description = A mocking library for Erlang
+pkg_meck_homepage = https://github.com/eproxus/meck
+pkg_meck_fetch = git
+pkg_meck_repo = https://github.com/eproxus/meck
+pkg_meck_commit = master
+
+PACKAGES += mekao
+pkg_mekao_name = mekao
+pkg_mekao_description = SQL constructor
+pkg_mekao_homepage = https://github.com/ddosia/mekao
+pkg_mekao_fetch = git
+pkg_mekao_repo = https://github.com/ddosia/mekao
+pkg_mekao_commit = master
+
+PACKAGES += memo
+pkg_memo_name = memo
+pkg_memo_description = Erlang memoization server
+pkg_memo_homepage = https://github.com/tuncer/memo
+pkg_memo_fetch = git
+pkg_memo_repo = https://github.com/tuncer/memo
+pkg_memo_commit = master
+
+PACKAGES += merge_index
+pkg_merge_index_name = merge_index
+pkg_merge_index_description = MergeIndex is an Erlang library for storing ordered sets on disk. It is very similar to an SSTable (in Google's Bigtable) or an HFile (in Hadoop).
+pkg_merge_index_homepage = https://github.com/basho/merge_index
+pkg_merge_index_fetch = git
+pkg_merge_index_repo = https://github.com/basho/merge_index
+pkg_merge_index_commit = master
+
+PACKAGES += merl
+pkg_merl_name = merl
+pkg_merl_description = Metaprogramming in Erlang
+pkg_merl_homepage = https://github.com/richcarl/merl
+pkg_merl_fetch = git
+pkg_merl_repo = https://github.com/richcarl/merl
+pkg_merl_commit = master
+
+PACKAGES += mimerl
+pkg_mimerl_name = mimerl
+pkg_mimerl_description = library to handle mimetypes
+pkg_mimerl_homepage = https://github.com/benoitc/mimerl
+pkg_mimerl_fetch = git
+pkg_mimerl_repo = https://github.com/benoitc/mimerl
+pkg_mimerl_commit = master
+
+PACKAGES += mimetypes
+pkg_mimetypes_name = mimetypes
+pkg_mimetypes_description = Erlang MIME types library
+pkg_mimetypes_homepage = https://github.com/spawngrid/mimetypes
+pkg_mimetypes_fetch = git
+pkg_mimetypes_repo = https://github.com/spawngrid/mimetypes
+pkg_mimetypes_commit = master
+
+PACKAGES += mixer
+pkg_mixer_name = mixer
+pkg_mixer_description = Mix in functions from other modules
+pkg_mixer_homepage = https://github.com/chef/mixer
+pkg_mixer_fetch = git
+pkg_mixer_repo = https://github.com/chef/mixer
+pkg_mixer_commit = master
+
+PACKAGES += mochiweb
+pkg_mochiweb_name = mochiweb
+pkg_mochiweb_description = MochiWeb is an Erlang library for building lightweight HTTP servers.
+pkg_mochiweb_homepage = https://github.com/mochi/mochiweb
+pkg_mochiweb_fetch = git
+pkg_mochiweb_repo = https://github.com/mochi/mochiweb
+pkg_mochiweb_commit = master
+
+PACKAGES += mochiweb_xpath
+pkg_mochiweb_xpath_name = mochiweb_xpath
+pkg_mochiweb_xpath_description = XPath support for mochiweb's html parser
+pkg_mochiweb_xpath_homepage = https://github.com/retnuh/mochiweb_xpath
+pkg_mochiweb_xpath_fetch = git
+pkg_mochiweb_xpath_repo = https://github.com/retnuh/mochiweb_xpath
+pkg_mochiweb_xpath_commit = master
+
+PACKAGES += mockgyver
+pkg_mockgyver_name = mockgyver
+pkg_mockgyver_description = A mocking library for Erlang
+pkg_mockgyver_homepage = https://github.com/klajo/mockgyver
+pkg_mockgyver_fetch = git
+pkg_mockgyver_repo = https://github.com/klajo/mockgyver
+pkg_mockgyver_commit = master
+
+PACKAGES += modlib
+pkg_modlib_name = modlib
+pkg_modlib_description = Web framework based on Erlang's inets httpd
+pkg_modlib_homepage = https://github.com/gar1t/modlib
+pkg_modlib_fetch = git
+pkg_modlib_repo = https://github.com/gar1t/modlib
+pkg_modlib_commit = master
+
+PACKAGES += mongodb
+pkg_mongodb_name = mongodb
+pkg_mongodb_description = MongoDB driver for Erlang
+pkg_mongodb_homepage = https://github.com/comtihon/mongodb-erlang
+pkg_mongodb_fetch = git
+pkg_mongodb_repo = https://github.com/comtihon/mongodb-erlang
+pkg_mongodb_commit = master
+
+PACKAGES += mongooseim
+pkg_mongooseim_name = mongooseim
+pkg_mongooseim_description = Jabber / XMPP server with focus on performance and scalability, by Erlang Solutions
+pkg_mongooseim_homepage = https://www.erlang-solutions.com/products/mongooseim-massively-scalable-ejabberd-platform
+pkg_mongooseim_fetch = git
+pkg_mongooseim_repo = https://github.com/esl/MongooseIM
+pkg_mongooseim_commit = master
+
+PACKAGES += moyo
+pkg_moyo_name = moyo
+pkg_moyo_description = Erlang utility functions library
+pkg_moyo_homepage = https://github.com/dwango/moyo
+pkg_moyo_fetch = git
+pkg_moyo_repo = https://github.com/dwango/moyo
+pkg_moyo_commit = master
+
+PACKAGES += msgpack
+pkg_msgpack_name = msgpack
+pkg_msgpack_description = MessagePack (de)serializer implementation for Erlang
+pkg_msgpack_homepage = https://github.com/msgpack/msgpack-erlang
+pkg_msgpack_fetch = git
+pkg_msgpack_repo = https://github.com/msgpack/msgpack-erlang
+pkg_msgpack_commit = master
+
+PACKAGES += mu2
+pkg_mu2_name = mu2
+pkg_mu2_description = Erlang mutation testing tool
+pkg_mu2_homepage = https://github.com/ramsay-t/mu2
+pkg_mu2_fetch = git
+pkg_mu2_repo = https://github.com/ramsay-t/mu2
+pkg_mu2_commit = master
+
+PACKAGES += mustache
+pkg_mustache_name = mustache
+pkg_mustache_description = Mustache template engine for Erlang.
+pkg_mustache_homepage = https://github.com/mojombo/mustache.erl
+pkg_mustache_fetch = git
+pkg_mustache_repo = https://github.com/mojombo/mustache.erl
+pkg_mustache_commit = master
+
+PACKAGES += myproto
+pkg_myproto_name = myproto
+pkg_myproto_description = MySQL Server Protocol in Erlang
+pkg_myproto_homepage = https://github.com/altenwald/myproto
+pkg_myproto_fetch = git
+pkg_myproto_repo = https://github.com/altenwald/myproto
+pkg_myproto_commit = master
+
+PACKAGES += mysql
+pkg_mysql_name = mysql
+pkg_mysql_description = MySQL client library for Erlang/OTP
+pkg_mysql_homepage = https://github.com/mysql-otp/mysql-otp
+pkg_mysql_fetch = git
+pkg_mysql_repo = https://github.com/mysql-otp/mysql-otp
+pkg_mysql_commit = 1.5.1
+
+PACKAGES += n2o
+pkg_n2o_name = n2o
+pkg_n2o_description = WebSocket Application Server
+pkg_n2o_homepage = https://github.com/5HT/n2o
+pkg_n2o_fetch = git
+pkg_n2o_repo = https://github.com/5HT/n2o
+pkg_n2o_commit = master
+
+PACKAGES += nat_upnp
+pkg_nat_upnp_name = nat_upnp
+pkg_nat_upnp_description = Erlang library to map your internal port to an external one using UPnP IGD
+pkg_nat_upnp_homepage = https://github.com/benoitc/nat_upnp
+pkg_nat_upnp_fetch = git
+pkg_nat_upnp_repo = https://github.com/benoitc/nat_upnp
+pkg_nat_upnp_commit = master
+
+PACKAGES += neo4j
+pkg_neo4j_name = neo4j
+pkg_neo4j_description = Erlang client library for Neo4J.
+pkg_neo4j_homepage = https://github.com/dmitriid/neo4j-erlang
+pkg_neo4j_fetch = git
+pkg_neo4j_repo = https://github.com/dmitriid/neo4j-erlang
+pkg_neo4j_commit = master
+
+PACKAGES += neotoma
+pkg_neotoma_name = neotoma
+pkg_neotoma_description = Erlang library and packrat parser-generator for parsing expression grammars.
+pkg_neotoma_homepage = https://github.com/seancribbs/neotoma
+pkg_neotoma_fetch = git
+pkg_neotoma_repo = https://github.com/seancribbs/neotoma
+pkg_neotoma_commit = master
+
+PACKAGES += newrelic
+pkg_newrelic_name = newrelic
+pkg_newrelic_description = Erlang library for sending metrics to New Relic
+pkg_newrelic_homepage = https://github.com/wooga/newrelic-erlang
+pkg_newrelic_fetch = git
+pkg_newrelic_repo = https://github.com/wooga/newrelic-erlang
+pkg_newrelic_commit = master
+
+PACKAGES += nifty
+pkg_nifty_name = nifty
+pkg_nifty_description = Erlang NIF wrapper generator
+pkg_nifty_homepage = https://github.com/parapluu/nifty
+pkg_nifty_fetch = git
+pkg_nifty_repo = https://github.com/parapluu/nifty
+pkg_nifty_commit = master
+
+PACKAGES += nitrogen_core
+pkg_nitrogen_core_name = nitrogen_core
+pkg_nitrogen_core_description = The core Nitrogen library.
+pkg_nitrogen_core_homepage = http://nitrogenproject.com/
+pkg_nitrogen_core_fetch = git
+pkg_nitrogen_core_repo = https://github.com/nitrogen/nitrogen_core
+pkg_nitrogen_core_commit = master
+
+PACKAGES += nkbase
+pkg_nkbase_name = nkbase
+pkg_nkbase_description = NkBASE distributed database
+pkg_nkbase_homepage = https://github.com/Nekso/nkbase
+pkg_nkbase_fetch = git
+pkg_nkbase_repo = https://github.com/Nekso/nkbase
+pkg_nkbase_commit = develop
+
+PACKAGES += nkdocker
+pkg_nkdocker_name = nkdocker
+pkg_nkdocker_description = Erlang Docker client
+pkg_nkdocker_homepage = https://github.com/Nekso/nkdocker
+pkg_nkdocker_fetch = git
+pkg_nkdocker_repo = https://github.com/Nekso/nkdocker
+pkg_nkdocker_commit = master
+
+PACKAGES += nkpacket
+pkg_nkpacket_name = nkpacket
+pkg_nkpacket_description = Generic Erlang transport layer
+pkg_nkpacket_homepage = https://github.com/Nekso/nkpacket
+pkg_nkpacket_fetch = git
+pkg_nkpacket_repo = https://github.com/Nekso/nkpacket
+pkg_nkpacket_commit = master
+
+PACKAGES += nksip
+pkg_nksip_name = nksip
+pkg_nksip_description = Erlang SIP application server
+pkg_nksip_homepage = https://github.com/kalta/nksip
+pkg_nksip_fetch = git
+pkg_nksip_repo = https://github.com/kalta/nksip
+pkg_nksip_commit = master
+
+PACKAGES += nodefinder
+pkg_nodefinder_name = nodefinder
+pkg_nodefinder_description = automatic node discovery via UDP multicast
+pkg_nodefinder_homepage = https://github.com/erlanger/nodefinder
+pkg_nodefinder_fetch = git
+pkg_nodefinder_repo = https://github.com/okeuday/nodefinder
+pkg_nodefinder_commit = master
+
+PACKAGES += nprocreg
+pkg_nprocreg_name = nprocreg
+pkg_nprocreg_description = Minimal Distributed Erlang Process Registry
+pkg_nprocreg_homepage = http://nitrogenproject.com/
+pkg_nprocreg_fetch = git
+pkg_nprocreg_repo = https://github.com/nitrogen/nprocreg
+pkg_nprocreg_commit = master
+
+PACKAGES += oauth
+pkg_oauth_name = oauth
+pkg_oauth_description = An Erlang OAuth 1.0 implementation
+pkg_oauth_homepage = https://github.com/tim/erlang-oauth
+pkg_oauth_fetch = git
+pkg_oauth_repo = https://github.com/tim/erlang-oauth
+pkg_oauth_commit = master
+
+PACKAGES += oauth2
+pkg_oauth2_name = oauth2
+pkg_oauth2_description = Erlang OAuth2 implementation
+pkg_oauth2_homepage = https://github.com/kivra/oauth2
+pkg_oauth2_fetch = git
+pkg_oauth2_repo = https://github.com/kivra/oauth2
+pkg_oauth2_commit = master
+
+PACKAGES += observer_cli
+pkg_observer_cli_name = observer_cli
+pkg_observer_cli_description = Visualize Erlang/Elixir Nodes On The Command Line
+pkg_observer_cli_homepage = http://zhongwencool.github.io/observer_cli
+pkg_observer_cli_fetch = git
+pkg_observer_cli_repo = https://github.com/zhongwencool/observer_cli
+pkg_observer_cli_commit = master
+
+PACKAGES += octopus
+pkg_octopus_name = octopus
+pkg_octopus_description = Small and flexible pool manager written in Erlang
+pkg_octopus_homepage = https://github.com/erlangbureau/octopus
+pkg_octopus_fetch = git
+pkg_octopus_repo = https://github.com/erlangbureau/octopus
+pkg_octopus_commit = master
+
+PACKAGES += of_protocol
+pkg_of_protocol_name = of_protocol
+pkg_of_protocol_description = OpenFlow Protocol Library for Erlang
+pkg_of_protocol_homepage = https://github.com/FlowForwarding/of_protocol
+pkg_of_protocol_fetch = git
+pkg_of_protocol_repo = https://github.com/FlowForwarding/of_protocol
+pkg_of_protocol_commit = master
+
+PACKAGES += opencouch
+pkg_opencouch_name = couch
+pkg_opencouch_description = An embeddable document-oriented database compatible with Apache CouchDB
+pkg_opencouch_homepage = https://github.com/benoitc/opencouch
+pkg_opencouch_fetch = git
+pkg_opencouch_repo = https://github.com/benoitc/opencouch
+pkg_opencouch_commit = master
+
+PACKAGES += openflow
+pkg_openflow_name = openflow
+pkg_openflow_description = An OpenFlow controller written in pure Erlang
+pkg_openflow_homepage = https://github.com/renatoaguiar/erlang-openflow
+pkg_openflow_fetch = git
+pkg_openflow_repo = https://github.com/renatoaguiar/erlang-openflow
+pkg_openflow_commit = master
+
+PACKAGES += openid
+pkg_openid_name = openid
+pkg_openid_description = Erlang OpenID
+pkg_openid_homepage = https://github.com/brendonh/erl_openid
+pkg_openid_fetch = git
+pkg_openid_repo = https://github.com/brendonh/erl_openid
+pkg_openid_commit = master
+
+PACKAGES += openpoker
+pkg_openpoker_name = openpoker
+pkg_openpoker_description = Genesis Texas hold'em Game Server
+pkg_openpoker_homepage = https://github.com/hpyhacking/openpoker
+pkg_openpoker_fetch = git
+pkg_openpoker_repo = https://github.com/hpyhacking/openpoker
+pkg_openpoker_commit = master
+
+PACKAGES += otpbp
+pkg_otpbp_name = otpbp
+pkg_otpbp_description = Parse transformer for using new OTP functions in old Erlang/OTP releases (R15, R16, 17, 18, 19)
+pkg_otpbp_homepage = https://github.com/Ledest/otpbp
+pkg_otpbp_fetch = git
+pkg_otpbp_repo = https://github.com/Ledest/otpbp
+pkg_otpbp_commit = master
+
+PACKAGES += pal
+pkg_pal_name = pal
+pkg_pal_description = Pragmatic Authentication Library
+pkg_pal_homepage = https://github.com/manifest/pal
+pkg_pal_fetch = git
+pkg_pal_repo = https://github.com/manifest/pal
+pkg_pal_commit = master
+
+PACKAGES += parse_trans
+pkg_parse_trans_name = parse_trans
+pkg_parse_trans_description = Parse transform utilities for Erlang
+pkg_parse_trans_homepage = https://github.com/uwiger/parse_trans
+pkg_parse_trans_fetch = git
+pkg_parse_trans_repo = https://github.com/uwiger/parse_trans
+pkg_parse_trans_commit = master
+
+PACKAGES += parsexml
+pkg_parsexml_name = parsexml
+pkg_parsexml_description = Simple DOM XML parser with a convenient and very simple API
+pkg_parsexml_homepage = https://github.com/maxlapshin/parsexml
+pkg_parsexml_fetch = git
+pkg_parsexml_repo = https://github.com/maxlapshin/parsexml
+pkg_parsexml_commit = master
+
+PACKAGES += partisan
+pkg_partisan_name = partisan
+pkg_partisan_description = High-performance, high-scalability distributed computing with Erlang and Elixir.
+pkg_partisan_homepage = http://partisan.cloud
+pkg_partisan_fetch = git
+pkg_partisan_repo = https://github.com/lasp-lang/partisan
+pkg_partisan_commit = master
+
+PACKAGES += pegjs
+pkg_pegjs_name = pegjs
+pkg_pegjs_description = An implementation of PEG.js grammar for Erlang.
+pkg_pegjs_homepage = https://github.com/dmitriid/pegjs
+pkg_pegjs_fetch = git
+pkg_pegjs_repo = https://github.com/dmitriid/pegjs
+pkg_pegjs_commit = master
+
+PACKAGES += percept2
+pkg_percept2_name = percept2
+pkg_percept2_description = Concurrent profiling tool for Erlang
+pkg_percept2_homepage = https://github.com/huiqing/percept2
+pkg_percept2_fetch = git
+pkg_percept2_repo = https://github.com/huiqing/percept2
+pkg_percept2_commit = master
+
+PACKAGES += pgo
+pkg_pgo_name = pgo
+pkg_pgo_description = Erlang Postgres client and connection pool
+pkg_pgo_homepage = https://github.com/erleans/pgo.git
+pkg_pgo_fetch = git
+pkg_pgo_repo = https://github.com/erleans/pgo.git
+pkg_pgo_commit = master
+
+PACKAGES += pgsql
+pkg_pgsql_name = pgsql
+pkg_pgsql_description = Erlang PostgreSQL driver
+pkg_pgsql_homepage = https://github.com/semiocast/pgsql
+pkg_pgsql_fetch = git
+pkg_pgsql_repo = https://github.com/semiocast/pgsql
+pkg_pgsql_commit = master
+
+PACKAGES += pkgx
+pkg_pkgx_name = pkgx
+pkg_pkgx_description = Build .deb packages from Erlang releases
+pkg_pkgx_homepage = https://github.com/arjan/pkgx
+pkg_pkgx_fetch = git
+pkg_pkgx_repo = https://github.com/arjan/pkgx
+pkg_pkgx_commit = master
+
+PACKAGES += pkt
+pkg_pkt_name = pkt
+pkg_pkt_description = Erlang network protocol library
+pkg_pkt_homepage = https://github.com/msantos/pkt
+pkg_pkt_fetch = git
+pkg_pkt_repo = https://github.com/msantos/pkt
+pkg_pkt_commit = master
+
+PACKAGES += plain_fsm
+pkg_plain_fsm_name = plain_fsm
+pkg_plain_fsm_description = A behaviour/support library for writing plain Erlang FSMs.
+pkg_plain_fsm_homepage = https://github.com/uwiger/plain_fsm
+pkg_plain_fsm_fetch = git
+pkg_plain_fsm_repo = https://github.com/uwiger/plain_fsm
+pkg_plain_fsm_commit = master
+
+PACKAGES += plumtree
+pkg_plumtree_name = plumtree
+pkg_plumtree_description = Epidemic Broadcast Trees
+pkg_plumtree_homepage = https://github.com/helium/plumtree
+pkg_plumtree_fetch = git
+pkg_plumtree_repo = https://github.com/helium/plumtree
+pkg_plumtree_commit = master
+
+PACKAGES += pmod_transform
+pkg_pmod_transform_name = pmod_transform
+pkg_pmod_transform_description = Parse transform for parameterized modules
+pkg_pmod_transform_homepage = https://github.com/erlang/pmod_transform
+pkg_pmod_transform_fetch = git
+pkg_pmod_transform_repo = https://github.com/erlang/pmod_transform
+pkg_pmod_transform_commit = master
+
+PACKAGES += pobox
+pkg_pobox_name = pobox
+pkg_pobox_description = External buffer processes to protect against mailbox overflow in Erlang
+pkg_pobox_homepage = https://github.com/ferd/pobox
+pkg_pobox_fetch = git
+pkg_pobox_repo = https://github.com/ferd/pobox
+pkg_pobox_commit = master
+
+PACKAGES += ponos
+pkg_ponos_name = ponos
+pkg_ponos_description = ponos is a simple yet powerful load generator written in Erlang
+pkg_ponos_homepage = https://github.com/klarna/ponos
+pkg_ponos_fetch = git
+pkg_ponos_repo = https://github.com/klarna/ponos
+pkg_ponos_commit = master
+
+PACKAGES += poolboy
+pkg_poolboy_name = poolboy
+pkg_poolboy_description = A hunky Erlang worker pool factory
+pkg_poolboy_homepage = https://github.com/devinus/poolboy
+pkg_poolboy_fetch = git
+pkg_poolboy_repo = https://github.com/devinus/poolboy
+pkg_poolboy_commit = master
+
+PACKAGES += pooler
+pkg_pooler_name = pooler
+pkg_pooler_description = An OTP Process Pool Application
+pkg_pooler_homepage = https://github.com/seth/pooler
+pkg_pooler_fetch = git
+pkg_pooler_repo = https://github.com/seth/pooler
+pkg_pooler_commit = master
+
+PACKAGES += pqueue
+pkg_pqueue_name = pqueue
+pkg_pqueue_description = Erlang Priority Queues
+pkg_pqueue_homepage = https://github.com/okeuday/pqueue
+pkg_pqueue_fetch = git
+pkg_pqueue_repo = https://github.com/okeuday/pqueue
+pkg_pqueue_commit = master
+
+PACKAGES += procket
+pkg_procket_name = procket
+pkg_procket_description = Erlang interface to low level socket operations
+pkg_procket_homepage = http://blog.listincomprehension.com/search/label/procket
+pkg_procket_fetch = git
+pkg_procket_repo = https://github.com/msantos/procket
+pkg_procket_commit = master
+
+PACKAGES += prometheus
+pkg_prometheus_name = prometheus
+pkg_prometheus_description = Prometheus.io client in Erlang
+pkg_prometheus_homepage = https://github.com/deadtrickster/prometheus.erl
+pkg_prometheus_fetch = git
+pkg_prometheus_repo = https://github.com/deadtrickster/prometheus.erl
+pkg_prometheus_commit = master
+
+PACKAGES += prop
+pkg_prop_name = prop
+pkg_prop_description = An Erlang code scaffolding and generator system.
+pkg_prop_homepage = https://github.com/nuex/prop
+pkg_prop_fetch = git
+pkg_prop_repo = https://github.com/nuex/prop
+pkg_prop_commit = master
+
+PACKAGES += proper
+pkg_proper_name = proper
+pkg_proper_description = PropEr: a QuickCheck-inspired property-based testing tool for Erlang.
+pkg_proper_homepage = http://proper.softlab.ntua.gr
+pkg_proper_fetch = git
+pkg_proper_repo = https://github.com/manopapad/proper
+pkg_proper_commit = master
+
+PACKAGES += props
+pkg_props_name = props
+pkg_props_description = Property structure library
+pkg_props_homepage = https://github.com/greyarea/props
+pkg_props_fetch = git
+pkg_props_repo = https://github.com/greyarea/props
+pkg_props_commit = master
+
+PACKAGES += protobuffs
+pkg_protobuffs_name = protobuffs
+pkg_protobuffs_description = An implementation of Google's Protocol Buffers for Erlang, based on ngerakines/erlang_protobuffs.
+pkg_protobuffs_homepage = https://github.com/basho/erlang_protobuffs
+pkg_protobuffs_fetch = git
+pkg_protobuffs_repo = https://github.com/basho/erlang_protobuffs
+pkg_protobuffs_commit = master
+
+PACKAGES += psycho
+pkg_psycho_name = psycho
+pkg_psycho_description = HTTP server that provides a WSGI-like interface for applications and middleware.
+pkg_psycho_homepage = https://github.com/gar1t/psycho
+pkg_psycho_fetch = git
+pkg_psycho_repo = https://github.com/gar1t/psycho
+pkg_psycho_commit = master
+
+PACKAGES += purity
+pkg_purity_name = purity
+pkg_purity_description = A side-effect analyzer for Erlang
+pkg_purity_homepage = https://github.com/mpitid/purity
+pkg_purity_fetch = git
+pkg_purity_repo = https://github.com/mpitid/purity
+pkg_purity_commit = master
+
+PACKAGES += push_service
+pkg_push_service_name = push_service
+pkg_push_service_description = Push service
+pkg_push_service_homepage = https://github.com/hairyhum/push_service
+pkg_push_service_fetch = git
+pkg_push_service_repo = https://github.com/hairyhum/push_service
+pkg_push_service_commit = master
+
+PACKAGES += qdate
+pkg_qdate_name = qdate
+pkg_qdate_description = Date, time, and timezone parsing, formatting, and conversion for Erlang.
+pkg_qdate_homepage = https://github.com/choptastic/qdate
+pkg_qdate_fetch = git
+pkg_qdate_repo = https://github.com/choptastic/qdate
+pkg_qdate_commit = master
+
+PACKAGES += qrcode
+pkg_qrcode_name = qrcode
+pkg_qrcode_description = QR Code encoder in Erlang
+pkg_qrcode_homepage = https://github.com/komone/qrcode
+pkg_qrcode_fetch = git
+pkg_qrcode_repo = https://github.com/komone/qrcode
+pkg_qrcode_commit = master
+
+PACKAGES += quest
+pkg_quest_name = quest
+pkg_quest_description = Learn Erlang through this set of challenges. An interactive system for getting to know Erlang.
+pkg_quest_homepage = https://github.com/eriksoe/ErlangQuest
+pkg_quest_fetch = git
+pkg_quest_repo = https://github.com/eriksoe/ErlangQuest
+pkg_quest_commit = master
+
+PACKAGES += quickrand
+pkg_quickrand_name = quickrand
+pkg_quickrand_description = Quick Erlang Random Number Generation
+pkg_quickrand_homepage = https://github.com/okeuday/quickrand
+pkg_quickrand_fetch = git
+pkg_quickrand_repo = https://github.com/okeuday/quickrand
+pkg_quickrand_commit = master
+
+PACKAGES += rabbit
+pkg_rabbit_name = rabbit
+pkg_rabbit_description = RabbitMQ Server
+pkg_rabbit_homepage = https://www.rabbitmq.com/
+pkg_rabbit_fetch = git
+pkg_rabbit_repo = https://github.com/rabbitmq/rabbitmq-server.git
+pkg_rabbit_commit = master
+
+PACKAGES += rabbit_exchange_type_riak
+pkg_rabbit_exchange_type_riak_name = rabbit_exchange_type_riak
+pkg_rabbit_exchange_type_riak_description = Custom RabbitMQ exchange type for sticking messages in Riak
+pkg_rabbit_exchange_type_riak_homepage = https://github.com/jbrisbin/riak-exchange
+pkg_rabbit_exchange_type_riak_fetch = git
+pkg_rabbit_exchange_type_riak_repo = https://github.com/jbrisbin/riak-exchange
+pkg_rabbit_exchange_type_riak_commit = master
+
+PACKAGES += rack
+pkg_rack_name = rack
+pkg_rack_description = Rack handler for Erlang
+pkg_rack_homepage = https://github.com/erlyvideo/rack
+pkg_rack_fetch = git
+pkg_rack_repo = https://github.com/erlyvideo/rack
+pkg_rack_commit = master
+
+PACKAGES += radierl
+pkg_radierl_name = radierl
+pkg_radierl_description = RADIUS protocol stack implemented in Erlang.
+pkg_radierl_homepage = https://github.com/vances/radierl
+pkg_radierl_fetch = git
+pkg_radierl_repo = https://github.com/vances/radierl
+pkg_radierl_commit = master
+
+PACKAGES += rafter
+pkg_rafter_name = rafter
+pkg_rafter_description = An Erlang library application which implements the Raft consensus protocol
+pkg_rafter_homepage = https://github.com/andrewjstone/rafter
+pkg_rafter_fetch = git
+pkg_rafter_repo = https://github.com/andrewjstone/rafter
+pkg_rafter_commit = master
+
+PACKAGES += ranch
+pkg_ranch_name = ranch
+pkg_ranch_description = Socket acceptor pool for TCP protocols.
+pkg_ranch_homepage = http://ninenines.eu
+pkg_ranch_fetch = git
+pkg_ranch_repo = https://github.com/ninenines/ranch
+pkg_ranch_commit = 1.2.1
+
+PACKAGES += rbeacon
+pkg_rbeacon_name = rbeacon
+pkg_rbeacon_description = LAN discovery and presence in Erlang.
+pkg_rbeacon_homepage = https://github.com/refuge/rbeacon
+pkg_rbeacon_fetch = git
+pkg_rbeacon_repo = https://github.com/refuge/rbeacon
+pkg_rbeacon_commit = master
+
+PACKAGES += rebar
+pkg_rebar_name = rebar
+pkg_rebar_description = Erlang build tool that makes it easy to compile and test Erlang applications, port drivers and releases.
+pkg_rebar_homepage = http://www.rebar3.org
+pkg_rebar_fetch = git
+pkg_rebar_repo = https://github.com/rebar/rebar3
+pkg_rebar_commit = master
+
+PACKAGES += rebus
+pkg_rebus_name = rebus
+pkg_rebus_description = A stupid-simple, internal pub/sub event bus written in and for Erlang.
+pkg_rebus_homepage = https://github.com/olle/rebus
+pkg_rebus_fetch = git
+pkg_rebus_repo = https://github.com/olle/rebus
+pkg_rebus_commit = master
+
+PACKAGES += rec2json
+pkg_rec2json_name = rec2json
+pkg_rec2json_description = Compile Erlang record definitions into modules to convert them to/from JSON easily.
+pkg_rec2json_homepage = https://github.com/lordnull/rec2json
+pkg_rec2json_fetch = git
+pkg_rec2json_repo = https://github.com/lordnull/rec2json
+pkg_rec2json_commit = master
+
+PACKAGES += recon
+pkg_recon_name = recon
+pkg_recon_description = Collection of functions and scripts to debug Erlang in production.
+pkg_recon_homepage = https://github.com/ferd/recon
+pkg_recon_fetch = git
+pkg_recon_repo = https://github.com/ferd/recon
+pkg_recon_commit = master
+
+PACKAGES += record_info
+pkg_record_info_name = record_info
+pkg_record_info_description = Convert between record and proplist
+pkg_record_info_homepage = https://github.com/bipthelin/erlang-record_info
+pkg_record_info_fetch = git
+pkg_record_info_repo = https://github.com/bipthelin/erlang-record_info
+pkg_record_info_commit = master
+
+PACKAGES += redgrid
+pkg_redgrid_name = redgrid
+pkg_redgrid_description = Automatic Erlang node discovery via Redis
+pkg_redgrid_homepage = https://github.com/jkvor/redgrid
+pkg_redgrid_fetch = git
+pkg_redgrid_repo = https://github.com/jkvor/redgrid
+pkg_redgrid_commit = master
+
+PACKAGES += redo
+pkg_redo_name = redo
+pkg_redo_description = Pipelined Erlang Redis client
+pkg_redo_homepage = https://github.com/jkvor/redo
+pkg_redo_fetch = git
+pkg_redo_repo = https://github.com/jkvor/redo
+pkg_redo_commit = master
+
+PACKAGES += reload_mk
+pkg_reload_mk_name = reload_mk
+pkg_reload_mk_description = Live reload plugin for erlang.mk.
+pkg_reload_mk_homepage = https://github.com/bullno1/reload.mk
+pkg_reload_mk_fetch = git
+pkg_reload_mk_repo = https://github.com/bullno1/reload.mk
+pkg_reload_mk_commit = master
+
+PACKAGES += reltool_util
+pkg_reltool_util_name = reltool_util
+pkg_reltool_util_description = Erlang reltool utility application
+pkg_reltool_util_homepage = https://github.com/okeuday/reltool_util
+pkg_reltool_util_fetch = git
+pkg_reltool_util_repo = https://github.com/okeuday/reltool_util
+pkg_reltool_util_commit = master
+
+PACKAGES += relx
+pkg_relx_name = relx
+pkg_relx_description = Sane, simple release creation for Erlang
+pkg_relx_homepage = https://github.com/erlware/relx
+pkg_relx_fetch = git
+pkg_relx_repo = https://github.com/erlware/relx
+pkg_relx_commit = master
+
+PACKAGES += resource_discovery
+pkg_resource_discovery_name = resource_discovery
+pkg_resource_discovery_description = An application used to dynamically discover resources present in an Erlang node cluster.
+pkg_resource_discovery_homepage = http://erlware.org/
+pkg_resource_discovery_fetch = git
+pkg_resource_discovery_repo = https://github.com/erlware/resource_discovery
+pkg_resource_discovery_commit = master
+
+PACKAGES += restc
+pkg_restc_name = restc
+pkg_restc_description = Erlang REST client
+pkg_restc_homepage = https://github.com/kivra/restclient
+pkg_restc_fetch = git
+pkg_restc_repo = https://github.com/kivra/restclient
+pkg_restc_commit = master
+
+PACKAGES += rfc4627_jsonrpc
+pkg_rfc4627_jsonrpc_name = rfc4627_jsonrpc
+pkg_rfc4627_jsonrpc_description = Erlang RFC4627 (JSON) codec and JSON-RPC server implementation.
+pkg_rfc4627_jsonrpc_homepage = https://github.com/tonyg/erlang-rfc4627
+pkg_rfc4627_jsonrpc_fetch = git
+pkg_rfc4627_jsonrpc_repo = https://github.com/tonyg/erlang-rfc4627
+pkg_rfc4627_jsonrpc_commit = master
+
+PACKAGES += riak_control
+pkg_riak_control_name = riak_control
+pkg_riak_control_description = Webmachine-based administration interface for Riak.
+pkg_riak_control_homepage = https://github.com/basho/riak_control
+pkg_riak_control_fetch = git
+pkg_riak_control_repo = https://github.com/basho/riak_control
+pkg_riak_control_commit = master
+
+PACKAGES += riak_core
+pkg_riak_core_name = riak_core
+pkg_riak_core_description = Distributed systems infrastructure used by Riak.
+pkg_riak_core_homepage = https://github.com/basho/riak_core
+pkg_riak_core_fetch = git
+pkg_riak_core_repo = https://github.com/basho/riak_core
+pkg_riak_core_commit = master
+
+PACKAGES += riak_dt
+pkg_riak_dt_name = riak_dt
+pkg_riak_dt_description = Convergent replicated datatypes in Erlang
+pkg_riak_dt_homepage = https://github.com/basho/riak_dt
+pkg_riak_dt_fetch = git
+pkg_riak_dt_repo = https://github.com/basho/riak_dt
+pkg_riak_dt_commit = master
+
+PACKAGES += riak_ensemble
+pkg_riak_ensemble_name = riak_ensemble
+pkg_riak_ensemble_description = Multi-Paxos framework in Erlang
+pkg_riak_ensemble_homepage = https://github.com/basho/riak_ensemble
+pkg_riak_ensemble_fetch = git
+pkg_riak_ensemble_repo = https://github.com/basho/riak_ensemble
+pkg_riak_ensemble_commit = master
+
+PACKAGES += riak_kv
+pkg_riak_kv_name = riak_kv
+pkg_riak_kv_description = Riak Key/Value Store
+pkg_riak_kv_homepage = https://github.com/basho/riak_kv
+pkg_riak_kv_fetch = git
+pkg_riak_kv_repo = https://github.com/basho/riak_kv
+pkg_riak_kv_commit = master
+
+PACKAGES += riak_pg
+pkg_riak_pg_name = riak_pg
+pkg_riak_pg_description = Distributed process groups with riak_core.
+pkg_riak_pg_homepage = https://github.com/cmeiklejohn/riak_pg
+pkg_riak_pg_fetch = git
+pkg_riak_pg_repo = https://github.com/cmeiklejohn/riak_pg
+pkg_riak_pg_commit = master
+
+PACKAGES += riak_pipe
+pkg_riak_pipe_name = riak_pipe
+pkg_riak_pipe_description = Riak Pipelines
+pkg_riak_pipe_homepage = https://github.com/basho/riak_pipe
+pkg_riak_pipe_fetch = git
+pkg_riak_pipe_repo = https://github.com/basho/riak_pipe
+pkg_riak_pipe_commit = master
+
+PACKAGES += riak_sysmon
+pkg_riak_sysmon_name = riak_sysmon
+pkg_riak_sysmon_description = Simple OTP app for managing Erlang VM system_monitor event messages
+pkg_riak_sysmon_homepage = https://github.com/basho/riak_sysmon
+pkg_riak_sysmon_fetch = git
+pkg_riak_sysmon_repo = https://github.com/basho/riak_sysmon
+pkg_riak_sysmon_commit = master
+
+PACKAGES += riak_test
+pkg_riak_test_name = riak_test
+pkg_riak_test_description = I'm in your cluster, testing your riaks
+pkg_riak_test_homepage = https://github.com/basho/riak_test
+pkg_riak_test_fetch = git
+pkg_riak_test_repo = https://github.com/basho/riak_test
+pkg_riak_test_commit = master
+
+PACKAGES += riakc
+pkg_riakc_name = riakc
+pkg_riakc_description = Erlang clients for Riak.
+pkg_riakc_homepage = https://github.com/basho/riak-erlang-client
+pkg_riakc_fetch = git
+pkg_riakc_repo = https://github.com/basho/riak-erlang-client
+pkg_riakc_commit = master
+
+PACKAGES += riakhttpc
+pkg_riakhttpc_name = riakhttpc
+pkg_riakhttpc_description = Riak Erlang client using the HTTP interface
+pkg_riakhttpc_homepage = https://github.com/basho/riak-erlang-http-client
+pkg_riakhttpc_fetch = git
+pkg_riakhttpc_repo = https://github.com/basho/riak-erlang-http-client
+pkg_riakhttpc_commit = master
+
+PACKAGES += riaknostic
+pkg_riaknostic_name = riaknostic
+pkg_riaknostic_description = A diagnostic tool for Riak installations, to find common errors asap
+pkg_riaknostic_homepage = https://github.com/basho/riaknostic
+pkg_riaknostic_fetch = git
+pkg_riaknostic_repo = https://github.com/basho/riaknostic
+pkg_riaknostic_commit = master
+
+PACKAGES += riakpool
+pkg_riakpool_name = riakpool
+pkg_riakpool_description = Erlang Riak client pool
+pkg_riakpool_homepage = https://github.com/dweldon/riakpool
+pkg_riakpool_fetch = git
+pkg_riakpool_repo = https://github.com/dweldon/riakpool
+pkg_riakpool_commit = master
+
+PACKAGES += rivus_cep
+pkg_rivus_cep_name = rivus_cep
+pkg_rivus_cep_description = Complex event processing in Erlang
+pkg_rivus_cep_homepage = https://github.com/vascokk/rivus_cep
+pkg_rivus_cep_fetch = git
+pkg_rivus_cep_repo = https://github.com/vascokk/rivus_cep
+pkg_rivus_cep_commit = master
+
+PACKAGES += rlimit
+pkg_rlimit_name = rlimit
+pkg_rlimit_description = Magnus Klaar's rate limiter code from etorrent
+pkg_rlimit_homepage = https://github.com/jlouis/rlimit
+pkg_rlimit_fetch = git
+pkg_rlimit_repo = https://github.com/jlouis/rlimit
+pkg_rlimit_commit = master
+
+PACKAGES += rust_mk
+pkg_rust_mk_name = rust_mk
+pkg_rust_mk_description = Build Rust crates in an Erlang application
+pkg_rust_mk_homepage = https://github.com/goertzenator/rust.mk
+pkg_rust_mk_fetch = git
+pkg_rust_mk_repo = https://github.com/goertzenator/rust.mk
+pkg_rust_mk_commit = master
+
+PACKAGES += safetyvalve
+pkg_safetyvalve_name = safetyvalve
+pkg_safetyvalve_description = A safety valve for your Erlang node
+pkg_safetyvalve_homepage = https://github.com/jlouis/safetyvalve
+pkg_safetyvalve_fetch = git
+pkg_safetyvalve_repo = https://github.com/jlouis/safetyvalve
+pkg_safetyvalve_commit = master
+
+PACKAGES += seestar
+pkg_seestar_name = seestar
+pkg_seestar_description = The Erlang client for Cassandra 1.2+ binary protocol
+pkg_seestar_homepage = https://github.com/iamaleksey/seestar
+pkg_seestar_fetch = git
+pkg_seestar_repo = https://github.com/iamaleksey/seestar
+pkg_seestar_commit = master
+
+PACKAGES += service
+pkg_service_name = service
+pkg_service_description = A minimal Erlang behavior for creating CloudI internal services
+pkg_service_homepage = http://cloudi.org/
+pkg_service_fetch = git
+pkg_service_repo = https://github.com/CloudI/service
+pkg_service_commit = master
+
+PACKAGES += setup
+pkg_setup_name = setup
+pkg_setup_description = Generic setup utility for Erlang-based systems
+pkg_setup_homepage = https://github.com/uwiger/setup
+pkg_setup_fetch = git
+pkg_setup_repo = https://github.com/uwiger/setup
+pkg_setup_commit = master
+
+PACKAGES += sext
+pkg_sext_name = sext
+pkg_sext_description = Sortable Erlang Term Serialization
+pkg_sext_homepage = https://github.com/uwiger/sext
+pkg_sext_fetch = git
+pkg_sext_repo = https://github.com/uwiger/sext
+pkg_sext_commit = master
+
+PACKAGES += sfmt
+pkg_sfmt_name = sfmt
+pkg_sfmt_description = SFMT pseudo random number generator for Erlang.
+pkg_sfmt_homepage = https://github.com/jj1bdx/sfmt-erlang
+pkg_sfmt_fetch = git
+pkg_sfmt_repo = https://github.com/jj1bdx/sfmt-erlang
+pkg_sfmt_commit = master
+
+PACKAGES += sgte
+pkg_sgte_name = sgte
+pkg_sgte_description = A simple Erlang Template Engine
+pkg_sgte_homepage = https://github.com/filippo/sgte
+pkg_sgte_fetch = git
+pkg_sgte_repo = https://github.com/filippo/sgte
+pkg_sgte_commit = master
+
+PACKAGES += sheriff
+pkg_sheriff_name = sheriff
+pkg_sheriff_description = Parse transform for type-based validation.
+pkg_sheriff_homepage = http://ninenines.eu
+pkg_sheriff_fetch = git
+pkg_sheriff_repo = https://github.com/extend/sheriff
+pkg_sheriff_commit = master
+
+PACKAGES += shotgun
+pkg_shotgun_name = shotgun
+pkg_shotgun_description = better than just a gun
+pkg_shotgun_homepage = https://github.com/inaka/shotgun
+pkg_shotgun_fetch = git
+pkg_shotgun_repo = https://github.com/inaka/shotgun
+pkg_shotgun_commit = master
+
+PACKAGES += sidejob
+pkg_sidejob_name = sidejob
+pkg_sidejob_description = Parallel worker and capacity limiting library for Erlang
+pkg_sidejob_homepage = https://github.com/basho/sidejob
+pkg_sidejob_fetch = git
+pkg_sidejob_repo = https://github.com/basho/sidejob
+pkg_sidejob_commit = master
+
+PACKAGES += sieve
+pkg_sieve_name = sieve
+pkg_sieve_description = sieve is a simple TCP routing proxy (layer 7) in Erlang
+pkg_sieve_homepage = https://github.com/benoitc/sieve
+pkg_sieve_fetch = git
+pkg_sieve_repo = https://github.com/benoitc/sieve
+pkg_sieve_commit = master
+
+PACKAGES += sighandler
+pkg_sighandler_name = sighandler
+pkg_sighandler_description = Handle UNIX signals in Erlang
+pkg_sighandler_homepage = https://github.com/jkingsbery/sighandler
+pkg_sighandler_fetch = git
+pkg_sighandler_repo = https://github.com/jkingsbery/sighandler
+pkg_sighandler_commit = master
+
+PACKAGES += simhash
+pkg_simhash_name = simhash
+pkg_simhash_description = Simhashing for Erlang -- hashing algorithm to find near-duplicates in binary data.
+pkg_simhash_homepage = https://github.com/ferd/simhash
+pkg_simhash_fetch = git
+pkg_simhash_repo = https://github.com/ferd/simhash
+pkg_simhash_commit = master
+
+PACKAGES += simple_bridge
+pkg_simple_bridge_name = simple_bridge
+pkg_simple_bridge_description = A simple, standardized interface library to Erlang HTTP Servers.
+pkg_simple_bridge_homepage = https://github.com/nitrogen/simple_bridge
+pkg_simple_bridge_fetch = git
+pkg_simple_bridge_repo = https://github.com/nitrogen/simple_bridge
+pkg_simple_bridge_commit = master
+
+PACKAGES += simple_oauth2
+pkg_simple_oauth2_name = simple_oauth2
+pkg_simple_oauth2_description = Simple Erlang OAuth2 client module for any HTTP server framework (Google, Facebook, Yandex, Vkontakte are preconfigured)
+pkg_simple_oauth2_homepage = https://github.com/virtan/simple_oauth2
+pkg_simple_oauth2_fetch = git
+pkg_simple_oauth2_repo = https://github.com/virtan/simple_oauth2
+pkg_simple_oauth2_commit = master
+
+PACKAGES += skel
+pkg_skel_name = skel
+pkg_skel_description = A Streaming Process-based Skeleton Library for Erlang
+pkg_skel_homepage = https://github.com/ParaPhrase/skel
+pkg_skel_fetch = git
+pkg_skel_repo = https://github.com/ParaPhrase/skel
+pkg_skel_commit = master
+
+PACKAGES += slack
+pkg_slack_name = slack
+pkg_slack_description = Minimal Slack notification OTP library.
+pkg_slack_homepage = https://github.com/DonBranson/slack
+pkg_slack_fetch = git
+pkg_slack_repo = https://github.com/DonBranson/slack.git
+pkg_slack_commit = master
+
+PACKAGES += smother
+pkg_smother_name = smother
+pkg_smother_description = Extended code coverage metrics for Erlang.
+pkg_smother_homepage = https://ramsay-t.github.io/Smother/
+pkg_smother_fetch = git
+pkg_smother_repo = https://github.com/ramsay-t/Smother
+pkg_smother_commit = master
+
+PACKAGES += snappyer
+pkg_snappyer_name = snappyer
+pkg_snappyer_description = Snappy as nif for Erlang
+pkg_snappyer_homepage = https://github.com/zmstone/snappyer
+pkg_snappyer_fetch = git
+pkg_snappyer_repo = https://github.com/zmstone/snappyer.git
+pkg_snappyer_commit = master
+
+PACKAGES += social
+pkg_social_name = social
+pkg_social_description = Cowboy handler for social login via OAuth2 providers
+pkg_social_homepage = https://github.com/dvv/social
+pkg_social_fetch = git
+pkg_social_repo = https://github.com/dvv/social
+pkg_social_commit = master
+
+PACKAGES += spapi_router
+pkg_spapi_router_name = spapi_router
+pkg_spapi_router_description = Partially-connected Erlang clustering
+pkg_spapi_router_homepage = https://github.com/spilgames/spapi-router
+pkg_spapi_router_fetch = git
+pkg_spapi_router_repo = https://github.com/spilgames/spapi-router
+pkg_spapi_router_commit = master
+
+PACKAGES += sqerl
+pkg_sqerl_name = sqerl
+pkg_sqerl_description = An Erlang-flavoured SQL DSL
+pkg_sqerl_homepage = https://github.com/hairyhum/sqerl
+pkg_sqerl_fetch = git
+pkg_sqerl_repo = https://github.com/hairyhum/sqerl
+pkg_sqerl_commit = master
+
+PACKAGES += srly
+pkg_srly_name = srly
+pkg_srly_description = Native Erlang Unix serial interface
+pkg_srly_homepage = https://github.com/msantos/srly
+pkg_srly_fetch = git
+pkg_srly_repo = https://github.com/msantos/srly
+pkg_srly_commit = master
+
+PACKAGES += sshrpc
+pkg_sshrpc_name = sshrpc
+pkg_sshrpc_description = Erlang SSH RPC module (experimental)
+pkg_sshrpc_homepage = https://github.com/jj1bdx/sshrpc
+pkg_sshrpc_fetch = git
+pkg_sshrpc_repo = https://github.com/jj1bdx/sshrpc
+pkg_sshrpc_commit = master
+
+PACKAGES += stable
+pkg_stable_name = stable
+pkg_stable_description = Library of assorted helpers for Cowboy web server.
+pkg_stable_homepage = https://github.com/dvv/stable
+pkg_stable_fetch = git
+pkg_stable_repo = https://github.com/dvv/stable
+pkg_stable_commit = master
+
+PACKAGES += statebox
+pkg_statebox_name = statebox
+pkg_statebox_description = Erlang state monad with merge/conflict-resolution capabilities. Useful for Riak.
+pkg_statebox_homepage = https://github.com/mochi/statebox
+pkg_statebox_fetch = git
+pkg_statebox_repo = https://github.com/mochi/statebox
+pkg_statebox_commit = master
+
+PACKAGES += statebox_riak
+pkg_statebox_riak_name = statebox_riak
+pkg_statebox_riak_description = Convenience library that makes it easier to use statebox with riak, extracted from best practices in our production code at Mochi Media.
+pkg_statebox_riak_homepage = https://github.com/mochi/statebox_riak
+pkg_statebox_riak_fetch = git
+pkg_statebox_riak_repo = https://github.com/mochi/statebox_riak
+pkg_statebox_riak_commit = master
+
+PACKAGES += statman
+pkg_statman_name = statman
+pkg_statman_description = Efficiently collect massive volumes of metrics inside the Erlang VM
+pkg_statman_homepage = https://github.com/knutin/statman
+pkg_statman_fetch = git
+pkg_statman_repo = https://github.com/knutin/statman
+pkg_statman_commit = master
+
+PACKAGES += statsderl
+pkg_statsderl_name = statsderl
+pkg_statsderl_description = StatsD client (erlang)
+pkg_statsderl_homepage = https://github.com/lpgauth/statsderl
+pkg_statsderl_fetch = git
+pkg_statsderl_repo = https://github.com/lpgauth/statsderl
+pkg_statsderl_commit = master
+
+PACKAGES += stdinout_pool
+pkg_stdinout_pool_name = stdinout_pool
+pkg_stdinout_pool_description = stdinout_pool : stuff goes in, stuff goes out. there's never any miscommunication.
+pkg_stdinout_pool_homepage = https://github.com/mattsta/erlang-stdinout-pool
+pkg_stdinout_pool_fetch = git
+pkg_stdinout_pool_repo = https://github.com/mattsta/erlang-stdinout-pool
+pkg_stdinout_pool_commit = master
+
+PACKAGES += stockdb
+pkg_stockdb_name = stockdb
+pkg_stockdb_description = Database for storing Stock Exchange quotes in Erlang
+pkg_stockdb_homepage = https://github.com/maxlapshin/stockdb
+pkg_stockdb_fetch = git
+pkg_stockdb_repo = https://github.com/maxlapshin/stockdb
+pkg_stockdb_commit = master
+
+PACKAGES += stripe
+pkg_stripe_name = stripe
+pkg_stripe_description = Erlang interface to the stripe.com API
+pkg_stripe_homepage = https://github.com/mattsta/stripe-erlang
+pkg_stripe_fetch = git
+pkg_stripe_repo = https://github.com/mattsta/stripe-erlang
+pkg_stripe_commit = v1
+
+PACKAGES += subproc
+pkg_subproc_name = subproc
+pkg_subproc_description = Unix subprocess manager with {active,once|false} modes
+pkg_subproc_homepage = http://dozzie.jarowit.net/trac/wiki/subproc
+pkg_subproc_fetch = git
+pkg_subproc_repo = https://github.com/dozzie/subproc
+pkg_subproc_commit = v0.1.0
+
+PACKAGES += supervisor3
+pkg_supervisor3_name = supervisor3
+pkg_supervisor3_description = OTP supervisor with additional strategies
+pkg_supervisor3_homepage = https://github.com/klarna/supervisor3
+pkg_supervisor3_fetch = git
+pkg_supervisor3_repo = https://github.com/klarna/supervisor3.git
+pkg_supervisor3_commit = master
+
+PACKAGES += surrogate
+pkg_surrogate_name = surrogate
+pkg_surrogate_description = Proxy server written in Erlang. Supports reverse proxy load balancing and forward proxying with HTTP (including CONNECT), SOCKS4, SOCKS5, and transparent proxy modes.
+pkg_surrogate_homepage = https://github.com/skruger/Surrogate
+pkg_surrogate_fetch = git
+pkg_surrogate_repo = https://github.com/skruger/Surrogate
+pkg_surrogate_commit = master
+
+PACKAGES += swab
+pkg_swab_name = swab
+pkg_swab_description = General purpose buffer handling module
+pkg_swab_homepage = https://github.com/crownedgrouse/swab
+pkg_swab_fetch = git
+pkg_swab_repo = https://github.com/crownedgrouse/swab
+pkg_swab_commit = master
+
+PACKAGES += swarm
+pkg_swarm_name = swarm
+pkg_swarm_description = Fast and simple acceptor pool for Erlang
+pkg_swarm_homepage = https://github.com/jeremey/swarm
+pkg_swarm_fetch = git
+pkg_swarm_repo = https://github.com/jeremey/swarm
+pkg_swarm_commit = master
+
+PACKAGES += switchboard
+pkg_switchboard_name = switchboard
+pkg_switchboard_description = A framework for processing email using worker plugins.
+pkg_switchboard_homepage = https://github.com/thusfresh/switchboard
+pkg_switchboard_fetch = git
+pkg_switchboard_repo = https://github.com/thusfresh/switchboard
+pkg_switchboard_commit = master
+
+PACKAGES += syn
+pkg_syn_name = syn
+pkg_syn_description = A global Process Registry and Process Group manager for Erlang.
+pkg_syn_homepage = https://github.com/ostinelli/syn
+pkg_syn_fetch = git
+pkg_syn_repo = https://github.com/ostinelli/syn
+pkg_syn_commit = master
+
+PACKAGES += sync
+pkg_sync_name = sync
+pkg_sync_description = On-the-fly recompiling and reloading in Erlang.
+pkg_sync_homepage = https://github.com/rustyio/sync
+pkg_sync_fetch = git
+pkg_sync_repo = https://github.com/rustyio/sync
+pkg_sync_commit = master
+
+PACKAGES += syntaxerl
+pkg_syntaxerl_name = syntaxerl
+pkg_syntaxerl_description = Syntax checker for Erlang
+pkg_syntaxerl_homepage = https://github.com/ten0s/syntaxerl
+pkg_syntaxerl_fetch = git
+pkg_syntaxerl_repo = https://github.com/ten0s/syntaxerl
+pkg_syntaxerl_commit = master
+
+PACKAGES += syslog
+pkg_syslog_name = syslog
+pkg_syslog_description = Erlang port driver for interacting with syslog via syslog(3)
+pkg_syslog_homepage = https://github.com/Vagabond/erlang-syslog
+pkg_syslog_fetch = git
+pkg_syslog_repo = https://github.com/Vagabond/erlang-syslog
+pkg_syslog_commit = master
+
+PACKAGES += taskforce
+pkg_taskforce_name = taskforce
+pkg_taskforce_description = Erlang worker pools for controlled parallelisation of arbitrary tasks.
+pkg_taskforce_homepage = https://github.com/g-andrade/taskforce
+pkg_taskforce_fetch = git
+pkg_taskforce_repo = https://github.com/g-andrade/taskforce
+pkg_taskforce_commit = master
+
+PACKAGES += tddreloader
+pkg_tddreloader_name = tddreloader
+pkg_tddreloader_description = Shell utility for recompiling, reloading, and testing code as it changes
+pkg_tddreloader_homepage = https://github.com/version2beta/tddreloader
+pkg_tddreloader_fetch = git
+pkg_tddreloader_repo = https://github.com/version2beta/tddreloader
+pkg_tddreloader_commit = master
+
+PACKAGES += tempo
+pkg_tempo_name = tempo
+pkg_tempo_description = NIF-based date and time parsing and formatting for Erlang.
+pkg_tempo_homepage = https://github.com/selectel/tempo
+pkg_tempo_fetch = git
+pkg_tempo_repo = https://github.com/selectel/tempo
+pkg_tempo_commit = master
+
+PACKAGES += ticktick
+pkg_ticktick_name = ticktick
+pkg_ticktick_description = Ticktick is an ID generator for messaging services.
+pkg_ticktick_homepage = https://github.com/ericliang/ticktick
+pkg_ticktick_fetch = git
+pkg_ticktick_repo = https://github.com/ericliang/ticktick
+pkg_ticktick_commit = master
+
+PACKAGES += tinymq
+pkg_tinymq_name = tinymq
+pkg_tinymq_description = TinyMQ - a diminutive, in-memory message queue
+pkg_tinymq_homepage = https://github.com/ChicagoBoss/tinymq
+pkg_tinymq_fetch = git
+pkg_tinymq_repo = https://github.com/ChicagoBoss/tinymq
+pkg_tinymq_commit = master
+
+PACKAGES += tinymt
+pkg_tinymt_name = tinymt
+pkg_tinymt_description = TinyMT pseudo random number generator for Erlang.
+pkg_tinymt_homepage = https://github.com/jj1bdx/tinymt-erlang
+pkg_tinymt_fetch = git
+pkg_tinymt_repo = https://github.com/jj1bdx/tinymt-erlang
+pkg_tinymt_commit = master
+
+PACKAGES += tirerl
+pkg_tirerl_name = tirerl
+pkg_tirerl_description = Erlang interface to Elastic Search
+pkg_tirerl_homepage = https://github.com/inaka/tirerl
+pkg_tirerl_fetch = git
+pkg_tirerl_repo = https://github.com/inaka/tirerl
+pkg_tirerl_commit = master
+
+PACKAGES += toml
+pkg_toml_name = toml
+pkg_toml_description = TOML (0.4.0) config parser
+pkg_toml_homepage = http://dozzie.jarowit.net/trac/wiki/TOML
+pkg_toml_fetch = git
+pkg_toml_repo = https://github.com/dozzie/toml
+pkg_toml_commit = v0.2.0
+
+PACKAGES += traffic_tools
+pkg_traffic_tools_name = traffic_tools
+pkg_traffic_tools_description = Simple traffic limiting library
+pkg_traffic_tools_homepage = https://github.com/systra/traffic_tools
+pkg_traffic_tools_fetch = git
+pkg_traffic_tools_repo = https://github.com/systra/traffic_tools
+pkg_traffic_tools_commit = master
+
+PACKAGES += trails
+pkg_trails_name = trails
+pkg_trails_description = A couple of improvements over Cowboy Routes
+pkg_trails_homepage = http://inaka.github.io/cowboy-trails/
+pkg_trails_fetch = git
+pkg_trails_repo = https://github.com/inaka/cowboy-trails
+pkg_trails_commit = master
+
+PACKAGES += trane
+pkg_trane_name = trane
+pkg_trane_description = SAX-style parser for broken HTML, written in Erlang
+pkg_trane_homepage = https://github.com/massemanet/trane
+pkg_trane_fetch = git
+pkg_trane_repo = https://github.com/massemanet/trane
+pkg_trane_commit = master
+
+PACKAGES += transit
+pkg_transit_name = transit
+pkg_transit_description = Transit format for Erlang
+pkg_transit_homepage = https://github.com/isaiah/transit-erlang
+pkg_transit_fetch = git
+pkg_transit_repo = https://github.com/isaiah/transit-erlang
+pkg_transit_commit = master
+
+PACKAGES += trie
+pkg_trie_name = trie
+pkg_trie_description = Erlang Trie Implementation
+pkg_trie_homepage = https://github.com/okeuday/trie
+pkg_trie_fetch = git
+pkg_trie_repo = https://github.com/okeuday/trie
+pkg_trie_commit = master
+
+PACKAGES += triq
+pkg_triq_name = triq
+pkg_triq_description = Trifork QuickCheck
+pkg_triq_homepage = https://triq.gitlab.io
+pkg_triq_fetch = git
+pkg_triq_repo = https://gitlab.com/triq/triq.git
+pkg_triq_commit = master
+
+PACKAGES += tunctl
+pkg_tunctl_name = tunctl
+pkg_tunctl_description = Erlang TUN/TAP interface
+pkg_tunctl_homepage = https://github.com/msantos/tunctl
+pkg_tunctl_fetch = git
+pkg_tunctl_repo = https://github.com/msantos/tunctl
+pkg_tunctl_commit = master
+
+PACKAGES += twerl
+pkg_twerl_name = twerl
+pkg_twerl_description = Erlang client for the Twitter Streaming API
+pkg_twerl_homepage = https://github.com/lucaspiller/twerl
+pkg_twerl_fetch = git
+pkg_twerl_repo = https://github.com/lucaspiller/twerl
+pkg_twerl_commit = oauth
+
+PACKAGES += twitter_erlang
+pkg_twitter_erlang_name = twitter_erlang
+pkg_twitter_erlang_description = An Erlang Twitter client
+pkg_twitter_erlang_homepage = https://github.com/ngerakines/erlang_twitter
+pkg_twitter_erlang_fetch = git
+pkg_twitter_erlang_repo = https://github.com/ngerakines/erlang_twitter
+pkg_twitter_erlang_commit = master
+
+PACKAGES += ucol_nif
+pkg_ucol_nif_name = ucol_nif
+pkg_ucol_nif_description = ICU based collation Erlang module
+pkg_ucol_nif_homepage = https://github.com/refuge/ucol_nif
+pkg_ucol_nif_fetch = git
+pkg_ucol_nif_repo = https://github.com/refuge/ucol_nif
+pkg_ucol_nif_commit = master
+
+PACKAGES += unicorn
+pkg_unicorn_name = unicorn
+pkg_unicorn_description = Generic configuration server
+pkg_unicorn_homepage = https://github.com/shizzard/unicorn
+pkg_unicorn_fetch = git
+pkg_unicorn_repo = https://github.com/shizzard/unicorn
+pkg_unicorn_commit = master
+
+PACKAGES += unsplit
+pkg_unsplit_name = unsplit
+pkg_unsplit_description = Resolves conflicts in Mnesia after network splits
+pkg_unsplit_homepage = https://github.com/uwiger/unsplit
+pkg_unsplit_fetch = git
+pkg_unsplit_repo = https://github.com/uwiger/unsplit
+pkg_unsplit_commit = master
+
+PACKAGES += uuid
+pkg_uuid_name = uuid
+pkg_uuid_description = Erlang UUID Implementation
+pkg_uuid_homepage = https://github.com/okeuday/uuid
+pkg_uuid_fetch = git
+pkg_uuid_repo = https://github.com/okeuday/uuid
+pkg_uuid_commit = master
+
+PACKAGES += ux
+pkg_ux_name = ux
+pkg_ux_description = Unicode eXtension for Erlang (Strings, Collation)
+pkg_ux_homepage = https://github.com/erlang-unicode/ux
+pkg_ux_fetch = git
+pkg_ux_repo = https://github.com/erlang-unicode/ux
+pkg_ux_commit = master
+
+PACKAGES += vert
+pkg_vert_name = vert
+pkg_vert_description = Erlang binding to libvirt virtualization API
+pkg_vert_homepage = https://github.com/msantos/erlang-libvirt
+pkg_vert_fetch = git
+pkg_vert_repo = https://github.com/msantos/erlang-libvirt
+pkg_vert_commit = master
+
+PACKAGES += verx
+pkg_verx_name = verx
+pkg_verx_description = Erlang implementation of the libvirtd remote protocol
+pkg_verx_homepage = https://github.com/msantos/verx
+pkg_verx_fetch = git
+pkg_verx_repo = https://github.com/msantos/verx
+pkg_verx_commit = master
+
+PACKAGES += vmq_acl
+pkg_vmq_acl_name = vmq_acl
+pkg_vmq_acl_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_acl_homepage = https://verne.mq/
+pkg_vmq_acl_fetch = git
+pkg_vmq_acl_repo = https://github.com/erlio/vmq_acl
+pkg_vmq_acl_commit = master
+
+PACKAGES += vmq_bridge
+pkg_vmq_bridge_name = vmq_bridge
+pkg_vmq_bridge_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_bridge_homepage = https://verne.mq/
+pkg_vmq_bridge_fetch = git
+pkg_vmq_bridge_repo = https://github.com/erlio/vmq_bridge
+pkg_vmq_bridge_commit = master
+
+PACKAGES += vmq_graphite
+pkg_vmq_graphite_name = vmq_graphite
+pkg_vmq_graphite_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_graphite_homepage = https://verne.mq/
+pkg_vmq_graphite_fetch = git
+pkg_vmq_graphite_repo = https://github.com/erlio/vmq_graphite
+pkg_vmq_graphite_commit = master
+
+PACKAGES += vmq_passwd
+pkg_vmq_passwd_name = vmq_passwd
+pkg_vmq_passwd_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_passwd_homepage = https://verne.mq/
+pkg_vmq_passwd_fetch = git
+pkg_vmq_passwd_repo = https://github.com/erlio/vmq_passwd
+pkg_vmq_passwd_commit = master
+
+PACKAGES += vmq_server
+pkg_vmq_server_name = vmq_server
+pkg_vmq_server_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_server_homepage = https://verne.mq/
+pkg_vmq_server_fetch = git
+pkg_vmq_server_repo = https://github.com/erlio/vmq_server
+pkg_vmq_server_commit = master
+
+PACKAGES += vmq_snmp
+pkg_vmq_snmp_name = vmq_snmp
+pkg_vmq_snmp_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_snmp_homepage = https://verne.mq/
+pkg_vmq_snmp_fetch = git
+pkg_vmq_snmp_repo = https://github.com/erlio/vmq_snmp
+pkg_vmq_snmp_commit = master
+
+PACKAGES += vmq_systree
+pkg_vmq_systree_name = vmq_systree
+pkg_vmq_systree_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_systree_homepage = https://verne.mq/
+pkg_vmq_systree_fetch = git
+pkg_vmq_systree_repo = https://github.com/erlio/vmq_systree
+pkg_vmq_systree_commit = master
+
+PACKAGES += vmstats
+pkg_vmstats_name = vmstats
+pkg_vmstats_description = tiny Erlang app that works in conjunction with statsderl in order to generate information on the Erlang VM for graphite logs.
+pkg_vmstats_homepage = https://github.com/ferd/vmstats
+pkg_vmstats_fetch = git
+pkg_vmstats_repo = https://github.com/ferd/vmstats
+pkg_vmstats_commit = master
+
+PACKAGES += walrus
+pkg_walrus_name = walrus
+pkg_walrus_description = Walrus - Mustache-like Templating
+pkg_walrus_homepage = https://github.com/devinus/walrus
+pkg_walrus_fetch = git
+pkg_walrus_repo = https://github.com/devinus/walrus
+pkg_walrus_commit = master
+
+PACKAGES += webmachine
+pkg_webmachine_name = webmachine
+pkg_webmachine_description = A REST-based system for building web applications.
+pkg_webmachine_homepage = https://github.com/basho/webmachine
+pkg_webmachine_fetch = git
+pkg_webmachine_repo = https://github.com/basho/webmachine
+pkg_webmachine_commit = master
+
+PACKAGES += websocket_client
+pkg_websocket_client_name = websocket_client
+pkg_websocket_client_description = Erlang websocket client (ws and wss supported)
+pkg_websocket_client_homepage = https://github.com/jeremyong/websocket_client
+pkg_websocket_client_fetch = git
+pkg_websocket_client_repo = https://github.com/jeremyong/websocket_client
+pkg_websocket_client_commit = master
+
+PACKAGES += worker_pool
+pkg_worker_pool_name = worker_pool
+pkg_worker_pool_description = A simple Erlang worker pool
+pkg_worker_pool_homepage = https://github.com/inaka/worker_pool
+pkg_worker_pool_fetch = git
+pkg_worker_pool_repo = https://github.com/inaka/worker_pool
+pkg_worker_pool_commit = master
+
+PACKAGES += wrangler
+pkg_wrangler_name = wrangler
+pkg_wrangler_description = Import of the Wrangler svn repository.
+pkg_wrangler_homepage = http://www.cs.kent.ac.uk/projects/wrangler/Home.html
+pkg_wrangler_fetch = git
+pkg_wrangler_repo = https://github.com/RefactoringTools/wrangler
+pkg_wrangler_commit = master
+
+PACKAGES += wsock
+pkg_wsock_name = wsock
+pkg_wsock_description = Erlang library to build WebSocket clients and servers
+pkg_wsock_homepage = https://github.com/madtrick/wsock
+pkg_wsock_fetch = git
+pkg_wsock_repo = https://github.com/madtrick/wsock
+pkg_wsock_commit = master
+
+PACKAGES += xhttpc
+pkg_xhttpc_name = xhttpc
+pkg_xhttpc_description = Extensible HTTP Client for Erlang
+pkg_xhttpc_homepage = https://github.com/seriyps/xhttpc
+pkg_xhttpc_fetch = git
+pkg_xhttpc_repo = https://github.com/seriyps/xhttpc
+pkg_xhttpc_commit = master
+
+PACKAGES += xref_runner
+pkg_xref_runner_name = xref_runner
+pkg_xref_runner_description = Erlang Xref Runner (inspired by rebar xref)
+pkg_xref_runner_homepage = https://github.com/inaka/xref_runner
+pkg_xref_runner_fetch = git
+pkg_xref_runner_repo = https://github.com/inaka/xref_runner
+pkg_xref_runner_commit = master
+
+PACKAGES += yamerl
+pkg_yamerl_name = yamerl
+pkg_yamerl_description = YAML 1.2 parser in pure Erlang
+pkg_yamerl_homepage = https://github.com/yakaz/yamerl
+pkg_yamerl_fetch = git
+pkg_yamerl_repo = https://github.com/yakaz/yamerl
+pkg_yamerl_commit = master
+
+PACKAGES += yamler
+pkg_yamler_name = yamler
+pkg_yamler_description = libyaml-based YAML loader for Erlang
+pkg_yamler_homepage = https://github.com/goertzenator/yamler
+pkg_yamler_fetch = git
+pkg_yamler_repo = https://github.com/goertzenator/yamler
+pkg_yamler_commit = master
+
+PACKAGES += yaws
+pkg_yaws_name = yaws
+pkg_yaws_description = Yaws webserver
+pkg_yaws_homepage = http://yaws.hyber.org
+pkg_yaws_fetch = git
+pkg_yaws_repo = https://github.com/klacke/yaws
+pkg_yaws_commit = master
+
+PACKAGES += zab_engine
+pkg_zab_engine_name = zab_engine
+pkg_zab_engine_description = Zab protocol implementation in Erlang
+pkg_zab_engine_homepage = https://github.com/xinmingyao/zab_engine
+pkg_zab_engine_fetch = git
+pkg_zab_engine_repo = https://github.com/xinmingyao/zab_engine
+pkg_zab_engine_commit = master
+
+PACKAGES += zabbix_sender
+pkg_zabbix_sender_name = zabbix_sender
+pkg_zabbix_sender_description = Zabbix trapper for sending data to Zabbix in pure Erlang
+pkg_zabbix_sender_homepage = https://github.com/stalkermn/zabbix_sender
+pkg_zabbix_sender_fetch = git
+pkg_zabbix_sender_repo = https://github.com/stalkermn/zabbix_sender.git
+pkg_zabbix_sender_commit = master
+
+PACKAGES += zeta
+pkg_zeta_name = zeta
+pkg_zeta_description = HTTP access log parser in Erlang
+pkg_zeta_homepage = https://github.com/s1n4/zeta
+pkg_zeta_fetch = git
+pkg_zeta_repo = https://github.com/s1n4/zeta
+pkg_zeta_commit = master
+
+PACKAGES += zippers
+pkg_zippers_name = zippers
+pkg_zippers_description = A library for functional zipper data structures in Erlang
+pkg_zippers_homepage = https://github.com/ferd/zippers
+pkg_zippers_fetch = git
+pkg_zippers_repo = https://github.com/ferd/zippers
+pkg_zippers_commit = master
+
+PACKAGES += zlists
+pkg_zlists_name = zlists
+pkg_zlists_description = Erlang lazy lists library.
+pkg_zlists_homepage = https://github.com/vjache/erlang-zlists
+pkg_zlists_fetch = git
+pkg_zlists_repo = https://github.com/vjache/erlang-zlists
+pkg_zlists_commit = master
+
+PACKAGES += zraft_lib
+pkg_zraft_lib_name = zraft_lib
+pkg_zraft_lib_description = Erlang Raft consensus protocol implementation
+pkg_zraft_lib_homepage = https://github.com/dreyk/zraft_lib
+pkg_zraft_lib_fetch = git
+pkg_zraft_lib_repo = https://github.com/dreyk/zraft_lib
+pkg_zraft_lib_commit = master
+
+PACKAGES += zucchini
+pkg_zucchini_name = zucchini
+pkg_zucchini_description = An Erlang INI parser
+pkg_zucchini_homepage = https://github.com/devinus/zucchini
+pkg_zucchini_fetch = git
+pkg_zucchini_repo = https://github.com/devinus/zucchini
+pkg_zucchini_commit = master
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: search
+
+define pkg_print
+ $(verbose) printf "%s\n" \
+ $(if $(call core_eq,$(1),$(pkg_$(1)_name)),,"Pkg name: $(1)") \
+ "App name: $(pkg_$(1)_name)" \
+ "Description: $(pkg_$(1)_description)" \
+ "Home page: $(pkg_$(1)_homepage)" \
+ "Fetch with: $(pkg_$(1)_fetch)" \
+ "Repository: $(pkg_$(1)_repo)" \
+ "Commit: $(pkg_$(1)_commit)" \
+ ""
+
+endef
+
+search:
+ifdef q
+ $(foreach p,$(PACKAGES), \
+ $(if $(findstring $(call core_lc,$(q)),$(call core_lc,$(pkg_$(p)_name) $(pkg_$(p)_description))), \
+ $(call pkg_print,$(p))))
+else
+ $(foreach p,$(PACKAGES),$(call pkg_print,$(p)))
+endif
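+
+# Usage sketch: "make search q=pool" prints every package whose name or
+# description contains "pool" (matched case-insensitively), while a plain
+# "make search" lists the whole index.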
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-deps clean-tmp-deps.log
+
+# Configuration.
+
+ifdef OTP_DEPS
+$(warning The variable OTP_DEPS is deprecated in favor of LOCAL_DEPS.)
+endif
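+# For example, a project would now write something like "LOCAL_DEPS = crypto ssl"
+# (hypothetical application names) where it previously used OTP_DEPS.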
+
+IGNORE_DEPS ?=
+export IGNORE_DEPS
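+# A sketch: "IGNORE_DEPS += edown" (edown being an example) removes the
+# dependency from ALL_DEPS_DIRS below, so it is neither fetched nor built.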
+
+APPS_DIR ?= $(CURDIR)/apps
+export APPS_DIR
+
+DEPS_DIR ?= $(CURDIR)/deps
+export DEPS_DIR
+
+REBAR_DEPS_DIR = $(DEPS_DIR)
+export REBAR_DEPS_DIR
+
+REBAR_GIT ?= https://github.com/rebar/rebar
+REBAR_COMMIT ?= 576e12171ab8d69b048b827b92aa65d067deea01
+
+# External "early" plugins (see core/plugins.mk for regular plugins).
+# Both kinds of plugins are loaded through the core_dep_plugin macro.
+
+define core_dep_plugin
+ifeq ($(2),$(PROJECT))
+-include $$(patsubst $(PROJECT)/%,%,$(1))
+else
+-include $(DEPS_DIR)/$(1)
+
+$(DEPS_DIR)/$(1): $(DEPS_DIR)/$(2) ;
+endif
+endef
+
+DEP_EARLY_PLUGINS ?=
+
+$(foreach p,$(DEP_EARLY_PLUGINS),\
+ $(eval $(if $(findstring /,$p),\
+ $(call core_dep_plugin,$p,$(firstword $(subst /, ,$p))),\
+ $(call core_dep_plugin,$p/early-plugins.mk,$p))))
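+
+# A sketch of both accepted forms (names hypothetical): a bare dependency name
+# loads $(DEPS_DIR)/mydep/early-plugins.mk, while an entry containing a slash
+# is taken as a path inside the dependency named by its first path segment:
+# DEP_EARLY_PLUGINS = mydep
+# DEP_EARLY_PLUGINS = mydep/mk/early.mk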
+
+# Query functions.
+
+query_fetch_method = $(if $(dep_$(1)),$(call _qfm_dep,$(word 1,$(dep_$(1)))),$(call _qfm_pkg,$(1)))
+_qfm_dep = $(if $(dep_fetch_$(1)),$(1),$(if $(IS_DEP),legacy,fail))
+_qfm_pkg = $(if $(pkg_$(1)_fetch),$(pkg_$(1)_fetch),fail)
+
+query_name = $(if $(dep_$(1)),$(1),$(if $(pkg_$(1)_name),$(pkg_$(1)_name),$(1)))
+
+query_repo = $(call _qr,$(1),$(call query_fetch_method,$(1)))
+_qr = $(if $(query_repo_$(2)),$(call query_repo_$(2),$(1)),$(call dep_repo,$(1)))
+
+query_repo_default = $(if $(dep_$(1)),$(word 2,$(dep_$(1))),$(pkg_$(1)_repo))
+query_repo_git = $(patsubst git://github.com/%,https://github.com/%,$(call query_repo_default,$(1)))
+query_repo_git-subfolder = $(call query_repo_git,$(1))
+query_repo_git-submodule = -
+query_repo_hg = $(call query_repo_default,$(1))
+query_repo_svn = $(call query_repo_default,$(1))
+query_repo_cp = $(call query_repo_default,$(1))
+query_repo_ln = $(call query_repo_default,$(1))
+query_repo_hex = https://hex.pm/packages/$(if $(word 3,$(dep_$(1))),$(word 3,$(dep_$(1))),$(1))
+query_repo_fail = -
+query_repo_legacy = -
+
+query_version = $(call _qv,$(1),$(call query_fetch_method,$(1)))
+_qv = $(if $(query_version_$(2)),$(call query_version_$(2),$(1)),$(call dep_commit,$(1)))
+
+query_version_default = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(word 3,$(dep_$(1))),$(pkg_$(1)_commit)))
+query_version_git = $(call query_version_default,$(1))
+query_version_git-subfolder = $(call query_version_git,$(1))
+query_version_git-submodule = -
+query_version_hg = $(call query_version_default,$(1))
+query_version_svn = -
+query_version_cp = -
+query_version_ln = -
+query_version_hex = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(word 2,$(dep_$(1))),$(pkg_$(1)_commit)))
+query_version_fail = -
+query_version_legacy = -
+
+query_extra = $(call _qe,$(1),$(call query_fetch_method,$(1)))
+_qe = $(if $(query_extra_$(2)),$(call query_extra_$(2),$(1)),-)
+
+query_extra_git = -
+query_extra_git-subfolder = $(if $(dep_$(1)),subfolder=$(word 4,$(dep_$(1))),-)
+query_extra_git-submodule = -
+query_extra_hg = -
+query_extra_svn = -
+query_extra_cp = -
+query_extra_ln = -
+query_extra_hex = $(if $(dep_$(1)),package-name=$(word 3,$(dep_$(1))),-)
+query_extra_fail = -
+query_extra_legacy = -
+
+query_absolute_path = $(addprefix $(DEPS_DIR)/,$(call query_name,$(1)))
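+
+# For instance, given a hypothetical "dep_mydep = git https://example.com/mydep.git v1.0",
+# $(call query_repo,mydep) yields the repository URL, $(call query_version,mydep)
+# yields v1.0 and $(call query_absolute_path,mydep) yields $(DEPS_DIR)/mydep.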
+
+# Deprecated legacy query functions.
+dep_fetch = $(call query_fetch_method,$(1))
+dep_name = $(call query_name,$(1))
+dep_repo = $(call query_repo_git,$(1))
+dep_commit = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(if $(filter hex,$(word 1,$(dep_$(1)))),$(word 2,$(dep_$(1))),$(word 3,$(dep_$(1)))),$(pkg_$(1)_commit)))
+
+LOCAL_DEPS_DIRS = $(foreach a,$(LOCAL_DEPS),$(if $(wildcard $(APPS_DIR)/$(a)),$(APPS_DIR)/$(a)))
+ALL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(foreach dep,$(filter-out $(IGNORE_DEPS),$(BUILD_DEPS) $(DEPS)),$(call dep_name,$(dep))))
+
+# When we are calling an app directly, we don't want to include it here;
+# otherwise it would be treated both as an app and as the top-level project.
+ALL_APPS_DIRS = $(if $(wildcard $(APPS_DIR)/),$(filter-out $(APPS_DIR),$(shell find $(APPS_DIR) -maxdepth 1 -type d)))
+ifdef ROOT_DIR
+ifndef IS_APP
+ALL_APPS_DIRS := $(filter-out $(APPS_DIR)/$(notdir $(CURDIR)),$(ALL_APPS_DIRS))
+endif
+endif
+
+ifeq ($(filter $(APPS_DIR) $(DEPS_DIR),$(subst :, ,$(ERL_LIBS))),)
+ifeq ($(ERL_LIBS),)
+ ERL_LIBS = $(APPS_DIR):$(DEPS_DIR)
+else
+ ERL_LIBS := $(ERL_LIBS):$(APPS_DIR):$(DEPS_DIR)
+endif
+endif
+export ERL_LIBS
+
+export NO_AUTOPATCH
+
+# Verbosity.
+
+dep_verbose_0 = @echo " DEP $1 ($(call dep_commit,$1))";
+dep_verbose_2 = set -x;
+dep_verbose = $(dep_verbose_$(V))
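+# (A usage sketch: "make V=2" traces each command via set -x, while the
+# default V=0 prints only the short " DEP <name>" lines.)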
+
+# Optimization: don't recompile deps unless truly necessary.
+
+ifndef IS_DEP
+ifneq ($(MAKELEVEL),0)
+$(shell rm -f ebin/dep_built)
+endif
+endif
+
+# Core targets.
+
+ALL_APPS_DIRS_TO_BUILD = $(if $(LOCAL_DEPS_DIRS)$(IS_APP),$(LOCAL_DEPS_DIRS),$(ALL_APPS_DIRS))
+
+apps:: $(ALL_APPS_DIRS) clean-tmp-deps.log | $(ERLANG_MK_TMP)
+# Create an ebin directory for each app to make sure Erlang recognizes them
+# as proper OTP applications when using -include_lib. This is a temporary
+# fix; a proper fix would be to compile apps/* in the right order.
+ifndef IS_APP
+ifneq ($(ALL_APPS_DIRS),)
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ mkdir -p $$dep/ebin; \
+ done
+endif
+endif
+# At the toplevel: if LOCAL_DEPS is defined with at least one local app, only
+# compile that list of apps. Otherwise, compile everything.
+# Within an app: compile all LOCAL_DEPS that are (uncompiled) local apps.
+ifneq ($(ALL_APPS_DIRS_TO_BUILD),)
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS_TO_BUILD); do \
+ if grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/apps.log; then \
+ :; \
+ else \
+ echo $$dep >> $(ERLANG_MK_TMP)/apps.log; \
+ $(MAKE) -C $$dep $(if $(IS_TEST),test-build-app) IS_APP=1; \
+ fi \
+ done
+endif
+
+clean-tmp-deps.log:
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_TMP)/apps.log $(ERLANG_MK_TMP)/deps.log
+endif
+
+# Erlang.mk does not rebuild dependencies after they were compiled once.
+# Developers working on the top-level project and some of its dependencies
+# at the same time may want to change this behavior. There are two
+# solutions (see the usage sketch below):
+# 1. Set `FULL=1` so that all dependencies are visited and
+# recursively recompiled if necessary.
+# 2. Set `FORCE_REBUILD=` to the specific list of dependencies that
+# should be recompiled (instead of the whole set).
+
+FORCE_REBUILD ?=
+
+ifeq ($(origin FULL),undefined)
+ifneq ($(strip $(force_rebuild_dep)$(FORCE_REBUILD)),)
+define force_rebuild_dep
+echo "$(FORCE_REBUILD)" | grep -qw "$$(basename "$1")"
+endef
+endif
+endif
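+
+# Usage sketch (dependency names are hypothetical):
+# make FULL=1 # revisit all dependencies, recompiling as necessary
+# make FORCE_REBUILD="mydep otherdep" # force only these two to rebuild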
+
+ifneq ($(SKIP_DEPS),)
+deps::
+else
+deps:: $(ALL_DEPS_DIRS) apps clean-tmp-deps.log | $(ERLANG_MK_TMP)
+ifneq ($(ALL_DEPS_DIRS),)
+ $(verbose) set -e; for dep in $(ALL_DEPS_DIRS); do \
+ if grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/deps.log; then \
+ :; \
+ else \
+ echo $$dep >> $(ERLANG_MK_TMP)/deps.log; \
+ if [ -z "$(strip $(FULL))" ] $(if $(force_rebuild_dep),&& ! ($(call force_rebuild_dep,$$dep)),) && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ elif [ -f $$dep/GNUmakefile ] || [ -f $$dep/makefile ] || [ -f $$dep/Makefile ]; then \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ else \
+ echo "Error: No Makefile to build dependency $$dep." >&2; \
+ exit 2; \
+ fi \
+ fi \
+ done
+endif
+endif
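+
+# Note: running "make SKIP_DEPS=1" turns the deps target above into a no-op,
+# which can be useful once dependencies are already fetched and built.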
+
+# Deps related targets.
+
+# @todo rename GNUmakefile and makefile into Makefile first, if they exist
+# While the Makefile could also be named GNUmakefile or makefile,
+# in practice only Makefile has been needed so far.
+define dep_autopatch
+ if [ -f $(DEPS_DIR)/$(1)/erlang.mk ]; then \
+ rm -rf $(DEPS_DIR)/$1/ebin/; \
+ $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+ $(call dep_autopatch_erlang_mk,$(1)); \
+ elif [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+ if [ -f $(DEPS_DIR)/$1/rebar.lock ]; then \
+ $(call dep_autopatch2,$1); \
+ elif [ 0 != `grep -c "include ../\w*\.mk" $(DEPS_DIR)/$(1)/Makefile` ]; then \
+ $(call dep_autopatch2,$(1)); \
+ elif [ 0 != `grep -ci "^[^#].*rebar" $(DEPS_DIR)/$(1)/Makefile` ]; then \
+ $(call dep_autopatch2,$(1)); \
+ elif [ -n "`find $(DEPS_DIR)/$(1)/ -type f -name \*.mk -not -name erlang.mk -exec grep -i "^[^#].*rebar" '{}' \;`" ]; then \
+ $(call dep_autopatch2,$(1)); \
+ fi \
+ else \
+ if [ ! -d $(DEPS_DIR)/$(1)/src/ ]; then \
+ $(call dep_autopatch_noop,$(1)); \
+ else \
+ $(call dep_autopatch2,$(1)); \
+ fi \
+ fi
+endef
+
+define dep_autopatch2
+ ! test -f $(DEPS_DIR)/$1/ebin/$1.app || \
+ mv -n $(DEPS_DIR)/$1/ebin/$1.app $(DEPS_DIR)/$1/src/$1.app.src; \
+ rm -f $(DEPS_DIR)/$1/ebin/$1.app; \
+ if [ -f $(DEPS_DIR)/$1/src/$1.app.src.script ]; then \
+ $(call erlang,$(call dep_autopatch_appsrc_script.erl,$(1))); \
+ fi; \
+ $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+ if [ -f $(DEPS_DIR)/$(1)/rebar -o -f $(DEPS_DIR)/$(1)/rebar.config -o -f $(DEPS_DIR)/$(1)/rebar.config.script -o -f $(DEPS_DIR)/$1/rebar.lock ]; then \
+ $(call dep_autopatch_fetch_rebar); \
+ $(call dep_autopatch_rebar,$(1)); \
+ else \
+ $(call dep_autopatch_gen,$(1)); \
+ fi
+endef
+
+define dep_autopatch_noop
+ printf "noop:\n" > $(DEPS_DIR)/$(1)/Makefile
+endef
+
+# Replace "include erlang.mk" with a line that will load the parent Erlang.mk
+# if given. Do it for all 3 possible Makefile file names.
+ifeq ($(NO_AUTOPATCH_ERLANG_MK),)
+define dep_autopatch_erlang_mk
+ for f in Makefile makefile GNUmakefile; do \
+ if [ -f $(DEPS_DIR)/$1/$$f ]; then \
+ sed -i.bak s/'include *erlang.mk'/'include $$(if $$(ERLANG_MK_FILENAME),$$(ERLANG_MK_FILENAME),erlang.mk)'/ $(DEPS_DIR)/$1/$$f; \
+ fi \
+ done
+endef
+else
+define dep_autopatch_erlang_mk
+ :
+endef
+endif
+
+define dep_autopatch_gen
+ printf "%s\n" \
+ "ERLC_OPTS = +debug_info" \
+ "include ../../erlang.mk" > $(DEPS_DIR)/$(1)/Makefile
+endef
+
+# We use flock/lockf when available to avoid concurrency issues.
+define dep_autopatch_fetch_rebar
+ if command -v flock >/dev/null; then \
+ flock $(ERLANG_MK_TMP)/rebar.lock sh -c "$(call dep_autopatch_fetch_rebar2)"; \
+ elif command -v lockf >/dev/null; then \
+ lockf $(ERLANG_MK_TMP)/rebar.lock sh -c "$(call dep_autopatch_fetch_rebar2)"; \
+ else \
+ $(call dep_autopatch_fetch_rebar2); \
+ fi
+endef
+
+define dep_autopatch_fetch_rebar2
+ if [ ! -d $(ERLANG_MK_TMP)/rebar ]; then \
+ git clone -q -n -- $(REBAR_GIT) $(ERLANG_MK_TMP)/rebar; \
+ cd $(ERLANG_MK_TMP)/rebar; \
+ git checkout -q $(REBAR_COMMIT); \
+ ./bootstrap; \
+ cd -; \
+ fi
+endef
+
+define dep_autopatch_rebar
+ if [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+ mv $(DEPS_DIR)/$(1)/Makefile $(DEPS_DIR)/$(1)/Makefile.orig.mk; \
+ fi; \
+ $(call erlang,$(call dep_autopatch_rebar.erl,$(1))); \
+ rm -f $(DEPS_DIR)/$(1)/ebin/$(1).app
+endef
+
+define dep_autopatch_rebar.erl
+ application:load(rebar),
+ application:set_env(rebar, log_level, debug),
+ rmemo:start(),
+ Conf1 = case file:consult("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config)") of
+ {ok, Conf0} -> Conf0;
+ _ -> []
+ end,
+ {Conf, OsEnv} = fun() ->
+ case filelib:is_file("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)") of
+ false -> {Conf1, []};
+ true ->
+ Bindings0 = erl_eval:new_bindings(),
+ Bindings1 = erl_eval:add_binding('CONFIG', Conf1, Bindings0),
+ Bindings = erl_eval:add_binding('SCRIPT', "$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings1),
+ Before = os:getenv(),
+ {ok, Conf2} = file:script("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings),
+ {Conf2, lists:foldl(fun(E, Acc) -> lists:delete(E, Acc) end, os:getenv(), Before)}
+ end
+ end(),
+ Write = fun (Text) ->
+ file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/Makefile)", Text, [append])
+ end,
+ Escape = fun (Text) ->
+ re:replace(Text, "\\\\$$", "\$$$$", [global, {return, list}])
+ end,
+ Write("IGNORE_DEPS += edown eper eunit_formatters meck node_package "
+ "rebar_lock_deps_plugin rebar_vsn_plugin reltool_util\n"),
+ Write("C_SRC_DIR = /path/do/not/exist\n"),
+ Write("C_SRC_TYPE = rebar\n"),
+ Write("DRV_CFLAGS = -fPIC\nexport DRV_CFLAGS\n"),
+ Write(["ERLANG_ARCH = ", rebar_utils:wordsize(), "\nexport ERLANG_ARCH\n"]),
+ ToList = fun
+ (V) when is_atom(V) -> atom_to_list(V);
+ (V) when is_list(V) -> "'\\"" ++ V ++ "\\"'"
+ end,
+ fun() ->
+ Write("ERLC_OPTS = +debug_info\nexport ERLC_OPTS\n"),
+ case lists:keyfind(erl_opts, 1, Conf) of
+ false -> ok;
+ {_, ErlOpts} ->
+ lists:foreach(fun
+ ({d, D}) ->
+ Write("ERLC_OPTS += -D" ++ ToList(D) ++ "=1\n");
+ ({d, DKey, DVal}) ->
+ Write("ERLC_OPTS += -D" ++ ToList(DKey) ++ "=" ++ ToList(DVal) ++ "\n");
+ ({i, I}) ->
+ Write(["ERLC_OPTS += -I ", I, "\n"]);
+ ({platform_define, Regex, D}) ->
+ case rebar_utils:is_arch(Regex) of
+ true -> Write("ERLC_OPTS += -D" ++ ToList(D) ++ "=1\n");
+ false -> ok
+ end;
+ ({parse_transform, PT}) ->
+ Write("ERLC_OPTS += +'{parse_transform, " ++ ToList(PT) ++ "}'\n");
+ (_) -> ok
+ end, ErlOpts)
+ end,
+ Write("\n")
+ end(),
+ GetHexVsn = fun(N, NP) ->
+ case file:consult("$(call core_native_path,$(DEPS_DIR)/$1/rebar.lock)") of
+ {ok, Lock} ->
+ io:format("~p~n", [Lock]),
+ case lists:keyfind("1.1.0", 1, Lock) of
+ {_, LockPkgs} ->
+ io:format("~p~n", [LockPkgs]),
+ case lists:keyfind(atom_to_binary(N, latin1), 1, LockPkgs) of
+ {_, {pkg, _, Vsn}, _} ->
+ io:format("~p~n", [Vsn]),
+ {N, {hex, NP, binary_to_list(Vsn)}};
+ _ ->
+ false
+ end;
+ _ ->
+ false
+ end;
+ _ ->
+ false
+ end
+ end,
+ SemVsn = fun
+ ("~>" ++ S0) ->
+ S = case S0 of
+ " " ++ S1 -> S1;
+ _ -> S0
+ end,
+ case length([ok || $$. <- S]) of
+ 0 -> S ++ ".0.0";
+ 1 -> S ++ ".0";
+ _ -> S
+ end;
+ (S) -> S
+ end,
+ fun() ->
+ File = case lists:keyfind(deps, 1, Conf) of
+ false -> [];
+ {_, Deps} ->
+ [begin case case Dep of
+ N when is_atom(N) -> GetHexVsn(N, N);
+ {N, S} when is_atom(N), is_list(S) -> {N, {hex, N, SemVsn(S)}};
+ {N, {pkg, NP}} when is_atom(N) -> GetHexVsn(N, NP);
+ {N, S, {pkg, NP}} -> {N, {hex, NP, S}};
+ {N, S} when is_tuple(S) -> {N, S};
+ {N, _, S} -> {N, S};
+ {N, _, S, _} -> {N, S};
+ _ -> false
+ end of
+ false -> ok;
+ {Name, Source} ->
+ {Method, Repo, Commit} = case Source of
+ {hex, NPV, V} -> {hex, V, NPV};
+ {git, R} -> {git, R, master};
+ {M, R, {branch, C}} -> {M, R, C};
+ {M, R, {ref, C}} -> {M, R, C};
+ {M, R, {tag, C}} -> {M, R, C};
+ {M, R, C} -> {M, R, C}
+ end,
+ Write(io_lib:format("DEPS += ~s\ndep_~s = ~s ~s ~s~n", [Name, Name, Method, Repo, Commit]))
+ end end || Dep <- Deps]
+ end
+ end(),
+ fun() ->
+ case lists:keyfind(erl_first_files, 1, Conf) of
+ false -> ok;
+ {_, Files} ->
+ Names = [[" ", case lists:reverse(F) of
+ "lre." ++ Elif -> lists:reverse(Elif);
+ "lrx." ++ Elif -> lists:reverse(Elif);
+ "lry." ++ Elif -> lists:reverse(Elif);
+ Elif -> lists:reverse(Elif)
+ end] || "src/" ++ F <- Files],
+ Write(io_lib:format("COMPILE_FIRST +=~s\n", [Names]))
+ end
+ end(),
+ Write("\n\nrebar_dep: preprocess pre-deps deps pre-app app\n"),
+ Write("\npreprocess::\n"),
+ Write("\npre-deps::\n"),
+ Write("\npre-app::\n"),
+ PatchHook = fun(Cmd) ->
+ Cmd2 = re:replace(Cmd, "^([g]?make)(.*)( -C.*)", "\\\\1\\\\3\\\\2", [{return, list}]),
+ case Cmd2 of
+ "make -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1);
+ "gmake -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1);
+ "make " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1);
+ "gmake " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1);
+ _ -> Escape(Cmd)
+ end
+ end,
+ fun() ->
+ case lists:keyfind(pre_hooks, 1, Conf) of
+ false -> ok;
+ {_, Hooks} ->
+ [case H of
+ {'get-deps', Cmd} ->
+ Write("\npre-deps::\n\t" ++ PatchHook(Cmd) ++ "\n");
+ {compile, Cmd} ->
+ Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n");
+ {Regex, compile, Cmd} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n");
+ false -> ok
+ end;
+ _ -> ok
+ end || H <- Hooks]
+ end
+ end(),
+ ShellToMk = fun(V0) ->
+ V1 = re:replace(V0, "[$$][(]", "$$\(shell ", [global]),
+ V = re:replace(V1, "([$$])(?![(])(\\\\w*)", "\\\\1(\\\\2)", [global]),
+ re:replace(V, "-Werror\\\\b", "", [{return, list}, global])
+ end,
+ PortSpecs = fun() ->
+ case lists:keyfind(port_specs, 1, Conf) of
+ false ->
+ case filelib:is_dir("$(call core_native_path,$(DEPS_DIR)/$1/c_src)") of
+ false -> [];
+ true ->
+ [{"priv/" ++ proplists:get_value(so_name, Conf, "$(1)_drv.so"),
+ proplists:get_value(port_sources, Conf, ["c_src/*.c"]), []}]
+ end;
+ {_, Specs} ->
+ lists:flatten([case S of
+ {Output, Input} -> {ShellToMk(Output), Input, []};
+ {Regex, Output, Input} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {ShellToMk(Output), Input, []};
+ false -> []
+ end;
+ {Regex, Output, Input, [{env, Env}]} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {ShellToMk(Output), Input, Env};
+ false -> []
+ end
+ end || S <- Specs])
+ end
+ end(),
+ PortSpecWrite = fun (Text) ->
+ file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/c_src/Makefile.erlang.mk)", Text, [append])
+ end,
+ case PortSpecs of
+ [] -> ok;
+ _ ->
+ Write("\npre-app::\n\t@$$\(MAKE) --no-print-directory -f c_src/Makefile.erlang.mk\n"),
+ PortSpecWrite(io_lib:format("ERL_CFLAGS ?= -finline-functions -Wall -fPIC -I \\"~s/erts-~s/include\\" -I \\"~s\\"\n",
+ [code:root_dir(), erlang:system_info(version), code:lib_dir(erl_interface, include)])),
+ PortSpecWrite(io_lib:format("ERL_LDFLAGS ?= -L \\"~s\\" -lei\n",
+ [code:lib_dir(erl_interface, lib)])),
+ [PortSpecWrite(["\n", E, "\n"]) || E <- OsEnv],
+ FilterEnv = fun(Env) ->
+ lists:flatten([case E of
+ {_, _} -> E;
+ {Regex, K, V} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {K, V};
+ false -> []
+ end
+ end || E <- Env])
+ end,
+ MergeEnv = fun(Env) ->
+ lists:foldl(fun ({K, V}, Acc) ->
+ case lists:keyfind(K, 1, Acc) of
+ false -> [{K, rebar_utils:expand_env_variable(V, K, "")}|Acc];
+ {_, V0} -> [{K, rebar_utils:expand_env_variable(V, K, V0)}|Acc]
+ end
+ end, [], Env)
+ end,
+ PortEnv = case lists:keyfind(port_env, 1, Conf) of
+ false -> [];
+ {_, PortEnv0} -> FilterEnv(PortEnv0)
+ end,
+ PortSpec = fun ({Output, Input0, Env}) ->
+ filelib:ensure_dir("$(call core_native_path,$(DEPS_DIR)/$1/)" ++ Output),
+ Input = [[" ", I] || I <- Input0],
+ PortSpecWrite([
+ [["\n", K, " = ", ShellToMk(V)] || {K, V} <- lists:reverse(MergeEnv(PortEnv))],
+ case $(PLATFORM) of
+ darwin -> "\n\nLDFLAGS += -flat_namespace -undefined suppress";
+ _ -> ""
+ end,
+ "\n\nall:: ", Output, "\n\t@:\n\n",
+ "%.o: %.c\n\t$$\(CC) -c -o $$\@ $$\< $$\(CFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.C\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.cc\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.cpp\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ [[Output, ": ", K, " += ", ShellToMk(V), "\n"] || {K, V} <- lists:reverse(MergeEnv(FilterEnv(Env)))],
+ Output, ": $$\(foreach ext,.c .C .cc .cpp,",
+ "$$\(patsubst %$$\(ext),%.o,$$\(filter %$$\(ext),$$\(wildcard", Input, "))))\n",
+ "\t$$\(CC) -o $$\@ $$\? $$\(LDFLAGS) $$\(ERL_LDFLAGS) $$\(DRV_LDFLAGS) $$\(EXE_LDFLAGS)",
+ case {filename:extension(Output), $(PLATFORM)} of
+ {[], _} -> "\n";
+ {_, darwin} -> "\n";
+ _ -> " -shared\n"
+ end])
+ end,
+ [PortSpec(S) || S <- PortSpecs]
+ end,
+ fun() ->
+ case lists:keyfind(plugins, 1, Conf) of
+ false -> ok;
+ {_, Plugins0} ->
+ Plugins = [P || P <- Plugins0, is_tuple(P)],
+ case lists:keyfind('lfe-compile', 1, Plugins) of
+ false -> ok;
+ _ -> Write("\nBUILD_DEPS = lfe lfe.mk\ndep_lfe.mk = git https://github.com/ninenines/lfe.mk master\nDEP_PLUGINS = lfe.mk\n")
+ end
+ end
+ end(),
+ Write("\ninclude $$\(if $$\(ERLANG_MK_FILENAME),$$\(ERLANG_MK_FILENAME),erlang.mk)"),
+ RunPlugin = fun(Plugin, Step) ->
+ case erlang:function_exported(Plugin, Step, 2) of
+ false -> ok;
+ true ->
+ c:cd("$(call core_native_path,$(DEPS_DIR)/$1/)"),
+ Ret = Plugin:Step({config, "", Conf, dict:new(), dict:new(), dict:new(),
+ dict:store(base_dir, "", dict:new())}, undefined),
+ io:format("rebar plugin ~p step ~p ret ~p~n", [Plugin, Step, Ret])
+ end
+ end,
+ fun() ->
+ case lists:keyfind(plugins, 1, Conf) of
+ false -> ok;
+ {_, Plugins0} ->
+ Plugins = [P || P <- Plugins0, is_atom(P)],
+ [begin
+ case lists:keyfind(deps, 1, Conf) of
+ false -> ok;
+ {_, Deps} ->
+ case lists:keyfind(P, 1, Deps) of
+ false -> ok;
+ _ ->
+ Path = "$(call core_native_path,$(DEPS_DIR)/)" ++ atom_to_list(P),
+ io:format("~s", [os:cmd("$(MAKE) -C $(call core_native_path,$(DEPS_DIR)/$1) " ++ Path)]),
+ io:format("~s", [os:cmd("$(MAKE) -C " ++ Path ++ " IS_DEP=1")]),
+ code:add_patha(Path ++ "/ebin")
+ end
+ end
+ end || P <- Plugins],
+ [case code:load_file(P) of
+ {module, P} -> ok;
+ _ ->
+ case lists:keyfind(plugin_dir, 1, Conf) of
+ false -> ok;
+ {_, PluginsDir} ->
+ ErlFile = "$(call core_native_path,$(DEPS_DIR)/$1/)" ++ PluginsDir ++ "/" ++ atom_to_list(P) ++ ".erl",
+ {ok, P, Bin} = compile:file(ErlFile, [binary]),
+ {module, P} = code:load_binary(P, ErlFile, Bin)
+ end
+ end || P <- Plugins],
+ [RunPlugin(P, preprocess) || P <- Plugins],
+ [RunPlugin(P, pre_compile) || P <- Plugins],
+ [RunPlugin(P, compile) || P <- Plugins]
+ end
+ end(),
+ halt()
+endef
+
+define dep_autopatch_appsrc_script.erl
+ AppSrc = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+ AppSrcScript = AppSrc ++ ".script",
+ {ok, Conf0} = file:consult(AppSrc),
+ Bindings0 = erl_eval:new_bindings(),
+ Bindings1 = erl_eval:add_binding('CONFIG', Conf0, Bindings0),
+ Bindings = erl_eval:add_binding('SCRIPT', AppSrcScript, Bindings1),
+ Conf = case file:script(AppSrcScript, Bindings) of
+ {ok, [C]} -> C;
+ {ok, C} -> C
+ end,
+ ok = file:write_file(AppSrc, io_lib:format("~p.~n", [Conf])),
+ halt()
+endef
+
+define dep_autopatch_appsrc.erl
+ AppSrcOut = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+ AppSrcIn = case filelib:is_regular(AppSrcOut) of false -> "$(call core_native_path,$(DEPS_DIR)/$1/ebin/$1.app)"; true -> AppSrcOut end,
+ case filelib:is_regular(AppSrcIn) of
+ false -> ok;
+ true ->
+ {ok, [{application, $(1), L0}]} = file:consult(AppSrcIn),
+ L1 = lists:keystore(modules, 1, L0, {modules, []}),
+ L2 = case lists:keyfind(vsn, 1, L1) of
+ {_, git} -> lists:keyreplace(vsn, 1, L1, {vsn, lists:droplast(os:cmd("git -C $(DEPS_DIR)/$1 describe --dirty --tags --always"))});
+ {_, {cmd, _}} -> lists:keyreplace(vsn, 1, L1, {vsn, "cmd"});
+ _ -> L1
+ end,
+ L3 = case lists:keyfind(registered, 1, L2) of false -> [{registered, []}|L2]; _ -> L2 end,
+ ok = file:write_file(AppSrcOut, io_lib:format("~p.~n", [{application, $(1), L3}])),
+ case AppSrcOut of AppSrcIn -> ok; _ -> ok = file:delete(AppSrcIn) end
+ end,
+ halt()
+endef
+
+define dep_fetch_git
+ git clone -q -n -- $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && git checkout -q $(call dep_commit,$(1));
+endef
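+
+# A dependency fetched this way is declared with the repository as the second
+# word and the commit/tag/branch as the third, e.g. (hypothetical names):
+# DEPS += mydep
+# dep_mydep = git https://example.com/mydep.git v1.0.0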
+
+define dep_fetch_git-subfolder
+ mkdir -p $(ERLANG_MK_TMP)/git-subfolder; \
+ git clone -q -n -- $(call dep_repo,$1) \
+ $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1); \
+ cd $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1) \
+ && git checkout -q $(call dep_commit,$1); \
+ ln -s $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1)/$(word 4,$(dep_$(1))) \
+ $(DEPS_DIR)/$(call dep_name,$1);
+endef
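+
+# Same declaration as the git method, plus a fourth word naming the subfolder
+# that gets linked into $(DEPS_DIR), e.g. (hypothetical):
+# dep_mydep = git-subfolder https://example.com/monorepo.git master apps/mydep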
+
+define dep_fetch_git-submodule
+ git submodule update --init -- $(DEPS_DIR)/$1;
+endef
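+
+# This method assumes $(DEPS_DIR)/mydep is already a git submodule of the
+# project, so a bare "dep_mydep = git-submodule" declaration suffices
+# (mydep being hypothetical).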
+
+define dep_fetch_hg
+ hg clone -q -U $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && hg update -q $(call dep_commit,$(1));
+endef
+
+define dep_fetch_svn
+ svn checkout -q $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+define dep_fetch_cp
+ cp -R $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+define dep_fetch_ln
+ ln -s $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+# Hex dependencies only carry a package version, so there is no need to
+# look them up in the Erlang.mk package index.
+define dep_fetch_hex
+ mkdir -p $(ERLANG_MK_TMP)/hex $(DEPS_DIR)/$1; \
+ $(call core_http_get,$(ERLANG_MK_TMP)/hex/$1.tar,\
+ https://repo.hex.pm/tarballs/$(if $(word 3,$(dep_$1)),$(word 3,$(dep_$1)),$1)-$(strip $(word 2,$(dep_$1))).tar); \
+ tar -xOf $(ERLANG_MK_TMP)/hex/$1.tar contents.tar.gz | tar -C $(DEPS_DIR)/$1 -xzf -;
+endef
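+
+# A sketch of the declaration: the version is the second word, and an optional
+# third word gives the Hex package name when it differs from the dep name:
+# dep_mydep = hex 1.2.3
+# dep_mydep = hex 1.2.3 actual_hex_name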
+
+define dep_fetch_fail
+ echo "Error: Unknown or invalid dependency: $(1)." >&2; \
+ exit 78;
+endef
+
+# Kept for compatibility purposes with older Erlang.mk configuration.
+define dep_fetch_legacy
+ $(warning WARNING: '$(1)' dependency configuration uses deprecated format.) \
+ git clone -q -n -- $(word 1,$(dep_$(1))) $(DEPS_DIR)/$(1); \
+ cd $(DEPS_DIR)/$(1) && git checkout -q $(if $(word 2,$(dep_$(1))),$(word 2,$(dep_$(1))),master);
+endef
+
+define dep_target
+$(DEPS_DIR)/$(call dep_name,$1): | $(ERLANG_MK_TMP)
+ $(eval DEP_NAME := $(call dep_name,$1))
+ $(eval DEP_STR := $(if $(filter $1,$(DEP_NAME)),$1,"$1 ($(DEP_NAME))"))
+ $(verbose) if test -d $(APPS_DIR)/$(DEP_NAME); then \
+ echo "Error: Dependency" $(DEP_STR) "conflicts with application found in $(APPS_DIR)/$(DEP_NAME)." >&2; \
+ exit 17; \
+ fi
+ $(verbose) mkdir -p $(DEPS_DIR)
+ $(dep_verbose) $(call dep_fetch_$(strip $(call dep_fetch,$(1))),$(1))
+ $(verbose) if [ -f $(DEPS_DIR)/$(1)/configure.ac -o -f $(DEPS_DIR)/$(1)/configure.in ] \
+ && [ ! -f $(DEPS_DIR)/$(1)/configure ]; then \
+ echo " AUTO " $(DEP_STR); \
+ cd $(DEPS_DIR)/$(1) && autoreconf -Wall -vif -I m4; \
+ fi
+ - $(verbose) if [ -f $(DEPS_DIR)/$(DEP_NAME)/configure ]; then \
+ echo " CONF " $(DEP_STR); \
+ cd $(DEPS_DIR)/$(DEP_NAME) && ./configure; \
+ fi
+ifeq ($(filter $(1),$(NO_AUTOPATCH)),)
+ $(verbose) $$(MAKE) --no-print-directory autopatch-$(DEP_NAME)
+endif
+
+.PHONY: autopatch-$(call dep_name,$1)
+
+autopatch-$(call dep_name,$1)::
+ $(verbose) if [ "$(1)" = "amqp_client" -a "$(RABBITMQ_CLIENT_PATCH)" ]; then \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \
+ echo " PATCH Downloading rabbitmq-codegen"; \
+ git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \
+ fi; \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-server ]; then \
+ echo " PATCH Downloading rabbitmq-server"; \
+ git clone https://github.com/rabbitmq/rabbitmq-server.git $(DEPS_DIR)/rabbitmq-server; \
+ fi; \
+ ln -s $(DEPS_DIR)/amqp_client/deps/rabbit_common-0.0.0 $(DEPS_DIR)/rabbit_common; \
+ elif [ "$(1)" = "rabbit" -a "$(RABBITMQ_SERVER_PATCH)" ]; then \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \
+ echo " PATCH Downloading rabbitmq-codegen"; \
+ git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \
+ fi \
+ elif [ "$1" = "elixir" -a "$(ELIXIR_PATCH)" ]; then \
+ ln -s lib/elixir/ebin $(DEPS_DIR)/elixir/; \
+ else \
+ $$(call dep_autopatch,$(call dep_name,$1)) \
+ fi
+endef
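+
+# A note: autopatching can be skipped for a given dependency by listing its
+# name in NO_AUTOPATCH, e.g. "NO_AUTOPATCH = mydep" (mydep being hypothetical).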
+
+$(foreach dep,$(BUILD_DEPS) $(DEPS),$(eval $(call dep_target,$(dep))))
+
+ifndef IS_APP
+clean:: clean-apps
+
+clean-apps:
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ $(MAKE) -C $$dep clean IS_APP=1; \
+ done
+
+distclean:: distclean-apps
+
+distclean-apps:
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ $(MAKE) -C $$dep distclean IS_APP=1; \
+ done
+endif
+
+ifndef SKIP_DEPS
+distclean:: distclean-deps
+
+distclean-deps:
+ $(gen_verbose) rm -rf $(DEPS_DIR)
+endif
+
+# Forward-declare variables used in core/deps-tools.mk. This is required
+# in case plugins use them.
+
+ERLANG_MK_RECURSIVE_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-deps-list.log
+ERLANG_MK_RECURSIVE_DOC_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-doc-deps-list.log
+ERLANG_MK_RECURSIVE_REL_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-rel-deps-list.log
+ERLANG_MK_RECURSIVE_TEST_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-test-deps-list.log
+ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-shell-deps-list.log
+
+ERLANG_MK_QUERY_DEPS_FILE = $(ERLANG_MK_TMP)/query-deps.log
+ERLANG_MK_QUERY_DOC_DEPS_FILE = $(ERLANG_MK_TMP)/query-doc-deps.log
+ERLANG_MK_QUERY_REL_DEPS_FILE = $(ERLANG_MK_TMP)/query-rel-deps.log
+ERLANG_MK_QUERY_TEST_DEPS_FILE = $(ERLANG_MK_TMP)/query-test-deps.log
+ERLANG_MK_QUERY_SHELL_DEPS_FILE = $(ERLANG_MK_TMP)/query-shell-deps.log
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: clean-app
+
+# Configuration.
+
+ERLC_OPTS ?= -Werror +debug_info +warn_export_vars +warn_shadow_vars \
+ +warn_obsolete_guard # +bin_opt_info +warn_export_all +warn_missing_spec
+COMPILE_FIRST ?=
+COMPILE_FIRST_PATHS = $(addprefix src/,$(addsuffix .erl,$(COMPILE_FIRST)))
+ERLC_EXCLUDE ?=
+ERLC_EXCLUDE_PATHS = $(addprefix src/,$(addsuffix .erl,$(ERLC_EXCLUDE)))
+
+ERLC_ASN1_OPTS ?=
+
+ERLC_MIB_OPTS ?=
+COMPILE_MIB_FIRST ?=
+COMPILE_MIB_FIRST_PATHS = $(addprefix mibs/,$(addsuffix .mib,$(COMPILE_MIB_FIRST)))
+
+# Verbosity.
+
+app_verbose_0 = @echo " APP " $(PROJECT);
+app_verbose_2 = set -x;
+app_verbose = $(app_verbose_$(V))
+
+appsrc_verbose_0 = @echo " APP " $(PROJECT).app.src;
+appsrc_verbose_2 = set -x;
+appsrc_verbose = $(appsrc_verbose_$(V))
+
+makedep_verbose_0 = @echo " DEPEND" $(PROJECT).d;
+makedep_verbose_2 = set -x;
+makedep_verbose = $(makedep_verbose_$(V))
+
+erlc_verbose_0 = @echo " ERLC " $(filter-out $(patsubst %,%.erl,$(ERLC_EXCLUDE)),\
+ $(filter %.erl %.core,$(?F)));
+erlc_verbose_2 = set -x;
+erlc_verbose = $(erlc_verbose_$(V))
+
+xyrl_verbose_0 = @echo " XYRL " $(filter %.xrl %.yrl,$(?F));
+xyrl_verbose_2 = set -x;
+xyrl_verbose = $(xyrl_verbose_$(V))
+
+asn1_verbose_0 = @echo " ASN1 " $(filter %.asn1,$(?F));
+asn1_verbose_2 = set -x;
+asn1_verbose = $(asn1_verbose_$(V))
+
+mib_verbose_0 = @echo " MIB " $(filter %.bin %.mib,$(?F));
+mib_verbose_2 = set -x;
+mib_verbose = $(mib_verbose_$(V))
+
+ifneq ($(wildcard src/),)
+
+# Targets.
+
+app:: $(if $(wildcard ebin/test),clean) deps
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d
+ $(verbose) $(MAKE) --no-print-directory app-build
+
+ifeq ($(wildcard src/$(PROJECT_MOD).erl),)
+define app_file
+{application, '$(PROJECT)', [
+ {description, "$(PROJECT_DESCRIPTION)"},
+ {vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP),
+ {id$(comma)$(space)"$(1)"}$(comma))
+ {modules, [$(call comma_list,$(2))]},
+ {registered, []},
+ {applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(foreach dep,$(DEPS),$(call dep_name,$(dep))))]},
+ {env, $(subst \,\\,$(PROJECT_ENV))}$(if $(findstring {,$(PROJECT_APP_EXTRA_KEYS)),$(comma)$(newline)$(tab)$(subst \,\\,$(PROJECT_APP_EXTRA_KEYS)),)
+]}.
+endef
+else
+define app_file
+{application, '$(PROJECT)', [
+ {description, "$(PROJECT_DESCRIPTION)"},
+ {vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP),
+ {id$(comma)$(space)"$(1)"}$(comma))
+ {modules, [$(call comma_list,$(2))]},
+ {registered, [$(call comma_list,$(PROJECT)_sup $(PROJECT_REGISTERED))]},
+ {applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(foreach dep,$(DEPS),$(call dep_name,$(dep))))]},
+ {mod, {$(PROJECT_MOD), []}},
+ {env, $(subst \,\\,$(PROJECT_ENV))}$(if $(findstring {,$(PROJECT_APP_EXTRA_KEYS)),$(comma)$(newline)$(tab)$(subst \,\\,$(PROJECT_APP_EXTRA_KEYS)),)
+]}.
+endef
+endif
+
+app-build: ebin/$(PROJECT).app
+ $(verbose) :
+
+# Source files.
+
+ALL_SRC_FILES := $(sort $(call core_find,src/,*))
+
+ERL_FILES := $(filter %.erl,$(ALL_SRC_FILES))
+CORE_FILES := $(filter %.core,$(ALL_SRC_FILES))
+
+# ASN.1 files.
+
+ifneq ($(wildcard asn1/),)
+ASN1_FILES = $(sort $(call core_find,asn1/,*.asn1))
+ERL_FILES += $(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES))))
+
+define compile_asn1
+ $(verbose) mkdir -p include/
+ $(asn1_verbose) erlc -v -I include/ -o asn1/ +noobj $(ERLC_ASN1_OPTS) $(1)
+ $(verbose) mv asn1/*.erl src/
+ -$(verbose) mv asn1/*.hrl include/
+ $(verbose) mv asn1/*.asn1db include/
+endef
+
+$(PROJECT).d:: $(ASN1_FILES)
+ $(if $(strip $?),$(call compile_asn1,$?))
+endif
+
+# SNMP MIB files.
+
+ifneq ($(wildcard mibs/),)
+MIB_FILES = $(sort $(call core_find,mibs/,*.mib))
+
+$(PROJECT).d:: $(COMPILE_MIB_FIRST_PATHS) $(MIB_FILES)
+ $(verbose) mkdir -p include/ priv/mibs/
+ $(mib_verbose) erlc -v $(ERLC_MIB_OPTS) -o priv/mibs/ -I priv/mibs/ $?
+ $(mib_verbose) erlc -o include/ -- $(addprefix priv/mibs/,$(patsubst %.mib,%.bin,$(notdir $?)))
+endif
+
+# Leex and Yecc files.
+
+XRL_FILES := $(filter %.xrl,$(ALL_SRC_FILES))
+XRL_ERL_FILES = $(addprefix src/,$(patsubst %.xrl,%.erl,$(notdir $(XRL_FILES))))
+ERL_FILES += $(XRL_ERL_FILES)
+
+YRL_FILES := $(filter %.yrl,$(ALL_SRC_FILES))
+YRL_ERL_FILES = $(addprefix src/,$(patsubst %.yrl,%.erl,$(notdir $(YRL_FILES))))
+ERL_FILES += $(YRL_ERL_FILES)
+
+$(PROJECT).d:: $(XRL_FILES) $(YRL_FILES)
+ $(if $(strip $?),$(xyrl_verbose) erlc -v -o src/ $(YRL_ERLC_OPTS) $?)
+
+# Erlang and Core Erlang files.
+
+define makedep.erl
+ E = ets:new(makedep, [bag]),
+ G = digraph:new([acyclic]),
+ ErlFiles = lists:usort(string:tokens("$(ERL_FILES)", " ")),
+ DepsDir = "$(call core_native_path,$(DEPS_DIR))",
+ AppsDir = "$(call core_native_path,$(APPS_DIR))",
+ DepsDirsSrc = "$(if $(wildcard $(DEPS_DIR)/*/src), $(call core_native_path,$(wildcard $(DEPS_DIR)/*/src)))",
+ DepsDirsInc = "$(if $(wildcard $(DEPS_DIR)/*/include), $(call core_native_path,$(wildcard $(DEPS_DIR)/*/include)))",
+ AppsDirsSrc = "$(if $(wildcard $(APPS_DIR)/*/src), $(call core_native_path,$(wildcard $(APPS_DIR)/*/src)))",
+ AppsDirsInc = "$(if $(wildcard $(APPS_DIR)/*/include), $(call core_native_path,$(wildcard $(APPS_DIR)/*/include)))",
+ DepsDirs = lists:usort(string:tokens(DepsDirsSrc++DepsDirsInc, " ")),
+ AppsDirs = lists:usort(string:tokens(AppsDirsSrc++AppsDirsInc, " ")),
+ Modules = [{list_to_atom(filename:basename(F, ".erl")), F} || F <- ErlFiles],
+ Add = fun (Mod, Dep) ->
+ case lists:keyfind(Dep, 1, Modules) of
+ false -> ok;
+ {_, DepFile} ->
+ {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+ ets:insert(E, {ModFile, DepFile}),
+ digraph:add_vertex(G, Mod),
+ digraph:add_vertex(G, Dep),
+ digraph:add_edge(G, Mod, Dep)
+ end
+ end,
+ AddHd = fun (F, Mod, DepFile) ->
+ case file:open(DepFile, [read]) of
+ {error, enoent} ->
+ ok;
+ {ok, Fd} ->
+ {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+ case ets:match(E, {ModFile, DepFile}) of
+ [] ->
+ ets:insert(E, {ModFile, DepFile}),
+					F(F, Fd, Mod, 0);
+ _ -> ok
+ end
+ end
+ end,
+ SearchHrl = fun
+ F(_Hrl, []) -> {error,enoent};
+ F(Hrl, [Dir|Dirs]) ->
+ HrlF = filename:join([Dir,Hrl]),
+ case filelib:is_file(HrlF) of
+ true ->
+ {ok, HrlF};
+ false -> F(Hrl,Dirs)
+ end
+ end,
+ Attr = fun
+ (_F, Mod, behavior, Dep) ->
+ Add(Mod, Dep);
+ (_F, Mod, behaviour, Dep) ->
+ Add(Mod, Dep);
+ (_F, Mod, compile, {parse_transform, Dep}) ->
+ Add(Mod, Dep);
+ (_F, Mod, compile, Opts) when is_list(Opts) ->
+ case proplists:get_value(parse_transform, Opts) of
+ undefined -> ok;
+ Dep -> Add(Mod, Dep)
+ end;
+ (F, Mod, include, Hrl) ->
+ case SearchHrl(Hrl, ["src", "include",AppsDir,DepsDir]++AppsDirs++DepsDirs) of
+ {ok, FoundHrl} -> AddHd(F, Mod, FoundHrl);
+ {error, _} -> false
+ end;
+ (F, Mod, include_lib, Hrl) ->
+ case SearchHrl(Hrl, ["src", "include",AppsDir,DepsDir]++AppsDirs++DepsDirs) of
+ {ok, FoundHrl} -> AddHd(F, Mod, FoundHrl);
+ {error, _} -> false
+ end;
+ (F, Mod, import, {Imp, _}) ->
+ IsFile =
+ case lists:keyfind(Imp, 1, Modules) of
+ false -> false;
+ {_, FilePath} -> filelib:is_file(FilePath)
+ end,
+ case IsFile of
+ false -> ok;
+ true -> Add(Mod, Imp)
+ end;
+ (_, _, _, _) -> ok
+ end,
+ MakeDepend = fun
+ (F, Fd, Mod, StartLocation) ->
+ {ok, Filename} = file:pid2name(Fd),
+ case io:parse_erl_form(Fd, undefined, StartLocation) of
+ {ok, AbsData, EndLocation} ->
+ case AbsData of
+ {attribute, _, Key, Value} ->
+ Attr(F, Mod, Key, Value),
+ F(F, Fd, Mod, EndLocation);
+ _ -> F(F, Fd, Mod, EndLocation)
+ end;
+ {eof, _ } -> file:close(Fd);
+ {error, ErrorDescription } ->
+ file:close(Fd);
+ {error, ErrorInfo, ErrorLocation} ->
+ F(F, Fd, Mod, ErrorLocation)
+ end,
+ ok
+ end,
+ [begin
+ Mod = list_to_atom(filename:basename(F, ".erl")),
+ case file:open(F, [read]) of
+		{ok, Fd} -> MakeDepend(MakeDepend, Fd, Mod, 0);
+ {error, enoent} -> ok
+ end
+ end || F <- ErlFiles],
+ Depend = sofs:to_external(sofs:relation_to_family(sofs:relation(ets:tab2list(E)))),
+ CompileFirst = [X || X <- lists:reverse(digraph_utils:topsort(G)), [] =/= digraph:in_neighbours(G, X)],
+ TargetPath = fun(Target) ->
+ case lists:keyfind(Target, 1, Modules) of
+ false -> "";
+ {_, DepFile} ->
+ DirSubname = tl(string:tokens(filename:dirname(DepFile), "/")),
+ string:join(DirSubname ++ [atom_to_list(Target)], "/")
+ end
+ end,
+ Output0 = [
+ "# Generated by Erlang.mk. Edit at your own risk!\n\n",
+ [[F, "::", [[" ", D] || D <- Deps], "; @touch \$$@\n"] || {F, Deps} <- Depend],
+ "\nCOMPILE_FIRST +=", [[" ", TargetPath(CF)] || CF <- CompileFirst], "\n"
+ ],
+ Output = case "é" of
+ [233] -> unicode:characters_to_binary(Output0);
+ _ -> Output0
+ end,
+ ok = file:write_file("$(1)", Output),
+ halt()
+endef
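+
+# The generated $(PROJECT).d file is plain Makefile syntax. For a hypothetical
+# src/a.erl that includes include/a.hrl and uses a b parse_transform, the
+# output might look roughly like:
+#
+#   # Generated by Erlang.mk. Edit at your own risk!
+#
+#   src/a.erl:: include/a.hrl src/b.erl; @touch $@
+#   COMPILE_FIRST += b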
+
+ifeq ($(if $(NO_MAKEDEP),$(wildcard $(PROJECT).d),),)
+$(PROJECT).d:: $(ERL_FILES) $(call core_find,include/,*.hrl) $(MAKEFILE_LIST)
+ $(makedep_verbose) $(call erlang,$(call makedep.erl,$@))
+endif
+
+ifeq ($(IS_APP)$(IS_DEP),)
+ifneq ($(words $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES)),0)
+# Rebuild everything when the Makefile changes.
+$(ERLANG_MK_TMP)/last-makefile-change: $(MAKEFILE_LIST) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES); \
+ touch -c $(PROJECT).d; \
+ fi
+ $(verbose) touch $@
+
+$(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES):: $(ERLANG_MK_TMP)/last-makefile-change
+ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change
+endif
+endif
+
+$(PROJECT).d::
+ $(verbose) :
+
+include $(wildcard $(PROJECT).d)
+
+ebin/$(PROJECT).app:: ebin/
+
+ebin/:
+ $(verbose) mkdir -p ebin/
+
+define compile_erl
+ $(erlc_verbose) erlc -v $(if $(IS_DEP),$(filter-out -Werror,$(ERLC_OPTS)),$(ERLC_OPTS)) -o ebin/ \
+ -pa ebin/ -I include/ $(filter-out $(ERLC_EXCLUDE_PATHS),$(COMPILE_FIRST_PATHS) $(1))
+endef
+
+define validate_app_file
+ case file:consult("ebin/$(PROJECT).app") of
+ {ok, _} -> halt();
+ _ -> halt(1)
+ end
+endef
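+
+# Illustrative standalone equivalent of validate_app_file (PROJECT=my_app is
+# a placeholder): the rendered .app file must parse with file:consult/1.
+#
+#   erl -noshell -eval \
+#     'case file:consult("ebin/my_app.app") of {ok,_} -> halt(); _ -> halt(1) end.'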
+
+ebin/$(PROJECT).app:: $(ERL_FILES) $(CORE_FILES) $(wildcard src/$(PROJECT).app.src)
+ $(eval FILES_TO_COMPILE := $(filter-out src/$(PROJECT).app.src,$?))
+ $(if $(strip $(FILES_TO_COMPILE)),$(call compile_erl,$(FILES_TO_COMPILE)))
+# Older git versions do not have the --first-parent flag. Do without in that case.
+ $(eval GITDESCRIBE := $(shell git describe --dirty --abbrev=7 --tags --always --first-parent 2>/dev/null \
+ || git describe --dirty --abbrev=7 --tags --always 2>/dev/null || true))
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(filter-out $(ERLC_EXCLUDE_PATHS),$(ERL_FILES) $(CORE_FILES) $(BEAM_FILES)))))))
+ifeq ($(wildcard src/$(PROJECT).app.src),)
+ $(app_verbose) printf '$(subst %,%%,$(subst $(newline),\n,$(subst ','\'',$(call app_file,$(GITDESCRIBE),$(MODULES)))))' \
+ > ebin/$(PROJECT).app
+ $(verbose) if ! $(call erlang,$(call validate_app_file)); then \
+ echo "The .app file produced is invalid. Please verify the value of PROJECT_ENV." >&2; \
+ exit 1; \
+ fi
+else
+ $(verbose) if [ -z "$$(grep -e '^[^%]*{\s*modules\s*,' src/$(PROJECT).app.src)" ]; then \
+ echo "Empty modules entry not found in $(PROJECT).app.src. Please consult the erlang.mk documentation for instructions." >&2; \
+ exit 1; \
+ fi
+ $(appsrc_verbose) cat src/$(PROJECT).app.src \
+ | sed "s/{[[:space:]]*modules[[:space:]]*,[[:space:]]*\[\]}/{modules, \[$(call comma_list,$(MODULES))\]}/" \
+ | sed "s/{id,[[:space:]]*\"git\"}/{id, \"$(subst /,\/,$(GITDESCRIBE))\"}/" \
+ > ebin/$(PROJECT).app
+endif
+ifneq ($(wildcard src/$(PROJECT).appup),)
+ $(verbose) cp src/$(PROJECT).appup ebin/
+endif
+
+clean:: clean-app
+
+clean-app:
+ $(gen_verbose) rm -rf $(PROJECT).d ebin/ priv/mibs/ $(XRL_ERL_FILES) $(YRL_ERL_FILES) \
+ $(addprefix include/,$(patsubst %.mib,%.hrl,$(notdir $(MIB_FILES)))) \
+ $(addprefix include/,$(patsubst %.asn1,%.hrl,$(notdir $(ASN1_FILES)))) \
+ $(addprefix include/,$(patsubst %.asn1,%.asn1db,$(notdir $(ASN1_FILES)))) \
+ $(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES))))
+
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Viktor Söderqvist <viktor@zuiderkwast.se>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: docs-deps
+
+# Configuration.
+
+ALL_DOC_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(DOC_DEPS))
+
+# Targets.
+
+$(foreach dep,$(DOC_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+doc-deps:
+else
+doc-deps: $(ALL_DOC_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_DOC_DEPS_DIRS) ; do $(MAKE) -C $$dep IS_DEP=1; done
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: rel-deps
+
+# Configuration.
+
+ALL_REL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(REL_DEPS))
+
+# Targets.
+
+$(foreach dep,$(REL_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+rel-deps:
+else
+rel-deps: $(ALL_REL_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_REL_DEPS_DIRS) ; do $(MAKE) -C $$dep; done
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: test-deps test-dir test-build clean-test-dir
+
+# Configuration.
+
+TEST_DIR ?= $(CURDIR)/test
+
+ALL_TEST_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(TEST_DEPS))
+
+TEST_ERLC_OPTS ?= +debug_info +warn_export_vars +warn_shadow_vars +warn_obsolete_guard
+TEST_ERLC_OPTS += -DTEST=1
+
+# Targets.
+
+$(foreach dep,$(TEST_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+test-deps:
+else
+test-deps: $(ALL_TEST_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_TEST_DEPS_DIRS) ; do \
+ if [ -z "$(strip $(FULL))" ] && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ else \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ fi \
+ done
+endif
+
+ifneq ($(wildcard $(TEST_DIR)),)
+test-dir: $(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build
+ @:
+
+test_erlc_verbose_0 = @echo " ERLC " $(filter-out $(patsubst %,%.erl,$(ERLC_EXCLUDE)),\
+ $(filter %.erl %.core,$(notdir $(FILES_TO_COMPILE))));
+test_erlc_verbose_2 = set -x;
+test_erlc_verbose = $(test_erlc_verbose_$(V))
+
+define compile_test_erl
+ $(test_erlc_verbose) erlc -v $(TEST_ERLC_OPTS) -o $(TEST_DIR) \
+ -pa ebin/ -I include/ $(1)
+endef
+
+ERL_TEST_FILES = $(call core_find,$(TEST_DIR)/,*.erl)
+$(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build: $(ERL_TEST_FILES) $(MAKEFILE_LIST)
+ $(eval FILES_TO_COMPILE := $(if $(filter $(MAKEFILE_LIST),$?),$(filter $(ERL_TEST_FILES),$^),$?))
+ $(if $(strip $(FILES_TO_COMPILE)),$(call compile_test_erl,$(FILES_TO_COMPILE)) && touch $@)
+endif
+
+test-build:: IS_TEST=1
+test-build:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build:: $(if $(wildcard src),$(if $(wildcard ebin/test),,clean)) $(if $(IS_APP),,deps test-deps)
+# We already compiled everything when IS_APP=1.
+ifndef IS_APP
+ifneq ($(wildcard src),)
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(verbose) $(MAKE) --no-print-directory app-build ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(gen_verbose) touch ebin/test
+endif
+ifneq ($(wildcard $(TEST_DIR)),)
+ $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+endif
+endif
+
+# Roughly the same as test-build, but when IS_APP=1.
+# We only care about compiling the current application.
+ifdef IS_APP
+test-build-app:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build-app:: deps test-deps
+ifneq ($(wildcard src),)
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(verbose) $(MAKE) --no-print-directory app-build ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(gen_verbose) touch ebin/test
+endif
+ifneq ($(wildcard $(TEST_DIR)),)
+ $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+endif
+endif
+
+clean:: clean-test-dir
+
+clean-test-dir:
+ifneq ($(wildcard $(TEST_DIR)/*.beam),)
+ $(gen_verbose) rm -f $(TEST_DIR)/*.beam $(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: rebar.config
+
+# We strip out -Werror because we don't want to fail due to
+# warnings when used as a dependency.
+
+compat_prepare_erlc_opts = $(shell echo "$1" | sed 's/, */,/g')
+
+define compat_convert_erlc_opts
+$(if $(filter-out -Werror,$1),\
+ $(if $(findstring +,$1),\
+ $(shell echo $1 | cut -b 2-)))
+endef
+
+define compat_erlc_opts_to_list
+[$(call comma_list,$(foreach o,$(call compat_prepare_erlc_opts,$1),$(call compat_convert_erlc_opts,$o)))]
+endef
+
+define compat_rebar_config
+{deps, [
+$(call comma_list,$(foreach d,$(DEPS),\
+ $(if $(filter hex,$(call dep_fetch,$d)),\
+ {$(call dep_name,$d)$(comma)"$(call dep_repo,$d)"},\
+ {$(call dep_name,$d)$(comma)".*"$(comma){git,"$(call dep_repo,$d)"$(comma)"$(call dep_commit,$d)"}})))
+]}.
+{erl_opts, $(call compat_erlc_opts_to_list,$(ERLC_OPTS))}.
+endef
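+
+# Sketch of the rendered rebar.config, assuming a single hypothetical git
+# dependency and ERLC_OPTS = +debug_info:
+#
+#   {deps, [
+#   {cowlib,".*",{git,"https://github.com/ninenines/cowlib","master"}}
+#   ]}.
+#   {erl_opts, [debug_info]}.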
+
+rebar.config:
+ $(gen_verbose) $(call core_render,compat_rebar_config,rebar.config)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter asciideck,$(DEPS) $(DOC_DEPS)),asciideck)
+
+.PHONY: asciidoc asciidoc-guide asciidoc-manual install-asciidoc distclean-asciidoc-guide distclean-asciidoc-manual
+
+# Core targets.
+
+docs:: asciidoc
+
+distclean:: distclean-asciidoc-guide distclean-asciidoc-manual
+
+# Plugin-specific targets.
+
+asciidoc: asciidoc-guide asciidoc-manual
+
+# User guide.
+
+ifeq ($(wildcard doc/src/guide/book.asciidoc),)
+asciidoc-guide:
+else
+asciidoc-guide: distclean-asciidoc-guide doc-deps
+ a2x -v -f pdf doc/src/guide/book.asciidoc && mv doc/src/guide/book.pdf doc/guide.pdf
+ a2x -v -f chunked doc/src/guide/book.asciidoc && mv doc/src/guide/book.chunked/ doc/html/
+
+distclean-asciidoc-guide:
+ $(gen_verbose) rm -rf doc/html/ doc/guide.pdf
+endif
+
+# Man pages.
+
+ASCIIDOC_MANUAL_FILES := $(wildcard doc/src/manual/*.asciidoc)
+
+ifeq ($(ASCIIDOC_MANUAL_FILES),)
+asciidoc-manual:
+else
+
+# Configuration.
+
+MAN_INSTALL_PATH ?= /usr/local/share/man
+MAN_SECTIONS ?= 3 7
+MAN_PROJECT ?= $(shell echo $(PROJECT) | sed 's/^./\U&\E/')
+MAN_VERSION ?= $(PROJECT_VERSION)
+
+# Plugin-specific targets.
+
+define asciidoc2man.erl
+try
+ [begin
+ io:format(" ADOC ~s~n", [F]),
+ ok = asciideck:to_manpage(asciideck:parse_file(F), #{
+ compress => gzip,
+ outdir => filename:dirname(F),
+ extra2 => "$(MAN_PROJECT) $(MAN_VERSION)",
+ extra3 => "$(MAN_PROJECT) Function Reference"
+ })
+ end || F <- [$(shell echo $(addprefix $(comma)\",$(addsuffix \",$1)) | sed 's/^.//')]],
+ halt(0)
+catch C:E:S ->
+	io:format("Exception ~p:~p~nStacktrace: ~p~n", [C, E, S]),
+ halt(1)
+end.
+endef
+
+asciidoc-manual:: doc-deps
+
+asciidoc-manual:: $(ASCIIDOC_MANUAL_FILES)
+ $(gen_verbose) $(call erlang,$(call asciidoc2man.erl,$?))
+ $(verbose) $(foreach s,$(MAN_SECTIONS),mkdir -p doc/man$s/ && mv doc/src/manual/*.$s.gz doc/man$s/;)
+
+install-docs:: install-asciidoc
+
+install-asciidoc: asciidoc-manual
+ $(foreach s,$(MAN_SECTIONS),\
+ mkdir -p $(MAN_INSTALL_PATH)/man$s/ && \
+ install -g `id -g` -o `id -u` -m 0644 doc/man$s/*.gz $(MAN_INSTALL_PATH)/man$s/;)
+
+distclean-asciidoc-manual:
+ $(gen_verbose) rm -rf $(addprefix doc/man,$(MAN_SECTIONS))
+endif
+endif
+
+# Copyright (c) 2014-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: bootstrap bootstrap-lib bootstrap-rel new list-templates
+
+# Core targets.
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Bootstrap targets:" \
+ " bootstrap Generate a skeleton of an OTP application" \
+ " bootstrap-lib Generate a skeleton of an OTP library" \
+ " bootstrap-rel Generate the files needed to build a release" \
+ " new-app in=NAME Create a new local OTP application NAME" \
+ " new-lib in=NAME Create a new local OTP library NAME" \
+ " new t=TPL n=NAME Generate a module NAME based on the template TPL" \
+		"  new t=T n=N in=APP  Generate a module N based on the template T in APP" \
+ " list-templates List available templates"
+
+# Bootstrap templates.
+
+define bs_appsrc
+{application, $p, [
+ {description, ""},
+ {vsn, "0.1.0"},
+ {id, "git"},
+ {modules, []},
+ {registered, []},
+ {applications, [
+ kernel,
+ stdlib
+ ]},
+ {mod, {$p_app, []}},
+ {env, []}
+]}.
+endef
+
+define bs_appsrc_lib
+{application, $p, [
+ {description, ""},
+ {vsn, "0.1.0"},
+ {id, "git"},
+ {modules, []},
+ {registered, []},
+ {applications, [
+ kernel,
+ stdlib
+ ]}
+]}.
+endef
+
+# To prevent autocompletion issues with ZSH, we add "include erlang.mk"
+# separately during the actual bootstrap.
+define bs_Makefile
+PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.1.0
+$(if $(SP),
+# Whitespace to be used when creating files from templates.
+SP = $(SP)
+)
+endef
+
+define bs_apps_Makefile
+PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.1.0
+$(if $(SP),
+# Whitespace to be used when creating files from templates.
+SP = $(SP)
+)
+# Make sure we know where the applications are located.
+ROOT_DIR ?= $(call core_relpath,$(dir $(ERLANG_MK_FILENAME)),$(APPS_DIR)/app)
+APPS_DIR ?= ..
+DEPS_DIR ?= $(call core_relpath,$(DEPS_DIR),$(APPS_DIR)/app)
+
+include $$(ROOT_DIR)/erlang.mk
+endef
+
+define bs_app
+-module($p_app).
+-behaviour(application).
+
+-export([start/2]).
+-export([stop/1]).
+
+start(_Type, _Args) ->
+ $p_sup:start_link().
+
+stop(_State) ->
+ ok.
+endef
+
+define bs_relx_config
+{release, {$p_release, "1"}, [$p, sasl, runtime_tools]}.
+{extended_start_script, true}.
+{sys_config, "config/sys.config"}.
+{vm_args, "config/vm.args"}.
+endef
+
+define bs_sys_config
+[
+].
+endef
+
+define bs_vm_args
+-name $p@127.0.0.1
+-setcookie $p
+-heart
+endef
+
+# Normal templates.
+
+define tpl_supervisor
+-module($(n)).
+-behaviour(supervisor).
+
+-export([start_link/0]).
+-export([init/1]).
+
+start_link() ->
+ supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+init([]) ->
+ Procs = [],
+ {ok, {{one_for_one, 1, 5}, Procs}}.
+endef
+
+define tpl_gen_server
+-module($(n)).
+-behaviour(gen_server).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_server.
+-export([init/1]).
+-export([handle_call/3]).
+-export([handle_cast/2]).
+-export([handle_info/2]).
+-export([terminate/2]).
+-export([code_change/3]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_server:start_link(?MODULE, [], []).
+
+%% gen_server.
+
+init([]) ->
+ {ok, #state{}}.
+
+handle_call(_Request, _From, State) ->
+ {reply, ignored, State}.
+
+handle_cast(_Msg, State) ->
+ {noreply, State}.
+
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+endef
+
+define tpl_module
+-module($(n)).
+-export([]).
+endef
+
+define tpl_cowboy_http
+-module($(n)).
+-behaviour(cowboy_http_handler).
+
+-export([init/3]).
+-export([handle/2]).
+-export([terminate/3]).
+
+-record(state, {
+}).
+
+init(_, Req, _Opts) ->
+ {ok, Req, #state{}}.
+
+handle(Req, State=#state{}) ->
+ {ok, Req2} = cowboy_req:reply(200, Req),
+ {ok, Req2, State}.
+
+terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_gen_fsm
+-module($(n)).
+-behaviour(gen_fsm).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_fsm.
+-export([init/1]).
+-export([state_name/2]).
+-export([handle_event/3]).
+-export([state_name/3]).
+-export([handle_sync_event/4]).
+-export([handle_info/3]).
+-export([terminate/3]).
+-export([code_change/4]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_fsm:start_link(?MODULE, [], []).
+
+%% gen_fsm.
+
+init([]) ->
+ {ok, state_name, #state{}}.
+
+state_name(_Event, StateData) ->
+ {next_state, state_name, StateData}.
+
+handle_event(_Event, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+state_name(_Event, _From, StateData) ->
+ {reply, ignored, state_name, StateData}.
+
+handle_sync_event(_Event, _From, StateName, StateData) ->
+ {reply, ignored, StateName, StateData}.
+
+handle_info(_Info, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+terminate(_Reason, _StateName, _StateData) ->
+ ok.
+
+code_change(_OldVsn, StateName, StateData, _Extra) ->
+ {ok, StateName, StateData}.
+endef
+
+define tpl_gen_statem
+-module($(n)).
+-behaviour(gen_statem).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_statem.
+-export([callback_mode/0]).
+-export([init/1]).
+-export([state_name/3]).
+-export([handle_event/4]).
+-export([terminate/3]).
+-export([code_change/4]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_statem:start_link(?MODULE, [], []).
+
+%% gen_statem.
+
+callback_mode() ->
+ state_functions.
+
+init([]) ->
+ {ok, state_name, #state{}}.
+
+state_name(_EventType, _EventData, StateData) ->
+ {next_state, state_name, StateData}.
+
+handle_event(_EventType, _EventData, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+terminate(_Reason, _StateName, _StateData) ->
+ ok.
+
+code_change(_OldVsn, StateName, StateData, _Extra) ->
+ {ok, StateName, StateData}.
+endef
+
+define tpl_cowboy_loop
+-module($(n)).
+-behaviour(cowboy_loop_handler).
+
+-export([init/3]).
+-export([info/3]).
+-export([terminate/3]).
+
+-record(state, {
+}).
+
+init(_, Req, _Opts) ->
+ {loop, Req, #state{}, 5000, hibernate}.
+
+info(_Info, Req, State) ->
+ {loop, Req, State, hibernate}.
+
+terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_cowboy_rest
+-module($(n)).
+
+-export([init/3]).
+-export([content_types_provided/2]).
+-export([get_html/2]).
+
+init(_, _Req, _Opts) ->
+ {upgrade, protocol, cowboy_rest}.
+
+content_types_provided(Req, State) ->
+ {[{{<<"text">>, <<"html">>, '*'}, get_html}], Req, State}.
+
+get_html(Req, State) ->
+ {<<"<html><body>This is REST!</body></html>">>, Req, State}.
+endef
+
+define tpl_cowboy_ws
+-module($(n)).
+-behaviour(cowboy_websocket_handler).
+
+-export([init/3]).
+-export([websocket_init/3]).
+-export([websocket_handle/3]).
+-export([websocket_info/3]).
+-export([websocket_terminate/3]).
+
+-record(state, {
+}).
+
+init(_, _, _) ->
+ {upgrade, protocol, cowboy_websocket}.
+
+websocket_init(_, Req, _Opts) ->
+ Req2 = cowboy_req:compact(Req),
+ {ok, Req2, #state{}}.
+
+websocket_handle({text, Data}, Req, State) ->
+ {reply, {text, Data}, Req, State};
+websocket_handle({binary, Data}, Req, State) ->
+ {reply, {binary, Data}, Req, State};
+websocket_handle(_Frame, Req, State) ->
+ {ok, Req, State}.
+
+websocket_info(_Info, Req, State) ->
+ {ok, Req, State}.
+
+websocket_terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_ranch_protocol
+-module($(n)).
+-behaviour(ranch_protocol).
+
+-export([start_link/4]).
+-export([init/4]).
+
+-type opts() :: [].
+-export_type([opts/0]).
+
+-record(state, {
+ socket :: inet:socket(),
+ transport :: module()
+}).
+
+start_link(Ref, Socket, Transport, Opts) ->
+ Pid = spawn_link(?MODULE, init, [Ref, Socket, Transport, Opts]),
+ {ok, Pid}.
+
+-spec init(ranch:ref(), inet:socket(), module(), opts()) -> ok.
+init(Ref, Socket, Transport, _Opts) ->
+ ok = ranch:accept_ack(Ref),
+ loop(#state{socket=Socket, transport=Transport}).
+
+loop(State) ->
+ loop(State).
+endef
+
+# Plugin-specific targets.
+
+ifndef WS
+ifdef SP
+WS = $(subst a,,a $(wordlist 1,$(SP),a a a a a a a a a a a a a a a a a a a a))
+else
+WS = $(tab)
+endif
+endif
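+
+# SP sets the number of spaces used to indent generated files; when it is not
+# set, a single tab is used. For example (illustrative):
+#
+#   make bootstrap SP=4    # skeleton files indented with 4 spaces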
+
+bootstrap:
+ifneq ($(wildcard src/),)
+ $(error Error: src/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(eval n := $(PROJECT)_sup)
+ $(verbose) $(call core_render,bs_Makefile,Makefile)
+ $(verbose) echo "include erlang.mk" >> Makefile
+ $(verbose) mkdir src/
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc,src/$(PROJECT).app.src)
+endif
+ $(verbose) $(call core_render,bs_app,src/$(PROJECT)_app.erl)
+ $(verbose) $(call core_render,tpl_supervisor,src/$(PROJECT)_sup.erl)
+
+bootstrap-lib:
+ifneq ($(wildcard src/),)
+ $(error Error: src/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(verbose) $(call core_render,bs_Makefile,Makefile)
+ $(verbose) echo "include erlang.mk" >> Makefile
+ $(verbose) mkdir src/
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc_lib,src/$(PROJECT).app.src)
+endif
+
+bootstrap-rel:
+ifneq ($(wildcard relx.config),)
+ $(error Error: relx.config already exists)
+endif
+ifneq ($(wildcard config/),)
+ $(error Error: config/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(verbose) $(call core_render,bs_relx_config,relx.config)
+ $(verbose) mkdir config/
+ $(verbose) $(call core_render,bs_sys_config,config/sys.config)
+ $(verbose) $(call core_render,bs_vm_args,config/vm.args)
+
+new-app:
+ifndef in
+ $(error Usage: $(MAKE) new-app in=APP)
+endif
+ifneq ($(wildcard $(APPS_DIR)/$in),)
+ $(error Error: Application $in already exists)
+endif
+ $(eval p := $(in))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(eval n := $(in)_sup)
+ $(verbose) mkdir -p $(APPS_DIR)/$p/src/
+ $(verbose) $(call core_render,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile)
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc,$(APPS_DIR)/$p/src/$p.app.src)
+endif
+ $(verbose) $(call core_render,bs_app,$(APPS_DIR)/$p/src/$p_app.erl)
+ $(verbose) $(call core_render,tpl_supervisor,$(APPS_DIR)/$p/src/$p_sup.erl)
+
+new-lib:
+ifndef in
+ $(error Usage: $(MAKE) new-lib in=APP)
+endif
+ifneq ($(wildcard $(APPS_DIR)/$in),)
+ $(error Error: Application $in already exists)
+endif
+ $(eval p := $(in))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(verbose) mkdir -p $(APPS_DIR)/$p/src/
+ $(verbose) $(call core_render,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile)
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc_lib,$(APPS_DIR)/$p/src/$p.app.src)
+endif
+
+new:
+ifeq ($(wildcard src/)$(in),)
+ $(error Error: src/ directory does not exist)
+endif
+ifndef t
+ $(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP])
+endif
+ifndef n
+ $(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP])
+endif
+ifdef in
+ $(verbose) $(call core_render,tpl_$(t),$(APPS_DIR)/$(in)/src/$(n).erl)
+else
+ $(verbose) $(call core_render,tpl_$(t),src/$(n).erl)
+endif
+
+list-templates:
+	$(verbose) echo Available templates:
+ $(verbose) printf " %s\n" $(sort $(patsubst tpl_%,%,$(filter tpl_%,$(.VARIABLES))))
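+
+# Usage examples (illustrative; my_server and my_app are placeholder names):
+#
+#   make new t=gen_server n=my_server         # writes src/my_server.erl
+#   make new t=supervisor n=my_sup in=my_app  # writes $(APPS_DIR)/my_app/src/my_sup.erl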
+
+# Copyright (c) 2014-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: clean-c_src distclean-c_src-env
+
+# Configuration.
+
+C_SRC_DIR ?= $(CURDIR)/c_src
+C_SRC_ENV ?= $(C_SRC_DIR)/env.mk
+C_SRC_OUTPUT ?= $(CURDIR)/priv/$(PROJECT)
+C_SRC_TYPE ?= shared
+
+# System type and C compiler/flags.
+
+ifeq ($(PLATFORM),msys2)
+ C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?= .exe
+ C_SRC_OUTPUT_SHARED_EXTENSION ?= .dll
+else
+ C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?=
+ C_SRC_OUTPUT_SHARED_EXTENSION ?= .so
+endif
+
+ifeq ($(C_SRC_TYPE),shared)
+ C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_SHARED_EXTENSION)
+else
+ C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_EXECUTABLE_EXTENSION)
+endif
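+
+# Projects can override C_SRC_TYPE in their Makefile to build a standalone
+# executable instead of a shared library, e.g. (illustrative):
+#
+#   C_SRC_TYPE = executable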
+
+ifeq ($(PLATFORM),msys2)
+# We hardcode the compiler used on MSYS2. The default CC=cc does not
+# produce working code, and neither does the "gcc" MSYS2 package.
+ CC = /mingw64/bin/gcc
+ export CC
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),darwin)
+ CC ?= cc
+ CFLAGS ?= -O3 -std=c99 -arch x86_64 -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -arch x86_64 -Wall
+ LDFLAGS ?= -arch x86_64 -flat_namespace -undefined suppress
+else ifeq ($(PLATFORM),freebsd)
+ CC ?= cc
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),linux)
+ CC ?= gcc
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+endif
+
+ifneq ($(PLATFORM),msys2)
+ CFLAGS += -fPIC
+ CXXFLAGS += -fPIC
+endif
+
+CFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
+CXXFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
+
+LDLIBS += -L"$(ERL_INTERFACE_LIB_DIR)" -lei
+
+# Verbosity.
+
+c_verbose_0 = @echo " C " $(filter-out $(notdir $(MAKEFILE_LIST) $(C_SRC_ENV)),$(^F));
+c_verbose = $(c_verbose_$(V))
+
+cpp_verbose_0 = @echo " CPP " $(filter-out $(notdir $(MAKEFILE_LIST) $(C_SRC_ENV)),$(^F));
+cpp_verbose = $(cpp_verbose_$(V))
+
+link_verbose_0 = @echo " LD " $(@F);
+link_verbose = $(link_verbose_$(V))
+
+# Targets.
+
+ifeq ($(wildcard $(C_SRC_DIR)),)
+else ifneq ($(wildcard $(C_SRC_DIR)/Makefile),)
+app:: app-c_src
+
+test-build:: app-c_src
+
+app-c_src:
+ $(MAKE) -C $(C_SRC_DIR)
+
+clean::
+ $(MAKE) -C $(C_SRC_DIR) clean
+
+else
+
+ifeq ($(SOURCES),)
+SOURCES := $(sort $(foreach pat,*.c *.C *.cc *.cpp,$(call core_find,$(C_SRC_DIR)/,$(pat))))
+endif
+OBJECTS = $(addsuffix .o, $(basename $(SOURCES)))
+
+COMPILE_C = $(c_verbose) $(CC) $(CFLAGS) $(CPPFLAGS) -c
+COMPILE_CPP = $(cpp_verbose) $(CXX) $(CXXFLAGS) $(CPPFLAGS) -c
+
+app:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
+
+test-build:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
+
+$(C_SRC_OUTPUT_FILE): $(OBJECTS)
+ $(verbose) mkdir -p $(dir $@)
+ $(link_verbose) $(CC) $(OBJECTS) \
+ $(LDFLAGS) $(if $(filter $(C_SRC_TYPE),shared),-shared) $(LDLIBS) \
+ -o $(C_SRC_OUTPUT_FILE)
+
+$(OBJECTS): $(MAKEFILE_LIST) $(C_SRC_ENV)
+
+%.o: %.c
+ $(COMPILE_C) $(OUTPUT_OPTION) $<
+
+%.o: %.cc
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+%.o: %.C
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+%.o: %.cpp
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+clean:: clean-c_src
+
+clean-c_src:
+ $(gen_verbose) rm -f $(C_SRC_OUTPUT_FILE) $(OBJECTS)
+
+endif
+
+ifneq ($(wildcard $(C_SRC_DIR)),)
+ERL_ERTS_DIR = $(shell $(ERL) -eval 'io:format("~s~n", [code:lib_dir(erts)]), halt().')
+
+$(C_SRC_ENV):
+ $(verbose) $(ERL) -eval "file:write_file(\"$(call core_native_path,$(C_SRC_ENV))\", \
+ io_lib:format( \
+ \"# Generated by Erlang.mk. Edit at your own risk!~n~n\" \
+ \"ERTS_INCLUDE_DIR ?= ~s/erts-~s/include/~n\" \
+ \"ERL_INTERFACE_INCLUDE_DIR ?= ~s~n\" \
+ \"ERL_INTERFACE_LIB_DIR ?= ~s~n\" \
+ \"ERTS_DIR ?= $(ERL_ERTS_DIR)~n\", \
+ [code:root_dir(), erlang:system_info(version), \
+ code:lib_dir(erl_interface, include), \
+ code:lib_dir(erl_interface, lib)])), \
+ halt()."
+
+distclean:: distclean-c_src-env
+
+distclean-c_src-env:
+ $(gen_verbose) rm -f $(C_SRC_ENV)
+
+-include $(C_SRC_ENV)
+
+ifneq ($(ERL_ERTS_DIR),$(ERTS_DIR))
+$(shell rm -f $(C_SRC_ENV))
+endif
+endif
+
+# Templates.
+
+define bs_c_nif
+#include "erl_nif.h"
+
+static int loads = 0;
+
+static int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
+{
+ /* Initialize private data. */
+ *priv_data = NULL;
+
+ loads++;
+
+ return 0;
+}
+
+static int upgrade(ErlNifEnv* env, void** priv_data, void** old_priv_data, ERL_NIF_TERM load_info)
+{
+ /* Convert the private data to the new version. */
+ *priv_data = *old_priv_data;
+
+ loads++;
+
+ return 0;
+}
+
+static void unload(ErlNifEnv* env, void* priv_data)
+{
+ if (loads == 1) {
+ /* Destroy the private data. */
+ }
+
+ loads--;
+}
+
+static ERL_NIF_TERM hello(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ if (enif_is_atom(env, argv[0])) {
+ return enif_make_tuple2(env,
+ enif_make_atom(env, "hello"),
+ argv[0]);
+ }
+
+ return enif_make_tuple2(env,
+ enif_make_atom(env, "error"),
+ enif_make_atom(env, "badarg"));
+}
+
+static ErlNifFunc nif_funcs[] = {
+ {"hello", 1, hello}
+};
+
+ERL_NIF_INIT($n, nif_funcs, load, NULL, upgrade, unload)
+endef
+
+define bs_erl_nif
+-module($n).
+
+-export([hello/1]).
+
+-on_load(on_load/0).
+on_load() ->
+ PrivDir = case code:priv_dir(?MODULE) of
+ {error, _} ->
+ AppPath = filename:dirname(filename:dirname(code:which(?MODULE))),
+ filename:join(AppPath, "priv");
+ Path ->
+ Path
+ end,
+ erlang:load_nif(filename:join(PrivDir, atom_to_list(?MODULE)), 0).
+
+hello(_) ->
+ erlang:nif_error({not_loaded, ?MODULE}).
+endef
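+
+# Sketch of exercising a generated NIF, assuming the project itself is named
+# my_nif (the Erlang template loads priv/<module>.so, and C_SRC_OUTPUT
+# defaults to priv/$(PROJECT), so the names must match):
+#
+#   make new-nif n=my_nif && make
+#   erl -pa ebin -noshell -eval \
+#     'io:format("~p~n", [my_nif:hello(world)]), halt().'   # prints {hello,world}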
+
+new-nif:
+ifneq ($(wildcard $(C_SRC_DIR)/$n.c),)
+ $(error Error: $(C_SRC_DIR)/$n.c already exists)
+endif
+ifneq ($(wildcard src/$n.erl),)
+ $(error Error: src/$n.erl already exists)
+endif
+ifndef n
+ $(error Usage: $(MAKE) new-nif n=NAME [in=APP])
+endif
+ifdef in
+ $(verbose) $(MAKE) -C $(APPS_DIR)/$(in)/ new-nif n=$n in=
+else
+ $(verbose) mkdir -p $(C_SRC_DIR) src/
+ $(verbose) $(call core_render,bs_c_nif,$(C_SRC_DIR)/$n.c)
+ $(verbose) $(call core_render,bs_erl_nif,src/$n.erl)
+endif
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: ci ci-prepare ci-setup
+
+CI_OTP ?=
+CI_HIPE ?=
+CI_ERLLVM ?=
+
+ifeq ($(CI_VM),native)
+ERLC_OPTS += +native
+TEST_ERLC_OPTS += +native
+else ifeq ($(CI_VM),erllvm)
+ERLC_OPTS += +native +'{hipe, [to_llvm]}'
+TEST_ERLC_OPTS += +native +'{hipe, [to_llvm]}'
+endif
+
+ifeq ($(strip $(CI_OTP) $(CI_HIPE) $(CI_ERLLVM)),)
+ci::
+else
+
+ci:: $(addprefix ci-,$(CI_OTP) $(addsuffix -native,$(CI_HIPE)) $(addsuffix -erllvm,$(CI_ERLLVM)))
+
+ci-prepare: $(addprefix $(KERL_INSTALL_DIR)/,$(CI_OTP) $(addsuffix -native,$(CI_HIPE)))
+
+ci-setup::
+ $(verbose) :
+
+ci-extra::
+ $(verbose) :
+
+ci_verbose_0 = @echo " CI " $(1);
+ci_verbose = $(ci_verbose_$(V))
+
+define ci_target
+ci-$1: $(KERL_INSTALL_DIR)/$2
+ $(verbose) $(MAKE) --no-print-directory clean
+ $(ci_verbose) \
+ PATH="$(KERL_INSTALL_DIR)/$2/bin:$(PATH)" \
+ CI_OTP_RELEASE="$1" \
+ CT_OPTS="-label $1" \
+ CI_VM="$3" \
+ $(MAKE) ci-setup tests
+ $(verbose) $(MAKE) --no-print-directory ci-extra
+endef
+
+$(foreach otp,$(CI_OTP),$(eval $(call ci_target,$(otp),$(otp),otp)))
+$(foreach otp,$(CI_HIPE),$(eval $(call ci_target,$(otp)-native,$(otp)-native,native)))
+$(foreach otp,$(CI_ERLLVM),$(eval $(call ci_target,$(otp)-erllvm,$(otp)-native,erllvm)))
+
+$(foreach otp,$(filter-out $(ERLANG_OTP),$(CI_OTP)),$(eval $(call kerl_otp_target,$(otp))))
+$(foreach otp,$(filter-out $(ERLANG_HIPE),$(sort $(CI_HIPE) $(CI_ERLLVM))),$(eval $(call kerl_hipe_target,$(otp))))
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Continuous Integration targets:" \
+ " ci Run '$(MAKE) tests' on all configured Erlang versions." \
+ "" \
+ "The CI_OTP variable must be defined with the Erlang versions" \
+ "that must be tested. For example: CI_OTP = OTP-17.3.4 OTP-17.5.3"
+
+endif
+
+# Copyright (c) 2020, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifdef CONCUERROR_TESTS
+
+.PHONY: concuerror distclean-concuerror
+
+# Configuration
+
+CONCUERROR_LOGS_DIR ?= $(CURDIR)/logs
+CONCUERROR_OPTS ?=
+
+# Core targets.
+
+check:: concuerror
+
+ifndef KEEP_LOGS
+distclean:: distclean-concuerror
+endif
+
+# Plugin-specific targets.
+
+$(ERLANG_MK_TMP)/Concuerror/bin/concuerror: | $(ERLANG_MK_TMP)
+ $(verbose) git clone https://github.com/parapluu/Concuerror $(ERLANG_MK_TMP)/Concuerror
+ $(verbose) $(MAKE) -C $(ERLANG_MK_TMP)/Concuerror
+
+$(CONCUERROR_LOGS_DIR):
+ $(verbose) mkdir -p $(CONCUERROR_LOGS_DIR)
+
+define concuerror_html_report
+<!DOCTYPE html>
+<html lang="en">
+<head>
+<meta charset="utf-8">
+<title>Concuerror HTML report</title>
+</head>
+<body>
+<h1>Concuerror HTML report</h1>
+<p>Generated on $(concuerror_date)</p>
+<ul>
+$(foreach t,$(concuerror_targets),<li><a href="$(t).txt">$(t)</a></li>)
+</ul>
+</body>
+</html>
+endef
+
+concuerror: $(addprefix concuerror-,$(subst :,-,$(CONCUERROR_TESTS)))
+ $(eval concuerror_date := $(shell date))
+ $(eval concuerror_targets := $^)
+ $(verbose) $(call core_render,concuerror_html_report,$(CONCUERROR_LOGS_DIR)/concuerror.html)
+
+define concuerror_target
+.PHONY: concuerror-$1-$2
+
+concuerror-$1-$2: test-build | $(ERLANG_MK_TMP)/Concuerror/bin/concuerror $(CONCUERROR_LOGS_DIR)
+ $(ERLANG_MK_TMP)/Concuerror/bin/concuerror \
+ --pa $(CURDIR)/ebin --pa $(TEST_DIR) \
+ -o $(CONCUERROR_LOGS_DIR)/concuerror-$1-$2.txt \
+ $$(CONCUERROR_OPTS) -m $1 -t $2
+endef
+
+$(foreach test,$(CONCUERROR_TESTS),$(eval $(call concuerror_target,$(firstword $(subst :, ,$(test))),$(lastword $(subst :, ,$(test))))))
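+
+# CONCUERROR_TESTS takes module:function pairs, e.g. (illustrative):
+#
+#   CONCUERROR_TESTS = my_mod:race_test other_mod:lock_test
+#
+# The foreach above then generates the targets concuerror-my_mod-race_test
+# and concuerror-other_mod-lock_test.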
+
+distclean-concuerror:
+ $(gen_verbose) rm -rf $(CONCUERROR_LOGS_DIR)
+
+endif
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: ct apps-ct distclean-ct
+
+# Configuration.
+
+CT_OPTS ?=
+
+ifneq ($(wildcard $(TEST_DIR)),)
+ifndef CT_SUITES
+CT_SUITES := $(sort $(subst _SUITE.erl,,$(notdir $(call core_find,$(TEST_DIR)/,*_SUITE.erl))))
+endif
+endif
+CT_SUITES ?=
+CT_LOGS_DIR ?= $(CURDIR)/logs
+
+# Core targets.
+
+tests:: ct
+
+ifndef KEEP_LOGS
+distclean:: distclean-ct
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Common_test targets:" \
+ " ct Run all the common_test suites for this project" \
+ "" \
+ "All your common_test suites have their associated targets." \
+		"A suite named http_SUITE can be run using the ct-http target."
+
+# Plugin-specific targets.
+
+CT_RUN = ct_run \
+ -no_auto_compile \
+ -noinput \
+ -pa $(CURDIR)/ebin $(TEST_DIR) \
+ -dir $(TEST_DIR) \
+ -logdir $(CT_LOGS_DIR)
+
+ifeq ($(CT_SUITES),)
+ct: $(if $(IS_APP)$(ROOT_DIR),,apps-ct)
+else
+# We do not run tests if we are in an apps/* with no test directory.
+ifneq ($(IS_APP)$(wildcard $(TEST_DIR)),1)
+ct: test-build $(if $(IS_APP)$(ROOT_DIR),,apps-ct)
+ $(verbose) mkdir -p $(CT_LOGS_DIR)
+ $(gen_verbose) $(CT_RUN) -sname ct_$(PROJECT) -suite $(addsuffix _SUITE,$(CT_SUITES)) $(CT_OPTS)
+endif
+endif
+
+ifneq ($(ALL_APPS_DIRS),)
+define ct_app_target
+apps-ct-$1: test-build
+ $$(MAKE) -C $1 ct IS_APP=1
+endef
+
+$(foreach app,$(ALL_APPS_DIRS),$(eval $(call ct_app_target,$(app))))
+
+apps-ct: $(addprefix apps-ct-,$(ALL_APPS_DIRS))
+endif
+
+ifdef t
+ifeq (,$(findstring :,$t))
+CT_EXTRA = -group $t
+else
+t_words = $(subst :, ,$t)
+CT_EXTRA = -group $(firstword $(t_words)) -case $(lastword $(t_words))
+endif
+else
+ifdef c
+CT_EXTRA = -case $c
+else
+CT_EXTRA =
+endif
+endif
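+
+# Illustrative: for a hypothetical test/http_SUITE.erl, the generated ct-http
+# target narrows the run with t (group[:case]) or c (case):
+#
+#   make ct-http t=chunked              # adds -group chunked
+#   make ct-http t=chunked:te_chunked   # adds -group chunked -case te_chunked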
+
+define ct_suite_target
+ct-$(1): test-build
+ $(verbose) mkdir -p $(CT_LOGS_DIR)
+ $(gen_verbose_esc) $(CT_RUN) -sname ct_$(PROJECT) -suite $(addsuffix _SUITE,$(1)) $(CT_EXTRA) $(CT_OPTS)
+endef
+
+$(foreach test,$(CT_SUITES),$(eval $(call ct_suite_target,$(test))))
+
+distclean-ct:
+ $(gen_verbose) rm -rf $(CT_LOGS_DIR)
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: plt distclean-plt dialyze
+
+# Configuration.
+
+DIALYZER_PLT ?= $(CURDIR)/.$(PROJECT).plt
+export DIALYZER_PLT
+
+PLT_APPS ?=
+DIALYZER_DIRS ?= --src -r $(wildcard src) $(ALL_APPS_DIRS)
+DIALYZER_OPTS ?= -Werror_handling -Wrace_conditions -Wunmatched_returns # -Wunderspecs
+DIALYZER_PLT_OPTS ?=
+
+# Core targets.
+
+check:: dialyze
+
+distclean:: distclean-plt
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Dialyzer targets:" \
+ " plt Build a PLT file for this project" \
+ " dialyze Analyze the project using Dialyzer"
+
+# Plugin-specific targets.
+
+define filter_opts.erl
+ Opts = init:get_plain_arguments(),
+ {Filtered, _} = lists:foldl(fun
+ (O, {Os, true}) -> {[O|Os], false};
+ (O = "-D", {Os, _}) -> {[O|Os], true};
+ (O = [\\$$-, \\$$D, _ | _], {Os, _}) -> {[O|Os], false};
+ (O = "-I", {Os, _}) -> {[O|Os], true};
+ (O = [\\$$-, \\$$I, _ | _], {Os, _}) -> {[O|Os], false};
+ (O = "-pa", {Os, _}) -> {[O|Os], true};
+ (_, Acc) -> Acc
+ end, {[], false}, Opts),
+ io:format("~s~n", [string:join(lists:reverse(Filtered), " ")]),
+ halt().
+endef
+
+# DIALYZER_PLT is a variable understood directly by Dialyzer.
+#
+# We append the path to erts at the end of the PLT. This works
+# because the PLT file is in the external term format and the
+# function binary_to_term/1 ignores any trailing data.
+$(DIALYZER_PLT): deps app
+ $(eval DEPS_LOG := $(shell test -f $(ERLANG_MK_TMP)/deps.log && \
+ while read p; do test -d $$p/ebin && echo $$p/ebin; done <$(ERLANG_MK_TMP)/deps.log))
+ $(verbose) dialyzer --build_plt $(DIALYZER_PLT_OPTS) --apps \
+ erts kernel stdlib $(PLT_APPS) $(OTP_DEPS) $(LOCAL_DEPS) $(DEPS_LOG) || test $$? -eq 2
+ $(verbose) $(ERL) -eval 'io:format("~n~s~n", [code:lib_dir(erts)]), halt().' >> $@
+
+plt: $(DIALYZER_PLT)
+
+distclean-plt:
+ $(gen_verbose) rm -f $(DIALYZER_PLT)
+
+ifneq ($(wildcard $(DIALYZER_PLT)),)
+dialyze: $(if $(filter --src,$(DIALYZER_DIRS)),,deps app)
+ $(verbose) if ! tail -n1 $(DIALYZER_PLT) | \
+ grep -q "^`$(ERL) -eval 'io:format("~s", [code:lib_dir(erts)]), halt().'`$$"; then \
+ rm $(DIALYZER_PLT); \
+ $(MAKE) plt; \
+ fi
+else
+dialyze: $(DIALYZER_PLT)
+endif
+ $(verbose) dialyzer --no_native `$(ERL) \
+ -eval "$(subst $(newline),,$(call escape_dquotes,$(call filter_opts.erl)))" \
+ -extra $(ERLC_OPTS)` $(DIALYZER_DIRS) $(DIALYZER_OPTS) $(if $(wildcard ebin/),-pa ebin/)
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-edoc edoc
+
+# Configuration.
+
+EDOC_OPTS ?=
+EDOC_SRC_DIRS ?=
+EDOC_OUTPUT ?= doc
+
+define edoc.erl
+ SrcPaths = lists:foldl(fun(P, Acc) ->
+ filelib:wildcard(atom_to_list(P) ++ "/{src,c_src}") ++ Acc
+ end, [], [$(call comma_list,$(patsubst %,'%',$(call core_native_path,$(EDOC_SRC_DIRS))))]),
+ DefaultOpts = [{dir, "$(EDOC_OUTPUT)"}, {source_path, SrcPaths}, {subpackages, false}],
+ edoc:application($(1), ".", [$(2)] ++ DefaultOpts),
+ halt(0).
+endef
+
+# Core targets.
+
+ifneq ($(strip $(EDOC_SRC_DIRS)$(wildcard doc/overview.edoc)),)
+docs:: edoc
+endif
+
+distclean:: distclean-edoc
+
+# Plugin-specific targets.
+
+edoc: distclean-edoc doc-deps
+ $(gen_verbose) $(call erlang,$(call edoc.erl,$(PROJECT),$(EDOC_OPTS)))
+
+distclean-edoc:
+ $(gen_verbose) rm -f $(EDOC_OUTPUT)/*.css $(EDOC_OUTPUT)/*.html $(EDOC_OUTPUT)/*.png $(EDOC_OUTPUT)/edoc-info
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Configuration.
+
+DTL_FULL_PATH ?=
+DTL_PATH ?= templates/
+DTL_PREFIX ?=
+DTL_SUFFIX ?= _dtl
+DTL_OPTS ?=
+
+# Verbosity.
+
+dtl_verbose_0 = @echo " DTL " $(filter %.dtl,$(?F));
+dtl_verbose = $(dtl_verbose_$(V))
+
+# Core targets.
+
+DTL_PATH := $(abspath $(DTL_PATH))
+DTL_FILES := $(sort $(call core_find,$(DTL_PATH),*.dtl))
+
+ifneq ($(DTL_FILES),)
+
+DTL_NAMES = $(addprefix $(DTL_PREFIX),$(addsuffix $(DTL_SUFFIX),$(DTL_FILES:$(DTL_PATH)/%.dtl=%)))
+DTL_MODULES = $(if $(DTL_FULL_PATH),$(subst /,_,$(DTL_NAMES)),$(notdir $(DTL_NAMES)))
+BEAM_FILES += $(addsuffix .beam,$(addprefix ebin/,$(DTL_MODULES)))
+
+ifneq ($(words $(DTL_FILES)),0)
+# Rebuild templates when the Makefile changes.
+$(ERLANG_MK_TMP)/last-makefile-change-erlydtl: $(MAKEFILE_LIST) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(DTL_FILES); \
+ fi
+ $(verbose) touch $@
+
+ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change-erlydtl
+endif
+
+define erlydtl_compile.erl
+ [begin
+ Module0 = case "$(strip $(DTL_FULL_PATH))" of
+ "" ->
+ filename:basename(F, ".dtl");
+ _ ->
+ "$(call core_native_path,$(DTL_PATH))/" ++ F2 = filename:rootname(F, ".dtl"),
+ re:replace(F2, "/", "_", [{return, list}, global])
+ end,
+ Module = list_to_atom("$(DTL_PREFIX)" ++ string:to_lower(Module0) ++ "$(DTL_SUFFIX)"),
+ case erlydtl:compile(F, Module, [$(DTL_OPTS)] ++ [{out_dir, "ebin/"}, return_errors]) of
+ ok -> ok;
+ {ok, _} -> ok
+ end
+ end || F <- string:tokens("$(1)", " ")],
+ halt().
+endef
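+
+# Sketch, assuming default settings and a hypothetical templates/hello.dtl:
+# the template compiles to a hello_dtl module, rendered from Erlang with:
+#
+#   {ok, Output} = hello_dtl:render([{name, "Joe"}]).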
+
+ebin/$(PROJECT).app:: $(DTL_FILES) | ebin/
+ $(if $(strip $?),\
+ $(dtl_verbose) $(call erlang,$(call erlydtl_compile.erl,$(call core_native_path,$?)),\
+ -pa ebin/))
+
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, Dave Cottlehuber <dch@skunkwerks.at>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-escript escript escript-zip
+
+# Configuration.
+
+ESCRIPT_NAME ?= $(PROJECT)
+ESCRIPT_FILE ?= $(ESCRIPT_NAME)
+
+ESCRIPT_SHEBANG ?= /usr/bin/env escript
+ESCRIPT_COMMENT ?= This is an -*- erlang -*- file
+ESCRIPT_EMU_ARGS ?= -escript main $(ESCRIPT_NAME)
+
+ESCRIPT_ZIP ?= 7z a -tzip -mx=9 -mtc=off $(if $(filter-out 0,$(V)),,> /dev/null)
+ESCRIPT_ZIP_FILE ?= $(ERLANG_MK_TMP)/escript.zip
+
+# Core targets.
+
+distclean:: distclean-escript
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Escript targets:" \
+		"  escript     Build an executable escript archive"
+
+# Plugin-specific targets.
+
+escript-zip:: FULL=1
+escript-zip:: deps app
+	$(verbose) mkdir -p $(dir $(ESCRIPT_ZIP_FILE))
+ $(verbose) rm -f $(ESCRIPT_ZIP_FILE)
+ $(gen_verbose) cd .. && $(ESCRIPT_ZIP) $(ESCRIPT_ZIP_FILE) $(PROJECT)/ebin/*
+ifneq ($(DEPS),)
+ $(verbose) cd $(DEPS_DIR) && $(ESCRIPT_ZIP) $(ESCRIPT_ZIP_FILE) \
+ $(subst $(DEPS_DIR)/,,$(addsuffix /*,$(wildcard \
+ $(addsuffix /ebin,$(shell cat $(ERLANG_MK_TMP)/deps.log)))))
+endif
+
+escript:: escript-zip
+ $(gen_verbose) printf "%s\n" \
+ "#!$(ESCRIPT_SHEBANG)" \
+ "%% $(ESCRIPT_COMMENT)" \
+ "%%! $(ESCRIPT_EMU_ARGS)" > $(ESCRIPT_FILE)
+ $(verbose) cat $(ESCRIPT_ZIP_FILE) >> $(ESCRIPT_FILE)
+ $(verbose) chmod +x $(ESCRIPT_FILE)
+
+distclean-escript:
+ $(gen_verbose) rm -f $(ESCRIPT_FILE)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, Enrique Fernandez <enrique.fernandez@erlang-solutions.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: eunit apps-eunit
+
+# Configuration
+
+EUNIT_OPTS ?=
+EUNIT_ERL_OPTS ?=
+
+# Core targets.
+
+tests:: eunit
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "EUnit targets:" \
+ " eunit Run all the EUnit tests for this project"
+
+# Plugin-specific targets.
+
+define eunit.erl
+ $(call cover.erl)
+ CoverSetup(),
+ case eunit:test($1, [$(EUNIT_OPTS)]) of
+ ok -> ok;
+ error -> halt(2)
+ end,
+ CoverExport("$(call core_native_path,$(COVER_DATA_DIR))/eunit.coverdata"),
+ halt()
+endef
+
+EUNIT_ERL_OPTS += -pa $(TEST_DIR) $(CURDIR)/ebin
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+eunit: test-build cover-data-dir
+ $(gen_verbose) $(call erlang,$(call eunit.erl,['$(t)']),$(EUNIT_ERL_OPTS))
+else
+eunit: test-build cover-data-dir
+ $(gen_verbose) $(call erlang,$(call eunit.erl,fun $(t)/0),$(EUNIT_ERL_OPTS))
+endif
+else
+EUNIT_EBIN_MODS = $(notdir $(basename $(ERL_FILES) $(BEAM_FILES)))
+EUNIT_TEST_MODS = $(notdir $(basename $(call core_find,$(TEST_DIR)/,*.erl)))
+
+EUNIT_MODS = $(foreach mod,$(EUNIT_EBIN_MODS) $(filter-out \
+ $(patsubst %,%_tests,$(EUNIT_EBIN_MODS)),$(EUNIT_TEST_MODS)),'$(mod)')
+
+eunit: test-build $(if $(IS_APP)$(ROOT_DIR),,apps-eunit) cover-data-dir
+ifneq ($(wildcard src/ $(TEST_DIR)),)
+ $(gen_verbose) $(call erlang,$(call eunit.erl,[$(call comma_list,$(EUNIT_MODS))]),$(EUNIT_ERL_OPTS))
+endif
+
+ifneq ($(ALL_APPS_DIRS),)
+apps-eunit: test-build
+ $(verbose) eunit_retcode=0 ; for app in $(ALL_APPS_DIRS); do $(MAKE) -C $$app eunit IS_APP=1; \
+ [ $$? -ne 0 ] && eunit_retcode=1 ; done ; \
+ exit $$eunit_retcode
+endif
+endif
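+
+# Illustrative: t selects what runs; a plain name is a module, while
+# module:function calls that zero-arity test function directly:
+#
+#   make eunit t=my_mod            # eunit:test([my_mod])
+#   make eunit t=my_mod:some_test  # runs fun my_mod:some_test/0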
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter proper,$(DEPS) $(TEST_DEPS)),proper)
+.PHONY: proper
+
+# Targets.
+
+tests:: proper
+
+define proper_check.erl
+ $(call cover.erl)
+ code:add_pathsa([
+ "$(call core_native_path,$(CURDIR)/ebin)",
+ "$(call core_native_path,$(DEPS_DIR)/*/ebin)",
+ "$(call core_native_path,$(TEST_DIR))"]),
+ Module = fun(M) ->
+ [true] =:= lists:usort([
+ case atom_to_list(F) of
+ "prop_" ++ _ ->
+ io:format("Testing ~p:~p/0~n", [M, F]),
+ proper:quickcheck(M:F(), nocolors);
+ _ ->
+ true
+ end
+ || {F, 0} <- M:module_info(exports)])
+ end,
+ try begin
+ CoverSetup(),
+ Res = case $(1) of
+ all -> [true] =:= lists:usort([Module(M) || M <- [$(call comma_list,$(3))]]);
+ module -> Module($(2));
+ function -> proper:quickcheck($(2), nocolors)
+ end,
+ CoverExport("$(COVER_DATA_DIR)/proper.coverdata"),
+ Res
+ end of
+ true -> halt(0);
+ _ -> halt(1)
+	catch error:undef:Stacktrace ->
+		io:format("Undefined property or module?~n~p~n", [Stacktrace]),
+ halt(0)
+ end.
+endef
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+proper: test-build cover-data-dir
+ $(verbose) $(call erlang,$(call proper_check.erl,module,$(t)))
+else
+proper: test-build cover-data-dir
+ $(verbose) echo Testing $(t)/0
+ $(verbose) $(call erlang,$(call proper_check.erl,function,$(t)()))
+endif
+else
+proper: test-build cover-data-dir
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(wildcard ebin/*.beam) $(call core_find,$(TEST_DIR)/,*.beam))))))
+ $(gen_verbose) $(call erlang,$(call proper_check.erl,all,undefined,$(MODULES)))
+endif
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Verbosity.
+
+proto_verbose_0 = @echo " PROTO " $(filter %.proto,$(?F));
+proto_verbose = $(proto_verbose_$(V))
+
+# Core targets.
+
+ifneq ($(wildcard src/),)
+ifneq ($(filter gpb protobuffs,$(BUILD_DEPS) $(DEPS)),)
+PROTO_FILES := $(filter %.proto,$(ALL_SRC_FILES))
+ERL_FILES += $(addprefix src/,$(patsubst %.proto,%_pb.erl,$(notdir $(PROTO_FILES))))
+
+ifeq ($(PROTO_FILES),)
+$(ERLANG_MK_TMP)/last-makefile-change-protobuffs:
+ $(verbose) :
+else
+# Rebuild proto files when the Makefile changes.
+# We exclude $(PROJECT).d to avoid a circular dependency.
+$(ERLANG_MK_TMP)/last-makefile-change-protobuffs: $(filter-out $(PROJECT).d,$(MAKEFILE_LIST)) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(PROTO_FILES); \
+ fi
+ $(verbose) touch $@
+
+$(PROJECT).d:: $(ERLANG_MK_TMP)/last-makefile-change-protobuffs
+endif
+
+ifeq ($(filter gpb,$(BUILD_DEPS) $(DEPS)),)
+define compile_proto.erl
+ [begin
+ protobuffs_compile:generate_source(F, [
+ {output_include_dir, "./include"},
+ {output_src_dir, "./src"}])
+ end || F <- string:tokens("$1", " ")],
+ halt().
+endef
+else
+define compile_proto.erl
+ [begin
+ gpb_compile:file(F, [
+ {include_as_lib, true},
+ {module_name_suffix, "_pb"},
+ {o_hrl, "./include"},
+ {o_erl, "./src"}])
+ end || F <- string:tokens("$1", " ")],
+ halt().
+endef
+endif
+
+ifneq ($(PROTO_FILES),)
+$(PROJECT).d:: $(PROTO_FILES)
+ $(verbose) mkdir -p ebin/ include/
+ $(if $(strip $?),$(proto_verbose) $(call erlang,$(call compile_proto.erl,$?)))
+endif
+endif
+endif
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: relx-rel relx-relup distclean-relx-rel run
+
+# Configuration.
+
+RELX ?= $(ERLANG_MK_TMP)/relx
+RELX_CONFIG ?= $(CURDIR)/relx.config
+
+RELX_URL ?= https://erlang.mk/res/relx-v3.27.0
+RELX_OPTS ?=
+RELX_OUTPUT_DIR ?= _rel
+RELX_REL_EXT ?=
+RELX_TAR ?= 1
+
+ifdef SFX
+ RELX_TAR = 1
+endif
+
+ifeq ($(firstword $(RELX_OPTS)),-o)
+ RELX_OUTPUT_DIR = $(word 2,$(RELX_OPTS))
+else
+ RELX_OPTS += -o $(RELX_OUTPUT_DIR)
+endif
+
+# Core targets.
+
+ifeq ($(IS_DEP),)
+ifneq ($(wildcard $(RELX_CONFIG)),)
+rel:: relx-rel
+
+relup:: relx-relup
+endif
+endif
+
+distclean:: distclean-relx-rel
+
+# Plugin-specific targets.
+
+$(RELX): | $(ERLANG_MK_TMP)
+ $(gen_verbose) $(call core_http_get,$(RELX),$(RELX_URL))
+ $(verbose) chmod +x $(RELX)
+
+relx-rel: $(RELX) rel-deps app
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) release
+ $(verbose) $(MAKE) relx-post-rel
+ifeq ($(RELX_TAR),1)
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) tar
+endif
+
+relx-relup: $(RELX) rel-deps app
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) release
+ $(MAKE) relx-post-rel
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) relup $(if $(filter 1,$(RELX_TAR)),tar)
+
+distclean-relx-rel:
+ $(gen_verbose) rm -rf $(RELX_OUTPUT_DIR)
+
+# Default hooks.
+relx-post-rel::
+ $(verbose) :
+
+# Run target.
+
+ifeq ($(wildcard $(RELX_CONFIG)),)
+run::
+else
+
+define get_relx_release.erl
+ {ok, Config} = file:consult("$(call core_native_path,$(RELX_CONFIG))"),
+ {release, {Name, Vsn0}, _} = lists:keyfind(release, 1, Config),
+ Vsn = case Vsn0 of
+ {cmd, Cmd} -> os:cmd(Cmd);
+ semver -> "";
+ {semver, _} -> "";
+		_ -> Vsn0
+ end,
+ Extended = case lists:keyfind(extended_start_script, 1, Config) of
+ {_, true} -> "1";
+ _ -> ""
+ end,
+ io:format("~s ~s ~s", [Name, Vsn, Extended]),
+ halt(0).
+endef
+
+RELX_REL := $(shell $(call erlang,$(get_relx_release.erl)))
+RELX_REL_NAME := $(word 1,$(RELX_REL))
+RELX_REL_VSN := $(word 2,$(RELX_REL))
+RELX_REL_CMD := $(if $(word 3,$(RELX_REL)),console)
+
+ifeq ($(PLATFORM),msys2)
+RELX_REL_EXT := .cmd
+endif
+
+run:: all
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) $(RELX_REL_CMD)
+
+ifdef RELOAD
+rel::
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) ping
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) \
+ eval "io:format(\"~p~n\", [c:lm()])"
+endif
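+
+# Illustrative: with a release already running, rebuild and hot-load the
+# modified modules into it:
+#
+#   make rel RELOAD=1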
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Relx targets:" \
+ " run Compile the project, build the release and run it"
+
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, M Robert Martin <rob@version2beta.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: shell
+
+# Configuration.
+
+SHELL_ERL ?= erl
+SHELL_PATHS ?= $(CURDIR)/ebin $(TEST_DIR)
+SHELL_OPTS ?=
+
+ALL_SHELL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(SHELL_DEPS))
+
+# Core targets
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Shell targets:" \
+		"  shell      Run an Erlang shell with SHELL_OPTS or a reasonable default"
+
+# Plugin-specific targets.
+
+$(foreach dep,$(SHELL_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+build-shell-deps:
+else
+build-shell-deps: $(ALL_SHELL_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_SHELL_DEPS_DIRS) ; do \
+ if [ -z "$(strip $(FULL))" ] && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ else \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ fi \
+ done
+endif
+
+shell:: build-shell-deps
+ $(gen_verbose) $(SHELL_ERL) -pa $(SHELL_PATHS) $(SHELL_OPTS)
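+
+# Illustrative: options are passed straight to erl, e.g.:
+#
+#   make shell SHELL_OPTS="-sname dev -setcookie dev"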
+
+# Copyright 2017, Stanislaw Klekot <dozzie@jarowit.net>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-sphinx sphinx
+
+# Configuration.
+
+SPHINX_BUILD ?= sphinx-build
+SPHINX_SOURCE ?= doc
+SPHINX_CONFDIR ?=
+SPHINX_FORMATS ?= html
+SPHINX_DOCTREES ?= $(ERLANG_MK_TMP)/sphinx.doctrees
+SPHINX_OPTS ?=
+
+#sphinx_html_opts =
+#sphinx_html_output = html
+#sphinx_man_opts =
+#sphinx_man_output = man
+#sphinx_latex_opts =
+#sphinx_latex_output = latex
+
+# Helpers.
+
+sphinx_build_0 = @echo " SPHINX" $1; $(SPHINX_BUILD) -N -q
+sphinx_build_1 = $(SPHINX_BUILD) -N
+sphinx_build_2 = set -x; $(SPHINX_BUILD)
+sphinx_build = $(sphinx_build_$(V))
+
+define sphinx.build
+$(call sphinx_build,$1) -b $1 -d $(SPHINX_DOCTREES) $(if $(SPHINX_CONFDIR),-c $(SPHINX_CONFDIR)) $(SPHINX_OPTS) $(sphinx_$1_opts) -- $(SPHINX_SOURCE) $(call sphinx.output,$1)
+
+endef
+
+define sphinx.output
+$(if $(sphinx_$1_output),$(sphinx_$1_output),$1)
+endef
+
+# Targets.
+
+ifneq ($(wildcard $(if $(SPHINX_CONFDIR),$(SPHINX_CONFDIR),$(SPHINX_SOURCE))/conf.py),)
+docs:: sphinx
+distclean:: distclean-sphinx
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Sphinx targets:" \
+ " sphinx Generate Sphinx documentation." \
+ "" \
+		"ReST sources and the 'conf.py' file are expected in the directory pointed" \
+		"to by SPHINX_SOURCE ('doc' by default). SPHINX_FORMATS lists the formats to" \
+		"build (only the 'html' format is generated by default); the target directory" \
+		'can be specified by setting sphinx_$${format}_output, for example:' \
+		'  sphinx_html_output = output/html' \
+		"Additional Sphinx options can be set in SPHINX_OPTS."
+
+# Plugin-specific targets.
+
+sphinx:
+ $(foreach F,$(SPHINX_FORMATS),$(call sphinx.build,$F))
+
+distclean-sphinx:
+ $(gen_verbose) rm -rf $(filter-out $(SPHINX_SOURCE),$(foreach F,$(SPHINX_FORMATS),$(call sphinx.output,$F)))
+
+# Copyright (c) 2017, Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: show-ERL_LIBS show-ERLC_OPTS show-TEST_ERLC_OPTS
+
+show-ERL_LIBS:
+ @echo $(ERL_LIBS)
+
+show-ERLC_OPTS:
+ @$(foreach opt,$(ERLC_OPTS) -pa ebin -I include,echo "$(opt)";)
+
+show-TEST_ERLC_OPTS:
+ @$(foreach opt,$(TEST_ERLC_OPTS) -pa ebin -I include,echo "$(opt)";)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter triq,$(DEPS) $(TEST_DEPS)),triq)
+.PHONY: triq
+
+# Targets.
+
+tests:: triq
+
+define triq_check.erl
+ $(call cover.erl)
+ code:add_pathsa([
+ "$(call core_native_path,$(CURDIR)/ebin)",
+ "$(call core_native_path,$(DEPS_DIR)/*/ebin)",
+ "$(call core_native_path,$(TEST_DIR))"]),
+ try begin
+ CoverSetup(),
+ Res = case $(1) of
+ all -> [true] =:= lists:usort([triq:check(M) || M <- [$(call comma_list,$(3))]]);
+ module -> triq:check($(2));
+ function -> triq:check($(2))
+ end,
+ CoverExport("$(COVER_DATA_DIR)/triq.coverdata"),
+ Res
+ end of
+ true -> halt(0);
+ _ -> halt(1)
+ catch error:undef ->
+ io:format("Undefined property or module?~n~p~n", [erlang:get_stacktrace()]),
+ halt(0)
+ end.
+endef
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+triq: test-build cover-data-dir
+ $(verbose) $(call erlang,$(call triq_check.erl,module,$(t)))
+else
+triq: test-build cover-data-dir
+ $(verbose) echo Testing $(t)/0
+ $(verbose) $(call erlang,$(call triq_check.erl,function,$(t)()))
+endif
+else
+triq: test-build cover-data-dir
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(wildcard ebin/*.beam) $(call core_find,$(TEST_DIR)/,*.beam))))))
+ $(gen_verbose) $(call erlang,$(call triq_check.erl,all,undefined,$(MODULES)))
+endif
+endif
+
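+# Examples (assuming triq is listed in DEPS or TEST_DEPS): check all
+# properties, a single module, or a single property. The module and property
+# names are placeholders.
+#
+#   $ make triq
+#   $ make triq t=prop_mymod
+#   $ make triq t=prop_mymod:prop_encode_decode
+#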
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Erlang Solutions Ltd.
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: xref distclean-xref
+
+# Configuration.
+
+ifeq ($(XREF_CONFIG),)
+ XREFR_ARGS :=
+else
+ XREFR_ARGS := -c $(XREF_CONFIG)
+endif
+
+XREFR ?= $(CURDIR)/xrefr
+export XREFR
+
+XREFR_URL ?= https://github.com/inaka/xref_runner/releases/download/1.1.0/xrefr
+
+# Core targets.
+
+help::
+ $(verbose) printf '%s\n' '' \
+ 'Xref targets:' \
+ ' xref Run Xrefr using $$XREF_CONFIG as config file if defined'
+
+distclean:: distclean-xref
+
+# Plugin-specific targets.
+
+$(XREFR):
+ $(gen_verbose) $(call core_http_get,$(XREFR),$(XREFR_URL))
+ $(verbose) chmod +x $(XREFR)
+
+xref: deps app $(XREFR)
+ $(gen_verbose) $(XREFR) $(XREFR_ARGS)
+
+distclean-xref:
+ $(gen_verbose) rm -rf $(XREFR)
+
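+# Example (hypothetical): run xrefr with a custom xref_runner configuration
+# file.
+#
+#   $ make xref XREF_CONFIG=xref.config
+#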
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Viktor Söderqvist <viktor@zuiderkwast.se>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+COVER_REPORT_DIR ?= cover
+COVER_DATA_DIR ?= $(COVER_REPORT_DIR)
+
+ifdef COVER
+COVER_APPS ?= $(notdir $(ALL_APPS_DIRS))
+COVER_DEPS ?=
+endif
+
+# Code coverage for Common Test.
+
+ifdef COVER
+ifdef CT_RUN
+ifneq ($(wildcard $(TEST_DIR)),)
+test-build:: $(TEST_DIR)/ct.cover.spec
+
+$(TEST_DIR)/ct.cover.spec: cover-data-dir
+ $(gen_verbose) printf "%s\n" \
+ "{incl_app, '$(PROJECT)', details}." \
+ "{incl_dirs, '$(PROJECT)', [\"$(call core_native_path,$(CURDIR)/ebin)\" \
+ $(foreach a,$(COVER_APPS),$(comma) \"$(call core_native_path,$(APPS_DIR)/$a/ebin)\") \
+ $(foreach d,$(COVER_DEPS),$(comma) \"$(call core_native_path,$(DEPS_DIR)/$d/ebin)\")]}." \
+ '{export,"$(call core_native_path,$(abspath $(COVER_DATA_DIR))/ct.coverdata)"}.' > $@
+
+CT_RUN += -cover $(TEST_DIR)/ct.cover.spec
+endif
+endif
+endif
+
+# Code coverage for other tools.
+
+ifdef COVER
+define cover.erl
+ CoverSetup = fun() ->
+ Dirs = ["$(call core_native_path,$(CURDIR)/ebin)"
+ $(foreach a,$(COVER_APPS),$(comma) "$(call core_native_path,$(APPS_DIR)/$a/ebin)")
+ $(foreach d,$(COVER_DEPS),$(comma) "$(call core_native_path,$(DEPS_DIR)/$d/ebin)")],
+ [begin
+ case filelib:is_dir(Dir) of
+ false -> false;
+ true ->
+ case cover:compile_beam_directory(Dir) of
+ {error, _} -> halt(1);
+ _ -> true
+ end
+ end
+ end || Dir <- Dirs]
+ end,
+ CoverExport = fun(Filename) -> cover:export(Filename) end,
+endef
+else
+define cover.erl
+ CoverSetup = fun() -> ok end,
+ CoverExport = fun(_) -> ok end,
+endef
+endif
+
+# Core targets
+
+ifdef COVER
+ifneq ($(COVER_REPORT_DIR),)
+tests::
+ $(verbose) $(MAKE) --no-print-directory cover-report
+endif
+
+cover-data-dir: | $(COVER_DATA_DIR)
+
+$(COVER_DATA_DIR):
+ $(verbose) mkdir -p $(COVER_DATA_DIR)
+else
+cover-data-dir:
+endif
+
+clean:: coverdata-clean
+
+ifneq ($(COVER_REPORT_DIR),)
+distclean:: cover-report-clean
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Cover targets:" \
+ " cover-report Generate an HTML coverage report from previously collected" \
+ " cover data." \
+ " all.coverdata Merge all coverdata files into all.coverdata." \
+ "" \
+ "If COVER=1 is set, coverage data is generated by the targets eunit and ct. The" \
+ "target tests additionally generates an HTML coverage report from the combined" \
+ "coverdata files from each of these testing tools. HTML reports can be disabled" \
+ "by setting COVER_REPORT_DIR to empty."
+
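+# Example (hypothetical): run the test suites with coverage for the project
+# and one dependency, then open cover/index.html. The dep name is a
+# placeholder.
+#
+#   $ make tests COVER=1 COVER_DEPS="cowlib"
+#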
+# Plugin specific targets
+
+COVERDATA = $(filter-out $(COVER_DATA_DIR)/all.coverdata,$(wildcard $(COVER_DATA_DIR)/*.coverdata))
+
+.PHONY: coverdata-clean
+coverdata-clean:
+ $(gen_verbose) rm -f $(COVER_DATA_DIR)/*.coverdata $(TEST_DIR)/ct.cover.spec
+
+# Merge all coverdata files into one.
+define cover_export.erl
+ $(foreach f,$(COVERDATA),cover:import("$(f)") == ok orelse halt(1),)
+ cover:export("$(COVER_DATA_DIR)/$@"), halt(0).
+endef
+
+all.coverdata: $(COVERDATA) cover-data-dir
+ $(gen_verbose) $(call erlang,$(cover_export.erl))
+
+# These are only defined if COVER_REPORT_DIR is non-empty. Set COVER_REPORT_DIR to
+# empty if you want the coverdata files but not the HTML report.
+ifneq ($(COVER_REPORT_DIR),)
+
+.PHONY: cover-report-clean cover-report
+
+cover-report-clean:
+ $(gen_verbose) rm -rf $(COVER_REPORT_DIR)
+ifneq ($(COVER_REPORT_DIR),$(COVER_DATA_DIR))
+ $(if $(shell ls -A $(COVER_DATA_DIR)/),,$(verbose) rmdir $(COVER_DATA_DIR))
+endif
+
+ifeq ($(COVERDATA),)
+cover-report:
+else
+
+# Modules which include eunit.hrl always contain one line without coverage
+# because eunit defines test/0 which is never called. We compensate for this.
+EUNIT_HRL_MODS = $(subst $(space),$(comma),$(shell \
+ grep -H -e '^\s*-include.*include/eunit\.hrl"' src/*.erl \
+ | sed "s/^src\/\(.*\)\.erl:.*/'\1'/" | uniq))
+
+define cover_report.erl
+ $(foreach f,$(COVERDATA),cover:import("$(f)") == ok orelse halt(1),)
+ Ms = cover:imported_modules(),
+ [cover:analyse_to_file(M, "$(COVER_REPORT_DIR)/" ++ atom_to_list(M)
+ ++ ".COVER.html", [html]) || M <- Ms],
+ Report = [begin {ok, R} = cover:analyse(M, module), R end || M <- Ms],
+ EunitHrlMods = [$(EUNIT_HRL_MODS)],
+ Report1 = [{M, {Y, case lists:member(M, EunitHrlMods) of
+ true -> N - 1; false -> N end}} || {M, {Y, N}} <- Report],
+ TotalY = lists:sum([Y || {_, {Y, _}} <- Report1]),
+ TotalN = lists:sum([N || {_, {_, N}} <- Report1]),
+ Perc = fun(Y, N) -> case Y + N of 0 -> 100; S -> round(100 * Y / S) end end,
+ TotalPerc = Perc(TotalY, TotalN),
+ {ok, F} = file:open("$(COVER_REPORT_DIR)/index.html", [write]),
+ io:format(F, "<!DOCTYPE html><html>~n"
+ "<head><meta charset=\"UTF-8\">~n"
+ "<title>Coverage report</title></head>~n"
+ "<body>~n", []),
+ io:format(F, "<h1>Coverage</h1>~n<p>Total: ~p%</p>~n", [TotalPerc]),
+ io:format(F, "<table><tr><th>Module</th><th>Coverage</th></tr>~n", []),
+ [io:format(F, "<tr><td><a href=\"~p.COVER.html\">~p</a></td>"
+ "<td>~p%</td></tr>~n",
+ [M, M, Perc(Y, N)]) || {M, {Y, N}} <- Report1],
+ How = "$(subst $(space),$(comma)$(space),$(basename $(COVERDATA)))",
+ Date = "$(shell date -u "+%Y-%m-%dT%H:%M:%SZ")",
+ io:format(F, "</table>~n"
+ "<p>Generated using ~s and erlang.mk on ~s.</p>~n"
+ "</body></html>", [How, Date]),
+ halt().
+endef
+
+cover-report:
+ $(verbose) mkdir -p $(COVER_REPORT_DIR)
+ $(gen_verbose) $(call erlang,$(cover_report.erl))
+
+endif
+endif # ifneq ($(COVER_REPORT_DIR),)
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: sfx
+
+ifdef RELX_REL
+ifdef SFX
+
+# Configuration.
+
+SFX_ARCHIVE ?= $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/$(RELX_REL_NAME)-$(RELX_REL_VSN).tar.gz
+SFX_OUTPUT_FILE ?= $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME).run
+
+# Core targets.
+
+rel:: sfx
+
+# Plugin-specific targets.
+
+define sfx_stub
+#!/bin/sh
+
+TMPDIR=`mktemp -d`
+ARCHIVE=`awk '/^__ARCHIVE_BELOW__$$/ {print NR + 1; exit 0;}' $$0`
+FILENAME=$$(basename $$0)
+REL=$${FILENAME%.*}
+
+tail -n+$$ARCHIVE $$0 | tar -xzf - -C $$TMPDIR
+
+$$TMPDIR/bin/$$REL console
+RET=$$?
+
+rm -rf $$TMPDIR
+
+exit $$RET
+
+__ARCHIVE_BELOW__
+endef
+
+sfx:
+ $(verbose) $(call core_render,sfx_stub,$(SFX_OUTPUT_FILE))
+ $(gen_verbose) cat $(SFX_ARCHIVE) >> $(SFX_OUTPUT_FILE)
+ $(verbose) chmod +x $(SFX_OUTPUT_FILE)
+
+endif
+endif
+
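+# Example (hypothetical): for a relx-based release, build a self-extracting
+# .run file next to the release archive.
+#
+#   $ make SFX=1
+#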
+# Copyright (c) 2013-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# External plugins.
+
+DEP_PLUGINS ?=
+
+$(foreach p,$(DEP_PLUGINS),\
+ $(eval $(if $(findstring /,$p),\
+ $(call core_dep_plugin,$p,$(firstword $(subst /, ,$p))),\
+ $(call core_dep_plugin,$p/plugins.mk,$p))))
+
+help:: help-plugins
+
+help-plugins::
+ $(verbose) :
+
+# Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015-2016, Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Fetch dependencies recursively (without building them).
+
+.PHONY: fetch-deps fetch-doc-deps fetch-rel-deps fetch-test-deps \
+ fetch-shell-deps
+
+.PHONY: $(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+fetch-deps: $(ERLANG_MK_RECURSIVE_DEPS_LIST)
+fetch-doc-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST)
+fetch-rel-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST)
+fetch-test-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST)
+fetch-shell-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+ifneq ($(SKIP_DEPS),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST):
+ $(verbose) :> $@
+else
+# By default, we fetch "normal" dependencies. They are also included no
+# matter the type of requested dependencies.
+#
+# $(ALL_DEPS_DIRS) includes $(BUILD_DEPS).
+
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_DOC_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_REL_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_TEST_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_SHELL_DEPS_DIRS)
+
+# Allow using fetch-deps together with $(DEP_TYPES) to fetch multiple types of
+# dependencies with a single target.
+ifneq ($(filter doc,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_DOC_DEPS_DIRS)
+endif
+ifneq ($(filter rel,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_REL_DEPS_DIRS)
+endif
+ifneq ($(filter test,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_TEST_DEPS_DIRS)
+endif
+ifneq ($(filter shell,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_SHELL_DEPS_DIRS)
+endif
+
+ERLANG_MK_RECURSIVE_TMP_LIST := $(abspath $(ERLANG_MK_TMP)/recursive-tmp-deps-$(shell echo $$PPID).log)
+
+$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): | $(ERLANG_MK_TMP)
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_RECURSIVE_TMP_LIST)
+endif
+ $(verbose) touch $(ERLANG_MK_RECURSIVE_TMP_LIST)
+ $(verbose) set -e; for dep in $^ ; do \
+ if ! grep -qs ^$$dep$$ $(ERLANG_MK_RECURSIVE_TMP_LIST); then \
+ echo $$dep >> $(ERLANG_MK_RECURSIVE_TMP_LIST); \
+ if grep -qs -E "^[[:blank:]]*include[[:blank:]]+(erlang\.mk|.*/erlang\.mk|.*ERLANG_MK_FILENAME.*)$$" \
+ $$dep/GNUmakefile $$dep/makefile $$dep/Makefile; then \
+ $(MAKE) -C $$dep fetch-deps \
+ IS_DEP=1 \
+ ERLANG_MK_RECURSIVE_TMP_LIST=$(ERLANG_MK_RECURSIVE_TMP_LIST); \
+ fi \
+ fi \
+ done
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) sort < $(ERLANG_MK_RECURSIVE_TMP_LIST) | \
+ uniq > $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted
+ $(verbose) cmp -s $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted $@ \
+ || mv $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted $@
+ $(verbose) rm -f $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted
+ $(verbose) rm $(ERLANG_MK_RECURSIVE_TMP_LIST)
+endif
+endif # ifneq ($(SKIP_DEPS),)
+
+# List dependencies recursively.
+
+.PHONY: list-deps list-doc-deps list-rel-deps list-test-deps \
+ list-shell-deps
+
+list-deps: $(ERLANG_MK_RECURSIVE_DEPS_LIST)
+list-doc-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST)
+list-rel-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST)
+list-test-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST)
+list-shell-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+list-deps list-doc-deps list-rel-deps list-test-deps list-shell-deps:
+ $(verbose) cat $^
+
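+# Example: print the recursive list of dependency directories.
+#
+#   $ make list-deps
+#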
+# Query dependencies recursively.
+
+.PHONY: query-deps query-doc-deps query-rel-deps query-test-deps \
+ query-shell-deps
+
+QUERY ?= name fetch_method repo version
+
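+# Example (hypothetical): print only the name and repository of every
+# dependency, recursively.
+#
+#   $ make query-deps QUERY="name repo"
+#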
+define query_target
+$(1): $(2) clean-tmp-query.log
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(4)
+endif
+ $(verbose) $(foreach dep,$(3),\
+ echo $(PROJECT): $(foreach q,$(QUERY),$(call query_$(q),$(dep))) >> $(4) ;)
+ $(if $(filter-out query-deps,$(1)),,\
+ $(verbose) set -e; for dep in $(3) ; do \
+ if grep -qs ^$$$$dep$$$$ $(ERLANG_MK_TMP)/query.log; then \
+ :; \
+ else \
+ echo $$$$dep >> $(ERLANG_MK_TMP)/query.log; \
+ $(MAKE) -C $(DEPS_DIR)/$$$$dep $$@ QUERY="$(QUERY)" IS_DEP=1 || true; \
+ fi \
+ done)
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) touch $(4)
+ $(verbose) cat $(4)
+endif
+endef
+
+clean-tmp-query.log:
+ifeq ($(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_TMP)/query.log
+endif
+
+$(eval $(call query_target,query-deps,$(ERLANG_MK_RECURSIVE_DEPS_LIST),$(BUILD_DEPS) $(DEPS),$(ERLANG_MK_QUERY_DEPS_FILE)))
+$(eval $(call query_target,query-doc-deps,$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST),$(DOC_DEPS),$(ERLANG_MK_QUERY_DOC_DEPS_FILE)))
+$(eval $(call query_target,query-rel-deps,$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST),$(REL_DEPS),$(ERLANG_MK_QUERY_REL_DEPS_FILE)))
+$(eval $(call query_target,query-test-deps,$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST),$(TEST_DEPS),$(ERLANG_MK_QUERY_TEST_DEPS_FILE)))
+$(eval $(call query_target,query-shell-deps,$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST),$(SHELL_DEPS),$(ERLANG_MK_QUERY_SHELL_DEPS_FILE)))
diff --git a/deps/rabbitmq_auth_backend_http/examples/README.md b/deps/rabbitmq_auth_backend_http/examples/README.md
new file mode 100644
index 0000000000..ecdf115f84
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/examples/README.md
@@ -0,0 +1,235 @@
+# RabbitMQ HTTP Authn/Authz Backend Examples
+
+## Overview
+
+This directory provides a number of community-contributed example applications that target
+different platforms and frameworks:
+
+ * Python and Django
+ * Java and Spring Boot
+ * Kotlin and Spring Boot
+ * C# and ASP.NET Web API
+ * C# and ASP.NET Core 2.1
+ * PHP
+
+## Python Example
+
+`rabbitmq_auth_backend_django` is a very minimalistic [Django](https://www.djangoproject.com/) application
+that rabbitmq-auth-backend-http can authenticate against. It's really
+not designed to be anything other than an example.
+
+### Running the Example
+
+Run
+
+``` shell
+start.sh
+```
+
+to launch it after [installing Django](https://docs.djangoproject.com/en/2.1/intro/install/).
+You may need to hack `start.sh` if you are not running Debian or Ubuntu.
+
+The app will use a local SQLite database. It uses the standard
+Django authentication database. All users get access to all vhosts and
+resources.
+
+The app recognises two users (to make the setup easier): `admin` and `someuser`.
+Passwords for those users do not matter. User `admin` is tagged as `administrator`.
+
+### HTTP Endpoint Examples
+
+`urls.py` and `auth/views.py` are the main modules that describe HTTP routes and
+views (endpoints).
+
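+With the development server running on port 8000, the endpoints can be
+exercised directly. A minimal sketch (the values are placeholders; any
+password is accepted for the two built-in users):
+
+``` shell
+curl 'http://localhost:8000/auth/user?username=admin&password=anything'
+# => allow administrator
+curl 'http://localhost:8000/auth/user?username=someuser&password=anything'
+# => allow
+```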
+
+## Spring Boot Example
+
+`rabbitmq_auth_backend_spring_boot` is a simple [Spring Boot](https://projects.spring.io/spring-boot/)
+application that rabbitmq-auth-backend-http can authenticate against. It's really
+not designed to be anything other than an example.
+
+### Running the Example
+
+Import the example as a Maven project in your favorite IDE or run it directly from the command line:
+
+``` shell
+mvn spring-boot:run
+```
+
+The application listens on port 8080.
+
+### HTTP Endpoint Examples
+
+Have a look at the `AuthBackendHttpController`. There's only one user, `guest`,
+with the password `guest`. This implementation also checks that the
+routing key starts with an `a` when publishing to a topic exchange
+or consuming from a topic (an example of [topic authorisation](http://next.rabbitmq.com/access-control.html#topic-authorisation)).
+
+### rabbitmq.config Example
+
+Below is a [RabbitMQ config file](http://www.rabbitmq.com/configure.html) example to go with this
+example:
+
+``` ini
+auth_backends.1 = http
+
+auth_http.http_method = post
+auth_http.user_path = http://localhost:8080/auth/user
+auth_http.vhost_path = http://localhost:8080/auth/vhost
+auth_http.resource_path = http://localhost:8080/auth/resource
+auth_http.topic_path = http://localhost:8080/auth/topic
+```
+
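+Since the config above sets `auth_http.http_method = post`, the endpoints
+expect form-encoded POST parameters. A quick, hypothetical check against the
+running application:
+
+``` shell
+curl -d 'username=guest&password=guest' http://localhost:8080/auth/user
+# => an "allow" response; invalid credentials yield "deny"
+```
+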
+## Spring Boot Kotlin Example
+
+`rabbitmq_auth_backend_spring_boot_kotlin` is a simple [Spring Boot](https://projects.spring.io/spring-boot/)
+application written in Kotlin that rabbitmq-auth-backend-http can authenticate against. It's really
+not designed to be anything other than an example.
+It contains endpoint examples using the recommended POST method and an example RabbitMQ configuration.
+It can be run the same way as the above example.
+
+
+## ASP.NET Web API Example
+
+`rabbitmq_auth_backend_webapi_dotnet` is a very minimalistic ASP.NET Web API application
+the plugin can authenticate against. It's really
+**not designed to be anything other than an example**.
+
+### Running the Example
+
+Open `WebApiHttpAuthService.csproj` in Visual Studio 2017. More details about prerequisites can be found below.
+
+As with other examples, RabbitMQ [authentication and authorization backends](http://www.rabbitmq.com/access-control.html) must be configured
+to use this plugin and the endpoints provided by this example app.
+
+Then build the solution and run it from Visual Studio.
+`Controllers/AuthController.cs` contains the authentication and authorization logic.
+By default, all users get access to all vhosts and resources.
+User `authuser` will be denied access.
+
+### HTTP Endpoint Examples
+
+Have a look at `AuthController`.
+
+### Development Environment
+
+This example was developed using
+
+ * .NET Framework 4.5
+ * Visual Studio 2017
+ * Windows 10 and IIS v10.0
+
+It is possible to build and run the service from Visual Studio and browse the endpoint without using IIS.
+The port number may vary but will likely be `62190`.
+
+When the example is hosted on IIS, port 80 will be used by default.
+
+## ASP.NET Core 2.1 Example
+
+`rabbitmq_auth_backend_webapi_dotnetcore` is a modification of the `rabbitmq_auth_backend_webapi_dotnet` example
+designed for ASP.NET Core 2.1. It's very similar to the original version but it also adds some static typing
+for requests and responses.
+
+### Running the Example
+
+Open the solution file, `WebApiHttpAuthService.sln`, in Visual Studio 2017 or later.
+
+As with other examples, RabbitMQ [authentication and authorization backends](http://www.rabbitmq.com/access-control.html) must be configured
+to use this plugin and the endpoints provided by this example app.
+
+Then build the solution and run it from Visual Studio.
+`Controllers/AuthController.cs` contains the authentication and authorization logic.
+By default, all users get access to all vhosts and resources.
+User `authuser` will be denied access.
+
+### HTTP Endpoint Examples
+
+Have a look at `AuthController`.
+
+### Development Environment
+
+This example was developed using
+
+ * ASP.NET Core 2.1
+ * Visual Studio 2017 (Visual Studio Code)
+ * Windows 10
+
+It is possible to build and run the service from Visual Studio using IIS, or from Visual Studio or Visual Studio Code using the cross-platform Kestrel server.
+
+
+## PHP Example
+
+`rabbitmq_auth_backend_php` is a minimalistic PHP application that this plugin can authenticate against.
+It's really not designed to be anything other than an example.
+
+### Running the Example
+
+The example requires PHP >= 5.4 and [Composer](https://getcomposer.org/).
+
+The `rabbitmq-auth-backend-http-php` library depends on the `symfony/security` and `symfony/http-foundation` components.
+Go to the `rabbitmq_auth_backend_php` folder and run `composer install`.
+
+``` shell
+cd rabbitmq_auth_backend_php/
+composer install
+```
+
+Now you can run the PHP built-in web server (available since PHP 5.4), serving at http://127.0.0.1:8080:
+
+``` shell
+composer start
+```
+
+Ensure the log file `rabbitmq-auth-backend-http/examples/rabbitmq_auth_backend_php/var/log.log` is writable.
+
+Go to `http://localhost:8080/user.php?username=Anthony&password=anthony-password`; everything works properly if you see `Allow administrator`.
+
+
+### HTTP Endpoint Examples
+
+Have a look at `bootstrap.php`. By default, this example implements the same authorization rules as RabbitMQ.
+
+Users list:
+
+| User | Password | Is admin | Vhost | Configure regex | Write regex | Read regex | Tags |
+|--|--|--|--|--|--|--|--|
+| Anthony | anthony-password | ✔️ | All | All | All | All | administrator |
+| James | bond | | / | .* | .* | .* | management |
+| Roger | rabbit | | | | | | monitoring |
+| Bunny | bugs | | | | | | policymaker |
+
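+For example, with the users above and the server started via `composer start`
+(the exact response strings come from the `rabbitmq-auth-backend-http-php` library):
+
+``` shell
+curl 'http://localhost:8080/user.php?username=Anthony&password=anthony-password'
+# => Allow administrator
+curl 'http://localhost:8080/user.php?username=Anthony&password=wrong'
+# => a deny response
+```
+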
+### rabbitmq.config Example
+
+Below is a [RabbitMQ config file](http://www.rabbitmq.com/configure.html) example to go with this
+example:
+
+``` ini
+auth_backends.1 = internal
+auth_backends.2 = http
+
+auth_http.user_path = http://localhost:62190/auth/user.php
+auth_http.vhost_path = http://localhost:62190/auth/vhost.php
+auth_http.resource_path = http://localhost:62190/auth/resource.php
+auth_http.topic_path = http://localhost:62190/auth/topic.php
+```
+
+See [RabbitMQ Access Control guide](http://www.rabbitmq.com/access-control.html) for more information.
+
+## Running with Docker Compose
+
+A RabbitMQ node can be started using the provided `docker-compose.yml` file.
+There's also a file that sets up the Django example above:
+
+```bash
+docker-compose -f docker-compose.yml -f rabbitmq_auth_backend_django/docker-compose.yml up --build
+```
+
+Another file, `docker/nodered/docker-compose.yml`, will run [Node-RED](https://nodered.org/) on port 1880
+with a configured MQTT client that connects to RabbitMQ and performs basic operations, triggering
+requests to the example service:
+
+```bash
+docker-compose -f docker-compose.yml -f rabbitmq_auth_backend_django/docker-compose.yml -f docker/nodered/docker-compose.yml up --build
+```
+
+Edit the provided [config file](docker/rabbitmq.conf) to enable caching and logging settings.
diff --git a/deps/rabbitmq_auth_backend_http/examples/docker-compose.yml b/deps/rabbitmq_auth_backend_http/examples/docker-compose.yml
new file mode 100644
index 0000000000..15109296c3
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/examples/docker-compose.yml
@@ -0,0 +1,12 @@
+version: "3"
+
+services:
+ rabbit:
+ image: rabbitmq:management
+ volumes:
+ - ./docker/rabbitmq.conf:/etc/rabbitmq/rabbitmq.conf
+ - ./docker/enabled_plugins:/etc/rabbitmq/enabled_plugins
+ ports:
+ - 15672:15672
+ - 1883:1883
+
diff --git a/deps/rabbitmq_auth_backend_http/examples/docker/enabled_plugins b/deps/rabbitmq_auth_backend_http/examples/docker/enabled_plugins
new file mode 100644
index 0000000000..2016106225
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/examples/docker/enabled_plugins
@@ -0,0 +1 @@
+[rabbitmq_management,rabbitmq_auth_backend_cache,rabbitmq_auth_backend_http,rabbitmq_mqtt].
diff --git a/deps/rabbitmq_auth_backend_http/examples/docker/nodered/Dockerfile b/deps/rabbitmq_auth_backend_http/examples/docker/nodered/Dockerfile
new file mode 100644
index 0000000000..0e0c6f15d6
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/examples/docker/nodered/Dockerfile
@@ -0,0 +1,5 @@
+FROM nodered/node-red-docker
+
+COPY flows.json /data/
+COPY flows_cred.json /data/
+COPY settings.js /data/
diff --git a/deps/rabbitmq_auth_backend_http/examples/docker/nodered/docker-compose.yml b/deps/rabbitmq_auth_backend_http/examples/docker/nodered/docker-compose.yml
new file mode 100644
index 0000000000..82680ffa41
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/examples/docker/nodered/docker-compose.yml
@@ -0,0 +1,8 @@
+version: "3"
+
+services:
+ nodered:
+ build: ./docker/nodered/
+ ports:
+ - 1880:1880
+
diff --git a/deps/rabbitmq_auth_backend_http/examples/docker/nodered/flows.json b/deps/rabbitmq_auth_backend_http/examples/docker/nodered/flows.json
new file mode 100644
index 0000000000..f8fd2a9146
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/examples/docker/nodered/flows.json
@@ -0,0 +1 @@
+[{"id":"782c13cf.81627c","type":"tab","label":"Flow 1","disabled":false,"info":""},{"id":"ebd3e611.cdef38","type":"mqtt-broker","z":"","name":"as admin","broker":"rabbit","port":"1883","clientid":"adminclienid","usetls":false,"compatmode":true,"keepalive":"60","cleansession":true,"birthTopic":"","birthQos":"0","birthPayload":"","closeTopic":"","closeQos":"0","closePayload":"","willTopic":"","willQos":"0","willPayload":""},{"id":"90e1a809.a7ff5","type":"mqtt-broker","z":"","name":"someuser","broker":"rabbit","port":"1883","clientid":"someuserclientid","usetls":false,"compatmode":true,"keepalive":"60","cleansession":true,"birthTopic":"","birthQos":"0","birthPayload":"","closeTopic":"","closeQos":"0","closePayload":"","willTopic":"","willQos":"0","willPayload":""},{"id":"dac58353.662ad","type":"mqtt in","z":"782c13cf.81627c","name":"","topic":"some/topic/as/admin","qos":"2","broker":"ebd3e611.cdef38","x":160,"y":100,"wires":[["efe47f32.30786"]]},{"id":"f63b9dd3.36cde","type":"comment","z":"782c13cf.81627c","name":"admin","info":"","x":110,"y":60,"wires":[]},{"id":"e948a0d9.babb18","type":"mqtt out","z":"782c13cf.81627c","name":"","topic":"some/topic/as/admin","qos":"","retain":"","broker":"ebd3e611.cdef38","x":360,"y":160,"wires":[]},{"id":"cf18517f.444ef8","type":"inject","z":"782c13cf.81627c","name":"","topic":"","payload":"","payloadType":"date","repeat":"","crontab":"","once":false,"onceDelay":0.1,"x":120,"y":160,"wires":[["e948a0d9.babb18"]]},{"id":"efe47f32.30786","type":"debug","z":"782c13cf.81627c","name":"","active":true,"tosidebar":true,"console":false,"tostatus":false,"complete":"false","x":430,"y":100,"wires":[]},{"id":"c7046417.f9b368","type":"mqtt in","z":"782c13cf.81627c","name":"","topic":"some/topic/as/someuser","qos":"2","broker":"90e1a809.a7ff5","x":170,"y":280,"wires":[["be230f6b.e78ec"]]},{"id":"be230f6b.e78ec","type":"debug","z":"782c13cf.81627c","name":"","active":true,"tosidebar":true,"console":false,"tostatus":false,"complete":"false","x":420,"y":280,"wires":[]},{"id":"f3153151.f2bee8","type":"comment","z":"782c13cf.81627c","name":"someuser","info":"","x":120,"y":240,"wires":[]}] \ No newline at end of file
diff --git a/deps/rabbitmq_auth_backend_http/examples/docker/nodered/flows_cred.json b/deps/rabbitmq_auth_backend_http/examples/docker/nodered/flows_cred.json
new file mode 100644
index 0000000000..bfcee00920
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/examples/docker/nodered/flows_cred.json
@@ -0,0 +1 @@
+{"$":"00ae52fd43067c0f54608e6e26ba669f6VIl433Q1ap8mC1z8Gq4TymUQc4xgTTEKUHO5khripHCamXAJXfsezBEsKCPmAhv/1Q8GYVFbDdFH5EcYUs6u5MV4+LJ/jL01toZr6utgIoKYQmjpKoh4fHhScYrezMpKoNglVsV+HSOd91GLBGvyzldmh2JJrAMyZEPdCkDg+fobQ=="} \ No newline at end of file
diff --git a/deps/rabbitmq_auth_backend_http/examples/docker/nodered/settings.js b/deps/rabbitmq_auth_backend_http/examples/docker/nodered/settings.js
new file mode 100644
index 0000000000..5188aba0c9
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/examples/docker/nodered/settings.js
@@ -0,0 +1,260 @@
+/**
+ * Copyright JS Foundation and other contributors, http://js.foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ **/
+
+// The `https` setting requires the `fs` module. Uncomment the following
+// to make it available:
+//var fs = require("fs");
+
+module.exports = {
+ // the tcp port that the Node-RED web server is listening on
+ uiPort: process.env.PORT || 1880,
+
+ // By default, the Node-RED UI accepts connections on all IPv4 interfaces.
+ // To listen on all IPv6 addresses, set uiHost to "::",
+ // The following property can be used to listen on a specific interface. For
+ // example, the following would only allow connections from the local machine.
+ //uiHost: "127.0.0.1",
+
+ // Retry time in milliseconds for MQTT connections
+ mqttReconnectTime: 15000,
+
+ // Retry time in milliseconds for Serial port connections
+ serialReconnectTime: 15000,
+
+ // Retry time in milliseconds for TCP socket connections
+ //socketReconnectTime: 10000,
+
+ // Timeout in milliseconds for TCP server socket connections
+ // defaults to no timeout
+ //socketTimeout: 120000,
+
+ // Maximum number of messages to wait in queue while attempting to connect to TCP socket
+ // defaults to 1000
+ //tcpMsgQueueSize: 2000,
+
+ // Timeout in milliseconds for HTTP request connections
+ // defaults to 120 seconds
+ //httpRequestTimeout: 120000,
+
+ // The maximum length, in characters, of any message sent to the debug sidebar tab
+ debugMaxLength: 1000,
+
+ // The maximum number of messages nodes will buffer internally as part of their
+ // operation. This applies across a range of nodes that operate on message sequences.
+ // defaults to no limit. A value of 0 also means no limit is applied.
+ //nodeMaxMessageBufferLength: 0,
+
+ // To disable the option for using local files for storing keys and certificates in the TLS configuration
+ // node, set this to true
+ //tlsConfigDisableLocalFiles: true,
+
+ // Colourise the console output of the debug node
+ //debugUseColors: true,
+
+ // The file containing the flows. If not set, it defaults to flows_<hostname>.json
+ //flowFile: 'flows.json',
+
+ // To enable pretty-printing of the flow within the flow file, set the following
+ // property to true:
+ //flowFilePretty: true,
+
+ // By default, credentials are encrypted in storage using a generated key. To
+ // specify your own secret, set the following property.
+ // If you want to disable encryption of credentials, set this property to false.
+ // Note: once you set this property, do not change it - doing so will prevent
+ // node-red from being able to decrypt your existing credentials and they will be
+ // lost.
+ credentialSecret: "a-secret-key",
+
+ // By default, all user data is stored in the Node-RED install directory. To
+ // use a different location, the following property can be used
+ //userDir: '/home/nol/.node-red/',
+
+ // Node-RED scans the `nodes` directory in the install directory to find nodes.
+ // The following property can be used to specify an additional directory to scan.
+ //nodesDir: '/home/nol/.node-red/nodes',
+
+ // By default, the Node-RED UI is available at http://localhost:1880/
+ // The following property can be used to specify a different root path.
+ // If set to false, this is disabled.
+ //httpAdminRoot: '/admin',
+
+ // Some nodes, such as HTTP In, can be used to listen for incoming http requests.
+ // By default, these are served relative to '/'. The following property
+ // can be used to specify a different root path. If set to false, this is
+ // disabled.
+ //httpNodeRoot: '/red-nodes',
+
+ // The following property can be used in place of 'httpAdminRoot' and 'httpNodeRoot',
+ // to apply the same root to both parts.
+ //httpRoot: '/red',
+
+ // When httpAdminRoot is used to move the UI to a different root path, the
+ // following property can be used to identify a directory of static content
+ // that should be served at http://localhost:1880/.
+ //httpStatic: '/home/nol/node-red-static/',
+
+ // The maximum size of HTTP request that will be accepted by the runtime api.
+ // Default: 5mb
+ //apiMaxLength: '5mb',
+
+ // If you installed the optional node-red-dashboard you can set its path
+ // relative to httpRoot
+ //ui: { path: "ui" },
+
+ // Securing Node-RED
+ // -----------------
+ // To password protect the Node-RED editor and admin API, the following
+ // property can be used. See http://nodered.org/docs/security.html for details.
+ //adminAuth: {
+ // type: "credentials",
+ // users: [{
+ // username: "admin",
+ // password: "$2a$08$zZWtXTja0fB1pzD4sHCMyOCMYz2Z6dNbM6tl8sJogENOMcxWV9DN.",
+ // permissions: "*"
+ // }]
+ //},
+
+ // To password protect the node-defined HTTP endpoints (httpNodeRoot), or
+ // the static content (httpStatic), the following properties can be used.
+ // The pass field is a bcrypt hash of the password.
+ // See http://nodered.org/docs/security.html#generating-the-password-hash
+ //httpNodeAuth: {user:"user",pass:"$2a$08$zZWtXTja0fB1pzD4sHCMyOCMYz2Z6dNbM6tl8sJogENOMcxWV9DN."},
+ //httpStaticAuth: {user:"user",pass:"$2a$08$zZWtXTja0fB1pzD4sHCMyOCMYz2Z6dNbM6tl8sJogENOMcxWV9DN."},
+
+ // The following property can be used to enable HTTPS
+ // See http://nodejs.org/api/https.html#https_https_createserver_options_requestlistener
+ // for details on its contents.
+ // See the comment at the top of this file on how to load the `fs` module used by
+ // this setting.
+ //
+ //https: {
+ // key: fs.readFileSync('privatekey.pem'),
+ // cert: fs.readFileSync('certificate.pem')
+ //},
+
+ // The following property can be used to cause insecure HTTP connections to
+ // be redirected to HTTPS.
+ //requireHttps: true,
+
+ // The following property can be used to disable the editor. The admin API
+ // is not affected by this option. To disable both the editor and the admin
+ // API, use either the httpRoot or httpAdminRoot properties
+ //disableEditor: false,
+
+ // The following property can be used to configure cross-origin resource sharing
+ // in the HTTP nodes.
+ // See https://github.com/troygoode/node-cors#configuration-options for
+ // details on its contents. The following is a basic permissive set of options:
+ //httpNodeCors: {
+ // origin: "*",
+ // methods: "GET,PUT,POST,DELETE"
+ //},
+
+ // If you need to set an http proxy please set an environment variable
+ // called http_proxy (or HTTP_PROXY) outside of Node-RED in the operating system.
+ // For example - http_proxy=http://myproxy.com:8080
+ // (Setting it here will have no effect)
+ // You may also specify no_proxy (or NO_PROXY) to supply a comma separated
+ // list of domains to not proxy, eg - no_proxy=.acme.co,.acme.co.uk
+
+ // The following property can be used to add a custom middleware function
+ // in front of all http in nodes. This allows custom authentication to be
+ // applied to all http in nodes, or any other sort of common request processing.
+ //httpNodeMiddleware: function(req,res,next) {
+ // // Handle/reject the request, or pass it on to the http in node by calling next();
+ // // Optionally skip our rawBodyParser by setting this to true;
+ // //req.skipRawBodyParser = true;
+ // next();
+ //},
+
+ // The following property can be used to verify websocket connection attempts.
+ // This allows, for example, the HTTP request headers to be checked to ensure
+ // they include valid authentication information.
+ //webSocketNodeVerifyClient: function(info) {
+ // // 'info' has three properties:
+ // // - origin : the value in the Origin header
+ // // - req : the HTTP request
+ // // - secure : true if req.connection.authorized or req.connection.encrypted is set
+ // //
+ // // The function should return true if the connection should be accepted, false otherwise.
+ // //
+ // // Alternatively, if this function is defined to accept a second argument, callback,
+ // // it can be used to verify the client asynchronously.
+ // // The callback takes three arguments:
+ // // - result : boolean, whether to accept the connection or not
+ // // - code : if result is false, the HTTP error status to return
+ // // - reason: if result is false, the HTTP reason string to return
+ //},
+
+ // Anything in this hash is globally available to all functions.
+ // It is accessed as context.global.
+ // eg:
+ // functionGlobalContext: { os:require('os') }
+ // can be accessed in a function block as:
+ // context.global.os
+
+ functionGlobalContext: {
+ // os:require('os'),
+ // jfive:require("johnny-five"),
+ // j5board:require("johnny-five").Board({repl:false})
+ },
+
+ // Context Storage
+ // The following property can be used to enable context storage. The configuration
+ // provided here will enable file-based context that flushes to disk every 30 seconds.
+ // Refer to the documentation for further options: https://nodered.org/docs/api/context/
+ //
+ //contextStorage: {
+ // default: {
+ // module:"localfilesystem"
+ // },
+ //},
+
+ // The following property can be used to order the categories in the editor
+ // palette. If a node's category is not in the list, the category will get
+ // added to the end of the palette.
+ // If not set, the following default order is used:
+ //paletteCategories: ['subflows', 'input', 'output', 'function', 'social', 'mobile', 'storage', 'analysis', 'advanced'],
+
+ // Configure the logging output
+ logging: {
+ // Only console logging is currently supported
+ console: {
+ // Level of logging to be recorded. Options are:
+ // fatal - only those errors which make the application unusable should be recorded
+ // error - record errors which are deemed fatal for a particular request + fatal errors
+ // warn - record problems which are non fatal + errors + fatal errors
+ // info - record information about the general running of the application + warn + error + fatal errors
+ // debug - record information which is more verbose than info + info + warn + error + fatal errors
+ // trace - record very detailed logging + debug + info + warn + error + fatal errors
+ // off - turn off all logging (doesn't affect metrics or audit)
+ level: "info",
+ // Whether or not to include metric events in the log output
+ metrics: false,
+ // Whether or not to include audit events in the log output
+ audit: false
+ }
+ },
+
+ // Customising the editor
+ editorTheme: {
+ projects: {
+ // To enable the Projects feature, set this value to true
+ enabled: false
+ }
+ },
+}
diff --git a/deps/rabbitmq_auth_backend_http/examples/docker/rabbitmq.conf b/deps/rabbitmq_auth_backend_http/examples/docker/rabbitmq.conf
new file mode 100644
index 0000000000..12effb76ac
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/examples/docker/rabbitmq.conf
@@ -0,0 +1,22 @@
+loopback_users.guest = false
+listeners.tcp.default = 5672
+management.listener.port = 15672
+management.listener.ssl = false
+
+auth_backends.1 = http
+
+## This configures rabbitmq_auth_backend_cache that delegates to
+## the HTTP backend. If using this, make sure to comment the
+## auth_backends.1 line above.
+##
+# auth_backends.1 = cache
+#
+# auth_cache.cached_backend = http
+# auth_cache.cache_ttl = 5000
+
+
+auth_http.http_method = get
+auth_http.user_path = http://auth-backend:8000/auth/user
+auth_http.vhost_path = http://auth-backend:8000/auth/vhost
+auth_http.resource_path = http://auth-backend:8000/auth/resource
+auth_http.topic_path = http://auth-backend:8000/auth/topic
diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_django/.gitignore b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_django/.gitignore
new file mode 100644
index 0000000000..6e9bc0cd9f
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_django/.gitignore
@@ -0,0 +1 @@
+*.sqlite3 \ No newline at end of file
diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_django/Dockerfile b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_django/Dockerfile
new file mode 100644
index 0000000000..26ba7f9283
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_django/Dockerfile
@@ -0,0 +1,15 @@
+FROM python:3.7-alpine
+
+ENV PYTHONDONTWRITEBYTECODE 1
+ENV PYTHONUNBUFFERED 1
+
+WORKDIR /usr/src/app
+
+RUN pip install django
+
+
+COPY . /usr/src/app
+
+EXPOSE 8000
+
+ENTRYPOINT ["/usr/src/app/start.sh"]
diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_django/docker-compose.yml b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_django/docker-compose.yml
new file mode 100644
index 0000000000..dba3012840
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_django/docker-compose.yml
@@ -0,0 +1,6 @@
+version: "3"
+
+services:
+ auth-backend:
+ build: ./rabbitmq_auth_backend_django
+
diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_django/manage.py b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_django/manage.py
new file mode 100755
index 0000000000..1ae2e80b4b
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_django/manage.py
@@ -0,0 +1,22 @@
+#!/usr/bin/env python
+import os
+import sys
+
+if __name__ == "__main__":
+ os.environ.setdefault("DJANGO_SETTINGS_MODULE", "rabbitmq_auth_backend_django.settings")
+ try:
+ from django.core.management import execute_from_command_line
+ except ImportError:
+ # The above import may fail for some other reason. Ensure that the
+ # issue is really that Django is missing to avoid masking other
+ # exceptions on Python 2.
+ try:
+ import django
+ except ImportError:
+ raise ImportError(
+ "Couldn't import Django. Are you sure it's installed and "
+ "available on your PYTHONPATH environment variable? Did you "
+ "forget to activate a virtual environment?"
+ )
+ raise
+ execute_from_command_line(sys.argv)
diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_django/rabbitmq_auth_backend_django/__init__.py b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_django/rabbitmq_auth_backend_django/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_django/rabbitmq_auth_backend_django/__init__.py
diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_django/rabbitmq_auth_backend_django/auth/__init__.py b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_django/rabbitmq_auth_backend_django/auth/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_django/rabbitmq_auth_backend_django/auth/__init__.py
diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_django/rabbitmq_auth_backend_django/auth/models.py b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_django/rabbitmq_auth_backend_django/auth/models.py
new file mode 100644
index 0000000000..71a8362390
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_django/rabbitmq_auth_backend_django/auth/models.py
@@ -0,0 +1,3 @@
+from django.db import models
+
+# Create your models here.
diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_django/rabbitmq_auth_backend_django/auth/tests.py b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_django/rabbitmq_auth_backend_django/auth/tests.py
new file mode 100644
index 0000000000..2247054b35
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_django/rabbitmq_auth_backend_django/auth/tests.py
@@ -0,0 +1,23 @@
+"""
+This file demonstrates two different styles of tests (one doctest and one
+unittest). These will both pass when you run "manage.py test".
+
+Replace these with more appropriate tests for your application.
+"""
+
+from django.test import TestCase
+
+class SimpleTest(TestCase):
+ def test_basic_addition(self):
+ """
+ Tests that 1 + 1 always equals 2.
+ """
+ self.assertEqual(1 + 1, 2)
+
+__test__ = {"doctest": """
+Another way to test that 1 + 1 is equal to 2.
+
+>>> 1 + 1 == 2
+True
+"""}
+
diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_django/rabbitmq_auth_backend_django/auth/views.py b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_django/rabbitmq_auth_backend_django/auth/views.py
new file mode 100644
index 0000000000..066623b962
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_django/rabbitmq_auth_backend_django/auth/views.py
@@ -0,0 +1,41 @@
+from django.http import HttpResponse
+from django.contrib.auth import authenticate
+from django.views.decorators.csrf import csrf_exempt
+import logging
+
+logger = logging.getLogger('auth_backend')
+
+@csrf_exempt
+def user(request):
+ logger.info(request.GET)
+ if 'username' in request.GET and 'password' in request.GET:
+ username = request.GET['username']
+ password = request.GET['password']
+ if username == 'admin':
+ return HttpResponse("allow administrator")
+
+ if username == 'someuser':
+ return HttpResponse("allow")
+
+ user = authenticate(username=username, password=password)
+ if user:
+ if user.is_superuser:
+ return HttpResponse("allow administrator")
+ else:
+ return HttpResponse("allow management")
+ return HttpResponse("deny")
+
+@csrf_exempt
+def vhost(request):
+ logger.info(request.GET)
+ return HttpResponse("allow")
+
+@csrf_exempt
+def resource(request):
+ logger.info(request.GET)
+ return HttpResponse("allow")
+
+@csrf_exempt
+def topic(request):
+ logger.info(request.GET)
+ return HttpResponse("allow")
diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_django/rabbitmq_auth_backend_django/settings.py b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_django/rabbitmq_auth_backend_django/settings.py
new file mode 100644
index 0000000000..df60998885
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_django/rabbitmq_auth_backend_django/settings.py
@@ -0,0 +1,155 @@
+"""
+Django settings for rabbitmq_auth_backend_django project.
+
+Generated by 'django-admin startproject' using Django 1.10.5.
+
+For more information on this file, see
+https://docs.djangoproject.com/en/1.10/topics/settings/
+
+For the full list of settings and their values, see
+https://docs.djangoproject.com/en/1.10/ref/settings/
+"""
+
+import os
+
+# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
+BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+
+
+# Quick-start development settings - unsuitable for production
+# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
+
+# SECURITY WARNING: keep the secret key used in production secret!
+SECRET_KEY = '_wqlwxs-s(na_@1-@3=6uc2=-ka3f)))%-v#lgx4een8^#u92c'
+
+# SECURITY WARNING: don't run with debug turned on in production!
+DEBUG = True
+
+ALLOWED_HOSTS = ['0.0.0.0', '127.0.0.1', 'localhost', 'auth-backend']
+
+
+# Application definition
+
+INSTALLED_APPS = [
+ 'django.contrib.admin',
+ 'django.contrib.auth',
+ 'django.contrib.contenttypes',
+ 'django.contrib.sessions',
+ 'django.contrib.messages',
+ 'django.contrib.staticfiles',
+]
+
+MIDDLEWARE = [
+ 'django.middleware.security.SecurityMiddleware',
+ 'django.contrib.sessions.middleware.SessionMiddleware',
+ 'django.middleware.common.CommonMiddleware',
+ 'django.middleware.csrf.CsrfViewMiddleware',
+ 'django.contrib.auth.middleware.AuthenticationMiddleware',
+ 'django.contrib.messages.middleware.MessageMiddleware',
+ 'django.middleware.clickjacking.XFrameOptionsMiddleware',
+]
+
+ROOT_URLCONF = 'rabbitmq_auth_backend_django.urls'
+
+TEMPLATES = [
+ {
+ 'BACKEND': 'django.template.backends.django.DjangoTemplates',
+ 'DIRS': [],
+ 'APP_DIRS': True,
+ 'OPTIONS': {
+ 'context_processors': [
+ 'django.template.context_processors.debug',
+ 'django.template.context_processors.request',
+ 'django.contrib.auth.context_processors.auth',
+ 'django.contrib.messages.context_processors.messages',
+ ],
+ },
+ },
+]
+
+WSGI_APPLICATION = 'rabbitmq_auth_backend_django.wsgi.application'
+
+
+# Database
+# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
+
+DATABASES = {
+ 'default': {
+ 'ENGINE': 'django.db.backends.sqlite3',
+ 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
+ }
+}
+
+
+# Password validation
+# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
+
+AUTH_PASSWORD_VALIDATORS = [
+ {
+ 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
+ },
+ {
+ 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
+ },
+ {
+ 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
+ },
+ {
+ 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
+ },
+]
+
+
+# Internationalization
+# https://docs.djangoproject.com/en/1.10/topics/i18n/
+
+LANGUAGE_CODE = 'en-us'
+
+TIME_ZONE = 'UTC'
+
+USE_I18N = True
+
+USE_L10N = True
+
+USE_TZ = True
+
+
+# Static files (CSS, JavaScript, Images)
+# https://docs.djangoproject.com/en/1.10/howto/static-files/
+
+STATIC_URL = '/static/'
+
+LOGGING = {
+ 'version': 1,
+ 'formatters': {
+ 'verbose': {
+ 'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
+ },
+ 'simple': {
+ 'format': '%(levelname)s %(message)s'
+ },
+ },
+ 'handlers': {
+ 'console': {
+ 'level': 'DEBUG',
+ 'class': 'logging.StreamHandler',
+ 'formatter': 'simple'
+ },
+ },
+ 'loggers': {
+ 'django': {
+ 'handlers': ['console'],
+ 'propagate': True,
+ },
+ 'auth_backend': {
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': True,
+ },
+ }
+}
+#
+# if DEBUG:
+# # make all loggers use the console.
+# for logger in LOGGING['loggers']:
+# LOGGING['loggers'][logger]['handlers'] = ['console']
diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_django/rabbitmq_auth_backend_django/urls.py b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_django/rabbitmq_auth_backend_django/urls.py
new file mode 100644
index 0000000000..671d4e0215
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_django/rabbitmq_auth_backend_django/urls.py
@@ -0,0 +1,26 @@
+"""rabbitmq_auth_backend_django URL Configuration
+
+The `urlpatterns` list routes URLs to views. For more information please see:
+ https://docs.djangoproject.com/en/1.10/topics/http/urls/
+Examples:
+Function views
+ 1. Add an import: from my_app import views
+ 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
+Class-based views
+ 1. Add an import: from other_app.views import Home
+ 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
+Including another URLconf
+ 1. Import the include() function: from django.conf.urls import url, include
+ 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
+"""
+from django.conf.urls import url
+from django.contrib import admin
+import rabbitmq_auth_backend_django.auth.views as views
+
+urlpatterns = [
+ url(r'^auth/user', views.user),
+ url(r'^auth/vhost', views.vhost),
+ url(r'^auth/resource', views.resource),
+ url(r'^auth/topic', views.topic),
+ url(r'^admin/', admin.site.urls),
+]
diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_django/rabbitmq_auth_backend_django/wsgi.py b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_django/rabbitmq_auth_backend_django/wsgi.py
new file mode 100644
index 0000000000..70a321f86f
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_django/rabbitmq_auth_backend_django/wsgi.py
@@ -0,0 +1,16 @@
+"""
+WSGI config for rabbitmq_auth_backend_django project.
+
+It exposes the WSGI callable as a module-level variable named ``application``.
+
+For more information on this file, see
+https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
+"""
+
+import os
+
+from django.core.wsgi import get_wsgi_application
+
+os.environ.setdefault("DJANGO_SETTINGS_MODULE", "rabbitmq_auth_backend_django.settings")
+
+application = get_wsgi_application()
diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_django/start.sh b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_django/start.sh
new file mode 100755
index 0000000000..8ddb5c153d
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_django/start.sh
@@ -0,0 +1,3 @@
+#!/bin/sh
+python manage.py migrate
+python manage.py runserver 0.0.0.0:8000
diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_php/.gitignore b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_php/.gitignore
new file mode 100644
index 0000000000..767f63f730
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_php/.gitignore
@@ -0,0 +1,3 @@
+vendor
+var
+composer.lock
diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_php/composer.json b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_php/composer.json
new file mode 100644
index 0000000000..ddf3e438e8
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_php/composer.json
@@ -0,0 +1,19 @@
+{
+ "description": "Example RabbitMQ http auth backend php",
+ "type": "library",
+ "license": "MIT",
+ "authors": [
+ {
+ "name": "Anthony",
+ "email": "instabledesign@gmail.com"
+ }
+ ],
+ "require": {
+ "php": ">=5.4",
+ "symftony/rabbitmq-auth-backend-http-php": "dev-master",
+ "monolog/monolog": "^1.23"
+ },
+ "scripts": {
+ "start": "php -S 127.0.0.1:8080 -t src"
+ }
+}
diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_php/src/bootstrap.php b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_php/src/bootstrap.php
new file mode 100644
index 0000000000..f7bf096faf
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_php/src/bootstrap.php
@@ -0,0 +1,136 @@
+<?php
+
+require_once __DIR__.'/../vendor/autoload.php';
+
+use Symfony\Component\Security\Core\Authentication\Token\Storage\TokenStorage;
+use Symfony\Component\Security\Core\User\InMemoryUserProvider;
+use Symfony\Component\Security\Core\Authentication\AuthenticationProviderManager;
+use Symfony\Component\Security\Core\Authorization\AccessDecisionManager;
+use Symfony\Component\Security\Core\Authorization\AuthorizationChecker;
+use RabbitMQAuth\Authentication\Authenticator;
+use RabbitMQAuth\Authentication\ChainAuthenticationChecker;
+use RabbitMQAuth\Authentication\UserPasswordTokenChecker;
+use RabbitMQAuth\Authentication\UserTokenChecker;
+use RabbitMQAuth\Authorization\DefaultVoter;
+use RabbitMQAuth\Controller\AuthController;
+use RabbitMQAuth\Security;
+use Monolog\Handler\StreamHandler;
+use Monolog\Logger;
+
+/**
+ * You can edit the following users and their roles (tags)
+ */
+$userProvider = new InMemoryUserProvider(array(
+ //Admin user
+ 'Anthony' => array(
+ 'password' => 'anthony-password',
+ 'roles' => array(
+ 'administrator',
+ // 'impersonator', // report to https://www.rabbitmq.com/validated-user-id.html
+ ),
+ ),
+ 'James' => array(
+ 'password' => 'bond',
+ 'roles' => array(
+ 'management',
+ ),
+ ),
+ 'Roger' => array(
+ 'password' => 'rabbit',
+ 'roles' => array(
+ 'monitoring',
+ ),
+ ),
+ 'Bunny' => array(
+ 'password' => 'bugs',
+ 'roles' => array(
+ 'policymaker',
+ ),
+ ),
+));
+
+/**
+ * You can edit the user permissions here
+ *
+ * $permissions = array(
+ * '{USERNAME}' => array(
+ * '{VHOST}' => array(
+ * 'ip' => '{REGEX_IP}',
+ * 'read' => '{REGEX_READ}',
+ * 'write' => '{REGEX_WRITE}',
+ * 'configure' => '{REGEX_CONFIGURE}',
+ * ),
+ * ),
+ * );
+ */
+$permissions = array(
+ 'Anthony' => array(
+ 'isAdmin' => true,
+ ),
+ 'James' => array(
+ '/' => array(
+ 'ip' => '.*',
+ 'read' => '.*',
+ 'write' => '.*',
+ 'configure' => '.*',
+ ),
+ ),
+);
+
+/**
+ * Authenticator initialisation
+ *
+ * It finds the user (with the user provider) and checks authentication with the authentication checker.
+ *
+ * There are 2 types of access token:
+ * - UserPasswordToken, used with the user endpoint (to check username and password validity)
+ * - UserToken, used with the resource/topic/vhost endpoints (to check that the username exists)
+ */
+$authenticator = new Authenticator(
+ $userProvider,
+ new ChainAuthenticationChecker(array(
+ new UserPasswordTokenChecker(),
+ new UserTokenChecker(),
+ ))
+);
+
+/**
+ * DefaultVoter is used to check the authorization.
+ *
+ * This class implements the same logic as the default RabbitMQ authorization process.
+ *
+ * $permissions holds the configured user permissions
+ */
+$defaultVoter = new DefaultVoter($permissions);
+
+/**
+ * The following initialises the symfony/security component
+ */
+$authenticationManager = new AuthenticationProviderManager(array($authenticator));
+$accessDecisionManager = new AccessDecisionManager(array($defaultVoter));
+
+$tokenStorage = new TokenStorage();
+
+$authorizationChecker = new AuthorizationChecker(
+ $tokenStorage,
+ $authenticationManager,
+ $accessDecisionManager
+);
+
+/**
+ * The Security class is the main entry point
+ */
+$security = new Security($authenticationManager, $authorizationChecker);
+
+/**
+ * This is the auth controller.
+ *
+ * It takes the HTTP request and returns the HTTP response
+ */
+$authController = new AuthController($tokenStorage, $security);
+
+/** Add a logger */
+$stream = new StreamHandler(__DIR__.'/../var/log.log', Logger::DEBUG);
+$authenticator->setLogger((new Logger('rabbitmq_authenticator'))->pushHandler($stream));
+$defaultVoter->setLogger((new Logger('rabbitmq_default_voter'))->pushHandler($stream));
+$security->setLogger((new Logger('rabbitmq_security'))->pushHandler($stream));
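
As the comments above note, DefaultVoter applies the same check as RabbitMQ's
built-in authorization: each configured read/write/configure entry is a
regular expression matched against the resource name. A self-contained Java
sketch of that idea, using made-up names (this is not the voter's actual API):

    import java.util.Map;
    import java.util.regex.Pattern;

    // Illustrative sketch of the regex-based permission check implied by the
    // $permissions table above; class and method names are hypothetical.
    public class PermissionSketch {
        // e.g. James on vhost "/": ip/read/write/configure are all ".*"
        static final Map<String, String> JAMES_ON_ROOT =
                Map.of("ip", ".*", "read", ".*", "write", ".*", "configure", ".*");

        static boolean allowed(Map<String, String> perms,
                               String permission, String resourceName) {
            String regex = perms.get(permission);
            // A missing entry for the requested permission means deny.
            return regex != null && Pattern.matches(regex, resourceName);
        }

        public static void main(String[] args) {
            System.out.println(allowed(JAMES_ON_ROOT, "write", "amq.topic")); // true
            System.out.println(allowed(Map.of(), "write", "amq.topic"));      // false
        }
    }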
diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_php/src/resource.php b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_php/src/resource.php
new file mode 100644
index 0000000000..9b2448b867
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_php/src/resource.php
@@ -0,0 +1,14 @@
+<?php
+
+require_once '../vendor/autoload.php';
+require_once 'bootstrap.php';
+
+/**
+ * The resource action handles the request and checks the authentication + authorization of the request params.
+ * It checks the query-string params before the payload.
+ */
+$response = $authController->resourceAction(
+ \Symfony\Component\HttpFoundation\Request::createFromGlobals() // Create a request object
+);
+
+$response->send(); // send the http response
diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_php/src/topic.php b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_php/src/topic.php
new file mode 100644
index 0000000000..1ad5b4f72e
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_php/src/topic.php
@@ -0,0 +1,14 @@
+<?php
+
+require_once '../vendor/autoload.php';
+require_once 'bootstrap.php';
+
+/**
+ * The topic action handles the request and checks the authentication + authorization of the request params.
+ * It checks the query-string params before the payload.
+ */
+$response = $authController->topicAction(
+ \Symfony\Component\HttpFoundation\Request::createFromGlobals() // Create a request object
+);
+
+$response->send(); // send the http response
diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_php/src/user.php b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_php/src/user.php
new file mode 100644
index 0000000000..a8b372325a
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_php/src/user.php
@@ -0,0 +1,14 @@
+<?php
+
+require_once '../vendor/autoload.php';
+require_once 'bootstrap.php';
+
+/**
+ * The user action handles the request and checks the authentication + authorization of the request params.
+ * It checks the query-string params before the payload.
+ */
+$response = $authController->userAction(
+ \Symfony\Component\HttpFoundation\Request::createFromGlobals() // Create a request object
+);
+
+$response->send(); // send the http response
diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_php/src/vhost.php b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_php/src/vhost.php
new file mode 100644
index 0000000000..3f49de4a88
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_php/src/vhost.php
@@ -0,0 +1,14 @@
+<?php
+
+require_once '../vendor/autoload.php';
+require_once 'bootstrap.php';
+
+/**
+ * The vhost action handles the request and checks the authentication + authorization of the request params.
+ * It checks the query-string params before the payload.
+ */
+$response = $authController->vhostAction(
+ \Symfony\Component\HttpFoundation\Request::createFromGlobals() // Create a request object
+);
+
+$response->send(); // send the http response
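
The four scripts above map one-to-one onto the checks the HTTP auth backend
performs. The parameter sets sketched below are collected from the Spring
Boot controller and its tests further down in this diff; treat them as
indicative rather than exhaustive:

    import java.util.List;

    // Indicative query parameters for each auth check, as exercised by the
    // Spring Boot example and its tests elsewhere in this diff. The broker
    // may send additional fields (e.g. ip) not listed here.
    enum AuthCheck {
        USER("/user.php", List.of("username", "password")),
        VHOST("/vhost.php", List.of("username", "vhost")),
        RESOURCE("/resource.php",
                List.of("username", "vhost", "resource", "name", "permission")),
        TOPIC("/topic.php",
                List.of("username", "vhost", "resource", "name", "permission",
                        "routing_key"));

        final String path;
        final List<String> params;

        AuthCheck(String path, List<String> params) {
            this.path = path;
            this.params = params;
        }
    }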
diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_php/var/log.log b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_php/var/log.log
new file mode 100755
index 0000000000..e69de29bb2
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_php/var/log.log
diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/.gitignore b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/.gitignore
new file mode 100644
index 0000000000..9b769de628
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/.gitignore
@@ -0,0 +1,3 @@
+*.class
+target
+*.iml
diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/.mvn/wrapper/MavenWrapperDownloader.java b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/.mvn/wrapper/MavenWrapperDownloader.java
new file mode 100644
index 0000000000..c32394f140
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/.mvn/wrapper/MavenWrapperDownloader.java
@@ -0,0 +1,117 @@
+/*
+ * Copyright 2007-present the original author or authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import java.net.*;
+import java.io.*;
+import java.nio.channels.*;
+import java.util.Properties;
+
+public class MavenWrapperDownloader {
+
+ private static final String WRAPPER_VERSION = "0.5.5";
+ /**
+ * Default URL to download the maven-wrapper.jar from, if no 'downloadUrl' is provided.
+ */
+ private static final String DEFAULT_DOWNLOAD_URL = "https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/"
+ + WRAPPER_VERSION + "/maven-wrapper-" + WRAPPER_VERSION + ".jar";
+
+ /**
+ * Path to the maven-wrapper.properties file, which might contain a downloadUrl property to
+ * use instead of the default one.
+ */
+ private static final String MAVEN_WRAPPER_PROPERTIES_PATH =
+ ".mvn/wrapper/maven-wrapper.properties";
+
+ /**
+ * Path where the maven-wrapper.jar will be saved to.
+ */
+ private static final String MAVEN_WRAPPER_JAR_PATH =
+ ".mvn/wrapper/maven-wrapper.jar";
+
+ /**
+ * Name of the property which should be used to override the default download url for the wrapper.
+ */
+ private static final String PROPERTY_NAME_WRAPPER_URL = "wrapperUrl";
+
+ public static void main(String args[]) {
+ System.out.println("- Downloader started");
+ File baseDirectory = new File(args[0]);
+ System.out.println("- Using base directory: " + baseDirectory.getAbsolutePath());
+
+ // If the maven-wrapper.properties exists, read it and check if it contains a custom
+ // wrapperUrl parameter.
+ File mavenWrapperPropertyFile = new File(baseDirectory, MAVEN_WRAPPER_PROPERTIES_PATH);
+ String url = DEFAULT_DOWNLOAD_URL;
+ if(mavenWrapperPropertyFile.exists()) {
+ FileInputStream mavenWrapperPropertyFileInputStream = null;
+ try {
+ mavenWrapperPropertyFileInputStream = new FileInputStream(mavenWrapperPropertyFile);
+ Properties mavenWrapperProperties = new Properties();
+ mavenWrapperProperties.load(mavenWrapperPropertyFileInputStream);
+ url = mavenWrapperProperties.getProperty(PROPERTY_NAME_WRAPPER_URL, url);
+ } catch (IOException e) {
+ System.out.println("- ERROR loading '" + MAVEN_WRAPPER_PROPERTIES_PATH + "'");
+ } finally {
+ try {
+ if(mavenWrapperPropertyFileInputStream != null) {
+ mavenWrapperPropertyFileInputStream.close();
+ }
+ } catch (IOException e) {
+ // Ignore ...
+ }
+ }
+ }
+ System.out.println("- Downloading from: " + url);
+
+ File outputFile = new File(baseDirectory.getAbsolutePath(), MAVEN_WRAPPER_JAR_PATH);
+ if(!outputFile.getParentFile().exists()) {
+ if(!outputFile.getParentFile().mkdirs()) {
+ System.out.println(
+ "- ERROR creating output directory '" + outputFile.getParentFile().getAbsolutePath() + "'");
+ }
+ }
+ System.out.println("- Downloading to: " + outputFile.getAbsolutePath());
+ try {
+ downloadFileFromURL(url, outputFile);
+ System.out.println("Done");
+ System.exit(0);
+ } catch (Throwable e) {
+ System.out.println("- Error downloading");
+ e.printStackTrace();
+ System.exit(1);
+ }
+ }
+
+ private static void downloadFileFromURL(String urlString, File destination) throws Exception {
+ if (System.getenv("MVNW_USERNAME") != null && System.getenv("MVNW_PASSWORD") != null) {
+ String username = System.getenv("MVNW_USERNAME");
+ char[] password = System.getenv("MVNW_PASSWORD").toCharArray();
+ Authenticator.setDefault(new Authenticator() {
+ @Override
+ protected PasswordAuthentication getPasswordAuthentication() {
+ return new PasswordAuthentication(username, password);
+ }
+ });
+ }
+ URL website = new URL(urlString);
+ ReadableByteChannel rbc;
+ rbc = Channels.newChannel(website.openStream());
+ FileOutputStream fos = new FileOutputStream(destination);
+ fos.getChannel().transferFrom(rbc, 0, Long.MAX_VALUE);
+ fos.close();
+ rbc.close();
+ }
+
+}
diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/.mvn/wrapper/maven-wrapper.jar b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/.mvn/wrapper/maven-wrapper.jar
new file mode 100644
index 0000000000..0d5e649888
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/.mvn/wrapper/maven-wrapper.jar
Binary files differ
diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/.mvn/wrapper/maven-wrapper.properties b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/.mvn/wrapper/maven-wrapper.properties
new file mode 100644
index 0000000000..fa87ad7ddf
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/.mvn/wrapper/maven-wrapper.properties
@@ -0,0 +1,2 @@
+distributionUrl=https://repo.maven.apache.org/maven2/org/apache/maven/apache-maven/3.6.1/apache-maven-3.6.1-bin.zip
+wrapperUrl=https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.5/maven-wrapper-0.5.5.jar
diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/mvnw b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/mvnw
new file mode 100755
index 0000000000..d2f0ea3808
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/mvnw
@@ -0,0 +1,310 @@
+#!/bin/sh
+# ----------------------------------------------------------------------------
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+# ----------------------------------------------------------------------------
+
+# ----------------------------------------------------------------------------
+# Maven2 Start Up Batch script
+#
+# Required ENV vars:
+# ------------------
+# JAVA_HOME - location of a JDK home dir
+#
+# Optional ENV vars
+# -----------------
+# M2_HOME - location of maven2's installed home dir
+# MAVEN_OPTS - parameters passed to the Java VM when running Maven
+# e.g. to debug Maven itself, use
+# set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000
+# MAVEN_SKIP_RC - flag to disable loading of mavenrc files
+# ----------------------------------------------------------------------------
+
+if [ -z "$MAVEN_SKIP_RC" ] ; then
+
+ if [ -f /etc/mavenrc ] ; then
+ . /etc/mavenrc
+ fi
+
+ if [ -f "$HOME/.mavenrc" ] ; then
+ . "$HOME/.mavenrc"
+ fi
+
+fi
+
+# OS specific support. $var _must_ be set to either true or false.
+cygwin=false;
+darwin=false;
+mingw=false
+case "`uname`" in
+ CYGWIN*) cygwin=true ;;
+ MINGW*) mingw=true;;
+ Darwin*) darwin=true
+ # Use /usr/libexec/java_home if available, otherwise fall back to /Library/Java/Home
+ # See https://developer.apple.com/library/mac/qa/qa1170/_index.html
+ if [ -z "$JAVA_HOME" ]; then
+ if [ -x "/usr/libexec/java_home" ]; then
+ export JAVA_HOME="`/usr/libexec/java_home`"
+ else
+ export JAVA_HOME="/Library/Java/Home"
+ fi
+ fi
+ ;;
+esac
+
+if [ -z "$JAVA_HOME" ] ; then
+ if [ -r /etc/gentoo-release ] ; then
+ JAVA_HOME=`java-config --jre-home`
+ fi
+fi
+
+if [ -z "$M2_HOME" ] ; then
+ ## resolve links - $0 may be a link to maven's home
+ PRG="$0"
+
+ # need this for relative symlinks
+ while [ -h "$PRG" ] ; do
+ ls=`ls -ld "$PRG"`
+ link=`expr "$ls" : '.*-> \(.*\)$'`
+ if expr "$link" : '/.*' > /dev/null; then
+ PRG="$link"
+ else
+ PRG="`dirname "$PRG"`/$link"
+ fi
+ done
+
+ saveddir=`pwd`
+
+ M2_HOME=`dirname "$PRG"`/..
+
+ # make it fully qualified
+ M2_HOME=`cd "$M2_HOME" && pwd`
+
+ cd "$saveddir"
+ # echo Using m2 at $M2_HOME
+fi
+
+# For Cygwin, ensure paths are in UNIX format before anything is touched
+if $cygwin ; then
+ [ -n "$M2_HOME" ] &&
+ M2_HOME=`cygpath --unix "$M2_HOME"`
+ [ -n "$JAVA_HOME" ] &&
+ JAVA_HOME=`cygpath --unix "$JAVA_HOME"`
+ [ -n "$CLASSPATH" ] &&
+ CLASSPATH=`cygpath --path --unix "$CLASSPATH"`
+fi
+
+# For Mingw, ensure paths are in UNIX format before anything is touched
+if $mingw ; then
+ [ -n "$M2_HOME" ] &&
+ M2_HOME="`(cd "$M2_HOME"; pwd)`"
+ [ -n "$JAVA_HOME" ] &&
+ JAVA_HOME="`(cd "$JAVA_HOME"; pwd)`"
+fi
+
+if [ -z "$JAVA_HOME" ]; then
+ javaExecutable="`which javac`"
+ if [ -n "$javaExecutable" ] && ! [ "`expr \"$javaExecutable\" : '\([^ ]*\)'`" = "no" ]; then
+ # readlink(1) is not available as standard on Solaris 10.
+ readLink=`which readlink`
+ if [ ! `expr "$readLink" : '\([^ ]*\)'` = "no" ]; then
+ if $darwin ; then
+ javaHome="`dirname \"$javaExecutable\"`"
+ javaExecutable="`cd \"$javaHome\" && pwd -P`/javac"
+ else
+ javaExecutable="`readlink -f \"$javaExecutable\"`"
+ fi
+ javaHome="`dirname \"$javaExecutable\"`"
+ javaHome=`expr "$javaHome" : '\(.*\)/bin'`
+ JAVA_HOME="$javaHome"
+ export JAVA_HOME
+ fi
+ fi
+fi
+
+if [ -z "$JAVACMD" ] ; then
+ if [ -n "$JAVA_HOME" ] ; then
+ if [ -x "$JAVA_HOME/jre/sh/java" ] ; then
+ # IBM's JDK on AIX uses strange locations for the executables
+ JAVACMD="$JAVA_HOME/jre/sh/java"
+ else
+ JAVACMD="$JAVA_HOME/bin/java"
+ fi
+ else
+ JAVACMD="`which java`"
+ fi
+fi
+
+if [ ! -x "$JAVACMD" ] ; then
+ echo "Error: JAVA_HOME is not defined correctly." >&2
+ echo " We cannot execute $JAVACMD" >&2
+ exit 1
+fi
+
+if [ -z "$JAVA_HOME" ] ; then
+ echo "Warning: JAVA_HOME environment variable is not set."
+fi
+
+CLASSWORLDS_LAUNCHER=org.codehaus.plexus.classworlds.launcher.Launcher
+
+# traverses directory structure from process work directory to filesystem root
+# first directory with .mvn subdirectory is considered project base directory
+find_maven_basedir() {
+
+ if [ -z "$1" ]
+ then
+ echo "Path not specified to find_maven_basedir"
+ return 1
+ fi
+
+ basedir="$1"
+ wdir="$1"
+ while [ "$wdir" != '/' ] ; do
+ if [ -d "$wdir"/.mvn ] ; then
+ basedir=$wdir
+ break
+ fi
+ # workaround for JBEAP-8937 (on Solaris 10/Sparc)
+ if [ -d "${wdir}" ]; then
+ wdir=`cd "$wdir/.."; pwd`
+ fi
+ # end of workaround
+ done
+ echo "${basedir}"
+}
+
+# concatenates all lines of a file
+concat_lines() {
+ if [ -f "$1" ]; then
+ echo "$(tr -s '\n' ' ' < "$1")"
+ fi
+}
+
+BASE_DIR=`find_maven_basedir "$(pwd)"`
+if [ -z "$BASE_DIR" ]; then
+ exit 1;
+fi
+
+##########################################################################################
+# Extension to allow automatically downloading the maven-wrapper.jar from Maven-central
+# This allows using the maven wrapper in projects that prohibit checking in binary data.
+##########################################################################################
+if [ -r "$BASE_DIR/.mvn/wrapper/maven-wrapper.jar" ]; then
+ if [ "$MVNW_VERBOSE" = true ]; then
+ echo "Found .mvn/wrapper/maven-wrapper.jar"
+ fi
+else
+ if [ "$MVNW_VERBOSE" = true ]; then
+ echo "Couldn't find .mvn/wrapper/maven-wrapper.jar, downloading it ..."
+ fi
+ if [ -n "$MVNW_REPOURL" ]; then
+ jarUrl="$MVNW_REPOURL/io/takari/maven-wrapper/0.5.5/maven-wrapper-0.5.5.jar"
+ else
+ jarUrl="https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.5/maven-wrapper-0.5.5.jar"
+ fi
+ while IFS="=" read key value; do
+ case "$key" in (wrapperUrl) jarUrl="$value"; break ;;
+ esac
+ done < "$BASE_DIR/.mvn/wrapper/maven-wrapper.properties"
+ if [ "$MVNW_VERBOSE" = true ]; then
+ echo "Downloading from: $jarUrl"
+ fi
+ wrapperJarPath="$BASE_DIR/.mvn/wrapper/maven-wrapper.jar"
+ if $cygwin; then
+ wrapperJarPath=`cygpath --path --windows "$wrapperJarPath"`
+ fi
+
+ if command -v wget > /dev/null; then
+ if [ "$MVNW_VERBOSE" = true ]; then
+ echo "Found wget ... using wget"
+ fi
+ if [ -z "$MVNW_USERNAME" ] || [ -z "$MVNW_PASSWORD" ]; then
+ wget "$jarUrl" -O "$wrapperJarPath"
+ else
+ wget --http-user=$MVNW_USERNAME --http-password=$MVNW_PASSWORD "$jarUrl" -O "$wrapperJarPath"
+ fi
+ elif command -v curl > /dev/null; then
+ if [ "$MVNW_VERBOSE" = true ]; then
+ echo "Found curl ... using curl"
+ fi
+ if [ -z "$MVNW_USERNAME" ] || [ -z "$MVNW_PASSWORD" ]; then
+ curl -o "$wrapperJarPath" "$jarUrl" -f
+ else
+ curl --user $MVNW_USERNAME:$MVNW_PASSWORD -o "$wrapperJarPath" "$jarUrl" -f
+ fi
+
+ else
+ if [ "$MVNW_VERBOSE" = true ]; then
+ echo "Falling back to using Java to download"
+ fi
+ javaClass="$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.java"
+ # For Cygwin, switch paths to Windows format before running javac
+ if $cygwin; then
+ javaClass=`cygpath --path --windows "$javaClass"`
+ fi
+ if [ -e "$javaClass" ]; then
+ if [ ! -e "$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.class" ]; then
+ if [ "$MVNW_VERBOSE" = true ]; then
+ echo " - Compiling MavenWrapperDownloader.java ..."
+ fi
+ # Compiling the Java class
+ ("$JAVA_HOME/bin/javac" "$javaClass")
+ fi
+ if [ -e "$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.class" ]; then
+ # Running the downloader
+ if [ "$MVNW_VERBOSE" = true ]; then
+ echo " - Running MavenWrapperDownloader.java ..."
+ fi
+ ("$JAVA_HOME/bin/java" -cp .mvn/wrapper MavenWrapperDownloader "$MAVEN_PROJECTBASEDIR")
+ fi
+ fi
+ fi
+fi
+##########################################################################################
+# End of extension
+##########################################################################################
+
+export MAVEN_PROJECTBASEDIR=${MAVEN_BASEDIR:-"$BASE_DIR"}
+if [ "$MVNW_VERBOSE" = true ]; then
+ echo $MAVEN_PROJECTBASEDIR
+fi
+MAVEN_OPTS="$(concat_lines "$MAVEN_PROJECTBASEDIR/.mvn/jvm.config") $MAVEN_OPTS"
+
+# For Cygwin, switch paths to Windows format before running java
+if $cygwin; then
+ [ -n "$M2_HOME" ] &&
+ M2_HOME=`cygpath --path --windows "$M2_HOME"`
+ [ -n "$JAVA_HOME" ] &&
+ JAVA_HOME=`cygpath --path --windows "$JAVA_HOME"`
+ [ -n "$CLASSPATH" ] &&
+ CLASSPATH=`cygpath --path --windows "$CLASSPATH"`
+ [ -n "$MAVEN_PROJECTBASEDIR" ] &&
+ MAVEN_PROJECTBASEDIR=`cygpath --path --windows "$MAVEN_PROJECTBASEDIR"`
+fi
+
+# Provide a "standardized" way to retrieve the CLI args that will
+# work with both Windows and non-Windows executions.
+MAVEN_CMD_LINE_ARGS="$MAVEN_CONFIG $@"
+export MAVEN_CMD_LINE_ARGS
+
+WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain
+
+exec "$JAVACMD" \
+ $MAVEN_OPTS \
+ -classpath "$MAVEN_PROJECTBASEDIR/.mvn/wrapper/maven-wrapper.jar" \
+ "-Dmaven.home=${M2_HOME}" "-Dmaven.multiModuleProjectDirectory=${MAVEN_PROJECTBASEDIR}" \
+ ${WRAPPER_LAUNCHER} $MAVEN_CONFIG "$@"
diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/mvnw.cmd b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/mvnw.cmd
new file mode 100644
index 0000000000..b26ab24f03
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/mvnw.cmd
@@ -0,0 +1,182 @@
+@REM ----------------------------------------------------------------------------
+@REM Licensed to the Apache Software Foundation (ASF) under one
+@REM or more contributor license agreements. See the NOTICE file
+@REM distributed with this work for additional information
+@REM regarding copyright ownership. The ASF licenses this file
+@REM to you under the Apache License, Version 2.0 (the
+@REM "License"); you may not use this file except in compliance
+@REM with the License. You may obtain a copy of the License at
+@REM
+@REM http://www.apache.org/licenses/LICENSE-2.0
+@REM
+@REM Unless required by applicable law or agreed to in writing,
+@REM software distributed under the License is distributed on an
+@REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+@REM KIND, either express or implied. See the License for the
+@REM specific language governing permissions and limitations
+@REM under the License.
+@REM ----------------------------------------------------------------------------
+
+@REM ----------------------------------------------------------------------------
+@REM Maven2 Start Up Batch script
+@REM
+@REM Required ENV vars:
+@REM JAVA_HOME - location of a JDK home dir
+@REM
+@REM Optional ENV vars
+@REM M2_HOME - location of maven2's installed home dir
+@REM MAVEN_BATCH_ECHO - set to 'on' to enable the echoing of the batch commands
+@REM MAVEN_BATCH_PAUSE - set to 'on' to wait for a key stroke before ending
+@REM MAVEN_OPTS - parameters passed to the Java VM when running Maven
+@REM e.g. to debug Maven itself, use
+@REM set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000
+@REM MAVEN_SKIP_RC - flag to disable loading of mavenrc files
+@REM ----------------------------------------------------------------------------
+
+@REM Begin all REM lines with '@' in case MAVEN_BATCH_ECHO is 'on'
+@echo off
+@REM set title of command window
+title %0
+@REM enable echoing by setting MAVEN_BATCH_ECHO to 'on'
+@if "%MAVEN_BATCH_ECHO%" == "on" echo %MAVEN_BATCH_ECHO%
+
+@REM set %HOME% to equivalent of $HOME
+if "%HOME%" == "" (set "HOME=%HOMEDRIVE%%HOMEPATH%")
+
+@REM Execute a user defined script before this one
+if not "%MAVEN_SKIP_RC%" == "" goto skipRcPre
+@REM check for pre script, once with legacy .bat ending and once with .cmd ending
+if exist "%HOME%\mavenrc_pre.bat" call "%HOME%\mavenrc_pre.bat"
+if exist "%HOME%\mavenrc_pre.cmd" call "%HOME%\mavenrc_pre.cmd"
+:skipRcPre
+
+@setlocal
+
+set ERROR_CODE=0
+
+@REM To isolate internal variables from possible post scripts, we use another setlocal
+@setlocal
+
+@REM ==== START VALIDATION ====
+if not "%JAVA_HOME%" == "" goto OkJHome
+
+echo.
+echo Error: JAVA_HOME not found in your environment. >&2
+echo Please set the JAVA_HOME variable in your environment to match the >&2
+echo location of your Java installation. >&2
+echo.
+goto error
+
+:OkJHome
+if exist "%JAVA_HOME%\bin\java.exe" goto init
+
+echo.
+echo Error: JAVA_HOME is set to an invalid directory. >&2
+echo JAVA_HOME = "%JAVA_HOME%" >&2
+echo Please set the JAVA_HOME variable in your environment to match the >&2
+echo location of your Java installation. >&2
+echo.
+goto error
+
+@REM ==== END VALIDATION ====
+
+:init
+
+@REM Find the project base dir, i.e. the directory that contains the folder ".mvn".
+@REM Fallback to current working directory if not found.
+
+set MAVEN_PROJECTBASEDIR=%MAVEN_BASEDIR%
+IF NOT "%MAVEN_PROJECTBASEDIR%"=="" goto endDetectBaseDir
+
+set EXEC_DIR=%CD%
+set WDIR=%EXEC_DIR%
+:findBaseDir
+IF EXIST "%WDIR%"\.mvn goto baseDirFound
+cd ..
+IF "%WDIR%"=="%CD%" goto baseDirNotFound
+set WDIR=%CD%
+goto findBaseDir
+
+:baseDirFound
+set MAVEN_PROJECTBASEDIR=%WDIR%
+cd "%EXEC_DIR%"
+goto endDetectBaseDir
+
+:baseDirNotFound
+set MAVEN_PROJECTBASEDIR=%EXEC_DIR%
+cd "%EXEC_DIR%"
+
+:endDetectBaseDir
+
+IF NOT EXIST "%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config" goto endReadAdditionalConfig
+
+@setlocal EnableExtensions EnableDelayedExpansion
+for /F "usebackq delims=" %%a in ("%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config") do set JVM_CONFIG_MAVEN_PROPS=!JVM_CONFIG_MAVEN_PROPS! %%a
+@endlocal & set JVM_CONFIG_MAVEN_PROPS=%JVM_CONFIG_MAVEN_PROPS%
+
+:endReadAdditionalConfig
+
+SET MAVEN_JAVA_EXE="%JAVA_HOME%\bin\java.exe"
+set WRAPPER_JAR="%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.jar"
+set WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain
+
+set DOWNLOAD_URL="https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.5/maven-wrapper-0.5.5.jar"
+
+FOR /F "tokens=1,2 delims==" %%A IN ("%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.properties") DO (
+ IF "%%A"=="wrapperUrl" SET DOWNLOAD_URL=%%B
+)
+
+@REM Extension to allow automatically downloading the maven-wrapper.jar from Maven-central
+@REM This allows using the maven wrapper in projects that prohibit checking in binary data.
+if exist %WRAPPER_JAR% (
+ if "%MVNW_VERBOSE%" == "true" (
+ echo Found %WRAPPER_JAR%
+ )
+) else (
+ if not "%MVNW_REPOURL%" == "" (
+ SET DOWNLOAD_URL="%MVNW_REPOURL%/io/takari/maven-wrapper/0.5.5/maven-wrapper-0.5.5.jar"
+ )
+ if "%MVNW_VERBOSE%" == "true" (
+ echo Couldn't find %WRAPPER_JAR%, downloading it ...
+ echo Downloading from: %DOWNLOAD_URL%
+ )
+
+ powershell -Command "&{"^
+ "$webclient = new-object System.Net.WebClient;"^
+ "if (-not ([string]::IsNullOrEmpty('%MVNW_USERNAME%') -and [string]::IsNullOrEmpty('%MVNW_PASSWORD%'))) {"^
+ "$webclient.Credentials = new-object System.Net.NetworkCredential('%MVNW_USERNAME%', '%MVNW_PASSWORD%');"^
+ "}"^
+ "[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12; $webclient.DownloadFile('%DOWNLOAD_URL%', '%WRAPPER_JAR%')"^
+ "}"
+ if "%MVNW_VERBOSE%" == "true" (
+ echo Finished downloading %WRAPPER_JAR%
+ )
+)
+@REM End of extension
+
+@REM Provide a "standardized" way to retrieve the CLI args that will
+@REM work with both Windows and non-Windows executions.
+set MAVEN_CMD_LINE_ARGS=%*
+
+%MAVEN_JAVA_EXE% %JVM_CONFIG_MAVEN_PROPS% %MAVEN_OPTS% %MAVEN_DEBUG_OPTS% -classpath %WRAPPER_JAR% "-Dmaven.multiModuleProjectDirectory=%MAVEN_PROJECTBASEDIR%" %WRAPPER_LAUNCHER% %MAVEN_CONFIG% %*
+if ERRORLEVEL 1 goto error
+goto end
+
+:error
+set ERROR_CODE=1
+
+:end
+@endlocal & set ERROR_CODE=%ERROR_CODE%
+
+if not "%MAVEN_SKIP_RC%" == "" goto skipRcPost
+@REM check for post script, once with legacy .bat ending and once with .cmd ending
+if exist "%HOME%\mavenrc_post.bat" call "%HOME%\mavenrc_post.bat"
+if exist "%HOME%\mavenrc_post.cmd" call "%HOME%\mavenrc_post.cmd"
+:skipRcPost
+
+@REM pause the script if MAVEN_BATCH_PAUSE is set to 'on'
+if "%MAVEN_BATCH_PAUSE%" == "on" pause
+
+if "%MAVEN_TERMINATE_CMD%" == "on" exit %ERROR_CODE%
+
+exit /B %ERROR_CODE%
diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/pom.xml b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/pom.xml
new file mode 100644
index 0000000000..50f0f9135d
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/pom.xml
@@ -0,0 +1,44 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+
+ <parent>
+ <groupId>org.springframework.boot</groupId>
+ <artifactId>spring-boot-starter-parent</artifactId>
+ <version>2.1.5.RELEASE</version>
+ </parent>
+
+ <properties>
+ <maven.compiler.source>1.8</maven.compiler.source>
+ <maven.compiler.target>1.8</maven.compiler.target>
+ </properties>
+
+ <groupId>com.rabbitmq.examples</groupId>
+ <artifactId>rabbitmq-auth-backend-spring-boot</artifactId>
+ <version>1.0-SNAPSHOT</version>
+
+ <dependencies>
+ <dependency>
+ <groupId>org.springframework.boot</groupId>
+ <artifactId>spring-boot-starter-web</artifactId>
+ </dependency>
+
+ <dependency>
+ <groupId>org.springframework.boot</groupId>
+ <artifactId>spring-boot-starter-test</artifactId>
+ <scope>test</scope>
+ </dependency>
+ </dependencies>
+
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>org.springframework.boot</groupId>
+ <artifactId>spring-boot-maven-plugin</artifactId>
+ </plugin>
+ </plugins>
+ </build>
+
+</project>
diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/src/main/java/com/rabbitmq/examples/AuthBackendHttpController.java b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/src/main/java/com/rabbitmq/examples/AuthBackendHttpController.java
new file mode 100644
index 0000000000..0de4f340af
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/src/main/java/com/rabbitmq/examples/AuthBackendHttpController.java
@@ -0,0 +1,68 @@
+/**
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at https://mozilla.org/MPL/2.0/.
+ *
+ * Copyright (c) 2017-2020 VMware, Inc. or its affiliates. All rights reserved.
+ */
+
+package com.rabbitmq.examples;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.util.MultiValueMap;
+import org.springframework.web.bind.annotation.RequestMapping;
+import org.springframework.web.bind.annotation.RequestMethod;
+import org.springframework.web.bind.annotation.RequestParam;
+import org.springframework.web.bind.annotation.RestController;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import static java.util.Arrays.asList;
+import static org.springframework.util.StringUtils.collectionToDelimitedString;
+
+/**
+ *
+ */
+@RestController
+@RequestMapping(path = "/auth", method = { RequestMethod.GET, RequestMethod.POST })
+public class AuthBackendHttpController {
+
+ private static final Logger LOGGER = LoggerFactory.getLogger(AuthBackendHttpController.class);
+
+ private final Map<String, User> users = new HashMap<String, User>() {{
+ put("guest", new User("guest", "guest", asList("administrator", "management")));
+ put("springy", new User("springy", "springy", asList("administrator", "management")));
+ }};
+
+ @RequestMapping("user")
+ public String user(@RequestParam("username") String username,
+ @RequestParam("password") String password) {
+ LOGGER.info("Trying to authenticate user {}", username);
+ User user = users.get(username);
+ if (user != null && user.getPassword().equals(password)) {
+ return "allow " + collectionToDelimitedString(user.getTags(), " ");
+ } else {
+ return "deny";
+ }
+ }
+
+ @RequestMapping("vhost")
+ public String vhost(VirtualHostCheck check) {
+ LOGGER.info("Checking vhost access with {}", check);
+ return "allow";
+ }
+
+ @RequestMapping("resource")
+ public String resource(ResourceCheck check) {
+ LOGGER.info("Checking resource access with {}", check);
+ return "allow";
+ }
+
+ @RequestMapping("topic")
+ public String topic(TopicCheck check) {
+ LOGGER.info("Checking topic access with {}", check);
+ return check.getRouting_key().startsWith("a") ? "allow" : "deny";
+ }
+}
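
The controller answers in the plugin's plain-text convention: deny, or allow
optionally followed by space-separated tags (see the user method above). A
tiny client-side sketch of parsing such a reply; the class name is
hypothetical:

    import java.util.Arrays;
    import java.util.List;

    // Parse the "allow [tag ...]" / "deny" body the controller above returns.
    public class AuthReply {
        public static void main(String[] args) {
            String body = "allow administrator management"; // e.g. from /auth/user
            List<String> tokens = Arrays.asList(body.trim().split("\\s+"));
            boolean allowed = !tokens.isEmpty() && "allow".equals(tokens.get(0));
            List<String> tags =
                    allowed ? tokens.subList(1, tokens.size()) : List.of();
            System.out.println("allowed=" + allowed + ", tags=" + tags);
        }
    }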
diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/src/main/java/com/rabbitmq/examples/BaseCheck.java b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/src/main/java/com/rabbitmq/examples/BaseCheck.java
new file mode 100644
index 0000000000..5cf5635078
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/src/main/java/com/rabbitmq/examples/BaseCheck.java
@@ -0,0 +1,40 @@
+/**
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at https://mozilla.org/MPL/2.0/.
+ *
+ * Copyright (c) 2017-2020 VMware, Inc. or its affiliates. All rights reserved.
+ */
+
+package com.rabbitmq.examples;
+
+public class BaseCheck {
+
+ private String username;
+
+ private String vhost;
+
+ public String getUsername() {
+ return username;
+ }
+
+ public void setUsername(String username) {
+ this.username = username;
+ }
+
+ public String getVhost() {
+ return vhost;
+ }
+
+ public void setVhost(String vhost) {
+ this.vhost = vhost;
+ }
+
+ @Override
+ public String toString() {
+ return getClass().getSimpleName() + "{" +
+ "username='" + username + '\'' +
+ ", vhost='" + vhost + '\'' +
+ '}';
+ }
+}
diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/src/main/java/com/rabbitmq/examples/RabbitMqAuthBackendHttp.java b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/src/main/java/com/rabbitmq/examples/RabbitMqAuthBackendHttp.java
new file mode 100644
index 0000000000..50d5c57e8a
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/src/main/java/com/rabbitmq/examples/RabbitMqAuthBackendHttp.java
@@ -0,0 +1,68 @@
+/**
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at https://mozilla.org/MPL/2.0/.
+ *
+ * Copyright (c) 2017-2020 VMware, Inc. or its affiliates. All rights reserved.
+ */
+
+package com.rabbitmq.examples;
+
+import org.slf4j.LoggerFactory;
+import org.springframework.boot.SpringApplication;
+import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
+import org.springframework.boot.autoconfigure.SpringBootApplication;
+import org.springframework.context.annotation.Configuration;
+import org.springframework.context.annotation.Profile;
+import org.springframework.ui.ModelMap;
+import org.springframework.util.StringUtils;
+import org.springframework.web.context.request.WebRequest;
+import org.springframework.web.context.request.WebRequestInterceptor;
+import org.springframework.web.servlet.config.annotation.InterceptorRegistry;
+import org.springframework.web.servlet.config.annotation.WebMvcConfigurer;
+
+import java.util.stream.Collectors;
+
+@EnableAutoConfiguration
+@SpringBootApplication
+public class RabbitMqAuthBackendHttp {
+
+ public static void main(String[] args) {
+ SpringApplication.run(RabbitMqAuthBackendHttp.class, args);
+ }
+
+ // to enable: ./mvnw spring-boot:run -Dspring-boot.run.profiles=debug
+ @Profile("debug")
+ @Configuration
+ static class DebugConfiguration implements WebMvcConfigurer {
+
+ @Override
+ public void addInterceptors(InterceptorRegistry registry) {
+
+ registry.addWebRequestInterceptor(new WebRequestInterceptor() {
+ @Override
+ public void preHandle(WebRequest request) {
+ LoggerFactory.getLogger(DebugConfiguration.class).info(
+ "HTTP request parameters: {}",
+ request.getParameterMap()
+ .entrySet().stream()
+ .map(entry -> entry.getKey() + " = " + StringUtils.arrayToCommaDelimitedString(entry.getValue()))
+ .collect(Collectors.toList())
+ );
+ }
+
+ @Override
+ public void postHandle(WebRequest request, ModelMap model) {
+
+ }
+
+ @Override
+ public void afterCompletion(WebRequest request, Exception ex) {
+
+ }
+ });
+ }
+
+ }
+
+}
diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/src/main/java/com/rabbitmq/examples/ResourceCheck.java b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/src/main/java/com/rabbitmq/examples/ResourceCheck.java
new file mode 100644
index 0000000000..3df3ef3ed2
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/src/main/java/com/rabbitmq/examples/ResourceCheck.java
@@ -0,0 +1,47 @@
+/**
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at https://mozilla.org/MPL/2.0/.
+ *
+ * Copyright (c) 2017-2020 VMware, Inc. or its affiliates. All rights reserved.
+ */
+
+package com.rabbitmq.examples;
+
+public class ResourceCheck extends BaseCheck {
+
+ private String resource, name, permission;
+
+ public String getResource() {
+ return resource;
+ }
+
+ public void setResource(String resource) {
+ this.resource = resource;
+ }
+
+ public String getName() {
+ return name;
+ }
+
+ public void setName(String name) {
+ this.name = name;
+ }
+
+ public String getPermission() {
+ return permission;
+ }
+
+ public void setPermission(String permission) {
+ this.permission = permission;
+ }
+
+ @Override
+ public String toString() {
+ return "ResourceCheck{" +
+ "resource='" + resource + '\'' +
+ ", name='" + name + '\'' +
+ ", permission='" + permission + '\'' +
+ "} " + super.toString();
+ }
+}
diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/src/main/java/com/rabbitmq/examples/TopicCheck.java b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/src/main/java/com/rabbitmq/examples/TopicCheck.java
new file mode 100644
index 0000000000..9b378fe52b
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/src/main/java/com/rabbitmq/examples/TopicCheck.java
@@ -0,0 +1,29 @@
+/**
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at https://mozilla.org/MPL/2.0/.
+ *
+ * Copyright (c) 2017-2020 VMware, Inc. or its affiliates. All rights reserved.
+ */
+
+package com.rabbitmq.examples;
+
+public class TopicCheck extends ResourceCheck {
+
+ private String routing_key;
+
+ public String getRouting_key() {
+ return routing_key;
+ }
+
+ public void setRouting_key(String routing_key) {
+ this.routing_key = routing_key;
+ }
+
+ @Override
+ public String toString() {
+ return "TopicCheck{" +
+ "routing_key='" + routing_key + '\'' +
+ "} " + super.toString();
+ }
+}
diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/src/main/java/com/rabbitmq/examples/User.java b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/src/main/java/com/rabbitmq/examples/User.java
new file mode 100644
index 0000000000..47017a0714
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/src/main/java/com/rabbitmq/examples/User.java
@@ -0,0 +1,62 @@
+/**
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at https://mozilla.org/MPL/2.0/.
+ *
+ * Copyright (c) 2017-2020 VMware, Inc. or its affiliates. All rights reserved.
+ */
+
+package com.rabbitmq.examples;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+
+/**
+ *
+ */
+public class User {
+
+ private final String username, password;
+
+ private final Collection<String> tags;
+
+ public User(String username, String password) {
+ this(username, password, Collections.<String>emptyList());
+ }
+
+ public User(String username, String password, Collection<String> tags) {
+ this.username = username;
+ this.password = password;
+ this.tags = tags;
+ }
+
+ public String getUsername() {
+ return username;
+ }
+
+ public String getPassword() {
+ return password;
+ }
+
+ public Collection<String> getTags() {
+ return new ArrayList<String>(tags);
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o)
+ return true;
+ if (o == null || getClass() != o.getClass())
+ return false;
+
+ User user = (User) o;
+
+ return username.equals(user.username);
+ }
+
+ @Override
+ public int hashCode() {
+ return username.hashCode();
+ }
+}
diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/src/main/java/com/rabbitmq/examples/VirtualHostCheck.java b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/src/main/java/com/rabbitmq/examples/VirtualHostCheck.java
new file mode 100644
index 0000000000..8cb11032f4
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/src/main/java/com/rabbitmq/examples/VirtualHostCheck.java
@@ -0,0 +1,15 @@
+/**
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at https://mozilla.org/MPL/2.0/.
+ *
+ * Copyright (c) 2017-2020 VMware, Inc. or its affiliates. All rights reserved.
+ */
+
+package com.rabbitmq.examples;
+
+public class VirtualHostCheck extends BaseCheck {
+
+
+
+}
diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/src/test/java/com/rabbitmq/examples/AuthBackendHttpControllerTest.java b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/src/test/java/com/rabbitmq/examples/AuthBackendHttpControllerTest.java
new file mode 100644
index 0000000000..0b5635246a
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/src/test/java/com/rabbitmq/examples/AuthBackendHttpControllerTest.java
@@ -0,0 +1,66 @@
+/**
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at https://mozilla.org/MPL/2.0/.
+ *
+ * Copyright (c) 2017-2020 VMware, Inc. or its affiliates. All rights reserved.
+ */
+
+package com.rabbitmq.examples;
+
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.boot.test.autoconfigure.web.servlet.WebMvcTest;
+import org.springframework.test.context.junit4.SpringRunner;
+import org.springframework.test.web.servlet.MockMvc;
+
+import static org.assertj.core.api.Assertions.*;
+import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.*;
+import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.*;
+
+@RunWith(SpringRunner.class)
+@WebMvcTest(AuthBackendHttpController.class)
+public class AuthBackendHttpControllerTest {
+
+ @Autowired
+ private MockMvc mvc;
+
+ @Test public void authenticationAuthorisation() throws Exception {
+ this.mvc.perform(get("/auth/user").param("username", "guest").param("password", "guest"))
+ .andExpect(status().isOk()).andExpect(content().string("allow administrator management"));
+
+ this.mvc.perform(get("/auth/user").param("username", "guest").param("password", "wrong"))
+ .andExpect(status().isOk()).andExpect(content().string("deny"));
+
+ this.mvc.perform(get("/auth/vhost").param("username", "guest").param("vhost", "/"))
+ .andExpect(status().isOk()).andExpect(content().string("allow"));
+
+ this.mvc.perform(get("/auth/resource")
+ .param("username", "guest")
+ .param("vhost", "/")
+ .param("resource", "exchange")
+ .param("name", "amq.topic")
+ .param("permission", "write"))
+ .andExpect(status().isOk()).andExpect(content().string("allow"));
+
+ this.mvc.perform(get("/auth/topic")
+ .param("username", "guest")
+ .param("vhost", "/")
+ .param("resource", "exchange")
+ .param("name", "amq.topic")
+ .param("permission", "write")
+ .param("routing_key","a.b"))
+ .andExpect(status().isOk()).andExpect(content().string("allow"));
+
+ this.mvc.perform(get("/auth/topic")
+ .param("username", "guest")
+ .param("vhost", "/")
+ .param("resource", "exchange")
+ .param("name", "amq.topic")
+ .param("permission", "write")
+ .param("routing_key","b.b"))
+ .andExpect(status().isOk()).andExpect(content().string("deny"));
+ }
+
+}
diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/src/test/resources/logback-test.xml b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/src/test/resources/logback-test.xml
new file mode 100644
index 0000000000..93f69d4fb0
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/src/test/resources/logback-test.xml
@@ -0,0 +1,16 @@
+<configuration>
+
+ <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
+ <encoder>
+ <pattern>%d{HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n</pattern>
+ </encoder>
+ </appender>
+
+ <logger name="com.rabbitmq.examples" level="INFO"/>
+
+
+ <root level="WARN">
+ <appender-ref ref="STDOUT" />
+ </root>
+
+</configuration>
diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot_kotlin/.gitignore b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot_kotlin/.gitignore
new file mode 100644
index 0000000000..82eca336e3
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot_kotlin/.gitignore
@@ -0,0 +1,25 @@
+/target/
+!.mvn/wrapper/maven-wrapper.jar
+
+### STS ###
+.apt_generated
+.classpath
+.factorypath
+.project
+.settings
+.springBeans
+.sts4-cache
+
+### IntelliJ IDEA ###
+.idea
+*.iws
+*.iml
+*.ipr
+
+### NetBeans ###
+/nbproject/private/
+/build/
+/nbbuild/
+/dist/
+/nbdist/
+/.nb-gradle/ \ No newline at end of file
diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot_kotlin/.mvn/wrapper/maven-wrapper.jar b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot_kotlin/.mvn/wrapper/maven-wrapper.jar
new file mode 100644
index 0000000000..01e6799737
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot_kotlin/.mvn/wrapper/maven-wrapper.jar
Binary files differ
diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot_kotlin/.mvn/wrapper/maven-wrapper.properties b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot_kotlin/.mvn/wrapper/maven-wrapper.properties
new file mode 100644
index 0000000000..7179346716
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot_kotlin/.mvn/wrapper/maven-wrapper.properties
@@ -0,0 +1 @@
+distributionUrl=https://repo.maven.apache.org/maven2/org/apache/maven/apache-maven/3.5.4/apache-maven-3.5.4-bin.zip
diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot_kotlin/README.md b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot_kotlin/README.md
new file mode 100644
index 0000000000..2bed62b5d5
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot_kotlin/README.md
@@ -0,0 +1,34 @@
+# Spring Boot Kotlin Backend
+
+Run the backend with:
+
+ ./mvnw spring-boot:run
+
+Don't forget to configure the broker to use the backend!
+
+Use [`http`](https://httpie.org/doc) to test your program:
+
+ $ http -f POST http://localhost:8080/auth/user username=foo password=bar -v
+ POST /auth/user HTTP/1.1
+ Accept: */*
+ Accept-Encoding: gzip, deflate
+ Connection: keep-alive
+ Content-Length: 25
+ Content-Type: application/x-www-form-urlencoded; charset=utf-8
+ Host: localhost:8080
+ User-Agent: HTTPie/0.9.2
+
+ username=foo&password=bar
+
+ HTTP/1.1 200
+ Cache-Control: no-cache, no-store, max-age=0, must-revalidate
+ Content-Length: 5
+ Content-Type: text/plain;charset=UTF-8
+ Date: Thu, 01 Nov 2018 21:16:06 GMT
+ Expires: 0
+ Pragma: no-cache
+ X-Content-Type-Options: nosniff
+ X-Frame-Options: DENY
+ X-XSS-Protection: 1; mode=block
+
+ allow
diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot_kotlin/mvnw b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot_kotlin/mvnw
new file mode 100755
index 0000000000..8b9da3b8b6
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot_kotlin/mvnw
@@ -0,0 +1,286 @@
+#!/bin/sh
+# ----------------------------------------------------------------------------
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+# ----------------------------------------------------------------------------
+
+# ----------------------------------------------------------------------------
+# Maven2 Start Up Batch script
+#
+# Required ENV vars:
+# ------------------
+# JAVA_HOME - location of a JDK home dir
+#
+# Optional ENV vars
+# -----------------
+# M2_HOME - location of maven2's installed home dir
+# MAVEN_OPTS - parameters passed to the Java VM when running Maven
+# e.g. to debug Maven itself, use
+# set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000
+# MAVEN_SKIP_RC - flag to disable loading of mavenrc files
+# ----------------------------------------------------------------------------
+
+if [ -z "$MAVEN_SKIP_RC" ] ; then
+
+ if [ -f /etc/mavenrc ] ; then
+ . /etc/mavenrc
+ fi
+
+ if [ -f "$HOME/.mavenrc" ] ; then
+ . "$HOME/.mavenrc"
+ fi
+
+fi
+
+# OS specific support. $var _must_ be set to either true or false.
+cygwin=false;
+darwin=false;
+mingw=false
+case "`uname`" in
+ CYGWIN*) cygwin=true ;;
+ MINGW*) mingw=true;;
+ Darwin*) darwin=true
+ # Use /usr/libexec/java_home if available, otherwise fall back to /Library/Java/Home
+ # See https://developer.apple.com/library/mac/qa/qa1170/_index.html
+ if [ -z "$JAVA_HOME" ]; then
+ if [ -x "/usr/libexec/java_home" ]; then
+ export JAVA_HOME="`/usr/libexec/java_home`"
+ else
+ export JAVA_HOME="/Library/Java/Home"
+ fi
+ fi
+ ;;
+esac
+
+if [ -z "$JAVA_HOME" ] ; then
+ if [ -r /etc/gentoo-release ] ; then
+ JAVA_HOME=`java-config --jre-home`
+ fi
+fi
+
+if [ -z "$M2_HOME" ] ; then
+ ## resolve links - $0 may be a link to maven's home
+ PRG="$0"
+
+ # need this for relative symlinks
+ while [ -h "$PRG" ] ; do
+ ls=`ls -ld "$PRG"`
+ link=`expr "$ls" : '.*-> \(.*\)$'`
+ if expr "$link" : '/.*' > /dev/null; then
+ PRG="$link"
+ else
+ PRG="`dirname "$PRG"`/$link"
+ fi
+ done
+
+ saveddir=`pwd`
+
+ M2_HOME=`dirname "$PRG"`/..
+
+ # make it fully qualified
+ M2_HOME=`cd "$M2_HOME" && pwd`
+
+ cd "$saveddir"
+ # echo Using m2 at $M2_HOME
+fi
+
+# For Cygwin, ensure paths are in UNIX format before anything is touched
+if $cygwin ; then
+ [ -n "$M2_HOME" ] &&
+ M2_HOME=`cygpath --unix "$M2_HOME"`
+ [ -n "$JAVA_HOME" ] &&
+ JAVA_HOME=`cygpath --unix "$JAVA_HOME"`
+ [ -n "$CLASSPATH" ] &&
+ CLASSPATH=`cygpath --path --unix "$CLASSPATH"`
+fi
+
+# For Mingw, ensure paths are in UNIX format before anything is touched
+if $mingw ; then
+ [ -n "$M2_HOME" ] &&
+ M2_HOME="`(cd "$M2_HOME"; pwd)`"
+ [ -n "$JAVA_HOME" ] &&
+ JAVA_HOME="`(cd "$JAVA_HOME"; pwd)`"
+ # TODO classpath?
+fi
+
+if [ -z "$JAVA_HOME" ]; then
+ javaExecutable="`which javac`"
+ if [ -n "$javaExecutable" ] && ! [ "`expr \"$javaExecutable\" : '\([^ ]*\)'`" = "no" ]; then
+ # readlink(1) is not available as standard on Solaris 10.
+ readLink=`which readlink`
+ if [ ! `expr "$readLink" : '\([^ ]*\)'` = "no" ]; then
+ if $darwin ; then
+ javaHome="`dirname \"$javaExecutable\"`"
+ javaExecutable="`cd \"$javaHome\" && pwd -P`/javac"
+ else
+ javaExecutable="`readlink -f \"$javaExecutable\"`"
+ fi
+ javaHome="`dirname \"$javaExecutable\"`"
+ javaHome=`expr "$javaHome" : '\(.*\)/bin'`
+ JAVA_HOME="$javaHome"
+ export JAVA_HOME
+ fi
+ fi
+fi
+
+if [ -z "$JAVACMD" ] ; then
+ if [ -n "$JAVA_HOME" ] ; then
+ if [ -x "$JAVA_HOME/jre/sh/java" ] ; then
+ # IBM's JDK on AIX uses strange locations for the executables
+ JAVACMD="$JAVA_HOME/jre/sh/java"
+ else
+ JAVACMD="$JAVA_HOME/bin/java"
+ fi
+ else
+ JAVACMD="`which java`"
+ fi
+fi
+
+if [ ! -x "$JAVACMD" ] ; then
+ echo "Error: JAVA_HOME is not defined correctly." >&2
+ echo " We cannot execute $JAVACMD" >&2
+ exit 1
+fi
+
+if [ -z "$JAVA_HOME" ] ; then
+ echo "Warning: JAVA_HOME environment variable is not set."
+fi
+
+CLASSWORLDS_LAUNCHER=org.codehaus.plexus.classworlds.launcher.Launcher
+
+# traverses directory structure from process work directory to filesystem root
+# first directory with .mvn subdirectory is considered project base directory
+find_maven_basedir() {
+
+ if [ -z "$1" ]
+ then
+ echo "Path not specified to find_maven_basedir"
+ return 1
+ fi
+
+ basedir="$1"
+ wdir="$1"
+ while [ "$wdir" != '/' ] ; do
+ if [ -d "$wdir"/.mvn ] ; then
+ basedir=$wdir
+ break
+ fi
+ # workaround for JBEAP-8937 (on Solaris 10/Sparc)
+ if [ -d "${wdir}" ]; then
+ wdir=`cd "$wdir/.."; pwd`
+ fi
+ # end of workaround
+ done
+ echo "${basedir}"
+}
+
+# concatenates all lines of a file
+concat_lines() {
+ if [ -f "$1" ]; then
+ echo "$(tr -s '\n' ' ' < "$1")"
+ fi
+}
+
+BASE_DIR=`find_maven_basedir "$(pwd)"`
+if [ -z "$BASE_DIR" ]; then
+ exit 1;
+fi
+
+##########################################################################################
+# Extension to allow automatically downloading the maven-wrapper.jar from Maven-central
+# This allows using the maven wrapper in projects that prohibit checking in binary data.
+##########################################################################################
+if [ -r "$BASE_DIR/.mvn/wrapper/maven-wrapper.jar" ]; then
+ if [ "$MVNW_VERBOSE" = true ]; then
+ echo "Found .mvn/wrapper/maven-wrapper.jar"
+ fi
+else
+ if [ "$MVNW_VERBOSE" = true ]; then
+ echo "Couldn't find .mvn/wrapper/maven-wrapper.jar, downloading it ..."
+ fi
+ jarUrl="https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.4.2/maven-wrapper-0.4.2.jar"
+ while IFS="=" read key value; do
+ case "$key" in (wrapperUrl) jarUrl="$value"; break ;;
+ esac
+ done < "$BASE_DIR/.mvn/wrapper/maven-wrapper.properties"
+ if [ "$MVNW_VERBOSE" = true ]; then
+ echo "Downloading from: $jarUrl"
+ fi
+ wrapperJarPath="$BASE_DIR/.mvn/wrapper/maven-wrapper.jar"
+
+ if command -v wget > /dev/null; then
+ if [ "$MVNW_VERBOSE" = true ]; then
+ echo "Found wget ... using wget"
+ fi
+ wget "$jarUrl" -O "$wrapperJarPath"
+ elif command -v curl > /dev/null; then
+ if [ "$MVNW_VERBOSE" = true ]; then
+ echo "Found curl ... using curl"
+ fi
+ curl -o "$wrapperJarPath" "$jarUrl"
+ else
+ if [ "$MVNW_VERBOSE" = true ]; then
+ echo "Falling back to using Java to download"
+ fi
+ javaClass="$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.java"
+ if [ -e "$javaClass" ]; then
+ if [ ! -e "$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.class" ]; then
+ if [ "$MVNW_VERBOSE" = true ]; then
+ echo " - Compiling MavenWrapperDownloader.java ..."
+ fi
+ # Compiling the Java class
+ ("$JAVA_HOME/bin/javac" "$javaClass")
+ fi
+ if [ -e "$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.class" ]; then
+ # Running the downloader
+ if [ "$MVNW_VERBOSE" = true ]; then
+ echo " - Running MavenWrapperDownloader.java ..."
+ fi
+ ("$JAVA_HOME/bin/java" -cp .mvn/wrapper MavenWrapperDownloader "$MAVEN_PROJECTBASEDIR")
+ fi
+ fi
+ fi
+fi
+##########################################################################################
+# End of extension
+##########################################################################################
+
+export MAVEN_PROJECTBASEDIR=${MAVEN_BASEDIR:-"$BASE_DIR"}
+if [ "$MVNW_VERBOSE" = true ]; then
+ echo $MAVEN_PROJECTBASEDIR
+fi
+MAVEN_OPTS="$(concat_lines "$MAVEN_PROJECTBASEDIR/.mvn/jvm.config") $MAVEN_OPTS"
+
+# For Cygwin, switch paths to Windows format before running java
+if $cygwin; then
+ [ -n "$M2_HOME" ] &&
+ M2_HOME=`cygpath --path --windows "$M2_HOME"`
+ [ -n "$JAVA_HOME" ] &&
+ JAVA_HOME=`cygpath --path --windows "$JAVA_HOME"`
+ [ -n "$CLASSPATH" ] &&
+ CLASSPATH=`cygpath --path --windows "$CLASSPATH"`
+ [ -n "$MAVEN_PROJECTBASEDIR" ] &&
+ MAVEN_PROJECTBASEDIR=`cygpath --path --windows "$MAVEN_PROJECTBASEDIR"`
+fi
+
+WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain
+
+exec "$JAVACMD" \
+ $MAVEN_OPTS \
+ -classpath "$MAVEN_PROJECTBASEDIR/.mvn/wrapper/maven-wrapper.jar" \
+ "-Dmaven.home=${M2_HOME}" "-Dmaven.multiModuleProjectDirectory=${MAVEN_PROJECTBASEDIR}" \
+ ${WRAPPER_LAUNCHER} $MAVEN_CONFIG "$@"
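
The find_maven_basedir helper above is the heart of the script: walk up from the
working directory until a directory containing .mvn is found, falling back to the
starting directory otherwise. A minimal sketch of the same walk in Kotlin, purely
illustrative (java.io.File based; not part of the wrapper itself):

    import java.io.File

    // Ascend from `start` toward the filesystem root; the first directory that
    // contains a .mvn subdirectory is taken as the project base directory.
    // If none is found, fall back to `start`, mirroring the script's fallback.
    fun findMavenBasedir(start: File): File {
        var dir: File? = start.absoluteFile
        while (dir != null) {
            if (File(dir, ".mvn").isDirectory) return dir
            dir = dir.parentFile
        }
        return start.absoluteFile
    }

    fun main() = println(findMavenBasedir(File(System.getProperty("user.dir"))))
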
diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot_kotlin/mvnw.cmd b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot_kotlin/mvnw.cmd
new file mode 100644
index 0000000000..fef5a8f7f9
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot_kotlin/mvnw.cmd
@@ -0,0 +1,161 @@
+@REM ----------------------------------------------------------------------------
+@REM Licensed to the Apache Software Foundation (ASF) under one
+@REM or more contributor license agreements. See the NOTICE file
+@REM distributed with this work for additional information
+@REM regarding copyright ownership. The ASF licenses this file
+@REM to you under the Apache License, Version 2.0 (the
+@REM "License"); you may not use this file except in compliance
+@REM with the License. You may obtain a copy of the License at
+@REM
+@REM https://www.apache.org/licenses/LICENSE-2.0
+@REM
+@REM Unless required by applicable law or agreed to in writing,
+@REM software distributed under the License is distributed on an
+@REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+@REM KIND, either express or implied. See the License for the
+@REM specific language governing permissions and limitations
+@REM under the License.
+@REM ----------------------------------------------------------------------------
+
+@REM ----------------------------------------------------------------------------
+@REM Maven2 Start Up Batch script
+@REM
+@REM Required ENV vars:
+@REM JAVA_HOME - location of a JDK home dir
+@REM
+@REM Optional ENV vars
+@REM M2_HOME - location of maven2's installed home dir
+@REM MAVEN_BATCH_ECHO - set to 'on' to enable the echoing of the batch commands
+@REM MAVEN_BATCH_PAUSE - set to 'on' to wait for a key stroke before ending
+@REM MAVEN_OPTS - parameters passed to the Java VM when running Maven
+@REM e.g. to debug Maven itself, use
+@REM set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000
+@REM MAVEN_SKIP_RC - flag to disable loading of mavenrc files
+@REM ----------------------------------------------------------------------------
+
+@REM Begin all REM lines with '@' in case MAVEN_BATCH_ECHO is 'on'
+@echo off
+@REM set title of command window
+title %0
+@REM enable echoing by setting MAVEN_BATCH_ECHO to 'on'
+@if "%MAVEN_BATCH_ECHO%" == "on" echo %MAVEN_BATCH_ECHO%
+
+@REM set %HOME% to equivalent of $HOME
+if "%HOME%" == "" (set "HOME=%HOMEDRIVE%%HOMEPATH%")
+
+@REM Execute a user defined script before this one
+if not "%MAVEN_SKIP_RC%" == "" goto skipRcPre
+@REM check for pre script, once with legacy .bat ending and once with .cmd ending
+if exist "%HOME%\mavenrc_pre.bat" call "%HOME%\mavenrc_pre.bat"
+if exist "%HOME%\mavenrc_pre.cmd" call "%HOME%\mavenrc_pre.cmd"
+:skipRcPre
+
+@setlocal
+
+set ERROR_CODE=0
+
+@REM To isolate internal variables from possible post scripts, we use another setlocal
+@setlocal
+
+@REM ==== START VALIDATION ====
+if not "%JAVA_HOME%" == "" goto OkJHome
+
+echo.
+echo Error: JAVA_HOME not found in your environment. >&2
+echo Please set the JAVA_HOME variable in your environment to match the >&2
+echo location of your Java installation. >&2
+echo.
+goto error
+
+:OkJHome
+if exist "%JAVA_HOME%\bin\java.exe" goto init
+
+echo.
+echo Error: JAVA_HOME is set to an invalid directory. >&2
+echo JAVA_HOME = "%JAVA_HOME%" >&2
+echo Please set the JAVA_HOME variable in your environment to match the >&2
+echo location of your Java installation. >&2
+echo.
+goto error
+
+@REM ==== END VALIDATION ====
+
+:init
+
+@REM Find the project base dir, i.e. the directory that contains the folder ".mvn".
+@REM Fallback to current working directory if not found.
+
+set MAVEN_PROJECTBASEDIR=%MAVEN_BASEDIR%
+IF NOT "%MAVEN_PROJECTBASEDIR%"=="" goto endDetectBaseDir
+
+set EXEC_DIR=%CD%
+set WDIR=%EXEC_DIR%
+:findBaseDir
+IF EXIST "%WDIR%"\.mvn goto baseDirFound
+cd ..
+IF "%WDIR%"=="%CD%" goto baseDirNotFound
+set WDIR=%CD%
+goto findBaseDir
+
+:baseDirFound
+set MAVEN_PROJECTBASEDIR=%WDIR%
+cd "%EXEC_DIR%"
+goto endDetectBaseDir
+
+:baseDirNotFound
+set MAVEN_PROJECTBASEDIR=%EXEC_DIR%
+cd "%EXEC_DIR%"
+
+:endDetectBaseDir
+
+IF NOT EXIST "%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config" goto endReadAdditionalConfig
+
+@setlocal EnableExtensions EnableDelayedExpansion
+for /F "usebackq delims=" %%a in ("%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config") do set JVM_CONFIG_MAVEN_PROPS=!JVM_CONFIG_MAVEN_PROPS! %%a
+@endlocal & set JVM_CONFIG_MAVEN_PROPS=%JVM_CONFIG_MAVEN_PROPS%
+
+:endReadAdditionalConfig
+
+SET MAVEN_JAVA_EXE="%JAVA_HOME%\bin\java.exe"
+set WRAPPER_JAR="%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.jar"
+set WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain
+
+set DOWNLOAD_URL="https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.4.2/maven-wrapper-0.4.2.jar"
+FOR /F "tokens=1,2 delims==" %%A IN (%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.properties) DO (
+ IF "%%A"=="wrapperUrl" SET DOWNLOAD_URL=%%B
+)
+
+@REM Extension to allow automatically downloading the maven-wrapper.jar from Maven-central
+@REM This allows using the maven wrapper in projects that prohibit checking in binary data.
+if exist %WRAPPER_JAR% (
+ echo Found %WRAPPER_JAR%
+) else (
+ echo Couldn't find %WRAPPER_JAR%, downloading it ...
+ echo Downloading from: %DOWNLOAD_URL%
+ powershell -Command "(New-Object Net.WebClient).DownloadFile('%DOWNLOAD_URL%', '%WRAPPER_JAR%')"
+ echo Finished downloading %WRAPPER_JAR%
+)
+@REM End of extension
+
+%MAVEN_JAVA_EXE% %JVM_CONFIG_MAVEN_PROPS% %MAVEN_OPTS% %MAVEN_DEBUG_OPTS% -classpath %WRAPPER_JAR% "-Dmaven.multiModuleProjectDirectory=%MAVEN_PROJECTBASEDIR%" %WRAPPER_LAUNCHER% %MAVEN_CONFIG% %*
+if ERRORLEVEL 1 goto error
+goto end
+
+:error
+set ERROR_CODE=1
+
+:end
+@endlocal & set ERROR_CODE=%ERROR_CODE%
+
+if not "%MAVEN_SKIP_RC%" == "" goto skipRcPost
+@REM check for post script, once with legacy .bat ending and once with .cmd ending
+if exist "%HOME%\mavenrc_post.bat" call "%HOME%\mavenrc_post.bat"
+if exist "%HOME%\mavenrc_post.cmd" call "%HOME%\mavenrc_post.cmd"
+:skipRcPost
+
+@REM pause the script if MAVEN_BATCH_PAUSE is set to 'on'
+if "%MAVEN_BATCH_PAUSE%" == "on" pause
+
+if "%MAVEN_TERMINATE_CMD%" == "on" exit %ERROR_CODE%
+
+exit /B %ERROR_CODE%
diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot_kotlin/pom.xml b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot_kotlin/pom.xml
new file mode 100644
index 0000000000..8d281d8aa8
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot_kotlin/pom.xml
@@ -0,0 +1,118 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+
+ <groupId>com.rabbitmq.examples</groupId>
+ <artifactId>rabbitmq-auth-backend-spring-boot-kotlin</artifactId>
+ <version>1.0-SNAPSHOT</version>
+ <packaging>jar</packaging>
+
+ <name>rabbitmq-auth-backend-spring-boot-kotlin</name>
+ <description>Demo project for https://github.com/rabbitmq/rabbitmq-auth-backend-http</description>
+
+ <parent>
+ <groupId>org.springframework.boot</groupId>
+ <artifactId>spring-boot-starter-parent</artifactId>
+ <version>2.1.0.RELEASE</version>
+ <relativePath/> <!-- lookup parent from repository -->
+ </parent>
+
+ <properties>
+ <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
+ <project.reporting.outputEncoding>UTF-8</project.reporting.outputEncoding>
+ <java.version>1.8</java.version>
+ <kotlin.version>1.3.0</kotlin.version>
+ <junit-jupiter.version>5.3.1</junit-jupiter.version>
+ </properties>
+
+ <dependencies>
+ <dependency>
+ <groupId>org.springframework.boot</groupId>
+ <artifactId>spring-boot-starter-security</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.springframework.boot</groupId>
+ <artifactId>spring-boot-starter-web</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>com.fasterxml.jackson.module</groupId>
+ <artifactId>jackson-module-kotlin</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.jetbrains.kotlin</groupId>
+ <artifactId>kotlin-stdlib-jdk8</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.jetbrains.kotlin</groupId>
+ <artifactId>kotlin-reflect</artifactId>
+ </dependency>
+
+ <dependency>
+ <groupId>org.springframework.security</groupId>
+ <artifactId>spring-security-test</artifactId>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.springframework.boot</groupId>
+ <artifactId>spring-boot-starter-test</artifactId>
+ <scope>test</scope>
+ <exclusions>
+ <exclusion>
+ <groupId>junit</groupId>
+ <artifactId>junit</artifactId>
+ </exclusion>
+ <exclusion>
+ <groupId>org.skyscreamer</groupId>
+ <artifactId>jsonassert</artifactId>
+ </exclusion>
+ </exclusions>
+ </dependency>
+ <dependency>
+ <groupId>org.junit.jupiter</groupId>
+ <artifactId>junit-jupiter-api</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.junit.jupiter</groupId>
+ <artifactId>junit-jupiter-engine</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.jetbrains.kotlin</groupId>
+ <artifactId>kotlin-test</artifactId>
+ <version>${kotlin.version}</version>
+ <scope>test</scope>
+ </dependency>
+ </dependencies>
+
+ <build>
+ <sourceDirectory>${project.basedir}/src/main/kotlin</sourceDirectory>
+ <testSourceDirectory>${project.basedir}/src/test/kotlin</testSourceDirectory>
+ <plugins>
+ <plugin>
+ <groupId>org.springframework.boot</groupId>
+ <artifactId>spring-boot-maven-plugin</artifactId>
+ </plugin>
+ <plugin>
+ <artifactId>kotlin-maven-plugin</artifactId>
+ <groupId>org.jetbrains.kotlin</groupId>
+ <configuration>
+ <args>
+ <arg>-Xjsr305=strict</arg>
+ </args>
+ <compilerPlugins>
+ <plugin>spring</plugin>
+ </compilerPlugins>
+ </configuration>
+ <dependencies>
+ <dependency>
+ <groupId>org.jetbrains.kotlin</groupId>
+ <artifactId>kotlin-maven-allopen</artifactId>
+ <version>${kotlin.version}</version>
+ </dependency>
+ </dependencies>
+ </plugin>
+ </plugins>
+ </build>
+
+
+</project>
diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot_kotlin/src/main/kotlin/com/rabbitmq/examples/AuthController.kt b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot_kotlin/src/main/kotlin/com/rabbitmq/examples/AuthController.kt
new file mode 100644
index 0000000000..5281fa06a0
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot_kotlin/src/main/kotlin/com/rabbitmq/examples/AuthController.kt
@@ -0,0 +1,73 @@
+/**
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at https://mozilla.org/MPL/2.0/.
+ *
+ * Copyright (c) 2018-2020 VMware, Inc. or its affiliates. All rights reserved.
+ */
+package com.rabbitmq.examples
+
+import org.slf4j.LoggerFactory
+import org.springframework.web.bind.annotation.RequestMapping
+import org.springframework.web.bind.annotation.RequestMethod
+import org.springframework.web.bind.annotation.RestController
+
+/**
+ * Controller implementing the RabbitMQ authentication/authorisation backend as described
+ * in https://github.com/rabbitmq/rabbitmq-auth-backend-http
+ */
+@RequestMapping(path = ["/auth"], method = [RequestMethod.GET, RequestMethod.POST])
+@RestController
+class AuthController {
+
+ private val ALLOW = "allow"
+ private val DENY = "deny"
+
+ private val logger = LoggerFactory.getLogger(AuthController::class.java)
+
+ /**
+ * user_path
+ */
+ @RequestMapping(value = ["/user"], produces = ["text/plain"])
+ fun checkUserCredentials(passwordCheck: PasswordCheck): String {
+ logger.info("checkUserCredentials username: ${passwordCheck.username}")
+ return if (passwordCheck.username == "guest" && passwordCheck.password == "guest") {
+ "$ALLOW administrator management"
+ } else {
+ DENY
+ }
+ }
+
+ /**
+ * vhost_path
+ */
+ @RequestMapping(value = ["/vhost"], produces = ["text/plain"])
+ fun checkVhost(question: VirtualHostCheck): String {
+ logger.info("checkVhost: $question")
+ return ALLOW
+ }
+
+ /**
+ * resource_path
+ */
+ @RequestMapping(value = ["/resource"], produces = ["text/plain"])
+ fun checkResource(question: ResourceCheck): String {
+ logger.info("checkResource: $question")
+ return ALLOW
+ }
+
+ /**
+ * topic_path
+ */
+ @RequestMapping(value = ["/topic"], produces = ["text/plain"])
+ fun checkTopic(question: TopicCheck): String {
+ logger.info("checkTopic: $question")
+ return if (question.routing_key.startsWith("a", false)) ALLOW else DENY
+ }
+
+}
+
+data class PasswordCheck(val username: String, val password: String)
+data class VirtualHostCheck(val username: String, val vhost: String)
+data class ResourceCheck(val username: String, val vhost: String, val resource: String, val name: String, val permission: String)
+data class TopicCheck(val username: String, val vhost: String, val resource: String, val name: String, val permission: String, val routing_key: String)
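
With the application running, the controller above can also be smoke-tested by hand.
A minimal sketch of a manual check against /auth/user, assuming the default Spring
Boot port 8080 (adjust the URL to your environment):

    import java.net.HttpURLConnection
    import java.net.URL

    fun main() {
        // The backend expects an application/x-www-form-urlencoded POST and
        // answers 200 OK with a plain-text verdict ("allow ..." or "deny").
        val conn = URL("http://localhost:8080/auth/user").openConnection() as HttpURLConnection
        conn.requestMethod = "POST"
        conn.doOutput = true
        conn.setRequestProperty("Content-Type", "application/x-www-form-urlencoded")
        conn.outputStream.use { it.write("username=guest&password=guest".toByteArray()) }
        val verdict = conn.inputStream.bufferedReader().use { it.readText() }
        println(verdict) // expected: "allow administrator management"
    }
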
diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot_kotlin/src/main/kotlin/com/rabbitmq/examples/Configuration.kt b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot_kotlin/src/main/kotlin/com/rabbitmq/examples/Configuration.kt
new file mode 100644
index 0000000000..67a02e02c6
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot_kotlin/src/main/kotlin/com/rabbitmq/examples/Configuration.kt
@@ -0,0 +1,20 @@
+/**
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at https://mozilla.org/MPL/2.0/.
+ *
+ * Copyright (c) 2018-2020 VMware, Inc. or its affiliates. All rights reserved.
+ */
+package com.rabbitmq.examples
+
+import org.springframework.context.annotation.Configuration
+import org.springframework.security.config.annotation.web.builders.HttpSecurity
+import org.springframework.security.config.annotation.web.configuration.WebSecurityConfigurerAdapter
+
+
+@Configuration
+class SecurityConfiguration : WebSecurityConfigurerAdapter() {
+ override fun configure(http: HttpSecurity) {
+ http.csrf().ignoringAntMatchers("/auth/**")
+ }
+}
\ No newline at end of file
diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot_kotlin/src/main/kotlin/com/rabbitmq/examples/RabbitmqAuthBackendApplication.kt b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot_kotlin/src/main/kotlin/com/rabbitmq/examples/RabbitmqAuthBackendApplication.kt
new file mode 100644
index 0000000000..fd0a8d8d65
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot_kotlin/src/main/kotlin/com/rabbitmq/examples/RabbitmqAuthBackendApplication.kt
@@ -0,0 +1,18 @@
+/**
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at https://mozilla.org/MPL/2.0/.
+ *
+ * Copyright (c) 2018-2020 VMware, Inc. or its affiliates. All rights reserved.
+ */
+package com.rabbitmq.examples
+
+import org.springframework.boot.autoconfigure.SpringBootApplication
+import org.springframework.boot.runApplication
+
+@SpringBootApplication
+class RabbitmqAuthBackendSpringBootKotlinApplication
+
+fun main(args: Array<String>) {
+ runApplication<RabbitmqAuthBackendSpringBootKotlinApplication>(*args)
+}
diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot_kotlin/src/main/resources/application.properties b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot_kotlin/src/main/resources/application.properties
new file mode 100644
index 0000000000..b76da5ffa8
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot_kotlin/src/main/resources/application.properties
@@ -0,0 +1,2 @@
+logging.level.root=INFO
+logging.level.com.rabbitmq.examples=INFO
\ No newline at end of file
diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot_kotlin/src/main/resources/rabbitmq.conf b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot_kotlin/src/main/resources/rabbitmq.conf
new file mode 100644
index 0000000000..02737bd5a5
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot_kotlin/src/main/resources/rabbitmq.conf
@@ -0,0 +1,11 @@
+# Additional configuration for /etc/rabbitmq/rabbitmq.conf
+# do not forget to enable the plugin first, e.g. in a Dockerfile:
+# RUN rabbitmq-plugins enable --offline rabbitmq_auth_backend_http
+
+# http backend
+auth_backends.1 = rabbit_auth_backend_http
+auth_http.user_path = http://<server:port>/<path>/auth/user
+auth_http.vhost_path = http://<server:port>/<path>/auth/vhost
+auth_http.resource_path = http://<server:port>/<path>/auth/resource
+auth_http.topic_path = http://<server:port>/<path>/auth/topic
+auth_http.http_method = post
\ No newline at end of file
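
Each path configured above receives a form-encoded POST (given http_method = post)
and must answer 200 OK with a plain-text verdict: "deny", or "allow" optionally
followed by user tags (user_path only). The broker parses this internally; a small
illustrative Kotlin equivalent, shown only to document the contract:

    // Split a verdict such as "allow administrator management" or "deny"
    // into a boolean decision plus any trailing tags.
    fun parseVerdict(body: String): Pair<Boolean, List<String>> {
        val words = body.trim().split(Regex("\\s+"))
        return (words.firstOrNull() == "allow") to words.drop(1)
    }

    fun main() {
        println(parseVerdict("allow administrator management")) // (true, [administrator, management])
        println(parseVerdict("deny"))                           // (false, [])
    }
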
diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot_kotlin/src/test/kotlin/com/rabbitmq/examples/AuthApiTest.kt b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot_kotlin/src/test/kotlin/com/rabbitmq/examples/AuthApiTest.kt
new file mode 100644
index 0000000000..be899d334c
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot_kotlin/src/test/kotlin/com/rabbitmq/examples/AuthApiTest.kt
@@ -0,0 +1,127 @@
+/**
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at https://mozilla.org/MPL/2.0/.
+ *
+ * Copyright (c) 2018-2020 VMware, Inc. or its affiliates. All rights reserved.
+ */
+package com.rabbitmq.examples
+
+import org.junit.jupiter.api.Test
+import org.junit.jupiter.api.extension.ExtendWith
+import org.springframework.beans.factory.annotation.Autowired
+import org.springframework.boot.test.autoconfigure.web.servlet.WebMvcTest
+import org.springframework.http.MediaType
+import org.springframework.test.context.junit.jupiter.SpringExtension
+import org.springframework.test.web.servlet.MockMvc
+import org.springframework.test.web.servlet.request.MockMvcRequestBuilders.get
+import org.springframework.test.web.servlet.request.MockMvcRequestBuilders.post
+import org.springframework.test.web.servlet.result.MockMvcResultMatchers
+import org.springframework.test.web.servlet.result.MockMvcResultMatchers.status
+
+@ExtendWith(SpringExtension::class)
+@WebMvcTest
+class AuthApiTest(@Autowired val mockMvc: MockMvc) {
+
+ // user
+ @Test
+ fun `Check authentication for external users with GET`() {
+ mockMvc.perform(get("/auth/user")
+ .param("username", "guest")
+ .param("password", "guest"))
+ .andExpect(status().isOk)
+ .andExpect(MockMvcResultMatchers.content().string("allow administrator management"))
+
+ }
+
+ @Test
+ fun `Check deny for external users with GET`() {
+ mockMvc.perform(get("/auth/user")
+ .param("username", "guest")
+ .param("password", "wrong"))
+ .andExpect(status().isOk)
+ .andExpect(MockMvcResultMatchers.content().string("deny"))
+ }
+
+ @Test
+ fun `Check authentication for external users with POST`() {
+ mockMvc.perform(post("/auth/user").contentType(MediaType.APPLICATION_FORM_URLENCODED)
+ .content("username=guest&password=guest"))
+ .andExpect(status().isOk)
+ .andExpect(MockMvcResultMatchers.content().string("allow administrator management"))
+ }
+
+ // vhost
+ @Test
+ fun `Check vhost for external users with GET`() {
+ mockMvc.perform(get("/auth/vhost")
+ .param("username", "guest")
+ .param("vhost", "guest"))
+ .andExpect(status().isOk)
+ .andExpect(MockMvcResultMatchers.content().string("allow"))
+ }
+
+ @Test
+ fun `Check vhost for external users with POST`() {
+ mockMvc.perform(post("/auth/vhost").contentType(MediaType.APPLICATION_FORM_URLENCODED)
+ .content("username=guest&vhost=guest"))
+ .andExpect(status().isOk)
+ .andExpect(MockMvcResultMatchers.content().string("allow"))
+ }
+
+ // resource
+ @Test
+ fun `Check resource_path for external users with GET`() {
+ mockMvc.perform(get("/auth/resource")
+ .param("username", "guest")
+ .param("vhost", "guest")
+ .param("resource", "exchange")
+ .param("name", "amq.topic")
+ .param("permission", "write"))
+ .andExpect(status().isOk)
+ .andExpect(MockMvcResultMatchers.content().string("allow"))
+ }
+
+ @Test
+ fun `Check resource_path for external users with POST`() {
+ mockMvc.perform(post("/auth/resource").contentType(MediaType.APPLICATION_FORM_URLENCODED)
+ .content("username=guest&vhost=guest&resource=exchange&name=amq.topic&permission=write"))
+ .andExpect(status().isOk)
+ .andExpect(MockMvcResultMatchers.content().string("allow"))
+ }
+
+ // topic
+ @Test
+ fun `Check topic for external users with GET`() {
+ mockMvc.perform(get("/auth/topic")
+ .param("username", "guest")
+ .param("vhost", "guest")
+ .param("resource", "exchange")
+ .param("name", "amq.topic")
+ .param("routing_key", "a.b")
+ .param("permission", "write"))
+ .andExpect(status().isOk)
+ .andExpect(MockMvcResultMatchers.content().string("allow"))
+ }
+
+ @Test
+ fun `Check topic for external users with POST`() {
+ mockMvc.perform(post("/auth/topic").contentType(MediaType.APPLICATION_FORM_URLENCODED)
+ .content("username=guest&vhost=guest&resource=exchange&name=amq.topic&permission=write&routing_key=a.b"))
+ .andExpect(status().isOk)
+ .andExpect(MockMvcResultMatchers.content().string("allow"))
+ }
+
+ @Test
+ fun `Check deny topic for external users with GET`() {
+ mockMvc.perform(get("/auth/topic")
+ .param("username", "guest")
+ .param("vhost", "guest")
+ .param("resource", "exchange")
+ .param("name", "amq.topic")
+ .param("routing_key", "b.b")
+ .param("permission", "write"))
+ .andExpect(status().isOk)
+ .andExpect(MockMvcResultMatchers.content().string("deny"))
+ }
+}
\ No newline at end of file
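
The suite above exercises the deny branch of /auth/user only via GET; a sketch of
the matching POST variant, in the same style (a hypothetical addition to the same
test class):

    @Test
    fun `Check deny for external users with POST`() {
        mockMvc.perform(post("/auth/user").contentType(MediaType.APPLICATION_FORM_URLENCODED)
                .content("username=guest&password=wrong"))
                .andExpect(status().isOk)
                .andExpect(MockMvcResultMatchers.content().string("deny"))
    }
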
diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnet/App_Start/WebApiConfig.cs b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnet/App_Start/WebApiConfig.cs
new file mode 100644
index 0000000000..bd11ca20c4
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnet/App_Start/WebApiConfig.cs
@@ -0,0 +1,12 @@
+using System.Web.Http;
+
+namespace WebApiHttpAuthService
+{
+ public static class WebApiConfig
+ {
+ public static void Register(HttpConfiguration config)
+ {
+ config.MapHttpAttributeRoutes();
+ }
+ }
+}
diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnet/Controllers/AuthController.cs b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnet/Controllers/AuthController.cs
new file mode 100644
index 0000000000..8282d1407e
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnet/Controllers/AuthController.cs
@@ -0,0 +1,143 @@
+using System;
+using System.Net;
+using System.Net.Http;
+using System.Net.Http.Formatting;
+using System.Text;
+using System.Web.Http;
+
+namespace WebApiHttpAuthService.Controllers
+{
+ [RoutePrefix("auth")]
+ public class AuthController : ApiController
+ {
+ // Note: the following is necessary to ensure that no
+ // BOM is part of the response
+ private static readonly UTF8Encoding encoding = new UTF8Encoding(false);
+
+ [Route("user")]
+ [HttpPost]
+ public HttpResponseMessage user(FormDataCollection form)
+ {
+ string content = "allow administrator management";
+ try
+ {
+ if (form != null)
+ {
+ string username = form.Get("username");
+ string password = form.Get("password");
+
+ if (username == "authuser") // sample check; replace with your custom logic here
+ content = "deny";
+
+ string userlog = string.Format("user :{0}, password :{1}", username, password);
+ }
+ }
+ catch (Exception ex)
+ {
+ //check or log error
+ }
+
+ var resp = new HttpResponseMessage(HttpStatusCode.OK);
+ resp.Content = new StringContent(content, encoding, "text/plain");
+ return resp;
+ }
+
+ [Route("vhost")]
+ [HttpPost]
+ public HttpResponseMessage vhost(FormDataCollection form)
+ {
+ string content = "allow";
+ try
+ {
+ if (form != null)
+ {
+ string username = form.Get("username");
+ string ip = form.Get("ip");
+
+ if (username == "authuser") // sample check; replace with your custom logic here
+ content = "deny";
+
+ string userlog = string.Format("user :{0}, ip :{1}", username, ip);
+ }
+ }
+ catch (Exception ex)
+ {
+ //check or log error
+ }
+
+ var resp = new HttpResponseMessage(HttpStatusCode.OK);
+ resp.Content = new StringContent(content, encoding, "text/plain");
+ return resp;
+ }
+
+ [Route("resource")]
+ [HttpPost]
+ public HttpResponseMessage resource(FormDataCollection form)
+ {
+ string content = "allow";
+
+ try
+ {
+ if (form != null)
+ {
+ string username = form.Get("username");
+ string vhost = form.Get("vhost");
+ string resource = form.Get("resource");
+ string name = form.Get("name");
+ string permission = form.Get("permission");
+
+ if (username == "authuser") // sample check; replace with your custom logic here
+ content = "deny";
+
+ string userlog = string.Format("user :{0}, vhost :{1}, resource :{2}, name: {3}, permission: {4}", username, vhost, resource, name, permission);
+
+ }
+ }
+ catch (Exception ex)
+ {
+ //check or log error
+ }
+
+
+ var resp = new HttpResponseMessage(HttpStatusCode.OK);
+ resp.Content = new StringContent(content, encoding, "text/plain");
+ return resp;
+ }
+
+ [Route("topic")]
+ [HttpPost]
+ public HttpResponseMessage topic(FormDataCollection form)
+ {
+ string content = "allow";
+ try
+ {
+ if (form != null)
+ {
+ string username = form.Get("username");
+ string vhost = form.Get("vhost");
+ string resource = form.Get("resource");
+ string name = form.Get("name");
+ string permission = form.Get("permission");
+ string routing_key = form.Get("routing_key");
+
+ if (username == "authuser") // sample check; replace with your custom logic here
+ content = "deny";
+
+ string userlog = string.Format("user :{0}, vhost :{1}, resource :{2}, name: {3}, permission: {4}, routing_key :{5}", username, vhost, resource, name, permission, routing_key);
+
+ }
+ }
+ catch (Exception ex)
+ {
+ //check or log error
+ }
+
+ var resp = new HttpResponseMessage(HttpStatusCode.OK);
+ resp.Content = new StringContent(content, encoding, "text/plain");
+ return resp;
+ }
+
+
+
+ }
+}
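
As with the Kotlin example, these endpoints can be checked by hand: posting
username=authuser to any of them should yield "deny". A sketch of such a check in
Kotlin (the URL reuses the IIS port from the project file below and is an
assumption; adjust it to your setup):

    import java.net.HttpURLConnection
    import java.net.URL

    // Hypothetical helper: POST a form body to an auth endpoint, return the verdict.
    fun postForm(endpoint: String, body: String): String {
        val conn = URL(endpoint).openConnection() as HttpURLConnection
        conn.requestMethod = "POST"
        conn.doOutput = true
        conn.setRequestProperty("Content-Type", "application/x-www-form-urlencoded")
        conn.outputStream.use { it.write(body.toByteArray()) }
        return conn.inputStream.bufferedReader().use { it.readText() }
    }

    fun main() {
        println(postForm("http://localhost:62190/auth/user", "username=authuser&password=x")) // "deny"
    }
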
diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnet/Global.asax b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnet/Global.asax
new file mode 100644
index 0000000000..b1a51c9b48
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnet/Global.asax
@@ -0,0 +1 @@
+<%@ Application Codebehind="Global.asax.cs" Inherits="WebApiHttpAuthService.WebApiApplication" Language="C#" %>
diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnet/Global.asax.cs b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnet/Global.asax.cs
new file mode 100644
index 0000000000..0f7abd87dc
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnet/Global.asax.cs
@@ -0,0 +1,12 @@
+using System.Web.Http;
+
+namespace WebApiHttpAuthService
+{
+ public class WebApiApplication : System.Web.HttpApplication
+ {
+ protected void Application_Start()
+ {
+ GlobalConfiguration.Configure(WebApiConfig.Register);
+ }
+ }
+}
diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnet/Properties/AssemblyInfo.cs b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnet/Properties/AssemblyInfo.cs
new file mode 100644
index 0000000000..1da9738e4d
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnet/Properties/AssemblyInfo.cs
@@ -0,0 +1,35 @@
+using System.Reflection;
+using System.Runtime.CompilerServices;
+using System.Runtime.InteropServices;
+
+// General Information about an assembly is controlled through the following
+// set of attributes. Change these attribute values to modify the information
+// associated with an assembly.
+[assembly: AssemblyTitle("WebApiHttpAuthService")]
+[assembly: AssemblyDescription("")]
+[assembly: AssemblyConfiguration("")]
+[assembly: AssemblyCompany("")]
+[assembly: AssemblyProduct("WebApiHttpAuthService")]
+[assembly: AssemblyCopyright("Copyright © 2017")]
+[assembly: AssemblyTrademark("")]
+[assembly: AssemblyCulture("")]
+
+// Setting ComVisible to false makes the types in this assembly not visible
+// to COM components. If you need to access a type in this assembly from
+// COM, set the ComVisible attribute to true on that type.
+[assembly: ComVisible(false)]
+
+// The following GUID is for the ID of the typelib if this project is exposed to COM
+[assembly: Guid("f141c3e3-fd04-475b-b2b9-00328d0f907b")]
+
+// Version information for an assembly consists of the following four values:
+//
+// Major Version
+// Minor Version
+// Build Number
+// Revision
+//
+// You can specify all the values or you can default the Revision and Build Numbers
+// by using the '*' as shown below:
+[assembly: AssemblyVersion("1.0.0.0")]
+[assembly: AssemblyFileVersion("1.0.0.0")]
diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnet/Web.Debug.config b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnet/Web.Debug.config
new file mode 100644
index 0000000000..c1a56423b0
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnet/Web.Debug.config
@@ -0,0 +1,30 @@
+<?xml version="1.0" encoding="utf-8"?>
+
+<!-- For more information on using web.config transformation visit https://go.microsoft.com/fwlink/?LinkId=125889 -->
+
+<configuration xmlns:xdt="http://schemas.microsoft.com/XML-Document-Transform">
+ <!--
+ In the example below, the "SetAttributes" transform will change the value of
+ "connectionString" to use "ReleaseSQLServer" only when the "Match" locator
+ finds an attribute "name" that has a value of "MyDB".
+
+ <connectionStrings>
+ <add name="MyDB"
+ connectionString="Data Source=ReleaseSQLServer;Initial Catalog=MyReleaseDB;Integrated Security=True"
+ xdt:Transform="SetAttributes" xdt:Locator="Match(name)"/>
+ </connectionStrings>
+ -->
+ <system.web>
+ <!--
+ In the example below, the "Replace" transform will replace the entire
+ <customErrors> section of your web.config file.
+ Note that because there is only one customErrors section under the
+ <system.web> node, there is no need to use the "xdt:Locator" attribute.
+
+ <customErrors defaultRedirect="GenericError.htm"
+ mode="RemoteOnly" xdt:Transform="Replace">
+ <error statusCode="500" redirect="InternalError.htm"/>
+ </customErrors>
+ -->
+ </system.web>
+</configuration>
\ No newline at end of file
diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnet/Web.Release.config b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnet/Web.Release.config
new file mode 100644
index 0000000000..19058ed353
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnet/Web.Release.config
@@ -0,0 +1,31 @@
+<?xml version="1.0" encoding="utf-8"?>
+
+<!-- For more information on using web.config transformation visit https://go.microsoft.com/fwlink/?LinkId=125889 -->
+
+<configuration xmlns:xdt="http://schemas.microsoft.com/XML-Document-Transform">
+ <!--
+ In the example below, the "SetAttributes" transform will change the value of
+ "connectionString" to use "ReleaseSQLServer" only when the "Match" locator
+ finds an attribute "name" that has a value of "MyDB".
+
+ <connectionStrings>
+ <add name="MyDB"
+ connectionString="Data Source=ReleaseSQLServer;Initial Catalog=MyReleaseDB;Integrated Security=True"
+ xdt:Transform="SetAttributes" xdt:Locator="Match(name)"/>
+ </connectionStrings>
+ -->
+ <system.web>
+ <compilation xdt:Transform="RemoveAttributes(debug)" />
+ <!--
+ In the example below, the "Replace" transform will replace the entire
+ <customErrors> section of your web.config file.
+ Note that because there is only one customErrors section under the
+ <system.web> node, there is no need to use the "xdt:Locator" attribute.
+
+ <customErrors defaultRedirect="GenericError.htm"
+ mode="RemoteOnly" xdt:Transform="Replace">
+ <error statusCode="500" redirect="InternalError.htm"/>
+ </customErrors>
+ -->
+ </system.web>
+</configuration>
\ No newline at end of file
diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnet/Web.config b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnet/Web.config
new file mode 100644
index 0000000000..1c190314d0
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnet/Web.config
@@ -0,0 +1,45 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!--
+ For more information on how to configure your ASP.NET application, please visit
+ https://go.microsoft.com/fwlink/?LinkId=301879
+ -->
+<configuration>
+ <appSettings />
+ <!--
+ For a description of web.config changes see http://go.microsoft.com/fwlink/?LinkId=235367.
+
+ The following attributes can be set on the <httpRuntime> tag.
+ <system.Web>
+ <httpRuntime targetFramework="4.5" />
+ </system.Web>
+ -->
+ <system.web>
+ <compilation debug="true" targetFramework="4.5" />
+ <httpRuntime targetFramework="4.5" />
+ </system.web>
+
+ <runtime>
+ <assemblyBinding xmlns="urn:schemas-microsoft-com:asm.v1">
+ <dependentAssembly>
+ <assemblyIdentity name="System.Web.Helpers" publicKeyToken="31bf3856ad364e35" />
+ <bindingRedirect oldVersion="1.0.0.0-3.0.0.0" newVersion="3.0.0.0" />
+ </dependentAssembly>
+ <dependentAssembly>
+ <assemblyIdentity name="System.Web.Mvc" publicKeyToken="31bf3856ad364e35" />
+ <bindingRedirect oldVersion="1.0.0.0-5.2.3.0" newVersion="5.2.3.0" />
+ </dependentAssembly>
+ <dependentAssembly>
+ <assemblyIdentity name="System.Web.WebPages" publicKeyToken="31bf3856ad364e35" />
+ <bindingRedirect oldVersion="1.0.0.0-3.0.0.0" newVersion="3.0.0.0" />
+ </dependentAssembly>
+
+ </assemblyBinding>
+ </runtime>
+<system.webServer>
+ <handlers>
+ <remove name="ExtensionlessUrlHandler-Integrated-4.0" />
+ <remove name="OPTIONSVerbHandler" />
+ <remove name="TRACEVerbHandler" />
+ <add name="ExtensionlessUrlHandler-Integrated-4.0" path="*." verb="*" type="System.Web.Handlers.TransferRequestHandler" preCondition="integratedMode,runtimeVersionv4.0" />
+ </handlers>
+ </system.webServer></configuration>
\ No newline at end of file
diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnet/WebApiHttpAuthService.csproj b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnet/WebApiHttpAuthService.csproj
new file mode 100644
index 0000000000..912a9fdc7d
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnet/WebApiHttpAuthService.csproj
@@ -0,0 +1,136 @@
+<Project ToolsVersion="12.0" DefaultTargets="Build" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+ <Import Project="$(MSBuildExtensionsPath)\$(MSBuildToolsVersion)\Microsoft.Common.props" Condition="Exists('$(MSBuildExtensionsPath)\$(MSBuildToolsVersion)\Microsoft.Common.props')" />
+ <PropertyGroup>
+ <Configuration Condition=" '$(Configuration)' == '' ">Debug</Configuration>
+ <Platform Condition=" '$(Platform)' == '' ">AnyCPU</Platform>
+ <ProductVersion>
+ </ProductVersion>
+ <SchemaVersion>2.0</SchemaVersion>
+ <ProjectGuid>{F141C3E3-FD04-475B-B2B9-00328D0F907B}</ProjectGuid>
+ <ProjectTypeGuids>{349c5851-65df-11da-9384-00065b846f21};{fae04ec0-301f-11d3-bf4b-00c04f79efbc}</ProjectTypeGuids>
+ <OutputType>Library</OutputType>
+ <AppDesignerFolder>Properties</AppDesignerFolder>
+ <RootNamespace>WebApiHttpAuthService</RootNamespace>
+ <AssemblyName>WebApiHttpAuthService</AssemblyName>
+ <TargetFrameworkVersion>v4.5</TargetFrameworkVersion>
+ <UseIISExpress>true</UseIISExpress>
+ <Use64BitIISExpress />
+ <IISExpressSSLPort />
+ <IISExpressAnonymousAuthentication />
+ <IISExpressWindowsAuthentication />
+ <IISExpressUseClassicPipelineMode />
+ <UseGlobalApplicationHostFile />
+ <NuGetPackageImportStamp>
+ </NuGetPackageImportStamp>
+ <TargetFrameworkProfile />
+ </PropertyGroup>
+ <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Debug|AnyCPU' ">
+ <DebugSymbols>true</DebugSymbols>
+ <DebugType>full</DebugType>
+ <Optimize>false</Optimize>
+ <OutputPath>bin\</OutputPath>
+ <DefineConstants>DEBUG;TRACE</DefineConstants>
+ <ErrorReport>prompt</ErrorReport>
+ <WarningLevel>4</WarningLevel>
+ <AllowUnsafeBlocks>true</AllowUnsafeBlocks>
+ </PropertyGroup>
+ <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Release|AnyCPU' ">
+ <DebugSymbols>true</DebugSymbols>
+ <DebugType>pdbonly</DebugType>
+ <Optimize>true</Optimize>
+ <OutputPath>bin\</OutputPath>
+ <DefineConstants>TRACE</DefineConstants>
+ <ErrorReport>prompt</ErrorReport>
+ <WarningLevel>4</WarningLevel>
+ </PropertyGroup>
+ <ItemGroup>
+ <Reference Include="Microsoft.CSharp" />
+ <Reference Include="Microsoft.Practices.ServiceLocation">
+ <HintPath>..\..\VEMS.Sample\Dependencies\WSF\Microsoft.Practices.ServiceLocation.dll</HintPath>
+ </Reference>
+ <Reference Include="System.Data.DataSetExtensions" />
+ <Reference Include="System.Net.Http" />
+ <Reference Include="System.Net.Http.Formatting">
+ <HintPath>packages\Microsoft.AspNet.WebApi.Client.5.2.3\lib\net45\System.Net.Http.Formatting.dll</HintPath>
+ </Reference>
+ <Reference Include="System.Web.DynamicData" />
+ <Reference Include="System.Web.Entity" />
+ <Reference Include="System.Web.ApplicationServices" />
+ <Reference Include="System.ComponentModel.DataAnnotations" />
+ <Reference Include="System" />
+ <Reference Include="System.Data" />
+ <Reference Include="System.Web.Extensions" />
+ <Reference Include="System.Drawing" />
+ <Reference Include="System.Web" />
+ <Reference Include="System.Web.Http">
+ <HintPath>packages\Microsoft.AspNet.WebApi.Core.5.2.3\lib\net45\System.Web.Http.dll</HintPath>
+ </Reference>
+ <Reference Include="System.Web.Http.WebHost">
+ <HintPath>packages\Microsoft.AspNet.WebApi.WebHost.5.2.3\lib\net45\System.Web.Http.WebHost.dll</HintPath>
+ </Reference>
+ <Reference Include="System.Xml" />
+ <Reference Include="System.Configuration" />
+ <Reference Include="System.Web.Services" />
+ <Reference Include="System.EnterpriseServices" />
+ <Reference Include="System.Xml.Linq" />
+ </ItemGroup>
+ <ItemGroup>
+ <Content Include="Global.asax" />
+ <Content Include="Web.config">
+ <SubType>Designer</SubType>
+ </Content>
+ </ItemGroup>
+ <ItemGroup>
+ <Compile Include="App_Start\WebApiConfig.cs" />
+ <Compile Include="Controllers\AuthController.cs" />
+ <Compile Include="Global.asax.cs">
+ <DependentUpon>Global.asax</DependentUpon>
+ </Compile>
+ <Compile Include="Properties\AssemblyInfo.cs" />
+ </ItemGroup>
+ <ItemGroup>
+ <None Include="packages.config" />
+ <None Include="Web.Debug.config">
+ <DependentUpon>Web.config</DependentUpon>
+ </None>
+ <None Include="Web.Release.config">
+ <DependentUpon>Web.config</DependentUpon>
+ </None>
+ </ItemGroup>
+ <ItemGroup>
+ <Folder Include="App_Data\" />
+ <Folder Include="Models\" />
+ </ItemGroup>
+ <PropertyGroup>
+ <VisualStudioVersion Condition="'$(VisualStudioVersion)' == ''">10.0</VisualStudioVersion>
+ <VSToolsPath Condition="'$(VSToolsPath)' == ''">$(MSBuildExtensionsPath32)\Microsoft\VisualStudio\v$(VisualStudioVersion)</VSToolsPath>
+ </PropertyGroup>
+ <Import Project="$(MSBuildBinPath)\Microsoft.CSharp.targets" />
+ <Import Project="$(VSToolsPath)\WebApplications\Microsoft.WebApplication.targets" Condition="'$(VSToolsPath)' != ''" />
+ <Import Project="$(MSBuildExtensionsPath32)\Microsoft\VisualStudio\v10.0\WebApplications\Microsoft.WebApplication.targets" Condition="false" />
+ <ProjectExtensions>
+ <VisualStudio>
+ <FlavorProperties GUID="{349c5851-65df-11da-9384-00065b846f21}">
+ <WebProjectProperties>
+ <UseIIS>True</UseIIS>
+ <AutoAssignPort>True</AutoAssignPort>
+ <DevelopmentServerPort>62243</DevelopmentServerPort>
+ <DevelopmentServerVPath>/</DevelopmentServerVPath>
+ <IISUrl>http://localhost:62190/</IISUrl>
+ <NTLMAuthentication>False</NTLMAuthentication>
+ <UseCustomServer>False</UseCustomServer>
+ <CustomServerUrl>
+ </CustomServerUrl>
+ <SaveServerSettingsInUserFile>False</SaveServerSettingsInUserFile>
+ </WebProjectProperties>
+ </FlavorProperties>
+ </VisualStudio>
+ </ProjectExtensions>
+ <!-- To modify your build process, add your task inside one of the targets below and uncomment it.
+ Other similar extension points exist, see Microsoft.Common.targets.
+ <Target Name="BeforeBuild">
+ </Target>
+ <Target Name="AfterBuild">
+ </Target>
+ -->
+</Project>
diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnet/WebApiHttpAuthService.csproj.user b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnet/WebApiHttpAuthService.csproj.user
new file mode 100644
index 0000000000..1a60d6407f
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnet/WebApiHttpAuthService.csproj.user
@@ -0,0 +1,43 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project ToolsVersion="15.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+ <PropertyGroup>
+ <UseIISExpress>true</UseIISExpress>
+ <Use64BitIISExpress />
+ <IISExpressSSLPort />
+ <IISExpressAnonymousAuthentication />
+ <IISExpressWindowsAuthentication />
+ <IISExpressUseClassicPipelineMode />
+ <UseGlobalApplicationHostFile />
+ <WebStackScaffolding_ControllerDialogWidth>600</WebStackScaffolding_ControllerDialogWidth>
+ <WebStackScaffolding_IsLayoutPageSelected>True</WebStackScaffolding_IsLayoutPageSelected>
+ <WebStackScaffolding_IsPartialViewSelected>False</WebStackScaffolding_IsPartialViewSelected>
+ <WebStackScaffolding_IsReferencingScriptLibrariesSelected>True</WebStackScaffolding_IsReferencingScriptLibrariesSelected>
+ <WebStackScaffolding_LayoutPageFile />
+ <WebStackScaffolding_IsAsyncSelected>False</WebStackScaffolding_IsAsyncSelected>
+ </PropertyGroup>
+ <ProjectExtensions>
+ <VisualStudio>
+ <FlavorProperties GUID="{349c5851-65df-11da-9384-00065b846f21}">
+ <WebProjectProperties>
+ <StartPageUrl>
+ </StartPageUrl>
+ <StartAction>CurrentPage</StartAction>
+ <AspNetDebugging>True</AspNetDebugging>
+ <SilverlightDebugging>False</SilverlightDebugging>
+ <NativeDebugging>False</NativeDebugging>
+ <SQLDebugging>False</SQLDebugging>
+ <ExternalProgram>
+ </ExternalProgram>
+ <StartExternalURL>
+ </StartExternalURL>
+ <StartCmdLineArguments>
+ </StartCmdLineArguments>
+ <StartWorkingDirectory>
+ </StartWorkingDirectory>
+ <EnableENC>True</EnableENC>
+ <AlwaysStartWebServerOnDebug>True</AlwaysStartWebServerOnDebug>
+ </WebProjectProperties>
+ </FlavorProperties>
+ </VisualStudio>
+ </ProjectExtensions>
+</Project>
\ No newline at end of file
diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnet/WebApiHttpAuthService.sln b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnet/WebApiHttpAuthService.sln
new file mode 100644
index 0000000000..5dc14fc12c
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnet/WebApiHttpAuthService.sln
@@ -0,0 +1,25 @@
+
+Microsoft Visual Studio Solution File, Format Version 12.00
+# Visual Studio 15
+VisualStudioVersion = 15.0.26730.8
+MinimumVisualStudioVersion = 10.0.40219.1
+Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "WebApiHttpAuthService", "WebApiHttpAuthService.csproj", "{F141C3E3-FD04-475B-B2B9-00328D0F907B}"
+EndProject
+Global
+ GlobalSection(SolutionConfigurationPlatforms) = preSolution
+ Debug|Any CPU = Debug|Any CPU
+ Release|Any CPU = Release|Any CPU
+ EndGlobalSection
+ GlobalSection(ProjectConfigurationPlatforms) = postSolution
+ {F141C3E3-FD04-475B-B2B9-00328D0F907B}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {F141C3E3-FD04-475B-B2B9-00328D0F907B}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {F141C3E3-FD04-475B-B2B9-00328D0F907B}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {F141C3E3-FD04-475B-B2B9-00328D0F907B}.Release|Any CPU.Build.0 = Release|Any CPU
+ EndGlobalSection
+ GlobalSection(SolutionProperties) = preSolution
+ HideSolutionNode = FALSE
+ EndGlobalSection
+ GlobalSection(ExtensibilityGlobals) = postSolution
+ SolutionGuid = {2E60A79D-A68A-4486-A8F2-917C08BA2C8A}
+ EndGlobalSection
+EndGlobal
diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnet/packages.config b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnet/packages.config
new file mode 100644
index 0000000000..f7de901155
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnet/packages.config
@@ -0,0 +1,8 @@
+<?xml version="1.0" encoding="utf-8"?>
+<packages>
+ <package id="Microsoft.AspNet.WebApi" version="5.2.3" targetFramework="net45" />
+ <package id="Microsoft.AspNet.WebApi.Client" version="5.2.3" targetFramework="net45" />
+ <package id="Microsoft.AspNet.WebApi.Core" version="5.2.3" targetFramework="net45" />
+ <package id="Microsoft.AspNet.WebApi.WebHost" version="5.2.3" targetFramework="net45" />
+ <package id="Newtonsoft.Json" version="10.0.3" targetFramework="net45" />
+</packages>
\ No newline at end of file
diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnetcore/AuthResult.cs b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnetcore/AuthResult.cs
new file mode 100644
index 0000000000..12766b122c
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnetcore/AuthResult.cs
@@ -0,0 +1,22 @@
+using Microsoft.AspNetCore.Mvc;
+
+namespace RabbitMqAuthBackendHttp
+{
+ public static class AuthResult
+ {
+ public static IActionResult Allow()
+ {
+ return new OkObjectResult("allow");
+ }
+
+ public static IActionResult Allow(params string[] tags)
+ {
+ return new OkObjectResult($"allow {string.Join(" ", tags)}");
+ }
+
+ public static IActionResult Deny()
+ {
+ return new OkObjectResult("deny");
+ }
+ }
+}
diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnetcore/Controllers/AuthController.cs b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnetcore/Controllers/AuthController.cs
new file mode 100644
index 0000000000..3f4b3fc19e
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnetcore/Controllers/AuthController.cs
@@ -0,0 +1,110 @@
+using System;
+using Microsoft.AspNetCore.Mvc;
+using Microsoft.Extensions.Logging;
+using RabbitMqAuthBackendHttp.Requests;
+
+namespace RabbitMqAuthBackendHttp.Controllers
+{
+ [Route("[controller]")]
+ [ApiController]
+ public class AuthController : ControllerBase
+ {
+ private readonly ILogger<AuthController> _logger;
+
+ public AuthController(ILogger<AuthController> logger)
+ {
+ _logger = logger;
+ }
+
+ [HttpGet]
+ public ActionResult<string> Get()
+ {
+ return "AuthController";
+ }
+
+ [Route("user")]
+ [HttpPost]
+ public IActionResult CheckUser([FromForm]UserAuthRequest request)
+ {
+ var tags = new [] {"administrator", "management"};
+
+ try
+ {
+ var userlog = string.Format("user : {0}, password : {1}", request.UserName, request.Password);
+ _logger.LogInformation(userlog);
+
+ if (request.UserName == "authuser") // sample check; replace with your custom logic here
+ return AuthResult.Deny();
+
+ }
+ catch (Exception ex)
+ {
+ //check or log error
+ }
+
+ return AuthResult.Allow(tags);
+ }
+
+ [Route("vhost")]
+ [HttpPost]
+ public IActionResult CheckVhost([FromForm]VhostAuthRequest request)
+ {
+ try
+ {
+ var userlog = string.Format("user : {0}, ip : {1}", request.UserName, request.Ip);
+ _logger.LogInformation(userlog);
+
+ if (request.UserName == "authuser") // sample check; replace with your custom logic here
+ return AuthResult.Deny();
+ }
+ catch (Exception ex)
+ {
+ //check or log error
+ }
+
+ return AuthResult.Allow();
+ }
+
+ [Route("resource")]
+ [HttpPost]
+ public IActionResult CheckResource([FromForm]ResourceAuthRequest request)
+ {
+ try
+ {
+ var userlog = $"user : {request.UserName}, vhost : {request.Vhost}, resource : {request.Resource}, " +
+ $"name : {request.Name}, permission : {request.Permission}";
+ _logger.LogInformation(userlog);
+
+ if (request.UserName == "authuser") // sample check; replace with your custom logic here
+ return AuthResult.Deny();
+ }
+ catch (Exception ex)
+ {
+ //check or log error
+ }
+
+ return AuthResult.Allow();
+ }
+
+ [Route("topic")]
+ [HttpPost]
+ public IActionResult CheckTopic([FromForm]TopicAuthRequest request)
+ {
+ try
+ {
+ var userlog = $"user : {request.UserName}, vhost : {request.Vhost}, resource : {request.Resource}, " +
+ $"name : {request.Name}, routing key: {request.RoutingKey}, permission : {request.Permission}";
+ _logger.LogInformation(userlog);
+
+ if (request.UserName == "authuser") // sample check; replace with your custom logic here
+ return AuthResult.Deny();
+ }
+ catch (Exception ex)
+ {
+ //check or log error
+ }
+
+ return AuthResult.Allow();
+ }
+ }
+}
\ No newline at end of file
diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnetcore/Program.cs b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnetcore/Program.cs
new file mode 100644
index 0000000000..c25b893205
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnetcore/Program.cs
@@ -0,0 +1,17 @@
+using Microsoft.AspNetCore;
+using Microsoft.AspNetCore.Hosting;
+
+namespace RabbitMqAuthBackendHttp
+{
+ public class Program
+ {
+ public static void Main(string[] args)
+ {
+ CreateWebHostBuilder(args).Build().Run();
+ }
+
+ public static IWebHostBuilder CreateWebHostBuilder(string[] args) =>
+ WebHost.CreateDefaultBuilder(args)
+ .UseStartup<Startup>();
+ }
+}
diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnetcore/Properties/launchSettings.json b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnetcore/Properties/launchSettings.json
new file mode 100644
index 0000000000..3015f0da9f
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnetcore/Properties/launchSettings.json
@@ -0,0 +1,30 @@
+{
+ "iisSettings": {
+ "windowsAuthentication": false,
+ "anonymousAuthentication": true,
+ "iisExpress": {
+ "applicationUrl": "http://localhost:53729",
+ "sslPort": 44312
+ }
+ },
+ "$schema": "http://json.schemastore.org/launchsettings.json",
+ "profiles": {
+ "IIS Express": {
+ "commandName": "IISExpress",
+ "launchBrowser": true,
+ "launchUrl": "api/values",
+ "environmentVariables": {
+ "ASPNETCORE_ENVIRONMENT": "Development"
+ }
+ },
+ "RabbitMqAuthBackendHttp": {
+ "commandName": "Project",
+ "launchBrowser": true,
+ "launchUrl": "api/values",
+ "environmentVariables": {
+ "ASPNETCORE_ENVIRONMENT": "Development"
+ },
+ "applicationUrl": "http://localhost:5000"
+ }
+ }
+}
\ No newline at end of file
diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnetcore/RabbitMqAuthBackendHttp.csproj b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnetcore/RabbitMqAuthBackendHttp.csproj
new file mode 100644
index 0000000000..65edaa8124
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnetcore/RabbitMqAuthBackendHttp.csproj
@@ -0,0 +1,18 @@
+<Project Sdk="Microsoft.NET.Sdk.Web">
+
+ <PropertyGroup>
+ <TargetFramework>netcoreapp2.1</TargetFramework>
+ </PropertyGroup>
+
+ <ItemGroup>
+ <Compile Remove="wwwroot\**" />
+ <Content Remove="wwwroot\**" />
+ <EmbeddedResource Remove="wwwroot\**" />
+ <None Remove="wwwroot\**" />
+ </ItemGroup>
+
+ <ItemGroup>
+ <PackageReference Include="Microsoft.AspNetCore.App" />
+ </ItemGroup>
+
+</Project>
diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnetcore/RabbitMqAuthBackendHttp.sln b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnetcore/RabbitMqAuthBackendHttp.sln
new file mode 100644
index 0000000000..d4392fc131
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnetcore/RabbitMqAuthBackendHttp.sln
@@ -0,0 +1,37 @@
+
+Microsoft Visual Studio Solution File, Format Version 12.00
+# Visual Studio 15
+VisualStudioVersion = 15.0.26124.0
+MinimumVisualStudioVersion = 15.0.26124.0
+Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "RabbitMqAuthBackendHttp", "RabbitMqAuthBackendHttp.csproj", "{5CBC938E-5097-4888-A43F-2A9C190F41A6}"
+EndProject
+Global
+ GlobalSection(SolutionConfigurationPlatforms) = preSolution
+ Debug|Any CPU = Debug|Any CPU
+ Debug|x64 = Debug|x64
+ Debug|x86 = Debug|x86
+ Release|Any CPU = Release|Any CPU
+ Release|x64 = Release|x64
+ Release|x86 = Release|x86
+ EndGlobalSection
+ GlobalSection(ProjectConfigurationPlatforms) = postSolution
+ {5CBC938E-5097-4888-A43F-2A9C190F41A6}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {5CBC938E-5097-4888-A43F-2A9C190F41A6}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {5CBC938E-5097-4888-A43F-2A9C190F41A6}.Debug|x64.ActiveCfg = Debug|Any CPU
+ {5CBC938E-5097-4888-A43F-2A9C190F41A6}.Debug|x64.Build.0 = Debug|Any CPU
+ {5CBC938E-5097-4888-A43F-2A9C190F41A6}.Debug|x86.ActiveCfg = Debug|Any CPU
+ {5CBC938E-5097-4888-A43F-2A9C190F41A6}.Debug|x86.Build.0 = Debug|Any CPU
+ {5CBC938E-5097-4888-A43F-2A9C190F41A6}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {5CBC938E-5097-4888-A43F-2A9C190F41A6}.Release|Any CPU.Build.0 = Release|Any CPU
+ {5CBC938E-5097-4888-A43F-2A9C190F41A6}.Release|x64.ActiveCfg = Release|Any CPU
+ {5CBC938E-5097-4888-A43F-2A9C190F41A6}.Release|x64.Build.0 = Release|Any CPU
+ {5CBC938E-5097-4888-A43F-2A9C190F41A6}.Release|x86.ActiveCfg = Release|Any CPU
+ {5CBC938E-5097-4888-A43F-2A9C190F41A6}.Release|x86.Build.0 = Release|Any CPU
+ EndGlobalSection
+ GlobalSection(SolutionProperties) = preSolution
+ HideSolutionNode = FALSE
+ EndGlobalSection
+ GlobalSection(ExtensibilityGlobals) = postSolution
+ SolutionGuid = {8B62538E-3E8E-4B80-857E-D34C9DE1CD09}
+ EndGlobalSection
+EndGlobal
diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnetcore/Requests/Resource.cs b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnetcore/Requests/Resource.cs
new file mode 100644
index 0000000000..dcab5ac3f6
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnetcore/Requests/Resource.cs
@@ -0,0 +1,11 @@
+namespace RabbitMqAuthBackendHttp.Requests
+{
+ public enum Resource
+ {
+ Exchange,
+
+ Queue,
+
+ Topic
+ }
+}
\ No newline at end of file
diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnetcore/Requests/ResourceAuthRequest.cs b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnetcore/Requests/ResourceAuthRequest.cs
new file mode 100644
index 0000000000..1b9bc9658c
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnetcore/Requests/ResourceAuthRequest.cs
@@ -0,0 +1,24 @@
+namespace RabbitMqAuthBackendHttp.Requests
+{
+ public class ResourceAuthRequest
+ {
+ public string UserName { get; set; }
+
+ public string Vhost { get; set; }
+
+ public Resource Resource { get; set; }
+
+ public string Name { get; set; }
+
+ public ResourcePermission Permission { get; set; }
+ }
+
+ public enum ResourcePermission
+ {
+ Configure,
+
+ Write,
+
+ Read
+ }
+}
diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnetcore/Requests/TopicAuthRequest.cs b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnetcore/Requests/TopicAuthRequest.cs
new file mode 100644
index 0000000000..fbc9440a67
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnetcore/Requests/TopicAuthRequest.cs
@@ -0,0 +1,27 @@
+using Microsoft.AspNetCore.Mvc;
+
+namespace RabbitMqAuthBackendHttp.Requests
+{
+ public class TopicAuthRequest
+ {
+ public string UserName { get; set; }
+
+ public string Vhost { get; set; }
+
+ public string Name { get; set; }
+
+ public Resource Resource { get; set; }
+
+ public TopicPermission Permission { get; set; }
+
+ [ModelBinder(Name = "routing_key")]
+ public string RoutingKey { get; set; }
+ }
+
+ public enum TopicPermission
+ {
+ Write,
+
+ Read
+ }
+}
diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnetcore/Requests/UserAuthRequest.cs b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnetcore/Requests/UserAuthRequest.cs
new file mode 100644
index 0000000000..e9b7282cee
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnetcore/Requests/UserAuthRequest.cs
@@ -0,0 +1,9 @@
+namespace RabbitMqAuthBackendHttp.Requests
+{
+ public class UserAuthRequest
+ {
+ public string UserName { get; set; }
+
+ public string Password { get; set; }
+ }
+}
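+
+// Illustrative sketch, not part of this example project: the broker sends
+// these fields as query parameters (GET) or form data (POST), so a minimal
+// action handling the configured user_path could look like:
+//
+//   [HttpPost("user")]
+//   public IActionResult CheckUser([FromForm] UserAuthRequest request)
+//       => Ok(request.UserName == "guest" ? "allow management" : "deny");
+//
+// A 200/201 response whose body starts with "allow" grants access; any words
+// after "allow" become the user's tags.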
diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnetcore/Requests/VhostAuthRequest.cs b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnetcore/Requests/VhostAuthRequest.cs
new file mode 100644
index 0000000000..5b072df9c4
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnetcore/Requests/VhostAuthRequest.cs
@@ -0,0 +1,12 @@
+namespace RabbitMqAuthBackendHttp.Requests
+{
+ public class VhostAuthRequest
+ {
+ public string UserName { get; set; }
+
+ public string Vhost { get; set; }
+
+ public string Ip { get; set; }
+ }
+}
+ 
\ No newline at end of file
diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnetcore/Startup.cs b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnetcore/Startup.cs
new file mode 100644
index 0000000000..3dd050830d
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnetcore/Startup.cs
@@ -0,0 +1,38 @@
+using Microsoft.AspNetCore.Builder;
+using Microsoft.AspNetCore.Hosting;
+using Microsoft.AspNetCore.Mvc;
+using Microsoft.Extensions.Configuration;
+using Microsoft.Extensions.DependencyInjection;
+
+namespace RabbitMqAuthBackendHttp
+{
+ public class Startup
+ {
+ public Startup(IConfiguration configuration)
+ {
+ Configuration = configuration;
+ }
+
+ public IConfiguration Configuration { get; }
+
+ public void ConfigureServices(IServiceCollection services)
+ {
+ services.AddMvc().SetCompatibilityVersion(CompatibilityVersion.Version_2_1);
+ }
+
+ public void Configure(IApplicationBuilder app, IHostingEnvironment env)
+ {
+ if (env.IsDevelopment())
+ {
+ app.UseDeveloperExceptionPage();
+ }
+ else
+ {
+ app.UseHsts();
+ }
+
+ app.UseHttpsRedirection();
+ app.UseMvc();
+ }
+ }
+}
diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnetcore/appsettings.Development.json b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnetcore/appsettings.Development.json
new file mode 100644
index 0000000000..e203e9407e
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnetcore/appsettings.Development.json
@@ -0,0 +1,9 @@
+{
+ "Logging": {
+ "LogLevel": {
+ "Default": "Debug",
+ "System": "Information",
+ "Microsoft": "Information"
+ }
+ }
+}
diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnetcore/appsettings.json b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnetcore/appsettings.json
new file mode 100644
index 0000000000..def9159a7d
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_webapi_dotnetcore/appsettings.json
@@ -0,0 +1,8 @@
+{
+ "Logging": {
+ "LogLevel": {
+ "Default": "Warning"
+ }
+ },
+ "AllowedHosts": "*"
+}
diff --git a/deps/rabbitmq_auth_backend_http/priv/schema/rabbitmq_auth_backend_http.schema b/deps/rabbitmq_auth_backend_http/priv/schema/rabbitmq_auth_backend_http.schema
new file mode 100644
index 0000000000..874c55b2c4
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/priv/schema/rabbitmq_auth_backend_http.schema
@@ -0,0 +1,21 @@
+%% ----------------------------------------------------------------------------
+%% RabbitMQ HTTP Authorization
+%% ----------------------------------------------------------------------------
+
+{mapping, "auth_http.http_method", "rabbitmq_auth_backend_http.http_method",
+ [{datatype, {enum, [get,post]}}]}.
+
+{mapping, "auth_http.user_path", "rabbitmq_auth_backend_http.user_path",
+ [{datatype, string}, {validators, ["uri"]}]}.
+
+{mapping, "auth_http.vhost_path", "rabbitmq_auth_backend_http.vhost_path",
+ [{datatype, string}, {validators, ["uri"]}]}.
+
+{mapping, "auth_http.resource_path", "rabbitmq_auth_backend_http.resource_path",
+ [{datatype, string}, {validators, ["uri"]}]}.
+
+{mapping, "auth_http.topic_path", "rabbitmq_auth_backend_http.topic_path",
+ [{datatype, string}, {validators, ["uri"]}]}.
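+
+%% For illustration, these mappings accept rabbitmq.conf entries such as:
+%%
+%%   auth_http.http_method   = post
+%%   auth_http.user_path     = http://some-server/auth/user
+%%   auth_http.vhost_path    = http://some-server/auth/vhost
+%%   auth_http.resource_path = http://some-server/auth/resource
+%%   auth_http.topic_path    = http://some-server/auth/topic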
diff --git a/deps/rabbitmq_auth_backend_http/rabbitmq-components.mk b/deps/rabbitmq_auth_backend_http/rabbitmq-components.mk
new file mode 100644
index 0000000000..b2a3be8b35
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/rabbitmq-components.mk
@@ -0,0 +1,359 @@
+ifeq ($(.DEFAULT_GOAL),)
+# Define the default goal as `all` because this file defines some targets
+# before the inclusion of erlang.mk, which would otherwise lead to the
+# wrong target becoming the default.
+.DEFAULT_GOAL = all
+endif
+
+# PROJECT_VERSION defaults to:
+# 1. the version exported by rabbitmq-server-release;
+# 2. the version stored in `git-revisions.txt`, if it exists;
+# 3. a version based on git-describe(1), if it is a Git clone;
+# 4. 0.0.0
+
+PROJECT_VERSION := $(RABBITMQ_VERSION)
+
+ifeq ($(PROJECT_VERSION),)
+PROJECT_VERSION := $(shell \
+if test -f git-revisions.txt; then \
+ head -n1 git-revisions.txt | \
+ awk '{print $$$(words $(PROJECT_DESCRIPTION) version);}'; \
+else \
+ (git describe --dirty --abbrev=7 --tags --always --first-parent \
+ 2>/dev/null || echo rabbitmq_v0_0_0) | \
+ sed -e 's/^rabbitmq_v//' -e 's/^v//' -e 's/_/./g' -e 's/-/+/' \
+ -e 's/-/./g'; \
+fi)
+endif
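+
+# For example (illustrative): a git-describe(1) output such as
+# "v3.8.9-10-gabcdef1" becomes "3.8.9+10.gabcdef1" after the sed
+# substitutions above.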
+
+# --------------------------------------------------------------------
+# RabbitMQ components.
+# --------------------------------------------------------------------
+
+# For RabbitMQ repositories, we want to check out branches which match
+# the parent project. For instance, if the parent project is on a
+# release tag, dependencies must be on the same release tag. If the
+# parent project is on a topic branch, dependencies must be on the same
+# topic branch, or fall back to `stable` or `master`, whichever was the
+# base of the topic branch.
+
+dep_amqp_client = git_rmq rabbitmq-erlang-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_amqp10_client = git_rmq rabbitmq-amqp1.0-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_amqp10_common = git_rmq rabbitmq-amqp1.0-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbit = git_rmq rabbitmq-server $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbit_common = git_rmq rabbitmq-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_amqp1_0 = git_rmq rabbitmq-amqp1.0 $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_amqp = git_rmq rabbitmq-auth-backend-amqp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_cache = git_rmq rabbitmq-auth-backend-cache $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_http = git_rmq rabbitmq-auth-backend-http $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_ldap = git_rmq rabbitmq-auth-backend-ldap $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_oauth2 = git_rmq rabbitmq-auth-backend-oauth2 $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_mechanism_ssl = git_rmq rabbitmq-auth-mechanism-ssl $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_aws = git_rmq rabbitmq-aws $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_boot_steps_visualiser = git_rmq rabbitmq-boot-steps-visualiser $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_cli = git_rmq rabbitmq-cli $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_codegen = git_rmq rabbitmq-codegen $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_consistent_hash_exchange = git_rmq rabbitmq-consistent-hash-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_ct_client_helpers = git_rmq rabbitmq-ct-client-helpers $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_ct_helpers = git_rmq rabbitmq-ct-helpers $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_delayed_message_exchange = git_rmq rabbitmq-delayed-message-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_dotnet_client = git_rmq rabbitmq-dotnet-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_event_exchange = git_rmq rabbitmq-event-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_federation = git_rmq rabbitmq-federation $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_federation_management = git_rmq rabbitmq-federation-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_java_client = git_rmq rabbitmq-java-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_client = git_rmq rabbitmq-jms-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_cts = git_rmq rabbitmq-jms-cts $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_topic_exchange = git_rmq rabbitmq-jms-topic-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_lvc_exchange = git_rmq rabbitmq-lvc-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management = git_rmq rabbitmq-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_agent = git_rmq rabbitmq-management-agent $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_exchange = git_rmq rabbitmq-management-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_themes = git_rmq rabbitmq-management-themes $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_message_timestamp = git_rmq rabbitmq-message-timestamp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_metronome = git_rmq rabbitmq-metronome $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_mqtt = git_rmq rabbitmq-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_objc_client = git_rmq rabbitmq-objc-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_aws = git_rmq rabbitmq-peer-discovery-aws $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_common = git_rmq rabbitmq-peer-discovery-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_consul = git_rmq rabbitmq-peer-discovery-consul $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_etcd = git_rmq rabbitmq-peer-discovery-etcd $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_k8s = git_rmq rabbitmq-peer-discovery-k8s $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_prometheus = git_rmq rabbitmq-prometheus $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_random_exchange = git_rmq rabbitmq-random-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_recent_history_exchange = git_rmq rabbitmq-recent-history-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_routing_node_stamp = git_rmq rabbitmq-routing-node-stamp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_rtopic_exchange = git_rmq rabbitmq-rtopic-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_server_release = git_rmq rabbitmq-server-release $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_sharding = git_rmq rabbitmq-sharding $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_shovel = git_rmq rabbitmq-shovel $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_shovel_management = git_rmq rabbitmq-shovel-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_stomp = git_rmq rabbitmq-stomp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_stream = git_rmq rabbitmq-stream $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_toke = git_rmq rabbitmq-toke $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_top = git_rmq rabbitmq-top $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_tracing = git_rmq rabbitmq-tracing $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_trust_store = git_rmq rabbitmq-trust-store $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_test = git_rmq rabbitmq-test $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_dispatch = git_rmq rabbitmq-web-dispatch $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_stomp = git_rmq rabbitmq-web-stomp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_stomp_examples = git_rmq rabbitmq-web-stomp-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt = git_rmq rabbitmq-web-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt_examples = git_rmq rabbitmq-web-mqtt-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_website = git_rmq rabbitmq-website $(current_rmq_ref) $(base_rmq_ref) live master
+dep_toke = git_rmq toke $(current_rmq_ref) $(base_rmq_ref) master
+
+dep_rabbitmq_public_umbrella = git_rmq rabbitmq-public-umbrella $(current_rmq_ref) $(base_rmq_ref) master
+
+# Third-party dependencies version pinning.
+#
+# We do this in this file, which is copied into all projects, to ensure
+# they all use the same versions. This avoids conflicts and makes it
+# possible to work with rabbitmq-public-umbrella.
+
+dep_accept = hex 0.3.5
+dep_cowboy = hex 2.8.0
+dep_cowlib = hex 2.9.1
+dep_jsx = hex 2.11.0
+dep_lager = hex 3.8.0
+dep_prometheus = git https://github.com/deadtrickster/prometheus.erl.git master
+dep_ra = git https://github.com/rabbitmq/ra.git master
+dep_ranch = hex 1.7.1
+dep_recon = hex 2.5.1
+dep_observer_cli = hex 1.5.4
+dep_stdout_formatter = hex 0.2.4
+dep_sysmon_handler = hex 1.3.0
+
+RABBITMQ_COMPONENTS = amqp_client \
+ amqp10_common \
+ amqp10_client \
+ rabbit \
+ rabbit_common \
+ rabbitmq_amqp1_0 \
+ rabbitmq_auth_backend_amqp \
+ rabbitmq_auth_backend_cache \
+ rabbitmq_auth_backend_http \
+ rabbitmq_auth_backend_ldap \
+ rabbitmq_auth_backend_oauth2 \
+ rabbitmq_auth_mechanism_ssl \
+ rabbitmq_aws \
+ rabbitmq_boot_steps_visualiser \
+ rabbitmq_cli \
+ rabbitmq_codegen \
+ rabbitmq_consistent_hash_exchange \
+ rabbitmq_ct_client_helpers \
+ rabbitmq_ct_helpers \
+ rabbitmq_delayed_message_exchange \
+ rabbitmq_dotnet_client \
+ rabbitmq_event_exchange \
+ rabbitmq_federation \
+ rabbitmq_federation_management \
+ rabbitmq_java_client \
+ rabbitmq_jms_client \
+ rabbitmq_jms_cts \
+ rabbitmq_jms_topic_exchange \
+ rabbitmq_lvc_exchange \
+ rabbitmq_management \
+ rabbitmq_management_agent \
+ rabbitmq_management_exchange \
+ rabbitmq_management_themes \
+ rabbitmq_message_timestamp \
+ rabbitmq_metronome \
+ rabbitmq_mqtt \
+ rabbitmq_objc_client \
+ rabbitmq_peer_discovery_aws \
+ rabbitmq_peer_discovery_common \
+ rabbitmq_peer_discovery_consul \
+ rabbitmq_peer_discovery_etcd \
+ rabbitmq_peer_discovery_k8s \
+ rabbitmq_prometheus \
+ rabbitmq_random_exchange \
+ rabbitmq_recent_history_exchange \
+ rabbitmq_routing_node_stamp \
+ rabbitmq_rtopic_exchange \
+ rabbitmq_server_release \
+ rabbitmq_sharding \
+ rabbitmq_shovel \
+ rabbitmq_shovel_management \
+ rabbitmq_stomp \
+ rabbitmq_stream \
+ rabbitmq_toke \
+ rabbitmq_top \
+ rabbitmq_tracing \
+ rabbitmq_trust_store \
+ rabbitmq_web_dispatch \
+ rabbitmq_web_mqtt \
+ rabbitmq_web_mqtt_examples \
+ rabbitmq_web_stomp \
+ rabbitmq_web_stomp_examples \
+ rabbitmq_website
+
+# Erlang.mk does not rebuild dependencies by default once they have
+# been compiled, except for those listed in the `$(FORCE_REBUILD)`
+# variable.
+#
+# We want all RabbitMQ components to always be rebuilt: this eases
+# the work on several components at the same time.
+
+FORCE_REBUILD = $(RABBITMQ_COMPONENTS)
+
+# Several components have a custom erlang.mk/build.config, mainly
+# to disable eunit. Therefore, we can't use the top-level project's
+# erlang.mk copy.
+NO_AUTOPATCH += $(RABBITMQ_COMPONENTS)
+
+ifeq ($(origin current_rmq_ref),undefined)
+ifneq ($(wildcard .git),)
+current_rmq_ref := $(shell (\
+ ref=$$(LANG=C git branch --list | awk '/^\* \(.*detached / {ref=$$0; sub(/.*detached [^ ]+ /, "", ref); sub(/\)$$/, "", ref); print ref; exit;} /^\* / {ref=$$0; sub(/^\* /, "", ref); print ref; exit}');\
+ if test "$$(git rev-parse --short HEAD)" != "$$ref"; then echo "$$ref"; fi))
+else
+current_rmq_ref := master
+endif
+endif
+export current_rmq_ref
+
+ifeq ($(origin base_rmq_ref),undefined)
+ifneq ($(wildcard .git),)
+possible_base_rmq_ref := master
+ifeq ($(possible_base_rmq_ref),$(current_rmq_ref))
+base_rmq_ref := $(current_rmq_ref)
+else
+base_rmq_ref := $(shell \
+ (git rev-parse --verify -q master >/dev/null && \
+ git rev-parse --verify -q $(possible_base_rmq_ref) >/dev/null && \
+ git merge-base --is-ancestor $$(git merge-base master HEAD) $(possible_base_rmq_ref) && \
+ echo $(possible_base_rmq_ref)) || \
+ echo master)
+endif
+else
+base_rmq_ref := master
+endif
+endif
+export base_rmq_ref
+
+# Repository URL selection.
+#
+# First, we infer other components' location from the current project
+# repository URL, if it's a Git repository:
+# - We take the "origin" remote URL as the base
+# - The current project name and repository name are replaced by the
+# target's properties:
+# e.g. rabbitmq-common is replaced by rabbitmq-codegen
+# e.g. rabbit_common is replaced by rabbitmq_codegen
+#
+# If cloning from this computed location fails, we fall back to the
+# RabbitMQ upstream on GitHub.
+
+# Macro to transform e.g. "rabbit_common" into "rabbitmq-common".
+rmq_cmp_repo_name = $(word 2,$(dep_$(1)))
+
+# Upstream URL for the current project.
+RABBITMQ_COMPONENT_REPO_NAME := $(call rmq_cmp_repo_name,$(PROJECT))
+RABBITMQ_UPSTREAM_FETCH_URL ?= https://github.com/rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git
+RABBITMQ_UPSTREAM_PUSH_URL ?= git@github.com:rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git
+
+# Current URL for the current project. If this is not a Git clone,
+# default to the upstream Git repository.
+ifneq ($(wildcard .git),)
+git_origin_fetch_url := $(shell git config remote.origin.url)
+git_origin_push_url := $(shell git config remote.origin.pushurl || git config remote.origin.url)
+RABBITMQ_CURRENT_FETCH_URL ?= $(git_origin_fetch_url)
+RABBITMQ_CURRENT_PUSH_URL ?= $(git_origin_push_url)
+else
+RABBITMQ_CURRENT_FETCH_URL ?= $(RABBITMQ_UPSTREAM_FETCH_URL)
+RABBITMQ_CURRENT_PUSH_URL ?= $(RABBITMQ_UPSTREAM_PUSH_URL)
+endif
+
+# Macro to replace the following patterns:
+# 1. /foo.git -> /bar.git
+# 2. /foo -> /bar
+# 3. /foo/ -> /bar/
+subst_repo_name = $(patsubst %/$(1)/%,%/$(2)/%,$(patsubst %/$(1),%/$(2),$(patsubst %/$(1).git,%/$(2).git,$(3))))
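+
+# For example (illustrative):
+#   $(call subst_repo_name,rabbitmq-common,rabbitmq-codegen,URL)
+# rewrites ".../rabbitmq-common.git" in URL to ".../rabbitmq-codegen.git".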
+
+# Macro to replace both the project's name (eg. "rabbit_common") and
+# repository name (eg. "rabbitmq-common") by the target's equivalent.
+#
+# This macro is kept on one line because we don't want whitespace in
+# the returned value, as it's used in $(dep_fetch_git_rmq) in a shell
+# single-quoted string.
+dep_rmq_repo = $(if $(dep_$(2)),$(call subst_repo_name,$(PROJECT),$(2),$(call subst_repo_name,$(RABBITMQ_COMPONENT_REPO_NAME),$(call rmq_cmp_repo_name,$(2)),$(1))),$(pkg_$(1)_repo))
+
+dep_rmq_commits = $(if $(dep_$(1)), \
+ $(wordlist 3,$(words $(dep_$(1))),$(dep_$(1))), \
+ $(pkg_$(1)_commit))
+
+define dep_fetch_git_rmq
+ fetch_url1='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_FETCH_URL),$(1))'; \
+ fetch_url2='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_FETCH_URL),$(1))'; \
+ if test "$$$$fetch_url1" != '$(RABBITMQ_CURRENT_FETCH_URL)' && \
+ git clone -q -n -- "$$$$fetch_url1" $(DEPS_DIR)/$(call dep_name,$(1)); then \
+ fetch_url="$$$$fetch_url1"; \
+ push_url='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_PUSH_URL),$(1))'; \
+ elif git clone -q -n -- "$$$$fetch_url2" $(DEPS_DIR)/$(call dep_name,$(1)); then \
+ fetch_url="$$$$fetch_url2"; \
+ push_url='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_PUSH_URL),$(1))'; \
+ fi; \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && ( \
+ $(foreach ref,$(call dep_rmq_commits,$(1)), \
+ git checkout -q $(ref) >/dev/null 2>&1 || \
+ ) \
+ (echo "error: no valid pathspec among: $(call dep_rmq_commits,$(1))" \
+ 1>&2 && false) ) && \
+ (test "$$$$fetch_url" = "$$$$push_url" || \
+ git remote set-url --push origin "$$$$push_url")
+endef
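+
+# Illustrative example: with origin set to git@github.com:me/rabbitmq-server.git,
+# fetching the rabbit_common dependency first tries
+# git@github.com:me/rabbitmq-common.git, then falls back to the RabbitMQ
+# upstream on GitHub if that clone fails.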
+
+# --------------------------------------------------------------------
+# Component distribution.
+# --------------------------------------------------------------------
+
+list-dist-deps::
+ @:
+
+prepare-dist::
+ @:
+
+# --------------------------------------------------------------------
+# Umbrella-specific settings.
+# --------------------------------------------------------------------
+
+# If the top-level project is a RabbitMQ component, we override
+# $(DEPS_DIR) for this project to point to the top-level's one.
+#
+# We also verify that the guessed DEPS_DIR is actually named `deps`,
+# to rule out any situation where it is a coincidence that we found a
+# `rabbitmq-components.mk` in upper directories.
+
+possible_deps_dir_1 = $(abspath ..)
+possible_deps_dir_2 = $(abspath ../../..)
+
+ifeq ($(notdir $(possible_deps_dir_1)),deps)
+ifneq ($(wildcard $(possible_deps_dir_1)/../rabbitmq-components.mk),)
+deps_dir_overriden = 1
+DEPS_DIR ?= $(possible_deps_dir_1)
+DISABLE_DISTCLEAN = 1
+endif
+endif
+
+ifeq ($(deps_dir_overriden),)
+ifeq ($(notdir $(possible_deps_dir_2)),deps)
+ifneq ($(wildcard $(possible_deps_dir_2)/../rabbitmq-components.mk),)
+deps_dir_overriden = 1
+DEPS_DIR ?= $(possible_deps_dir_2)
+DISABLE_DISTCLEAN = 1
+endif
+endif
+endif
+
+ifneq ($(wildcard UMBRELLA.md),)
+DISABLE_DISTCLEAN = 1
+endif
+
+# We disable `make distclean` so $(DEPS_DIR) is not accidentally removed.
+
+ifeq ($(DISABLE_DISTCLEAN),1)
+ifneq ($(filter distclean distclean-deps,$(MAKECMDGOALS)),)
+SKIP_DEPS = 1
+endif
+endif
diff --git a/deps/rabbitmq_auth_backend_http/src/rabbit_auth_backend_http.erl b/deps/rabbitmq_auth_backend_http/src/rabbit_auth_backend_http.erl
new file mode 100644
index 0000000000..e28ac91e21
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/src/rabbit_auth_backend_http.erl
@@ -0,0 +1,188 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_auth_backend_http).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-behaviour(rabbit_authn_backend).
+-behaviour(rabbit_authz_backend).
+
+-export([description/0, p/1, q/1, join_tags/1]).
+-export([user_login_authentication/2, user_login_authorization/2,
+ check_vhost_access/3, check_resource_access/4, check_topic_access/4,
+ state_can_expire/0]).
+
+%% If a keepalive connection is closed, retry N times before failing.
+-define(RETRY_ON_KEEPALIVE_CLOSED, 3).
+
+-define(RESOURCE_REQUEST_PARAMETERS, [username, vhost, resource, name, permission]).
+
+-define(SUCCESSFUL_RESPONSE_CODES, [200, 201]).
+
+%%--------------------------------------------------------------------
+
+description() ->
+ [{name, <<"HTTP">>},
+ {description, <<"HTTP authentication / authorisation">>}].
+
+%%--------------------------------------------------------------------
+
+user_login_authentication(Username, AuthProps) ->
+ case http_req(p(user_path), q([{username, Username}|AuthProps])) of
+ {error, _} = E -> E;
+ "deny" -> {refused, "Denied by the backing HTTP service", []};
+ "allow" ++ Rest -> Tags = [rabbit_data_coercion:to_atom(T) ||
+ T <- string:tokens(Rest, " ")],
+ {ok, #auth_user{username = Username,
+ tags = Tags,
+ impl = none}};
+ Other -> {error, {bad_response, Other}}
+ end.
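+
+%% Example response bodies from the backing service (illustrative):
+%%   "deny"                           -> authentication refused
+%%   "allow"                          -> authenticated, no tags
+%%   "allow administrator management" -> authenticated with two tags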
+
+user_login_authorization(Username, AuthProps) ->
+ case user_login_authentication(Username, AuthProps) of
+ {ok, #auth_user{impl = Impl}} -> {ok, Impl};
+ Else -> Else
+ end.
+
+check_vhost_access(#auth_user{username = Username, tags = Tags}, VHost, undefined) ->
+ do_check_vhost_access(Username, Tags, VHost, "", undefined);
+check_vhost_access(#auth_user{username = Username, tags = Tags}, VHost,
+ AuthzData = #{peeraddr := PeerAddr}) when is_map(AuthzData) ->
+ AuthzData1 = maps:remove(peeraddr, AuthzData),
+ Ip = parse_peeraddr(PeerAddr),
+ do_check_vhost_access(Username, Tags, VHost, Ip, AuthzData1).
+
+do_check_vhost_access(Username, Tags, VHost, Ip, AuthzData) ->
+ OptionsParameters = context_as_parameters(AuthzData),
+ bool_req(vhost_path, [{username, Username},
+ {vhost, VHost},
+ {ip, Ip},
+ {tags, join_tags(Tags)}] ++ OptionsParameters).
+
+check_resource_access(#auth_user{username = Username, tags = Tags},
+ #resource{virtual_host = VHost, kind = Type, name = Name},
+ Permission,
+ AuthzContext) ->
+ OptionsParameters = context_as_parameters(AuthzContext),
+ bool_req(resource_path, [{username, Username},
+ {vhost, VHost},
+ {resource, Type},
+ {name, Name},
+ {permission, Permission},
+ {tags, join_tags(Tags)}] ++ OptionsParameters).
+
+check_topic_access(#auth_user{username = Username, tags = Tags},
+ #resource{virtual_host = VHost, kind = topic = Type, name = Name},
+ Permission,
+ Context) ->
+ OptionsParameters = context_as_parameters(Context),
+ bool_req(topic_path, [{username, Username},
+ {vhost, VHost},
+ {resource, Type},
+ {name, Name},
+ {permission, Permission},
+ {tags, join_tags(Tags)}] ++ OptionsParameters).
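+
+%% For topic authorisation, the context typically contributes extra
+%% parameters such as routing_key and variable_map.* entries (see
+%% unit_SUITE for an illustrative query string).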
+
+state_can_expire() -> false.
+
+%%--------------------------------------------------------------------
+
+context_as_parameters(Options) when is_map(Options) ->
+ % filter keys that would erase fixed parameters
+ [{rabbit_data_coercion:to_atom(Key), maps:get(Key, Options)}
+ || Key <- maps:keys(Options),
+ lists:member(
+ rabbit_data_coercion:to_atom(Key),
+ ?RESOURCE_REQUEST_PARAMETERS) =:= false];
+context_as_parameters(_) ->
+ [].
+
+bool_req(PathName, Props) ->
+ case http_req(p(PathName), q(Props)) of
+ "deny" -> false;
+ "allow" -> true;
+ E -> E
+ end.
+
+http_req(Path, Query) -> http_req(Path, Query, ?RETRY_ON_KEEPALIVE_CLOSED).
+
+http_req(Path, Query, Retry) ->
+ case do_http_req(Path, Query) of
+ {error, socket_closed_remotely} ->
+ %% HTTP keepalive connection can no longer be used. Retry the request.
+ case Retry > 0 of
+ true -> http_req(Path, Query, Retry - 1);
+ false -> {error, socket_closed_remotely}
+ end;
+ Other -> Other
+ end.
+
+
+do_http_req(Path0, Query) ->
+ URI = uri_parser:parse(Path0, [{port, 80}]),
+ {host, Host} = lists:keyfind(host, 1, URI),
+ {port, Port} = lists:keyfind(port, 1, URI),
+ HostHdr = rabbit_misc:format("~s:~b", [Host, Port]),
+ {ok, Method} = application:get_env(rabbitmq_auth_backend_http, http_method),
+ Request = case rabbit_data_coercion:to_atom(Method) of
+ get ->
+ Path = Path0 ++ "?" ++ Query,
+ rabbit_log:debug("auth_backend_http: GET ~s", [Path]),
+ {Path, [{"Host", HostHdr}]};
+ post ->
+ rabbit_log:debug("auth_backend_http: POST ~s", [Path0]),
+ {Path0, [{"Host", HostHdr}], "application/x-www-form-urlencoded", Query}
+ end,
+ HttpOpts = case application:get_env(rabbitmq_auth_backend_http,
+ ssl_options) of
+ {ok, Opts} when is_list(Opts) -> [{ssl, Opts}];
+ _ -> []
+ end,
+
+ case httpc:request(Method, Request, HttpOpts, []) of
+ {ok, {{_HTTP, Code, _}, _Headers, Body}} ->
+ rabbit_log:debug("auth_backend_http: response code is ~p, body: ~p", [Code, Body]),
+ case lists:member(Code, ?SUCCESSFUL_RESPONSE_CODES) of
+ true -> case parse_resp(Body) of
+ {error, _} = E -> E;
+ Resp -> Resp
+ end;
+ false -> {error, {Code, Body}}
+ end;
+ {error, _} = E ->
+ E
+ end.
+
+p(PathName) ->
+ {ok, Path} = application:get_env(rabbitmq_auth_backend_http, PathName),
+ Path.
+
+q(Args) ->
+ string:join([escape(K, V) || {K, V} <- Args], "&").
+
+escape(K, Map) when is_map(Map) ->
+ string:join([escape(rabbit_data_coercion:to_list(K) ++ "." ++ rabbit_data_coercion:to_list(Key), Value)
+ || {Key, Value} <- maps:to_list(Map)], "&");
+escape(K, V) ->
+ rabbit_data_coercion:to_list(K) ++ "=" ++ rabbit_http_util:quote_plus(V).
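+
+%% e.g. q([{username, <<"guest">>}, {vhost, <<"/">>}]) returns
+%% "username=guest&vhost=%2F"; map values expand to dotted keys such as
+%% "variable_map.username=guest" (see unit_SUITE for more examples).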
+
+parse_resp(Resp) -> string:to_lower(string:strip(Resp)).
+
+join_tags([]) -> "";
+join_tags(Tags) ->
+ Strings = [rabbit_data_coercion:to_list(T) || T <- Tags],
+ string:join(Strings, " ").
+
+parse_peeraddr(PeerAddr) ->
+ handle_inet_ntoa_peeraddr(inet:ntoa(PeerAddr), PeerAddr).
+
+handle_inet_ntoa_peeraddr({error, einval}, PeerAddr) ->
+ rabbit_data_coercion:to_list(PeerAddr);
+handle_inet_ntoa_peeraddr(PeerAddrStr, _PeerAddr0) ->
+ PeerAddrStr.
diff --git a/deps/rabbitmq_auth_backend_http/src/rabbit_auth_backend_http_app.erl b/deps/rabbitmq_auth_backend_http/src/rabbit_auth_backend_http_app.erl
new file mode 100644
index 0000000000..139f888fd7
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/src/rabbit_auth_backend_http_app.erl
@@ -0,0 +1,25 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_auth_backend_http_app).
+
+-behaviour(application).
+-export([start/2, stop/1]).
+
+-behaviour(supervisor).
+-export([init/1]).
+
+start(_Type, _StartArgs) ->
+ supervisor:start_link({local,?MODULE},?MODULE,[]).
+
+stop(_State) ->
+ ok.
+
+%%----------------------------------------------------------------------------
+
+init([]) ->
+ {ok, {{one_for_one,3,10},[]}}.
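+
+%% Note: the supervisor starts no children; this backend keeps no state of
+%% its own and issues its HTTP requests from the calling process.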
diff --git a/deps/rabbitmq_auth_backend_http/test/auth_SUITE.erl b/deps/rabbitmq_auth_backend_http/test/auth_SUITE.erl
new file mode 100644
index 0000000000..c8e7d37373
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/test/auth_SUITE.erl
@@ -0,0 +1,62 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2017-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+-module(auth_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-compile(export_all).
+
+-define(AUTH_PORT, 8000).
+-define(USER_PATH, "/auth/user").
+-define(BACKEND_CONFIG,
+ [{http_method, get},
+ {user_path, "http://localhost:" ++ integer_to_list(?AUTH_PORT) ++ ?USER_PATH},
+ {vhost_path, "http://localhost:" ++ integer_to_list(?AUTH_PORT) ++ "/auth/vhost"},
+ {resource_path, "http://localhost:" ++ integer_to_list(?AUTH_PORT) ++ "/auth/resource"},
+ {topic_path, "http://localhost:" ++ integer_to_list(?AUTH_PORT) ++ "/auth/topic"}]).
+-define(ALLOWED_USER, #{username => <<"Ala">>,
+ password => <<"Kocur">>,
+ tags => [policymaker, monitoring]}).
+-define(DENIED_USER, #{username => <<"Alice">>, password => <<"Cat">>}).
+
+all() -> [grants_access_to_user, denies_access_to_user].
+
+init_per_suite(Config) ->
+ configure_http_auth_backend(),
+ #{username := Username, password := Password, tags := Tags} = ?ALLOWED_USER,
+ start_http_auth_server(?AUTH_PORT, ?USER_PATH, #{Username => {Password, Tags}}),
+ [{allowed_user, ?ALLOWED_USER}, {denied_user, ?DENIED_USER} | Config].
+
+end_per_suite(_Config) ->
+ stop_http_auth_server().
+
+grants_access_to_user(Config) ->
+ #{username := U, password := P, tags := T} = ?config(allowed_user, Config),
+ ?assertMatch({ok, #auth_user{username = U, tags = T}},
+ rabbit_auth_backend_http:user_login_authentication(U, [{password, P}])).
+
+denies_access_to_user(Config) ->
+ #{username := U, password := P} = ?config(denied_user, Config),
+ ?assertMatch({refused, "Denied by the backing HTTP service", []},
+ rabbit_auth_backend_http:user_login_authentication(U, [{password, P}])).
+
+%%% HELPERS
+
+configure_http_auth_backend() ->
+ {ok, _} = application:ensure_all_started(inets),
+ [application:set_env(rabbitmq_auth_backend_http, K, V) || {K, V} <- ?BACKEND_CONFIG].
+
+start_http_auth_server(Port, Path, Users) ->
+ application:ensure_all_started(cowboy),
+ Dispatch = cowboy_router:compile([{'_', [{Path, auth_http_mock, Users}]}]),
+ {ok, _} = cowboy:start_clear(
+ mock_http_auth_listener, [{port, Port}], #{env => #{dispatch => Dispatch}}).
+
+stop_http_auth_server() ->
+ cowboy:stop_listener(mock_http_auth_listener).
diff --git a/deps/rabbitmq_auth_backend_http/test/auth_http_mock.erl b/deps/rabbitmq_auth_backend_http/test/auth_http_mock.erl
new file mode 100644
index 0000000000..2da978d63d
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/test/auth_http_mock.erl
@@ -0,0 +1,26 @@
+-module(auth_http_mock).
+
+-export([init/2]).
+
+%%% CALLBACKS
+
+init(Req = #{method := <<"GET">>}, Users) ->
+ QsVals = cowboy_req:parse_qs(Req),
+ Reply = authenticate(proplists:get_value(<<"username">>, QsVals),
+ proplists:get_value(<<"password">>, QsVals),
+ Users),
+ Req2 = cowboy_req:reply(200, #{<<"content-type">> => <<"text/plain">>}, Reply, Req),
+ {ok, Req2, Users}.
+
+%%% HELPERS
+
+authenticate(Username, Password, Users) ->
+ case maps:get(Username, Users, undefined) of
+ {MatchingPassword, Tags} when Password =:= MatchingPassword ->
+ StringTags = lists:map(fun(T) -> io_lib:format("~s", [T]) end, Tags),
+ <<"allow ", (list_to_binary(string:join(StringTags, " ")))/binary>>;
+ {_OtherPassword, _} ->
+ <<"deny">>;
+ undefined ->
+ <<"deny">>
+ end.
\ No newline at end of file
diff --git a/deps/rabbitmq_auth_backend_http/test/config_schema_SUITE.erl b/deps/rabbitmq_auth_backend_http/test/config_schema_SUITE.erl
new file mode 100644
index 0000000000..f6f0d17414
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/test/config_schema_SUITE.erl
@@ -0,0 +1,55 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2016-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(config_schema_SUITE).
+
+-compile(export_all).
+
+all() ->
+ [
+ run_snippets
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ Config1 = rabbit_ct_helpers:run_setup_steps(Config),
+ rabbit_ct_config_schema:init_schemas(rabbitmq_auth_backend_http, Config1).
+
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, Testcase}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_testcase(Testcase, Config) ->
+ Config1 = rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()),
+ rabbit_ct_helpers:testcase_finished(Config1, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+run_snippets(Config) ->
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, run_snippets1, [Config]).
+
+run_snippets1(Config) ->
+ rabbit_ct_config_schema:run_snippets(Config).
+
diff --git a/deps/rabbitmq_auth_backend_http/test/config_schema_SUITE_data/rabbitmq_auth_backend_http.snippets b/deps/rabbitmq_auth_backend_http/test/config_schema_SUITE_data/rabbitmq_auth_backend_http.snippets
new file mode 100644
index 0000000000..e12266db45
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/test/config_schema_SUITE_data/rabbitmq_auth_backend_http.snippets
@@ -0,0 +1,26 @@
+[{all_keys,
+ "auth_backends.1 = http
+ auth_http.http_method = post
+ auth_http.user_path = http://some-server/auth/user
+ auth_http.vhost_path = http://some-server/auth/vhost
+ auth_http.resource_path = http://some-server/auth/resource",
+ [{rabbit,[{auth_backends,[rabbit_auth_backend_http]}]},
+ {rabbitmq_auth_backend_http,
+ [{http_method, post},
+ {user_path,"http://some-server/auth/user"},
+ {vhost_path,"http://some-server/auth/vhost"},
+ {resource_path,"http://some-server/auth/resource"}]}],
+ [rabbitmq_auth_backend_http]}
+
+, {default_http_method,
+ "auth_backends.1 = http
+ auth_http.user_path = http://some-server/auth/user
+ auth_http.vhost_path = http://some-server/auth/vhost
+ auth_http.resource_path = http://some-server/auth/resource",
+ [{rabbit,[{auth_backends,[rabbit_auth_backend_http]}]},
+ {rabbitmq_auth_backend_http,
+ [{user_path,"http://some-server/auth/user"},
+ {vhost_path,"http://some-server/auth/vhost"},
+ {resource_path,"http://some-server/auth/resource"}]}],
+ [rabbitmq_auth_backend_http]}
+ ].
diff --git a/deps/rabbitmq_auth_backend_http/test/unit_SUITE.erl b/deps/rabbitmq_auth_backend_http/test/unit_SUITE.erl
new file mode 100644
index 0000000000..552399a313
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_http/test/unit_SUITE.erl
@@ -0,0 +1,54 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2017-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(unit_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, parallel_tests}
+ ].
+
+groups() ->
+ [
+ {parallel_tests, [], [
+ query,
+ join_tags
+ ]}
+ ].
+
+init_per_group(_, Config) -> Config.
+end_per_group(_, Config) -> Config.
+
+query(_Config) ->
+ ?assertEqual("username=guest&vhost=%2F&resource=topic&name=amp.topic&permission=write",
+ rabbit_auth_backend_http:q([
+ {username, <<"guest">>},
+ {vhost, <<"/">>},
+ {resource, topic},
+ {name, <<"amp.topic">>},
+ {permission, write}])),
+
+ ?assertEqual("username=guest&routing_key=a.b.c&variable_map.username=guest&variable_map.vhost=other-vhost",
+ rabbit_auth_backend_http:q([
+ {username, <<"guest">>},
+ {routing_key,<<"a.b.c">>},
+ {variable_map, #{<<"username">> => <<"guest">>,
+ <<"vhost">> => <<"other-vhost">>}
+ }])).
+
+join_tags(_Config) ->
+ ?assertEqual("management administrator custom",
+ rabbit_auth_backend_http:join_tags([management, administrator, custom])),
+ ?assertEqual("management administrator custom2",
+ rabbit_auth_backend_http:join_tags(["management", "administrator", "custom2"])),
+ ?assertEqual("management administrator custom3 group:dev",
+ rabbit_auth_backend_http:join_tags([management, administrator, custom3, 'group:dev'])).
diff --git a/deps/rabbitmq_auth_backend_ldap/.gitignore b/deps/rabbitmq_auth_backend_ldap/.gitignore
new file mode 100644
index 0000000000..c5df8bf79d
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_ldap/.gitignore
@@ -0,0 +1,20 @@
+.sw?
+.*.sw?
+*.beam
+.vagrant
+/.erlang.mk/
+/cover/
+/deps/
+/doc/
+/ebin/
+/escript/
+/escript.lock
+/logs/
+/plugins/
+/plugins.lock
+/sbin/
+/sbin.lock
+
+test/config_schema_SUITE_data/schema/
+
+rabbitmq_auth_backend_ldap.d
diff --git a/deps/rabbitmq_auth_backend_ldap/.travis.yml b/deps/rabbitmq_auth_backend_ldap/.travis.yml
new file mode 100644
index 0000000000..132fb506bc
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_ldap/.travis.yml
@@ -0,0 +1,63 @@
+# vim:sw=2:et:
+
+os: linux
+dist: xenial
+language: elixir
+notifications:
+ email:
+ recipients:
+ - alerts@rabbitmq.com
+ on_success: never
+ on_failure: always
+addons:
+ apt:
+ packages:
+ - awscli
+ - slapd
+ - ldap-utils
+cache:
+ apt: true
+env:
+ global:
+ - secure: S0N4NmNvsfbdfxe78MCxPFXX1NXhbpG4FVViCPTdKJUQHaWsaHnAUke95ItXUVB5i5ct3ogxNZk0fYXdbIjMgI/cS4L+/ASJ4MhhdddM80lwPjdbgC6+AXzM5ncYbdLqjZlKM5If4VnL7LqVK9G3PxavuuGTi8Idnn57CWtrI3g=
+ - secure: C3B5R4FQ8iePMzhqq75BY2E2FVD1rZPAzTgEhZ+psgPvioyjXRIriHpFOMrqwQcPPpjdsHizpuSG9/SNHtjitw7uCB/Rnh8cyTnukZf7U4xPM37vKGq4HTYN8ABxjPyQGCWbS6TRXOf4Acp5qw8U87hmGmOlskTmnFinciAIvKA=
+
+ # $base_rmq_ref is used by rabbitmq-components.mk to select the
+ # appropriate branch for dependencies.
+ - base_rmq_ref=master
+
+elixir:
+ - '1.9'
+otp_release:
+ - '21.3'
+ - '22.2'
+
+install:
+ # Since this is an Erlang project (we set the language to Elixir only
+ # to ensure it is installed), we don't want Travis to run mix(1)
+ # automatically, as it would break.
+ skip
+
+script:
+ # $current_rmq_ref is also used by rabbitmq-components.mk to select
+ # the appropriate branch for dependencies.
+ - make check-rabbitmq-components.mk \
+        current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+ - make xref \
+        current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+ - make tests \
+        current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+
+after_failure:
+ - |
+ cd "$TRAVIS_BUILD_DIR"
+ if test -d logs && test "$AWS_ACCESS_KEY_ID" && test "$AWS_SECRET_ACCESS_KEY"; then
+ archive_name="$(basename "$TRAVIS_REPO_SLUG")-$TRAVIS_JOB_NUMBER"
+
+ tar -c --transform "s/^logs/${archive_name}/" -f - logs | \
+ xz > "${archive_name}.tar.xz"
+
+ aws s3 cp "${archive_name}.tar.xz" s3://server-release-pipeline/travis-ci-logs/ \
+ --region eu-west-1 \
+ --acl public-read
+ fi
diff --git a/deps/rabbitmq_auth_backend_ldap/.travis.yml.patch b/deps/rabbitmq_auth_backend_ldap/.travis.yml.patch
new file mode 100644
index 0000000000..1df3955850
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_ldap/.travis.yml.patch
@@ -0,0 +1,11 @@
+--- .travis.yml.orig 2020-03-26 15:55:03.107179000 +0100
++++ .travis.yml 2020-03-26 15:55:22.294603000 +0100
+@@ -13,6 +13,8 @@
+ apt:
+ packages:
+ - awscli
++ - slapd
++ - ldap-utils
+ cache:
+ apt: true
+ env:
diff --git a/deps/rabbitmq_auth_backend_ldap/CODE_OF_CONDUCT.md b/deps/rabbitmq_auth_backend_ldap/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000000..08697906fd
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_ldap/CODE_OF_CONDUCT.md
@@ -0,0 +1,44 @@
+# Contributor Code of Conduct
+
+As contributors and maintainers of this project, and in the interest of fostering an open
+and welcoming community, we pledge to respect all people who contribute through reporting
+issues, posting feature requests, updating documentation, submitting pull requests or
+patches, and other activities.
+
+We are committed to making participation in this project a harassment-free experience for
+everyone, regardless of level of experience, gender, gender identity and expression,
+sexual orientation, disability, personal appearance, body size, race, ethnicity, age,
+religion, or nationality.
+
+Examples of unacceptable behavior by participants include:
+
+ * The use of sexualized language or imagery
+ * Personal attacks
+ * Trolling or insulting/derogatory comments
+ * Public or private harassment
+ * Publishing others' private information, such as physical or electronic addresses,
+ without explicit permission
+ * Other unethical or unprofessional conduct
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments,
+commits, code, wiki edits, issues, and other contributions that are not aligned to this
+Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors
+that they deem inappropriate, threatening, offensive, or harmful.
+
+By adopting this Code of Conduct, project maintainers commit themselves to fairly and
+consistently applying these principles to every aspect of managing this project. Project
+maintainers who do not follow or enforce the Code of Conduct may be permanently removed
+from the project team.
+
+This Code of Conduct applies both within project spaces and in public spaces when an
+individual is representing the project or its community.
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by
+contacting a project maintainer at [info@rabbitmq.com](mailto:info@rabbitmq.com). All complaints will
+be reviewed and investigated and will result in a response that is deemed necessary and
+appropriate to the circumstances. Maintainers are obligated to maintain confidentiality
+with regard to the reporter of an incident.
+
+This Code of Conduct is adapted from the
+[Contributor Covenant](https://contributor-covenant.org), version 1.3.0, available at
+[contributor-covenant.org/version/1/3/0/](https://contributor-covenant.org/version/1/3/0/)
diff --git a/deps/rabbitmq_auth_backend_ldap/CONTRIBUTING.md b/deps/rabbitmq_auth_backend_ldap/CONTRIBUTING.md
new file mode 100644
index 0000000000..b50bd82900
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_ldap/CONTRIBUTING.md
@@ -0,0 +1,103 @@
+Thank you for using RabbitMQ and for taking the time to contribute to the project.
+This document has two main parts:
+
+ * when and how to file GitHub issues for RabbitMQ projects
+ * how to submit pull requests
+
+They intend to save you and RabbitMQ maintainers some time, so please
+take a moment to read through them.
+
+## Overview
+
+### GitHub issues
+
+The RabbitMQ team uses GitHub issues for _specific actionable items_ that
+engineers can work on. This assumes the following:
+
+* GitHub issues are not used for questions, investigations, root cause
+ analysis, discussions of potential issues, etc (as defined by this team)
+* Enough information is provided by the reporter for maintainers to work with
+
+The team receives many questions through various venues every single
+day. Frequently, these questions do not include the necessary details
+the team needs to begin useful work. GitHub issues can very quickly
+turn into something impossible to navigate and make sense
+of. Because of this, questions, investigations, root cause analysis,
+and discussions of potential features are all considered to be
+[mailing list][rmq-users] material. If you are unsure where to begin,
+the [RabbitMQ users mailing list][rmq-users] is the right place.
+
+Getting all the details necessary to reproduce an issue, make a
+conclusion or even form a hypothesis about what's happening can take a
+fair amount of time. Please help others help you by providing a way to
+reproduce the behavior you're observing, or at least sharing as much
+relevant information as possible on the [RabbitMQ users mailing
+list][rmq-users].
+
+Please provide versions of the software used:
+
+ * RabbitMQ server
+ * Erlang
+ * Operating system version (and distribution, if applicable)
+ * All client libraries used
+ * RabbitMQ plugins (if applicable)
+
+The following information greatly helps in investigating and reproducing issues:
+
+ * RabbitMQ server logs
+ * A code example or terminal transcript that can be used to reproduce
+ * Full exception stack traces (a single line message is not enough!)
+ * `rabbitmqctl report` and `rabbitmqctl environment` output
+ * Other relevant details about the environment and workload, e.g. a traffic capture
+ * Feel free to edit out hostnames and other potentially sensitive information.
+
+To make collecting much of this and other environment information easier, use
+the [`rabbitmq-collect-env`][rmq-collect-env] script. It will produce an archive with
+server logs, operating system logs, output of certain diagnostics commands and so on.
+Please note that **no effort is made to scrub any information that may be sensitive**.
+
+### Pull Requests
+
+RabbitMQ projects use pull requests to discuss, collaborate on and accept code contributions.
+Pull requests are the primary place for discussing code changes.
+
+Here's the recommended workflow:
+
+ * [Fork the repository][github-fork] or repositories you plan on contributing to. If multiple
+ repositories are involved in addressing the same issue, please use the same branch name
+ in each repository
+ * Create a branch with a descriptive name in the relevant repositories
+ * Make your changes, run tests (usually with `make tests`), commit with a
+ [descriptive message][git-commit-msgs], push to your fork
+ * Submit pull requests with an explanation what has been changed and **why**
+ * Submit a filled out and signed [Contributor Agreement][ca-agreement] if needed (see below)
+ * Be patient. We will get to your pull request eventually
+
+If what you are going to work on is a substantial change, please first
+ask the core team for their opinion on the [RabbitMQ users mailing list][rmq-users].
+
+## Running Tests
+
+ make tests
+
+## Code of Conduct
+
+See [CODE_OF_CONDUCT.md](./CODE_OF_CONDUCT.md).
+
+## Contributor Agreement
+
+If you want to contribute a non-trivial change, please submit a signed
+copy of our [Contributor Agreement][ca-agreement] around the time you
+submit your pull request. This will make it much easier (in some
+cases, possible) for the RabbitMQ team at Pivotal to merge your
+contribution.
+
+## Where to Ask Questions
+
+If something isn't clear, feel free to ask on our [mailing list][rmq-users].
+
+[rmq-collect-env]: https://github.com/rabbitmq/support-tools/blob/master/scripts/rabbitmq-collect-env
+[git-commit-msgs]: https://chris.beams.io/posts/git-commit/
+[rmq-users]: https://groups.google.com/forum/#!forum/rabbitmq-users
+[ca-agreement]: https://cla.pivotal.io/sign/rabbitmq
+[github-fork]: https://help.github.com/articles/fork-a-repo/
diff --git a/deps/rabbitmq_auth_backend_ldap/LICENSE b/deps/rabbitmq_auth_backend_ldap/LICENSE
new file mode 100644
index 0000000000..f2da65d175
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_ldap/LICENSE
@@ -0,0 +1,4 @@
+This package is licensed under the MPL 2.0. For the MPL 2.0, please see LICENSE-MPL-RabbitMQ.
+
+If you have any questions regarding licensing, please contact us at
+info@rabbitmq.com.
diff --git a/deps/rabbitmq_auth_backend_ldap/LICENSE-MPL-RabbitMQ b/deps/rabbitmq_auth_backend_ldap/LICENSE-MPL-RabbitMQ
new file mode 100644
index 0000000000..14e2f777f6
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_ldap/LICENSE-MPL-RabbitMQ
@@ -0,0 +1,373 @@
+Mozilla Public License Version 2.0
+==================================
+
+1. Definitions
+--------------
+
+1.1. "Contributor"
+ means each individual or legal entity that creates, contributes to
+ the creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+ means the combination of the Contributions of others (if any) used
+ by a Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+ means Source Code Form to which the initial Contributor has attached
+ the notice in Exhibit A, the Executable Form of such Source Code
+ Form, and Modifications of such Source Code Form, in each case
+ including portions thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ (a) that the initial Contributor has attached the notice described
+ in Exhibit B to the Covered Software; or
+
+ (b) that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the
+ terms of a Secondary License.
+
+1.6. "Executable Form"
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+ means a work that combines Covered Software with other material, in
+ a separate file or files, that is not Covered Software.
+
+1.8. "License"
+ means this document.
+
+1.9. "Licensable"
+ means having the right to grant, to the maximum extent possible,
+ whether at the time of the initial grant or subsequently, any and
+ all of the rights conveyed by this License.
+
+1.10. "Modifications"
+ means any of the following:
+
+ (a) any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered
+ Software; or
+
+ (b) any new file in Source Code Form that contains any Covered
+ Software.
+
+1.11. "Patent Claims" of a Contributor
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the
+ License, by the making, using, selling, offering for sale, having
+ made, import, or transfer of either its Contributions or its
+ Contributor Version.
+
+1.12. "Secondary License"
+ means either the GNU General Public License, Version 2.0, the GNU
+ Lesser General Public License, Version 2.1, the GNU Affero General
+ Public License, Version 3.0, or any later versions of those
+ licenses.
+
+1.13. "Source Code Form"
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that
+ controls, is controlled by, or is under common control with You. For
+ purposes of this definition, "control" means (a) the power, direct
+ or indirect, to cause the direction or management of such entity,
+ whether by contract or otherwise, or (b) ownership of more than
+ fifty percent (50%) of the outstanding shares or beneficial
+ ownership of such entity.
+
+2. License Grants and Conditions
+--------------------------------
+
+2.1. Grants
+
+Each Contributor hereby grants You a world-wide, royalty-free,
+non-exclusive license:
+
+(a) under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+(b) under Patent Claims of such Contributor to make, use, sell, offer
+ for sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+The licenses granted in Section 2.1 with respect to any Contribution
+become effective for each Contribution on the date the Contributor first
+distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+The licenses granted in this Section 2 are the only rights granted under
+this License. No additional rights or licenses will be implied from the
+distribution or licensing of Covered Software under this License.
+Notwithstanding Section 2.1(b) above, no patent license is granted by a
+Contributor:
+
+(a) for any code that a Contributor has removed from Covered Software;
+ or
+
+(b) for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+(c) under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+This License does not grant any rights in the trademarks, service marks,
+or logos of any Contributor (except as may be necessary to comply with
+the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+No Contributor makes additional grants as a result of Your choice to
+distribute the Covered Software under a subsequent version of this
+License (see Section 10.2) or under the terms of a Secondary License (if
+permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+Each Contributor represents that the Contributor believes its
+Contributions are its original creation(s) or it has sufficient rights
+to grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+This License is not intended to limit any rights You have under
+applicable copyright doctrines of fair use, fair dealing, or other
+equivalents.
+
+2.7. Conditions
+
+Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
+in Section 2.1.
+
+3. Responsibilities
+-------------------
+
+3.1. Distribution of Source Form
+
+All distribution of Covered Software in Source Code Form, including any
+Modifications that You create or to which You contribute, must be under
+the terms of this License. You must inform recipients that the Source
+Code Form of the Covered Software is governed by the terms of this
+License, and how they can obtain a copy of this License. You may not
+attempt to alter or restrict the recipients' rights in the Source Code
+Form.
+
+3.2. Distribution of Executable Form
+
+If You distribute Covered Software in Executable Form then:
+
+(a) such Covered Software must also be made available in Source Code
+ Form, as described in Section 3.1, and You must inform recipients of
+ the Executable Form how they can obtain a copy of such Source Code
+ Form by reasonable means in a timely manner, at a charge no more
+ than the cost of distribution to the recipient; and
+
+(b) You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter
+ the recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+You may create and distribute a Larger Work under terms of Your choice,
+provided that You also comply with the requirements of this License for
+the Covered Software. If the Larger Work is a combination of Covered
+Software with a work governed by one or more Secondary Licenses, and the
+Covered Software is not Incompatible With Secondary Licenses, this
+License permits You to additionally distribute such Covered Software
+under the terms of such Secondary License(s), so that the recipient of
+the Larger Work may, at their option, further distribute the Covered
+Software under the terms of either this License or such Secondary
+License(s).
+
+3.4. Notices
+
+You may not remove or alter the substance of any license notices
+(including copyright notices, patent notices, disclaimers of warranty,
+or limitations of liability) contained within the Source Code Form of
+the Covered Software, except that You may alter any license notices to
+the extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+You may choose to offer, and to charge a fee for, warranty, support,
+indemnity or liability obligations to one or more recipients of Covered
+Software. However, You may do so only on Your own behalf, and not on
+behalf of any Contributor. You must make it absolutely clear that any
+such warranty, support, indemnity, or liability obligation is offered by
+You alone, and You hereby agree to indemnify every Contributor for any
+liability incurred by such Contributor as a result of warranty, support,
+indemnity or liability terms You offer. You may include additional
+disclaimers of warranty and limitations of liability specific to any
+jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+---------------------------------------------------
+
+If it is impossible for You to comply with any of the terms of this
+License with respect to some or all of the Covered Software due to
+statute, judicial order, or regulation then You must: (a) comply with
+the terms of this License to the maximum extent possible; and (b)
+describe the limitations and the code they affect. Such description must
+be placed in a text file included with all distributions of the Covered
+Software under this License. Except to the extent prohibited by statute
+or regulation, such description must be sufficiently detailed for a
+recipient of ordinary skill to be able to understand it.
+
+5. Termination
+--------------
+
+5.1. The rights granted under this License will terminate automatically
+if You fail to comply with any of its terms. However, if You become
+compliant, then the rights granted under this License from a particular
+Contributor are reinstated (a) provisionally, unless and until such
+Contributor explicitly and finally terminates Your grants, and (b) on an
+ongoing basis, if such Contributor fails to notify You of the
+non-compliance by some reasonable means prior to 60 days after You have
+come back into compliance. Moreover, Your grants from a particular
+Contributor are reinstated on an ongoing basis if such Contributor
+notifies You of the non-compliance by some reasonable means, this is the
+first time You have received notice of non-compliance with this License
+from such Contributor, and You become compliant prior to 30 days after
+Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+infringement claim (excluding declaratory judgment actions,
+counter-claims, and cross-claims) alleging that a Contributor Version
+directly or indirectly infringes any patent, then the rights granted to
+You by any and all Contributors for the Covered Software under Section
+2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all
+end user license agreements (excluding distributors and resellers) which
+have been validly granted by You or Your distributors under this License
+prior to termination shall survive termination.
+
+************************************************************************
+* *
+* 6. Disclaimer of Warranty *
+* ------------------------- *
+* *
+* Covered Software is provided under this License on an "as is" *
+* basis, without warranty of any kind, either expressed, implied, or *
+* statutory, including, without limitation, warranties that the *
+* Covered Software is free of defects, merchantable, fit for a *
+* particular purpose or non-infringing. The entire risk as to the *
+* quality and performance of the Covered Software is with You. *
+* Should any Covered Software prove defective in any respect, You *
+* (not any Contributor) assume the cost of any necessary servicing, *
+* repair, or correction. This disclaimer of warranty constitutes an *
+* essential part of this License. No use of any Covered Software is *
+* authorized under this License except under this disclaimer. *
+* *
+************************************************************************
+
+************************************************************************
+* *
+* 7. Limitation of Liability *
+* -------------------------- *
+* *
+* Under no circumstances and under no legal theory, whether tort *
+* (including negligence), contract, or otherwise, shall any *
+* Contributor, or anyone who distributes Covered Software as *
+* permitted above, be liable to You for any direct, indirect, *
+* special, incidental, or consequential damages of any character *
+* including, without limitation, damages for lost profits, loss of *
+* goodwill, work stoppage, computer failure or malfunction, or any *
+* and all other commercial damages or losses, even if such party *
+* shall have been informed of the possibility of such damages. This *
+* limitation of liability shall not apply to liability for death or *
+* personal injury resulting from such party's negligence to the *
+* extent applicable law prohibits such limitation. Some *
+* jurisdictions do not allow the exclusion or limitation of *
+* incidental or consequential damages, so this exclusion and *
+* limitation may not apply to You. *
+* *
+************************************************************************
+
+8. Litigation
+-------------
+
+Any litigation relating to this License may be brought only in the
+courts of a jurisdiction where the defendant maintains its principal
+place of business and such litigation shall be governed by laws of that
+jurisdiction, without reference to its conflict-of-law provisions.
+Nothing in this Section shall prevent a party's ability to bring
+cross-claims or counter-claims.
+
+9. Miscellaneous
+----------------
+
+This License represents the complete agreement concerning the subject
+matter hereof. If any provision of this License is held to be
+unenforceable, such provision shall be reformed only to the extent
+necessary to make it enforceable. Any law or regulation which provides
+that the language of a contract shall be construed against the drafter
+shall not be used to construe this License against a Contributor.
+
+10. Versions of the License
+---------------------------
+
+10.1. New Versions
+
+Mozilla Foundation is the license steward. Except as provided in Section
+10.3, no one other than the license steward has the right to modify or
+publish new versions of this License. Each version will be given a
+distinguishing version number.
+
+10.2. Effect of New Versions
+
+You may distribute the Covered Software under the terms of the version
+of the License under which You originally received the Covered Software,
+or under the terms of any subsequent version published by the license
+steward.
+
+10.3. Modified Versions
+
+If you create software not governed by this License, and you want to
+create a new license for such software, you may create and use a
+modified version of this License if you rename the license and remove
+any references to the name of the license steward (except to note that
+such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+Licenses
+
+If You choose to distribute Source Code Form that is Incompatible With
+Secondary Licenses under the terms of this version of the License, the
+notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+-------------------------------------------
+
+ This Source Code Form is subject to the terms of the Mozilla Public
+ License, v. 2.0. If a copy of the MPL was not distributed with this
+ file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular
+file, then You may include the notice in a location (such as a LICENSE
+file in a relevant directory) where a recipient would be likely to look
+for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+---------------------------------------------------------
+
+ This Source Code Form is "Incompatible With Secondary Licenses", as
+ defined by the Mozilla Public License, v. 2.0.
diff --git a/deps/rabbitmq_auth_backend_ldap/Makefile b/deps/rabbitmq_auth_backend_ldap/Makefile
new file mode 100644
index 0000000000..0ee0c7e0c9
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_ldap/Makefile
@@ -0,0 +1,52 @@
+PROJECT = rabbitmq_auth_backend_ldap
+PROJECT_DESCRIPTION = RabbitMQ LDAP Authentication Backend
+PROJECT_MOD = rabbit_auth_backend_ldap_app
+
+# Note:
+# These default values are also used in calls to get_expected_env_str/2.
+define PROJECT_ENV
+[
+ {servers, undefined},
+ {user_bind_pattern, none},
+ {user_dn_pattern, "$${username}"},
+ {dn_lookup_attribute, none},
+ {dn_lookup_base, none},
+ {group_lookup_base, none},
+ {dn_lookup_bind, as_user},
+ {other_bind, as_user},
+ {anon_auth, false},
+ {vhost_access_query, {constant, true}},
+ {resource_access_query, {constant, true}},
+ {topic_access_query, {constant, true}},
+ {tag_queries, [{administrator, {constant, false}}]},
+ {use_ssl, false},
+ {use_starttls, false},
+ {ssl_options, []},
+ {port, 389},
+ {timeout, infinity},
+ {log, false},
+ {pool_size, 64},
+ {idle_timeout, 300000}
+ ]
+endef
+
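+# For illustration only (not used by the build): these defaults become the
+# application's env and can be overridden in the broker's advanced.config,
+# e.g. with hypothetical values:
+#
+#   [{rabbitmq_auth_backend_ldap, [
+#     {servers,         ["ldap.eng.example.com"]},
+#     {user_dn_pattern, "cn=${username},ou=People,dc=example,dc=com"},
+#     {use_ssl,         true},
+#     {port,            636}
+#   ]}].
+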
+define PROJECT_APP_EXTRA_KEYS
+ {broker_version_requirements, []}
+endef
+
+LOCAL_DEPS = eldap
+DEPS = rabbit_common rabbit
+TEST_DEPS = ct_helper rabbitmq_ct_helpers rabbitmq_ct_client_helpers amqp_client
+dep_ct_helper = git https://github.com/extend/ct_helper.git master
+
+DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-plugin.mk
+DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk
+
+# FIXME: Use erlang.mk patched for RabbitMQ, while waiting for PRs to be
+# reviewed and merged.
+
+ERLANG_MK_REPO = https://github.com/rabbitmq/erlang.mk.git
+ERLANG_MK_COMMIT = rabbitmq-tmp
+
+include rabbitmq-components.mk
+include erlang.mk
diff --git a/deps/rabbitmq_auth_backend_ldap/README-authorisation.md b/deps/rabbitmq_auth_backend_ldap/README-authorisation.md
new file mode 100644
index 0000000000..5abf9b6842
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_ldap/README-authorisation.md
@@ -0,0 +1 @@
+See [RabbitMQ LDAP authentication backend docs](https://www.rabbitmq.com/ldap.html).
diff --git a/deps/rabbitmq_auth_backend_ldap/README.md b/deps/rabbitmq_auth_backend_ldap/README.md
new file mode 100644
index 0000000000..91c6c6d1fb
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_ldap/README.md
@@ -0,0 +1,37 @@
+# RabbitMQ LDAP Authentication Backend
+
+This plugin provides [authentication and authorisation backends](https://rabbitmq.com/access-control.html)
+for RabbitMQ that use LDAP.
+
+Under heavy load this plugin can put a higher than expected amount of load on its backing LDAP service.
+We recommend using it together with [rabbitmq_auth_backend_cache](https://github.com/rabbitmq/rabbitmq-auth-backend-cache)
+with a reasonable caching interval (e.g. 2-3 minutes), as sketched below.
+
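+A minimal `rabbitmq.conf` sketch combining the two plugins (key names as
+documented by `rabbitmq_auth_backend_cache`; the TTL is in milliseconds):
+
+    auth_backends.1 = cache
+    auth_cache.cached_backend = ldap
+    auth_cache.cache_ttl = 180000
+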
+## Installation
+
+This plugin ships with reasonably recent RabbitMQ versions
+(e.g. `3.3.0` or later). Enable it with
+
+ rabbitmq-plugins enable rabbitmq_auth_backend_ldap
+
+## Documentation
+
+[See LDAP guide](https://www.rabbitmq.com/ldap.html) on rabbitmq.com.
+
+
+## Building from Source
+
+See [Plugin Development guide](https://www.rabbitmq.com/plugin-development.html).
+
+TL;DR: running
+
+ make dist
+
+will build the plugin and put build artifacts under the `./plugins` directory.
+
+
+## Copyright and License
+
+(c) 2007-2020 VMware, Inc. or its affiliates.
+
+Released under the MPL, the same license as RabbitMQ.
diff --git a/deps/rabbitmq_auth_backend_ldap/TESTING.md b/deps/rabbitmq_auth_backend_ldap/TESTING.md
new file mode 100644
index 0000000000..0308b4fb07
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_ldap/TESTING.md
@@ -0,0 +1,25 @@
+# Running Tests
+
+## The OpenLDAP Dependency
+
+The testsuite depends on an OpenLDAP server: slapd(8) and the OpenLDAP
+command line tools must be installed for it to work. However, there is no
+need to configure the server.
+
+The testsuite takes care of starting its own server and configuring it.
+It won't conflict with the system OpenLDAP server instance.
+
+The testsuite needs the following modules to be available (see the sketch below):
+* `bdb`
+* `memberof`
+* `refint`
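+
+For reference only: slapd typically loads such modules via `moduleload`
+directives, and the testsuite generates its own configuration roughly along
+these lines (hypothetical fragment):
+
+```
+moduleload back_bdb.la
+moduleload memberof.la
+moduleload refint.la
+```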
+
+## Running All Suites
+
+As with all other RabbitMQ subprojects,
+
+``` sh
+make tests
+```
+
+will run all test suites.
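+
+## Running a Single Suite
+
+Because the build is driven by erlang.mk, an individual suite can usually be
+run through the corresponding `ct-` target (the suite name here is
+hypothetical):
+
+``` sh
+make ct-system
+```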
diff --git a/deps/rabbitmq_auth_backend_ldap/erlang.mk b/deps/rabbitmq_auth_backend_ldap/erlang.mk
new file mode 100644
index 0000000000..fce4be0b0a
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_ldap/erlang.mk
@@ -0,0 +1,7808 @@
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+.PHONY: all app apps deps search rel relup docs install-docs check tests clean distclean help erlang-mk
+
+ERLANG_MK_FILENAME := $(realpath $(lastword $(MAKEFILE_LIST)))
+export ERLANG_MK_FILENAME
+
+ERLANG_MK_VERSION = 2019.07.01-40-geb3e4b0
+ERLANG_MK_WITHOUT =
+
+# Make 3.81 and 3.82 are deprecated.
+
+ifeq ($(MAKELEVEL)$(MAKE_VERSION),03.81)
+$(warning Please upgrade to GNU Make 4 or later: https://erlang.mk/guide/installation.html)
+endif
+
+ifeq ($(MAKELEVEL)$(MAKE_VERSION),03.82)
+$(warning Please upgrade to GNU Make 4 or later: https://erlang.mk/guide/installation.html)
+endif
+
+# Core configuration.
+
+PROJECT ?= $(notdir $(CURDIR))
+PROJECT := $(strip $(PROJECT))
+
+PROJECT_VERSION ?= rolling
+PROJECT_MOD ?= $(PROJECT)_app
+PROJECT_ENV ?= []
+
+# Verbosity.
+
+V ?= 0
+
+verbose_0 = @
+verbose_2 = set -x;
+verbose = $(verbose_$(V))
+
+ifeq ($(V),3)
+SHELL := $(SHELL) -x
+endif
+
+gen_verbose_0 = @echo " GEN " $@;
+gen_verbose_2 = set -x;
+gen_verbose = $(gen_verbose_$(V))
+
+gen_verbose_esc_0 = @echo " GEN " $$@;
+gen_verbose_esc_2 = set -x;
+gen_verbose_esc = $(gen_verbose_esc_$(V))
+
+# Temporary files directory.
+
+ERLANG_MK_TMP ?= $(CURDIR)/.erlang.mk
+export ERLANG_MK_TMP
+
+# "erl" command.
+
+ERL = erl +A1 -noinput -boot no_dot_erlang
+
+# Platform detection.
+
+ifeq ($(PLATFORM),)
+UNAME_S := $(shell uname -s)
+
+ifeq ($(UNAME_S),Linux)
+PLATFORM = linux
+else ifeq ($(UNAME_S),Darwin)
+PLATFORM = darwin
+else ifeq ($(UNAME_S),SunOS)
+PLATFORM = solaris
+else ifeq ($(UNAME_S),GNU)
+PLATFORM = gnu
+else ifeq ($(UNAME_S),FreeBSD)
+PLATFORM = freebsd
+else ifeq ($(UNAME_S),NetBSD)
+PLATFORM = netbsd
+else ifeq ($(UNAME_S),OpenBSD)
+PLATFORM = openbsd
+else ifeq ($(UNAME_S),DragonFly)
+PLATFORM = dragonfly
+else ifeq ($(shell uname -o),Msys)
+PLATFORM = msys2
+else
+$(error Unable to detect platform. Please open a ticket with the output of uname -a.)
+endif
+
+export PLATFORM
+endif
+
+# Core targets.
+
+all:: deps app rel
+
+# Noop to avoid a Make warning when there's nothing to do.
+rel::
+ $(verbose) :
+
+relup:: deps app
+
+check:: tests
+
+clean:: clean-crashdump
+
+clean-crashdump:
+ifneq ($(wildcard erl_crash.dump),)
+ $(gen_verbose) rm -f erl_crash.dump
+endif
+
+distclean:: clean distclean-tmp
+
+$(ERLANG_MK_TMP):
+ $(verbose) mkdir -p $(ERLANG_MK_TMP)
+
+distclean-tmp:
+ $(gen_verbose) rm -rf $(ERLANG_MK_TMP)
+
+help::
+ $(verbose) printf "%s\n" \
+ "erlang.mk (version $(ERLANG_MK_VERSION)) is distributed under the terms of the ISC License." \
+ "Copyright (c) 2013-2016 Loïc Hoguin <essen@ninenines.eu>" \
+ "" \
+ "Usage: [V=1] $(MAKE) [target]..." \
+ "" \
+ "Core targets:" \
+ " all Run deps, app and rel targets in that order" \
+ " app Compile the project" \
+ " deps Fetch dependencies (if needed) and compile them" \
+ " fetch-deps Fetch dependencies recursively (if needed) without compiling them" \
+ " list-deps List dependencies recursively on stdout" \
+ " search q=... Search for a package in the built-in index" \
+ " rel Build a release for this project, if applicable" \
+ " docs Build the documentation for this project" \
+ " install-docs Install the man pages for this project" \
+ " check Compile and run all tests and analysis for this project" \
+ " tests Run the tests for this project" \
+ " clean Delete temporary and output files from most targets" \
+ " distclean Delete all temporary and output files" \
+ " help Display this help and exit" \
+ " erlang-mk Update erlang.mk to the latest version"
+
+# Core functions.
+
+empty :=
+space := $(empty) $(empty)
+tab := $(empty) $(empty)
+comma := ,
+
+define newline
+
+
+endef
+
+define comma_list
+$(subst $(space),$(comma),$(strip $(1)))
+endef
+
+define escape_dquotes
+$(subst ",\",$1)
+endef
+
+# Adding erlang.mk to make Erlang scripts that call init:get_plain_arguments() happy.
+define erlang
+$(ERL) $2 -pz $(ERLANG_MK_TMP)/rebar/ebin -eval "$(subst $(newline),,$(call escape_dquotes,$1))" -- erlang.mk
+endef
+
+ifeq ($(PLATFORM),msys2)
+core_native_path = $(shell cygpath -m $1)
+else
+core_native_path = $1
+endif
+
+core_http_get = curl -Lf$(if $(filter-out 0,$(V)),,s)o $(call core_native_path,$1) $2
+
+core_eq = $(and $(findstring $(1),$(2)),$(findstring $(2),$(1)))
+
+# We skip files that contain spaces because they end up causing issues.
+core_find = $(if $(wildcard $1),$(shell find $(1:%/=%) \( -type l -o -type f \) -name $(subst *,\*,$2) | grep -v " "))
+
+core_lc = $(subst A,a,$(subst B,b,$(subst C,c,$(subst D,d,$(subst E,e,$(subst F,f,$(subst G,g,$(subst H,h,$(subst I,i,$(subst J,j,$(subst K,k,$(subst L,l,$(subst M,m,$(subst N,n,$(subst O,o,$(subst P,p,$(subst Q,q,$(subst R,r,$(subst S,s,$(subst T,t,$(subst U,u,$(subst V,v,$(subst W,w,$(subst X,x,$(subst Y,y,$(subst Z,z,$(1)))))))))))))))))))))))))))
+
+core_ls = $(filter-out $(1),$(shell echo $(1)))
+
+# @todo Use a solution that does not require using perl.
+core_relpath = $(shell perl -e 'use File::Spec; print File::Spec->abs2rel(@ARGV) . "\n"' $1 $2)
+
+define core_render
+ printf -- '$(subst $(newline),\n,$(subst %,%%,$(subst ','\'',$(subst $(tab),$(WS),$(call $(1))))))\n' > $(2)
+endef
+
+# Automated update.
+
+ERLANG_MK_REPO ?= https://github.com/ninenines/erlang.mk
+ERLANG_MK_COMMIT ?=
+ERLANG_MK_BUILD_CONFIG ?= build.config
+ERLANG_MK_BUILD_DIR ?= .erlang.mk.build
+
+erlang-mk: WITHOUT ?= $(ERLANG_MK_WITHOUT)
+erlang-mk:
+ifdef ERLANG_MK_COMMIT
+ $(verbose) git clone $(ERLANG_MK_REPO) $(ERLANG_MK_BUILD_DIR)
+ $(verbose) cd $(ERLANG_MK_BUILD_DIR) && git checkout $(ERLANG_MK_COMMIT)
+else
+ $(verbose) git clone --depth 1 $(ERLANG_MK_REPO) $(ERLANG_MK_BUILD_DIR)
+endif
+ $(verbose) if [ -f $(ERLANG_MK_BUILD_CONFIG) ]; then cp $(ERLANG_MK_BUILD_CONFIG) $(ERLANG_MK_BUILD_DIR)/build.config; fi
+ $(gen_verbose) $(MAKE) --no-print-directory -C $(ERLANG_MK_BUILD_DIR) WITHOUT='$(strip $(WITHOUT))' UPGRADE=1
+ $(verbose) cp $(ERLANG_MK_BUILD_DIR)/erlang.mk ./erlang.mk
+ $(verbose) rm -rf $(ERLANG_MK_BUILD_DIR)
+ $(verbose) rm -rf $(ERLANG_MK_TMP)
+
+# The erlang.mk package index is bundled in the default erlang.mk build.
+# Search for the string "copyright" to skip to the rest of the code.
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-kerl
+
+KERL_INSTALL_DIR ?= $(HOME)/erlang
+
+ifeq ($(strip $(KERL)),)
+KERL := $(ERLANG_MK_TMP)/kerl/kerl
+endif
+
+KERL_DIR = $(ERLANG_MK_TMP)/kerl
+
+export KERL
+
+KERL_GIT ?= https://github.com/kerl/kerl
+KERL_COMMIT ?= master
+
+KERL_MAKEFLAGS ?=
+
+OTP_GIT ?= https://github.com/erlang/otp
+
+define kerl_otp_target
+$(KERL_INSTALL_DIR)/$(1): $(KERL)
+ $(verbose) if [ ! -d $$@ ]; then \
+ MAKEFLAGS="$(KERL_MAKEFLAGS)" $(KERL) build git $(OTP_GIT) $(1) $(1); \
+ $(KERL) install $(1) $(KERL_INSTALL_DIR)/$(1); \
+ fi
+endef
+
+define kerl_hipe_target
+$(KERL_INSTALL_DIR)/$1-native: $(KERL)
+ $(verbose) if [ ! -d $$@ ]; then \
+ KERL_CONFIGURE_OPTIONS=--enable-native-libs \
+ MAKEFLAGS="$(KERL_MAKEFLAGS)" $(KERL) build git $(OTP_GIT) $1 $1-native; \
+ $(KERL) install $1-native $(KERL_INSTALL_DIR)/$1-native; \
+ fi
+endef
+
+$(KERL): $(KERL_DIR)
+
+$(KERL_DIR): | $(ERLANG_MK_TMP)
+ $(gen_verbose) git clone --depth 1 $(KERL_GIT) $(ERLANG_MK_TMP)/kerl
+ $(verbose) cd $(ERLANG_MK_TMP)/kerl && git checkout $(KERL_COMMIT)
+ $(verbose) chmod +x $(KERL)
+
+distclean:: distclean-kerl
+
+distclean-kerl:
+ $(gen_verbose) rm -rf $(KERL_DIR)
+
+# Allow users to select which version of Erlang/OTP to use for a project.
+
+ifneq ($(strip $(LATEST_ERLANG_OTP)),)
+# In some environments it is necessary to filter out master.
+ERLANG_OTP := $(notdir $(lastword $(sort\
+ $(filter-out $(KERL_INSTALL_DIR)/master $(KERL_INSTALL_DIR)/OTP_R%,\
+ $(filter-out %-rc1 %-rc2 %-rc3,$(wildcard $(KERL_INSTALL_DIR)/*[^-native]))))))
+endif
+
+ERLANG_OTP ?=
+ERLANG_HIPE ?=
+
+# Use kerl to enforce a specific Erlang/OTP version for a project.
+ifneq ($(strip $(ERLANG_OTP)),)
+export PATH := $(KERL_INSTALL_DIR)/$(ERLANG_OTP)/bin:$(PATH)
+SHELL := env PATH=$(PATH) $(SHELL)
+$(eval $(call kerl_otp_target,$(ERLANG_OTP)))
+
+# Build Erlang/OTP only if it doesn't already exist.
+ifeq ($(wildcard $(KERL_INSTALL_DIR)/$(ERLANG_OTP))$(BUILD_ERLANG_OTP),)
+$(info Building Erlang/OTP $(ERLANG_OTP)... Please wait...)
+$(shell $(MAKE) $(KERL_INSTALL_DIR)/$(ERLANG_OTP) ERLANG_OTP=$(ERLANG_OTP) BUILD_ERLANG_OTP=1 >&2)
+endif
+
+else
+# Same for a HiPE enabled VM.
+ifneq ($(strip $(ERLANG_HIPE)),)
+export PATH := $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native/bin:$(PATH)
+SHELL := env PATH=$(PATH) $(SHELL)
+$(eval $(call kerl_hipe_target,$(ERLANG_HIPE)))
+
+# Build Erlang/OTP only if it doesn't already exist.
+ifeq ($(wildcard $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native)$(BUILD_ERLANG_OTP),)
+$(info Building HiPE-enabled Erlang/OTP $(ERLANG_OTP)... Please wait...)
+$(shell $(MAKE) $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native ERLANG_HIPE=$(ERLANG_HIPE) BUILD_ERLANG_OTP=1 >&2)
+endif
+
+endif
+endif
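+
+# Example usage (hypothetical version; must resolve to a valid kerl build name):
+#   make ERLANG_OTP=OTP-22.3.4 all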
+
+PACKAGES += aberth
+pkg_aberth_name = aberth
+pkg_aberth_description = Generic BERT-RPC server in Erlang
+pkg_aberth_homepage = https://github.com/a13x/aberth
+pkg_aberth_fetch = git
+pkg_aberth_repo = https://github.com/a13x/aberth
+pkg_aberth_commit = master
+
+PACKAGES += active
+pkg_active_name = active
+pkg_active_description = Active development for Erlang: rebuild and reload source/binary files while the VM is running
+pkg_active_homepage = https://github.com/proger/active
+pkg_active_fetch = git
+pkg_active_repo = https://github.com/proger/active
+pkg_active_commit = master
+
+PACKAGES += actordb_core
+pkg_actordb_core_name = actordb_core
+pkg_actordb_core_description = ActorDB main source
+pkg_actordb_core_homepage = http://www.actordb.com/
+pkg_actordb_core_fetch = git
+pkg_actordb_core_repo = https://github.com/biokoda/actordb_core
+pkg_actordb_core_commit = master
+
+PACKAGES += actordb_thrift
+pkg_actordb_thrift_name = actordb_thrift
+pkg_actordb_thrift_description = Thrift API for ActorDB
+pkg_actordb_thrift_homepage = http://www.actordb.com/
+pkg_actordb_thrift_fetch = git
+pkg_actordb_thrift_repo = https://github.com/biokoda/actordb_thrift
+pkg_actordb_thrift_commit = master
+
+PACKAGES += aleppo
+pkg_aleppo_name = aleppo
+pkg_aleppo_description = Alternative Erlang Pre-Processor
+pkg_aleppo_homepage = https://github.com/ErlyORM/aleppo
+pkg_aleppo_fetch = git
+pkg_aleppo_repo = https://github.com/ErlyORM/aleppo
+pkg_aleppo_commit = master
+
+PACKAGES += alog
+pkg_alog_name = alog
+pkg_alog_description = Simply the best logging framework for Erlang
+pkg_alog_homepage = https://github.com/siberian-fast-food/alogger
+pkg_alog_fetch = git
+pkg_alog_repo = https://github.com/siberian-fast-food/alogger
+pkg_alog_commit = master
+
+PACKAGES += amqp_client
+pkg_amqp_client_name = amqp_client
+pkg_amqp_client_description = RabbitMQ Erlang AMQP client
+pkg_amqp_client_homepage = https://www.rabbitmq.com/erlang-client-user-guide.html
+pkg_amqp_client_fetch = git
+pkg_amqp_client_repo = https://github.com/rabbitmq/rabbitmq-erlang-client.git
+pkg_amqp_client_commit = master
+
+PACKAGES += annotations
+pkg_annotations_name = annotations
+pkg_annotations_description = Simple code instrumentation utilities
+pkg_annotations_homepage = https://github.com/hyperthunk/annotations
+pkg_annotations_fetch = git
+pkg_annotations_repo = https://github.com/hyperthunk/annotations
+pkg_annotations_commit = master
+
+PACKAGES += antidote
+pkg_antidote_name = antidote
+pkg_antidote_description = Large-scale computation without synchronisation
+pkg_antidote_homepage = https://syncfree.lip6.fr/
+pkg_antidote_fetch = git
+pkg_antidote_repo = https://github.com/SyncFree/antidote
+pkg_antidote_commit = master
+
+PACKAGES += apns
+pkg_apns_name = apns
+pkg_apns_description = Apple Push Notification Server for Erlang
+pkg_apns_homepage = http://inaka.github.com/apns4erl
+pkg_apns_fetch = git
+pkg_apns_repo = https://github.com/inaka/apns4erl
+pkg_apns_commit = master
+
+PACKAGES += asciideck
+pkg_asciideck_name = asciideck
+pkg_asciideck_description = Asciidoc for Erlang.
+pkg_asciideck_homepage = https://ninenines.eu
+pkg_asciideck_fetch = git
+pkg_asciideck_repo = https://github.com/ninenines/asciideck
+pkg_asciideck_commit = master
+
+PACKAGES += azdht
+pkg_azdht_name = azdht
+pkg_azdht_description = Azureus Distributed Hash Table (DHT) in Erlang
+pkg_azdht_homepage = https://github.com/arcusfelis/azdht
+pkg_azdht_fetch = git
+pkg_azdht_repo = https://github.com/arcusfelis/azdht
+pkg_azdht_commit = master
+
+PACKAGES += backoff
+pkg_backoff_name = backoff
+pkg_backoff_description = Simple exponential backoffs in Erlang
+pkg_backoff_homepage = https://github.com/ferd/backoff
+pkg_backoff_fetch = git
+pkg_backoff_repo = https://github.com/ferd/backoff
+pkg_backoff_commit = master
+
+PACKAGES += barrel_tcp
+pkg_barrel_tcp_name = barrel_tcp
+pkg_barrel_tcp_description = barrel is a generic TCP acceptor pool with low latency in Erlang.
+pkg_barrel_tcp_homepage = https://github.com/benoitc-attic/barrel_tcp
+pkg_barrel_tcp_fetch = git
+pkg_barrel_tcp_repo = https://github.com/benoitc-attic/barrel_tcp
+pkg_barrel_tcp_commit = master
+
+PACKAGES += basho_bench
+pkg_basho_bench_name = basho_bench
+pkg_basho_bench_description = A load-generation and testing tool for basically whatever you can write a returning Erlang function for.
+pkg_basho_bench_homepage = https://github.com/basho/basho_bench
+pkg_basho_bench_fetch = git
+pkg_basho_bench_repo = https://github.com/basho/basho_bench
+pkg_basho_bench_commit = master
+
+PACKAGES += bcrypt
+pkg_bcrypt_name = bcrypt
+pkg_bcrypt_description = Bcrypt Erlang / C library
+pkg_bcrypt_homepage = https://github.com/erlangpack/bcrypt
+pkg_bcrypt_fetch = git
+pkg_bcrypt_repo = https://github.com/erlangpack/bcrypt.git
+pkg_bcrypt_commit = master
+
+PACKAGES += beam
+pkg_beam_name = beam
+pkg_beam_description = BEAM emulator written in Erlang
+pkg_beam_homepage = https://github.com/tonyrog/beam
+pkg_beam_fetch = git
+pkg_beam_repo = https://github.com/tonyrog/beam
+pkg_beam_commit = master
+
+PACKAGES += beanstalk
+pkg_beanstalk_name = beanstalk
+pkg_beanstalk_description = An Erlang client for beanstalkd
+pkg_beanstalk_homepage = https://github.com/tim/erlang-beanstalk
+pkg_beanstalk_fetch = git
+pkg_beanstalk_repo = https://github.com/tim/erlang-beanstalk
+pkg_beanstalk_commit = master
+
+PACKAGES += bear
+pkg_bear_name = bear
+pkg_bear_description = a set of statistics functions for erlang
+pkg_bear_homepage = https://github.com/boundary/bear
+pkg_bear_fetch = git
+pkg_bear_repo = https://github.com/boundary/bear
+pkg_bear_commit = master
+
+PACKAGES += bertconf
+pkg_bertconf_name = bertconf
+pkg_bertconf_description = Make ETS tables out of static BERT files that are auto-reloaded
+pkg_bertconf_homepage = https://github.com/ferd/bertconf
+pkg_bertconf_fetch = git
+pkg_bertconf_repo = https://github.com/ferd/bertconf
+pkg_bertconf_commit = master
+
+PACKAGES += bifrost
+pkg_bifrost_name = bifrost
+pkg_bifrost_description = Erlang FTP Server Framework
+pkg_bifrost_homepage = https://github.com/thorstadt/bifrost
+pkg_bifrost_fetch = git
+pkg_bifrost_repo = https://github.com/thorstadt/bifrost
+pkg_bifrost_commit = master
+
+PACKAGES += binpp
+pkg_binpp_name = binpp
+pkg_binpp_description = Erlang Binary Pretty Printer
+pkg_binpp_homepage = https://github.com/jtendo/binpp
+pkg_binpp_fetch = git
+pkg_binpp_repo = https://github.com/jtendo/binpp
+pkg_binpp_commit = master
+
+PACKAGES += bisect
+pkg_bisect_name = bisect
+pkg_bisect_description = Ordered fixed-size binary dictionary in Erlang
+pkg_bisect_homepage = https://github.com/knutin/bisect
+pkg_bisect_fetch = git
+pkg_bisect_repo = https://github.com/knutin/bisect
+pkg_bisect_commit = master
+
+PACKAGES += bitcask
+pkg_bitcask_name = bitcask
+pkg_bitcask_description = because you need another key/value storage engine
+pkg_bitcask_homepage = https://github.com/basho/bitcask
+pkg_bitcask_fetch = git
+pkg_bitcask_repo = https://github.com/basho/bitcask
+pkg_bitcask_commit = develop
+
+PACKAGES += bitstore
+pkg_bitstore_name = bitstore
+pkg_bitstore_description = A document based ontology development environment
+pkg_bitstore_homepage = https://github.com/bdionne/bitstore
+pkg_bitstore_fetch = git
+pkg_bitstore_repo = https://github.com/bdionne/bitstore
+pkg_bitstore_commit = master
+
+PACKAGES += bootstrap
+pkg_bootstrap_name = bootstrap
+pkg_bootstrap_description = A simple, yet powerful Erlang cluster bootstrapping application.
+pkg_bootstrap_homepage = https://github.com/schlagert/bootstrap
+pkg_bootstrap_fetch = git
+pkg_bootstrap_repo = https://github.com/schlagert/bootstrap
+pkg_bootstrap_commit = master
+
+PACKAGES += boss
+pkg_boss_name = boss
+pkg_boss_description = Erlang web MVC, now featuring Comet
+pkg_boss_homepage = https://github.com/ChicagoBoss/ChicagoBoss
+pkg_boss_fetch = git
+pkg_boss_repo = https://github.com/ChicagoBoss/ChicagoBoss
+pkg_boss_commit = master
+
+PACKAGES += boss_db
+pkg_boss_db_name = boss_db
+pkg_boss_db_description = BossDB: a sharded, caching, pooling, evented ORM for Erlang
+pkg_boss_db_homepage = https://github.com/ErlyORM/boss_db
+pkg_boss_db_fetch = git
+pkg_boss_db_repo = https://github.com/ErlyORM/boss_db
+pkg_boss_db_commit = master
+
+PACKAGES += brod
+pkg_brod_name = brod
+pkg_brod_description = Kafka client in Erlang
+pkg_brod_homepage = https://github.com/klarna/brod
+pkg_brod_fetch = git
+pkg_brod_repo = https://github.com/klarna/brod.git
+pkg_brod_commit = master
+
+PACKAGES += bson
+pkg_bson_name = bson
+pkg_bson_description = BSON documents in Erlang, see bsonspec.org
+pkg_bson_homepage = https://github.com/comtihon/bson-erlang
+pkg_bson_fetch = git
+pkg_bson_repo = https://github.com/comtihon/bson-erlang
+pkg_bson_commit = master
+
+PACKAGES += bullet
+pkg_bullet_name = bullet
+pkg_bullet_description = Simple, reliable, efficient streaming for Cowboy.
+pkg_bullet_homepage = http://ninenines.eu
+pkg_bullet_fetch = git
+pkg_bullet_repo = https://github.com/ninenines/bullet
+pkg_bullet_commit = master
+
+PACKAGES += cache
+pkg_cache_name = cache
+pkg_cache_description = Erlang in-memory cache
+pkg_cache_homepage = https://github.com/fogfish/cache
+pkg_cache_fetch = git
+pkg_cache_repo = https://github.com/fogfish/cache
+pkg_cache_commit = master
+
+PACKAGES += cake
+pkg_cake_name = cake
+pkg_cake_description = Really simple terminal colorization
+pkg_cake_homepage = https://github.com/darach/cake-erl
+pkg_cake_fetch = git
+pkg_cake_repo = https://github.com/darach/cake-erl
+pkg_cake_commit = master
+
+PACKAGES += carotene
+pkg_carotene_name = carotene
+pkg_carotene_description = Real-time server
+pkg_carotene_homepage = https://github.com/carotene/carotene
+pkg_carotene_fetch = git
+pkg_carotene_repo = https://github.com/carotene/carotene
+pkg_carotene_commit = master
+
+PACKAGES += cberl
+pkg_cberl_name = cberl
+pkg_cberl_description = NIF based Erlang bindings for Couchbase
+pkg_cberl_homepage = https://github.com/chitika/cberl
+pkg_cberl_fetch = git
+pkg_cberl_repo = https://github.com/chitika/cberl
+pkg_cberl_commit = master
+
+PACKAGES += cecho
+pkg_cecho_name = cecho
+pkg_cecho_description = An ncurses library for Erlang
+pkg_cecho_homepage = https://github.com/mazenharake/cecho
+pkg_cecho_fetch = git
+pkg_cecho_repo = https://github.com/mazenharake/cecho
+pkg_cecho_commit = master
+
+PACKAGES += cferl
+pkg_cferl_name = cferl
+pkg_cferl_description = Rackspace / Open Stack Cloud Files Erlang Client
+pkg_cferl_homepage = https://github.com/ddossot/cferl
+pkg_cferl_fetch = git
+pkg_cferl_repo = https://github.com/ddossot/cferl
+pkg_cferl_commit = master
+
+PACKAGES += chaos_monkey
+pkg_chaos_monkey_name = chaos_monkey
+pkg_chaos_monkey_description = This is The CHAOS MONKEY. It will kill your processes.
+pkg_chaos_monkey_homepage = https://github.com/dLuna/chaos_monkey
+pkg_chaos_monkey_fetch = git
+pkg_chaos_monkey_repo = https://github.com/dLuna/chaos_monkey
+pkg_chaos_monkey_commit = master
+
+PACKAGES += check_node
+pkg_check_node_name = check_node
+pkg_check_node_description = Nagios Scripts for monitoring Riak
+pkg_check_node_homepage = https://github.com/basho-labs/riak_nagios
+pkg_check_node_fetch = git
+pkg_check_node_repo = https://github.com/basho-labs/riak_nagios
+pkg_check_node_commit = master
+
+PACKAGES += chronos
+pkg_chronos_name = chronos
+pkg_chronos_description = Timer module for Erlang that makes it easy to abstract time out of the tests.
+pkg_chronos_homepage = https://github.com/lehoff/chronos
+pkg_chronos_fetch = git
+pkg_chronos_repo = https://github.com/lehoff/chronos
+pkg_chronos_commit = master
+
+PACKAGES += chumak
+pkg_chumak_name = chumak
+pkg_chumak_description = Pure Erlang implementation of ZeroMQ Message Transport Protocol.
+pkg_chumak_homepage = http://choven.ca
+pkg_chumak_fetch = git
+pkg_chumak_repo = https://github.com/chovencorp/chumak
+pkg_chumak_commit = master
+
+PACKAGES += cl
+pkg_cl_name = cl
+pkg_cl_description = OpenCL binding for Erlang
+pkg_cl_homepage = https://github.com/tonyrog/cl
+pkg_cl_fetch = git
+pkg_cl_repo = https://github.com/tonyrog/cl
+pkg_cl_commit = master
+
+PACKAGES += clique
+pkg_clique_name = clique
+pkg_clique_description = CLI Framework for Erlang
+pkg_clique_homepage = https://github.com/basho/clique
+pkg_clique_fetch = git
+pkg_clique_repo = https://github.com/basho/clique
+pkg_clique_commit = develop
+
+PACKAGES += cloudi_core
+pkg_cloudi_core_name = cloudi_core
+pkg_cloudi_core_description = CloudI internal service runtime
+pkg_cloudi_core_homepage = http://cloudi.org/
+pkg_cloudi_core_fetch = git
+pkg_cloudi_core_repo = https://github.com/CloudI/cloudi_core
+pkg_cloudi_core_commit = master
+
+PACKAGES += cloudi_service_api_requests
+pkg_cloudi_service_api_requests_name = cloudi_service_api_requests
+pkg_cloudi_service_api_requests_description = CloudI Service API requests (JSON-RPC/Erlang-term support)
+pkg_cloudi_service_api_requests_homepage = http://cloudi.org/
+pkg_cloudi_service_api_requests_fetch = git
+pkg_cloudi_service_api_requests_repo = https://github.com/CloudI/cloudi_service_api_requests
+pkg_cloudi_service_api_requests_commit = master
+
+PACKAGES += cloudi_service_db
+pkg_cloudi_service_db_name = cloudi_service_db
+pkg_cloudi_service_db_description = CloudI Database (in-memory/testing/generic)
+pkg_cloudi_service_db_homepage = http://cloudi.org/
+pkg_cloudi_service_db_fetch = git
+pkg_cloudi_service_db_repo = https://github.com/CloudI/cloudi_service_db
+pkg_cloudi_service_db_commit = master
+
+PACKAGES += cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_name = cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_description = Cassandra CloudI Service
+pkg_cloudi_service_db_cassandra_homepage = http://cloudi.org/
+pkg_cloudi_service_db_cassandra_fetch = git
+pkg_cloudi_service_db_cassandra_repo = https://github.com/CloudI/cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_commit = master
+
+PACKAGES += cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_name = cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_description = Cassandra CQL CloudI Service
+pkg_cloudi_service_db_cassandra_cql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_cassandra_cql_fetch = git
+pkg_cloudi_service_db_cassandra_cql_repo = https://github.com/CloudI/cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_commit = master
+
+PACKAGES += cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_name = cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_description = CouchDB CloudI Service
+pkg_cloudi_service_db_couchdb_homepage = http://cloudi.org/
+pkg_cloudi_service_db_couchdb_fetch = git
+pkg_cloudi_service_db_couchdb_repo = https://github.com/CloudI/cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_commit = master
+
+PACKAGES += cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_name = cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_description = elasticsearch CloudI Service
+pkg_cloudi_service_db_elasticsearch_homepage = http://cloudi.org/
+pkg_cloudi_service_db_elasticsearch_fetch = git
+pkg_cloudi_service_db_elasticsearch_repo = https://github.com/CloudI/cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_commit = master
+
+PACKAGES += cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_name = cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_description = memcached CloudI Service
+pkg_cloudi_service_db_memcached_homepage = http://cloudi.org/
+pkg_cloudi_service_db_memcached_fetch = git
+pkg_cloudi_service_db_memcached_repo = https://github.com/CloudI/cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_commit = master
+
+PACKAGES += cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_name = cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_description = MySQL CloudI Service
+pkg_cloudi_service_db_mysql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_mysql_fetch = git
+pkg_cloudi_service_db_mysql_repo = https://github.com/CloudI/cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_commit = master
+
+PACKAGES += cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_name = cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_description = PostgreSQL CloudI Service
+pkg_cloudi_service_db_pgsql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_pgsql_fetch = git
+pkg_cloudi_service_db_pgsql_repo = https://github.com/CloudI/cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_commit = master
+
+PACKAGES += cloudi_service_db_riak
+pkg_cloudi_service_db_riak_name = cloudi_service_db_riak
+pkg_cloudi_service_db_riak_description = Riak CloudI Service
+pkg_cloudi_service_db_riak_homepage = http://cloudi.org/
+pkg_cloudi_service_db_riak_fetch = git
+pkg_cloudi_service_db_riak_repo = https://github.com/CloudI/cloudi_service_db_riak
+pkg_cloudi_service_db_riak_commit = master
+
+PACKAGES += cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_name = cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_description = Tokyo Tyrant CloudI Service
+pkg_cloudi_service_db_tokyotyrant_homepage = http://cloudi.org/
+pkg_cloudi_service_db_tokyotyrant_fetch = git
+pkg_cloudi_service_db_tokyotyrant_repo = https://github.com/CloudI/cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_commit = master
+
+PACKAGES += cloudi_service_filesystem
+pkg_cloudi_service_filesystem_name = cloudi_service_filesystem
+pkg_cloudi_service_filesystem_description = Filesystem CloudI Service
+pkg_cloudi_service_filesystem_homepage = http://cloudi.org/
+pkg_cloudi_service_filesystem_fetch = git
+pkg_cloudi_service_filesystem_repo = https://github.com/CloudI/cloudi_service_filesystem
+pkg_cloudi_service_filesystem_commit = master
+
+PACKAGES += cloudi_service_http_client
+pkg_cloudi_service_http_client_name = cloudi_service_http_client
+pkg_cloudi_service_http_client_description = HTTP client CloudI Service
+pkg_cloudi_service_http_client_homepage = http://cloudi.org/
+pkg_cloudi_service_http_client_fetch = git
+pkg_cloudi_service_http_client_repo = https://github.com/CloudI/cloudi_service_http_client
+pkg_cloudi_service_http_client_commit = master
+
+PACKAGES += cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_name = cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_description = cowboy HTTP/HTTPS CloudI Service
+pkg_cloudi_service_http_cowboy_homepage = http://cloudi.org/
+pkg_cloudi_service_http_cowboy_fetch = git
+pkg_cloudi_service_http_cowboy_repo = https://github.com/CloudI/cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_commit = master
+
+PACKAGES += cloudi_service_http_elli
+pkg_cloudi_service_http_elli_name = cloudi_service_http_elli
+pkg_cloudi_service_http_elli_description = elli HTTP CloudI Service
+pkg_cloudi_service_http_elli_homepage = http://cloudi.org/
+pkg_cloudi_service_http_elli_fetch = git
+pkg_cloudi_service_http_elli_repo = https://github.com/CloudI/cloudi_service_http_elli
+pkg_cloudi_service_http_elli_commit = master
+
+PACKAGES += cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_name = cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_description = Map/Reduce CloudI Service
+pkg_cloudi_service_map_reduce_homepage = http://cloudi.org/
+pkg_cloudi_service_map_reduce_fetch = git
+pkg_cloudi_service_map_reduce_repo = https://github.com/CloudI/cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_commit = master
+
+PACKAGES += cloudi_service_oauth1
+pkg_cloudi_service_oauth1_name = cloudi_service_oauth1
+pkg_cloudi_service_oauth1_description = OAuth v1.0 CloudI Service
+pkg_cloudi_service_oauth1_homepage = http://cloudi.org/
+pkg_cloudi_service_oauth1_fetch = git
+pkg_cloudi_service_oauth1_repo = https://github.com/CloudI/cloudi_service_oauth1
+pkg_cloudi_service_oauth1_commit = master
+
+PACKAGES += cloudi_service_queue
+pkg_cloudi_service_queue_name = cloudi_service_queue
+pkg_cloudi_service_queue_description = Persistent Queue Service
+pkg_cloudi_service_queue_homepage = http://cloudi.org/
+pkg_cloudi_service_queue_fetch = git
+pkg_cloudi_service_queue_repo = https://github.com/CloudI/cloudi_service_queue
+pkg_cloudi_service_queue_commit = master
+
+PACKAGES += cloudi_service_quorum
+pkg_cloudi_service_quorum_name = cloudi_service_quorum
+pkg_cloudi_service_quorum_description = CloudI Quorum Service
+pkg_cloudi_service_quorum_homepage = http://cloudi.org/
+pkg_cloudi_service_quorum_fetch = git
+pkg_cloudi_service_quorum_repo = https://github.com/CloudI/cloudi_service_quorum
+pkg_cloudi_service_quorum_commit = master
+
+PACKAGES += cloudi_service_router
+pkg_cloudi_service_router_name = cloudi_service_router
+pkg_cloudi_service_router_description = CloudI Router Service
+pkg_cloudi_service_router_homepage = http://cloudi.org/
+pkg_cloudi_service_router_fetch = git
+pkg_cloudi_service_router_repo = https://github.com/CloudI/cloudi_service_router
+pkg_cloudi_service_router_commit = master
+
+PACKAGES += cloudi_service_tcp
+pkg_cloudi_service_tcp_name = cloudi_service_tcp
+pkg_cloudi_service_tcp_description = TCP CloudI Service
+pkg_cloudi_service_tcp_homepage = http://cloudi.org/
+pkg_cloudi_service_tcp_fetch = git
+pkg_cloudi_service_tcp_repo = https://github.com/CloudI/cloudi_service_tcp
+pkg_cloudi_service_tcp_commit = master
+
+PACKAGES += cloudi_service_timers
+pkg_cloudi_service_timers_name = cloudi_service_timers
+pkg_cloudi_service_timers_description = Timers CloudI Service
+pkg_cloudi_service_timers_homepage = http://cloudi.org/
+pkg_cloudi_service_timers_fetch = git
+pkg_cloudi_service_timers_repo = https://github.com/CloudI/cloudi_service_timers
+pkg_cloudi_service_timers_commit = master
+
+PACKAGES += cloudi_service_udp
+pkg_cloudi_service_udp_name = cloudi_service_udp
+pkg_cloudi_service_udp_description = UDP CloudI Service
+pkg_cloudi_service_udp_homepage = http://cloudi.org/
+pkg_cloudi_service_udp_fetch = git
+pkg_cloudi_service_udp_repo = https://github.com/CloudI/cloudi_service_udp
+pkg_cloudi_service_udp_commit = master
+
+PACKAGES += cloudi_service_validate
+pkg_cloudi_service_validate_name = cloudi_service_validate
+pkg_cloudi_service_validate_description = CloudI Validate Service
+pkg_cloudi_service_validate_homepage = http://cloudi.org/
+pkg_cloudi_service_validate_fetch = git
+pkg_cloudi_service_validate_repo = https://github.com/CloudI/cloudi_service_validate
+pkg_cloudi_service_validate_commit = master
+
+PACKAGES += cloudi_service_zeromq
+pkg_cloudi_service_zeromq_name = cloudi_service_zeromq
+pkg_cloudi_service_zeromq_description = ZeroMQ CloudI Service
+pkg_cloudi_service_zeromq_homepage = http://cloudi.org/
+pkg_cloudi_service_zeromq_fetch = git
+pkg_cloudi_service_zeromq_repo = https://github.com/CloudI/cloudi_service_zeromq
+pkg_cloudi_service_zeromq_commit = master
+
+PACKAGES += cluster_info
+pkg_cluster_info_name = cluster_info
+pkg_cluster_info_description = Fork of Hibari's nifty cluster_info OTP app
+pkg_cluster_info_homepage = https://github.com/basho/cluster_info
+pkg_cluster_info_fetch = git
+pkg_cluster_info_repo = https://github.com/basho/cluster_info
+pkg_cluster_info_commit = master
+
+PACKAGES += color
+pkg_color_name = color
+pkg_color_description = ANSI colors for your Erlang
+pkg_color_homepage = https://github.com/julianduque/erlang-color
+pkg_color_fetch = git
+pkg_color_repo = https://github.com/julianduque/erlang-color
+pkg_color_commit = master
+
+PACKAGES += confetti
+pkg_confetti_name = confetti
+pkg_confetti_description = Erlang configuration provider / application:get_env/2 on steroids
+pkg_confetti_homepage = https://github.com/jtendo/confetti
+pkg_confetti_fetch = git
+pkg_confetti_repo = https://github.com/jtendo/confetti
+pkg_confetti_commit = master
+
+PACKAGES += couchbeam
+pkg_couchbeam_name = couchbeam
+pkg_couchbeam_description = Apache CouchDB client in Erlang
+pkg_couchbeam_homepage = https://github.com/benoitc/couchbeam
+pkg_couchbeam_fetch = git
+pkg_couchbeam_repo = https://github.com/benoitc/couchbeam
+pkg_couchbeam_commit = master
+
+PACKAGES += covertool
+pkg_covertool_name = covertool
+pkg_covertool_description = Tool to convert Erlang cover data files into Cobertura XML reports
+pkg_covertool_homepage = https://github.com/idubrov/covertool
+pkg_covertool_fetch = git
+pkg_covertool_repo = https://github.com/idubrov/covertool
+pkg_covertool_commit = master
+
+PACKAGES += cowboy
+pkg_cowboy_name = cowboy
+pkg_cowboy_description = Small, fast and modular HTTP server.
+pkg_cowboy_homepage = http://ninenines.eu
+pkg_cowboy_fetch = git
+pkg_cowboy_repo = https://github.com/ninenines/cowboy
+pkg_cowboy_commit = 1.0.4
+
+PACKAGES += cowdb
+pkg_cowdb_name = cowdb
+pkg_cowdb_description = Pure Key/Value database library for Erlang Applications
+pkg_cowdb_homepage = https://github.com/refuge/cowdb
+pkg_cowdb_fetch = git
+pkg_cowdb_repo = https://github.com/refuge/cowdb
+pkg_cowdb_commit = master
+
+PACKAGES += cowlib
+pkg_cowlib_name = cowlib
+pkg_cowlib_description = Support library for manipulating Web protocols.
+pkg_cowlib_homepage = http://ninenines.eu
+pkg_cowlib_fetch = git
+pkg_cowlib_repo = https://github.com/ninenines/cowlib
+pkg_cowlib_commit = 1.0.2
+
+PACKAGES += cpg
+pkg_cpg_name = cpg
+pkg_cpg_description = CloudI Process Groups
+pkg_cpg_homepage = https://github.com/okeuday/cpg
+pkg_cpg_fetch = git
+pkg_cpg_repo = https://github.com/okeuday/cpg
+pkg_cpg_commit = master
+
+PACKAGES += cqerl
+pkg_cqerl_name = cqerl
+pkg_cqerl_description = Native Erlang CQL client for Cassandra
+pkg_cqerl_homepage = https://matehat.github.io/cqerl/
+pkg_cqerl_fetch = git
+pkg_cqerl_repo = https://github.com/matehat/cqerl
+pkg_cqerl_commit = master
+
+PACKAGES += cr
+pkg_cr_name = cr
+pkg_cr_description = Chain Replication
+pkg_cr_homepage = https://synrc.com/apps/cr/doc/cr.htm
+pkg_cr_fetch = git
+pkg_cr_repo = https://github.com/spawnproc/cr
+pkg_cr_commit = master
+
+PACKAGES += cuttlefish
+pkg_cuttlefish_name = cuttlefish
+pkg_cuttlefish_description = never lose your childlike sense of wonder baby cuttlefish, promise me?
+pkg_cuttlefish_homepage = https://github.com/basho/cuttlefish
+pkg_cuttlefish_fetch = git
+pkg_cuttlefish_repo = https://github.com/basho/cuttlefish
+pkg_cuttlefish_commit = master
+
+PACKAGES += damocles
+pkg_damocles_name = damocles
+pkg_damocles_description = Erlang library for generating adversarial network conditions for QAing distributed applications/systems on a single Linux box.
+pkg_damocles_homepage = https://github.com/lostcolony/damocles
+pkg_damocles_fetch = git
+pkg_damocles_repo = https://github.com/lostcolony/damocles
+pkg_damocles_commit = master
+
+PACKAGES += debbie
+pkg_debbie_name = debbie
+pkg_debbie_description = .DEB Built In Erlang
+pkg_debbie_homepage = https://github.com/crownedgrouse/debbie
+pkg_debbie_fetch = git
+pkg_debbie_repo = https://github.com/crownedgrouse/debbie
+pkg_debbie_commit = master
+
+PACKAGES += decimal
+pkg_decimal_name = decimal
+pkg_decimal_description = An Erlang decimal arithmetic library
+pkg_decimal_homepage = https://github.com/tim/erlang-decimal
+pkg_decimal_fetch = git
+pkg_decimal_repo = https://github.com/tim/erlang-decimal
+pkg_decimal_commit = master
+
+PACKAGES += detergent
+pkg_detergent_name = detergent
+pkg_detergent_description = An emulsifying Erlang SOAP library
+pkg_detergent_homepage = https://github.com/devinus/detergent
+pkg_detergent_fetch = git
+pkg_detergent_repo = https://github.com/devinus/detergent
+pkg_detergent_commit = master
+
+PACKAGES += detest
+pkg_detest_name = detest
+pkg_detest_description = Tool for running tests on a cluster of erlang nodes
+pkg_detest_homepage = https://github.com/biokoda/detest
+pkg_detest_fetch = git
+pkg_detest_repo = https://github.com/biokoda/detest
+pkg_detest_commit = master
+
+PACKAGES += dh_date
+pkg_dh_date_name = dh_date
+pkg_dh_date_description = Date formatting / parsing library for erlang
+pkg_dh_date_homepage = https://github.com/daleharvey/dh_date
+pkg_dh_date_fetch = git
+pkg_dh_date_repo = https://github.com/daleharvey/dh_date
+pkg_dh_date_commit = master
+
+PACKAGES += dirbusterl
+pkg_dirbusterl_name = dirbusterl
+pkg_dirbusterl_description = DirBuster successor in Erlang
+pkg_dirbusterl_homepage = https://github.com/silentsignal/DirBustErl
+pkg_dirbusterl_fetch = git
+pkg_dirbusterl_repo = https://github.com/silentsignal/DirBustErl
+pkg_dirbusterl_commit = master
+
+PACKAGES += dispcount
+pkg_dispcount_name = dispcount
+pkg_dispcount_description = Erlang task dispatcher based on ETS counters.
+pkg_dispcount_homepage = https://github.com/ferd/dispcount
+pkg_dispcount_fetch = git
+pkg_dispcount_repo = https://github.com/ferd/dispcount
+pkg_dispcount_commit = master
+
+PACKAGES += dlhttpc
+pkg_dlhttpc_name = dlhttpc
+pkg_dlhttpc_description = dispcount-based lhttpc fork for massive amounts of requests to limited endpoints
+pkg_dlhttpc_homepage = https://github.com/ferd/dlhttpc
+pkg_dlhttpc_fetch = git
+pkg_dlhttpc_repo = https://github.com/ferd/dlhttpc
+pkg_dlhttpc_commit = master
+
+PACKAGES += dns
+pkg_dns_name = dns
+pkg_dns_description = Erlang DNS library
+pkg_dns_homepage = https://github.com/aetrion/dns_erlang
+pkg_dns_fetch = git
+pkg_dns_repo = https://github.com/aetrion/dns_erlang
+pkg_dns_commit = master
+
+PACKAGES += dnssd
+pkg_dnssd_name = dnssd
+pkg_dnssd_description = Erlang interface to Apple's Bonjour DNS Service Discovery implementation
+pkg_dnssd_homepage = https://github.com/benoitc/dnssd_erlang
+pkg_dnssd_fetch = git
+pkg_dnssd_repo = https://github.com/benoitc/dnssd_erlang
+pkg_dnssd_commit = master
+
+PACKAGES += dynamic_compile
+pkg_dynamic_compile_name = dynamic_compile
+pkg_dynamic_compile_description = compile and load erlang modules from string input
+pkg_dynamic_compile_homepage = https://github.com/jkvor/dynamic_compile
+pkg_dynamic_compile_fetch = git
+pkg_dynamic_compile_repo = https://github.com/jkvor/dynamic_compile
+pkg_dynamic_compile_commit = master
+
+PACKAGES += e2
+pkg_e2_name = e2
+pkg_e2_description = Library to simplify writing correct OTP applications.
+pkg_e2_homepage = http://e2project.org
+pkg_e2_fetch = git
+pkg_e2_repo = https://github.com/gar1t/e2
+pkg_e2_commit = master
+
+PACKAGES += eamf
+pkg_eamf_name = eamf
+pkg_eamf_description = eAMF provides Action Message Format (AMF) support for Erlang
+pkg_eamf_homepage = https://github.com/mrinalwadhwa/eamf
+pkg_eamf_fetch = git
+pkg_eamf_repo = https://github.com/mrinalwadhwa/eamf
+pkg_eamf_commit = master
+
+PACKAGES += eavro
+pkg_eavro_name = eavro
+pkg_eavro_description = Apache Avro encoder/decoder
+pkg_eavro_homepage = https://github.com/SIfoxDevTeam/eavro
+pkg_eavro_fetch = git
+pkg_eavro_repo = https://github.com/SIfoxDevTeam/eavro
+pkg_eavro_commit = master
+
+PACKAGES += ecapnp
+pkg_ecapnp_name = ecapnp
+pkg_ecapnp_description = Cap'n Proto library for Erlang
+pkg_ecapnp_homepage = https://github.com/kaos/ecapnp
+pkg_ecapnp_fetch = git
+pkg_ecapnp_repo = https://github.com/kaos/ecapnp
+pkg_ecapnp_commit = master
+
+PACKAGES += econfig
+pkg_econfig_name = econfig
+pkg_econfig_description = simple Erlang config handler using INI files
+pkg_econfig_homepage = https://github.com/benoitc/econfig
+pkg_econfig_fetch = git
+pkg_econfig_repo = https://github.com/benoitc/econfig
+pkg_econfig_commit = master
+
+PACKAGES += edate
+pkg_edate_name = edate
+pkg_edate_description = date manipulation library for erlang
+pkg_edate_homepage = https://github.com/dweldon/edate
+pkg_edate_fetch = git
+pkg_edate_repo = https://github.com/dweldon/edate
+pkg_edate_commit = master
+
+PACKAGES += edgar
+pkg_edgar_name = edgar
+pkg_edgar_description = Erlang Does GNU AR
+pkg_edgar_homepage = https://github.com/crownedgrouse/edgar
+pkg_edgar_fetch = git
+pkg_edgar_repo = https://github.com/crownedgrouse/edgar
+pkg_edgar_commit = master
+
+PACKAGES += edis
+pkg_edis_name = edis
+pkg_edis_description = An Erlang implementation of Redis KV Store
+pkg_edis_homepage = http://inaka.github.com/edis/
+pkg_edis_fetch = git
+pkg_edis_repo = https://github.com/inaka/edis
+pkg_edis_commit = master
+
+PACKAGES += edns
+pkg_edns_name = edns
+pkg_edns_description = Erlang/OTP DNS server
+pkg_edns_homepage = https://github.com/hcvst/erlang-dns
+pkg_edns_fetch = git
+pkg_edns_repo = https://github.com/hcvst/erlang-dns
+pkg_edns_commit = master
+
+PACKAGES += edown
+pkg_edown_name = edown
+pkg_edown_description = EDoc extension for generating Github-flavored Markdown
+pkg_edown_homepage = https://github.com/uwiger/edown
+pkg_edown_fetch = git
+pkg_edown_repo = https://github.com/uwiger/edown
+pkg_edown_commit = master
+
+PACKAGES += eep
+pkg_eep_name = eep
+pkg_eep_description = Erlang Easy Profiling (eep) application provides a way to analyze application performance and call hierarchy
+pkg_eep_homepage = https://github.com/virtan/eep
+pkg_eep_fetch = git
+pkg_eep_repo = https://github.com/virtan/eep
+pkg_eep_commit = master
+
+PACKAGES += eep_app
+pkg_eep_app_name = eep_app
+pkg_eep_app_description = Embedded Event Processing
+pkg_eep_app_homepage = https://github.com/darach/eep-erl
+pkg_eep_app_fetch = git
+pkg_eep_app_repo = https://github.com/darach/eep-erl
+pkg_eep_app_commit = master
+
+PACKAGES += efene
+pkg_efene_name = efene
+pkg_efene_description = Alternative syntax for the Erlang Programming Language focusing on simplicity, ease of use and programmer UX
+pkg_efene_homepage = https://github.com/efene/efene
+pkg_efene_fetch = git
+pkg_efene_repo = https://github.com/efene/efene
+pkg_efene_commit = master
+
+PACKAGES += egeoip
+pkg_egeoip_name = egeoip
+pkg_egeoip_description = Erlang IP Geolocation module, currently supporting the MaxMind GeoLite City Database.
+pkg_egeoip_homepage = https://github.com/mochi/egeoip
+pkg_egeoip_fetch = git
+pkg_egeoip_repo = https://github.com/mochi/egeoip
+pkg_egeoip_commit = master
+
+PACKAGES += ehsa
+pkg_ehsa_name = ehsa
+pkg_ehsa_description = Erlang HTTP server basic and digest authentication modules
+pkg_ehsa_homepage = https://bitbucket.org/a12n/ehsa
+pkg_ehsa_fetch = hg
+pkg_ehsa_repo = https://bitbucket.org/a12n/ehsa
+pkg_ehsa_commit = default
+
+PACKAGES += ej
+pkg_ej_name = ej
+pkg_ej_description = Helper module for working with Erlang terms representing JSON
+pkg_ej_homepage = https://github.com/seth/ej
+pkg_ej_fetch = git
+pkg_ej_repo = https://github.com/seth/ej
+pkg_ej_commit = master
+
+PACKAGES += ejabberd
+pkg_ejabberd_name = ejabberd
+pkg_ejabberd_description = Robust, ubiquitous and massively scalable Jabber / XMPP Instant Messaging platform
+pkg_ejabberd_homepage = https://github.com/processone/ejabberd
+pkg_ejabberd_fetch = git
+pkg_ejabberd_repo = https://github.com/processone/ejabberd
+pkg_ejabberd_commit = master
+
+PACKAGES += ejwt
+pkg_ejwt_name = ejwt
+pkg_ejwt_description = erlang library for JSON Web Token
+pkg_ejwt_homepage = https://github.com/artefactop/ejwt
+pkg_ejwt_fetch = git
+pkg_ejwt_repo = https://github.com/artefactop/ejwt
+pkg_ejwt_commit = master
+
+PACKAGES += ekaf
+pkg_ekaf_name = ekaf
+pkg_ekaf_description = A minimal, high-performance Kafka client in Erlang.
+pkg_ekaf_homepage = https://github.com/helpshift/ekaf
+pkg_ekaf_fetch = git
+pkg_ekaf_repo = https://github.com/helpshift/ekaf
+pkg_ekaf_commit = master
+
+PACKAGES += elarm
+pkg_elarm_name = elarm
+pkg_elarm_description = Alarm Manager for Erlang.
+pkg_elarm_homepage = https://github.com/esl/elarm
+pkg_elarm_fetch = git
+pkg_elarm_repo = https://github.com/esl/elarm
+pkg_elarm_commit = master
+
+PACKAGES += eleveldb
+pkg_eleveldb_name = eleveldb
+pkg_eleveldb_description = Erlang LevelDB API
+pkg_eleveldb_homepage = https://github.com/basho/eleveldb
+pkg_eleveldb_fetch = git
+pkg_eleveldb_repo = https://github.com/basho/eleveldb
+pkg_eleveldb_commit = master
+
+PACKAGES += elixir
+pkg_elixir_name = elixir
+pkg_elixir_description = Elixir is a dynamic, functional language designed for building scalable and maintainable applications
+pkg_elixir_homepage = https://elixir-lang.org/
+pkg_elixir_fetch = git
+pkg_elixir_repo = https://github.com/elixir-lang/elixir
+pkg_elixir_commit = master
+
+PACKAGES += elli
+pkg_elli_name = elli
+pkg_elli_description = Simple, robust and performant Erlang web server
+pkg_elli_homepage = https://github.com/elli-lib/elli
+pkg_elli_fetch = git
+pkg_elli_repo = https://github.com/elli-lib/elli
+pkg_elli_commit = master
+
+PACKAGES += elvis
+pkg_elvis_name = elvis
+pkg_elvis_description = Erlang Style Reviewer
+pkg_elvis_homepage = https://github.com/inaka/elvis
+pkg_elvis_fetch = git
+pkg_elvis_repo = https://github.com/inaka/elvis
+pkg_elvis_commit = master
+
+PACKAGES += emagick
+pkg_emagick_name = emagick
+pkg_emagick_description = Wrapper for Graphics/ImageMagick command line tool.
+pkg_emagick_homepage = https://github.com/kivra/emagick
+pkg_emagick_fetch = git
+pkg_emagick_repo = https://github.com/kivra/emagick
+pkg_emagick_commit = master
+
+PACKAGES += emysql
+pkg_emysql_name = emysql
+pkg_emysql_description = Stable, pure Erlang MySQL driver.
+pkg_emysql_homepage = https://github.com/Eonblast/Emysql
+pkg_emysql_fetch = git
+pkg_emysql_repo = https://github.com/Eonblast/Emysql
+pkg_emysql_commit = master
+
+PACKAGES += enm
+pkg_enm_name = enm
+pkg_enm_description = Erlang driver for nanomsg
+pkg_enm_homepage = https://github.com/basho/enm
+pkg_enm_fetch = git
+pkg_enm_repo = https://github.com/basho/enm
+pkg_enm_commit = master
+
+PACKAGES += entop
+pkg_entop_name = entop
+pkg_entop_description = A top-like tool for monitoring an Erlang node
+pkg_entop_homepage = https://github.com/mazenharake/entop
+pkg_entop_fetch = git
+pkg_entop_repo = https://github.com/mazenharake/entop
+pkg_entop_commit = master
+
+PACKAGES += epcap
+pkg_epcap_name = epcap
+pkg_epcap_description = Erlang packet capture interface using pcap
+pkg_epcap_homepage = https://github.com/msantos/epcap
+pkg_epcap_fetch = git
+pkg_epcap_repo = https://github.com/msantos/epcap
+pkg_epcap_commit = master
+
+PACKAGES += eper
+pkg_eper_name = eper
+pkg_eper_description = Erlang performance and debugging tools.
+pkg_eper_homepage = https://github.com/massemanet/eper
+pkg_eper_fetch = git
+pkg_eper_repo = https://github.com/massemanet/eper
+pkg_eper_commit = master
+
+PACKAGES += epgsql
+pkg_epgsql_name = epgsql
+pkg_epgsql_description = Erlang PostgreSQL client library.
+pkg_epgsql_homepage = https://github.com/epgsql/epgsql
+pkg_epgsql_fetch = git
+pkg_epgsql_repo = https://github.com/epgsql/epgsql
+pkg_epgsql_commit = master
+
+PACKAGES += episcina
+pkg_episcina_name = episcina
+pkg_episcina_description = A simple, non-intrusive resource pool for connections
+pkg_episcina_homepage = https://github.com/erlware/episcina
+pkg_episcina_fetch = git
+pkg_episcina_repo = https://github.com/erlware/episcina
+pkg_episcina_commit = master
+
+PACKAGES += eplot
+pkg_eplot_name = eplot
+pkg_eplot_description = A plot engine written in erlang.
+pkg_eplot_homepage = https://github.com/psyeugenic/eplot
+pkg_eplot_fetch = git
+pkg_eplot_repo = https://github.com/psyeugenic/eplot
+pkg_eplot_commit = master
+
+PACKAGES += epocxy
+pkg_epocxy_name = epocxy
+pkg_epocxy_description = Erlang Patterns of Concurrency
+pkg_epocxy_homepage = https://github.com/duomark/epocxy
+pkg_epocxy_fetch = git
+pkg_epocxy_repo = https://github.com/duomark/epocxy
+pkg_epocxy_commit = master
+
+PACKAGES += epubnub
+pkg_epubnub_name = epubnub
+pkg_epubnub_description = Erlang PubNub API
+pkg_epubnub_homepage = https://github.com/tsloughter/epubnub
+pkg_epubnub_fetch = git
+pkg_epubnub_repo = https://github.com/tsloughter/epubnub
+pkg_epubnub_commit = master
+
+PACKAGES += eqm
+pkg_eqm_name = eqm
+pkg_eqm_description = Erlang pub sub with supply-demand channels
+pkg_eqm_homepage = https://github.com/loucash/eqm
+pkg_eqm_fetch = git
+pkg_eqm_repo = https://github.com/loucash/eqm
+pkg_eqm_commit = master
+
+PACKAGES += eredis
+pkg_eredis_name = eredis
+pkg_eredis_description = Erlang Redis client
+pkg_eredis_homepage = https://github.com/wooga/eredis
+pkg_eredis_fetch = git
+pkg_eredis_repo = https://github.com/wooga/eredis
+pkg_eredis_commit = master
+
+PACKAGES += eredis_pool
+pkg_eredis_pool_name = eredis_pool
+pkg_eredis_pool_description = eredis_pool is a pool of Redis clients, using eredis and poolboy.
+pkg_eredis_pool_homepage = https://github.com/hiroeorz/eredis_pool
+pkg_eredis_pool_fetch = git
+pkg_eredis_pool_repo = https://github.com/hiroeorz/eredis_pool
+pkg_eredis_pool_commit = master
+
+PACKAGES += erl_streams
+pkg_erl_streams_name = erl_streams
+pkg_erl_streams_description = Streams in Erlang
+pkg_erl_streams_homepage = https://github.com/epappas/erl_streams
+pkg_erl_streams_fetch = git
+pkg_erl_streams_repo = https://github.com/epappas/erl_streams
+pkg_erl_streams_commit = master
+
+PACKAGES += erlang_cep
+pkg_erlang_cep_name = erlang_cep
+pkg_erlang_cep_description = A basic CEP package written in erlang
+pkg_erlang_cep_homepage = https://github.com/danmacklin/erlang_cep
+pkg_erlang_cep_fetch = git
+pkg_erlang_cep_repo = https://github.com/danmacklin/erlang_cep
+pkg_erlang_cep_commit = master
+
+PACKAGES += erlang_js
+pkg_erlang_js_name = erlang_js
+pkg_erlang_js_description = A linked-in driver for Erlang to Mozilla's Spidermonkey Javascript runtime.
+pkg_erlang_js_homepage = https://github.com/basho/erlang_js
+pkg_erlang_js_fetch = git
+pkg_erlang_js_repo = https://github.com/basho/erlang_js
+pkg_erlang_js_commit = master
+
+PACKAGES += erlang_localtime
+pkg_erlang_localtime_name = erlang_localtime
+pkg_erlang_localtime_description = Erlang library for conversion from one local time to another
+pkg_erlang_localtime_homepage = https://github.com/dmitryme/erlang_localtime
+pkg_erlang_localtime_fetch = git
+pkg_erlang_localtime_repo = https://github.com/dmitryme/erlang_localtime
+pkg_erlang_localtime_commit = master
+
+PACKAGES += erlang_smtp
+pkg_erlang_smtp_name = erlang_smtp
+pkg_erlang_smtp_description = Erlang SMTP and POP3 server code.
+pkg_erlang_smtp_homepage = https://github.com/tonyg/erlang-smtp
+pkg_erlang_smtp_fetch = git
+pkg_erlang_smtp_repo = https://github.com/tonyg/erlang-smtp
+pkg_erlang_smtp_commit = master
+
+PACKAGES += erlang_term
+pkg_erlang_term_name = erlang_term
+pkg_erlang_term_description = Erlang Term Info
+pkg_erlang_term_homepage = https://github.com/okeuday/erlang_term
+pkg_erlang_term_fetch = git
+pkg_erlang_term_repo = https://github.com/okeuday/erlang_term
+pkg_erlang_term_commit = master
+
+PACKAGES += erlastic_search
+pkg_erlastic_search_name = erlastic_search
+pkg_erlastic_search_description = An Erlang app for communicating with Elasticsearch's REST interface.
+pkg_erlastic_search_homepage = https://github.com/tsloughter/erlastic_search
+pkg_erlastic_search_fetch = git
+pkg_erlastic_search_repo = https://github.com/tsloughter/erlastic_search
+pkg_erlastic_search_commit = master
+
+PACKAGES += erlasticsearch
+pkg_erlasticsearch_name = erlasticsearch
+pkg_erlasticsearch_description = Erlang thrift interface to elastic_search
+pkg_erlasticsearch_homepage = https://github.com/dieswaytoofast/erlasticsearch
+pkg_erlasticsearch_fetch = git
+pkg_erlasticsearch_repo = https://github.com/dieswaytoofast/erlasticsearch
+pkg_erlasticsearch_commit = master
+
+PACKAGES += erlbrake
+pkg_erlbrake_name = erlbrake
+pkg_erlbrake_description = Erlang Airbrake notification client
+pkg_erlbrake_homepage = https://github.com/kenpratt/erlbrake
+pkg_erlbrake_fetch = git
+pkg_erlbrake_repo = https://github.com/kenpratt/erlbrake
+pkg_erlbrake_commit = master
+
+PACKAGES += erlcloud
+pkg_erlcloud_name = erlcloud
+pkg_erlcloud_description = Cloud Computing library for erlang (Amazon EC2, S3, SQS, SimpleDB, Mechanical Turk, ELB)
+pkg_erlcloud_homepage = https://github.com/gleber/erlcloud
+pkg_erlcloud_fetch = git
+pkg_erlcloud_repo = https://github.com/gleber/erlcloud
+pkg_erlcloud_commit = master
+
+PACKAGES += erlcron
+pkg_erlcron_name = erlcron
+pkg_erlcron_description = Erlang cronish system
+pkg_erlcron_homepage = https://github.com/erlware/erlcron
+pkg_erlcron_fetch = git
+pkg_erlcron_repo = https://github.com/erlware/erlcron
+pkg_erlcron_commit = master
+
+PACKAGES += erldb
+pkg_erldb_name = erldb
+pkg_erldb_description = ORM (Object-relational mapping) application implemented in Erlang
+pkg_erldb_homepage = http://erldb.org
+pkg_erldb_fetch = git
+pkg_erldb_repo = https://github.com/erldb/erldb
+pkg_erldb_commit = master
+
+PACKAGES += erldis
+pkg_erldis_name = erldis
+pkg_erldis_description = redis erlang client library
+pkg_erldis_homepage = https://github.com/cstar/erldis
+pkg_erldis_fetch = git
+pkg_erldis_repo = https://github.com/cstar/erldis
+pkg_erldis_commit = master
+
+PACKAGES += erldns
+pkg_erldns_name = erldns
+pkg_erldns_description = DNS server, in erlang.
+pkg_erldns_homepage = https://github.com/aetrion/erl-dns
+pkg_erldns_fetch = git
+pkg_erldns_repo = https://github.com/aetrion/erl-dns
+pkg_erldns_commit = master
+
+PACKAGES += erldocker
+pkg_erldocker_name = erldocker
+pkg_erldocker_description = Docker Remote API client for Erlang
+pkg_erldocker_homepage = https://github.com/proger/erldocker
+pkg_erldocker_fetch = git
+pkg_erldocker_repo = https://github.com/proger/erldocker
+pkg_erldocker_commit = master
+
+PACKAGES += erlfsmon
+pkg_erlfsmon_name = erlfsmon
+pkg_erlfsmon_description = Erlang filesystem event watcher for Linux and OSX
+pkg_erlfsmon_homepage = https://github.com/proger/erlfsmon
+pkg_erlfsmon_fetch = git
+pkg_erlfsmon_repo = https://github.com/proger/erlfsmon
+pkg_erlfsmon_commit = master
+
+PACKAGES += erlgit
+pkg_erlgit_name = erlgit
+pkg_erlgit_description = Erlang convenience wrapper around git executable
+pkg_erlgit_homepage = https://github.com/gleber/erlgit
+pkg_erlgit_fetch = git
+pkg_erlgit_repo = https://github.com/gleber/erlgit
+pkg_erlgit_commit = master
+
+PACKAGES += erlguten
+pkg_erlguten_name = erlguten
+pkg_erlguten_description = ErlGuten is a system for high-quality typesetting, written purely in Erlang.
+pkg_erlguten_homepage = https://github.com/richcarl/erlguten
+pkg_erlguten_fetch = git
+pkg_erlguten_repo = https://github.com/richcarl/erlguten
+pkg_erlguten_commit = master
+
+PACKAGES += erlmc
+pkg_erlmc_name = erlmc
+pkg_erlmc_description = Erlang memcached binary protocol client
+pkg_erlmc_homepage = https://github.com/jkvor/erlmc
+pkg_erlmc_fetch = git
+pkg_erlmc_repo = https://github.com/jkvor/erlmc
+pkg_erlmc_commit = master
+
+PACKAGES += erlmongo
+pkg_erlmongo_name = erlmongo
+pkg_erlmongo_description = Record based Erlang driver for MongoDB with gridfs support
+pkg_erlmongo_homepage = https://github.com/SergejJurecko/erlmongo
+pkg_erlmongo_fetch = git
+pkg_erlmongo_repo = https://github.com/SergejJurecko/erlmongo
+pkg_erlmongo_commit = master
+
+PACKAGES += erlog
+pkg_erlog_name = erlog
+pkg_erlog_description = Prolog interpreter in and for Erlang
+pkg_erlog_homepage = https://github.com/rvirding/erlog
+pkg_erlog_fetch = git
+pkg_erlog_repo = https://github.com/rvirding/erlog
+pkg_erlog_commit = master
+
+PACKAGES += erlpass
+pkg_erlpass_name = erlpass
+pkg_erlpass_description = A library to handle password hashing and changing in a safe manner, independent from any kind of storage whatsoever.
+pkg_erlpass_homepage = https://github.com/ferd/erlpass
+pkg_erlpass_fetch = git
+pkg_erlpass_repo = https://github.com/ferd/erlpass
+pkg_erlpass_commit = master
+
+PACKAGES += erlport
+pkg_erlport_name = erlport
+pkg_erlport_description = ErlPort - connect Erlang to other languages
+pkg_erlport_homepage = https://github.com/hdima/erlport
+pkg_erlport_fetch = git
+pkg_erlport_repo = https://github.com/hdima/erlport
+pkg_erlport_commit = master
+
+PACKAGES += erlsh
+pkg_erlsh_name = erlsh
+pkg_erlsh_description = Erlang shell tools
+pkg_erlsh_homepage = https://github.com/proger/erlsh
+pkg_erlsh_fetch = git
+pkg_erlsh_repo = https://github.com/proger/erlsh
+pkg_erlsh_commit = master
+
+PACKAGES += erlsha2
+pkg_erlsha2_name = erlsha2
+pkg_erlsha2_description = SHA-224, SHA-256, SHA-384, SHA-512 implemented in Erlang NIFs.
+pkg_erlsha2_homepage = https://github.com/vinoski/erlsha2
+pkg_erlsha2_fetch = git
+pkg_erlsha2_repo = https://github.com/vinoski/erlsha2
+pkg_erlsha2_commit = master
+
+PACKAGES += erlsom
+pkg_erlsom_name = erlsom
+pkg_erlsom_description = XML parser for Erlang
+pkg_erlsom_homepage = https://github.com/willemdj/erlsom
+pkg_erlsom_fetch = git
+pkg_erlsom_repo = https://github.com/willemdj/erlsom
+pkg_erlsom_commit = master
+
+PACKAGES += erlubi
+pkg_erlubi_name = erlubi
+pkg_erlubi_description = Ubigraph Erlang Client (and Process Visualizer)
+pkg_erlubi_homepage = https://github.com/krestenkrab/erlubi
+pkg_erlubi_fetch = git
+pkg_erlubi_repo = https://github.com/krestenkrab/erlubi
+pkg_erlubi_commit = master
+
+PACKAGES += erlvolt
+pkg_erlvolt_name = erlvolt
+pkg_erlvolt_description = VoltDB Erlang Client Driver
+pkg_erlvolt_homepage = https://github.com/VoltDB/voltdb-client-erlang
+pkg_erlvolt_fetch = git
+pkg_erlvolt_repo = https://github.com/VoltDB/voltdb-client-erlang
+pkg_erlvolt_commit = master
+
+PACKAGES += erlware_commons
+pkg_erlware_commons_name = erlware_commons
+pkg_erlware_commons_description = Erlware Commons is an Erlware project focused on all aspects of reusable Erlang components.
+pkg_erlware_commons_homepage = https://github.com/erlware/erlware_commons
+pkg_erlware_commons_fetch = git
+pkg_erlware_commons_repo = https://github.com/erlware/erlware_commons
+pkg_erlware_commons_commit = master
+
+PACKAGES += erlydtl
+pkg_erlydtl_name = erlydtl
+pkg_erlydtl_description = Django Template Language for Erlang.
+pkg_erlydtl_homepage = https://github.com/erlydtl/erlydtl
+pkg_erlydtl_fetch = git
+pkg_erlydtl_repo = https://github.com/erlydtl/erlydtl
+pkg_erlydtl_commit = master
+
+PACKAGES += errd
+pkg_errd_name = errd
+pkg_errd_description = Erlang RRDTool library
+pkg_errd_homepage = https://github.com/archaelus/errd
+pkg_errd_fetch = git
+pkg_errd_repo = https://github.com/archaelus/errd
+pkg_errd_commit = master
+
+PACKAGES += erserve
+pkg_erserve_name = erserve
+pkg_erserve_description = Erlang/Rserve communication interface
+pkg_erserve_homepage = https://github.com/del/erserve
+pkg_erserve_fetch = git
+pkg_erserve_repo = https://github.com/del/erserve
+pkg_erserve_commit = master
+
+PACKAGES += erwa
+pkg_erwa_name = erwa
+pkg_erwa_description = A WAMP router and client written in Erlang.
+pkg_erwa_homepage = https://github.com/bwegh/erwa
+pkg_erwa_fetch = git
+pkg_erwa_repo = https://github.com/bwegh/erwa
+pkg_erwa_commit = master
+
+PACKAGES += escalus
+pkg_escalus_name = escalus
+pkg_escalus_description = An XMPP client library in Erlang for conveniently testing XMPP servers
+pkg_escalus_homepage = https://github.com/esl/escalus
+pkg_escalus_fetch = git
+pkg_escalus_repo = https://github.com/esl/escalus
+pkg_escalus_commit = master
+
+PACKAGES += esh_mk
+pkg_esh_mk_name = esh_mk
+pkg_esh_mk_description = esh template engine plugin for erlang.mk
+pkg_esh_mk_homepage = https://github.com/crownedgrouse/esh.mk
+pkg_esh_mk_fetch = git
+pkg_esh_mk_repo = https://github.com/crownedgrouse/esh.mk.git
+pkg_esh_mk_commit = master
+
+PACKAGES += espec
+pkg_espec_name = espec
+pkg_espec_description = ESpec: Behaviour driven development framework for Erlang
+pkg_espec_homepage = https://github.com/lucaspiller/espec
+pkg_espec_fetch = git
+pkg_espec_repo = https://github.com/lucaspiller/espec
+pkg_espec_commit = master
+
+PACKAGES += estatsd
+pkg_estatsd_name = estatsd
+pkg_estatsd_description = Erlang stats aggregation app that periodically flushes data to graphite
+pkg_estatsd_homepage = https://github.com/RJ/estatsd
+pkg_estatsd_fetch = git
+pkg_estatsd_repo = https://github.com/RJ/estatsd
+pkg_estatsd_commit = master
+
+PACKAGES += etap
+pkg_etap_name = etap
+pkg_etap_description = etap is a simple erlang testing library that provides TAP compliant output.
+pkg_etap_homepage = https://github.com/ngerakines/etap
+pkg_etap_fetch = git
+pkg_etap_repo = https://github.com/ngerakines/etap
+pkg_etap_commit = master
+
+PACKAGES += etest
+pkg_etest_name = etest
+pkg_etest_description = A lightweight, convention over configuration test framework for Erlang
+pkg_etest_homepage = https://github.com/wooga/etest
+pkg_etest_fetch = git
+pkg_etest_repo = https://github.com/wooga/etest
+pkg_etest_commit = master
+
+PACKAGES += etest_http
+pkg_etest_http_name = etest_http
+pkg_etest_http_description = etest Assertions around HTTP (client-side)
+pkg_etest_http_homepage = https://github.com/wooga/etest_http
+pkg_etest_http_fetch = git
+pkg_etest_http_repo = https://github.com/wooga/etest_http
+pkg_etest_http_commit = master
+
+PACKAGES += etoml
+pkg_etoml_name = etoml
+pkg_etoml_description = TOML language erlang parser
+pkg_etoml_homepage = https://github.com/kalta/etoml
+pkg_etoml_fetch = git
+pkg_etoml_repo = https://github.com/kalta/etoml
+pkg_etoml_commit = master
+
+PACKAGES += eunit
+pkg_eunit_name = eunit
+pkg_eunit_description = The EUnit lightweight unit testing framework for Erlang - this is the canonical development repository.
+pkg_eunit_homepage = https://github.com/richcarl/eunit
+pkg_eunit_fetch = git
+pkg_eunit_repo = https://github.com/richcarl/eunit
+pkg_eunit_commit = master
+
+PACKAGES += eunit_formatters
+pkg_eunit_formatters_name = eunit_formatters
+pkg_eunit_formatters_description = Because eunit's output sucks. Let's make it better.
+pkg_eunit_formatters_homepage = https://github.com/seancribbs/eunit_formatters
+pkg_eunit_formatters_fetch = git
+pkg_eunit_formatters_repo = https://github.com/seancribbs/eunit_formatters
+pkg_eunit_formatters_commit = master
+
+PACKAGES += euthanasia
+pkg_euthanasia_name = euthanasia
+pkg_euthanasia_description = Merciful killer for your Erlang processes
+pkg_euthanasia_homepage = https://github.com/doubleyou/euthanasia
+pkg_euthanasia_fetch = git
+pkg_euthanasia_repo = https://github.com/doubleyou/euthanasia
+pkg_euthanasia_commit = master
+
+PACKAGES += evum
+pkg_evum_name = evum
+pkg_evum_description = Spawn Linux VMs as Erlang processes in the Erlang VM
+pkg_evum_homepage = https://github.com/msantos/evum
+pkg_evum_fetch = git
+pkg_evum_repo = https://github.com/msantos/evum
+pkg_evum_commit = master
+
+PACKAGES += exec
+pkg_exec_name = erlexec
+pkg_exec_description = Execute and control OS processes from Erlang/OTP.
+pkg_exec_homepage = http://saleyn.github.com/erlexec
+pkg_exec_fetch = git
+pkg_exec_repo = https://github.com/saleyn/erlexec
+pkg_exec_commit = master
+
+PACKAGES += exml
+pkg_exml_name = exml
+pkg_exml_description = XML parsing library in Erlang
+pkg_exml_homepage = https://github.com/paulgray/exml
+pkg_exml_fetch = git
+pkg_exml_repo = https://github.com/paulgray/exml
+pkg_exml_commit = master
+
+PACKAGES += exometer
+pkg_exometer_name = exometer
+pkg_exometer_description = Basic measurement objects and probe behavior
+pkg_exometer_homepage = https://github.com/Feuerlabs/exometer
+pkg_exometer_fetch = git
+pkg_exometer_repo = https://github.com/Feuerlabs/exometer
+pkg_exometer_commit = master
+
+PACKAGES += exs1024
+pkg_exs1024_name = exs1024
+pkg_exs1024_description = Xorshift1024star pseudo random number generator for Erlang.
+pkg_exs1024_homepage = https://github.com/jj1bdx/exs1024
+pkg_exs1024_fetch = git
+pkg_exs1024_repo = https://github.com/jj1bdx/exs1024
+pkg_exs1024_commit = master
+
+PACKAGES += exs64
+pkg_exs64_name = exs64
+pkg_exs64_description = Xorshift64star pseudo random number generator for Erlang.
+pkg_exs64_homepage = https://github.com/jj1bdx/exs64
+pkg_exs64_fetch = git
+pkg_exs64_repo = https://github.com/jj1bdx/exs64
+pkg_exs64_commit = master
+
+PACKAGES += exsplus116
+pkg_exsplus116_name = exsplus116
+pkg_exsplus116_description = Xorshift116plus for Erlang
+pkg_exsplus116_homepage = https://github.com/jj1bdx/exsplus116
+pkg_exsplus116_fetch = git
+pkg_exsplus116_repo = https://github.com/jj1bdx/exsplus116
+pkg_exsplus116_commit = master
+
+PACKAGES += exsplus128
+pkg_exsplus128_name = exsplus128
+pkg_exsplus128_description = Xorshift128plus pseudo random number generator for Erlang.
+pkg_exsplus128_homepage = https://github.com/jj1bdx/exsplus128
+pkg_exsplus128_fetch = git
+pkg_exsplus128_repo = https://github.com/jj1bdx/exsplus128
+pkg_exsplus128_commit = master
+
+PACKAGES += ezmq
+pkg_ezmq_name = ezmq
+pkg_ezmq_description = zMQ implemented in Erlang
+pkg_ezmq_homepage = https://github.com/RoadRunnr/ezmq
+pkg_ezmq_fetch = git
+pkg_ezmq_repo = https://github.com/RoadRunnr/ezmq
+pkg_ezmq_commit = master
+
+PACKAGES += ezmtp
+pkg_ezmtp_name = ezmtp
+pkg_ezmtp_description = ZMTP protocol in pure Erlang.
+pkg_ezmtp_homepage = https://github.com/a13x/ezmtp
+pkg_ezmtp_fetch = git
+pkg_ezmtp_repo = https://github.com/a13x/ezmtp
+pkg_ezmtp_commit = master
+
+PACKAGES += fast_disk_log
+pkg_fast_disk_log_name = fast_disk_log
+pkg_fast_disk_log_description = Pool-based asynchronous Erlang disk logger
+pkg_fast_disk_log_homepage = https://github.com/lpgauth/fast_disk_log
+pkg_fast_disk_log_fetch = git
+pkg_fast_disk_log_repo = https://github.com/lpgauth/fast_disk_log
+pkg_fast_disk_log_commit = master
+
+PACKAGES += feeder
+pkg_feeder_name = feeder
+pkg_feeder_description = Stream parse RSS and Atom formatted XML feeds.
+pkg_feeder_homepage = https://github.com/michaelnisi/feeder
+pkg_feeder_fetch = git
+pkg_feeder_repo = https://github.com/michaelnisi/feeder
+pkg_feeder_commit = master
+
+PACKAGES += find_crate
+pkg_find_crate_name = find_crate
+pkg_find_crate_description = Find Rust libs and exes in Erlang application priv directory
+pkg_find_crate_homepage = https://github.com/goertzenator/find_crate
+pkg_find_crate_fetch = git
+pkg_find_crate_repo = https://github.com/goertzenator/find_crate
+pkg_find_crate_commit = master
+
+PACKAGES += fix
+pkg_fix_name = fix
+pkg_fix_description = http://fixprotocol.org/ implementation.
+pkg_fix_homepage = https://github.com/maxlapshin/fix
+pkg_fix_fetch = git
+pkg_fix_repo = https://github.com/maxlapshin/fix
+pkg_fix_commit = master
+
+PACKAGES += flower
+pkg_flower_name = flower
+pkg_flower_description = FlowER - an Erlang OpenFlow development platform
+pkg_flower_homepage = https://github.com/travelping/flower
+pkg_flower_fetch = git
+pkg_flower_repo = https://github.com/travelping/flower
+pkg_flower_commit = master
+
+PACKAGES += fn
+pkg_fn_name = fn
+pkg_fn_description = Function utilities for Erlang
+pkg_fn_homepage = https://github.com/reiddraper/fn
+pkg_fn_fetch = git
+pkg_fn_repo = https://github.com/reiddraper/fn
+pkg_fn_commit = master
+
+PACKAGES += folsom
+pkg_folsom_name = folsom
+pkg_folsom_description = Expose Erlang Events and Metrics
+pkg_folsom_homepage = https://github.com/boundary/folsom
+pkg_folsom_fetch = git
+pkg_folsom_repo = https://github.com/boundary/folsom
+pkg_folsom_commit = master
+
+PACKAGES += folsom_cowboy
+pkg_folsom_cowboy_name = folsom_cowboy
+pkg_folsom_cowboy_description = A Cowboy based Folsom HTTP Wrapper.
+pkg_folsom_cowboy_homepage = https://github.com/boundary/folsom_cowboy
+pkg_folsom_cowboy_fetch = git
+pkg_folsom_cowboy_repo = https://github.com/boundary/folsom_cowboy
+pkg_folsom_cowboy_commit = master
+
+PACKAGES += folsomite
+pkg_folsomite_name = folsomite
+pkg_folsomite_description = blow up your graphite / riemann server with folsom metrics
+pkg_folsomite_homepage = https://github.com/campanja/folsomite
+pkg_folsomite_fetch = git
+pkg_folsomite_repo = https://github.com/campanja/folsomite
+pkg_folsomite_commit = master
+
+PACKAGES += fs
+pkg_fs_name = fs
+pkg_fs_description = Erlang FileSystem Listener
+pkg_fs_homepage = https://github.com/synrc/fs
+pkg_fs_fetch = git
+pkg_fs_repo = https://github.com/synrc/fs
+pkg_fs_commit = master
+
+PACKAGES += fuse
+pkg_fuse_name = fuse
+pkg_fuse_description = A Circuit Breaker for Erlang
+pkg_fuse_homepage = https://github.com/jlouis/fuse
+pkg_fuse_fetch = git
+pkg_fuse_repo = https://github.com/jlouis/fuse
+pkg_fuse_commit = master
+
+PACKAGES += gcm
+pkg_gcm_name = gcm
+pkg_gcm_description = An Erlang application for Google Cloud Messaging
+pkg_gcm_homepage = https://github.com/pdincau/gcm-erlang
+pkg_gcm_fetch = git
+pkg_gcm_repo = https://github.com/pdincau/gcm-erlang
+pkg_gcm_commit = master
+
+PACKAGES += gcprof
+pkg_gcprof_name = gcprof
+pkg_gcprof_description = Garbage Collection profiler for Erlang
+pkg_gcprof_homepage = https://github.com/knutin/gcprof
+pkg_gcprof_fetch = git
+pkg_gcprof_repo = https://github.com/knutin/gcprof
+pkg_gcprof_commit = master
+
+PACKAGES += geas
+pkg_geas_name = geas
+pkg_geas_description = Guess Erlang Application Scattering
+pkg_geas_homepage = https://github.com/crownedgrouse/geas
+pkg_geas_fetch = git
+pkg_geas_repo = https://github.com/crownedgrouse/geas
+pkg_geas_commit = master
+
+PACKAGES += geef
+pkg_geef_name = geef
+pkg_geef_description = Git NEEEEF (Erlang NIF)
+pkg_geef_homepage = https://github.com/carlosmn/geef
+pkg_geef_fetch = git
+pkg_geef_repo = https://github.com/carlosmn/geef
+pkg_geef_commit = master
+
+PACKAGES += gen_coap
+pkg_gen_coap_name = gen_coap
+pkg_gen_coap_description = Generic Erlang CoAP Client/Server
+pkg_gen_coap_homepage = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_fetch = git
+pkg_gen_coap_repo = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_commit = master
+
+PACKAGES += gen_cycle
+pkg_gen_cycle_name = gen_cycle
+pkg_gen_cycle_description = Simple, generic OTP behaviour for recurring tasks
+pkg_gen_cycle_homepage = https://github.com/aerosol/gen_cycle
+pkg_gen_cycle_fetch = git
+pkg_gen_cycle_repo = https://github.com/aerosol/gen_cycle
+pkg_gen_cycle_commit = develop
+
+PACKAGES += gen_icmp
+pkg_gen_icmp_name = gen_icmp
+pkg_gen_icmp_description = Erlang interface to ICMP sockets
+pkg_gen_icmp_homepage = https://github.com/msantos/gen_icmp
+pkg_gen_icmp_fetch = git
+pkg_gen_icmp_repo = https://github.com/msantos/gen_icmp
+pkg_gen_icmp_commit = master
+
+PACKAGES += gen_leader
+pkg_gen_leader_name = gen_leader
+pkg_gen_leader_description = leader election behavior
+pkg_gen_leader_homepage = https://github.com/garret-smith/gen_leader_revival
+pkg_gen_leader_fetch = git
+pkg_gen_leader_repo = https://github.com/garret-smith/gen_leader_revival
+pkg_gen_leader_commit = master
+
+PACKAGES += gen_nb_server
+pkg_gen_nb_server_name = gen_nb_server
+pkg_gen_nb_server_description = OTP behavior for writing non-blocking servers
+pkg_gen_nb_server_homepage = https://github.com/kevsmith/gen_nb_server
+pkg_gen_nb_server_fetch = git
+pkg_gen_nb_server_repo = https://github.com/kevsmith/gen_nb_server
+pkg_gen_nb_server_commit = master
+
+PACKAGES += gen_paxos
+pkg_gen_paxos_name = gen_paxos
+pkg_gen_paxos_description = An Erlang/OTP-style implementation of the PAXOS distributed consensus protocol
+pkg_gen_paxos_homepage = https://github.com/gburd/gen_paxos
+pkg_gen_paxos_fetch = git
+pkg_gen_paxos_repo = https://github.com/gburd/gen_paxos
+pkg_gen_paxos_commit = master
+
+PACKAGES += gen_rpc
+pkg_gen_rpc_name = gen_rpc
+pkg_gen_rpc_description = A scalable RPC library for Erlang-VM based languages
+pkg_gen_rpc_homepage = https://github.com/priestjim/gen_rpc.git
+pkg_gen_rpc_fetch = git
+pkg_gen_rpc_repo = https://github.com/priestjim/gen_rpc.git
+pkg_gen_rpc_commit = master
+
+PACKAGES += gen_smtp
+pkg_gen_smtp_name = gen_smtp
+pkg_gen_smtp_description = A generic Erlang SMTP server and client that can be extended via callback modules
+pkg_gen_smtp_homepage = https://github.com/Vagabond/gen_smtp
+pkg_gen_smtp_fetch = git
+pkg_gen_smtp_repo = https://github.com/Vagabond/gen_smtp
+pkg_gen_smtp_commit = master
+
+PACKAGES += gen_tracker
+pkg_gen_tracker_name = gen_tracker
+pkg_gen_tracker_description = supervisor with ets handling of children and their metadata
+pkg_gen_tracker_homepage = https://github.com/erlyvideo/gen_tracker
+pkg_gen_tracker_fetch = git
+pkg_gen_tracker_repo = https://github.com/erlyvideo/gen_tracker
+pkg_gen_tracker_commit = master
+
+PACKAGES += gen_unix
+pkg_gen_unix_name = gen_unix
+pkg_gen_unix_description = Erlang Unix socket interface
+pkg_gen_unix_homepage = https://github.com/msantos/gen_unix
+pkg_gen_unix_fetch = git
+pkg_gen_unix_repo = https://github.com/msantos/gen_unix
+pkg_gen_unix_commit = master
+
+PACKAGES += geode
+pkg_geode_name = geode
+pkg_geode_description = geohash/proximity lookup in pure, uncut erlang.
+pkg_geode_homepage = https://github.com/bradfordw/geode
+pkg_geode_fetch = git
+pkg_geode_repo = https://github.com/bradfordw/geode
+pkg_geode_commit = master
+
+PACKAGES += getopt
+pkg_getopt_name = getopt
+pkg_getopt_description = Module to parse command line arguments using the GNU getopt syntax
+pkg_getopt_homepage = https://github.com/jcomellas/getopt
+pkg_getopt_fetch = git
+pkg_getopt_repo = https://github.com/jcomellas/getopt
+pkg_getopt_commit = master
+
+PACKAGES += gettext
+pkg_gettext_name = gettext
+pkg_gettext_description = Erlang internationalization library.
+pkg_gettext_homepage = https://github.com/etnt/gettext
+pkg_gettext_fetch = git
+pkg_gettext_repo = https://github.com/etnt/gettext
+pkg_gettext_commit = master
+
+PACKAGES += giallo
+pkg_giallo_name = giallo
+pkg_giallo_description = Small and flexible web framework on top of Cowboy
+pkg_giallo_homepage = https://github.com/kivra/giallo
+pkg_giallo_fetch = git
+pkg_giallo_repo = https://github.com/kivra/giallo
+pkg_giallo_commit = master
+
+PACKAGES += gin
+pkg_gin_name = gin
+pkg_gin_description = Guard expression helpers for Erlang, implemented as a parse_transform
+pkg_gin_homepage = https://github.com/mad-cocktail/gin
+pkg_gin_fetch = git
+pkg_gin_repo = https://github.com/mad-cocktail/gin
+pkg_gin_commit = master
+
+PACKAGES += gitty
+pkg_gitty_name = gitty
+pkg_gitty_description = Git access in erlang
+pkg_gitty_homepage = https://github.com/maxlapshin/gitty
+pkg_gitty_fetch = git
+pkg_gitty_repo = https://github.com/maxlapshin/gitty
+pkg_gitty_commit = master
+
+PACKAGES += gold_fever
+pkg_gold_fever_name = gold_fever
+pkg_gold_fever_description = A Treasure Hunt for Erlangers
+pkg_gold_fever_homepage = https://github.com/inaka/gold_fever
+pkg_gold_fever_fetch = git
+pkg_gold_fever_repo = https://github.com/inaka/gold_fever
+pkg_gold_fever_commit = master
+
+PACKAGES += gpb
+pkg_gpb_name = gpb
+pkg_gpb_description = A Google Protobuf implementation for Erlang
+pkg_gpb_homepage = https://github.com/tomas-abrahamsson/gpb
+pkg_gpb_fetch = git
+pkg_gpb_repo = https://github.com/tomas-abrahamsson/gpb
+pkg_gpb_commit = master
+
+PACKAGES += gproc
+pkg_gproc_name = gproc
+pkg_gproc_description = Extended process registry for Erlang
+pkg_gproc_homepage = https://github.com/uwiger/gproc
+pkg_gproc_fetch = git
+pkg_gproc_repo = https://github.com/uwiger/gproc
+pkg_gproc_commit = master
+
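+# Note (illustrative sketch, not part of the upstream index): the pkg_*
+# entries in this file are consumed when an erlang.mk-based project lists a
+# package by name in DEPS. A minimal user Makefile, assuming a hypothetical
+# project that depends on gproc and jsx from this index, might look like:
+#
+#     PROJECT = my_app
+#     DEPS = gproc jsx
+#     dep_gproc_commit = master   # optional override of the index default
+#     include erlang.mk
+#
+# erlang.mk then fetches each dependency using the matching pkg_*_fetch,
+# pkg_*_repo and pkg_*_commit values defined in this index.
+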
+PACKAGES += grapherl
+pkg_grapherl_name = grapherl
+pkg_grapherl_description = Create graphs of Erlang systems and programs
+pkg_grapherl_homepage = https://github.com/eproxus/grapherl
+pkg_grapherl_fetch = git
+pkg_grapherl_repo = https://github.com/eproxus/grapherl
+pkg_grapherl_commit = master
+
+PACKAGES += grpc
+pkg_grpc_name = grpc
+pkg_grpc_description = gRPC server in Erlang
+pkg_grpc_homepage = https://github.com/Bluehouse-Technology/grpc
+pkg_grpc_fetch = git
+pkg_grpc_repo = https://github.com/Bluehouse-Technology/grpc
+pkg_grpc_commit = master
+
+PACKAGES += grpc_client
+pkg_grpc_client_name = grpc_client
+pkg_grpc_client_description = gRPC client in Erlang
+pkg_grpc_client_homepage = https://github.com/Bluehouse-Technology/grpc_client
+pkg_grpc_client_fetch = git
+pkg_grpc_client_repo = https://github.com/Bluehouse-Technology/grpc_client
+pkg_grpc_client_commit = master
+
+PACKAGES += gun
+pkg_gun_name = gun
+pkg_gun_description = Asynchronous SPDY, HTTP and Websocket client written in Erlang.
+pkg_gun_homepage = http://ninenines.eu
+pkg_gun_fetch = git
+pkg_gun_repo = https://github.com/ninenines/gun
+pkg_gun_commit = master
+
+PACKAGES += gut
+pkg_gut_name = gut
+pkg_gut_description = gut is a template printing (aka scaffolding) tool for Erlang, like rails generate or yeoman
+pkg_gut_homepage = https://github.com/unbalancedparentheses/gut
+pkg_gut_fetch = git
+pkg_gut_repo = https://github.com/unbalancedparentheses/gut
+pkg_gut_commit = master
+
+PACKAGES += hackney
+pkg_hackney_name = hackney
+pkg_hackney_description = simple HTTP client in Erlang
+pkg_hackney_homepage = https://github.com/benoitc/hackney
+pkg_hackney_fetch = git
+pkg_hackney_repo = https://github.com/benoitc/hackney
+pkg_hackney_commit = master
+
+PACKAGES += hamcrest
+pkg_hamcrest_name = hamcrest
+pkg_hamcrest_description = Erlang port of Hamcrest
+pkg_hamcrest_homepage = https://github.com/hyperthunk/hamcrest-erlang
+pkg_hamcrest_fetch = git
+pkg_hamcrest_repo = https://github.com/hyperthunk/hamcrest-erlang
+pkg_hamcrest_commit = master
+
+PACKAGES += hanoidb
+pkg_hanoidb_name = hanoidb
+pkg_hanoidb_description = Erlang LSM BTree Storage
+pkg_hanoidb_homepage = https://github.com/krestenkrab/hanoidb
+pkg_hanoidb_fetch = git
+pkg_hanoidb_repo = https://github.com/krestenkrab/hanoidb
+pkg_hanoidb_commit = master
+
+PACKAGES += hottub
+pkg_hottub_name = hottub
+pkg_hottub_description = Permanent Erlang Worker Pool
+pkg_hottub_homepage = https://github.com/bfrog/hottub
+pkg_hottub_fetch = git
+pkg_hottub_repo = https://github.com/bfrog/hottub
+pkg_hottub_commit = master
+
+PACKAGES += hpack
+pkg_hpack_name = hpack
+pkg_hpack_description = HPACK Implementation for Erlang
+pkg_hpack_homepage = https://github.com/joedevivo/hpack
+pkg_hpack_fetch = git
+pkg_hpack_repo = https://github.com/joedevivo/hpack
+pkg_hpack_commit = master
+
+PACKAGES += hyper
+pkg_hyper_name = hyper
+pkg_hyper_description = Erlang implementation of HyperLogLog
+pkg_hyper_homepage = https://github.com/GameAnalytics/hyper
+pkg_hyper_fetch = git
+pkg_hyper_repo = https://github.com/GameAnalytics/hyper
+pkg_hyper_commit = master
+
+PACKAGES += i18n
+pkg_i18n_name = i18n
+pkg_i18n_description = International components for unicode from Erlang (unicode, date, string, number, format, locale, localization, transliteration, icu4e)
+pkg_i18n_homepage = https://github.com/erlang-unicode/i18n
+pkg_i18n_fetch = git
+pkg_i18n_repo = https://github.com/erlang-unicode/i18n
+pkg_i18n_commit = master
+
+PACKAGES += ibrowse
+pkg_ibrowse_name = ibrowse
+pkg_ibrowse_description = Erlang HTTP client
+pkg_ibrowse_homepage = https://github.com/cmullaparthi/ibrowse
+pkg_ibrowse_fetch = git
+pkg_ibrowse_repo = https://github.com/cmullaparthi/ibrowse
+pkg_ibrowse_commit = master
+
+PACKAGES += idna
+pkg_idna_name = idna
+pkg_idna_description = Erlang IDNA lib
+pkg_idna_homepage = https://github.com/benoitc/erlang-idna
+pkg_idna_fetch = git
+pkg_idna_repo = https://github.com/benoitc/erlang-idna
+pkg_idna_commit = master
+
+PACKAGES += ierlang
+pkg_ierlang_name = ierlang
+pkg_ierlang_description = An Erlang language kernel for IPython.
+pkg_ierlang_homepage = https://github.com/robbielynch/ierlang
+pkg_ierlang_fetch = git
+pkg_ierlang_repo = https://github.com/robbielynch/ierlang
+pkg_ierlang_commit = master
+
+PACKAGES += iota
+pkg_iota_name = iota
+pkg_iota_description = iota (Inter-dependency Objective Testing Apparatus) - a tool to enforce clean separation of responsibilities in Erlang code
+pkg_iota_homepage = https://github.com/jpgneves/iota
+pkg_iota_fetch = git
+pkg_iota_repo = https://github.com/jpgneves/iota
+pkg_iota_commit = master
+
+PACKAGES += irc_lib
+pkg_irc_lib_name = irc_lib
+pkg_irc_lib_description = Erlang irc client library
+pkg_irc_lib_homepage = https://github.com/OtpChatBot/irc_lib
+pkg_irc_lib_fetch = git
+pkg_irc_lib_repo = https://github.com/OtpChatBot/irc_lib
+pkg_irc_lib_commit = master
+
+PACKAGES += ircd
+pkg_ircd_name = ircd
+pkg_ircd_description = A pluggable IRC daemon application/library for Erlang.
+pkg_ircd_homepage = https://github.com/tonyg/erlang-ircd
+pkg_ircd_fetch = git
+pkg_ircd_repo = https://github.com/tonyg/erlang-ircd
+pkg_ircd_commit = master
+
+PACKAGES += iris
+pkg_iris_name = iris
+pkg_iris_description = Iris Erlang binding
+pkg_iris_homepage = https://github.com/project-iris/iris-erl
+pkg_iris_fetch = git
+pkg_iris_repo = https://github.com/project-iris/iris-erl
+pkg_iris_commit = master
+
+PACKAGES += iso8601
+pkg_iso8601_name = iso8601
+pkg_iso8601_description = Erlang ISO 8601 date formatter/parser
+pkg_iso8601_homepage = https://github.com/seansawyer/erlang_iso8601
+pkg_iso8601_fetch = git
+pkg_iso8601_repo = https://github.com/seansawyer/erlang_iso8601
+pkg_iso8601_commit = master
+
+PACKAGES += jamdb_sybase
+pkg_jamdb_sybase_name = jamdb_sybase
+pkg_jamdb_sybase_description = Erlang driver for SAP Sybase ASE
+pkg_jamdb_sybase_homepage = https://github.com/erlangbureau/jamdb_sybase
+pkg_jamdb_sybase_fetch = git
+pkg_jamdb_sybase_repo = https://github.com/erlangbureau/jamdb_sybase
+pkg_jamdb_sybase_commit = master
+
+PACKAGES += jerg
+pkg_jerg_name = jerg
+pkg_jerg_description = JSON Schema to Erlang Records Generator
+pkg_jerg_homepage = https://github.com/ddossot/jerg
+pkg_jerg_fetch = git
+pkg_jerg_repo = https://github.com/ddossot/jerg
+pkg_jerg_commit = master
+
+PACKAGES += jesse
+pkg_jesse_name = jesse
+pkg_jesse_description = jesse (JSon Schema Erlang) is an implementation of a json schema validator for Erlang.
+pkg_jesse_homepage = https://github.com/for-GET/jesse
+pkg_jesse_fetch = git
+pkg_jesse_repo = https://github.com/for-GET/jesse
+pkg_jesse_commit = master
+
+PACKAGES += jiffy
+pkg_jiffy_name = jiffy
+pkg_jiffy_description = JSON NIFs for Erlang.
+pkg_jiffy_homepage = https://github.com/davisp/jiffy
+pkg_jiffy_fetch = git
+pkg_jiffy_repo = https://github.com/davisp/jiffy
+pkg_jiffy_commit = master
+
+PACKAGES += jiffy_v
+pkg_jiffy_v_name = jiffy_v
+pkg_jiffy_v_description = JSON validation utility
+pkg_jiffy_v_homepage = https://github.com/shizzard/jiffy-v
+pkg_jiffy_v_fetch = git
+pkg_jiffy_v_repo = https://github.com/shizzard/jiffy-v
+pkg_jiffy_v_commit = master
+
+PACKAGES += jobs
+pkg_jobs_name = jobs
+pkg_jobs_description = a Job scheduler for load regulation
+pkg_jobs_homepage = https://github.com/esl/jobs
+pkg_jobs_fetch = git
+pkg_jobs_repo = https://github.com/esl/jobs
+pkg_jobs_commit = master
+
+PACKAGES += joxa
+pkg_joxa_name = joxa
+pkg_joxa_description = A Modern Lisp for the Erlang VM
+pkg_joxa_homepage = https://github.com/joxa/joxa
+pkg_joxa_fetch = git
+pkg_joxa_repo = https://github.com/joxa/joxa
+pkg_joxa_commit = master
+
+PACKAGES += json
+pkg_json_name = json
+pkg_json_description = a high level json library for erlang (17.0+)
+pkg_json_homepage = https://github.com/talentdeficit/json
+pkg_json_fetch = git
+pkg_json_repo = https://github.com/talentdeficit/json
+pkg_json_commit = master
+
+PACKAGES += json_rec
+pkg_json_rec_name = json_rec
+pkg_json_rec_description = JSON to erlang record
+pkg_json_rec_homepage = https://github.com/justinkirby/json_rec
+pkg_json_rec_fetch = git
+pkg_json_rec_repo = https://github.com/justinkirby/json_rec
+pkg_json_rec_commit = master
+
+PACKAGES += jsone
+pkg_jsone_name = jsone
+pkg_jsone_description = An Erlang library for encoding and decoding JSON data.
+pkg_jsone_homepage = https://github.com/sile/jsone.git
+pkg_jsone_fetch = git
+pkg_jsone_repo = https://github.com/sile/jsone.git
+pkg_jsone_commit = master
+
+PACKAGES += jsonerl
+pkg_jsonerl_name = jsonerl
+pkg_jsonerl_description = yet another but slightly different erlang <-> json encoder/decoder
+pkg_jsonerl_homepage = https://github.com/lambder/jsonerl
+pkg_jsonerl_fetch = git
+pkg_jsonerl_repo = https://github.com/lambder/jsonerl
+pkg_jsonerl_commit = master
+
+PACKAGES += jsonpath
+pkg_jsonpath_name = jsonpath
+pkg_jsonpath_description = Fast Erlang JSON data retrieval and updates via javascript-like notation
+pkg_jsonpath_homepage = https://github.com/GeneStevens/jsonpath
+pkg_jsonpath_fetch = git
+pkg_jsonpath_repo = https://github.com/GeneStevens/jsonpath
+pkg_jsonpath_commit = master
+
+PACKAGES += jsonx
+pkg_jsonx_name = jsonx
+pkg_jsonx_description = JSONX is an Erlang library for efficiently decoding and encoding JSON, written in C.
+pkg_jsonx_homepage = https://github.com/iskra/jsonx
+pkg_jsonx_fetch = git
+pkg_jsonx_repo = https://github.com/iskra/jsonx
+pkg_jsonx_commit = master
+
+PACKAGES += jsx
+pkg_jsx_name = jsx
+pkg_jsx_description = An Erlang application for consuming, producing and manipulating JSON.
+pkg_jsx_homepage = https://github.com/talentdeficit/jsx
+pkg_jsx_fetch = git
+pkg_jsx_repo = https://github.com/talentdeficit/jsx
+pkg_jsx_commit = master
+
+PACKAGES += kafka
+pkg_kafka_name = kafka
+pkg_kafka_description = Kafka consumer and producer in Erlang
+pkg_kafka_homepage = https://github.com/wooga/kafka-erlang
+pkg_kafka_fetch = git
+pkg_kafka_repo = https://github.com/wooga/kafka-erlang
+pkg_kafka_commit = master
+
+PACKAGES += kafka_protocol
+pkg_kafka_protocol_name = kafka_protocol
+pkg_kafka_protocol_description = Kafka protocol Erlang library
+pkg_kafka_protocol_homepage = https://github.com/klarna/kafka_protocol
+pkg_kafka_protocol_fetch = git
+pkg_kafka_protocol_repo = https://github.com/klarna/kafka_protocol.git
+pkg_kafka_protocol_commit = master
+
+PACKAGES += kai
+pkg_kai_name = kai
+pkg_kai_description = DHT storage by Takeshi Inoue
+pkg_kai_homepage = https://github.com/synrc/kai
+pkg_kai_fetch = git
+pkg_kai_repo = https://github.com/synrc/kai
+pkg_kai_commit = master
+
+PACKAGES += katja
+pkg_katja_name = katja
+pkg_katja_description = A simple Riemann client written in Erlang.
+pkg_katja_homepage = https://github.com/nifoc/katja
+pkg_katja_fetch = git
+pkg_katja_repo = https://github.com/nifoc/katja
+pkg_katja_commit = master
+
+PACKAGES += kdht
+pkg_kdht_name = kdht
+pkg_kdht_description = kdht is an erlang DHT implementation
+pkg_kdht_homepage = https://github.com/kevinlynx/kdht
+pkg_kdht_fetch = git
+pkg_kdht_repo = https://github.com/kevinlynx/kdht
+pkg_kdht_commit = master
+
+PACKAGES += key2value
+pkg_key2value_name = key2value
+pkg_key2value_description = Erlang 2-way map
+pkg_key2value_homepage = https://github.com/okeuday/key2value
+pkg_key2value_fetch = git
+pkg_key2value_repo = https://github.com/okeuday/key2value
+pkg_key2value_commit = master
+
+PACKAGES += keys1value
+pkg_keys1value_name = keys1value
+pkg_keys1value_description = Erlang set associative map for key lists
+pkg_keys1value_homepage = https://github.com/okeuday/keys1value
+pkg_keys1value_fetch = git
+pkg_keys1value_repo = https://github.com/okeuday/keys1value
+pkg_keys1value_commit = master
+
+PACKAGES += kinetic
+pkg_kinetic_name = kinetic
+pkg_kinetic_description = Erlang Kinesis Client
+pkg_kinetic_homepage = https://github.com/AdRoll/kinetic
+pkg_kinetic_fetch = git
+pkg_kinetic_repo = https://github.com/AdRoll/kinetic
+pkg_kinetic_commit = master
+
+PACKAGES += kjell
+pkg_kjell_name = kjell
+pkg_kjell_description = Erlang Shell
+pkg_kjell_homepage = https://github.com/karlll/kjell
+pkg_kjell_fetch = git
+pkg_kjell_repo = https://github.com/karlll/kjell
+pkg_kjell_commit = master
+
+PACKAGES += kraken
+pkg_kraken_name = kraken
+pkg_kraken_description = Distributed Pubsub Server for Realtime Apps
+pkg_kraken_homepage = https://github.com/Asana/kraken
+pkg_kraken_fetch = git
+pkg_kraken_repo = https://github.com/Asana/kraken
+pkg_kraken_commit = master
+
+PACKAGES += kucumberl
+pkg_kucumberl_name = kucumberl
+pkg_kucumberl_description = A pure-Erlang, open-source implementation of Cucumber
+pkg_kucumberl_homepage = https://github.com/openshine/kucumberl
+pkg_kucumberl_fetch = git
+pkg_kucumberl_repo = https://github.com/openshine/kucumberl
+pkg_kucumberl_commit = master
+
+PACKAGES += kvc
+pkg_kvc_name = kvc
+pkg_kvc_description = KVC - Key Value Coding for Erlang data structures
+pkg_kvc_homepage = https://github.com/etrepum/kvc
+pkg_kvc_fetch = git
+pkg_kvc_repo = https://github.com/etrepum/kvc
+pkg_kvc_commit = master
+
+PACKAGES += kvlists
+pkg_kvlists_name = kvlists
+pkg_kvlists_description = Lists of key-value pairs (decoded JSON) in Erlang
+pkg_kvlists_homepage = https://github.com/jcomellas/kvlists
+pkg_kvlists_fetch = git
+pkg_kvlists_repo = https://github.com/jcomellas/kvlists
+pkg_kvlists_commit = master
+
+PACKAGES += kvs
+pkg_kvs_name = kvs
+pkg_kvs_description = Container and Iterator
+pkg_kvs_homepage = https://github.com/synrc/kvs
+pkg_kvs_fetch = git
+pkg_kvs_repo = https://github.com/synrc/kvs
+pkg_kvs_commit = master
+
+PACKAGES += lager
+pkg_lager_name = lager
+pkg_lager_description = A logging framework for Erlang/OTP.
+pkg_lager_homepage = https://github.com/erlang-lager/lager
+pkg_lager_fetch = git
+pkg_lager_repo = https://github.com/erlang-lager/lager
+pkg_lager_commit = master
+
+PACKAGES += lager_amqp_backend
+pkg_lager_amqp_backend_name = lager_amqp_backend
+pkg_lager_amqp_backend_description = AMQP RabbitMQ Lager backend
+pkg_lager_amqp_backend_homepage = https://github.com/jbrisbin/lager_amqp_backend
+pkg_lager_amqp_backend_fetch = git
+pkg_lager_amqp_backend_repo = https://github.com/jbrisbin/lager_amqp_backend
+pkg_lager_amqp_backend_commit = master
+
+PACKAGES += lager_syslog
+pkg_lager_syslog_name = lager_syslog
+pkg_lager_syslog_description = Syslog backend for lager
+pkg_lager_syslog_homepage = https://github.com/erlang-lager/lager_syslog
+pkg_lager_syslog_fetch = git
+pkg_lager_syslog_repo = https://github.com/erlang-lager/lager_syslog
+pkg_lager_syslog_commit = master
+
+PACKAGES += lambdapad
+pkg_lambdapad_name = lambdapad
+pkg_lambdapad_description = Static site generator using Erlang. Yes, Erlang.
+pkg_lambdapad_homepage = https://github.com/gar1t/lambdapad
+pkg_lambdapad_fetch = git
+pkg_lambdapad_repo = https://github.com/gar1t/lambdapad
+pkg_lambdapad_commit = master
+
+PACKAGES += lasp
+pkg_lasp_name = lasp
+pkg_lasp_description = A Language for Distributed, Eventually Consistent Computations
+pkg_lasp_homepage = http://lasp-lang.org/
+pkg_lasp_fetch = git
+pkg_lasp_repo = https://github.com/lasp-lang/lasp
+pkg_lasp_commit = master
+
+PACKAGES += lasse
+pkg_lasse_name = lasse
+pkg_lasse_description = SSE handler for Cowboy
+pkg_lasse_homepage = https://github.com/inaka/lasse
+pkg_lasse_fetch = git
+pkg_lasse_repo = https://github.com/inaka/lasse
+pkg_lasse_commit = master
+
+PACKAGES += ldap
+pkg_ldap_name = ldap
+pkg_ldap_description = LDAP server written in Erlang
+pkg_ldap_homepage = https://github.com/spawnproc/ldap
+pkg_ldap_fetch = git
+pkg_ldap_repo = https://github.com/spawnproc/ldap
+pkg_ldap_commit = master
+
+PACKAGES += lethink
+pkg_lethink_name = lethink
+pkg_lethink_description = erlang driver for rethinkdb
+pkg_lethink_homepage = https://github.com/taybin/lethink
+pkg_lethink_fetch = git
+pkg_lethink_repo = https://github.com/taybin/lethink
+pkg_lethink_commit = master
+
+PACKAGES += lfe
+pkg_lfe_name = lfe
+pkg_lfe_description = Lisp Flavoured Erlang (LFE)
+pkg_lfe_homepage = https://github.com/rvirding/lfe
+pkg_lfe_fetch = git
+pkg_lfe_repo = https://github.com/rvirding/lfe
+pkg_lfe_commit = master
+
+PACKAGES += ling
+pkg_ling_name = ling
+pkg_ling_description = Erlang on Xen
+pkg_ling_homepage = https://github.com/cloudozer/ling
+pkg_ling_fetch = git
+pkg_ling_repo = https://github.com/cloudozer/ling
+pkg_ling_commit = master
+
+PACKAGES += live
+pkg_live_name = live
+pkg_live_description = Automated module and configuration reloader.
+pkg_live_homepage = http://ninenines.eu
+pkg_live_fetch = git
+pkg_live_repo = https://github.com/ninenines/live
+pkg_live_commit = master
+
+PACKAGES += lmq
+pkg_lmq_name = lmq
+pkg_lmq_description = Lightweight Message Queue
+pkg_lmq_homepage = https://github.com/iij/lmq
+pkg_lmq_fetch = git
+pkg_lmq_repo = https://github.com/iij/lmq
+pkg_lmq_commit = master
+
+PACKAGES += locker
+pkg_locker_name = locker
+pkg_locker_description = Atomic distributed 'check and set' for short-lived keys
+pkg_locker_homepage = https://github.com/wooga/locker
+pkg_locker_fetch = git
+pkg_locker_repo = https://github.com/wooga/locker
+pkg_locker_commit = master
+
+PACKAGES += locks
+pkg_locks_name = locks
+pkg_locks_description = A scalable, deadlock-resolving resource locker
+pkg_locks_homepage = https://github.com/uwiger/locks
+pkg_locks_fetch = git
+pkg_locks_repo = https://github.com/uwiger/locks
+pkg_locks_commit = master
+
+PACKAGES += log4erl
+pkg_log4erl_name = log4erl
+pkg_log4erl_description = A logger for erlang in the spirit of Log4J.
+pkg_log4erl_homepage = https://github.com/ahmednawras/log4erl
+pkg_log4erl_fetch = git
+pkg_log4erl_repo = https://github.com/ahmednawras/log4erl
+pkg_log4erl_commit = master
+
+PACKAGES += lol
+pkg_lol_name = lol
+pkg_lol_description = Lisp on erLang, and programming is fun again
+pkg_lol_homepage = https://github.com/b0oh/lol
+pkg_lol_fetch = git
+pkg_lol_repo = https://github.com/b0oh/lol
+pkg_lol_commit = master
+
+PACKAGES += lucid
+pkg_lucid_name = lucid
+pkg_lucid_description = HTTP/2 server written in Erlang
+pkg_lucid_homepage = https://github.com/tatsuhiro-t/lucid
+pkg_lucid_fetch = git
+pkg_lucid_repo = https://github.com/tatsuhiro-t/lucid
+pkg_lucid_commit = master
+
+PACKAGES += luerl
+pkg_luerl_name = luerl
+pkg_luerl_description = Lua in Erlang
+pkg_luerl_homepage = https://github.com/rvirding/luerl
+pkg_luerl_fetch = git
+pkg_luerl_repo = https://github.com/rvirding/luerl
+pkg_luerl_commit = develop
+
+PACKAGES += luwak
+pkg_luwak_name = luwak
+pkg_luwak_description = Large-object storage interface for Riak
+pkg_luwak_homepage = https://github.com/basho/luwak
+pkg_luwak_fetch = git
+pkg_luwak_repo = https://github.com/basho/luwak
+pkg_luwak_commit = master
+
+PACKAGES += lux
+pkg_lux_name = lux
+pkg_lux_description = Lux (LUcid eXpect scripting) simplifies test automation and provides an Expect-style execution of commands
+pkg_lux_homepage = https://github.com/hawk/lux
+pkg_lux_fetch = git
+pkg_lux_repo = https://github.com/hawk/lux
+pkg_lux_commit = master
+
+PACKAGES += machi
+pkg_machi_name = machi
+pkg_machi_description = Machi file store
+pkg_machi_homepage = https://github.com/basho/machi
+pkg_machi_fetch = git
+pkg_machi_repo = https://github.com/basho/machi
+pkg_machi_commit = master
+
+PACKAGES += mad
+pkg_mad_name = mad
+pkg_mad_description = Small and Fast Rebar Replacement
+pkg_mad_homepage = https://github.com/synrc/mad
+pkg_mad_fetch = git
+pkg_mad_repo = https://github.com/synrc/mad
+pkg_mad_commit = master
+
+PACKAGES += marina
+pkg_marina_name = marina
+pkg_marina_description = Non-blocking Erlang Cassandra CQL3 client
+pkg_marina_homepage = https://github.com/lpgauth/marina
+pkg_marina_fetch = git
+pkg_marina_repo = https://github.com/lpgauth/marina
+pkg_marina_commit = master
+
+PACKAGES += mavg
+pkg_mavg_name = mavg
+pkg_mavg_description = Erlang :: Exponential moving average library
+pkg_mavg_homepage = https://github.com/EchoTeam/mavg
+pkg_mavg_fetch = git
+pkg_mavg_repo = https://github.com/EchoTeam/mavg
+pkg_mavg_commit = master
+
+PACKAGES += mc_erl
+pkg_mc_erl_name = mc_erl
+pkg_mc_erl_description = mc-erl is a server for Minecraft 1.4.7 written in Erlang.
+pkg_mc_erl_homepage = https://github.com/clonejo/mc-erl
+pkg_mc_erl_fetch = git
+pkg_mc_erl_repo = https://github.com/clonejo/mc-erl
+pkg_mc_erl_commit = master
+
+PACKAGES += mcd
+pkg_mcd_name = mcd
+pkg_mcd_description = Fast memcached protocol client in pure Erlang
+pkg_mcd_homepage = https://github.com/EchoTeam/mcd
+pkg_mcd_fetch = git
+pkg_mcd_repo = https://github.com/EchoTeam/mcd
+pkg_mcd_commit = master
+
+PACKAGES += mcerlang
+pkg_mcerlang_name = mcerlang
+pkg_mcerlang_description = The McErlang model checker for Erlang
+pkg_mcerlang_homepage = https://github.com/fredlund/McErlang
+pkg_mcerlang_fetch = git
+pkg_mcerlang_repo = https://github.com/fredlund/McErlang
+pkg_mcerlang_commit = master
+
+PACKAGES += meck
+pkg_meck_name = meck
+pkg_meck_description = A mocking library for Erlang
+pkg_meck_homepage = https://github.com/eproxus/meck
+pkg_meck_fetch = git
+pkg_meck_repo = https://github.com/eproxus/meck
+pkg_meck_commit = master
+
+PACKAGES += mekao
+pkg_mekao_name = mekao
+pkg_mekao_description = SQL constructor
+pkg_mekao_homepage = https://github.com/ddosia/mekao
+pkg_mekao_fetch = git
+pkg_mekao_repo = https://github.com/ddosia/mekao
+pkg_mekao_commit = master
+
+PACKAGES += memo
+pkg_memo_name = memo
+pkg_memo_description = Erlang memoization server
+pkg_memo_homepage = https://github.com/tuncer/memo
+pkg_memo_fetch = git
+pkg_memo_repo = https://github.com/tuncer/memo
+pkg_memo_commit = master
+
+PACKAGES += merge_index
+pkg_merge_index_name = merge_index
+pkg_merge_index_description = MergeIndex is an Erlang library for storing ordered sets on disk. It is very similar to an SSTable (in Google's Bigtable) or an HFile (in Hadoop).
+pkg_merge_index_homepage = https://github.com/basho/merge_index
+pkg_merge_index_fetch = git
+pkg_merge_index_repo = https://github.com/basho/merge_index
+pkg_merge_index_commit = master
+
+PACKAGES += merl
+pkg_merl_name = merl
+pkg_merl_description = Metaprogramming in Erlang
+pkg_merl_homepage = https://github.com/richcarl/merl
+pkg_merl_fetch = git
+pkg_merl_repo = https://github.com/richcarl/merl
+pkg_merl_commit = master
+
+PACKAGES += mimerl
+pkg_mimerl_name = mimerl
+pkg_mimerl_description = library to handle mimetypes
+pkg_mimerl_homepage = https://github.com/benoitc/mimerl
+pkg_mimerl_fetch = git
+pkg_mimerl_repo = https://github.com/benoitc/mimerl
+pkg_mimerl_commit = master
+
+PACKAGES += mimetypes
+pkg_mimetypes_name = mimetypes
+pkg_mimetypes_description = Erlang MIME types library
+pkg_mimetypes_homepage = https://github.com/spawngrid/mimetypes
+pkg_mimetypes_fetch = git
+pkg_mimetypes_repo = https://github.com/spawngrid/mimetypes
+pkg_mimetypes_commit = master
+
+PACKAGES += mixer
+pkg_mixer_name = mixer
+pkg_mixer_description = Mix in functions from other modules
+pkg_mixer_homepage = https://github.com/chef/mixer
+pkg_mixer_fetch = git
+pkg_mixer_repo = https://github.com/chef/mixer
+pkg_mixer_commit = master
+
+PACKAGES += mochiweb
+pkg_mochiweb_name = mochiweb
+pkg_mochiweb_description = MochiWeb is an Erlang library for building lightweight HTTP servers.
+pkg_mochiweb_homepage = https://github.com/mochi/mochiweb
+pkg_mochiweb_fetch = git
+pkg_mochiweb_repo = https://github.com/mochi/mochiweb
+pkg_mochiweb_commit = master
+
+PACKAGES += mochiweb_xpath
+pkg_mochiweb_xpath_name = mochiweb_xpath
+pkg_mochiweb_xpath_description = XPath support for mochiweb's html parser
+pkg_mochiweb_xpath_homepage = https://github.com/retnuh/mochiweb_xpath
+pkg_mochiweb_xpath_fetch = git
+pkg_mochiweb_xpath_repo = https://github.com/retnuh/mochiweb_xpath
+pkg_mochiweb_xpath_commit = master
+
+PACKAGES += mockgyver
+pkg_mockgyver_name = mockgyver
+pkg_mockgyver_description = A mocking library for Erlang
+pkg_mockgyver_homepage = https://github.com/klajo/mockgyver
+pkg_mockgyver_fetch = git
+pkg_mockgyver_repo = https://github.com/klajo/mockgyver
+pkg_mockgyver_commit = master
+
+PACKAGES += modlib
+pkg_modlib_name = modlib
+pkg_modlib_description = Web framework based on Erlang's inets httpd
+pkg_modlib_homepage = https://github.com/gar1t/modlib
+pkg_modlib_fetch = git
+pkg_modlib_repo = https://github.com/gar1t/modlib
+pkg_modlib_commit = master
+
+PACKAGES += mongodb
+pkg_mongodb_name = mongodb
+pkg_mongodb_description = MongoDB driver for Erlang
+pkg_mongodb_homepage = https://github.com/comtihon/mongodb-erlang
+pkg_mongodb_fetch = git
+pkg_mongodb_repo = https://github.com/comtihon/mongodb-erlang
+pkg_mongodb_commit = master
+
+PACKAGES += mongooseim
+pkg_mongooseim_name = mongooseim
+pkg_mongooseim_description = Jabber / XMPP server with focus on performance and scalability, by Erlang Solutions
+pkg_mongooseim_homepage = https://www.erlang-solutions.com/products/mongooseim-massively-scalable-ejabberd-platform
+pkg_mongooseim_fetch = git
+pkg_mongooseim_repo = https://github.com/esl/MongooseIM
+pkg_mongooseim_commit = master
+
+PACKAGES += moyo
+pkg_moyo_name = moyo
+pkg_moyo_description = Erlang utility functions library
+pkg_moyo_homepage = https://github.com/dwango/moyo
+pkg_moyo_fetch = git
+pkg_moyo_repo = https://github.com/dwango/moyo
+pkg_moyo_commit = master
+
+PACKAGES += msgpack
+pkg_msgpack_name = msgpack
+pkg_msgpack_description = MessagePack (de)serializer implementation for Erlang
+pkg_msgpack_homepage = https://github.com/msgpack/msgpack-erlang
+pkg_msgpack_fetch = git
+pkg_msgpack_repo = https://github.com/msgpack/msgpack-erlang
+pkg_msgpack_commit = master
+
+PACKAGES += mu2
+pkg_mu2_name = mu2
+pkg_mu2_description = Erlang mutation testing tool
+pkg_mu2_homepage = https://github.com/ramsay-t/mu2
+pkg_mu2_fetch = git
+pkg_mu2_repo = https://github.com/ramsay-t/mu2
+pkg_mu2_commit = master
+
+PACKAGES += mustache
+pkg_mustache_name = mustache
+pkg_mustache_description = Mustache template engine for Erlang.
+pkg_mustache_homepage = https://github.com/mojombo/mustache.erl
+pkg_mustache_fetch = git
+pkg_mustache_repo = https://github.com/mojombo/mustache.erl
+pkg_mustache_commit = master
+
+PACKAGES += myproto
+pkg_myproto_name = myproto
+pkg_myproto_description = MySQL Server Protocol in Erlang
+pkg_myproto_homepage = https://github.com/altenwald/myproto
+pkg_myproto_fetch = git
+pkg_myproto_repo = https://github.com/altenwald/myproto
+pkg_myproto_commit = master
+
+PACKAGES += mysql
+pkg_mysql_name = mysql
+pkg_mysql_description = MySQL client library for Erlang/OTP
+pkg_mysql_homepage = https://github.com/mysql-otp/mysql-otp
+pkg_mysql_fetch = git
+pkg_mysql_repo = https://github.com/mysql-otp/mysql-otp
+pkg_mysql_commit = 1.5.1
+
+PACKAGES += n2o
+pkg_n2o_name = n2o
+pkg_n2o_description = WebSocket Application Server
+pkg_n2o_homepage = https://github.com/5HT/n2o
+pkg_n2o_fetch = git
+pkg_n2o_repo = https://github.com/5HT/n2o
+pkg_n2o_commit = master
+
+PACKAGES += nat_upnp
+pkg_nat_upnp_name = nat_upnp
+pkg_nat_upnp_description = Erlang library to map your internal port to an external one using UPnP IGD
+pkg_nat_upnp_homepage = https://github.com/benoitc/nat_upnp
+pkg_nat_upnp_fetch = git
+pkg_nat_upnp_repo = https://github.com/benoitc/nat_upnp
+pkg_nat_upnp_commit = master
+
+PACKAGES += neo4j
+pkg_neo4j_name = neo4j
+pkg_neo4j_description = Erlang client library for Neo4J.
+pkg_neo4j_homepage = https://github.com/dmitriid/neo4j-erlang
+pkg_neo4j_fetch = git
+pkg_neo4j_repo = https://github.com/dmitriid/neo4j-erlang
+pkg_neo4j_commit = master
+
+PACKAGES += neotoma
+pkg_neotoma_name = neotoma
+pkg_neotoma_description = Erlang library and packrat parser-generator for parsing expression grammars.
+pkg_neotoma_homepage = https://github.com/seancribbs/neotoma
+pkg_neotoma_fetch = git
+pkg_neotoma_repo = https://github.com/seancribbs/neotoma
+pkg_neotoma_commit = master
+
+PACKAGES += newrelic
+pkg_newrelic_name = newrelic
+pkg_newrelic_description = Erlang library for sending metrics to New Relic
+pkg_newrelic_homepage = https://github.com/wooga/newrelic-erlang
+pkg_newrelic_fetch = git
+pkg_newrelic_repo = https://github.com/wooga/newrelic-erlang
+pkg_newrelic_commit = master
+
+PACKAGES += nifty
+pkg_nifty_name = nifty
+pkg_nifty_description = Erlang NIF wrapper generator
+pkg_nifty_homepage = https://github.com/parapluu/nifty
+pkg_nifty_fetch = git
+pkg_nifty_repo = https://github.com/parapluu/nifty
+pkg_nifty_commit = master
+
+PACKAGES += nitrogen_core
+pkg_nitrogen_core_name = nitrogen_core
+pkg_nitrogen_core_description = The core Nitrogen library.
+pkg_nitrogen_core_homepage = http://nitrogenproject.com/
+pkg_nitrogen_core_fetch = git
+pkg_nitrogen_core_repo = https://github.com/nitrogen/nitrogen_core
+pkg_nitrogen_core_commit = master
+
+PACKAGES += nkbase
+pkg_nkbase_name = nkbase
+pkg_nkbase_description = NkBASE distributed database
+pkg_nkbase_homepage = https://github.com/Nekso/nkbase
+pkg_nkbase_fetch = git
+pkg_nkbase_repo = https://github.com/Nekso/nkbase
+pkg_nkbase_commit = develop
+
+PACKAGES += nkdocker
+pkg_nkdocker_name = nkdocker
+pkg_nkdocker_description = Erlang Docker client
+pkg_nkdocker_homepage = https://github.com/Nekso/nkdocker
+pkg_nkdocker_fetch = git
+pkg_nkdocker_repo = https://github.com/Nekso/nkdocker
+pkg_nkdocker_commit = master
+
+PACKAGES += nkpacket
+pkg_nkpacket_name = nkpacket
+pkg_nkpacket_description = Generic Erlang transport layer
+pkg_nkpacket_homepage = https://github.com/Nekso/nkpacket
+pkg_nkpacket_fetch = git
+pkg_nkpacket_repo = https://github.com/Nekso/nkpacket
+pkg_nkpacket_commit = master
+
+PACKAGES += nksip
+pkg_nksip_name = nksip
+pkg_nksip_description = Erlang SIP application server
+pkg_nksip_homepage = https://github.com/kalta/nksip
+pkg_nksip_fetch = git
+pkg_nksip_repo = https://github.com/kalta/nksip
+pkg_nksip_commit = master
+
+PACKAGES += nodefinder
+pkg_nodefinder_name = nodefinder
+pkg_nodefinder_description = automatic node discovery via UDP multicast
+pkg_nodefinder_homepage = https://github.com/erlanger/nodefinder
+pkg_nodefinder_fetch = git
+pkg_nodefinder_repo = https://github.com/okeuday/nodefinder
+pkg_nodefinder_commit = master
+
+PACKAGES += nprocreg
+pkg_nprocreg_name = nprocreg
+pkg_nprocreg_description = Minimal Distributed Erlang Process Registry
+pkg_nprocreg_homepage = http://nitrogenproject.com/
+pkg_nprocreg_fetch = git
+pkg_nprocreg_repo = https://github.com/nitrogen/nprocreg
+pkg_nprocreg_commit = master
+
+PACKAGES += oauth
+pkg_oauth_name = oauth
+pkg_oauth_description = An Erlang OAuth 1.0 implementation
+pkg_oauth_homepage = https://github.com/tim/erlang-oauth
+pkg_oauth_fetch = git
+pkg_oauth_repo = https://github.com/tim/erlang-oauth
+pkg_oauth_commit = master
+
+PACKAGES += oauth2
+pkg_oauth2_name = oauth2
+pkg_oauth2_description = Erlang OAuth2 implementation
+pkg_oauth2_homepage = https://github.com/kivra/oauth2
+pkg_oauth2_fetch = git
+pkg_oauth2_repo = https://github.com/kivra/oauth2
+pkg_oauth2_commit = master
+
+PACKAGES += observer_cli
+pkg_observer_cli_name = observer_cli
+pkg_observer_cli_description = Visualize Erlang/Elixir Nodes On The Command Line
+pkg_observer_cli_homepage = http://zhongwencool.github.io/observer_cli
+pkg_observer_cli_fetch = git
+pkg_observer_cli_repo = https://github.com/zhongwencool/observer_cli
+pkg_observer_cli_commit = master
+
+PACKAGES += octopus
+pkg_octopus_name = octopus
+pkg_octopus_description = Small and flexible pool manager written in Erlang
+pkg_octopus_homepage = https://github.com/erlangbureau/octopus
+pkg_octopus_fetch = git
+pkg_octopus_repo = https://github.com/erlangbureau/octopus
+pkg_octopus_commit = master
+
+PACKAGES += of_protocol
+pkg_of_protocol_name = of_protocol
+pkg_of_protocol_description = OpenFlow Protocol Library for Erlang
+pkg_of_protocol_homepage = https://github.com/FlowForwarding/of_protocol
+pkg_of_protocol_fetch = git
+pkg_of_protocol_repo = https://github.com/FlowForwarding/of_protocol
+pkg_of_protocol_commit = master
+
+PACKAGES += opencouch
+pkg_opencouch_name = couch
+pkg_opencouch_description = An embeddable document-oriented database compatible with Apache CouchDB
+pkg_opencouch_homepage = https://github.com/benoitc/opencouch
+pkg_opencouch_fetch = git
+pkg_opencouch_repo = https://github.com/benoitc/opencouch
+pkg_opencouch_commit = master
+
+PACKAGES += openflow
+pkg_openflow_name = openflow
+pkg_openflow_description = An OpenFlow controller written in pure erlang
+pkg_openflow_homepage = https://github.com/renatoaguiar/erlang-openflow
+pkg_openflow_fetch = git
+pkg_openflow_repo = https://github.com/renatoaguiar/erlang-openflow
+pkg_openflow_commit = master
+
+PACKAGES += openid
+pkg_openid_name = openid
+pkg_openid_description = Erlang OpenID
+pkg_openid_homepage = https://github.com/brendonh/erl_openid
+pkg_openid_fetch = git
+pkg_openid_repo = https://github.com/brendonh/erl_openid
+pkg_openid_commit = master
+
+PACKAGES += openpoker
+pkg_openpoker_name = openpoker
+pkg_openpoker_description = Genesis Texas hold'em Game Server
+pkg_openpoker_homepage = https://github.com/hpyhacking/openpoker
+pkg_openpoker_fetch = git
+pkg_openpoker_repo = https://github.com/hpyhacking/openpoker
+pkg_openpoker_commit = master
+
+PACKAGES += otpbp
+pkg_otpbp_name = otpbp
+pkg_otpbp_description = Parse transform for using new OTP functions in old Erlang/OTP releases (R15, R16, 17, 18, 19)
+pkg_otpbp_homepage = https://github.com/Ledest/otpbp
+pkg_otpbp_fetch = git
+pkg_otpbp_repo = https://github.com/Ledest/otpbp
+pkg_otpbp_commit = master
+
+PACKAGES += pal
+pkg_pal_name = pal
+pkg_pal_description = Pragmatic Authentication Library
+pkg_pal_homepage = https://github.com/manifest/pal
+pkg_pal_fetch = git
+pkg_pal_repo = https://github.com/manifest/pal
+pkg_pal_commit = master
+
+PACKAGES += parse_trans
+pkg_parse_trans_name = parse_trans
+pkg_parse_trans_description = Parse transform utilities for Erlang
+pkg_parse_trans_homepage = https://github.com/uwiger/parse_trans
+pkg_parse_trans_fetch = git
+pkg_parse_trans_repo = https://github.com/uwiger/parse_trans
+pkg_parse_trans_commit = master
+
+PACKAGES += parsexml
+pkg_parsexml_name = parsexml
+pkg_parsexml_description = Simple DOM XML parser with convenient and very simple API
+pkg_parsexml_homepage = https://github.com/maxlapshin/parsexml
+pkg_parsexml_fetch = git
+pkg_parsexml_repo = https://github.com/maxlapshin/parsexml
+pkg_parsexml_commit = master
+
+PACKAGES += partisan
+pkg_partisan_name = partisan
+pkg_partisan_description = High-performance, high-scalability distributed computing with Erlang and Elixir.
+pkg_partisan_homepage = http://partisan.cloud
+pkg_partisan_fetch = git
+pkg_partisan_repo = https://github.com/lasp-lang/partisan
+pkg_partisan_commit = master
+
+PACKAGES += pegjs
+pkg_pegjs_name = pegjs
+pkg_pegjs_description = An implementation of PEG.js grammar for Erlang.
+pkg_pegjs_homepage = https://github.com/dmitriid/pegjs
+pkg_pegjs_fetch = git
+pkg_pegjs_repo = https://github.com/dmitriid/pegjs
+pkg_pegjs_commit = master
+
+PACKAGES += percept2
+pkg_percept2_name = percept2
+pkg_percept2_description = Concurrent profiling tool for Erlang
+pkg_percept2_homepage = https://github.com/huiqing/percept2
+pkg_percept2_fetch = git
+pkg_percept2_repo = https://github.com/huiqing/percept2
+pkg_percept2_commit = master
+
+PACKAGES += pgo
+pkg_pgo_name = pgo
+pkg_pgo_description = Erlang Postgres client and connection pool
+pkg_pgo_homepage = https://github.com/erleans/pgo.git
+pkg_pgo_fetch = git
+pkg_pgo_repo = https://github.com/erleans/pgo.git
+pkg_pgo_commit = master
+
+PACKAGES += pgsql
+pkg_pgsql_name = pgsql
+pkg_pgsql_description = Erlang PostgreSQL driver
+pkg_pgsql_homepage = https://github.com/semiocast/pgsql
+pkg_pgsql_fetch = git
+pkg_pgsql_repo = https://github.com/semiocast/pgsql
+pkg_pgsql_commit = master
+
+PACKAGES += pkgx
+pkg_pkgx_name = pkgx
+pkg_pkgx_description = Build .deb packages from Erlang releases
+pkg_pkgx_homepage = https://github.com/arjan/pkgx
+pkg_pkgx_fetch = git
+pkg_pkgx_repo = https://github.com/arjan/pkgx
+pkg_pkgx_commit = master
+
+PACKAGES += pkt
+pkg_pkt_name = pkt
+pkg_pkt_description = Erlang network protocol library
+pkg_pkt_homepage = https://github.com/msantos/pkt
+pkg_pkt_fetch = git
+pkg_pkt_repo = https://github.com/msantos/pkt
+pkg_pkt_commit = master
+
+PACKAGES += plain_fsm
+pkg_plain_fsm_name = plain_fsm
+pkg_plain_fsm_description = A behaviour/support library for writing plain Erlang FSMs.
+pkg_plain_fsm_homepage = https://github.com/uwiger/plain_fsm
+pkg_plain_fsm_fetch = git
+pkg_plain_fsm_repo = https://github.com/uwiger/plain_fsm
+pkg_plain_fsm_commit = master
+
+PACKAGES += plumtree
+pkg_plumtree_name = plumtree
+pkg_plumtree_description = Epidemic Broadcast Trees
+pkg_plumtree_homepage = https://github.com/helium/plumtree
+pkg_plumtree_fetch = git
+pkg_plumtree_repo = https://github.com/helium/plumtree
+pkg_plumtree_commit = master
+
+PACKAGES += pmod_transform
+pkg_pmod_transform_name = pmod_transform
+pkg_pmod_transform_description = Parse transform for parameterized modules
+pkg_pmod_transform_homepage = https://github.com/erlang/pmod_transform
+pkg_pmod_transform_fetch = git
+pkg_pmod_transform_repo = https://github.com/erlang/pmod_transform
+pkg_pmod_transform_commit = master
+
+PACKAGES += pobox
+pkg_pobox_name = pobox
+pkg_pobox_description = External buffer processes to protect against mailbox overflow in Erlang
+pkg_pobox_homepage = https://github.com/ferd/pobox
+pkg_pobox_fetch = git
+pkg_pobox_repo = https://github.com/ferd/pobox
+pkg_pobox_commit = master
+
+PACKAGES += ponos
+pkg_ponos_name = ponos
+pkg_ponos_description = ponos is a simple yet powerful load generator written in erlang
+pkg_ponos_homepage = https://github.com/klarna/ponos
+pkg_ponos_fetch = git
+pkg_ponos_repo = https://github.com/klarna/ponos
+pkg_ponos_commit = master
+
+PACKAGES += poolboy
+pkg_poolboy_name = poolboy
+pkg_poolboy_description = A hunky Erlang worker pool factory
+pkg_poolboy_homepage = https://github.com/devinus/poolboy
+pkg_poolboy_fetch = git
+pkg_poolboy_repo = https://github.com/devinus/poolboy
+pkg_poolboy_commit = master
+
+PACKAGES += pooler
+pkg_pooler_name = pooler
+pkg_pooler_description = An OTP Process Pool Application
+pkg_pooler_homepage = https://github.com/seth/pooler
+pkg_pooler_fetch = git
+pkg_pooler_repo = https://github.com/seth/pooler
+pkg_pooler_commit = master
+
+PACKAGES += pqueue
+pkg_pqueue_name = pqueue
+pkg_pqueue_description = Erlang Priority Queues
+pkg_pqueue_homepage = https://github.com/okeuday/pqueue
+pkg_pqueue_fetch = git
+pkg_pqueue_repo = https://github.com/okeuday/pqueue
+pkg_pqueue_commit = master
+
+PACKAGES += procket
+pkg_procket_name = procket
+pkg_procket_description = Erlang interface to low level socket operations
+pkg_procket_homepage = http://blog.listincomprehension.com/search/label/procket
+pkg_procket_fetch = git
+pkg_procket_repo = https://github.com/msantos/procket
+pkg_procket_commit = master
+
+PACKAGES += prometheus
+pkg_prometheus_name = prometheus
+pkg_prometheus_description = Prometheus.io client in Erlang
+pkg_prometheus_homepage = https://github.com/deadtrickster/prometheus.erl
+pkg_prometheus_fetch = git
+pkg_prometheus_repo = https://github.com/deadtrickster/prometheus.erl
+pkg_prometheus_commit = master
+
+PACKAGES += prop
+pkg_prop_name = prop
+pkg_prop_description = An Erlang code scaffolding and generator system.
+pkg_prop_homepage = https://github.com/nuex/prop
+pkg_prop_fetch = git
+pkg_prop_repo = https://github.com/nuex/prop
+pkg_prop_commit = master
+
+PACKAGES += proper
+pkg_proper_name = proper
+pkg_proper_description = PropEr: a QuickCheck-inspired property-based testing tool for Erlang.
+pkg_proper_homepage = http://proper.softlab.ntua.gr
+pkg_proper_fetch = git
+pkg_proper_repo = https://github.com/manopapad/proper
+pkg_proper_commit = master
+
+PACKAGES += props
+pkg_props_name = props
+pkg_props_description = Property structure library
+pkg_props_homepage = https://github.com/greyarea/props
+pkg_props_fetch = git
+pkg_props_repo = https://github.com/greyarea/props
+pkg_props_commit = master
+
+PACKAGES += protobuffs
+pkg_protobuffs_name = protobuffs
+pkg_protobuffs_description = An implementation of Google's Protocol Buffers for Erlang, based on ngerakines/erlang_protobuffs.
+pkg_protobuffs_homepage = https://github.com/basho/erlang_protobuffs
+pkg_protobuffs_fetch = git
+pkg_protobuffs_repo = https://github.com/basho/erlang_protobuffs
+pkg_protobuffs_commit = master
+
+PACKAGES += psycho
+pkg_psycho_name = psycho
+pkg_psycho_description = HTTP server that provides a WSGI-like interface for applications and middleware.
+pkg_psycho_homepage = https://github.com/gar1t/psycho
+pkg_psycho_fetch = git
+pkg_psycho_repo = https://github.com/gar1t/psycho
+pkg_psycho_commit = master
+
+PACKAGES += purity
+pkg_purity_name = purity
+pkg_purity_description = A side-effect analyzer for Erlang
+pkg_purity_homepage = https://github.com/mpitid/purity
+pkg_purity_fetch = git
+pkg_purity_repo = https://github.com/mpitid/purity
+pkg_purity_commit = master
+
+PACKAGES += push_service
+pkg_push_service_name = push_service
+pkg_push_service_description = Push service
+pkg_push_service_homepage = https://github.com/hairyhum/push_service
+pkg_push_service_fetch = git
+pkg_push_service_repo = https://github.com/hairyhum/push_service
+pkg_push_service_commit = master
+
+PACKAGES += qdate
+pkg_qdate_name = qdate
+pkg_qdate_description = Date, time, and timezone parsing, formatting, and conversion for Erlang.
+pkg_qdate_homepage = https://github.com/choptastic/qdate
+pkg_qdate_fetch = git
+pkg_qdate_repo = https://github.com/choptastic/qdate
+pkg_qdate_commit = master
+
+PACKAGES += qrcode
+pkg_qrcode_name = qrcode
+pkg_qrcode_description = QR Code encoder in Erlang
+pkg_qrcode_homepage = https://github.com/komone/qrcode
+pkg_qrcode_fetch = git
+pkg_qrcode_repo = https://github.com/komone/qrcode
+pkg_qrcode_commit = master
+
+PACKAGES += quest
+pkg_quest_name = quest
+pkg_quest_description = Learn Erlang through this set of challenges. An interactive system for getting to know Erlang.
+pkg_quest_homepage = https://github.com/eriksoe/ErlangQuest
+pkg_quest_fetch = git
+pkg_quest_repo = https://github.com/eriksoe/ErlangQuest
+pkg_quest_commit = master
+
+PACKAGES += quickrand
+pkg_quickrand_name = quickrand
+pkg_quickrand_description = Quick Erlang Random Number Generation
+pkg_quickrand_homepage = https://github.com/okeuday/quickrand
+pkg_quickrand_fetch = git
+pkg_quickrand_repo = https://github.com/okeuday/quickrand
+pkg_quickrand_commit = master
+
+PACKAGES += rabbit
+pkg_rabbit_name = rabbit
+pkg_rabbit_description = RabbitMQ Server
+pkg_rabbit_homepage = https://www.rabbitmq.com/
+pkg_rabbit_fetch = git
+pkg_rabbit_repo = https://github.com/rabbitmq/rabbitmq-server.git
+pkg_rabbit_commit = master
+
+PACKAGES += rabbit_exchange_type_riak
+pkg_rabbit_exchange_type_riak_name = rabbit_exchange_type_riak
+pkg_rabbit_exchange_type_riak_description = Custom RabbitMQ exchange type for sticking messages in Riak
+pkg_rabbit_exchange_type_riak_homepage = https://github.com/jbrisbin/riak-exchange
+pkg_rabbit_exchange_type_riak_fetch = git
+pkg_rabbit_exchange_type_riak_repo = https://github.com/jbrisbin/riak-exchange
+pkg_rabbit_exchange_type_riak_commit = master
+
+PACKAGES += rack
+pkg_rack_name = rack
+pkg_rack_description = Rack handler for erlang
+pkg_rack_homepage = https://github.com/erlyvideo/rack
+pkg_rack_fetch = git
+pkg_rack_repo = https://github.com/erlyvideo/rack
+pkg_rack_commit = master
+
+PACKAGES += radierl
+pkg_radierl_name = radierl
+pkg_radierl_description = RADIUS protocol stack implemented in Erlang.
+pkg_radierl_homepage = https://github.com/vances/radierl
+pkg_radierl_fetch = git
+pkg_radierl_repo = https://github.com/vances/radierl
+pkg_radierl_commit = master
+
+PACKAGES += rafter
+pkg_rafter_name = rafter
+pkg_rafter_description = An Erlang library application which implements the Raft consensus protocol
+pkg_rafter_homepage = https://github.com/andrewjstone/rafter
+pkg_rafter_fetch = git
+pkg_rafter_repo = https://github.com/andrewjstone/rafter
+pkg_rafter_commit = master
+
+PACKAGES += ranch
+pkg_ranch_name = ranch
+pkg_ranch_description = Socket acceptor pool for TCP protocols.
+pkg_ranch_homepage = http://ninenines.eu
+pkg_ranch_fetch = git
+pkg_ranch_repo = https://github.com/ninenines/ranch
+pkg_ranch_commit = 1.2.1
+
+PACKAGES += rbeacon
+pkg_rbeacon_name = rbeacon
+pkg_rbeacon_description = LAN discovery and presence in Erlang.
+pkg_rbeacon_homepage = https://github.com/refuge/rbeacon
+pkg_rbeacon_fetch = git
+pkg_rbeacon_repo = https://github.com/refuge/rbeacon
+pkg_rbeacon_commit = master
+
+PACKAGES += rebar
+pkg_rebar_name = rebar
+pkg_rebar_description = Erlang build tool that makes it easy to compile and test Erlang applications, port drivers and releases.
+pkg_rebar_homepage = http://www.rebar3.org
+pkg_rebar_fetch = git
+pkg_rebar_repo = https://github.com/rebar/rebar3
+pkg_rebar_commit = master
+
+PACKAGES += rebus
+pkg_rebus_name = rebus
+pkg_rebus_description = A stupid simple, internal, pub/sub event bus written in and for Erlang.
+pkg_rebus_homepage = https://github.com/olle/rebus
+pkg_rebus_fetch = git
+pkg_rebus_repo = https://github.com/olle/rebus
+pkg_rebus_commit = master
+
+PACKAGES += rec2json
+pkg_rec2json_name = rec2json
+pkg_rec2json_description = Compile erlang record definitions into modules to convert them to/from json easily.
+pkg_rec2json_homepage = https://github.com/lordnull/rec2json
+pkg_rec2json_fetch = git
+pkg_rec2json_repo = https://github.com/lordnull/rec2json
+pkg_rec2json_commit = master
+
+PACKAGES += recon
+pkg_recon_name = recon
+pkg_recon_description = Collection of functions and scripts to debug Erlang in production.
+pkg_recon_homepage = https://github.com/ferd/recon
+pkg_recon_fetch = git
+pkg_recon_repo = https://github.com/ferd/recon
+pkg_recon_commit = master
+
+PACKAGES += record_info
+pkg_record_info_name = record_info
+pkg_record_info_description = Convert between record and proplist
+pkg_record_info_homepage = https://github.com/bipthelin/erlang-record_info
+pkg_record_info_fetch = git
+pkg_record_info_repo = https://github.com/bipthelin/erlang-record_info
+pkg_record_info_commit = master
+
+PACKAGES += redgrid
+pkg_redgrid_name = redgrid
+pkg_redgrid_description = automatic Erlang node discovery via redis
+pkg_redgrid_homepage = https://github.com/jkvor/redgrid
+pkg_redgrid_fetch = git
+pkg_redgrid_repo = https://github.com/jkvor/redgrid
+pkg_redgrid_commit = master
+
+PACKAGES += redo
+pkg_redo_name = redo
+pkg_redo_description = pipelined erlang redis client
+pkg_redo_homepage = https://github.com/jkvor/redo
+pkg_redo_fetch = git
+pkg_redo_repo = https://github.com/jkvor/redo
+pkg_redo_commit = master
+
+PACKAGES += reload_mk
+pkg_reload_mk_name = reload_mk
+pkg_reload_mk_description = Live reload plugin for erlang.mk.
+pkg_reload_mk_homepage = https://github.com/bullno1/reload.mk
+pkg_reload_mk_fetch = git
+pkg_reload_mk_repo = https://github.com/bullno1/reload.mk
+pkg_reload_mk_commit = master
+
+PACKAGES += reltool_util
+pkg_reltool_util_name = reltool_util
+pkg_reltool_util_description = Erlang reltool utility functionality application
+pkg_reltool_util_homepage = https://github.com/okeuday/reltool_util
+pkg_reltool_util_fetch = git
+pkg_reltool_util_repo = https://github.com/okeuday/reltool_util
+pkg_reltool_util_commit = master
+
+PACKAGES += relx
+pkg_relx_name = relx
+pkg_relx_description = Sane, simple release creation for Erlang
+pkg_relx_homepage = https://github.com/erlware/relx
+pkg_relx_fetch = git
+pkg_relx_repo = https://github.com/erlware/relx
+pkg_relx_commit = master
+
+PACKAGES += resource_discovery
+pkg_resource_discovery_name = resource_discovery
+pkg_resource_discovery_description = An application used to dynamically discover resources present in an Erlang node cluster.
+pkg_resource_discovery_homepage = http://erlware.org/
+pkg_resource_discovery_fetch = git
+pkg_resource_discovery_repo = https://github.com/erlware/resource_discovery
+pkg_resource_discovery_commit = master
+
+PACKAGES += restc
+pkg_restc_name = restc
+pkg_restc_description = Erlang Rest Client
+pkg_restc_homepage = https://github.com/kivra/restclient
+pkg_restc_fetch = git
+pkg_restc_repo = https://github.com/kivra/restclient
+pkg_restc_commit = master
+
+PACKAGES += rfc4627_jsonrpc
+pkg_rfc4627_jsonrpc_name = rfc4627_jsonrpc
+pkg_rfc4627_jsonrpc_description = Erlang RFC4627 (JSON) codec and JSON-RPC server implementation.
+pkg_rfc4627_jsonrpc_homepage = https://github.com/tonyg/erlang-rfc4627
+pkg_rfc4627_jsonrpc_fetch = git
+pkg_rfc4627_jsonrpc_repo = https://github.com/tonyg/erlang-rfc4627
+pkg_rfc4627_jsonrpc_commit = master
+
+PACKAGES += riak_control
+pkg_riak_control_name = riak_control
+pkg_riak_control_description = Webmachine-based administration interface for Riak.
+pkg_riak_control_homepage = https://github.com/basho/riak_control
+pkg_riak_control_fetch = git
+pkg_riak_control_repo = https://github.com/basho/riak_control
+pkg_riak_control_commit = master
+
+PACKAGES += riak_core
+pkg_riak_core_name = riak_core
+pkg_riak_core_description = Distributed systems infrastructure used by Riak.
+pkg_riak_core_homepage = https://github.com/basho/riak_core
+pkg_riak_core_fetch = git
+pkg_riak_core_repo = https://github.com/basho/riak_core
+pkg_riak_core_commit = master
+
+PACKAGES += riak_dt
+pkg_riak_dt_name = riak_dt
+pkg_riak_dt_description = Convergent replicated datatypes in Erlang
+pkg_riak_dt_homepage = https://github.com/basho/riak_dt
+pkg_riak_dt_fetch = git
+pkg_riak_dt_repo = https://github.com/basho/riak_dt
+pkg_riak_dt_commit = master
+
+PACKAGES += riak_ensemble
+pkg_riak_ensemble_name = riak_ensemble
+pkg_riak_ensemble_description = Multi-Paxos framework in Erlang
+pkg_riak_ensemble_homepage = https://github.com/basho/riak_ensemble
+pkg_riak_ensemble_fetch = git
+pkg_riak_ensemble_repo = https://github.com/basho/riak_ensemble
+pkg_riak_ensemble_commit = master
+
+PACKAGES += riak_kv
+pkg_riak_kv_name = riak_kv
+pkg_riak_kv_description = Riak Key/Value Store
+pkg_riak_kv_homepage = https://github.com/basho/riak_kv
+pkg_riak_kv_fetch = git
+pkg_riak_kv_repo = https://github.com/basho/riak_kv
+pkg_riak_kv_commit = master
+
+PACKAGES += riak_pg
+pkg_riak_pg_name = riak_pg
+pkg_riak_pg_description = Distributed process groups with riak_core.
+pkg_riak_pg_homepage = https://github.com/cmeiklejohn/riak_pg
+pkg_riak_pg_fetch = git
+pkg_riak_pg_repo = https://github.com/cmeiklejohn/riak_pg
+pkg_riak_pg_commit = master
+
+PACKAGES += riak_pipe
+pkg_riak_pipe_name = riak_pipe
+pkg_riak_pipe_description = Riak Pipelines
+pkg_riak_pipe_homepage = https://github.com/basho/riak_pipe
+pkg_riak_pipe_fetch = git
+pkg_riak_pipe_repo = https://github.com/basho/riak_pipe
+pkg_riak_pipe_commit = master
+
+PACKAGES += riak_sysmon
+pkg_riak_sysmon_name = riak_sysmon
+pkg_riak_sysmon_description = Simple OTP app for managing Erlang VM system_monitor event messages
+pkg_riak_sysmon_homepage = https://github.com/basho/riak_sysmon
+pkg_riak_sysmon_fetch = git
+pkg_riak_sysmon_repo = https://github.com/basho/riak_sysmon
+pkg_riak_sysmon_commit = master
+
+PACKAGES += riak_test
+pkg_riak_test_name = riak_test
+pkg_riak_test_description = I'm in your cluster, testing your riaks
+pkg_riak_test_homepage = https://github.com/basho/riak_test
+pkg_riak_test_fetch = git
+pkg_riak_test_repo = https://github.com/basho/riak_test
+pkg_riak_test_commit = master
+
+PACKAGES += riakc
+pkg_riakc_name = riakc
+pkg_riakc_description = Erlang clients for Riak.
+pkg_riakc_homepage = https://github.com/basho/riak-erlang-client
+pkg_riakc_fetch = git
+pkg_riakc_repo = https://github.com/basho/riak-erlang-client
+pkg_riakc_commit = master
+
+PACKAGES += riakhttpc
+pkg_riakhttpc_name = riakhttpc
+pkg_riakhttpc_description = Riak Erlang client using the HTTP interface
+pkg_riakhttpc_homepage = https://github.com/basho/riak-erlang-http-client
+pkg_riakhttpc_fetch = git
+pkg_riakhttpc_repo = https://github.com/basho/riak-erlang-http-client
+pkg_riakhttpc_commit = master
+
+PACKAGES += riaknostic
+pkg_riaknostic_name = riaknostic
+pkg_riaknostic_description = A diagnostic tool for Riak installations, to find common errors asap
+pkg_riaknostic_homepage = https://github.com/basho/riaknostic
+pkg_riaknostic_fetch = git
+pkg_riaknostic_repo = https://github.com/basho/riaknostic
+pkg_riaknostic_commit = master
+
+PACKAGES += riakpool
+pkg_riakpool_name = riakpool
+pkg_riakpool_description = erlang riak client pool
+pkg_riakpool_homepage = https://github.com/dweldon/riakpool
+pkg_riakpool_fetch = git
+pkg_riakpool_repo = https://github.com/dweldon/riakpool
+pkg_riakpool_commit = master
+
+PACKAGES += rivus_cep
+pkg_rivus_cep_name = rivus_cep
+pkg_rivus_cep_description = Complex event processing in Erlang
+pkg_rivus_cep_homepage = https://github.com/vascokk/rivus_cep
+pkg_rivus_cep_fetch = git
+pkg_rivus_cep_repo = https://github.com/vascokk/rivus_cep
+pkg_rivus_cep_commit = master
+
+PACKAGES += rlimit
+pkg_rlimit_name = rlimit
+pkg_rlimit_description = Magnus Klaar's rate limiter code from etorrent
+pkg_rlimit_homepage = https://github.com/jlouis/rlimit
+pkg_rlimit_fetch = git
+pkg_rlimit_repo = https://github.com/jlouis/rlimit
+pkg_rlimit_commit = master
+
+PACKAGES += rust_mk
+pkg_rust_mk_name = rust_mk
+pkg_rust_mk_description = Build Rust crates in an Erlang application
+pkg_rust_mk_homepage = https://github.com/goertzenator/rust.mk
+pkg_rust_mk_fetch = git
+pkg_rust_mk_repo = https://github.com/goertzenator/rust.mk
+pkg_rust_mk_commit = master
+
+PACKAGES += safetyvalve
+pkg_safetyvalve_name = safetyvalve
+pkg_safetyvalve_description = A safety valve for your erlang node
+pkg_safetyvalve_homepage = https://github.com/jlouis/safetyvalve
+pkg_safetyvalve_fetch = git
+pkg_safetyvalve_repo = https://github.com/jlouis/safetyvalve
+pkg_safetyvalve_commit = master
+
+PACKAGES += seestar
+pkg_seestar_name = seestar
+pkg_seestar_description = The Erlang client for Cassandra 1.2+ binary protocol
+pkg_seestar_homepage = https://github.com/iamaleksey/seestar
+pkg_seestar_fetch = git
+pkg_seestar_repo = https://github.com/iamaleksey/seestar
+pkg_seestar_commit = master
+
+PACKAGES += service
+pkg_service_name = service
+pkg_service_description = A minimal Erlang behavior for creating CloudI internal services
+pkg_service_homepage = http://cloudi.org/
+pkg_service_fetch = git
+pkg_service_repo = https://github.com/CloudI/service
+pkg_service_commit = master
+
+PACKAGES += setup
+pkg_setup_name = setup
+pkg_setup_description = Generic setup utility for Erlang-based systems
+pkg_setup_homepage = https://github.com/uwiger/setup
+pkg_setup_fetch = git
+pkg_setup_repo = https://github.com/uwiger/setup
+pkg_setup_commit = master
+
+PACKAGES += sext
+pkg_sext_name = sext
+pkg_sext_description = Sortable Erlang Term Serialization
+pkg_sext_homepage = https://github.com/uwiger/sext
+pkg_sext_fetch = git
+pkg_sext_repo = https://github.com/uwiger/sext
+pkg_sext_commit = master
+
+PACKAGES += sfmt
+pkg_sfmt_name = sfmt
+pkg_sfmt_description = SFMT pseudo random number generator for Erlang.
+pkg_sfmt_homepage = https://github.com/jj1bdx/sfmt-erlang
+pkg_sfmt_fetch = git
+pkg_sfmt_repo = https://github.com/jj1bdx/sfmt-erlang
+pkg_sfmt_commit = master
+
+PACKAGES += sgte
+pkg_sgte_name = sgte
+pkg_sgte_description = A simple Erlang Template Engine
+pkg_sgte_homepage = https://github.com/filippo/sgte
+pkg_sgte_fetch = git
+pkg_sgte_repo = https://github.com/filippo/sgte
+pkg_sgte_commit = master
+
+PACKAGES += sheriff
+pkg_sheriff_name = sheriff
+pkg_sheriff_description = Parse transform for type based validation.
+pkg_sheriff_homepage = http://ninenines.eu
+pkg_sheriff_fetch = git
+pkg_sheriff_repo = https://github.com/extend/sheriff
+pkg_sheriff_commit = master
+
+PACKAGES += shotgun
+pkg_shotgun_name = shotgun
+pkg_shotgun_description = better than just a gun
+pkg_shotgun_homepage = https://github.com/inaka/shotgun
+pkg_shotgun_fetch = git
+pkg_shotgun_repo = https://github.com/inaka/shotgun
+pkg_shotgun_commit = master
+
+PACKAGES += sidejob
+pkg_sidejob_name = sidejob
+pkg_sidejob_description = Parallel worker and capacity limiting library for Erlang
+pkg_sidejob_homepage = https://github.com/basho/sidejob
+pkg_sidejob_fetch = git
+pkg_sidejob_repo = https://github.com/basho/sidejob
+pkg_sidejob_commit = master
+
+PACKAGES += sieve
+pkg_sieve_name = sieve
+pkg_sieve_description = sieve is a simple TCP routing proxy (layer 7) in erlang
+pkg_sieve_homepage = https://github.com/benoitc/sieve
+pkg_sieve_fetch = git
+pkg_sieve_repo = https://github.com/benoitc/sieve
+pkg_sieve_commit = master
+
+PACKAGES += sighandler
+pkg_sighandler_name = sighandler
+pkg_sighandler_description = Handle UNIX signals in Erlang
+pkg_sighandler_homepage = https://github.com/jkingsbery/sighandler
+pkg_sighandler_fetch = git
+pkg_sighandler_repo = https://github.com/jkingsbery/sighandler
+pkg_sighandler_commit = master
+
+PACKAGES += simhash
+pkg_simhash_name = simhash
+pkg_simhash_description = Simhashing for Erlang -- hashing algorithm to find near-duplicates in binary data.
+pkg_simhash_homepage = https://github.com/ferd/simhash
+pkg_simhash_fetch = git
+pkg_simhash_repo = https://github.com/ferd/simhash
+pkg_simhash_commit = master
+
+PACKAGES += simple_bridge
+pkg_simple_bridge_name = simple_bridge
+pkg_simple_bridge_description = A simple, standardized interface library to Erlang HTTP Servers.
+pkg_simple_bridge_homepage = https://github.com/nitrogen/simple_bridge
+pkg_simple_bridge_fetch = git
+pkg_simple_bridge_repo = https://github.com/nitrogen/simple_bridge
+pkg_simple_bridge_commit = master
+
+PACKAGES += simple_oauth2
+pkg_simple_oauth2_name = simple_oauth2
+pkg_simple_oauth2_description = Simple erlang OAuth2 client module for any http server framework (Google, Facebook, Yandex, Vkontakte are preconfigured)
+pkg_simple_oauth2_homepage = https://github.com/virtan/simple_oauth2
+pkg_simple_oauth2_fetch = git
+pkg_simple_oauth2_repo = https://github.com/virtan/simple_oauth2
+pkg_simple_oauth2_commit = master
+
+PACKAGES += skel
+pkg_skel_name = skel
+pkg_skel_description = A Streaming Process-based Skeleton Library for Erlang
+pkg_skel_homepage = https://github.com/ParaPhrase/skel
+pkg_skel_fetch = git
+pkg_skel_repo = https://github.com/ParaPhrase/skel
+pkg_skel_commit = master
+
+PACKAGES += slack
+pkg_slack_name = slack
+pkg_slack_description = Minimal slack notification OTP library.
+pkg_slack_homepage = https://github.com/DonBranson/slack
+pkg_slack_fetch = git
+pkg_slack_repo = https://github.com/DonBranson/slack.git
+pkg_slack_commit = master
+
+PACKAGES += smother
+pkg_smother_name = smother
+pkg_smother_description = Extended code coverage metrics for Erlang.
+pkg_smother_homepage = https://ramsay-t.github.io/Smother/
+pkg_smother_fetch = git
+pkg_smother_repo = https://github.com/ramsay-t/Smother
+pkg_smother_commit = master
+
+PACKAGES += snappyer
+pkg_snappyer_name = snappyer
+pkg_snappyer_description = Snappy as a NIF for Erlang
+pkg_snappyer_homepage = https://github.com/zmstone/snappyer
+pkg_snappyer_fetch = git
+pkg_snappyer_repo = https://github.com/zmstone/snappyer.git
+pkg_snappyer_commit = master
+
+PACKAGES += social
+pkg_social_name = social
+pkg_social_description = Cowboy handler for social login via OAuth2 providers
+pkg_social_homepage = https://github.com/dvv/social
+pkg_social_fetch = git
+pkg_social_repo = https://github.com/dvv/social
+pkg_social_commit = master
+
+PACKAGES += spapi_router
+pkg_spapi_router_name = spapi_router
+pkg_spapi_router_description = Partially-connected Erlang clustering
+pkg_spapi_router_homepage = https://github.com/spilgames/spapi-router
+pkg_spapi_router_fetch = git
+pkg_spapi_router_repo = https://github.com/spilgames/spapi-router
+pkg_spapi_router_commit = master
+
+PACKAGES += sqerl
+pkg_sqerl_name = sqerl
+pkg_sqerl_description = An Erlang-flavoured SQL DSL
+pkg_sqerl_homepage = https://github.com/hairyhum/sqerl
+pkg_sqerl_fetch = git
+pkg_sqerl_repo = https://github.com/hairyhum/sqerl
+pkg_sqerl_commit = master
+
+PACKAGES += srly
+pkg_srly_name = srly
+pkg_srly_description = Native Erlang Unix serial interface
+pkg_srly_homepage = https://github.com/msantos/srly
+pkg_srly_fetch = git
+pkg_srly_repo = https://github.com/msantos/srly
+pkg_srly_commit = master
+
+PACKAGES += sshrpc
+pkg_sshrpc_name = sshrpc
+pkg_sshrpc_description = Erlang SSH RPC module (experimental)
+pkg_sshrpc_homepage = https://github.com/jj1bdx/sshrpc
+pkg_sshrpc_fetch = git
+pkg_sshrpc_repo = https://github.com/jj1bdx/sshrpc
+pkg_sshrpc_commit = master
+
+PACKAGES += stable
+pkg_stable_name = stable
+pkg_stable_description = Library of assorted helpers for Cowboy web server.
+pkg_stable_homepage = https://github.com/dvv/stable
+pkg_stable_fetch = git
+pkg_stable_repo = https://github.com/dvv/stable
+pkg_stable_commit = master
+
+PACKAGES += statebox
+pkg_statebox_name = statebox
+pkg_statebox_description = Erlang state monad with merge/conflict-resolution capabilities. Useful for Riak.
+pkg_statebox_homepage = https://github.com/mochi/statebox
+pkg_statebox_fetch = git
+pkg_statebox_repo = https://github.com/mochi/statebox
+pkg_statebox_commit = master
+
+PACKAGES += statebox_riak
+pkg_statebox_riak_name = statebox_riak
+pkg_statebox_riak_description = Convenience library that makes it easier to use statebox with riak, extracted from best practices in our production code at Mochi Media.
+pkg_statebox_riak_homepage = https://github.com/mochi/statebox_riak
+pkg_statebox_riak_fetch = git
+pkg_statebox_riak_repo = https://github.com/mochi/statebox_riak
+pkg_statebox_riak_commit = master
+
+PACKAGES += statman
+pkg_statman_name = statman
+pkg_statman_description = Efficiently collect massive volumes of metrics inside the Erlang VM
+pkg_statman_homepage = https://github.com/knutin/statman
+pkg_statman_fetch = git
+pkg_statman_repo = https://github.com/knutin/statman
+pkg_statman_commit = master
+
+PACKAGES += statsderl
+pkg_statsderl_name = statsderl
+pkg_statsderl_description = StatsD client (erlang)
+pkg_statsderl_homepage = https://github.com/lpgauth/statsderl
+pkg_statsderl_fetch = git
+pkg_statsderl_repo = https://github.com/lpgauth/statsderl
+pkg_statsderl_commit = master
+
+PACKAGES += stdinout_pool
+pkg_stdinout_pool_name = stdinout_pool
+pkg_stdinout_pool_description = stdinout_pool : stuff goes in, stuff goes out. there's never any miscommunication.
+pkg_stdinout_pool_homepage = https://github.com/mattsta/erlang-stdinout-pool
+pkg_stdinout_pool_fetch = git
+pkg_stdinout_pool_repo = https://github.com/mattsta/erlang-stdinout-pool
+pkg_stdinout_pool_commit = master
+
+PACKAGES += stockdb
+pkg_stockdb_name = stockdb
+pkg_stockdb_description = Database for storing Stock Exchange quotes in erlang
+pkg_stockdb_homepage = https://github.com/maxlapshin/stockdb
+pkg_stockdb_fetch = git
+pkg_stockdb_repo = https://github.com/maxlapshin/stockdb
+pkg_stockdb_commit = master
+
+PACKAGES += stripe
+pkg_stripe_name = stripe
+pkg_stripe_description = Erlang interface to the stripe.com API
+pkg_stripe_homepage = https://github.com/mattsta/stripe-erlang
+pkg_stripe_fetch = git
+pkg_stripe_repo = https://github.com/mattsta/stripe-erlang
+pkg_stripe_commit = v1
+
+PACKAGES += subproc
+pkg_subproc_name = subproc
+pkg_subproc_description = unix subprocess manager with {active,once|false} modes
+pkg_subproc_homepage = http://dozzie.jarowit.net/trac/wiki/subproc
+pkg_subproc_fetch = git
+pkg_subproc_repo = https://github.com/dozzie/subproc
+pkg_subproc_commit = v0.1.0
+
+PACKAGES += supervisor3
+pkg_supervisor3_name = supervisor3
+pkg_supervisor3_description = OTP supervisor with additional strategies
+pkg_supervisor3_homepage = https://github.com/klarna/supervisor3
+pkg_supervisor3_fetch = git
+pkg_supervisor3_repo = https://github.com/klarna/supervisor3.git
+pkg_supervisor3_commit = master
+
+PACKAGES += surrogate
+pkg_surrogate_name = surrogate
+pkg_surrogate_description = Proxy server written in erlang. Supports reverse proxy load balancing and forward proxy with http (including CONNECT), socks4, socks5, and transparent proxy modes.
+pkg_surrogate_homepage = https://github.com/skruger/Surrogate
+pkg_surrogate_fetch = git
+pkg_surrogate_repo = https://github.com/skruger/Surrogate
+pkg_surrogate_commit = master
+
+PACKAGES += swab
+pkg_swab_name = swab
+pkg_swab_description = General purpose buffer handling module
+pkg_swab_homepage = https://github.com/crownedgrouse/swab
+pkg_swab_fetch = git
+pkg_swab_repo = https://github.com/crownedgrouse/swab
+pkg_swab_commit = master
+
+PACKAGES += swarm
+pkg_swarm_name = swarm
+pkg_swarm_description = Fast and simple acceptor pool for Erlang
+pkg_swarm_homepage = https://github.com/jeremey/swarm
+pkg_swarm_fetch = git
+pkg_swarm_repo = https://github.com/jeremey/swarm
+pkg_swarm_commit = master
+
+PACKAGES += switchboard
+pkg_switchboard_name = switchboard
+pkg_switchboard_description = A framework for processing email using worker plugins.
+pkg_switchboard_homepage = https://github.com/thusfresh/switchboard
+pkg_switchboard_fetch = git
+pkg_switchboard_repo = https://github.com/thusfresh/switchboard
+pkg_switchboard_commit = master
+
+PACKAGES += syn
+pkg_syn_name = syn
+pkg_syn_description = A global Process Registry and Process Group manager for Erlang.
+pkg_syn_homepage = https://github.com/ostinelli/syn
+pkg_syn_fetch = git
+pkg_syn_repo = https://github.com/ostinelli/syn
+pkg_syn_commit = master
+
+PACKAGES += sync
+pkg_sync_name = sync
+pkg_sync_description = On-the-fly recompiling and reloading in Erlang.
+pkg_sync_homepage = https://github.com/rustyio/sync
+pkg_sync_fetch = git
+pkg_sync_repo = https://github.com/rustyio/sync
+pkg_sync_commit = master
+
+PACKAGES += syntaxerl
+pkg_syntaxerl_name = syntaxerl
+pkg_syntaxerl_description = Syntax checker for Erlang
+pkg_syntaxerl_homepage = https://github.com/ten0s/syntaxerl
+pkg_syntaxerl_fetch = git
+pkg_syntaxerl_repo = https://github.com/ten0s/syntaxerl
+pkg_syntaxerl_commit = master
+
+PACKAGES += syslog
+pkg_syslog_name = syslog
+pkg_syslog_description = Erlang port driver for interacting with syslog via syslog(3)
+pkg_syslog_homepage = https://github.com/Vagabond/erlang-syslog
+pkg_syslog_fetch = git
+pkg_syslog_repo = https://github.com/Vagabond/erlang-syslog
+pkg_syslog_commit = master
+
+PACKAGES += taskforce
+pkg_taskforce_name = taskforce
+pkg_taskforce_description = Erlang worker pools for controlled parallelisation of arbitrary tasks.
+pkg_taskforce_homepage = https://github.com/g-andrade/taskforce
+pkg_taskforce_fetch = git
+pkg_taskforce_repo = https://github.com/g-andrade/taskforce
+pkg_taskforce_commit = master
+
+PACKAGES += tddreloader
+pkg_tddreloader_name = tddreloader
+pkg_tddreloader_description = Shell utility for recompiling, reloading, and testing code as it changes
+pkg_tddreloader_homepage = https://github.com/version2beta/tddreloader
+pkg_tddreloader_fetch = git
+pkg_tddreloader_repo = https://github.com/version2beta/tddreloader
+pkg_tddreloader_commit = master
+
+PACKAGES += tempo
+pkg_tempo_name = tempo
+pkg_tempo_description = NIF-based date and time parsing and formatting for Erlang.
+pkg_tempo_homepage = https://github.com/selectel/tempo
+pkg_tempo_fetch = git
+pkg_tempo_repo = https://github.com/selectel/tempo
+pkg_tempo_commit = master
+
+PACKAGES += ticktick
+pkg_ticktick_name = ticktick
+pkg_ticktick_description = Ticktick is an ID generator for message services.
+pkg_ticktick_homepage = https://github.com/ericliang/ticktick
+pkg_ticktick_fetch = git
+pkg_ticktick_repo = https://github.com/ericliang/ticktick
+pkg_ticktick_commit = master
+
+PACKAGES += tinymq
+pkg_tinymq_name = tinymq
+pkg_tinymq_description = TinyMQ - a diminutive, in-memory message queue
+pkg_tinymq_homepage = https://github.com/ChicagoBoss/tinymq
+pkg_tinymq_fetch = git
+pkg_tinymq_repo = https://github.com/ChicagoBoss/tinymq
+pkg_tinymq_commit = master
+
+PACKAGES += tinymt
+pkg_tinymt_name = tinymt
+pkg_tinymt_description = TinyMT pseudo random number generator for Erlang.
+pkg_tinymt_homepage = https://github.com/jj1bdx/tinymt-erlang
+pkg_tinymt_fetch = git
+pkg_tinymt_repo = https://github.com/jj1bdx/tinymt-erlang
+pkg_tinymt_commit = master
+
+PACKAGES += tirerl
+pkg_tirerl_name = tirerl
+pkg_tirerl_description = Erlang interface to Elastic Search
+pkg_tirerl_homepage = https://github.com/inaka/tirerl
+pkg_tirerl_fetch = git
+pkg_tirerl_repo = https://github.com/inaka/tirerl
+pkg_tirerl_commit = master
+
+PACKAGES += toml
+pkg_toml_name = toml
+pkg_toml_description = TOML (0.4.0) config parser
+pkg_toml_homepage = http://dozzie.jarowit.net/trac/wiki/TOML
+pkg_toml_fetch = git
+pkg_toml_repo = https://github.com/dozzie/toml
+pkg_toml_commit = v0.2.0
+
+PACKAGES += traffic_tools
+pkg_traffic_tools_name = traffic_tools
+pkg_traffic_tools_description = Simple traffic limiting library
+pkg_traffic_tools_homepage = https://github.com/systra/traffic_tools
+pkg_traffic_tools_fetch = git
+pkg_traffic_tools_repo = https://github.com/systra/traffic_tools
+pkg_traffic_tools_commit = master
+
+PACKAGES += trails
+pkg_trails_name = trails
+pkg_trails_description = A couple of improvements over Cowboy Routes
+pkg_trails_homepage = http://inaka.github.io/cowboy-trails/
+pkg_trails_fetch = git
+pkg_trails_repo = https://github.com/inaka/cowboy-trails
+pkg_trails_commit = master
+
+PACKAGES += trane
+pkg_trane_name = trane
+pkg_trane_description = SAX-style parser for broken HTML, in Erlang
+pkg_trane_homepage = https://github.com/massemanet/trane
+pkg_trane_fetch = git
+pkg_trane_repo = https://github.com/massemanet/trane
+pkg_trane_commit = master
+
+PACKAGES += transit
+pkg_transit_name = transit
+pkg_transit_description = transit format for erlang
+pkg_transit_homepage = https://github.com/isaiah/transit-erlang
+pkg_transit_fetch = git
+pkg_transit_repo = https://github.com/isaiah/transit-erlang
+pkg_transit_commit = master
+
+PACKAGES += trie
+pkg_trie_name = trie
+pkg_trie_description = Erlang Trie Implementation
+pkg_trie_homepage = https://github.com/okeuday/trie
+pkg_trie_fetch = git
+pkg_trie_repo = https://github.com/okeuday/trie
+pkg_trie_commit = master
+
+PACKAGES += triq
+pkg_triq_name = triq
+pkg_triq_description = Trifork QuickCheck
+pkg_triq_homepage = https://triq.gitlab.io
+pkg_triq_fetch = git
+pkg_triq_repo = https://gitlab.com/triq/triq.git
+pkg_triq_commit = master
+
+PACKAGES += tunctl
+pkg_tunctl_name = tunctl
+pkg_tunctl_description = Erlang TUN/TAP interface
+pkg_tunctl_homepage = https://github.com/msantos/tunctl
+pkg_tunctl_fetch = git
+pkg_tunctl_repo = https://github.com/msantos/tunctl
+pkg_tunctl_commit = master
+
+PACKAGES += twerl
+pkg_twerl_name = twerl
+pkg_twerl_description = Erlang client for the Twitter Streaming API
+pkg_twerl_homepage = https://github.com/lucaspiller/twerl
+pkg_twerl_fetch = git
+pkg_twerl_repo = https://github.com/lucaspiller/twerl
+pkg_twerl_commit = oauth
+
+PACKAGES += twitter_erlang
+pkg_twitter_erlang_name = twitter_erlang
+pkg_twitter_erlang_description = An Erlang twitter client
+pkg_twitter_erlang_homepage = https://github.com/ngerakines/erlang_twitter
+pkg_twitter_erlang_fetch = git
+pkg_twitter_erlang_repo = https://github.com/ngerakines/erlang_twitter
+pkg_twitter_erlang_commit = master
+
+PACKAGES += ucol_nif
+pkg_ucol_nif_name = ucol_nif
+pkg_ucol_nif_description = ICU based collation Erlang module
+pkg_ucol_nif_homepage = https://github.com/refuge/ucol_nif
+pkg_ucol_nif_fetch = git
+pkg_ucol_nif_repo = https://github.com/refuge/ucol_nif
+pkg_ucol_nif_commit = master
+
+PACKAGES += unicorn
+pkg_unicorn_name = unicorn
+pkg_unicorn_description = Generic configuration server
+pkg_unicorn_homepage = https://github.com/shizzard/unicorn
+pkg_unicorn_fetch = git
+pkg_unicorn_repo = https://github.com/shizzard/unicorn
+pkg_unicorn_commit = master
+
+PACKAGES += unsplit
+pkg_unsplit_name = unsplit
+pkg_unsplit_description = Resolves conflicts in Mnesia after network splits
+pkg_unsplit_homepage = https://github.com/uwiger/unsplit
+pkg_unsplit_fetch = git
+pkg_unsplit_repo = https://github.com/uwiger/unsplit
+pkg_unsplit_commit = master
+
+PACKAGES += uuid
+pkg_uuid_name = uuid
+pkg_uuid_description = Erlang UUID Implementation
+pkg_uuid_homepage = https://github.com/okeuday/uuid
+pkg_uuid_fetch = git
+pkg_uuid_repo = https://github.com/okeuday/uuid
+pkg_uuid_commit = master
+
+PACKAGES += ux
+pkg_ux_name = ux
+pkg_ux_description = Unicode eXtension for Erlang (Strings, Collation)
+pkg_ux_homepage = https://github.com/erlang-unicode/ux
+pkg_ux_fetch = git
+pkg_ux_repo = https://github.com/erlang-unicode/ux
+pkg_ux_commit = master
+
+PACKAGES += vert
+pkg_vert_name = vert
+pkg_vert_description = Erlang binding to libvirt virtualization API
+pkg_vert_homepage = https://github.com/msantos/erlang-libvirt
+pkg_vert_fetch = git
+pkg_vert_repo = https://github.com/msantos/erlang-libvirt
+pkg_vert_commit = master
+
+PACKAGES += verx
+pkg_verx_name = verx
+pkg_verx_description = Erlang implementation of the libvirtd remote protocol
+pkg_verx_homepage = https://github.com/msantos/verx
+pkg_verx_fetch = git
+pkg_verx_repo = https://github.com/msantos/verx
+pkg_verx_commit = master
+
+PACKAGES += vmq_acl
+pkg_vmq_acl_name = vmq_acl
+pkg_vmq_acl_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_acl_homepage = https://verne.mq/
+pkg_vmq_acl_fetch = git
+pkg_vmq_acl_repo = https://github.com/erlio/vmq_acl
+pkg_vmq_acl_commit = master
+
+PACKAGES += vmq_bridge
+pkg_vmq_bridge_name = vmq_bridge
+pkg_vmq_bridge_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_bridge_homepage = https://verne.mq/
+pkg_vmq_bridge_fetch = git
+pkg_vmq_bridge_repo = https://github.com/erlio/vmq_bridge
+pkg_vmq_bridge_commit = master
+
+PACKAGES += vmq_graphite
+pkg_vmq_graphite_name = vmq_graphite
+pkg_vmq_graphite_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_graphite_homepage = https://verne.mq/
+pkg_vmq_graphite_fetch = git
+pkg_vmq_graphite_repo = https://github.com/erlio/vmq_graphite
+pkg_vmq_graphite_commit = master
+
+PACKAGES += vmq_passwd
+pkg_vmq_passwd_name = vmq_passwd
+pkg_vmq_passwd_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_passwd_homepage = https://verne.mq/
+pkg_vmq_passwd_fetch = git
+pkg_vmq_passwd_repo = https://github.com/erlio/vmq_passwd
+pkg_vmq_passwd_commit = master
+
+PACKAGES += vmq_server
+pkg_vmq_server_name = vmq_server
+pkg_vmq_server_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_server_homepage = https://verne.mq/
+pkg_vmq_server_fetch = git
+pkg_vmq_server_repo = https://github.com/erlio/vmq_server
+pkg_vmq_server_commit = master
+
+PACKAGES += vmq_snmp
+pkg_vmq_snmp_name = vmq_snmp
+pkg_vmq_snmp_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_snmp_homepage = https://verne.mq/
+pkg_vmq_snmp_fetch = git
+pkg_vmq_snmp_repo = https://github.com/erlio/vmq_snmp
+pkg_vmq_snmp_commit = master
+
+PACKAGES += vmq_systree
+pkg_vmq_systree_name = vmq_systree
+pkg_vmq_systree_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_systree_homepage = https://verne.mq/
+pkg_vmq_systree_fetch = git
+pkg_vmq_systree_repo = https://github.com/erlio/vmq_systree
+pkg_vmq_systree_commit = master
+
+PACKAGES += vmstats
+pkg_vmstats_name = vmstats
+pkg_vmstats_description = Tiny Erlang app that works in conjunction with statsderl to generate Erlang VM metrics for graphite logs.
+pkg_vmstats_homepage = https://github.com/ferd/vmstats
+pkg_vmstats_fetch = git
+pkg_vmstats_repo = https://github.com/ferd/vmstats
+pkg_vmstats_commit = master
+
+PACKAGES += walrus
+pkg_walrus_name = walrus
+pkg_walrus_description = Walrus - Mustache-like Templating
+pkg_walrus_homepage = https://github.com/devinus/walrus
+pkg_walrus_fetch = git
+pkg_walrus_repo = https://github.com/devinus/walrus
+pkg_walrus_commit = master
+
+PACKAGES += webmachine
+pkg_webmachine_name = webmachine
+pkg_webmachine_description = A REST-based system for building web applications.
+pkg_webmachine_homepage = https://github.com/basho/webmachine
+pkg_webmachine_fetch = git
+pkg_webmachine_repo = https://github.com/basho/webmachine
+pkg_webmachine_commit = master
+
+PACKAGES += websocket_client
+pkg_websocket_client_name = websocket_client
+pkg_websocket_client_description = Erlang websocket client (ws and wss supported)
+pkg_websocket_client_homepage = https://github.com/jeremyong/websocket_client
+pkg_websocket_client_fetch = git
+pkg_websocket_client_repo = https://github.com/jeremyong/websocket_client
+pkg_websocket_client_commit = master
+
+PACKAGES += worker_pool
+pkg_worker_pool_name = worker_pool
+pkg_worker_pool_description = a simple erlang worker pool
+pkg_worker_pool_homepage = https://github.com/inaka/worker_pool
+pkg_worker_pool_fetch = git
+pkg_worker_pool_repo = https://github.com/inaka/worker_pool
+pkg_worker_pool_commit = master
+
+PACKAGES += wrangler
+pkg_wrangler_name = wrangler
+pkg_wrangler_description = Import of the Wrangler svn repository.
+pkg_wrangler_homepage = http://www.cs.kent.ac.uk/projects/wrangler/Home.html
+pkg_wrangler_fetch = git
+pkg_wrangler_repo = https://github.com/RefactoringTools/wrangler
+pkg_wrangler_commit = master
+
+PACKAGES += wsock
+pkg_wsock_name = wsock
+pkg_wsock_description = Erlang library to build WebSocket clients and servers
+pkg_wsock_homepage = https://github.com/madtrick/wsock
+pkg_wsock_fetch = git
+pkg_wsock_repo = https://github.com/madtrick/wsock
+pkg_wsock_commit = master
+
+PACKAGES += xhttpc
+pkg_xhttpc_name = xhttpc
+pkg_xhttpc_description = Extensible HTTP Client for Erlang
+pkg_xhttpc_homepage = https://github.com/seriyps/xhttpc
+pkg_xhttpc_fetch = git
+pkg_xhttpc_repo = https://github.com/seriyps/xhttpc
+pkg_xhttpc_commit = master
+
+PACKAGES += xref_runner
+pkg_xref_runner_name = xref_runner
+pkg_xref_runner_description = Erlang Xref Runner (inspired by rebar xref)
+pkg_xref_runner_homepage = https://github.com/inaka/xref_runner
+pkg_xref_runner_fetch = git
+pkg_xref_runner_repo = https://github.com/inaka/xref_runner
+pkg_xref_runner_commit = master
+
+PACKAGES += yamerl
+pkg_yamerl_name = yamerl
+pkg_yamerl_description = YAML 1.2 parser in pure Erlang
+pkg_yamerl_homepage = https://github.com/yakaz/yamerl
+pkg_yamerl_fetch = git
+pkg_yamerl_repo = https://github.com/yakaz/yamerl
+pkg_yamerl_commit = master
+
+PACKAGES += yamler
+pkg_yamler_name = yamler
+pkg_yamler_description = libyaml-based yaml loader for Erlang
+pkg_yamler_homepage = https://github.com/goertzenator/yamler
+pkg_yamler_fetch = git
+pkg_yamler_repo = https://github.com/goertzenator/yamler
+pkg_yamler_commit = master
+
+PACKAGES += yaws
+pkg_yaws_name = yaws
+pkg_yaws_description = Yaws webserver
+pkg_yaws_homepage = http://yaws.hyber.org
+pkg_yaws_fetch = git
+pkg_yaws_repo = https://github.com/klacke/yaws
+pkg_yaws_commit = master
+
+PACKAGES += zab_engine
+pkg_zab_engine_name = zab_engine
+pkg_zab_engine_description = ZAB protocol implementation in Erlang
+pkg_zab_engine_homepage = https://github.com/xinmingyao/zab_engine
+pkg_zab_engine_fetch = git
+pkg_zab_engine_repo = https://github.com/xinmingyao/zab_engine
+pkg_zab_engine_commit = master
+
+PACKAGES += zabbix_sender
+pkg_zabbix_sender_name = zabbix_sender
+pkg_zabbix_sender_description = Zabbix trapper for sending data to Zabbix in pure Erlang
+pkg_zabbix_sender_homepage = https://github.com/stalkermn/zabbix_sender
+pkg_zabbix_sender_fetch = git
+pkg_zabbix_sender_repo = https://github.com/stalkermn/zabbix_sender.git
+pkg_zabbix_sender_commit = master
+
+PACKAGES += zeta
+pkg_zeta_name = zeta
+pkg_zeta_description = HTTP access log parser in Erlang
+pkg_zeta_homepage = https://github.com/s1n4/zeta
+pkg_zeta_fetch = git
+pkg_zeta_repo = https://github.com/s1n4/zeta
+pkg_zeta_commit = master
+
+PACKAGES += zippers
+pkg_zippers_name = zippers
+pkg_zippers_description = A library for functional zipper data structures in Erlang.
+pkg_zippers_homepage = https://github.com/ferd/zippers
+pkg_zippers_fetch = git
+pkg_zippers_repo = https://github.com/ferd/zippers
+pkg_zippers_commit = master
+
+PACKAGES += zlists
+pkg_zlists_name = zlists
+pkg_zlists_description = Erlang lazy lists library.
+pkg_zlists_homepage = https://github.com/vjache/erlang-zlists
+pkg_zlists_fetch = git
+pkg_zlists_repo = https://github.com/vjache/erlang-zlists
+pkg_zlists_commit = master
+
+PACKAGES += zraft_lib
+pkg_zraft_lib_name = zraft_lib
+pkg_zraft_lib_description = Erlang raft consensus protocol implementation
+pkg_zraft_lib_homepage = https://github.com/dreyk/zraft_lib
+pkg_zraft_lib_fetch = git
+pkg_zraft_lib_repo = https://github.com/dreyk/zraft_lib
+pkg_zraft_lib_commit = master
+
+PACKAGES += zucchini
+pkg_zucchini_name = zucchini
+pkg_zucchini_description = An Erlang INI parser
+pkg_zucchini_homepage = https://github.com/devinus/zucchini
+pkg_zucchini_fetch = git
+pkg_zucchini_repo = https://github.com/devinus/zucchini
+pkg_zucchini_commit = master
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: search
+
+define pkg_print
+ $(verbose) printf "%s\n" \
+ $(if $(call core_eq,$(1),$(pkg_$(1)_name)),,"Pkg name: $(1)") \
+ "App name: $(pkg_$(1)_name)" \
+ "Description: $(pkg_$(1)_description)" \
+ "Home page: $(pkg_$(1)_homepage)" \
+ "Fetch with: $(pkg_$(1)_fetch)" \
+ "Repository: $(pkg_$(1)_repo)" \
+ "Commit: $(pkg_$(1)_commit)" \
+ ""
+
+endef
+
+search:
+ifdef q
+ $(foreach p,$(PACKAGES), \
+ $(if $(findstring $(call core_lc,$(q)),$(call core_lc,$(pkg_$(p)_name) $(pkg_$(p)_description))), \
+ $(call pkg_print,$(p))))
+else
+ $(foreach p,$(PACKAGES),$(call pkg_print,$(p)))
+endif
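+
+# Usage sketch (assuming a project that includes this erlang.mk):
+#   make search q=worker   # list packages whose name or description matches "worker"
+#   make search            # list every known package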
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-deps clean-tmp-deps.log
+
+# Configuration.
+
+ifdef OTP_DEPS
+$(warning The variable OTP_DEPS is deprecated in favor of LOCAL_DEPS.)
+endif
+
+IGNORE_DEPS ?=
+export IGNORE_DEPS
+
+APPS_DIR ?= $(CURDIR)/apps
+export APPS_DIR
+
+DEPS_DIR ?= $(CURDIR)/deps
+export DEPS_DIR
+
+REBAR_DEPS_DIR = $(DEPS_DIR)
+export REBAR_DEPS_DIR
+
+REBAR_GIT ?= https://github.com/rebar/rebar
+REBAR_COMMIT ?= 576e12171ab8d69b048b827b92aa65d067deea01
+
+# External "early" plugins (see core/plugins.mk for regular plugins).
+# They both use the core_dep_plugin macro.
+
+define core_dep_plugin
+ifeq ($(2),$(PROJECT))
+-include $$(patsubst $(PROJECT)/%,%,$(1))
+else
+-include $(DEPS_DIR)/$(1)
+
+$(DEPS_DIR)/$(1): $(DEPS_DIR)/$(2) ;
+endif
+endef
+
+DEP_EARLY_PLUGINS ?=
+
+$(foreach p,$(DEP_EARLY_PLUGINS),\
+ $(eval $(if $(findstring /,$p),\
+ $(call core_dep_plugin,$p,$(firstword $(subst /, ,$p))),\
+ $(call core_dep_plugin,$p/early-plugins.mk,$p))))
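+
+# Illustrative DEP_EARLY_PLUGINS values (hypothetical dependency "my_dep"):
+#   DEP_EARLY_PLUGINS = my_dep               # loads $(DEPS_DIR)/my_dep/early-plugins.mk
+#   DEP_EARLY_PLUGINS = my_dep/mk/early.mk   # loads that exact file from the dependency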
+
+# Query functions.
+
+query_fetch_method = $(if $(dep_$(1)),$(call _qfm_dep,$(word 1,$(dep_$(1)))),$(call _qfm_pkg,$(1)))
+_qfm_dep = $(if $(dep_fetch_$(1)),$(1),$(if $(IS_DEP),legacy,fail))
+_qfm_pkg = $(if $(pkg_$(1)_fetch),$(pkg_$(1)_fetch),fail)
+
+query_name = $(if $(dep_$(1)),$(1),$(if $(pkg_$(1)_name),$(pkg_$(1)_name),$(1)))
+
+query_repo = $(call _qr,$(1),$(call query_fetch_method,$(1)))
+_qr = $(if $(query_repo_$(2)),$(call query_repo_$(2),$(1)),$(call dep_repo,$(1)))
+
+query_repo_default = $(if $(dep_$(1)),$(word 2,$(dep_$(1))),$(pkg_$(1)_repo))
+query_repo_git = $(patsubst git://github.com/%,https://github.com/%,$(call query_repo_default,$(1)))
+query_repo_git-subfolder = $(call query_repo_git,$(1))
+query_repo_git-submodule = -
+query_repo_hg = $(call query_repo_default,$(1))
+query_repo_svn = $(call query_repo_default,$(1))
+query_repo_cp = $(call query_repo_default,$(1))
+query_repo_ln = $(call query_repo_default,$(1))
+query_repo_hex = https://hex.pm/packages/$(if $(word 3,$(dep_$(1))),$(word 3,$(dep_$(1))),$(1))
+query_repo_fail = -
+query_repo_legacy = -
+
+query_version = $(call _qv,$(1),$(call query_fetch_method,$(1)))
+_qv = $(if $(query_version_$(2)),$(call query_version_$(2),$(1)),$(call dep_commit,$(1)))
+
+query_version_default = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(word 3,$(dep_$(1))),$(pkg_$(1)_commit)))
+query_version_git = $(call query_version_default,$(1))
+query_version_git-subfolder = $(call query_version_git,$(1))
+query_version_git-submodule = -
+query_version_hg = $(call query_version_default,$(1))
+query_version_svn = -
+query_version_cp = -
+query_version_ln = -
+query_version_hex = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(word 2,$(dep_$(1))),$(pkg_$(1)_commit)))
+query_version_fail = -
+query_version_legacy = -
+
+query_extra = $(call _qe,$(1),$(call query_fetch_method,$(1)))
+_qe = $(if $(query_extra_$(2)),$(call query_extra_$(2),$(1)),-)
+
+query_extra_git = -
+query_extra_git-subfolder = $(if $(dep_$(1)),subfolder=$(word 4,$(dep_$(1))),-)
+query_extra_git-submodule = -
+query_extra_hg = -
+query_extra_svn = -
+query_extra_cp = -
+query_extra_ln = -
+query_extra_hex = $(if $(dep_$(1)),package-name=$(word 3,$(dep_$(1))),-)
+query_extra_fail = -
+query_extra_legacy = -
+
+query_absolute_path = $(addprefix $(DEPS_DIR)/,$(call query_name,$(1)))
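+
+# Worked example (hypothetical dependency): given
+#   dep_cowlib = git https://github.com/ninenines/cowlib 2.11.0
+# the query functions above resolve as follows:
+#   $(call query_fetch_method,cowlib) -> git
+#   $(call query_repo,cowlib)         -> https://github.com/ninenines/cowlib
+#   $(call query_version,cowlib)      -> 2.11.0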
+
+# Deprecated legacy query functions.
+dep_fetch = $(call query_fetch_method,$(1))
+dep_name = $(call query_name,$(1))
+dep_repo = $(call query_repo_git,$(1))
+dep_commit = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(if $(filter hex,$(word 1,$(dep_$(1)))),$(word 2,$(dep_$(1))),$(word 3,$(dep_$(1)))),$(pkg_$(1)_commit)))
+
+LOCAL_DEPS_DIRS = $(foreach a,$(LOCAL_DEPS),$(if $(wildcard $(APPS_DIR)/$(a)),$(APPS_DIR)/$(a)))
+ALL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(foreach dep,$(filter-out $(IGNORE_DEPS),$(BUILD_DEPS) $(DEPS)),$(call dep_name,$(dep))))
+
+# When we are calling an app directly we don't want to include it here,
+# otherwise it'll be treated both as an app and as the top-level project.
+ALL_APPS_DIRS = $(if $(wildcard $(APPS_DIR)/),$(filter-out $(APPS_DIR),$(shell find $(APPS_DIR) -maxdepth 1 -type d)))
+ifdef ROOT_DIR
+ifndef IS_APP
+ALL_APPS_DIRS := $(filter-out $(APPS_DIR)/$(notdir $(CURDIR)),$(ALL_APPS_DIRS))
+endif
+endif
+
+ifeq ($(filter $(APPS_DIR) $(DEPS_DIR),$(subst :, ,$(ERL_LIBS))),)
+ifeq ($(ERL_LIBS),)
+ ERL_LIBS = $(APPS_DIR):$(DEPS_DIR)
+else
+ ERL_LIBS := $(ERL_LIBS):$(APPS_DIR):$(DEPS_DIR)
+endif
+endif
+export ERL_LIBS
+
+export NO_AUTOPATCH
+
+# Verbosity.
+
+dep_verbose_0 = @echo " DEP $1 ($(call dep_commit,$1))";
+dep_verbose_2 = set -x;
+dep_verbose = $(dep_verbose_$(V))
+
+# Optimization: don't recompile deps unless truly necessary.
+
+ifndef IS_DEP
+ifneq ($(MAKELEVEL),0)
+$(shell rm -f ebin/dep_built)
+endif
+endif
+
+# Core targets.
+
+ALL_APPS_DIRS_TO_BUILD = $(if $(LOCAL_DEPS_DIRS)$(IS_APP),$(LOCAL_DEPS_DIRS),$(ALL_APPS_DIRS))
+
+apps:: $(ALL_APPS_DIRS) clean-tmp-deps.log | $(ERLANG_MK_TMP)
+# Create ebin directory for all apps to make sure Erlang recognizes them
+# as proper OTP applications when using -include_lib. This is a temporary
+# fix; a proper fix would be to compile apps/* in the right order.
+ifndef IS_APP
+ifneq ($(ALL_APPS_DIRS),)
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ mkdir -p $$dep/ebin; \
+ done
+endif
+endif
+# At the toplevel: if LOCAL_DEPS is defined with at least one local app, only
+# compile that list of apps. Otherwise, compile everything.
+# Within an app: compile all LOCAL_DEPS that are (uncompiled) local apps.
+ifneq ($(ALL_APPS_DIRS_TO_BUILD),)
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS_TO_BUILD); do \
+ if grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/apps.log; then \
+ :; \
+ else \
+ echo $$dep >> $(ERLANG_MK_TMP)/apps.log; \
+ $(MAKE) -C $$dep $(if $(IS_TEST),test-build-app) IS_APP=1; \
+ fi \
+ done
+endif
+
+clean-tmp-deps.log:
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_TMP)/apps.log $(ERLANG_MK_TMP)/deps.log
+endif
+
+# Erlang.mk does not rebuild dependencies once they have been compiled.
+# A developer working on the top-level project and on some dependencies
+# at the same time may want to change this behavior.
+# There are two solutions:
+# 1. Set `FULL=1` so that all dependencies are visited and
+# recursively recompiled if necessary.
+# 2. Set `FORCE_REBUILD=` to the specific list of dependencies that
+# should be recompiled (instead of the whole set).
+
+FORCE_REBUILD ?=
+
+ifeq ($(origin FULL),undefined)
+ifneq ($(strip $(force_rebuild_dep)$(FORCE_REBUILD)),)
+define force_rebuild_dep
+echo "$(FORCE_REBUILD)" | grep -qw "$$(basename "$1")"
+endef
+endif
+endif
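+
+# For example (hypothetical dependency names):
+#   make FULL=1                        # visit and rebuild all dependencies
+#   make FORCE_REBUILD="cowboy ranch"  # recompile only these two dependencies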
+
+ifneq ($(SKIP_DEPS),)
+deps::
+else
+deps:: $(ALL_DEPS_DIRS) apps clean-tmp-deps.log | $(ERLANG_MK_TMP)
+ifneq ($(ALL_DEPS_DIRS),)
+ $(verbose) set -e; for dep in $(ALL_DEPS_DIRS); do \
+ if grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/deps.log; then \
+ :; \
+ else \
+ echo $$dep >> $(ERLANG_MK_TMP)/deps.log; \
+ if [ -z "$(strip $(FULL))" ] $(if $(force_rebuild_dep),&& ! ($(call force_rebuild_dep,$$dep)),) && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ elif [ -f $$dep/GNUmakefile ] || [ -f $$dep/makefile ] || [ -f $$dep/Makefile ]; then \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ else \
+ echo "Error: No Makefile to build dependency $$dep." >&2; \
+ exit 2; \
+ fi \
+ fi \
+ done
+endif
+endif
+
+# Deps related targets.
+
+# @todo rename GNUmakefile and makefile into Makefile first, if they exist
+# While the Makefile could also be named GNUmakefile or makefile,
+# in practice only Makefile has been needed so far.
+define dep_autopatch
+ if [ -f $(DEPS_DIR)/$(1)/erlang.mk ]; then \
+ rm -rf $(DEPS_DIR)/$1/ebin/; \
+ $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+ $(call dep_autopatch_erlang_mk,$(1)); \
+ elif [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+ if [ -f $(DEPS_DIR)/$1/rebar.lock ]; then \
+ $(call dep_autopatch2,$1); \
+ elif [ 0 != `grep -c "include ../\w*\.mk" $(DEPS_DIR)/$(1)/Makefile` ]; then \
+ $(call dep_autopatch2,$(1)); \
+ elif [ 0 != `grep -ci "^[^#].*rebar" $(DEPS_DIR)/$(1)/Makefile` ]; then \
+ $(call dep_autopatch2,$(1)); \
+ elif [ -n "`find $(DEPS_DIR)/$(1)/ -type f -name \*.mk -not -name erlang.mk -exec grep -i "^[^#].*rebar" '{}' \;`" ]; then \
+ $(call dep_autopatch2,$(1)); \
+ fi \
+ else \
+ if [ ! -d $(DEPS_DIR)/$(1)/src/ ]; then \
+ $(call dep_autopatch_noop,$(1)); \
+ else \
+ $(call dep_autopatch2,$(1)); \
+ fi \
+ fi
+endef
+
+define dep_autopatch2
+ ! test -f $(DEPS_DIR)/$1/ebin/$1.app || \
+ mv -n $(DEPS_DIR)/$1/ebin/$1.app $(DEPS_DIR)/$1/src/$1.app.src; \
+ rm -f $(DEPS_DIR)/$1/ebin/$1.app; \
+ if [ -f $(DEPS_DIR)/$1/src/$1.app.src.script ]; then \
+ $(call erlang,$(call dep_autopatch_appsrc_script.erl,$(1))); \
+ fi; \
+ $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+ if [ -f $(DEPS_DIR)/$(1)/rebar -o -f $(DEPS_DIR)/$(1)/rebar.config -o -f $(DEPS_DIR)/$(1)/rebar.config.script -o -f $(DEPS_DIR)/$1/rebar.lock ]; then \
+ $(call dep_autopatch_fetch_rebar); \
+ $(call dep_autopatch_rebar,$(1)); \
+ else \
+ $(call dep_autopatch_gen,$(1)); \
+ fi
+endef
+
+define dep_autopatch_noop
+ printf "noop:\n" > $(DEPS_DIR)/$(1)/Makefile
+endef
+
+# Replace "include erlang.mk" with a line that will load the parent Erlang.mk
+# if given. Do it for all 3 possible Makefile file names.
+ifeq ($(NO_AUTOPATCH_ERLANG_MK),)
+define dep_autopatch_erlang_mk
+ for f in Makefile makefile GNUmakefile; do \
+ if [ -f $(DEPS_DIR)/$1/$$f ]; then \
+ sed -i.bak s/'include *erlang.mk'/'include $$(if $$(ERLANG_MK_FILENAME),$$(ERLANG_MK_FILENAME),erlang.mk)'/ $(DEPS_DIR)/$1/$$f; \
+ fi \
+ done
+endef
+else
+define dep_autopatch_erlang_mk
+ :
+endef
+endif
+
+define dep_autopatch_gen
+ printf "%s\n" \
+ "ERLC_OPTS = +debug_info" \
+ "include ../../erlang.mk" > $(DEPS_DIR)/$(1)/Makefile
+endef
+
+# We use flock/lockf when available to avoid concurrency issues.
+define dep_autopatch_fetch_rebar
+ if command -v flock >/dev/null; then \
+ flock $(ERLANG_MK_TMP)/rebar.lock sh -c "$(call dep_autopatch_fetch_rebar2)"; \
+ elif command -v lockf >/dev/null; then \
+ lockf $(ERLANG_MK_TMP)/rebar.lock sh -c "$(call dep_autopatch_fetch_rebar2)"; \
+ else \
+ $(call dep_autopatch_fetch_rebar2); \
+ fi
+endef
+
+define dep_autopatch_fetch_rebar2
+ if [ ! -d $(ERLANG_MK_TMP)/rebar ]; then \
+ git clone -q -n -- $(REBAR_GIT) $(ERLANG_MK_TMP)/rebar; \
+ cd $(ERLANG_MK_TMP)/rebar; \
+ git checkout -q $(REBAR_COMMIT); \
+ ./bootstrap; \
+ cd -; \
+ fi
+endef
+
+define dep_autopatch_rebar
+ if [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+ mv $(DEPS_DIR)/$(1)/Makefile $(DEPS_DIR)/$(1)/Makefile.orig.mk; \
+ fi; \
+ $(call erlang,$(call dep_autopatch_rebar.erl,$(1))); \
+ rm -f $(DEPS_DIR)/$(1)/ebin/$(1).app
+endef
+
+define dep_autopatch_rebar.erl
+ application:load(rebar),
+ application:set_env(rebar, log_level, debug),
+ rmemo:start(),
+ Conf1 = case file:consult("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config)") of
+ {ok, Conf0} -> Conf0;
+ _ -> []
+ end,
+ {Conf, OsEnv} = fun() ->
+ case filelib:is_file("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)") of
+ false -> {Conf1, []};
+ true ->
+ Bindings0 = erl_eval:new_bindings(),
+ Bindings1 = erl_eval:add_binding('CONFIG', Conf1, Bindings0),
+ Bindings = erl_eval:add_binding('SCRIPT', "$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings1),
+ Before = os:getenv(),
+ {ok, Conf2} = file:script("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings),
+ {Conf2, lists:foldl(fun(E, Acc) -> lists:delete(E, Acc) end, os:getenv(), Before)}
+ end
+ end(),
+ Write = fun (Text) ->
+ file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/Makefile)", Text, [append])
+ end,
+ Escape = fun (Text) ->
+ re:replace(Text, "\\\\$$", "\$$$$", [global, {return, list}])
+ end,
+ Write("IGNORE_DEPS += edown eper eunit_formatters meck node_package "
+ "rebar_lock_deps_plugin rebar_vsn_plugin reltool_util\n"),
+ Write("C_SRC_DIR = /path/do/not/exist\n"),
+ Write("C_SRC_TYPE = rebar\n"),
+ Write("DRV_CFLAGS = -fPIC\nexport DRV_CFLAGS\n"),
+ Write(["ERLANG_ARCH = ", rebar_utils:wordsize(), "\nexport ERLANG_ARCH\n"]),
+ ToList = fun
+ (V) when is_atom(V) -> atom_to_list(V);
+ (V) when is_list(V) -> "'\\"" ++ V ++ "\\"'"
+ end,
+ fun() ->
+ Write("ERLC_OPTS = +debug_info\nexport ERLC_OPTS\n"),
+ case lists:keyfind(erl_opts, 1, Conf) of
+ false -> ok;
+ {_, ErlOpts} ->
+ lists:foreach(fun
+ ({d, D}) ->
+ Write("ERLC_OPTS += -D" ++ ToList(D) ++ "=1\n");
+ ({d, DKey, DVal}) ->
+ Write("ERLC_OPTS += -D" ++ ToList(DKey) ++ "=" ++ ToList(DVal) ++ "\n");
+ ({i, I}) ->
+ Write(["ERLC_OPTS += -I ", I, "\n"]);
+ ({platform_define, Regex, D}) ->
+ case rebar_utils:is_arch(Regex) of
+ true -> Write("ERLC_OPTS += -D" ++ ToList(D) ++ "=1\n");
+ false -> ok
+ end;
+ ({parse_transform, PT}) ->
+ Write("ERLC_OPTS += +'{parse_transform, " ++ ToList(PT) ++ "}'\n");
+ (_) -> ok
+ end, ErlOpts)
+ end,
+ Write("\n")
+ end(),
+ GetHexVsn = fun(N, NP) ->
+ case file:consult("$(call core_native_path,$(DEPS_DIR)/$1/rebar.lock)") of
+ {ok, Lock} ->
+ io:format("~p~n", [Lock]),
+ case lists:keyfind("1.1.0", 1, Lock) of
+ {_, LockPkgs} ->
+ io:format("~p~n", [LockPkgs]),
+ case lists:keyfind(atom_to_binary(N, latin1), 1, LockPkgs) of
+ {_, {pkg, _, Vsn}, _} ->
+ io:format("~p~n", [Vsn]),
+ {N, {hex, NP, binary_to_list(Vsn)}};
+ _ ->
+ false
+ end;
+ _ ->
+ false
+ end;
+ _ ->
+ false
+ end
+ end,
+ SemVsn = fun
+ ("~>" ++ S0) ->
+ S = case S0 of
+ " " ++ S1 -> S1;
+ _ -> S0
+ end,
+ case length([ok || $$. <- S]) of
+ 0 -> S ++ ".0.0";
+ 1 -> S ++ ".0";
+ _ -> S
+ end;
+ (S) -> S
+ end,
+ fun() ->
+ File = case lists:keyfind(deps, 1, Conf) of
+ false -> [];
+ {_, Deps} ->
+ [begin case case Dep of
+ N when is_atom(N) -> GetHexVsn(N, N);
+ {N, S} when is_atom(N), is_list(S) -> {N, {hex, N, SemVsn(S)}};
+ {N, {pkg, NP}} when is_atom(N) -> GetHexVsn(N, NP);
+ {N, S, {pkg, NP}} -> {N, {hex, NP, S}};
+ {N, S} when is_tuple(S) -> {N, S};
+ {N, _, S} -> {N, S};
+ {N, _, S, _} -> {N, S};
+ _ -> false
+ end of
+ false -> ok;
+ {Name, Source} ->
+ {Method, Repo, Commit} = case Source of
+ {hex, NPV, V} -> {hex, V, NPV};
+ {git, R} -> {git, R, master};
+ {M, R, {branch, C}} -> {M, R, C};
+ {M, R, {ref, C}} -> {M, R, C};
+ {M, R, {tag, C}} -> {M, R, C};
+ {M, R, C} -> {M, R, C}
+ end,
+ Write(io_lib:format("DEPS += ~s\ndep_~s = ~s ~s ~s~n", [Name, Name, Method, Repo, Commit]))
+ end end || Dep <- Deps]
+ end
+ end(),
+ fun() ->
+ case lists:keyfind(erl_first_files, 1, Conf) of
+ false -> ok;
+ {_, Files} ->
+ Names = [[" ", case lists:reverse(F) of
+ "lre." ++ Elif -> lists:reverse(Elif);
+ "lrx." ++ Elif -> lists:reverse(Elif);
+ "lry." ++ Elif -> lists:reverse(Elif);
+ Elif -> lists:reverse(Elif)
+ end] || "src/" ++ F <- Files],
+ Write(io_lib:format("COMPILE_FIRST +=~s\n", [Names]))
+ end
+ end(),
+ Write("\n\nrebar_dep: preprocess pre-deps deps pre-app app\n"),
+ Write("\npreprocess::\n"),
+ Write("\npre-deps::\n"),
+ Write("\npre-app::\n"),
+ PatchHook = fun(Cmd) ->
+ Cmd2 = re:replace(Cmd, "^([g]?make)(.*)( -C.*)", "\\\\1\\\\3\\\\2", [{return, list}]),
+ case Cmd2 of
+ "make -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1);
+ "gmake -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1);
+ "make " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1);
+ "gmake " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1);
+ _ -> Escape(Cmd)
+ end
+ end,
+ fun() ->
+ case lists:keyfind(pre_hooks, 1, Conf) of
+ false -> ok;
+ {_, Hooks} ->
+ [case H of
+ {'get-deps', Cmd} ->
+ Write("\npre-deps::\n\t" ++ PatchHook(Cmd) ++ "\n");
+ {compile, Cmd} ->
+ Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n");
+ {Regex, compile, Cmd} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n");
+ false -> ok
+ end;
+ _ -> ok
+ end || H <- Hooks]
+ end
+ end(),
+ ShellToMk = fun(V0) ->
+ V1 = re:replace(V0, "[$$][(]", "$$\(shell ", [global]),
+ V = re:replace(V1, "([$$])(?![(])(\\\\w*)", "\\\\1(\\\\2)", [global]),
+ re:replace(V, "-Werror\\\\b", "", [{return, list}, global])
+ end,
+ PortSpecs = fun() ->
+ case lists:keyfind(port_specs, 1, Conf) of
+ false ->
+ case filelib:is_dir("$(call core_native_path,$(DEPS_DIR)/$1/c_src)") of
+ false -> [];
+ true ->
+ [{"priv/" ++ proplists:get_value(so_name, Conf, "$(1)_drv.so"),
+ proplists:get_value(port_sources, Conf, ["c_src/*.c"]), []}]
+ end;
+ {_, Specs} ->
+ lists:flatten([case S of
+ {Output, Input} -> {ShellToMk(Output), Input, []};
+ {Regex, Output, Input} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {ShellToMk(Output), Input, []};
+ false -> []
+ end;
+ {Regex, Output, Input, [{env, Env}]} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {ShellToMk(Output), Input, Env};
+ false -> []
+ end
+ end || S <- Specs])
+ end
+ end(),
+ PortSpecWrite = fun (Text) ->
+ file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/c_src/Makefile.erlang.mk)", Text, [append])
+ end,
+ case PortSpecs of
+ [] -> ok;
+ _ ->
+ Write("\npre-app::\n\t@$$\(MAKE) --no-print-directory -f c_src/Makefile.erlang.mk\n"),
+ PortSpecWrite(io_lib:format("ERL_CFLAGS ?= -finline-functions -Wall -fPIC -I \\"~s/erts-~s/include\\" -I \\"~s\\"\n",
+ [code:root_dir(), erlang:system_info(version), code:lib_dir(erl_interface, include)])),
+ PortSpecWrite(io_lib:format("ERL_LDFLAGS ?= -L \\"~s\\" -lei\n",
+ [code:lib_dir(erl_interface, lib)])),
+ [PortSpecWrite(["\n", E, "\n"]) || E <- OsEnv],
+ FilterEnv = fun(Env) ->
+ lists:flatten([case E of
+ {_, _} -> E;
+ {Regex, K, V} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {K, V};
+ false -> []
+ end
+ end || E <- Env])
+ end,
+ MergeEnv = fun(Env) ->
+ lists:foldl(fun ({K, V}, Acc) ->
+ case lists:keyfind(K, 1, Acc) of
+ false -> [{K, rebar_utils:expand_env_variable(V, K, "")}|Acc];
+ {_, V0} -> [{K, rebar_utils:expand_env_variable(V, K, V0)}|Acc]
+ end
+ end, [], Env)
+ end,
+ PortEnv = case lists:keyfind(port_env, 1, Conf) of
+ false -> [];
+ {_, PortEnv0} -> FilterEnv(PortEnv0)
+ end,
+ PortSpec = fun ({Output, Input0, Env}) ->
+ filelib:ensure_dir("$(call core_native_path,$(DEPS_DIR)/$1/)" ++ Output),
+ Input = [[" ", I] || I <- Input0],
+ PortSpecWrite([
+ [["\n", K, " = ", ShellToMk(V)] || {K, V} <- lists:reverse(MergeEnv(PortEnv))],
+ case $(PLATFORM) of
+ darwin -> "\n\nLDFLAGS += -flat_namespace -undefined suppress";
+ _ -> ""
+ end,
+ "\n\nall:: ", Output, "\n\t@:\n\n",
+ "%.o: %.c\n\t$$\(CC) -c -o $$\@ $$\< $$\(CFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.C\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.cc\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.cpp\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ [[Output, ": ", K, " += ", ShellToMk(V), "\n"] || {K, V} <- lists:reverse(MergeEnv(FilterEnv(Env)))],
+ Output, ": $$\(foreach ext,.c .C .cc .cpp,",
+ "$$\(patsubst %$$\(ext),%.o,$$\(filter %$$\(ext),$$\(wildcard", Input, "))))\n",
+ "\t$$\(CC) -o $$\@ $$\? $$\(LDFLAGS) $$\(ERL_LDFLAGS) $$\(DRV_LDFLAGS) $$\(EXE_LDFLAGS)",
+ case {filename:extension(Output), $(PLATFORM)} of
+ {[], _} -> "\n";
+ {_, darwin} -> "\n";
+ _ -> " -shared\n"
+ end])
+ end,
+ [PortSpec(S) || S <- PortSpecs]
+ end,
+ fun() ->
+ case lists:keyfind(plugins, 1, Conf) of
+ false -> ok;
+ {_, Plugins0} ->
+ Plugins = [P || P <- Plugins0, is_tuple(P)],
+ case lists:keyfind('lfe-compile', 1, Plugins) of
+ false -> ok;
+ _ -> Write("\nBUILD_DEPS = lfe lfe.mk\ndep_lfe.mk = git https://github.com/ninenines/lfe.mk master\nDEP_PLUGINS = lfe.mk\n")
+ end
+ end
+ end(),
+ Write("\ninclude $$\(if $$\(ERLANG_MK_FILENAME),$$\(ERLANG_MK_FILENAME),erlang.mk)"),
+ RunPlugin = fun(Plugin, Step) ->
+ case erlang:function_exported(Plugin, Step, 2) of
+ false -> ok;
+ true ->
+ c:cd("$(call core_native_path,$(DEPS_DIR)/$1/)"),
+ Ret = Plugin:Step({config, "", Conf, dict:new(), dict:new(), dict:new(),
+ dict:store(base_dir, "", dict:new())}, undefined),
+ io:format("rebar plugin ~p step ~p ret ~p~n", [Plugin, Step, Ret])
+ end
+ end,
+ fun() ->
+ case lists:keyfind(plugins, 1, Conf) of
+ false -> ok;
+ {_, Plugins0} ->
+ Plugins = [P || P <- Plugins0, is_atom(P)],
+ [begin
+ case lists:keyfind(deps, 1, Conf) of
+ false -> ok;
+ {_, Deps} ->
+ case lists:keyfind(P, 1, Deps) of
+ false -> ok;
+ _ ->
+ Path = "$(call core_native_path,$(DEPS_DIR)/)" ++ atom_to_list(P),
+ io:format("~s", [os:cmd("$(MAKE) -C $(call core_native_path,$(DEPS_DIR)/$1) " ++ Path)]),
+ io:format("~s", [os:cmd("$(MAKE) -C " ++ Path ++ " IS_DEP=1")]),
+ code:add_patha(Path ++ "/ebin")
+ end
+ end
+ end || P <- Plugins],
+ [case code:load_file(P) of
+ {module, P} -> ok;
+ _ ->
+ case lists:keyfind(plugin_dir, 1, Conf) of
+ false -> ok;
+ {_, PluginsDir} ->
+ ErlFile = "$(call core_native_path,$(DEPS_DIR)/$1/)" ++ PluginsDir ++ "/" ++ atom_to_list(P) ++ ".erl",
+ {ok, P, Bin} = compile:file(ErlFile, [binary]),
+ {module, P} = code:load_binary(P, ErlFile, Bin)
+ end
+ end || P <- Plugins],
+ [RunPlugin(P, preprocess) || P <- Plugins],
+ [RunPlugin(P, pre_compile) || P <- Plugins],
+ [RunPlugin(P, compile) || P <- Plugins]
+ end
+ end(),
+ halt()
+endef
+
+define dep_autopatch_appsrc_script.erl
+ AppSrc = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+ AppSrcScript = AppSrc ++ ".script",
+ {ok, Conf0} = file:consult(AppSrc),
+ Bindings0 = erl_eval:new_bindings(),
+ Bindings1 = erl_eval:add_binding('CONFIG', Conf0, Bindings0),
+ Bindings = erl_eval:add_binding('SCRIPT', AppSrcScript, Bindings1),
+ Conf = case file:script(AppSrcScript, Bindings) of
+ {ok, [C]} -> C;
+ {ok, C} -> C
+ end,
+ ok = file:write_file(AppSrc, io_lib:format("~p.~n", [Conf])),
+ halt()
+endef
+
+define dep_autopatch_appsrc.erl
+ AppSrcOut = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+ AppSrcIn = case filelib:is_regular(AppSrcOut) of false -> "$(call core_native_path,$(DEPS_DIR)/$1/ebin/$1.app)"; true -> AppSrcOut end,
+ case filelib:is_regular(AppSrcIn) of
+ false -> ok;
+ true ->
+ {ok, [{application, $(1), L0}]} = file:consult(AppSrcIn),
+ L1 = lists:keystore(modules, 1, L0, {modules, []}),
+ L2 = case lists:keyfind(vsn, 1, L1) of
+ {_, git} -> lists:keyreplace(vsn, 1, L1, {vsn, lists:droplast(os:cmd("git -C $(DEPS_DIR)/$1 describe --dirty --tags --always"))});
+ {_, {cmd, _}} -> lists:keyreplace(vsn, 1, L1, {vsn, "cmd"});
+ _ -> L1
+ end,
+ L3 = case lists:keyfind(registered, 1, L2) of false -> [{registered, []}|L2]; _ -> L2 end,
+ ok = file:write_file(AppSrcOut, io_lib:format("~p.~n", [{application, $(1), L3}])),
+ case AppSrcOut of AppSrcIn -> ok; _ -> ok = file:delete(AppSrcIn) end
+ end,
+ halt()
+endef
+
+define dep_fetch_git
+ git clone -q -n -- $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && git checkout -q $(call dep_commit,$(1));
+endef
+
+define dep_fetch_git-subfolder
+ mkdir -p $(ERLANG_MK_TMP)/git-subfolder; \
+ git clone -q -n -- $(call dep_repo,$1) \
+ $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1); \
+ cd $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1) \
+ && git checkout -q $(call dep_commit,$1); \
+ ln -s $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1)/$(word 4,$(dep_$(1))) \
+ $(DEPS_DIR)/$(call dep_name,$1);
+endef
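+
+# Illustrative git-subfolder declaration (hypothetical monorepo): the fourth
+# word selects the subfolder that gets symlinked into $(DEPS_DIR).
+#   dep_my_app = git-subfolder https://example.com/monorepo main apps/my_app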
+
+define dep_fetch_git-submodule
+ git submodule update --init -- $(DEPS_DIR)/$1;
+endef
+
+define dep_fetch_hg
+ hg clone -q -U $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && hg update -q $(call dep_commit,$(1));
+endef
+
+define dep_fetch_svn
+ svn checkout -q $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+define dep_fetch_cp
+ cp -R $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+define dep_fetch_ln
+ ln -s $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+# Hex only has a package version. No need to look in the Erlang.mk packages.
+define dep_fetch_hex
+ mkdir -p $(ERLANG_MK_TMP)/hex $(DEPS_DIR)/$1; \
+ $(call core_http_get,$(ERLANG_MK_TMP)/hex/$1.tar,\
+ https://repo.hex.pm/tarballs/$(if $(word 3,$(dep_$1)),$(word 3,$(dep_$1)),$1)-$(strip $(word 2,$(dep_$1))).tar); \
+ tar -xOf $(ERLANG_MK_TMP)/hex/$1.tar contents.tar.gz | tar -C $(DEPS_DIR)/$1 -xzf -;
+endef
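+
+# Illustrative hex declarations (hypothetical packages and versions):
+#   DEPS += jsx
+#   dep_jsx = hex 3.1.0            # fetches https://repo.hex.pm/tarballs/jsx-3.1.0.tar
+#   dep_foo = hex 1.0.0 foo_pkg    # optional third word sets the hex package name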
+
+define dep_fetch_fail
+ echo "Error: Unknown or invalid dependency: $(1)." >&2; \
+ exit 78;
+endef
+
+# Kept for compatibility with older Erlang.mk configurations.
+define dep_fetch_legacy
+ $(warning WARNING: '$(1)' dependency configuration uses deprecated format.) \
+ git clone -q -n -- $(word 1,$(dep_$(1))) $(DEPS_DIR)/$(1); \
+ cd $(DEPS_DIR)/$(1) && git checkout -q $(if $(word 2,$(dep_$(1))),$(word 2,$(dep_$(1))),master);
+endef
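+
+# Both formats side by side (hypothetical repository):
+#   dep_foo = git https://example.com/foo.git v1.0.0   # current format
+#   dep_foo = https://example.com/foo.git v1.0.0       # deprecated legacy format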
+
+define dep_target
+$(DEPS_DIR)/$(call dep_name,$1): | $(ERLANG_MK_TMP)
+ $(eval DEP_NAME := $(call dep_name,$1))
+ $(eval DEP_STR := $(if $(filter $1,$(DEP_NAME)),$1,"$1 ($(DEP_NAME))"))
+ $(verbose) if test -d $(APPS_DIR)/$(DEP_NAME); then \
+ echo "Error: Dependency" $(DEP_STR) "conflicts with application found in $(APPS_DIR)/$(DEP_NAME)." >&2; \
+ exit 17; \
+ fi
+ $(verbose) mkdir -p $(DEPS_DIR)
+ $(dep_verbose) $(call dep_fetch_$(strip $(call dep_fetch,$(1))),$(1))
+ $(verbose) if [ -f $(DEPS_DIR)/$(1)/configure.ac -o -f $(DEPS_DIR)/$(1)/configure.in ] \
+ && [ ! -f $(DEPS_DIR)/$(1)/configure ]; then \
+ echo " AUTO " $(DEP_STR); \
+ cd $(DEPS_DIR)/$(1) && autoreconf -Wall -vif -I m4; \
+ fi
+ - $(verbose) if [ -f $(DEPS_DIR)/$(DEP_NAME)/configure ]; then \
+ echo " CONF " $(DEP_STR); \
+ cd $(DEPS_DIR)/$(DEP_NAME) && ./configure; \
+ fi
+ifeq ($(filter $(1),$(NO_AUTOPATCH)),)
+ $(verbose) $$(MAKE) --no-print-directory autopatch-$(DEP_NAME)
+endif
+
+.PHONY: autopatch-$(call dep_name,$1)
+
+autopatch-$(call dep_name,$1)::
+ $(verbose) if [ "$(1)" = "amqp_client" -a "$(RABBITMQ_CLIENT_PATCH)" ]; then \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \
+ echo " PATCH Downloading rabbitmq-codegen"; \
+ git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \
+ fi; \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-server ]; then \
+ echo " PATCH Downloading rabbitmq-server"; \
+ git clone https://github.com/rabbitmq/rabbitmq-server.git $(DEPS_DIR)/rabbitmq-server; \
+ fi; \
+ ln -s $(DEPS_DIR)/amqp_client/deps/rabbit_common-0.0.0 $(DEPS_DIR)/rabbit_common; \
+ elif [ "$(1)" = "rabbit" -a "$(RABBITMQ_SERVER_PATCH)" ]; then \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \
+ echo " PATCH Downloading rabbitmq-codegen"; \
+ git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \
+ fi \
+ elif [ "$1" = "elixir" -a "$(ELIXIR_PATCH)" ]; then \
+ ln -s lib/elixir/ebin $(DEPS_DIR)/elixir/; \
+ else \
+ $$(call dep_autopatch,$(call dep_name,$1)) \
+ fi
+endef
+
+$(foreach dep,$(BUILD_DEPS) $(DEPS),$(eval $(call dep_target,$(dep))))
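+
+# Autopatching can be skipped for individual dependencies, e.g. (hypothetical name):
+#   NO_AUTOPATCH = my_handwritten_dep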
+
+ifndef IS_APP
+clean:: clean-apps
+
+clean-apps:
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ $(MAKE) -C $$dep clean IS_APP=1; \
+ done
+
+distclean:: distclean-apps
+
+distclean-apps:
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ $(MAKE) -C $$dep distclean IS_APP=1; \
+ done
+endif
+
+ifndef SKIP_DEPS
+distclean:: distclean-deps
+
+distclean-deps:
+ $(gen_verbose) rm -rf $(DEPS_DIR)
+endif
+
+# Forward-declare variables used in core/deps-tools.mk. This is required
+# in case plugins use them.
+
+ERLANG_MK_RECURSIVE_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-deps-list.log
+ERLANG_MK_RECURSIVE_DOC_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-doc-deps-list.log
+ERLANG_MK_RECURSIVE_REL_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-rel-deps-list.log
+ERLANG_MK_RECURSIVE_TEST_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-test-deps-list.log
+ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-shell-deps-list.log
+
+ERLANG_MK_QUERY_DEPS_FILE = $(ERLANG_MK_TMP)/query-deps.log
+ERLANG_MK_QUERY_DOC_DEPS_FILE = $(ERLANG_MK_TMP)/query-doc-deps.log
+ERLANG_MK_QUERY_REL_DEPS_FILE = $(ERLANG_MK_TMP)/query-rel-deps.log
+ERLANG_MK_QUERY_TEST_DEPS_FILE = $(ERLANG_MK_TMP)/query-test-deps.log
+ERLANG_MK_QUERY_SHELL_DEPS_FILE = $(ERLANG_MK_TMP)/query-shell-deps.log
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: clean-app
+
+# Configuration.
+
+ERLC_OPTS ?= -Werror +debug_info +warn_export_vars +warn_shadow_vars \
+ +warn_obsolete_guard # +bin_opt_info +warn_export_all +warn_missing_spec
+COMPILE_FIRST ?=
+COMPILE_FIRST_PATHS = $(addprefix src/,$(addsuffix .erl,$(COMPILE_FIRST)))
+ERLC_EXCLUDE ?=
+ERLC_EXCLUDE_PATHS = $(addprefix src/,$(addsuffix .erl,$(ERLC_EXCLUDE)))
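+
+# A project can override these in its own Makefile, for instance:
+#   ERLC_OPTS = +debug_info +warn_missing_spec   # replaces the defaults above
+#   ERLC_EXCLUDE = my_generated_module           # hypothetical module to skip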
+
+ERLC_ASN1_OPTS ?=
+
+ERLC_MIB_OPTS ?=
+COMPILE_MIB_FIRST ?=
+COMPILE_MIB_FIRST_PATHS = $(addprefix mibs/,$(addsuffix .mib,$(COMPILE_MIB_FIRST)))
+
+# Verbosity.
+
+app_verbose_0 = @echo " APP " $(PROJECT);
+app_verbose_2 = set -x;
+app_verbose = $(app_verbose_$(V))
+
+appsrc_verbose_0 = @echo " APP " $(PROJECT).app.src;
+appsrc_verbose_2 = set -x;
+appsrc_verbose = $(appsrc_verbose_$(V))
+
+makedep_verbose_0 = @echo " DEPEND" $(PROJECT).d;
+makedep_verbose_2 = set -x;
+makedep_verbose = $(makedep_verbose_$(V))
+
+erlc_verbose_0 = @echo " ERLC " $(filter-out $(patsubst %,%.erl,$(ERLC_EXCLUDE)),\
+ $(filter %.erl %.core,$(?F)));
+erlc_verbose_2 = set -x;
+erlc_verbose = $(erlc_verbose_$(V))
+
+xyrl_verbose_0 = @echo " XYRL " $(filter %.xrl %.yrl,$(?F));
+xyrl_verbose_2 = set -x;
+xyrl_verbose = $(xyrl_verbose_$(V))
+
+asn1_verbose_0 = @echo " ASN1 " $(filter %.asn1,$(?F));
+asn1_verbose_2 = set -x;
+asn1_verbose = $(asn1_verbose_$(V))
+
+mib_verbose_0 = @echo " MIB " $(filter %.bin %.mib,$(?F));
+mib_verbose_2 = set -x;
+mib_verbose = $(mib_verbose_$(V))
+
+ifneq ($(wildcard src/),)
+
+# Targets.
+
+app:: $(if $(wildcard ebin/test),clean) deps
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d
+ $(verbose) $(MAKE) --no-print-directory app-build
+
+ifeq ($(wildcard src/$(PROJECT_MOD).erl),)
+define app_file
+{application, '$(PROJECT)', [
+ {description, "$(PROJECT_DESCRIPTION)"},
+ {vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP),
+ {id$(comma)$(space)"$(1)"}$(comma))
+ {modules, [$(call comma_list,$(2))]},
+ {registered, []},
+ {applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(foreach dep,$(DEPS),$(call dep_name,$(dep))))]},
+ {env, $(subst \,\\,$(PROJECT_ENV))}$(if $(findstring {,$(PROJECT_APP_EXTRA_KEYS)),$(comma)$(newline)$(tab)$(subst \,\\,$(PROJECT_APP_EXTRA_KEYS)),)
+]}.
+endef
+else
+define app_file
+{application, '$(PROJECT)', [
+ {description, "$(PROJECT_DESCRIPTION)"},
+ {vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP),
+ {id$(comma)$(space)"$(1)"}$(comma))
+ {modules, [$(call comma_list,$(2))]},
+ {registered, [$(call comma_list,$(PROJECT)_sup $(PROJECT_REGISTERED))]},
+ {applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(foreach dep,$(DEPS),$(call dep_name,$(dep))))]},
+ {mod, {$(PROJECT_MOD), []}},
+ {env, $(subst \,\\,$(PROJECT_ENV))}$(if $(findstring {,$(PROJECT_APP_EXTRA_KEYS)),$(comma)$(newline)$(tab)$(subst \,\\,$(PROJECT_APP_EXTRA_KEYS)),)
+]}.
+endef
+endif
+
+app-build: ebin/$(PROJECT).app
+ $(verbose) :
+
+# Source files.
+
+ALL_SRC_FILES := $(sort $(call core_find,src/,*))
+
+ERL_FILES := $(filter %.erl,$(ALL_SRC_FILES))
+CORE_FILES := $(filter %.core,$(ALL_SRC_FILES))
+
+# ASN.1 files.
+
+ifneq ($(wildcard asn1/),)
+ASN1_FILES = $(sort $(call core_find,asn1/,*.asn1))
+ERL_FILES += $(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES))))
+
+define compile_asn1
+ $(verbose) mkdir -p include/
+ $(asn1_verbose) erlc -v -I include/ -o asn1/ +noobj $(ERLC_ASN1_OPTS) $(1)
+ $(verbose) mv asn1/*.erl src/
+ -$(verbose) mv asn1/*.hrl include/
+ $(verbose) mv asn1/*.asn1db include/
+endef
+
+$(PROJECT).d:: $(ASN1_FILES)
+ $(if $(strip $?),$(call compile_asn1,$?))
+endif
+
+# SNMP MIB files.
+
+ifneq ($(wildcard mibs/),)
+MIB_FILES = $(sort $(call core_find,mibs/,*.mib))
+
+$(PROJECT).d:: $(COMPILE_MIB_FIRST_PATHS) $(MIB_FILES)
+ $(verbose) mkdir -p include/ priv/mibs/
+ $(mib_verbose) erlc -v $(ERLC_MIB_OPTS) -o priv/mibs/ -I priv/mibs/ $?
+ $(mib_verbose) erlc -o include/ -- $(addprefix priv/mibs/,$(patsubst %.mib,%.bin,$(notdir $?)))
+endif
+
+# Leex and Yecc files.
+
+XRL_FILES := $(filter %.xrl,$(ALL_SRC_FILES))
+XRL_ERL_FILES = $(addprefix src/,$(patsubst %.xrl,%.erl,$(notdir $(XRL_FILES))))
+ERL_FILES += $(XRL_ERL_FILES)
+
+YRL_FILES := $(filter %.yrl,$(ALL_SRC_FILES))
+YRL_ERL_FILES = $(addprefix src/,$(patsubst %.yrl,%.erl,$(notdir $(YRL_FILES))))
+ERL_FILES += $(YRL_ERL_FILES)
+
+$(PROJECT).d:: $(XRL_FILES) $(YRL_FILES)
+ $(if $(strip $?),$(xyrl_verbose) erlc -v -o src/ $(YRL_ERLC_OPTS) $?)
+
+# Erlang and Core Erlang files.
+
+define makedep.erl
+ E = ets:new(makedep, [bag]),
+ G = digraph:new([acyclic]),
+ ErlFiles = lists:usort(string:tokens("$(ERL_FILES)", " ")),
+ DepsDir = "$(call core_native_path,$(DEPS_DIR))",
+ AppsDir = "$(call core_native_path,$(APPS_DIR))",
+ DepsDirsSrc = "$(if $(wildcard $(DEPS_DIR)/*/src), $(call core_native_path,$(wildcard $(DEPS_DIR)/*/src)))",
+ DepsDirsInc = "$(if $(wildcard $(DEPS_DIR)/*/include), $(call core_native_path,$(wildcard $(DEPS_DIR)/*/include)))",
+ AppsDirsSrc = "$(if $(wildcard $(APPS_DIR)/*/src), $(call core_native_path,$(wildcard $(APPS_DIR)/*/src)))",
+ AppsDirsInc = "$(if $(wildcard $(APPS_DIR)/*/include), $(call core_native_path,$(wildcard $(APPS_DIR)/*/include)))",
+ DepsDirs = lists:usort(string:tokens(DepsDirsSrc++DepsDirsInc, " ")),
+ AppsDirs = lists:usort(string:tokens(AppsDirsSrc++AppsDirsInc, " ")),
+ Modules = [{list_to_atom(filename:basename(F, ".erl")), F} || F <- ErlFiles],
+ Add = fun (Mod, Dep) ->
+ case lists:keyfind(Dep, 1, Modules) of
+ false -> ok;
+ {_, DepFile} ->
+ {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+ ets:insert(E, {ModFile, DepFile}),
+ digraph:add_vertex(G, Mod),
+ digraph:add_vertex(G, Dep),
+ digraph:add_edge(G, Mod, Dep)
+ end
+ end,
+ AddHd = fun (F, Mod, DepFile) ->
+ case file:open(DepFile, [read]) of
+ {error, enoent} ->
+ ok;
+ {ok, Fd} ->
+ {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+ case ets:match(E, {ModFile, DepFile}) of
+ [] ->
+ ets:insert(E, {ModFile, DepFile}),
+ F(F, Fd, Mod,0);
+ _ -> ok
+ end
+ end
+ end,
+ SearchHrl = fun
+ F(_Hrl, []) -> {error,enoent};
+ F(Hrl, [Dir|Dirs]) ->
+ HrlF = filename:join([Dir,Hrl]),
+ case filelib:is_file(HrlF) of
+ true ->
+ {ok, HrlF};
+ false -> F(Hrl,Dirs)
+ end
+ end,
+ Attr = fun
+ (_F, Mod, behavior, Dep) ->
+ Add(Mod, Dep);
+ (_F, Mod, behaviour, Dep) ->
+ Add(Mod, Dep);
+ (_F, Mod, compile, {parse_transform, Dep}) ->
+ Add(Mod, Dep);
+ (_F, Mod, compile, Opts) when is_list(Opts) ->
+ case proplists:get_value(parse_transform, Opts) of
+ undefined -> ok;
+ Dep -> Add(Mod, Dep)
+ end;
+ (F, Mod, include, Hrl) ->
+ case SearchHrl(Hrl, ["src", "include",AppsDir,DepsDir]++AppsDirs++DepsDirs) of
+ {ok, FoundHrl} -> AddHd(F, Mod, FoundHrl);
+ {error, _} -> false
+ end;
+ (F, Mod, include_lib, Hrl) ->
+ case SearchHrl(Hrl, ["src", "include",AppsDir,DepsDir]++AppsDirs++DepsDirs) of
+ {ok, FoundHrl} -> AddHd(F, Mod, FoundHrl);
+ {error, _} -> false
+ end;
+ (F, Mod, import, {Imp, _}) ->
+ IsFile =
+ case lists:keyfind(Imp, 1, Modules) of
+ false -> false;
+ {_, FilePath} -> filelib:is_file(FilePath)
+ end,
+ case IsFile of
+ false -> ok;
+ true -> Add(Mod, Imp)
+ end;
+ (_, _, _, _) -> ok
+ end,
+ MakeDepend = fun
+ (F, Fd, Mod, StartLocation) ->
+ {ok, Filename} = file:pid2name(Fd),
+ case io:parse_erl_form(Fd, undefined, StartLocation) of
+ {ok, AbsData, EndLocation} ->
+ case AbsData of
+ {attribute, _, Key, Value} ->
+ Attr(F, Mod, Key, Value),
+ F(F, Fd, Mod, EndLocation);
+ _ -> F(F, Fd, Mod, EndLocation)
+ end;
+ {eof, _ } -> file:close(Fd);
+ {error, ErrorDescription } ->
+ file:close(Fd);
+ {error, ErrorInfo, ErrorLocation} ->
+ F(F, Fd, Mod, ErrorLocation)
+ end,
+ ok
+ end,
+ [begin
+ Mod = list_to_atom(filename:basename(F, ".erl")),
+ case file:open(F, [read]) of
+ {ok, Fd} -> MakeDepend(MakeDepend, Fd, Mod,0);
+ {error, enoent} -> ok
+ end
+ end || F <- ErlFiles],
+ Depend = sofs:to_external(sofs:relation_to_family(sofs:relation(ets:tab2list(E)))),
+ CompileFirst = [X || X <- lists:reverse(digraph_utils:topsort(G)), [] =/= digraph:in_neighbours(G, X)],
+ TargetPath = fun(Target) ->
+ case lists:keyfind(Target, 1, Modules) of
+ false -> "";
+ {_, DepFile} ->
+ DirSubname = tl(string:tokens(filename:dirname(DepFile), "/")),
+ string:join(DirSubname ++ [atom_to_list(Target)], "/")
+ end
+ end,
+ Output0 = [
+ "# Generated by Erlang.mk. Edit at your own risk!\n\n",
+ [[F, "::", [[" ", D] || D <- Deps], "; @touch \$$@\n"] || {F, Deps} <- Depend],
+ "\nCOMPILE_FIRST +=", [[" ", TargetPath(CF)] || CF <- CompileFirst], "\n"
+ ],
+ Output = case "é" of
+ [233] -> unicode:characters_to_binary(Output0);
+ _ -> Output0
+ end,
+ ok = file:write_file("$(1)", Output),
+ halt()
+endef
+
+ifeq ($(if $(NO_MAKEDEP),$(wildcard $(PROJECT).d),),)
+$(PROJECT).d:: $(ERL_FILES) $(call core_find,include/,*.hrl) $(MAKEFILE_LIST)
+ $(makedep_verbose) $(call erlang,$(call makedep.erl,$@))
+endif
+
+ifeq ($(IS_APP)$(IS_DEP),)
+ifneq ($(words $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES)),0)
+# Rebuild everything when the Makefile changes.
+$(ERLANG_MK_TMP)/last-makefile-change: $(MAKEFILE_LIST) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES); \
+ touch -c $(PROJECT).d; \
+ fi
+ $(verbose) touch $@
+
+$(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES):: $(ERLANG_MK_TMP)/last-makefile-change
+ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change
+endif
+endif
+
+$(PROJECT).d::
+ $(verbose) :
+
+include $(wildcard $(PROJECT).d)
+
+ebin/$(PROJECT).app:: ebin/
+
+ebin/:
+ $(verbose) mkdir -p ebin/
+
+define compile_erl
+ $(erlc_verbose) erlc -v $(if $(IS_DEP),$(filter-out -Werror,$(ERLC_OPTS)),$(ERLC_OPTS)) -o ebin/ \
+ -pa ebin/ -I include/ $(filter-out $(ERLC_EXCLUDE_PATHS),$(COMPILE_FIRST_PATHS) $(1))
+endef
+
+define validate_app_file
+ case file:consult("ebin/$(PROJECT).app") of
+ {ok, _} -> halt();
+ _ -> halt(1)
+ end
+endef
+
+ebin/$(PROJECT).app:: $(ERL_FILES) $(CORE_FILES) $(wildcard src/$(PROJECT).app.src)
+ $(eval FILES_TO_COMPILE := $(filter-out src/$(PROJECT).app.src,$?))
+ $(if $(strip $(FILES_TO_COMPILE)),$(call compile_erl,$(FILES_TO_COMPILE)))
+# Older git versions do not have the --first-parent flag. Do without in that case.
+ $(eval GITDESCRIBE := $(shell git describe --dirty --abbrev=7 --tags --always --first-parent 2>/dev/null \
+ || git describe --dirty --abbrev=7 --tags --always 2>/dev/null || true))
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(filter-out $(ERLC_EXCLUDE_PATHS),$(ERL_FILES) $(CORE_FILES) $(BEAM_FILES)))))))
+ifeq ($(wildcard src/$(PROJECT).app.src),)
+ $(app_verbose) printf '$(subst %,%%,$(subst $(newline),\n,$(subst ','\'',$(call app_file,$(GITDESCRIBE),$(MODULES)))))' \
+ > ebin/$(PROJECT).app
+ $(verbose) if ! $(call erlang,$(call validate_app_file)); then \
+ echo "The .app file produced is invalid. Please verify the value of PROJECT_ENV." >&2; \
+ exit 1; \
+ fi
+else
+ $(verbose) if [ -z "$$(grep -e '^[^%]*{\s*modules\s*,' src/$(PROJECT).app.src)" ]; then \
+ echo "Empty modules entry not found in $(PROJECT).app.src. Please consult the erlang.mk documentation for instructions." >&2; \
+ exit 1; \
+ fi
+ $(appsrc_verbose) cat src/$(PROJECT).app.src \
+ | sed "s/{[[:space:]]*modules[[:space:]]*,[[:space:]]*\[\]}/{modules, \[$(call comma_list,$(MODULES))\]}/" \
+ | sed "s/{id,[[:space:]]*\"git\"}/{id, \"$(subst /,\/,$(GITDESCRIBE))\"}/" \
+ > ebin/$(PROJECT).app
+endif
+ifneq ($(wildcard src/$(PROJECT).appup),)
+ $(verbose) cp src/$(PROJECT).appup ebin/
+endif
+
+clean:: clean-app
+
+clean-app:
+ $(gen_verbose) rm -rf $(PROJECT).d ebin/ priv/mibs/ $(XRL_ERL_FILES) $(YRL_ERL_FILES) \
+ $(addprefix include/,$(patsubst %.mib,%.hrl,$(notdir $(MIB_FILES)))) \
+ $(addprefix include/,$(patsubst %.asn1,%.hrl,$(notdir $(ASN1_FILES)))) \
+ $(addprefix include/,$(patsubst %.asn1,%.asn1db,$(notdir $(ASN1_FILES)))) \
+ $(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES))))
+
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Viktor Söderqvist <viktor@zuiderkwast.se>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: docs-deps
+
+# Configuration.
+
+ALL_DOC_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(DOC_DEPS))
+
+# Targets.
+
+$(foreach dep,$(DOC_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+doc-deps:
+else
+doc-deps: $(ALL_DOC_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_DOC_DEPS_DIRS) ; do $(MAKE) -C $$dep IS_DEP=1; done
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: rel-deps
+
+# Configuration.
+
+ALL_REL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(REL_DEPS))
+
+# Targets.
+
+$(foreach dep,$(REL_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+rel-deps:
+else
+rel-deps: $(ALL_REL_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_REL_DEPS_DIRS) ; do $(MAKE) -C $$dep; done
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: test-deps test-dir test-build clean-test-dir
+
+# Configuration.
+
+TEST_DIR ?= $(CURDIR)/test
+
+ALL_TEST_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(TEST_DEPS))
+
+TEST_ERLC_OPTS ?= +debug_info +warn_export_vars +warn_shadow_vars +warn_obsolete_guard
+TEST_ERLC_OPTS += -DTEST=1
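+
+# Typical project usage (hypothetical test-only dependency):
+#   TEST_DEPS = meck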
+
+# Targets.
+
+$(foreach dep,$(TEST_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+test-deps:
+else
+test-deps: $(ALL_TEST_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_TEST_DEPS_DIRS) ; do \
+ if [ -z "$(strip $(FULL))" ] && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ else \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ fi \
+ done
+endif
+
+ifneq ($(wildcard $(TEST_DIR)),)
+test-dir: $(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build
+ @:
+
+test_erlc_verbose_0 = @echo " ERLC " $(filter-out $(patsubst %,%.erl,$(ERLC_EXCLUDE)),\
+ $(filter %.erl %.core,$(notdir $(FILES_TO_COMPILE))));
+test_erlc_verbose_2 = set -x;
+test_erlc_verbose = $(test_erlc_verbose_$(V))
+
+define compile_test_erl
+ $(test_erlc_verbose) erlc -v $(TEST_ERLC_OPTS) -o $(TEST_DIR) \
+ -pa ebin/ -I include/ $(1)
+endef
+
+ERL_TEST_FILES = $(call core_find,$(TEST_DIR)/,*.erl)
+$(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build: $(ERL_TEST_FILES) $(MAKEFILE_LIST)
+ $(eval FILES_TO_COMPILE := $(if $(filter $(MAKEFILE_LIST),$?),$(filter $(ERL_TEST_FILES),$^),$?))
+ $(if $(strip $(FILES_TO_COMPILE)),$(call compile_test_erl,$(FILES_TO_COMPILE)) && touch $@)
+endif
+
+test-build:: IS_TEST=1
+test-build:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build:: $(if $(wildcard src),$(if $(wildcard ebin/test),,clean)) $(if $(IS_APP),,deps test-deps)
+# We already compiled everything when IS_APP=1.
+ifndef IS_APP
+ifneq ($(wildcard src),)
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(verbose) $(MAKE) --no-print-directory app-build ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(gen_verbose) touch ebin/test
+endif
+ifneq ($(wildcard $(TEST_DIR)),)
+ $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+endif
+endif
+
+# Roughly the same as test-build, but when IS_APP=1.
+# We only care about compiling the current application.
+ifdef IS_APP
+test-build-app:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build-app:: deps test-deps
+ifneq ($(wildcard src),)
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(verbose) $(MAKE) --no-print-directory app-build ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(gen_verbose) touch ebin/test
+endif
+ifneq ($(wildcard $(TEST_DIR)),)
+ $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+endif
+endif
+
+clean:: clean-test-dir
+
+clean-test-dir:
+ifneq ($(wildcard $(TEST_DIR)/*.beam),)
+ $(gen_verbose) rm -f $(TEST_DIR)/*.beam $(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: rebar.config
+
+# We strip out -Werror because we don't want to fail due to
+# warnings when used as a dependency.
+
+compat_prepare_erlc_opts = $(shell echo "$1" | sed 's/, */,/g')
+
+define compat_convert_erlc_opts
+$(if $(filter-out -Werror,$1),\
+ $(if $(findstring +,$1),\
+ $(shell echo $1 | cut -b 2-)))
+endef
+
+define compat_erlc_opts_to_list
+[$(call comma_list,$(foreach o,$(call compat_prepare_erlc_opts,$1),$(call compat_convert_erlc_opts,$o)))]
+endef
+
+define compat_rebar_config
+{deps, [
+$(call comma_list,$(foreach d,$(DEPS),\
+ $(if $(filter hex,$(call dep_fetch,$d)),\
+ {$(call dep_name,$d)$(comma)"$(call dep_repo,$d)"},\
+ {$(call dep_name,$d)$(comma)".*"$(comma){git,"$(call dep_repo,$d)"$(comma)"$(call dep_commit,$d)"}})))
+]}.
+{erl_opts, $(call compat_erlc_opts_to_list,$(ERLC_OPTS))}.
+endef
+
+rebar.config:
+ $(gen_verbose) $(call core_render,compat_rebar_config,rebar.config)
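+
+# Running "make rebar.config" renders the template above. A dependency such as
+# (hypothetical) "dep_cowboy = git https://github.com/ninenines/cowboy 2.9.0"
+# becomes: {cowboy,".*",{git,"https://github.com/ninenines/cowboy","2.9.0"}}.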
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter asciideck,$(DEPS) $(DOC_DEPS)),asciideck)
+
+.PHONY: asciidoc asciidoc-guide asciidoc-manual install-asciidoc distclean-asciidoc-guide distclean-asciidoc-manual
+
+# Core targets.
+
+docs:: asciidoc
+
+distclean:: distclean-asciidoc-guide distclean-asciidoc-manual
+
+# Plugin-specific targets.
+
+asciidoc: asciidoc-guide asciidoc-manual
+
+# User guide.
+
+ifeq ($(wildcard doc/src/guide/book.asciidoc),)
+asciidoc-guide:
+else
+asciidoc-guide: distclean-asciidoc-guide doc-deps
+ a2x -v -f pdf doc/src/guide/book.asciidoc && mv doc/src/guide/book.pdf doc/guide.pdf
+ a2x -v -f chunked doc/src/guide/book.asciidoc && mv doc/src/guide/book.chunked/ doc/html/
+
+distclean-asciidoc-guide:
+ $(gen_verbose) rm -rf doc/html/ doc/guide.pdf
+endif
+
+# Man pages.
+
+ASCIIDOC_MANUAL_FILES := $(wildcard doc/src/manual/*.asciidoc)
+
+ifeq ($(ASCIIDOC_MANUAL_FILES),)
+asciidoc-manual:
+else
+
+# Configuration.
+
+MAN_INSTALL_PATH ?= /usr/local/share/man
+MAN_SECTIONS ?= 3 7
+MAN_PROJECT ?= $(shell echo $(PROJECT) | sed 's/^./\U&\E/')
+MAN_VERSION ?= $(PROJECT_VERSION)
+
+# Plugin-specific targets.
+
+define asciidoc2man.erl
+try
+ [begin
+ io:format(" ADOC ~s~n", [F]),
+ ok = asciideck:to_manpage(asciideck:parse_file(F), #{
+ compress => gzip,
+ outdir => filename:dirname(F),
+ extra2 => "$(MAN_PROJECT) $(MAN_VERSION)",
+ extra3 => "$(MAN_PROJECT) Function Reference"
+ })
+ end || F <- [$(shell echo $(addprefix $(comma)\",$(addsuffix \",$1)) | sed 's/^.//')]],
+ halt(0)
+catch C:E ->
+ io:format("Exception ~p:~p~nStacktrace: ~p~n", [C, E, erlang:get_stacktrace()]),
+ halt(1)
+end.
+endef
+
+asciidoc-manual:: doc-deps
+
+asciidoc-manual:: $(ASCIIDOC_MANUAL_FILES)
+ $(gen_verbose) $(call erlang,$(call asciidoc2man.erl,$?))
+ $(verbose) $(foreach s,$(MAN_SECTIONS),mkdir -p doc/man$s/ && mv doc/src/manual/*.$s.gz doc/man$s/;)
+
+install-docs:: install-asciidoc
+
+install-asciidoc: asciidoc-manual
+ $(foreach s,$(MAN_SECTIONS),\
+ mkdir -p $(MAN_INSTALL_PATH)/man$s/ && \
+ install -g `id -g` -o `id -u` -m 0644 doc/man$s/*.gz $(MAN_INSTALL_PATH)/man$s/;)
+
+distclean-asciidoc-manual:
+ $(gen_verbose) rm -rf $(addprefix doc/man,$(MAN_SECTIONS))
+endif
+endif
+
+# Copyright (c) 2014-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: bootstrap bootstrap-lib bootstrap-rel new list-templates
+
+# Core targets.
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Bootstrap targets:" \
+ " bootstrap Generate a skeleton of an OTP application" \
+ " bootstrap-lib Generate a skeleton of an OTP library" \
+ " bootstrap-rel Generate the files needed to build a release" \
+ " new-app in=NAME Create a new local OTP application NAME" \
+ " new-lib in=NAME Create a new local OTP library NAME" \
+ " new t=TPL n=NAME Generate a module NAME based on the template TPL" \
+ " new t=T n=N in=APP Generate a module NAME based on the template TPL in APP" \
+ " list-templates List available templates"
+
+# Bootstrap templates.
+
+define bs_appsrc
+{application, $p, [
+ {description, ""},
+ {vsn, "0.1.0"},
+ {id, "git"},
+ {modules, []},
+ {registered, []},
+ {applications, [
+ kernel,
+ stdlib
+ ]},
+ {mod, {$p_app, []}},
+ {env, []}
+]}.
+endef
+
+define bs_appsrc_lib
+{application, $p, [
+ {description, ""},
+ {vsn, "0.1.0"},
+ {id, "git"},
+ {modules, []},
+ {registered, []},
+ {applications, [
+ kernel,
+ stdlib
+ ]}
+]}.
+endef
+
+# To prevent autocompletion issues with ZSH, we add "include erlang.mk"
+# separately during the actual bootstrap.
+define bs_Makefile
+PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.1.0
+$(if $(SP),
+# Whitespace to be used when creating files from templates.
+SP = $(SP)
+)
+endef
+
+define bs_apps_Makefile
+PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.1.0
+$(if $(SP),
+# Whitespace to be used when creating files from templates.
+SP = $(SP)
+)
+# Make sure we know where the applications are located.
+ROOT_DIR ?= $(call core_relpath,$(dir $(ERLANG_MK_FILENAME)),$(APPS_DIR)/app)
+APPS_DIR ?= ..
+DEPS_DIR ?= $(call core_relpath,$(DEPS_DIR),$(APPS_DIR)/app)
+
+include $$(ROOT_DIR)/erlang.mk
+endef
+
+define bs_app
+-module($p_app).
+-behaviour(application).
+
+-export([start/2]).
+-export([stop/1]).
+
+start(_Type, _Args) ->
+ $p_sup:start_link().
+
+stop(_State) ->
+ ok.
+endef
+
+define bs_relx_config
+{release, {$p_release, "1"}, [$p, sasl, runtime_tools]}.
+{extended_start_script, true}.
+{sys_config, "config/sys.config"}.
+{vm_args, "config/vm.args"}.
+endef
+
+define bs_sys_config
+[
+].
+endef
+
+define bs_vm_args
+-name $p@127.0.0.1
+-setcookie $p
+-heart
+endef
+
+# Normal templates.
+
+define tpl_supervisor
+-module($(n)).
+-behaviour(supervisor).
+
+-export([start_link/0]).
+-export([init/1]).
+
+start_link() ->
+ supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+init([]) ->
+ Procs = [],
+ {ok, {{one_for_one, 1, 5}, Procs}}.
+endef
+
+define tpl_gen_server
+-module($(n)).
+-behaviour(gen_server).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_server.
+-export([init/1]).
+-export([handle_call/3]).
+-export([handle_cast/2]).
+-export([handle_info/2]).
+-export([terminate/2]).
+-export([code_change/3]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_server:start_link(?MODULE, [], []).
+
+%% gen_server.
+
+init([]) ->
+ {ok, #state{}}.
+
+handle_call(_Request, _From, State) ->
+ {reply, ignored, State}.
+
+handle_cast(_Msg, State) ->
+ {noreply, State}.
+
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+endef
+
+define tpl_module
+-module($(n)).
+-export([]).
+endef
+
+define tpl_cowboy_http
+-module($(n)).
+-behaviour(cowboy_http_handler).
+
+-export([init/3]).
+-export([handle/2]).
+-export([terminate/3]).
+
+-record(state, {
+}).
+
+init(_, Req, _Opts) ->
+ {ok, Req, #state{}}.
+
+handle(Req, State=#state{}) ->
+ {ok, Req2} = cowboy_req:reply(200, Req),
+ {ok, Req2, State}.
+
+terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_gen_fsm
+-module($(n)).
+-behaviour(gen_fsm).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_fsm.
+-export([init/1]).
+-export([state_name/2]).
+-export([handle_event/3]).
+-export([state_name/3]).
+-export([handle_sync_event/4]).
+-export([handle_info/3]).
+-export([terminate/3]).
+-export([code_change/4]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_fsm:start_link(?MODULE, [], []).
+
+%% gen_fsm.
+
+init([]) ->
+ {ok, state_name, #state{}}.
+
+state_name(_Event, StateData) ->
+ {next_state, state_name, StateData}.
+
+handle_event(_Event, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+state_name(_Event, _From, StateData) ->
+ {reply, ignored, state_name, StateData}.
+
+handle_sync_event(_Event, _From, StateName, StateData) ->
+ {reply, ignored, StateName, StateData}.
+
+handle_info(_Info, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+terminate(_Reason, _StateName, _StateData) ->
+ ok.
+
+code_change(_OldVsn, StateName, StateData, _Extra) ->
+ {ok, StateName, StateData}.
+endef
+
+define tpl_gen_statem
+-module($(n)).
+-behaviour(gen_statem).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_statem.
+-export([callback_mode/0]).
+-export([init/1]).
+-export([state_name/3]).
+-export([handle_event/4]).
+-export([terminate/3]).
+-export([code_change/4]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_statem:start_link(?MODULE, [], []).
+
+%% gen_statem.
+
+callback_mode() ->
+ state_functions.
+
+init([]) ->
+ {ok, state_name, #state{}}.
+
+state_name(_EventType, _EventData, StateData) ->
+ {next_state, state_name, StateData}.
+
+handle_event(_EventType, _EventData, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+terminate(_Reason, _StateName, _StateData) ->
+ ok.
+
+code_change(_OldVsn, StateName, StateData, _Extra) ->
+ {ok, StateName, StateData}.
+endef
+
+define tpl_cowboy_loop
+-module($(n)).
+-behaviour(cowboy_loop_handler).
+
+-export([init/3]).
+-export([info/3]).
+-export([terminate/3]).
+
+-record(state, {
+}).
+
+init(_, Req, _Opts) ->
+ {loop, Req, #state{}, 5000, hibernate}.
+
+info(_Info, Req, State) ->
+ {loop, Req, State, hibernate}.
+
+terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_cowboy_rest
+-module($(n)).
+
+-export([init/3]).
+-export([content_types_provided/2]).
+-export([get_html/2]).
+
+init(_, _Req, _Opts) ->
+ {upgrade, protocol, cowboy_rest}.
+
+content_types_provided(Req, State) ->
+ {[{{<<"text">>, <<"html">>, '*'}, get_html}], Req, State}.
+
+get_html(Req, State) ->
+ {<<"<html><body>This is REST!</body></html>">>, Req, State}.
+endef
+
+define tpl_cowboy_ws
+-module($(n)).
+-behaviour(cowboy_websocket_handler).
+
+-export([init/3]).
+-export([websocket_init/3]).
+-export([websocket_handle/3]).
+-export([websocket_info/3]).
+-export([websocket_terminate/3]).
+
+-record(state, {
+}).
+
+init(_, _, _) ->
+ {upgrade, protocol, cowboy_websocket}.
+
+websocket_init(_, Req, _Opts) ->
+ Req2 = cowboy_req:compact(Req),
+ {ok, Req2, #state{}}.
+
+websocket_handle({text, Data}, Req, State) ->
+ {reply, {text, Data}, Req, State};
+websocket_handle({binary, Data}, Req, State) ->
+ {reply, {binary, Data}, Req, State};
+websocket_handle(_Frame, Req, State) ->
+ {ok, Req, State}.
+
+websocket_info(_Info, Req, State) ->
+ {ok, Req, State}.
+
+websocket_terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_ranch_protocol
+-module($(n)).
+-behaviour(ranch_protocol).
+
+-export([start_link/4]).
+-export([init/4]).
+
+-type opts() :: [].
+-export_type([opts/0]).
+
+-record(state, {
+ socket :: inet:socket(),
+ transport :: module()
+}).
+
+start_link(Ref, Socket, Transport, Opts) ->
+ Pid = spawn_link(?MODULE, init, [Ref, Socket, Transport, Opts]),
+ {ok, Pid}.
+
+-spec init(ranch:ref(), inet:socket(), module(), opts()) -> ok.
+init(Ref, Socket, Transport, _Opts) ->
+ ok = ranch:accept_ack(Ref),
+ loop(#state{socket=Socket, transport=Transport}).
+
+loop(State) ->
+ loop(State).
+endef
+
+# Plugin-specific targets.
+
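+# WS is the whitespace used to indent generated files: when SP is set to a
+# number, the $(wordlist) trick below expands to that many spaces; otherwise
+# a tab is used.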
+ifndef WS
+ifdef SP
+WS = $(subst a,,a $(wordlist 1,$(SP),a a a a a a a a a a a a a a a a a a a a))
+else
+WS = $(tab)
+endif
+endif
+
+bootstrap:
+ifneq ($(wildcard src/),)
+ $(error Error: src/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(eval n := $(PROJECT)_sup)
+ $(verbose) $(call core_render,bs_Makefile,Makefile)
+ $(verbose) echo "include erlang.mk" >> Makefile
+ $(verbose) mkdir src/
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc,src/$(PROJECT).app.src)
+endif
+ $(verbose) $(call core_render,bs_app,src/$(PROJECT)_app.erl)
+ $(verbose) $(call core_render,tpl_supervisor,src/$(PROJECT)_sup.erl)
+
+bootstrap-lib:
+ifneq ($(wildcard src/),)
+ $(error Error: src/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(verbose) $(call core_render,bs_Makefile,Makefile)
+ $(verbose) echo "include erlang.mk" >> Makefile
+ $(verbose) mkdir src/
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc_lib,src/$(PROJECT).app.src)
+endif
+
+bootstrap-rel:
+ifneq ($(wildcard relx.config),)
+ $(error Error: relx.config already exists)
+endif
+ifneq ($(wildcard config/),)
+ $(error Error: config/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(verbose) $(call core_render,bs_relx_config,relx.config)
+ $(verbose) mkdir config/
+ $(verbose) $(call core_render,bs_sys_config,config/sys.config)
+ $(verbose) $(call core_render,bs_vm_args,config/vm.args)
+
+new-app:
+ifndef in
+ $(error Usage: $(MAKE) new-app in=APP)
+endif
+ifneq ($(wildcard $(APPS_DIR)/$in),)
+ $(error Error: Application $in already exists)
+endif
+ $(eval p := $(in))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(eval n := $(in)_sup)
+ $(verbose) mkdir -p $(APPS_DIR)/$p/src/
+ $(verbose) $(call core_render,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile)
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc,$(APPS_DIR)/$p/src/$p.app.src)
+endif
+ $(verbose) $(call core_render,bs_app,$(APPS_DIR)/$p/src/$p_app.erl)
+ $(verbose) $(call core_render,tpl_supervisor,$(APPS_DIR)/$p/src/$p_sup.erl)
+
+new-lib:
+ifndef in
+ $(error Usage: $(MAKE) new-lib in=APP)
+endif
+ifneq ($(wildcard $(APPS_DIR)/$in),)
+ $(error Error: Application $in already exists)
+endif
+ $(eval p := $(in))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(verbose) mkdir -p $(APPS_DIR)/$p/src/
+ $(verbose) $(call core_render,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile)
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc_lib,$(APPS_DIR)/$p/src/$p.app.src)
+endif
+
+new:
+ifeq ($(wildcard src/)$(in),)
+ $(error Error: src/ directory does not exist)
+endif
+ifndef t
+ $(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP])
+endif
+ifndef n
+ $(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP])
+endif
+ifdef in
+ $(verbose) $(call core_render,tpl_$(t),$(APPS_DIR)/$(in)/src/$(n).erl)
+else
+ $(verbose) $(call core_render,tpl_$(t),src/$(n).erl)
+endif
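+
+# For example (names assumed): `make new t=gen_statem n=my_statem' renders
+# the tpl_gen_statem template into src/my_statem.erl.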
+
+list-templates:
+	$(verbose) echo Available templates:
+ $(verbose) printf " %s\n" $(sort $(patsubst tpl_%,%,$(filter tpl_%,$(.VARIABLES))))
+
+# Copyright (c) 2014-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: clean-c_src distclean-c_src-env
+
+# Configuration.
+
+C_SRC_DIR ?= $(CURDIR)/c_src
+C_SRC_ENV ?= $(C_SRC_DIR)/env.mk
+C_SRC_OUTPUT ?= $(CURDIR)/priv/$(PROJECT)
+C_SRC_TYPE ?= shared
+
+# System type and C compiler/flags.
+
+ifeq ($(PLATFORM),msys2)
+ C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?= .exe
+ C_SRC_OUTPUT_SHARED_EXTENSION ?= .dll
+else
+ C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?=
+ C_SRC_OUTPUT_SHARED_EXTENSION ?= .so
+endif
+
+ifeq ($(C_SRC_TYPE),shared)
+ C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_SHARED_EXTENSION)
+else
+ C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_EXECUTABLE_EXTENSION)
+endif
+
+ifeq ($(PLATFORM),msys2)
+# We hardcode the compiler used on MSYS2. The default CC=cc does
+# not produce working code. The "gcc" MSYS2 package also doesn't.
+ CC = /mingw64/bin/gcc
+ export CC
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),darwin)
+ CC ?= cc
+ CFLAGS ?= -O3 -std=c99 -arch x86_64 -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -arch x86_64 -Wall
+ LDFLAGS ?= -arch x86_64 -flat_namespace -undefined suppress
+else ifeq ($(PLATFORM),freebsd)
+ CC ?= cc
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),linux)
+ CC ?= gcc
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+endif
+
+ifneq ($(PLATFORM),msys2)
+ CFLAGS += -fPIC
+ CXXFLAGS += -fPIC
+endif
+
+CFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
+CXXFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
+
+LDLIBS += -L"$(ERL_INTERFACE_LIB_DIR)" -lei
+
+# Verbosity.
+
+c_verbose_0 = @echo " C " $(filter-out $(notdir $(MAKEFILE_LIST) $(C_SRC_ENV)),$(^F));
+c_verbose = $(c_verbose_$(V))
+
+cpp_verbose_0 = @echo " CPP " $(filter-out $(notdir $(MAKEFILE_LIST) $(C_SRC_ENV)),$(^F));
+cpp_verbose = $(cpp_verbose_$(V))
+
+link_verbose_0 = @echo " LD " $(@F);
+link_verbose = $(link_verbose_$(V))
+
+# Targets.
+
+ifeq ($(wildcard $(C_SRC_DIR)),)
+else ifneq ($(wildcard $(C_SRC_DIR)/Makefile),)
+app:: app-c_src
+
+test-build:: app-c_src
+
+app-c_src:
+ $(MAKE) -C $(C_SRC_DIR)
+
+clean::
+ $(MAKE) -C $(C_SRC_DIR) clean
+
+else
+
+ifeq ($(SOURCES),)
+SOURCES := $(sort $(foreach pat,*.c *.C *.cc *.cpp,$(call core_find,$(C_SRC_DIR)/,$(pat))))
+endif
+OBJECTS = $(addsuffix .o, $(basename $(SOURCES)))
+
+COMPILE_C = $(c_verbose) $(CC) $(CFLAGS) $(CPPFLAGS) -c
+COMPILE_CPP = $(cpp_verbose) $(CXX) $(CXXFLAGS) $(CPPFLAGS) -c
+
+app:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
+
+test-build:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
+
+$(C_SRC_OUTPUT_FILE): $(OBJECTS)
+ $(verbose) mkdir -p $(dir $@)
+ $(link_verbose) $(CC) $(OBJECTS) \
+ $(LDFLAGS) $(if $(filter $(C_SRC_TYPE),shared),-shared) $(LDLIBS) \
+ -o $(C_SRC_OUTPUT_FILE)
+
+$(OBJECTS): $(MAKEFILE_LIST) $(C_SRC_ENV)
+
+%.o: %.c
+ $(COMPILE_C) $(OUTPUT_OPTION) $<
+
+%.o: %.cc
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+%.o: %.C
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+%.o: %.cpp
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+clean:: clean-c_src
+
+clean-c_src:
+ $(gen_verbose) rm -f $(C_SRC_OUTPUT_FILE) $(OBJECTS)
+
+endif
+
+ifneq ($(wildcard $(C_SRC_DIR)),)
+ERL_ERTS_DIR = $(shell $(ERL) -eval 'io:format("~s~n", [code:lib_dir(erts)]), halt().')
+
+$(C_SRC_ENV):
+ $(verbose) $(ERL) -eval "file:write_file(\"$(call core_native_path,$(C_SRC_ENV))\", \
+ io_lib:format( \
+ \"# Generated by Erlang.mk. Edit at your own risk!~n~n\" \
+ \"ERTS_INCLUDE_DIR ?= ~s/erts-~s/include/~n\" \
+ \"ERL_INTERFACE_INCLUDE_DIR ?= ~s~n\" \
+ \"ERL_INTERFACE_LIB_DIR ?= ~s~n\" \
+ \"ERTS_DIR ?= $(ERL_ERTS_DIR)~n\", \
+ [code:root_dir(), erlang:system_info(version), \
+ code:lib_dir(erl_interface, include), \
+ code:lib_dir(erl_interface, lib)])), \
+ halt()."
+
+distclean:: distclean-c_src-env
+
+distclean-c_src-env:
+ $(gen_verbose) rm -f $(C_SRC_ENV)
+
+-include $(C_SRC_ENV)
+
+ifneq ($(ERL_ERTS_DIR),$(ERTS_DIR))
+$(shell rm -f $(C_SRC_ENV))
+endif
+endif
+
+# Templates.
+
+define bs_c_nif
+#include "erl_nif.h"
+
+static int loads = 0;
+
+static int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
+{
+ /* Initialize private data. */
+ *priv_data = NULL;
+
+ loads++;
+
+ return 0;
+}
+
+static int upgrade(ErlNifEnv* env, void** priv_data, void** old_priv_data, ERL_NIF_TERM load_info)
+{
+ /* Convert the private data to the new version. */
+ *priv_data = *old_priv_data;
+
+ loads++;
+
+ return 0;
+}
+
+static void unload(ErlNifEnv* env, void* priv_data)
+{
+ if (loads == 1) {
+ /* Destroy the private data. */
+ }
+
+ loads--;
+}
+
+static ERL_NIF_TERM hello(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ if (enif_is_atom(env, argv[0])) {
+ return enif_make_tuple2(env,
+ enif_make_atom(env, "hello"),
+ argv[0]);
+ }
+
+ return enif_make_tuple2(env,
+ enif_make_atom(env, "error"),
+ enif_make_atom(env, "badarg"));
+}
+
+static ErlNifFunc nif_funcs[] = {
+ {"hello", 1, hello}
+};
+
+ERL_NIF_INIT($n, nif_funcs, load, NULL, upgrade, unload)
+endef
+
+define bs_erl_nif
+-module($n).
+
+-export([hello/1]).
+
+-on_load(on_load/0).
+on_load() ->
+ PrivDir = case code:priv_dir(?MODULE) of
+ {error, _} ->
+ AppPath = filename:dirname(filename:dirname(code:which(?MODULE))),
+ filename:join(AppPath, "priv");
+ Path ->
+ Path
+ end,
+ erlang:load_nif(filename:join(PrivDir, atom_to_list(?MODULE)), 0).
+
+hello(_) ->
+ erlang:nif_error({not_loaded, ?MODULE}).
+endef
+
+new-nif:
+ifneq ($(wildcard $(C_SRC_DIR)/$n.c),)
+ $(error Error: $(C_SRC_DIR)/$n.c already exists)
+endif
+ifneq ($(wildcard src/$n.erl),)
+ $(error Error: src/$n.erl already exists)
+endif
+ifndef n
+ $(error Usage: $(MAKE) new-nif n=NAME [in=APP])
+endif
+ifdef in
+ $(verbose) $(MAKE) -C $(APPS_DIR)/$(in)/ new-nif n=$n in=
+else
+ $(verbose) mkdir -p $(C_SRC_DIR) src/
+ $(verbose) $(call core_render,bs_c_nif,$(C_SRC_DIR)/$n.c)
+ $(verbose) $(call core_render,bs_erl_nif,src/$n.erl)
+endif
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: ci ci-prepare ci-setup
+
+CI_OTP ?=
+CI_HIPE ?=
+CI_ERLLVM ?=
+
+ifeq ($(CI_VM),native)
+ERLC_OPTS += +native
+TEST_ERLC_OPTS += +native
+else ifeq ($(CI_VM),erllvm)
+ERLC_OPTS += +native +'{hipe, [to_llvm]}'
+TEST_ERLC_OPTS += +native +'{hipe, [to_llvm]}'
+endif
+
+ifeq ($(strip $(CI_OTP) $(CI_HIPE) $(CI_ERLLVM)),)
+ci::
+else
+
+ci:: $(addprefix ci-,$(CI_OTP) $(addsuffix -native,$(CI_HIPE)) $(addsuffix -erllvm,$(CI_ERLLVM)))
+
+ci-prepare: $(addprefix $(KERL_INSTALL_DIR)/,$(CI_OTP) $(addsuffix -native,$(CI_HIPE)))
+
+ci-setup::
+ $(verbose) :
+
+ci-extra::
+ $(verbose) :
+
+ci_verbose_0 = @echo " CI " $(1);
+ci_verbose = $(ci_verbose_$(V))
+
+define ci_target
+ci-$1: $(KERL_INSTALL_DIR)/$2
+ $(verbose) $(MAKE) --no-print-directory clean
+ $(ci_verbose) \
+ PATH="$(KERL_INSTALL_DIR)/$2/bin:$(PATH)" \
+ CI_OTP_RELEASE="$1" \
+ CT_OPTS="-label $1" \
+ CI_VM="$3" \
+ $(MAKE) ci-setup tests
+ $(verbose) $(MAKE) --no-print-directory ci-extra
+endef
+
+$(foreach otp,$(CI_OTP),$(eval $(call ci_target,$(otp),$(otp),otp)))
+$(foreach otp,$(CI_HIPE),$(eval $(call ci_target,$(otp)-native,$(otp)-native,native)))
+$(foreach otp,$(CI_ERLLVM),$(eval $(call ci_target,$(otp)-erllvm,$(otp)-native,erllvm)))
+
+$(foreach otp,$(filter-out $(ERLANG_OTP),$(CI_OTP)),$(eval $(call kerl_otp_target,$(otp))))
+$(foreach otp,$(filter-out $(ERLANG_HIPE),$(sort $(CI_HIPE) $(CI_ERLLVM))),$(eval $(call kerl_hipe_target,$(otp))))
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Continuous Integration targets:" \
+ " ci Run '$(MAKE) tests' on all configured Erlang versions." \
+ "" \
+ "The CI_OTP variable must be defined with the Erlang versions" \
+	"that are to be tested. For example: CI_OTP = OTP-17.3.4 OTP-17.5.3"
+
+endif
+
+# Copyright (c) 2020, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifdef CONCUERROR_TESTS
+
+.PHONY: concuerror distclean-concuerror
+
+# Configuration
+
+CONCUERROR_LOGS_DIR ?= $(CURDIR)/logs
+CONCUERROR_OPTS ?=
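+
+# CONCUERROR_TESTS lists module:function pairs to check; for example
+# (names assumed) CONCUERROR_TESTS = my_mod:my_test generates a
+# concuerror-my_mod-my_test target that `make concuerror' runs.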
+
+# Core targets.
+
+check:: concuerror
+
+ifndef KEEP_LOGS
+distclean:: distclean-concuerror
+endif
+
+# Plugin-specific targets.
+
+$(ERLANG_MK_TMP)/Concuerror/bin/concuerror: | $(ERLANG_MK_TMP)
+ $(verbose) git clone https://github.com/parapluu/Concuerror $(ERLANG_MK_TMP)/Concuerror
+ $(verbose) $(MAKE) -C $(ERLANG_MK_TMP)/Concuerror
+
+$(CONCUERROR_LOGS_DIR):
+ $(verbose) mkdir -p $(CONCUERROR_LOGS_DIR)
+
+define concuerror_html_report
+<!DOCTYPE html>
+<html lang="en">
+<head>
+<meta charset="utf-8">
+<title>Concuerror HTML report</title>
+</head>
+<body>
+<h1>Concuerror HTML report</h1>
+<p>Generated on $(concuerror_date)</p>
+<ul>
+$(foreach t,$(concuerror_targets),<li><a href="$(t).txt">$(t)</a></li>)
+</ul>
+</body>
+</html>
+endef
+
+concuerror: $(addprefix concuerror-,$(subst :,-,$(CONCUERROR_TESTS)))
+ $(eval concuerror_date := $(shell date))
+ $(eval concuerror_targets := $^)
+ $(verbose) $(call core_render,concuerror_html_report,$(CONCUERROR_LOGS_DIR)/concuerror.html)
+
+define concuerror_target
+.PHONY: concuerror-$1-$2
+
+concuerror-$1-$2: test-build | $(ERLANG_MK_TMP)/Concuerror/bin/concuerror $(CONCUERROR_LOGS_DIR)
+ $(ERLANG_MK_TMP)/Concuerror/bin/concuerror \
+ --pa $(CURDIR)/ebin --pa $(TEST_DIR) \
+ -o $(CONCUERROR_LOGS_DIR)/concuerror-$1-$2.txt \
+ $$(CONCUERROR_OPTS) -m $1 -t $2
+endef
+
+$(foreach test,$(CONCUERROR_TESTS),$(eval $(call concuerror_target,$(firstword $(subst :, ,$(test))),$(lastword $(subst :, ,$(test))))))
+
+distclean-concuerror:
+ $(gen_verbose) rm -rf $(CONCUERROR_LOGS_DIR)
+
+endif
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: ct apps-ct distclean-ct
+
+# Configuration.
+
+CT_OPTS ?=
+
+ifneq ($(wildcard $(TEST_DIR)),)
+ifndef CT_SUITES
+CT_SUITES := $(sort $(subst _SUITE.erl,,$(notdir $(call core_find,$(TEST_DIR)/,*_SUITE.erl))))
+endif
+endif
+CT_SUITES ?=
+CT_LOGS_DIR ?= $(CURDIR)/logs
+
+# Core targets.
+
+tests:: ct
+
+ifndef KEEP_LOGS
+distclean:: distclean-ct
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Common_test targets:" \
+ " ct Run all the common_test suites for this project" \
+ "" \
+	"Each of your common_test suites has an associated target." \
+	"A suite named http_SUITE can be run using the ct-http target."
+
+# Plugin-specific targets.
+
+CT_RUN = ct_run \
+ -no_auto_compile \
+ -noinput \
+ -pa $(CURDIR)/ebin $(TEST_DIR) \
+ -dir $(TEST_DIR) \
+ -logdir $(CT_LOGS_DIR)
+
+ifeq ($(CT_SUITES),)
+ct: $(if $(IS_APP)$(ROOT_DIR),,apps-ct)
+else
+# We do not run tests if we are in an apps/* with no test directory.
+ifneq ($(IS_APP)$(wildcard $(TEST_DIR)),1)
+ct: test-build $(if $(IS_APP)$(ROOT_DIR),,apps-ct)
+ $(verbose) mkdir -p $(CT_LOGS_DIR)
+ $(gen_verbose) $(CT_RUN) -sname ct_$(PROJECT) -suite $(addsuffix _SUITE,$(CT_SUITES)) $(CT_OPTS)
+endif
+endif
+
+ifneq ($(ALL_APPS_DIRS),)
+define ct_app_target
+apps-ct-$1: test-build
+ $$(MAKE) -C $1 ct IS_APP=1
+endef
+
+$(foreach app,$(ALL_APPS_DIRS),$(eval $(call ct_app_target,$(app))))
+
+apps-ct: $(addprefix apps-ct-,$(ALL_APPS_DIRS))
+endif
+
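+# `t' narrows a suite run: t=my_group runs a single group and
+# t=my_group:my_case a single case within it; `c' alone selects a case
+# (group and case names assumed).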
+ifdef t
+ifeq (,$(findstring :,$t))
+CT_EXTRA = -group $t
+else
+t_words = $(subst :, ,$t)
+CT_EXTRA = -group $(firstword $(t_words)) -case $(lastword $(t_words))
+endif
+else
+ifdef c
+CT_EXTRA = -case $c
+else
+CT_EXTRA =
+endif
+endif
+
+define ct_suite_target
+ct-$(1): test-build
+ $(verbose) mkdir -p $(CT_LOGS_DIR)
+ $(gen_verbose_esc) $(CT_RUN) -sname ct_$(PROJECT) -suite $(addsuffix _SUITE,$(1)) $(CT_EXTRA) $(CT_OPTS)
+endef
+
+$(foreach test,$(CT_SUITES),$(eval $(call ct_suite_target,$(test))))
+
+distclean-ct:
+ $(gen_verbose) rm -rf $(CT_LOGS_DIR)
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: plt distclean-plt dialyze
+
+# Configuration.
+
+DIALYZER_PLT ?= $(CURDIR)/.$(PROJECT).plt
+export DIALYZER_PLT
+
+PLT_APPS ?=
+DIALYZER_DIRS ?= --src -r $(wildcard src) $(ALL_APPS_DIRS)
+DIALYZER_OPTS ?= -Werror_handling -Wrace_conditions -Wunmatched_returns # -Wunderspecs
+DIALYZER_PLT_OPTS ?=
+
+# Core targets.
+
+check:: dialyze
+
+distclean:: distclean-plt
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Dialyzer targets:" \
+ " plt Build a PLT file for this project" \
+ " dialyze Analyze the project using Dialyzer"
+
+# Plugin-specific targets.
+
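+# filter_opts.erl keeps only the -D, -I and -pa options (and their values)
+# from the compiler options passed via -extra, so they can be forwarded to
+# Dialyzer.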
+define filter_opts.erl
+ Opts = init:get_plain_arguments(),
+ {Filtered, _} = lists:foldl(fun
+ (O, {Os, true}) -> {[O|Os], false};
+ (O = "-D", {Os, _}) -> {[O|Os], true};
+ (O = [\\$$-, \\$$D, _ | _], {Os, _}) -> {[O|Os], false};
+ (O = "-I", {Os, _}) -> {[O|Os], true};
+ (O = [\\$$-, \\$$I, _ | _], {Os, _}) -> {[O|Os], false};
+ (O = "-pa", {Os, _}) -> {[O|Os], true};
+ (_, Acc) -> Acc
+ end, {[], false}, Opts),
+ io:format("~s~n", [string:join(lists:reverse(Filtered), " ")]),
+ halt().
+endef
+
+# DIALYZER_PLT is a variable understood directly by Dialyzer.
+#
+# We append the path to erts at the end of the PLT. This works
+# because the PLT file is in the external term format and the
+# function binary_to_term/1 ignores any trailing data.
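+#
+# A quick illustration of that property in an erl shell (example values,
+# not part of the build):
+#
+#   1> Bin = term_to_binary({plt, contents}).
+#   2> binary_to_term(<<Bin/binary, "trailing erts path">>).
+#   {plt,contents}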
+$(DIALYZER_PLT): deps app
+ $(eval DEPS_LOG := $(shell test -f $(ERLANG_MK_TMP)/deps.log && \
+ while read p; do test -d $$p/ebin && echo $$p/ebin; done <$(ERLANG_MK_TMP)/deps.log))
+ $(verbose) dialyzer --build_plt $(DIALYZER_PLT_OPTS) --apps \
+ erts kernel stdlib $(PLT_APPS) $(OTP_DEPS) $(LOCAL_DEPS) $(DEPS_LOG) || test $$? -eq 2
+ $(verbose) $(ERL) -eval 'io:format("~n~s~n", [code:lib_dir(erts)]), halt().' >> $@
+
+plt: $(DIALYZER_PLT)
+
+distclean-plt:
+ $(gen_verbose) rm -f $(DIALYZER_PLT)
+
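+# If the erts path recorded at the end of the PLT no longer matches the
+# current Erlang/OTP installation, the PLT is considered stale and rebuilt.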
+ifneq ($(wildcard $(DIALYZER_PLT)),)
+dialyze: $(if $(filter --src,$(DIALYZER_DIRS)),,deps app)
+ $(verbose) if ! tail -n1 $(DIALYZER_PLT) | \
+ grep -q "^`$(ERL) -eval 'io:format("~s", [code:lib_dir(erts)]), halt().'`$$"; then \
+ rm $(DIALYZER_PLT); \
+ $(MAKE) plt; \
+ fi
+else
+dialyze: $(DIALYZER_PLT)
+endif
+ $(verbose) dialyzer --no_native `$(ERL) \
+ -eval "$(subst $(newline),,$(call escape_dquotes,$(call filter_opts.erl)))" \
+ -extra $(ERLC_OPTS)` $(DIALYZER_DIRS) $(DIALYZER_OPTS) $(if $(wildcard ebin/),-pa ebin/)
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-edoc edoc
+
+# Configuration.
+
+EDOC_OPTS ?=
+EDOC_SRC_DIRS ?=
+EDOC_OUTPUT ?= doc
+
+define edoc.erl
+ SrcPaths = lists:foldl(fun(P, Acc) ->
+ filelib:wildcard(atom_to_list(P) ++ "/{src,c_src}") ++ Acc
+ end, [], [$(call comma_list,$(patsubst %,'%',$(call core_native_path,$(EDOC_SRC_DIRS))))]),
+ DefaultOpts = [{dir, "$(EDOC_OUTPUT)"}, {source_path, SrcPaths}, {subpackages, false}],
+ edoc:application($(1), ".", [$(2)] ++ DefaultOpts),
+ halt(0).
+endef
+
+# Core targets.
+
+ifneq ($(strip $(EDOC_SRC_DIRS)$(wildcard doc/overview.edoc)),)
+docs:: edoc
+endif
+
+distclean:: distclean-edoc
+
+# Plugin-specific targets.
+
+edoc: distclean-edoc doc-deps
+ $(gen_verbose) $(call erlang,$(call edoc.erl,$(PROJECT),$(EDOC_OPTS)))
+
+distclean-edoc:
+ $(gen_verbose) rm -f $(EDOC_OUTPUT)/*.css $(EDOC_OUTPUT)/*.html $(EDOC_OUTPUT)/*.png $(EDOC_OUTPUT)/edoc-info
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Configuration.
+
+DTL_FULL_PATH ?=
+DTL_PATH ?= templates/
+DTL_PREFIX ?=
+DTL_SUFFIX ?= _dtl
+DTL_OPTS ?=
+
+# Verbosity.
+
+dtl_verbose_0 = @echo " DTL " $(filter %.dtl,$(?F));
+dtl_verbose = $(dtl_verbose_$(V))
+
+# Core targets.
+
+DTL_PATH := $(abspath $(DTL_PATH))
+DTL_FILES := $(sort $(call core_find,$(DTL_PATH),*.dtl))
+
+ifneq ($(DTL_FILES),)
+
+DTL_NAMES = $(addprefix $(DTL_PREFIX),$(addsuffix $(DTL_SUFFIX),$(DTL_FILES:$(DTL_PATH)/%.dtl=%)))
+DTL_MODULES = $(if $(DTL_FULL_PATH),$(subst /,_,$(DTL_NAMES)),$(notdir $(DTL_NAMES)))
+BEAM_FILES += $(addsuffix .beam,$(addprefix ebin/,$(DTL_MODULES)))
+
+ifneq ($(words $(DTL_FILES)),0)
+# Rebuild templates when the Makefile changes.
+$(ERLANG_MK_TMP)/last-makefile-change-erlydtl: $(MAKEFILE_LIST) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(DTL_FILES); \
+ fi
+ $(verbose) touch $@
+
+ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change-erlydtl
+endif
+
+define erlydtl_compile.erl
+ [begin
+ Module0 = case "$(strip $(DTL_FULL_PATH))" of
+ "" ->
+ filename:basename(F, ".dtl");
+ _ ->
+ "$(call core_native_path,$(DTL_PATH))/" ++ F2 = filename:rootname(F, ".dtl"),
+ re:replace(F2, "/", "_", [{return, list}, global])
+ end,
+ Module = list_to_atom("$(DTL_PREFIX)" ++ string:to_lower(Module0) ++ "$(DTL_SUFFIX)"),
+ case erlydtl:compile(F, Module, [$(DTL_OPTS)] ++ [{out_dir, "ebin/"}, return_errors]) of
+ ok -> ok;
+ {ok, _} -> ok
+ end
+ end || F <- string:tokens("$(1)", " ")],
+ halt().
+endef
+
+ebin/$(PROJECT).app:: $(DTL_FILES) | ebin/
+ $(if $(strip $?),\
+ $(dtl_verbose) $(call erlang,$(call erlydtl_compile.erl,$(call core_native_path,$?)),\
+ -pa ebin/))
+
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, Dave Cottlehuber <dch@skunkwerks.at>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-escript escript escript-zip
+
+# Configuration.
+
+ESCRIPT_NAME ?= $(PROJECT)
+ESCRIPT_FILE ?= $(ESCRIPT_NAME)
+
+ESCRIPT_SHEBANG ?= /usr/bin/env escript
+ESCRIPT_COMMENT ?= This is an -*- erlang -*- file
+ESCRIPT_EMU_ARGS ?= -escript main $(ESCRIPT_NAME)
+
+ESCRIPT_ZIP ?= 7z a -tzip -mx=9 -mtc=off $(if $(filter-out 0,$(V)),,> /dev/null)
+ESCRIPT_ZIP_FILE ?= $(ERLANG_MK_TMP)/escript.zip
+
+# Core targets.
+
+distclean:: distclean-escript
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Escript targets:" \
+	" escript Build an executable escript archive"
+
+# Plugin-specific targets.
+
+escript-zip:: FULL=1
+escript-zip:: deps app
+ $(verbose) mkdir -p $(dir $(ESCRIPT_ZIP))
+ $(verbose) rm -f $(ESCRIPT_ZIP_FILE)
+ $(gen_verbose) cd .. && $(ESCRIPT_ZIP) $(ESCRIPT_ZIP_FILE) $(PROJECT)/ebin/*
+ifneq ($(DEPS),)
+ $(verbose) cd $(DEPS_DIR) && $(ESCRIPT_ZIP) $(ESCRIPT_ZIP_FILE) \
+ $(subst $(DEPS_DIR)/,,$(addsuffix /*,$(wildcard \
+ $(addsuffix /ebin,$(shell cat $(ERLANG_MK_TMP)/deps.log)))))
+endif
+
+escript:: escript-zip
+ $(gen_verbose) printf "%s\n" \
+ "#!$(ESCRIPT_SHEBANG)" \
+ "%% $(ESCRIPT_COMMENT)" \
+ "%%! $(ESCRIPT_EMU_ARGS)" > $(ESCRIPT_FILE)
+ $(verbose) cat $(ESCRIPT_ZIP_FILE) >> $(ESCRIPT_FILE)
+ $(verbose) chmod +x $(ESCRIPT_FILE)
+
+distclean-escript:
+ $(gen_verbose) rm -f $(ESCRIPT_FILE)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, Enrique Fernandez <enrique.fernandez@erlang-solutions.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: eunit apps-eunit
+
+# Configuration
+
+EUNIT_OPTS ?=
+EUNIT_ERL_OPTS ?=
+
+# Core targets.
+
+tests:: eunit
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "EUnit targets:" \
+ " eunit Run all the EUnit tests for this project"
+
+# Plugin-specific targets.
+
+define eunit.erl
+ $(call cover.erl)
+ CoverSetup(),
+ case eunit:test($1, [$(EUNIT_OPTS)]) of
+ ok -> ok;
+ error -> halt(2)
+ end,
+ CoverExport("$(call core_native_path,$(COVER_DATA_DIR))/eunit.coverdata"),
+ halt()
+endef
+
+EUNIT_ERL_OPTS += -pa $(TEST_DIR) $(CURDIR)/ebin
+
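+# `t' selects what to run: t=my_mod runs the EUnit tests of a single module,
+# while t=my_mod:my_test runs a single test function (names assumed).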
+ifdef t
+ifeq (,$(findstring :,$(t)))
+eunit: test-build cover-data-dir
+ $(gen_verbose) $(call erlang,$(call eunit.erl,['$(t)']),$(EUNIT_ERL_OPTS))
+else
+eunit: test-build cover-data-dir
+ $(gen_verbose) $(call erlang,$(call eunit.erl,fun $(t)/0),$(EUNIT_ERL_OPTS))
+endif
+else
+EUNIT_EBIN_MODS = $(notdir $(basename $(ERL_FILES) $(BEAM_FILES)))
+EUNIT_TEST_MODS = $(notdir $(basename $(call core_find,$(TEST_DIR)/,*.erl)))
+
+EUNIT_MODS = $(foreach mod,$(EUNIT_EBIN_MODS) $(filter-out \
+ $(patsubst %,%_tests,$(EUNIT_EBIN_MODS)),$(EUNIT_TEST_MODS)),'$(mod)')
+
+eunit: test-build $(if $(IS_APP)$(ROOT_DIR),,apps-eunit) cover-data-dir
+ifneq ($(wildcard src/ $(TEST_DIR)),)
+ $(gen_verbose) $(call erlang,$(call eunit.erl,[$(call comma_list,$(EUNIT_MODS))]),$(EUNIT_ERL_OPTS))
+endif
+
+ifneq ($(ALL_APPS_DIRS),)
+apps-eunit: test-build
+ $(verbose) eunit_retcode=0 ; for app in $(ALL_APPS_DIRS); do $(MAKE) -C $$app eunit IS_APP=1; \
+ [ $$? -ne 0 ] && eunit_retcode=1 ; done ; \
+ exit $$eunit_retcode
+endif
+endif
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter proper,$(DEPS) $(TEST_DEPS)),proper)
+.PHONY: proper
+
+# Targets.
+
+tests:: proper
+
+define proper_check.erl
+ $(call cover.erl)
+ code:add_pathsa([
+ "$(call core_native_path,$(CURDIR)/ebin)",
+ "$(call core_native_path,$(DEPS_DIR)/*/ebin)",
+ "$(call core_native_path,$(TEST_DIR))"]),
+ Module = fun(M) ->
+ [true] =:= lists:usort([
+ case atom_to_list(F) of
+ "prop_" ++ _ ->
+ io:format("Testing ~p:~p/0~n", [M, F]),
+ proper:quickcheck(M:F(), nocolors);
+ _ ->
+ true
+ end
+ || {F, 0} <- M:module_info(exports)])
+ end,
+ try begin
+ CoverSetup(),
+ Res = case $(1) of
+ all -> [true] =:= lists:usort([Module(M) || M <- [$(call comma_list,$(3))]]);
+ module -> Module($(2));
+ function -> proper:quickcheck($(2), nocolors)
+ end,
+ CoverExport("$(COVER_DATA_DIR)/proper.coverdata"),
+ Res
+ end of
+ true -> halt(0);
+ _ -> halt(1)
+ catch error:undef ->
+ io:format("Undefined property or module?~n~p~n", [erlang:get_stacktrace()]),
+ halt(0)
+ end.
+endef
+
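+# `t' selects what to check: t=my_mod checks every prop_* property of a
+# module, while t=my_mod:prop_x checks a single property (names assumed).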
+ifdef t
+ifeq (,$(findstring :,$(t)))
+proper: test-build cover-data-dir
+ $(verbose) $(call erlang,$(call proper_check.erl,module,$(t)))
+else
+proper: test-build cover-data-dir
+ $(verbose) echo Testing $(t)/0
+ $(verbose) $(call erlang,$(call proper_check.erl,function,$(t)()))
+endif
+else
+proper: test-build cover-data-dir
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(wildcard ebin/*.beam) $(call core_find,$(TEST_DIR)/,*.beam))))))
+ $(gen_verbose) $(call erlang,$(call proper_check.erl,all,undefined,$(MODULES)))
+endif
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Verbosity.
+
+proto_verbose_0 = @echo " PROTO " $(filter %.proto,$(?F));
+proto_verbose = $(proto_verbose_$(V))
+
+# Core targets.
+
+ifneq ($(wildcard src/),)
+ifneq ($(filter gpb protobuffs,$(BUILD_DEPS) $(DEPS)),)
+PROTO_FILES := $(filter %.proto,$(ALL_SRC_FILES))
+ERL_FILES += $(addprefix src/,$(patsubst %.proto,%_pb.erl,$(notdir $(PROTO_FILES))))
+
+ifeq ($(PROTO_FILES),)
+$(ERLANG_MK_TMP)/last-makefile-change-protobuffs:
+ $(verbose) :
+else
+# Rebuild proto files when the Makefile changes.
+# We exclude $(PROJECT).d to avoid a circular dependency.
+$(ERLANG_MK_TMP)/last-makefile-change-protobuffs: $(filter-out $(PROJECT).d,$(MAKEFILE_LIST)) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(PROTO_FILES); \
+ fi
+ $(verbose) touch $@
+
+$(PROJECT).d:: $(ERLANG_MK_TMP)/last-makefile-change-protobuffs
+endif
+
+ifeq ($(filter gpb,$(BUILD_DEPS) $(DEPS)),)
+define compile_proto.erl
+ [begin
+ protobuffs_compile:generate_source(F, [
+ {output_include_dir, "./include"},
+ {output_src_dir, "./src"}])
+ end || F <- string:tokens("$1", " ")],
+ halt().
+endef
+else
+define compile_proto.erl
+ [begin
+ gpb_compile:file(F, [
+ {include_as_lib, true},
+ {module_name_suffix, "_pb"},
+ {o_hrl, "./include"},
+ {o_erl, "./src"}])
+ end || F <- string:tokens("$1", " ")],
+ halt().
+endef
+endif
+
+ifneq ($(PROTO_FILES),)
+$(PROJECT).d:: $(PROTO_FILES)
+ $(verbose) mkdir -p ebin/ include/
+ $(if $(strip $?),$(proto_verbose) $(call erlang,$(call compile_proto.erl,$?)))
+endif
+endif
+endif
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: relx-rel relx-relup distclean-relx-rel run
+
+# Configuration.
+
+RELX ?= $(ERLANG_MK_TMP)/relx
+RELX_CONFIG ?= $(CURDIR)/relx.config
+
+RELX_URL ?= https://erlang.mk/res/relx-v3.27.0
+RELX_OPTS ?=
+RELX_OUTPUT_DIR ?= _rel
+RELX_REL_EXT ?=
+RELX_TAR ?= 1
+
+ifdef SFX
+ RELX_TAR = 1
+endif
+
+ifeq ($(firstword $(RELX_OPTS)),-o)
+ RELX_OUTPUT_DIR = $(word 2,$(RELX_OPTS))
+else
+ RELX_OPTS += -o $(RELX_OUTPUT_DIR)
+endif
+
+# Core targets.
+
+ifeq ($(IS_DEP),)
+ifneq ($(wildcard $(RELX_CONFIG)),)
+rel:: relx-rel
+
+relup:: relx-relup
+endif
+endif
+
+distclean:: distclean-relx-rel
+
+# Plugin-specific targets.
+
+$(RELX): | $(ERLANG_MK_TMP)
+ $(gen_verbose) $(call core_http_get,$(RELX),$(RELX_URL))
+ $(verbose) chmod +x $(RELX)
+
+relx-rel: $(RELX) rel-deps app
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) release
+ $(verbose) $(MAKE) relx-post-rel
+ifeq ($(RELX_TAR),1)
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) tar
+endif
+
+relx-relup: $(RELX) rel-deps app
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) release
+ $(MAKE) relx-post-rel
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) relup $(if $(filter 1,$(RELX_TAR)),tar)
+
+distclean-relx-rel:
+ $(gen_verbose) rm -rf $(RELX_OUTPUT_DIR)
+
+# Default hooks.
+relx-post-rel::
+ $(verbose) :
+
+# Run target.
+
+ifeq ($(wildcard $(RELX_CONFIG)),)
+run::
+else
+
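+# get_relx_release.erl reads relx.config and prints the release name, its
+# version and whether an extended start script is enabled. For an (assumed)
+# entry {release, {my_rel, "1.0.0"}, [my_app]} with {extended_start_script,
+# true}, it prints "my_rel 1.0.0 1".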
+define get_relx_release.erl
+ {ok, Config} = file:consult("$(call core_native_path,$(RELX_CONFIG))"),
+ {release, {Name, Vsn0}, _} = lists:keyfind(release, 1, Config),
+ Vsn = case Vsn0 of
+ {cmd, Cmd} -> os:cmd(Cmd);
+ semver -> "";
+ {semver, _} -> "";
+		_ -> Vsn0
+ end,
+ Extended = case lists:keyfind(extended_start_script, 1, Config) of
+ {_, true} -> "1";
+ _ -> ""
+ end,
+ io:format("~s ~s ~s", [Name, Vsn, Extended]),
+ halt(0).
+endef
+
+RELX_REL := $(shell $(call erlang,$(get_relx_release.erl)))
+RELX_REL_NAME := $(word 1,$(RELX_REL))
+RELX_REL_VSN := $(word 2,$(RELX_REL))
+RELX_REL_CMD := $(if $(word 3,$(RELX_REL)),console)
+
+ifeq ($(PLATFORM),msys2)
+RELX_REL_EXT := .cmd
+endif
+
+run:: all
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) $(RELX_REL_CMD)
+
+ifdef RELOAD
+rel::
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) ping
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) \
+ eval "io:format(\"~p~n\", [c:lm()])"
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Relx targets:" \
+ " run Compile the project, build the release and run it"
+
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, M Robert Martin <rob@version2beta.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: shell
+
+# Configuration.
+
+SHELL_ERL ?= erl
+SHELL_PATHS ?= $(CURDIR)/ebin $(TEST_DIR)
+SHELL_OPTS ?=
+
+ALL_SHELL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(SHELL_DEPS))
+
+# Core targets
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Shell targets:" \
+	" shell Run an Erlang shell with SHELL_OPTS or a reasonable default"
+
+# Plugin-specific targets.
+
+$(foreach dep,$(SHELL_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+build-shell-deps:
+else
+build-shell-deps: $(ALL_SHELL_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_SHELL_DEPS_DIRS) ; do \
+ if [ -z "$(strip $(FULL))" ] && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ else \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ fi \
+ done
+endif
+
+shell:: build-shell-deps
+ $(gen_verbose) $(SHELL_ERL) -pa $(SHELL_PATHS) $(SHELL_OPTS)
+
+# Copyright 2017, Stanislaw Klekot <dozzie@jarowit.net>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-sphinx sphinx
+
+# Configuration.
+
+SPHINX_BUILD ?= sphinx-build
+SPHINX_SOURCE ?= doc
+SPHINX_CONFDIR ?=
+SPHINX_FORMATS ?= html
+SPHINX_DOCTREES ?= $(ERLANG_MK_TMP)/sphinx.doctrees
+SPHINX_OPTS ?=
+
+#sphinx_html_opts =
+#sphinx_html_output = html
+#sphinx_man_opts =
+#sphinx_man_output = man
+#sphinx_latex_opts =
+#sphinx_latex_output = latex
+
+# Helpers.
+
+sphinx_build_0 = @echo " SPHINX" $1; $(SPHINX_BUILD) -N -q
+sphinx_build_1 = $(SPHINX_BUILD) -N
+sphinx_build_2 = set -x; $(SPHINX_BUILD)
+sphinx_build = $(sphinx_build_$(V))
+
+define sphinx.build
+$(call sphinx_build,$1) -b $1 -d $(SPHINX_DOCTREES) $(if $(SPHINX_CONFDIR),-c $(SPHINX_CONFDIR)) $(SPHINX_OPTS) $(sphinx_$1_opts) -- $(SPHINX_SOURCE) $(call sphinx.output,$1)
+
+endef
+
+define sphinx.output
+$(if $(sphinx_$1_output),$(sphinx_$1_output),$1)
+endef
+
+# Targets.
+
+ifneq ($(wildcard $(if $(SPHINX_CONFDIR),$(SPHINX_CONFDIR),$(SPHINX_SOURCE))/conf.py),)
+docs:: sphinx
+distclean:: distclean-sphinx
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Sphinx targets:" \
+ " sphinx Generate Sphinx documentation." \
+ "" \
+	"ReST sources and the 'conf.py' file are expected in the directory pointed to" \
+	"by SPHINX_SOURCE ('doc' by default). SPHINX_FORMATS lists the formats to build" \
+	"(only 'html' is generated by default); the target directory can be specified by" \
+ 'setting sphinx_$${format}_output, for example: sphinx_html_output = output/html' \
+ "Additional Sphinx options can be set in SPHINX_OPTS."
+
+# Plugin-specific targets.
+
+sphinx:
+ $(foreach F,$(SPHINX_FORMATS),$(call sphinx.build,$F))
+
+distclean-sphinx:
+ $(gen_verbose) rm -rf $(filter-out $(SPHINX_SOURCE),$(foreach F,$(SPHINX_FORMATS),$(call sphinx.output,$F)))
+
+# Copyright (c) 2017, Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: show-ERL_LIBS show-ERLC_OPTS show-TEST_ERLC_OPTS
+
+show-ERL_LIBS:
+ @echo $(ERL_LIBS)
+
+show-ERLC_OPTS:
+ @$(foreach opt,$(ERLC_OPTS) -pa ebin -I include,echo "$(opt)";)
+
+show-TEST_ERLC_OPTS:
+ @$(foreach opt,$(TEST_ERLC_OPTS) -pa ebin -I include,echo "$(opt)";)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter triq,$(DEPS) $(TEST_DEPS)),triq)
+.PHONY: triq
+
+# Targets.
+
+tests:: triq
+
+define triq_check.erl
+ $(call cover.erl)
+ code:add_pathsa([
+ "$(call core_native_path,$(CURDIR)/ebin)",
+ "$(call core_native_path,$(DEPS_DIR)/*/ebin)",
+ "$(call core_native_path,$(TEST_DIR))"]),
+ try begin
+ CoverSetup(),
+ Res = case $(1) of
+ all -> [true] =:= lists:usort([triq:check(M) || M <- [$(call comma_list,$(3))]]);
+ module -> triq:check($(2));
+ function -> triq:check($(2))
+ end,
+ CoverExport("$(COVER_DATA_DIR)/triq.coverdata"),
+ Res
+ end of
+ true -> halt(0);
+ _ -> halt(1)
+ catch error:undef ->
+ io:format("Undefined property or module?~n~p~n", [erlang:get_stacktrace()]),
+ halt(0)
+ end.
+endef
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+triq: test-build cover-data-dir
+ $(verbose) $(call erlang,$(call triq_check.erl,module,$(t)))
+else
+triq: test-build cover-data-dir
+ $(verbose) echo Testing $(t)/0
+ $(verbose) $(call erlang,$(call triq_check.erl,function,$(t)()))
+endif
+else
+triq: test-build cover-data-dir
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(wildcard ebin/*.beam) $(call core_find,$(TEST_DIR)/,*.beam))))))
+ $(gen_verbose) $(call erlang,$(call triq_check.erl,all,undefined,$(MODULES)))
+endif
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Erlang Solutions Ltd.
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: xref distclean-xref
+
+# Configuration.
+
+ifeq ($(XREF_CONFIG),)
+ XREFR_ARGS :=
+else
+ XREFR_ARGS := -c $(XREF_CONFIG)
+endif
+
+XREFR ?= $(CURDIR)/xrefr
+export XREFR
+
+XREFR_URL ?= https://github.com/inaka/xref_runner/releases/download/1.1.0/xrefr
+
+# Core targets.
+
+help::
+ $(verbose) printf '%s\n' '' \
+ 'Xref targets:' \
+	' xref Run Xrefr using $$XREF_CONFIG as the config file if defined'
+
+distclean:: distclean-xref
+
+# Plugin-specific targets.
+
+$(XREFR):
+ $(gen_verbose) $(call core_http_get,$(XREFR),$(XREFR_URL))
+ $(verbose) chmod +x $(XREFR)
+
+xref: deps app $(XREFR)
+ $(gen_verbose) $(XREFR) $(XREFR_ARGS)
+
+distclean-xref:
+ $(gen_verbose) rm -rf $(XREFR)
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Viktor Söderqvist <viktor@zuiderkwast.se>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+COVER_REPORT_DIR ?= cover
+COVER_DATA_DIR ?= $(COVER_REPORT_DIR)
+
+ifdef COVER
+COVER_APPS ?= $(notdir $(ALL_APPS_DIRS))
+COVER_DEPS ?=
+endif
+
+# Code coverage for Common Test.
+
+ifdef COVER
+ifdef CT_RUN
+ifneq ($(wildcard $(TEST_DIR)),)
+test-build:: $(TEST_DIR)/ct.cover.spec
+
+$(TEST_DIR)/ct.cover.spec: cover-data-dir
+ $(gen_verbose) printf "%s\n" \
+ "{incl_app, '$(PROJECT)', details}." \
+ "{incl_dirs, '$(PROJECT)', [\"$(call core_native_path,$(CURDIR)/ebin)\" \
+ $(foreach a,$(COVER_APPS),$(comma) \"$(call core_native_path,$(APPS_DIR)/$a/ebin)\") \
+ $(foreach d,$(COVER_DEPS),$(comma) \"$(call core_native_path,$(DEPS_DIR)/$d/ebin)\")]}." \
+ '{export,"$(call core_native_path,$(abspath $(COVER_DATA_DIR))/ct.coverdata)"}.' > $@
+
+CT_RUN += -cover $(TEST_DIR)/ct.cover.spec
+endif
+endif
+endif
+
+# Code coverage for other tools.
+
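+# cover.erl is spliced into the -eval scripts of the eunit, proper and triq
+# runners; it binds the CoverSetup/CoverExport funs they call around tests.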
+ifdef COVER
+define cover.erl
+ CoverSetup = fun() ->
+ Dirs = ["$(call core_native_path,$(CURDIR)/ebin)"
+ $(foreach a,$(COVER_APPS),$(comma) "$(call core_native_path,$(APPS_DIR)/$a/ebin)")
+ $(foreach d,$(COVER_DEPS),$(comma) "$(call core_native_path,$(DEPS_DIR)/$d/ebin)")],
+ [begin
+ case filelib:is_dir(Dir) of
+ false -> false;
+ true ->
+ case cover:compile_beam_directory(Dir) of
+ {error, _} -> halt(1);
+ _ -> true
+ end
+ end
+ end || Dir <- Dirs]
+ end,
+ CoverExport = fun(Filename) -> cover:export(Filename) end,
+endef
+else
+define cover.erl
+ CoverSetup = fun() -> ok end,
+ CoverExport = fun(_) -> ok end,
+endef
+endif
+
+# Core targets
+
+ifdef COVER
+ifneq ($(COVER_REPORT_DIR),)
+tests::
+ $(verbose) $(MAKE) --no-print-directory cover-report
+endif
+
+cover-data-dir: | $(COVER_DATA_DIR)
+
+$(COVER_DATA_DIR):
+ $(verbose) mkdir -p $(COVER_DATA_DIR)
+else
+cover-data-dir:
+endif
+
+clean:: coverdata-clean
+
+ifneq ($(COVER_REPORT_DIR),)
+distclean:: cover-report-clean
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Cover targets:" \
+	" cover-report Generate an HTML coverage report from previously collected" \
+ " cover data." \
+ " all.coverdata Merge all coverdata files into all.coverdata." \
+ "" \
+	"If COVER=1 is set, coverage data is generated by the eunit and ct targets. The" \
+	"tests target additionally generates an HTML coverage report from the combined" \
+	"coverdata files of these testing tools. HTML reports can be disabled" \
+ "by setting COVER_REPORT_DIR to empty."
+
+# Plugin specific targets
+
+COVERDATA = $(filter-out $(COVER_DATA_DIR)/all.coverdata,$(wildcard $(COVER_DATA_DIR)/*.coverdata))
+
+.PHONY: coverdata-clean
+coverdata-clean:
+ $(gen_verbose) rm -f $(COVER_DATA_DIR)/*.coverdata $(TEST_DIR)/ct.cover.spec
+
+# Merge all coverdata files into one.
+define cover_export.erl
+ $(foreach f,$(COVERDATA),cover:import("$(f)") == ok orelse halt(1),)
+ cover:export("$(COVER_DATA_DIR)/$@"), halt(0).
+endef
+
+all.coverdata: $(COVERDATA) cover-data-dir
+ $(gen_verbose) $(call erlang,$(cover_export.erl))
+
+# These are only defined if COVER_REPORT_DIR is non-empty. Set COVER_REPORT_DIR to
+# empty if you want the coverdata files but not the HTML report.
+ifneq ($(COVER_REPORT_DIR),)
+
+.PHONY: cover-report-clean cover-report
+
+cover-report-clean:
+ $(gen_verbose) rm -rf $(COVER_REPORT_DIR)
+ifneq ($(COVER_REPORT_DIR),$(COVER_DATA_DIR))
+ $(if $(shell ls -A $(COVER_DATA_DIR)/),,$(verbose) rmdir $(COVER_DATA_DIR))
+endif
+
+ifeq ($(COVERDATA),)
+cover-report:
+else
+
+# Modules which include eunit.hrl always contain one line without coverage
+# because eunit defines test/0 which is never called. We compensate for this.
+EUNIT_HRL_MODS = $(subst $(space),$(comma),$(shell \
+ grep -H -e '^\s*-include.*include/eunit\.hrl"' src/*.erl \
+ | sed "s/^src\/\(.*\)\.erl:.*/'\1'/" | uniq))
+
+define cover_report.erl
+ $(foreach f,$(COVERDATA),cover:import("$(f)") == ok orelse halt(1),)
+ Ms = cover:imported_modules(),
+ [cover:analyse_to_file(M, "$(COVER_REPORT_DIR)/" ++ atom_to_list(M)
+ ++ ".COVER.html", [html]) || M <- Ms],
+ Report = [begin {ok, R} = cover:analyse(M, module), R end || M <- Ms],
+ EunitHrlMods = [$(EUNIT_HRL_MODS)],
+ Report1 = [{M, {Y, case lists:member(M, EunitHrlMods) of
+ true -> N - 1; false -> N end}} || {M, {Y, N}} <- Report],
+ TotalY = lists:sum([Y || {_, {Y, _}} <- Report1]),
+ TotalN = lists:sum([N || {_, {_, N}} <- Report1]),
+ Perc = fun(Y, N) -> case Y + N of 0 -> 100; S -> round(100 * Y / S) end end,
+ TotalPerc = Perc(TotalY, TotalN),
+ {ok, F} = file:open("$(COVER_REPORT_DIR)/index.html", [write]),
+ io:format(F, "<!DOCTYPE html><html>~n"
+ "<head><meta charset=\"UTF-8\">~n"
+ "<title>Coverage report</title></head>~n"
+ "<body>~n", []),
+ io:format(F, "<h1>Coverage</h1>~n<p>Total: ~p%</p>~n", [TotalPerc]),
+ io:format(F, "<table><tr><th>Module</th><th>Coverage</th></tr>~n", []),
+ [io:format(F, "<tr><td><a href=\"~p.COVER.html\">~p</a></td>"
+ "<td>~p%</td></tr>~n",
+ [M, M, Perc(Y, N)]) || {M, {Y, N}} <- Report1],
+ How = "$(subst $(space),$(comma)$(space),$(basename $(COVERDATA)))",
+ Date = "$(shell date -u "+%Y-%m-%dT%H:%M:%SZ")",
+ io:format(F, "</table>~n"
+ "<p>Generated using ~s and erlang.mk on ~s.</p>~n"
+ "</body></html>", [How, Date]),
+ halt().
+endef
+
+cover-report:
+ $(verbose) mkdir -p $(COVER_REPORT_DIR)
+ $(gen_verbose) $(call erlang,$(cover_report.erl))
+
+endif
+endif # ifneq ($(COVER_REPORT_DIR),)
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: sfx
+
+ifdef RELX_REL
+ifdef SFX
+
+# Configuration.
+
+SFX_ARCHIVE ?= $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/$(RELX_REL_NAME)-$(RELX_REL_VSN).tar.gz
+SFX_OUTPUT_FILE ?= $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME).run
+
+# Core targets.
+
+rel:: sfx
+
+# Plugin-specific targets.
+
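+# sfx_stub is a shell script prepended to the release tarball: it locates
+# the line after __ARCHIVE_BELOW__, untars everything past it into a
+# temporary directory and runs the release console from there.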
+define sfx_stub
+#!/bin/sh
+
+TMPDIR=`mktemp -d`
+ARCHIVE=`awk '/^__ARCHIVE_BELOW__$$/ {print NR + 1; exit 0;}' $$0`
+FILENAME=$$(basename $$0)
+REL=$${FILENAME%.*}
+
+tail -n+$$ARCHIVE $$0 | tar -xzf - -C $$TMPDIR
+
+$$TMPDIR/bin/$$REL console
+RET=$$?
+
+rm -rf $$TMPDIR
+
+exit $$RET
+
+__ARCHIVE_BELOW__
+endef
+
+sfx:
+ $(verbose) $(call core_render,sfx_stub,$(SFX_OUTPUT_FILE))
+ $(gen_verbose) cat $(SFX_ARCHIVE) >> $(SFX_OUTPUT_FILE)
+ $(verbose) chmod +x $(SFX_OUTPUT_FILE)
+
+endif
+endif
+
+# Copyright (c) 2013-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# External plugins.
+
+DEP_PLUGINS ?=
+
+$(foreach p,$(DEP_PLUGINS),\
+ $(eval $(if $(findstring /,$p),\
+ $(call core_dep_plugin,$p,$(firstword $(subst /, ,$p))),\
+ $(call core_dep_plugin,$p/plugins.mk,$p))))
+
+help:: help-plugins
+
+help-plugins::
+ $(verbose) :
+
+# Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015-2016, Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Fetch dependencies recursively (without building them).
+
+.PHONY: fetch-deps fetch-doc-deps fetch-rel-deps fetch-test-deps \
+ fetch-shell-deps
+
+.PHONY: $(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+fetch-deps: $(ERLANG_MK_RECURSIVE_DEPS_LIST)
+fetch-doc-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST)
+fetch-rel-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST)
+fetch-test-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST)
+fetch-shell-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+ifneq ($(SKIP_DEPS),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST):
+ $(verbose) :> $@
+else
+# By default, we fetch "normal" dependencies. They are also included no
+# matter the type of requested dependencies.
+#
+# $(ALL_DEPS_DIRS) includes $(BUILD_DEPS).
+
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_DOC_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_REL_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_TEST_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_SHELL_DEPS_DIRS)
+
+# Allow fetch-deps to be used together with $(DEP_TYPES) to fetch multiple
+# types of dependencies with a single target.
+ifneq ($(filter doc,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_DOC_DEPS_DIRS)
+endif
+ifneq ($(filter rel,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_REL_DEPS_DIRS)
+endif
+ifneq ($(filter test,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_TEST_DEPS_DIRS)
+endif
+ifneq ($(filter shell,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_SHELL_DEPS_DIRS)
+endif
+
+ERLANG_MK_RECURSIVE_TMP_LIST := $(abspath $(ERLANG_MK_TMP)/recursive-tmp-deps-$(shell echo $$PPID).log)
+
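+# Each dependency is recorded once in the shared temporary list; we recurse
+# into a dependency's own fetch-deps only when its Makefile includes
+# erlang.mk, then the de-duplicated, sorted list replaces the target file.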
+$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): | $(ERLANG_MK_TMP)
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_RECURSIVE_TMP_LIST)
+endif
+ $(verbose) touch $(ERLANG_MK_RECURSIVE_TMP_LIST)
+ $(verbose) set -e; for dep in $^ ; do \
+ if ! grep -qs ^$$dep$$ $(ERLANG_MK_RECURSIVE_TMP_LIST); then \
+ echo $$dep >> $(ERLANG_MK_RECURSIVE_TMP_LIST); \
+ if grep -qs -E "^[[:blank:]]*include[[:blank:]]+(erlang\.mk|.*/erlang\.mk|.*ERLANG_MK_FILENAME.*)$$" \
+ $$dep/GNUmakefile $$dep/makefile $$dep/Makefile; then \
+ $(MAKE) -C $$dep fetch-deps \
+ IS_DEP=1 \
+ ERLANG_MK_RECURSIVE_TMP_LIST=$(ERLANG_MK_RECURSIVE_TMP_LIST); \
+ fi \
+ fi \
+ done
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) sort < $(ERLANG_MK_RECURSIVE_TMP_LIST) | \
+ uniq > $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted
+ $(verbose) cmp -s $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted $@ \
+ || mv $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted $@
+ $(verbose) rm -f $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted
+ $(verbose) rm $(ERLANG_MK_RECURSIVE_TMP_LIST)
+endif
+endif # ifneq ($(SKIP_DEPS),)
+
+# List dependencies recursively.
+
+.PHONY: list-deps list-doc-deps list-rel-deps list-test-deps \
+ list-shell-deps
+
+list-deps: $(ERLANG_MK_RECURSIVE_DEPS_LIST)
+list-doc-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST)
+list-rel-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST)
+list-test-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST)
+list-shell-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+list-deps list-doc-deps list-rel-deps list-test-deps list-shell-deps:
+ $(verbose) cat $^
+
+# Query dependencies recursively.
+
+.PHONY: query-deps query-doc-deps query-rel-deps query-test-deps \
+ query-shell-deps
+
+QUERY ?= name fetch_method repo version
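+
+# For example (invocation assumed): `make query-deps QUERY="name repo"'
+# prints one `project: name repo' line per dependency.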
+
+define query_target
+$(1): $(2) clean-tmp-query.log
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(4)
+endif
+ $(verbose) $(foreach dep,$(3),\
+ echo $(PROJECT): $(foreach q,$(QUERY),$(call query_$(q),$(dep))) >> $(4) ;)
+ $(if $(filter-out query-deps,$(1)),,\
+ $(verbose) set -e; for dep in $(3) ; do \
+ if grep -qs ^$$$$dep$$$$ $(ERLANG_MK_TMP)/query.log; then \
+ :; \
+ else \
+ echo $$$$dep >> $(ERLANG_MK_TMP)/query.log; \
+ $(MAKE) -C $(DEPS_DIR)/$$$$dep $$@ QUERY="$(QUERY)" IS_DEP=1 || true; \
+ fi \
+ done)
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) touch $(4)
+ $(verbose) cat $(4)
+endif
+endef
+
+clean-tmp-query.log:
+ifeq ($(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_TMP)/query.log
+endif
+
+$(eval $(call query_target,query-deps,$(ERLANG_MK_RECURSIVE_DEPS_LIST),$(BUILD_DEPS) $(DEPS),$(ERLANG_MK_QUERY_DEPS_FILE)))
+$(eval $(call query_target,query-doc-deps,$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST),$(DOC_DEPS),$(ERLANG_MK_QUERY_DOC_DEPS_FILE)))
+$(eval $(call query_target,query-rel-deps,$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST),$(REL_DEPS),$(ERLANG_MK_QUERY_REL_DEPS_FILE)))
+$(eval $(call query_target,query-test-deps,$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST),$(TEST_DEPS),$(ERLANG_MK_QUERY_TEST_DEPS_FILE)))
+$(eval $(call query_target,query-shell-deps,$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST),$(SHELL_DEPS),$(ERLANG_MK_QUERY_SHELL_DEPS_FILE)))
diff --git a/deps/rabbitmq_auth_backend_ldap/example/README.md b/deps/rabbitmq_auth_backend_ldap/example/README.md
new file mode 100644
index 0000000000..61b65312c1
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_ldap/example/README.md
@@ -0,0 +1,2 @@
+The LDIF files come from the following article:
+https://technicalnotes.wordpress.com/2014/04/19/openldap-setup-with-memberof-overlay/
diff --git a/deps/rabbitmq_auth_backend_ldap/example/global.ldif b/deps/rabbitmq_auth_backend_ldap/example/global.ldif
new file mode 100644
index 0000000000..373d9d9951
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_ldap/example/global.ldif
@@ -0,0 +1,27 @@
+# Load modules for database type
+dn: cn=module,cn=config
+objectclass: olcModuleList
+cn: module
+olcModuleLoad: back_bdb.la
+
+# Create directory database
+dn: olcDatabase=bdb,cn=config
+objectClass: olcDatabaseConfig
+objectClass: olcBdbConfig
+olcDatabase: bdb
+# Domain name (e.g. rabbitmq.com)
+olcSuffix: dc=rabbitmq,dc=com
+# Location on system where database is stored
+olcDbDirectory: /var/lib/ldap
+# Manager of the database
+olcRootDN: cn=admin,dc=rabbitmq,dc=com
+olcRootPW: admin
+olcAccess: to attrs=userPassword
+ by self write
+ by anonymous auth
+ by dn.base="cn=admin,dc=rabbitmq,dc=com" write
+ by * none
+olcAccess: to *
+ by self write
+ by dn.base="cn=admin,dc=rabbitmq,dc=com" write
+ by * read
diff --git a/deps/rabbitmq_auth_backend_ldap/example/memberof_init.ldif b/deps/rabbitmq_auth_backend_ldap/example/memberof_init.ldif
new file mode 100644
index 0000000000..6301e937a4
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_ldap/example/memberof_init.ldif
@@ -0,0 +1,17 @@
+dn: cn=module,cn=config
+cn: module
+objectClass: olcModuleList
+olcModuleLoad: memberof
+olcModulePath: /usr/lib/ldap
+
+dn: olcOverlay={0}memberof,olcDatabase={1}bdb,cn=config
+objectClass: olcConfig
+objectClass: olcMemberOf
+objectClass: olcOverlayConfig
+objectClass: top
+olcOverlay: memberof
+olcMemberOfDangling: ignore
+olcMemberOfRefInt: TRUE
+olcMemberOfGroupOC: groupOfNames
+olcMemberOfMemberAD: member
+olcMemberOfMemberOfAD: memberOf
diff --git a/deps/rabbitmq_auth_backend_ldap/example/refint_1.ldif b/deps/rabbitmq_auth_backend_ldap/example/refint_1.ldif
new file mode 100644
index 0000000000..420f454ee1
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_ldap/example/refint_1.ldif
@@ -0,0 +1,3 @@
+dn: cn=module{1},cn=config
+add: olcmoduleload
+olcmoduleload: refint
\ No newline at end of file
diff --git a/deps/rabbitmq_auth_backend_ldap/example/refint_2.ldif b/deps/rabbitmq_auth_backend_ldap/example/refint_2.ldif
new file mode 100644
index 0000000000..0955a1a5fc
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_ldap/example/refint_2.ldif
@@ -0,0 +1,7 @@
+dn: olcOverlay={1}refint,olcDatabase={1}bdb,cn=config
+objectClass: olcConfig
+objectClass: olcOverlayConfig
+objectClass: olcRefintConfig
+objectClass: top
+olcOverlay: {1}refint
+olcRefintAttribute: memberof member manager owner
diff --git a/deps/rabbitmq_auth_backend_ldap/priv/schema/rabbitmq_auth_backend_ldap.schema b/deps/rabbitmq_auth_backend_ldap/priv/schema/rabbitmq_auth_backend_ldap.schema
new file mode 100644
index 0000000000..ae247dca91
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_ldap/priv/schema/rabbitmq_auth_backend_ldap.schema
@@ -0,0 +1,339 @@
+%% ----------------------------------------------------------------------------
+%% RabbitMQ LDAP Plugin
+%%
+%% See https://www.rabbitmq.com/ldap.html for details.
+%%
+%% ----------------------------------------------------------------------------
+
+% {rabbitmq_auth_backend_ldap,
+% [
+%%
+%% Connecting to the LDAP server(s)
+%% ================================
+%%
+
+%% Specify servers to bind to. You *must* set this in order for the plugin
+%% to work properly.
+%%
+%% {servers, ["your-server-name-goes-here"]},
+
+{mapping, "auth_ldap.servers", "rabbitmq_auth_backend_ldap.servers",
+ [{datatype, {enum, [none]}}]}.
+
+{mapping, "auth_ldap.servers.$server", "rabbitmq_auth_backend_ldap.servers",
+ [{datatype, string}]}.
+
+{translation, "rabbitmq_auth_backend_ldap.servers",
+fun(Conf) ->
+ case cuttlefish:conf_get("auth_ldap.servers", Conf, undefined) of
+ none -> [];
+ _ ->
+ Settings = cuttlefish_variable:filter_by_prefix("auth_ldap.servers", Conf),
+ [ V || {_, V} <- Settings ]
+ end
+end}.
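+
+%% For example, in the new-style configuration format (host names assumed):
+%%
+%% auth_ldap.servers.1 = ldap1.example.com
+%% auth_ldap.servers.2 = ldap2.example.com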
+
+%% Specify the LDAP port to connect to
+%%
+%% {port, 389},
+
+{mapping, "auth_ldap.port", "rabbitmq_auth_backend_ldap.port",
+ [{datatype, integer}]}.
+
+%% LDAP connection/worker pool size
+%%
+%% {pool_size, 64},
+
+{mapping, "auth_ldap.connection_pool_size", "rabbitmq_auth_backend_ldap.pool_size",
+ [{datatype, integer}]}.
+
+%% LDAP connection timeout, in milliseconds or 'infinity'
+%%
+%% {timeout, infinity},
+
+{mapping, "auth_ldap.timeout", "rabbitmq_auth_backend_ldap.timeout",
+ [{datatype, [integer, {atom, infinity}]}]}.
+
+%% LDAP connection inactivity timeout, in milliseconds or 'infinity'
+%%
+%% {idle_timeout, 300000},
+
+{mapping, "auth_ldap.idle_timeout", "rabbitmq_auth_backend_ldap.idle_timeout",
+ [{datatype, [integer, {atom, infinity}]}]}.
+
+%% Enable logging of LDAP queries.
+%% One of
+%% - false (no logging is performed)
+%% - true (verbose logging of the logic used by the plugin)
+%% - network (as true, but additionally logs LDAP network traffic)
+%% - network_unsafe (won't try to scrub any credentials)
+%%
+%% Defaults to false.
+%%
+%% {log, false},
+
+{mapping, "auth_ldap.log", "rabbitmq_auth_backend_ldap.log",
+ [{datatype, {enum, [true, false, network, network_unsafe]}}]}.
+
+%%
+%% Authentication
+%% ==============
+%%
+
+%% Pattern to convert the username given through AMQP to a different
+%% form before performing a simple bind
+%%
+%% {user_bind_pattern, "${ad_user}@${ad_domain}.com"},
+
+{mapping, "auth_ldap.user_bind_pattern", "rabbitmq_auth_backend_ldap.user_bind_pattern",
+ [{datatype, string}]}.
+
+%% Pattern to convert the username given through AMQP to a DN before
+%% binding
+%%
+%% {user_dn_pattern, "cn=${username},ou=People,dc=example,dc=com"},
+
+{mapping, "auth_ldap.user_dn_pattern", "rabbitmq_auth_backend_ldap.user_dn_pattern",
+ [{datatype, string}]}.
+
+%% Alternatively, you can convert a username to a Distinguished
+%% Name via an LDAP lookup after binding. See the documentation for
+%% full details.
+
+%% When converting a username to a DN via a lookup, set these to
+%% the name of the attribute that represents the user name, and the
+%% base DN for the lookup query.
+%%
+%% {dn_lookup_attribute, "userPrincipalName"},
+%% {dn_lookup_base, "DC=gopivotal,DC=com"},
+
+{mapping, "auth_ldap.dn_lookup_attribute", "rabbitmq_auth_backend_ldap.dn_lookup_attribute",
+ [{datatype, [{enum, [none]}, string]}]}.
+
+{mapping, "auth_ldap.dn_lookup_base", "rabbitmq_auth_backend_ldap.dn_lookup_base",
+ [{datatype, [{enum, [none]}, string]}]}.
+
+{mapping, "auth_ldap.dn_lookup_bind", "rabbitmq_auth_backend_ldap.dn_lookup_bind",
+ [{datatype, [{enum, [as_user, anon]}]}]}.
+
+{mapping, "auth_ldap.dn_lookup_bind.user_dn", "rabbitmq_auth_backend_ldap.dn_lookup_bind",
+ [{datatype, [string]}]}.
+
+{mapping, "auth_ldap.dn_lookup_bind.password", "rabbitmq_auth_backend_ldap.dn_lookup_bind",
+ [{datatype, [string]}]}.
+
+%% - as_user (to bind as the authenticated user - requires a password)
+%% - anon (to bind anonymously)
+%% - {UserDN, Password} (to bind with a specified user name and password)
+%%
+%% Defaults to 'as_user'.
+
+{translation, "rabbitmq_auth_backend_ldap.dn_lookup_bind",
+fun(Conf) ->
+ case cuttlefish:conf_get("auth_ldap.dn_lookup_bind", Conf, undefined) of
+ as_user -> as_user;
+ anon -> anon;
+ _ ->
+ User = cuttlefish:conf_get("auth_ldap.dn_lookup_bind.user_dn", Conf),
+ Pass = cuttlefish:conf_get("auth_ldap.dn_lookup_bind.password", Conf),
+ case {User, Pass} of
+ {undefined, _} -> as_user;
+ {_, undefined} -> as_user;
+ _ -> {User, Pass}
+ end
+ end
+end}.
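+
+%% A sketch of the accepted forms (hypothetical values):
+%%
+%%   auth_ldap.dn_lookup_bind = as_user              (-> as_user)
+%%   auth_ldap.dn_lookup_bind = anon                 (-> anon)
+%%   auth_ldap.dn_lookup_bind.user_dn  = cn=lookup,dc=example,dc=com
+%%   auth_ldap.dn_lookup_bind.password = secret      (-> {UserDN, Password})
+%%
+%% If only one of user_dn/password is set, the translation above falls
+%% back to 'as_user'.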
+
+%% Controls how to bind for authorisation queries and also to
+%% retrieve the details of users logging in without presenting a
+%% password (e.g., SASL EXTERNAL).
+%% One of
+%% - as_user (to bind as the authenticated user - requires a password)
+%% - anon (to bind anonymously)
+%% - {UserDN, Password} (to bind with a specified user name and password)
+%%
+%% Defaults to 'as_user'.
+
+{mapping, "auth_ldap.other_bind", "rabbitmq_auth_backend_ldap.other_bind",
+ [{datatype, {enum, [as_user, anon]}}]}.
+
+{mapping, "auth_ldap.other_bind.user_dn", "rabbitmq_auth_backend_ldap.other_bind",
+ [{datatype, string}]}.
+
+{mapping, "auth_ldap.other_bind.password", "rabbitmq_auth_backend_ldap.other_bind",
+ [{datatype, string}]}.
+
+{translation, "rabbitmq_auth_backend_ldap.other_bind",
+fun(Conf) ->
+ case cuttlefish:conf_get("auth_ldap.other_bind", Conf, undefined) of
+ as_user -> as_user;
+ anon -> anon;
+ _ ->
+ User = cuttlefish:conf_get("auth_ldap.other_bind.user_dn", Conf),
+ Pass = cuttlefish:conf_get("auth_ldap.other_bind.password", Conf),
+ case {User, Pass} of
+ {undefined, _} -> as_user;
+ {_, undefined} -> as_user;
+ _ -> {User, Pass}
+ end
+ end
+end}.
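+
+%% As with dn_lookup_bind above, setting both keys (hypothetical values)
+%% yields a {UserDN, Password} pair:
+%%
+%%   auth_ldap.other_bind.user_dn  = cn=reader,dc=example,dc=com
+%%   auth_ldap.other_bind.password = secret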
+
+%%
+%% Authorisation
+%% =============
+%%
+
+%% Groups are searched in the DN defined by the `group_lookup_base`
+%% configuration key, or in the `dn_lookup_base` variable if the
+%% former is `none`.
+
+{mapping, "auth_ldap.group_lookup_base", "rabbitmq_auth_backend_ldap.group_lookup_base",
+ [{datatype, [{enum, [none]}, string]}]}.
+
+%% The LDAP plugin can perform a variety of queries against your
+%% LDAP server to determine questions of authorisation. See
+%% https://www.rabbitmq.com/ldap.html#authorisation for more
+%% information.
+
+%% Set the query to use when determining vhost access
+%%
+%% {vhost_access_query, {in_group,
+%% "ou=${vhost}-users,ou=vhosts,dc=example,dc=com"}},
+
+%% Set the query to use when determining resource (e.g., queue) access
+%%
+%% {resource_access_query, {constant, true}},
+
+%% Set queries to determine which tags a user has
+%%
+%% {tag_queries, []}
+
+%% Connect to the LDAP server using TLS
+%%
+%% {use_ssl, false},
+
+{mapping, "auth_ldap.use_ssl", "rabbitmq_auth_backend_ldap.use_ssl",
+ [{datatype, {enum, [true, false]}}]}.
+
+%% Connect to the LDAP server using StartTLS
+%%
+%% {use_starttls, false},
+
+{mapping, "auth_ldap.use_starttls", "rabbitmq_auth_backend_ldap.use_starttls",
+ [{datatype, {enum, [true, false]}}]}.
+
+
+%% TLS options
+
+{mapping, "auth_ldap.ssl_options", "rabbitmq_auth_backend_ldap.ssl_options", [
+ {datatype, {enum, [none]}}
+]}.
+
+{translation, "rabbitmq_auth_backend_ldap.ssl_options",
+fun(Conf) ->
+ case cuttlefish:conf_get("auth_ldap.ssl_options", Conf, undefined) of
+ none -> [];
+ _ -> cuttlefish:invalid("Invalid auth_ldap.ssl_options")
+ end
+end}.
+
+{mapping, "auth_ldap.ssl_options.verify", "rabbitmq_auth_backend_ldap.ssl_options.verify", [
+ {datatype, {enum, [verify_peer, verify_none]}}]}.
+
+{mapping, "auth_ldap.ssl_options.fail_if_no_peer_cert", "rabbitmq_auth_backend_ldap.ssl_options.fail_if_no_peer_cert", [
+ {datatype, {enum, [true, false]}}]}.
+
+{mapping, "auth_ldap.ssl_options.cacertfile", "rabbitmq_auth_backend_ldap.ssl_options.cacertfile",
+ [{datatype, string}, {validators, ["file_accessible"]}]}.
+
+{mapping, "auth_ldap.ssl_options.certfile", "rabbitmq_auth_backend_ldap.ssl_options.certfile",
+ [{datatype, string}, {validators, ["file_accessible"]}]}.
+
+{mapping, "auth_ldap.ssl_options.cacerts.$name", "rabbitmq_auth_backend_ldap.ssl_options.cacerts",
+ [{datatype, string}]}.
+
+{translation, "rabbitmq_auth_backend_ldap.ssl_options.cacerts",
+fun(Conf) ->
+ Settings = cuttlefish_variable:filter_by_prefix("auth_ldap.ssl_options.cacerts", Conf),
+ [ list_to_binary(V) || {_, V} <- Settings ]
+end}.
+
+{mapping, "auth_ldap.ssl_options.cert", "rabbitmq_auth_backend_ldap.ssl_options.cert",
+ [{datatype, string}]}.
+
+{translation, "rabbitmq_auth_backend_ldap.ssl_options.cert",
+fun(Conf) ->
+ list_to_binary(cuttlefish:conf_get("auth_ldap.ssl_options.cert", Conf))
+end}.
+
+{mapping, "auth_ldap.ssl_options.client_renegotiation", "rabbitmq_auth_backend_ldap.ssl_options.client_renegotiation",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "auth_ldap.ssl_options.crl_check", "rabbitmq_auth_backend_ldap.ssl_options.crl_check",
+ [{datatype, [{enum, [true, false, peer, best_effort]}]}]}.
+
+{mapping, "auth_ldap.ssl_options.depth", "rabbitmq_auth_backend_ldap.ssl_options.depth",
+ [{datatype, integer}, {validators, ["byte"]}]}.
+
+{mapping, "auth_ldap.ssl_options.dh", "rabbitmq_auth_backend_ldap.ssl_options.dh",
+ [{datatype, string}]}.
+
+{translation, "rabbitmq_auth_backend_ldap.ssl_options.dh",
+fun(Conf) ->
+ list_to_binary(cuttlefish:conf_get("auth_ldap.ssl_options.dh", Conf))
+end}.
+
+{mapping, "auth_ldap.ssl_options.dhfile", "rabbitmq_auth_backend_ldap.ssl_options.dhfile",
+ [{datatype, string}, {validators, ["file_accessible"]}]}.
+
+{mapping, "auth_ldap.ssl_options.honor_cipher_order", "rabbitmq_auth_backend_ldap.ssl_options.honor_cipher_order",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "auth_ldap.ssl_options.honor_ecc_order", "rabbitmq_auth_backend_ldap.ssl_options.honor_ecc_order",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "auth_ldap.ssl_options.key.RSAPrivateKey", "rabbitmq_auth_backend_ldap.ssl_options.key",
+ [{datatype, string}]}.
+
+{mapping, "auth_ldap.ssl_options.key.DSAPrivateKey", "rabbitmq_auth_backend_ldap.ssl_options.key",
+ [{datatype, string}]}.
+
+{mapping, "auth_ldap.ssl_options.key.PrivateKeyInfo", "rabbitmq_auth_backend_ldap.ssl_options.key",
+ [{datatype, string}]}.
+
+{translation, "rabbitmq_auth_backend_ldap.ssl_options.key",
+fun(Conf) ->
+ case cuttlefish_variable:filter_by_prefix("auth_ldap.ssl_options.key", Conf) of
+        [{[_,_,_,Key], Val}|_] -> {list_to_atom(Key), list_to_binary(Val)};
+ _ -> undefined
+ end
+end}.
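+
+%% For illustration (hypothetical value): the private key is keyed by its
+%% PEM entry type, so
+%%
+%%   auth_ldap.ssl_options.key.PrivateKeyInfo = <base64-encoded key>
+%%
+%% becomes {'PrivateKeyInfo', <<...>>} via the translation above.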
+
+{mapping, "auth_ldap.ssl_options.keyfile", "rabbitmq_auth_backend_ldap.ssl_options.keyfile",
+ [{datatype, string}, {validators, ["file_accessible"]}]}.
+
+{mapping, "auth_ldap.ssl_options.log_alert", "rabbitmq_auth_backend_ldap.ssl_options.log_alert",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "auth_ldap.ssl_options.password", "rabbitmq_auth_backend_ldap.ssl_options.password",
+ [{datatype, string}]}.
+
+{mapping, "auth_ldap.ssl_options.psk_identity", "rabbitmq_auth_backend_ldap.ssl_options.psk_identity",
+ [{datatype, string}]}.
+
+{mapping, "auth_ldap.ssl_options.reuse_sessions", "rabbitmq_auth_backend_ldap.ssl_options.reuse_sessions",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "auth_ldap.ssl_options.secure_renegotiate", "rabbitmq_auth_backend_ldap.ssl_options.secure_renegotiate",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "auth_ldap.ssl_options.versions.$version", "rabbitmq_auth_backend_ldap.ssl_options.versions",
+ [{datatype, atom}]}.
+
+{translation, "rabbitmq_auth_backend_ldap.ssl_options.versions",
+fun(Conf) ->
+ Settings = cuttlefish_variable:filter_by_prefix("auth_ldap.ssl_options.versions", Conf),
+ [ V || {_, V} <- Settings ]
+end}.
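+
+%% For illustration (hypothetical values):
+%%
+%%   auth_ldap.ssl_options.versions.1 = tlsv1.2
+%%   auth_ldap.ssl_options.versions.2 = tlsv1.3
+%%
+%% collects into ['tlsv1.2', 'tlsv1.3'] (atoms, per the mapping's datatype).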
diff --git a/deps/rabbitmq_auth_backend_ldap/rabbitmq-components.mk b/deps/rabbitmq_auth_backend_ldap/rabbitmq-components.mk
new file mode 100644
index 0000000000..b2a3be8b35
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_ldap/rabbitmq-components.mk
@@ -0,0 +1,359 @@
+ifeq ($(.DEFAULT_GOAL),)
+# Define the default goal as `all` because this file defines some targets
+# before the inclusion of erlang.mk, which would otherwise cause the wrong
+# target to become the default.
+.DEFAULT_GOAL = all
+endif
+
+# PROJECT_VERSION defaults to:
+# 1. the version exported by rabbitmq-server-release;
+# 2. the version stored in `git-revisions.txt`, if it exists;
+# 3. a version based on git-describe(1), if it is a Git clone;
+# 4. 0.0.0
+
+PROJECT_VERSION := $(RABBITMQ_VERSION)
+
+ifeq ($(PROJECT_VERSION),)
+PROJECT_VERSION := $(shell \
+if test -f git-revisions.txt; then \
+ head -n1 git-revisions.txt | \
+ awk '{print $$$(words $(PROJECT_DESCRIPTION) version);}'; \
+else \
+ (git describe --dirty --abbrev=7 --tags --always --first-parent \
+ 2>/dev/null || echo rabbitmq_v0_0_0) | \
+ sed -e 's/^rabbitmq_v//' -e 's/^v//' -e 's/_/./g' -e 's/-/+/' \
+ -e 's/-/./g'; \
+fi)
+endif
+
+# --------------------------------------------------------------------
+# RabbitMQ components.
+# --------------------------------------------------------------------
+
+# For RabbitMQ repositories, we want to checkout branches which match
+# the parent project. For instance, if the parent project is on a
+# release tag, dependencies must be on the same release tag. If the
+# parent project is on a topic branch, dependencies must be on the same
+# topic branch, or fall back to `stable` or `master`, whichever was
+# the base of the topic branch.
+
+dep_amqp_client = git_rmq rabbitmq-erlang-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_amqp10_client = git_rmq rabbitmq-amqp1.0-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_amqp10_common = git_rmq rabbitmq-amqp1.0-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbit = git_rmq rabbitmq-server $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbit_common = git_rmq rabbitmq-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_amqp1_0 = git_rmq rabbitmq-amqp1.0 $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_amqp = git_rmq rabbitmq-auth-backend-amqp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_cache = git_rmq rabbitmq-auth-backend-cache $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_http = git_rmq rabbitmq-auth-backend-http $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_ldap = git_rmq rabbitmq-auth-backend-ldap $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_oauth2 = git_rmq rabbitmq-auth-backend-oauth2 $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_mechanism_ssl = git_rmq rabbitmq-auth-mechanism-ssl $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_aws = git_rmq rabbitmq-aws $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_boot_steps_visualiser = git_rmq rabbitmq-boot-steps-visualiser $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_cli = git_rmq rabbitmq-cli $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_codegen = git_rmq rabbitmq-codegen $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_consistent_hash_exchange = git_rmq rabbitmq-consistent-hash-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_ct_client_helpers = git_rmq rabbitmq-ct-client-helpers $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_ct_helpers = git_rmq rabbitmq-ct-helpers $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_delayed_message_exchange = git_rmq rabbitmq-delayed-message-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_dotnet_client = git_rmq rabbitmq-dotnet-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_event_exchange = git_rmq rabbitmq-event-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_federation = git_rmq rabbitmq-federation $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_federation_management = git_rmq rabbitmq-federation-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_java_client = git_rmq rabbitmq-java-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_client = git_rmq rabbitmq-jms-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_cts = git_rmq rabbitmq-jms-cts $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_topic_exchange = git_rmq rabbitmq-jms-topic-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_lvc_exchange = git_rmq rabbitmq-lvc-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management = git_rmq rabbitmq-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_agent = git_rmq rabbitmq-management-agent $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_exchange = git_rmq rabbitmq-management-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_themes = git_rmq rabbitmq-management-themes $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_message_timestamp = git_rmq rabbitmq-message-timestamp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_metronome = git_rmq rabbitmq-metronome $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_mqtt = git_rmq rabbitmq-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_objc_client = git_rmq rabbitmq-objc-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_aws = git_rmq rabbitmq-peer-discovery-aws $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_common = git_rmq rabbitmq-peer-discovery-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_consul = git_rmq rabbitmq-peer-discovery-consul $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_etcd = git_rmq rabbitmq-peer-discovery-etcd $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_k8s = git_rmq rabbitmq-peer-discovery-k8s $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_prometheus = git_rmq rabbitmq-prometheus $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_random_exchange = git_rmq rabbitmq-random-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_recent_history_exchange = git_rmq rabbitmq-recent-history-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_routing_node_stamp = git_rmq rabbitmq-routing-node-stamp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_rtopic_exchange = git_rmq rabbitmq-rtopic-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_server_release = git_rmq rabbitmq-server-release $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_sharding = git_rmq rabbitmq-sharding $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_shovel = git_rmq rabbitmq-shovel $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_shovel_management = git_rmq rabbitmq-shovel-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_stomp = git_rmq rabbitmq-stomp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_stream = git_rmq rabbitmq-stream $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_toke = git_rmq rabbitmq-toke $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_top = git_rmq rabbitmq-top $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_tracing = git_rmq rabbitmq-tracing $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_trust_store = git_rmq rabbitmq-trust-store $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_test = git_rmq rabbitmq-test $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_dispatch = git_rmq rabbitmq-web-dispatch $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_stomp = git_rmq rabbitmq-web-stomp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_stomp_examples = git_rmq rabbitmq-web-stomp-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt = git_rmq rabbitmq-web-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt_examples = git_rmq rabbitmq-web-mqtt-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_website = git_rmq rabbitmq-website $(current_rmq_ref) $(base_rmq_ref) live master
+dep_toke = git_rmq toke $(current_rmq_ref) $(base_rmq_ref) master
+
+dep_rabbitmq_public_umbrella = git_rmq rabbitmq-public-umbrella $(current_rmq_ref) $(base_rmq_ref) master
+
+# Third-party dependencies version pinning.
+#
+# We do that in this file, which is copied in all projects, to ensure
+# all projects use the same versions. It avoids conflicts and makes it
+# possible to work with rabbitmq-public-umbrella.
+
+dep_accept = hex 0.3.5
+dep_cowboy = hex 2.8.0
+dep_cowlib = hex 2.9.1
+dep_jsx = hex 2.11.0
+dep_lager = hex 3.8.0
+dep_prometheus = git https://github.com/deadtrickster/prometheus.erl.git master
+dep_ra = git https://github.com/rabbitmq/ra.git master
+dep_ranch = hex 1.7.1
+dep_recon = hex 2.5.1
+dep_observer_cli = hex 1.5.4
+dep_stdout_formatter = hex 0.2.4
+dep_sysmon_handler = hex 1.3.0
+
+RABBITMQ_COMPONENTS = amqp_client \
+ amqp10_common \
+ amqp10_client \
+ rabbit \
+ rabbit_common \
+ rabbitmq_amqp1_0 \
+ rabbitmq_auth_backend_amqp \
+ rabbitmq_auth_backend_cache \
+ rabbitmq_auth_backend_http \
+ rabbitmq_auth_backend_ldap \
+ rabbitmq_auth_backend_oauth2 \
+ rabbitmq_auth_mechanism_ssl \
+ rabbitmq_aws \
+ rabbitmq_boot_steps_visualiser \
+ rabbitmq_cli \
+ rabbitmq_codegen \
+ rabbitmq_consistent_hash_exchange \
+ rabbitmq_ct_client_helpers \
+ rabbitmq_ct_helpers \
+ rabbitmq_delayed_message_exchange \
+ rabbitmq_dotnet_client \
+ rabbitmq_event_exchange \
+ rabbitmq_federation \
+ rabbitmq_federation_management \
+ rabbitmq_java_client \
+ rabbitmq_jms_client \
+ rabbitmq_jms_cts \
+ rabbitmq_jms_topic_exchange \
+ rabbitmq_lvc_exchange \
+ rabbitmq_management \
+ rabbitmq_management_agent \
+ rabbitmq_management_exchange \
+ rabbitmq_management_themes \
+ rabbitmq_message_timestamp \
+ rabbitmq_metronome \
+ rabbitmq_mqtt \
+ rabbitmq_objc_client \
+ rabbitmq_peer_discovery_aws \
+ rabbitmq_peer_discovery_common \
+ rabbitmq_peer_discovery_consul \
+ rabbitmq_peer_discovery_etcd \
+ rabbitmq_peer_discovery_k8s \
+ rabbitmq_prometheus \
+ rabbitmq_random_exchange \
+ rabbitmq_recent_history_exchange \
+ rabbitmq_routing_node_stamp \
+ rabbitmq_rtopic_exchange \
+ rabbitmq_server_release \
+ rabbitmq_sharding \
+ rabbitmq_shovel \
+ rabbitmq_shovel_management \
+ rabbitmq_stomp \
+ rabbitmq_stream \
+ rabbitmq_toke \
+ rabbitmq_top \
+ rabbitmq_tracing \
+ rabbitmq_trust_store \
+ rabbitmq_web_dispatch \
+ rabbitmq_web_mqtt \
+ rabbitmq_web_mqtt_examples \
+ rabbitmq_web_stomp \
+ rabbitmq_web_stomp_examples \
+ rabbitmq_website
+
+# Erlang.mk does not rebuild dependencies by default once they have been
+# compiled, except for those listed in the `$(FORCE_REBUILD)` variable.
+#
+# We want all RabbitMQ components to always be rebuilt: this eases
+# the work on several components at the same time.
+
+FORCE_REBUILD = $(RABBITMQ_COMPONENTS)
+
+# Several components have a custom erlang.mk/build.config, mainly
+# to disable eunit. Therefore, we can't use the top-level project's
+# erlang.mk copy.
+NO_AUTOPATCH += $(RABBITMQ_COMPONENTS)
+
+ifeq ($(origin current_rmq_ref),undefined)
+ifneq ($(wildcard .git),)
+current_rmq_ref := $(shell (\
+ ref=$$(LANG=C git branch --list | awk '/^\* \(.*detached / {ref=$$0; sub(/.*detached [^ ]+ /, "", ref); sub(/\)$$/, "", ref); print ref; exit;} /^\* / {ref=$$0; sub(/^\* /, "", ref); print ref; exit}');\
+ if test "$$(git rev-parse --short HEAD)" != "$$ref"; then echo "$$ref"; fi))
+else
+current_rmq_ref := master
+endif
+endif
+export current_rmq_ref
+
+ifeq ($(origin base_rmq_ref),undefined)
+ifneq ($(wildcard .git),)
+possible_base_rmq_ref := master
+ifeq ($(possible_base_rmq_ref),$(current_rmq_ref))
+base_rmq_ref := $(current_rmq_ref)
+else
+base_rmq_ref := $(shell \
+ (git rev-parse --verify -q master >/dev/null && \
+ git rev-parse --verify -q $(possible_base_rmq_ref) >/dev/null && \
+ git merge-base --is-ancestor $$(git merge-base master HEAD) $(possible_base_rmq_ref) && \
+ echo $(possible_base_rmq_ref)) || \
+ echo master)
+endif
+else
+base_rmq_ref := master
+endif
+endif
+export base_rmq_ref
+
+# Repository URL selection.
+#
+# First, we infer other components' location from the current project
+# repository URL, if it's a Git repository:
+# - We take the "origin" remote URL as the base
+# - The current project name and repository name are replaced by the
+#   target's properties:
+# eg. rabbitmq-common is replaced by rabbitmq-codegen
+# eg. rabbit_common is replaced by rabbitmq_codegen
+#
+# If cloning from this computed location fails, we fall back to the
+# RabbitMQ upstream, which is GitHub.
+
+# Macro to transform eg. "rabbit_common" to "rabbitmq-common".
+rmq_cmp_repo_name = $(word 2,$(dep_$(1)))
+
+# Upstream URL for the current project.
+RABBITMQ_COMPONENT_REPO_NAME := $(call rmq_cmp_repo_name,$(PROJECT))
+RABBITMQ_UPSTREAM_FETCH_URL ?= https://github.com/rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git
+RABBITMQ_UPSTREAM_PUSH_URL ?= git@github.com:rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git
+
+# Current URL for the current project. If this is not a Git clone,
+# default to the upstream Git repository.
+ifneq ($(wildcard .git),)
+git_origin_fetch_url := $(shell git config remote.origin.url)
+git_origin_push_url := $(shell git config remote.origin.pushurl || git config remote.origin.url)
+RABBITMQ_CURRENT_FETCH_URL ?= $(git_origin_fetch_url)
+RABBITMQ_CURRENT_PUSH_URL ?= $(git_origin_push_url)
+else
+RABBITMQ_CURRENT_FETCH_URL ?= $(RABBITMQ_UPSTREAM_FETCH_URL)
+RABBITMQ_CURRENT_PUSH_URL ?= $(RABBITMQ_UPSTREAM_PUSH_URL)
+endif
+
+# Macro to replace the following pattern:
+# 1. /foo.git -> /bar.git
+# 2. /foo -> /bar
+# 3. /foo/ -> /bar/
+subst_repo_name = $(patsubst %/$(1)/%,%/$(2)/%,$(patsubst %/$(1),%/$(2),$(patsubst %/$(1).git,%/$(2).git,$(3))))
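+
+# For illustration (hypothetical URL), the ".git" case:
+#   $(call subst_repo_name,rabbitmq-common,rabbitmq-codegen,git@host:org/rabbitmq-common.git)
+# yields "git@host:org/rabbitmq-codegen.git".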
+
+# Macro to replace both the project's name (eg. "rabbit_common") and
+# repository name (eg. "rabbitmq-common") by the target's equivalent.
+#
+# This macro is kept on one line because we don't want whitespace in
+# the returned value, as it's used in $(dep_fetch_git_rmq) in a shell
+# single-quoted string.
+dep_rmq_repo = $(if $(dep_$(2)),$(call subst_repo_name,$(PROJECT),$(2),$(call subst_repo_name,$(RABBITMQ_COMPONENT_REPO_NAME),$(call rmq_cmp_repo_name,$(2)),$(1))),$(pkg_$(1)_repo))
+
+dep_rmq_commits = $(if $(dep_$(1)), \
+ $(wordlist 3,$(words $(dep_$(1))),$(dep_$(1))), \
+ $(pkg_$(1)_commit))
+
+define dep_fetch_git_rmq
+ fetch_url1='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_FETCH_URL),$(1))'; \
+ fetch_url2='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_FETCH_URL),$(1))'; \
+ if test "$$$$fetch_url1" != '$(RABBITMQ_CURRENT_FETCH_URL)' && \
+ git clone -q -n -- "$$$$fetch_url1" $(DEPS_DIR)/$(call dep_name,$(1)); then \
+ fetch_url="$$$$fetch_url1"; \
+ push_url='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_PUSH_URL),$(1))'; \
+ elif git clone -q -n -- "$$$$fetch_url2" $(DEPS_DIR)/$(call dep_name,$(1)); then \
+ fetch_url="$$$$fetch_url2"; \
+ push_url='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_PUSH_URL),$(1))'; \
+ fi; \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && ( \
+ $(foreach ref,$(call dep_rmq_commits,$(1)), \
+ git checkout -q $(ref) >/dev/null 2>&1 || \
+ ) \
+ (echo "error: no valid pathspec among: $(call dep_rmq_commits,$(1))" \
+ 1>&2 && false) ) && \
+ (test "$$$$fetch_url" = "$$$$push_url" || \
+ git remote set-url --push origin "$$$$push_url")
+endef
+
+# --------------------------------------------------------------------
+# Component distribution.
+# --------------------------------------------------------------------
+
+list-dist-deps::
+ @:
+
+prepare-dist::
+ @:
+
+# --------------------------------------------------------------------
+# Umbrella-specific settings.
+# --------------------------------------------------------------------
+
+# If the top-level project is a RabbitMQ component, we override
+# $(DEPS_DIR) for this project to point to the top-level's one.
+#
+# We also verify that the guessed DEPS_DIR is actually named `deps`,
+# to rule out any situation where we found a `rabbitmq-components.mk`
+# in an upper directory by coincidence.
+
+possible_deps_dir_1 = $(abspath ..)
+possible_deps_dir_2 = $(abspath ../../..)
+
+ifeq ($(notdir $(possible_deps_dir_1)),deps)
+ifneq ($(wildcard $(possible_deps_dir_1)/../rabbitmq-components.mk),)
+deps_dir_overriden = 1
+DEPS_DIR ?= $(possible_deps_dir_1)
+DISABLE_DISTCLEAN = 1
+endif
+endif
+
+ifeq ($(deps_dir_overriden),)
+ifeq ($(notdir $(possible_deps_dir_2)),deps)
+ifneq ($(wildcard $(possible_deps_dir_2)/../rabbitmq-components.mk),)
+deps_dir_overriden = 1
+DEPS_DIR ?= $(possible_deps_dir_2)
+DISABLE_DISTCLEAN = 1
+endif
+endif
+endif
+
+ifneq ($(wildcard UMBRELLA.md),)
+DISABLE_DISTCLEAN = 1
+endif
+
+# We disable `make distclean` so $(DEPS_DIR) is not accidentally removed.
+
+ifeq ($(DISABLE_DISTCLEAN),1)
+ifneq ($(filter distclean distclean-deps,$(MAKECMDGOALS)),)
+SKIP_DEPS = 1
+endif
+endif
diff --git a/deps/rabbitmq_auth_backend_ldap/src/rabbit_auth_backend_ldap.erl b/deps/rabbitmq_auth_backend_ldap/src/rabbit_auth_backend_ldap.erl
new file mode 100644
index 0000000000..5664cbf35e
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_ldap/src/rabbit_auth_backend_ldap.erl
@@ -0,0 +1,928 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_auth_backend_ldap).
+
+%% Connect to an LDAP server for authentication and authorisation
+
+-include_lib("eldap/include/eldap.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-behaviour(rabbit_authn_backend).
+-behaviour(rabbit_authz_backend).
+
+-export([user_login_authentication/2, user_login_authorization/2,
+ check_vhost_access/3, check_resource_access/4, check_topic_access/4,
+ state_can_expire/0]).
+
+-export([get_connections/0]).
+
+%% for tests
+-export([purge_connections/0]).
+
+-define(L(F, A), log("LDAP " ++ F, A)).
+-define(L1(F, A), log(" LDAP " ++ F, A)).
+-define(L2(F, A), log(" LDAP " ++ F, A)).
+-define(SCRUBBED_CREDENTIAL, "xxxx").
+-define(RESOURCE_ACCESS_QUERY_VARIABLES, [username, user_dn, vhost, resource, name, permission]).
+
+-define(LDAP_OPERATION_RETRIES, 10).
+
+-import(rabbit_misc, [pget/2]).
+
+-record(impl, { user_dn, password }).
+
+%%--------------------------------------------------------------------
+
+get_connections() ->
+ worker_pool:submit(ldap_pool, fun() -> get(ldap_conns) end, reuse).
+
+purge_connections() ->
+ [ok = worker_pool:submit(ldap_pool,
+ fun() -> purge_conn(Anon, Servers, Opts) end, reuse)
+ || {{Anon, Servers, Opts}, _} <- maps:to_list(get_connections())],
+ ok.
+
+user_login_authentication(Username, []) ->
+ %% Without password, e.g. EXTERNAL
+ ?L("CHECK: passwordless login for ~s", [Username]),
+ R = with_ldap(creds(none),
+ fun(LDAP) -> do_login(Username, unknown, none, LDAP) end),
+ ?L("DECISION: passwordless login for ~s: ~p",
+ [Username, log_result(R)]),
+ R;
+
+user_login_authentication(Username, AuthProps) when is_list(AuthProps) ->
+ case pget(password, AuthProps) of
+ undefined -> user_login_authentication(Username, []);
+ <<>> ->
+ %% Password "" is special in LDAP, see
+ %% https://tools.ietf.org/html/rfc4513#section-5.1.2
+ ?L("CHECK: unauthenticated login for ~s", [Username]),
+ ?L("DECISION: unauthenticated login for ~s: denied", [Username]),
+ {refused, "user '~s' - unauthenticated bind not allowed", [Username]};
+ PW ->
+ ?L("CHECK: login for ~s", [Username]),
+ R = case dn_lookup_when() of
+ prebind -> UserDN = username_to_dn_prebind(Username),
+ with_ldap({ok, {UserDN, PW}},
+ login_fun(Username, UserDN, PW, AuthProps));
+ _ -> with_ldap({ok, {simple_bind_fill_pattern(Username), PW}},
+ login_fun(Username, unknown, PW, AuthProps))
+ end,
+ ?L("DECISION: login for ~s: ~p", [Username, log_result(R)]),
+ R
+ end;
+
+user_login_authentication(Username, AuthProps) ->
+ exit({unknown_auth_props, Username, AuthProps}).
+
+user_login_authorization(Username, AuthProps) ->
+ case user_login_authentication(Username, AuthProps) of
+ {ok, #auth_user{impl = Impl, tags = Tags}} -> {ok, Impl, Tags};
+ Else -> Else
+ end.
+
+check_vhost_access(User = #auth_user{username = Username,
+ impl = #impl{user_dn = UserDN}},
+ VHost, AuthzData) ->
+ OptionsArgs = context_as_options(AuthzData, undefined),
+ ADArgs = rabbit_auth_backend_ldap_util:get_active_directory_args(Username),
+ Args = [{username, Username},
+ {user_dn, UserDN},
+ {vhost, VHost}] ++ OptionsArgs ++ ADArgs,
+ ?L("CHECK: ~s for ~s", [log_vhost(Args), log_user(User)]),
+ R0 = evaluate_ldap(env(vhost_access_query), Args, User),
+ R1 = ensure_rabbit_authz_backend_result(R0),
+ ?L("DECISION: ~s for ~s: ~p (~p)",
+ [log_vhost(Args), log_user(User),
+ log_result(R0), log_result(R1)]),
+ R1.
+
+check_resource_access(User = #auth_user{username = Username,
+ impl = #impl{user_dn = UserDN}},
+ #resource{virtual_host = VHost, kind = Type, name = Name},
+ Permission,
+ AuthzContext) ->
+ OptionsArgs = context_as_options(AuthzContext, undefined),
+ ADArgs = rabbit_auth_backend_ldap_util:get_active_directory_args(Username),
+ Args = [{username, Username},
+ {user_dn, UserDN},
+ {vhost, VHost},
+ {resource, Type},
+ {name, Name},
+ {permission, Permission}] ++ OptionsArgs ++ ADArgs,
+ ?L("CHECK: ~s for ~s", [log_resource(Args), log_user(User)]),
+ R0 = evaluate_ldap(env(resource_access_query), Args, User),
+ R1 = ensure_rabbit_authz_backend_result(R0),
+ ?L("DECISION: ~s for ~s: ~p (~p)",
+ [log_resource(Args), log_user(User),
+ log_result(R0), log_result(R1)]),
+ R1.
+
+check_topic_access(User = #auth_user{username = Username,
+ impl = #impl{user_dn = UserDN}},
+ #resource{virtual_host = VHost, kind = topic = Resource, name = Name},
+ Permission,
+ Context) ->
+ OptionsArgs = context_as_options(Context, undefined),
+ ADArgs = rabbit_auth_backend_ldap_util:get_active_directory_args(Username),
+ Args = [{username, Username},
+ {user_dn, UserDN},
+ {vhost, VHost},
+ {resource, Resource},
+ {name, Name},
+ {permission, Permission}] ++ OptionsArgs ++ ADArgs,
+ ?L("CHECK: ~s for ~s", [log_resource(Args), log_user(User)]),
+ R0 = evaluate_ldap(env(topic_access_query), Args, User),
+ R1 = ensure_rabbit_authz_backend_result(R0),
+ ?L("DECISION: ~s for ~s: ~p (~p)",
+ [log_resource(Args), log_user(User),
+ log_result(R0), log_result(R1)]),
+ R1.
+
+state_can_expire() -> false.
+
+%%--------------------------------------------------------------------
+
+ensure_rabbit_authz_backend_result(true) ->
+ true;
+ensure_rabbit_authz_backend_result(false) ->
+ false;
+ensure_rabbit_authz_backend_result({error, _}=Error) ->
+ Error;
+% rabbitmq/rabbitmq-auth-backend-ldap#116
+ensure_rabbit_authz_backend_result({refused, _, _}) ->
+ false;
+ensure_rabbit_authz_backend_result({ok, _}) ->
+ true;
+ensure_rabbit_authz_backend_result({ok, _, _}) ->
+ true.
+
+context_as_options(Context, Namespace) when is_map(Context) ->
+ % filter keys that would erase fixed variables
+ lists:flatten([begin
+ Value = maps:get(Key, Context),
+ case Value of
+ MapOfValues when is_map(MapOfValues) ->
+ context_as_options(MapOfValues, Key);
+ SimpleValue ->
+ case Namespace of
+ undefined ->
+ {rabbit_data_coercion:to_atom(Key), SimpleValue};
+ Namespace ->
+ {create_option_name_with_namespace(Namespace, Key), Value}
+ end
+ end
+ end || Key <- maps:keys(Context), lists:member(
+ rabbit_data_coercion:to_atom(Key),
+ ?RESOURCE_ACCESS_QUERY_VARIABLES) =:= false]);
+context_as_options(_, _) ->
+ [].
+
+create_option_name_with_namespace(Namespace, Key) ->
+ rabbit_data_coercion:to_atom(
+ rabbit_data_coercion:to_list(Namespace) ++ "." ++ rabbit_data_coercion:to_list(Key)
+ ).
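+
+%% For illustration (hypothetical context): a nested authz context such as
+%%   #{<<"client_id">> => <<"id-1">>, <<"extra">> => #{<<"k">> => <<"v">>}}
+%% becomes [{client_id, <<"id-1">>}, {'extra.k', <<"v">>}], while keys that
+%% collide with ?RESOURCE_ACCESS_QUERY_VARIABLES are filtered out.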
+
+evaluate(Query, Args, User, LDAP) ->
+ ?L1("evaluating query: ~p", [Query]),
+ evaluate0(Query, Args, User, LDAP).
+
+evaluate0({constant, Bool}, _Args, _User, _LDAP) ->
+ ?L1("evaluated constant: ~p", [Bool]),
+ Bool;
+
+evaluate0({for, [{Type, Value, SubQuery}|Rest]}, Args, User, LDAP) ->
+ case pget(Type, Args) of
+ undefined -> {error, {args_do_not_contain, Type, Args}};
+ Value -> ?L1("selecting subquery ~s = ~s", [Type, Value]),
+ evaluate(SubQuery, Args, User, LDAP);
+ _ -> evaluate0({for, Rest}, Args, User, LDAP)
+ end;
+
+evaluate0({for, []}, _Args, _User, _LDAP) ->
+ {error, {for_query_incomplete}};
+
+evaluate0({exists, DNPattern}, Args, _User, LDAP) ->
+ %% eldap forces us to have a filter. objectClass should always be there.
+ Filter = eldap:present("objectClass"),
+ DN = fill(DNPattern, Args),
+ R = object_exists(DN, Filter, LDAP),
+ ?L1("evaluated exists for \"~s\": ~p", [DN, R]),
+ R;
+
+evaluate0({in_group, DNPattern}, Args, User, LDAP) ->
+ evaluate({in_group, DNPattern, "member"}, Args, User, LDAP);
+
+evaluate0({in_group, DNPattern, Desc}, Args,
+ #auth_user{impl = #impl{user_dn = UserDN}}, LDAP) ->
+ Filter = eldap:equalityMatch(Desc, UserDN),
+ DN = fill(DNPattern, Args),
+ R = object_exists(DN, Filter, LDAP),
+ ?L1("evaluated in_group for \"~s\": ~p", [DN, R]),
+ R;
+
+evaluate0({in_group_nested, DNPattern}, Args, User, LDAP) ->
+ evaluate({in_group_nested, DNPattern, "member", subtree},
+ Args, User, LDAP);
+evaluate0({in_group_nested, DNPattern, Desc}, Args, User, LDAP) ->
+ evaluate({in_group_nested, DNPattern, Desc, subtree},
+ Args, User, LDAP);
+evaluate0({in_group_nested, DNPattern, Desc, Scope}, Args,
+ #auth_user{impl = #impl{user_dn = UserDN}}, LDAP) ->
+ GroupsBase = case env(group_lookup_base) of
+ none ->
+ get_expected_env_str(dn_lookup_base, none);
+ B ->
+ B
+ end,
+ GroupDN = fill(DNPattern, Args),
+ EldapScope =
+ case Scope of
+ subtree -> eldap:wholeSubtree();
+ singlelevel -> eldap:singleLevel();
+ single_level -> eldap:singleLevel();
+ onelevel -> eldap:singleLevel();
+ one_level -> eldap:singleLevel()
+ end,
+ search_nested_group(LDAP, Desc, GroupsBase, EldapScope, UserDN, GroupDN, []);
+
+evaluate0({'not', SubQuery}, Args, User, LDAP) ->
+ R = evaluate(SubQuery, Args, User, LDAP),
+ ?L1("negated result to ~s", [R]),
+ not R;
+
+evaluate0({'and', Queries}, Args, User, LDAP) when is_list(Queries) ->
+ R = lists:foldl(fun (Q, true) -> evaluate(Q, Args, User, LDAP);
+ % Treat any non-true result as false
+ (_Q, _Result) -> false
+ end, true, Queries),
+ ?L1("'and' result: ~s", [R]),
+ R;
+
+evaluate0({'or', Queries}, Args, User, LDAP) when is_list(Queries) ->
+ R = lists:foldl(fun (_Q, true) -> true;
+ % Treat any non-true result as false
+ (Q, _Result) -> evaluate(Q, Args, User, LDAP)
+ end, false, Queries),
+ ?L1("'or' result: ~s", [R]),
+ R;
+
+evaluate0({equals, StringQuery1, StringQuery2}, Args, User, LDAP) ->
+ safe_eval(fun (String1, String2) ->
+ R = if String1 =:= String2 -> true;
+ true -> is_multi_attr_member(String1, String2)
+ end,
+ ?L1("evaluated equals \"~s\", \"~s\": ~s",
+ [format_multi_attr(String1),
+ format_multi_attr(String2), R]),
+ R
+ end,
+ evaluate(StringQuery1, Args, User, LDAP),
+ evaluate(StringQuery2, Args, User, LDAP));
+
+evaluate0({match, {string, _} = StringQuery, {string, _} = REQuery}, Args, User, LDAP) ->
+ safe_eval(fun (String1, String2) ->
+ do_match(String1, String2)
+ end,
+ evaluate(StringQuery, Args, User, LDAP),
+ evaluate(REQuery, Args, User, LDAP));
+
+evaluate0({match, StringQuery, {string, _} = REQuery}, Args, User, LDAP) when is_list(StringQuery)->
+ safe_eval(fun (String1, String2) ->
+ do_match(String1, String2)
+ end,
+ evaluate(StringQuery, Args, User, LDAP),
+ evaluate(REQuery, Args, User, LDAP));
+
+evaluate0({match, {string, _} = StringQuery, REQuery}, Args, User, LDAP) when is_list(REQuery) ->
+ safe_eval(fun (String1, String2) ->
+ do_match(String1, String2)
+ end,
+ evaluate(StringQuery, Args, User, LDAP),
+ evaluate(REQuery, Args, User, LDAP));
+
+evaluate0({match, StringQuery, REQuery}, Args, User, LDAP) when is_list(StringQuery),
+ is_list(REQuery) ->
+ safe_eval(fun (String1, String2) ->
+ do_match(String1, String2)
+ end,
+ evaluate(StringQuery, Args, User, LDAP),
+ evaluate(REQuery, Args, User, LDAP));
+
+evaluate0({match, StringQuery, REQuery}, Args, User, LDAP) ->
+ safe_eval(fun (String1, String2) ->
+ do_match_multi(String1, String2)
+ end,
+ evaluate(StringQuery, Args, User, LDAP),
+ evaluate(REQuery, Args, User, LDAP));
+
+evaluate0(StringPattern, Args, User, LDAP) when is_list(StringPattern) ->
+ evaluate0({string, StringPattern}, Args, User, LDAP);
+
+evaluate0({string, StringPattern}, Args, _User, _LDAP) ->
+ R = fill(StringPattern, Args),
+ ?L1("evaluated string for \"~s\"", [R]),
+ R;
+
+evaluate0({attribute, DNPattern, AttributeName}, Args, _User, LDAP) ->
+ DN = fill(DNPattern, Args),
+ R = attribute(DN, AttributeName, LDAP),
+ ?L1("evaluated attribute \"~s\" for \"~s\": ~p",
+ [AttributeName, DN, format_multi_attr(R)]),
+ R;
+
+evaluate0(Q, Args, _User, _LDAP) ->
+ {error, {unrecognised_query, Q, Args}}.
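+
+%% For illustration (hypothetical query): the clauses above compose, so a
+%% vhost_access_query of
+%%   {'or', [{in_group, "ou=${vhost}-users,ou=vhosts,dc=example,dc=com"},
+%%           {constant, false}]}
+%% first fills the DN template from Args, then checks group membership.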
+
+search_groups(LDAP, Desc, GroupsBase, Scope, DN) ->
+ Filter = eldap:equalityMatch(Desc, DN),
+ case eldap:search(LDAP,
+ [{base, GroupsBase},
+ {filter, Filter},
+ {attributes, ["dn"]},
+ {scope, Scope}]) of
+ {error, _} = E ->
+ ?L("error searching for parent groups for \"~s\": ~p", [DN, E]),
+ [];
+ {ok, {referral, Referrals}} ->
+ {error, {referrals_not_supported, Referrals}};
+ {ok, #eldap_search_result{entries = []}} ->
+ [];
+ {ok, #eldap_search_result{entries = Entries}} ->
+ [ON || #eldap_entry{object_name = ON} <- Entries]
+ end.
+
+search_nested_group(LDAP, Desc, GroupsBase, Scope, CurrentDN, TargetDN, Path) ->
+ case lists:member(CurrentDN, Path) of
+ true ->
+ ?L("recursive cycle on DN ~s while searching for group ~s",
+ [CurrentDN, TargetDN]),
+ false;
+ false ->
+ GroupDNs = search_groups(LDAP, Desc, GroupsBase, Scope, CurrentDN),
+ case lists:member(TargetDN, GroupDNs) of
+ true ->
+ true;
+ false ->
+ NextPath = [CurrentDN | Path],
+ lists:any(fun(DN) ->
+ search_nested_group(LDAP, Desc, GroupsBase, Scope,
+ DN, TargetDN, NextPath)
+ end,
+ GroupDNs)
+ end
+ end.
+
+safe_eval(_F, {error, _}, _) -> false;
+safe_eval(_F, _, {error, _}) -> false;
+safe_eval(F, V1, V2) -> F(V1, V2).
+
+do_match(S1, S2) ->
+ case re:run(S1, S2) of
+ {match, _} ->
+ log_match(S1, S2, R = true),
+ R;
+ nomatch ->
+ log_match(S1, S2, R = false),
+ R
+ end.
+
+%% In some cases, LDAP evaluation returns a list of strings rather than a
+%% single string, so we need guards for that case. If a list of strings is
+%% returned, loop and match against each element.
+do_match_multi(S1, []) ->
+ log_match(S1, [], R = false),
+ R;
+do_match_multi(S1 = [H1|_], [H2|Tail]) when is_list(H2) and not is_list(H1) ->
+ case re:run(S1, H2) of
+ {match, _} ->
+ log_match(S1, H2, R = true),
+ R;
+ _ ->
+ log_match(S1,H2, false),
+ do_match_multi(S1, Tail)
+ end;
+do_match_multi([], S2) ->
+ log_match([], S2, R = false),
+ R;
+do_match_multi([H1|Tail], S2 = [H2|_] ) when is_list(H1) and not is_list(H2) ->
+ case re:run(H1, S2) of
+ {match, _} ->
+ log_match(H1, S2, R = true),
+ R;
+ _ ->
+ log_match(H1, S2, false),
+ do_match_multi(Tail, S2)
+ end;
+do_match_multi([H1|_],[H2|_]) when is_list(H1) and is_list(H2) ->
+ false; %% Unsupported combination
+do_match_multi(S1, S2) ->
+ do_match(S1, S2).
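+
+%% For illustration: do_match_multi("amqp-user", ["^amqp", "^mqtt"]) tries
+%% each pattern in turn and returns true on the first match; lists of
+%% strings on both sides are an unsupported combination and yield false.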
+
+log_match(String, RE, Result) ->
+ ?L1("evaluated match \"~s\" against RE \"~s\": ~s",
+ [format_multi_attr(String),
+ format_multi_attr(RE), Result]).
+
+object_exists(DN, Filter, LDAP) ->
+ case eldap:search(LDAP,
+ [{base, DN},
+ {filter, Filter},
+ {attributes, ["objectClass"]}, %% Reduce verbiage
+ {scope, eldap:baseObject()}]) of
+ {ok, {referral, Referrals}} ->
+ {error, {referrals_not_supported, Referrals}};
+ {ok, #eldap_search_result{entries = Entries}} ->
+ length(Entries) > 0;
+ {error, _} = E ->
+ E
+ end.
+
+attribute(DN, AttributeName, LDAP) ->
+ case eldap:search(LDAP,
+ [{base, DN},
+ {filter, eldap:present("objectClass")},
+ {attributes, [AttributeName]}]) of
+ {ok, {referral, Referrals}} ->
+ {error, {referrals_not_supported, Referrals}};
+ {ok, #eldap_search_result{entries = E = [#eldap_entry{}|_]}} ->
+ get_attributes(AttributeName, E);
+ {ok, #eldap_search_result{entries = _}} ->
+ {error, not_found};
+ {error, _} = E ->
+ E
+ end.
+
+evaluate_ldap(Q, Args, User) ->
+ with_ldap(creds(User), fun(LDAP) -> evaluate(Q, Args, User, LDAP) end).
+
+%%--------------------------------------------------------------------
+
+with_ldap(Creds, Fun) -> with_ldap(Creds, Fun, env(servers)).
+
+with_ldap(_Creds, _Fun, undefined) ->
+ {error, no_ldap_servers_defined};
+
+with_ldap({error, _} = E, _Fun, _State) ->
+ E;
+
+%% TODO - while we now pool LDAP connections, we don't make any attempt
+%% to avoid rebinding if the connection is already bound as the user
+%% of interest, so this could still be more efficient.
+with_ldap({ok, Creds}, Fun, Servers) ->
+ Opts0 = [{port, env(port)},
+ {idle_timeout, env(idle_timeout)},
+ {anon_auth, env(anon_auth)}],
+ Opts1 = case env(log) of
+ network ->
+ Pre = " LDAP network traffic: ",
+ rabbit_log_ldap:info(
+ " LDAP connecting to servers: ~p~n", [Servers]),
+ [{log, fun(1, S, A) -> rabbit_log_ldap:warning(Pre ++ S, A);
+ (2, S, A) ->
+ rabbit_log_ldap:info(Pre ++ S, scrub_creds(A, []))
+ end} | Opts0];
+ network_unsafe ->
+ Pre = " LDAP network traffic: ",
+ rabbit_log_ldap:info(
+ " LDAP connecting to servers: ~p~n", [Servers]),
+ [{log, fun(1, S, A) -> rabbit_log_ldap:warning(Pre ++ S, A);
+ (2, S, A) -> rabbit_log_ldap:info( Pre ++ S, A)
+ end} | Opts0];
+ _ ->
+ Opts0
+ end,
+ %% eldap defaults to 'infinity' but doesn't allow you to set that. Harrumph.
+ Opts = case env(timeout) of
+ infinity -> Opts1;
+ MS -> [{timeout, MS} | Opts1]
+ end,
+
+ worker_pool:submit(
+ ldap_pool,
+ fun () ->
+ case with_login(Creds, Servers, Opts, Fun) of
+ {error, {gen_tcp_error, _}} ->
+ purge_connection(Creds, Servers, Opts),
+ with_login(Creds, Servers, Opts, Fun);
+ Result -> Result
+ end
+ end, reuse).
+
+with_login(Creds, Servers, Opts, Fun) ->
+ with_login(Creds, Servers, Opts, Fun, ?LDAP_OPERATION_RETRIES).
+with_login(_Creds, _Servers, _Opts, _Fun, 0 = _RetriesLeft) ->
+ rabbit_log_ldap:warning("LDAP failed to perform an operation. TCP connection to a LDAP server was closed or otherwise defunct. Exhausted all retries."),
+ {error, ldap_connect_error};
+with_login(Creds, Servers, Opts, Fun, RetriesLeft) ->
+ case get_or_create_conn(Creds == anon, Servers, Opts) of
+ {ok, {ConnType, LDAP}} ->
+ Result = case Creds of
+ anon ->
+ ?L1("anonymous bind", []),
+ case call_ldap_fun(Fun, LDAP) of
+ {error, ldap_closed} ->
+ purge_connection(Creds, Servers, Opts),
+ with_login(Creds, Servers, Opts, Fun, RetriesLeft - 1);
+ Other -> Other
+ end;
+ {UserDN, Password} ->
+ case eldap:simple_bind(LDAP, UserDN, Password) of
+ ok ->
+ ?L1("bind succeeded: ~s",
+ [scrub_dn(UserDN, env(log))]),
+ case call_ldap_fun(Fun, LDAP, UserDN) of
+ {error, ldap_closed} ->
+ with_login(Creds, Servers, Opts, Fun, RetriesLeft - 1);
+ {error, {gen_tcp_error, _}} ->
+ with_login(Creds, Servers, Opts, Fun, RetriesLeft - 1);
+ Other -> Other
+ end;
+ {error, invalidCredentials} ->
+ ?L1("bind returned \"invalid credentials\": ~s",
+ [scrub_dn(UserDN, env(log))]),
+ {refused, UserDN, []};
+ {error, ldap_closed} ->
+ purge_connection(Creds, Servers, Opts),
+ with_login(Creds, Servers, Opts, Fun, RetriesLeft - 1);
+ {error, {gen_tcp_error, _}} ->
+ purge_connection(Creds, Servers, Opts),
+ with_login(Creds, Servers, Opts, Fun, RetriesLeft - 1);
+ {error, E} ->
+ ?L1("bind error: ~p ~p",
+ [scrub_dn(UserDN, env(log)), E]),
+ %% Do not report internal bind error to a client
+ {error, ldap_bind_error}
+ end
+ end,
+ ok = case ConnType of
+ eldap_transient -> eldap:close(LDAP);
+ _ -> ok
+ end,
+ Result;
+ Error ->
+ ?L1("connect error: ~p", [Error]),
+ case Error of
+ {error, {gen_tcp_error, _}} -> Error;
+ %% Do not report internal connection error to a client
+ _Other -> {error, ldap_connect_error}
+ end
+ end.
+
+purge_connection(Creds, Servers, Opts) ->
+ %% purge and retry with a new connection
+ rabbit_log_ldap:warning("TCP connection to a LDAP server was closed or otherwise defunct."),
+ purge_conn(Creds == anon, Servers, Opts),
+ rabbit_log_ldap:warning("LDAP will retry with a new connection.").
+
+call_ldap_fun(Fun, LDAP) ->
+ call_ldap_fun(Fun, LDAP, "").
+
+call_ldap_fun(Fun, LDAP, UserDN) ->
+ case Fun(LDAP) of
+ {error, ldap_closed} ->
+            %% LDAP connection was closed; let with_login/5 retry
+ {error, ldap_closed};
+ {error, {gen_tcp_error, E}} ->
+ %% ditto
+ {error, {gen_tcp_error, E}};
+ {error, E} ->
+ ?L1("evaluate error: ~s ~p", [scrub_dn(UserDN, env(log)), E]),
+ {error, ldap_evaluate_error};
+ Other -> Other
+ end.
+
+%% Gets either the anonymous or bound (authenticated) connection
+get_or_create_conn(IsAnon, Servers, Opts) ->
+ Conns = case get(ldap_conns) of
+ undefined -> #{};
+ Dict -> Dict
+ end,
+ Key = {IsAnon, Servers, Opts},
+ case maps:find(Key, Conns) of
+ {ok, Conn} ->
+ Timeout = rabbit_misc:pget(idle_timeout, Opts, infinity),
+ %% Defer the timeout by re-setting it.
+ set_connection_timeout(Key, Timeout),
+ {ok, {eldap_pooled, Conn}};
+ error ->
+ {Timeout, EldapOpts} = case lists:keytake(idle_timeout, 1, Opts) of
+ false -> {infinity, Opts};
+ {value, {idle_timeout, T}, EOpts} -> {T, EOpts}
+ end,
+ case {eldap_open(Servers, EldapOpts), Timeout} of
+ %% If the timeout was set to 0, treat it as a one-off connection.
+ %% See rabbitmq/rabbitmq-auth-backend-ldap#120 for background.
+ {{ok, Conn}, 0} ->
+ {ok, {eldap_transient, Conn}};
+ %% Non-zero timeout, put it in the pool
+ {{ok, Conn}, Timeout} ->
+ put(ldap_conns, maps:put(Key, Conn, Conns)),
+ set_connection_timeout(Key, Timeout),
+ {ok, {eldap_pooled, Conn}};
+ {Error, _} ->
+ Error
+ end
+ end.
+
+set_connection_timeout(_, infinity) ->
+ ok;
+set_connection_timeout(Key, Timeout) when is_integer(Timeout) ->
+ worker_pool_worker:set_timeout(Key, Timeout,
+ fun() ->
+ Conns = case get(ldap_conns) of
+ undefined -> #{};
+ Dict -> Dict
+ end,
+ case maps:find(Key, Conns) of
+ {ok, Conn} ->
+ eldap:close(Conn),
+ put(ldap_conns, maps:remove(Key, Conns));
+ _ -> ok
+ end
+ end).
+
+%% Get attribute(s) from eldap entry
+get_attributes(_AttrName, []) -> {error, not_found};
+get_attributes(AttrName, [#eldap_entry{attributes = A}|Rem]) ->
+ case pget(AttrName, A) of
+ [Attr|[]] -> Attr;
+ Attrs when length(Attrs) > 1 -> Attrs;
+ _ -> get_attributes(AttrName, Rem)
+ end;
+get_attributes(AttrName, [_|Rem]) -> get_attributes(AttrName, Rem).
+
+%% Format multiple attribute values for logging
+format_multi_attr(Attrs) ->
+ format_multi_attr(io_lib:printable_list(Attrs), Attrs).
+
+format_multi_attr(true, Attrs) -> Attrs;
+format_multi_attr(_, Attrs) when is_list(Attrs) -> string:join(Attrs, "; ");
+format_multi_attr(_, Error) -> Error.
+
+
+%% In case of multiple attributes, check for equality bi-directionally
+is_multi_attr_member(Str1, Str2) ->
+ lists:member(Str1, Str2) orelse lists:member(Str2, Str1).
+
+purge_conn(IsAnon, Servers, Opts) ->
+ Conns = get(ldap_conns),
+ Key = {IsAnon, Servers, Opts},
+ {ok, Conn} = maps:find(Key, Conns),
+ rabbit_log_ldap:warning("LDAP will purge an already closed or defunct LDAP server connection from the pool"),
+ % We cannot close the connection with eldap:close/1 because as of OTP-13327
+ % eldap will try to do_unbind first and will fail with a `{gen_tcp_error, closed}`.
+ % Since we know that the connection is already closed, we just
+ % kill its process.
+ unlink(Conn),
+ exit(Conn, closed),
+ put(ldap_conns, maps:remove(Key, Conns)),
+ ok.
+
+eldap_open(Servers, Opts) ->
+ case eldap:open(Servers, ssl_conf() ++ Opts) of
+ {ok, LDAP} ->
+ TLS = env(use_starttls),
+ case {TLS, at_least("5.10.4")} of %%R16B03
+ {false, _} -> {ok, LDAP};
+ {true, false} -> exit({starttls_requires_min_r16b3});
+ {true, _} -> TLSOpts = ssl_options(),
+ ELDAP = eldap, %% Fool xref
+ case ELDAP:start_tls(LDAP, TLSOpts) of
+ ok -> {ok, LDAP};
+ Error -> Error
+ end
+ end;
+ Error ->
+ Error
+ end.
+
+ssl_conf() ->
+ %% We must make sure not to add SSL options unless a) we have at least R16A
+ %% b) we have SSL turned on (or it breaks StartTLS...)
+ case env(use_ssl) of
+ false -> [{ssl, false}];
+ true -> %% Only the unfixed version can be []
+ case {env(ssl_options), at_least("5.10")} of %% R16A
+ {_, true} -> [{ssl, true}, {sslopts, ssl_options()}];
+ {[], _} -> [{ssl, true}];
+ {_, false} -> exit({ssl_options_requires_min_r16a})
+ end
+ end.
+
+ssl_options() ->
+ rabbit_networking:fix_ssl_options(env(ssl_options)).
+
+at_least(Ver) ->
+ rabbit_misc:version_compare(erlang:system_info(version), Ver) =/= lt.
+
+%% Note: the default is configured in the Makefile.
+get_expected_env_str(Key, Default) ->
+ V = case env(Key) of
+ Default ->
+ rabbit_log_ldap:warning("rabbitmq_auth_backend_ldap configuration key '~p' is set to "
+ "the default value of '~p', expected to get a non-default value~n",
+ [Key, Default]),
+ Default;
+ V0 ->
+ V0
+ end,
+ rabbit_data_coercion:to_list(V).
+
+env(F) ->
+ {ok, V} = application:get_env(rabbitmq_auth_backend_ldap, F),
+ V.
+
+login_fun(User, UserDN, Password, AuthProps) ->
+ fun(L) -> case pget(vhost, AuthProps) of
+ undefined -> do_login(User, UserDN, Password, L);
+ VHost -> do_login(User, UserDN, Password, VHost, L)
+ end
+ end.
+
+do_login(Username, PrebindUserDN, Password, LDAP) ->
+ do_login(Username, PrebindUserDN, Password, <<>>, LDAP).
+
+do_login(Username, PrebindUserDN, Password, VHost, LDAP) ->
+ UserDN = case PrebindUserDN of
+ unknown -> username_to_dn(Username, LDAP, dn_lookup_when());
+ _ -> PrebindUserDN
+ end,
+ User = #auth_user{username = Username,
+ impl = #impl{user_dn = UserDN,
+ password = Password}},
+ DTQ = fun (LDAPn) -> do_tag_queries(Username, UserDN, User, VHost, LDAPn) end,
+ TagRes = case env(other_bind) of
+ as_user -> DTQ(LDAP);
+ _ -> with_ldap(creds(User), DTQ)
+ end,
+ case TagRes of
+ {ok, L} -> {ok, User#auth_user{tags = [Tag || {Tag, true} <- L]}};
+ E -> E
+ end.
+
+do_tag_queries(Username, UserDN, User, VHost, LDAP) ->
+ {ok, [begin
+ ?L1("CHECK: does ~s have tag ~s?", [Username, Tag]),
+ VhostArgs = vhost_if_defined(VHost),
+ ADArgs = rabbit_auth_backend_ldap_util:get_active_directory_args(Username),
+ EvalArgs = [{username, Username}, {user_dn, UserDN}] ++ VhostArgs ++ ADArgs,
+ R = evaluate(Q, EvalArgs, User, LDAP),
+ ?L1("DECISION: does ~s have tag ~s? ~p",
+ [Username, Tag, R]),
+ {Tag, R}
+ end || {Tag, Q} <- env(tag_queries)]}.
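+
+%% For illustration (hypothetical configuration): with
+%%   {tag_queries, [{administrator, {in_group, "cn=admins,dc=example,dc=com"}},
+%%                  {management,    {constant, true}}]}
+%% a user in the admins group ends up with tags [administrator, management].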
+
+vhost_if_defined([]) -> [];
+vhost_if_defined(<<>>) -> [];
+vhost_if_defined(VHost) -> [{vhost, VHost}].
+
+dn_lookup_when() ->
+ case {env(dn_lookup_attribute), env(dn_lookup_bind)} of
+ {none, _} ->
+ never;
+ {_, as_user} ->
+ postbind;
+ %% make it more obvious what the invariants are,
+ %% see rabbitmq/rabbitmq-auth-backend-ldap#94. MK.
+ {_, anon} ->
+ prebind;
+ {_, _} ->
+ prebind
+ end.
+
+username_to_dn_prebind(Username) ->
+ with_ldap({ok, env(dn_lookup_bind)},
+ fun (LDAP) -> dn_lookup(Username, LDAP) end).
+
+username_to_dn(Username, LDAP, postbind) -> dn_lookup(Username, LDAP);
+username_to_dn(Username, _LDAP, _When) -> fill_user_dn_pattern(Username).
+
+dn_lookup(Username, LDAP) ->
+ Filled = fill_user_dn_pattern(Username),
+ DnLookupBase = get_expected_env_str(dn_lookup_base, none),
+ DnLookupAttribute = get_expected_env_str(dn_lookup_attribute, none),
+ case eldap:search(LDAP,
+ [{base, DnLookupBase},
+ {filter, eldap:equalityMatch(DnLookupAttribute, Filled)},
+ {attributes, ["distinguishedName"]}]) of
+ {ok, {referral, Referrals}} ->
+ {error, {referrals_not_supported, Referrals}};
+ {ok, #eldap_search_result{entries = [#eldap_entry{object_name = DN}]}}->
+ ?L1("DN lookup: ~s -> ~s", [Username, DN]),
+ DN;
+ {ok, #eldap_search_result{entries = Entries}} ->
+ rabbit_log_ldap:warning("Searching for DN for ~s, got back ~p~n",
+ [Filled, Entries]),
+ Filled;
+ {error, _} = E ->
+ exit(E)
+ end.
+
+fill_user_dn_pattern(Username) ->
+ ADArgs = rabbit_auth_backend_ldap_util:get_active_directory_args(Username),
+ fill(env(user_dn_pattern), [{username, Username}] ++ ADArgs).
+
+simple_bind_fill_pattern(Username) ->
+ simple_bind_fill_pattern(env(user_bind_pattern), Username).
+
+simple_bind_fill_pattern(none, Username) ->
+ fill_user_dn_pattern(Username);
+simple_bind_fill_pattern(Pattern, Username) ->
+ ADArgs = rabbit_auth_backend_ldap_util:get_active_directory_args(Username),
+ fill(Pattern, [{username, Username}] ++ ADArgs).
+
+creds(User) -> creds(User, env(other_bind)).
+
+creds(none, as_user) ->
+ {error, "'other_bind' set to 'as_user' but no password supplied"};
+creds(#auth_user{impl = #impl{user_dn = UserDN, password = PW}}, as_user) ->
+ {ok, {UserDN, PW}};
+creds(_, Creds) ->
+ {ok, Creds}.
+
+%% Scrub credentials
+scrub_creds([], Acc) -> lists:reverse(Acc);
+scrub_creds([H|Rem], Acc) ->
+ scrub_creds(Rem, [scrub_payload_creds(H)|Acc]).
+
+%% Scrub credentials from specific payloads
+scrub_payload_creds({'BindRequest', N, DN, {simple, _PWD}}) ->
+ {'BindRequest', N, scrub_dn(DN), {simple, ?SCRUBBED_CREDENTIAL}};
+scrub_payload_creds(Any) -> Any.
+
+scrub_dn(DN) -> scrub_dn(DN, network).
+
+scrub_dn(DN, network_unsafe) -> DN;
+scrub_dn(DN, false) -> DN;
+scrub_dn(DN, _) ->
+ case is_dn(DN) of
+ true -> scrub_rdn(string:tokens(DN, ","), []);
+ _ ->
+            %% We aren't fully certain it's a DN and don't know what sensitive
+            %% info it could contain, so just scrub the entire credential
+ ?SCRUBBED_CREDENTIAL
+ end.
+
+scrub_rdn([], Acc) ->
+ string:join(lists:reverse(Acc), ",");
+scrub_rdn([DN|Rem], Acc) ->
+ DN0 = case catch string:tokens(DN, "=") of
+ L = [RDN, _] -> case string:to_lower(RDN) of
+ "cn" -> [RDN, ?SCRUBBED_CREDENTIAL];
+ "dc" -> [RDN, ?SCRUBBED_CREDENTIAL];
+ "ou" -> [RDN, ?SCRUBBED_CREDENTIAL];
+ "uid" -> [RDN, ?SCRUBBED_CREDENTIAL];
+ _ -> L
+ end;
+ _Any ->
+ %% There's no RDN, log "xxxx=xxxx"
+ [?SCRUBBED_CREDENTIAL, ?SCRUBBED_CREDENTIAL]
+ end,
+ scrub_rdn(Rem, [string:join(DN0, "=")|Acc]).
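+
+%% For illustration: scrub_dn("cn=alice,ou=People,dc=example,dc=com", network)
+%% keeps the attribute names but scrubs their values, returning
+%% "cn=xxxx,ou=xxxx,dc=xxxx,dc=xxxx".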
+
+is_dn(S) when is_list(S) ->
+ case catch string:tokens(to_list(S), "=") of
+ L when length(L) > 1 -> true;
+ _ -> false
+ end;
+is_dn(_S) -> false.
+
+to_list(S) when is_list(S) -> S;
+to_list(S) when is_binary(S) -> binary_to_list(S);
+to_list(S) when is_atom(S) -> atom_to_list(S);
+to_list(S) -> {error, {badarg, S}}.
+
+log(Fmt, Args) -> case env(log) of
+ false -> ok;
+ _ -> rabbit_log_ldap:info(Fmt ++ "~n", Args)
+ end.
+
+fill(Fmt, Args) ->
+ ?L2("filling template \"~s\" with~n ~p", [Fmt, Args]),
+ R = rabbit_auth_backend_ldap_util:fill(Fmt, Args),
+ ?L2("template result: \"~s\"", [R]),
+ R.
+
+log_result({ok, #auth_user{}}) -> ok;
+log_result(true) -> ok;
+log_result(false) -> denied;
+log_result({refused, _, _}) -> denied;
+log_result(E) -> E.
+
+log_user(#auth_user{username = U}) -> rabbit_misc:format("\"~s\"", [U]).
+
+log_vhost(Args) ->
+ rabbit_misc:format("access to vhost \"~s\"", [pget(vhost, Args)]).
+
+log_resource(Args) ->
+ rabbit_misc:format("~s permission for ~s \"~s\" in \"~s\"",
+ [pget(permission, Args), pget(resource, Args),
+ pget(name, Args), pget(vhost, Args)]).
diff --git a/deps/rabbitmq_auth_backend_ldap/src/rabbit_auth_backend_ldap_app.erl b/deps/rabbitmq_auth_backend_ldap/src/rabbit_auth_backend_ldap_app.erl
new file mode 100644
index 0000000000..74c5a51598
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_ldap/src/rabbit_auth_backend_ldap_app.erl
@@ -0,0 +1,55 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_auth_backend_ldap_app).
+
+-behaviour(application).
+-export([start/2, stop/1]).
+
+%% Dummy supervisor to get this application behaviour working
+-behaviour(supervisor).
+-export([create_ldap_pool/0, init/1]).
+
+-rabbit_boot_step({ldap_pool,
+ [{description, "LDAP pool"},
+ {mfa, {?MODULE, create_ldap_pool, []}},
+ {requires, kernel_ready}]}).
+
+create_ldap_pool() ->
+ {ok, PoolSize} = application:get_env(rabbitmq_auth_backend_ldap, pool_size),
+ rabbit_sup:start_supervisor_child(ldap_pool_sup, worker_pool_sup, [PoolSize, ldap_pool]).
+
+start(_Type, _StartArgs) ->
+ {ok, Backends} = application:get_env(rabbit, auth_backends),
+ case configured(rabbit_auth_backend_ldap, Backends) of
+ true -> ok;
+ false -> rabbit_log_ldap:warning(
+ "LDAP plugin loaded, but rabbit_auth_backend_ldap is not "
+ "in the list of auth_backends. LDAP auth will not work.")
+ end,
+ {ok, SSL} = application:get_env(rabbitmq_auth_backend_ldap, use_ssl),
+ {ok, TLS} = application:get_env(rabbitmq_auth_backend_ldap, use_starttls),
+ case SSL orelse TLS of
+ true ->
+ rabbit_networking:ensure_ssl(),
+ ok;
+ false -> ok
+ end,
+ supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+stop(_State) ->
+ ok.
+
+configured(_M, []) -> false;
+configured(M, [M |_]) -> true;
+configured(M, [{M,_}|_]) -> true;
+configured(M, [{_,M}|_]) -> true;
+configured(M, [_ |T]) -> configured(M, T).
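+
+%% For illustration: configured/2 matches the backend whether it is listed
+%% alone or inside an {AuthN, AuthZ} pair, so
+%%   configured(rabbit_auth_backend_ldap,
+%%              [{rabbit_auth_backend_ldap, rabbit_auth_backend_internal}])
+%% returns true.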
+
+%%----------------------------------------------------------------------------
+
+init([]) -> {ok, {{one_for_one, 3, 10}, []}}.
diff --git a/deps/rabbitmq_auth_backend_ldap/src/rabbit_auth_backend_ldap_util.erl b/deps/rabbitmq_auth_backend_ldap/src/rabbit_auth_backend_ldap_util.erl
new file mode 100644
index 0000000000..1609255cc6
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_ldap/src/rabbit_auth_backend_ldap_util.erl
@@ -0,0 +1,34 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_auth_backend_ldap_util).
+
+-export([fill/2, get_active_directory_args/1]).
+
+fill(Fmt, []) ->
+ binary_to_list(iolist_to_binary(Fmt));
+
+fill(Fmt, [{K, V} | T]) ->
+ Var = [[$\\, $$, ${] ++ atom_to_list(K) ++ [$}]],
+ fill(re:replace(Fmt, Var, [to_repl(V)], [global]), T).
+
+to_repl(V) when is_atom(V) -> to_repl(atom_to_list(V));
+to_repl(V) when is_binary(V) -> to_repl(binary_to_list(V));
+to_repl([]) -> [];
+to_repl([$\\ | T]) -> [$\\, $\\ | to_repl(T)];
+to_repl([$& | T]) -> [$\\, $& | to_repl(T)];
+to_repl([H | T]) -> [H | to_repl(T)];
+to_repl(_) -> []. % fancy variables like peer IP are just ignored
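+%% For example (illustrative values): fill("cn=${username},ou=people",
+%% [{username, <<"a\\b">>}]) returns "cn=a\\b,ou=people"; to_repl/1
+%% escapes backslashes and ampersands so re:replace/4 treats the
+%% substituted value literally.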
+
+get_active_directory_args([ADDomain, ADUser]) ->
+ [{ad_domain, ADDomain}, {ad_user, ADUser}];
+get_active_directory_args(Parts) when is_list(Parts) ->
+ [];
+get_active_directory_args(Username) when is_binary(Username) ->
+ % If Username is in Domain\User format, provide additional fill
+ % template arguments
+ get_active_directory_args(binary:split(Username, <<"\\">>, [trim_all])).
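+
+%% For example (illustrative): get_active_directory_args(<<"CORP\\alice">>)
+%% returns [{ad_domain, <<"CORP">>}, {ad_user, <<"alice">>}]; usernames
+%% without a backslash contribute no extra fill arguments.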
diff --git a/deps/rabbitmq_auth_backend_ldap/test/config_schema_SUITE.erl b/deps/rabbitmq_auth_backend_ldap/test/config_schema_SUITE.erl
new file mode 100644
index 0000000000..1bc6136178
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_ldap/test/config_schema_SUITE.erl
@@ -0,0 +1,55 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2016-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(config_schema_SUITE).
+
+-compile(export_all).
+
+all() ->
+ [
+ run_snippets
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ Config1 = rabbit_ct_helpers:run_setup_steps(Config),
+ rabbit_ct_config_schema:init_schemas(rabbitmq_auth_backend_ldap, Config1).
+
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, Testcase}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_testcase(Testcase, Config) ->
+ Config1 = rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()),
+ rabbit_ct_helpers:testcase_finished(Config1, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
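+%% The snippets are applied and checked on the broker node itself,
+%% hence the RPC into node 0 below.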
+run_snippets(Config) ->
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, run_snippets1, [Config]).
+
+run_snippets1(Config) ->
+ rabbit_ct_config_schema:run_snippets(Config).
+
diff --git a/deps/rabbitmq_auth_backend_ldap/test/config_schema_SUITE_data/certs/cacert.pem b/deps/rabbitmq_auth_backend_ldap/test/config_schema_SUITE_data/certs/cacert.pem
new file mode 100644
index 0000000000..eaf6b67806
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_ldap/test/config_schema_SUITE_data/certs/cacert.pem
@@ -0,0 +1 @@
+I'm not a certificate
diff --git a/deps/rabbitmq_auth_backend_ldap/test/config_schema_SUITE_data/certs/cert.pem b/deps/rabbitmq_auth_backend_ldap/test/config_schema_SUITE_data/certs/cert.pem
new file mode 100644
index 0000000000..eaf6b67806
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_ldap/test/config_schema_SUITE_data/certs/cert.pem
@@ -0,0 +1 @@
+I'm not a certificate
diff --git a/deps/rabbitmq_auth_backend_ldap/test/config_schema_SUITE_data/certs/key.pem b/deps/rabbitmq_auth_backend_ldap/test/config_schema_SUITE_data/certs/key.pem
new file mode 100644
index 0000000000..eaf6b67806
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_ldap/test/config_schema_SUITE_data/certs/key.pem
@@ -0,0 +1 @@
+I'm not a certificate
diff --git a/deps/rabbitmq_auth_backend_ldap/test/config_schema_SUITE_data/rabbitmq_auth_backend_ldap.snippets b/deps/rabbitmq_auth_backend_ldap/test/config_schema_SUITE_data/rabbitmq_auth_backend_ldap.snippets
new file mode 100644
index 0000000000..7e4ba70cec
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_ldap/test/config_schema_SUITE_data/rabbitmq_auth_backend_ldap.snippets
@@ -0,0 +1,276 @@
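+%% Each entry is {Name, ConfSnippet, ExpectedConfig, PluginsToEnable};
+%% entries with an extra element before ExpectedConfig also carry an
+%% advanced.config part (format assumed from rabbit_ct_config_schema).
+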
+[{ldap_servers,
+ "auth_ldap.servers.1 = DC1.domain.com
+ auth_ldap.servers.2 = DC1.eng.domain.com",
+ [{rabbitmq_auth_backend_ldap,
+ [{servers,["DC1.domain.com","DC1.eng.domain.com"]}]}],
+ [rabbitmq_auth_backend_ldap]},
+ {ldap_servers_short,
+ "auth_ldap.servers.1 = hostname1
+ auth_ldap.servers.2 = hostname2",
+ [{rabbitmq_auth_backend_ldap,[{servers,["hostname1","hostname2"]}]}],
+ [rabbitmq_auth_backend_ldap]},
+
+ {ldap_port,
+ "auth_ldap.port = 1234",
+ [{rabbitmq_auth_backend_ldap,[
+ {port, 1234}
+ ]}],
+ [rabbitmq_auth_backend_ldap]},
+
+ {ldap_connection_pool_size,
+ "auth_ldap.connection_pool_size = 128",
+ [{rabbitmq_auth_backend_ldap,[
+ {pool_size, 128}
+ ]}],
+ [rabbitmq_auth_backend_ldap]},
+
+ {ldap_timeouts,
+ "auth_ldap.timeout = 50000
+ auth_ldap.idle_timeout = 90000",
+ [{rabbitmq_auth_backend_ldap,[
+ {timeout, 50000},
+ {idle_timeout, 90000}
+ ]}],
+ [rabbitmq_auth_backend_ldap]},
+
+ {ldap_use_ssl_true,
+ "auth_ldap.use_ssl = true",
+ [{rabbitmq_auth_backend_ldap,[
+ {use_ssl, true}
+ ]}],
+ [rabbitmq_auth_backend_ldap]},
+
+ {ldap_use_ssl_false,
+ "auth_ldap.use_ssl = false",
+ [{rabbitmq_auth_backend_ldap,[
+ {use_ssl, false}
+ ]}],
+ [rabbitmq_auth_backend_ldap]},
+
+ {ldap_use_starttls_true,
+ "auth_ldap.use_starttls = true",
+ [{rabbitmq_auth_backend_ldap,[
+ {use_starttls, true}
+ ]}],
+ [rabbitmq_auth_backend_ldap]},
+
+ {ldap_log_false,
+ "auth_ldap.log = false",
+ [{rabbitmq_auth_backend_ldap,[
+ {log, false}
+ ]}],
+ [rabbitmq_auth_backend_ldap]},
+
+ {ldap_log_true,
+ "auth_ldap.log = true",
+ [{rabbitmq_auth_backend_ldap,[
+ {log, true}
+ ]}],
+ [rabbitmq_auth_backend_ldap]},
+
+ {ldap_log_network,
+ "auth_ldap.log = network",
+ [{rabbitmq_auth_backend_ldap,[
+ {log, network}
+ ]}],
+ [rabbitmq_auth_backend_ldap]},
+
+ {ldap_log_network_unsafe,
+ "auth_ldap.log = network_unsafe",
+ [{rabbitmq_auth_backend_ldap,[
+ {log, network_unsafe}
+ ]}],
+ [rabbitmq_auth_backend_ldap]},
+
+ {user_dn_pattern,
+ "auth_ldap.user_dn_pattern = ${ad_user}-${ad_domain}",
+ [{rabbitmq_auth_backend_ldap,
+ [{user_dn_pattern, "${ad_user}-${ad_domain}"}]}],
+ [rabbitmq_auth_backend_ldap]},
+
+ {user_bind_pattern,
+ "auth_ldap.user_bind_pattern = ${ad_user}-${ad_domain}",
+ [{rabbitmq_auth_backend_ldap,
+ [{user_bind_pattern, "${ad_user}-${ad_domain}"}]}],
+ [rabbitmq_auth_backend_ldap]},
+
+ {group_lookup_base,
+ "auth_ldap.group_lookup_base = DC=gopivotal,DC=com",
+ [{rabbitmq_auth_backend_ldap,
+ [{group_lookup_base, "DC=gopivotal,DC=com"}]}],
+ [rabbitmq_auth_backend_ldap]},
+
+ {dn_lookup,
+ "auth_ldap.dn_lookup_attribute = userPrincipalName
+ auth_ldap.dn_lookup_base = DC=gopivotal,DC=com
+ auth_ldap.dn_lookup_bind = as_user",
+ [{rabbitmq_auth_backend_ldap,
+ [{dn_lookup_attribute,"userPrincipalName"},
+ {dn_lookup_base,"DC=gopivotal,DC=com"},
+ {dn_lookup_bind,as_user}]}],
+ [rabbitmq_auth_backend_ldap]},
+
+ {dn_lookup_bind,
+ "auth_ldap.dn_lookup_bind.user_dn = username
+ auth_ldap.dn_lookup_bind.password = password",
+ [{rabbitmq_auth_backend_ldap,[{dn_lookup_bind,{"username","password"}}]}],
+ [rabbitmq_auth_backend_ldap]},
+
+ {dn_lookup_bind_anon,
+ "auth_ldap.dn_lookup_bind = anon",
+ [{rabbitmq_auth_backend_ldap,[{dn_lookup_bind,anon}]}],
+ [rabbitmq_auth_backend_ldap]},
+
+ {other_bind_anon,
+ "auth_ldap.other_bind = anon",
+ [{rabbitmq_auth_backend_ldap,[{other_bind,anon}]}],
+ [rabbitmq_auth_backend_ldap]},
+
+ {both_binds_anon,
+ "auth_ldap.dn_lookup_bind = anon
+ auth_ldap.other_bind = anon",
+ [{rabbitmq_auth_backend_ldap,[{dn_lookup_bind,anon},
+ {other_bind,anon}]}],
+ [rabbitmq_auth_backend_ldap]},
+
+ {other_bind_as_user,
+ "auth_ldap.other_bind = as_user",
+ [{rabbitmq_auth_backend_ldap,[{other_bind,as_user}]}],
+ [rabbitmq_auth_backend_ldap]},
+
+ {other_bind_pass,
+ "auth_ldap.other_bind.user_dn = username
+ auth_ldap.other_bind.password = password",
+ [{rabbitmq_auth_backend_ldap,[{other_bind,{"username","password"}}]}],
+ [rabbitmq_auth_backend_ldap]},
+
+ {ssl_options,
+ "auth_ldap.use_ssl = true
+ auth_ldap.ssl_options.cacertfile = test/config_schema_SUITE_data/certs/cacert.pem
+ auth_ldap.ssl_options.certfile = test/config_schema_SUITE_data/certs/cert.pem
+ auth_ldap.ssl_options.keyfile = test/config_schema_SUITE_data/certs/key.pem
+ auth_ldap.ssl_options.verify = verify_peer
+ auth_ldap.ssl_options.fail_if_no_peer_cert = true",
+ [{rabbitmq_auth_backend_ldap, [
+ {use_ssl, true},
+ {ssl_options,
+ [{cacertfile, "test/config_schema_SUITE_data/certs/cacert.pem"},
+ {certfile, "test/config_schema_SUITE_data/certs/cert.pem"},
+ {keyfile, "test/config_schema_SUITE_data/certs/key.pem"},
+ {verify, verify_peer},
+ {fail_if_no_peer_cert, true}]}
+ ]}],
+ [rabbitmq_auth_backend_ldap]},
+
+ {ssl_options_verify_peer,
+ "auth_ldap.use_ssl = true
+ auth_ldap.ssl_options.cacertfile = test/config_schema_SUITE_data/certs/cacert.pem
+ auth_ldap.ssl_options.certfile = test/config_schema_SUITE_data/certs/cert.pem
+ auth_ldap.ssl_options.keyfile = test/config_schema_SUITE_data/certs/key.pem
+ auth_ldap.ssl_options.verify = verify_peer
+ auth_ldap.ssl_options.fail_if_no_peer_cert = false",
+ [{rabbitmq_auth_backend_ldap,
+ [{use_ssl, true},
+ {ssl_options,
+ [{cacertfile,"test/config_schema_SUITE_data/certs/cacert.pem"},
+ {certfile,"test/config_schema_SUITE_data/certs/cert.pem"},
+ {keyfile,"test/config_schema_SUITE_data/certs/key.pem"},
+ {verify,verify_peer},
+ {fail_if_no_peer_cert,false}]}]}],
+ []},
+ {ssl_options_password,
+ "auth_ldap.use_ssl = true
+ auth_ldap.ssl_options.cacertfile = test/config_schema_SUITE_data/certs/cacert.pem
+ auth_ldap.ssl_options.certfile = test/config_schema_SUITE_data/certs/cert.pem
+ auth_ldap.ssl_options.keyfile = test/config_schema_SUITE_data/certs/key.pem
+ auth_ldap.ssl_options.password = t0p$3kRe7",
+ [{rabbitmq_auth_backend_ldap,
+ [{use_ssl, true},
+ {ssl_options,
+ [{cacertfile,"test/config_schema_SUITE_data/certs/cacert.pem"},
+ {certfile,"test/config_schema_SUITE_data/certs/cert.pem"},
+ {keyfile,"test/config_schema_SUITE_data/certs/key.pem"},
+ {password,"t0p$3kRe7"}]}]}],
+ []},
+ {ssl_options_tls_versions,
+ "auth_ldap.use_ssl = true
+ auth_ldap.ssl_options.cacertfile = test/config_schema_SUITE_data/certs/cacert.pem
+ auth_ldap.ssl_options.certfile = test/config_schema_SUITE_data/certs/cert.pem
+ auth_ldap.ssl_options.keyfile = test/config_schema_SUITE_data/certs/key.pem
+ auth_ldap.ssl_options.versions.tls1_2 = tlsv1.2
+ auth_ldap.ssl_options.versions.tls1_1 = tlsv1.1",
+ [],
+ [{rabbitmq_auth_backend_ldap,
+ [{ssl_options,
+ [{cacertfile,"test/config_schema_SUITE_data/certs/cacert.pem"},
+ {certfile,"test/config_schema_SUITE_data/certs/cert.pem"},
+ {keyfile,"test/config_schema_SUITE_data/certs/key.pem"},
+ {versions,['tlsv1.2','tlsv1.1']}]},
+ {use_ssl, true}]}],
+ []},
+ {ssl_options_depth,
+ "auth_ldap.use_ssl = true
+ auth_ldap.ssl_options.cacertfile = test/config_schema_SUITE_data/certs/cacert.pem
+ auth_ldap.ssl_options.certfile = test/config_schema_SUITE_data/certs/cert.pem
+ auth_ldap.ssl_options.keyfile = test/config_schema_SUITE_data/certs/key.pem
+ auth_ldap.ssl_options.depth = 2
+ auth_ldap.ssl_options.verify = verify_peer
+ auth_ldap.ssl_options.fail_if_no_peer_cert = false",
+ [{rabbitmq_auth_backend_ldap,
+ [{use_ssl, true},
+ {ssl_options,
+ [{cacertfile,"test/config_schema_SUITE_data/certs/cacert.pem"},
+ {certfile,"test/config_schema_SUITE_data/certs/cert.pem"},
+ {keyfile,"test/config_schema_SUITE_data/certs/key.pem"},
+ {depth,2},
+ {verify,verify_peer},
+ {fail_if_no_peer_cert,false}]}]}],
+ []},
+ {ssl_options_honor_cipher_order,
+ "auth_ldap.use_ssl = true
+ auth_ldap.ssl_options.cacertfile = test/config_schema_SUITE_data/certs/cacert.pem
+ auth_ldap.ssl_options.certfile = test/config_schema_SUITE_data/certs/cert.pem
+ auth_ldap.ssl_options.keyfile = test/config_schema_SUITE_data/certs/key.pem
+ auth_ldap.ssl_options.depth = 2
+ auth_ldap.ssl_options.verify = verify_peer
+ auth_ldap.ssl_options.fail_if_no_peer_cert = false
+ auth_ldap.ssl_options.honor_cipher_order = true",
+ [{rabbitmq_auth_backend_ldap,
+ [{use_ssl, true},
+ {ssl_options,
+ [{cacertfile,"test/config_schema_SUITE_data/certs/cacert.pem"},
+ {certfile,"test/config_schema_SUITE_data/certs/cert.pem"},
+ {keyfile,"test/config_schema_SUITE_data/certs/key.pem"},
+ {depth,2},
+ {verify,verify_peer},
+ {fail_if_no_peer_cert, false},
+ {honor_cipher_order, true}]}]}],
+ []},
+ {ssl_options_honor_ecc_order,
+ "auth_ldap.use_ssl = true
+ auth_ldap.ssl_options.cacertfile = test/config_schema_SUITE_data/certs/cacert.pem
+ auth_ldap.ssl_options.certfile = test/config_schema_SUITE_data/certs/cert.pem
+ auth_ldap.ssl_options.keyfile = test/config_schema_SUITE_data/certs/key.pem
+ auth_ldap.ssl_options.depth = 2
+ auth_ldap.ssl_options.verify = verify_peer
+ auth_ldap.ssl_options.fail_if_no_peer_cert = false
+ auth_ldap.ssl_options.honor_ecc_order = true",
+ [{rabbitmq_auth_backend_ldap,
+ [{use_ssl, true},
+ {ssl_options,
+ [{cacertfile,"test/config_schema_SUITE_data/certs/cacert.pem"},
+ {certfile,"test/config_schema_SUITE_data/certs/cert.pem"},
+ {keyfile,"test/config_schema_SUITE_data/certs/key.pem"},
+ {depth,2},
+ {verify,verify_peer},
+ {fail_if_no_peer_cert, false},
+ {honor_ecc_order, true}]}]}],
+ []}
+
+].
diff --git a/deps/rabbitmq_auth_backend_ldap/test/rabbit_ldap_seed.erl b/deps/rabbitmq_auth_backend_ldap/test/rabbit_ldap_seed.erl
new file mode 100644
index 0000000000..d881250040
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_ldap/test/rabbit_ldap_seed.erl
@@ -0,0 +1,201 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_ldap_seed).
+
+-include_lib("eldap/include/eldap.hrl").
+
+-export([seed/1,delete/1]).
+
+seed(Logon) ->
+ H = connect(Logon),
+ ok = add(H, rabbitmq_com()),
+ ok = add(H, ou("people")),
+ [ add(H, P) || P <- people() ],
+ ok = add(H, ou("vhosts")),
+ ok = add(H, test()),
+ ok = add(H, ou("groups")),
+ [ add(H, P) || P <- groups() ],
+ eldap:close(H),
+ ok.
+
+rabbitmq_com() ->
+ {"dc=rabbitmq,dc=com",
+ [{"objectClass", ["dcObject", "organization"]},
+ {"dc", ["rabbitmq"]},
+ {"o", ["Test"]}]}.
+
+
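+%% Entries are removed leaf-first so that parent entries are already
+%% empty when they are deleted (LDAP refuses to delete non-leaf nodes).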
+delete(Logon) ->
+ H = connect(Logon),
+ eldap:delete(H, "ou=test,dc=rabbitmq,dc=com"),
+ eldap:delete(H, "ou=test,ou=vhosts,dc=rabbitmq,dc=com"),
+ eldap:delete(H, "ou=vhosts,dc=rabbitmq,dc=com"),
+ [ eldap:delete(H, P) || {P, _} <- groups() ],
+ [ eldap:delete(H, P) || {P, _} <- people() ],
+ eldap:delete(H, "ou=groups,dc=rabbitmq,dc=com"),
+ eldap:delete(H, "ou=people,dc=rabbitmq,dc=com"),
+ eldap:delete(H, "dc=rabbitmq,dc=com"),
+ eldap:close(H),
+ ok.
+
+people() ->
+ [ bob(),
+ dominic(),
+ charlie(),
+ edward(),
+ johndoe(),
+ alice(),
+ peter(),
+ carol(),
+ jimmy()
+ ].
+
+groups() ->
+ [wheel_group(),
+ people_group(),
+ staff_group(),
+ bobs_group(),
+ bobs2_group(),
+ admins_group()
+ ].
+
+wheel_group() ->
+ {A, _} = alice(),
+ {C, _} = charlie(),
+ {D, _} = dominic(),
+ {P, _} = peter(),
+ {"cn=wheel,ou=groups,dc=rabbitmq,dc=com",
+ [{"objectClass", ["groupOfNames"]},
+ {"cn", ["wheel"]},
+ {"member", [A, C, D, P]}]}.
+
+people_group() ->
+ {C, _} = charlie(),
+ {D, _} = dominic(),
+ {P, _} = peter(),
+ {"cn=people,ou=groups,dc=rabbitmq,dc=com",
+ [{"objectClass", ["groupOfNames"]},
+ {"cn", ["people"]},
+ {"member", [C, D, P]}]}.
+
+staff_group() ->
+ {C, _} = charlie(),
+ {D, _} = dominic(),
+ {P, _} = peter(),
+ {"cn=staff,ou=groups,dc=rabbitmq,dc=com",
+ [{"objectClass", ["groupOfNames"]},
+      {"cn", ["staff"]},
+ {"member", [C, D, P]}]}.
+
+bobs_group() ->
+ {B, _} = bob(),
+ {"cn=bobs,ou=groups,dc=rabbitmq,dc=com",
+ [{"objectClass", ["groupOfNames"]},
+ {"cn", ["bobs"]},
+ {"member", [B]}]}.
+
+bobs2_group() ->
+ {B, _} = bobs_group(),
+ {"cn=bobs2,ou=groups,dc=rabbitmq,dc=com",
+ [{"objectClass", ["groupOfNames"]},
+ {"cn", ["bobs2"]},
+ {"member", [B]}]}.
+
+admins_group() ->
+ {B, _} = bobs2_group(),
+ {W, _} = wheel_group(),
+ {"cn=admins,ou=groups,dc=rabbitmq,dc=com",
+ [{"objectClass", ["groupOfNames"]},
+ {"cn", ["admins"]},
+ {"member", [B, W]}]}.
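+
+%% Note the nesting exercised by the in_group_nested queries: admins
+%% contains bobs2 and wheel, and bobs2 in turn contains the bobs group.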
+
+person(Cn, Sn) ->
+ {"cn="++Cn++",ou=people,dc=rabbitmq,dc=com",
+ [{"objectClass", ["person"]},
+ {"cn", [Cn]},
+ {"sn", [Sn]},
+ {"userPassword", ["password"]}]}.
+
+bob() -> person("Bob", "Robert").
+dominic() -> person("Dominic", "Dom").
+charlie() -> person("Charlie", "Charlie Boy").
+edward() -> person("Edward", "Ed").
+johndoe() -> person("John Doe", "Doe").
+
+alice() ->
+ {"cn=Alice,ou=people,dc=rabbitmq,dc=com",
+ [{"objectClass", ["person"]},
+ {"cn", ["Alice"]},
+ {"sn", ["Ali"]},
+ {"userPassword", ["password"]},
+ {"description", ["can-declare-queues"]}]}.
+
+peter() ->
+ {"uid=peter,ou=people,dc=rabbitmq,dc=com",
+ [{"cn", ["Peter"]},
+ {"givenName", ["Peter"]},
+ {"sn", ["Jones"]},
+ {"uid", ["peter"]},
+ {"uidNumber", ["5000"]},
+ {"gidNumber", ["10000"]},
+ {"homeDirectory", ["/home/peter"]},
+ {"mail", ["peter.jones@rabbitmq.com"]},
+ {"objectClass", ["top",
+ "posixAccount",
+ "shadowAccount",
+ "inetOrgPerson",
+ "organizationalPerson",
+ "person"]},
+ {"loginShell", ["/bin/bash"]},
+ {"userPassword", ["password"]},
+ {"memberOf", ["cn=wheel,ou=groups,dc=rabbitmq,dc=com",
+ "cn=staff,ou=groups,dc=rabbitmq,dc=com",
+ "cn=people,ou=groups,dc=rabbitmq,dc=com"]}]}.
+
+carol() ->
+ {"uid=carol,ou=people,dc=rabbitmq,dc=com",
+ [{"cn", ["Carol"]},
+ {"givenName", ["Carol"]},
+ {"sn", ["Meyers"]},
+      {"uid", ["carol"]},
+ {"uidNumber", ["655"]},
+ {"gidNumber", ["10000"]},
+ {"homeDirectory", ["/home/carol"]},
+ {"mail", ["carol.meyers@example.com"]},
+ {"objectClass", ["top",
+ "posixAccount",
+ "shadowAccount",
+ "inetOrgPerson",
+ "organizationalPerson",
+ "person"]},
+ {"loginShell", ["/bin/bash"]},
+ {"userPassword", ["password"]}]}.
+
+% rabbitmq/rabbitmq-auth-backend-ldap#100
+jimmy() ->
+ {"cn=Jimmy,ou=people,dc=rabbitmq,dc=com",
+ [{"objectClass", ["person"]},
+ {"cn", ["Jimmy"]},
+ {"sn", ["Makes"]},
+ {"userPassword", ["password"]},
+ {"description", ["^RMQ-foobar", "^RMQ-.*$"]}]}.
+
+add(H, {A, B}) ->
+ ok = eldap:add(H, A, B).
+
+connect({Host, Port}) ->
+ {ok, H} = eldap:open([Host], [{port, Port}]),
+ ok = eldap:simple_bind(H, "cn=admin,dc=rabbitmq,dc=com", "admin"),
+ H.
+
+ou(Name) ->
+ {"ou=" ++ Name ++ ",dc=rabbitmq,dc=com", [{"objectClass", ["organizationalUnit"]}, {"ou", [Name]}]}.
+
+test() ->
+ {"ou=test,ou=vhosts,dc=rabbitmq,dc=com", [{"objectClass", ["top", "organizationalUnit"]}, {"ou", ["test"]}]}.
+
diff --git a/deps/rabbitmq_auth_backend_ldap/test/system_SUITE.erl b/deps/rabbitmq_auth_backend_ldap/test/system_SUITE.erl
new file mode 100644
index 0000000000..34d692ab45
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_ldap/test/system_SUITE.erl
@@ -0,0 +1,949 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(system_SUITE).
+-compile([export_all]).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-define(ALICE_NAME, "Alice").
+-define(BOB_NAME, "Bob").
+-define(CAROL_NAME, "Carol").
+-define(PETER_NAME, "Peter").
+-define(JIMMY_NAME, "Jimmy").
+
+-define(VHOST, "test").
+
+-define(ALICE, #amqp_params_network{username = <<?ALICE_NAME>>,
+ password = <<"password">>,
+ virtual_host = <<?VHOST>>}).
+
+-define(BOB, #amqp_params_network{username = <<?BOB_NAME>>,
+ password = <<"password">>,
+ virtual_host = <<?VHOST>>}).
+
+-define(CAROL, #amqp_params_network{username = <<?CAROL_NAME>>,
+ password = <<"password">>,
+ virtual_host = <<?VHOST>>}).
+
+-define(PETER, #amqp_params_network{username = <<?PETER_NAME>>,
+ password = <<"password">>,
+ virtual_host = <<?VHOST>>}).
+
+-define(JIMMY, #amqp_params_network{username = <<?JIMMY_NAME>>,
+ password = <<"password">>,
+ virtual_host = <<?VHOST>>}).
+
+-define(BASE_CONF_RABBIT, {rabbit, [{default_vhost, <<"test">>}]}).
+
+base_conf_ldap(LdapPort, IdleTimeout, PoolSize) ->
+ {rabbitmq_auth_backend_ldap, [{servers, ["localhost"]},
+ {user_dn_pattern, "cn=${username},ou=People,dc=rabbitmq,dc=com"},
+ {other_bind, anon},
+ {use_ssl, false},
+ {port, LdapPort},
+ {idle_timeout, IdleTimeout},
+ {pool_size, PoolSize},
+ {log, true},
+ {group_lookup_base, "ou=groups,dc=rabbitmq,dc=com"},
+ {vhost_access_query, vhost_access_query_base()},
+ {resource_access_query,
+ {for, [{resource, exchange,
+ {for, [{permission, configure,
+ {in_group, "cn=wheel,ou=groups,dc=rabbitmq,dc=com"}
+ },
+ {permission, write, {constant, true}},
+ {permission, read,
+ {match, {string, "${name}"},
+ {string, "^xch-${username}-.*"}}
+ }
+ ]}},
+ {resource, queue,
+ {for, [{permission, configure,
+ {match, {attribute, "${user_dn}", "description"},
+ {string, "can-declare-queues"}}
+ },
+ {permission, write, {constant, true}},
+ {permission, read,
+ {'or',
+ [{'and',
+ [{equals, "${name}", "test1"},
+ {equals, "${username}", "Alice"}]},
+ {'and',
+ [{equals, "${name}", "test2"},
+ {'not', {equals, "${username}", "Bob"}}]}
+ ]}}
+ ]}}
+ ]}},
+ {topic_access_query, topic_access_query_base()},
+ {tag_queries, [{monitor, {constant, true}},
+ {administrator, {constant, false}},
+ {management, {constant, false}}]}
+ ]}.
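+
+%% With the query above, for example, Alice (a "wheel" member whose LDAP
+%% description is "can-declare-queues") may configure exchanges and
+%% queues, while reading an exchange requires its name to match
+%% "^xch-${username}-.*" (exercised by string_match/1 below).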
+
+%%--------------------------------------------------------------------
+
+all() ->
+ [
+ {group, non_parallel_tests},
+ {group, with_idle_timeout}
+ ].
+
+groups() ->
+ Tests = [
+ purge_connection,
+ ldap_only,
+ ldap_and_internal,
+ internal_followed_ldap_and_internal,
+ tag_attribution_ldap_only,
+ tag_attribution_ldap_and_internal,
+ tag_attribution_internal_followed_by_ldap_and_internal,
+ invalid_or_clause_ldap_only,
+ invalid_and_clause_ldap_only,
+ topic_authorisation_publishing_ldap_only,
+ topic_authorisation_consumption,
+ match_bidirectional,
+ match_bidirectional_gh_100
+ ],
+ [
+ {non_parallel_tests, [], Tests
+ },
+ {with_idle_timeout, [], [connections_closed_after_timeout | Tests]
+ }
+ ].
+
+suite() ->
+ [{timetrap, {minutes, 2}}].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config, [fun init_slapd/1]).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config, [fun stop_slapd/1]).
+
+init_per_group(Group, Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, Group}
+ ]),
+ LdapPort = ?config(ldap_port, Config),
+ Config2 = rabbit_ct_helpers:merge_app_env(Config1, ?BASE_CONF_RABBIT),
+ Config3 = rabbit_ct_helpers:merge_app_env(Config2,
+ base_conf_ldap(LdapPort,
+ idle_timeout(Group),
+ pool_size(Group))),
+ Logon = {"localhost", LdapPort},
+ rabbit_ldap_seed:delete(Logon),
+ rabbit_ldap_seed:seed(Logon),
+ Config4 = rabbit_ct_helpers:set_config(Config3, {ldap_port, LdapPort}),
+
+ rabbit_ct_helpers:run_steps(Config4,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_group(_, Config) ->
+ rabbit_ldap_seed:delete({"localhost", ?config(ldap_port, Config)}),
+ rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_slapd(Config) ->
+ DataDir = ?config(data_dir, Config),
+ PrivDir = ?config(priv_dir, Config),
+ TcpPort = 25389,
+ SlapdDir = filename:join([PrivDir, "openldap"]),
+ InitSlapd = filename:join([DataDir, "init-slapd.sh"]),
+ Cmd = [InitSlapd, SlapdDir, {"~b", [TcpPort]}],
+ case rabbit_ct_helpers:exec(Cmd) of
+ {ok, Stdout} ->
+ {match, [SlapdPid]} = re:run(
+ Stdout,
+ "^SLAPD_PID=([0-9]+)$",
+ [{capture, all_but_first, list},
+ multiline]),
+ ct:pal(?LOW_IMPORTANCE,
+ "slapd(8) PID: ~s~nslapd(8) listening on: ~b",
+ [SlapdPid, TcpPort]),
+ rabbit_ct_helpers:set_config(Config,
+ [{slapd_pid, SlapdPid},
+ {ldap_port, TcpPort}]);
+ _ ->
+ _ = rabbit_ct_helpers:exec(["pkill", "-INT", "slapd"]),
+ {skip, "Failed to initialize slapd(8)"}
+ end.
+
+stop_slapd(Config) ->
+ SlapdPid = ?config(slapd_pid, Config),
+ Cmd = ["kill", "-INT", SlapdPid],
+ _ = rabbit_ct_helpers:exec(Cmd),
+ Config.
+
+idle_timeout(with_idle_timeout) -> 2000;
+idle_timeout(non_parallel_tests) -> infinity.
+
+pool_size(with_idle_timeout) -> 1;
+pool_size(non_parallel_tests) -> 10.
+
+init_internal(Config) ->
+ ok = control_action(Config, add_user, [?ALICE_NAME, ""]),
+ ok = control_action(Config, set_permissions, [?ALICE_NAME, "prefix-.*", "prefix-.*", "prefix-.*"]),
+ ok = control_action(Config, set_user_tags, [?ALICE_NAME, "management", "foo"]),
+ ok = control_action(Config, add_user, [?BOB_NAME, ""]),
+ ok = control_action(Config, set_permissions, [?BOB_NAME, "^$", "^$", "^$"]),
+ ok = control_action(Config, add_user, [?PETER_NAME, ""]),
+ ok = control_action(Config, set_permissions, [?PETER_NAME, "^$", "^$", "^$"]).
+
+end_internal(Config) ->
+ ok = control_action(Config, delete_user, [?ALICE_NAME]),
+ ok = control_action(Config, delete_user, [?BOB_NAME]),
+ ok = control_action(Config, delete_user, [?PETER_NAME]).
+
+init_per_testcase(Testcase, Config)
+ when Testcase == ldap_and_internal;
+ Testcase == internal_followed_ldap_and_internal ->
+ init_internal(Config),
+ rabbit_ct_helpers:testcase_started(Config, Testcase);
+init_per_testcase(Testcase, Config)
+ when Testcase == tag_attribution_ldap_and_internal;
+ Testcase == tag_attribution_internal_followed_by_ldap_and_internal ->
+ % back up tag queries
+ Cfg = case rabbit_ct_broker_helpers:rpc(Config, 0,
+ application,
+ get_env,
+                                            [rabbitmq_auth_backend_ldap, tag_queries]) of
+ undefined -> undefined;
+ {ok, X} -> X
+ end,
+ rabbit_ct_helpers:set_config(Config, {tag_queries_config, Cfg}),
+ internal_authorization_teardown(Config),
+ internal_authorization_setup(Config),
+ rabbit_ct_helpers:testcase_started(Config, Testcase);
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config)
+ when Testcase == ldap_and_internal;
+ Testcase == internal_followed_ldap_and_internal ->
+ end_internal(Config),
+ rabbit_ct_helpers:testcase_finished(Config, Testcase);
+end_per_testcase(Testcase, Config)
+ when Testcase == tag_attribution_ldap_and_internal;
+ Testcase == tag_attribution_internal_followed_by_ldap_and_internal ->
+ % restore tag queries
+ Cfg = rabbit_ct_helpers:get_config(Config, tag_queries_config),
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+ application,
+ set_env,
+                                      [rabbitmq_auth_backend_ldap, tag_queries, Cfg]),
+ internal_authorization_teardown(Config),
+ rabbit_ct_helpers:testcase_finished(Config, Testcase);
+end_per_testcase(connections_closed_after_timeout, Config) ->
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+ application,
+ set_env,
+ [rabbitmq_auth_backend_ldap,
+ other_bind, anon]),
+ rabbit_ct_helpers:testcase_finished(Config, connections_closed_after_timeout);
+end_per_testcase(Testcase, Config)
+ when Testcase == invalid_or_clause_ldap_only;
+ Testcase == invalid_and_clause_ldap_only ->
+ set_env(Config, vhost_access_query_base_env()),
+ rabbit_ct_helpers:testcase_finished(Config, Testcase);
+end_per_testcase(Testcase, Config)
+ when Testcase == topic_authorisation_publishing_ldap_only;
+ Testcase == topic_authorisation_consumption ->
+ set_env(Config, topic_access_query_base_env()),
+ rabbit_ct_helpers:testcase_finished(Config, Testcase);
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+
+%% -------------------------------------------------------------------
+%% Testsuite cases
+%% -------------------------------------------------------------------
+
+purge_connection(Config) ->
+ {ok, _} = rabbit_ct_broker_helpers:rpc(Config, 0,
+ rabbit_auth_backend_ldap,
+ user_login_authentication,
+ [<<?ALICE_NAME>>, []]),
+
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+ rabbit_auth_backend_ldap,
+ purge_connections, []).
+
+connections_closed_after_timeout(Config) ->
+ {ok, _} = rabbit_ct_broker_helpers:rpc(Config, 0,
+ rabbit_auth_backend_ldap,
+ user_login_authentication,
+ [<<?ALICE_NAME>>, []]),
+
+ [_] = maps:to_list(get_ldap_connections(Config)),
+ ct:sleep(idle_timeout(with_idle_timeout) + 200),
+
+ %% There should be no connections after idle timeout
+ [] = maps:to_list(get_ldap_connections(Config)),
+
+ {ok, _} = rabbit_ct_broker_helpers:rpc(Config, 0,
+ rabbit_auth_backend_ldap,
+ user_login_authentication,
+ [<<?ALICE_NAME>>, []]),
+
+ ct:sleep(round(idle_timeout(with_idle_timeout)/2)),
+
+    %% Logging in with a password opens a different connection,
+    %% so the anonymous connection will eventually be closed
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+ application,
+ set_env,
+ [rabbitmq_auth_backend_ldap,
+ other_bind, as_user]),
+ {ok, _} = rabbit_ct_broker_helpers:rpc(Config, 0,
+ rabbit_auth_backend_ldap,
+ user_login_authentication,
+ [<<?ALICE_NAME>>,
+ [{password, <<"password">>}]]),
+
+ ct:sleep(round(idle_timeout(with_idle_timeout)/2)),
+
+ {ok, _} = rabbit_ct_broker_helpers:rpc(Config, 0,
+ rabbit_auth_backend_ldap,
+ user_login_authentication,
+ [<<?ALICE_NAME>>,
+ [{password, <<"password">>}]]),
+
+ ct:sleep(round(idle_timeout(with_idle_timeout)/2)),
+
+ [{Key, _Conn}] = maps:to_list(get_ldap_connections(Config)),
+
+ %% Key will be {IsAnon, Servers, Options}
+ %% IsAnon is false for password authorization
+ {false, _, _} = Key.
+
+
+get_ldap_connections(Config) ->
+ rabbit_ct_broker_helpers:rpc(Config, 0,
+ rabbit_auth_backend_ldap, get_connections, []).
+
+ldap_only(Config) ->
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+ application, set_env, [rabbit, auth_backends, [rabbit_auth_backend_ldap]]),
+ login(Config),
+ in_group(Config),
+ const(Config),
+ string_match(Config),
+ boolean_logic(Config),
+ tag_check(Config, [monitor]),
+ tag_check_subst(Config),
+ logging(Config),
+ ok.
+
+ldap_and_internal(Config) ->
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+ application, set_env, [rabbit, auth_backends,
+ [{rabbit_auth_backend_ldap, rabbit_auth_backend_internal}]]),
+ login(Config),
+ permission_match(Config),
+ tag_check(Config, [monitor, management, foo]),
+ ok.
+
+internal_followed_ldap_and_internal(Config) ->
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+ application, set_env, [rabbit, auth_backends,
+ [rabbit_auth_backend_internal, {rabbit_auth_backend_ldap, rabbit_auth_backend_internal}]]),
+ login(Config),
+ permission_match(Config),
+ tag_check(Config, [monitor, management, foo]),
+ ok.
+
+tag_attribution_ldap_only(Config) ->
+ set_env(Config, tag_query_configuration()),
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+ application, set_env, [rabbit, auth_backends, [rabbit_auth_backend_ldap]]),
+ tag_check(Config, <<"Edward">>, <<"password">>, [monitor, normal]).
+
+tag_attribution_ldap_and_internal(Config) ->
+ set_env(Config, tag_query_configuration()),
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+ application, set_env, [rabbit, auth_backends, [{rabbit_auth_backend_ldap,
+ rabbit_auth_backend_internal}]]),
+ tag_check(Config, <<"Edward">>, <<"password">>,
+ [monitor, normal] ++ internal_authorization_tags()).
+
+tag_attribution_internal_followed_by_ldap_and_internal(Config) ->
+ set_env(Config, tag_query_configuration()),
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+ application, set_env, [rabbit, auth_backends, [rabbit_auth_backend_internal,
+ {rabbit_auth_backend_ldap,
+ rabbit_auth_backend_internal}]]),
+ tag_check(Config, <<"Edward">>, <<"password">>,
+ [monitor, normal] ++ internal_authorization_tags()).
+
+invalid_or_clause_ldap_only(Config) ->
+ set_env(Config, vhost_access_query_or_in_group()),
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+ application, set_env, [rabbit, auth_backends, [rabbit_auth_backend_ldap]]),
+ B = #amqp_params_network{port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp)},
+ {ok, C} = amqp_connection:start(B?ALICE),
+ ok = amqp_connection:close(C).
+
+invalid_and_clause_ldap_only(Config) ->
+ set_env(Config, vhost_access_query_and_in_group()),
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+ application, set_env, [rabbit, auth_backends, [rabbit_auth_backend_ldap]]),
+ B = #amqp_params_network{port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp)},
+ % NB: if the query crashes the ldap plugin it returns {error, access_refused}
+ % This may not be a reliable return value assertion
+ {error, not_allowed} = amqp_connection:start(B?ALICE).
+
+topic_authorisation_publishing_ldap_only(Config) ->
+ %% topic authorisation at publishing time is enforced in the AMQP channel
+ %% so it can be tested by sending messages
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+ application, set_env, [rabbit, auth_backends, [rabbit_auth_backend_ldap]]),
+
+ %% default is to let pass
+ P = #amqp_params_network{port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp)},
+ test_publish(P?ALICE, <<"amq.topic">>, <<"a.b.c">>, ok),
+
+ %% let pass for topic
+ set_env(Config, [{topic_access_query, {constant, true}}]),
+
+ P = #amqp_params_network{port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp)},
+ test_publish(P?ALICE, <<"amq.topic">>, <<"a.b.c">>, ok),
+
+ %% check string substitution (on username)
+ set_env(Config, [{topic_access_query, {for, [{permission, write, {equals, "${username}", "Alice"}},
+ {permission, read, {constant, false}}
+ ]}}]),
+ test_publish(P?ALICE, <<"amq.topic">>, <<"a.b.c">>, ok),
+ test_publish(P?BOB, <<"amq.topic">>, <<"a.b.c">>, fail),
+
+ %% check string substitution on routing key (with regex)
+ set_env(Config, [{topic_access_query, {for, [{permission, write, {'and',
+ [{equals, "${username}", "Alice"},
+ {match, {string, "${routing_key}"}, {string, "^a"}}]
+ }},
+ {permission, read, {constant, false}}
+ ]}}]),
+ %% user and routing key OK
+ test_publish(P?ALICE, <<"amq.topic">>, <<"a.b.c">>, ok),
+ %% user and routing key OK
+ test_publish(P?ALICE, <<"amq.topic">>, <<"a.c">>, ok),
+ %% user OK, routing key KO, should fail
+ test_publish(P?ALICE, <<"amq.topic">>, <<"b.c">>, fail),
+ %% user KO, routing key OK, should fail
+ test_publish(P?BOB, <<"amq.topic">>, <<"a.b.c">>, fail),
+
+ ok.
+
+topic_authorisation_consumption(Config) ->
+ %% topic authorisation for consumption isn't enforced in AMQP
+ %% (it is in plugins like STOMP and MQTT, at subscription time)
+ %% so we directly test the LDAP backend, inside the broker
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, topic_authorisation_consumption1, [Config]).
+
+topic_authorisation_consumption1(Config) ->
+ %% we can't use the LDAP backend record here, falling back to simple tuples
+ Alice = {auth_user,<<"Alice">>, [monitor],
+ {impl,"cn=Alice,ou=People,dc=rabbitmq,dc=com",<<"password">>}
+ },
+ Bob = {auth_user,<<"Bob">>, [monitor],
+ {impl,"cn=Bob,ou=People,dc=rabbitmq,dc=com",<<"password">>}
+ },
+ Resource = #resource{virtual_host = <<"/">>, name = <<"amq.topic">>, kind = topic},
+ Context = #{routing_key => <<"a.b">>,
+ variable_map => #{
+ <<"username">> => <<"guest">>,
+ <<"vhost">> => <<"other-vhost">>
+ }},
+ %% default is to let pass
+ true = rabbit_auth_backend_ldap:check_topic_access(Alice, Resource, read, Context),
+
+ %% let pass for topic
+ set_env(Config, [{topic_access_query, {for, [{permission, read, {constant, true}},
+ {permission, write, {constant, false}}]
+ }}]),
+
+ true = rabbit_auth_backend_ldap:check_topic_access(Alice, Resource, read, Context),
+
+ %% check string substitution (on username)
+ set_env(Config, [{topic_access_query, {for, [{permission, read, {equals, "${username}", "Alice"}},
+ {permission, write, {constant, false}}]
+ }}]),
+
+ true = rabbit_auth_backend_ldap:check_topic_access(Alice, Resource, read, Context),
+ false = rabbit_auth_backend_ldap:check_topic_access(Bob, Resource, read, Context),
+
+ %% check string substitution on routing key (with regex)
+ set_env(Config, [{topic_access_query, {for, [{permission, read, {'and',
+ [{equals, "${username}", "Alice"},
+ {match, {string, "${routing_key}"}, {string, "^a"}}]
+ }},
+ {permission, write, {constant, false}}]
+ }}]),
+ %% user and routing key OK
+ true = rabbit_auth_backend_ldap:check_topic_access(Alice, Resource, read, #{routing_key => <<"a.b.c">>}),
+ %% user and routing key OK
+ true = rabbit_auth_backend_ldap:check_topic_access(Alice, Resource, read, #{routing_key => <<"a.c">>}),
+ %% user OK, routing key KO, should fail
+ false = rabbit_auth_backend_ldap:check_topic_access(Alice, Resource, read, #{routing_key => <<"b.c">>}),
+ %% user KO, routing key OK, should fail
+ false = rabbit_auth_backend_ldap:check_topic_access(Bob, Resource, read, #{routing_key => <<"a.b.c">>}),
+ ok.
+
+match_bidirectional(Config) ->
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+ application, set_env, [rabbit, auth_backends, [rabbit_auth_backend_ldap]]),
+
+ Configurations = [
+ fun resource_access_query_match/0,
+ fun resource_access_query_match_query_is_string/0,
+ fun resource_access_query_match_re_query_is_string/0,
+ fun resource_access_query_match_query_and_re_query_are_strings/0
+ ],
+
+ [begin
+ set_env(Config, ConfigurationFunction()),
+ Q1 = [#'queue.declare'{queue = <<"Alice-queue">>}],
+ Q2 = [#'queue.declare'{queue = <<"Ali">>}],
+ P = #amqp_params_network{port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp)},
+ [test_resource(PTR) || PTR <- [{P?ALICE, Q1, ok},
+ {P?ALICE, Q2, fail}]]
+ end || ConfigurationFunction <- Configurations],
+ ok.
+
+match_bidirectional_gh_100(Config) ->
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+ application, set_env, [rabbit, auth_backends, [rabbit_auth_backend_ldap]]),
+
+ Configurations = [
+ fun resource_access_query_match_gh_100/0,
+ fun resource_access_query_match_query_is_string_gh_100/0
+ ],
+
+ [begin
+ set_env(Config, ConfigurationFunction()),
+ Q1 = [#'queue.declare'{queue = <<"Jimmy-queue">>}],
+ Q2 = [#'queue.declare'{queue = <<"Jimmy">>}],
+ P = #amqp_params_network{port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp)},
+ [test_resource(PTR) || PTR <- [{P?JIMMY, Q1, ok},
+ {P?JIMMY, Q2, ok}]]
+ end || ConfigurationFunction <- Configurations],
+ ok.
+
+%%--------------------------------------------------------------------
+
+test_publish(Person, Exchange, RoutingKey, ExpectedResult) ->
+ {ok, Connection} = amqp_connection:start(Person),
+ {ok, Channel} = amqp_connection:open_channel(Connection),
+ ActualResult =
+ try
+ Publish = #'basic.publish'{exchange = Exchange, routing_key = RoutingKey},
+ amqp_channel:cast(Channel, Publish, #amqp_msg{payload = <<"foobar">>}),
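+            %% basic.publish is asynchronous; the synchronous basic.qos
+            %% call below appears to act as a barrier so an access_refused
+            %% channel crash surfaces here (the prefetch value is irrelevant)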
+ amqp_channel:call(Channel, #'basic.qos'{prefetch_count = 0}),
+ ok
+ catch exit:_ -> fail
+ after
+ amqp_connection:close(Connection)
+ end,
+ ExpectedResult = ActualResult.
+
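+%% Run every login fixture against every login environment; success is
+%% expected only when both the fixture and the environment are 'good'
+%% and the environment's number is absent from the fixture's FilterList.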
+login(Config) ->
+ lists:flatten(
+ [test_login(Config, {N, Env}, L, FilterList, case {LGood, EnvGood} of
+ {good, good} -> fun succ/1;
+ _ -> fun fail/1
+ end) ||
+ {LGood, FilterList, L, _Tags} <- logins(Config),
+ {N, {EnvGood, Env}} <- login_envs()]).
+
+logins(Config) -> logins_network(Config) ++ logins_direct(Config).
+
+%% Format for login tests, {Outcome, FilterList, Login, Tags}.
+%% Tests skipped for each login_env reference in FilterList.
+logins_network(Config) ->
+ B = #amqp_params_network{port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp)},
+ [{bad, [5, 6], B#amqp_params_network{}, []},
+ {bad, [5, 6], B#amqp_params_network{username = <<?ALICE_NAME>>}, []},
+ {bad, [5, 6], B#amqp_params_network{username = <<?ALICE_NAME>>,
+ password = <<"password">>}, []},
+ {bad, [5, 6], B#amqp_params_network{username = <<"Alice">>,
+ password = <<"Alicja">>,
+ virtual_host = <<?VHOST>>}, []},
+ {bad, [1, 2, 3, 4, 6, 7], B?CAROL, []},
+ {good, [5, 6], B?ALICE, []},
+ {good, [5, 6], B?BOB, []},
+ {good, [1, 2, 3, 4, 6, 7, 8], B?PETER, []}].
+
+logins_direct(Config) ->
+ N = #amqp_params_direct{node = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename)},
+ [{bad, [5], N#amqp_params_direct{}, []},
+ {bad, [5], N#amqp_params_direct{username = <<?ALICE_NAME>>}, []},
+ {bad, [5], N#amqp_params_direct{username = <<?ALICE_NAME>>,
+ password = <<"password">>}, [management]},
+ {good, [5], N#amqp_params_direct{username = <<?ALICE_NAME>>,
+ password = <<"password">>,
+ virtual_host = <<?VHOST>>}, [management]}].
+
+%% Format for login envs, {Reference, {Outcome, Env}}
+login_envs() ->
+ [{1, {good, base_login_env()}},
+ {2, {good, dn_lookup_pre_bind_env()}},
+ {3, {good, other_bind_admin_env()}},
+ {4, {good, other_bind_anon_env()}},
+ {5, {good, posix_vhost_access_multiattr_env()}},
+ {6, {good, tag_queries_subst_env()}},
+ {7, {bad, other_bind_broken_env()}},
+ {8, {good, vhost_access_query_nested_groups_env()}}].
+
+base_login_env() ->
+ [{user_dn_pattern, "cn=${username},ou=People,dc=rabbitmq,dc=com"},
+ {dn_lookup_attribute, none},
+ {dn_lookup_base, none},
+ {dn_lookup_bind, as_user},
+ {other_bind, as_user},
+ {tag_queries, [{monitor, {constant, true}},
+ {administrator, {constant, false}},
+ {management, {constant, false}}]},
+ {vhost_access_query, {exists, "ou=${vhost},ou=vhosts,dc=rabbitmq,dc=com"}},
+ {log, true}].
+
+%% TODO configure OpenLDAP to allow a dn_lookup_post_bind_env()
+dn_lookup_pre_bind_env() ->
+ [{user_dn_pattern, "${username}"},
+ {dn_lookup_attribute, "cn"},
+ {dn_lookup_base, "OU=People,DC=rabbitmq,DC=com"},
+ {dn_lookup_bind, {"cn=admin,dc=rabbitmq,dc=com", "admin"}}].
+
+other_bind_admin_env() ->
+ [{other_bind, {"cn=admin,dc=rabbitmq,dc=com", "admin"}}].
+
+other_bind_anon_env() ->
+ [{other_bind, anon}].
+
+other_bind_broken_env() ->
+ [{other_bind, {"cn=admin,dc=rabbitmq,dc=com", "admi"}}].
+
+tag_queries_subst_env() ->
+ [{tag_queries, [{administrator, {constant, false}},
+ {management,
+ {exists, "ou=${vhost},ou=vhosts,dc=rabbitmq,dc=com"}}]}].
+
+posix_vhost_access_multiattr_env() ->
+ [{user_dn_pattern, "uid=${username},ou=People,dc=rabbitmq,dc=com"},
+ {vhost_access_query,
+ {'and', [{exists, "ou=${vhost},ou=vhosts,dc=rabbitmq,dc=com"},
+ {equals,
+ {attribute, "${user_dn}","memberOf"},
+ {string, "cn=wheel,ou=groups,dc=rabbitmq,dc=com"}},
+ {equals,
+ {attribute, "${user_dn}","memberOf"},
+ {string, "cn=people,ou=groups,dc=rabbitmq,dc=com"}},
+ {equals,
+ {string, "cn=wheel,ou=groups,dc=rabbitmq,dc=com"},
+ {attribute,"${user_dn}","memberOf"}},
+ {equals,
+ {string, "cn=people,ou=groups,dc=rabbitmq,dc=com"},
+ {attribute, "${user_dn}","memberOf"}},
+ {match,
+ {attribute, "${user_dn}","memberOf"},
+ {string, "cn=wheel,ou=groups,dc=rabbitmq,dc=com"}},
+ {match,
+ {attribute, "${user_dn}","memberOf"},
+ {string, "cn=people,ou=groups,dc=rabbitmq,dc=com"}},
+ {match,
+ {string, "cn=wheel,ou=groups,dc=rabbitmq,dc=com"},
+ {attribute, "${user_dn}","memberOf"}},
+ {match,
+ {string, "cn=people,ou=groups,dc=rabbitmq,dc=com"},
+ {attribute, "${user_dn}","memberOf"}}
+ ]}}].
+
+vhost_access_query_or_in_group() ->
+ [{vhost_access_query,
+ {'or', [
+ {in_group, "cn=bananas,ou=groups,dc=rabbitmq,dc=com"},
+ {in_group, "cn=apples,ou=groups,dc=rabbitmq,dc=com"},
+ {in_group, "cn=wheel,ou=groups,dc=rabbitmq,dc=com"}
+ ]}}].
+
+vhost_access_query_and_in_group() ->
+ [{vhost_access_query,
+ {'and', [
+ {in_group, "cn=bananas,ou=groups,dc=rabbitmq,dc=com"},
+ {in_group, "cn=wheel,ou=groups,dc=rabbitmq,dc=com"}
+ ]}}].
+
+vhost_access_query_nested_groups_env() ->
+ [{vhost_access_query, {in_group_nested, "cn=admins,ou=groups,dc=rabbitmq,dc=com"}}].
+
+vhost_access_query_base_env() ->
+ [{vhost_access_query, vhost_access_query_base()}].
+
+vhost_access_query_base() ->
+ {exists, "ou=${vhost},ou=vhosts,dc=rabbitmq,dc=com"}.
+
+resource_access_query_match_gh_100() ->
+ [{resource_access_query,
+ {match, {string, "RMQ-${vhost}"}, {attribute, "${user_dn}", "description"}}
+ }].
+
+resource_access_query_match_query_is_string_gh_100() ->
+ [{resource_access_query,
+ {match, "RMQ-${vhost}", {attribute, "${user_dn}", "description"}}
+ }].
+
+resource_access_query_match() ->
+ [{resource_access_query, {match, {string, "${name}"},
+ {string, "^${username}-"}}
+ }].
+
+resource_access_query_match_query_is_string() ->
+ [{resource_access_query, {match, "${name}",
+ {string, "^${username}-"}}
+ }].
+
+resource_access_query_match_re_query_is_string() ->
+ [{resource_access_query, {match, {string, "${name}"},
+ "^${username}-"}
+ }].
+
+resource_access_query_match_query_and_re_query_are_strings() ->
+ [{resource_access_query, {match, "${name}",
+ "^${username}-"}
+ }].
+
+topic_access_query_base_env() ->
+ [{topic_access_query, topic_access_query_base()}].
+
+topic_access_query_base() ->
+ {constant, true}.
+
+test_login(Config, {N, Env}, Login, FilterList, ResultFun) ->
+ case lists:member(N, FilterList) of
+ true -> [];
+ _ ->
+ try
+ set_env(Config, Env),
+ ResultFun(Login)
+ after
+ set_env(Config, base_login_env())
+ end
+ end.
+
+rpc_set_env(Config, Args) ->
+ rabbit_ct_broker_helpers:rpc(Config, 0, application, set_env, Args).
+
+set_env(Config, Env) ->
+ [rpc_set_env(Config, [rabbitmq_auth_backend_ldap, K, V]) || {K, V} <- Env].
+
+succ(Login) ->
+ {ok, Pid} = amqp_connection:start(Login),
+ amqp_connection:close(Pid).
+fail(Login) -> ?assertMatch({error, _}, amqp_connection:start(Login)).
+
+%%--------------------------------------------------------------------
+
+in_group(Config) ->
+ X = [#'exchange.declare'{exchange = <<"test">>}],
+ B = #amqp_params_network{port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp)},
+ test_resources([{B?ALICE, X, ok},
+ {B?BOB, X, fail}]).
+
+const(Config) ->
+ Q = [#'queue.declare'{queue = <<"test">>}],
+ B = #amqp_params_network{port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp)},
+ test_resources([{B?ALICE, Q, ok},
+ {B?BOB, Q, fail}]).
+
+string_match(Config) ->
+ B = fun(N) ->
+ [#'exchange.declare'{exchange = N},
+ #'queue.declare'{queue = <<"test">>},
+ #'queue.bind'{exchange = N, queue = <<"test">>}]
+ end,
+ P = #amqp_params_network{port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp)},
+ test_resources([{P?ALICE, B(<<"xch-Alice-abc123">>), ok},
+ {P?ALICE, B(<<"abc123">>), fail},
+ {P?ALICE, B(<<"xch-Someone Else-abc123">>), fail}]).
+
+boolean_logic(Config) ->
+ Q1 = [#'queue.declare'{queue = <<"test1">>},
+ #'basic.consume'{queue = <<"test1">>}],
+ Q2 = [#'queue.declare'{queue = <<"test2">>},
+ #'basic.consume'{queue = <<"test2">>}],
+ P = #amqp_params_network{port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp)},
+ [test_resource(PTR) || PTR <- [{P?ALICE, Q1, ok},
+ {P?ALICE, Q2, ok},
+ {P?BOB, Q1, fail},
+ {P?BOB, Q2, fail}]].
+
+permission_match(Config) ->
+ B = fun(N) ->
+ [#'exchange.declare'{exchange = N},
+ #'queue.declare'{queue = <<"prefix-test">>},
+ #'queue.bind'{exchange = N, queue = <<"prefix-test">>}]
+ end,
+ P = #amqp_params_network{port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp)},
+ test_resources([{P?ALICE, B(<<"prefix-abc123">>), ok},
+ {P?ALICE, B(<<"abc123">>), fail},
+ {P?ALICE, B(<<"xch-Alice-abc123">>), fail}]).
+
+%% Tag check tests, with substitution
+tag_check_subst(Config) ->
+ lists:flatten(
+ [test_tag_check(Config, tag_queries_subst_env(),
+ fun () -> tag_check(Config, Username, Password, VHost, Outcome, Tags) end) ||
+ {Outcome, _FilterList, #amqp_params_direct{username = Username,
+ password = Password,
+ virtual_host = VHost},
+ Tags} <- logins_direct(Config)]).
+
+%% Tag check
+tag_check(Config, Tags) ->
+ tag_check(Config, <<?ALICE_NAME>>, <<"password">>, Tags).
+
+tag_check(Config, Username, Password, Tags) ->
+ tag_check(Config, Username, Password, <<>>, good, Tags).
+
+tag_check(Config, Username, Password, VHost, Outcome, Tags)
+ when is_binary(Username), is_binary(Password), is_binary(VHost), is_list(Tags) ->
+ {ok, User} = rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_access_control, check_user_login, [Username, [{password, Password}, {vhost, VHost}]]),
+ tag_check_outcome(Outcome, Tags, User);
+tag_check(_, _, _, _, _, _) -> fun() -> [] end.
+
+tag_check_outcome(good, Tags, User) -> ?assertEqual(Tags, User#user.tags);
+tag_check_outcome(bad, Tags, User) -> ?assertNotEqual(Tags, User#user.tags).
+
+test_tag_check(Config, Env, TagCheckFun) ->
+ try
+ set_env(Config, Env),
+ TagCheckFun()
+ after
+ set_env(Config, base_login_env())
+ end.
+
+tag_query_configuration() ->
+ [{tag_queries,
+ [{administrator, {constant, false}},
+ %% Query result for tag `management` is FALSE
+ %% because this object does NOT exist.
+ {management,
+ {exists, "cn=${username},ou=Faculty,dc=Computer Science,dc=Engineering"}},
+ {monitor, {constant, true}},
+ %% Query result for tag `normal` is TRUE because
+ %% this object exists.
+ {normal,
+ {exists, "cn=${username},ou=people,dc=rabbitmq,dc=com"}}]}].
+
+internal_authorization_setup(Config) ->
+ ok = control_action(Config, add_user, ["Edward", ""]),
+ ok = control_action(Config, set_user_tags, ["Edward"] ++
+ [ atom_to_list(T) || T <- internal_authorization_tags() ]).
+
+internal_authorization_teardown(Config) ->
+ control_action(Config, delete_user, ["Edward"]).
+
+internal_authorization_tags() ->
+ [foo, bar].
+
+%% Logging tests, triggered within 'test_login/4'
+logging(Config) ->
+ lists:flatten(
+ [test_login(Config, {N, Env}, L, FilterList, case {LGood, EnvGood} of
+ {good, good} -> fun succ/1;
+ _ -> fun fail/1
+ end) ||
+ {LGood, FilterList, L} <- logging_test_users(Config),
+ {N, {EnvGood, Env}} <- logging_envs()]).
+
+%% Format for logging tests, {Outcome, FilterList, Login}.
+logging_test_users(Config) ->
+ P = #amqp_params_network{port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp)},
+ [{bad, [], P#amqp_params_network{username = <<?ALICE_NAME>>}},
+ {good, [], P?ALICE}].
+
+logging_envs() ->
+ [{1, {good, scrub_bind_creds_env()}},
+ {2, {good, display_bind_creds_env()}},
+ {3, {bad, scrub_bind_single_cred_env()}},
+ {4, {bad, scrub_bind_creds_no_equals_env()}},
+     {5, {bad, scrub_bind_creds_no_separator_env()}}].
+
+scrub_bind_creds_env() ->
+ [{log, network},
+ {other_bind, {"cn=admin,dc=rabbitmq,dc=com", "admin"}}].
+
+display_bind_creds_env() ->
+ [{log, network_unsafe},
+ {other_bind, {"cn=admin,dc=rabbitmq,dc=com", "admin"}}].
+
+scrub_bind_single_cred_env() ->
+ [{log, network},
+ {other_bind, {"dc=com", "admin"}}].
+
+scrub_bind_creds_no_equals_env() ->
+ [{log, network},
+ {other_bind, {"cn*admin,dc>rabbitmq,dc&com", "admin"}}].
+
+scrub_bind_creds_no_separator_env() ->
+ [{log, network},
+ {other_bind, {"cn=admindc=rabbitmqdc&com", "admin"}}].
+
+%%--------------------------------------------------------------------
+
+test_resources(PTRs) -> [test_resource(PTR) || PTR <- PTRs].
+
+test_resource({Person, Things, Result}) ->
+ {ok, Conn} = amqp_connection:start(Person),
+ {ok, Ch} = amqp_connection:open_channel(Conn),
+ ?assertEqual(Result,
+ try
+ [amqp_channel:call(Ch, T) || T <- Things],
+ ok
+ catch exit:_ -> fail
+ after
+ amqp_connection:close(Conn)
+ end).
+
+control_action(Config, Command, Args) ->
+ Node = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ control_action(Config, Command, Node, Args, default_options()).
+
+control_action(Config, Command, Args, NewOpts) ->
+ Node = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ control_action(Config, Command, Node, Args,
+ expand_options(default_options(), NewOpts)).
+
+control_action(_Config, Command, Node, Args, Opts) ->
+ case rabbit_control_helper:command(Command, Node, Args, Opts) of
+ ok ->
+ io:format("done.~n"),
+ ok;
+ Other ->
+ io:format("failed.~n"),
+ Other
+ end.
+
+default_options() -> [{"-p", ?VHOST}, {"-q", "false"}].
+
+expand_options(As, Bs) ->
+ lists:foldl(fun({K, _}=A, R) ->
+ case proplists:is_defined(K, R) of
+ true -> R;
+ false -> [A | R]
+ end
+ end, Bs, As).
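+
+%% For example (illustrative): expand_options([{"-p", "test"}],
+%% [{"-q", "false"}]) yields [{"-p", "test"}, {"-q", "false"}]; defaults
+%% never override options already present in the second list.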
+
diff --git a/deps/rabbitmq_auth_backend_ldap/test/system_SUITE_data/init-slapd.sh b/deps/rabbitmq_auth_backend_ldap/test/system_SUITE_data/init-slapd.sh
new file mode 100755
index 0000000000..c1319898b2
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_ldap/test/system_SUITE_data/init-slapd.sh
@@ -0,0 +1,98 @@
+#!/bin/sh
+# vim:sw=4:et:
+
+set -ex
+
+slapd_data_dir=$1
+tcp_port=$2
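+
+# Usage (illustrative): init-slapd.sh /path/to/data-dir 25389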
+
+pidfile="$slapd_data_dir/slapd.pid"
+uri="ldap://localhost:$tcp_port"
+
+binddn="cn=config"
+passwd=secret
+
+case "$(uname -s)" in
+ Linux)
+ slapd=/usr/sbin/slapd
+ modulepath=/usr/lib/ldap
+ schema_dir=/etc/ldap/schema
+ ;;
+ FreeBSD)
+ slapd=/usr/local/libexec/slapd
+ modulepath=/usr/local/libexec/openldap
+ schema_dir=/usr/local/etc/openldap/schema
+ ;;
+ *)
+ exit 1
+ ;;
+esac
+
+# --------------------------------------------------------------------
+# slapd(8) configuration + start
+# --------------------------------------------------------------------
+
+rm -rf "$slapd_data_dir"
+mkdir -p "$slapd_data_dir"
+
+conf_file=$slapd_data_dir/slapd.conf
+cat <<EOF > "$conf_file"
+include $schema_dir/core.schema
+include $schema_dir/cosine.schema
+include $schema_dir/nis.schema
+include $schema_dir/inetorgperson.schema
+pidfile $pidfile
+modulepath $modulepath
+loglevel 7
+
+database config
+rootdn "$binddn"
+rootpw $passwd
+EOF
+
+cat "$conf_file"
+
+conf_dir=$slapd_data_dir/slapd.d
+mkdir -p "$conf_dir"
+
+# Start slapd(8).
+"$slapd" \
+ -f "$conf_file" \
+ -F "$conf_dir" \
+ -h "$uri"
+
+auth="-x -D $binddn -w $passwd"
+
+# We wait for the server to start.
+for seconds in 1 2 3 4 5 6 7 8 9 10; do
+ ldapsearch $auth -H "$uri" -LLL -b cn=config dn && break;
+ sleep 1
+done
+
+# --------------------------------------------------------------------
+# Load the example LDIFs for the testsuite.
+# --------------------------------------------------------------------
+
+script_dir=$(cd "$(dirname "$0")" && pwd)
+example_ldif_dir="$script_dir/../../example"
+example_data_dir="$slapd_data_dir/example"
+mkdir -p "$example_data_dir"
+
+# We update the hard-coded database directory with the one we computed
+# here, so the data is located inside the test directory.
+sed -E -e "s,^olcDbDirectory:.*,olcDbDirectory: $example_data_dir," \
+ < "$example_ldif_dir/global.ldif" | \
+ ldapadd $auth -H "$uri"
+
+# We replace the module path in the example LDIF with the one computed
+# above for this platform.
+sed -E -e "s,^olcModulePath:.*,olcModulePath: $modulepath," \
+ < "$example_ldif_dir/memberof_init.ldif" | \
+ ldapadd $auth -H "$uri"
+
+ldapmodify $auth -H "$uri" -f "$example_ldif_dir/refint_1.ldif"
+ldapadd $auth -H "$uri" -f "$example_ldif_dir/refint_2.ldif"
+
+ldapsearch $auth -H "$uri" -LLL -b cn=config dn
+
+echo SLAPD_PID=$(cat "$pidfile")
diff --git a/deps/rabbitmq_auth_backend_ldap/test/unit_SUITE.erl b/deps/rabbitmq_auth_backend_ldap/test/unit_SUITE.erl
new file mode 100644
index 0000000000..af318615f8
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_ldap/test/unit_SUITE.erl
@@ -0,0 +1,47 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(unit_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-compile([export_all]).
+
+all() ->
+ [
+ fill,
+ ad_fill
+ ].
+
+fill(_Config) ->
+ F = fun(Fmt, Args, Res) ->
+ ?assertEqual(Res, rabbit_auth_backend_ldap_util:fill(Fmt, Args))
+ end,
+ F("x${username}x", [{username, "ab"}], "xabx"),
+ F("x${username}x", [{username, ab}], "xabx"),
+ F("x${username}x", [{username, <<"ab">>}], "xabx"),
+ F("x${username}x", [{username, ""}], "xx"),
+ F("x${username}x", [{fusername, "ab"}], "x${username}x"),
+ F("x${usernamex", [{username, "ab"}], "x${usernamex"),
+ F("x${username}x", [{username, "a\\b"}], "xa\\bx"),
+ F("x${username}x", [{username, "a&b"}], "xa&bx"),
+ ok.
+
+ad_fill(_Config) ->
+ F = fun(Fmt, Args, Res) ->
+ ?assertEqual(Res, rabbit_auth_backend_ldap_util:fill(Fmt, Args))
+ end,
+
+ U0 = <<"ADDomain\\ADUser">>,
+ A0 = rabbit_auth_backend_ldap_util:get_active_directory_args(U0),
+ F("x-${ad_domain}-x-${ad_user}-x", A0, "x-ADDomain-x-ADUser-x"),
+
+ U1 = <<"ADDomain\\ADUser\\Extra">>,
+ A1 = rabbit_auth_backend_ldap_util:get_active_directory_args(U1),
+ F("x-${ad_domain}-x-${ad_user}-x", A1, "x-ADDomain-x-ADUser\\Extra-x"),
+ ok.
diff --git a/deps/rabbitmq_auth_backend_oauth2/.gitignore b/deps/rabbitmq_auth_backend_oauth2/.gitignore
new file mode 100644
index 0000000000..895b779b8d
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_oauth2/.gitignore
@@ -0,0 +1,17 @@
+.sw?
+.*.sw?
+*.beam
+/.erlang.mk/
+/cover/
+/deps/
+/doc/
+/ebin/
+/escript/
+/escript.lock
+/logs/
+/plugins/
+/plugins.lock
+/sbin/
+/sbin.lock
+
+/rabbitmq_auth_backend_oauth2.d
diff --git a/deps/rabbitmq_auth_backend_oauth2/.travis.yml b/deps/rabbitmq_auth_backend_oauth2/.travis.yml
new file mode 100644
index 0000000000..06864faf4f
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_oauth2/.travis.yml
@@ -0,0 +1,59 @@
+# vim:sw=2:et:
+
+os: linux
+dist: xenial
+language: elixir
+notifications:
+ email:
+ recipients:
+ - alerts@rabbitmq.com
+ on_success: never
+ on_failure: always
+addons:
+ apt:
+ packages:
+ - awscli
+cache:
+ apt: true
+env:
+ global:
+
+ # $base_rmq_ref is used by rabbitmq-components.mk to select the
+ # appropriate branch for dependencies.
+ - base_rmq_ref=master
+
+elixir:
+ - '1.9'
+otp_release:
+ - '21.3'
+ - '22.2'
+
+install:
+ # This project being an Erlang one (we just set language to Elixir
+ # to ensure it is installed), we don't want Travis to run mix(1)
+ # automatically as it will break.
+ skip
+
+script:
+ # $current_rmq_ref is also used by rabbitmq-components.mk to select
+ # the appropriate branch for dependencies.
+ - make check-rabbitmq-components.mk
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+ - make xref
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+ - make tests
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+
+after_failure:
+ - |
+ cd "$TRAVIS_BUILD_DIR"
+ if test -d logs && test "$AWS_ACCESS_KEY_ID" && test "$AWS_SECRET_ACCESS_KEY"; then
+ archive_name="$(basename "$TRAVIS_REPO_SLUG")-$TRAVIS_JOB_NUMBER"
+
+ tar -c --transform "s/^logs/${archive_name}/" -f - logs | \
+ xz > "${archive_name}.tar.xz"
+
+ aws s3 cp "${archive_name}.tar.xz" s3://server-release-pipeline/travis-ci-logs/ \
+ --region eu-west-1 \
+ --acl public-read
+ fi
diff --git a/deps/rabbitmq_auth_backend_oauth2/CODE_OF_CONDUCT.md b/deps/rabbitmq_auth_backend_oauth2/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000000..08697906fd
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_oauth2/CODE_OF_CONDUCT.md
@@ -0,0 +1,44 @@
+# Contributor Code of Conduct
+
+As contributors and maintainers of this project, and in the interest of fostering an open
+and welcoming community, we pledge to respect all people who contribute through reporting
+issues, posting feature requests, updating documentation, submitting pull requests or
+patches, and other activities.
+
+We are committed to making participation in this project a harassment-free experience for
+everyone, regardless of level of experience, gender, gender identity and expression,
+sexual orientation, disability, personal appearance, body size, race, ethnicity, age,
+religion, or nationality.
+
+Examples of unacceptable behavior by participants include:
+
+ * The use of sexualized language or imagery
+ * Personal attacks
+ * Trolling or insulting/derogatory comments
+ * Public or private harassment
+ * Publishing others' private information, such as physical or electronic addresses,
+ without explicit permission
+ * Other unethical or unprofessional conduct
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments,
+commits, code, wiki edits, issues, and other contributions that are not aligned to this
+Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors
+that they deem inappropriate, threatening, offensive, or harmful.
+
+By adopting this Code of Conduct, project maintainers commit themselves to fairly and
+consistently applying these principles to every aspect of managing this project. Project
+maintainers who do not follow or enforce the Code of Conduct may be permanently removed
+from the project team.
+
+This Code of Conduct applies both within project spaces and in public spaces when an
+individual is representing the project or its community.
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by
+contacting a project maintainer at [info@rabbitmq.com](mailto:info@rabbitmq.com). All complaints will
+be reviewed and investigated and will result in a response that is deemed necessary and
+appropriate to the circumstances. Maintainers are obligated to maintain confidentiality
+with regard to the reporter of an incident.
+
+This Code of Conduct is adapted from the
+[Contributor Covenant](https://contributor-covenant.org), version 1.3.0, available at
+[contributor-covenant.org/version/1/3/0/](https://contributor-covenant.org/version/1/3/0/)
diff --git a/deps/rabbitmq_auth_backend_oauth2/CONTRIBUTING.md b/deps/rabbitmq_auth_backend_oauth2/CONTRIBUTING.md
new file mode 100644
index 0000000000..b50bd82900
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_oauth2/CONTRIBUTING.md
@@ -0,0 +1,103 @@
+Thank you for using RabbitMQ and for taking the time to contribute to the project.
+This document has two main parts:
+
+ * when and how to file GitHub issues for RabbitMQ projects
+ * how to submit pull requests
+
+They are intended to save you and RabbitMQ maintainers some time, so please
+take a moment to read through them.
+
+## Overview
+
+### GitHub issues
+
+The RabbitMQ team uses GitHub issues for _specific actionable items_ that
+engineers can work on. This assumes the following:
+
+* GitHub issues are not used for questions, investigations, root cause
+ analysis, discussions of potential issues, etc (as defined by this team)
+* Enough information is provided by the reporter for maintainers to work with
+
+The team receives many questions through various venues every single
+day. Frequently, these questions do not include the necessary details
+the team needs to begin useful work. GitHub issues can very quickly
+turn into something impossible to navigate and make sense
+of. Because of this, questions, investigations, root cause analysis,
+and discussions of potential features are all considered to be
+[mailing list][rmq-users] material. If you are unsure where to begin,
+the [RabbitMQ users mailing list][rmq-users] is the right place.
+
+Getting all the details necessary to reproduce an issue, make a
+conclusion or even form a hypothesis about what's happening can take a
+fair amount of time. Please help others help you by providing a way to
+reproduce the behavior you're observing, or at least sharing as much
+relevant information as possible on the [RabbitMQ users mailing
+list][rmq-users].
+
+Please provide versions of the software used:
+
+ * RabbitMQ server
+ * Erlang
+ * Operating system version (and distribution, if applicable)
+ * All client libraries used
+ * RabbitMQ plugins (if applicable)
+
+The following information greatly helps in investigating and reproducing issues:
+
+ * RabbitMQ server logs
+ * A code example or terminal transcript that can be used to reproduce
+ * Full exception stack traces (a single line message is not enough!)
+ * `rabbitmqctl report` and `rabbitmqctl environment` output
+ * Other relevant details about the environment and workload, e.g. a traffic capture
+ * Feel free to edit out hostnames and other potentially sensitive information.
+
+To make collecting much of this and other environment information easier, use
+the [`rabbitmq-collect-env`][rmq-collect-env] script. It will produce an archive with
+server logs, operating system logs, output of certain diagnostics commands and so on.
+Please note that **no effort is made to scrub any information that may be sensitive**.
+
+### Pull Requests
+
+RabbitMQ projects use pull requests to discuss, collaborate on, and accept code contributions.
+Pull requests are the primary place for discussing code changes.
+
+Here's the recommended workflow:
+
+ * [Fork the repository][github-fork] or repositories you plan on contributing to. If multiple
+ repositories are involved in addressing the same issue, please use the same branch name
+ in each repository
+ * Create a branch with a descriptive name in the relevant repositories
+ * Make your changes, run tests (usually with `make tests`), commit with a
+ [descriptive message][git-commit-msgs], push to your fork
+ * Submit pull requests with an explanation of what has been changed and **why**
+ * Submit a filled out and signed [Contributor Agreement][ca-agreement] if needed (see below)
+ * Be patient. We will get to your pull request eventually
+
+If what you are going to work on is a substantial change, please first
+ask the core team for their opinion on the [RabbitMQ users mailing list][rmq-users].
+
+## Running Tests
+
+ make tests
+
+## Code of Conduct
+
+See [CODE_OF_CONDUCT.md](./CODE_OF_CONDUCT.md).
+
+## Contributor Agreement
+
+If you want to contribute a non-trivial change, please submit a signed
+copy of our [Contributor Agreement][ca-agreement] around the time you
+submit your pull request. This will make it much easier (in some
+cases, possible) for the RabbitMQ team at Pivotal to merge your
+contribution.
+
+## Where to Ask Questions
+
+If something isn't clear, feel free to ask on our [mailing list][rmq-users].
+
+[rmq-collect-env]: https://github.com/rabbitmq/support-tools/blob/master/scripts/rabbitmq-collect-env
+[git-commit-msgs]: https://chris.beams.io/posts/git-commit/
+[rmq-users]: https://groups.google.com/forum/#!forum/rabbitmq-users
+[ca-agreement]: https://cla.pivotal.io/sign/rabbitmq
+[github-fork]: https://help.github.com/articles/fork-a-repo/
diff --git a/deps/rabbitmq_auth_backend_oauth2/LICENSE b/deps/rabbitmq_auth_backend_oauth2/LICENSE
new file mode 100644
index 0000000000..f2da65d175
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_oauth2/LICENSE
@@ -0,0 +1,4 @@
+This package is licensed under the MPL 2.0. For the MPL 2.0, please see LICENSE-MPL-RabbitMQ.
+
+If you have any questions regarding licensing, please contact us at
+info@rabbitmq.com.
diff --git a/deps/rabbitmq_auth_backend_oauth2/LICENSE-MPL-RabbitMQ b/deps/rabbitmq_auth_backend_oauth2/LICENSE-MPL-RabbitMQ
new file mode 100644
index 0000000000..14e2f777f6
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_oauth2/LICENSE-MPL-RabbitMQ
@@ -0,0 +1,373 @@
+Mozilla Public License Version 2.0
+==================================
+
+1. Definitions
+--------------
+
+1.1. "Contributor"
+ means each individual or legal entity that creates, contributes to
+ the creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+ means the combination of the Contributions of others (if any) used
+ by a Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+ means Source Code Form to which the initial Contributor has attached
+ the notice in Exhibit A, the Executable Form of such Source Code
+ Form, and Modifications of such Source Code Form, in each case
+ including portions thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ (a) that the initial Contributor has attached the notice described
+ in Exhibit B to the Covered Software; or
+
+ (b) that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the
+ terms of a Secondary License.
+
+1.6. "Executable Form"
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+ means a work that combines Covered Software with other material, in
+ a separate file or files, that is not Covered Software.
+
+1.8. "License"
+ means this document.
+
+1.9. "Licensable"
+ means having the right to grant, to the maximum extent possible,
+ whether at the time of the initial grant or subsequently, any and
+ all of the rights conveyed by this License.
+
+1.10. "Modifications"
+ means any of the following:
+
+ (a) any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered
+ Software; or
+
+ (b) any new file in Source Code Form that contains any Covered
+ Software.
+
+1.11. "Patent Claims" of a Contributor
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the
+ License, by the making, using, selling, offering for sale, having
+ made, import, or transfer of either its Contributions or its
+ Contributor Version.
+
+1.12. "Secondary License"
+ means either the GNU General Public License, Version 2.0, the GNU
+ Lesser General Public License, Version 2.1, the GNU Affero General
+ Public License, Version 3.0, or any later versions of those
+ licenses.
+
+1.13. "Source Code Form"
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that
+ controls, is controlled by, or is under common control with You. For
+ purposes of this definition, "control" means (a) the power, direct
+ or indirect, to cause the direction or management of such entity,
+ whether by contract or otherwise, or (b) ownership of more than
+ fifty percent (50%) of the outstanding shares or beneficial
+ ownership of such entity.
+
+2. License Grants and Conditions
+--------------------------------
+
+2.1. Grants
+
+Each Contributor hereby grants You a world-wide, royalty-free,
+non-exclusive license:
+
+(a) under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+(b) under Patent Claims of such Contributor to make, use, sell, offer
+ for sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+The licenses granted in Section 2.1 with respect to any Contribution
+become effective for each Contribution on the date the Contributor first
+distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+The licenses granted in this Section 2 are the only rights granted under
+this License. No additional rights or licenses will be implied from the
+distribution or licensing of Covered Software under this License.
+Notwithstanding Section 2.1(b) above, no patent license is granted by a
+Contributor:
+
+(a) for any code that a Contributor has removed from Covered Software;
+ or
+
+(b) for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+(c) under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+This License does not grant any rights in the trademarks, service marks,
+or logos of any Contributor (except as may be necessary to comply with
+the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+No Contributor makes additional grants as a result of Your choice to
+distribute the Covered Software under a subsequent version of this
+License (see Section 10.2) or under the terms of a Secondary License (if
+permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+Each Contributor represents that the Contributor believes its
+Contributions are its original creation(s) or it has sufficient rights
+to grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+This License is not intended to limit any rights You have under
+applicable copyright doctrines of fair use, fair dealing, or other
+equivalents.
+
+2.7. Conditions
+
+Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
+in Section 2.1.
+
+3. Responsibilities
+-------------------
+
+3.1. Distribution of Source Form
+
+All distribution of Covered Software in Source Code Form, including any
+Modifications that You create or to which You contribute, must be under
+the terms of this License. You must inform recipients that the Source
+Code Form of the Covered Software is governed by the terms of this
+License, and how they can obtain a copy of this License. You may not
+attempt to alter or restrict the recipients' rights in the Source Code
+Form.
+
+3.2. Distribution of Executable Form
+
+If You distribute Covered Software in Executable Form then:
+
+(a) such Covered Software must also be made available in Source Code
+ Form, as described in Section 3.1, and You must inform recipients of
+ the Executable Form how they can obtain a copy of such Source Code
+ Form by reasonable means in a timely manner, at a charge no more
+ than the cost of distribution to the recipient; and
+
+(b) You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter
+ the recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+You may create and distribute a Larger Work under terms of Your choice,
+provided that You also comply with the requirements of this License for
+the Covered Software. If the Larger Work is a combination of Covered
+Software with a work governed by one or more Secondary Licenses, and the
+Covered Software is not Incompatible With Secondary Licenses, this
+License permits You to additionally distribute such Covered Software
+under the terms of such Secondary License(s), so that the recipient of
+the Larger Work may, at their option, further distribute the Covered
+Software under the terms of either this License or such Secondary
+License(s).
+
+3.4. Notices
+
+You may not remove or alter the substance of any license notices
+(including copyright notices, patent notices, disclaimers of warranty,
+or limitations of liability) contained within the Source Code Form of
+the Covered Software, except that You may alter any license notices to
+the extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+You may choose to offer, and to charge a fee for, warranty, support,
+indemnity or liability obligations to one or more recipients of Covered
+Software. However, You may do so only on Your own behalf, and not on
+behalf of any Contributor. You must make it absolutely clear that any
+such warranty, support, indemnity, or liability obligation is offered by
+You alone, and You hereby agree to indemnify every Contributor for any
+liability incurred by such Contributor as a result of warranty, support,
+indemnity or liability terms You offer. You may include additional
+disclaimers of warranty and limitations of liability specific to any
+jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+---------------------------------------------------
+
+If it is impossible for You to comply with any of the terms of this
+License with respect to some or all of the Covered Software due to
+statute, judicial order, or regulation then You must: (a) comply with
+the terms of this License to the maximum extent possible; and (b)
+describe the limitations and the code they affect. Such description must
+be placed in a text file included with all distributions of the Covered
+Software under this License. Except to the extent prohibited by statute
+or regulation, such description must be sufficiently detailed for a
+recipient of ordinary skill to be able to understand it.
+
+5. Termination
+--------------
+
+5.1. The rights granted under this License will terminate automatically
+if You fail to comply with any of its terms. However, if You become
+compliant, then the rights granted under this License from a particular
+Contributor are reinstated (a) provisionally, unless and until such
+Contributor explicitly and finally terminates Your grants, and (b) on an
+ongoing basis, if such Contributor fails to notify You of the
+non-compliance by some reasonable means prior to 60 days after You have
+come back into compliance. Moreover, Your grants from a particular
+Contributor are reinstated on an ongoing basis if such Contributor
+notifies You of the non-compliance by some reasonable means, this is the
+first time You have received notice of non-compliance with this License
+from such Contributor, and You become compliant prior to 30 days after
+Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+infringement claim (excluding declaratory judgment actions,
+counter-claims, and cross-claims) alleging that a Contributor Version
+directly or indirectly infringes any patent, then the rights granted to
+You by any and all Contributors for the Covered Software under Section
+2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all
+end user license agreements (excluding distributors and resellers) which
+have been validly granted by You or Your distributors under this License
+prior to termination shall survive termination.
+
+************************************************************************
+* *
+* 6. Disclaimer of Warranty *
+* ------------------------- *
+* *
+* Covered Software is provided under this License on an "as is" *
+* basis, without warranty of any kind, either expressed, implied, or *
+* statutory, including, without limitation, warranties that the *
+* Covered Software is free of defects, merchantable, fit for a *
+* particular purpose or non-infringing. The entire risk as to the *
+* quality and performance of the Covered Software is with You. *
+* Should any Covered Software prove defective in any respect, You *
+* (not any Contributor) assume the cost of any necessary servicing, *
+* repair, or correction. This disclaimer of warranty constitutes an *
+* essential part of this License. No use of any Covered Software is *
+* authorized under this License except under this disclaimer. *
+* *
+************************************************************************
+
+************************************************************************
+* *
+* 7. Limitation of Liability *
+* -------------------------- *
+* *
+* Under no circumstances and under no legal theory, whether tort *
+* (including negligence), contract, or otherwise, shall any *
+* Contributor, or anyone who distributes Covered Software as *
+* permitted above, be liable to You for any direct, indirect, *
+* special, incidental, or consequential damages of any character *
+* including, without limitation, damages for lost profits, loss of *
+* goodwill, work stoppage, computer failure or malfunction, or any *
+* and all other commercial damages or losses, even if such party *
+* shall have been informed of the possibility of such damages. This *
+* limitation of liability shall not apply to liability for death or *
+* personal injury resulting from such party's negligence to the *
+* extent applicable law prohibits such limitation. Some *
+* jurisdictions do not allow the exclusion or limitation of *
+* incidental or consequential damages, so this exclusion and *
+* limitation may not apply to You. *
+* *
+************************************************************************
+
+8. Litigation
+-------------
+
+Any litigation relating to this License may be brought only in the
+courts of a jurisdiction where the defendant maintains its principal
+place of business and such litigation shall be governed by laws of that
+jurisdiction, without reference to its conflict-of-law provisions.
+Nothing in this Section shall prevent a party's ability to bring
+cross-claims or counter-claims.
+
+9. Miscellaneous
+----------------
+
+This License represents the complete agreement concerning the subject
+matter hereof. If any provision of this License is held to be
+unenforceable, such provision shall be reformed only to the extent
+necessary to make it enforceable. Any law or regulation which provides
+that the language of a contract shall be construed against the drafter
+shall not be used to construe this License against a Contributor.
+
+10. Versions of the License
+---------------------------
+
+10.1. New Versions
+
+Mozilla Foundation is the license steward. Except as provided in Section
+10.3, no one other than the license steward has the right to modify or
+publish new versions of this License. Each version will be given a
+distinguishing version number.
+
+10.2. Effect of New Versions
+
+You may distribute the Covered Software under the terms of the version
+of the License under which You originally received the Covered Software,
+or under the terms of any subsequent version published by the license
+steward.
+
+10.3. Modified Versions
+
+If you create software not governed by this License, and you want to
+create a new license for such software, you may create and use a
+modified version of this License if you rename the license and remove
+any references to the name of the license steward (except to note that
+such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+Licenses
+
+If You choose to distribute Source Code Form that is Incompatible With
+Secondary Licenses under the terms of this version of the License, the
+notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+-------------------------------------------
+
+ This Source Code Form is subject to the terms of the Mozilla Public
+ License, v. 2.0. If a copy of the MPL was not distributed with this
+ file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular
+file, then You may include the notice in a location (such as a LICENSE
+file in a relevant directory) where a recipient would be likely to look
+for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+---------------------------------------------------------
+
+ This Source Code Form is "Incompatible With Secondary Licenses", as
+ defined by the Mozilla Public License, v. 2.0.
diff --git a/deps/rabbitmq_auth_backend_oauth2/Makefile b/deps/rabbitmq_auth_backend_oauth2/Makefile
new file mode 100644
index 0000000000..a80e608da2
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_oauth2/Makefile
@@ -0,0 +1,20 @@
+PROJECT = rabbitmq_auth_backend_oauth2
+PROJECT_DESCRIPTION = OAuth 2 and JWT-based AuthN and AuthZ backend
+
+BUILD_DEPS = rabbit_common
+DEPS = rabbit cowlib jose
+TEST_DEPS = cowboy rabbitmq_web_dispatch rabbitmq_ct_helpers rabbitmq_ct_client_helpers amqp_client
+
+DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-plugin.mk
+DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk
+
+dep_jose = hex 1.10.1
+
+# FIXME: Use erlang.mk patched for RabbitMQ, while waiting for PRs to be
+# reviewed and merged.
+
+ERLANG_MK_REPO = https://github.com/rabbitmq/erlang.mk.git
+ERLANG_MK_COMMIT = rabbitmq-tmp
+
+include rabbitmq-components.mk
+include erlang.mk
diff --git a/deps/rabbitmq_auth_backend_oauth2/README.md b/deps/rabbitmq_auth_backend_oauth2/README.md
new file mode 100644
index 0000000000..d98d259779
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_oauth2/README.md
@@ -0,0 +1,280 @@
+# OAuth 2.0 (JWT) Token Authorisation Backend for RabbitMQ
+
+[![Build Status](https://travis-ci.org/rabbitmq/rabbitmq-auth-backend-oauth2.svg?branch=master)](https://travis-ci.org/rabbitmq/rabbitmq-auth-backend-oauth2)
+
+This [RabbitMQ authentication/authorisation backend](https://www.rabbitmq.com/access-control.html) plugin lets applications (clients)
+and users authenticate and authorise using JWT-encoded [OAuth 2.0 access tokens](https://tools.ietf.org/html/rfc6749#section-1.4).
+
+It is not specific to, but is developed against, [Cloud Foundry UAA](https://github.com/cloudfoundry/uaa).
+
+An OAuth 2.0 primer is available [elsewhere on the Web](https://auth0.com/blog/oauth2-the-complete-guide/).
+
+
+## Supported RabbitMQ Versions
+
+The plugin targets and ships with RabbitMQ 3.8. Like all RabbitMQ [plugins](https://www.rabbitmq.com/plugins.html), it must be enabled before it can be used:
+
+``` shell
+rabbitmq-plugins enable rabbitmq_auth_backend_oauth2
+```
+
+
+## How it Works
+
+### Authorization Workflow
+
+This plugin does not communicate with a UAA server. It decodes an access token provided by
+the client and authorises a user based on the data stored in the token.
+
+The token can be any [JWT token](https://jwt.io/introduction/) that
+contains the `scope` and `aud` fields. How the token was
+retrieved (such as which grant type was used) is outside the scope
+of this plugin.
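+
+For illustration, the decoded claims of such a token might look like the
+following, shown here as an Erlang map (all values below are hypothetical):
+
+```erlang
+%% Hypothetical decoded JWT claims. The plugin reads `scope` (a list of
+%% scope strings) and `aud`, which must contain the configured
+%% `resource_server_id` (see below).
+#{<<"aud">>       => [<<"rabbitmq">>],
+  <<"scope">>     => [<<"rabbitmq.read:*/*">>, <<"rabbitmq.tag:administrator">>],
+  <<"client_id">> => <<"rabbit_client">>,
+  <<"exp">>       => 1600000000}.
+```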
+
+### Prerequisites
+
+To use this plugin:
+
+1. UAA should be configured to produce signed JWT tokens containing a set of RabbitMQ permission scopes
+2. All RabbitMQ nodes must be [configured to use the `rabbit_auth_backend_oauth2` backend](https://www.rabbitmq.com/access-control.html) (see the sketch below)
+3. All RabbitMQ nodes must be configured with a resource server ID (`resource_server_id`) that matches the scope prefix (e.g. `rabbitmq` in `rabbitmq.read:*/*`)
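+
+For step 2, a minimal `rabbitmq.config` sketch (mirroring the demo
+configuration files shipped with this plugin) might look like this:
+
+```erlang
+[
+  {rabbit, [
+      %% Try OAuth 2.0 first, then fall back to the internal database.
+      {auth_backends, [rabbit_auth_backend_oauth2, rabbit_auth_backend_internal]}
+  ]}
+].
+```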
+
+### Authorization Flow
+
+1. The client authorises with the OAuth 2.0 provider, requesting an `access_token` (using any desired grant type)
+2. The token scope returned by the OAuth 2.0 provider must include RabbitMQ resource scopes that follow a convention used by this plugin: `configure:%2F/foo` means "configure permissions for 'foo' in vhost '/'"
+3. The client passes the token as the password when connecting to a RabbitMQ node. **The username field is ignored**.
+4. The translated permissions are stored as part of the authenticated connection state and used the same
+   way permissions from RabbitMQ's internal database would be used.
+
+
+## Usage
+
+The plugin needs a UAA signing key to be configured in order to verify client-provided tokens.
+To get the signing key from a running UAA node, use the
+[token_key endpoint](https://docs.cloudfoundry.org/api/uaa/version/4.6.0/index.html#token-key-s)
+or [uaac](https://github.com/cloudfoundry/cf-uaac) (the `uaac signing key` command).
+
+The following fields are required: `kty`, `value`, `alg`, and `kid`.
+
+Assuming UAA reports the following signing key information:
+
+```
+uaac signing key
+ kty: RSA
+ e: AQAB
+ use: sig
+ kid: a-key-ID
+ alg: RS256
+ value: -----BEGIN PUBLIC KEY-----
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA2dP+vRn+Kj+S/oGd49kq
+6+CKNAduCC1raLfTH7B3qjmZYm45yDl+XmgK9CNmHXkho9qvmhdksdzDVsdeDlhK
+IdcIWadhqDzdtn1hj/22iUwrhH0bd475hlKcsiZ+oy/sdgGgAzvmmTQmdMqEXqV2
+B9q9KFBmo4Ahh/6+d4wM1rH9kxl0RvMAKLe+daoIHIjok8hCO4cKQQEw/ErBe4SF
+2cr3wQwCfF1qVu4eAVNVfxfy/uEvG3Q7x005P3TcK+QcYgJxav3lictSi5dyWLgG
+QAvkknWitpRK8KVLypEj5WKej6CF8nq30utn15FQg0JkHoqzwiCqqeen8GIPteI7
+VwIDAQAB
+-----END PUBLIC KEY-----
+ n: ANnT_r0Z_io_kv6BnePZKuvgijQHbggta2i30x-wd6o5mWJuOcg5fl5oCvQjZh15IaPar5oXZLHcw1bHXg5YSiHXCFmnYag83bZ9YY_9tolMK4R9G3eO-YZSnLImfqMv7HYBoAM75pk0JnTKhF6ldgfavShQZqOAIYf-vneMDNax_ZMZdEbzACi3vnWqCByI6JPIQju
+ HCkEBMPxKwXuEhdnK98EMAnxdalbuHgFTVX8X8v7hLxt0O8dNOT903CvkHGICcWr95YnLUouXcli4BkAL5JJ1oraUSvClS8qRI-Vino-ghfJ6t9LrZ9eRUINCZB6Ks8Igqqnnp_BiD7XiO1c
+```
+
+it will translate into the following configuration (in the [advanced RabbitMQ config format](https://www.rabbitmq.com/configure.html)):
+
+```erlang
+[
+ %% ...
+ %% backend configuration
+ {rabbitmq_auth_backend_oauth2, [
+ {resource_server_id, <<"my_rabbit_server">>},
+ %% UAA signing key configuration
+ {key_config, [
+ {signing_keys, #{
+ <<"a-key-ID">> => {pem, <<"-----BEGIN PUBLIC KEY-----
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA2dP+vRn+Kj+S/oGd49kq
+6+CKNAduCC1raLfTH7B3qjmZYm45yDl+XmgK9CNmHXkho9qvmhdksdzDVsdeDlhK
+IdcIWadhqDzdtn1hj/22iUwrhH0bd475hlKcsiZ+oy/sdgGgAzvmmTQmdMqEXqV2
+B9q9KFBmo4Ahh/6+d4wM1rH9kxl0RvMAKLe+daoIHIjok8hCO4cKQQEw/ErBe4SF
+2cr3wQwCfF1qVu4eAVNVfxfy/uEvG3Q7x005P3TcK+QcYgJxav3lictSi5dyWLgG
+QAvkknWitpRK8KVLypEj5WKej6CF8nq30utn15FQg0JkHoqzwiCqqeen8GIPteI7
+VwIDAQAB
+-----END PUBLIC KEY-----">>}
+ }}
+ ]}
+ ]}
+].
+```
+
+If a symmetric key is used, the configuration will look like this:
+
+```erlang
+[
+ {rabbitmq_auth_backend_oauth2, [
+ {resource_server_id, <<"my_rabbit_server">>},
+ {key_config, [
+ {signing_keys, #{
+ <<"a-key-ID">> => {map, #{<<"kty">> => <<"MAC">>,
+ <<"alg">> => <<"HS256">>,
+ <<"value">> => <<"my_signing_key">>}}
+ }}
+ ]}
+  ]}
+].
+```
+
+### Resource Server ID and Scope Prefixes
+
+OAuth 2.0 (and thus UAA-provided) tokens use scopes to communicate what set of permissions a particular
+client has been granted. Scopes are free-form strings.
+
+`resource_server_id` is a prefix used for scopes in UAA to avoid scope collisions (or unintended overlap).
+It is an empty string by default.
+
+### Scope-to-Permission Translation
+
+Scopes are translated into permission grants to RabbitMQ resources for the provided token.
+
+The current scope format is `<permission>:<vhost_pattern>/<name_pattern>[/<routing_key_pattern>]` where
+
+ * `<permission>` is an access permission (`configure`, `read`, or `write`)
+ * `<vhost_pattern>` is a wildcard pattern for the vhosts the token has access to
+ * `<name_pattern>` is a wildcard pattern for the resource name
+ * `<routing_key_pattern>` is an optional wildcard pattern for the routing key in topic authorization
+
+Wildcard patterns are strings with optional wildcard symbols `*` that match
+any sequence of characters.
+
+Wildcard patterns match as follows:
+
+ * `*` matches any string
+ * `foo*` matches any string starting with a `foo`
+ * `*foo` matches any string ending with a `foo`
+ * `foo*bar` matches any string starting with a `foo` and ending with a `bar`
+
+There can be multiple wildcards in a pattern:
+
+ * `start*middle*end`
+ * `*before*after*`
+
+**To use special characters like `*`, `%`, or `/` in a wildcard pattern,
+the pattern must be [URL-encoded](https://en.wikipedia.org/wiki/Percent-encoding).**
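+
+As an illustration only (this is not the plugin's actual implementation,
+and it skips the percent-decoding step mentioned above; see the test
+suites referenced below for the real behaviour), these matching rules can
+be sketched in Erlang by translating a pattern into an anchored regular
+expression:
+
+```erlang
+-module(wildcard_sketch).
+-export([match/2]).
+
+%% Translate the scope wildcard pattern into an anchored regex:
+%% literal fragments are escaped, each `*` becomes `.*`.
+match(Pattern, Subject) ->
+    Fragments = [escape(F) || F <- string:split(Pattern, "*", all)],
+    Regex = lists:flatten(["^", lists:join(".*", Fragments), "$"]),
+    re:run(Subject, Regex, [{capture, none}]) =:= match.
+
+%% Escape regex metacharacters in the literal (non-wildcard) fragments.
+escape(Fragment) ->
+    lists:flatmap(fun(C) ->
+                          case lists:member(C, ".^$+?()[]{}|\\") of
+                              true  -> [$\\, C];
+                              false -> [C]
+                          end
+                  end, Fragment).
+```
+
+For example, `wildcard_sketch:match("foo*bar", "foo-middle-bar")` returns
+`true`, while `wildcard_sketch:match("foo*", "bar")` returns `false`.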
+
+Here are some typical permission examples:
+
+- `read:*/*`(`read:*/*/*`) - read permissions to any resource on any vhost
+- `write:*/*`(`write:*/*/*`) - write permissions to any resource on any vhost
+- `read:vhost1/*`(`read:vhost1/*/*`) - read permissions to any resource on the `vhost1` vhost
+- `read:vhost1/some*` - read permissions to all the resources, starting with `some` on the `vhost1` vhost
+- `write:vhost1/some*/routing*` - topic write permissions to publish to an exchange starting with `some` with a routing key starting with `routing`
+
+See the [wildcard matching test suite](./test/wildcard_match_SUITE.erl) and [scopes test suite](./test/scope_SUITE.erl) for more examples.
+
+Scopes should be prefixed with `resource_server_id`. For example,
+if `resource_server_id` is "my_rabbit", a scope to enable read from any vhost will
+be `my_rabbit.read:*/*`.
+
+### Using a different token field for the Scope
+
+By default the plugin looks for the `scope` key in the token; you can configure it to also look in other fields using the `additional_rabbitmq_scopes` setting.
+
+```erlang
+[
+ {rabbitmq_auth_backend_oauth2, [
+ {resource_server_id, <<"my_rabbit_server">>},
+  {additional_rabbitmq_scopes, <<"my_custom_scope_key">>}
+  %% ...
+ ]}
+].
+```
+
+### Using Tokens with Clients
+
+A client must present a valid `access_token` acquired from an OAuth 2.0 provider (UAA) as the **password**
+in order to authenticate with RabbitMQ.
+
+To learn more about UAA/OAuth 2.0 clients see [UAA docs](https://github.com/cloudfoundry/uaa/blob/master/docs/UAA-APIs.rst#id73).
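+
+As a sketch, with the Erlang AMQP 0-9-1 client this could look as follows
+(the token value and virtual host below are placeholders):
+
+```erlang
+%% Assumes -include_lib("amqp_client/include/amqp_client.hrl") for the
+%% #amqp_params_network{} record definition.
+Token = <<"eyJhbGciOi...">>,  %% JWT access token obtained from UAA
+{ok, Conn} = amqp_connection:start(
+               #amqp_params_network{username     = <<"ignored">>,
+                                    password     = Token,
+                                    host         = "localhost",
+                                    virtual_host = <<"vhost1">>}).
+```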
+
+### Scope and Tags
+
+Users in RabbitMQ can have [tags associated with them](https://www.rabbitmq.com/access-control.html#user-tags).
+Tags are used to [control access to the management plugin](https://www.rabbitmq.com/management.html#permissions).
+
+
+In the OAuth context, tags can be added as part of the scope, using a format like `<resource_server_id>.tag:<tag>`. For
+example, if `resource_server_id` is "my_rabbit", a scope to grant access to the management plugin with
+the `monitoring` tag will be `my_rabbit.tag:monitoring`.
+
+## Examples
+
+The [demo](/demo) directory contains example configuration files that can be used to set up
+a development UAA server and issue tokens for accessing RabbitMQ
+resources.
+
+### UAA and RabbitMQ Config Files
+
+To run the demo you need to have a [UAA](https://github.com/cloudfoundry/uaa) node
+installed or built from source.
+
+To make UAA use a particular config file, such as those provided in the demo directory,
+export the `CLOUDFOUNDRY_CONFIG_PATH` environment variable. For example, to use symmetric keys,
+see the UAA config files under the `demo/symmetric_keys` directory.
+
+`demo/symmetric_keys/rabbit.config` contains a RabbitMQ configuration file that
+sets up a matching signing key on the RabbitMQ end.
+
+### Running UAA
+
+To run UAA with a custom config file path, use the following from the UAA git repository:
+
+```
+CLOUDFOUNDRY_CONFIG_PATH=<path_to_plugin>/demo/symmetric_keys ./gradlew run
+```
+
+### Running RabbitMQ
+
+```
+RABBITMQ_CONFIG_FILE=<path_to_plugin>/demo/symmetric_keys/rabbitmq rabbitmq-server
+## Or to run from source from the plugin directory
+make run-broker RABBITMQ_CONFIG_FILE=demo/symmetric_keys/rabbitmq
+```
+
+The `rabbitmq_auth_backend_oauth2` plugin must be enabled on the RabbitMQ node.
+
+### Asymmetric Key Example
+
+To use an RSA (asymmetric) key, you can set `CLOUDFOUNDRY_CONFIG_PATH` to `demo/rsa_keys`.
+This directory also contains a `rabbit.config` file, as well as a public key (`public_key.pem`)
+which will be used for signature verification.
+
+### UAA User and Permission Management
+
+UAA sets scopes from client scopes and user groups. The demo uses groups to set up
+a set of RabbitMQ permission scopes.
+
+The `demo/setup.sh` script can be used to configure a demo user and groups.
+The script will also create RabbitMQ resources associated with permissions.
+The script uses `uaac` and `bunny` (RabbitMQ client) and requires them to be installed.
+
+Both the UAA server and the RabbitMQ server should be running when you run the script.
+Configure the `UAA_HOST` (`localhost:8080/uaa` for a local machine) and
+`RABBITMQCTL` (the path to the `rabbitmqctl` script) environment variables before running it.
+
+```
+gem install cf-uaac
+gem install bunny
+RABBITMQCTL=<path_to_rabbitmqctl> demo/setup.sh
+```
+
+Please refer to `demo/setup.sh` for more information about configuring UAA permissions.
+
+The script will print access tokens which can be used to authenticate and authorise
+with RabbitMQ. When connecting, pass the token in the **password** field. The username
+field will be ignored as long as the token provides a client ID.
+
+
+## License and Copyright
+
+(c) 2016-2020 VMware, Inc. or its affiliates.
+
+Released under the Mozilla Public License 2.0, same as RabbitMQ.
diff --git a/deps/rabbitmq_auth_backend_oauth2/demo/declare_queues.rb b/deps/rabbitmq_auth_backend_oauth2/demo/declare_queues.rb
new file mode 100644
index 0000000000..9ff2145ccf
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_oauth2/demo/declare_queues.rb
@@ -0,0 +1,19 @@
+#!/usr/bin/env ruby
+require 'bunny'
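+
+# Each command line argument is expected to be of the form
+# "vhost/queue_name", e.g.:
+#   ruby declare_queues.rb uaa_vhost/some_queue other_vhost/other_queue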
+
+queues = ARGV
+
+queues.each do |q|
+ split = q.split("/")
+ vhost = split[0]
+ queue_name = split[1]
+
+ conn = Bunny.new(:host => ENV["BUNNY_HOST"] || "127.0.0.1",
+ :user => ENV["BUNNY_USER"] || "guest",
+ :pass => ENV["BUNNY_PASS"] || "guest",
+ :vhost => vhost)
+ conn.start
+ ch = conn.create_channel
+ ch.queue(queue_name)
+ conn.stop
+end
diff --git a/deps/rabbitmq_auth_backend_oauth2/demo/rsa_docker/Makefile b/deps/rabbitmq_auth_backend_oauth2/demo/rsa_docker/Makefile
new file mode 100644
index 0000000000..d93c77c15a
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_oauth2/demo/rsa_docker/Makefile
@@ -0,0 +1,101 @@
+PLATFORM := $(shell uname)
+
+ifeq ($(PLATFORM),Darwin)
+HOST_DOCKER := host.docker.internal
+endif
+
+ifeq ($(PLATFORM),Linux)
+HOST_DOCKER := $(shell ip addr show docker0 | grep -Po 'inet \K[\d.]+')
+endif
+
+.DEFAULT_GOAL = test
+
+.PHONY: run-uaa
+run-uaa:
+ docker exec uaa /bin/echo \
+ || docker run \
+ --detach \
+ --name uaa \
+ --publish 8080:8080 \
+ --mount type=bind,source=$(CURDIR)/uaa,target=/etc/uaa \
+ --env JAVA_OPTS="-Djava.security.egd=file:/dev/./urandom" \
+ pkuryloski/uaa:74.15.0
+
+.PHONY: run-rabbitmq
+run-rabbitmq: run-uaa
+ docker exec rabbitmq /bin/echo \
+ || docker run \
+ --detach \
+ --name rabbitmq \
+ --publish 5672:5672 \
+ --publish 15672:15672 \
+ --mount type=bind,source="$(CURDIR)"/rabbitmq,target=/etc/rabbitmq \
+ rabbitmq:3.8-management
+
+.PHONY: wait-rabbitmq
+wait-rabbitmq: run-rabbitmq
+ docker exec rabbitmq rabbitmq-diagnostics ping -q \
+ || (sleep 15; docker exec rabbitmq rabbitmq-diagnostics ping -q)
+
+.PHONY: vhosts
+vhosts: wait-rabbitmq
+ docker exec rabbitmq rabbitmqctl add_vhost uaa_vhost
+ docker exec rabbitmq rabbitmqctl add_vhost other_vhost
+ docker exec rabbitmq rabbitmqctl set_permissions -p uaa_vhost admin '.*' '.*' '.*'
+ docker exec rabbitmq rabbitmqctl set_permissions -p other_vhost admin '.*' '.*' '.*'
+
+.PHONY: uaac-bunny-image
+uaac-bunny-image:
+ docker images | grep "^uaac\s" \
+ || docker run --name uaac ruby:2.5 gem install cf-uaac bunny
+ docker images | grep "^uaac\s" \
+ || (docker commit $$(docker ps -aqf "name=^uaac$$") uaac:latest \
+ && docker rm uaac)
+
+.PHONY: queues
+queues: vhosts uaac-bunny-image
+ docker run -it --rm \
+ --name declare-queues \
+ --mount type=bind,source="$(CURDIR)"/../declare_queues.rb,target=/workspace/declare_queues.rb \
+ -w /workspace \
+ uaac \
+ /bin/bash -c "BUNNY_HOST=$(HOST_DOCKER) \
+ BUNNY_USER=admin \
+ BUNNY_PASS=rabo2 \
+ ruby declare_queues.rb \
+ uaa_vhost/some_queue \
+ uaa_vhost/other_queue \
+ other_vhost/some_queue \
+ other_vhost/other_queue"
+
+.PHONY: tokens
+tokens: queues uaac-bunny-image
+ docker run -it --rm \
+ --name fetch-tokens \
+ uaac \
+ /bin/bash -c " \
+ uaac target http://$(HOST_DOCKER):8080/uaa \
+ && uaac signing key \
+ && uaac token owner get rabbit_client rabbit_super -s rabbit_secret -p rabbit_super \
+ && uaac token owner get rabbit_client rabbit_nosuper -s rabbit_secret -p rabbit_nosuper \
+ && uaac context rabbit_nosuper \
+ && uaac context rabbit_super"
+
+.PHONY: check-token
+check-token: queues uaac-bunny-image
+ docker run -it --rm \
+ --name check-token \
+ --mount type=bind,source="$(CURDIR)"/check_token.rb,target=/workspace/check_token.rb \
+ uaac \
+ /bin/bash -c " \
+ uaac target http://$(HOST_DOCKER):8080/uaa \
+ && uaac token owner get rabbit_client rabbit_super -s rabbit_secret -p rabbit_super \
+ && BUNNY_HOST=$(HOST_DOCKER) ruby /workspace/check_token.rb"
+
+.PHONY: test
+test: check-token
+
+.PHONY: cleanup
+cleanup:
+ docker stop rabbitmq uaa
+	docker rm rabbitmq uaa
\ No newline at end of file
diff --git a/deps/rabbitmq_auth_backend_oauth2/demo/rsa_docker/check_token.rb b/deps/rabbitmq_auth_backend_oauth2/demo/rsa_docker/check_token.rb
new file mode 100644
index 0000000000..051adc835c
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_oauth2/demo/rsa_docker/check_token.rb
@@ -0,0 +1,29 @@
+#!/usr/bin/env ruby
+require 'net/http'
+require 'bunny'
+require 'yaml'
+
+host = ENV["BUNNY_HOST"]
+
+config = YAML.load_file('/root/.uaac.yml')
+access_token = config["http://#{host}:8080/uaa"]["contexts"]["rabbit_super"]["access_token"]
+
+conn = Bunny.new(:host => host,
+ :user => "",
+ :pass => access_token,
+ :vhost => "uaa_vhost")
+conn.start
+puts "Connected via AMQP!"
+conn.stop
+
+uri = URI("http://#{host}:15672/api/vhosts")
+req = Net::HTTP::Get.new(uri)
+req['Authorization'] = "Bearer #{access_token}"
+
+res = Net::HTTP.start(uri.hostname, uri.port) { |http|
+ http.request(req)
+}
+
+raise "Could not connect to managment API." unless res.is_a?(Net::HTTPSuccess)
+puts "Connected via Management Plugin API!"
+puts res.body
diff --git a/deps/rabbitmq_auth_backend_oauth2/demo/rsa_docker/rabbitmq/enabled_plugins b/deps/rabbitmq_auth_backend_oauth2/demo/rsa_docker/rabbitmq/enabled_plugins
new file mode 100644
index 0000000000..a23c6a883a
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_oauth2/demo/rsa_docker/rabbitmq/enabled_plugins
@@ -0,0 +1 @@
+[rabbitmq_management,rabbitmq_auth_backend_oauth2].
\ No newline at end of file
diff --git a/deps/rabbitmq_auth_backend_oauth2/demo/rsa_docker/rabbitmq/rabbitmq.config b/deps/rabbitmq_auth_backend_oauth2/demo/rsa_docker/rabbitmq/rabbitmq.config
new file mode 100644
index 0000000000..a85cca56bb
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_oauth2/demo/rsa_docker/rabbitmq/rabbitmq.config
@@ -0,0 +1,30 @@
+[
+ {rabbit, [
+ {default_user, <<"admin">>},
+ {default_pass, <<"rabo2">>},
+ {auth_backends, [rabbit_auth_backend_oauth2, rabbit_auth_backend_internal]}
+ ]},
+ {rabbitmq_management, [
+ {enable_uaa, true},
+ {uaa_client_id, "rabbit_client"},
+ {uaa_location, "http://localhost:8080/uaa"}
+ ]},
+ {rabbitmq_auth_backend_oauth2, [
+ {resource_server_id, <<"rabbitmq">>},
+ {key_config, [
+ {default_key, <<"key-1">>},
+ {signing_keys,
+ #{<<"key-1">> => {pem, <<"-----BEGIN PUBLIC KEY-----
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA2dP+vRn+Kj+S/oGd49kq
+6+CKNAduCC1raLfTH7B3qjmZYm45yDl+XmgK9CNmHXkho9qvmhdksdzDVsdeDlhK
+IdcIWadhqDzdtn1hj/22iUwrhH0bd475hlKcsiZ+oy/sdgGgAzvmmTQmdMqEXqV2
+B9q9KFBmo4Ahh/6+d4wM1rH9kxl0RvMAKLe+daoIHIjok8hCO4cKQQEw/ErBe4SF
+2cr3wQwCfF1qVu4eAVNVfxfy/uEvG3Q7x005P3TcK+QcYgJxav3lictSi5dyWLgG
+QAvkknWitpRK8KVLypEj5WKej6CF8nq30utn15FQg0JkHoqzwiCqqeen8GIPteI7
+VwIDAQAB
+-----END PUBLIC KEY-----">>}
+ }
+ }]
+ }
+ ]}
+].
diff --git a/deps/rabbitmq_auth_backend_oauth2/demo/rsa_docker/uaa/log4j2.properties b/deps/rabbitmq_auth_backend_oauth2/demo/rsa_docker/uaa/log4j2.properties
new file mode 100644
index 0000000000..cc47df122c
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_oauth2/demo/rsa_docker/uaa/log4j2.properties
@@ -0,0 +1,28 @@
+status = error
+dest = err
+name = UaaLog
+
+property.log_pattern=[%d{yyyy-MM-dd HH:mm:ss.SSS}] uaa%X{context} - %pid [%t] .... %5p --- %c{1}: %replace{%m}{(?<=password=|client_secret=)([^&]*)}{<redacted>}%n
+
+appender.uaaDefaultAppender.type = Console
+appender.uaaDefaultAppender.name = UaaDefaultAppender
+appender.uaaDefaultAppender.layout.type = PatternLayout
+appender.uaaDefaultAppender.layout.pattern = [UAA] ${log_pattern}
+
+appender.uaaAuditAppender.type = Console
+appender.uaaAuditAppender.name = UaaAuditAppender
+appender.uaaAuditAppender.layout.type = PatternLayout
+appender.uaaAuditAppender.layout.pattern = [UAA_AUDIT] ${log_pattern}
+
+rootLogger.level = info
+rootLogger.appenderRef.uaaDefaultAppender.ref = UaaDefaultAppender
+
+logger.UAAAudit.name = UAA.Audit
+logger.UAAAudit.level = info
+logger.UAAAudit.additivity = true
+logger.UAAAudit.appenderRef.auditEventLog.ref = UaaAuditAppender
+
+logger.cfIdentity.name = org.cloudfoundry.identity
+logger.cfIdentity.level = info
+logger.cfIdentity.additivity = false
+logger.cfIdentity.appenderRef.uaaDefaultAppender.ref = UaaDefaultAppender
\ No newline at end of file
diff --git a/deps/rabbitmq_auth_backend_oauth2/demo/rsa_docker/uaa/uaa.yml b/deps/rabbitmq_auth_backend_oauth2/demo/rsa_docker/uaa/uaa.yml
new file mode 100644
index 0000000000..ab26e5d0c5
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_oauth2/demo/rsa_docker/uaa/uaa.yml
@@ -0,0 +1,104 @@
+logging:
+ config: /etc/uaa/log4j2.properties
+
+issuer:
+ uri: http://localhost:8080/uaa
+
+encryption:
+ active_key_label: CHANGE-THIS-KEY
+ encryption_keys:
+ - label: CHANGE-THIS-KEY
+ passphrase: CHANGEME
+
+login:
+ serviceProviderKey: |
+ -----BEGIN RSA PRIVATE KEY-----
+ MIICXQIBAAKBgQDHtC5gUXxBKpEqZTLkNvFwNGnNIkggNOwOQVNbpO0WVHIivig5
+ L39WqS9u0hnA+O7MCA/KlrAR4bXaeVVhwfUPYBKIpaaTWFQR5cTR1UFZJL/OF9vA
+ fpOwznoD66DDCnQVpbCjtDYWX+x6imxn8HCYxhMol6ZnTbSsFW6VZjFMjQIDAQAB
+ AoGAVOj2Yvuigi6wJD99AO2fgF64sYCm/BKkX3dFEw0vxTPIh58kiRP554Xt5ges
+ 7ZCqL9QpqrChUikO4kJ+nB8Uq2AvaZHbpCEUmbip06IlgdA440o0r0CPo1mgNxGu
+ lhiWRN43Lruzfh9qKPhleg2dvyFGQxy5Gk6KW/t8IS4x4r0CQQD/dceBA+Ndj3Xp
+ ubHfxqNz4GTOxndc/AXAowPGpge2zpgIc7f50t8OHhG6XhsfJ0wyQEEvodDhZPYX
+ kKBnXNHzAkEAyCA76vAwuxqAd3MObhiebniAU3SnPf2u4fdL1EOm92dyFs1JxyyL
+ gu/DsjPjx6tRtn4YAalxCzmAMXFSb1qHfwJBAM3qx3z0gGKbUEWtPHcP7BNsrnWK
+ vw6By7VC8bk/ffpaP2yYspS66Le9fzbFwoDzMVVUO/dELVZyBnhqSRHoXQcCQQCe
+ A2WL8S5o7Vn19rC0GVgu3ZJlUrwiZEVLQdlrticFPXaFrn3Md82ICww3jmURaKHS
+ N+l4lnMda79eSp3OMmq9AkA0p79BvYsLshUJJnvbk76pCjR28PK4dV1gSDUEqQMB
+ qy45ptdwJLqLJCeNoR0JUcDNIRhOCuOPND7pcMtX6hI/
+ -----END RSA PRIVATE KEY-----
+ serviceProviderKeyPassword: password
+ serviceProviderCertificate: |
+ -----BEGIN CERTIFICATE-----
+ MIIDSTCCArKgAwIBAgIBADANBgkqhkiG9w0BAQQFADB8MQswCQYDVQQGEwJhdzEO
+ MAwGA1UECBMFYXJ1YmExDjAMBgNVBAoTBWFydWJhMQ4wDAYDVQQHEwVhcnViYTEO
+ MAwGA1UECxMFYXJ1YmExDjAMBgNVBAMTBWFydWJhMR0wGwYJKoZIhvcNAQkBFg5h
+ cnViYUBhcnViYS5hcjAeFw0xNTExMjAyMjI2MjdaFw0xNjExMTkyMjI2MjdaMHwx
+ CzAJBgNVBAYTAmF3MQ4wDAYDVQQIEwVhcnViYTEOMAwGA1UEChMFYXJ1YmExDjAM
+ BgNVBAcTBWFydWJhMQ4wDAYDVQQLEwVhcnViYTEOMAwGA1UEAxMFYXJ1YmExHTAb
+ BgkqhkiG9w0BCQEWDmFydWJhQGFydWJhLmFyMIGfMA0GCSqGSIb3DQEBAQUAA4GN
+ ADCBiQKBgQDHtC5gUXxBKpEqZTLkNvFwNGnNIkggNOwOQVNbpO0WVHIivig5L39W
+ qS9u0hnA+O7MCA/KlrAR4bXaeVVhwfUPYBKIpaaTWFQR5cTR1UFZJL/OF9vAfpOw
+ znoD66DDCnQVpbCjtDYWX+x6imxn8HCYxhMol6ZnTbSsFW6VZjFMjQIDAQABo4Ha
+ MIHXMB0GA1UdDgQWBBTx0lDzjH/iOBnOSQaSEWQLx1syGDCBpwYDVR0jBIGfMIGc
+ gBTx0lDzjH/iOBnOSQaSEWQLx1syGKGBgKR+MHwxCzAJBgNVBAYTAmF3MQ4wDAYD
+ VQQIEwVhcnViYTEOMAwGA1UEChMFYXJ1YmExDjAMBgNVBAcTBWFydWJhMQ4wDAYD
+ VQQLEwVhcnViYTEOMAwGA1UEAxMFYXJ1YmExHTAbBgkqhkiG9w0BCQEWDmFydWJh
+ QGFydWJhLmFyggEAMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEEBQADgYEAYvBJ
+ 0HOZbbHClXmGUjGs+GS+xC1FO/am2suCSYqNB9dyMXfOWiJ1+TLJk+o/YZt8vuxC
+ KdcZYgl4l/L6PxJ982SRhc83ZW2dkAZI4M0/Ud3oePe84k8jm3A7EvH5wi5hvCkK
+ RpuRBwn3Ei+jCRouxTbzKPsuCVB+1sNyxMTXzf0=
+ -----END CERTIFICATE-----
+
+# The secret that an external login server will use to authenticate to the UAA using the id `login`
+LOGIN_SECRET: loginsecret
+
+oauth:
+ clients:
+ rabbit_client:
+ id: rabbit_client
+ secret: rabbit_secret
+ authorized-grant-types: password,implicit
+ scope: rabbitmq.*,openid
+ authorities: rabbitmq
+ redirect-uri: http://localhost:15672/**
+
+scim:
+ username_pattern: '[a-z0-9+\-_.@]+'
+ users:
+ - rabbit_super|rabbit_super|rabbit_super@example.com|Rabbit|Super|rabbitmq.read:*/*,rabbitmq.write:*/*,rabbitmq.configure:*/*,rabbitmq.tag:administrator
+ - rabbit_nosuper|rabbit_nosuper|rabbit_nosuper@example.com|Rabbit|Nosuper|rabbitmq.write:uaa_vhost/*,rabbitmq.read:uaa_vhost/some*
+
+jwt:
+ token:
+ policy:
+ keys:
+ key-1:
+ signingKey: |
+ -----BEGIN RSA PRIVATE KEY-----
+ MIIEpAIBAAKCAQEA2dP+vRn+Kj+S/oGd49kq6+CKNAduCC1raLfTH7B3qjmZYm45
+ yDl+XmgK9CNmHXkho9qvmhdksdzDVsdeDlhKIdcIWadhqDzdtn1hj/22iUwrhH0b
+ d475hlKcsiZ+oy/sdgGgAzvmmTQmdMqEXqV2B9q9KFBmo4Ahh/6+d4wM1rH9kxl0
+ RvMAKLe+daoIHIjok8hCO4cKQQEw/ErBe4SF2cr3wQwCfF1qVu4eAVNVfxfy/uEv
+ G3Q7x005P3TcK+QcYgJxav3lictSi5dyWLgGQAvkknWitpRK8KVLypEj5WKej6CF
+ 8nq30utn15FQg0JkHoqzwiCqqeen8GIPteI7VwIDAQABAoIBAFsB5FszYepa11o3
+ 4zSPxgv4qyUjuYf3GfoNW0rRGp3nJLtoHAIYa0CcLX9kzsQfmLtxoY46mdppxr8Z
+ 2qUZpBdRVO7ILNfyXhthdQKI2NuyFDhtYK1p8bx6BXe095HMcvm2ohjXzPdTP4Hq
+ HrXAYXjUndUbClbjMJ82AnPF8pM70kBq7g733UqkdfrMuv6/d95Jiyw4cC7dGsI3
+ Ruz9DGhiAyCBtQ0tUB+6Kqn5DChSB+ccfMJjr6GnCVYmERxEQ5DJCTIX8am8C6KX
+ mAxUwHMTsEGBU6GzhcUgAwUFEK3I9RptdlRFp7F8E/P0LxmPkFdgaBNUhrdnB7Y4
+ 01n1R1kCgYEA/huFJgwVWSBSK/XIouFuQrxZOI9JbBbdmpFT7SBGCdFg26Or9y7j
+ +N5HE7yuoZ9PkBh17zzosZdsJhGocRYvO0LSq8cXvKXKCwn2fTMM7uJ/oQe68sxG
+ cF/fC0M/8LvRESWShH920rrERu0s161RuasdOPre0aXu7ZQzkQ68O6MCgYEA23NO
+ DHKNblBOdFEWsvotLqV8DrIbQ4le7sSgQr56/bdn9GScZk2JU0f+pqzpiGUy9bIt
+ 6uujvt5ar0IvpIQVdjf3dbp6Fy+Dwhd4yTR4dMdDECest7jL++/21x8Y0ywFhBIK
+ yEd+QxpOLXP6qaSKTGxL2rnTXRjl8/g629xQPL0CgYEAkNNOh+jLIgjxzGxA9dRV
+ 62M91qaTyi8eDkJV+wgx4taaxZP7Jt5qwCSvjegz/5m01wOZ88hbNxx+XxQhVJK4
+ SKZFO/I07Sfwh2oeOi0maeBdrYGiY09ZtiJuFRU3FBV3irZHU4zyRBh+VY5HyITX
+ 12JXPWp+JC7WhkG5QiuLzNECgYEA15OBzICLpx6Es4clAVT6JaSzJcyZM9MyyuOl
+ e2ubbrpJCK/9ZBIvIPzMj/e0wiSH1wzeRrSM+ud7tkcSfk6ytptsIN67KSOoD3b3
+ VNCStEU7ABe5eBG1cRzeI52MyYWpNYBzzyNMSacBvWz9hMD6ivCn44pAtGfNHclw
+ KKNYvxECgYBOamf25md9Jy6rtQsJVEJWw+8sB4lBlKEEadc5qekR7ZQ0hwj8CnTm
+ WOo856ynI28Sog62iw8F/do/z0B29RuGuxw+prkBkn3lg/VQXEitzqcYvota6osa
+ 8XSfaPiTyQwWpzbFNZzzemlTsIDiF3UqwkHvWaMYPDf4Ng3cokPPxw==
+ -----END RSA PRIVATE KEY-----
diff --git a/deps/rabbitmq_auth_backend_oauth2/demo/rsa_keys/rabbitmq.config b/deps/rabbitmq_auth_backend_oauth2/demo/rsa_keys/rabbitmq.config
new file mode 100644
index 0000000000..5a0e45ddf6
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_oauth2/demo/rsa_keys/rabbitmq.config
@@ -0,0 +1,23 @@
+[
+ {rabbit, [
+ {auth_backends, [rabbit_auth_backend_oauth2, rabbit_auth_backend_internal]}
+ ]},
+ {rabbitmq_auth_backend_oauth2, [
+ {resource_server_id, <<"rabbitmq">>},
+ {key_config, [
+ {default_key, <<"legacy-token-key">>},
+ {signing_keys,
+ #{<<"legacy-token-key">> => {pem, <<"-----BEGIN PUBLIC KEY-----
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA2dP+vRn+Kj+S/oGd49kq
+6+CKNAduCC1raLfTH7B3qjmZYm45yDl+XmgK9CNmHXkho9qvmhdksdzDVsdeDlhK
+IdcIWadhqDzdtn1hj/22iUwrhH0bd475hlKcsiZ+oy/sdgGgAzvmmTQmdMqEXqV2
+B9q9KFBmo4Ahh/6+d4wM1rH9kxl0RvMAKLe+daoIHIjok8hCO4cKQQEw/ErBe4SF
+2cr3wQwCfF1qVu4eAVNVfxfy/uEvG3Q7x005P3TcK+QcYgJxav3lictSi5dyWLgG
+QAvkknWitpRK8KVLypEj5WKej6CF8nq30utn15FQg0JkHoqzwiCqqeen8GIPteI7
+VwIDAQAB
+-----END PUBLIC KEY-----">>}
+ }
+ }]
+ }
+ ]}
+].
diff --git a/deps/rabbitmq_auth_backend_oauth2/demo/rsa_keys/uaa.yml b/deps/rabbitmq_auth_backend_oauth2/demo/rsa_keys/uaa.yml
new file mode 100644
index 0000000000..16759e3ee4
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_oauth2/demo/rsa_keys/uaa.yml
@@ -0,0 +1,41 @@
+jwt:
+ token:
+ signing-key: |
+ -----BEGIN RSA PRIVATE KEY-----
+ MIIEpAIBAAKCAQEA2dP+vRn+Kj+S/oGd49kq6+CKNAduCC1raLfTH7B3qjmZYm45
+ yDl+XmgK9CNmHXkho9qvmhdksdzDVsdeDlhKIdcIWadhqDzdtn1hj/22iUwrhH0b
+ d475hlKcsiZ+oy/sdgGgAzvmmTQmdMqEXqV2B9q9KFBmo4Ahh/6+d4wM1rH9kxl0
+ RvMAKLe+daoIHIjok8hCO4cKQQEw/ErBe4SF2cr3wQwCfF1qVu4eAVNVfxfy/uEv
+ G3Q7x005P3TcK+QcYgJxav3lictSi5dyWLgGQAvkknWitpRK8KVLypEj5WKej6CF
+ 8nq30utn15FQg0JkHoqzwiCqqeen8GIPteI7VwIDAQABAoIBAFsB5FszYepa11o3
+ 4zSPxgv4qyUjuYf3GfoNW0rRGp3nJLtoHAIYa0CcLX9kzsQfmLtxoY46mdppxr8Z
+ 2qUZpBdRVO7ILNfyXhthdQKI2NuyFDhtYK1p8bx6BXe095HMcvm2ohjXzPdTP4Hq
+ HrXAYXjUndUbClbjMJ82AnPF8pM70kBq7g733UqkdfrMuv6/d95Jiyw4cC7dGsI3
+ Ruz9DGhiAyCBtQ0tUB+6Kqn5DChSB+ccfMJjr6GnCVYmERxEQ5DJCTIX8am8C6KX
+ mAxUwHMTsEGBU6GzhcUgAwUFEK3I9RptdlRFp7F8E/P0LxmPkFdgaBNUhrdnB7Y4
+ 01n1R1kCgYEA/huFJgwVWSBSK/XIouFuQrxZOI9JbBbdmpFT7SBGCdFg26Or9y7j
+ +N5HE7yuoZ9PkBh17zzosZdsJhGocRYvO0LSq8cXvKXKCwn2fTMM7uJ/oQe68sxG
+ cF/fC0M/8LvRESWShH920rrERu0s161RuasdOPre0aXu7ZQzkQ68O6MCgYEA23NO
+ DHKNblBOdFEWsvotLqV8DrIbQ4le7sSgQr56/bdn9GScZk2JU0f+pqzpiGUy9bIt
+ 6uujvt5ar0IvpIQVdjf3dbp6Fy+Dwhd4yTR4dMdDECest7jL++/21x8Y0ywFhBIK
+ yEd+QxpOLXP6qaSKTGxL2rnTXRjl8/g629xQPL0CgYEAkNNOh+jLIgjxzGxA9dRV
+ 62M91qaTyi8eDkJV+wgx4taaxZP7Jt5qwCSvjegz/5m01wOZ88hbNxx+XxQhVJK4
+ SKZFO/I07Sfwh2oeOi0maeBdrYGiY09ZtiJuFRU3FBV3irZHU4zyRBh+VY5HyITX
+ 12JXPWp+JC7WhkG5QiuLzNECgYEA15OBzICLpx6Es4clAVT6JaSzJcyZM9MyyuOl
+ e2ubbrpJCK/9ZBIvIPzMj/e0wiSH1wzeRrSM+ud7tkcSfk6ytptsIN67KSOoD3b3
+ VNCStEU7ABe5eBG1cRzeI52MyYWpNYBzzyNMSacBvWz9hMD6ivCn44pAtGfNHclw
+ KKNYvxECgYBOamf25md9Jy6rtQsJVEJWw+8sB4lBlKEEadc5qekR7ZQ0hwj8CnTm
+ WOo856ynI28Sog62iw8F/do/z0B29RuGuxw+prkBkn3lg/VQXEitzqcYvota6osa
+ 8XSfaPiTyQwWpzbFNZzzemlTsIDiF3UqwkHvWaMYPDf4Ng3cokPPxw==
+ -----END RSA PRIVATE KEY-----
+ verification-key: |
+ -----BEGIN PUBLIC KEY-----
+ MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA2dP+vRn+Kj+S/oGd49kq
+ 6+CKNAduCC1raLfTH7B3qjmZYm45yDl+XmgK9CNmHXkho9qvmhdksdzDVsdeDlhK
+ IdcIWadhqDzdtn1hj/22iUwrhH0bd475hlKcsiZ+oy/sdgGgAzvmmTQmdMqEXqV2
+ B9q9KFBmo4Ahh/6+d4wM1rH9kxl0RvMAKLe+daoIHIjok8hCO4cKQQEw/ErBe4SF
+ 2cr3wQwCfF1qVu4eAVNVfxfy/uEvG3Q7x005P3TcK+QcYgJxav3lictSi5dyWLgG
+ QAvkknWitpRK8KVLypEj5WKej6CF8nq30utn15FQg0JkHoqzwiCqqeen8GIPteI7
+ VwIDAQAB
+ -----END PUBLIC KEY-----
+
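The public key in rabbitmq.config above must be the counterpart of this private signing key. If the pair is ever rotated, the match can be checked by re-deriving the public half with openssl; in this sketch, signing.pem is assumed to hold the private key PEM block from uaa.yml:

    # derive the public key from the private signing key and compare
    # it against the one embedded in rabbitmq.config
    openssl rsa -in signing.pem -pubout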
diff --git a/deps/rabbitmq_auth_backend_oauth2/demo/setup.sh b/deps/rabbitmq_auth_backend_oauth2/demo/setup.sh
new file mode 100755
index 0000000000..5df538dde5
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_oauth2/demo/setup.sh
@@ -0,0 +1,81 @@
+#!/usr/bin/env bash
+
+# Fail fast unless the cf-uaac and bunny gems are installed
+gem list cf-uaac | grep -q cf-uaac || exit 1
+gem list bunny | grep -q bunny || exit 1
+
+target=${UAA_HOST:="http://localhost:8080/uaa"}
+# export RABBITMQCTL to use a different ctl, e.g. if the node was built from source
+ctl=${RABBITMQCTL:="rabbitmqctl"}
+
+# Target the server
+uaac target $target
+
+# Get admin client to manage UAA
+uaac token client get admin -s adminsecret
+
+# Set permission to list signing keys
+uaac client update admin --authorities "clients.read clients.secret clients.write uaa.admin clients.admin scim.write scim.read uaa.resource"
+
+# Rabbit UAA user
+# The username and password are needed to retrieve access tokens;
+# RabbitMQ does not have to be aware of these credentials.
+uaac user add rabbit_super -p rabbit_super --email rabbit_super@example.com
+uaac user add rabbit_nosuper -p rabbit_nosuper --email rabbit_nosuper@example.com
+
+# Create permissions
+# UAA groups become scopes, which define RabbitMQ permissions and tags.
+uaac group add "rabbitmq.read:*/*"
+uaac group add "rabbitmq.write:*/*"
+uaac group add "rabbitmq.configure:*/*"
+
+uaac group add "rabbitmq.write:uaa_vhost/*"
+uaac group add "rabbitmq.read:uaa_vhost/some*"
+
+uaac group add "rabbitmq.tag:administrator"
+
+# Assign groups to users.
+# rabbit_super will be able to read, write and configure any resource,
+# and to connect to the management plugin as well.
+uaac member add "rabbitmq.read:*/*" rabbit_super
+uaac member add "rabbitmq.write:*/*" rabbit_super
+uaac member add "rabbitmq.configure:*/*" rabbit_super
+uaac member add "rabbitmq.tag:administrator" rabbit_super
+
+# rabbit_nosuper will be able to read uaa_vhost resources whose names
+# start with 'some' and to write to any uaa_vhost resource.
+uaac member add "rabbitmq.write:uaa_vhost/*" rabbit_nosuper
+uaac member add "rabbitmq.read:uaa_vhost/some*" rabbit_nosuper
+
+# Configure RabbitMQ
+# Add uaa_vhost and other_vhost to check permissions against
+$ctl add_vhost uaa_vhost
+$ctl add_vhost other_vhost
+
+# add e.g. --access_token_validity 60 --refresh_token_validity 3600 to experiment with token validity
+uaac client add rabbit_client --name rabbit_client --scope 'rabbitmq.*' --authorized_grant_types password,client_credentials --authorities rabbitmq --secret rabbit_secret --redirect_uri 'http://localhost:15672'
+
+# Set guest user permissions to create queues.
+$ctl set_permissions -p uaa_vhost guest '.*' '.*' '.*'
+$ctl set_permissions -p other_vhost guest '.*' '.*' '.*'
+
+# Get access tokens
+uaac token owner get rabbit_client rabbit_super -s rabbit_secret -p rabbit_super
+uaac token owner get rabbit_client rabbit_nosuper -s rabbit_secret -p rabbit_nosuper
+
+echo "Auth info for rabbit_nosuper user
+This user will have read access to uaa_vhost resources,
+which name start with some and write access to all uaa_vhost resources.
+Use access_token as a RabbitMQ password, the username is ignored"
+echo
+uaac context rabbit_nosuper
+echo
+
+echo "Auth info for rabbit_super user
+This user will have full access to all rabbitmq vhosts.
+Use access_token as a RabbitMQ password, the username is ignored"
+echo
+uaac context rabbit_super
+echo
+
+# Create queues
+ruby demo/declare_queues.rb uaa_vhost/some_queue uaa_vhost/other_queue other_vhost/some_queue other_vhost/other_queue
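The two `uaac context` calls above print, among other fields, each user's access_token. A minimal sketch of using it against the management API, assuming the management plugin is enabled (as in the symmetric-key demo config) and that the token appears on an `access_token:` line of the context output:

    # extract rabbit_super's token and call the management API with it;
    # the basic-auth username is ignored, the token acts as the password
    token=$(uaac context rabbit_super | awk '/access_token/ {print $2}')
    curl -s -u ignored:"$token" http://localhost:15672/api/whoami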
diff --git a/deps/rabbitmq_auth_backend_oauth2/demo/symmetric_keys/rabbitmq.config b/deps/rabbitmq_auth_backend_oauth2/demo/symmetric_keys/rabbitmq.config
new file mode 100644
index 0000000000..89b59a1bee
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_oauth2/demo/symmetric_keys/rabbitmq.config
@@ -0,0 +1,31 @@
+[
+ %% Enable rabbit_auth_backend_oauth2
+ {rabbit, [
+ {auth_backends, [rabbit_auth_backend_oauth2, rabbit_auth_backend_internal]}
+ ]},
+
+ {rabbitmq_management, [
+ {enable_uaa, true},
+ {uaa_client_id, "rabbit_user_client"},
+ {uaa_location, "http://localhost:8080/uaa"}
+ ]},
+
+ %% Set the resource server ID. All scopes will then have to be prefixed with `rabbitmq.`
+ {rabbitmq_auth_backend_oauth2, [
+ {resource_server_id, <<"rabbitmq">>},
+ % Set up a legacy signing key
+ {key_config, [
+ {default_key, <<"legacy-token-key">>},
+ {signing_keys, #{
+ <<"legacy-token-key">> =>
+ {map, #{
+ <<"alg">> => <<"HS256">>,
+ <<"value">> => <<"rabbit_signing_key">>,
+ <<"kty">> => <<"MAC">>,
+ <<"use">> => <<"sig">>}
+ }
+ }
+ } %% signing keys
+ ]} % key_config
+ ]} % rabbitmq_auth_backend_oauth2
+].
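With a symmetric HS256 key, any party that knows `rabbit_signing_key` can mint tokens, which makes local experiments easy. Below is a minimal sketch using only openssl and base64; the `aud` and `scope` claims follow the plugin's conventions, but treat the exact claim set as an assumption to verify against the plugin's documentation:

    # build and sign an HS256 JWT with the shared demo key
    b64url() { openssl base64 -A | tr '+/' '-_' | tr -d '='; }
    header=$(printf '{"alg":"HS256","typ":"JWT"}' | b64url)
    claims=$(printf '{"aud":["rabbitmq"],"scope":["rabbitmq.read:*/*"],"exp":%d}' \
        $(($(date +%s) + 3600)) | b64url)
    sig=$(printf '%s.%s' "$header" "$claims" \
        | openssl dgst -sha256 -hmac 'rabbit_signing_key' -binary | b64url)
    token="$header.$claims.$sig"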
diff --git a/deps/rabbitmq_auth_backend_oauth2/demo/symmetric_keys/uaa.yml b/deps/rabbitmq_auth_backend_oauth2/demo/symmetric_keys/uaa.yml
new file mode 100644
index 0000000000..ec7f2c882b
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_oauth2/demo/symmetric_keys/uaa.yml
@@ -0,0 +1,4 @@
+jwt:
+ token:
+ signing-key: rabbit_signing_key
+ verification-key: rabbit_signing_key
diff --git a/deps/rabbitmq_auth_backend_oauth2/erlang.mk b/deps/rabbitmq_auth_backend_oauth2/erlang.mk
new file mode 100644
index 0000000000..fce4be0b0a
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_oauth2/erlang.mk
@@ -0,0 +1,7808 @@
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+.PHONY: all app apps deps search rel relup docs install-docs check tests clean distclean help erlang-mk
+
+ERLANG_MK_FILENAME := $(realpath $(lastword $(MAKEFILE_LIST)))
+export ERLANG_MK_FILENAME
+
+ERLANG_MK_VERSION = 2019.07.01-40-geb3e4b0
+ERLANG_MK_WITHOUT =
+
+# Make 3.81 and 3.82 are deprecated.
+
+ifeq ($(MAKELEVEL)$(MAKE_VERSION),03.81)
+$(warning Please upgrade to GNU Make 4 or later: https://erlang.mk/guide/installation.html)
+endif
+
+ifeq ($(MAKELEVEL)$(MAKE_VERSION),03.82)
+$(warning Please upgrade to GNU Make 4 or later: https://erlang.mk/guide/installation.html)
+endif
+
+# Core configuration.
+
+PROJECT ?= $(notdir $(CURDIR))
+PROJECT := $(strip $(PROJECT))
+
+PROJECT_VERSION ?= rolling
+PROJECT_MOD ?= $(PROJECT)_app
+PROJECT_ENV ?= []
+
+# Verbosity.
+
+V ?= 0
+
+verbose_0 = @
+verbose_2 = set -x;
+verbose = $(verbose_$(V))
+
+ifeq ($(V),3)
+SHELL := $(SHELL) -x
+endif
+
+gen_verbose_0 = @echo " GEN " $@;
+gen_verbose_2 = set -x;
+gen_verbose = $(gen_verbose_$(V))
+
+gen_verbose_esc_0 = @echo " GEN " $$@;
+gen_verbose_esc_2 = set -x;
+gen_verbose_esc = $(gen_verbose_esc_$(V))
+
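+# Example (sketch): V controls build output verbosity from the command line:
+#
+#     make V=0 app    # quiet (default)
+#     make V=1 app    # echo each command as it runs
+#     make V=2 app    # run each recipe with set -x
+#     make V=3 app    # additionally trace the whole shell
+#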
+# Temporary files directory.
+
+ERLANG_MK_TMP ?= $(CURDIR)/.erlang.mk
+export ERLANG_MK_TMP
+
+# "erl" command.
+
+ERL = erl +A1 -noinput -boot no_dot_erlang
+
+# Platform detection.
+
+ifeq ($(PLATFORM),)
+UNAME_S := $(shell uname -s)
+
+ifeq ($(UNAME_S),Linux)
+PLATFORM = linux
+else ifeq ($(UNAME_S),Darwin)
+PLATFORM = darwin
+else ifeq ($(UNAME_S),SunOS)
+PLATFORM = solaris
+else ifeq ($(UNAME_S),GNU)
+PLATFORM = gnu
+else ifeq ($(UNAME_S),FreeBSD)
+PLATFORM = freebsd
+else ifeq ($(UNAME_S),NetBSD)
+PLATFORM = netbsd
+else ifeq ($(UNAME_S),OpenBSD)
+PLATFORM = openbsd
+else ifeq ($(UNAME_S),DragonFly)
+PLATFORM = dragonfly
+else ifeq ($(shell uname -o),Msys)
+PLATFORM = msys2
+else
+$(error Unable to detect platform. Please open a ticket with the output of uname -a.)
+endif
+
+export PLATFORM
+endif
+
+# Core targets.
+
+all:: deps app rel
+
+# Noop to avoid a Make warning when there's nothing to do.
+rel::
+ $(verbose) :
+
+relup:: deps app
+
+check:: tests
+
+clean:: clean-crashdump
+
+clean-crashdump:
+ifneq ($(wildcard erl_crash.dump),)
+ $(gen_verbose) rm -f erl_crash.dump
+endif
+
+distclean:: clean distclean-tmp
+
+$(ERLANG_MK_TMP):
+ $(verbose) mkdir -p $(ERLANG_MK_TMP)
+
+distclean-tmp:
+ $(gen_verbose) rm -rf $(ERLANG_MK_TMP)
+
+help::
+ $(verbose) printf "%s\n" \
+ "erlang.mk (version $(ERLANG_MK_VERSION)) is distributed under the terms of the ISC License." \
+ "Copyright (c) 2013-2016 Loïc Hoguin <essen@ninenines.eu>" \
+ "" \
+ "Usage: [V=1] $(MAKE) [target]..." \
+ "" \
+ "Core targets:" \
+ " all Run deps, app and rel targets in that order" \
+ " app Compile the project" \
+ " deps Fetch dependencies (if needed) and compile them" \
+ " fetch-deps Fetch dependencies recursively (if needed) without compiling them" \
+ " list-deps List dependencies recursively on stdout" \
+ " search q=... Search for a package in the built-in index" \
+ " rel Build a release for this project, if applicable" \
+ " docs Build the documentation for this project" \
+ " install-docs Install the man pages for this project" \
+ " check Compile and run all tests and analysis for this project" \
+ " tests Run the tests for this project" \
+ " clean Delete temporary and output files from most targets" \
+ " distclean Delete all temporary and output files" \
+ " help Display this help and exit" \
+ " erlang-mk Update erlang.mk to the latest version"
+
+# Core functions.
+
+empty :=
+space := $(empty) $(empty)
+tab := $(empty)	$(empty)
+comma := ,
+
+define newline
+
+
+endef
+
+define comma_list
+$(subst $(space),$(comma),$(strip $(1)))
+endef
+
+define escape_dquotes
+$(subst ",\",$1)
+endef
+
+# Adding erlang.mk to make Erlang scripts that call init:get_plain_arguments() happy.
+define erlang
+$(ERL) $2 -pz $(ERLANG_MK_TMP)/rebar/ebin -eval "$(subst $(newline),,$(call escape_dquotes,$1))" -- erlang.mk
+endef
+
+ifeq ($(PLATFORM),msys2)
+core_native_path = $(shell cygpath -m $1)
+else
+core_native_path = $1
+endif
+
+core_http_get = curl -Lf$(if $(filter-out 0,$(V)),,s)o $(call core_native_path,$1) $2
+
+core_eq = $(and $(findstring $(1),$(2)),$(findstring $(2),$(1)))
+
+# We skip files that contain spaces because they end up causing issues.
+core_find = $(if $(wildcard $1),$(shell find $(1:%/=%) \( -type l -o -type f \) -name $(subst *,\*,$2) | grep -v " "))
+
+core_lc = $(subst A,a,$(subst B,b,$(subst C,c,$(subst D,d,$(subst E,e,$(subst F,f,$(subst G,g,$(subst H,h,$(subst I,i,$(subst J,j,$(subst K,k,$(subst L,l,$(subst M,m,$(subst N,n,$(subst O,o,$(subst P,p,$(subst Q,q,$(subst R,r,$(subst S,s,$(subst T,t,$(subst U,u,$(subst V,v,$(subst W,w,$(subst X,x,$(subst Y,y,$(subst Z,z,$(1)))))))))))))))))))))))))))
+
+core_ls = $(filter-out $(1),$(shell echo $(1)))
+
+# @todo Use a solution that does not require using perl.
+core_relpath = $(shell perl -e 'use File::Spec; print File::Spec->abs2rel(@ARGV) . "\n"' $1 $2)
+
+define core_render
+ printf -- '$(subst $(newline),\n,$(subst %,%%,$(subst ','\'',$(subst $(tab),$(WS),$(call $(1))))))\n' > $(2)
+endef
+
+# Automated update.
+
+ERLANG_MK_REPO ?= https://github.com/ninenines/erlang.mk
+ERLANG_MK_COMMIT ?=
+ERLANG_MK_BUILD_CONFIG ?= build.config
+ERLANG_MK_BUILD_DIR ?= .erlang.mk.build
+
+erlang-mk: WITHOUT ?= $(ERLANG_MK_WITHOUT)
+erlang-mk:
+ifdef ERLANG_MK_COMMIT
+ $(verbose) git clone $(ERLANG_MK_REPO) $(ERLANG_MK_BUILD_DIR)
+ $(verbose) cd $(ERLANG_MK_BUILD_DIR) && git checkout $(ERLANG_MK_COMMIT)
+else
+ $(verbose) git clone --depth 1 $(ERLANG_MK_REPO) $(ERLANG_MK_BUILD_DIR)
+endif
+ $(verbose) if [ -f $(ERLANG_MK_BUILD_CONFIG) ]; then cp $(ERLANG_MK_BUILD_CONFIG) $(ERLANG_MK_BUILD_DIR)/build.config; fi
+ $(gen_verbose) $(MAKE) --no-print-directory -C $(ERLANG_MK_BUILD_DIR) WITHOUT='$(strip $(WITHOUT))' UPGRADE=1
+ $(verbose) cp $(ERLANG_MK_BUILD_DIR)/erlang.mk ./erlang.mk
+ $(verbose) rm -rf $(ERLANG_MK_BUILD_DIR)
+ $(verbose) rm -rf $(ERLANG_MK_TMP)
+
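+# Example usage (sketch): update the bundled erlang.mk in place, optionally
+# pinned to a specific upstream commit (here the commit-ish from the version
+# string above):
+#
+#     make erlang-mk
+#     make erlang-mk ERLANG_MK_COMMIT=eb3e4b0
+#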
+# The erlang.mk package index is bundled in the default erlang.mk build.
+# Search for the string "copyright" to skip to the rest of the code.
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-kerl
+
+KERL_INSTALL_DIR ?= $(HOME)/erlang
+
+ifeq ($(strip $(KERL)),)
+KERL := $(ERLANG_MK_TMP)/kerl/kerl
+endif
+
+KERL_DIR = $(ERLANG_MK_TMP)/kerl
+
+export KERL
+
+KERL_GIT ?= https://github.com/kerl/kerl
+KERL_COMMIT ?= master
+
+KERL_MAKEFLAGS ?=
+
+OTP_GIT ?= https://github.com/erlang/otp
+
+define kerl_otp_target
+$(KERL_INSTALL_DIR)/$(1): $(KERL)
+ $(verbose) if [ ! -d $$@ ]; then \
+ MAKEFLAGS="$(KERL_MAKEFLAGS)" $(KERL) build git $(OTP_GIT) $(1) $(1); \
+ $(KERL) install $(1) $(KERL_INSTALL_DIR)/$(1); \
+ fi
+endef
+
+define kerl_hipe_target
+$(KERL_INSTALL_DIR)/$1-native: $(KERL)
+ $(verbose) if [ ! -d $$@ ]; then \
+ KERL_CONFIGURE_OPTIONS=--enable-native-libs \
+ MAKEFLAGS="$(KERL_MAKEFLAGS)" $(KERL) build git $(OTP_GIT) $1 $1-native; \
+ $(KERL) install $1-native $(KERL_INSTALL_DIR)/$1-native; \
+ fi
+endef
+
+$(KERL): $(KERL_DIR)
+
+$(KERL_DIR): | $(ERLANG_MK_TMP)
+ $(gen_verbose) git clone --depth 1 $(KERL_GIT) $(ERLANG_MK_TMP)/kerl
+ $(verbose) cd $(ERLANG_MK_TMP)/kerl && git checkout $(KERL_COMMIT)
+ $(verbose) chmod +x $(KERL)
+
+distclean:: distclean-kerl
+
+distclean-kerl:
+ $(gen_verbose) rm -rf $(KERL_DIR)
+
+# Allow users to select which version of Erlang/OTP to use for a project.
+
+ifneq ($(strip $(LATEST_ERLANG_OTP)),)
+# In some environments it is necessary to filter out master.
+ERLANG_OTP := $(notdir $(lastword $(sort\
+ $(filter-out $(KERL_INSTALL_DIR)/master $(KERL_INSTALL_DIR)/OTP_R%,\
+ $(filter-out %-rc1 %-rc2 %-rc3,$(wildcard $(KERL_INSTALL_DIR)/*[^-native]))))))
+endif
+
+ERLANG_OTP ?=
+ERLANG_HIPE ?=
+
+# Use kerl to enforce a specific Erlang/OTP version for a project.
+ifneq ($(strip $(ERLANG_OTP)),)
+export PATH := $(KERL_INSTALL_DIR)/$(ERLANG_OTP)/bin:$(PATH)
+SHELL := env PATH=$(PATH) $(SHELL)
+$(eval $(call kerl_otp_target,$(ERLANG_OTP)))
+
+# Build Erlang/OTP only if it doesn't already exist.
+ifeq ($(wildcard $(KERL_INSTALL_DIR)/$(ERLANG_OTP))$(BUILD_ERLANG_OTP),)
+$(info Building Erlang/OTP $(ERLANG_OTP)... Please wait...)
+$(shell $(MAKE) $(KERL_INSTALL_DIR)/$(ERLANG_OTP) ERLANG_OTP=$(ERLANG_OTP) BUILD_ERLANG_OTP=1 >&2)
+endif
+
+else
+# Same for a HiPE enabled VM.
+ifneq ($(strip $(ERLANG_HIPE)),)
+export PATH := $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native/bin:$(PATH)
+SHELL := env PATH=$(PATH) $(SHELL)
+$(eval $(call kerl_hipe_target,$(ERLANG_HIPE)))
+
+# Build Erlang/OTP only if it doesn't already exist.
+ifeq ($(wildcard $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native)$(BUILD_ERLANG_OTP),)
+$(info Building HiPE-enabled Erlang/OTP $(ERLANG_HIPE)... Please wait...)
+$(shell $(MAKE) $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native ERLANG_HIPE=$(ERLANG_HIPE) BUILD_ERLANG_OTP=1 >&2)
+endif
+
+endif
+endif
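+# Example usage (sketch): build this project against a kerl-managed
+# Erlang/OTP; the version name below is illustrative and must exist as a
+# tag in OTP_GIT:
+#
+#     make ERLANG_OTP=OTP-22.3 tests
+#
+# or let the newest non-rc, non-native kerl build under KERL_INSTALL_DIR
+# be picked automatically:
+#
+#     make LATEST_ERLANG_OTP=1 tests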
+
+PACKAGES += aberth
+pkg_aberth_name = aberth
+pkg_aberth_description = Generic BERT-RPC server in Erlang
+pkg_aberth_homepage = https://github.com/a13x/aberth
+pkg_aberth_fetch = git
+pkg_aberth_repo = https://github.com/a13x/aberth
+pkg_aberth_commit = master
+
+PACKAGES += active
+pkg_active_name = active
+pkg_active_description = Active development for Erlang: rebuild and reload source/binary files while the VM is running
+pkg_active_homepage = https://github.com/proger/active
+pkg_active_fetch = git
+pkg_active_repo = https://github.com/proger/active
+pkg_active_commit = master
+
+PACKAGES += actordb_core
+pkg_actordb_core_name = actordb_core
+pkg_actordb_core_description = ActorDB main source
+pkg_actordb_core_homepage = http://www.actordb.com/
+pkg_actordb_core_fetch = git
+pkg_actordb_core_repo = https://github.com/biokoda/actordb_core
+pkg_actordb_core_commit = master
+
+PACKAGES += actordb_thrift
+pkg_actordb_thrift_name = actordb_thrift
+pkg_actordb_thrift_description = Thrift API for ActorDB
+pkg_actordb_thrift_homepage = http://www.actordb.com/
+pkg_actordb_thrift_fetch = git
+pkg_actordb_thrift_repo = https://github.com/biokoda/actordb_thrift
+pkg_actordb_thrift_commit = master
+
+PACKAGES += aleppo
+pkg_aleppo_name = aleppo
+pkg_aleppo_description = Alternative Erlang Pre-Processor
+pkg_aleppo_homepage = https://github.com/ErlyORM/aleppo
+pkg_aleppo_fetch = git
+pkg_aleppo_repo = https://github.com/ErlyORM/aleppo
+pkg_aleppo_commit = master
+
+PACKAGES += alog
+pkg_alog_name = alog
+pkg_alog_description = Simply the best logging framework for Erlang
+pkg_alog_homepage = https://github.com/siberian-fast-food/alogger
+pkg_alog_fetch = git
+pkg_alog_repo = https://github.com/siberian-fast-food/alogger
+pkg_alog_commit = master
+
+PACKAGES += amqp_client
+pkg_amqp_client_name = amqp_client
+pkg_amqp_client_description = RabbitMQ Erlang AMQP client
+pkg_amqp_client_homepage = https://www.rabbitmq.com/erlang-client-user-guide.html
+pkg_amqp_client_fetch = git
+pkg_amqp_client_repo = https://github.com/rabbitmq/rabbitmq-erlang-client.git
+pkg_amqp_client_commit = master
+
+PACKAGES += annotations
+pkg_annotations_name = annotations
+pkg_annotations_description = Simple code instrumentation utilities
+pkg_annotations_homepage = https://github.com/hyperthunk/annotations
+pkg_annotations_fetch = git
+pkg_annotations_repo = https://github.com/hyperthunk/annotations
+pkg_annotations_commit = master
+
+PACKAGES += antidote
+pkg_antidote_name = antidote
+pkg_antidote_description = Large-scale computation without synchronisation
+pkg_antidote_homepage = https://syncfree.lip6.fr/
+pkg_antidote_fetch = git
+pkg_antidote_repo = https://github.com/SyncFree/antidote
+pkg_antidote_commit = master
+
+PACKAGES += apns
+pkg_apns_name = apns
+pkg_apns_description = Apple Push Notification Server for Erlang
+pkg_apns_homepage = http://inaka.github.com/apns4erl
+pkg_apns_fetch = git
+pkg_apns_repo = https://github.com/inaka/apns4erl
+pkg_apns_commit = master
+
+PACKAGES += asciideck
+pkg_asciideck_name = asciideck
+pkg_asciideck_description = Asciidoc for Erlang.
+pkg_asciideck_homepage = https://ninenines.eu
+pkg_asciideck_fetch = git
+pkg_asciideck_repo = https://github.com/ninenines/asciideck
+pkg_asciideck_commit = master
+
+PACKAGES += azdht
+pkg_azdht_name = azdht
+pkg_azdht_description = Azureus Distributed Hash Table (DHT) in Erlang
+pkg_azdht_homepage = https://github.com/arcusfelis/azdht
+pkg_azdht_fetch = git
+pkg_azdht_repo = https://github.com/arcusfelis/azdht
+pkg_azdht_commit = master
+
+PACKAGES += backoff
+pkg_backoff_name = backoff
+pkg_backoff_description = Simple exponential backoffs in Erlang
+pkg_backoff_homepage = https://github.com/ferd/backoff
+pkg_backoff_fetch = git
+pkg_backoff_repo = https://github.com/ferd/backoff
+pkg_backoff_commit = master
+
+PACKAGES += barrel_tcp
+pkg_barrel_tcp_name = barrel_tcp
+pkg_barrel_tcp_description = barrel is a generic TCP acceptor pool with low latency in Erlang.
+pkg_barrel_tcp_homepage = https://github.com/benoitc-attic/barrel_tcp
+pkg_barrel_tcp_fetch = git
+pkg_barrel_tcp_repo = https://github.com/benoitc-attic/barrel_tcp
+pkg_barrel_tcp_commit = master
+
+PACKAGES += basho_bench
+pkg_basho_bench_name = basho_bench
+pkg_basho_bench_description = A load-generation and testing tool for basically whatever you can write a returning Erlang function for.
+pkg_basho_bench_homepage = https://github.com/basho/basho_bench
+pkg_basho_bench_fetch = git
+pkg_basho_bench_repo = https://github.com/basho/basho_bench
+pkg_basho_bench_commit = master
+
+PACKAGES += bcrypt
+pkg_bcrypt_name = bcrypt
+pkg_bcrypt_description = Bcrypt Erlang / C library
+pkg_bcrypt_homepage = https://github.com/erlangpack/bcrypt
+pkg_bcrypt_fetch = git
+pkg_bcrypt_repo = https://github.com/erlangpack/bcrypt.git
+pkg_bcrypt_commit = master
+
+PACKAGES += beam
+pkg_beam_name = beam
+pkg_beam_description = BEAM emulator written in Erlang
+pkg_beam_homepage = https://github.com/tonyrog/beam
+pkg_beam_fetch = git
+pkg_beam_repo = https://github.com/tonyrog/beam
+pkg_beam_commit = master
+
+PACKAGES += beanstalk
+pkg_beanstalk_name = beanstalk
+pkg_beanstalk_description = An Erlang client for beanstalkd
+pkg_beanstalk_homepage = https://github.com/tim/erlang-beanstalk
+pkg_beanstalk_fetch = git
+pkg_beanstalk_repo = https://github.com/tim/erlang-beanstalk
+pkg_beanstalk_commit = master
+
+PACKAGES += bear
+pkg_bear_name = bear
+pkg_bear_description = a set of statistics functions for erlang
+pkg_bear_homepage = https://github.com/boundary/bear
+pkg_bear_fetch = git
+pkg_bear_repo = https://github.com/boundary/bear
+pkg_bear_commit = master
+
+PACKAGES += bertconf
+pkg_bertconf_name = bertconf
+pkg_bertconf_description = Make ETS tables out of static BERT files that are auto-reloaded
+pkg_bertconf_homepage = https://github.com/ferd/bertconf
+pkg_bertconf_fetch = git
+pkg_bertconf_repo = https://github.com/ferd/bertconf
+pkg_bertconf_commit = master
+
+PACKAGES += bifrost
+pkg_bifrost_name = bifrost
+pkg_bifrost_description = Erlang FTP Server Framework
+pkg_bifrost_homepage = https://github.com/thorstadt/bifrost
+pkg_bifrost_fetch = git
+pkg_bifrost_repo = https://github.com/thorstadt/bifrost
+pkg_bifrost_commit = master
+
+PACKAGES += binpp
+pkg_binpp_name = binpp
+pkg_binpp_description = Erlang Binary Pretty Printer
+pkg_binpp_homepage = https://github.com/jtendo/binpp
+pkg_binpp_fetch = git
+pkg_binpp_repo = https://github.com/jtendo/binpp
+pkg_binpp_commit = master
+
+PACKAGES += bisect
+pkg_bisect_name = bisect
+pkg_bisect_description = Ordered fixed-size binary dictionary in Erlang
+pkg_bisect_homepage = https://github.com/knutin/bisect
+pkg_bisect_fetch = git
+pkg_bisect_repo = https://github.com/knutin/bisect
+pkg_bisect_commit = master
+
+PACKAGES += bitcask
+pkg_bitcask_name = bitcask
+pkg_bitcask_description = because you need another key/value storage engine
+pkg_bitcask_homepage = https://github.com/basho/bitcask
+pkg_bitcask_fetch = git
+pkg_bitcask_repo = https://github.com/basho/bitcask
+pkg_bitcask_commit = develop
+
+PACKAGES += bitstore
+pkg_bitstore_name = bitstore
+pkg_bitstore_description = A document based ontology development environment
+pkg_bitstore_homepage = https://github.com/bdionne/bitstore
+pkg_bitstore_fetch = git
+pkg_bitstore_repo = https://github.com/bdionne/bitstore
+pkg_bitstore_commit = master
+
+PACKAGES += bootstrap
+pkg_bootstrap_name = bootstrap
+pkg_bootstrap_description = A simple, yet powerful Erlang cluster bootstrapping application.
+pkg_bootstrap_homepage = https://github.com/schlagert/bootstrap
+pkg_bootstrap_fetch = git
+pkg_bootstrap_repo = https://github.com/schlagert/bootstrap
+pkg_bootstrap_commit = master
+
+PACKAGES += boss
+pkg_boss_name = boss
+pkg_boss_description = Erlang web MVC, now featuring Comet
+pkg_boss_homepage = https://github.com/ChicagoBoss/ChicagoBoss
+pkg_boss_fetch = git
+pkg_boss_repo = https://github.com/ChicagoBoss/ChicagoBoss
+pkg_boss_commit = master
+
+PACKAGES += boss_db
+pkg_boss_db_name = boss_db
+pkg_boss_db_description = BossDB: a sharded, caching, pooling, evented ORM for Erlang
+pkg_boss_db_homepage = https://github.com/ErlyORM/boss_db
+pkg_boss_db_fetch = git
+pkg_boss_db_repo = https://github.com/ErlyORM/boss_db
+pkg_boss_db_commit = master
+
+PACKAGES += brod
+pkg_brod_name = brod
+pkg_brod_description = Kafka client in Erlang
+pkg_brod_homepage = https://github.com/klarna/brod
+pkg_brod_fetch = git
+pkg_brod_repo = https://github.com/klarna/brod.git
+pkg_brod_commit = master
+
+PACKAGES += bson
+pkg_bson_name = bson
+pkg_bson_description = BSON documents in Erlang, see bsonspec.org
+pkg_bson_homepage = https://github.com/comtihon/bson-erlang
+pkg_bson_fetch = git
+pkg_bson_repo = https://github.com/comtihon/bson-erlang
+pkg_bson_commit = master
+
+PACKAGES += bullet
+pkg_bullet_name = bullet
+pkg_bullet_description = Simple, reliable, efficient streaming for Cowboy.
+pkg_bullet_homepage = http://ninenines.eu
+pkg_bullet_fetch = git
+pkg_bullet_repo = https://github.com/ninenines/bullet
+pkg_bullet_commit = master
+
+PACKAGES += cache
+pkg_cache_name = cache
+pkg_cache_description = Erlang in-memory cache
+pkg_cache_homepage = https://github.com/fogfish/cache
+pkg_cache_fetch = git
+pkg_cache_repo = https://github.com/fogfish/cache
+pkg_cache_commit = master
+
+PACKAGES += cake
+pkg_cake_name = cake
+pkg_cake_description = Really simple terminal colorization
+pkg_cake_homepage = https://github.com/darach/cake-erl
+pkg_cake_fetch = git
+pkg_cake_repo = https://github.com/darach/cake-erl
+pkg_cake_commit = master
+
+PACKAGES += carotene
+pkg_carotene_name = carotene
+pkg_carotene_description = Real-time server
+pkg_carotene_homepage = https://github.com/carotene/carotene
+pkg_carotene_fetch = git
+pkg_carotene_repo = https://github.com/carotene/carotene
+pkg_carotene_commit = master
+
+PACKAGES += cberl
+pkg_cberl_name = cberl
+pkg_cberl_description = NIF based Erlang bindings for Couchbase
+pkg_cberl_homepage = https://github.com/chitika/cberl
+pkg_cberl_fetch = git
+pkg_cberl_repo = https://github.com/chitika/cberl
+pkg_cberl_commit = master
+
+PACKAGES += cecho
+pkg_cecho_name = cecho
+pkg_cecho_description = An ncurses library for Erlang
+pkg_cecho_homepage = https://github.com/mazenharake/cecho
+pkg_cecho_fetch = git
+pkg_cecho_repo = https://github.com/mazenharake/cecho
+pkg_cecho_commit = master
+
+PACKAGES += cferl
+pkg_cferl_name = cferl
+pkg_cferl_description = Rackspace / Open Stack Cloud Files Erlang Client
+pkg_cferl_homepage = https://github.com/ddossot/cferl
+pkg_cferl_fetch = git
+pkg_cferl_repo = https://github.com/ddossot/cferl
+pkg_cferl_commit = master
+
+PACKAGES += chaos_monkey
+pkg_chaos_monkey_name = chaos_monkey
+pkg_chaos_monkey_description = This is The CHAOS MONKEY. It will kill your processes.
+pkg_chaos_monkey_homepage = https://github.com/dLuna/chaos_monkey
+pkg_chaos_monkey_fetch = git
+pkg_chaos_monkey_repo = https://github.com/dLuna/chaos_monkey
+pkg_chaos_monkey_commit = master
+
+PACKAGES += check_node
+pkg_check_node_name = check_node
+pkg_check_node_description = Nagios Scripts for monitoring Riak
+pkg_check_node_homepage = https://github.com/basho-labs/riak_nagios
+pkg_check_node_fetch = git
+pkg_check_node_repo = https://github.com/basho-labs/riak_nagios
+pkg_check_node_commit = master
+
+PACKAGES += chronos
+pkg_chronos_name = chronos
+pkg_chronos_description = Timer module for Erlang that makes it easy to abstract time out of the tests.
+pkg_chronos_homepage = https://github.com/lehoff/chronos
+pkg_chronos_fetch = git
+pkg_chronos_repo = https://github.com/lehoff/chronos
+pkg_chronos_commit = master
+
+PACKAGES += chumak
+pkg_chumak_name = chumak
+pkg_chumak_description = Pure Erlang implementation of ZeroMQ Message Transport Protocol.
+pkg_chumak_homepage = http://choven.ca
+pkg_chumak_fetch = git
+pkg_chumak_repo = https://github.com/chovencorp/chumak
+pkg_chumak_commit = master
+
+PACKAGES += cl
+pkg_cl_name = cl
+pkg_cl_description = OpenCL binding for Erlang
+pkg_cl_homepage = https://github.com/tonyrog/cl
+pkg_cl_fetch = git
+pkg_cl_repo = https://github.com/tonyrog/cl
+pkg_cl_commit = master
+
+PACKAGES += clique
+pkg_clique_name = clique
+pkg_clique_description = CLI Framework for Erlang
+pkg_clique_homepage = https://github.com/basho/clique
+pkg_clique_fetch = git
+pkg_clique_repo = https://github.com/basho/clique
+pkg_clique_commit = develop
+
+PACKAGES += cloudi_core
+pkg_cloudi_core_name = cloudi_core
+pkg_cloudi_core_description = CloudI internal service runtime
+pkg_cloudi_core_homepage = http://cloudi.org/
+pkg_cloudi_core_fetch = git
+pkg_cloudi_core_repo = https://github.com/CloudI/cloudi_core
+pkg_cloudi_core_commit = master
+
+PACKAGES += cloudi_service_api_requests
+pkg_cloudi_service_api_requests_name = cloudi_service_api_requests
+pkg_cloudi_service_api_requests_description = CloudI Service API requests (JSON-RPC/Erlang-term support)
+pkg_cloudi_service_api_requests_homepage = http://cloudi.org/
+pkg_cloudi_service_api_requests_fetch = git
+pkg_cloudi_service_api_requests_repo = https://github.com/CloudI/cloudi_service_api_requests
+pkg_cloudi_service_api_requests_commit = master
+
+PACKAGES += cloudi_service_db
+pkg_cloudi_service_db_name = cloudi_service_db
+pkg_cloudi_service_db_description = CloudI Database (in-memory/testing/generic)
+pkg_cloudi_service_db_homepage = http://cloudi.org/
+pkg_cloudi_service_db_fetch = git
+pkg_cloudi_service_db_repo = https://github.com/CloudI/cloudi_service_db
+pkg_cloudi_service_db_commit = master
+
+PACKAGES += cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_name = cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_description = Cassandra CloudI Service
+pkg_cloudi_service_db_cassandra_homepage = http://cloudi.org/
+pkg_cloudi_service_db_cassandra_fetch = git
+pkg_cloudi_service_db_cassandra_repo = https://github.com/CloudI/cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_commit = master
+
+PACKAGES += cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_name = cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_description = Cassandra CQL CloudI Service
+pkg_cloudi_service_db_cassandra_cql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_cassandra_cql_fetch = git
+pkg_cloudi_service_db_cassandra_cql_repo = https://github.com/CloudI/cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_commit = master
+
+PACKAGES += cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_name = cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_description = CouchDB CloudI Service
+pkg_cloudi_service_db_couchdb_homepage = http://cloudi.org/
+pkg_cloudi_service_db_couchdb_fetch = git
+pkg_cloudi_service_db_couchdb_repo = https://github.com/CloudI/cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_commit = master
+
+PACKAGES += cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_name = cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_description = elasticsearch CloudI Service
+pkg_cloudi_service_db_elasticsearch_homepage = http://cloudi.org/
+pkg_cloudi_service_db_elasticsearch_fetch = git
+pkg_cloudi_service_db_elasticsearch_repo = https://github.com/CloudI/cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_commit = master
+
+PACKAGES += cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_name = cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_description = memcached CloudI Service
+pkg_cloudi_service_db_memcached_homepage = http://cloudi.org/
+pkg_cloudi_service_db_memcached_fetch = git
+pkg_cloudi_service_db_memcached_repo = https://github.com/CloudI/cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_commit = master
+
+PACKAGES += cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_name = cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_description = MySQL CloudI Service
+pkg_cloudi_service_db_mysql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_mysql_fetch = git
+pkg_cloudi_service_db_mysql_repo = https://github.com/CloudI/cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_commit = master
+
+PACKAGES += cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_name = cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_description = PostgreSQL CloudI Service
+pkg_cloudi_service_db_pgsql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_pgsql_fetch = git
+pkg_cloudi_service_db_pgsql_repo = https://github.com/CloudI/cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_commit = master
+
+PACKAGES += cloudi_service_db_riak
+pkg_cloudi_service_db_riak_name = cloudi_service_db_riak
+pkg_cloudi_service_db_riak_description = Riak CloudI Service
+pkg_cloudi_service_db_riak_homepage = http://cloudi.org/
+pkg_cloudi_service_db_riak_fetch = git
+pkg_cloudi_service_db_riak_repo = https://github.com/CloudI/cloudi_service_db_riak
+pkg_cloudi_service_db_riak_commit = master
+
+PACKAGES += cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_name = cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_description = Tokyo Tyrant CloudI Service
+pkg_cloudi_service_db_tokyotyrant_homepage = http://cloudi.org/
+pkg_cloudi_service_db_tokyotyrant_fetch = git
+pkg_cloudi_service_db_tokyotyrant_repo = https://github.com/CloudI/cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_commit = master
+
+PACKAGES += cloudi_service_filesystem
+pkg_cloudi_service_filesystem_name = cloudi_service_filesystem
+pkg_cloudi_service_filesystem_description = Filesystem CloudI Service
+pkg_cloudi_service_filesystem_homepage = http://cloudi.org/
+pkg_cloudi_service_filesystem_fetch = git
+pkg_cloudi_service_filesystem_repo = https://github.com/CloudI/cloudi_service_filesystem
+pkg_cloudi_service_filesystem_commit = master
+
+PACKAGES += cloudi_service_http_client
+pkg_cloudi_service_http_client_name = cloudi_service_http_client
+pkg_cloudi_service_http_client_description = HTTP client CloudI Service
+pkg_cloudi_service_http_client_homepage = http://cloudi.org/
+pkg_cloudi_service_http_client_fetch = git
+pkg_cloudi_service_http_client_repo = https://github.com/CloudI/cloudi_service_http_client
+pkg_cloudi_service_http_client_commit = master
+
+PACKAGES += cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_name = cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_description = cowboy HTTP/HTTPS CloudI Service
+pkg_cloudi_service_http_cowboy_homepage = http://cloudi.org/
+pkg_cloudi_service_http_cowboy_fetch = git
+pkg_cloudi_service_http_cowboy_repo = https://github.com/CloudI/cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_commit = master
+
+PACKAGES += cloudi_service_http_elli
+pkg_cloudi_service_http_elli_name = cloudi_service_http_elli
+pkg_cloudi_service_http_elli_description = elli HTTP CloudI Service
+pkg_cloudi_service_http_elli_homepage = http://cloudi.org/
+pkg_cloudi_service_http_elli_fetch = git
+pkg_cloudi_service_http_elli_repo = https://github.com/CloudI/cloudi_service_http_elli
+pkg_cloudi_service_http_elli_commit = master
+
+PACKAGES += cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_name = cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_description = Map/Reduce CloudI Service
+pkg_cloudi_service_map_reduce_homepage = http://cloudi.org/
+pkg_cloudi_service_map_reduce_fetch = git
+pkg_cloudi_service_map_reduce_repo = https://github.com/CloudI/cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_commit = master
+
+PACKAGES += cloudi_service_oauth1
+pkg_cloudi_service_oauth1_name = cloudi_service_oauth1
+pkg_cloudi_service_oauth1_description = OAuth v1.0 CloudI Service
+pkg_cloudi_service_oauth1_homepage = http://cloudi.org/
+pkg_cloudi_service_oauth1_fetch = git
+pkg_cloudi_service_oauth1_repo = https://github.com/CloudI/cloudi_service_oauth1
+pkg_cloudi_service_oauth1_commit = master
+
+PACKAGES += cloudi_service_queue
+pkg_cloudi_service_queue_name = cloudi_service_queue
+pkg_cloudi_service_queue_description = Persistent Queue Service
+pkg_cloudi_service_queue_homepage = http://cloudi.org/
+pkg_cloudi_service_queue_fetch = git
+pkg_cloudi_service_queue_repo = https://github.com/CloudI/cloudi_service_queue
+pkg_cloudi_service_queue_commit = master
+
+PACKAGES += cloudi_service_quorum
+pkg_cloudi_service_quorum_name = cloudi_service_quorum
+pkg_cloudi_service_quorum_description = CloudI Quorum Service
+pkg_cloudi_service_quorum_homepage = http://cloudi.org/
+pkg_cloudi_service_quorum_fetch = git
+pkg_cloudi_service_quorum_repo = https://github.com/CloudI/cloudi_service_quorum
+pkg_cloudi_service_quorum_commit = master
+
+PACKAGES += cloudi_service_router
+pkg_cloudi_service_router_name = cloudi_service_router
+pkg_cloudi_service_router_description = CloudI Router Service
+pkg_cloudi_service_router_homepage = http://cloudi.org/
+pkg_cloudi_service_router_fetch = git
+pkg_cloudi_service_router_repo = https://github.com/CloudI/cloudi_service_router
+pkg_cloudi_service_router_commit = master
+
+PACKAGES += cloudi_service_tcp
+pkg_cloudi_service_tcp_name = cloudi_service_tcp
+pkg_cloudi_service_tcp_description = TCP CloudI Service
+pkg_cloudi_service_tcp_homepage = http://cloudi.org/
+pkg_cloudi_service_tcp_fetch = git
+pkg_cloudi_service_tcp_repo = https://github.com/CloudI/cloudi_service_tcp
+pkg_cloudi_service_tcp_commit = master
+
+PACKAGES += cloudi_service_timers
+pkg_cloudi_service_timers_name = cloudi_service_timers
+pkg_cloudi_service_timers_description = Timers CloudI Service
+pkg_cloudi_service_timers_homepage = http://cloudi.org/
+pkg_cloudi_service_timers_fetch = git
+pkg_cloudi_service_timers_repo = https://github.com/CloudI/cloudi_service_timers
+pkg_cloudi_service_timers_commit = master
+
+PACKAGES += cloudi_service_udp
+pkg_cloudi_service_udp_name = cloudi_service_udp
+pkg_cloudi_service_udp_description = UDP CloudI Service
+pkg_cloudi_service_udp_homepage = http://cloudi.org/
+pkg_cloudi_service_udp_fetch = git
+pkg_cloudi_service_udp_repo = https://github.com/CloudI/cloudi_service_udp
+pkg_cloudi_service_udp_commit = master
+
+PACKAGES += cloudi_service_validate
+pkg_cloudi_service_validate_name = cloudi_service_validate
+pkg_cloudi_service_validate_description = CloudI Validate Service
+pkg_cloudi_service_validate_homepage = http://cloudi.org/
+pkg_cloudi_service_validate_fetch = git
+pkg_cloudi_service_validate_repo = https://github.com/CloudI/cloudi_service_validate
+pkg_cloudi_service_validate_commit = master
+
+PACKAGES += cloudi_service_zeromq
+pkg_cloudi_service_zeromq_name = cloudi_service_zeromq
+pkg_cloudi_service_zeromq_description = ZeroMQ CloudI Service
+pkg_cloudi_service_zeromq_homepage = http://cloudi.org/
+pkg_cloudi_service_zeromq_fetch = git
+pkg_cloudi_service_zeromq_repo = https://github.com/CloudI/cloudi_service_zeromq
+pkg_cloudi_service_zeromq_commit = master
+
+PACKAGES += cluster_info
+pkg_cluster_info_name = cluster_info
+pkg_cluster_info_description = Fork of Hibari's nifty cluster_info OTP app
+pkg_cluster_info_homepage = https://github.com/basho/cluster_info
+pkg_cluster_info_fetch = git
+pkg_cluster_info_repo = https://github.com/basho/cluster_info
+pkg_cluster_info_commit = master
+
+PACKAGES += color
+pkg_color_name = color
+pkg_color_description = ANSI colors for your Erlang
+pkg_color_homepage = https://github.com/julianduque/erlang-color
+pkg_color_fetch = git
+pkg_color_repo = https://github.com/julianduque/erlang-color
+pkg_color_commit = master
+
+PACKAGES += confetti
+pkg_confetti_name = confetti
+pkg_confetti_description = Erlang configuration provider / application:get_env/2 on steroids
+pkg_confetti_homepage = https://github.com/jtendo/confetti
+pkg_confetti_fetch = git
+pkg_confetti_repo = https://github.com/jtendo/confetti
+pkg_confetti_commit = master
+
+PACKAGES += couchbeam
+pkg_couchbeam_name = couchbeam
+pkg_couchbeam_description = Apache CouchDB client in Erlang
+pkg_couchbeam_homepage = https://github.com/benoitc/couchbeam
+pkg_couchbeam_fetch = git
+pkg_couchbeam_repo = https://github.com/benoitc/couchbeam
+pkg_couchbeam_commit = master
+
+PACKAGES += covertool
+pkg_covertool_name = covertool
+pkg_covertool_description = Tool to convert Erlang cover data files into Cobertura XML reports
+pkg_covertool_homepage = https://github.com/idubrov/covertool
+pkg_covertool_fetch = git
+pkg_covertool_repo = https://github.com/idubrov/covertool
+pkg_covertool_commit = master
+
+PACKAGES += cowboy
+pkg_cowboy_name = cowboy
+pkg_cowboy_description = Small, fast and modular HTTP server.
+pkg_cowboy_homepage = http://ninenines.eu
+pkg_cowboy_fetch = git
+pkg_cowboy_repo = https://github.com/ninenines/cowboy
+pkg_cowboy_commit = 1.0.4
+
+PACKAGES += cowdb
+pkg_cowdb_name = cowdb
+pkg_cowdb_description = Pure Key/Value database library for Erlang Applications
+pkg_cowdb_homepage = https://github.com/refuge/cowdb
+pkg_cowdb_fetch = git
+pkg_cowdb_repo = https://github.com/refuge/cowdb
+pkg_cowdb_commit = master
+
+PACKAGES += cowlib
+pkg_cowlib_name = cowlib
+pkg_cowlib_description = Support library for manipulating Web protocols.
+pkg_cowlib_homepage = http://ninenines.eu
+pkg_cowlib_fetch = git
+pkg_cowlib_repo = https://github.com/ninenines/cowlib
+pkg_cowlib_commit = 1.0.2
+
+PACKAGES += cpg
+pkg_cpg_name = cpg
+pkg_cpg_description = CloudI Process Groups
+pkg_cpg_homepage = https://github.com/okeuday/cpg
+pkg_cpg_fetch = git
+pkg_cpg_repo = https://github.com/okeuday/cpg
+pkg_cpg_commit = master
+
+PACKAGES += cqerl
+pkg_cqerl_name = cqerl
+pkg_cqerl_description = Native Erlang CQL client for Cassandra
+pkg_cqerl_homepage = https://matehat.github.io/cqerl/
+pkg_cqerl_fetch = git
+pkg_cqerl_repo = https://github.com/matehat/cqerl
+pkg_cqerl_commit = master
+
+PACKAGES += cr
+pkg_cr_name = cr
+pkg_cr_description = Chain Replication
+pkg_cr_homepage = https://synrc.com/apps/cr/doc/cr.htm
+pkg_cr_fetch = git
+pkg_cr_repo = https://github.com/spawnproc/cr
+pkg_cr_commit = master
+
+PACKAGES += cuttlefish
+pkg_cuttlefish_name = cuttlefish
+pkg_cuttlefish_description = never lose your childlike sense of wonder baby cuttlefish, promise me?
+pkg_cuttlefish_homepage = https://github.com/basho/cuttlefish
+pkg_cuttlefish_fetch = git
+pkg_cuttlefish_repo = https://github.com/basho/cuttlefish
+pkg_cuttlefish_commit = master
+
+PACKAGES += damocles
+pkg_damocles_name = damocles
+pkg_damocles_description = Erlang library for generating adversarial network conditions for QAing distributed applications/systems on a single Linux box.
+pkg_damocles_homepage = https://github.com/lostcolony/damocles
+pkg_damocles_fetch = git
+pkg_damocles_repo = https://github.com/lostcolony/damocles
+pkg_damocles_commit = master
+
+PACKAGES += debbie
+pkg_debbie_name = debbie
+pkg_debbie_description = .DEB Built In Erlang
+pkg_debbie_homepage = https://github.com/crownedgrouse/debbie
+pkg_debbie_fetch = git
+pkg_debbie_repo = https://github.com/crownedgrouse/debbie
+pkg_debbie_commit = master
+
+PACKAGES += decimal
+pkg_decimal_name = decimal
+pkg_decimal_description = An Erlang decimal arithmetic library
+pkg_decimal_homepage = https://github.com/tim/erlang-decimal
+pkg_decimal_fetch = git
+pkg_decimal_repo = https://github.com/tim/erlang-decimal
+pkg_decimal_commit = master
+
+PACKAGES += detergent
+pkg_detergent_name = detergent
+pkg_detergent_description = An emulsifying Erlang SOAP library
+pkg_detergent_homepage = https://github.com/devinus/detergent
+pkg_detergent_fetch = git
+pkg_detergent_repo = https://github.com/devinus/detergent
+pkg_detergent_commit = master
+
+PACKAGES += detest
+pkg_detest_name = detest
+pkg_detest_description = Tool for running tests on a cluster of erlang nodes
+pkg_detest_homepage = https://github.com/biokoda/detest
+pkg_detest_fetch = git
+pkg_detest_repo = https://github.com/biokoda/detest
+pkg_detest_commit = master
+
+PACKAGES += dh_date
+pkg_dh_date_name = dh_date
+pkg_dh_date_description = Date formatting / parsing library for erlang
+pkg_dh_date_homepage = https://github.com/daleharvey/dh_date
+pkg_dh_date_fetch = git
+pkg_dh_date_repo = https://github.com/daleharvey/dh_date
+pkg_dh_date_commit = master
+
+PACKAGES += dirbusterl
+pkg_dirbusterl_name = dirbusterl
+pkg_dirbusterl_description = DirBuster successor in Erlang
+pkg_dirbusterl_homepage = https://github.com/silentsignal/DirBustErl
+pkg_dirbusterl_fetch = git
+pkg_dirbusterl_repo = https://github.com/silentsignal/DirBustErl
+pkg_dirbusterl_commit = master
+
+PACKAGES += dispcount
+pkg_dispcount_name = dispcount
+pkg_dispcount_description = Erlang task dispatcher based on ETS counters.
+pkg_dispcount_homepage = https://github.com/ferd/dispcount
+pkg_dispcount_fetch = git
+pkg_dispcount_repo = https://github.com/ferd/dispcount
+pkg_dispcount_commit = master
+
+PACKAGES += dlhttpc
+pkg_dlhttpc_name = dlhttpc
+pkg_dlhttpc_description = dispcount-based lhttpc fork for massive amounts of requests to limited endpoints
+pkg_dlhttpc_homepage = https://github.com/ferd/dlhttpc
+pkg_dlhttpc_fetch = git
+pkg_dlhttpc_repo = https://github.com/ferd/dlhttpc
+pkg_dlhttpc_commit = master
+
+PACKAGES += dns
+pkg_dns_name = dns
+pkg_dns_description = Erlang DNS library
+pkg_dns_homepage = https://github.com/aetrion/dns_erlang
+pkg_dns_fetch = git
+pkg_dns_repo = https://github.com/aetrion/dns_erlang
+pkg_dns_commit = master
+
+PACKAGES += dnssd
+pkg_dnssd_name = dnssd
+pkg_dnssd_description = Erlang interface to Apple's Bonjour DNS Service Discovery implementation
+pkg_dnssd_homepage = https://github.com/benoitc/dnssd_erlang
+pkg_dnssd_fetch = git
+pkg_dnssd_repo = https://github.com/benoitc/dnssd_erlang
+pkg_dnssd_commit = master
+
+PACKAGES += dynamic_compile
+pkg_dynamic_compile_name = dynamic_compile
+pkg_dynamic_compile_description = compile and load erlang modules from string input
+pkg_dynamic_compile_homepage = https://github.com/jkvor/dynamic_compile
+pkg_dynamic_compile_fetch = git
+pkg_dynamic_compile_repo = https://github.com/jkvor/dynamic_compile
+pkg_dynamic_compile_commit = master
+
+PACKAGES += e2
+pkg_e2_name = e2
+pkg_e2_description = Library to simplify writing correct OTP applications.
+pkg_e2_homepage = http://e2project.org
+pkg_e2_fetch = git
+pkg_e2_repo = https://github.com/gar1t/e2
+pkg_e2_commit = master
+
+PACKAGES += eamf
+pkg_eamf_name = eamf
+pkg_eamf_description = eAMF provides Action Message Format (AMF) support for Erlang
+pkg_eamf_homepage = https://github.com/mrinalwadhwa/eamf
+pkg_eamf_fetch = git
+pkg_eamf_repo = https://github.com/mrinalwadhwa/eamf
+pkg_eamf_commit = master
+
+PACKAGES += eavro
+pkg_eavro_name = eavro
+pkg_eavro_description = Apache Avro encoder/decoder
+pkg_eavro_homepage = https://github.com/SIfoxDevTeam/eavro
+pkg_eavro_fetch = git
+pkg_eavro_repo = https://github.com/SIfoxDevTeam/eavro
+pkg_eavro_commit = master
+
+PACKAGES += ecapnp
+pkg_ecapnp_name = ecapnp
+pkg_ecapnp_description = Cap'n Proto library for Erlang
+pkg_ecapnp_homepage = https://github.com/kaos/ecapnp
+pkg_ecapnp_fetch = git
+pkg_ecapnp_repo = https://github.com/kaos/ecapnp
+pkg_ecapnp_commit = master
+
+PACKAGES += econfig
+pkg_econfig_name = econfig
+pkg_econfig_description = simple Erlang config handler using INI files
+pkg_econfig_homepage = https://github.com/benoitc/econfig
+pkg_econfig_fetch = git
+pkg_econfig_repo = https://github.com/benoitc/econfig
+pkg_econfig_commit = master
+
+PACKAGES += edate
+pkg_edate_name = edate
+pkg_edate_description = date manipulation library for erlang
+pkg_edate_homepage = https://github.com/dweldon/edate
+pkg_edate_fetch = git
+pkg_edate_repo = https://github.com/dweldon/edate
+pkg_edate_commit = master
+
+PACKAGES += edgar
+pkg_edgar_name = edgar
+pkg_edgar_description = Erlang Does GNU AR
+pkg_edgar_homepage = https://github.com/crownedgrouse/edgar
+pkg_edgar_fetch = git
+pkg_edgar_repo = https://github.com/crownedgrouse/edgar
+pkg_edgar_commit = master
+
+PACKAGES += edis
+pkg_edis_name = edis
+pkg_edis_description = An Erlang implementation of Redis KV Store
+pkg_edis_homepage = http://inaka.github.com/edis/
+pkg_edis_fetch = git
+pkg_edis_repo = https://github.com/inaka/edis
+pkg_edis_commit = master
+
+PACKAGES += edns
+pkg_edns_name = edns
+pkg_edns_description = Erlang/OTP DNS server
+pkg_edns_homepage = https://github.com/hcvst/erlang-dns
+pkg_edns_fetch = git
+pkg_edns_repo = https://github.com/hcvst/erlang-dns
+pkg_edns_commit = master
+
+PACKAGES += edown
+pkg_edown_name = edown
+pkg_edown_description = EDoc extension for generating Github-flavored Markdown
+pkg_edown_homepage = https://github.com/uwiger/edown
+pkg_edown_fetch = git
+pkg_edown_repo = https://github.com/uwiger/edown
+pkg_edown_commit = master
+
+PACKAGES += eep
+pkg_eep_name = eep
+pkg_eep_description = Erlang Easy Profiling (eep) application provides a way to analyze application performance and call hierarchy
+pkg_eep_homepage = https://github.com/virtan/eep
+pkg_eep_fetch = git
+pkg_eep_repo = https://github.com/virtan/eep
+pkg_eep_commit = master
+
+PACKAGES += eep_app
+pkg_eep_app_name = eep_app
+pkg_eep_app_description = Embedded Event Processing
+pkg_eep_app_homepage = https://github.com/darach/eep-erl
+pkg_eep_app_fetch = git
+pkg_eep_app_repo = https://github.com/darach/eep-erl
+pkg_eep_app_commit = master
+
+PACKAGES += efene
+pkg_efene_name = efene
+pkg_efene_description = Alternative syntax for the Erlang Programming Language focusing on simplicity, ease of use and programmer UX
+pkg_efene_homepage = https://github.com/efene/efene
+pkg_efene_fetch = git
+pkg_efene_repo = https://github.com/efene/efene
+pkg_efene_commit = master
+
+PACKAGES += egeoip
+pkg_egeoip_name = egeoip
+pkg_egeoip_description = Erlang IP Geolocation module, currently supporting the MaxMind GeoLite City Database.
+pkg_egeoip_homepage = https://github.com/mochi/egeoip
+pkg_egeoip_fetch = git
+pkg_egeoip_repo = https://github.com/mochi/egeoip
+pkg_egeoip_commit = master
+
+PACKAGES += ehsa
+pkg_ehsa_name = ehsa
+pkg_ehsa_description = Erlang HTTP server basic and digest authentication modules
+pkg_ehsa_homepage = https://bitbucket.org/a12n/ehsa
+pkg_ehsa_fetch = hg
+pkg_ehsa_repo = https://bitbucket.org/a12n/ehsa
+pkg_ehsa_commit = default
+
+PACKAGES += ej
+pkg_ej_name = ej
+pkg_ej_description = Helper module for working with Erlang terms representing JSON
+pkg_ej_homepage = https://github.com/seth/ej
+pkg_ej_fetch = git
+pkg_ej_repo = https://github.com/seth/ej
+pkg_ej_commit = master
+
+PACKAGES += ejabberd
+pkg_ejabberd_name = ejabberd
+pkg_ejabberd_description = Robust, ubiquitous and massively scalable Jabber / XMPP Instant Messaging platform
+pkg_ejabberd_homepage = https://github.com/processone/ejabberd
+pkg_ejabberd_fetch = git
+pkg_ejabberd_repo = https://github.com/processone/ejabberd
+pkg_ejabberd_commit = master
+
+PACKAGES += ejwt
+pkg_ejwt_name = ejwt
+pkg_ejwt_description = erlang library for JSON Web Token
+pkg_ejwt_homepage = https://github.com/artefactop/ejwt
+pkg_ejwt_fetch = git
+pkg_ejwt_repo = https://github.com/artefactop/ejwt
+pkg_ejwt_commit = master
+
+PACKAGES += ekaf
+pkg_ekaf_name = ekaf
+pkg_ekaf_description = A minimal, high-performance Kafka client in Erlang.
+pkg_ekaf_homepage = https://github.com/helpshift/ekaf
+pkg_ekaf_fetch = git
+pkg_ekaf_repo = https://github.com/helpshift/ekaf
+pkg_ekaf_commit = master
+
+PACKAGES += elarm
+pkg_elarm_name = elarm
+pkg_elarm_description = Alarm Manager for Erlang.
+pkg_elarm_homepage = https://github.com/esl/elarm
+pkg_elarm_fetch = git
+pkg_elarm_repo = https://github.com/esl/elarm
+pkg_elarm_commit = master
+
+PACKAGES += eleveldb
+pkg_eleveldb_name = eleveldb
+pkg_eleveldb_description = Erlang LevelDB API
+pkg_eleveldb_homepage = https://github.com/basho/eleveldb
+pkg_eleveldb_fetch = git
+pkg_eleveldb_repo = https://github.com/basho/eleveldb
+pkg_eleveldb_commit = master
+
+PACKAGES += elixir
+pkg_elixir_name = elixir
+pkg_elixir_description = Elixir is a dynamic, functional language designed for building scalable and maintainable applications
+pkg_elixir_homepage = https://elixir-lang.org/
+pkg_elixir_fetch = git
+pkg_elixir_repo = https://github.com/elixir-lang/elixir
+pkg_elixir_commit = master
+
+PACKAGES += elli
+pkg_elli_name = elli
+pkg_elli_description = Simple, robust and performant Erlang web server
+pkg_elli_homepage = https://github.com/elli-lib/elli
+pkg_elli_fetch = git
+pkg_elli_repo = https://github.com/elli-lib/elli
+pkg_elli_commit = master
+
+PACKAGES += elvis
+pkg_elvis_name = elvis
+pkg_elvis_description = Erlang Style Reviewer
+pkg_elvis_homepage = https://github.com/inaka/elvis
+pkg_elvis_fetch = git
+pkg_elvis_repo = https://github.com/inaka/elvis
+pkg_elvis_commit = master
+
+PACKAGES += emagick
+pkg_emagick_name = emagick
+pkg_emagick_description = Wrapper for Graphics/ImageMagick command line tool.
+pkg_emagick_homepage = https://github.com/kivra/emagick
+pkg_emagick_fetch = git
+pkg_emagick_repo = https://github.com/kivra/emagick
+pkg_emagick_commit = master
+
+PACKAGES += emysql
+pkg_emysql_name = emysql
+pkg_emysql_description = Stable, pure Erlang MySQL driver.
+pkg_emysql_homepage = https://github.com/Eonblast/Emysql
+pkg_emysql_fetch = git
+pkg_emysql_repo = https://github.com/Eonblast/Emysql
+pkg_emysql_commit = master
+
+PACKAGES += enm
+pkg_enm_name = enm
+pkg_enm_description = Erlang driver for nanomsg
+pkg_enm_homepage = https://github.com/basho/enm
+pkg_enm_fetch = git
+pkg_enm_repo = https://github.com/basho/enm
+pkg_enm_commit = master
+
+PACKAGES += entop
+pkg_entop_name = entop
+pkg_entop_description = A top-like tool for monitoring an Erlang node
+pkg_entop_homepage = https://github.com/mazenharake/entop
+pkg_entop_fetch = git
+pkg_entop_repo = https://github.com/mazenharake/entop
+pkg_entop_commit = master
+
+PACKAGES += epcap
+pkg_epcap_name = epcap
+pkg_epcap_description = Erlang packet capture interface using pcap
+pkg_epcap_homepage = https://github.com/msantos/epcap
+pkg_epcap_fetch = git
+pkg_epcap_repo = https://github.com/msantos/epcap
+pkg_epcap_commit = master
+
+PACKAGES += eper
+pkg_eper_name = eper
+pkg_eper_description = Erlang performance and debugging tools.
+pkg_eper_homepage = https://github.com/massemanet/eper
+pkg_eper_fetch = git
+pkg_eper_repo = https://github.com/massemanet/eper
+pkg_eper_commit = master
+
+PACKAGES += epgsql
+pkg_epgsql_name = epgsql
+pkg_epgsql_description = Erlang PostgreSQL client library.
+pkg_epgsql_homepage = https://github.com/epgsql/epgsql
+pkg_epgsql_fetch = git
+pkg_epgsql_repo = https://github.com/epgsql/epgsql
+pkg_epgsql_commit = master
+
+PACKAGES += episcina
+pkg_episcina_name = episcina
+pkg_episcina_description = A simple, non-intrusive resource pool for connections
+pkg_episcina_homepage = https://github.com/erlware/episcina
+pkg_episcina_fetch = git
+pkg_episcina_repo = https://github.com/erlware/episcina
+pkg_episcina_commit = master
+
+PACKAGES += eplot
+pkg_eplot_name = eplot
+pkg_eplot_description = A plot engine written in erlang.
+pkg_eplot_homepage = https://github.com/psyeugenic/eplot
+pkg_eplot_fetch = git
+pkg_eplot_repo = https://github.com/psyeugenic/eplot
+pkg_eplot_commit = master
+
+PACKAGES += epocxy
+pkg_epocxy_name = epocxy
+pkg_epocxy_description = Erlang Patterns of Concurrency
+pkg_epocxy_homepage = https://github.com/duomark/epocxy
+pkg_epocxy_fetch = git
+pkg_epocxy_repo = https://github.com/duomark/epocxy
+pkg_epocxy_commit = master
+
+PACKAGES += epubnub
+pkg_epubnub_name = epubnub
+pkg_epubnub_description = Erlang PubNub API
+pkg_epubnub_homepage = https://github.com/tsloughter/epubnub
+pkg_epubnub_fetch = git
+pkg_epubnub_repo = https://github.com/tsloughter/epubnub
+pkg_epubnub_commit = master
+
+PACKAGES += eqm
+pkg_eqm_name = eqm
+pkg_eqm_description = Erlang pub sub with supply-demand channels
+pkg_eqm_homepage = https://github.com/loucash/eqm
+pkg_eqm_fetch = git
+pkg_eqm_repo = https://github.com/loucash/eqm
+pkg_eqm_commit = master
+
+PACKAGES += eredis
+pkg_eredis_name = eredis
+pkg_eredis_description = Erlang Redis client
+pkg_eredis_homepage = https://github.com/wooga/eredis
+pkg_eredis_fetch = git
+pkg_eredis_repo = https://github.com/wooga/eredis
+pkg_eredis_commit = master
+
+PACKAGES += eredis_pool
+pkg_eredis_pool_name = eredis_pool
+pkg_eredis_pool_description = eredis_pool is a pool of Redis clients, using eredis and poolboy.
+pkg_eredis_pool_homepage = https://github.com/hiroeorz/eredis_pool
+pkg_eredis_pool_fetch = git
+pkg_eredis_pool_repo = https://github.com/hiroeorz/eredis_pool
+pkg_eredis_pool_commit = master
+
+PACKAGES += erl_streams
+pkg_erl_streams_name = erl_streams
+pkg_erl_streams_description = Streams in Erlang
+pkg_erl_streams_homepage = https://github.com/epappas/erl_streams
+pkg_erl_streams_fetch = git
+pkg_erl_streams_repo = https://github.com/epappas/erl_streams
+pkg_erl_streams_commit = master
+
+PACKAGES += erlang_cep
+pkg_erlang_cep_name = erlang_cep
+pkg_erlang_cep_description = A basic CEP package written in erlang
+pkg_erlang_cep_homepage = https://github.com/danmacklin/erlang_cep
+pkg_erlang_cep_fetch = git
+pkg_erlang_cep_repo = https://github.com/danmacklin/erlang_cep
+pkg_erlang_cep_commit = master
+
+PACKAGES += erlang_js
+pkg_erlang_js_name = erlang_js
+pkg_erlang_js_description = A linked-in driver for Erlang to Mozilla's Spidermonkey Javascript runtime.
+pkg_erlang_js_homepage = https://github.com/basho/erlang_js
+pkg_erlang_js_fetch = git
+pkg_erlang_js_repo = https://github.com/basho/erlang_js
+pkg_erlang_js_commit = master
+
+PACKAGES += erlang_localtime
+pkg_erlang_localtime_name = erlang_localtime
+pkg_erlang_localtime_description = Erlang library for conversion from one local time to another
+pkg_erlang_localtime_homepage = https://github.com/dmitryme/erlang_localtime
+pkg_erlang_localtime_fetch = git
+pkg_erlang_localtime_repo = https://github.com/dmitryme/erlang_localtime
+pkg_erlang_localtime_commit = master
+
+PACKAGES += erlang_smtp
+pkg_erlang_smtp_name = erlang_smtp
+pkg_erlang_smtp_description = Erlang SMTP and POP3 server code.
+pkg_erlang_smtp_homepage = https://github.com/tonyg/erlang-smtp
+pkg_erlang_smtp_fetch = git
+pkg_erlang_smtp_repo = https://github.com/tonyg/erlang-smtp
+pkg_erlang_smtp_commit = master
+
+PACKAGES += erlang_term
+pkg_erlang_term_name = erlang_term
+pkg_erlang_term_description = Erlang Term Info
+pkg_erlang_term_homepage = https://github.com/okeuday/erlang_term
+pkg_erlang_term_fetch = git
+pkg_erlang_term_repo = https://github.com/okeuday/erlang_term
+pkg_erlang_term_commit = master
+
+PACKAGES += erlastic_search
+pkg_erlastic_search_name = erlastic_search
+pkg_erlastic_search_description = An Erlang app for communicating with Elasticsearch's REST interface.
+pkg_erlastic_search_homepage = https://github.com/tsloughter/erlastic_search
+pkg_erlastic_search_fetch = git
+pkg_erlastic_search_repo = https://github.com/tsloughter/erlastic_search
+pkg_erlastic_search_commit = master
+
+PACKAGES += erlasticsearch
+pkg_erlasticsearch_name = erlasticsearch
+pkg_erlasticsearch_description = Erlang Thrift interface to Elasticsearch
+pkg_erlasticsearch_homepage = https://github.com/dieswaytoofast/erlasticsearch
+pkg_erlasticsearch_fetch = git
+pkg_erlasticsearch_repo = https://github.com/dieswaytoofast/erlasticsearch
+pkg_erlasticsearch_commit = master
+
+PACKAGES += erlbrake
+pkg_erlbrake_name = erlbrake
+pkg_erlbrake_description = Erlang Airbrake notification client
+pkg_erlbrake_homepage = https://github.com/kenpratt/erlbrake
+pkg_erlbrake_fetch = git
+pkg_erlbrake_repo = https://github.com/kenpratt/erlbrake
+pkg_erlbrake_commit = master
+
+PACKAGES += erlcloud
+pkg_erlcloud_name = erlcloud
+pkg_erlcloud_description = Cloud computing library for Erlang (Amazon EC2, S3, SQS, SimpleDB, Mechanical Turk, ELB)
+pkg_erlcloud_homepage = https://github.com/gleber/erlcloud
+pkg_erlcloud_fetch = git
+pkg_erlcloud_repo = https://github.com/gleber/erlcloud
+pkg_erlcloud_commit = master
+
+PACKAGES += erlcron
+pkg_erlcron_name = erlcron
+pkg_erlcron_description = Erlang cronish system
+pkg_erlcron_homepage = https://github.com/erlware/erlcron
+pkg_erlcron_fetch = git
+pkg_erlcron_repo = https://github.com/erlware/erlcron
+pkg_erlcron_commit = master
+
+PACKAGES += erldb
+pkg_erldb_name = erldb
+pkg_erldb_description = ORM (Object-relational mapping) application implemented in Erlang
+pkg_erldb_homepage = http://erldb.org
+pkg_erldb_fetch = git
+pkg_erldb_repo = https://github.com/erldb/erldb
+pkg_erldb_commit = master
+
+PACKAGES += erldis
+pkg_erldis_name = erldis
+pkg_erldis_description = Redis Erlang client library
+pkg_erldis_homepage = https://github.com/cstar/erldis
+pkg_erldis_fetch = git
+pkg_erldis_repo = https://github.com/cstar/erldis
+pkg_erldis_commit = master
+
+PACKAGES += erldns
+pkg_erldns_name = erldns
+pkg_erldns_description = DNS server, in Erlang.
+pkg_erldns_homepage = https://github.com/aetrion/erl-dns
+pkg_erldns_fetch = git
+pkg_erldns_repo = https://github.com/aetrion/erl-dns
+pkg_erldns_commit = master
+
+PACKAGES += erldocker
+pkg_erldocker_name = erldocker
+pkg_erldocker_description = Docker Remote API client for Erlang
+pkg_erldocker_homepage = https://github.com/proger/erldocker
+pkg_erldocker_fetch = git
+pkg_erldocker_repo = https://github.com/proger/erldocker
+pkg_erldocker_commit = master
+
+PACKAGES += erlfsmon
+pkg_erlfsmon_name = erlfsmon
+pkg_erlfsmon_description = Erlang filesystem event watcher for Linux and OS X
+pkg_erlfsmon_homepage = https://github.com/proger/erlfsmon
+pkg_erlfsmon_fetch = git
+pkg_erlfsmon_repo = https://github.com/proger/erlfsmon
+pkg_erlfsmon_commit = master
+
+PACKAGES += erlgit
+pkg_erlgit_name = erlgit
+pkg_erlgit_description = Erlang convenience wrapper around git executable
+pkg_erlgit_homepage = https://github.com/gleber/erlgit
+pkg_erlgit_fetch = git
+pkg_erlgit_repo = https://github.com/gleber/erlgit
+pkg_erlgit_commit = master
+
+PACKAGES += erlguten
+pkg_erlguten_name = erlguten
+pkg_erlguten_description = ErlGuten is a system for high-quality typesetting, written purely in Erlang.
+pkg_erlguten_homepage = https://github.com/richcarl/erlguten
+pkg_erlguten_fetch = git
+pkg_erlguten_repo = https://github.com/richcarl/erlguten
+pkg_erlguten_commit = master
+
+PACKAGES += erlmc
+pkg_erlmc_name = erlmc
+pkg_erlmc_description = Erlang memcached binary protocol client
+pkg_erlmc_homepage = https://github.com/jkvor/erlmc
+pkg_erlmc_fetch = git
+pkg_erlmc_repo = https://github.com/jkvor/erlmc
+pkg_erlmc_commit = master
+
+PACKAGES += erlmongo
+pkg_erlmongo_name = erlmongo
+pkg_erlmongo_description = Record-based Erlang driver for MongoDB with GridFS support
+pkg_erlmongo_homepage = https://github.com/SergejJurecko/erlmongo
+pkg_erlmongo_fetch = git
+pkg_erlmongo_repo = https://github.com/SergejJurecko/erlmongo
+pkg_erlmongo_commit = master
+
+PACKAGES += erlog
+pkg_erlog_name = erlog
+pkg_erlog_description = Prolog interpreter in and for Erlang
+pkg_erlog_homepage = https://github.com/rvirding/erlog
+pkg_erlog_fetch = git
+pkg_erlog_repo = https://github.com/rvirding/erlog
+pkg_erlog_commit = master
+
+PACKAGES += erlpass
+pkg_erlpass_name = erlpass
+pkg_erlpass_description = A library to handle password hashing and changing in a safe manner, independent of any kind of storage whatsoever.
+pkg_erlpass_homepage = https://github.com/ferd/erlpass
+pkg_erlpass_fetch = git
+pkg_erlpass_repo = https://github.com/ferd/erlpass
+pkg_erlpass_commit = master
+
+PACKAGES += erlport
+pkg_erlport_name = erlport
+pkg_erlport_description = ErlPort - connect Erlang to other languages
+pkg_erlport_homepage = https://github.com/hdima/erlport
+pkg_erlport_fetch = git
+pkg_erlport_repo = https://github.com/hdima/erlport
+pkg_erlport_commit = master
+
+PACKAGES += erlsh
+pkg_erlsh_name = erlsh
+pkg_erlsh_description = Erlang shell tools
+pkg_erlsh_homepage = https://github.com/proger/erlsh
+pkg_erlsh_fetch = git
+pkg_erlsh_repo = https://github.com/proger/erlsh
+pkg_erlsh_commit = master
+
+PACKAGES += erlsha2
+pkg_erlsha2_name = erlsha2
+pkg_erlsha2_description = SHA-224, SHA-256, SHA-384, SHA-512 implemented in Erlang NIFs.
+pkg_erlsha2_homepage = https://github.com/vinoski/erlsha2
+pkg_erlsha2_fetch = git
+pkg_erlsha2_repo = https://github.com/vinoski/erlsha2
+pkg_erlsha2_commit = master
+
+PACKAGES += erlsom
+pkg_erlsom_name = erlsom
+pkg_erlsom_description = XML parser for Erlang
+pkg_erlsom_homepage = https://github.com/willemdj/erlsom
+pkg_erlsom_fetch = git
+pkg_erlsom_repo = https://github.com/willemdj/erlsom
+pkg_erlsom_commit = master
+
+PACKAGES += erlubi
+pkg_erlubi_name = erlubi
+pkg_erlubi_description = Ubigraph Erlang Client (and Process Visualizer)
+pkg_erlubi_homepage = https://github.com/krestenkrab/erlubi
+pkg_erlubi_fetch = git
+pkg_erlubi_repo = https://github.com/krestenkrab/erlubi
+pkg_erlubi_commit = master
+
+PACKAGES += erlvolt
+pkg_erlvolt_name = erlvolt
+pkg_erlvolt_description = VoltDB Erlang Client Driver
+pkg_erlvolt_homepage = https://github.com/VoltDB/voltdb-client-erlang
+pkg_erlvolt_fetch = git
+pkg_erlvolt_repo = https://github.com/VoltDB/voltdb-client-erlang
+pkg_erlvolt_commit = master
+
+PACKAGES += erlware_commons
+pkg_erlware_commons_name = erlware_commons
+pkg_erlware_commons_description = Erlware Commons is an Erlware project focused on all aspects of reusable Erlang components.
+pkg_erlware_commons_homepage = https://github.com/erlware/erlware_commons
+pkg_erlware_commons_fetch = git
+pkg_erlware_commons_repo = https://github.com/erlware/erlware_commons
+pkg_erlware_commons_commit = master
+
+PACKAGES += erlydtl
+pkg_erlydtl_name = erlydtl
+pkg_erlydtl_description = Django Template Language for Erlang.
+pkg_erlydtl_homepage = https://github.com/erlydtl/erlydtl
+pkg_erlydtl_fetch = git
+pkg_erlydtl_repo = https://github.com/erlydtl/erlydtl
+pkg_erlydtl_commit = master
+
+PACKAGES += errd
+pkg_errd_name = errd
+pkg_errd_description = Erlang RRDTool library
+pkg_errd_homepage = https://github.com/archaelus/errd
+pkg_errd_fetch = git
+pkg_errd_repo = https://github.com/archaelus/errd
+pkg_errd_commit = master
+
+PACKAGES += erserve
+pkg_erserve_name = erserve
+pkg_erserve_description = Erlang/Rserve communication interface
+pkg_erserve_homepage = https://github.com/del/erserve
+pkg_erserve_fetch = git
+pkg_erserve_repo = https://github.com/del/erserve
+pkg_erserve_commit = master
+
+PACKAGES += erwa
+pkg_erwa_name = erwa
+pkg_erwa_description = A WAMP router and client written in Erlang.
+pkg_erwa_homepage = https://github.com/bwegh/erwa
+pkg_erwa_fetch = git
+pkg_erwa_repo = https://github.com/bwegh/erwa
+pkg_erwa_commit = master
+
+PACKAGES += escalus
+pkg_escalus_name = escalus
+pkg_escalus_description = An XMPP client library in Erlang for conveniently testing XMPP servers
+pkg_escalus_homepage = https://github.com/esl/escalus
+pkg_escalus_fetch = git
+pkg_escalus_repo = https://github.com/esl/escalus
+pkg_escalus_commit = master
+
+PACKAGES += esh_mk
+pkg_esh_mk_name = esh_mk
+pkg_esh_mk_description = esh template engine plugin for erlang.mk
+pkg_esh_mk_homepage = https://github.com/crownedgrouse/esh.mk
+pkg_esh_mk_fetch = git
+pkg_esh_mk_repo = https://github.com/crownedgrouse/esh.mk.git
+pkg_esh_mk_commit = master
+
+PACKAGES += espec
+pkg_espec_name = espec
+pkg_espec_description = ESpec: Behaviour driven development framework for Erlang
+pkg_espec_homepage = https://github.com/lucaspiller/espec
+pkg_espec_fetch = git
+pkg_espec_repo = https://github.com/lucaspiller/espec
+pkg_espec_commit = master
+
+PACKAGES += estatsd
+pkg_estatsd_name = estatsd
+pkg_estatsd_description = Erlang stats aggregation app that periodically flushes data to Graphite
+pkg_estatsd_homepage = https://github.com/RJ/estatsd
+pkg_estatsd_fetch = git
+pkg_estatsd_repo = https://github.com/RJ/estatsd
+pkg_estatsd_commit = master
+
+PACKAGES += etap
+pkg_etap_name = etap
+pkg_etap_description = etap is a simple Erlang testing library that provides TAP-compliant output.
+pkg_etap_homepage = https://github.com/ngerakines/etap
+pkg_etap_fetch = git
+pkg_etap_repo = https://github.com/ngerakines/etap
+pkg_etap_commit = master
+
+PACKAGES += etest
+pkg_etest_name = etest
+pkg_etest_description = A lightweight, convention-over-configuration test framework for Erlang
+pkg_etest_homepage = https://github.com/wooga/etest
+pkg_etest_fetch = git
+pkg_etest_repo = https://github.com/wooga/etest
+pkg_etest_commit = master
+
+PACKAGES += etest_http
+pkg_etest_http_name = etest_http
+pkg_etest_http_description = etest assertions around HTTP (client-side)
+pkg_etest_http_homepage = https://github.com/wooga/etest_http
+pkg_etest_http_fetch = git
+pkg_etest_http_repo = https://github.com/wooga/etest_http
+pkg_etest_http_commit = master
+
+PACKAGES += etoml
+pkg_etoml_name = etoml
+pkg_etoml_description = TOML parser for Erlang
+pkg_etoml_homepage = https://github.com/kalta/etoml
+pkg_etoml_fetch = git
+pkg_etoml_repo = https://github.com/kalta/etoml
+pkg_etoml_commit = master
+
+PACKAGES += eunit
+pkg_eunit_name = eunit
+pkg_eunit_description = The EUnit lightweight unit testing framework for Erlang - this is the canonical development repository.
+pkg_eunit_homepage = https://github.com/richcarl/eunit
+pkg_eunit_fetch = git
+pkg_eunit_repo = https://github.com/richcarl/eunit
+pkg_eunit_commit = master
+
+PACKAGES += eunit_formatters
+pkg_eunit_formatters_name = eunit_formatters
+pkg_eunit_formatters_description = Because eunit's output sucks. Let's make it better.
+pkg_eunit_formatters_homepage = https://github.com/seancribbs/eunit_formatters
+pkg_eunit_formatters_fetch = git
+pkg_eunit_formatters_repo = https://github.com/seancribbs/eunit_formatters
+pkg_eunit_formatters_commit = master
+
+PACKAGES += euthanasia
+pkg_euthanasia_name = euthanasia
+pkg_euthanasia_description = Merciful killer for your Erlang processes
+pkg_euthanasia_homepage = https://github.com/doubleyou/euthanasia
+pkg_euthanasia_fetch = git
+pkg_euthanasia_repo = https://github.com/doubleyou/euthanasia
+pkg_euthanasia_commit = master
+
+PACKAGES += evum
+pkg_evum_name = evum
+pkg_evum_description = Spawn Linux VMs as Erlang processes in the Erlang VM
+pkg_evum_homepage = https://github.com/msantos/evum
+pkg_evum_fetch = git
+pkg_evum_repo = https://github.com/msantos/evum
+pkg_evum_commit = master
+
+PACKAGES += exec
+pkg_exec_name = erlexec
+pkg_exec_description = Execute and control OS processes from Erlang/OTP.
+pkg_exec_homepage = http://saleyn.github.com/erlexec
+pkg_exec_fetch = git
+pkg_exec_repo = https://github.com/saleyn/erlexec
+pkg_exec_commit = master
+
+PACKAGES += exml
+pkg_exml_name = exml
+pkg_exml_description = XML parsing library in Erlang
+pkg_exml_homepage = https://github.com/paulgray/exml
+pkg_exml_fetch = git
+pkg_exml_repo = https://github.com/paulgray/exml
+pkg_exml_commit = master
+
+PACKAGES += exometer
+pkg_exometer_name = exometer
+pkg_exometer_description = Basic measurement objects and probe behavior
+pkg_exometer_homepage = https://github.com/Feuerlabs/exometer
+pkg_exometer_fetch = git
+pkg_exometer_repo = https://github.com/Feuerlabs/exometer
+pkg_exometer_commit = master
+
+PACKAGES += exs1024
+pkg_exs1024_name = exs1024
+pkg_exs1024_description = Xorshift1024star pseudo-random number generator for Erlang.
+pkg_exs1024_homepage = https://github.com/jj1bdx/exs1024
+pkg_exs1024_fetch = git
+pkg_exs1024_repo = https://github.com/jj1bdx/exs1024
+pkg_exs1024_commit = master
+
+PACKAGES += exs64
+pkg_exs64_name = exs64
+pkg_exs64_description = Xorshift64star pseudo-random number generator for Erlang.
+pkg_exs64_homepage = https://github.com/jj1bdx/exs64
+pkg_exs64_fetch = git
+pkg_exs64_repo = https://github.com/jj1bdx/exs64
+pkg_exs64_commit = master
+
+PACKAGES += exsplus116
+pkg_exsplus116_name = exsplus116
+pkg_exsplus116_description = Xorshift116plus for Erlang
+pkg_exsplus116_homepage = https://github.com/jj1bdx/exsplus116
+pkg_exsplus116_fetch = git
+pkg_exsplus116_repo = https://github.com/jj1bdx/exsplus116
+pkg_exsplus116_commit = master
+
+PACKAGES += exsplus128
+pkg_exsplus128_name = exsplus128
+pkg_exsplus128_description = Xorshift128plus pseudo-random number generator for Erlang.
+pkg_exsplus128_homepage = https://github.com/jj1bdx/exsplus128
+pkg_exsplus128_fetch = git
+pkg_exsplus128_repo = https://github.com/jj1bdx/exsplus128
+pkg_exsplus128_commit = master
+
+PACKAGES += ezmq
+pkg_ezmq_name = ezmq
+pkg_ezmq_description = ZeroMQ implemented in Erlang
+pkg_ezmq_homepage = https://github.com/RoadRunnr/ezmq
+pkg_ezmq_fetch = git
+pkg_ezmq_repo = https://github.com/RoadRunnr/ezmq
+pkg_ezmq_commit = master
+
+PACKAGES += ezmtp
+pkg_ezmtp_name = ezmtp
+pkg_ezmtp_description = ZMTP protocol in pure Erlang.
+pkg_ezmtp_homepage = https://github.com/a13x/ezmtp
+pkg_ezmtp_fetch = git
+pkg_ezmtp_repo = https://github.com/a13x/ezmtp
+pkg_ezmtp_commit = master
+
+PACKAGES += fast_disk_log
+pkg_fast_disk_log_name = fast_disk_log
+pkg_fast_disk_log_description = Pool-based asynchronous Erlang disk logger
+pkg_fast_disk_log_homepage = https://github.com/lpgauth/fast_disk_log
+pkg_fast_disk_log_fetch = git
+pkg_fast_disk_log_repo = https://github.com/lpgauth/fast_disk_log
+pkg_fast_disk_log_commit = master
+
+PACKAGES += feeder
+pkg_feeder_name = feeder
+pkg_feeder_description = Stream parser for RSS and Atom formatted XML feeds.
+pkg_feeder_homepage = https://github.com/michaelnisi/feeder
+pkg_feeder_fetch = git
+pkg_feeder_repo = https://github.com/michaelnisi/feeder
+pkg_feeder_commit = master
+
+PACKAGES += find_crate
+pkg_find_crate_name = find_crate
+pkg_find_crate_description = Find Rust libs and exes in an Erlang application's priv directory
+pkg_find_crate_homepage = https://github.com/goertzenator/find_crate
+pkg_find_crate_fetch = git
+pkg_find_crate_repo = https://github.com/goertzenator/find_crate
+pkg_find_crate_commit = master
+
+PACKAGES += fix
+pkg_fix_name = fix
+pkg_fix_description = FIX protocol (http://fixprotocol.org/) implementation.
+pkg_fix_homepage = https://github.com/maxlapshin/fix
+pkg_fix_fetch = git
+pkg_fix_repo = https://github.com/maxlapshin/fix
+pkg_fix_commit = master
+
+PACKAGES += flower
+pkg_flower_name = flower
+pkg_flower_description = FlowER - an Erlang OpenFlow development platform
+pkg_flower_homepage = https://github.com/travelping/flower
+pkg_flower_fetch = git
+pkg_flower_repo = https://github.com/travelping/flower
+pkg_flower_commit = master
+
+PACKAGES += fn
+pkg_fn_name = fn
+pkg_fn_description = Function utilities for Erlang
+pkg_fn_homepage = https://github.com/reiddraper/fn
+pkg_fn_fetch = git
+pkg_fn_repo = https://github.com/reiddraper/fn
+pkg_fn_commit = master
+
+PACKAGES += folsom
+pkg_folsom_name = folsom
+pkg_folsom_description = Expose Erlang Events and Metrics
+pkg_folsom_homepage = https://github.com/boundary/folsom
+pkg_folsom_fetch = git
+pkg_folsom_repo = https://github.com/boundary/folsom
+pkg_folsom_commit = master
+
+PACKAGES += folsom_cowboy
+pkg_folsom_cowboy_name = folsom_cowboy
+pkg_folsom_cowboy_description = A Cowboy-based Folsom HTTP wrapper.
+pkg_folsom_cowboy_homepage = https://github.com/boundary/folsom_cowboy
+pkg_folsom_cowboy_fetch = git
+pkg_folsom_cowboy_repo = https://github.com/boundary/folsom_cowboy
+pkg_folsom_cowboy_commit = master
+
+PACKAGES += folsomite
+pkg_folsomite_name = folsomite
+pkg_folsomite_description = Blow up your Graphite / Riemann server with Folsom metrics
+pkg_folsomite_homepage = https://github.com/campanja/folsomite
+pkg_folsomite_fetch = git
+pkg_folsomite_repo = https://github.com/campanja/folsomite
+pkg_folsomite_commit = master
+
+PACKAGES += fs
+pkg_fs_name = fs
+pkg_fs_description = Erlang FileSystem Listener
+pkg_fs_homepage = https://github.com/synrc/fs
+pkg_fs_fetch = git
+pkg_fs_repo = https://github.com/synrc/fs
+pkg_fs_commit = master
+
+PACKAGES += fuse
+pkg_fuse_name = fuse
+pkg_fuse_description = A Circuit Breaker for Erlang
+pkg_fuse_homepage = https://github.com/jlouis/fuse
+pkg_fuse_fetch = git
+pkg_fuse_repo = https://github.com/jlouis/fuse
+pkg_fuse_commit = master
+
+PACKAGES += gcm
+pkg_gcm_name = gcm
+pkg_gcm_description = An Erlang application for Google Cloud Messaging
+pkg_gcm_homepage = https://github.com/pdincau/gcm-erlang
+pkg_gcm_fetch = git
+pkg_gcm_repo = https://github.com/pdincau/gcm-erlang
+pkg_gcm_commit = master
+
+PACKAGES += gcprof
+pkg_gcprof_name = gcprof
+pkg_gcprof_description = Garbage Collection profiler for Erlang
+pkg_gcprof_homepage = https://github.com/knutin/gcprof
+pkg_gcprof_fetch = git
+pkg_gcprof_repo = https://github.com/knutin/gcprof
+pkg_gcprof_commit = master
+
+PACKAGES += geas
+pkg_geas_name = geas
+pkg_geas_description = Guess Erlang Application Scattering
+pkg_geas_homepage = https://github.com/crownedgrouse/geas
+pkg_geas_fetch = git
+pkg_geas_repo = https://github.com/crownedgrouse/geas
+pkg_geas_commit = master
+
+PACKAGES += geef
+pkg_geef_name = geef
+pkg_geef_description = Git NEEEEF (Erlang NIF)
+pkg_geef_homepage = https://github.com/carlosmn/geef
+pkg_geef_fetch = git
+pkg_geef_repo = https://github.com/carlosmn/geef
+pkg_geef_commit = master
+
+PACKAGES += gen_coap
+pkg_gen_coap_name = gen_coap
+pkg_gen_coap_description = Generic Erlang CoAP Client/Server
+pkg_gen_coap_homepage = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_fetch = git
+pkg_gen_coap_repo = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_commit = master
+
+PACKAGES += gen_cycle
+pkg_gen_cycle_name = gen_cycle
+pkg_gen_cycle_description = Simple, generic OTP behaviour for recurring tasks
+pkg_gen_cycle_homepage = https://github.com/aerosol/gen_cycle
+pkg_gen_cycle_fetch = git
+pkg_gen_cycle_repo = https://github.com/aerosol/gen_cycle
+pkg_gen_cycle_commit = develop
+
+PACKAGES += gen_icmp
+pkg_gen_icmp_name = gen_icmp
+pkg_gen_icmp_description = Erlang interface to ICMP sockets
+pkg_gen_icmp_homepage = https://github.com/msantos/gen_icmp
+pkg_gen_icmp_fetch = git
+pkg_gen_icmp_repo = https://github.com/msantos/gen_icmp
+pkg_gen_icmp_commit = master
+
+PACKAGES += gen_leader
+pkg_gen_leader_name = gen_leader
+pkg_gen_leader_description = Leader election behavior
+pkg_gen_leader_homepage = https://github.com/garret-smith/gen_leader_revival
+pkg_gen_leader_fetch = git
+pkg_gen_leader_repo = https://github.com/garret-smith/gen_leader_revival
+pkg_gen_leader_commit = master
+
+PACKAGES += gen_nb_server
+pkg_gen_nb_server_name = gen_nb_server
+pkg_gen_nb_server_description = OTP behavior for writing non-blocking servers
+pkg_gen_nb_server_homepage = https://github.com/kevsmith/gen_nb_server
+pkg_gen_nb_server_fetch = git
+pkg_gen_nb_server_repo = https://github.com/kevsmith/gen_nb_server
+pkg_gen_nb_server_commit = master
+
+PACKAGES += gen_paxos
+pkg_gen_paxos_name = gen_paxos
+pkg_gen_paxos_description = An Erlang/OTP-style implementation of the Paxos distributed consensus protocol
+pkg_gen_paxos_homepage = https://github.com/gburd/gen_paxos
+pkg_gen_paxos_fetch = git
+pkg_gen_paxos_repo = https://github.com/gburd/gen_paxos
+pkg_gen_paxos_commit = master
+
+PACKAGES += gen_rpc
+pkg_gen_rpc_name = gen_rpc
+pkg_gen_rpc_description = A scalable RPC library for Erlang-VM based languages
+pkg_gen_rpc_homepage = https://github.com/priestjim/gen_rpc.git
+pkg_gen_rpc_fetch = git
+pkg_gen_rpc_repo = https://github.com/priestjim/gen_rpc.git
+pkg_gen_rpc_commit = master
+
+PACKAGES += gen_smtp
+pkg_gen_smtp_name = gen_smtp
+pkg_gen_smtp_description = A generic Erlang SMTP server and client that can be extended via callback modules
+pkg_gen_smtp_homepage = https://github.com/Vagabond/gen_smtp
+pkg_gen_smtp_fetch = git
+pkg_gen_smtp_repo = https://github.com/Vagabond/gen_smtp
+pkg_gen_smtp_commit = master
+
+PACKAGES += gen_tracker
+pkg_gen_tracker_name = gen_tracker
+pkg_gen_tracker_description = Supervisor with ETS handling of children and their metadata
+pkg_gen_tracker_homepage = https://github.com/erlyvideo/gen_tracker
+pkg_gen_tracker_fetch = git
+pkg_gen_tracker_repo = https://github.com/erlyvideo/gen_tracker
+pkg_gen_tracker_commit = master
+
+PACKAGES += gen_unix
+pkg_gen_unix_name = gen_unix
+pkg_gen_unix_description = Erlang Unix socket interface
+pkg_gen_unix_homepage = https://github.com/msantos/gen_unix
+pkg_gen_unix_fetch = git
+pkg_gen_unix_repo = https://github.com/msantos/gen_unix
+pkg_gen_unix_commit = master
+
+PACKAGES += geode
+pkg_geode_name = geode
+pkg_geode_description = geohash/proximity lookup in pure, uncut Erlang.
+pkg_geode_homepage = https://github.com/bradfordw/geode
+pkg_geode_fetch = git
+pkg_geode_repo = https://github.com/bradfordw/geode
+pkg_geode_commit = master
+
+PACKAGES += getopt
+pkg_getopt_name = getopt
+pkg_getopt_description = Module to parse command line arguments using the GNU getopt syntax
+pkg_getopt_homepage = https://github.com/jcomellas/getopt
+pkg_getopt_fetch = git
+pkg_getopt_repo = https://github.com/jcomellas/getopt
+pkg_getopt_commit = master
+
+PACKAGES += gettext
+pkg_gettext_name = gettext
+pkg_gettext_description = Erlang internationalization library.
+pkg_gettext_homepage = https://github.com/etnt/gettext
+pkg_gettext_fetch = git
+pkg_gettext_repo = https://github.com/etnt/gettext
+pkg_gettext_commit = master
+
+PACKAGES += giallo
+pkg_giallo_name = giallo
+pkg_giallo_description = Small and flexible web framework on top of Cowboy
+pkg_giallo_homepage = https://github.com/kivra/giallo
+pkg_giallo_fetch = git
+pkg_giallo_repo = https://github.com/kivra/giallo
+pkg_giallo_commit = master
+
+PACKAGES += gin
+pkg_gin_name = gin
+pkg_gin_description = Guard-related parse_transform for Erlang
+pkg_gin_homepage = https://github.com/mad-cocktail/gin
+pkg_gin_fetch = git
+pkg_gin_repo = https://github.com/mad-cocktail/gin
+pkg_gin_commit = master
+
+PACKAGES += gitty
+pkg_gitty_name = gitty
+pkg_gitty_description = Git access in Erlang
+pkg_gitty_homepage = https://github.com/maxlapshin/gitty
+pkg_gitty_fetch = git
+pkg_gitty_repo = https://github.com/maxlapshin/gitty
+pkg_gitty_commit = master
+
+PACKAGES += gold_fever
+pkg_gold_fever_name = gold_fever
+pkg_gold_fever_description = A Treasure Hunt for Erlangers
+pkg_gold_fever_homepage = https://github.com/inaka/gold_fever
+pkg_gold_fever_fetch = git
+pkg_gold_fever_repo = https://github.com/inaka/gold_fever
+pkg_gold_fever_commit = master
+
+PACKAGES += gpb
+pkg_gpb_name = gpb
+pkg_gpb_description = A Google Protobuf implementation for Erlang
+pkg_gpb_homepage = https://github.com/tomas-abrahamsson/gpb
+pkg_gpb_fetch = git
+pkg_gpb_repo = https://github.com/tomas-abrahamsson/gpb
+pkg_gpb_commit = master
+
+PACKAGES += gproc
+pkg_gproc_name = gproc
+pkg_gproc_description = Extended process registry for Erlang
+pkg_gproc_homepage = https://github.com/uwiger/gproc
+pkg_gproc_fetch = git
+pkg_gproc_repo = https://github.com/uwiger/gproc
+pkg_gproc_commit = master
+
+PACKAGES += grapherl
+pkg_grapherl_name = grapherl
+pkg_grapherl_description = Create graphs of Erlang systems and programs
+pkg_grapherl_homepage = https://github.com/eproxus/grapherl
+pkg_grapherl_fetch = git
+pkg_grapherl_repo = https://github.com/eproxus/grapherl
+pkg_grapherl_commit = master
+
+PACKAGES += grpc
+pkg_grpc_name = grpc
+pkg_grpc_description = gRPC server in Erlang
+pkg_grpc_homepage = https://github.com/Bluehouse-Technology/grpc
+pkg_grpc_fetch = git
+pkg_grpc_repo = https://github.com/Bluehouse-Technology/grpc
+pkg_grpc_commit = master
+
+PACKAGES += grpc_client
+pkg_grpc_client_name = grpc_client
+pkg_grpc_client_description = gRPC client in Erlang
+pkg_grpc_client_homepage = https://github.com/Bluehouse-Technology/grpc_client
+pkg_grpc_client_fetch = git
+pkg_grpc_client_repo = https://github.com/Bluehouse-Technology/grpc_client
+pkg_grpc_client_commit = master
+
+PACKAGES += gun
+pkg_gun_name = gun
+pkg_gun_description = Asynchronous SPDY, HTTP and WebSocket client written in Erlang.
+pkg_gun_homepage = http://ninenines.eu
+pkg_gun_fetch = git
+pkg_gun_repo = https://github.com/ninenines/gun
+pkg_gun_commit = master
+
+PACKAGES += gut
+pkg_gut_name = gut
+pkg_gut_description = gut is a template printing, aka scaffolding, tool for Erlang, like rails generate or yeoman.
+pkg_gut_homepage = https://github.com/unbalancedparentheses/gut
+pkg_gut_fetch = git
+pkg_gut_repo = https://github.com/unbalancedparentheses/gut
+pkg_gut_commit = master
+
+PACKAGES += hackney
+pkg_hackney_name = hackney
+pkg_hackney_description = Simple HTTP client in Erlang
+pkg_hackney_homepage = https://github.com/benoitc/hackney
+pkg_hackney_fetch = git
+pkg_hackney_repo = https://github.com/benoitc/hackney
+pkg_hackney_commit = master
+
+PACKAGES += hamcrest
+pkg_hamcrest_name = hamcrest
+pkg_hamcrest_description = Erlang port of Hamcrest
+pkg_hamcrest_homepage = https://github.com/hyperthunk/hamcrest-erlang
+pkg_hamcrest_fetch = git
+pkg_hamcrest_repo = https://github.com/hyperthunk/hamcrest-erlang
+pkg_hamcrest_commit = master
+
+PACKAGES += hanoidb
+pkg_hanoidb_name = hanoidb
+pkg_hanoidb_description = Erlang LSM BTree Storage
+pkg_hanoidb_homepage = https://github.com/krestenkrab/hanoidb
+pkg_hanoidb_fetch = git
+pkg_hanoidb_repo = https://github.com/krestenkrab/hanoidb
+pkg_hanoidb_commit = master
+
+PACKAGES += hottub
+pkg_hottub_name = hottub
+pkg_hottub_description = Permanent Erlang Worker Pool
+pkg_hottub_homepage = https://github.com/bfrog/hottub
+pkg_hottub_fetch = git
+pkg_hottub_repo = https://github.com/bfrog/hottub
+pkg_hottub_commit = master
+
+PACKAGES += hpack
+pkg_hpack_name = hpack
+pkg_hpack_description = HPACK Implementation for Erlang
+pkg_hpack_homepage = https://github.com/joedevivo/hpack
+pkg_hpack_fetch = git
+pkg_hpack_repo = https://github.com/joedevivo/hpack
+pkg_hpack_commit = master
+
+PACKAGES += hyper
+pkg_hyper_name = hyper
+pkg_hyper_description = Erlang implementation of HyperLogLog
+pkg_hyper_homepage = https://github.com/GameAnalytics/hyper
+pkg_hyper_fetch = git
+pkg_hyper_repo = https://github.com/GameAnalytics/hyper
+pkg_hyper_commit = master
+
+PACKAGES += i18n
+pkg_i18n_name = i18n
+pkg_i18n_description = International components for Unicode from Erlang (unicode, date, string, number, format, locale, localization, transliteration, icu4e)
+pkg_i18n_homepage = https://github.com/erlang-unicode/i18n
+pkg_i18n_fetch = git
+pkg_i18n_repo = https://github.com/erlang-unicode/i18n
+pkg_i18n_commit = master
+
+PACKAGES += ibrowse
+pkg_ibrowse_name = ibrowse
+pkg_ibrowse_description = Erlang HTTP client
+pkg_ibrowse_homepage = https://github.com/cmullaparthi/ibrowse
+pkg_ibrowse_fetch = git
+pkg_ibrowse_repo = https://github.com/cmullaparthi/ibrowse
+pkg_ibrowse_commit = master
+
+PACKAGES += idna
+pkg_idna_name = idna
+pkg_idna_description = Erlang IDNA lib
+pkg_idna_homepage = https://github.com/benoitc/erlang-idna
+pkg_idna_fetch = git
+pkg_idna_repo = https://github.com/benoitc/erlang-idna
+pkg_idna_commit = master
+
+PACKAGES += ierlang
+pkg_ierlang_name = ierlang
+pkg_ierlang_description = An Erlang language kernel for IPython.
+pkg_ierlang_homepage = https://github.com/robbielynch/ierlang
+pkg_ierlang_fetch = git
+pkg_ierlang_repo = https://github.com/robbielynch/ierlang
+pkg_ierlang_commit = master
+
+PACKAGES += iota
+pkg_iota_name = iota
+pkg_iota_description = iota (Inter-dependency Objective Testing Apparatus) - a tool to enforce clean separation of responsibilities in Erlang code
+pkg_iota_homepage = https://github.com/jpgneves/iota
+pkg_iota_fetch = git
+pkg_iota_repo = https://github.com/jpgneves/iota
+pkg_iota_commit = master
+
+PACKAGES += irc_lib
+pkg_irc_lib_name = irc_lib
+pkg_irc_lib_description = Erlang IRC client library
+pkg_irc_lib_homepage = https://github.com/OtpChatBot/irc_lib
+pkg_irc_lib_fetch = git
+pkg_irc_lib_repo = https://github.com/OtpChatBot/irc_lib
+pkg_irc_lib_commit = master
+
+PACKAGES += ircd
+pkg_ircd_name = ircd
+pkg_ircd_description = A pluggable IRC daemon application/library for Erlang.
+pkg_ircd_homepage = https://github.com/tonyg/erlang-ircd
+pkg_ircd_fetch = git
+pkg_ircd_repo = https://github.com/tonyg/erlang-ircd
+pkg_ircd_commit = master
+
+PACKAGES += iris
+pkg_iris_name = iris
+pkg_iris_description = Iris Erlang binding
+pkg_iris_homepage = https://github.com/project-iris/iris-erl
+pkg_iris_fetch = git
+pkg_iris_repo = https://github.com/project-iris/iris-erl
+pkg_iris_commit = master
+
+PACKAGES += iso8601
+pkg_iso8601_name = iso8601
+pkg_iso8601_description = Erlang ISO 8601 date formatter/parser
+pkg_iso8601_homepage = https://github.com/seansawyer/erlang_iso8601
+pkg_iso8601_fetch = git
+pkg_iso8601_repo = https://github.com/seansawyer/erlang_iso8601
+pkg_iso8601_commit = master
+
+PACKAGES += jamdb_sybase
+pkg_jamdb_sybase_name = jamdb_sybase
+pkg_jamdb_sybase_description = Erlang driver for SAP Sybase ASE
+pkg_jamdb_sybase_homepage = https://github.com/erlangbureau/jamdb_sybase
+pkg_jamdb_sybase_fetch = git
+pkg_jamdb_sybase_repo = https://github.com/erlangbureau/jamdb_sybase
+pkg_jamdb_sybase_commit = master
+
+PACKAGES += jerg
+pkg_jerg_name = jerg
+pkg_jerg_description = JSON Schema to Erlang Records Generator
+pkg_jerg_homepage = https://github.com/ddossot/jerg
+pkg_jerg_fetch = git
+pkg_jerg_repo = https://github.com/ddossot/jerg
+pkg_jerg_commit = master
+
+PACKAGES += jesse
+pkg_jesse_name = jesse
+pkg_jesse_description = jesse (JSON Schema Erlang) is an implementation of a JSON Schema validator for Erlang.
+pkg_jesse_homepage = https://github.com/for-GET/jesse
+pkg_jesse_fetch = git
+pkg_jesse_repo = https://github.com/for-GET/jesse
+pkg_jesse_commit = master
+
+PACKAGES += jiffy
+pkg_jiffy_name = jiffy
+pkg_jiffy_description = JSON NIFs for Erlang.
+pkg_jiffy_homepage = https://github.com/davisp/jiffy
+pkg_jiffy_fetch = git
+pkg_jiffy_repo = https://github.com/davisp/jiffy
+pkg_jiffy_commit = master
+
+PACKAGES += jiffy_v
+pkg_jiffy_v_name = jiffy_v
+pkg_jiffy_v_description = JSON validation utility
+pkg_jiffy_v_homepage = https://github.com/shizzard/jiffy-v
+pkg_jiffy_v_fetch = git
+pkg_jiffy_v_repo = https://github.com/shizzard/jiffy-v
+pkg_jiffy_v_commit = master
+
+PACKAGES += jobs
+pkg_jobs_name = jobs
+pkg_jobs_description = A job scheduler for load regulation
+pkg_jobs_homepage = https://github.com/esl/jobs
+pkg_jobs_fetch = git
+pkg_jobs_repo = https://github.com/esl/jobs
+pkg_jobs_commit = master
+
+PACKAGES += joxa
+pkg_joxa_name = joxa
+pkg_joxa_description = A Modern Lisp for the Erlang VM
+pkg_joxa_homepage = https://github.com/joxa/joxa
+pkg_joxa_fetch = git
+pkg_joxa_repo = https://github.com/joxa/joxa
+pkg_joxa_commit = master
+
+PACKAGES += json
+pkg_json_name = json
+pkg_json_description = A high-level JSON library for Erlang (17.0+)
+pkg_json_homepage = https://github.com/talentdeficit/json
+pkg_json_fetch = git
+pkg_json_repo = https://github.com/talentdeficit/json
+pkg_json_commit = master
+
+PACKAGES += json_rec
+pkg_json_rec_name = json_rec
+pkg_json_rec_description = JSON to Erlang record
+pkg_json_rec_homepage = https://github.com/justinkirby/json_rec
+pkg_json_rec_fetch = git
+pkg_json_rec_repo = https://github.com/justinkirby/json_rec
+pkg_json_rec_commit = master
+
+PACKAGES += jsone
+pkg_jsone_name = jsone
+pkg_jsone_description = An Erlang library for encoding and decoding JSON data.
+pkg_jsone_homepage = https://github.com/sile/jsone.git
+pkg_jsone_fetch = git
+pkg_jsone_repo = https://github.com/sile/jsone.git
+pkg_jsone_commit = master
+
+PACKAGES += jsonerl
+pkg_jsonerl_name = jsonerl
+pkg_jsonerl_description = Yet another, but slightly different, Erlang <-> JSON encoder/decoder
+pkg_jsonerl_homepage = https://github.com/lambder/jsonerl
+pkg_jsonerl_fetch = git
+pkg_jsonerl_repo = https://github.com/lambder/jsonerl
+pkg_jsonerl_commit = master
+
+PACKAGES += jsonpath
+pkg_jsonpath_name = jsonpath
+pkg_jsonpath_description = Fast Erlang JSON data retrieval and updates via JavaScript-like notation
+pkg_jsonpath_homepage = https://github.com/GeneStevens/jsonpath
+pkg_jsonpath_fetch = git
+pkg_jsonpath_repo = https://github.com/GeneStevens/jsonpath
+pkg_jsonpath_commit = master
+
+PACKAGES += jsonx
+pkg_jsonx_name = jsonx
+pkg_jsonx_description = JSONX is an Erlang library for efficiently decoding and encoding JSON, written in C.
+pkg_jsonx_homepage = https://github.com/iskra/jsonx
+pkg_jsonx_fetch = git
+pkg_jsonx_repo = https://github.com/iskra/jsonx
+pkg_jsonx_commit = master
+
+PACKAGES += jsx
+pkg_jsx_name = jsx
+pkg_jsx_description = An Erlang application for consuming, producing and manipulating JSON.
+pkg_jsx_homepage = https://github.com/talentdeficit/jsx
+pkg_jsx_fetch = git
+pkg_jsx_repo = https://github.com/talentdeficit/jsx
+pkg_jsx_commit = master
+
+PACKAGES += kafka
+pkg_kafka_name = kafka
+pkg_kafka_description = Kafka consumer and producer in Erlang
+pkg_kafka_homepage = https://github.com/wooga/kafka-erlang
+pkg_kafka_fetch = git
+pkg_kafka_repo = https://github.com/wooga/kafka-erlang
+pkg_kafka_commit = master
+
+PACKAGES += kafka_protocol
+pkg_kafka_protocol_name = kafka_protocol
+pkg_kafka_protocol_description = Kafka protocol Erlang library
+pkg_kafka_protocol_homepage = https://github.com/klarna/kafka_protocol
+pkg_kafka_protocol_fetch = git
+pkg_kafka_protocol_repo = https://github.com/klarna/kafka_protocol.git
+pkg_kafka_protocol_commit = master
+
+PACKAGES += kai
+pkg_kai_name = kai
+pkg_kai_description = DHT storage by Takeshi Inoue
+pkg_kai_homepage = https://github.com/synrc/kai
+pkg_kai_fetch = git
+pkg_kai_repo = https://github.com/synrc/kai
+pkg_kai_commit = master
+
+PACKAGES += katja
+pkg_katja_name = katja
+pkg_katja_description = A simple Riemann client written in Erlang.
+pkg_katja_homepage = https://github.com/nifoc/katja
+pkg_katja_fetch = git
+pkg_katja_repo = https://github.com/nifoc/katja
+pkg_katja_commit = master
+
+PACKAGES += kdht
+pkg_kdht_name = kdht
+pkg_kdht_description = kdht is an Erlang DHT implementation
+pkg_kdht_homepage = https://github.com/kevinlynx/kdht
+pkg_kdht_fetch = git
+pkg_kdht_repo = https://github.com/kevinlynx/kdht
+pkg_kdht_commit = master
+
+PACKAGES += key2value
+pkg_key2value_name = key2value
+pkg_key2value_description = Erlang 2-way map
+pkg_key2value_homepage = https://github.com/okeuday/key2value
+pkg_key2value_fetch = git
+pkg_key2value_repo = https://github.com/okeuday/key2value
+pkg_key2value_commit = master
+
+PACKAGES += keys1value
+pkg_keys1value_name = keys1value
+pkg_keys1value_description = Erlang set-associative map for key lists
+pkg_keys1value_homepage = https://github.com/okeuday/keys1value
+pkg_keys1value_fetch = git
+pkg_keys1value_repo = https://github.com/okeuday/keys1value
+pkg_keys1value_commit = master
+
+PACKAGES += kinetic
+pkg_kinetic_name = kinetic
+pkg_kinetic_description = Erlang Kinesis Client
+pkg_kinetic_homepage = https://github.com/AdRoll/kinetic
+pkg_kinetic_fetch = git
+pkg_kinetic_repo = https://github.com/AdRoll/kinetic
+pkg_kinetic_commit = master
+
+PACKAGES += kjell
+pkg_kjell_name = kjell
+pkg_kjell_description = Erlang Shell
+pkg_kjell_homepage = https://github.com/karlll/kjell
+pkg_kjell_fetch = git
+pkg_kjell_repo = https://github.com/karlll/kjell
+pkg_kjell_commit = master
+
+PACKAGES += kraken
+pkg_kraken_name = kraken
+pkg_kraken_description = Distributed Pubsub Server for Realtime Apps
+pkg_kraken_homepage = https://github.com/Asana/kraken
+pkg_kraken_fetch = git
+pkg_kraken_repo = https://github.com/Asana/kraken
+pkg_kraken_commit = master
+
+PACKAGES += kucumberl
+pkg_kucumberl_name = kucumberl
+pkg_kucumberl_description = A pure-Erlang, open-source implementation of Cucumber
+pkg_kucumberl_homepage = https://github.com/openshine/kucumberl
+pkg_kucumberl_fetch = git
+pkg_kucumberl_repo = https://github.com/openshine/kucumberl
+pkg_kucumberl_commit = master
+
+PACKAGES += kvc
+pkg_kvc_name = kvc
+pkg_kvc_description = KVC - Key Value Coding for Erlang data structures
+pkg_kvc_homepage = https://github.com/etrepum/kvc
+pkg_kvc_fetch = git
+pkg_kvc_repo = https://github.com/etrepum/kvc
+pkg_kvc_commit = master
+
+PACKAGES += kvlists
+pkg_kvlists_name = kvlists
+pkg_kvlists_description = Lists of key-value pairs (decoded JSON) in Erlang
+pkg_kvlists_homepage = https://github.com/jcomellas/kvlists
+pkg_kvlists_fetch = git
+pkg_kvlists_repo = https://github.com/jcomellas/kvlists
+pkg_kvlists_commit = master
+
+PACKAGES += kvs
+pkg_kvs_name = kvs
+pkg_kvs_description = Container and Iterator
+pkg_kvs_homepage = https://github.com/synrc/kvs
+pkg_kvs_fetch = git
+pkg_kvs_repo = https://github.com/synrc/kvs
+pkg_kvs_commit = master
+
+PACKAGES += lager
+pkg_lager_name = lager
+pkg_lager_description = A logging framework for Erlang/OTP.
+pkg_lager_homepage = https://github.com/erlang-lager/lager
+pkg_lager_fetch = git
+pkg_lager_repo = https://github.com/erlang-lager/lager
+pkg_lager_commit = master
+
+PACKAGES += lager_amqp_backend
+pkg_lager_amqp_backend_name = lager_amqp_backend
+pkg_lager_amqp_backend_description = AMQP RabbitMQ Lager backend
+pkg_lager_amqp_backend_homepage = https://github.com/jbrisbin/lager_amqp_backend
+pkg_lager_amqp_backend_fetch = git
+pkg_lager_amqp_backend_repo = https://github.com/jbrisbin/lager_amqp_backend
+pkg_lager_amqp_backend_commit = master
+
+PACKAGES += lager_syslog
+pkg_lager_syslog_name = lager_syslog
+pkg_lager_syslog_description = Syslog backend for lager
+pkg_lager_syslog_homepage = https://github.com/erlang-lager/lager_syslog
+pkg_lager_syslog_fetch = git
+pkg_lager_syslog_repo = https://github.com/erlang-lager/lager_syslog
+pkg_lager_syslog_commit = master
+
+PACKAGES += lambdapad
+pkg_lambdapad_name = lambdapad
+pkg_lambdapad_description = Static site generator using Erlang. Yes, Erlang.
+pkg_lambdapad_homepage = https://github.com/gar1t/lambdapad
+pkg_lambdapad_fetch = git
+pkg_lambdapad_repo = https://github.com/gar1t/lambdapad
+pkg_lambdapad_commit = master
+
+PACKAGES += lasp
+pkg_lasp_name = lasp
+pkg_lasp_description = A Language for Distributed, Eventually Consistent Computations
+pkg_lasp_homepage = http://lasp-lang.org/
+pkg_lasp_fetch = git
+pkg_lasp_repo = https://github.com/lasp-lang/lasp
+pkg_lasp_commit = master
+
+PACKAGES += lasse
+pkg_lasse_name = lasse
+pkg_lasse_description = SSE handler for Cowboy
+pkg_lasse_homepage = https://github.com/inaka/lasse
+pkg_lasse_fetch = git
+pkg_lasse_repo = https://github.com/inaka/lasse
+pkg_lasse_commit = master
+
+PACKAGES += ldap
+pkg_ldap_name = ldap
+pkg_ldap_description = LDAP server written in Erlang
+pkg_ldap_homepage = https://github.com/spawnproc/ldap
+pkg_ldap_fetch = git
+pkg_ldap_repo = https://github.com/spawnproc/ldap
+pkg_ldap_commit = master
+
+PACKAGES += lethink
+pkg_lethink_name = lethink
+pkg_lethink_description = Erlang driver for RethinkDB
+pkg_lethink_homepage = https://github.com/taybin/lethink
+pkg_lethink_fetch = git
+pkg_lethink_repo = https://github.com/taybin/lethink
+pkg_lethink_commit = master
+
+PACKAGES += lfe
+pkg_lfe_name = lfe
+pkg_lfe_description = Lisp Flavoured Erlang (LFE)
+pkg_lfe_homepage = https://github.com/rvirding/lfe
+pkg_lfe_fetch = git
+pkg_lfe_repo = https://github.com/rvirding/lfe
+pkg_lfe_commit = master
+
+PACKAGES += ling
+pkg_ling_name = ling
+pkg_ling_description = Erlang on Xen
+pkg_ling_homepage = https://github.com/cloudozer/ling
+pkg_ling_fetch = git
+pkg_ling_repo = https://github.com/cloudozer/ling
+pkg_ling_commit = master
+
+PACKAGES += live
+pkg_live_name = live
+pkg_live_description = Automated module and configuration reloader.
+pkg_live_homepage = http://ninenines.eu
+pkg_live_fetch = git
+pkg_live_repo = https://github.com/ninenines/live
+pkg_live_commit = master
+
+PACKAGES += lmq
+pkg_lmq_name = lmq
+pkg_lmq_description = Lightweight Message Queue
+pkg_lmq_homepage = https://github.com/iij/lmq
+pkg_lmq_fetch = git
+pkg_lmq_repo = https://github.com/iij/lmq
+pkg_lmq_commit = master
+
+PACKAGES += locker
+pkg_locker_name = locker
+pkg_locker_description = Atomic distributed 'check and set' for short-lived keys
+pkg_locker_homepage = https://github.com/wooga/locker
+pkg_locker_fetch = git
+pkg_locker_repo = https://github.com/wooga/locker
+pkg_locker_commit = master
+
+PACKAGES += locks
+pkg_locks_name = locks
+pkg_locks_description = A scalable, deadlock-resolving resource locker
+pkg_locks_homepage = https://github.com/uwiger/locks
+pkg_locks_fetch = git
+pkg_locks_repo = https://github.com/uwiger/locks
+pkg_locks_commit = master
+
+PACKAGES += log4erl
+pkg_log4erl_name = log4erl
+pkg_log4erl_description = A logger for Erlang in the spirit of Log4J.
+pkg_log4erl_homepage = https://github.com/ahmednawras/log4erl
+pkg_log4erl_fetch = git
+pkg_log4erl_repo = https://github.com/ahmednawras/log4erl
+pkg_log4erl_commit = master
+
+PACKAGES += lol
+pkg_lol_name = lol
+pkg_lol_description = Lisp on erLang, and programming is fun again
+pkg_lol_homepage = https://github.com/b0oh/lol
+pkg_lol_fetch = git
+pkg_lol_repo = https://github.com/b0oh/lol
+pkg_lol_commit = master
+
+PACKAGES += lucid
+pkg_lucid_name = lucid
+pkg_lucid_description = HTTP/2 server written in Erlang
+pkg_lucid_homepage = https://github.com/tatsuhiro-t/lucid
+pkg_lucid_fetch = git
+pkg_lucid_repo = https://github.com/tatsuhiro-t/lucid
+pkg_lucid_commit = master
+
+PACKAGES += luerl
+pkg_luerl_name = luerl
+pkg_luerl_description = Lua in Erlang
+pkg_luerl_homepage = https://github.com/rvirding/luerl
+pkg_luerl_fetch = git
+pkg_luerl_repo = https://github.com/rvirding/luerl
+pkg_luerl_commit = develop
+
+PACKAGES += luwak
+pkg_luwak_name = luwak
+pkg_luwak_description = Large-object storage interface for Riak
+pkg_luwak_homepage = https://github.com/basho/luwak
+pkg_luwak_fetch = git
+pkg_luwak_repo = https://github.com/basho/luwak
+pkg_luwak_commit = master
+
+PACKAGES += lux
+pkg_lux_name = lux
+pkg_lux_description = Lux (LUcid eXpect scripting) simplifies test automation and provides an Expect-style execution of commands
+pkg_lux_homepage = https://github.com/hawk/lux
+pkg_lux_fetch = git
+pkg_lux_repo = https://github.com/hawk/lux
+pkg_lux_commit = master
+
+PACKAGES += machi
+pkg_machi_name = machi
+pkg_machi_description = Machi file store
+pkg_machi_homepage = https://github.com/basho/machi
+pkg_machi_fetch = git
+pkg_machi_repo = https://github.com/basho/machi
+pkg_machi_commit = master
+
+PACKAGES += mad
+pkg_mad_name = mad
+pkg_mad_description = Small and Fast Rebar Replacement
+pkg_mad_homepage = https://github.com/synrc/mad
+pkg_mad_fetch = git
+pkg_mad_repo = https://github.com/synrc/mad
+pkg_mad_commit = master
+
+PACKAGES += marina
+pkg_marina_name = marina
+pkg_marina_description = Non-blocking Erlang Cassandra CQL3 client
+pkg_marina_homepage = https://github.com/lpgauth/marina
+pkg_marina_fetch = git
+pkg_marina_repo = https://github.com/lpgauth/marina
+pkg_marina_commit = master
+
+PACKAGES += mavg
+pkg_mavg_name = mavg
+pkg_mavg_description = Erlang :: Exponential moving average library
+pkg_mavg_homepage = https://github.com/EchoTeam/mavg
+pkg_mavg_fetch = git
+pkg_mavg_repo = https://github.com/EchoTeam/mavg
+pkg_mavg_commit = master
+
+PACKAGES += mc_erl
+pkg_mc_erl_name = mc_erl
+pkg_mc_erl_description = mc-erl is a server for Minecraft 1.4.7 written in Erlang.
+pkg_mc_erl_homepage = https://github.com/clonejo/mc-erl
+pkg_mc_erl_fetch = git
+pkg_mc_erl_repo = https://github.com/clonejo/mc-erl
+pkg_mc_erl_commit = master
+
+PACKAGES += mcd
+pkg_mcd_name = mcd
+pkg_mcd_description = Fast memcached protocol client in pure Erlang
+pkg_mcd_homepage = https://github.com/EchoTeam/mcd
+pkg_mcd_fetch = git
+pkg_mcd_repo = https://github.com/EchoTeam/mcd
+pkg_mcd_commit = master
+
+PACKAGES += mcerlang
+pkg_mcerlang_name = mcerlang
+pkg_mcerlang_description = The McErlang model checker for Erlang
+pkg_mcerlang_homepage = https://github.com/fredlund/McErlang
+pkg_mcerlang_fetch = git
+pkg_mcerlang_repo = https://github.com/fredlund/McErlang
+pkg_mcerlang_commit = master
+
+PACKAGES += meck
+pkg_meck_name = meck
+pkg_meck_description = A mocking library for Erlang
+pkg_meck_homepage = https://github.com/eproxus/meck
+pkg_meck_fetch = git
+pkg_meck_repo = https://github.com/eproxus/meck
+pkg_meck_commit = master
+
+PACKAGES += mekao
+pkg_mekao_name = mekao
+pkg_mekao_description = SQL constructor
+pkg_mekao_homepage = https://github.com/ddosia/mekao
+pkg_mekao_fetch = git
+pkg_mekao_repo = https://github.com/ddosia/mekao
+pkg_mekao_commit = master
+
+PACKAGES += memo
+pkg_memo_name = memo
+pkg_memo_description = Erlang memoization server
+pkg_memo_homepage = https://github.com/tuncer/memo
+pkg_memo_fetch = git
+pkg_memo_repo = https://github.com/tuncer/memo
+pkg_memo_commit = master
+
+PACKAGES += merge_index
+pkg_merge_index_name = merge_index
+pkg_merge_index_description = MergeIndex is an Erlang library for storing ordered sets on disk. It is very similar to an SSTable (in Google's Bigtable) or an HFile (in Hadoop).
+pkg_merge_index_homepage = https://github.com/basho/merge_index
+pkg_merge_index_fetch = git
+pkg_merge_index_repo = https://github.com/basho/merge_index
+pkg_merge_index_commit = master
+
+PACKAGES += merl
+pkg_merl_name = merl
+pkg_merl_description = Metaprogramming in Erlang
+pkg_merl_homepage = https://github.com/richcarl/merl
+pkg_merl_fetch = git
+pkg_merl_repo = https://github.com/richcarl/merl
+pkg_merl_commit = master
+
+PACKAGES += mimerl
+pkg_mimerl_name = mimerl
+pkg_mimerl_description = Library to handle MIME types
+pkg_mimerl_homepage = https://github.com/benoitc/mimerl
+pkg_mimerl_fetch = git
+pkg_mimerl_repo = https://github.com/benoitc/mimerl
+pkg_mimerl_commit = master
+
+PACKAGES += mimetypes
+pkg_mimetypes_name = mimetypes
+pkg_mimetypes_description = Erlang MIME types library
+pkg_mimetypes_homepage = https://github.com/spawngrid/mimetypes
+pkg_mimetypes_fetch = git
+pkg_mimetypes_repo = https://github.com/spawngrid/mimetypes
+pkg_mimetypes_commit = master
+
+PACKAGES += mixer
+pkg_mixer_name = mixer
+pkg_mixer_description = Mix in functions from other modules
+pkg_mixer_homepage = https://github.com/chef/mixer
+pkg_mixer_fetch = git
+pkg_mixer_repo = https://github.com/chef/mixer
+pkg_mixer_commit = master
+
+PACKAGES += mochiweb
+pkg_mochiweb_name = mochiweb
+pkg_mochiweb_description = MochiWeb is an Erlang library for building lightweight HTTP servers.
+pkg_mochiweb_homepage = https://github.com/mochi/mochiweb
+pkg_mochiweb_fetch = git
+pkg_mochiweb_repo = https://github.com/mochi/mochiweb
+pkg_mochiweb_commit = master
+
+PACKAGES += mochiweb_xpath
+pkg_mochiweb_xpath_name = mochiweb_xpath
+pkg_mochiweb_xpath_description = XPath support for mochiweb's HTML parser
+pkg_mochiweb_xpath_homepage = https://github.com/retnuh/mochiweb_xpath
+pkg_mochiweb_xpath_fetch = git
+pkg_mochiweb_xpath_repo = https://github.com/retnuh/mochiweb_xpath
+pkg_mochiweb_xpath_commit = master
+
+PACKAGES += mockgyver
+pkg_mockgyver_name = mockgyver
+pkg_mockgyver_description = A mocking library for Erlang
+pkg_mockgyver_homepage = https://github.com/klajo/mockgyver
+pkg_mockgyver_fetch = git
+pkg_mockgyver_repo = https://github.com/klajo/mockgyver
+pkg_mockgyver_commit = master
+
+PACKAGES += modlib
+pkg_modlib_name = modlib
+pkg_modlib_description = Web framework based on Erlang's inets httpd
+pkg_modlib_homepage = https://github.com/gar1t/modlib
+pkg_modlib_fetch = git
+pkg_modlib_repo = https://github.com/gar1t/modlib
+pkg_modlib_commit = master
+
+PACKAGES += mongodb
+pkg_mongodb_name = mongodb
+pkg_mongodb_description = MongoDB driver for Erlang
+pkg_mongodb_homepage = https://github.com/comtihon/mongodb-erlang
+pkg_mongodb_fetch = git
+pkg_mongodb_repo = https://github.com/comtihon/mongodb-erlang
+pkg_mongodb_commit = master
+
+PACKAGES += mongooseim
+pkg_mongooseim_name = mongooseim
+pkg_mongooseim_description = Jabber / XMPP server with focus on performance and scalability, by Erlang Solutions
+pkg_mongooseim_homepage = https://www.erlang-solutions.com/products/mongooseim-massively-scalable-ejabberd-platform
+pkg_mongooseim_fetch = git
+pkg_mongooseim_repo = https://github.com/esl/MongooseIM
+pkg_mongooseim_commit = master
+
+PACKAGES += moyo
+pkg_moyo_name = moyo
+pkg_moyo_description = Erlang utility functions library
+pkg_moyo_homepage = https://github.com/dwango/moyo
+pkg_moyo_fetch = git
+pkg_moyo_repo = https://github.com/dwango/moyo
+pkg_moyo_commit = master
+
+PACKAGES += msgpack
+pkg_msgpack_name = msgpack
+pkg_msgpack_description = MessagePack (de)serializer implementation for Erlang
+pkg_msgpack_homepage = https://github.com/msgpack/msgpack-erlang
+pkg_msgpack_fetch = git
+pkg_msgpack_repo = https://github.com/msgpack/msgpack-erlang
+pkg_msgpack_commit = master
+
+PACKAGES += mu2
+pkg_mu2_name = mu2
+pkg_mu2_description = Erlang mutation testing tool
+pkg_mu2_homepage = https://github.com/ramsay-t/mu2
+pkg_mu2_fetch = git
+pkg_mu2_repo = https://github.com/ramsay-t/mu2
+pkg_mu2_commit = master
+
+PACKAGES += mustache
+pkg_mustache_name = mustache
+pkg_mustache_description = Mustache template engine for Erlang.
+pkg_mustache_homepage = https://github.com/mojombo/mustache.erl
+pkg_mustache_fetch = git
+pkg_mustache_repo = https://github.com/mojombo/mustache.erl
+pkg_mustache_commit = master
+
+PACKAGES += myproto
+pkg_myproto_name = myproto
+pkg_myproto_description = MySQL Server Protocol in Erlang
+pkg_myproto_homepage = https://github.com/altenwald/myproto
+pkg_myproto_fetch = git
+pkg_myproto_repo = https://github.com/altenwald/myproto
+pkg_myproto_commit = master
+
+PACKAGES += mysql
+pkg_mysql_name = mysql
+pkg_mysql_description = MySQL client library for Erlang/OTP
+pkg_mysql_homepage = https://github.com/mysql-otp/mysql-otp
+pkg_mysql_fetch = git
+pkg_mysql_repo = https://github.com/mysql-otp/mysql-otp
+pkg_mysql_commit = 1.5.1
+
+PACKAGES += n2o
+pkg_n2o_name = n2o
+pkg_n2o_description = WebSocket Application Server
+pkg_n2o_homepage = https://github.com/5HT/n2o
+pkg_n2o_fetch = git
+pkg_n2o_repo = https://github.com/5HT/n2o
+pkg_n2o_commit = master
+
+PACKAGES += nat_upnp
+pkg_nat_upnp_name = nat_upnp
+pkg_nat_upnp_description = Erlang library to map your internal port to an external one using UPnP IGD
+pkg_nat_upnp_homepage = https://github.com/benoitc/nat_upnp
+pkg_nat_upnp_fetch = git
+pkg_nat_upnp_repo = https://github.com/benoitc/nat_upnp
+pkg_nat_upnp_commit = master
+
+PACKAGES += neo4j
+pkg_neo4j_name = neo4j
+pkg_neo4j_description = Erlang client library for Neo4J.
+pkg_neo4j_homepage = https://github.com/dmitriid/neo4j-erlang
+pkg_neo4j_fetch = git
+pkg_neo4j_repo = https://github.com/dmitriid/neo4j-erlang
+pkg_neo4j_commit = master
+
+PACKAGES += neotoma
+pkg_neotoma_name = neotoma
+pkg_neotoma_description = Erlang library and packrat parser-generator for parsing expression grammars.
+pkg_neotoma_homepage = https://github.com/seancribbs/neotoma
+pkg_neotoma_fetch = git
+pkg_neotoma_repo = https://github.com/seancribbs/neotoma
+pkg_neotoma_commit = master
+
+PACKAGES += newrelic
+pkg_newrelic_name = newrelic
+pkg_newrelic_description = Erlang library for sending metrics to New Relic
+pkg_newrelic_homepage = https://github.com/wooga/newrelic-erlang
+pkg_newrelic_fetch = git
+pkg_newrelic_repo = https://github.com/wooga/newrelic-erlang
+pkg_newrelic_commit = master
+
+PACKAGES += nifty
+pkg_nifty_name = nifty
+pkg_nifty_description = Erlang NIF wrapper generator
+pkg_nifty_homepage = https://github.com/parapluu/nifty
+pkg_nifty_fetch = git
+pkg_nifty_repo = https://github.com/parapluu/nifty
+pkg_nifty_commit = master
+
+PACKAGES += nitrogen_core
+pkg_nitrogen_core_name = nitrogen_core
+pkg_nitrogen_core_description = The core Nitrogen library.
+pkg_nitrogen_core_homepage = http://nitrogenproject.com/
+pkg_nitrogen_core_fetch = git
+pkg_nitrogen_core_repo = https://github.com/nitrogen/nitrogen_core
+pkg_nitrogen_core_commit = master
+
+PACKAGES += nkbase
+pkg_nkbase_name = nkbase
+pkg_nkbase_description = NkBASE distributed database
+pkg_nkbase_homepage = https://github.com/Nekso/nkbase
+pkg_nkbase_fetch = git
+pkg_nkbase_repo = https://github.com/Nekso/nkbase
+pkg_nkbase_commit = develop
+
+PACKAGES += nkdocker
+pkg_nkdocker_name = nkdocker
+pkg_nkdocker_description = Erlang Docker client
+pkg_nkdocker_homepage = https://github.com/Nekso/nkdocker
+pkg_nkdocker_fetch = git
+pkg_nkdocker_repo = https://github.com/Nekso/nkdocker
+pkg_nkdocker_commit = master
+
+PACKAGES += nkpacket
+pkg_nkpacket_name = nkpacket
+pkg_nkpacket_description = Generic Erlang transport layer
+pkg_nkpacket_homepage = https://github.com/Nekso/nkpacket
+pkg_nkpacket_fetch = git
+pkg_nkpacket_repo = https://github.com/Nekso/nkpacket
+pkg_nkpacket_commit = master
+
+PACKAGES += nksip
+pkg_nksip_name = nksip
+pkg_nksip_description = Erlang SIP application server
+pkg_nksip_homepage = https://github.com/kalta/nksip
+pkg_nksip_fetch = git
+pkg_nksip_repo = https://github.com/kalta/nksip
+pkg_nksip_commit = master
+
+PACKAGES += nodefinder
+pkg_nodefinder_name = nodefinder
+pkg_nodefinder_description = Automatic node discovery via UDP multicast
+pkg_nodefinder_homepage = https://github.com/erlanger/nodefinder
+pkg_nodefinder_fetch = git
+pkg_nodefinder_repo = https://github.com/okeuday/nodefinder
+pkg_nodefinder_commit = master
+
+PACKAGES += nprocreg
+pkg_nprocreg_name = nprocreg
+pkg_nprocreg_description = Minimal Distributed Erlang Process Registry
+pkg_nprocreg_homepage = http://nitrogenproject.com/
+pkg_nprocreg_fetch = git
+pkg_nprocreg_repo = https://github.com/nitrogen/nprocreg
+pkg_nprocreg_commit = master
+
+PACKAGES += oauth
+pkg_oauth_name = oauth
+pkg_oauth_description = An Erlang OAuth 1.0 implementation
+pkg_oauth_homepage = https://github.com/tim/erlang-oauth
+pkg_oauth_fetch = git
+pkg_oauth_repo = https://github.com/tim/erlang-oauth
+pkg_oauth_commit = master
+
+PACKAGES += oauth2
+pkg_oauth2_name = oauth2
+pkg_oauth2_description = Erlang OAuth2 implementation
+pkg_oauth2_homepage = https://github.com/kivra/oauth2
+pkg_oauth2_fetch = git
+pkg_oauth2_repo = https://github.com/kivra/oauth2
+pkg_oauth2_commit = master
+
+PACKAGES += observer_cli
+pkg_observer_cli_name = observer_cli
+pkg_observer_cli_description = Visualize Erlang/Elixir Nodes On The Command Line
+pkg_observer_cli_homepage = http://zhongwencool.github.io/observer_cli
+pkg_observer_cli_fetch = git
+pkg_observer_cli_repo = https://github.com/zhongwencool/observer_cli
+pkg_observer_cli_commit = master
+
+PACKAGES += octopus
+pkg_octopus_name = octopus
+pkg_octopus_description = Small and flexible pool manager written in Erlang
+pkg_octopus_homepage = https://github.com/erlangbureau/octopus
+pkg_octopus_fetch = git
+pkg_octopus_repo = https://github.com/erlangbureau/octopus
+pkg_octopus_commit = master
+
+PACKAGES += of_protocol
+pkg_of_protocol_name = of_protocol
+pkg_of_protocol_description = OpenFlow Protocol Library for Erlang
+pkg_of_protocol_homepage = https://github.com/FlowForwarding/of_protocol
+pkg_of_protocol_fetch = git
+pkg_of_protocol_repo = https://github.com/FlowForwarding/of_protocol
+pkg_of_protocol_commit = master
+
+PACKAGES += opencouch
+pkg_opencouch_name = couch
+pkg_opencouch_description = An embeddable document-oriented database compatible with Apache CouchDB
+pkg_opencouch_homepage = https://github.com/benoitc/opencouch
+pkg_opencouch_fetch = git
+pkg_opencouch_repo = https://github.com/benoitc/opencouch
+pkg_opencouch_commit = master
+
+PACKAGES += openflow
+pkg_openflow_name = openflow
+pkg_openflow_description = An OpenFlow controller written in pure Erlang
+pkg_openflow_homepage = https://github.com/renatoaguiar/erlang-openflow
+pkg_openflow_fetch = git
+pkg_openflow_repo = https://github.com/renatoaguiar/erlang-openflow
+pkg_openflow_commit = master
+
+PACKAGES += openid
+pkg_openid_name = openid
+pkg_openid_description = Erlang OpenID
+pkg_openid_homepage = https://github.com/brendonh/erl_openid
+pkg_openid_fetch = git
+pkg_openid_repo = https://github.com/brendonh/erl_openid
+pkg_openid_commit = master
+
+PACKAGES += openpoker
+pkg_openpoker_name = openpoker
+pkg_openpoker_description = Genesis Texas hold'em Game Server
+pkg_openpoker_homepage = https://github.com/hpyhacking/openpoker
+pkg_openpoker_fetch = git
+pkg_openpoker_repo = https://github.com/hpyhacking/openpoker
+pkg_openpoker_commit = master
+
+PACKAGES += otpbp
+pkg_otpbp_name = otpbp
+pkg_otpbp_description = Parse transform for using new OTP functions in old Erlang/OTP releases (R15, R16, 17, 18, 19)
+pkg_otpbp_homepage = https://github.com/Ledest/otpbp
+pkg_otpbp_fetch = git
+pkg_otpbp_repo = https://github.com/Ledest/otpbp
+pkg_otpbp_commit = master
+
+PACKAGES += pal
+pkg_pal_name = pal
+pkg_pal_description = Pragmatic Authentication Library
+pkg_pal_homepage = https://github.com/manifest/pal
+pkg_pal_fetch = git
+pkg_pal_repo = https://github.com/manifest/pal
+pkg_pal_commit = master
+
+PACKAGES += parse_trans
+pkg_parse_trans_name = parse_trans
+pkg_parse_trans_description = Parse transform utilities for Erlang
+pkg_parse_trans_homepage = https://github.com/uwiger/parse_trans
+pkg_parse_trans_fetch = git
+pkg_parse_trans_repo = https://github.com/uwiger/parse_trans
+pkg_parse_trans_commit = master
+
+PACKAGES += parsexml
+pkg_parsexml_name = parsexml
+pkg_parsexml_description = Simple DOM XML parser with convenient and very simple API
+pkg_parsexml_homepage = https://github.com/maxlapshin/parsexml
+pkg_parsexml_fetch = git
+pkg_parsexml_repo = https://github.com/maxlapshin/parsexml
+pkg_parsexml_commit = master
+
+PACKAGES += partisan
+pkg_partisan_name = partisan
+pkg_partisan_description = High-performance, high-scalability distributed computing with Erlang and Elixir.
+pkg_partisan_homepage = http://partisan.cloud
+pkg_partisan_fetch = git
+pkg_partisan_repo = https://github.com/lasp-lang/partisan
+pkg_partisan_commit = master
+
+PACKAGES += pegjs
+pkg_pegjs_name = pegjs
+pkg_pegjs_description = An implementation of PEG.js grammar for Erlang.
+pkg_pegjs_homepage = https://github.com/dmitriid/pegjs
+pkg_pegjs_fetch = git
+pkg_pegjs_repo = https://github.com/dmitriid/pegjs
+pkg_pegjs_commit = master
+
+PACKAGES += percept2
+pkg_percept2_name = percept2
+pkg_percept2_description = Concurrent profiling tool for Erlang
+pkg_percept2_homepage = https://github.com/huiqing/percept2
+pkg_percept2_fetch = git
+pkg_percept2_repo = https://github.com/huiqing/percept2
+pkg_percept2_commit = master
+
+PACKAGES += pgo
+pkg_pgo_name = pgo
+pkg_pgo_description = Erlang Postgres client and connection pool
+pkg_pgo_homepage = https://github.com/erleans/pgo.git
+pkg_pgo_fetch = git
+pkg_pgo_repo = https://github.com/erleans/pgo.git
+pkg_pgo_commit = master
+
+PACKAGES += pgsql
+pkg_pgsql_name = pgsql
+pkg_pgsql_description = Erlang PostgreSQL driver
+pkg_pgsql_homepage = https://github.com/semiocast/pgsql
+pkg_pgsql_fetch = git
+pkg_pgsql_repo = https://github.com/semiocast/pgsql
+pkg_pgsql_commit = master
+
+PACKAGES += pkgx
+pkg_pkgx_name = pkgx
+pkg_pkgx_description = Build .deb packages from Erlang releases
+pkg_pkgx_homepage = https://github.com/arjan/pkgx
+pkg_pkgx_fetch = git
+pkg_pkgx_repo = https://github.com/arjan/pkgx
+pkg_pkgx_commit = master
+
+PACKAGES += pkt
+pkg_pkt_name = pkt
+pkg_pkt_description = Erlang network protocol library
+pkg_pkt_homepage = https://github.com/msantos/pkt
+pkg_pkt_fetch = git
+pkg_pkt_repo = https://github.com/msantos/pkt
+pkg_pkt_commit = master
+
+PACKAGES += plain_fsm
+pkg_plain_fsm_name = plain_fsm
+pkg_plain_fsm_description = A behaviour/support library for writing plain Erlang FSMs.
+pkg_plain_fsm_homepage = https://github.com/uwiger/plain_fsm
+pkg_plain_fsm_fetch = git
+pkg_plain_fsm_repo = https://github.com/uwiger/plain_fsm
+pkg_plain_fsm_commit = master
+
+PACKAGES += plumtree
+pkg_plumtree_name = plumtree
+pkg_plumtree_description = Epidemic Broadcast Trees
+pkg_plumtree_homepage = https://github.com/helium/plumtree
+pkg_plumtree_fetch = git
+pkg_plumtree_repo = https://github.com/helium/plumtree
+pkg_plumtree_commit = master
+
+PACKAGES += pmod_transform
+pkg_pmod_transform_name = pmod_transform
+pkg_pmod_transform_description = Parse transform for parameterized modules
+pkg_pmod_transform_homepage = https://github.com/erlang/pmod_transform
+pkg_pmod_transform_fetch = git
+pkg_pmod_transform_repo = https://github.com/erlang/pmod_transform
+pkg_pmod_transform_commit = master
+
+PACKAGES += pobox
+pkg_pobox_name = pobox
+pkg_pobox_description = External buffer processes to protect against mailbox overflow in Erlang
+pkg_pobox_homepage = https://github.com/ferd/pobox
+pkg_pobox_fetch = git
+pkg_pobox_repo = https://github.com/ferd/pobox
+pkg_pobox_commit = master
+
+PACKAGES += ponos
+pkg_ponos_name = ponos
+pkg_ponos_description = ponos is a simple yet powerful load generator written in Erlang
+pkg_ponos_homepage = https://github.com/klarna/ponos
+pkg_ponos_fetch = git
+pkg_ponos_repo = https://github.com/klarna/ponos
+pkg_ponos_commit = master
+
+PACKAGES += poolboy
+pkg_poolboy_name = poolboy
+pkg_poolboy_description = A hunky Erlang worker pool factory
+pkg_poolboy_homepage = https://github.com/devinus/poolboy
+pkg_poolboy_fetch = git
+pkg_poolboy_repo = https://github.com/devinus/poolboy
+pkg_poolboy_commit = master
+
+PACKAGES += pooler
+pkg_pooler_name = pooler
+pkg_pooler_description = An OTP Process Pool Application
+pkg_pooler_homepage = https://github.com/seth/pooler
+pkg_pooler_fetch = git
+pkg_pooler_repo = https://github.com/seth/pooler
+pkg_pooler_commit = master
+
+PACKAGES += pqueue
+pkg_pqueue_name = pqueue
+pkg_pqueue_description = Erlang Priority Queues
+pkg_pqueue_homepage = https://github.com/okeuday/pqueue
+pkg_pqueue_fetch = git
+pkg_pqueue_repo = https://github.com/okeuday/pqueue
+pkg_pqueue_commit = master
+
+PACKAGES += procket
+pkg_procket_name = procket
+pkg_procket_description = Erlang interface to low level socket operations
+pkg_procket_homepage = http://blog.listincomprehension.com/search/label/procket
+pkg_procket_fetch = git
+pkg_procket_repo = https://github.com/msantos/procket
+pkg_procket_commit = master
+
+PACKAGES += prometheus
+pkg_prometheus_name = prometheus
+pkg_prometheus_description = Prometheus.io client in Erlang
+pkg_prometheus_homepage = https://github.com/deadtrickster/prometheus.erl
+pkg_prometheus_fetch = git
+pkg_prometheus_repo = https://github.com/deadtrickster/prometheus.erl
+pkg_prometheus_commit = master
+
+PACKAGES += prop
+pkg_prop_name = prop
+pkg_prop_description = An Erlang code scaffolding and generator system.
+pkg_prop_homepage = https://github.com/nuex/prop
+pkg_prop_fetch = git
+pkg_prop_repo = https://github.com/nuex/prop
+pkg_prop_commit = master
+
+PACKAGES += proper
+pkg_proper_name = proper
+pkg_proper_description = PropEr: a QuickCheck-inspired property-based testing tool for Erlang.
+pkg_proper_homepage = http://proper.softlab.ntua.gr
+pkg_proper_fetch = git
+pkg_proper_repo = https://github.com/manopapad/proper
+pkg_proper_commit = master
+
+PACKAGES += props
+pkg_props_name = props
+pkg_props_description = Property structure library
+pkg_props_homepage = https://github.com/greyarea/props
+pkg_props_fetch = git
+pkg_props_repo = https://github.com/greyarea/props
+pkg_props_commit = master
+
+PACKAGES += protobuffs
+pkg_protobuffs_name = protobuffs
+pkg_protobuffs_description = An implementation of Google's Protocol Buffers for Erlang, based on ngerakines/erlang_protobuffs.
+pkg_protobuffs_homepage = https://github.com/basho/erlang_protobuffs
+pkg_protobuffs_fetch = git
+pkg_protobuffs_repo = https://github.com/basho/erlang_protobuffs
+pkg_protobuffs_commit = master
+
+PACKAGES += psycho
+pkg_psycho_name = psycho
+pkg_psycho_description = HTTP server that provides a WSGI-like interface for applications and middleware.
+pkg_psycho_homepage = https://github.com/gar1t/psycho
+pkg_psycho_fetch = git
+pkg_psycho_repo = https://github.com/gar1t/psycho
+pkg_psycho_commit = master
+
+PACKAGES += purity
+pkg_purity_name = purity
+pkg_purity_description = A side-effect analyzer for Erlang
+pkg_purity_homepage = https://github.com/mpitid/purity
+pkg_purity_fetch = git
+pkg_purity_repo = https://github.com/mpitid/purity
+pkg_purity_commit = master
+
+PACKAGES += push_service
+pkg_push_service_name = push_service
+pkg_push_service_description = Push service
+pkg_push_service_homepage = https://github.com/hairyhum/push_service
+pkg_push_service_fetch = git
+pkg_push_service_repo = https://github.com/hairyhum/push_service
+pkg_push_service_commit = master
+
+PACKAGES += qdate
+pkg_qdate_name = qdate
+pkg_qdate_description = Date, time, and timezone parsing, formatting, and conversion for Erlang.
+pkg_qdate_homepage = https://github.com/choptastic/qdate
+pkg_qdate_fetch = git
+pkg_qdate_repo = https://github.com/choptastic/qdate
+pkg_qdate_commit = master
+
+PACKAGES += qrcode
+pkg_qrcode_name = qrcode
+pkg_qrcode_description = QR Code encoder in Erlang
+pkg_qrcode_homepage = https://github.com/komone/qrcode
+pkg_qrcode_fetch = git
+pkg_qrcode_repo = https://github.com/komone/qrcode
+pkg_qrcode_commit = master
+
+PACKAGES += quest
+pkg_quest_name = quest
+pkg_quest_description = Learn Erlang through this set of challenges. An interactive system for getting to know Erlang.
+pkg_quest_homepage = https://github.com/eriksoe/ErlangQuest
+pkg_quest_fetch = git
+pkg_quest_repo = https://github.com/eriksoe/ErlangQuest
+pkg_quest_commit = master
+
+PACKAGES += quickrand
+pkg_quickrand_name = quickrand
+pkg_quickrand_description = Quick Erlang Random Number Generation
+pkg_quickrand_homepage = https://github.com/okeuday/quickrand
+pkg_quickrand_fetch = git
+pkg_quickrand_repo = https://github.com/okeuday/quickrand
+pkg_quickrand_commit = master
+
+PACKAGES += rabbit
+pkg_rabbit_name = rabbit
+pkg_rabbit_description = RabbitMQ Server
+pkg_rabbit_homepage = https://www.rabbitmq.com/
+pkg_rabbit_fetch = git
+pkg_rabbit_repo = https://github.com/rabbitmq/rabbitmq-server.git
+pkg_rabbit_commit = master
+
+PACKAGES += rabbit_exchange_type_riak
+pkg_rabbit_exchange_type_riak_name = rabbit_exchange_type_riak
+pkg_rabbit_exchange_type_riak_description = Custom RabbitMQ exchange type for sticking messages in Riak
+pkg_rabbit_exchange_type_riak_homepage = https://github.com/jbrisbin/riak-exchange
+pkg_rabbit_exchange_type_riak_fetch = git
+pkg_rabbit_exchange_type_riak_repo = https://github.com/jbrisbin/riak-exchange
+pkg_rabbit_exchange_type_riak_commit = master
+
+PACKAGES += rack
+pkg_rack_name = rack
+pkg_rack_description = Rack handler for Erlang
+pkg_rack_homepage = https://github.com/erlyvideo/rack
+pkg_rack_fetch = git
+pkg_rack_repo = https://github.com/erlyvideo/rack
+pkg_rack_commit = master
+
+PACKAGES += radierl
+pkg_radierl_name = radierl
+pkg_radierl_description = RADIUS protocol stack implemented in Erlang.
+pkg_radierl_homepage = https://github.com/vances/radierl
+pkg_radierl_fetch = git
+pkg_radierl_repo = https://github.com/vances/radierl
+pkg_radierl_commit = master
+
+PACKAGES += rafter
+pkg_rafter_name = rafter
+pkg_rafter_description = An Erlang library application which implements the Raft consensus protocol
+pkg_rafter_homepage = https://github.com/andrewjstone/rafter
+pkg_rafter_fetch = git
+pkg_rafter_repo = https://github.com/andrewjstone/rafter
+pkg_rafter_commit = master
+
+PACKAGES += ranch
+pkg_ranch_name = ranch
+pkg_ranch_description = Socket acceptor pool for TCP protocols.
+pkg_ranch_homepage = http://ninenines.eu
+pkg_ranch_fetch = git
+pkg_ranch_repo = https://github.com/ninenines/ranch
+pkg_ranch_commit = 1.2.1
+
+PACKAGES += rbeacon
+pkg_rbeacon_name = rbeacon
+pkg_rbeacon_description = LAN discovery and presence in Erlang.
+pkg_rbeacon_homepage = https://github.com/refuge/rbeacon
+pkg_rbeacon_fetch = git
+pkg_rbeacon_repo = https://github.com/refuge/rbeacon
+pkg_rbeacon_commit = master
+
+PACKAGES += rebar
+pkg_rebar_name = rebar
+pkg_rebar_description = Erlang build tool that makes it easy to compile and test Erlang applications, port drivers and releases.
+pkg_rebar_homepage = http://www.rebar3.org
+pkg_rebar_fetch = git
+pkg_rebar_repo = https://github.com/rebar/rebar3
+pkg_rebar_commit = master
+
+PACKAGES += rebus
+pkg_rebus_name = rebus
+pkg_rebus_description = A stupid simple, internal pub/sub event bus written in and for Erlang.
+pkg_rebus_homepage = https://github.com/olle/rebus
+pkg_rebus_fetch = git
+pkg_rebus_repo = https://github.com/olle/rebus
+pkg_rebus_commit = master
+
+PACKAGES += rec2json
+pkg_rec2json_name = rec2json
+pkg_rec2json_description = Compile Erlang record definitions into modules to convert them to/from JSON easily.
+pkg_rec2json_homepage = https://github.com/lordnull/rec2json
+pkg_rec2json_fetch = git
+pkg_rec2json_repo = https://github.com/lordnull/rec2json
+pkg_rec2json_commit = master
+
+PACKAGES += recon
+pkg_recon_name = recon
+pkg_recon_description = Collection of functions and scripts to debug Erlang in production.
+pkg_recon_homepage = https://github.com/ferd/recon
+pkg_recon_fetch = git
+pkg_recon_repo = https://github.com/ferd/recon
+pkg_recon_commit = master
+
+PACKAGES += record_info
+pkg_record_info_name = record_info
+pkg_record_info_description = Convert between record and proplist
+pkg_record_info_homepage = https://github.com/bipthelin/erlang-record_info
+pkg_record_info_fetch = git
+pkg_record_info_repo = https://github.com/bipthelin/erlang-record_info
+pkg_record_info_commit = master
+
+PACKAGES += redgrid
+pkg_redgrid_name = redgrid
+pkg_redgrid_description = Automatic Erlang node discovery via Redis
+pkg_redgrid_homepage = https://github.com/jkvor/redgrid
+pkg_redgrid_fetch = git
+pkg_redgrid_repo = https://github.com/jkvor/redgrid
+pkg_redgrid_commit = master
+
+PACKAGES += redo
+pkg_redo_name = redo
+pkg_redo_description = Pipelined Erlang Redis client
+pkg_redo_homepage = https://github.com/jkvor/redo
+pkg_redo_fetch = git
+pkg_redo_repo = https://github.com/jkvor/redo
+pkg_redo_commit = master
+
+PACKAGES += reload_mk
+pkg_reload_mk_name = reload_mk
+pkg_reload_mk_description = Live reload plugin for erlang.mk.
+pkg_reload_mk_homepage = https://github.com/bullno1/reload.mk
+pkg_reload_mk_fetch = git
+pkg_reload_mk_repo = https://github.com/bullno1/reload.mk
+pkg_reload_mk_commit = master
+
+PACKAGES += reltool_util
+pkg_reltool_util_name = reltool_util
+pkg_reltool_util_description = Erlang reltool utility functionality application
+pkg_reltool_util_homepage = https://github.com/okeuday/reltool_util
+pkg_reltool_util_fetch = git
+pkg_reltool_util_repo = https://github.com/okeuday/reltool_util
+pkg_reltool_util_commit = master
+
+PACKAGES += relx
+pkg_relx_name = relx
+pkg_relx_description = Sane, simple release creation for Erlang
+pkg_relx_homepage = https://github.com/erlware/relx
+pkg_relx_fetch = git
+pkg_relx_repo = https://github.com/erlware/relx
+pkg_relx_commit = master
+
+PACKAGES += resource_discovery
+pkg_resource_discovery_name = resource_discovery
+pkg_resource_discovery_description = An application used to dynamically discover resources present in an Erlang node cluster.
+pkg_resource_discovery_homepage = http://erlware.org/
+pkg_resource_discovery_fetch = git
+pkg_resource_discovery_repo = https://github.com/erlware/resource_discovery
+pkg_resource_discovery_commit = master
+
+PACKAGES += restc
+pkg_restc_name = restc
+pkg_restc_description = Erlang REST client
+pkg_restc_homepage = https://github.com/kivra/restclient
+pkg_restc_fetch = git
+pkg_restc_repo = https://github.com/kivra/restclient
+pkg_restc_commit = master
+
+PACKAGES += rfc4627_jsonrpc
+pkg_rfc4627_jsonrpc_name = rfc4627_jsonrpc
+pkg_rfc4627_jsonrpc_description = Erlang RFC4627 (JSON) codec and JSON-RPC server implementation.
+pkg_rfc4627_jsonrpc_homepage = https://github.com/tonyg/erlang-rfc4627
+pkg_rfc4627_jsonrpc_fetch = git
+pkg_rfc4627_jsonrpc_repo = https://github.com/tonyg/erlang-rfc4627
+pkg_rfc4627_jsonrpc_commit = master
+
+PACKAGES += riak_control
+pkg_riak_control_name = riak_control
+pkg_riak_control_description = Webmachine-based administration interface for Riak.
+pkg_riak_control_homepage = https://github.com/basho/riak_control
+pkg_riak_control_fetch = git
+pkg_riak_control_repo = https://github.com/basho/riak_control
+pkg_riak_control_commit = master
+
+PACKAGES += riak_core
+pkg_riak_core_name = riak_core
+pkg_riak_core_description = Distributed systems infrastructure used by Riak.
+pkg_riak_core_homepage = https://github.com/basho/riak_core
+pkg_riak_core_fetch = git
+pkg_riak_core_repo = https://github.com/basho/riak_core
+pkg_riak_core_commit = master
+
+PACKAGES += riak_dt
+pkg_riak_dt_name = riak_dt
+pkg_riak_dt_description = Convergent replicated datatypes in Erlang
+pkg_riak_dt_homepage = https://github.com/basho/riak_dt
+pkg_riak_dt_fetch = git
+pkg_riak_dt_repo = https://github.com/basho/riak_dt
+pkg_riak_dt_commit = master
+
+PACKAGES += riak_ensemble
+pkg_riak_ensemble_name = riak_ensemble
+pkg_riak_ensemble_description = Multi-Paxos framework in Erlang
+pkg_riak_ensemble_homepage = https://github.com/basho/riak_ensemble
+pkg_riak_ensemble_fetch = git
+pkg_riak_ensemble_repo = https://github.com/basho/riak_ensemble
+pkg_riak_ensemble_commit = master
+
+PACKAGES += riak_kv
+pkg_riak_kv_name = riak_kv
+pkg_riak_kv_description = Riak Key/Value Store
+pkg_riak_kv_homepage = https://github.com/basho/riak_kv
+pkg_riak_kv_fetch = git
+pkg_riak_kv_repo = https://github.com/basho/riak_kv
+pkg_riak_kv_commit = master
+
+PACKAGES += riak_pg
+pkg_riak_pg_name = riak_pg
+pkg_riak_pg_description = Distributed process groups with riak_core.
+pkg_riak_pg_homepage = https://github.com/cmeiklejohn/riak_pg
+pkg_riak_pg_fetch = git
+pkg_riak_pg_repo = https://github.com/cmeiklejohn/riak_pg
+pkg_riak_pg_commit = master
+
+PACKAGES += riak_pipe
+pkg_riak_pipe_name = riak_pipe
+pkg_riak_pipe_description = Riak Pipelines
+pkg_riak_pipe_homepage = https://github.com/basho/riak_pipe
+pkg_riak_pipe_fetch = git
+pkg_riak_pipe_repo = https://github.com/basho/riak_pipe
+pkg_riak_pipe_commit = master
+
+PACKAGES += riak_sysmon
+pkg_riak_sysmon_name = riak_sysmon
+pkg_riak_sysmon_description = Simple OTP app for managing Erlang VM system_monitor event messages
+pkg_riak_sysmon_homepage = https://github.com/basho/riak_sysmon
+pkg_riak_sysmon_fetch = git
+pkg_riak_sysmon_repo = https://github.com/basho/riak_sysmon
+pkg_riak_sysmon_commit = master
+
+PACKAGES += riak_test
+pkg_riak_test_name = riak_test
+pkg_riak_test_description = I'm in your cluster, testing your riaks
+pkg_riak_test_homepage = https://github.com/basho/riak_test
+pkg_riak_test_fetch = git
+pkg_riak_test_repo = https://github.com/basho/riak_test
+pkg_riak_test_commit = master
+
+PACKAGES += riakc
+pkg_riakc_name = riakc
+pkg_riakc_description = Erlang clients for Riak.
+pkg_riakc_homepage = https://github.com/basho/riak-erlang-client
+pkg_riakc_fetch = git
+pkg_riakc_repo = https://github.com/basho/riak-erlang-client
+pkg_riakc_commit = master
+
+PACKAGES += riakhttpc
+pkg_riakhttpc_name = riakhttpc
+pkg_riakhttpc_description = Riak Erlang client using the HTTP interface
+pkg_riakhttpc_homepage = https://github.com/basho/riak-erlang-http-client
+pkg_riakhttpc_fetch = git
+pkg_riakhttpc_repo = https://github.com/basho/riak-erlang-http-client
+pkg_riakhttpc_commit = master
+
+PACKAGES += riaknostic
+pkg_riaknostic_name = riaknostic
+pkg_riaknostic_description = A diagnostic tool for Riak installations, to find common errors ASAP
+pkg_riaknostic_homepage = https://github.com/basho/riaknostic
+pkg_riaknostic_fetch = git
+pkg_riaknostic_repo = https://github.com/basho/riaknostic
+pkg_riaknostic_commit = master
+
+PACKAGES += riakpool
+pkg_riakpool_name = riakpool
+pkg_riakpool_description = Erlang Riak client pool
+pkg_riakpool_homepage = https://github.com/dweldon/riakpool
+pkg_riakpool_fetch = git
+pkg_riakpool_repo = https://github.com/dweldon/riakpool
+pkg_riakpool_commit = master
+
+PACKAGES += rivus_cep
+pkg_rivus_cep_name = rivus_cep
+pkg_rivus_cep_description = Complex event processing in Erlang
+pkg_rivus_cep_homepage = https://github.com/vascokk/rivus_cep
+pkg_rivus_cep_fetch = git
+pkg_rivus_cep_repo = https://github.com/vascokk/rivus_cep
+pkg_rivus_cep_commit = master
+
+PACKAGES += rlimit
+pkg_rlimit_name = rlimit
+pkg_rlimit_description = Magnus Klaar's rate limiter code from etorrent
+pkg_rlimit_homepage = https://github.com/jlouis/rlimit
+pkg_rlimit_fetch = git
+pkg_rlimit_repo = https://github.com/jlouis/rlimit
+pkg_rlimit_commit = master
+
+PACKAGES += rust_mk
+pkg_rust_mk_name = rust_mk
+pkg_rust_mk_description = Build Rust crates in an Erlang application
+pkg_rust_mk_homepage = https://github.com/goertzenator/rust.mk
+pkg_rust_mk_fetch = git
+pkg_rust_mk_repo = https://github.com/goertzenator/rust.mk
+pkg_rust_mk_commit = master
+
+PACKAGES += safetyvalve
+pkg_safetyvalve_name = safetyvalve
+pkg_safetyvalve_description = A safety valve for your Erlang node
+pkg_safetyvalve_homepage = https://github.com/jlouis/safetyvalve
+pkg_safetyvalve_fetch = git
+pkg_safetyvalve_repo = https://github.com/jlouis/safetyvalve
+pkg_safetyvalve_commit = master
+
+PACKAGES += seestar
+pkg_seestar_name = seestar
+pkg_seestar_description = The Erlang client for Cassandra 1.2+ binary protocol
+pkg_seestar_homepage = https://github.com/iamaleksey/seestar
+pkg_seestar_fetch = git
+pkg_seestar_repo = https://github.com/iamaleksey/seestar
+pkg_seestar_commit = master
+
+PACKAGES += service
+pkg_service_name = service
+pkg_service_description = A minimal Erlang behavior for creating CloudI internal services
+pkg_service_homepage = http://cloudi.org/
+pkg_service_fetch = git
+pkg_service_repo = https://github.com/CloudI/service
+pkg_service_commit = master
+
+PACKAGES += setup
+pkg_setup_name = setup
+pkg_setup_description = Generic setup utility for Erlang-based systems
+pkg_setup_homepage = https://github.com/uwiger/setup
+pkg_setup_fetch = git
+pkg_setup_repo = https://github.com/uwiger/setup
+pkg_setup_commit = master
+
+PACKAGES += sext
+pkg_sext_name = sext
+pkg_sext_description = Sortable Erlang Term Serialization
+pkg_sext_homepage = https://github.com/uwiger/sext
+pkg_sext_fetch = git
+pkg_sext_repo = https://github.com/uwiger/sext
+pkg_sext_commit = master
+
+PACKAGES += sfmt
+pkg_sfmt_name = sfmt
+pkg_sfmt_description = SFMT pseudo random number generator for Erlang.
+pkg_sfmt_homepage = https://github.com/jj1bdx/sfmt-erlang
+pkg_sfmt_fetch = git
+pkg_sfmt_repo = https://github.com/jj1bdx/sfmt-erlang
+pkg_sfmt_commit = master
+
+PACKAGES += sgte
+pkg_sgte_name = sgte
+pkg_sgte_description = A simple Erlang Template Engine
+pkg_sgte_homepage = https://github.com/filippo/sgte
+pkg_sgte_fetch = git
+pkg_sgte_repo = https://github.com/filippo/sgte
+pkg_sgte_commit = master
+
+PACKAGES += sheriff
+pkg_sheriff_name = sheriff
+pkg_sheriff_description = Parse transform for type based validation.
+pkg_sheriff_homepage = http://ninenines.eu
+pkg_sheriff_fetch = git
+pkg_sheriff_repo = https://github.com/extend/sheriff
+pkg_sheriff_commit = master
+
+PACKAGES += shotgun
+pkg_shotgun_name = shotgun
+pkg_shotgun_description = better than just a gun
+pkg_shotgun_homepage = https://github.com/inaka/shotgun
+pkg_shotgun_fetch = git
+pkg_shotgun_repo = https://github.com/inaka/shotgun
+pkg_shotgun_commit = master
+
+PACKAGES += sidejob
+pkg_sidejob_name = sidejob
+pkg_sidejob_description = Parallel worker and capacity limiting library for Erlang
+pkg_sidejob_homepage = https://github.com/basho/sidejob
+pkg_sidejob_fetch = git
+pkg_sidejob_repo = https://github.com/basho/sidejob
+pkg_sidejob_commit = master
+
+PACKAGES += sieve
+pkg_sieve_name = sieve
+pkg_sieve_description = sieve is a simple TCP routing proxy (layer 7) in Erlang
+pkg_sieve_homepage = https://github.com/benoitc/sieve
+pkg_sieve_fetch = git
+pkg_sieve_repo = https://github.com/benoitc/sieve
+pkg_sieve_commit = master
+
+PACKAGES += sighandler
+pkg_sighandler_name = sighandler
+pkg_sighandler_description = Handle UNIX signals in Erlang
+pkg_sighandler_homepage = https://github.com/jkingsbery/sighandler
+pkg_sighandler_fetch = git
+pkg_sighandler_repo = https://github.com/jkingsbery/sighandler
+pkg_sighandler_commit = master
+
+PACKAGES += simhash
+pkg_simhash_name = simhash
+pkg_simhash_description = Simhashing for Erlang -- hashing algorithm to find near-duplicates in binary data.
+pkg_simhash_homepage = https://github.com/ferd/simhash
+pkg_simhash_fetch = git
+pkg_simhash_repo = https://github.com/ferd/simhash
+pkg_simhash_commit = master
+
+PACKAGES += simple_bridge
+pkg_simple_bridge_name = simple_bridge
+pkg_simple_bridge_description = A simple, standardized interface library to Erlang HTTP Servers.
+pkg_simple_bridge_homepage = https://github.com/nitrogen/simple_bridge
+pkg_simple_bridge_fetch = git
+pkg_simple_bridge_repo = https://github.com/nitrogen/simple_bridge
+pkg_simple_bridge_commit = master
+
+PACKAGES += simple_oauth2
+pkg_simple_oauth2_name = simple_oauth2
+pkg_simple_oauth2_description = Simple erlang OAuth2 client module for any http server framework (Google, Facebook, Yandex, Vkontakte are preconfigured)
+pkg_simple_oauth2_homepage = https://github.com/virtan/simple_oauth2
+pkg_simple_oauth2_fetch = git
+pkg_simple_oauth2_repo = https://github.com/virtan/simple_oauth2
+pkg_simple_oauth2_commit = master
+
+PACKAGES += skel
+pkg_skel_name = skel
+pkg_skel_description = A Streaming Process-based Skeleton Library for Erlang
+pkg_skel_homepage = https://github.com/ParaPhrase/skel
+pkg_skel_fetch = git
+pkg_skel_repo = https://github.com/ParaPhrase/skel
+pkg_skel_commit = master
+
+PACKAGES += slack
+pkg_slack_name = slack
+pkg_slack_description = Minimal Slack notification OTP library.
+pkg_slack_homepage = https://github.com/DonBranson/slack
+pkg_slack_fetch = git
+pkg_slack_repo = https://github.com/DonBranson/slack.git
+pkg_slack_commit = master
+
+PACKAGES += smother
+pkg_smother_name = smother
+pkg_smother_description = Extended code coverage metrics for Erlang.
+pkg_smother_homepage = https://ramsay-t.github.io/Smother/
+pkg_smother_fetch = git
+pkg_smother_repo = https://github.com/ramsay-t/Smother
+pkg_smother_commit = master
+
+PACKAGES += snappyer
+pkg_snappyer_name = snappyer
+pkg_snappyer_description = Snappy as a NIF for Erlang
+pkg_snappyer_homepage = https://github.com/zmstone/snappyer
+pkg_snappyer_fetch = git
+pkg_snappyer_repo = https://github.com/zmstone/snappyer.git
+pkg_snappyer_commit = master
+
+PACKAGES += social
+pkg_social_name = social
+pkg_social_description = Cowboy handler for social login via OAuth2 providers
+pkg_social_homepage = https://github.com/dvv/social
+pkg_social_fetch = git
+pkg_social_repo = https://github.com/dvv/social
+pkg_social_commit = master
+
+PACKAGES += spapi_router
+pkg_spapi_router_name = spapi_router
+pkg_spapi_router_description = Partially-connected Erlang clustering
+pkg_spapi_router_homepage = https://github.com/spilgames/spapi-router
+pkg_spapi_router_fetch = git
+pkg_spapi_router_repo = https://github.com/spilgames/spapi-router
+pkg_spapi_router_commit = master
+
+PACKAGES += sqerl
+pkg_sqerl_name = sqerl
+pkg_sqerl_description = An Erlang-flavoured SQL DSL
+pkg_sqerl_homepage = https://github.com/hairyhum/sqerl
+pkg_sqerl_fetch = git
+pkg_sqerl_repo = https://github.com/hairyhum/sqerl
+pkg_sqerl_commit = master
+
+PACKAGES += srly
+pkg_srly_name = srly
+pkg_srly_description = Native Erlang Unix serial interface
+pkg_srly_homepage = https://github.com/msantos/srly
+pkg_srly_fetch = git
+pkg_srly_repo = https://github.com/msantos/srly
+pkg_srly_commit = master
+
+PACKAGES += sshrpc
+pkg_sshrpc_name = sshrpc
+pkg_sshrpc_description = Erlang SSH RPC module (experimental)
+pkg_sshrpc_homepage = https://github.com/jj1bdx/sshrpc
+pkg_sshrpc_fetch = git
+pkg_sshrpc_repo = https://github.com/jj1bdx/sshrpc
+pkg_sshrpc_commit = master
+
+PACKAGES += stable
+pkg_stable_name = stable
+pkg_stable_description = Library of assorted helpers for Cowboy web server.
+pkg_stable_homepage = https://github.com/dvv/stable
+pkg_stable_fetch = git
+pkg_stable_repo = https://github.com/dvv/stable
+pkg_stable_commit = master
+
+PACKAGES += statebox
+pkg_statebox_name = statebox
+pkg_statebox_description = Erlang state monad with merge/conflict-resolution capabilities. Useful for Riak.
+pkg_statebox_homepage = https://github.com/mochi/statebox
+pkg_statebox_fetch = git
+pkg_statebox_repo = https://github.com/mochi/statebox
+pkg_statebox_commit = master
+
+PACKAGES += statebox_riak
+pkg_statebox_riak_name = statebox_riak
+pkg_statebox_riak_description = Convenience library that makes it easier to use statebox with riak, extracted from best practices in our production code at Mochi Media.
+pkg_statebox_riak_homepage = https://github.com/mochi/statebox_riak
+pkg_statebox_riak_fetch = git
+pkg_statebox_riak_repo = https://github.com/mochi/statebox_riak
+pkg_statebox_riak_commit = master
+
+PACKAGES += statman
+pkg_statman_name = statman
+pkg_statman_description = Efficiently collect massive volumes of metrics inside the Erlang VM
+pkg_statman_homepage = https://github.com/knutin/statman
+pkg_statman_fetch = git
+pkg_statman_repo = https://github.com/knutin/statman
+pkg_statman_commit = master
+
+PACKAGES += statsderl
+pkg_statsderl_name = statsderl
+pkg_statsderl_description = StatsD client (Erlang)
+pkg_statsderl_homepage = https://github.com/lpgauth/statsderl
+pkg_statsderl_fetch = git
+pkg_statsderl_repo = https://github.com/lpgauth/statsderl
+pkg_statsderl_commit = master
+
+PACKAGES += stdinout_pool
+pkg_stdinout_pool_name = stdinout_pool
+pkg_stdinout_pool_description = stdinout_pool: stuff goes in, stuff goes out. There's never any miscommunication.
+pkg_stdinout_pool_homepage = https://github.com/mattsta/erlang-stdinout-pool
+pkg_stdinout_pool_fetch = git
+pkg_stdinout_pool_repo = https://github.com/mattsta/erlang-stdinout-pool
+pkg_stdinout_pool_commit = master
+
+PACKAGES += stockdb
+pkg_stockdb_name = stockdb
+pkg_stockdb_description = Database for storing stock exchange quotes in Erlang
+pkg_stockdb_homepage = https://github.com/maxlapshin/stockdb
+pkg_stockdb_fetch = git
+pkg_stockdb_repo = https://github.com/maxlapshin/stockdb
+pkg_stockdb_commit = master
+
+PACKAGES += stripe
+pkg_stripe_name = stripe
+pkg_stripe_description = Erlang interface to the stripe.com API
+pkg_stripe_homepage = https://github.com/mattsta/stripe-erlang
+pkg_stripe_fetch = git
+pkg_stripe_repo = https://github.com/mattsta/stripe-erlang
+pkg_stripe_commit = v1
+
+PACKAGES += subproc
+pkg_subproc_name = subproc
+pkg_subproc_description = UNIX subprocess manager with {active,once|false} modes
+pkg_subproc_homepage = http://dozzie.jarowit.net/trac/wiki/subproc
+pkg_subproc_fetch = git
+pkg_subproc_repo = https://github.com/dozzie/subproc
+pkg_subproc_commit = v0.1.0
+
+PACKAGES += supervisor3
+pkg_supervisor3_name = supervisor3
+pkg_supervisor3_description = OTP supervisor with additional strategies
+pkg_supervisor3_homepage = https://github.com/klarna/supervisor3
+pkg_supervisor3_fetch = git
+pkg_supervisor3_repo = https://github.com/klarna/supervisor3.git
+pkg_supervisor3_commit = master
+
+PACKAGES += surrogate
+pkg_surrogate_name = surrogate
+pkg_surrogate_description = Proxy server written in Erlang. Supports reverse proxy load balancing and forward proxy with HTTP (including CONNECT), SOCKS4, SOCKS5, and transparent proxy modes.
+pkg_surrogate_homepage = https://github.com/skruger/Surrogate
+pkg_surrogate_fetch = git
+pkg_surrogate_repo = https://github.com/skruger/Surrogate
+pkg_surrogate_commit = master
+
+PACKAGES += swab
+pkg_swab_name = swab
+pkg_swab_description = General purpose buffer handling module
+pkg_swab_homepage = https://github.com/crownedgrouse/swab
+pkg_swab_fetch = git
+pkg_swab_repo = https://github.com/crownedgrouse/swab
+pkg_swab_commit = master
+
+PACKAGES += swarm
+pkg_swarm_name = swarm
+pkg_swarm_description = Fast and simple acceptor pool for Erlang
+pkg_swarm_homepage = https://github.com/jeremey/swarm
+pkg_swarm_fetch = git
+pkg_swarm_repo = https://github.com/jeremey/swarm
+pkg_swarm_commit = master
+
+PACKAGES += switchboard
+pkg_switchboard_name = switchboard
+pkg_switchboard_description = A framework for processing email using worker plugins.
+pkg_switchboard_homepage = https://github.com/thusfresh/switchboard
+pkg_switchboard_fetch = git
+pkg_switchboard_repo = https://github.com/thusfresh/switchboard
+pkg_switchboard_commit = master
+
+PACKAGES += syn
+pkg_syn_name = syn
+pkg_syn_description = A global Process Registry and Process Group manager for Erlang.
+pkg_syn_homepage = https://github.com/ostinelli/syn
+pkg_syn_fetch = git
+pkg_syn_repo = https://github.com/ostinelli/syn
+pkg_syn_commit = master
+
+PACKAGES += sync
+pkg_sync_name = sync
+pkg_sync_description = On-the-fly recompiling and reloading in Erlang.
+pkg_sync_homepage = https://github.com/rustyio/sync
+pkg_sync_fetch = git
+pkg_sync_repo = https://github.com/rustyio/sync
+pkg_sync_commit = master
+
+PACKAGES += syntaxerl
+pkg_syntaxerl_name = syntaxerl
+pkg_syntaxerl_description = Syntax checker for Erlang
+pkg_syntaxerl_homepage = https://github.com/ten0s/syntaxerl
+pkg_syntaxerl_fetch = git
+pkg_syntaxerl_repo = https://github.com/ten0s/syntaxerl
+pkg_syntaxerl_commit = master
+
+PACKAGES += syslog
+pkg_syslog_name = syslog
+pkg_syslog_description = Erlang port driver for interacting with syslog via syslog(3)
+pkg_syslog_homepage = https://github.com/Vagabond/erlang-syslog
+pkg_syslog_fetch = git
+pkg_syslog_repo = https://github.com/Vagabond/erlang-syslog
+pkg_syslog_commit = master
+
+PACKAGES += taskforce
+pkg_taskforce_name = taskforce
+pkg_taskforce_description = Erlang worker pools for controlled parallelisation of arbitrary tasks.
+pkg_taskforce_homepage = https://github.com/g-andrade/taskforce
+pkg_taskforce_fetch = git
+pkg_taskforce_repo = https://github.com/g-andrade/taskforce
+pkg_taskforce_commit = master
+
+PACKAGES += tddreloader
+pkg_tddreloader_name = tddreloader
+pkg_tddreloader_description = Shell utility for recompiling, reloading, and testing code as it changes
+pkg_tddreloader_homepage = https://github.com/version2beta/tddreloader
+pkg_tddreloader_fetch = git
+pkg_tddreloader_repo = https://github.com/version2beta/tddreloader
+pkg_tddreloader_commit = master
+
+PACKAGES += tempo
+pkg_tempo_name = tempo
+pkg_tempo_description = NIF-based date and time parsing and formatting for Erlang.
+pkg_tempo_homepage = https://github.com/selectel/tempo
+pkg_tempo_fetch = git
+pkg_tempo_repo = https://github.com/selectel/tempo
+pkg_tempo_commit = master
+
+PACKAGES += ticktick
+pkg_ticktick_name = ticktick
+pkg_ticktick_description = Ticktick is an ID generator for message services.
+pkg_ticktick_homepage = https://github.com/ericliang/ticktick
+pkg_ticktick_fetch = git
+pkg_ticktick_repo = https://github.com/ericliang/ticktick
+pkg_ticktick_commit = master
+
+PACKAGES += tinymq
+pkg_tinymq_name = tinymq
+pkg_tinymq_description = TinyMQ - a diminutive, in-memory message queue
+pkg_tinymq_homepage = https://github.com/ChicagoBoss/tinymq
+pkg_tinymq_fetch = git
+pkg_tinymq_repo = https://github.com/ChicagoBoss/tinymq
+pkg_tinymq_commit = master
+
+PACKAGES += tinymt
+pkg_tinymt_name = tinymt
+pkg_tinymt_description = TinyMT pseudo random number generator for Erlang.
+pkg_tinymt_homepage = https://github.com/jj1bdx/tinymt-erlang
+pkg_tinymt_fetch = git
+pkg_tinymt_repo = https://github.com/jj1bdx/tinymt-erlang
+pkg_tinymt_commit = master
+
+PACKAGES += tirerl
+pkg_tirerl_name = tirerl
+pkg_tirerl_description = Erlang interface to Elasticsearch
+pkg_tirerl_homepage = https://github.com/inaka/tirerl
+pkg_tirerl_fetch = git
+pkg_tirerl_repo = https://github.com/inaka/tirerl
+pkg_tirerl_commit = master
+
+PACKAGES += toml
+pkg_toml_name = toml
+pkg_toml_description = TOML (0.4.0) config parser
+pkg_toml_homepage = http://dozzie.jarowit.net/trac/wiki/TOML
+pkg_toml_fetch = git
+pkg_toml_repo = https://github.com/dozzie/toml
+pkg_toml_commit = v0.2.0
+
+PACKAGES += traffic_tools
+pkg_traffic_tools_name = traffic_tools
+pkg_traffic_tools_description = Simple traffic limiting library
+pkg_traffic_tools_homepage = https://github.com/systra/traffic_tools
+pkg_traffic_tools_fetch = git
+pkg_traffic_tools_repo = https://github.com/systra/traffic_tools
+pkg_traffic_tools_commit = master
+
+PACKAGES += trails
+pkg_trails_name = trails
+pkg_trails_description = A couple of improvements over Cowboy Routes
+pkg_trails_homepage = http://inaka.github.io/cowboy-trails/
+pkg_trails_fetch = git
+pkg_trails_repo = https://github.com/inaka/cowboy-trails
+pkg_trails_commit = master
+
+PACKAGES += trane
+pkg_trane_name = trane
+pkg_trane_description = SAX-style parser for broken HTML, in Erlang
+pkg_trane_homepage = https://github.com/massemanet/trane
+pkg_trane_fetch = git
+pkg_trane_repo = https://github.com/massemanet/trane
+pkg_trane_commit = master
+
+PACKAGES += transit
+pkg_transit_name = transit
+pkg_transit_description = Transit format for Erlang
+pkg_transit_homepage = https://github.com/isaiah/transit-erlang
+pkg_transit_fetch = git
+pkg_transit_repo = https://github.com/isaiah/transit-erlang
+pkg_transit_commit = master
+
+PACKAGES += trie
+pkg_trie_name = trie
+pkg_trie_description = Erlang Trie Implementation
+pkg_trie_homepage = https://github.com/okeuday/trie
+pkg_trie_fetch = git
+pkg_trie_repo = https://github.com/okeuday/trie
+pkg_trie_commit = master
+
+PACKAGES += triq
+pkg_triq_name = triq
+pkg_triq_description = Trifork QuickCheck
+pkg_triq_homepage = https://triq.gitlab.io
+pkg_triq_fetch = git
+pkg_triq_repo = https://gitlab.com/triq/triq.git
+pkg_triq_commit = master
+
+PACKAGES += tunctl
+pkg_tunctl_name = tunctl
+pkg_tunctl_description = Erlang TUN/TAP interface
+pkg_tunctl_homepage = https://github.com/msantos/tunctl
+pkg_tunctl_fetch = git
+pkg_tunctl_repo = https://github.com/msantos/tunctl
+pkg_tunctl_commit = master
+
+PACKAGES += twerl
+pkg_twerl_name = twerl
+pkg_twerl_description = Erlang client for the Twitter Streaming API
+pkg_twerl_homepage = https://github.com/lucaspiller/twerl
+pkg_twerl_fetch = git
+pkg_twerl_repo = https://github.com/lucaspiller/twerl
+pkg_twerl_commit = oauth
+
+PACKAGES += twitter_erlang
+pkg_twitter_erlang_name = twitter_erlang
+pkg_twitter_erlang_description = An Erlang Twitter client
+pkg_twitter_erlang_homepage = https://github.com/ngerakines/erlang_twitter
+pkg_twitter_erlang_fetch = git
+pkg_twitter_erlang_repo = https://github.com/ngerakines/erlang_twitter
+pkg_twitter_erlang_commit = master
+
+PACKAGES += ucol_nif
+pkg_ucol_nif_name = ucol_nif
+pkg_ucol_nif_description = ICU-based collation module for Erlang
+pkg_ucol_nif_homepage = https://github.com/refuge/ucol_nif
+pkg_ucol_nif_fetch = git
+pkg_ucol_nif_repo = https://github.com/refuge/ucol_nif
+pkg_ucol_nif_commit = master
+
+PACKAGES += unicorn
+pkg_unicorn_name = unicorn
+pkg_unicorn_description = Generic configuration server
+pkg_unicorn_homepage = https://github.com/shizzard/unicorn
+pkg_unicorn_fetch = git
+pkg_unicorn_repo = https://github.com/shizzard/unicorn
+pkg_unicorn_commit = master
+
+PACKAGES += unsplit
+pkg_unsplit_name = unsplit
+pkg_unsplit_description = Resolves conflicts in Mnesia after network splits
+pkg_unsplit_homepage = https://github.com/uwiger/unsplit
+pkg_unsplit_fetch = git
+pkg_unsplit_repo = https://github.com/uwiger/unsplit
+pkg_unsplit_commit = master
+
+PACKAGES += uuid
+pkg_uuid_name = uuid
+pkg_uuid_description = Erlang UUID Implementation
+pkg_uuid_homepage = https://github.com/okeuday/uuid
+pkg_uuid_fetch = git
+pkg_uuid_repo = https://github.com/okeuday/uuid
+pkg_uuid_commit = master
+
+PACKAGES += ux
+pkg_ux_name = ux
+pkg_ux_description = Unicode eXtension for Erlang (Strings, Collation)
+pkg_ux_homepage = https://github.com/erlang-unicode/ux
+pkg_ux_fetch = git
+pkg_ux_repo = https://github.com/erlang-unicode/ux
+pkg_ux_commit = master
+
+PACKAGES += vert
+pkg_vert_name = vert
+pkg_vert_description = Erlang binding to libvirt virtualization API
+pkg_vert_homepage = https://github.com/msantos/erlang-libvirt
+pkg_vert_fetch = git
+pkg_vert_repo = https://github.com/msantos/erlang-libvirt
+pkg_vert_commit = master
+
+PACKAGES += verx
+pkg_verx_name = verx
+pkg_verx_description = Erlang implementation of the libvirtd remote protocol
+pkg_verx_homepage = https://github.com/msantos/verx
+pkg_verx_fetch = git
+pkg_verx_repo = https://github.com/msantos/verx
+pkg_verx_commit = master
+
+PACKAGES += vmq_acl
+pkg_vmq_acl_name = vmq_acl
+pkg_vmq_acl_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_acl_homepage = https://verne.mq/
+pkg_vmq_acl_fetch = git
+pkg_vmq_acl_repo = https://github.com/erlio/vmq_acl
+pkg_vmq_acl_commit = master
+
+PACKAGES += vmq_bridge
+pkg_vmq_bridge_name = vmq_bridge
+pkg_vmq_bridge_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_bridge_homepage = https://verne.mq/
+pkg_vmq_bridge_fetch = git
+pkg_vmq_bridge_repo = https://github.com/erlio/vmq_bridge
+pkg_vmq_bridge_commit = master
+
+PACKAGES += vmq_graphite
+pkg_vmq_graphite_name = vmq_graphite
+pkg_vmq_graphite_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_graphite_homepage = https://verne.mq/
+pkg_vmq_graphite_fetch = git
+pkg_vmq_graphite_repo = https://github.com/erlio/vmq_graphite
+pkg_vmq_graphite_commit = master
+
+PACKAGES += vmq_passwd
+pkg_vmq_passwd_name = vmq_passwd
+pkg_vmq_passwd_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_passwd_homepage = https://verne.mq/
+pkg_vmq_passwd_fetch = git
+pkg_vmq_passwd_repo = https://github.com/erlio/vmq_passwd
+pkg_vmq_passwd_commit = master
+
+PACKAGES += vmq_server
+pkg_vmq_server_name = vmq_server
+pkg_vmq_server_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_server_homepage = https://verne.mq/
+pkg_vmq_server_fetch = git
+pkg_vmq_server_repo = https://github.com/erlio/vmq_server
+pkg_vmq_server_commit = master
+
+PACKAGES += vmq_snmp
+pkg_vmq_snmp_name = vmq_snmp
+pkg_vmq_snmp_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_snmp_homepage = https://verne.mq/
+pkg_vmq_snmp_fetch = git
+pkg_vmq_snmp_repo = https://github.com/erlio/vmq_snmp
+pkg_vmq_snmp_commit = master
+
+PACKAGES += vmq_systree
+pkg_vmq_systree_name = vmq_systree
+pkg_vmq_systree_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_systree_homepage = https://verne.mq/
+pkg_vmq_systree_fetch = git
+pkg_vmq_systree_repo = https://github.com/erlio/vmq_systree
+pkg_vmq_systree_commit = master
+
+PACKAGES += vmstats
+pkg_vmstats_name = vmstats
+pkg_vmstats_description = Tiny Erlang app that works in conjunction with statsderl to generate Erlang VM information for Graphite logs.
+pkg_vmstats_homepage = https://github.com/ferd/vmstats
+pkg_vmstats_fetch = git
+pkg_vmstats_repo = https://github.com/ferd/vmstats
+pkg_vmstats_commit = master
+
+PACKAGES += walrus
+pkg_walrus_name = walrus
+pkg_walrus_description = Walrus - Mustache-like Templating
+pkg_walrus_homepage = https://github.com/devinus/walrus
+pkg_walrus_fetch = git
+pkg_walrus_repo = https://github.com/devinus/walrus
+pkg_walrus_commit = master
+
+PACKAGES += webmachine
+pkg_webmachine_name = webmachine
+pkg_webmachine_description = A REST-based system for building web applications.
+pkg_webmachine_homepage = https://github.com/basho/webmachine
+pkg_webmachine_fetch = git
+pkg_webmachine_repo = https://github.com/basho/webmachine
+pkg_webmachine_commit = master
+
+PACKAGES += websocket_client
+pkg_websocket_client_name = websocket_client
+pkg_websocket_client_description = Erlang websocket client (ws and wss supported)
+pkg_websocket_client_homepage = https://github.com/jeremyong/websocket_client
+pkg_websocket_client_fetch = git
+pkg_websocket_client_repo = https://github.com/jeremyong/websocket_client
+pkg_websocket_client_commit = master
+
+PACKAGES += worker_pool
+pkg_worker_pool_name = worker_pool
+pkg_worker_pool_description = A simple Erlang worker pool
+pkg_worker_pool_homepage = https://github.com/inaka/worker_pool
+pkg_worker_pool_fetch = git
+pkg_worker_pool_repo = https://github.com/inaka/worker_pool
+pkg_worker_pool_commit = master
+
+PACKAGES += wrangler
+pkg_wrangler_name = wrangler
+pkg_wrangler_description = Import of the Wrangler SVN repository.
+pkg_wrangler_homepage = http://www.cs.kent.ac.uk/projects/wrangler/Home.html
+pkg_wrangler_fetch = git
+pkg_wrangler_repo = https://github.com/RefactoringTools/wrangler
+pkg_wrangler_commit = master
+
+PACKAGES += wsock
+pkg_wsock_name = wsock
+pkg_wsock_description = Erlang library to build WebSocket clients and servers
+pkg_wsock_homepage = https://github.com/madtrick/wsock
+pkg_wsock_fetch = git
+pkg_wsock_repo = https://github.com/madtrick/wsock
+pkg_wsock_commit = master
+
+PACKAGES += xhttpc
+pkg_xhttpc_name = xhttpc
+pkg_xhttpc_description = Extensible HTTP Client for Erlang
+pkg_xhttpc_homepage = https://github.com/seriyps/xhttpc
+pkg_xhttpc_fetch = git
+pkg_xhttpc_repo = https://github.com/seriyps/xhttpc
+pkg_xhttpc_commit = master
+
+PACKAGES += xref_runner
+pkg_xref_runner_name = xref_runner
+pkg_xref_runner_description = Erlang Xref Runner (inspired by rebar xref)
+pkg_xref_runner_homepage = https://github.com/inaka/xref_runner
+pkg_xref_runner_fetch = git
+pkg_xref_runner_repo = https://github.com/inaka/xref_runner
+pkg_xref_runner_commit = master
+
+PACKAGES += yamerl
+pkg_yamerl_name = yamerl
+pkg_yamerl_description = YAML 1.2 parser in pure Erlang
+pkg_yamerl_homepage = https://github.com/yakaz/yamerl
+pkg_yamerl_fetch = git
+pkg_yamerl_repo = https://github.com/yakaz/yamerl
+pkg_yamerl_commit = master
+
+PACKAGES += yamler
+pkg_yamler_name = yamler
+pkg_yamler_description = libyaml-based YAML loader for Erlang
+pkg_yamler_homepage = https://github.com/goertzenator/yamler
+pkg_yamler_fetch = git
+pkg_yamler_repo = https://github.com/goertzenator/yamler
+pkg_yamler_commit = master
+
+PACKAGES += yaws
+pkg_yaws_name = yaws
+pkg_yaws_description = Yaws webserver
+pkg_yaws_homepage = http://yaws.hyber.org
+pkg_yaws_fetch = git
+pkg_yaws_repo = https://github.com/klacke/yaws
+pkg_yaws_commit = master
+
+PACKAGES += zab_engine
+pkg_zab_engine_name = zab_engine
+pkg_zab_engine_description = Zab protocol implementation in Erlang
+pkg_zab_engine_homepage = https://github.com/xinmingyao/zab_engine
+pkg_zab_engine_fetch = git
+pkg_zab_engine_repo = https://github.com/xinmingyao/zab_engine
+pkg_zab_engine_commit = master
+
+PACKAGES += zabbix_sender
+pkg_zabbix_sender_name = zabbix_sender
+pkg_zabbix_sender_description = Zabbix trapper for sending data to Zabbix in pure Erlang
+pkg_zabbix_sender_homepage = https://github.com/stalkermn/zabbix_sender
+pkg_zabbix_sender_fetch = git
+pkg_zabbix_sender_repo = https://github.com/stalkermn/zabbix_sender.git
+pkg_zabbix_sender_commit = master
+
+PACKAGES += zeta
+pkg_zeta_name = zeta
+pkg_zeta_description = HTTP access log parser in Erlang
+pkg_zeta_homepage = https://github.com/s1n4/zeta
+pkg_zeta_fetch = git
+pkg_zeta_repo = https://github.com/s1n4/zeta
+pkg_zeta_commit = master
+
+PACKAGES += zippers
+pkg_zippers_name = zippers
+pkg_zippers_description = A library for functional zipper data structures in Erlang.
+pkg_zippers_homepage = https://github.com/ferd/zippers
+pkg_zippers_fetch = git
+pkg_zippers_repo = https://github.com/ferd/zippers
+pkg_zippers_commit = master
+
+PACKAGES += zlists
+pkg_zlists_name = zlists
+pkg_zlists_description = Erlang lazy lists library.
+pkg_zlists_homepage = https://github.com/vjache/erlang-zlists
+pkg_zlists_fetch = git
+pkg_zlists_repo = https://github.com/vjache/erlang-zlists
+pkg_zlists_commit = master
+
+PACKAGES += zraft_lib
+pkg_zraft_lib_name = zraft_lib
+pkg_zraft_lib_description = Erlang Raft consensus protocol implementation
+pkg_zraft_lib_homepage = https://github.com/dreyk/zraft_lib
+pkg_zraft_lib_fetch = git
+pkg_zraft_lib_repo = https://github.com/dreyk/zraft_lib
+pkg_zraft_lib_commit = master
+
+PACKAGES += zucchini
+pkg_zucchini_name = zucchini
+pkg_zucchini_description = An Erlang INI parser
+pkg_zucchini_homepage = https://github.com/devinus/zucchini
+pkg_zucchini_fetch = git
+pkg_zucchini_repo = https://github.com/devinus/zucchini
+pkg_zucchini_commit = master
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: search
+
+define pkg_print
+ $(verbose) printf "%s\n" \
+ $(if $(call core_eq,$(1),$(pkg_$(1)_name)),,"Pkg name: $(1)") \
+ "App name: $(pkg_$(1)_name)" \
+ "Description: $(pkg_$(1)_description)" \
+ "Home page: $(pkg_$(1)_homepage)" \
+ "Fetch with: $(pkg_$(1)_fetch)" \
+ "Repository: $(pkg_$(1)_repo)" \
+ "Commit: $(pkg_$(1)_commit)" \
+ ""
+
+endef
+
+search:
+ifdef q
+ $(foreach p,$(PACKAGES), \
+ $(if $(findstring $(call core_lc,$(q)),$(call core_lc,$(pkg_$(p)_name) $(pkg_$(p)_description))), \
+ $(call pkg_print,$(p))))
+else
+ $(foreach p,$(PACKAGES),$(call pkg_print,$(p)))
+endif
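+
+# Usage sketch (assuming a project that includes this erlang.mk):
+#   $ make search q=pool
+# prints, via the pkg_print template above, every package whose name or
+# description contains "pool" (matched case-insensitively), while a bare
+# `make search` lists all packages.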
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-deps clean-tmp-deps.log
+
+# Configuration.
+
+ifdef OTP_DEPS
+$(warning The variable OTP_DEPS is deprecated in favor of LOCAL_DEPS.)
+endif
+
+IGNORE_DEPS ?=
+export IGNORE_DEPS
+
+APPS_DIR ?= $(CURDIR)/apps
+export APPS_DIR
+
+DEPS_DIR ?= $(CURDIR)/deps
+export DEPS_DIR
+
+REBAR_DEPS_DIR = $(DEPS_DIR)
+export REBAR_DEPS_DIR
+
+REBAR_GIT ?= https://github.com/rebar/rebar
+REBAR_COMMIT ?= 576e12171ab8d69b048b827b92aa65d067deea01
+
+# External "early" plugins (see core/plugins.mk for regular plugins).
+# They both use the core_dep_plugin macro.
+
+define core_dep_plugin
+ifeq ($(2),$(PROJECT))
+-include $$(patsubst $(PROJECT)/%,%,$(1))
+else
+-include $(DEPS_DIR)/$(1)
+
+$(DEPS_DIR)/$(1): $(DEPS_DIR)/$(2) ;
+endif
+endef
+
+DEP_EARLY_PLUGINS ?=
+
+$(foreach p,$(DEP_EARLY_PLUGINS),\
+ $(eval $(if $(findstring /,$p),\
+ $(call core_dep_plugin,$p,$(firstword $(subst /, ,$p))),\
+ $(call core_dep_plugin,$p/early-plugins.mk,$p))))
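+
+# Example (hypothetical plugin dependency): with
+#   DEP_EARLY_PLUGINS = mk_plugins
+# the file $(DEPS_DIR)/mk_plugins/early-plugins.mk gets included, while an
+# explicit path such as mk_plugins/mk/early.mk makes core_dep_plugin
+# include $(DEPS_DIR)/mk_plugins/mk/early.mk directly.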
+
+# Query functions.
+
+query_fetch_method = $(if $(dep_$(1)),$(call _qfm_dep,$(word 1,$(dep_$(1)))),$(call _qfm_pkg,$(1)))
+_qfm_dep = $(if $(dep_fetch_$(1)),$(1),$(if $(IS_DEP),legacy,fail))
+_qfm_pkg = $(if $(pkg_$(1)_fetch),$(pkg_$(1)_fetch),fail)
+
+query_name = $(if $(dep_$(1)),$(1),$(if $(pkg_$(1)_name),$(pkg_$(1)_name),$(1)))
+
+query_repo = $(call _qr,$(1),$(call query_fetch_method,$(1)))
+_qr = $(if $(query_repo_$(2)),$(call query_repo_$(2),$(1)),$(call dep_repo,$(1)))
+
+query_repo_default = $(if $(dep_$(1)),$(word 2,$(dep_$(1))),$(pkg_$(1)_repo))
+query_repo_git = $(patsubst git://github.com/%,https://github.com/%,$(call query_repo_default,$(1)))
+query_repo_git-subfolder = $(call query_repo_git,$(1))
+query_repo_git-submodule = -
+query_repo_hg = $(call query_repo_default,$(1))
+query_repo_svn = $(call query_repo_default,$(1))
+query_repo_cp = $(call query_repo_default,$(1))
+query_repo_ln = $(call query_repo_default,$(1))
+query_repo_hex = https://hex.pm/packages/$(if $(word 3,$(dep_$(1))),$(word 3,$(dep_$(1))),$(1))
+query_repo_fail = -
+query_repo_legacy = -
+
+query_version = $(call _qv,$(1),$(call query_fetch_method,$(1)))
+_qv = $(if $(query_version_$(2)),$(call query_version_$(2),$(1)),$(call dep_commit,$(1)))
+
+query_version_default = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(word 3,$(dep_$(1))),$(pkg_$(1)_commit)))
+query_version_git = $(call query_version_default,$(1))
+query_version_git-subfolder = $(call query_version_git,$(1))
+query_version_git-submodule = -
+query_version_hg = $(call query_version_default,$(1))
+query_version_svn = -
+query_version_cp = -
+query_version_ln = -
+query_version_hex = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(word 2,$(dep_$(1))),$(pkg_$(1)_commit)))
+query_version_fail = -
+query_version_legacy = -
+
+query_extra = $(call _qe,$(1),$(call query_fetch_method,$(1)))
+_qe = $(if $(query_extra_$(2)),$(call query_extra_$(2),$(1)),-)
+
+query_extra_git = -
+query_extra_git-subfolder = $(if $(dep_$(1)),subfolder=$(word 4,$(dep_$(1))),-)
+query_extra_git-submodule = -
+query_extra_hg = -
+query_extra_svn = -
+query_extra_cp = -
+query_extra_ln = -
+query_extra_hex = $(if $(dep_$(1)),package-name=$(word 3,$(dep_$(1))),-)
+query_extra_fail = -
+query_extra_legacy = -
+
+query_absolute_path = $(addprefix $(DEPS_DIR)/,$(call query_name,$(1)))
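+
+# Worked example (hypothetical dependency declaration): given
+#   dep_cowlib = git https://github.com/ninenines/cowlib 2.9.1
+# query_fetch_method yields "git", query_repo the URL above, query_version
+# "2.9.1", query_extra "-" and query_absolute_path $(DEPS_DIR)/cowlib.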
+
+# Deprecated legacy query functions.
+dep_fetch = $(call query_fetch_method,$(1))
+dep_name = $(call query_name,$(1))
+dep_repo = $(call query_repo_git,$(1))
+dep_commit = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(if $(filter hex,$(word 1,$(dep_$(1)))),$(word 2,$(dep_$(1))),$(word 3,$(dep_$(1)))),$(pkg_$(1)_commit)))
+
+LOCAL_DEPS_DIRS = $(foreach a,$(LOCAL_DEPS),$(if $(wildcard $(APPS_DIR)/$(a)),$(APPS_DIR)/$(a)))
+ALL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(foreach dep,$(filter-out $(IGNORE_DEPS),$(BUILD_DEPS) $(DEPS)),$(call dep_name,$(dep))))
+
+# When we are calling an app directly we don't want to include it here;
+# otherwise it'll be treated both as an app and as the top-level project.
+ALL_APPS_DIRS = $(if $(wildcard $(APPS_DIR)/),$(filter-out $(APPS_DIR),$(shell find $(APPS_DIR) -maxdepth 1 -type d)))
+ifdef ROOT_DIR
+ifndef IS_APP
+ALL_APPS_DIRS := $(filter-out $(APPS_DIR)/$(notdir $(CURDIR)),$(ALL_APPS_DIRS))
+endif
+endif
+
+ifeq ($(filter $(APPS_DIR) $(DEPS_DIR),$(subst :, ,$(ERL_LIBS))),)
+ifeq ($(ERL_LIBS),)
+ ERL_LIBS = $(APPS_DIR):$(DEPS_DIR)
+else
+ ERL_LIBS := $(ERL_LIBS):$(APPS_DIR):$(DEPS_DIR)
+endif
+endif
+export ERL_LIBS
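+
+# With the defaults above this exports, for example,
+# ERL_LIBS=$(CURDIR)/apps:$(CURDIR)/deps (appended to any existing value),
+# so both applications and dependencies are visible to the code server.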
+
+export NO_AUTOPATCH
+
+# Verbosity.
+
+dep_verbose_0 = @echo " DEP $1 ($(call dep_commit,$1))";
+dep_verbose_2 = set -x;
+dep_verbose = $(dep_verbose_$(V))
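+
+# For example, `make V=0` (the usual default) prints a terse
+#   DEP cowboy (master)
+# style line per dependency being fetched, while `make V=2` traces the
+# underlying shell commands with `set -x`.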
+
+# Optimization: don't recompile deps unless truly necessary.
+
+ifndef IS_DEP
+ifneq ($(MAKELEVEL),0)
+$(shell rm -f ebin/dep_built)
+endif
+endif
+
+# Core targets.
+
+ALL_APPS_DIRS_TO_BUILD = $(if $(LOCAL_DEPS_DIRS)$(IS_APP),$(LOCAL_DEPS_DIRS),$(ALL_APPS_DIRS))
+
+apps:: $(ALL_APPS_DIRS) clean-tmp-deps.log | $(ERLANG_MK_TMP)
+# Create ebin directory for all apps to make sure Erlang recognizes them
+# as proper OTP applications when using -include_lib. This is a temporary
+# fix; a proper fix would be to compile apps/* in the right order.
+ifndef IS_APP
+ifneq ($(ALL_APPS_DIRS),)
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ mkdir -p $$dep/ebin; \
+ done
+endif
+endif
+# At the toplevel: if LOCAL_DEPS is defined with at least one local app, only
+# compile that list of apps. Otherwise, compile everything.
+# Within an app: compile all LOCAL_DEPS that are (uncompiled) local apps.
+ifneq ($(ALL_APPS_DIRS_TO_BUILD),)
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS_TO_BUILD); do \
+ if grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/apps.log; then \
+ :; \
+ else \
+ echo $$dep >> $(ERLANG_MK_TMP)/apps.log; \
+ $(MAKE) -C $$dep $(if $(IS_TEST),test-build-app) IS_APP=1; \
+ fi \
+ done
+endif
+
+clean-tmp-deps.log:
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_TMP)/apps.log $(ERLANG_MK_TMP)/deps.log
+endif
+
+# Erlang.mk does not rebuild dependencies after they have been compiled
+# once. A developer working on the top-level project and some of its
+# dependencies at the same time may want to change this behavior.
+# There are two solutions:
+# 1. Set `FULL=1` so that all dependencies are visited and
+# recursively recompiled if necessary.
+# 2. Set `FORCE_REBUILD=` to the specific list of dependencies that
+# should be recompiled (instead of the whole set).
+
+FORCE_REBUILD ?=
+
+ifeq ($(origin FULL),undefined)
+ifneq ($(strip $(force_rebuild_dep)$(FORCE_REBUILD)),)
+define force_rebuild_dep
+echo "$(FORCE_REBUILD)" | grep -qw "$$(basename "$1")"
+endef
+endif
+endif
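+
+# Usage sketch: `make FULL=1` visits and recompiles every dependency as
+# needed, while e.g. `make FORCE_REBUILD="cowboy ranch"` (hypothetical
+# dependency names) only forces those two past the ebin/dep_built check.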
+
+ifneq ($(SKIP_DEPS),)
+deps::
+else
+deps:: $(ALL_DEPS_DIRS) apps clean-tmp-deps.log | $(ERLANG_MK_TMP)
+ifneq ($(ALL_DEPS_DIRS),)
+ $(verbose) set -e; for dep in $(ALL_DEPS_DIRS); do \
+ if grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/deps.log; then \
+ :; \
+ else \
+ echo $$dep >> $(ERLANG_MK_TMP)/deps.log; \
+ if [ -z "$(strip $(FULL))" ] $(if $(force_rebuild_dep),&& ! ($(call force_rebuild_dep,$$dep)),) && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ elif [ -f $$dep/GNUmakefile ] || [ -f $$dep/makefile ] || [ -f $$dep/Makefile ]; then \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ else \
+ echo "Error: No Makefile to build dependency $$dep." >&2; \
+ exit 2; \
+ fi \
+ fi \
+ done
+endif
+endif
+
+# Deps related targets.
+
+# @todo rename GNUmakefile and makefile into Makefile first, if they exist
+# While the Makefile could also be named GNUmakefile or makefile,
+# in practice only Makefile has been needed so far.
+define dep_autopatch
+ if [ -f $(DEPS_DIR)/$(1)/erlang.mk ]; then \
+ rm -rf $(DEPS_DIR)/$1/ebin/; \
+ $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+ $(call dep_autopatch_erlang_mk,$(1)); \
+ elif [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+ if [ -f $(DEPS_DIR)/$1/rebar.lock ]; then \
+ $(call dep_autopatch2,$1); \
+ elif [ 0 != `grep -c "include ../\w*\.mk" $(DEPS_DIR)/$(1)/Makefile` ]; then \
+ $(call dep_autopatch2,$(1)); \
+ elif [ 0 != `grep -ci "^[^#].*rebar" $(DEPS_DIR)/$(1)/Makefile` ]; then \
+ $(call dep_autopatch2,$(1)); \
+ elif [ -n "`find $(DEPS_DIR)/$(1)/ -type f -name \*.mk -not -name erlang.mk -exec grep -i "^[^#].*rebar" '{}' \;`" ]; then \
+ $(call dep_autopatch2,$(1)); \
+ fi \
+ else \
+ if [ ! -d $(DEPS_DIR)/$(1)/src/ ]; then \
+ $(call dep_autopatch_noop,$(1)); \
+ else \
+ $(call dep_autopatch2,$(1)); \
+ fi \
+ fi
+endef
+
+define dep_autopatch2
+ ! test -f $(DEPS_DIR)/$1/ebin/$1.app || \
+ mv -n $(DEPS_DIR)/$1/ebin/$1.app $(DEPS_DIR)/$1/src/$1.app.src; \
+ rm -f $(DEPS_DIR)/$1/ebin/$1.app; \
+ if [ -f $(DEPS_DIR)/$1/src/$1.app.src.script ]; then \
+ $(call erlang,$(call dep_autopatch_appsrc_script.erl,$(1))); \
+ fi; \
+ $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+ if [ -f $(DEPS_DIR)/$(1)/rebar -o -f $(DEPS_DIR)/$(1)/rebar.config -o -f $(DEPS_DIR)/$(1)/rebar.config.script -o -f $(DEPS_DIR)/$1/rebar.lock ]; then \
+ $(call dep_autopatch_fetch_rebar); \
+ $(call dep_autopatch_rebar,$(1)); \
+ else \
+ $(call dep_autopatch_gen,$(1)); \
+ fi
+endef
+
+define dep_autopatch_noop
+ printf "noop:\n" > $(DEPS_DIR)/$(1)/Makefile
+endef
+
+# Replace "include erlang.mk" with a line that will load the parent Erlang.mk
+# if given. Do it for all 3 possible Makefile file names.
+ifeq ($(NO_AUTOPATCH_ERLANG_MK),)
+define dep_autopatch_erlang_mk
+ for f in Makefile makefile GNUmakefile; do \
+ if [ -f $(DEPS_DIR)/$1/$$f ]; then \
+ sed -i.bak s/'include *erlang.mk'/'include $$(if $$(ERLANG_MK_FILENAME),$$(ERLANG_MK_FILENAME),erlang.mk)'/ $(DEPS_DIR)/$1/$$f; \
+ fi \
+ done
+endef
+else
+define dep_autopatch_erlang_mk
+ :
+endef
+endif
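+
+# After autopatching, a dependency Makefile line such as "include erlang.mk"
+# reads (illustrative):
+#   include $(if $(ERLANG_MK_FILENAME),$(ERLANG_MK_FILENAME),erlang.mk)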
+
+define dep_autopatch_gen
+ printf "%s\n" \
+ "ERLC_OPTS = +debug_info" \
+ "include ../../erlang.mk" > $(DEPS_DIR)/$(1)/Makefile
+endef
+
+# We use flock/lockf when available to avoid concurrency issues.
+define dep_autopatch_fetch_rebar
+ if command -v flock >/dev/null; then \
+ flock $(ERLANG_MK_TMP)/rebar.lock sh -c "$(call dep_autopatch_fetch_rebar2)"; \
+ elif command -v lockf >/dev/null; then \
+ lockf $(ERLANG_MK_TMP)/rebar.lock sh -c "$(call dep_autopatch_fetch_rebar2)"; \
+ else \
+ $(call dep_autopatch_fetch_rebar2); \
+ fi
+endef
+
+define dep_autopatch_fetch_rebar2
+ if [ ! -d $(ERLANG_MK_TMP)/rebar ]; then \
+ git clone -q -n -- $(REBAR_GIT) $(ERLANG_MK_TMP)/rebar; \
+ cd $(ERLANG_MK_TMP)/rebar; \
+ git checkout -q $(REBAR_COMMIT); \
+ ./bootstrap; \
+ cd -; \
+ fi
+endef
+
+define dep_autopatch_rebar
+ if [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+ mv $(DEPS_DIR)/$(1)/Makefile $(DEPS_DIR)/$(1)/Makefile.orig.mk; \
+ fi; \
+ $(call erlang,$(call dep_autopatch_rebar.erl,$(1))); \
+ rm -f $(DEPS_DIR)/$(1)/ebin/$(1).app
+endef
+
+define dep_autopatch_rebar.erl
+ application:load(rebar),
+ application:set_env(rebar, log_level, debug),
+ rmemo:start(),
+ Conf1 = case file:consult("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config)") of
+ {ok, Conf0} -> Conf0;
+ _ -> []
+ end,
+ {Conf, OsEnv} = fun() ->
+ case filelib:is_file("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)") of
+ false -> {Conf1, []};
+ true ->
+ Bindings0 = erl_eval:new_bindings(),
+ Bindings1 = erl_eval:add_binding('CONFIG', Conf1, Bindings0),
+ Bindings = erl_eval:add_binding('SCRIPT', "$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings1),
+ Before = os:getenv(),
+ {ok, Conf2} = file:script("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings),
+ {Conf2, lists:foldl(fun(E, Acc) -> lists:delete(E, Acc) end, os:getenv(), Before)}
+ end
+ end(),
+ Write = fun (Text) ->
+ file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/Makefile)", Text, [append])
+ end,
+ Escape = fun (Text) ->
+ re:replace(Text, "\\\\$$", "\$$$$", [global, {return, list}])
+ end,
+ Write("IGNORE_DEPS += edown eper eunit_formatters meck node_package "
+ "rebar_lock_deps_plugin rebar_vsn_plugin reltool_util\n"),
+ Write("C_SRC_DIR = /path/do/not/exist\n"),
+ Write("C_SRC_TYPE = rebar\n"),
+ Write("DRV_CFLAGS = -fPIC\nexport DRV_CFLAGS\n"),
+ Write(["ERLANG_ARCH = ", rebar_utils:wordsize(), "\nexport ERLANG_ARCH\n"]),
+ ToList = fun
+ (V) when is_atom(V) -> atom_to_list(V);
+ (V) when is_list(V) -> "'\\"" ++ V ++ "\\"'"
+ end,
+ fun() ->
+ Write("ERLC_OPTS = +debug_info\nexport ERLC_OPTS\n"),
+ case lists:keyfind(erl_opts, 1, Conf) of
+ false -> ok;
+ {_, ErlOpts} ->
+ lists:foreach(fun
+ ({d, D}) ->
+ Write("ERLC_OPTS += -D" ++ ToList(D) ++ "=1\n");
+ ({d, DKey, DVal}) ->
+ Write("ERLC_OPTS += -D" ++ ToList(DKey) ++ "=" ++ ToList(DVal) ++ "\n");
+ ({i, I}) ->
+ Write(["ERLC_OPTS += -I ", I, "\n"]);
+ ({platform_define, Regex, D}) ->
+ case rebar_utils:is_arch(Regex) of
+ true -> Write("ERLC_OPTS += -D" ++ ToList(D) ++ "=1\n");
+ false -> ok
+ end;
+ ({parse_transform, PT}) ->
+ Write("ERLC_OPTS += +'{parse_transform, " ++ ToList(PT) ++ "}'\n");
+ (_) -> ok
+ end, ErlOpts)
+ end,
+ Write("\n")
+ end(),
+ GetHexVsn = fun(N, NP) ->
+ case file:consult("$(call core_native_path,$(DEPS_DIR)/$1/rebar.lock)") of
+ {ok, Lock} ->
+ io:format("~p~n", [Lock]),
+ case lists:keyfind("1.1.0", 1, Lock) of
+ {_, LockPkgs} ->
+ io:format("~p~n", [LockPkgs]),
+ case lists:keyfind(atom_to_binary(N, latin1), 1, LockPkgs) of
+ {_, {pkg, _, Vsn}, _} ->
+ io:format("~p~n", [Vsn]),
+ {N, {hex, NP, binary_to_list(Vsn)}};
+ _ ->
+ false
+ end;
+ _ ->
+ false
+ end;
+ _ ->
+ false
+ end
+ end,
+ SemVsn = fun
+ ("~>" ++ S0) ->
+ S = case S0 of
+ " " ++ S1 -> S1;
+ _ -> S0
+ end,
+ case length([ok || $$. <- S]) of
+ 0 -> S ++ ".0.0";
+ 1 -> S ++ ".0";
+ _ -> S
+ end;
+ (S) -> S
+ end,
+ fun() ->
+ File = case lists:keyfind(deps, 1, Conf) of
+ false -> [];
+ {_, Deps} ->
+ [begin case case Dep of
+ N when is_atom(N) -> GetHexVsn(N, N);
+ {N, S} when is_atom(N), is_list(S) -> {N, {hex, N, SemVsn(S)}};
+ {N, {pkg, NP}} when is_atom(N) -> GetHexVsn(N, NP);
+ {N, S, {pkg, NP}} -> {N, {hex, NP, S}};
+ {N, S} when is_tuple(S) -> {N, S};
+ {N, _, S} -> {N, S};
+ {N, _, S, _} -> {N, S};
+ _ -> false
+ end of
+ false -> ok;
+ {Name, Source} ->
+ {Method, Repo, Commit} = case Source of
+ {hex, NPV, V} -> {hex, V, NPV};
+ {git, R} -> {git, R, master};
+ {M, R, {branch, C}} -> {M, R, C};
+ {M, R, {ref, C}} -> {M, R, C};
+ {M, R, {tag, C}} -> {M, R, C};
+ {M, R, C} -> {M, R, C}
+ end,
+ Write(io_lib:format("DEPS += ~s\ndep_~s = ~s ~s ~s~n", [Name, Name, Method, Repo, Commit]))
+ end end || Dep <- Deps]
+ end
+ end(),
+ fun() ->
+ case lists:keyfind(erl_first_files, 1, Conf) of
+ false -> ok;
+ {_, Files} ->
+ Names = [[" ", case lists:reverse(F) of
+ "lre." ++ Elif -> lists:reverse(Elif);
+ "lrx." ++ Elif -> lists:reverse(Elif);
+ "lry." ++ Elif -> lists:reverse(Elif);
+ Elif -> lists:reverse(Elif)
+ end] || "src/" ++ F <- Files],
+ Write(io_lib:format("COMPILE_FIRST +=~s\n", [Names]))
+ end
+ end(),
+ Write("\n\nrebar_dep: preprocess pre-deps deps pre-app app\n"),
+ Write("\npreprocess::\n"),
+ Write("\npre-deps::\n"),
+ Write("\npre-app::\n"),
+ PatchHook = fun(Cmd) ->
+ Cmd2 = re:replace(Cmd, "^([g]?make)(.*)( -C.*)", "\\\\1\\\\3\\\\2", [{return, list}]),
+ case Cmd2 of
+ "make -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1);
+ "gmake -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1);
+ "make " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1);
+ "gmake " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1);
+ _ -> Escape(Cmd)
+ end
+ end,
+ fun() ->
+ case lists:keyfind(pre_hooks, 1, Conf) of
+ false -> ok;
+ {_, Hooks} ->
+ [case H of
+ {'get-deps', Cmd} ->
+ Write("\npre-deps::\n\t" ++ PatchHook(Cmd) ++ "\n");
+ {compile, Cmd} ->
+ Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n");
+ {Regex, compile, Cmd} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n");
+ false -> ok
+ end;
+ _ -> ok
+ end || H <- Hooks]
+ end
+ end(),
+ ShellToMk = fun(V0) ->
+ V1 = re:replace(V0, "[$$][(]", "$$\(shell ", [global]),
+ V = re:replace(V1, "([$$])(?![(])(\\\\w*)", "\\\\1(\\\\2)", [global]),
+ re:replace(V, "-Werror\\\\b", "", [{return, list}, global])
+ end,
+ PortSpecs = fun() ->
+ case lists:keyfind(port_specs, 1, Conf) of
+ false ->
+ case filelib:is_dir("$(call core_native_path,$(DEPS_DIR)/$1/c_src)") of
+ false -> [];
+ true ->
+ [{"priv/" ++ proplists:get_value(so_name, Conf, "$(1)_drv.so"),
+ proplists:get_value(port_sources, Conf, ["c_src/*.c"]), []}]
+ end;
+ {_, Specs} ->
+ lists:flatten([case S of
+ {Output, Input} -> {ShellToMk(Output), Input, []};
+ {Regex, Output, Input} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {ShellToMk(Output), Input, []};
+ false -> []
+ end;
+ {Regex, Output, Input, [{env, Env}]} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {ShellToMk(Output), Input, Env};
+ false -> []
+ end
+ end || S <- Specs])
+ end
+ end(),
+ PortSpecWrite = fun (Text) ->
+ file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/c_src/Makefile.erlang.mk)", Text, [append])
+ end,
+ case PortSpecs of
+ [] -> ok;
+ _ ->
+ Write("\npre-app::\n\t@$$\(MAKE) --no-print-directory -f c_src/Makefile.erlang.mk\n"),
+ PortSpecWrite(io_lib:format("ERL_CFLAGS ?= -finline-functions -Wall -fPIC -I \\"~s/erts-~s/include\\" -I \\"~s\\"\n",
+ [code:root_dir(), erlang:system_info(version), code:lib_dir(erl_interface, include)])),
+ PortSpecWrite(io_lib:format("ERL_LDFLAGS ?= -L \\"~s\\" -lei\n",
+ [code:lib_dir(erl_interface, lib)])),
+ [PortSpecWrite(["\n", E, "\n"]) || E <- OsEnv],
+ FilterEnv = fun(Env) ->
+ lists:flatten([case E of
+ {_, _} -> E;
+ {Regex, K, V} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {K, V};
+ false -> []
+ end
+ end || E <- Env])
+ end,
+ MergeEnv = fun(Env) ->
+ lists:foldl(fun ({K, V}, Acc) ->
+ case lists:keyfind(K, 1, Acc) of
+ false -> [{K, rebar_utils:expand_env_variable(V, K, "")}|Acc];
+ {_, V0} -> [{K, rebar_utils:expand_env_variable(V, K, V0)}|Acc]
+ end
+ end, [], Env)
+ end,
+ PortEnv = case lists:keyfind(port_env, 1, Conf) of
+ false -> [];
+ {_, PortEnv0} -> FilterEnv(PortEnv0)
+ end,
+ PortSpec = fun ({Output, Input0, Env}) ->
+ filelib:ensure_dir("$(call core_native_path,$(DEPS_DIR)/$1/)" ++ Output),
+ Input = [[" ", I] || I <- Input0],
+ PortSpecWrite([
+ [["\n", K, " = ", ShellToMk(V)] || {K, V} <- lists:reverse(MergeEnv(PortEnv))],
+ case $(PLATFORM) of
+ darwin -> "\n\nLDFLAGS += -flat_namespace -undefined suppress";
+ _ -> ""
+ end,
+ "\n\nall:: ", Output, "\n\t@:\n\n",
+ "%.o: %.c\n\t$$\(CC) -c -o $$\@ $$\< $$\(CFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.C\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.cc\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.cpp\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ [[Output, ": ", K, " += ", ShellToMk(V), "\n"] || {K, V} <- lists:reverse(MergeEnv(FilterEnv(Env)))],
+ Output, ": $$\(foreach ext,.c .C .cc .cpp,",
+ "$$\(patsubst %$$\(ext),%.o,$$\(filter %$$\(ext),$$\(wildcard", Input, "))))\n",
+ "\t$$\(CC) -o $$\@ $$\? $$\(LDFLAGS) $$\(ERL_LDFLAGS) $$\(DRV_LDFLAGS) $$\(EXE_LDFLAGS)",
+ case {filename:extension(Output), $(PLATFORM)} of
+ {[], _} -> "\n";
+ {_, darwin} -> "\n";
+ _ -> " -shared\n"
+ end])
+ end,
+ [PortSpec(S) || S <- PortSpecs]
+ end,
+ fun() ->
+ case lists:keyfind(plugins, 1, Conf) of
+ false -> ok;
+ {_, Plugins0} ->
+ Plugins = [P || P <- Plugins0, is_tuple(P)],
+ case lists:keyfind('lfe-compile', 1, Plugins) of
+ false -> ok;
+ _ -> Write("\nBUILD_DEPS = lfe lfe.mk\ndep_lfe.mk = git https://github.com/ninenines/lfe.mk master\nDEP_PLUGINS = lfe.mk\n")
+ end
+ end
+ end(),
+ Write("\ninclude $$\(if $$\(ERLANG_MK_FILENAME),$$\(ERLANG_MK_FILENAME),erlang.mk)"),
+ RunPlugin = fun(Plugin, Step) ->
+ case erlang:function_exported(Plugin, Step, 2) of
+ false -> ok;
+ true ->
+ c:cd("$(call core_native_path,$(DEPS_DIR)/$1/)"),
+ Ret = Plugin:Step({config, "", Conf, dict:new(), dict:new(), dict:new(),
+ dict:store(base_dir, "", dict:new())}, undefined),
+ io:format("rebar plugin ~p step ~p ret ~p~n", [Plugin, Step, Ret])
+ end
+ end,
+ fun() ->
+ case lists:keyfind(plugins, 1, Conf) of
+ false -> ok;
+ {_, Plugins0} ->
+ Plugins = [P || P <- Plugins0, is_atom(P)],
+ [begin
+ case lists:keyfind(deps, 1, Conf) of
+ false -> ok;
+ {_, Deps} ->
+ case lists:keyfind(P, 1, Deps) of
+ false -> ok;
+ _ ->
+ Path = "$(call core_native_path,$(DEPS_DIR)/)" ++ atom_to_list(P),
+ io:format("~s", [os:cmd("$(MAKE) -C $(call core_native_path,$(DEPS_DIR)/$1) " ++ Path)]),
+ io:format("~s", [os:cmd("$(MAKE) -C " ++ Path ++ " IS_DEP=1")]),
+ code:add_patha(Path ++ "/ebin")
+ end
+ end
+ end || P <- Plugins],
+ [case code:load_file(P) of
+ {module, P} -> ok;
+ _ ->
+ case lists:keyfind(plugin_dir, 1, Conf) of
+ false -> ok;
+ {_, PluginsDir} ->
+ ErlFile = "$(call core_native_path,$(DEPS_DIR)/$1/)" ++ PluginsDir ++ "/" ++ atom_to_list(P) ++ ".erl",
+ {ok, P, Bin} = compile:file(ErlFile, [binary]),
+ {module, P} = code:load_binary(P, ErlFile, Bin)
+ end
+ end || P <- Plugins],
+ [RunPlugin(P, preprocess) || P <- Plugins],
+ [RunPlugin(P, pre_compile) || P <- Plugins],
+ [RunPlugin(P, compile) || P <- Plugins]
+ end
+ end(),
+ halt()
+endef
+
+define dep_autopatch_appsrc_script.erl
+ AppSrc = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+ AppSrcScript = AppSrc ++ ".script",
+ {ok, Conf0} = file:consult(AppSrc),
+ Bindings0 = erl_eval:new_bindings(),
+ Bindings1 = erl_eval:add_binding('CONFIG', Conf0, Bindings0),
+ Bindings = erl_eval:add_binding('SCRIPT', AppSrcScript, Bindings1),
+ Conf = case file:script(AppSrcScript, Bindings) of
+ {ok, [C]} -> C;
+ {ok, C} -> C
+ end,
+ ok = file:write_file(AppSrc, io_lib:format("~p.~n", [Conf])),
+ halt()
+endef
+
+define dep_autopatch_appsrc.erl
+ AppSrcOut = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+ AppSrcIn = case filelib:is_regular(AppSrcOut) of false -> "$(call core_native_path,$(DEPS_DIR)/$1/ebin/$1.app)"; true -> AppSrcOut end,
+ case filelib:is_regular(AppSrcIn) of
+ false -> ok;
+ true ->
+ {ok, [{application, $(1), L0}]} = file:consult(AppSrcIn),
+ L1 = lists:keystore(modules, 1, L0, {modules, []}),
+ L2 = case lists:keyfind(vsn, 1, L1) of
+ {_, git} -> lists:keyreplace(vsn, 1, L1, {vsn, lists:droplast(os:cmd("git -C $(DEPS_DIR)/$1 describe --dirty --tags --always"))});
+ {_, {cmd, _}} -> lists:keyreplace(vsn, 1, L1, {vsn, "cmd"});
+ _ -> L1
+ end,
+ L3 = case lists:keyfind(registered, 1, L2) of false -> [{registered, []}|L2]; _ -> L2 end,
+ ok = file:write_file(AppSrcOut, io_lib:format("~p.~n", [{application, $(1), L3}])),
+ case AppSrcOut of AppSrcIn -> ok; _ -> ok = file:delete(AppSrcIn) end
+ end,
+ halt()
+endef
+
+define dep_fetch_git
+ git clone -q -n -- $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && git checkout -q $(call dep_commit,$(1));
+endef
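+
+# A git dependency is declared as "dep_<name> = git <repo> <ref>", e.g.
+# (hypothetical name and ref):
+#   DEPS += cowlib
+#   dep_cowlib = git https://github.com/ninenines/cowlib master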
+
+define dep_fetch_git-subfolder
+ mkdir -p $(ERLANG_MK_TMP)/git-subfolder; \
+ git clone -q -n -- $(call dep_repo,$1) \
+ $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1); \
+ cd $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1) \
+ && git checkout -q $(call dep_commit,$1); \
+ ln -s $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1)/$(word 4,$(dep_$(1))) \
+ $(DEPS_DIR)/$(call dep_name,$1);
+endef
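+
+# (Illustrative) the fourth word selects the subfolder that gets linked:
+#   dep_myapp = git-subfolder https://example.com/monorepo.git v1.0 apps/myapp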
+
+define dep_fetch_git-submodule
+ git submodule update --init -- $(DEPS_DIR)/$1;
+endef
+
+define dep_fetch_hg
+ hg clone -q -U $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && hg update -q $(call dep_commit,$(1));
+endef
+
+define dep_fetch_svn
+ svn checkout -q $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+define dep_fetch_cp
+ cp -R $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+define dep_fetch_ln
+ ln -s $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
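+
+# The same "dep_<name> = <method> <repo> [<ref>]" shape covers these methods,
+# e.g. (hypothetical):
+#   dep_foo = hg https://example.com/foo default
+#   dep_bar = cp /path/to/local/bar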
+
+# Hex dependencies specify a package version directly, so there is no need
+# to look them up in the Erlang.mk package index.
+define dep_fetch_hex
+ mkdir -p $(ERLANG_MK_TMP)/hex $(DEPS_DIR)/$1; \
+ $(call core_http_get,$(ERLANG_MK_TMP)/hex/$1.tar,\
+ https://repo.hex.pm/tarballs/$(if $(word 3,$(dep_$1)),$(word 3,$(dep_$1)),$1)-$(strip $(word 2,$(dep_$1))).tar); \
+ tar -xOf $(ERLANG_MK_TMP)/hex/$1.tar contents.tar.gz | tar -C $(DEPS_DIR)/$1 -xzf -;
+endef
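+
+# (Illustrative) hex deps take "hex <version> [<package name>]":
+#   dep_jsx = hex 2.9.0
+#   dep_foo = hex 1.0.0 foo_pkg   # when the hex package name differs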
+
+define dep_fetch_fail
+ echo "Error: Unknown or invalid dependency: $(1)." >&2; \
+ exit 78;
+endef
+
+# Kept for compatibility with older Erlang.mk dependency configurations.
+define dep_fetch_legacy
+ $(warning WARNING: '$(1)' dependency configuration uses deprecated format.) \
+ git clone -q -n -- $(word 1,$(dep_$(1))) $(DEPS_DIR)/$(1); \
+ cd $(DEPS_DIR)/$(1) && git checkout -q $(if $(word 2,$(dep_$(1))),$(word 2,$(dep_$(1))),master);
+endef
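+
+# (Illustrative) the deprecated form handled above looked like:
+#   dep_foo = https://example.com/foo.git master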
+
+define dep_target
+$(DEPS_DIR)/$(call dep_name,$1): | $(ERLANG_MK_TMP)
+ $(eval DEP_NAME := $(call dep_name,$1))
+ $(eval DEP_STR := $(if $(filter $1,$(DEP_NAME)),$1,"$1 ($(DEP_NAME))"))
+ $(verbose) if test -d $(APPS_DIR)/$(DEP_NAME); then \
+ echo "Error: Dependency" $(DEP_STR) "conflicts with application found in $(APPS_DIR)/$(DEP_NAME)." >&2; \
+ exit 17; \
+ fi
+ $(verbose) mkdir -p $(DEPS_DIR)
+ $(dep_verbose) $(call dep_fetch_$(strip $(call dep_fetch,$(1))),$(1))
+ $(verbose) if [ -f $(DEPS_DIR)/$(1)/configure.ac -o -f $(DEPS_DIR)/$(1)/configure.in ] \
+ && [ ! -f $(DEPS_DIR)/$(1)/configure ]; then \
+ echo " AUTO " $(DEP_STR); \
+ cd $(DEPS_DIR)/$(1) && autoreconf -Wall -vif -I m4; \
+ fi
+ - $(verbose) if [ -f $(DEPS_DIR)/$(DEP_NAME)/configure ]; then \
+ echo " CONF " $(DEP_STR); \
+ cd $(DEPS_DIR)/$(DEP_NAME) && ./configure; \
+ fi
+ifeq ($(filter $(1),$(NO_AUTOPATCH)),)
+ $(verbose) $$(MAKE) --no-print-directory autopatch-$(DEP_NAME)
+endif
+
+.PHONY: autopatch-$(call dep_name,$1)
+
+autopatch-$(call dep_name,$1)::
+ $(verbose) if [ "$(1)" = "amqp_client" -a "$(RABBITMQ_CLIENT_PATCH)" ]; then \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \
+ echo " PATCH Downloading rabbitmq-codegen"; \
+ git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \
+ fi; \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-server ]; then \
+ echo " PATCH Downloading rabbitmq-server"; \
+ git clone https://github.com/rabbitmq/rabbitmq-server.git $(DEPS_DIR)/rabbitmq-server; \
+ fi; \
+ ln -s $(DEPS_DIR)/amqp_client/deps/rabbit_common-0.0.0 $(DEPS_DIR)/rabbit_common; \
+ elif [ "$(1)" = "rabbit" -a "$(RABBITMQ_SERVER_PATCH)" ]; then \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \
+ echo " PATCH Downloading rabbitmq-codegen"; \
+ git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \
+ fi \
+ elif [ "$1" = "elixir" -a "$(ELIXIR_PATCH)" ]; then \
+ ln -s lib/elixir/ebin $(DEPS_DIR)/elixir/; \
+ else \
+ $$(call dep_autopatch,$(call dep_name,$1)) \
+ fi
+endef
+
+$(foreach dep,$(BUILD_DEPS) $(DEPS),$(eval $(call dep_target,$(dep))))
+
+ifndef IS_APP
+clean:: clean-apps
+
+clean-apps:
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ $(MAKE) -C $$dep clean IS_APP=1; \
+ done
+
+distclean:: distclean-apps
+
+distclean-apps:
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ $(MAKE) -C $$dep distclean IS_APP=1; \
+ done
+endif
+
+ifndef SKIP_DEPS
+distclean:: distclean-deps
+
+distclean-deps:
+ $(gen_verbose) rm -rf $(DEPS_DIR)
+endif
+
+# Forward-declare variables used in core/deps-tools.mk. This is required
+# in case plugins use them.
+
+ERLANG_MK_RECURSIVE_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-deps-list.log
+ERLANG_MK_RECURSIVE_DOC_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-doc-deps-list.log
+ERLANG_MK_RECURSIVE_REL_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-rel-deps-list.log
+ERLANG_MK_RECURSIVE_TEST_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-test-deps-list.log
+ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-shell-deps-list.log
+
+ERLANG_MK_QUERY_DEPS_FILE = $(ERLANG_MK_TMP)/query-deps.log
+ERLANG_MK_QUERY_DOC_DEPS_FILE = $(ERLANG_MK_TMP)/query-doc-deps.log
+ERLANG_MK_QUERY_REL_DEPS_FILE = $(ERLANG_MK_TMP)/query-rel-deps.log
+ERLANG_MK_QUERY_TEST_DEPS_FILE = $(ERLANG_MK_TMP)/query-test-deps.log
+ERLANG_MK_QUERY_SHELL_DEPS_FILE = $(ERLANG_MK_TMP)/query-shell-deps.log
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: clean-app
+
+# Configuration.
+
+ERLC_OPTS ?= -Werror +debug_info +warn_export_vars +warn_shadow_vars \
+ +warn_obsolete_guard # +bin_opt_info +warn_export_all +warn_missing_spec
+COMPILE_FIRST ?=
+COMPILE_FIRST_PATHS = $(addprefix src/,$(addsuffix .erl,$(COMPILE_FIRST)))
+ERLC_EXCLUDE ?=
+ERLC_EXCLUDE_PATHS = $(addprefix src/,$(addsuffix .erl,$(ERLC_EXCLUDE)))
+
+ERLC_ASN1_OPTS ?=
+
+ERLC_MIB_OPTS ?=
+COMPILE_MIB_FIRST ?=
+COMPILE_MIB_FIRST_PATHS = $(addprefix mibs/,$(addsuffix .mib,$(COMPILE_MIB_FIRST)))
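+
+# (Illustrative) a project Makefile typically appends to or overrides these:
+#   ERLC_OPTS += +warn_missing_spec
+#   COMPILE_FIRST = my_behaviour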
+
+# Verbosity.
+
+app_verbose_0 = @echo " APP " $(PROJECT);
+app_verbose_2 = set -x;
+app_verbose = $(app_verbose_$(V))
+
+appsrc_verbose_0 = @echo " APP " $(PROJECT).app.src;
+appsrc_verbose_2 = set -x;
+appsrc_verbose = $(appsrc_verbose_$(V))
+
+makedep_verbose_0 = @echo " DEPEND" $(PROJECT).d;
+makedep_verbose_2 = set -x;
+makedep_verbose = $(makedep_verbose_$(V))
+
+erlc_verbose_0 = @echo " ERLC " $(filter-out $(patsubst %,%.erl,$(ERLC_EXCLUDE)),\
+ $(filter %.erl %.core,$(?F)));
+erlc_verbose_2 = set -x;
+erlc_verbose = $(erlc_verbose_$(V))
+
+xyrl_verbose_0 = @echo " XYRL " $(filter %.xrl %.yrl,$(?F));
+xyrl_verbose_2 = set -x;
+xyrl_verbose = $(xyrl_verbose_$(V))
+
+asn1_verbose_0 = @echo " ASN1 " $(filter %.asn1,$(?F));
+asn1_verbose_2 = set -x;
+asn1_verbose = $(asn1_verbose_$(V))
+
+mib_verbose_0 = @echo " MIB " $(filter %.bin %.mib,$(?F));
+mib_verbose_2 = set -x;
+mib_verbose = $(mib_verbose_$(V))
+
+ifneq ($(wildcard src/),)
+
+# Targets.
+
+app:: $(if $(wildcard ebin/test),clean) deps
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d
+ $(verbose) $(MAKE) --no-print-directory app-build
+
+ifeq ($(wildcard src/$(PROJECT_MOD).erl),)
+define app_file
+{application, '$(PROJECT)', [
+ {description, "$(PROJECT_DESCRIPTION)"},
+ {vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP),
+ {id$(comma)$(space)"$(1)"}$(comma))
+ {modules, [$(call comma_list,$(2))]},
+ {registered, []},
+ {applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(foreach dep,$(DEPS),$(call dep_name,$(dep))))]},
+ {env, $(subst \,\\,$(PROJECT_ENV))}$(if $(findstring {,$(PROJECT_APP_EXTRA_KEYS)),$(comma)$(newline)$(tab)$(subst \,\\,$(PROJECT_APP_EXTRA_KEYS)),)
+]}.
+endef
+else
+define app_file
+{application, '$(PROJECT)', [
+ {description, "$(PROJECT_DESCRIPTION)"},
+ {vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP),
+ {id$(comma)$(space)"$(1)"}$(comma))
+ {modules, [$(call comma_list,$(2))]},
+ {registered, [$(call comma_list,$(PROJECT)_sup $(PROJECT_REGISTERED))]},
+ {applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(foreach dep,$(DEPS),$(call dep_name,$(dep))))]},
+ {mod, {$(PROJECT_MOD), []}},
+ {env, $(subst \,\\,$(PROJECT_ENV))}$(if $(findstring {,$(PROJECT_APP_EXTRA_KEYS)),$(comma)$(newline)$(tab)$(subst \,\\,$(PROJECT_APP_EXTRA_KEYS)),)
+]}.
+endef
+endif
+
+app-build: ebin/$(PROJECT).app
+ $(verbose) :
+
+# Source files.
+
+ALL_SRC_FILES := $(sort $(call core_find,src/,*))
+
+ERL_FILES := $(filter %.erl,$(ALL_SRC_FILES))
+CORE_FILES := $(filter %.core,$(ALL_SRC_FILES))
+
+# ASN.1 files.
+
+ifneq ($(wildcard asn1/),)
+ASN1_FILES = $(sort $(call core_find,asn1/,*.asn1))
+ERL_FILES += $(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES))))
+
+define compile_asn1
+ $(verbose) mkdir -p include/
+ $(asn1_verbose) erlc -v -I include/ -o asn1/ +noobj $(ERLC_ASN1_OPTS) $(1)
+ $(verbose) mv asn1/*.erl src/
+ -$(verbose) mv asn1/*.hrl include/
+ $(verbose) mv asn1/*.asn1db include/
+endef
+
+$(PROJECT).d:: $(ASN1_FILES)
+ $(if $(strip $?),$(call compile_asn1,$?))
+endif
+
+# SNMP MIB files.
+
+ifneq ($(wildcard mibs/),)
+MIB_FILES = $(sort $(call core_find,mibs/,*.mib))
+
+$(PROJECT).d:: $(COMPILE_MIB_FIRST_PATHS) $(MIB_FILES)
+ $(verbose) mkdir -p include/ priv/mibs/
+ $(mib_verbose) erlc -v $(ERLC_MIB_OPTS) -o priv/mibs/ -I priv/mibs/ $?
+ $(mib_verbose) erlc -o include/ -- $(addprefix priv/mibs/,$(patsubst %.mib,%.bin,$(notdir $?)))
+endif
+
+# Leex and Yecc files.
+
+XRL_FILES := $(filter %.xrl,$(ALL_SRC_FILES))
+XRL_ERL_FILES = $(addprefix src/,$(patsubst %.xrl,%.erl,$(notdir $(XRL_FILES))))
+ERL_FILES += $(XRL_ERL_FILES)
+
+YRL_FILES := $(filter %.yrl,$(ALL_SRC_FILES))
+YRL_ERL_FILES = $(addprefix src/,$(patsubst %.yrl,%.erl,$(notdir $(YRL_FILES))))
+ERL_FILES += $(YRL_ERL_FILES)
+
+$(PROJECT).d:: $(XRL_FILES) $(YRL_FILES)
+ $(if $(strip $?),$(xyrl_verbose) erlc -v -o src/ $(YRL_ERLC_OPTS) $?)
+
+# Erlang and Core Erlang files.
+
+define makedep.erl
+ E = ets:new(makedep, [bag]),
+ G = digraph:new([acyclic]),
+ ErlFiles = lists:usort(string:tokens("$(ERL_FILES)", " ")),
+ DepsDir = "$(call core_native_path,$(DEPS_DIR))",
+ AppsDir = "$(call core_native_path,$(APPS_DIR))",
+ DepsDirsSrc = "$(if $(wildcard $(DEPS_DIR)/*/src), $(call core_native_path,$(wildcard $(DEPS_DIR)/*/src)))",
+ DepsDirsInc = "$(if $(wildcard $(DEPS_DIR)/*/include), $(call core_native_path,$(wildcard $(DEPS_DIR)/*/include)))",
+ AppsDirsSrc = "$(if $(wildcard $(APPS_DIR)/*/src), $(call core_native_path,$(wildcard $(APPS_DIR)/*/src)))",
+ AppsDirsInc = "$(if $(wildcard $(APPS_DIR)/*/include), $(call core_native_path,$(wildcard $(APPS_DIR)/*/include)))",
+ DepsDirs = lists:usort(string:tokens(DepsDirsSrc++DepsDirsInc, " ")),
+ AppsDirs = lists:usort(string:tokens(AppsDirsSrc++AppsDirsInc, " ")),
+ Modules = [{list_to_atom(filename:basename(F, ".erl")), F} || F <- ErlFiles],
+ Add = fun (Mod, Dep) ->
+ case lists:keyfind(Dep, 1, Modules) of
+ false -> ok;
+ {_, DepFile} ->
+ {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+ ets:insert(E, {ModFile, DepFile}),
+ digraph:add_vertex(G, Mod),
+ digraph:add_vertex(G, Dep),
+ digraph:add_edge(G, Mod, Dep)
+ end
+ end,
+ AddHd = fun (F, Mod, DepFile) ->
+ case file:open(DepFile, [read]) of
+ {error, enoent} ->
+ ok;
+ {ok, Fd} ->
+ {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+ case ets:match(E, {ModFile, DepFile}) of
+ [] ->
+ ets:insert(E, {ModFile, DepFile}),
+ F(F, Fd, Mod,0);
+ _ -> ok
+ end
+ end
+ end,
+ SearchHrl = fun
+ F(_Hrl, []) -> {error,enoent};
+ F(Hrl, [Dir|Dirs]) ->
+ HrlF = filename:join([Dir,Hrl]),
+ case filelib:is_file(HrlF) of
+ true ->
+ {ok, HrlF};
+ false -> F(Hrl,Dirs)
+ end
+ end,
+ Attr = fun
+ (_F, Mod, behavior, Dep) ->
+ Add(Mod, Dep);
+ (_F, Mod, behaviour, Dep) ->
+ Add(Mod, Dep);
+ (_F, Mod, compile, {parse_transform, Dep}) ->
+ Add(Mod, Dep);
+ (_F, Mod, compile, Opts) when is_list(Opts) ->
+ case proplists:get_value(parse_transform, Opts) of
+ undefined -> ok;
+ Dep -> Add(Mod, Dep)
+ end;
+ (F, Mod, include, Hrl) ->
+ case SearchHrl(Hrl, ["src", "include",AppsDir,DepsDir]++AppsDirs++DepsDirs) of
+ {ok, FoundHrl} -> AddHd(F, Mod, FoundHrl);
+ {error, _} -> false
+ end;
+ (F, Mod, include_lib, Hrl) ->
+ case SearchHrl(Hrl, ["src", "include",AppsDir,DepsDir]++AppsDirs++DepsDirs) of
+ {ok, FoundHrl} -> AddHd(F, Mod, FoundHrl);
+ {error, _} -> false
+ end;
+ (F, Mod, import, {Imp, _}) ->
+ IsFile =
+ case lists:keyfind(Imp, 1, Modules) of
+ false -> false;
+ {_, FilePath} -> filelib:is_file(FilePath)
+ end,
+ case IsFile of
+ false -> ok;
+ true -> Add(Mod, Imp)
+ end;
+ (_, _, _, _) -> ok
+ end,
+ MakeDepend = fun
+ (F, Fd, Mod, StartLocation) ->
+ {ok, Filename} = file:pid2name(Fd),
+ case io:parse_erl_form(Fd, undefined, StartLocation) of
+ {ok, AbsData, EndLocation} ->
+ case AbsData of
+ {attribute, _, Key, Value} ->
+ Attr(F, Mod, Key, Value),
+ F(F, Fd, Mod, EndLocation);
+ _ -> F(F, Fd, Mod, EndLocation)
+ end;
+ {eof, _ } -> file:close(Fd);
+ {error, ErrorDescription } ->
+ file:close(Fd);
+ {error, ErrorInfo, ErrorLocation} ->
+ F(F, Fd, Mod, ErrorLocation)
+ end,
+ ok
+ end,
+ [begin
+ Mod = list_to_atom(filename:basename(F, ".erl")),
+ case file:open(F, [read]) of
+ {ok, Fd} -> MakeDepend(MakeDepend, Fd, Mod,0);
+ {error, enoent} -> ok
+ end
+ end || F <- ErlFiles],
+ Depend = sofs:to_external(sofs:relation_to_family(sofs:relation(ets:tab2list(E)))),
+ CompileFirst = [X || X <- lists:reverse(digraph_utils:topsort(G)), [] =/= digraph:in_neighbours(G, X)],
+ TargetPath = fun(Target) ->
+ case lists:keyfind(Target, 1, Modules) of
+ false -> "";
+ {_, DepFile} ->
+ DirSubname = tl(string:tokens(filename:dirname(DepFile), "/")),
+ string:join(DirSubname ++ [atom_to_list(Target)], "/")
+ end
+ end,
+ Output0 = [
+ "# Generated by Erlang.mk. Edit at your own risk!\n\n",
+ [[F, "::", [[" ", D] || D <- Deps], "; @touch \$$@\n"] || {F, Deps} <- Depend],
+ "\nCOMPILE_FIRST +=", [[" ", TargetPath(CF)] || CF <- CompileFirst], "\n"
+ ],
+ Output = case "é" of
+ [233] -> unicode:characters_to_binary(Output0);
+ _ -> Output0
+ end,
+ ok = file:write_file("$(1)", Output),
+ halt()
+endef
+
+ifeq ($(if $(NO_MAKEDEP),$(wildcard $(PROJECT).d),),)
+$(PROJECT).d:: $(ERL_FILES) $(call core_find,include/,*.hrl) $(MAKEFILE_LIST)
+ $(makedep_verbose) $(call erlang,$(call makedep.erl,$@))
+endif
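+
+# (Illustrative) setting NO_MAKEDEP=1 skips regenerating $(PROJECT).d when it
+# already exists, trading dependency accuracy for a faster rebuild:
+#   make NO_MAKEDEP=1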
+
+ifeq ($(IS_APP)$(IS_DEP),)
+ifneq ($(words $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES)),0)
+# Rebuild everything when the Makefile changes.
+$(ERLANG_MK_TMP)/last-makefile-change: $(MAKEFILE_LIST) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES); \
+ touch -c $(PROJECT).d; \
+ fi
+ $(verbose) touch $@
+
+$(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES):: $(ERLANG_MK_TMP)/last-makefile-change
+ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change
+endif
+endif
+
+$(PROJECT).d::
+ $(verbose) :
+
+include $(wildcard $(PROJECT).d)
+
+ebin/$(PROJECT).app:: ebin/
+
+ebin/:
+ $(verbose) mkdir -p ebin/
+
+define compile_erl
+ $(erlc_verbose) erlc -v $(if $(IS_DEP),$(filter-out -Werror,$(ERLC_OPTS)),$(ERLC_OPTS)) -o ebin/ \
+ -pa ebin/ -I include/ $(filter-out $(ERLC_EXCLUDE_PATHS),$(COMPILE_FIRST_PATHS) $(1))
+endef
+
+define validate_app_file
+ case file:consult("ebin/$(PROJECT).app") of
+ {ok, _} -> halt();
+ _ -> halt(1)
+ end
+endef
+
+ebin/$(PROJECT).app:: $(ERL_FILES) $(CORE_FILES) $(wildcard src/$(PROJECT).app.src)
+ $(eval FILES_TO_COMPILE := $(filter-out src/$(PROJECT).app.src,$?))
+ $(if $(strip $(FILES_TO_COMPILE)),$(call compile_erl,$(FILES_TO_COMPILE)))
+# Older git versions do not have the --first-parent flag. Do without in that case.
+ $(eval GITDESCRIBE := $(shell git describe --dirty --abbrev=7 --tags --always --first-parent 2>/dev/null \
+ || git describe --dirty --abbrev=7 --tags --always 2>/dev/null || true))
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(filter-out $(ERLC_EXCLUDE_PATHS),$(ERL_FILES) $(CORE_FILES) $(BEAM_FILES)))))))
+ifeq ($(wildcard src/$(PROJECT).app.src),)
+ $(app_verbose) printf '$(subst %,%%,$(subst $(newline),\n,$(subst ','\'',$(call app_file,$(GITDESCRIBE),$(MODULES)))))' \
+ > ebin/$(PROJECT).app
+ $(verbose) if ! $(call erlang,$(call validate_app_file)); then \
+ echo "The .app file produced is invalid. Please verify the value of PROJECT_ENV." >&2; \
+ exit 1; \
+ fi
+else
+ $(verbose) if [ -z "$$(grep -e '^[^%]*{\s*modules\s*,' src/$(PROJECT).app.src)" ]; then \
+ echo "Empty modules entry not found in $(PROJECT).app.src. Please consult the erlang.mk documentation for instructions." >&2; \
+ exit 1; \
+ fi
+ $(appsrc_verbose) cat src/$(PROJECT).app.src \
+ | sed "s/{[[:space:]]*modules[[:space:]]*,[[:space:]]*\[\]}/{modules, \[$(call comma_list,$(MODULES))\]}/" \
+ | sed "s/{id,[[:space:]]*\"git\"}/{id, \"$(subst /,\/,$(GITDESCRIBE))\"}/" \
+ > ebin/$(PROJECT).app
+endif
+ifneq ($(wildcard src/$(PROJECT).appup),)
+ $(verbose) cp src/$(PROJECT).appup ebin/
+endif
+
+clean:: clean-app
+
+clean-app:
+ $(gen_verbose) rm -rf $(PROJECT).d ebin/ priv/mibs/ $(XRL_ERL_FILES) $(YRL_ERL_FILES) \
+ $(addprefix include/,$(patsubst %.mib,%.hrl,$(notdir $(MIB_FILES)))) \
+ $(addprefix include/,$(patsubst %.asn1,%.hrl,$(notdir $(ASN1_FILES)))) \
+ $(addprefix include/,$(patsubst %.asn1,%.asn1db,$(notdir $(ASN1_FILES)))) \
+ $(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES))))
+
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Viktor Söderqvist <viktor@zuiderkwast.se>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: docs-deps
+
+# Configuration.
+
+ALL_DOC_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(DOC_DEPS))
+
+# Targets.
+
+$(foreach dep,$(DOC_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+doc-deps:
+else
+doc-deps: $(ALL_DOC_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_DOC_DEPS_DIRS) ; do $(MAKE) -C $$dep IS_DEP=1; done
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: rel-deps
+
+# Configuration.
+
+ALL_REL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(REL_DEPS))
+
+# Targets.
+
+$(foreach dep,$(REL_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+rel-deps:
+else
+rel-deps: $(ALL_REL_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_REL_DEPS_DIRS) ; do $(MAKE) -C $$dep; done
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: test-deps test-dir test-build clean-test-dir
+
+# Configuration.
+
+TEST_DIR ?= $(CURDIR)/test
+
+ALL_TEST_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(TEST_DEPS))
+
+TEST_ERLC_OPTS ?= +debug_info +warn_export_vars +warn_shadow_vars +warn_obsolete_guard
+TEST_ERLC_OPTS += -DTEST=1
+
+# Targets.
+
+$(foreach dep,$(TEST_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+test-deps:
+else
+test-deps: $(ALL_TEST_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_TEST_DEPS_DIRS) ; do \
+ if [ -z "$(strip $(FULL))" ] && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ else \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ fi \
+ done
+endif
+
+ifneq ($(wildcard $(TEST_DIR)),)
+test-dir: $(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build
+ @:
+
+test_erlc_verbose_0 = @echo " ERLC " $(filter-out $(patsubst %,%.erl,$(ERLC_EXCLUDE)),\
+ $(filter %.erl %.core,$(notdir $(FILES_TO_COMPILE))));
+test_erlc_verbose_2 = set -x;
+test_erlc_verbose = $(test_erlc_verbose_$(V))
+
+define compile_test_erl
+ $(test_erlc_verbose) erlc -v $(TEST_ERLC_OPTS) -o $(TEST_DIR) \
+ -pa ebin/ -I include/ $(1)
+endef
+
+ERL_TEST_FILES = $(call core_find,$(TEST_DIR)/,*.erl)
+$(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build: $(ERL_TEST_FILES) $(MAKEFILE_LIST)
+ $(eval FILES_TO_COMPILE := $(if $(filter $(MAKEFILE_LIST),$?),$(filter $(ERL_TEST_FILES),$^),$?))
+ $(if $(strip $(FILES_TO_COMPILE)),$(call compile_test_erl,$(FILES_TO_COMPILE)) && touch $@)
+endif
+
+test-build:: IS_TEST=1
+test-build:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build:: $(if $(wildcard src),$(if $(wildcard ebin/test),,clean)) $(if $(IS_APP),,deps test-deps)
+# We already compiled everything when IS_APP=1.
+ifndef IS_APP
+ifneq ($(wildcard src),)
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(verbose) $(MAKE) --no-print-directory app-build ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(gen_verbose) touch ebin/test
+endif
+ifneq ($(wildcard $(TEST_DIR)),)
+ $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+endif
+endif
+
+# Roughly the same as test-build, but when IS_APP=1.
+# We only care about compiling the current application.
+ifdef IS_APP
+test-build-app:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build-app:: deps test-deps
+ifneq ($(wildcard src),)
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(verbose) $(MAKE) --no-print-directory app-build ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(gen_verbose) touch ebin/test
+endif
+ifneq ($(wildcard $(TEST_DIR)),)
+ $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+endif
+endif
+
+clean:: clean-test-dir
+
+clean-test-dir:
+ifneq ($(wildcard $(TEST_DIR)/*.beam),)
+ $(gen_verbose) rm -f $(TEST_DIR)/*.beam $(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: rebar.config
+
+# We strip out -Werror because we don't want to fail due to
+# warnings when used as a dependency.
+
+compat_prepare_erlc_opts = $(shell echo "$1" | sed 's/, */,/g')
+
+define compat_convert_erlc_opts
+$(if $(filter-out -Werror,$1),\
+ $(if $(findstring +,$1),\
+ $(shell echo $1 | cut -b 2-)))
+endef
+
+define compat_erlc_opts_to_list
+[$(call comma_list,$(foreach o,$(call compat_prepare_erlc_opts,$1),$(call compat_convert_erlc_opts,$o)))]
+endef
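+
+# For example (illustrative), ERLC_OPTS = -Werror +debug_info +warn_export_vars
+# renders as {erl_opts, [debug_info,warn_export_vars]} in rebar.config.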
+
+define compat_rebar_config
+{deps, [
+$(call comma_list,$(foreach d,$(DEPS),\
+ $(if $(filter hex,$(call dep_fetch,$d)),\
+ {$(call dep_name,$d)$(comma)"$(call dep_repo,$d)"},\
+ {$(call dep_name,$d)$(comma)".*"$(comma){git,"$(call dep_repo,$d)"$(comma)"$(call dep_commit,$d)"}})))
+]}.
+{erl_opts, $(call compat_erlc_opts_to_list,$(ERLC_OPTS))}.
+endef
+
+rebar.config:
+ $(gen_verbose) $(call core_render,compat_rebar_config,rebar.config)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter asciideck,$(DEPS) $(DOC_DEPS)),asciideck)
+
+.PHONY: asciidoc asciidoc-guide asciidoc-manual install-asciidoc distclean-asciidoc-guide distclean-asciidoc-manual
+
+# Core targets.
+
+docs:: asciidoc
+
+distclean:: distclean-asciidoc-guide distclean-asciidoc-manual
+
+# Plugin-specific targets.
+
+asciidoc: asciidoc-guide asciidoc-manual
+
+# User guide.
+
+ifeq ($(wildcard doc/src/guide/book.asciidoc),)
+asciidoc-guide:
+else
+asciidoc-guide: distclean-asciidoc-guide doc-deps
+ a2x -v -f pdf doc/src/guide/book.asciidoc && mv doc/src/guide/book.pdf doc/guide.pdf
+ a2x -v -f chunked doc/src/guide/book.asciidoc && mv doc/src/guide/book.chunked/ doc/html/
+
+distclean-asciidoc-guide:
+ $(gen_verbose) rm -rf doc/html/ doc/guide.pdf
+endif
+
+# Man pages.
+
+ASCIIDOC_MANUAL_FILES := $(wildcard doc/src/manual/*.asciidoc)
+
+ifeq ($(ASCIIDOC_MANUAL_FILES),)
+asciidoc-manual:
+else
+
+# Configuration.
+
+MAN_INSTALL_PATH ?= /usr/local/share/man
+MAN_SECTIONS ?= 3 7
+MAN_PROJECT ?= $(shell echo $(PROJECT) | sed 's/^./\U&\E/')
+MAN_VERSION ?= $(PROJECT_VERSION)
+
+# Plugin-specific targets.
+
+define asciidoc2man.erl
+try
+ [begin
+ io:format(" ADOC ~s~n", [F]),
+ ok = asciideck:to_manpage(asciideck:parse_file(F), #{
+ compress => gzip,
+ outdir => filename:dirname(F),
+ extra2 => "$(MAN_PROJECT) $(MAN_VERSION)",
+ extra3 => "$(MAN_PROJECT) Function Reference"
+ })
+ end || F <- [$(shell echo $(addprefix $(comma)\",$(addsuffix \",$1)) | sed 's/^.//')]],
+ halt(0)
+catch C:E ->
+ io:format("Exception ~p:~p~nStacktrace: ~p~n", [C, E, erlang:get_stacktrace()]),
+ halt(1)
+end.
+endef
+
+asciidoc-manual:: doc-deps
+
+asciidoc-manual:: $(ASCIIDOC_MANUAL_FILES)
+ $(gen_verbose) $(call erlang,$(call asciidoc2man.erl,$?))
+ $(verbose) $(foreach s,$(MAN_SECTIONS),mkdir -p doc/man$s/ && mv doc/src/manual/*.$s.gz doc/man$s/;)
+
+install-docs:: install-asciidoc
+
+install-asciidoc: asciidoc-manual
+ $(foreach s,$(MAN_SECTIONS),\
+ mkdir -p $(MAN_INSTALL_PATH)/man$s/ && \
+ install -g `id -g` -o `id -u` -m 0644 doc/man$s/*.gz $(MAN_INSTALL_PATH)/man$s/;)
+
+distclean-asciidoc-manual:
+ $(gen_verbose) rm -rf $(addprefix doc/man,$(MAN_SECTIONS))
+endif
+endif
+
+# Copyright (c) 2014-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: bootstrap bootstrap-lib bootstrap-rel new list-templates
+
+# Core targets.
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Bootstrap targets:" \
+ " bootstrap Generate a skeleton of an OTP application" \
+ " bootstrap-lib Generate a skeleton of an OTP library" \
+ " bootstrap-rel Generate the files needed to build a release" \
+ " new-app in=NAME Create a new local OTP application NAME" \
+ " new-lib in=NAME Create a new local OTP library NAME" \
+ " new t=TPL n=NAME Generate a module NAME based on the template TPL" \
+ " new t=T n=N in=APP Generate a module NAME based on the template TPL in APP" \
+ " list-templates List available templates"
+
+# Bootstrap templates.
+
+define bs_appsrc
+{application, $p, [
+ {description, ""},
+ {vsn, "0.1.0"},
+ {id, "git"},
+ {modules, []},
+ {registered, []},
+ {applications, [
+ kernel,
+ stdlib
+ ]},
+ {mod, {$p_app, []}},
+ {env, []}
+]}.
+endef
+
+define bs_appsrc_lib
+{application, $p, [
+ {description, ""},
+ {vsn, "0.1.0"},
+ {id, "git"},
+ {modules, []},
+ {registered, []},
+ {applications, [
+ kernel,
+ stdlib
+ ]}
+]}.
+endef
+
+# To prevent autocompletion issues with ZSH, we add "include erlang.mk"
+# separately during the actual bootstrap.
+define bs_Makefile
+PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.1.0
+$(if $(SP),
+# Whitespace to be used when creating files from templates.
+SP = $(SP)
+)
+endef
+
+define bs_apps_Makefile
+PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.1.0
+$(if $(SP),
+# Whitespace to be used when creating files from templates.
+SP = $(SP)
+)
+# Make sure we know where the applications are located.
+ROOT_DIR ?= $(call core_relpath,$(dir $(ERLANG_MK_FILENAME)),$(APPS_DIR)/app)
+APPS_DIR ?= ..
+DEPS_DIR ?= $(call core_relpath,$(DEPS_DIR),$(APPS_DIR)/app)
+
+include $$(ROOT_DIR)/erlang.mk
+endef
+
+define bs_app
+-module($p_app).
+-behaviour(application).
+
+-export([start/2]).
+-export([stop/1]).
+
+start(_Type, _Args) ->
+ $p_sup:start_link().
+
+stop(_State) ->
+ ok.
+endef
+
+define bs_relx_config
+{release, {$p_release, "1"}, [$p, sasl, runtime_tools]}.
+{extended_start_script, true}.
+{sys_config, "config/sys.config"}.
+{vm_args, "config/vm.args"}.
+endef
+
+define bs_sys_config
+[
+].
+endef
+
+define bs_vm_args
+-name $p@127.0.0.1
+-setcookie $p
+-heart
+endef
+
+# Normal templates.
+
+define tpl_supervisor
+-module($(n)).
+-behaviour(supervisor).
+
+-export([start_link/0]).
+-export([init/1]).
+
+start_link() ->
+ supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+init([]) ->
+ Procs = [],
+ {ok, {{one_for_one, 1, 5}, Procs}}.
+endef
+
+define tpl_gen_server
+-module($(n)).
+-behaviour(gen_server).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_server.
+-export([init/1]).
+-export([handle_call/3]).
+-export([handle_cast/2]).
+-export([handle_info/2]).
+-export([terminate/2]).
+-export([code_change/3]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_server:start_link(?MODULE, [], []).
+
+%% gen_server.
+
+init([]) ->
+ {ok, #state{}}.
+
+handle_call(_Request, _From, State) ->
+ {reply, ignored, State}.
+
+handle_cast(_Msg, State) ->
+ {noreply, State}.
+
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+endef
+
+define tpl_module
+-module($(n)).
+-export([]).
+endef
+
+define tpl_cowboy_http
+-module($(n)).
+-behaviour(cowboy_http_handler).
+
+-export([init/3]).
+-export([handle/2]).
+-export([terminate/3]).
+
+-record(state, {
+}).
+
+init(_, Req, _Opts) ->
+ {ok, Req, #state{}}.
+
+handle(Req, State=#state{}) ->
+ {ok, Req2} = cowboy_req:reply(200, Req),
+ {ok, Req2, State}.
+
+terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_gen_fsm
+-module($(n)).
+-behaviour(gen_fsm).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_fsm.
+-export([init/1]).
+-export([state_name/2]).
+-export([handle_event/3]).
+-export([state_name/3]).
+-export([handle_sync_event/4]).
+-export([handle_info/3]).
+-export([terminate/3]).
+-export([code_change/4]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_fsm:start_link(?MODULE, [], []).
+
+%% gen_fsm.
+
+init([]) ->
+ {ok, state_name, #state{}}.
+
+state_name(_Event, StateData) ->
+ {next_state, state_name, StateData}.
+
+handle_event(_Event, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+state_name(_Event, _From, StateData) ->
+ {reply, ignored, state_name, StateData}.
+
+handle_sync_event(_Event, _From, StateName, StateData) ->
+ {reply, ignored, StateName, StateData}.
+
+handle_info(_Info, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+terminate(_Reason, _StateName, _StateData) ->
+ ok.
+
+code_change(_OldVsn, StateName, StateData, _Extra) ->
+ {ok, StateName, StateData}.
+endef
+
+define tpl_gen_statem
+-module($(n)).
+-behaviour(gen_statem).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_statem.
+-export([callback_mode/0]).
+-export([init/1]).
+-export([state_name/3]).
+-export([handle_event/4]).
+-export([terminate/3]).
+-export([code_change/4]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_statem:start_link(?MODULE, [], []).
+
+%% gen_statem.
+
+callback_mode() ->
+ state_functions.
+
+init([]) ->
+ {ok, state_name, #state{}}.
+
+state_name(_EventType, _EventData, StateData) ->
+ {next_state, state_name, StateData}.
+
+handle_event(_EventType, _EventData, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+terminate(_Reason, _StateName, _StateData) ->
+ ok.
+
+code_change(_OldVsn, StateName, StateData, _Extra) ->
+ {ok, StateName, StateData}.
+endef
+
+define tpl_cowboy_loop
+-module($(n)).
+-behaviour(cowboy_loop_handler).
+
+-export([init/3]).
+-export([info/3]).
+-export([terminate/3]).
+
+-record(state, {
+}).
+
+init(_, Req, _Opts) ->
+ {loop, Req, #state{}, 5000, hibernate}.
+
+info(_Info, Req, State) ->
+ {loop, Req, State, hibernate}.
+
+terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_cowboy_rest
+-module($(n)).
+
+-export([init/3]).
+-export([content_types_provided/2]).
+-export([get_html/2]).
+
+init(_, _Req, _Opts) ->
+ {upgrade, protocol, cowboy_rest}.
+
+content_types_provided(Req, State) ->
+ {[{{<<"text">>, <<"html">>, '*'}, get_html}], Req, State}.
+
+get_html(Req, State) ->
+ {<<"<html><body>This is REST!</body></html>">>, Req, State}.
+endef
+
+define tpl_cowboy_ws
+-module($(n)).
+-behaviour(cowboy_websocket_handler).
+
+-export([init/3]).
+-export([websocket_init/3]).
+-export([websocket_handle/3]).
+-export([websocket_info/3]).
+-export([websocket_terminate/3]).
+
+-record(state, {
+}).
+
+init(_, _, _) ->
+ {upgrade, protocol, cowboy_websocket}.
+
+websocket_init(_, Req, _Opts) ->
+ Req2 = cowboy_req:compact(Req),
+ {ok, Req2, #state{}}.
+
+websocket_handle({text, Data}, Req, State) ->
+ {reply, {text, Data}, Req, State};
+websocket_handle({binary, Data}, Req, State) ->
+ {reply, {binary, Data}, Req, State};
+websocket_handle(_Frame, Req, State) ->
+ {ok, Req, State}.
+
+websocket_info(_Info, Req, State) ->
+ {ok, Req, State}.
+
+websocket_terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_ranch_protocol
+-module($(n)).
+-behaviour(ranch_protocol).
+
+-export([start_link/4]).
+-export([init/4]).
+
+-type opts() :: [].
+-export_type([opts/0]).
+
+-record(state, {
+ socket :: inet:socket(),
+ transport :: module()
+}).
+
+start_link(Ref, Socket, Transport, Opts) ->
+ Pid = spawn_link(?MODULE, init, [Ref, Socket, Transport, Opts]),
+ {ok, Pid}.
+
+-spec init(ranch:ref(), inet:socket(), module(), opts()) -> ok.
+init(Ref, Socket, Transport, _Opts) ->
+ ok = ranch:accept_ack(Ref),
+ loop(#state{socket=Socket, transport=Transport}).
+
+loop(State) ->
+ loop(State).
+endef
+
+# Plugin-specific targets.
+
+ifndef WS
+ifdef SP
+WS = $(subst a,,a $(wordlist 1,$(SP),a a a a a a a a a a a a a a a a a a a a))
+else
+WS = $(tab)
+endif
+endif
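+
+# (Illustrative) "make bootstrap SP=2" makes WS expand to two spaces, so the
+# generated files are indented with spaces; by default WS is a tab.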
+
+bootstrap:
+ifneq ($(wildcard src/),)
+ $(error Error: src/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(eval n := $(PROJECT)_sup)
+ $(verbose) $(call core_render,bs_Makefile,Makefile)
+ $(verbose) echo "include erlang.mk" >> Makefile
+ $(verbose) mkdir src/
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc,src/$(PROJECT).app.src)
+endif
+ $(verbose) $(call core_render,bs_app,src/$(PROJECT)_app.erl)
+ $(verbose) $(call core_render,tpl_supervisor,src/$(PROJECT)_sup.erl)
+
+bootstrap-lib:
+ifneq ($(wildcard src/),)
+ $(error Error: src/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(verbose) $(call core_render,bs_Makefile,Makefile)
+ $(verbose) echo "include erlang.mk" >> Makefile
+ $(verbose) mkdir src/
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc_lib,src/$(PROJECT).app.src)
+endif
+
+bootstrap-rel:
+ifneq ($(wildcard relx.config),)
+ $(error Error: relx.config already exists)
+endif
+ifneq ($(wildcard config/),)
+ $(error Error: config/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(verbose) $(call core_render,bs_relx_config,relx.config)
+ $(verbose) mkdir config/
+ $(verbose) $(call core_render,bs_sys_config,config/sys.config)
+ $(verbose) $(call core_render,bs_vm_args,config/vm.args)
+
+new-app:
+ifndef in
+ $(error Usage: $(MAKE) new-app in=APP)
+endif
+ifneq ($(wildcard $(APPS_DIR)/$in),)
+ $(error Error: Application $in already exists)
+endif
+ $(eval p := $(in))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(eval n := $(in)_sup)
+ $(verbose) mkdir -p $(APPS_DIR)/$p/src/
+ $(verbose) $(call core_render,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile)
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc,$(APPS_DIR)/$p/src/$p.app.src)
+endif
+ $(verbose) $(call core_render,bs_app,$(APPS_DIR)/$p/src/$p_app.erl)
+ $(verbose) $(call core_render,tpl_supervisor,$(APPS_DIR)/$p/src/$p_sup.erl)
+
+new-lib:
+ifndef in
+ $(error Usage: $(MAKE) new-lib in=APP)
+endif
+ifneq ($(wildcard $(APPS_DIR)/$in),)
+ $(error Error: Application $in already exists)
+endif
+ $(eval p := $(in))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(verbose) mkdir -p $(APPS_DIR)/$p/src/
+ $(verbose) $(call core_render,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile)
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc_lib,$(APPS_DIR)/$p/src/$p.app.src)
+endif
+
+new:
+ifeq ($(wildcard src/)$(in),)
+ $(error Error: src/ directory does not exist)
+endif
+ifndef t
+ $(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP])
+endif
+ifndef n
+ $(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP])
+endif
+ifdef in
+ $(verbose) $(call core_render,tpl_$(t),$(APPS_DIR)/$(in)/src/$(n).erl)
+else
+ $(verbose) $(call core_render,tpl_$(t),src/$(n).erl)
+endif
+
+list-templates:
+	$(verbose) echo Available templates:
+ $(verbose) printf " %s\n" $(sort $(patsubst tpl_%,%,$(filter tpl_%,$(.VARIABLES))))
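+
+# For example (hypothetical module names):
+#   make new t=gen_server n=my_server
+#   make new t=supervisor n=my_sup in=myapp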
+
+# Copyright (c) 2014-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: clean-c_src distclean-c_src-env
+
+# Configuration.
+
+C_SRC_DIR ?= $(CURDIR)/c_src
+C_SRC_ENV ?= $(C_SRC_DIR)/env.mk
+C_SRC_OUTPUT ?= $(CURDIR)/priv/$(PROJECT)
+C_SRC_TYPE ?= shared
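+
+# (Illustrative) a NIF project can override these in its Makefile:
+#   C_SRC_OUTPUT = $(CURDIR)/priv/my_nif
+#   C_SRC_TYPE = shared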
+
+# System type and C compiler/flags.
+
+ifeq ($(PLATFORM),msys2)
+ C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?= .exe
+ C_SRC_OUTPUT_SHARED_EXTENSION ?= .dll
+else
+ C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?=
+ C_SRC_OUTPUT_SHARED_EXTENSION ?= .so
+endif
+
+ifeq ($(C_SRC_TYPE),shared)
+ C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_SHARED_EXTENSION)
+else
+ C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_EXECUTABLE_EXTENSION)
+endif
+
+ifeq ($(PLATFORM),msys2)
+# We hardcode the compiler used on MSYS2. The default CC=cc does
+# not produce working code. The "gcc" MSYS2 package also doesn't.
+ CC = /mingw64/bin/gcc
+ export CC
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),darwin)
+ CC ?= cc
+ CFLAGS ?= -O3 -std=c99 -arch x86_64 -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -arch x86_64 -Wall
+ LDFLAGS ?= -arch x86_64 -flat_namespace -undefined suppress
+else ifeq ($(PLATFORM),freebsd)
+ CC ?= cc
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),linux)
+ CC ?= gcc
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+endif
+
+ifneq ($(PLATFORM),msys2)
+ CFLAGS += -fPIC
+ CXXFLAGS += -fPIC
+endif
+
+CFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
+CXXFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
+
+LDLIBS += -L"$(ERL_INTERFACE_LIB_DIR)" -lei
+
+# Verbosity.
+
+c_verbose_0 = @echo " C " $(filter-out $(notdir $(MAKEFILE_LIST) $(C_SRC_ENV)),$(^F));
+c_verbose = $(c_verbose_$(V))
+
+cpp_verbose_0 = @echo " CPP " $(filter-out $(notdir $(MAKEFILE_LIST) $(C_SRC_ENV)),$(^F));
+cpp_verbose = $(cpp_verbose_$(V))
+
+link_verbose_0 = @echo " LD " $(@F);
+link_verbose = $(link_verbose_$(V))
+
+# Targets.
+
+ifeq ($(wildcard $(C_SRC_DIR)),)
+else ifneq ($(wildcard $(C_SRC_DIR)/Makefile),)
+app:: app-c_src
+
+test-build:: app-c_src
+
+app-c_src:
+ $(MAKE) -C $(C_SRC_DIR)
+
+clean::
+ $(MAKE) -C $(C_SRC_DIR) clean
+
+else
+
+ifeq ($(SOURCES),)
+SOURCES := $(sort $(foreach pat,*.c *.C *.cc *.cpp,$(call core_find,$(C_SRC_DIR)/,$(pat))))
+endif
+OBJECTS = $(addsuffix .o, $(basename $(SOURCES)))
+
+COMPILE_C = $(c_verbose) $(CC) $(CFLAGS) $(CPPFLAGS) -c
+COMPILE_CPP = $(cpp_verbose) $(CXX) $(CXXFLAGS) $(CPPFLAGS) -c
+
+app:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
+
+test-build:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
+
+$(C_SRC_OUTPUT_FILE): $(OBJECTS)
+ $(verbose) mkdir -p $(dir $@)
+ $(link_verbose) $(CC) $(OBJECTS) \
+ $(LDFLAGS) $(if $(filter $(C_SRC_TYPE),shared),-shared) $(LDLIBS) \
+ -o $(C_SRC_OUTPUT_FILE)
+
+$(OBJECTS): $(MAKEFILE_LIST) $(C_SRC_ENV)
+
+%.o: %.c
+ $(COMPILE_C) $(OUTPUT_OPTION) $<
+
+%.o: %.cc
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+%.o: %.C
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+%.o: %.cpp
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+clean:: clean-c_src
+
+clean-c_src:
+ $(gen_verbose) rm -f $(C_SRC_OUTPUT_FILE) $(OBJECTS)
+
+endif
+
+ifneq ($(wildcard $(C_SRC_DIR)),)
+ERL_ERTS_DIR = $(shell $(ERL) -eval 'io:format("~s~n", [code:lib_dir(erts)]), halt().')
+
+$(C_SRC_ENV):
+ $(verbose) $(ERL) -eval "file:write_file(\"$(call core_native_path,$(C_SRC_ENV))\", \
+ io_lib:format( \
+ \"# Generated by Erlang.mk. Edit at your own risk!~n~n\" \
+ \"ERTS_INCLUDE_DIR ?= ~s/erts-~s/include/~n\" \
+ \"ERL_INTERFACE_INCLUDE_DIR ?= ~s~n\" \
+ \"ERL_INTERFACE_LIB_DIR ?= ~s~n\" \
+ \"ERTS_DIR ?= $(ERL_ERTS_DIR)~n\", \
+ [code:root_dir(), erlang:system_info(version), \
+ code:lib_dir(erl_interface, include), \
+ code:lib_dir(erl_interface, lib)])), \
+ halt()."
+
+distclean:: distclean-c_src-env
+
+distclean-c_src-env:
+ $(gen_verbose) rm -f $(C_SRC_ENV)
+
+-include $(C_SRC_ENV)
+
+ifneq ($(ERL_ERTS_DIR),$(ERTS_DIR))
+$(shell rm -f $(C_SRC_ENV))
+endif
+endif
+
+# Templates.
+
+define bs_c_nif
+#include "erl_nif.h"
+
+static int loads = 0;
+
+static int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
+{
+ /* Initialize private data. */
+ *priv_data = NULL;
+
+ loads++;
+
+ return 0;
+}
+
+static int upgrade(ErlNifEnv* env, void** priv_data, void** old_priv_data, ERL_NIF_TERM load_info)
+{
+ /* Convert the private data to the new version. */
+ *priv_data = *old_priv_data;
+
+ loads++;
+
+ return 0;
+}
+
+static void unload(ErlNifEnv* env, void* priv_data)
+{
+ if (loads == 1) {
+ /* Destroy the private data. */
+ }
+
+ loads--;
+}
+
+static ERL_NIF_TERM hello(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ if (enif_is_atom(env, argv[0])) {
+ return enif_make_tuple2(env,
+ enif_make_atom(env, "hello"),
+ argv[0]);
+ }
+
+ return enif_make_tuple2(env,
+ enif_make_atom(env, "error"),
+ enif_make_atom(env, "badarg"));
+}
+
+static ErlNifFunc nif_funcs[] = {
+ {"hello", 1, hello}
+};
+
+ERL_NIF_INIT($n, nif_funcs, load, NULL, upgrade, unload)
+endef
+
+define bs_erl_nif
+-module($n).
+
+-export([hello/1]).
+
+-on_load(on_load/0).
+on_load() ->
+ PrivDir = case code:priv_dir(?MODULE) of
+ {error, _} ->
+ AppPath = filename:dirname(filename:dirname(code:which(?MODULE))),
+ filename:join(AppPath, "priv");
+ Path ->
+ Path
+ end,
+ erlang:load_nif(filename:join(PrivDir, atom_to_list(?MODULE)), 0).
+
+hello(_) ->
+ erlang:nif_error({not_loaded, ?MODULE}).
+endef
+
+new-nif:
+ifneq ($(wildcard $(C_SRC_DIR)/$n.c),)
+ $(error Error: $(C_SRC_DIR)/$n.c already exists)
+endif
+ifneq ($(wildcard src/$n.erl),)
+ $(error Error: src/$n.erl already exists)
+endif
+ifndef n
+ $(error Usage: $(MAKE) new-nif n=NAME [in=APP])
+endif
+ifdef in
+ $(verbose) $(MAKE) -C $(APPS_DIR)/$(in)/ new-nif n=$n in=
+else
+ $(verbose) mkdir -p $(C_SRC_DIR) src/
+ $(verbose) $(call core_render,bs_c_nif,$(C_SRC_DIR)/$n.c)
+ $(verbose) $(call core_render,bs_erl_nif,src/$n.erl)
+endif
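+
+# Illustrative invocation (the name my_nif is assumed): 'make new-nif n=my_nif'
+# renders the two templates above to $(C_SRC_DIR)/my_nif.c and src/my_nif.erl,
+# with $n expanded to my_nif in both files.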
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: ci ci-prepare ci-setup
+
+CI_OTP ?=
+CI_HIPE ?=
+CI_ERLLVM ?=
+
+ifeq ($(CI_VM),native)
+ERLC_OPTS += +native
+TEST_ERLC_OPTS += +native
+else ifeq ($(CI_VM),erllvm)
+ERLC_OPTS += +native +'{hipe, [to_llvm]}'
+TEST_ERLC_OPTS += +native +'{hipe, [to_llvm]}'
+endif
+
+ifeq ($(strip $(CI_OTP) $(CI_HIPE) $(CI_ERLLVM)),)
+ci::
+else
+
+ci:: $(addprefix ci-,$(CI_OTP) $(addsuffix -native,$(CI_HIPE)) $(addsuffix -erllvm,$(CI_ERLLVM)))
+
+ci-prepare: $(addprefix $(KERL_INSTALL_DIR)/,$(CI_OTP) $(addsuffix -native,$(CI_HIPE)))
+
+ci-setup::
+ $(verbose) :
+
+ci-extra::
+ $(verbose) :
+
+ci_verbose_0 = @echo " CI " $(1);
+ci_verbose = $(ci_verbose_$(V))
+
+define ci_target
+ci-$1: $(KERL_INSTALL_DIR)/$2
+ $(verbose) $(MAKE) --no-print-directory clean
+ $(ci_verbose) \
+ PATH="$(KERL_INSTALL_DIR)/$2/bin:$(PATH)" \
+ CI_OTP_RELEASE="$1" \
+ CT_OPTS="-label $1" \
+ CI_VM="$3" \
+ $(MAKE) ci-setup tests
+ $(verbose) $(MAKE) --no-print-directory ci-extra
+endef
+
+$(foreach otp,$(CI_OTP),$(eval $(call ci_target,$(otp),$(otp),otp)))
+$(foreach otp,$(CI_HIPE),$(eval $(call ci_target,$(otp)-native,$(otp)-native,native)))
+$(foreach otp,$(CI_ERLLVM),$(eval $(call ci_target,$(otp)-erllvm,$(otp)-native,erllvm)))
+
+$(foreach otp,$(filter-out $(ERLANG_OTP),$(CI_OTP)),$(eval $(call kerl_otp_target,$(otp))))
+$(foreach otp,$(filter-out $(ERLANG_HIPE),$(sort $(CI_HIPE) $(CI_ERLLVM))),$(eval $(call kerl_hipe_target,$(otp))))
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Continuous Integration targets:" \
+ " ci Run '$(MAKE) tests' on all configured Erlang versions." \
+ "" \
+ "The CI_OTP variable must be defined with the Erlang versions" \
+ "that must be tested. For example: CI_OTP = OTP-17.3.4 OTP-17.5.3"
+
+endif
+
+# Copyright (c) 2020, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifdef CONCUERROR_TESTS
+
+.PHONY: concuerror distclean-concuerror
+
+# Configuration
+
+CONCUERROR_LOGS_DIR ?= $(CURDIR)/logs
+CONCUERROR_OPTS ?=
+
+# Core targets.
+
+check:: concuerror
+
+ifndef KEEP_LOGS
+distclean:: distclean-concuerror
+endif
+
+# Plugin-specific targets.
+
+$(ERLANG_MK_TMP)/Concuerror/bin/concuerror: | $(ERLANG_MK_TMP)
+ $(verbose) git clone https://github.com/parapluu/Concuerror $(ERLANG_MK_TMP)/Concuerror
+ $(verbose) $(MAKE) -C $(ERLANG_MK_TMP)/Concuerror
+
+$(CONCUERROR_LOGS_DIR):
+ $(verbose) mkdir -p $(CONCUERROR_LOGS_DIR)
+
+define concuerror_html_report
+<!DOCTYPE html>
+<html lang="en">
+<head>
+<meta charset="utf-8">
+<title>Concuerror HTML report</title>
+</head>
+<body>
+<h1>Concuerror HTML report</h1>
+<p>Generated on $(concuerror_date)</p>
+<ul>
+$(foreach t,$(concuerror_targets),<li><a href="$(t).txt">$(t)</a></li>)
+</ul>
+</body>
+</html>
+endef
+
+concuerror: $(addprefix concuerror-,$(subst :,-,$(CONCUERROR_TESTS)))
+ $(eval concuerror_date := $(shell date))
+ $(eval concuerror_targets := $^)
+ $(verbose) $(call core_render,concuerror_html_report,$(CONCUERROR_LOGS_DIR)/concuerror.html)
+
+define concuerror_target
+.PHONY: concuerror-$1-$2
+
+concuerror-$1-$2: test-build | $(ERLANG_MK_TMP)/Concuerror/bin/concuerror $(CONCUERROR_LOGS_DIR)
+ $(ERLANG_MK_TMP)/Concuerror/bin/concuerror \
+ --pa $(CURDIR)/ebin --pa $(TEST_DIR) \
+ -o $(CONCUERROR_LOGS_DIR)/concuerror-$1-$2.txt \
+ $$(CONCUERROR_OPTS) -m $1 -t $2
+endef
+
+$(foreach test,$(CONCUERROR_TESTS),$(eval $(call concuerror_target,$(firstword $(subst :, ,$(test))),$(lastword $(subst :, ,$(test))))))
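+
+# For example (module and test names assumed): CONCUERROR_TESTS = my_mod:my_test
+# defines a target concuerror-my_mod-my_test that runs
+# 'concuerror -m my_mod -t my_test' and writes its report to
+# $(CONCUERROR_LOGS_DIR)/concuerror-my_mod-my_test.txt.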
+
+distclean-concuerror:
+ $(gen_verbose) rm -rf $(CONCUERROR_LOGS_DIR)
+
+endif
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: ct apps-ct distclean-ct
+
+# Configuration.
+
+CT_OPTS ?=
+
+ifneq ($(wildcard $(TEST_DIR)),)
+ifndef CT_SUITES
+CT_SUITES := $(sort $(subst _SUITE.erl,,$(notdir $(call core_find,$(TEST_DIR)/,*_SUITE.erl))))
+endif
+endif
+CT_SUITES ?=
+CT_LOGS_DIR ?= $(CURDIR)/logs
+
+# Core targets.
+
+tests:: ct
+
+ifndef KEEP_LOGS
+distclean:: distclean-ct
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Common_test targets:" \
+ " ct Run all the common_test suites for this project" \
+ "" \
+ "All your common_test suites have their associated targets." \
+ "A suite named http_SUITE can be ran using the ct-http target."
+
+# Plugin-specific targets.
+
+CT_RUN = ct_run \
+ -no_auto_compile \
+ -noinput \
+ -pa $(CURDIR)/ebin $(TEST_DIR) \
+ -dir $(TEST_DIR) \
+ -logdir $(CT_LOGS_DIR)
+
+ifeq ($(CT_SUITES),)
+ct: $(if $(IS_APP)$(ROOT_DIR),,apps-ct)
+else
+# We do not run tests if we are in an apps/* with no test directory.
+ifneq ($(IS_APP)$(wildcard $(TEST_DIR)),1)
+ct: test-build $(if $(IS_APP)$(ROOT_DIR),,apps-ct)
+ $(verbose) mkdir -p $(CT_LOGS_DIR)
+ $(gen_verbose) $(CT_RUN) -sname ct_$(PROJECT) -suite $(addsuffix _SUITE,$(CT_SUITES)) $(CT_OPTS)
+endif
+endif
+
+ifneq ($(ALL_APPS_DIRS),)
+define ct_app_target
+apps-ct-$1: test-build
+ $$(MAKE) -C $1 ct IS_APP=1
+endef
+
+$(foreach app,$(ALL_APPS_DIRS),$(eval $(call ct_app_target,$(app))))
+
+apps-ct: $(addprefix apps-ct-,$(ALL_APPS_DIRS))
+endif
+
+ifdef t
+ifeq (,$(findstring :,$t))
+CT_EXTRA = -group $t
+else
+t_words = $(subst :, ,$t)
+CT_EXTRA = -group $(firstword $(t_words)) -case $(lastword $(t_words))
+endif
+else
+ifdef c
+CT_EXTRA = -case $c
+else
+CT_EXTRA =
+endif
+endif
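+
+# Illustrative use of the per-suite targets defined below (group and case
+# names are assumed): 'make ct-http t=admin' runs only the admin group of
+# http_SUITE, 'make ct-http t=admin:list_users' narrows the run to the
+# list_users case, and 'make ct-http c=list_users' selects a case directly.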
+
+define ct_suite_target
+ct-$(1): test-build
+ $(verbose) mkdir -p $(CT_LOGS_DIR)
+ $(gen_verbose_esc) $(CT_RUN) -sname ct_$(PROJECT) -suite $(addsuffix _SUITE,$(1)) $(CT_EXTRA) $(CT_OPTS)
+endef
+
+$(foreach test,$(CT_SUITES),$(eval $(call ct_suite_target,$(test))))
+
+distclean-ct:
+ $(gen_verbose) rm -rf $(CT_LOGS_DIR)
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: plt distclean-plt dialyze
+
+# Configuration.
+
+DIALYZER_PLT ?= $(CURDIR)/.$(PROJECT).plt
+export DIALYZER_PLT
+
+PLT_APPS ?=
+DIALYZER_DIRS ?= --src -r $(wildcard src) $(ALL_APPS_DIRS)
+DIALYZER_OPTS ?= -Werror_handling -Wrace_conditions -Wunmatched_returns # -Wunderspecs
+DIALYZER_PLT_OPTS ?=
+
+# Core targets.
+
+check:: dialyze
+
+distclean:: distclean-plt
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Dialyzer targets:" \
+ " plt Build a PLT file for this project" \
+ " dialyze Analyze the project using Dialyzer"
+
+# Plugin-specific targets.
+
+define filter_opts.erl
+ Opts = init:get_plain_arguments(),
+ {Filtered, _} = lists:foldl(fun
+ (O, {Os, true}) -> {[O|Os], false};
+ (O = "-D", {Os, _}) -> {[O|Os], true};
+ (O = [\\$$-, \\$$D, _ | _], {Os, _}) -> {[O|Os], false};
+ (O = "-I", {Os, _}) -> {[O|Os], true};
+ (O = [\\$$-, \\$$I, _ | _], {Os, _}) -> {[O|Os], false};
+ (O = "-pa", {Os, _}) -> {[O|Os], true};
+ (_, Acc) -> Acc
+ end, {[], false}, Opts),
+ io:format("~s~n", [string:join(lists:reverse(Filtered), " ")]),
+ halt().
+endef
+
+# DIALYZER_PLT is a variable understood directly by Dialyzer.
+#
+# We append the path to erts at the end of the PLT. This works
+# because the PLT file is in the external term format and the
+# function binary_to_term/1 ignores any trailing data.
+$(DIALYZER_PLT): deps app
+ $(eval DEPS_LOG := $(shell test -f $(ERLANG_MK_TMP)/deps.log && \
+ while read p; do test -d $$p/ebin && echo $$p/ebin; done <$(ERLANG_MK_TMP)/deps.log))
+ $(verbose) dialyzer --build_plt $(DIALYZER_PLT_OPTS) --apps \
+ erts kernel stdlib $(PLT_APPS) $(OTP_DEPS) $(LOCAL_DEPS) $(DEPS_LOG) || test $$? -eq 2
+ $(verbose) $(ERL) -eval 'io:format("~n~s~n", [code:lib_dir(erts)]), halt().' >> $@
+
+plt: $(DIALYZER_PLT)
+
+distclean-plt:
+ $(gen_verbose) rm -f $(DIALYZER_PLT)
+
+ifneq ($(wildcard $(DIALYZER_PLT)),)
+dialyze: $(if $(filter --src,$(DIALYZER_DIRS)),,deps app)
+ $(verbose) if ! tail -n1 $(DIALYZER_PLT) | \
+ grep -q "^`$(ERL) -eval 'io:format("~s", [code:lib_dir(erts)]), halt().'`$$"; then \
+ rm $(DIALYZER_PLT); \
+ $(MAKE) plt; \
+ fi
+else
+dialyze: $(DIALYZER_PLT)
+endif
+ $(verbose) dialyzer --no_native `$(ERL) \
+ -eval "$(subst $(newline),,$(call escape_dquotes,$(call filter_opts.erl)))" \
+ -extra $(ERLC_OPTS)` $(DIALYZER_DIRS) $(DIALYZER_OPTS) $(if $(wildcard ebin/),-pa ebin/)
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-edoc edoc
+
+# Configuration.
+
+EDOC_OPTS ?=
+EDOC_SRC_DIRS ?=
+EDOC_OUTPUT ?= doc
+
+define edoc.erl
+ SrcPaths = lists:foldl(fun(P, Acc) ->
+ filelib:wildcard(atom_to_list(P) ++ "/{src,c_src}") ++ Acc
+ end, [], [$(call comma_list,$(patsubst %,'%',$(call core_native_path,$(EDOC_SRC_DIRS))))]),
+ DefaultOpts = [{dir, "$(EDOC_OUTPUT)"}, {source_path, SrcPaths}, {subpackages, false}],
+ edoc:application($(1), ".", [$(2)] ++ DefaultOpts),
+ halt(0).
+endef
+
+# Core targets.
+
+ifneq ($(strip $(EDOC_SRC_DIRS)$(wildcard doc/overview.edoc)),)
+docs:: edoc
+endif
+
+distclean:: distclean-edoc
+
+# Plugin-specific targets.
+
+edoc: distclean-edoc doc-deps
+ $(gen_verbose) $(call erlang,$(call edoc.erl,$(PROJECT),$(EDOC_OPTS)))
+
+distclean-edoc:
+ $(gen_verbose) rm -f $(EDOC_OUTPUT)/*.css $(EDOC_OUTPUT)/*.html $(EDOC_OUTPUT)/*.png $(EDOC_OUTPUT)/edoc-info
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Configuration.
+
+DTL_FULL_PATH ?=
+DTL_PATH ?= templates/
+DTL_PREFIX ?=
+DTL_SUFFIX ?= _dtl
+DTL_OPTS ?=
+
+# Verbosity.
+
+dtl_verbose_0 = @echo " DTL " $(filter %.dtl,$(?F));
+dtl_verbose = $(dtl_verbose_$(V))
+
+# Core targets.
+
+DTL_PATH := $(abspath $(DTL_PATH))
+DTL_FILES := $(sort $(call core_find,$(DTL_PATH),*.dtl))
+
+ifneq ($(DTL_FILES),)
+
+DTL_NAMES = $(addprefix $(DTL_PREFIX),$(addsuffix $(DTL_SUFFIX),$(DTL_FILES:$(DTL_PATH)/%.dtl=%)))
+DTL_MODULES = $(if $(DTL_FULL_PATH),$(subst /,_,$(DTL_NAMES)),$(notdir $(DTL_NAMES)))
+BEAM_FILES += $(addsuffix .beam,$(addprefix ebin/,$(DTL_MODULES)))
+
+ifneq ($(words $(DTL_FILES)),0)
+# Rebuild templates when the Makefile changes.
+$(ERLANG_MK_TMP)/last-makefile-change-erlydtl: $(MAKEFILE_LIST) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(DTL_FILES); \
+ fi
+ $(verbose) touch $@
+
+ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change-erlydtl
+endif
+
+define erlydtl_compile.erl
+ [begin
+ Module0 = case "$(strip $(DTL_FULL_PATH))" of
+ "" ->
+ filename:basename(F, ".dtl");
+ _ ->
+ "$(call core_native_path,$(DTL_PATH))/" ++ F2 = filename:rootname(F, ".dtl"),
+ re:replace(F2, "/", "_", [{return, list}, global])
+ end,
+ Module = list_to_atom("$(DTL_PREFIX)" ++ string:to_lower(Module0) ++ "$(DTL_SUFFIX)"),
+ case erlydtl:compile(F, Module, [$(DTL_OPTS)] ++ [{out_dir, "ebin/"}, return_errors]) of
+ ok -> ok;
+ {ok, _} -> ok
+ end
+ end || F <- string:tokens("$(1)", " ")],
+ halt().
+endef
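+
+# As a sketch of the naming scheme above (template path assumed): with the
+# default options, templates/users/show.dtl compiles to the module show_dtl;
+# with DTL_FULL_PATH set, it becomes users_show_dtl instead.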
+
+ebin/$(PROJECT).app:: $(DTL_FILES) | ebin/
+ $(if $(strip $?),\
+ $(dtl_verbose) $(call erlang,$(call erlydtl_compile.erl,$(call core_native_path,$?)),\
+ -pa ebin/))
+
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, Dave Cottlehuber <dch@skunkwerks.at>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-escript escript escript-zip
+
+# Configuration.
+
+ESCRIPT_NAME ?= $(PROJECT)
+ESCRIPT_FILE ?= $(ESCRIPT_NAME)
+
+ESCRIPT_SHEBANG ?= /usr/bin/env escript
+ESCRIPT_COMMENT ?= This is an -*- erlang -*- file
+ESCRIPT_EMU_ARGS ?= -escript main $(ESCRIPT_NAME)
+
+ESCRIPT_ZIP ?= 7z a -tzip -mx=9 -mtc=off $(if $(filter-out 0,$(V)),,> /dev/null)
+ESCRIPT_ZIP_FILE ?= $(ERLANG_MK_TMP)/escript.zip
+
+# Core targets.
+
+distclean:: distclean-escript
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Escript targets:" \
+ " escript Build an executable escript archive" \
+
+# Plugin-specific targets.
+
+escript-zip:: FULL=1
+escript-zip:: deps app
+ $(verbose) mkdir -p $(dir $(ESCRIPT_ZIP_FILE))
+ $(verbose) rm -f $(ESCRIPT_ZIP_FILE)
+ $(gen_verbose) cd .. && $(ESCRIPT_ZIP) $(ESCRIPT_ZIP_FILE) $(PROJECT)/ebin/*
+ifneq ($(DEPS),)
+ $(verbose) cd $(DEPS_DIR) && $(ESCRIPT_ZIP) $(ESCRIPT_ZIP_FILE) \
+ $(subst $(DEPS_DIR)/,,$(addsuffix /*,$(wildcard \
+ $(addsuffix /ebin,$(shell cat $(ERLANG_MK_TMP)/deps.log)))))
+endif
+
+escript:: escript-zip
+ $(gen_verbose) printf "%s\n" \
+ "#!$(ESCRIPT_SHEBANG)" \
+ "%% $(ESCRIPT_COMMENT)" \
+ "%%! $(ESCRIPT_EMU_ARGS)" > $(ESCRIPT_FILE)
+ $(verbose) cat $(ESCRIPT_ZIP_FILE) >> $(ESCRIPT_FILE)
+ $(verbose) chmod +x $(ESCRIPT_FILE)
+
+distclean-escript:
+ $(gen_verbose) rm -f $(ESCRIPT_FILE)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, Enrique Fernandez <enrique.fernandez@erlang-solutions.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: eunit apps-eunit
+
+# Configuration
+
+EUNIT_OPTS ?=
+EUNIT_ERL_OPTS ?=
+
+# Core targets.
+
+tests:: eunit
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "EUnit targets:" \
+ " eunit Run all the EUnit tests for this project"
+
+# Plugin-specific targets.
+
+define eunit.erl
+ $(call cover.erl)
+ CoverSetup(),
+ case eunit:test($1, [$(EUNIT_OPTS)]) of
+ ok -> ok;
+ error -> halt(2)
+ end,
+ CoverExport("$(call core_native_path,$(COVER_DATA_DIR))/eunit.coverdata"),
+ halt()
+endef
+
+EUNIT_ERL_OPTS += -pa $(TEST_DIR) $(CURDIR)/ebin
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+eunit: test-build cover-data-dir
+ $(gen_verbose) $(call erlang,$(call eunit.erl,['$(t)']),$(EUNIT_ERL_OPTS))
+else
+eunit: test-build cover-data-dir
+ $(gen_verbose) $(call erlang,$(call eunit.erl,fun $(t)/0),$(EUNIT_ERL_OPTS))
+endif
+else
+EUNIT_EBIN_MODS = $(notdir $(basename $(ERL_FILES) $(BEAM_FILES)))
+EUNIT_TEST_MODS = $(notdir $(basename $(call core_find,$(TEST_DIR)/,*.erl)))
+
+EUNIT_MODS = $(foreach mod,$(EUNIT_EBIN_MODS) $(filter-out \
+ $(patsubst %,%_tests,$(EUNIT_EBIN_MODS)),$(EUNIT_TEST_MODS)),'$(mod)')
+
+eunit: test-build $(if $(IS_APP)$(ROOT_DIR),,apps-eunit) cover-data-dir
+ifneq ($(wildcard src/ $(TEST_DIR)),)
+ $(gen_verbose) $(call erlang,$(call eunit.erl,[$(call comma_list,$(EUNIT_MODS))]),$(EUNIT_ERL_OPTS))
+endif
+
+ifneq ($(ALL_APPS_DIRS),)
+apps-eunit: test-build
+ $(verbose) eunit_retcode=0 ; for app in $(ALL_APPS_DIRS); do $(MAKE) -C $$app eunit IS_APP=1; \
+ [ $$? -ne 0 ] && eunit_retcode=1 ; done ; \
+ exit $$eunit_retcode
+endif
+endif
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter proper,$(DEPS) $(TEST_DEPS)),proper)
+.PHONY: proper
+
+# Targets.
+
+tests:: proper
+
+define proper_check.erl
+ $(call cover.erl)
+ code:add_pathsa([
+ "$(call core_native_path,$(CURDIR)/ebin)",
+ "$(call core_native_path,$(DEPS_DIR)/*/ebin)",
+ "$(call core_native_path,$(TEST_DIR))"]),
+ Module = fun(M) ->
+ [true] =:= lists:usort([
+ case atom_to_list(F) of
+ "prop_" ++ _ ->
+ io:format("Testing ~p:~p/0~n", [M, F]),
+ proper:quickcheck(M:F(), nocolors);
+ _ ->
+ true
+ end
+ || {F, 0} <- M:module_info(exports)])
+ end,
+ try begin
+ CoverSetup(),
+ Res = case $(1) of
+ all -> [true] =:= lists:usort([Module(M) || M <- [$(call comma_list,$(3))]]);
+ module -> Module($(2));
+ function -> proper:quickcheck($(2), nocolors)
+ end,
+ CoverExport("$(COVER_DATA_DIR)/proper.coverdata"),
+ Res
+ end of
+ true -> halt(0);
+ _ -> halt(1)
+ catch error:undef ->
+ io:format("Undefined property or module?~n~p~n", [erlang:get_stacktrace()]),
+ halt(0)
+ end.
+endef
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+proper: test-build cover-data-dir
+ $(verbose) $(call erlang,$(call proper_check.erl,module,$(t)))
+else
+proper: test-build cover-data-dir
+ $(verbose) echo Testing $(t)/0
+ $(verbose) $(call erlang,$(call proper_check.erl,function,$(t)()))
+endif
+else
+proper: test-build cover-data-dir
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(wildcard ebin/*.beam) $(call core_find,$(TEST_DIR)/,*.beam))))))
+ $(gen_verbose) $(call erlang,$(call proper_check.erl,all,undefined,$(MODULES)))
+endif
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Verbosity.
+
+proto_verbose_0 = @echo " PROTO " $(filter %.proto,$(?F));
+proto_verbose = $(proto_verbose_$(V))
+
+# Core targets.
+
+ifneq ($(wildcard src/),)
+ifneq ($(filter gpb protobuffs,$(BUILD_DEPS) $(DEPS)),)
+PROTO_FILES := $(filter %.proto,$(ALL_SRC_FILES))
+ERL_FILES += $(addprefix src/,$(patsubst %.proto,%_pb.erl,$(notdir $(PROTO_FILES))))
+
+ifeq ($(PROTO_FILES),)
+$(ERLANG_MK_TMP)/last-makefile-change-protobuffs:
+ $(verbose) :
+else
+# Rebuild proto files when the Makefile changes.
+# We exclude $(PROJECT).d to avoid a circular dependency.
+$(ERLANG_MK_TMP)/last-makefile-change-protobuffs: $(filter-out $(PROJECT).d,$(MAKEFILE_LIST)) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(PROTO_FILES); \
+ fi
+ $(verbose) touch $@
+
+$(PROJECT).d:: $(ERLANG_MK_TMP)/last-makefile-change-protobuffs
+endif
+
+ifeq ($(filter gpb,$(BUILD_DEPS) $(DEPS)),)
+define compile_proto.erl
+ [begin
+ protobuffs_compile:generate_source(F, [
+ {output_include_dir, "./include"},
+ {output_src_dir, "./src"}])
+ end || F <- string:tokens("$1", " ")],
+ halt().
+endef
+else
+define compile_proto.erl
+ [begin
+ gpb_compile:file(F, [
+ {include_as_lib, true},
+ {module_name_suffix, "_pb"},
+ {o_hrl, "./include"},
+ {o_erl, "./src"}])
+ end || F <- string:tokens("$1", " ")],
+ halt().
+endef
+endif
+
+ifneq ($(PROTO_FILES),)
+$(PROJECT).d:: $(PROTO_FILES)
+ $(verbose) mkdir -p ebin/ include/
+ $(if $(strip $?),$(proto_verbose) $(call erlang,$(call compile_proto.erl,$?)))
+endif
+endif
+endif
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: relx-rel relx-relup distclean-relx-rel run
+
+# Configuration.
+
+RELX ?= $(ERLANG_MK_TMP)/relx
+RELX_CONFIG ?= $(CURDIR)/relx.config
+
+RELX_URL ?= https://erlang.mk/res/relx-v3.27.0
+RELX_OPTS ?=
+RELX_OUTPUT_DIR ?= _rel
+RELX_REL_EXT ?=
+RELX_TAR ?= 1
+
+ifdef SFX
+ RELX_TAR = 1
+endif
+
+ifeq ($(firstword $(RELX_OPTS)),-o)
+ RELX_OUTPUT_DIR = $(word 2,$(RELX_OPTS))
+else
+ RELX_OPTS += -o $(RELX_OUTPUT_DIR)
+endif
+
+# Core targets.
+
+ifeq ($(IS_DEP),)
+ifneq ($(wildcard $(RELX_CONFIG)),)
+rel:: relx-rel
+
+relup:: relx-relup
+endif
+endif
+
+distclean:: distclean-relx-rel
+
+# Plugin-specific targets.
+
+$(RELX): | $(ERLANG_MK_TMP)
+ $(gen_verbose) $(call core_http_get,$(RELX),$(RELX_URL))
+ $(verbose) chmod +x $(RELX)
+
+relx-rel: $(RELX) rel-deps app
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) release
+ $(verbose) $(MAKE) relx-post-rel
+ifeq ($(RELX_TAR),1)
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) tar
+endif
+
+relx-relup: $(RELX) rel-deps app
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) release
+ $(MAKE) relx-post-rel
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) relup $(if $(filter 1,$(RELX_TAR)),tar)
+
+distclean-relx-rel:
+ $(gen_verbose) rm -rf $(RELX_OUTPUT_DIR)
+
+# Default hooks.
+relx-post-rel::
+ $(verbose) :
+
+# Run target.
+
+ifeq ($(wildcard $(RELX_CONFIG)),)
+run::
+else
+
+define get_relx_release.erl
+ {ok, Config} = file:consult("$(call core_native_path,$(RELX_CONFIG))"),
+ {release, {Name, Vsn0}, _} = lists:keyfind(release, 1, Config),
+ Vsn = case Vsn0 of
+ {cmd, Cmd} -> os:cmd(Cmd);
+ semver -> "";
+ {semver, _} -> "";
+ _ -> Vsn0
+ end,
+ Extended = case lists:keyfind(extended_start_script, 1, Config) of
+ {_, true} -> "1";
+ _ -> ""
+ end,
+ io:format("~s ~s ~s", [Name, Vsn, Extended]),
+ halt(0).
+endef
+
+RELX_REL := $(shell $(call erlang,$(get_relx_release.erl)))
+RELX_REL_NAME := $(word 1,$(RELX_REL))
+RELX_REL_VSN := $(word 2,$(RELX_REL))
+RELX_REL_CMD := $(if $(word 3,$(RELX_REL)),console)
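+
+# For instance (release name assumed): a relx.config containing
+# {release, {my_release, "1.0.0"}, [my_app]}. and {extended_start_script, true}.
+# makes the snippet above print "my_release 1.0.0 1", so RELX_REL_NAME is
+# my_release, RELX_REL_VSN is 1.0.0, and the run target below starts the
+# release with the console command.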
+
+ifeq ($(PLATFORM),msys2)
+RELX_REL_EXT := .cmd
+endif
+
+run:: all
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) $(RELX_REL_CMD)
+
+ifdef RELOAD
+rel::
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) ping
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) \
+ eval "io:format(\"~p~n\", [c:lm()])"
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Relx targets:" \
+ " run Compile the project, build the release and run it"
+
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, M Robert Martin <rob@version2beta.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: shell
+
+# Configuration.
+
+SHELL_ERL ?= erl
+SHELL_PATHS ?= $(CURDIR)/ebin $(TEST_DIR)
+SHELL_OPTS ?=
+
+ALL_SHELL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(SHELL_DEPS))
+
+# Core targets
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Shell targets:" \
+ " shell Run an erlang shell with SHELL_OPTS or reasonable default"
+
+# Plugin-specific targets.
+
+$(foreach dep,$(SHELL_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+build-shell-deps:
+else
+build-shell-deps: $(ALL_SHELL_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_SHELL_DEPS_DIRS) ; do \
+ if [ -z "$(strip $(FULL))" ] && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ else \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ fi \
+ done
+endif
+
+shell:: build-shell-deps
+ $(gen_verbose) $(SHELL_ERL) -pa $(SHELL_PATHS) $(SHELL_OPTS)
+
+# Copyright 2017, Stanislaw Klekot <dozzie@jarowit.net>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-sphinx sphinx
+
+# Configuration.
+
+SPHINX_BUILD ?= sphinx-build
+SPHINX_SOURCE ?= doc
+SPHINX_CONFDIR ?=
+SPHINX_FORMATS ?= html
+SPHINX_DOCTREES ?= $(ERLANG_MK_TMP)/sphinx.doctrees
+SPHINX_OPTS ?=
+
+#sphinx_html_opts =
+#sphinx_html_output = html
+#sphinx_man_opts =
+#sphinx_man_output = man
+#sphinx_latex_opts =
+#sphinx_latex_output = latex
+
+# Helpers.
+
+sphinx_build_0 = @echo " SPHINX" $1; $(SPHINX_BUILD) -N -q
+sphinx_build_1 = $(SPHINX_BUILD) -N
+sphinx_build_2 = set -x; $(SPHINX_BUILD)
+sphinx_build = $(sphinx_build_$(V))
+
+define sphinx.build
+$(call sphinx_build,$1) -b $1 -d $(SPHINX_DOCTREES) $(if $(SPHINX_CONFDIR),-c $(SPHINX_CONFDIR)) $(SPHINX_OPTS) $(sphinx_$1_opts) -- $(SPHINX_SOURCE) $(call sphinx.output,$1)
+
+endef
+
+define sphinx.output
+$(if $(sphinx_$1_output),$(sphinx_$1_output),$1)
+endef
+
+# Targets.
+
+ifneq ($(wildcard $(if $(SPHINX_CONFDIR),$(SPHINX_CONFDIR),$(SPHINX_SOURCE))/conf.py),)
+docs:: sphinx
+distclean:: distclean-sphinx
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Sphinx targets:" \
+ " sphinx Generate Sphinx documentation." \
+ "" \
+ "ReST sources and 'conf.py' file are expected in directory pointed by" \
+ "SPHINX_SOURCE ('doc' by default). SPHINX_FORMATS lists formats to build (only" \
+ "'html' format is generated by default); target directory can be specified by" \
+ 'setting sphinx_$${format}_output, for example: sphinx_html_output = output/html' \
+ "Additional Sphinx options can be set in SPHINX_OPTS."
+
+# Plugin-specific targets.
+
+sphinx:
+ $(foreach F,$(SPHINX_FORMATS),$(call sphinx.build,$F))
+
+distclean-sphinx:
+ $(gen_verbose) rm -rf $(filter-out $(SPHINX_SOURCE),$(foreach F,$(SPHINX_FORMATS),$(call sphinx.output,$F)))
+
+# Copyright (c) 2017, Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: show-ERL_LIBS show-ERLC_OPTS show-TEST_ERLC_OPTS
+
+show-ERL_LIBS:
+ @echo $(ERL_LIBS)
+
+show-ERLC_OPTS:
+ @$(foreach opt,$(ERLC_OPTS) -pa ebin -I include,echo "$(opt)";)
+
+show-TEST_ERLC_OPTS:
+ @$(foreach opt,$(TEST_ERLC_OPTS) -pa ebin -I include,echo "$(opt)";)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter triq,$(DEPS) $(TEST_DEPS)),triq)
+.PHONY: triq
+
+# Targets.
+
+tests:: triq
+
+define triq_check.erl
+ $(call cover.erl)
+ code:add_pathsa([
+ "$(call core_native_path,$(CURDIR)/ebin)",
+ "$(call core_native_path,$(DEPS_DIR)/*/ebin)",
+ "$(call core_native_path,$(TEST_DIR))"]),
+ try begin
+ CoverSetup(),
+ Res = case $(1) of
+ all -> [true] =:= lists:usort([triq:check(M) || M <- [$(call comma_list,$(3))]]);
+ module -> triq:check($(2));
+ function -> triq:check($(2))
+ end,
+ CoverExport("$(COVER_DATA_DIR)/triq.coverdata"),
+ Res
+ end of
+ true -> halt(0);
+ _ -> halt(1)
+ catch error:undef ->
+ io:format("Undefined property or module?~n~p~n", [erlang:get_stacktrace()]),
+ halt(0)
+ end.
+endef
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+triq: test-build cover-data-dir
+ $(verbose) $(call erlang,$(call triq_check.erl,module,$(t)))
+else
+triq: test-build cover-data-dir
+ $(verbose) echo Testing $(t)/0
+ $(verbose) $(call erlang,$(call triq_check.erl,function,$(t)()))
+endif
+else
+triq: test-build cover-data-dir
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(wildcard ebin/*.beam) $(call core_find,$(TEST_DIR)/,*.beam))))))
+ $(gen_verbose) $(call erlang,$(call triq_check.erl,all,undefined,$(MODULES)))
+endif
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Erlang Solutions Ltd.
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: xref distclean-xref
+
+# Configuration.
+
+ifeq ($(XREF_CONFIG),)
+ XREFR_ARGS :=
+else
+ XREFR_ARGS := -c $(XREF_CONFIG)
+endif
+
+XREFR ?= $(CURDIR)/xrefr
+export XREFR
+
+XREFR_URL ?= https://github.com/inaka/xref_runner/releases/download/1.1.0/xrefr
+
+# Core targets.
+
+help::
+ $(verbose) printf '%s\n' '' \
+ 'Xref targets:' \
+ ' xref Run Xrefr using $$XREF_CONFIG as the config file, if defined'
+
+distclean:: distclean-xref
+
+# Plugin-specific targets.
+
+$(XREFR):
+ $(gen_verbose) $(call core_http_get,$(XREFR),$(XREFR_URL))
+ $(verbose) chmod +x $(XREFR)
+
+xref: deps app $(XREFR)
+ $(gen_verbose) $(XREFR) $(XREFR_ARGS)
+
+distclean-xref:
+ $(gen_verbose) rm -rf $(XREFR)
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Viktor Söderqvist <viktor@zuiderkwast.se>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+COVER_REPORT_DIR ?= cover
+COVER_DATA_DIR ?= $(COVER_REPORT_DIR)
+
+ifdef COVER
+COVER_APPS ?= $(notdir $(ALL_APPS_DIRS))
+COVER_DEPS ?=
+endif
+
+# Code coverage for Common Test.
+
+ifdef COVER
+ifdef CT_RUN
+ifneq ($(wildcard $(TEST_DIR)),)
+test-build:: $(TEST_DIR)/ct.cover.spec
+
+$(TEST_DIR)/ct.cover.spec: cover-data-dir
+ $(gen_verbose) printf "%s\n" \
+ "{incl_app, '$(PROJECT)', details}." \
+ "{incl_dirs, '$(PROJECT)', [\"$(call core_native_path,$(CURDIR)/ebin)\" \
+ $(foreach a,$(COVER_APPS),$(comma) \"$(call core_native_path,$(APPS_DIR)/$a/ebin)\") \
+ $(foreach d,$(COVER_DEPS),$(comma) \"$(call core_native_path,$(DEPS_DIR)/$d/ebin)\")]}." \
+ '{export,"$(call core_native_path,$(abspath $(COVER_DATA_DIR))/ct.coverdata)"}.' > $@
+
+CT_RUN += -cover $(TEST_DIR)/ct.cover.spec
+endif
+endif
+endif
+
+# Code coverage for other tools.
+
+ifdef COVER
+define cover.erl
+ CoverSetup = fun() ->
+ Dirs = ["$(call core_native_path,$(CURDIR)/ebin)"
+ $(foreach a,$(COVER_APPS),$(comma) "$(call core_native_path,$(APPS_DIR)/$a/ebin)")
+ $(foreach d,$(COVER_DEPS),$(comma) "$(call core_native_path,$(DEPS_DIR)/$d/ebin)")],
+ [begin
+ case filelib:is_dir(Dir) of
+ false -> false;
+ true ->
+ case cover:compile_beam_directory(Dir) of
+ {error, _} -> halt(1);
+ _ -> true
+ end
+ end
+ end || Dir <- Dirs]
+ end,
+ CoverExport = fun(Filename) -> cover:export(Filename) end,
+endef
+else
+define cover.erl
+ CoverSetup = fun() -> ok end,
+ CoverExport = fun(_) -> ok end,
+endef
+endif
+
+# Core targets
+
+ifdef COVER
+ifneq ($(COVER_REPORT_DIR),)
+tests::
+ $(verbose) $(MAKE) --no-print-directory cover-report
+endif
+
+cover-data-dir: | $(COVER_DATA_DIR)
+
+$(COVER_DATA_DIR):
+ $(verbose) mkdir -p $(COVER_DATA_DIR)
+else
+cover-data-dir:
+endif
+
+clean:: coverdata-clean
+
+ifneq ($(COVER_REPORT_DIR),)
+distclean:: cover-report-clean
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Cover targets:" \
+ " cover-report Generate a HTML coverage report from previously collected" \
+ " cover data." \
+ " all.coverdata Merge all coverdata files into all.coverdata." \
+ "" \
+ "If COVER=1 is set, coverage data is generated by the targets eunit and ct. The" \
+ "target tests additionally generates a HTML coverage report from the combined" \
+ "coverdata files from each of these testing tools. HTML reports can be disabled" \
+ "by setting COVER_REPORT_DIR to empty."
+
+# Plugin specific targets
+
+COVERDATA = $(filter-out $(COVER_DATA_DIR)/all.coverdata,$(wildcard $(COVER_DATA_DIR)/*.coverdata))
+
+.PHONY: coverdata-clean
+coverdata-clean:
+ $(gen_verbose) rm -f $(COVER_DATA_DIR)/*.coverdata $(TEST_DIR)/ct.cover.spec
+
+# Merge all coverdata files into one.
+define cover_export.erl
+ $(foreach f,$(COVERDATA),cover:import("$(f)") == ok orelse halt(1),)
+ cover:export("$(COVER_DATA_DIR)/$@"), halt(0).
+endef
+
+all.coverdata: $(COVERDATA) cover-data-dir
+ $(gen_verbose) $(call erlang,$(cover_export.erl))
+
+# These are only defined if COVER_REPORT_DIR is non-empty. Set COVER_REPORT_DIR to
+# empty if you want the coverdata files but not the HTML report.
+ifneq ($(COVER_REPORT_DIR),)
+
+.PHONY: cover-report-clean cover-report
+
+cover-report-clean:
+ $(gen_verbose) rm -rf $(COVER_REPORT_DIR)
+ifneq ($(COVER_REPORT_DIR),$(COVER_DATA_DIR))
+ $(if $(shell ls -A $(COVER_DATA_DIR)/),,$(verbose) rmdir $(COVER_DATA_DIR))
+endif
+
+ifeq ($(COVERDATA),)
+cover-report:
+else
+
+# Modules which include eunit.hrl always contain one line without coverage
+# because eunit defines test/0 which is never called. We compensate for this.
+EUNIT_HRL_MODS = $(subst $(space),$(comma),$(shell \
+ grep -H -e '^\s*-include.*include/eunit\.hrl"' src/*.erl \
+ | sed "s/^src\/\(.*\)\.erl:.*/'\1'/" | uniq))
+
+define cover_report.erl
+ $(foreach f,$(COVERDATA),cover:import("$(f)") == ok orelse halt(1),)
+ Ms = cover:imported_modules(),
+ [cover:analyse_to_file(M, "$(COVER_REPORT_DIR)/" ++ atom_to_list(M)
+ ++ ".COVER.html", [html]) || M <- Ms],
+ Report = [begin {ok, R} = cover:analyse(M, module), R end || M <- Ms],
+ EunitHrlMods = [$(EUNIT_HRL_MODS)],
+ Report1 = [{M, {Y, case lists:member(M, EunitHrlMods) of
+ true -> N - 1; false -> N end}} || {M, {Y, N}} <- Report],
+ TotalY = lists:sum([Y || {_, {Y, _}} <- Report1]),
+ TotalN = lists:sum([N || {_, {_, N}} <- Report1]),
+ Perc = fun(Y, N) -> case Y + N of 0 -> 100; S -> round(100 * Y / S) end end,
+ TotalPerc = Perc(TotalY, TotalN),
+ {ok, F} = file:open("$(COVER_REPORT_DIR)/index.html", [write]),
+ io:format(F, "<!DOCTYPE html><html>~n"
+ "<head><meta charset=\"UTF-8\">~n"
+ "<title>Coverage report</title></head>~n"
+ "<body>~n", []),
+ io:format(F, "<h1>Coverage</h1>~n<p>Total: ~p%</p>~n", [TotalPerc]),
+ io:format(F, "<table><tr><th>Module</th><th>Coverage</th></tr>~n", []),
+ [io:format(F, "<tr><td><a href=\"~p.COVER.html\">~p</a></td>"
+ "<td>~p%</td></tr>~n",
+ [M, M, Perc(Y, N)]) || {M, {Y, N}} <- Report1],
+ How = "$(subst $(space),$(comma)$(space),$(basename $(COVERDATA)))",
+ Date = "$(shell date -u "+%Y-%m-%dT%H:%M:%SZ")",
+ io:format(F, "</table>~n"
+ "<p>Generated using ~s and erlang.mk on ~s.</p>~n"
+ "</body></html>", [How, Date]),
+ halt().
+endef
+
+cover-report:
+ $(verbose) mkdir -p $(COVER_REPORT_DIR)
+ $(gen_verbose) $(call erlang,$(cover_report.erl))
+
+endif
+endif # ifneq ($(COVER_REPORT_DIR),)
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: sfx
+
+ifdef RELX_REL
+ifdef SFX
+
+# Configuration.
+
+SFX_ARCHIVE ?= $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/$(RELX_REL_NAME)-$(RELX_REL_VSN).tar.gz
+SFX_OUTPUT_FILE ?= $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME).run
+
+# Core targets.
+
+rel:: sfx
+
+# Plugin-specific targets.
+
+define sfx_stub
+#!/bin/sh
+
+TMPDIR=`mktemp -d`
+ARCHIVE=`awk '/^__ARCHIVE_BELOW__$$/ {print NR + 1; exit 0;}' $$0`
+FILENAME=$$(basename $$0)
+REL=$${FILENAME%.*}
+
+tail -n+$$ARCHIVE $$0 | tar -xzf - -C $$TMPDIR
+
+$$TMPDIR/bin/$$REL console
+RET=$$?
+
+rm -rf $$TMPDIR
+
+exit $$RET
+
+__ARCHIVE_BELOW__
+endef
+
+sfx:
+ $(verbose) $(call core_render,sfx_stub,$(SFX_OUTPUT_FILE))
+ $(gen_verbose) cat $(SFX_ARCHIVE) >> $(SFX_OUTPUT_FILE)
+ $(verbose) chmod +x $(SFX_OUTPUT_FILE)
+
+endif
+endif
+
+# Copyright (c) 2013-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# External plugins.
+
+DEP_PLUGINS ?=
+
+$(foreach p,$(DEP_PLUGINS),\
+ $(eval $(if $(findstring /,$p),\
+ $(call core_dep_plugin,$p,$(firstword $(subst /, ,$p))),\
+ $(call core_dep_plugin,$p/plugins.mk,$p))))
+
+help:: help-plugins
+
+help-plugins::
+ $(verbose) :
+
+# Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015-2016, Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Fetch dependencies recursively (without building them).
+
+.PHONY: fetch-deps fetch-doc-deps fetch-rel-deps fetch-test-deps \
+ fetch-shell-deps
+
+.PHONY: $(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+fetch-deps: $(ERLANG_MK_RECURSIVE_DEPS_LIST)
+fetch-doc-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST)
+fetch-rel-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST)
+fetch-test-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST)
+fetch-shell-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+ifneq ($(SKIP_DEPS),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST):
+ $(verbose) :> $@
+else
+# By default, we fetch "normal" dependencies. They are also included no
+# matter the type of requested dependencies.
+#
+# $(ALL_DEPS_DIRS) includes $(BUILD_DEPS).
+
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_DOC_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_REL_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_TEST_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_SHELL_DEPS_DIRS)
+
+# Allow fetch-deps to be used together with $(DEP_TYPES) to fetch
+# multiple types of dependencies with a single target.
+ifneq ($(filter doc,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_DOC_DEPS_DIRS)
+endif
+ifneq ($(filter rel,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_REL_DEPS_DIRS)
+endif
+ifneq ($(filter test,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_TEST_DEPS_DIRS)
+endif
+ifneq ($(filter shell,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_SHELL_DEPS_DIRS)
+endif
+
+ERLANG_MK_RECURSIVE_TMP_LIST := $(abspath $(ERLANG_MK_TMP)/recursive-tmp-deps-$(shell echo $$PPID).log)
+
+$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): | $(ERLANG_MK_TMP)
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_RECURSIVE_TMP_LIST)
+endif
+ $(verbose) touch $(ERLANG_MK_RECURSIVE_TMP_LIST)
+ $(verbose) set -e; for dep in $^ ; do \
+ if ! grep -qs ^$$dep$$ $(ERLANG_MK_RECURSIVE_TMP_LIST); then \
+ echo $$dep >> $(ERLANG_MK_RECURSIVE_TMP_LIST); \
+ if grep -qs -E "^[[:blank:]]*include[[:blank:]]+(erlang\.mk|.*/erlang\.mk|.*ERLANG_MK_FILENAME.*)$$" \
+ $$dep/GNUmakefile $$dep/makefile $$dep/Makefile; then \
+ $(MAKE) -C $$dep fetch-deps \
+ IS_DEP=1 \
+ ERLANG_MK_RECURSIVE_TMP_LIST=$(ERLANG_MK_RECURSIVE_TMP_LIST); \
+ fi \
+ fi \
+ done
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) sort < $(ERLANG_MK_RECURSIVE_TMP_LIST) | \
+ uniq > $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted
+ $(verbose) cmp -s $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted $@ \
+ || mv $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted $@
+ $(verbose) rm -f $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted
+ $(verbose) rm $(ERLANG_MK_RECURSIVE_TMP_LIST)
+endif
+endif # ifneq ($(SKIP_DEPS),)
+
+# List dependencies recursively.
+
+.PHONY: list-deps list-doc-deps list-rel-deps list-test-deps \
+ list-shell-deps
+
+list-deps: $(ERLANG_MK_RECURSIVE_DEPS_LIST)
+list-doc-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST)
+list-rel-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST)
+list-test-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST)
+list-shell-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+list-deps list-doc-deps list-rel-deps list-test-deps list-shell-deps:
+ $(verbose) cat $^
+
+# Query dependencies recursively.
+
+.PHONY: query-deps query-doc-deps query-rel-deps query-test-deps \
+ query-shell-deps
+
+QUERY ?= name fetch_method repo version
+
+define query_target
+$(1): $(2) clean-tmp-query.log
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(4)
+endif
+ $(verbose) $(foreach dep,$(3),\
+ echo $(PROJECT): $(foreach q,$(QUERY),$(call query_$(q),$(dep))) >> $(4) ;)
+ $(if $(filter-out query-deps,$(1)),,\
+ $(verbose) set -e; for dep in $(3) ; do \
+ if grep -qs ^$$$$dep$$$$ $(ERLANG_MK_TMP)/query.log; then \
+ :; \
+ else \
+ echo $$$$dep >> $(ERLANG_MK_TMP)/query.log; \
+ $(MAKE) -C $(DEPS_DIR)/$$$$dep $$@ QUERY="$(QUERY)" IS_DEP=1 || true; \
+ fi \
+ done)
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) touch $(4)
+ $(verbose) cat $(4)
+endif
+endef
+
+clean-tmp-query.log:
+ifeq ($(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_TMP)/query.log
+endif
+
+$(eval $(call query_target,query-deps,$(ERLANG_MK_RECURSIVE_DEPS_LIST),$(BUILD_DEPS) $(DEPS),$(ERLANG_MK_QUERY_DEPS_FILE)))
+$(eval $(call query_target,query-doc-deps,$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST),$(DOC_DEPS),$(ERLANG_MK_QUERY_DOC_DEPS_FILE)))
+$(eval $(call query_target,query-rel-deps,$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST),$(REL_DEPS),$(ERLANG_MK_QUERY_REL_DEPS_FILE)))
+$(eval $(call query_target,query-test-deps,$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST),$(TEST_DEPS),$(ERLANG_MK_QUERY_TEST_DEPS_FILE)))
+$(eval $(call query_target,query-shell-deps,$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST),$(SHELL_DEPS),$(ERLANG_MK_QUERY_SHELL_DEPS_FILE)))
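+
+# Illustrative run (field values assumed): 'make query-deps QUERY="name version"'
+# prints one line per recursive dependency, e.g. "my_project: ra master", with
+# each field produced by the corresponding query_* macro from erlang.mk.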
diff --git a/deps/rabbitmq_auth_backend_oauth2/rabbitmq-components.mk b/deps/rabbitmq_auth_backend_oauth2/rabbitmq-components.mk
new file mode 100644
index 0000000000..b2a3be8b35
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_oauth2/rabbitmq-components.mk
@@ -0,0 +1,359 @@
+ifeq ($(.DEFAULT_GOAL),)
+# Set the default goal to `all` because this file defines some targets
+# before erlang.mk is included, which would otherwise cause the wrong
+# target to become the default.
+.DEFAULT_GOAL = all
+endif
+
+# PROJECT_VERSION defaults to:
+# 1. the version exported by rabbitmq-server-release;
+# 2. the version stored in `git-revisions.txt`, if it exists;
+# 3. a version based on git-describe(1), if it is a Git clone;
+# 4. 0.0.0
+
+PROJECT_VERSION := $(RABBITMQ_VERSION)
+
+ifeq ($(PROJECT_VERSION),)
+PROJECT_VERSION := $(shell \
+if test -f git-revisions.txt; then \
+ head -n1 git-revisions.txt | \
+ awk '{print $$$(words $(PROJECT_DESCRIPTION) version);}'; \
+else \
+ (git describe --dirty --abbrev=7 --tags --always --first-parent \
+ 2>/dev/null || echo rabbitmq_v0_0_0) | \
+ sed -e 's/^rabbitmq_v//' -e 's/^v//' -e 's/_/./g' -e 's/-/+/' \
+ -e 's/-/./g'; \
+fi)
+endif
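+
+# As an illustration (tags assumed): a checkout on tag v3.8.9 yields
+# PROJECT_VERSION = 3.8.9, while a git-describe(1) result such as
+# v3.8.9-7-gabc1234 becomes 3.8.9+7.gabc1234 after the sed substitutions
+# above.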
+
+# --------------------------------------------------------------------
+# RabbitMQ components.
+# --------------------------------------------------------------------
+
+# For RabbitMQ repositories, we want to check out branches which match
+# the parent project. For instance, if the parent project is on a
+# release tag, dependencies must be on the same release tag. If the
+# parent project is on a topic branch, dependencies must be on the same
+# topic branch, or fall back to `stable` or `master`, whichever was the
+# base of the topic branch.
+
+dep_amqp_client = git_rmq rabbitmq-erlang-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_amqp10_client = git_rmq rabbitmq-amqp1.0-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_amqp10_common = git_rmq rabbitmq-amqp1.0-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbit = git_rmq rabbitmq-server $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbit_common = git_rmq rabbitmq-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_amqp1_0 = git_rmq rabbitmq-amqp1.0 $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_amqp = git_rmq rabbitmq-auth-backend-amqp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_cache = git_rmq rabbitmq-auth-backend-cache $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_http = git_rmq rabbitmq-auth-backend-http $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_ldap = git_rmq rabbitmq-auth-backend-ldap $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_oauth2 = git_rmq rabbitmq-auth-backend-oauth2 $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_mechanism_ssl = git_rmq rabbitmq-auth-mechanism-ssl $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_aws = git_rmq rabbitmq-aws $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_boot_steps_visualiser = git_rmq rabbitmq-boot-steps-visualiser $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_cli = git_rmq rabbitmq-cli $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_codegen = git_rmq rabbitmq-codegen $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_consistent_hash_exchange = git_rmq rabbitmq-consistent-hash-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_ct_client_helpers = git_rmq rabbitmq-ct-client-helpers $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_ct_helpers = git_rmq rabbitmq-ct-helpers $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_delayed_message_exchange = git_rmq rabbitmq-delayed-message-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_dotnet_client = git_rmq rabbitmq-dotnet-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_event_exchange = git_rmq rabbitmq-event-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_federation = git_rmq rabbitmq-federation $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_federation_management = git_rmq rabbitmq-federation-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_java_client = git_rmq rabbitmq-java-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_client = git_rmq rabbitmq-jms-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_cts = git_rmq rabbitmq-jms-cts $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_topic_exchange = git_rmq rabbitmq-jms-topic-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_lvc_exchange = git_rmq rabbitmq-lvc-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management = git_rmq rabbitmq-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_agent = git_rmq rabbitmq-management-agent $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_exchange = git_rmq rabbitmq-management-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_themes = git_rmq rabbitmq-management-themes $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_message_timestamp = git_rmq rabbitmq-message-timestamp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_metronome = git_rmq rabbitmq-metronome $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_mqtt = git_rmq rabbitmq-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_objc_client = git_rmq rabbitmq-objc-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_aws = git_rmq rabbitmq-peer-discovery-aws $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_common = git_rmq rabbitmq-peer-discovery-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_consul = git_rmq rabbitmq-peer-discovery-consul $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_etcd = git_rmq rabbitmq-peer-discovery-etcd $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_k8s = git_rmq rabbitmq-peer-discovery-k8s $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_prometheus = git_rmq rabbitmq-prometheus $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_random_exchange = git_rmq rabbitmq-random-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_recent_history_exchange = git_rmq rabbitmq-recent-history-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_routing_node_stamp = git_rmq rabbitmq-routing-node-stamp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_rtopic_exchange = git_rmq rabbitmq-rtopic-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_server_release = git_rmq rabbitmq-server-release $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_sharding = git_rmq rabbitmq-sharding $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_shovel = git_rmq rabbitmq-shovel $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_shovel_management = git_rmq rabbitmq-shovel-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_stomp = git_rmq rabbitmq-stomp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_stream = git_rmq rabbitmq-stream $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_toke = git_rmq rabbitmq-toke $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_top = git_rmq rabbitmq-top $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_tracing = git_rmq rabbitmq-tracing $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_trust_store = git_rmq rabbitmq-trust-store $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_test = git_rmq rabbitmq-test $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_dispatch = git_rmq rabbitmq-web-dispatch $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_stomp = git_rmq rabbitmq-web-stomp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_stomp_examples = git_rmq rabbitmq-web-stomp-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt = git_rmq rabbitmq-web-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt_examples = git_rmq rabbitmq-web-mqtt-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_website = git_rmq rabbitmq-website $(current_rmq_ref) $(base_rmq_ref) live master
+dep_toke = git_rmq toke $(current_rmq_ref) $(base_rmq_ref) master
+
+dep_rabbitmq_public_umbrella = git_rmq rabbitmq-public-umbrella $(current_rmq_ref) $(base_rmq_ref) master
+
+# Third-party dependencies version pinning.
+#
+# We do that in this file, which is copied into all projects, to ensure
+# all projects use the same versions. It avoids conflicts and makes it
+# possible to work with rabbitmq-public-umbrella.
+
+dep_accept = hex 0.3.5
+dep_cowboy = hex 2.8.0
+dep_cowlib = hex 2.9.1
+dep_jsx = hex 2.11.0
+dep_lager = hex 3.8.0
+dep_prometheus = git https://github.com/deadtrickster/prometheus.erl.git master
+dep_ra = git https://github.com/rabbitmq/ra.git master
+dep_ranch = hex 1.7.1
+dep_recon = hex 2.5.1
+dep_observer_cli = hex 1.5.4
+dep_stdout_formatter = hex 0.2.4
+dep_sysmon_handler = hex 1.3.0
+
+RABBITMQ_COMPONENTS = amqp_client \
+ amqp10_common \
+ amqp10_client \
+ rabbit \
+ rabbit_common \
+ rabbitmq_amqp1_0 \
+ rabbitmq_auth_backend_amqp \
+ rabbitmq_auth_backend_cache \
+ rabbitmq_auth_backend_http \
+ rabbitmq_auth_backend_ldap \
+ rabbitmq_auth_backend_oauth2 \
+ rabbitmq_auth_mechanism_ssl \
+ rabbitmq_aws \
+ rabbitmq_boot_steps_visualiser \
+ rabbitmq_cli \
+ rabbitmq_codegen \
+ rabbitmq_consistent_hash_exchange \
+ rabbitmq_ct_client_helpers \
+ rabbitmq_ct_helpers \
+ rabbitmq_delayed_message_exchange \
+ rabbitmq_dotnet_client \
+ rabbitmq_event_exchange \
+ rabbitmq_federation \
+ rabbitmq_federation_management \
+ rabbitmq_java_client \
+ rabbitmq_jms_client \
+ rabbitmq_jms_cts \
+ rabbitmq_jms_topic_exchange \
+ rabbitmq_lvc_exchange \
+ rabbitmq_management \
+ rabbitmq_management_agent \
+ rabbitmq_management_exchange \
+ rabbitmq_management_themes \
+ rabbitmq_message_timestamp \
+ rabbitmq_metronome \
+ rabbitmq_mqtt \
+ rabbitmq_objc_client \
+ rabbitmq_peer_discovery_aws \
+ rabbitmq_peer_discovery_common \
+ rabbitmq_peer_discovery_consul \
+ rabbitmq_peer_discovery_etcd \
+ rabbitmq_peer_discovery_k8s \
+ rabbitmq_prometheus \
+ rabbitmq_random_exchange \
+ rabbitmq_recent_history_exchange \
+ rabbitmq_routing_node_stamp \
+ rabbitmq_rtopic_exchange \
+ rabbitmq_server_release \
+ rabbitmq_sharding \
+ rabbitmq_shovel \
+ rabbitmq_shovel_management \
+ rabbitmq_stomp \
+ rabbitmq_stream \
+ rabbitmq_toke \
+ rabbitmq_top \
+ rabbitmq_tracing \
+ rabbitmq_trust_store \
+ rabbitmq_web_dispatch \
+ rabbitmq_web_mqtt \
+ rabbitmq_web_mqtt_examples \
+ rabbitmq_web_stomp \
+ rabbitmq_web_stomp_examples \
+ rabbitmq_website
+
+# Erlang.mk does not rebuild dependencies after they have been compiled
+# once, except for those listed in the `$(FORCE_REBUILD)` variable.
+#
+# We want all RabbitMQ components to always be rebuilt: this eases
+# working on several components at the same time.
+
+FORCE_REBUILD = $(RABBITMQ_COMPONENTS)
+
+# Several components have a custom erlang.mk/build.config, mainly
+# to disable eunit. Therefore, we can't use the top-level project's
+# erlang.mk copy.
+NO_AUTOPATCH += $(RABBITMQ_COMPONENTS)
+
+ifeq ($(origin current_rmq_ref),undefined)
+ifneq ($(wildcard .git),)
+current_rmq_ref := $(shell (\
+ ref=$$(LANG=C git branch --list | awk '/^\* \(.*detached / {ref=$$0; sub(/.*detached [^ ]+ /, "", ref); sub(/\)$$/, "", ref); print ref; exit;} /^\* / {ref=$$0; sub(/^\* /, "", ref); print ref; exit}');\
+ if test "$$(git rev-parse --short HEAD)" != "$$ref"; then echo "$$ref"; fi))
+else
+current_rmq_ref := master
+endif
+endif
+export current_rmq_ref
+
+ifeq ($(origin base_rmq_ref),undefined)
+ifneq ($(wildcard .git),)
+possible_base_rmq_ref := master
+ifeq ($(possible_base_rmq_ref),$(current_rmq_ref))
+base_rmq_ref := $(current_rmq_ref)
+else
+base_rmq_ref := $(shell \
+ (git rev-parse --verify -q master >/dev/null && \
+ git rev-parse --verify -q $(possible_base_rmq_ref) >/dev/null && \
+ git merge-base --is-ancestor $$(git merge-base master HEAD) $(possible_base_rmq_ref) && \
+ echo $(possible_base_rmq_ref)) || \
+ echo master)
+endif
+else
+base_rmq_ref := master
+endif
+endif
+export base_rmq_ref
+
+# Repository URL selection.
+#
+# First, we infer other components' location from the current project
+# repository URL, if it's a Git repository:
+# - We take the "origin" remote URL as the base
+# - The current project name and repository name are replaced by the
+# target's properties:
+# eg. rabbitmq-common is replaced by rabbitmq-codegen
+# eg. rabbit_common is replaced by rabbitmq_codegen
+#
+# If cloning from this computed location fails, we fall back to the
+# RabbitMQ upstream, which is on GitHub.
+
+# Macro to transform eg. "rabbit_common" to "rabbitmq-common".
+rmq_cmp_repo_name = $(word 2,$(dep_$(1)))
+
+# Upstream URL for the current project.
+RABBITMQ_COMPONENT_REPO_NAME := $(call rmq_cmp_repo_name,$(PROJECT))
+RABBITMQ_UPSTREAM_FETCH_URL ?= https://github.com/rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git
+RABBITMQ_UPSTREAM_PUSH_URL ?= git@github.com:rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git
+
+# Current URL for the current project. If this is not a Git clone,
+# default to the upstream Git repository.
+ifneq ($(wildcard .git),)
+git_origin_fetch_url := $(shell git config remote.origin.url)
+git_origin_push_url := $(shell git config remote.origin.pushurl || git config remote.origin.url)
+RABBITMQ_CURRENT_FETCH_URL ?= $(git_origin_fetch_url)
+RABBITMQ_CURRENT_PUSH_URL ?= $(git_origin_push_url)
+else
+RABBITMQ_CURRENT_FETCH_URL ?= $(RABBITMQ_UPSTREAM_FETCH_URL)
+RABBITMQ_CURRENT_PUSH_URL ?= $(RABBITMQ_UPSTREAM_PUSH_URL)
+endif
+
+# Macro to replace the following pattern:
+# 1. /foo.git -> /bar.git
+# 2. /foo -> /bar
+# 3. /foo/ -> /bar/
+subst_repo_name = $(patsubst %/$(1)/%,%/$(2)/%,$(patsubst %/$(1),%/$(2),$(patsubst %/$(1).git,%/$(2).git,$(3))))
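+
+# For example (URL assumed):
+# $(call subst_repo_name,rabbitmq-common,rabbitmq-codegen,https://github.com/rabbitmq/rabbitmq-common.git)
+# expands to https://github.com/rabbitmq/rabbitmq-codegen.git.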
+
+# Macro to replace both the project's name (eg. "rabbit_common") and
+# repository name (eg. "rabbitmq-common") by the target's equivalent.
+#
+# This macro is kept on one line because we don't want whitespace in
+# the returned value, as it's used in $(dep_fetch_git_rmq) in a shell
+# single-quoted string.
+dep_rmq_repo = $(if $(dep_$(2)),$(call subst_repo_name,$(PROJECT),$(2),$(call subst_repo_name,$(RABBITMQ_COMPONENT_REPO_NAME),$(call rmq_cmp_repo_name,$(2)),$(1))),$(pkg_$(1)_repo))
+
+dep_rmq_commits = $(if $(dep_$(1)), \
+ $(wordlist 3,$(words $(dep_$(1))),$(dep_$(1))), \
+ $(pkg_$(1)_commit))
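+
+# With the dep_rabbit line above, for instance, $(call dep_rmq_commits,rabbit)
+# expands to "$(current_rmq_ref) $(base_rmq_ref) master": the list of refs
+# that dep_fetch_git_rmq below tries to check out, in order, falling back to
+# the next one when a checkout fails.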
+
+define dep_fetch_git_rmq
+ fetch_url1='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_FETCH_URL),$(1))'; \
+ fetch_url2='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_FETCH_URL),$(1))'; \
+ if test "$$$$fetch_url1" != '$(RABBITMQ_CURRENT_FETCH_URL)' && \
+ git clone -q -n -- "$$$$fetch_url1" $(DEPS_DIR)/$(call dep_name,$(1)); then \
+ fetch_url="$$$$fetch_url1"; \
+ push_url='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_PUSH_URL),$(1))'; \
+ elif git clone -q -n -- "$$$$fetch_url2" $(DEPS_DIR)/$(call dep_name,$(1)); then \
+ fetch_url="$$$$fetch_url2"; \
+ push_url='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_PUSH_URL),$(1))'; \
+ fi; \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && ( \
+ $(foreach ref,$(call dep_rmq_commits,$(1)), \
+ git checkout -q $(ref) >/dev/null 2>&1 || \
+ ) \
+ (echo "error: no valid pathspec among: $(call dep_rmq_commits,$(1))" \
+ 1>&2 && false) ) && \
+ (test "$$$$fetch_url" = "$$$$push_url" || \
+ git remote set-url --push origin "$$$$push_url")
+endef
+
+# --------------------------------------------------------------------
+# Component distribution.
+# --------------------------------------------------------------------
+
+list-dist-deps::
+ @:
+
+prepare-dist::
+ @:
+
+# --------------------------------------------------------------------
+# Umbrella-specific settings.
+# --------------------------------------------------------------------
+
+# If the top-level project is a RabbitMQ component, we override
+# $(DEPS_DIR) for this project to point to the top-level one.
+#
+# We also verify that the guessed DEPS_DIR is actually named `deps`,
+# to rule out any situation where it is a coincidence that we found a
+# `rabbitmq-components.mk` in upper directories.
+
+possible_deps_dir_1 = $(abspath ..)
+possible_deps_dir_2 = $(abspath ../../..)
+
+ifeq ($(notdir $(possible_deps_dir_1)),deps)
+ifneq ($(wildcard $(possible_deps_dir_1)/../rabbitmq-components.mk),)
+deps_dir_overriden = 1
+DEPS_DIR ?= $(possible_deps_dir_1)
+DISABLE_DISTCLEAN = 1
+endif
+endif
+
+ifeq ($(deps_dir_overriden),)
+ifeq ($(notdir $(possible_deps_dir_2)),deps)
+ifneq ($(wildcard $(possible_deps_dir_2)/../rabbitmq-components.mk),)
+deps_dir_overriden = 1
+DEPS_DIR ?= $(possible_deps_dir_2)
+DISABLE_DISTCLEAN = 1
+endif
+endif
+endif
+
+ifneq ($(wildcard UMBRELLA.md),)
+DISABLE_DISTCLEAN = 1
+endif
+
+# We disable `make distclean` so $(DEPS_DIR) is not accidentally removed.
+
+ifeq ($(DISABLE_DISTCLEAN),1)
+ifneq ($(filter distclean distclean-deps,$(MAKECMDGOALS)),)
+SKIP_DEPS = 1
+endif
+endif
diff --git a/deps/rabbitmq_auth_backend_oauth2/scripts/seed.sh b/deps/rabbitmq_auth_backend_oauth2/scripts/seed.sh
new file mode 100755
index 0000000000..8812c41541
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_oauth2/scripts/seed.sh
@@ -0,0 +1,43 @@
+#!/usr/bin/env sh
+
+uaac token client get admin -s adminsecret
+
+uaac client delete rabbit_client
+uaac client add rabbit_client --name rabbit_client \
+ --secret rabbit_secret \
+ --authorized_grant_types client_credentials \
+ --authorities 'rabbitmq.read:*/* rabbitmq.write:*/* rabbitmq.configure:*/* rabbitmq.tag:management rabbitmq.tag:administrator' \
+ --access_token_validity 86400
+
+uaac token client get rabbit_client -s rabbit_secret
+
+uaac token client get admin -s adminsecret
+
+uaac context rabbit_client
+
+# switch back to the admin context so that we have
+# the permissions to add the user
+uaac token client get admin -s adminsecret
+
+uaac user add rabbit_user -p rabbit_password --email rabbit_user@example.com
+
+uaac group add "rabbitmq.read:*/*"
+uaac group add "rabbitmq.write:*/*"
+uaac group add "rabbitmq.configure:*/*"
+
+uaac member add "rabbitmq.read:*/*" rabbit_user
+uaac member add "rabbitmq.write:*/*" rabbit_user
+uaac member add "rabbitmq.configure:*/*" rabbit_user
+
+uaac client add rabbit_user_client \
+ --name rabbit_user_client \
+ --scope 'rabbitmq.*' \
+ --authorized_grant_types password \
+ --secret rabbit_secret \
+ --redirect_uri 'http://localhost:15672'
+
+uaac token owner get rabbit_user_client rabbit_user -s rabbit_secret -p rabbit_password
+
+uaac token client get admin -s adminsecret
+
+uaac context rabbit_client
diff --git a/deps/rabbitmq_auth_backend_oauth2/src/Elixir.RabbitMQ.CLI.Ctl.Commands.AddUaaKeyCommand.erl b/deps/rabbitmq_auth_backend_oauth2/src/Elixir.RabbitMQ.CLI.Ctl.Commands.AddUaaKeyCommand.erl
new file mode 100644
index 0000000000..6571ba9c59
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_oauth2/src/Elixir.RabbitMQ.CLI.Ctl.Commands.AddUaaKeyCommand.erl
@@ -0,0 +1,143 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+-module('Elixir.RabbitMQ.CLI.Ctl.Commands.AddUaaKeyCommand').
+
+-behaviour('Elixir.RabbitMQ.CLI.CommandBehaviour').
+
+-export([
+ usage/0,
+ validate/2,
+ merge_defaults/2,
+ banner/2,
+ run/2,
+ switches/0,
+ aliases/0,
+ output/2,
+ formatter/0
+ ]).
+
+
+usage() ->
+ <<"add_uaa_key <name> [--json=<json_key>] [--pem=<public_key>] [--pem_file=<pem_file>]">>.
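+
+%% Illustrative invocation (key name and JSON value are made up):
+%%   rabbitmqctl add_uaa_key my-key --json='{"kty":"oct","k":"dG9rZW5rZXk"}'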
+
+switches() ->
+ [{json, string},
+ {pem, string},
+ {pem_file, string}].
+
+aliases() -> [].
+
+validate([], _Options) -> {validation_failure, not_enough_args};
+validate([_,_|_], _Options) -> {validation_failure, too_many_args};
+validate([_], Options) ->
+ Json = maps:get(json, Options, undefined),
+ Pem = maps:get(pem, Options, undefined),
+ PemFile = maps:get(pem_file, Options, undefined),
+ case {is_binary(Json), is_binary(Pem), is_binary(PemFile)} of
+ {false, false, false} ->
+ {validation_failure,
+ {bad_argument, <<"No key specified">>}};
+ {true, false, false} ->
+ validate_json(Json);
+ {false, true, false} ->
+ validate_pem(Pem);
+ {false, false, true} ->
+ validate_pem_file(PemFile);
+ {_, _, _} ->
+ {validation_failure,
+ {bad_argument, <<"There can be only one key type">>}}
+ end.
+
+validate_json(Json) ->
+ case rabbit_json:try_decode(Json) of
+ {ok, _} ->
+ case uaa_jwt:verify_signing_key(json, Json) of
+ ok -> ok;
+ {error, {fields_missing_for_kty, Kty}} ->
+ {validation_failure,
+ {bad_argument,
+              <<"Key fields are missing for kty \"", Kty/binary, "\"">>}};
+ {error, unknown_kty} ->
+ {validation_failure,
+ {bad_argument, <<"\"kty\" field is invalid">>}};
+ {error, no_kty} ->
+ {validation_failure,
+             {bad_argument, <<"JSON key should contain \"kty\" field">>}};
+ {error, Err} ->
+ {validation_failure, {bad_argument, Err}}
+ end;
+ {error, _} ->
+ {validation_failure, {bad_argument, <<"Invalid JSON">>}};
+ error ->
+ {validation_failure, {bad_argument, <<"Invalid JSON">>}}
+ end.
+
+validate_pem(Pem) ->
+ case uaa_jwt:verify_signing_key(pem, Pem) of
+ ok -> ok;
+ {error, invalid_pem_string} ->
+ {validation_failure, <<"Unable to read a key from the PEM string">>};
+ {error, Err} ->
+ {validation_failure, Err}
+ end.
+
+validate_pem_file(PemFile) ->
+ case uaa_jwt:verify_signing_key(pem_file, PemFile) of
+ ok -> ok;
+ {error, enoent} ->
+ {validation_failure, {bad_argument, <<"PEM file not found">>}};
+ {error, invalid_pem_file} ->
+ {validation_failure, <<"Unable to read a key from the PEM file">>};
+ {error, Err} ->
+ {validation_failure, Err}
+ end.
+
+merge_defaults(Args, #{pem_file := FileName} = Options) ->
+ AbsFileName = filename:absname(FileName),
+ {Args, Options#{pem_file := AbsFileName}};
+merge_defaults(Args, Options) -> {Args, Options}.
+
+banner([Name], #{json := Json}) ->
+ <<"Adding UAA signing key \"",
+ Name/binary,
+ "\" in JSON format: \"",
+ Json/binary, "\"">>;
+banner([Name], #{pem := Pem}) ->
+ <<"Adding UAA signing key \"",
+ Name/binary,
+ "\" public key: \"",
+ Pem/binary, "\"">>;
+banner([Name], #{pem_file := PemFile}) ->
+ <<"Adding UAA signing key \"",
+ Name/binary,
+ "\" filename: \"",
+ PemFile/binary, "\"">>.
+
+run([Name], #{node := Node} = Options) ->
+ {Type, Value} = case Options of
+ #{json := Json} -> {json, Json};
+ #{pem := Pem} -> {pem, Pem};
+ #{pem_file := PemFile} -> {pem_file, PemFile}
+ end,
+ case rabbit_misc:rpc_call(Node,
+ uaa_jwt, add_signing_key,
+ [Name, Type, Value]) of
+ {ok, _Keys} -> ok;
+ {error, Err} -> {error, Err}
+ end.
+
+output(E, _Opts) ->
+ 'Elixir.RabbitMQ.CLI.DefaultOutput':output(E).
+
+formatter() -> 'Elixir.RabbitMQ.CLI.Formatters.Erlang'.
diff --git a/deps/rabbitmq_auth_backend_oauth2/src/rabbit_auth_backend_oauth2.erl b/deps/rabbitmq_auth_backend_oauth2/src/rabbit_auth_backend_oauth2.erl
new file mode 100644
index 0000000000..e1a99ab7ea
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_oauth2/src/rabbit_auth_backend_oauth2.erl
@@ -0,0 +1,318 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_auth_backend_oauth2).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-behaviour(rabbit_authn_backend).
+-behaviour(rabbit_authz_backend).
+
+-export([description/0]).
+-export([user_login_authentication/2, user_login_authorization/2,
+ check_vhost_access/3, check_resource_access/4,
+ check_topic_access/4, check_token/1, state_can_expire/0, update_state/2]).
+
+% for testing
+-export([post_process_payload/1]).
+
+-import(rabbit_data_coercion, [to_map/1]).
+
+-ifdef(TEST).
+-compile(export_all).
+-endif.
+%%--------------------------------------------------------------------
+
+-define(APP, rabbitmq_auth_backend_oauth2).
+-define(RESOURCE_SERVER_ID, resource_server_id).
+%% a term used by the IdentityServer community
+-define(COMPLEX_CLAIM, extra_scopes_source).
+
+description() ->
+ [{name, <<"OAuth 2">>},
+ {description, <<"Performs authentication and authorisation using JWT tokens and OAuth 2 scopes">>}].
+
+%%--------------------------------------------------------------------
+
+user_login_authentication(Username, AuthProps) ->
+ case authenticate(Username, AuthProps) of
+ {refused, Msg, Args} = AuthResult ->
+ rabbit_log:debug(Msg ++ "~n", Args),
+ AuthResult;
+ _ = AuthResult ->
+ AuthResult
+ end.
+
+user_login_authorization(Username, AuthProps) ->
+ case authenticate(Username, AuthProps) of
+ {ok, #auth_user{impl = Impl}} -> {ok, Impl};
+ Else -> Else
+ end.
+
+check_vhost_access(#auth_user{impl = DecodedToken},
+ VHost, _AuthzData) ->
+ with_decoded_token(DecodedToken,
+ fun() ->
+ Scopes = get_scopes(DecodedToken),
+ ScopeString = rabbit_oauth2_scope:concat_scopes(Scopes, ","),
+ rabbit_log:debug("Matching virtual host '~s' against the following scopes: ~s", [VHost, ScopeString]),
+ rabbit_oauth2_scope:vhost_access(VHost, Scopes)
+ end).
+
+check_resource_access(#auth_user{impl = DecodedToken},
+ Resource, Permission, _AuthzContext) ->
+ with_decoded_token(DecodedToken,
+ fun() ->
+ Scopes = get_scopes(DecodedToken),
+ rabbit_oauth2_scope:resource_access(Resource, Permission, Scopes)
+ end).
+
+check_topic_access(#auth_user{impl = DecodedToken},
+ Resource, Permission, Context) ->
+ with_decoded_token(DecodedToken,
+ fun() ->
+ Scopes = get_scopes(DecodedToken),
+ rabbit_oauth2_scope:topic_access(Resource, Permission, Context, Scopes)
+ end).
+
+state_can_expire() -> true.
+
+update_state(AuthUser, NewToken) ->
+ case check_token(NewToken) of
+ %% avoid logging the token
+ {error, _} = E -> E;
+ {refused, {error, {invalid_token, error, _Err, _Stacktrace}}} ->
+ {refused, "Authentication using an OAuth 2/JWT token failed: provided token is invalid"};
+ {refused, Err} ->
+ {refused, rabbit_misc:format("Authentication using an OAuth 2/JWT token failed: ~p", [Err])};
+ {ok, DecodedToken} ->
+ Tags = tags_from(DecodedToken),
+
+ {ok, AuthUser#auth_user{tags = Tags,
+ impl = DecodedToken}}
+ end.
+
+%%--------------------------------------------------------------------
+
+authenticate(Username0, AuthProps0) ->
+ AuthProps = to_map(AuthProps0),
+ Token = token_from_context(AuthProps),
+ case check_token(Token) of
+ %% avoid logging the token
+ {error, _} = E -> E;
+ {refused, {error, {invalid_token, error, _Err, _Stacktrace}}} ->
+ {refused, "Authentication using an OAuth 2/JWT token failed: provided token is invalid", []};
+ {refused, Err} ->
+ {refused, "Authentication using an OAuth 2/JWT token failed: ~p", [Err]};
+ {ok, DecodedToken} ->
+ Func = fun() ->
+ Username = username_from(Username0, DecodedToken),
+ Tags = tags_from(DecodedToken),
+
+ {ok, #auth_user{username = Username,
+ tags = Tags,
+ impl = DecodedToken}}
+ end,
+ case with_decoded_token(DecodedToken, Func) of
+ {error, Err} ->
+ {refused, "Authentication using an OAuth 2/JWT token failed: ~p", [Err]};
+ Else ->
+ Else
+ end
+ end.
+
+with_decoded_token(DecodedToken, Fun) ->
+ case validate_token_expiry(DecodedToken) of
+ ok -> Fun();
+ {error, Msg} = Err ->
+ rabbit_log:error(Msg),
+ Err
+ end.
+
+validate_token_expiry(#{<<"exp">> := Exp}) when is_integer(Exp) ->
+ Now = os:system_time(seconds),
+ case Exp =< Now of
+ true -> {error, rabbit_misc:format("Provided JWT token has expired at timestamp ~p (validated at ~p)", [Exp, Now])};
+ false -> ok
+ end;
+validate_token_expiry(#{}) -> ok.
+
+-spec check_token(binary()) -> {ok, map()} | {refused, term()}.
+check_token(Token) ->
+ case uaa_jwt:decode_and_verify(Token) of
+ {error, Reason} -> {refused, {error, Reason}};
+ {true, Payload} -> validate_payload(post_process_payload(Payload));
+ {false, _} -> {refused, signature_invalid}
+ end.
+
+post_process_payload(Payload) when is_map(Payload) ->
+ Payload0 = maps:map(fun(K, V) ->
+ case K of
+ <<"aud">> when is_binary(V) -> binary:split(V, <<" ">>, [global, trim_all]);
+ <<"scope">> when is_binary(V) -> binary:split(V, <<" ">>, [global, trim_all]);
+ _ -> V
+ end
+ end,
+ Payload
+ ),
+ Payload1 = case does_include_complex_claim_field(Payload0) of
+ true -> post_process_payload_complex_claim(Payload0);
+ false -> Payload0
+ end,
+
+ Payload2 = case maps:is_key(<<"authorization">>, Payload1) of
+ true -> post_process_payload_keycloak(Payload1);
+ false -> Payload1
+ end,
+
+ Payload2.
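+
+%% For example, a token carrying <<"scope">> => <<"rabbitmq.read:*/* rabbitmq.write:*/*">>
+%% leaves this function with <<"scope">> => [<<"rabbitmq.read:*/*">>, <<"rabbitmq.write:*/*">>];
+%% a space-separated <<"aud">> value is split the same way.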
+
+does_include_complex_claim_field(Payload) when is_map(Payload) ->
+ maps:is_key(application:get_env(?APP, ?COMPLEX_CLAIM, undefined), Payload).
+
+post_process_payload_complex_claim(Payload) ->
+ ComplexClaim = maps:get(application:get_env(?APP, ?COMPLEX_CLAIM, undefined), Payload),
+ ResourceServerId = rabbit_data_coercion:to_binary(application:get_env(?APP, ?RESOURCE_SERVER_ID, <<>>)),
+
+ AdditionalScopes =
+ case ComplexClaim of
+ L when is_list(L) -> L;
+ M when is_map(M) ->
+ case maps:get(ResourceServerId, M, undefined) of
+ undefined -> [];
+ Ks when is_list(Ks) ->
+ [erlang:iolist_to_binary([ResourceServerId, <<".">>, K]) || K <- Ks];
+ ClaimBin when is_binary(ClaimBin) ->
+ UnprefixedClaims = binary:split(ClaimBin, <<" ">>, [global, trim_all]),
+ [erlang:iolist_to_binary([ResourceServerId, <<".">>, K]) || K <- UnprefixedClaims];
+ _ -> []
+ end;
+ Bin when is_binary(Bin) ->
+ binary:split(Bin, <<" ">>, [global, trim_all]);
+ _ -> []
+ end,
+
+ case AdditionalScopes of
+ [] -> Payload;
+ _ ->
+ ExistingScopes = maps:get(<<"scope">>, Payload, []),
+ maps:put(<<"scope">>, AdditionalScopes ++ ExistingScopes, Payload)
+ end.
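+
+%% For illustration, with resource_server_id set to <<"rabbitmq">> and an
+%% extra-scopes claim of #{<<"rabbitmq">> => [<<"read:*/*">>]}, the token's
+%% scope list gains <<"rabbitmq.read:*/*">>.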
+
+%% keycloak token format: https://github.com/rabbitmq/rabbitmq-auth-backend-oauth2/issues/36
+post_process_payload_keycloak(#{<<"authorization">> := Authorization} = Payload) ->
+ AdditionalScopes = case maps:get(<<"permissions">>, Authorization, undefined) of
+ undefined -> [];
+ Permissions -> extract_scopes_from_keycloak_permissions([], Permissions)
+ end,
+    ExistingScopes = maps:get(<<"scope">>, Payload, []),
+ maps:put(<<"scope">>, AdditionalScopes ++ ExistingScopes, Payload).
+
+extract_scopes_from_keycloak_permissions(Acc, []) ->
+ Acc;
+extract_scopes_from_keycloak_permissions(Acc, [H | T]) when is_map(H) ->
+ Scopes = case maps:get(<<"scopes">>, H, []) of
+ ScopesAsList when is_list(ScopesAsList) ->
+ ScopesAsList;
+ ScopesAsBinary when is_binary(ScopesAsBinary) ->
+ [ScopesAsBinary]
+ end,
+ extract_scopes_from_keycloak_permissions(Acc ++ Scopes, T);
+extract_scopes_from_keycloak_permissions(Acc, [_ | T]) ->
+ extract_scopes_from_keycloak_permissions(Acc, T).
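+
+%% For illustration, a Keycloak-style claim such as
+%%   #{<<"authorization">> => #{<<"permissions">> =>
+%%       [#{<<"scopes">> => [<<"rabbitmq.read:*/*">>]}]}}
+%% contributes <<"rabbitmq.read:*/*">> to the token's scope list.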
+
+validate_payload(#{<<"scope">> := _Scope, <<"aud">> := _Aud} = DecodedToken) ->
+ ResourceServerEnv = application:get_env(?APP, ?RESOURCE_SERVER_ID, <<>>),
+ ResourceServerId = rabbit_data_coercion:to_binary(ResourceServerEnv),
+ validate_payload(DecodedToken, ResourceServerId).
+
+validate_payload(#{<<"scope">> := Scope, <<"aud">> := Aud} = DecodedToken, ResourceServerId) ->
+ case check_aud(Aud, ResourceServerId) of
+ ok -> {ok, DecodedToken#{<<"scope">> => filter_scopes(Scope, ResourceServerId)}};
+ {error, Err} -> {refused, {invalid_aud, Err}}
+ end.
+
+filter_scopes(Scopes, <<"">>) -> Scopes;
+filter_scopes(Scopes, ResourceServerId) ->
+ PrefixPattern = <<ResourceServerId/binary, ".">>,
+ matching_scopes_without_prefix(Scopes, PrefixPattern).
+
+check_aud(_, <<>>) -> ok;
+check_aud(Aud, ResourceServerId) ->
+ case Aud of
+ List when is_list(List) ->
+ case lists:member(ResourceServerId, Aud) of
+ true -> ok;
+ false -> {error, {resource_id_not_found_in_aud, ResourceServerId, Aud}}
+ end;
+ _ -> {error, {badarg, {aud_is_not_a_list, Aud}}}
+ end.
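+
+%% For example, check_aud([<<"rabbitmq">>], <<"rabbitmq">>) returns ok, while
+%% check_aud([<<"other">>], <<"rabbitmq">>) returns
+%% {error, {resource_id_not_found_in_aud, <<"rabbitmq">>, [<<"other">>]}}.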
+
+%%--------------------------------------------------------------------
+
+get_scopes(#{<<"scope">> := Scope}) -> Scope.
+
+-spec token_from_context(map()) -> binary() | undefined.
+token_from_context(AuthProps) ->
+ maps:get(password, AuthProps, undefined).
+
+%% Decoded tokens look like this:
+%%
+%% #{<<"aud">> => [<<"rabbitmq">>, <<"rabbit_client">>],
+%% <<"authorities">> => [<<"rabbitmq.read:*/*">>, <<"rabbitmq.write:*/*">>, <<"rabbitmq.configure:*/*">>],
+%% <<"azp">> => <<"rabbit_client">>,
+%% <<"cid">> => <<"rabbit_client">>,
+%% <<"client_id">> => <<"rabbit_client">>,
+%% <<"exp">> => 1530849387,
+%% <<"grant_type">> => <<"client_credentials">>,
+%% <<"iat">> => 1530806187,
+%% <<"iss">> => <<"http://localhost:8080/uaa/oauth/token">>,
+%% <<"jti">> => <<"df5d50a1cdcb4fa6bf32e7e03acfc74d">>,
+%% <<"rev_sig">> => <<"2f880d5b">>,
+%% <<"scope">> => [<<"rabbitmq.read:*/*">>, <<"rabbitmq.write:*/*">>, <<"rabbitmq.configure:*/*">>],
+%% <<"sub">> => <<"rabbit_client">>,
+%% <<"zid">> => <<"uaa">>}
+
+-spec username_from(binary(), map()) -> binary() | undefined.
+username_from(ClientProvidedUsername, DecodedToken) ->
+ ClientId = uaa_jwt:client_id(DecodedToken, undefined),
+ Sub = uaa_jwt:sub(DecodedToken, undefined),
+
+ rabbit_log:debug("Computing username from client's JWT token, client ID: '~s', sub: '~s'",
+ [ClientId, Sub]),
+
+ case uaa_jwt:client_id(DecodedToken, Sub) of
+ undefined ->
+ case ClientProvidedUsername of
+ undefined -> undefined;
+ <<>> -> undefined;
+ _Other -> ClientProvidedUsername
+ end;
+ Value ->
+ Value
+ end.
+
+-spec tags_from(map()) -> list(atom()).
+tags_from(DecodedToken) ->
+ Scopes = maps:get(<<"scope">>, DecodedToken, []),
+ TagScopes = matching_scopes_without_prefix(Scopes, <<"tag:">>),
+ lists:usort(lists:map(fun rabbit_data_coercion:to_atom/1, TagScopes)).
+
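+%% For example, matching_scopes_without_prefix([<<"tag:administrator">>,
+%% <<"read:*/*">>], <<"tag:">>) returns [<<"administrator">>]: only scopes
+%% starting with the prefix are kept, and the prefix is stripped.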
+matching_scopes_without_prefix(Scopes, PrefixPattern) ->
+ PatternLength = byte_size(PrefixPattern),
+ lists:filtermap(
+ fun(ScopeEl) ->
+ case binary:match(ScopeEl, PrefixPattern) of
+ {0, PatternLength} ->
+ ElLength = byte_size(ScopeEl),
+ {true,
+ binary:part(ScopeEl,
+ {PatternLength, ElLength - PatternLength})};
+ _ -> false
+ end
+ end,
+ Scopes).
diff --git a/deps/rabbitmq_auth_backend_oauth2/src/rabbit_auth_backend_oauth2_app.erl b/deps/rabbitmq_auth_backend_oauth2/src/rabbit_auth_backend_oauth2_app.erl
new file mode 100644
index 0000000000..e2b1d23131
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_oauth2/src/rabbit_auth_backend_oauth2_app.erl
@@ -0,0 +1,26 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_auth_backend_oauth2_app).
+
+-behaviour(application).
+-export([start/2, stop/1]).
+
+-behaviour(supervisor).
+-export([init/1]).
+
+start(_Type, _StartArgs) ->
+ supervisor:start_link({local,?MODULE},?MODULE,[]).
+
+stop(_State) ->
+ ok.
+
+%%----------------------------------------------------------------------------
+
+init([]) ->
+ {ok, {{one_for_one,3,10},[]}}.
+
diff --git a/deps/rabbitmq_auth_backend_oauth2/src/rabbit_oauth2_scope.erl b/deps/rabbitmq_auth_backend_oauth2/src/rabbit_oauth2_scope.erl
new file mode 100644
index 0000000000..2ebf6c3c52
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_oauth2/src/rabbit_oauth2_scope.erl
@@ -0,0 +1,90 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_oauth2_scope).
+
+-export([vhost_access/2, resource_access/3, topic_access/4, concat_scopes/2]).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-type permission() :: read | write | configure.
+
+%% API functions --------------------------------------------------------------
+-spec vhost_access(binary(), [binary()]) -> boolean().
+vhost_access(VHost, Scopes) ->
+ PermissionScopes = get_scope_permissions(Scopes),
+ lists:any(
+ fun({VHostPattern, _, _, _}) ->
+ wildcard:match(VHost, VHostPattern)
+ end,
+ PermissionScopes).
+
+-spec resource_access(rabbit_types:r(atom()), permission(), [binary()]) -> boolean().
+resource_access(#resource{virtual_host = VHost, name = Name},
+ Permission, Scopes) ->
+ lists:any(
+ fun({VHostPattern, NamePattern, _, ScopeGrantedPermission}) ->
+ wildcard:match(VHost, VHostPattern) andalso
+ wildcard:match(Name, NamePattern) andalso
+ Permission =:= ScopeGrantedPermission
+ end,
+ get_scope_permissions(Scopes)).
+
+topic_access(#resource{virtual_host = VHost, name = ExchangeName},
+ Permission,
+ #{routing_key := RoutingKey},
+ Scopes) ->
+ lists:any(
+ fun({VHostPattern, ExchangeNamePattern, RoutingKeyPattern, ScopeGrantedPermission}) ->
+ is_binary(RoutingKeyPattern) andalso
+ wildcard:match(VHost, VHostPattern) andalso
+ wildcard:match(ExchangeName, ExchangeNamePattern) andalso
+ wildcard:match(RoutingKey, RoutingKeyPattern) andalso
+ Permission =:= ScopeGrantedPermission
+ end,
+ get_scope_permissions(Scopes)).
+
+%% Internal -------------------------------------------------------------------
+
+-spec get_scope_permissions([binary()]) -> [{binary(), binary(), binary() | none, permission()}].
+get_scope_permissions(Scopes) when is_list(Scopes) ->
+ lists:filtermap(
+ fun(ScopeEl) ->
+ case parse_permission_pattern(ScopeEl) of
+ ignore -> false;
+ Perm -> {true, Perm}
+ end
+ end,
+ Scopes).
+
+-spec concat_scopes([binary()], string()) -> string().
+concat_scopes(Scopes, Separator) when is_list(Scopes) ->
+ lists:concat(lists:join(Separator, lists:map(fun rabbit_data_coercion:to_list/1, Scopes))).
+
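+%% For example:
+%%   parse_permission_pattern(<<"read:vhost/queue1">>)
+%%     -> {<<"vhost">>, <<"queue1">>, none, read}
+%%   parse_permission_pattern(<<"read:vhost/exchange1/key1">>)
+%%     -> {<<"vhost">>, <<"exchange1">>, <<"key1">>, read}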
+-spec parse_permission_pattern(binary()) -> {binary(), binary(), binary() | none, permission()} | ignore.
+parse_permission_pattern(<<"read:", ResourcePatternBin/binary>>) ->
+ Permission = read,
+ parse_resource_pattern(ResourcePatternBin, Permission);
+parse_permission_pattern(<<"write:", ResourcePatternBin/binary>>) ->
+ Permission = write,
+ parse_resource_pattern(ResourcePatternBin, Permission);
+parse_permission_pattern(<<"configure:", ResourcePatternBin/binary>>) ->
+ Permission = configure,
+ parse_resource_pattern(ResourcePatternBin, Permission);
+parse_permission_pattern(_Other) ->
+ ignore.
+
+-spec parse_resource_pattern(binary(), permission()) ->
+    {rabbit_types:vhost(), binary(), binary() | none, permission()} | ignore.
+parse_resource_pattern(Pattern, Permission) ->
+ case binary:split(Pattern, <<"/">>, [global]) of
+ [VhostPattern, NamePattern] ->
+ {VhostPattern, NamePattern, none, Permission};
+ [VhostPattern, NamePattern, RoutingKeyPattern] ->
+ {VhostPattern, NamePattern, RoutingKeyPattern, Permission};
+ _Other -> ignore
+ end.
diff --git a/deps/rabbitmq_auth_backend_oauth2/src/uaa_jwt.erl b/deps/rabbitmq_auth_backend_oauth2/src/uaa_jwt.erl
new file mode 100644
index 0000000000..e8d59f5670
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_oauth2/src/uaa_jwt.erl
@@ -0,0 +1,122 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+-module(uaa_jwt).
+
+-export([add_signing_key/3,
+ remove_signing_key/1,
+ decode_and_verify/1,
+ get_jwk/1,
+ verify_signing_key/2,
+ signing_keys/0]).
+
+-export([client_id/1, sub/1, client_id/2, sub/2]).
+
+-include_lib("jose/include/jose_jwk.hrl").
+
+-define(APP, rabbitmq_auth_backend_oauth2).
+
+-type key_type() :: json | pem | map.
+
+-spec add_signing_key(binary(), key_type(), binary() | map()) -> {ok, map()} | {error, term()}.
+
+add_signing_key(KeyId, Type, Value) ->
+ case verify_signing_key(Type, Value) of
+ ok ->
+ SigningKeys0 = signing_keys(),
+ SigningKeys1 = maps:put(KeyId, {Type, Value}, SigningKeys0),
+ ok = update_uaa_jwt_signing_keys(SigningKeys1),
+ {ok, SigningKeys1};
+ {error, _} = Err ->
+ Err
+ end.
+
+remove_signing_key(KeyId) ->
+ UaaEnv = application:get_env(?APP, key_config, []),
+    Keys0 = proplists:get_value(signing_keys, UaaEnv, #{}),
+ Keys1 = maps:remove(KeyId, Keys0),
+ update_uaa_jwt_signing_keys(UaaEnv, Keys1).
+
+-spec update_uaa_jwt_signing_keys(map()) -> ok.
+update_uaa_jwt_signing_keys(SigningKeys) ->
+ UaaEnv0 = application:get_env(?APP, key_config, []),
+ update_uaa_jwt_signing_keys(UaaEnv0, SigningKeys).
+
+-spec update_uaa_jwt_signing_keys([term()], map()) -> ok.
+update_uaa_jwt_signing_keys(UaaEnv0, SigningKeys) ->
+ UaaEnv1 = proplists:delete(signing_keys, UaaEnv0),
+ UaaEnv2 = [{signing_keys, SigningKeys} | UaaEnv1],
+ application:set_env(?APP, key_config, UaaEnv2).
+
+-spec decode_and_verify(binary()) -> {boolean(), map()} | {error, term()}.
+decode_and_verify(Token) ->
+ case uaa_jwt_jwt:get_key_id(Token) of
+ {ok, KeyId} ->
+ case get_jwk(KeyId) of
+ {ok, JWK} ->
+ uaa_jwt_jwt:decode_and_verify(JWK, Token);
+ {error, _} = Err ->
+ Err
+ end;
+ {error, _} = Err ->
+ Err
+ end.
+
+-spec get_jwk(binary()) -> {ok, map()} | {error, term()}.
+get_jwk(KeyId) ->
+ case signing_keys() of
+ undefined -> {error, signing_keys_not_configured};
+ Keys ->
+ case maps:get(KeyId, Keys, undefined) of
+ undefined ->
+ {error, key_not_found};
+ {Type, Value} ->
+ case Type of
+ json -> uaa_jwt_jwk:make_jwk(Value);
+ pem -> uaa_jwt_jwk:from_pem(Value);
+ pem_file -> uaa_jwt_jwk:from_pem_file(Value);
+ map -> uaa_jwt_jwk:make_jwk(Value);
+ _ -> {error, unknown_signing_key_type}
+ end
+ end
+ end.
+
+verify_signing_key(Type, Value) ->
+ Verified = case Type of
+ json -> uaa_jwt_jwk:make_jwk(Value);
+ pem -> uaa_jwt_jwk:from_pem(Value);
+ pem_file -> uaa_jwt_jwk:from_pem_file(Value);
+ map -> uaa_jwt_jwk:make_jwk(Value);
+ _ -> {error, unknown_signing_key_type}
+ end,
+ case Verified of
+ {ok, Key} ->
+ case jose_jwk:from(Key) of
+ #jose_jwk{} -> ok;
+ {error, Reason} -> {error, Reason}
+ end;
+ Err -> Err
+ end.
+
+signing_keys() ->
+ UaaEnv = application:get_env(?APP, key_config, []),
+ proplists:get_value(signing_keys, UaaEnv).
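+
+%% Signing keys live under the key_config section of the
+%% rabbitmq_auth_backend_oauth2 application environment; an illustrative
+%% entry (the key id and path are made up):
+%%   {key_config, [{signing_keys,
+%%       #{<<"key-1">> => {pem_file, "/path/to/key.pem"}}}]}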
+
+-spec client_id(map()) -> binary() | undefined.
+client_id(DecodedToken) ->
+ maps:get(<<"client_id">>, DecodedToken, undefined).
+
+-spec client_id(map(), any()) -> binary() | undefined.
+client_id(DecodedToken, Default) ->
+ maps:get(<<"client_id">>, DecodedToken, Default).
+
+-spec sub(map()) -> binary() | undefined.
+sub(DecodedToken) ->
+ maps:get(<<"sub">>, DecodedToken, undefined).
+
+-spec sub(map(), any()) -> binary() | undefined.
+sub(DecodedToken, Default) ->
+ maps:get(<<"sub">>, DecodedToken, Default).
diff --git a/deps/rabbitmq_auth_backend_oauth2/src/uaa_jwt_jwk.erl b/deps/rabbitmq_auth_backend_oauth2/src/uaa_jwt_jwk.erl
new file mode 100644
index 0000000000..11d2819fb5
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_oauth2/src/uaa_jwt_jwk.erl
@@ -0,0 +1,84 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+-module(uaa_jwt_jwk).
+
+-export([make_jwk/1, from_pem/1, from_pem_file/1]).
+
+-include_lib("jose/include/jose_jwk.hrl").
+
+-spec make_jwk(binary() | list() | map()) -> {ok, #{binary() => binary()}} | {error, term()}.
+make_jwk(Json) when is_binary(Json); is_list(Json) ->
+ JsonMap = jose:decode(iolist_to_binary(Json)),
+ make_jwk(JsonMap);
+
+make_jwk(JsonMap) when is_map(JsonMap) ->
+ case JsonMap of
+ #{<<"kty">> := <<"MAC">>, <<"value">> := _Value} ->
+ {ok, mac_to_oct(JsonMap)};
+ #{<<"kty">> := <<"RSA">>, <<"n">> := _N, <<"e">> := _E} ->
+ {ok, fix_alg(JsonMap)};
+ #{<<"kty">> := <<"oct">>, <<"k">> := _K} ->
+ {ok, fix_alg(JsonMap)};
+ #{<<"kty">> := <<"OKP">>, <<"crv">> := _Crv, <<"x">> := _X} ->
+ {ok, fix_alg(JsonMap)};
+ #{<<"kty">> := <<"EC">>} ->
+ {ok, fix_alg(JsonMap)};
+ #{<<"kty">> := Kty} when Kty == <<"oct">>;
+ Kty == <<"MAC">>;
+ Kty == <<"RSA">>;
+ Kty == <<"OKP">>;
+ Kty == <<"EC">> ->
+ {error, {fields_missing_for_kty, Kty}};
+ #{<<"kty">> := _Kty} ->
+ {error, unknown_kty};
+ #{} ->
+ {error, no_kty}
+ end.
+
+from_pem(Pem) ->
+ case jose_jwk:from_pem(Pem) of
+ #jose_jwk{} = Jwk -> {ok, Jwk};
+ Other ->
+            error_logger:warning_msg("Error parsing JWK from PEM: ~p~n", [Other]),
+ {error, invalid_pem_string}
+ end.
+
+from_pem_file(FileName) ->
+ case filelib:is_file(FileName) of
+ false ->
+ {error, enoent};
+ true ->
+ case jose_jwk:from_pem_file(FileName) of
+ #jose_jwk{} = Jwk -> {ok, Jwk};
+ Other ->
+                    error_logger:warning_msg("Error parsing JWK from PEM file: ~p~n", [Other]),
+ {error, invalid_pem_file}
+ end
+ end.
+
+mac_to_oct(#{<<"kty">> := <<"MAC">>, <<"value">> := Value} = Key) ->
+    OctKey = maps:merge(Key,
+                        #{<<"kty">> => <<"oct">>,
+                          <<"k">> => base64url:encode(Value)}),
+    fix_alg(OctKey).
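+
+%% For example, a UAA-style "MAC" key
+%%   #{<<"kty">> => <<"MAC">>, <<"value">> => <<"tokenkey">>}
+%% is converted to an "oct" key with <<"k">> => <<"dG9rZW5rZXk">>
+%% (the base64url encoding of the value).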
+
+fix_alg(#{<<"alg">> := Alg} = Key) ->
+ Algs = uaa_algs(),
+ case maps:get(Alg, Algs, undefined) of
+ undefined -> Key;
+ Val -> Key#{<<"alg">> := Val}
+ end;
+fix_alg(#{} = Key) -> Key.
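+
+%% For example, a key carrying <<"alg">> => <<"HMACSHA256">> is rewritten
+%% to <<"alg">> => <<"HS256">> via the mapping below.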
+
+uaa_algs() ->
+ UaaEnv = application:get_env(rabbitmq_auth_backend_oauth2, uaa_jwt_decoder, []),
+ DefaultAlgs = #{<<"HMACSHA256">> => <<"HS256">>,
+ <<"HMACSHA384">> => <<"HS384">>,
+ <<"HMACSHA512">> => <<"HS512">>,
+ <<"SHA256withRSA">> => <<"RS256">>,
+ <<"SHA512withRSA">> => <<"RS512">>},
+ proplists:get_value(uaa_algs, UaaEnv, DefaultAlgs).
diff --git a/deps/rabbitmq_auth_backend_oauth2/src/uaa_jwt_jwt.erl b/deps/rabbitmq_auth_backend_oauth2/src/uaa_jwt_jwt.erl
new file mode 100644
index 0000000000..c2e41c5d52
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_oauth2/src/uaa_jwt_jwt.erl
@@ -0,0 +1,48 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+-module(uaa_jwt_jwt).
+
+%% Note: the try/catch clauses below already use the Erlang/OTP 21+
+%% syntax for obtaining the stack trace; this attribute is kept from
+%% the transition away from the deprecated erlang:get_stacktrace/0.
+-compile(nowarn_deprecated_function).
+
+-export([decode/1, decode_and_verify/2, get_key_id/1]).
+
+-include_lib("jose/include/jose_jwt.hrl").
+-include_lib("jose/include/jose_jws.hrl").
+
+decode(Token) ->
+ try
+ #jose_jwt{fields = Fields} = jose_jwt:peek_payload(Token),
+ Fields
+ catch Type:Err:Stacktrace ->
+ {error, {invalid_token, Type, Err, Stacktrace}}
+ end.
+
+decode_and_verify(Jwk, Token) ->
+ case jose_jwt:verify(Jwk, Token) of
+ {true, #jose_jwt{fields = Fields}, _} -> {true, Fields};
+ {false, #jose_jwt{fields = Fields}, _} -> {false, Fields}
+ end.
+
+get_key_id(Token) ->
+ try
+ case jose_jwt:peek_protected(Token) of
+ #jose_jws{fields = #{<<"kid">> := Kid}} -> {ok, Kid};
+ #jose_jws{} -> get_default_key()
+ end
+ catch Type:Err:Stacktrace ->
+ {error, {invalid_token, Type, Err, Stacktrace}}
+ end.
+
+
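+%% For example, with key_config containing {default_key, <<"key-1">>}, a
+%% token whose JWS header has no "kid" field is verified with the
+%% <<"key-1">> signing key.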
+get_default_key() ->
+ UaaEnv = application:get_env(rabbitmq_auth_backend_oauth2, key_config, []),
+ case proplists:get_value(default_key, UaaEnv, undefined) of
+ undefined -> {error, no_key};
+ Val -> {ok, Val}
+ end.
diff --git a/deps/rabbitmq_auth_backend_oauth2/src/wildcard.erl b/deps/rabbitmq_auth_backend_oauth2/src/wildcard.erl
new file mode 100644
index 0000000000..01212901c6
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_oauth2/src/wildcard.erl
@@ -0,0 +1,58 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(wildcard).
+
+-export([match/2]).
+
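+%% For example, match(<<"vhost1">>, <<"vhost*">>) returns true and
+%% match(<<"foo">>, <<"bar*">>) returns false. Pattern segments are
+%% URL-decoded, so match(<<"a/b">>, <<"a%2Fb">>) returns true as well.
+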
+-spec match(Subject :: binary(), Pattern :: binary()) -> boolean().
+match(Subject, Pattern) ->
+ case parse_pattern(Pattern) of
+ [First | Rest] ->
+ FirstSize = byte_size(First),
+ case Subject of
+ % If a pattern does not start with a wildcard,
+ % do exact matching in the beginning of the subject
+ <<First:FirstSize/binary, _/binary>> ->
+ scan(Subject, Rest, FirstSize, byte_size(Subject));
+ _ -> false
+ end;
+ invalid -> false
+ end.
+
+-spec scan(Subject :: binary(), Pattern :: [binary()],
+ Pos :: integer(), Length :: integer()) -> boolean().
+% Pattern ends with a wildcard
+scan(_Subject, [<<>>], _Pos, _Length) -> true;
+% Pattern is complete. Subject scan is complete
+scan(_Subject, [], Length, Length) -> true;
+% No more pattern but subject scan is not complete
+scan(_Subject, [], Pos, Length) when Pos =/= Length -> false;
+% Subject scan is complete but there are more pattern elements
+scan(_Subject, _NonEmpty, Length, Length) -> false;
+% Skip duplicate wildcards
+scan(Subject, [<<>> | Rest], Pos, Length) ->
+ scan(Subject, Rest, Pos, Length);
+% Every other Part is after a wildcard
+scan(Subject, [Part | Rest], Pos, Length) ->
+ PartSize = byte_size(Part),
+ case binary:match(Subject, Part, [{scope, {Pos, Length - Pos}}]) of
+ nomatch -> false;
+ {PartPos, PartSize} ->
+ NewPos = PartPos + PartSize,
+ scan(Subject, Rest, NewPos, Length)
+ end.
+
+-spec parse_pattern(binary()) -> [binary()] | invalid.
+parse_pattern(Pattern) ->
+ Parts = binary:split(Pattern, <<"*">>, [global]),
+ try lists:map(fun(Part) -> cow_qs:urldecode(Part) end, Parts)
+ catch Type:Error ->
+ rabbit_log:warning("Invalid pattern ~p : ~p~n",
+ [Pattern, {Type, Error}]),
+ invalid
+ end.
diff --git a/deps/rabbitmq_auth_backend_oauth2/test/add_uaa_key_command_SUITE.erl b/deps/rabbitmq_auth_backend_oauth2/test/add_uaa_key_command_SUITE.erl
new file mode 100644
index 0000000000..ba46715db1
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_oauth2/test/add_uaa_key_command_SUITE.erl
@@ -0,0 +1,75 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+-module(add_uaa_key_command_SUITE).
+
+-compile(export_all).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+-include_lib("common_test/include/ct.hrl").
+
+-define(COMMAND, 'Elixir.RabbitMQ.CLI.Ctl.Commands.AddUaaKeyCommand').
+
+all() ->
+ [validate_arguments,
+ validate_json_key,
+ validate_pem_key,
+ validate_pem_file_key
+ ].
+
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:run_setup_steps(Config, []).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config, []).
+
+
+validate_arguments(_) ->
+ {validation_failure, too_many_args} =
+ ?COMMAND:validate([<<"one">>, <<"two">>], #{json => <<"{}">>}),
+ {validation_failure, not_enough_args} =
+ ?COMMAND:validate([], #{json => <<"{}">>}),
+ {validation_failure, {bad_argument, <<"No key specified">>}} =
+ ?COMMAND:validate([<<"foo">>], #{}),
+ {validation_failure, {bad_argument, <<"There can be only one key type">>}} =
+ ?COMMAND:validate([<<"foo">>], #{json => <<"{}">>, pem => <<"pem">>}),
+ {validation_failure, {bad_argument, <<"There can be only one key type">>}} =
+ ?COMMAND:validate([<<"foo">>], #{json => <<"{}">>, pem_file => <<"/tmp/key.pem">>}),
+ {validation_failure, {bad_argument, <<"There can be only one key type">>}} =
+ ?COMMAND:validate([<<"foo">>], #{pem => <<"pem">>, pem_file => <<"/tmp/key.pem">>}).
+
+validate_json_key(_) ->
+ {validation_failure, {bad_argument, <<"Invalid JSON">>}} =
+ ?COMMAND:validate([<<"foo">>], #{json => <<"foobar">>}),
+    {validation_failure, {bad_argument, <<"JSON key should contain \"kty\" field">>}} =
+ ?COMMAND:validate([<<"foo">>], #{json => <<"{}">>}),
+ {validation_failure, {bad_argument, _}} =
+ ?COMMAND:validate([<<"foo">>], #{json => <<"{\"kty\": \"oct\"}">>}),
+ ValidJson = <<"{\"alg\":\"HS256\",\"k\":\"dG9rZW5rZXk\",\"kid\":\"token-key\",\"kty\":\"oct\",\"use\":\"sig\",\"value\":\"tokenkey\"}">>,
+ ok = ?COMMAND:validate([<<"foo">>], #{json => ValidJson}).
+
+validate_pem_key(Config) ->
+ {validation_failure, <<"Unable to read a key from the PEM string">>} =
+ ?COMMAND:validate([<<"foo">>], #{pem => <<"not a key">>}),
+ CertsDir = ?config(rmq_certsdir, Config),
+ Keyfile = filename:join([CertsDir, <<"client">>, <<"key.pem">>]),
+ {ok, Key} = file:read_file(Keyfile),
+ ok = ?COMMAND:validate([<<"foo">>], #{pem => Key}).
+
+validate_pem_file_key(Config) ->
+ {validation_failure, {bad_argument, <<"PEM file not found">>}} =
+ ?COMMAND:validate([<<"foo">>], #{pem_file => <<"non_existent_file">>}),
+ file:write_file("empty.pem", <<"">>),
+ {validation_failure, <<"Unable to read a key from the PEM file">>} =
+ ?COMMAND:validate([<<"foo">>], #{pem_file => <<"empty.pem">>}),
+    file:write_file("not_pem.pem", <<"not a valid PEM">>),
+ {validation_failure, _} =
+ ?COMMAND:validate([<<"foo">>], #{pem_file => <<"not_pem.pem">>}),
+ CertsDir = ?config(rmq_certsdir, Config),
+ Keyfile = filename:join([CertsDir, <<"client">>, <<"key.pem">>]),
+ ok = ?COMMAND:validate([<<"foo">>], #{pem_file => Keyfile}).
+
diff --git a/deps/rabbitmq_auth_backend_oauth2/test/rabbit_auth_backend_oauth2_test_util.erl b/deps/rabbitmq_auth_backend_oauth2/test/rabbit_auth_backend_oauth2_test_util.erl
new file mode 100644
index 0000000000..5b8ed5f837
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_oauth2/test/rabbit_auth_backend_oauth2_test_util.erl
@@ -0,0 +1,99 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+-module(rabbit_auth_backend_oauth2_test_util).
+
+-compile(export_all).
+
+-define(DEFAULT_EXPIRATION_IN_SECONDS, 2).
+
+%%
+%% API
+%%
+
+sign_token_hs(Token, #{<<"kid">> := TokenKey} = Jwk) ->
+ sign_token_hs(Token, Jwk, TokenKey).
+
+sign_token_hs(Token, Jwk, TokenKey) ->
+ Jws = #{
+ <<"alg">> => <<"HS256">>,
+ <<"kid">> => TokenKey
+ },
+ sign_token(Token, Jwk, Jws).
+
+sign_token_rsa(Token, Jwk, TokenKey) ->
+ Jws = #{
+ <<"alg">> => <<"RS256">>,
+ <<"kid">> => TokenKey
+ },
+ sign_token(Token, Jwk, Jws).
+
+sign_token_no_kid(Token, Jwk) ->
+ Signed = jose_jwt:sign(Jwk, Token),
+ jose_jws:compact(Signed).
+
+sign_token(Token, Jwk, Jws) ->
+ Signed = jose_jwt:sign(Jwk, Jws, Token),
+ jose_jws:compact(Signed).
+
+fixture_jwk() ->
+ #{<<"alg">> => <<"HS256">>,
+ <<"k">> => <<"dG9rZW5rZXk">>,
+ <<"kid">> => <<"token-key">>,
+ <<"kty">> => <<"oct">>,
+ <<"use">> => <<"sig">>,
+ <<"value">> => <<"tokenkey">>}.
+
+full_permission_scopes() ->
+ [<<"rabbitmq.configure:*/*">>,
+ <<"rabbitmq.write:*/*">>,
+ <<"rabbitmq.read:*/*">>].
+
+expirable_token() ->
+ expirable_token(?DEFAULT_EXPIRATION_IN_SECONDS).
+
+expirable_token(Seconds) ->
+ TokenPayload = fixture_token(),
+ %% expiration is a timestamp with precision in seconds
+ TokenPayload#{<<"exp">> := os:system_time(seconds) + Seconds}.
+
+wait_for_token_to_expire() ->
+ timer:sleep(timer:seconds(?DEFAULT_EXPIRATION_IN_SECONDS)).
+
+wait_for_token_to_expire(DurationInMs) ->
+ timer:sleep(DurationInMs).
+
+expired_token() ->
+ expired_token_with_scopes(full_permission_scopes()).
+
+expired_token_with_scopes(Scopes) ->
+ token_with_scopes_and_expiration(Scopes, os:system_time(seconds) - 10).
+
+fixture_token_with_scopes(Scopes) ->
+ token_with_scopes_and_expiration(Scopes, os:system_time(seconds) + 10).
+
+token_with_scopes_and_expiration(Scopes, Expiration) ->
+ %% expiration is a timestamp with precision in seconds
+ #{<<"exp">> => Expiration,
+ <<"kid">> => <<"token-key">>,
+ <<"iss">> => <<"unit_test">>,
+ <<"foo">> => <<"bar">>,
+ <<"aud">> => [<<"rabbitmq">>],
+ <<"scope">> => Scopes}.
+
+fixture_token() ->
+ fixture_token([]).
+
+fixture_token(ExtraScopes) ->
+ Scopes = [<<"rabbitmq.configure:vhost/foo">>,
+ <<"rabbitmq.write:vhost/foo">>,
+ <<"rabbitmq.read:vhost/foo">>,
+ <<"rabbitmq.read:vhost/bar">>,
+ <<"rabbitmq.read:vhost/bar/%23%2Ffoo">>] ++ ExtraScopes,
+ fixture_token_with_scopes(Scopes).
+
+fixture_token_with_full_permissions() ->
+ fixture_token_with_scopes(full_permission_scopes()).
diff --git a/deps/rabbitmq_auth_backend_oauth2/test/scope_SUITE.erl b/deps/rabbitmq_auth_backend_oauth2/test/scope_SUITE.erl
new file mode 100644
index 0000000000..1338f28f50
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_oauth2/test/scope_SUITE.erl
@@ -0,0 +1,341 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+-module(scope_SUITE).
+
+-compile(export_all).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+all() ->
+ [
+ permission_all,
+ permission_vhost,
+ permission_resource,
+ permission_topic
+ ].
+
+permission_all(_Config) ->
+ WildcardScopeWrite = <<"write:*/*">>,
+ WildcardScopeWriteTopic = <<"write:*/*/*">>,
+ WildcardScopeRead = <<"read:*/*">>,
+ WildcardScopeReadTopic = <<"read:*/*/*">>,
+ WildcardScopeConfigure = <<"configure:*/*">>,
+ WildcardScopeConfigureTopic = <<"configure:*/*/*">>,
+
+ ReadScopes = [WildcardScopeRead, WildcardScopeReadTopic],
+ WriteScopes = [WildcardScopeWrite, WildcardScopeWriteTopic],
+ ConfigureScopes = [WildcardScopeConfigure, WildcardScopeConfigureTopic],
+
+ ExampleVhosts = [<<"/">>, <<"foo">>, <<"*">>, <<"foo/bar">>, <<"юникод"/utf8>>],
+ ExampleResources = [<<"foo">>, <<"foo/bar">>, <<"*">>, <<"*/*">>, <<"юникод"/utf8>>],
+
+
+ [ vhost_allowed(<<"/">>, Scope) ||
+ Scope <- ReadScopes ++ WriteScopes ++ ConfigureScopes ],
+ [ vhost_allowed(<<"foo">>, Scope) ||
+ Scope <- ReadScopes ++ WriteScopes ++ ConfigureScopes ],
+ [ vhost_allowed(<<"*">>, Scope) ||
+ Scope <- ReadScopes ++ WriteScopes ++ ConfigureScopes ],
+ [ vhost_allowed(<<"foo/bar">>, Scope) ||
+ Scope <- ReadScopes ++ WriteScopes ++ ConfigureScopes ],
+ [ vhost_allowed(<<"юникод"/utf8>>, Scope) ||
+ Scope <- ReadScopes ++ WriteScopes ++ ConfigureScopes ],
+
+ [ read_allowed(Vhost, Resource, Scope) ||
+ Scope <- ReadScopes,
+ Vhost <- ExampleVhosts,
+ Resource <- ExampleResources ],
+
+ [ write_allowed(Vhost, Resource, Scope) ||
+ Scope <- WriteScopes,
+ Vhost <- ExampleVhosts,
+ Resource <- ExampleResources ],
+
+ [ configure_allowed(Vhost, Resource, Scope) ||
+ Scope <- ConfigureScopes,
+ Vhost <- ExampleVhosts,
+ Resource <- ExampleResources ],
+
+ [ read_refused(Vhost, Resource, Scope) ||
+ Scope <- WriteScopes ++ ConfigureScopes,
+ Vhost <- ExampleVhosts,
+ Resource <- ExampleResources ],
+
+ [ write_refused(Vhost, Resource, Scope) ||
+ Scope <- ReadScopes ++ ConfigureScopes,
+ Vhost <- ExampleVhosts,
+ Resource <- ExampleResources ],
+
+ [ configure_refused(Vhost, Resource, Scope) ||
+ Scope <- WriteScopes ++ ReadScopes,
+ Vhost <- ExampleVhosts,
+ Resource <- ExampleResources ].
+
+permission_vhost(_Config) ->
+ FooScopeWrite = <<"write:foo/*">>,
+ FooScopeWriteTopic = <<"write:foo/*/*">>,
+ FooScopeRead = <<"read:foo/*">>,
+ FooScopeReadTopic = <<"read:foo/*/*">>,
+ FooScopeConfigure = <<"configure:foo/*">>,
+ FooScopeConfigureTopic = <<"configure:foo/*/*">>,
+
+ ComplexVHost = <<"foo/bar/*/">>,
+ EncodedVhost = cow_qs:urlencode(ComplexVHost),
+
+ EncodedScopeWrite = <<"write:", EncodedVhost/binary, "/*">>,
+ EncodedScopeWriteTopic = <<"write:", EncodedVhost/binary, "/*/*">>,
+ EncodedScopeRead = <<"read:", EncodedVhost/binary, "/*">>,
+ EncodedScopeReadTopic = <<"read:", EncodedVhost/binary, "/*/*">>,
+ EncodedScopeConfigure = <<"configure:", EncodedVhost/binary, "/*">>,
+ EncodedScopeConfigureTopic = <<"configure:", EncodedVhost/binary, "/*/*">>,
+
+ FooReadScopes = [FooScopeRead, FooScopeReadTopic],
+ EncodedReadScopes = [EncodedScopeRead, EncodedScopeReadTopic],
+
+ FooWriteScopes = [FooScopeWrite, FooScopeWriteTopic],
+ EncodedWriteScopes = [EncodedScopeWrite, EncodedScopeWriteTopic],
+
+ FooConfigureScopes = [FooScopeConfigure, FooScopeConfigureTopic],
+ EncodedConfigureScopes = [EncodedScopeConfigure, EncodedScopeConfigureTopic],
+
+ ExampleResources = [<<"foo">>, <<"foo/bar">>, <<"*">>, <<"*/*">>, <<"юникод"/utf8>>],
+
+ Tags = [<<"tag:management">>, <<"tag:policymaker">>],
+
+ [ vhost_allowed(<<"foo">>, Scope) ||
+ Scope <- FooReadScopes ++ FooWriteScopes ++ FooConfigureScopes ],
+ [ vhost_allowed(ComplexVHost, [Scope] ++ Tags) ||
+ Scope <- EncodedReadScopes ++ EncodedWriteScopes ++ EncodedConfigureScopes ],
+
+ [ read_allowed(<<"foo">>, Resource, [Scope] ++ Tags) ||
+ Scope <- FooReadScopes,
+ Resource <- ExampleResources ],
+
+ [ write_allowed(<<"foo">>, Resource, [Scope] ++ Tags) ||
+ Scope <- FooWriteScopes,
+ Resource <- ExampleResources ],
+
+ [ configure_allowed(<<"foo">>, Resource, [Scope] ++ Tags) ||
+ Scope <- FooConfigureScopes,
+ Resource <- ExampleResources ],
+
+ [ read_refused(<<"foo">>, Resource, [Scope] ++ Tags) ||
+ Scope <- FooWriteScopes ++ FooConfigureScopes ++
+ EncodedWriteScopes ++ EncodedConfigureScopes,
+ Resource <- ExampleResources ],
+
+ [ write_refused(<<"foo">>, Resource, [Scope] ++ Tags) ||
+ Scope <- FooReadScopes ++ FooConfigureScopes ++
+ EncodedReadScopes ++ EncodedConfigureScopes,
+ Resource <- ExampleResources ],
+
+ [ configure_refused(<<"foo">>, Resource, [Scope] ++ Tags) ||
+ Scope <- FooWriteScopes ++ FooReadScopes ++
+ EncodedWriteScopes ++ EncodedReadScopes,
+ Resource <- ExampleResources ],
+
+ [ read_allowed(ComplexVHost, Resource, Scope) ||
+ Scope <- EncodedReadScopes,
+ Resource <- ExampleResources ],
+
+ [ write_allowed(ComplexVHost, Resource, Scope) ||
+ Scope <- EncodedWriteScopes,
+ Resource <- ExampleResources ],
+
+ [ configure_allowed(ComplexVHost, Resource, Scope) ||
+ Scope <- EncodedConfigureScopes,
+ Resource <- ExampleResources ],
+
+ [ read_refused(ComplexVHost, Resource, Scope) ||
+ Scope <- EncodedWriteScopes ++ EncodedConfigureScopes ++
+ FooWriteScopes ++ FooConfigureScopes,
+ Resource <- ExampleResources ],
+
+ [ write_refused(ComplexVHost, Resource, Scope) ||
+ Scope <- EncodedReadScopes ++ EncodedConfigureScopes ++
+ FooReadScopes ++ FooConfigureScopes,
+ Resource <- ExampleResources ],
+
+ [ configure_refused(ComplexVHost, Resource, Scope) ||
+ Scope <- EncodedWriteScopes ++ EncodedReadScopes ++
+ FooWriteScopes ++ FooReadScopes,
+ Resource <- ExampleResources ].
+
+permission_resource(_Config) ->
+ ComplexResource = <<"bar*/baz">>,
+ EncodedResource = cow_qs:urlencode(ComplexResource),
+
+ ScopeWrite = <<"write:*/", EncodedResource/binary>>,
+ ScopeWriteTopic = <<"write:*/", EncodedResource/binary, "/*">>,
+ ScopeRead = <<"read:*/", EncodedResource/binary>>,
+ ScopeReadTopic = <<"read:*/", EncodedResource/binary, "/*">>,
+ ScopeConfigure = <<"configure:*/", EncodedResource/binary>>,
+ ScopeConfigureTopic = <<"configure:*/", EncodedResource/binary, "/*">>,
+
+ ExampleVhosts = [<<"/">>, <<"foo">>, <<"*">>, <<"foo/bar">>, <<"юникод"/utf8>>],
+ ExampleResources = [<<"foo">>, <<"foo/bar">>, <<"*">>, <<"*/*">>, <<"юникод"/utf8>>],
+
+ %% Resource access is allowed for complex resource with any vhost
+ [ read_allowed(Vhost, ComplexResource, Scope) ||
+ Scope <- [ScopeRead, ScopeReadTopic],
+ Vhost <- ExampleVhosts ],
+ [ write_allowed(Vhost, ComplexResource, Scope) ||
+ Scope <- [ScopeWrite, ScopeWriteTopic],
+ Vhost <- ExampleVhosts ],
+ [ configure_allowed(Vhost, ComplexResource, Scope) ||
+ Scope <- [ScopeConfigure, ScopeConfigureTopic],
+ Vhost <- ExampleVhosts ],
+
+ %% Resource access is refused for any other resource
+ [ read_refused(Vhost, Resource, Scope) ||
+ Scope <- [ScopeWrite, ScopeWriteTopic,
+ ScopeRead, ScopeReadTopic,
+ ScopeConfigure, ScopeConfigureTopic],
+ Vhost <- ExampleVhosts,
+ Resource <- ExampleResources ],
+
+ [ write_refused(Vhost, Resource, Scope) ||
+ Scope <- [ScopeWrite, ScopeWriteTopic,
+ ScopeRead, ScopeReadTopic,
+ ScopeConfigure, ScopeConfigureTopic],
+ Vhost <- ExampleVhosts,
+ Resource <- ExampleResources ],
+
+ [ configure_refused(Vhost, Resource, Scope) ||
+ Scope <- [ScopeWrite, ScopeWriteTopic,
+ ScopeRead, ScopeReadTopic,
+ ScopeConfigure, ScopeConfigureTopic],
+ Vhost <- ExampleVhosts,
+ Resource <- ExampleResources ].
+
+permission_topic(_Config) ->
+ TopicWildcardRead = <<"read:*/*/*">>,
+ TopicVhostRead = <<"read:vhost/*/*">>,
+ TopicResourceRead = <<"read:*/exchange/*">>,
+ TopicRoutingKeyRead = <<"read:*/*/rout">>,
+ TopicRoutingSuffixKeyRead = <<"read:*/*/*rout">>,
+
+ ExampleVhosts = [<<"/">>, <<"foo">>, <<"*">>, <<"foo/bar">>, <<"юникод"/utf8>>],
+ ExampleResources = [<<"foo">>, <<"foo/bar">>, <<"*">>, <<"*/*">>, <<"юникод"/utf8>>],
+ ExampleRoutingKeys = [<<"rout">>, <<"norout">>, <<"some_other">>],
+
+ [ topic_read_allowed(VHost, Resource, RoutingKey, TopicWildcardRead) ||
+ VHost <- ExampleVhosts,
+ Resource <- ExampleResources,
+ RoutingKey <- ExampleRoutingKeys],
+
+ [ topic_read_allowed(<<"vhost">>, Resource, RoutingKey, TopicVhostRead) ||
+ Resource <- ExampleResources,
+ RoutingKey <- ExampleRoutingKeys],
+
+ [ topic_read_allowed(VHost, <<"exchange">>, RoutingKey, TopicResourceRead) ||
+ VHost <- ExampleVhosts,
+ RoutingKey <- ExampleRoutingKeys],
+
+ [ topic_read_allowed(VHost, Resource, <<"rout">>, TopicRoutingKeyRead) ||
+ VHost <- ExampleVhosts,
+ Resource <- ExampleResources],
+
+ [ topic_read_allowed(VHost, Resource, RoutingKey, TopicRoutingSuffixKeyRead) ||
+ VHost <- ExampleVhosts,
+ Resource <- ExampleResources,
+ RoutingKey <- [<<"rout">>, <<"norout">>, <<"sprout">>]],
+
+ [ topic_read_refused(VHost, Resource, RoutingKey, TopicVhostRead) ||
+ VHost <- ExampleVhosts,
+ Resource <- ExampleResources,
+ RoutingKey <- ExampleRoutingKeys],
+
+ [ topic_read_refused(VHost, Resource, RoutingKey, TopicResourceRead) ||
+ VHost <- ExampleVhosts,
+ Resource <- ExampleResources,
+ RoutingKey <- ExampleRoutingKeys],
+
+ [ topic_read_refused(VHost, Resource, RoutingKey, TopicRoutingKeyRead) ||
+ VHost <- ExampleVhosts,
+ Resource <- ExampleResources,
+ RoutingKey <- [<<"foo">>, <<"bar">>]].
+
+vhost_allowed(Vhost, Scopes) when is_list(Scopes) ->
+ ?assertEqual(true, rabbit_oauth2_scope:vhost_access(Vhost, Scopes));
+
+vhost_allowed(Vhost, Scope) ->
+ vhost_allowed(Vhost, [Scope]).
+
+read_allowed(Vhost, Resource, Scopes) when is_list(Scopes) ->
+ resource_perm(Vhost, Resource, Scopes, read, true);
+
+read_allowed(Vhost, Resource, Scope) ->
+ resource_perm(Vhost, Resource, Scope, read, true).
+
+read_refused(Vhost, Resource, Scopes) when is_list(Scopes) ->
+ resource_perm(Vhost, Resource, Scopes, read, false);
+
+read_refused(Vhost, Resource, Scope) ->
+ resource_perm(Vhost, Resource, Scope, read, false).
+
+write_allowed(Vhost, Resource, Scopes) when is_list(Scopes) ->
+ resource_perm(Vhost, Resource, Scopes, write, true);
+
+write_allowed(Vhost, Resource, Scope) ->
+ resource_perm(Vhost, Resource, Scope, write, true).
+
+write_refused(Vhost, Resource, Scopes) when is_list(Scopes) ->
+ resource_perm(Vhost, Resource, Scopes, write, false);
+
+write_refused(Vhost, Resource, Scope) ->
+ resource_perm(Vhost, Resource, Scope, write, false).
+
+configure_allowed(Vhost, Resource, Scopes) when is_list(Scopes) ->
+ resource_perm(Vhost, Resource, Scopes, configure, true);
+
+configure_allowed(Vhost, Resource, Scope) ->
+ resource_perm(Vhost, Resource, Scope, configure, true).
+
+configure_refused(Vhost, Resource, Scopes) when is_list(Scopes) ->
+ resource_perm(Vhost, Resource, Scopes, configure, false);
+
+configure_refused(Vhost, Resource, Scope) ->
+ resource_perm(Vhost, Resource, Scope, configure, false).
+
+
+resource_perm(Vhost, Resource, Scopes, Permission, Result) when is_list(Scopes) ->
+ [ ?assertEqual(Result, rabbit_oauth2_scope:resource_access(
+ #resource{virtual_host = Vhost,
+ kind = Kind,
+ name = Resource},
+ Permission,
+ Scopes)) || Kind <- [queue, exchange] ];
+
+resource_perm(Vhost, Resource, Scope, Permission, Result) ->
+ resource_perm(Vhost, Resource, [Scope], Permission, Result).
+
+topic_read_allowed(Vhost, Resource, RoutingKey, Scopes) when is_list(Scopes) ->
+ topic_perm(Vhost, Resource, RoutingKey, Scopes, read, true);
+
+topic_read_allowed(Vhost, Resource, RoutingKey, Scope) ->
+ topic_perm(Vhost, Resource, RoutingKey, Scope, read, true).
+
+topic_read_refused(Vhost, Resource, RoutingKey, Scopes) when is_list(Scopes) ->
+ topic_perm(Vhost, Resource, RoutingKey, Scopes, read, false);
+
+topic_read_refused(Vhost, Resource, RoutingKey, Scope) ->
+ topic_perm(Vhost, Resource, RoutingKey, Scope, read, false).
+
+topic_perm(Vhost, Resource, RoutingKey, Scopes, Permission, Result) when is_list(Scopes) ->
+ ?assertEqual(Result, rabbit_oauth2_scope:topic_access(
+ #resource{virtual_host = Vhost,
+ kind = topic,
+ name = Resource},
+ Permission,
+ #{routing_key => RoutingKey},
+ Scopes));
+
+topic_perm(Vhost, Resource, RoutingKey, Scope, Permission, Result) ->
+ topic_perm(Vhost, Resource, RoutingKey, [Scope], Permission, Result).
diff --git a/deps/rabbitmq_auth_backend_oauth2/test/system_SUITE.erl b/deps/rabbitmq_auth_backend_oauth2/test/system_SUITE.erl
new file mode 100644
index 0000000000..bb98b469a3
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_oauth2/test/system_SUITE.erl
@@ -0,0 +1,373 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(system_SUITE).
+
+-compile(export_all).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-import(rabbit_ct_client_helpers, [close_connection/1, close_channel/1,
+ open_unmanaged_connection/4, open_unmanaged_connection/5,
+ close_connection_and_channel/2]).
+-import(rabbit_mgmt_test_util, [amqp_port/1]).
+
+all() ->
+ [
+ {group, happy_path},
+ {group, unhappy_path}
+ ].
+
+groups() ->
+ [
+ {happy_path, [], [
+ test_successful_connection_with_a_full_permission_token_and_all_defaults,
+ test_successful_connection_with_a_full_permission_token_and_explicitly_configured_vhost,
+ test_successful_connection_with_simple_strings_for_aud_and_scope,
+ test_successful_connection_with_complex_claim_as_a_map,
+ test_successful_connection_with_complex_claim_as_a_list,
+ test_successful_connection_with_complex_claim_as_a_binary,
+ test_successful_connection_with_keycloak_token,
+ test_successful_token_refresh
+ ]},
+ {unhappy_path, [], [
+ test_failed_connection_with_expired_token,
+ test_failed_connection_with_a_non_token,
+ test_failed_connection_with_a_token_with_insufficient_vhost_permission,
+ test_failed_connection_with_a_token_with_insufficient_resource_permission,
+ test_failed_token_refresh_case1,
+ test_failed_token_refresh_case2
+ ]}
+ ].
+
+%%
+%% Setup and Teardown
+%%
+
+-define(UTIL_MOD, rabbit_auth_backend_oauth2_test_util).
+-define(RESOURCE_SERVER_ID, <<"rabbitmq">>).
+-define(EXTRA_SCOPES_SOURCE, <<"additional_rabbitmq_scopes">>).
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config,
+ rabbit_ct_broker_helpers:setup_steps() ++ [
+ fun preconfigure_node/1,
+ fun preconfigure_token/1
+ ]).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config, rabbit_ct_broker_helpers:teardown_steps()).
+
+
+init_per_group(_Group, Config) ->
+ %% The broker is managed by {init,end}_per_testcase().
+ lists:foreach(fun(Value) ->
+ rabbit_ct_broker_helpers:add_vhost(Config, Value)
+ end,
+ [<<"vhost1">>, <<"vhost2">>, <<"vhost3">>, <<"vhost4">>]),
+ Config.
+
+end_per_group(_Group, Config) ->
+ %% The broker is managed by {init,end}_per_testcase().
+ lists:foreach(fun(Value) ->
+ rabbit_ct_broker_helpers:delete_vhost(Config, Value)
+ end,
+ [<<"vhost1">>, <<"vhost2">>, <<"vhost3">>, <<"vhost4">>]),
+ Config.
+
+
+init_per_testcase(Testcase, Config) when Testcase =:= test_successful_connection_with_a_full_permission_token_and_explicitly_configured_vhost orelse
+ Testcase =:= test_successful_token_refresh ->
+ rabbit_ct_broker_helpers:add_vhost(Config, <<"vhost1">>),
+ rabbit_ct_helpers:testcase_started(Config, Testcase),
+ Config;
+
+init_per_testcase(Testcase, Config) when Testcase =:= test_failed_token_refresh_case1 orelse
+ Testcase =:= test_failed_token_refresh_case2 ->
+ rabbit_ct_broker_helpers:add_vhost(Config, <<"vhost4">>),
+ rabbit_ct_helpers:testcase_started(Config, Testcase),
+ Config;
+
+init_per_testcase(Testcase, Config) when Testcase =:= test_successful_connection_with_complex_claim_as_a_map orelse
+ Testcase =:= test_successful_connection_with_complex_claim_as_a_list orelse
+ Testcase =:= test_successful_connection_with_complex_claim_as_a_binary ->
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0, application, set_env,
+ [rabbitmq_auth_backend_oauth2, extra_scopes_source, ?EXTRA_SCOPES_SOURCE]),
+ rabbit_ct_helpers:testcase_started(Config, Testcase),
+ Config;
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase),
+ Config.
+
+end_per_testcase(Testcase, Config) when Testcase =:= test_failed_token_refresh_case1 orelse
+ Testcase =:= test_failed_token_refresh_case2 ->
+ rabbit_ct_broker_helpers:delete_vhost(Config, <<"vhost4">>),
+    rabbit_ct_helpers:testcase_finished(Config, Testcase),
+ Config;
+
+end_per_testcase(Testcase, Config) when Testcase =:= test_successful_connection_with_complex_claim_as_a_map orelse
+ Testcase =:= test_successful_connection_with_complex_claim_as_a_list orelse
+ Testcase =:= test_successful_connection_with_complex_claim_as_a_binary ->
+ rabbit_ct_broker_helpers:delete_vhost(Config, <<"vhost1">>),
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0, application, set_env,
+ [rabbitmq_auth_backend_oauth2, extra_scopes_source, undefined]),
+    rabbit_ct_helpers:testcase_finished(Config, Testcase),
+ Config;
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_broker_helpers:delete_vhost(Config, <<"vhost1">>),
+ rabbit_ct_helpers:testcase_finished(Config, Testcase),
+ Config.
+
+preconfigure_node(Config) ->
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0, application, set_env,
+ [rabbit, auth_backends, [rabbit_auth_backend_oauth2]]),
+ Jwk = ?UTIL_MOD:fixture_jwk(),
+ KeyConfig = [{signing_keys, #{<<"token-key">> => {map, Jwk}}}],
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0, application, set_env,
+ [rabbitmq_auth_backend_oauth2, key_config, KeyConfig]),
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0, application, set_env,
+ [rabbitmq_auth_backend_oauth2, resource_server_id, ?RESOURCE_SERVER_ID]),
+
+ rabbit_ct_helpers:set_config(Config, {fixture_jwk, Jwk}).
+
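+%% For reference, a roughly equivalent static setup in advanced.config
+%% (a sketch mirroring the set_env calls above; `Jwk` stands for the
+%% signing key map, which this suite generates per run):
+%%
+%%   [{rabbit, [{auth_backends, [rabbit_auth_backend_oauth2]}]},
+%%    {rabbitmq_auth_backend_oauth2,
+%%     [{resource_server_id, <<"rabbitmq">>},
+%%      {key_config, [{signing_keys, #{<<"token-key">> => {map, Jwk}}}]}]}].
+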
+generate_valid_token(Config) ->
+ generate_valid_token(Config, ?UTIL_MOD:full_permission_scopes()).
+
+generate_valid_token(Config, Scopes) ->
+ generate_valid_token(Config, Scopes, undefined).
+
+generate_valid_token(Config, Scopes, Audience) ->
+ Jwk = case rabbit_ct_helpers:get_config(Config, fixture_jwk) of
+ undefined -> ?UTIL_MOD:fixture_jwk();
+ Value -> Value
+ end,
+ Token = case Audience of
+ undefined -> ?UTIL_MOD:fixture_token_with_scopes(Scopes);
+ DefinedAudience -> maps:put(<<"aud">>, DefinedAudience, ?UTIL_MOD:fixture_token_with_scopes(Scopes))
+ end,
+ ?UTIL_MOD:sign_token_hs(Token, Jwk).
+
+generate_valid_token_with_extra_fields(Config, ExtraFields) ->
+ Jwk = case rabbit_ct_helpers:get_config(Config, fixture_jwk) of
+ undefined -> ?UTIL_MOD:fixture_jwk();
+ Value -> Value
+ end,
+ Token = maps:merge(?UTIL_MOD:fixture_token_with_scopes([]), ExtraFields),
+ ?UTIL_MOD:sign_token_hs(Token, Jwk).
+
+generate_expired_token(Config) ->
+ generate_expired_token(Config, ?UTIL_MOD:full_permission_scopes()).
+
+generate_expired_token(Config, Scopes) ->
+ Jwk = case rabbit_ct_helpers:get_config(Config, fixture_jwk) of
+ undefined -> ?UTIL_MOD:fixture_jwk();
+ Value -> Value
+ end,
+ ?UTIL_MOD:sign_token_hs(?UTIL_MOD:expired_token_with_scopes(Scopes), Jwk).
+
+generate_expirable_token(Config, Seconds) ->
+ generate_expirable_token(Config, ?UTIL_MOD:full_permission_scopes(), Seconds).
+
+generate_expirable_token(Config, Scopes, Seconds) ->
+ Jwk = case rabbit_ct_helpers:get_config(Config, fixture_jwk) of
+ undefined -> ?UTIL_MOD:fixture_jwk();
+ Value -> Value
+ end,
+ Expiration = os:system_time(seconds) + Seconds,
+ ?UTIL_MOD:sign_token_hs(?UTIL_MOD:token_with_scopes_and_expiration(Scopes, Expiration), Jwk).
+
+preconfigure_token(Config) ->
+ Token = generate_valid_token(Config),
+ rabbit_ct_helpers:set_config(Config, {fixture_jwt, Token}).
+
+%%
+%% Test Cases
+%%
+
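+%% Scopes below follow the plugin's
+%% <resource_server_id>.<permission>:<vhost_pattern>/<name_pattern> convention,
+%% e.g. <<"rabbitmq.configure:vhost1/*">> with ?RESOURCE_SERVER_ID = <<"rabbitmq">>.
+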
+test_successful_connection_with_a_full_permission_token_and_all_defaults(Config) ->
+ {_Algo, Token} = rabbit_ct_helpers:get_config(Config, fixture_jwt),
+ Conn = open_unmanaged_connection(Config, 0, <<"username">>, Token),
+ {ok, Ch} = amqp_connection:open_channel(Conn),
+ #'queue.declare_ok'{queue = _} =
+ amqp_channel:call(Ch, #'queue.declare'{exclusive = true}),
+ close_connection_and_channel(Conn, Ch).
+
+test_successful_connection_with_a_full_permission_token_and_explicitly_configured_vhost(Config) ->
+ {_Algo, Token} = generate_valid_token(Config, [<<"rabbitmq.configure:vhost1/*">>,
+ <<"rabbitmq.write:vhost1/*">>,
+ <<"rabbitmq.read:vhost1/*">>]),
+ Conn = open_unmanaged_connection(Config, 0, <<"vhost1">>, <<"username">>, Token),
+ {ok, Ch} = amqp_connection:open_channel(Conn),
+ #'queue.declare_ok'{queue = _} =
+ amqp_channel:call(Ch, #'queue.declare'{exclusive = true}),
+ close_connection_and_channel(Conn, Ch).
+
+test_successful_connection_with_simple_strings_for_aud_and_scope(Config) ->
+ {_Algo, Token} = generate_valid_token(
+ Config,
+ <<"rabbitmq.configure:*/* rabbitmq.write:*/* rabbitmq.read:*/*">>,
+ <<"hare rabbitmq">>
+ ),
+ Conn = open_unmanaged_connection(Config, 0, <<"username">>, Token),
+ {ok, Ch} = amqp_connection:open_channel(Conn),
+ #'queue.declare_ok'{queue = _} =
+ amqp_channel:call(Ch, #'queue.declare'{exclusive = true}),
+ close_connection_and_channel(Conn, Ch).
+
+test_successful_connection_with_complex_claim_as_a_map(Config) ->
+ {_Algo, Token} = generate_valid_token_with_extra_fields(
+ Config,
+ #{<<"additional_rabbitmq_scopes">> => #{<<"rabbitmq">> => [<<"configure:*/*">>, <<"read:*/*">>, <<"write:*/*">>]}}
+ ),
+ Conn = open_unmanaged_connection(Config, 0, <<"username">>, Token),
+ {ok, Ch} = amqp_connection:open_channel(Conn),
+ #'queue.declare_ok'{queue = _} =
+ amqp_channel:call(Ch, #'queue.declare'{exclusive = true}),
+ close_connection_and_channel(Conn, Ch).
+
+test_successful_connection_with_complex_claim_as_a_list(Config) ->
+ {_Algo, Token} = generate_valid_token_with_extra_fields(
+ Config,
+ #{<<"additional_rabbitmq_scopes">> => [<<"rabbitmq.configure:*/*">>, <<"rabbitmq.read:*/*">>, <<"rabbitmq.write:*/*">>]}
+ ),
+ Conn = open_unmanaged_connection(Config, 0, <<"username">>, Token),
+ {ok, Ch} = amqp_connection:open_channel(Conn),
+ #'queue.declare_ok'{queue = _} =
+ amqp_channel:call(Ch, #'queue.declare'{exclusive = true}),
+ close_connection_and_channel(Conn, Ch).
+
+test_successful_connection_with_complex_claim_as_a_binary(Config) ->
+ {_Algo, Token} = generate_valid_token_with_extra_fields(
+ Config,
+ #{<<"additional_rabbitmq_scopes">> => <<"rabbitmq.configure:*/* rabbitmq.read:*/*" "rabbitmq.write:*/*">>}
+ ),
+ Conn = open_unmanaged_connection(Config, 0, <<"username">>, Token),
+ {ok, Ch} = amqp_connection:open_channel(Conn),
+ #'queue.declare_ok'{queue = _} =
+ amqp_channel:call(Ch, #'queue.declare'{exclusive = true}),
+ close_connection_and_channel(Conn, Ch).
+
+test_successful_connection_with_keycloak_token(Config) ->
+ {_Algo, Token} = generate_valid_token_with_extra_fields(
+ Config,
+ #{<<"authorization">> => #{<<"permissions">> =>
+ [#{<<"rsid">> => <<"2c390fe4-02ad-41c7-98a2-cebb8c60ccf1">>,
+ <<"rsname">> => <<"allvhost">>,
+ <<"scopes">> => [<<"rabbitmq.configure:*/*">>]},
+ #{<<"rsid">> => <<"e7f12e94-4c34-43d8-b2b1-c516af644cee">>,
+ <<"rsname">> => <<"vhost1">>,
+ <<"scopes">> => [<<"rabbitmq.write:*/*">>]},
+ #{<<"rsid">> => <<"12ac3d1c-28c2-4521-8e33-0952eff10bd9">>,
+ <<"rsname">> => <<"Default Resource">>,
+ <<"scopes">> => [<<"rabbitmq.read:*/*">>]},
+ %% this one won't be used because of the resource id
+ #{<<"rsid">> => <<"bee8fac6-c3ec-11e9-aa8c-2a2ae2dbcce4">>,
+ <<"rsname">> => <<"Default Resource">>,
+ <<"scopes">> => [<<"rabbitmq-resource-read">>]}]}}
+ ),
+ Conn = open_unmanaged_connection(Config, 0, <<"username">>, Token),
+ {ok, Ch} = amqp_connection:open_channel(Conn),
+ #'queue.declare_ok'{queue = _} =
+ amqp_channel:call(Ch, #'queue.declare'{exclusive = true}),
+ close_connection_and_channel(Conn, Ch).
+
+test_successful_token_refresh(Config) ->
+ Duration = 5,
+ {_Algo, Token} = generate_expirable_token(Config, [<<"rabbitmq.configure:vhost1/*">>,
+ <<"rabbitmq.write:vhost1/*">>,
+ <<"rabbitmq.read:vhost1/*">>],
+ Duration),
+ Conn = open_unmanaged_connection(Config, 0, <<"vhost1">>, <<"username">>, Token),
+ {ok, Ch} = amqp_connection:open_channel(Conn),
+
+ {_Algo, Token2} = generate_valid_token(Config, [<<"rabbitmq.configure:vhost1/*">>,
+ <<"rabbitmq.write:vhost1/*">>,
+ <<"rabbitmq.read:vhost1/*">>]),
+ ?UTIL_MOD:wait_for_token_to_expire(timer:seconds(Duration)),
+ ?assertEqual(ok, amqp_connection:update_secret(Conn, Token2, <<"token refresh">>)),
+
+ {ok, Ch2} = amqp_connection:open_channel(Conn),
+
+ #'queue.declare_ok'{queue = _} =
+ amqp_channel:call(Ch, #'queue.declare'{exclusive = true}),
+ #'queue.declare_ok'{queue = _} =
+ amqp_channel:call(Ch2, #'queue.declare'{exclusive = true}),
+
+ amqp_channel:close(Ch2),
+ close_connection_and_channel(Conn, Ch).
+
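+%% A note on the mechanism exercised above: amqp_connection:update_secret/3
+%% drives RabbitMQ's connection.update-secret protocol extension; the third
+%% argument is a free-form, human-readable reason. A rejected secret is not
+%% reported by this call but surfaces later as a connection-level error (see
+%% the failed refresh cases below).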
+
+test_failed_connection_with_expired_token(Config) ->
+ {_Algo, Token} = generate_expired_token(Config, [<<"rabbitmq.configure:vhost1/*">>,
+ <<"rabbitmq.write:vhost1/*">>,
+ <<"rabbitmq.read:vhost1/*">>]),
+ ?assertMatch({error, {auth_failure, _}},
+ open_unmanaged_connection(Config, 0, <<"vhost1">>, <<"username">>, Token)).
+
+test_failed_connection_with_a_non_token(Config) ->
+ ?assertMatch({error, {auth_failure, _}},
+ open_unmanaged_connection(Config, 0, <<"vhost1">>, <<"username">>, <<"a-non-token-value">>)).
+
+test_failed_connection_with_a_token_with_insufficient_vhost_permission(Config) ->
+ {_Algo, Token} = generate_valid_token(Config, [<<"rabbitmq.configure:alt-vhost/*">>,
+ <<"rabbitmq.write:alt-vhost/*">>,
+ <<"rabbitmq.read:alt-vhost/*">>]),
+ ?assertEqual({error, not_allowed},
+ open_unmanaged_connection(Config, 0, <<"off-limits-vhost">>, <<"username">>, Token)).
+
+test_failed_connection_with_a_token_with_insufficient_resource_permission(Config) ->
+ {_Algo, Token} = generate_valid_token(Config, [<<"rabbitmq.configure:vhost2/jwt*">>,
+ <<"rabbitmq.write:vhost2/jwt*">>,
+ <<"rabbitmq.read:vhost2/jwt*">>]),
+ Conn = open_unmanaged_connection(Config, 0, <<"vhost2">>, <<"username">>, Token),
+ {ok, Ch} = amqp_connection:open_channel(Conn),
+ ?assertExit({{shutdown, {server_initiated_close, 403, _}}, _},
+ amqp_channel:call(Ch, #'queue.declare'{queue = <<"alt-prefix.eq.1">>, exclusive = true})),
+ close_connection(Conn).
+
+test_failed_token_refresh_case1(Config) ->
+ {_Algo, Token} = generate_valid_token(Config, [<<"rabbitmq.configure:vhost4/*">>,
+ <<"rabbitmq.write:vhost4/*">>,
+ <<"rabbitmq.read:vhost4/*">>]),
+ Conn = open_unmanaged_connection(Config, 0, <<"vhost4">>, <<"username">>, Token),
+ {ok, Ch} = amqp_connection:open_channel(Conn),
+ #'queue.declare_ok'{queue = _} =
+ amqp_channel:call(Ch, #'queue.declare'{exclusive = true}),
+
+ {_Algo, Token2} = generate_expired_token(Config, [<<"rabbitmq.configure:vhost4/*">>,
+ <<"rabbitmq.write:vhost4/*">>,
+ <<"rabbitmq.read:vhost4/*">>]),
+ %% the error is communicated asynchronously via a connection-level error
+ ?assertEqual(ok, amqp_connection:update_secret(Conn, Token2, <<"token refresh">>)),
+
+ {ok, Ch2} = amqp_connection:open_channel(Conn),
+ ?assertExit({{shutdown, {server_initiated_close, 403, _}}, _},
+ amqp_channel:call(Ch2, #'queue.declare'{queue = <<"a.q">>, exclusive = true})),
+
+ close_connection(Conn).
+
+test_failed_token_refresh_case2(Config) ->
+ {_Algo, Token} = generate_valid_token(Config, [<<"rabbitmq.configure:vhost4/*">>,
+ <<"rabbitmq.write:vhost4/*">>,
+ <<"rabbitmq.read:vhost4/*">>]),
+ Conn = open_unmanaged_connection(Config, 0, <<"vhost4">>, <<"username">>, Token),
+ {ok, Ch} = amqp_connection:open_channel(Conn),
+ #'queue.declare_ok'{queue = _} =
+ amqp_channel:call(Ch, #'queue.declare'{exclusive = true}),
+
+ %% the error is communicated asynchronously via a connection-level error
+ ?assertEqual(ok, amqp_connection:update_secret(Conn, <<"not-a-token-^^^^5%">>, <<"token refresh">>)),
+
+ ?assertExit({{shutdown, {connection_closing, {server_initiated_close, 530, _}}}, _},
+ amqp_connection:open_channel(Conn)),
+
+ close_connection(Conn).
diff --git a/deps/rabbitmq_auth_backend_oauth2/test/unit_SUITE.erl b/deps/rabbitmq_auth_backend_oauth2/test/unit_SUITE.erl
new file mode 100644
index 0000000000..f1ed34fabf
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_oauth2/test/unit_SUITE.erl
@@ -0,0 +1,551 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+-module(unit_SUITE).
+
+-compile(export_all).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+all() ->
+ [
+ test_own_scope,
+ test_validate_payload_resource_server_id_mismatch,
+ test_validate_payload,
+ test_successful_access_with_a_token,
+ test_successful_access_with_a_token_that_has_tag_scopes,
+ test_unsuccessful_access_with_a_bogus_token,
+ test_restricted_vhost_access_with_a_valid_token,
+ test_insufficient_permissions_in_a_valid_token,
+ test_command_json,
+ test_command_pem,
+ test_command_pem_no_kid,
+ test_token_expiration,
+ test_incorrect_kid,
+ test_post_process_token_payload,
+ test_post_process_token_payload_keycloak,
+ test_post_process_token_payload_complex_claims
+ ].
+
+init_per_suite(Config) ->
+ application:load(rabbitmq_auth_backend_oauth2),
+ Env = application:get_all_env(rabbitmq_auth_backend_oauth2),
+ Config1 = rabbit_ct_helpers:set_config(Config, {env, Env}),
+ rabbit_ct_helpers:run_setup_steps(Config1, []).
+
+end_per_suite(Config) ->
+ Env = ?config(env, Config),
+ lists:foreach(
+ fun({K, V}) ->
+ application:set_env(rabbitmq_auth_backend_oauth2, K, V)
+ end,
+ Env),
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_testcase(test_post_process_token_payload_complex_claims, Config) ->
+ application:set_env(rabbitmq_auth_backend_oauth2, extra_scopes_source, <<"additional_rabbitmq_scopes">>),
+ application:set_env(rabbitmq_auth_backend_oauth2, resource_server_id, <<"rabbitmq">>),
+ Config;
+
+init_per_testcase(_, Config) ->
+ Config.
+
+end_per_testcase(test_post_process_token_payload_complex_claims, Config) ->
+ application:set_env(rabbitmq_auth_backend_oauth2, extra_scopes_source, undefined),
+ application:set_env(rabbitmq_auth_backend_oauth2, resource_server_id, undefined),
+ Config;
+end_per_testcase(_, Config) ->
+ Config.
+
+%%
+%% Test Cases
+%%
+
+-define(UTIL_MOD, rabbit_auth_backend_oauth2_test_util).
+-define(RESOURCE_SERVER_ID, <<"rabbitmq">>).
+
+test_post_process_token_payload(_) ->
+    ArgumentsExpectations = [
+ {{[<<"rabbitmq">>, <<"hare">>], [<<"read">>, <<"write">>, <<"configure">>]},
+ {[<<"rabbitmq">>, <<"hare">>], [<<"read">>, <<"write">>, <<"configure">>]}},
+ {{<<"rabbitmq hare">>, <<"read write configure">>},
+ {[<<"rabbitmq">>, <<"hare">>], [<<"read">>, <<"write">>, <<"configure">>]}},
+ {{<<"rabbitmq">>, <<"read">>},
+ {[<<"rabbitmq">>], [<<"read">>]}}
+ ],
+ lists:foreach(
+ fun({{Aud, Scope}, {ExpectedAud, ExpectedScope}}) ->
+ Payload = post_process_token_payload(Aud, Scope),
+ ?assertEqual(ExpectedAud, maps:get(<<"aud">>, Payload)),
+ ?assertEqual(ExpectedScope, maps:get(<<"scope">>, Payload))
+        end, ArgumentsExpectations).
+
+post_process_token_payload(Audience, Scopes) ->
+ Jwk = ?UTIL_MOD:fixture_jwk(),
+ Token = maps:put(<<"aud">>, Audience, ?UTIL_MOD:fixture_token_with_scopes(Scopes)),
+ {_, EncodedToken} = ?UTIL_MOD:sign_token_hs(Token, Jwk),
+ {true, Payload} = uaa_jwt_jwt:decode_and_verify(Jwk, EncodedToken),
+ rabbit_auth_backend_oauth2:post_process_payload(Payload).
+
+test_post_process_token_payload_keycloak(_) ->
+ Pairs = [
+ %% common case
+ {
+ #{<<"permissions">> =>
+ [#{<<"rsid">> => <<"2c390fe4-02ad-41c7-98a2-cebb8c60ccf1">>,
+ <<"rsname">> => <<"allvhost">>,
+ <<"scopes">> => [<<"rabbitmq-resource.read:*/*">>]},
+ #{<<"rsid">> => <<"e7f12e94-4c34-43d8-b2b1-c516af644cee">>,
+ <<"rsname">> => <<"vhost1">>,
+ <<"scopes">> => [<<"rabbitmq-resource-read">>]},
+ #{<<"rsid">> => <<"12ac3d1c-28c2-4521-8e33-0952eff10bd9">>,
+ <<"rsname">> => <<"Default Resource">>}]},
+ [<<"rabbitmq-resource.read:*/*">>, <<"rabbitmq-resource-read">>]
+ },
+
+ %% one scopes field with a string instead of an array
+ {
+ #{<<"permissions">> =>
+ [#{<<"rsid">> => <<"2c390fe4-02ad-41c7-98a2-cebb8c60ccf1">>,
+ <<"rsname">> => <<"allvhost">>,
+ <<"scopes">> => <<"rabbitmq-resource.read:*/*">>},
+ #{<<"rsid">> => <<"e7f12e94-4c34-43d8-b2b1-c516af644cee">>,
+ <<"rsname">> => <<"vhost1">>,
+ <<"scopes">> => [<<"rabbitmq-resource-read">>]},
+ #{<<"rsid">> => <<"12ac3d1c-28c2-4521-8e33-0952eff10bd9">>,
+ <<"rsname">> => <<"Default Resource">>}]},
+ [<<"rabbitmq-resource.read:*/*">>, <<"rabbitmq-resource-read">>]
+ },
+
+ %% no scopes field in permissions
+ {
+ #{<<"permissions">> =>
+ [#{<<"rsid">> => <<"2c390fe4-02ad-41c7-98a2-cebb8c60ccf1">>,
+ <<"rsname">> => <<"allvhost">>},
+ #{<<"rsid">> => <<"e7f12e94-4c34-43d8-b2b1-c516af644cee">>,
+ <<"rsname">> => <<"vhost1">>},
+ #{<<"rsid">> => <<"12ac3d1c-28c2-4521-8e33-0952eff10bd9">>,
+ <<"rsname">> => <<"Default Resource">>}]},
+ []
+ },
+
+ %% no permissions
+ {
+ #{<<"permissions">> => []},
+ []
+ },
+ %% missing permissions key
+ {#{}, []}
+ ],
+ lists:foreach(
+ fun({Authorization, ExpectedScope}) ->
+ Payload = post_process_payload_with_keycloak_authorization(Authorization),
+ ?assertEqual(ExpectedScope, maps:get(<<"scope">>, Payload))
+ end, Pairs).
+
+post_process_payload_with_keycloak_authorization(Authorization) ->
+ Jwk = ?UTIL_MOD:fixture_jwk(),
+ Token = maps:put(<<"authorization">>, Authorization, ?UTIL_MOD:fixture_token_with_scopes([])),
+ {_, EncodedToken} = ?UTIL_MOD:sign_token_hs(Token, Jwk),
+ {true, Payload} = uaa_jwt_jwt:decode_and_verify(Jwk, EncodedToken),
+ rabbit_auth_backend_oauth2:post_process_payload(Payload).
+
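+%% The pairs above mirror Keycloak's RPT layout: granted permissions live
+%% under the "authorization" claim as a list of permission objects whose
+%% "scopes" entry may be a list, a single binary, or absent; post-processing
+%% flattens whatever is present into a plain list of scopes.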
+
+
+test_post_process_token_payload_complex_claims(_) ->
+ Pairs = [
+ %% claims in form of binary
+ {
+ <<"rabbitmq.rabbitmq-resource.read:*/* rabbitmq.rabbitmq-resource-read">>,
+ [<<"rabbitmq.rabbitmq-resource.read:*/*">>, <<"rabbitmq.rabbitmq-resource-read">>]
+ },
+ %% claims in form of binary - empty result
+ {<<>>, []},
+ %% claims in form of list
+ {
+ [<<"rabbitmq.rabbitmq-resource.read:*/*">>,
+ <<"rabbitmq2.rabbitmq-resource-read">>],
+ [<<"rabbitmq.rabbitmq-resource.read:*/*">>, <<"rabbitmq2.rabbitmq-resource-read">>]
+ },
+ %% claims in form of list - empty result
+ {[], []},
+ %% claims are map with list content
+ {
+ #{<<"rabbitmq">> =>
+ [<<"rabbitmq-resource.read:*/*">>,
+ <<"rabbitmq-resource-read">>],
+ <<"rabbitmq3">> =>
+ [<<"rabbitmq-resource.write:*/*">>,
+ <<"rabbitmq-resource-write">>]},
+ [<<"rabbitmq.rabbitmq-resource.read:*/*">>, <<"rabbitmq.rabbitmq-resource-read">>]
+ },
+ %% claims are map with list content - empty result
+ {
+ #{<<"rabbitmq2">> =>
+ [<<"rabbitmq-resource.read:*/*">>,
+ <<"rabbitmq-resource-read">>]},
+ []
+ },
+ %% claims are map with binary content
+ {
+ #{<<"rabbitmq">> => <<"rabbitmq-resource.read:*/* rabbitmq-resource-read">>,
+ <<"rabbitmq3">> => <<"rabbitmq-resource.write:*/* rabbitmq-resource-write">>},
+ [<<"rabbitmq.rabbitmq-resource.read:*/*">>, <<"rabbitmq.rabbitmq-resource-read">>]
+ },
+ %% claims are map with binary content - empty result
+ {
+ #{<<"rabbitmq2">> => <<"rabbitmq-resource.read:*/* rabbitmq-resource-read">>}, []
+ },
+ %% claims are map with empty binary content - empty result
+ {
+ #{<<"rabbitmq">> => <<>>}, []
+ },
+ %% claims are map with empty list content - empty result
+ {
+ #{<<"rabbitmq">> => []}, []
+ },
+ %% no extra claims provided
+ {[], []},
+ %% no extra claims provided
+ {#{}, []}
+ ],
+ lists:foreach(
+ fun({Authorization, ExpectedScope}) ->
+ Payload = post_process_payload_with_complex_claim_authorization(Authorization),
+ ?assertEqual(ExpectedScope, maps:get(<<"scope">>, Payload))
+ end, Pairs).
+
+post_process_payload_with_complex_claim_authorization(Authorization) ->
+ Jwk = ?UTIL_MOD:fixture_jwk(),
+ Token = maps:put(<<"additional_rabbitmq_scopes">>, Authorization, ?UTIL_MOD:fixture_token_with_scopes([])),
+ {_, EncodedToken} = ?UTIL_MOD:sign_token_hs(Token, Jwk),
+ {true, Payload} = uaa_jwt_jwt:decode_and_verify(Jwk, EncodedToken),
+ rabbit_auth_backend_oauth2:post_process_payload(Payload).
+
+test_successful_access_with_a_token(_) ->
+ %% Generate a token with JOSE
+ %% Check authorization with the token
+ %% Check user access granted by token
+ Jwk = ?UTIL_MOD:fixture_jwk(),
+ UaaEnv = [{signing_keys, #{<<"token-key">> => {map, Jwk}}}],
+ application:set_env(rabbitmq_auth_backend_oauth2, key_config, UaaEnv),
+ application:set_env(rabbitmq_auth_backend_oauth2, resource_server_id, <<"rabbitmq">>),
+ Username = <<"username">>,
+ Token = ?UTIL_MOD:sign_token_hs(?UTIL_MOD:fixture_token(), Jwk),
+
+ {ok, #auth_user{username = Username} = User} =
+ rabbit_auth_backend_oauth2:user_login_authentication(Username, [{password, Token}]),
+ {ok, #auth_user{username = Username} = User} =
+ rabbit_auth_backend_oauth2:user_login_authentication(Username, #{password => Token}),
+
+ ?assertEqual(true, rabbit_auth_backend_oauth2:check_vhost_access(User, <<"vhost">>, none)),
+ ?assertEqual(true, rabbit_auth_backend_oauth2:check_resource_access(
+ User,
+ #resource{virtual_host = <<"vhost">>,
+ kind = queue,
+ name = <<"foo">>},
+ configure,
+ #{})),
+ ?assertEqual(true, rabbit_auth_backend_oauth2:check_resource_access(
+ User,
+ #resource{virtual_host = <<"vhost">>,
+ kind = exchange,
+ name = <<"foo">>},
+ write,
+ #{})),
+
+ ?assertEqual(true, rabbit_auth_backend_oauth2:check_resource_access(
+ User,
+ #resource{virtual_host = <<"vhost">>,
+ kind = custom,
+ name = <<"bar">>},
+ read,
+ #{})),
+
+ ?assertEqual(true, rabbit_auth_backend_oauth2:check_topic_access(
+ User,
+ #resource{virtual_host = <<"vhost">>,
+ kind = topic,
+ name = <<"bar">>},
+ read,
+ #{routing_key => <<"#/foo">>})).
+
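+%% Scopes of the form <<"rabbitmq.tag:management">> carry no resource
+%% permissions; the backend maps them onto user tags, as asserted below.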
+test_successful_access_with_a_token_that_has_tag_scopes(_) ->
+ Jwk = ?UTIL_MOD:fixture_jwk(),
+ UaaEnv = [{signing_keys, #{<<"token-key">> => {map, Jwk}}}],
+ application:set_env(rabbitmq_auth_backend_oauth2, key_config, UaaEnv),
+ application:set_env(rabbitmq_auth_backend_oauth2, resource_server_id, <<"rabbitmq">>),
+ Username = <<"username">>,
+ Token = ?UTIL_MOD:sign_token_hs(?UTIL_MOD:fixture_token([<<"rabbitmq.tag:management">>,
+ <<"rabbitmq.tag:policymaker">>]), Jwk),
+
+ {ok, #auth_user{username = Username, tags = [management, policymaker]}} =
+ rabbit_auth_backend_oauth2:user_login_authentication(Username, [{password, Token}]).
+
+test_unsuccessful_access_with_a_bogus_token(_) ->
+ Username = <<"username">>,
+ application:set_env(rabbitmq_auth_backend_oauth2, resource_server_id, <<"rabbitmq">>),
+
+ Jwk0 = ?UTIL_MOD:fixture_jwk(),
+ Jwk = Jwk0#{<<"k">> => <<"bm90b2tlbmtleQ">>},
+ UaaEnv = [{signing_keys, #{<<"token-key">> => {map, Jwk}}}],
+ application:set_env(rabbitmq_auth_backend_oauth2, key_config, UaaEnv),
+
+ ?assertMatch({refused, _, _},
+ rabbit_auth_backend_oauth2:user_login_authentication(Username, [{password, <<"not a token">>}])).
+
+test_restricted_vhost_access_with_a_valid_token(_) ->
+ Username = <<"username">>,
+ application:set_env(rabbitmq_auth_backend_oauth2, resource_server_id, <<"rabbitmq">>),
+
+ Jwk = ?UTIL_MOD:fixture_jwk(),
+ Token = ?UTIL_MOD:sign_token_hs(?UTIL_MOD:fixture_token(), Jwk),
+ UaaEnv = [{signing_keys, #{<<"token-key">> => {map, Jwk}}}],
+ application:set_env(rabbitmq_auth_backend_oauth2, key_config, UaaEnv),
+
+ %% this user can authenticate successfully and access certain vhosts
+ {ok, #auth_user{username = Username, tags = []} = User} =
+ rabbit_auth_backend_oauth2:user_login_authentication(Username, [{password, Token}]),
+
+ %% access to a different vhost
+ ?assertEqual(false, rabbit_auth_backend_oauth2:check_vhost_access(User, <<"different vhost">>, none)).
+
+test_insufficient_permissions_in_a_valid_token(_) ->
+ Username = <<"username">>,
+ application:set_env(rabbitmq_auth_backend_oauth2, resource_server_id, <<"rabbitmq">>),
+
+ Jwk = ?UTIL_MOD:fixture_jwk(),
+ Token = ?UTIL_MOD:sign_token_hs(?UTIL_MOD:fixture_token(), Jwk),
+ UaaEnv = [{signing_keys, #{<<"token-key">> => {map, Jwk}}}],
+ application:set_env(rabbitmq_auth_backend_oauth2, key_config, UaaEnv),
+
+ {ok, #auth_user{username = Username} = User} =
+ rabbit_auth_backend_oauth2:user_login_authentication(Username, [{password, Token}]),
+
+ %% access to these resources is not granted
+ ?assertEqual(false, rabbit_auth_backend_oauth2:check_resource_access(
+ User,
+ #resource{virtual_host = <<"vhost">>,
+ kind = queue,
+ name = <<"foo1">>},
+ configure,
+ #{})),
+ ?assertEqual(false, rabbit_auth_backend_oauth2:check_resource_access(
+ User,
+ #resource{virtual_host = <<"vhost">>,
+ kind = custom,
+ name = <<"bar">>},
+ write,
+ #{})),
+ ?assertEqual(false, rabbit_auth_backend_oauth2:check_topic_access(
+ User,
+ #resource{virtual_host = <<"vhost">>,
+ kind = topic,
+ name = <<"bar">>},
+ read,
+ #{routing_key => <<"foo/#">>})).
+
+test_token_expiration(_) ->
+ Username = <<"username">>,
+ Jwk = ?UTIL_MOD:fixture_jwk(),
+ UaaEnv = [{signing_keys, #{<<"token-key">> => {map, Jwk}}}],
+ application:set_env(rabbitmq_auth_backend_oauth2, key_config, UaaEnv),
+ application:set_env(rabbitmq_auth_backend_oauth2, resource_server_id, <<"rabbitmq">>),
+ TokenData = ?UTIL_MOD:expirable_token(),
+ Username = <<"username">>,
+ Token = ?UTIL_MOD:sign_token_hs(TokenData, Jwk),
+ {ok, #auth_user{username = Username} = User} =
+ rabbit_auth_backend_oauth2:user_login_authentication(Username, [{password, Token}]),
+ ?assertEqual(true, rabbit_auth_backend_oauth2:check_resource_access(
+ User,
+ #resource{virtual_host = <<"vhost">>,
+ kind = queue,
+ name = <<"foo">>},
+ configure,
+ #{})),
+ ?assertEqual(true, rabbit_auth_backend_oauth2:check_resource_access(
+ User,
+ #resource{virtual_host = <<"vhost">>,
+ kind = exchange,
+ name = <<"foo">>},
+ write,
+ #{})),
+
+ ?UTIL_MOD:wait_for_token_to_expire(),
+ #{<<"exp">> := Exp} = TokenData,
+ ExpectedError = "Provided JWT token has expired at timestamp " ++ integer_to_list(Exp) ++ " (validated at " ++ integer_to_list(Exp) ++ ")",
+ ?assertEqual({error, ExpectedError},
+ rabbit_auth_backend_oauth2:check_resource_access(
+ User,
+ #resource{virtual_host = <<"vhost">>,
+ kind = queue,
+ name = <<"foo">>},
+ configure,
+ #{})),
+
+ ?assertMatch({refused, _, _},
+ rabbit_auth_backend_oauth2:user_login_authentication(Username, [{password, Token}])).
+
+test_incorrect_kid(_) ->
+ AltKid = <<"other-token-key">>,
+ Username = <<"username">>,
+ Jwk = ?UTIL_MOD:fixture_jwk(),
+ Jwk1 = Jwk#{<<"kid">> := AltKid},
+ application:set_env(rabbitmq_auth_backend_oauth2, resource_server_id, <<"rabbitmq">>),
+ Token = ?UTIL_MOD:sign_token_hs(?UTIL_MOD:fixture_token(), Jwk1),
+
+ ?assertMatch({refused, "Authentication using an OAuth 2/JWT token failed: ~p", [{error,key_not_found}]},
+ rabbit_auth_backend_oauth2:user_login_authentication(Username, #{password => Token})).
+
+test_command_json(_) ->
+ Username = <<"username">>,
+ Jwk = ?UTIL_MOD:fixture_jwk(),
+ Json = rabbit_json:encode(Jwk),
+ 'Elixir.RabbitMQ.CLI.Ctl.Commands.AddUaaKeyCommand':run(
+ [<<"token-key">>],
+ #{node => node(), json => Json}),
+ application:set_env(rabbitmq_auth_backend_oauth2, resource_server_id, <<"rabbitmq">>),
+ Token = ?UTIL_MOD:sign_token_hs(?UTIL_MOD:fixture_token(), Jwk),
+ {ok, #auth_user{username = Username} = User} =
+ rabbit_auth_backend_oauth2:user_login_authentication(Username, #{password => Token}),
+
+ ?assertEqual(true, rabbit_auth_backend_oauth2:check_vhost_access(User, <<"vhost">>, none)).
+
+test_command_pem_file(Config) ->
+ Username = <<"username">>,
+ application:set_env(rabbitmq_auth_backend_oauth2, resource_server_id, <<"rabbitmq">>),
+ CertsDir = ?config(rmq_certsdir, Config),
+ Keyfile = filename:join([CertsDir, "client", "key.pem"]),
+ Jwk = jose_jwk:from_pem_file(Keyfile),
+
+ PublicJwk = jose_jwk:to_public(Jwk),
+ PublicKeyFile = filename:join([CertsDir, "client", "public.pem"]),
+ jose_jwk:to_pem_file(PublicKeyFile, PublicJwk),
+
+ 'Elixir.RabbitMQ.CLI.Ctl.Commands.AddUaaKeyCommand':run(
+ [<<"token-key">>],
+ #{node => node(), pem_file => PublicKeyFile}),
+
+ Token = ?UTIL_MOD:sign_token_rsa(?UTIL_MOD:fixture_token(), Jwk, <<"token-key">>),
+ {ok, #auth_user{username = Username} = User} =
+ rabbit_auth_backend_oauth2:user_login_authentication(Username, #{password => Token}),
+
+ ?assertEqual(true, rabbit_auth_backend_oauth2:check_vhost_access(User, <<"vhost">>, none)).
+
+
+test_command_pem_file_no_kid(Config) ->
+ Username = <<"username">>,
+ application:set_env(rabbitmq_auth_backend_oauth2, resource_server_id, <<"rabbitmq">>),
+ CertsDir = ?config(rmq_certsdir, Config),
+ Keyfile = filename:join([CertsDir, "client", "key.pem"]),
+ Jwk = jose_jwk:from_pem_file(Keyfile),
+
+ PublicJwk = jose_jwk:to_public(Jwk),
+ PublicKeyFile = filename:join([CertsDir, "client", "public.pem"]),
+ jose_jwk:to_pem_file(PublicKeyFile, PublicJwk),
+
+ 'Elixir.RabbitMQ.CLI.Ctl.Commands.AddUaaKeyCommand':run(
+ [<<"token-key">>],
+ #{node => node(), pem_file => PublicKeyFile}),
+
+ %% Set default key
+ {ok, UaaEnv0} = application:get_env(rabbitmq_auth_backend_oauth2, key_config),
+ UaaEnv1 = proplists:delete(default_key, UaaEnv0),
+ UaaEnv2 = [{default_key, <<"token-key">>} | UaaEnv1],
+ application:set_env(rabbitmq_auth_backend_oauth2, key_config, UaaEnv2),
+
+ Token = ?UTIL_MOD:sign_token_no_kid(?UTIL_MOD:fixture_token(), Jwk),
+ {ok, #auth_user{username = Username} = User} =
+ rabbit_auth_backend_oauth2:user_login_authentication(Username, #{password => Token}),
+
+ ?assertEqual(true, rabbit_auth_backend_oauth2:check_vhost_access(User, <<"vhost">>, none)).
+
+test_command_pem(Config) ->
+ Username = <<"username">>,
+ application:set_env(rabbitmq_auth_backend_oauth2, resource_server_id, <<"rabbitmq">>),
+ CertsDir = ?config(rmq_certsdir, Config),
+ Keyfile = filename:join([CertsDir, "client", "key.pem"]),
+ Jwk = jose_jwk:from_pem_file(Keyfile),
+
+ Pem = jose_jwk:to_pem(jose_jwk:to_public(Jwk)),
+
+ 'Elixir.RabbitMQ.CLI.Ctl.Commands.AddUaaKeyCommand':run(
+ [<<"token-key">>],
+ #{node => node(), pem => Pem}),
+
+ Token = ?UTIL_MOD:sign_token_rsa(?UTIL_MOD:fixture_token(), Jwk, <<"token-key">>),
+ {ok, #auth_user{username = Username} = User} =
+ rabbit_auth_backend_oauth2:user_login_authentication(Username, #{password => Token}),
+
+ ?assertEqual(true, rabbit_auth_backend_oauth2:check_vhost_access(User, <<"vhost">>, none)).
+
+
+test_command_pem_no_kid(Config) ->
+ Username = <<"username">>,
+ application:set_env(rabbitmq_auth_backend_oauth2, resource_server_id, <<"rabbitmq">>),
+ CertsDir = ?config(rmq_certsdir, Config),
+ Keyfile = filename:join([CertsDir, "client", "key.pem"]),
+ Jwk = jose_jwk:from_pem_file(Keyfile),
+
+ Pem = jose_jwk:to_pem(jose_jwk:to_public(Jwk)),
+
+ 'Elixir.RabbitMQ.CLI.Ctl.Commands.AddUaaKeyCommand':run(
+ [<<"token-key">>],
+ #{node => node(), pem => Pem}),
+
+ %% This is the default key
+ {ok, UaaEnv0} = application:get_env(rabbitmq_auth_backend_oauth2, key_config),
+ UaaEnv1 = proplists:delete(default_key, UaaEnv0),
+ UaaEnv2 = [{default_key, <<"token-key">>} | UaaEnv1],
+ application:set_env(rabbitmq_auth_backend_oauth2, key_config, UaaEnv2),
+
+ Token = ?UTIL_MOD:sign_token_no_kid(?UTIL_MOD:fixture_token(), Jwk),
+ {ok, #auth_user{username = Username} = User} =
+ rabbit_auth_backend_oauth2:user_login_authentication(Username, #{password => Token}),
+
+ ?assertEqual(true, rabbit_auth_backend_oauth2:check_vhost_access(User, <<"vhost">>, none)).
+
+
+test_own_scope(_) ->
+ Examples = [
+ {<<"foo">>, [<<"foo">>, <<"foo.bar">>, <<"bar.foo">>,
+ <<"one.two">>, <<"foobar">>, <<"foo.other.third">>],
+ [<<"bar">>, <<"other.third">>]},
+ {<<"foo">>, [], []},
+ {<<"foo">>, [<<"foo">>, <<"other.foo.bar">>], []},
+ {<<"">>, [<<"foo">>, <<"bar">>], [<<"foo">>, <<"bar">>]}
+ ],
+ lists:map(
+ fun({ResId, Src, Dest}) ->
+ Dest = rabbit_auth_backend_oauth2:filter_scopes(Src, ResId)
+ end,
+ Examples).
+
+test_validate_payload_resource_server_id_mismatch(_) ->
+ NoKnownResourceServerId = #{<<"aud">> => [<<"foo">>, <<"bar">>],
+ <<"scope">> => [<<"foo">>, <<"foo.bar">>,
+ <<"bar.foo">>, <<"one.two">>,
+ <<"foobar">>, <<"foo.other.third">>]},
+ EmptyAud = #{<<"aud">> => [],
+ <<"scope">> => [<<"foo.bar">>, <<"bar.foo">>]},
+
+ ?assertEqual({refused, {invalid_aud, {resource_id_not_found_in_aud, ?RESOURCE_SERVER_ID,
+ [<<"foo">>,<<"bar">>]}}},
+ rabbit_auth_backend_oauth2:validate_payload(NoKnownResourceServerId, ?RESOURCE_SERVER_ID)),
+
+ ?assertEqual({refused, {invalid_aud, {resource_id_not_found_in_aud, ?RESOURCE_SERVER_ID, []}}},
+ rabbit_auth_backend_oauth2:validate_payload(EmptyAud, ?RESOURCE_SERVER_ID)).
+
+test_validate_payload(_) ->
+ KnownResourceServerId = #{<<"aud">> => [?RESOURCE_SERVER_ID],
+ <<"scope">> => [<<"foo">>, <<"rabbitmq.bar">>,
+ <<"bar.foo">>, <<"one.two">>,
+ <<"foobar">>, <<"rabbitmq.other.third">>]},
+ ?assertEqual({ok, #{<<"aud">> => [?RESOURCE_SERVER_ID],
+ <<"scope">> => [<<"bar">>, <<"other.third">>]}},
+ rabbit_auth_backend_oauth2:validate_payload(KnownResourceServerId, ?RESOURCE_SERVER_ID)).
diff --git a/deps/rabbitmq_auth_backend_oauth2/test/wildcard_match_SUITE.erl b/deps/rabbitmq_auth_backend_oauth2/test/wildcard_match_SUITE.erl
new file mode 100644
index 0000000000..6996a95f80
--- /dev/null
+++ b/deps/rabbitmq_auth_backend_oauth2/test/wildcard_match_SUITE.erl
@@ -0,0 +1,105 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(wildcard_match_SUITE).
+
+-compile(export_all).
+
+-include_lib("eunit/include/eunit.hrl").
+
+all() ->
+ [
+ exact_match,
+ prefix_match,
+ suffix_match,
+ mixed_match
+ ].
+
+exact_match(_Config) ->
+ ?assertEqual(true, wildcard:match(<<"value">>, <<"value">>)),
+ ?assertEqual(true, wildcard:match(<<"string with / special % characters">>,
+ <<"string%20with%20%2F%20special%20%25%20characters">>)),
+ ?assertEqual(true, wildcard:match(<<"pattern with plus spaces">>,
+ <<"pattern+with++plus++spaces">>)),
+ ?assertEqual(true, wildcard:match(<<"pattern with plus spaces and * wildcard encoded">>,
+ <<"pattern+with+plus+spaces+and+%2A+wildcard+encoded">>)),
+ ?assertEqual(true, wildcard:match(<<"case with * special / characters">>,
+ <<"case+with+%2a+special+%2f+characters">>)),
+
+ ?assertEqual(false, wildcard:match(<<"casesensitive">>, <<"CaseSensitive">>)),
+
+ ?assertEqual(false, wildcard:match(<<"foo">>, <<"fo">>)),
+ ?assertEqual(false, wildcard:match(<<"foo">>, <<"fooo">>)),
+
+ %% Special characters and spaces should be %-encoded.
+ ?assertEqual(false, wildcard:match(<<"string with unescaped % character">>,
+ <<"string with unescaped % character">>)),
+
+    %% Here the '*' in the pattern is itself a wildcard, so it matches a literal
+    %% '*' (and anything else); to match exactly the '*' character it must be %-encoded
+ ?assertEqual(true, wildcard:match(<<"wildcard * is matched by wildcard">>,
+ <<"wildcard * is matched by wildcard">>)),
+
+ ?assertEqual(true, wildcard:match(<<"wildcard * and anything else is matched by wildcard">>,
+ <<"wildcard * is matched by wildcard">>)),
+
+ ?assertEqual(true, wildcard:match(<<"wildcard * is matched by urlencoded">>,
+ <<"wildcard+%2a+is+matched+by+urlencoded">>)),
+ ?assertEqual(false, wildcard:match(<<"wildcard * and extra content is not matched by urlencoded">>,
+ <<"wildcard+%2a+is+not+matched+by+urlencoded">>)),
+
+ %% Spaces do not interfere with parsing
+ ?assertEqual(true, wildcard:match(<<"pattern with spaces">>,
+ <<"pattern with spaces">>)).
+
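+%% In the patterns below '*' is a multi-character wildcard; a literal '*'
+%% must be %-encoded as %2A (case-insensitive), and '+' or a raw space both
+%% denote a space, as exercised in exact_match/1 above.
+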
+suffix_match(_Config) ->
+ ?assertEqual(true, wildcard:match(<<"foo">>, <<"*oo">>)),
+ %% Empty prefix
+ ?assertEqual(true, wildcard:match(<<"foo">>, <<"*foo">>)),
+ %% Anything goes
+ ?assertEqual(true, wildcard:match(<<"foo">>, <<"*">>)),
+ ?assertEqual(true, wildcard:match(<<"line * with * special * characters">>, <<"*+characters">>)),
+ ?assertEqual(true, wildcard:match(<<"line * with * special * characters">>, <<"*special+%2A+characters">>)),
+
+ ?assertEqual(false, wildcard:match(<<"foo">>, <<"*r">>)),
+ ?assertEqual(false, wildcard:match(<<"foo">>, <<"*foobar">>)).
+
+prefix_match(_Config) ->
+ ?assertEqual(true, wildcard:match(<<"foo">>, <<"fo*">>)),
+ %% Empty suffix
+ ?assertEqual(true, wildcard:match(<<"foo">>, <<"foo*">>)),
+ ?assertEqual(true, wildcard:match(<<"line * with * special * characters">>, <<"line+*">>)),
+ ?assertEqual(true, wildcard:match(<<"line * with * special * characters">>, <<"line+%2a*">>)),
+
+ %% Wildcard matches '*' character
+ ?assertEqual(true, wildcard:match(<<"string with unescaped *">>,
+ <<"string+with+unescaped+*">>)),
+
+ ?assertEqual(false, wildcard:match(<<"foo">>, <<"b*">>)),
+ ?assertEqual(false, wildcard:match(<<"foo">>, <<"barfoo*">>)).
+
+mixed_match(_Config) ->
+ %% Empty wildcards
+ ?assertEqual(true, wildcard:match(<<"string">>, <<"*str*ing*">>)),
+ ?assertEqual(true, wildcard:match(<<"str*ing">>, <<"*str*ing*">>)),
+
+ ?assertEqual(true, wildcard:match(<<"some long string">>, <<"some*string">>)),
+ ?assertEqual(true, wildcard:match(<<"some long string">>, <<"some*long*string">>)),
+ ?assertEqual(true, wildcard:match(<<"some long string">>, <<"*some*string*">>)),
+ ?assertEqual(true, wildcard:match(<<"some long string">>, <<"*long*">>)),
+ %% Matches two spaces (3 or more words)
+ ?assertEqual(true, wildcard:match(<<"some long string">>, <<"*+*+*">>)),
+
+ ?assertEqual(false, wildcard:match(<<"some string">>, <<"*+*+*">>)),
+ ?assertEqual(false, wildcard:match(<<"some long string">>, <<"some*other*string">>)),
+
+ ?assertEqual(true, wildcard:match(<<"some long string">>, <<"s*e*str*">>)),
+ %% The z doesn't appear in the subject
+ ?assertEqual(false, wildcard:match(<<"some long string">>, <<"s*z*str*">>)),
+
+ ?assertEqual(false, wildcard:match(<<"string">>, <<"*some*">>)),
+ ?assertEqual(false, wildcard:match(<<"string">>, <<"*some*string*">>)).
diff --git a/deps/rabbitmq_auth_mechanism_ssl/.gitignore b/deps/rabbitmq_auth_mechanism_ssl/.gitignore
new file mode 100644
index 0000000000..2e68166f1c
--- /dev/null
+++ b/deps/rabbitmq_auth_mechanism_ssl/.gitignore
@@ -0,0 +1,17 @@
+.sw?
+.*.sw?
+*.beam
+/.erlang.mk/
+/cover/
+/deps/
+/doc/
+/ebin/
+/escript/
+/escript.lock
+/logs/
+/plugins/
+/plugins.lock
+/sbin/
+/sbin.lock
+
+/rabbitmq_auth_mechanism_ssl.d
diff --git a/deps/rabbitmq_auth_mechanism_ssl/.travis.yml b/deps/rabbitmq_auth_mechanism_ssl/.travis.yml
new file mode 100644
index 0000000000..b18a51db01
--- /dev/null
+++ b/deps/rabbitmq_auth_mechanism_ssl/.travis.yml
@@ -0,0 +1,61 @@
+# vim:sw=2:et:
+
+os: linux
+dist: xenial
+language: elixir
+notifications:
+ email:
+ recipients:
+ - alerts@rabbitmq.com
+ on_success: never
+ on_failure: always
+addons:
+ apt:
+ packages:
+ - awscli
+cache:
+ apt: true
+env:
+ global:
+ - secure: EXMOCUfRfFJuzP+HoSZoFnNyMQ9+IOn3adb0YwoEjxeHK8LHDuQzANqRRqZ+Bln/qHbuNY8U6xqcARWhBYcNGnAsRED6IR7pXqUUZtbEP5kUQq1ghM5sAqYYaFjBVMYuJMQL1IltrPw8kenyJp7dn3ek07+KM0ZV1aYklL1suUI=
+ - secure: SlyycFG1JSNH2BBy3doWnZXyY4pRB3IfzLag8EJyDZVvkHND6svKwP+MSdAeeKOx9+vReRmzacMBtqoFIang3twycR/QIDhC5sRlolWKBQqXEMEbZRGqZJBKH4g+2IJqQVfoSoqAAZsbHusyQ9UhIcqD2KvN5P6Em4WedBRlWRY=
+
+ # $base_rmq_ref is used by rabbitmq-components.mk to select the
+ # appropriate branch for dependencies.
+ - base_rmq_ref=master
+
+elixir:
+ - '1.9'
+otp_release:
+ - '21.3'
+ - '22.2'
+
+install:
+ # This project being an Erlang one (we just set language to Elixir
+ # to ensure it is installed), we don't want Travis to run mix(1)
+ # automatically as it will break.
+ skip
+
+script:
+ # $current_rmq_ref is also used by rabbitmq-components.mk to select
+ # the appropriate branch for dependencies.
+ - make check-rabbitmq-components.mk
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+ - make xref
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+ - make tests
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+
+after_failure:
+ - |
+ cd "$TRAVIS_BUILD_DIR"
+ if test -d logs && test "$AWS_ACCESS_KEY_ID" && test "$AWS_SECRET_ACCESS_KEY"; then
+ archive_name="$(basename "$TRAVIS_REPO_SLUG")-$TRAVIS_JOB_NUMBER"
+
+ tar -c --transform "s/^logs/${archive_name}/" -f - logs | \
+ xz > "${archive_name}.tar.xz"
+
+ aws s3 cp "${archive_name}.tar.xz" s3://server-release-pipeline/travis-ci-logs/ \
+ --region eu-west-1 \
+ --acl public-read
+ fi
diff --git a/deps/rabbitmq_auth_mechanism_ssl/CODE_OF_CONDUCT.md b/deps/rabbitmq_auth_mechanism_ssl/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000000..08697906fd
--- /dev/null
+++ b/deps/rabbitmq_auth_mechanism_ssl/CODE_OF_CONDUCT.md
@@ -0,0 +1,44 @@
+# Contributor Code of Conduct
+
+As contributors and maintainers of this project, and in the interest of fostering an open
+and welcoming community, we pledge to respect all people who contribute through reporting
+issues, posting feature requests, updating documentation, submitting pull requests or
+patches, and other activities.
+
+We are committed to making participation in this project a harassment-free experience for
+everyone, regardless of level of experience, gender, gender identity and expression,
+sexual orientation, disability, personal appearance, body size, race, ethnicity, age,
+religion, or nationality.
+
+Examples of unacceptable behavior by participants include:
+
+ * The use of sexualized language or imagery
+ * Personal attacks
+ * Trolling or insulting/derogatory comments
+ * Public or private harassment
+ * Publishing others' private information, such as physical or electronic addresses,
+ without explicit permission
+ * Other unethical or unprofessional conduct
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments,
+commits, code, wiki edits, issues, and other contributions that are not aligned to this
+Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors
+that they deem inappropriate, threatening, offensive, or harmful.
+
+By adopting this Code of Conduct, project maintainers commit themselves to fairly and
+consistently applying these principles to every aspect of managing this project. Project
+maintainers who do not follow or enforce the Code of Conduct may be permanently removed
+from the project team.
+
+This Code of Conduct applies both within project spaces and in public spaces when an
+individual is representing the project or its community.
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by
+contacting a project maintainer at [info@rabbitmq.com](mailto:info@rabbitmq.com). All complaints will
+be reviewed and investigated and will result in a response that is deemed necessary and
+appropriate to the circumstances. Maintainers are obligated to maintain confidentiality
+with regard to the reporter of an incident.
+
+This Code of Conduct is adapted from the
+[Contributor Covenant](https://contributor-covenant.org), version 1.3.0, available at
+[contributor-covenant.org/version/1/3/0/](https://contributor-covenant.org/version/1/3/0/)
diff --git a/deps/rabbitmq_auth_mechanism_ssl/CONTRIBUTING.md b/deps/rabbitmq_auth_mechanism_ssl/CONTRIBUTING.md
new file mode 100644
index 0000000000..b50bd82900
--- /dev/null
+++ b/deps/rabbitmq_auth_mechanism_ssl/CONTRIBUTING.md
@@ -0,0 +1,103 @@
+Thank you for using RabbitMQ and for taking the time to contribute to the project.
+This document has two main parts:
+
+ * when and how to file GitHub issues for RabbitMQ projects
+ * how to submit pull requests
+
+They are intended to save you and RabbitMQ maintainers some time, so please
+take a moment to read through them.
+
+## Overview
+
+### GitHub issues
+
+The RabbitMQ team uses GitHub issues for _specific actionable items_ that
+engineers can work on. This assumes the following:
+
+* GitHub issues are not used for questions, investigations, root cause
+ analysis, discussions of potential issues, etc (as defined by this team)
+* Enough information is provided by the reporter for maintainers to work with
+
+The team receives many questions through various venues every single
+day. Frequently, these questions do not include the necessary details
+the team needs to begin useful work. GitHub issues can very quickly
+turn into something impossible to navigate and make sense
+of. Because of this, questions, investigations, root cause analysis,
+and discussions of potential features are all considered to be
+[mailing list][rmq-users] material. If you are unsure where to begin,
+the [RabbitMQ users mailing list][rmq-users] is the right place.
+
+Getting all the details necessary to reproduce an issue, make a
+conclusion or even form a hypothesis about what's happening can take a
+fair amount of time. Please help others help you by providing a way to
+reproduce the behavior you're observing, or at least sharing as much
+relevant information as possible on the [RabbitMQ users mailing
+list][rmq-users].
+
+Please provide versions of the software used:
+
+ * RabbitMQ server
+ * Erlang
+ * Operating system version (and distribution, if applicable)
+ * All client libraries used
+ * RabbitMQ plugins (if applicable)
+
+The following information greatly helps in investigating and reproducing issues:
+
+ * RabbitMQ server logs
+ * A code example or terminal transcript that can be used to reproduce
+ * Full exception stack traces (a single line message is not enough!)
+ * `rabbitmqctl report` and `rabbitmqctl environment` output
+ * Other relevant details about the environment and workload, e.g. a traffic capture
+ * Feel free to edit out hostnames and other potentially sensitive information.
+
+To make collecting much of this and other environment information easier, use
+the [`rabbitmq-collect-env`][rmq-collect-env] script. It will produce an archive with
+server logs, operating system logs, output of certain diagnostics commands and so on.
+Please note that **no effort is made to scrub any information that may be sensitive**.
+
+### Pull Requests
+
+RabbitMQ projects use pull requests to discuss, collaborate on and accept code contributions.
+Pull requests are the primary place for discussing code changes.
+
+Here's the recommended workflow:
+
+ * [Fork the repository][github-fork] or repositories you plan on contributing to. If multiple
+ repositories are involved in addressing the same issue, please use the same branch name
+ in each repository
+ * Create a branch with a descriptive name in the relevant repositories
+ * Make your changes, run tests (usually with `make tests`), commit with a
+ [descriptive message][git-commit-msgs], push to your fork
+ * Submit pull requests with an explanation what has been changed and **why**
+ * Submit a filled out and signed [Contributor Agreement][ca-agreement] if needed (see below)
+ * Be patient. We will get to your pull request eventually
+
+If what you are going to work on is a substantial change, please first
+ask the core team for their opinion on the [RabbitMQ users mailing list][rmq-users].
+
+## Running Tests
+
+ make tests
+
+## Code of Conduct
+
+See [CODE_OF_CONDUCT.md](./CODE_OF_CONDUCT.md).
+
+## Contributor Agreement
+
+If you want to contribute a non-trivial change, please submit a signed
+copy of our [Contributor Agreement][ca-agreement] around the time you
+submit your pull request. This will make it much easier (in some
+cases, possible) for the RabbitMQ team at Pivotal to merge your
+contribution.
+
+## Where to Ask Questions
+
+If something isn't clear, feel free to ask on our [mailing list][rmq-users].
+
+[rmq-collect-env]: https://github.com/rabbitmq/support-tools/blob/master/scripts/rabbitmq-collect-env
+[git-commit-msgs]: https://chris.beams.io/posts/git-commit/
+[rmq-users]: https://groups.google.com/forum/#!forum/rabbitmq-users
+[ca-agreement]: https://cla.pivotal.io/sign/rabbitmq
+[github-fork]: https://help.github.com/articles/fork-a-repo/
diff --git a/deps/rabbitmq_auth_mechanism_ssl/LICENSE b/deps/rabbitmq_auth_mechanism_ssl/LICENSE
new file mode 100644
index 0000000000..f2da65d175
--- /dev/null
+++ b/deps/rabbitmq_auth_mechanism_ssl/LICENSE
@@ -0,0 +1,4 @@
+This package is licensed under the MPL 2.0. For the MPL 2.0, please see LICENSE-MPL-RabbitMQ.
+
+If you have any questions regarding licensing, please contact us at
+info@rabbitmq.com.
diff --git a/deps/rabbitmq_auth_mechanism_ssl/LICENSE-MPL-RabbitMQ b/deps/rabbitmq_auth_mechanism_ssl/LICENSE-MPL-RabbitMQ
new file mode 100644
index 0000000000..14e2f777f6
--- /dev/null
+++ b/deps/rabbitmq_auth_mechanism_ssl/LICENSE-MPL-RabbitMQ
@@ -0,0 +1,373 @@
+Mozilla Public License Version 2.0
+==================================
+
+1. Definitions
+--------------
+
+1.1. "Contributor"
+ means each individual or legal entity that creates, contributes to
+ the creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+ means the combination of the Contributions of others (if any) used
+ by a Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+ means Source Code Form to which the initial Contributor has attached
+ the notice in Exhibit A, the Executable Form of such Source Code
+ Form, and Modifications of such Source Code Form, in each case
+ including portions thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ (a) that the initial Contributor has attached the notice described
+ in Exhibit B to the Covered Software; or
+
+ (b) that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the
+ terms of a Secondary License.
+
+1.6. "Executable Form"
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+ means a work that combines Covered Software with other material, in
+ a separate file or files, that is not Covered Software.
+
+1.8. "License"
+ means this document.
+
+1.9. "Licensable"
+ means having the right to grant, to the maximum extent possible,
+ whether at the time of the initial grant or subsequently, any and
+ all of the rights conveyed by this License.
+
+1.10. "Modifications"
+ means any of the following:
+
+ (a) any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered
+ Software; or
+
+ (b) any new file in Source Code Form that contains any Covered
+ Software.
+
+1.11. "Patent Claims" of a Contributor
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the
+ License, by the making, using, selling, offering for sale, having
+ made, import, or transfer of either its Contributions or its
+ Contributor Version.
+
+1.12. "Secondary License"
+ means either the GNU General Public License, Version 2.0, the GNU
+ Lesser General Public License, Version 2.1, the GNU Affero General
+ Public License, Version 3.0, or any later versions of those
+ licenses.
+
+1.13. "Source Code Form"
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that
+ controls, is controlled by, or is under common control with You. For
+ purposes of this definition, "control" means (a) the power, direct
+ or indirect, to cause the direction or management of such entity,
+ whether by contract or otherwise, or (b) ownership of more than
+ fifty percent (50%) of the outstanding shares or beneficial
+ ownership of such entity.
+
+2. License Grants and Conditions
+--------------------------------
+
+2.1. Grants
+
+Each Contributor hereby grants You a world-wide, royalty-free,
+non-exclusive license:
+
+(a) under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+(b) under Patent Claims of such Contributor to make, use, sell, offer
+ for sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+The licenses granted in Section 2.1 with respect to any Contribution
+become effective for each Contribution on the date the Contributor first
+distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+The licenses granted in this Section 2 are the only rights granted under
+this License. No additional rights or licenses will be implied from the
+distribution or licensing of Covered Software under this License.
+Notwithstanding Section 2.1(b) above, no patent license is granted by a
+Contributor:
+
+(a) for any code that a Contributor has removed from Covered Software;
+ or
+
+(b) for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+(c) under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+This License does not grant any rights in the trademarks, service marks,
+or logos of any Contributor (except as may be necessary to comply with
+the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+No Contributor makes additional grants as a result of Your choice to
+distribute the Covered Software under a subsequent version of this
+License (see Section 10.2) or under the terms of a Secondary License (if
+permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+Each Contributor represents that the Contributor believes its
+Contributions are its original creation(s) or it has sufficient rights
+to grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+This License is not intended to limit any rights You have under
+applicable copyright doctrines of fair use, fair dealing, or other
+equivalents.
+
+2.7. Conditions
+
+Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
+in Section 2.1.
+
+3. Responsibilities
+-------------------
+
+3.1. Distribution of Source Form
+
+All distribution of Covered Software in Source Code Form, including any
+Modifications that You create or to which You contribute, must be under
+the terms of this License. You must inform recipients that the Source
+Code Form of the Covered Software is governed by the terms of this
+License, and how they can obtain a copy of this License. You may not
+attempt to alter or restrict the recipients' rights in the Source Code
+Form.
+
+3.2. Distribution of Executable Form
+
+If You distribute Covered Software in Executable Form then:
+
+(a) such Covered Software must also be made available in Source Code
+ Form, as described in Section 3.1, and You must inform recipients of
+ the Executable Form how they can obtain a copy of such Source Code
+ Form by reasonable means in a timely manner, at a charge no more
+ than the cost of distribution to the recipient; and
+
+(b) You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter
+ the recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+You may create and distribute a Larger Work under terms of Your choice,
+provided that You also comply with the requirements of this License for
+the Covered Software. If the Larger Work is a combination of Covered
+Software with a work governed by one or more Secondary Licenses, and the
+Covered Software is not Incompatible With Secondary Licenses, this
+License permits You to additionally distribute such Covered Software
+under the terms of such Secondary License(s), so that the recipient of
+the Larger Work may, at their option, further distribute the Covered
+Software under the terms of either this License or such Secondary
+License(s).
+
+3.4. Notices
+
+You may not remove or alter the substance of any license notices
+(including copyright notices, patent notices, disclaimers of warranty,
+or limitations of liability) contained within the Source Code Form of
+the Covered Software, except that You may alter any license notices to
+the extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+You may choose to offer, and to charge a fee for, warranty, support,
+indemnity or liability obligations to one or more recipients of Covered
+Software. However, You may do so only on Your own behalf, and not on
+behalf of any Contributor. You must make it absolutely clear that any
+such warranty, support, indemnity, or liability obligation is offered by
+You alone, and You hereby agree to indemnify every Contributor for any
+liability incurred by such Contributor as a result of warranty, support,
+indemnity or liability terms You offer. You may include additional
+disclaimers of warranty and limitations of liability specific to any
+jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+---------------------------------------------------
+
+If it is impossible for You to comply with any of the terms of this
+License with respect to some or all of the Covered Software due to
+statute, judicial order, or regulation then You must: (a) comply with
+the terms of this License to the maximum extent possible; and (b)
+describe the limitations and the code they affect. Such description must
+be placed in a text file included with all distributions of the Covered
+Software under this License. Except to the extent prohibited by statute
+or regulation, such description must be sufficiently detailed for a
+recipient of ordinary skill to be able to understand it.
+
+5. Termination
+--------------
+
+5.1. The rights granted under this License will terminate automatically
+if You fail to comply with any of its terms. However, if You become
+compliant, then the rights granted under this License from a particular
+Contributor are reinstated (a) provisionally, unless and until such
+Contributor explicitly and finally terminates Your grants, and (b) on an
+ongoing basis, if such Contributor fails to notify You of the
+non-compliance by some reasonable means prior to 60 days after You have
+come back into compliance. Moreover, Your grants from a particular
+Contributor are reinstated on an ongoing basis if such Contributor
+notifies You of the non-compliance by some reasonable means, this is the
+first time You have received notice of non-compliance with this License
+from such Contributor, and You become compliant prior to 30 days after
+Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+infringement claim (excluding declaratory judgment actions,
+counter-claims, and cross-claims) alleging that a Contributor Version
+directly or indirectly infringes any patent, then the rights granted to
+You by any and all Contributors for the Covered Software under Section
+2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all
+end user license agreements (excluding distributors and resellers) which
+have been validly granted by You or Your distributors under this License
+prior to termination shall survive termination.
+
+************************************************************************
+* *
+* 6. Disclaimer of Warranty *
+* ------------------------- *
+* *
+* Covered Software is provided under this License on an "as is" *
+* basis, without warranty of any kind, either expressed, implied, or *
+* statutory, including, without limitation, warranties that the *
+* Covered Software is free of defects, merchantable, fit for a *
+* particular purpose or non-infringing. The entire risk as to the *
+* quality and performance of the Covered Software is with You. *
+* Should any Covered Software prove defective in any respect, You *
+* (not any Contributor) assume the cost of any necessary servicing, *
+* repair, or correction. This disclaimer of warranty constitutes an *
+* essential part of this License. No use of any Covered Software is *
+* authorized under this License except under this disclaimer. *
+* *
+************************************************************************
+
+************************************************************************
+* *
+* 7. Limitation of Liability *
+* -------------------------- *
+* *
+* Under no circumstances and under no legal theory, whether tort *
+* (including negligence), contract, or otherwise, shall any *
+* Contributor, or anyone who distributes Covered Software as *
+* permitted above, be liable to You for any direct, indirect, *
+* special, incidental, or consequential damages of any character *
+* including, without limitation, damages for lost profits, loss of *
+* goodwill, work stoppage, computer failure or malfunction, or any *
+* and all other commercial damages or losses, even if such party *
+* shall have been informed of the possibility of such damages. This *
+* limitation of liability shall not apply to liability for death or *
+* personal injury resulting from such party's negligence to the *
+* extent applicable law prohibits such limitation. Some *
+* jurisdictions do not allow the exclusion or limitation of *
+* incidental or consequential damages, so this exclusion and *
+* limitation may not apply to You. *
+* *
+************************************************************************
+
+8. Litigation
+-------------
+
+Any litigation relating to this License may be brought only in the
+courts of a jurisdiction where the defendant maintains its principal
+place of business and such litigation shall be governed by laws of that
+jurisdiction, without reference to its conflict-of-law provisions.
+Nothing in this Section shall prevent a party's ability to bring
+cross-claims or counter-claims.
+
+9. Miscellaneous
+----------------
+
+This License represents the complete agreement concerning the subject
+matter hereof. If any provision of this License is held to be
+unenforceable, such provision shall be reformed only to the extent
+necessary to make it enforceable. Any law or regulation which provides
+that the language of a contract shall be construed against the drafter
+shall not be used to construe this License against a Contributor.
+
+10. Versions of the License
+---------------------------
+
+10.1. New Versions
+
+Mozilla Foundation is the license steward. Except as provided in Section
+10.3, no one other than the license steward has the right to modify or
+publish new versions of this License. Each version will be given a
+distinguishing version number.
+
+10.2. Effect of New Versions
+
+You may distribute the Covered Software under the terms of the version
+of the License under which You originally received the Covered Software,
+or under the terms of any subsequent version published by the license
+steward.
+
+10.3. Modified Versions
+
+If you create software not governed by this License, and you want to
+create a new license for such software, you may create and use a
+modified version of this License if you rename the license and remove
+any references to the name of the license steward (except to note that
+such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+Licenses
+
+If You choose to distribute Source Code Form that is Incompatible With
+Secondary Licenses under the terms of this version of the License, the
+notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+-------------------------------------------
+
+ This Source Code Form is subject to the terms of the Mozilla Public
+ License, v. 2.0. If a copy of the MPL was not distributed with this
+ file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular
+file, then You may include the notice in a location (such as a LICENSE
+file in a relevant directory) where a recipient would be likely to look
+for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+---------------------------------------------------------
+
+ This Source Code Form is "Incompatible With Secondary Licenses", as
+ defined by the Mozilla Public License, v. 2.0.
diff --git a/deps/rabbitmq_auth_mechanism_ssl/Makefile b/deps/rabbitmq_auth_mechanism_ssl/Makefile
new file mode 100644
index 0000000000..11a44a8d7f
--- /dev/null
+++ b/deps/rabbitmq_auth_mechanism_ssl/Makefile
@@ -0,0 +1,27 @@
+PROJECT = rabbitmq_auth_mechanism_ssl
+PROJECT_DESCRIPTION = RabbitMQ SSL authentication (SASL EXTERNAL)
+PROJECT_MOD = rabbit_auth_mechanism_ssl_app
+
+define PROJECT_ENV
+[
+ {name_from, distinguished_name}
+ ]
+endef
+
+define PROJECT_APP_EXTRA_KEYS
+ {broker_version_requirements, []}
+endef
+
+DEPS = rabbit_common rabbit
+
+DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-plugin.mk
+DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk
+
+# FIXME: Use erlang.mk patched for RabbitMQ, while waiting for PRs to be
+# reviewed and merged.
+
+ERLANG_MK_REPO = https://github.com/rabbitmq/erlang.mk.git
+ERLANG_MK_COMMIT = rabbitmq-tmp
+
+include rabbitmq-components.mk
+include erlang.mk
diff --git a/deps/rabbitmq_auth_mechanism_ssl/README.md b/deps/rabbitmq_auth_mechanism_ssl/README.md
new file mode 100644
index 0000000000..47f855ba56
--- /dev/null
+++ b/deps/rabbitmq_auth_mechanism_ssl/README.md
@@ -0,0 +1,86 @@
+# x509 (TLS/SSL) Certificate Authentication Mechanism for RabbitMQ
+
+This plugin allows RabbitMQ clients to authenticate using x509 certificates
+and the TLS (PKI) [peer verification mechanism](https://tools.ietf.org/html/rfc5280#section-6)
+instead of credentials (username/password pairs).
+
+
+## How it Works
+
+When a client connects and performs a TLS handshake,
+the username is obtained from the client's
+TLS (x509) certificate. The user's password is not checked.
+
+In order to use this mechanism, the client must connect with TLS
+enabled and present a client certificate.
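+
+As a minimal sketch, a connection from the RabbitMQ Erlang client
+(`amqp_client`) would offer only the EXTERNAL mechanism and present a
+client certificate; the hostname and certificate paths below are
+placeholders:
+
+``` erlang
+%% Assumes -include_lib("amqp_client/include/amqp_client.hrl") for the
+%% #amqp_params_network{} record. Hostname and paths are placeholders.
+{ok, Connection} = amqp_connection:start(
+    #amqp_params_network{
+        host            = "broker.example.com",
+        port            = 5671,
+        auth_mechanisms = [fun amqp_auth_mechanisms:external/3],
+        ssl_options     = [{cacertfile, "/path/to/ca_certificate.pem"},
+                           {certfile,   "/path/to/client_certificate.pem"},
+                           {keyfile,    "/path/to/client_key.pem"},
+                           {verify,     verify_peer}]}).
+```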
+
+
+## Usage
+
+This mechanism must also be enabled in RabbitMQ's configuration file;
+see the [Authentication Mechanisms](https://www.rabbitmq.com/authentication.html) and
+[Configuration](https://www.rabbitmq.com/configure.html) guides for
+more details.
+
+A couple of examples:
+
+``` ini
+auth_mechanisms.1 = PLAIN
+auth_mechanisms.2 = AMQPLAIN
+auth_mechanisms.3 = EXTERNAL
+```
+
+to allow this mechanism in addition to the defaults, or:
+
+``` ini
+auth_mechanisms.1 = EXTERNAL
+```
+
+to allow only this mechanism and prohibit connections that use
+usernames and passwords.
+
+For safety, the server must be configured with the TLS option `verify`
+set to `verify_peer`, so that any certificate presented by a connecting
+client is actually verified.
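+
+The relevant TLS listener settings might then look like the following
+sketch (certificate paths are placeholders):
+
+``` ini
+listeners.ssl.default = 5671
+ssl_options.cacertfile = /path/to/ca_certificate.pem
+ssl_options.certfile   = /path/to/server_certificate.pem
+ssl_options.keyfile    = /path/to/server_key.pem
+ssl_options.verify     = verify_peer
+ssl_options.fail_if_no_peer_cert = true
+```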
+
+### Username Extraction from Certificate
+
+#### Distinguished Name
+
+By default this mechanism sets the username to an [RFC 4514](https://tools.ietf.org/html/rfc4514)-ish string form of
+the certificate subject's Distinguished Name, similar to that
+produced by OpenSSL's "-nameopt [RFC2253](https://tools.ietf.org/html/rfc2253)" option.
+
+You can obtain this string form from a certificate with a command like:
+
+``` bash
+openssl x509 -in path/to/cert.pem -nameopt RFC2253 -subject -noout
+```
+
+or from an existing amqps connection with a command like:
+
+``` bash
+rabbitmqctl list_connections peer_cert_subject
+```
+
+#### Common Name
+
+To use the Common Name instead, set `ssl_cert_login_from` to `common_name`:
+
+``` ini
+auth_mechanisms.1 = EXTERNAL
+
+ssl_cert_login_from = common_name
+```
+
+Note that the authenticated user will then be looked up in the
+[configured authentication / authorisation backend(s)](https://www.rabbitmq.com/access-control.html). This will be
+the internal node database by default, but can include other
+backends if so configured.
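+
+With the internal database, a user whose name exactly matches the
+extracted value must exist. A sketch with a hypothetical Distinguished
+Name (`add_user` requires a password argument, but this mechanism
+ignores it):
+
+``` bash
+rabbitmqctl add_user 'O=client,CN=guest' 'ignored-password'
+rabbitmqctl set_permissions 'O=client,CN=guest' '.*' '.*' '.*'
+```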
+
+
+## Copyright & License
+
+(c) 2007-2020 VMware, Inc. or its affiliates.
+
+Released under the same license as RabbitMQ.
diff --git a/deps/rabbitmq_auth_mechanism_ssl/erlang.mk b/deps/rabbitmq_auth_mechanism_ssl/erlang.mk
new file mode 100644
index 0000000000..fce4be0b0a
--- /dev/null
+++ b/deps/rabbitmq_auth_mechanism_ssl/erlang.mk
@@ -0,0 +1,7808 @@
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+.PHONY: all app apps deps search rel relup docs install-docs check tests clean distclean help erlang-mk
+
+ERLANG_MK_FILENAME := $(realpath $(lastword $(MAKEFILE_LIST)))
+export ERLANG_MK_FILENAME
+
+ERLANG_MK_VERSION = 2019.07.01-40-geb3e4b0
+ERLANG_MK_WITHOUT =
+
+# Make 3.81 and 3.82 are deprecated.
+
+ifeq ($(MAKELEVEL)$(MAKE_VERSION),03.81)
+$(warning Please upgrade to GNU Make 4 or later: https://erlang.mk/guide/installation.html)
+endif
+
+ifeq ($(MAKELEVEL)$(MAKE_VERSION),03.82)
+$(warning Please upgrade to GNU Make 4 or later: https://erlang.mk/guide/installation.html)
+endif
+
+# Core configuration.
+
+PROJECT ?= $(notdir $(CURDIR))
+PROJECT := $(strip $(PROJECT))
+
+PROJECT_VERSION ?= rolling
+PROJECT_MOD ?= $(PROJECT)_app
+PROJECT_ENV ?= []
+
+# Verbosity.
+
+V ?= 0
+
+verbose_0 = @
+verbose_2 = set -x;
+verbose = $(verbose_$(V))
+
+ifeq ($(V),3)
+SHELL := $(SHELL) -x
+endif
+
+gen_verbose_0 = @echo " GEN " $@;
+gen_verbose_2 = set -x;
+gen_verbose = $(gen_verbose_$(V))
+
+gen_verbose_esc_0 = @echo " GEN " $$@;
+gen_verbose_esc_2 = set -x;
+gen_verbose_esc = $(gen_verbose_esc_$(V))
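+
+# For example: with V=0, recipes are silenced and terse " GEN " lines are
+# printed; with V=1 the verbose_* variables expand to nothing, so every
+# command is echoed; V=2 prefixes commands with "set -x;"; V=3 also runs
+# the shell itself with -x.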
+
+# Temporary files directory.
+
+ERLANG_MK_TMP ?= $(CURDIR)/.erlang.mk
+export ERLANG_MK_TMP
+
+# "erl" command.
+
+ERL = erl +A1 -noinput -boot no_dot_erlang
+
+# Platform detection.
+
+ifeq ($(PLATFORM),)
+UNAME_S := $(shell uname -s)
+
+ifeq ($(UNAME_S),Linux)
+PLATFORM = linux
+else ifeq ($(UNAME_S),Darwin)
+PLATFORM = darwin
+else ifeq ($(UNAME_S),SunOS)
+PLATFORM = solaris
+else ifeq ($(UNAME_S),GNU)
+PLATFORM = gnu
+else ifeq ($(UNAME_S),FreeBSD)
+PLATFORM = freebsd
+else ifeq ($(UNAME_S),NetBSD)
+PLATFORM = netbsd
+else ifeq ($(UNAME_S),OpenBSD)
+PLATFORM = openbsd
+else ifeq ($(UNAME_S),DragonFly)
+PLATFORM = dragonfly
+else ifeq ($(shell uname -o),Msys)
+PLATFORM = msys2
+else
+$(error Unable to detect platform. Please open a ticket with the output of uname -a.)
+endif
+
+export PLATFORM
+endif
+
+# Core targets.
+
+all:: deps app rel
+
+# Noop to avoid a Make warning when there's nothing to do.
+rel::
+ $(verbose) :
+
+relup:: deps app
+
+check:: tests
+
+clean:: clean-crashdump
+
+clean-crashdump:
+ifneq ($(wildcard erl_crash.dump),)
+ $(gen_verbose) rm -f erl_crash.dump
+endif
+
+distclean:: clean distclean-tmp
+
+$(ERLANG_MK_TMP):
+ $(verbose) mkdir -p $(ERLANG_MK_TMP)
+
+distclean-tmp:
+ $(gen_verbose) rm -rf $(ERLANG_MK_TMP)
+
+help::
+ $(verbose) printf "%s\n" \
+ "erlang.mk (version $(ERLANG_MK_VERSION)) is distributed under the terms of the ISC License." \
+ "Copyright (c) 2013-2016 Loïc Hoguin <essen@ninenines.eu>" \
+ "" \
+ "Usage: [V=1] $(MAKE) [target]..." \
+ "" \
+ "Core targets:" \
+ " all Run deps, app and rel targets in that order" \
+ " app Compile the project" \
+ " deps Fetch dependencies (if needed) and compile them" \
+ " fetch-deps Fetch dependencies recursively (if needed) without compiling them" \
+ " list-deps List dependencies recursively on stdout" \
+ " search q=... Search for a package in the built-in index" \
+ " rel Build a release for this project, if applicable" \
+ " docs Build the documentation for this project" \
+ " install-docs Install the man pages for this project" \
+ " check Compile and run all tests and analysis for this project" \
+ " tests Run the tests for this project" \
+ " clean Delete temporary and output files from most targets" \
+ " distclean Delete all temporary and output files" \
+ " help Display this help and exit" \
+ " erlang-mk Update erlang.mk to the latest version"
+
+# Core functions.
+
+empty :=
+space := $(empty) $(empty)
+tab := $(empty) $(empty)
+comma := ,
+
+define newline
+
+
+endef
+
+define comma_list
+$(subst $(space),$(comma),$(strip $(1)))
+endef
+
+define escape_dquotes
+$(subst ",\",$1)
+endef
+
+# Adding erlang.mk to make Erlang scripts that call init:get_plain_arguments() happy.
+define erlang
+$(ERL) $2 -pz $(ERLANG_MK_TMP)/rebar/ebin -eval "$(subst $(newline),,$(call escape_dquotes,$1))" -- erlang.mk
+endef
+
+ifeq ($(PLATFORM),msys2)
+core_native_path = $(shell cygpath -m $1)
+else
+core_native_path = $1
+endif
+
+core_http_get = curl -Lf$(if $(filter-out 0,$(V)),,s)o $(call core_native_path,$1) $2
+
+core_eq = $(and $(findstring $(1),$(2)),$(findstring $(2),$(1)))
+
+# We skip files that contain spaces because they end up causing issues.
+core_find = $(if $(wildcard $1),$(shell find $(1:%/=%) \( -type l -o -type f \) -name $(subst *,\*,$2) | grep -v " "))
+
+core_lc = $(subst A,a,$(subst B,b,$(subst C,c,$(subst D,d,$(subst E,e,$(subst F,f,$(subst G,g,$(subst H,h,$(subst I,i,$(subst J,j,$(subst K,k,$(subst L,l,$(subst M,m,$(subst N,n,$(subst O,o,$(subst P,p,$(subst Q,q,$(subst R,r,$(subst S,s,$(subst T,t,$(subst U,u,$(subst V,v,$(subst W,w,$(subst X,x,$(subst Y,y,$(subst Z,z,$(1)))))))))))))))))))))))))))
+
+core_ls = $(filter-out $(1),$(shell echo $(1)))
+
+# @todo Use a solution that does not require using perl.
+core_relpath = $(shell perl -e 'use File::Spec; print File::Spec->abs2rel(@ARGV) . "\n"' $1 $2)
+
+define core_render
+ printf -- '$(subst $(newline),\n,$(subst %,%%,$(subst ','\'',$(subst $(tab),$(WS),$(call $(1))))))\n' > $(2)
+endef
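+
+# Example usage of the helpers above (illustrative values):
+#   $(call comma_list,a b c)               expands to "a,b,c"
+#   $(call core_lc,My-App)                 expands to "my-app"
+#   $(call core_http_get,out.file,$(URL))  downloads $(URL) to out.file via curl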
+
+# Automated update.
+
+ERLANG_MK_REPO ?= https://github.com/ninenines/erlang.mk
+ERLANG_MK_COMMIT ?=
+ERLANG_MK_BUILD_CONFIG ?= build.config
+ERLANG_MK_BUILD_DIR ?= .erlang.mk.build
+
+erlang-mk: WITHOUT ?= $(ERLANG_MK_WITHOUT)
+erlang-mk:
+ifdef ERLANG_MK_COMMIT
+ $(verbose) git clone $(ERLANG_MK_REPO) $(ERLANG_MK_BUILD_DIR)
+ $(verbose) cd $(ERLANG_MK_BUILD_DIR) && git checkout $(ERLANG_MK_COMMIT)
+else
+ $(verbose) git clone --depth 1 $(ERLANG_MK_REPO) $(ERLANG_MK_BUILD_DIR)
+endif
+ $(verbose) if [ -f $(ERLANG_MK_BUILD_CONFIG) ]; then cp $(ERLANG_MK_BUILD_CONFIG) $(ERLANG_MK_BUILD_DIR)/build.config; fi
+ $(gen_verbose) $(MAKE) --no-print-directory -C $(ERLANG_MK_BUILD_DIR) WITHOUT='$(strip $(WITHOUT))' UPGRADE=1
+ $(verbose) cp $(ERLANG_MK_BUILD_DIR)/erlang.mk ./erlang.mk
+ $(verbose) rm -rf $(ERLANG_MK_BUILD_DIR)
+ $(verbose) rm -rf $(ERLANG_MK_TMP)
+
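+# Typical invocations of the "erlang-mk" target above:
+#   make erlang-mk                          # update to the latest default branch
+#   make erlang-mk ERLANG_MK_COMMIT=<ref>   # pin a specific commit or tag
+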
+# The erlang.mk package index is bundled in the default erlang.mk build.
+# Search for the string "copyright" to skip to the rest of the code.
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-kerl
+
+KERL_INSTALL_DIR ?= $(HOME)/erlang
+
+ifeq ($(strip $(KERL)),)
+KERL := $(ERLANG_MK_TMP)/kerl/kerl
+endif
+
+KERL_DIR = $(ERLANG_MK_TMP)/kerl
+
+export KERL
+
+KERL_GIT ?= https://github.com/kerl/kerl
+KERL_COMMIT ?= master
+
+KERL_MAKEFLAGS ?=
+
+OTP_GIT ?= https://github.com/erlang/otp
+
+define kerl_otp_target
+$(KERL_INSTALL_DIR)/$(1): $(KERL)
+ $(verbose) if [ ! -d $$@ ]; then \
+ MAKEFLAGS="$(KERL_MAKEFLAGS)" $(KERL) build git $(OTP_GIT) $(1) $(1); \
+ $(KERL) install $(1) $(KERL_INSTALL_DIR)/$(1); \
+ fi
+endef
+
+define kerl_hipe_target
+$(KERL_INSTALL_DIR)/$1-native: $(KERL)
+ $(verbose) if [ ! -d $$@ ]; then \
+ KERL_CONFIGURE_OPTIONS=--enable-native-libs \
+ MAKEFLAGS="$(KERL_MAKEFLAGS)" $(KERL) build git $(OTP_GIT) $1 $1-native; \
+ $(KERL) install $1-native $(KERL_INSTALL_DIR)/$1-native; \
+ fi
+endef
+
+$(KERL): $(KERL_DIR)
+
+$(KERL_DIR): | $(ERLANG_MK_TMP)
+ $(gen_verbose) git clone --depth 1 $(KERL_GIT) $(ERLANG_MK_TMP)/kerl
+ $(verbose) cd $(ERLANG_MK_TMP)/kerl && git checkout $(KERL_COMMIT)
+ $(verbose) chmod +x $(KERL)
+
+distclean:: distclean-kerl
+
+distclean-kerl:
+ $(gen_verbose) rm -rf $(KERL_DIR)
+
+# Allow users to select which version of Erlang/OTP to use for a project.
+
+ifneq ($(strip $(LATEST_ERLANG_OTP)),)
+# In some environments it is necessary to filter out master.
+ERLANG_OTP := $(notdir $(lastword $(sort\
+ $(filter-out $(KERL_INSTALL_DIR)/master $(KERL_INSTALL_DIR)/OTP_R%,\
+ $(filter-out %-rc1 %-rc2 %-rc3,$(wildcard $(KERL_INSTALL_DIR)/*[^-native]))))))
+endif
+
+ERLANG_OTP ?=
+ERLANG_HIPE ?=
+
+# Use kerl to enforce a specific Erlang/OTP version for a project.
+ifneq ($(strip $(ERLANG_OTP)),)
+export PATH := $(KERL_INSTALL_DIR)/$(ERLANG_OTP)/bin:$(PATH)
+SHELL := env PATH=$(PATH) $(SHELL)
+$(eval $(call kerl_otp_target,$(ERLANG_OTP)))
+
+# Build Erlang/OTP only if it doesn't already exist.
+ifeq ($(wildcard $(KERL_INSTALL_DIR)/$(ERLANG_OTP))$(BUILD_ERLANG_OTP),)
+$(info Building Erlang/OTP $(ERLANG_OTP)... Please wait...)
+$(shell $(MAKE) $(KERL_INSTALL_DIR)/$(ERLANG_OTP) ERLANG_OTP=$(ERLANG_OTP) BUILD_ERLANG_OTP=1 >&2)
+endif
+
+else
+# Same for a HiPE enabled VM.
+ifneq ($(strip $(ERLANG_HIPE)),)
+export PATH := $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native/bin:$(PATH)
+SHELL := env PATH=$(PATH) $(SHELL)
+$(eval $(call kerl_hipe_target,$(ERLANG_HIPE)))
+
+# Build Erlang/OTP only if it doesn't already exist.
+ifeq ($(wildcard $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native)$(BUILD_ERLANG_OTP),)
+$(info Building HiPE-enabled Erlang/OTP $(ERLANG_HIPE)... Please wait...)
+$(shell $(MAKE) $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native ERLANG_HIPE=$(ERLANG_HIPE) BUILD_ERLANG_OTP=1 >&2)
+endif
+
+endif
+endif
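+
+# Example: "make ERLANG_OTP=OTP-23.1" (version name illustrative) builds
+# that release with kerl on first use, installs it under
+# $(KERL_INSTALL_DIR), and prepends its bin/ directory to PATH for this
+# project's builds.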
+
+PACKAGES += aberth
+pkg_aberth_name = aberth
+pkg_aberth_description = Generic BERT-RPC server in Erlang
+pkg_aberth_homepage = https://github.com/a13x/aberth
+pkg_aberth_fetch = git
+pkg_aberth_repo = https://github.com/a13x/aberth
+pkg_aberth_commit = master
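+
+# To use an indexed package, an application's Makefile only needs to name
+# it in DEPS before including erlang.mk; the pkg_*_repo/pkg_*_commit
+# variables tell erlang.mk where to fetch it from. For example:
+#   DEPS = cowboy
+#   include erlang.mk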
+
+PACKAGES += active
+pkg_active_name = active
+pkg_active_description = Active development for Erlang: rebuild and reload source/binary files while the VM is running
+pkg_active_homepage = https://github.com/proger/active
+pkg_active_fetch = git
+pkg_active_repo = https://github.com/proger/active
+pkg_active_commit = master
+
+PACKAGES += actordb_core
+pkg_actordb_core_name = actordb_core
+pkg_actordb_core_description = ActorDB main source
+pkg_actordb_core_homepage = http://www.actordb.com/
+pkg_actordb_core_fetch = git
+pkg_actordb_core_repo = https://github.com/biokoda/actordb_core
+pkg_actordb_core_commit = master
+
+PACKAGES += actordb_thrift
+pkg_actordb_thrift_name = actordb_thrift
+pkg_actordb_thrift_description = Thrift API for ActorDB
+pkg_actordb_thrift_homepage = http://www.actordb.com/
+pkg_actordb_thrift_fetch = git
+pkg_actordb_thrift_repo = https://github.com/biokoda/actordb_thrift
+pkg_actordb_thrift_commit = master
+
+PACKAGES += aleppo
+pkg_aleppo_name = aleppo
+pkg_aleppo_description = Alternative Erlang Pre-Processor
+pkg_aleppo_homepage = https://github.com/ErlyORM/aleppo
+pkg_aleppo_fetch = git
+pkg_aleppo_repo = https://github.com/ErlyORM/aleppo
+pkg_aleppo_commit = master
+
+PACKAGES += alog
+pkg_alog_name = alog
+pkg_alog_description = Simply the best logging framework for Erlang
+pkg_alog_homepage = https://github.com/siberian-fast-food/alogger
+pkg_alog_fetch = git
+pkg_alog_repo = https://github.com/siberian-fast-food/alogger
+pkg_alog_commit = master
+
+PACKAGES += amqp_client
+pkg_amqp_client_name = amqp_client
+pkg_amqp_client_description = RabbitMQ Erlang AMQP client
+pkg_amqp_client_homepage = https://www.rabbitmq.com/erlang-client-user-guide.html
+pkg_amqp_client_fetch = git
+pkg_amqp_client_repo = https://github.com/rabbitmq/rabbitmq-erlang-client.git
+pkg_amqp_client_commit = master
+
+PACKAGES += annotations
+pkg_annotations_name = annotations
+pkg_annotations_description = Simple code instrumentation utilities
+pkg_annotations_homepage = https://github.com/hyperthunk/annotations
+pkg_annotations_fetch = git
+pkg_annotations_repo = https://github.com/hyperthunk/annotations
+pkg_annotations_commit = master
+
+PACKAGES += antidote
+pkg_antidote_name = antidote
+pkg_antidote_description = Large-scale computation without synchronisation
+pkg_antidote_homepage = https://syncfree.lip6.fr/
+pkg_antidote_fetch = git
+pkg_antidote_repo = https://github.com/SyncFree/antidote
+pkg_antidote_commit = master
+
+PACKAGES += apns
+pkg_apns_name = apns
+pkg_apns_description = Apple Push Notification Server for Erlang
+pkg_apns_homepage = http://inaka.github.com/apns4erl
+pkg_apns_fetch = git
+pkg_apns_repo = https://github.com/inaka/apns4erl
+pkg_apns_commit = master
+
+PACKAGES += asciideck
+pkg_asciideck_name = asciideck
+pkg_asciideck_description = Asciidoc for Erlang.
+pkg_asciideck_homepage = https://ninenines.eu
+pkg_asciideck_fetch = git
+pkg_asciideck_repo = https://github.com/ninenines/asciideck
+pkg_asciideck_commit = master
+
+PACKAGES += azdht
+pkg_azdht_name = azdht
+pkg_azdht_description = Azureus Distributed Hash Table (DHT) in Erlang
+pkg_azdht_homepage = https://github.com/arcusfelis/azdht
+pkg_azdht_fetch = git
+pkg_azdht_repo = https://github.com/arcusfelis/azdht
+pkg_azdht_commit = master
+
+PACKAGES += backoff
+pkg_backoff_name = backoff
+pkg_backoff_description = Simple exponential backoffs in Erlang
+pkg_backoff_homepage = https://github.com/ferd/backoff
+pkg_backoff_fetch = git
+pkg_backoff_repo = https://github.com/ferd/backoff
+pkg_backoff_commit = master
+
+PACKAGES += barrel_tcp
+pkg_barrel_tcp_name = barrel_tcp
+pkg_barrel_tcp_description = barrel is a generic TCP acceptor pool with low latency in Erlang.
+pkg_barrel_tcp_homepage = https://github.com/benoitc-attic/barrel_tcp
+pkg_barrel_tcp_fetch = git
+pkg_barrel_tcp_repo = https://github.com/benoitc-attic/barrel_tcp
+pkg_barrel_tcp_commit = master
+
+PACKAGES += basho_bench
+pkg_basho_bench_name = basho_bench
+pkg_basho_bench_description = A load-generation and testing tool for basically whatever you can write a returning Erlang function for.
+pkg_basho_bench_homepage = https://github.com/basho/basho_bench
+pkg_basho_bench_fetch = git
+pkg_basho_bench_repo = https://github.com/basho/basho_bench
+pkg_basho_bench_commit = master
+
+PACKAGES += bcrypt
+pkg_bcrypt_name = bcrypt
+pkg_bcrypt_description = Bcrypt Erlang / C library
+pkg_bcrypt_homepage = https://github.com/erlangpack/bcrypt
+pkg_bcrypt_fetch = git
+pkg_bcrypt_repo = https://github.com/erlangpack/bcrypt.git
+pkg_bcrypt_commit = master
+
+PACKAGES += beam
+pkg_beam_name = beam
+pkg_beam_description = BEAM emulator written in Erlang
+pkg_beam_homepage = https://github.com/tonyrog/beam
+pkg_beam_fetch = git
+pkg_beam_repo = https://github.com/tonyrog/beam
+pkg_beam_commit = master
+
+PACKAGES += beanstalk
+pkg_beanstalk_name = beanstalk
+pkg_beanstalk_description = An Erlang client for beanstalkd
+pkg_beanstalk_homepage = https://github.com/tim/erlang-beanstalk
+pkg_beanstalk_fetch = git
+pkg_beanstalk_repo = https://github.com/tim/erlang-beanstalk
+pkg_beanstalk_commit = master
+
+PACKAGES += bear
+pkg_bear_name = bear
+pkg_bear_description = a set of statistics functions for erlang
+pkg_bear_homepage = https://github.com/boundary/bear
+pkg_bear_fetch = git
+pkg_bear_repo = https://github.com/boundary/bear
+pkg_bear_commit = master
+
+PACKAGES += bertconf
+pkg_bertconf_name = bertconf
+pkg_bertconf_description = Make ETS tables out of static BERT files that are auto-reloaded
+pkg_bertconf_homepage = https://github.com/ferd/bertconf
+pkg_bertconf_fetch = git
+pkg_bertconf_repo = https://github.com/ferd/bertconf
+pkg_bertconf_commit = master
+
+PACKAGES += bifrost
+pkg_bifrost_name = bifrost
+pkg_bifrost_description = Erlang FTP Server Framework
+pkg_bifrost_homepage = https://github.com/thorstadt/bifrost
+pkg_bifrost_fetch = git
+pkg_bifrost_repo = https://github.com/thorstadt/bifrost
+pkg_bifrost_commit = master
+
+PACKAGES += binpp
+pkg_binpp_name = binpp
+pkg_binpp_description = Erlang Binary Pretty Printer
+pkg_binpp_homepage = https://github.com/jtendo/binpp
+pkg_binpp_fetch = git
+pkg_binpp_repo = https://github.com/jtendo/binpp
+pkg_binpp_commit = master
+
+PACKAGES += bisect
+pkg_bisect_name = bisect
+pkg_bisect_description = Ordered fixed-size binary dictionary in Erlang
+pkg_bisect_homepage = https://github.com/knutin/bisect
+pkg_bisect_fetch = git
+pkg_bisect_repo = https://github.com/knutin/bisect
+pkg_bisect_commit = master
+
+PACKAGES += bitcask
+pkg_bitcask_name = bitcask
+pkg_bitcask_description = because you need another key/value storage engine
+pkg_bitcask_homepage = https://github.com/basho/bitcask
+pkg_bitcask_fetch = git
+pkg_bitcask_repo = https://github.com/basho/bitcask
+pkg_bitcask_commit = develop
+
+PACKAGES += bitstore
+pkg_bitstore_name = bitstore
+pkg_bitstore_description = A document based ontology development environment
+pkg_bitstore_homepage = https://github.com/bdionne/bitstore
+pkg_bitstore_fetch = git
+pkg_bitstore_repo = https://github.com/bdionne/bitstore
+pkg_bitstore_commit = master
+
+PACKAGES += bootstrap
+pkg_bootstrap_name = bootstrap
+pkg_bootstrap_description = A simple, yet powerful Erlang cluster bootstrapping application.
+pkg_bootstrap_homepage = https://github.com/schlagert/bootstrap
+pkg_bootstrap_fetch = git
+pkg_bootstrap_repo = https://github.com/schlagert/bootstrap
+pkg_bootstrap_commit = master
+
+PACKAGES += boss
+pkg_boss_name = boss
+pkg_boss_description = Erlang web MVC, now featuring Comet
+pkg_boss_homepage = https://github.com/ChicagoBoss/ChicagoBoss
+pkg_boss_fetch = git
+pkg_boss_repo = https://github.com/ChicagoBoss/ChicagoBoss
+pkg_boss_commit = master
+
+PACKAGES += boss_db
+pkg_boss_db_name = boss_db
+pkg_boss_db_description = BossDB: a sharded, caching, pooling, evented ORM for Erlang
+pkg_boss_db_homepage = https://github.com/ErlyORM/boss_db
+pkg_boss_db_fetch = git
+pkg_boss_db_repo = https://github.com/ErlyORM/boss_db
+pkg_boss_db_commit = master
+
+PACKAGES += brod
+pkg_brod_name = brod
+pkg_brod_description = Kafka client in Erlang
+pkg_brod_homepage = https://github.com/klarna/brod
+pkg_brod_fetch = git
+pkg_brod_repo = https://github.com/klarna/brod.git
+pkg_brod_commit = master
+
+PACKAGES += bson
+pkg_bson_name = bson
+pkg_bson_description = BSON documents in Erlang, see bsonspec.org
+pkg_bson_homepage = https://github.com/comtihon/bson-erlang
+pkg_bson_fetch = git
+pkg_bson_repo = https://github.com/comtihon/bson-erlang
+pkg_bson_commit = master
+
+PACKAGES += bullet
+pkg_bullet_name = bullet
+pkg_bullet_description = Simple, reliable, efficient streaming for Cowboy.
+pkg_bullet_homepage = http://ninenines.eu
+pkg_bullet_fetch = git
+pkg_bullet_repo = https://github.com/ninenines/bullet
+pkg_bullet_commit = master
+
+PACKAGES += cache
+pkg_cache_name = cache
+pkg_cache_description = Erlang in-memory cache
+pkg_cache_homepage = https://github.com/fogfish/cache
+pkg_cache_fetch = git
+pkg_cache_repo = https://github.com/fogfish/cache
+pkg_cache_commit = master
+
+PACKAGES += cake
+pkg_cake_name = cake
+pkg_cake_description = Really simple terminal colorization
+pkg_cake_homepage = https://github.com/darach/cake-erl
+pkg_cake_fetch = git
+pkg_cake_repo = https://github.com/darach/cake-erl
+pkg_cake_commit = master
+
+PACKAGES += carotene
+pkg_carotene_name = carotene
+pkg_carotene_description = Real-time server
+pkg_carotene_homepage = https://github.com/carotene/carotene
+pkg_carotene_fetch = git
+pkg_carotene_repo = https://github.com/carotene/carotene
+pkg_carotene_commit = master
+
+PACKAGES += cberl
+pkg_cberl_name = cberl
+pkg_cberl_description = NIF based Erlang bindings for Couchbase
+pkg_cberl_homepage = https://github.com/chitika/cberl
+pkg_cberl_fetch = git
+pkg_cberl_repo = https://github.com/chitika/cberl
+pkg_cberl_commit = master
+
+PACKAGES += cecho
+pkg_cecho_name = cecho
+pkg_cecho_description = An ncurses library for Erlang
+pkg_cecho_homepage = https://github.com/mazenharake/cecho
+pkg_cecho_fetch = git
+pkg_cecho_repo = https://github.com/mazenharake/cecho
+pkg_cecho_commit = master
+
+PACKAGES += cferl
+pkg_cferl_name = cferl
+pkg_cferl_description = Rackspace / Open Stack Cloud Files Erlang Client
+pkg_cferl_homepage = https://github.com/ddossot/cferl
+pkg_cferl_fetch = git
+pkg_cferl_repo = https://github.com/ddossot/cferl
+pkg_cferl_commit = master
+
+PACKAGES += chaos_monkey
+pkg_chaos_monkey_name = chaos_monkey
+pkg_chaos_monkey_description = This is The CHAOS MONKEY. It will kill your processes.
+pkg_chaos_monkey_homepage = https://github.com/dLuna/chaos_monkey
+pkg_chaos_monkey_fetch = git
+pkg_chaos_monkey_repo = https://github.com/dLuna/chaos_monkey
+pkg_chaos_monkey_commit = master
+
+PACKAGES += check_node
+pkg_check_node_name = check_node
+pkg_check_node_description = Nagios Scripts for monitoring Riak
+pkg_check_node_homepage = https://github.com/basho-labs/riak_nagios
+pkg_check_node_fetch = git
+pkg_check_node_repo = https://github.com/basho-labs/riak_nagios
+pkg_check_node_commit = master
+
+PACKAGES += chronos
+pkg_chronos_name = chronos
+pkg_chronos_description = Timer module for Erlang that makes it easy to abstract time out of the tests.
+pkg_chronos_homepage = https://github.com/lehoff/chronos
+pkg_chronos_fetch = git
+pkg_chronos_repo = https://github.com/lehoff/chronos
+pkg_chronos_commit = master
+
+PACKAGES += chumak
+pkg_chumak_name = chumak
+pkg_chumak_description = Pure Erlang implementation of ZeroMQ Message Transport Protocol.
+pkg_chumak_homepage = http://choven.ca
+pkg_chumak_fetch = git
+pkg_chumak_repo = https://github.com/chovencorp/chumak
+pkg_chumak_commit = master
+
+PACKAGES += cl
+pkg_cl_name = cl
+pkg_cl_description = OpenCL binding for Erlang
+pkg_cl_homepage = https://github.com/tonyrog/cl
+pkg_cl_fetch = git
+pkg_cl_repo = https://github.com/tonyrog/cl
+pkg_cl_commit = master
+
+PACKAGES += clique
+pkg_clique_name = clique
+pkg_clique_description = CLI Framework for Erlang
+pkg_clique_homepage = https://github.com/basho/clique
+pkg_clique_fetch = git
+pkg_clique_repo = https://github.com/basho/clique
+pkg_clique_commit = develop
+
+PACKAGES += cloudi_core
+pkg_cloudi_core_name = cloudi_core
+pkg_cloudi_core_description = CloudI internal service runtime
+pkg_cloudi_core_homepage = http://cloudi.org/
+pkg_cloudi_core_fetch = git
+pkg_cloudi_core_repo = https://github.com/CloudI/cloudi_core
+pkg_cloudi_core_commit = master
+
+PACKAGES += cloudi_service_api_requests
+pkg_cloudi_service_api_requests_name = cloudi_service_api_requests
+pkg_cloudi_service_api_requests_description = CloudI Service API requests (JSON-RPC/Erlang-term support)
+pkg_cloudi_service_api_requests_homepage = http://cloudi.org/
+pkg_cloudi_service_api_requests_fetch = git
+pkg_cloudi_service_api_requests_repo = https://github.com/CloudI/cloudi_service_api_requests
+pkg_cloudi_service_api_requests_commit = master
+
+PACKAGES += cloudi_service_db
+pkg_cloudi_service_db_name = cloudi_service_db
+pkg_cloudi_service_db_description = CloudI Database (in-memory/testing/generic)
+pkg_cloudi_service_db_homepage = http://cloudi.org/
+pkg_cloudi_service_db_fetch = git
+pkg_cloudi_service_db_repo = https://github.com/CloudI/cloudi_service_db
+pkg_cloudi_service_db_commit = master
+
+PACKAGES += cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_name = cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_description = Cassandra CloudI Service
+pkg_cloudi_service_db_cassandra_homepage = http://cloudi.org/
+pkg_cloudi_service_db_cassandra_fetch = git
+pkg_cloudi_service_db_cassandra_repo = https://github.com/CloudI/cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_commit = master
+
+PACKAGES += cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_name = cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_description = Cassandra CQL CloudI Service
+pkg_cloudi_service_db_cassandra_cql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_cassandra_cql_fetch = git
+pkg_cloudi_service_db_cassandra_cql_repo = https://github.com/CloudI/cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_commit = master
+
+PACKAGES += cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_name = cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_description = CouchDB CloudI Service
+pkg_cloudi_service_db_couchdb_homepage = http://cloudi.org/
+pkg_cloudi_service_db_couchdb_fetch = git
+pkg_cloudi_service_db_couchdb_repo = https://github.com/CloudI/cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_commit = master
+
+PACKAGES += cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_name = cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_description = elasticsearch CloudI Service
+pkg_cloudi_service_db_elasticsearch_homepage = http://cloudi.org/
+pkg_cloudi_service_db_elasticsearch_fetch = git
+pkg_cloudi_service_db_elasticsearch_repo = https://github.com/CloudI/cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_commit = master
+
+PACKAGES += cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_name = cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_description = memcached CloudI Service
+pkg_cloudi_service_db_memcached_homepage = http://cloudi.org/
+pkg_cloudi_service_db_memcached_fetch = git
+pkg_cloudi_service_db_memcached_repo = https://github.com/CloudI/cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_commit = master
+
+PACKAGES += cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_name = cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_description = MySQL CloudI Service
+pkg_cloudi_service_db_mysql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_mysql_fetch = git
+pkg_cloudi_service_db_mysql_repo = https://github.com/CloudI/cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_commit = master
+
+PACKAGES += cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_name = cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_description = PostgreSQL CloudI Service
+pkg_cloudi_service_db_pgsql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_pgsql_fetch = git
+pkg_cloudi_service_db_pgsql_repo = https://github.com/CloudI/cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_commit = master
+
+PACKAGES += cloudi_service_db_riak
+pkg_cloudi_service_db_riak_name = cloudi_service_db_riak
+pkg_cloudi_service_db_riak_description = Riak CloudI Service
+pkg_cloudi_service_db_riak_homepage = http://cloudi.org/
+pkg_cloudi_service_db_riak_fetch = git
+pkg_cloudi_service_db_riak_repo = https://github.com/CloudI/cloudi_service_db_riak
+pkg_cloudi_service_db_riak_commit = master
+
+PACKAGES += cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_name = cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_description = Tokyo Tyrant CloudI Service
+pkg_cloudi_service_db_tokyotyrant_homepage = http://cloudi.org/
+pkg_cloudi_service_db_tokyotyrant_fetch = git
+pkg_cloudi_service_db_tokyotyrant_repo = https://github.com/CloudI/cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_commit = master
+
+PACKAGES += cloudi_service_filesystem
+pkg_cloudi_service_filesystem_name = cloudi_service_filesystem
+pkg_cloudi_service_filesystem_description = Filesystem CloudI Service
+pkg_cloudi_service_filesystem_homepage = http://cloudi.org/
+pkg_cloudi_service_filesystem_fetch = git
+pkg_cloudi_service_filesystem_repo = https://github.com/CloudI/cloudi_service_filesystem
+pkg_cloudi_service_filesystem_commit = master
+
+PACKAGES += cloudi_service_http_client
+pkg_cloudi_service_http_client_name = cloudi_service_http_client
+pkg_cloudi_service_http_client_description = HTTP client CloudI Service
+pkg_cloudi_service_http_client_homepage = http://cloudi.org/
+pkg_cloudi_service_http_client_fetch = git
+pkg_cloudi_service_http_client_repo = https://github.com/CloudI/cloudi_service_http_client
+pkg_cloudi_service_http_client_commit = master
+
+PACKAGES += cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_name = cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_description = cowboy HTTP/HTTPS CloudI Service
+pkg_cloudi_service_http_cowboy_homepage = http://cloudi.org/
+pkg_cloudi_service_http_cowboy_fetch = git
+pkg_cloudi_service_http_cowboy_repo = https://github.com/CloudI/cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_commit = master
+
+PACKAGES += cloudi_service_http_elli
+pkg_cloudi_service_http_elli_name = cloudi_service_http_elli
+pkg_cloudi_service_http_elli_description = elli HTTP CloudI Service
+pkg_cloudi_service_http_elli_homepage = http://cloudi.org/
+pkg_cloudi_service_http_elli_fetch = git
+pkg_cloudi_service_http_elli_repo = https://github.com/CloudI/cloudi_service_http_elli
+pkg_cloudi_service_http_elli_commit = master
+
+PACKAGES += cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_name = cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_description = Map/Reduce CloudI Service
+pkg_cloudi_service_map_reduce_homepage = http://cloudi.org/
+pkg_cloudi_service_map_reduce_fetch = git
+pkg_cloudi_service_map_reduce_repo = https://github.com/CloudI/cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_commit = master
+
+PACKAGES += cloudi_service_oauth1
+pkg_cloudi_service_oauth1_name = cloudi_service_oauth1
+pkg_cloudi_service_oauth1_description = OAuth v1.0 CloudI Service
+pkg_cloudi_service_oauth1_homepage = http://cloudi.org/
+pkg_cloudi_service_oauth1_fetch = git
+pkg_cloudi_service_oauth1_repo = https://github.com/CloudI/cloudi_service_oauth1
+pkg_cloudi_service_oauth1_commit = master
+
+PACKAGES += cloudi_service_queue
+pkg_cloudi_service_queue_name = cloudi_service_queue
+pkg_cloudi_service_queue_description = Persistent Queue Service
+pkg_cloudi_service_queue_homepage = http://cloudi.org/
+pkg_cloudi_service_queue_fetch = git
+pkg_cloudi_service_queue_repo = https://github.com/CloudI/cloudi_service_queue
+pkg_cloudi_service_queue_commit = master
+
+PACKAGES += cloudi_service_quorum
+pkg_cloudi_service_quorum_name = cloudi_service_quorum
+pkg_cloudi_service_quorum_description = CloudI Quorum Service
+pkg_cloudi_service_quorum_homepage = http://cloudi.org/
+pkg_cloudi_service_quorum_fetch = git
+pkg_cloudi_service_quorum_repo = https://github.com/CloudI/cloudi_service_quorum
+pkg_cloudi_service_quorum_commit = master
+
+PACKAGES += cloudi_service_router
+pkg_cloudi_service_router_name = cloudi_service_router
+pkg_cloudi_service_router_description = CloudI Router Service
+pkg_cloudi_service_router_homepage = http://cloudi.org/
+pkg_cloudi_service_router_fetch = git
+pkg_cloudi_service_router_repo = https://github.com/CloudI/cloudi_service_router
+pkg_cloudi_service_router_commit = master
+
+PACKAGES += cloudi_service_tcp
+pkg_cloudi_service_tcp_name = cloudi_service_tcp
+pkg_cloudi_service_tcp_description = TCP CloudI Service
+pkg_cloudi_service_tcp_homepage = http://cloudi.org/
+pkg_cloudi_service_tcp_fetch = git
+pkg_cloudi_service_tcp_repo = https://github.com/CloudI/cloudi_service_tcp
+pkg_cloudi_service_tcp_commit = master
+
+PACKAGES += cloudi_service_timers
+pkg_cloudi_service_timers_name = cloudi_service_timers
+pkg_cloudi_service_timers_description = Timers CloudI Service
+pkg_cloudi_service_timers_homepage = http://cloudi.org/
+pkg_cloudi_service_timers_fetch = git
+pkg_cloudi_service_timers_repo = https://github.com/CloudI/cloudi_service_timers
+pkg_cloudi_service_timers_commit = master
+
+PACKAGES += cloudi_service_udp
+pkg_cloudi_service_udp_name = cloudi_service_udp
+pkg_cloudi_service_udp_description = UDP CloudI Service
+pkg_cloudi_service_udp_homepage = http://cloudi.org/
+pkg_cloudi_service_udp_fetch = git
+pkg_cloudi_service_udp_repo = https://github.com/CloudI/cloudi_service_udp
+pkg_cloudi_service_udp_commit = master
+
+PACKAGES += cloudi_service_validate
+pkg_cloudi_service_validate_name = cloudi_service_validate
+pkg_cloudi_service_validate_description = CloudI Validate Service
+pkg_cloudi_service_validate_homepage = http://cloudi.org/
+pkg_cloudi_service_validate_fetch = git
+pkg_cloudi_service_validate_repo = https://github.com/CloudI/cloudi_service_validate
+pkg_cloudi_service_validate_commit = master
+
+PACKAGES += cloudi_service_zeromq
+pkg_cloudi_service_zeromq_name = cloudi_service_zeromq
+pkg_cloudi_service_zeromq_description = ZeroMQ CloudI Service
+pkg_cloudi_service_zeromq_homepage = http://cloudi.org/
+pkg_cloudi_service_zeromq_fetch = git
+pkg_cloudi_service_zeromq_repo = https://github.com/CloudI/cloudi_service_zeromq
+pkg_cloudi_service_zeromq_commit = master
+
+PACKAGES += cluster_info
+pkg_cluster_info_name = cluster_info
+pkg_cluster_info_description = Fork of Hibari's nifty cluster_info OTP app
+pkg_cluster_info_homepage = https://github.com/basho/cluster_info
+pkg_cluster_info_fetch = git
+pkg_cluster_info_repo = https://github.com/basho/cluster_info
+pkg_cluster_info_commit = master
+
+PACKAGES += color
+pkg_color_name = color
+pkg_color_description = ANSI colors for your Erlang
+pkg_color_homepage = https://github.com/julianduque/erlang-color
+pkg_color_fetch = git
+pkg_color_repo = https://github.com/julianduque/erlang-color
+pkg_color_commit = master
+
+PACKAGES += confetti
+pkg_confetti_name = confetti
+pkg_confetti_description = Erlang configuration provider / application:get_env/2 on steroids
+pkg_confetti_homepage = https://github.com/jtendo/confetti
+pkg_confetti_fetch = git
+pkg_confetti_repo = https://github.com/jtendo/confetti
+pkg_confetti_commit = master
+
+PACKAGES += couchbeam
+pkg_couchbeam_name = couchbeam
+pkg_couchbeam_description = Apache CouchDB client in Erlang
+pkg_couchbeam_homepage = https://github.com/benoitc/couchbeam
+pkg_couchbeam_fetch = git
+pkg_couchbeam_repo = https://github.com/benoitc/couchbeam
+pkg_couchbeam_commit = master
+
+PACKAGES += covertool
+pkg_covertool_name = covertool
+pkg_covertool_description = Tool to convert Erlang cover data files into Cobertura XML reports
+pkg_covertool_homepage = https://github.com/idubrov/covertool
+pkg_covertool_fetch = git
+pkg_covertool_repo = https://github.com/idubrov/covertool
+pkg_covertool_commit = master
+
+PACKAGES += cowboy
+pkg_cowboy_name = cowboy
+pkg_cowboy_description = Small, fast and modular HTTP server.
+pkg_cowboy_homepage = http://ninenines.eu
+pkg_cowboy_fetch = git
+pkg_cowboy_repo = https://github.com/ninenines/cowboy
+pkg_cowboy_commit = 1.0.4
+
+PACKAGES += cowdb
+pkg_cowdb_name = cowdb
+pkg_cowdb_description = Pure Key/Value database library for Erlang Applications
+pkg_cowdb_homepage = https://github.com/refuge/cowdb
+pkg_cowdb_fetch = git
+pkg_cowdb_repo = https://github.com/refuge/cowdb
+pkg_cowdb_commit = master
+
+PACKAGES += cowlib
+pkg_cowlib_name = cowlib
+pkg_cowlib_description = Support library for manipulating Web protocols.
+pkg_cowlib_homepage = http://ninenines.eu
+pkg_cowlib_fetch = git
+pkg_cowlib_repo = https://github.com/ninenines/cowlib
+pkg_cowlib_commit = 1.0.2
+
+PACKAGES += cpg
+pkg_cpg_name = cpg
+pkg_cpg_description = CloudI Process Groups
+pkg_cpg_homepage = https://github.com/okeuday/cpg
+pkg_cpg_fetch = git
+pkg_cpg_repo = https://github.com/okeuday/cpg
+pkg_cpg_commit = master
+
+PACKAGES += cqerl
+pkg_cqerl_name = cqerl
+pkg_cqerl_description = Native Erlang CQL client for Cassandra
+pkg_cqerl_homepage = https://matehat.github.io/cqerl/
+pkg_cqerl_fetch = git
+pkg_cqerl_repo = https://github.com/matehat/cqerl
+pkg_cqerl_commit = master
+
+PACKAGES += cr
+pkg_cr_name = cr
+pkg_cr_description = Chain Replication
+pkg_cr_homepage = https://synrc.com/apps/cr/doc/cr.htm
+pkg_cr_fetch = git
+pkg_cr_repo = https://github.com/spawnproc/cr
+pkg_cr_commit = master
+
+PACKAGES += cuttlefish
+pkg_cuttlefish_name = cuttlefish
+pkg_cuttlefish_description = never lose your childlike sense of wonder baby cuttlefish, promise me?
+pkg_cuttlefish_homepage = https://github.com/basho/cuttlefish
+pkg_cuttlefish_fetch = git
+pkg_cuttlefish_repo = https://github.com/basho/cuttlefish
+pkg_cuttlefish_commit = master
+
+PACKAGES += damocles
+pkg_damocles_name = damocles
+pkg_damocles_description = Erlang library for generating adversarial network conditions for QAing distributed applications/systems on a single Linux box.
+pkg_damocles_homepage = https://github.com/lostcolony/damocles
+pkg_damocles_fetch = git
+pkg_damocles_repo = https://github.com/lostcolony/damocles
+pkg_damocles_commit = master
+
+PACKAGES += debbie
+pkg_debbie_name = debbie
+pkg_debbie_description = .DEB Built In Erlang
+pkg_debbie_homepage = https://github.com/crownedgrouse/debbie
+pkg_debbie_fetch = git
+pkg_debbie_repo = https://github.com/crownedgrouse/debbie
+pkg_debbie_commit = master
+
+PACKAGES += decimal
+pkg_decimal_name = decimal
+pkg_decimal_description = An Erlang decimal arithmetic library
+pkg_decimal_homepage = https://github.com/tim/erlang-decimal
+pkg_decimal_fetch = git
+pkg_decimal_repo = https://github.com/tim/erlang-decimal
+pkg_decimal_commit = master
+
+PACKAGES += detergent
+pkg_detergent_name = detergent
+pkg_detergent_description = An emulsifying Erlang SOAP library
+pkg_detergent_homepage = https://github.com/devinus/detergent
+pkg_detergent_fetch = git
+pkg_detergent_repo = https://github.com/devinus/detergent
+pkg_detergent_commit = master
+
+PACKAGES += detest
+pkg_detest_name = detest
+pkg_detest_description = Tool for running tests on a cluster of erlang nodes
+pkg_detest_homepage = https://github.com/biokoda/detest
+pkg_detest_fetch = git
+pkg_detest_repo = https://github.com/biokoda/detest
+pkg_detest_commit = master
+
+PACKAGES += dh_date
+pkg_dh_date_name = dh_date
+pkg_dh_date_description = Date formatting / parsing library for erlang
+pkg_dh_date_homepage = https://github.com/daleharvey/dh_date
+pkg_dh_date_fetch = git
+pkg_dh_date_repo = https://github.com/daleharvey/dh_date
+pkg_dh_date_commit = master
+
+PACKAGES += dirbusterl
+pkg_dirbusterl_name = dirbusterl
+pkg_dirbusterl_description = DirBuster successor in Erlang
+pkg_dirbusterl_homepage = https://github.com/silentsignal/DirBustErl
+pkg_dirbusterl_fetch = git
+pkg_dirbusterl_repo = https://github.com/silentsignal/DirBustErl
+pkg_dirbusterl_commit = master
+
+PACKAGES += dispcount
+pkg_dispcount_name = dispcount
+pkg_dispcount_description = Erlang task dispatcher based on ETS counters.
+pkg_dispcount_homepage = https://github.com/ferd/dispcount
+pkg_dispcount_fetch = git
+pkg_dispcount_repo = https://github.com/ferd/dispcount
+pkg_dispcount_commit = master
+
+PACKAGES += dlhttpc
+pkg_dlhttpc_name = dlhttpc
+pkg_dlhttpc_description = dispcount-based lhttpc fork for massive amounts of requests to limited endpoints
+pkg_dlhttpc_homepage = https://github.com/ferd/dlhttpc
+pkg_dlhttpc_fetch = git
+pkg_dlhttpc_repo = https://github.com/ferd/dlhttpc
+pkg_dlhttpc_commit = master
+
+PACKAGES += dns
+pkg_dns_name = dns
+pkg_dns_description = Erlang DNS library
+pkg_dns_homepage = https://github.com/aetrion/dns_erlang
+pkg_dns_fetch = git
+pkg_dns_repo = https://github.com/aetrion/dns_erlang
+pkg_dns_commit = master
+
+PACKAGES += dnssd
+pkg_dnssd_name = dnssd
+pkg_dnssd_description = Erlang interface to Apple's Bonjour DNS Service Discovery implementation
+pkg_dnssd_homepage = https://github.com/benoitc/dnssd_erlang
+pkg_dnssd_fetch = git
+pkg_dnssd_repo = https://github.com/benoitc/dnssd_erlang
+pkg_dnssd_commit = master
+
+PACKAGES += dynamic_compile
+pkg_dynamic_compile_name = dynamic_compile
+pkg_dynamic_compile_description = compile and load erlang modules from string input
+pkg_dynamic_compile_homepage = https://github.com/jkvor/dynamic_compile
+pkg_dynamic_compile_fetch = git
+pkg_dynamic_compile_repo = https://github.com/jkvor/dynamic_compile
+pkg_dynamic_compile_commit = master
+
+PACKAGES += e2
+pkg_e2_name = e2
+pkg_e2_description = Library to simplify writing correct OTP applications.
+pkg_e2_homepage = http://e2project.org
+pkg_e2_fetch = git
+pkg_e2_repo = https://github.com/gar1t/e2
+pkg_e2_commit = master
+
+PACKAGES += eamf
+pkg_eamf_name = eamf
+pkg_eamf_description = eAMF provides Action Message Format (AMF) support for Erlang
+pkg_eamf_homepage = https://github.com/mrinalwadhwa/eamf
+pkg_eamf_fetch = git
+pkg_eamf_repo = https://github.com/mrinalwadhwa/eamf
+pkg_eamf_commit = master
+
+PACKAGES += eavro
+pkg_eavro_name = eavro
+pkg_eavro_description = Apache Avro encoder/decoder
+pkg_eavro_homepage = https://github.com/SIfoxDevTeam/eavro
+pkg_eavro_fetch = git
+pkg_eavro_repo = https://github.com/SIfoxDevTeam/eavro
+pkg_eavro_commit = master
+
+PACKAGES += ecapnp
+pkg_ecapnp_name = ecapnp
+pkg_ecapnp_description = Cap'n Proto library for Erlang
+pkg_ecapnp_homepage = https://github.com/kaos/ecapnp
+pkg_ecapnp_fetch = git
+pkg_ecapnp_repo = https://github.com/kaos/ecapnp
+pkg_ecapnp_commit = master
+
+PACKAGES += econfig
+pkg_econfig_name = econfig
+pkg_econfig_description = simple Erlang config handler using INI files
+pkg_econfig_homepage = https://github.com/benoitc/econfig
+pkg_econfig_fetch = git
+pkg_econfig_repo = https://github.com/benoitc/econfig
+pkg_econfig_commit = master
+
+PACKAGES += edate
+pkg_edate_name = edate
+pkg_edate_description = date manipulation library for erlang
+pkg_edate_homepage = https://github.com/dweldon/edate
+pkg_edate_fetch = git
+pkg_edate_repo = https://github.com/dweldon/edate
+pkg_edate_commit = master
+
+PACKAGES += edgar
+pkg_edgar_name = edgar
+pkg_edgar_description = Erlang Does GNU AR
+pkg_edgar_homepage = https://github.com/crownedgrouse/edgar
+pkg_edgar_fetch = git
+pkg_edgar_repo = https://github.com/crownedgrouse/edgar
+pkg_edgar_commit = master
+
+PACKAGES += edis
+pkg_edis_name = edis
+pkg_edis_description = An Erlang implementation of Redis KV Store
+pkg_edis_homepage = http://inaka.github.com/edis/
+pkg_edis_fetch = git
+pkg_edis_repo = https://github.com/inaka/edis
+pkg_edis_commit = master
+
+PACKAGES += edns
+pkg_edns_name = edns
+pkg_edns_description = Erlang/OTP DNS server
+pkg_edns_homepage = https://github.com/hcvst/erlang-dns
+pkg_edns_fetch = git
+pkg_edns_repo = https://github.com/hcvst/erlang-dns
+pkg_edns_commit = master
+
+PACKAGES += edown
+pkg_edown_name = edown
+pkg_edown_description = EDoc extension for generating Github-flavored Markdown
+pkg_edown_homepage = https://github.com/uwiger/edown
+pkg_edown_fetch = git
+pkg_edown_repo = https://github.com/uwiger/edown
+pkg_edown_commit = master
+
+PACKAGES += eep
+pkg_eep_name = eep
+pkg_eep_description = Erlang Easy Profiling (eep) application provides a way to analyze application performance and call hierarchy
+pkg_eep_homepage = https://github.com/virtan/eep
+pkg_eep_fetch = git
+pkg_eep_repo = https://github.com/virtan/eep
+pkg_eep_commit = master
+
+PACKAGES += eep_app
+pkg_eep_app_name = eep_app
+pkg_eep_app_description = Embedded Event Processing
+pkg_eep_app_homepage = https://github.com/darach/eep-erl
+pkg_eep_app_fetch = git
+pkg_eep_app_repo = https://github.com/darach/eep-erl
+pkg_eep_app_commit = master
+
+PACKAGES += efene
+pkg_efene_name = efene
+pkg_efene_description = Alternative syntax for the Erlang Programming Language focusing on simplicity, ease of use and programmer UX
+pkg_efene_homepage = https://github.com/efene/efene
+pkg_efene_fetch = git
+pkg_efene_repo = https://github.com/efene/efene
+pkg_efene_commit = master
+
+PACKAGES += egeoip
+pkg_egeoip_name = egeoip
+pkg_egeoip_description = Erlang IP Geolocation module, currently supporting the MaxMind GeoLite City Database.
+pkg_egeoip_homepage = https://github.com/mochi/egeoip
+pkg_egeoip_fetch = git
+pkg_egeoip_repo = https://github.com/mochi/egeoip
+pkg_egeoip_commit = master
+
+PACKAGES += ehsa
+pkg_ehsa_name = ehsa
+pkg_ehsa_description = Erlang HTTP server basic and digest authentication modules
+pkg_ehsa_homepage = https://bitbucket.org/a12n/ehsa
+pkg_ehsa_fetch = hg
+pkg_ehsa_repo = https://bitbucket.org/a12n/ehsa
+pkg_ehsa_commit = default
+
+PACKAGES += ej
+pkg_ej_name = ej
+pkg_ej_description = Helper module for working with Erlang terms representing JSON
+pkg_ej_homepage = https://github.com/seth/ej
+pkg_ej_fetch = git
+pkg_ej_repo = https://github.com/seth/ej
+pkg_ej_commit = master
+
+PACKAGES += ejabberd
+pkg_ejabberd_name = ejabberd
+pkg_ejabberd_description = Robust, ubiquitous and massively scalable Jabber / XMPP Instant Messaging platform
+pkg_ejabberd_homepage = https://github.com/processone/ejabberd
+pkg_ejabberd_fetch = git
+pkg_ejabberd_repo = https://github.com/processone/ejabberd
+pkg_ejabberd_commit = master
+
+PACKAGES += ejwt
+pkg_ejwt_name = ejwt
+pkg_ejwt_description = erlang library for JSON Web Token
+pkg_ejwt_homepage = https://github.com/artefactop/ejwt
+pkg_ejwt_fetch = git
+pkg_ejwt_repo = https://github.com/artefactop/ejwt
+pkg_ejwt_commit = master
+
+PACKAGES += ekaf
+pkg_ekaf_name = ekaf
+pkg_ekaf_description = A minimal, high-performance Kafka client in Erlang.
+pkg_ekaf_homepage = https://github.com/helpshift/ekaf
+pkg_ekaf_fetch = git
+pkg_ekaf_repo = https://github.com/helpshift/ekaf
+pkg_ekaf_commit = master
+
+PACKAGES += elarm
+pkg_elarm_name = elarm
+pkg_elarm_description = Alarm Manager for Erlang.
+pkg_elarm_homepage = https://github.com/esl/elarm
+pkg_elarm_fetch = git
+pkg_elarm_repo = https://github.com/esl/elarm
+pkg_elarm_commit = master
+
+PACKAGES += eleveldb
+pkg_eleveldb_name = eleveldb
+pkg_eleveldb_description = Erlang LevelDB API
+pkg_eleveldb_homepage = https://github.com/basho/eleveldb
+pkg_eleveldb_fetch = git
+pkg_eleveldb_repo = https://github.com/basho/eleveldb
+pkg_eleveldb_commit = master
+
+PACKAGES += elixir
+pkg_elixir_name = elixir
+pkg_elixir_description = Elixir is a dynamic, functional language designed for building scalable and maintainable applications
+pkg_elixir_homepage = https://elixir-lang.org/
+pkg_elixir_fetch = git
+pkg_elixir_repo = https://github.com/elixir-lang/elixir
+pkg_elixir_commit = master
+
+PACKAGES += elli
+pkg_elli_name = elli
+pkg_elli_description = Simple, robust and performant Erlang web server
+pkg_elli_homepage = https://github.com/elli-lib/elli
+pkg_elli_fetch = git
+pkg_elli_repo = https://github.com/elli-lib/elli
+pkg_elli_commit = master
+
+PACKAGES += elvis
+pkg_elvis_name = elvis
+pkg_elvis_description = Erlang Style Reviewer
+pkg_elvis_homepage = https://github.com/inaka/elvis
+pkg_elvis_fetch = git
+pkg_elvis_repo = https://github.com/inaka/elvis
+pkg_elvis_commit = master
+
+PACKAGES += emagick
+pkg_emagick_name = emagick
+pkg_emagick_description = Wrapper for Graphics/ImageMagick command line tool.
+pkg_emagick_homepage = https://github.com/kivra/emagick
+pkg_emagick_fetch = git
+pkg_emagick_repo = https://github.com/kivra/emagick
+pkg_emagick_commit = master
+
+PACKAGES += emysql
+pkg_emysql_name = emysql
+pkg_emysql_description = Stable, pure Erlang MySQL driver.
+pkg_emysql_homepage = https://github.com/Eonblast/Emysql
+pkg_emysql_fetch = git
+pkg_emysql_repo = https://github.com/Eonblast/Emysql
+pkg_emysql_commit = master
+
+PACKAGES += enm
+pkg_enm_name = enm
+pkg_enm_description = Erlang driver for nanomsg
+pkg_enm_homepage = https://github.com/basho/enm
+pkg_enm_fetch = git
+pkg_enm_repo = https://github.com/basho/enm
+pkg_enm_commit = master
+
+PACKAGES += entop
+pkg_entop_name = entop
+pkg_entop_description = A top-like tool for monitoring an Erlang node
+pkg_entop_homepage = https://github.com/mazenharake/entop
+pkg_entop_fetch = git
+pkg_entop_repo = https://github.com/mazenharake/entop
+pkg_entop_commit = master
+
+PACKAGES += epcap
+pkg_epcap_name = epcap
+pkg_epcap_description = Erlang packet capture interface using pcap
+pkg_epcap_homepage = https://github.com/msantos/epcap
+pkg_epcap_fetch = git
+pkg_epcap_repo = https://github.com/msantos/epcap
+pkg_epcap_commit = master
+
+PACKAGES += eper
+pkg_eper_name = eper
+pkg_eper_description = Erlang performance and debugging tools.
+pkg_eper_homepage = https://github.com/massemanet/eper
+pkg_eper_fetch = git
+pkg_eper_repo = https://github.com/massemanet/eper
+pkg_eper_commit = master
+
+PACKAGES += epgsql
+pkg_epgsql_name = epgsql
+pkg_epgsql_description = Erlang PostgreSQL client library.
+pkg_epgsql_homepage = https://github.com/epgsql/epgsql
+pkg_epgsql_fetch = git
+pkg_epgsql_repo = https://github.com/epgsql/epgsql
+pkg_epgsql_commit = master
+
+PACKAGES += episcina
+pkg_episcina_name = episcina
+pkg_episcina_description = A simple non intrusive resource pool for connections
+pkg_episcina_homepage = https://github.com/erlware/episcina
+pkg_episcina_fetch = git
+pkg_episcina_repo = https://github.com/erlware/episcina
+pkg_episcina_commit = master
+
+PACKAGES += eplot
+pkg_eplot_name = eplot
+pkg_eplot_description = A plot engine written in erlang.
+pkg_eplot_homepage = https://github.com/psyeugenic/eplot
+pkg_eplot_fetch = git
+pkg_eplot_repo = https://github.com/psyeugenic/eplot
+pkg_eplot_commit = master
+
+PACKAGES += epocxy
+pkg_epocxy_name = epocxy
+pkg_epocxy_description = Erlang Patterns of Concurrency
+pkg_epocxy_homepage = https://github.com/duomark/epocxy
+pkg_epocxy_fetch = git
+pkg_epocxy_repo = https://github.com/duomark/epocxy
+pkg_epocxy_commit = master
+
+PACKAGES += epubnub
+pkg_epubnub_name = epubnub
+pkg_epubnub_description = Erlang PubNub API
+pkg_epubnub_homepage = https://github.com/tsloughter/epubnub
+pkg_epubnub_fetch = git
+pkg_epubnub_repo = https://github.com/tsloughter/epubnub
+pkg_epubnub_commit = master
+
+PACKAGES += eqm
+pkg_eqm_name = eqm
+pkg_eqm_description = Erlang pub sub with supply-demand channels
+pkg_eqm_homepage = https://github.com/loucash/eqm
+pkg_eqm_fetch = git
+pkg_eqm_repo = https://github.com/loucash/eqm
+pkg_eqm_commit = master
+
+PACKAGES += eredis
+pkg_eredis_name = eredis
+pkg_eredis_description = Erlang Redis client
+pkg_eredis_homepage = https://github.com/wooga/eredis
+pkg_eredis_fetch = git
+pkg_eredis_repo = https://github.com/wooga/eredis
+pkg_eredis_commit = master
+
+PACKAGES += eredis_pool
+pkg_eredis_pool_name = eredis_pool
+pkg_eredis_pool_description = eredis_pool is a pool of Redis clients, using eredis and poolboy.
+pkg_eredis_pool_homepage = https://github.com/hiroeorz/eredis_pool
+pkg_eredis_pool_fetch = git
+pkg_eredis_pool_repo = https://github.com/hiroeorz/eredis_pool
+pkg_eredis_pool_commit = master
+
+PACKAGES += erl_streams
+pkg_erl_streams_name = erl_streams
+pkg_erl_streams_description = Streams in Erlang
+pkg_erl_streams_homepage = https://github.com/epappas/erl_streams
+pkg_erl_streams_fetch = git
+pkg_erl_streams_repo = https://github.com/epappas/erl_streams
+pkg_erl_streams_commit = master
+
+PACKAGES += erlang_cep
+pkg_erlang_cep_name = erlang_cep
+pkg_erlang_cep_description = A basic CEP package written in erlang
+pkg_erlang_cep_homepage = https://github.com/danmacklin/erlang_cep
+pkg_erlang_cep_fetch = git
+pkg_erlang_cep_repo = https://github.com/danmacklin/erlang_cep
+pkg_erlang_cep_commit = master
+
+PACKAGES += erlang_js
+pkg_erlang_js_name = erlang_js
+pkg_erlang_js_description = A linked-in driver for Erlang to Mozilla's Spidermonkey Javascript runtime.
+pkg_erlang_js_homepage = https://github.com/basho/erlang_js
+pkg_erlang_js_fetch = git
+pkg_erlang_js_repo = https://github.com/basho/erlang_js
+pkg_erlang_js_commit = master
+
+PACKAGES += erlang_localtime
+pkg_erlang_localtime_name = erlang_localtime
+pkg_erlang_localtime_description = Erlang library for conversion from one local time to another
+pkg_erlang_localtime_homepage = https://github.com/dmitryme/erlang_localtime
+pkg_erlang_localtime_fetch = git
+pkg_erlang_localtime_repo = https://github.com/dmitryme/erlang_localtime
+pkg_erlang_localtime_commit = master
+
+PACKAGES += erlang_smtp
+pkg_erlang_smtp_name = erlang_smtp
+pkg_erlang_smtp_description = Erlang SMTP and POP3 server code.
+pkg_erlang_smtp_homepage = https://github.com/tonyg/erlang-smtp
+pkg_erlang_smtp_fetch = git
+pkg_erlang_smtp_repo = https://github.com/tonyg/erlang-smtp
+pkg_erlang_smtp_commit = master
+
+PACKAGES += erlang_term
+pkg_erlang_term_name = erlang_term
+pkg_erlang_term_description = Erlang Term Info
+pkg_erlang_term_homepage = https://github.com/okeuday/erlang_term
+pkg_erlang_term_fetch = git
+pkg_erlang_term_repo = https://github.com/okeuday/erlang_term
+pkg_erlang_term_commit = master
+
+PACKAGES += erlastic_search
+pkg_erlastic_search_name = erlastic_search
+pkg_erlastic_search_description = An Erlang app for communicating with Elastic Search's rest interface.
+pkg_erlastic_search_homepage = https://github.com/tsloughter/erlastic_search
+pkg_erlastic_search_fetch = git
+pkg_erlastic_search_repo = https://github.com/tsloughter/erlastic_search
+pkg_erlastic_search_commit = master
+
+PACKAGES += erlasticsearch
+pkg_erlasticsearch_name = erlasticsearch
+pkg_erlasticsearch_description = Erlang thrift interface to elastic_search
+pkg_erlasticsearch_homepage = https://github.com/dieswaytoofast/erlasticsearch
+pkg_erlasticsearch_fetch = git
+pkg_erlasticsearch_repo = https://github.com/dieswaytoofast/erlasticsearch
+pkg_erlasticsearch_commit = master
+
+PACKAGES += erlbrake
+pkg_erlbrake_name = erlbrake
+pkg_erlbrake_description = Erlang Airbrake notification client
+pkg_erlbrake_homepage = https://github.com/kenpratt/erlbrake
+pkg_erlbrake_fetch = git
+pkg_erlbrake_repo = https://github.com/kenpratt/erlbrake
+pkg_erlbrake_commit = master
+
+PACKAGES += erlcloud
+pkg_erlcloud_name = erlcloud
+pkg_erlcloud_description = Cloud Computing library for erlang (Amazon EC2, S3, SQS, SimpleDB, Mechanical Turk, ELB)
+pkg_erlcloud_homepage = https://github.com/gleber/erlcloud
+pkg_erlcloud_fetch = git
+pkg_erlcloud_repo = https://github.com/gleber/erlcloud
+pkg_erlcloud_commit = master
+
+PACKAGES += erlcron
+pkg_erlcron_name = erlcron
+pkg_erlcron_description = Erlang cronish system
+pkg_erlcron_homepage = https://github.com/erlware/erlcron
+pkg_erlcron_fetch = git
+pkg_erlcron_repo = https://github.com/erlware/erlcron
+pkg_erlcron_commit = master
+
+PACKAGES += erldb
+pkg_erldb_name = erldb
+pkg_erldb_description = ORM (Object-relational mapping) application implemented in Erlang
+pkg_erldb_homepage = http://erldb.org
+pkg_erldb_fetch = git
+pkg_erldb_repo = https://github.com/erldb/erldb
+pkg_erldb_commit = master
+
+PACKAGES += erldis
+pkg_erldis_name = erldis
+pkg_erldis_description = redis erlang client library
+pkg_erldis_homepage = https://github.com/cstar/erldis
+pkg_erldis_fetch = git
+pkg_erldis_repo = https://github.com/cstar/erldis
+pkg_erldis_commit = master
+
+PACKAGES += erldns
+pkg_erldns_name = erldns
+pkg_erldns_description = DNS server, in erlang.
+pkg_erldns_homepage = https://github.com/aetrion/erl-dns
+pkg_erldns_fetch = git
+pkg_erldns_repo = https://github.com/aetrion/erl-dns
+pkg_erldns_commit = master
+
+PACKAGES += erldocker
+pkg_erldocker_name = erldocker
+pkg_erldocker_description = Docker Remote API client for Erlang
+pkg_erldocker_homepage = https://github.com/proger/erldocker
+pkg_erldocker_fetch = git
+pkg_erldocker_repo = https://github.com/proger/erldocker
+pkg_erldocker_commit = master
+
+PACKAGES += erlfsmon
+pkg_erlfsmon_name = erlfsmon
+pkg_erlfsmon_description = Erlang filesystem event watcher for Linux and OSX
+pkg_erlfsmon_homepage = https://github.com/proger/erlfsmon
+pkg_erlfsmon_fetch = git
+pkg_erlfsmon_repo = https://github.com/proger/erlfsmon
+pkg_erlfsmon_commit = master
+
+PACKAGES += erlgit
+pkg_erlgit_name = erlgit
+pkg_erlgit_description = Erlang convenience wrapper around git executable
+pkg_erlgit_homepage = https://github.com/gleber/erlgit
+pkg_erlgit_fetch = git
+pkg_erlgit_repo = https://github.com/gleber/erlgit
+pkg_erlgit_commit = master
+
+PACKAGES += erlguten
+pkg_erlguten_name = erlguten
+pkg_erlguten_description = ErlGuten is a system for high-quality typesetting, written purely in Erlang.
+pkg_erlguten_homepage = https://github.com/richcarl/erlguten
+pkg_erlguten_fetch = git
+pkg_erlguten_repo = https://github.com/richcarl/erlguten
+pkg_erlguten_commit = master
+
+PACKAGES += erlmc
+pkg_erlmc_name = erlmc
+pkg_erlmc_description = Erlang memcached binary protocol client
+pkg_erlmc_homepage = https://github.com/jkvor/erlmc
+pkg_erlmc_fetch = git
+pkg_erlmc_repo = https://github.com/jkvor/erlmc
+pkg_erlmc_commit = master
+
+PACKAGES += erlmongo
+pkg_erlmongo_name = erlmongo
+pkg_erlmongo_description = Record based Erlang driver for MongoDB with gridfs support
+pkg_erlmongo_homepage = https://github.com/SergejJurecko/erlmongo
+pkg_erlmongo_fetch = git
+pkg_erlmongo_repo = https://github.com/SergejJurecko/erlmongo
+pkg_erlmongo_commit = master
+
+PACKAGES += erlog
+pkg_erlog_name = erlog
+pkg_erlog_description = Prolog interpreter in and for Erlang
+pkg_erlog_homepage = https://github.com/rvirding/erlog
+pkg_erlog_fetch = git
+pkg_erlog_repo = https://github.com/rvirding/erlog
+pkg_erlog_commit = master
+
+PACKAGES += erlpass
+pkg_erlpass_name = erlpass
+pkg_erlpass_description = A library to handle password hashing and changing in a safe manner, independent from any kind of storage whatsoever.
+pkg_erlpass_homepage = https://github.com/ferd/erlpass
+pkg_erlpass_fetch = git
+pkg_erlpass_repo = https://github.com/ferd/erlpass
+pkg_erlpass_commit = master
+
+PACKAGES += erlport
+pkg_erlport_name = erlport
+pkg_erlport_description = ErlPort - connect Erlang to other languages
+pkg_erlport_homepage = https://github.com/hdima/erlport
+pkg_erlport_fetch = git
+pkg_erlport_repo = https://github.com/hdima/erlport
+pkg_erlport_commit = master
+
+PACKAGES += erlsh
+pkg_erlsh_name = erlsh
+pkg_erlsh_description = Erlang shell tools
+pkg_erlsh_homepage = https://github.com/proger/erlsh
+pkg_erlsh_fetch = git
+pkg_erlsh_repo = https://github.com/proger/erlsh
+pkg_erlsh_commit = master
+
+PACKAGES += erlsha2
+pkg_erlsha2_name = erlsha2
+pkg_erlsha2_description = SHA-224, SHA-256, SHA-384, SHA-512 implemented in Erlang NIFs.
+pkg_erlsha2_homepage = https://github.com/vinoski/erlsha2
+pkg_erlsha2_fetch = git
+pkg_erlsha2_repo = https://github.com/vinoski/erlsha2
+pkg_erlsha2_commit = master
+
+PACKAGES += erlsom
+pkg_erlsom_name = erlsom
+pkg_erlsom_description = XML parser for Erlang
+pkg_erlsom_homepage = https://github.com/willemdj/erlsom
+pkg_erlsom_fetch = git
+pkg_erlsom_repo = https://github.com/willemdj/erlsom
+pkg_erlsom_commit = master
+
+PACKAGES += erlubi
+pkg_erlubi_name = erlubi
+pkg_erlubi_description = Ubigraph Erlang Client (and Process Visualizer)
+pkg_erlubi_homepage = https://github.com/krestenkrab/erlubi
+pkg_erlubi_fetch = git
+pkg_erlubi_repo = https://github.com/krestenkrab/erlubi
+pkg_erlubi_commit = master
+
+PACKAGES += erlvolt
+pkg_erlvolt_name = erlvolt
+pkg_erlvolt_description = VoltDB Erlang Client Driver
+pkg_erlvolt_homepage = https://github.com/VoltDB/voltdb-client-erlang
+pkg_erlvolt_fetch = git
+pkg_erlvolt_repo = https://github.com/VoltDB/voltdb-client-erlang
+pkg_erlvolt_commit = master
+
+PACKAGES += erlware_commons
+pkg_erlware_commons_name = erlware_commons
+pkg_erlware_commons_description = Erlware Commons is an Erlware project focused on all aspects of reusable Erlang components.
+pkg_erlware_commons_homepage = https://github.com/erlware/erlware_commons
+pkg_erlware_commons_fetch = git
+pkg_erlware_commons_repo = https://github.com/erlware/erlware_commons
+pkg_erlware_commons_commit = master
+
+PACKAGES += erlydtl
+pkg_erlydtl_name = erlydtl
+pkg_erlydtl_description = Django Template Language for Erlang.
+pkg_erlydtl_homepage = https://github.com/erlydtl/erlydtl
+pkg_erlydtl_fetch = git
+pkg_erlydtl_repo = https://github.com/erlydtl/erlydtl
+pkg_erlydtl_commit = master
+
+PACKAGES += errd
+pkg_errd_name = errd
+pkg_errd_description = Erlang RRDTool library
+pkg_errd_homepage = https://github.com/archaelus/errd
+pkg_errd_fetch = git
+pkg_errd_repo = https://github.com/archaelus/errd
+pkg_errd_commit = master
+
+PACKAGES += erserve
+pkg_erserve_name = erserve
+pkg_erserve_description = Erlang/Rserve communication interface
+pkg_erserve_homepage = https://github.com/del/erserve
+pkg_erserve_fetch = git
+pkg_erserve_repo = https://github.com/del/erserve
+pkg_erserve_commit = master
+
+PACKAGES += erwa
+pkg_erwa_name = erwa
+pkg_erwa_description = A WAMP router and client written in Erlang.
+pkg_erwa_homepage = https://github.com/bwegh/erwa
+pkg_erwa_fetch = git
+pkg_erwa_repo = https://github.com/bwegh/erwa
+pkg_erwa_commit = master
+
+PACKAGES += escalus
+pkg_escalus_name = escalus
+pkg_escalus_description = An XMPP client library in Erlang for conveniently testing XMPP servers
+pkg_escalus_homepage = https://github.com/esl/escalus
+pkg_escalus_fetch = git
+pkg_escalus_repo = https://github.com/esl/escalus
+pkg_escalus_commit = master
+
+PACKAGES += esh_mk
+pkg_esh_mk_name = esh_mk
+pkg_esh_mk_description = esh template engine plugin for erlang.mk
+pkg_esh_mk_homepage = https://github.com/crownedgrouse/esh.mk
+pkg_esh_mk_fetch = git
+pkg_esh_mk_repo = https://github.com/crownedgrouse/esh.mk.git
+pkg_esh_mk_commit = master
+
+PACKAGES += espec
+pkg_espec_name = espec
+pkg_espec_description = ESpec: Behaviour driven development framework for Erlang
+pkg_espec_homepage = https://github.com/lucaspiller/espec
+pkg_espec_fetch = git
+pkg_espec_repo = https://github.com/lucaspiller/espec
+pkg_espec_commit = master
+
+PACKAGES += estatsd
+pkg_estatsd_name = estatsd
+pkg_estatsd_description = Erlang stats aggregation app that periodically flushes data to graphite
+pkg_estatsd_homepage = https://github.com/RJ/estatsd
+pkg_estatsd_fetch = git
+pkg_estatsd_repo = https://github.com/RJ/estatsd
+pkg_estatsd_commit = master
+
+PACKAGES += etap
+pkg_etap_name = etap
+pkg_etap_description = etap is a simple erlang testing library that provides TAP compliant output.
+pkg_etap_homepage = https://github.com/ngerakines/etap
+pkg_etap_fetch = git
+pkg_etap_repo = https://github.com/ngerakines/etap
+pkg_etap_commit = master
+
+PACKAGES += etest
+pkg_etest_name = etest
+pkg_etest_description = A lightweight, convention over configuration test framework for Erlang
+pkg_etest_homepage = https://github.com/wooga/etest
+pkg_etest_fetch = git
+pkg_etest_repo = https://github.com/wooga/etest
+pkg_etest_commit = master
+
+PACKAGES += etest_http
+pkg_etest_http_name = etest_http
+pkg_etest_http_description = etest Assertions around HTTP (client-side)
+pkg_etest_http_homepage = https://github.com/wooga/etest_http
+pkg_etest_http_fetch = git
+pkg_etest_http_repo = https://github.com/wooga/etest_http
+pkg_etest_http_commit = master
+
+PACKAGES += etoml
+pkg_etoml_name = etoml
+pkg_etoml_description = TOML language erlang parser
+pkg_etoml_homepage = https://github.com/kalta/etoml
+pkg_etoml_fetch = git
+pkg_etoml_repo = https://github.com/kalta/etoml
+pkg_etoml_commit = master
+
+PACKAGES += eunit
+pkg_eunit_name = eunit
+pkg_eunit_description = The EUnit lightweight unit testing framework for Erlang - this is the canonical development repository.
+pkg_eunit_homepage = https://github.com/richcarl/eunit
+pkg_eunit_fetch = git
+pkg_eunit_repo = https://github.com/richcarl/eunit
+pkg_eunit_commit = master
+
+PACKAGES += eunit_formatters
+pkg_eunit_formatters_name = eunit_formatters
+pkg_eunit_formatters_description = Because eunit's output sucks. Let's make it better.
+pkg_eunit_formatters_homepage = https://github.com/seancribbs/eunit_formatters
+pkg_eunit_formatters_fetch = git
+pkg_eunit_formatters_repo = https://github.com/seancribbs/eunit_formatters
+pkg_eunit_formatters_commit = master
+
+PACKAGES += euthanasia
+pkg_euthanasia_name = euthanasia
+pkg_euthanasia_description = Merciful killer for your Erlang processes
+pkg_euthanasia_homepage = https://github.com/doubleyou/euthanasia
+pkg_euthanasia_fetch = git
+pkg_euthanasia_repo = https://github.com/doubleyou/euthanasia
+pkg_euthanasia_commit = master
+
+PACKAGES += evum
+pkg_evum_name = evum
+pkg_evum_description = Spawn Linux VMs as Erlang processes in the Erlang VM
+pkg_evum_homepage = https://github.com/msantos/evum
+pkg_evum_fetch = git
+pkg_evum_repo = https://github.com/msantos/evum
+pkg_evum_commit = master
+
+PACKAGES += exec
+pkg_exec_name = erlexec
+pkg_exec_description = Execute and control OS processes from Erlang/OTP.
+pkg_exec_homepage = http://saleyn.github.com/erlexec
+pkg_exec_fetch = git
+pkg_exec_repo = https://github.com/saleyn/erlexec
+pkg_exec_commit = master
+
+PACKAGES += exml
+pkg_exml_name = exml
+pkg_exml_description = XML parsing library in Erlang
+pkg_exml_homepage = https://github.com/paulgray/exml
+pkg_exml_fetch = git
+pkg_exml_repo = https://github.com/paulgray/exml
+pkg_exml_commit = master
+
+PACKAGES += exometer
+pkg_exometer_name = exometer
+pkg_exometer_description = Basic measurement objects and probe behavior
+pkg_exometer_homepage = https://github.com/Feuerlabs/exometer
+pkg_exometer_fetch = git
+pkg_exometer_repo = https://github.com/Feuerlabs/exometer
+pkg_exometer_commit = master
+
+PACKAGES += exs1024
+pkg_exs1024_name = exs1024
+pkg_exs1024_description = Xorshift1024star pseudo random number generator for Erlang.
+pkg_exs1024_homepage = https://github.com/jj1bdx/exs1024
+pkg_exs1024_fetch = git
+pkg_exs1024_repo = https://github.com/jj1bdx/exs1024
+pkg_exs1024_commit = master
+
+PACKAGES += exs64
+pkg_exs64_name = exs64
+pkg_exs64_description = Xorshift64star pseudo random number generator for Erlang.
+pkg_exs64_homepage = https://github.com/jj1bdx/exs64
+pkg_exs64_fetch = git
+pkg_exs64_repo = https://github.com/jj1bdx/exs64
+pkg_exs64_commit = master
+
+PACKAGES += exsplus116
+pkg_exsplus116_name = exsplus116
+pkg_exsplus116_description = Xorshift116plus for Erlang
+pkg_exsplus116_homepage = https://github.com/jj1bdx/exsplus116
+pkg_exsplus116_fetch = git
+pkg_exsplus116_repo = https://github.com/jj1bdx/exsplus116
+pkg_exsplus116_commit = master
+
+PACKAGES += exsplus128
+pkg_exsplus128_name = exsplus128
+pkg_exsplus128_description = Xorshift128plus pseudo random number generator for Erlang.
+pkg_exsplus128_homepage = https://github.com/jj1bdx/exsplus128
+pkg_exsplus128_fetch = git
+pkg_exsplus128_repo = https://github.com/jj1bdx/exsplus128
+pkg_exsplus128_commit = master
+
+PACKAGES += ezmq
+pkg_ezmq_name = ezmq
+pkg_ezmq_description = zMQ implemented in Erlang
+pkg_ezmq_homepage = https://github.com/RoadRunnr/ezmq
+pkg_ezmq_fetch = git
+pkg_ezmq_repo = https://github.com/RoadRunnr/ezmq
+pkg_ezmq_commit = master
+
+PACKAGES += ezmtp
+pkg_ezmtp_name = ezmtp
+pkg_ezmtp_description = ZMTP protocol in pure Erlang.
+pkg_ezmtp_homepage = https://github.com/a13x/ezmtp
+pkg_ezmtp_fetch = git
+pkg_ezmtp_repo = https://github.com/a13x/ezmtp
+pkg_ezmtp_commit = master
+
+PACKAGES += fast_disk_log
+pkg_fast_disk_log_name = fast_disk_log
+pkg_fast_disk_log_description = Pool-based asynchronous Erlang disk logger
+pkg_fast_disk_log_homepage = https://github.com/lpgauth/fast_disk_log
+pkg_fast_disk_log_fetch = git
+pkg_fast_disk_log_repo = https://github.com/lpgauth/fast_disk_log
+pkg_fast_disk_log_commit = master
+
+PACKAGES += feeder
+pkg_feeder_name = feeder
+pkg_feeder_description = Stream parse RSS and Atom formatted XML feeds.
+pkg_feeder_homepage = https://github.com/michaelnisi/feeder
+pkg_feeder_fetch = git
+pkg_feeder_repo = https://github.com/michaelnisi/feeder
+pkg_feeder_commit = master
+
+PACKAGES += find_crate
+pkg_find_crate_name = find_crate
+pkg_find_crate_description = Find Rust libs and exes in Erlang application priv directory
+pkg_find_crate_homepage = https://github.com/goertzenator/find_crate
+pkg_find_crate_fetch = git
+pkg_find_crate_repo = https://github.com/goertzenator/find_crate
+pkg_find_crate_commit = master
+
+PACKAGES += fix
+pkg_fix_name = fix
+pkg_fix_description = FIX protocol (http://fixprotocol.org/) implementation.
+pkg_fix_homepage = https://github.com/maxlapshin/fix
+pkg_fix_fetch = git
+pkg_fix_repo = https://github.com/maxlapshin/fix
+pkg_fix_commit = master
+
+PACKAGES += flower
+pkg_flower_name = flower
+pkg_flower_description = FlowER - an Erlang OpenFlow development platform
+pkg_flower_homepage = https://github.com/travelping/flower
+pkg_flower_fetch = git
+pkg_flower_repo = https://github.com/travelping/flower
+pkg_flower_commit = master
+
+PACKAGES += fn
+pkg_fn_name = fn
+pkg_fn_description = Function utilities for Erlang
+pkg_fn_homepage = https://github.com/reiddraper/fn
+pkg_fn_fetch = git
+pkg_fn_repo = https://github.com/reiddraper/fn
+pkg_fn_commit = master
+
+PACKAGES += folsom
+pkg_folsom_name = folsom
+pkg_folsom_description = Expose Erlang Events and Metrics
+pkg_folsom_homepage = https://github.com/boundary/folsom
+pkg_folsom_fetch = git
+pkg_folsom_repo = https://github.com/boundary/folsom
+pkg_folsom_commit = master
+
+PACKAGES += folsom_cowboy
+pkg_folsom_cowboy_name = folsom_cowboy
+pkg_folsom_cowboy_description = A Cowboy based Folsom HTTP Wrapper.
+pkg_folsom_cowboy_homepage = https://github.com/boundary/folsom_cowboy
+pkg_folsom_cowboy_fetch = git
+pkg_folsom_cowboy_repo = https://github.com/boundary/folsom_cowboy
+pkg_folsom_cowboy_commit = master
+
+PACKAGES += folsomite
+pkg_folsomite_name = folsomite
+pkg_folsomite_description = blow up your graphite / riemann server with folsom metrics
+pkg_folsomite_homepage = https://github.com/campanja/folsomite
+pkg_folsomite_fetch = git
+pkg_folsomite_repo = https://github.com/campanja/folsomite
+pkg_folsomite_commit = master
+
+PACKAGES += fs
+pkg_fs_name = fs
+pkg_fs_description = Erlang FileSystem Listener
+pkg_fs_homepage = https://github.com/synrc/fs
+pkg_fs_fetch = git
+pkg_fs_repo = https://github.com/synrc/fs
+pkg_fs_commit = master
+
+PACKAGES += fuse
+pkg_fuse_name = fuse
+pkg_fuse_description = A Circuit Breaker for Erlang
+pkg_fuse_homepage = https://github.com/jlouis/fuse
+pkg_fuse_fetch = git
+pkg_fuse_repo = https://github.com/jlouis/fuse
+pkg_fuse_commit = master
+
+PACKAGES += gcm
+pkg_gcm_name = gcm
+pkg_gcm_description = An Erlang application for Google Cloud Messaging
+pkg_gcm_homepage = https://github.com/pdincau/gcm-erlang
+pkg_gcm_fetch = git
+pkg_gcm_repo = https://github.com/pdincau/gcm-erlang
+pkg_gcm_commit = master
+
+PACKAGES += gcprof
+pkg_gcprof_name = gcprof
+pkg_gcprof_description = Garbage Collection profiler for Erlang
+pkg_gcprof_homepage = https://github.com/knutin/gcprof
+pkg_gcprof_fetch = git
+pkg_gcprof_repo = https://github.com/knutin/gcprof
+pkg_gcprof_commit = master
+
+PACKAGES += geas
+pkg_geas_name = geas
+pkg_geas_description = Guess Erlang Application Scattering
+pkg_geas_homepage = https://github.com/crownedgrouse/geas
+pkg_geas_fetch = git
+pkg_geas_repo = https://github.com/crownedgrouse/geas
+pkg_geas_commit = master
+
+PACKAGES += geef
+pkg_geef_name = geef
+pkg_geef_description = Git NEEEEF (Erlang NIF)
+pkg_geef_homepage = https://github.com/carlosmn/geef
+pkg_geef_fetch = git
+pkg_geef_repo = https://github.com/carlosmn/geef
+pkg_geef_commit = master
+
+PACKAGES += gen_coap
+pkg_gen_coap_name = gen_coap
+pkg_gen_coap_description = Generic Erlang CoAP Client/Server
+pkg_gen_coap_homepage = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_fetch = git
+pkg_gen_coap_repo = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_commit = master
+
+PACKAGES += gen_cycle
+pkg_gen_cycle_name = gen_cycle
+pkg_gen_cycle_description = Simple, generic OTP behaviour for recurring tasks
+pkg_gen_cycle_homepage = https://github.com/aerosol/gen_cycle
+pkg_gen_cycle_fetch = git
+pkg_gen_cycle_repo = https://github.com/aerosol/gen_cycle
+pkg_gen_cycle_commit = develop
+
+PACKAGES += gen_icmp
+pkg_gen_icmp_name = gen_icmp
+pkg_gen_icmp_description = Erlang interface to ICMP sockets
+pkg_gen_icmp_homepage = https://github.com/msantos/gen_icmp
+pkg_gen_icmp_fetch = git
+pkg_gen_icmp_repo = https://github.com/msantos/gen_icmp
+pkg_gen_icmp_commit = master
+
+PACKAGES += gen_leader
+pkg_gen_leader_name = gen_leader
+pkg_gen_leader_description = leader election behavior
+pkg_gen_leader_homepage = https://github.com/garret-smith/gen_leader_revival
+pkg_gen_leader_fetch = git
+pkg_gen_leader_repo = https://github.com/garret-smith/gen_leader_revival
+pkg_gen_leader_commit = master
+
+PACKAGES += gen_nb_server
+pkg_gen_nb_server_name = gen_nb_server
+pkg_gen_nb_server_description = OTP behavior for writing non-blocking servers
+pkg_gen_nb_server_homepage = https://github.com/kevsmith/gen_nb_server
+pkg_gen_nb_server_fetch = git
+pkg_gen_nb_server_repo = https://github.com/kevsmith/gen_nb_server
+pkg_gen_nb_server_commit = master
+
+PACKAGES += gen_paxos
+pkg_gen_paxos_name = gen_paxos
+pkg_gen_paxos_description = An Erlang/OTP-style implementation of the PAXOS distributed consensus protocol
+pkg_gen_paxos_homepage = https://github.com/gburd/gen_paxos
+pkg_gen_paxos_fetch = git
+pkg_gen_paxos_repo = https://github.com/gburd/gen_paxos
+pkg_gen_paxos_commit = master
+
+PACKAGES += gen_rpc
+pkg_gen_rpc_name = gen_rpc
+pkg_gen_rpc_description = A scalable RPC library for Erlang-VM based languages
+pkg_gen_rpc_homepage = https://github.com/priestjim/gen_rpc
+pkg_gen_rpc_fetch = git
+pkg_gen_rpc_repo = https://github.com/priestjim/gen_rpc.git
+pkg_gen_rpc_commit = master
+
+PACKAGES += gen_smtp
+pkg_gen_smtp_name = gen_smtp
+pkg_gen_smtp_description = A generic Erlang SMTP server and client that can be extended via callback modules
+pkg_gen_smtp_homepage = https://github.com/Vagabond/gen_smtp
+pkg_gen_smtp_fetch = git
+pkg_gen_smtp_repo = https://github.com/Vagabond/gen_smtp
+pkg_gen_smtp_commit = master
+
+PACKAGES += gen_tracker
+pkg_gen_tracker_name = gen_tracker
+pkg_gen_tracker_description = supervisor with ets handling of children and their metadata
+pkg_gen_tracker_homepage = https://github.com/erlyvideo/gen_tracker
+pkg_gen_tracker_fetch = git
+pkg_gen_tracker_repo = https://github.com/erlyvideo/gen_tracker
+pkg_gen_tracker_commit = master
+
+PACKAGES += gen_unix
+pkg_gen_unix_name = gen_unix
+pkg_gen_unix_description = Erlang Unix socket interface
+pkg_gen_unix_homepage = https://github.com/msantos/gen_unix
+pkg_gen_unix_fetch = git
+pkg_gen_unix_repo = https://github.com/msantos/gen_unix
+pkg_gen_unix_commit = master
+
+PACKAGES += geode
+pkg_geode_name = geode
+pkg_geode_description = geohash/proximity lookup in pure, uncut erlang.
+pkg_geode_homepage = https://github.com/bradfordw/geode
+pkg_geode_fetch = git
+pkg_geode_repo = https://github.com/bradfordw/geode
+pkg_geode_commit = master
+
+PACKAGES += getopt
+pkg_getopt_name = getopt
+pkg_getopt_description = Module to parse command line arguments using the GNU getopt syntax
+pkg_getopt_homepage = https://github.com/jcomellas/getopt
+pkg_getopt_fetch = git
+pkg_getopt_repo = https://github.com/jcomellas/getopt
+pkg_getopt_commit = master
+
+PACKAGES += gettext
+pkg_gettext_name = gettext
+pkg_gettext_description = Erlang internationalization library.
+pkg_gettext_homepage = https://github.com/etnt/gettext
+pkg_gettext_fetch = git
+pkg_gettext_repo = https://github.com/etnt/gettext
+pkg_gettext_commit = master
+
+PACKAGES += giallo
+pkg_giallo_name = giallo
+pkg_giallo_description = Small and flexible web framework on top of Cowboy
+pkg_giallo_homepage = https://github.com/kivra/giallo
+pkg_giallo_fetch = git
+pkg_giallo_repo = https://github.com/kivra/giallo
+pkg_giallo_commit = master
+
+PACKAGES += gin
+pkg_gin_name = gin
+pkg_gin_description = Guard helpers for Erlang, implemented as a parse_transform
+pkg_gin_homepage = https://github.com/mad-cocktail/gin
+pkg_gin_fetch = git
+pkg_gin_repo = https://github.com/mad-cocktail/gin
+pkg_gin_commit = master
+
+PACKAGES += gitty
+pkg_gitty_name = gitty
+pkg_gitty_description = Git access in erlang
+pkg_gitty_homepage = https://github.com/maxlapshin/gitty
+pkg_gitty_fetch = git
+pkg_gitty_repo = https://github.com/maxlapshin/gitty
+pkg_gitty_commit = master
+
+PACKAGES += gold_fever
+pkg_gold_fever_name = gold_fever
+pkg_gold_fever_description = A Treasure Hunt for Erlangers
+pkg_gold_fever_homepage = https://github.com/inaka/gold_fever
+pkg_gold_fever_fetch = git
+pkg_gold_fever_repo = https://github.com/inaka/gold_fever
+pkg_gold_fever_commit = master
+
+PACKAGES += gpb
+pkg_gpb_name = gpb
+pkg_gpb_description = A Google Protobuf implementation for Erlang
+pkg_gpb_homepage = https://github.com/tomas-abrahamsson/gpb
+pkg_gpb_fetch = git
+pkg_gpb_repo = https://github.com/tomas-abrahamsson/gpb
+pkg_gpb_commit = master
+
+PACKAGES += gproc
+pkg_gproc_name = gproc
+pkg_gproc_description = Extended process registry for Erlang
+pkg_gproc_homepage = https://github.com/uwiger/gproc
+pkg_gproc_fetch = git
+pkg_gproc_repo = https://github.com/uwiger/gproc
+pkg_gproc_commit = master
+
+PACKAGES += grapherl
+pkg_grapherl_name = grapherl
+pkg_grapherl_description = Create graphs of Erlang systems and programs
+pkg_grapherl_homepage = https://github.com/eproxus/grapherl
+pkg_grapherl_fetch = git
+pkg_grapherl_repo = https://github.com/eproxus/grapherl
+pkg_grapherl_commit = master
+
+PACKAGES += grpc
+pkg_grpc_name = grpc
+pkg_grpc_description = gRPC server in Erlang
+pkg_grpc_homepage = https://github.com/Bluehouse-Technology/grpc
+pkg_grpc_fetch = git
+pkg_grpc_repo = https://github.com/Bluehouse-Technology/grpc
+pkg_grpc_commit = master
+
+PACKAGES += grpc_client
+pkg_grpc_client_name = grpc_client
+pkg_grpc_client_description = gRPC client in Erlang
+pkg_grpc_client_homepage = https://github.com/Bluehouse-Technology/grpc_client
+pkg_grpc_client_fetch = git
+pkg_grpc_client_repo = https://github.com/Bluehouse-Technology/grpc_client
+pkg_grpc_client_commit = master
+
+PACKAGES += gun
+pkg_gun_name = gun
+pkg_gun_description = Asynchronous SPDY, HTTP and Websocket client written in Erlang.
+pkg_gun_homepage = http://ninenines.eu
+pkg_gun_fetch = git
+pkg_gun_repo = https://github.com/ninenines/gun
+pkg_gun_commit = master
+
+PACKAGES += gut
+pkg_gut_name = gut
+pkg_gut_description = gut is a template printing, aka scaffolding, tool for Erlang. Like rails generate or yeoman
+pkg_gut_homepage = https://github.com/unbalancedparentheses/gut
+pkg_gut_fetch = git
+pkg_gut_repo = https://github.com/unbalancedparentheses/gut
+pkg_gut_commit = master
+
+PACKAGES += hackney
+pkg_hackney_name = hackney
+pkg_hackney_description = simple HTTP client in Erlang
+pkg_hackney_homepage = https://github.com/benoitc/hackney
+pkg_hackney_fetch = git
+pkg_hackney_repo = https://github.com/benoitc/hackney
+pkg_hackney_commit = master
+
+PACKAGES += hamcrest
+pkg_hamcrest_name = hamcrest
+pkg_hamcrest_description = Erlang port of Hamcrest
+pkg_hamcrest_homepage = https://github.com/hyperthunk/hamcrest-erlang
+pkg_hamcrest_fetch = git
+pkg_hamcrest_repo = https://github.com/hyperthunk/hamcrest-erlang
+pkg_hamcrest_commit = master
+
+PACKAGES += hanoidb
+pkg_hanoidb_name = hanoidb
+pkg_hanoidb_description = Erlang LSM BTree Storage
+pkg_hanoidb_homepage = https://github.com/krestenkrab/hanoidb
+pkg_hanoidb_fetch = git
+pkg_hanoidb_repo = https://github.com/krestenkrab/hanoidb
+pkg_hanoidb_commit = master
+
+PACKAGES += hottub
+pkg_hottub_name = hottub
+pkg_hottub_description = Permanent Erlang Worker Pool
+pkg_hottub_homepage = https://github.com/bfrog/hottub
+pkg_hottub_fetch = git
+pkg_hottub_repo = https://github.com/bfrog/hottub
+pkg_hottub_commit = master
+
+PACKAGES += hpack
+pkg_hpack_name = hpack
+pkg_hpack_description = HPACK Implementation for Erlang
+pkg_hpack_homepage = https://github.com/joedevivo/hpack
+pkg_hpack_fetch = git
+pkg_hpack_repo = https://github.com/joedevivo/hpack
+pkg_hpack_commit = master
+
+PACKAGES += hyper
+pkg_hyper_name = hyper
+pkg_hyper_description = Erlang implementation of HyperLogLog
+pkg_hyper_homepage = https://github.com/GameAnalytics/hyper
+pkg_hyper_fetch = git
+pkg_hyper_repo = https://github.com/GameAnalytics/hyper
+pkg_hyper_commit = master
+
+PACKAGES += i18n
+pkg_i18n_name = i18n
+pkg_i18n_description = International components for unicode from Erlang (unicode, date, string, number, format, locale, localization, transliteration, icu4e)
+pkg_i18n_homepage = https://github.com/erlang-unicode/i18n
+pkg_i18n_fetch = git
+pkg_i18n_repo = https://github.com/erlang-unicode/i18n
+pkg_i18n_commit = master
+
+PACKAGES += ibrowse
+pkg_ibrowse_name = ibrowse
+pkg_ibrowse_description = Erlang HTTP client
+pkg_ibrowse_homepage = https://github.com/cmullaparthi/ibrowse
+pkg_ibrowse_fetch = git
+pkg_ibrowse_repo = https://github.com/cmullaparthi/ibrowse
+pkg_ibrowse_commit = master
+
+PACKAGES += idna
+pkg_idna_name = idna
+pkg_idna_description = Erlang IDNA lib
+pkg_idna_homepage = https://github.com/benoitc/erlang-idna
+pkg_idna_fetch = git
+pkg_idna_repo = https://github.com/benoitc/erlang-idna
+pkg_idna_commit = master
+
+PACKAGES += ierlang
+pkg_ierlang_name = ierlang
+pkg_ierlang_description = An Erlang language kernel for IPython.
+pkg_ierlang_homepage = https://github.com/robbielynch/ierlang
+pkg_ierlang_fetch = git
+pkg_ierlang_repo = https://github.com/robbielynch/ierlang
+pkg_ierlang_commit = master
+
+PACKAGES += iota
+pkg_iota_name = iota
+pkg_iota_description = iota (Inter-dependency Objective Testing Apparatus) - a tool to enforce clean separation of responsibilities in Erlang code
+pkg_iota_homepage = https://github.com/jpgneves/iota
+pkg_iota_fetch = git
+pkg_iota_repo = https://github.com/jpgneves/iota
+pkg_iota_commit = master
+
+PACKAGES += irc_lib
+pkg_irc_lib_name = irc_lib
+pkg_irc_lib_description = Erlang irc client library
+pkg_irc_lib_homepage = https://github.com/OtpChatBot/irc_lib
+pkg_irc_lib_fetch = git
+pkg_irc_lib_repo = https://github.com/OtpChatBot/irc_lib
+pkg_irc_lib_commit = master
+
+PACKAGES += ircd
+pkg_ircd_name = ircd
+pkg_ircd_description = A pluggable IRC daemon application/library for Erlang.
+pkg_ircd_homepage = https://github.com/tonyg/erlang-ircd
+pkg_ircd_fetch = git
+pkg_ircd_repo = https://github.com/tonyg/erlang-ircd
+pkg_ircd_commit = master
+
+PACKAGES += iris
+pkg_iris_name = iris
+pkg_iris_description = Iris Erlang binding
+pkg_iris_homepage = https://github.com/project-iris/iris-erl
+pkg_iris_fetch = git
+pkg_iris_repo = https://github.com/project-iris/iris-erl
+pkg_iris_commit = master
+
+PACKAGES += iso8601
+pkg_iso8601_name = iso8601
+pkg_iso8601_description = Erlang ISO 8601 date formatter/parser
+pkg_iso8601_homepage = https://github.com/seansawyer/erlang_iso8601
+pkg_iso8601_fetch = git
+pkg_iso8601_repo = https://github.com/seansawyer/erlang_iso8601
+pkg_iso8601_commit = master
+
+PACKAGES += jamdb_sybase
+pkg_jamdb_sybase_name = jamdb_sybase
+pkg_jamdb_sybase_description = Erlang driver for SAP Sybase ASE
+pkg_jamdb_sybase_homepage = https://github.com/erlangbureau/jamdb_sybase
+pkg_jamdb_sybase_fetch = git
+pkg_jamdb_sybase_repo = https://github.com/erlangbureau/jamdb_sybase
+pkg_jamdb_sybase_commit = master
+
+PACKAGES += jerg
+pkg_jerg_name = jerg
+pkg_jerg_description = JSON Schema to Erlang Records Generator
+pkg_jerg_homepage = https://github.com/ddossot/jerg
+pkg_jerg_fetch = git
+pkg_jerg_repo = https://github.com/ddossot/jerg
+pkg_jerg_commit = master
+
+PACKAGES += jesse
+pkg_jesse_name = jesse
+pkg_jesse_description = jesse (JSon Schema Erlang) is an implementation of a json schema validator for Erlang.
+pkg_jesse_homepage = https://github.com/for-GET/jesse
+pkg_jesse_fetch = git
+pkg_jesse_repo = https://github.com/for-GET/jesse
+pkg_jesse_commit = master
+
+PACKAGES += jiffy
+pkg_jiffy_name = jiffy
+pkg_jiffy_description = JSON NIFs for Erlang.
+pkg_jiffy_homepage = https://github.com/davisp/jiffy
+pkg_jiffy_fetch = git
+pkg_jiffy_repo = https://github.com/davisp/jiffy
+pkg_jiffy_commit = master
+
+PACKAGES += jiffy_v
+pkg_jiffy_v_name = jiffy_v
+pkg_jiffy_v_description = JSON validation utility
+pkg_jiffy_v_homepage = https://github.com/shizzard/jiffy-v
+pkg_jiffy_v_fetch = git
+pkg_jiffy_v_repo = https://github.com/shizzard/jiffy-v
+pkg_jiffy_v_commit = master
+
+PACKAGES += jobs
+pkg_jobs_name = jobs
+pkg_jobs_description = a Job scheduler for load regulation
+pkg_jobs_homepage = https://github.com/esl/jobs
+pkg_jobs_fetch = git
+pkg_jobs_repo = https://github.com/esl/jobs
+pkg_jobs_commit = master
+
+PACKAGES += joxa
+pkg_joxa_name = joxa
+pkg_joxa_description = A Modern Lisp for the Erlang VM
+pkg_joxa_homepage = https://github.com/joxa/joxa
+pkg_joxa_fetch = git
+pkg_joxa_repo = https://github.com/joxa/joxa
+pkg_joxa_commit = master
+
+PACKAGES += json
+pkg_json_name = json
+pkg_json_description = a high level json library for erlang (17.0+)
+pkg_json_homepage = https://github.com/talentdeficit/json
+pkg_json_fetch = git
+pkg_json_repo = https://github.com/talentdeficit/json
+pkg_json_commit = master
+
+PACKAGES += json_rec
+pkg_json_rec_name = json_rec
+pkg_json_rec_description = JSON to erlang record
+pkg_json_rec_homepage = https://github.com/justinkirby/json_rec
+pkg_json_rec_fetch = git
+pkg_json_rec_repo = https://github.com/justinkirby/json_rec
+pkg_json_rec_commit = master
+
+PACKAGES += jsone
+pkg_jsone_name = jsone
+pkg_jsone_description = An Erlang library for encoding, decoding JSON data.
+pkg_jsone_homepage = https://github.com/sile/jsone
+pkg_jsone_fetch = git
+pkg_jsone_repo = https://github.com/sile/jsone.git
+pkg_jsone_commit = master
+
+PACKAGES += jsonerl
+pkg_jsonerl_name = jsonerl
+pkg_jsonerl_description = yet another but slightly different erlang <-> json encoder/decoder
+pkg_jsonerl_homepage = https://github.com/lambder/jsonerl
+pkg_jsonerl_fetch = git
+pkg_jsonerl_repo = https://github.com/lambder/jsonerl
+pkg_jsonerl_commit = master
+
+PACKAGES += jsonpath
+pkg_jsonpath_name = jsonpath
+pkg_jsonpath_description = Fast Erlang JSON data retrieval and updates via javascript-like notation
+pkg_jsonpath_homepage = https://github.com/GeneStevens/jsonpath
+pkg_jsonpath_fetch = git
+pkg_jsonpath_repo = https://github.com/GeneStevens/jsonpath
+pkg_jsonpath_commit = master
+
+PACKAGES += jsonx
+pkg_jsonx_name = jsonx
+pkg_jsonx_description = JSONX is an Erlang library, written in C, for efficiently decoding and encoding JSON.
+pkg_jsonx_homepage = https://github.com/iskra/jsonx
+pkg_jsonx_fetch = git
+pkg_jsonx_repo = https://github.com/iskra/jsonx
+pkg_jsonx_commit = master
+
+PACKAGES += jsx
+pkg_jsx_name = jsx
+pkg_jsx_description = An Erlang application for consuming, producing and manipulating JSON.
+pkg_jsx_homepage = https://github.com/talentdeficit/jsx
+pkg_jsx_fetch = git
+pkg_jsx_repo = https://github.com/talentdeficit/jsx
+pkg_jsx_commit = master
+
+PACKAGES += kafka
+pkg_kafka_name = kafka
+pkg_kafka_description = Kafka consumer and producer in Erlang
+pkg_kafka_homepage = https://github.com/wooga/kafka-erlang
+pkg_kafka_fetch = git
+pkg_kafka_repo = https://github.com/wooga/kafka-erlang
+pkg_kafka_commit = master
+
+PACKAGES += kafka_protocol
+pkg_kafka_protocol_name = kafka_protocol
+pkg_kafka_protocol_description = Kafka protocol Erlang library
+pkg_kafka_protocol_homepage = https://github.com/klarna/kafka_protocol
+pkg_kafka_protocol_fetch = git
+pkg_kafka_protocol_repo = https://github.com/klarna/kafka_protocol.git
+pkg_kafka_protocol_commit = master
+
+PACKAGES += kai
+pkg_kai_name = kai
+pkg_kai_description = DHT storage by Takeshi Inoue
+pkg_kai_homepage = https://github.com/synrc/kai
+pkg_kai_fetch = git
+pkg_kai_repo = https://github.com/synrc/kai
+pkg_kai_commit = master
+
+PACKAGES += katja
+pkg_katja_name = katja
+pkg_katja_description = A simple Riemann client written in Erlang.
+pkg_katja_homepage = https://github.com/nifoc/katja
+pkg_katja_fetch = git
+pkg_katja_repo = https://github.com/nifoc/katja
+pkg_katja_commit = master
+
+PACKAGES += kdht
+pkg_kdht_name = kdht
+pkg_kdht_description = kdht is an erlang DHT implementation
+pkg_kdht_homepage = https://github.com/kevinlynx/kdht
+pkg_kdht_fetch = git
+pkg_kdht_repo = https://github.com/kevinlynx/kdht
+pkg_kdht_commit = master
+
+PACKAGES += key2value
+pkg_key2value_name = key2value
+pkg_key2value_description = Erlang 2-way map
+pkg_key2value_homepage = https://github.com/okeuday/key2value
+pkg_key2value_fetch = git
+pkg_key2value_repo = https://github.com/okeuday/key2value
+pkg_key2value_commit = master
+
+PACKAGES += keys1value
+pkg_keys1value_name = keys1value
+pkg_keys1value_description = Erlang set associative map for key lists
+pkg_keys1value_homepage = https://github.com/okeuday/keys1value
+pkg_keys1value_fetch = git
+pkg_keys1value_repo = https://github.com/okeuday/keys1value
+pkg_keys1value_commit = master
+
+PACKAGES += kinetic
+pkg_kinetic_name = kinetic
+pkg_kinetic_description = Erlang Kinesis Client
+pkg_kinetic_homepage = https://github.com/AdRoll/kinetic
+pkg_kinetic_fetch = git
+pkg_kinetic_repo = https://github.com/AdRoll/kinetic
+pkg_kinetic_commit = master
+
+PACKAGES += kjell
+pkg_kjell_name = kjell
+pkg_kjell_description = Erlang Shell
+pkg_kjell_homepage = https://github.com/karlll/kjell
+pkg_kjell_fetch = git
+pkg_kjell_repo = https://github.com/karlll/kjell
+pkg_kjell_commit = master
+
+PACKAGES += kraken
+pkg_kraken_name = kraken
+pkg_kraken_description = Distributed Pubsub Server for Realtime Apps
+pkg_kraken_homepage = https://github.com/Asana/kraken
+pkg_kraken_fetch = git
+pkg_kraken_repo = https://github.com/Asana/kraken
+pkg_kraken_commit = master
+
+PACKAGES += kucumberl
+pkg_kucumberl_name = kucumberl
+pkg_kucumberl_description = A pure-Erlang, open-source implementation of Cucumber
+pkg_kucumberl_homepage = https://github.com/openshine/kucumberl
+pkg_kucumberl_fetch = git
+pkg_kucumberl_repo = https://github.com/openshine/kucumberl
+pkg_kucumberl_commit = master
+
+PACKAGES += kvc
+pkg_kvc_name = kvc
+pkg_kvc_description = KVC - Key Value Coding for Erlang data structures
+pkg_kvc_homepage = https://github.com/etrepum/kvc
+pkg_kvc_fetch = git
+pkg_kvc_repo = https://github.com/etrepum/kvc
+pkg_kvc_commit = master
+
+PACKAGES += kvlists
+pkg_kvlists_name = kvlists
+pkg_kvlists_description = Lists of key-value pairs (decoded JSON) in Erlang
+pkg_kvlists_homepage = https://github.com/jcomellas/kvlists
+pkg_kvlists_fetch = git
+pkg_kvlists_repo = https://github.com/jcomellas/kvlists
+pkg_kvlists_commit = master
+
+PACKAGES += kvs
+pkg_kvs_name = kvs
+pkg_kvs_description = Container and Iterator
+pkg_kvs_homepage = https://github.com/synrc/kvs
+pkg_kvs_fetch = git
+pkg_kvs_repo = https://github.com/synrc/kvs
+pkg_kvs_commit = master
+
+PACKAGES += lager
+pkg_lager_name = lager
+pkg_lager_description = A logging framework for Erlang/OTP.
+pkg_lager_homepage = https://github.com/erlang-lager/lager
+pkg_lager_fetch = git
+pkg_lager_repo = https://github.com/erlang-lager/lager
+pkg_lager_commit = master
+
+PACKAGES += lager_amqp_backend
+pkg_lager_amqp_backend_name = lager_amqp_backend
+pkg_lager_amqp_backend_description = AMQP RabbitMQ Lager backend
+pkg_lager_amqp_backend_homepage = https://github.com/jbrisbin/lager_amqp_backend
+pkg_lager_amqp_backend_fetch = git
+pkg_lager_amqp_backend_repo = https://github.com/jbrisbin/lager_amqp_backend
+pkg_lager_amqp_backend_commit = master
+
+PACKAGES += lager_syslog
+pkg_lager_syslog_name = lager_syslog
+pkg_lager_syslog_description = Syslog backend for lager
+pkg_lager_syslog_homepage = https://github.com/erlang-lager/lager_syslog
+pkg_lager_syslog_fetch = git
+pkg_lager_syslog_repo = https://github.com/erlang-lager/lager_syslog
+pkg_lager_syslog_commit = master
+
+PACKAGES += lambdapad
+pkg_lambdapad_name = lambdapad
+pkg_lambdapad_description = Static site generator using Erlang. Yes, Erlang.
+pkg_lambdapad_homepage = https://github.com/gar1t/lambdapad
+pkg_lambdapad_fetch = git
+pkg_lambdapad_repo = https://github.com/gar1t/lambdapad
+pkg_lambdapad_commit = master
+
+PACKAGES += lasp
+pkg_lasp_name = lasp
+pkg_lasp_description = A Language for Distributed, Eventually Consistent Computations
+pkg_lasp_homepage = http://lasp-lang.org/
+pkg_lasp_fetch = git
+pkg_lasp_repo = https://github.com/lasp-lang/lasp
+pkg_lasp_commit = master
+
+PACKAGES += lasse
+pkg_lasse_name = lasse
+pkg_lasse_description = SSE handler for Cowboy
+pkg_lasse_homepage = https://github.com/inaka/lasse
+pkg_lasse_fetch = git
+pkg_lasse_repo = https://github.com/inaka/lasse
+pkg_lasse_commit = master
+
+PACKAGES += ldap
+pkg_ldap_name = ldap
+pkg_ldap_description = LDAP server written in Erlang
+pkg_ldap_homepage = https://github.com/spawnproc/ldap
+pkg_ldap_fetch = git
+pkg_ldap_repo = https://github.com/spawnproc/ldap
+pkg_ldap_commit = master
+
+PACKAGES += lethink
+pkg_lethink_name = lethink
+pkg_lethink_description = erlang driver for rethinkdb
+pkg_lethink_homepage = https://github.com/taybin/lethink
+pkg_lethink_fetch = git
+pkg_lethink_repo = https://github.com/taybin/lethink
+pkg_lethink_commit = master
+
+PACKAGES += lfe
+pkg_lfe_name = lfe
+pkg_lfe_description = Lisp Flavoured Erlang (LFE)
+pkg_lfe_homepage = https://github.com/rvirding/lfe
+pkg_lfe_fetch = git
+pkg_lfe_repo = https://github.com/rvirding/lfe
+pkg_lfe_commit = master
+
+PACKAGES += ling
+pkg_ling_name = ling
+pkg_ling_description = Erlang on Xen
+pkg_ling_homepage = https://github.com/cloudozer/ling
+pkg_ling_fetch = git
+pkg_ling_repo = https://github.com/cloudozer/ling
+pkg_ling_commit = master
+
+PACKAGES += live
+pkg_live_name = live
+pkg_live_description = Automated module and configuration reloader.
+pkg_live_homepage = http://ninenines.eu
+pkg_live_fetch = git
+pkg_live_repo = https://github.com/ninenines/live
+pkg_live_commit = master
+
+PACKAGES += lmq
+pkg_lmq_name = lmq
+pkg_lmq_description = Lightweight Message Queue
+pkg_lmq_homepage = https://github.com/iij/lmq
+pkg_lmq_fetch = git
+pkg_lmq_repo = https://github.com/iij/lmq
+pkg_lmq_commit = master
+
+PACKAGES += locker
+pkg_locker_name = locker
+pkg_locker_description = Atomic distributed 'check and set' for short-lived keys
+pkg_locker_homepage = https://github.com/wooga/locker
+pkg_locker_fetch = git
+pkg_locker_repo = https://github.com/wooga/locker
+pkg_locker_commit = master
+
+PACKAGES += locks
+pkg_locks_name = locks
+pkg_locks_description = A scalable, deadlock-resolving resource locker
+pkg_locks_homepage = https://github.com/uwiger/locks
+pkg_locks_fetch = git
+pkg_locks_repo = https://github.com/uwiger/locks
+pkg_locks_commit = master
+
+PACKAGES += log4erl
+pkg_log4erl_name = log4erl
+pkg_log4erl_description = A logger for erlang in the spirit of Log4J.
+pkg_log4erl_homepage = https://github.com/ahmednawras/log4erl
+pkg_log4erl_fetch = git
+pkg_log4erl_repo = https://github.com/ahmednawras/log4erl
+pkg_log4erl_commit = master
+
+PACKAGES += lol
+pkg_lol_name = lol
+pkg_lol_description = Lisp on erLang, and programming is fun again
+pkg_lol_homepage = https://github.com/b0oh/lol
+pkg_lol_fetch = git
+pkg_lol_repo = https://github.com/b0oh/lol
+pkg_lol_commit = master
+
+PACKAGES += lucid
+pkg_lucid_name = lucid
+pkg_lucid_description = HTTP/2 server written in Erlang
+pkg_lucid_homepage = https://github.com/tatsuhiro-t/lucid
+pkg_lucid_fetch = git
+pkg_lucid_repo = https://github.com/tatsuhiro-t/lucid
+pkg_lucid_commit = master
+
+PACKAGES += luerl
+pkg_luerl_name = luerl
+pkg_luerl_description = Lua in Erlang
+pkg_luerl_homepage = https://github.com/rvirding/luerl
+pkg_luerl_fetch = git
+pkg_luerl_repo = https://github.com/rvirding/luerl
+pkg_luerl_commit = develop
+
+PACKAGES += luwak
+pkg_luwak_name = luwak
+pkg_luwak_description = Large-object storage interface for Riak
+pkg_luwak_homepage = https://github.com/basho/luwak
+pkg_luwak_fetch = git
+pkg_luwak_repo = https://github.com/basho/luwak
+pkg_luwak_commit = master
+
+PACKAGES += lux
+pkg_lux_name = lux
+pkg_lux_description = Lux (LUcid eXpect scripting) simplifies test automation and provides an Expect-style execution of commands
+pkg_lux_homepage = https://github.com/hawk/lux
+pkg_lux_fetch = git
+pkg_lux_repo = https://github.com/hawk/lux
+pkg_lux_commit = master
+
+PACKAGES += machi
+pkg_machi_name = machi
+pkg_machi_description = Machi file store
+pkg_machi_homepage = https://github.com/basho/machi
+pkg_machi_fetch = git
+pkg_machi_repo = https://github.com/basho/machi
+pkg_machi_commit = master
+
+PACKAGES += mad
+pkg_mad_name = mad
+pkg_mad_description = Small and Fast Rebar Replacement
+pkg_mad_homepage = https://github.com/synrc/mad
+pkg_mad_fetch = git
+pkg_mad_repo = https://github.com/synrc/mad
+pkg_mad_commit = master
+
+PACKAGES += marina
+pkg_marina_name = marina
+pkg_marina_description = Non-blocking Erlang Cassandra CQL3 client
+pkg_marina_homepage = https://github.com/lpgauth/marina
+pkg_marina_fetch = git
+pkg_marina_repo = https://github.com/lpgauth/marina
+pkg_marina_commit = master
+
+PACKAGES += mavg
+pkg_mavg_name = mavg
+pkg_mavg_description = Erlang :: Exponential moving average library
+pkg_mavg_homepage = https://github.com/EchoTeam/mavg
+pkg_mavg_fetch = git
+pkg_mavg_repo = https://github.com/EchoTeam/mavg
+pkg_mavg_commit = master
+
+PACKAGES += mc_erl
+pkg_mc_erl_name = mc_erl
+pkg_mc_erl_description = mc-erl is a server for Minecraft 1.4.7 written in Erlang.
+pkg_mc_erl_homepage = https://github.com/clonejo/mc-erl
+pkg_mc_erl_fetch = git
+pkg_mc_erl_repo = https://github.com/clonejo/mc-erl
+pkg_mc_erl_commit = master
+
+PACKAGES += mcd
+pkg_mcd_name = mcd
+pkg_mcd_description = Fast memcached protocol client in pure Erlang
+pkg_mcd_homepage = https://github.com/EchoTeam/mcd
+pkg_mcd_fetch = git
+pkg_mcd_repo = https://github.com/EchoTeam/mcd
+pkg_mcd_commit = master
+
+PACKAGES += mcerlang
+pkg_mcerlang_name = mcerlang
+pkg_mcerlang_description = The McErlang model checker for Erlang
+pkg_mcerlang_homepage = https://github.com/fredlund/McErlang
+pkg_mcerlang_fetch = git
+pkg_mcerlang_repo = https://github.com/fredlund/McErlang
+pkg_mcerlang_commit = master
+
+PACKAGES += meck
+pkg_meck_name = meck
+pkg_meck_description = A mocking library for Erlang
+pkg_meck_homepage = https://github.com/eproxus/meck
+pkg_meck_fetch = git
+pkg_meck_repo = https://github.com/eproxus/meck
+pkg_meck_commit = master
+
+PACKAGES += mekao
+pkg_mekao_name = mekao
+pkg_mekao_description = SQL constructor
+pkg_mekao_homepage = https://github.com/ddosia/mekao
+pkg_mekao_fetch = git
+pkg_mekao_repo = https://github.com/ddosia/mekao
+pkg_mekao_commit = master
+
+PACKAGES += memo
+pkg_memo_name = memo
+pkg_memo_description = Erlang memoization server
+pkg_memo_homepage = https://github.com/tuncer/memo
+pkg_memo_fetch = git
+pkg_memo_repo = https://github.com/tuncer/memo
+pkg_memo_commit = master
+
+PACKAGES += merge_index
+pkg_merge_index_name = merge_index
+pkg_merge_index_description = MergeIndex is an Erlang library for storing ordered sets on disk. It is very similar to an SSTable (in Google's Bigtable) or an HFile (in Hadoop).
+pkg_merge_index_homepage = https://github.com/basho/merge_index
+pkg_merge_index_fetch = git
+pkg_merge_index_repo = https://github.com/basho/merge_index
+pkg_merge_index_commit = master
+
+PACKAGES += merl
+pkg_merl_name = merl
+pkg_merl_description = Metaprogramming in Erlang
+pkg_merl_homepage = https://github.com/richcarl/merl
+pkg_merl_fetch = git
+pkg_merl_repo = https://github.com/richcarl/merl
+pkg_merl_commit = master
+
+PACKAGES += mimerl
+pkg_mimerl_name = mimerl
+pkg_mimerl_description = library to handle mimetypes
+pkg_mimerl_homepage = https://github.com/benoitc/mimerl
+pkg_mimerl_fetch = git
+pkg_mimerl_repo = https://github.com/benoitc/mimerl
+pkg_mimerl_commit = master
+
+PACKAGES += mimetypes
+pkg_mimetypes_name = mimetypes
+pkg_mimetypes_description = Erlang MIME types library
+pkg_mimetypes_homepage = https://github.com/spawngrid/mimetypes
+pkg_mimetypes_fetch = git
+pkg_mimetypes_repo = https://github.com/spawngrid/mimetypes
+pkg_mimetypes_commit = master
+
+PACKAGES += mixer
+pkg_mixer_name = mixer
+pkg_mixer_description = Mix in functions from other modules
+pkg_mixer_homepage = https://github.com/chef/mixer
+pkg_mixer_fetch = git
+pkg_mixer_repo = https://github.com/chef/mixer
+pkg_mixer_commit = master
+
+PACKAGES += mochiweb
+pkg_mochiweb_name = mochiweb
+pkg_mochiweb_description = MochiWeb is an Erlang library for building lightweight HTTP servers.
+pkg_mochiweb_homepage = https://github.com/mochi/mochiweb
+pkg_mochiweb_fetch = git
+pkg_mochiweb_repo = https://github.com/mochi/mochiweb
+pkg_mochiweb_commit = master
+
+PACKAGES += mochiweb_xpath
+pkg_mochiweb_xpath_name = mochiweb_xpath
+pkg_mochiweb_xpath_description = XPath support for mochiweb's html parser
+pkg_mochiweb_xpath_homepage = https://github.com/retnuh/mochiweb_xpath
+pkg_mochiweb_xpath_fetch = git
+pkg_mochiweb_xpath_repo = https://github.com/retnuh/mochiweb_xpath
+pkg_mochiweb_xpath_commit = master
+
+PACKAGES += mockgyver
+pkg_mockgyver_name = mockgyver
+pkg_mockgyver_description = A mocking library for Erlang
+pkg_mockgyver_homepage = https://github.com/klajo/mockgyver
+pkg_mockgyver_fetch = git
+pkg_mockgyver_repo = https://github.com/klajo/mockgyver
+pkg_mockgyver_commit = master
+
+PACKAGES += modlib
+pkg_modlib_name = modlib
+pkg_modlib_description = Web framework based on Erlang's inets httpd
+pkg_modlib_homepage = https://github.com/gar1t/modlib
+pkg_modlib_fetch = git
+pkg_modlib_repo = https://github.com/gar1t/modlib
+pkg_modlib_commit = master
+
+PACKAGES += mongodb
+pkg_mongodb_name = mongodb
+pkg_mongodb_description = MongoDB driver for Erlang
+pkg_mongodb_homepage = https://github.com/comtihon/mongodb-erlang
+pkg_mongodb_fetch = git
+pkg_mongodb_repo = https://github.com/comtihon/mongodb-erlang
+pkg_mongodb_commit = master
+
+PACKAGES += mongooseim
+pkg_mongooseim_name = mongooseim
+pkg_mongooseim_description = Jabber / XMPP server with focus on performance and scalability, by Erlang Solutions
+pkg_mongooseim_homepage = https://www.erlang-solutions.com/products/mongooseim-massively-scalable-ejabberd-platform
+pkg_mongooseim_fetch = git
+pkg_mongooseim_repo = https://github.com/esl/MongooseIM
+pkg_mongooseim_commit = master
+
+PACKAGES += moyo
+pkg_moyo_name = moyo
+pkg_moyo_description = Erlang utility functions library
+pkg_moyo_homepage = https://github.com/dwango/moyo
+pkg_moyo_fetch = git
+pkg_moyo_repo = https://github.com/dwango/moyo
+pkg_moyo_commit = master
+
+PACKAGES += msgpack
+pkg_msgpack_name = msgpack
+pkg_msgpack_description = MessagePack (de)serializer implementation for Erlang
+pkg_msgpack_homepage = https://github.com/msgpack/msgpack-erlang
+pkg_msgpack_fetch = git
+pkg_msgpack_repo = https://github.com/msgpack/msgpack-erlang
+pkg_msgpack_commit = master
+
+PACKAGES += mu2
+pkg_mu2_name = mu2
+pkg_mu2_description = Erlang mutation testing tool
+pkg_mu2_homepage = https://github.com/ramsay-t/mu2
+pkg_mu2_fetch = git
+pkg_mu2_repo = https://github.com/ramsay-t/mu2
+pkg_mu2_commit = master
+
+PACKAGES += mustache
+pkg_mustache_name = mustache
+pkg_mustache_description = Mustache template engine for Erlang.
+pkg_mustache_homepage = https://github.com/mojombo/mustache.erl
+pkg_mustache_fetch = git
+pkg_mustache_repo = https://github.com/mojombo/mustache.erl
+pkg_mustache_commit = master
+
+PACKAGES += myproto
+pkg_myproto_name = myproto
+pkg_myproto_description = MySQL Server Protocol in Erlang
+pkg_myproto_homepage = https://github.com/altenwald/myproto
+pkg_myproto_fetch = git
+pkg_myproto_repo = https://github.com/altenwald/myproto
+pkg_myproto_commit = master
+
+PACKAGES += mysql
+pkg_mysql_name = mysql
+pkg_mysql_description = MySQL client library for Erlang/OTP
+pkg_mysql_homepage = https://github.com/mysql-otp/mysql-otp
+pkg_mysql_fetch = git
+pkg_mysql_repo = https://github.com/mysql-otp/mysql-otp
+pkg_mysql_commit = 1.5.1
+
+PACKAGES += n2o
+pkg_n2o_name = n2o
+pkg_n2o_description = WebSocket Application Server
+pkg_n2o_homepage = https://github.com/5HT/n2o
+pkg_n2o_fetch = git
+pkg_n2o_repo = https://github.com/5HT/n2o
+pkg_n2o_commit = master
+
+PACKAGES += nat_upnp
+pkg_nat_upnp_name = nat_upnp
+pkg_nat_upnp_description = Erlang library to map your internal port to an external one using UPnP IGD
+pkg_nat_upnp_homepage = https://github.com/benoitc/nat_upnp
+pkg_nat_upnp_fetch = git
+pkg_nat_upnp_repo = https://github.com/benoitc/nat_upnp
+pkg_nat_upnp_commit = master
+
+PACKAGES += neo4j
+pkg_neo4j_name = neo4j
+pkg_neo4j_description = Erlang client library for Neo4J.
+pkg_neo4j_homepage = https://github.com/dmitriid/neo4j-erlang
+pkg_neo4j_fetch = git
+pkg_neo4j_repo = https://github.com/dmitriid/neo4j-erlang
+pkg_neo4j_commit = master
+
+PACKAGES += neotoma
+pkg_neotoma_name = neotoma
+pkg_neotoma_description = Erlang library and packrat parser-generator for parsing expression grammars.
+pkg_neotoma_homepage = https://github.com/seancribbs/neotoma
+pkg_neotoma_fetch = git
+pkg_neotoma_repo = https://github.com/seancribbs/neotoma
+pkg_neotoma_commit = master
+
+PACKAGES += newrelic
+pkg_newrelic_name = newrelic
+pkg_newrelic_description = Erlang library for sending metrics to New Relic
+pkg_newrelic_homepage = https://github.com/wooga/newrelic-erlang
+pkg_newrelic_fetch = git
+pkg_newrelic_repo = https://github.com/wooga/newrelic-erlang
+pkg_newrelic_commit = master
+
+PACKAGES += nifty
+pkg_nifty_name = nifty
+pkg_nifty_description = Erlang NIF wrapper generator
+pkg_nifty_homepage = https://github.com/parapluu/nifty
+pkg_nifty_fetch = git
+pkg_nifty_repo = https://github.com/parapluu/nifty
+pkg_nifty_commit = master
+
+PACKAGES += nitrogen_core
+pkg_nitrogen_core_name = nitrogen_core
+pkg_nitrogen_core_description = The core Nitrogen library.
+pkg_nitrogen_core_homepage = http://nitrogenproject.com/
+pkg_nitrogen_core_fetch = git
+pkg_nitrogen_core_repo = https://github.com/nitrogen/nitrogen_core
+pkg_nitrogen_core_commit = master
+
+PACKAGES += nkbase
+pkg_nkbase_name = nkbase
+pkg_nkbase_description = NkBASE distributed database
+pkg_nkbase_homepage = https://github.com/Nekso/nkbase
+pkg_nkbase_fetch = git
+pkg_nkbase_repo = https://github.com/Nekso/nkbase
+pkg_nkbase_commit = develop
+
+PACKAGES += nkdocker
+pkg_nkdocker_name = nkdocker
+pkg_nkdocker_description = Erlang Docker client
+pkg_nkdocker_homepage = https://github.com/Nekso/nkdocker
+pkg_nkdocker_fetch = git
+pkg_nkdocker_repo = https://github.com/Nekso/nkdocker
+pkg_nkdocker_commit = master
+
+PACKAGES += nkpacket
+pkg_nkpacket_name = nkpacket
+pkg_nkpacket_description = Generic Erlang transport layer
+pkg_nkpacket_homepage = https://github.com/Nekso/nkpacket
+pkg_nkpacket_fetch = git
+pkg_nkpacket_repo = https://github.com/Nekso/nkpacket
+pkg_nkpacket_commit = master
+
+PACKAGES += nksip
+pkg_nksip_name = nksip
+pkg_nksip_description = Erlang SIP application server
+pkg_nksip_homepage = https://github.com/kalta/nksip
+pkg_nksip_fetch = git
+pkg_nksip_repo = https://github.com/kalta/nksip
+pkg_nksip_commit = master
+
+PACKAGES += nodefinder
+pkg_nodefinder_name = nodefinder
+pkg_nodefinder_description = automatic node discovery via UDP multicast
+pkg_nodefinder_homepage = https://github.com/erlanger/nodefinder
+pkg_nodefinder_fetch = git
+pkg_nodefinder_repo = https://github.com/okeuday/nodefinder
+pkg_nodefinder_commit = master
+
+PACKAGES += nprocreg
+pkg_nprocreg_name = nprocreg
+pkg_nprocreg_description = Minimal Distributed Erlang Process Registry
+pkg_nprocreg_homepage = http://nitrogenproject.com/
+pkg_nprocreg_fetch = git
+pkg_nprocreg_repo = https://github.com/nitrogen/nprocreg
+pkg_nprocreg_commit = master
+
+PACKAGES += oauth
+pkg_oauth_name = oauth
+pkg_oauth_description = An Erlang OAuth 1.0 implementation
+pkg_oauth_homepage = https://github.com/tim/erlang-oauth
+pkg_oauth_fetch = git
+pkg_oauth_repo = https://github.com/tim/erlang-oauth
+pkg_oauth_commit = master
+
+PACKAGES += oauth2
+pkg_oauth2_name = oauth2
+pkg_oauth2_description = Erlang OAuth2 implementation
+pkg_oauth2_homepage = https://github.com/kivra/oauth2
+pkg_oauth2_fetch = git
+pkg_oauth2_repo = https://github.com/kivra/oauth2
+pkg_oauth2_commit = master
+
+PACKAGES += observer_cli
+pkg_observer_cli_name = observer_cli
+pkg_observer_cli_description = Visualize Erlang/Elixir Nodes On The Command Line
+pkg_observer_cli_homepage = http://zhongwencool.github.io/observer_cli
+pkg_observer_cli_fetch = git
+pkg_observer_cli_repo = https://github.com/zhongwencool/observer_cli
+pkg_observer_cli_commit = master
+
+PACKAGES += octopus
+pkg_octopus_name = octopus
+pkg_octopus_description = Small and flexible pool manager written in Erlang
+pkg_octopus_homepage = https://github.com/erlangbureau/octopus
+pkg_octopus_fetch = git
+pkg_octopus_repo = https://github.com/erlangbureau/octopus
+pkg_octopus_commit = master
+
+PACKAGES += of_protocol
+pkg_of_protocol_name = of_protocol
+pkg_of_protocol_description = OpenFlow Protocol Library for Erlang
+pkg_of_protocol_homepage = https://github.com/FlowForwarding/of_protocol
+pkg_of_protocol_fetch = git
+pkg_of_protocol_repo = https://github.com/FlowForwarding/of_protocol
+pkg_of_protocol_commit = master
+
+PACKAGES += opencouch
+pkg_opencouch_name = couch
+pkg_opencouch_description = An embeddable document-oriented database compatible with Apache CouchDB
+pkg_opencouch_homepage = https://github.com/benoitc/opencouch
+pkg_opencouch_fetch = git
+pkg_opencouch_repo = https://github.com/benoitc/opencouch
+pkg_opencouch_commit = master
+
+PACKAGES += openflow
+pkg_openflow_name = openflow
+pkg_openflow_description = An OpenFlow controller written in pure Erlang
+pkg_openflow_homepage = https://github.com/renatoaguiar/erlang-openflow
+pkg_openflow_fetch = git
+pkg_openflow_repo = https://github.com/renatoaguiar/erlang-openflow
+pkg_openflow_commit = master
+
+PACKAGES += openid
+pkg_openid_name = openid
+pkg_openid_description = Erlang OpenID
+pkg_openid_homepage = https://github.com/brendonh/erl_openid
+pkg_openid_fetch = git
+pkg_openid_repo = https://github.com/brendonh/erl_openid
+pkg_openid_commit = master
+
+PACKAGES += openpoker
+pkg_openpoker_name = openpoker
+pkg_openpoker_description = Genesis Texas hold'em Game Server
+pkg_openpoker_homepage = https://github.com/hpyhacking/openpoker
+pkg_openpoker_fetch = git
+pkg_openpoker_repo = https://github.com/hpyhacking/openpoker
+pkg_openpoker_commit = master
+
+PACKAGES += otpbp
+pkg_otpbp_name = otpbp
+pkg_otpbp_description = Parse transform for using new OTP functions in old Erlang/OTP releases (R15, R16, 17, 18, 19)
+pkg_otpbp_homepage = https://github.com/Ledest/otpbp
+pkg_otpbp_fetch = git
+pkg_otpbp_repo = https://github.com/Ledest/otpbp
+pkg_otpbp_commit = master
+
+PACKAGES += pal
+pkg_pal_name = pal
+pkg_pal_description = Pragmatic Authentication Library
+pkg_pal_homepage = https://github.com/manifest/pal
+pkg_pal_fetch = git
+pkg_pal_repo = https://github.com/manifest/pal
+pkg_pal_commit = master
+
+PACKAGES += parse_trans
+pkg_parse_trans_name = parse_trans
+pkg_parse_trans_description = Parse transform utilities for Erlang
+pkg_parse_trans_homepage = https://github.com/uwiger/parse_trans
+pkg_parse_trans_fetch = git
+pkg_parse_trans_repo = https://github.com/uwiger/parse_trans
+pkg_parse_trans_commit = master
+
+PACKAGES += parsexml
+pkg_parsexml_name = parsexml
+pkg_parsexml_description = Simple DOM XML parser with convenient and very simple API
+pkg_parsexml_homepage = https://github.com/maxlapshin/parsexml
+pkg_parsexml_fetch = git
+pkg_parsexml_repo = https://github.com/maxlapshin/parsexml
+pkg_parsexml_commit = master
+
+PACKAGES += partisan
+pkg_partisan_name = partisan
+pkg_partisan_description = High-performance, high-scalability distributed computing with Erlang and Elixir.
+pkg_partisan_homepage = http://partisan.cloud
+pkg_partisan_fetch = git
+pkg_partisan_repo = https://github.com/lasp-lang/partisan
+pkg_partisan_commit = master
+
+PACKAGES += pegjs
+pkg_pegjs_name = pegjs
+pkg_pegjs_description = An implementation of PEG.js grammar for Erlang.
+pkg_pegjs_homepage = https://github.com/dmitriid/pegjs
+pkg_pegjs_fetch = git
+pkg_pegjs_repo = https://github.com/dmitriid/pegjs
+pkg_pegjs_commit = master
+
+PACKAGES += percept2
+pkg_percept2_name = percept2
+pkg_percept2_description = Concurrent profiling tool for Erlang
+pkg_percept2_homepage = https://github.com/huiqing/percept2
+pkg_percept2_fetch = git
+pkg_percept2_repo = https://github.com/huiqing/percept2
+pkg_percept2_commit = master
+
+PACKAGES += pgo
+pkg_pgo_name = pgo
+pkg_pgo_description = Erlang Postgres client and connection pool
+pkg_pgo_homepage = https://github.com/erleans/pgo.git
+pkg_pgo_fetch = git
+pkg_pgo_repo = https://github.com/erleans/pgo.git
+pkg_pgo_commit = master
+
+PACKAGES += pgsql
+pkg_pgsql_name = pgsql
+pkg_pgsql_description = Erlang PostgreSQL driver
+pkg_pgsql_homepage = https://github.com/semiocast/pgsql
+pkg_pgsql_fetch = git
+pkg_pgsql_repo = https://github.com/semiocast/pgsql
+pkg_pgsql_commit = master
+
+PACKAGES += pkgx
+pkg_pkgx_name = pkgx
+pkg_pkgx_description = Build .deb packages from Erlang releases
+pkg_pkgx_homepage = https://github.com/arjan/pkgx
+pkg_pkgx_fetch = git
+pkg_pkgx_repo = https://github.com/arjan/pkgx
+pkg_pkgx_commit = master
+
+PACKAGES += pkt
+pkg_pkt_name = pkt
+pkg_pkt_description = Erlang network protocol library
+pkg_pkt_homepage = https://github.com/msantos/pkt
+pkg_pkt_fetch = git
+pkg_pkt_repo = https://github.com/msantos/pkt
+pkg_pkt_commit = master
+
+PACKAGES += plain_fsm
+pkg_plain_fsm_name = plain_fsm
+pkg_plain_fsm_description = A behaviour/support library for writing plain Erlang FSMs.
+pkg_plain_fsm_homepage = https://github.com/uwiger/plain_fsm
+pkg_plain_fsm_fetch = git
+pkg_plain_fsm_repo = https://github.com/uwiger/plain_fsm
+pkg_plain_fsm_commit = master
+
+PACKAGES += plumtree
+pkg_plumtree_name = plumtree
+pkg_plumtree_description = Epidemic Broadcast Trees
+pkg_plumtree_homepage = https://github.com/helium/plumtree
+pkg_plumtree_fetch = git
+pkg_plumtree_repo = https://github.com/helium/plumtree
+pkg_plumtree_commit = master
+
+PACKAGES += pmod_transform
+pkg_pmod_transform_name = pmod_transform
+pkg_pmod_transform_description = Parse transform for parameterized modules
+pkg_pmod_transform_homepage = https://github.com/erlang/pmod_transform
+pkg_pmod_transform_fetch = git
+pkg_pmod_transform_repo = https://github.com/erlang/pmod_transform
+pkg_pmod_transform_commit = master
+
+PACKAGES += pobox
+pkg_pobox_name = pobox
+pkg_pobox_description = External buffer processes to protect against mailbox overflow in Erlang
+pkg_pobox_homepage = https://github.com/ferd/pobox
+pkg_pobox_fetch = git
+pkg_pobox_repo = https://github.com/ferd/pobox
+pkg_pobox_commit = master
+
+PACKAGES += ponos
+pkg_ponos_name = ponos
+pkg_ponos_description = ponos is a simple yet powerful load generator written in Erlang
+pkg_ponos_homepage = https://github.com/klarna/ponos
+pkg_ponos_fetch = git
+pkg_ponos_repo = https://github.com/klarna/ponos
+pkg_ponos_commit = master
+
+PACKAGES += poolboy
+pkg_poolboy_name = poolboy
+pkg_poolboy_description = A hunky Erlang worker pool factory
+pkg_poolboy_homepage = https://github.com/devinus/poolboy
+pkg_poolboy_fetch = git
+pkg_poolboy_repo = https://github.com/devinus/poolboy
+pkg_poolboy_commit = master
+
+PACKAGES += pooler
+pkg_pooler_name = pooler
+pkg_pooler_description = An OTP Process Pool Application
+pkg_pooler_homepage = https://github.com/seth/pooler
+pkg_pooler_fetch = git
+pkg_pooler_repo = https://github.com/seth/pooler
+pkg_pooler_commit = master
+
+PACKAGES += pqueue
+pkg_pqueue_name = pqueue
+pkg_pqueue_description = Erlang Priority Queues
+pkg_pqueue_homepage = https://github.com/okeuday/pqueue
+pkg_pqueue_fetch = git
+pkg_pqueue_repo = https://github.com/okeuday/pqueue
+pkg_pqueue_commit = master
+
+PACKAGES += procket
+pkg_procket_name = procket
+pkg_procket_description = Erlang interface to low level socket operations
+pkg_procket_homepage = http://blog.listincomprehension.com/search/label/procket
+pkg_procket_fetch = git
+pkg_procket_repo = https://github.com/msantos/procket
+pkg_procket_commit = master
+
+PACKAGES += prometheus
+pkg_prometheus_name = prometheus
+pkg_prometheus_description = Prometheus.io client in Erlang
+pkg_prometheus_homepage = https://github.com/deadtrickster/prometheus.erl
+pkg_prometheus_fetch = git
+pkg_prometheus_repo = https://github.com/deadtrickster/prometheus.erl
+pkg_prometheus_commit = master
+
+PACKAGES += prop
+pkg_prop_name = prop
+pkg_prop_description = An Erlang code scaffolding and generator system.
+pkg_prop_homepage = https://github.com/nuex/prop
+pkg_prop_fetch = git
+pkg_prop_repo = https://github.com/nuex/prop
+pkg_prop_commit = master
+
+PACKAGES += proper
+pkg_proper_name = proper
+pkg_proper_description = PropEr: a QuickCheck-inspired property-based testing tool for Erlang.
+pkg_proper_homepage = http://proper.softlab.ntua.gr
+pkg_proper_fetch = git
+pkg_proper_repo = https://github.com/manopapad/proper
+pkg_proper_commit = master
+
+PACKAGES += props
+pkg_props_name = props
+pkg_props_description = Property structure library
+pkg_props_homepage = https://github.com/greyarea/props
+pkg_props_fetch = git
+pkg_props_repo = https://github.com/greyarea/props
+pkg_props_commit = master
+
+PACKAGES += protobuffs
+pkg_protobuffs_name = protobuffs
+pkg_protobuffs_description = An implementation of Google's Protocol Buffers for Erlang, based on ngerakines/erlang_protobuffs.
+pkg_protobuffs_homepage = https://github.com/basho/erlang_protobuffs
+pkg_protobuffs_fetch = git
+pkg_protobuffs_repo = https://github.com/basho/erlang_protobuffs
+pkg_protobuffs_commit = master
+
+PACKAGES += psycho
+pkg_psycho_name = psycho
+pkg_psycho_description = HTTP server that provides a WSGI-like interface for applications and middleware.
+pkg_psycho_homepage = https://github.com/gar1t/psycho
+pkg_psycho_fetch = git
+pkg_psycho_repo = https://github.com/gar1t/psycho
+pkg_psycho_commit = master
+
+PACKAGES += purity
+pkg_purity_name = purity
+pkg_purity_description = A side-effect analyzer for Erlang
+pkg_purity_homepage = https://github.com/mpitid/purity
+pkg_purity_fetch = git
+pkg_purity_repo = https://github.com/mpitid/purity
+pkg_purity_commit = master
+
+PACKAGES += push_service
+pkg_push_service_name = push_service
+pkg_push_service_description = Push service
+pkg_push_service_homepage = https://github.com/hairyhum/push_service
+pkg_push_service_fetch = git
+pkg_push_service_repo = https://github.com/hairyhum/push_service
+pkg_push_service_commit = master
+
+PACKAGES += qdate
+pkg_qdate_name = qdate
+pkg_qdate_description = Date, time, and timezone parsing, formatting, and conversion for Erlang.
+pkg_qdate_homepage = https://github.com/choptastic/qdate
+pkg_qdate_fetch = git
+pkg_qdate_repo = https://github.com/choptastic/qdate
+pkg_qdate_commit = master
+
+PACKAGES += qrcode
+pkg_qrcode_name = qrcode
+pkg_qrcode_description = QR Code encoder in Erlang
+pkg_qrcode_homepage = https://github.com/komone/qrcode
+pkg_qrcode_fetch = git
+pkg_qrcode_repo = https://github.com/komone/qrcode
+pkg_qrcode_commit = master
+
+PACKAGES += quest
+pkg_quest_name = quest
+pkg_quest_description = Learn Erlang through this set of challenges. An interactive system for getting to know Erlang.
+pkg_quest_homepage = https://github.com/eriksoe/ErlangQuest
+pkg_quest_fetch = git
+pkg_quest_repo = https://github.com/eriksoe/ErlangQuest
+pkg_quest_commit = master
+
+PACKAGES += quickrand
+pkg_quickrand_name = quickrand
+pkg_quickrand_description = Quick Erlang Random Number Generation
+pkg_quickrand_homepage = https://github.com/okeuday/quickrand
+pkg_quickrand_fetch = git
+pkg_quickrand_repo = https://github.com/okeuday/quickrand
+pkg_quickrand_commit = master
+
+PACKAGES += rabbit
+pkg_rabbit_name = rabbit
+pkg_rabbit_description = RabbitMQ Server
+pkg_rabbit_homepage = https://www.rabbitmq.com/
+pkg_rabbit_fetch = git
+pkg_rabbit_repo = https://github.com/rabbitmq/rabbitmq-server.git
+pkg_rabbit_commit = master
+
+PACKAGES += rabbit_exchange_type_riak
+pkg_rabbit_exchange_type_riak_name = rabbit_exchange_type_riak
+pkg_rabbit_exchange_type_riak_description = Custom RabbitMQ exchange type for sticking messages in Riak
+pkg_rabbit_exchange_type_riak_homepage = https://github.com/jbrisbin/riak-exchange
+pkg_rabbit_exchange_type_riak_fetch = git
+pkg_rabbit_exchange_type_riak_repo = https://github.com/jbrisbin/riak-exchange
+pkg_rabbit_exchange_type_riak_commit = master
+
+PACKAGES += rack
+pkg_rack_name = rack
+pkg_rack_description = Rack handler for Erlang
+pkg_rack_homepage = https://github.com/erlyvideo/rack
+pkg_rack_fetch = git
+pkg_rack_repo = https://github.com/erlyvideo/rack
+pkg_rack_commit = master
+
+PACKAGES += radierl
+pkg_radierl_name = radierl
+pkg_radierl_description = RADIUS protocol stack implemented in Erlang.
+pkg_radierl_homepage = https://github.com/vances/radierl
+pkg_radierl_fetch = git
+pkg_radierl_repo = https://github.com/vances/radierl
+pkg_radierl_commit = master
+
+PACKAGES += rafter
+pkg_rafter_name = rafter
+pkg_rafter_description = An Erlang library application which implements the Raft consensus protocol
+pkg_rafter_homepage = https://github.com/andrewjstone/rafter
+pkg_rafter_fetch = git
+pkg_rafter_repo = https://github.com/andrewjstone/rafter
+pkg_rafter_commit = master
+
+PACKAGES += ranch
+pkg_ranch_name = ranch
+pkg_ranch_description = Socket acceptor pool for TCP protocols.
+pkg_ranch_homepage = http://ninenines.eu
+pkg_ranch_fetch = git
+pkg_ranch_repo = https://github.com/ninenines/ranch
+pkg_ranch_commit = 1.2.1
+
+PACKAGES += rbeacon
+pkg_rbeacon_name = rbeacon
+pkg_rbeacon_description = LAN discovery and presence in Erlang.
+pkg_rbeacon_homepage = https://github.com/refuge/rbeacon
+pkg_rbeacon_fetch = git
+pkg_rbeacon_repo = https://github.com/refuge/rbeacon
+pkg_rbeacon_commit = master
+
+PACKAGES += rebar
+pkg_rebar_name = rebar
+pkg_rebar_description = Erlang build tool that makes it easy to compile and test Erlang applications, port drivers and releases.
+pkg_rebar_homepage = http://www.rebar3.org
+pkg_rebar_fetch = git
+pkg_rebar_repo = https://github.com/rebar/rebar3
+pkg_rebar_commit = master
+
+PACKAGES += rebus
+pkg_rebus_name = rebus
+pkg_rebus_description = A stupid-simple, internal pub/sub event bus written in and for Erlang.
+pkg_rebus_homepage = https://github.com/olle/rebus
+pkg_rebus_fetch = git
+pkg_rebus_repo = https://github.com/olle/rebus
+pkg_rebus_commit = master
+
+PACKAGES += rec2json
+pkg_rec2json_name = rec2json
+pkg_rec2json_description = Compile Erlang record definitions into modules to convert them to/from JSON easily.
+pkg_rec2json_homepage = https://github.com/lordnull/rec2json
+pkg_rec2json_fetch = git
+pkg_rec2json_repo = https://github.com/lordnull/rec2json
+pkg_rec2json_commit = master
+
+PACKAGES += recon
+pkg_recon_name = recon
+pkg_recon_description = Collection of functions and scripts to debug Erlang in production.
+pkg_recon_homepage = https://github.com/ferd/recon
+pkg_recon_fetch = git
+pkg_recon_repo = https://github.com/ferd/recon
+pkg_recon_commit = master
+
+PACKAGES += record_info
+pkg_record_info_name = record_info
+pkg_record_info_description = Convert between record and proplist
+pkg_record_info_homepage = https://github.com/bipthelin/erlang-record_info
+pkg_record_info_fetch = git
+pkg_record_info_repo = https://github.com/bipthelin/erlang-record_info
+pkg_record_info_commit = master
+
+PACKAGES += redgrid
+pkg_redgrid_name = redgrid
+pkg_redgrid_description = Automatic Erlang node discovery via Redis
+pkg_redgrid_homepage = https://github.com/jkvor/redgrid
+pkg_redgrid_fetch = git
+pkg_redgrid_repo = https://github.com/jkvor/redgrid
+pkg_redgrid_commit = master
+
+PACKAGES += redo
+pkg_redo_name = redo
+pkg_redo_description = Pipelined Erlang Redis client
+pkg_redo_homepage = https://github.com/jkvor/redo
+pkg_redo_fetch = git
+pkg_redo_repo = https://github.com/jkvor/redo
+pkg_redo_commit = master
+
+PACKAGES += reload_mk
+pkg_reload_mk_name = reload_mk
+pkg_reload_mk_description = Live reload plugin for erlang.mk.
+pkg_reload_mk_homepage = https://github.com/bullno1/reload.mk
+pkg_reload_mk_fetch = git
+pkg_reload_mk_repo = https://github.com/bullno1/reload.mk
+pkg_reload_mk_commit = master
+
+PACKAGES += reltool_util
+pkg_reltool_util_name = reltool_util
+pkg_reltool_util_description = Erlang reltool utility functionality application
+pkg_reltool_util_homepage = https://github.com/okeuday/reltool_util
+pkg_reltool_util_fetch = git
+pkg_reltool_util_repo = https://github.com/okeuday/reltool_util
+pkg_reltool_util_commit = master
+
+PACKAGES += relx
+pkg_relx_name = relx
+pkg_relx_description = Sane, simple release creation for Erlang
+pkg_relx_homepage = https://github.com/erlware/relx
+pkg_relx_fetch = git
+pkg_relx_repo = https://github.com/erlware/relx
+pkg_relx_commit = master
+
+PACKAGES += resource_discovery
+pkg_resource_discovery_name = resource_discovery
+pkg_resource_discovery_description = An application used to dynamically discover resources present in an Erlang node cluster.
+pkg_resource_discovery_homepage = http://erlware.org/
+pkg_resource_discovery_fetch = git
+pkg_resource_discovery_repo = https://github.com/erlware/resource_discovery
+pkg_resource_discovery_commit = master
+
+PACKAGES += restc
+pkg_restc_name = restc
+pkg_restc_description = Erlang REST client
+pkg_restc_homepage = https://github.com/kivra/restclient
+pkg_restc_fetch = git
+pkg_restc_repo = https://github.com/kivra/restclient
+pkg_restc_commit = master
+
+PACKAGES += rfc4627_jsonrpc
+pkg_rfc4627_jsonrpc_name = rfc4627_jsonrpc
+pkg_rfc4627_jsonrpc_description = Erlang RFC4627 (JSON) codec and JSON-RPC server implementation.
+pkg_rfc4627_jsonrpc_homepage = https://github.com/tonyg/erlang-rfc4627
+pkg_rfc4627_jsonrpc_fetch = git
+pkg_rfc4627_jsonrpc_repo = https://github.com/tonyg/erlang-rfc4627
+pkg_rfc4627_jsonrpc_commit = master
+
+PACKAGES += riak_control
+pkg_riak_control_name = riak_control
+pkg_riak_control_description = Webmachine-based administration interface for Riak.
+pkg_riak_control_homepage = https://github.com/basho/riak_control
+pkg_riak_control_fetch = git
+pkg_riak_control_repo = https://github.com/basho/riak_control
+pkg_riak_control_commit = master
+
+PACKAGES += riak_core
+pkg_riak_core_name = riak_core
+pkg_riak_core_description = Distributed systems infrastructure used by Riak.
+pkg_riak_core_homepage = https://github.com/basho/riak_core
+pkg_riak_core_fetch = git
+pkg_riak_core_repo = https://github.com/basho/riak_core
+pkg_riak_core_commit = master
+
+PACKAGES += riak_dt
+pkg_riak_dt_name = riak_dt
+pkg_riak_dt_description = Convergent replicated datatypes in Erlang
+pkg_riak_dt_homepage = https://github.com/basho/riak_dt
+pkg_riak_dt_fetch = git
+pkg_riak_dt_repo = https://github.com/basho/riak_dt
+pkg_riak_dt_commit = master
+
+PACKAGES += riak_ensemble
+pkg_riak_ensemble_name = riak_ensemble
+pkg_riak_ensemble_description = Multi-Paxos framework in Erlang
+pkg_riak_ensemble_homepage = https://github.com/basho/riak_ensemble
+pkg_riak_ensemble_fetch = git
+pkg_riak_ensemble_repo = https://github.com/basho/riak_ensemble
+pkg_riak_ensemble_commit = master
+
+PACKAGES += riak_kv
+pkg_riak_kv_name = riak_kv
+pkg_riak_kv_description = Riak Key/Value Store
+pkg_riak_kv_homepage = https://github.com/basho/riak_kv
+pkg_riak_kv_fetch = git
+pkg_riak_kv_repo = https://github.com/basho/riak_kv
+pkg_riak_kv_commit = master
+
+PACKAGES += riak_pg
+pkg_riak_pg_name = riak_pg
+pkg_riak_pg_description = Distributed process groups with riak_core.
+pkg_riak_pg_homepage = https://github.com/cmeiklejohn/riak_pg
+pkg_riak_pg_fetch = git
+pkg_riak_pg_repo = https://github.com/cmeiklejohn/riak_pg
+pkg_riak_pg_commit = master
+
+PACKAGES += riak_pipe
+pkg_riak_pipe_name = riak_pipe
+pkg_riak_pipe_description = Riak Pipelines
+pkg_riak_pipe_homepage = https://github.com/basho/riak_pipe
+pkg_riak_pipe_fetch = git
+pkg_riak_pipe_repo = https://github.com/basho/riak_pipe
+pkg_riak_pipe_commit = master
+
+PACKAGES += riak_sysmon
+pkg_riak_sysmon_name = riak_sysmon
+pkg_riak_sysmon_description = Simple OTP app for managing Erlang VM system_monitor event messages
+pkg_riak_sysmon_homepage = https://github.com/basho/riak_sysmon
+pkg_riak_sysmon_fetch = git
+pkg_riak_sysmon_repo = https://github.com/basho/riak_sysmon
+pkg_riak_sysmon_commit = master
+
+PACKAGES += riak_test
+pkg_riak_test_name = riak_test
+pkg_riak_test_description = I'm in your cluster, testing your riaks
+pkg_riak_test_homepage = https://github.com/basho/riak_test
+pkg_riak_test_fetch = git
+pkg_riak_test_repo = https://github.com/basho/riak_test
+pkg_riak_test_commit = master
+
+PACKAGES += riakc
+pkg_riakc_name = riakc
+pkg_riakc_description = Erlang clients for Riak.
+pkg_riakc_homepage = https://github.com/basho/riak-erlang-client
+pkg_riakc_fetch = git
+pkg_riakc_repo = https://github.com/basho/riak-erlang-client
+pkg_riakc_commit = master
+
+PACKAGES += riakhttpc
+pkg_riakhttpc_name = riakhttpc
+pkg_riakhttpc_description = Riak Erlang client using the HTTP interface
+pkg_riakhttpc_homepage = https://github.com/basho/riak-erlang-http-client
+pkg_riakhttpc_fetch = git
+pkg_riakhttpc_repo = https://github.com/basho/riak-erlang-http-client
+pkg_riakhttpc_commit = master
+
+PACKAGES += riaknostic
+pkg_riaknostic_name = riaknostic
+pkg_riaknostic_description = A diagnostic tool for Riak installations, to find common errors ASAP
+pkg_riaknostic_homepage = https://github.com/basho/riaknostic
+pkg_riaknostic_fetch = git
+pkg_riaknostic_repo = https://github.com/basho/riaknostic
+pkg_riaknostic_commit = master
+
+PACKAGES += riakpool
+pkg_riakpool_name = riakpool
+pkg_riakpool_description = Erlang Riak client pool
+pkg_riakpool_homepage = https://github.com/dweldon/riakpool
+pkg_riakpool_fetch = git
+pkg_riakpool_repo = https://github.com/dweldon/riakpool
+pkg_riakpool_commit = master
+
+PACKAGES += rivus_cep
+pkg_rivus_cep_name = rivus_cep
+pkg_rivus_cep_description = Complex event processing in Erlang
+pkg_rivus_cep_homepage = https://github.com/vascokk/rivus_cep
+pkg_rivus_cep_fetch = git
+pkg_rivus_cep_repo = https://github.com/vascokk/rivus_cep
+pkg_rivus_cep_commit = master
+
+PACKAGES += rlimit
+pkg_rlimit_name = rlimit
+pkg_rlimit_description = Magnus Klaar's rate limiter code from etorrent
+pkg_rlimit_homepage = https://github.com/jlouis/rlimit
+pkg_rlimit_fetch = git
+pkg_rlimit_repo = https://github.com/jlouis/rlimit
+pkg_rlimit_commit = master
+
+PACKAGES += rust_mk
+pkg_rust_mk_name = rust_mk
+pkg_rust_mk_description = Build Rust crates in an Erlang application
+pkg_rust_mk_homepage = https://github.com/goertzenator/rust.mk
+pkg_rust_mk_fetch = git
+pkg_rust_mk_repo = https://github.com/goertzenator/rust.mk
+pkg_rust_mk_commit = master
+
+PACKAGES += safetyvalve
+pkg_safetyvalve_name = safetyvalve
+pkg_safetyvalve_description = A safety valve for your Erlang node
+pkg_safetyvalve_homepage = https://github.com/jlouis/safetyvalve
+pkg_safetyvalve_fetch = git
+pkg_safetyvalve_repo = https://github.com/jlouis/safetyvalve
+pkg_safetyvalve_commit = master
+
+PACKAGES += seestar
+pkg_seestar_name = seestar
+pkg_seestar_description = The Erlang client for Cassandra 1.2+ binary protocol
+pkg_seestar_homepage = https://github.com/iamaleksey/seestar
+pkg_seestar_fetch = git
+pkg_seestar_repo = https://github.com/iamaleksey/seestar
+pkg_seestar_commit = master
+
+PACKAGES += service
+pkg_service_name = service
+pkg_service_description = A minimal Erlang behavior for creating CloudI internal services
+pkg_service_homepage = http://cloudi.org/
+pkg_service_fetch = git
+pkg_service_repo = https://github.com/CloudI/service
+pkg_service_commit = master
+
+PACKAGES += setup
+pkg_setup_name = setup
+pkg_setup_description = Generic setup utility for Erlang-based systems
+pkg_setup_homepage = https://github.com/uwiger/setup
+pkg_setup_fetch = git
+pkg_setup_repo = https://github.com/uwiger/setup
+pkg_setup_commit = master
+
+PACKAGES += sext
+pkg_sext_name = sext
+pkg_sext_description = Sortable Erlang Term Serialization
+pkg_sext_homepage = https://github.com/uwiger/sext
+pkg_sext_fetch = git
+pkg_sext_repo = https://github.com/uwiger/sext
+pkg_sext_commit = master
+
+PACKAGES += sfmt
+pkg_sfmt_name = sfmt
+pkg_sfmt_description = SFMT pseudo random number generator for Erlang.
+pkg_sfmt_homepage = https://github.com/jj1bdx/sfmt-erlang
+pkg_sfmt_fetch = git
+pkg_sfmt_repo = https://github.com/jj1bdx/sfmt-erlang
+pkg_sfmt_commit = master
+
+PACKAGES += sgte
+pkg_sgte_name = sgte
+pkg_sgte_description = A simple Erlang Template Engine
+pkg_sgte_homepage = https://github.com/filippo/sgte
+pkg_sgte_fetch = git
+pkg_sgte_repo = https://github.com/filippo/sgte
+pkg_sgte_commit = master
+
+PACKAGES += sheriff
+pkg_sheriff_name = sheriff
+pkg_sheriff_description = Parse transform for type based validation.
+pkg_sheriff_homepage = http://ninenines.eu
+pkg_sheriff_fetch = git
+pkg_sheriff_repo = https://github.com/extend/sheriff
+pkg_sheriff_commit = master
+
+PACKAGES += shotgun
+pkg_shotgun_name = shotgun
+pkg_shotgun_description = better than just a gun
+pkg_shotgun_homepage = https://github.com/inaka/shotgun
+pkg_shotgun_fetch = git
+pkg_shotgun_repo = https://github.com/inaka/shotgun
+pkg_shotgun_commit = master
+
+PACKAGES += sidejob
+pkg_sidejob_name = sidejob
+pkg_sidejob_description = Parallel worker and capacity limiting library for Erlang
+pkg_sidejob_homepage = https://github.com/basho/sidejob
+pkg_sidejob_fetch = git
+pkg_sidejob_repo = https://github.com/basho/sidejob
+pkg_sidejob_commit = master
+
+PACKAGES += sieve
+pkg_sieve_name = sieve
+pkg_sieve_description = sieve is a simple TCP routing proxy (layer 7) in Erlang
+pkg_sieve_homepage = https://github.com/benoitc/sieve
+pkg_sieve_fetch = git
+pkg_sieve_repo = https://github.com/benoitc/sieve
+pkg_sieve_commit = master
+
+PACKAGES += sighandler
+pkg_sighandler_name = sighandler
+pkg_sighandler_description = Handle UNIX signals in Erlang
+pkg_sighandler_homepage = https://github.com/jkingsbery/sighandler
+pkg_sighandler_fetch = git
+pkg_sighandler_repo = https://github.com/jkingsbery/sighandler
+pkg_sighandler_commit = master
+
+PACKAGES += simhash
+pkg_simhash_name = simhash
+pkg_simhash_description = Simhashing for Erlang -- hashing algorithm to find near-duplicates in binary data.
+pkg_simhash_homepage = https://github.com/ferd/simhash
+pkg_simhash_fetch = git
+pkg_simhash_repo = https://github.com/ferd/simhash
+pkg_simhash_commit = master
+
+PACKAGES += simple_bridge
+pkg_simple_bridge_name = simple_bridge
+pkg_simple_bridge_description = A simple, standardized interface library to Erlang HTTP Servers.
+pkg_simple_bridge_homepage = https://github.com/nitrogen/simple_bridge
+pkg_simple_bridge_fetch = git
+pkg_simple_bridge_repo = https://github.com/nitrogen/simple_bridge
+pkg_simple_bridge_commit = master
+
+PACKAGES += simple_oauth2
+pkg_simple_oauth2_name = simple_oauth2
+pkg_simple_oauth2_description = Simple Erlang OAuth2 client module for any HTTP server framework (Google, Facebook, Yandex, Vkontakte are preconfigured)
+pkg_simple_oauth2_homepage = https://github.com/virtan/simple_oauth2
+pkg_simple_oauth2_fetch = git
+pkg_simple_oauth2_repo = https://github.com/virtan/simple_oauth2
+pkg_simple_oauth2_commit = master
+
+PACKAGES += skel
+pkg_skel_name = skel
+pkg_skel_description = A Streaming Process-based Skeleton Library for Erlang
+pkg_skel_homepage = https://github.com/ParaPhrase/skel
+pkg_skel_fetch = git
+pkg_skel_repo = https://github.com/ParaPhrase/skel
+pkg_skel_commit = master
+
+PACKAGES += slack
+pkg_slack_name = slack
+pkg_slack_description = Minimal slack notification OTP library.
+pkg_slack_homepage = https://github.com/DonBranson/slack
+pkg_slack_fetch = git
+pkg_slack_repo = https://github.com/DonBranson/slack.git
+pkg_slack_commit = master
+
+PACKAGES += smother
+pkg_smother_name = smother
+pkg_smother_description = Extended code coverage metrics for Erlang.
+pkg_smother_homepage = https://ramsay-t.github.io/Smother/
+pkg_smother_fetch = git
+pkg_smother_repo = https://github.com/ramsay-t/Smother
+pkg_smother_commit = master
+
+PACKAGES += snappyer
+pkg_snappyer_name = snappyer
+pkg_snappyer_description = Snappy as a NIF for Erlang
+pkg_snappyer_homepage = https://github.com/zmstone/snappyer
+pkg_snappyer_fetch = git
+pkg_snappyer_repo = https://github.com/zmstone/snappyer.git
+pkg_snappyer_commit = master
+
+PACKAGES += social
+pkg_social_name = social
+pkg_social_description = Cowboy handler for social login via OAuth2 providers
+pkg_social_homepage = https://github.com/dvv/social
+pkg_social_fetch = git
+pkg_social_repo = https://github.com/dvv/social
+pkg_social_commit = master
+
+PACKAGES += spapi_router
+pkg_spapi_router_name = spapi_router
+pkg_spapi_router_description = Partially-connected Erlang clustering
+pkg_spapi_router_homepage = https://github.com/spilgames/spapi-router
+pkg_spapi_router_fetch = git
+pkg_spapi_router_repo = https://github.com/spilgames/spapi-router
+pkg_spapi_router_commit = master
+
+PACKAGES += sqerl
+pkg_sqerl_name = sqerl
+pkg_sqerl_description = An Erlang-flavoured SQL DSL
+pkg_sqerl_homepage = https://github.com/hairyhum/sqerl
+pkg_sqerl_fetch = git
+pkg_sqerl_repo = https://github.com/hairyhum/sqerl
+pkg_sqerl_commit = master
+
+PACKAGES += srly
+pkg_srly_name = srly
+pkg_srly_description = Native Erlang Unix serial interface
+pkg_srly_homepage = https://github.com/msantos/srly
+pkg_srly_fetch = git
+pkg_srly_repo = https://github.com/msantos/srly
+pkg_srly_commit = master
+
+PACKAGES += sshrpc
+pkg_sshrpc_name = sshrpc
+pkg_sshrpc_description = Erlang SSH RPC module (experimental)
+pkg_sshrpc_homepage = https://github.com/jj1bdx/sshrpc
+pkg_sshrpc_fetch = git
+pkg_sshrpc_repo = https://github.com/jj1bdx/sshrpc
+pkg_sshrpc_commit = master
+
+PACKAGES += stable
+pkg_stable_name = stable
+pkg_stable_description = Library of assorted helpers for Cowboy web server.
+pkg_stable_homepage = https://github.com/dvv/stable
+pkg_stable_fetch = git
+pkg_stable_repo = https://github.com/dvv/stable
+pkg_stable_commit = master
+
+PACKAGES += statebox
+pkg_statebox_name = statebox
+pkg_statebox_description = Erlang state monad with merge/conflict-resolution capabilities. Useful for Riak.
+pkg_statebox_homepage = https://github.com/mochi/statebox
+pkg_statebox_fetch = git
+pkg_statebox_repo = https://github.com/mochi/statebox
+pkg_statebox_commit = master
+
+PACKAGES += statebox_riak
+pkg_statebox_riak_name = statebox_riak
+pkg_statebox_riak_description = Convenience library that makes it easier to use statebox with riak, extracted from best practices in our production code at Mochi Media.
+pkg_statebox_riak_homepage = https://github.com/mochi/statebox_riak
+pkg_statebox_riak_fetch = git
+pkg_statebox_riak_repo = https://github.com/mochi/statebox_riak
+pkg_statebox_riak_commit = master
+
+PACKAGES += statman
+pkg_statman_name = statman
+pkg_statman_description = Efficiently collect massive volumes of metrics inside the Erlang VM
+pkg_statman_homepage = https://github.com/knutin/statman
+pkg_statman_fetch = git
+pkg_statman_repo = https://github.com/knutin/statman
+pkg_statman_commit = master
+
+PACKAGES += statsderl
+pkg_statsderl_name = statsderl
+pkg_statsderl_description = StatsD client (erlang)
+pkg_statsderl_homepage = https://github.com/lpgauth/statsderl
+pkg_statsderl_fetch = git
+pkg_statsderl_repo = https://github.com/lpgauth/statsderl
+pkg_statsderl_commit = master
+
+PACKAGES += stdinout_pool
+pkg_stdinout_pool_name = stdinout_pool
+pkg_stdinout_pool_description = stdinout_pool : stuff goes in, stuff goes out. there's never any miscommunication.
+pkg_stdinout_pool_homepage = https://github.com/mattsta/erlang-stdinout-pool
+pkg_stdinout_pool_fetch = git
+pkg_stdinout_pool_repo = https://github.com/mattsta/erlang-stdinout-pool
+pkg_stdinout_pool_commit = master
+
+PACKAGES += stockdb
+pkg_stockdb_name = stockdb
+pkg_stockdb_description = Database for storing stock exchange quotes in Erlang
+pkg_stockdb_homepage = https://github.com/maxlapshin/stockdb
+pkg_stockdb_fetch = git
+pkg_stockdb_repo = https://github.com/maxlapshin/stockdb
+pkg_stockdb_commit = master
+
+PACKAGES += stripe
+pkg_stripe_name = stripe
+pkg_stripe_description = Erlang interface to the stripe.com API
+pkg_stripe_homepage = https://github.com/mattsta/stripe-erlang
+pkg_stripe_fetch = git
+pkg_stripe_repo = https://github.com/mattsta/stripe-erlang
+pkg_stripe_commit = v1
+
+PACKAGES += subproc
+pkg_subproc_name = subproc
+pkg_subproc_description = UNIX subprocess manager with {active,once|false} modes
+pkg_subproc_homepage = http://dozzie.jarowit.net/trac/wiki/subproc
+pkg_subproc_fetch = git
+pkg_subproc_repo = https://github.com/dozzie/subproc
+pkg_subproc_commit = v0.1.0
+
+PACKAGES += supervisor3
+pkg_supervisor3_name = supervisor3
+pkg_supervisor3_description = OTP supervisor with additional strategies
+pkg_supervisor3_homepage = https://github.com/klarna/supervisor3
+pkg_supervisor3_fetch = git
+pkg_supervisor3_repo = https://github.com/klarna/supervisor3.git
+pkg_supervisor3_commit = master
+
+PACKAGES += surrogate
+pkg_surrogate_name = surrogate
+pkg_surrogate_description = Proxy server written in Erlang. Supports reverse proxy load balancing and forward proxying with HTTP (including CONNECT), SOCKS4, SOCKS5, and transparent proxy modes.
+pkg_surrogate_homepage = https://github.com/skruger/Surrogate
+pkg_surrogate_fetch = git
+pkg_surrogate_repo = https://github.com/skruger/Surrogate
+pkg_surrogate_commit = master
+
+PACKAGES += swab
+pkg_swab_name = swab
+pkg_swab_description = General purpose buffer handling module
+pkg_swab_homepage = https://github.com/crownedgrouse/swab
+pkg_swab_fetch = git
+pkg_swab_repo = https://github.com/crownedgrouse/swab
+pkg_swab_commit = master
+
+PACKAGES += swarm
+pkg_swarm_name = swarm
+pkg_swarm_description = Fast and simple acceptor pool for Erlang
+pkg_swarm_homepage = https://github.com/jeremey/swarm
+pkg_swarm_fetch = git
+pkg_swarm_repo = https://github.com/jeremey/swarm
+pkg_swarm_commit = master
+
+PACKAGES += switchboard
+pkg_switchboard_name = switchboard
+pkg_switchboard_description = A framework for processing email using worker plugins.
+pkg_switchboard_homepage = https://github.com/thusfresh/switchboard
+pkg_switchboard_fetch = git
+pkg_switchboard_repo = https://github.com/thusfresh/switchboard
+pkg_switchboard_commit = master
+
+PACKAGES += syn
+pkg_syn_name = syn
+pkg_syn_description = A global Process Registry and Process Group manager for Erlang.
+pkg_syn_homepage = https://github.com/ostinelli/syn
+pkg_syn_fetch = git
+pkg_syn_repo = https://github.com/ostinelli/syn
+pkg_syn_commit = master
+
+PACKAGES += sync
+pkg_sync_name = sync
+pkg_sync_description = On-the-fly recompiling and reloading in Erlang.
+pkg_sync_homepage = https://github.com/rustyio/sync
+pkg_sync_fetch = git
+pkg_sync_repo = https://github.com/rustyio/sync
+pkg_sync_commit = master
+
+PACKAGES += syntaxerl
+pkg_syntaxerl_name = syntaxerl
+pkg_syntaxerl_description = Syntax checker for Erlang
+pkg_syntaxerl_homepage = https://github.com/ten0s/syntaxerl
+pkg_syntaxerl_fetch = git
+pkg_syntaxerl_repo = https://github.com/ten0s/syntaxerl
+pkg_syntaxerl_commit = master
+
+PACKAGES += syslog
+pkg_syslog_name = syslog
+pkg_syslog_description = Erlang port driver for interacting with syslog via syslog(3)
+pkg_syslog_homepage = https://github.com/Vagabond/erlang-syslog
+pkg_syslog_fetch = git
+pkg_syslog_repo = https://github.com/Vagabond/erlang-syslog
+pkg_syslog_commit = master
+
+PACKAGES += taskforce
+pkg_taskforce_name = taskforce
+pkg_taskforce_description = Erlang worker pools for controlled parallelisation of arbitrary tasks.
+pkg_taskforce_homepage = https://github.com/g-andrade/taskforce
+pkg_taskforce_fetch = git
+pkg_taskforce_repo = https://github.com/g-andrade/taskforce
+pkg_taskforce_commit = master
+
+PACKAGES += tddreloader
+pkg_tddreloader_name = tddreloader
+pkg_tddreloader_description = Shell utility for recompiling, reloading, and testing code as it changes
+pkg_tddreloader_homepage = https://github.com/version2beta/tddreloader
+pkg_tddreloader_fetch = git
+pkg_tddreloader_repo = https://github.com/version2beta/tddreloader
+pkg_tddreloader_commit = master
+
+PACKAGES += tempo
+pkg_tempo_name = tempo
+pkg_tempo_description = NIF-based date and time parsing and formatting for Erlang.
+pkg_tempo_homepage = https://github.com/selectel/tempo
+pkg_tempo_fetch = git
+pkg_tempo_repo = https://github.com/selectel/tempo
+pkg_tempo_commit = master
+
+PACKAGES += ticktick
+pkg_ticktick_name = ticktick
+pkg_ticktick_description = Ticktick is an ID generator for message services.
+pkg_ticktick_homepage = https://github.com/ericliang/ticktick
+pkg_ticktick_fetch = git
+pkg_ticktick_repo = https://github.com/ericliang/ticktick
+pkg_ticktick_commit = master
+
+PACKAGES += tinymq
+pkg_tinymq_name = tinymq
+pkg_tinymq_description = TinyMQ - a diminutive, in-memory message queue
+pkg_tinymq_homepage = https://github.com/ChicagoBoss/tinymq
+pkg_tinymq_fetch = git
+pkg_tinymq_repo = https://github.com/ChicagoBoss/tinymq
+pkg_tinymq_commit = master
+
+PACKAGES += tinymt
+pkg_tinymt_name = tinymt
+pkg_tinymt_description = TinyMT pseudo random number generator for Erlang.
+pkg_tinymt_homepage = https://github.com/jj1bdx/tinymt-erlang
+pkg_tinymt_fetch = git
+pkg_tinymt_repo = https://github.com/jj1bdx/tinymt-erlang
+pkg_tinymt_commit = master
+
+PACKAGES += tirerl
+pkg_tirerl_name = tirerl
+pkg_tirerl_description = Erlang interface to Elasticsearch
+pkg_tirerl_homepage = https://github.com/inaka/tirerl
+pkg_tirerl_fetch = git
+pkg_tirerl_repo = https://github.com/inaka/tirerl
+pkg_tirerl_commit = master
+
+PACKAGES += toml
+pkg_toml_name = toml
+pkg_toml_description = TOML (0.4.0) config parser
+pkg_toml_homepage = http://dozzie.jarowit.net/trac/wiki/TOML
+pkg_toml_fetch = git
+pkg_toml_repo = https://github.com/dozzie/toml
+pkg_toml_commit = v0.2.0
+
+PACKAGES += traffic_tools
+pkg_traffic_tools_name = traffic_tools
+pkg_traffic_tools_description = Simple traffic limiting library
+pkg_traffic_tools_homepage = https://github.com/systra/traffic_tools
+pkg_traffic_tools_fetch = git
+pkg_traffic_tools_repo = https://github.com/systra/traffic_tools
+pkg_traffic_tools_commit = master
+
+PACKAGES += trails
+pkg_trails_name = trails
+pkg_trails_description = A couple of improvements over Cowboy Routes
+pkg_trails_homepage = http://inaka.github.io/cowboy-trails/
+pkg_trails_fetch = git
+pkg_trails_repo = https://github.com/inaka/cowboy-trails
+pkg_trails_commit = master
+
+PACKAGES += trane
+pkg_trane_name = trane
+pkg_trane_description = SAX style broken HTML parser in Erlang
+pkg_trane_homepage = https://github.com/massemanet/trane
+pkg_trane_fetch = git
+pkg_trane_repo = https://github.com/massemanet/trane
+pkg_trane_commit = master
+
+PACKAGES += transit
+pkg_transit_name = transit
+pkg_transit_description = Transit format for Erlang
+pkg_transit_homepage = https://github.com/isaiah/transit-erlang
+pkg_transit_fetch = git
+pkg_transit_repo = https://github.com/isaiah/transit-erlang
+pkg_transit_commit = master
+
+PACKAGES += trie
+pkg_trie_name = trie
+pkg_trie_description = Erlang Trie Implementation
+pkg_trie_homepage = https://github.com/okeuday/trie
+pkg_trie_fetch = git
+pkg_trie_repo = https://github.com/okeuday/trie
+pkg_trie_commit = master
+
+PACKAGES += triq
+pkg_triq_name = triq
+pkg_triq_description = Trifork QuickCheck
+pkg_triq_homepage = https://triq.gitlab.io
+pkg_triq_fetch = git
+pkg_triq_repo = https://gitlab.com/triq/triq.git
+pkg_triq_commit = master
+
+PACKAGES += tunctl
+pkg_tunctl_name = tunctl
+pkg_tunctl_description = Erlang TUN/TAP interface
+pkg_tunctl_homepage = https://github.com/msantos/tunctl
+pkg_tunctl_fetch = git
+pkg_tunctl_repo = https://github.com/msantos/tunctl
+pkg_tunctl_commit = master
+
+PACKAGES += twerl
+pkg_twerl_name = twerl
+pkg_twerl_description = Erlang client for the Twitter Streaming API
+pkg_twerl_homepage = https://github.com/lucaspiller/twerl
+pkg_twerl_fetch = git
+pkg_twerl_repo = https://github.com/lucaspiller/twerl
+pkg_twerl_commit = oauth
+
+PACKAGES += twitter_erlang
+pkg_twitter_erlang_name = twitter_erlang
+pkg_twitter_erlang_description = An Erlang Twitter client
+pkg_twitter_erlang_homepage = https://github.com/ngerakines/erlang_twitter
+pkg_twitter_erlang_fetch = git
+pkg_twitter_erlang_repo = https://github.com/ngerakines/erlang_twitter
+pkg_twitter_erlang_commit = master
+
+PACKAGES += ucol_nif
+pkg_ucol_nif_name = ucol_nif
+pkg_ucol_nif_description = ICU based collation Erlang module
+pkg_ucol_nif_homepage = https://github.com/refuge/ucol_nif
+pkg_ucol_nif_fetch = git
+pkg_ucol_nif_repo = https://github.com/refuge/ucol_nif
+pkg_ucol_nif_commit = master
+
+PACKAGES += unicorn
+pkg_unicorn_name = unicorn
+pkg_unicorn_description = Generic configuration server
+pkg_unicorn_homepage = https://github.com/shizzard/unicorn
+pkg_unicorn_fetch = git
+pkg_unicorn_repo = https://github.com/shizzard/unicorn
+pkg_unicorn_commit = master
+
+PACKAGES += unsplit
+pkg_unsplit_name = unsplit
+pkg_unsplit_description = Resolves conflicts in Mnesia after network splits
+pkg_unsplit_homepage = https://github.com/uwiger/unsplit
+pkg_unsplit_fetch = git
+pkg_unsplit_repo = https://github.com/uwiger/unsplit
+pkg_unsplit_commit = master
+
+PACKAGES += uuid
+pkg_uuid_name = uuid
+pkg_uuid_description = Erlang UUID Implementation
+pkg_uuid_homepage = https://github.com/okeuday/uuid
+pkg_uuid_fetch = git
+pkg_uuid_repo = https://github.com/okeuday/uuid
+pkg_uuid_commit = master
+
+PACKAGES += ux
+pkg_ux_name = ux
+pkg_ux_description = Unicode eXtension for Erlang (Strings, Collation)
+pkg_ux_homepage = https://github.com/erlang-unicode/ux
+pkg_ux_fetch = git
+pkg_ux_repo = https://github.com/erlang-unicode/ux
+pkg_ux_commit = master
+
+PACKAGES += vert
+pkg_vert_name = vert
+pkg_vert_description = Erlang binding to libvirt virtualization API
+pkg_vert_homepage = https://github.com/msantos/erlang-libvirt
+pkg_vert_fetch = git
+pkg_vert_repo = https://github.com/msantos/erlang-libvirt
+pkg_vert_commit = master
+
+PACKAGES += verx
+pkg_verx_name = verx
+pkg_verx_description = Erlang implementation of the libvirtd remote protocol
+pkg_verx_homepage = https://github.com/msantos/verx
+pkg_verx_fetch = git
+pkg_verx_repo = https://github.com/msantos/verx
+pkg_verx_commit = master
+
+PACKAGES += vmq_acl
+pkg_vmq_acl_name = vmq_acl
+pkg_vmq_acl_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_acl_homepage = https://verne.mq/
+pkg_vmq_acl_fetch = git
+pkg_vmq_acl_repo = https://github.com/erlio/vmq_acl
+pkg_vmq_acl_commit = master
+
+PACKAGES += vmq_bridge
+pkg_vmq_bridge_name = vmq_bridge
+pkg_vmq_bridge_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_bridge_homepage = https://verne.mq/
+pkg_vmq_bridge_fetch = git
+pkg_vmq_bridge_repo = https://github.com/erlio/vmq_bridge
+pkg_vmq_bridge_commit = master
+
+PACKAGES += vmq_graphite
+pkg_vmq_graphite_name = vmq_graphite
+pkg_vmq_graphite_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_graphite_homepage = https://verne.mq/
+pkg_vmq_graphite_fetch = git
+pkg_vmq_graphite_repo = https://github.com/erlio/vmq_graphite
+pkg_vmq_graphite_commit = master
+
+PACKAGES += vmq_passwd
+pkg_vmq_passwd_name = vmq_passwd
+pkg_vmq_passwd_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_passwd_homepage = https://verne.mq/
+pkg_vmq_passwd_fetch = git
+pkg_vmq_passwd_repo = https://github.com/erlio/vmq_passwd
+pkg_vmq_passwd_commit = master
+
+PACKAGES += vmq_server
+pkg_vmq_server_name = vmq_server
+pkg_vmq_server_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_server_homepage = https://verne.mq/
+pkg_vmq_server_fetch = git
+pkg_vmq_server_repo = https://github.com/erlio/vmq_server
+pkg_vmq_server_commit = master
+
+PACKAGES += vmq_snmp
+pkg_vmq_snmp_name = vmq_snmp
+pkg_vmq_snmp_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_snmp_homepage = https://verne.mq/
+pkg_vmq_snmp_fetch = git
+pkg_vmq_snmp_repo = https://github.com/erlio/vmq_snmp
+pkg_vmq_snmp_commit = master
+
+PACKAGES += vmq_systree
+pkg_vmq_systree_name = vmq_systree
+pkg_vmq_systree_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_systree_homepage = https://verne.mq/
+pkg_vmq_systree_fetch = git
+pkg_vmq_systree_repo = https://github.com/erlio/vmq_systree
+pkg_vmq_systree_commit = master
+
+PACKAGES += vmstats
+pkg_vmstats_name = vmstats
+pkg_vmstats_description = tiny Erlang app that works in conjunction with statsderl in order to generate information on the Erlang VM for graphite logs.
+pkg_vmstats_homepage = https://github.com/ferd/vmstats
+pkg_vmstats_fetch = git
+pkg_vmstats_repo = https://github.com/ferd/vmstats
+pkg_vmstats_commit = master
+
+PACKAGES += walrus
+pkg_walrus_name = walrus
+pkg_walrus_description = Walrus - Mustache-like Templating
+pkg_walrus_homepage = https://github.com/devinus/walrus
+pkg_walrus_fetch = git
+pkg_walrus_repo = https://github.com/devinus/walrus
+pkg_walrus_commit = master
+
+PACKAGES += webmachine
+pkg_webmachine_name = webmachine
+pkg_webmachine_description = A REST-based system for building web applications.
+pkg_webmachine_homepage = https://github.com/basho/webmachine
+pkg_webmachine_fetch = git
+pkg_webmachine_repo = https://github.com/basho/webmachine
+pkg_webmachine_commit = master
+
+PACKAGES += websocket_client
+pkg_websocket_client_name = websocket_client
+pkg_websocket_client_description = Erlang websocket client (ws and wss supported)
+pkg_websocket_client_homepage = https://github.com/jeremyong/websocket_client
+pkg_websocket_client_fetch = git
+pkg_websocket_client_repo = https://github.com/jeremyong/websocket_client
+pkg_websocket_client_commit = master
+
+PACKAGES += worker_pool
+pkg_worker_pool_name = worker_pool
+pkg_worker_pool_description = A simple Erlang worker pool
+pkg_worker_pool_homepage = https://github.com/inaka/worker_pool
+pkg_worker_pool_fetch = git
+pkg_worker_pool_repo = https://github.com/inaka/worker_pool
+pkg_worker_pool_commit = master
+
+PACKAGES += wrangler
+pkg_wrangler_name = wrangler
+pkg_wrangler_description = Import of the Wrangler svn repository.
+pkg_wrangler_homepage = http://www.cs.kent.ac.uk/projects/wrangler/Home.html
+pkg_wrangler_fetch = git
+pkg_wrangler_repo = https://github.com/RefactoringTools/wrangler
+pkg_wrangler_commit = master
+
+PACKAGES += wsock
+pkg_wsock_name = wsock
+pkg_wsock_description = Erlang library to build WebSocket clients and servers
+pkg_wsock_homepage = https://github.com/madtrick/wsock
+pkg_wsock_fetch = git
+pkg_wsock_repo = https://github.com/madtrick/wsock
+pkg_wsock_commit = master
+
+PACKAGES += xhttpc
+pkg_xhttpc_name = xhttpc
+pkg_xhttpc_description = Extensible HTTP Client for Erlang
+pkg_xhttpc_homepage = https://github.com/seriyps/xhttpc
+pkg_xhttpc_fetch = git
+pkg_xhttpc_repo = https://github.com/seriyps/xhttpc
+pkg_xhttpc_commit = master
+
+PACKAGES += xref_runner
+pkg_xref_runner_name = xref_runner
+pkg_xref_runner_description = Erlang Xref Runner (inspired by rebar xref)
+pkg_xref_runner_homepage = https://github.com/inaka/xref_runner
+pkg_xref_runner_fetch = git
+pkg_xref_runner_repo = https://github.com/inaka/xref_runner
+pkg_xref_runner_commit = master
+
+PACKAGES += yamerl
+pkg_yamerl_name = yamerl
+pkg_yamerl_description = YAML 1.2 parser in pure Erlang
+pkg_yamerl_homepage = https://github.com/yakaz/yamerl
+pkg_yamerl_fetch = git
+pkg_yamerl_repo = https://github.com/yakaz/yamerl
+pkg_yamerl_commit = master
+
+PACKAGES += yamler
+pkg_yamler_name = yamler
+pkg_yamler_description = libyaml-based yaml loader for Erlang
+pkg_yamler_homepage = https://github.com/goertzenator/yamler
+pkg_yamler_fetch = git
+pkg_yamler_repo = https://github.com/goertzenator/yamler
+pkg_yamler_commit = master
+
+PACKAGES += yaws
+pkg_yaws_name = yaws
+pkg_yaws_description = Yaws webserver
+pkg_yaws_homepage = http://yaws.hyber.org
+pkg_yaws_fetch = git
+pkg_yaws_repo = https://github.com/klacke/yaws
+pkg_yaws_commit = master
+
+PACKAGES += zab_engine
+pkg_zab_engine_name = zab_engine
+pkg_zab_engine_description = ZAB protocol implemented in Erlang
+pkg_zab_engine_homepage = https://github.com/xinmingyao/zab_engine
+pkg_zab_engine_fetch = git
+pkg_zab_engine_repo = https://github.com/xinmingyao/zab_engine
+pkg_zab_engine_commit = master
+
+PACKAGES += zabbix_sender
+pkg_zabbix_sender_name = zabbix_sender
+pkg_zabbix_sender_description = Zabbix trapper for sending data to Zabbix in pure Erlang
+pkg_zabbix_sender_homepage = https://github.com/stalkermn/zabbix_sender
+pkg_zabbix_sender_fetch = git
+pkg_zabbix_sender_repo = https://github.com/stalkermn/zabbix_sender.git
+pkg_zabbix_sender_commit = master
+
+PACKAGES += zeta
+pkg_zeta_name = zeta
+pkg_zeta_description = HTTP access log parser in Erlang
+pkg_zeta_homepage = https://github.com/s1n4/zeta
+pkg_zeta_fetch = git
+pkg_zeta_repo = https://github.com/s1n4/zeta
+pkg_zeta_commit = master
+
+PACKAGES += zippers
+pkg_zippers_name = zippers
+pkg_zippers_description = A library for functional zipper data structures in Erlang.
+pkg_zippers_homepage = https://github.com/ferd/zippers
+pkg_zippers_fetch = git
+pkg_zippers_repo = https://github.com/ferd/zippers
+pkg_zippers_commit = master
+
+PACKAGES += zlists
+pkg_zlists_name = zlists
+pkg_zlists_description = Erlang lazy lists library.
+pkg_zlists_homepage = https://github.com/vjache/erlang-zlists
+pkg_zlists_fetch = git
+pkg_zlists_repo = https://github.com/vjache/erlang-zlists
+pkg_zlists_commit = master
+
+PACKAGES += zraft_lib
+pkg_zraft_lib_name = zraft_lib
+pkg_zraft_lib_description = Erlang raft consensus protocol implementation
+pkg_zraft_lib_homepage = https://github.com/dreyk/zraft_lib
+pkg_zraft_lib_fetch = git
+pkg_zraft_lib_repo = https://github.com/dreyk/zraft_lib
+pkg_zraft_lib_commit = master
+
+PACKAGES += zucchini
+pkg_zucchini_name = zucchini
+pkg_zucchini_description = An Erlang INI parser
+pkg_zucchini_homepage = https://github.com/devinus/zucchini
+pkg_zucchini_fetch = git
+pkg_zucchini_repo = https://github.com/devinus/zucchini
+pkg_zucchini_commit = master
+
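+# Every index entry above follows the same scheme: for a package <pkg>, the
+# pkg_<pkg>_name/_description/_homepage/_fetch/_repo/_commit variables tell
+# erlang.mk how to describe and fetch it. For example, listing a package from
+# the index in a project Makefile (illustrative usage):
+#   DEPS = ranch
+# fetches https://github.com/ninenines/ranch at commit 1.2.1.
+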
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: search
+
+define pkg_print
+ $(verbose) printf "%s\n" \
+ $(if $(call core_eq,$(1),$(pkg_$(1)_name)),,"Pkg name: $(1)") \
+ "App name: $(pkg_$(1)_name)" \
+ "Description: $(pkg_$(1)_description)" \
+ "Home page: $(pkg_$(1)_homepage)" \
+ "Fetch with: $(pkg_$(1)_fetch)" \
+ "Repository: $(pkg_$(1)_repo)" \
+ "Commit: $(pkg_$(1)_commit)" \
+ ""
+
+endef
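+
+# As an illustration, $(call pkg_print,ranch) with the index entry above
+# prints roughly:
+#   App name: ranch
+#   Description: Socket acceptor pool for TCP protocols.
+#   Home page: http://ninenines.eu
+#   Fetch with: git
+#   Repository: https://github.com/ninenines/ranch
+#   Commit: 1.2.1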
+
+search:
+ifdef q
+ $(foreach p,$(PACKAGES), \
+ $(if $(findstring $(call core_lc,$(q)),$(call core_lc,$(pkg_$(p)_name) $(pkg_$(p)_description))), \
+ $(call pkg_print,$(p))))
+else
+ $(foreach p,$(PACKAGES),$(call pkg_print,$(p)))
+endif
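+
+# Usage example (the query string is illustrative):
+#   make search q=mqtt
+# prints every package whose name or description contains "mqtt", matched
+# case-insensitively; plain `make search` prints the whole index.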
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-deps clean-tmp-deps.log
+
+# Configuration.
+
+ifdef OTP_DEPS
+$(warning The variable OTP_DEPS is deprecated in favor of LOCAL_DEPS.)
+endif
+
+IGNORE_DEPS ?=
+export IGNORE_DEPS
+
+APPS_DIR ?= $(CURDIR)/apps
+export APPS_DIR
+
+DEPS_DIR ?= $(CURDIR)/deps
+export DEPS_DIR
+
+REBAR_DEPS_DIR = $(DEPS_DIR)
+export REBAR_DEPS_DIR
+
+REBAR_GIT ?= https://github.com/rebar/rebar
+REBAR_COMMIT ?= 576e12171ab8d69b048b827b92aa65d067deea01
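+
+# Variables set with ?= above can be overridden by the project Makefile
+# before erlang.mk is included, e.g. (values are illustrative):
+#   DEPS_DIR = $(CURDIR)/vendor
+#   IGNORE_DEPS = ranch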
+
+# External "early" plugins (see core/plugins.mk for regular plugins).
+# They both use the core_dep_plugin macro.
+
+define core_dep_plugin
+ifeq ($(2),$(PROJECT))
+-include $$(patsubst $(PROJECT)/%,%,$(1))
+else
+-include $(DEPS_DIR)/$(1)
+
+$(DEPS_DIR)/$(1): $(DEPS_DIR)/$(2) ;
+endif
+endef
+
+DEP_EARLY_PLUGINS ?=
+
+$(foreach p,$(DEP_EARLY_PLUGINS),\
+ $(eval $(if $(findstring /,$p),\
+ $(call core_dep_plugin,$p,$(firstword $(subst /, ,$p))),\
+ $(call core_dep_plugin,$p/early-plugins.mk,$p))))
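+
+# Usage example (hypothetical dependency name "my_dep"): an entry without a
+# slash loads the dependency's early-plugins.mk, while an entry with a slash
+# loads that exact file from the dependency.
+#
+#   DEP_EARLY_PLUGINS = my_dep                # includes $(DEPS_DIR)/my_dep/early-plugins.mk
+#   DEP_EARLY_PLUGINS = my_dep/mk/early.mk    # includes $(DEPS_DIR)/my_dep/mk/early.mk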
+
+# Query functions.
+
+query_fetch_method = $(if $(dep_$(1)),$(call _qfm_dep,$(word 1,$(dep_$(1)))),$(call _qfm_pkg,$(1)))
+_qfm_dep = $(if $(dep_fetch_$(1)),$(1),$(if $(IS_DEP),legacy,fail))
+_qfm_pkg = $(if $(pkg_$(1)_fetch),$(pkg_$(1)_fetch),fail)
+
+query_name = $(if $(dep_$(1)),$(1),$(if $(pkg_$(1)_name),$(pkg_$(1)_name),$(1)))
+
+query_repo = $(call _qr,$(1),$(call query_fetch_method,$(1)))
+_qr = $(if $(query_repo_$(2)),$(call query_repo_$(2),$(1)),$(call dep_repo,$(1)))
+
+query_repo_default = $(if $(dep_$(1)),$(word 2,$(dep_$(1))),$(pkg_$(1)_repo))
+query_repo_git = $(patsubst git://github.com/%,https://github.com/%,$(call query_repo_default,$(1)))
+query_repo_git-subfolder = $(call query_repo_git,$(1))
+query_repo_git-submodule = -
+query_repo_hg = $(call query_repo_default,$(1))
+query_repo_svn = $(call query_repo_default,$(1))
+query_repo_cp = $(call query_repo_default,$(1))
+query_repo_ln = $(call query_repo_default,$(1))
+query_repo_hex = https://hex.pm/packages/$(if $(word 3,$(dep_$(1))),$(word 3,$(dep_$(1))),$(1))
+query_repo_fail = -
+query_repo_legacy = -
+
+query_version = $(call _qv,$(1),$(call query_fetch_method,$(1)))
+_qv = $(if $(query_version_$(2)),$(call query_version_$(2),$(1)),$(call dep_commit,$(1)))
+
+query_version_default = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(word 3,$(dep_$(1))),$(pkg_$(1)_commit)))
+query_version_git = $(call query_version_default,$(1))
+query_version_git-subfolder = $(call query_version_git,$(1))
+query_version_git-submodule = -
+query_version_hg = $(call query_version_default,$(1))
+query_version_svn = -
+query_version_cp = -
+query_version_ln = -
+query_version_hex = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(word 2,$(dep_$(1))),$(pkg_$(1)_commit)))
+query_version_fail = -
+query_version_legacy = -
+
+query_extra = $(call _qe,$(1),$(call query_fetch_method,$(1)))
+_qe = $(if $(query_extra_$(2)),$(call query_extra_$(2),$(1)),-)
+
+query_extra_git = -
+query_extra_git-subfolder = $(if $(dep_$(1)),subfolder=$(word 4,$(dep_$(1))),-)
+query_extra_git-submodule = -
+query_extra_hg = -
+query_extra_svn = -
+query_extra_cp = -
+query_extra_ln = -
+query_extra_hex = $(if $(dep_$(1)),package-name=$(word 3,$(dep_$(1))),-)
+query_extra_fail = -
+query_extra_legacy = -
+
+query_absolute_path = $(addprefix $(DEPS_DIR)/,$(call query_name,$(1)))
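+
+# Illustration (assuming a dependency declared as
+# "dep_cowboy = git https://github.com/ninenines/cowboy 2.9.0"):
+#
+#   $(call query_fetch_method,cowboy)  -> git
+#   $(call query_repo,cowboy)          -> https://github.com/ninenines/cowboy
+#   $(call query_version,cowboy)       -> 2.9.0
+#   $(call query_absolute_path,cowboy) -> $(DEPS_DIR)/cowboy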
+
+# Deprecated legacy query functions.
+dep_fetch = $(call query_fetch_method,$(1))
+dep_name = $(call query_name,$(1))
+dep_repo = $(call query_repo_git,$(1))
+dep_commit = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(if $(filter hex,$(word 1,$(dep_$(1)))),$(word 2,$(dep_$(1))),$(word 3,$(dep_$(1)))),$(pkg_$(1)_commit)))
+
+LOCAL_DEPS_DIRS = $(foreach a,$(LOCAL_DEPS),$(if $(wildcard $(APPS_DIR)/$(a)),$(APPS_DIR)/$(a)))
+ALL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(foreach dep,$(filter-out $(IGNORE_DEPS),$(BUILD_DEPS) $(DEPS)),$(call dep_name,$(dep))))
+
+# When we are calling an app directly we don't want to include it here,
+# otherwise it'll be treated both as an app and as a top-level project.
+ALL_APPS_DIRS = $(if $(wildcard $(APPS_DIR)/),$(filter-out $(APPS_DIR),$(shell find $(APPS_DIR) -maxdepth 1 -type d)))
+ifdef ROOT_DIR
+ifndef IS_APP
+ALL_APPS_DIRS := $(filter-out $(APPS_DIR)/$(notdir $(CURDIR)),$(ALL_APPS_DIRS))
+endif
+endif
+
+ifeq ($(filter $(APPS_DIR) $(DEPS_DIR),$(subst :, ,$(ERL_LIBS))),)
+ifeq ($(ERL_LIBS),)
+ ERL_LIBS = $(APPS_DIR):$(DEPS_DIR)
+else
+ ERL_LIBS := $(ERL_LIBS):$(APPS_DIR):$(DEPS_DIR)
+endif
+endif
+export ERL_LIBS
+
+export NO_AUTOPATCH
+
+# Verbosity.
+
+dep_verbose_0 = @echo " DEP $1 ($(call dep_commit,$1))";
+dep_verbose_2 = set -x;
+dep_verbose = $(dep_verbose_$(V))
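+
+# For example, "make V=2" traces every fetch command with "set -x", while the
+# default V=0 only prints a short " DEP name (commit)" line per dependency.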
+
+# Optimization: don't recompile deps unless truly necessary.
+
+ifndef IS_DEP
+ifneq ($(MAKELEVEL),0)
+$(shell rm -f ebin/dep_built)
+endif
+endif
+
+# Core targets.
+
+ALL_APPS_DIRS_TO_BUILD = $(if $(LOCAL_DEPS_DIRS)$(IS_APP),$(LOCAL_DEPS_DIRS),$(ALL_APPS_DIRS))
+
+apps:: $(ALL_APPS_DIRS) clean-tmp-deps.log | $(ERLANG_MK_TMP)
+# Create ebin directory for all apps to make sure Erlang recognizes them
+# as proper OTP applications when using -include_lib. This is a temporary
+# fix; a proper fix would be to compile apps/* in the right order.
+ifndef IS_APP
+ifneq ($(ALL_APPS_DIRS),)
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ mkdir -p $$dep/ebin; \
+ done
+endif
+endif
+# At the toplevel: if LOCAL_DEPS is defined with at least one local app, only
+# compile that list of apps. Otherwise, compile everything.
+# Within an app: compile all LOCAL_DEPS that are (uncompiled) local apps.
+ifneq ($(ALL_APPS_DIRS_TO_BUILD),)
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS_TO_BUILD); do \
+ if grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/apps.log; then \
+ :; \
+ else \
+ echo $$dep >> $(ERLANG_MK_TMP)/apps.log; \
+ $(MAKE) -C $$dep $(if $(IS_TEST),test-build-app) IS_APP=1; \
+ fi \
+ done
+endif
+
+clean-tmp-deps.log:
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_TMP)/apps.log $(ERLANG_MK_TMP)/deps.log
+endif
+
+# Erlang.mk does not rebuild dependencies once they have been compiled.
+# A developer working on the top-level project and some of its
+# dependencies at the same time may want to change this behavior.
+# There are two solutions:
+# 1. Set `FULL=1` so that all dependencies are visited and
+# recursively recompiled if necessary.
+# 2. Set `FORCE_REBUILD=` to the specific list of dependencies that
+# should be recompiled (instead of the whole set).
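+#
+# For example (hypothetical dependency names):
+#
+#   make FULL=1                        # revisit and rebuild all dependencies
+#   make FORCE_REBUILD="cowboy ranch"  # force only these two to be rebuilt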
+
+FORCE_REBUILD ?=
+
+ifeq ($(origin FULL),undefined)
+ifneq ($(strip $(force_rebuild_dep)$(FORCE_REBUILD)),)
+define force_rebuild_dep
+echo "$(FORCE_REBUILD)" | grep -qw "$$(basename "$1")"
+endef
+endif
+endif
+
+ifneq ($(SKIP_DEPS),)
+deps::
+else
+deps:: $(ALL_DEPS_DIRS) apps clean-tmp-deps.log | $(ERLANG_MK_TMP)
+ifneq ($(ALL_DEPS_DIRS),)
+ $(verbose) set -e; for dep in $(ALL_DEPS_DIRS); do \
+ if grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/deps.log; then \
+ :; \
+ else \
+ echo $$dep >> $(ERLANG_MK_TMP)/deps.log; \
+ if [ -z "$(strip $(FULL))" ] $(if $(force_rebuild_dep),&& ! ($(call force_rebuild_dep,$$dep)),) && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ elif [ -f $$dep/GNUmakefile ] || [ -f $$dep/makefile ] || [ -f $$dep/Makefile ]; then \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ else \
+ echo "Error: No Makefile to build dependency $$dep." >&2; \
+ exit 2; \
+ fi \
+ fi \
+ done
+endif
+endif
+
+# Deps related targets.
+
+# @todo Rename GNUmakefile and makefile to Makefile first, if they exist.
+# While the Makefile could also be named GNUmakefile or makefile,
+# in practice only Makefile has been needed so far.
+define dep_autopatch
+ if [ -f $(DEPS_DIR)/$(1)/erlang.mk ]; then \
+ rm -rf $(DEPS_DIR)/$1/ebin/; \
+ $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+ $(call dep_autopatch_erlang_mk,$(1)); \
+ elif [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+ if [ -f $(DEPS_DIR)/$1/rebar.lock ]; then \
+ $(call dep_autopatch2,$1); \
+ elif [ 0 != `grep -c "include ../\w*\.mk" $(DEPS_DIR)/$(1)/Makefile` ]; then \
+ $(call dep_autopatch2,$(1)); \
+ elif [ 0 != `grep -ci "^[^#].*rebar" $(DEPS_DIR)/$(1)/Makefile` ]; then \
+ $(call dep_autopatch2,$(1)); \
+ elif [ -n "`find $(DEPS_DIR)/$(1)/ -type f -name \*.mk -not -name erlang.mk -exec grep -i "^[^#].*rebar" '{}' \;`" ]; then \
+ $(call dep_autopatch2,$(1)); \
+ fi \
+ else \
+ if [ ! -d $(DEPS_DIR)/$(1)/src/ ]; then \
+ $(call dep_autopatch_noop,$(1)); \
+ else \
+ $(call dep_autopatch2,$(1)); \
+ fi \
+ fi
+endef
+
+define dep_autopatch2
+ ! test -f $(DEPS_DIR)/$1/ebin/$1.app || \
+ mv -n $(DEPS_DIR)/$1/ebin/$1.app $(DEPS_DIR)/$1/src/$1.app.src; \
+ rm -f $(DEPS_DIR)/$1/ebin/$1.app; \
+ if [ -f $(DEPS_DIR)/$1/src/$1.app.src.script ]; then \
+ $(call erlang,$(call dep_autopatch_appsrc_script.erl,$(1))); \
+ fi; \
+ $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+ if [ -f $(DEPS_DIR)/$(1)/rebar -o -f $(DEPS_DIR)/$(1)/rebar.config -o -f $(DEPS_DIR)/$(1)/rebar.config.script -o -f $(DEPS_DIR)/$1/rebar.lock ]; then \
+ $(call dep_autopatch_fetch_rebar); \
+ $(call dep_autopatch_rebar,$(1)); \
+ else \
+ $(call dep_autopatch_gen,$(1)); \
+ fi
+endef
+
+define dep_autopatch_noop
+ printf "noop:\n" > $(DEPS_DIR)/$(1)/Makefile
+endef
+
+# Replace "include erlang.mk" with a line that will load the parent Erlang.mk
+# if given. Do it for all 3 possible Makefile file names.
+ifeq ($(NO_AUTOPATCH_ERLANG_MK),)
+define dep_autopatch_erlang_mk
+ for f in Makefile makefile GNUmakefile; do \
+ if [ -f $(DEPS_DIR)/$1/$$f ]; then \
+ sed -i.bak s/'include *erlang.mk'/'include $$(if $$(ERLANG_MK_FILENAME),$$(ERLANG_MK_FILENAME),erlang.mk)'/ $(DEPS_DIR)/$1/$$f; \
+ fi \
+ done
+endef
+else
+define dep_autopatch_erlang_mk
+ :
+endef
+endif
+
+define dep_autopatch_gen
+ printf "%s\n" \
+ "ERLC_OPTS = +debug_info" \
+ "include ../../erlang.mk" > $(DEPS_DIR)/$(1)/Makefile
+endef
+
+# We use flock/lockf when available to avoid concurrency issues.
+define dep_autopatch_fetch_rebar
+ if command -v flock >/dev/null; then \
+ flock $(ERLANG_MK_TMP)/rebar.lock sh -c "$(call dep_autopatch_fetch_rebar2)"; \
+ elif command -v lockf >/dev/null; then \
+ lockf $(ERLANG_MK_TMP)/rebar.lock sh -c "$(call dep_autopatch_fetch_rebar2)"; \
+ else \
+ $(call dep_autopatch_fetch_rebar2); \
+ fi
+endef
+
+define dep_autopatch_fetch_rebar2
+ if [ ! -d $(ERLANG_MK_TMP)/rebar ]; then \
+ git clone -q -n -- $(REBAR_GIT) $(ERLANG_MK_TMP)/rebar; \
+ cd $(ERLANG_MK_TMP)/rebar; \
+ git checkout -q $(REBAR_COMMIT); \
+ ./bootstrap; \
+ cd -; \
+ fi
+endef
+
+define dep_autopatch_rebar
+ if [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+ mv $(DEPS_DIR)/$(1)/Makefile $(DEPS_DIR)/$(1)/Makefile.orig.mk; \
+ fi; \
+ $(call erlang,$(call dep_autopatch_rebar.erl,$(1))); \
+ rm -f $(DEPS_DIR)/$(1)/ebin/$(1).app
+endef
+
+define dep_autopatch_rebar.erl
+ application:load(rebar),
+ application:set_env(rebar, log_level, debug),
+ rmemo:start(),
+ Conf1 = case file:consult("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config)") of
+ {ok, Conf0} -> Conf0;
+ _ -> []
+ end,
+ {Conf, OsEnv} = fun() ->
+ case filelib:is_file("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)") of
+ false -> {Conf1, []};
+ true ->
+ Bindings0 = erl_eval:new_bindings(),
+ Bindings1 = erl_eval:add_binding('CONFIG', Conf1, Bindings0),
+ Bindings = erl_eval:add_binding('SCRIPT', "$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings1),
+ Before = os:getenv(),
+ {ok, Conf2} = file:script("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings),
+ {Conf2, lists:foldl(fun(E, Acc) -> lists:delete(E, Acc) end, os:getenv(), Before)}
+ end
+ end(),
+ Write = fun (Text) ->
+ file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/Makefile)", Text, [append])
+ end,
+ Escape = fun (Text) ->
+ re:replace(Text, "\\\\$$", "\$$$$", [global, {return, list}])
+ end,
+ Write("IGNORE_DEPS += edown eper eunit_formatters meck node_package "
+ "rebar_lock_deps_plugin rebar_vsn_plugin reltool_util\n"),
+ Write("C_SRC_DIR = /path/do/not/exist\n"),
+ Write("C_SRC_TYPE = rebar\n"),
+ Write("DRV_CFLAGS = -fPIC\nexport DRV_CFLAGS\n"),
+ Write(["ERLANG_ARCH = ", rebar_utils:wordsize(), "\nexport ERLANG_ARCH\n"]),
+ ToList = fun
+ (V) when is_atom(V) -> atom_to_list(V);
+ (V) when is_list(V) -> "'\\"" ++ V ++ "\\"'"
+ end,
+ fun() ->
+ Write("ERLC_OPTS = +debug_info\nexport ERLC_OPTS\n"),
+ case lists:keyfind(erl_opts, 1, Conf) of
+ false -> ok;
+ {_, ErlOpts} ->
+ lists:foreach(fun
+ ({d, D}) ->
+ Write("ERLC_OPTS += -D" ++ ToList(D) ++ "=1\n");
+ ({d, DKey, DVal}) ->
+ Write("ERLC_OPTS += -D" ++ ToList(DKey) ++ "=" ++ ToList(DVal) ++ "\n");
+ ({i, I}) ->
+ Write(["ERLC_OPTS += -I ", I, "\n"]);
+ ({platform_define, Regex, D}) ->
+ case rebar_utils:is_arch(Regex) of
+ true -> Write("ERLC_OPTS += -D" ++ ToList(D) ++ "=1\n");
+ false -> ok
+ end;
+ ({parse_transform, PT}) ->
+ Write("ERLC_OPTS += +'{parse_transform, " ++ ToList(PT) ++ "}'\n");
+ (_) -> ok
+ end, ErlOpts)
+ end,
+ Write("\n")
+ end(),
+ GetHexVsn = fun(N, NP) ->
+ case file:consult("$(call core_native_path,$(DEPS_DIR)/$1/rebar.lock)") of
+ {ok, Lock} ->
+ io:format("~p~n", [Lock]),
+ case lists:keyfind("1.1.0", 1, Lock) of
+ {_, LockPkgs} ->
+ io:format("~p~n", [LockPkgs]),
+ case lists:keyfind(atom_to_binary(N, latin1), 1, LockPkgs) of
+ {_, {pkg, _, Vsn}, _} ->
+ io:format("~p~n", [Vsn]),
+ {N, {hex, NP, binary_to_list(Vsn)}};
+ _ ->
+ false
+ end;
+ _ ->
+ false
+ end;
+ _ ->
+ false
+ end
+ end,
+ SemVsn = fun
+ ("~>" ++ S0) ->
+ S = case S0 of
+ " " ++ S1 -> S1;
+ _ -> S0
+ end,
+ case length([ok || $$. <- S]) of
+ 0 -> S ++ ".0.0";
+ 1 -> S ++ ".0";
+ _ -> S
+ end;
+ (S) -> S
+ end,
+ fun() ->
+ File = case lists:keyfind(deps, 1, Conf) of
+ false -> [];
+ {_, Deps} ->
+ [begin case case Dep of
+ N when is_atom(N) -> GetHexVsn(N, N);
+ {N, S} when is_atom(N), is_list(S) -> {N, {hex, N, SemVsn(S)}};
+ {N, {pkg, NP}} when is_atom(N) -> GetHexVsn(N, NP);
+ {N, S, {pkg, NP}} -> {N, {hex, NP, S}};
+ {N, S} when is_tuple(S) -> {N, S};
+ {N, _, S} -> {N, S};
+ {N, _, S, _} -> {N, S};
+ _ -> false
+ end of
+ false -> ok;
+ {Name, Source} ->
+ {Method, Repo, Commit} = case Source of
+ {hex, NPV, V} -> {hex, V, NPV};
+ {git, R} -> {git, R, master};
+ {M, R, {branch, C}} -> {M, R, C};
+ {M, R, {ref, C}} -> {M, R, C};
+ {M, R, {tag, C}} -> {M, R, C};
+ {M, R, C} -> {M, R, C}
+ end,
+ Write(io_lib:format("DEPS += ~s\ndep_~s = ~s ~s ~s~n", [Name, Name, Method, Repo, Commit]))
+ end end || Dep <- Deps]
+ end
+ end(),
+ fun() ->
+ case lists:keyfind(erl_first_files, 1, Conf) of
+ false -> ok;
+ {_, Files} ->
+ Names = [[" ", case lists:reverse(F) of
+ "lre." ++ Elif -> lists:reverse(Elif);
+ "lrx." ++ Elif -> lists:reverse(Elif);
+ "lry." ++ Elif -> lists:reverse(Elif);
+ Elif -> lists:reverse(Elif)
+ end] || "src/" ++ F <- Files],
+ Write(io_lib:format("COMPILE_FIRST +=~s\n", [Names]))
+ end
+ end(),
+ Write("\n\nrebar_dep: preprocess pre-deps deps pre-app app\n"),
+ Write("\npreprocess::\n"),
+ Write("\npre-deps::\n"),
+ Write("\npre-app::\n"),
+ PatchHook = fun(Cmd) ->
+ Cmd2 = re:replace(Cmd, "^([g]?make)(.*)( -C.*)", "\\\\1\\\\3\\\\2", [{return, list}]),
+ case Cmd2 of
+ "make -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1);
+ "gmake -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1);
+ "make " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1);
+ "gmake " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1);
+ _ -> Escape(Cmd)
+ end
+ end,
+ fun() ->
+ case lists:keyfind(pre_hooks, 1, Conf) of
+ false -> ok;
+ {_, Hooks} ->
+ [case H of
+ {'get-deps', Cmd} ->
+ Write("\npre-deps::\n\t" ++ PatchHook(Cmd) ++ "\n");
+ {compile, Cmd} ->
+ Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n");
+ {Regex, compile, Cmd} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n");
+ false -> ok
+ end;
+ _ -> ok
+ end || H <- Hooks]
+ end
+ end(),
+ ShellToMk = fun(V0) ->
+ V1 = re:replace(V0, "[$$][(]", "$$\(shell ", [global]),
+ V = re:replace(V1, "([$$])(?![(])(\\\\w*)", "\\\\1(\\\\2)", [global]),
+ re:replace(V, "-Werror\\\\b", "", [{return, list}, global])
+ end,
+ PortSpecs = fun() ->
+ case lists:keyfind(port_specs, 1, Conf) of
+ false ->
+ case filelib:is_dir("$(call core_native_path,$(DEPS_DIR)/$1/c_src)") of
+ false -> [];
+ true ->
+ [{"priv/" ++ proplists:get_value(so_name, Conf, "$(1)_drv.so"),
+ proplists:get_value(port_sources, Conf, ["c_src/*.c"]), []}]
+ end;
+ {_, Specs} ->
+ lists:flatten([case S of
+ {Output, Input} -> {ShellToMk(Output), Input, []};
+ {Regex, Output, Input} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {ShellToMk(Output), Input, []};
+ false -> []
+ end;
+ {Regex, Output, Input, [{env, Env}]} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {ShellToMk(Output), Input, Env};
+ false -> []
+ end
+ end || S <- Specs])
+ end
+ end(),
+ PortSpecWrite = fun (Text) ->
+ file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/c_src/Makefile.erlang.mk)", Text, [append])
+ end,
+ case PortSpecs of
+ [] -> ok;
+ _ ->
+ Write("\npre-app::\n\t@$$\(MAKE) --no-print-directory -f c_src/Makefile.erlang.mk\n"),
+ PortSpecWrite(io_lib:format("ERL_CFLAGS ?= -finline-functions -Wall -fPIC -I \\"~s/erts-~s/include\\" -I \\"~s\\"\n",
+ [code:root_dir(), erlang:system_info(version), code:lib_dir(erl_interface, include)])),
+ PortSpecWrite(io_lib:format("ERL_LDFLAGS ?= -L \\"~s\\" -lei\n",
+ [code:lib_dir(erl_interface, lib)])),
+ [PortSpecWrite(["\n", E, "\n"]) || E <- OsEnv],
+ FilterEnv = fun(Env) ->
+ lists:flatten([case E of
+ {_, _} -> E;
+ {Regex, K, V} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {K, V};
+ false -> []
+ end
+ end || E <- Env])
+ end,
+ MergeEnv = fun(Env) ->
+ lists:foldl(fun ({K, V}, Acc) ->
+ case lists:keyfind(K, 1, Acc) of
+ false -> [{K, rebar_utils:expand_env_variable(V, K, "")}|Acc];
+ {_, V0} -> [{K, rebar_utils:expand_env_variable(V, K, V0)}|Acc]
+ end
+ end, [], Env)
+ end,
+ PortEnv = case lists:keyfind(port_env, 1, Conf) of
+ false -> [];
+ {_, PortEnv0} -> FilterEnv(PortEnv0)
+ end,
+ PortSpec = fun ({Output, Input0, Env}) ->
+ filelib:ensure_dir("$(call core_native_path,$(DEPS_DIR)/$1/)" ++ Output),
+ Input = [[" ", I] || I <- Input0],
+ PortSpecWrite([
+ [["\n", K, " = ", ShellToMk(V)] || {K, V} <- lists:reverse(MergeEnv(PortEnv))],
+ case $(PLATFORM) of
+ darwin -> "\n\nLDFLAGS += -flat_namespace -undefined suppress";
+ _ -> ""
+ end,
+ "\n\nall:: ", Output, "\n\t@:\n\n",
+ "%.o: %.c\n\t$$\(CC) -c -o $$\@ $$\< $$\(CFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.C\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.cc\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.cpp\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ [[Output, ": ", K, " += ", ShellToMk(V), "\n"] || {K, V} <- lists:reverse(MergeEnv(FilterEnv(Env)))],
+ Output, ": $$\(foreach ext,.c .C .cc .cpp,",
+ "$$\(patsubst %$$\(ext),%.o,$$\(filter %$$\(ext),$$\(wildcard", Input, "))))\n",
+ "\t$$\(CC) -o $$\@ $$\? $$\(LDFLAGS) $$\(ERL_LDFLAGS) $$\(DRV_LDFLAGS) $$\(EXE_LDFLAGS)",
+ case {filename:extension(Output), $(PLATFORM)} of
+ {[], _} -> "\n";
+ {_, darwin} -> "\n";
+ _ -> " -shared\n"
+ end])
+ end,
+ [PortSpec(S) || S <- PortSpecs]
+ end,
+ fun() ->
+ case lists:keyfind(plugins, 1, Conf) of
+ false -> ok;
+ {_, Plugins0} ->
+ Plugins = [P || P <- Plugins0, is_tuple(P)],
+ case lists:keyfind('lfe-compile', 1, Plugins) of
+ false -> ok;
+ _ -> Write("\nBUILD_DEPS = lfe lfe.mk\ndep_lfe.mk = git https://github.com/ninenines/lfe.mk master\nDEP_PLUGINS = lfe.mk\n")
+ end
+ end
+ end(),
+ Write("\ninclude $$\(if $$\(ERLANG_MK_FILENAME),$$\(ERLANG_MK_FILENAME),erlang.mk)"),
+ RunPlugin = fun(Plugin, Step) ->
+ case erlang:function_exported(Plugin, Step, 2) of
+ false -> ok;
+ true ->
+ c:cd("$(call core_native_path,$(DEPS_DIR)/$1/)"),
+ Ret = Plugin:Step({config, "", Conf, dict:new(), dict:new(), dict:new(),
+ dict:store(base_dir, "", dict:new())}, undefined),
+ io:format("rebar plugin ~p step ~p ret ~p~n", [Plugin, Step, Ret])
+ end
+ end,
+ fun() ->
+ case lists:keyfind(plugins, 1, Conf) of
+ false -> ok;
+ {_, Plugins0} ->
+ Plugins = [P || P <- Plugins0, is_atom(P)],
+ [begin
+ case lists:keyfind(deps, 1, Conf) of
+ false -> ok;
+ {_, Deps} ->
+ case lists:keyfind(P, 1, Deps) of
+ false -> ok;
+ _ ->
+ Path = "$(call core_native_path,$(DEPS_DIR)/)" ++ atom_to_list(P),
+ io:format("~s", [os:cmd("$(MAKE) -C $(call core_native_path,$(DEPS_DIR)/$1) " ++ Path)]),
+ io:format("~s", [os:cmd("$(MAKE) -C " ++ Path ++ " IS_DEP=1")]),
+ code:add_patha(Path ++ "/ebin")
+ end
+ end
+ end || P <- Plugins],
+ [case code:load_file(P) of
+ {module, P} -> ok;
+ _ ->
+ case lists:keyfind(plugin_dir, 1, Conf) of
+ false -> ok;
+ {_, PluginsDir} ->
+ ErlFile = "$(call core_native_path,$(DEPS_DIR)/$1/)" ++ PluginsDir ++ "/" ++ atom_to_list(P) ++ ".erl",
+ {ok, P, Bin} = compile:file(ErlFile, [binary]),
+ {module, P} = code:load_binary(P, ErlFile, Bin)
+ end
+ end || P <- Plugins],
+ [RunPlugin(P, preprocess) || P <- Plugins],
+ [RunPlugin(P, pre_compile) || P <- Plugins],
+ [RunPlugin(P, compile) || P <- Plugins]
+ end
+ end(),
+ halt()
+endef
+
+define dep_autopatch_appsrc_script.erl
+ AppSrc = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+ AppSrcScript = AppSrc ++ ".script",
+ {ok, Conf0} = file:consult(AppSrc),
+ Bindings0 = erl_eval:new_bindings(),
+ Bindings1 = erl_eval:add_binding('CONFIG', Conf0, Bindings0),
+ Bindings = erl_eval:add_binding('SCRIPT', AppSrcScript, Bindings1),
+ Conf = case file:script(AppSrcScript, Bindings) of
+ {ok, [C]} -> C;
+ {ok, C} -> C
+ end,
+ ok = file:write_file(AppSrc, io_lib:format("~p.~n", [Conf])),
+ halt()
+endef
+
+define dep_autopatch_appsrc.erl
+ AppSrcOut = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+ AppSrcIn = case filelib:is_regular(AppSrcOut) of false -> "$(call core_native_path,$(DEPS_DIR)/$1/ebin/$1.app)"; true -> AppSrcOut end,
+ case filelib:is_regular(AppSrcIn) of
+ false -> ok;
+ true ->
+ {ok, [{application, $(1), L0}]} = file:consult(AppSrcIn),
+ L1 = lists:keystore(modules, 1, L0, {modules, []}),
+ L2 = case lists:keyfind(vsn, 1, L1) of
+ {_, git} -> lists:keyreplace(vsn, 1, L1, {vsn, lists:droplast(os:cmd("git -C $(DEPS_DIR)/$1 describe --dirty --tags --always"))});
+ {_, {cmd, _}} -> lists:keyreplace(vsn, 1, L1, {vsn, "cmd"});
+ _ -> L1
+ end,
+ L3 = case lists:keyfind(registered, 1, L2) of false -> [{registered, []}|L2]; _ -> L2 end,
+ ok = file:write_file(AppSrcOut, io_lib:format("~p.~n", [{application, $(1), L3}])),
+ case AppSrcOut of AppSrcIn -> ok; _ -> ok = file:delete(AppSrcIn) end
+ end,
+ halt()
+endef
+
+define dep_fetch_git
+ git clone -q -n -- $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && git checkout -q $(call dep_commit,$(1));
+endef
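+
+# Example declaration (version is illustrative):
+#
+#   DEPS += cowboy
+#   dep_cowboy = git https://github.com/ninenines/cowboy 2.9.0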
+
+define dep_fetch_git-subfolder
+ mkdir -p $(ERLANG_MK_TMP)/git-subfolder; \
+ git clone -q -n -- $(call dep_repo,$1) \
+ $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1); \
+ cd $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1) \
+ && git checkout -q $(call dep_commit,$1); \
+ ln -s $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1)/$(word 4,$(dep_$(1))) \
+ $(DEPS_DIR)/$(call dep_name,$1);
+endef
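+
+# Example declaration (hypothetical repository and subfolder); the fourth
+# word selects the subfolder that gets symlinked into $(DEPS_DIR):
+#
+#   DEPS += foo
+#   dep_foo = git-subfolder https://github.com/user/monorepo master apps/foo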
+
+define dep_fetch_git-submodule
+ git submodule update --init -- $(DEPS_DIR)/$1;
+endef
+
+define dep_fetch_hg
+ hg clone -q -U $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && hg update -q $(call dep_commit,$(1));
+endef
+
+define dep_fetch_svn
+ svn checkout -q $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+define dep_fetch_cp
+ cp -R $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+define dep_fetch_ln
+ ln -s $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+# A Hex dependency only carries a package version; there is no need to
+# look it up in the Erlang.mk package index.
+define dep_fetch_hex
+ mkdir -p $(ERLANG_MK_TMP)/hex $(DEPS_DIR)/$1; \
+ $(call core_http_get,$(ERLANG_MK_TMP)/hex/$1.tar,\
+ https://repo.hex.pm/tarballs/$(if $(word 3,$(dep_$1)),$(word 3,$(dep_$1)),$1)-$(strip $(word 2,$(dep_$1))).tar); \
+ tar -xOf $(ERLANG_MK_TMP)/hex/$1.tar contents.tar.gz | tar -C $(DEPS_DIR)/$1 -xzf -;
+endef
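+
+# Example declaration (version is illustrative): fetches
+# https://repo.hex.pm/tarballs/jsx-3.1.0.tar and unpacks it into $(DEPS_DIR)/jsx.
+#
+#   DEPS += jsx
+#   dep_jsx = hex 3.1.0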
+
+define dep_fetch_fail
+ echo "Error: Unknown or invalid dependency: $(1)." >&2; \
+ exit 78;
+endef
+
+# Kept for compatibility purposes with older Erlang.mk configuration.
+define dep_fetch_legacy
+ $(warning WARNING: '$(1)' dependency configuration uses deprecated format.) \
+ git clone -q -n -- $(word 1,$(dep_$(1))) $(DEPS_DIR)/$(1); \
+ cd $(DEPS_DIR)/$(1) && git checkout -q $(if $(word 2,$(dep_$(1))),$(word 2,$(dep_$(1))),master);
+endef
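+
+# Legacy format example (hypothetical): the first word is the repository and
+# the optional second word is the commit, defaulting to master.
+#
+#   dep_foo = https://github.com/user/foo v1.0.0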
+
+define dep_target
+$(DEPS_DIR)/$(call dep_name,$1): | $(ERLANG_MK_TMP)
+ $(eval DEP_NAME := $(call dep_name,$1))
+ $(eval DEP_STR := $(if $(filter $1,$(DEP_NAME)),$1,"$1 ($(DEP_NAME))"))
+ $(verbose) if test -d $(APPS_DIR)/$(DEP_NAME); then \
+ echo "Error: Dependency" $(DEP_STR) "conflicts with application found in $(APPS_DIR)/$(DEP_NAME)." >&2; \
+ exit 17; \
+ fi
+ $(verbose) mkdir -p $(DEPS_DIR)
+ $(dep_verbose) $(call dep_fetch_$(strip $(call dep_fetch,$(1))),$(1))
+ $(verbose) if [ -f $(DEPS_DIR)/$(1)/configure.ac -o -f $(DEPS_DIR)/$(1)/configure.in ] \
+ && [ ! -f $(DEPS_DIR)/$(1)/configure ]; then \
+ echo " AUTO " $(DEP_STR); \
+ cd $(DEPS_DIR)/$(1) && autoreconf -Wall -vif -I m4; \
+ fi
+ - $(verbose) if [ -f $(DEPS_DIR)/$(DEP_NAME)/configure ]; then \
+ echo " CONF " $(DEP_STR); \
+ cd $(DEPS_DIR)/$(DEP_NAME) && ./configure; \
+ fi
+ifeq ($(filter $(1),$(NO_AUTOPATCH)),)
+ $(verbose) $$(MAKE) --no-print-directory autopatch-$(DEP_NAME)
+endif
+
+.PHONY: autopatch-$(call dep_name,$1)
+
+autopatch-$(call dep_name,$1)::
+ $(verbose) if [ "$(1)" = "amqp_client" -a "$(RABBITMQ_CLIENT_PATCH)" ]; then \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \
+ echo " PATCH Downloading rabbitmq-codegen"; \
+ git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \
+ fi; \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-server ]; then \
+ echo " PATCH Downloading rabbitmq-server"; \
+ git clone https://github.com/rabbitmq/rabbitmq-server.git $(DEPS_DIR)/rabbitmq-server; \
+ fi; \
+ ln -s $(DEPS_DIR)/amqp_client/deps/rabbit_common-0.0.0 $(DEPS_DIR)/rabbit_common; \
+ elif [ "$(1)" = "rabbit" -a "$(RABBITMQ_SERVER_PATCH)" ]; then \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \
+ echo " PATCH Downloading rabbitmq-codegen"; \
+ git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \
+ fi \
+ elif [ "$1" = "elixir" -a "$(ELIXIR_PATCH)" ]; then \
+ ln -s lib/elixir/ebin $(DEPS_DIR)/elixir/; \
+ else \
+ $$(call dep_autopatch,$(call dep_name,$1)) \
+ fi
+endef
+
+$(foreach dep,$(BUILD_DEPS) $(DEPS),$(eval $(call dep_target,$(dep))))
+
+ifndef IS_APP
+clean:: clean-apps
+
+clean-apps:
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ $(MAKE) -C $$dep clean IS_APP=1; \
+ done
+
+distclean:: distclean-apps
+
+distclean-apps:
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ $(MAKE) -C $$dep distclean IS_APP=1; \
+ done
+endif
+
+ifndef SKIP_DEPS
+distclean:: distclean-deps
+
+distclean-deps:
+ $(gen_verbose) rm -rf $(DEPS_DIR)
+endif
+
+# Forward-declare variables used in core/deps-tools.mk. This is required
+# in case plugins use them.
+
+ERLANG_MK_RECURSIVE_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-deps-list.log
+ERLANG_MK_RECURSIVE_DOC_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-doc-deps-list.log
+ERLANG_MK_RECURSIVE_REL_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-rel-deps-list.log
+ERLANG_MK_RECURSIVE_TEST_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-test-deps-list.log
+ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-shell-deps-list.log
+
+ERLANG_MK_QUERY_DEPS_FILE = $(ERLANG_MK_TMP)/query-deps.log
+ERLANG_MK_QUERY_DOC_DEPS_FILE = $(ERLANG_MK_TMP)/query-doc-deps.log
+ERLANG_MK_QUERY_REL_DEPS_FILE = $(ERLANG_MK_TMP)/query-rel-deps.log
+ERLANG_MK_QUERY_TEST_DEPS_FILE = $(ERLANG_MK_TMP)/query-test-deps.log
+ERLANG_MK_QUERY_SHELL_DEPS_FILE = $(ERLANG_MK_TMP)/query-shell-deps.log
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: clean-app
+
+# Configuration.
+
+ERLC_OPTS ?= -Werror +debug_info +warn_export_vars +warn_shadow_vars \
+ +warn_obsolete_guard # +bin_opt_info +warn_export_all +warn_missing_spec
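+# Projects may override or extend these in their own Makefile, for example:
+#   ERLC_OPTS += +warn_missing_spec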
+COMPILE_FIRST ?=
+COMPILE_FIRST_PATHS = $(addprefix src/,$(addsuffix .erl,$(COMPILE_FIRST)))
+ERLC_EXCLUDE ?=
+ERLC_EXCLUDE_PATHS = $(addprefix src/,$(addsuffix .erl,$(ERLC_EXCLUDE)))
+
+ERLC_ASN1_OPTS ?=
+
+ERLC_MIB_OPTS ?=
+COMPILE_MIB_FIRST ?=
+COMPILE_MIB_FIRST_PATHS = $(addprefix mibs/,$(addsuffix .mib,$(COMPILE_MIB_FIRST)))
+
+# Verbosity.
+
+app_verbose_0 = @echo " APP " $(PROJECT);
+app_verbose_2 = set -x;
+app_verbose = $(app_verbose_$(V))
+
+appsrc_verbose_0 = @echo " APP " $(PROJECT).app.src;
+appsrc_verbose_2 = set -x;
+appsrc_verbose = $(appsrc_verbose_$(V))
+
+makedep_verbose_0 = @echo " DEPEND" $(PROJECT).d;
+makedep_verbose_2 = set -x;
+makedep_verbose = $(makedep_verbose_$(V))
+
+erlc_verbose_0 = @echo " ERLC " $(filter-out $(patsubst %,%.erl,$(ERLC_EXCLUDE)),\
+ $(filter %.erl %.core,$(?F)));
+erlc_verbose_2 = set -x;
+erlc_verbose = $(erlc_verbose_$(V))
+
+xyrl_verbose_0 = @echo " XYRL " $(filter %.xrl %.yrl,$(?F));
+xyrl_verbose_2 = set -x;
+xyrl_verbose = $(xyrl_verbose_$(V))
+
+asn1_verbose_0 = @echo " ASN1 " $(filter %.asn1,$(?F));
+asn1_verbose_2 = set -x;
+asn1_verbose = $(asn1_verbose_$(V))
+
+mib_verbose_0 = @echo " MIB " $(filter %.bin %.mib,$(?F));
+mib_verbose_2 = set -x;
+mib_verbose = $(mib_verbose_$(V))
+
+ifneq ($(wildcard src/),)
+
+# Targets.
+
+app:: $(if $(wildcard ebin/test),clean) deps
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d
+ $(verbose) $(MAKE) --no-print-directory app-build
+
+ifeq ($(wildcard src/$(PROJECT_MOD).erl),)
+define app_file
+{application, '$(PROJECT)', [
+ {description, "$(PROJECT_DESCRIPTION)"},
+ {vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP),
+ {id$(comma)$(space)"$(1)"}$(comma))
+ {modules, [$(call comma_list,$(2))]},
+ {registered, []},
+ {applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(foreach dep,$(DEPS),$(call dep_name,$(dep))))]},
+ {env, $(subst \,\\,$(PROJECT_ENV))}$(if $(findstring {,$(PROJECT_APP_EXTRA_KEYS)),$(comma)$(newline)$(tab)$(subst \,\\,$(PROJECT_APP_EXTRA_KEYS)),)
+]}.
+endef
+else
+define app_file
+{application, '$(PROJECT)', [
+ {description, "$(PROJECT_DESCRIPTION)"},
+ {vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP),
+ {id$(comma)$(space)"$(1)"}$(comma))
+ {modules, [$(call comma_list,$(2))]},
+ {registered, [$(call comma_list,$(PROJECT)_sup $(PROJECT_REGISTERED))]},
+ {applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(foreach dep,$(DEPS),$(call dep_name,$(dep))))]},
+ {mod, {$(PROJECT_MOD), []}},
+ {env, $(subst \,\\,$(PROJECT_ENV))}$(if $(findstring {,$(PROJECT_APP_EXTRA_KEYS)),$(comma)$(newline)$(tab)$(subst \,\\,$(PROJECT_APP_EXTRA_KEYS)),)
+]}.
+endef
+endif
+
+app-build: ebin/$(PROJECT).app
+ $(verbose) :
+
+# Source files.
+
+ALL_SRC_FILES := $(sort $(call core_find,src/,*))
+
+ERL_FILES := $(filter %.erl,$(ALL_SRC_FILES))
+CORE_FILES := $(filter %.core,$(ALL_SRC_FILES))
+
+# ASN.1 files.
+
+ifneq ($(wildcard asn1/),)
+ASN1_FILES = $(sort $(call core_find,asn1/,*.asn1))
+ERL_FILES += $(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES))))
+
+define compile_asn1
+ $(verbose) mkdir -p include/
+ $(asn1_verbose) erlc -v -I include/ -o asn1/ +noobj $(ERLC_ASN1_OPTS) $(1)
+ $(verbose) mv asn1/*.erl src/
+ -$(verbose) mv asn1/*.hrl include/
+ $(verbose) mv asn1/*.asn1db include/
+endef
+
+$(PROJECT).d:: $(ASN1_FILES)
+ $(if $(strip $?),$(call compile_asn1,$?))
+endif
+
+# SNMP MIB files.
+
+ifneq ($(wildcard mibs/),)
+MIB_FILES = $(sort $(call core_find,mibs/,*.mib))
+
+$(PROJECT).d:: $(COMPILE_MIB_FIRST_PATHS) $(MIB_FILES)
+ $(verbose) mkdir -p include/ priv/mibs/
+ $(mib_verbose) erlc -v $(ERLC_MIB_OPTS) -o priv/mibs/ -I priv/mibs/ $?
+ $(mib_verbose) erlc -o include/ -- $(addprefix priv/mibs/,$(patsubst %.mib,%.bin,$(notdir $?)))
+endif
+
+# Leex and Yecc files.
+
+XRL_FILES := $(filter %.xrl,$(ALL_SRC_FILES))
+XRL_ERL_FILES = $(addprefix src/,$(patsubst %.xrl,%.erl,$(notdir $(XRL_FILES))))
+ERL_FILES += $(XRL_ERL_FILES)
+
+YRL_FILES := $(filter %.yrl,$(ALL_SRC_FILES))
+YRL_ERL_FILES = $(addprefix src/,$(patsubst %.yrl,%.erl,$(notdir $(YRL_FILES))))
+ERL_FILES += $(YRL_ERL_FILES)
+
+$(PROJECT).d:: $(XRL_FILES) $(YRL_FILES)
+ $(if $(strip $?),$(xyrl_verbose) erlc -v -o src/ $(YRL_ERLC_OPTS) $?)
+
+# Erlang and Core Erlang files.
+
+define makedep.erl
+ E = ets:new(makedep, [bag]),
+ G = digraph:new([acyclic]),
+ ErlFiles = lists:usort(string:tokens("$(ERL_FILES)", " ")),
+ DepsDir = "$(call core_native_path,$(DEPS_DIR))",
+ AppsDir = "$(call core_native_path,$(APPS_DIR))",
+ DepsDirsSrc = "$(if $(wildcard $(DEPS_DIR)/*/src), $(call core_native_path,$(wildcard $(DEPS_DIR)/*/src)))",
+ DepsDirsInc = "$(if $(wildcard $(DEPS_DIR)/*/include), $(call core_native_path,$(wildcard $(DEPS_DIR)/*/include)))",
+ AppsDirsSrc = "$(if $(wildcard $(APPS_DIR)/*/src), $(call core_native_path,$(wildcard $(APPS_DIR)/*/src)))",
+ AppsDirsInc = "$(if $(wildcard $(APPS_DIR)/*/include), $(call core_native_path,$(wildcard $(APPS_DIR)/*/include)))",
+ DepsDirs = lists:usort(string:tokens(DepsDirsSrc++DepsDirsInc, " ")),
+ AppsDirs = lists:usort(string:tokens(AppsDirsSrc++AppsDirsInc, " ")),
+ Modules = [{list_to_atom(filename:basename(F, ".erl")), F} || F <- ErlFiles],
+ Add = fun (Mod, Dep) ->
+ case lists:keyfind(Dep, 1, Modules) of
+ false -> ok;
+ {_, DepFile} ->
+ {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+ ets:insert(E, {ModFile, DepFile}),
+ digraph:add_vertex(G, Mod),
+ digraph:add_vertex(G, Dep),
+ digraph:add_edge(G, Mod, Dep)
+ end
+ end,
+ AddHd = fun (F, Mod, DepFile) ->
+ case file:open(DepFile, [read]) of
+ {error, enoent} ->
+ ok;
+ {ok, Fd} ->
+ {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+ case ets:match(E, {ModFile, DepFile}) of
+ [] ->
+ ets:insert(E, {ModFile, DepFile}),
+ F(F, Fd, Mod,0);
+ _ -> ok
+ end
+ end
+ end,
+ SearchHrl = fun
+ F(_Hrl, []) -> {error,enoent};
+ F(Hrl, [Dir|Dirs]) ->
+ HrlF = filename:join([Dir,Hrl]),
+ case filelib:is_file(HrlF) of
+ true ->
+ {ok, HrlF};
+ false -> F(Hrl,Dirs)
+ end
+ end,
+ Attr = fun
+ (_F, Mod, behavior, Dep) ->
+ Add(Mod, Dep);
+ (_F, Mod, behaviour, Dep) ->
+ Add(Mod, Dep);
+ (_F, Mod, compile, {parse_transform, Dep}) ->
+ Add(Mod, Dep);
+ (_F, Mod, compile, Opts) when is_list(Opts) ->
+ case proplists:get_value(parse_transform, Opts) of
+ undefined -> ok;
+ Dep -> Add(Mod, Dep)
+ end;
+ (F, Mod, include, Hrl) ->
+ case SearchHrl(Hrl, ["src", "include",AppsDir,DepsDir]++AppsDirs++DepsDirs) of
+ {ok, FoundHrl} -> AddHd(F, Mod, FoundHrl);
+ {error, _} -> false
+ end;
+ (F, Mod, include_lib, Hrl) ->
+ case SearchHrl(Hrl, ["src", "include",AppsDir,DepsDir]++AppsDirs++DepsDirs) of
+ {ok, FoundHrl} -> AddHd(F, Mod, FoundHrl);
+ {error, _} -> false
+ end;
+ (F, Mod, import, {Imp, _}) ->
+ IsFile =
+ case lists:keyfind(Imp, 1, Modules) of
+ false -> false;
+ {_, FilePath} -> filelib:is_file(FilePath)
+ end,
+ case IsFile of
+ false -> ok;
+ true -> Add(Mod, Imp)
+ end;
+ (_, _, _, _) -> ok
+ end,
+ MakeDepend = fun
+ (F, Fd, Mod, StartLocation) ->
+ {ok, Filename} = file:pid2name(Fd),
+ case io:parse_erl_form(Fd, undefined, StartLocation) of
+ {ok, AbsData, EndLocation} ->
+ case AbsData of
+ {attribute, _, Key, Value} ->
+ Attr(F, Mod, Key, Value),
+ F(F, Fd, Mod, EndLocation);
+ _ -> F(F, Fd, Mod, EndLocation)
+ end;
+ {eof, _ } -> file:close(Fd);
+ {error, ErrorDescription } ->
+ file:close(Fd);
+ {error, ErrorInfo, ErrorLocation} ->
+ F(F, Fd, Mod, ErrorLocation)
+ end,
+ ok
+ end,
+ [begin
+ Mod = list_to_atom(filename:basename(F, ".erl")),
+ case file:open(F, [read]) of
+ {ok, Fd} -> MakeDepend(MakeDepend, Fd, Mod,0);
+ {error, enoent} -> ok
+ end
+ end || F <- ErlFiles],
+ Depend = sofs:to_external(sofs:relation_to_family(sofs:relation(ets:tab2list(E)))),
+ CompileFirst = [X || X <- lists:reverse(digraph_utils:topsort(G)), [] =/= digraph:in_neighbours(G, X)],
+ TargetPath = fun(Target) ->
+ case lists:keyfind(Target, 1, Modules) of
+ false -> "";
+ {_, DepFile} ->
+ DirSubname = tl(string:tokens(filename:dirname(DepFile), "/")),
+ string:join(DirSubname ++ [atom_to_list(Target)], "/")
+ end
+ end,
+ Output0 = [
+ "# Generated by Erlang.mk. Edit at your own risk!\n\n",
+ [[F, "::", [[" ", D] || D <- Deps], "; @touch \$$@\n"] || {F, Deps} <- Depend],
+ "\nCOMPILE_FIRST +=", [[" ", TargetPath(CF)] || CF <- CompileFirst], "\n"
+ ],
+ Output = case "é" of
+ [233] -> unicode:characters_to_binary(Output0);
+ _ -> Output0
+ end,
+ ok = file:write_file("$(1)", Output),
+ halt()
+endef
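+
+# The generated $(PROJECT).d contains one rule per module with dependencies,
+# plus a COMPILE_FIRST line, along these lines (module names are hypothetical):
+#
+#   src/my_mod.erl:: src/my_behaviour.erl include/my.hrl; @touch $@
+#   COMPILE_FIRST += my_behaviour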
+
+ifeq ($(if $(NO_MAKEDEP),$(wildcard $(PROJECT).d),),)
+$(PROJECT).d:: $(ERL_FILES) $(call core_find,include/,*.hrl) $(MAKEFILE_LIST)
+ $(makedep_verbose) $(call erlang,$(call makedep.erl,$@))
+endif
+
+ifeq ($(IS_APP)$(IS_DEP),)
+ifneq ($(words $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES)),0)
+# Rebuild everything when the Makefile changes.
+$(ERLANG_MK_TMP)/last-makefile-change: $(MAKEFILE_LIST) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES); \
+ touch -c $(PROJECT).d; \
+ fi
+ $(verbose) touch $@
+
+$(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES):: $(ERLANG_MK_TMP)/last-makefile-change
+ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change
+endif
+endif
+
+$(PROJECT).d::
+ $(verbose) :
+
+include $(wildcard $(PROJECT).d)
+
+ebin/$(PROJECT).app:: ebin/
+
+ebin/:
+ $(verbose) mkdir -p ebin/
+
+define compile_erl
+ $(erlc_verbose) erlc -v $(if $(IS_DEP),$(filter-out -Werror,$(ERLC_OPTS)),$(ERLC_OPTS)) -o ebin/ \
+ -pa ebin/ -I include/ $(filter-out $(ERLC_EXCLUDE_PATHS),$(COMPILE_FIRST_PATHS) $(1))
+endef
+
+define validate_app_file
+ case file:consult("ebin/$(PROJECT).app") of
+ {ok, _} -> halt();
+ _ -> halt(1)
+ end
+endef
+
+ebin/$(PROJECT).app:: $(ERL_FILES) $(CORE_FILES) $(wildcard src/$(PROJECT).app.src)
+ $(eval FILES_TO_COMPILE := $(filter-out src/$(PROJECT).app.src,$?))
+ $(if $(strip $(FILES_TO_COMPILE)),$(call compile_erl,$(FILES_TO_COMPILE)))
+# Older git versions do not have the --first-parent flag. Do without in that case.
+ $(eval GITDESCRIBE := $(shell git describe --dirty --abbrev=7 --tags --always --first-parent 2>/dev/null \
+ || git describe --dirty --abbrev=7 --tags --always 2>/dev/null || true))
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(filter-out $(ERLC_EXCLUDE_PATHS),$(ERL_FILES) $(CORE_FILES) $(BEAM_FILES)))))))
+ifeq ($(wildcard src/$(PROJECT).app.src),)
+ $(app_verbose) printf '$(subst %,%%,$(subst $(newline),\n,$(subst ','\'',$(call app_file,$(GITDESCRIBE),$(MODULES)))))' \
+ > ebin/$(PROJECT).app
+ $(verbose) if ! $(call erlang,$(call validate_app_file)); then \
+ echo "The .app file produced is invalid. Please verify the value of PROJECT_ENV." >&2; \
+ exit 1; \
+ fi
+else
+ $(verbose) if [ -z "$$(grep -e '^[^%]*{\s*modules\s*,' src/$(PROJECT).app.src)" ]; then \
+ echo "Empty modules entry not found in $(PROJECT).app.src. Please consult the erlang.mk documentation for instructions." >&2; \
+ exit 1; \
+ fi
+ $(appsrc_verbose) cat src/$(PROJECT).app.src \
+ | sed "s/{[[:space:]]*modules[[:space:]]*,[[:space:]]*\[\]}/{modules, \[$(call comma_list,$(MODULES))\]}/" \
+ | sed "s/{id,[[:space:]]*\"git\"}/{id, \"$(subst /,\/,$(GITDESCRIBE))\"}/" \
+ > ebin/$(PROJECT).app
+endif
+ifneq ($(wildcard src/$(PROJECT).appup),)
+ $(verbose) cp src/$(PROJECT).appup ebin/
+endif
+
+clean:: clean-app
+
+clean-app:
+ $(gen_verbose) rm -rf $(PROJECT).d ebin/ priv/mibs/ $(XRL_ERL_FILES) $(YRL_ERL_FILES) \
+ $(addprefix include/,$(patsubst %.mib,%.hrl,$(notdir $(MIB_FILES)))) \
+ $(addprefix include/,$(patsubst %.asn1,%.hrl,$(notdir $(ASN1_FILES)))) \
+ $(addprefix include/,$(patsubst %.asn1,%.asn1db,$(notdir $(ASN1_FILES)))) \
+ $(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES))))
+
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Viktor Söderqvist <viktor@zuiderkwast.se>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: docs-deps
+
+# Configuration.
+
+ALL_DOC_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(DOC_DEPS))
+
+# Targets.
+
+$(foreach dep,$(DOC_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+doc-deps:
+else
+doc-deps: $(ALL_DOC_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_DOC_DEPS_DIRS) ; do $(MAKE) -C $$dep IS_DEP=1; done
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: rel-deps
+
+# Configuration.
+
+ALL_REL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(REL_DEPS))
+
+# Targets.
+
+$(foreach dep,$(REL_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+rel-deps:
+else
+rel-deps: $(ALL_REL_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_REL_DEPS_DIRS) ; do $(MAKE) -C $$dep; done
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: test-deps test-dir test-build clean-test-dir
+
+# Configuration.
+
+TEST_DIR ?= $(CURDIR)/test
+
+ALL_TEST_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(TEST_DEPS))
+
+TEST_ERLC_OPTS ?= +debug_info +warn_export_vars +warn_shadow_vars +warn_obsolete_guard
+TEST_ERLC_OPTS += -DTEST=1
+
+# Targets.
+
+$(foreach dep,$(TEST_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+test-deps:
+else
+test-deps: $(ALL_TEST_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_TEST_DEPS_DIRS) ; do \
+ if [ -z "$(strip $(FULL))" ] && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ else \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ fi \
+ done
+endif
+
+ifneq ($(wildcard $(TEST_DIR)),)
+test-dir: $(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build
+ @:
+
+test_erlc_verbose_0 = @echo " ERLC " $(filter-out $(patsubst %,%.erl,$(ERLC_EXCLUDE)),\
+ $(filter %.erl %.core,$(notdir $(FILES_TO_COMPILE))));
+test_erlc_verbose_2 = set -x;
+test_erlc_verbose = $(test_erlc_verbose_$(V))
+
+define compile_test_erl
+ $(test_erlc_verbose) erlc -v $(TEST_ERLC_OPTS) -o $(TEST_DIR) \
+ -pa ebin/ -I include/ $(1)
+endef
+
+ERL_TEST_FILES = $(call core_find,$(TEST_DIR)/,*.erl)
+$(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build: $(ERL_TEST_FILES) $(MAKEFILE_LIST)
+ $(eval FILES_TO_COMPILE := $(if $(filter $(MAKEFILE_LIST),$?),$(filter $(ERL_TEST_FILES),$^),$?))
+ $(if $(strip $(FILES_TO_COMPILE)),$(call compile_test_erl,$(FILES_TO_COMPILE)) && touch $@)
+endif
+
+test-build:: IS_TEST=1
+test-build:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build:: $(if $(wildcard src),$(if $(wildcard ebin/test),,clean)) $(if $(IS_APP),,deps test-deps)
+# We already compiled everything when IS_APP=1.
+ifndef IS_APP
+ifneq ($(wildcard src),)
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(verbose) $(MAKE) --no-print-directory app-build ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(gen_verbose) touch ebin/test
+endif
+ifneq ($(wildcard $(TEST_DIR)),)
+ $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+endif
+endif
+
+# Roughly the same as test-build, but when IS_APP=1.
+# We only care about compiling the current application.
+ifdef IS_APP
+test-build-app:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build-app:: deps test-deps
+ifneq ($(wildcard src),)
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(verbose) $(MAKE) --no-print-directory app-build ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(gen_verbose) touch ebin/test
+endif
+ifneq ($(wildcard $(TEST_DIR)),)
+ $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+endif
+endif
+
+clean:: clean-test-dir
+
+clean-test-dir:
+ifneq ($(wildcard $(TEST_DIR)/*.beam),)
+ $(gen_verbose) rm -f $(TEST_DIR)/*.beam $(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: rebar.config
+
+# We strip out -Werror because we don't want to fail due to
+# warnings when used as a dependency.
+
+compat_prepare_erlc_opts = $(shell echo "$1" | sed 's/, */,/g')
+
+define compat_convert_erlc_opts
+$(if $(filter-out -Werror,$1),\
+ $(if $(findstring +,$1),\
+ $(shell echo $1 | cut -b 2-)))
+endef
+
+define compat_erlc_opts_to_list
+[$(call comma_list,$(foreach o,$(call compat_prepare_erlc_opts,$1),$(call compat_convert_erlc_opts,$o)))]
+endef
+
+define compat_rebar_config
+{deps, [
+$(call comma_list,$(foreach d,$(DEPS),\
+ $(if $(filter hex,$(call dep_fetch,$d)),\
+ {$(call dep_name,$d)$(comma)"$(call dep_repo,$d)"},\
+ {$(call dep_name,$d)$(comma)".*"$(comma){git,"$(call dep_repo,$d)"$(comma)"$(call dep_commit,$d)"}})))
+]}.
+{erl_opts, $(call compat_erlc_opts_to_list,$(ERLC_OPTS))}.
+endef
+
+rebar.config:
+ $(gen_verbose) $(call core_render,compat_rebar_config,rebar.config)
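+
+# Example: with the default ERLC_OPTS and a single dependency declared as
+# "dep_cowboy = git https://github.com/ninenines/cowboy 2.9.0" (illustrative),
+# "make rebar.config" renders roughly:
+#
+#   {deps, [
+#       {cowboy,".*",{git,"https://github.com/ninenines/cowboy","2.9.0"}}
+#   ]}.
+#   {erl_opts, [debug_info,warn_export_vars,warn_shadow_vars,warn_obsolete_guard]}.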
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter asciideck,$(DEPS) $(DOC_DEPS)),asciideck)
+
+.PHONY: asciidoc asciidoc-guide asciidoc-manual install-asciidoc distclean-asciidoc-guide distclean-asciidoc-manual
+
+# Core targets.
+
+docs:: asciidoc
+
+distclean:: distclean-asciidoc-guide distclean-asciidoc-manual
+
+# Plugin-specific targets.
+
+asciidoc: asciidoc-guide asciidoc-manual
+
+# User guide.
+
+ifeq ($(wildcard doc/src/guide/book.asciidoc),)
+asciidoc-guide:
+else
+asciidoc-guide: distclean-asciidoc-guide doc-deps
+ a2x -v -f pdf doc/src/guide/book.asciidoc && mv doc/src/guide/book.pdf doc/guide.pdf
+ a2x -v -f chunked doc/src/guide/book.asciidoc && mv doc/src/guide/book.chunked/ doc/html/
+
+distclean-asciidoc-guide:
+ $(gen_verbose) rm -rf doc/html/ doc/guide.pdf
+endif
+
+# Man pages.
+
+ASCIIDOC_MANUAL_FILES := $(wildcard doc/src/manual/*.asciidoc)
+
+ifeq ($(ASCIIDOC_MANUAL_FILES),)
+asciidoc-manual:
+else
+
+# Configuration.
+
+MAN_INSTALL_PATH ?= /usr/local/share/man
+MAN_SECTIONS ?= 3 7
+MAN_PROJECT ?= $(shell echo $(PROJECT) | sed 's/^./\U&\E/')
+MAN_VERSION ?= $(PROJECT_VERSION)
+
+# Plugin-specific targets.
+
+define asciidoc2man.erl
+try
+ [begin
+ io:format(" ADOC ~s~n", [F]),
+ ok = asciideck:to_manpage(asciideck:parse_file(F), #{
+ compress => gzip,
+ outdir => filename:dirname(F),
+ extra2 => "$(MAN_PROJECT) $(MAN_VERSION)",
+ extra3 => "$(MAN_PROJECT) Function Reference"
+ })
+ end || F <- [$(shell echo $(addprefix $(comma)\",$(addsuffix \",$1)) | sed 's/^.//')]],
+ halt(0)
+catch C:E ->
+ io:format("Exception ~p:~p~nStacktrace: ~p~n", [C, E, erlang:get_stacktrace()]),
+ halt(1)
+end.
+endef
+
+asciidoc-manual:: doc-deps
+
+asciidoc-manual:: $(ASCIIDOC_MANUAL_FILES)
+ $(gen_verbose) $(call erlang,$(call asciidoc2man.erl,$?))
+ $(verbose) $(foreach s,$(MAN_SECTIONS),mkdir -p doc/man$s/ && mv doc/src/manual/*.$s.gz doc/man$s/;)
+
+install-docs:: install-asciidoc
+
+install-asciidoc: asciidoc-manual
+ $(foreach s,$(MAN_SECTIONS),\
+ mkdir -p $(MAN_INSTALL_PATH)/man$s/ && \
+ install -g `id -g` -o `id -u` -m 0644 doc/man$s/*.gz $(MAN_INSTALL_PATH)/man$s/;)
+
+distclean-asciidoc-manual:
+ $(gen_verbose) rm -rf $(addprefix doc/man,$(MAN_SECTIONS))
+endif
+endif
+
+# Copyright (c) 2014-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: bootstrap bootstrap-lib bootstrap-rel new list-templates
+
+# Core targets.
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Bootstrap targets:" \
+ " bootstrap Generate a skeleton of an OTP application" \
+ " bootstrap-lib Generate a skeleton of an OTP library" \
+ " bootstrap-rel Generate the files needed to build a release" \
+ " new-app in=NAME Create a new local OTP application NAME" \
+ " new-lib in=NAME Create a new local OTP library NAME" \
+ " new t=TPL n=NAME Generate a module NAME based on the template TPL" \
+		"  new t=T n=N in=APP Generate a module N based on the template T in APP" \
+ " list-templates List available templates"
+
+# Bootstrap templates.
+
+define bs_appsrc
+{application, $p, [
+ {description, ""},
+ {vsn, "0.1.0"},
+ {id, "git"},
+ {modules, []},
+ {registered, []},
+ {applications, [
+ kernel,
+ stdlib
+ ]},
+ {mod, {$p_app, []}},
+ {env, []}
+]}.
+endef
+
+define bs_appsrc_lib
+{application, $p, [
+ {description, ""},
+ {vsn, "0.1.0"},
+ {id, "git"},
+ {modules, []},
+ {registered, []},
+ {applications, [
+ kernel,
+ stdlib
+ ]}
+]}.
+endef
+
+# To prevent autocompletion issues with ZSH, we add "include erlang.mk"
+# separately during the actual bootstrap.
+define bs_Makefile
+PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.1.0
+$(if $(SP),
+# Whitespace to be used when creating files from templates.
+SP = $(SP)
+)
+endef
+
+define bs_apps_Makefile
+PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.1.0
+$(if $(SP),
+# Whitespace to be used when creating files from templates.
+SP = $(SP)
+)
+# Make sure we know where the applications are located.
+ROOT_DIR ?= $(call core_relpath,$(dir $(ERLANG_MK_FILENAME)),$(APPS_DIR)/app)
+APPS_DIR ?= ..
+DEPS_DIR ?= $(call core_relpath,$(DEPS_DIR),$(APPS_DIR)/app)
+
+include $$(ROOT_DIR)/erlang.mk
+endef
+
+define bs_app
+-module($p_app).
+-behaviour(application).
+
+-export([start/2]).
+-export([stop/1]).
+
+start(_Type, _Args) ->
+ $p_sup:start_link().
+
+stop(_State) ->
+ ok.
+endef
+
+define bs_relx_config
+{release, {$p_release, "1"}, [$p, sasl, runtime_tools]}.
+{extended_start_script, true}.
+{sys_config, "config/sys.config"}.
+{vm_args, "config/vm.args"}.
+endef
+
+define bs_sys_config
+[
+].
+endef
+
+define bs_vm_args
+-name $p@127.0.0.1
+-setcookie $p
+-heart
+endef
+
+# Normal templates.
+
+define tpl_supervisor
+-module($(n)).
+-behaviour(supervisor).
+
+-export([start_link/0]).
+-export([init/1]).
+
+start_link() ->
+ supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+init([]) ->
+ Procs = [],
+ {ok, {{one_for_one, 1, 5}, Procs}}.
+endef
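+
+# For instance, "make new t=supervisor n=my_sup" renders this template to
+# src/my_sup.erl (the module name is hypothetical).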
+
+define tpl_gen_server
+-module($(n)).
+-behaviour(gen_server).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_server.
+-export([init/1]).
+-export([handle_call/3]).
+-export([handle_cast/2]).
+-export([handle_info/2]).
+-export([terminate/2]).
+-export([code_change/3]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_server:start_link(?MODULE, [], []).
+
+%% gen_server.
+
+init([]) ->
+ {ok, #state{}}.
+
+handle_call(_Request, _From, State) ->
+ {reply, ignored, State}.
+
+handle_cast(_Msg, State) ->
+ {noreply, State}.
+
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+endef
+
+define tpl_module
+-module($(n)).
+-export([]).
+endef
+
+define tpl_cowboy_http
+-module($(n)).
+-behaviour(cowboy_http_handler).
+
+-export([init/3]).
+-export([handle/2]).
+-export([terminate/3]).
+
+-record(state, {
+}).
+
+init(_, Req, _Opts) ->
+ {ok, Req, #state{}}.
+
+handle(Req, State=#state{}) ->
+ {ok, Req2} = cowboy_req:reply(200, Req),
+ {ok, Req2, State}.
+
+terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_gen_fsm
+-module($(n)).
+-behaviour(gen_fsm).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_fsm.
+-export([init/1]).
+-export([state_name/2]).
+-export([handle_event/3]).
+-export([state_name/3]).
+-export([handle_sync_event/4]).
+-export([handle_info/3]).
+-export([terminate/3]).
+-export([code_change/4]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_fsm:start_link(?MODULE, [], []).
+
+%% gen_fsm.
+
+init([]) ->
+ {ok, state_name, #state{}}.
+
+state_name(_Event, StateData) ->
+ {next_state, state_name, StateData}.
+
+handle_event(_Event, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+state_name(_Event, _From, StateData) ->
+ {reply, ignored, state_name, StateData}.
+
+handle_sync_event(_Event, _From, StateName, StateData) ->
+ {reply, ignored, StateName, StateData}.
+
+handle_info(_Info, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+terminate(_Reason, _StateName, _StateData) ->
+ ok.
+
+code_change(_OldVsn, StateName, StateData, _Extra) ->
+ {ok, StateName, StateData}.
+endef
+
+define tpl_gen_statem
+-module($(n)).
+-behaviour(gen_statem).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_statem.
+-export([callback_mode/0]).
+-export([init/1]).
+-export([state_name/3]).
+-export([handle_event/4]).
+-export([terminate/3]).
+-export([code_change/4]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_statem:start_link(?MODULE, [], []).
+
+%% gen_statem.
+
+callback_mode() ->
+ state_functions.
+
+init([]) ->
+ {ok, state_name, #state{}}.
+
+state_name(_EventType, _EventData, StateData) ->
+ {next_state, state_name, StateData}.
+
+handle_event(_EventType, _EventData, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+terminate(_Reason, _StateName, _StateData) ->
+ ok.
+
+code_change(_OldVsn, StateName, StateData, _Extra) ->
+ {ok, StateName, StateData}.
+endef
+
+define tpl_cowboy_loop
+-module($(n)).
+-behaviour(cowboy_loop_handler).
+
+-export([init/3]).
+-export([info/3]).
+-export([terminate/3]).
+
+-record(state, {
+}).
+
+init(_, Req, _Opts) ->
+ {loop, Req, #state{}, 5000, hibernate}.
+
+info(_Info, Req, State) ->
+ {loop, Req, State, hibernate}.
+
+terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_cowboy_rest
+-module($(n)).
+
+-export([init/3]).
+-export([content_types_provided/2]).
+-export([get_html/2]).
+
+init(_, _Req, _Opts) ->
+ {upgrade, protocol, cowboy_rest}.
+
+content_types_provided(Req, State) ->
+ {[{{<<"text">>, <<"html">>, '*'}, get_html}], Req, State}.
+
+get_html(Req, State) ->
+ {<<"<html><body>This is REST!</body></html>">>, Req, State}.
+endef
+
+define tpl_cowboy_ws
+-module($(n)).
+-behaviour(cowboy_websocket_handler).
+
+-export([init/3]).
+-export([websocket_init/3]).
+-export([websocket_handle/3]).
+-export([websocket_info/3]).
+-export([websocket_terminate/3]).
+
+-record(state, {
+}).
+
+init(_, _, _) ->
+ {upgrade, protocol, cowboy_websocket}.
+
+websocket_init(_, Req, _Opts) ->
+ Req2 = cowboy_req:compact(Req),
+ {ok, Req2, #state{}}.
+
+websocket_handle({text, Data}, Req, State) ->
+ {reply, {text, Data}, Req, State};
+websocket_handle({binary, Data}, Req, State) ->
+ {reply, {binary, Data}, Req, State};
+websocket_handle(_Frame, Req, State) ->
+ {ok, Req, State}.
+
+websocket_info(_Info, Req, State) ->
+ {ok, Req, State}.
+
+websocket_terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_ranch_protocol
+-module($(n)).
+-behaviour(ranch_protocol).
+
+-export([start_link/4]).
+-export([init/4]).
+
+-type opts() :: [].
+-export_type([opts/0]).
+
+-record(state, {
+ socket :: inet:socket(),
+ transport :: module()
+}).
+
+start_link(Ref, Socket, Transport, Opts) ->
+ Pid = spawn_link(?MODULE, init, [Ref, Socket, Transport, Opts]),
+ {ok, Pid}.
+
+-spec init(ranch:ref(), inet:socket(), module(), opts()) -> ok.
+init(Ref, Socket, Transport, _Opts) ->
+ ok = ranch:accept_ack(Ref),
+ loop(#state{socket=Socket, transport=Transport}).
+
+loop(State) ->
+ loop(State).
+endef
+
+# Plugin-specific targets.
+
+ifndef WS
+ifdef SP
+WS = $(subst a,,a $(wordlist 1,$(SP),a a a a a a a a a a a a a a a a a a a a))
+else
+WS = $(tab)
+endif
+endif
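+
+# For example, "make bootstrap SP=2" records SP in the generated Makefile,
+# and templates are then rendered with two-space indentation instead of tabs.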
+
+bootstrap:
+ifneq ($(wildcard src/),)
+ $(error Error: src/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(eval n := $(PROJECT)_sup)
+ $(verbose) $(call core_render,bs_Makefile,Makefile)
+ $(verbose) echo "include erlang.mk" >> Makefile
+ $(verbose) mkdir src/
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc,src/$(PROJECT).app.src)
+endif
+ $(verbose) $(call core_render,bs_app,src/$(PROJECT)_app.erl)
+ $(verbose) $(call core_render,tpl_supervisor,src/$(PROJECT)_sup.erl)
+
+bootstrap-lib:
+ifneq ($(wildcard src/),)
+ $(error Error: src/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(verbose) $(call core_render,bs_Makefile,Makefile)
+ $(verbose) echo "include erlang.mk" >> Makefile
+ $(verbose) mkdir src/
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc_lib,src/$(PROJECT).app.src)
+endif
+
+bootstrap-rel:
+ifneq ($(wildcard relx.config),)
+ $(error Error: relx.config already exists)
+endif
+ifneq ($(wildcard config/),)
+ $(error Error: config/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(verbose) $(call core_render,bs_relx_config,relx.config)
+ $(verbose) mkdir config/
+ $(verbose) $(call core_render,bs_sys_config,config/sys.config)
+ $(verbose) $(call core_render,bs_vm_args,config/vm.args)
+
+new-app:
+ifndef in
+ $(error Usage: $(MAKE) new-app in=APP)
+endif
+ifneq ($(wildcard $(APPS_DIR)/$in),)
+ $(error Error: Application $in already exists)
+endif
+ $(eval p := $(in))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(eval n := $(in)_sup)
+ $(verbose) mkdir -p $(APPS_DIR)/$p/src/
+ $(verbose) $(call core_render,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile)
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc,$(APPS_DIR)/$p/src/$p.app.src)
+endif
+ $(verbose) $(call core_render,bs_app,$(APPS_DIR)/$p/src/$p_app.erl)
+ $(verbose) $(call core_render,tpl_supervisor,$(APPS_DIR)/$p/src/$p_sup.erl)
+
+new-lib:
+ifndef in
+ $(error Usage: $(MAKE) new-lib in=APP)
+endif
+ifneq ($(wildcard $(APPS_DIR)/$in),)
+ $(error Error: Application $in already exists)
+endif
+ $(eval p := $(in))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(verbose) mkdir -p $(APPS_DIR)/$p/src/
+ $(verbose) $(call core_render,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile)
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc_lib,$(APPS_DIR)/$p/src/$p.app.src)
+endif
+
+new:
+ifeq ($(wildcard src/)$(in),)
+ $(error Error: src/ directory does not exist)
+endif
+ifndef t
+ $(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP])
+endif
+ifndef n
+ $(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP])
+endif
+ifdef in
+ $(verbose) $(call core_render,tpl_$(t),$(APPS_DIR)/$(in)/src/$(n).erl)
+else
+ $(verbose) $(call core_render,tpl_$(t),src/$(n).erl)
+endif
+
+list-templates:
+ $(verbose) @echo Available templates:
+ $(verbose) printf " %s\n" $(sort $(patsubst tpl_%,%,$(filter tpl_%,$(.VARIABLES))))
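+
+# For example (hypothetical names), `make new t=cowboy_rest n=my_handler`
+# renders the tpl_cowboy_rest template above into src/my_handler.erl.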
+
+# Copyright (c) 2014-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: clean-c_src distclean-c_src-env
+
+# Configuration.
+
+C_SRC_DIR ?= $(CURDIR)/c_src
+C_SRC_ENV ?= $(C_SRC_DIR)/env.mk
+C_SRC_OUTPUT ?= $(CURDIR)/priv/$(PROJECT)
+C_SRC_TYPE ?= shared
+
+# System type and C compiler/flags.
+
+ifeq ($(PLATFORM),msys2)
+ C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?= .exe
+ C_SRC_OUTPUT_SHARED_EXTENSION ?= .dll
+else
+ C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?=
+ C_SRC_OUTPUT_SHARED_EXTENSION ?= .so
+endif
+
+ifeq ($(C_SRC_TYPE),shared)
+ C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_SHARED_EXTENSION)
+else
+ C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_EXECUTABLE_EXTENSION)
+endif
+
+ifeq ($(PLATFORM),msys2)
+# We hardcode the compiler used on MSYS2. The default CC=cc does
+# not produce working code. The "gcc" MSYS2 package also doesn't.
+ CC = /mingw64/bin/gcc
+ export CC
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),darwin)
+ CC ?= cc
+ CFLAGS ?= -O3 -std=c99 -arch x86_64 -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -arch x86_64 -Wall
+ LDFLAGS ?= -arch x86_64 -flat_namespace -undefined suppress
+else ifeq ($(PLATFORM),freebsd)
+ CC ?= cc
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),linux)
+ CC ?= gcc
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+endif
+
+ifneq ($(PLATFORM),msys2)
+ CFLAGS += -fPIC
+ CXXFLAGS += -fPIC
+endif
+
+CFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
+CXXFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
+
+LDLIBS += -L"$(ERL_INTERFACE_LIB_DIR)" -lei
+
+# Verbosity.
+
+c_verbose_0 = @echo " C " $(filter-out $(notdir $(MAKEFILE_LIST) $(C_SRC_ENV)),$(^F));
+c_verbose = $(c_verbose_$(V))
+
+cpp_verbose_0 = @echo " CPP " $(filter-out $(notdir $(MAKEFILE_LIST) $(C_SRC_ENV)),$(^F));
+cpp_verbose = $(cpp_verbose_$(V))
+
+link_verbose_0 = @echo " LD " $(@F);
+link_verbose = $(link_verbose_$(V))
+
+# Targets.
+
+ifeq ($(wildcard $(C_SRC_DIR)),)
+else ifneq ($(wildcard $(C_SRC_DIR)/Makefile),)
+app:: app-c_src
+
+test-build:: app-c_src
+
+app-c_src:
+ $(MAKE) -C $(C_SRC_DIR)
+
+clean::
+ $(MAKE) -C $(C_SRC_DIR) clean
+
+else
+
+ifeq ($(SOURCES),)
+SOURCES := $(sort $(foreach pat,*.c *.C *.cc *.cpp,$(call core_find,$(C_SRC_DIR)/,$(pat))))
+endif
+OBJECTS = $(addsuffix .o, $(basename $(SOURCES)))
+
+COMPILE_C = $(c_verbose) $(CC) $(CFLAGS) $(CPPFLAGS) -c
+COMPILE_CPP = $(cpp_verbose) $(CXX) $(CXXFLAGS) $(CPPFLAGS) -c
+
+app:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
+
+test-build:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
+
+$(C_SRC_OUTPUT_FILE): $(OBJECTS)
+ $(verbose) mkdir -p $(dir $@)
+ $(link_verbose) $(CC) $(OBJECTS) \
+ $(LDFLAGS) $(if $(filter $(C_SRC_TYPE),shared),-shared) $(LDLIBS) \
+ -o $(C_SRC_OUTPUT_FILE)
+
+$(OBJECTS): $(MAKEFILE_LIST) $(C_SRC_ENV)
+
+%.o: %.c
+ $(COMPILE_C) $(OUTPUT_OPTION) $<
+
+%.o: %.cc
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+%.o: %.C
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+%.o: %.cpp
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+clean:: clean-c_src
+
+clean-c_src:
+ $(gen_verbose) rm -f $(C_SRC_OUTPUT_FILE) $(OBJECTS)
+
+endif
+
+ifneq ($(wildcard $(C_SRC_DIR)),)
+ERL_ERTS_DIR = $(shell $(ERL) -eval 'io:format("~s~n", [code:lib_dir(erts)]), halt().')
+
+$(C_SRC_ENV):
+ $(verbose) $(ERL) -eval "file:write_file(\"$(call core_native_path,$(C_SRC_ENV))\", \
+ io_lib:format( \
+ \"# Generated by Erlang.mk. Edit at your own risk!~n~n\" \
+ \"ERTS_INCLUDE_DIR ?= ~s/erts-~s/include/~n\" \
+ \"ERL_INTERFACE_INCLUDE_DIR ?= ~s~n\" \
+ \"ERL_INTERFACE_LIB_DIR ?= ~s~n\" \
+ \"ERTS_DIR ?= $(ERL_ERTS_DIR)~n\", \
+ [code:root_dir(), erlang:system_info(version), \
+ code:lib_dir(erl_interface, include), \
+ code:lib_dir(erl_interface, lib)])), \
+ halt()."
+
+distclean:: distclean-c_src-env
+
+distclean-c_src-env:
+ $(gen_verbose) rm -f $(C_SRC_ENV)
+
+-include $(C_SRC_ENV)
+
+ifneq ($(ERL_ERTS_DIR),$(ERTS_DIR))
+$(shell rm -f $(C_SRC_ENV))
+endif
+endif
+
+# Templates.
+
+define bs_c_nif
+#include "erl_nif.h"
+
+static int loads = 0;
+
+static int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
+{
+ /* Initialize private data. */
+ *priv_data = NULL;
+
+ loads++;
+
+ return 0;
+}
+
+static int upgrade(ErlNifEnv* env, void** priv_data, void** old_priv_data, ERL_NIF_TERM load_info)
+{
+ /* Convert the private data to the new version. */
+ *priv_data = *old_priv_data;
+
+ loads++;
+
+ return 0;
+}
+
+static void unload(ErlNifEnv* env, void* priv_data)
+{
+ if (loads == 1) {
+ /* Destroy the private data. */
+ }
+
+ loads--;
+}
+
+static ERL_NIF_TERM hello(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ if (enif_is_atom(env, argv[0])) {
+ return enif_make_tuple2(env,
+ enif_make_atom(env, "hello"),
+ argv[0]);
+ }
+
+ return enif_make_tuple2(env,
+ enif_make_atom(env, "error"),
+ enif_make_atom(env, "badarg"));
+}
+
+static ErlNifFunc nif_funcs[] = {
+ {"hello", 1, hello}
+};
+
+ERL_NIF_INIT($n, nif_funcs, load, NULL, upgrade, unload)
+endef
+
+define bs_erl_nif
+-module($n).
+
+-export([hello/1]).
+
+-on_load(on_load/0).
+on_load() ->
+ PrivDir = case code:priv_dir(?MODULE) of
+ {error, _} ->
+ AppPath = filename:dirname(filename:dirname(code:which(?MODULE))),
+ filename:join(AppPath, "priv");
+ Path ->
+ Path
+ end,
+ erlang:load_nif(filename:join(PrivDir, atom_to_list(?MODULE)), 0).
+
+hello(_) ->
+ erlang:nif_error({not_loaded, ?MODULE}).
+endef
+
+new-nif:
+ifneq ($(wildcard $(C_SRC_DIR)/$n.c),)
+ $(error Error: $(C_SRC_DIR)/$n.c already exists)
+endif
+ifneq ($(wildcard src/$n.erl),)
+ $(error Error: src/$n.erl already exists)
+endif
+ifndef n
+ $(error Usage: $(MAKE) new-nif n=NAME [in=APP])
+endif
+ifdef in
+ $(verbose) $(MAKE) -C $(APPS_DIR)/$(in)/ new-nif n=$n in=
+else
+ $(verbose) mkdir -p $(C_SRC_DIR) src/
+ $(verbose) $(call core_render,bs_c_nif,$(C_SRC_DIR)/$n.c)
+ $(verbose) $(call core_render,bs_erl_nif,src/$n.erl)
+endif
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: ci ci-prepare ci-setup
+
+CI_OTP ?=
+CI_HIPE ?=
+CI_ERLLVM ?=
+
+ifeq ($(CI_VM),native)
+ERLC_OPTS += +native
+TEST_ERLC_OPTS += +native
+else ifeq ($(CI_VM),erllvm)
+ERLC_OPTS += +native +'{hipe, [to_llvm]}'
+TEST_ERLC_OPTS += +native +'{hipe, [to_llvm]}'
+endif
+
+ifeq ($(strip $(CI_OTP) $(CI_HIPE) $(CI_ERLLVM)),)
+ci::
+else
+
+ci:: $(addprefix ci-,$(CI_OTP) $(addsuffix -native,$(CI_HIPE)) $(addsuffix -erllvm,$(CI_ERLLVM)))
+
+ci-prepare: $(addprefix $(KERL_INSTALL_DIR)/,$(CI_OTP) $(addsuffix -native,$(CI_HIPE)))
+
+ci-setup::
+ $(verbose) :
+
+ci-extra::
+ $(verbose) :
+
+ci_verbose_0 = @echo " CI " $(1);
+ci_verbose = $(ci_verbose_$(V))
+
+define ci_target
+ci-$1: $(KERL_INSTALL_DIR)/$2
+ $(verbose) $(MAKE) --no-print-directory clean
+ $(ci_verbose) \
+ PATH="$(KERL_INSTALL_DIR)/$2/bin:$(PATH)" \
+ CI_OTP_RELEASE="$1" \
+ CT_OPTS="-label $1" \
+ CI_VM="$3" \
+ $(MAKE) ci-setup tests
+ $(verbose) $(MAKE) --no-print-directory ci-extra
+endef
+
+$(foreach otp,$(CI_OTP),$(eval $(call ci_target,$(otp),$(otp),otp)))
+$(foreach otp,$(CI_HIPE),$(eval $(call ci_target,$(otp)-native,$(otp)-native,native)))
+$(foreach otp,$(CI_ERLLVM),$(eval $(call ci_target,$(otp)-erllvm,$(otp)-native,erllvm)))
+
+$(foreach otp,$(filter-out $(ERLANG_OTP),$(CI_OTP)),$(eval $(call kerl_otp_target,$(otp))))
+$(foreach otp,$(filter-out $(ERLANG_HIPE),$(sort $(CI_HIPE) $(CI_ERLLVM))),$(eval $(call kerl_hipe_target,$(otp))))
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Continuous Integration targets:" \
+ " ci Run '$(MAKE) tests' on all configured Erlang versions." \
+ "" \
+ "The CI_OTP variable must be defined with the Erlang versions" \
+ "that must be tested. For example: CI_OTP = OTP-17.3.4 OTP-17.5.3"
+
+endif
+
+# Copyright (c) 2020, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifdef CONCUERROR_TESTS
+
+.PHONY: concuerror distclean-concuerror
+
+# Configuration
+
+CONCUERROR_LOGS_DIR ?= $(CURDIR)/logs
+CONCUERROR_OPTS ?=
+
+# Core targets.
+
+check:: concuerror
+
+ifndef KEEP_LOGS
+distclean:: distclean-concuerror
+endif
+
+# Plugin-specific targets.
+
+$(ERLANG_MK_TMP)/Concuerror/bin/concuerror: | $(ERLANG_MK_TMP)
+ $(verbose) git clone https://github.com/parapluu/Concuerror $(ERLANG_MK_TMP)/Concuerror
+ $(verbose) $(MAKE) -C $(ERLANG_MK_TMP)/Concuerror
+
+$(CONCUERROR_LOGS_DIR):
+ $(verbose) mkdir -p $(CONCUERROR_LOGS_DIR)
+
+define concuerror_html_report
+<!DOCTYPE html>
+<html lang="en">
+<head>
+<meta charset="utf-8">
+<title>Concuerror HTML report</title>
+</head>
+<body>
+<h1>Concuerror HTML report</h1>
+<p>Generated on $(concuerror_date)</p>
+<ul>
+$(foreach t,$(concuerror_targets),<li><a href="$(t).txt">$(t)</a></li>)
+</ul>
+</body>
+</html>
+endef
+
+concuerror: $(addprefix concuerror-,$(subst :,-,$(CONCUERROR_TESTS)))
+ $(eval concuerror_date := $(shell date))
+ $(eval concuerror_targets := $^)
+ $(verbose) $(call core_render,concuerror_html_report,$(CONCUERROR_LOGS_DIR)/concuerror.html)
+
+define concuerror_target
+.PHONY: concuerror-$1-$2
+
+concuerror-$1-$2: test-build | $(ERLANG_MK_TMP)/Concuerror/bin/concuerror $(CONCUERROR_LOGS_DIR)
+ $(ERLANG_MK_TMP)/Concuerror/bin/concuerror \
+ --pa $(CURDIR)/ebin --pa $(TEST_DIR) \
+ -o $(CONCUERROR_LOGS_DIR)/concuerror-$1-$2.txt \
+ $$(CONCUERROR_OPTS) -m $1 -t $2
+endef
+
+$(foreach test,$(CONCUERROR_TESTS),$(eval $(call concuerror_target,$(firstword $(subst :, ,$(test))),$(lastword $(subst :, ,$(test))))))
+
+distclean-concuerror:
+ $(gen_verbose) rm -rf $(CONCUERROR_LOGS_DIR)
+
+endif
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: ct apps-ct distclean-ct
+
+# Configuration.
+
+CT_OPTS ?=
+
+ifneq ($(wildcard $(TEST_DIR)),)
+ifndef CT_SUITES
+CT_SUITES := $(sort $(subst _SUITE.erl,,$(notdir $(call core_find,$(TEST_DIR)/,*_SUITE.erl))))
+endif
+endif
+CT_SUITES ?=
+CT_LOGS_DIR ?= $(CURDIR)/logs
+
+# Core targets.
+
+tests:: ct
+
+ifndef KEEP_LOGS
+distclean:: distclean-ct
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Common_test targets:" \
+ " ct Run all the common_test suites for this project" \
+ "" \
+ "All your common_test suites have their associated targets." \
+ "A suite named http_SUITE can be ran using the ct-http target."
+
+# Plugin-specific targets.
+
+CT_RUN = ct_run \
+ -no_auto_compile \
+ -noinput \
+ -pa $(CURDIR)/ebin $(TEST_DIR) \
+ -dir $(TEST_DIR) \
+ -logdir $(CT_LOGS_DIR)
+
+ifeq ($(CT_SUITES),)
+ct: $(if $(IS_APP)$(ROOT_DIR),,apps-ct)
+else
+# We do not run tests if we are in an apps/* with no test directory.
+ifneq ($(IS_APP)$(wildcard $(TEST_DIR)),1)
+ct: test-build $(if $(IS_APP)$(ROOT_DIR),,apps-ct)
+ $(verbose) mkdir -p $(CT_LOGS_DIR)
+ $(gen_verbose) $(CT_RUN) -sname ct_$(PROJECT) -suite $(addsuffix _SUITE,$(CT_SUITES)) $(CT_OPTS)
+endif
+endif
+
+ifneq ($(ALL_APPS_DIRS),)
+define ct_app_target
+apps-ct-$1: test-build
+ $$(MAKE) -C $1 ct IS_APP=1
+endef
+
+$(foreach app,$(ALL_APPS_DIRS),$(eval $(call ct_app_target,$(app))))
+
+apps-ct: $(addprefix apps-ct-,$(ALL_APPS_DIRS))
+endif
+
+ifdef t
+ifeq (,$(findstring :,$t))
+CT_EXTRA = -group $t
+else
+t_words = $(subst :, ,$t)
+CT_EXTRA = -group $(firstword $(t_words)) -case $(lastword $(t_words))
+endif
+else
+ifdef c
+CT_EXTRA = -case $c
+else
+CT_EXTRA =
+endif
+endif
+
+define ct_suite_target
+ct-$(1): test-build
+ $(verbose) mkdir -p $(CT_LOGS_DIR)
+ $(gen_verbose_esc) $(CT_RUN) -sname ct_$(PROJECT) -suite $(addsuffix _SUITE,$(1)) $(CT_EXTRA) $(CT_OPTS)
+endef
+
+$(foreach test,$(CT_SUITES),$(eval $(call ct_suite_target,$(test))))
+
+distclean-ct:
+ $(gen_verbose) rm -rf $(CT_LOGS_DIR)
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: plt distclean-plt dialyze
+
+# Configuration.
+
+DIALYZER_PLT ?= $(CURDIR)/.$(PROJECT).plt
+export DIALYZER_PLT
+
+PLT_APPS ?=
+DIALYZER_DIRS ?= --src -r $(wildcard src) $(ALL_APPS_DIRS)
+DIALYZER_OPTS ?= -Werror_handling -Wrace_conditions -Wunmatched_returns # -Wunderspecs
+DIALYZER_PLT_OPTS ?=
+
+# Core targets.
+
+check:: dialyze
+
+distclean:: distclean-plt
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Dialyzer targets:" \
+ " plt Build a PLT file for this project" \
+ " dialyze Analyze the project using Dialyzer"
+
+# Plugin-specific targets.
+
+define filter_opts.erl
+ Opts = init:get_plain_arguments(),
+ {Filtered, _} = lists:foldl(fun
+ (O, {Os, true}) -> {[O|Os], false};
+ (O = "-D", {Os, _}) -> {[O|Os], true};
+ (O = [\\$$-, \\$$D, _ | _], {Os, _}) -> {[O|Os], false};
+ (O = "-I", {Os, _}) -> {[O|Os], true};
+ (O = [\\$$-, \\$$I, _ | _], {Os, _}) -> {[O|Os], false};
+ (O = "-pa", {Os, _}) -> {[O|Os], true};
+ (_, Acc) -> Acc
+ end, {[], false}, Opts),
+ io:format("~s~n", [string:join(lists:reverse(Filtered), " ")]),
+ halt().
+endef
+
+# DIALYZER_PLT is a variable understood directly by Dialyzer.
+#
+# We append the path to erts at the end of the PLT. This works
+# because the PLT file is in the external term format and the
+# function binary_to_term/1 ignores any trailing data.
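+#
+# For instance (hypothetical project name), the appended path can be
+# inspected with:
+#   tail -n1 .my_project.plt   # e.g. /usr/lib/erlang/lib/erts-11.1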
+$(DIALYZER_PLT): deps app
+ $(eval DEPS_LOG := $(shell test -f $(ERLANG_MK_TMP)/deps.log && \
+ while read p; do test -d $$p/ebin && echo $$p/ebin; done <$(ERLANG_MK_TMP)/deps.log))
+ $(verbose) dialyzer --build_plt $(DIALYZER_PLT_OPTS) --apps \
+ erts kernel stdlib $(PLT_APPS) $(OTP_DEPS) $(LOCAL_DEPS) $(DEPS_LOG) || test $$? -eq 2
+ $(verbose) $(ERL) -eval 'io:format("~n~s~n", [code:lib_dir(erts)]), halt().' >> $@
+
+plt: $(DIALYZER_PLT)
+
+distclean-plt:
+ $(gen_verbose) rm -f $(DIALYZER_PLT)
+
+ifneq ($(wildcard $(DIALYZER_PLT)),)
+dialyze: $(if $(filter --src,$(DIALYZER_DIRS)),,deps app)
+ $(verbose) if ! tail -n1 $(DIALYZER_PLT) | \
+ grep -q "^`$(ERL) -eval 'io:format("~s", [code:lib_dir(erts)]), halt().'`$$"; then \
+ rm $(DIALYZER_PLT); \
+ $(MAKE) plt; \
+ fi
+else
+dialyze: $(DIALYZER_PLT)
+endif
+ $(verbose) dialyzer --no_native `$(ERL) \
+ -eval "$(subst $(newline),,$(call escape_dquotes,$(call filter_opts.erl)))" \
+ -extra $(ERLC_OPTS)` $(DIALYZER_DIRS) $(DIALYZER_OPTS) $(if $(wildcard ebin/),-pa ebin/)
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-edoc edoc
+
+# Configuration.
+
+EDOC_OPTS ?=
+EDOC_SRC_DIRS ?=
+EDOC_OUTPUT ?= doc
+
+define edoc.erl
+ SrcPaths = lists:foldl(fun(P, Acc) ->
+ filelib:wildcard(atom_to_list(P) ++ "/{src,c_src}") ++ Acc
+ end, [], [$(call comma_list,$(patsubst %,'%',$(call core_native_path,$(EDOC_SRC_DIRS))))]),
+ DefaultOpts = [{dir, "$(EDOC_OUTPUT)"}, {source_path, SrcPaths}, {subpackages, false}],
+ edoc:application($(1), ".", [$(2)] ++ DefaultOpts),
+ halt(0).
+endef
+
+# Core targets.
+
+ifneq ($(strip $(EDOC_SRC_DIRS)$(wildcard doc/overview.edoc)),)
+docs:: edoc
+endif
+
+distclean:: distclean-edoc
+
+# Plugin-specific targets.
+
+edoc: distclean-edoc doc-deps
+ $(gen_verbose) $(call erlang,$(call edoc.erl,$(PROJECT),$(EDOC_OPTS)))
+
+distclean-edoc:
+ $(gen_verbose) rm -f $(EDOC_OUTPUT)/*.css $(EDOC_OUTPUT)/*.html $(EDOC_OUTPUT)/*.png $(EDOC_OUTPUT)/edoc-info
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Configuration.
+
+DTL_FULL_PATH ?=
+DTL_PATH ?= templates/
+DTL_PREFIX ?=
+DTL_SUFFIX ?= _dtl
+DTL_OPTS ?=
+
+# Verbosity.
+
+dtl_verbose_0 = @echo " DTL " $(filter %.dtl,$(?F));
+dtl_verbose = $(dtl_verbose_$(V))
+
+# Core targets.
+
+DTL_PATH := $(abspath $(DTL_PATH))
+DTL_FILES := $(sort $(call core_find,$(DTL_PATH),*.dtl))
+
+ifneq ($(DTL_FILES),)
+
+DTL_NAMES = $(addprefix $(DTL_PREFIX),$(addsuffix $(DTL_SUFFIX),$(DTL_FILES:$(DTL_PATH)/%.dtl=%)))
+DTL_MODULES = $(if $(DTL_FULL_PATH),$(subst /,_,$(DTL_NAMES)),$(notdir $(DTL_NAMES)))
+BEAM_FILES += $(addsuffix .beam,$(addprefix ebin/,$(DTL_MODULES)))
+
+ifneq ($(words $(DTL_FILES)),0)
+# Rebuild templates when the Makefile changes.
+$(ERLANG_MK_TMP)/last-makefile-change-erlydtl: $(MAKEFILE_LIST) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(DTL_FILES); \
+ fi
+ $(verbose) touch $@
+
+ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change-erlydtl
+endif
+
+define erlydtl_compile.erl
+ [begin
+ Module0 = case "$(strip $(DTL_FULL_PATH))" of
+ "" ->
+ filename:basename(F, ".dtl");
+ _ ->
+ "$(call core_native_path,$(DTL_PATH))/" ++ F2 = filename:rootname(F, ".dtl"),
+ re:replace(F2, "/", "_", [{return, list}, global])
+ end,
+ Module = list_to_atom("$(DTL_PREFIX)" ++ string:to_lower(Module0) ++ "$(DTL_SUFFIX)"),
+ case erlydtl:compile(F, Module, [$(DTL_OPTS)] ++ [{out_dir, "ebin/"}, return_errors]) of
+ ok -> ok;
+ {ok, _} -> ok
+ end
+ end || F <- string:tokens("$(1)", " ")],
+ halt().
+endef
+
+ebin/$(PROJECT).app:: $(DTL_FILES) | ebin/
+ $(if $(strip $?),\
+ $(dtl_verbose) $(call erlang,$(call erlydtl_compile.erl,$(call core_native_path,$?)),\
+ -pa ebin/))
+
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, Dave Cottlehuber <dch@skunkwerks.at>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-escript escript escript-zip
+
+# Configuration.
+
+ESCRIPT_NAME ?= $(PROJECT)
+ESCRIPT_FILE ?= $(ESCRIPT_NAME)
+
+ESCRIPT_SHEBANG ?= /usr/bin/env escript
+ESCRIPT_COMMENT ?= This is an -*- erlang -*- file
+ESCRIPT_EMU_ARGS ?= -escript main $(ESCRIPT_NAME)
+
+ESCRIPT_ZIP ?= 7z a -tzip -mx=9 -mtc=off $(if $(filter-out 0,$(V)),,> /dev/null)
+ESCRIPT_ZIP_FILE ?= $(ERLANG_MK_TMP)/escript.zip
+
+# Core targets.
+
+distclean:: distclean-escript
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Escript targets:" \
+ " escript Build an executable escript archive" \
+
+# Plugin-specific targets.
+
+escript-zip:: FULL=1
+escript-zip:: deps app
+ $(verbose) mkdir -p $(dir $(ESCRIPT_ZIP))
+ $(verbose) rm -f $(ESCRIPT_ZIP_FILE)
+ $(gen_verbose) cd .. && $(ESCRIPT_ZIP) $(ESCRIPT_ZIP_FILE) $(PROJECT)/ebin/*
+ifneq ($(DEPS),)
+ $(verbose) cd $(DEPS_DIR) && $(ESCRIPT_ZIP) $(ESCRIPT_ZIP_FILE) \
+ $(subst $(DEPS_DIR)/,,$(addsuffix /*,$(wildcard \
+ $(addsuffix /ebin,$(shell cat $(ERLANG_MK_TMP)/deps.log)))))
+endif
+
+escript:: escript-zip
+ $(gen_verbose) printf "%s\n" \
+ "#!$(ESCRIPT_SHEBANG)" \
+ "%% $(ESCRIPT_COMMENT)" \
+ "%%! $(ESCRIPT_EMU_ARGS)" > $(ESCRIPT_FILE)
+ $(verbose) cat $(ESCRIPT_ZIP_FILE) >> $(ESCRIPT_FILE)
+ $(verbose) chmod +x $(ESCRIPT_FILE)
+
+distclean-escript:
+ $(gen_verbose) rm -f $(ESCRIPT_FILE)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, Enrique Fernandez <enrique.fernandez@erlang-solutions.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: eunit apps-eunit
+
+# Configuration
+
+EUNIT_OPTS ?=
+EUNIT_ERL_OPTS ?=
+
+# Core targets.
+
+tests:: eunit
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "EUnit targets:" \
+ " eunit Run all the EUnit tests for this project"
+
+# Plugin-specific targets.
+
+define eunit.erl
+ $(call cover.erl)
+ CoverSetup(),
+ case eunit:test($1, [$(EUNIT_OPTS)]) of
+ ok -> ok;
+ error -> halt(2)
+ end,
+ CoverExport("$(call core_native_path,$(COVER_DATA_DIR))/eunit.coverdata"),
+ halt()
+endef
+
+EUNIT_ERL_OPTS += -pa $(TEST_DIR) $(CURDIR)/ebin
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+eunit: test-build cover-data-dir
+ $(gen_verbose) $(call erlang,$(call eunit.erl,['$(t)']),$(EUNIT_ERL_OPTS))
+else
+eunit: test-build cover-data-dir
+ $(gen_verbose) $(call erlang,$(call eunit.erl,fun $(t)/0),$(EUNIT_ERL_OPTS))
+endif
+else
+EUNIT_EBIN_MODS = $(notdir $(basename $(ERL_FILES) $(BEAM_FILES)))
+EUNIT_TEST_MODS = $(notdir $(basename $(call core_find,$(TEST_DIR)/,*.erl)))
+
+EUNIT_MODS = $(foreach mod,$(EUNIT_EBIN_MODS) $(filter-out \
+ $(patsubst %,%_tests,$(EUNIT_EBIN_MODS)),$(EUNIT_TEST_MODS)),'$(mod)')
+
+eunit: test-build $(if $(IS_APP)$(ROOT_DIR),,apps-eunit) cover-data-dir
+ifneq ($(wildcard src/ $(TEST_DIR)),)
+ $(gen_verbose) $(call erlang,$(call eunit.erl,[$(call comma_list,$(EUNIT_MODS))]),$(EUNIT_ERL_OPTS))
+endif
+
+ifneq ($(ALL_APPS_DIRS),)
+apps-eunit: test-build
+ $(verbose) eunit_retcode=0 ; for app in $(ALL_APPS_DIRS); do $(MAKE) -C $$app eunit IS_APP=1; \
+ [ $$? -ne 0 ] && eunit_retcode=1 ; done ; \
+ exit $$eunit_retcode
+endif
+endif
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter proper,$(DEPS) $(TEST_DEPS)),proper)
+.PHONY: proper
+
+# Targets.
+
+tests:: proper
+
+define proper_check.erl
+ $(call cover.erl)
+ code:add_pathsa([
+ "$(call core_native_path,$(CURDIR)/ebin)",
+ "$(call core_native_path,$(DEPS_DIR)/*/ebin)",
+ "$(call core_native_path,$(TEST_DIR))"]),
+ Module = fun(M) ->
+ [true] =:= lists:usort([
+ case atom_to_list(F) of
+ "prop_" ++ _ ->
+ io:format("Testing ~p:~p/0~n", [M, F]),
+ proper:quickcheck(M:F(), nocolors);
+ _ ->
+ true
+ end
+ || {F, 0} <- M:module_info(exports)])
+ end,
+ try begin
+ CoverSetup(),
+ Res = case $(1) of
+ all -> [true] =:= lists:usort([Module(M) || M <- [$(call comma_list,$(3))]]);
+ module -> Module($(2));
+ function -> proper:quickcheck($(2), nocolors)
+ end,
+ CoverExport("$(COVER_DATA_DIR)/proper.coverdata"),
+ Res
+ end of
+ true -> halt(0);
+ _ -> halt(1)
+ catch error:undef ->
+ io:format("Undefined property or module?~n~p~n", [erlang:get_stacktrace()]),
+ halt(0)
+ end.
+endef
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+proper: test-build cover-data-dir
+ $(verbose) $(call erlang,$(call proper_check.erl,module,$(t)))
+else
+proper: test-build cover-data-dir
+ $(verbose) echo Testing $(t)/0
+ $(verbose) $(call erlang,$(call proper_check.erl,function,$(t)()))
+endif
+else
+proper: test-build cover-data-dir
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(wildcard ebin/*.beam) $(call core_find,$(TEST_DIR)/,*.beam))))))
+ $(gen_verbose) $(call erlang,$(call proper_check.erl,all,undefined,$(MODULES)))
+endif
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Verbosity.
+
+proto_verbose_0 = @echo " PROTO " $(filter %.proto,$(?F));
+proto_verbose = $(proto_verbose_$(V))
+
+# Core targets.
+
+ifneq ($(wildcard src/),)
+ifneq ($(filter gpb protobuffs,$(BUILD_DEPS) $(DEPS)),)
+PROTO_FILES := $(filter %.proto,$(ALL_SRC_FILES))
+ERL_FILES += $(addprefix src/,$(patsubst %.proto,%_pb.erl,$(notdir $(PROTO_FILES))))
+
+ifeq ($(PROTO_FILES),)
+$(ERLANG_MK_TMP)/last-makefile-change-protobuffs:
+ $(verbose) :
+else
+# Rebuild proto files when the Makefile changes.
+# We exclude $(PROJECT).d to avoid a circular dependency.
+$(ERLANG_MK_TMP)/last-makefile-change-protobuffs: $(filter-out $(PROJECT).d,$(MAKEFILE_LIST)) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(PROTO_FILES); \
+ fi
+ $(verbose) touch $@
+
+$(PROJECT).d:: $(ERLANG_MK_TMP)/last-makefile-change-protobuffs
+endif
+
+ifeq ($(filter gpb,$(BUILD_DEPS) $(DEPS)),)
+define compile_proto.erl
+ [begin
+ protobuffs_compile:generate_source(F, [
+ {output_include_dir, "./include"},
+ {output_src_dir, "./src"}])
+ end || F <- string:tokens("$1", " ")],
+ halt().
+endef
+else
+define compile_proto.erl
+ [begin
+ gpb_compile:file(F, [
+ {include_as_lib, true},
+ {module_name_suffix, "_pb"},
+ {o_hrl, "./include"},
+ {o_erl, "./src"}])
+ end || F <- string:tokens("$1", " ")],
+ halt().
+endef
+endif
+
+ifneq ($(PROTO_FILES),)
+$(PROJECT).d:: $(PROTO_FILES)
+ $(verbose) mkdir -p ebin/ include/
+ $(if $(strip $?),$(proto_verbose) $(call erlang,$(call compile_proto.erl,$?)))
+endif
+endif
+endif
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: relx-rel relx-relup distclean-relx-rel run
+
+# Configuration.
+
+RELX ?= $(ERLANG_MK_TMP)/relx
+RELX_CONFIG ?= $(CURDIR)/relx.config
+
+RELX_URL ?= https://erlang.mk/res/relx-v3.27.0
+RELX_OPTS ?=
+RELX_OUTPUT_DIR ?= _rel
+RELX_REL_EXT ?=
+RELX_TAR ?= 1
+
+ifdef SFX
+ RELX_TAR = 1
+endif
+
+ifeq ($(firstword $(RELX_OPTS)),-o)
+ RELX_OUTPUT_DIR = $(word 2,$(RELX_OPTS))
+else
+ RELX_OPTS += -o $(RELX_OUTPUT_DIR)
+endif
+
+# Core targets.
+
+ifeq ($(IS_DEP),)
+ifneq ($(wildcard $(RELX_CONFIG)),)
+rel:: relx-rel
+
+relup:: relx-relup
+endif
+endif
+
+distclean:: distclean-relx-rel
+
+# Plugin-specific targets.
+
+$(RELX): | $(ERLANG_MK_TMP)
+ $(gen_verbose) $(call core_http_get,$(RELX),$(RELX_URL))
+ $(verbose) chmod +x $(RELX)
+
+relx-rel: $(RELX) rel-deps app
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) release
+ $(verbose) $(MAKE) relx-post-rel
+ifeq ($(RELX_TAR),1)
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) tar
+endif
+
+relx-relup: $(RELX) rel-deps app
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) release
+ $(MAKE) relx-post-rel
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) relup $(if $(filter 1,$(RELX_TAR)),tar)
+
+distclean-relx-rel:
+ $(gen_verbose) rm -rf $(RELX_OUTPUT_DIR)
+
+# Default hooks.
+relx-post-rel::
+ $(verbose) :
+
+# Run target.
+
+ifeq ($(wildcard $(RELX_CONFIG)),)
+run::
+else
+
+define get_relx_release.erl
+ {ok, Config} = file:consult("$(call core_native_path,$(RELX_CONFIG))"),
+ {release, {Name, Vsn0}, _} = lists:keyfind(release, 1, Config),
+ Vsn = case Vsn0 of
+ {cmd, Cmd} -> os:cmd(Cmd);
+ semver -> "";
+ {semver, _} -> "";
+ VsnStr -> Vsn0
+ end,
+ Extended = case lists:keyfind(extended_start_script, 1, Config) of
+ {_, true} -> "1";
+ _ -> ""
+ end,
+ io:format("~s ~s ~s", [Name, Vsn, Extended]),
+ halt(0).
+endef
+
+RELX_REL := $(shell $(call erlang,$(get_relx_release.erl)))
+RELX_REL_NAME := $(word 1,$(RELX_REL))
+RELX_REL_VSN := $(word 2,$(RELX_REL))
+RELX_REL_CMD := $(if $(word 3,$(RELX_REL)),console)
+
+ifeq ($(PLATFORM),msys2)
+RELX_REL_EXT := .cmd
+endif
+
+run:: all
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) $(RELX_REL_CMD)
+
+ifdef RELOAD
+rel::
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) ping
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) \
+ eval "io:format(\"~p~n\", [c:lm()])"
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Relx targets:" \
+ " run Compile the project, build the release and run it"
+
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, M Robert Martin <rob@version2beta.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: shell
+
+# Configuration.
+
+SHELL_ERL ?= erl
+SHELL_PATHS ?= $(CURDIR)/ebin $(TEST_DIR)
+SHELL_OPTS ?=
+
+ALL_SHELL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(SHELL_DEPS))
+
+# Core targets
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Shell targets:" \
+ " shell Run an erlang shell with SHELL_OPTS or reasonable default"
+
+# Plugin-specific targets.
+
+$(foreach dep,$(SHELL_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+build-shell-deps:
+else
+build-shell-deps: $(ALL_SHELL_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_SHELL_DEPS_DIRS) ; do \
+ if [ -z "$(strip $(FULL))" ] && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ else \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ fi \
+ done
+endif
+
+shell:: build-shell-deps
+ $(gen_verbose) $(SHELL_ERL) -pa $(SHELL_PATHS) $(SHELL_OPTS)
+
+# Copyright 2017, Stanislaw Klekot <dozzie@jarowit.net>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-sphinx sphinx
+
+# Configuration.
+
+SPHINX_BUILD ?= sphinx-build
+SPHINX_SOURCE ?= doc
+SPHINX_CONFDIR ?=
+SPHINX_FORMATS ?= html
+SPHINX_DOCTREES ?= $(ERLANG_MK_TMP)/sphinx.doctrees
+SPHINX_OPTS ?=
+
+#sphinx_html_opts =
+#sphinx_html_output = html
+#sphinx_man_opts =
+#sphinx_man_output = man
+#sphinx_latex_opts =
+#sphinx_latex_output = latex
+
+# Helpers.
+
+sphinx_build_0 = @echo " SPHINX" $1; $(SPHINX_BUILD) -N -q
+sphinx_build_1 = $(SPHINX_BUILD) -N
+sphinx_build_2 = set -x; $(SPHINX_BUILD)
+sphinx_build = $(sphinx_build_$(V))
+
+define sphinx.build
+$(call sphinx_build,$1) -b $1 -d $(SPHINX_DOCTREES) $(if $(SPHINX_CONFDIR),-c $(SPHINX_CONFDIR)) $(SPHINX_OPTS) $(sphinx_$1_opts) -- $(SPHINX_SOURCE) $(call sphinx.output,$1)
+
+endef
+
+define sphinx.output
+$(if $(sphinx_$1_output),$(sphinx_$1_output),$1)
+endef
+
+# Targets.
+
+ifneq ($(wildcard $(if $(SPHINX_CONFDIR),$(SPHINX_CONFDIR),$(SPHINX_SOURCE))/conf.py),)
+docs:: sphinx
+distclean:: distclean-sphinx
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Sphinx targets:" \
+ " sphinx Generate Sphinx documentation." \
+ "" \
+ "ReST sources and 'conf.py' file are expected in directory pointed by" \
+ "SPHINX_SOURCE ('doc' by default). SPHINX_FORMATS lists formats to build (only" \
+ "'html' format is generated by default); target directory can be specified by" \
+ 'setting sphinx_$${format}_output, for example: sphinx_html_output = output/html' \
+ "Additional Sphinx options can be set in SPHINX_OPTS."
+
+# Plugin-specific targets.
+
+sphinx:
+ $(foreach F,$(SPHINX_FORMATS),$(call sphinx.build,$F))
+
+distclean-sphinx:
+ $(gen_verbose) rm -rf $(filter-out $(SPHINX_SOURCE),$(foreach F,$(SPHINX_FORMATS),$(call sphinx.output,$F)))
+
+# Copyright (c) 2017, Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: show-ERL_LIBS show-ERLC_OPTS show-TEST_ERLC_OPTS
+
+show-ERL_LIBS:
+ @echo $(ERL_LIBS)
+
+show-ERLC_OPTS:
+ @$(foreach opt,$(ERLC_OPTS) -pa ebin -I include,echo "$(opt)";)
+
+show-TEST_ERLC_OPTS:
+ @$(foreach opt,$(TEST_ERLC_OPTS) -pa ebin -I include,echo "$(opt)";)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter triq,$(DEPS) $(TEST_DEPS)),triq)
+.PHONY: triq
+
+# Targets.
+
+tests:: triq
+
+define triq_check.erl
+ $(call cover.erl)
+ code:add_pathsa([
+ "$(call core_native_path,$(CURDIR)/ebin)",
+ "$(call core_native_path,$(DEPS_DIR)/*/ebin)",
+ "$(call core_native_path,$(TEST_DIR))"]),
+ try begin
+ CoverSetup(),
+ Res = case $(1) of
+ all -> [true] =:= lists:usort([triq:check(M) || M <- [$(call comma_list,$(3))]]);
+ module -> triq:check($(2));
+ function -> triq:check($(2))
+ end,
+ CoverExport("$(COVER_DATA_DIR)/triq.coverdata"),
+ Res
+ end of
+ true -> halt(0);
+ _ -> halt(1)
+ catch error:undef ->
+ io:format("Undefined property or module?~n~p~n", [erlang:get_stacktrace()]),
+ halt(0)
+ end.
+endef
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+triq: test-build cover-data-dir
+ $(verbose) $(call erlang,$(call triq_check.erl,module,$(t)))
+else
+triq: test-build cover-data-dir
+ $(verbose) echo Testing $(t)/0
+ $(verbose) $(call erlang,$(call triq_check.erl,function,$(t)()))
+endif
+else
+triq: test-build cover-data-dir
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(wildcard ebin/*.beam) $(call core_find,$(TEST_DIR)/,*.beam))))))
+ $(gen_verbose) $(call erlang,$(call triq_check.erl,all,undefined,$(MODULES)))
+endif
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Erlang Solutions Ltd.
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: xref distclean-xref
+
+# Configuration.
+
+ifeq ($(XREF_CONFIG),)
+ XREFR_ARGS :=
+else
+ XREFR_ARGS := -c $(XREF_CONFIG)
+endif
+
+XREFR ?= $(CURDIR)/xrefr
+export XREFR
+
+XREFR_URL ?= https://github.com/inaka/xref_runner/releases/download/1.1.0/xrefr
+
+# Core targets.
+
+help::
+ $(verbose) printf '%s\n' '' \
+ 'Xref targets:' \
+ ' xref Run Xrefr using $$XREF_CONFIG as config file if defined'
+
+distclean:: distclean-xref
+
+# Plugin-specific targets.
+
+$(XREFR):
+ $(gen_verbose) $(call core_http_get,$(XREFR),$(XREFR_URL))
+ $(verbose) chmod +x $(XREFR)
+
+xref: deps app $(XREFR)
+ $(gen_verbose) $(XREFR) $(XREFR_ARGS)
+
+distclean-xref:
+ $(gen_verbose) rm -rf $(XREFR)
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Viktor Söderqvist <viktor@zuiderkwast.se>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+COVER_REPORT_DIR ?= cover
+COVER_DATA_DIR ?= $(COVER_REPORT_DIR)
+
+ifdef COVER
+COVER_APPS ?= $(notdir $(ALL_APPS_DIRS))
+COVER_DEPS ?=
+endif
+
+# Code coverage for Common Test.
+
+ifdef COVER
+ifdef CT_RUN
+ifneq ($(wildcard $(TEST_DIR)),)
+test-build:: $(TEST_DIR)/ct.cover.spec
+
+$(TEST_DIR)/ct.cover.spec: cover-data-dir
+ $(gen_verbose) printf "%s\n" \
+ "{incl_app, '$(PROJECT)', details}." \
+ "{incl_dirs, '$(PROJECT)', [\"$(call core_native_path,$(CURDIR)/ebin)\" \
+ $(foreach a,$(COVER_APPS),$(comma) \"$(call core_native_path,$(APPS_DIR)/$a/ebin)\") \
+ $(foreach d,$(COVER_DEPS),$(comma) \"$(call core_native_path,$(DEPS_DIR)/$d/ebin)\")]}." \
+ '{export,"$(call core_native_path,$(abspath $(COVER_DATA_DIR))/ct.coverdata)"}.' > $@
+
+CT_RUN += -cover $(TEST_DIR)/ct.cover.spec
+endif
+endif
+endif
+
+# Code coverage for other tools.
+
+ifdef COVER
+define cover.erl
+ CoverSetup = fun() ->
+ Dirs = ["$(call core_native_path,$(CURDIR)/ebin)"
+ $(foreach a,$(COVER_APPS),$(comma) "$(call core_native_path,$(APPS_DIR)/$a/ebin)")
+ $(foreach d,$(COVER_DEPS),$(comma) "$(call core_native_path,$(DEPS_DIR)/$d/ebin)")],
+ [begin
+ case filelib:is_dir(Dir) of
+ false -> false;
+ true ->
+ case cover:compile_beam_directory(Dir) of
+ {error, _} -> halt(1);
+ _ -> true
+ end
+ end
+ end || Dir <- Dirs]
+ end,
+ CoverExport = fun(Filename) -> cover:export(Filename) end,
+endef
+else
+define cover.erl
+ CoverSetup = fun() -> ok end,
+ CoverExport = fun(_) -> ok end,
+endef
+endif
+
+# Core targets
+
+ifdef COVER
+ifneq ($(COVER_REPORT_DIR),)
+tests::
+ $(verbose) $(MAKE) --no-print-directory cover-report
+endif
+
+cover-data-dir: | $(COVER_DATA_DIR)
+
+$(COVER_DATA_DIR):
+ $(verbose) mkdir -p $(COVER_DATA_DIR)
+else
+cover-data-dir:
+endif
+
+clean:: coverdata-clean
+
+ifneq ($(COVER_REPORT_DIR),)
+distclean:: cover-report-clean
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Cover targets:" \
+ " cover-report Generate a HTML coverage report from previously collected" \
+ " cover data." \
+ " all.coverdata Merge all coverdata files into all.coverdata." \
+ "" \
+ "If COVER=1 is set, coverage data is generated by the targets eunit and ct. The" \
+ "target tests additionally generates a HTML coverage report from the combined" \
+ "coverdata files from each of these testing tools. HTML reports can be disabled" \
+ "by setting COVER_REPORT_DIR to empty."
+
+# Plugin specific targets
+
+COVERDATA = $(filter-out $(COVER_DATA_DIR)/all.coverdata,$(wildcard $(COVER_DATA_DIR)/*.coverdata))
+
+.PHONY: coverdata-clean
+coverdata-clean:
+ $(gen_verbose) rm -f $(COVER_DATA_DIR)/*.coverdata $(TEST_DIR)/ct.cover.spec
+
+# Merge all coverdata files into one.
+define cover_export.erl
+ $(foreach f,$(COVERDATA),cover:import("$(f)") == ok orelse halt(1),)
+ cover:export("$(COVER_DATA_DIR)/$@"), halt(0).
+endef
+
+all.coverdata: $(COVERDATA) cover-data-dir
+ $(gen_verbose) $(call erlang,$(cover_export.erl))
+
+# These are only defined if COVER_REPORT_DIR is non-empty. Set COVER_REPORT_DIR to
+# empty if you want the coverdata files but not the HTML report.
+ifneq ($(COVER_REPORT_DIR),)
+
+.PHONY: cover-report-clean cover-report
+
+cover-report-clean:
+ $(gen_verbose) rm -rf $(COVER_REPORT_DIR)
+ifneq ($(COVER_REPORT_DIR),$(COVER_DATA_DIR))
+ $(if $(shell ls -A $(COVER_DATA_DIR)/),,$(verbose) rmdir $(COVER_DATA_DIR))
+endif
+
+ifeq ($(COVERDATA),)
+cover-report:
+else
+
+# Modules which include eunit.hrl always contain one line without coverage
+# because eunit defines test/0 which is never called. We compensate for this.
+EUNIT_HRL_MODS = $(subst $(space),$(comma),$(shell \
+ grep -H -e '^\s*-include.*include/eunit\.hrl"' src/*.erl \
+ | sed "s/^src\/\(.*\)\.erl:.*/'\1'/" | uniq))
+
+define cover_report.erl
+ $(foreach f,$(COVERDATA),cover:import("$(f)") == ok orelse halt(1),)
+ Ms = cover:imported_modules(),
+ [cover:analyse_to_file(M, "$(COVER_REPORT_DIR)/" ++ atom_to_list(M)
+ ++ ".COVER.html", [html]) || M <- Ms],
+ Report = [begin {ok, R} = cover:analyse(M, module), R end || M <- Ms],
+ EunitHrlMods = [$(EUNIT_HRL_MODS)],
+ Report1 = [{M, {Y, case lists:member(M, EunitHrlMods) of
+ true -> N - 1; false -> N end}} || {M, {Y, N}} <- Report],
+ TotalY = lists:sum([Y || {_, {Y, _}} <- Report1]),
+ TotalN = lists:sum([N || {_, {_, N}} <- Report1]),
+ Perc = fun(Y, N) -> case Y + N of 0 -> 100; S -> round(100 * Y / S) end end,
+ TotalPerc = Perc(TotalY, TotalN),
+ {ok, F} = file:open("$(COVER_REPORT_DIR)/index.html", [write]),
+ io:format(F, "<!DOCTYPE html><html>~n"
+ "<head><meta charset=\"UTF-8\">~n"
+ "<title>Coverage report</title></head>~n"
+ "<body>~n", []),
+ io:format(F, "<h1>Coverage</h1>~n<p>Total: ~p%</p>~n", [TotalPerc]),
+ io:format(F, "<table><tr><th>Module</th><th>Coverage</th></tr>~n", []),
+ [io:format(F, "<tr><td><a href=\"~p.COVER.html\">~p</a></td>"
+ "<td>~p%</td></tr>~n",
+ [M, M, Perc(Y, N)]) || {M, {Y, N}} <- Report1],
+ How = "$(subst $(space),$(comma)$(space),$(basename $(COVERDATA)))",
+ Date = "$(shell date -u "+%Y-%m-%dT%H:%M:%SZ")",
+ io:format(F, "</table>~n"
+ "<p>Generated using ~s and erlang.mk on ~s.</p>~n"
+ "</body></html>", [How, Date]),
+ halt().
+endef
+
+cover-report:
+ $(verbose) mkdir -p $(COVER_REPORT_DIR)
+ $(gen_verbose) $(call erlang,$(cover_report.erl))
+
+endif
+endif # ifneq ($(COVER_REPORT_DIR),)
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: sfx
+
+ifdef RELX_REL
+ifdef SFX
+
+# Configuration.
+
+SFX_ARCHIVE ?= $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/$(RELX_REL_NAME)-$(RELX_REL_VSN).tar.gz
+SFX_OUTPUT_FILE ?= $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME).run
+
+# Core targets.
+
+rel:: sfx
+
+# Plugin-specific targets.
+
+define sfx_stub
+#!/bin/sh
+
+TMPDIR=`mktemp -d`
+ARCHIVE=`awk '/^__ARCHIVE_BELOW__$$/ {print NR + 1; exit 0;}' $$0`
+FILENAME=$$(basename $$0)
+REL=$${FILENAME%.*}
+
+tail -n+$$ARCHIVE $$0 | tar -xzf - -C $$TMPDIR
+
+$$TMPDIR/bin/$$REL console
+RET=$$?
+
+rm -rf $$TMPDIR
+
+exit $$RET
+
+__ARCHIVE_BELOW__
+endef
+
+sfx:
+ $(verbose) $(call core_render,sfx_stub,$(SFX_OUTPUT_FILE))
+ $(gen_verbose) cat $(SFX_ARCHIVE) >> $(SFX_OUTPUT_FILE)
+ $(verbose) chmod +x $(SFX_OUTPUT_FILE)
+
+endif
+endif
+
+# Copyright (c) 2013-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# External plugins.
+
+DEP_PLUGINS ?=
+
+$(foreach p,$(DEP_PLUGINS),\
+ $(eval $(if $(findstring /,$p),\
+ $(call core_dep_plugin,$p,$(firstword $(subst /, ,$p))),\
+ $(call core_dep_plugin,$p/plugins.mk,$p))))
+
+help:: help-plugins
+
+help-plugins::
+ $(verbose) :
+
+# Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015-2016, Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Fetch dependencies recursively (without building them).
+
+.PHONY: fetch-deps fetch-doc-deps fetch-rel-deps fetch-test-deps \
+ fetch-shell-deps
+
+.PHONY: $(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+fetch-deps: $(ERLANG_MK_RECURSIVE_DEPS_LIST)
+fetch-doc-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST)
+fetch-rel-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST)
+fetch-test-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST)
+fetch-shell-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+ifneq ($(SKIP_DEPS),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST):
+ $(verbose) :> $@
+else
+# By default, we fetch "normal" dependencies. They are also included no
+# matter the type of requested dependencies.
+#
+# $(ALL_DEPS_DIRS) includes $(BUILD_DEPS).
+
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_DOC_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_REL_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_TEST_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_SHELL_DEPS_DIRS)
+
+# Allow fetch-deps to be used with $(DEP_TYPES) to fetch multiple types of
+# dependencies with a single target, as illustrated below.
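+#
+# For example (hypothetical invocation):
+#   make fetch-deps DEP_TYPES='doc test'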
+ifneq ($(filter doc,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_DOC_DEPS_DIRS)
+endif
+ifneq ($(filter rel,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_REL_DEPS_DIRS)
+endif
+ifneq ($(filter test,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_TEST_DEPS_DIRS)
+endif
+ifneq ($(filter shell,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_SHELL_DEPS_DIRS)
+endif
+
+ERLANG_MK_RECURSIVE_TMP_LIST := $(abspath $(ERLANG_MK_TMP)/recursive-tmp-deps-$(shell echo $$PPID).log)
+
+$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): | $(ERLANG_MK_TMP)
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_RECURSIVE_TMP_LIST)
+endif
+ $(verbose) touch $(ERLANG_MK_RECURSIVE_TMP_LIST)
+ $(verbose) set -e; for dep in $^ ; do \
+ if ! grep -qs ^$$dep$$ $(ERLANG_MK_RECURSIVE_TMP_LIST); then \
+ echo $$dep >> $(ERLANG_MK_RECURSIVE_TMP_LIST); \
+ if grep -qs -E "^[[:blank:]]*include[[:blank:]]+(erlang\.mk|.*/erlang\.mk|.*ERLANG_MK_FILENAME.*)$$" \
+ $$dep/GNUmakefile $$dep/makefile $$dep/Makefile; then \
+ $(MAKE) -C $$dep fetch-deps \
+ IS_DEP=1 \
+ ERLANG_MK_RECURSIVE_TMP_LIST=$(ERLANG_MK_RECURSIVE_TMP_LIST); \
+ fi \
+ fi \
+ done
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) sort < $(ERLANG_MK_RECURSIVE_TMP_LIST) | \
+ uniq > $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted
+ $(verbose) cmp -s $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted $@ \
+ || mv $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted $@
+ $(verbose) rm -f $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted
+ $(verbose) rm $(ERLANG_MK_RECURSIVE_TMP_LIST)
+endif
+endif # ifneq ($(SKIP_DEPS),)
+
+# List dependencies recursively.
+
+.PHONY: list-deps list-doc-deps list-rel-deps list-test-deps \
+ list-shell-deps
+
+list-deps: $(ERLANG_MK_RECURSIVE_DEPS_LIST)
+list-doc-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST)
+list-rel-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST)
+list-test-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST)
+list-shell-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+list-deps list-doc-deps list-rel-deps list-test-deps list-shell-deps:
+ $(verbose) cat $^
+
+# Query dependencies recursively.
+
+.PHONY: query-deps query-doc-deps query-rel-deps query-test-deps \
+ query-shell-deps
+
+QUERY ?= name fetch_method repo version
+
+define query_target
+$(1): $(2) clean-tmp-query.log
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(4)
+endif
+ $(verbose) $(foreach dep,$(3),\
+ echo $(PROJECT): $(foreach q,$(QUERY),$(call query_$(q),$(dep))) >> $(4) ;)
+ $(if $(filter-out query-deps,$(1)),,\
+ $(verbose) set -e; for dep in $(3) ; do \
+ if grep -qs ^$$$$dep$$$$ $(ERLANG_MK_TMP)/query.log; then \
+ :; \
+ else \
+ echo $$$$dep >> $(ERLANG_MK_TMP)/query.log; \
+ $(MAKE) -C $(DEPS_DIR)/$$$$dep $$@ QUERY="$(QUERY)" IS_DEP=1 || true; \
+ fi \
+ done)
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) touch $(4)
+ $(verbose) cat $(4)
+endif
+endef
+
+clean-tmp-query.log:
+ifeq ($(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_TMP)/query.log
+endif
+
+$(eval $(call query_target,query-deps,$(ERLANG_MK_RECURSIVE_DEPS_LIST),$(BUILD_DEPS) $(DEPS),$(ERLANG_MK_QUERY_DEPS_FILE)))
+$(eval $(call query_target,query-doc-deps,$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST),$(DOC_DEPS),$(ERLANG_MK_QUERY_DOC_DEPS_FILE)))
+$(eval $(call query_target,query-rel-deps,$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST),$(REL_DEPS),$(ERLANG_MK_QUERY_REL_DEPS_FILE)))
+$(eval $(call query_target,query-test-deps,$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST),$(TEST_DEPS),$(ERLANG_MK_QUERY_TEST_DEPS_FILE)))
+$(eval $(call query_target,query-shell-deps,$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST),$(SHELL_DEPS),$(ERLANG_MK_QUERY_SHELL_DEPS_FILE)))
diff --git a/deps/rabbitmq_auth_mechanism_ssl/rabbitmq-components.mk b/deps/rabbitmq_auth_mechanism_ssl/rabbitmq-components.mk
new file mode 100644
index 0000000000..b2a3be8b35
--- /dev/null
+++ b/deps/rabbitmq_auth_mechanism_ssl/rabbitmq-components.mk
@@ -0,0 +1,359 @@
+ifeq ($(.DEFAULT_GOAL),)
+# Set the default goal to `all` because this file defines some targets
+# before erlang.mk is included, which would otherwise cause the wrong
+# target to become the default.
+.DEFAULT_GOAL = all
+endif
+
+# PROJECT_VERSION defaults to:
+# 1. the version exported by rabbitmq-server-release;
+# 2. the version stored in `git-revisions.txt`, if it exists;
+# 3. a version based on git-describe(1), if it is a Git clone;
+# 4. 0.0.0
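+#
+# For example, a clone whose git-describe(1) output is `v3.8.9-10-gabcdef0`
+# would yield `3.8.9+10.gabcdef0` after the sed(1) rewrite below.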
+
+PROJECT_VERSION := $(RABBITMQ_VERSION)
+
+ifeq ($(PROJECT_VERSION),)
+PROJECT_VERSION := $(shell \
+if test -f git-revisions.txt; then \
+ head -n1 git-revisions.txt | \
+ awk '{print $$$(words $(PROJECT_DESCRIPTION) version);}'; \
+else \
+ (git describe --dirty --abbrev=7 --tags --always --first-parent \
+ 2>/dev/null || echo rabbitmq_v0_0_0) | \
+ sed -e 's/^rabbitmq_v//' -e 's/^v//' -e 's/_/./g' -e 's/-/+/' \
+ -e 's/-/./g'; \
+fi)
+endif
+
+# --------------------------------------------------------------------
+# RabbitMQ components.
+# --------------------------------------------------------------------
+
+# For RabbitMQ repositories, we want to check out branches which match
+# the parent project. For instance, if the parent project is on a
+# release tag, dependencies must be on the same release tag. If the
+# parent project is on a topic branch, dependencies must be on the same
+# topic branch, or fall back to `stable` or `master`, whichever was the
+# base of the topic branch.
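+#
+# For example, if this clone is on a hypothetical topic branch named
+# `my-feature`, fetching `rabbit_common` first tries `my-feature`, then
+# the base branch, then `master`.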
+
+dep_amqp_client = git_rmq rabbitmq-erlang-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_amqp10_client = git_rmq rabbitmq-amqp1.0-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_amqp10_common = git_rmq rabbitmq-amqp1.0-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbit = git_rmq rabbitmq-server $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbit_common = git_rmq rabbitmq-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_amqp1_0 = git_rmq rabbitmq-amqp1.0 $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_amqp = git_rmq rabbitmq-auth-backend-amqp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_cache = git_rmq rabbitmq-auth-backend-cache $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_http = git_rmq rabbitmq-auth-backend-http $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_ldap = git_rmq rabbitmq-auth-backend-ldap $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_oauth2 = git_rmq rabbitmq-auth-backend-oauth2 $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_mechanism_ssl = git_rmq rabbitmq-auth-mechanism-ssl $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_aws = git_rmq rabbitmq-aws $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_boot_steps_visualiser = git_rmq rabbitmq-boot-steps-visualiser $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_cli = git_rmq rabbitmq-cli $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_codegen = git_rmq rabbitmq-codegen $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_consistent_hash_exchange = git_rmq rabbitmq-consistent-hash-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_ct_client_helpers = git_rmq rabbitmq-ct-client-helpers $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_ct_helpers = git_rmq rabbitmq-ct-helpers $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_delayed_message_exchange = git_rmq rabbitmq-delayed-message-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_dotnet_client = git_rmq rabbitmq-dotnet-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_event_exchange = git_rmq rabbitmq-event-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_federation = git_rmq rabbitmq-federation $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_federation_management = git_rmq rabbitmq-federation-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_java_client = git_rmq rabbitmq-java-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_client = git_rmq rabbitmq-jms-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_cts = git_rmq rabbitmq-jms-cts $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_topic_exchange = git_rmq rabbitmq-jms-topic-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_lvc_exchange = git_rmq rabbitmq-lvc-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management = git_rmq rabbitmq-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_agent = git_rmq rabbitmq-management-agent $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_exchange = git_rmq rabbitmq-management-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_themes = git_rmq rabbitmq-management-themes $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_message_timestamp = git_rmq rabbitmq-message-timestamp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_metronome = git_rmq rabbitmq-metronome $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_mqtt = git_rmq rabbitmq-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_objc_client = git_rmq rabbitmq-objc-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_aws = git_rmq rabbitmq-peer-discovery-aws $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_common = git_rmq rabbitmq-peer-discovery-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_consul = git_rmq rabbitmq-peer-discovery-consul $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_etcd = git_rmq rabbitmq-peer-discovery-etcd $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_k8s = git_rmq rabbitmq-peer-discovery-k8s $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_prometheus = git_rmq rabbitmq-prometheus $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_random_exchange = git_rmq rabbitmq-random-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_recent_history_exchange = git_rmq rabbitmq-recent-history-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_routing_node_stamp = git_rmq rabbitmq-routing-node-stamp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_rtopic_exchange = git_rmq rabbitmq-rtopic-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_server_release = git_rmq rabbitmq-server-release $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_sharding = git_rmq rabbitmq-sharding $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_shovel = git_rmq rabbitmq-shovel $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_shovel_management = git_rmq rabbitmq-shovel-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_stomp = git_rmq rabbitmq-stomp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_stream = git_rmq rabbitmq-stream $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_toke = git_rmq rabbitmq-toke $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_top = git_rmq rabbitmq-top $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_tracing = git_rmq rabbitmq-tracing $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_trust_store = git_rmq rabbitmq-trust-store $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_test = git_rmq rabbitmq-test $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_dispatch = git_rmq rabbitmq-web-dispatch $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_stomp = git_rmq rabbitmq-web-stomp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_stomp_examples = git_rmq rabbitmq-web-stomp-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt = git_rmq rabbitmq-web-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt_examples = git_rmq rabbitmq-web-mqtt-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_website = git_rmq rabbitmq-website $(current_rmq_ref) $(base_rmq_ref) live master
+dep_toke = git_rmq toke $(current_rmq_ref) $(base_rmq_ref) master
+
+dep_rabbitmq_public_umbrella = git_rmq rabbitmq-public-umbrella $(current_rmq_ref) $(base_rmq_ref) master
+
+# Third-party dependencies version pinning.
+#
+# We pin them in this file, which is copied into all projects, to ensure
+# all projects use the same versions. This avoids conflicts and makes it
+# possible to work with rabbitmq-public-umbrella.
+
+dep_accept = hex 0.3.5
+dep_cowboy = hex 2.8.0
+dep_cowlib = hex 2.9.1
+dep_jsx = hex 2.11.0
+dep_lager = hex 3.8.0
+dep_prometheus = git https://github.com/deadtrickster/prometheus.erl.git master
+dep_ra = git https://github.com/rabbitmq/ra.git master
+dep_ranch = hex 1.7.1
+dep_recon = hex 2.5.1
+dep_observer_cli = hex 1.5.4
+dep_stdout_formatter = hex 0.2.4
+dep_sysmon_handler = hex 1.3.0
+
+RABBITMQ_COMPONENTS = amqp_client \
+ amqp10_common \
+ amqp10_client \
+ rabbit \
+ rabbit_common \
+ rabbitmq_amqp1_0 \
+ rabbitmq_auth_backend_amqp \
+ rabbitmq_auth_backend_cache \
+ rabbitmq_auth_backend_http \
+ rabbitmq_auth_backend_ldap \
+ rabbitmq_auth_backend_oauth2 \
+ rabbitmq_auth_mechanism_ssl \
+ rabbitmq_aws \
+ rabbitmq_boot_steps_visualiser \
+ rabbitmq_cli \
+ rabbitmq_codegen \
+ rabbitmq_consistent_hash_exchange \
+ rabbitmq_ct_client_helpers \
+ rabbitmq_ct_helpers \
+ rabbitmq_delayed_message_exchange \
+ rabbitmq_dotnet_client \
+ rabbitmq_event_exchange \
+ rabbitmq_federation \
+ rabbitmq_federation_management \
+ rabbitmq_java_client \
+ rabbitmq_jms_client \
+ rabbitmq_jms_cts \
+ rabbitmq_jms_topic_exchange \
+ rabbitmq_lvc_exchange \
+ rabbitmq_management \
+ rabbitmq_management_agent \
+ rabbitmq_management_exchange \
+ rabbitmq_management_themes \
+ rabbitmq_message_timestamp \
+ rabbitmq_metronome \
+ rabbitmq_mqtt \
+ rabbitmq_objc_client \
+ rabbitmq_peer_discovery_aws \
+ rabbitmq_peer_discovery_common \
+ rabbitmq_peer_discovery_consul \
+ rabbitmq_peer_discovery_etcd \
+ rabbitmq_peer_discovery_k8s \
+ rabbitmq_prometheus \
+ rabbitmq_random_exchange \
+ rabbitmq_recent_history_exchange \
+ rabbitmq_routing_node_stamp \
+ rabbitmq_rtopic_exchange \
+ rabbitmq_server_release \
+ rabbitmq_sharding \
+ rabbitmq_shovel \
+ rabbitmq_shovel_management \
+ rabbitmq_stomp \
+ rabbitmq_stream \
+ rabbitmq_toke \
+ rabbitmq_top \
+ rabbitmq_tracing \
+ rabbitmq_trust_store \
+ rabbitmq_web_dispatch \
+ rabbitmq_web_mqtt \
+ rabbitmq_web_mqtt_examples \
+ rabbitmq_web_stomp \
+ rabbitmq_web_stomp_examples \
+ rabbitmq_website
+
+# By default, Erlang.mk does not rebuild dependencies once they have
+# been compiled, except for those listed in the `$(FORCE_REBUILD)`
+# variable.
+#
+# We want all RabbitMQ components to always be rebuilt: this eases
+# working on several components at the same time.
+
+FORCE_REBUILD = $(RABBITMQ_COMPONENTS)
+
+# Several components have a custom erlang.mk/build.config, mainly
+# to disable eunit. Therefore, we can't use the top-level project's
+# erlang.mk copy.
+NO_AUTOPATCH += $(RABBITMQ_COMPONENTS)
+
+ifeq ($(origin current_rmq_ref),undefined)
+ifneq ($(wildcard .git),)
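+# Ask Git for the name of the current branch, handling the detached-HEAD
+# case; leave the value empty when HEAD is a bare commit hash, so that
+# dependency fetching falls back to the other candidate refs.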
+current_rmq_ref := $(shell (\
+ ref=$$(LANG=C git branch --list | awk '/^\* \(.*detached / {ref=$$0; sub(/.*detached [^ ]+ /, "", ref); sub(/\)$$/, "", ref); print ref; exit;} /^\* / {ref=$$0; sub(/^\* /, "", ref); print ref; exit}');\
+ if test "$$(git rev-parse --short HEAD)" != "$$ref"; then echo "$$ref"; fi))
+else
+current_rmq_ref := master
+endif
+endif
+export current_rmq_ref
+
+ifeq ($(origin base_rmq_ref),undefined)
+ifneq ($(wildcard .git),)
+possible_base_rmq_ref := master
+ifeq ($(possible_base_rmq_ref),$(current_rmq_ref))
+base_rmq_ref := $(current_rmq_ref)
+else
+base_rmq_ref := $(shell \
+ (git rev-parse --verify -q master >/dev/null && \
+ git rev-parse --verify -q $(possible_base_rmq_ref) >/dev/null && \
+ git merge-base --is-ancestor $$(git merge-base master HEAD) $(possible_base_rmq_ref) && \
+ echo $(possible_base_rmq_ref)) || \
+ echo master)
+endif
+else
+base_rmq_ref := master
+endif
+endif
+export base_rmq_ref
+
+# Repository URL selection.
+#
+# First, we infer the other components' location from the current
+# project's repository URL, if it is a Git repository:
+# - We take the "origin" remote URL as the base
+# - The current project's name and repository name are replaced by the
+#   target's properties:
+#       eg. rabbitmq-common is replaced by rabbitmq-codegen
+#       eg. rabbit_common is replaced by rabbitmq_codegen
+#
+# If cloning from this computed location fails, we fall back to the
+# RabbitMQ upstream on GitHub.
+
+# Macro to transform eg. "rabbit_common" to "rabbitmq-common".
+rmq_cmp_repo_name = $(word 2,$(dep_$(1)))
+
+# Upstream URL for the current project.
+RABBITMQ_COMPONENT_REPO_NAME := $(call rmq_cmp_repo_name,$(PROJECT))
+RABBITMQ_UPSTREAM_FETCH_URL ?= https://github.com/rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git
+RABBITMQ_UPSTREAM_PUSH_URL ?= git@github.com:rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git
+
+# Current URL for the current project. If this is not a Git clone,
+# default to the upstream Git repository.
+ifneq ($(wildcard .git),)
+git_origin_fetch_url := $(shell git config remote.origin.url)
+git_origin_push_url := $(shell git config remote.origin.pushurl || git config remote.origin.url)
+RABBITMQ_CURRENT_FETCH_URL ?= $(git_origin_fetch_url)
+RABBITMQ_CURRENT_PUSH_URL ?= $(git_origin_push_url)
+else
+RABBITMQ_CURRENT_FETCH_URL ?= $(RABBITMQ_UPSTREAM_FETCH_URL)
+RABBITMQ_CURRENT_PUSH_URL ?= $(RABBITMQ_UPSTREAM_PUSH_URL)
+endif
+
+# Macro to replace the following pattern:
+# 1. /foo.git -> /bar.git
+# 2. /foo -> /bar
+# 3. /foo/ -> /bar/
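+# For illustration (hypothetical URL):
+#   $(call subst_repo_name,rabbitmq-common,rabbitmq-codegen,https://example.com/team/rabbitmq-common.git)
+#   => https://example.com/team/rabbitmq-codegen.git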
+subst_repo_name = $(patsubst %/$(1)/%,%/$(2)/%,$(patsubst %/$(1),%/$(2),$(patsubst %/$(1).git,%/$(2).git,$(3))))
+
+# Macro to replace both the project's name (eg. "rabbit_common") and
+# repository name (eg. "rabbitmq-common") by the target's equivalent.
+#
+# This macro is kept on one line because we don't want whitespace in
+# the returned value, as it's used in $(dep_fetch_git_rmq) in a shell
+# single-quoted string.
+dep_rmq_repo = $(if $(dep_$(2)),$(call subst_repo_name,$(PROJECT),$(2),$(call subst_repo_name,$(RABBITMQ_COMPONENT_REPO_NAME),$(call rmq_cmp_repo_name,$(2)),$(1))),$(pkg_$(1)_repo))
+
+dep_rmq_commits = $(if $(dep_$(1)), \
+ $(wordlist 3,$(words $(dep_$(1))),$(dep_$(1))), \
+ $(pkg_$(1)_commit))
+
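+# Fetch method for "git_rmq" dependencies: try cloning from the URL
+# derived from this clone's origin first, then from the GitHub upstream,
+# and check out the first ref among $(dep_rmq_commits) that exists.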
+define dep_fetch_git_rmq
+ fetch_url1='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_FETCH_URL),$(1))'; \
+ fetch_url2='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_FETCH_URL),$(1))'; \
+ if test "$$$$fetch_url1" != '$(RABBITMQ_CURRENT_FETCH_URL)' && \
+ git clone -q -n -- "$$$$fetch_url1" $(DEPS_DIR)/$(call dep_name,$(1)); then \
+ fetch_url="$$$$fetch_url1"; \
+ push_url='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_PUSH_URL),$(1))'; \
+ elif git clone -q -n -- "$$$$fetch_url2" $(DEPS_DIR)/$(call dep_name,$(1)); then \
+ fetch_url="$$$$fetch_url2"; \
+ push_url='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_PUSH_URL),$(1))'; \
+ fi; \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && ( \
+ $(foreach ref,$(call dep_rmq_commits,$(1)), \
+ git checkout -q $(ref) >/dev/null 2>&1 || \
+ ) \
+ (echo "error: no valid pathspec among: $(call dep_rmq_commits,$(1))" \
+ 1>&2 && false) ) && \
+ (test "$$$$fetch_url" = "$$$$push_url" || \
+ git remote set-url --push origin "$$$$push_url")
+endef
+
+# --------------------------------------------------------------------
+# Component distribution.
+# --------------------------------------------------------------------
+
+list-dist-deps::
+ @:
+
+prepare-dist::
+ @:
+
+# --------------------------------------------------------------------
+# Umbrella-specific settings.
+# --------------------------------------------------------------------
+
+# If the top-level project is a RabbitMQ component, we override
+# $(DEPS_DIR) for this project to point to the top-level's one.
+#
+# We also verify that the guessed DEPS_DIR is actually named `deps`,
+# to rule out any situation where finding a `rabbitmq-components.mk`
+# in upper directories was just a coincidence.
+
+possible_deps_dir_1 = $(abspath ..)
+possible_deps_dir_2 = $(abspath ../../..)
+
+ifeq ($(notdir $(possible_deps_dir_1)),deps)
+ifneq ($(wildcard $(possible_deps_dir_1)/../rabbitmq-components.mk),)
+deps_dir_overriden = 1
+DEPS_DIR ?= $(possible_deps_dir_1)
+DISABLE_DISTCLEAN = 1
+endif
+endif
+
+ifeq ($(deps_dir_overriden),)
+ifeq ($(notdir $(possible_deps_dir_2)),deps)
+ifneq ($(wildcard $(possible_deps_dir_2)/../rabbitmq-components.mk),)
+deps_dir_overriden = 1
+DEPS_DIR ?= $(possible_deps_dir_2)
+DISABLE_DISTCLEAN = 1
+endif
+endif
+endif
+
+ifneq ($(wildcard UMBRELLA.md),)
+DISABLE_DISTCLEAN = 1
+endif
+
+# We disable `make distclean` so $(DEPS_DIR) is not accidentally removed.
+
+ifeq ($(DISABLE_DISTCLEAN),1)
+ifneq ($(filter distclean distclean-deps,$(MAKECMDGOALS)),)
+SKIP_DEPS = 1
+endif
+endif
diff --git a/deps/rabbitmq_auth_mechanism_ssl/src/rabbit_auth_mechanism_ssl.erl b/deps/rabbitmq_auth_mechanism_ssl/src/rabbit_auth_mechanism_ssl.erl
new file mode 100644
index 0000000000..335dbbc2c5
--- /dev/null
+++ b/deps/rabbitmq_auth_mechanism_ssl/src/rabbit_auth_mechanism_ssl.erl
@@ -0,0 +1,68 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+
+-module(rabbit_auth_mechanism_ssl).
+
+-behaviour(rabbit_auth_mechanism).
+
+-export([description/0, should_offer/1, init/1, handle_response/2]).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+-include_lib("public_key/include/public_key.hrl").
+
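+%% Register this module with rabbit_registry as the EXTERNAL SASL
+%% mechanism at boot, and unregister it when the plugin is disabled.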
+-rabbit_boot_step({?MODULE,
+ [{description, "external TLS peer verification-based authentication mechanism"},
+ {mfa, {rabbit_registry, register,
+ [auth_mechanism, <<"EXTERNAL">>, ?MODULE]}},
+ {requires, rabbit_registry},
+ {enables, kernel_ready},
+ {cleanup, {rabbit_registry, unregister,
+ [auth_mechanism, <<"EXTERNAL">>]}}]}).
+
+-record(state, {username = undefined}).
+
+description() ->
+ [{description, <<"TLS peer verification-based authentication plugin. Used in combination with the EXTERNAL SASL mechanism.">>}].
+
+should_offer(Sock) ->
+ %% SASL EXTERNAL. SASL says EXTERNAL means "use credentials
+ %% established by means external to the mechanism". The username
+ %% is extracted from the client certificate.
+ case rabbit_net:peercert(Sock) of
+ nossl -> false;
+ %% We offer EXTERNAL even if there is no peercert since that leads to
+ %% a more comprehensible error message: authentication is refused
+ %% below with "no peer certificate" rather than having the client fail
+ %% to negotiate an authentication mechanism.
+ {error, no_peercert} -> true;
+ {ok, _} -> true
+ end.
+
+init(Sock) ->
+ Username = case rabbit_net:peercert(Sock) of
+ {ok, C} ->
+ case rabbit_ssl:peer_cert_auth_name(C) of
+ unsafe -> {refused, none, "TLS configuration is unsafe", []};
+ not_found -> {refused, none, "no name found", []};
+ Name -> rabbit_data_coercion:to_binary(Name)
+ end;
+ {error, no_peercert} ->
+ {refused, none, "connection peer presented no TLS (x.509) certificate", []};
+ nossl ->
+ {refused, none, "not a TLS-enabled connection", []}
+ end,
+ rabbit_log:debug("auth mechanism TLS extracted username '~s' from peer certificate", [Username]),
+ #state{username = Username}.
+
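+%% EXTERNAL carries no response data: the username extracted from the
+%% peer certificate in init/1 is the only credential that is checked.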
+handle_response(_Response, #state{username = Username}) ->
+ case Username of
+ {refused, _, _, _} = E ->
+ E;
+ _ ->
+ rabbit_access_control:check_user_login(Username, [])
+ end.
diff --git a/deps/rabbitmq_auth_mechanism_ssl/src/rabbit_auth_mechanism_ssl_app.erl b/deps/rabbitmq_auth_mechanism_ssl/src/rabbit_auth_mechanism_ssl_app.erl
new file mode 100644
index 0000000000..6b3af6acc0
--- /dev/null
+++ b/deps/rabbitmq_auth_mechanism_ssl/src/rabbit_auth_mechanism_ssl_app.erl
@@ -0,0 +1,26 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_auth_mechanism_ssl_app).
+
+-behaviour(application).
+-export([start/2, stop/1]).
+
+%% Dummy supervisor - see Ulf Wiger's comment at
+%% http://erlang.2086793.n4.nabble.com/initializing-library-applications-without-processes-td2094473.html
+
+-behaviour(supervisor).
+-export([init/1]).
+
+start(normal, []) ->
+ supervisor:start_link({local,?MODULE},?MODULE,[]).
+
+stop(_State) ->
+ ok.
+
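+%% Supervisor callback: no children, the supervisor only gives the
+%% application a top-level process to start and stop.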
+init([]) ->
+ {ok, {{one_for_one,3,10},[]}}.
diff --git a/deps/rabbitmq_aws/.editorconfig b/deps/rabbitmq_aws/.editorconfig
new file mode 100644
index 0000000000..235cf6854e
--- /dev/null
+++ b/deps/rabbitmq_aws/.editorconfig
@@ -0,0 +1,15 @@
+# top-most EditorConfig file
+root = true
+
+# Unix-style newlines with a newline ending every file
+[*]
+end_of_line = lf
+insert_final_newline = true
+
+[Makefile]
+indent_style = tab
+
+# 2 space indentation
+[{*.erl,*.hrl,*.md}]
+indent_style = space
+indent_size = 2
diff --git a/deps/rabbitmq_aws/.gitignore b/deps/rabbitmq_aws/.gitignore
new file mode 100644
index 0000000000..9b81a19f05
--- /dev/null
+++ b/deps/rabbitmq_aws/.gitignore
@@ -0,0 +1,30 @@
+.rebar3
+_*
+.eunit
+*.o
+*.beam
+*.plt
+*.swp
+*.swo
+.erlang.cookie
+ebin
+log
+erl_crash.dump
+.rebar
+logs
+_build
+.idea
+cobertura.xml
+rebar.lock
+.erlang.mk
+deps
+rabbitmq_aws.d
+test/*xml
+cover
+/escript/
+/escript.lock
+/plugins/
+/plugins.lock
+/sbin/
+/sbin.lock
+xrefr
diff --git a/deps/rabbitmq_aws/.travis.yml b/deps/rabbitmq_aws/.travis.yml
new file mode 100644
index 0000000000..06864faf4f
--- /dev/null
+++ b/deps/rabbitmq_aws/.travis.yml
@@ -0,0 +1,59 @@
+# vim:sw=2:et:
+
+os: linux
+dist: xenial
+language: elixir
+notifications:
+ email:
+ recipients:
+ - alerts@rabbitmq.com
+ on_success: never
+ on_failure: always
+addons:
+ apt:
+ packages:
+ - awscli
+cache:
+ apt: true
+env:
+ global:
+
+ # $base_rmq_ref is used by rabbitmq-components.mk to select the
+ # appropriate branch for dependencies.
+ - base_rmq_ref=master
+
+elixir:
+ - '1.9'
+otp_release:
+ - '21.3'
+ - '22.2'
+
+install:
+ # This is an Erlang project (we set the language to Elixir only to
+ # ensure Elixir is installed), so we don't want Travis to run mix(1)
+ # automatically: it would break.
+ skip
+
+script:
+ # $current_rmq_ref is also used by rabbitmq-components.mk to select
+ # the appropriate branch for dependencies.
+ - make check-rabbitmq-components.mk
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+ - make xref
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+ - make tests
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+
+after_failure:
+ - |
+ cd "$TRAVIS_BUILD_DIR"
+ if test -d logs && test "$AWS_ACCESS_KEY_ID" && test "$AWS_SECRET_ACCESS_KEY"; then
+ archive_name="$(basename "$TRAVIS_REPO_SLUG")-$TRAVIS_JOB_NUMBER"
+
+ tar -c --transform "s/^logs/${archive_name}/" -f - logs | \
+ xz > "${archive_name}.tar.xz"
+
+ aws s3 cp "${archive_name}.tar.xz" s3://server-release-pipeline/travis-ci-logs/ \
+ --region eu-west-1 \
+ --acl public-read
+ fi
diff --git a/deps/rabbitmq_aws/LICENSE b/deps/rabbitmq_aws/LICENSE
new file mode 100644
index 0000000000..f520da9c58
--- /dev/null
+++ b/deps/rabbitmq_aws/LICENSE
@@ -0,0 +1,6 @@
+rabbitmq-aws is licensed under the BSD 3-Clause License, see LICENSE-rabbitmq_aws.
+
+This package makes use of the following third party libraries:
+
+ - httpc-aws - https://github.com/gmr/httpc-aws - BSD 3-Clause License, see LICENSE-httpc_aws
+ - erlcloud - https://github.com/erlcloud/erlcloud - Simplified BSD License, see LICENSE-erlcloud.
diff --git a/deps/rabbitmq_aws/LICENSE-erlcloud b/deps/rabbitmq_aws/LICENSE-erlcloud
new file mode 100644
index 0000000000..113cc7d18e
--- /dev/null
+++ b/deps/rabbitmq_aws/LICENSE-erlcloud
@@ -0,0 +1,22 @@
+Copyright (C) 2010 Brian Buchanan. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGE.
diff --git a/deps/rabbitmq_aws/LICENSE-httpc_aws b/deps/rabbitmq_aws/LICENSE-httpc_aws
new file mode 100644
index 0000000000..b892d5b6c8
--- /dev/null
+++ b/deps/rabbitmq_aws/LICENSE-httpc_aws
@@ -0,0 +1,29 @@
+Copyright (c) 2016, Gavin M. Roy <gavinmroy@gmail.com>.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+* Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+* The names of its contributors may not be used to endorse or promote
+ products derived from this software without specific prior written
+ permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/deps/rabbitmq_aws/LICENSE-rabbitmq_aws b/deps/rabbitmq_aws/LICENSE-rabbitmq_aws
new file mode 100644
index 0000000000..b892d5b6c8
--- /dev/null
+++ b/deps/rabbitmq_aws/LICENSE-rabbitmq_aws
@@ -0,0 +1,29 @@
+Copyright (c) 2016, Gavin M. Roy <gavinmroy@gmail.com>.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+* Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+* The names of its contributors may not be used to endorse or promote
+ products derived from this software without specific prior written
+ permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/deps/rabbitmq_aws/Makefile b/deps/rabbitmq_aws/Makefile
new file mode 100644
index 0000000000..8da412b4f7
--- /dev/null
+++ b/deps/rabbitmq_aws/Makefile
@@ -0,0 +1,22 @@
+PROJECT = rabbitmq_aws
+PROJECT_DESCRIPTION = A minimalistic AWS API interface used by rabbitmq-autocluster (3.6.x) and other RabbitMQ plugins
+PROJECT_MOD = rabbitmq_aws_app
+PROJECT_REGISTERED = rabbitmq_aws
+
+define PROJECT_ENV
+[]
+endef
+
+LOCAL_DEPS = crypto inets ssl xmerl
+BUILD_DEPS = rabbit_common
+DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk
+TEST_DEPS = meck
+
+# FIXME: Use erlang.mk patched for RabbitMQ, while waiting for PRs to be
+# reviewed and merged.
+
+ERLANG_MK_REPO = https://github.com/rabbitmq/erlang.mk.git
+ERLANG_MK_COMMIT = rabbitmq-tmp
+
+include rabbitmq-components.mk
+include erlang.mk
diff --git a/deps/rabbitmq_aws/README.md b/deps/rabbitmq_aws/README.md
new file mode 100644
index 0000000000..9d87ab6bcd
--- /dev/null
+++ b/deps/rabbitmq_aws/README.md
@@ -0,0 +1,104 @@
+# rabbitmq-aws
+
+A fork of [gmr/httpc-aws](https://github.com/gmr/httpc-aws) for use in building RabbitMQ plugins that interact with Amazon Web Services APIs.
+
+[![Build Status](https://travis-ci.org/gmr/rabbitmq-aws.svg?branch=master)](https://travis-ci.org/gmr/rabbitmq-aws)
+
+## Supported Erlang Versions
+
+[Same as RabbitMQ](http://www.rabbitmq.com/which-erlang.html)
+
+## Configuration
+
+Configuration for *rabbitmq-aws* can be provided in multiple ways. It is designed
+to behave similarly to the [AWS Command Line Interface](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html)
+with respect to providing region and configuration information. Additionally, it
+has two functions, ``rabbitmq_aws:set_region/1`` and ``rabbitmq_aws:set_credentials/2``,
+that allow for application-specific configuration, bypassing the automatic
+configuration behavior.
+
+### Configuration Precedence
+
+The configuration values have the following precedence:
+
+ - Explicitly configured via API
+ - Environment variables
+ - Configuration file
+ - EC2 Instance Metadata Service where applicable
+
+### Credentials Precedence
+
+The credentials values have the following precedence:
+
+ - Explicitly configured via API
+ - Environment variables
+ - Credentials file
+ - EC2 Instance Metadata Service
+
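+A minimal sketch of honoring this precedence from a plugin, assuming a
+hypothetical ``my_plugin_conf`` module that returns explicitly configured
+values (the library performs the remaining fallbacks on its own):
+
+```erlang
+configure_aws() ->
+    %% my_plugin_conf is a hypothetical source of explicit settings.
+    case my_plugin_conf:get(aws_region) of
+        {ok, Region} ->
+            %% Highest precedence: explicit configuration via the API.
+            rabbitmq_aws:set_region(Region);
+        undefined ->
+            %% Fall back to environment variables, the configuration
+            %% file and, where applicable, the EC2 Instance Metadata
+            %% Service, all handled by the library itself.
+            ok
+    end.
+```
+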
+### Environment Variables
+
+As with the AWS CLI, the following environment variables can be used to provide
+configuration or to impact configuration behavior:
+
+ - ``AWS_DEFAULT_PROFILE``
+ - ``AWS_DEFAULT_REGION``
+ - ``AWS_CONFIG_FILE``
+ - ``AWS_SHARED_CREDENTIALS_FILE``
+ - ``AWS_ACCESS_KEY_ID``
+ - ``AWS_SECRET_ACCESS_KEY``
+
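+For example, reading one of these variables from Erlang looks like this
+(a sketch; the ``"default"`` fallback mirrors the AWS CLI convention and
+is not necessarily how the library resolves it internally):
+
+```erlang
+%% Resolve the profile name, falling back to the AWS CLI default.
+Profile = case os:getenv("AWS_DEFAULT_PROFILE") of
+              false -> "default";
+              Value -> Value
+          end.
+```
+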
+## API Functions
+
+ Method | Description
+ ---------------------------------------|--------------------------------------------------------------------------------------------
+ ``rabbitmq_aws:set_region/1`` | Manually specify the AWS region to make requests to.
+ ``rabbitmq_aws:set_credentials/2`` | Manually specify the request credentials to use.
+ ``rabbitmq_aws:refresh_credentials/0`` | Refresh the credentials from the environment, filesystem, or EC2 Instance Metadata service.
+ ``rabbitmq_aws:get/2`` | Perform a GET request to the API specifying the service and request path.
+ ``rabbitmq_aws:get/3`` | Perform a GET request specifying the service, path, and headers.
+ ``rabbitmq_aws:post/4``                 | Perform a POST request specifying the service, path, body, and headers.
+ ``rabbitmq_aws:request/5``              | Perform a request specifying the service, method, path, headers, and body.
+ ``rabbitmq_aws:request/6``              | Perform a request specifying the service, method, path, headers, body, and ``httpc:http_options()``.
+ ``rabbitmq_aws:request/7``              | Perform a request specifying the service, method, path, headers, body, ``httpc:http_options()``, and an override for the API endpoint.
+
+
+## Example Usage
+
+The following example assumes that you either have locally configured credentials or that
+you're using the AWS Instance Metadata service for credentials:
+
+```erlang
+application:start(rabbitmq_aws).
+{ok, {Headers, Response}} = rabbitmq_aws:get("ec2","/?Action=DescribeTags&Version=2015-10-01").
+```
+
+To configure credentials, invoke ``rabbitmq_aws:set_credentials/2``:
+
+```erlang
+application:start(rabbitmq_aws).
+
+rabbitmq_aws:set_credentials("AKIDEXAMPLE", "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY"),
+
+RequestHeaders = [{"Content-Type", "application/x-amz-json-1.0"},
+ {"X-Amz-Target", "DynamoDB_20120810.ListTables"}],
+
+{ok, {Headers, Response}} = rabbitmq_aws:post("dynamodb", "/",
+ "{\"Limit\": 20}",
+ RequestHeaders).
+```
+
+## Build
+
+```bash
+make
+```
+
+## Test
+
+```bash
+make tests
+```
+
+## License
+
+BSD 3-Clause License
diff --git a/deps/rabbitmq_aws/erlang.mk b/deps/rabbitmq_aws/erlang.mk
new file mode 100644
index 0000000000..fce4be0b0a
--- /dev/null
+++ b/deps/rabbitmq_aws/erlang.mk
@@ -0,0 +1,7808 @@
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+.PHONY: all app apps deps search rel relup docs install-docs check tests clean distclean help erlang-mk
+
+ERLANG_MK_FILENAME := $(realpath $(lastword $(MAKEFILE_LIST)))
+export ERLANG_MK_FILENAME
+
+ERLANG_MK_VERSION = 2019.07.01-40-geb3e4b0
+ERLANG_MK_WITHOUT =
+
+# Make 3.81 and 3.82 are deprecated.
+
+ifeq ($(MAKELEVEL)$(MAKE_VERSION),03.81)
+$(warning Please upgrade to GNU Make 4 or later: https://erlang.mk/guide/installation.html)
+endif
+
+ifeq ($(MAKELEVEL)$(MAKE_VERSION),03.82)
+$(warning Please upgrade to GNU Make 4 or later: https://erlang.mk/guide/installation.html)
+endif
+
+# Core configuration.
+
+PROJECT ?= $(notdir $(CURDIR))
+PROJECT := $(strip $(PROJECT))
+
+PROJECT_VERSION ?= rolling
+PROJECT_MOD ?= $(PROJECT)_app
+PROJECT_ENV ?= []
+
+# Verbosity.
+
+V ?= 0
+
+verbose_0 = @
+verbose_2 = set -x;
+verbose = $(verbose_$(V))
+
+ifeq ($(V),3)
+SHELL := $(SHELL) -x
+endif
+
+gen_verbose_0 = @echo " GEN " $@;
+gen_verbose_2 = set -x;
+gen_verbose = $(gen_verbose_$(V))
+
+gen_verbose_esc_0 = @echo " GEN " $$@;
+gen_verbose_esc_2 = set -x;
+gen_verbose_esc = $(gen_verbose_esc_$(V))
+
+# Temporary files directory.
+
+ERLANG_MK_TMP ?= $(CURDIR)/.erlang.mk
+export ERLANG_MK_TMP
+
+# "erl" command.
+
+ERL = erl +A1 -noinput -boot no_dot_erlang
+
+# Platform detection.
+
+ifeq ($(PLATFORM),)
+UNAME_S := $(shell uname -s)
+
+ifeq ($(UNAME_S),Linux)
+PLATFORM = linux
+else ifeq ($(UNAME_S),Darwin)
+PLATFORM = darwin
+else ifeq ($(UNAME_S),SunOS)
+PLATFORM = solaris
+else ifeq ($(UNAME_S),GNU)
+PLATFORM = gnu
+else ifeq ($(UNAME_S),FreeBSD)
+PLATFORM = freebsd
+else ifeq ($(UNAME_S),NetBSD)
+PLATFORM = netbsd
+else ifeq ($(UNAME_S),OpenBSD)
+PLATFORM = openbsd
+else ifeq ($(UNAME_S),DragonFly)
+PLATFORM = dragonfly
+else ifeq ($(shell uname -o),Msys)
+PLATFORM = msys2
+else
+$(error Unable to detect platform. Please open a ticket with the output of uname -a.)
+endif
+
+export PLATFORM
+endif
+
+# Core targets.
+
+all:: deps app rel
+
+# Noop to avoid a Make warning when there's nothing to do.
+rel::
+ $(verbose) :
+
+relup:: deps app
+
+check:: tests
+
+clean:: clean-crashdump
+
+clean-crashdump:
+ifneq ($(wildcard erl_crash.dump),)
+ $(gen_verbose) rm -f erl_crash.dump
+endif
+
+distclean:: clean distclean-tmp
+
+$(ERLANG_MK_TMP):
+ $(verbose) mkdir -p $(ERLANG_MK_TMP)
+
+distclean-tmp:
+ $(gen_verbose) rm -rf $(ERLANG_MK_TMP)
+
+help::
+ $(verbose) printf "%s\n" \
+ "erlang.mk (version $(ERLANG_MK_VERSION)) is distributed under the terms of the ISC License." \
+ "Copyright (c) 2013-2016 Loïc Hoguin <essen@ninenines.eu>" \
+ "" \
+ "Usage: [V=1] $(MAKE) [target]..." \
+ "" \
+ "Core targets:" \
+ " all Run deps, app and rel targets in that order" \
+ " app Compile the project" \
+ " deps Fetch dependencies (if needed) and compile them" \
+ " fetch-deps Fetch dependencies recursively (if needed) without compiling them" \
+ " list-deps List dependencies recursively on stdout" \
+ " search q=... Search for a package in the built-in index" \
+ " rel Build a release for this project, if applicable" \
+ " docs Build the documentation for this project" \
+ " install-docs Install the man pages for this project" \
+ " check Compile and run all tests and analysis for this project" \
+ " tests Run the tests for this project" \
+ " clean Delete temporary and output files from most targets" \
+ " distclean Delete all temporary and output files" \
+ " help Display this help and exit" \
+ " erlang-mk Update erlang.mk to the latest version"
+
+# Core functions.
+
+empty :=
+space := $(empty) $(empty)
+tab := $(empty) $(empty)
+comma := ,
+
+define newline
+
+
+endef
+
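+# e.g. $(call comma_list,a b c) expands to a,b,c.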
+define comma_list
+$(subst $(space),$(comma),$(strip $(1)))
+endef
+
+define escape_dquotes
+$(subst ",\",$1)
+endef
+
+# Adding erlang.mk to make Erlang scripts that call init:get_plain_arguments() happy.
+define erlang
+$(ERL) $2 -pz $(ERLANG_MK_TMP)/rebar/ebin -eval "$(subst $(newline),,$(call escape_dquotes,$1))" -- erlang.mk
+endef
+
+ifeq ($(PLATFORM),msys2)
+core_native_path = $(shell cygpath -m $1)
+else
+core_native_path = $1
+endif
+
+core_http_get = curl -Lf$(if $(filter-out 0,$(V)),,s)o $(call core_native_path,$1) $2
+
+core_eq = $(and $(findstring $(1),$(2)),$(findstring $(2),$(1)))
+
+# We skip files that contain spaces because they end up causing issues.
+core_find = $(if $(wildcard $1),$(shell find $(1:%/=%) \( -type l -o -type f \) -name $(subst *,\*,$2) | grep -v " "))
+
+core_lc = $(subst A,a,$(subst B,b,$(subst C,c,$(subst D,d,$(subst E,e,$(subst F,f,$(subst G,g,$(subst H,h,$(subst I,i,$(subst J,j,$(subst K,k,$(subst L,l,$(subst M,m,$(subst N,n,$(subst O,o,$(subst P,p,$(subst Q,q,$(subst R,r,$(subst S,s,$(subst T,t,$(subst U,u,$(subst V,v,$(subst W,w,$(subst X,x,$(subst Y,y,$(subst Z,z,$(1)))))))))))))))))))))))))))
+
+core_ls = $(filter-out $(1),$(shell echo $(1)))
+
+# @todo Use a solution that does not require using perl.
+core_relpath = $(shell perl -e 'use File::Spec; print File::Spec->abs2rel(@ARGV) . "\n"' $1 $2)
+
+define core_render
+ printf -- '$(subst $(newline),\n,$(subst %,%%,$(subst ','\'',$(subst $(tab),$(WS),$(call $(1))))))\n' > $(2)
+endef
+
+# Automated update.
+
+ERLANG_MK_REPO ?= https://github.com/ninenines/erlang.mk
+ERLANG_MK_COMMIT ?=
+ERLANG_MK_BUILD_CONFIG ?= build.config
+ERLANG_MK_BUILD_DIR ?= .erlang.mk.build
+
+erlang-mk: WITHOUT ?= $(ERLANG_MK_WITHOUT)
+erlang-mk:
+ifdef ERLANG_MK_COMMIT
+ $(verbose) git clone $(ERLANG_MK_REPO) $(ERLANG_MK_BUILD_DIR)
+ $(verbose) cd $(ERLANG_MK_BUILD_DIR) && git checkout $(ERLANG_MK_COMMIT)
+else
+ $(verbose) git clone --depth 1 $(ERLANG_MK_REPO) $(ERLANG_MK_BUILD_DIR)
+endif
+ $(verbose) if [ -f $(ERLANG_MK_BUILD_CONFIG) ]; then cp $(ERLANG_MK_BUILD_CONFIG) $(ERLANG_MK_BUILD_DIR)/build.config; fi
+ $(gen_verbose) $(MAKE) --no-print-directory -C $(ERLANG_MK_BUILD_DIR) WITHOUT='$(strip $(WITHOUT))' UPGRADE=1
+ $(verbose) cp $(ERLANG_MK_BUILD_DIR)/erlang.mk ./erlang.mk
+ $(verbose) rm -rf $(ERLANG_MK_BUILD_DIR)
+ $(verbose) rm -rf $(ERLANG_MK_TMP)
+
+# The erlang.mk package index is bundled in the default erlang.mk build.
+# Search for the string "copyright" to skip to the rest of the code.
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-kerl
+
+KERL_INSTALL_DIR ?= $(HOME)/erlang
+
+ifeq ($(strip $(KERL)),)
+KERL := $(ERLANG_MK_TMP)/kerl/kerl
+endif
+
+KERL_DIR = $(ERLANG_MK_TMP)/kerl
+
+export KERL
+
+KERL_GIT ?= https://github.com/kerl/kerl
+KERL_COMMIT ?= master
+
+KERL_MAKEFLAGS ?=
+
+OTP_GIT ?= https://github.com/erlang/otp
+
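+# kerl_otp_target defines a rule that builds and installs the requested
+# Erlang/OTP version with kerl, e.g. $(eval $(call kerl_otp_target,22.3))
+# creates a target for $(KERL_INSTALL_DIR)/22.3; see $(ERLANG_OTP) below.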
+define kerl_otp_target
+$(KERL_INSTALL_DIR)/$(1): $(KERL)
+ $(verbose) if [ ! -d $$@ ]; then \
+ MAKEFLAGS="$(KERL_MAKEFLAGS)" $(KERL) build git $(OTP_GIT) $(1) $(1); \
+ $(KERL) install $(1) $(KERL_INSTALL_DIR)/$(1); \
+ fi
+endef
+
+define kerl_hipe_target
+$(KERL_INSTALL_DIR)/$1-native: $(KERL)
+ $(verbose) if [ ! -d $$@ ]; then \
+ KERL_CONFIGURE_OPTIONS=--enable-native-libs \
+ MAKEFLAGS="$(KERL_MAKEFLAGS)" $(KERL) build git $(OTP_GIT) $1 $1-native; \
+ $(KERL) install $1-native $(KERL_INSTALL_DIR)/$1-native; \
+ fi
+endef
+
+$(KERL): $(KERL_DIR)
+
+$(KERL_DIR): | $(ERLANG_MK_TMP)
+ $(gen_verbose) git clone --depth 1 $(KERL_GIT) $(ERLANG_MK_TMP)/kerl
+ $(verbose) cd $(ERLANG_MK_TMP)/kerl && git checkout $(KERL_COMMIT)
+ $(verbose) chmod +x $(KERL)
+
+distclean:: distclean-kerl
+
+distclean-kerl:
+ $(gen_verbose) rm -rf $(KERL_DIR)
+
+# Allow users to select which version of Erlang/OTP to use for a project.
+
+ifneq ($(strip $(LATEST_ERLANG_OTP)),)
+# In some environments it is necessary to filter out master.
+ERLANG_OTP := $(notdir $(lastword $(sort\
+ $(filter-out $(KERL_INSTALL_DIR)/master $(KERL_INSTALL_DIR)/OTP_R%,\
+ $(filter-out %-rc1 %-rc2 %-rc3,$(wildcard $(KERL_INSTALL_DIR)/*[^-native]))))))
+endif
+
+ERLANG_OTP ?=
+ERLANG_HIPE ?=
+
+# Use kerl to enforce a specific Erlang/OTP version for a project.
+ifneq ($(strip $(ERLANG_OTP)),)
+export PATH := $(KERL_INSTALL_DIR)/$(ERLANG_OTP)/bin:$(PATH)
+SHELL := env PATH=$(PATH) $(SHELL)
+$(eval $(call kerl_otp_target,$(ERLANG_OTP)))
+
+# Build Erlang/OTP only if it doesn't already exist.
+ifeq ($(wildcard $(KERL_INSTALL_DIR)/$(ERLANG_OTP))$(BUILD_ERLANG_OTP),)
+$(info Building Erlang/OTP $(ERLANG_OTP)... Please wait...)
+$(shell $(MAKE) $(KERL_INSTALL_DIR)/$(ERLANG_OTP) ERLANG_OTP=$(ERLANG_OTP) BUILD_ERLANG_OTP=1 >&2)
+endif
+
+else
+# Same for a HiPE enabled VM.
+ifneq ($(strip $(ERLANG_HIPE)),)
+export PATH := $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native/bin:$(PATH)
+SHELL := env PATH=$(PATH) $(SHELL)
+$(eval $(call kerl_hipe_target,$(ERLANG_HIPE)))
+
+# Build Erlang/OTP only if it doesn't already exist.
+ifeq ($(wildcard $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native)$(BUILD_ERLANG_OTP),)
+$(info Building HiPE-enabled Erlang/OTP $(ERLANG_HIPE)... Please wait...)
+$(shell $(MAKE) $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native ERLANG_HIPE=$(ERLANG_HIPE) BUILD_ERLANG_OTP=1 >&2)
+endif
+
+endif
+endif
+
+PACKAGES += aberth
+pkg_aberth_name = aberth
+pkg_aberth_description = Generic BERT-RPC server in Erlang
+pkg_aberth_homepage = https://github.com/a13x/aberth
+pkg_aberth_fetch = git
+pkg_aberth_repo = https://github.com/a13x/aberth
+pkg_aberth_commit = master
+
+PACKAGES += active
+pkg_active_name = active
+pkg_active_description = Active development for Erlang: rebuild and reload source/binary files while the VM is running
+pkg_active_homepage = https://github.com/proger/active
+pkg_active_fetch = git
+pkg_active_repo = https://github.com/proger/active
+pkg_active_commit = master
+
+PACKAGES += actordb_core
+pkg_actordb_core_name = actordb_core
+pkg_actordb_core_description = ActorDB main source
+pkg_actordb_core_homepage = http://www.actordb.com/
+pkg_actordb_core_fetch = git
+pkg_actordb_core_repo = https://github.com/biokoda/actordb_core
+pkg_actordb_core_commit = master
+
+PACKAGES += actordb_thrift
+pkg_actordb_thrift_name = actordb_thrift
+pkg_actordb_thrift_description = Thrift API for ActorDB
+pkg_actordb_thrift_homepage = http://www.actordb.com/
+pkg_actordb_thrift_fetch = git
+pkg_actordb_thrift_repo = https://github.com/biokoda/actordb_thrift
+pkg_actordb_thrift_commit = master
+
+PACKAGES += aleppo
+pkg_aleppo_name = aleppo
+pkg_aleppo_description = Alternative Erlang Pre-Processor
+pkg_aleppo_homepage = https://github.com/ErlyORM/aleppo
+pkg_aleppo_fetch = git
+pkg_aleppo_repo = https://github.com/ErlyORM/aleppo
+pkg_aleppo_commit = master
+
+PACKAGES += alog
+pkg_alog_name = alog
+pkg_alog_description = Simply the best logging framework for Erlang
+pkg_alog_homepage = https://github.com/siberian-fast-food/alogger
+pkg_alog_fetch = git
+pkg_alog_repo = https://github.com/siberian-fast-food/alogger
+pkg_alog_commit = master
+
+PACKAGES += amqp_client
+pkg_amqp_client_name = amqp_client
+pkg_amqp_client_description = RabbitMQ Erlang AMQP client
+pkg_amqp_client_homepage = https://www.rabbitmq.com/erlang-client-user-guide.html
+pkg_amqp_client_fetch = git
+pkg_amqp_client_repo = https://github.com/rabbitmq/rabbitmq-erlang-client.git
+pkg_amqp_client_commit = master
+
+PACKAGES += annotations
+pkg_annotations_name = annotations
+pkg_annotations_description = Simple code instrumentation utilities
+pkg_annotations_homepage = https://github.com/hyperthunk/annotations
+pkg_annotations_fetch = git
+pkg_annotations_repo = https://github.com/hyperthunk/annotations
+pkg_annotations_commit = master
+
+PACKAGES += antidote
+pkg_antidote_name = antidote
+pkg_antidote_description = Large-scale computation without synchronisation
+pkg_antidote_homepage = https://syncfree.lip6.fr/
+pkg_antidote_fetch = git
+pkg_antidote_repo = https://github.com/SyncFree/antidote
+pkg_antidote_commit = master
+
+PACKAGES += apns
+pkg_apns_name = apns
+pkg_apns_description = Apple Push Notification Server for Erlang
+pkg_apns_homepage = http://inaka.github.com/apns4erl
+pkg_apns_fetch = git
+pkg_apns_repo = https://github.com/inaka/apns4erl
+pkg_apns_commit = master
+
+PACKAGES += asciideck
+pkg_asciideck_name = asciideck
+pkg_asciideck_description = Asciidoc for Erlang.
+pkg_asciideck_homepage = https://ninenines.eu
+pkg_asciideck_fetch = git
+pkg_asciideck_repo = https://github.com/ninenines/asciideck
+pkg_asciideck_commit = master
+
+PACKAGES += azdht
+pkg_azdht_name = azdht
+pkg_azdht_description = Azureus Distributed Hash Table (DHT) in Erlang
+pkg_azdht_homepage = https://github.com/arcusfelis/azdht
+pkg_azdht_fetch = git
+pkg_azdht_repo = https://github.com/arcusfelis/azdht
+pkg_azdht_commit = master
+
+PACKAGES += backoff
+pkg_backoff_name = backoff
+pkg_backoff_description = Simple exponential backoffs in Erlang
+pkg_backoff_homepage = https://github.com/ferd/backoff
+pkg_backoff_fetch = git
+pkg_backoff_repo = https://github.com/ferd/backoff
+pkg_backoff_commit = master
+
+PACKAGES += barrel_tcp
+pkg_barrel_tcp_name = barrel_tcp
+pkg_barrel_tcp_description = barrel is a generic TCP acceptor pool with low latency in Erlang.
+pkg_barrel_tcp_homepage = https://github.com/benoitc-attic/barrel_tcp
+pkg_barrel_tcp_fetch = git
+pkg_barrel_tcp_repo = https://github.com/benoitc-attic/barrel_tcp
+pkg_barrel_tcp_commit = master
+
+PACKAGES += basho_bench
+pkg_basho_bench_name = basho_bench
+pkg_basho_bench_description = A load-generation and testing tool for basically whatever you can write a returning Erlang function for.
+pkg_basho_bench_homepage = https://github.com/basho/basho_bench
+pkg_basho_bench_fetch = git
+pkg_basho_bench_repo = https://github.com/basho/basho_bench
+pkg_basho_bench_commit = master
+
+PACKAGES += bcrypt
+pkg_bcrypt_name = bcrypt
+pkg_bcrypt_description = Bcrypt Erlang / C library
+pkg_bcrypt_homepage = https://github.com/erlangpack/bcrypt
+pkg_bcrypt_fetch = git
+pkg_bcrypt_repo = https://github.com/erlangpack/bcrypt.git
+pkg_bcrypt_commit = master
+
+PACKAGES += beam
+pkg_beam_name = beam
+pkg_beam_description = BEAM emulator written in Erlang
+pkg_beam_homepage = https://github.com/tonyrog/beam
+pkg_beam_fetch = git
+pkg_beam_repo = https://github.com/tonyrog/beam
+pkg_beam_commit = master
+
+PACKAGES += beanstalk
+pkg_beanstalk_name = beanstalk
+pkg_beanstalk_description = An Erlang client for beanstalkd
+pkg_beanstalk_homepage = https://github.com/tim/erlang-beanstalk
+pkg_beanstalk_fetch = git
+pkg_beanstalk_repo = https://github.com/tim/erlang-beanstalk
+pkg_beanstalk_commit = master
+
+PACKAGES += bear
+pkg_bear_name = bear
+pkg_bear_description = a set of statistics functions for erlang
+pkg_bear_homepage = https://github.com/boundary/bear
+pkg_bear_fetch = git
+pkg_bear_repo = https://github.com/boundary/bear
+pkg_bear_commit = master
+
+PACKAGES += bertconf
+pkg_bertconf_name = bertconf
+pkg_bertconf_description = Make ETS tables out of static BERT files that are auto-reloaded
+pkg_bertconf_homepage = https://github.com/ferd/bertconf
+pkg_bertconf_fetch = git
+pkg_bertconf_repo = https://github.com/ferd/bertconf
+pkg_bertconf_commit = master
+
+PACKAGES += bifrost
+pkg_bifrost_name = bifrost
+pkg_bifrost_description = Erlang FTP Server Framework
+pkg_bifrost_homepage = https://github.com/thorstadt/bifrost
+pkg_bifrost_fetch = git
+pkg_bifrost_repo = https://github.com/thorstadt/bifrost
+pkg_bifrost_commit = master
+
+PACKAGES += binpp
+pkg_binpp_name = binpp
+pkg_binpp_description = Erlang Binary Pretty Printer
+pkg_binpp_homepage = https://github.com/jtendo/binpp
+pkg_binpp_fetch = git
+pkg_binpp_repo = https://github.com/jtendo/binpp
+pkg_binpp_commit = master
+
+PACKAGES += bisect
+pkg_bisect_name = bisect
+pkg_bisect_description = Ordered fixed-size binary dictionary in Erlang
+pkg_bisect_homepage = https://github.com/knutin/bisect
+pkg_bisect_fetch = git
+pkg_bisect_repo = https://github.com/knutin/bisect
+pkg_bisect_commit = master
+
+PACKAGES += bitcask
+pkg_bitcask_name = bitcask
+pkg_bitcask_description = because you need another key/value storage engine
+pkg_bitcask_homepage = https://github.com/basho/bitcask
+pkg_bitcask_fetch = git
+pkg_bitcask_repo = https://github.com/basho/bitcask
+pkg_bitcask_commit = develop
+
+PACKAGES += bitstore
+pkg_bitstore_name = bitstore
+pkg_bitstore_description = A document based ontology development environment
+pkg_bitstore_homepage = https://github.com/bdionne/bitstore
+pkg_bitstore_fetch = git
+pkg_bitstore_repo = https://github.com/bdionne/bitstore
+pkg_bitstore_commit = master
+
+PACKAGES += bootstrap
+pkg_bootstrap_name = bootstrap
+pkg_bootstrap_description = A simple, yet powerful Erlang cluster bootstrapping application.
+pkg_bootstrap_homepage = https://github.com/schlagert/bootstrap
+pkg_bootstrap_fetch = git
+pkg_bootstrap_repo = https://github.com/schlagert/bootstrap
+pkg_bootstrap_commit = master
+
+PACKAGES += boss
+pkg_boss_name = boss
+pkg_boss_description = Erlang web MVC, now featuring Comet
+pkg_boss_homepage = https://github.com/ChicagoBoss/ChicagoBoss
+pkg_boss_fetch = git
+pkg_boss_repo = https://github.com/ChicagoBoss/ChicagoBoss
+pkg_boss_commit = master
+
+PACKAGES += boss_db
+pkg_boss_db_name = boss_db
+pkg_boss_db_description = BossDB: a sharded, caching, pooling, evented ORM for Erlang
+pkg_boss_db_homepage = https://github.com/ErlyORM/boss_db
+pkg_boss_db_fetch = git
+pkg_boss_db_repo = https://github.com/ErlyORM/boss_db
+pkg_boss_db_commit = master
+
+PACKAGES += brod
+pkg_brod_name = brod
+pkg_brod_description = Kafka client in Erlang
+pkg_brod_homepage = https://github.com/klarna/brod
+pkg_brod_fetch = git
+pkg_brod_repo = https://github.com/klarna/brod.git
+pkg_brod_commit = master
+
+PACKAGES += bson
+pkg_bson_name = bson
+pkg_bson_description = BSON documents in Erlang, see bsonspec.org
+pkg_bson_homepage = https://github.com/comtihon/bson-erlang
+pkg_bson_fetch = git
+pkg_bson_repo = https://github.com/comtihon/bson-erlang
+pkg_bson_commit = master
+
+PACKAGES += bullet
+pkg_bullet_name = bullet
+pkg_bullet_description = Simple, reliable, efficient streaming for Cowboy.
+pkg_bullet_homepage = http://ninenines.eu
+pkg_bullet_fetch = git
+pkg_bullet_repo = https://github.com/ninenines/bullet
+pkg_bullet_commit = master
+
+PACKAGES += cache
+pkg_cache_name = cache
+pkg_cache_description = Erlang in-memory cache
+pkg_cache_homepage = https://github.com/fogfish/cache
+pkg_cache_fetch = git
+pkg_cache_repo = https://github.com/fogfish/cache
+pkg_cache_commit = master
+
+PACKAGES += cake
+pkg_cake_name = cake
+pkg_cake_description = Really simple terminal colorization
+pkg_cake_homepage = https://github.com/darach/cake-erl
+pkg_cake_fetch = git
+pkg_cake_repo = https://github.com/darach/cake-erl
+pkg_cake_commit = master
+
+PACKAGES += carotene
+pkg_carotene_name = carotene
+pkg_carotene_description = Real-time server
+pkg_carotene_homepage = https://github.com/carotene/carotene
+pkg_carotene_fetch = git
+pkg_carotene_repo = https://github.com/carotene/carotene
+pkg_carotene_commit = master
+
+PACKAGES += cberl
+pkg_cberl_name = cberl
+pkg_cberl_description = NIF based Erlang bindings for Couchbase
+pkg_cberl_homepage = https://github.com/chitika/cberl
+pkg_cberl_fetch = git
+pkg_cberl_repo = https://github.com/chitika/cberl
+pkg_cberl_commit = master
+
+PACKAGES += cecho
+pkg_cecho_name = cecho
+pkg_cecho_description = An ncurses library for Erlang
+pkg_cecho_homepage = https://github.com/mazenharake/cecho
+pkg_cecho_fetch = git
+pkg_cecho_repo = https://github.com/mazenharake/cecho
+pkg_cecho_commit = master
+
+PACKAGES += cferl
+pkg_cferl_name = cferl
+pkg_cferl_description = Rackspace / Open Stack Cloud Files Erlang Client
+pkg_cferl_homepage = https://github.com/ddossot/cferl
+pkg_cferl_fetch = git
+pkg_cferl_repo = https://github.com/ddossot/cferl
+pkg_cferl_commit = master
+
+PACKAGES += chaos_monkey
+pkg_chaos_monkey_name = chaos_monkey
+pkg_chaos_monkey_description = This is The CHAOS MONKEY. It will kill your processes.
+pkg_chaos_monkey_homepage = https://github.com/dLuna/chaos_monkey
+pkg_chaos_monkey_fetch = git
+pkg_chaos_monkey_repo = https://github.com/dLuna/chaos_monkey
+pkg_chaos_monkey_commit = master
+
+PACKAGES += check_node
+pkg_check_node_name = check_node
+pkg_check_node_description = Nagios Scripts for monitoring Riak
+pkg_check_node_homepage = https://github.com/basho-labs/riak_nagios
+pkg_check_node_fetch = git
+pkg_check_node_repo = https://github.com/basho-labs/riak_nagios
+pkg_check_node_commit = master
+
+PACKAGES += chronos
+pkg_chronos_name = chronos
+pkg_chronos_description = Timer module for Erlang that makes it easy to abstract time out of the tests.
+pkg_chronos_homepage = https://github.com/lehoff/chronos
+pkg_chronos_fetch = git
+pkg_chronos_repo = https://github.com/lehoff/chronos
+pkg_chronos_commit = master
+
+PACKAGES += chumak
+pkg_chumak_name = chumak
+pkg_chumak_description = Pure Erlang implementation of ZeroMQ Message Transport Protocol.
+pkg_chumak_homepage = http://choven.ca
+pkg_chumak_fetch = git
+pkg_chumak_repo = https://github.com/chovencorp/chumak
+pkg_chumak_commit = master
+
+PACKAGES += cl
+pkg_cl_name = cl
+pkg_cl_description = OpenCL binding for Erlang
+pkg_cl_homepage = https://github.com/tonyrog/cl
+pkg_cl_fetch = git
+pkg_cl_repo = https://github.com/tonyrog/cl
+pkg_cl_commit = master
+
+PACKAGES += clique
+pkg_clique_name = clique
+pkg_clique_description = CLI Framework for Erlang
+pkg_clique_homepage = https://github.com/basho/clique
+pkg_clique_fetch = git
+pkg_clique_repo = https://github.com/basho/clique
+pkg_clique_commit = develop
+
+PACKAGES += cloudi_core
+pkg_cloudi_core_name = cloudi_core
+pkg_cloudi_core_description = CloudI internal service runtime
+pkg_cloudi_core_homepage = http://cloudi.org/
+pkg_cloudi_core_fetch = git
+pkg_cloudi_core_repo = https://github.com/CloudI/cloudi_core
+pkg_cloudi_core_commit = master
+
+PACKAGES += cloudi_service_api_requests
+pkg_cloudi_service_api_requests_name = cloudi_service_api_requests
+pkg_cloudi_service_api_requests_description = CloudI Service API requests (JSON-RPC/Erlang-term support)
+pkg_cloudi_service_api_requests_homepage = http://cloudi.org/
+pkg_cloudi_service_api_requests_fetch = git
+pkg_cloudi_service_api_requests_repo = https://github.com/CloudI/cloudi_service_api_requests
+pkg_cloudi_service_api_requests_commit = master
+
+PACKAGES += cloudi_service_db
+pkg_cloudi_service_db_name = cloudi_service_db
+pkg_cloudi_service_db_description = CloudI Database (in-memory/testing/generic)
+pkg_cloudi_service_db_homepage = http://cloudi.org/
+pkg_cloudi_service_db_fetch = git
+pkg_cloudi_service_db_repo = https://github.com/CloudI/cloudi_service_db
+pkg_cloudi_service_db_commit = master
+
+PACKAGES += cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_name = cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_description = Cassandra CloudI Service
+pkg_cloudi_service_db_cassandra_homepage = http://cloudi.org/
+pkg_cloudi_service_db_cassandra_fetch = git
+pkg_cloudi_service_db_cassandra_repo = https://github.com/CloudI/cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_commit = master
+
+PACKAGES += cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_name = cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_description = Cassandra CQL CloudI Service
+pkg_cloudi_service_db_cassandra_cql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_cassandra_cql_fetch = git
+pkg_cloudi_service_db_cassandra_cql_repo = https://github.com/CloudI/cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_commit = master
+
+PACKAGES += cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_name = cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_description = CouchDB CloudI Service
+pkg_cloudi_service_db_couchdb_homepage = http://cloudi.org/
+pkg_cloudi_service_db_couchdb_fetch = git
+pkg_cloudi_service_db_couchdb_repo = https://github.com/CloudI/cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_commit = master
+
+PACKAGES += cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_name = cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_description = elasticsearch CloudI Service
+pkg_cloudi_service_db_elasticsearch_homepage = http://cloudi.org/
+pkg_cloudi_service_db_elasticsearch_fetch = git
+pkg_cloudi_service_db_elasticsearch_repo = https://github.com/CloudI/cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_commit = master
+
+PACKAGES += cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_name = cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_description = memcached CloudI Service
+pkg_cloudi_service_db_memcached_homepage = http://cloudi.org/
+pkg_cloudi_service_db_memcached_fetch = git
+pkg_cloudi_service_db_memcached_repo = https://github.com/CloudI/cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_commit = master
+
+PACKAGES += cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_name = cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_description = MySQL CloudI Service
+pkg_cloudi_service_db_mysql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_mysql_fetch = git
+pkg_cloudi_service_db_mysql_repo = https://github.com/CloudI/cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_commit = master
+
+PACKAGES += cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_name = cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_description = PostgreSQL CloudI Service
+pkg_cloudi_service_db_pgsql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_pgsql_fetch = git
+pkg_cloudi_service_db_pgsql_repo = https://github.com/CloudI/cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_commit = master
+
+PACKAGES += cloudi_service_db_riak
+pkg_cloudi_service_db_riak_name = cloudi_service_db_riak
+pkg_cloudi_service_db_riak_description = Riak CloudI Service
+pkg_cloudi_service_db_riak_homepage = http://cloudi.org/
+pkg_cloudi_service_db_riak_fetch = git
+pkg_cloudi_service_db_riak_repo = https://github.com/CloudI/cloudi_service_db_riak
+pkg_cloudi_service_db_riak_commit = master
+
+PACKAGES += cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_name = cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_description = Tokyo Tyrant CloudI Service
+pkg_cloudi_service_db_tokyotyrant_homepage = http://cloudi.org/
+pkg_cloudi_service_db_tokyotyrant_fetch = git
+pkg_cloudi_service_db_tokyotyrant_repo = https://github.com/CloudI/cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_commit = master
+
+PACKAGES += cloudi_service_filesystem
+pkg_cloudi_service_filesystem_name = cloudi_service_filesystem
+pkg_cloudi_service_filesystem_description = Filesystem CloudI Service
+pkg_cloudi_service_filesystem_homepage = http://cloudi.org/
+pkg_cloudi_service_filesystem_fetch = git
+pkg_cloudi_service_filesystem_repo = https://github.com/CloudI/cloudi_service_filesystem
+pkg_cloudi_service_filesystem_commit = master
+
+PACKAGES += cloudi_service_http_client
+pkg_cloudi_service_http_client_name = cloudi_service_http_client
+pkg_cloudi_service_http_client_description = HTTP client CloudI Service
+pkg_cloudi_service_http_client_homepage = http://cloudi.org/
+pkg_cloudi_service_http_client_fetch = git
+pkg_cloudi_service_http_client_repo = https://github.com/CloudI/cloudi_service_http_client
+pkg_cloudi_service_http_client_commit = master
+
+PACKAGES += cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_name = cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_description = cowboy HTTP/HTTPS CloudI Service
+pkg_cloudi_service_http_cowboy_homepage = http://cloudi.org/
+pkg_cloudi_service_http_cowboy_fetch = git
+pkg_cloudi_service_http_cowboy_repo = https://github.com/CloudI/cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_commit = master
+
+PACKAGES += cloudi_service_http_elli
+pkg_cloudi_service_http_elli_name = cloudi_service_http_elli
+pkg_cloudi_service_http_elli_description = elli HTTP CloudI Service
+pkg_cloudi_service_http_elli_homepage = http://cloudi.org/
+pkg_cloudi_service_http_elli_fetch = git
+pkg_cloudi_service_http_elli_repo = https://github.com/CloudI/cloudi_service_http_elli
+pkg_cloudi_service_http_elli_commit = master
+
+PACKAGES += cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_name = cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_description = Map/Reduce CloudI Service
+pkg_cloudi_service_map_reduce_homepage = http://cloudi.org/
+pkg_cloudi_service_map_reduce_fetch = git
+pkg_cloudi_service_map_reduce_repo = https://github.com/CloudI/cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_commit = master
+
+PACKAGES += cloudi_service_oauth1
+pkg_cloudi_service_oauth1_name = cloudi_service_oauth1
+pkg_cloudi_service_oauth1_description = OAuth v1.0 CloudI Service
+pkg_cloudi_service_oauth1_homepage = http://cloudi.org/
+pkg_cloudi_service_oauth1_fetch = git
+pkg_cloudi_service_oauth1_repo = https://github.com/CloudI/cloudi_service_oauth1
+pkg_cloudi_service_oauth1_commit = master
+
+PACKAGES += cloudi_service_queue
+pkg_cloudi_service_queue_name = cloudi_service_queue
+pkg_cloudi_service_queue_description = Persistent Queue Service
+pkg_cloudi_service_queue_homepage = http://cloudi.org/
+pkg_cloudi_service_queue_fetch = git
+pkg_cloudi_service_queue_repo = https://github.com/CloudI/cloudi_service_queue
+pkg_cloudi_service_queue_commit = master
+
+PACKAGES += cloudi_service_quorum
+pkg_cloudi_service_quorum_name = cloudi_service_quorum
+pkg_cloudi_service_quorum_description = CloudI Quorum Service
+pkg_cloudi_service_quorum_homepage = http://cloudi.org/
+pkg_cloudi_service_quorum_fetch = git
+pkg_cloudi_service_quorum_repo = https://github.com/CloudI/cloudi_service_quorum
+pkg_cloudi_service_quorum_commit = master
+
+PACKAGES += cloudi_service_router
+pkg_cloudi_service_router_name = cloudi_service_router
+pkg_cloudi_service_router_description = CloudI Router Service
+pkg_cloudi_service_router_homepage = http://cloudi.org/
+pkg_cloudi_service_router_fetch = git
+pkg_cloudi_service_router_repo = https://github.com/CloudI/cloudi_service_router
+pkg_cloudi_service_router_commit = master
+
+PACKAGES += cloudi_service_tcp
+pkg_cloudi_service_tcp_name = cloudi_service_tcp
+pkg_cloudi_service_tcp_description = TCP CloudI Service
+pkg_cloudi_service_tcp_homepage = http://cloudi.org/
+pkg_cloudi_service_tcp_fetch = git
+pkg_cloudi_service_tcp_repo = https://github.com/CloudI/cloudi_service_tcp
+pkg_cloudi_service_tcp_commit = master
+
+PACKAGES += cloudi_service_timers
+pkg_cloudi_service_timers_name = cloudi_service_timers
+pkg_cloudi_service_timers_description = Timers CloudI Service
+pkg_cloudi_service_timers_homepage = http://cloudi.org/
+pkg_cloudi_service_timers_fetch = git
+pkg_cloudi_service_timers_repo = https://github.com/CloudI/cloudi_service_timers
+pkg_cloudi_service_timers_commit = master
+
+PACKAGES += cloudi_service_udp
+pkg_cloudi_service_udp_name = cloudi_service_udp
+pkg_cloudi_service_udp_description = UDP CloudI Service
+pkg_cloudi_service_udp_homepage = http://cloudi.org/
+pkg_cloudi_service_udp_fetch = git
+pkg_cloudi_service_udp_repo = https://github.com/CloudI/cloudi_service_udp
+pkg_cloudi_service_udp_commit = master
+
+PACKAGES += cloudi_service_validate
+pkg_cloudi_service_validate_name = cloudi_service_validate
+pkg_cloudi_service_validate_description = CloudI Validate Service
+pkg_cloudi_service_validate_homepage = http://cloudi.org/
+pkg_cloudi_service_validate_fetch = git
+pkg_cloudi_service_validate_repo = https://github.com/CloudI/cloudi_service_validate
+pkg_cloudi_service_validate_commit = master
+
+PACKAGES += cloudi_service_zeromq
+pkg_cloudi_service_zeromq_name = cloudi_service_zeromq
+pkg_cloudi_service_zeromq_description = ZeroMQ CloudI Service
+pkg_cloudi_service_zeromq_homepage = http://cloudi.org/
+pkg_cloudi_service_zeromq_fetch = git
+pkg_cloudi_service_zeromq_repo = https://github.com/CloudI/cloudi_service_zeromq
+pkg_cloudi_service_zeromq_commit = master
+
+PACKAGES += cluster_info
+pkg_cluster_info_name = cluster_info
+pkg_cluster_info_description = Fork of Hibari's nifty cluster_info OTP app
+pkg_cluster_info_homepage = https://github.com/basho/cluster_info
+pkg_cluster_info_fetch = git
+pkg_cluster_info_repo = https://github.com/basho/cluster_info
+pkg_cluster_info_commit = master
+
+PACKAGES += color
+pkg_color_name = color
+pkg_color_description = ANSI colors for your Erlang
+pkg_color_homepage = https://github.com/julianduque/erlang-color
+pkg_color_fetch = git
+pkg_color_repo = https://github.com/julianduque/erlang-color
+pkg_color_commit = master
+
+PACKAGES += confetti
+pkg_confetti_name = confetti
+pkg_confetti_description = Erlang configuration provider / application:get_env/2 on steroids
+pkg_confetti_homepage = https://github.com/jtendo/confetti
+pkg_confetti_fetch = git
+pkg_confetti_repo = https://github.com/jtendo/confetti
+pkg_confetti_commit = master
+
+PACKAGES += couchbeam
+pkg_couchbeam_name = couchbeam
+pkg_couchbeam_description = Apache CouchDB client in Erlang
+pkg_couchbeam_homepage = https://github.com/benoitc/couchbeam
+pkg_couchbeam_fetch = git
+pkg_couchbeam_repo = https://github.com/benoitc/couchbeam
+pkg_couchbeam_commit = master
+
+PACKAGES += covertool
+pkg_covertool_name = covertool
+pkg_covertool_description = Tool to convert Erlang cover data files into Cobertura XML reports
+pkg_covertool_homepage = https://github.com/idubrov/covertool
+pkg_covertool_fetch = git
+pkg_covertool_repo = https://github.com/idubrov/covertool
+pkg_covertool_commit = master
+
+PACKAGES += cowboy
+pkg_cowboy_name = cowboy
+pkg_cowboy_description = Small, fast and modular HTTP server.
+pkg_cowboy_homepage = http://ninenines.eu
+pkg_cowboy_fetch = git
+pkg_cowboy_repo = https://github.com/ninenines/cowboy
+pkg_cowboy_commit = 1.0.4
+
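+# A minimal usage sketch (assuming the standard erlang.mk workflow): a project
+# Makefile pulls a package from this index in by name, e.g.
+#
+#   DEPS = cowboy
+#   include erlang.mk
+#
+# erlang.mk then resolves pkg_cowboy_fetch, pkg_cowboy_repo and
+# pkg_cowboy_commit (defined above) to download and build the dependency.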
+PACKAGES += cowdb
+pkg_cowdb_name = cowdb
+pkg_cowdb_description = Pure key/value database library for Erlang applications
+pkg_cowdb_homepage = https://github.com/refuge/cowdb
+pkg_cowdb_fetch = git
+pkg_cowdb_repo = https://github.com/refuge/cowdb
+pkg_cowdb_commit = master
+
+PACKAGES += cowlib
+pkg_cowlib_name = cowlib
+pkg_cowlib_description = Support library for manipulating Web protocols.
+pkg_cowlib_homepage = http://ninenines.eu
+pkg_cowlib_fetch = git
+pkg_cowlib_repo = https://github.com/ninenines/cowlib
+pkg_cowlib_commit = 1.0.2
+
+PACKAGES += cpg
+pkg_cpg_name = cpg
+pkg_cpg_description = CloudI Process Groups
+pkg_cpg_homepage = https://github.com/okeuday/cpg
+pkg_cpg_fetch = git
+pkg_cpg_repo = https://github.com/okeuday/cpg
+pkg_cpg_commit = master
+
+PACKAGES += cqerl
+pkg_cqerl_name = cqerl
+pkg_cqerl_description = Native Erlang CQL client for Cassandra
+pkg_cqerl_homepage = https://matehat.github.io/cqerl/
+pkg_cqerl_fetch = git
+pkg_cqerl_repo = https://github.com/matehat/cqerl
+pkg_cqerl_commit = master
+
+PACKAGES += cr
+pkg_cr_name = cr
+pkg_cr_description = Chain Replication
+pkg_cr_homepage = https://synrc.com/apps/cr/doc/cr.htm
+pkg_cr_fetch = git
+pkg_cr_repo = https://github.com/spawnproc/cr
+pkg_cr_commit = master
+
+PACKAGES += cuttlefish
+pkg_cuttlefish_name = cuttlefish
+pkg_cuttlefish_description = Sysctl-style configuration library for Erlang applications
+pkg_cuttlefish_homepage = https://github.com/basho/cuttlefish
+pkg_cuttlefish_fetch = git
+pkg_cuttlefish_repo = https://github.com/basho/cuttlefish
+pkg_cuttlefish_commit = master
+
+PACKAGES += damocles
+pkg_damocles_name = damocles
+pkg_damocles_description = Erlang library for generating adversarial network conditions for QAing distributed applications/systems on a single Linux box.
+pkg_damocles_homepage = https://github.com/lostcolony/damocles
+pkg_damocles_fetch = git
+pkg_damocles_repo = https://github.com/lostcolony/damocles
+pkg_damocles_commit = master
+
+PACKAGES += debbie
+pkg_debbie_name = debbie
+pkg_debbie_description = .DEB Built In Erlang
+pkg_debbie_homepage = https://github.com/crownedgrouse/debbie
+pkg_debbie_fetch = git
+pkg_debbie_repo = https://github.com/crownedgrouse/debbie
+pkg_debbie_commit = master
+
+PACKAGES += decimal
+pkg_decimal_name = decimal
+pkg_decimal_description = An Erlang decimal arithmetic library
+pkg_decimal_homepage = https://github.com/tim/erlang-decimal
+pkg_decimal_fetch = git
+pkg_decimal_repo = https://github.com/tim/erlang-decimal
+pkg_decimal_commit = master
+
+PACKAGES += detergent
+pkg_detergent_name = detergent
+pkg_detergent_description = An emulsifying Erlang SOAP library
+pkg_detergent_homepage = https://github.com/devinus/detergent
+pkg_detergent_fetch = git
+pkg_detergent_repo = https://github.com/devinus/detergent
+pkg_detergent_commit = master
+
+PACKAGES += detest
+pkg_detest_name = detest
+pkg_detest_description = Tool for running tests on a cluster of Erlang nodes
+pkg_detest_homepage = https://github.com/biokoda/detest
+pkg_detest_fetch = git
+pkg_detest_repo = https://github.com/biokoda/detest
+pkg_detest_commit = master
+
+PACKAGES += dh_date
+pkg_dh_date_name = dh_date
+pkg_dh_date_description = Date formatting / parsing library for Erlang
+pkg_dh_date_homepage = https://github.com/daleharvey/dh_date
+pkg_dh_date_fetch = git
+pkg_dh_date_repo = https://github.com/daleharvey/dh_date
+pkg_dh_date_commit = master
+
+PACKAGES += dirbusterl
+pkg_dirbusterl_name = dirbusterl
+pkg_dirbusterl_description = DirBuster successor in Erlang
+pkg_dirbusterl_homepage = https://github.com/silentsignal/DirBustErl
+pkg_dirbusterl_fetch = git
+pkg_dirbusterl_repo = https://github.com/silentsignal/DirBustErl
+pkg_dirbusterl_commit = master
+
+PACKAGES += dispcount
+pkg_dispcount_name = dispcount
+pkg_dispcount_description = Erlang task dispatcher based on ETS counters.
+pkg_dispcount_homepage = https://github.com/ferd/dispcount
+pkg_dispcount_fetch = git
+pkg_dispcount_repo = https://github.com/ferd/dispcount
+pkg_dispcount_commit = master
+
+PACKAGES += dlhttpc
+pkg_dlhttpc_name = dlhttpc
+pkg_dlhttpc_description = dispcount-based lhttpc fork for massive amounts of requests to limited endpoints
+pkg_dlhttpc_homepage = https://github.com/ferd/dlhttpc
+pkg_dlhttpc_fetch = git
+pkg_dlhttpc_repo = https://github.com/ferd/dlhttpc
+pkg_dlhttpc_commit = master
+
+PACKAGES += dns
+pkg_dns_name = dns
+pkg_dns_description = Erlang DNS library
+pkg_dns_homepage = https://github.com/aetrion/dns_erlang
+pkg_dns_fetch = git
+pkg_dns_repo = https://github.com/aetrion/dns_erlang
+pkg_dns_commit = master
+
+PACKAGES += dnssd
+pkg_dnssd_name = dnssd
+pkg_dnssd_description = Erlang interface to Apple's Bonjour DNS Service Discovery implementation
+pkg_dnssd_homepage = https://github.com/benoitc/dnssd_erlang
+pkg_dnssd_fetch = git
+pkg_dnssd_repo = https://github.com/benoitc/dnssd_erlang
+pkg_dnssd_commit = master
+
+PACKAGES += dynamic_compile
+pkg_dynamic_compile_name = dynamic_compile
+pkg_dynamic_compile_description = Compile and load Erlang modules from string input
+pkg_dynamic_compile_homepage = https://github.com/jkvor/dynamic_compile
+pkg_dynamic_compile_fetch = git
+pkg_dynamic_compile_repo = https://github.com/jkvor/dynamic_compile
+pkg_dynamic_compile_commit = master
+
+PACKAGES += e2
+pkg_e2_name = e2
+pkg_e2_description = Library to simplify writing correct OTP applications.
+pkg_e2_homepage = http://e2project.org
+pkg_e2_fetch = git
+pkg_e2_repo = https://github.com/gar1t/e2
+pkg_e2_commit = master
+
+PACKAGES += eamf
+pkg_eamf_name = eamf
+pkg_eamf_description = eAMF provides Action Message Format (AMF) support for Erlang
+pkg_eamf_homepage = https://github.com/mrinalwadhwa/eamf
+pkg_eamf_fetch = git
+pkg_eamf_repo = https://github.com/mrinalwadhwa/eamf
+pkg_eamf_commit = master
+
+PACKAGES += eavro
+pkg_eavro_name = eavro
+pkg_eavro_description = Apache Avro encoder/decoder
+pkg_eavro_homepage = https://github.com/SIfoxDevTeam/eavro
+pkg_eavro_fetch = git
+pkg_eavro_repo = https://github.com/SIfoxDevTeam/eavro
+pkg_eavro_commit = master
+
+PACKAGES += ecapnp
+pkg_ecapnp_name = ecapnp
+pkg_ecapnp_description = Cap'n Proto library for Erlang
+pkg_ecapnp_homepage = https://github.com/kaos/ecapnp
+pkg_ecapnp_fetch = git
+pkg_ecapnp_repo = https://github.com/kaos/ecapnp
+pkg_ecapnp_commit = master
+
+PACKAGES += econfig
+pkg_econfig_name = econfig
+pkg_econfig_description = simple Erlang config handler using INI files
+pkg_econfig_homepage = https://github.com/benoitc/econfig
+pkg_econfig_fetch = git
+pkg_econfig_repo = https://github.com/benoitc/econfig
+pkg_econfig_commit = master
+
+PACKAGES += edate
+pkg_edate_name = edate
+pkg_edate_description = Date manipulation library for Erlang
+pkg_edate_homepage = https://github.com/dweldon/edate
+pkg_edate_fetch = git
+pkg_edate_repo = https://github.com/dweldon/edate
+pkg_edate_commit = master
+
+PACKAGES += edgar
+pkg_edgar_name = edgar
+pkg_edgar_description = Erlang Does GNU AR
+pkg_edgar_homepage = https://github.com/crownedgrouse/edgar
+pkg_edgar_fetch = git
+pkg_edgar_repo = https://github.com/crownedgrouse/edgar
+pkg_edgar_commit = master
+
+PACKAGES += edis
+pkg_edis_name = edis
+pkg_edis_description = An Erlang implementation of Redis KV Store
+pkg_edis_homepage = http://inaka.github.com/edis/
+pkg_edis_fetch = git
+pkg_edis_repo = https://github.com/inaka/edis
+pkg_edis_commit = master
+
+PACKAGES += edns
+pkg_edns_name = edns
+pkg_edns_description = Erlang/OTP DNS server
+pkg_edns_homepage = https://github.com/hcvst/erlang-dns
+pkg_edns_fetch = git
+pkg_edns_repo = https://github.com/hcvst/erlang-dns
+pkg_edns_commit = master
+
+PACKAGES += edown
+pkg_edown_name = edown
+pkg_edown_description = EDoc extension for generating GitHub-flavored Markdown
+pkg_edown_homepage = https://github.com/uwiger/edown
+pkg_edown_fetch = git
+pkg_edown_repo = https://github.com/uwiger/edown
+pkg_edown_commit = master
+
+PACKAGES += eep
+pkg_eep_name = eep
+pkg_eep_description = Erlang Easy Profiling (eep) application provides a way to analyze application performance and call hierarchy
+pkg_eep_homepage = https://github.com/virtan/eep
+pkg_eep_fetch = git
+pkg_eep_repo = https://github.com/virtan/eep
+pkg_eep_commit = master
+
+PACKAGES += eep_app
+pkg_eep_app_name = eep_app
+pkg_eep_app_description = Embedded Event Processing
+pkg_eep_app_homepage = https://github.com/darach/eep-erl
+pkg_eep_app_fetch = git
+pkg_eep_app_repo = https://github.com/darach/eep-erl
+pkg_eep_app_commit = master
+
+PACKAGES += efene
+pkg_efene_name = efene
+pkg_efene_description = Alternative syntax for the Erlang Programming Language focusing on simplicity, ease of use and programmer UX
+pkg_efene_homepage = https://github.com/efene/efene
+pkg_efene_fetch = git
+pkg_efene_repo = https://github.com/efene/efene
+pkg_efene_commit = master
+
+PACKAGES += egeoip
+pkg_egeoip_name = egeoip
+pkg_egeoip_description = Erlang IP Geolocation module, currently supporting the MaxMind GeoLite City Database.
+pkg_egeoip_homepage = https://github.com/mochi/egeoip
+pkg_egeoip_fetch = git
+pkg_egeoip_repo = https://github.com/mochi/egeoip
+pkg_egeoip_commit = master
+
+PACKAGES += ehsa
+pkg_ehsa_name = ehsa
+pkg_ehsa_description = Erlang HTTP server basic and digest authentication modules
+pkg_ehsa_homepage = https://bitbucket.org/a12n/ehsa
+pkg_ehsa_fetch = hg
+pkg_ehsa_repo = https://bitbucket.org/a12n/ehsa
+pkg_ehsa_commit = default
+
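+# Note that the fetch method is declared per package: most entries in this
+# index use git, while ehsa above is fetched with hg from Bitbucket and pins
+# 'default', Mercurial's equivalent of a git master branch.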
+PACKAGES += ej
+pkg_ej_name = ej
+pkg_ej_description = Helper module for working with Erlang terms representing JSON
+pkg_ej_homepage = https://github.com/seth/ej
+pkg_ej_fetch = git
+pkg_ej_repo = https://github.com/seth/ej
+pkg_ej_commit = master
+
+PACKAGES += ejabberd
+pkg_ejabberd_name = ejabberd
+pkg_ejabberd_description = Robust, ubiquitous and massively scalable Jabber / XMPP Instant Messaging platform
+pkg_ejabberd_homepage = https://github.com/processone/ejabberd
+pkg_ejabberd_fetch = git
+pkg_ejabberd_repo = https://github.com/processone/ejabberd
+pkg_ejabberd_commit = master
+
+PACKAGES += ejwt
+pkg_ejwt_name = ejwt
+pkg_ejwt_description = Erlang library for JSON Web Tokens
+pkg_ejwt_homepage = https://github.com/artefactop/ejwt
+pkg_ejwt_fetch = git
+pkg_ejwt_repo = https://github.com/artefactop/ejwt
+pkg_ejwt_commit = master
+
+PACKAGES += ekaf
+pkg_ekaf_name = ekaf
+pkg_ekaf_description = A minimal, high-performance Kafka client in Erlang.
+pkg_ekaf_homepage = https://github.com/helpshift/ekaf
+pkg_ekaf_fetch = git
+pkg_ekaf_repo = https://github.com/helpshift/ekaf
+pkg_ekaf_commit = master
+
+PACKAGES += elarm
+pkg_elarm_name = elarm
+pkg_elarm_description = Alarm Manager for Erlang.
+pkg_elarm_homepage = https://github.com/esl/elarm
+pkg_elarm_fetch = git
+pkg_elarm_repo = https://github.com/esl/elarm
+pkg_elarm_commit = master
+
+PACKAGES += eleveldb
+pkg_eleveldb_name = eleveldb
+pkg_eleveldb_description = Erlang LevelDB API
+pkg_eleveldb_homepage = https://github.com/basho/eleveldb
+pkg_eleveldb_fetch = git
+pkg_eleveldb_repo = https://github.com/basho/eleveldb
+pkg_eleveldb_commit = master
+
+PACKAGES += elixir
+pkg_elixir_name = elixir
+pkg_elixir_description = Elixir is a dynamic, functional language designed for building scalable and maintainable applications
+pkg_elixir_homepage = https://elixir-lang.org/
+pkg_elixir_fetch = git
+pkg_elixir_repo = https://github.com/elixir-lang/elixir
+pkg_elixir_commit = master
+
+PACKAGES += elli
+pkg_elli_name = elli
+pkg_elli_description = Simple, robust and performant Erlang web server
+pkg_elli_homepage = https://github.com/elli-lib/elli
+pkg_elli_fetch = git
+pkg_elli_repo = https://github.com/elli-lib/elli
+pkg_elli_commit = master
+
+PACKAGES += elvis
+pkg_elvis_name = elvis
+pkg_elvis_description = Erlang Style Reviewer
+pkg_elvis_homepage = https://github.com/inaka/elvis
+pkg_elvis_fetch = git
+pkg_elvis_repo = https://github.com/inaka/elvis
+pkg_elvis_commit = master
+
+PACKAGES += emagick
+pkg_emagick_name = emagick
+pkg_emagick_description = Wrapper for the GraphicsMagick/ImageMagick command line tools.
+pkg_emagick_homepage = https://github.com/kivra/emagick
+pkg_emagick_fetch = git
+pkg_emagick_repo = https://github.com/kivra/emagick
+pkg_emagick_commit = master
+
+PACKAGES += emysql
+pkg_emysql_name = emysql
+pkg_emysql_description = Stable, pure Erlang MySQL driver.
+pkg_emysql_homepage = https://github.com/Eonblast/Emysql
+pkg_emysql_fetch = git
+pkg_emysql_repo = https://github.com/Eonblast/Emysql
+pkg_emysql_commit = master
+
+PACKAGES += enm
+pkg_enm_name = enm
+pkg_enm_description = Erlang driver for nanomsg
+pkg_enm_homepage = https://github.com/basho/enm
+pkg_enm_fetch = git
+pkg_enm_repo = https://github.com/basho/enm
+pkg_enm_commit = master
+
+PACKAGES += entop
+pkg_entop_name = entop
+pkg_entop_description = A top-like tool for monitoring an Erlang node
+pkg_entop_homepage = https://github.com/mazenharake/entop
+pkg_entop_fetch = git
+pkg_entop_repo = https://github.com/mazenharake/entop
+pkg_entop_commit = master
+
+PACKAGES += epcap
+pkg_epcap_name = epcap
+pkg_epcap_description = Erlang packet capture interface using pcap
+pkg_epcap_homepage = https://github.com/msantos/epcap
+pkg_epcap_fetch = git
+pkg_epcap_repo = https://github.com/msantos/epcap
+pkg_epcap_commit = master
+
+PACKAGES += eper
+pkg_eper_name = eper
+pkg_eper_description = Erlang performance and debugging tools.
+pkg_eper_homepage = https://github.com/massemanet/eper
+pkg_eper_fetch = git
+pkg_eper_repo = https://github.com/massemanet/eper
+pkg_eper_commit = master
+
+PACKAGES += epgsql
+pkg_epgsql_name = epgsql
+pkg_epgsql_description = Erlang PostgreSQL client library.
+pkg_epgsql_homepage = https://github.com/epgsql/epgsql
+pkg_epgsql_fetch = git
+pkg_epgsql_repo = https://github.com/epgsql/epgsql
+pkg_epgsql_commit = master
+
+PACKAGES += episcina
+pkg_episcina_name = episcina
+pkg_episcina_description = A simple, non-intrusive resource pool for connections
+pkg_episcina_homepage = https://github.com/erlware/episcina
+pkg_episcina_fetch = git
+pkg_episcina_repo = https://github.com/erlware/episcina
+pkg_episcina_commit = master
+
+PACKAGES += eplot
+pkg_eplot_name = eplot
+pkg_eplot_description = A plot engine written in Erlang.
+pkg_eplot_homepage = https://github.com/psyeugenic/eplot
+pkg_eplot_fetch = git
+pkg_eplot_repo = https://github.com/psyeugenic/eplot
+pkg_eplot_commit = master
+
+PACKAGES += epocxy
+pkg_epocxy_name = epocxy
+pkg_epocxy_description = Erlang Patterns of Concurrency
+pkg_epocxy_homepage = https://github.com/duomark/epocxy
+pkg_epocxy_fetch = git
+pkg_epocxy_repo = https://github.com/duomark/epocxy
+pkg_epocxy_commit = master
+
+PACKAGES += epubnub
+pkg_epubnub_name = epubnub
+pkg_epubnub_description = Erlang PubNub API
+pkg_epubnub_homepage = https://github.com/tsloughter/epubnub
+pkg_epubnub_fetch = git
+pkg_epubnub_repo = https://github.com/tsloughter/epubnub
+pkg_epubnub_commit = master
+
+PACKAGES += eqm
+pkg_eqm_name = eqm
+pkg_eqm_description = Erlang pub/sub with supply-demand channels
+pkg_eqm_homepage = https://github.com/loucash/eqm
+pkg_eqm_fetch = git
+pkg_eqm_repo = https://github.com/loucash/eqm
+pkg_eqm_commit = master
+
+PACKAGES += eredis
+pkg_eredis_name = eredis
+pkg_eredis_description = Erlang Redis client
+pkg_eredis_homepage = https://github.com/wooga/eredis
+pkg_eredis_fetch = git
+pkg_eredis_repo = https://github.com/wooga/eredis
+pkg_eredis_commit = master
+
+PACKAGES += eredis_pool
+pkg_eredis_pool_name = eredis_pool
+pkg_eredis_pool_description = eredis_pool is a pool of Redis clients, using eredis and poolboy.
+pkg_eredis_pool_homepage = https://github.com/hiroeorz/eredis_pool
+pkg_eredis_pool_fetch = git
+pkg_eredis_pool_repo = https://github.com/hiroeorz/eredis_pool
+pkg_eredis_pool_commit = master
+
+PACKAGES += erl_streams
+pkg_erl_streams_name = erl_streams
+pkg_erl_streams_description = Streams in Erlang
+pkg_erl_streams_homepage = https://github.com/epappas/erl_streams
+pkg_erl_streams_fetch = git
+pkg_erl_streams_repo = https://github.com/epappas/erl_streams
+pkg_erl_streams_commit = master
+
+PACKAGES += erlang_cep
+pkg_erlang_cep_name = erlang_cep
+pkg_erlang_cep_description = A basic CEP package written in Erlang
+pkg_erlang_cep_homepage = https://github.com/danmacklin/erlang_cep
+pkg_erlang_cep_fetch = git
+pkg_erlang_cep_repo = https://github.com/danmacklin/erlang_cep
+pkg_erlang_cep_commit = master
+
+PACKAGES += erlang_js
+pkg_erlang_js_name = erlang_js
+pkg_erlang_js_description = A linked-in driver for Erlang to Mozilla's SpiderMonkey JavaScript runtime.
+pkg_erlang_js_homepage = https://github.com/basho/erlang_js
+pkg_erlang_js_fetch = git
+pkg_erlang_js_repo = https://github.com/basho/erlang_js
+pkg_erlang_js_commit = master
+
+PACKAGES += erlang_localtime
+pkg_erlang_localtime_name = erlang_localtime
+pkg_erlang_localtime_description = Erlang library for conversion from one local time to another
+pkg_erlang_localtime_homepage = https://github.com/dmitryme/erlang_localtime
+pkg_erlang_localtime_fetch = git
+pkg_erlang_localtime_repo = https://github.com/dmitryme/erlang_localtime
+pkg_erlang_localtime_commit = master
+
+PACKAGES += erlang_smtp
+pkg_erlang_smtp_name = erlang_smtp
+pkg_erlang_smtp_description = Erlang SMTP and POP3 server code.
+pkg_erlang_smtp_homepage = https://github.com/tonyg/erlang-smtp
+pkg_erlang_smtp_fetch = git
+pkg_erlang_smtp_repo = https://github.com/tonyg/erlang-smtp
+pkg_erlang_smtp_commit = master
+
+PACKAGES += erlang_term
+pkg_erlang_term_name = erlang_term
+pkg_erlang_term_description = Erlang Term Info
+pkg_erlang_term_homepage = https://github.com/okeuday/erlang_term
+pkg_erlang_term_fetch = git
+pkg_erlang_term_repo = https://github.com/okeuday/erlang_term
+pkg_erlang_term_commit = master
+
+PACKAGES += erlastic_search
+pkg_erlastic_search_name = erlastic_search
+pkg_erlastic_search_description = An Erlang app for communicating with Elasticsearch's REST interface.
+pkg_erlastic_search_homepage = https://github.com/tsloughter/erlastic_search
+pkg_erlastic_search_fetch = git
+pkg_erlastic_search_repo = https://github.com/tsloughter/erlastic_search
+pkg_erlastic_search_commit = master
+
+PACKAGES += erlasticsearch
+pkg_erlasticsearch_name = erlasticsearch
+pkg_erlasticsearch_description = Erlang Thrift interface to Elasticsearch
+pkg_erlasticsearch_homepage = https://github.com/dieswaytoofast/erlasticsearch
+pkg_erlasticsearch_fetch = git
+pkg_erlasticsearch_repo = https://github.com/dieswaytoofast/erlasticsearch
+pkg_erlasticsearch_commit = master
+
+PACKAGES += erlbrake
+pkg_erlbrake_name = erlbrake
+pkg_erlbrake_description = Erlang Airbrake notification client
+pkg_erlbrake_homepage = https://github.com/kenpratt/erlbrake
+pkg_erlbrake_fetch = git
+pkg_erlbrake_repo = https://github.com/kenpratt/erlbrake
+pkg_erlbrake_commit = master
+
+PACKAGES += erlcloud
+pkg_erlcloud_name = erlcloud
+pkg_erlcloud_description = Cloud computing library for Erlang (Amazon EC2, S3, SQS, SimpleDB, Mechanical Turk, ELB)
+pkg_erlcloud_homepage = https://github.com/gleber/erlcloud
+pkg_erlcloud_fetch = git
+pkg_erlcloud_repo = https://github.com/gleber/erlcloud
+pkg_erlcloud_commit = master
+
+PACKAGES += erlcron
+pkg_erlcron_name = erlcron
+pkg_erlcron_description = Erlang cronish system
+pkg_erlcron_homepage = https://github.com/erlware/erlcron
+pkg_erlcron_fetch = git
+pkg_erlcron_repo = https://github.com/erlware/erlcron
+pkg_erlcron_commit = master
+
+PACKAGES += erldb
+pkg_erldb_name = erldb
+pkg_erldb_description = ORM (Object-relational mapping) application implemented in Erlang
+pkg_erldb_homepage = http://erldb.org
+pkg_erldb_fetch = git
+pkg_erldb_repo = https://github.com/erldb/erldb
+pkg_erldb_commit = master
+
+PACKAGES += erldis
+pkg_erldis_name = erldis
+pkg_erldis_description = Redis Erlang client library
+pkg_erldis_homepage = https://github.com/cstar/erldis
+pkg_erldis_fetch = git
+pkg_erldis_repo = https://github.com/cstar/erldis
+pkg_erldis_commit = master
+
+PACKAGES += erldns
+pkg_erldns_name = erldns
+pkg_erldns_description = DNS server, in Erlang.
+pkg_erldns_homepage = https://github.com/aetrion/erl-dns
+pkg_erldns_fetch = git
+pkg_erldns_repo = https://github.com/aetrion/erl-dns
+pkg_erldns_commit = master
+
+PACKAGES += erldocker
+pkg_erldocker_name = erldocker
+pkg_erldocker_description = Docker Remote API client for Erlang
+pkg_erldocker_homepage = https://github.com/proger/erldocker
+pkg_erldocker_fetch = git
+pkg_erldocker_repo = https://github.com/proger/erldocker
+pkg_erldocker_commit = master
+
+PACKAGES += erlfsmon
+pkg_erlfsmon_name = erlfsmon
+pkg_erlfsmon_description = Erlang filesystem event watcher for Linux and OSX
+pkg_erlfsmon_homepage = https://github.com/proger/erlfsmon
+pkg_erlfsmon_fetch = git
+pkg_erlfsmon_repo = https://github.com/proger/erlfsmon
+pkg_erlfsmon_commit = master
+
+PACKAGES += erlgit
+pkg_erlgit_name = erlgit
+pkg_erlgit_description = Erlang convenience wrapper around git executable
+pkg_erlgit_homepage = https://github.com/gleber/erlgit
+pkg_erlgit_fetch = git
+pkg_erlgit_repo = https://github.com/gleber/erlgit
+pkg_erlgit_commit = master
+
+PACKAGES += erlguten
+pkg_erlguten_name = erlguten
+pkg_erlguten_description = ErlGuten is a system for high-quality typesetting, written purely in Erlang.
+pkg_erlguten_homepage = https://github.com/richcarl/erlguten
+pkg_erlguten_fetch = git
+pkg_erlguten_repo = https://github.com/richcarl/erlguten
+pkg_erlguten_commit = master
+
+PACKAGES += erlmc
+pkg_erlmc_name = erlmc
+pkg_erlmc_description = Erlang memcached binary protocol client
+pkg_erlmc_homepage = https://github.com/jkvor/erlmc
+pkg_erlmc_fetch = git
+pkg_erlmc_repo = https://github.com/jkvor/erlmc
+pkg_erlmc_commit = master
+
+PACKAGES += erlmongo
+pkg_erlmongo_name = erlmongo
+pkg_erlmongo_description = Record-based Erlang driver for MongoDB with GridFS support
+pkg_erlmongo_homepage = https://github.com/SergejJurecko/erlmongo
+pkg_erlmongo_fetch = git
+pkg_erlmongo_repo = https://github.com/SergejJurecko/erlmongo
+pkg_erlmongo_commit = master
+
+PACKAGES += erlog
+pkg_erlog_name = erlog
+pkg_erlog_description = Prolog interpreter in and for Erlang
+pkg_erlog_homepage = https://github.com/rvirding/erlog
+pkg_erlog_fetch = git
+pkg_erlog_repo = https://github.com/rvirding/erlog
+pkg_erlog_commit = master
+
+PACKAGES += erlpass
+pkg_erlpass_name = erlpass
+pkg_erlpass_description = A library to handle password hashing and changing in a safe manner, independent of any kind of storage whatsoever.
+pkg_erlpass_homepage = https://github.com/ferd/erlpass
+pkg_erlpass_fetch = git
+pkg_erlpass_repo = https://github.com/ferd/erlpass
+pkg_erlpass_commit = master
+
+PACKAGES += erlport
+pkg_erlport_name = erlport
+pkg_erlport_description = ErlPort - connect Erlang to other languages
+pkg_erlport_homepage = https://github.com/hdima/erlport
+pkg_erlport_fetch = git
+pkg_erlport_repo = https://github.com/hdima/erlport
+pkg_erlport_commit = master
+
+PACKAGES += erlsh
+pkg_erlsh_name = erlsh
+pkg_erlsh_description = Erlang shell tools
+pkg_erlsh_homepage = https://github.com/proger/erlsh
+pkg_erlsh_fetch = git
+pkg_erlsh_repo = https://github.com/proger/erlsh
+pkg_erlsh_commit = master
+
+PACKAGES += erlsha2
+pkg_erlsha2_name = erlsha2
+pkg_erlsha2_description = SHA-224, SHA-256, SHA-384, SHA-512 implemented in Erlang NIFs.
+pkg_erlsha2_homepage = https://github.com/vinoski/erlsha2
+pkg_erlsha2_fetch = git
+pkg_erlsha2_repo = https://github.com/vinoski/erlsha2
+pkg_erlsha2_commit = master
+
+PACKAGES += erlsom
+pkg_erlsom_name = erlsom
+pkg_erlsom_description = XML parser for Erlang
+pkg_erlsom_homepage = https://github.com/willemdj/erlsom
+pkg_erlsom_fetch = git
+pkg_erlsom_repo = https://github.com/willemdj/erlsom
+pkg_erlsom_commit = master
+
+PACKAGES += erlubi
+pkg_erlubi_name = erlubi
+pkg_erlubi_description = Ubigraph Erlang Client (and Process Visualizer)
+pkg_erlubi_homepage = https://github.com/krestenkrab/erlubi
+pkg_erlubi_fetch = git
+pkg_erlubi_repo = https://github.com/krestenkrab/erlubi
+pkg_erlubi_commit = master
+
+PACKAGES += erlvolt
+pkg_erlvolt_name = erlvolt
+pkg_erlvolt_description = VoltDB Erlang Client Driver
+pkg_erlvolt_homepage = https://github.com/VoltDB/voltdb-client-erlang
+pkg_erlvolt_fetch = git
+pkg_erlvolt_repo = https://github.com/VoltDB/voltdb-client-erlang
+pkg_erlvolt_commit = master
+
+PACKAGES += erlware_commons
+pkg_erlware_commons_name = erlware_commons
+pkg_erlware_commons_description = Erlware Commons is an Erlware project focused on all aspects of reusable Erlang components.
+pkg_erlware_commons_homepage = https://github.com/erlware/erlware_commons
+pkg_erlware_commons_fetch = git
+pkg_erlware_commons_repo = https://github.com/erlware/erlware_commons
+pkg_erlware_commons_commit = master
+
+PACKAGES += erlydtl
+pkg_erlydtl_name = erlydtl
+pkg_erlydtl_description = Django Template Language for Erlang.
+pkg_erlydtl_homepage = https://github.com/erlydtl/erlydtl
+pkg_erlydtl_fetch = git
+pkg_erlydtl_repo = https://github.com/erlydtl/erlydtl
+pkg_erlydtl_commit = master
+
+PACKAGES += errd
+pkg_errd_name = errd
+pkg_errd_description = Erlang RRDTool library
+pkg_errd_homepage = https://github.com/archaelus/errd
+pkg_errd_fetch = git
+pkg_errd_repo = https://github.com/archaelus/errd
+pkg_errd_commit = master
+
+PACKAGES += erserve
+pkg_erserve_name = erserve
+pkg_erserve_description = Erlang/Rserve communication interface
+pkg_erserve_homepage = https://github.com/del/erserve
+pkg_erserve_fetch = git
+pkg_erserve_repo = https://github.com/del/erserve
+pkg_erserve_commit = master
+
+PACKAGES += erwa
+pkg_erwa_name = erwa
+pkg_erwa_description = A WAMP router and client written in Erlang.
+pkg_erwa_homepage = https://github.com/bwegh/erwa
+pkg_erwa_fetch = git
+pkg_erwa_repo = https://github.com/bwegh/erwa
+pkg_erwa_commit = master
+
+PACKAGES += escalus
+pkg_escalus_name = escalus
+pkg_escalus_description = An XMPP client library in Erlang for conveniently testing XMPP servers
+pkg_escalus_homepage = https://github.com/esl/escalus
+pkg_escalus_fetch = git
+pkg_escalus_repo = https://github.com/esl/escalus
+pkg_escalus_commit = master
+
+PACKAGES += esh_mk
+pkg_esh_mk_name = esh_mk
+pkg_esh_mk_description = esh template engine plugin for erlang.mk
+pkg_esh_mk_homepage = https://github.com/crownedgrouse/esh.mk
+pkg_esh_mk_fetch = git
+pkg_esh_mk_repo = https://github.com/crownedgrouse/esh.mk.git
+pkg_esh_mk_commit = master
+
+PACKAGES += espec
+pkg_espec_name = espec
+pkg_espec_description = ESpec: Behaviour driven development framework for Erlang
+pkg_espec_homepage = https://github.com/lucaspiller/espec
+pkg_espec_fetch = git
+pkg_espec_repo = https://github.com/lucaspiller/espec
+pkg_espec_commit = master
+
+PACKAGES += estatsd
+pkg_estatsd_name = estatsd
+pkg_estatsd_description = Erlang stats aggregation app that periodically flushes data to Graphite
+pkg_estatsd_homepage = https://github.com/RJ/estatsd
+pkg_estatsd_fetch = git
+pkg_estatsd_repo = https://github.com/RJ/estatsd
+pkg_estatsd_commit = master
+
+PACKAGES += etap
+pkg_etap_name = etap
+pkg_etap_description = etap is a simple Erlang testing library that provides TAP-compliant output.
+pkg_etap_homepage = https://github.com/ngerakines/etap
+pkg_etap_fetch = git
+pkg_etap_repo = https://github.com/ngerakines/etap
+pkg_etap_commit = master
+
+PACKAGES += etest
+pkg_etest_name = etest
+pkg_etest_description = A lightweight, convention over configuration test framework for Erlang
+pkg_etest_homepage = https://github.com/wooga/etest
+pkg_etest_fetch = git
+pkg_etest_repo = https://github.com/wooga/etest
+pkg_etest_commit = master
+
+PACKAGES += etest_http
+pkg_etest_http_name = etest_http
+pkg_etest_http_description = etest assertions around HTTP (client-side)
+pkg_etest_http_homepage = https://github.com/wooga/etest_http
+pkg_etest_http_fetch = git
+pkg_etest_http_repo = https://github.com/wooga/etest_http
+pkg_etest_http_commit = master
+
+PACKAGES += etoml
+pkg_etoml_name = etoml
+pkg_etoml_description = Erlang parser for the TOML language
+pkg_etoml_homepage = https://github.com/kalta/etoml
+pkg_etoml_fetch = git
+pkg_etoml_repo = https://github.com/kalta/etoml
+pkg_etoml_commit = master
+
+PACKAGES += eunit
+pkg_eunit_name = eunit
+pkg_eunit_description = The EUnit lightweight unit testing framework for Erlang - this is the canonical development repository.
+pkg_eunit_homepage = https://github.com/richcarl/eunit
+pkg_eunit_fetch = git
+pkg_eunit_repo = https://github.com/richcarl/eunit
+pkg_eunit_commit = master
+
+PACKAGES += eunit_formatters
+pkg_eunit_formatters_name = eunit_formatters
+pkg_eunit_formatters_description = Because eunit's output sucks. Let's make it better.
+pkg_eunit_formatters_homepage = https://github.com/seancribbs/eunit_formatters
+pkg_eunit_formatters_fetch = git
+pkg_eunit_formatters_repo = https://github.com/seancribbs/eunit_formatters
+pkg_eunit_formatters_commit = master
+
+PACKAGES += euthanasia
+pkg_euthanasia_name = euthanasia
+pkg_euthanasia_description = Merciful killer for your Erlang processes
+pkg_euthanasia_homepage = https://github.com/doubleyou/euthanasia
+pkg_euthanasia_fetch = git
+pkg_euthanasia_repo = https://github.com/doubleyou/euthanasia
+pkg_euthanasia_commit = master
+
+PACKAGES += evum
+pkg_evum_name = evum
+pkg_evum_description = Spawn Linux VMs as Erlang processes in the Erlang VM
+pkg_evum_homepage = https://github.com/msantos/evum
+pkg_evum_fetch = git
+pkg_evum_repo = https://github.com/msantos/evum
+pkg_evum_commit = master
+
+PACKAGES += exec
+pkg_exec_name = erlexec
+pkg_exec_description = Execute and control OS processes from Erlang/OTP.
+pkg_exec_homepage = http://saleyn.github.com/erlexec
+pkg_exec_fetch = git
+pkg_exec_repo = https://github.com/saleyn/erlexec
+pkg_exec_commit = master
+
+PACKAGES += exml
+pkg_exml_name = exml
+pkg_exml_description = XML parsing library in Erlang
+pkg_exml_homepage = https://github.com/paulgray/exml
+pkg_exml_fetch = git
+pkg_exml_repo = https://github.com/paulgray/exml
+pkg_exml_commit = master
+
+PACKAGES += exometer
+pkg_exometer_name = exometer
+pkg_exometer_description = Basic measurement objects and probe behavior
+pkg_exometer_homepage = https://github.com/Feuerlabs/exometer
+pkg_exometer_fetch = git
+pkg_exometer_repo = https://github.com/Feuerlabs/exometer
+pkg_exometer_commit = master
+
+PACKAGES += exs1024
+pkg_exs1024_name = exs1024
+pkg_exs1024_description = Xorshift1024star pseudo random number generator for Erlang.
+pkg_exs1024_homepage = https://github.com/jj1bdx/exs1024
+pkg_exs1024_fetch = git
+pkg_exs1024_repo = https://github.com/jj1bdx/exs1024
+pkg_exs1024_commit = master
+
+PACKAGES += exs64
+pkg_exs64_name = exs64
+pkg_exs64_description = Xorshift64star pseudo random number generator for Erlang.
+pkg_exs64_homepage = https://github.com/jj1bdx/exs64
+pkg_exs64_fetch = git
+pkg_exs64_repo = https://github.com/jj1bdx/exs64
+pkg_exs64_commit = master
+
+PACKAGES += exsplus116
+pkg_exsplus116_name = exsplus116
+pkg_exsplus116_description = Xorshift116plus for Erlang
+pkg_exsplus116_homepage = https://github.com/jj1bdx/exsplus116
+pkg_exsplus116_fetch = git
+pkg_exsplus116_repo = https://github.com/jj1bdx/exsplus116
+pkg_exsplus116_commit = master
+
+PACKAGES += exsplus128
+pkg_exsplus128_name = exsplus128
+pkg_exsplus128_description = Xorshift128plus pseudo random number generator for Erlang.
+pkg_exsplus128_homepage = https://github.com/jj1bdx/exsplus128
+pkg_exsplus128_fetch = git
+pkg_exsplus128_repo = https://github.com/jj1bdx/exsplus128
+pkg_exsplus128_commit = master
+
+PACKAGES += ezmq
+pkg_ezmq_name = ezmq
+pkg_ezmq_description = ZeroMQ implemented in Erlang
+pkg_ezmq_homepage = https://github.com/RoadRunnr/ezmq
+pkg_ezmq_fetch = git
+pkg_ezmq_repo = https://github.com/RoadRunnr/ezmq
+pkg_ezmq_commit = master
+
+PACKAGES += ezmtp
+pkg_ezmtp_name = ezmtp
+pkg_ezmtp_description = ZMTP protocol in pure Erlang.
+pkg_ezmtp_homepage = https://github.com/a13x/ezmtp
+pkg_ezmtp_fetch = git
+pkg_ezmtp_repo = https://github.com/a13x/ezmtp
+pkg_ezmtp_commit = master
+
+PACKAGES += fast_disk_log
+pkg_fast_disk_log_name = fast_disk_log
+pkg_fast_disk_log_description = Pool-based asynchronous Erlang disk logger
+pkg_fast_disk_log_homepage = https://github.com/lpgauth/fast_disk_log
+pkg_fast_disk_log_fetch = git
+pkg_fast_disk_log_repo = https://github.com/lpgauth/fast_disk_log
+pkg_fast_disk_log_commit = master
+
+PACKAGES += feeder
+pkg_feeder_name = feeder
+pkg_feeder_description = Stream-parse RSS and Atom formatted XML feeds.
+pkg_feeder_homepage = https://github.com/michaelnisi/feeder
+pkg_feeder_fetch = git
+pkg_feeder_repo = https://github.com/michaelnisi/feeder
+pkg_feeder_commit = master
+
+PACKAGES += find_crate
+pkg_find_crate_name = find_crate
+pkg_find_crate_description = Find Rust libs and exes in Erlang application priv directory
+pkg_find_crate_homepage = https://github.com/goertzenator/find_crate
+pkg_find_crate_fetch = git
+pkg_find_crate_repo = https://github.com/goertzenator/find_crate
+pkg_find_crate_commit = master
+
+PACKAGES += fix
+pkg_fix_name = fix
+pkg_fix_description = Implementation of the FIX protocol (http://fixprotocol.org/).
+pkg_fix_homepage = https://github.com/maxlapshin/fix
+pkg_fix_fetch = git
+pkg_fix_repo = https://github.com/maxlapshin/fix
+pkg_fix_commit = master
+
+PACKAGES += flower
+pkg_flower_name = flower
+pkg_flower_description = FlowER - an Erlang OpenFlow development platform
+pkg_flower_homepage = https://github.com/travelping/flower
+pkg_flower_fetch = git
+pkg_flower_repo = https://github.com/travelping/flower
+pkg_flower_commit = master
+
+PACKAGES += fn
+pkg_fn_name = fn
+pkg_fn_description = Function utilities for Erlang
+pkg_fn_homepage = https://github.com/reiddraper/fn
+pkg_fn_fetch = git
+pkg_fn_repo = https://github.com/reiddraper/fn
+pkg_fn_commit = master
+
+PACKAGES += folsom
+pkg_folsom_name = folsom
+pkg_folsom_description = Expose Erlang Events and Metrics
+pkg_folsom_homepage = https://github.com/boundary/folsom
+pkg_folsom_fetch = git
+pkg_folsom_repo = https://github.com/boundary/folsom
+pkg_folsom_commit = master
+
+PACKAGES += folsom_cowboy
+pkg_folsom_cowboy_name = folsom_cowboy
+pkg_folsom_cowboy_description = A Cowboy-based Folsom HTTP wrapper.
+pkg_folsom_cowboy_homepage = https://github.com/boundary/folsom_cowboy
+pkg_folsom_cowboy_fetch = git
+pkg_folsom_cowboy_repo = https://github.com/boundary/folsom_cowboy
+pkg_folsom_cowboy_commit = master
+
+PACKAGES += folsomite
+pkg_folsomite_name = folsomite
+pkg_folsomite_description = Blow up your Graphite / Riemann server with Folsom metrics
+pkg_folsomite_homepage = https://github.com/campanja/folsomite
+pkg_folsomite_fetch = git
+pkg_folsomite_repo = https://github.com/campanja/folsomite
+pkg_folsomite_commit = master
+
+PACKAGES += fs
+pkg_fs_name = fs
+pkg_fs_description = Erlang FileSystem Listener
+pkg_fs_homepage = https://github.com/synrc/fs
+pkg_fs_fetch = git
+pkg_fs_repo = https://github.com/synrc/fs
+pkg_fs_commit = master
+
+PACKAGES += fuse
+pkg_fuse_name = fuse
+pkg_fuse_description = A Circuit Breaker for Erlang
+pkg_fuse_homepage = https://github.com/jlouis/fuse
+pkg_fuse_fetch = git
+pkg_fuse_repo = https://github.com/jlouis/fuse
+pkg_fuse_commit = master
+
+PACKAGES += gcm
+pkg_gcm_name = gcm
+pkg_gcm_description = An Erlang application for Google Cloud Messaging
+pkg_gcm_homepage = https://github.com/pdincau/gcm-erlang
+pkg_gcm_fetch = git
+pkg_gcm_repo = https://github.com/pdincau/gcm-erlang
+pkg_gcm_commit = master
+
+PACKAGES += gcprof
+pkg_gcprof_name = gcprof
+pkg_gcprof_description = Garbage Collection profiler for Erlang
+pkg_gcprof_homepage = https://github.com/knutin/gcprof
+pkg_gcprof_fetch = git
+pkg_gcprof_repo = https://github.com/knutin/gcprof
+pkg_gcprof_commit = master
+
+PACKAGES += geas
+pkg_geas_name = geas
+pkg_geas_description = Guess Erlang Application Scattering
+pkg_geas_homepage = https://github.com/crownedgrouse/geas
+pkg_geas_fetch = git
+pkg_geas_repo = https://github.com/crownedgrouse/geas
+pkg_geas_commit = master
+
+PACKAGES += geef
+pkg_geef_name = geef
+pkg_geef_description = Git NEEEEF (Erlang NIF)
+pkg_geef_homepage = https://github.com/carlosmn/geef
+pkg_geef_fetch = git
+pkg_geef_repo = https://github.com/carlosmn/geef
+pkg_geef_commit = master
+
+PACKAGES += gen_coap
+pkg_gen_coap_name = gen_coap
+pkg_gen_coap_description = Generic Erlang CoAP Client/Server
+pkg_gen_coap_homepage = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_fetch = git
+pkg_gen_coap_repo = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_commit = master
+
+PACKAGES += gen_cycle
+pkg_gen_cycle_name = gen_cycle
+pkg_gen_cycle_description = Simple, generic OTP behaviour for recurring tasks
+pkg_gen_cycle_homepage = https://github.com/aerosol/gen_cycle
+pkg_gen_cycle_fetch = git
+pkg_gen_cycle_repo = https://github.com/aerosol/gen_cycle
+pkg_gen_cycle_commit = develop
+
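+# Entries may pin refs other than master, as gen_cycle above does with
+# 'develop'. A project can also override any index entry from its own
+# Makefile; a sketch, assuming erlang.mk's documented dep_* override syntax:
+#
+#   DEPS = gen_cycle
+#   dep_gen_cycle = git https://github.com/aerosol/gen_cycle master
+#
+# A dep_NAME line takes precedence over the pkg_NAME_* defaults above.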
+PACKAGES += gen_icmp
+pkg_gen_icmp_name = gen_icmp
+pkg_gen_icmp_description = Erlang interface to ICMP sockets
+pkg_gen_icmp_homepage = https://github.com/msantos/gen_icmp
+pkg_gen_icmp_fetch = git
+pkg_gen_icmp_repo = https://github.com/msantos/gen_icmp
+pkg_gen_icmp_commit = master
+
+PACKAGES += gen_leader
+pkg_gen_leader_name = gen_leader
+pkg_gen_leader_description = Leader election behavior
+pkg_gen_leader_homepage = https://github.com/garret-smith/gen_leader_revival
+pkg_gen_leader_fetch = git
+pkg_gen_leader_repo = https://github.com/garret-smith/gen_leader_revival
+pkg_gen_leader_commit = master
+
+PACKAGES += gen_nb_server
+pkg_gen_nb_server_name = gen_nb_server
+pkg_gen_nb_server_description = OTP behavior for writing non-blocking servers
+pkg_gen_nb_server_homepage = https://github.com/kevsmith/gen_nb_server
+pkg_gen_nb_server_fetch = git
+pkg_gen_nb_server_repo = https://github.com/kevsmith/gen_nb_server
+pkg_gen_nb_server_commit = master
+
+PACKAGES += gen_paxos
+pkg_gen_paxos_name = gen_paxos
+pkg_gen_paxos_description = An Erlang/OTP-style implementation of the PAXOS distributed consensus protocol
+pkg_gen_paxos_homepage = https://github.com/gburd/gen_paxos
+pkg_gen_paxos_fetch = git
+pkg_gen_paxos_repo = https://github.com/gburd/gen_paxos
+pkg_gen_paxos_commit = master
+
+PACKAGES += gen_rpc
+pkg_gen_rpc_name = gen_rpc
+pkg_gen_rpc_description = A scalable RPC library for Erlang-VM based languages
+pkg_gen_rpc_homepage = https://github.com/priestjim/gen_rpc
+pkg_gen_rpc_fetch = git
+pkg_gen_rpc_repo = https://github.com/priestjim/gen_rpc.git
+pkg_gen_rpc_commit = master
+
+PACKAGES += gen_smtp
+pkg_gen_smtp_name = gen_smtp
+pkg_gen_smtp_description = A generic Erlang SMTP server and client that can be extended via callback modules
+pkg_gen_smtp_homepage = https://github.com/Vagabond/gen_smtp
+pkg_gen_smtp_fetch = git
+pkg_gen_smtp_repo = https://github.com/Vagabond/gen_smtp
+pkg_gen_smtp_commit = master
+
+PACKAGES += gen_tracker
+pkg_gen_tracker_name = gen_tracker
+pkg_gen_tracker_description = Supervisor with ETS handling of children and their metadata
+pkg_gen_tracker_homepage = https://github.com/erlyvideo/gen_tracker
+pkg_gen_tracker_fetch = git
+pkg_gen_tracker_repo = https://github.com/erlyvideo/gen_tracker
+pkg_gen_tracker_commit = master
+
+PACKAGES += gen_unix
+pkg_gen_unix_name = gen_unix
+pkg_gen_unix_description = Erlang Unix socket interface
+pkg_gen_unix_homepage = https://github.com/msantos/gen_unix
+pkg_gen_unix_fetch = git
+pkg_gen_unix_repo = https://github.com/msantos/gen_unix
+pkg_gen_unix_commit = master
+
+PACKAGES += geode
+pkg_geode_name = geode
+pkg_geode_description = Geohash/proximity lookup in pure, uncut Erlang.
+pkg_geode_homepage = https://github.com/bradfordw/geode
+pkg_geode_fetch = git
+pkg_geode_repo = https://github.com/bradfordw/geode
+pkg_geode_commit = master
+
+PACKAGES += getopt
+pkg_getopt_name = getopt
+pkg_getopt_description = Module to parse command line arguments using the GNU getopt syntax
+pkg_getopt_homepage = https://github.com/jcomellas/getopt
+pkg_getopt_fetch = git
+pkg_getopt_repo = https://github.com/jcomellas/getopt
+pkg_getopt_commit = master
+
+PACKAGES += gettext
+pkg_gettext_name = gettext
+pkg_gettext_description = Erlang internationalization library.
+pkg_gettext_homepage = https://github.com/etnt/gettext
+pkg_gettext_fetch = git
+pkg_gettext_repo = https://github.com/etnt/gettext
+pkg_gettext_commit = master
+
+PACKAGES += giallo
+pkg_giallo_name = giallo
+pkg_giallo_description = Small and flexible web framework on top of Cowboy
+pkg_giallo_homepage = https://github.com/kivra/giallo
+pkg_giallo_fetch = git
+pkg_giallo_repo = https://github.com/kivra/giallo
+pkg_giallo_commit = master
+
+PACKAGES += gin
+pkg_gin_name = gin
+pkg_gin_description = Parse transform adding an 'in' operator for Erlang guards
+pkg_gin_homepage = https://github.com/mad-cocktail/gin
+pkg_gin_fetch = git
+pkg_gin_repo = https://github.com/mad-cocktail/gin
+pkg_gin_commit = master
+
+PACKAGES += gitty
+pkg_gitty_name = gitty
+pkg_gitty_description = Git access in Erlang
+pkg_gitty_homepage = https://github.com/maxlapshin/gitty
+pkg_gitty_fetch = git
+pkg_gitty_repo = https://github.com/maxlapshin/gitty
+pkg_gitty_commit = master
+
+PACKAGES += gold_fever
+pkg_gold_fever_name = gold_fever
+pkg_gold_fever_description = A Treasure Hunt for Erlangers
+pkg_gold_fever_homepage = https://github.com/inaka/gold_fever
+pkg_gold_fever_fetch = git
+pkg_gold_fever_repo = https://github.com/inaka/gold_fever
+pkg_gold_fever_commit = master
+
+PACKAGES += gpb
+pkg_gpb_name = gpb
+pkg_gpb_description = A Google Protobuf implementation for Erlang
+pkg_gpb_homepage = https://github.com/tomas-abrahamsson/gpb
+pkg_gpb_fetch = git
+pkg_gpb_repo = https://github.com/tomas-abrahamsson/gpb
+pkg_gpb_commit = master
+
+PACKAGES += gproc
+pkg_gproc_name = gproc
+pkg_gproc_description = Extended process registry for Erlang
+pkg_gproc_homepage = https://github.com/uwiger/gproc
+pkg_gproc_fetch = git
+pkg_gproc_repo = https://github.com/uwiger/gproc
+pkg_gproc_commit = master
+
+PACKAGES += grapherl
+pkg_grapherl_name = grapherl
+pkg_grapherl_description = Create graphs of Erlang systems and programs
+pkg_grapherl_homepage = https://github.com/eproxus/grapherl
+pkg_grapherl_fetch = git
+pkg_grapherl_repo = https://github.com/eproxus/grapherl
+pkg_grapherl_commit = master
+
+PACKAGES += grpc
+pkg_grpc_name = grpc
+pkg_grpc_description = gRPC server in Erlang
+pkg_grpc_homepage = https://github.com/Bluehouse-Technology/grpc
+pkg_grpc_fetch = git
+pkg_grpc_repo = https://github.com/Bluehouse-Technology/grpc
+pkg_grpc_commit = master
+
+PACKAGES += grpc_client
+pkg_grpc_client_name = grpc_client
+pkg_grpc_client_description = gRPC client in Erlang
+pkg_grpc_client_homepage = https://github.com/Bluehouse-Technology/grpc_client
+pkg_grpc_client_fetch = git
+pkg_grpc_client_repo = https://github.com/Bluehouse-Technology/grpc_client
+pkg_grpc_client_commit = master
+
+PACKAGES += gun
+pkg_gun_name = gun
+pkg_gun_description = Asynchronous SPDY, HTTP and Websocket client written in Erlang.
+pkg_gun_homepage = http://ninenines.eu
+pkg_gun_fetch = git
+pkg_gun_repo = https://github.com/ninenines/gun
+pkg_gun_commit = master
+
+PACKAGES += gut
+pkg_gut_name = gut
+pkg_gut_description = gut is a template printing (aka scaffolding) tool for Erlang, like rails generate or yeoman
+pkg_gut_homepage = https://github.com/unbalancedparentheses/gut
+pkg_gut_fetch = git
+pkg_gut_repo = https://github.com/unbalancedparentheses/gut
+pkg_gut_commit = master
+
+PACKAGES += hackney
+pkg_hackney_name = hackney
+pkg_hackney_description = simple HTTP client in Erlang
+pkg_hackney_homepage = https://github.com/benoitc/hackney
+pkg_hackney_fetch = git
+pkg_hackney_repo = https://github.com/benoitc/hackney
+pkg_hackney_commit = master
+
+PACKAGES += hamcrest
+pkg_hamcrest_name = hamcrest
+pkg_hamcrest_description = Erlang port of Hamcrest
+pkg_hamcrest_homepage = https://github.com/hyperthunk/hamcrest-erlang
+pkg_hamcrest_fetch = git
+pkg_hamcrest_repo = https://github.com/hyperthunk/hamcrest-erlang
+pkg_hamcrest_commit = master
+
+PACKAGES += hanoidb
+pkg_hanoidb_name = hanoidb
+pkg_hanoidb_description = Erlang LSM BTree Storage
+pkg_hanoidb_homepage = https://github.com/krestenkrab/hanoidb
+pkg_hanoidb_fetch = git
+pkg_hanoidb_repo = https://github.com/krestenkrab/hanoidb
+pkg_hanoidb_commit = master
+
+PACKAGES += hottub
+pkg_hottub_name = hottub
+pkg_hottub_description = Permanent Erlang Worker Pool
+pkg_hottub_homepage = https://github.com/bfrog/hottub
+pkg_hottub_fetch = git
+pkg_hottub_repo = https://github.com/bfrog/hottub
+pkg_hottub_commit = master
+
+PACKAGES += hpack
+pkg_hpack_name = hpack
+pkg_hpack_description = HPACK Implementation for Erlang
+pkg_hpack_homepage = https://github.com/joedevivo/hpack
+pkg_hpack_fetch = git
+pkg_hpack_repo = https://github.com/joedevivo/hpack
+pkg_hpack_commit = master
+
+PACKAGES += hyper
+pkg_hyper_name = hyper
+pkg_hyper_description = Erlang implementation of HyperLogLog
+pkg_hyper_homepage = https://github.com/GameAnalytics/hyper
+pkg_hyper_fetch = git
+pkg_hyper_repo = https://github.com/GameAnalytics/hyper
+pkg_hyper_commit = master
+
+PACKAGES += i18n
+pkg_i18n_name = i18n
+pkg_i18n_description = International components for Unicode from Erlang (unicode, date, string, number, format, locale, localization, transliteration, icu4e)
+pkg_i18n_homepage = https://github.com/erlang-unicode/i18n
+pkg_i18n_fetch = git
+pkg_i18n_repo = https://github.com/erlang-unicode/i18n
+pkg_i18n_commit = master
+
+PACKAGES += ibrowse
+pkg_ibrowse_name = ibrowse
+pkg_ibrowse_description = Erlang HTTP client
+pkg_ibrowse_homepage = https://github.com/cmullaparthi/ibrowse
+pkg_ibrowse_fetch = git
+pkg_ibrowse_repo = https://github.com/cmullaparthi/ibrowse
+pkg_ibrowse_commit = master
+
+PACKAGES += idna
+pkg_idna_name = idna
+pkg_idna_description = Erlang IDNA lib
+pkg_idna_homepage = https://github.com/benoitc/erlang-idna
+pkg_idna_fetch = git
+pkg_idna_repo = https://github.com/benoitc/erlang-idna
+pkg_idna_commit = master
+
+PACKAGES += ierlang
+pkg_ierlang_name = ierlang
+pkg_ierlang_description = An Erlang language kernel for IPython.
+pkg_ierlang_homepage = https://github.com/robbielynch/ierlang
+pkg_ierlang_fetch = git
+pkg_ierlang_repo = https://github.com/robbielynch/ierlang
+pkg_ierlang_commit = master
+
+PACKAGES += iota
+pkg_iota_name = iota
+pkg_iota_description = iota (Inter-dependency Objective Testing Apparatus) - a tool to enforce clean separation of responsibilities in Erlang code
+pkg_iota_homepage = https://github.com/jpgneves/iota
+pkg_iota_fetch = git
+pkg_iota_repo = https://github.com/jpgneves/iota
+pkg_iota_commit = master
+
+PACKAGES += irc_lib
+pkg_irc_lib_name = irc_lib
+pkg_irc_lib_description = Erlang IRC client library
+pkg_irc_lib_homepage = https://github.com/OtpChatBot/irc_lib
+pkg_irc_lib_fetch = git
+pkg_irc_lib_repo = https://github.com/OtpChatBot/irc_lib
+pkg_irc_lib_commit = master
+
+PACKAGES += ircd
+pkg_ircd_name = ircd
+pkg_ircd_description = A pluggable IRC daemon application/library for Erlang.
+pkg_ircd_homepage = https://github.com/tonyg/erlang-ircd
+pkg_ircd_fetch = git
+pkg_ircd_repo = https://github.com/tonyg/erlang-ircd
+pkg_ircd_commit = master
+
+PACKAGES += iris
+pkg_iris_name = iris
+pkg_iris_description = Iris Erlang binding
+pkg_iris_homepage = https://github.com/project-iris/iris-erl
+pkg_iris_fetch = git
+pkg_iris_repo = https://github.com/project-iris/iris-erl
+pkg_iris_commit = master
+
+PACKAGES += iso8601
+pkg_iso8601_name = iso8601
+pkg_iso8601_description = Erlang ISO 8601 date formatter/parser
+pkg_iso8601_homepage = https://github.com/seansawyer/erlang_iso8601
+pkg_iso8601_fetch = git
+pkg_iso8601_repo = https://github.com/seansawyer/erlang_iso8601
+pkg_iso8601_commit = master
+
+PACKAGES += jamdb_sybase
+pkg_jamdb_sybase_name = jamdb_sybase
+pkg_jamdb_sybase_description = Erlang driver for SAP Sybase ASE
+pkg_jamdb_sybase_homepage = https://github.com/erlangbureau/jamdb_sybase
+pkg_jamdb_sybase_fetch = git
+pkg_jamdb_sybase_repo = https://github.com/erlangbureau/jamdb_sybase
+pkg_jamdb_sybase_commit = master
+
+PACKAGES += jerg
+pkg_jerg_name = jerg
+pkg_jerg_description = JSON Schema to Erlang Records Generator
+pkg_jerg_homepage = https://github.com/ddossot/jerg
+pkg_jerg_fetch = git
+pkg_jerg_repo = https://github.com/ddossot/jerg
+pkg_jerg_commit = master
+
+PACKAGES += jesse
+pkg_jesse_name = jesse
+pkg_jesse_description = jesse (JSON Schema Erlang) is an implementation of a JSON Schema validator for Erlang.
+pkg_jesse_homepage = https://github.com/for-GET/jesse
+pkg_jesse_fetch = git
+pkg_jesse_repo = https://github.com/for-GET/jesse
+pkg_jesse_commit = master
+
+PACKAGES += jiffy
+pkg_jiffy_name = jiffy
+pkg_jiffy_description = JSON NIFs for Erlang.
+pkg_jiffy_homepage = https://github.com/davisp/jiffy
+pkg_jiffy_fetch = git
+pkg_jiffy_repo = https://github.com/davisp/jiffy
+pkg_jiffy_commit = master
+
+PACKAGES += jiffy_v
+pkg_jiffy_v_name = jiffy_v
+pkg_jiffy_v_description = JSON validation utility
+pkg_jiffy_v_homepage = https://github.com/shizzard/jiffy-v
+pkg_jiffy_v_fetch = git
+pkg_jiffy_v_repo = https://github.com/shizzard/jiffy-v
+pkg_jiffy_v_commit = master
+
+PACKAGES += jobs
+pkg_jobs_name = jobs
+pkg_jobs_description = A job scheduler for load regulation
+pkg_jobs_homepage = https://github.com/esl/jobs
+pkg_jobs_fetch = git
+pkg_jobs_repo = https://github.com/esl/jobs
+pkg_jobs_commit = master
+
+PACKAGES += joxa
+pkg_joxa_name = joxa
+pkg_joxa_description = A Modern Lisp for the Erlang VM
+pkg_joxa_homepage = https://github.com/joxa/joxa
+pkg_joxa_fetch = git
+pkg_joxa_repo = https://github.com/joxa/joxa
+pkg_joxa_commit = master
+
+PACKAGES += json
+pkg_json_name = json
+pkg_json_description = A high-level JSON library for Erlang (17.0+)
+pkg_json_homepage = https://github.com/talentdeficit/json
+pkg_json_fetch = git
+pkg_json_repo = https://github.com/talentdeficit/json
+pkg_json_commit = master
+
+PACKAGES += json_rec
+pkg_json_rec_name = json_rec
+pkg_json_rec_description = JSON to Erlang record
+pkg_json_rec_homepage = https://github.com/justinkirby/json_rec
+pkg_json_rec_fetch = git
+pkg_json_rec_repo = https://github.com/justinkirby/json_rec
+pkg_json_rec_commit = master
+
+PACKAGES += jsone
+pkg_jsone_name = jsone
+pkg_jsone_description = An Erlang library for encoding and decoding JSON data.
+pkg_jsone_homepage = https://github.com/sile/jsone
+pkg_jsone_fetch = git
+pkg_jsone_repo = https://github.com/sile/jsone.git
+pkg_jsone_commit = master
+
+PACKAGES += jsonerl
+pkg_jsonerl_name = jsonerl
+pkg_jsonerl_description = Yet another, but slightly different, Erlang <-> JSON encoder/decoder
+pkg_jsonerl_homepage = https://github.com/lambder/jsonerl
+pkg_jsonerl_fetch = git
+pkg_jsonerl_repo = https://github.com/lambder/jsonerl
+pkg_jsonerl_commit = master
+
+PACKAGES += jsonpath
+pkg_jsonpath_name = jsonpath
+pkg_jsonpath_description = Fast Erlang JSON data retrieval and updates via JavaScript-like notation
+pkg_jsonpath_homepage = https://github.com/GeneStevens/jsonpath
+pkg_jsonpath_fetch = git
+pkg_jsonpath_repo = https://github.com/GeneStevens/jsonpath
+pkg_jsonpath_commit = master
+
+PACKAGES += jsonx
+pkg_jsonx_name = jsonx
+pkg_jsonx_description = JSONX is an Erlang library for efficiently decoding and encoding JSON, written in C.
+pkg_jsonx_homepage = https://github.com/iskra/jsonx
+pkg_jsonx_fetch = git
+pkg_jsonx_repo = https://github.com/iskra/jsonx
+pkg_jsonx_commit = master
+
+PACKAGES += jsx
+pkg_jsx_name = jsx
+pkg_jsx_description = An Erlang application for consuming, producing and manipulating JSON.
+pkg_jsx_homepage = https://github.com/talentdeficit/jsx
+pkg_jsx_fetch = git
+pkg_jsx_repo = https://github.com/talentdeficit/jsx
+pkg_jsx_commit = master
+
+PACKAGES += kafka
+pkg_kafka_name = kafka
+pkg_kafka_description = Kafka consumer and producer in Erlang
+pkg_kafka_homepage = https://github.com/wooga/kafka-erlang
+pkg_kafka_fetch = git
+pkg_kafka_repo = https://github.com/wooga/kafka-erlang
+pkg_kafka_commit = master
+
+PACKAGES += kafka_protocol
+pkg_kafka_protocol_name = kafka_protocol
+pkg_kafka_protocol_description = Kafka protocol Erlang library
+pkg_kafka_protocol_homepage = https://github.com/klarna/kafka_protocol
+pkg_kafka_protocol_fetch = git
+pkg_kafka_protocol_repo = https://github.com/klarna/kafka_protocol.git
+pkg_kafka_protocol_commit = master
+
+PACKAGES += kai
+pkg_kai_name = kai
+pkg_kai_description = DHT storage by Takeshi Inoue
+pkg_kai_homepage = https://github.com/synrc/kai
+pkg_kai_fetch = git
+pkg_kai_repo = https://github.com/synrc/kai
+pkg_kai_commit = master
+
+PACKAGES += katja
+pkg_katja_name = katja
+pkg_katja_description = A simple Riemann client written in Erlang.
+pkg_katja_homepage = https://github.com/nifoc/katja
+pkg_katja_fetch = git
+pkg_katja_repo = https://github.com/nifoc/katja
+pkg_katja_commit = master
+
+PACKAGES += kdht
+pkg_kdht_name = kdht
+pkg_kdht_description = kdht is an Erlang DHT implementation
+pkg_kdht_homepage = https://github.com/kevinlynx/kdht
+pkg_kdht_fetch = git
+pkg_kdht_repo = https://github.com/kevinlynx/kdht
+pkg_kdht_commit = master
+
+PACKAGES += key2value
+pkg_key2value_name = key2value
+pkg_key2value_description = Erlang 2-way map
+pkg_key2value_homepage = https://github.com/okeuday/key2value
+pkg_key2value_fetch = git
+pkg_key2value_repo = https://github.com/okeuday/key2value
+pkg_key2value_commit = master
+
+PACKAGES += keys1value
+pkg_keys1value_name = keys1value
+pkg_keys1value_description = Erlang set associative map for key lists
+pkg_keys1value_homepage = https://github.com/okeuday/keys1value
+pkg_keys1value_fetch = git
+pkg_keys1value_repo = https://github.com/okeuday/keys1value
+pkg_keys1value_commit = master
+
+PACKAGES += kinetic
+pkg_kinetic_name = kinetic
+pkg_kinetic_description = Erlang Kinesis Client
+pkg_kinetic_homepage = https://github.com/AdRoll/kinetic
+pkg_kinetic_fetch = git
+pkg_kinetic_repo = https://github.com/AdRoll/kinetic
+pkg_kinetic_commit = master
+
+PACKAGES += kjell
+pkg_kjell_name = kjell
+pkg_kjell_description = Erlang Shell
+pkg_kjell_homepage = https://github.com/karlll/kjell
+pkg_kjell_fetch = git
+pkg_kjell_repo = https://github.com/karlll/kjell
+pkg_kjell_commit = master
+
+PACKAGES += kraken
+pkg_kraken_name = kraken
+pkg_kraken_description = Distributed Pubsub Server for Realtime Apps
+pkg_kraken_homepage = https://github.com/Asana/kraken
+pkg_kraken_fetch = git
+pkg_kraken_repo = https://github.com/Asana/kraken
+pkg_kraken_commit = master
+
+PACKAGES += kucumberl
+pkg_kucumberl_name = kucumberl
+pkg_kucumberl_description = A pure-Erlang, open-source implementation of Cucumber
+pkg_kucumberl_homepage = https://github.com/openshine/kucumberl
+pkg_kucumberl_fetch = git
+pkg_kucumberl_repo = https://github.com/openshine/kucumberl
+pkg_kucumberl_commit = master
+
+PACKAGES += kvc
+pkg_kvc_name = kvc
+pkg_kvc_description = KVC - Key Value Coding for Erlang data structures
+pkg_kvc_homepage = https://github.com/etrepum/kvc
+pkg_kvc_fetch = git
+pkg_kvc_repo = https://github.com/etrepum/kvc
+pkg_kvc_commit = master
+
+PACKAGES += kvlists
+pkg_kvlists_name = kvlists
+pkg_kvlists_description = Lists of key-value pairs (decoded JSON) in Erlang
+pkg_kvlists_homepage = https://github.com/jcomellas/kvlists
+pkg_kvlists_fetch = git
+pkg_kvlists_repo = https://github.com/jcomellas/kvlists
+pkg_kvlists_commit = master
+
+PACKAGES += kvs
+pkg_kvs_name = kvs
+pkg_kvs_description = Container and Iterator
+pkg_kvs_homepage = https://github.com/synrc/kvs
+pkg_kvs_fetch = git
+pkg_kvs_repo = https://github.com/synrc/kvs
+pkg_kvs_commit = master
+
+PACKAGES += lager
+pkg_lager_name = lager
+pkg_lager_description = A logging framework for Erlang/OTP.
+pkg_lager_homepage = https://github.com/erlang-lager/lager
+pkg_lager_fetch = git
+pkg_lager_repo = https://github.com/erlang-lager/lager
+pkg_lager_commit = master
+
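+# Illustrative sketch only, not part of the upstream index: a project
+# Makefile would typically consume a package listed here by naming it in
+# DEPS; the pkg_* fields above supply the default fetch method, repo and
+# commit, and a dep_*_commit variable can override the default. The project
+# name "my_app" and the pinned tag "3.9.2" below are assumptions.
+#
+#   PROJECT = my_app
+#   DEPS = lager
+#   dep_lager_commit = 3.9.2   # assumed tag; defaults to pkg_lager_commit (master)
+#   include erlang.mk
+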
+PACKAGES += lager_amqp_backend
+pkg_lager_amqp_backend_name = lager_amqp_backend
+pkg_lager_amqp_backend_description = AMQP RabbitMQ Lager backend
+pkg_lager_amqp_backend_homepage = https://github.com/jbrisbin/lager_amqp_backend
+pkg_lager_amqp_backend_fetch = git
+pkg_lager_amqp_backend_repo = https://github.com/jbrisbin/lager_amqp_backend
+pkg_lager_amqp_backend_commit = master
+
+PACKAGES += lager_syslog
+pkg_lager_syslog_name = lager_syslog
+pkg_lager_syslog_description = Syslog backend for lager
+pkg_lager_syslog_homepage = https://github.com/erlang-lager/lager_syslog
+pkg_lager_syslog_fetch = git
+pkg_lager_syslog_repo = https://github.com/erlang-lager/lager_syslog
+pkg_lager_syslog_commit = master
+
+PACKAGES += lambdapad
+pkg_lambdapad_name = lambdapad
+pkg_lambdapad_description = Static site generator using Erlang. Yes, Erlang.
+pkg_lambdapad_homepage = https://github.com/gar1t/lambdapad
+pkg_lambdapad_fetch = git
+pkg_lambdapad_repo = https://github.com/gar1t/lambdapad
+pkg_lambdapad_commit = master
+
+PACKAGES += lasp
+pkg_lasp_name = lasp
+pkg_lasp_description = A Language for Distributed, Eventually Consistent Computations
+pkg_lasp_homepage = http://lasp-lang.org/
+pkg_lasp_fetch = git
+pkg_lasp_repo = https://github.com/lasp-lang/lasp
+pkg_lasp_commit = master
+
+PACKAGES += lasse
+pkg_lasse_name = lasse
+pkg_lasse_description = SSE handler for Cowboy
+pkg_lasse_homepage = https://github.com/inaka/lasse
+pkg_lasse_fetch = git
+pkg_lasse_repo = https://github.com/inaka/lasse
+pkg_lasse_commit = master
+
+PACKAGES += ldap
+pkg_ldap_name = ldap
+pkg_ldap_description = LDAP server written in Erlang
+pkg_ldap_homepage = https://github.com/spawnproc/ldap
+pkg_ldap_fetch = git
+pkg_ldap_repo = https://github.com/spawnproc/ldap
+pkg_ldap_commit = master
+
+PACKAGES += lethink
+pkg_lethink_name = lethink
+pkg_lethink_description = erlang driver for rethinkdb
+pkg_lethink_homepage = https://github.com/taybin/lethink
+pkg_lethink_fetch = git
+pkg_lethink_repo = https://github.com/taybin/lethink
+pkg_lethink_commit = master
+
+PACKAGES += lfe
+pkg_lfe_name = lfe
+pkg_lfe_description = Lisp Flavoured Erlang (LFE)
+pkg_lfe_homepage = https://github.com/rvirding/lfe
+pkg_lfe_fetch = git
+pkg_lfe_repo = https://github.com/rvirding/lfe
+pkg_lfe_commit = master
+
+PACKAGES += ling
+pkg_ling_name = ling
+pkg_ling_description = Erlang on Xen
+pkg_ling_homepage = https://github.com/cloudozer/ling
+pkg_ling_fetch = git
+pkg_ling_repo = https://github.com/cloudozer/ling
+pkg_ling_commit = master
+
+PACKAGES += live
+pkg_live_name = live
+pkg_live_description = Automated module and configuration reloader.
+pkg_live_homepage = http://ninenines.eu
+pkg_live_fetch = git
+pkg_live_repo = https://github.com/ninenines/live
+pkg_live_commit = master
+
+PACKAGES += lmq
+pkg_lmq_name = lmq
+pkg_lmq_description = Lightweight Message Queue
+pkg_lmq_homepage = https://github.com/iij/lmq
+pkg_lmq_fetch = git
+pkg_lmq_repo = https://github.com/iij/lmq
+pkg_lmq_commit = master
+
+PACKAGES += locker
+pkg_locker_name = locker
+pkg_locker_description = Atomic distributed 'check and set' for short-lived keys
+pkg_locker_homepage = https://github.com/wooga/locker
+pkg_locker_fetch = git
+pkg_locker_repo = https://github.com/wooga/locker
+pkg_locker_commit = master
+
+PACKAGES += locks
+pkg_locks_name = locks
+pkg_locks_description = A scalable, deadlock-resolving resource locker
+pkg_locks_homepage = https://github.com/uwiger/locks
+pkg_locks_fetch = git
+pkg_locks_repo = https://github.com/uwiger/locks
+pkg_locks_commit = master
+
+PACKAGES += log4erl
+pkg_log4erl_name = log4erl
+pkg_log4erl_description = A logger for erlang in the spirit of Log4J.
+pkg_log4erl_homepage = https://github.com/ahmednawras/log4erl
+pkg_log4erl_fetch = git
+pkg_log4erl_repo = https://github.com/ahmednawras/log4erl
+pkg_log4erl_commit = master
+
+PACKAGES += lol
+pkg_lol_name = lol
+pkg_lol_description = Lisp on erLang, and programming is fun again
+pkg_lol_homepage = https://github.com/b0oh/lol
+pkg_lol_fetch = git
+pkg_lol_repo = https://github.com/b0oh/lol
+pkg_lol_commit = master
+
+PACKAGES += lucid
+pkg_lucid_name = lucid
+pkg_lucid_description = HTTP/2 server written in Erlang
+pkg_lucid_homepage = https://github.com/tatsuhiro-t/lucid
+pkg_lucid_fetch = git
+pkg_lucid_repo = https://github.com/tatsuhiro-t/lucid
+pkg_lucid_commit = master
+
+PACKAGES += luerl
+pkg_luerl_name = luerl
+pkg_luerl_description = Lua in Erlang
+pkg_luerl_homepage = https://github.com/rvirding/luerl
+pkg_luerl_fetch = git
+pkg_luerl_repo = https://github.com/rvirding/luerl
+pkg_luerl_commit = develop
+
+PACKAGES += luwak
+pkg_luwak_name = luwak
+pkg_luwak_description = Large-object storage interface for Riak
+pkg_luwak_homepage = https://github.com/basho/luwak
+pkg_luwak_fetch = git
+pkg_luwak_repo = https://github.com/basho/luwak
+pkg_luwak_commit = master
+
+PACKAGES += lux
+pkg_lux_name = lux
+pkg_lux_description = Lux (LUcid eXpect scripting) simplifies test automation and provides an Expect-style execution of commands
+pkg_lux_homepage = https://github.com/hawk/lux
+pkg_lux_fetch = git
+pkg_lux_repo = https://github.com/hawk/lux
+pkg_lux_commit = master
+
+PACKAGES += machi
+pkg_machi_name = machi
+pkg_machi_description = Machi file store
+pkg_machi_homepage = https://github.com/basho/machi
+pkg_machi_fetch = git
+pkg_machi_repo = https://github.com/basho/machi
+pkg_machi_commit = master
+
+PACKAGES += mad
+pkg_mad_name = mad
+pkg_mad_description = Small and Fast Rebar Replacement
+pkg_mad_homepage = https://github.com/synrc/mad
+pkg_mad_fetch = git
+pkg_mad_repo = https://github.com/synrc/mad
+pkg_mad_commit = master
+
+PACKAGES += marina
+pkg_marina_name = marina
+pkg_marina_description = Non-blocking Erlang Cassandra CQL3 client
+pkg_marina_homepage = https://github.com/lpgauth/marina
+pkg_marina_fetch = git
+pkg_marina_repo = https://github.com/lpgauth/marina
+pkg_marina_commit = master
+
+PACKAGES += mavg
+pkg_mavg_name = mavg
+pkg_mavg_description = Erlang :: Exponential moving average library
+pkg_mavg_homepage = https://github.com/EchoTeam/mavg
+pkg_mavg_fetch = git
+pkg_mavg_repo = https://github.com/EchoTeam/mavg
+pkg_mavg_commit = master
+
+PACKAGES += mc_erl
+pkg_mc_erl_name = mc_erl
+pkg_mc_erl_description = mc-erl is a server for Minecraft 1.4.7 written in Erlang.
+pkg_mc_erl_homepage = https://github.com/clonejo/mc-erl
+pkg_mc_erl_fetch = git
+pkg_mc_erl_repo = https://github.com/clonejo/mc-erl
+pkg_mc_erl_commit = master
+
+PACKAGES += mcd
+pkg_mcd_name = mcd
+pkg_mcd_description = Fast memcached protocol client in pure Erlang
+pkg_mcd_homepage = https://github.com/EchoTeam/mcd
+pkg_mcd_fetch = git
+pkg_mcd_repo = https://github.com/EchoTeam/mcd
+pkg_mcd_commit = master
+
+PACKAGES += mcerlang
+pkg_mcerlang_name = mcerlang
+pkg_mcerlang_description = The McErlang model checker for Erlang
+pkg_mcerlang_homepage = https://github.com/fredlund/McErlang
+pkg_mcerlang_fetch = git
+pkg_mcerlang_repo = https://github.com/fredlund/McErlang
+pkg_mcerlang_commit = master
+
+PACKAGES += meck
+pkg_meck_name = meck
+pkg_meck_description = A mocking library for Erlang
+pkg_meck_homepage = https://github.com/eproxus/meck
+pkg_meck_fetch = git
+pkg_meck_repo = https://github.com/eproxus/meck
+pkg_meck_commit = master
+
+PACKAGES += mekao
+pkg_mekao_name = mekao
+pkg_mekao_description = SQL constructor
+pkg_mekao_homepage = https://github.com/ddosia/mekao
+pkg_mekao_fetch = git
+pkg_mekao_repo = https://github.com/ddosia/mekao
+pkg_mekao_commit = master
+
+PACKAGES += memo
+pkg_memo_name = memo
+pkg_memo_description = Erlang memoization server
+pkg_memo_homepage = https://github.com/tuncer/memo
+pkg_memo_fetch = git
+pkg_memo_repo = https://github.com/tuncer/memo
+pkg_memo_commit = master
+
+PACKAGES += merge_index
+pkg_merge_index_name = merge_index
+pkg_merge_index_description = MergeIndex is an Erlang library for storing ordered sets on disk. It is very similar to an SSTable (in Google's Bigtable) or an HFile (in Hadoop).
+pkg_merge_index_homepage = https://github.com/basho/merge_index
+pkg_merge_index_fetch = git
+pkg_merge_index_repo = https://github.com/basho/merge_index
+pkg_merge_index_commit = master
+
+PACKAGES += merl
+pkg_merl_name = merl
+pkg_merl_description = Metaprogramming in Erlang
+pkg_merl_homepage = https://github.com/richcarl/merl
+pkg_merl_fetch = git
+pkg_merl_repo = https://github.com/richcarl/merl
+pkg_merl_commit = master
+
+PACKAGES += mimerl
+pkg_mimerl_name = mimerl
+pkg_mimerl_description = library to handle mimetypes
+pkg_mimerl_homepage = https://github.com/benoitc/mimerl
+pkg_mimerl_fetch = git
+pkg_mimerl_repo = https://github.com/benoitc/mimerl
+pkg_mimerl_commit = master
+
+PACKAGES += mimetypes
+pkg_mimetypes_name = mimetypes
+pkg_mimetypes_description = Erlang MIME types library
+pkg_mimetypes_homepage = https://github.com/spawngrid/mimetypes
+pkg_mimetypes_fetch = git
+pkg_mimetypes_repo = https://github.com/spawngrid/mimetypes
+pkg_mimetypes_commit = master
+
+PACKAGES += mixer
+pkg_mixer_name = mixer
+pkg_mixer_description = Mix in functions from other modules
+pkg_mixer_homepage = https://github.com/chef/mixer
+pkg_mixer_fetch = git
+pkg_mixer_repo = https://github.com/chef/mixer
+pkg_mixer_commit = master
+
+PACKAGES += mochiweb
+pkg_mochiweb_name = mochiweb
+pkg_mochiweb_description = MochiWeb is an Erlang library for building lightweight HTTP servers.
+pkg_mochiweb_homepage = https://github.com/mochi/mochiweb
+pkg_mochiweb_fetch = git
+pkg_mochiweb_repo = https://github.com/mochi/mochiweb
+pkg_mochiweb_commit = master
+
+PACKAGES += mochiweb_xpath
+pkg_mochiweb_xpath_name = mochiweb_xpath
+pkg_mochiweb_xpath_description = XPath support for mochiweb's html parser
+pkg_mochiweb_xpath_homepage = https://github.com/retnuh/mochiweb_xpath
+pkg_mochiweb_xpath_fetch = git
+pkg_mochiweb_xpath_repo = https://github.com/retnuh/mochiweb_xpath
+pkg_mochiweb_xpath_commit = master
+
+PACKAGES += mockgyver
+pkg_mockgyver_name = mockgyver
+pkg_mockgyver_description = A mocking library for Erlang
+pkg_mockgyver_homepage = https://github.com/klajo/mockgyver
+pkg_mockgyver_fetch = git
+pkg_mockgyver_repo = https://github.com/klajo/mockgyver
+pkg_mockgyver_commit = master
+
+PACKAGES += modlib
+pkg_modlib_name = modlib
+pkg_modlib_description = Web framework based on Erlang's inets httpd
+pkg_modlib_homepage = https://github.com/gar1t/modlib
+pkg_modlib_fetch = git
+pkg_modlib_repo = https://github.com/gar1t/modlib
+pkg_modlib_commit = master
+
+PACKAGES += mongodb
+pkg_mongodb_name = mongodb
+pkg_mongodb_description = MongoDB driver for Erlang
+pkg_mongodb_homepage = https://github.com/comtihon/mongodb-erlang
+pkg_mongodb_fetch = git
+pkg_mongodb_repo = https://github.com/comtihon/mongodb-erlang
+pkg_mongodb_commit = master
+
+PACKAGES += mongooseim
+pkg_mongooseim_name = mongooseim
+pkg_mongooseim_description = Jabber / XMPP server with a focus on performance and scalability, by Erlang Solutions
+pkg_mongooseim_homepage = https://www.erlang-solutions.com/products/mongooseim-massively-scalable-ejabberd-platform
+pkg_mongooseim_fetch = git
+pkg_mongooseim_repo = https://github.com/esl/MongooseIM
+pkg_mongooseim_commit = master
+
+PACKAGES += moyo
+pkg_moyo_name = moyo
+pkg_moyo_description = Erlang utility functions library
+pkg_moyo_homepage = https://github.com/dwango/moyo
+pkg_moyo_fetch = git
+pkg_moyo_repo = https://github.com/dwango/moyo
+pkg_moyo_commit = master
+
+PACKAGES += msgpack
+pkg_msgpack_name = msgpack
+pkg_msgpack_description = MessagePack (de)serializer implementation for Erlang
+pkg_msgpack_homepage = https://github.com/msgpack/msgpack-erlang
+pkg_msgpack_fetch = git
+pkg_msgpack_repo = https://github.com/msgpack/msgpack-erlang
+pkg_msgpack_commit = master
+
+PACKAGES += mu2
+pkg_mu2_name = mu2
+pkg_mu2_description = Erlang mutation testing tool
+pkg_mu2_homepage = https://github.com/ramsay-t/mu2
+pkg_mu2_fetch = git
+pkg_mu2_repo = https://github.com/ramsay-t/mu2
+pkg_mu2_commit = master
+
+PACKAGES += mustache
+pkg_mustache_name = mustache
+pkg_mustache_description = Mustache template engine for Erlang.
+pkg_mustache_homepage = https://github.com/mojombo/mustache.erl
+pkg_mustache_fetch = git
+pkg_mustache_repo = https://github.com/mojombo/mustache.erl
+pkg_mustache_commit = master
+
+PACKAGES += myproto
+pkg_myproto_name = myproto
+pkg_myproto_description = MySQL Server Protocol in Erlang
+pkg_myproto_homepage = https://github.com/altenwald/myproto
+pkg_myproto_fetch = git
+pkg_myproto_repo = https://github.com/altenwald/myproto
+pkg_myproto_commit = master
+
+PACKAGES += mysql
+pkg_mysql_name = mysql
+pkg_mysql_description = MySQL client library for Erlang/OTP
+pkg_mysql_homepage = https://github.com/mysql-otp/mysql-otp
+pkg_mysql_fetch = git
+pkg_mysql_repo = https://github.com/mysql-otp/mysql-otp
+pkg_mysql_commit = 1.5.1
+
+PACKAGES += n2o
+pkg_n2o_name = n2o
+pkg_n2o_description = WebSocket Application Server
+pkg_n2o_homepage = https://github.com/5HT/n2o
+pkg_n2o_fetch = git
+pkg_n2o_repo = https://github.com/5HT/n2o
+pkg_n2o_commit = master
+
+PACKAGES += nat_upnp
+pkg_nat_upnp_name = nat_upnp
+pkg_nat_upnp_description = Erlang library to map an internal port to an external one using UPnP IGD
+pkg_nat_upnp_homepage = https://github.com/benoitc/nat_upnp
+pkg_nat_upnp_fetch = git
+pkg_nat_upnp_repo = https://github.com/benoitc/nat_upnp
+pkg_nat_upnp_commit = master
+
+PACKAGES += neo4j
+pkg_neo4j_name = neo4j
+pkg_neo4j_description = Erlang client library for Neo4J.
+pkg_neo4j_homepage = https://github.com/dmitriid/neo4j-erlang
+pkg_neo4j_fetch = git
+pkg_neo4j_repo = https://github.com/dmitriid/neo4j-erlang
+pkg_neo4j_commit = master
+
+PACKAGES += neotoma
+pkg_neotoma_name = neotoma
+pkg_neotoma_description = Erlang library and packrat parser-generator for parsing expression grammars.
+pkg_neotoma_homepage = https://github.com/seancribbs/neotoma
+pkg_neotoma_fetch = git
+pkg_neotoma_repo = https://github.com/seancribbs/neotoma
+pkg_neotoma_commit = master
+
+PACKAGES += newrelic
+pkg_newrelic_name = newrelic
+pkg_newrelic_description = Erlang library for sending metrics to New Relic
+pkg_newrelic_homepage = https://github.com/wooga/newrelic-erlang
+pkg_newrelic_fetch = git
+pkg_newrelic_repo = https://github.com/wooga/newrelic-erlang
+pkg_newrelic_commit = master
+
+PACKAGES += nifty
+pkg_nifty_name = nifty
+pkg_nifty_description = Erlang NIF wrapper generator
+pkg_nifty_homepage = https://github.com/parapluu/nifty
+pkg_nifty_fetch = git
+pkg_nifty_repo = https://github.com/parapluu/nifty
+pkg_nifty_commit = master
+
+PACKAGES += nitrogen_core
+pkg_nitrogen_core_name = nitrogen_core
+pkg_nitrogen_core_description = The core Nitrogen library.
+pkg_nitrogen_core_homepage = http://nitrogenproject.com/
+pkg_nitrogen_core_fetch = git
+pkg_nitrogen_core_repo = https://github.com/nitrogen/nitrogen_core
+pkg_nitrogen_core_commit = master
+
+PACKAGES += nkbase
+pkg_nkbase_name = nkbase
+pkg_nkbase_description = NkBASE distributed database
+pkg_nkbase_homepage = https://github.com/Nekso/nkbase
+pkg_nkbase_fetch = git
+pkg_nkbase_repo = https://github.com/Nekso/nkbase
+pkg_nkbase_commit = develop
+
+PACKAGES += nkdocker
+pkg_nkdocker_name = nkdocker
+pkg_nkdocker_description = Erlang Docker client
+pkg_nkdocker_homepage = https://github.com/Nekso/nkdocker
+pkg_nkdocker_fetch = git
+pkg_nkdocker_repo = https://github.com/Nekso/nkdocker
+pkg_nkdocker_commit = master
+
+PACKAGES += nkpacket
+pkg_nkpacket_name = nkpacket
+pkg_nkpacket_description = Generic Erlang transport layer
+pkg_nkpacket_homepage = https://github.com/Nekso/nkpacket
+pkg_nkpacket_fetch = git
+pkg_nkpacket_repo = https://github.com/Nekso/nkpacket
+pkg_nkpacket_commit = master
+
+PACKAGES += nksip
+pkg_nksip_name = nksip
+pkg_nksip_description = Erlang SIP application server
+pkg_nksip_homepage = https://github.com/kalta/nksip
+pkg_nksip_fetch = git
+pkg_nksip_repo = https://github.com/kalta/nksip
+pkg_nksip_commit = master
+
+PACKAGES += nodefinder
+pkg_nodefinder_name = nodefinder
+pkg_nodefinder_description = automatic node discovery via UDP multicast
+pkg_nodefinder_homepage = https://github.com/erlanger/nodefinder
+pkg_nodefinder_fetch = git
+pkg_nodefinder_repo = https://github.com/okeuday/nodefinder
+pkg_nodefinder_commit = master
+
+PACKAGES += nprocreg
+pkg_nprocreg_name = nprocreg
+pkg_nprocreg_description = Minimal Distributed Erlang Process Registry
+pkg_nprocreg_homepage = http://nitrogenproject.com/
+pkg_nprocreg_fetch = git
+pkg_nprocreg_repo = https://github.com/nitrogen/nprocreg
+pkg_nprocreg_commit = master
+
+PACKAGES += oauth
+pkg_oauth_name = oauth
+pkg_oauth_description = An Erlang OAuth 1.0 implementation
+pkg_oauth_homepage = https://github.com/tim/erlang-oauth
+pkg_oauth_fetch = git
+pkg_oauth_repo = https://github.com/tim/erlang-oauth
+pkg_oauth_commit = master
+
+PACKAGES += oauth2
+pkg_oauth2_name = oauth2
+pkg_oauth2_description = Erlang Oauth2 implementation
+pkg_oauth2_homepage = https://github.com/kivra/oauth2
+pkg_oauth2_fetch = git
+pkg_oauth2_repo = https://github.com/kivra/oauth2
+pkg_oauth2_commit = master
+
+PACKAGES += observer_cli
+pkg_observer_cli_name = observer_cli
+pkg_observer_cli_description = Visualize Erlang/Elixir Nodes On The Command Line
+pkg_observer_cli_homepage = http://zhongwencool.github.io/observer_cli
+pkg_observer_cli_fetch = git
+pkg_observer_cli_repo = https://github.com/zhongwencool/observer_cli
+pkg_observer_cli_commit = master
+
+PACKAGES += octopus
+pkg_octopus_name = octopus
+pkg_octopus_description = Small and flexible pool manager written in Erlang
+pkg_octopus_homepage = https://github.com/erlangbureau/octopus
+pkg_octopus_fetch = git
+pkg_octopus_repo = https://github.com/erlangbureau/octopus
+pkg_octopus_commit = master
+
+PACKAGES += of_protocol
+pkg_of_protocol_name = of_protocol
+pkg_of_protocol_description = OpenFlow Protocol Library for Erlang
+pkg_of_protocol_homepage = https://github.com/FlowForwarding/of_protocol
+pkg_of_protocol_fetch = git
+pkg_of_protocol_repo = https://github.com/FlowForwarding/of_protocol
+pkg_of_protocol_commit = master
+
+PACKAGES += opencouch
+pkg_opencouch_name = couch
+pkg_opencouch_description = An embeddable document-oriented database compatible with Apache CouchDB
+pkg_opencouch_homepage = https://github.com/benoitc/opencouch
+pkg_opencouch_fetch = git
+pkg_opencouch_repo = https://github.com/benoitc/opencouch
+pkg_opencouch_commit = master
+
+PACKAGES += openflow
+pkg_openflow_name = openflow
+pkg_openflow_description = An OpenFlow controller written in pure erlang
+pkg_openflow_homepage = https://github.com/renatoaguiar/erlang-openflow
+pkg_openflow_fetch = git
+pkg_openflow_repo = https://github.com/renatoaguiar/erlang-openflow
+pkg_openflow_commit = master
+
+PACKAGES += openid
+pkg_openid_name = openid
+pkg_openid_description = Erlang OpenID
+pkg_openid_homepage = https://github.com/brendonh/erl_openid
+pkg_openid_fetch = git
+pkg_openid_repo = https://github.com/brendonh/erl_openid
+pkg_openid_commit = master
+
+PACKAGES += openpoker
+pkg_openpoker_name = openpoker
+pkg_openpoker_description = Genesis Texas hold'em Game Server
+pkg_openpoker_homepage = https://github.com/hpyhacking/openpoker
+pkg_openpoker_fetch = git
+pkg_openpoker_repo = https://github.com/hpyhacking/openpoker
+pkg_openpoker_commit = master
+
+PACKAGES += otpbp
+pkg_otpbp_name = otpbp
+pkg_otpbp_description = Parse transform for using new OTP functions in old Erlang/OTP releases (R15, R16, 17, 18, 19)
+pkg_otpbp_homepage = https://github.com/Ledest/otpbp
+pkg_otpbp_fetch = git
+pkg_otpbp_repo = https://github.com/Ledest/otpbp
+pkg_otpbp_commit = master
+
+PACKAGES += pal
+pkg_pal_name = pal
+pkg_pal_description = Pragmatic Authentication Library
+pkg_pal_homepage = https://github.com/manifest/pal
+pkg_pal_fetch = git
+pkg_pal_repo = https://github.com/manifest/pal
+pkg_pal_commit = master
+
+PACKAGES += parse_trans
+pkg_parse_trans_name = parse_trans
+pkg_parse_trans_description = Parse transform utilities for Erlang
+pkg_parse_trans_homepage = https://github.com/uwiger/parse_trans
+pkg_parse_trans_fetch = git
+pkg_parse_trans_repo = https://github.com/uwiger/parse_trans
+pkg_parse_trans_commit = master
+
+PACKAGES += parsexml
+pkg_parsexml_name = parsexml
+pkg_parsexml_description = Simple DOM XML parser with convenient and very simple API
+pkg_parsexml_homepage = https://github.com/maxlapshin/parsexml
+pkg_parsexml_fetch = git
+pkg_parsexml_repo = https://github.com/maxlapshin/parsexml
+pkg_parsexml_commit = master
+
+PACKAGES += partisan
+pkg_partisan_name = partisan
+pkg_partisan_description = High-performance, high-scalability distributed computing with Erlang and Elixir.
+pkg_partisan_homepage = http://partisan.cloud
+pkg_partisan_fetch = git
+pkg_partisan_repo = https://github.com/lasp-lang/partisan
+pkg_partisan_commit = master
+
+PACKAGES += pegjs
+pkg_pegjs_name = pegjs
+pkg_pegjs_description = An implementation of PEG.js grammar for Erlang.
+pkg_pegjs_homepage = https://github.com/dmitriid/pegjs
+pkg_pegjs_fetch = git
+pkg_pegjs_repo = https://github.com/dmitriid/pegjs
+pkg_pegjs_commit = master
+
+PACKAGES += percept2
+pkg_percept2_name = percept2
+pkg_percept2_description = Concurrent profiling tool for Erlang
+pkg_percept2_homepage = https://github.com/huiqing/percept2
+pkg_percept2_fetch = git
+pkg_percept2_repo = https://github.com/huiqing/percept2
+pkg_percept2_commit = master
+
+PACKAGES += pgo
+pkg_pgo_name = pgo
+pkg_pgo_description = Erlang Postgres client and connection pool
+pkg_pgo_homepage = https://github.com/erleans/pgo.git
+pkg_pgo_fetch = git
+pkg_pgo_repo = https://github.com/erleans/pgo.git
+pkg_pgo_commit = master
+
+PACKAGES += pgsql
+pkg_pgsql_name = pgsql
+pkg_pgsql_description = Erlang PostgreSQL driver
+pkg_pgsql_homepage = https://github.com/semiocast/pgsql
+pkg_pgsql_fetch = git
+pkg_pgsql_repo = https://github.com/semiocast/pgsql
+pkg_pgsql_commit = master
+
+PACKAGES += pkgx
+pkg_pkgx_name = pkgx
+pkg_pkgx_description = Build .deb packages from Erlang releases
+pkg_pkgx_homepage = https://github.com/arjan/pkgx
+pkg_pkgx_fetch = git
+pkg_pkgx_repo = https://github.com/arjan/pkgx
+pkg_pkgx_commit = master
+
+PACKAGES += pkt
+pkg_pkt_name = pkt
+pkg_pkt_description = Erlang network protocol library
+pkg_pkt_homepage = https://github.com/msantos/pkt
+pkg_pkt_fetch = git
+pkg_pkt_repo = https://github.com/msantos/pkt
+pkg_pkt_commit = master
+
+PACKAGES += plain_fsm
+pkg_plain_fsm_name = plain_fsm
+pkg_plain_fsm_description = A behaviour/support library for writing plain Erlang FSMs.
+pkg_plain_fsm_homepage = https://github.com/uwiger/plain_fsm
+pkg_plain_fsm_fetch = git
+pkg_plain_fsm_repo = https://github.com/uwiger/plain_fsm
+pkg_plain_fsm_commit = master
+
+PACKAGES += plumtree
+pkg_plumtree_name = plumtree
+pkg_plumtree_description = Epidemic Broadcast Trees
+pkg_plumtree_homepage = https://github.com/helium/plumtree
+pkg_plumtree_fetch = git
+pkg_plumtree_repo = https://github.com/helium/plumtree
+pkg_plumtree_commit = master
+
+PACKAGES += pmod_transform
+pkg_pmod_transform_name = pmod_transform
+pkg_pmod_transform_description = Parse transform for parameterized modules
+pkg_pmod_transform_homepage = https://github.com/erlang/pmod_transform
+pkg_pmod_transform_fetch = git
+pkg_pmod_transform_repo = https://github.com/erlang/pmod_transform
+pkg_pmod_transform_commit = master
+
+PACKAGES += pobox
+pkg_pobox_name = pobox
+pkg_pobox_description = External buffer processes to protect against mailbox overflow in Erlang
+pkg_pobox_homepage = https://github.com/ferd/pobox
+pkg_pobox_fetch = git
+pkg_pobox_repo = https://github.com/ferd/pobox
+pkg_pobox_commit = master
+
+PACKAGES += ponos
+pkg_ponos_name = ponos
+pkg_ponos_description = ponos is a simple yet powerful load generator written in erlang
+pkg_ponos_homepage = https://github.com/klarna/ponos
+pkg_ponos_fetch = git
+pkg_ponos_repo = https://github.com/klarna/ponos
+pkg_ponos_commit = master
+
+PACKAGES += poolboy
+pkg_poolboy_name = poolboy
+pkg_poolboy_description = A hunky Erlang worker pool factory
+pkg_poolboy_homepage = https://github.com/devinus/poolboy
+pkg_poolboy_fetch = git
+pkg_poolboy_repo = https://github.com/devinus/poolboy
+pkg_poolboy_commit = master
+
+PACKAGES += pooler
+pkg_pooler_name = pooler
+pkg_pooler_description = An OTP Process Pool Application
+pkg_pooler_homepage = https://github.com/seth/pooler
+pkg_pooler_fetch = git
+pkg_pooler_repo = https://github.com/seth/pooler
+pkg_pooler_commit = master
+
+PACKAGES += pqueue
+pkg_pqueue_name = pqueue
+pkg_pqueue_description = Erlang Priority Queues
+pkg_pqueue_homepage = https://github.com/okeuday/pqueue
+pkg_pqueue_fetch = git
+pkg_pqueue_repo = https://github.com/okeuday/pqueue
+pkg_pqueue_commit = master
+
+PACKAGES += procket
+pkg_procket_name = procket
+pkg_procket_description = Erlang interface to low level socket operations
+pkg_procket_homepage = http://blog.listincomprehension.com/search/label/procket
+pkg_procket_fetch = git
+pkg_procket_repo = https://github.com/msantos/procket
+pkg_procket_commit = master
+
+PACKAGES += prometheus
+pkg_prometheus_name = prometheus
+pkg_prometheus_description = Prometheus.io client in Erlang
+pkg_prometheus_homepage = https://github.com/deadtrickster/prometheus.erl
+pkg_prometheus_fetch = git
+pkg_prometheus_repo = https://github.com/deadtrickster/prometheus.erl
+pkg_prometheus_commit = master
+
+PACKAGES += prop
+pkg_prop_name = prop
+pkg_prop_description = An Erlang code scaffolding and generator system.
+pkg_prop_homepage = https://github.com/nuex/prop
+pkg_prop_fetch = git
+pkg_prop_repo = https://github.com/nuex/prop
+pkg_prop_commit = master
+
+PACKAGES += proper
+pkg_proper_name = proper
+pkg_proper_description = PropEr: a QuickCheck-inspired property-based testing tool for Erlang.
+pkg_proper_homepage = http://proper.softlab.ntua.gr
+pkg_proper_fetch = git
+pkg_proper_repo = https://github.com/manopapad/proper
+pkg_proper_commit = master
+
+PACKAGES += props
+pkg_props_name = props
+pkg_props_description = Property structure library
+pkg_props_homepage = https://github.com/greyarea/props
+pkg_props_fetch = git
+pkg_props_repo = https://github.com/greyarea/props
+pkg_props_commit = master
+
+PACKAGES += protobuffs
+pkg_protobuffs_name = protobuffs
+pkg_protobuffs_description = An implementation of Google's Protocol Buffers for Erlang, based on ngerakines/erlang_protobuffs.
+pkg_protobuffs_homepage = https://github.com/basho/erlang_protobuffs
+pkg_protobuffs_fetch = git
+pkg_protobuffs_repo = https://github.com/basho/erlang_protobuffs
+pkg_protobuffs_commit = master
+
+PACKAGES += psycho
+pkg_psycho_name = psycho
+pkg_psycho_description = HTTP server that provides a WSGI-like interface for applications and middleware.
+pkg_psycho_homepage = https://github.com/gar1t/psycho
+pkg_psycho_fetch = git
+pkg_psycho_repo = https://github.com/gar1t/psycho
+pkg_psycho_commit = master
+
+PACKAGES += purity
+pkg_purity_name = purity
+pkg_purity_description = A side-effect analyzer for Erlang
+pkg_purity_homepage = https://github.com/mpitid/purity
+pkg_purity_fetch = git
+pkg_purity_repo = https://github.com/mpitid/purity
+pkg_purity_commit = master
+
+PACKAGES += push_service
+pkg_push_service_name = push_service
+pkg_push_service_description = Push service
+pkg_push_service_homepage = https://github.com/hairyhum/push_service
+pkg_push_service_fetch = git
+pkg_push_service_repo = https://github.com/hairyhum/push_service
+pkg_push_service_commit = master
+
+PACKAGES += qdate
+pkg_qdate_name = qdate
+pkg_qdate_description = Date, time, and timezone parsing, formatting, and conversion for Erlang.
+pkg_qdate_homepage = https://github.com/choptastic/qdate
+pkg_qdate_fetch = git
+pkg_qdate_repo = https://github.com/choptastic/qdate
+pkg_qdate_commit = master
+
+PACKAGES += qrcode
+pkg_qrcode_name = qrcode
+pkg_qrcode_description = QR Code encoder in Erlang
+pkg_qrcode_homepage = https://github.com/komone/qrcode
+pkg_qrcode_fetch = git
+pkg_qrcode_repo = https://github.com/komone/qrcode
+pkg_qrcode_commit = master
+
+PACKAGES += quest
+pkg_quest_name = quest
+pkg_quest_description = Learn Erlang through this set of challenges. An interactive system for getting to know Erlang.
+pkg_quest_homepage = https://github.com/eriksoe/ErlangQuest
+pkg_quest_fetch = git
+pkg_quest_repo = https://github.com/eriksoe/ErlangQuest
+pkg_quest_commit = master
+
+PACKAGES += quickrand
+pkg_quickrand_name = quickrand
+pkg_quickrand_description = Quick Erlang Random Number Generation
+pkg_quickrand_homepage = https://github.com/okeuday/quickrand
+pkg_quickrand_fetch = git
+pkg_quickrand_repo = https://github.com/okeuday/quickrand
+pkg_quickrand_commit = master
+
+PACKAGES += rabbit
+pkg_rabbit_name = rabbit
+pkg_rabbit_description = RabbitMQ Server
+pkg_rabbit_homepage = https://www.rabbitmq.com/
+pkg_rabbit_fetch = git
+pkg_rabbit_repo = https://github.com/rabbitmq/rabbitmq-server.git
+pkg_rabbit_commit = master
+
+PACKAGES += rabbit_exchange_type_riak
+pkg_rabbit_exchange_type_riak_name = rabbit_exchange_type_riak
+pkg_rabbit_exchange_type_riak_description = Custom RabbitMQ exchange type for sticking messages in Riak
+pkg_rabbit_exchange_type_riak_homepage = https://github.com/jbrisbin/riak-exchange
+pkg_rabbit_exchange_type_riak_fetch = git
+pkg_rabbit_exchange_type_riak_repo = https://github.com/jbrisbin/riak-exchange
+pkg_rabbit_exchange_type_riak_commit = master
+
+PACKAGES += rack
+pkg_rack_name = rack
+pkg_rack_description = Rack handler for erlang
+pkg_rack_homepage = https://github.com/erlyvideo/rack
+pkg_rack_fetch = git
+pkg_rack_repo = https://github.com/erlyvideo/rack
+pkg_rack_commit = master
+
+PACKAGES += radierl
+pkg_radierl_name = radierl
+pkg_radierl_description = RADIUS protocol stack implemented in Erlang.
+pkg_radierl_homepage = https://github.com/vances/radierl
+pkg_radierl_fetch = git
+pkg_radierl_repo = https://github.com/vances/radierl
+pkg_radierl_commit = master
+
+PACKAGES += rafter
+pkg_rafter_name = rafter
+pkg_rafter_description = An Erlang library application which implements the Raft consensus protocol
+pkg_rafter_homepage = https://github.com/andrewjstone/rafter
+pkg_rafter_fetch = git
+pkg_rafter_repo = https://github.com/andrewjstone/rafter
+pkg_rafter_commit = master
+
+PACKAGES += ranch
+pkg_ranch_name = ranch
+pkg_ranch_description = Socket acceptor pool for TCP protocols.
+pkg_ranch_homepage = http://ninenines.eu
+pkg_ranch_fetch = git
+pkg_ranch_repo = https://github.com/ninenines/ranch
+pkg_ranch_commit = 1.2.1
+
+PACKAGES += rbeacon
+pkg_rbeacon_name = rbeacon
+pkg_rbeacon_description = LAN discovery and presence in Erlang.
+pkg_rbeacon_homepage = https://github.com/refuge/rbeacon
+pkg_rbeacon_fetch = git
+pkg_rbeacon_repo = https://github.com/refuge/rbeacon
+pkg_rbeacon_commit = master
+
+PACKAGES += rebar
+pkg_rebar_name = rebar
+pkg_rebar_description = Erlang build tool that makes it easy to compile and test Erlang applications, port drivers and releases.
+pkg_rebar_homepage = http://www.rebar3.org
+pkg_rebar_fetch = git
+pkg_rebar_repo = https://github.com/rebar/rebar3
+pkg_rebar_commit = master
+
+PACKAGES += rebus
+pkg_rebus_name = rebus
+pkg_rebus_description = A stupid simple, internal, pub/sub event bus written in and for Erlang.
+pkg_rebus_homepage = https://github.com/olle/rebus
+pkg_rebus_fetch = git
+pkg_rebus_repo = https://github.com/olle/rebus
+pkg_rebus_commit = master
+
+PACKAGES += rec2json
+pkg_rec2json_name = rec2json
+pkg_rec2json_description = Compile erlang record definitions into modules to convert them to/from json easily.
+pkg_rec2json_homepage = https://github.com/lordnull/rec2json
+pkg_rec2json_fetch = git
+pkg_rec2json_repo = https://github.com/lordnull/rec2json
+pkg_rec2json_commit = master
+
+PACKAGES += recon
+pkg_recon_name = recon
+pkg_recon_description = Collection of functions and scripts to debug Erlang in production.
+pkg_recon_homepage = https://github.com/ferd/recon
+pkg_recon_fetch = git
+pkg_recon_repo = https://github.com/ferd/recon
+pkg_recon_commit = master
+
+PACKAGES += record_info
+pkg_record_info_name = record_info
+pkg_record_info_description = Convert between record and proplist
+pkg_record_info_homepage = https://github.com/bipthelin/erlang-record_info
+pkg_record_info_fetch = git
+pkg_record_info_repo = https://github.com/bipthelin/erlang-record_info
+pkg_record_info_commit = master
+
+PACKAGES += redgrid
+pkg_redgrid_name = redgrid
+pkg_redgrid_description = automatic Erlang node discovery via redis
+pkg_redgrid_homepage = https://github.com/jkvor/redgrid
+pkg_redgrid_fetch = git
+pkg_redgrid_repo = https://github.com/jkvor/redgrid
+pkg_redgrid_commit = master
+
+PACKAGES += redo
+pkg_redo_name = redo
+pkg_redo_description = pipelined erlang redis client
+pkg_redo_homepage = https://github.com/jkvor/redo
+pkg_redo_fetch = git
+pkg_redo_repo = https://github.com/jkvor/redo
+pkg_redo_commit = master
+
+PACKAGES += reload_mk
+pkg_reload_mk_name = reload_mk
+pkg_reload_mk_description = Live reload plugin for erlang.mk.
+pkg_reload_mk_homepage = https://github.com/bullno1/reload.mk
+pkg_reload_mk_fetch = git
+pkg_reload_mk_repo = https://github.com/bullno1/reload.mk
+pkg_reload_mk_commit = master
+
+PACKAGES += reltool_util
+pkg_reltool_util_name = reltool_util
+pkg_reltool_util_description = Erlang reltool utility functionality application
+pkg_reltool_util_homepage = https://github.com/okeuday/reltool_util
+pkg_reltool_util_fetch = git
+pkg_reltool_util_repo = https://github.com/okeuday/reltool_util
+pkg_reltool_util_commit = master
+
+PACKAGES += relx
+pkg_relx_name = relx
+pkg_relx_description = Sane, simple release creation for Erlang
+pkg_relx_homepage = https://github.com/erlware/relx
+pkg_relx_fetch = git
+pkg_relx_repo = https://github.com/erlware/relx
+pkg_relx_commit = master
+
+PACKAGES += resource_discovery
+pkg_resource_discovery_name = resource_discovery
+pkg_resource_discovery_description = An application used to dynamically discover resources present in an Erlang node cluster.
+pkg_resource_discovery_homepage = http://erlware.org/
+pkg_resource_discovery_fetch = git
+pkg_resource_discovery_repo = https://github.com/erlware/resource_discovery
+pkg_resource_discovery_commit = master
+
+PACKAGES += restc
+pkg_restc_name = restc
+pkg_restc_description = Erlang Rest Client
+pkg_restc_homepage = https://github.com/kivra/restclient
+pkg_restc_fetch = git
+pkg_restc_repo = https://github.com/kivra/restclient
+pkg_restc_commit = master
+
+PACKAGES += rfc4627_jsonrpc
+pkg_rfc4627_jsonrpc_name = rfc4627_jsonrpc
+pkg_rfc4627_jsonrpc_description = Erlang RFC4627 (JSON) codec and JSON-RPC server implementation.
+pkg_rfc4627_jsonrpc_homepage = https://github.com/tonyg/erlang-rfc4627
+pkg_rfc4627_jsonrpc_fetch = git
+pkg_rfc4627_jsonrpc_repo = https://github.com/tonyg/erlang-rfc4627
+pkg_rfc4627_jsonrpc_commit = master
+
+PACKAGES += riak_control
+pkg_riak_control_name = riak_control
+pkg_riak_control_description = Webmachine-based administration interface for Riak.
+pkg_riak_control_homepage = https://github.com/basho/riak_control
+pkg_riak_control_fetch = git
+pkg_riak_control_repo = https://github.com/basho/riak_control
+pkg_riak_control_commit = master
+
+PACKAGES += riak_core
+pkg_riak_core_name = riak_core
+pkg_riak_core_description = Distributed systems infrastructure used by Riak.
+pkg_riak_core_homepage = https://github.com/basho/riak_core
+pkg_riak_core_fetch = git
+pkg_riak_core_repo = https://github.com/basho/riak_core
+pkg_riak_core_commit = master
+
+PACKAGES += riak_dt
+pkg_riak_dt_name = riak_dt
+pkg_riak_dt_description = Convergent replicated datatypes in Erlang
+pkg_riak_dt_homepage = https://github.com/basho/riak_dt
+pkg_riak_dt_fetch = git
+pkg_riak_dt_repo = https://github.com/basho/riak_dt
+pkg_riak_dt_commit = master
+
+PACKAGES += riak_ensemble
+pkg_riak_ensemble_name = riak_ensemble
+pkg_riak_ensemble_description = Multi-Paxos framework in Erlang
+pkg_riak_ensemble_homepage = https://github.com/basho/riak_ensemble
+pkg_riak_ensemble_fetch = git
+pkg_riak_ensemble_repo = https://github.com/basho/riak_ensemble
+pkg_riak_ensemble_commit = master
+
+PACKAGES += riak_kv
+pkg_riak_kv_name = riak_kv
+pkg_riak_kv_description = Riak Key/Value Store
+pkg_riak_kv_homepage = https://github.com/basho/riak_kv
+pkg_riak_kv_fetch = git
+pkg_riak_kv_repo = https://github.com/basho/riak_kv
+pkg_riak_kv_commit = master
+
+PACKAGES += riak_pg
+pkg_riak_pg_name = riak_pg
+pkg_riak_pg_description = Distributed process groups with riak_core.
+pkg_riak_pg_homepage = https://github.com/cmeiklejohn/riak_pg
+pkg_riak_pg_fetch = git
+pkg_riak_pg_repo = https://github.com/cmeiklejohn/riak_pg
+pkg_riak_pg_commit = master
+
+PACKAGES += riak_pipe
+pkg_riak_pipe_name = riak_pipe
+pkg_riak_pipe_description = Riak Pipelines
+pkg_riak_pipe_homepage = https://github.com/basho/riak_pipe
+pkg_riak_pipe_fetch = git
+pkg_riak_pipe_repo = https://github.com/basho/riak_pipe
+pkg_riak_pipe_commit = master
+
+PACKAGES += riak_sysmon
+pkg_riak_sysmon_name = riak_sysmon
+pkg_riak_sysmon_description = Simple OTP app for managing Erlang VM system_monitor event messages
+pkg_riak_sysmon_homepage = https://github.com/basho/riak_sysmon
+pkg_riak_sysmon_fetch = git
+pkg_riak_sysmon_repo = https://github.com/basho/riak_sysmon
+pkg_riak_sysmon_commit = master
+
+PACKAGES += riak_test
+pkg_riak_test_name = riak_test
+pkg_riak_test_description = I'm in your cluster, testing your riaks
+pkg_riak_test_homepage = https://github.com/basho/riak_test
+pkg_riak_test_fetch = git
+pkg_riak_test_repo = https://github.com/basho/riak_test
+pkg_riak_test_commit = master
+
+PACKAGES += riakc
+pkg_riakc_name = riakc
+pkg_riakc_description = Erlang clients for Riak.
+pkg_riakc_homepage = https://github.com/basho/riak-erlang-client
+pkg_riakc_fetch = git
+pkg_riakc_repo = https://github.com/basho/riak-erlang-client
+pkg_riakc_commit = master
+
+PACKAGES += riakhttpc
+pkg_riakhttpc_name = riakhttpc
+pkg_riakhttpc_description = Riak Erlang client using the HTTP interface
+pkg_riakhttpc_homepage = https://github.com/basho/riak-erlang-http-client
+pkg_riakhttpc_fetch = git
+pkg_riakhttpc_repo = https://github.com/basho/riak-erlang-http-client
+pkg_riakhttpc_commit = master
+
+PACKAGES += riaknostic
+pkg_riaknostic_name = riaknostic
+pkg_riaknostic_description = A diagnostic tool for Riak installations, to find common errors asap
+pkg_riaknostic_homepage = https://github.com/basho/riaknostic
+pkg_riaknostic_fetch = git
+pkg_riaknostic_repo = https://github.com/basho/riaknostic
+pkg_riaknostic_commit = master
+
+PACKAGES += riakpool
+pkg_riakpool_name = riakpool
+pkg_riakpool_description = erlang riak client pool
+pkg_riakpool_homepage = https://github.com/dweldon/riakpool
+pkg_riakpool_fetch = git
+pkg_riakpool_repo = https://github.com/dweldon/riakpool
+pkg_riakpool_commit = master
+
+PACKAGES += rivus_cep
+pkg_rivus_cep_name = rivus_cep
+pkg_rivus_cep_description = Complex event processing in Erlang
+pkg_rivus_cep_homepage = https://github.com/vascokk/rivus_cep
+pkg_rivus_cep_fetch = git
+pkg_rivus_cep_repo = https://github.com/vascokk/rivus_cep
+pkg_rivus_cep_commit = master
+
+PACKAGES += rlimit
+pkg_rlimit_name = rlimit
+pkg_rlimit_description = Magnus Klaar's rate limiter code from etorrent
+pkg_rlimit_homepage = https://github.com/jlouis/rlimit
+pkg_rlimit_fetch = git
+pkg_rlimit_repo = https://github.com/jlouis/rlimit
+pkg_rlimit_commit = master
+
+PACKAGES += rust_mk
+pkg_rust_mk_name = rust_mk
+pkg_rust_mk_description = Build Rust crates in an Erlang application
+pkg_rust_mk_homepage = https://github.com/goertzenator/rust.mk
+pkg_rust_mk_fetch = git
+pkg_rust_mk_repo = https://github.com/goertzenator/rust.mk
+pkg_rust_mk_commit = master
+
+PACKAGES += safetyvalve
+pkg_safetyvalve_name = safetyvalve
+pkg_safetyvalve_description = A safety valve for your erlang node
+pkg_safetyvalve_homepage = https://github.com/jlouis/safetyvalve
+pkg_safetyvalve_fetch = git
+pkg_safetyvalve_repo = https://github.com/jlouis/safetyvalve
+pkg_safetyvalve_commit = master
+
+PACKAGES += seestar
+pkg_seestar_name = seestar
+pkg_seestar_description = The Erlang client for Cassandra 1.2+ binary protocol
+pkg_seestar_homepage = https://github.com/iamaleksey/seestar
+pkg_seestar_fetch = git
+pkg_seestar_repo = https://github.com/iamaleksey/seestar
+pkg_seestar_commit = master
+
+PACKAGES += service
+pkg_service_name = service
+pkg_service_description = A minimal Erlang behavior for creating CloudI internal services
+pkg_service_homepage = http://cloudi.org/
+pkg_service_fetch = git
+pkg_service_repo = https://github.com/CloudI/service
+pkg_service_commit = master
+
+PACKAGES += setup
+pkg_setup_name = setup
+pkg_setup_description = Generic setup utility for Erlang-based systems
+pkg_setup_homepage = https://github.com/uwiger/setup
+pkg_setup_fetch = git
+pkg_setup_repo = https://github.com/uwiger/setup
+pkg_setup_commit = master
+
+PACKAGES += sext
+pkg_sext_name = sext
+pkg_sext_description = Sortable Erlang Term Serialization
+pkg_sext_homepage = https://github.com/uwiger/sext
+pkg_sext_fetch = git
+pkg_sext_repo = https://github.com/uwiger/sext
+pkg_sext_commit = master
+
+PACKAGES += sfmt
+pkg_sfmt_name = sfmt
+pkg_sfmt_description = SFMT pseudo random number generator for Erlang.
+pkg_sfmt_homepage = https://github.com/jj1bdx/sfmt-erlang
+pkg_sfmt_fetch = git
+pkg_sfmt_repo = https://github.com/jj1bdx/sfmt-erlang
+pkg_sfmt_commit = master
+
+PACKAGES += sgte
+pkg_sgte_name = sgte
+pkg_sgte_description = A simple Erlang Template Engine
+pkg_sgte_homepage = https://github.com/filippo/sgte
+pkg_sgte_fetch = git
+pkg_sgte_repo = https://github.com/filippo/sgte
+pkg_sgte_commit = master
+
+PACKAGES += sheriff
+pkg_sheriff_name = sheriff
+pkg_sheriff_description = Parse transform for type based validation.
+pkg_sheriff_homepage = http://ninenines.eu
+pkg_sheriff_fetch = git
+pkg_sheriff_repo = https://github.com/extend/sheriff
+pkg_sheriff_commit = master
+
+PACKAGES += shotgun
+pkg_shotgun_name = shotgun
+pkg_shotgun_description = better than just a gun
+pkg_shotgun_homepage = https://github.com/inaka/shotgun
+pkg_shotgun_fetch = git
+pkg_shotgun_repo = https://github.com/inaka/shotgun
+pkg_shotgun_commit = master
+
+PACKAGES += sidejob
+pkg_sidejob_name = sidejob
+pkg_sidejob_description = Parallel worker and capacity limiting library for Erlang
+pkg_sidejob_homepage = https://github.com/basho/sidejob
+pkg_sidejob_fetch = git
+pkg_sidejob_repo = https://github.com/basho/sidejob
+pkg_sidejob_commit = master
+
+PACKAGES += sieve
+pkg_sieve_name = sieve
+pkg_sieve_description = sieve is a simple TCP routing proxy (layer 7) in erlang
+pkg_sieve_homepage = https://github.com/benoitc/sieve
+pkg_sieve_fetch = git
+pkg_sieve_repo = https://github.com/benoitc/sieve
+pkg_sieve_commit = master
+
+PACKAGES += sighandler
+pkg_sighandler_name = sighandler
+pkg_sighandler_description = Handle UNIX signals in Erlang
+pkg_sighandler_homepage = https://github.com/jkingsbery/sighandler
+pkg_sighandler_fetch = git
+pkg_sighandler_repo = https://github.com/jkingsbery/sighandler
+pkg_sighandler_commit = master
+
+PACKAGES += simhash
+pkg_simhash_name = simhash
+pkg_simhash_description = Simhashing for Erlang -- hashing algorithm to find near-duplicates in binary data.
+pkg_simhash_homepage = https://github.com/ferd/simhash
+pkg_simhash_fetch = git
+pkg_simhash_repo = https://github.com/ferd/simhash
+pkg_simhash_commit = master
+
+PACKAGES += simple_bridge
+pkg_simple_bridge_name = simple_bridge
+pkg_simple_bridge_description = A simple, standardized interface library to Erlang HTTP Servers.
+pkg_simple_bridge_homepage = https://github.com/nitrogen/simple_bridge
+pkg_simple_bridge_fetch = git
+pkg_simple_bridge_repo = https://github.com/nitrogen/simple_bridge
+pkg_simple_bridge_commit = master
+
+PACKAGES += simple_oauth2
+pkg_simple_oauth2_name = simple_oauth2
+pkg_simple_oauth2_description = Simple erlang OAuth2 client module for any http server framework (Google, Facebook, Yandex, Vkontakte are preconfigured)
+pkg_simple_oauth2_homepage = https://github.com/virtan/simple_oauth2
+pkg_simple_oauth2_fetch = git
+pkg_simple_oauth2_repo = https://github.com/virtan/simple_oauth2
+pkg_simple_oauth2_commit = master
+
+PACKAGES += skel
+pkg_skel_name = skel
+pkg_skel_description = A Streaming Process-based Skeleton Library for Erlang
+pkg_skel_homepage = https://github.com/ParaPhrase/skel
+pkg_skel_fetch = git
+pkg_skel_repo = https://github.com/ParaPhrase/skel
+pkg_skel_commit = master
+
+PACKAGES += slack
+pkg_slack_name = slack
+pkg_slack_description = Minimal slack notification OTP library.
+pkg_slack_homepage = https://github.com/DonBranson/slack
+pkg_slack_fetch = git
+pkg_slack_repo = https://github.com/DonBranson/slack.git
+pkg_slack_commit = master
+
+PACKAGES += smother
+pkg_smother_name = smother
+pkg_smother_description = Extended code coverage metrics for Erlang.
+pkg_smother_homepage = https://ramsay-t.github.io/Smother/
+pkg_smother_fetch = git
+pkg_smother_repo = https://github.com/ramsay-t/Smother
+pkg_smother_commit = master
+
+PACKAGES += snappyer
+pkg_snappyer_name = snappyer
+pkg_snappyer_description = Snappy as nif for Erlang
+pkg_snappyer_homepage = https://github.com/zmstone/snappyer
+pkg_snappyer_fetch = git
+pkg_snappyer_repo = https://github.com/zmstone/snappyer.git
+pkg_snappyer_commit = master
+
+PACKAGES += social
+pkg_social_name = social
+pkg_social_description = Cowboy handler for social login via OAuth2 providers
+pkg_social_homepage = https://github.com/dvv/social
+pkg_social_fetch = git
+pkg_social_repo = https://github.com/dvv/social
+pkg_social_commit = master
+
+PACKAGES += spapi_router
+pkg_spapi_router_name = spapi_router
+pkg_spapi_router_description = Partially-connected Erlang clustering
+pkg_spapi_router_homepage = https://github.com/spilgames/spapi-router
+pkg_spapi_router_fetch = git
+pkg_spapi_router_repo = https://github.com/spilgames/spapi-router
+pkg_spapi_router_commit = master
+
+PACKAGES += sqerl
+pkg_sqerl_name = sqerl
+pkg_sqerl_description = An Erlang-flavoured SQL DSL
+pkg_sqerl_homepage = https://github.com/hairyhum/sqerl
+pkg_sqerl_fetch = git
+pkg_sqerl_repo = https://github.com/hairyhum/sqerl
+pkg_sqerl_commit = master
+
+PACKAGES += srly
+pkg_srly_name = srly
+pkg_srly_description = Native Erlang Unix serial interface
+pkg_srly_homepage = https://github.com/msantos/srly
+pkg_srly_fetch = git
+pkg_srly_repo = https://github.com/msantos/srly
+pkg_srly_commit = master
+
+PACKAGES += sshrpc
+pkg_sshrpc_name = sshrpc
+pkg_sshrpc_description = Erlang SSH RPC module (experimental)
+pkg_sshrpc_homepage = https://github.com/jj1bdx/sshrpc
+pkg_sshrpc_fetch = git
+pkg_sshrpc_repo = https://github.com/jj1bdx/sshrpc
+pkg_sshrpc_commit = master
+
+PACKAGES += stable
+pkg_stable_name = stable
+pkg_stable_description = Library of assorted helpers for Cowboy web server.
+pkg_stable_homepage = https://github.com/dvv/stable
+pkg_stable_fetch = git
+pkg_stable_repo = https://github.com/dvv/stable
+pkg_stable_commit = master
+
+PACKAGES += statebox
+pkg_statebox_name = statebox
+pkg_statebox_description = Erlang state monad with merge/conflict-resolution capabilities. Useful for Riak.
+pkg_statebox_homepage = https://github.com/mochi/statebox
+pkg_statebox_fetch = git
+pkg_statebox_repo = https://github.com/mochi/statebox
+pkg_statebox_commit = master
+
+PACKAGES += statebox_riak
+pkg_statebox_riak_name = statebox_riak
+pkg_statebox_riak_description = Convenience library that makes it easier to use statebox with riak, extracted from best practices in our production code at Mochi Media.
+pkg_statebox_riak_homepage = https://github.com/mochi/statebox_riak
+pkg_statebox_riak_fetch = git
+pkg_statebox_riak_repo = https://github.com/mochi/statebox_riak
+pkg_statebox_riak_commit = master
+
+PACKAGES += statman
+pkg_statman_name = statman
+pkg_statman_description = Efficiently collect massive volumes of metrics inside the Erlang VM
+pkg_statman_homepage = https://github.com/knutin/statman
+pkg_statman_fetch = git
+pkg_statman_repo = https://github.com/knutin/statman
+pkg_statman_commit = master
+
+PACKAGES += statsderl
+pkg_statsderl_name = statsderl
+pkg_statsderl_description = StatsD client (erlang)
+pkg_statsderl_homepage = https://github.com/lpgauth/statsderl
+pkg_statsderl_fetch = git
+pkg_statsderl_repo = https://github.com/lpgauth/statsderl
+pkg_statsderl_commit = master
+
+PACKAGES += stdinout_pool
+pkg_stdinout_pool_name = stdinout_pool
+pkg_stdinout_pool_description = stdinout_pool : stuff goes in, stuff goes out. there's never any miscommunication.
+pkg_stdinout_pool_homepage = https://github.com/mattsta/erlang-stdinout-pool
+pkg_stdinout_pool_fetch = git
+pkg_stdinout_pool_repo = https://github.com/mattsta/erlang-stdinout-pool
+pkg_stdinout_pool_commit = master
+
+PACKAGES += stockdb
+pkg_stockdb_name = stockdb
+pkg_stockdb_description = Database for storing Stock Exchange quotes in erlang
+pkg_stockdb_homepage = https://github.com/maxlapshin/stockdb
+pkg_stockdb_fetch = git
+pkg_stockdb_repo = https://github.com/maxlapshin/stockdb
+pkg_stockdb_commit = master
+
+PACKAGES += stripe
+pkg_stripe_name = stripe
+pkg_stripe_description = Erlang interface to the stripe.com API
+pkg_stripe_homepage = https://github.com/mattsta/stripe-erlang
+pkg_stripe_fetch = git
+pkg_stripe_repo = https://github.com/mattsta/stripe-erlang
+pkg_stripe_commit = v1
+
+PACKAGES += subproc
+pkg_subproc_name = subproc
+pkg_subproc_description = unix subprocess manager with {active,once|false} modes
+pkg_subproc_homepage = http://dozzie.jarowit.net/trac/wiki/subproc
+pkg_subproc_fetch = git
+pkg_subproc_repo = https://github.com/dozzie/subproc
+pkg_subproc_commit = v0.1.0
+
+PACKAGES += supervisor3
+pkg_supervisor3_name = supervisor3
+pkg_supervisor3_description = OTP supervisor with additional strategies
+pkg_supervisor3_homepage = https://github.com/klarna/supervisor3
+pkg_supervisor3_fetch = git
+pkg_supervisor3_repo = https://github.com/klarna/supervisor3.git
+pkg_supervisor3_commit = master
+
+PACKAGES += surrogate
+pkg_surrogate_name = surrogate
+pkg_surrogate_description = Proxy server written in erlang. Supports reverse proxy load balancing and forward proxy with http (including CONNECT), socks4, socks5, and transparent proxy modes.
+pkg_surrogate_homepage = https://github.com/skruger/Surrogate
+pkg_surrogate_fetch = git
+pkg_surrogate_repo = https://github.com/skruger/Surrogate
+pkg_surrogate_commit = master
+
+PACKAGES += swab
+pkg_swab_name = swab
+pkg_swab_description = General purpose buffer handling module
+pkg_swab_homepage = https://github.com/crownedgrouse/swab
+pkg_swab_fetch = git
+pkg_swab_repo = https://github.com/crownedgrouse/swab
+pkg_swab_commit = master
+
+PACKAGES += swarm
+pkg_swarm_name = swarm
+pkg_swarm_description = Fast and simple acceptor pool for Erlang
+pkg_swarm_homepage = https://github.com/jeremey/swarm
+pkg_swarm_fetch = git
+pkg_swarm_repo = https://github.com/jeremey/swarm
+pkg_swarm_commit = master
+
+PACKAGES += switchboard
+pkg_switchboard_name = switchboard
+pkg_switchboard_description = A framework for processing email using worker plugins.
+pkg_switchboard_homepage = https://github.com/thusfresh/switchboard
+pkg_switchboard_fetch = git
+pkg_switchboard_repo = https://github.com/thusfresh/switchboard
+pkg_switchboard_commit = master
+
+PACKAGES += syn
+pkg_syn_name = syn
+pkg_syn_description = A global Process Registry and Process Group manager for Erlang.
+pkg_syn_homepage = https://github.com/ostinelli/syn
+pkg_syn_fetch = git
+pkg_syn_repo = https://github.com/ostinelli/syn
+pkg_syn_commit = master
+
+PACKAGES += sync
+pkg_sync_name = sync
+pkg_sync_description = On-the-fly recompiling and reloading in Erlang.
+pkg_sync_homepage = https://github.com/rustyio/sync
+pkg_sync_fetch = git
+pkg_sync_repo = https://github.com/rustyio/sync
+pkg_sync_commit = master
+
+PACKAGES += syntaxerl
+pkg_syntaxerl_name = syntaxerl
+pkg_syntaxerl_description = Syntax checker for Erlang
+pkg_syntaxerl_homepage = https://github.com/ten0s/syntaxerl
+pkg_syntaxerl_fetch = git
+pkg_syntaxerl_repo = https://github.com/ten0s/syntaxerl
+pkg_syntaxerl_commit = master
+
+PACKAGES += syslog
+pkg_syslog_name = syslog
+pkg_syslog_description = Erlang port driver for interacting with syslog via syslog(3)
+pkg_syslog_homepage = https://github.com/Vagabond/erlang-syslog
+pkg_syslog_fetch = git
+pkg_syslog_repo = https://github.com/Vagabond/erlang-syslog
+pkg_syslog_commit = master
+
+PACKAGES += taskforce
+pkg_taskforce_name = taskforce
+pkg_taskforce_description = Erlang worker pools for controlled parallelisation of arbitrary tasks.
+pkg_taskforce_homepage = https://github.com/g-andrade/taskforce
+pkg_taskforce_fetch = git
+pkg_taskforce_repo = https://github.com/g-andrade/taskforce
+pkg_taskforce_commit = master
+
+PACKAGES += tddreloader
+pkg_tddreloader_name = tddreloader
+pkg_tddreloader_description = Shell utility for recompiling, reloading, and testing code as it changes
+pkg_tddreloader_homepage = https://github.com/version2beta/tddreloader
+pkg_tddreloader_fetch = git
+pkg_tddreloader_repo = https://github.com/version2beta/tddreloader
+pkg_tddreloader_commit = master
+
+PACKAGES += tempo
+pkg_tempo_name = tempo
+pkg_tempo_description = NIF-based date and time parsing and formatting for Erlang.
+pkg_tempo_homepage = https://github.com/selectel/tempo
+pkg_tempo_fetch = git
+pkg_tempo_repo = https://github.com/selectel/tempo
+pkg_tempo_commit = master
+
+PACKAGES += ticktick
+pkg_ticktick_name = ticktick
+pkg_ticktick_description = Ticktick is an ID generator for messaging services.
+pkg_ticktick_homepage = https://github.com/ericliang/ticktick
+pkg_ticktick_fetch = git
+pkg_ticktick_repo = https://github.com/ericliang/ticktick
+pkg_ticktick_commit = master
+
+PACKAGES += tinymq
+pkg_tinymq_name = tinymq
+pkg_tinymq_description = TinyMQ - a diminutive, in-memory message queue
+pkg_tinymq_homepage = https://github.com/ChicagoBoss/tinymq
+pkg_tinymq_fetch = git
+pkg_tinymq_repo = https://github.com/ChicagoBoss/tinymq
+pkg_tinymq_commit = master
+
+PACKAGES += tinymt
+pkg_tinymt_name = tinymt
+pkg_tinymt_description = TinyMT pseudo random number generator for Erlang.
+pkg_tinymt_homepage = https://github.com/jj1bdx/tinymt-erlang
+pkg_tinymt_fetch = git
+pkg_tinymt_repo = https://github.com/jj1bdx/tinymt-erlang
+pkg_tinymt_commit = master
+
+PACKAGES += tirerl
+pkg_tirerl_name = tirerl
+pkg_tirerl_description = Erlang interface to Elasticsearch
+pkg_tirerl_homepage = https://github.com/inaka/tirerl
+pkg_tirerl_fetch = git
+pkg_tirerl_repo = https://github.com/inaka/tirerl
+pkg_tirerl_commit = master
+
+PACKAGES += toml
+pkg_toml_name = toml
+pkg_toml_description = TOML (0.4.0) config parser
+pkg_toml_homepage = http://dozzie.jarowit.net/trac/wiki/TOML
+pkg_toml_fetch = git
+pkg_toml_repo = https://github.com/dozzie/toml
+pkg_toml_commit = v0.2.0
+
+PACKAGES += traffic_tools
+pkg_traffic_tools_name = traffic_tools
+pkg_traffic_tools_description = Simple traffic limiting library
+pkg_traffic_tools_homepage = https://github.com/systra/traffic_tools
+pkg_traffic_tools_fetch = git
+pkg_traffic_tools_repo = https://github.com/systra/traffic_tools
+pkg_traffic_tools_commit = master
+
+PACKAGES += trails
+pkg_trails_name = trails
+pkg_trails_description = A couple of improvements over Cowboy Routes
+pkg_trails_homepage = http://inaka.github.io/cowboy-trails/
+pkg_trails_fetch = git
+pkg_trails_repo = https://github.com/inaka/cowboy-trails
+pkg_trails_commit = master
+
+PACKAGES += trane
+pkg_trane_name = trane
+pkg_trane_description = SAX-style parser for broken HTML, written in Erlang
+pkg_trane_homepage = https://github.com/massemanet/trane
+pkg_trane_fetch = git
+pkg_trane_repo = https://github.com/massemanet/trane
+pkg_trane_commit = master
+
+PACKAGES += transit
+pkg_transit_name = transit
+pkg_transit_description = transit format for erlang
+pkg_transit_homepage = https://github.com/isaiah/transit-erlang
+pkg_transit_fetch = git
+pkg_transit_repo = https://github.com/isaiah/transit-erlang
+pkg_transit_commit = master
+
+PACKAGES += trie
+pkg_trie_name = trie
+pkg_trie_description = Erlang Trie Implementation
+pkg_trie_homepage = https://github.com/okeuday/trie
+pkg_trie_fetch = git
+pkg_trie_repo = https://github.com/okeuday/trie
+pkg_trie_commit = master
+
+PACKAGES += triq
+pkg_triq_name = triq
+pkg_triq_description = Trifork QuickCheck
+pkg_triq_homepage = https://triq.gitlab.io
+pkg_triq_fetch = git
+pkg_triq_repo = https://gitlab.com/triq/triq.git
+pkg_triq_commit = master
+
+PACKAGES += tunctl
+pkg_tunctl_name = tunctl
+pkg_tunctl_description = Erlang TUN/TAP interface
+pkg_tunctl_homepage = https://github.com/msantos/tunctl
+pkg_tunctl_fetch = git
+pkg_tunctl_repo = https://github.com/msantos/tunctl
+pkg_tunctl_commit = master
+
+PACKAGES += twerl
+pkg_twerl_name = twerl
+pkg_twerl_description = Erlang client for the Twitter Streaming API
+pkg_twerl_homepage = https://github.com/lucaspiller/twerl
+pkg_twerl_fetch = git
+pkg_twerl_repo = https://github.com/lucaspiller/twerl
+pkg_twerl_commit = oauth
+
+PACKAGES += twitter_erlang
+pkg_twitter_erlang_name = twitter_erlang
+pkg_twitter_erlang_description = An Erlang twitter client
+pkg_twitter_erlang_homepage = https://github.com/ngerakines/erlang_twitter
+pkg_twitter_erlang_fetch = git
+pkg_twitter_erlang_repo = https://github.com/ngerakines/erlang_twitter
+pkg_twitter_erlang_commit = master
+
+PACKAGES += ucol_nif
+pkg_ucol_nif_name = ucol_nif
+pkg_ucol_nif_description = ICU based collation Erlang module
+pkg_ucol_nif_homepage = https://github.com/refuge/ucol_nif
+pkg_ucol_nif_fetch = git
+pkg_ucol_nif_repo = https://github.com/refuge/ucol_nif
+pkg_ucol_nif_commit = master
+
+PACKAGES += unicorn
+pkg_unicorn_name = unicorn
+pkg_unicorn_description = Generic configuration server
+pkg_unicorn_homepage = https://github.com/shizzard/unicorn
+pkg_unicorn_fetch = git
+pkg_unicorn_repo = https://github.com/shizzard/unicorn
+pkg_unicorn_commit = master
+
+PACKAGES += unsplit
+pkg_unsplit_name = unsplit
+pkg_unsplit_description = Resolves conflicts in Mnesia after network splits
+pkg_unsplit_homepage = https://github.com/uwiger/unsplit
+pkg_unsplit_fetch = git
+pkg_unsplit_repo = https://github.com/uwiger/unsplit
+pkg_unsplit_commit = master
+
+PACKAGES += uuid
+pkg_uuid_name = uuid
+pkg_uuid_description = Erlang UUID Implementation
+pkg_uuid_homepage = https://github.com/okeuday/uuid
+pkg_uuid_fetch = git
+pkg_uuid_repo = https://github.com/okeuday/uuid
+pkg_uuid_commit = master
+
+PACKAGES += ux
+pkg_ux_name = ux
+pkg_ux_description = Unicode eXtension for Erlang (Strings, Collation)
+pkg_ux_homepage = https://github.com/erlang-unicode/ux
+pkg_ux_fetch = git
+pkg_ux_repo = https://github.com/erlang-unicode/ux
+pkg_ux_commit = master
+
+PACKAGES += vert
+pkg_vert_name = vert
+pkg_vert_description = Erlang binding to libvirt virtualization API
+pkg_vert_homepage = https://github.com/msantos/erlang-libvirt
+pkg_vert_fetch = git
+pkg_vert_repo = https://github.com/msantos/erlang-libvirt
+pkg_vert_commit = master
+
+PACKAGES += verx
+pkg_verx_name = verx
+pkg_verx_description = Erlang implementation of the libvirtd remote protocol
+pkg_verx_homepage = https://github.com/msantos/verx
+pkg_verx_fetch = git
+pkg_verx_repo = https://github.com/msantos/verx
+pkg_verx_commit = master
+
+PACKAGES += vmq_acl
+pkg_vmq_acl_name = vmq_acl
+pkg_vmq_acl_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_acl_homepage = https://verne.mq/
+pkg_vmq_acl_fetch = git
+pkg_vmq_acl_repo = https://github.com/erlio/vmq_acl
+pkg_vmq_acl_commit = master
+
+PACKAGES += vmq_bridge
+pkg_vmq_bridge_name = vmq_bridge
+pkg_vmq_bridge_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_bridge_homepage = https://verne.mq/
+pkg_vmq_bridge_fetch = git
+pkg_vmq_bridge_repo = https://github.com/erlio/vmq_bridge
+pkg_vmq_bridge_commit = master
+
+PACKAGES += vmq_graphite
+pkg_vmq_graphite_name = vmq_graphite
+pkg_vmq_graphite_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_graphite_homepage = https://verne.mq/
+pkg_vmq_graphite_fetch = git
+pkg_vmq_graphite_repo = https://github.com/erlio/vmq_graphite
+pkg_vmq_graphite_commit = master
+
+PACKAGES += vmq_passwd
+pkg_vmq_passwd_name = vmq_passwd
+pkg_vmq_passwd_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_passwd_homepage = https://verne.mq/
+pkg_vmq_passwd_fetch = git
+pkg_vmq_passwd_repo = https://github.com/erlio/vmq_passwd
+pkg_vmq_passwd_commit = master
+
+PACKAGES += vmq_server
+pkg_vmq_server_name = vmq_server
+pkg_vmq_server_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_server_homepage = https://verne.mq/
+pkg_vmq_server_fetch = git
+pkg_vmq_server_repo = https://github.com/erlio/vmq_server
+pkg_vmq_server_commit = master
+
+PACKAGES += vmq_snmp
+pkg_vmq_snmp_name = vmq_snmp
+pkg_vmq_snmp_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_snmp_homepage = https://verne.mq/
+pkg_vmq_snmp_fetch = git
+pkg_vmq_snmp_repo = https://github.com/erlio/vmq_snmp
+pkg_vmq_snmp_commit = master
+
+PACKAGES += vmq_systree
+pkg_vmq_systree_name = vmq_systree
+pkg_vmq_systree_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_systree_homepage = https://verne.mq/
+pkg_vmq_systree_fetch = git
+pkg_vmq_systree_repo = https://github.com/erlio/vmq_systree
+pkg_vmq_systree_commit = master
+
+PACKAGES += vmstats
+pkg_vmstats_name = vmstats
+pkg_vmstats_description = Tiny Erlang app that works in conjunction with statsderl to generate Erlang VM metrics for Graphite.
+pkg_vmstats_homepage = https://github.com/ferd/vmstats
+pkg_vmstats_fetch = git
+pkg_vmstats_repo = https://github.com/ferd/vmstats
+pkg_vmstats_commit = master
+
+PACKAGES += walrus
+pkg_walrus_name = walrus
+pkg_walrus_description = Walrus - Mustache-like Templating
+pkg_walrus_homepage = https://github.com/devinus/walrus
+pkg_walrus_fetch = git
+pkg_walrus_repo = https://github.com/devinus/walrus
+pkg_walrus_commit = master
+
+PACKAGES += webmachine
+pkg_webmachine_name = webmachine
+pkg_webmachine_description = A REST-based system for building web applications.
+pkg_webmachine_homepage = https://github.com/basho/webmachine
+pkg_webmachine_fetch = git
+pkg_webmachine_repo = https://github.com/basho/webmachine
+pkg_webmachine_commit = master
+
+PACKAGES += websocket_client
+pkg_websocket_client_name = websocket_client
+pkg_websocket_client_description = Erlang websocket client (ws and wss supported)
+pkg_websocket_client_homepage = https://github.com/jeremyong/websocket_client
+pkg_websocket_client_fetch = git
+pkg_websocket_client_repo = https://github.com/jeremyong/websocket_client
+pkg_websocket_client_commit = master
+
+PACKAGES += worker_pool
+pkg_worker_pool_name = worker_pool
+pkg_worker_pool_description = A simple Erlang worker pool
+pkg_worker_pool_homepage = https://github.com/inaka/worker_pool
+pkg_worker_pool_fetch = git
+pkg_worker_pool_repo = https://github.com/inaka/worker_pool
+pkg_worker_pool_commit = master
+
+PACKAGES += wrangler
+pkg_wrangler_name = wrangler
+pkg_wrangler_description = Import of the Wrangler svn repository.
+pkg_wrangler_homepage = http://www.cs.kent.ac.uk/projects/wrangler/Home.html
+pkg_wrangler_fetch = git
+pkg_wrangler_repo = https://github.com/RefactoringTools/wrangler
+pkg_wrangler_commit = master
+
+PACKAGES += wsock
+pkg_wsock_name = wsock
+pkg_wsock_description = Erlang library to build WebSocket clients and servers
+pkg_wsock_homepage = https://github.com/madtrick/wsock
+pkg_wsock_fetch = git
+pkg_wsock_repo = https://github.com/madtrick/wsock
+pkg_wsock_commit = master
+
+PACKAGES += xhttpc
+pkg_xhttpc_name = xhttpc
+pkg_xhttpc_description = Extensible HTTP Client for Erlang
+pkg_xhttpc_homepage = https://github.com/seriyps/xhttpc
+pkg_xhttpc_fetch = git
+pkg_xhttpc_repo = https://github.com/seriyps/xhttpc
+pkg_xhttpc_commit = master
+
+PACKAGES += xref_runner
+pkg_xref_runner_name = xref_runner
+pkg_xref_runner_description = Erlang Xref Runner (inspired by rebar xref)
+pkg_xref_runner_homepage = https://github.com/inaka/xref_runner
+pkg_xref_runner_fetch = git
+pkg_xref_runner_repo = https://github.com/inaka/xref_runner
+pkg_xref_runner_commit = master
+
+PACKAGES += yamerl
+pkg_yamerl_name = yamerl
+pkg_yamerl_description = YAML 1.2 parser in pure Erlang
+pkg_yamerl_homepage = https://github.com/yakaz/yamerl
+pkg_yamerl_fetch = git
+pkg_yamerl_repo = https://github.com/yakaz/yamerl
+pkg_yamerl_commit = master
+
+PACKAGES += yamler
+pkg_yamler_name = yamler
+pkg_yamler_description = libyaml-based yaml loader for Erlang
+pkg_yamler_homepage = https://github.com/goertzenator/yamler
+pkg_yamler_fetch = git
+pkg_yamler_repo = https://github.com/goertzenator/yamler
+pkg_yamler_commit = master
+
+PACKAGES += yaws
+pkg_yaws_name = yaws
+pkg_yaws_description = Yaws webserver
+pkg_yaws_homepage = http://yaws.hyber.org
+pkg_yaws_fetch = git
+pkg_yaws_repo = https://github.com/klacke/yaws
+pkg_yaws_commit = master
+
+PACKAGES += zab_engine
+pkg_zab_engine_name = zab_engine
+pkg_zab_engine_description = ZAB protocol implementation in Erlang
+pkg_zab_engine_homepage = https://github.com/xinmingyao/zab_engine
+pkg_zab_engine_fetch = git
+pkg_zab_engine_repo = https://github.com/xinmingyao/zab_engine
+pkg_zab_engine_commit = master
+
+PACKAGES += zabbix_sender
+pkg_zabbix_sender_name = zabbix_sender
+pkg_zabbix_sender_description = Zabbix trapper for sending data to Zabbix in pure Erlang
+pkg_zabbix_sender_homepage = https://github.com/stalkermn/zabbix_sender
+pkg_zabbix_sender_fetch = git
+pkg_zabbix_sender_repo = https://github.com/stalkermn/zabbix_sender.git
+pkg_zabbix_sender_commit = master
+
+PACKAGES += zeta
+pkg_zeta_name = zeta
+pkg_zeta_description = HTTP access log parser in Erlang
+pkg_zeta_homepage = https://github.com/s1n4/zeta
+pkg_zeta_fetch = git
+pkg_zeta_repo = https://github.com/s1n4/zeta
+pkg_zeta_commit = master
+
+PACKAGES += zippers
+pkg_zippers_name = zippers
+pkg_zippers_description = A library for functional zipper data structures in Erlang.
+pkg_zippers_homepage = https://github.com/ferd/zippers
+pkg_zippers_fetch = git
+pkg_zippers_repo = https://github.com/ferd/zippers
+pkg_zippers_commit = master
+
+PACKAGES += zlists
+pkg_zlists_name = zlists
+pkg_zlists_description = Erlang lazy lists library.
+pkg_zlists_homepage = https://github.com/vjache/erlang-zlists
+pkg_zlists_fetch = git
+pkg_zlists_repo = https://github.com/vjache/erlang-zlists
+pkg_zlists_commit = master
+
+PACKAGES += zraft_lib
+pkg_zraft_lib_name = zraft_lib
+pkg_zraft_lib_description = Erlang raft consensus protocol implementation
+pkg_zraft_lib_homepage = https://github.com/dreyk/zraft_lib
+pkg_zraft_lib_fetch = git
+pkg_zraft_lib_repo = https://github.com/dreyk/zraft_lib
+pkg_zraft_lib_commit = master
+
+PACKAGES += zucchini
+pkg_zucchini_name = zucchini
+pkg_zucchini_description = An Erlang INI parser
+pkg_zucchini_homepage = https://github.com/devinus/zucchini
+pkg_zucchini_fetch = git
+pkg_zucchini_repo = https://github.com/devinus/zucchini
+pkg_zucchini_commit = master
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: search
+
+define pkg_print
+ $(verbose) printf "%s\n" \
+ $(if $(call core_eq,$(1),$(pkg_$(1)_name)),,"Pkg name: $(1)") \
+ "App name: $(pkg_$(1)_name)" \
+ "Description: $(pkg_$(1)_description)" \
+ "Home page: $(pkg_$(1)_homepage)" \
+ "Fetch with: $(pkg_$(1)_fetch)" \
+ "Repository: $(pkg_$(1)_repo)" \
+ "Commit: $(pkg_$(1)_commit)" \
+ ""
+
+endef
+
+search:
+ifdef q
+ $(foreach p,$(PACKAGES), \
+ $(if $(findstring $(call core_lc,$(q)),$(call core_lc,$(pkg_$(p)_name) $(pkg_$(p)_description))), \
+ $(call pkg_print,$(p))))
+else
+ $(foreach p,$(PACKAGES),$(call pkg_print,$(p)))
+endif
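+
+# For example, the following prints every package whose name or description
+# mentions websockets (an illustrative query; matching is case-insensitive):
+#
+#     make search q=websocket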
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-deps clean-tmp-deps.log
+
+# Configuration.
+
+ifdef OTP_DEPS
+$(warning The variable OTP_DEPS is deprecated in favor of LOCAL_DEPS.)
+endif
+
+IGNORE_DEPS ?=
+export IGNORE_DEPS
+
+APPS_DIR ?= $(CURDIR)/apps
+export APPS_DIR
+
+DEPS_DIR ?= $(CURDIR)/deps
+export DEPS_DIR
+
+REBAR_DEPS_DIR = $(DEPS_DIR)
+export REBAR_DEPS_DIR
+
+REBAR_GIT ?= https://github.com/rebar/rebar
+REBAR_COMMIT ?= 576e12171ab8d69b048b827b92aa65d067deea01
+
+# External "early" plugins (see core/plugins.mk for regular plugins).
+# Both kinds of plugins use the core_dep_plugin macro.
+
+define core_dep_plugin
+ifeq ($(2),$(PROJECT))
+-include $$(patsubst $(PROJECT)/%,%,$(1))
+else
+-include $(DEPS_DIR)/$(1)
+
+$(DEPS_DIR)/$(1): $(DEPS_DIR)/$(2) ;
+endif
+endef
+
+DEP_EARLY_PLUGINS ?=
+
+$(foreach p,$(DEP_EARLY_PLUGINS),\
+ $(eval $(if $(findstring /,$p),\
+ $(call core_dep_plugin,$p,$(firstword $(subst /, ,$p))),\
+ $(call core_dep_plugin,$p/early-plugins.mk,$p))))
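+
+# For example (an illustrative sketch; my_dep is a placeholder name):
+#
+#     DEP_EARLY_PLUGINS = my_dep             # loads $(DEPS_DIR)/my_dep/early-plugins.mk
+#     DEP_EARLY_PLUGINS = my_dep/mk/early.mk # loads that specific file from the dep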
+
+# Query functions.
+
+query_fetch_method = $(if $(dep_$(1)),$(call _qfm_dep,$(word 1,$(dep_$(1)))),$(call _qfm_pkg,$(1)))
+_qfm_dep = $(if $(dep_fetch_$(1)),$(1),$(if $(IS_DEP),legacy,fail))
+_qfm_pkg = $(if $(pkg_$(1)_fetch),$(pkg_$(1)_fetch),fail)
+
+query_name = $(if $(dep_$(1)),$(1),$(if $(pkg_$(1)_name),$(pkg_$(1)_name),$(1)))
+
+query_repo = $(call _qr,$(1),$(call query_fetch_method,$(1)))
+_qr = $(if $(query_repo_$(2)),$(call query_repo_$(2),$(1)),$(call dep_repo,$(1)))
+
+query_repo_default = $(if $(dep_$(1)),$(word 2,$(dep_$(1))),$(pkg_$(1)_repo))
+query_repo_git = $(patsubst git://github.com/%,https://github.com/%,$(call query_repo_default,$(1)))
+query_repo_git-subfolder = $(call query_repo_git,$(1))
+query_repo_git-submodule = -
+query_repo_hg = $(call query_repo_default,$(1))
+query_repo_svn = $(call query_repo_default,$(1))
+query_repo_cp = $(call query_repo_default,$(1))
+query_repo_ln = $(call query_repo_default,$(1))
+query_repo_hex = https://hex.pm/packages/$(if $(word 3,$(dep_$(1))),$(word 3,$(dep_$(1))),$(1))
+query_repo_fail = -
+query_repo_legacy = -
+
+query_version = $(call _qv,$(1),$(call query_fetch_method,$(1)))
+_qv = $(if $(query_version_$(2)),$(call query_version_$(2),$(1)),$(call dep_commit,$(1)))
+
+query_version_default = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(word 3,$(dep_$(1))),$(pkg_$(1)_commit)))
+query_version_git = $(call query_version_default,$(1))
+query_version_git-subfolder = $(call query_version_git,$(1))
+query_version_git-submodule = -
+query_version_hg = $(call query_version_default,$(1))
+query_version_svn = -
+query_version_cp = -
+query_version_ln = -
+query_version_hex = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(word 2,$(dep_$(1))),$(pkg_$(1)_commit)))
+query_version_fail = -
+query_version_legacy = -
+
+query_extra = $(call _qe,$(1),$(call query_fetch_method,$(1)))
+_qe = $(if $(query_extra_$(2)),$(call query_extra_$(2),$(1)),-)
+
+query_extra_git = -
+query_extra_git-subfolder = $(if $(dep_$(1)),subfolder=$(word 4,$(dep_$(1))),-)
+query_extra_git-submodule = -
+query_extra_hg = -
+query_extra_svn = -
+query_extra_cp = -
+query_extra_ln = -
+query_extra_hex = $(if $(dep_$(1)),package-name=$(word 3,$(dep_$(1))),-)
+query_extra_fail = -
+query_extra_legacy = -
+
+query_absolute_path = $(addprefix $(DEPS_DIR)/,$(call query_name,$(1)))
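+
+# As a worked example (illustrative; the cowboy declaration below exists
+# only for the sake of the sketch), given
+#
+#     dep_cowboy = git https://github.com/ninenines/cowboy 2.9.0
+#
+# the query functions resolve as follows:
+#
+#     $(call query_fetch_method,cowboy)  -> git
+#     $(call query_repo,cowboy)          -> https://github.com/ninenines/cowboy
+#     $(call query_version,cowboy)       -> 2.9.0
+#     $(call query_absolute_path,cowboy) -> $(DEPS_DIR)/cowboy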
+
+# Deprecated legacy query functions.
+dep_fetch = $(call query_fetch_method,$(1))
+dep_name = $(call query_name,$(1))
+dep_repo = $(call query_repo_git,$(1))
+dep_commit = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(if $(filter hex,$(word 1,$(dep_$(1)))),$(word 2,$(dep_$(1))),$(word 3,$(dep_$(1)))),$(pkg_$(1)_commit)))
+
+LOCAL_DEPS_DIRS = $(foreach a,$(LOCAL_DEPS),$(if $(wildcard $(APPS_DIR)/$(a)),$(APPS_DIR)/$(a)))
+ALL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(foreach dep,$(filter-out $(IGNORE_DEPS),$(BUILD_DEPS) $(DEPS)),$(call dep_name,$(dep))))
+
+# When we are calling an app directly we don't want to include it here
+# otherwise it'll be treated both as an app and as a top-level project.
+ALL_APPS_DIRS = $(if $(wildcard $(APPS_DIR)/),$(filter-out $(APPS_DIR),$(shell find $(APPS_DIR) -maxdepth 1 -type d)))
+ifdef ROOT_DIR
+ifndef IS_APP
+ALL_APPS_DIRS := $(filter-out $(APPS_DIR)/$(notdir $(CURDIR)),$(ALL_APPS_DIRS))
+endif
+endif
+
+ifeq ($(filter $(APPS_DIR) $(DEPS_DIR),$(subst :, ,$(ERL_LIBS))),)
+ifeq ($(ERL_LIBS),)
+ ERL_LIBS = $(APPS_DIR):$(DEPS_DIR)
+else
+ ERL_LIBS := $(ERL_LIBS):$(APPS_DIR):$(DEPS_DIR)
+endif
+endif
+export ERL_LIBS
+
+export NO_AUTOPATCH
+
+# Verbosity.
+
+dep_verbose_0 = @echo " DEP $1 ($(call dep_commit,$1))";
+dep_verbose_2 = set -x;
+dep_verbose = $(dep_verbose_$(V))
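+
+# As with the other *_verbose switches below, the V variable selects the
+# verbosity level; for instance "make V=2" traces every dependency command
+# with set -x (illustrative).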
+
+# Optimization: don't recompile deps unless truly necessary.
+
+ifndef IS_DEP
+ifneq ($(MAKELEVEL),0)
+$(shell rm -f ebin/dep_built)
+endif
+endif
+
+# Core targets.
+
+ALL_APPS_DIRS_TO_BUILD = $(if $(LOCAL_DEPS_DIRS)$(IS_APP),$(LOCAL_DEPS_DIRS),$(ALL_APPS_DIRS))
+
+apps:: $(ALL_APPS_DIRS) clean-tmp-deps.log | $(ERLANG_MK_TMP)
+# Create an ebin directory for all apps to make sure Erlang recognizes them
+# as proper OTP applications when using -include_lib. This is a temporary
+# fix; a proper fix would be to compile apps/* in the right order.
+ifndef IS_APP
+ifneq ($(ALL_APPS_DIRS),)
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ mkdir -p $$dep/ebin; \
+ done
+endif
+endif
+# At the toplevel: if LOCAL_DEPS is defined with at least one local app, only
+# compile that list of apps. Otherwise, compile everything.
+# Within an app: compile all LOCAL_DEPS that are (uncompiled) local apps.
+ifneq ($(ALL_APPS_DIRS_TO_BUILD),)
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS_TO_BUILD); do \
+ if grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/apps.log; then \
+ :; \
+ else \
+ echo $$dep >> $(ERLANG_MK_TMP)/apps.log; \
+ $(MAKE) -C $$dep $(if $(IS_TEST),test-build-app) IS_APP=1; \
+ fi \
+ done
+endif
+
+clean-tmp-deps.log:
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_TMP)/apps.log $(ERLANG_MK_TMP)/deps.log
+endif
+
+# Erlang.mk does not rebuild dependencies after they have been compiled
+# once. Developers working on the top-level project and some of its
+# dependencies at the same time may want to change this behavior. There are
+# two solutions:
+# 1. Set `FULL=1` so that all dependencies are visited and
+#    recursively recompiled if necessary.
+# 2. Set `FORCE_REBUILD` to the specific list of dependencies that
+#    should be recompiled (instead of the whole set).
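+#
+# For example (illustrative):
+#
+#     make FULL=1
+#     make FORCE_REBUILD="cowboy ranch"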
+
+FORCE_REBUILD ?=
+
+ifeq ($(origin FULL),undefined)
+ifneq ($(strip $(force_rebuild_dep)$(FORCE_REBUILD)),)
+define force_rebuild_dep
+echo "$(FORCE_REBUILD)" | grep -qw "$$(basename "$1")"
+endef
+endif
+endif
+
+ifneq ($(SKIP_DEPS),)
+deps::
+else
+deps:: $(ALL_DEPS_DIRS) apps clean-tmp-deps.log | $(ERLANG_MK_TMP)
+ifneq ($(ALL_DEPS_DIRS),)
+ $(verbose) set -e; for dep in $(ALL_DEPS_DIRS); do \
+ if grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/deps.log; then \
+ :; \
+ else \
+ echo $$dep >> $(ERLANG_MK_TMP)/deps.log; \
+ if [ -z "$(strip $(FULL))" ] $(if $(force_rebuild_dep),&& ! ($(call force_rebuild_dep,$$dep)),) && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ elif [ -f $$dep/GNUmakefile ] || [ -f $$dep/makefile ] || [ -f $$dep/Makefile ]; then \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ else \
+ echo "Error: No Makefile to build dependency $$dep." >&2; \
+ exit 2; \
+ fi \
+ fi \
+ done
+endif
+endif
+
+# Deps related targets.
+
+# @todo Rename GNUmakefile and makefile to Makefile first, if they exist.
+# While the Makefile could also be named GNUmakefile or makefile,
+# in practice only Makefile has been needed so far.
+define dep_autopatch
+ if [ -f $(DEPS_DIR)/$(1)/erlang.mk ]; then \
+ rm -rf $(DEPS_DIR)/$1/ebin/; \
+ $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+ $(call dep_autopatch_erlang_mk,$(1)); \
+ elif [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+ if [ -f $(DEPS_DIR)/$1/rebar.lock ]; then \
+ $(call dep_autopatch2,$1); \
+ elif [ 0 != `grep -c "include ../\w*\.mk" $(DEPS_DIR)/$(1)/Makefile` ]; then \
+ $(call dep_autopatch2,$(1)); \
+ elif [ 0 != `grep -ci "^[^#].*rebar" $(DEPS_DIR)/$(1)/Makefile` ]; then \
+ $(call dep_autopatch2,$(1)); \
+ elif [ -n "`find $(DEPS_DIR)/$(1)/ -type f -name \*.mk -not -name erlang.mk -exec grep -i "^[^#].*rebar" '{}' \;`" ]; then \
+ $(call dep_autopatch2,$(1)); \
+ fi \
+ else \
+ if [ ! -d $(DEPS_DIR)/$(1)/src/ ]; then \
+ $(call dep_autopatch_noop,$(1)); \
+ else \
+ $(call dep_autopatch2,$(1)); \
+ fi \
+ fi
+endef
+
+define dep_autopatch2
+ ! test -f $(DEPS_DIR)/$1/ebin/$1.app || \
+ mv -n $(DEPS_DIR)/$1/ebin/$1.app $(DEPS_DIR)/$1/src/$1.app.src; \
+ rm -f $(DEPS_DIR)/$1/ebin/$1.app; \
+ if [ -f $(DEPS_DIR)/$1/src/$1.app.src.script ]; then \
+ $(call erlang,$(call dep_autopatch_appsrc_script.erl,$(1))); \
+ fi; \
+ $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+ if [ -f $(DEPS_DIR)/$(1)/rebar -o -f $(DEPS_DIR)/$(1)/rebar.config -o -f $(DEPS_DIR)/$(1)/rebar.config.script -o -f $(DEPS_DIR)/$1/rebar.lock ]; then \
+ $(call dep_autopatch_fetch_rebar); \
+ $(call dep_autopatch_rebar,$(1)); \
+ else \
+ $(call dep_autopatch_gen,$(1)); \
+ fi
+endef
+
+define dep_autopatch_noop
+ printf "noop:\n" > $(DEPS_DIR)/$(1)/Makefile
+endef
+
+# Replace "include erlang.mk" with a line that will load the parent Erlang.mk
+# if given. Do it for all 3 possible Makefile file names.
+ifeq ($(NO_AUTOPATCH_ERLANG_MK),)
+define dep_autopatch_erlang_mk
+ for f in Makefile makefile GNUmakefile; do \
+ if [ -f $(DEPS_DIR)/$1/$$f ]; then \
+ sed -i.bak s/'include *erlang.mk'/'include $$(if $$(ERLANG_MK_FILENAME),$$(ERLANG_MK_FILENAME),erlang.mk)'/ $(DEPS_DIR)/$1/$$f; \
+ fi \
+ done
+endef
+else
+define dep_autopatch_erlang_mk
+ :
+endef
+endif
+
+define dep_autopatch_gen
+ printf "%s\n" \
+ "ERLC_OPTS = +debug_info" \
+ "include ../../erlang.mk" > $(DEPS_DIR)/$(1)/Makefile
+endef
+
+# We use flock/lockf when available to avoid concurrency issues.
+define dep_autopatch_fetch_rebar
+ if command -v flock >/dev/null; then \
+ flock $(ERLANG_MK_TMP)/rebar.lock sh -c "$(call dep_autopatch_fetch_rebar2)"; \
+ elif command -v lockf >/dev/null; then \
+ lockf $(ERLANG_MK_TMP)/rebar.lock sh -c "$(call dep_autopatch_fetch_rebar2)"; \
+ else \
+ $(call dep_autopatch_fetch_rebar2); \
+ fi
+endef
+
+define dep_autopatch_fetch_rebar2
+ if [ ! -d $(ERLANG_MK_TMP)/rebar ]; then \
+ git clone -q -n -- $(REBAR_GIT) $(ERLANG_MK_TMP)/rebar; \
+ cd $(ERLANG_MK_TMP)/rebar; \
+ git checkout -q $(REBAR_COMMIT); \
+ ./bootstrap; \
+ cd -; \
+ fi
+endef
+
+define dep_autopatch_rebar
+ if [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+ mv $(DEPS_DIR)/$(1)/Makefile $(DEPS_DIR)/$(1)/Makefile.orig.mk; \
+ fi; \
+ $(call erlang,$(call dep_autopatch_rebar.erl,$(1))); \
+ rm -f $(DEPS_DIR)/$(1)/ebin/$(1).app
+endef
+
+define dep_autopatch_rebar.erl
+ application:load(rebar),
+ application:set_env(rebar, log_level, debug),
+ rmemo:start(),
+ Conf1 = case file:consult("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config)") of
+ {ok, Conf0} -> Conf0;
+ _ -> []
+ end,
+ {Conf, OsEnv} = fun() ->
+ case filelib:is_file("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)") of
+ false -> {Conf1, []};
+ true ->
+ Bindings0 = erl_eval:new_bindings(),
+ Bindings1 = erl_eval:add_binding('CONFIG', Conf1, Bindings0),
+ Bindings = erl_eval:add_binding('SCRIPT', "$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings1),
+ Before = os:getenv(),
+ {ok, Conf2} = file:script("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings),
+ {Conf2, lists:foldl(fun(E, Acc) -> lists:delete(E, Acc) end, os:getenv(), Before)}
+ end
+ end(),
+ Write = fun (Text) ->
+ file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/Makefile)", Text, [append])
+ end,
+ Escape = fun (Text) ->
+ re:replace(Text, "\\\\$$", "\$$$$", [global, {return, list}])
+ end,
+ Write("IGNORE_DEPS += edown eper eunit_formatters meck node_package "
+ "rebar_lock_deps_plugin rebar_vsn_plugin reltool_util\n"),
+ Write("C_SRC_DIR = /path/do/not/exist\n"),
+ Write("C_SRC_TYPE = rebar\n"),
+ Write("DRV_CFLAGS = -fPIC\nexport DRV_CFLAGS\n"),
+ Write(["ERLANG_ARCH = ", rebar_utils:wordsize(), "\nexport ERLANG_ARCH\n"]),
+ ToList = fun
+ (V) when is_atom(V) -> atom_to_list(V);
+ (V) when is_list(V) -> "'\\"" ++ V ++ "\\"'"
+ end,
+ fun() ->
+ Write("ERLC_OPTS = +debug_info\nexport ERLC_OPTS\n"),
+ case lists:keyfind(erl_opts, 1, Conf) of
+ false -> ok;
+ {_, ErlOpts} ->
+ lists:foreach(fun
+ ({d, D}) ->
+ Write("ERLC_OPTS += -D" ++ ToList(D) ++ "=1\n");
+ ({d, DKey, DVal}) ->
+ Write("ERLC_OPTS += -D" ++ ToList(DKey) ++ "=" ++ ToList(DVal) ++ "\n");
+ ({i, I}) ->
+ Write(["ERLC_OPTS += -I ", I, "\n"]);
+ ({platform_define, Regex, D}) ->
+ case rebar_utils:is_arch(Regex) of
+ true -> Write("ERLC_OPTS += -D" ++ ToList(D) ++ "=1\n");
+ false -> ok
+ end;
+ ({parse_transform, PT}) ->
+ Write("ERLC_OPTS += +'{parse_transform, " ++ ToList(PT) ++ "}'\n");
+ (_) -> ok
+ end, ErlOpts)
+ end,
+ Write("\n")
+ end(),
+ GetHexVsn = fun(N, NP) ->
+ case file:consult("$(call core_native_path,$(DEPS_DIR)/$1/rebar.lock)") of
+ {ok, Lock} ->
+ io:format("~p~n", [Lock]),
+ case lists:keyfind("1.1.0", 1, Lock) of
+ {_, LockPkgs} ->
+ io:format("~p~n", [LockPkgs]),
+ case lists:keyfind(atom_to_binary(N, latin1), 1, LockPkgs) of
+ {_, {pkg, _, Vsn}, _} ->
+ io:format("~p~n", [Vsn]),
+ {N, {hex, NP, binary_to_list(Vsn)}};
+ _ ->
+ false
+ end;
+ _ ->
+ false
+ end;
+ _ ->
+ false
+ end
+ end,
+ SemVsn = fun
+ ("~>" ++ S0) ->
+ S = case S0 of
+ " " ++ S1 -> S1;
+ _ -> S0
+ end,
+ case length([ok || $$. <- S]) of
+ 0 -> S ++ ".0.0";
+ 1 -> S ++ ".0";
+ _ -> S
+ end;
+ (S) -> S
+ end,
+ fun() ->
+ File = case lists:keyfind(deps, 1, Conf) of
+ false -> [];
+ {_, Deps} ->
+ [begin case case Dep of
+ N when is_atom(N) -> GetHexVsn(N, N);
+ {N, S} when is_atom(N), is_list(S) -> {N, {hex, N, SemVsn(S)}};
+ {N, {pkg, NP}} when is_atom(N) -> GetHexVsn(N, NP);
+ {N, S, {pkg, NP}} -> {N, {hex, NP, S}};
+ {N, S} when is_tuple(S) -> {N, S};
+ {N, _, S} -> {N, S};
+ {N, _, S, _} -> {N, S};
+ _ -> false
+ end of
+ false -> ok;
+ {Name, Source} ->
+ {Method, Repo, Commit} = case Source of
+ {hex, NPV, V} -> {hex, V, NPV};
+ {git, R} -> {git, R, master};
+ {M, R, {branch, C}} -> {M, R, C};
+ {M, R, {ref, C}} -> {M, R, C};
+ {M, R, {tag, C}} -> {M, R, C};
+ {M, R, C} -> {M, R, C}
+ end,
+ Write(io_lib:format("DEPS += ~s\ndep_~s = ~s ~s ~s~n", [Name, Name, Method, Repo, Commit]))
+ end end || Dep <- Deps]
+ end
+ end(),
+ fun() ->
+ case lists:keyfind(erl_first_files, 1, Conf) of
+ false -> ok;
+ {_, Files} ->
+ Names = [[" ", case lists:reverse(F) of
+ "lre." ++ Elif -> lists:reverse(Elif);
+ "lrx." ++ Elif -> lists:reverse(Elif);
+ "lry." ++ Elif -> lists:reverse(Elif);
+ Elif -> lists:reverse(Elif)
+ end] || "src/" ++ F <- Files],
+ Write(io_lib:format("COMPILE_FIRST +=~s\n", [Names]))
+ end
+ end(),
+ Write("\n\nrebar_dep: preprocess pre-deps deps pre-app app\n"),
+ Write("\npreprocess::\n"),
+ Write("\npre-deps::\n"),
+ Write("\npre-app::\n"),
+ PatchHook = fun(Cmd) ->
+ Cmd2 = re:replace(Cmd, "^([g]?make)(.*)( -C.*)", "\\\\1\\\\3\\\\2", [{return, list}]),
+ case Cmd2 of
+ "make -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1);
+ "gmake -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1);
+ "make " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1);
+ "gmake " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1);
+ _ -> Escape(Cmd)
+ end
+ end,
+ fun() ->
+ case lists:keyfind(pre_hooks, 1, Conf) of
+ false -> ok;
+ {_, Hooks} ->
+ [case H of
+ {'get-deps', Cmd} ->
+ Write("\npre-deps::\n\t" ++ PatchHook(Cmd) ++ "\n");
+ {compile, Cmd} ->
+ Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n");
+ {Regex, compile, Cmd} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n");
+ false -> ok
+ end;
+ _ -> ok
+ end || H <- Hooks]
+ end
+ end(),
+ ShellToMk = fun(V0) ->
+ V1 = re:replace(V0, "[$$][(]", "$$\(shell ", [global]),
+ V = re:replace(V1, "([$$])(?![(])(\\\\w*)", "\\\\1(\\\\2)", [global]),
+ re:replace(V, "-Werror\\\\b", "", [{return, list}, global])
+ end,
+ PortSpecs = fun() ->
+ case lists:keyfind(port_specs, 1, Conf) of
+ false ->
+ case filelib:is_dir("$(call core_native_path,$(DEPS_DIR)/$1/c_src)") of
+ false -> [];
+ true ->
+ [{"priv/" ++ proplists:get_value(so_name, Conf, "$(1)_drv.so"),
+ proplists:get_value(port_sources, Conf, ["c_src/*.c"]), []}]
+ end;
+ {_, Specs} ->
+ lists:flatten([case S of
+ {Output, Input} -> {ShellToMk(Output), Input, []};
+ {Regex, Output, Input} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {ShellToMk(Output), Input, []};
+ false -> []
+ end;
+ {Regex, Output, Input, [{env, Env}]} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {ShellToMk(Output), Input, Env};
+ false -> []
+ end
+ end || S <- Specs])
+ end
+ end(),
+ PortSpecWrite = fun (Text) ->
+ file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/c_src/Makefile.erlang.mk)", Text, [append])
+ end,
+ case PortSpecs of
+ [] -> ok;
+ _ ->
+ Write("\npre-app::\n\t@$$\(MAKE) --no-print-directory -f c_src/Makefile.erlang.mk\n"),
+ PortSpecWrite(io_lib:format("ERL_CFLAGS ?= -finline-functions -Wall -fPIC -I \\"~s/erts-~s/include\\" -I \\"~s\\"\n",
+ [code:root_dir(), erlang:system_info(version), code:lib_dir(erl_interface, include)])),
+ PortSpecWrite(io_lib:format("ERL_LDFLAGS ?= -L \\"~s\\" -lei\n",
+ [code:lib_dir(erl_interface, lib)])),
+ [PortSpecWrite(["\n", E, "\n"]) || E <- OsEnv],
+ FilterEnv = fun(Env) ->
+ lists:flatten([case E of
+ {_, _} -> E;
+ {Regex, K, V} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {K, V};
+ false -> []
+ end
+ end || E <- Env])
+ end,
+ MergeEnv = fun(Env) ->
+ lists:foldl(fun ({K, V}, Acc) ->
+ case lists:keyfind(K, 1, Acc) of
+ false -> [{K, rebar_utils:expand_env_variable(V, K, "")}|Acc];
+ {_, V0} -> [{K, rebar_utils:expand_env_variable(V, K, V0)}|Acc]
+ end
+ end, [], Env)
+ end,
+ PortEnv = case lists:keyfind(port_env, 1, Conf) of
+ false -> [];
+ {_, PortEnv0} -> FilterEnv(PortEnv0)
+ end,
+ PortSpec = fun ({Output, Input0, Env}) ->
+ filelib:ensure_dir("$(call core_native_path,$(DEPS_DIR)/$1/)" ++ Output),
+ Input = [[" ", I] || I <- Input0],
+ PortSpecWrite([
+ [["\n", K, " = ", ShellToMk(V)] || {K, V} <- lists:reverse(MergeEnv(PortEnv))],
+ case $(PLATFORM) of
+ darwin -> "\n\nLDFLAGS += -flat_namespace -undefined suppress";
+ _ -> ""
+ end,
+ "\n\nall:: ", Output, "\n\t@:\n\n",
+ "%.o: %.c\n\t$$\(CC) -c -o $$\@ $$\< $$\(CFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.C\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.cc\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.cpp\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ [[Output, ": ", K, " += ", ShellToMk(V), "\n"] || {K, V} <- lists:reverse(MergeEnv(FilterEnv(Env)))],
+ Output, ": $$\(foreach ext,.c .C .cc .cpp,",
+ "$$\(patsubst %$$\(ext),%.o,$$\(filter %$$\(ext),$$\(wildcard", Input, "))))\n",
+ "\t$$\(CC) -o $$\@ $$\? $$\(LDFLAGS) $$\(ERL_LDFLAGS) $$\(DRV_LDFLAGS) $$\(EXE_LDFLAGS)",
+ case {filename:extension(Output), $(PLATFORM)} of
+ {[], _} -> "\n";
+ {_, darwin} -> "\n";
+ _ -> " -shared\n"
+ end])
+ end,
+ [PortSpec(S) || S <- PortSpecs]
+ end,
+ fun() ->
+ case lists:keyfind(plugins, 1, Conf) of
+ false -> ok;
+ {_, Plugins0} ->
+ Plugins = [P || P <- Plugins0, is_tuple(P)],
+ case lists:keyfind('lfe-compile', 1, Plugins) of
+ false -> ok;
+ _ -> Write("\nBUILD_DEPS = lfe lfe.mk\ndep_lfe.mk = git https://github.com/ninenines/lfe.mk master\nDEP_PLUGINS = lfe.mk\n")
+ end
+ end
+ end(),
+ Write("\ninclude $$\(if $$\(ERLANG_MK_FILENAME),$$\(ERLANG_MK_FILENAME),erlang.mk)"),
+ RunPlugin = fun(Plugin, Step) ->
+ case erlang:function_exported(Plugin, Step, 2) of
+ false -> ok;
+ true ->
+ c:cd("$(call core_native_path,$(DEPS_DIR)/$1/)"),
+ Ret = Plugin:Step({config, "", Conf, dict:new(), dict:new(), dict:new(),
+ dict:store(base_dir, "", dict:new())}, undefined),
+ io:format("rebar plugin ~p step ~p ret ~p~n", [Plugin, Step, Ret])
+ end
+ end,
+ fun() ->
+ case lists:keyfind(plugins, 1, Conf) of
+ false -> ok;
+ {_, Plugins0} ->
+ Plugins = [P || P <- Plugins0, is_atom(P)],
+ [begin
+ case lists:keyfind(deps, 1, Conf) of
+ false -> ok;
+ {_, Deps} ->
+ case lists:keyfind(P, 1, Deps) of
+ false -> ok;
+ _ ->
+ Path = "$(call core_native_path,$(DEPS_DIR)/)" ++ atom_to_list(P),
+ io:format("~s", [os:cmd("$(MAKE) -C $(call core_native_path,$(DEPS_DIR)/$1) " ++ Path)]),
+ io:format("~s", [os:cmd("$(MAKE) -C " ++ Path ++ " IS_DEP=1")]),
+ code:add_patha(Path ++ "/ebin")
+ end
+ end
+ end || P <- Plugins],
+ [case code:load_file(P) of
+ {module, P} -> ok;
+ _ ->
+ case lists:keyfind(plugin_dir, 1, Conf) of
+ false -> ok;
+ {_, PluginsDir} ->
+ ErlFile = "$(call core_native_path,$(DEPS_DIR)/$1/)" ++ PluginsDir ++ "/" ++ atom_to_list(P) ++ ".erl",
+ {ok, P, Bin} = compile:file(ErlFile, [binary]),
+ {module, P} = code:load_binary(P, ErlFile, Bin)
+ end
+ end || P <- Plugins],
+ [RunPlugin(P, preprocess) || P <- Plugins],
+ [RunPlugin(P, pre_compile) || P <- Plugins],
+ [RunPlugin(P, compile) || P <- Plugins]
+ end
+ end(),
+ halt()
+endef
+
+define dep_autopatch_appsrc_script.erl
+ AppSrc = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+ AppSrcScript = AppSrc ++ ".script",
+ {ok, Conf0} = file:consult(AppSrc),
+ Bindings0 = erl_eval:new_bindings(),
+ Bindings1 = erl_eval:add_binding('CONFIG', Conf0, Bindings0),
+ Bindings = erl_eval:add_binding('SCRIPT', AppSrcScript, Bindings1),
+ Conf = case file:script(AppSrcScript, Bindings) of
+ {ok, [C]} -> C;
+ {ok, C} -> C
+ end,
+ ok = file:write_file(AppSrc, io_lib:format("~p.~n", [Conf])),
+ halt()
+endef
+
+define dep_autopatch_appsrc.erl
+ AppSrcOut = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+ AppSrcIn = case filelib:is_regular(AppSrcOut) of false -> "$(call core_native_path,$(DEPS_DIR)/$1/ebin/$1.app)"; true -> AppSrcOut end,
+ case filelib:is_regular(AppSrcIn) of
+ false -> ok;
+ true ->
+ {ok, [{application, $(1), L0}]} = file:consult(AppSrcIn),
+ L1 = lists:keystore(modules, 1, L0, {modules, []}),
+ L2 = case lists:keyfind(vsn, 1, L1) of
+ {_, git} -> lists:keyreplace(vsn, 1, L1, {vsn, lists:droplast(os:cmd("git -C $(DEPS_DIR)/$1 describe --dirty --tags --always"))});
+ {_, {cmd, _}} -> lists:keyreplace(vsn, 1, L1, {vsn, "cmd"});
+ _ -> L1
+ end,
+ L3 = case lists:keyfind(registered, 1, L2) of false -> [{registered, []}|L2]; _ -> L2 end,
+ ok = file:write_file(AppSrcOut, io_lib:format("~p.~n", [{application, $(1), L3}])),
+ case AppSrcOut of AppSrcIn -> ok; _ -> ok = file:delete(AppSrcIn) end
+ end,
+ halt()
+endef
+
+define dep_fetch_git
+ git clone -q -n -- $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && git checkout -q $(call dep_commit,$(1));
+endef
+
+define dep_fetch_git-subfolder
+ mkdir -p $(ERLANG_MK_TMP)/git-subfolder; \
+ git clone -q -n -- $(call dep_repo,$1) \
+ $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1); \
+ cd $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1) \
+ && git checkout -q $(call dep_commit,$1); \
+ ln -s $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1)/$(word 4,$(dep_$(1))) \
+ $(DEPS_DIR)/$(call dep_name,$1);
+endef
+
+define dep_fetch_git-submodule
+ git submodule update --init -- $(DEPS_DIR)/$1;
+endef
+
+define dep_fetch_hg
+ hg clone -q -U $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && hg update -q $(call dep_commit,$(1));
+endef
+
+define dep_fetch_svn
+ svn checkout -q $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+define dep_fetch_cp
+ cp -R $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+define dep_fetch_ln
+ ln -s $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+# Hex dependencies only carry a package version; there is no need to look
+# them up in the Erlang.mk package index.
+define dep_fetch_hex
+ mkdir -p $(ERLANG_MK_TMP)/hex $(DEPS_DIR)/$1; \
+ $(call core_http_get,$(ERLANG_MK_TMP)/hex/$1.tar,\
+ https://repo.hex.pm/tarballs/$(if $(word 3,$(dep_$1)),$(word 3,$(dep_$1)),$1)-$(strip $(word 2,$(dep_$1))).tar); \
+ tar -xOf $(ERLANG_MK_TMP)/hex/$1.tar contents.tar.gz | tar -C $(DEPS_DIR)/$1 -xzf -;
+endef
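+
+# An illustrative declaration (jsx and the version are placeholders):
+#
+#     dep_jsx = hex 2.9.0
+#
+# or, when the Hex package name differs from the application name:
+#
+#     dep_foo = hex 1.0.0 actual_pkg_name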
+
+define dep_fetch_fail
+ echo "Error: Unknown or invalid dependency: $(1)." >&2; \
+ exit 78;
+endef
+
+# Kept for compatibility with older Erlang.mk configurations.
+define dep_fetch_legacy
+ $(warning WARNING: '$(1)' dependency configuration uses deprecated format.) \
+ git clone -q -n -- $(word 1,$(dep_$(1))) $(DEPS_DIR)/$(1); \
+ cd $(DEPS_DIR)/$(1) && git checkout -q $(if $(word 2,$(dep_$(1))),$(word 2,$(dep_$(1))),master);
+endef
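+
+# The deprecated format handled above looks like this (illustrative):
+#
+#     dep_my_dep = https://github.com/user/my_dep some-branch
+#
+# that is, a repository URL followed by an optional commit (master by default).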
+
+define dep_target
+$(DEPS_DIR)/$(call dep_name,$1): | $(ERLANG_MK_TMP)
+ $(eval DEP_NAME := $(call dep_name,$1))
+ $(eval DEP_STR := $(if $(filter $1,$(DEP_NAME)),$1,"$1 ($(DEP_NAME))"))
+ $(verbose) if test -d $(APPS_DIR)/$(DEP_NAME); then \
+ echo "Error: Dependency" $(DEP_STR) "conflicts with application found in $(APPS_DIR)/$(DEP_NAME)." >&2; \
+ exit 17; \
+ fi
+ $(verbose) mkdir -p $(DEPS_DIR)
+ $(dep_verbose) $(call dep_fetch_$(strip $(call dep_fetch,$(1))),$(1))
+ $(verbose) if [ -f $(DEPS_DIR)/$(1)/configure.ac -o -f $(DEPS_DIR)/$(1)/configure.in ] \
+ && [ ! -f $(DEPS_DIR)/$(1)/configure ]; then \
+ echo " AUTO " $(DEP_STR); \
+ cd $(DEPS_DIR)/$(1) && autoreconf -Wall -vif -I m4; \
+ fi
+ - $(verbose) if [ -f $(DEPS_DIR)/$(DEP_NAME)/configure ]; then \
+ echo " CONF " $(DEP_STR); \
+ cd $(DEPS_DIR)/$(DEP_NAME) && ./configure; \
+ fi
+ifeq ($(filter $(1),$(NO_AUTOPATCH)),)
+ $(verbose) $$(MAKE) --no-print-directory autopatch-$(DEP_NAME)
+endif
+
+.PHONY: autopatch-$(call dep_name,$1)
+
+autopatch-$(call dep_name,$1)::
+ $(verbose) if [ "$(1)" = "amqp_client" -a "$(RABBITMQ_CLIENT_PATCH)" ]; then \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \
+ echo " PATCH Downloading rabbitmq-codegen"; \
+ git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \
+ fi; \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-server ]; then \
+ echo " PATCH Downloading rabbitmq-server"; \
+ git clone https://github.com/rabbitmq/rabbitmq-server.git $(DEPS_DIR)/rabbitmq-server; \
+ fi; \
+ ln -s $(DEPS_DIR)/amqp_client/deps/rabbit_common-0.0.0 $(DEPS_DIR)/rabbit_common; \
+ elif [ "$(1)" = "rabbit" -a "$(RABBITMQ_SERVER_PATCH)" ]; then \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \
+ echo " PATCH Downloading rabbitmq-codegen"; \
+ git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \
+ fi \
+ elif [ "$1" = "elixir" -a "$(ELIXIR_PATCH)" ]; then \
+ ln -s lib/elixir/ebin $(DEPS_DIR)/elixir/; \
+ else \
+ $$(call dep_autopatch,$(call dep_name,$1)) \
+ fi
+endef
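+
+# Autopatching can be disabled per dependency, e.g. "NO_AUTOPATCH = my_raw_dep"
+# (illustrative), or only for the "include erlang.mk" rewrite by setting
+# NO_AUTOPATCH_ERLANG_MK = 1.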
+
+$(foreach dep,$(BUILD_DEPS) $(DEPS),$(eval $(call dep_target,$(dep))))
+
+ifndef IS_APP
+clean:: clean-apps
+
+clean-apps:
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ $(MAKE) -C $$dep clean IS_APP=1; \
+ done
+
+distclean:: distclean-apps
+
+distclean-apps:
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ $(MAKE) -C $$dep distclean IS_APP=1; \
+ done
+endif
+
+ifndef SKIP_DEPS
+distclean:: distclean-deps
+
+distclean-deps:
+ $(gen_verbose) rm -rf $(DEPS_DIR)
+endif
+
+# Forward-declare variables used in core/deps-tools.mk. This is required
+# in case plugins use them.
+
+ERLANG_MK_RECURSIVE_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-deps-list.log
+ERLANG_MK_RECURSIVE_DOC_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-doc-deps-list.log
+ERLANG_MK_RECURSIVE_REL_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-rel-deps-list.log
+ERLANG_MK_RECURSIVE_TEST_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-test-deps-list.log
+ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-shell-deps-list.log
+
+ERLANG_MK_QUERY_DEPS_FILE = $(ERLANG_MK_TMP)/query-deps.log
+ERLANG_MK_QUERY_DOC_DEPS_FILE = $(ERLANG_MK_TMP)/query-doc-deps.log
+ERLANG_MK_QUERY_REL_DEPS_FILE = $(ERLANG_MK_TMP)/query-rel-deps.log
+ERLANG_MK_QUERY_TEST_DEPS_FILE = $(ERLANG_MK_TMP)/query-test-deps.log
+ERLANG_MK_QUERY_SHELL_DEPS_FILE = $(ERLANG_MK_TMP)/query-shell-deps.log
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: clean-app
+
+# Configuration.
+
+ERLC_OPTS ?= -Werror +debug_info +warn_export_vars +warn_shadow_vars \
+ +warn_obsolete_guard # +bin_opt_info +warn_export_all +warn_missing_spec
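+# Because of the ?= above, a project Makefile may define its own full set of
+# options before including erlang.mk, e.g. (illustrative):
+#
+#     ERLC_OPTS = +debug_info +warn_missing_spec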
+COMPILE_FIRST ?=
+COMPILE_FIRST_PATHS = $(addprefix src/,$(addsuffix .erl,$(COMPILE_FIRST)))
+ERLC_EXCLUDE ?=
+ERLC_EXCLUDE_PATHS = $(addprefix src/,$(addsuffix .erl,$(ERLC_EXCLUDE)))
+
+ERLC_ASN1_OPTS ?=
+
+ERLC_MIB_OPTS ?=
+COMPILE_MIB_FIRST ?=
+COMPILE_MIB_FIRST_PATHS = $(addprefix mibs/,$(addsuffix .mib,$(COMPILE_MIB_FIRST)))
+
+# Verbosity.
+
+app_verbose_0 = @echo " APP " $(PROJECT);
+app_verbose_2 = set -x;
+app_verbose = $(app_verbose_$(V))
+
+appsrc_verbose_0 = @echo " APP " $(PROJECT).app.src;
+appsrc_verbose_2 = set -x;
+appsrc_verbose = $(appsrc_verbose_$(V))
+
+makedep_verbose_0 = @echo " DEPEND" $(PROJECT).d;
+makedep_verbose_2 = set -x;
+makedep_verbose = $(makedep_verbose_$(V))
+
+erlc_verbose_0 = @echo " ERLC " $(filter-out $(patsubst %,%.erl,$(ERLC_EXCLUDE)),\
+ $(filter %.erl %.core,$(?F)));
+erlc_verbose_2 = set -x;
+erlc_verbose = $(erlc_verbose_$(V))
+
+xyrl_verbose_0 = @echo " XYRL " $(filter %.xrl %.yrl,$(?F));
+xyrl_verbose_2 = set -x;
+xyrl_verbose = $(xyrl_verbose_$(V))
+
+asn1_verbose_0 = @echo " ASN1 " $(filter %.asn1,$(?F));
+asn1_verbose_2 = set -x;
+asn1_verbose = $(asn1_verbose_$(V))
+
+mib_verbose_0 = @echo " MIB " $(filter %.bin %.mib,$(?F));
+mib_verbose_2 = set -x;
+mib_verbose = $(mib_verbose_$(V))
+
+ifneq ($(wildcard src/),)
+
+# Targets.
+
+app:: $(if $(wildcard ebin/test),clean) deps
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d
+ $(verbose) $(MAKE) --no-print-directory app-build
+
+ifeq ($(wildcard src/$(PROJECT_MOD).erl),)
+define app_file
+{application, '$(PROJECT)', [
+ {description, "$(PROJECT_DESCRIPTION)"},
+ {vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP),
+ {id$(comma)$(space)"$(1)"}$(comma))
+ {modules, [$(call comma_list,$(2))]},
+ {registered, []},
+ {applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(foreach dep,$(DEPS),$(call dep_name,$(dep))))]},
+ {env, $(subst \,\\,$(PROJECT_ENV))}$(if $(findstring {,$(PROJECT_APP_EXTRA_KEYS)),$(comma)$(newline)$(tab)$(subst \,\\,$(PROJECT_APP_EXTRA_KEYS)),)
+]}.
+endef
+else
+define app_file
+{application, '$(PROJECT)', [
+ {description, "$(PROJECT_DESCRIPTION)"},
+ {vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP),
+ {id$(comma)$(space)"$(1)"}$(comma))
+ {modules, [$(call comma_list,$(2))]},
+ {registered, [$(call comma_list,$(PROJECT)_sup $(PROJECT_REGISTERED))]},
+ {applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(foreach dep,$(DEPS),$(call dep_name,$(dep))))]},
+ {mod, {$(PROJECT_MOD), []}},
+ {env, $(subst \,\\,$(PROJECT_ENV))}$(if $(findstring {,$(PROJECT_APP_EXTRA_KEYS)),$(comma)$(newline)$(tab)$(subst \,\\,$(PROJECT_APP_EXTRA_KEYS)),)
+]}.
+endef
+endif
+
+app-build: ebin/$(PROJECT).app
+ $(verbose) :
+
+# Source files.
+
+ALL_SRC_FILES := $(sort $(call core_find,src/,*))
+
+ERL_FILES := $(filter %.erl,$(ALL_SRC_FILES))
+CORE_FILES := $(filter %.core,$(ALL_SRC_FILES))
+
+# ASN.1 files.
+
+ifneq ($(wildcard asn1/),)
+ASN1_FILES = $(sort $(call core_find,asn1/,*.asn1))
+ERL_FILES += $(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES))))
+
+define compile_asn1
+ $(verbose) mkdir -p include/
+ $(asn1_verbose) erlc -v -I include/ -o asn1/ +noobj $(ERLC_ASN1_OPTS) $(1)
+ $(verbose) mv asn1/*.erl src/
+ -$(verbose) mv asn1/*.hrl include/
+ $(verbose) mv asn1/*.asn1db include/
+endef
+
+$(PROJECT).d:: $(ASN1_FILES)
+ $(if $(strip $?),$(call compile_asn1,$?))
+endif
+
+# SNMP MIB files.
+
+ifneq ($(wildcard mibs/),)
+MIB_FILES = $(sort $(call core_find,mibs/,*.mib))
+
+$(PROJECT).d:: $(COMPILE_MIB_FIRST_PATHS) $(MIB_FILES)
+ $(verbose) mkdir -p include/ priv/mibs/
+ $(mib_verbose) erlc -v $(ERLC_MIB_OPTS) -o priv/mibs/ -I priv/mibs/ $?
+ $(mib_verbose) erlc -o include/ -- $(addprefix priv/mibs/,$(patsubst %.mib,%.bin,$(notdir $?)))
+endif
+
+# Leex and Yecc files.
+
+XRL_FILES := $(filter %.xrl,$(ALL_SRC_FILES))
+XRL_ERL_FILES = $(addprefix src/,$(patsubst %.xrl,%.erl,$(notdir $(XRL_FILES))))
+ERL_FILES += $(XRL_ERL_FILES)
+
+YRL_FILES := $(filter %.yrl,$(ALL_SRC_FILES))
+YRL_ERL_FILES = $(addprefix src/,$(patsubst %.yrl,%.erl,$(notdir $(YRL_FILES))))
+ERL_FILES += $(YRL_ERL_FILES)
+
+$(PROJECT).d:: $(XRL_FILES) $(YRL_FILES)
+ $(if $(strip $?),$(xyrl_verbose) erlc -v -o src/ $(YRL_ERLC_OPTS) $?)
+
+# Erlang and Core Erlang files.
+
+define makedep.erl
+ E = ets:new(makedep, [bag]),
+ G = digraph:new([acyclic]),
+ ErlFiles = lists:usort(string:tokens("$(ERL_FILES)", " ")),
+ DepsDir = "$(call core_native_path,$(DEPS_DIR))",
+ AppsDir = "$(call core_native_path,$(APPS_DIR))",
+ DepsDirsSrc = "$(if $(wildcard $(DEPS_DIR)/*/src), $(call core_native_path,$(wildcard $(DEPS_DIR)/*/src)))",
+ DepsDirsInc = "$(if $(wildcard $(DEPS_DIR)/*/include), $(call core_native_path,$(wildcard $(DEPS_DIR)/*/include)))",
+ AppsDirsSrc = "$(if $(wildcard $(APPS_DIR)/*/src), $(call core_native_path,$(wildcard $(APPS_DIR)/*/src)))",
+ AppsDirsInc = "$(if $(wildcard $(APPS_DIR)/*/include), $(call core_native_path,$(wildcard $(APPS_DIR)/*/include)))",
+ DepsDirs = lists:usort(string:tokens(DepsDirsSrc++DepsDirsInc, " ")),
+ AppsDirs = lists:usort(string:tokens(AppsDirsSrc++AppsDirsInc, " ")),
+ Modules = [{list_to_atom(filename:basename(F, ".erl")), F} || F <- ErlFiles],
+ Add = fun (Mod, Dep) ->
+ case lists:keyfind(Dep, 1, Modules) of
+ false -> ok;
+ {_, DepFile} ->
+ {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+ ets:insert(E, {ModFile, DepFile}),
+ digraph:add_vertex(G, Mod),
+ digraph:add_vertex(G, Dep),
+ digraph:add_edge(G, Mod, Dep)
+ end
+ end,
+ AddHd = fun (F, Mod, DepFile) ->
+ case file:open(DepFile, [read]) of
+ {error, enoent} ->
+ ok;
+ {ok, Fd} ->
+ {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+ case ets:match(E, {ModFile, DepFile}) of
+ [] ->
+ ets:insert(E, {ModFile, DepFile}),
+ F(F, Fd, Mod,0);
+ _ -> ok
+ end
+ end
+ end,
+ SearchHrl = fun
+ F(_Hrl, []) -> {error,enoent};
+ F(Hrl, [Dir|Dirs]) ->
+ HrlF = filename:join([Dir,Hrl]),
+ case filelib:is_file(HrlF) of
+ true ->
+ {ok, HrlF};
+ false -> F(Hrl,Dirs)
+ end
+ end,
+ Attr = fun
+ (_F, Mod, behavior, Dep) ->
+ Add(Mod, Dep);
+ (_F, Mod, behaviour, Dep) ->
+ Add(Mod, Dep);
+ (_F, Mod, compile, {parse_transform, Dep}) ->
+ Add(Mod, Dep);
+ (_F, Mod, compile, Opts) when is_list(Opts) ->
+ case proplists:get_value(parse_transform, Opts) of
+ undefined -> ok;
+ Dep -> Add(Mod, Dep)
+ end;
+ (F, Mod, include, Hrl) ->
+ case SearchHrl(Hrl, ["src", "include",AppsDir,DepsDir]++AppsDirs++DepsDirs) of
+ {ok, FoundHrl} -> AddHd(F, Mod, FoundHrl);
+ {error, _} -> false
+ end;
+ (F, Mod, include_lib, Hrl) ->
+ case SearchHrl(Hrl, ["src", "include",AppsDir,DepsDir]++AppsDirs++DepsDirs) of
+ {ok, FoundHrl} -> AddHd(F, Mod, FoundHrl);
+ {error, _} -> false
+ end;
+ (F, Mod, import, {Imp, _}) ->
+ IsFile =
+ case lists:keyfind(Imp, 1, Modules) of
+ false -> false;
+ {_, FilePath} -> filelib:is_file(FilePath)
+ end,
+ case IsFile of
+ false -> ok;
+ true -> Add(Mod, Imp)
+ end;
+ (_, _, _, _) -> ok
+ end,
+ MakeDepend = fun
+ (F, Fd, Mod, StartLocation) ->
+ {ok, Filename} = file:pid2name(Fd),
+ case io:parse_erl_form(Fd, undefined, StartLocation) of
+ {ok, AbsData, EndLocation} ->
+ case AbsData of
+ {attribute, _, Key, Value} ->
+ Attr(F, Mod, Key, Value),
+ F(F, Fd, Mod, EndLocation);
+ _ -> F(F, Fd, Mod, EndLocation)
+ end;
+ {eof, _ } -> file:close(Fd);
+ {error, ErrorDescription } ->
+ file:close(Fd);
+ {error, ErrorInfo, ErrorLocation} ->
+ F(F, Fd, Mod, ErrorLocation)
+ end,
+ ok
+ end,
+ [begin
+ Mod = list_to_atom(filename:basename(F, ".erl")),
+ case file:open(F, [read]) of
+ {ok, Fd} -> MakeDepend(MakeDepend, Fd, Mod,0);
+ {error, enoent} -> ok
+ end
+ end || F <- ErlFiles],
+ Depend = sofs:to_external(sofs:relation_to_family(sofs:relation(ets:tab2list(E)))),
+ CompileFirst = [X || X <- lists:reverse(digraph_utils:topsort(G)), [] =/= digraph:in_neighbours(G, X)],
+ TargetPath = fun(Target) ->
+ case lists:keyfind(Target, 1, Modules) of
+ false -> "";
+ {_, DepFile} ->
+ DirSubname = tl(string:tokens(filename:dirname(DepFile), "/")),
+ string:join(DirSubname ++ [atom_to_list(Target)], "/")
+ end
+ end,
+ Output0 = [
+ "# Generated by Erlang.mk. Edit at your own risk!\n\n",
+ [[F, "::", [[" ", D] || D <- Deps], "; @touch \$$@\n"] || {F, Deps} <- Depend],
+ "\nCOMPILE_FIRST +=", [[" ", TargetPath(CF)] || CF <- CompileFirst], "\n"
+ ],
+ Output = case "é" of
+ [233] -> unicode:characters_to_binary(Output0);
+ _ -> Output0
+ end,
+ ok = file:write_file("$(1)", Output),
+ halt()
+endef
+
+ifeq ($(if $(NO_MAKEDEP),$(wildcard $(PROJECT).d),),)
+$(PROJECT).d:: $(ERL_FILES) $(call core_find,include/,*.hrl) $(MAKEFILE_LIST)
+ $(makedep_verbose) $(call erlang,$(call makedep.erl,$@))
+endif
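+
+# Once $(PROJECT).d exists, its regeneration can be skipped, e.g.
+# (illustrative): make NO_MAKEDEP=1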
+
+ifeq ($(IS_APP)$(IS_DEP),)
+ifneq ($(words $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES)),0)
+# Rebuild everything when the Makefile changes.
+$(ERLANG_MK_TMP)/last-makefile-change: $(MAKEFILE_LIST) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES); \
+ touch -c $(PROJECT).d; \
+ fi
+ $(verbose) touch $@
+
+$(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES):: $(ERLANG_MK_TMP)/last-makefile-change
+ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change
+endif
+endif
+
+$(PROJECT).d::
+ $(verbose) :
+
+include $(wildcard $(PROJECT).d)
+
+ebin/$(PROJECT).app:: ebin/
+
+ebin/:
+ $(verbose) mkdir -p ebin/
+
+define compile_erl
+ $(erlc_verbose) erlc -v $(if $(IS_DEP),$(filter-out -Werror,$(ERLC_OPTS)),$(ERLC_OPTS)) -o ebin/ \
+ -pa ebin/ -I include/ $(filter-out $(ERLC_EXCLUDE_PATHS),$(COMPILE_FIRST_PATHS) $(1))
+endef
+
+define validate_app_file
+ case file:consult("ebin/$(PROJECT).app") of
+ {ok, _} -> halt();
+ _ -> halt(1)
+ end
+endef
+
+ebin/$(PROJECT).app:: $(ERL_FILES) $(CORE_FILES) $(wildcard src/$(PROJECT).app.src)
+ $(eval FILES_TO_COMPILE := $(filter-out src/$(PROJECT).app.src,$?))
+ $(if $(strip $(FILES_TO_COMPILE)),$(call compile_erl,$(FILES_TO_COMPILE)))
+# Older git versions do not have the --first-parent flag. Do without in that case.
+ $(eval GITDESCRIBE := $(shell git describe --dirty --abbrev=7 --tags --always --first-parent 2>/dev/null \
+ || git describe --dirty --abbrev=7 --tags --always 2>/dev/null || true))
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(filter-out $(ERLC_EXCLUDE_PATHS),$(ERL_FILES) $(CORE_FILES) $(BEAM_FILES)))))))
+ifeq ($(wildcard src/$(PROJECT).app.src),)
+ $(app_verbose) printf '$(subst %,%%,$(subst $(newline),\n,$(subst ','\'',$(call app_file,$(GITDESCRIBE),$(MODULES)))))' \
+ > ebin/$(PROJECT).app
+ $(verbose) if ! $(call erlang,$(call validate_app_file)); then \
+ echo "The .app file produced is invalid. Please verify the value of PROJECT_ENV." >&2; \
+ exit 1; \
+ fi
+else
+ $(verbose) if [ -z "$$(grep -e '^[^%]*{\s*modules\s*,' src/$(PROJECT).app.src)" ]; then \
+ echo "Empty modules entry not found in $(PROJECT).app.src. Please consult the erlang.mk documentation for instructions." >&2; \
+ exit 1; \
+ fi
+ $(appsrc_verbose) cat src/$(PROJECT).app.src \
+ | sed "s/{[[:space:]]*modules[[:space:]]*,[[:space:]]*\[\]}/{modules, \[$(call comma_list,$(MODULES))\]}/" \
+ | sed "s/{id,[[:space:]]*\"git\"}/{id, \"$(subst /,\/,$(GITDESCRIBE))\"}/" \
+ > ebin/$(PROJECT).app
+endif
+ifneq ($(wildcard src/$(PROJECT).appup),)
+ $(verbose) cp src/$(PROJECT).appup ebin/
+endif
+
+clean:: clean-app
+
+clean-app:
+ $(gen_verbose) rm -rf $(PROJECT).d ebin/ priv/mibs/ $(XRL_ERL_FILES) $(YRL_ERL_FILES) \
+ $(addprefix include/,$(patsubst %.mib,%.hrl,$(notdir $(MIB_FILES)))) \
+ $(addprefix include/,$(patsubst %.asn1,%.hrl,$(notdir $(ASN1_FILES)))) \
+ $(addprefix include/,$(patsubst %.asn1,%.asn1db,$(notdir $(ASN1_FILES)))) \
+ $(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES))))
+
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Viktor Söderqvist <viktor@zuiderkwast.se>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: docs-deps
+
+# Configuration.
+
+ALL_DOC_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(DOC_DEPS))
+
+# Targets.
+
+$(foreach dep,$(DOC_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+doc-deps:
+else
+doc-deps: $(ALL_DOC_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_DOC_DEPS_DIRS) ; do $(MAKE) -C $$dep IS_DEP=1; done
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: rel-deps
+
+# Configuration.
+
+ALL_REL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(REL_DEPS))
+
+# Targets.
+
+$(foreach dep,$(REL_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+rel-deps:
+else
+rel-deps: $(ALL_REL_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_REL_DEPS_DIRS) ; do $(MAKE) -C $$dep; done
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: test-deps test-dir test-build clean-test-dir
+
+# Configuration.
+
+TEST_DIR ?= $(CURDIR)/test
+
+ALL_TEST_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(TEST_DEPS))
+
+TEST_ERLC_OPTS ?= +debug_info +warn_export_vars +warn_shadow_vars +warn_obsolete_guard
+TEST_ERLC_OPTS += -DTEST=1
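+
+# Since TEST is defined during test builds, modules can guard test-only
+# code (an illustrative Erlang sketch):
+#
+#     -ifdef(TEST).
+#     -include_lib("eunit/include/eunit.hrl").
+#     -endif.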
+
+# Targets.
+
+$(foreach dep,$(TEST_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+test-deps:
+else
+test-deps: $(ALL_TEST_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_TEST_DEPS_DIRS) ; do \
+ if [ -z "$(strip $(FULL))" ] && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ else \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ fi \
+ done
+endif
+
+ifneq ($(wildcard $(TEST_DIR)),)
+test-dir: $(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build
+ @:
+
+test_erlc_verbose_0 = @echo " ERLC " $(filter-out $(patsubst %,%.erl,$(ERLC_EXCLUDE)),\
+ $(filter %.erl %.core,$(notdir $(FILES_TO_COMPILE))));
+test_erlc_verbose_2 = set -x;
+test_erlc_verbose = $(test_erlc_verbose_$(V))
+
+define compile_test_erl
+ $(test_erlc_verbose) erlc -v $(TEST_ERLC_OPTS) -o $(TEST_DIR) \
+ -pa ebin/ -I include/ $(1)
+endef
+
+ERL_TEST_FILES = $(call core_find,$(TEST_DIR)/,*.erl)
+$(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build: $(ERL_TEST_FILES) $(MAKEFILE_LIST)
+ $(eval FILES_TO_COMPILE := $(if $(filter $(MAKEFILE_LIST),$?),$(filter $(ERL_TEST_FILES),$^),$?))
+ $(if $(strip $(FILES_TO_COMPILE)),$(call compile_test_erl,$(FILES_TO_COMPILE)) && touch $@)
+endif
+
+test-build:: IS_TEST=1
+test-build:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build:: $(if $(wildcard src),$(if $(wildcard ebin/test),,clean)) $(if $(IS_APP),,deps test-deps)
+# We already compiled everything when IS_APP=1.
+ifndef IS_APP
+ifneq ($(wildcard src),)
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(verbose) $(MAKE) --no-print-directory app-build ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(gen_verbose) touch ebin/test
+endif
+ifneq ($(wildcard $(TEST_DIR)),)
+ $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+endif
+endif
+
+# Roughly the same as test-build, but when IS_APP=1.
+# We only care about compiling the current application.
+ifdef IS_APP
+test-build-app:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build-app:: deps test-deps
+ifneq ($(wildcard src),)
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(verbose) $(MAKE) --no-print-directory app-build ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(gen_verbose) touch ebin/test
+endif
+ifneq ($(wildcard $(TEST_DIR)),)
+ $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+endif
+endif
+
+clean:: clean-test-dir
+
+clean-test-dir:
+ifneq ($(wildcard $(TEST_DIR)/*.beam),)
+ $(gen_verbose) rm -f $(TEST_DIR)/*.beam $(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: rebar.config
+
+# We strip out -Werror because we don't want to fail due to
+# warnings when used as a dependency.
+
+compat_prepare_erlc_opts = $(shell echo "$1" | sed 's/, */,/g')
+
+define compat_convert_erlc_opts
+$(if $(filter-out -Werror,$1),\
+ $(if $(findstring +,$1),\
+ $(shell echo $1 | cut -b 2-)))
+endef
+
+define compat_erlc_opts_to_list
+[$(call comma_list,$(foreach o,$(call compat_prepare_erlc_opts,$1),$(call compat_convert_erlc_opts,$o)))]
+endef
+
+define compat_rebar_config
+{deps, [
+$(call comma_list,$(foreach d,$(DEPS),\
+ $(if $(filter hex,$(call dep_fetch,$d)),\
+ {$(call dep_name,$d)$(comma)"$(call dep_repo,$d)"},\
+ {$(call dep_name,$d)$(comma)".*"$(comma){git,"$(call dep_repo,$d)"$(comma)"$(call dep_commit,$d)"}})))
+]}.
+{erl_opts, $(call compat_erlc_opts_to_list,$(ERLC_OPTS))}.
+endef
+
+rebar.config:
+ $(gen_verbose) $(call core_render,compat_rebar_config,rebar.config)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter asciideck,$(DEPS) $(DOC_DEPS)),asciideck)
+
+.PHONY: asciidoc asciidoc-guide asciidoc-manual install-asciidoc distclean-asciidoc-guide distclean-asciidoc-manual
+
+# Core targets.
+
+docs:: asciidoc
+
+distclean:: distclean-asciidoc-guide distclean-asciidoc-manual
+
+# Plugin-specific targets.
+
+asciidoc: asciidoc-guide asciidoc-manual
+
+# User guide.
+
+ifeq ($(wildcard doc/src/guide/book.asciidoc),)
+asciidoc-guide:
+else
+asciidoc-guide: distclean-asciidoc-guide doc-deps
+ a2x -v -f pdf doc/src/guide/book.asciidoc && mv doc/src/guide/book.pdf doc/guide.pdf
+ a2x -v -f chunked doc/src/guide/book.asciidoc && mv doc/src/guide/book.chunked/ doc/html/
+
+distclean-asciidoc-guide:
+ $(gen_verbose) rm -rf doc/html/ doc/guide.pdf
+endif
+
+# Man pages.
+
+ASCIIDOC_MANUAL_FILES := $(wildcard doc/src/manual/*.asciidoc)
+
+ifeq ($(ASCIIDOC_MANUAL_FILES),)
+asciidoc-manual:
+else
+
+# Configuration.
+
+MAN_INSTALL_PATH ?= /usr/local/share/man
+MAN_SECTIONS ?= 3 7
+MAN_PROJECT ?= $(shell echo $(PROJECT) | sed 's/^./\U&\E/')
+MAN_VERSION ?= $(PROJECT_VERSION)
+
+# Plugin-specific targets.
+
+define asciidoc2man.erl
+try
+ [begin
+ io:format(" ADOC ~s~n", [F]),
+ ok = asciideck:to_manpage(asciideck:parse_file(F), #{
+ compress => gzip,
+ outdir => filename:dirname(F),
+ extra2 => "$(MAN_PROJECT) $(MAN_VERSION)",
+ extra3 => "$(MAN_PROJECT) Function Reference"
+ })
+ end || F <- [$(shell echo $(addprefix $(comma)\",$(addsuffix \",$1)) | sed 's/^.//')]],
+ halt(0)
+catch C:E ->
+ io:format("Exception ~p:~p~nStacktrace: ~p~n", [C, E, erlang:get_stacktrace()]),
+ halt(1)
+end.
+endef
+
+asciidoc-manual:: doc-deps
+
+asciidoc-manual:: $(ASCIIDOC_MANUAL_FILES)
+ $(gen_verbose) $(call erlang,$(call asciidoc2man.erl,$?))
+ $(verbose) $(foreach s,$(MAN_SECTIONS),mkdir -p doc/man$s/ && mv doc/src/manual/*.$s.gz doc/man$s/;)
+
+install-docs:: install-asciidoc
+
+install-asciidoc: asciidoc-manual
+ $(foreach s,$(MAN_SECTIONS),\
+ mkdir -p $(MAN_INSTALL_PATH)/man$s/ && \
+ install -g `id -g` -o `id -u` -m 0644 doc/man$s/*.gz $(MAN_INSTALL_PATH)/man$s/;)
+
+distclean-asciidoc-manual:
+ $(gen_verbose) rm -rf $(addprefix doc/man,$(MAN_SECTIONS))
+endif
+endif
+
+# Copyright (c) 2014-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: bootstrap bootstrap-lib bootstrap-rel new list-templates
+
+# Core targets.
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Bootstrap targets:" \
+ " bootstrap Generate a skeleton of an OTP application" \
+ " bootstrap-lib Generate a skeleton of an OTP library" \
+ " bootstrap-rel Generate the files needed to build a release" \
+ " new-app in=NAME Create a new local OTP application NAME" \
+ " new-lib in=NAME Create a new local OTP library NAME" \
+ " new t=TPL n=NAME Generate a module NAME based on the template TPL" \
+ " new t=T n=N in=APP Generate a module NAME based on the template TPL in APP" \
+ " list-templates List available templates"
+
+# Bootstrap templates.
+
+define bs_appsrc
+{application, $p, [
+ {description, ""},
+ {vsn, "0.1.0"},
+ {id, "git"},
+ {modules, []},
+ {registered, []},
+ {applications, [
+ kernel,
+ stdlib
+ ]},
+ {mod, {$p_app, []}},
+ {env, []}
+]}.
+endef
+
+define bs_appsrc_lib
+{application, $p, [
+ {description, ""},
+ {vsn, "0.1.0"},
+ {id, "git"},
+ {modules, []},
+ {registered, []},
+ {applications, [
+ kernel,
+ stdlib
+ ]}
+]}.
+endef
+
+# To prevent autocompletion issues with ZSH, we add "include erlang.mk"
+# separately during the actual bootstrap.
+define bs_Makefile
+PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.1.0
+$(if $(SP),
+# Whitespace to be used when creating files from templates.
+SP = $(SP)
+)
+endef
+
+define bs_apps_Makefile
+PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.1.0
+$(if $(SP),
+# Whitespace to be used when creating files from templates.
+SP = $(SP)
+)
+# Make sure we know where the applications are located.
+ROOT_DIR ?= $(call core_relpath,$(dir $(ERLANG_MK_FILENAME)),$(APPS_DIR)/app)
+APPS_DIR ?= ..
+DEPS_DIR ?= $(call core_relpath,$(DEPS_DIR),$(APPS_DIR)/app)
+
+include $$(ROOT_DIR)/erlang.mk
+endef
+
+define bs_app
+-module($p_app).
+-behaviour(application).
+
+-export([start/2]).
+-export([stop/1]).
+
+start(_Type, _Args) ->
+ $p_sup:start_link().
+
+stop(_State) ->
+ ok.
+endef
+
+define bs_relx_config
+{release, {$p_release, "1"}, [$p, sasl, runtime_tools]}.
+{extended_start_script, true}.
+{sys_config, "config/sys.config"}.
+{vm_args, "config/vm.args"}.
+endef
+
+define bs_sys_config
+[
+].
+endef
+
+define bs_vm_args
+-name $p@127.0.0.1
+-setcookie $p
+-heart
+endef
+
+# Normal templates.
+
+define tpl_supervisor
+-module($(n)).
+-behaviour(supervisor).
+
+-export([start_link/0]).
+-export([init/1]).
+
+start_link() ->
+ supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+init([]) ->
+ Procs = [],
+ {ok, {{one_for_one, 1, 5}, Procs}}.
+endef
+
+define tpl_gen_server
+-module($(n)).
+-behaviour(gen_server).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_server.
+-export([init/1]).
+-export([handle_call/3]).
+-export([handle_cast/2]).
+-export([handle_info/2]).
+-export([terminate/2]).
+-export([code_change/3]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_server:start_link(?MODULE, [], []).
+
+%% gen_server.
+
+init([]) ->
+ {ok, #state{}}.
+
+handle_call(_Request, _From, State) ->
+ {reply, ignored, State}.
+
+handle_cast(_Msg, State) ->
+ {noreply, State}.
+
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+endef
+
+define tpl_module
+-module($(n)).
+-export([]).
+endef
+
+define tpl_cowboy_http
+-module($(n)).
+-behaviour(cowboy_http_handler).
+
+-export([init/3]).
+-export([handle/2]).
+-export([terminate/3]).
+
+-record(state, {
+}).
+
+init(_, Req, _Opts) ->
+ {ok, Req, #state{}}.
+
+handle(Req, State=#state{}) ->
+ {ok, Req2} = cowboy_req:reply(200, Req),
+ {ok, Req2, State}.
+
+terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_gen_fsm
+-module($(n)).
+-behaviour(gen_fsm).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_fsm.
+-export([init/1]).
+-export([state_name/2]).
+-export([handle_event/3]).
+-export([state_name/3]).
+-export([handle_sync_event/4]).
+-export([handle_info/3]).
+-export([terminate/3]).
+-export([code_change/4]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_fsm:start_link(?MODULE, [], []).
+
+%% gen_fsm.
+
+init([]) ->
+ {ok, state_name, #state{}}.
+
+state_name(_Event, StateData) ->
+ {next_state, state_name, StateData}.
+
+handle_event(_Event, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+state_name(_Event, _From, StateData) ->
+ {reply, ignored, state_name, StateData}.
+
+handle_sync_event(_Event, _From, StateName, StateData) ->
+ {reply, ignored, StateName, StateData}.
+
+handle_info(_Info, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+terminate(_Reason, _StateName, _StateData) ->
+ ok.
+
+code_change(_OldVsn, StateName, StateData, _Extra) ->
+ {ok, StateName, StateData}.
+endef
+
+define tpl_gen_statem
+-module($(n)).
+-behaviour(gen_statem).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_statem.
+-export([callback_mode/0]).
+-export([init/1]).
+-export([state_name/3]).
+-export([handle_event/4]).
+-export([terminate/3]).
+-export([code_change/4]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_statem:start_link(?MODULE, [], []).
+
+%% gen_statem.
+
+callback_mode() ->
+ state_functions.
+
+init([]) ->
+ {ok, state_name, #state{}}.
+
+state_name(_EventType, _EventData, StateData) ->
+ {next_state, state_name, StateData}.
+
+handle_event(_EventType, _EventData, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+terminate(_Reason, _StateName, _StateData) ->
+ ok.
+
+code_change(_OldVsn, StateName, StateData, _Extra) ->
+ {ok, StateName, StateData}.
+endef
+
+define tpl_cowboy_loop
+-module($(n)).
+-behaviour(cowboy_loop_handler).
+
+-export([init/3]).
+-export([info/3]).
+-export([terminate/3]).
+
+-record(state, {
+}).
+
+init(_, Req, _Opts) ->
+ {loop, Req, #state{}, 5000, hibernate}.
+
+info(_Info, Req, State) ->
+ {loop, Req, State, hibernate}.
+
+terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_cowboy_rest
+-module($(n)).
+
+-export([init/3]).
+-export([content_types_provided/2]).
+-export([get_html/2]).
+
+init(_, _Req, _Opts) ->
+ {upgrade, protocol, cowboy_rest}.
+
+content_types_provided(Req, State) ->
+ {[{{<<"text">>, <<"html">>, '*'}, get_html}], Req, State}.
+
+get_html(Req, State) ->
+ {<<"<html><body>This is REST!</body></html>">>, Req, State}.
+endef
+
+define tpl_cowboy_ws
+-module($(n)).
+-behaviour(cowboy_websocket_handler).
+
+-export([init/3]).
+-export([websocket_init/3]).
+-export([websocket_handle/3]).
+-export([websocket_info/3]).
+-export([websocket_terminate/3]).
+
+-record(state, {
+}).
+
+init(_, _, _) ->
+ {upgrade, protocol, cowboy_websocket}.
+
+websocket_init(_, Req, _Opts) ->
+ Req2 = cowboy_req:compact(Req),
+ {ok, Req2, #state{}}.
+
+websocket_handle({text, Data}, Req, State) ->
+ {reply, {text, Data}, Req, State};
+websocket_handle({binary, Data}, Req, State) ->
+ {reply, {binary, Data}, Req, State};
+websocket_handle(_Frame, Req, State) ->
+ {ok, Req, State}.
+
+websocket_info(_Info, Req, State) ->
+ {ok, Req, State}.
+
+websocket_terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_ranch_protocol
+-module($(n)).
+-behaviour(ranch_protocol).
+
+-export([start_link/4]).
+-export([init/4]).
+
+-type opts() :: [].
+-export_type([opts/0]).
+
+-record(state, {
+ socket :: inet:socket(),
+ transport :: module()
+}).
+
+start_link(Ref, Socket, Transport, Opts) ->
+ Pid = spawn_link(?MODULE, init, [Ref, Socket, Transport, Opts]),
+ {ok, Pid}.
+
+-spec init(ranch:ref(), inet:socket(), module(), opts()) -> ok.
+init(Ref, Socket, Transport, _Opts) ->
+ ok = ranch:accept_ack(Ref),
+ loop(#state{socket=Socket, transport=Transport}).
+
+loop(State) ->
+ loop(State).
+endef
+
+# Plugin-specific targets.
+
+ifndef WS
+ifdef SP
+WS = $(subst a,,a $(wordlist 1,$(SP),a a a a a a a a a a a a a a a a a a a a))
+else
+WS = $(tab)
+endif
+endif
+
+bootstrap:
+ifneq ($(wildcard src/),)
+ $(error Error: src/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(eval n := $(PROJECT)_sup)
+ $(verbose) $(call core_render,bs_Makefile,Makefile)
+ $(verbose) echo "include erlang.mk" >> Makefile
+ $(verbose) mkdir src/
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc,src/$(PROJECT).app.src)
+endif
+ $(verbose) $(call core_render,bs_app,src/$(PROJECT)_app.erl)
+ $(verbose) $(call core_render,tpl_supervisor,src/$(PROJECT)_sup.erl)
+
+bootstrap-lib:
+ifneq ($(wildcard src/),)
+ $(error Error: src/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(verbose) $(call core_render,bs_Makefile,Makefile)
+ $(verbose) echo "include erlang.mk" >> Makefile
+ $(verbose) mkdir src/
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc_lib,src/$(PROJECT).app.src)
+endif
+
+bootstrap-rel:
+ifneq ($(wildcard relx.config),)
+ $(error Error: relx.config already exists)
+endif
+ifneq ($(wildcard config/),)
+ $(error Error: config/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(verbose) $(call core_render,bs_relx_config,relx.config)
+ $(verbose) mkdir config/
+ $(verbose) $(call core_render,bs_sys_config,config/sys.config)
+ $(verbose) $(call core_render,bs_vm_args,config/vm.args)
+
+new-app:
+ifndef in
+ $(error Usage: $(MAKE) new-app in=APP)
+endif
+ifneq ($(wildcard $(APPS_DIR)/$in),)
+ $(error Error: Application $in already exists)
+endif
+ $(eval p := $(in))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(eval n := $(in)_sup)
+ $(verbose) mkdir -p $(APPS_DIR)/$p/src/
+ $(verbose) $(call core_render,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile)
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc,$(APPS_DIR)/$p/src/$p.app.src)
+endif
+ $(verbose) $(call core_render,bs_app,$(APPS_DIR)/$p/src/$p_app.erl)
+ $(verbose) $(call core_render,tpl_supervisor,$(APPS_DIR)/$p/src/$p_sup.erl)
+
+new-lib:
+ifndef in
+ $(error Usage: $(MAKE) new-lib in=APP)
+endif
+ifneq ($(wildcard $(APPS_DIR)/$in),)
+ $(error Error: Application $in already exists)
+endif
+ $(eval p := $(in))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(verbose) mkdir -p $(APPS_DIR)/$p/src/
+ $(verbose) $(call core_render,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile)
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc_lib,$(APPS_DIR)/$p/src/$p.app.src)
+endif
+
+new:
+ifeq ($(wildcard src/)$(in),)
+ $(error Error: src/ directory does not exist)
+endif
+ifndef t
+ $(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP])
+endif
+ifndef n
+ $(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP])
+endif
+ifdef in
+ $(verbose) $(call core_render,tpl_$(t),$(APPS_DIR)/$(in)/src/$(n).erl)
+else
+ $(verbose) $(call core_render,tpl_$(t),src/$(n).erl)
+endif
+
+list-templates:
+	$(verbose) echo Available templates:
+ $(verbose) printf " %s\n" $(sort $(patsubst tpl_%,%,$(filter tpl_%,$(.VARIABLES))))
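+
+# A usage sketch, with hypothetical application and module names, of how
+# the templates above are instantiated:
+#
+#     $ make new-app in=my_app                       # skeleton under $(APPS_DIR)/my_app
+#     $ make new t=gen_server n=my_server in=my_app  # module in that application
+#     $ make new t=supervisor n=my_sup               # module in src/ when in= is omitted
+#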
+
+# Copyright (c) 2014-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: clean-c_src distclean-c_src-env
+
+# Configuration.
+
+C_SRC_DIR ?= $(CURDIR)/c_src
+C_SRC_ENV ?= $(C_SRC_DIR)/env.mk
+C_SRC_OUTPUT ?= $(CURDIR)/priv/$(PROJECT)
+C_SRC_TYPE ?= shared
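+
+# Note: any C_SRC_TYPE value other than "shared" (for example
+# "executable") links a standalone executable instead of a shared
+# library, as the extension and link flags below show.
+#
+#     C_SRC_TYPE = executable
+#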
+
+# System type and C compiler/flags.
+
+ifeq ($(PLATFORM),msys2)
+ C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?= .exe
+ C_SRC_OUTPUT_SHARED_EXTENSION ?= .dll
+else
+ C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?=
+ C_SRC_OUTPUT_SHARED_EXTENSION ?= .so
+endif
+
+ifeq ($(C_SRC_TYPE),shared)
+ C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_SHARED_EXTENSION)
+else
+ C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_EXECUTABLE_EXTENSION)
+endif
+
+ifeq ($(PLATFORM),msys2)
+# We hardcode the compiler used on MSYS2. The default CC=cc does
+# not produce working code. The "gcc" MSYS2 package also doesn't.
+ CC = /mingw64/bin/gcc
+ export CC
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),darwin)
+ CC ?= cc
+ CFLAGS ?= -O3 -std=c99 -arch x86_64 -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -arch x86_64 -Wall
+ LDFLAGS ?= -arch x86_64 -flat_namespace -undefined suppress
+else ifeq ($(PLATFORM),freebsd)
+ CC ?= cc
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),linux)
+ CC ?= gcc
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+endif
+
+ifneq ($(PLATFORM),msys2)
+ CFLAGS += -fPIC
+ CXXFLAGS += -fPIC
+endif
+
+CFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
+CXXFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
+
+LDLIBS += -L"$(ERL_INTERFACE_LIB_DIR)" -lei
+
+# Verbosity.
+
+c_verbose_0 = @echo " C " $(filter-out $(notdir $(MAKEFILE_LIST) $(C_SRC_ENV)),$(^F));
+c_verbose = $(c_verbose_$(V))
+
+cpp_verbose_0 = @echo " CPP " $(filter-out $(notdir $(MAKEFILE_LIST) $(C_SRC_ENV)),$(^F));
+cpp_verbose = $(cpp_verbose_$(V))
+
+link_verbose_0 = @echo " LD " $(@F);
+link_verbose = $(link_verbose_$(V))
+
+# Targets.
+
+ifeq ($(wildcard $(C_SRC_DIR)),)
+else ifneq ($(wildcard $(C_SRC_DIR)/Makefile),)
+app:: app-c_src
+
+test-build:: app-c_src
+
+app-c_src:
+ $(MAKE) -C $(C_SRC_DIR)
+
+clean::
+ $(MAKE) -C $(C_SRC_DIR) clean
+
+else
+
+ifeq ($(SOURCES),)
+SOURCES := $(sort $(foreach pat,*.c *.C *.cc *.cpp,$(call core_find,$(C_SRC_DIR)/,$(pat))))
+endif
+OBJECTS = $(addsuffix .o, $(basename $(SOURCES)))
+
+COMPILE_C = $(c_verbose) $(CC) $(CFLAGS) $(CPPFLAGS) -c
+COMPILE_CPP = $(cpp_verbose) $(CXX) $(CXXFLAGS) $(CPPFLAGS) -c
+
+app:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
+
+test-build:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
+
+$(C_SRC_OUTPUT_FILE): $(OBJECTS)
+ $(verbose) mkdir -p $(dir $@)
+ $(link_verbose) $(CC) $(OBJECTS) \
+ $(LDFLAGS) $(if $(filter $(C_SRC_TYPE),shared),-shared) $(LDLIBS) \
+ -o $(C_SRC_OUTPUT_FILE)
+
+$(OBJECTS): $(MAKEFILE_LIST) $(C_SRC_ENV)
+
+%.o: %.c
+ $(COMPILE_C) $(OUTPUT_OPTION) $<
+
+%.o: %.cc
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+%.o: %.C
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+%.o: %.cpp
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+clean:: clean-c_src
+
+clean-c_src:
+ $(gen_verbose) rm -f $(C_SRC_OUTPUT_FILE) $(OBJECTS)
+
+endif
+
+ifneq ($(wildcard $(C_SRC_DIR)),)
+ERL_ERTS_DIR = $(shell $(ERL) -eval 'io:format("~s~n", [code:lib_dir(erts)]), halt().')
+
+$(C_SRC_ENV):
+ $(verbose) $(ERL) -eval "file:write_file(\"$(call core_native_path,$(C_SRC_ENV))\", \
+ io_lib:format( \
+ \"# Generated by Erlang.mk. Edit at your own risk!~n~n\" \
+ \"ERTS_INCLUDE_DIR ?= ~s/erts-~s/include/~n\" \
+ \"ERL_INTERFACE_INCLUDE_DIR ?= ~s~n\" \
+ \"ERL_INTERFACE_LIB_DIR ?= ~s~n\" \
+ \"ERTS_DIR ?= $(ERL_ERTS_DIR)~n\", \
+ [code:root_dir(), erlang:system_info(version), \
+ code:lib_dir(erl_interface, include), \
+ code:lib_dir(erl_interface, lib)])), \
+ halt()."
+
+distclean:: distclean-c_src-env
+
+distclean-c_src-env:
+ $(gen_verbose) rm -f $(C_SRC_ENV)
+
+-include $(C_SRC_ENV)
+
+ifneq ($(ERL_ERTS_DIR),$(ERTS_DIR))
+$(shell rm -f $(C_SRC_ENV))
+endif
+endif
+
+# Templates.
+
+define bs_c_nif
+#include "erl_nif.h"
+
+static int loads = 0;
+
+static int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
+{
+ /* Initialize private data. */
+ *priv_data = NULL;
+
+ loads++;
+
+ return 0;
+}
+
+static int upgrade(ErlNifEnv* env, void** priv_data, void** old_priv_data, ERL_NIF_TERM load_info)
+{
+ /* Convert the private data to the new version. */
+ *priv_data = *old_priv_data;
+
+ loads++;
+
+ return 0;
+}
+
+static void unload(ErlNifEnv* env, void* priv_data)
+{
+ if (loads == 1) {
+ /* Destroy the private data. */
+ }
+
+ loads--;
+}
+
+static ERL_NIF_TERM hello(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ if (enif_is_atom(env, argv[0])) {
+ return enif_make_tuple2(env,
+ enif_make_atom(env, "hello"),
+ argv[0]);
+ }
+
+ return enif_make_tuple2(env,
+ enif_make_atom(env, "error"),
+ enif_make_atom(env, "badarg"));
+}
+
+static ErlNifFunc nif_funcs[] = {
+ {"hello", 1, hello}
+};
+
+ERL_NIF_INIT($n, nif_funcs, load, NULL, upgrade, unload)
+endef
+
+define bs_erl_nif
+-module($n).
+
+-export([hello/1]).
+
+-on_load(on_load/0).
+on_load() ->
+ PrivDir = case code:priv_dir(?MODULE) of
+ {error, _} ->
+ AppPath = filename:dirname(filename:dirname(code:which(?MODULE))),
+ filename:join(AppPath, "priv");
+ Path ->
+ Path
+ end,
+ erlang:load_nif(filename:join(PrivDir, atom_to_list(?MODULE)), 0).
+
+hello(_) ->
+ erlang:nif_error({not_loaded, ?MODULE}).
+endef
+
+new-nif:
+ifneq ($(wildcard $(C_SRC_DIR)/$n.c),)
+ $(error Error: $(C_SRC_DIR)/$n.c already exists)
+endif
+ifneq ($(wildcard src/$n.erl),)
+ $(error Error: src/$n.erl already exists)
+endif
+ifndef n
+ $(error Usage: $(MAKE) new-nif n=NAME [in=APP])
+endif
+ifdef in
+ $(verbose) $(MAKE) -C $(APPS_DIR)/$(in)/ new-nif n=$n in=
+else
+ $(verbose) mkdir -p $(C_SRC_DIR) src/
+ $(verbose) $(call core_render,bs_c_nif,$(C_SRC_DIR)/$n.c)
+ $(verbose) $(call core_render,bs_erl_nif,src/$n.erl)
+endif
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: ci ci-prepare ci-setup
+
+CI_OTP ?=
+CI_HIPE ?=
+CI_ERLLVM ?=
+
+ifeq ($(CI_VM),native)
+ERLC_OPTS += +native
+TEST_ERLC_OPTS += +native
+else ifeq ($(CI_VM),erllvm)
+ERLC_OPTS += +native +'{hipe, [to_llvm]}'
+TEST_ERLC_OPTS += +native +'{hipe, [to_llvm]}'
+endif
+
+ifeq ($(strip $(CI_OTP) $(CI_HIPE) $(CI_ERLLVM)),)
+ci::
+else
+
+ci:: $(addprefix ci-,$(CI_OTP) $(addsuffix -native,$(CI_HIPE)) $(addsuffix -erllvm,$(CI_ERLLVM)))
+
+ci-prepare: $(addprefix $(KERL_INSTALL_DIR)/,$(CI_OTP) $(addsuffix -native,$(CI_HIPE)))
+
+ci-setup::
+ $(verbose) :
+
+ci-extra::
+ $(verbose) :
+
+ci_verbose_0 = @echo " CI " $(1);
+ci_verbose = $(ci_verbose_$(V))
+
+define ci_target
+ci-$1: $(KERL_INSTALL_DIR)/$2
+ $(verbose) $(MAKE) --no-print-directory clean
+ $(ci_verbose) \
+ PATH="$(KERL_INSTALL_DIR)/$2/bin:$(PATH)" \
+ CI_OTP_RELEASE="$1" \
+ CT_OPTS="-label $1" \
+ CI_VM="$3" \
+ $(MAKE) ci-setup tests
+ $(verbose) $(MAKE) --no-print-directory ci-extra
+endef
+
+$(foreach otp,$(CI_OTP),$(eval $(call ci_target,$(otp),$(otp),otp)))
+$(foreach otp,$(CI_HIPE),$(eval $(call ci_target,$(otp)-native,$(otp)-native,native)))
+$(foreach otp,$(CI_ERLLVM),$(eval $(call ci_target,$(otp)-erllvm,$(otp)-native,erllvm)))
+
+$(foreach otp,$(filter-out $(ERLANG_OTP),$(CI_OTP)),$(eval $(call kerl_otp_target,$(otp))))
+$(foreach otp,$(filter-out $(ERLANG_HIPE),$(sort $(CI_HIPE) $(CI_ERLLVM))),$(eval $(call kerl_hipe_target,$(otp))))
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Continuous Integration targets:" \
+ " ci Run '$(MAKE) tests' on all configured Erlang versions." \
+ "" \
+ "The CI_OTP variable must be defined with the Erlang versions" \
+ "that must be tested. For example: CI_OTP = OTP-17.3.4 OTP-17.5.3"
+
+endif
+
+# Copyright (c) 2020, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifdef CONCUERROR_TESTS
+
+.PHONY: concuerror distclean-concuerror
+
+# Configuration
+
+CONCUERROR_LOGS_DIR ?= $(CURDIR)/logs
+CONCUERROR_OPTS ?=
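+
+# Tests are listed as module:function pairs, with hypothetical names:
+#
+#     CONCUERROR_TESTS = my_mod:my_test other_mod:other_test
+#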
+
+# Core targets.
+
+check:: concuerror
+
+ifndef KEEP_LOGS
+distclean:: distclean-concuerror
+endif
+
+# Plugin-specific targets.
+
+$(ERLANG_MK_TMP)/Concuerror/bin/concuerror: | $(ERLANG_MK_TMP)
+ $(verbose) git clone https://github.com/parapluu/Concuerror $(ERLANG_MK_TMP)/Concuerror
+ $(verbose) $(MAKE) -C $(ERLANG_MK_TMP)/Concuerror
+
+$(CONCUERROR_LOGS_DIR):
+ $(verbose) mkdir -p $(CONCUERROR_LOGS_DIR)
+
+define concuerror_html_report
+<!DOCTYPE html>
+<html lang="en">
+<head>
+<meta charset="utf-8">
+<title>Concuerror HTML report</title>
+</head>
+<body>
+<h1>Concuerror HTML report</h1>
+<p>Generated on $(concuerror_date)</p>
+<ul>
+$(foreach t,$(concuerror_targets),<li><a href="$(t).txt">$(t)</a></li>)
+</ul>
+</body>
+</html>
+endef
+
+concuerror: $(addprefix concuerror-,$(subst :,-,$(CONCUERROR_TESTS)))
+ $(eval concuerror_date := $(shell date))
+ $(eval concuerror_targets := $^)
+ $(verbose) $(call core_render,concuerror_html_report,$(CONCUERROR_LOGS_DIR)/concuerror.html)
+
+define concuerror_target
+.PHONY: concuerror-$1-$2
+
+concuerror-$1-$2: test-build | $(ERLANG_MK_TMP)/Concuerror/bin/concuerror $(CONCUERROR_LOGS_DIR)
+ $(ERLANG_MK_TMP)/Concuerror/bin/concuerror \
+ --pa $(CURDIR)/ebin --pa $(TEST_DIR) \
+ -o $(CONCUERROR_LOGS_DIR)/concuerror-$1-$2.txt \
+ $$(CONCUERROR_OPTS) -m $1 -t $2
+endef
+
+$(foreach test,$(CONCUERROR_TESTS),$(eval $(call concuerror_target,$(firstword $(subst :, ,$(test))),$(lastword $(subst :, ,$(test))))))
+
+distclean-concuerror:
+ $(gen_verbose) rm -rf $(CONCUERROR_LOGS_DIR)
+
+endif
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: ct apps-ct distclean-ct
+
+# Configuration.
+
+CT_OPTS ?=
+
+ifneq ($(wildcard $(TEST_DIR)),)
+ifndef CT_SUITES
+CT_SUITES := $(sort $(subst _SUITE.erl,,$(notdir $(call core_find,$(TEST_DIR)/,*_SUITE.erl))))
+endif
+endif
+CT_SUITES ?=
+CT_LOGS_DIR ?= $(CURDIR)/logs
+
+# Core targets.
+
+tests:: ct
+
+ifndef KEEP_LOGS
+distclean:: distclean-ct
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Common_test targets:" \
+ " ct Run all the common_test suites for this project" \
+ "" \
+ "All your common_test suites have their associated targets." \
+ "A suite named http_SUITE can be ran using the ct-http target."
+
+# Plugin-specific targets.
+
+CT_RUN = ct_run \
+ -no_auto_compile \
+ -noinput \
+ -pa $(CURDIR)/ebin $(TEST_DIR) \
+ -dir $(TEST_DIR) \
+ -logdir $(CT_LOGS_DIR)
+
+ifeq ($(CT_SUITES),)
+ct: $(if $(IS_APP)$(ROOT_DIR),,apps-ct)
+else
+# We do not run tests if we are in an apps/* with no test directory.
+ifneq ($(IS_APP)$(wildcard $(TEST_DIR)),1)
+ct: test-build $(if $(IS_APP)$(ROOT_DIR),,apps-ct)
+ $(verbose) mkdir -p $(CT_LOGS_DIR)
+ $(gen_verbose) $(CT_RUN) -sname ct_$(PROJECT) -suite $(addsuffix _SUITE,$(CT_SUITES)) $(CT_OPTS)
+endif
+endif
+
+ifneq ($(ALL_APPS_DIRS),)
+define ct_app_target
+apps-ct-$1: test-build
+ $$(MAKE) -C $1 ct IS_APP=1
+endef
+
+$(foreach app,$(ALL_APPS_DIRS),$(eval $(call ct_app_target,$(app))))
+
+apps-ct: $(addprefix apps-ct-,$(ALL_APPS_DIRS))
+endif
+
+ifdef t
+ifeq (,$(findstring :,$t))
+CT_EXTRA = -group $t
+else
+t_words = $(subst :, ,$t)
+CT_EXTRA = -group $(firstword $(t_words)) -case $(lastword $(t_words))
+endif
+else
+ifdef c
+CT_EXTRA = -case $c
+else
+CT_EXTRA =
+endif
+endif
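+
+# A usage sketch, assuming a suite http_SUITE with a (hypothetical)
+# group read_ops containing a case get_two:
+#
+#     $ make ct-http t=read_ops          # one group
+#     $ make ct-http t=read_ops:get_two  # one case within a group
+#     $ make ct-http c=get_two           # one case
+#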
+
+define ct_suite_target
+ct-$(1): test-build
+ $(verbose) mkdir -p $(CT_LOGS_DIR)
+ $(gen_verbose_esc) $(CT_RUN) -sname ct_$(PROJECT) -suite $(addsuffix _SUITE,$(1)) $(CT_EXTRA) $(CT_OPTS)
+endef
+
+$(foreach test,$(CT_SUITES),$(eval $(call ct_suite_target,$(test))))
+
+distclean-ct:
+ $(gen_verbose) rm -rf $(CT_LOGS_DIR)
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: plt distclean-plt dialyze
+
+# Configuration.
+
+DIALYZER_PLT ?= $(CURDIR)/.$(PROJECT).plt
+export DIALYZER_PLT
+
+PLT_APPS ?=
+DIALYZER_DIRS ?= --src -r $(wildcard src) $(ALL_APPS_DIRS)
+DIALYZER_OPTS ?= -Werror_handling -Wrace_conditions -Wunmatched_returns # -Wunderspecs
+DIALYZER_PLT_OPTS ?=
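+
+# For example, to include additional OTP applications in the PLT
+# (a sketch; pick the applications your project actually uses):
+#
+#     PLT_APPS = crypto public_key ssl
+#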
+
+# Core targets.
+
+check:: dialyze
+
+distclean:: distclean-plt
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Dialyzer targets:" \
+ " plt Build a PLT file for this project" \
+ " dialyze Analyze the project using Dialyzer"
+
+# Plugin-specific targets.
+
+define filter_opts.erl
+ Opts = init:get_plain_arguments(),
+ {Filtered, _} = lists:foldl(fun
+ (O, {Os, true}) -> {[O|Os], false};
+ (O = "-D", {Os, _}) -> {[O|Os], true};
+ (O = [\\$$-, \\$$D, _ | _], {Os, _}) -> {[O|Os], false};
+ (O = "-I", {Os, _}) -> {[O|Os], true};
+ (O = [\\$$-, \\$$I, _ | _], {Os, _}) -> {[O|Os], false};
+ (O = "-pa", {Os, _}) -> {[O|Os], true};
+ (_, Acc) -> Acc
+ end, {[], false}, Opts),
+ io:format("~s~n", [string:join(lists:reverse(Filtered), " ")]),
+ halt().
+endef
+
+# DIALYZER_PLT is a variable understood directly by Dialyzer.
+#
+# We append the path to erts at the end of the PLT. This works
+# because the PLT file is in the external term format and the
+# function binary_to_term/1 ignores any trailing data.
+$(DIALYZER_PLT): deps app
+ $(eval DEPS_LOG := $(shell test -f $(ERLANG_MK_TMP)/deps.log && \
+ while read p; do test -d $$p/ebin && echo $$p/ebin; done <$(ERLANG_MK_TMP)/deps.log))
+ $(verbose) dialyzer --build_plt $(DIALYZER_PLT_OPTS) --apps \
+ erts kernel stdlib $(PLT_APPS) $(OTP_DEPS) $(LOCAL_DEPS) $(DEPS_LOG) || test $$? -eq 2
+ $(verbose) $(ERL) -eval 'io:format("~n~s~n", [code:lib_dir(erts)]), halt().' >> $@
+
+plt: $(DIALYZER_PLT)
+
+distclean-plt:
+ $(gen_verbose) rm -f $(DIALYZER_PLT)
+
+ifneq ($(wildcard $(DIALYZER_PLT)),)
+dialyze: $(if $(filter --src,$(DIALYZER_DIRS)),,deps app)
+ $(verbose) if ! tail -n1 $(DIALYZER_PLT) | \
+ grep -q "^`$(ERL) -eval 'io:format("~s", [code:lib_dir(erts)]), halt().'`$$"; then \
+ rm $(DIALYZER_PLT); \
+ $(MAKE) plt; \
+ fi
+else
+dialyze: $(DIALYZER_PLT)
+endif
+ $(verbose) dialyzer --no_native `$(ERL) \
+ -eval "$(subst $(newline),,$(call escape_dquotes,$(call filter_opts.erl)))" \
+ -extra $(ERLC_OPTS)` $(DIALYZER_DIRS) $(DIALYZER_OPTS) $(if $(wildcard ebin/),-pa ebin/)
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-edoc edoc
+
+# Configuration.
+
+EDOC_OPTS ?=
+EDOC_SRC_DIRS ?=
+EDOC_OUTPUT ?= doc
+
+define edoc.erl
+ SrcPaths = lists:foldl(fun(P, Acc) ->
+ filelib:wildcard(atom_to_list(P) ++ "/{src,c_src}") ++ Acc
+ end, [], [$(call comma_list,$(patsubst %,'%',$(call core_native_path,$(EDOC_SRC_DIRS))))]),
+ DefaultOpts = [{dir, "$(EDOC_OUTPUT)"}, {source_path, SrcPaths}, {subpackages, false}],
+ edoc:application($(1), ".", [$(2)] ++ DefaultOpts),
+ halt(0).
+endef
+
+# Core targets.
+
+ifneq ($(strip $(EDOC_SRC_DIRS)$(wildcard doc/overview.edoc)),)
+docs:: edoc
+endif
+
+distclean:: distclean-edoc
+
+# Plugin-specific targets.
+
+edoc: distclean-edoc doc-deps
+ $(gen_verbose) $(call erlang,$(call edoc.erl,$(PROJECT),$(EDOC_OPTS)))
+
+distclean-edoc:
+ $(gen_verbose) rm -f $(EDOC_OUTPUT)/*.css $(EDOC_OUTPUT)/*.html $(EDOC_OUTPUT)/*.png $(EDOC_OUTPUT)/edoc-info
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Configuration.
+
+DTL_FULL_PATH ?=
+DTL_PATH ?= templates/
+DTL_PREFIX ?=
+DTL_SUFFIX ?= _dtl
+DTL_OPTS ?=
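+
+# With these defaults, templates/my_template.dtl compiles to the module
+# my_template_dtl (ebin/my_template_dtl.beam). When DTL_FULL_PATH is
+# non-empty, subdirectories become part of the module name, so
+# users/show.dtl compiles to users_show_dtl.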
+
+# Verbosity.
+
+dtl_verbose_0 = @echo " DTL " $(filter %.dtl,$(?F));
+dtl_verbose = $(dtl_verbose_$(V))
+
+# Core targets.
+
+DTL_PATH := $(abspath $(DTL_PATH))
+DTL_FILES := $(sort $(call core_find,$(DTL_PATH),*.dtl))
+
+ifneq ($(DTL_FILES),)
+
+DTL_NAMES = $(addprefix $(DTL_PREFIX),$(addsuffix $(DTL_SUFFIX),$(DTL_FILES:$(DTL_PATH)/%.dtl=%)))
+DTL_MODULES = $(if $(DTL_FULL_PATH),$(subst /,_,$(DTL_NAMES)),$(notdir $(DTL_NAMES)))
+BEAM_FILES += $(addsuffix .beam,$(addprefix ebin/,$(DTL_MODULES)))
+
+ifneq ($(words $(DTL_FILES)),0)
+# Rebuild templates when the Makefile changes.
+$(ERLANG_MK_TMP)/last-makefile-change-erlydtl: $(MAKEFILE_LIST) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(DTL_FILES); \
+ fi
+ $(verbose) touch $@
+
+ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change-erlydtl
+endif
+
+define erlydtl_compile.erl
+ [begin
+ Module0 = case "$(strip $(DTL_FULL_PATH))" of
+ "" ->
+ filename:basename(F, ".dtl");
+ _ ->
+ "$(call core_native_path,$(DTL_PATH))/" ++ F2 = filename:rootname(F, ".dtl"),
+ re:replace(F2, "/", "_", [{return, list}, global])
+ end,
+ Module = list_to_atom("$(DTL_PREFIX)" ++ string:to_lower(Module0) ++ "$(DTL_SUFFIX)"),
+ case erlydtl:compile(F, Module, [$(DTL_OPTS)] ++ [{out_dir, "ebin/"}, return_errors]) of
+ ok -> ok;
+ {ok, _} -> ok
+ end
+ end || F <- string:tokens("$(1)", " ")],
+ halt().
+endef
+
+ebin/$(PROJECT).app:: $(DTL_FILES) | ebin/
+ $(if $(strip $?),\
+ $(dtl_verbose) $(call erlang,$(call erlydtl_compile.erl,$(call core_native_path,$?)),\
+ -pa ebin/))
+
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, Dave Cottlehuber <dch@skunkwerks.at>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-escript escript escript-zip
+
+# Configuration.
+
+ESCRIPT_NAME ?= $(PROJECT)
+ESCRIPT_FILE ?= $(ESCRIPT_NAME)
+
+ESCRIPT_SHEBANG ?= /usr/bin/env escript
+ESCRIPT_COMMENT ?= This is an -*- erlang -*- file
+ESCRIPT_EMU_ARGS ?= -escript main $(ESCRIPT_NAME)
+
+ESCRIPT_ZIP ?= 7z a -tzip -mx=9 -mtc=off $(if $(filter-out 0,$(V)),,> /dev/null)
+ESCRIPT_ZIP_FILE ?= $(ERLANG_MK_TMP)/escript.zip
+
+# Core targets.
+
+distclean:: distclean-escript
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Escript targets:" \
+ " escript Build an executable escript archive" \
+
+# Plugin-specific targets.
+
+escript-zip:: FULL=1
+escript-zip:: deps app
+ $(verbose) mkdir -p $(dir $(ESCRIPT_ZIP))
+ $(verbose) rm -f $(ESCRIPT_ZIP_FILE)
+ $(gen_verbose) cd .. && $(ESCRIPT_ZIP) $(ESCRIPT_ZIP_FILE) $(PROJECT)/ebin/*
+ifneq ($(DEPS),)
+ $(verbose) cd $(DEPS_DIR) && $(ESCRIPT_ZIP) $(ESCRIPT_ZIP_FILE) \
+ $(subst $(DEPS_DIR)/,,$(addsuffix /*,$(wildcard \
+ $(addsuffix /ebin,$(shell cat $(ERLANG_MK_TMP)/deps.log)))))
+endif
+
+escript:: escript-zip
+ $(gen_verbose) printf "%s\n" \
+ "#!$(ESCRIPT_SHEBANG)" \
+ "%% $(ESCRIPT_COMMENT)" \
+ "%%! $(ESCRIPT_EMU_ARGS)" > $(ESCRIPT_FILE)
+ $(verbose) cat $(ESCRIPT_ZIP_FILE) >> $(ESCRIPT_FILE)
+ $(verbose) chmod +x $(ESCRIPT_FILE)
+
+distclean-escript:
+ $(gen_verbose) rm -f $(ESCRIPT_FILE)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, Enrique Fernandez <enrique.fernandez@erlang-solutions.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: eunit apps-eunit
+
+# Configuration
+
+EUNIT_OPTS ?=
+EUNIT_ERL_OPTS ?=
+
+# Core targets.
+
+tests:: eunit
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "EUnit targets:" \
+ " eunit Run all the EUnit tests for this project"
+
+# Plugin-specific targets.
+
+define eunit.erl
+ $(call cover.erl)
+ CoverSetup(),
+ case eunit:test($1, [$(EUNIT_OPTS)]) of
+ ok -> ok;
+ error -> halt(2)
+ end,
+ CoverExport("$(call core_native_path,$(COVER_DATA_DIR))/eunit.coverdata"),
+ halt()
+endef
+
+EUNIT_ERL_OPTS += -pa $(TEST_DIR) $(CURDIR)/ebin
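+
+# A usage sketch with hypothetical names; t= selects what gets run:
+#
+#     $ make eunit                    # every EUnit test
+#     $ make eunit t=my_mod           # one module
+#     $ make eunit t=my_mod:my_test   # one exported zero-arity test function
+#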
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+eunit: test-build cover-data-dir
+ $(gen_verbose) $(call erlang,$(call eunit.erl,['$(t)']),$(EUNIT_ERL_OPTS))
+else
+eunit: test-build cover-data-dir
+ $(gen_verbose) $(call erlang,$(call eunit.erl,fun $(t)/0),$(EUNIT_ERL_OPTS))
+endif
+else
+EUNIT_EBIN_MODS = $(notdir $(basename $(ERL_FILES) $(BEAM_FILES)))
+EUNIT_TEST_MODS = $(notdir $(basename $(call core_find,$(TEST_DIR)/,*.erl)))
+
+EUNIT_MODS = $(foreach mod,$(EUNIT_EBIN_MODS) $(filter-out \
+ $(patsubst %,%_tests,$(EUNIT_EBIN_MODS)),$(EUNIT_TEST_MODS)),'$(mod)')
+
+eunit: test-build $(if $(IS_APP)$(ROOT_DIR),,apps-eunit) cover-data-dir
+ifneq ($(wildcard src/ $(TEST_DIR)),)
+ $(gen_verbose) $(call erlang,$(call eunit.erl,[$(call comma_list,$(EUNIT_MODS))]),$(EUNIT_ERL_OPTS))
+endif
+
+ifneq ($(ALL_APPS_DIRS),)
+apps-eunit: test-build
+ $(verbose) eunit_retcode=0 ; for app in $(ALL_APPS_DIRS); do $(MAKE) -C $$app eunit IS_APP=1; \
+ [ $$? -ne 0 ] && eunit_retcode=1 ; done ; \
+ exit $$eunit_retcode
+endif
+endif
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter proper,$(DEPS) $(TEST_DEPS)),proper)
+.PHONY: proper
+
+# Targets.
+
+tests:: proper
+
+define proper_check.erl
+ $(call cover.erl)
+ code:add_pathsa([
+ "$(call core_native_path,$(CURDIR)/ebin)",
+ "$(call core_native_path,$(DEPS_DIR)/*/ebin)",
+ "$(call core_native_path,$(TEST_DIR))"]),
+ Module = fun(M) ->
+ [true] =:= lists:usort([
+ case atom_to_list(F) of
+ "prop_" ++ _ ->
+ io:format("Testing ~p:~p/0~n", [M, F]),
+ proper:quickcheck(M:F(), nocolors);
+ _ ->
+ true
+ end
+ || {F, 0} <- M:module_info(exports)])
+ end,
+ try begin
+ CoverSetup(),
+ Res = case $(1) of
+ all -> [true] =:= lists:usort([Module(M) || M <- [$(call comma_list,$(3))]]);
+ module -> Module($(2));
+ function -> proper:quickcheck($(2), nocolors)
+ end,
+ CoverExport("$(COVER_DATA_DIR)/proper.coverdata"),
+ Res
+ end of
+ true -> halt(0);
+ _ -> halt(1)
+ catch error:undef ->
+ io:format("Undefined property or module?~n~p~n", [erlang:get_stacktrace()]),
+ halt(0)
+ end.
+endef
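+
+# A usage sketch with hypothetical names; properties are zero-arity
+# functions whose names start with prop_:
+#
+#     $ make proper                         # every property of every module
+#     $ make proper t=prop_my_mod           # every property of one module
+#     $ make proper t=prop_my_mod:prop_foo  # a single property
+#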
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+proper: test-build cover-data-dir
+ $(verbose) $(call erlang,$(call proper_check.erl,module,$(t)))
+else
+proper: test-build cover-data-dir
+ $(verbose) echo Testing $(t)/0
+ $(verbose) $(call erlang,$(call proper_check.erl,function,$(t)()))
+endif
+else
+proper: test-build cover-data-dir
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(wildcard ebin/*.beam) $(call core_find,$(TEST_DIR)/,*.beam))))))
+ $(gen_verbose) $(call erlang,$(call proper_check.erl,all,undefined,$(MODULES)))
+endif
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Verbosity.
+
+proto_verbose_0 = @echo " PROTO " $(filter %.proto,$(?F));
+proto_verbose = $(proto_verbose_$(V))
+
+# Core targets.
+
+ifneq ($(wildcard src/),)
+ifneq ($(filter gpb protobuffs,$(BUILD_DEPS) $(DEPS)),)
+PROTO_FILES := $(filter %.proto,$(ALL_SRC_FILES))
+ERL_FILES += $(addprefix src/,$(patsubst %.proto,%_pb.erl,$(notdir $(PROTO_FILES))))
+
+ifeq ($(PROTO_FILES),)
+$(ERLANG_MK_TMP)/last-makefile-change-protobuffs:
+ $(verbose) :
+else
+# Rebuild proto files when the Makefile changes.
+# We exclude $(PROJECT).d to avoid a circular dependency.
+$(ERLANG_MK_TMP)/last-makefile-change-protobuffs: $(filter-out $(PROJECT).d,$(MAKEFILE_LIST)) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(PROTO_FILES); \
+ fi
+ $(verbose) touch $@
+
+$(PROJECT).d:: $(ERLANG_MK_TMP)/last-makefile-change-protobuffs
+endif
+
+ifeq ($(filter gpb,$(BUILD_DEPS) $(DEPS)),)
+define compile_proto.erl
+ [begin
+ protobuffs_compile:generate_source(F, [
+ {output_include_dir, "./include"},
+ {output_src_dir, "./src"}])
+ end || F <- string:tokens("$1", " ")],
+ halt().
+endef
+else
+define compile_proto.erl
+ [begin
+ gpb_compile:file(F, [
+ {include_as_lib, true},
+ {module_name_suffix, "_pb"},
+ {o_hrl, "./include"},
+ {o_erl, "./src"}])
+ end || F <- string:tokens("$1", " ")],
+ halt().
+endef
+endif
+
+ifneq ($(PROTO_FILES),)
+$(PROJECT).d:: $(PROTO_FILES)
+ $(verbose) mkdir -p ebin/ include/
+ $(if $(strip $?),$(proto_verbose) $(call erlang,$(call compile_proto.erl,$?)))
+endif
+endif
+endif
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: relx-rel relx-relup distclean-relx-rel run
+
+# Configuration.
+
+RELX ?= $(ERLANG_MK_TMP)/relx
+RELX_CONFIG ?= $(CURDIR)/relx.config
+
+RELX_URL ?= https://erlang.mk/res/relx-v3.27.0
+RELX_OPTS ?=
+RELX_OUTPUT_DIR ?= _rel
+RELX_REL_EXT ?=
+RELX_TAR ?= 1
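+
+# For example, to change the output directory (the -o value is picked
+# up as RELX_OUTPUT_DIR below) and skip the tarball:
+#
+#     RELX_OPTS = -o my_rel
+#     RELX_TAR = 0
+#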
+
+ifdef SFX
+ RELX_TAR = 1
+endif
+
+ifeq ($(firstword $(RELX_OPTS)),-o)
+ RELX_OUTPUT_DIR = $(word 2,$(RELX_OPTS))
+else
+ RELX_OPTS += -o $(RELX_OUTPUT_DIR)
+endif
+
+# Core targets.
+
+ifeq ($(IS_DEP),)
+ifneq ($(wildcard $(RELX_CONFIG)),)
+rel:: relx-rel
+
+relup:: relx-relup
+endif
+endif
+
+distclean:: distclean-relx-rel
+
+# Plugin-specific targets.
+
+$(RELX): | $(ERLANG_MK_TMP)
+ $(gen_verbose) $(call core_http_get,$(RELX),$(RELX_URL))
+ $(verbose) chmod +x $(RELX)
+
+relx-rel: $(RELX) rel-deps app
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) release
+ $(verbose) $(MAKE) relx-post-rel
+ifeq ($(RELX_TAR),1)
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) tar
+endif
+
+relx-relup: $(RELX) rel-deps app
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) release
+ $(MAKE) relx-post-rel
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) relup $(if $(filter 1,$(RELX_TAR)),tar)
+
+distclean-relx-rel:
+ $(gen_verbose) rm -rf $(RELX_OUTPUT_DIR)
+
+# Default hooks.
+relx-post-rel::
+ $(verbose) :
+
+# Run target.
+
+ifeq ($(wildcard $(RELX_CONFIG)),)
+run::
+else
+
+define get_relx_release.erl
+ {ok, Config} = file:consult("$(call core_native_path,$(RELX_CONFIG))"),
+ {release, {Name, Vsn0}, _} = lists:keyfind(release, 1, Config),
+ Vsn = case Vsn0 of
+ {cmd, Cmd} -> os:cmd(Cmd);
+ semver -> "";
+ {semver, _} -> "";
+ VsnStr -> Vsn0
+ end,
+ Extended = case lists:keyfind(extended_start_script, 1, Config) of
+ {_, true} -> "1";
+ _ -> ""
+ end,
+ io:format("~s ~s ~s", [Name, Vsn, Extended]),
+ halt(0).
+endef
+
+RELX_REL := $(shell $(call erlang,$(get_relx_release.erl)))
+RELX_REL_NAME := $(word 1,$(RELX_REL))
+RELX_REL_VSN := $(word 2,$(RELX_REL))
+RELX_REL_CMD := $(if $(word 3,$(RELX_REL)),console)
+
+ifeq ($(PLATFORM),msys2)
+RELX_REL_EXT := .cmd
+endif
+
+run:: all
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) $(RELX_REL_CMD)
+
+ifdef RELOAD
+rel::
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) ping
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) \
+ eval "io:format(\"~p~n\", [c:lm()])"
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Relx targets:" \
+ " run Compile the project, build the release and run it"
+
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, M Robert Martin <rob@version2beta.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: shell
+
+# Configuration.
+
+SHELL_ERL ?= erl
+SHELL_PATHS ?= $(CURDIR)/ebin $(TEST_DIR)
+SHELL_OPTS ?=
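+
+# For example, to start the project application on shell startup
+# (a sketch; any regular erl flags can be passed here):
+#
+#     SHELL_OPTS = -eval 'application:ensure_all_started($(PROJECT)).'
+#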
+
+ALL_SHELL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(SHELL_DEPS))
+
+# Core targets
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Shell targets:" \
+ " shell Run an erlang shell with SHELL_OPTS or reasonable default"
+
+# Plugin-specific targets.
+
+$(foreach dep,$(SHELL_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+build-shell-deps:
+else
+build-shell-deps: $(ALL_SHELL_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_SHELL_DEPS_DIRS) ; do \
+ if [ -z "$(strip $(FULL))" ] && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ else \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ fi \
+ done
+endif
+
+shell:: build-shell-deps
+ $(gen_verbose) $(SHELL_ERL) -pa $(SHELL_PATHS) $(SHELL_OPTS)
+
+# Copyright 2017, Stanislaw Klekot <dozzie@jarowit.net>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-sphinx sphinx
+
+# Configuration.
+
+SPHINX_BUILD ?= sphinx-build
+SPHINX_SOURCE ?= doc
+SPHINX_CONFDIR ?=
+SPHINX_FORMATS ?= html
+SPHINX_DOCTREES ?= $(ERLANG_MK_TMP)/sphinx.doctrees
+SPHINX_OPTS ?=
+
+#sphinx_html_opts =
+#sphinx_html_output = html
+#sphinx_man_opts =
+#sphinx_man_output = man
+#sphinx_latex_opts =
+#sphinx_latex_output = latex
+
+# Helpers.
+
+sphinx_build_0 = @echo " SPHINX" $1; $(SPHINX_BUILD) -N -q
+sphinx_build_1 = $(SPHINX_BUILD) -N
+sphinx_build_2 = set -x; $(SPHINX_BUILD)
+sphinx_build = $(sphinx_build_$(V))
+
+define sphinx.build
+$(call sphinx_build,$1) -b $1 -d $(SPHINX_DOCTREES) $(if $(SPHINX_CONFDIR),-c $(SPHINX_CONFDIR)) $(SPHINX_OPTS) $(sphinx_$1_opts) -- $(SPHINX_SOURCE) $(call sphinx.output,$1)
+
+endef
+
+define sphinx.output
+$(if $(sphinx_$1_output),$(sphinx_$1_output),$1)
+endef
+
+# Targets.
+
+ifneq ($(wildcard $(if $(SPHINX_CONFDIR),$(SPHINX_CONFDIR),$(SPHINX_SOURCE))/conf.py),)
+docs:: sphinx
+distclean:: distclean-sphinx
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Sphinx targets:" \
+ " sphinx Generate Sphinx documentation." \
+ "" \
+ "ReST sources and 'conf.py' file are expected in directory pointed by" \
+ "SPHINX_SOURCE ('doc' by default). SPHINX_FORMATS lists formats to build (only" \
+ "'html' format is generated by default); target directory can be specified by" \
+ 'setting sphinx_$${format}_output, for example: sphinx_html_output = output/html' \
+ "Additional Sphinx options can be set in SPHINX_OPTS."
+
+# Plugin-specific targets.
+
+sphinx:
+ $(foreach F,$(SPHINX_FORMATS),$(call sphinx.build,$F))
+
+distclean-sphinx:
+ $(gen_verbose) rm -rf $(filter-out $(SPHINX_SOURCE),$(foreach F,$(SPHINX_FORMATS),$(call sphinx.output,$F)))
+
+# Copyright (c) 2017, Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: show-ERL_LIBS show-ERLC_OPTS show-TEST_ERLC_OPTS
+
+show-ERL_LIBS:
+ @echo $(ERL_LIBS)
+
+show-ERLC_OPTS:
+ @$(foreach opt,$(ERLC_OPTS) -pa ebin -I include,echo "$(opt)";)
+
+show-TEST_ERLC_OPTS:
+ @$(foreach opt,$(TEST_ERLC_OPTS) -pa ebin -I include,echo "$(opt)";)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter triq,$(DEPS) $(TEST_DEPS)),triq)
+.PHONY: triq
+
+# Targets.
+
+tests:: triq
+
+define triq_check.erl
+ $(call cover.erl)
+ code:add_pathsa([
+ "$(call core_native_path,$(CURDIR)/ebin)",
+ "$(call core_native_path,$(DEPS_DIR)/*/ebin)",
+ "$(call core_native_path,$(TEST_DIR))"]),
+ try begin
+ CoverSetup(),
+ Res = case $(1) of
+ all -> [true] =:= lists:usort([triq:check(M) || M <- [$(call comma_list,$(3))]]);
+ module -> triq:check($(2));
+ function -> triq:check($(2))
+ end,
+ CoverExport("$(COVER_DATA_DIR)/triq.coverdata"),
+ Res
+ end of
+ true -> halt(0);
+ _ -> halt(1)
+ catch error:undef ->
+ io:format("Undefined property or module?~n~p~n", [erlang:get_stacktrace()]),
+ halt(0)
+ end.
+endef
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+triq: test-build cover-data-dir
+ $(verbose) $(call erlang,$(call triq_check.erl,module,$(t)))
+else
+triq: test-build cover-data-dir
+ $(verbose) echo Testing $(t)/0
+ $(verbose) $(call erlang,$(call triq_check.erl,function,$(t)()))
+endif
+else
+triq: test-build cover-data-dir
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(wildcard ebin/*.beam) $(call core_find,$(TEST_DIR)/,*.beam))))))
+ $(gen_verbose) $(call erlang,$(call triq_check.erl,all,undefined,$(MODULES)))
+endif
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Erlang Solutions Ltd.
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: xref distclean-xref
+
+# Configuration.
+
+ifeq ($(XREF_CONFIG),)
+ XREFR_ARGS :=
+else
+ XREFR_ARGS := -c $(XREF_CONFIG)
+endif
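+
+# For example, pointing xrefr at a configuration file (hypothetical
+# path):
+#
+#     XREF_CONFIG = $(CURDIR)/xref.config
+#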
+
+XREFR ?= $(CURDIR)/xrefr
+export XREFR
+
+XREFR_URL ?= https://github.com/inaka/xref_runner/releases/download/1.1.0/xrefr
+
+# Core targets.
+
+help::
+ $(verbose) printf '%s\n' '' \
+ 'Xref targets:' \
+ ' xref Run Xrefr using $$XREF_CONFIG as config file if defined'
+
+distclean:: distclean-xref
+
+# Plugin-specific targets.
+
+$(XREFR):
+ $(gen_verbose) $(call core_http_get,$(XREFR),$(XREFR_URL))
+ $(verbose) chmod +x $(XREFR)
+
+xref: deps app $(XREFR)
+ $(gen_verbose) $(XREFR) $(XREFR_ARGS)
+
+distclean-xref:
+ $(gen_verbose) rm -rf $(XREFR)
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Viktor Söderqvist <viktor@zuiderkwast.se>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+COVER_REPORT_DIR ?= cover
+COVER_DATA_DIR ?= $(COVER_REPORT_DIR)
+
+ifdef COVER
+COVER_APPS ?= $(notdir $(ALL_APPS_DIRS))
+COVER_DEPS ?=
+endif
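+
+# A usage sketch: enable coverage for a full test run; the HTML report
+# then ends up in $(COVER_REPORT_DIR)/index.html.
+#
+#     $ make tests COVER=1
+#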
+
+# Code coverage for Common Test.
+
+ifdef COVER
+ifdef CT_RUN
+ifneq ($(wildcard $(TEST_DIR)),)
+test-build:: $(TEST_DIR)/ct.cover.spec
+
+$(TEST_DIR)/ct.cover.spec: cover-data-dir
+ $(gen_verbose) printf "%s\n" \
+ "{incl_app, '$(PROJECT)', details}." \
+ "{incl_dirs, '$(PROJECT)', [\"$(call core_native_path,$(CURDIR)/ebin)\" \
+ $(foreach a,$(COVER_APPS),$(comma) \"$(call core_native_path,$(APPS_DIR)/$a/ebin)\") \
+ $(foreach d,$(COVER_DEPS),$(comma) \"$(call core_native_path,$(DEPS_DIR)/$d/ebin)\")]}." \
+ '{export,"$(call core_native_path,$(abspath $(COVER_DATA_DIR))/ct.coverdata)"}.' > $@
+
+CT_RUN += -cover $(TEST_DIR)/ct.cover.spec
+endif
+endif
+endif
+
+# Code coverage for other tools.
+
+ifdef COVER
+define cover.erl
+ CoverSetup = fun() ->
+ Dirs = ["$(call core_native_path,$(CURDIR)/ebin)"
+ $(foreach a,$(COVER_APPS),$(comma) "$(call core_native_path,$(APPS_DIR)/$a/ebin)")
+ $(foreach d,$(COVER_DEPS),$(comma) "$(call core_native_path,$(DEPS_DIR)/$d/ebin)")],
+ [begin
+ case filelib:is_dir(Dir) of
+ false -> false;
+ true ->
+ case cover:compile_beam_directory(Dir) of
+ {error, _} -> halt(1);
+ _ -> true
+ end
+ end
+ end || Dir <- Dirs]
+ end,
+ CoverExport = fun(Filename) -> cover:export(Filename) end,
+endef
+else
+define cover.erl
+ CoverSetup = fun() -> ok end,
+ CoverExport = fun(_) -> ok end,
+endef
+endif
+
+# Core targets
+
+ifdef COVER
+ifneq ($(COVER_REPORT_DIR),)
+tests::
+ $(verbose) $(MAKE) --no-print-directory cover-report
+endif
+
+cover-data-dir: | $(COVER_DATA_DIR)
+
+$(COVER_DATA_DIR):
+ $(verbose) mkdir -p $(COVER_DATA_DIR)
+else
+cover-data-dir:
+endif
+
+clean:: coverdata-clean
+
+ifneq ($(COVER_REPORT_DIR),)
+distclean:: cover-report-clean
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Cover targets:" \
+ " cover-report Generate a HTML coverage report from previously collected" \
+ " cover data." \
+ " all.coverdata Merge all coverdata files into all.coverdata." \
+ "" \
+ "If COVER=1 is set, coverage data is generated by the targets eunit and ct. The" \
+ "target tests additionally generates a HTML coverage report from the combined" \
+ "coverdata files from each of these testing tools. HTML reports can be disabled" \
+ "by setting COVER_REPORT_DIR to empty."
+
+# Plugin specific targets
+
+COVERDATA = $(filter-out $(COVER_DATA_DIR)/all.coverdata,$(wildcard $(COVER_DATA_DIR)/*.coverdata))
+
+.PHONY: coverdata-clean
+coverdata-clean:
+ $(gen_verbose) rm -f $(COVER_DATA_DIR)/*.coverdata $(TEST_DIR)/ct.cover.spec
+
+# Merge all coverdata files into one.
+define cover_export.erl
+ $(foreach f,$(COVERDATA),cover:import("$(f)") == ok orelse halt(1),)
+ cover:export("$(COVER_DATA_DIR)/$@"), halt(0).
+endef
+
+all.coverdata: $(COVERDATA) cover-data-dir
+ $(gen_verbose) $(call erlang,$(cover_export.erl))
+
+# These are only defined if COVER_REPORT_DIR is non-empty. Set COVER_REPORT_DIR to
+# empty if you want the coverdata files but not the HTML report.
+ifneq ($(COVER_REPORT_DIR),)
+
+.PHONY: cover-report-clean cover-report
+
+cover-report-clean:
+ $(gen_verbose) rm -rf $(COVER_REPORT_DIR)
+ifneq ($(COVER_REPORT_DIR),$(COVER_DATA_DIR))
+ $(if $(shell ls -A $(COVER_DATA_DIR)/),,$(verbose) rmdir $(COVER_DATA_DIR))
+endif
+
+ifeq ($(COVERDATA),)
+cover-report:
+else
+
+# Modules which include eunit.hrl always contain one line without coverage
+# because eunit defines test/0 which is never called. We compensate for this.
+EUNIT_HRL_MODS = $(subst $(space),$(comma),$(shell \
+ grep -H -e '^\s*-include.*include/eunit\.hrl"' src/*.erl \
+ | sed "s/^src\/\(.*\)\.erl:.*/'\1'/" | uniq))
+
+define cover_report.erl
+ $(foreach f,$(COVERDATA),cover:import("$(f)") == ok orelse halt(1),)
+ Ms = cover:imported_modules(),
+ [cover:analyse_to_file(M, "$(COVER_REPORT_DIR)/" ++ atom_to_list(M)
+ ++ ".COVER.html", [html]) || M <- Ms],
+ Report = [begin {ok, R} = cover:analyse(M, module), R end || M <- Ms],
+ EunitHrlMods = [$(EUNIT_HRL_MODS)],
+ Report1 = [{M, {Y, case lists:member(M, EunitHrlMods) of
+ true -> N - 1; false -> N end}} || {M, {Y, N}} <- Report],
+ TotalY = lists:sum([Y || {_, {Y, _}} <- Report1]),
+ TotalN = lists:sum([N || {_, {_, N}} <- Report1]),
+ Perc = fun(Y, N) -> case Y + N of 0 -> 100; S -> round(100 * Y / S) end end,
+ TotalPerc = Perc(TotalY, TotalN),
+ {ok, F} = file:open("$(COVER_REPORT_DIR)/index.html", [write]),
+ io:format(F, "<!DOCTYPE html><html>~n"
+ "<head><meta charset=\"UTF-8\">~n"
+ "<title>Coverage report</title></head>~n"
+ "<body>~n", []),
+ io:format(F, "<h1>Coverage</h1>~n<p>Total: ~p%</p>~n", [TotalPerc]),
+ io:format(F, "<table><tr><th>Module</th><th>Coverage</th></tr>~n", []),
+ [io:format(F, "<tr><td><a href=\"~p.COVER.html\">~p</a></td>"
+ "<td>~p%</td></tr>~n",
+ [M, M, Perc(Y, N)]) || {M, {Y, N}} <- Report1],
+ How = "$(subst $(space),$(comma)$(space),$(basename $(COVERDATA)))",
+ Date = "$(shell date -u "+%Y-%m-%dT%H:%M:%SZ")",
+ io:format(F, "</table>~n"
+ "<p>Generated using ~s and erlang.mk on ~s.</p>~n"
+ "</body></html>", [How, Date]),
+ halt().
+endef
+
+cover-report:
+ $(verbose) mkdir -p $(COVER_REPORT_DIR)
+ $(gen_verbose) $(call erlang,$(cover_report.erl))
+
+endif
+endif # ifneq ($(COVER_REPORT_DIR),)
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: sfx
+
+ifdef RELX_REL
+ifdef SFX
+
+# Configuration.
+
+SFX_ARCHIVE ?= $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/$(RELX_REL_NAME)-$(RELX_REL_VSN).tar.gz
+SFX_OUTPUT_FILE ?= $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME).run
+
+# Core targets.
+
+rel:: sfx
+
+# Plugin-specific targets.
+
+define sfx_stub
+#!/bin/sh
+
+TMPDIR=`mktemp -d`
+ARCHIVE=`awk '/^__ARCHIVE_BELOW__$$/ {print NR + 1; exit 0;}' $$0`
+FILENAME=$$(basename $$0)
+REL=$${FILENAME%.*}
+
+tail -n+$$ARCHIVE $$0 | tar -xzf - -C $$TMPDIR
+
+$$TMPDIR/bin/$$REL console
+RET=$$?
+
+rm -rf $$TMPDIR
+
+exit $$RET
+
+__ARCHIVE_BELOW__
+endef
+
+sfx:
+ $(verbose) $(call core_render,sfx_stub,$(SFX_OUTPUT_FILE))
+ $(gen_verbose) cat $(SFX_ARCHIVE) >> $(SFX_OUTPUT_FILE)
+ $(verbose) chmod +x $(SFX_OUTPUT_FILE)
+
+endif
+endif
+
+# Copyright (c) 2013-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# External plugins.
+
+DEP_PLUGINS ?=
+
+$(foreach p,$(DEP_PLUGINS),\
+ $(eval $(if $(findstring /,$p),\
+ $(call core_dep_plugin,$p,$(firstword $(subst /, ,$p))),\
+ $(call core_dep_plugin,$p/plugins.mk,$p))))
+
+help:: help-plugins
+
+help-plugins::
+ $(verbose) :
+
+# Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015-2016, Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Fetch dependencies recursively (without building them).
+
+.PHONY: fetch-deps fetch-doc-deps fetch-rel-deps fetch-test-deps \
+ fetch-shell-deps
+
+.PHONY: $(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+fetch-deps: $(ERLANG_MK_RECURSIVE_DEPS_LIST)
+fetch-doc-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST)
+fetch-rel-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST)
+fetch-test-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST)
+fetch-shell-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+ifneq ($(SKIP_DEPS),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST):
+ $(verbose) :> $@
+else
+# By default, we fetch "normal" dependencies. They are also included no
+# matter the type of requested dependencies.
+#
+# $(ALL_DEPS_DIRS) includes $(BUILD_DEPS).
+
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_DOC_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_REL_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_TEST_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_SHELL_DEPS_DIRS)
+
+# Allow fetch-deps to be used with $(DEP_TYPES) to fetch multiple types
+# of dependencies with a single target.
+ifneq ($(filter doc,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_DOC_DEPS_DIRS)
+endif
+ifneq ($(filter rel,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_REL_DEPS_DIRS)
+endif
+ifneq ($(filter test,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_TEST_DEPS_DIRS)
+endif
+ifneq ($(filter shell,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_SHELL_DEPS_DIRS)
+endif
+
+ERLANG_MK_RECURSIVE_TMP_LIST := $(abspath $(ERLANG_MK_TMP)/recursive-tmp-deps-$(shell echo $$PPID).log)
+
+$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): | $(ERLANG_MK_TMP)
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_RECURSIVE_TMP_LIST)
+endif
+ $(verbose) touch $(ERLANG_MK_RECURSIVE_TMP_LIST)
+ $(verbose) set -e; for dep in $^ ; do \
+ if ! grep -qs ^$$dep$$ $(ERLANG_MK_RECURSIVE_TMP_LIST); then \
+ echo $$dep >> $(ERLANG_MK_RECURSIVE_TMP_LIST); \
+ if grep -qs -E "^[[:blank:]]*include[[:blank:]]+(erlang\.mk|.*/erlang\.mk|.*ERLANG_MK_FILENAME.*)$$" \
+ $$dep/GNUmakefile $$dep/makefile $$dep/Makefile; then \
+ $(MAKE) -C $$dep fetch-deps \
+ IS_DEP=1 \
+ ERLANG_MK_RECURSIVE_TMP_LIST=$(ERLANG_MK_RECURSIVE_TMP_LIST); \
+ fi \
+ fi \
+ done
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) sort < $(ERLANG_MK_RECURSIVE_TMP_LIST) | \
+ uniq > $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted
+ $(verbose) cmp -s $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted $@ \
+ || mv $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted $@
+ $(verbose) rm -f $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted
+ $(verbose) rm $(ERLANG_MK_RECURSIVE_TMP_LIST)
+endif
+endif # ifneq ($(SKIP_DEPS),)
+
+# List dependencies recursively.
+
+.PHONY: list-deps list-doc-deps list-rel-deps list-test-deps \
+ list-shell-deps
+
+list-deps: $(ERLANG_MK_RECURSIVE_DEPS_LIST)
+list-doc-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST)
+list-rel-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST)
+list-test-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST)
+list-shell-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+list-deps list-doc-deps list-rel-deps list-test-deps list-shell-deps:
+ $(verbose) cat $^
+
+# Query dependencies recursively.
+
+.PHONY: query-deps query-doc-deps query-rel-deps query-test-deps \
+ query-shell-deps
+
+QUERY ?= name fetch_method repo version
+
+define query_target
+$(1): $(2) clean-tmp-query.log
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(4)
+endif
+ $(verbose) $(foreach dep,$(3),\
+ echo $(PROJECT): $(foreach q,$(QUERY),$(call query_$(q),$(dep))) >> $(4) ;)
+ $(if $(filter-out query-deps,$(1)),,\
+ $(verbose) set -e; for dep in $(3) ; do \
+ if grep -qs ^$$$$dep$$$$ $(ERLANG_MK_TMP)/query.log; then \
+ :; \
+ else \
+ echo $$$$dep >> $(ERLANG_MK_TMP)/query.log; \
+ $(MAKE) -C $(DEPS_DIR)/$$$$dep $$@ QUERY="$(QUERY)" IS_DEP=1 || true; \
+ fi \
+ done)
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) touch $(4)
+ $(verbose) cat $(4)
+endif
+endef
+
+clean-tmp-query.log:
+ifeq ($(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_TMP)/query.log
+endif
+
+$(eval $(call query_target,query-deps,$(ERLANG_MK_RECURSIVE_DEPS_LIST),$(BUILD_DEPS) $(DEPS),$(ERLANG_MK_QUERY_DEPS_FILE)))
+$(eval $(call query_target,query-doc-deps,$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST),$(DOC_DEPS),$(ERLANG_MK_QUERY_DOC_DEPS_FILE)))
+$(eval $(call query_target,query-rel-deps,$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST),$(REL_DEPS),$(ERLANG_MK_QUERY_REL_DEPS_FILE)))
+$(eval $(call query_target,query-test-deps,$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST),$(TEST_DEPS),$(ERLANG_MK_QUERY_TEST_DEPS_FILE)))
+$(eval $(call query_target,query-shell-deps,$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST),$(SHELL_DEPS),$(ERLANG_MK_QUERY_SHELL_DEPS_FILE)))
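+
+# For example, limiting the query output to a subset of the supported
+# fields:
+#
+#   make query-deps QUERY='name repo'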
diff --git a/deps/rabbitmq_aws/include/rabbitmq_aws.hrl b/deps/rabbitmq_aws/include/rabbitmq_aws.hrl
new file mode 100644
index 0000000000..5c5a478a59
--- /dev/null
+++ b/deps/rabbitmq_aws/include/rabbitmq_aws.hrl
@@ -0,0 +1,115 @@
+%% ====================================================================
+%% @author Gavin M. Roy <gavinmroy@gmail.com>
+%% @copyright 2016, Gavin M. Roy
+%% @copyright 2016-2020 VMware, Inc. or its affiliates.
+%% @headerfile
+%% @private
+%% @doc rabbitmq_aws client library constants and records
+%% @end
+%% ====================================================================
+
+-define(MIME_AWS_JSON, "application/x-amz-json-1.0").
+-define(SCHEME, https).
+
+-define(DEFAULT_REGION, "us-east-1").
+-define(DEFAULT_PROFILE, "default").
+
+-define(INSTANCE_AZ, "placement/availability-zone").
+-define(INSTANCE_HOST, "169.254.169.254").
+
+% rabbitmq/rabbitmq-peer-discovery-aws#25
+
+% Note: this timeout must not be greater than the default
+% gen_server:call timeout of 5000ms. INSTANCE_HOST is
+% a pseudo-IP that should have good performance, and the
+% data should be returned quickly. Note that `timeout`,
+% when set, is used by `httpc` first as the connect timeout
+% and then as the request timeout.
+-define(DEFAULT_HTTP_TIMEOUT, 2250).
+
+-define(INSTANCE_CREDENTIALS, "iam/security-credentials").
+-define(INSTANCE_METADATA_BASE, "latest/meta-data").
+
+-type access_key() :: nonempty_string().
+-type secret_access_key() :: nonempty_string().
+-type expiration() :: calendar:datetime() | undefined.
+-type security_token() :: nonempty_string() | undefined.
+-type region() :: nonempty_string() | undefined.
+-type path() :: ssl:path().
+
+-type sc_ok() :: {ok, access_key(), secret_access_key(), expiration(), security_token()}.
+-type sc_error() :: {error, Reason :: atom()}.
+-type security_credentials() :: sc_ok() | sc_error().
+
+-record(state, {access_key :: access_key() | undefined,
+ secret_access_key :: secret_access_key() | undefined,
+ expiration :: expiration() | undefined,
+ security_token :: security_token() | undefined,
+ region :: region() | undefined,
+ error :: atom() | string() | undefined}).
+-type state() :: #state{}.
+
+-type scheme() :: atom().
+-type username() :: string().
+-type password() :: string().
+-type host() :: string().
+-type tcp_port() :: integer().
+-type query_args() :: [tuple() | string()].
+-type fragment() :: string().
+
+-type userinfo() :: {undefined | username(),
+ undefined | password()}.
+
+-type authority() :: {undefined | userinfo(),
+ host(),
+ undefined | tcp_port()}.
+-record(uri, {scheme :: undefined | scheme(),
+ authority :: authority(),
+ path :: undefined | path(),
+ query :: undefined | query_args(),
+ fragment :: undefined | fragment()}).
+
+-type method() :: head | get | put | post | trace | options | delete | patch.
+-type http_version() :: string().
+-type status_code() :: integer().
+-type reason_phrase() :: string().
+-type status_line() :: {http_version(), status_code(), reason_phrase()}.
+-type field() :: string().
+-type value() :: string().
+-type header() :: {Field :: field(), Value :: value()}.
+-type headers() :: [header()].
+-type body() :: string() | binary().
+
+-type ssl_options() :: [ssl:ssl_option()].
+
+-type http_option() :: {timeout, timeout()} |
+ {connect_timeout, timeout()} |
+ {ssl, ssl_options()} |
+ {essl, ssl_options()} |
+ {autoredirect, boolean()} |
+ {proxy_auth, {User :: string(), Password :: string()}} |
+ {version, http_version()} |
+ {relaxed, boolean()} |
+ {url_encode, boolean()}.
+-type http_options() :: [http_option()].
+
+
+-record(request, {access_key :: access_key(),
+ secret_access_key :: secret_access_key(),
+ security_token :: security_token(),
+ service :: string(),
+ region = "us-east-1" :: string(),
+ method = get :: method(),
+ headers = [] :: headers(),
+ uri :: string(),
+ body = "" :: body()}).
+-type request() :: #request{}.
+
+-type httpc_result() :: {ok, {status_line(), headers(), body()}} |
+ {ok, {status_code(), body()}} |
+ {error, term()}.
+
+-type result_ok() :: {ok, {ResponseHeaders :: headers(), Response :: list()}}.
+-type result_error() :: {error, Message :: reason_phrase(), {ResponseHeaders :: headers(), Response :: list()} | undefined} |
+ {error, {credentials, Reason :: string()}}.
+-type result() :: result_ok() | result_error().
diff --git a/deps/rabbitmq_aws/rabbitmq-components.mk b/deps/rabbitmq_aws/rabbitmq-components.mk
new file mode 100644
index 0000000000..b2a3be8b35
--- /dev/null
+++ b/deps/rabbitmq_aws/rabbitmq-components.mk
@@ -0,0 +1,359 @@
+ifeq ($(.DEFAULT_GOAL),)
+# Define default goal to `all` because this file defines some targets
+# before the inclusion of erlang.mk, which would lead to the wrong target
+# becoming the default.
+.DEFAULT_GOAL = all
+endif
+
+# PROJECT_VERSION defaults to:
+# 1. the version exported by rabbitmq-server-release;
+# 2. the version stored in `git-revisions.txt`, if it exists;
+# 3. a version based on git-describe(1), if it is a Git clone;
+# 4. 0.0.0
+
+PROJECT_VERSION := $(RABBITMQ_VERSION)
+
+ifeq ($(PROJECT_VERSION),)
+PROJECT_VERSION := $(shell \
+if test -f git-revisions.txt; then \
+ head -n1 git-revisions.txt | \
+ awk '{print $$$(words $(PROJECT_DESCRIPTION) version);}'; \
+else \
+ (git describe --dirty --abbrev=7 --tags --always --first-parent \
+ 2>/dev/null || echo rabbitmq_v0_0_0) | \
+ sed -e 's/^rabbitmq_v//' -e 's/^v//' -e 's/_/./g' -e 's/-/+/' \
+ -e 's/-/./g'; \
+fi)
+endif
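+
+# As an illustration of the mangling above, a git-describe output of
+# "v3.8.9-10-g1a2b3c4" becomes the version "3.8.9+10.g1a2b3c4".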
+
+# --------------------------------------------------------------------
+# RabbitMQ components.
+# --------------------------------------------------------------------
+
+# For RabbitMQ repositories, we want to checkout branches which match
+# the parent project. For instance, if the parent project is on a
+# release tag, dependencies must be on the same release tag. If the
+# parent project is on a topic branch, dependencies must be on the same
+# topic branch or fall back to `stable` or `master`, whichever was the
+# base of the topic branch.
+
+dep_amqp_client = git_rmq rabbitmq-erlang-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_amqp10_client = git_rmq rabbitmq-amqp1.0-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_amqp10_common = git_rmq rabbitmq-amqp1.0-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbit = git_rmq rabbitmq-server $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbit_common = git_rmq rabbitmq-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_amqp1_0 = git_rmq rabbitmq-amqp1.0 $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_amqp = git_rmq rabbitmq-auth-backend-amqp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_cache = git_rmq rabbitmq-auth-backend-cache $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_http = git_rmq rabbitmq-auth-backend-http $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_ldap = git_rmq rabbitmq-auth-backend-ldap $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_oauth2 = git_rmq rabbitmq-auth-backend-oauth2 $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_mechanism_ssl = git_rmq rabbitmq-auth-mechanism-ssl $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_aws = git_rmq rabbitmq-aws $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_boot_steps_visualiser = git_rmq rabbitmq-boot-steps-visualiser $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_cli = git_rmq rabbitmq-cli $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_codegen = git_rmq rabbitmq-codegen $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_consistent_hash_exchange = git_rmq rabbitmq-consistent-hash-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_ct_client_helpers = git_rmq rabbitmq-ct-client-helpers $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_ct_helpers = git_rmq rabbitmq-ct-helpers $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_delayed_message_exchange = git_rmq rabbitmq-delayed-message-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_dotnet_client = git_rmq rabbitmq-dotnet-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_event_exchange = git_rmq rabbitmq-event-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_federation = git_rmq rabbitmq-federation $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_federation_management = git_rmq rabbitmq-federation-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_java_client = git_rmq rabbitmq-java-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_client = git_rmq rabbitmq-jms-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_cts = git_rmq rabbitmq-jms-cts $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_topic_exchange = git_rmq rabbitmq-jms-topic-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_lvc_exchange = git_rmq rabbitmq-lvc-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management = git_rmq rabbitmq-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_agent = git_rmq rabbitmq-management-agent $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_exchange = git_rmq rabbitmq-management-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_themes = git_rmq rabbitmq-management-themes $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_message_timestamp = git_rmq rabbitmq-message-timestamp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_metronome = git_rmq rabbitmq-metronome $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_mqtt = git_rmq rabbitmq-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_objc_client = git_rmq rabbitmq-objc-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_aws = git_rmq rabbitmq-peer-discovery-aws $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_common = git_rmq rabbitmq-peer-discovery-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_consul = git_rmq rabbitmq-peer-discovery-consul $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_etcd = git_rmq rabbitmq-peer-discovery-etcd $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_k8s = git_rmq rabbitmq-peer-discovery-k8s $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_prometheus = git_rmq rabbitmq-prometheus $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_random_exchange = git_rmq rabbitmq-random-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_recent_history_exchange = git_rmq rabbitmq-recent-history-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_routing_node_stamp = git_rmq rabbitmq-routing-node-stamp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_rtopic_exchange = git_rmq rabbitmq-rtopic-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_server_release = git_rmq rabbitmq-server-release $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_sharding = git_rmq rabbitmq-sharding $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_shovel = git_rmq rabbitmq-shovel $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_shovel_management = git_rmq rabbitmq-shovel-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_stomp = git_rmq rabbitmq-stomp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_stream = git_rmq rabbitmq-stream $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_toke = git_rmq rabbitmq-toke $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_top = git_rmq rabbitmq-top $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_tracing = git_rmq rabbitmq-tracing $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_trust_store = git_rmq rabbitmq-trust-store $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_test = git_rmq rabbitmq-test $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_dispatch = git_rmq rabbitmq-web-dispatch $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_stomp = git_rmq rabbitmq-web-stomp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_stomp_examples = git_rmq rabbitmq-web-stomp-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt = git_rmq rabbitmq-web-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt_examples = git_rmq rabbitmq-web-mqtt-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_website = git_rmq rabbitmq-website $(current_rmq_ref) $(base_rmq_ref) live master
+dep_toke = git_rmq toke $(current_rmq_ref) $(base_rmq_ref) master
+
+dep_rabbitmq_public_umbrella = git_rmq rabbitmq-public-umbrella $(current_rmq_ref) $(base_rmq_ref) master
+
+# Third-party dependencies version pinning.
+#
+# We do that in this file, which is copied in all projects, to ensure
+# all projects use the same versions. It avoids conflicts and makes it
+# possible to work with rabbitmq-public-umbrella.
+
+dep_accept = hex 0.3.5
+dep_cowboy = hex 2.8.0
+dep_cowlib = hex 2.9.1
+dep_jsx = hex 2.11.0
+dep_lager = hex 3.8.0
+dep_prometheus = git https://github.com/deadtrickster/prometheus.erl.git master
+dep_ra = git https://github.com/rabbitmq/ra.git master
+dep_ranch = hex 1.7.1
+dep_recon = hex 2.5.1
+dep_observer_cli = hex 1.5.4
+dep_stdout_formatter = hex 0.2.4
+dep_sysmon_handler = hex 1.3.0
+
+RABBITMQ_COMPONENTS = amqp_client \
+ amqp10_common \
+ amqp10_client \
+ rabbit \
+ rabbit_common \
+ rabbitmq_amqp1_0 \
+ rabbitmq_auth_backend_amqp \
+ rabbitmq_auth_backend_cache \
+ rabbitmq_auth_backend_http \
+ rabbitmq_auth_backend_ldap \
+ rabbitmq_auth_backend_oauth2 \
+ rabbitmq_auth_mechanism_ssl \
+ rabbitmq_aws \
+ rabbitmq_boot_steps_visualiser \
+ rabbitmq_cli \
+ rabbitmq_codegen \
+ rabbitmq_consistent_hash_exchange \
+ rabbitmq_ct_client_helpers \
+ rabbitmq_ct_helpers \
+ rabbitmq_delayed_message_exchange \
+ rabbitmq_dotnet_client \
+ rabbitmq_event_exchange \
+ rabbitmq_federation \
+ rabbitmq_federation_management \
+ rabbitmq_java_client \
+ rabbitmq_jms_client \
+ rabbitmq_jms_cts \
+ rabbitmq_jms_topic_exchange \
+ rabbitmq_lvc_exchange \
+ rabbitmq_management \
+ rabbitmq_management_agent \
+ rabbitmq_management_exchange \
+ rabbitmq_management_themes \
+ rabbitmq_message_timestamp \
+ rabbitmq_metronome \
+ rabbitmq_mqtt \
+ rabbitmq_objc_client \
+ rabbitmq_peer_discovery_aws \
+ rabbitmq_peer_discovery_common \
+ rabbitmq_peer_discovery_consul \
+ rabbitmq_peer_discovery_etcd \
+ rabbitmq_peer_discovery_k8s \
+ rabbitmq_prometheus \
+ rabbitmq_random_exchange \
+ rabbitmq_recent_history_exchange \
+ rabbitmq_routing_node_stamp \
+ rabbitmq_rtopic_exchange \
+ rabbitmq_server_release \
+ rabbitmq_sharding \
+ rabbitmq_shovel \
+ rabbitmq_shovel_management \
+ rabbitmq_stomp \
+ rabbitmq_stream \
+ rabbitmq_toke \
+ rabbitmq_top \
+ rabbitmq_tracing \
+ rabbitmq_trust_store \
+ rabbitmq_web_dispatch \
+ rabbitmq_web_mqtt \
+ rabbitmq_web_mqtt_examples \
+ rabbitmq_web_stomp \
+ rabbitmq_web_stomp_examples \
+ rabbitmq_website
+
+# Erlang.mk does not rebuild dependencies by default once they have been
+# compiled, except for those listed in the `$(FORCE_REBUILD)` variable.
+#
+# We want all RabbitMQ components to always be rebuilt: this eases
+# the work on several components at the same time.
+
+FORCE_REBUILD = $(RABBITMQ_COMPONENTS)
+
+# Several components have a custom erlang.mk/build.config, mainly
+# to disable eunit. Therefore, we can't use the top-level project's
+# erlang.mk copy.
+NO_AUTOPATCH += $(RABBITMQ_COMPONENTS)
+
+ifeq ($(origin current_rmq_ref),undefined)
+ifneq ($(wildcard .git),)
+current_rmq_ref := $(shell (\
+ ref=$$(LANG=C git branch --list | awk '/^\* \(.*detached / {ref=$$0; sub(/.*detached [^ ]+ /, "", ref); sub(/\)$$/, "", ref); print ref; exit;} /^\* / {ref=$$0; sub(/^\* /, "", ref); print ref; exit}');\
+ if test "$$(git rev-parse --short HEAD)" != "$$ref"; then echo "$$ref"; fi))
+else
+current_rmq_ref := master
+endif
+endif
+export current_rmq_ref
+
+ifeq ($(origin base_rmq_ref),undefined)
+ifneq ($(wildcard .git),)
+possible_base_rmq_ref := master
+ifeq ($(possible_base_rmq_ref),$(current_rmq_ref))
+base_rmq_ref := $(current_rmq_ref)
+else
+base_rmq_ref := $(shell \
+ (git rev-parse --verify -q master >/dev/null && \
+ git rev-parse --verify -q $(possible_base_rmq_ref) >/dev/null && \
+ git merge-base --is-ancestor $$(git merge-base master HEAD) $(possible_base_rmq_ref) && \
+ echo $(possible_base_rmq_ref)) || \
+ echo master)
+endif
+else
+base_rmq_ref := master
+endif
+endif
+export base_rmq_ref
+
+# Repository URL selection.
+#
+# First, we infer other components' location from the current project
+# repository URL, if it's a Git repository:
+# - We take the "origin" remote URL as the base
+# - The current project name and repository name is replaced by the
+# target's properties:
+# eg. rabbitmq-common is replaced by rabbitmq-codegen
+# eg. rabbit_common is replaced by rabbitmq_codegen
+#
+# If cloning from this computed location fails, we fallback to RabbitMQ
+# upstream which is GitHub.
+
+# Macro to transform eg. "rabbit_common" to "rabbitmq-common".
+rmq_cmp_repo_name = $(word 2,$(dep_$(1)))
+
+# Upstream URL for the current project.
+RABBITMQ_COMPONENT_REPO_NAME := $(call rmq_cmp_repo_name,$(PROJECT))
+RABBITMQ_UPSTREAM_FETCH_URL ?= https://github.com/rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git
+RABBITMQ_UPSTREAM_PUSH_URL ?= git@github.com:rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git
+
+# Current URL for the current project. If this is not a Git clone,
+# default to the upstream Git repository.
+ifneq ($(wildcard .git),)
+git_origin_fetch_url := $(shell git config remote.origin.url)
+git_origin_push_url := $(shell git config remote.origin.pushurl || git config remote.origin.url)
+RABBITMQ_CURRENT_FETCH_URL ?= $(git_origin_fetch_url)
+RABBITMQ_CURRENT_PUSH_URL ?= $(git_origin_push_url)
+else
+RABBITMQ_CURRENT_FETCH_URL ?= $(RABBITMQ_UPSTREAM_FETCH_URL)
+RABBITMQ_CURRENT_PUSH_URL ?= $(RABBITMQ_UPSTREAM_PUSH_URL)
+endif
+
+# Macro to replace the following pattern:
+# 1. /foo.git -> /bar.git
+# 2. /foo -> /bar
+# 3. /foo/ -> /bar/
+subst_repo_name = $(patsubst %/$(1)/%,%/$(2)/%,$(patsubst %/$(1),%/$(2),$(patsubst %/$(1).git,%/$(2).git,$(3))))
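+
+# For example (with a hypothetical URL):
+#   $(call subst_repo_name,rabbitmq-common,rabbitmq-codegen,https://example.com/rabbitmq-common.git)
+# expands to "https://example.com/rabbitmq-codegen.git".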
+
+# Macro to replace both the project's name (eg. "rabbit_common") and
+# repository name (eg. "rabbitmq-common") by the target's equivalent.
+#
+# This macro is kept on one line because we don't want whitespaces in
+# the returned value, as it's used in $(dep_fetch_git_rmq) in a shell
+# single-quoted string.
+dep_rmq_repo = $(if $(dep_$(2)),$(call subst_repo_name,$(PROJECT),$(2),$(call subst_repo_name,$(RABBITMQ_COMPONENT_REPO_NAME),$(call rmq_cmp_repo_name,$(2)),$(1))),$(pkg_$(1)_repo))
+
+dep_rmq_commits = $(if $(dep_$(1)), \
+ $(wordlist 3,$(words $(dep_$(1))),$(dep_$(1))), \
+ $(pkg_$(1)_commit))
+
+define dep_fetch_git_rmq
+ fetch_url1='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_FETCH_URL),$(1))'; \
+ fetch_url2='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_FETCH_URL),$(1))'; \
+ if test "$$$$fetch_url1" != '$(RABBITMQ_CURRENT_FETCH_URL)' && \
+ git clone -q -n -- "$$$$fetch_url1" $(DEPS_DIR)/$(call dep_name,$(1)); then \
+ fetch_url="$$$$fetch_url1"; \
+ push_url='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_PUSH_URL),$(1))'; \
+ elif git clone -q -n -- "$$$$fetch_url2" $(DEPS_DIR)/$(call dep_name,$(1)); then \
+ fetch_url="$$$$fetch_url2"; \
+ push_url='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_PUSH_URL),$(1))'; \
+ fi; \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && ( \
+ $(foreach ref,$(call dep_rmq_commits,$(1)), \
+ git checkout -q $(ref) >/dev/null 2>&1 || \
+ ) \
+ (echo "error: no valid pathspec among: $(call dep_rmq_commits,$(1))" \
+ 1>&2 && false) ) && \
+ (test "$$$$fetch_url" = "$$$$push_url" || \
+ git remote set-url --push origin "$$$$push_url")
+endef
+
+# --------------------------------------------------------------------
+# Component distribution.
+# --------------------------------------------------------------------
+
+list-dist-deps::
+ @:
+
+prepare-dist::
+ @:
+
+# --------------------------------------------------------------------
+# Umbrella-specific settings.
+# --------------------------------------------------------------------
+
+# If the top-level project is a RabbitMQ component, we override
+# $(DEPS_DIR) for this project to point to the top-level's one.
+#
+# We also verify that the guessed DEPS_DIR is actually named `deps`,
+# to rule out any situation where it is a coincidence that we found a
+# `rabbitmq-components.mk` in upper directories.
+
+possible_deps_dir_1 = $(abspath ..)
+possible_deps_dir_2 = $(abspath ../../..)
+
+ifeq ($(notdir $(possible_deps_dir_1)),deps)
+ifneq ($(wildcard $(possible_deps_dir_1)/../rabbitmq-components.mk),)
+deps_dir_overriden = 1
+DEPS_DIR ?= $(possible_deps_dir_1)
+DISABLE_DISTCLEAN = 1
+endif
+endif
+
+ifeq ($(deps_dir_overriden),)
+ifeq ($(notdir $(possible_deps_dir_2)),deps)
+ifneq ($(wildcard $(possible_deps_dir_2)/../rabbitmq-components.mk),)
+deps_dir_overriden = 1
+DEPS_DIR ?= $(possible_deps_dir_2)
+DISABLE_DISTCLEAN = 1
+endif
+endif
+endif
+
+ifneq ($(wildcard UMBRELLA.md),)
+DISABLE_DISTCLEAN = 1
+endif
+
+# We disable `make distclean` so $(DEPS_DIR) is not accidentally removed.
+
+ifeq ($(DISABLE_DISTCLEAN),1)
+ifneq ($(filter distclean distclean-deps,$(MAKECMDGOALS)),)
+SKIP_DEPS = 1
+endif
+endif
diff --git a/deps/rabbitmq_aws/rabbitmq_aws.iml b/deps/rabbitmq_aws/rabbitmq_aws.iml
new file mode 100644
index 0000000000..3850ccd77d
--- /dev/null
+++ b/deps/rabbitmq_aws/rabbitmq_aws.iml
@@ -0,0 +1,25 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<module type="ERLANG_MODULE" version="4">
+ <component name="FacetManager">
+ <facet type="erlang" name="Erlang">
+ <configuration />
+ </facet>
+ </component>
+ <component name="NewModuleRootManager" inherit-compiler-output="true">
+ <exclude-output />
+ <content url="file://$MODULE_DIR$">
+ <sourceFolder url="file://$MODULE_DIR$/include" type="erlang-include" />
+ <sourceFolder url="file://$MODULE_DIR$/src" isTestSource="false" />
+ <sourceFolder url="file://$MODULE_DIR$/test" isTestSource="true" />
+ <excludeFolder url="file://$MODULE_DIR$/.erlang.mk" />
+ <excludeFolder url="file://$MODULE_DIR$/.eunit" />
+ <excludeFolder url="file://$MODULE_DIR$/.idea" />
+ <excludeFolder url="file://$MODULE_DIR$/.rebar" />
+ <excludeFolder url="file://$MODULE_DIR$/_build" />
+ <excludeFolder url="file://$MODULE_DIR$/bin" />
+ </content>
+ <orderEntry type="inheritedJdk" />
+ <orderEntry type="sourceFolder" forTests="false" />
+ <orderEntry type="module" module-name="jsx" />
+ </component>
+</module> \ No newline at end of file
diff --git a/deps/rabbitmq_aws/src/rabbitmq_aws.erl b/deps/rabbitmq_aws/src/rabbitmq_aws.erl
new file mode 100644
index 0000000000..4a152f3b21
--- /dev/null
+++ b/deps/rabbitmq_aws/src/rabbitmq_aws.erl
@@ -0,0 +1,472 @@
+%% ====================================================================
+%% @author Gavin M. Roy <gavinmroy@gmail.com>
+%% @copyright 2016, Gavin M. Roy
+%% @doc rabbitmq_aws client library
+%% @end
+%% ====================================================================
+-module(rabbitmq_aws).
+
+-behavior(gen_server).
+
+%% API exports
+-export([get/2, get/3,
+ post/4,
+ refresh_credentials/0,
+ request/5, request/6, request/7,
+ set_credentials/2,
+ has_credentials/0,
+ set_region/1]).
+
+%% gen_server exports
+-export([start_link/0,
+ init/1,
+ terminate/2,
+ code_change/3,
+ handle_call/3,
+ handle_cast/2,
+ handle_info/2]).
+
+%% Export all for unit tests
+-ifdef(TEST).
+-compile(export_all).
+-endif.
+
+-include("rabbitmq_aws.hrl").
+
+%%====================================================================
+%% exported wrapper functions
+%%====================================================================
+
+-spec get(Service :: string(),
+ Path :: path()) -> result().
+%% @doc Perform an HTTP GET request to the AWS API for the specified service. The
+%% response will automatically be decoded if it is either in JSON or XML
+%% format.
+%% @end
+get(Service, Path) ->
+ get(Service, Path, []).
+
+
+-spec get(Service :: string(),
+ Path :: path(),
+ Headers :: headers()) -> result().
+%% @doc Perform an HTTP GET request to the AWS API for the specified service. The
+%% response will automatically be decoded if it is either in JSON or XML
+%% format.
+%% @end
+get(Service, Path, Headers) ->
+ request(Service, get, Path, "", Headers).
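+
+%% Illustrative sketch: assuming the rabbitmq_aws process is started and
+%% credentials are configured, a GET call looks like:
+%%
+%%   {ok, {Headers, Body}} = rabbitmq_aws:get("ec2", "/?Action=DescribeInstances&Version=2016-11-15").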
+
+
+-spec post(Service :: string(),
+ Path :: path(),
+ Body :: body(),
+ Headers :: headers()) -> result().
+%% @doc Perform an HTTP POST request to the AWS API for the specified service. The
+%% response will automatically be decoded if it is either in JSON or XML
+%% format.
+%% @end
+post(Service, Path, Body, Headers) ->
+ request(Service, post, Path, Body, Headers).
+
+
+-spec refresh_credentials() -> ok | error.
+%% @doc Manually refresh the credentials from the environment, filesystem or EC2
+%% Instance metadata service.
+%% @end
+refresh_credentials() ->
+ gen_server:call(rabbitmq_aws, refresh_credentials).
+
+
+-spec request(Service :: string(),
+ Method :: method(),
+ Path :: path(),
+ Body :: body(),
+ Headers :: headers()) -> result().
+%% @doc Perform an HTTP request to the AWS API for the specified service. The
+%% response will automatically be decoded if it is either in JSON or XML
+%% format.
+%% @end
+request(Service, Method, Path, Body, Headers) ->
+ gen_server:call(rabbitmq_aws, {request, Service, Method, Headers, Path, Body, [], undefined}).
+
+
+-spec request(Service :: string(),
+ Method :: method(),
+ Path :: path(),
+ Body :: body(),
+ Headers :: headers(),
+ HTTPOptions :: http_options()) -> result().
+%% @doc Perform an HTTP request to the AWS API for the specified service. The
+%% response will automatically be decoded if it is either in JSON or XML
+%% format.
+%% @end
+request(Service, Method, Path, Body, Headers, HTTPOptions) ->
+ gen_server:call(rabbitmq_aws, {request, Service, Method, Headers, Path, Body, HTTPOptions, undefined}).
+
+
+-spec request(Service :: string(),
+ Method :: method(),
+ Path :: path(),
+ Body :: body(),
+ Headers :: headers(),
+ HTTPOptions :: http_options(),
+ Endpoint :: host()) -> result().
+%% @doc Perform an HTTP request to the AWS API for the specified service, overriding
+%% the endpoint URL to use when invoking the API. This is useful for local testing
+%% of services such as DynamoDB. The response will automatically be decoded
+%% if it is either in JSON or XML format.
+%% @end
+request(Service, Method, Path, Body, Headers, HTTPOptions, Endpoint) ->
+ gen_server:call(rabbitmq_aws, {request, Service, Method, Headers, Path, Body, HTTPOptions, Endpoint}).
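+
+%% Illustrative sketch with a hypothetical endpoint host, e.g. for testing
+%% against a local service:
+%%
+%%   rabbitmq_aws:request("dynamodb", post, "/", RequestBody, RequestHeaders, [], "dynamodb.example.test").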
+
+
+-spec set_credentials(access_key(), secret_access_key()) -> ok.
+%% @doc Manually set the access credentials for requests. This should
+%% be used in cases where the client application wants to control
+%% the credentials instead of automatically discovering them from
+%% configuration or the AWS Instance Metadata service.
+%% @end
+set_credentials(AccessKey, SecretAccessKey) ->
+ gen_server:call(rabbitmq_aws, {set_credentials, AccessKey, SecretAccessKey}).
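+
+%% Illustrative sketch; the key values below are placeholders:
+%%
+%%   ok = rabbitmq_aws:set_credentials("AKIAEXAMPLEKEY", "example-secret-key").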
+
+
+-spec set_region(Region :: string()) -> ok.
+%% @doc Manually set the AWS region to perform API requests to.
+%% @end
+set_region(Region) ->
+ gen_server:call(rabbitmq_aws, {set_region, Region}).
+
+
+%%====================================================================
+%% gen_server functions
+%%====================================================================
+
+start_link() ->
+ gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
+
+
+-spec init(list()) -> {ok, state()}.
+init([]) ->
+ {ok, #state{}}.
+
+
+terminate(_, _) ->
+ ok.
+
+
+code_change(_, _, State) ->
+ {ok, State}.
+
+
+handle_call(Msg, _From, #state{region = undefined}) ->
+ %% Delay initialisation until a RabbitMQ plugin requires the AWS backend
+ {ok, Region} = rabbitmq_aws_config:region(),
+ {_, State} = load_credentials(#state{region = Region}),
+ handle_msg(Msg, State);
+handle_call(Msg, _From, State) ->
+ handle_msg(Msg, State).
+
+
+handle_cast(_Request, State) ->
+ {noreply, State}.
+
+
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+%%====================================================================
+%% Internal functions
+%%====================================================================
+handle_msg({request, Service, Method, Headers, Path, Body, Options, Host}, State) ->
+ {Response, NewState} = perform_request(State, Service, Method, Headers, Path, Body, Options, Host),
+ {reply, Response, NewState};
+
+handle_msg(get_state, State) ->
+ {reply, {ok, State}, State};
+
+handle_msg(refresh_credentials, State) ->
+ {Reply, NewState} = load_credentials(State),
+ {reply, Reply, NewState};
+
+handle_msg({set_credentials, AccessKey, SecretAccessKey}, State) ->
+ {reply, ok, State#state{access_key = AccessKey,
+ secret_access_key = SecretAccessKey,
+ security_token = undefined,
+ expiration = undefined,
+ error = undefined}};
+
+handle_msg({set_region, Region}, State) ->
+ {reply, ok, State#state{region = Region}};
+
+handle_msg(has_credentials, State) ->
+ {reply, has_credentials(State), State};
+
+handle_msg(_Request, State) ->
+ {noreply, State}.
+
+-spec endpoint(State :: state(), Host :: string(),
+ Service :: string(), Path :: string()) -> string().
+%% @doc Return the endpoint URL, either by constructing it with the service
+%% information passed in or by using the passed in Host value.
+%% @end
+endpoint(#state{region = Region}, undefined, Service, Path) ->
+ lists:flatten(["https://", endpoint_host(Region, Service), Path]);
+endpoint(_, Host, _, Path) ->
+ lists:flatten(["https://", Host, Path]).
+
+
+-spec endpoint_host(Region :: region(), Service :: string()) -> host().
+%% @doc Construct the endpoint hostname for the request based upon the service
+%% and region.
+%% @end
+endpoint_host(Region, Service) ->
+ lists:flatten(string:join([Service, Region, endpoint_tld(Region)], ".")).
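+
+%% For example:
+%%
+%%   endpoint_host("us-east-1", "s3") returns "s3.us-east-1.amazonaws.com".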
+
+
+-spec endpoint_tld(Region :: region()) -> host().
+%% @doc Construct the endpoint hostname TLD for the request based upon the region.
+%% See https://docs.aws.amazon.com/general/latest/gr/rande.html#ec2_region for details.
+%% @end
+endpoint_tld("cn-north-1") ->
+ "amazonaws.com.cn";
+endpoint_tld("cn-northwest-1") ->
+ "amazonaws.com.cn";
+endpoint_tld(_Other) ->
+ "amazonaws.com".
+
+-spec format_response(Response :: httpc_result()) -> result().
+%% @doc Format the httpc response result, returning the request result data
+%% structure. An attempt is made to decode the response body by invoking
+%% maybe_decode_body/2.
+%% @end
+format_response({ok, {{_Version, 200, _Message}, Headers, Body}}) ->
+ {ok, {Headers, maybe_decode_body(get_content_type(Headers), Body)}};
+format_response({ok, {{_Version, StatusCode, Message}, Headers, Body}}) when StatusCode >= 400 ->
+ {error, Message, {Headers, maybe_decode_body(get_content_type(Headers), Body)}};
+format_response({error, Reason}) ->
+ {error, Reason, undefined}.
+
+-spec get_content_type(Headers :: headers()) -> {Type :: string(), Subtype :: string()}.
+%% @doc Fetch the content type from the headers and return it as a tuple of
+%% {Type, Subtype}.
+%% @end
+get_content_type(Headers) ->
+ Value = case proplists:get_value("content-type", Headers, undefined) of
+ undefined ->
+ proplists:get_value("Content-Type", Headers, "text/xml");
+ Other -> Other
+ end,
+ parse_content_type(Value).
+
+-spec has_credentials() -> true | false.
+has_credentials() ->
+ gen_server:call(rabbitmq_aws, has_credentials).
+
+-spec has_credentials(state()) -> true | false.
+%% @doc Check whether credentials are available in the current state,
+%% returning false if they are missing or an error has been recorded.
+%% @end
+has_credentials(#state{error = Error}) when Error /= undefined -> false;
+has_credentials(#state{access_key = Key}) when Key /= undefined -> true;
+has_credentials(_) -> false.
+
+
+-spec expired_credentials(Expiration :: calendar:datetime()) -> true | false.
+%% @doc Indicates if the date that is passed in has expired.
+%% @end
+expired_credentials(undefined) -> false;
+expired_credentials(Expiration) ->
+ Now = calendar:datetime_to_gregorian_seconds(local_time()),
+ Expires = calendar:datetime_to_gregorian_seconds(Expiration),
+ Now >= Expires.
+
+
+-spec load_credentials(State :: state()) -> {ok, state()} | {error, state()}.
+%% @doc Load the credentials using the following order of configuration precedence:
+%% - Environment variables
+%% - Credentials file
+%% - EC2 Instance Metadata Service
+%% @end
+load_credentials(#state{region = Region}) ->
+ case rabbitmq_aws_config:credentials() of
+ {ok, AccessKey, SecretAccessKey, Expiration, SecurityToken} ->
+ {ok, #state{region = Region,
+ error = undefined,
+ access_key = AccessKey,
+ secret_access_key = SecretAccessKey,
+ expiration = Expiration,
+ security_token = SecurityToken}};
+ {error, Reason} ->
+ error_logger:error_msg("Could not load AWS credentials from environment variables, AWS_CONFIG_FILE, AWS_SHARED_CREDENTIALS_FILE or EC2 metadata endpoint: ~p. Will depend on config settings to be set.~n.", [Reason]),
+ {error, #state{region = Region,
+ error = Reason,
+ access_key = undefined,
+ secret_access_key = undefined,
+ expiration = undefined,
+ security_token = undefined}}
+ end.
+
+
+-spec local_time() -> calendar:datetime().
+%% @doc Return the current local time.
+%% @end
+local_time() ->
+ [Value] = calendar:local_time_to_universal_time_dst(calendar:local_time()),
+ Value.
+
+
+-spec maybe_decode_body(ContentType :: {nonempty_string(), nonempty_string()}, Body :: body()) -> list() | body().
+%% @doc Attempt to decode the response body based upon the MIME type that is
+%% presented.
+%% @end
+maybe_decode_body({"application", "x-amz-json-1.0"}, Body) ->
+ rabbitmq_aws_json:decode(Body);
+maybe_decode_body({"application", "json"}, Body) ->
+ rabbitmq_aws_json:decode(Body);
+maybe_decode_body({_, "xml"}, Body) ->
+ rabbitmq_aws_xml:parse(Body);
+maybe_decode_body(_ContentType, Body) ->
+ Body.
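+
+%% For example, a JSON payload is decoded while an unrecognised content
+%% type passes through unchanged:
+%%
+%%   maybe_decode_body({"application", "json"}, Body) decodes Body as JSON;
+%%   maybe_decode_body({"text", "plain"}, Body) returns Body as-is.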
+
+
+-spec parse_content_type(ContentType :: string()) -> {Type :: string(), Subtype :: string()}.
+%% @doc parse a content type string returning a tuple of type/subtype
+%% @end
+parse_content_type(ContentType) ->
+ Parts = string:tokens(ContentType, ";"),
+ [Type, Subtype] = string:tokens(lists:nth(1, Parts), "/"),
+ {Type, Subtype}.
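+
+%% For example, any parameters after ";" are discarded:
+%%
+%%   parse_content_type("application/json; charset=utf-8") returns {"application", "json"}.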
+
+
+-spec perform_request(State :: state(), Service :: string(), Method :: method(),
+ Headers :: headers(), Path :: path(), Body :: body(),
+ Options :: http_options(), Host :: string() | undefined)
+ -> {Result :: result(), NewState :: state()}.
+%% @doc Make the API request and return the formatted response.
+%% @end
+perform_request(State, Service, Method, Headers, Path, Body, Options, Host) ->
+ perform_request_has_creds(has_credentials(State), State, Service, Method,
+ Headers, Path, Body, Options, Host).
+
+
+-spec perform_request_has_creds(true | false, State :: state(),
+ Service :: string(), Method :: method(),
+ Headers :: headers(), Path :: path(), Body :: body(),
+ Options :: http_options(), Host :: string() | undefined)
+ -> {Result :: result(), NewState :: state()}.
+%% @doc Invoked after checking whether credentials are present. If they are,
+%% validate that they have not expired before performing the request;
+%% otherwise return an error result.
+%% @end
+perform_request_has_creds(true, State, Service, Method, Headers, Path, Body, Options, Host) ->
+ perform_request_creds_expired(expired_credentials(State#state.expiration), State,
+ Service, Method, Headers, Path, Body, Options, Host);
+perform_request_has_creds(false, State, _, _, _, _, _, _, _) ->
+ perform_request_creds_error(State).
+
+
+-spec perform_request_creds_expired(true | false, State :: state(),
+ Service :: string(), Method :: method(),
+ Headers :: headers(), Path :: path(), Body :: body(),
+ Options :: http_options(), Host :: string() | undefined)
+ -> {Result :: result(), NewState :: state()}.
+%% @doc Invoked after checking to see if the current credentials have expired.
+%% If they haven't, perform the request; otherwise try to refresh the
+%% credentials before performing the request.
+%% @end
+perform_request_creds_expired(false, State, Service, Method, Headers, Path, Body, Options, Host) ->
+ perform_request_with_creds(State, Service, Method, Headers, Path, Body, Options, Host);
+perform_request_creds_expired(true, State, Service, Method, Headers, Path, Body, Options, Host) ->
+ perform_request_creds_refreshed(load_credentials(State), Service, Method, Headers, Path, Body, Options, Host).
+
+
+-spec perform_request_creds_refreshed({ok, State :: state()} | {error, State :: state()},
+ Service :: string(), Method :: method(),
+ Headers :: headers(), Path :: path(), Body :: body(),
+ Options :: http_options(), Host :: string() | undefined)
+ -> {Result :: result(), NewState :: state()}.
+%% @doc If it has been determined that there are credentials but they have
+%% expired, check whether the credentials could be reloaded and either make
+%% the request or return an error.
+%% @end
+perform_request_creds_refreshed({ok, State}, Service, Method, Headers, Path, Body, Options, Host) ->
+ perform_request_with_creds(State, Service, Method, Headers, Path, Body, Options, Host);
+perform_request_creds_refreshed({error, State}, _, _, _, _, _, _, _) ->
+ perform_request_creds_error(State).
+
+
+-spec perform_request_with_creds(State :: state(), Service :: string(), Method :: method(),
+ Headers :: headers(), Path :: path(), Body :: body(),
+ Options :: http_options(), Host :: string() | undefined)
+ -> {Result :: result(), NewState :: state()}.
+%% @doc Once it is validated that there are credentials to try and that they have not
+%% expired, perform the request and return the response.
+%% @end
+perform_request_with_creds(State, Service, Method, Headers, Path, Body, Options, Host) ->
+ URI = endpoint(State, Host, Service, Path),
+ SignedHeaders = sign_headers(State, Service, Method, URI, Headers, Body),
+ ContentType = proplists:get_value("content-type", SignedHeaders, undefined),
+ perform_request_with_creds(State, Method, URI, SignedHeaders, ContentType, Body, Options).
+
+
+-spec perform_request_with_creds(State :: state(), Method :: method(), URI :: string(),
+ Headers :: headers(), ContentType :: string() | undefined,
+ Body :: body(), Options :: http_options())
+ -> {Result :: result(), NewState :: state()}.
+%% @doc Once it is validated that there are credentials to try and that they have not
+%% expired, perform the request and return the response.
+%% @end
+perform_request_with_creds(State, Method, URI, Headers, undefined, "", Options0) ->
+ Options1 = ensure_timeout(Options0),
+ Response = httpc:request(Method, {URI, Headers}, Options1, []),
+ {format_response(Response), State};
+perform_request_with_creds(State, Method, URI, Headers, ContentType, Body, Options0) ->
+ Options1 = ensure_timeout(Options0),
+ Response = httpc:request(Method, {URI, Headers, ContentType, Body}, Options1, []),
+ {format_response(Response), State}.
+
+
+-spec perform_request_creds_error(State :: state()) ->
+ {result_error(), NewState :: state()}.
+%% @doc Return the error response when there are not any credentials to use with
+%% the request.
+%% @end
+perform_request_creds_error(State) ->
+ {{error, {credentials, State#state.error}}, State}.
+
+
+%% @doc Ensure that the timeout option is set, non-negative, and no greater
+%% than about 1/2 of the default gen_server:call timeout. This gives
+%% enough time for a long connect and request phase to succeed.
+%% @end
+-spec ensure_timeout(Options :: http_options()) -> http_options().
+ensure_timeout(Options) ->
+ case proplists:get_value(timeout, Options) of
+ undefined ->
+ Options ++ [{timeout, ?DEFAULT_HTTP_TIMEOUT}];
+ Value when is_integer(Value) andalso Value >= 0 andalso Value =< ?DEFAULT_HTTP_TIMEOUT ->
+ Options;
+ _ ->
+ Options1 = proplists:delete(timeout, Options),
+ Options1 ++ [{timeout, ?DEFAULT_HTTP_TIMEOUT}]
+ end.
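+
+%% For example:
+%%
+%%   ensure_timeout([])                 returns [{timeout, 2250}]
+%%   ensure_timeout([{timeout, 60000}]) returns [{timeout, 2250}]
+%%   ensure_timeout([{timeout, 1000}])  returns [{timeout, 1000}]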
+
+
+-spec sign_headers(State :: state(), Service :: string(), Method :: method(),
+ URI :: string(), Headers :: headers(), Body :: body()) -> headers().
+%% @doc Build the signed headers for the API request.
+%% @end
+sign_headers(#state{access_key = AccessKey,
+ secret_access_key = SecretKey,
+ security_token = SecurityToken,
+ region = Region}, Service, Method, URI, Headers, Body) ->
+ rabbitmq_aws_sign:headers(#request{access_key = AccessKey,
+ secret_access_key = SecretKey,
+ security_token = SecurityToken,
+ region = Region,
+ service = Service,
+ method = Method,
+ uri = URI,
+ headers = Headers,
+ body = Body}).
diff --git a/deps/rabbitmq_aws/src/rabbitmq_aws_app.erl b/deps/rabbitmq_aws/src/rabbitmq_aws_app.erl
new file mode 100644
index 0000000000..b01196ec30
--- /dev/null
+++ b/deps/rabbitmq_aws/src/rabbitmq_aws_app.erl
@@ -0,0 +1,22 @@
+%% ====================================================================
+%% @author Gavin M. Roy <gavinmroy@gmail.com>
+%% @copyright 2016, Gavin M. Roy
+%% @doc rabbitmq_aws application startup
+%% @end
+%% ====================================================================
+-module(rabbitmq_aws_app).
+
+-behaviour(application).
+
+%% Application callbacks
+-export([start/2, stop/1]).
+
+%% ===================================================================
+%% Application callbacks
+%% ===================================================================
+
+start(_StartType, _StartArgs) ->
+ rabbitmq_aws_sup:start_link().
+
+stop(_State) ->
+ ok.
diff --git a/deps/rabbitmq_aws/src/rabbitmq_aws_config.erl b/deps/rabbitmq_aws/src/rabbitmq_aws_config.erl
new file mode 100644
index 0000000000..09b7606799
--- /dev/null
+++ b/deps/rabbitmq_aws/src/rabbitmq_aws_config.erl
@@ -0,0 +1,694 @@
+%% ====================================================================
+%% @author Gavin M. Roy <gavinmroy@gmail.com>
+%% @copyright 2016, Gavin M. Roy
+%% @copyright 2016-2020 VMware, Inc. or its affiliates.
+%% @private
+%% @doc rabbitmq_aws configuration functionality
+%% @end
+%% ====================================================================
+-module(rabbitmq_aws_config).
+
+%% API
+-export([credentials/0,
+ credentials/1,
+ value/2,
+ values/1,
+ instance_metadata_url/1,
+ instance_credentials_url/1,
+ instance_availability_zone_url/0,
+ instance_role_url/0,
+ region/0,
+ region/1]).
+
+%% Export all for unit tests
+-ifdef(TEST).
+-compile(export_all).
+-endif.
+
+-include("rabbitmq_aws.hrl").
+
+-spec credentials() -> security_credentials().
+%% @doc Return the credentials from environment variables, configuration or the
+%% EC2 local instance metadata server, if available.
+%%
+%% If the ``AWS_ACCESS_KEY_ID`` and ``AWS_SECRET_ACCESS_KEY`` environment
+%% variables are set, those values will be returned. If they are not, the
+%% local configuration file or shared credentials file will be consulted.
+%% If either exists and can be read, an attempt will be made to return the
+%% authentication credential values for the ``default`` profile if the
+%% ``AWS_DEFAULT_PROFILE`` environment variable is not set.
+%%
+%% When checking for the configuration file, it will attempt to read the
+%% file from ``~/.aws/config`` if the ``AWS_CONFIG_FILE`` environment
+%% variable is not set. If the file is found, and both the access key and
+%% secret access key are set for the profile, they will be returned. If not
+%% it will attempt to consult the shared credentials file.
+%%
+%% When checking for the shared credentials file, it will attempt to read
+%% from ``~/.aws/credentials`` if the ``AWS_SHARED_CREDENTIALS_FILE``
+%% environment variable is not set. If the file is found and both the
+%% access key and the secret access key are set for the profile, they will
+%% be returned.
+%%
+%% If credentials are returned at any point up through this stage, they
+%% will be returned as ``{ok, AccessKey, SecretKey, undefined, undefined}``,
+%% indicating the credentials are locally configured, and are not
+%% temporary.
+%%
+%% If no credentials could be resolved up until this point, there will be
+%% an attempt to contact a local EC2 instance metadata service for
+%% credentials.
+%%
+%% When the EC2 instance metadata server is checked for but does not exist,
+%% the operation will time out in ``?DEFAULT_HTTP_TIMEOUT``ms.
+%%
+%% When the EC2 instance metadata server exists, but data is not returned
+%% quickly, the operation will time out in ``?DEFAULT_HTTP_TIMEOUT``ms.
+%%
+%% If the service does exist, it will attempt to use the
+%% ``/meta-data/iam/security-credentials`` endpoint to request expiring
+%% credentials to use. If they are found, a tuple of
+%% ``{ok, AccessKey, SecretAccessKey, Expiration, SecurityToken}`` will be
+%% returned, indicating the credentials are temporary and that the
+%% ``X-Amz-Security-Token`` header should be used.
+%%
+%% Finally, if no credentials are found by this point, an error tuple
+%% will be returned.
+%% @end
+credentials() ->
+ credentials(profile()).
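+
+%% Illustrative sketch of consuming the result; sc_ok() is a five-element
+%% tuple per the types in rabbitmq_aws.hrl:
+%%
+%%   case rabbitmq_aws_config:credentials() of
+%%     {ok, AccessKey, SecretKey, Expiration, SecurityToken} -> ...;
+%%     {error, Reason} -> ...
+%%   end.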
+
+-spec credentials(string()) -> security_credentials().
+%% @doc Return the credentials from environment variables, configuration or the
+%% EC2 local instance metadata server, if available.
+%%
+%% If the ``AWS_ACCESS_KEY_ID`` and ``AWS_SECRET_ACCESS_KEY`` environment
+%% variables are set, those values will be returned. If they are not, the
+%% local configuration file or shared credentials file will be consulted.
+%%
+%% When checking for the configuration file, it will attempt to read the
+%% file from ``~/.aws/config`` if the ``AWS_CONFIG_FILE`` environment
+%% variable is not set. If the file is found, and both the access key and
+%% secret access key are set for the profile, they will be returned. If not
+%% it will attempt to consult the shared credentials file.
+%%
+%% When checking for the shared credentials file, it will attempt to read
+%% from ``~/.aws/credentials`` if the ``AWS_SHARED_CREDENTIALS_FILE``
+%% environment variable is not set. If the file is found and both the
+%% access key and the secret access key are set for the profile, they will
+%% be returned.
+%%
+%% If credentials are returned at any point up through this stage, they
+%% will be returned as ``{ok, AccessKey, SecretKey, undefined, undefined}``,
+%% indicating the credentials are locally configured, and are not
+%% temporary.
+%%
+%% If no credentials could be resolved up until this point, there will be
+%% an attempt to contact a local EC2 instance metadata service for
+%% credentials.
+%%
+%% When the EC2 instance metadata server is checked for but does not exist,
+%% the operation will time out in ``?DEFAULT_HTTP_TIMEOUT``ms.
+%%
+%% When the EC2 instance metadata server exists, but data is not returned
+%% quickly, the operation will time out in ``?DEFAULT_HTTP_TIMEOUT``ms.
+%%
+%% If the service does exist, it will attempt to use the
+%% ``/meta-data/iam/security-credentials`` endpoint to request expiring
+%% credentials to use. If they are found, a tuple of
+%% ``{ok, AccessKey, SecretAccessKey, Expiration, SecurityToken}`` will be
+%% returned, indicating the credentials are temporary and that the
+%% ``X-Amz-Security-Token`` header should be used.
+%%
+%% Finally, if no credentials are found by this point, an error tuple
+%% will be returned.
+%% @end
+credentials(Profile) ->
+ lookup_credentials(Profile,
+ os:getenv("AWS_ACCESS_KEY_ID"),
+ os:getenv("AWS_SECRET_ACCESS_KEY")).
+
+
+-spec region() -> {ok, string()}.
+%% @doc Return the region as configured by ``AWS_DEFAULT_REGION`` environment
+%% variable or as configured in the configuration file using the default
+%% profile or configured ``AWS_DEFAULT_PROFILE`` environment variable.
+%%
+%% If the environment variable is not set and a configuration
+%% file is not found, it will try to return the region from the EC2
+%% local instance metadata server.
+%% @end
+region() ->
+ region(profile()).
+
+
+-spec region(Region :: string()) -> {ok, region()}.
+%% @doc Return the region as configured by ``AWS_DEFAULT_REGION`` environment
+%% variable or as configured in the configuration file using the specified
+%% profile.
+%%
+%% If the environment variable is not set and a configuration
+%% file is not found, it will try to return the region from the EC2
+%% local instance metadata server.
+%% @end
+region(Profile) ->
+ case lookup_region(Profile, os:getenv("AWS_DEFAULT_REGION")) of
+ {ok, Region} -> {ok, Region};
+ _ -> {ok, ?DEFAULT_REGION}
+ end.
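+
+%% Illustrative sketch: with AWS_DEFAULT_REGION=eu-west-1 exported this
+%% returns {ok, "eu-west-1"}; with no configuration at all it falls back
+%% to {ok, "us-east-1"}:
+%%
+%%   {ok, Region} = rabbitmq_aws_config:region().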
+
+
+-spec value(Profile :: string(), Key :: atom())
+ -> Value :: any() | {error, Reason :: atom()}.
+%% @doc Return the configuration value for the specified profile and key,
+%% or an error if the profile is not found.
+%% @end
+value(Profile, Key) ->
+ get_value(Key, values(Profile)).
+
+
+-spec values(Profile :: string())
+ -> Settings :: list()
+ | {error, Reason :: atom()}.
+%% @doc Return the configuration data for the specified profile or an error
+%% if the profile is not found.
+%% @end
+values(Profile) ->
+ case config_file_data() of
+ {error, Reason} ->
+ {error, Reason};
+ Settings ->
+ Prefixed = lists:flatten(["profile ", Profile]),
+ proplists:get_value(Profile, Settings,
+ proplists:get_value(Prefixed,
+ Settings, {error, undefined}))
+ end.
+
+
+%% -----------------------------------------------------------------------------
+%% Private / Internal Methods
+%% -----------------------------------------------------------------------------
+
+
+-spec config_file() -> string().
+%% @doc Return the configuration file to test using either the value of the
+%% AWS_CONFIG_FILE or the default location where the file is expected to
+%% exist.
+%% @end
+config_file() ->
+ config_file(os:getenv("AWS_CONFIG_FILE")).
+
+
+-spec config_file(Path :: false | string()) -> string().
+%% @doc Return the configuration file to test using either the value of the
+%% AWS_CONFIG_FILE or the default location where the file is expected to
+%% exist.
+%% @end
+config_file(false) ->
+ filename:join([home_path(), ".aws", "config"]);
+config_file(EnvVar) ->
+ EnvVar.
+
+
+-spec config_file_data() -> list() | {error, Reason :: atom()}.
+%% @doc Return the values from a configuration file as a proplist by section
+%% @end
+config_file_data() ->
+ ini_file_data(config_file()).
+
+
+-spec credentials_file() -> string().
+%% @doc Return the shared credentials file to test using either the value of the
+%% AWS_SHARED_CREDENTIALS_FILE or the default location where the file
+%% is expected to exist.
+%% @end
+credentials_file() ->
+ credentials_file(os:getenv("AWS_SHARED_CREDENTIALS_FILE")).
+
+
+-spec credentials_file(Path :: false | string()) -> string().
+%% @doc Return the shared credentials file to test using either the value of the
+%% AWS_SHARED_CREDENTIALS_FILE or the default location where the file
+%% is expected to exist.
+%% @end
+credentials_file(false) ->
+ filename:join([home_path(), ".aws", "credentials"]);
+credentials_file(EnvVar) ->
+ EnvVar.
+
+-spec credentials_file_data() -> list() | {error, Reason :: atom()}.
+%% @doc Return the values from a configuration file as a proplist by section
+%% @end
+credentials_file_data() ->
+ ini_file_data(credentials_file()).
+
+
+-spec get_value(Key :: atom(), Settings :: list()) -> any();
+ (Key :: atom(), {error, Reason :: atom()}) -> {error, Reason :: atom()}.
+%% @doc Get the value for a key from a settings proplist.
+%% @end
+get_value(Key, Settings) when is_list(Settings) ->
+ proplists:get_value(Key, Settings, {error, undefined});
+get_value(_, {error, Reason}) -> {error, Reason}.
+
+
+-spec home_path() -> string().
+%% @doc Return the path to the current user's home directory, checking for the
+%% HOME environment variable before returning the current working
+%% directory if it's not set.
+%% @end
+home_path() ->
+ home_path(os:getenv("HOME")).
+
+
+-spec home_path(Value :: string() | false) -> string().
+%% @doc Return the path to the current user's home directory, checking for the
+%% HOME environment variable before returning the current working
+%% directory if it's not set.
+%% @end
+home_path(false) -> filename:absname(".");
+home_path(Value) -> Value.
+
+
+-spec ini_file_data(Path :: string())
+ -> {ok, list()} | {error, atom()}.
+%% @doc Return the parsed ini file for the specified path.
+%% @end
+ini_file_data(Path) ->
+ ini_file_data(Path, filelib:is_file(Path)).
+
+
+-spec ini_file_data(Path :: string(), FileExists :: true | false)
+ -> {ok, list()} | {error, atom()}.
+%% @doc Return the parsed ini file for the specified path.
+%% @end
+ini_file_data(Path, true) ->
+ case read_file(Path) of
+ {ok, Lines} -> ini_parse_lines(Lines, none, none, []);
+ {error, Reason} -> {error, Reason}
+ end;
+ini_file_data(_, false) -> {error, enoent}.
+
+
+-spec ini_format_key(any()) -> atom() | {error, type}.
+%% @doc Convert an INI file key to an atom, stripping any surrounding whitespace.
+%% @end
+ini_format_key(Key) ->
+ case io_lib:printable_list(Key) of
+ true -> list_to_atom(string:strip(Key));
+ false -> {error, type}
+ end.
+
+
+-spec ini_parse_line(Section :: list(),
+ Parent :: atom(),
+ Line :: binary())
+ -> {Section :: list(), Parent :: atom() | none}.
+%% @doc Parse the AWS configuration INI file, returning a proplist
+%% @end
+ini_parse_line(Section, Parent, <<" ", Line/binary>>) ->
+ Child = proplists:get_value(Parent, Section, []),
+ {ok, NewChild} = ini_parse_line_parts(Child, ini_split_line(Line)),
+ {lists:keystore(Parent, 1, Section, {Parent, NewChild}), Parent};
+ini_parse_line(Section, _, Line) ->
+ case ini_parse_line_parts(Section, ini_split_line(Line)) of
+ {ok, NewSection} -> {NewSection, none};
+ {new_parent, Parent} -> {Section, Parent}
+ end.
+
+
+-spec ini_parse_line_parts(Section :: list(),
+ Parts :: list())
+ -> {ok, list()} | {new_parent, atom()}.
+%% @doc Parse the AWS configuration INI file, returning a proplist
+%% @end
+ini_parse_line_parts(Section, []) -> {ok, Section};
+ini_parse_line_parts(Section, [RawKey, Value]) ->
+ Key = ini_format_key(RawKey),
+ {ok, lists:keystore(Key, 1, Section, {Key, maybe_convert_number(Value)})};
+ini_parse_line_parts(_, [RawKey]) ->
+ {new_parent, ini_format_key(RawKey)}.
+
+
+-spec ini_parse_lines(Lines::[binary()],
+ SectionName :: string() | atom(),
+ Parent :: atom(),
+ Accumulator :: list())
+ -> list().
+%% @doc Parse the AWS configuration INI file
+%% @end
+ini_parse_lines([], _, _, Settings) -> Settings;
+ini_parse_lines([H|T], SectionName, Parent, Settings) ->
+ {ok, NewSectionName} = ini_parse_section_name(SectionName, H),
+ {ok, NewParent, NewSettings} = ini_parse_section(H, NewSectionName,
+ Parent, Settings),
+ ini_parse_lines(T, NewSectionName, NewParent, NewSettings).
+
+
+-spec ini_parse_section(Line :: binary(),
+ SectionName :: string(),
+ Parent :: atom(),
+ Section :: list())
+ -> {ok, NewParent :: atom(), Section :: list()}.
+%% @doc Parse a line from the ini file, returning it as part of the appropriate
+%% section.
+%% @end
+ini_parse_section(Line, SectionName, Parent, Settings) ->
+ Section = proplists:get_value(SectionName, Settings, []),
+ {NewSection, NewParent} = ini_parse_line(Section, Parent, Line),
+ {ok, NewParent, lists:keystore(SectionName, 1, Settings,
+ {SectionName, NewSection})}.
+
+
+-spec ini_parse_section_name(CurrentSection :: string() | atom(),
+ Line :: binary())
+ -> {ok, SectionName :: string()}.
+%% @doc Attempt to parse a section name from the current line, returning either
+%% the new parsed section name, or the current section name.
+%% @end
+ini_parse_section_name(CurrentSection, Line) ->
+ Value = binary_to_list(Line),
+ case re:run(Value, "\\[([\\w\\s+\\-_]+)\\]", [{capture, all, list}]) of
+ {match, [_, SectionName]} -> {ok, SectionName};
+ nomatch -> {ok, CurrentSection}
+ end.
+
+
+-spec ini_split_line(binary()) -> list().
+%% @doc Split a key-value pair delimited by ``=`` into a list of strings.
+%% @end
+ini_split_line(Line) ->
+ string:tokens(string:strip(binary_to_list(Line)), "=").
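+%% For example, ini_split_line(<<"region = us-west-5">>) yields
+%% ["region ", " us-west-5"]; the callers strip the remaining whitespace.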
+
+
+-spec instance_availability_zone_url() -> string().
+%% @doc Return the URL for querying the availability zone from the Instance
+%% Metadata service
+%% @end
+instance_availability_zone_url() ->
+ instance_metadata_url(string:join([?INSTANCE_METADATA_BASE, ?INSTANCE_AZ], "/")).
+
+
+-spec instance_credentials_url(string()) -> string().
+%% @doc Return the URL for querying temporary credentials from the Instance
+%% Metadata service for the specified role
+%% @end
+instance_credentials_url(Role) ->
+ instance_metadata_url(string:join([?INSTANCE_METADATA_BASE, ?INSTANCE_CREDENTIALS, Role], "/")).
+
+
+-spec instance_metadata_url(string()) -> string().
+%% @doc Build the Instance Metadata service URL for the specified path
+%% @end
+instance_metadata_url(Path) ->
+ rabbitmq_aws_urilib:build(#uri{scheme = "http",
+ authority = {undefined, ?INSTANCE_HOST, undefined},
+ path = Path, query = []}).
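+%% For example (per the unit tests), instance_metadata_url("a/b/c") and
+%% instance_metadata_url("/a/b/c") both build "http://169.254.169.254/a/b/c".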
+
+
+-spec instance_role_url() -> string().
+%% @doc Return the URL for querying the role associated with the current
+%% instance from the Instance Metadata service
+%% @end
+instance_role_url() ->
+ instance_metadata_url(string:join([?INSTANCE_METADATA_BASE, ?INSTANCE_CREDENTIALS], "/")).
+
+
+-spec lookup_credentials(Profile :: string(),
+ AccessKey :: string() | false,
+ SecretKey :: string() | false)
+ -> security_credentials().
+%% @doc Return the access key and secret access key if they are set in
+%% environment variables, otherwise lookup the credentials from the config
+%% file for the specified profile.
+%% @end
+lookup_credentials(Profile, false, _) ->
+ lookup_credentials_from_config(Profile,
+ value(Profile, aws_access_key_id),
+ value(Profile, aws_secret_access_key));
+lookup_credentials(Profile, _, false) ->
+ lookup_credentials_from_config(Profile,
+ value(Profile, aws_access_key_id),
+ value(Profile, aws_secret_access_key));
+lookup_credentials(_, AccessKey, SecretKey) ->
+ {ok, AccessKey, SecretKey, undefined, undefined}.
+
+
+-spec lookup_credentials_from_config(Profile :: string(),
+                                     access_key() | {error, Reason :: atom()},
+                                     secret_access_key() | {error, Reason :: atom()})
+  -> security_credentials().
+%% @doc Return the access key and secret access key if they are set for
+%% the specified profile in the config file, if it exists. If the file
+%% does not exist, or the profile or its values are not set, look up
+%% the values in the shared credentials file
+%% @end
+lookup_credentials_from_config(Profile, {error,_}, _) ->
+ lookup_credentials_from_file(Profile, credentials_file_data());
+lookup_credentials_from_config(_, AccessKey, SecretKey) ->
+ {ok, AccessKey, SecretKey, undefined, undefined}.
+
+
+-spec lookup_credentials_from_file(Profile :: string(),
+ Credentials :: list())
+ -> security_credentials().
+%% @doc Check to see if the shared credentials file exists and if it does,
+%% invoke ``lookup_credentials_from_section/1`` to attempt to
+%% get the credentials values out of it. If the file does not exist,
+%% attempt to lookup the values from the EC2 instance metadata service.
+%% @end
+lookup_credentials_from_file(_, {error,_}) ->
+ lookup_credentials_from_instance_metadata();
+lookup_credentials_from_file(Profile, Credentials) ->
+ Section = proplists:get_value(Profile, Credentials),
+ lookup_credentials_from_section(Section).
+
+
+-spec lookup_credentials_from_section(Credentials :: list() | undefined)
+ -> security_credentials().
+%% @doc Return the access key and secret access key if they are set for
+%% the specified profile in the shared credentials file. If the
+%% profile is not set or the values are not set in the profile, attempt to
+%% lookup the values from the EC2 instance metadata service.
+%% @end
+lookup_credentials_from_section(undefined) ->
+ lookup_credentials_from_instance_metadata();
+lookup_credentials_from_section(Credentials) ->
+ AccessKey = proplists:get_value(aws_access_key_id, Credentials, undefined),
+ SecretKey = proplists:get_value(aws_secret_access_key, Credentials, undefined),
+ lookup_credentials_from_proplist(AccessKey, SecretKey).
+
+
+-spec lookup_credentials_from_proplist(AccessKey :: access_key(),
+ SecretAccessKey :: secret_access_key())
+ -> security_credentials().
+%% @doc Process the contents of the credentials proplist, checking if the
+%% access key and secret access key are both set.
+%% @end
+lookup_credentials_from_proplist(undefined, _) ->
+ lookup_credentials_from_instance_metadata();
+lookup_credentials_from_proplist(_, undefined) ->
+ lookup_credentials_from_instance_metadata();
+lookup_credentials_from_proplist(AccessKey, SecretKey) ->
+ {ok, AccessKey, SecretKey, undefined, undefined}.
+
+
+-spec lookup_credentials_from_instance_metadata()
+ -> security_credentials().
+%% @doc Attempt to lookup the values from the EC2 instance metadata service.
+%% @end
+lookup_credentials_from_instance_metadata() ->
+ Role = maybe_get_role_from_instance_metadata(),
+ maybe_get_credentials_from_instance_metadata(Role).
+
+
+-spec lookup_region(Profile :: string(),
+ Region :: false | string())
+ -> {ok, string()} | {error, undefined}.
+%% @doc If Region is false, lookup the region from the config or the EC2
+%% instance metadata service.
+%% @end
+lookup_region(Profile, false) ->
+ lookup_region_from_config(values(Profile));
+lookup_region(_, Region) -> {ok, Region}.
+
+
+-spec lookup_region_from_config(Settings :: list() | {error, enoent})
+ -> {ok, string()} | {error, undefined}.
+%% @doc Return the region from the local configuration file. If local config
+%% settings are not found, try to lookup the region from the EC2 instance
+%% metadata service.
+%% @end
+lookup_region_from_config({error, enoent}) ->
+ maybe_get_region_from_instance_metadata();
+lookup_region_from_config(Settings) ->
+ lookup_region_from_settings(proplists:get_value(region, Settings)).
+
+
+-spec lookup_region_from_settings(any() | undefined)
+ -> {ok, string()} | {error, undefined}.
+%% @doc Decide if the region should be loaded from the Instance Metadata service
+%% or if it's already set.
+%% @end
+lookup_region_from_settings(undefined) ->
+ maybe_get_region_from_instance_metadata();
+lookup_region_from_settings(Region) ->
+ {ok, Region}.
+
+
+-spec maybe_convert_number(string()) -> integer() | float() | string().
+%% @doc Return an integer or float parsed from the string if possible,
+%% otherwise return the string unchanged.
+%% @end
+maybe_convert_number(Value) ->
+ Stripped = string:strip(Value),
+ case string:to_float(Stripped) of
+ {error,no_float} ->
+ try
+ list_to_integer(Stripped)
+ catch
+ error:badarg -> Stripped
+ end;
+ {F,_Rest} -> F
+ end.
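+%% Examples from the unit tests: maybe_convert_number("123") returns 123,
+%% maybe_convert_number("123.456") returns 123.456, and a non-numeric
+%% string such as "hello, world" is returned unchanged.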
+
+
+-spec maybe_get_credentials_from_instance_metadata({ok, Role :: string()} |
+ {error, undefined})
+ -> security_credentials().
+%% @doc Try to query the EC2 local instance metadata service to get temporary
+%% authentication credentials.
+%% @end
+maybe_get_credentials_from_instance_metadata({error, undefined}) ->
+ {error, undefined};
+maybe_get_credentials_from_instance_metadata({ok, Role}) ->
+ URL = instance_credentials_url(Role),
+ parse_credentials_response(perform_http_get(URL)).
+
+
+-spec maybe_get_region_from_instance_metadata()
+ -> {ok, Region :: string()} | {error, Reason :: atom()}.
+%% @doc Try to query the EC2 local instance metadata service to get the region
+%% @end
+maybe_get_region_from_instance_metadata() ->
+ URL = instance_availability_zone_url(),
+ parse_az_response(perform_http_get(URL)).
+
+
+%% @doc Try to query the EC2 local instance metadata service to get the role
+%% assigned to the instance.
+%% @end
+maybe_get_role_from_instance_metadata() ->
+ URL = instance_role_url(),
+ parse_body_response(perform_http_get(URL)).
+
+
+-spec parse_az_response(httpc_result())
+ -> {ok, Region :: string()} | {error, Reason :: atom()}.
+%% @doc Parse the response from the Availability Zone query to the
+%% Instance Metadata service, returning the Region if successful.
+%% @end
+parse_az_response({error, _}) -> {error, undefined};
+parse_az_response({ok, {{_, 200, _}, _, Body}})
+ -> {ok, region_from_availability_zone(Body)};
+parse_az_response({ok, {{_, _, _}, _, _}}) -> {error, undefined}.
+
+
+-spec parse_body_response(httpc_result())
+ -> {ok, Value :: string()} | {error, Reason :: atom()}.
+%% @doc Parse the return response from the Instance Metadata service where the
+%% body value is the string to process.
+%% @end
+parse_body_response({error, _}) -> {error, undefined};
+parse_body_response({ok, {{_, 200, _}, _, Body}}) -> {ok, Body};
+parse_body_response({ok, {{_, _, _}, _, _}}) -> {error, undefined}.
+
+
+-spec parse_credentials_response(httpc_result()) -> security_credentials().
+%% @doc Parse the response from the Instance Metadata service credentials
+%% request, returning the parsed security credentials if successful.
+%% @end
+parse_credentials_response({error, _}) -> {error, undefined};
+parse_credentials_response({ok, {{_, 404, _}, _, _}}) -> {error, undefined};
+parse_credentials_response({ok, {{_, 200, _}, _, Body}}) ->
+ Parsed = rabbitmq_aws_json:decode(Body),
+ {ok,
+ proplists:get_value("AccessKeyId", Parsed),
+ proplists:get_value("SecretAccessKey", Parsed),
+ parse_iso8601_timestamp(proplists:get_value("Expiration", Parsed)),
+ proplists:get_value("Token", Parsed)}.
+
+
+-spec perform_http_get(string()) -> httpc_result().
+%% @doc Wrap httpc:request/4 to simplify Instance Metadata service requests
+%% @end
+perform_http_get(URL) ->
+ httpc:request(get, {URL, []},
+ [{timeout, ?DEFAULT_HTTP_TIMEOUT}], []).
+
+
+-spec parse_iso8601_timestamp(Timestamp :: string() | binary()) -> calendar:datetime().
+%% @doc Parse an ISO8601 timestamp, returning a datetime() value.
+%% @end
+parse_iso8601_timestamp(Timestamp) when is_binary(Timestamp) ->
+ parse_iso8601_timestamp(binary_to_list(Timestamp));
+parse_iso8601_timestamp(Timestamp) ->
+ [Date, Time] = string:tokens(Timestamp, "T"),
+ [Year, Month, Day] = string:tokens(Date, "-"),
+ [Hour, Minute, Second] = string:tokens(Time, ":"),
+ {{list_to_integer(Year), list_to_integer(Month), list_to_integer(Day)},
+ {list_to_integer(Hour), list_to_integer(Minute), list_to_integer(string:left(Second,2))}}.
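+%% Example from the unit tests:
+%%   parse_iso8601_timestamp("2016-05-19T18:25:23Z") returns
+%%   {{2016,5,19},{18,25,23}}.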
+
+
+-spec profile() -> string().
+%% @doc Return the value of the AWS_DEFAULT_PROFILE environment variable or the
+%% "default" profile.
+%% @end
+profile() -> profile(os:getenv("AWS_DEFAULT_PROFILE")).
+
+
+-spec profile(false | string()) -> string().
+%% @doc Process the value passed in to determine if we will return the default
+%% profile or the value from the environment variable.
+%% @end
+profile(false) -> ?DEFAULT_PROFILE;
+profile(Value) -> Value.
+
+
+-spec read_file(string()) -> {ok, [binary()]} | {error, Reason :: atom()}.
+%% @doc Read the specified file, returning the contents as a list of binaries.
+%% @end
+read_file(Path) ->
+ read_from_file(file:open(Path, [read])).
+
+
+-spec read_from_file({ok, file:fd()} | {error, Reason :: atom()})
+  -> {ok, [binary()]} | {error, Reason :: atom()}.
+%% @doc Read from the opened file, returning the contents as a list of binaries.
+%% @end
+read_from_file({ok, Fd}) ->
+ read_file(Fd, []);
+read_from_file({error, Reason}) ->
+ {error, Reason}.
+
+
+-spec read_file(Fd :: file:io_device(), Lines :: list())
+  -> {ok, [binary()]} | {error, Reason :: atom()}.
+%% @doc Read from the open file, accumulating the lines in a list.
+%% @end
+read_file(Fd, Lines) ->
+ case file:read_line(Fd) of
+ {ok, Value} ->
+ Line = string:strip(Value, right, $\n),
+ read_file(Fd, lists:append(Lines, [list_to_binary(Line)]));
+ eof -> {ok, Lines};
+ {error, Reason} -> {error, Reason}
+ end.
+
+
+-spec region_from_availability_zone(Value :: string()) -> string().
+%% @doc Strip the availability zone suffix from the region.
+%% @end
+region_from_availability_zone(Value) ->
+ string:sub_string(Value, 1, length(Value) - 1).
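+%% For example, "us-west-1a" becomes "us-west-1".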
diff --git a/deps/rabbitmq_aws/src/rabbitmq_aws_json.erl b/deps/rabbitmq_aws/src/rabbitmq_aws_json.erl
new file mode 100644
index 0000000000..5b4e3b2f45
--- /dev/null
+++ b/deps/rabbitmq_aws/src/rabbitmq_aws_json.erl
@@ -0,0 +1,62 @@
+%% ====================================================================
+%% @author Gavin M. Roy <gavinmroy@gmail.com>
+%% @copyright 2016, Gavin M. Roy
+%% @doc Wrap a JSON parser to provide easy abstractions across
+%% implementations and ensure a consistent return interface.
+%% @end
+%% ====================================================================
+-module(rabbitmq_aws_json).
+
+-export([decode/1]).
+
+-spec decode(Value :: string() | binary()) -> list().
+%% @doc Decode a JSON string returning a proplist
+%% @end
+decode(Value) when is_list(Value) ->
+ decode(list_to_binary(Value));
+decode(Value) when is_binary(Value) ->
+ % We set an empty list of options because we don't want the default
+  % options set in rabbit_json:decode/1. And we can't override
+ % 'return_maps' with '{return_maps, false}' because of a bug in jsx's
+ % options handler.
+ % See https://github.com/talentdeficit/jsx/pull/115
+ Decoded0 = rabbit_json:decode(Value, []),
+ Decoded = if
+ is_map(Decoded0) -> maps:to_list(Decoded0);
+ is_list(Decoded0) -> Decoded0
+ end,
+ convert_binary_values(Decoded, []).
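+%% Example from the unit tests:
+%%   decode("{\"misc\": [\"foo\", true, 123]}") returns
+%%   [{"misc", ["foo", true, 123]}].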
+
+
+-spec convert_binary_values(Value :: list(), Accumulator :: list()) -> list().
+%% @doc Convert the binary key/value pairs returned by rabbit_json to strings.
+%% @end
+convert_binary_values([], Value) -> Value;
+convert_binary_values([{K, V}|T], Accum) when is_map(V) ->
+ convert_binary_values(
+ T,
+ lists:append(
+ Accum,
+ [{binary_to_list(K), convert_binary_values(maps:to_list(V), [])}]));
+convert_binary_values([{K, V}|T], Accum) when is_list(V) ->
+ convert_binary_values(
+ T,
+ lists:append(
+ Accum,
+ [{binary_to_list(K), convert_binary_values(V, [])}]));
+convert_binary_values([{}|T],Accum) ->
+ convert_binary_values(T, lists:append(Accum, [{}]));
+convert_binary_values([{K, V}|T], Accum) when is_binary(V) ->
+ convert_binary_values(T, lists:append(Accum, [{binary_to_list(K), binary_to_list(V)}]));
+convert_binary_values([{K, V}|T], Accum) ->
+ convert_binary_values(T, lists:append(Accum, [{binary_to_list(K), V}]));
+convert_binary_values([H|T], Accum) when is_map(H) ->
+ convert_binary_values(T, lists:append(Accum, convert_binary_values(maps:to_list(H), [])));
+convert_binary_values([H|T], Accum) when is_binary(H) ->
+ convert_binary_values(T, lists:append(Accum, [binary_to_list(H)]));
+convert_binary_values([H|T], Accum) when is_integer(H) ->
+ convert_binary_values(T, lists:append(Accum, [H]));
+convert_binary_values([H|T], Accum) when is_atom(H) ->
+ convert_binary_values(T, lists:append(Accum, [H]));
+convert_binary_values([H|T], Accum) ->
+ convert_binary_values(T, lists:append(Accum, convert_binary_values(H, []))).
diff --git a/deps/rabbitmq_aws/src/rabbitmq_aws_sign.erl b/deps/rabbitmq_aws/src/rabbitmq_aws_sign.erl
new file mode 100644
index 0000000000..b238f81cee
--- /dev/null
+++ b/deps/rabbitmq_aws/src/rabbitmq_aws_sign.erl
@@ -0,0 +1,289 @@
+%% ====================================================================
+%% @author Gavin M. Roy <gavinmroy@gmail.com>
+%% @copyright 2016, Gavin M. Roy
+%% @private
+%% @doc rabbitmq_aws request signing methods
+%% @end
+%% ====================================================================
+-module(rabbitmq_aws_sign).
+
+%% API
+-export([headers/1, request_hash/5]).
+
+%% Transitional step until we can require Erlang/OTP 22 and
+%% use crypto:mac/4 instead of crypto:hmac/3.
+-compile(nowarn_deprecated_function).
+
+%% Export all for unit tests
+-ifdef(TEST).
+-compile(export_all).
+-endif.
+
+-ignore_xref([{crypto, hmac, 3}]).
+
+-include("rabbitmq_aws.hrl").
+
+-define(ALGORITHM, "AWS4-HMAC-SHA256").
+-define(ISOFORMAT_BASIC, "~4.10.0b~2.10.0b~2.10.0bT~2.10.0b~2.10.0b~2.10.0bZ").
+
+-spec headers(request()) -> headers().
+%% @doc Create the signed request headers
+%% @end
+headers(Request) ->
+ RequestTimestamp = local_time(),
+ PayloadHash = sha256(Request#request.body),
+ URI = rabbitmq_aws_urilib:parse(Request#request.uri),
+ {_, Host, _} = URI#uri.authority,
+ Headers = append_headers(RequestTimestamp,
+ length(Request#request.body),
+ PayloadHash,
+ Host,
+ Request#request.security_token,
+ Request#request.headers),
+ RequestHash = request_hash(Request#request.method,
+ URI#uri.path,
+ URI#uri.query,
+ Headers,
+ Request#request.body),
+ AuthValue = authorization(Request#request.access_key,
+ Request#request.secret_access_key,
+ RequestTimestamp,
+ Request#request.region,
+ Request#request.service,
+ Headers,
+ RequestHash),
+ sort_headers(lists:merge([{"authorization", AuthValue}], Headers)).
+
+
+-spec amz_date(AMZTimestamp :: string()) -> string().
+%% @doc Extract the date from the AMZ timestamp format.
+%% @end
+amz_date(AMZTimestamp) ->
+ [RequestDate, _] = string:tokens(AMZTimestamp, "T"),
+ RequestDate.
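+%% For example, amz_date("20160220T120000Z") returns "20160220".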
+
+
+-spec append_headers(AMZDate :: string(),
+ ContentLength :: integer(),
+ PayloadHash :: string(),
+ Hostname :: host(),
+ SecurityToken :: security_token(),
+ Headers :: headers()) -> list().
+%% @doc Append the headers that need to be signed to the headers passed in with
+%% the request
+%% @end
+append_headers(AMZDate, ContentLength, PayloadHash, Hostname, SecurityToken, Headers) ->
+ Defaults = default_headers(AMZDate, ContentLength, PayloadHash, Hostname, SecurityToken),
+ Headers1 = [{string:to_lower(Key), Value} || {Key, Value} <- Headers],
+ Keys = lists:usort(lists:append([string:to_lower(Key) || {Key, _} <- Defaults],
+ [Key || {Key, _} <- Headers1])),
+ sort_headers([{Key, header_value(Key, Headers1, proplists:get_value(Key, Defaults))} || Key <- Keys]).
+
+
+-spec authorization(AccessKey :: access_key(),
+ SecretAccessKey :: secret_access_key(),
+ RequestTimestamp :: string(),
+ Region :: region(),
+ Service :: string(),
+ Headers :: headers(),
+ RequestHash :: string()) -> string().
+%% @doc Return the authorization header value
+%% @end
+authorization(AccessKey, SecretAccessKey, RequestTimestamp, Region, Service, Headers, RequestHash) ->
+ RequestDate = amz_date(RequestTimestamp),
+ Scope = scope(RequestDate, Region, Service),
+ Credentials = ?ALGORITHM ++ " Credential=" ++ AccessKey ++ "/" ++ Scope,
+ SignedHeaders = "SignedHeaders=" ++ signed_headers(Headers),
+ StringToSign = string_to_sign(RequestTimestamp, RequestDate, Region, Service, RequestHash),
+ SigningKey = signing_key(SecretAccessKey, RequestDate, Region, Service),
+ Signature = string:join(["Signature", signature(StringToSign, SigningKey)], "="),
+ string:join([Credentials, SignedHeaders, Signature], ", ").
+
+
+-spec default_headers(RequestTimestamp :: string(),
+ ContentLength :: integer(),
+ PayloadHash :: string(),
+ Hostname :: host(),
+ SecurityToken :: security_token()) -> headers().
+%% @doc Build the base headers that are merged in with the headers for every
+%% request.
+%% @end
+default_headers(RequestTimestamp, ContentLength, PayloadHash, Hostname, undefined) ->
+ [{"content-length", integer_to_list(ContentLength)},
+ {"date", RequestTimestamp},
+ {"host", Hostname},
+ {"x-amz-content-sha256", PayloadHash}];
+default_headers(RequestTimestamp, ContentLength, PayloadHash, Hostname, SecurityToken) ->
+ [{"content-length", integer_to_list(ContentLength)},
+ {"date", RequestTimestamp},
+ {"host", Hostname},
+ {"x-amz-content-sha256", PayloadHash},
+ {"x-amz-security-token", SecurityToken}].
+
+
+-spec canonical_headers(Headers :: headers()) -> string().
+%% @doc Convert the headers list to a line-feed delimited string in the AWS
+%% canonical headers format.
+%% @end
+canonical_headers(Headers) ->
+ canonical_headers(sort_headers(Headers), []).
+
+-spec canonical_headers(Headers :: headers(), CanonicalHeaders :: list()) -> string().
+%% @doc Convert the headers list to a line-feed delimited string in the AWS
+%% canonical headers format.
+%% @end
+canonical_headers([], CanonicalHeaders) ->
+ lists:flatten(CanonicalHeaders);
+canonical_headers([{Key, Value}|T], CanonicalHeaders) ->
+ Header = string:join([string:to_lower(Key), Value], ":") ++ "\n",
+ canonical_headers(T, lists:append(CanonicalHeaders, [Header])).
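+%% For example, canonical_headers([{"Host", "iam.amazonaws.com"},
+%% {"Date", "20150830T123600Z"}]) returns
+%% "date:20150830T123600Z\nhost:iam.amazonaws.com\n".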
+
+
+-spec credential_scope(RequestDate :: string(),
+ Region :: region(),
+ Service :: string()) -> string().
+%% @doc Return the credential scope string used in creating the request string to sign.
+%% @end
+credential_scope(RequestDate, Region, Service) ->
+ lists:flatten(string:join([RequestDate, Region, Service, "aws4_request"], "/")).
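+%% For example, credential_scope("20150830", "us-east-1", "iam") returns
+%% "20150830/us-east-1/iam/aws4_request".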
+
+
+-spec header_value(Key :: string(),
+ Headers :: headers(),
+ Default :: string()) -> string().
+%% @doc Return the header value or the default value for the header if it
+%% is not specified.
+%% @end
+header_value(Key, Headers, Default) ->
+ proplists:get_value(Key, Headers, proplists:get_value(string:to_lower(Key), Headers, Default)).
+
+
+-spec hmac_sign(Key :: string(), Message :: string()) -> string().
+%% @doc Return the HMAC-SHA256 signature of the message using the specified key.
+%% @end
+hmac_sign(Key, Message) ->
+ SignedValue = crypto:hmac(sha256, Key, Message),
+ binary_to_list(SignedValue).
+
+
+-spec local_time() -> string().
+%% @doc Return the current timestamp in GMT formatted in ISO8601 basic format.
+%% @end
+local_time() ->
+ [LocalTime] = calendar:local_time_to_universal_time_dst(calendar:local_time()),
+ local_time(LocalTime).
+
+
+-spec local_time(calendar:datetime()) -> string().
+%% @doc Format the given datetime as a GMT timestamp in ISO8601 basic format.
+%% @end
+local_time({{Y,M,D},{HH,MM,SS}}) ->
+ lists:flatten(io_lib:format(?ISOFORMAT_BASIC, [Y, M, D, HH, MM, SS])).
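+%% Example from the unit tests:
+%%   local_time({{2015,5,8},{13,15,20}}) returns "20150508T131520Z".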
+
+
+-spec query_string(QueryArgs :: list() | undefined) -> string().
+%% @doc Return the sorted query string for the specified arguments.
+%% @end
+query_string(undefined) -> "";
+query_string(QueryArgs) ->
+ rabbitmq_aws_urilib:build_query_string(lists:keysort(1, QueryArgs)).
+
+
+-spec request_hash(Method :: method(),
+ Path :: path(),
+ QArgs :: query_args(),
+ Headers :: headers(),
+ Payload :: string()) -> string().
+%% @doc Create the request hash value
+%% @end
+request_hash(Method, Path, QArgs, Headers, Payload) ->
+ RawPath = case string:slice(Path, 0, 1) of
+ "/" -> Path;
+ _ -> "/" ++ Path
+ end,
+ EncodedPath = uri_string:recompose(#{path => RawPath}),
+ CanonicalRequest = string:join([string:to_upper(atom_to_list(Method)),
+ EncodedPath,
+ query_string(QArgs),
+ canonical_headers(Headers),
+ signed_headers(Headers),
+ sha256(Payload)], "\n"),
+ sha256(CanonicalRequest).
+
+
+-spec scope(AMZDate :: string(),
+ Region :: region(),
+ Service :: string()) -> string().
+%% @doc Create the Scope string
+%% @end
+scope(AMZDate, Region, Service) ->
+ string:join([AMZDate, Region, Service, "aws4_request"], "/").
+
+
+-spec sha256(Value :: string()) -> string().
+%% @doc Return the SHA-256 hash for the specified value.
+%% @end
+sha256(Value) ->
+ lists:flatten(io_lib:format("~64.16.0b",
+ [binary:decode_unsigned(crypto:hash(sha256, Value))])).
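+%% For example, sha256("") returns the well-known digest of an empty payload,
+%% "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855".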
+
+
+-spec signed_headers(Headers :: list()) -> string().
+%% @doc Return the signed headers string of semicolon-delimited header key names
+%% @end
+signed_headers(Headers) ->
+ signed_headers(sort_headers(Headers), []).
+
+
+-spec signed_headers(Headers :: headers(), Values :: list()) -> string().
+%% @doc Return the signed headers string of semicolon-delimited header key names
+%% @end
+signed_headers([], SignedHeaders) -> string:join(SignedHeaders, ";");
+signed_headers([{Key,_}|T], SignedHeaders) ->
+ signed_headers(T, SignedHeaders ++ [string:to_lower(Key)]).
+
+
+-spec signature(StringToSign :: string(),
+ SigningKey :: string()) -> string().
+%% @doc Create the request signature.
+%% @end
+signature(StringToSign, SigningKey) ->
+ SignedValue = crypto:hmac(sha256, SigningKey, StringToSign),
+ lists:flatten(io_lib:format("~64.16.0b", [binary:decode_unsigned(SignedValue)])).
+
+
+-spec signing_key(SecretKey :: secret_access_key(),
+ AMZDate :: string(),
+ Region :: region(),
+ Service :: string()) -> string().
+%% @doc Create the signing key
+%% @end
+signing_key(SecretKey, AMZDate, Region, Service) ->
+ DateKey = hmac_sign("AWS4" ++ SecretKey, AMZDate),
+ RegionKey = hmac_sign(DateKey, Region),
+ ServiceKey = hmac_sign(RegionKey, Service),
+ hmac_sign(ServiceKey, "aws4_request").
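+%% This follows the AWS Signature Version 4 key derivation chain:
+%%   kDate = HMAC("AWS4" ++ SecretKey, Date), kRegion = HMAC(kDate, Region),
+%%   kService = HMAC(kRegion, Service), kSigning = HMAC(kService, "aws4_request").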
+
+
+-spec string_to_sign(RequestTimestamp :: string(),
+ RequestDate :: string(),
+ Region :: region(),
+ Service :: string(),
+ RequestHash :: string()) -> string().
+%% @doc Return the string to sign when creating the signed request.
+%% @end
+string_to_sign(RequestTimestamp, RequestDate, Region, Service, RequestHash) ->
+ CredentialScope = credential_scope(RequestDate, Region, Service),
+ lists:flatten(string:join([
+ ?ALGORITHM,
+ RequestTimestamp,
+ CredentialScope,
+ RequestHash
+ ], "\n")).
+
+
+-spec sort_headers(Headers :: headers()) -> headers().
+%% @doc Case-insensitive sorting of the request headers
+%% @end
+sort_headers(Headers) ->
+ lists:sort(fun({A,_}, {B, _}) -> string:to_lower(A) =< string:to_lower(B) end, Headers).
diff --git a/deps/rabbitmq_aws/src/rabbitmq_aws_sup.erl b/deps/rabbitmq_aws/src/rabbitmq_aws_sup.erl
new file mode 100644
index 0000000000..6327b2029d
--- /dev/null
+++ b/deps/rabbitmq_aws/src/rabbitmq_aws_sup.erl
@@ -0,0 +1,20 @@
+%% ====================================================================
+%% @author Gavin M. Roy <gavinmroy@gmail.com>
+%% @copyright 2016, Gavin M. Roy
+%% @doc rabbitmq_aws supervisor for the gen_server process
+%% @end
+%% ====================================================================
+-module(rabbitmq_aws_sup).
+
+-behaviour(supervisor).
+
+-export([start_link/0,
+ init/1]).
+
+-define(CHILD(I, Type), {I, {I, start_link, []}, permanent, 5, Type, [I]}).
+
+start_link() ->
+ supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+init([]) ->
+ {ok, {{one_for_one, 5, 10}, [?CHILD(rabbitmq_aws, worker)]}}.
diff --git a/deps/rabbitmq_aws/src/rabbitmq_aws_urilib.erl b/deps/rabbitmq_aws/src/rabbitmq_aws_urilib.erl
new file mode 100644
index 0000000000..02435270ba
--- /dev/null
+++ b/deps/rabbitmq_aws/src/rabbitmq_aws_urilib.erl
@@ -0,0 +1,122 @@
+%% ====================================================================
+%% @author Gavin M. Roy <gavinmroy@gmail.com>
+%% @copyright 2016
+%% @doc urilib is a RFC-3986 URI Library for Erlang
+%% https://github.com/gmr/urilib
+%% @end
+%% ====================================================================
+-module(rabbitmq_aws_urilib).
+
+-export([build/1,
+ build_query_string/1,
+ parse/1,
+ parse_userinfo/1,
+ parse_userinfo_result/1
+ ]).
+
+%% Export all for unit tests
+-ifdef(TEST).
+-compile(export_all).
+-endif.
+
+-include("rabbitmq_aws.hrl").
+
+-spec build(#uri{}) -> string().
+%% @doc Build a URI string
+%% @end
+build(URI) ->
+ {UserInfo, Host, Port} = URI#uri.authority,
+ UriMap = #{
+ scheme => to_list(URI#uri.scheme),
+ host => Host
+ },
+ UriMap1 = case UserInfo of
+ undefined -> UriMap;
+ {User, undefined} -> maps:put(userinfo, User, UriMap);
+ {User, Password} -> maps:put(userinfo, User ++ ":" ++ Password, UriMap);
+ Value -> maps:put(userinfo, Value, UriMap)
+ end,
+ UriMap2 = case Port of
+ undefined -> UriMap1;
+ Value1 -> maps:put(port, Value1, UriMap1)
+ end,
+ UriMap3 = case URI#uri.path of
+ undefined -> maps:put(path, "", UriMap2);
+ Value2 ->
+ PrefixedPath = case string:slice(Value2, 0, 1) of
+ "/" -> Value2;
+ _ -> "/" ++ Value2
+ end,
+ maps:put(path, PrefixedPath, UriMap2)
+ end,
+ UriMap4 = case URI#uri.query of
+ undefined -> UriMap3;
+ "" -> UriMap3;
+ Value3 -> maps:put(query, build_query_string(Value3), UriMap3)
+ end,
+ UriMap5 = case URI#uri.fragment of
+ undefined -> UriMap4;
+ Value4 -> maps:put(fragment, Value4, UriMap4)
+ end,
+ uri_string:recompose(UriMap5).
+
+-spec parse(string()) -> #uri{} | {error, any()}.
+%% @doc Parse a URI string returning a record with the parsed results
+%% @end
+parse(Value) ->
+ UriMap = uri_string:parse(Value),
+ Scheme = maps:get(scheme, UriMap, "https"),
+ Host = maps:get(host, UriMap),
+
+ DefaultPort = case Scheme of
+ "http" -> 80;
+ "https" -> 443;
+ _ -> undefined
+ end,
+ Port = maps:get(port, UriMap, DefaultPort),
+ UserInfo = parse_userinfo(maps:get(userinfo, UriMap, undefined)),
+ Path = maps:get(path, UriMap),
+ Query = maps:get(query, UriMap, ""),
+ #uri{scheme = Scheme,
+       authority = {UserInfo, Host, Port},
+ path = Path,
+ query = uri_string:dissect_query(Query),
+ fragment = maps:get(fragment, UriMap, undefined)
+ }.
+
+
+-spec parse_userinfo(string() | undefined)
+ -> {username() | undefined, password() | undefined} | undefined.
+parse_userinfo(undefined) -> undefined;
+parse_userinfo([]) -> undefined;
+parse_userinfo({User, undefined}) -> {User, undefined};
+parse_userinfo({User, Password}) -> {User, Password};
+parse_userinfo(Value) ->
+ parse_userinfo_result(string:tokens(Value, ":")).
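+%% For example, parse_userinfo("guest:secret") returns {"guest", "secret"}
+%% and parse_userinfo("guest") returns {"guest", undefined}.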
+
+
+-spec parse_userinfo_result(list())
+ -> {username() | undefined, password() | undefined} | undefined.
+parse_userinfo_result([User, Password]) -> {User, Password};
+parse_userinfo_result([User]) -> {User, undefined};
+parse_userinfo_result({User, undefined}) -> {User, undefined};
+parse_userinfo_result([]) -> undefined;
+parse_userinfo_result(User) -> {User, undefined}.
+
+-spec build_query_string([{any(), any()}]) -> string().
+%% @doc Build the query parameters string from a proplist
+%% @end
+build_query_string(Args) when is_list(Args) ->
+ Normalized = [{to_list(K), to_list(V)} || {K, V} <- Args],
+ uri_string:compose_query(Normalized).
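+%% For example, build_query_string([{"Action", "ListUsers"},
+%% {"Version", "2010-05-08"}]) returns "Action=ListUsers&Version=2010-05-08".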
+
+-spec to_list(Val :: integer() | list() | binary() | atom() | map()) -> list().
+to_list(Val) when is_list(Val) -> Val;
+to_list(Val) when is_map(Val) -> maps:to_list(Val);
+to_list(Val) when is_atom(Val) -> atom_to_list(Val);
+to_list(Val) when is_binary(Val) -> binary_to_list(Val);
+to_list(Val) when is_integer(Val) -> integer_to_list(Val).
diff --git a/deps/rabbitmq_aws/src/rabbitmq_aws_xml.erl b/deps/rabbitmq_aws/src/rabbitmq_aws_xml.erl
new file mode 100644
index 0000000000..fc3be5c642
--- /dev/null
+++ b/deps/rabbitmq_aws/src/rabbitmq_aws_xml.erl
@@ -0,0 +1,46 @@
+%% ====================================================================
+%% @author Gavin M. Roy <gavinmroy@gmail.com>
+%% @copyright 2016, Gavin M. Roy
+%% @doc Simple XML parser for AWS application/xml responses
+%% @end
+%% ====================================================================
+-module(rabbitmq_aws_xml).
+
+-export([parse/1]).
+
+-include_lib("xmerl/include/xmerl.hrl").
+
+-spec parse(Value :: string() | binary()) -> list().
+parse(Value) ->
+ {Element, _} = xmerl_scan:string(Value),
+ parse_node(Element).
+
+
+parse_node(#xmlElement{name=Name, content=Content}) ->
+ Value = parse_content(Content, []),
+ [{atom_to_list(Name), flatten_value(Value, Value)}].
+
+
+flatten_text([], Value) -> Value;
+flatten_text([{K,V}|T], Accum) when is_list(V) ->
+ flatten_text(T, lists:append([{K, V}], Accum));
+flatten_text([H | T], Accum) when is_list(H) ->
+  flatten_text(T, lists:append([H], Accum)).
+
+
+flatten_value([L], _) when is_list(L) -> L;
+flatten_value(L, _) when is_list(L) -> flatten_text(L, []).
+
+
+parse_content([], Value) -> Value;
+parse_content(#xmlElement{} = Element, Accum) ->
+ lists:append(parse_node(Element), Accum);
+parse_content(#xmlText{value=Value}, Accum) ->
+ case string:strip(Value) of
+ "" -> Accum;
+ "\n" -> Accum;
+ Stripped ->
+ lists:append([Stripped], Accum)
+ end;
+parse_content([H|T], Accum) ->
+ parse_content(T, parse_content(H, Accum)).
diff --git a/deps/rabbitmq_aws/test/src/rabbitmq_aws_all_tests.erl b/deps/rabbitmq_aws/test/src/rabbitmq_aws_all_tests.erl
new file mode 100644
index 0000000000..1273e14f8b
--- /dev/null
+++ b/deps/rabbitmq_aws/test/src/rabbitmq_aws_all_tests.erl
@@ -0,0 +1,18 @@
+-module(rabbitmq_aws_all_tests).
+
+-export([run/0]).
+
+-include_lib("eunit/include/eunit.hrl").
+
+run() ->
+ Result = {
+ eunit:test(rabbitmq_aws_app_tests, [verbose]),
+ eunit:test(rabbitmq_aws_config_tests, [verbose]),
+ eunit:test(rabbitmq_aws_json_tests, [verbose]),
+ eunit:test(rabbitmq_aws_sign_tests, [verbose]),
+ eunit:test(rabbitmq_aws_sup_tests, [verbose]),
+ eunit:test(rabbitmq_aws_tests, [verbose]),
+ eunit:test(rabbitmq_aws_urilib_tests, [verbose]),
+ eunit:test(rabbitmq_aws_xml_tests, [verbose])
+ },
+ ?assertEqual({ok, ok, ok, ok, ok, ok, ok, ok}, Result).
diff --git a/deps/rabbitmq_aws/test/src/rabbitmq_aws_app_tests.erl b/deps/rabbitmq_aws/test/src/rabbitmq_aws_app_tests.erl
new file mode 100644
index 0000000000..ced4c0065b
--- /dev/null
+++ b/deps/rabbitmq_aws/test/src/rabbitmq_aws_app_tests.erl
@@ -0,0 +1,24 @@
+-module(rabbitmq_aws_app_tests).
+
+-include_lib("eunit/include/eunit.hrl").
+
+start_test_() ->
+ {foreach,
+ fun() ->
+ meck:new(rabbitmq_aws_sup, [passthrough])
+ end,
+ fun(_) ->
+ meck:unload(rabbitmq_aws_sup)
+ end,
+ [
+ {"supervisor initialized", fun() ->
+ meck:expect(rabbitmq_aws_sup, start_link, fun() -> {ok, test_result} end),
+ ?assertEqual({ok, test_result},
+ rabbitmq_aws_app:start(temporary, [])),
+ meck:validate(rabbitmq_aws_sup)
+ end}
+ ]
+ }.
+
+stop_test() ->
+ ?assertEqual(ok, rabbitmq_aws_app:stop({})).
diff --git a/deps/rabbitmq_aws/test/src/rabbitmq_aws_config_tests.erl b/deps/rabbitmq_aws/test/src/rabbitmq_aws_config_tests.erl
new file mode 100644
index 0000000000..ac1d65f59f
--- /dev/null
+++ b/deps/rabbitmq_aws/test/src/rabbitmq_aws_config_tests.erl
@@ -0,0 +1,344 @@
+-module(rabbitmq_aws_config_tests).
+
+-include_lib("eunit/include/eunit.hrl").
+
+-include("rabbitmq_aws.hrl").
+
+
+config_file_test_() ->
+ [
+ {"from environment variable", fun() ->
+ os:putenv("AWS_CONFIG_FILE", "/etc/aws/config"),
+ ?assertEqual("/etc/aws/config", rabbitmq_aws_config:config_file())
+ end},
+ {"default without environment variable", fun() ->
+ os:unsetenv("AWS_CONFIG_FILE"),
+ os:putenv("HOME", "/home/rrabbit"),
+ ?assertEqual("/home/rrabbit/.aws/config",
+ rabbitmq_aws_config:config_file())
+ end}
+ ].
+
+config_file_data_test_() ->
+ [
+ {"successfully parses ini", fun() ->
+ setup_test_config_env_var(),
+ Expectation = [
+ {"default",
+ [{aws_access_key_id, "default-key"},
+ {aws_secret_access_key, "default-access-key"},
+ {region, "us-east-4"}]},
+ {"profile testing",
+ [{aws_access_key_id, "foo1"},
+ {aws_secret_access_key, "bar2"},
+ {s3, [{max_concurrent_requests, 10},
+ {max_queue_size, 1000}]},
+ {region, "us-west-5"}]},
+ {"profile no-region",
+ [{aws_access_key_id, "foo2"},
+ {aws_secret_access_key, "bar3"}]},
+ {"profile only-key",
+ [{aws_access_key_id, "foo3"}]},
+ {"profile only-secret",
+ [{aws_secret_access_key, "foo4"}]},
+ {"profile bad-entry",
+ [{aws_secret_access, "foo5"}]}
+ ],
+ ?assertEqual(Expectation,
+ rabbitmq_aws_config:config_file_data())
+ end},
+ {"file does not exist", fun() ->
+ ?assertEqual({error, enoent},
+ rabbitmq_aws_config:ini_file_data(filename:join([filename:absname("."), "bad_path"]), false))
+ end
+ },
+ {"file exists but path is invalid", fun() ->
+ ?assertEqual({error, enoent},
+ rabbitmq_aws_config:ini_file_data(filename:join([filename:absname("."), "bad_path"]), true))
+ end
+ }
+ ].
+
+
+instance_metadata_test_() ->
+ [
+ {"instance role URL", fun() ->
+ ?assertEqual("http://169.254.169.254/latest/meta-data/iam/security-credentials",
+ rabbitmq_aws_config:instance_role_url())
+ end},
+ {"availability zone URL", fun() ->
+ ?assertEqual("http://169.254.169.254/latest/meta-data/placement/availability-zone",
+ rabbitmq_aws_config:instance_availability_zone_url())
+ end},
+ {"arbitrary paths", fun () ->
+ ?assertEqual("http://169.254.169.254/a/b/c", rabbitmq_aws_config:instance_metadata_url("a/b/c")),
+ ?assertEqual("http://169.254.169.254/a/b/c", rabbitmq_aws_config:instance_metadata_url("/a/b/c"))
+ end}
+ ].
+
+credentials_file_test_() ->
+ [
+ {"from environment variable", fun() ->
+ os:putenv("AWS_SHARED_CREDENTIALS_FILE", "/etc/aws/credentials"),
+ ?assertEqual("/etc/aws/credentials", rabbitmq_aws_config:credentials_file())
+ end},
+ {"default without environment variable", fun() ->
+ os:unsetenv("AWS_SHARED_CREDENTIALS_FILE"),
+ os:putenv("HOME", "/home/rrabbit"),
+ ?assertEqual("/home/rrabbit/.aws/credentials",
+ rabbitmq_aws_config:credentials_file())
+ end}
+ ].
+
+
+credentials_test_() ->
+ {
+ foreach,
+ fun () ->
+ meck:new(httpc),
+ reset_environment(),
+ [httpc]
+ end,
+ fun meck:unload/1,
+ [
+ {"from environment variables", fun() ->
+ os:putenv("AWS_ACCESS_KEY_ID", "Sésame"),
+ os:putenv("AWS_SECRET_ACCESS_KEY", "ouvre-toi"),
+ ?assertEqual({ok, "Sésame", "ouvre-toi", undefined, undefined},
+ rabbitmq_aws_config:credentials())
+ end},
+ {"from config file with default profile", fun() ->
+ setup_test_config_env_var(),
+ ?assertEqual({ok, "default-key", "default-access-key", undefined, undefined},
+ rabbitmq_aws_config:credentials())
+ end},
+ {"with missing environment variable", fun() ->
+ os:putenv("AWS_ACCESS_KEY_ID", "Sésame"),
+ ?assertEqual({error, undefined},
+ rabbitmq_aws_config:credentials())
+ end},
+ {"from config file with default profile", fun() ->
+ setup_test_config_env_var(),
+ ?assertEqual({ok, "default-key", "default-access-key", undefined, undefined},
+ rabbitmq_aws_config:credentials())
+ end},
+ {"from config file with profile", fun() ->
+ setup_test_config_env_var(),
+ ?assertEqual({ok, "foo1", "bar2", undefined, undefined},
+ rabbitmq_aws_config:credentials("testing"))
+ end},
+ {"from config file with bad profile", fun() ->
+ setup_test_config_env_var(),
+ ?assertEqual({error, undefined},
+ rabbitmq_aws_config:credentials("bad-profile-name"))
+ end},
+ {"from credentials file with default profile", fun() ->
+ setup_test_credentials_env_var(),
+ ?assertEqual({ok, "foo1", "bar1", undefined, undefined},
+ rabbitmq_aws_config:credentials())
+ end},
+ {"from credentials file with profile", fun() ->
+ setup_test_credentials_env_var(),
+ ?assertEqual({ok, "foo2", "bar2", undefined, undefined},
+ rabbitmq_aws_config:credentials("development"))
+ end},
+ {"from credentials file with bad profile", fun() ->
+ setup_test_credentials_env_var(),
+ ?assertEqual({error, undefined},
+ rabbitmq_aws_config:credentials("bad-profile-name"))
+ end},
+ {"from credentials file with only the key in profile", fun() ->
+ setup_test_credentials_env_var(),
+ ?assertEqual({error, undefined},
+ rabbitmq_aws_config:credentials("only-key"))
+ end},
+ {"from credentials file with only the value in profile", fun() ->
+ setup_test_credentials_env_var(),
+ ?assertEqual({error, undefined},
+ rabbitmq_aws_config:credentials("only-value"))
+ end},
+ {"from credentials file with missing keys in profile", fun() ->
+ setup_test_credentials_env_var(),
+ ?assertEqual({error, undefined},
+ rabbitmq_aws_config:credentials("bad-entry"))
+ end},
+ {"from instance metadata service", fun() ->
+ CredsBody = "{\n \"Code\" : \"Success\",\n \"LastUpdated\" : \"2016-03-31T21:51:49Z\",\n \"Type\" : \"AWS-HMAC\",\n \"AccessKeyId\" : \"ASIAIMAFAKEACCESSKEY\",\n \"SecretAccessKey\" : \"2+t64tZZVaz0yp0x1G23ZRYn+FAKEyVALUEs/4qh\",\n \"Token\" : \"FAKE//////////wEAK/TOKEN/VALUE=\",\n \"Expiration\" : \"2016-04-01T04:13:28Z\"\n}",
+ meck:sequence(httpc, request, 4,
+ [{ok, {{protocol, 200, message}, headers, "Bob"}},
+ {ok, {{protocol, 200, message}, headers, CredsBody}}]),
+ Expectation = {ok, "ASIAIMAFAKEACCESSKEY", "2+t64tZZVaz0yp0x1G23ZRYn+FAKEyVALUEs/4qh",
+ {{2016,4,1},{4,13,28}}, "FAKE//////////wEAK/TOKEN/VALUE="},
+ ?assertEqual(Expectation, rabbitmq_aws_config:credentials())
+ end
+ },
+ {"with instance metadata service role error", fun() ->
+ meck:expect(httpc, request, 4, {error, timeout}),
+ ?assertEqual({error, undefined}, rabbitmq_aws_config:credentials())
+ end
+ },
+ {"with instance metadata service role http error", fun() ->
+ meck:expect(httpc, request, 4,
+ {ok, {{protocol, 500, message}, headers, "Internal Server Error"}}),
+ ?assertEqual({error, undefined}, rabbitmq_aws_config:credentials())
+ end
+ },
+ {"with instance metadata service credentials error", fun() ->
+ meck:sequence(httpc, request, 4,
+ [{ok, {{protocol, 200, message}, headers, "Bob"}},
+ {error, timeout}]),
+ ?assertEqual({error, undefined}, rabbitmq_aws_config:credentials())
+ end
+ },
+ {"with instance metadata service credentials not found", fun() ->
+ meck:sequence(httpc, request, 4,
+ [{ok, {{protocol, 200, message}, headers, "Bob"}},
+ {ok, {{protocol, 404, message}, headers, "File Not Found"}}]),
+ ?assertEqual({error, undefined}, rabbitmq_aws_config:credentials())
+ end
+ }
+
+ ]}.
+
+
+home_path_test_() ->
+ [
+ {"with HOME", fun() ->
+ os:putenv("HOME", "/home/rrabbit"),
+ ?assertEqual("/home/rrabbit",
+ rabbitmq_aws_config:home_path())
+ end},
+ {"without HOME", fun() ->
+ os:unsetenv("HOME"),
+ ?assertEqual(filename:absname("."),
+ rabbitmq_aws_config:home_path())
+ end}
+ ].
+
+
+ini_format_key_test_() ->
+ [
+ {"when value is list", fun() ->
+ ?assertEqual(test_key, rabbitmq_aws_config:ini_format_key("test_key"))
+ end},
+ {"when value is binary", fun() ->
+ ?assertEqual({error, type}, rabbitmq_aws_config:ini_format_key(<<"test_key">>))
+ end}
+ ].
+
+
+maybe_convert_number_test_() ->
+ [
+ {"when string contains an integer", fun() ->
+ ?assertEqual(123, rabbitmq_aws_config:maybe_convert_number("123"))
+ end},
+ {"when string contains a float", fun() ->
+ ?assertEqual(123.456, rabbitmq_aws_config:maybe_convert_number("123.456"))
+ end},
+ {"when string does not contain a number", fun() ->
+ ?assertEqual("hello, world", rabbitmq_aws_config:maybe_convert_number("hello, world"))
+ end}
+ ].
+
+
+parse_iso8601_test_() ->
+ [
+ {"parse test", fun() ->
+ Value = "2016-05-19T18:25:23Z",
+ Expectation = {{2016,5,19},{18,25,23}},
+ ?assertEqual(Expectation, rabbitmq_aws_config:parse_iso8601_timestamp(Value))
+ end}
+ ].
+
+
+profile_test_() ->
+ [
+ {"from environment variable", fun() ->
+ os:putenv("AWS_DEFAULT_PROFILE", "httpc-aws test"),
+ ?assertEqual("httpc-aws test", rabbitmq_aws_config:profile())
+ end},
+ {"default without environment variable", fun() ->
+ os:unsetenv("AWS_DEFAULT_PROFILE"),
+ ?assertEqual("default", rabbitmq_aws_config:profile())
+ end}
+ ].
+
+
+read_file_test_() ->
+ [
+ {"file does not exist", fun() ->
+ ?assertEqual({error, enoent}, rabbitmq_aws_config:read_file(filename:join([filename:absname("."), "bad_path"])))
+ end},
+ {"file handle is closed", fun() ->
+ {MegaSecs, Secs, MicroSecs} = erlang:timestamp(),
+ Name = lists:flatten(io_lib:format("~p-~p-~p.tmp", [MegaSecs, Secs, MicroSecs])),
+ {ok, Handle} = file:open(Name, [write]),
+ file:close(Handle),
+ ?assertEqual({error,terminated}, rabbitmq_aws_config:read_file(Handle, [])),
+ file:delete(Name)
+ end}
+ ].
+
+
+region_test_() ->
+ {
+ foreach,
+ fun () ->
+ meck:new(httpc),
+ reset_environment(),
+ [httpc]
+ end,
+ fun meck:unload/1,
+ [
+ {"with environment variable", fun() ->
+ os:putenv("AWS_DEFAULT_REGION", "us-west-1"),
+ ?assertEqual({ok, "us-west-1"}, rabbitmq_aws_config:region())
+ end},
+ {"with config file and specified profile", fun() ->
+ setup_test_config_env_var(),
+ ?assertEqual({ok, "us-west-5"}, rabbitmq_aws_config:region("testing"))
+ end},
+ {"with config file using default profile", fun() ->
+ setup_test_config_env_var(),
+ ?assertEqual({ok, "us-east-4"}, rabbitmq_aws_config:region())
+ end},
+ {"missing profile in config", fun() ->
+ setup_test_config_env_var(),
+ ?assertEqual({ok, ?DEFAULT_REGION}, rabbitmq_aws_config:region("no-region"))
+ end},
+ {"from instance metadata service", fun() ->
+ meck:expect(httpc, request, 4,
+ {ok, {{protocol, 200, message}, headers, "us-west-1a"}}),
+ ?assertEqual({ok, "us-west-1"}, rabbitmq_aws_config:region())
+ end},
+ {"full lookup failure", fun() ->
+ ?assertEqual({ok, ?DEFAULT_REGION}, rabbitmq_aws_config:region())
+ end},
+ {"http error failure", fun() ->
+ meck:expect(httpc, request, 4,
+ {ok, {{protocol, 500, message}, headers, "Internal Server Error"}}),
+ ?assertEqual({ok, ?DEFAULT_REGION}, rabbitmq_aws_config:region())
+ end}
+ ]}.
+
+
+reset_environment() ->
+ os:unsetenv("AWS_ACCESS_KEY_ID"),
+ os:unsetenv("AWS_DEFAULT_REGION"),
+ os:unsetenv("AWS_SECRET_ACCESS_KEY"),
+ setup_test_file_with_env_var("AWS_CONFIG_FILE", "bad_config.ini"),
+ setup_test_file_with_env_var("AWS_SHARED_CREDENTIALS_FILE",
+ "bad_credentials.ini"),
+ meck:expect(httpc, request, 4, {error, timeout}).
+
+setup_test_config_env_var() ->
+ setup_test_file_with_env_var("AWS_CONFIG_FILE", "test_aws_config.ini").
+
+setup_test_file_with_env_var(EnvVar, Filename) ->
+ os:putenv(EnvVar,
+ filename:join([filename:absname("."), "test", "src",
+ Filename])).
+
+setup_test_credentials_env_var() ->
+ setup_test_file_with_env_var("AWS_SHARED_CREDENTIALS_FILE",
+ "test_aws_credentials.ini").
diff --git a/deps/rabbitmq_aws/test/src/rabbitmq_aws_json_tests.erl b/deps/rabbitmq_aws/test/src/rabbitmq_aws_json_tests.erl
new file mode 100644
index 0000000000..efc4b8c9c1
--- /dev/null
+++ b/deps/rabbitmq_aws/test/src/rabbitmq_aws_json_tests.erl
@@ -0,0 +1,63 @@
+-module(rabbitmq_aws_json_tests).
+
+-include_lib("eunit/include/eunit.hrl").
+
+-include("rabbitmq_aws.hrl").
+
+parse_test_() ->
+ [
+ {"string decoding", fun() ->
+ Value = "{\"requestId\":\"bda7fbdb-eddb-41fa-8626-7ba87923d690\",\"number\":128,\"enabled\":true,\"tagSet\":[{\"resourceId\":\"i-13a4abea\",\"resourceType\":\"instance\",\"key\":\"Environment\",\"value\":\"prod-us-east-1\"},{\"resourceId\":\"i-13a4abea\",\"resourceType\":\"instance\",\"key\":\"aws:cloudformation:logical-id\",\"value\":\"AutoScalingGroup\"},{\"resourceId\":\"i-13a4abea\",\"resourceType\":\"instance\",\"key\":\"aws:cloudformation:stack-name\",\"value\":\"prod-us-east-1-ecs-1\"}]}",
+ Expectation = [
+ {"requestId","bda7fbdb-eddb-41fa-8626-7ba87923d690"},
+ {"number", 128},
+ {"enabled", true},
+ {"tagSet",
+ [{"resourceId","i-13a4abea"},
+ {"resourceType","instance"},
+ {"key","Environment"},
+ {"value","prod-us-east-1"},
+ {"resourceId","i-13a4abea"},
+ {"resourceType","instance"},
+ {"key","aws:cloudformation:logical-id"},
+ {"value","AutoScalingGroup"},
+ {"resourceId","i-13a4abea"},
+ {"resourceType","instance"},
+ {"key","aws:cloudformation:stack-name"},
+ {"value","prod-us-east-1-ecs-1"}]}
+ ],
+ ?assertEqual(Expectation, rabbitmq_aws_json:decode(Value))
+ end},
+ {"binary decoding", fun() ->
+ Value = <<"{\"requestId\":\"bda7fbdb-eddb-41fa-8626-7ba87923d690\",\"number\":128,\"enabled\":true,\"tagSet\":[{\"resourceId\":\"i-13a4abea\",\"resourceType\":\"instance\",\"key\":\"Environment\",\"value\":\"prod-us-east-1\"},{\"resourceId\":\"i-13a4abea\",\"resourceType\":\"instance\",\"key\":\"aws:cloudformation:logical-id\",\"value\":\"AutoScalingGroup\"},{\"resourceId\":\"i-13a4abea\",\"resourceType\":\"instance\",\"key\":\"aws:cloudformation:stack-name\",\"value\":\"prod-us-east-1-ecs-1\"}]}">>,
+ Expectation = [
+ {"requestId","bda7fbdb-eddb-41fa-8626-7ba87923d690"},
+ {"number", 128},
+ {"enabled", true},
+ {"tagSet",
+ [{"resourceId","i-13a4abea"},
+ {"resourceType","instance"},
+ {"key","Environment"},
+ {"value","prod-us-east-1"},
+ {"resourceId","i-13a4abea"},
+ {"resourceType","instance"},
+ {"key","aws:cloudformation:logical-id"},
+ {"value","AutoScalingGroup"},
+ {"resourceId","i-13a4abea"},
+ {"resourceType","instance"},
+ {"key","aws:cloudformation:stack-name"},
+ {"value","prod-us-east-1-ecs-1"}]}
+ ],
+ ?assertEqual(Expectation, rabbitmq_aws_json:decode(Value))
+ end},
+ {"list values", fun() ->
+      Value = "{\"misc\": [\"foo\", true, 123]}",
+ Expectation = [{"misc", ["foo", true, 123]}],
+ ?assertEqual(Expectation, rabbitmq_aws_json:decode(Value))
+ end},
+ {"empty objects", fun() ->
+ Value = "{\"tags\": [{}]}",
+ Expectation = [{"tags", [{}]}],
+ ?assertEqual(Expectation, rabbitmq_aws_json:decode(Value))
+ end}
+ ].
diff --git a/deps/rabbitmq_aws/test/src/rabbitmq_aws_sign_tests.erl b/deps/rabbitmq_aws/test/src/rabbitmq_aws_sign_tests.erl
new file mode 100644
index 0000000000..071c4c3ef0
--- /dev/null
+++ b/deps/rabbitmq_aws/test/src/rabbitmq_aws_sign_tests.erl
@@ -0,0 +1,291 @@
+-module(rabbitmq_aws_sign_tests).
+
+-include_lib("eunit/include/eunit.hrl").
+-include("rabbitmq_aws.hrl").
+
+
+amz_date_test_() ->
+ [
+ {"value", fun() ->
+ ?assertEqual("20160220",
+ rabbitmq_aws_sign:amz_date("20160220T120000Z"))
+ end}
+ ].
+
+
+append_headers_test_() ->
+ [
+ {"with security token", fun() ->
+
+ Headers = [{"Content-Type", "application/x-amz-json-1.0"},
+ {"X-Amz-Target", "DynamoDB_20120810.DescribeTable"}],
+
+ AMZDate = "20160220T120000Z",
+ ContentLength = 128,
+ PayloadHash = "c888ac0919d062cee1d7b97f44f2a765e4dc9270bc720ba32b8d9f8720626213",
+ Hostname = "ec2.amazonaws.com",
+ SecurityToken = "AQoEXAMPLEH4aoAH0gNCAPyJxz4BlCFFxWNE1OPTgk5TthT+FvwqnKwRcOIfrRh3c/L",
+ Expectation = [{"content-length", integer_to_list(ContentLength)},
+ {"content-type", "application/x-amz-json-1.0"},
+ {"date", AMZDate},
+ {"host", Hostname},
+ {"x-amz-content-sha256", PayloadHash},
+ {"x-amz-security-token", SecurityToken},
+ {"x-amz-target", "DynamoDB_20120810.DescribeTable"}],
+ ?assertEqual(Expectation,
+ rabbitmq_aws_sign:append_headers(AMZDate, ContentLength,
+ PayloadHash, Hostname,
+ SecurityToken, Headers))
+ end},
+ {"without security token", fun() ->
+
+ Headers = [{"Content-Type", "application/x-amz-json-1.0"},
+ {"X-Amz-Target", "DynamoDB_20120810.DescribeTable"}],
+
+ AMZDate = "20160220T120000Z",
+ ContentLength = 128,
+ PayloadHash = "c888ac0919d062cee1d7b97f44f2a765e4dc9270bc720ba32b8d9f8720626213",
+ Hostname = "ec2.amazonaws.com",
+ Expectation = [{"content-length", integer_to_list(ContentLength)},
+ {"content-type", "application/x-amz-json-1.0"},
+ {"date", AMZDate},
+ {"host", Hostname},
+ {"x-amz-content-sha256", PayloadHash},
+ {"x-amz-target", "DynamoDB_20120810.DescribeTable"}],
+ ?assertEqual(Expectation,
+ rabbitmq_aws_sign:append_headers(AMZDate, ContentLength,
+ PayloadHash, Hostname,
+ undefined, Headers))
+ end}
+ ].
+
+
+authorization_header_test_() ->
+ [
+ {"value", fun() ->
+ AccessKey = "AKIDEXAMPLE",
+ SecretKey = "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY",
+ RequestTimestamp = "20150830T123600Z",
+ Region = "us-east-1",
+ Service = "iam",
+ Headers = [{"Content-Type", "application/x-www-form-urlencoded; charset=utf-8"},
+ {"Host", "iam.amazonaws.com"},
+ {"Date", "20150830T123600Z"}],
+ RequestHash = "f536975d06c0309214f805bb90ccff089219ecd68b2577efef23edd43b7e1a59",
+ Expectation = "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20150830/us-east-1/iam/aws4_request, SignedHeaders=content-type;date;host, Signature=5d672d79c15b13162d9279b0855cfba6789a8edb4c82c400e06b5924a6f2b5d7",
+ ?assertEqual(Expectation,
+ rabbitmq_aws_sign:authorization(AccessKey, SecretKey, RequestTimestamp,
+ Region, Service, Headers, RequestHash))
+ end}
+ ].
+
+
+canonical_headers_test_() ->
+ [
+ {"with security token", fun() ->
+ Value = [{"Host", "iam.amazonaws.com"},
+ {"Content-Type", "content-type:application/x-www-form-urlencoded; charset=utf-8"},
+ {"My-Header2", "\"a b c \""},
+ {"My-Header1", "a b c"},
+ {"Date", "20150830T123600Z"}],
+ Expectation = lists:flatten([
+ "content-type:content-type:application/x-www-form-urlencoded; charset=utf-8\n",
+ "date:20150830T123600Z\n",
+ "host:iam.amazonaws.com\n",
+ "my-header1:a b c\n",
+ "my-header2:\"a b c \"\n"]),
+ ?assertEqual(Expectation, rabbitmq_aws_sign:canonical_headers(Value))
+ end}
+ ].
+
+credential_scope_test_() ->
+ [
+ {"string value", fun() ->
+ RequestDate = "20150830",
+ Region = "us-east-1",
+ Service = "iam",
+ Expectation = "20150830/us-east-1/iam/aws4_request",
+ ?assertEqual(Expectation,
+ rabbitmq_aws_sign:credential_scope(RequestDate, Region, Service))
+ end}
+ ].
+
+hmac_sign_test_() ->
+ [
+ {"signed value", fun() ->
+ ?assertEqual([84, 114, 243, 48, 184, 73, 81, 138, 195, 123, 62, 27, 222, 141, 188, 149, 178, 82, 252, 75, 29, 34, 102, 186, 98, 232, 224, 105, 64, 6, 119, 33],
+ rabbitmq_aws_sign:hmac_sign("sixpence", "burn the witch"))
+ end}
+ ].
+
+query_string_test_() ->
+ [
+ {"properly sorted", fun() ->
+ QArgs = [{"Version", "2015-10-01"},
+ {"Action", "RunInstances"},
+ {"x-amz-algorithm", "AWS4-HMAC-SHA256"},
+ {"Date", "20160220T120000Z"},
+ {"x-amz-credential", "AKIDEXAMPLE/20140707/us-east-1/ec2/aws4_request"}],
+ Expectation = "Action=RunInstances&Date=20160220T120000Z&Version=2015-10-01&x-amz-algorithm=AWS4-HMAC-SHA256&x-amz-credential=AKIDEXAMPLE%2F20140707%2Fus-east-1%2Fec2%2Faws4_request",
+ ?assertEqual(Expectation,
+ rabbitmq_aws_sign:query_string(QArgs))
+ end},
+ {"undefined", fun() ->
+ ?assertEqual([], rabbitmq_aws_sign:query_string(undefined))
+ end}
+ ].
+
+request_hash_test_() ->
+ [
+ {"hash value", fun() ->
+ Method = get,
+ Path = "/",
+ QArgs = [{"Action", "ListUsers"}, {"Version", "2010-05-08"}],
+ Headers = [{"Content-Type", "application/x-www-form-urlencoded; charset=utf-8"},
+ {"Host", "iam.amazonaws.com"},
+ {"Date", "20150830T123600Z"}],
+ Payload = "",
+ Expectation = "49b454e0f20fe17f437eaa570846fc5d687efc1752c8b5a1eeee5597a7eb92a5",
+ ?assertEqual(Expectation,
+ rabbitmq_aws_sign:request_hash(Method, Path, QArgs, Headers, Payload))
+ end}
+ ].
+
+signature_test_() ->
+ [
+ {"value", fun() ->
+ StringToSign = "AWS4-HMAC-SHA256\n20150830T123600Z\n20150830/us-east-1/iam/aws4_request\nf536975d06c0309214f805bb90ccff089219ecd68b2577efef23edd43b7e1a59",
+ SigningKey = [196, 175, 177, 204, 87, 113, 216, 113, 118, 58, 57, 62, 68, 183, 3, 87, 27, 85, 204, 40, 66, 77, 26, 94, 134, 218, 110, 211, 193, 84, 164, 185],
+ Expectation = "5d672d79c15b13162d9279b0855cfba6789a8edb4c82c400e06b5924a6f2b5d7",
+ ?assertEqual(Expectation, rabbitmq_aws_sign:signature(StringToSign, SigningKey))
+ end}
+ ].
+
+
+signed_headers_test_() ->
+ [
+ {"with security token", fun() ->
+ Value = [{"X-Amz-Security-Token", "AQoEXAMPLEH4aoAH0gNCAPyJxz4BlCFFxWNE1OPTgk5TthT+FvwqnKwRcOIfrRh3c/L"},
+ {"Date", "20160220T120000Z"},
+ {"Content-Type", "application/x-amz-json-1.0"},
+ {"Host", "ec2.amazonaws.com"},
+ {"Content-Length", 128},
+ {"X-Amz-Content-sha256", "c888ac0919d062cee1d7b97f44f2a765e4dc9270bc720ba32b8d9f8720626213"},
+ {"X-Amz-Target", "DynamoDB_20120810.DescribeTable"}],
+ Expectation = "content-length;content-type;date;host;x-amz-content-sha256;x-amz-security-token;x-amz-target",
+ ?assertEqual(Expectation, rabbitmq_aws_sign:signed_headers(Value))
+ end}
+ ].
+
+signing_key_test_() ->
+ [
+ {"signing key value", fun() ->
+ SecretKey = "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY",
+ AMZDate = "20150830",
+ Region = "us-east-1",
+ Service = "iam",
+ Expectation = [196, 175, 177, 204, 87, 113, 216, 113, 118, 58, 57, 62, 68, 183, 3, 87, 27, 85, 204, 40, 66, 77, 26, 94, 134, 218, 110, 211, 193, 84, 164, 185],
+ ?assertEqual(Expectation,
+ rabbitmq_aws_sign:signing_key(SecretKey, AMZDate, Region, Service))
+ end}
+ ].
+
+string_to_sign_test_() ->
+ [
+ {"string value", fun() ->
+ RequestTimestamp = "20150830T123600Z",
+ RequestDate = "20150830",
+ Region = "us-east-1",
+ Service = "iam",
+ RequestHash = "f536975d06c0309214f805bb90ccff089219ecd68b2577efef23edd43b7e1a59",
+ Expectation = "AWS4-HMAC-SHA256\n20150830T123600Z\n20150830/us-east-1/iam/aws4_request\nf536975d06c0309214f805bb90ccff089219ecd68b2577efef23edd43b7e1a59",
+ ?assertEqual(Expectation,
+ rabbitmq_aws_sign:string_to_sign(RequestTimestamp, RequestDate, Region, Service, RequestHash))
+ end}
+ ].
+
+local_time_0_test_() ->
+ {foreach,
+ fun() ->
+ meck:new(calendar, [passthrough, unstick])
+ end,
+ fun(_) ->
+ meck:unload(calendar)
+ end,
+ [
+ {"variation1", fun() ->
+ meck:expect(calendar, local_time_to_universal_time_dst, fun(_) -> [{{2015, 05, 08}, {12, 36, 00}}] end),
+ Expectation = "20150508T123600Z",
+ ?assertEqual(Expectation, rabbitmq_aws_sign:local_time()),
+ meck:validate(calendar)
+ end}
+ ]}.
+
+local_time_1_test_() ->
+ [
+ {"variation1", fun() ->
+ Value = {{2015, 05, 08}, {13, 15, 20}},
+ Expectation = "20150508T131520Z",
+ ?assertEqual(Expectation, rabbitmq_aws_sign:local_time(Value))
+ end},
+ {"variation2", fun() ->
+ Value = {{2015, 05, 08}, {06, 07, 08}},
+ Expectation = "20150508T060708Z",
+ ?assertEqual(Expectation, rabbitmq_aws_sign:local_time(Value))
+ end}
+ ].
+
+headers_test_() ->
+ {foreach,
+ fun() ->
+ meck:new(calendar, [passthrough, unstick])
+ end,
+ fun(_) ->
+ meck:unload(calendar)
+ end,
+ [
+ {"without signing key", fun() ->
+ meck:expect(calendar, local_time_to_universal_time_dst, fun(_) -> [{{2015, 08, 30}, {12, 36, 00}}] end),
+ Request = #request{
+ access_key = "AKIDEXAMPLE",
+ secret_access_key = "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY",
+ service = "iam",
+ method = get,
+ region = "us-east-1",
+ uri = "https://iam.amazonaws.com/?Action=ListUsers&Version=2015-05-08",
+ body = "",
+ headers = [{"Content-Type", "application/x-www-form-urlencoded; charset=utf-8"}]},
+ Expectation = [
+ {"authorization", "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20150830/us-east-1/iam/aws4_request, SignedHeaders=content-length;content-type;date;host;x-amz-content-sha256, Signature=81cb49e1e232a0a5f7f594ad6b2ad2b8b7adbafddb3604d00491fe8f3cc5a442"},
+ {"content-length", "0"},
+ {"content-type", "application/x-www-form-urlencoded; charset=utf-8"},
+ {"date", "20150830T123600Z"},
+ {"host", "iam.amazonaws.com"},
+ {"x-amz-content-sha256", "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"}
+ ],
+ ?assertEqual(Expectation, rabbitmq_aws_sign:headers(Request)),
+ meck:validate(calendar)
+ end},
+ {"with host header", fun() ->
+ meck:expect(calendar, local_time_to_universal_time_dst, fun(_) -> [{{2015, 08, 30}, {12, 36, 00}}] end),
+ Request = #request{
+ access_key = "AKIDEXAMPLE",
+ secret_access_key = "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY",
+ service = "iam",
+ method = get,
+ region = "us-east-1",
+ uri = "https://s3.us-east-1.amazonaws.com/?list-type=2",
+ body = "",
+ headers = [{"host", "gavinroy.com.s3.amazonaws.com"}]},
+ Expectation = [
+ {"authorization", "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20150830/us-east-1/iam/aws4_request, SignedHeaders=content-length;date;host;x-amz-content-sha256, Signature=64e549daad14fc1ba9fc4aca6b7df4b2c60e352e3313090d84a2941c1e653d36"},
+ {"content-length","0"},
+ {"date","20150830T123600Z"},
+ {"host","gavinroy.com.s3.amazonaws.com"},
+ {"x-amz-content-sha256", "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"}
+ ],
+ ?assertEqual(Expectation, rabbitmq_aws_sign:headers(Request)),
+ meck:validate(calendar)
+ end}
+ ]
+ }.
diff --git a/deps/rabbitmq_aws/test/src/rabbitmq_aws_sup_tests.erl b/deps/rabbitmq_aws/test/src/rabbitmq_aws_sup_tests.erl
new file mode 100644
index 0000000000..c26af15b38
--- /dev/null
+++ b/deps/rabbitmq_aws/test/src/rabbitmq_aws_sup_tests.erl
@@ -0,0 +1,27 @@
+-module(rabbitmq_aws_sup_tests).
+
+-include_lib("eunit/include/eunit.hrl").
+
+start_link_test_() ->
+ {foreach,
+ fun() ->
+ meck:new(supervisor, [passthrough, unstick])
+ end,
+ fun(_) ->
+ meck:unload(supervisor)
+ end,
+ [
+ {"supervisor start_link", fun() ->
+ meck:expect(supervisor, start_link, fun(_, _, _) -> {ok, test_result} end),
+ ?assertEqual({ok, test_result},
+ rabbitmq_aws_sup:start_link()),
+ meck:validate(supervisor)
+ end}
+ ]
+ }.
+
+init_test() ->
+ ?assertEqual({ok, {{one_for_one, 5, 10},
+ [{rabbitmq_aws, {rabbitmq_aws, start_link, []},
+ permanent, 5, worker, [rabbitmq_aws]}]}},
+ rabbitmq_aws_sup:init([])).
diff --git a/deps/rabbitmq_aws/test/src/rabbitmq_aws_tests.erl b/deps/rabbitmq_aws/test/src/rabbitmq_aws_tests.erl
new file mode 100644
index 0000000000..48c06e2628
--- /dev/null
+++ b/deps/rabbitmq_aws/test/src/rabbitmq_aws_tests.erl
@@ -0,0 +1,479 @@
+-module(rabbitmq_aws_tests).
+
+-include_lib("eunit/include/eunit.hrl").
+
+-include("include/rabbitmq_aws.hrl").
+
+init_test_() ->
+ {foreach,
+ fun() ->
+ os:putenv("AWS_DEFAULT_REGION", "us-west-3"),
+ meck:new(rabbitmq_aws_config, [passthrough])
+ end,
+ fun(_) ->
+ os:unsetenv("AWS_DEFAULT_REGION"),
+ meck:unload(rabbitmq_aws_config)
+ end,
+ [
+ {"ok", fun() ->
+ os:putenv("AWS_ACCESS_KEY_ID", "Sésame"),
+ os:putenv("AWS_SECRET_ACCESS_KEY", "ouvre-toi"),
+ {ok, Pid} = rabbitmq_aws:start_link(),
+ {ok, State} = gen_server:call(Pid, get_state),
+ ok = gen_server:stop(Pid),
+ os:unsetenv("AWS_ACCESS_KEY_ID"),
+ os:unsetenv("AWS_SECRET_ACCESS_KEY"),
+ Expectation = {state,"Sésame","ouvre-toi",undefined,undefined,"us-west-3", undefined},
+ ?assertEqual(Expectation, State)
+ end},
+ {"error", fun() ->
+ meck:expect(rabbitmq_aws_config, credentials, fun() -> {error, test_result} end),
+ {ok, Pid} = rabbitmq_aws:start_link(),
+ {ok, State} = gen_server:call(Pid, get_state),
+ ok = gen_server:stop(Pid),
+ Expectation = {state,undefined,undefined,undefined,undefined,"us-west-3",test_result},
+ ?assertEqual(Expectation, State),
+ meck:validate(rabbitmq_aws_config)
+ end}
+ ]
+ }.
+
+terminate_test() ->
+ ?assertEqual(ok, rabbitmq_aws:terminate(foo, bar)).
+
+code_change_test() ->
+ ?assertEqual({ok, {state, denial}}, rabbitmq_aws:code_change(foo, bar, {state, denial})).
+
+endpoint_test_() ->
+ [
+ {"specified", fun() ->
+ Region = "us-east-3",
+ Service = "dynamodb",
+ Path = "/",
+ Host = "localhost:32767",
+ Expectation = "https://localhost:32767/",
+ ?assertEqual(Expectation, rabbitmq_aws:endpoint(#state{region = Region}, Host, Service, Path))
+ end},
+ {"unspecified", fun() ->
+ Region = "us-east-3",
+ Service = "dynamodb",
+ Path = "/",
+ Host = undefined,
+ Expectation = "https://dynamodb.us-east-3.amazonaws.com/",
+ ?assertEqual(Expectation, rabbitmq_aws:endpoint(#state{region = Region}, Host, Service, Path))
+ end}
+ ].
+
+endpoint_host_test_() ->
+ [
+ {"dynamodb service", fun() ->
+ Expectation = "dynamodb.us-west-2.amazonaws.com",
+ ?assertEqual(Expectation, rabbitmq_aws:endpoint_host("us-west-2", "dynamodb"))
+ end}
+ ].
+
+cn_endpoint_host_test_() ->
+ [
+ {"s3", fun() ->
+ Expectation = "s3.cn-north-1.amazonaws.com.cn",
+ ?assertEqual(Expectation, rabbitmq_aws:endpoint_host("cn-north-1", "s3"))
+ end},
+ {"s3", fun() ->
+ Expectation = "s3.cn-northwest-1.amazonaws.com.cn",
+ ?assertEqual(Expectation, rabbitmq_aws:endpoint_host("cn-northwest-1", "s3"))
+ end}
+ ].
+
+expired_credentials_test_() ->
+ {
+ foreach,
+ fun () ->
+ meck:new(calendar, [passthrough, unstick]),
+ [calendar]
+ end,
+ fun meck:unload/1,
+ [
+ {"true", fun() ->
+ Value = {{2016, 4, 1}, {12, 0, 0}},
+ Expectation = true,
+ meck:expect(calendar, local_time_to_universal_time_dst, fun(_) -> [{{2016, 4, 1}, {12, 0, 0}}] end),
+ ?assertEqual(Expectation, rabbitmq_aws:expired_credentials(Value)),
+ meck:validate(calendar)
+ end},
+ {"false", fun() ->
+ Value = {{2016,5, 1}, {16, 30, 0}},
+ Expectation = false,
+ meck:expect(calendar, local_time_to_universal_time_dst, fun(_) -> [{{2016, 4, 1}, {12, 0, 0}}] end),
+ ?assertEqual(Expectation, rabbitmq_aws:expired_credentials(Value)),
+ meck:validate(calendar)
+ end},
+ {"undefined", fun() ->
+ ?assertEqual(false, rabbitmq_aws:expired_credentials(undefined))
+ end}
+ ]
+ }.
+
+format_response_test_() ->
+ [
+ {"ok", fun() ->
+ Response = {ok, {{"HTTP/1.1", 200, "Ok"}, [{"Content-Type", "text/xml"}], "<test>Value</test>"}},
+ Expectation = {ok, {[{"Content-Type", "text/xml"}], [{"test", "Value"}]}},
+ ?assertEqual(Expectation, rabbitmq_aws:format_response(Response))
+ end},
+ {"error", fun() ->
+ Response = {ok, {{"HTTP/1.1", 500, "Internal Server Error"}, [{"Content-Type", "text/xml"}], "<error>Boom</error>"}},
+ Expectation = {error, "Internal Server Error", {[{"Content-Type", "text/xml"}], [{"error", "Boom"}]}},
+ ?assertEqual(Expectation, rabbitmq_aws:format_response(Response))
+ end}
+ ].
+
+
+gen_server_call_test_() ->
+ {
+ foreach,
+ fun () ->
+      % We explicitly set a few defaults, in case the caller has
+      % something in ~/.aws.
+ os:putenv("AWS_DEFAULT_REGION", "us-west-3"),
+ os:putenv("AWS_ACCESS_KEY_ID", "Sésame"),
+ os:putenv("AWS_SECRET_ACCESS_KEY", "ouvre-toi"),
+ meck:new(httpc, []),
+ [httpc]
+ end,
+ fun (Mods) ->
+ meck:unload(Mods),
+ os:unsetenv("AWS_DEFAULT_REGION"),
+ os:unsetenv("AWS_ACCESS_KEY_ID"),
+ os:unsetenv("AWS_SECRET_ACCESS_KEY")
+ end,
+ [
+ {
+ "request",
+ fun() ->
+ State = #state{access_key = "AKIDEXAMPLE",
+ secret_access_key = "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY",
+ region = "us-east-1"},
+ Service = "ec2",
+ Method = get,
+ Headers = [],
+ Path = "/?Action=DescribeTags&Version=2015-10-01",
+ Body = "",
+ Options = [],
+ Host = undefined,
+ meck:expect(httpc, request,
+ fun(get, {"https://ec2.us-east-1.amazonaws.com/?Action=DescribeTags&Version=2015-10-01", _Headers}, _Options, []) ->
+ {ok, {{"HTTP/1.0", 200, "OK"}, [{"content-type", "application/json"}], "{\"pass\": true}"}}
+ end),
+ Expectation = {reply, {ok, {[{"content-type", "application/json"}], [{"pass", true}]}}, State},
+ Result = rabbitmq_aws:handle_call({request, Service, Method, Headers, Path, Body, Options, Host}, eunit, State),
+ ?assertEqual(Expectation, Result),
+ meck:validate(httpc)
+ end
+ },
+ {
+ "get_state",
+ fun() ->
+ State = #state{access_key = "AKIDEXAMPLE",
+ secret_access_key = "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY",
+ region = "us-east-1"},
+ ?assertEqual({reply, {ok, State}, State},
+ rabbitmq_aws:handle_call(get_state, eunit, State))
+ end
+ },
+ {
+ "refresh_credentials",
+ fun() ->
+ State = #state{access_key = "AKIDEXAMPLE",
+ secret_access_key = "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY",
+ region = "us-east-1"},
+ State2 = #state{access_key = "AKIDEXAMPLE2",
+ secret_access_key = "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY2",
+ region = "us-east-1",
+ security_token = "AQoEXAMPLEH4aoAH0gNCAPyJxz4BlCFFxWNE1OPTgk5TthT+FvwqnKwRcOIfrRh3c/L2",
+ expiration = calendar:local_time()},
+ meck:new(rabbitmq_aws_config, [passthrough]),
+ meck:expect(rabbitmq_aws_config, credentials,
+ fun() ->
+ {ok,
+ State2#state.access_key,
+ State2#state.secret_access_key,
+ State2#state.expiration,
+ State2#state.security_token}
+ end),
+ ?assertEqual({reply, ok, State2}, rabbitmq_aws:handle_call(refresh_credentials, eunit, State)),
+ meck:validate(rabbitmq_aws_config),
+ meck:unload(rabbitmq_aws_config)
+ end
+ },
+ {
+ "set_credentials",
+ fun() ->
+ State = #state{access_key = "AKIDEXAMPLE",
+ secret_access_key = "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY",
+ region = "us-west-3"},
+ ?assertEqual({reply, ok, State},
+ rabbitmq_aws:handle_call({set_credentials,
+ State#state.access_key,
+ State#state.secret_access_key}, eunit, #state{}))
+ end
+ },
+ {
+ "set_region",
+ fun() ->
+ State = #state{access_key = "Sésame",
+ secret_access_key = "ouvre-toi",
+ region = "us-east-5"},
+ ?assertEqual({reply, ok, State},
+ rabbitmq_aws:handle_call({set_region, "us-east-5"}, eunit, #state{}))
+ end
+ }
+ ]
+ }.
+
+get_content_type_test_() ->
+ [
+ {"from headers caps", fun() ->
+ Headers = [{"Content-Type", "text/xml"}],
+ Expectation = {"text", "xml"},
+ ?assertEqual(Expectation, rabbitmq_aws:get_content_type(Headers))
+ end},
+ {"from headers lower", fun() ->
+ Headers = [{"content-type", "text/xml"}],
+ Expectation = {"text", "xml"},
+ ?assertEqual(Expectation, rabbitmq_aws:get_content_type(Headers))
+ end}
+ ].
+
+has_credentials_test_() ->
+ [
+ {"true", fun() ->
+ ?assertEqual(true, rabbitmq_aws:has_credentials(#state{access_key = "TESTVALUE1"}))
+ end},
+ {"false", fun() ->
+ ?assertEqual(false, rabbitmq_aws:has_credentials(#state{error = "ERROR"}))
+ end}
+ ].
+
+
+local_time_test_() ->
+ {
+ foreach,
+ fun () ->
+ meck:new(calendar, [passthrough, unstick]),
+ [calendar]
+ end,
+ fun meck:unload/1,
+ [
+ {"value", fun() ->
+ Value = {{2016, 5, 1}, {12, 0, 0}},
+ meck:expect(calendar, local_time_to_universal_time_dst, fun(_) -> [Value] end),
+ ?assertEqual(Value, rabbitmq_aws:local_time()),
+ meck:validate(calendar)
+ end}
+ ]
+ }.
+
+
+maybe_decode_body_test_() ->
+ [
+ {"application/x-amz-json-1.0", fun() ->
+ ContentType = {"application", "x-amz-json-1.0"},
+ Body = "{\"test\": true}",
+ Expectation = [{"test", true}],
+ ?assertEqual(Expectation, rabbitmq_aws:maybe_decode_body(ContentType, Body))
+ end},
+ {"application/json", fun() ->
+ ContentType = {"application", "json"},
+ Body = "{\"test\": true}",
+ Expectation = [{"test", true}],
+ ?assertEqual(Expectation, rabbitmq_aws:maybe_decode_body(ContentType, Body))
+ end},
+ {"text/xml", fun() ->
+ ContentType = {"text", "xml"},
+ Body = "<test><node>value</node></test>",
+ Expectation = [{"test", [{"node", "value"}]}],
+ ?assertEqual(Expectation, rabbitmq_aws:maybe_decode_body(ContentType, Body))
+ end},
+ {"text/html [unsupported]", fun() ->
+ ContentType = {"text", "html"},
+ Body = "<html><head></head><body></body></html>",
+ ?assertEqual(Body, rabbitmq_aws:maybe_decode_body(ContentType, Body))
+ end}
+ ].
+
+parse_content_type_test_() ->
+ [
+ {"application/x-amz-json-1.0", fun() ->
+ Expectation = {"application", "x-amz-json-1.0"},
+ ?assertEqual(Expectation, rabbitmq_aws:parse_content_type("application/x-amz-json-1.0"))
+ end},
+ {"application/xml", fun() ->
+ Expectation = {"application", "xml"},
+ ?assertEqual(Expectation, rabbitmq_aws:parse_content_type("application/xml"))
+ end},
+ {"text/xml;charset=UTF-8", fun() ->
+ Expectation = {"text", "xml"},
+ ?assertEqual(Expectation, rabbitmq_aws:parse_content_type("text/xml"))
+ end}
+ ].
+
+
+perform_request_test_() ->
+ {
+ foreach,
+ fun () ->
+ meck:new(httpc, []),
+ meck:new(rabbitmq_aws_config, []),
+ [httpc, rabbitmq_aws_config]
+ end,
+ fun meck:unload/1,
+ [
+ {
+ "has_credentials true",
+ fun() ->
+ State = #state{access_key = "AKIDEXAMPLE",
+ secret_access_key = "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY",
+ region = "us-east-1"},
+ Service = "ec2",
+ Method = get,
+ Headers = [],
+ Path = "/?Action=DescribeTags&Version=2015-10-01",
+ Body = "",
+ Options = [],
+ Host = undefined,
+ ExpectURI = "https://ec2.us-east-1.amazonaws.com/?Action=DescribeTags&Version=2015-10-01",
+ meck:expect(httpc, request,
+ fun(get, {URI, _Headers}, _Options, []) ->
+ case URI of
+ ExpectURI ->
+ {ok, {{"HTTP/1.0", 200, "OK"}, [{"content-type", "application/json"}], "{\"pass\": true}"}};
+ _ ->
+                  {ok, {{"HTTP/1.0", 400, "RequestFailure"}, [{"content-type", "application/json"}], "{\"pass\": false}"}}
+ end
+ end),
+ Expectation = {{ok, {[{"content-type", "application/json"}], [{"pass", true}]}}, State},
+ Result = rabbitmq_aws:perform_request(State, Service, Method, Headers, Path, Body, Options, Host),
+ ?assertEqual(Expectation, Result),
+ meck:validate(httpc)
+ end},
+ {
+ "has_credentials false",
+ fun() ->
+ State = #state{region = "us-east-1"},
+ Service = "ec2",
+ Method = get,
+ Headers = [],
+ Path = "/?Action=DescribeTags&Version=2015-10-01",
+ Body = "",
+ Options = [],
+ Host = undefined,
+ meck:expect(httpc, request, fun(get, {_URI, _Headers}, _Options, []) -> {ok, {{"HTTP/1.0", 400, "RequestFailure"}, [{"content-type", "application/json"}], "{\"pass\": false}"}} end),
+ Expectation = {{error, {credentials, State#state.error}}, State},
+ Result = rabbitmq_aws:perform_request(State, Service, Method, Headers, Path, Body, Options, Host),
+ ?assertEqual(Expectation, Result),
+ meck:validate(httpc)
+ end
+ },
+ {
+ "has expired credentials",
+ fun() ->
+ State = #state{access_key = "AKIDEXAMPLE",
+ secret_access_key = "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY",
+ region = "us-east-1",
+ security_token = "AQoEXAMPLEH4aoAH0gNCAPyJxz4BlCFFxWNE1OPTgk5TthT+FvwqnKwRcOIfrRh3c/L",
+ expiration = {{1973, 1, 1}, {10, 20, 30}}},
+ Service = "ec2",
+ Method = get,
+ Headers = [],
+ Path = "/?Action=DescribeTags&Version=2015-10-01",
+ Body = "",
+ Options = [],
+ Host = undefined,
+ meck:expect(rabbitmq_aws_config, credentials, fun() -> {error, unit_test} end),
+ Expectation = {{error, {credentials, unit_test}}, #state{region = State#state.region, error = unit_test}},
+ Result = rabbitmq_aws:perform_request(State, Service, Method, Headers, Path, Body, Options, Host),
+ ?assertEqual(Expectation, Result),
+ meck:validate(rabbitmq_aws_config)
+ end
+ },
+ {
+ "refresh expired creds",
+ fun() ->
+ State = #state{access_key = "AKIDEXAMPLE",
+ secret_access_key = "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY",
+ region = "us-east-1",
+ security_token = "AQoEXAMPLEH4aoAH0gNCAPyJxz4BlCFFxWNE1OPTgk5TthT+FvwqnKwRcOIfrRh3c/L",
+ expiration = {{1973, 1, 1}, {10, 20, 30}}},
+ Service = "ec2",
+ Method = post,
+ Headers = [{"Content-Type", "text/xml"}],
+ Path = "/?Action=DescribeTags&Version=2015-10-01",
+ Body = "<value>true</value>",
+ Options = [],
+ Host = undefined,
+ meck:expect(httpc, request,
+ fun(post, {_URI, _Headers, "text/xml", "<value>true</value>"}, _Options, []) ->
+ {ok, {{"HTTP/1.0", 200, "OK"}, [{"content-type", "application/json"}], "{\"pass\": true}"}}
+ end),
+
+ State2 = #state{access_key = "AKIDEXAMPLE2",
+ secret_access_key = "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY2",
+ region = "us-east-1",
+ security_token = "AQoEXAMPLEH4aoAH0gNCAPyJxz4BlCFFxWNE1OPTgk5TthT+FvwqnKwRcOIfrRh3c/L2",
+ expiration = calendar:local_time()},
+ meck:expect(rabbitmq_aws_config, credentials,
+ fun() ->
+ {ok,
+ State2#state.access_key,
+ State2#state.secret_access_key,
+ State2#state.expiration,
+ State2#state.security_token}
+ end),
+
+ Expectation = {{ok, {[{"content-type", "application/json"}], [{"pass", true}]}}, State2},
+ Result = rabbitmq_aws:perform_request(State, Service, Method, Headers, Path, Body, Options, Host),
+ ?assertEqual(Expectation, Result),
+ meck:validate(httpc),
+ meck:validate(rabbitmq_aws_config)
+ end},
+ {
+ "creds_error",
+ fun() ->
+ State = #state{error=unit_test},
+ Expectation = {{error, {credentials, State#state.error}}, State},
+ ?assertEqual(Expectation, rabbitmq_aws:perform_request_creds_error(State))
+ end}
+ ]
+ }.
+
+sign_headers_test_() ->
+ {
+ foreach,
+ fun () ->
+ meck:new(calendar, [passthrough, unstick]),
+ [calendar]
+ end,
+ fun meck:unload/1,
+ [
+ {"with security token", fun() ->
+ Value = {{2016, 5, 1}, {12, 0, 0}},
+ meck:expect(calendar, local_time_to_universal_time_dst, fun(_) -> [Value] end),
+ State = #state{access_key = "AKIDEXAMPLE",
+ secret_access_key = "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY",
+ security_token = "AQoEXAMPLEH4aoAH0gNCAPyJxz4BlCFFxWNE1OPTgk5TthT+FvwqnKwRcOIfrRh3c/L",
+ region = "us-east-1"},
+ Service = "ec2",
+ Method = get,
+ Headers = [],
+ Body = "",
+ URI = "http://ec2.us-east-1.amazonaws.com/?Action=DescribeTags&Version=2015-10-01",
+ Expectation = [{"authorization", "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20160501/us-east-1/ec2/aws4_request, SignedHeaders=content-length;date;host;x-amz-content-sha256;x-amz-security-token, Signature=62d10b4897f7d05e4454b75895b5e372f6c2eb6997943cd913680822e94c6999"},
+ {"content-length","0"},
+ {"date","20160501T120000Z"}, {"host","ec2.us-east-1.amazonaws.com"},
+ {"x-amz-content-sha256", "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"},
+ {"x-amz-security-token", "AQoEXAMPLEH4aoAH0gNCAPyJxz4BlCFFxWNE1OPTgk5TthT+FvwqnKwRcOIfrRh3c/L"}],
+ ?assertEqual(Expectation, rabbitmq_aws:sign_headers(State, Service, Method, URI, Headers, Body)),
+ meck:validate(calendar)
+ end}
+ ]
+ }.
diff --git a/deps/rabbitmq_aws/test/src/rabbitmq_aws_urilib_tests.erl b/deps/rabbitmq_aws/test/src/rabbitmq_aws_urilib_tests.erl
new file mode 100644
index 0000000000..c89e4554be
--- /dev/null
+++ b/deps/rabbitmq_aws/test/src/rabbitmq_aws_urilib_tests.erl
@@ -0,0 +1,154 @@
+-module(rabbitmq_aws_urilib_tests).
+
+-include_lib("eunit/include/eunit.hrl").
+
+-include("rabbitmq_aws.hrl").
+
+build_test_() ->
+ [
+ {"variation1", fun() ->
+ Expect = "amqp://guest:password@rabbitmq:5672/%2F?heartbeat=5",
+ Value = #uri{scheme = "amqp",
+ authority = {{"guest", "password"}, "rabbitmq", 5672},
+ path = "/%2F", query = [{"heartbeat", "5"}]},
+ Result = rabbitmq_aws_urilib:build(Value),
+ ?assertEqual(Expect, Result)
+ end},
+ {"variation2", fun() ->
+ Expect = "http://www.google.com:80/search?foo=bar#baz",
+ Value = #uri{scheme = http,
+ authority = {undefined, "www.google.com", 80},
+ path = "/search",
+ query = [{"foo", "bar"}],
+ fragment = "baz"},
+ Result = rabbitmq_aws_urilib:build(Value),
+ ?assertEqual(Expect, Result)
+ end},
+ {"variation3", fun() ->
+ Expect = "https://www.google.com/search",
+ Value = #uri{scheme = "https",
+ authority = {undefined, "www.google.com", undefined},
+ path = "/search"},
+ Result = rabbitmq_aws_urilib:build(Value),
+ ?assertEqual(Expect, Result)
+ end},
+ {"variation5", fun() ->
+ Expect = "https://www.google.com:443/search?foo=true",
+ Value = #uri{scheme = "https",
+ authority = {undefined, "www.google.com", 443},
+ path = "/search",
+ query = [{"foo", true}]},
+ Result = rabbitmq_aws_urilib:build(Value),
+ ?assertEqual(Expect, Result)
+ end},
+ {"variation6", fun() ->
+ Expect = "https://bar@www.google.com:443/search?foo=true",
+ Value = #uri{scheme = "https",
+ authority = {{"bar", undefined}, "www.google.com", 443},
+ path = "/search",
+ query = [{"foo", true}]},
+ Result = rabbitmq_aws_urilib:build(Value),
+ ?assertEqual(Expect, Result)
+ end},
+ {"variation7", fun() ->
+ Expect = "https://www.google.com:443/search?foo=true",
+ Value = #uri{scheme = "https",
+ authority = {undefined, "www.google.com", 443},
+ path = "/search",
+ query = [{"foo", true}]},
+ Result = rabbitmq_aws_urilib:build(Value),
+ ?assertEqual(Expect, Result)
+ end},
+ {"variation8", fun() ->
+ Expect = "https://:@www.google.com:443/search?foo=true",
+ Value = #uri{scheme = "https",
+ authority = {{"", ""}, "www.google.com", 443},
+ path = "/search",
+ query = [{"foo", true}]},
+ Result = rabbitmq_aws_urilib:build(Value),
+ ?assertEqual(Expect, Result)
+ end},
+ {"variation9", fun() ->
+ Expect = "https://bar:@www.google.com:443/search?foo=true#",
+ Value = #uri{scheme = "https",
+ authority={{"bar", ""}, "www.google.com", 443},
+ path="/search",
+ query=[{"foo", true}],
+ fragment=""},
+ Result = rabbitmq_aws_urilib:build(Value),
+ ?assertEqual(Expect, Result)
+ end},
+ {"variation10", fun() ->
+ Expect = "http://www.google.com/search?foo=true#bar",
+ Value = #uri{scheme = "http",
+ authority = {undefined, "www.google.com", undefined},
+ path = "/search",
+ query = [{"foo", true}],
+ fragment = "bar"},
+ Result = rabbitmq_aws_urilib:build(Value),
+ ?assertEqual(Expect, Result)
+ end},
+ {"variation11", fun() ->
+ Expect = "http://www.google.com",
+ Value = #uri{scheme = "http",
+ authority = {undefined, "www.google.com", undefined},
+ path = undefined,
+ query = []},
+ Result = rabbitmq_aws_urilib:build(Value),
+ ?assertEqual(Expect, Result)
+ end}
+ ].
+
+
+build_query_string_test_() ->
+ [
+ {"basic list", fun() ->
+ ?assertEqual("foo=bar&baz=qux",
+ rabbitmq_aws_urilib:build_query_string([{"foo", "bar"},
+ {"baz", "qux"}]))
+ end},
+ {"empty list", fun() ->
+ ?assertEqual("", rabbitmq_aws_urilib:build_query_string([]))
+ end}
+ ].
+
+
+parse_test_() ->
+ [
+ {"variation1", fun() ->
+ URI = "amqp://guest:password@rabbitmq:5672/%2F?heartbeat=5",
+ Expect = #uri{scheme = "amqp",
+ authority = {{"guest", "password"}, "rabbitmq", 5672},
+ path = "/%2F",
+ query = [{"heartbeat", "5"}],
+ fragment = undefined},
+ ?assertEqual(Expect, rabbitmq_aws_urilib:parse(URI))
+ end},
+ {"variation2", fun() ->
+ URI = "http://www.google.com/search?foo=bar#baz",
+ Expect = #uri{scheme = "http",
+ authority = {undefined, "www.google.com", 80},
+ path = "/search",
+ query = [{"foo", "bar"}],
+ fragment = "baz"},
+ ?assertEqual(Expect, rabbitmq_aws_urilib:parse(URI))
+ end},
+ {"variation3", fun() ->
+ URI = "https://www.google.com/search",
+ Expect = #uri{scheme = "https",
+ authority = {undefined, "www.google.com", 443},
+ path = "/search",
+ query = "",
+ fragment = undefined},
+ ?assertEqual(Expect, rabbitmq_aws_urilib:parse(URI))
+ end},
+ {"variation4", fun() ->
+ URI = "https://www.google.com/search?foo=true",
+ Expect = #uri{scheme = "https",
+ authority = {undefined, "www.google.com", 443},
+ path = "/search",
+ query = [{"foo", "true"}],
+ fragment = undefined},
+ ?assertEqual(Expect, rabbitmq_aws_urilib:parse(URI))
+ end}
+ ].
diff --git a/deps/rabbitmq_aws/test/src/rabbitmq_aws_xml_tests.erl b/deps/rabbitmq_aws/test/src/rabbitmq_aws_xml_tests.erl
new file mode 100644
index 0000000000..41b9d4ceb0
--- /dev/null
+++ b/deps/rabbitmq_aws/test/src/rabbitmq_aws_xml_tests.erl
@@ -0,0 +1,40 @@
+-module(rabbitmq_aws_xml_tests).
+
+-include_lib("eunit/include/eunit.hrl").
+
+-include("rabbitmq_aws.hrl").
+
+parse_test_() ->
+ [
+ {"s3 error response", fun() ->
+ Response = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<Error><Code>SignatureDoesNotMatch</Code><Message>The request signature we calculated does not match the signature you provided. Check your key and signing method.</Message><AWSAccessKeyId>AKIAIPPU25E5RA4MIYKQ</AWSAccessKeyId><StringToSign>AWS4-HMAC-SHA256\n20160516T041429Z\n20160516/us-east-1/s3/aws4_request\n7e908e36ea6c07e542ffac21ec3e11acc3baf022d9133d9764e1521b152586f7</StringToSign><SignatureProvided>841d7b89150d246feee9bceb90f5cae91d0c45f44851742c73eb87dc8472748e</SignatureProvided><StringToSignBytes>41 57 53 34 2d 48 4d 41 43 2d 53 48 41 32 35 36 0a 32 30 31 36 30 35 31 36 54 30 34 31 34 32 39 5a 0a 32 30 31 36 30 35 31 36 2f 75 73 2d 65 61 73 74 2d 31 2f 73 33 2f 61 77 73 34 5f 72 65 71 75 65 73 74 0a 37 65 39 30 38 65 33 36 65 61 36 63 30 37 65 35 34 32 66 66 61 63 32 31 65 63 33 65 31 31 61 63 63 33 62 61 66 30 32 32 64 39 31 33 33 64 39 37 36 34 65 31 35 32 31 62 31 35 32 35 38 36 66 37</StringToSignBytes><CanonicalRequest>GET\n/\nlist-type=2\ncontent-length:0\ndate:20160516T041429Z\nhost:s3.us-east-1.amazonaws.com\nx-amz-content-sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855\n\ncontent-length;date;host;x-amz-content-sha256\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855</CanonicalRequest><CanonicalRequestBytes>47 45 54 0a 2f 0a 6c 69 73 74 2d 74 79 70 65 3d 32 0a 63 6f 6e 74 65 6e 74 2d 6c 65 6e 67 74 68 3a 30 0a 64 61 74 65 3a 32 30 31 36 30 35 31 36 54 30 34 31 34 32 39 5a 0a 68 6f 73 74 3a 73 33 2e 75 73 2d 65 61 73 74 2d 31 2e 61 6d 61 7a 6f 6e 61 77 73 2e 63 6f 6d 0a 78 2d 61 6d 7a 2d 63 6f 6e 74 65 6e 74 2d 73 68 61 32 35 36 3a 65 33 62 30 63 34 34 32 39 38 66 63 31 63 31 34 39 61 66 62 66 34 63 38 39 39 36 66 62 39 32 34 32 37 61 65 34 31 65 34 36 34 39 62 39 33 34 63 61 34 39 35 39 39 31 62 37 38 35 32 62 38 35 35 0a 0a 63 6f 6e 74 65 6e 74 2d 6c 65 6e 67 74 68 3b 64 61 74 65 3b 68 6f 73 74 3b 78 2d 61 6d 7a 2d 63 6f 6e 74 65 6e 74 2d 73 68 61 32 35 36 0a 65 33 62 30 63 34 34 32 39 38 66 63 31 63 31 34 39 61 66 62 66 34 63 38 39 39 36 66 62 39 32 34 32 37 61 65 34 31 65 34 36 34 39 62 39 33 34 63 61 34 39 35 39 39 31 62 37 38 35 32 62 38 35 35</CanonicalRequestBytes><RequestId>8EB36F450B78C45D</RequestId><HostId>IYXsnJ59yqGI/IzjGoPGUz7NGb/t0ETlWH4v5+l8EGWmHLbhB1b2MsjbSaY5A8M3g7Fn/Nliqpw=</HostId></Error>",
+ Expectation = [{"Error", [
+ {"Code", "SignatureDoesNotMatch"},
+ {"Message", "The request signature we calculated does not match the signature you provided. Check your key and signing method."},
+ {"AWSAccessKeyId", "AKIAIPPU25E5RA4MIYKQ"},
+ {"StringToSign", "AWS4-HMAC-SHA256\n20160516T041429Z\n20160516/us-east-1/s3/aws4_request\n7e908e36ea6c07e542ffac21ec3e11acc3baf022d9133d9764e1521b152586f7"},
+ {"SignatureProvided", "841d7b89150d246feee9bceb90f5cae91d0c45f44851742c73eb87dc8472748e"},
+ {"StringToSignBytes", "41 57 53 34 2d 48 4d 41 43 2d 53 48 41 32 35 36 0a 32 30 31 36 30 35 31 36 54 30 34 31 34 32 39 5a 0a 32 30 31 36 30 35 31 36 2f 75 73 2d 65 61 73 74 2d 31 2f 73 33 2f 61 77 73 34 5f 72 65 71 75 65 73 74 0a 37 65 39 30 38 65 33 36 65 61 36 63 30 37 65 35 34 32 66 66 61 63 32 31 65 63 33 65 31 31 61 63 63 33 62 61 66 30 32 32 64 39 31 33 33 64 39 37 36 34 65 31 35 32 31 62 31 35 32 35 38 36 66 37"},
+ {"CanonicalRequest", "GET\n/\nlist-type=2\ncontent-length:0\ndate:20160516T041429Z\nhost:s3.us-east-1.amazonaws.com\nx-amz-content-sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855\n\ncontent-length;date;host;x-amz-content-sha256\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"},
+ {"CanonicalRequestBytes", "47 45 54 0a 2f 0a 6c 69 73 74 2d 74 79 70 65 3d 32 0a 63 6f 6e 74 65 6e 74 2d 6c 65 6e 67 74 68 3a 30 0a 64 61 74 65 3a 32 30 31 36 30 35 31 36 54 30 34 31 34 32 39 5a 0a 68 6f 73 74 3a 73 33 2e 75 73 2d 65 61 73 74 2d 31 2e 61 6d 61 7a 6f 6e 61 77 73 2e 63 6f 6d 0a 78 2d 61 6d 7a 2d 63 6f 6e 74 65 6e 74 2d 73 68 61 32 35 36 3a 65 33 62 30 63 34 34 32 39 38 66 63 31 63 31 34 39 61 66 62 66 34 63 38 39 39 36 66 62 39 32 34 32 37 61 65 34 31 65 34 36 34 39 62 39 33 34 63 61 34 39 35 39 39 31 62 37 38 35 32 62 38 35 35 0a 0a 63 6f 6e 74 65 6e 74 2d 6c 65 6e 67 74 68 3b 64 61 74 65 3b 68 6f 73 74 3b 78 2d 61 6d 7a 2d 63 6f 6e 74 65 6e 74 2d 73 68 61 32 35 36 0a 65 33 62 30 63 34 34 32 39 38 66 63 31 63 31 34 39 61 66 62 66 34 63 38 39 39 36 66 62 39 32 34 32 37 61 65 34 31 65 34 36 34 39 62 39 33 34 63 61 34 39 35 39 39 31 62 37 38 35 32 62 38 35 35"},
+ {"RequestId","8EB36F450B78C45D"},
+ {"HostId", "IYXsnJ59yqGI/IzjGoPGUz7NGb/t0ETlWH4v5+l8EGWmHLbhB1b2MsjbSaY5A8M3g7Fn/Nliqpw="}
+ ]}],
+ ?assertEqual(Expectation, rabbitmq_aws_xml:parse(Response))
+ end},
+ {"whitespace", fun() ->
+ Response = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<test> <example> value</example>\n</test> \n",
+ Expectation = [{"test", [{"example", "value"}]}],
+ ?assertEqual(Expectation, rabbitmq_aws_xml:parse(Response))
+ end},
+ {"multiple items", fun() ->
+ Response = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<test><values><example>value</example><example>value2</example></values>\n</test> \n",
+ Expectation = [{"test", [{"values", [{"example", "value"}, {"example", "value2"}]}]}],
+ ?assertEqual(Expectation, rabbitmq_aws_xml:parse(Response))
+ end},
+ {"small snippert", fun() ->
+ Response = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<test>value</test>",
+ Expectation = [{"test", "value"}],
+ ?assertEqual(Expectation, rabbitmq_aws_xml:parse(Response))
+ end}
+ ].
diff --git a/deps/rabbitmq_aws/test/src/test_aws_config.ini b/deps/rabbitmq_aws/test/src/test_aws_config.ini
new file mode 100644
index 0000000000..370287a800
--- /dev/null
+++ b/deps/rabbitmq_aws/test/src/test_aws_config.ini
@@ -0,0 +1,25 @@
+[default]
+aws_access_key_id=default-key
+aws_secret_access_key=default-access-key
+region=us-east-4
+
+[profile testing]
+aws_access_key_id = foo1
+aws_secret_access_key = bar2
+s3 =
+ max_concurrent_requests=10
+ max_queue_size=1000
+region = us-west-5
+
+[profile no-region]
+aws_access_key_id = foo2
+aws_secret_access_key = bar3
+
+[profile only-key]
+aws_access_key_id = foo3
+
+[profile only-secret]
+aws_secret_access_key = foo4
+
+[profile bad-entry]
+aws_secret_access = foo5
diff --git a/deps/rabbitmq_aws/test/src/test_aws_credentials.ini b/deps/rabbitmq_aws/test/src/test_aws_credentials.ini
new file mode 100644
index 0000000000..03067b2c32
--- /dev/null
+++ b/deps/rabbitmq_aws/test/src/test_aws_credentials.ini
@@ -0,0 +1,16 @@
+[default]
+aws_access_key_id=foo1
+aws_secret_access_key=bar1
+
+[development]
+aws_access_key_id=foo2
+aws_secret_access_key=bar2
+
+[only-key]
+aws_access_key_id = foo3
+
+[only-secret]
+aws_secret_access_key = foo4
+
+[bad-entry]
+aws_secret_access = foo5
diff --git a/deps/rabbitmq_cli/.gitignore b/deps/rabbitmq_cli/.gitignore
new file mode 100644
index 0000000000..0ade5483bf
--- /dev/null
+++ b/deps/rabbitmq_cli/.gitignore
@@ -0,0 +1,12 @@
+/_build
+/cover
+/deps
+/escript
+/log
+/.erlang.mk/
+/ebin
+erl_crash.dump
+mix.lock
+*.ez
+.sw?
+.*.sw?
diff --git a/deps/rabbitmq_cli/.travis.yml b/deps/rabbitmq_cli/.travis.yml
new file mode 100644
index 0000000000..3937ec84c2
--- /dev/null
+++ b/deps/rabbitmq_cli/.travis.yml
@@ -0,0 +1,57 @@
+# vim:sw=2:et:
+
+os: linux
+dist: xenial
+language: elixir
+notifications:
+ email:
+ recipients:
+ - alerts@rabbitmq.com
+ on_success: never
+ on_failure: always
+addons:
+ apt:
+ packages:
+ - awscli
+cache:
+ apt: true
+env:
+ global:
+
+ # $base_rmq_ref is used by rabbitmq-components.mk to select the
+ # appropriate branch for dependencies.
+ - base_rmq_ref=master
+
+jobs:
+ include:
+ - elixir: '1.10'
+ otp_release: '22.3'
+ - elixir: '1.10'
+ otp_release: '23.0'
+
+install:
+ # This project being an Erlang one (we just set language to Elixir
+ # to ensure it is installed), we don't want Travis to run mix(1)
+ # automatically as it will break.
+ - mix local.rebar --force
+
+script:
+ # $current_rmq_ref is also used by rabbitmq-components.mk to select
+ # the appropriate branch for dependencies.
+ - make
+ DEPS_DIR=$PWD/..
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+ - |
+ git clone \
+ --branch "$base_rmq_ref" \
+ --depth 1 \
+ https://github.com/rabbitmq/rabbitmq-server-release.git \
+ ../rabbitmq_server_release
+ make start-background-broker -C ../rabbitmq_server_release \
+ DEPS_DIR=$PWD/.. \
+ PLUGINS='rabbitmq_federation rabbitmq_stomp' \
+ PROJECT_VERSION=3.9.0 \
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+ - make tests
+ DEPS_DIR=$PWD/..
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
diff --git a/deps/rabbitmq_cli/.travis.yml.patch b/deps/rabbitmq_cli/.travis.yml.patch
new file mode 100644
index 0000000000..3b85c106c3
--- /dev/null
+++ b/deps/rabbitmq_cli/.travis.yml.patch
@@ -0,0 +1,62 @@
+--- .travis.yml.orig 2020-10-12 17:29:44.096296000 +0200
++++ .travis.yml 2020-10-12 17:26:40.450974000 +0200
+@@ -22,38 +22,36 @@
+ # appropriate branch for dependencies.
+ - base_rmq_ref=master
+
+-elixir:
+- - '1.10'
+-otp_release:
+- - '22.3'
+- - '23.1'
++jobs:
++ include:
++ - elixir: '1.10'
++ otp_release: '22.3'
++ - elixir: '1.10'
++ otp_release: '23.1'
+
+ install:
+ # This project being an Erlang one (we just set language to Elixir
+ # to ensure it is installed), we don't want Travis to run mix(1)
+ # automatically as it will break.
+- skip
++ - mix local.rebar --force
+
+ script:
+ # $current_rmq_ref is also used by rabbitmq-components.mk to select
+ # the appropriate branch for dependencies.
+- - make check-rabbitmq-components.mk
++ - make
++ DEPS_DIR=$PWD/..
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+- - make xref
+- current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
++ - |
++ git clone \
++ --branch "$base_rmq_ref" \
++ --depth 1 \
++ https://github.com/rabbitmq/rabbitmq-server-release.git \
++ ../rabbitmq_server_release
++ make start-background-broker -C ../rabbitmq_server_release \
++ DEPS_DIR=$PWD/.. \
++ PLUGINS='rabbitmq_federation rabbitmq_stomp' \
++ PROJECT_VERSION=3.9.0 \
++ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+ - make tests
++ DEPS_DIR=$PWD/..
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+-
+-after_failure:
+- - |
+- cd "$TRAVIS_BUILD_DIR"
+- if test -d logs && test "$AWS_ACCESS_KEY_ID" && test "$AWS_SECRET_ACCESS_KEY"; then
+- archive_name="$(basename "$TRAVIS_REPO_SLUG")-$TRAVIS_JOB_NUMBER"
+-
+- tar -c --transform "s/^logs/${archive_name}/" -f - logs | \
+- xz > "${archive_name}.tar.xz"
+-
+- aws s3 cp "${archive_name}.tar.xz" s3://server-release-pipeline/travis-ci-logs/ \
+- --region eu-west-1 \
+- --acl public-read
+- fi
diff --git a/deps/rabbitmq_cli/CODE_OF_CONDUCT.md b/deps/rabbitmq_cli/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000000..08697906fd
--- /dev/null
+++ b/deps/rabbitmq_cli/CODE_OF_CONDUCT.md
@@ -0,0 +1,44 @@
+# Contributor Code of Conduct
+
+As contributors and maintainers of this project, and in the interest of fostering an open
+and welcoming community, we pledge to respect all people who contribute through reporting
+issues, posting feature requests, updating documentation, submitting pull requests or
+patches, and other activities.
+
+We are committed to making participation in this project a harassment-free experience for
+everyone, regardless of level of experience, gender, gender identity and expression,
+sexual orientation, disability, personal appearance, body size, race, ethnicity, age,
+religion, or nationality.
+
+Examples of unacceptable behavior by participants include:
+
+ * The use of sexualized language or imagery
+ * Personal attacks
+ * Trolling or insulting/derogatory comments
+ * Public or private harassment
+ * Publishing others' private information, such as physical or electronic addresses,
+ without explicit permission
+ * Other unethical or unprofessional conduct
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments,
+commits, code, wiki edits, issues, and other contributions that are not aligned to this
+Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors
+that they deem inappropriate, threatening, offensive, or harmful.
+
+By adopting this Code of Conduct, project maintainers commit themselves to fairly and
+consistently applying these principles to every aspect of managing this project. Project
+maintainers who do not follow or enforce the Code of Conduct may be permanently removed
+from the project team.
+
+This Code of Conduct applies both within project spaces and in public spaces when an
+individual is representing the project or its community.
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by
+contacting a project maintainer at [info@rabbitmq.com](mailto:info@rabbitmq.com). All complaints will
+be reviewed and investigated and will result in a response that is deemed necessary and
+appropriate to the circumstances. Maintainers are obligated to maintain confidentiality
+with regard to the reporter of an incident.
+
+This Code of Conduct is adapted from the
+[Contributor Covenant](https://contributor-covenant.org), version 1.3.0, available at
+[contributor-covenant.org/version/1/3/0/](https://contributor-covenant.org/version/1/3/0/)
diff --git a/deps/rabbitmq_cli/COMMAND_TUTORIAL.md b/deps/rabbitmq_cli/COMMAND_TUTORIAL.md
new file mode 100644
index 0000000000..8ead46afa7
--- /dev/null
+++ b/deps/rabbitmq_cli/COMMAND_TUTORIAL.md
@@ -0,0 +1,459 @@
+# Implementing Your Own rabbitmqctl Command
+
+## Introduction
+
+As of `3.7.0`, RabbitMQ [CLI
+tools](https://github.com/rabbitmq/rabbitmq-cli) (e.g. `rabbitmqctl`)
+allow plugin developers to extend them with their own commands.
+
+The CLI is written in the [Elixir programming
+language](https://elixir-lang.org/) and commands can be implemented in
+Elixir, Erlang or any other Erlang-based language. This tutorial will
+use Elixir but also provides an Erlang example. The fundamentals are
+the same.
+
+This tutorial doesn't cover the RabbitMQ plugin development process.
+To develop a new plugin, check out these existing tutorials:
+
+ * [RabbitMQ Plugin Development](https://www.rabbitmq.com/plugin-development.html) (in Erlang)
+ * [Using Elixir to Write RabbitMQ Plugins](https://www.rabbitmq.com/blog/2013/06/03/using-elixir-to-write-rabbitmq-plugins/)
+
+
+## Anatomy of a RabbitMQ CLI Command
+
+A RabbitMQ CLI command is an Elixir/Erlang module that implements a
+particular [behavior](https://elixir-lang.org/getting-started/typespecs-and-behaviours.html).
+It should fulfill certain requirements in order to be discovered and loaded by CLI tools:
+
+ * Follow a naming convention (module name should match `RabbitMQ.CLI.(.*).Commands.(.*)Command`)
+ * Be included in a plugin application's module list (`modules` in the `.app` file)
+ * Implement `RabbitMQ.CLI.CommandBehaviour`
+
+## Implementing `RabbitMQ.CLI.CommandBehaviour` in Erlang
+
+When implementing a command in Erlang, you should add `Elixir` as a prefix to
+the module name and behaviour, because the CLI is written in Elixir.
+The module name should match `Elixir.RabbitMQ.CLI.(.*).Commands.(.*)Command`
+and implement `Elixir.RabbitMQ.CLI.CommandBehaviour`.
+
+
+## The Actual Tutorial
+
+Let's write a command that does something simple, e.g. deleting a queue.
+We will use Elixir for that.
+
+First we need to declare a module with a behaviour, for example:
+
+```
+defmodule RabbitMQ.CLI.Ctl.Commands.DeleteQueueCommand do
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+end
+```
+
+So far so good. But if we try to compile it, we'd see compilation errors:
+
+```
+warning: undefined behaviour function usage/0 (for behaviour RabbitMQ.CLI.CommandBehaviour)
+ lib/delete_queue_command.ex:1
+
+warning: undefined behaviour function banner/2 (for behaviour RabbitMQ.CLI.CommandBehaviour)
+ lib/delete_queue_command.ex:1
+
+warning: undefined behaviour function merge_defaults/2 (for behaviour RabbitMQ.CLI.CommandBehaviour)
+ lib/delete_queue_command.ex:1
+
+warning: undefined behaviour function validate/2 (for behaviour RabbitMQ.CLI.CommandBehaviour)
+ lib/delete_queue_command.ex:1
+
+warning: undefined behaviour function run/2 (for behaviour RabbitMQ.CLI.CommandBehaviour)
+ lib/delete_queue_command.ex:1
+
+warning: undefined behaviour function output/2 (for behaviour RabbitMQ.CLI.CommandBehaviour)
+ lib/delete_queue_command.ex:1
+```
+
+So some functions are missing. Let's implement them.
+
+
+### Usage: Help Section
+
+We'll start with
+the `usage/0` function, which provides the command's usage line for the help section:
+
+```
+ def usage(), do: "delete_queue queue_name [--if-empty|-e] [--if-unused|-u] [--vhost|-p vhost]"
+```
+
+### CLI Argument Parsing: Switches, Positional Arguments, Aliases
+
+We want our command to accept a `queue_name` positional argument,
+two named boolean arguments (flags), `if_empty` and `if_unused`,
+and a `vhost` argument that takes a value.
+
+We also want to specify shortcuts to our named arguments so that the user can use
+`-e` instead of `--if-empty`.
+
+We'll next implement the `switches/0` and `aliases/0` functions to let the CLI know how it
+should parse command line arguments for this command:
+
+```
+ def switches(), do: [if_empty: :boolean, if_unused: :boolean]
+  def aliases(), do: [e: :if_empty, u: :if_unused]
+```
+
+Switches specify long argument names and their types; aliases specify shorter names.
+
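+Since the CLI is built on Elixir's standard `OptionParser`, you can preview how
+these declarations behave in an `iex` session (shown purely as an illustration;
+the command itself never calls this directly):
+
+```
+iex> OptionParser.parse(
+...>   ["my_queue", "-e", "--if-unused"],
+...>   switches: [if_empty: :boolean, if_unused: :boolean],
+...>   aliases: [e: :if_empty, u: :if_unused]
+...> )
+{[if_empty: true, if_unused: true], ["my_queue"], []}
+```
+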
+You might have noticed there is no `vhost` switch there. It's because `vhost` is a global
+switch and will be available to all commands in the CLI: after all, many things
+in RabbitMQ are scoped per vhost.
+
+Both `switches/0` and `aliases/0` callbacks are optional.
+If your command doesn't have shorter argument names, you can omit `aliases/0`.
+If the command doesn't have any named arguments at all, you can omit both functions.
+
+We've described how the CLI should parse this command's arguments; now let's describe what
+the command should do.
+
+### Command Banner
+
+We start with the `banner/2` function, which tells the user what the command is going to do.
+If you call the command with the `--dry-run` argument, it will only print the banner,
+without executing the actual command:
+
+```
+ def banner([qname], %{vhost: vhost,
+ if_empty: if_empty,
+ if_unused: if_unused}) do
+ if_empty_str = case if_empty do
+ true -> "if queue is empty"
+ false -> ""
+ end
+ if_unused_str = case if_unused do
+ true -> "if queue is unused"
+ false -> ""
+ end
+ "Deleting queue #{qname} on vhost #{vhost} " <>
+ Enum.join([if_empty_str, if_unused_str], " and ")
+ end
+
+```
+
+The function above can access arguments and command flags (named arguments)
+to decide what exactly it should do.
+
+### Default Argument Values and Argument Validation
+
+As you can see, the `banner/2` function expects exactly one positional argument and
+the `vhost`, `if_empty` and `if_unused` options.
+To make sure the command has all the correct arguments, you can use
+the `merge_defaults/2` and `validate/2` functions:
+
+```
+ def merge_defaults(args, options) do
+ {
+ args,
+ Map.merge(%{if_empty: false, if_unused: false, vhost: "/"}, options)
+ }
+ end
+
+ def validate([], _options) do
+ {:validation_failure, :not_enough_args}
+ end
+ def validate([_,_|_], _options) do
+ {:validation_failure, :too_many_args}
+ end
+ def validate([""], _options) do
+ {
+ :validation_failure,
+ {:bad_argument, "queue name cannot be empty string."}
+ }
+ end
+ def validate([_], _options) do
+ :ok
+ end
+```
+
+The `merge_defaults/2` function accepts positional arguments and options and returns a tuple
+with effective arguments and options that will be passed on to `validate/2`,
+`banner/2` and `run/2`.
+
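+For example, assuming the module above is compiled, a call that only sets one of
+the flags has the rest filled in from the defaults:
+
+```
+iex> RabbitMQ.CLI.Ctl.Commands.DeleteQueueCommand.merge_defaults(
+...>   ["my_queue"], %{if_unused: true})
+{["my_queue"], %{if_empty: false, if_unused: true, vhost: "/"}}
+```
+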
+The `validate/2` function can return either `:ok` (just the atom) or a
+tuple in the form of `{:validation_failure, error}`. The function above checks
+that we have exactly one positional argument and that it is not empty.
+
+While this is not enforced, for a command to be practical
+at least one `validate/2` head must return `:ok`.
+
+
+### Command Execution
+
+`validate/2` is useful for command line argument validation but there can be
+other things that require validation before a command can be executed. For example,
+a command may require a RabbitMQ node to be running (or stopped), a file to exist
+and be readable, an environment variable to be exported and so on.
+
+There's another validation function, `validate_execution_environment/2`, for
+such cases. That function accepts the same arguments and must return either `:ok`
+or `{:validation_failure, error}`. What's the difference, you may ask?
+`validate_execution_environment/2` is optional.
+
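+As a minimal sketch of what such a check could look like (the `definitions_file`
+option below is made up purely for illustration and is not part of the command
+above), a clause could verify that a file exists and is readable:
+
+```
+  # Hypothetical example: refuse to run unless the given file is readable.
+  def validate_execution_environment([_qname], %{definitions_file: path}) do
+    case File.read(path) do
+      {:ok, _contents} -> :ok
+      {:error, reason} ->
+        {:validation_failure, {:bad_argument, "cannot read #{path}: #{reason}"}}
+    end
+  end
+  def validate_execution_environment(_args, _options), do: :ok
+```
+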
+To perform the actual command operation, the `run/2` function needs to be defined:
+
+```
+ def run([qname], %{node: node, vhost: vhost,
+ if_empty: if_empty, if_unused: if_unused}) do
+ ## Generate the queue resource name from queue name and vhost
+ queue_resource = :rabbit_misc.r(vhost, :queue, qname)
+ ## Lookup the queue on broker node using resource name
+ case :rabbit_misc.rpc_call(node, :rabbit_amqqueue, :lookup,
+ [queue_resource]) do
+ {:ok, queue} ->
+ ## Delete the queue
+        :rabbit_misc.rpc_call(node, :rabbit_amqqueue, :delete,
+                                    [queue, if_unused, if_empty, "cli_user"]);
+ {:error, _} = error -> error
+ end
+ end
+```
+
+In the example above we delegate to a `:rabbit_misc` function in `run/2`. You can use any functions
+from [rabbit_common](https://github.com/rabbitmq/rabbitmq-common) directly but to
+do something on a broker (remote) node, you need to use RPC calls.
+These can be the standard Erlang `rpc:call` set of functions or `rabbit_misc:rpc_call/4`.
+The latter is used by all standard commands and is generally recommended.
+
+The target RabbitMQ node name is passed in as the `node` option, which is
+a global option and is available to all commands.
+
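+For example, a user can target a specific node with the standard global
+`--node` (`-n`) flag, and that value is what `run/2` sees in the `node` option:
+
+```
+rabbitmqctl delete_queue my_queue --node rabbit@target-host
+```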
+
+### Command Output
+
+Finally we would like to present the user with a command execution result.
+To do that, we'll define `output/2` to format the `run/2` return value:
+
+```
+ def output({:error, :not_found}, _options) do
+ {:error, RabbitMQ.CLI.Core.ExitCodes.exit_usage, "Queue not found"}
+ end
+ def output({:error, :not_empty}, _options) do
+ {:error, RabbitMQ.CLI.Core.ExitCodes.exit_usage, "Queue is not empty"}
+ end
+ def output({:error, :in_use}, _options) do
+ {:error, RabbitMQ.CLI.Core.ExitCodes.exit_usage, "Queue is in use"}
+ end
+ def output({:ok, queue_length}, _options) do
+ {:ok, "Queue was successfully deleted with #{queue_length} messages"}
+ end
+ ## Use default output for all other cases
+ use RabbitMQ.CLI.DefaultOutput
+```
+
+We have function clauses for every possible output of `rabbit_amqqueue:delete/4` used
+in the `run/2` function.
+
+For a run to be successful, the `output/2` function should return an `{:ok, result}` pair,
+and to indicate an error it should return a `{:error, exit_code, message}` tuple.
+`exit_code` must be an integer and `message` is a string or a list of strings.
+
+The CLI program will exit with the `exit_code` in case of an error, or `0` in case of success.
+
+`RabbitMQ.CLI.DefaultOutput` is a module which can handle common error cases
+(e.g. `badrpc` when the target RabbitMQ node cannot be contacted or authenticated with using the Erlang cookie).
+
+In the example above, we use Elixir's `use` statement to import
+function clauses for `output/2` from the `DefaultOutput` module. For
+some commands such delegation will be sufficient.
+
+### Testing the Command
+
+That's it. Now you can add this command to your plugin, compile it, enable the plugin and run
+
+`rabbitmqctl delete_queue my_queue --vhost my_vhost`
+
+to delete a queue.
+
+
+## Full Module Example in Elixir
+
+Full module definition in Elixir:
+
+```
+defmodule RabbitMQ.CLI.Ctl.Commands.DeleteQueueCommand do
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ def switches(), do: [if_empty: :boolean, if_unused: :boolean]
+  def aliases(), do: [e: :if_empty, u: :if_unused]
+
+  def usage(), do: "delete_queue queue_name [--if-empty|-e] [--if-unused|-u] [--vhost|-p vhost]"
+
+ def banner([qname], %{vhost: vhost,
+ if_empty: if_empty,
+ if_unused: if_unused}) do
+ if_empty_str = case if_empty do
+ true -> "if queue is empty"
+ false -> ""
+ end
+ if_unused_str = case if_unused do
+ true -> "if queue is unused"
+ false -> ""
+ end
+ "Deleting queue #{qname} on vhost #{vhost} " <>
+ Enum.join([if_empty_str, if_unused_str], " and ")
+ end
+
+ def merge_defaults(args, options) do
+ {
+ args,
+ Map.merge(%{if_empty: false, if_unused: false, vhost: "/"}, options)
+ }
+ end
+
+ def validate([], _options) do
+ {:validation_failure, :not_enough_args}
+ end
+ def validate([_,_|_], _options) do
+ {:validation_failure, :too_many_args}
+ end
+ def validate([""], _options) do
+ {
+ :validation_failure,
+ {:bad_argument, "queue name cannot be empty string."}
+ }
+ end
+ def validate([_], _options) do
+ :ok
+ end
+
+ def run([qname], %{node: node, vhost: vhost,
+ if_empty: if_empty, if_unused: if_unused}) do
+ ## Generate queue resource name from queue name and vhost
+ queue_resource = :rabbit_misc.r(vhost, :queue, qname)
+ ## Lookup a queue on broker node using resource name
+ case :rabbit_misc.rpc_call(node, :rabbit_amqqueue, :lookup,
+ [queue_resource]) do
+ {:ok, queue} ->
+ ## Delete queue
+ :rabbit_misc.rpc_call(node, :rabbit_amqqueue, :delete,
+ [queue, if_unused, if_empty, "cli_user"]);
+ {:error, _} = error -> error
+ end
+ end
+
+ def output({:error, :not_found}, _options) do
+ {:error, RabbitMQ.CLI.Core.ExitCodes.exit_usage, "Queue not found"}
+ end
+ def output({:error, :not_empty}, _options) do
+ {:error, RabbitMQ.CLI.Core.ExitCodes.exit_usage, "Queue is not empty"}
+ end
+ def output({:error, :in_use}, _options) do
+ {:error, RabbitMQ.CLI.Core.ExitCodes.exit_usage, "Queue is in use"}
+ end
+ def output({:ok, qlen}, _options) do
+ {:ok, "Queue was successfully deleted with #{qlen} messages"}
+ end
+ ## Use default output for all non-special case outputs
+ use RabbitMQ.CLI.DefaultOutput
+end
+```
+
+## Full Module Example in Erlang
+
+The same module implemented in Erlang. Note the fairly
+unusual Elixir module and behaviour names: since they contain
+dots, they must be escaped with single quotes to be valid Erlang atoms:
+
+```
+-module('Elixir.RabbitMQ.CLI.Ctl.Commands.DeleteQueueCommand').
+
+-behaviour('Elixir.RabbitMQ.CLI.CommandBehaviour').
+
+-export([switches/0, aliases/0, usage/0,
+ banner/2, merge_defaults/2, validate/2, run/2, output/2]).
+
+switches() -> [{if_empty, boolean}, {if_unused, boolean}].
+aliases() -> [{e, if_empty}, {u, if_unused}].
+
+usage() -> <<"delete_queue queue_name [--if-empty|-e] [--if-unused|-u] [--vhost|-p vhost]">>.
+
+banner([Qname], #{vhost := Vhost,
+ if_empty := IfEmpty,
+ if_unused := IfUnused}) ->
+ IfEmptyStr = case IfEmpty of
+ true -> ["if queue is empty"];
+ false -> []
+ end,
+ IfUnusedStr = case IfUnused of
+ true -> ["if queue is unused"];
+ false -> []
+ end,
+ iolist_to_binary(
+ io_lib:format("Deleting queue ~s on vhost ~s ~s",
+ [Qname, Vhost,
+ string:join(IfEmptyStr ++ IfUnusedStr, " and ")])).
+
+merge_defaults(Args, Options) ->
+ {
+ Args,
+ maps:merge(#{if_empty => false, if_unused => false, vhost => <<"/">>},
+ Options)
+ }.
+
+validate([], _Options) ->
+ {validation_failure, not_enough_args};
+validate([_,_|_], _Options) ->
+ {validation_failure, too_many_args};
+validate([<<"">>], _Options) ->
+ {
+ validation_failure,
+ {bad_argument, <<"queue name cannot be empty string.">>}
+ };
+validate([_], _Options) -> ok.
+
+run([Qname], #{node := Node, vhost := Vhost,
+ if_empty := IfEmpty, if_unused := IfUnused}) ->
+ %% Generate queue resource name from queue name and vhost
+ QueueResource = rabbit_misc:r(Vhost, queue, Qname),
+ %% Lookup a queue on broker node using resource name
+ case rabbit_misc:rpc_call(Node, rabbit_amqqueue, lookup, [QueueResource]) of
+ {ok, Queue} ->
+ %% Delete queue
+ rabbit_misc:rpc_call(Node, rabbit_amqqueue, delete,
+ [Queue, IfUnused, IfEmpty, <<"cli_user">>]);
+ {error, _} = Error -> Error
+ end.
+
+output({error, not_found}, _Options) ->
+ {
+ error,
+ 'Elixir.RabbitMQ.CLI.Core.ExitCodes':exit_usage(),
+ <<"Queue not found">>
+ };
+output({error, not_empty}, _Options) ->
+ {
+ error,
+ 'Elixir.RabbitMQ.CLI.Core.ExitCodes':exit_usage(),
+ <<"Queue is not empty">>
+ };
+output({error, in_use}, _Options) ->
+ {
+ error,
+ 'Elixir.RabbitMQ.CLI.Core.ExitCodes':exit_usage(),
+ <<"Queue is in use">>
+ };
+output({ok, QLen}, _Options) ->
+    {ok, iolist_to_binary(
+           io_lib:format("Queue was successfully deleted with ~p messages", [QLen]))};
+output(Other, Options) ->
+ 'Elixir.RabbitMQ.CLI.DefaultOutput':output(Other, Options, ?MODULE).
+```
+
+## Wrapping Up
+
+Phew. That's it! Implementing a new CLI command wasn't too difficult.
+That's because extensibility was one of the goals of this new CLI tool suite.
+
+
+## Feedback and Getting Help
+
+If you have any feedback about CLI tools extensibility,
+don't hesitate to reach out on the [RabbitMQ mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
+
diff --git a/deps/rabbitmq_cli/CONTRIBUTING.md b/deps/rabbitmq_cli/CONTRIBUTING.md
new file mode 100644
index 0000000000..672b29d305
--- /dev/null
+++ b/deps/rabbitmq_cli/CONTRIBUTING.md
@@ -0,0 +1,143 @@
+Thank you for using RabbitMQ and for taking the time to contribute to the project.
+This document has two main parts:
+
+ * when and how to file GitHub issues for RabbitMQ projects
+ * how to submit pull requests
+
+They are intended to save you and RabbitMQ maintainers some time, so please
+take a moment to read through them.
+
+## Overview
+
+### GitHub issues
+
+The RabbitMQ team uses GitHub issues for _specific actionable items_ that
+engineers can work on. This assumes the following:
+
+* GitHub issues are not used for questions, investigations, root cause
+ analysis, discussions of potential issues, etc (as defined by this team)
+* Enough information is provided by the reporter for maintainers to work with
+
+The team receives many questions through various venues every single
+day. Frequently, these questions do not include the necessary details
+the team needs to begin useful work. GitHub issues can very quickly
+turn into something impossible to navigate and make sense
+of. Because of this, questions, investigations, root cause analysis,
+and discussions of potential features are all considered to be
+[mailing list][rmq-users] material. If you are unsure where to begin,
+the [RabbitMQ users mailing list][rmq-users] is the right place.
+
+Getting all the details necessary to reproduce an issue, reach a
+conclusion or even form a hypothesis about what's happening can take a
+fair amount of time. Please help others help you by providing a way to
+reproduce the behavior you're observing, or at least sharing as much
+relevant information as possible on the [RabbitMQ users mailing
+list][rmq-users].
+
+Please provide versions of the software used:
+
+ * RabbitMQ server
+ * Erlang
+ * Operating system version (and distribution, if applicable)
+ * All client libraries used
+ * RabbitMQ plugins (if applicable)
+
+The following information greatly helps in investigating and reproducing issues:
+
+ * RabbitMQ server logs
+ * A code example or terminal transcript that can be used to reproduce
+ * Full exception stack traces (a single line message is not enough!)
+ * `rabbitmqctl report` and `rabbitmqctl environment` output
+ * Other relevant details about the environment and workload, e.g. a traffic capture
+ * Feel free to edit out hostnames and other potentially sensitive information.
+
+To make collecting much of this and other environment information easier, use
+the [`rabbitmq-collect-env`][rmq-collect-env] script. It will produce an archive with
+server logs, operating system logs, output of certain diagnostics commands and so on.
+Please note that **no effort is made to scrub any information that may be sensitive**.
+
+### Pull Requests
+
+RabbitMQ projects use pull requests to discuss, collaborate on and accept code contributions.
+Pull requests are the primary place for discussing code changes.
+
+Here's the recommended workflow:
+
+ * [Fork the repository][github-fork] or repositories you plan on contributing to. If multiple
+ repositories are involved in addressing the same issue, please use the same branch name
+ in each repository
+ * Create a branch with a descriptive name in the relevant repositories
+ * Make your changes, run tests (usually with `make tests`), commit with a
+ [descriptive message][git-commit-msgs], push to your fork
+ * Submit pull requests with an explanation of what has been changed and **why**
+ * Submit a filled out and signed [Contributor Agreement][ca-agreement] if needed (see below)
+ * Be patient. We will get to your pull request eventually
+
+If what you are going to work on is a substantial change, please first
+ask the core team for their opinion on the [RabbitMQ users mailing list][rmq-users].
+
+## Running Tests
+
+Assuming you have:
+
+* Installed [Elixir](http://elixir-lang.org/install.html)
+* A locally running RabbitMQ node with the `rabbitmq_federation` and `rabbitmq_stomp` plugins enabled, e.g.
+
+```
+make run-broker PLUGINS='rabbitmq_federation rabbitmq_stomp'
+```
+
+from a server release repository clone, use
+
+```
+make tests
+```
+
+to run all tests.
+
+### Running a Single Test Case
+
+To run a single test case, use `make test` like so:
+
+```
+make TEST_FILE=test/help_command_test.exs test
+```
+
+And if you want to run in verbose mode, set the `V` make variable:
+
+```
+make TEST_FILE=test/help_command_test.exs V=1 test
+```
+
+NOTE: You may see the following message several times:
+
+```
+warning: variable context is unused
+```
+
+This is nothing to be alarmed about; we're currently using setup context
+functions in Mix to start a new distributed node and connect it to the RabbitMQ
+server. It complains because we don't actually use the context dictionary, but
+it's fine otherwise.
+
+## Code of Conduct
+
+See [CODE_OF_CONDUCT.md](./CODE_OF_CONDUCT.md).
+
+## Contributor Agreement
+
+If you want to contribute a non-trivial change, please submit a signed
+copy of our [Contributor Agreement][ca-agreement] around the time you
+submit your pull request. This will make it much easier (in some
+cases, possible) for the RabbitMQ team at Pivotal to merge your
+contribution.
+
+## Where to Ask Questions
+
+If something isn't clear, feel free to ask on our [mailing list][rmq-users].
+
+[rmq-collect-env]: https://github.com/rabbitmq/support-tools/blob/master/scripts/rabbitmq-collect-env
+[git-commit-msgs]: https://chris.beams.io/posts/git-commit/
+[rmq-users]: https://groups.google.com/forum/#!forum/rabbitmq-users
+[ca-agreement]: https://cla.pivotal.io/sign/rabbitmq
+[github-fork]: https://help.github.com/articles/fork-a-repo/
diff --git a/deps/rabbitmq_cli/DESIGN.md b/deps/rabbitmq_cli/DESIGN.md
new file mode 100644
index 0000000000..4179048159
--- /dev/null
+++ b/deps/rabbitmq_cli/DESIGN.md
@@ -0,0 +1,483 @@
+# New (3.7.0+) RabbitMQ CLI Tools
+
+## Summary
+
+RabbitMQ version 3.7 comes with a brave new CLI tool suite to replace `rabbitmqctl`.
+
+Some of the issues in the older tool suite we wanted to address:
+
+ * Built into the RabbitMQ server code
+ * Home-grown argument parser
+ * Started with `erl` with a lot of installation-dependent parameters
+ * Too many commands in a single tool
+ * All commands resided in the same module (as function clauses)
+
+All this made it hard to maintain and extend the tools.
+
+The new CLI is different in a number of ways that address
+the above issues:
+
+ * Standalone [repository on GitHub](https://github.com/rabbitmq/rabbitmq-cli).
+ * Implemented in Elixir (using Elixir's standard CLI parser)
+ * Each command is its own module
+ * Extensible
+ * Commands can support more output formats (e.g. JSON and CSV)
+ * A single executable file that bundles all dependencies but the Erlang runtime
+ * Command scopes associate commands with tools (e.g. `rabbitmq-diagnostics` reuses relevant commands from `rabbitmqctl`)
+
+## Architecture
+
+Each command is defined in its own module and implements an Elixir (Erlang) behaviour.
+(See [Command behaviour](#command-behaviour))
+
+Output is processed by a formatter, which formats the command output,
+and a printer, which renders it (the default being standard I/O).
+(see [Output formatting](#output-formatting))
+
+The CLI core consists of several modules implementing the command execution process:
+
+ * `RabbitMQCtl`: entry point. Generic execution logic.
+ * `Parser`: responsible for command line argument parsing (drives Elixir's `OptionParser`)
+ * `CommandModules`: responsible for command module discovery and loading
+ * `Config`: responsible for config unification: merges environment variable and command argument values
+ * `Output`: responsible for output formatting
+ * `Helpers`: self-explanatory
+
+### Command Execution Process
+
+#### Arguments parsing
+
+Command line arguments are parsed with [OptionParser](https://elixir-lang.org/docs/stable/elixir/OptionParser.html).
+The parser returns a list of unnamed arguments and a map of options (named arguments).
+The first unnamed argument is the command name.
+Named arguments can be global or command specific.
+Command specific argument names and types are specified in the `switches/0` callback.
+Global argument names are described in [Global arguments](#global-arguments).
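+
+For illustration, a rough sketch of what the parsing stage does under the hood
+(the switch and alias definitions below are simplified assumptions, not the
+CLI's actual global switches):
+
+```
+OptionParser.parse(
+  ["list_queues", "--vhost", "my_vhost", "-t", "10", "name", "pid"],
+  strict: [vhost: :string, timeout: :integer],
+  aliases: [p: :vhost, t: :timeout]
+)
+# => {[vhost: "my_vhost", timeout: 10], ["list_queues", "name", "pid"], []}
+```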
+
+#### Command discovery
+
+If the arguments list is not empty, its first element is considered the command name.
+The command name is converted to CamelCase and a module named
+`RabbitMQ.CLI.*.Commands.<CommandName>Command` is selected as the command module.
+
+The list of available command modules depends on the current tool scope
+(see [Command scopes](#command-scopes)).
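+
+A simplified sketch of this naming convention (an illustration, not the actual
+`CommandModules` implementation):
+
+```
+command_name = "list_queues"
+Module.concat(["RabbitMQ.CLI.Ctl.Commands", Macro.camelize(command_name) <> "Command"])
+# => RabbitMQ.CLI.Ctl.Commands.ListQueuesCommand
+```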
+
+#### Defaults and validation
+
+After the command module is found, effective command arguments are calculated by
+merging global defaults and command specific defaults for both unnamed and named arguments.
+
+A command specifies defaults using the `merge_defaults/2` callback
+(see [Command behaviour](#command-behaviour)).
+
+Arguments are then validated using the `validate/2` callback. `validate_execution_environment/2` is
+another (optional) validation function with the same signature as `validate/2`. It is meant to
+validate everything that is not CLI arguments, for example, whether a file exists or RabbitMQ is running
+or stopped on the target node.
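+
+For illustration, a hypothetical command could implement these callbacks like this
+(the defaults and argument shapes are examples only):
+
+```
+def merge_defaults(args, opts) do
+  # options provided by the user take precedence over the defaults
+  {args, Map.merge(%{vhost: "/"}, opts)}
+end
+
+def validate([], _opts), do: {:validation_failure, :not_enough_args}
+def validate([_queue_name], _opts), do: :ok
+def validate(_args, _opts), do: {:validation_failure, :too_many_args}
+```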
+
+##### Command Aliases
+
+It is possible to define aliases for commands using an aliases file. The file name can be
+specified using the `RABBITMQ_CLI_ALIASES_FILE` environment variable or the `--aliases-file`
+command line argument.
+
+Aliases can be specified using the `alias = command [options]` format.
+For example:
+
+```
+lq = list_queues
+lq_vhost1 = list_queues -p vhost1
+lq_off = list_queues --offline
+```
+
+With such an aliases file, running
+
+```
+RABBITMQ_CLI_ALIASES_FILE=/path/to/aliases.conf rabbitmqctl lq
+```
+
+will be equivalent to executing
+
+```
+RABBITMQ_CLI_ALIASES_FILE=/path/to/aliases.conf rabbitmqctl list_queues
+```
+
+while
+
+```
+RABBITMQ_CLI_ALIASES_FILE=/path/to/aliases.conf rabbitmqctl lq_off
+```
+
+is the same as running
+
+```
+RABBITMQ_CLI_ALIASES_FILE=/path/to/aliases.conf rabbitmqctl list_queues --offline
+```
+
+Builtin or plugin-provided commands are looked up first; if that yields no result,
+aliases are inspected. Therefore it's not possible to override a command by
+configuring an alias.
+
+Just like command lookups, alias expansion happens in the `RabbitMQ.CLI.Core.Parser`
+module.
+
+
+##### Command Aliases with Variables
+
+Aliases can also contain arguments. The command name must be the first word after the `=`.
+Arguments specified in an alias will precede those passed from the command line.
+
+For example, if you specify the alias `passwd_user1 = change_password user1`,
+you can call it with `rabbitmqctl passwd_user1 new_password`.
+
+Combined with the `eval` command, aliases can be a powerful tool for users who cannot or don't want
+to develop, distribute and deploy their own commands via plugins.
+
+For instance, the following alias deletes all queues in a vhost:
+
+```
+delete_vhost_queues = eval '[rabbit_amqqueue:delete(Q, false, false, <<"rabbit-cli">>) || Q <- rabbit_amqqueue:list(_1)]'
+```
+
+This command will require a single positional argument for the vhost:
+
+```
+rabbitmqctl delete_vhost_queues vhost1
+```
+
+It is also possible to use a named vhost argument by specifying an underscore
+variable that's not an integer:
+
+```
+delete_vhost_queues = eval '[rabbit_amqqueue:delete(Q, false, false, <<"rabbit-cli">>) || Q <- rabbit_amqqueue:list(_vhost)]'
+```
+
+Then the alias can be called like this:
+
+```
+rabbitmqctl delete_vhost_queues -p vhost1
+# or
+rabbitmqctl delete_vhost_queues --vhost vhost1
+```
+
+Keep in mind that the `eval` command can accept only [global arguments](#global-arguments) as named arguments,
+so it's advised to use positional arguments instead.
+
+Numbered arguments will be passed to the eval'd code as Elixir strings (or Erlang binaries).
+The code that relies on them should perform type conversion as necessary, e.g. by
+using functions from the [`rabbit_data_coercion` module](https://github.com/rabbitmq/rabbitmq-common/blob/stable/src/rabbit_data_coercion.erl).
+
+#### Command execution
+
+ The command is executed with the `run/2` callback, which contains the main command logic.
+ This callback can return any value.
+
+ Output from the `run/2` callback is processed with the `output/2` callback,
+ which should format the output to a specific type
+ (see [Output formatting](#output-formatting)).
+
+ The output callback can return an error with a specific exit code.
+
+#### Printing and formatting
+
+ The `output/2` callback return value is processed in the `output.ex` module
+ by the appropriate formatter and printer.
+
+ A formatter translates the output value to a sequence of strings or an error value.
+ Example formatters are `json`, `csv` and `erlang`.
+
+ A printer renders formatted strings to an output device (e.g. stdout or a file).
+
+#### Return
+
+ Errors during execution (e.g. validation failures, command errors) are printed
+ to `stderr`.
+
+ If a command fails to execute, a non-zero exit code is returned.
+
+
+## Usage
+
+The CLI tool is an Elixir Mix application. It is compiled into an escript
+executable file.
+
+This file embeds the Elixir, rabbit_common, and rabbitmqcli applications and can be executed anywhere
+`escript` is installed (it comes as a part of `erlang`).
+
+However, some commands require the RabbitMQ broker code and data directories to be known.
+Examples include commands in the `rabbitmq-plugins` tool and those controlling clustering
+(e.g. `forget_cluster_node`, `rename_cluster_node` ...).
+
+Those directories can be defined using environment arguments or RabbitMQ environment variables.
+
+In the broker distribution the escript file is called from a shell/cmd script, which loads
+the broker environment and exports it to the escript.
+
+Environment variables also specify the locations of the enabled plugins file
+and the plugins directory.
+
+All enabled plugins will be searched for commands. Commands from enabled plugins will
+be shown in usage and available for execution.
+
+### Global Arguments
+
+#### Commonly Used Arguments
+
+ * node (n): atom, broker node name, defaults to `rabbit@<current host>`
+ * quiet (q): boolean, if set to `true`, command banner is not shown, defaults to `false`
+ * timeout (t): integer, timeout value in **seconds** (used in some commands), defaults to `infinity`
+ * vhost (p): string, vhost to talk to, defaults to `/`
+ * formatter: string, formatter to use to format command output. (see [Output formatting](#output-formatting))
+ * printer: string, printer to render output (see [Output formatting](#output-formatting))
+ * dry-run: boolean, if specified the command will not run but only print the banner
+
+#### Environment Arguments
+
+ * script-name: atom, configurable tool name (`rabbitmq-plugins`, `rabbitmqctl`) to select command scope (see [Command scopes](#command-scopes))
+ * rabbitmq-home: string, broker install directory
+ * mnesia-dir: string, broker mnesia data directory
+ * plugins-dir: string, broker plugins directory
+ * enabled-plugins-file: string, broker enabled plugins file
+ * longnames (l): boolean, use longnames to communicate with broker erlang node. Should be set to `true` only if broker is started with longnames.
+ * aliases-file: string, a file name to load aliases from
+ * erlang-cookie: atom, an [erlang distribution cookie](http://erlang.org/doc/reference_manual/distributed.html)
+
+Environment argument defaults are loaded from rabbitmq environment variables (see [Environment configuration](#environment-configuration)).
+
+Some named arguments have single-letter aliases (shown in parentheses).
+
+Boolean options without a value are parsed as `true`.
+
+For example, parsing the command
+
+ rabbitmqctl list_queues --vhost my_vhost -t 10 --formatter=json name pid --quiet
+
+will result in the unnamed arguments list `["list_queues", "name", "pid"]`
+and the named options map `%{vhost: "my_vhost", timeout: 10, formatter: "json", quiet: true}`.
+
+
+### Usage (Listing Commands in `help`)
+
+Usage is shown when the CLI is called without any arguments, or if there are some
+problems parsing arguments.
+
+In those cases the exit code is `64`.
+
+To show usage and return a `0` exit code, run the `help` command:
+
+ rabbitmqctl help
+
+Each tool (`rabbitmqctl`, `rabbitmq-plugins`) shows its own scope of commands (see [Command scopes](#command-scopes)).
+
+## Command Behaviour
+
+Each command is implemented as an Elixir (or Erlang) module. A command module should
+implement the `RabbitMQ.CLI.CommandBehaviour` behaviour.
+
+Behaviour summary:
+
+The following functions MUST be implemented in a command module:
+
+ usage() :: String.t | [String.t]
+
+The command usage string, or several strings (one per line), to print in the command listing in usage output.
+It typically looks like `command_name [arg] --option=opt_value`.
+
+ banner(arguments :: List.t, options :: Map.t) :: String.t
+
+The banner to print before command execution.
+Ignored if the `--quiet` argument is specified.
+If the `--dry-run` argument is specified, the CLI will only print the banner.
+
+ merge_defaults(arguments :: List.t, options :: Map.t) :: {List.t, Map.t}
+
+Merges default values for arguments and options (named arguments).
+Returns a tuple with the effective arguments and options that will be passed to `validate/2` and `run/2`.
+
+ validate(arguments :: List.t, options :: Map.t) :: :ok | {:validation_failure, Atom.t | {Atom.t, String.t}}
+
+Validates effective arguments and options.
+
+If this function returns `{:validation_failure, err}`,
+the CLI will print usage to `stderr` and exit with a non-zero exit code (typically 64).
+
+ run(arguments :: List.t, options :: Map.t) :: run_result :: any
+
+Runs the command. This function usually performs an RPC call to the broker.
+
+ output(run_result :: any, options :: Map.t) :: :ok | {:ok, output :: any} | {:stream, Enum.t} | {:error, ExitCodes.exit_code, [String.t]}
+
+Casts the return value of the `run/2` callback to a formattable value and an exit code.
+
+- `:ok` - return a `0` exit code and don't print anything
+
+- `{:ok, output}` - return a `0` exit code and print the output with the `format_output/2` formatter callback
+
+- `{:stream, stream}` - format with the `format_stream/2` formatter callback, iterating over the enumerable elements.
+Can return a non-zero exit code if an error occurs during stream processing
+(the stream element for an error should be `{:error, Message}`).
+
+- `{:error, exit_code, strings}` - print the error messages to `stderr` and return `exit_code` as the exit code
+
+There is a default implementation for this callback in the `DefaultOutput` module.
+
+Most of the standard commands use the default implementation via `use RabbitMQ.CLI.DefaultOutput`.
+
+The following functions are optional:
+
+ switches() :: Keyword.t
+
+Keyword list of switches (argument names and types) for the command.
+
+For example: `def switches(), do: [offline: :boolean, time: :integer]` will
+parse `--offline --time=100` arguments to `%{offline: true, time: 100}` options.
+
+These switches are added to the global switches (see [Arguments parsing](#arguments-parsing)).
+
+ aliases() :: Keyword.t
+
+Keyword list of one-letter aliases for argument names.
+For example: `[o: :offline, t: :timeout]`
+(see [Arguments parsing](#arguments-parsing))
+
+ usage_additional() :: String.t | [String.t]
+
+Additional usage strings to print after the basic usage of all commands.
+Used to explain additional arguments without interfering with the command listing.
+
+ formatter() :: Atom.t
+
+Default formatter for the command.
+Should be the name of a module implementing the `RabbitMQ.CLI.FormatterBehaviour` behaviour (see [Output formatting](#output-formatting)).
+
+ scopes() :: [Atom.t]
+
+List of scopes to include the command in (see [Command scopes](#command-scopes)).
+
+More information about command development can be found in [the command tutorial](COMMAND_TUTORIAL.md).
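+
+Putting the required callbacks together, a minimal command module could look like
+this (a hypothetical example, not a command shipped with the distribution):
+
+```
+defmodule RabbitMQ.CLI.Ctl.Commands.PingCommand do
+  @behaviour RabbitMQ.CLI.CommandBehaviour
+  # default output/2 implementation
+  use RabbitMQ.CLI.DefaultOutput
+
+  def usage(), do: "ping"
+
+  def banner(_args, %{node: node_name}), do: "Pinging node #{node_name} ..."
+
+  def merge_defaults(args, opts), do: {args, opts}
+
+  def validate([], _opts), do: :ok
+  def validate(_args, _opts), do: {:validation_failure, :too_many_args}
+
+  def run([], %{node: node_name}) do
+    # most commands perform an RPC call to the broker node instead
+    {:ok, :net_adm.ping(node_name)}
+  end
+end
+```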
+
+## Command Scopes
+
+Commands can be organized in scopes to be used in different tools
+like `rabbitmq-diagnostics` or `rabbitmq-plugins`.
+
+One command can belong to multiple scopes. Scopes for a command can be
+defined in the `scopes/0` callback in the command module.
+Each scope is defined as an atom value.
+
+By default a command scope is selected using a naming convention.
+If a command module is called `RabbitMQ.CLI.MyScope.Commands.DoSomethingCommand`, it will belong to
+the `my_scope` scope. A scope should be defined in snake_case; the namespace for the scope is translated to CamelCase.
+
+When the CLI is run, a scope is selected by the script name, which is the escript file name
+or the value of the `--script-name` argument.
+
+A script name is associated with a single scope in the application environment.
+
+Script names for scopes:
+
+ * `rabbitmqctl` - `:ctl`
+ * `rabbitmq-plugins` - `:plugins`
+ * `rabbitmq-diagnostics` - `:diagnostics`
+
+This environment is extended by plugins' `:scopes` environment variables,
+but cannot be overridden. Plugin scopes can override each other,
+so this feature should be used with caution.
+
+So all the commands in the `RabbitMQ.CLI.Ctl.Commands` namespace will be available
+to the `rabbitmqctl` script.
+All the commands in the `RabbitMQ.CLI.Plugins.Commands` namespace will be available
+to the `rabbitmq-plugins` script.
+
+To add a command to `rabbitmqctl`, one should either name it with
+the `RabbitMQ.CLI.Ctl.Commands` prefix or add the `scopes()` callback,
+returning a list that includes the `:ctl` element. For example:
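+
+```
+# makes the command available to both rabbitmqctl and rabbitmq-diagnostics
+def scopes(), do: [:ctl, :diagnostics]
+```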
+
+## Output Formatting
+
+The CLI supports extensible output formatting. Formatting consists of two stages:
+
+ * formatting - translating command output to a sequence of lines
+ * printing - outputting the lines to some device (e.g. stdout or filesystem)
+
+A formatter module performs formatting.
+A formatter is a module implementing the `RabbitMQ.CLI.FormatterBehaviour` behaviour:
+
+ format_output(output :: any, options :: Map.t) :: String.t | [String.t]
+
+Formats a single return value. It accepts the command output and named arguments (options)
+and returns a list of strings that should be printed.
+
+ format_stream(output_stream :: Enumerable.t, options :: Map.t) :: Enumerable.t
+
+Formats a stream of return values. This function uses the Elixir
+[Stream](https://elixir-lang.org/docs/stable/elixir/Stream.html) abstraction
+to define processing of continuous data, so the CLI can output data in real time.
+
+Used in `list_*` commands that emit data asynchronously.
+
+`DefaultOutput` will return all enumerable data as a stream,
+so it will be formatted with this function.
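+
+A minimal sketch of a custom formatter (a hypothetical module shown for
+illustration only):
+
+```
+defmodule RabbitMQ.CLI.Formatters.Inspect do
+  @behaviour RabbitMQ.CLI.FormatterBehaviour
+
+  def format_output(output, _opts), do: inspect(output)
+
+  def format_stream(stream, opts) do
+    Stream.map(stream, fn
+      # error elements pass through unchanged so the CLI can detect them
+      {:error, _msg} = err -> err
+      element -> format_output(element, opts)
+    end)
+  end
+end
+```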
+
+A printer module performs printing. It should implement
+the `RabbitMQ.CLI.PrinterBehaviour` behaviour:
+
+ init(options :: Map.t) :: {:ok, printer_state :: any} | {:error, error :: any}
+
+Initializes the internal printer state (e.g. opens a file handle).
+
+ finish(printer_state :: any) :: :ok
+
+Finalizes the internal printer state.
+
+ print_output(output :: String.t | [String.t], printer_state :: any) :: :ok
+
+Prints the output lines in the printer state context.
+It is called for `{:ok, val}` command output after formatting `val` using the formatter,
+and for each element of a `{:stream, enum}` enumerable.
+
+ print_ok(printer_state :: any) :: :ok
+
+Prints output without any values. It is called for the `:ok` command return value.
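+
+A sketch of a printer that writes to a file (hypothetical; the `:output_file`
+option name is an assumption):
+
+```
+defmodule RabbitMQ.CLI.Printers.FilePrinter do
+  @behaviour RabbitMQ.CLI.PrinterBehaviour
+
+  def init(%{output_file: path}) do
+    # open a file handle and keep it in the printer state
+    case File.open(path, [:write]) do
+      {:ok, device} -> {:ok, %{device: device}}
+      {:error, reason} -> {:error, reason}
+    end
+  end
+
+  def finish(%{device: device}) do
+    _ = File.close(device)
+    :ok
+  end
+
+  def print_output(lines, %{device: device}) when is_list(lines) do
+    Enum.each(lines, fn line -> IO.puts(device, line) end)
+  end
+  def print_output(line, %{device: device}), do: IO.puts(device, line)
+
+  def print_ok(_state), do: :ok
+end
+```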
+
+Output formatting logic is defined in the `output.ex` module.
+
+The following rules apply to a value returned from the `output/2` callback:
+
+ * `:ok` - initializes a printer, calls `print_ok` and finishes the printer. Exit code is `0`.
+ * `{:ok, value}` - calls `format_output/2` from formatter, then passes the value to printer. Exit code is `0`.
+ * `{:stream, enum}` - calls `format_stream/2` to augment the stream with formatting logic,
+initializes a printer, then calls `print_output/2` for each successfully formatted stream
+element (which is not `{:error, msg}`). Exit code is `0` if there were no errors.
+In case of an error element, stream processing stops, the error is printed to `stderr`,
+and the CLI exits with a non-zero exit code.
+ * `{:error, exit_code, msg}` - prints `msg` to `stderr` and exits with `exit_code`
+
+
+## Environment Configuration
+
+Some commands require information about the server environment to run correctly.
+This information includes:
+
+ * rabbitmq code directory
+ * rabbitmq mnesia directory
+ * enabled plugins file
+ * plugins directory
+
+The enabled plugins file and plugins directory are also used to locate plugin commands.
+
+This information can be provided using command line arguments (see [Environment arguments](#environment-arguments)).
+
+By default it will be loaded from environment variables, the same ones used by the RabbitMQ broker:
+
+| Argument name | Environment variable |
+|----------------------|-------------------------------|
+| rabbitmq-home | RABBITMQ_HOME |
+| mnesia-dir | RABBITMQ_MNESIA_DIR |
+| plugins-dir | RABBITMQ_PLUGINS_DIR |
+| enabled-plugins-file | RABBITMQ_ENABLED_PLUGINS_FILE |
+| longnames | RABBITMQ_USE_LONGNAME |
+| node | RABBITMQ_NODENAME |
+| aliases-file | RABBITMQ_CLI_ALIASES_FILE |
+| erlang-cookie | RABBITMQ_ERLANG_COOKIE |
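+
+For example, a command that needs the node data directory can be pointed at it
+using the corresponding environment variable (the paths and node names below are
+illustrative):
+
+```
+RABBITMQ_MNESIA_DIR=/var/lib/rabbitmq/mnesia/rabbit@host rabbitmqctl forget_cluster_node rabbit@old_host --offline
+```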
diff --git a/deps/rabbitmq_cli/LICENSE b/deps/rabbitmq_cli/LICENSE
new file mode 100644
index 0000000000..f2da65d175
--- /dev/null
+++ b/deps/rabbitmq_cli/LICENSE
@@ -0,0 +1,4 @@
+This package is licensed under the MPL 2.0. For the MPL 2.0, please see LICENSE-MPL-RabbitMQ.
+
+If you have any questions regarding licensing, please contact us at
+info@rabbitmq.com.
diff --git a/deps/rabbitmq_cli/LICENSE-MPL-RabbitMQ b/deps/rabbitmq_cli/LICENSE-MPL-RabbitMQ
new file mode 100644
index 0000000000..14e2f777f6
--- /dev/null
+++ b/deps/rabbitmq_cli/LICENSE-MPL-RabbitMQ
@@ -0,0 +1,373 @@
+Mozilla Public License Version 2.0
+==================================
+
+1. Definitions
+--------------
+
+1.1. "Contributor"
+ means each individual or legal entity that creates, contributes to
+ the creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+ means the combination of the Contributions of others (if any) used
+ by a Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+ means Source Code Form to which the initial Contributor has attached
+ the notice in Exhibit A, the Executable Form of such Source Code
+ Form, and Modifications of such Source Code Form, in each case
+ including portions thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ (a) that the initial Contributor has attached the notice described
+ in Exhibit B to the Covered Software; or
+
+ (b) that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the
+ terms of a Secondary License.
+
+1.6. "Executable Form"
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+ means a work that combines Covered Software with other material, in
+ a separate file or files, that is not Covered Software.
+
+1.8. "License"
+ means this document.
+
+1.9. "Licensable"
+ means having the right to grant, to the maximum extent possible,
+ whether at the time of the initial grant or subsequently, any and
+ all of the rights conveyed by this License.
+
+1.10. "Modifications"
+ means any of the following:
+
+ (a) any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered
+ Software; or
+
+ (b) any new file in Source Code Form that contains any Covered
+ Software.
+
+1.11. "Patent Claims" of a Contributor
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the
+ License, by the making, using, selling, offering for sale, having
+ made, import, or transfer of either its Contributions or its
+ Contributor Version.
+
+1.12. "Secondary License"
+ means either the GNU General Public License, Version 2.0, the GNU
+ Lesser General Public License, Version 2.1, the GNU Affero General
+ Public License, Version 3.0, or any later versions of those
+ licenses.
+
+1.13. "Source Code Form"
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that
+ controls, is controlled by, or is under common control with You. For
+ purposes of this definition, "control" means (a) the power, direct
+ or indirect, to cause the direction or management of such entity,
+ whether by contract or otherwise, or (b) ownership of more than
+ fifty percent (50%) of the outstanding shares or beneficial
+ ownership of such entity.
+
+2. License Grants and Conditions
+--------------------------------
+
+2.1. Grants
+
+Each Contributor hereby grants You a world-wide, royalty-free,
+non-exclusive license:
+
+(a) under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+(b) under Patent Claims of such Contributor to make, use, sell, offer
+ for sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+The licenses granted in Section 2.1 with respect to any Contribution
+become effective for each Contribution on the date the Contributor first
+distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+The licenses granted in this Section 2 are the only rights granted under
+this License. No additional rights or licenses will be implied from the
+distribution or licensing of Covered Software under this License.
+Notwithstanding Section 2.1(b) above, no patent license is granted by a
+Contributor:
+
+(a) for any code that a Contributor has removed from Covered Software;
+ or
+
+(b) for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+(c) under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+This License does not grant any rights in the trademarks, service marks,
+or logos of any Contributor (except as may be necessary to comply with
+the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+No Contributor makes additional grants as a result of Your choice to
+distribute the Covered Software under a subsequent version of this
+License (see Section 10.2) or under the terms of a Secondary License (if
+permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+Each Contributor represents that the Contributor believes its
+Contributions are its original creation(s) or it has sufficient rights
+to grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+This License is not intended to limit any rights You have under
+applicable copyright doctrines of fair use, fair dealing, or other
+equivalents.
+
+2.7. Conditions
+
+Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
+in Section 2.1.
+
+3. Responsibilities
+-------------------
+
+3.1. Distribution of Source Form
+
+All distribution of Covered Software in Source Code Form, including any
+Modifications that You create or to which You contribute, must be under
+the terms of this License. You must inform recipients that the Source
+Code Form of the Covered Software is governed by the terms of this
+License, and how they can obtain a copy of this License. You may not
+attempt to alter or restrict the recipients' rights in the Source Code
+Form.
+
+3.2. Distribution of Executable Form
+
+If You distribute Covered Software in Executable Form then:
+
+(a) such Covered Software must also be made available in Source Code
+ Form, as described in Section 3.1, and You must inform recipients of
+ the Executable Form how they can obtain a copy of such Source Code
+ Form by reasonable means in a timely manner, at a charge no more
+ than the cost of distribution to the recipient; and
+
+(b) You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter
+ the recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+You may create and distribute a Larger Work under terms of Your choice,
+provided that You also comply with the requirements of this License for
+the Covered Software. If the Larger Work is a combination of Covered
+Software with a work governed by one or more Secondary Licenses, and the
+Covered Software is not Incompatible With Secondary Licenses, this
+License permits You to additionally distribute such Covered Software
+under the terms of such Secondary License(s), so that the recipient of
+the Larger Work may, at their option, further distribute the Covered
+Software under the terms of either this License or such Secondary
+License(s).
+
+3.4. Notices
+
+You may not remove or alter the substance of any license notices
+(including copyright notices, patent notices, disclaimers of warranty,
+or limitations of liability) contained within the Source Code Form of
+the Covered Software, except that You may alter any license notices to
+the extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+You may choose to offer, and to charge a fee for, warranty, support,
+indemnity or liability obligations to one or more recipients of Covered
+Software. However, You may do so only on Your own behalf, and not on
+behalf of any Contributor. You must make it absolutely clear that any
+such warranty, support, indemnity, or liability obligation is offered by
+You alone, and You hereby agree to indemnify every Contributor for any
+liability incurred by such Contributor as a result of warranty, support,
+indemnity or liability terms You offer. You may include additional
+disclaimers of warranty and limitations of liability specific to any
+jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+---------------------------------------------------
+
+If it is impossible for You to comply with any of the terms of this
+License with respect to some or all of the Covered Software due to
+statute, judicial order, or regulation then You must: (a) comply with
+the terms of this License to the maximum extent possible; and (b)
+describe the limitations and the code they affect. Such description must
+be placed in a text file included with all distributions of the Covered
+Software under this License. Except to the extent prohibited by statute
+or regulation, such description must be sufficiently detailed for a
+recipient of ordinary skill to be able to understand it.
+
+5. Termination
+--------------
+
+5.1. The rights granted under this License will terminate automatically
+if You fail to comply with any of its terms. However, if You become
+compliant, then the rights granted under this License from a particular
+Contributor are reinstated (a) provisionally, unless and until such
+Contributor explicitly and finally terminates Your grants, and (b) on an
+ongoing basis, if such Contributor fails to notify You of the
+non-compliance by some reasonable means prior to 60 days after You have
+come back into compliance. Moreover, Your grants from a particular
+Contributor are reinstated on an ongoing basis if such Contributor
+notifies You of the non-compliance by some reasonable means, this is the
+first time You have received notice of non-compliance with this License
+from such Contributor, and You become compliant prior to 30 days after
+Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+infringement claim (excluding declaratory judgment actions,
+counter-claims, and cross-claims) alleging that a Contributor Version
+directly or indirectly infringes any patent, then the rights granted to
+You by any and all Contributors for the Covered Software under Section
+2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all
+end user license agreements (excluding distributors and resellers) which
+have been validly granted by You or Your distributors under this License
+prior to termination shall survive termination.
+
+************************************************************************
+* *
+* 6. Disclaimer of Warranty *
+* ------------------------- *
+* *
+* Covered Software is provided under this License on an "as is" *
+* basis, without warranty of any kind, either expressed, implied, or *
+* statutory, including, without limitation, warranties that the *
+* Covered Software is free of defects, merchantable, fit for a *
+* particular purpose or non-infringing. The entire risk as to the *
+* quality and performance of the Covered Software is with You. *
+* Should any Covered Software prove defective in any respect, You *
+* (not any Contributor) assume the cost of any necessary servicing, *
+* repair, or correction. This disclaimer of warranty constitutes an *
+* essential part of this License. No use of any Covered Software is *
+* authorized under this License except under this disclaimer. *
+* *
+************************************************************************
+
+************************************************************************
+* *
+* 7. Limitation of Liability *
+* -------------------------- *
+* *
+* Under no circumstances and under no legal theory, whether tort *
+* (including negligence), contract, or otherwise, shall any *
+* Contributor, or anyone who distributes Covered Software as *
+* permitted above, be liable to You for any direct, indirect, *
+* special, incidental, or consequential damages of any character *
+* including, without limitation, damages for lost profits, loss of *
+* goodwill, work stoppage, computer failure or malfunction, or any *
+* and all other commercial damages or losses, even if such party *
+* shall have been informed of the possibility of such damages. This *
+* limitation of liability shall not apply to liability for death or *
+* personal injury resulting from such party's negligence to the *
+* extent applicable law prohibits such limitation. Some *
+* jurisdictions do not allow the exclusion or limitation of *
+* incidental or consequential damages, so this exclusion and *
+* limitation may not apply to You. *
+* *
+************************************************************************
+
+8. Litigation
+-------------
+
+Any litigation relating to this License may be brought only in the
+courts of a jurisdiction where the defendant maintains its principal
+place of business and such litigation shall be governed by laws of that
+jurisdiction, without reference to its conflict-of-law provisions.
+Nothing in this Section shall prevent a party's ability to bring
+cross-claims or counter-claims.
+
+9. Miscellaneous
+----------------
+
+This License represents the complete agreement concerning the subject
+matter hereof. If any provision of this License is held to be
+unenforceable, such provision shall be reformed only to the extent
+necessary to make it enforceable. Any law or regulation which provides
+that the language of a contract shall be construed against the drafter
+shall not be used to construe this License against a Contributor.
+
+10. Versions of the License
+---------------------------
+
+10.1. New Versions
+
+Mozilla Foundation is the license steward. Except as provided in Section
+10.3, no one other than the license steward has the right to modify or
+publish new versions of this License. Each version will be given a
+distinguishing version number.
+
+10.2. Effect of New Versions
+
+You may distribute the Covered Software under the terms of the version
+of the License under which You originally received the Covered Software,
+or under the terms of any subsequent version published by the license
+steward.
+
+10.3. Modified Versions
+
+If you create software not governed by this License, and you want to
+create a new license for such software, you may create and use a
+modified version of this License if you rename the license and remove
+any references to the name of the license steward (except to note that
+such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+Licenses
+
+If You choose to distribute Source Code Form that is Incompatible With
+Secondary Licenses under the terms of this version of the License, the
+notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+-------------------------------------------
+
+ This Source Code Form is subject to the terms of the Mozilla Public
+ License, v. 2.0. If a copy of the MPL was not distributed with this
+ file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular
+file, then You may include the notice in a location (such as a LICENSE
+file in a relevant directory) where a recipient would be likely to look
+for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+---------------------------------------------------------
+
+ This Source Code Form is "Incompatible With Secondary Licenses", as
+ defined by the Mozilla Public License, v. 2.0.
diff --git a/deps/rabbitmq_cli/Makefile b/deps/rabbitmq_cli/Makefile
new file mode 100644
index 0000000000..856f39e605
--- /dev/null
+++ b/deps/rabbitmq_cli/Makefile
@@ -0,0 +1,160 @@
+PROJECT = rabbitmq_cli
+
+dep_observer_cli = git https://github.com/zhongwencool/observer_cli 1.4.4
+
+BUILD_DEPS = rabbit_common
+DEPS = observer_cli
+TEST_DEPS = amqp_client rabbit
+
+DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-plugin.mk
+DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk
+
+VERBOSE_TEST ?= true
+MAX_CASES ?= 1
+
+MIX_TEST_OPTS ?= ""
+MIX_TEST = mix test --max-cases=$(MAX_CASES)
+
+ifneq ("",$(MIX_TEST_OPTS))
+MIX_TEST := $(MIX_TEST) $(MIX_TEST_OPTS)
+endif
+
+ifeq ($(VERBOSE_TEST),true)
+MIX_TEST := $(MIX_TEST) --trace
+endif
+
+# FIXME: Use erlang.mk patched for RabbitMQ, while waiting for PRs to be
+# reviewed and merged.
+
+ERLANG_MK_REPO = https://github.com/rabbitmq/erlang.mk.git
+ERLANG_MK_COMMIT = rabbitmq-tmp
+
+WITHOUT = plugins/cover \
+ plugins/ct \
+ plugins/dialyzer \
+ plugins/eunit \
+ plugins/proper \
+ plugins/triq
+
+include rabbitmq-components.mk
+include erlang.mk
+
+# rabbitmq-mix.mk is generated during the creation of the RabbitMQ
+# source archive. It sets some environment variables to allow
+# rabbitmq_cli to build offline, using the bundled sources only.
+-include rabbitmq-mix.mk
+
+ACTUAL_ESCRIPTS = escript/rabbitmqctl
+LINKED_ESCRIPTS = escript/rabbitmq-plugins \
+ escript/rabbitmq-diagnostics \
+ escript/rabbitmq-queues \
+ escript/rabbitmq-streams \
+ escript/rabbitmq-upgrade
+ESCRIPTS = $(ACTUAL_ESCRIPTS) $(LINKED_ESCRIPTS)
+
+# Record the build and link dependency: the target files are linked to
+# their first dependency.
+rabbitmq-plugins = escript/rabbitmqctl
+rabbitmq-diagnostics = escript/rabbitmqctl
+rabbitmq-queues = escript/rabbitmqctl
+rabbitmq-streams = escript/rabbitmqctl
+rabbitmq-upgrade = escript/rabbitmqctl
+escript/rabbitmq-plugins escript/rabbitmq-diagnostics escript/rabbitmq-queues escript/rabbitmq-streams escript/rabbitmq-upgrade: escript/rabbitmqctl
+
+# We use hardlinks or symlinks in the `escript` directory and
+# install's PREFIX when a single escript can have several names (eg.
+# rabbitmqctl, rabbitmq-plugins and rabbitmq-diagnostics).
+#
+# Hardlinks and symlinks work on Windows. However, symlinks require
+# privileges unlike hardlinks. That's why we default to hardlinks,
+# unless USE_SYMLINKS_IN_ESCRIPTS_DIR is set.
+#
+# The link_escript function is called as:
+# $(call link_escript,source,target)
+#
+# The function assumes all escripts live in the same directory and that
+# the source was previously copied in that directory.
+
+ifdef USE_SYMLINKS_IN_ESCRIPTS_DIR
+link_escript = ln -sf "$(notdir $(1))" "$(2)"
+else
+link_escript = ln -f "$(dir $(2))$(notdir $(1))" "$(2)"
+endif
+
+app:: $(ESCRIPTS)
+ @:
+
+rabbitmqctl_srcs := mix.exs \
+ $(shell find config lib -name "*.ex" -o -name "*.exs")
+
+# Elixir dependencies are fetched and compiled as part of the alias
+# `mix make_all`. We do not fetch and build them in `make deps` because
+# mix(1) startup time is quite high. Thus we prefer to run it once, even
+# though it kind of breaks the Erlang.mk model.
+#
+# We write `y` on mix stdin because it asks approval to install Hex if
+# it's missing. Another way to do it is to use `mix local.hex` but it
+# can't be integrated in an alias and doing it from the Makefile isn't
+# practical.
+#
+# We also verify if the CLI is built from the RabbitMQ source archive
+# (by checking if the Hex registry/cache is present). If it is, we use
+# another alias. This alias does exactly the same thing as `make_all`,
+# but calls `deps.get --only prod` instead of `deps.get`. This is what
+# we do to create the source archive, and we must do the same here,
+# otherwise mix(1) complains about missing dependencies (the non-prod
+# ones).
+$(ACTUAL_ESCRIPTS): $(rabbitmqctl_srcs)
+ $(gen_verbose) if test -d ../.hex; then \
+ echo y | mix make_all_in_src_archive; \
+ else \
+ echo y | mix make_all; \
+ fi
+
+$(LINKED_ESCRIPTS):
+ $(verbose) rm -f "$@"
+ $(gen_verbose) $(call link_escript,$<,$@)
+
+rel:: $(ESCRIPTS)
+ @:
+
+tests:: $(ESCRIPTS)
+ $(gen_verbose) $(MIX_TEST) $(TEST_FILE)
+
+.PHONY: test
+
+test:: $(ESCRIPTS)
+ifdef TEST_FILE
+ $(gen_verbose) $(MIX_TEST) $(TEST_FILE)
+else
+ $(verbose) echo "TEST_FILE must be set, e.g. TEST_FILE=./test/ctl" 1>&2; false
+endif
+
+dialyzer:: $(ESCRIPTS)
+ MIX_ENV=test mix dialyzer
+
+.PHONY: install
+
+install: $(ESCRIPTS)
+ifdef PREFIX
+ $(gen_verbose) mkdir -p "$(DESTDIR)$(PREFIX)"
+ $(verbose) $(foreach script,$(ACTUAL_ESCRIPTS), \
+ cmp -s "$(script)" "$(DESTDIR)$(PREFIX)/$(notdir $(script))" || \
+ cp "$(script)" "$(DESTDIR)$(PREFIX)/$(notdir $(script))";)
+ $(verbose) $(foreach script,$(LINKED_ESCRIPTS), \
+ $(call link_escript,$($(notdir $(script))),$(DESTDIR)$(PREFIX)/$(notdir $(script)));)
+else
+ $(verbose) echo "You must specify a PREFIX" 1>&2; false
+endif
+
+clean:: clean-mix
+
+clean-mix:
+ $(gen_verbose) rm -f $(ESCRIPTS)
+ $(verbose) echo y | mix clean
+
+format:
+ $(verbose) mix format lib/**/*.ex
+
+repl:
+ $(verbose) iex --sname repl -S mix
diff --git a/deps/rabbitmq_cli/README.md b/deps/rabbitmq_cli/README.md
new file mode 100644
index 0000000000..1d1c1664d0
--- /dev/null
+++ b/deps/rabbitmq_cli/README.md
@@ -0,0 +1,129 @@
+# RabbitMQ CLI Tools
+
+[![Build Status](https://travis-ci.org/rabbitmq/rabbitmq-cli.svg?branch=master)](https://travis-ci.org/rabbitmq/rabbitmq-cli)
+
+This repository contains [RabbitMQ CLI tools](https://rabbitmq.com/cli.html) ([rabbitmqctl](https://www.rabbitmq.com/man/rabbitmqctl.1.man.html) and
+others).
+
+This generation of CLI tools first shipped with RabbitMQ `3.7.0`.
+
+
+## Goals
+
+Team RabbitMQ wanted a set of tools that
+
+ * Was extensible from/with plugins
+ * Supported pluggable output formats (in particular machine-friendly ones)
+ * Had good test coverage
+ * Wasn't as coupled to the server repository
+ * Could be used as a low risk vehicle for [Elixir](https://elixir-lang.org) evaluation
+
+## Supported RabbitMQ Versions
+
+Long lived branches in this repository track the same branch in RabbitMQ core and related
+repositories. So `master` tracks `master` in rabbitmq-server, `v3.7.x` tracks branch `v3.7.x` in
+rabbitmq-server and so on.
+
+Please use the version of the CLI tools that comes with the installed RabbitMQ distribution.
+
+
+## Building
+
+### Requirements
+
+Building this project requires
+
+ * Erlang/OTP 21.3 (or later)
+ * [Elixir](https://elixir-lang.org/) 1.10.0 (or later).
+
+Command line tools depend on [rabbitmq-common](https://github.com/rabbitmq/rabbitmq-common).
+Dependencies are resolved by `erlang.mk`.
+
+### Building Standalone Executables
+
+This repo produces a `rabbitmqctl` executable which can be used as different tools
+(`rabbitmq-plugins`, `rabbitmq-diagnostics`, `rabbitmq-queues`, `rabbitmq-streams`, `rabbitmq-upgrade`) by copying or symlinking it with different names.
+Depending on the name, a different set of commands will be loaded and available, including
+for `--help`.
+
+To generate the executable, run
+
+```
+make
+```
+
+## Usage
+
+### `rabbitmqctl`
+
+See `rabbitmqctl help` and [rabbitmqctl man page](https://www.rabbitmq.com/man/rabbitmqctl.1.man.html) for details.
+
+### `rabbitmq-plugins`
+
+See `rabbitmq-plugins help` and [rabbitmq-plugins man page](https://www.rabbitmq.com/man/rabbitmq-plugins.1.man.html) for details.
+
+### `rabbitmq-diagnostics`
+
+See `rabbitmq-diagnostics help` and [rabbitmq-diagnostics man page](https://www.rabbitmq.com/rabbitmq-diagnostics.8.html).
+
+
+## Testing
+
+See [CONTRIBUTING.md](CONTRIBUTING.md).
+
+
+## Developing
+
+### Adding a New Command
+
+#### Conventions
+
+RabbitMQ CLI tools use module name conventions to match the command-line
+actions (commands) to modules. The convention is outlined in the `CommandBehaviour` module.
+
+#### Command Module Interface
+
+Each command module must implement the `RabbitMQ.CLI.CommandBehaviour` behaviour,
+which includes the following functions:
+
+ * `validate(args, opts)`, which returns either `:ok` or a tuple of `{:validation_failure, failure_detail}` where failure detail is typically one of: `:too_many_args`, `:not_enough_args` or `{:bad_argument, String.t}`.
+
+ * `merge_defaults(args, opts)`, which is used to return updated arguments and/or options.
+
+ * `run(args, opts)`, where the actual command is implemented. Here, `args` is a list of command-specific parameters and `opts` is a Map containing option flags.
+
+ * `usage`, which returns a string describing the command, its arguments and its optional flags.
+ * `banner(args, opts)`, which returns a string to be printed before the command output.
+
+There are also a number of optional callbacks:
+
+ * `switches`, which returns command specific switches.
+ * `aliases`, which returns a list of command aliases (if any).
+ * `formatter`: what output formatter should be used by default.
+ * `usage_additional`: extra values appended to the `usage` output
+ to provide additional command-specific documentation.
+ * `scopes`: what scopes this command appears in. Scopes associate
+ tools (e.g. `rabbitmqctl`, `rabbitmq-diagnostics`, `rabbitmq-queues`, `rabbitmq-streams`) with commands.
+ * `distribution`: controls Erlang distribution.
+ Can be `:cli` (default), `:none` or `{:fun, fun}` (see the sketch below)
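+
+For illustration, the optional callbacks can be as simple as this (the values
+below are examples, not defaults):
+
+```
+def switches(), do: [offline: :boolean, timeout: :integer]
+def aliases(), do: [o: :offline, t: :timeout]
+def scopes(), do: [:ctl, :diagnostics]
+def distribution(), do: :cli
+```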
+
+### Tutorial
+
+We have [a tutorial](./COMMAND_TUTORIAL.md) that demonstrates how to add a CLI
+command that deletes a queue.
+
+### Examples
+
+See `lib/rabbitmq/cli/ctl/commands/status_command.ex` and `test/status_command_test.exs` for minimalistic
+but not entirely trivial examples.
+
+
+## Copyright and License
+
+The project is [licensed under the MPL](LICENSE-MPL-RabbitMQ), the same license
+as RabbitMQ.
+
+(c) 2007-2020 VMware, Inc. or its affiliates.
+
diff --git a/deps/rabbitmq_cli/config/config.exs b/deps/rabbitmq_cli/config/config.exs
new file mode 100644
index 0000000000..0a88337ac6
--- /dev/null
+++ b/deps/rabbitmq_cli/config/config.exs
@@ -0,0 +1,37 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+
+# This file is responsible for configuring your application
+# and its dependencies with the aid of the Mix.Config module.
+use Mix.Config
+
+# This configuration is loaded before any dependency and is restricted
+# to this project. If another project depends on this project, this
+# file won't be loaded nor affect the parent project. For this reason,
+# if you want to provide default values for your application for
+# 3rd-party users, it should be done in your "mix.exs" file.
+
+# You can configure your application as:
+#
+# config :rabbitmqctl, key: :value
+#
+# And access this configuration in your application as:
+#
+# Application.get_env(:rabbitmqctl, :key)
+#
+# Or configure a 3rd-party app:
+#
+config :logger, [level: :warn, console: [device: :standard_error]]
+#
+
+# It is also possible to import configuration files, relative to this
+# directory. For example, you can emulate configuration per environment
+# by uncommenting the line below and defining dev.exs, test.exs and such.
+# Configuration from the imported file will override the ones defined
+# here (which is why it is important to import them last).
+#
+# import_config "#{Mix.env}.exs"
diff --git a/deps/rabbitmq_cli/erlang.mk b/deps/rabbitmq_cli/erlang.mk
new file mode 100644
index 0000000000..77933a9acf
--- /dev/null
+++ b/deps/rabbitmq_cli/erlang.mk
@@ -0,0 +1,7296 @@
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+.PHONY: all app apps deps search rel relup docs install-docs check tests clean distclean help erlang-mk
+
+ERLANG_MK_FILENAME := $(realpath $(lastword $(MAKEFILE_LIST)))
+export ERLANG_MK_FILENAME
+
+ERLANG_MK_VERSION = 2019.07.01-40-geb3e4b0
+ERLANG_MK_WITHOUT = plugins/cover plugins/ct plugins/dialyzer plugins/eunit plugins/proper plugins/triq
+
+# Make 3.81 and 3.82 are deprecated.
+
+ifeq ($(MAKELEVEL)$(MAKE_VERSION),03.81)
+$(warning Please upgrade to GNU Make 4 or later: https://erlang.mk/guide/installation.html)
+endif
+
+ifeq ($(MAKELEVEL)$(MAKE_VERSION),03.82)
+$(warning Please upgrade to GNU Make 4 or later: https://erlang.mk/guide/installation.html)
+endif
+
+# Core configuration.
+
+PROJECT ?= $(notdir $(CURDIR))
+PROJECT := $(strip $(PROJECT))
+
+PROJECT_VERSION ?= rolling
+PROJECT_MOD ?= $(PROJECT)_app
+PROJECT_ENV ?= []
+
+# Verbosity.
+
+V ?= 0
+
+verbose_0 = @
+verbose_2 = set -x;
+verbose = $(verbose_$(V))
+
+ifeq ($(V),3)
+SHELL := $(SHELL) -x
+endif
+
+gen_verbose_0 = @echo " GEN " $@;
+gen_verbose_2 = set -x;
+gen_verbose = $(gen_verbose_$(V))
+
+gen_verbose_esc_0 = @echo " GEN " $$@;
+gen_verbose_esc_2 = set -x;
+gen_verbose_esc = $(gen_verbose_esc_$(V))
+
+# Temporary files directory.
+
+ERLANG_MK_TMP ?= $(CURDIR)/.erlang.mk
+export ERLANG_MK_TMP
+
+# "erl" command.
+
+ERL = erl +A1 -noinput -boot no_dot_erlang
+
+# Platform detection.
+
+ifeq ($(PLATFORM),)
+UNAME_S := $(shell uname -s)
+
+ifeq ($(UNAME_S),Linux)
+PLATFORM = linux
+else ifeq ($(UNAME_S),Darwin)
+PLATFORM = darwin
+else ifeq ($(UNAME_S),SunOS)
+PLATFORM = solaris
+else ifeq ($(UNAME_S),GNU)
+PLATFORM = gnu
+else ifeq ($(UNAME_S),FreeBSD)
+PLATFORM = freebsd
+else ifeq ($(UNAME_S),NetBSD)
+PLATFORM = netbsd
+else ifeq ($(UNAME_S),OpenBSD)
+PLATFORM = openbsd
+else ifeq ($(UNAME_S),DragonFly)
+PLATFORM = dragonfly
+else ifeq ($(shell uname -o),Msys)
+PLATFORM = msys2
+else
+$(error Unable to detect platform. Please open a ticket with the output of uname -a.)
+endif
+
+export PLATFORM
+endif
+
+# Core targets.
+
+all:: deps app rel
+
+# Noop to avoid a Make warning when there's nothing to do.
+rel::
+ $(verbose) :
+
+relup:: deps app
+
+check:: tests
+
+clean:: clean-crashdump
+
+clean-crashdump:
+ifneq ($(wildcard erl_crash.dump),)
+ $(gen_verbose) rm -f erl_crash.dump
+endif
+
+distclean:: clean distclean-tmp
+
+$(ERLANG_MK_TMP):
+ $(verbose) mkdir -p $(ERLANG_MK_TMP)
+
+distclean-tmp:
+ $(gen_verbose) rm -rf $(ERLANG_MK_TMP)
+
+help::
+ $(verbose) printf "%s\n" \
+ "erlang.mk (version $(ERLANG_MK_VERSION)) is distributed under the terms of the ISC License." \
+ "Copyright (c) 2013-2016 Loïc Hoguin <essen@ninenines.eu>" \
+ "" \
+ "Usage: [V=1] $(MAKE) [target]..." \
+ "" \
+ "Core targets:" \
+ " all Run deps, app and rel targets in that order" \
+ " app Compile the project" \
+ " deps Fetch dependencies (if needed) and compile them" \
+ " fetch-deps Fetch dependencies recursively (if needed) without compiling them" \
+ " list-deps List dependencies recursively on stdout" \
+ " search q=... Search for a package in the built-in index" \
+ " rel Build a release for this project, if applicable" \
+ " docs Build the documentation for this project" \
+ " install-docs Install the man pages for this project" \
+ " check Compile and run all tests and analysis for this project" \
+ " tests Run the tests for this project" \
+ " clean Delete temporary and output files from most targets" \
+ " distclean Delete all temporary and output files" \
+ " help Display this help and exit" \
+ " erlang-mk Update erlang.mk to the latest version"
+
+# Core functions.
+
+empty :=
+space := $(empty) $(empty)
+tab := $(empty) $(empty)
+comma := ,
+
+define newline
+
+
+endef
+
+define comma_list
+$(subst $(space),$(comma),$(strip $(1)))
+endef
+
+define escape_dquotes
+$(subst ",\",$1)
+endef
+
+# Adding erlang.mk to make Erlang scripts who call init:get_plain_arguments() happy.
+define erlang
+$(ERL) $2 -pz $(ERLANG_MK_TMP)/rebar/ebin -eval "$(subst $(newline),,$(call escape_dquotes,$1))" -- erlang.mk
+endef
+
+ifeq ($(PLATFORM),msys2)
+core_native_path = $(shell cygpath -m $1)
+else
+core_native_path = $1
+endif
+
+core_http_get = curl -Lf$(if $(filter-out 0,$(V)),,s)o $(call core_native_path,$1) $2
+
+core_eq = $(and $(findstring $(1),$(2)),$(findstring $(2),$(1)))
+
+# We skip files that contain spaces because they end up causing issues.
+core_find = $(if $(wildcard $1),$(shell find $(1:%/=%) \( -type l -o -type f \) -name $(subst *,\*,$2) | grep -v " "))
+
+core_lc = $(subst A,a,$(subst B,b,$(subst C,c,$(subst D,d,$(subst E,e,$(subst F,f,$(subst G,g,$(subst H,h,$(subst I,i,$(subst J,j,$(subst K,k,$(subst L,l,$(subst M,m,$(subst N,n,$(subst O,o,$(subst P,p,$(subst Q,q,$(subst R,r,$(subst S,s,$(subst T,t,$(subst U,u,$(subst V,v,$(subst W,w,$(subst X,x,$(subst Y,y,$(subst Z,z,$(1)))))))))))))))))))))))))))
+
+core_ls = $(filter-out $(1),$(shell echo $(1)))
+
+# @todo Use a solution that does not require using perl.
+core_relpath = $(shell perl -e 'use File::Spec; print File::Spec->abs2rel(@ARGV) . "\n"' $1 $2)
+
+define core_render
+ printf -- '$(subst $(newline),\n,$(subst %,%%,$(subst ','\'',$(subst $(tab),$(WS),$(call $(1))))))\n' > $(2)
+endef
+
+# Automated update.
+
+ERLANG_MK_REPO ?= https://github.com/ninenines/erlang.mk
+ERLANG_MK_COMMIT ?=
+ERLANG_MK_BUILD_CONFIG ?= build.config
+ERLANG_MK_BUILD_DIR ?= .erlang.mk.build
+
+erlang-mk: WITHOUT ?= $(ERLANG_MK_WITHOUT)
+erlang-mk:
+ifdef ERLANG_MK_COMMIT
+ $(verbose) git clone $(ERLANG_MK_REPO) $(ERLANG_MK_BUILD_DIR)
+ $(verbose) cd $(ERLANG_MK_BUILD_DIR) && git checkout $(ERLANG_MK_COMMIT)
+else
+ $(verbose) git clone --depth 1 $(ERLANG_MK_REPO) $(ERLANG_MK_BUILD_DIR)
+endif
+ $(verbose) if [ -f $(ERLANG_MK_BUILD_CONFIG) ]; then cp $(ERLANG_MK_BUILD_CONFIG) $(ERLANG_MK_BUILD_DIR)/build.config; fi
+ $(gen_verbose) $(MAKE) --no-print-directory -C $(ERLANG_MK_BUILD_DIR) WITHOUT='$(strip $(WITHOUT))' UPGRADE=1
+ $(verbose) cp $(ERLANG_MK_BUILD_DIR)/erlang.mk ./erlang.mk
+ $(verbose) rm -rf $(ERLANG_MK_BUILD_DIR)
+ $(verbose) rm -rf $(ERLANG_MK_TMP)
+
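+# Illustrative usage (not part of erlang.mk): typical invocations of the
+# update target. Both variables are optional; the placeholder below stands
+# for whatever tag or commit hash you want to pin.
+#
+#   $ make erlang-mk                                 # update to latest master
+#   $ make erlang-mk ERLANG_MK_COMMIT=<tag-or-sha>   # pin a specific commit
+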
+# The erlang.mk package index is bundled in the default erlang.mk build.
+# Search for the string "copyright" to skip to the rest of the code.
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-kerl
+
+KERL_INSTALL_DIR ?= $(HOME)/erlang
+
+ifeq ($(strip $(KERL)),)
+KERL := $(ERLANG_MK_TMP)/kerl/kerl
+endif
+
+KERL_DIR = $(ERLANG_MK_TMP)/kerl
+
+export KERL
+
+KERL_GIT ?= https://github.com/kerl/kerl
+KERL_COMMIT ?= master
+
+KERL_MAKEFLAGS ?=
+
+OTP_GIT ?= https://github.com/erlang/otp
+
+define kerl_otp_target
+$(KERL_INSTALL_DIR)/$(1): $(KERL)
+ $(verbose) if [ ! -d $$@ ]; then \
+ MAKEFLAGS="$(KERL_MAKEFLAGS)" $(KERL) build git $(OTP_GIT) $(1) $(1); \
+ $(KERL) install $(1) $(KERL_INSTALL_DIR)/$(1); \
+ fi
+endef
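+
+# Illustrative usage (not part of erlang.mk): evaluating the template for a
+# hypothetical version string creates a rule that builds and installs that
+# OTP release through kerl on first use, mirroring how ERLANG_OTP is
+# handled further below.
+#
+#   $(eval $(call kerl_otp_target,22.0))
+#   # `make $(KERL_INSTALL_DIR)/22.0` now builds and installs OTP 22.0.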
+
+define kerl_hipe_target
+$(KERL_INSTALL_DIR)/$1-native: $(KERL)
+ $(verbose) if [ ! -d $$@ ]; then \
+ KERL_CONFIGURE_OPTIONS=--enable-native-libs \
+ MAKEFLAGS="$(KERL_MAKEFLAGS)" $(KERL) build git $(OTP_GIT) $1 $1-native; \
+ $(KERL) install $1-native $(KERL_INSTALL_DIR)/$1-native; \
+ fi
+endef
+
+$(KERL): $(KERL_DIR)
+
+$(KERL_DIR): | $(ERLANG_MK_TMP)
+ $(gen_verbose) git clone --depth 1 $(KERL_GIT) $(ERLANG_MK_TMP)/kerl
+ $(verbose) cd $(ERLANG_MK_TMP)/kerl && git checkout $(KERL_COMMIT)
+ $(verbose) chmod +x $(KERL)
+
+distclean:: distclean-kerl
+
+distclean-kerl:
+ $(gen_verbose) rm -rf $(KERL_DIR)
+
+# Allow users to select which version of Erlang/OTP to use for a project.
+
+ifneq ($(strip $(LATEST_ERLANG_OTP)),)
+# In some environments it is necessary to filter out master.
+ERLANG_OTP := $(notdir $(lastword $(sort\
+ $(filter-out $(KERL_INSTALL_DIR)/master $(KERL_INSTALL_DIR)/OTP_R%,\
+ $(filter-out %-rc1 %-rc2 %-rc3,$(wildcard $(KERL_INSTALL_DIR)/*[^-native]))))))
+endif
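+
+# Illustrative usage (not part of erlang.mk): with LATEST_ERLANG_OTP set,
+# the newest non-rc, non-native build already present under
+# $(KERL_INSTALL_DIR) is selected automatically.
+#
+#   $ make LATEST_ERLANG_OTP=1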
+
+ERLANG_OTP ?=
+ERLANG_HIPE ?=
+
+# Use kerl to enforce a specific Erlang/OTP version for a project.
+ifneq ($(strip $(ERLANG_OTP)),)
+export PATH := $(KERL_INSTALL_DIR)/$(ERLANG_OTP)/bin:$(PATH)
+SHELL := env PATH=$(PATH) $(SHELL)
+$(eval $(call kerl_otp_target,$(ERLANG_OTP)))
+
+# Build Erlang/OTP only if it doesn't already exist.
+ifeq ($(wildcard $(KERL_INSTALL_DIR)/$(ERLANG_OTP))$(BUILD_ERLANG_OTP),)
+$(info Building Erlang/OTP $(ERLANG_OTP)... Please wait...)
+$(shell $(MAKE) $(KERL_INSTALL_DIR)/$(ERLANG_OTP) ERLANG_OTP=$(ERLANG_OTP) BUILD_ERLANG_OTP=1 >&2)
+endif
+
+else
+# Same for a HiPE enabled VM.
+ifneq ($(strip $(ERLANG_HIPE)),)
+export PATH := $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native/bin:$(PATH)
+SHELL := env PATH=$(PATH) $(SHELL)
+$(eval $(call kerl_hipe_target,$(ERLANG_HIPE)))
+
+# Build Erlang/OTP only if it doesn't already exist.
+ifeq ($(wildcard $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native)$(BUILD_ERLANG_OTP),)
+$(info Building HiPE-enabled Erlang/OTP $(ERLANG_HIPE)... Please wait...)
+$(shell $(MAKE) $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native ERLANG_HIPE=$(ERLANG_HIPE) BUILD_ERLANG_OTP=1 >&2)
+endif
+
+endif
+endif
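+
+# Illustrative usage (not part of erlang.mk): force a project to build and
+# run with a specific OTP release; kerl builds it on first use if it is
+# not already installed. The version string is hypothetical.
+#
+#   $ make ERLANG_OTP=22.0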
+
+PACKAGES += aberth
+pkg_aberth_name = aberth
+pkg_aberth_description = Generic BERT-RPC server in Erlang
+pkg_aberth_homepage = https://github.com/a13x/aberth
+pkg_aberth_fetch = git
+pkg_aberth_repo = https://github.com/a13x/aberth
+pkg_aberth_commit = master
+
+PACKAGES += active
+pkg_active_name = active
+pkg_active_description = Active development for Erlang: rebuild and reload source/binary files while the VM is running
+pkg_active_homepage = https://github.com/proger/active
+pkg_active_fetch = git
+pkg_active_repo = https://github.com/proger/active
+pkg_active_commit = master
+
+PACKAGES += actordb_core
+pkg_actordb_core_name = actordb_core
+pkg_actordb_core_description = ActorDB main source
+pkg_actordb_core_homepage = http://www.actordb.com/
+pkg_actordb_core_fetch = git
+pkg_actordb_core_repo = https://github.com/biokoda/actordb_core
+pkg_actordb_core_commit = master
+
+PACKAGES += actordb_thrift
+pkg_actordb_thrift_name = actordb_thrift
+pkg_actordb_thrift_description = Thrift API for ActorDB
+pkg_actordb_thrift_homepage = http://www.actordb.com/
+pkg_actordb_thrift_fetch = git
+pkg_actordb_thrift_repo = https://github.com/biokoda/actordb_thrift
+pkg_actordb_thrift_commit = master
+
+PACKAGES += aleppo
+pkg_aleppo_name = aleppo
+pkg_aleppo_description = Alternative Erlang Pre-Processor
+pkg_aleppo_homepage = https://github.com/ErlyORM/aleppo
+pkg_aleppo_fetch = git
+pkg_aleppo_repo = https://github.com/ErlyORM/aleppo
+pkg_aleppo_commit = master
+
+PACKAGES += alog
+pkg_alog_name = alog
+pkg_alog_description = Simply the best logging framework for Erlang
+pkg_alog_homepage = https://github.com/siberian-fast-food/alogger
+pkg_alog_fetch = git
+pkg_alog_repo = https://github.com/siberian-fast-food/alogger
+pkg_alog_commit = master
+
+PACKAGES += amqp_client
+pkg_amqp_client_name = amqp_client
+pkg_amqp_client_description = RabbitMQ Erlang AMQP client
+pkg_amqp_client_homepage = https://www.rabbitmq.com/erlang-client-user-guide.html
+pkg_amqp_client_fetch = git
+pkg_amqp_client_repo = https://github.com/rabbitmq/rabbitmq-erlang-client.git
+pkg_amqp_client_commit = master
+
+PACKAGES += annotations
+pkg_annotations_name = annotations
+pkg_annotations_description = Simple code instrumentation utilities
+pkg_annotations_homepage = https://github.com/hyperthunk/annotations
+pkg_annotations_fetch = git
+pkg_annotations_repo = https://github.com/hyperthunk/annotations
+pkg_annotations_commit = master
+
+PACKAGES += antidote
+pkg_antidote_name = antidote
+pkg_antidote_description = Large-scale computation without synchronisation
+pkg_antidote_homepage = https://syncfree.lip6.fr/
+pkg_antidote_fetch = git
+pkg_antidote_repo = https://github.com/SyncFree/antidote
+pkg_antidote_commit = master
+
+PACKAGES += apns
+pkg_apns_name = apns
+pkg_apns_description = Apple Push Notification Server for Erlang
+pkg_apns_homepage = http://inaka.github.com/apns4erl
+pkg_apns_fetch = git
+pkg_apns_repo = https://github.com/inaka/apns4erl
+pkg_apns_commit = master
+
+PACKAGES += asciideck
+pkg_asciideck_name = asciideck
+pkg_asciideck_description = Asciidoc for Erlang.
+pkg_asciideck_homepage = https://ninenines.eu
+pkg_asciideck_fetch = git
+pkg_asciideck_repo = https://github.com/ninenines/asciideck
+pkg_asciideck_commit = master
+
+PACKAGES += azdht
+pkg_azdht_name = azdht
+pkg_azdht_description = Azureus Distributed Hash Table (DHT) in Erlang
+pkg_azdht_homepage = https://github.com/arcusfelis/azdht
+pkg_azdht_fetch = git
+pkg_azdht_repo = https://github.com/arcusfelis/azdht
+pkg_azdht_commit = master
+
+PACKAGES += backoff
+pkg_backoff_name = backoff
+pkg_backoff_description = Simple exponential backoffs in Erlang
+pkg_backoff_homepage = https://github.com/ferd/backoff
+pkg_backoff_fetch = git
+pkg_backoff_repo = https://github.com/ferd/backoff
+pkg_backoff_commit = master
+
+PACKAGES += barrel_tcp
+pkg_barrel_tcp_name = barrel_tcp
+pkg_barrel_tcp_description = barrel is a generic TCP acceptor pool with low latency in Erlang.
+pkg_barrel_tcp_homepage = https://github.com/benoitc-attic/barrel_tcp
+pkg_barrel_tcp_fetch = git
+pkg_barrel_tcp_repo = https://github.com/benoitc-attic/barrel_tcp
+pkg_barrel_tcp_commit = master
+
+PACKAGES += basho_bench
+pkg_basho_bench_name = basho_bench
+pkg_basho_bench_description = A load-generation and testing tool for basically whatever you can write a returning Erlang function for.
+pkg_basho_bench_homepage = https://github.com/basho/basho_bench
+pkg_basho_bench_fetch = git
+pkg_basho_bench_repo = https://github.com/basho/basho_bench
+pkg_basho_bench_commit = master
+
+PACKAGES += bcrypt
+pkg_bcrypt_name = bcrypt
+pkg_bcrypt_description = Bcrypt Erlang / C library
+pkg_bcrypt_homepage = https://github.com/erlangpack/bcrypt
+pkg_bcrypt_fetch = git
+pkg_bcrypt_repo = https://github.com/erlangpack/bcrypt.git
+pkg_bcrypt_commit = master
+
+PACKAGES += beam
+pkg_beam_name = beam
+pkg_beam_description = BEAM emulator written in Erlang
+pkg_beam_homepage = https://github.com/tonyrog/beam
+pkg_beam_fetch = git
+pkg_beam_repo = https://github.com/tonyrog/beam
+pkg_beam_commit = master
+
+PACKAGES += beanstalk
+pkg_beanstalk_name = beanstalk
+pkg_beanstalk_description = An Erlang client for beanstalkd
+pkg_beanstalk_homepage = https://github.com/tim/erlang-beanstalk
+pkg_beanstalk_fetch = git
+pkg_beanstalk_repo = https://github.com/tim/erlang-beanstalk
+pkg_beanstalk_commit = master
+
+PACKAGES += bear
+pkg_bear_name = bear
+pkg_bear_description = a set of statistics functions for erlang
+pkg_bear_homepage = https://github.com/boundary/bear
+pkg_bear_fetch = git
+pkg_bear_repo = https://github.com/boundary/bear
+pkg_bear_commit = master
+
+PACKAGES += bertconf
+pkg_bertconf_name = bertconf
+pkg_bertconf_description = Make ETS tables out of static BERT files that are auto-reloaded
+pkg_bertconf_homepage = https://github.com/ferd/bertconf
+pkg_bertconf_fetch = git
+pkg_bertconf_repo = https://github.com/ferd/bertconf
+pkg_bertconf_commit = master
+
+PACKAGES += bifrost
+pkg_bifrost_name = bifrost
+pkg_bifrost_description = Erlang FTP Server Framework
+pkg_bifrost_homepage = https://github.com/thorstadt/bifrost
+pkg_bifrost_fetch = git
+pkg_bifrost_repo = https://github.com/thorstadt/bifrost
+pkg_bifrost_commit = master
+
+PACKAGES += binpp
+pkg_binpp_name = binpp
+pkg_binpp_description = Erlang Binary Pretty Printer
+pkg_binpp_homepage = https://github.com/jtendo/binpp
+pkg_binpp_fetch = git
+pkg_binpp_repo = https://github.com/jtendo/binpp
+pkg_binpp_commit = master
+
+PACKAGES += bisect
+pkg_bisect_name = bisect
+pkg_bisect_description = Ordered fixed-size binary dictionary in Erlang
+pkg_bisect_homepage = https://github.com/knutin/bisect
+pkg_bisect_fetch = git
+pkg_bisect_repo = https://github.com/knutin/bisect
+pkg_bisect_commit = master
+
+PACKAGES += bitcask
+pkg_bitcask_name = bitcask
+pkg_bitcask_description = because you need another key/value storage engine
+pkg_bitcask_homepage = https://github.com/basho/bitcask
+pkg_bitcask_fetch = git
+pkg_bitcask_repo = https://github.com/basho/bitcask
+pkg_bitcask_commit = develop
+
+PACKAGES += bitstore
+pkg_bitstore_name = bitstore
+pkg_bitstore_description = A document based ontology development environment
+pkg_bitstore_homepage = https://github.com/bdionne/bitstore
+pkg_bitstore_fetch = git
+pkg_bitstore_repo = https://github.com/bdionne/bitstore
+pkg_bitstore_commit = master
+
+PACKAGES += bootstrap
+pkg_bootstrap_name = bootstrap
+pkg_bootstrap_description = A simple, yet powerful Erlang cluster bootstrapping application.
+pkg_bootstrap_homepage = https://github.com/schlagert/bootstrap
+pkg_bootstrap_fetch = git
+pkg_bootstrap_repo = https://github.com/schlagert/bootstrap
+pkg_bootstrap_commit = master
+
+PACKAGES += boss
+pkg_boss_name = boss
+pkg_boss_description = Erlang web MVC, now featuring Comet
+pkg_boss_homepage = https://github.com/ChicagoBoss/ChicagoBoss
+pkg_boss_fetch = git
+pkg_boss_repo = https://github.com/ChicagoBoss/ChicagoBoss
+pkg_boss_commit = master
+
+PACKAGES += boss_db
+pkg_boss_db_name = boss_db
+pkg_boss_db_description = BossDB: a sharded, caching, pooling, evented ORM for Erlang
+pkg_boss_db_homepage = https://github.com/ErlyORM/boss_db
+pkg_boss_db_fetch = git
+pkg_boss_db_repo = https://github.com/ErlyORM/boss_db
+pkg_boss_db_commit = master
+
+PACKAGES += brod
+pkg_brod_name = brod
+pkg_brod_description = Kafka client in Erlang
+pkg_brod_homepage = https://github.com/klarna/brod
+pkg_brod_fetch = git
+pkg_brod_repo = https://github.com/klarna/brod.git
+pkg_brod_commit = master
+
+PACKAGES += bson
+pkg_bson_name = bson
+pkg_bson_description = BSON documents in Erlang, see bsonspec.org
+pkg_bson_homepage = https://github.com/comtihon/bson-erlang
+pkg_bson_fetch = git
+pkg_bson_repo = https://github.com/comtihon/bson-erlang
+pkg_bson_commit = master
+
+PACKAGES += bullet
+pkg_bullet_name = bullet
+pkg_bullet_description = Simple, reliable, efficient streaming for Cowboy.
+pkg_bullet_homepage = http://ninenines.eu
+pkg_bullet_fetch = git
+pkg_bullet_repo = https://github.com/ninenines/bullet
+pkg_bullet_commit = master
+
+PACKAGES += cache
+pkg_cache_name = cache
+pkg_cache_description = Erlang in-memory cache
+pkg_cache_homepage = https://github.com/fogfish/cache
+pkg_cache_fetch = git
+pkg_cache_repo = https://github.com/fogfish/cache
+pkg_cache_commit = master
+
+PACKAGES += cake
+pkg_cake_name = cake
+pkg_cake_description = Really simple terminal colorization
+pkg_cake_homepage = https://github.com/darach/cake-erl
+pkg_cake_fetch = git
+pkg_cake_repo = https://github.com/darach/cake-erl
+pkg_cake_commit = master
+
+PACKAGES += carotene
+pkg_carotene_name = carotene
+pkg_carotene_description = Real-time server
+pkg_carotene_homepage = https://github.com/carotene/carotene
+pkg_carotene_fetch = git
+pkg_carotene_repo = https://github.com/carotene/carotene
+pkg_carotene_commit = master
+
+PACKAGES += cberl
+pkg_cberl_name = cberl
+pkg_cberl_description = NIF based Erlang bindings for Couchbase
+pkg_cberl_homepage = https://github.com/chitika/cberl
+pkg_cberl_fetch = git
+pkg_cberl_repo = https://github.com/chitika/cberl
+pkg_cberl_commit = master
+
+PACKAGES += cecho
+pkg_cecho_name = cecho
+pkg_cecho_description = An ncurses library for Erlang
+pkg_cecho_homepage = https://github.com/mazenharake/cecho
+pkg_cecho_fetch = git
+pkg_cecho_repo = https://github.com/mazenharake/cecho
+pkg_cecho_commit = master
+
+PACKAGES += cferl
+pkg_cferl_name = cferl
+pkg_cferl_description = Rackspace / OpenStack Cloud Files Erlang Client
+pkg_cferl_homepage = https://github.com/ddossot/cferl
+pkg_cferl_fetch = git
+pkg_cferl_repo = https://github.com/ddossot/cferl
+pkg_cferl_commit = master
+
+PACKAGES += chaos_monkey
+pkg_chaos_monkey_name = chaos_monkey
+pkg_chaos_monkey_description = This is The CHAOS MONKEY. It will kill your processes.
+pkg_chaos_monkey_homepage = https://github.com/dLuna/chaos_monkey
+pkg_chaos_monkey_fetch = git
+pkg_chaos_monkey_repo = https://github.com/dLuna/chaos_monkey
+pkg_chaos_monkey_commit = master
+
+PACKAGES += check_node
+pkg_check_node_name = check_node
+pkg_check_node_description = Nagios Scripts for monitoring Riak
+pkg_check_node_homepage = https://github.com/basho-labs/riak_nagios
+pkg_check_node_fetch = git
+pkg_check_node_repo = https://github.com/basho-labs/riak_nagios
+pkg_check_node_commit = master
+
+PACKAGES += chronos
+pkg_chronos_name = chronos
+pkg_chronos_description = Timer module for Erlang that makes it easy to abstract time out of the tests.
+pkg_chronos_homepage = https://github.com/lehoff/chronos
+pkg_chronos_fetch = git
+pkg_chronos_repo = https://github.com/lehoff/chronos
+pkg_chronos_commit = master
+
+PACKAGES += chumak
+pkg_chumak_name = chumak
+pkg_chumak_description = Pure Erlang implementation of ZeroMQ Message Transport Protocol.
+pkg_chumak_homepage = http://choven.ca
+pkg_chumak_fetch = git
+pkg_chumak_repo = https://github.com/chovencorp/chumak
+pkg_chumak_commit = master
+
+PACKAGES += cl
+pkg_cl_name = cl
+pkg_cl_description = OpenCL binding for Erlang
+pkg_cl_homepage = https://github.com/tonyrog/cl
+pkg_cl_fetch = git
+pkg_cl_repo = https://github.com/tonyrog/cl
+pkg_cl_commit = master
+
+PACKAGES += clique
+pkg_clique_name = clique
+pkg_clique_description = CLI Framework for Erlang
+pkg_clique_homepage = https://github.com/basho/clique
+pkg_clique_fetch = git
+pkg_clique_repo = https://github.com/basho/clique
+pkg_clique_commit = develop
+
+PACKAGES += cloudi_core
+pkg_cloudi_core_name = cloudi_core
+pkg_cloudi_core_description = CloudI internal service runtime
+pkg_cloudi_core_homepage = http://cloudi.org/
+pkg_cloudi_core_fetch = git
+pkg_cloudi_core_repo = https://github.com/CloudI/cloudi_core
+pkg_cloudi_core_commit = master
+
+PACKAGES += cloudi_service_api_requests
+pkg_cloudi_service_api_requests_name = cloudi_service_api_requests
+pkg_cloudi_service_api_requests_description = CloudI Service API requests (JSON-RPC/Erlang-term support)
+pkg_cloudi_service_api_requests_homepage = http://cloudi.org/
+pkg_cloudi_service_api_requests_fetch = git
+pkg_cloudi_service_api_requests_repo = https://github.com/CloudI/cloudi_service_api_requests
+pkg_cloudi_service_api_requests_commit = master
+
+PACKAGES += cloudi_service_db
+pkg_cloudi_service_db_name = cloudi_service_db
+pkg_cloudi_service_db_description = CloudI Database (in-memory/testing/generic)
+pkg_cloudi_service_db_homepage = http://cloudi.org/
+pkg_cloudi_service_db_fetch = git
+pkg_cloudi_service_db_repo = https://github.com/CloudI/cloudi_service_db
+pkg_cloudi_service_db_commit = master
+
+PACKAGES += cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_name = cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_description = Cassandra CloudI Service
+pkg_cloudi_service_db_cassandra_homepage = http://cloudi.org/
+pkg_cloudi_service_db_cassandra_fetch = git
+pkg_cloudi_service_db_cassandra_repo = https://github.com/CloudI/cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_commit = master
+
+PACKAGES += cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_name = cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_description = Cassandra CQL CloudI Service
+pkg_cloudi_service_db_cassandra_cql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_cassandra_cql_fetch = git
+pkg_cloudi_service_db_cassandra_cql_repo = https://github.com/CloudI/cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_commit = master
+
+PACKAGES += cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_name = cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_description = CouchDB CloudI Service
+pkg_cloudi_service_db_couchdb_homepage = http://cloudi.org/
+pkg_cloudi_service_db_couchdb_fetch = git
+pkg_cloudi_service_db_couchdb_repo = https://github.com/CloudI/cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_commit = master
+
+PACKAGES += cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_name = cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_description = elasticsearch CloudI Service
+pkg_cloudi_service_db_elasticsearch_homepage = http://cloudi.org/
+pkg_cloudi_service_db_elasticsearch_fetch = git
+pkg_cloudi_service_db_elasticsearch_repo = https://github.com/CloudI/cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_commit = master
+
+PACKAGES += cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_name = cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_description = memcached CloudI Service
+pkg_cloudi_service_db_memcached_homepage = http://cloudi.org/
+pkg_cloudi_service_db_memcached_fetch = git
+pkg_cloudi_service_db_memcached_repo = https://github.com/CloudI/cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_commit = master
+
+PACKAGES += cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_name = cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_description = MySQL CloudI Service
+pkg_cloudi_service_db_mysql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_mysql_fetch = git
+pkg_cloudi_service_db_mysql_repo = https://github.com/CloudI/cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_commit = master
+
+PACKAGES += cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_name = cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_description = PostgreSQL CloudI Service
+pkg_cloudi_service_db_pgsql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_pgsql_fetch = git
+pkg_cloudi_service_db_pgsql_repo = https://github.com/CloudI/cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_commit = master
+
+PACKAGES += cloudi_service_db_riak
+pkg_cloudi_service_db_riak_name = cloudi_service_db_riak
+pkg_cloudi_service_db_riak_description = Riak CloudI Service
+pkg_cloudi_service_db_riak_homepage = http://cloudi.org/
+pkg_cloudi_service_db_riak_fetch = git
+pkg_cloudi_service_db_riak_repo = https://github.com/CloudI/cloudi_service_db_riak
+pkg_cloudi_service_db_riak_commit = master
+
+PACKAGES += cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_name = cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_description = Tokyo Tyrant CloudI Service
+pkg_cloudi_service_db_tokyotyrant_homepage = http://cloudi.org/
+pkg_cloudi_service_db_tokyotyrant_fetch = git
+pkg_cloudi_service_db_tokyotyrant_repo = https://github.com/CloudI/cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_commit = master
+
+PACKAGES += cloudi_service_filesystem
+pkg_cloudi_service_filesystem_name = cloudi_service_filesystem
+pkg_cloudi_service_filesystem_description = Filesystem CloudI Service
+pkg_cloudi_service_filesystem_homepage = http://cloudi.org/
+pkg_cloudi_service_filesystem_fetch = git
+pkg_cloudi_service_filesystem_repo = https://github.com/CloudI/cloudi_service_filesystem
+pkg_cloudi_service_filesystem_commit = master
+
+PACKAGES += cloudi_service_http_client
+pkg_cloudi_service_http_client_name = cloudi_service_http_client
+pkg_cloudi_service_http_client_description = HTTP client CloudI Service
+pkg_cloudi_service_http_client_homepage = http://cloudi.org/
+pkg_cloudi_service_http_client_fetch = git
+pkg_cloudi_service_http_client_repo = https://github.com/CloudI/cloudi_service_http_client
+pkg_cloudi_service_http_client_commit = master
+
+PACKAGES += cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_name = cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_description = cowboy HTTP/HTTPS CloudI Service
+pkg_cloudi_service_http_cowboy_homepage = http://cloudi.org/
+pkg_cloudi_service_http_cowboy_fetch = git
+pkg_cloudi_service_http_cowboy_repo = https://github.com/CloudI/cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_commit = master
+
+PACKAGES += cloudi_service_http_elli
+pkg_cloudi_service_http_elli_name = cloudi_service_http_elli
+pkg_cloudi_service_http_elli_description = elli HTTP CloudI Service
+pkg_cloudi_service_http_elli_homepage = http://cloudi.org/
+pkg_cloudi_service_http_elli_fetch = git
+pkg_cloudi_service_http_elli_repo = https://github.com/CloudI/cloudi_service_http_elli
+pkg_cloudi_service_http_elli_commit = master
+
+PACKAGES += cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_name = cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_description = Map/Reduce CloudI Service
+pkg_cloudi_service_map_reduce_homepage = http://cloudi.org/
+pkg_cloudi_service_map_reduce_fetch = git
+pkg_cloudi_service_map_reduce_repo = https://github.com/CloudI/cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_commit = master
+
+PACKAGES += cloudi_service_oauth1
+pkg_cloudi_service_oauth1_name = cloudi_service_oauth1
+pkg_cloudi_service_oauth1_description = OAuth v1.0 CloudI Service
+pkg_cloudi_service_oauth1_homepage = http://cloudi.org/
+pkg_cloudi_service_oauth1_fetch = git
+pkg_cloudi_service_oauth1_repo = https://github.com/CloudI/cloudi_service_oauth1
+pkg_cloudi_service_oauth1_commit = master
+
+PACKAGES += cloudi_service_queue
+pkg_cloudi_service_queue_name = cloudi_service_queue
+pkg_cloudi_service_queue_description = Persistent Queue Service
+pkg_cloudi_service_queue_homepage = http://cloudi.org/
+pkg_cloudi_service_queue_fetch = git
+pkg_cloudi_service_queue_repo = https://github.com/CloudI/cloudi_service_queue
+pkg_cloudi_service_queue_commit = master
+
+PACKAGES += cloudi_service_quorum
+pkg_cloudi_service_quorum_name = cloudi_service_quorum
+pkg_cloudi_service_quorum_description = CloudI Quorum Service
+pkg_cloudi_service_quorum_homepage = http://cloudi.org/
+pkg_cloudi_service_quorum_fetch = git
+pkg_cloudi_service_quorum_repo = https://github.com/CloudI/cloudi_service_quorum
+pkg_cloudi_service_quorum_commit = master
+
+PACKAGES += cloudi_service_router
+pkg_cloudi_service_router_name = cloudi_service_router
+pkg_cloudi_service_router_description = CloudI Router Service
+pkg_cloudi_service_router_homepage = http://cloudi.org/
+pkg_cloudi_service_router_fetch = git
+pkg_cloudi_service_router_repo = https://github.com/CloudI/cloudi_service_router
+pkg_cloudi_service_router_commit = master
+
+PACKAGES += cloudi_service_tcp
+pkg_cloudi_service_tcp_name = cloudi_service_tcp
+pkg_cloudi_service_tcp_description = TCP CloudI Service
+pkg_cloudi_service_tcp_homepage = http://cloudi.org/
+pkg_cloudi_service_tcp_fetch = git
+pkg_cloudi_service_tcp_repo = https://github.com/CloudI/cloudi_service_tcp
+pkg_cloudi_service_tcp_commit = master
+
+PACKAGES += cloudi_service_timers
+pkg_cloudi_service_timers_name = cloudi_service_timers
+pkg_cloudi_service_timers_description = Timers CloudI Service
+pkg_cloudi_service_timers_homepage = http://cloudi.org/
+pkg_cloudi_service_timers_fetch = git
+pkg_cloudi_service_timers_repo = https://github.com/CloudI/cloudi_service_timers
+pkg_cloudi_service_timers_commit = master
+
+PACKAGES += cloudi_service_udp
+pkg_cloudi_service_udp_name = cloudi_service_udp
+pkg_cloudi_service_udp_description = UDP CloudI Service
+pkg_cloudi_service_udp_homepage = http://cloudi.org/
+pkg_cloudi_service_udp_fetch = git
+pkg_cloudi_service_udp_repo = https://github.com/CloudI/cloudi_service_udp
+pkg_cloudi_service_udp_commit = master
+
+PACKAGES += cloudi_service_validate
+pkg_cloudi_service_validate_name = cloudi_service_validate
+pkg_cloudi_service_validate_description = CloudI Validate Service
+pkg_cloudi_service_validate_homepage = http://cloudi.org/
+pkg_cloudi_service_validate_fetch = git
+pkg_cloudi_service_validate_repo = https://github.com/CloudI/cloudi_service_validate
+pkg_cloudi_service_validate_commit = master
+
+PACKAGES += cloudi_service_zeromq
+pkg_cloudi_service_zeromq_name = cloudi_service_zeromq
+pkg_cloudi_service_zeromq_description = ZeroMQ CloudI Service
+pkg_cloudi_service_zeromq_homepage = http://cloudi.org/
+pkg_cloudi_service_zeromq_fetch = git
+pkg_cloudi_service_zeromq_repo = https://github.com/CloudI/cloudi_service_zeromq
+pkg_cloudi_service_zeromq_commit = master
+
+PACKAGES += cluster_info
+pkg_cluster_info_name = cluster_info
+pkg_cluster_info_description = Fork of Hibari's nifty cluster_info OTP app
+pkg_cluster_info_homepage = https://github.com/basho/cluster_info
+pkg_cluster_info_fetch = git
+pkg_cluster_info_repo = https://github.com/basho/cluster_info
+pkg_cluster_info_commit = master
+
+PACKAGES += color
+pkg_color_name = color
+pkg_color_description = ANSI colors for your Erlang
+pkg_color_homepage = https://github.com/julianduque/erlang-color
+pkg_color_fetch = git
+pkg_color_repo = https://github.com/julianduque/erlang-color
+pkg_color_commit = master
+
+PACKAGES += confetti
+pkg_confetti_name = confetti
+pkg_confetti_description = Erlang configuration provider / application:get_env/2 on steroids
+pkg_confetti_homepage = https://github.com/jtendo/confetti
+pkg_confetti_fetch = git
+pkg_confetti_repo = https://github.com/jtendo/confetti
+pkg_confetti_commit = master
+
+PACKAGES += couchbeam
+pkg_couchbeam_name = couchbeam
+pkg_couchbeam_description = Apache CouchDB client in Erlang
+pkg_couchbeam_homepage = https://github.com/benoitc/couchbeam
+pkg_couchbeam_fetch = git
+pkg_couchbeam_repo = https://github.com/benoitc/couchbeam
+pkg_couchbeam_commit = master
+
+PACKAGES += covertool
+pkg_covertool_name = covertool
+pkg_covertool_description = Tool to convert Erlang cover data files into Cobertura XML reports
+pkg_covertool_homepage = https://github.com/idubrov/covertool
+pkg_covertool_fetch = git
+pkg_covertool_repo = https://github.com/idubrov/covertool
+pkg_covertool_commit = master
+
+PACKAGES += cowboy
+pkg_cowboy_name = cowboy
+pkg_cowboy_description = Small, fast and modular HTTP server.
+pkg_cowboy_homepage = http://ninenines.eu
+pkg_cowboy_fetch = git
+pkg_cowboy_repo = https://github.com/ninenines/cowboy
+pkg_cowboy_commit = 1.0.4
+
+PACKAGES += cowdb
+pkg_cowdb_name = cowdb
+pkg_cowdb_description = Pure Key/Value database library for Erlang Applications
+pkg_cowdb_homepage = https://github.com/refuge/cowdb
+pkg_cowdb_fetch = git
+pkg_cowdb_repo = https://github.com/refuge/cowdb
+pkg_cowdb_commit = master
+
+PACKAGES += cowlib
+pkg_cowlib_name = cowlib
+pkg_cowlib_description = Support library for manipulating Web protocols.
+pkg_cowlib_homepage = http://ninenines.eu
+pkg_cowlib_fetch = git
+pkg_cowlib_repo = https://github.com/ninenines/cowlib
+pkg_cowlib_commit = 1.0.2
+
+PACKAGES += cpg
+pkg_cpg_name = cpg
+pkg_cpg_description = CloudI Process Groups
+pkg_cpg_homepage = https://github.com/okeuday/cpg
+pkg_cpg_fetch = git
+pkg_cpg_repo = https://github.com/okeuday/cpg
+pkg_cpg_commit = master
+
+PACKAGES += cqerl
+pkg_cqerl_name = cqerl
+pkg_cqerl_description = Native Erlang CQL client for Cassandra
+pkg_cqerl_homepage = https://matehat.github.io/cqerl/
+pkg_cqerl_fetch = git
+pkg_cqerl_repo = https://github.com/matehat/cqerl
+pkg_cqerl_commit = master
+
+PACKAGES += cr
+pkg_cr_name = cr
+pkg_cr_description = Chain Replication
+pkg_cr_homepage = https://synrc.com/apps/cr/doc/cr.htm
+pkg_cr_fetch = git
+pkg_cr_repo = https://github.com/spawnproc/cr
+pkg_cr_commit = master
+
+PACKAGES += cuttlefish
+pkg_cuttlefish_name = cuttlefish
+pkg_cuttlefish_description = never lose your childlike sense of wonder baby cuttlefish, promise me?
+pkg_cuttlefish_homepage = https://github.com/basho/cuttlefish
+pkg_cuttlefish_fetch = git
+pkg_cuttlefish_repo = https://github.com/basho/cuttlefish
+pkg_cuttlefish_commit = master
+
+PACKAGES += damocles
+pkg_damocles_name = damocles
+pkg_damocles_description = Erlang library for generating adversarial network conditions for QAing distributed applications/systems on a single Linux box.
+pkg_damocles_homepage = https://github.com/lostcolony/damocles
+pkg_damocles_fetch = git
+pkg_damocles_repo = https://github.com/lostcolony/damocles
+pkg_damocles_commit = master
+
+PACKAGES += debbie
+pkg_debbie_name = debbie
+pkg_debbie_description = .DEB Built In Erlang
+pkg_debbie_homepage = https://github.com/crownedgrouse/debbie
+pkg_debbie_fetch = git
+pkg_debbie_repo = https://github.com/crownedgrouse/debbie
+pkg_debbie_commit = master
+
+PACKAGES += decimal
+pkg_decimal_name = decimal
+pkg_decimal_description = An Erlang decimal arithmetic library
+pkg_decimal_homepage = https://github.com/tim/erlang-decimal
+pkg_decimal_fetch = git
+pkg_decimal_repo = https://github.com/tim/erlang-decimal
+pkg_decimal_commit = master
+
+PACKAGES += detergent
+pkg_detergent_name = detergent
+pkg_detergent_description = An emulsifying Erlang SOAP library
+pkg_detergent_homepage = https://github.com/devinus/detergent
+pkg_detergent_fetch = git
+pkg_detergent_repo = https://github.com/devinus/detergent
+pkg_detergent_commit = master
+
+PACKAGES += detest
+pkg_detest_name = detest
+pkg_detest_description = Tool for running tests on a cluster of erlang nodes
+pkg_detest_homepage = https://github.com/biokoda/detest
+pkg_detest_fetch = git
+pkg_detest_repo = https://github.com/biokoda/detest
+pkg_detest_commit = master
+
+PACKAGES += dh_date
+pkg_dh_date_name = dh_date
+pkg_dh_date_description = Date formatting / parsing library for erlang
+pkg_dh_date_homepage = https://github.com/daleharvey/dh_date
+pkg_dh_date_fetch = git
+pkg_dh_date_repo = https://github.com/daleharvey/dh_date
+pkg_dh_date_commit = master
+
+PACKAGES += dirbusterl
+pkg_dirbusterl_name = dirbusterl
+pkg_dirbusterl_description = DirBuster successor in Erlang
+pkg_dirbusterl_homepage = https://github.com/silentsignal/DirBustErl
+pkg_dirbusterl_fetch = git
+pkg_dirbusterl_repo = https://github.com/silentsignal/DirBustErl
+pkg_dirbusterl_commit = master
+
+PACKAGES += dispcount
+pkg_dispcount_name = dispcount
+pkg_dispcount_description = Erlang task dispatcher based on ETS counters.
+pkg_dispcount_homepage = https://github.com/ferd/dispcount
+pkg_dispcount_fetch = git
+pkg_dispcount_repo = https://github.com/ferd/dispcount
+pkg_dispcount_commit = master
+
+PACKAGES += dlhttpc
+pkg_dlhttpc_name = dlhttpc
+pkg_dlhttpc_description = dispcount-based lhttpc fork for massive amounts of requests to limited endpoints
+pkg_dlhttpc_homepage = https://github.com/ferd/dlhttpc
+pkg_dlhttpc_fetch = git
+pkg_dlhttpc_repo = https://github.com/ferd/dlhttpc
+pkg_dlhttpc_commit = master
+
+PACKAGES += dns
+pkg_dns_name = dns
+pkg_dns_description = Erlang DNS library
+pkg_dns_homepage = https://github.com/aetrion/dns_erlang
+pkg_dns_fetch = git
+pkg_dns_repo = https://github.com/aetrion/dns_erlang
+pkg_dns_commit = master
+
+PACKAGES += dnssd
+pkg_dnssd_name = dnssd
+pkg_dnssd_description = Erlang interface to Apple's Bonjour DNS Service Discovery implementation
+pkg_dnssd_homepage = https://github.com/benoitc/dnssd_erlang
+pkg_dnssd_fetch = git
+pkg_dnssd_repo = https://github.com/benoitc/dnssd_erlang
+pkg_dnssd_commit = master
+
+PACKAGES += dynamic_compile
+pkg_dynamic_compile_name = dynamic_compile
+pkg_dynamic_compile_description = compile and load erlang modules from string input
+pkg_dynamic_compile_homepage = https://github.com/jkvor/dynamic_compile
+pkg_dynamic_compile_fetch = git
+pkg_dynamic_compile_repo = https://github.com/jkvor/dynamic_compile
+pkg_dynamic_compile_commit = master
+
+PACKAGES += e2
+pkg_e2_name = e2
+pkg_e2_description = Library to simplify writing correct OTP applications.
+pkg_e2_homepage = http://e2project.org
+pkg_e2_fetch = git
+pkg_e2_repo = https://github.com/gar1t/e2
+pkg_e2_commit = master
+
+PACKAGES += eamf
+pkg_eamf_name = eamf
+pkg_eamf_description = eAMF provides Action Message Format (AMF) support for Erlang
+pkg_eamf_homepage = https://github.com/mrinalwadhwa/eamf
+pkg_eamf_fetch = git
+pkg_eamf_repo = https://github.com/mrinalwadhwa/eamf
+pkg_eamf_commit = master
+
+PACKAGES += eavro
+pkg_eavro_name = eavro
+pkg_eavro_description = Apache Avro encoder/decoder
+pkg_eavro_homepage = https://github.com/SIfoxDevTeam/eavro
+pkg_eavro_fetch = git
+pkg_eavro_repo = https://github.com/SIfoxDevTeam/eavro
+pkg_eavro_commit = master
+
+PACKAGES += ecapnp
+pkg_ecapnp_name = ecapnp
+pkg_ecapnp_description = Cap'n Proto library for Erlang
+pkg_ecapnp_homepage = https://github.com/kaos/ecapnp
+pkg_ecapnp_fetch = git
+pkg_ecapnp_repo = https://github.com/kaos/ecapnp
+pkg_ecapnp_commit = master
+
+PACKAGES += econfig
+pkg_econfig_name = econfig
+pkg_econfig_description = simple Erlang config handler using INI files
+pkg_econfig_homepage = https://github.com/benoitc/econfig
+pkg_econfig_fetch = git
+pkg_econfig_repo = https://github.com/benoitc/econfig
+pkg_econfig_commit = master
+
+PACKAGES += edate
+pkg_edate_name = edate
+pkg_edate_description = date manipulation library for erlang
+pkg_edate_homepage = https://github.com/dweldon/edate
+pkg_edate_fetch = git
+pkg_edate_repo = https://github.com/dweldon/edate
+pkg_edate_commit = master
+
+PACKAGES += edgar
+pkg_edgar_name = edgar
+pkg_edgar_description = Erlang Does GNU AR
+pkg_edgar_homepage = https://github.com/crownedgrouse/edgar
+pkg_edgar_fetch = git
+pkg_edgar_repo = https://github.com/crownedgrouse/edgar
+pkg_edgar_commit = master
+
+PACKAGES += edis
+pkg_edis_name = edis
+pkg_edis_description = An Erlang implementation of Redis KV Store
+pkg_edis_homepage = http://inaka.github.com/edis/
+pkg_edis_fetch = git
+pkg_edis_repo = https://github.com/inaka/edis
+pkg_edis_commit = master
+
+PACKAGES += edns
+pkg_edns_name = edns
+pkg_edns_description = Erlang/OTP DNS server
+pkg_edns_homepage = https://github.com/hcvst/erlang-dns
+pkg_edns_fetch = git
+pkg_edns_repo = https://github.com/hcvst/erlang-dns
+pkg_edns_commit = master
+
+PACKAGES += edown
+pkg_edown_name = edown
+pkg_edown_description = EDoc extension for generating Github-flavored Markdown
+pkg_edown_homepage = https://github.com/uwiger/edown
+pkg_edown_fetch = git
+pkg_edown_repo = https://github.com/uwiger/edown
+pkg_edown_commit = master
+
+PACKAGES += eep
+pkg_eep_name = eep
+pkg_eep_description = Erlang Easy Profiling (eep) application provides a way to analyze application performance and call hierarchy
+pkg_eep_homepage = https://github.com/virtan/eep
+pkg_eep_fetch = git
+pkg_eep_repo = https://github.com/virtan/eep
+pkg_eep_commit = master
+
+PACKAGES += eep_app
+pkg_eep_app_name = eep_app
+pkg_eep_app_description = Embedded Event Processing
+pkg_eep_app_homepage = https://github.com/darach/eep-erl
+pkg_eep_app_fetch = git
+pkg_eep_app_repo = https://github.com/darach/eep-erl
+pkg_eep_app_commit = master
+
+PACKAGES += efene
+pkg_efene_name = efene
+pkg_efene_description = Alternative syntax for the Erlang Programming Language focusing on simplicity, ease of use and programmer UX
+pkg_efene_homepage = https://github.com/efene/efene
+pkg_efene_fetch = git
+pkg_efene_repo = https://github.com/efene/efene
+pkg_efene_commit = master
+
+PACKAGES += egeoip
+pkg_egeoip_name = egeoip
+pkg_egeoip_description = Erlang IP Geolocation module, currently supporting the MaxMind GeoLite City Database.
+pkg_egeoip_homepage = https://github.com/mochi/egeoip
+pkg_egeoip_fetch = git
+pkg_egeoip_repo = https://github.com/mochi/egeoip
+pkg_egeoip_commit = master
+
+PACKAGES += ehsa
+pkg_ehsa_name = ehsa
+pkg_ehsa_description = Erlang HTTP server basic and digest authentication modules
+pkg_ehsa_homepage = https://bitbucket.org/a12n/ehsa
+pkg_ehsa_fetch = hg
+pkg_ehsa_repo = https://bitbucket.org/a12n/ehsa
+pkg_ehsa_commit = default
+
+PACKAGES += ej
+pkg_ej_name = ej
+pkg_ej_description = Helper module for working with Erlang terms representing JSON
+pkg_ej_homepage = https://github.com/seth/ej
+pkg_ej_fetch = git
+pkg_ej_repo = https://github.com/seth/ej
+pkg_ej_commit = master
+
+PACKAGES += ejabberd
+pkg_ejabberd_name = ejabberd
+pkg_ejabberd_description = Robust, ubiquitous and massively scalable Jabber / XMPP Instant Messaging platform
+pkg_ejabberd_homepage = https://github.com/processone/ejabberd
+pkg_ejabberd_fetch = git
+pkg_ejabberd_repo = https://github.com/processone/ejabberd
+pkg_ejabberd_commit = master
+
+PACKAGES += ejwt
+pkg_ejwt_name = ejwt
+pkg_ejwt_description = erlang library for JSON Web Token
+pkg_ejwt_homepage = https://github.com/artefactop/ejwt
+pkg_ejwt_fetch = git
+pkg_ejwt_repo = https://github.com/artefactop/ejwt
+pkg_ejwt_commit = master
+
+PACKAGES += ekaf
+pkg_ekaf_name = ekaf
+pkg_ekaf_description = A minimal, high-performance Kafka client in Erlang.
+pkg_ekaf_homepage = https://github.com/helpshift/ekaf
+pkg_ekaf_fetch = git
+pkg_ekaf_repo = https://github.com/helpshift/ekaf
+pkg_ekaf_commit = master
+
+PACKAGES += elarm
+pkg_elarm_name = elarm
+pkg_elarm_description = Alarm Manager for Erlang.
+pkg_elarm_homepage = https://github.com/esl/elarm
+pkg_elarm_fetch = git
+pkg_elarm_repo = https://github.com/esl/elarm
+pkg_elarm_commit = master
+
+PACKAGES += eleveldb
+pkg_eleveldb_name = eleveldb
+pkg_eleveldb_description = Erlang LevelDB API
+pkg_eleveldb_homepage = https://github.com/basho/eleveldb
+pkg_eleveldb_fetch = git
+pkg_eleveldb_repo = https://github.com/basho/eleveldb
+pkg_eleveldb_commit = master
+
+PACKAGES += elixir
+pkg_elixir_name = elixir
+pkg_elixir_description = Elixir is a dynamic, functional language designed for building scalable and maintainable applications
+pkg_elixir_homepage = https://elixir-lang.org/
+pkg_elixir_fetch = git
+pkg_elixir_repo = https://github.com/elixir-lang/elixir
+pkg_elixir_commit = master
+
+PACKAGES += elli
+pkg_elli_name = elli
+pkg_elli_description = Simple, robust and performant Erlang web server
+pkg_elli_homepage = https://github.com/elli-lib/elli
+pkg_elli_fetch = git
+pkg_elli_repo = https://github.com/elli-lib/elli
+pkg_elli_commit = master
+
+PACKAGES += elvis
+pkg_elvis_name = elvis
+pkg_elvis_description = Erlang Style Reviewer
+pkg_elvis_homepage = https://github.com/inaka/elvis
+pkg_elvis_fetch = git
+pkg_elvis_repo = https://github.com/inaka/elvis
+pkg_elvis_commit = master
+
+PACKAGES += emagick
+pkg_emagick_name = emagick
+pkg_emagick_description = Wrapper for Graphics/ImageMagick command line tool.
+pkg_emagick_homepage = https://github.com/kivra/emagick
+pkg_emagick_fetch = git
+pkg_emagick_repo = https://github.com/kivra/emagick
+pkg_emagick_commit = master
+
+PACKAGES += emysql
+pkg_emysql_name = emysql
+pkg_emysql_description = Stable, pure Erlang MySQL driver.
+pkg_emysql_homepage = https://github.com/Eonblast/Emysql
+pkg_emysql_fetch = git
+pkg_emysql_repo = https://github.com/Eonblast/Emysql
+pkg_emysql_commit = master
+
+PACKAGES += enm
+pkg_enm_name = enm
+pkg_enm_description = Erlang driver for nanomsg
+pkg_enm_homepage = https://github.com/basho/enm
+pkg_enm_fetch = git
+pkg_enm_repo = https://github.com/basho/enm
+pkg_enm_commit = master
+
+PACKAGES += entop
+pkg_entop_name = entop
+pkg_entop_description = A top-like tool for monitoring an Erlang node
+pkg_entop_homepage = https://github.com/mazenharake/entop
+pkg_entop_fetch = git
+pkg_entop_repo = https://github.com/mazenharake/entop
+pkg_entop_commit = master
+
+PACKAGES += epcap
+pkg_epcap_name = epcap
+pkg_epcap_description = Erlang packet capture interface using pcap
+pkg_epcap_homepage = https://github.com/msantos/epcap
+pkg_epcap_fetch = git
+pkg_epcap_repo = https://github.com/msantos/epcap
+pkg_epcap_commit = master
+
+PACKAGES += eper
+pkg_eper_name = eper
+pkg_eper_description = Erlang performance and debugging tools.
+pkg_eper_homepage = https://github.com/massemanet/eper
+pkg_eper_fetch = git
+pkg_eper_repo = https://github.com/massemanet/eper
+pkg_eper_commit = master
+
+PACKAGES += epgsql
+pkg_epgsql_name = epgsql
+pkg_epgsql_description = Erlang PostgreSQL client library.
+pkg_epgsql_homepage = https://github.com/epgsql/epgsql
+pkg_epgsql_fetch = git
+pkg_epgsql_repo = https://github.com/epgsql/epgsql
+pkg_epgsql_commit = master
+
+PACKAGES += episcina
+pkg_episcina_name = episcina
+pkg_episcina_description = A simple non intrusive resource pool for connections
+pkg_episcina_homepage = https://github.com/erlware/episcina
+pkg_episcina_fetch = git
+pkg_episcina_repo = https://github.com/erlware/episcina
+pkg_episcina_commit = master
+
+PACKAGES += eplot
+pkg_eplot_name = eplot
+pkg_eplot_description = A plot engine written in erlang.
+pkg_eplot_homepage = https://github.com/psyeugenic/eplot
+pkg_eplot_fetch = git
+pkg_eplot_repo = https://github.com/psyeugenic/eplot
+pkg_eplot_commit = master
+
+PACKAGES += epocxy
+pkg_epocxy_name = epocxy
+pkg_epocxy_description = Erlang Patterns of Concurrency
+pkg_epocxy_homepage = https://github.com/duomark/epocxy
+pkg_epocxy_fetch = git
+pkg_epocxy_repo = https://github.com/duomark/epocxy
+pkg_epocxy_commit = master
+
+PACKAGES += epubnub
+pkg_epubnub_name = epubnub
+pkg_epubnub_description = Erlang PubNub API
+pkg_epubnub_homepage = https://github.com/tsloughter/epubnub
+pkg_epubnub_fetch = git
+pkg_epubnub_repo = https://github.com/tsloughter/epubnub
+pkg_epubnub_commit = master
+
+PACKAGES += eqm
+pkg_eqm_name = eqm
+pkg_eqm_description = Erlang pub sub with supply-demand channels
+pkg_eqm_homepage = https://github.com/loucash/eqm
+pkg_eqm_fetch = git
+pkg_eqm_repo = https://github.com/loucash/eqm
+pkg_eqm_commit = master
+
+PACKAGES += eredis
+pkg_eredis_name = eredis
+pkg_eredis_description = Erlang Redis client
+pkg_eredis_homepage = https://github.com/wooga/eredis
+pkg_eredis_fetch = git
+pkg_eredis_repo = https://github.com/wooga/eredis
+pkg_eredis_commit = master
+
+PACKAGES += eredis_pool
+pkg_eredis_pool_name = eredis_pool
+pkg_eredis_pool_description = eredis_pool is a pool of Redis clients, using eredis and poolboy.
+pkg_eredis_pool_homepage = https://github.com/hiroeorz/eredis_pool
+pkg_eredis_pool_fetch = git
+pkg_eredis_pool_repo = https://github.com/hiroeorz/eredis_pool
+pkg_eredis_pool_commit = master
+
+PACKAGES += erl_streams
+pkg_erl_streams_name = erl_streams
+pkg_erl_streams_description = Streams in Erlang
+pkg_erl_streams_homepage = https://github.com/epappas/erl_streams
+pkg_erl_streams_fetch = git
+pkg_erl_streams_repo = https://github.com/epappas/erl_streams
+pkg_erl_streams_commit = master
+
+PACKAGES += erlang_cep
+pkg_erlang_cep_name = erlang_cep
+pkg_erlang_cep_description = A basic CEP package written in erlang
+pkg_erlang_cep_homepage = https://github.com/danmacklin/erlang_cep
+pkg_erlang_cep_fetch = git
+pkg_erlang_cep_repo = https://github.com/danmacklin/erlang_cep
+pkg_erlang_cep_commit = master
+
+PACKAGES += erlang_js
+pkg_erlang_js_name = erlang_js
+pkg_erlang_js_description = A linked-in driver for Erlang to Mozilla's Spidermonkey Javascript runtime.
+pkg_erlang_js_homepage = https://github.com/basho/erlang_js
+pkg_erlang_js_fetch = git
+pkg_erlang_js_repo = https://github.com/basho/erlang_js
+pkg_erlang_js_commit = master
+
+PACKAGES += erlang_localtime
+pkg_erlang_localtime_name = erlang_localtime
+pkg_erlang_localtime_description = Erlang library for conversion from one local time to another
+pkg_erlang_localtime_homepage = https://github.com/dmitryme/erlang_localtime
+pkg_erlang_localtime_fetch = git
+pkg_erlang_localtime_repo = https://github.com/dmitryme/erlang_localtime
+pkg_erlang_localtime_commit = master
+
+PACKAGES += erlang_smtp
+pkg_erlang_smtp_name = erlang_smtp
+pkg_erlang_smtp_description = Erlang SMTP and POP3 server code.
+pkg_erlang_smtp_homepage = https://github.com/tonyg/erlang-smtp
+pkg_erlang_smtp_fetch = git
+pkg_erlang_smtp_repo = https://github.com/tonyg/erlang-smtp
+pkg_erlang_smtp_commit = master
+
+PACKAGES += erlang_term
+pkg_erlang_term_name = erlang_term
+pkg_erlang_term_description = Erlang Term Info
+pkg_erlang_term_homepage = https://github.com/okeuday/erlang_term
+pkg_erlang_term_fetch = git
+pkg_erlang_term_repo = https://github.com/okeuday/erlang_term
+pkg_erlang_term_commit = master
+
+PACKAGES += erlastic_search
+pkg_erlastic_search_name = erlastic_search
+pkg_erlastic_search_description = An Erlang app for communicating with Elastic Search's rest interface.
+pkg_erlastic_search_homepage = https://github.com/tsloughter/erlastic_search
+pkg_erlastic_search_fetch = git
+pkg_erlastic_search_repo = https://github.com/tsloughter/erlastic_search
+pkg_erlastic_search_commit = master
+
+PACKAGES += erlasticsearch
+pkg_erlasticsearch_name = erlasticsearch
+pkg_erlasticsearch_description = Erlang thrift interface to elastic_search
+pkg_erlasticsearch_homepage = https://github.com/dieswaytoofast/erlasticsearch
+pkg_erlasticsearch_fetch = git
+pkg_erlasticsearch_repo = https://github.com/dieswaytoofast/erlasticsearch
+pkg_erlasticsearch_commit = master
+
+PACKAGES += erlbrake
+pkg_erlbrake_name = erlbrake
+pkg_erlbrake_description = Erlang Airbrake notification client
+pkg_erlbrake_homepage = https://github.com/kenpratt/erlbrake
+pkg_erlbrake_fetch = git
+pkg_erlbrake_repo = https://github.com/kenpratt/erlbrake
+pkg_erlbrake_commit = master
+
+PACKAGES += erlcloud
+pkg_erlcloud_name = erlcloud
+pkg_erlcloud_description = Cloud Computing library for erlang (Amazon EC2, S3, SQS, SimpleDB, Mechanical Turk, ELB)
+pkg_erlcloud_homepage = https://github.com/gleber/erlcloud
+pkg_erlcloud_fetch = git
+pkg_erlcloud_repo = https://github.com/gleber/erlcloud
+pkg_erlcloud_commit = master
+
+PACKAGES += erlcron
+pkg_erlcron_name = erlcron
+pkg_erlcron_description = Erlang cronish system
+pkg_erlcron_homepage = https://github.com/erlware/erlcron
+pkg_erlcron_fetch = git
+pkg_erlcron_repo = https://github.com/erlware/erlcron
+pkg_erlcron_commit = master
+
+PACKAGES += erldb
+pkg_erldb_name = erldb
+pkg_erldb_description = ORM (Object-relational mapping) application implemented in Erlang
+pkg_erldb_homepage = http://erldb.org
+pkg_erldb_fetch = git
+pkg_erldb_repo = https://github.com/erldb/erldb
+pkg_erldb_commit = master
+
+PACKAGES += erldis
+pkg_erldis_name = erldis
+pkg_erldis_description = redis erlang client library
+pkg_erldis_homepage = https://github.com/cstar/erldis
+pkg_erldis_fetch = git
+pkg_erldis_repo = https://github.com/cstar/erldis
+pkg_erldis_commit = master
+
+PACKAGES += erldns
+pkg_erldns_name = erldns
+pkg_erldns_description = DNS server, in erlang.
+pkg_erldns_homepage = https://github.com/aetrion/erl-dns
+pkg_erldns_fetch = git
+pkg_erldns_repo = https://github.com/aetrion/erl-dns
+pkg_erldns_commit = master
+
+PACKAGES += erldocker
+pkg_erldocker_name = erldocker
+pkg_erldocker_description = Docker Remote API client for Erlang
+pkg_erldocker_homepage = https://github.com/proger/erldocker
+pkg_erldocker_fetch = git
+pkg_erldocker_repo = https://github.com/proger/erldocker
+pkg_erldocker_commit = master
+
+PACKAGES += erlfsmon
+pkg_erlfsmon_name = erlfsmon
+pkg_erlfsmon_description = Erlang filesystem event watcher for Linux and OSX
+pkg_erlfsmon_homepage = https://github.com/proger/erlfsmon
+pkg_erlfsmon_fetch = git
+pkg_erlfsmon_repo = https://github.com/proger/erlfsmon
+pkg_erlfsmon_commit = master
+
+PACKAGES += erlgit
+pkg_erlgit_name = erlgit
+pkg_erlgit_description = Erlang convenience wrapper around git executable
+pkg_erlgit_homepage = https://github.com/gleber/erlgit
+pkg_erlgit_fetch = git
+pkg_erlgit_repo = https://github.com/gleber/erlgit
+pkg_erlgit_commit = master
+
+PACKAGES += erlguten
+pkg_erlguten_name = erlguten
+pkg_erlguten_description = ErlGuten is a system for high-quality typesetting, written purely in Erlang.
+pkg_erlguten_homepage = https://github.com/richcarl/erlguten
+pkg_erlguten_fetch = git
+pkg_erlguten_repo = https://github.com/richcarl/erlguten
+pkg_erlguten_commit = master
+
+PACKAGES += erlmc
+pkg_erlmc_name = erlmc
+pkg_erlmc_description = Erlang memcached binary protocol client
+pkg_erlmc_homepage = https://github.com/jkvor/erlmc
+pkg_erlmc_fetch = git
+pkg_erlmc_repo = https://github.com/jkvor/erlmc
+pkg_erlmc_commit = master
+
+PACKAGES += erlmongo
+pkg_erlmongo_name = erlmongo
+pkg_erlmongo_description = Record based Erlang driver for MongoDB with gridfs support
+pkg_erlmongo_homepage = https://github.com/SergejJurecko/erlmongo
+pkg_erlmongo_fetch = git
+pkg_erlmongo_repo = https://github.com/SergejJurecko/erlmongo
+pkg_erlmongo_commit = master
+
+PACKAGES += erlog
+pkg_erlog_name = erlog
+pkg_erlog_description = Prolog interpreter in and for Erlang
+pkg_erlog_homepage = https://github.com/rvirding/erlog
+pkg_erlog_fetch = git
+pkg_erlog_repo = https://github.com/rvirding/erlog
+pkg_erlog_commit = master
+
+PACKAGES += erlpass
+pkg_erlpass_name = erlpass
+pkg_erlpass_description = A library to handle password hashing and changing in a safe manner, independent from any kind of storage whatsoever.
+pkg_erlpass_homepage = https://github.com/ferd/erlpass
+pkg_erlpass_fetch = git
+pkg_erlpass_repo = https://github.com/ferd/erlpass
+pkg_erlpass_commit = master
+
+PACKAGES += erlport
+pkg_erlport_name = erlport
+pkg_erlport_description = ErlPort - connect Erlang to other languages
+pkg_erlport_homepage = https://github.com/hdima/erlport
+pkg_erlport_fetch = git
+pkg_erlport_repo = https://github.com/hdima/erlport
+pkg_erlport_commit = master
+
+PACKAGES += erlsh
+pkg_erlsh_name = erlsh
+pkg_erlsh_description = Erlang shell tools
+pkg_erlsh_homepage = https://github.com/proger/erlsh
+pkg_erlsh_fetch = git
+pkg_erlsh_repo = https://github.com/proger/erlsh
+pkg_erlsh_commit = master
+
+PACKAGES += erlsha2
+pkg_erlsha2_name = erlsha2
+pkg_erlsha2_description = SHA-224, SHA-256, SHA-384, SHA-512 implemented in Erlang NIFs.
+pkg_erlsha2_homepage = https://github.com/vinoski/erlsha2
+pkg_erlsha2_fetch = git
+pkg_erlsha2_repo = https://github.com/vinoski/erlsha2
+pkg_erlsha2_commit = master
+
+PACKAGES += erlsom
+pkg_erlsom_name = erlsom
+pkg_erlsom_description = XML parser for Erlang
+pkg_erlsom_homepage = https://github.com/willemdj/erlsom
+pkg_erlsom_fetch = git
+pkg_erlsom_repo = https://github.com/willemdj/erlsom
+pkg_erlsom_commit = master
+
+PACKAGES += erlubi
+pkg_erlubi_name = erlubi
+pkg_erlubi_description = Ubigraph Erlang Client (and Process Visualizer)
+pkg_erlubi_homepage = https://github.com/krestenkrab/erlubi
+pkg_erlubi_fetch = git
+pkg_erlubi_repo = https://github.com/krestenkrab/erlubi
+pkg_erlubi_commit = master
+
+PACKAGES += erlvolt
+pkg_erlvolt_name = erlvolt
+pkg_erlvolt_description = VoltDB Erlang Client Driver
+pkg_erlvolt_homepage = https://github.com/VoltDB/voltdb-client-erlang
+pkg_erlvolt_fetch = git
+pkg_erlvolt_repo = https://github.com/VoltDB/voltdb-client-erlang
+pkg_erlvolt_commit = master
+
+PACKAGES += erlware_commons
+pkg_erlware_commons_name = erlware_commons
+pkg_erlware_commons_description = Erlware Commons is an Erlware project focused on all aspects of reusable Erlang components.
+pkg_erlware_commons_homepage = https://github.com/erlware/erlware_commons
+pkg_erlware_commons_fetch = git
+pkg_erlware_commons_repo = https://github.com/erlware/erlware_commons
+pkg_erlware_commons_commit = master
+
+PACKAGES += erlydtl
+pkg_erlydtl_name = erlydtl
+pkg_erlydtl_description = Django Template Language for Erlang.
+pkg_erlydtl_homepage = https://github.com/erlydtl/erlydtl
+pkg_erlydtl_fetch = git
+pkg_erlydtl_repo = https://github.com/erlydtl/erlydtl
+pkg_erlydtl_commit = master
+
+PACKAGES += errd
+pkg_errd_name = errd
+pkg_errd_description = Erlang RRDTool library
+pkg_errd_homepage = https://github.com/archaelus/errd
+pkg_errd_fetch = git
+pkg_errd_repo = https://github.com/archaelus/errd
+pkg_errd_commit = master
+
+PACKAGES += erserve
+pkg_erserve_name = erserve
+pkg_erserve_description = Erlang/Rserve communication interface
+pkg_erserve_homepage = https://github.com/del/erserve
+pkg_erserve_fetch = git
+pkg_erserve_repo = https://github.com/del/erserve
+pkg_erserve_commit = master
+
+PACKAGES += erwa
+pkg_erwa_name = erwa
+pkg_erwa_description = A WAMP router and client written in Erlang.
+pkg_erwa_homepage = https://github.com/bwegh/erwa
+pkg_erwa_fetch = git
+pkg_erwa_repo = https://github.com/bwegh/erwa
+pkg_erwa_commit = master
+
+PACKAGES += escalus
+pkg_escalus_name = escalus
+pkg_escalus_description = An XMPP client library in Erlang for conveniently testing XMPP servers
+pkg_escalus_homepage = https://github.com/esl/escalus
+pkg_escalus_fetch = git
+pkg_escalus_repo = https://github.com/esl/escalus
+pkg_escalus_commit = master
+
+PACKAGES += esh_mk
+pkg_esh_mk_name = esh_mk
+pkg_esh_mk_description = esh template engine plugin for erlang.mk
+pkg_esh_mk_homepage = https://github.com/crownedgrouse/esh.mk
+pkg_esh_mk_fetch = git
+pkg_esh_mk_repo = https://github.com/crownedgrouse/esh.mk.git
+pkg_esh_mk_commit = master
+
+PACKAGES += espec
+pkg_espec_name = espec
+pkg_espec_description = ESpec: Behaviour driven development framework for Erlang
+pkg_espec_homepage = https://github.com/lucaspiller/espec
+pkg_espec_fetch = git
+pkg_espec_repo = https://github.com/lucaspiller/espec
+pkg_espec_commit = master
+
+PACKAGES += estatsd
+pkg_estatsd_name = estatsd
+pkg_estatsd_description = Erlang stats aggregation app that periodically flushes data to graphite
+pkg_estatsd_homepage = https://github.com/RJ/estatsd
+pkg_estatsd_fetch = git
+pkg_estatsd_repo = https://github.com/RJ/estatsd
+pkg_estatsd_commit = master
+
+PACKAGES += etap
+pkg_etap_name = etap
+pkg_etap_description = etap is a simple erlang testing library that provides TAP compliant output.
+pkg_etap_homepage = https://github.com/ngerakines/etap
+pkg_etap_fetch = git
+pkg_etap_repo = https://github.com/ngerakines/etap
+pkg_etap_commit = master
+
+PACKAGES += etest
+pkg_etest_name = etest
+pkg_etest_description = A lightweight, convention over configuration test framework for Erlang
+pkg_etest_homepage = https://github.com/wooga/etest
+pkg_etest_fetch = git
+pkg_etest_repo = https://github.com/wooga/etest
+pkg_etest_commit = master
+
+PACKAGES += etest_http
+pkg_etest_http_name = etest_http
+pkg_etest_http_description = etest Assertions around HTTP (client-side)
+pkg_etest_http_homepage = https://github.com/wooga/etest_http
+pkg_etest_http_fetch = git
+pkg_etest_http_repo = https://github.com/wooga/etest_http
+pkg_etest_http_commit = master
+
+PACKAGES += etoml
+pkg_etoml_name = etoml
+pkg_etoml_description = TOML language erlang parser
+pkg_etoml_homepage = https://github.com/kalta/etoml
+pkg_etoml_fetch = git
+pkg_etoml_repo = https://github.com/kalta/etoml
+pkg_etoml_commit = master
+
+PACKAGES += eunit
+pkg_eunit_name = eunit
+pkg_eunit_description = The EUnit lightweight unit testing framework for Erlang - this is the canonical development repository.
+pkg_eunit_homepage = https://github.com/richcarl/eunit
+pkg_eunit_fetch = git
+pkg_eunit_repo = https://github.com/richcarl/eunit
+pkg_eunit_commit = master
+
+PACKAGES += eunit_formatters
+pkg_eunit_formatters_name = eunit_formatters
+pkg_eunit_formatters_description = Because eunit's output sucks. Let's make it better.
+pkg_eunit_formatters_homepage = https://github.com/seancribbs/eunit_formatters
+pkg_eunit_formatters_fetch = git
+pkg_eunit_formatters_repo = https://github.com/seancribbs/eunit_formatters
+pkg_eunit_formatters_commit = master
+
+PACKAGES += euthanasia
+pkg_euthanasia_name = euthanasia
+pkg_euthanasia_description = Merciful killer for your Erlang processes
+pkg_euthanasia_homepage = https://github.com/doubleyou/euthanasia
+pkg_euthanasia_fetch = git
+pkg_euthanasia_repo = https://github.com/doubleyou/euthanasia
+pkg_euthanasia_commit = master
+
+PACKAGES += evum
+pkg_evum_name = evum
+pkg_evum_description = Spawn Linux VMs as Erlang processes in the Erlang VM
+pkg_evum_homepage = https://github.com/msantos/evum
+pkg_evum_fetch = git
+pkg_evum_repo = https://github.com/msantos/evum
+pkg_evum_commit = master
+
+PACKAGES += exec
+pkg_exec_name = erlexec
+pkg_exec_description = Execute and control OS processes from Erlang/OTP.
+pkg_exec_homepage = http://saleyn.github.com/erlexec
+pkg_exec_fetch = git
+pkg_exec_repo = https://github.com/saleyn/erlexec
+pkg_exec_commit = master
+
+PACKAGES += exml
+pkg_exml_name = exml
+pkg_exml_description = XML parsing library in Erlang
+pkg_exml_homepage = https://github.com/paulgray/exml
+pkg_exml_fetch = git
+pkg_exml_repo = https://github.com/paulgray/exml
+pkg_exml_commit = master
+
+PACKAGES += exometer
+pkg_exometer_name = exometer
+pkg_exometer_description = Basic measurement objects and probe behavior
+pkg_exometer_homepage = https://github.com/Feuerlabs/exometer
+pkg_exometer_fetch = git
+pkg_exometer_repo = https://github.com/Feuerlabs/exometer
+pkg_exometer_commit = master
+
+PACKAGES += exs1024
+pkg_exs1024_name = exs1024
+pkg_exs1024_description = Xorshift1024star pseudo random number generator for Erlang.
+pkg_exs1024_homepage = https://github.com/jj1bdx/exs1024
+pkg_exs1024_fetch = git
+pkg_exs1024_repo = https://github.com/jj1bdx/exs1024
+pkg_exs1024_commit = master
+
+PACKAGES += exs64
+pkg_exs64_name = exs64
+pkg_exs64_description = Xorshift64star pseudo random number generator for Erlang.
+pkg_exs64_homepage = https://github.com/jj1bdx/exs64
+pkg_exs64_fetch = git
+pkg_exs64_repo = https://github.com/jj1bdx/exs64
+pkg_exs64_commit = master
+
+PACKAGES += exsplus116
+pkg_exsplus116_name = exsplus116
+pkg_exsplus116_description = Xorshift116plus for Erlang
+pkg_exsplus116_homepage = https://github.com/jj1bdx/exsplus116
+pkg_exsplus116_fetch = git
+pkg_exsplus116_repo = https://github.com/jj1bdx/exsplus116
+pkg_exsplus116_commit = master
+
+PACKAGES += exsplus128
+pkg_exsplus128_name = exsplus128
+pkg_exsplus128_description = Xorshift128plus pseudo random number generator for Erlang.
+pkg_exsplus128_homepage = https://github.com/jj1bdx/exsplus128
+pkg_exsplus128_fetch = git
+pkg_exsplus128_repo = https://github.com/jj1bdx/exsplus128
+pkg_exsplus128_commit = master
+
+PACKAGES += ezmq
+pkg_ezmq_name = ezmq
+pkg_ezmq_description = zMQ implemented in Erlang
+pkg_ezmq_homepage = https://github.com/RoadRunnr/ezmq
+pkg_ezmq_fetch = git
+pkg_ezmq_repo = https://github.com/RoadRunnr/ezmq
+pkg_ezmq_commit = master
+
+PACKAGES += ezmtp
+pkg_ezmtp_name = ezmtp
+pkg_ezmtp_description = ZMTP protocol in pure Erlang.
+pkg_ezmtp_homepage = https://github.com/a13x/ezmtp
+pkg_ezmtp_fetch = git
+pkg_ezmtp_repo = https://github.com/a13x/ezmtp
+pkg_ezmtp_commit = master
+
+PACKAGES += fast_disk_log
+pkg_fast_disk_log_name = fast_disk_log
+pkg_fast_disk_log_description = Pool-based asynchronous Erlang disk logger
+pkg_fast_disk_log_homepage = https://github.com/lpgauth/fast_disk_log
+pkg_fast_disk_log_fetch = git
+pkg_fast_disk_log_repo = https://github.com/lpgauth/fast_disk_log
+pkg_fast_disk_log_commit = master
+
+PACKAGES += feeder
+pkg_feeder_name = feeder
+pkg_feeder_description = Stream parse RSS and Atom formatted XML feeds.
+pkg_feeder_homepage = https://github.com/michaelnisi/feeder
+pkg_feeder_fetch = git
+pkg_feeder_repo = https://github.com/michaelnisi/feeder
+pkg_feeder_commit = master
+
+PACKAGES += find_crate
+pkg_find_crate_name = find_crate
+pkg_find_crate_description = Find Rust libs and exes in Erlang application priv directory
+pkg_find_crate_homepage = https://github.com/goertzenator/find_crate
+pkg_find_crate_fetch = git
+pkg_find_crate_repo = https://github.com/goertzenator/find_crate
+pkg_find_crate_commit = master
+
+PACKAGES += fix
+pkg_fix_name = fix
+pkg_fix_description = http://fixprotocol.org/ implementation.
+pkg_fix_homepage = https://github.com/maxlapshin/fix
+pkg_fix_fetch = git
+pkg_fix_repo = https://github.com/maxlapshin/fix
+pkg_fix_commit = master
+
+PACKAGES += flower
+pkg_flower_name = flower
+pkg_flower_description = FlowER - an Erlang OpenFlow development platform
+pkg_flower_homepage = https://github.com/travelping/flower
+pkg_flower_fetch = git
+pkg_flower_repo = https://github.com/travelping/flower
+pkg_flower_commit = master
+
+PACKAGES += fn
+pkg_fn_name = fn
+pkg_fn_description = Function utilities for Erlang
+pkg_fn_homepage = https://github.com/reiddraper/fn
+pkg_fn_fetch = git
+pkg_fn_repo = https://github.com/reiddraper/fn
+pkg_fn_commit = master
+
+PACKAGES += folsom
+pkg_folsom_name = folsom
+pkg_folsom_description = Expose Erlang Events and Metrics
+pkg_folsom_homepage = https://github.com/boundary/folsom
+pkg_folsom_fetch = git
+pkg_folsom_repo = https://github.com/boundary/folsom
+pkg_folsom_commit = master
+
+PACKAGES += folsom_cowboy
+pkg_folsom_cowboy_name = folsom_cowboy
+pkg_folsom_cowboy_description = A Cowboy based Folsom HTTP Wrapper.
+pkg_folsom_cowboy_homepage = https://github.com/boundary/folsom_cowboy
+pkg_folsom_cowboy_fetch = git
+pkg_folsom_cowboy_repo = https://github.com/boundary/folsom_cowboy
+pkg_folsom_cowboy_commit = master
+
+PACKAGES += folsomite
+pkg_folsomite_name = folsomite
+pkg_folsomite_description = blow up your graphite / riemann server with folsom metrics
+pkg_folsomite_homepage = https://github.com/campanja/folsomite
+pkg_folsomite_fetch = git
+pkg_folsomite_repo = https://github.com/campanja/folsomite
+pkg_folsomite_commit = master
+
+PACKAGES += fs
+pkg_fs_name = fs
+pkg_fs_description = Erlang FileSystem Listener
+pkg_fs_homepage = https://github.com/synrc/fs
+pkg_fs_fetch = git
+pkg_fs_repo = https://github.com/synrc/fs
+pkg_fs_commit = master
+
+PACKAGES += fuse
+pkg_fuse_name = fuse
+pkg_fuse_description = A Circuit Breaker for Erlang
+pkg_fuse_homepage = https://github.com/jlouis/fuse
+pkg_fuse_fetch = git
+pkg_fuse_repo = https://github.com/jlouis/fuse
+pkg_fuse_commit = master
+
+PACKAGES += gcm
+pkg_gcm_name = gcm
+pkg_gcm_description = An Erlang application for Google Cloud Messaging
+pkg_gcm_homepage = https://github.com/pdincau/gcm-erlang
+pkg_gcm_fetch = git
+pkg_gcm_repo = https://github.com/pdincau/gcm-erlang
+pkg_gcm_commit = master
+
+PACKAGES += gcprof
+pkg_gcprof_name = gcprof
+pkg_gcprof_description = Garbage Collection profiler for Erlang
+pkg_gcprof_homepage = https://github.com/knutin/gcprof
+pkg_gcprof_fetch = git
+pkg_gcprof_repo = https://github.com/knutin/gcprof
+pkg_gcprof_commit = master
+
+PACKAGES += geas
+pkg_geas_name = geas
+pkg_geas_description = Guess Erlang Application Scattering
+pkg_geas_homepage = https://github.com/crownedgrouse/geas
+pkg_geas_fetch = git
+pkg_geas_repo = https://github.com/crownedgrouse/geas
+pkg_geas_commit = master
+
+PACKAGES += geef
+pkg_geef_name = geef
+pkg_geef_description = Git NEEEEF (Erlang NIF)
+pkg_geef_homepage = https://github.com/carlosmn/geef
+pkg_geef_fetch = git
+pkg_geef_repo = https://github.com/carlosmn/geef
+pkg_geef_commit = master
+
+PACKAGES += gen_coap
+pkg_gen_coap_name = gen_coap
+pkg_gen_coap_description = Generic Erlang CoAP Client/Server
+pkg_gen_coap_homepage = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_fetch = git
+pkg_gen_coap_repo = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_commit = master
+
+PACKAGES += gen_cycle
+pkg_gen_cycle_name = gen_cycle
+pkg_gen_cycle_description = Simple, generic OTP behaviour for recurring tasks
+pkg_gen_cycle_homepage = https://github.com/aerosol/gen_cycle
+pkg_gen_cycle_fetch = git
+pkg_gen_cycle_repo = https://github.com/aerosol/gen_cycle
+pkg_gen_cycle_commit = develop
+
+PACKAGES += gen_icmp
+pkg_gen_icmp_name = gen_icmp
+pkg_gen_icmp_description = Erlang interface to ICMP sockets
+pkg_gen_icmp_homepage = https://github.com/msantos/gen_icmp
+pkg_gen_icmp_fetch = git
+pkg_gen_icmp_repo = https://github.com/msantos/gen_icmp
+pkg_gen_icmp_commit = master
+
+PACKAGES += gen_leader
+pkg_gen_leader_name = gen_leader
+pkg_gen_leader_description = leader election behavior
+pkg_gen_leader_homepage = https://github.com/garret-smith/gen_leader_revival
+pkg_gen_leader_fetch = git
+pkg_gen_leader_repo = https://github.com/garret-smith/gen_leader_revival
+pkg_gen_leader_commit = master
+
+PACKAGES += gen_nb_server
+pkg_gen_nb_server_name = gen_nb_server
+pkg_gen_nb_server_description = OTP behavior for writing non-blocking servers
+pkg_gen_nb_server_homepage = https://github.com/kevsmith/gen_nb_server
+pkg_gen_nb_server_fetch = git
+pkg_gen_nb_server_repo = https://github.com/kevsmith/gen_nb_server
+pkg_gen_nb_server_commit = master
+
+PACKAGES += gen_paxos
+pkg_gen_paxos_name = gen_paxos
+pkg_gen_paxos_description = An Erlang/OTP-style implementation of the Paxos distributed consensus protocol
+pkg_gen_paxos_homepage = https://github.com/gburd/gen_paxos
+pkg_gen_paxos_fetch = git
+pkg_gen_paxos_repo = https://github.com/gburd/gen_paxos
+pkg_gen_paxos_commit = master
+
+PACKAGES += gen_rpc
+pkg_gen_rpc_name = gen_rpc
+pkg_gen_rpc_description = A scalable RPC library for Erlang-VM based languages
+pkg_gen_rpc_homepage = https://github.com/priestjim/gen_rpc.git
+pkg_gen_rpc_fetch = git
+pkg_gen_rpc_repo = https://github.com/priestjim/gen_rpc.git
+pkg_gen_rpc_commit = master
+
+PACKAGES += gen_smtp
+pkg_gen_smtp_name = gen_smtp
+pkg_gen_smtp_description = A generic Erlang SMTP server and client that can be extended via callback modules
+pkg_gen_smtp_homepage = https://github.com/Vagabond/gen_smtp
+pkg_gen_smtp_fetch = git
+pkg_gen_smtp_repo = https://github.com/Vagabond/gen_smtp
+pkg_gen_smtp_commit = master
+
+PACKAGES += gen_tracker
+pkg_gen_tracker_name = gen_tracker
+pkg_gen_tracker_description = supervisor with ets handling of children and their metadata
+pkg_gen_tracker_homepage = https://github.com/erlyvideo/gen_tracker
+pkg_gen_tracker_fetch = git
+pkg_gen_tracker_repo = https://github.com/erlyvideo/gen_tracker
+pkg_gen_tracker_commit = master
+
+PACKAGES += gen_unix
+pkg_gen_unix_name = gen_unix
+pkg_gen_unix_description = Erlang Unix socket interface
+pkg_gen_unix_homepage = https://github.com/msantos/gen_unix
+pkg_gen_unix_fetch = git
+pkg_gen_unix_repo = https://github.com/msantos/gen_unix
+pkg_gen_unix_commit = master
+
+PACKAGES += geode
+pkg_geode_name = geode
+pkg_geode_description = geohash/proximity lookup in pure, uncut erlang.
+pkg_geode_homepage = https://github.com/bradfordw/geode
+pkg_geode_fetch = git
+pkg_geode_repo = https://github.com/bradfordw/geode
+pkg_geode_commit = master
+
+PACKAGES += getopt
+pkg_getopt_name = getopt
+pkg_getopt_description = Module to parse command line arguments using the GNU getopt syntax
+pkg_getopt_homepage = https://github.com/jcomellas/getopt
+pkg_getopt_fetch = git
+pkg_getopt_repo = https://github.com/jcomellas/getopt
+pkg_getopt_commit = master
+
+PACKAGES += gettext
+pkg_gettext_name = gettext
+pkg_gettext_description = Erlang internationalization library.
+pkg_gettext_homepage = https://github.com/etnt/gettext
+pkg_gettext_fetch = git
+pkg_gettext_repo = https://github.com/etnt/gettext
+pkg_gettext_commit = master
+
+PACKAGES += giallo
+pkg_giallo_name = giallo
+pkg_giallo_description = Small and flexible web framework on top of Cowboy
+pkg_giallo_homepage = https://github.com/kivra/giallo
+pkg_giallo_fetch = git
+pkg_giallo_repo = https://github.com/kivra/giallo
+pkg_giallo_commit = master
+
+PACKAGES += gin
+pkg_gin_name = gin
+pkg_gin_description = Guard helpers parse_transform for Erlang
+pkg_gin_homepage = https://github.com/mad-cocktail/gin
+pkg_gin_fetch = git
+pkg_gin_repo = https://github.com/mad-cocktail/gin
+pkg_gin_commit = master
+
+PACKAGES += gitty
+pkg_gitty_name = gitty
+pkg_gitty_description = Git access in erlang
+pkg_gitty_homepage = https://github.com/maxlapshin/gitty
+pkg_gitty_fetch = git
+pkg_gitty_repo = https://github.com/maxlapshin/gitty
+pkg_gitty_commit = master
+
+PACKAGES += gold_fever
+pkg_gold_fever_name = gold_fever
+pkg_gold_fever_description = A Treasure Hunt for Erlangers
+pkg_gold_fever_homepage = https://github.com/inaka/gold_fever
+pkg_gold_fever_fetch = git
+pkg_gold_fever_repo = https://github.com/inaka/gold_fever
+pkg_gold_fever_commit = master
+
+PACKAGES += gpb
+pkg_gpb_name = gpb
+pkg_gpb_description = A Google Protobuf implementation for Erlang
+pkg_gpb_homepage = https://github.com/tomas-abrahamsson/gpb
+pkg_gpb_fetch = git
+pkg_gpb_repo = https://github.com/tomas-abrahamsson/gpb
+pkg_gpb_commit = master
+
+PACKAGES += gproc
+pkg_gproc_name = gproc
+pkg_gproc_description = Extended process registry for Erlang
+pkg_gproc_homepage = https://github.com/uwiger/gproc
+pkg_gproc_fetch = git
+pkg_gproc_repo = https://github.com/uwiger/gproc
+pkg_gproc_commit = master
+
+PACKAGES += grapherl
+pkg_grapherl_name = grapherl
+pkg_grapherl_description = Create graphs of Erlang systems and programs
+pkg_grapherl_homepage = https://github.com/eproxus/grapherl
+pkg_grapherl_fetch = git
+pkg_grapherl_repo = https://github.com/eproxus/grapherl
+pkg_grapherl_commit = master
+
+PACKAGES += grpc
+pkg_grpc_name = grpc
+pkg_grpc_description = gRPC server in Erlang
+pkg_grpc_homepage = https://github.com/Bluehouse-Technology/grpc
+pkg_grpc_fetch = git
+pkg_grpc_repo = https://github.com/Bluehouse-Technology/grpc
+pkg_grpc_commit = master
+
+PACKAGES += grpc_client
+pkg_grpc_client_name = grpc_client
+pkg_grpc_client_description = gRPC client in Erlang
+pkg_grpc_client_homepage = https://github.com/Bluehouse-Technology/grpc_client
+pkg_grpc_client_fetch = git
+pkg_grpc_client_repo = https://github.com/Bluehouse-Technology/grpc_client
+pkg_grpc_client_commit = master
+
+PACKAGES += gun
+pkg_gun_name = gun
+pkg_gun_description = Asynchronous SPDY, HTTP and Websocket client written in Erlang.
+pkg_gun_homepage = http://ninenines.eu
+pkg_gun_fetch = git
+pkg_gun_repo = https://github.com/ninenines/gun
+pkg_gun_commit = master
+
+PACKAGES += gut
+pkg_gut_name = gut
+pkg_gut_description = gut is a template printing, aka scaffolding, tool for Erlang. Like rails generate or yeoman
+pkg_gut_homepage = https://github.com/unbalancedparentheses/gut
+pkg_gut_fetch = git
+pkg_gut_repo = https://github.com/unbalancedparentheses/gut
+pkg_gut_commit = master
+
+PACKAGES += hackney
+pkg_hackney_name = hackney
+pkg_hackney_description = simple HTTP client in Erlang
+pkg_hackney_homepage = https://github.com/benoitc/hackney
+pkg_hackney_fetch = git
+pkg_hackney_repo = https://github.com/benoitc/hackney
+pkg_hackney_commit = master
+
+PACKAGES += hamcrest
+pkg_hamcrest_name = hamcrest
+pkg_hamcrest_description = Erlang port of Hamcrest
+pkg_hamcrest_homepage = https://github.com/hyperthunk/hamcrest-erlang
+pkg_hamcrest_fetch = git
+pkg_hamcrest_repo = https://github.com/hyperthunk/hamcrest-erlang
+pkg_hamcrest_commit = master
+
+PACKAGES += hanoidb
+pkg_hanoidb_name = hanoidb
+pkg_hanoidb_description = Erlang LSM BTree Storage
+pkg_hanoidb_homepage = https://github.com/krestenkrab/hanoidb
+pkg_hanoidb_fetch = git
+pkg_hanoidb_repo = https://github.com/krestenkrab/hanoidb
+pkg_hanoidb_commit = master
+
+PACKAGES += hottub
+pkg_hottub_name = hottub
+pkg_hottub_description = Permanent Erlang Worker Pool
+pkg_hottub_homepage = https://github.com/bfrog/hottub
+pkg_hottub_fetch = git
+pkg_hottub_repo = https://github.com/bfrog/hottub
+pkg_hottub_commit = master
+
+PACKAGES += hpack
+pkg_hpack_name = hpack
+pkg_hpack_description = HPACK Implementation for Erlang
+pkg_hpack_homepage = https://github.com/joedevivo/hpack
+pkg_hpack_fetch = git
+pkg_hpack_repo = https://github.com/joedevivo/hpack
+pkg_hpack_commit = master
+
+PACKAGES += hyper
+pkg_hyper_name = hyper
+pkg_hyper_description = Erlang implementation of HyperLogLog
+pkg_hyper_homepage = https://github.com/GameAnalytics/hyper
+pkg_hyper_fetch = git
+pkg_hyper_repo = https://github.com/GameAnalytics/hyper
+pkg_hyper_commit = master
+
+PACKAGES += i18n
+pkg_i18n_name = i18n
+pkg_i18n_description = International components for unicode from Erlang (unicode, date, string, number, format, locale, localization, transliteration, icu4e)
+pkg_i18n_homepage = https://github.com/erlang-unicode/i18n
+pkg_i18n_fetch = git
+pkg_i18n_repo = https://github.com/erlang-unicode/i18n
+pkg_i18n_commit = master
+
+PACKAGES += ibrowse
+pkg_ibrowse_name = ibrowse
+pkg_ibrowse_description = Erlang HTTP client
+pkg_ibrowse_homepage = https://github.com/cmullaparthi/ibrowse
+pkg_ibrowse_fetch = git
+pkg_ibrowse_repo = https://github.com/cmullaparthi/ibrowse
+pkg_ibrowse_commit = master
+
+PACKAGES += idna
+pkg_idna_name = idna
+pkg_idna_description = Erlang IDNA lib
+pkg_idna_homepage = https://github.com/benoitc/erlang-idna
+pkg_idna_fetch = git
+pkg_idna_repo = https://github.com/benoitc/erlang-idna
+pkg_idna_commit = master
+
+PACKAGES += ierlang
+pkg_ierlang_name = ierlang
+pkg_ierlang_description = An Erlang language kernel for IPython.
+pkg_ierlang_homepage = https://github.com/robbielynch/ierlang
+pkg_ierlang_fetch = git
+pkg_ierlang_repo = https://github.com/robbielynch/ierlang
+pkg_ierlang_commit = master
+
+PACKAGES += iota
+pkg_iota_name = iota
+pkg_iota_description = iota (Inter-dependency Objective Testing Apparatus) - a tool to enforce clean separation of responsibilities in Erlang code
+pkg_iota_homepage = https://github.com/jpgneves/iota
+pkg_iota_fetch = git
+pkg_iota_repo = https://github.com/jpgneves/iota
+pkg_iota_commit = master
+
+PACKAGES += irc_lib
+pkg_irc_lib_name = irc_lib
+pkg_irc_lib_description = Erlang irc client library
+pkg_irc_lib_homepage = https://github.com/OtpChatBot/irc_lib
+pkg_irc_lib_fetch = git
+pkg_irc_lib_repo = https://github.com/OtpChatBot/irc_lib
+pkg_irc_lib_commit = master
+
+PACKAGES += ircd
+pkg_ircd_name = ircd
+pkg_ircd_description = A pluggable IRC daemon application/library for Erlang.
+pkg_ircd_homepage = https://github.com/tonyg/erlang-ircd
+pkg_ircd_fetch = git
+pkg_ircd_repo = https://github.com/tonyg/erlang-ircd
+pkg_ircd_commit = master
+
+PACKAGES += iris
+pkg_iris_name = iris
+pkg_iris_description = Iris Erlang binding
+pkg_iris_homepage = https://github.com/project-iris/iris-erl
+pkg_iris_fetch = git
+pkg_iris_repo = https://github.com/project-iris/iris-erl
+pkg_iris_commit = master
+
+PACKAGES += iso8601
+pkg_iso8601_name = iso8601
+pkg_iso8601_description = Erlang ISO 8601 date formatter/parser
+pkg_iso8601_homepage = https://github.com/seansawyer/erlang_iso8601
+pkg_iso8601_fetch = git
+pkg_iso8601_repo = https://github.com/seansawyer/erlang_iso8601
+pkg_iso8601_commit = master
+
+PACKAGES += jamdb_sybase
+pkg_jamdb_sybase_name = jamdb_sybase
+pkg_jamdb_sybase_description = Erlang driver for SAP Sybase ASE
+pkg_jamdb_sybase_homepage = https://github.com/erlangbureau/jamdb_sybase
+pkg_jamdb_sybase_fetch = git
+pkg_jamdb_sybase_repo = https://github.com/erlangbureau/jamdb_sybase
+pkg_jamdb_sybase_commit = master
+
+PACKAGES += jerg
+pkg_jerg_name = jerg
+pkg_jerg_description = JSON Schema to Erlang Records Generator
+pkg_jerg_homepage = https://github.com/ddossot/jerg
+pkg_jerg_fetch = git
+pkg_jerg_repo = https://github.com/ddossot/jerg
+pkg_jerg_commit = master
+
+PACKAGES += jesse
+pkg_jesse_name = jesse
+pkg_jesse_description = jesse (JSon Schema Erlang) is an implementation of a json schema validator for Erlang.
+pkg_jesse_homepage = https://github.com/for-GET/jesse
+pkg_jesse_fetch = git
+pkg_jesse_repo = https://github.com/for-GET/jesse
+pkg_jesse_commit = master
+
+PACKAGES += jiffy
+pkg_jiffy_name = jiffy
+pkg_jiffy_description = JSON NIFs for Erlang.
+pkg_jiffy_homepage = https://github.com/davisp/jiffy
+pkg_jiffy_fetch = git
+pkg_jiffy_repo = https://github.com/davisp/jiffy
+pkg_jiffy_commit = master
+
+PACKAGES += jiffy_v
+pkg_jiffy_v_name = jiffy_v
+pkg_jiffy_v_description = JSON validation utility
+pkg_jiffy_v_homepage = https://github.com/shizzard/jiffy-v
+pkg_jiffy_v_fetch = git
+pkg_jiffy_v_repo = https://github.com/shizzard/jiffy-v
+pkg_jiffy_v_commit = master
+
+PACKAGES += jobs
+pkg_jobs_name = jobs
+pkg_jobs_description = a Job scheduler for load regulation
+pkg_jobs_homepage = https://github.com/esl/jobs
+pkg_jobs_fetch = git
+pkg_jobs_repo = https://github.com/esl/jobs
+pkg_jobs_commit = master
+
+PACKAGES += joxa
+pkg_joxa_name = joxa
+pkg_joxa_description = A Modern Lisp for the Erlang VM
+pkg_joxa_homepage = https://github.com/joxa/joxa
+pkg_joxa_fetch = git
+pkg_joxa_repo = https://github.com/joxa/joxa
+pkg_joxa_commit = master
+
+PACKAGES += json
+pkg_json_name = json
+pkg_json_description = a high level json library for erlang (17.0+)
+pkg_json_homepage = https://github.com/talentdeficit/json
+pkg_json_fetch = git
+pkg_json_repo = https://github.com/talentdeficit/json
+pkg_json_commit = master
+
+PACKAGES += json_rec
+pkg_json_rec_name = json_rec
+pkg_json_rec_description = JSON to erlang record
+pkg_json_rec_homepage = https://github.com/justinkirby/json_rec
+pkg_json_rec_fetch = git
+pkg_json_rec_repo = https://github.com/justinkirby/json_rec
+pkg_json_rec_commit = master
+
+PACKAGES += jsone
+pkg_jsone_name = jsone
+pkg_jsone_description = An Erlang library for encoding and decoding JSON data.
+pkg_jsone_homepage = https://github.com/sile/jsone.git
+pkg_jsone_fetch = git
+pkg_jsone_repo = https://github.com/sile/jsone.git
+pkg_jsone_commit = master
+
+PACKAGES += jsonerl
+pkg_jsonerl_name = jsonerl
+pkg_jsonerl_description = yet another but slightly different erlang <-> json encoder/decoder
+pkg_jsonerl_homepage = https://github.com/lambder/jsonerl
+pkg_jsonerl_fetch = git
+pkg_jsonerl_repo = https://github.com/lambder/jsonerl
+pkg_jsonerl_commit = master
+
+PACKAGES += jsonpath
+pkg_jsonpath_name = jsonpath
+pkg_jsonpath_description = Fast Erlang JSON data retrieval and updates via javascript-like notation
+pkg_jsonpath_homepage = https://github.com/GeneStevens/jsonpath
+pkg_jsonpath_fetch = git
+pkg_jsonpath_repo = https://github.com/GeneStevens/jsonpath
+pkg_jsonpath_commit = master
+
+PACKAGES += jsonx
+pkg_jsonx_name = jsonx
+pkg_jsonx_description = JSONX is an Erlang library for efficient JSON decoding and encoding, written in C.
+pkg_jsonx_homepage = https://github.com/iskra/jsonx
+pkg_jsonx_fetch = git
+pkg_jsonx_repo = https://github.com/iskra/jsonx
+pkg_jsonx_commit = master
+
+PACKAGES += jsx
+pkg_jsx_name = jsx
+pkg_jsx_description = An Erlang application for consuming, producing and manipulating JSON.
+pkg_jsx_homepage = https://github.com/talentdeficit/jsx
+pkg_jsx_fetch = git
+pkg_jsx_repo = https://github.com/talentdeficit/jsx
+pkg_jsx_commit = master
+
+PACKAGES += kafka
+pkg_kafka_name = kafka
+pkg_kafka_description = Kafka consumer and producer in Erlang
+pkg_kafka_homepage = https://github.com/wooga/kafka-erlang
+pkg_kafka_fetch = git
+pkg_kafka_repo = https://github.com/wooga/kafka-erlang
+pkg_kafka_commit = master
+
+PACKAGES += kafka_protocol
+pkg_kafka_protocol_name = kafka_protocol
+pkg_kafka_protocol_description = Kafka protocol Erlang library
+pkg_kafka_protocol_homepage = https://github.com/klarna/kafka_protocol
+pkg_kafka_protocol_fetch = git
+pkg_kafka_protocol_repo = https://github.com/klarna/kafka_protocol.git
+pkg_kafka_protocol_commit = master
+
+PACKAGES += kai
+pkg_kai_name = kai
+pkg_kai_description = DHT storage by Takeshi Inoue
+pkg_kai_homepage = https://github.com/synrc/kai
+pkg_kai_fetch = git
+pkg_kai_repo = https://github.com/synrc/kai
+pkg_kai_commit = master
+
+PACKAGES += katja
+pkg_katja_name = katja
+pkg_katja_description = A simple Riemann client written in Erlang.
+pkg_katja_homepage = https://github.com/nifoc/katja
+pkg_katja_fetch = git
+pkg_katja_repo = https://github.com/nifoc/katja
+pkg_katja_commit = master
+
+PACKAGES += kdht
+pkg_kdht_name = kdht
+pkg_kdht_description = kdht is an erlang DHT implementation
+pkg_kdht_homepage = https://github.com/kevinlynx/kdht
+pkg_kdht_fetch = git
+pkg_kdht_repo = https://github.com/kevinlynx/kdht
+pkg_kdht_commit = master
+
+PACKAGES += key2value
+pkg_key2value_name = key2value
+pkg_key2value_description = Erlang 2-way map
+pkg_key2value_homepage = https://github.com/okeuday/key2value
+pkg_key2value_fetch = git
+pkg_key2value_repo = https://github.com/okeuday/key2value
+pkg_key2value_commit = master
+
+PACKAGES += keys1value
+pkg_keys1value_name = keys1value
+pkg_keys1value_description = Erlang set associative map for key lists
+pkg_keys1value_homepage = https://github.com/okeuday/keys1value
+pkg_keys1value_fetch = git
+pkg_keys1value_repo = https://github.com/okeuday/keys1value
+pkg_keys1value_commit = master
+
+PACKAGES += kinetic
+pkg_kinetic_name = kinetic
+pkg_kinetic_description = Erlang Kinesis Client
+pkg_kinetic_homepage = https://github.com/AdRoll/kinetic
+pkg_kinetic_fetch = git
+pkg_kinetic_repo = https://github.com/AdRoll/kinetic
+pkg_kinetic_commit = master
+
+PACKAGES += kjell
+pkg_kjell_name = kjell
+pkg_kjell_description = Erlang Shell
+pkg_kjell_homepage = https://github.com/karlll/kjell
+pkg_kjell_fetch = git
+pkg_kjell_repo = https://github.com/karlll/kjell
+pkg_kjell_commit = master
+
+PACKAGES += kraken
+pkg_kraken_name = kraken
+pkg_kraken_description = Distributed Pubsub Server for Realtime Apps
+pkg_kraken_homepage = https://github.com/Asana/kraken
+pkg_kraken_fetch = git
+pkg_kraken_repo = https://github.com/Asana/kraken
+pkg_kraken_commit = master
+
+PACKAGES += kucumberl
+pkg_kucumberl_name = kucumberl
+pkg_kucumberl_description = A pure-erlang, open-source, implementation of Cucumber
+pkg_kucumberl_homepage = https://github.com/openshine/kucumberl
+pkg_kucumberl_fetch = git
+pkg_kucumberl_repo = https://github.com/openshine/kucumberl
+pkg_kucumberl_commit = master
+
+PACKAGES += kvc
+pkg_kvc_name = kvc
+pkg_kvc_description = KVC - Key Value Coding for Erlang data structures
+pkg_kvc_homepage = https://github.com/etrepum/kvc
+pkg_kvc_fetch = git
+pkg_kvc_repo = https://github.com/etrepum/kvc
+pkg_kvc_commit = master
+
+PACKAGES += kvlists
+pkg_kvlists_name = kvlists
+pkg_kvlists_description = Lists of key-value pairs (decoded JSON) in Erlang
+pkg_kvlists_homepage = https://github.com/jcomellas/kvlists
+pkg_kvlists_fetch = git
+pkg_kvlists_repo = https://github.com/jcomellas/kvlists
+pkg_kvlists_commit = master
+
+PACKAGES += kvs
+pkg_kvs_name = kvs
+pkg_kvs_description = Container and Iterator
+pkg_kvs_homepage = https://github.com/synrc/kvs
+pkg_kvs_fetch = git
+pkg_kvs_repo = https://github.com/synrc/kvs
+pkg_kvs_commit = master
+
+PACKAGES += lager
+pkg_lager_name = lager
+pkg_lager_description = A logging framework for Erlang/OTP.
+pkg_lager_homepage = https://github.com/erlang-lager/lager
+pkg_lager_fetch = git
+pkg_lager_repo = https://github.com/erlang-lager/lager
+pkg_lager_commit = master
+
+PACKAGES += lager_amqp_backend
+pkg_lager_amqp_backend_name = lager_amqp_backend
+pkg_lager_amqp_backend_description = AMQP RabbitMQ Lager backend
+pkg_lager_amqp_backend_homepage = https://github.com/jbrisbin/lager_amqp_backend
+pkg_lager_amqp_backend_fetch = git
+pkg_lager_amqp_backend_repo = https://github.com/jbrisbin/lager_amqp_backend
+pkg_lager_amqp_backend_commit = master
+
+PACKAGES += lager_syslog
+pkg_lager_syslog_name = lager_syslog
+pkg_lager_syslog_description = Syslog backend for lager
+pkg_lager_syslog_homepage = https://github.com/erlang-lager/lager_syslog
+pkg_lager_syslog_fetch = git
+pkg_lager_syslog_repo = https://github.com/erlang-lager/lager_syslog
+pkg_lager_syslog_commit = master
+
+PACKAGES += lambdapad
+pkg_lambdapad_name = lambdapad
+pkg_lambdapad_description = Static site generator using Erlang. Yes, Erlang.
+pkg_lambdapad_homepage = https://github.com/gar1t/lambdapad
+pkg_lambdapad_fetch = git
+pkg_lambdapad_repo = https://github.com/gar1t/lambdapad
+pkg_lambdapad_commit = master
+
+PACKAGES += lasp
+pkg_lasp_name = lasp
+pkg_lasp_description = A Language for Distributed, Eventually Consistent Computations
+pkg_lasp_homepage = http://lasp-lang.org/
+pkg_lasp_fetch = git
+pkg_lasp_repo = https://github.com/lasp-lang/lasp
+pkg_lasp_commit = master
+
+PACKAGES += lasse
+pkg_lasse_name = lasse
+pkg_lasse_description = SSE handler for Cowboy
+pkg_lasse_homepage = https://github.com/inaka/lasse
+pkg_lasse_fetch = git
+pkg_lasse_repo = https://github.com/inaka/lasse
+pkg_lasse_commit = master
+
+PACKAGES += ldap
+pkg_ldap_name = ldap
+pkg_ldap_description = LDAP server written in Erlang
+pkg_ldap_homepage = https://github.com/spawnproc/ldap
+pkg_ldap_fetch = git
+pkg_ldap_repo = https://github.com/spawnproc/ldap
+pkg_ldap_commit = master
+
+PACKAGES += lethink
+pkg_lethink_name = lethink
+pkg_lethink_description = erlang driver for rethinkdb
+pkg_lethink_homepage = https://github.com/taybin/lethink
+pkg_lethink_fetch = git
+pkg_lethink_repo = https://github.com/taybin/lethink
+pkg_lethink_commit = master
+
+PACKAGES += lfe
+pkg_lfe_name = lfe
+pkg_lfe_description = Lisp Flavoured Erlang (LFE)
+pkg_lfe_homepage = https://github.com/rvirding/lfe
+pkg_lfe_fetch = git
+pkg_lfe_repo = https://github.com/rvirding/lfe
+pkg_lfe_commit = master
+
+PACKAGES += ling
+pkg_ling_name = ling
+pkg_ling_description = Erlang on Xen
+pkg_ling_homepage = https://github.com/cloudozer/ling
+pkg_ling_fetch = git
+pkg_ling_repo = https://github.com/cloudozer/ling
+pkg_ling_commit = master
+
+PACKAGES += live
+pkg_live_name = live
+pkg_live_description = Automated module and configuration reloader.
+pkg_live_homepage = http://ninenines.eu
+pkg_live_fetch = git
+pkg_live_repo = https://github.com/ninenines/live
+pkg_live_commit = master
+
+PACKAGES += lmq
+pkg_lmq_name = lmq
+pkg_lmq_description = Lightweight Message Queue
+pkg_lmq_homepage = https://github.com/iij/lmq
+pkg_lmq_fetch = git
+pkg_lmq_repo = https://github.com/iij/lmq
+pkg_lmq_commit = master
+
+PACKAGES += locker
+pkg_locker_name = locker
+pkg_locker_description = Atomic distributed 'check and set' for short-lived keys
+pkg_locker_homepage = https://github.com/wooga/locker
+pkg_locker_fetch = git
+pkg_locker_repo = https://github.com/wooga/locker
+pkg_locker_commit = master
+
+PACKAGES += locks
+pkg_locks_name = locks
+pkg_locks_description = A scalable, deadlock-resolving resource locker
+pkg_locks_homepage = https://github.com/uwiger/locks
+pkg_locks_fetch = git
+pkg_locks_repo = https://github.com/uwiger/locks
+pkg_locks_commit = master
+
+PACKAGES += log4erl
+pkg_log4erl_name = log4erl
+pkg_log4erl_description = A logger for erlang in the spirit of Log4J.
+pkg_log4erl_homepage = https://github.com/ahmednawras/log4erl
+pkg_log4erl_fetch = git
+pkg_log4erl_repo = https://github.com/ahmednawras/log4erl
+pkg_log4erl_commit = master
+
+PACKAGES += lol
+pkg_lol_name = lol
+pkg_lol_description = Lisp on erLang, and programming is fun again
+pkg_lol_homepage = https://github.com/b0oh/lol
+pkg_lol_fetch = git
+pkg_lol_repo = https://github.com/b0oh/lol
+pkg_lol_commit = master
+
+PACKAGES += lucid
+pkg_lucid_name = lucid
+pkg_lucid_description = HTTP/2 server written in Erlang
+pkg_lucid_homepage = https://github.com/tatsuhiro-t/lucid
+pkg_lucid_fetch = git
+pkg_lucid_repo = https://github.com/tatsuhiro-t/lucid
+pkg_lucid_commit = master
+
+PACKAGES += luerl
+pkg_luerl_name = luerl
+pkg_luerl_description = Lua in Erlang
+pkg_luerl_homepage = https://github.com/rvirding/luerl
+pkg_luerl_fetch = git
+pkg_luerl_repo = https://github.com/rvirding/luerl
+pkg_luerl_commit = develop
+
+PACKAGES += luwak
+pkg_luwak_name = luwak
+pkg_luwak_description = Large-object storage interface for Riak
+pkg_luwak_homepage = https://github.com/basho/luwak
+pkg_luwak_fetch = git
+pkg_luwak_repo = https://github.com/basho/luwak
+pkg_luwak_commit = master
+
+PACKAGES += lux
+pkg_lux_name = lux
+pkg_lux_description = Lux (LUcid eXpect scripting) simplifies test automation and provides an Expect-style execution of commands
+pkg_lux_homepage = https://github.com/hawk/lux
+pkg_lux_fetch = git
+pkg_lux_repo = https://github.com/hawk/lux
+pkg_lux_commit = master
+
+PACKAGES += machi
+pkg_machi_name = machi
+pkg_machi_description = Machi file store
+pkg_machi_homepage = https://github.com/basho/machi
+pkg_machi_fetch = git
+pkg_machi_repo = https://github.com/basho/machi
+pkg_machi_commit = master
+
+PACKAGES += mad
+pkg_mad_name = mad
+pkg_mad_description = Small and Fast Rebar Replacement
+pkg_mad_homepage = https://github.com/synrc/mad
+pkg_mad_fetch = git
+pkg_mad_repo = https://github.com/synrc/mad
+pkg_mad_commit = master
+
+PACKAGES += marina
+pkg_marina_name = marina
+pkg_marina_description = Non-blocking Erlang Cassandra CQL3 client
+pkg_marina_homepage = https://github.com/lpgauth/marina
+pkg_marina_fetch = git
+pkg_marina_repo = https://github.com/lpgauth/marina
+pkg_marina_commit = master
+
+PACKAGES += mavg
+pkg_mavg_name = mavg
+pkg_mavg_description = Erlang :: Exponential moving average library
+pkg_mavg_homepage = https://github.com/EchoTeam/mavg
+pkg_mavg_fetch = git
+pkg_mavg_repo = https://github.com/EchoTeam/mavg
+pkg_mavg_commit = master
+
+PACKAGES += mc_erl
+pkg_mc_erl_name = mc_erl
+pkg_mc_erl_description = mc-erl is a server for Minecraft 1.4.7 written in Erlang.
+pkg_mc_erl_homepage = https://github.com/clonejo/mc-erl
+pkg_mc_erl_fetch = git
+pkg_mc_erl_repo = https://github.com/clonejo/mc-erl
+pkg_mc_erl_commit = master
+
+PACKAGES += mcd
+pkg_mcd_name = mcd
+pkg_mcd_description = Fast memcached protocol client in pure Erlang
+pkg_mcd_homepage = https://github.com/EchoTeam/mcd
+pkg_mcd_fetch = git
+pkg_mcd_repo = https://github.com/EchoTeam/mcd
+pkg_mcd_commit = master
+
+PACKAGES += mcerlang
+pkg_mcerlang_name = mcerlang
+pkg_mcerlang_description = The McErlang model checker for Erlang
+pkg_mcerlang_homepage = https://github.com/fredlund/McErlang
+pkg_mcerlang_fetch = git
+pkg_mcerlang_repo = https://github.com/fredlund/McErlang
+pkg_mcerlang_commit = master
+
+PACKAGES += meck
+pkg_meck_name = meck
+pkg_meck_description = A mocking library for Erlang
+pkg_meck_homepage = https://github.com/eproxus/meck
+pkg_meck_fetch = git
+pkg_meck_repo = https://github.com/eproxus/meck
+pkg_meck_commit = master
+
+PACKAGES += mekao
+pkg_mekao_name = mekao
+pkg_mekao_description = SQL constructor
+pkg_mekao_homepage = https://github.com/ddosia/mekao
+pkg_mekao_fetch = git
+pkg_mekao_repo = https://github.com/ddosia/mekao
+pkg_mekao_commit = master
+
+PACKAGES += memo
+pkg_memo_name = memo
+pkg_memo_description = Erlang memoization server
+pkg_memo_homepage = https://github.com/tuncer/memo
+pkg_memo_fetch = git
+pkg_memo_repo = https://github.com/tuncer/memo
+pkg_memo_commit = master
+
+PACKAGES += merge_index
+pkg_merge_index_name = merge_index
+pkg_merge_index_description = MergeIndex is an Erlang library for storing ordered sets on disk. It is very similar to an SSTable (in Google's Bigtable) or an HFile (in Hadoop).
+pkg_merge_index_homepage = https://github.com/basho/merge_index
+pkg_merge_index_fetch = git
+pkg_merge_index_repo = https://github.com/basho/merge_index
+pkg_merge_index_commit = master
+
+PACKAGES += merl
+pkg_merl_name = merl
+pkg_merl_description = Metaprogramming in Erlang
+pkg_merl_homepage = https://github.com/richcarl/merl
+pkg_merl_fetch = git
+pkg_merl_repo = https://github.com/richcarl/merl
+pkg_merl_commit = master
+
+PACKAGES += mimerl
+pkg_mimerl_name = mimerl
+pkg_mimerl_description = library to handle mimetypes
+pkg_mimerl_homepage = https://github.com/benoitc/mimerl
+pkg_mimerl_fetch = git
+pkg_mimerl_repo = https://github.com/benoitc/mimerl
+pkg_mimerl_commit = master
+
+PACKAGES += mimetypes
+pkg_mimetypes_name = mimetypes
+pkg_mimetypes_description = Erlang MIME types library
+pkg_mimetypes_homepage = https://github.com/spawngrid/mimetypes
+pkg_mimetypes_fetch = git
+pkg_mimetypes_repo = https://github.com/spawngrid/mimetypes
+pkg_mimetypes_commit = master
+
+PACKAGES += mixer
+pkg_mixer_name = mixer
+pkg_mixer_description = Mix in functions from other modules
+pkg_mixer_homepage = https://github.com/chef/mixer
+pkg_mixer_fetch = git
+pkg_mixer_repo = https://github.com/chef/mixer
+pkg_mixer_commit = master
+
+PACKAGES += mochiweb
+pkg_mochiweb_name = mochiweb
+pkg_mochiweb_description = MochiWeb is an Erlang library for building lightweight HTTP servers.
+pkg_mochiweb_homepage = https://github.com/mochi/mochiweb
+pkg_mochiweb_fetch = git
+pkg_mochiweb_repo = https://github.com/mochi/mochiweb
+pkg_mochiweb_commit = master
+
+PACKAGES += mochiweb_xpath
+pkg_mochiweb_xpath_name = mochiweb_xpath
+pkg_mochiweb_xpath_description = XPath support for mochiweb's html parser
+pkg_mochiweb_xpath_homepage = https://github.com/retnuh/mochiweb_xpath
+pkg_mochiweb_xpath_fetch = git
+pkg_mochiweb_xpath_repo = https://github.com/retnuh/mochiweb_xpath
+pkg_mochiweb_xpath_commit = master
+
+PACKAGES += mockgyver
+pkg_mockgyver_name = mockgyver
+pkg_mockgyver_description = A mocking library for Erlang
+pkg_mockgyver_homepage = https://github.com/klajo/mockgyver
+pkg_mockgyver_fetch = git
+pkg_mockgyver_repo = https://github.com/klajo/mockgyver
+pkg_mockgyver_commit = master
+
+PACKAGES += modlib
+pkg_modlib_name = modlib
+pkg_modlib_description = Web framework based on Erlang's inets httpd
+pkg_modlib_homepage = https://github.com/gar1t/modlib
+pkg_modlib_fetch = git
+pkg_modlib_repo = https://github.com/gar1t/modlib
+pkg_modlib_commit = master
+
+PACKAGES += mongodb
+pkg_mongodb_name = mongodb
+pkg_mongodb_description = MongoDB driver for Erlang
+pkg_mongodb_homepage = https://github.com/comtihon/mongodb-erlang
+pkg_mongodb_fetch = git
+pkg_mongodb_repo = https://github.com/comtihon/mongodb-erlang
+pkg_mongodb_commit = master
+
+PACKAGES += mongooseim
+pkg_mongooseim_name = mongooseim
+pkg_mongooseim_description = Jabber / XMPP server with focus on performance and scalability, by Erlang Solutions
+pkg_mongooseim_homepage = https://www.erlang-solutions.com/products/mongooseim-massively-scalable-ejabberd-platform
+pkg_mongooseim_fetch = git
+pkg_mongooseim_repo = https://github.com/esl/MongooseIM
+pkg_mongooseim_commit = master
+
+PACKAGES += moyo
+pkg_moyo_name = moyo
+pkg_moyo_description = Erlang utility functions library
+pkg_moyo_homepage = https://github.com/dwango/moyo
+pkg_moyo_fetch = git
+pkg_moyo_repo = https://github.com/dwango/moyo
+pkg_moyo_commit = master
+
+PACKAGES += msgpack
+pkg_msgpack_name = msgpack
+pkg_msgpack_description = MessagePack (de)serializer implementation for Erlang
+pkg_msgpack_homepage = https://github.com/msgpack/msgpack-erlang
+pkg_msgpack_fetch = git
+pkg_msgpack_repo = https://github.com/msgpack/msgpack-erlang
+pkg_msgpack_commit = master
+
+PACKAGES += mu2
+pkg_mu2_name = mu2
+pkg_mu2_description = Erlang mutation testing tool
+pkg_mu2_homepage = https://github.com/ramsay-t/mu2
+pkg_mu2_fetch = git
+pkg_mu2_repo = https://github.com/ramsay-t/mu2
+pkg_mu2_commit = master
+
+PACKAGES += mustache
+pkg_mustache_name = mustache
+pkg_mustache_description = Mustache template engine for Erlang.
+pkg_mustache_homepage = https://github.com/mojombo/mustache.erl
+pkg_mustache_fetch = git
+pkg_mustache_repo = https://github.com/mojombo/mustache.erl
+pkg_mustache_commit = master
+
+PACKAGES += myproto
+pkg_myproto_name = myproto
+pkg_myproto_description = MySQL Server Protocol in Erlang
+pkg_myproto_homepage = https://github.com/altenwald/myproto
+pkg_myproto_fetch = git
+pkg_myproto_repo = https://github.com/altenwald/myproto
+pkg_myproto_commit = master
+
+PACKAGES += mysql
+pkg_mysql_name = mysql
+pkg_mysql_description = MySQL client library for Erlang/OTP
+pkg_mysql_homepage = https://github.com/mysql-otp/mysql-otp
+pkg_mysql_fetch = git
+pkg_mysql_repo = https://github.com/mysql-otp/mysql-otp
+pkg_mysql_commit = 1.5.1
+
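+# Most entries in this index track "master", but a few pin a release tag
+# (mysql above pins 1.5.1; ranch further down pins 1.2.1). A hedged
+# sketch, assuming erlang.mk's standard dep_* override variables, of how
+# a project can pin or change a version without editing this index:
+#
+#     DEPS = mysql
+#     dep_mysql_commit = 1.5.1
+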
+PACKAGES += n2o
+pkg_n2o_name = n2o
+pkg_n2o_description = WebSocket Application Server
+pkg_n2o_homepage = https://github.com/5HT/n2o
+pkg_n2o_fetch = git
+pkg_n2o_repo = https://github.com/5HT/n2o
+pkg_n2o_commit = master
+
+PACKAGES += nat_upnp
+pkg_nat_upnp_name = nat_upnp
+pkg_nat_upnp_description = Erlang library to map your internal port to an external one using UPnP IGD
+pkg_nat_upnp_homepage = https://github.com/benoitc/nat_upnp
+pkg_nat_upnp_fetch = git
+pkg_nat_upnp_repo = https://github.com/benoitc/nat_upnp
+pkg_nat_upnp_commit = master
+
+PACKAGES += neo4j
+pkg_neo4j_name = neo4j
+pkg_neo4j_description = Erlang client library for Neo4J.
+pkg_neo4j_homepage = https://github.com/dmitriid/neo4j-erlang
+pkg_neo4j_fetch = git
+pkg_neo4j_repo = https://github.com/dmitriid/neo4j-erlang
+pkg_neo4j_commit = master
+
+PACKAGES += neotoma
+pkg_neotoma_name = neotoma
+pkg_neotoma_description = Erlang library and packrat parser-generator for parsing expression grammars.
+pkg_neotoma_homepage = https://github.com/seancribbs/neotoma
+pkg_neotoma_fetch = git
+pkg_neotoma_repo = https://github.com/seancribbs/neotoma
+pkg_neotoma_commit = master
+
+PACKAGES += newrelic
+pkg_newrelic_name = newrelic
+pkg_newrelic_description = Erlang library for sending metrics to New Relic
+pkg_newrelic_homepage = https://github.com/wooga/newrelic-erlang
+pkg_newrelic_fetch = git
+pkg_newrelic_repo = https://github.com/wooga/newrelic-erlang
+pkg_newrelic_commit = master
+
+PACKAGES += nifty
+pkg_nifty_name = nifty
+pkg_nifty_description = Erlang NIF wrapper generator
+pkg_nifty_homepage = https://github.com/parapluu/nifty
+pkg_nifty_fetch = git
+pkg_nifty_repo = https://github.com/parapluu/nifty
+pkg_nifty_commit = master
+
+PACKAGES += nitrogen_core
+pkg_nitrogen_core_name = nitrogen_core
+pkg_nitrogen_core_description = The core Nitrogen library.
+pkg_nitrogen_core_homepage = http://nitrogenproject.com/
+pkg_nitrogen_core_fetch = git
+pkg_nitrogen_core_repo = https://github.com/nitrogen/nitrogen_core
+pkg_nitrogen_core_commit = master
+
+PACKAGES += nkbase
+pkg_nkbase_name = nkbase
+pkg_nkbase_description = NkBASE distributed database
+pkg_nkbase_homepage = https://github.com/Nekso/nkbase
+pkg_nkbase_fetch = git
+pkg_nkbase_repo = https://github.com/Nekso/nkbase
+pkg_nkbase_commit = develop
+
+PACKAGES += nkdocker
+pkg_nkdocker_name = nkdocker
+pkg_nkdocker_description = Erlang Docker client
+pkg_nkdocker_homepage = https://github.com/Nekso/nkdocker
+pkg_nkdocker_fetch = git
+pkg_nkdocker_repo = https://github.com/Nekso/nkdocker
+pkg_nkdocker_commit = master
+
+PACKAGES += nkpacket
+pkg_nkpacket_name = nkpacket
+pkg_nkpacket_description = Generic Erlang transport layer
+pkg_nkpacket_homepage = https://github.com/Nekso/nkpacket
+pkg_nkpacket_fetch = git
+pkg_nkpacket_repo = https://github.com/Nekso/nkpacket
+pkg_nkpacket_commit = master
+
+PACKAGES += nksip
+pkg_nksip_name = nksip
+pkg_nksip_description = Erlang SIP application server
+pkg_nksip_homepage = https://github.com/kalta/nksip
+pkg_nksip_fetch = git
+pkg_nksip_repo = https://github.com/kalta/nksip
+pkg_nksip_commit = master
+
+PACKAGES += nodefinder
+pkg_nodefinder_name = nodefinder
+pkg_nodefinder_description = automatic node discovery via UDP multicast
+pkg_nodefinder_homepage = https://github.com/erlanger/nodefinder
+pkg_nodefinder_fetch = git
+pkg_nodefinder_repo = https://github.com/okeuday/nodefinder
+pkg_nodefinder_commit = master
+
+PACKAGES += nprocreg
+pkg_nprocreg_name = nprocreg
+pkg_nprocreg_description = Minimal Distributed Erlang Process Registry
+pkg_nprocreg_homepage = http://nitrogenproject.com/
+pkg_nprocreg_fetch = git
+pkg_nprocreg_repo = https://github.com/nitrogen/nprocreg
+pkg_nprocreg_commit = master
+
+PACKAGES += oauth
+pkg_oauth_name = oauth
+pkg_oauth_description = An Erlang OAuth 1.0 implementation
+pkg_oauth_homepage = https://github.com/tim/erlang-oauth
+pkg_oauth_fetch = git
+pkg_oauth_repo = https://github.com/tim/erlang-oauth
+pkg_oauth_commit = master
+
+PACKAGES += oauth2
+pkg_oauth2_name = oauth2
+pkg_oauth2_description = Erlang OAuth2 implementation
+pkg_oauth2_homepage = https://github.com/kivra/oauth2
+pkg_oauth2_fetch = git
+pkg_oauth2_repo = https://github.com/kivra/oauth2
+pkg_oauth2_commit = master
+
+PACKAGES += observer_cli
+pkg_observer_cli_name = observer_cli
+pkg_observer_cli_description = Visualize Erlang/Elixir Nodes On The Command Line
+pkg_observer_cli_homepage = http://zhongwencool.github.io/observer_cli
+pkg_observer_cli_fetch = git
+pkg_observer_cli_repo = https://github.com/zhongwencool/observer_cli
+pkg_observer_cli_commit = master
+
+PACKAGES += octopus
+pkg_octopus_name = octopus
+pkg_octopus_description = Small and flexible pool manager written in Erlang
+pkg_octopus_homepage = https://github.com/erlangbureau/octopus
+pkg_octopus_fetch = git
+pkg_octopus_repo = https://github.com/erlangbureau/octopus
+pkg_octopus_commit = master
+
+PACKAGES += of_protocol
+pkg_of_protocol_name = of_protocol
+pkg_of_protocol_description = OpenFlow Protocol Library for Erlang
+pkg_of_protocol_homepage = https://github.com/FlowForwarding/of_protocol
+pkg_of_protocol_fetch = git
+pkg_of_protocol_repo = https://github.com/FlowForwarding/of_protocol
+pkg_of_protocol_commit = master
+
+PACKAGES += opencouch
+pkg_opencouch_name = couch
+pkg_opencouch_description = An embeddable document-oriented database compatible with Apache CouchDB
+pkg_opencouch_homepage = https://github.com/benoitc/opencouch
+pkg_opencouch_fetch = git
+pkg_opencouch_repo = https://github.com/benoitc/opencouch
+pkg_opencouch_commit = master
+
+PACKAGES += openflow
+pkg_openflow_name = openflow
+pkg_openflow_description = An OpenFlow controller written in pure erlang
+pkg_openflow_homepage = https://github.com/renatoaguiar/erlang-openflow
+pkg_openflow_fetch = git
+pkg_openflow_repo = https://github.com/renatoaguiar/erlang-openflow
+pkg_openflow_commit = master
+
+PACKAGES += openid
+pkg_openid_name = openid
+pkg_openid_description = Erlang OpenID
+pkg_openid_homepage = https://github.com/brendonh/erl_openid
+pkg_openid_fetch = git
+pkg_openid_repo = https://github.com/brendonh/erl_openid
+pkg_openid_commit = master
+
+PACKAGES += openpoker
+pkg_openpoker_name = openpoker
+pkg_openpoker_description = Genesis Texas hold'em Game Server
+pkg_openpoker_homepage = https://github.com/hpyhacking/openpoker
+pkg_openpoker_fetch = git
+pkg_openpoker_repo = https://github.com/hpyhacking/openpoker
+pkg_openpoker_commit = master
+
+PACKAGES += otpbp
+pkg_otpbp_name = otpbp
+pkg_otpbp_description = Parse transformer for using new OTP functions in old Erlang/OTP releases (R15, R16, 17, 18, 19)
+pkg_otpbp_homepage = https://github.com/Ledest/otpbp
+pkg_otpbp_fetch = git
+pkg_otpbp_repo = https://github.com/Ledest/otpbp
+pkg_otpbp_commit = master
+
+PACKAGES += pal
+pkg_pal_name = pal
+pkg_pal_description = Pragmatic Authentication Library
+pkg_pal_homepage = https://github.com/manifest/pal
+pkg_pal_fetch = git
+pkg_pal_repo = https://github.com/manifest/pal
+pkg_pal_commit = master
+
+PACKAGES += parse_trans
+pkg_parse_trans_name = parse_trans
+pkg_parse_trans_description = Parse transform utilities for Erlang
+pkg_parse_trans_homepage = https://github.com/uwiger/parse_trans
+pkg_parse_trans_fetch = git
+pkg_parse_trans_repo = https://github.com/uwiger/parse_trans
+pkg_parse_trans_commit = master
+
+PACKAGES += parsexml
+pkg_parsexml_name = parsexml
+pkg_parsexml_description = Simple DOM XML parser with convenient and very simple API
+pkg_parsexml_homepage = https://github.com/maxlapshin/parsexml
+pkg_parsexml_fetch = git
+pkg_parsexml_repo = https://github.com/maxlapshin/parsexml
+pkg_parsexml_commit = master
+
+PACKAGES += partisan
+pkg_partisan_name = partisan
+pkg_partisan_description = High-performance, high-scalability distributed computing with Erlang and Elixir.
+pkg_partisan_homepage = http://partisan.cloud
+pkg_partisan_fetch = git
+pkg_partisan_repo = https://github.com/lasp-lang/partisan
+pkg_partisan_commit = master
+
+PACKAGES += pegjs
+pkg_pegjs_name = pegjs
+pkg_pegjs_description = An implementation of PEG.js grammar for Erlang.
+pkg_pegjs_homepage = https://github.com/dmitriid/pegjs
+pkg_pegjs_fetch = git
+pkg_pegjs_repo = https://github.com/dmitriid/pegjs
+pkg_pegjs_commit = master
+
+PACKAGES += percept2
+pkg_percept2_name = percept2
+pkg_percept2_description = Concurrent profiling tool for Erlang
+pkg_percept2_homepage = https://github.com/huiqing/percept2
+pkg_percept2_fetch = git
+pkg_percept2_repo = https://github.com/huiqing/percept2
+pkg_percept2_commit = master
+
+PACKAGES += pgo
+pkg_pgo_name = pgo
+pkg_pgo_description = Erlang Postgres client and connection pool
+pkg_pgo_homepage = https://github.com/erleans/pgo.git
+pkg_pgo_fetch = git
+pkg_pgo_repo = https://github.com/erleans/pgo.git
+pkg_pgo_commit = master
+
+PACKAGES += pgsql
+pkg_pgsql_name = pgsql
+pkg_pgsql_description = Erlang PostgreSQL driver
+pkg_pgsql_homepage = https://github.com/semiocast/pgsql
+pkg_pgsql_fetch = git
+pkg_pgsql_repo = https://github.com/semiocast/pgsql
+pkg_pgsql_commit = master
+
+PACKAGES += pkgx
+pkg_pkgx_name = pkgx
+pkg_pkgx_description = Build .deb packages from Erlang releases
+pkg_pkgx_homepage = https://github.com/arjan/pkgx
+pkg_pkgx_fetch = git
+pkg_pkgx_repo = https://github.com/arjan/pkgx
+pkg_pkgx_commit = master
+
+PACKAGES += pkt
+pkg_pkt_name = pkt
+pkg_pkt_description = Erlang network protocol library
+pkg_pkt_homepage = https://github.com/msantos/pkt
+pkg_pkt_fetch = git
+pkg_pkt_repo = https://github.com/msantos/pkt
+pkg_pkt_commit = master
+
+PACKAGES += plain_fsm
+pkg_plain_fsm_name = plain_fsm
+pkg_plain_fsm_description = A behaviour/support library for writing plain Erlang FSMs.
+pkg_plain_fsm_homepage = https://github.com/uwiger/plain_fsm
+pkg_plain_fsm_fetch = git
+pkg_plain_fsm_repo = https://github.com/uwiger/plain_fsm
+pkg_plain_fsm_commit = master
+
+PACKAGES += plumtree
+pkg_plumtree_name = plumtree
+pkg_plumtree_description = Epidemic Broadcast Trees
+pkg_plumtree_homepage = https://github.com/helium/plumtree
+pkg_plumtree_fetch = git
+pkg_plumtree_repo = https://github.com/helium/plumtree
+pkg_plumtree_commit = master
+
+PACKAGES += pmod_transform
+pkg_pmod_transform_name = pmod_transform
+pkg_pmod_transform_description = Parse transform for parameterized modules
+pkg_pmod_transform_homepage = https://github.com/erlang/pmod_transform
+pkg_pmod_transform_fetch = git
+pkg_pmod_transform_repo = https://github.com/erlang/pmod_transform
+pkg_pmod_transform_commit = master
+
+PACKAGES += pobox
+pkg_pobox_name = pobox
+pkg_pobox_description = External buffer processes to protect against mailbox overflow in Erlang
+pkg_pobox_homepage = https://github.com/ferd/pobox
+pkg_pobox_fetch = git
+pkg_pobox_repo = https://github.com/ferd/pobox
+pkg_pobox_commit = master
+
+PACKAGES += ponos
+pkg_ponos_name = ponos
+pkg_ponos_description = ponos is a simple yet powerful load generator written in erlang
+pkg_ponos_homepage = https://github.com/klarna/ponos
+pkg_ponos_fetch = git
+pkg_ponos_repo = https://github.com/klarna/ponos
+pkg_ponos_commit = master
+
+PACKAGES += poolboy
+pkg_poolboy_name = poolboy
+pkg_poolboy_description = A hunky Erlang worker pool factory
+pkg_poolboy_homepage = https://github.com/devinus/poolboy
+pkg_poolboy_fetch = git
+pkg_poolboy_repo = https://github.com/devinus/poolboy
+pkg_poolboy_commit = master
+
+PACKAGES += pooler
+pkg_pooler_name = pooler
+pkg_pooler_description = An OTP Process Pool Application
+pkg_pooler_homepage = https://github.com/seth/pooler
+pkg_pooler_fetch = git
+pkg_pooler_repo = https://github.com/seth/pooler
+pkg_pooler_commit = master
+
+PACKAGES += pqueue
+pkg_pqueue_name = pqueue
+pkg_pqueue_description = Erlang Priority Queues
+pkg_pqueue_homepage = https://github.com/okeuday/pqueue
+pkg_pqueue_fetch = git
+pkg_pqueue_repo = https://github.com/okeuday/pqueue
+pkg_pqueue_commit = master
+
+PACKAGES += procket
+pkg_procket_name = procket
+pkg_procket_description = Erlang interface to low level socket operations
+pkg_procket_homepage = http://blog.listincomprehension.com/search/label/procket
+pkg_procket_fetch = git
+pkg_procket_repo = https://github.com/msantos/procket
+pkg_procket_commit = master
+
+PACKAGES += prometheus
+pkg_prometheus_name = prometheus
+pkg_prometheus_description = Prometheus.io client in Erlang
+pkg_prometheus_homepage = https://github.com/deadtrickster/prometheus.erl
+pkg_prometheus_fetch = git
+pkg_prometheus_repo = https://github.com/deadtrickster/prometheus.erl
+pkg_prometheus_commit = master
+
+PACKAGES += prop
+pkg_prop_name = prop
+pkg_prop_description = An Erlang code scaffolding and generator system.
+pkg_prop_homepage = https://github.com/nuex/prop
+pkg_prop_fetch = git
+pkg_prop_repo = https://github.com/nuex/prop
+pkg_prop_commit = master
+
+PACKAGES += proper
+pkg_proper_name = proper
+pkg_proper_description = PropEr: a QuickCheck-inspired property-based testing tool for Erlang.
+pkg_proper_homepage = http://proper.softlab.ntua.gr
+pkg_proper_fetch = git
+pkg_proper_repo = https://github.com/manopapad/proper
+pkg_proper_commit = master
+
+PACKAGES += props
+pkg_props_name = props
+pkg_props_description = Property structure library
+pkg_props_homepage = https://github.com/greyarea/props
+pkg_props_fetch = git
+pkg_props_repo = https://github.com/greyarea/props
+pkg_props_commit = master
+
+PACKAGES += protobuffs
+pkg_protobuffs_name = protobuffs
+pkg_protobuffs_description = An implementation of Google's Protocol Buffers for Erlang, based on ngerakines/erlang_protobuffs.
+pkg_protobuffs_homepage = https://github.com/basho/erlang_protobuffs
+pkg_protobuffs_fetch = git
+pkg_protobuffs_repo = https://github.com/basho/erlang_protobuffs
+pkg_protobuffs_commit = master
+
+PACKAGES += psycho
+pkg_psycho_name = psycho
+pkg_psycho_description = HTTP server that provides a WSGI-like interface for applications and middleware.
+pkg_psycho_homepage = https://github.com/gar1t/psycho
+pkg_psycho_fetch = git
+pkg_psycho_repo = https://github.com/gar1t/psycho
+pkg_psycho_commit = master
+
+PACKAGES += purity
+pkg_purity_name = purity
+pkg_purity_description = A side-effect analyzer for Erlang
+pkg_purity_homepage = https://github.com/mpitid/purity
+pkg_purity_fetch = git
+pkg_purity_repo = https://github.com/mpitid/purity
+pkg_purity_commit = master
+
+PACKAGES += push_service
+pkg_push_service_name = push_service
+pkg_push_service_description = Push service
+pkg_push_service_homepage = https://github.com/hairyhum/push_service
+pkg_push_service_fetch = git
+pkg_push_service_repo = https://github.com/hairyhum/push_service
+pkg_push_service_commit = master
+
+PACKAGES += qdate
+pkg_qdate_name = qdate
+pkg_qdate_description = Date, time, and timezone parsing, formatting, and conversion for Erlang.
+pkg_qdate_homepage = https://github.com/choptastic/qdate
+pkg_qdate_fetch = git
+pkg_qdate_repo = https://github.com/choptastic/qdate
+pkg_qdate_commit = master
+
+PACKAGES += qrcode
+pkg_qrcode_name = qrcode
+pkg_qrcode_description = QR Code encoder in Erlang
+pkg_qrcode_homepage = https://github.com/komone/qrcode
+pkg_qrcode_fetch = git
+pkg_qrcode_repo = https://github.com/komone/qrcode
+pkg_qrcode_commit = master
+
+PACKAGES += quest
+pkg_quest_name = quest
+pkg_quest_description = Learn Erlang through this set of challenges. An interactive system for getting to know Erlang.
+pkg_quest_homepage = https://github.com/eriksoe/ErlangQuest
+pkg_quest_fetch = git
+pkg_quest_repo = https://github.com/eriksoe/ErlangQuest
+pkg_quest_commit = master
+
+PACKAGES += quickrand
+pkg_quickrand_name = quickrand
+pkg_quickrand_description = Quick Erlang Random Number Generation
+pkg_quickrand_homepage = https://github.com/okeuday/quickrand
+pkg_quickrand_fetch = git
+pkg_quickrand_repo = https://github.com/okeuday/quickrand
+pkg_quickrand_commit = master
+
+PACKAGES += rabbit
+pkg_rabbit_name = rabbit
+pkg_rabbit_description = RabbitMQ Server
+pkg_rabbit_homepage = https://www.rabbitmq.com/
+pkg_rabbit_fetch = git
+pkg_rabbit_repo = https://github.com/rabbitmq/rabbitmq-server.git
+pkg_rabbit_commit = master
+
+PACKAGES += rabbit_exchange_type_riak
+pkg_rabbit_exchange_type_riak_name = rabbit_exchange_type_riak
+pkg_rabbit_exchange_type_riak_description = Custom RabbitMQ exchange type for sticking messages in Riak
+pkg_rabbit_exchange_type_riak_homepage = https://github.com/jbrisbin/riak-exchange
+pkg_rabbit_exchange_type_riak_fetch = git
+pkg_rabbit_exchange_type_riak_repo = https://github.com/jbrisbin/riak-exchange
+pkg_rabbit_exchange_type_riak_commit = master
+
+PACKAGES += rack
+pkg_rack_name = rack
+pkg_rack_description = Rack handler for erlang
+pkg_rack_homepage = https://github.com/erlyvideo/rack
+pkg_rack_fetch = git
+pkg_rack_repo = https://github.com/erlyvideo/rack
+pkg_rack_commit = master
+
+PACKAGES += radierl
+pkg_radierl_name = radierl
+pkg_radierl_description = RADIUS protocol stack implemented in Erlang.
+pkg_radierl_homepage = https://github.com/vances/radierl
+pkg_radierl_fetch = git
+pkg_radierl_repo = https://github.com/vances/radierl
+pkg_radierl_commit = master
+
+PACKAGES += rafter
+pkg_rafter_name = rafter
+pkg_rafter_description = An Erlang library application which implements the Raft consensus protocol
+pkg_rafter_homepage = https://github.com/andrewjstone/rafter
+pkg_rafter_fetch = git
+pkg_rafter_repo = https://github.com/andrewjstone/rafter
+pkg_rafter_commit = master
+
+PACKAGES += ranch
+pkg_ranch_name = ranch
+pkg_ranch_description = Socket acceptor pool for TCP protocols.
+pkg_ranch_homepage = http://ninenines.eu
+pkg_ranch_fetch = git
+pkg_ranch_repo = https://github.com/ninenines/ranch
+pkg_ranch_commit = 1.2.1
+
+PACKAGES += rbeacon
+pkg_rbeacon_name = rbeacon
+pkg_rbeacon_description = LAN discovery and presence in Erlang.
+pkg_rbeacon_homepage = https://github.com/refuge/rbeacon
+pkg_rbeacon_fetch = git
+pkg_rbeacon_repo = https://github.com/refuge/rbeacon
+pkg_rbeacon_commit = master
+
+PACKAGES += rebar
+pkg_rebar_name = rebar
+pkg_rebar_description = Erlang build tool that makes it easy to compile and test Erlang applications, port drivers and releases.
+pkg_rebar_homepage = http://www.rebar3.org
+pkg_rebar_fetch = git
+pkg_rebar_repo = https://github.com/rebar/rebar3
+pkg_rebar_commit = master
+
+PACKAGES += rebus
+pkg_rebus_name = rebus
+pkg_rebus_description = A stupid simple, internal, pub/sub event bus written in- and for Erlang.
+pkg_rebus_homepage = https://github.com/olle/rebus
+pkg_rebus_fetch = git
+pkg_rebus_repo = https://github.com/olle/rebus
+pkg_rebus_commit = master
+
+PACKAGES += rec2json
+pkg_rec2json_name = rec2json
+pkg_rec2json_description = Compile erlang record definitions into modules to convert them to/from json easily.
+pkg_rec2json_homepage = https://github.com/lordnull/rec2json
+pkg_rec2json_fetch = git
+pkg_rec2json_repo = https://github.com/lordnull/rec2json
+pkg_rec2json_commit = master
+
+PACKAGES += recon
+pkg_recon_name = recon
+pkg_recon_description = Collection of functions and scripts to debug Erlang in production.
+pkg_recon_homepage = https://github.com/ferd/recon
+pkg_recon_fetch = git
+pkg_recon_repo = https://github.com/ferd/recon
+pkg_recon_commit = master
+
+PACKAGES += record_info
+pkg_record_info_name = record_info
+pkg_record_info_description = Convert between record and proplist
+pkg_record_info_homepage = https://github.com/bipthelin/erlang-record_info
+pkg_record_info_fetch = git
+pkg_record_info_repo = https://github.com/bipthelin/erlang-record_info
+pkg_record_info_commit = master
+
+PACKAGES += redgrid
+pkg_redgrid_name = redgrid
+pkg_redgrid_description = automatic Erlang node discovery via redis
+pkg_redgrid_homepage = https://github.com/jkvor/redgrid
+pkg_redgrid_fetch = git
+pkg_redgrid_repo = https://github.com/jkvor/redgrid
+pkg_redgrid_commit = master
+
+PACKAGES += redo
+pkg_redo_name = redo
+pkg_redo_description = pipelined erlang redis client
+pkg_redo_homepage = https://github.com/jkvor/redo
+pkg_redo_fetch = git
+pkg_redo_repo = https://github.com/jkvor/redo
+pkg_redo_commit = master
+
+PACKAGES += reload_mk
+pkg_reload_mk_name = reload_mk
+pkg_reload_mk_description = Live reload plugin for erlang.mk.
+pkg_reload_mk_homepage = https://github.com/bullno1/reload.mk
+pkg_reload_mk_fetch = git
+pkg_reload_mk_repo = https://github.com/bullno1/reload.mk
+pkg_reload_mk_commit = master
+
+PACKAGES += reltool_util
+pkg_reltool_util_name = reltool_util
+pkg_reltool_util_description = Erlang reltool utility functionality application
+pkg_reltool_util_homepage = https://github.com/okeuday/reltool_util
+pkg_reltool_util_fetch = git
+pkg_reltool_util_repo = https://github.com/okeuday/reltool_util
+pkg_reltool_util_commit = master
+
+PACKAGES += relx
+pkg_relx_name = relx
+pkg_relx_description = Sane, simple release creation for Erlang
+pkg_relx_homepage = https://github.com/erlware/relx
+pkg_relx_fetch = git
+pkg_relx_repo = https://github.com/erlware/relx
+pkg_relx_commit = master
+
+PACKAGES += resource_discovery
+pkg_resource_discovery_name = resource_discovery
+pkg_resource_discovery_description = An application used to dynamically discover resources present in an Erlang node cluster.
+pkg_resource_discovery_homepage = http://erlware.org/
+pkg_resource_discovery_fetch = git
+pkg_resource_discovery_repo = https://github.com/erlware/resource_discovery
+pkg_resource_discovery_commit = master
+
+PACKAGES += restc
+pkg_restc_name = restc
+pkg_restc_description = Erlang Rest Client
+pkg_restc_homepage = https://github.com/kivra/restclient
+pkg_restc_fetch = git
+pkg_restc_repo = https://github.com/kivra/restclient
+pkg_restc_commit = master
+
+PACKAGES += rfc4627_jsonrpc
+pkg_rfc4627_jsonrpc_name = rfc4627_jsonrpc
+pkg_rfc4627_jsonrpc_description = Erlang RFC4627 (JSON) codec and JSON-RPC server implementation.
+pkg_rfc4627_jsonrpc_homepage = https://github.com/tonyg/erlang-rfc4627
+pkg_rfc4627_jsonrpc_fetch = git
+pkg_rfc4627_jsonrpc_repo = https://github.com/tonyg/erlang-rfc4627
+pkg_rfc4627_jsonrpc_commit = master
+
+PACKAGES += riak_control
+pkg_riak_control_name = riak_control
+pkg_riak_control_description = Webmachine-based administration interface for Riak.
+pkg_riak_control_homepage = https://github.com/basho/riak_control
+pkg_riak_control_fetch = git
+pkg_riak_control_repo = https://github.com/basho/riak_control
+pkg_riak_control_commit = master
+
+PACKAGES += riak_core
+pkg_riak_core_name = riak_core
+pkg_riak_core_description = Distributed systems infrastructure used by Riak.
+pkg_riak_core_homepage = https://github.com/basho/riak_core
+pkg_riak_core_fetch = git
+pkg_riak_core_repo = https://github.com/basho/riak_core
+pkg_riak_core_commit = master
+
+PACKAGES += riak_dt
+pkg_riak_dt_name = riak_dt
+pkg_riak_dt_description = Convergent replicated datatypes in Erlang
+pkg_riak_dt_homepage = https://github.com/basho/riak_dt
+pkg_riak_dt_fetch = git
+pkg_riak_dt_repo = https://github.com/basho/riak_dt
+pkg_riak_dt_commit = master
+
+PACKAGES += riak_ensemble
+pkg_riak_ensemble_name = riak_ensemble
+pkg_riak_ensemble_description = Multi-Paxos framework in Erlang
+pkg_riak_ensemble_homepage = https://github.com/basho/riak_ensemble
+pkg_riak_ensemble_fetch = git
+pkg_riak_ensemble_repo = https://github.com/basho/riak_ensemble
+pkg_riak_ensemble_commit = master
+
+PACKAGES += riak_kv
+pkg_riak_kv_name = riak_kv
+pkg_riak_kv_description = Riak Key/Value Store
+pkg_riak_kv_homepage = https://github.com/basho/riak_kv
+pkg_riak_kv_fetch = git
+pkg_riak_kv_repo = https://github.com/basho/riak_kv
+pkg_riak_kv_commit = master
+
+PACKAGES += riak_pg
+pkg_riak_pg_name = riak_pg
+pkg_riak_pg_description = Distributed process groups with riak_core.
+pkg_riak_pg_homepage = https://github.com/cmeiklejohn/riak_pg
+pkg_riak_pg_fetch = git
+pkg_riak_pg_repo = https://github.com/cmeiklejohn/riak_pg
+pkg_riak_pg_commit = master
+
+PACKAGES += riak_pipe
+pkg_riak_pipe_name = riak_pipe
+pkg_riak_pipe_description = Riak Pipelines
+pkg_riak_pipe_homepage = https://github.com/basho/riak_pipe
+pkg_riak_pipe_fetch = git
+pkg_riak_pipe_repo = https://github.com/basho/riak_pipe
+pkg_riak_pipe_commit = master
+
+PACKAGES += riak_sysmon
+pkg_riak_sysmon_name = riak_sysmon
+pkg_riak_sysmon_description = Simple OTP app for managing Erlang VM system_monitor event messages
+pkg_riak_sysmon_homepage = https://github.com/basho/riak_sysmon
+pkg_riak_sysmon_fetch = git
+pkg_riak_sysmon_repo = https://github.com/basho/riak_sysmon
+pkg_riak_sysmon_commit = master
+
+PACKAGES += riak_test
+pkg_riak_test_name = riak_test
+pkg_riak_test_description = I'm in your cluster, testing your riaks
+pkg_riak_test_homepage = https://github.com/basho/riak_test
+pkg_riak_test_fetch = git
+pkg_riak_test_repo = https://github.com/basho/riak_test
+pkg_riak_test_commit = master
+
+PACKAGES += riakc
+pkg_riakc_name = riakc
+pkg_riakc_description = Erlang clients for Riak.
+pkg_riakc_homepage = https://github.com/basho/riak-erlang-client
+pkg_riakc_fetch = git
+pkg_riakc_repo = https://github.com/basho/riak-erlang-client
+pkg_riakc_commit = master
+
+PACKAGES += riakhttpc
+pkg_riakhttpc_name = riakhttpc
+pkg_riakhttpc_description = Riak Erlang client using the HTTP interface
+pkg_riakhttpc_homepage = https://github.com/basho/riak-erlang-http-client
+pkg_riakhttpc_fetch = git
+pkg_riakhttpc_repo = https://github.com/basho/riak-erlang-http-client
+pkg_riakhttpc_commit = master
+
+PACKAGES += riaknostic
+pkg_riaknostic_name = riaknostic
+pkg_riaknostic_description = A diagnostic tool for Riak installations, to find common errors asap
+pkg_riaknostic_homepage = https://github.com/basho/riaknostic
+pkg_riaknostic_fetch = git
+pkg_riaknostic_repo = https://github.com/basho/riaknostic
+pkg_riaknostic_commit = master
+
+PACKAGES += riakpool
+pkg_riakpool_name = riakpool
+pkg_riakpool_description = erlang riak client pool
+pkg_riakpool_homepage = https://github.com/dweldon/riakpool
+pkg_riakpool_fetch = git
+pkg_riakpool_repo = https://github.com/dweldon/riakpool
+pkg_riakpool_commit = master
+
+PACKAGES += rivus_cep
+pkg_rivus_cep_name = rivus_cep
+pkg_rivus_cep_description = Complex event processing in Erlang
+pkg_rivus_cep_homepage = https://github.com/vascokk/rivus_cep
+pkg_rivus_cep_fetch = git
+pkg_rivus_cep_repo = https://github.com/vascokk/rivus_cep
+pkg_rivus_cep_commit = master
+
+PACKAGES += rlimit
+pkg_rlimit_name = rlimit
+pkg_rlimit_description = Magnus Klaar's rate limiter code from etorrent
+pkg_rlimit_homepage = https://github.com/jlouis/rlimit
+pkg_rlimit_fetch = git
+pkg_rlimit_repo = https://github.com/jlouis/rlimit
+pkg_rlimit_commit = master
+
+PACKAGES += rust_mk
+pkg_rust_mk_name = rust_mk
+pkg_rust_mk_description = Build Rust crates in an Erlang application
+pkg_rust_mk_homepage = https://github.com/goertzenator/rust.mk
+pkg_rust_mk_fetch = git
+pkg_rust_mk_repo = https://github.com/goertzenator/rust.mk
+pkg_rust_mk_commit = master
+
+PACKAGES += safetyvalve
+pkg_safetyvalve_name = safetyvalve
+pkg_safetyvalve_description = A safety valve for your erlang node
+pkg_safetyvalve_homepage = https://github.com/jlouis/safetyvalve
+pkg_safetyvalve_fetch = git
+pkg_safetyvalve_repo = https://github.com/jlouis/safetyvalve
+pkg_safetyvalve_commit = master
+
+PACKAGES += seestar
+pkg_seestar_name = seestar
+pkg_seestar_description = The Erlang client for Cassandra 1.2+ binary protocol
+pkg_seestar_homepage = https://github.com/iamaleksey/seestar
+pkg_seestar_fetch = git
+pkg_seestar_repo = https://github.com/iamaleksey/seestar
+pkg_seestar_commit = master
+
+PACKAGES += service
+pkg_service_name = service
+pkg_service_description = A minimal Erlang behavior for creating CloudI internal services
+pkg_service_homepage = http://cloudi.org/
+pkg_service_fetch = git
+pkg_service_repo = https://github.com/CloudI/service
+pkg_service_commit = master
+
+PACKAGES += setup
+pkg_setup_name = setup
+pkg_setup_description = Generic setup utility for Erlang-based systems
+pkg_setup_homepage = https://github.com/uwiger/setup
+pkg_setup_fetch = git
+pkg_setup_repo = https://github.com/uwiger/setup
+pkg_setup_commit = master
+
+PACKAGES += sext
+pkg_sext_name = sext
+pkg_sext_description = Sortable Erlang Term Serialization
+pkg_sext_homepage = https://github.com/uwiger/sext
+pkg_sext_fetch = git
+pkg_sext_repo = https://github.com/uwiger/sext
+pkg_sext_commit = master
+
+PACKAGES += sfmt
+pkg_sfmt_name = sfmt
+pkg_sfmt_description = SFMT pseudo random number generator for Erlang.
+pkg_sfmt_homepage = https://github.com/jj1bdx/sfmt-erlang
+pkg_sfmt_fetch = git
+pkg_sfmt_repo = https://github.com/jj1bdx/sfmt-erlang
+pkg_sfmt_commit = master
+
+PACKAGES += sgte
+pkg_sgte_name = sgte
+pkg_sgte_description = A simple Erlang Template Engine
+pkg_sgte_homepage = https://github.com/filippo/sgte
+pkg_sgte_fetch = git
+pkg_sgte_repo = https://github.com/filippo/sgte
+pkg_sgte_commit = master
+
+PACKAGES += sheriff
+pkg_sheriff_name = sheriff
+pkg_sheriff_description = Parse transform for type based validation.
+pkg_sheriff_homepage = http://ninenines.eu
+pkg_sheriff_fetch = git
+pkg_sheriff_repo = https://github.com/extend/sheriff
+pkg_sheriff_commit = master
+
+PACKAGES += shotgun
+pkg_shotgun_name = shotgun
+pkg_shotgun_description = better than just a gun
+pkg_shotgun_homepage = https://github.com/inaka/shotgun
+pkg_shotgun_fetch = git
+pkg_shotgun_repo = https://github.com/inaka/shotgun
+pkg_shotgun_commit = master
+
+PACKAGES += sidejob
+pkg_sidejob_name = sidejob
+pkg_sidejob_description = Parallel worker and capacity limiting library for Erlang
+pkg_sidejob_homepage = https://github.com/basho/sidejob
+pkg_sidejob_fetch = git
+pkg_sidejob_repo = https://github.com/basho/sidejob
+pkg_sidejob_commit = master
+
+PACKAGES += sieve
+pkg_sieve_name = sieve
+pkg_sieve_description = sieve is a simple TCP routing proxy (layer 7) in erlang
+pkg_sieve_homepage = https://github.com/benoitc/sieve
+pkg_sieve_fetch = git
+pkg_sieve_repo = https://github.com/benoitc/sieve
+pkg_sieve_commit = master
+
+PACKAGES += sighandler
+pkg_sighandler_name = sighandler
+pkg_sighandler_description = Handle UNIX signals in Erlang
+pkg_sighandler_homepage = https://github.com/jkingsbery/sighandler
+pkg_sighandler_fetch = git
+pkg_sighandler_repo = https://github.com/jkingsbery/sighandler
+pkg_sighandler_commit = master
+
+PACKAGES += simhash
+pkg_simhash_name = simhash
+pkg_simhash_description = Simhashing for Erlang -- hashing algorithm to find near-duplicates in binary data.
+pkg_simhash_homepage = https://github.com/ferd/simhash
+pkg_simhash_fetch = git
+pkg_simhash_repo = https://github.com/ferd/simhash
+pkg_simhash_commit = master
+
+PACKAGES += simple_bridge
+pkg_simple_bridge_name = simple_bridge
+pkg_simple_bridge_description = A simple, standardized interface library to Erlang HTTP Servers.
+pkg_simple_bridge_homepage = https://github.com/nitrogen/simple_bridge
+pkg_simple_bridge_fetch = git
+pkg_simple_bridge_repo = https://github.com/nitrogen/simple_bridge
+pkg_simple_bridge_commit = master
+
+PACKAGES += simple_oauth2
+pkg_simple_oauth2_name = simple_oauth2
+pkg_simple_oauth2_description = Simple erlang OAuth2 client module for any http server framework (Google, Facebook, Yandex, Vkontakte are preconfigured)
+pkg_simple_oauth2_homepage = https://github.com/virtan/simple_oauth2
+pkg_simple_oauth2_fetch = git
+pkg_simple_oauth2_repo = https://github.com/virtan/simple_oauth2
+pkg_simple_oauth2_commit = master
+
+PACKAGES += skel
+pkg_skel_name = skel
+pkg_skel_description = A Streaming Process-based Skeleton Library for Erlang
+pkg_skel_homepage = https://github.com/ParaPhrase/skel
+pkg_skel_fetch = git
+pkg_skel_repo = https://github.com/ParaPhrase/skel
+pkg_skel_commit = master
+
+PACKAGES += slack
+pkg_slack_name = slack
+pkg_slack_description = Minimal slack notification OTP library.
+pkg_slack_homepage = https://github.com/DonBranson/slack
+pkg_slack_fetch = git
+pkg_slack_repo = https://github.com/DonBranson/slack.git
+pkg_slack_commit = master
+
+PACKAGES += smother
+pkg_smother_name = smother
+pkg_smother_description = Extended code coverage metrics for Erlang.
+pkg_smother_homepage = https://ramsay-t.github.io/Smother/
+pkg_smother_fetch = git
+pkg_smother_repo = https://github.com/ramsay-t/Smother
+pkg_smother_commit = master
+
+PACKAGES += snappyer
+pkg_snappyer_name = snappyer
+pkg_snappyer_description = Snappy as nif for Erlang
+pkg_snappyer_homepage = https://github.com/zmstone/snappyer
+pkg_snappyer_fetch = git
+pkg_snappyer_repo = https://github.com/zmstone/snappyer.git
+pkg_snappyer_commit = master
+
+PACKAGES += social
+pkg_social_name = social
+pkg_social_description = Cowboy handler for social login via OAuth2 providers
+pkg_social_homepage = https://github.com/dvv/social
+pkg_social_fetch = git
+pkg_social_repo = https://github.com/dvv/social
+pkg_social_commit = master
+
+PACKAGES += spapi_router
+pkg_spapi_router_name = spapi_router
+pkg_spapi_router_description = Partially-connected Erlang clustering
+pkg_spapi_router_homepage = https://github.com/spilgames/spapi-router
+pkg_spapi_router_fetch = git
+pkg_spapi_router_repo = https://github.com/spilgames/spapi-router
+pkg_spapi_router_commit = master
+
+PACKAGES += sqerl
+pkg_sqerl_name = sqerl
+pkg_sqerl_description = An Erlang-flavoured SQL DSL
+pkg_sqerl_homepage = https://github.com/hairyhum/sqerl
+pkg_sqerl_fetch = git
+pkg_sqerl_repo = https://github.com/hairyhum/sqerl
+pkg_sqerl_commit = master
+
+PACKAGES += srly
+pkg_srly_name = srly
+pkg_srly_description = Native Erlang Unix serial interface
+pkg_srly_homepage = https://github.com/msantos/srly
+pkg_srly_fetch = git
+pkg_srly_repo = https://github.com/msantos/srly
+pkg_srly_commit = master
+
+PACKAGES += sshrpc
+pkg_sshrpc_name = sshrpc
+pkg_sshrpc_description = Erlang SSH RPC module (experimental)
+pkg_sshrpc_homepage = https://github.com/jj1bdx/sshrpc
+pkg_sshrpc_fetch = git
+pkg_sshrpc_repo = https://github.com/jj1bdx/sshrpc
+pkg_sshrpc_commit = master
+
+PACKAGES += stable
+pkg_stable_name = stable
+pkg_stable_description = Library of assorted helpers for Cowboy web server.
+pkg_stable_homepage = https://github.com/dvv/stable
+pkg_stable_fetch = git
+pkg_stable_repo = https://github.com/dvv/stable
+pkg_stable_commit = master
+
+PACKAGES += statebox
+pkg_statebox_name = statebox
+pkg_statebox_description = Erlang state monad with merge/conflict-resolution capabilities. Useful for Riak.
+pkg_statebox_homepage = https://github.com/mochi/statebox
+pkg_statebox_fetch = git
+pkg_statebox_repo = https://github.com/mochi/statebox
+pkg_statebox_commit = master
+
+PACKAGES += statebox_riak
+pkg_statebox_riak_name = statebox_riak
+pkg_statebox_riak_description = Convenience library that makes it easier to use statebox with riak, extracted from best practices in our production code at Mochi Media.
+pkg_statebox_riak_homepage = https://github.com/mochi/statebox_riak
+pkg_statebox_riak_fetch = git
+pkg_statebox_riak_repo = https://github.com/mochi/statebox_riak
+pkg_statebox_riak_commit = master
+
+PACKAGES += statman
+pkg_statman_name = statman
+pkg_statman_description = Efficiently collect massive volumes of metrics inside the Erlang VM
+pkg_statman_homepage = https://github.com/knutin/statman
+pkg_statman_fetch = git
+pkg_statman_repo = https://github.com/knutin/statman
+pkg_statman_commit = master
+
+PACKAGES += statsderl
+pkg_statsderl_name = statsderl
+pkg_statsderl_description = StatsD client (erlang)
+pkg_statsderl_homepage = https://github.com/lpgauth/statsderl
+pkg_statsderl_fetch = git
+pkg_statsderl_repo = https://github.com/lpgauth/statsderl
+pkg_statsderl_commit = master
+
+PACKAGES += stdinout_pool
+pkg_stdinout_pool_name = stdinout_pool
+pkg_stdinout_pool_description = stdinout_pool : stuff goes in, stuff goes out. there's never any miscommunication.
+pkg_stdinout_pool_homepage = https://github.com/mattsta/erlang-stdinout-pool
+pkg_stdinout_pool_fetch = git
+pkg_stdinout_pool_repo = https://github.com/mattsta/erlang-stdinout-pool
+pkg_stdinout_pool_commit = master
+
+PACKAGES += stockdb
+pkg_stockdb_name = stockdb
+pkg_stockdb_description = Database for storing Stock Exchange quotes in erlang
+pkg_stockdb_homepage = https://github.com/maxlapshin/stockdb
+pkg_stockdb_fetch = git
+pkg_stockdb_repo = https://github.com/maxlapshin/stockdb
+pkg_stockdb_commit = master
+
+PACKAGES += stripe
+pkg_stripe_name = stripe
+pkg_stripe_description = Erlang interface to the stripe.com API
+pkg_stripe_homepage = https://github.com/mattsta/stripe-erlang
+pkg_stripe_fetch = git
+pkg_stripe_repo = https://github.com/mattsta/stripe-erlang
+pkg_stripe_commit = v1
+
+PACKAGES += subproc
+pkg_subproc_name = subproc
+pkg_subproc_description = unix subprocess manager with {active,once|false} modes
+pkg_subproc_homepage = http://dozzie.jarowit.net/trac/wiki/subproc
+pkg_subproc_fetch = git
+pkg_subproc_repo = https://github.com/dozzie/subproc
+pkg_subproc_commit = v0.1.0
+
+PACKAGES += supervisor3
+pkg_supervisor3_name = supervisor3
+pkg_supervisor3_description = OTP supervisor with additional strategies
+pkg_supervisor3_homepage = https://github.com/klarna/supervisor3
+pkg_supervisor3_fetch = git
+pkg_supervisor3_repo = https://github.com/klarna/supervisor3.git
+pkg_supervisor3_commit = master
+
+PACKAGES += surrogate
+pkg_surrogate_name = surrogate
+pkg_surrogate_description = Proxy server written in erlang. Supports reverse proxy load balancing and forward proxy with http (including CONNECT), socks4, socks5, and transparent proxy modes.
+pkg_surrogate_homepage = https://github.com/skruger/Surrogate
+pkg_surrogate_fetch = git
+pkg_surrogate_repo = https://github.com/skruger/Surrogate
+pkg_surrogate_commit = master
+
+PACKAGES += swab
+pkg_swab_name = swab
+pkg_swab_description = General purpose buffer handling module
+pkg_swab_homepage = https://github.com/crownedgrouse/swab
+pkg_swab_fetch = git
+pkg_swab_repo = https://github.com/crownedgrouse/swab
+pkg_swab_commit = master
+
+PACKAGES += swarm
+pkg_swarm_name = swarm
+pkg_swarm_description = Fast and simple acceptor pool for Erlang
+pkg_swarm_homepage = https://github.com/jeremey/swarm
+pkg_swarm_fetch = git
+pkg_swarm_repo = https://github.com/jeremey/swarm
+pkg_swarm_commit = master
+
+PACKAGES += switchboard
+pkg_switchboard_name = switchboard
+pkg_switchboard_description = A framework for processing email using worker plugins.
+pkg_switchboard_homepage = https://github.com/thusfresh/switchboard
+pkg_switchboard_fetch = git
+pkg_switchboard_repo = https://github.com/thusfresh/switchboard
+pkg_switchboard_commit = master
+
+PACKAGES += syn
+pkg_syn_name = syn
+pkg_syn_description = A global Process Registry and Process Group manager for Erlang.
+pkg_syn_homepage = https://github.com/ostinelli/syn
+pkg_syn_fetch = git
+pkg_syn_repo = https://github.com/ostinelli/syn
+pkg_syn_commit = master
+
+PACKAGES += sync
+pkg_sync_name = sync
+pkg_sync_description = On-the-fly recompiling and reloading in Erlang.
+pkg_sync_homepage = https://github.com/rustyio/sync
+pkg_sync_fetch = git
+pkg_sync_repo = https://github.com/rustyio/sync
+pkg_sync_commit = master
+
+PACKAGES += syntaxerl
+pkg_syntaxerl_name = syntaxerl
+pkg_syntaxerl_description = Syntax checker for Erlang
+pkg_syntaxerl_homepage = https://github.com/ten0s/syntaxerl
+pkg_syntaxerl_fetch = git
+pkg_syntaxerl_repo = https://github.com/ten0s/syntaxerl
+pkg_syntaxerl_commit = master
+
+PACKAGES += syslog
+pkg_syslog_name = syslog
+pkg_syslog_description = Erlang port driver for interacting with syslog via syslog(3)
+pkg_syslog_homepage = https://github.com/Vagabond/erlang-syslog
+pkg_syslog_fetch = git
+pkg_syslog_repo = https://github.com/Vagabond/erlang-syslog
+pkg_syslog_commit = master
+
+PACKAGES += taskforce
+pkg_taskforce_name = taskforce
+pkg_taskforce_description = Erlang worker pools for controlled parallelisation of arbitrary tasks.
+pkg_taskforce_homepage = https://github.com/g-andrade/taskforce
+pkg_taskforce_fetch = git
+pkg_taskforce_repo = https://github.com/g-andrade/taskforce
+pkg_taskforce_commit = master
+
+PACKAGES += tddreloader
+pkg_tddreloader_name = tddreloader
+pkg_tddreloader_description = Shell utility for recompiling, reloading, and testing code as it changes
+pkg_tddreloader_homepage = https://github.com/version2beta/tddreloader
+pkg_tddreloader_fetch = git
+pkg_tddreloader_repo = https://github.com/version2beta/tddreloader
+pkg_tddreloader_commit = master
+
+PACKAGES += tempo
+pkg_tempo_name = tempo
+pkg_tempo_description = NIF-based date and time parsing and formatting for Erlang.
+pkg_tempo_homepage = https://github.com/selectel/tempo
+pkg_tempo_fetch = git
+pkg_tempo_repo = https://github.com/selectel/tempo
+pkg_tempo_commit = master
+
+PACKAGES += ticktick
+pkg_ticktick_name = ticktick
+pkg_ticktick_description = Ticktick is an id generator for message service.
+pkg_ticktick_homepage = https://github.com/ericliang/ticktick
+pkg_ticktick_fetch = git
+pkg_ticktick_repo = https://github.com/ericliang/ticktick
+pkg_ticktick_commit = master
+
+PACKAGES += tinymq
+pkg_tinymq_name = tinymq
+pkg_tinymq_description = TinyMQ - a diminutive, in-memory message queue
+pkg_tinymq_homepage = https://github.com/ChicagoBoss/tinymq
+pkg_tinymq_fetch = git
+pkg_tinymq_repo = https://github.com/ChicagoBoss/tinymq
+pkg_tinymq_commit = master
+
+PACKAGES += tinymt
+pkg_tinymt_name = tinymt
+pkg_tinymt_description = TinyMT pseudo random number generator for Erlang.
+pkg_tinymt_homepage = https://github.com/jj1bdx/tinymt-erlang
+pkg_tinymt_fetch = git
+pkg_tinymt_repo = https://github.com/jj1bdx/tinymt-erlang
+pkg_tinymt_commit = master
+
+PACKAGES += tirerl
+pkg_tirerl_name = tirerl
+pkg_tirerl_description = Erlang interface to Elastic Search
+pkg_tirerl_homepage = https://github.com/inaka/tirerl
+pkg_tirerl_fetch = git
+pkg_tirerl_repo = https://github.com/inaka/tirerl
+pkg_tirerl_commit = master
+
+PACKAGES += toml
+pkg_toml_name = toml
+pkg_toml_description = TOML (0.4.0) config parser
+pkg_toml_homepage = http://dozzie.jarowit.net/trac/wiki/TOML
+pkg_toml_fetch = git
+pkg_toml_repo = https://github.com/dozzie/toml
+pkg_toml_commit = v0.2.0
+
+PACKAGES += traffic_tools
+pkg_traffic_tools_name = traffic_tools
+pkg_traffic_tools_description = Simple traffic limiting library
+pkg_traffic_tools_homepage = https://github.com/systra/traffic_tools
+pkg_traffic_tools_fetch = git
+pkg_traffic_tools_repo = https://github.com/systra/traffic_tools
+pkg_traffic_tools_commit = master
+
+PACKAGES += trails
+pkg_trails_name = trails
+pkg_trails_description = A couple of improvements over Cowboy Routes
+pkg_trails_homepage = http://inaka.github.io/cowboy-trails/
+pkg_trails_fetch = git
+pkg_trails_repo = https://github.com/inaka/cowboy-trails
+pkg_trails_commit = master
+
+PACKAGES += trane
+pkg_trane_name = trane
+pkg_trane_description = SAX-style parser for broken HTML, in Erlang
+pkg_trane_homepage = https://github.com/massemanet/trane
+pkg_trane_fetch = git
+pkg_trane_repo = https://github.com/massemanet/trane
+pkg_trane_commit = master
+
+PACKAGES += transit
+pkg_transit_name = transit
+pkg_transit_description = transit format for erlang
+pkg_transit_homepage = https://github.com/isaiah/transit-erlang
+pkg_transit_fetch = git
+pkg_transit_repo = https://github.com/isaiah/transit-erlang
+pkg_transit_commit = master
+
+PACKAGES += trie
+pkg_trie_name = trie
+pkg_trie_description = Erlang Trie Implementation
+pkg_trie_homepage = https://github.com/okeuday/trie
+pkg_trie_fetch = git
+pkg_trie_repo = https://github.com/okeuday/trie
+pkg_trie_commit = master
+
+PACKAGES += triq
+pkg_triq_name = triq
+pkg_triq_description = Trifork QuickCheck
+pkg_triq_homepage = https://triq.gitlab.io
+pkg_triq_fetch = git
+pkg_triq_repo = https://gitlab.com/triq/triq.git
+pkg_triq_commit = master
+
+PACKAGES += tunctl
+pkg_tunctl_name = tunctl
+pkg_tunctl_description = Erlang TUN/TAP interface
+pkg_tunctl_homepage = https://github.com/msantos/tunctl
+pkg_tunctl_fetch = git
+pkg_tunctl_repo = https://github.com/msantos/tunctl
+pkg_tunctl_commit = master
+
+PACKAGES += twerl
+pkg_twerl_name = twerl
+pkg_twerl_description = Erlang client for the Twitter Streaming API
+pkg_twerl_homepage = https://github.com/lucaspiller/twerl
+pkg_twerl_fetch = git
+pkg_twerl_repo = https://github.com/lucaspiller/twerl
+pkg_twerl_commit = oauth
+
+PACKAGES += twitter_erlang
+pkg_twitter_erlang_name = twitter_erlang
+pkg_twitter_erlang_description = An Erlang twitter client
+pkg_twitter_erlang_homepage = https://github.com/ngerakines/erlang_twitter
+pkg_twitter_erlang_fetch = git
+pkg_twitter_erlang_repo = https://github.com/ngerakines/erlang_twitter
+pkg_twitter_erlang_commit = master
+
+PACKAGES += ucol_nif
+pkg_ucol_nif_name = ucol_nif
+pkg_ucol_nif_description = ICU based collation Erlang module
+pkg_ucol_nif_homepage = https://github.com/refuge/ucol_nif
+pkg_ucol_nif_fetch = git
+pkg_ucol_nif_repo = https://github.com/refuge/ucol_nif
+pkg_ucol_nif_commit = master
+
+PACKAGES += unicorn
+pkg_unicorn_name = unicorn
+pkg_unicorn_description = Generic configuration server
+pkg_unicorn_homepage = https://github.com/shizzard/unicorn
+pkg_unicorn_fetch = git
+pkg_unicorn_repo = https://github.com/shizzard/unicorn
+pkg_unicorn_commit = master
+
+PACKAGES += unsplit
+pkg_unsplit_name = unsplit
+pkg_unsplit_description = Resolves conflicts in Mnesia after network splits
+pkg_unsplit_homepage = https://github.com/uwiger/unsplit
+pkg_unsplit_fetch = git
+pkg_unsplit_repo = https://github.com/uwiger/unsplit
+pkg_unsplit_commit = master
+
+PACKAGES += uuid
+pkg_uuid_name = uuid
+pkg_uuid_description = Erlang UUID Implementation
+pkg_uuid_homepage = https://github.com/okeuday/uuid
+pkg_uuid_fetch = git
+pkg_uuid_repo = https://github.com/okeuday/uuid
+pkg_uuid_commit = master
+
+PACKAGES += ux
+pkg_ux_name = ux
+pkg_ux_description = Unicode eXtention for Erlang (Strings, Collation)
+pkg_ux_homepage = https://github.com/erlang-unicode/ux
+pkg_ux_fetch = git
+pkg_ux_repo = https://github.com/erlang-unicode/ux
+pkg_ux_commit = master
+
+PACKAGES += vert
+pkg_vert_name = vert
+pkg_vert_description = Erlang binding to libvirt virtualization API
+pkg_vert_homepage = https://github.com/msantos/erlang-libvirt
+pkg_vert_fetch = git
+pkg_vert_repo = https://github.com/msantos/erlang-libvirt
+pkg_vert_commit = master
+
+PACKAGES += verx
+pkg_verx_name = verx
+pkg_verx_description = Erlang implementation of the libvirtd remote protocol
+pkg_verx_homepage = https://github.com/msantos/verx
+pkg_verx_fetch = git
+pkg_verx_repo = https://github.com/msantos/verx
+pkg_verx_commit = master
+
+PACKAGES += vmq_acl
+pkg_vmq_acl_name = vmq_acl
+pkg_vmq_acl_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_acl_homepage = https://verne.mq/
+pkg_vmq_acl_fetch = git
+pkg_vmq_acl_repo = https://github.com/erlio/vmq_acl
+pkg_vmq_acl_commit = master
+
+PACKAGES += vmq_bridge
+pkg_vmq_bridge_name = vmq_bridge
+pkg_vmq_bridge_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_bridge_homepage = https://verne.mq/
+pkg_vmq_bridge_fetch = git
+pkg_vmq_bridge_repo = https://github.com/erlio/vmq_bridge
+pkg_vmq_bridge_commit = master
+
+PACKAGES += vmq_graphite
+pkg_vmq_graphite_name = vmq_graphite
+pkg_vmq_graphite_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_graphite_homepage = https://verne.mq/
+pkg_vmq_graphite_fetch = git
+pkg_vmq_graphite_repo = https://github.com/erlio/vmq_graphite
+pkg_vmq_graphite_commit = master
+
+PACKAGES += vmq_passwd
+pkg_vmq_passwd_name = vmq_passwd
+pkg_vmq_passwd_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_passwd_homepage = https://verne.mq/
+pkg_vmq_passwd_fetch = git
+pkg_vmq_passwd_repo = https://github.com/erlio/vmq_passwd
+pkg_vmq_passwd_commit = master
+
+PACKAGES += vmq_server
+pkg_vmq_server_name = vmq_server
+pkg_vmq_server_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_server_homepage = https://verne.mq/
+pkg_vmq_server_fetch = git
+pkg_vmq_server_repo = https://github.com/erlio/vmq_server
+pkg_vmq_server_commit = master
+
+PACKAGES += vmq_snmp
+pkg_vmq_snmp_name = vmq_snmp
+pkg_vmq_snmp_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_snmp_homepage = https://verne.mq/
+pkg_vmq_snmp_fetch = git
+pkg_vmq_snmp_repo = https://github.com/erlio/vmq_snmp
+pkg_vmq_snmp_commit = master
+
+PACKAGES += vmq_systree
+pkg_vmq_systree_name = vmq_systree
+pkg_vmq_systree_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_systree_homepage = https://verne.mq/
+pkg_vmq_systree_fetch = git
+pkg_vmq_systree_repo = https://github.com/erlio/vmq_systree
+pkg_vmq_systree_commit = master
+
+PACKAGES += vmstats
+pkg_vmstats_name = vmstats
+pkg_vmstats_description = tiny Erlang app that works in conjunction with statsderl in order to generate information on the Erlang VM for graphite logs.
+pkg_vmstats_homepage = https://github.com/ferd/vmstats
+pkg_vmstats_fetch = git
+pkg_vmstats_repo = https://github.com/ferd/vmstats
+pkg_vmstats_commit = master
+
+PACKAGES += walrus
+pkg_walrus_name = walrus
+pkg_walrus_description = Walrus - Mustache-like Templating
+pkg_walrus_homepage = https://github.com/devinus/walrus
+pkg_walrus_fetch = git
+pkg_walrus_repo = https://github.com/devinus/walrus
+pkg_walrus_commit = master
+
+PACKAGES += webmachine
+pkg_webmachine_name = webmachine
+pkg_webmachine_description = A REST-based system for building web applications.
+pkg_webmachine_homepage = https://github.com/basho/webmachine
+pkg_webmachine_fetch = git
+pkg_webmachine_repo = https://github.com/basho/webmachine
+pkg_webmachine_commit = master
+
+PACKAGES += websocket_client
+pkg_websocket_client_name = websocket_client
+pkg_websocket_client_description = Erlang websocket client (ws and wss supported)
+pkg_websocket_client_homepage = https://github.com/jeremyong/websocket_client
+pkg_websocket_client_fetch = git
+pkg_websocket_client_repo = https://github.com/jeremyong/websocket_client
+pkg_websocket_client_commit = master
+
+PACKAGES += worker_pool
+pkg_worker_pool_name = worker_pool
+pkg_worker_pool_description = a simple erlang worker pool
+pkg_worker_pool_homepage = https://github.com/inaka/worker_pool
+pkg_worker_pool_fetch = git
+pkg_worker_pool_repo = https://github.com/inaka/worker_pool
+pkg_worker_pool_commit = master
+
+PACKAGES += wrangler
+pkg_wrangler_name = wrangler
+pkg_wrangler_description = Import of the Wrangler svn repository.
+pkg_wrangler_homepage = http://www.cs.kent.ac.uk/projects/wrangler/Home.html
+pkg_wrangler_fetch = git
+pkg_wrangler_repo = https://github.com/RefactoringTools/wrangler
+pkg_wrangler_commit = master
+
+PACKAGES += wsock
+pkg_wsock_name = wsock
+pkg_wsock_description = Erlang library to build WebSocket clients and servers
+pkg_wsock_homepage = https://github.com/madtrick/wsock
+pkg_wsock_fetch = git
+pkg_wsock_repo = https://github.com/madtrick/wsock
+pkg_wsock_commit = master
+
+PACKAGES += xhttpc
+pkg_xhttpc_name = xhttpc
+pkg_xhttpc_description = Extensible HTTP Client for Erlang
+pkg_xhttpc_homepage = https://github.com/seriyps/xhttpc
+pkg_xhttpc_fetch = git
+pkg_xhttpc_repo = https://github.com/seriyps/xhttpc
+pkg_xhttpc_commit = master
+
+PACKAGES += xref_runner
+pkg_xref_runner_name = xref_runner
+pkg_xref_runner_description = Erlang Xref Runner (inspired in rebar xref)
+pkg_xref_runner_homepage = https://github.com/inaka/xref_runner
+pkg_xref_runner_fetch = git
+pkg_xref_runner_repo = https://github.com/inaka/xref_runner
+pkg_xref_runner_commit = master
+
+PACKAGES += yamerl
+pkg_yamerl_name = yamerl
+pkg_yamerl_description = YAML 1.2 parser in pure Erlang
+pkg_yamerl_homepage = https://github.com/yakaz/yamerl
+pkg_yamerl_fetch = git
+pkg_yamerl_repo = https://github.com/yakaz/yamerl
+pkg_yamerl_commit = master
+
+PACKAGES += yamler
+pkg_yamler_name = yamler
+pkg_yamler_description = libyaml-based yaml loader for Erlang
+pkg_yamler_homepage = https://github.com/goertzenator/yamler
+pkg_yamler_fetch = git
+pkg_yamler_repo = https://github.com/goertzenator/yamler
+pkg_yamler_commit = master
+
+PACKAGES += yaws
+pkg_yaws_name = yaws
+pkg_yaws_description = Yaws webserver
+pkg_yaws_homepage = http://yaws.hyber.org
+pkg_yaws_fetch = git
+pkg_yaws_repo = https://github.com/klacke/yaws
+pkg_yaws_commit = master
+
+PACKAGES += zab_engine
+pkg_zab_engine_name = zab_engine
+pkg_zab_engine_description = ZAB protocol implemented in Erlang
+pkg_zab_engine_homepage = https://github.com/xinmingyao/zab_engine
+pkg_zab_engine_fetch = git
+pkg_zab_engine_repo = https://github.com/xinmingyao/zab_engine
+pkg_zab_engine_commit = master
+
+PACKAGES += zabbix_sender
+pkg_zabbix_sender_name = zabbix_sender
+pkg_zabbix_sender_description = Zabbix trapper for sending data to Zabbix in pure Erlang
+pkg_zabbix_sender_homepage = https://github.com/stalkermn/zabbix_sender
+pkg_zabbix_sender_fetch = git
+pkg_zabbix_sender_repo = https://github.com/stalkermn/zabbix_sender.git
+pkg_zabbix_sender_commit = master
+
+PACKAGES += zeta
+pkg_zeta_name = zeta
+pkg_zeta_description = HTTP access log parser in Erlang
+pkg_zeta_homepage = https://github.com/s1n4/zeta
+pkg_zeta_fetch = git
+pkg_zeta_repo = https://github.com/s1n4/zeta
+pkg_zeta_commit = master
+
+PACKAGES += zippers
+pkg_zippers_name = zippers
+pkg_zippers_description = A library for functional zipper data structures in Erlang. Read more on zippers
+pkg_zippers_homepage = https://github.com/ferd/zippers
+pkg_zippers_fetch = git
+pkg_zippers_repo = https://github.com/ferd/zippers
+pkg_zippers_commit = master
+
+PACKAGES += zlists
+pkg_zlists_name = zlists
+pkg_zlists_description = Erlang lazy lists library.
+pkg_zlists_homepage = https://github.com/vjache/erlang-zlists
+pkg_zlists_fetch = git
+pkg_zlists_repo = https://github.com/vjache/erlang-zlists
+pkg_zlists_commit = master
+
+PACKAGES += zraft_lib
+pkg_zraft_lib_name = zraft_lib
+pkg_zraft_lib_description = Erlang raft consensus protocol implementation
+pkg_zraft_lib_homepage = https://github.com/dreyk/zraft_lib
+pkg_zraft_lib_fetch = git
+pkg_zraft_lib_repo = https://github.com/dreyk/zraft_lib
+pkg_zraft_lib_commit = master
+
+PACKAGES += zucchini
+pkg_zucchini_name = zucchini
+pkg_zucchini_description = An Erlang INI parser
+pkg_zucchini_homepage = https://github.com/devinus/zucchini
+pkg_zucchini_fetch = git
+pkg_zucchini_repo = https://github.com/devinus/zucchini
+pkg_zucchini_commit = master
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: search
+
+define pkg_print
+ $(verbose) printf "%s\n" \
+ $(if $(call core_eq,$(1),$(pkg_$(1)_name)),,"Pkg name: $(1)") \
+ "App name: $(pkg_$(1)_name)" \
+ "Description: $(pkg_$(1)_description)" \
+ "Home page: $(pkg_$(1)_homepage)" \
+ "Fetch with: $(pkg_$(1)_fetch)" \
+ "Repository: $(pkg_$(1)_repo)" \
+ "Commit: $(pkg_$(1)_commit)" \
+ ""
+
+endef
+
+search:
+ifdef q
+ $(foreach p,$(PACKAGES), \
+ $(if $(findstring $(call core_lc,$(q)),$(call core_lc,$(pkg_$(p)_name) $(pkg_$(p)_description))), \
+ $(call pkg_print,$(p))))
+else
+ $(foreach p,$(PACKAGES),$(call pkg_print,$(p)))
+endif
+
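+# Usage (illustrative): list every package, or filter name and description
+# case-insensitively with q:
+#   $ make search
+#   $ make search q=pool
+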
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-deps clean-tmp-deps.log
+
+# Configuration.
+
+ifdef OTP_DEPS
+$(warning The variable OTP_DEPS is deprecated in favor of LOCAL_DEPS.)
+endif
+
+IGNORE_DEPS ?=
+export IGNORE_DEPS
+
+APPS_DIR ?= $(CURDIR)/apps
+export APPS_DIR
+
+DEPS_DIR ?= $(CURDIR)/deps
+export DEPS_DIR
+
+REBAR_DEPS_DIR = $(DEPS_DIR)
+export REBAR_DEPS_DIR
+
+REBAR_GIT ?= https://github.com/rebar/rebar
+REBAR_COMMIT ?= 576e12171ab8d69b048b827b92aa65d067deea01
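+
+# Most of these defaults use ?= and can therefore be overridden from the
+# project Makefile, the environment or the command line, e.g. (illustrative):
+#   $ make DEPS_DIR=/tmp/build/deps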
+
+# External "early" plugins (see core/plugins.mk for regular plugins).
+# Both kinds of plugins use the core_dep_plugin macro.
+
+define core_dep_plugin
+ifeq ($(2),$(PROJECT))
+-include $$(patsubst $(PROJECT)/%,%,$(1))
+else
+-include $(DEPS_DIR)/$(1)
+
+$(DEPS_DIR)/$(1): $(DEPS_DIR)/$(2) ;
+endif
+endef
+
+DEP_EARLY_PLUGINS ?=
+
+$(foreach p,$(DEP_EARLY_PLUGINS),\
+ $(eval $(if $(findstring /,$p),\
+ $(call core_dep_plugin,$p,$(firstword $(subst /, ,$p))),\
+ $(call core_dep_plugin,$p/early-plugins.mk,$p))))
+
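+# Example (hypothetical dependency name): load an early plugin file before
+# the rest of this file is processed.
+#   DEP_EARLY_PLUGINS = my_dep             # includes $(DEPS_DIR)/my_dep/early-plugins.mk
+#   DEP_EARLY_PLUGINS = my_dep/mk/early.mk # includes that exact file under $(DEPS_DIR)
+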
+# Query functions.
+
+query_fetch_method = $(if $(dep_$(1)),$(call _qfm_dep,$(word 1,$(dep_$(1)))),$(call _qfm_pkg,$(1)))
+_qfm_dep = $(if $(dep_fetch_$(1)),$(1),$(if $(IS_DEP),legacy,fail))
+_qfm_pkg = $(if $(pkg_$(1)_fetch),$(pkg_$(1)_fetch),fail)
+
+query_name = $(if $(dep_$(1)),$(1),$(if $(pkg_$(1)_name),$(pkg_$(1)_name),$(1)))
+
+query_repo = $(call _qr,$(1),$(call query_fetch_method,$(1)))
+_qr = $(if $(query_repo_$(2)),$(call query_repo_$(2),$(1)),$(call dep_repo,$(1)))
+
+query_repo_default = $(if $(dep_$(1)),$(word 2,$(dep_$(1))),$(pkg_$(1)_repo))
+query_repo_git = $(patsubst git://github.com/%,https://github.com/%,$(call query_repo_default,$(1)))
+query_repo_git-subfolder = $(call query_repo_git,$(1))
+query_repo_git-submodule = -
+query_repo_hg = $(call query_repo_default,$(1))
+query_repo_svn = $(call query_repo_default,$(1))
+query_repo_cp = $(call query_repo_default,$(1))
+query_repo_ln = $(call query_repo_default,$(1))
+query_repo_hex = https://hex.pm/packages/$(if $(word 3,$(dep_$(1))),$(word 3,$(dep_$(1))),$(1))
+query_repo_fail = -
+query_repo_legacy = -
+
+query_version = $(call _qv,$(1),$(call query_fetch_method,$(1)))
+_qv = $(if $(query_version_$(2)),$(call query_version_$(2),$(1)),$(call dep_commit,$(1)))
+
+query_version_default = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(word 3,$(dep_$(1))),$(pkg_$(1)_commit)))
+query_version_git = $(call query_version_default,$(1))
+query_version_git-subfolder = $(call query_version_git,$(1))
+query_version_git-submodule = -
+query_version_hg = $(call query_version_default,$(1))
+query_version_svn = -
+query_version_cp = -
+query_version_ln = -
+query_version_hex = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(word 2,$(dep_$(1))),$(pkg_$(1)_commit)))
+query_version_fail = -
+query_version_legacy = -
+
+query_extra = $(call _qe,$(1),$(call query_fetch_method,$(1)))
+_qe = $(if $(query_extra_$(2)),$(call query_extra_$(2),$(1)),-)
+
+query_extra_git = -
+query_extra_git-subfolder = $(if $(dep_$(1)),subfolder=$(word 4,$(dep_$(1))),-)
+query_extra_git-submodule = -
+query_extra_hg = -
+query_extra_svn = -
+query_extra_cp = -
+query_extra_ln = -
+query_extra_hex = $(if $(dep_$(1)),package-name=$(word 3,$(dep_$(1))),-)
+query_extra_fail = -
+query_extra_legacy = -
+
+query_absolute_path = $(addprefix $(DEPS_DIR)/,$(call query_name,$(1)))
+
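+# Illustrative calls, assuming a package or dependency named cowboy:
+#   $(call query_fetch_method,cowboy) # e.g. git
+#   $(call query_repo,cowboy)         # its repository URL
+#   $(call query_version,cowboy)      # its commit, tag or version
+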
+# Deprecated legacy query functions.
+dep_fetch = $(call query_fetch_method,$(1))
+dep_name = $(call query_name,$(1))
+dep_repo = $(call query_repo_git,$(1))
+dep_commit = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(if $(filter hex,$(word 1,$(dep_$(1)))),$(word 2,$(dep_$(1))),$(word 3,$(dep_$(1)))),$(pkg_$(1)_commit)))
+
+LOCAL_DEPS_DIRS = $(foreach a,$(LOCAL_DEPS),$(if $(wildcard $(APPS_DIR)/$(a)),$(APPS_DIR)/$(a)))
+ALL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(foreach dep,$(filter-out $(IGNORE_DEPS),$(BUILD_DEPS) $(DEPS)),$(call dep_name,$(dep))))
+
+# When we are calling an app directly we don't want to include it here,
+# otherwise it'll be treated both as an app and as the top-level project.
+ALL_APPS_DIRS = $(if $(wildcard $(APPS_DIR)/),$(filter-out $(APPS_DIR),$(shell find $(APPS_DIR) -maxdepth 1 -type d)))
+ifdef ROOT_DIR
+ifndef IS_APP
+ALL_APPS_DIRS := $(filter-out $(APPS_DIR)/$(notdir $(CURDIR)),$(ALL_APPS_DIRS))
+endif
+endif
+
+ifeq ($(filter $(APPS_DIR) $(DEPS_DIR),$(subst :, ,$(ERL_LIBS))),)
+ifeq ($(ERL_LIBS),)
+ ERL_LIBS = $(APPS_DIR):$(DEPS_DIR)
+else
+ ERL_LIBS := $(ERL_LIBS):$(APPS_DIR):$(DEPS_DIR)
+endif
+endif
+export ERL_LIBS
+
+export NO_AUTOPATCH
+
+# Verbosity.
+
+dep_verbose_0 = @echo " DEP $1 ($(call dep_commit,$1))";
+dep_verbose_2 = set -x;
+dep_verbose = $(dep_verbose_$(V))
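+
+# For example, `make V=2` traces each dependency recipe with `set -x`,
+# while the default verbosity prints the short " DEP <name> (<commit>)"
+# line defined above.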
+
+# Optimization: don't recompile deps unless truly necessary.
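+# A dependency counts as already built once its ebin/dep_built marker file
+# exists; the deps:: recipe below creates that marker after a successful
+# build and skips dependencies that already have it.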
+
+ifndef IS_DEP
+ifneq ($(MAKELEVEL),0)
+$(shell rm -f ebin/dep_built)
+endif
+endif
+
+# Core targets.
+
+ALL_APPS_DIRS_TO_BUILD = $(if $(LOCAL_DEPS_DIRS)$(IS_APP),$(LOCAL_DEPS_DIRS),$(ALL_APPS_DIRS))
+
+apps:: $(ALL_APPS_DIRS) clean-tmp-deps.log | $(ERLANG_MK_TMP)
+# Create ebin directory for all apps to make sure Erlang recognizes them
+# as proper OTP applications when using -include_lib. This is a temporary
+# fix; a proper fix would be to compile apps/* in the right order.
+ifndef IS_APP
+ifneq ($(ALL_APPS_DIRS),)
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ mkdir -p $$dep/ebin; \
+ done
+endif
+endif
+# At the toplevel: if LOCAL_DEPS is defined with at least one local app, only
+# compile that list of apps. Otherwise, compile everything.
+# Within an app: compile all LOCAL_DEPS that are (uncompiled) local apps.
+ifneq ($(ALL_APPS_DIRS_TO_BUILD),)
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS_TO_BUILD); do \
+ if grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/apps.log; then \
+ :; \
+ else \
+ echo $$dep >> $(ERLANG_MK_TMP)/apps.log; \
+ $(MAKE) -C $$dep $(if $(IS_TEST),test-build-app) IS_APP=1; \
+ fi \
+ done
+endif
+
+clean-tmp-deps.log:
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_TMP)/apps.log $(ERLANG_MK_TMP)/deps.log
+endif
+
+# Erlang.mk does not rebuild dependencies after they have been compiled
+# once. Developers working on the top-level project and some of its
+# dependencies at the same time may want to change this behavior.
+# There are two solutions, illustrated below:
+# 1. Set `FULL=1` so that all dependencies are visited and
+#    recursively recompiled if necessary.
+# 2. Set `FORCE_REBUILD=` to the specific list of dependencies that
+#    should be recompiled (instead of the whole set).
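+#
+# For example:
+#   $ make FULL=1
+#   $ make FORCE_REBUILD="cowboy ranch"   # hypothetical dependency names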
+
+FORCE_REBUILD ?=
+
+ifeq ($(origin FULL),undefined)
+ifneq ($(strip $(force_rebuild_dep)$(FORCE_REBUILD)),)
+define force_rebuild_dep
+echo "$(FORCE_REBUILD)" | grep -qw "$$(basename "$1")"
+endef
+endif
+endif
+
+ifneq ($(SKIP_DEPS),)
+deps::
+else
+deps:: $(ALL_DEPS_DIRS) apps clean-tmp-deps.log | $(ERLANG_MK_TMP)
+ifneq ($(ALL_DEPS_DIRS),)
+ $(verbose) set -e; for dep in $(ALL_DEPS_DIRS); do \
+ if grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/deps.log; then \
+ :; \
+ else \
+ echo $$dep >> $(ERLANG_MK_TMP)/deps.log; \
+ if [ -z "$(strip $(FULL))" ] $(if $(force_rebuild_dep),&& ! ($(call force_rebuild_dep,$$dep)),) && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ elif [ -f $$dep/GNUmakefile ] || [ -f $$dep/makefile ] || [ -f $$dep/Makefile ]; then \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ else \
+ echo "Error: No Makefile to build dependency $$dep." >&2; \
+ exit 2; \
+ fi \
+ fi \
+ done
+endif
+endif
+
+# Deps related targets.
+
+# @todo Rename GNUmakefile and makefile to Makefile first, if they exist.
+# While the makefile could also be named GNUmakefile or makefile,
+# in practice only Makefile has been needed so far.
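+# dep_autopatch picks a patching strategy per dependency: erlang.mk-based
+# deps get their erlang.mk include line rewritten; Makefile-based deps that
+# appear to use rebar go through dep_autopatch2; deps with neither a
+# Makefile nor a src/ directory get a noop Makefile.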
+define dep_autopatch
+ if [ -f $(DEPS_DIR)/$(1)/erlang.mk ]; then \
+ rm -rf $(DEPS_DIR)/$1/ebin/; \
+ $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+ $(call dep_autopatch_erlang_mk,$(1)); \
+ elif [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+ if [ -f $(DEPS_DIR)/$1/rebar.lock ]; then \
+ $(call dep_autopatch2,$1); \
+ elif [ 0 != `grep -c "include ../\w*\.mk" $(DEPS_DIR)/$(1)/Makefile` ]; then \
+ $(call dep_autopatch2,$(1)); \
+ elif [ 0 != `grep -ci "^[^#].*rebar" $(DEPS_DIR)/$(1)/Makefile` ]; then \
+ $(call dep_autopatch2,$(1)); \
+ elif [ -n "`find $(DEPS_DIR)/$(1)/ -type f -name \*.mk -not -name erlang.mk -exec grep -i "^[^#].*rebar" '{}' \;`" ]; then \
+ $(call dep_autopatch2,$(1)); \
+ fi \
+ else \
+ if [ ! -d $(DEPS_DIR)/$(1)/src/ ]; then \
+ $(call dep_autopatch_noop,$(1)); \
+ else \
+ $(call dep_autopatch2,$(1)); \
+ fi \
+ fi
+endef
+
+define dep_autopatch2
+ ! test -f $(DEPS_DIR)/$1/ebin/$1.app || \
+ mv -n $(DEPS_DIR)/$1/ebin/$1.app $(DEPS_DIR)/$1/src/$1.app.src; \
+ rm -f $(DEPS_DIR)/$1/ebin/$1.app; \
+ if [ -f $(DEPS_DIR)/$1/src/$1.app.src.script ]; then \
+ $(call erlang,$(call dep_autopatch_appsrc_script.erl,$(1))); \
+ fi; \
+ $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+ if [ -f $(DEPS_DIR)/$(1)/rebar -o -f $(DEPS_DIR)/$(1)/rebar.config -o -f $(DEPS_DIR)/$(1)/rebar.config.script -o -f $(DEPS_DIR)/$1/rebar.lock ]; then \
+ $(call dep_autopatch_fetch_rebar); \
+ $(call dep_autopatch_rebar,$(1)); \
+ else \
+ $(call dep_autopatch_gen,$(1)); \
+ fi
+endef
+
+define dep_autopatch_noop
+ printf "noop:\n" > $(DEPS_DIR)/$(1)/Makefile
+endef
+
+# Replace "include erlang.mk" with a line that will load the parent Erlang.mk
+# if given. Do it for all 3 possible Makefile file names.
+ifeq ($(NO_AUTOPATCH_ERLANG_MK),)
+define dep_autopatch_erlang_mk
+ for f in Makefile makefile GNUmakefile; do \
+ if [ -f $(DEPS_DIR)/$1/$$f ]; then \
+ sed -i.bak s/'include *erlang.mk'/'include $$(if $$(ERLANG_MK_FILENAME),$$(ERLANG_MK_FILENAME),erlang.mk)'/ $(DEPS_DIR)/$1/$$f; \
+ fi \
+ done
+endef
+else
+define dep_autopatch_erlang_mk
+ :
+endef
+endif
+
+define dep_autopatch_gen
+ printf "%s\n" \
+ "ERLC_OPTS = +debug_info" \
+ "include ../../erlang.mk" > $(DEPS_DIR)/$(1)/Makefile
+endef
+
+# We use flock/lockf when available to avoid concurrency issues.
+define dep_autopatch_fetch_rebar
+ if command -v flock >/dev/null; then \
+ flock $(ERLANG_MK_TMP)/rebar.lock sh -c "$(call dep_autopatch_fetch_rebar2)"; \
+ elif command -v lockf >/dev/null; then \
+ lockf $(ERLANG_MK_TMP)/rebar.lock sh -c "$(call dep_autopatch_fetch_rebar2)"; \
+ else \
+ $(call dep_autopatch_fetch_rebar2); \
+ fi
+endef
+
+define dep_autopatch_fetch_rebar2
+ if [ ! -d $(ERLANG_MK_TMP)/rebar ]; then \
+ git clone -q -n -- $(REBAR_GIT) $(ERLANG_MK_TMP)/rebar; \
+ cd $(ERLANG_MK_TMP)/rebar; \
+ git checkout -q $(REBAR_COMMIT); \
+ ./bootstrap; \
+ cd -; \
+ fi
+endef
+
+define dep_autopatch_rebar
+ if [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+ mv $(DEPS_DIR)/$(1)/Makefile $(DEPS_DIR)/$(1)/Makefile.orig.mk; \
+ fi; \
+ $(call erlang,$(call dep_autopatch_rebar.erl,$(1))); \
+ rm -f $(DEPS_DIR)/$(1)/ebin/$(1).app
+endef
+
+define dep_autopatch_rebar.erl
+ application:load(rebar),
+ application:set_env(rebar, log_level, debug),
+ rmemo:start(),
+ Conf1 = case file:consult("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config)") of
+ {ok, Conf0} -> Conf0;
+ _ -> []
+ end,
+ {Conf, OsEnv} = fun() ->
+ case filelib:is_file("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)") of
+ false -> {Conf1, []};
+ true ->
+ Bindings0 = erl_eval:new_bindings(),
+ Bindings1 = erl_eval:add_binding('CONFIG', Conf1, Bindings0),
+ Bindings = erl_eval:add_binding('SCRIPT', "$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings1),
+ Before = os:getenv(),
+ {ok, Conf2} = file:script("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings),
+ {Conf2, lists:foldl(fun(E, Acc) -> lists:delete(E, Acc) end, os:getenv(), Before)}
+ end
+ end(),
+ Write = fun (Text) ->
+ file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/Makefile)", Text, [append])
+ end,
+ Escape = fun (Text) ->
+ re:replace(Text, "\\\\$$", "\$$$$", [global, {return, list}])
+ end,
+ Write("IGNORE_DEPS += edown eper eunit_formatters meck node_package "
+ "rebar_lock_deps_plugin rebar_vsn_plugin reltool_util\n"),
+ Write("C_SRC_DIR = /path/do/not/exist\n"),
+ Write("C_SRC_TYPE = rebar\n"),
+ Write("DRV_CFLAGS = -fPIC\nexport DRV_CFLAGS\n"),
+ Write(["ERLANG_ARCH = ", rebar_utils:wordsize(), "\nexport ERLANG_ARCH\n"]),
+ ToList = fun
+ (V) when is_atom(V) -> atom_to_list(V);
+ (V) when is_list(V) -> "'\\"" ++ V ++ "\\"'"
+ end,
+ fun() ->
+ Write("ERLC_OPTS = +debug_info\nexport ERLC_OPTS\n"),
+ case lists:keyfind(erl_opts, 1, Conf) of
+ false -> ok;
+ {_, ErlOpts} ->
+ lists:foreach(fun
+ ({d, D}) ->
+ Write("ERLC_OPTS += -D" ++ ToList(D) ++ "=1\n");
+ ({d, DKey, DVal}) ->
+ Write("ERLC_OPTS += -D" ++ ToList(DKey) ++ "=" ++ ToList(DVal) ++ "\n");
+ ({i, I}) ->
+ Write(["ERLC_OPTS += -I ", I, "\n"]);
+ ({platform_define, Regex, D}) ->
+ case rebar_utils:is_arch(Regex) of
+ true -> Write("ERLC_OPTS += -D" ++ ToList(D) ++ "=1\n");
+ false -> ok
+ end;
+ ({parse_transform, PT}) ->
+ Write("ERLC_OPTS += +'{parse_transform, " ++ ToList(PT) ++ "}'\n");
+ (_) -> ok
+ end, ErlOpts)
+ end,
+ Write("\n")
+ end(),
+ GetHexVsn = fun(N, NP) ->
+ case file:consult("$(call core_native_path,$(DEPS_DIR)/$1/rebar.lock)") of
+ {ok, Lock} ->
+ io:format("~p~n", [Lock]),
+ case lists:keyfind("1.1.0", 1, Lock) of
+ {_, LockPkgs} ->
+ io:format("~p~n", [LockPkgs]),
+ case lists:keyfind(atom_to_binary(N, latin1), 1, LockPkgs) of
+ {_, {pkg, _, Vsn}, _} ->
+ io:format("~p~n", [Vsn]),
+ {N, {hex, NP, binary_to_list(Vsn)}};
+ _ ->
+ false
+ end;
+ _ ->
+ false
+ end;
+ _ ->
+ false
+ end
+ end,
+ SemVsn = fun
+ ("~>" ++ S0) ->
+ S = case S0 of
+ " " ++ S1 -> S1;
+ _ -> S0
+ end,
+ case length([ok || $$. <- S]) of
+ 0 -> S ++ ".0.0";
+ 1 -> S ++ ".0";
+ _ -> S
+ end;
+ (S) -> S
+ end,
+ fun() ->
+ File = case lists:keyfind(deps, 1, Conf) of
+ false -> [];
+ {_, Deps} ->
+ [begin case case Dep of
+ N when is_atom(N) -> GetHexVsn(N, N);
+ {N, S} when is_atom(N), is_list(S) -> {N, {hex, N, SemVsn(S)}};
+ {N, {pkg, NP}} when is_atom(N) -> GetHexVsn(N, NP);
+ {N, S, {pkg, NP}} -> {N, {hex, NP, S}};
+ {N, S} when is_tuple(S) -> {N, S};
+ {N, _, S} -> {N, S};
+ {N, _, S, _} -> {N, S};
+ _ -> false
+ end of
+ false -> ok;
+ {Name, Source} ->
+ {Method, Repo, Commit} = case Source of
+ {hex, NPV, V} -> {hex, V, NPV};
+ {git, R} -> {git, R, master};
+ {M, R, {branch, C}} -> {M, R, C};
+ {M, R, {ref, C}} -> {M, R, C};
+ {M, R, {tag, C}} -> {M, R, C};
+ {M, R, C} -> {M, R, C}
+ end,
+ Write(io_lib:format("DEPS += ~s\ndep_~s = ~s ~s ~s~n", [Name, Name, Method, Repo, Commit]))
+ end end || Dep <- Deps]
+ end
+ end(),
+ fun() ->
+ case lists:keyfind(erl_first_files, 1, Conf) of
+ false -> ok;
+ {_, Files} ->
+ Names = [[" ", case lists:reverse(F) of
+ "lre." ++ Elif -> lists:reverse(Elif);
+ "lrx." ++ Elif -> lists:reverse(Elif);
+ "lry." ++ Elif -> lists:reverse(Elif);
+ Elif -> lists:reverse(Elif)
+ end] || "src/" ++ F <- Files],
+ Write(io_lib:format("COMPILE_FIRST +=~s\n", [Names]))
+ end
+ end(),
+ Write("\n\nrebar_dep: preprocess pre-deps deps pre-app app\n"),
+ Write("\npreprocess::\n"),
+ Write("\npre-deps::\n"),
+ Write("\npre-app::\n"),
+ PatchHook = fun(Cmd) ->
+ Cmd2 = re:replace(Cmd, "^([g]?make)(.*)( -C.*)", "\\\\1\\\\3\\\\2", [{return, list}]),
+ case Cmd2 of
+ "make -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1);
+ "gmake -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1);
+ "make " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1);
+ "gmake " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1);
+ _ -> Escape(Cmd)
+ end
+ end,
+ fun() ->
+ case lists:keyfind(pre_hooks, 1, Conf) of
+ false -> ok;
+ {_, Hooks} ->
+ [case H of
+ {'get-deps', Cmd} ->
+ Write("\npre-deps::\n\t" ++ PatchHook(Cmd) ++ "\n");
+ {compile, Cmd} ->
+ Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n");
+ {Regex, compile, Cmd} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n");
+ false -> ok
+ end;
+ _ -> ok
+ end || H <- Hooks]
+ end
+ end(),
+ ShellToMk = fun(V0) ->
+ V1 = re:replace(V0, "[$$][(]", "$$\(shell ", [global]),
+ V = re:replace(V1, "([$$])(?![(])(\\\\w*)", "\\\\1(\\\\2)", [global]),
+ re:replace(V, "-Werror\\\\b", "", [{return, list}, global])
+ end,
+ PortSpecs = fun() ->
+ case lists:keyfind(port_specs, 1, Conf) of
+ false ->
+ case filelib:is_dir("$(call core_native_path,$(DEPS_DIR)/$1/c_src)") of
+ false -> [];
+ true ->
+ [{"priv/" ++ proplists:get_value(so_name, Conf, "$(1)_drv.so"),
+ proplists:get_value(port_sources, Conf, ["c_src/*.c"]), []}]
+ end;
+ {_, Specs} ->
+ lists:flatten([case S of
+ {Output, Input} -> {ShellToMk(Output), Input, []};
+ {Regex, Output, Input} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {ShellToMk(Output), Input, []};
+ false -> []
+ end;
+ {Regex, Output, Input, [{env, Env}]} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {ShellToMk(Output), Input, Env};
+ false -> []
+ end
+ end || S <- Specs])
+ end
+ end(),
+ PortSpecWrite = fun (Text) ->
+ file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/c_src/Makefile.erlang.mk)", Text, [append])
+ end,
+ case PortSpecs of
+ [] -> ok;
+ _ ->
+ Write("\npre-app::\n\t@$$\(MAKE) --no-print-directory -f c_src/Makefile.erlang.mk\n"),
+ PortSpecWrite(io_lib:format("ERL_CFLAGS ?= -finline-functions -Wall -fPIC -I \\"~s/erts-~s/include\\" -I \\"~s\\"\n",
+ [code:root_dir(), erlang:system_info(version), code:lib_dir(erl_interface, include)])),
+ PortSpecWrite(io_lib:format("ERL_LDFLAGS ?= -L \\"~s\\" -lei\n",
+ [code:lib_dir(erl_interface, lib)])),
+ [PortSpecWrite(["\n", E, "\n"]) || E <- OsEnv],
+ FilterEnv = fun(Env) ->
+ lists:flatten([case E of
+ {_, _} -> E;
+ {Regex, K, V} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {K, V};
+ false -> []
+ end
+ end || E <- Env])
+ end,
+ MergeEnv = fun(Env) ->
+ lists:foldl(fun ({K, V}, Acc) ->
+ case lists:keyfind(K, 1, Acc) of
+ false -> [{K, rebar_utils:expand_env_variable(V, K, "")}|Acc];
+ {_, V0} -> [{K, rebar_utils:expand_env_variable(V, K, V0)}|Acc]
+ end
+ end, [], Env)
+ end,
+ PortEnv = case lists:keyfind(port_env, 1, Conf) of
+ false -> [];
+ {_, PortEnv0} -> FilterEnv(PortEnv0)
+ end,
+ PortSpec = fun ({Output, Input0, Env}) ->
+ filelib:ensure_dir("$(call core_native_path,$(DEPS_DIR)/$1/)" ++ Output),
+ Input = [[" ", I] || I <- Input0],
+ PortSpecWrite([
+ [["\n", K, " = ", ShellToMk(V)] || {K, V} <- lists:reverse(MergeEnv(PortEnv))],
+ case $(PLATFORM) of
+ darwin -> "\n\nLDFLAGS += -flat_namespace -undefined suppress";
+ _ -> ""
+ end,
+ "\n\nall:: ", Output, "\n\t@:\n\n",
+ "%.o: %.c\n\t$$\(CC) -c -o $$\@ $$\< $$\(CFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.C\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.cc\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.cpp\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ [[Output, ": ", K, " += ", ShellToMk(V), "\n"] || {K, V} <- lists:reverse(MergeEnv(FilterEnv(Env)))],
+ Output, ": $$\(foreach ext,.c .C .cc .cpp,",
+ "$$\(patsubst %$$\(ext),%.o,$$\(filter %$$\(ext),$$\(wildcard", Input, "))))\n",
+ "\t$$\(CC) -o $$\@ $$\? $$\(LDFLAGS) $$\(ERL_LDFLAGS) $$\(DRV_LDFLAGS) $$\(EXE_LDFLAGS)",
+ case {filename:extension(Output), $(PLATFORM)} of
+ {[], _} -> "\n";
+ {_, darwin} -> "\n";
+ _ -> " -shared\n"
+ end])
+ end,
+ [PortSpec(S) || S <- PortSpecs]
+ end,
+ fun() ->
+ case lists:keyfind(plugins, 1, Conf) of
+ false -> ok;
+ {_, Plugins0} ->
+ Plugins = [P || P <- Plugins0, is_tuple(P)],
+ case lists:keyfind('lfe-compile', 1, Plugins) of
+ false -> ok;
+ _ -> Write("\nBUILD_DEPS = lfe lfe.mk\ndep_lfe.mk = git https://github.com/ninenines/lfe.mk master\nDEP_PLUGINS = lfe.mk\n")
+ end
+ end
+ end(),
+ Write("\ninclude $$\(if $$\(ERLANG_MK_FILENAME),$$\(ERLANG_MK_FILENAME),erlang.mk)"),
+ RunPlugin = fun(Plugin, Step) ->
+ case erlang:function_exported(Plugin, Step, 2) of
+ false -> ok;
+ true ->
+ c:cd("$(call core_native_path,$(DEPS_DIR)/$1/)"),
+ Ret = Plugin:Step({config, "", Conf, dict:new(), dict:new(), dict:new(),
+ dict:store(base_dir, "", dict:new())}, undefined),
+ io:format("rebar plugin ~p step ~p ret ~p~n", [Plugin, Step, Ret])
+ end
+ end,
+ fun() ->
+ case lists:keyfind(plugins, 1, Conf) of
+ false -> ok;
+ {_, Plugins0} ->
+ Plugins = [P || P <- Plugins0, is_atom(P)],
+ [begin
+ case lists:keyfind(deps, 1, Conf) of
+ false -> ok;
+ {_, Deps} ->
+ case lists:keyfind(P, 1, Deps) of
+ false -> ok;
+ _ ->
+ Path = "$(call core_native_path,$(DEPS_DIR)/)" ++ atom_to_list(P),
+ io:format("~s", [os:cmd("$(MAKE) -C $(call core_native_path,$(DEPS_DIR)/$1) " ++ Path)]),
+ io:format("~s", [os:cmd("$(MAKE) -C " ++ Path ++ " IS_DEP=1")]),
+ code:add_patha(Path ++ "/ebin")
+ end
+ end
+ end || P <- Plugins],
+ [case code:load_file(P) of
+ {module, P} -> ok;
+ _ ->
+ case lists:keyfind(plugin_dir, 1, Conf) of
+ false -> ok;
+ {_, PluginsDir} ->
+ ErlFile = "$(call core_native_path,$(DEPS_DIR)/$1/)" ++ PluginsDir ++ "/" ++ atom_to_list(P) ++ ".erl",
+ {ok, P, Bin} = compile:file(ErlFile, [binary]),
+ {module, P} = code:load_binary(P, ErlFile, Bin)
+ end
+ end || P <- Plugins],
+ [RunPlugin(P, preprocess) || P <- Plugins],
+ [RunPlugin(P, pre_compile) || P <- Plugins],
+ [RunPlugin(P, compile) || P <- Plugins]
+ end
+ end(),
+ halt()
+endef
+
+define dep_autopatch_appsrc_script.erl
+ AppSrc = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+ AppSrcScript = AppSrc ++ ".script",
+ {ok, Conf0} = file:consult(AppSrc),
+ Bindings0 = erl_eval:new_bindings(),
+ Bindings1 = erl_eval:add_binding('CONFIG', Conf0, Bindings0),
+ Bindings = erl_eval:add_binding('SCRIPT', AppSrcScript, Bindings1),
+ Conf = case file:script(AppSrcScript, Bindings) of
+ {ok, [C]} -> C;
+ {ok, C} -> C
+ end,
+ ok = file:write_file(AppSrc, io_lib:format("~p.~n", [Conf])),
+ halt()
+endef
+
+define dep_autopatch_appsrc.erl
+ AppSrcOut = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+ AppSrcIn = case filelib:is_regular(AppSrcOut) of false -> "$(call core_native_path,$(DEPS_DIR)/$1/ebin/$1.app)"; true -> AppSrcOut end,
+ case filelib:is_regular(AppSrcIn) of
+ false -> ok;
+ true ->
+ {ok, [{application, $(1), L0}]} = file:consult(AppSrcIn),
+ L1 = lists:keystore(modules, 1, L0, {modules, []}),
+ L2 = case lists:keyfind(vsn, 1, L1) of
+ {_, git} -> lists:keyreplace(vsn, 1, L1, {vsn, lists:droplast(os:cmd("git -C $(DEPS_DIR)/$1 describe --dirty --tags --always"))});
+ {_, {cmd, _}} -> lists:keyreplace(vsn, 1, L1, {vsn, "cmd"});
+ _ -> L1
+ end,
+ L3 = case lists:keyfind(registered, 1, L2) of false -> [{registered, []}|L2]; _ -> L2 end,
+ ok = file:write_file(AppSrcOut, io_lib:format("~p.~n", [{application, $(1), L3}])),
+ case AppSrcOut of AppSrcIn -> ok; _ -> ok = file:delete(AppSrcIn) end
+ end,
+ halt()
+endef
+
+define dep_fetch_git
+ git clone -q -n -- $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && git checkout -q $(call dep_commit,$(1));
+endef
+
+define dep_fetch_git-subfolder
+ mkdir -p $(ERLANG_MK_TMP)/git-subfolder; \
+ git clone -q -n -- $(call dep_repo,$1) \
+ $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1); \
+ cd $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1) \
+ && git checkout -q $(call dep_commit,$1); \
+ ln -s $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1)/$(word 4,$(dep_$(1))) \
+ $(DEPS_DIR)/$(call dep_name,$1);
+endef
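+
+# A usage sketch (name and URL are illustrative, not from this repository):
+# the fourth word of the dep_* line selects the subfolder of the clone that
+# gets linked into DEPS_DIR.
+#
+#     dep_myapp = git-subfolder https://github.com/user/monorepo.git master apps/myapp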
+
+define dep_fetch_git-submodule
+ git submodule update --init -- $(DEPS_DIR)/$1;
+endef
+
+define dep_fetch_hg
+ hg clone -q -U $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && hg update -q $(call dep_commit,$(1));
+endef
+
+define dep_fetch_svn
+ svn checkout -q $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+define dep_fetch_cp
+ cp -R $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+define dep_fetch_ln
+ ln -s $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+# Hex only has a package version. No need to look in the Erlang.mk packages.
+define dep_fetch_hex
+ mkdir -p $(ERLANG_MK_TMP)/hex $(DEPS_DIR)/$1; \
+ $(call core_http_get,$(ERLANG_MK_TMP)/hex/$1.tar,\
+ https://repo.hex.pm/tarballs/$(if $(word 3,$(dep_$1)),$(word 3,$(dep_$1)),$1)-$(strip $(word 2,$(dep_$1))).tar); \
+ tar -xOf $(ERLANG_MK_TMP)/hex/$1.tar contents.tar.gz | tar -C $(DEPS_DIR)/$1 -xzf -;
+endef
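+
+# A usage sketch (package name and version are illustrative): the second
+# word is the Hex version; the optional third word is the Hex package name
+# when it differs from the application name.
+#
+#     DEPS = jsx
+#     dep_jsx = hex 2.8.0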
+
+define dep_fetch_fail
+ echo "Error: Unknown or invalid dependency: $(1)." >&2; \
+ exit 78;
+endef
+
+# Kept for compatibility with older Erlang.mk configurations.
+define dep_fetch_legacy
+ $(warning WARNING: '$(1)' dependency configuration uses deprecated format.) \
+ git clone -q -n -- $(word 1,$(dep_$(1))) $(DEPS_DIR)/$(1); \
+ cd $(DEPS_DIR)/$(1) && git checkout -q $(if $(word 2,$(dep_$(1))),$(word 2,$(dep_$(1))),master);
+endef
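+
+# For illustration (hypothetical pin): the deprecated format is
+# "dep_NAME = REPO [COMMIT]", with COMMIT defaulting to master, e.g.
+#
+#     dep_cowboy = https://github.com/ninenines/cowboy 1.0.0
+#
+# whereas the current format names the fetch method explicitly:
+#
+#     dep_cowboy = git https://github.com/ninenines/cowboy 1.0.0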
+
+define dep_target
+$(DEPS_DIR)/$(call dep_name,$1): | $(ERLANG_MK_TMP)
+ $(eval DEP_NAME := $(call dep_name,$1))
+ $(eval DEP_STR := $(if $(filter $1,$(DEP_NAME)),$1,"$1 ($(DEP_NAME))"))
+ $(verbose) if test -d $(APPS_DIR)/$(DEP_NAME); then \
+ echo "Error: Dependency" $(DEP_STR) "conflicts with application found in $(APPS_DIR)/$(DEP_NAME)." >&2; \
+ exit 17; \
+ fi
+ $(verbose) mkdir -p $(DEPS_DIR)
+ $(dep_verbose) $(call dep_fetch_$(strip $(call dep_fetch,$(1))),$(1))
+ $(verbose) if [ -f $(DEPS_DIR)/$(1)/configure.ac -o -f $(DEPS_DIR)/$(1)/configure.in ] \
+ && [ ! -f $(DEPS_DIR)/$(1)/configure ]; then \
+ echo " AUTO " $(DEP_STR); \
+ cd $(DEPS_DIR)/$(1) && autoreconf -Wall -vif -I m4; \
+ fi
+ - $(verbose) if [ -f $(DEPS_DIR)/$(DEP_NAME)/configure ]; then \
+ echo " CONF " $(DEP_STR); \
+ cd $(DEPS_DIR)/$(DEP_NAME) && ./configure; \
+ fi
+ifeq ($(filter $(1),$(NO_AUTOPATCH)),)
+ $(verbose) $$(MAKE) --no-print-directory autopatch-$(DEP_NAME)
+endif
+
+.PHONY: autopatch-$(call dep_name,$1)
+
+autopatch-$(call dep_name,$1)::
+ $(verbose) if [ "$(1)" = "amqp_client" -a "$(RABBITMQ_CLIENT_PATCH)" ]; then \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \
+ echo " PATCH Downloading rabbitmq-codegen"; \
+ git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \
+ fi; \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-server ]; then \
+ echo " PATCH Downloading rabbitmq-server"; \
+ git clone https://github.com/rabbitmq/rabbitmq-server.git $(DEPS_DIR)/rabbitmq-server; \
+ fi; \
+ ln -s $(DEPS_DIR)/amqp_client/deps/rabbit_common-0.0.0 $(DEPS_DIR)/rabbit_common; \
+ elif [ "$(1)" = "rabbit" -a "$(RABBITMQ_SERVER_PATCH)" ]; then \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \
+ echo " PATCH Downloading rabbitmq-codegen"; \
+ git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \
+		fi; \
+ elif [ "$1" = "elixir" -a "$(ELIXIR_PATCH)" ]; then \
+ ln -s lib/elixir/ebin $(DEPS_DIR)/elixir/; \
+ else \
+ $$(call dep_autopatch,$(call dep_name,$1)) \
+ fi
+endef
+
+$(foreach dep,$(BUILD_DEPS) $(DEPS),$(eval $(call dep_target,$(dep))))
+
+ifndef IS_APP
+clean:: clean-apps
+
+clean-apps:
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ $(MAKE) -C $$dep clean IS_APP=1; \
+ done
+
+distclean:: distclean-apps
+
+distclean-apps:
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ $(MAKE) -C $$dep distclean IS_APP=1; \
+ done
+endif
+
+ifndef SKIP_DEPS
+distclean:: distclean-deps
+
+distclean-deps:
+ $(gen_verbose) rm -rf $(DEPS_DIR)
+endif
+
+# Forward-declare variables used in core/deps-tools.mk. This is required
+# in case plugins use them.
+
+ERLANG_MK_RECURSIVE_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-deps-list.log
+ERLANG_MK_RECURSIVE_DOC_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-doc-deps-list.log
+ERLANG_MK_RECURSIVE_REL_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-rel-deps-list.log
+ERLANG_MK_RECURSIVE_TEST_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-test-deps-list.log
+ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-shell-deps-list.log
+
+ERLANG_MK_QUERY_DEPS_FILE = $(ERLANG_MK_TMP)/query-deps.log
+ERLANG_MK_QUERY_DOC_DEPS_FILE = $(ERLANG_MK_TMP)/query-doc-deps.log
+ERLANG_MK_QUERY_REL_DEPS_FILE = $(ERLANG_MK_TMP)/query-rel-deps.log
+ERLANG_MK_QUERY_TEST_DEPS_FILE = $(ERLANG_MK_TMP)/query-test-deps.log
+ERLANG_MK_QUERY_SHELL_DEPS_FILE = $(ERLANG_MK_TMP)/query-shell-deps.log
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: clean-app
+
+# Configuration.
+
+ERLC_OPTS ?= -Werror +debug_info +warn_export_vars +warn_shadow_vars \
+ +warn_obsolete_guard # +bin_opt_info +warn_export_all +warn_missing_spec
+COMPILE_FIRST ?=
+COMPILE_FIRST_PATHS = $(addprefix src/,$(addsuffix .erl,$(COMPILE_FIRST)))
+ERLC_EXCLUDE ?=
+ERLC_EXCLUDE_PATHS = $(addprefix src/,$(addsuffix .erl,$(ERLC_EXCLUDE)))
+
+ERLC_ASN1_OPTS ?=
+
+ERLC_MIB_OPTS ?=
+COMPILE_MIB_FIRST ?=
+COMPILE_MIB_FIRST_PATHS = $(addprefix mibs/,$(addsuffix .mib,$(COMPILE_MIB_FIRST)))
+
+# Verbosity.
+
+app_verbose_0 = @echo " APP " $(PROJECT);
+app_verbose_2 = set -x;
+app_verbose = $(app_verbose_$(V))
+
+appsrc_verbose_0 = @echo " APP " $(PROJECT).app.src;
+appsrc_verbose_2 = set -x;
+appsrc_verbose = $(appsrc_verbose_$(V))
+
+makedep_verbose_0 = @echo " DEPEND" $(PROJECT).d;
+makedep_verbose_2 = set -x;
+makedep_verbose = $(makedep_verbose_$(V))
+
+erlc_verbose_0 = @echo " ERLC " $(filter-out $(patsubst %,%.erl,$(ERLC_EXCLUDE)),\
+ $(filter %.erl %.core,$(?F)));
+erlc_verbose_2 = set -x;
+erlc_verbose = $(erlc_verbose_$(V))
+
+xyrl_verbose_0 = @echo " XYRL " $(filter %.xrl %.yrl,$(?F));
+xyrl_verbose_2 = set -x;
+xyrl_verbose = $(xyrl_verbose_$(V))
+
+asn1_verbose_0 = @echo " ASN1 " $(filter %.asn1,$(?F));
+asn1_verbose_2 = set -x;
+asn1_verbose = $(asn1_verbose_$(V))
+
+mib_verbose_0 = @echo " MIB " $(filter %.bin %.mib,$(?F));
+mib_verbose_2 = set -x;
+mib_verbose = $(mib_verbose_$(V))
+
+ifneq ($(wildcard src/),)
+
+# Targets.
+
+app:: $(if $(wildcard ebin/test),clean) deps
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d
+ $(verbose) $(MAKE) --no-print-directory app-build
+
+ifeq ($(wildcard src/$(PROJECT_MOD).erl),)
+define app_file
+{application, '$(PROJECT)', [
+ {description, "$(PROJECT_DESCRIPTION)"},
+ {vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP),
+ {id$(comma)$(space)"$(1)"}$(comma))
+ {modules, [$(call comma_list,$(2))]},
+ {registered, []},
+ {applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(foreach dep,$(DEPS),$(call dep_name,$(dep))))]},
+ {env, $(subst \,\\,$(PROJECT_ENV))}$(if $(findstring {,$(PROJECT_APP_EXTRA_KEYS)),$(comma)$(newline)$(tab)$(subst \,\\,$(PROJECT_APP_EXTRA_KEYS)),)
+]}.
+endef
+else
+define app_file
+{application, '$(PROJECT)', [
+ {description, "$(PROJECT_DESCRIPTION)"},
+ {vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP),
+ {id$(comma)$(space)"$(1)"}$(comma))
+ {modules, [$(call comma_list,$(2))]},
+ {registered, [$(call comma_list,$(PROJECT)_sup $(PROJECT_REGISTERED))]},
+ {applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(foreach dep,$(DEPS),$(call dep_name,$(dep))))]},
+ {mod, {$(PROJECT_MOD), []}},
+ {env, $(subst \,\\,$(PROJECT_ENV))}$(if $(findstring {,$(PROJECT_APP_EXTRA_KEYS)),$(comma)$(newline)$(tab)$(subst \,\\,$(PROJECT_APP_EXTRA_KEYS)),)
+]}.
+endef
+endif
+
+app-build: ebin/$(PROJECT).app
+ $(verbose) :
+
+# Source files.
+
+ALL_SRC_FILES := $(sort $(call core_find,src/,*))
+
+ERL_FILES := $(filter %.erl,$(ALL_SRC_FILES))
+CORE_FILES := $(filter %.core,$(ALL_SRC_FILES))
+
+# ASN.1 files.
+
+ifneq ($(wildcard asn1/),)
+ASN1_FILES = $(sort $(call core_find,asn1/,*.asn1))
+ERL_FILES += $(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES))))
+
+define compile_asn1
+ $(verbose) mkdir -p include/
+ $(asn1_verbose) erlc -v -I include/ -o asn1/ +noobj $(ERLC_ASN1_OPTS) $(1)
+ $(verbose) mv asn1/*.erl src/
+ -$(verbose) mv asn1/*.hrl include/
+ $(verbose) mv asn1/*.asn1db include/
+endef
+
+$(PROJECT).d:: $(ASN1_FILES)
+ $(if $(strip $?),$(call compile_asn1,$?))
+endif
+
+# SNMP MIB files.
+
+ifneq ($(wildcard mibs/),)
+MIB_FILES = $(sort $(call core_find,mibs/,*.mib))
+
+$(PROJECT).d:: $(COMPILE_MIB_FIRST_PATHS) $(MIB_FILES)
+ $(verbose) mkdir -p include/ priv/mibs/
+ $(mib_verbose) erlc -v $(ERLC_MIB_OPTS) -o priv/mibs/ -I priv/mibs/ $?
+ $(mib_verbose) erlc -o include/ -- $(addprefix priv/mibs/,$(patsubst %.mib,%.bin,$(notdir $?)))
+endif
+
+# Leex and Yecc files.
+
+XRL_FILES := $(filter %.xrl,$(ALL_SRC_FILES))
+XRL_ERL_FILES = $(addprefix src/,$(patsubst %.xrl,%.erl,$(notdir $(XRL_FILES))))
+ERL_FILES += $(XRL_ERL_FILES)
+
+YRL_FILES := $(filter %.yrl,$(ALL_SRC_FILES))
+YRL_ERL_FILES = $(addprefix src/,$(patsubst %.yrl,%.erl,$(notdir $(YRL_FILES))))
+ERL_FILES += $(YRL_ERL_FILES)
+
+$(PROJECT).d:: $(XRL_FILES) $(YRL_FILES)
+ $(if $(strip $?),$(xyrl_verbose) erlc -v -o src/ $(YRL_ERLC_OPTS) $?)
+
+# Erlang and Core Erlang files.
+
+define makedep.erl
+ E = ets:new(makedep, [bag]),
+ G = digraph:new([acyclic]),
+ ErlFiles = lists:usort(string:tokens("$(ERL_FILES)", " ")),
+ DepsDir = "$(call core_native_path,$(DEPS_DIR))",
+ AppsDir = "$(call core_native_path,$(APPS_DIR))",
+ DepsDirsSrc = "$(if $(wildcard $(DEPS_DIR)/*/src), $(call core_native_path,$(wildcard $(DEPS_DIR)/*/src)))",
+ DepsDirsInc = "$(if $(wildcard $(DEPS_DIR)/*/include), $(call core_native_path,$(wildcard $(DEPS_DIR)/*/include)))",
+ AppsDirsSrc = "$(if $(wildcard $(APPS_DIR)/*/src), $(call core_native_path,$(wildcard $(APPS_DIR)/*/src)))",
+ AppsDirsInc = "$(if $(wildcard $(APPS_DIR)/*/include), $(call core_native_path,$(wildcard $(APPS_DIR)/*/include)))",
+ DepsDirs = lists:usort(string:tokens(DepsDirsSrc++DepsDirsInc, " ")),
+ AppsDirs = lists:usort(string:tokens(AppsDirsSrc++AppsDirsInc, " ")),
+ Modules = [{list_to_atom(filename:basename(F, ".erl")), F} || F <- ErlFiles],
+ Add = fun (Mod, Dep) ->
+ case lists:keyfind(Dep, 1, Modules) of
+ false -> ok;
+ {_, DepFile} ->
+ {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+ ets:insert(E, {ModFile, DepFile}),
+ digraph:add_vertex(G, Mod),
+ digraph:add_vertex(G, Dep),
+ digraph:add_edge(G, Mod, Dep)
+ end
+ end,
+ AddHd = fun (F, Mod, DepFile) ->
+ case file:open(DepFile, [read]) of
+ {error, enoent} ->
+ ok;
+ {ok, Fd} ->
+ {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+ case ets:match(E, {ModFile, DepFile}) of
+ [] ->
+ ets:insert(E, {ModFile, DepFile}),
+				F(F, Fd, Mod, 0);
+ _ -> ok
+ end
+ end
+ end,
+ SearchHrl = fun
+ F(_Hrl, []) -> {error,enoent};
+ F(Hrl, [Dir|Dirs]) ->
+ HrlF = filename:join([Dir,Hrl]),
+ case filelib:is_file(HrlF) of
+ true ->
+ {ok, HrlF};
+ false -> F(Hrl,Dirs)
+ end
+ end,
+ Attr = fun
+ (_F, Mod, behavior, Dep) ->
+ Add(Mod, Dep);
+ (_F, Mod, behaviour, Dep) ->
+ Add(Mod, Dep);
+ (_F, Mod, compile, {parse_transform, Dep}) ->
+ Add(Mod, Dep);
+ (_F, Mod, compile, Opts) when is_list(Opts) ->
+ case proplists:get_value(parse_transform, Opts) of
+ undefined -> ok;
+ Dep -> Add(Mod, Dep)
+ end;
+ (F, Mod, include, Hrl) ->
+ case SearchHrl(Hrl, ["src", "include",AppsDir,DepsDir]++AppsDirs++DepsDirs) of
+ {ok, FoundHrl} -> AddHd(F, Mod, FoundHrl);
+ {error, _} -> false
+ end;
+ (F, Mod, include_lib, Hrl) ->
+ case SearchHrl(Hrl, ["src", "include",AppsDir,DepsDir]++AppsDirs++DepsDirs) of
+ {ok, FoundHrl} -> AddHd(F, Mod, FoundHrl);
+ {error, _} -> false
+ end;
+ (F, Mod, import, {Imp, _}) ->
+ IsFile =
+ case lists:keyfind(Imp, 1, Modules) of
+ false -> false;
+ {_, FilePath} -> filelib:is_file(FilePath)
+ end,
+ case IsFile of
+ false -> ok;
+ true -> Add(Mod, Imp)
+ end;
+ (_, _, _, _) -> ok
+ end,
+ MakeDepend = fun
+ (F, Fd, Mod, StartLocation) ->
+ {ok, Filename} = file:pid2name(Fd),
+ case io:parse_erl_form(Fd, undefined, StartLocation) of
+ {ok, AbsData, EndLocation} ->
+ case AbsData of
+ {attribute, _, Key, Value} ->
+ Attr(F, Mod, Key, Value),
+ F(F, Fd, Mod, EndLocation);
+ _ -> F(F, Fd, Mod, EndLocation)
+ end;
+				{eof, _} -> file:close(Fd);
+				{error, _ErrorDescription} ->
+					file:close(Fd);
+				{error, _ErrorInfo, ErrorLocation} ->
+					F(F, Fd, Mod, ErrorLocation)
+ end,
+ ok
+ end,
+ [begin
+ Mod = list_to_atom(filename:basename(F, ".erl")),
+ case file:open(F, [read]) of
+			{ok, Fd} -> MakeDepend(MakeDepend, Fd, Mod, 0);
+ {error, enoent} -> ok
+ end
+ end || F <- ErlFiles],
+ Depend = sofs:to_external(sofs:relation_to_family(sofs:relation(ets:tab2list(E)))),
+ CompileFirst = [X || X <- lists:reverse(digraph_utils:topsort(G)), [] =/= digraph:in_neighbours(G, X)],
+ TargetPath = fun(Target) ->
+ case lists:keyfind(Target, 1, Modules) of
+ false -> "";
+ {_, DepFile} ->
+ DirSubname = tl(string:tokens(filename:dirname(DepFile), "/")),
+ string:join(DirSubname ++ [atom_to_list(Target)], "/")
+ end
+ end,
+ Output0 = [
+ "# Generated by Erlang.mk. Edit at your own risk!\n\n",
+ [[F, "::", [[" ", D] || D <- Deps], "; @touch \$$@\n"] || {F, Deps} <- Depend],
+ "\nCOMPILE_FIRST +=", [[" ", TargetPath(CF)] || CF <- CompileFirst], "\n"
+ ],
+ Output = case "é" of
+ [233] -> unicode:characters_to_binary(Output0);
+ _ -> Output0
+ end,
+ ok = file:write_file("$(1)", Output),
+ halt()
+endef
+
+ifeq ($(if $(NO_MAKEDEP),$(wildcard $(PROJECT).d),),)
+$(PROJECT).d:: $(ERL_FILES) $(call core_find,include/,*.hrl) $(MAKEFILE_LIST)
+ $(makedep_verbose) $(call erlang,$(call makedep.erl,$@))
+endif
+
+ifeq ($(IS_APP)$(IS_DEP),)
+ifneq ($(words $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES)),0)
+# Rebuild everything when the Makefile changes.
+$(ERLANG_MK_TMP)/last-makefile-change: $(MAKEFILE_LIST) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES); \
+ touch -c $(PROJECT).d; \
+ fi
+ $(verbose) touch $@
+
+$(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES):: $(ERLANG_MK_TMP)/last-makefile-change
+ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change
+endif
+endif
+
+$(PROJECT).d::
+ $(verbose) :
+
+include $(wildcard $(PROJECT).d)
+
+ebin/$(PROJECT).app:: ebin/
+
+ebin/:
+ $(verbose) mkdir -p ebin/
+
+define compile_erl
+ $(erlc_verbose) erlc -v $(if $(IS_DEP),$(filter-out -Werror,$(ERLC_OPTS)),$(ERLC_OPTS)) -o ebin/ \
+ -pa ebin/ -I include/ $(filter-out $(ERLC_EXCLUDE_PATHS),$(COMPILE_FIRST_PATHS) $(1))
+endef
+
+define validate_app_file
+ case file:consult("ebin/$(PROJECT).app") of
+ {ok, _} -> halt();
+ _ -> halt(1)
+ end
+endef
+
+ebin/$(PROJECT).app:: $(ERL_FILES) $(CORE_FILES) $(wildcard src/$(PROJECT).app.src)
+ $(eval FILES_TO_COMPILE := $(filter-out src/$(PROJECT).app.src,$?))
+ $(if $(strip $(FILES_TO_COMPILE)),$(call compile_erl,$(FILES_TO_COMPILE)))
+# Older git versions do not have the --first-parent flag. Do without in that case.
+ $(eval GITDESCRIBE := $(shell git describe --dirty --abbrev=7 --tags --always --first-parent 2>/dev/null \
+ || git describe --dirty --abbrev=7 --tags --always 2>/dev/null || true))
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(filter-out $(ERLC_EXCLUDE_PATHS),$(ERL_FILES) $(CORE_FILES) $(BEAM_FILES)))))))
+ifeq ($(wildcard src/$(PROJECT).app.src),)
+ $(app_verbose) printf '$(subst %,%%,$(subst $(newline),\n,$(subst ','\'',$(call app_file,$(GITDESCRIBE),$(MODULES)))))' \
+ > ebin/$(PROJECT).app
+ $(verbose) if ! $(call erlang,$(call validate_app_file)); then \
+ echo "The .app file produced is invalid. Please verify the value of PROJECT_ENV." >&2; \
+ exit 1; \
+ fi
+else
+ $(verbose) if [ -z "$$(grep -e '^[^%]*{\s*modules\s*,' src/$(PROJECT).app.src)" ]; then \
+		echo "A {modules, []} entry is required in $(PROJECT).app.src but was not found. Please consult the erlang.mk documentation for instructions." >&2; \
+ exit 1; \
+ fi
+ $(appsrc_verbose) cat src/$(PROJECT).app.src \
+ | sed "s/{[[:space:]]*modules[[:space:]]*,[[:space:]]*\[\]}/{modules, \[$(call comma_list,$(MODULES))\]}/" \
+ | sed "s/{id,[[:space:]]*\"git\"}/{id, \"$(subst /,\/,$(GITDESCRIBE))\"}/" \
+ > ebin/$(PROJECT).app
+endif
+ifneq ($(wildcard src/$(PROJECT).appup),)
+ $(verbose) cp src/$(PROJECT).appup ebin/
+endif
+
+clean:: clean-app
+
+clean-app:
+ $(gen_verbose) rm -rf $(PROJECT).d ebin/ priv/mibs/ $(XRL_ERL_FILES) $(YRL_ERL_FILES) \
+ $(addprefix include/,$(patsubst %.mib,%.hrl,$(notdir $(MIB_FILES)))) \
+ $(addprefix include/,$(patsubst %.asn1,%.hrl,$(notdir $(ASN1_FILES)))) \
+ $(addprefix include/,$(patsubst %.asn1,%.asn1db,$(notdir $(ASN1_FILES)))) \
+ $(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES))))
+
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Viktor Söderqvist <viktor@zuiderkwast.se>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: docs-deps
+
+# Configuration.
+
+ALL_DOC_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(DOC_DEPS))
+
+# Targets.
+
+$(foreach dep,$(DOC_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+doc-deps:
+else
+doc-deps: $(ALL_DOC_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_DOC_DEPS_DIRS) ; do $(MAKE) -C $$dep IS_DEP=1; done
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: rel-deps
+
+# Configuration.
+
+ALL_REL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(REL_DEPS))
+
+# Targets.
+
+$(foreach dep,$(REL_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+rel-deps:
+else
+rel-deps: $(ALL_REL_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_REL_DEPS_DIRS) ; do $(MAKE) -C $$dep; done
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: test-deps test-dir test-build clean-test-dir
+
+# Configuration.
+
+TEST_DIR ?= $(CURDIR)/test
+
+ALL_TEST_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(TEST_DEPS))
+
+TEST_ERLC_OPTS ?= +debug_info +warn_export_vars +warn_shadow_vars +warn_obsolete_guard
+TEST_ERLC_OPTS += -DTEST=1
+
+# Targets.
+
+$(foreach dep,$(TEST_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+test-deps:
+else
+test-deps: $(ALL_TEST_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_TEST_DEPS_DIRS) ; do \
+ if [ -z "$(strip $(FULL))" ] && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ else \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ fi \
+ done
+endif
+
+ifneq ($(wildcard $(TEST_DIR)),)
+test-dir: $(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build
+ @:
+
+test_erlc_verbose_0 = @echo " ERLC " $(filter-out $(patsubst %,%.erl,$(ERLC_EXCLUDE)),\
+ $(filter %.erl %.core,$(notdir $(FILES_TO_COMPILE))));
+test_erlc_verbose_2 = set -x;
+test_erlc_verbose = $(test_erlc_verbose_$(V))
+
+define compile_test_erl
+ $(test_erlc_verbose) erlc -v $(TEST_ERLC_OPTS) -o $(TEST_DIR) \
+ -pa ebin/ -I include/ $(1)
+endef
+
+ERL_TEST_FILES = $(call core_find,$(TEST_DIR)/,*.erl)
+$(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build: $(ERL_TEST_FILES) $(MAKEFILE_LIST)
+ $(eval FILES_TO_COMPILE := $(if $(filter $(MAKEFILE_LIST),$?),$(filter $(ERL_TEST_FILES),$^),$?))
+ $(if $(strip $(FILES_TO_COMPILE)),$(call compile_test_erl,$(FILES_TO_COMPILE)) && touch $@)
+endif
+
+test-build:: IS_TEST=1
+test-build:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build:: $(if $(wildcard src),$(if $(wildcard ebin/test),,clean)) $(if $(IS_APP),,deps test-deps)
+# We already compiled everything when IS_APP=1.
+ifndef IS_APP
+ifneq ($(wildcard src),)
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(verbose) $(MAKE) --no-print-directory app-build ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(gen_verbose) touch ebin/test
+endif
+ifneq ($(wildcard $(TEST_DIR)),)
+ $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+endif
+endif
+
+# Roughly the same as test-build, but when IS_APP=1.
+# We only care about compiling the current application.
+ifdef IS_APP
+test-build-app:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build-app:: deps test-deps
+ifneq ($(wildcard src),)
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(verbose) $(MAKE) --no-print-directory app-build ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(gen_verbose) touch ebin/test
+endif
+ifneq ($(wildcard $(TEST_DIR)),)
+ $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+endif
+endif
+
+clean:: clean-test-dir
+
+clean-test-dir:
+ifneq ($(wildcard $(TEST_DIR)/*.beam),)
+ $(gen_verbose) rm -f $(TEST_DIR)/*.beam $(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: rebar.config
+
+# We strip out -Werror because we don't want to fail due to
+# warnings when used as a dependency.
+
+compat_prepare_erlc_opts = $(shell echo "$1" | sed 's/, */,/g')
+
+define compat_convert_erlc_opts
+$(if $(filter-out -Werror,$1),\
+ $(if $(findstring +,$1),\
+ $(shell echo $1 | cut -b 2-)))
+endef
+
+define compat_erlc_opts_to_list
+[$(call comma_list,$(foreach o,$(call compat_prepare_erlc_opts,$1),$(call compat_convert_erlc_opts,$o)))]
+endef
+
+define compat_rebar_config
+{deps, [
+$(call comma_list,$(foreach d,$(DEPS),\
+ $(if $(filter hex,$(call dep_fetch,$d)),\
+ {$(call dep_name,$d)$(comma)"$(call dep_repo,$d)"},\
+ {$(call dep_name,$d)$(comma)".*"$(comma){git,"$(call dep_repo,$d)"$(comma)"$(call dep_commit,$d)"}})))
+]}.
+{erl_opts, $(call compat_erlc_opts_to_list,$(ERLC_OPTS))}.
+endef
+
+rebar.config:
+ $(gen_verbose) $(call core_render,compat_rebar_config,rebar.config)
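+
+# As an illustration (hypothetical dependency): with DEPS = cowboy and
+# dep_cowboy = git https://github.com/ninenines/cowboy 2.8.0 declared,
+# "make rebar.config" would render roughly:
+#
+#     {deps, [
+#     {cowboy,".*",{git,"https://github.com/ninenines/cowboy","2.8.0"}}
+#     ]}.
+#     {erl_opts, [debug_info,warn_export_vars,warn_shadow_vars,warn_obsolete_guard]}.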
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter asciideck,$(DEPS) $(DOC_DEPS)),asciideck)
+
+.PHONY: asciidoc asciidoc-guide asciidoc-manual install-asciidoc distclean-asciidoc-guide distclean-asciidoc-manual
+
+# Core targets.
+
+docs:: asciidoc
+
+distclean:: distclean-asciidoc-guide distclean-asciidoc-manual
+
+# Plugin-specific targets.
+
+asciidoc: asciidoc-guide asciidoc-manual
+
+# User guide.
+
+ifeq ($(wildcard doc/src/guide/book.asciidoc),)
+asciidoc-guide:
+else
+asciidoc-guide: distclean-asciidoc-guide doc-deps
+ a2x -v -f pdf doc/src/guide/book.asciidoc && mv doc/src/guide/book.pdf doc/guide.pdf
+ a2x -v -f chunked doc/src/guide/book.asciidoc && mv doc/src/guide/book.chunked/ doc/html/
+
+distclean-asciidoc-guide:
+ $(gen_verbose) rm -rf doc/html/ doc/guide.pdf
+endif
+
+# Man pages.
+
+ASCIIDOC_MANUAL_FILES := $(wildcard doc/src/manual/*.asciidoc)
+
+ifeq ($(ASCIIDOC_MANUAL_FILES),)
+asciidoc-manual:
+else
+
+# Configuration.
+
+MAN_INSTALL_PATH ?= /usr/local/share/man
+MAN_SECTIONS ?= 3 7
+MAN_PROJECT ?= $(shell echo $(PROJECT) | sed 's/^./\U&\E/')
+MAN_VERSION ?= $(PROJECT_VERSION)
+
+# Plugin-specific targets.
+
+define asciidoc2man.erl
+try
+ [begin
+ io:format(" ADOC ~s~n", [F]),
+ ok = asciideck:to_manpage(asciideck:parse_file(F), #{
+ compress => gzip,
+ outdir => filename:dirname(F),
+ extra2 => "$(MAN_PROJECT) $(MAN_VERSION)",
+ extra3 => "$(MAN_PROJECT) Function Reference"
+ })
+ end || F <- [$(shell echo $(addprefix $(comma)\",$(addsuffix \",$1)) | sed 's/^.//')]],
+ halt(0)
+catch C:E ->
+ io:format("Exception ~p:~p~nStacktrace: ~p~n", [C, E, erlang:get_stacktrace()]),
+ halt(1)
+end.
+endef
+
+asciidoc-manual:: doc-deps
+
+asciidoc-manual:: $(ASCIIDOC_MANUAL_FILES)
+ $(gen_verbose) $(call erlang,$(call asciidoc2man.erl,$?))
+ $(verbose) $(foreach s,$(MAN_SECTIONS),mkdir -p doc/man$s/ && mv doc/src/manual/*.$s.gz doc/man$s/;)
+
+install-docs:: install-asciidoc
+
+install-asciidoc: asciidoc-manual
+ $(foreach s,$(MAN_SECTIONS),\
+ mkdir -p $(MAN_INSTALL_PATH)/man$s/ && \
+ install -g `id -g` -o `id -u` -m 0644 doc/man$s/*.gz $(MAN_INSTALL_PATH)/man$s/;)
+
+distclean-asciidoc-manual:
+ $(gen_verbose) rm -rf $(addprefix doc/man,$(MAN_SECTIONS))
+endif
+endif
+
+# Copyright (c) 2014-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: bootstrap bootstrap-lib bootstrap-rel new list-templates
+
+# Core targets.
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Bootstrap targets:" \
+ " bootstrap Generate a skeleton of an OTP application" \
+ " bootstrap-lib Generate a skeleton of an OTP library" \
+ " bootstrap-rel Generate the files needed to build a release" \
+ " new-app in=NAME Create a new local OTP application NAME" \
+ " new-lib in=NAME Create a new local OTP library NAME" \
+ " new t=TPL n=NAME Generate a module NAME based on the template TPL" \
+ " new t=T n=N in=APP Generate a module NAME based on the template TPL in APP" \
+ " list-templates List available templates"
+
+# Bootstrap templates.
+
+define bs_appsrc
+{application, $p, [
+ {description, ""},
+ {vsn, "0.1.0"},
+ {id, "git"},
+ {modules, []},
+ {registered, []},
+ {applications, [
+ kernel,
+ stdlib
+ ]},
+ {mod, {$p_app, []}},
+ {env, []}
+]}.
+endef
+
+define bs_appsrc_lib
+{application, $p, [
+ {description, ""},
+ {vsn, "0.1.0"},
+ {id, "git"},
+ {modules, []},
+ {registered, []},
+ {applications, [
+ kernel,
+ stdlib
+ ]}
+]}.
+endef
+
+# To prevent autocompletion issues with ZSH, we add "include erlang.mk"
+# separately during the actual bootstrap.
+define bs_Makefile
+PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.1.0
+$(if $(SP),
+# Whitespace to be used when creating files from templates.
+SP = $(SP)
+)
+endef
+
+define bs_apps_Makefile
+PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.1.0
+$(if $(SP),
+# Whitespace to be used when creating files from templates.
+SP = $(SP)
+)
+# Make sure we know where the applications are located.
+ROOT_DIR ?= $(call core_relpath,$(dir $(ERLANG_MK_FILENAME)),$(APPS_DIR)/app)
+APPS_DIR ?= ..
+DEPS_DIR ?= $(call core_relpath,$(DEPS_DIR),$(APPS_DIR)/app)
+
+include $$(ROOT_DIR)/erlang.mk
+endef
+
+define bs_app
+-module($p_app).
+-behaviour(application).
+
+-export([start/2]).
+-export([stop/1]).
+
+start(_Type, _Args) ->
+ $p_sup:start_link().
+
+stop(_State) ->
+ ok.
+endef
+
+define bs_relx_config
+{release, {$p_release, "1"}, [$p, sasl, runtime_tools]}.
+{extended_start_script, true}.
+{sys_config, "config/sys.config"}.
+{vm_args, "config/vm.args"}.
+endef
+
+define bs_sys_config
+[
+].
+endef
+
+define bs_vm_args
+-name $p@127.0.0.1
+-setcookie $p
+-heart
+endef
+
+# Normal templates.
+
+define tpl_supervisor
+-module($(n)).
+-behaviour(supervisor).
+
+-export([start_link/0]).
+-export([init/1]).
+
+start_link() ->
+ supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+init([]) ->
+ Procs = [],
+ {ok, {{one_for_one, 1, 5}, Procs}}.
+endef
+
+define tpl_gen_server
+-module($(n)).
+-behaviour(gen_server).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_server.
+-export([init/1]).
+-export([handle_call/3]).
+-export([handle_cast/2]).
+-export([handle_info/2]).
+-export([terminate/2]).
+-export([code_change/3]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_server:start_link(?MODULE, [], []).
+
+%% gen_server.
+
+init([]) ->
+ {ok, #state{}}.
+
+handle_call(_Request, _From, State) ->
+ {reply, ignored, State}.
+
+handle_cast(_Msg, State) ->
+ {noreply, State}.
+
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+endef
+
+define tpl_module
+-module($(n)).
+-export([]).
+endef
+
+define tpl_cowboy_http
+-module($(n)).
+-behaviour(cowboy_http_handler).
+
+-export([init/3]).
+-export([handle/2]).
+-export([terminate/3]).
+
+-record(state, {
+}).
+
+init(_, Req, _Opts) ->
+ {ok, Req, #state{}}.
+
+handle(Req, State=#state{}) ->
+ {ok, Req2} = cowboy_req:reply(200, Req),
+ {ok, Req2, State}.
+
+terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_gen_fsm
+-module($(n)).
+-behaviour(gen_fsm).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_fsm.
+-export([init/1]).
+-export([state_name/2]).
+-export([handle_event/3]).
+-export([state_name/3]).
+-export([handle_sync_event/4]).
+-export([handle_info/3]).
+-export([terminate/3]).
+-export([code_change/4]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_fsm:start_link(?MODULE, [], []).
+
+%% gen_fsm.
+
+init([]) ->
+ {ok, state_name, #state{}}.
+
+state_name(_Event, StateData) ->
+ {next_state, state_name, StateData}.
+
+handle_event(_Event, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+state_name(_Event, _From, StateData) ->
+ {reply, ignored, state_name, StateData}.
+
+handle_sync_event(_Event, _From, StateName, StateData) ->
+ {reply, ignored, StateName, StateData}.
+
+handle_info(_Info, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+terminate(_Reason, _StateName, _StateData) ->
+ ok.
+
+code_change(_OldVsn, StateName, StateData, _Extra) ->
+ {ok, StateName, StateData}.
+endef
+
+define tpl_gen_statem
+-module($(n)).
+-behaviour(gen_statem).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_statem.
+-export([callback_mode/0]).
+-export([init/1]).
+-export([state_name/3]).
+-export([handle_event/4]).
+-export([terminate/3]).
+-export([code_change/4]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_statem:start_link(?MODULE, [], []).
+
+%% gen_statem.
+
+callback_mode() ->
+ state_functions.
+
+init([]) ->
+ {ok, state_name, #state{}}.
+
+state_name(_EventType, _EventData, StateData) ->
+ {next_state, state_name, StateData}.
+
+handle_event(_EventType, _EventData, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+terminate(_Reason, _StateName, _StateData) ->
+ ok.
+
+code_change(_OldVsn, StateName, StateData, _Extra) ->
+ {ok, StateName, StateData}.
+endef
+
+define tpl_cowboy_loop
+-module($(n)).
+-behaviour(cowboy_loop_handler).
+
+-export([init/3]).
+-export([info/3]).
+-export([terminate/3]).
+
+-record(state, {
+}).
+
+init(_, Req, _Opts) ->
+ {loop, Req, #state{}, 5000, hibernate}.
+
+info(_Info, Req, State) ->
+ {loop, Req, State, hibernate}.
+
+terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_cowboy_rest
+-module($(n)).
+
+-export([init/3]).
+-export([content_types_provided/2]).
+-export([get_html/2]).
+
+init(_, _Req, _Opts) ->
+ {upgrade, protocol, cowboy_rest}.
+
+content_types_provided(Req, State) ->
+ {[{{<<"text">>, <<"html">>, '*'}, get_html}], Req, State}.
+
+get_html(Req, State) ->
+ {<<"<html><body>This is REST!</body></html>">>, Req, State}.
+endef
+
+define tpl_cowboy_ws
+-module($(n)).
+-behaviour(cowboy_websocket_handler).
+
+-export([init/3]).
+-export([websocket_init/3]).
+-export([websocket_handle/3]).
+-export([websocket_info/3]).
+-export([websocket_terminate/3]).
+
+-record(state, {
+}).
+
+init(_, _, _) ->
+ {upgrade, protocol, cowboy_websocket}.
+
+websocket_init(_, Req, _Opts) ->
+ Req2 = cowboy_req:compact(Req),
+ {ok, Req2, #state{}}.
+
+websocket_handle({text, Data}, Req, State) ->
+ {reply, {text, Data}, Req, State};
+websocket_handle({binary, Data}, Req, State) ->
+ {reply, {binary, Data}, Req, State};
+websocket_handle(_Frame, Req, State) ->
+ {ok, Req, State}.
+
+websocket_info(_Info, Req, State) ->
+ {ok, Req, State}.
+
+websocket_terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_ranch_protocol
+-module($(n)).
+-behaviour(ranch_protocol).
+
+-export([start_link/4]).
+-export([init/4]).
+
+-type opts() :: [].
+-export_type([opts/0]).
+
+-record(state, {
+ socket :: inet:socket(),
+ transport :: module()
+}).
+
+start_link(Ref, Socket, Transport, Opts) ->
+ Pid = spawn_link(?MODULE, init, [Ref, Socket, Transport, Opts]),
+ {ok, Pid}.
+
+-spec init(ranch:ref(), inet:socket(), module(), opts()) -> ok.
+init(Ref, Socket, Transport, _Opts) ->
+ ok = ranch:accept_ack(Ref),
+ loop(#state{socket=Socket, transport=Transport}).
+
+loop(State) ->
+ loop(State).
+endef
+
+# Plugin-specific targets.
+
+ifndef WS
+ifdef SP
+WS = $(subst a,,a $(wordlist 1,$(SP),a a a a a a a a a a a a a a a a a a a a))
+else
+WS = $(tab)
+endif
+endif
+
+bootstrap:
+ifneq ($(wildcard src/),)
+ $(error Error: src/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(eval n := $(PROJECT)_sup)
+ $(verbose) $(call core_render,bs_Makefile,Makefile)
+ $(verbose) echo "include erlang.mk" >> Makefile
+ $(verbose) mkdir src/
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc,src/$(PROJECT).app.src)
+endif
+ $(verbose) $(call core_render,bs_app,src/$(PROJECT)_app.erl)
+ $(verbose) $(call core_render,tpl_supervisor,src/$(PROJECT)_sup.erl)
+
+bootstrap-lib:
+ifneq ($(wildcard src/),)
+ $(error Error: src/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(verbose) $(call core_render,bs_Makefile,Makefile)
+ $(verbose) echo "include erlang.mk" >> Makefile
+ $(verbose) mkdir src/
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc_lib,src/$(PROJECT).app.src)
+endif
+
+bootstrap-rel:
+ifneq ($(wildcard relx.config),)
+ $(error Error: relx.config already exists)
+endif
+ifneq ($(wildcard config/),)
+ $(error Error: config/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(verbose) $(call core_render,bs_relx_config,relx.config)
+ $(verbose) mkdir config/
+ $(verbose) $(call core_render,bs_sys_config,config/sys.config)
+ $(verbose) $(call core_render,bs_vm_args,config/vm.args)
+
+new-app:
+ifndef in
+ $(error Usage: $(MAKE) new-app in=APP)
+endif
+ifneq ($(wildcard $(APPS_DIR)/$in),)
+ $(error Error: Application $in already exists)
+endif
+ $(eval p := $(in))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(eval n := $(in)_sup)
+ $(verbose) mkdir -p $(APPS_DIR)/$p/src/
+ $(verbose) $(call core_render,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile)
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc,$(APPS_DIR)/$p/src/$p.app.src)
+endif
+ $(verbose) $(call core_render,bs_app,$(APPS_DIR)/$p/src/$p_app.erl)
+ $(verbose) $(call core_render,tpl_supervisor,$(APPS_DIR)/$p/src/$p_sup.erl)
+
+new-lib:
+ifndef in
+ $(error Usage: $(MAKE) new-lib in=APP)
+endif
+ifneq ($(wildcard $(APPS_DIR)/$in),)
+ $(error Error: Application $in already exists)
+endif
+ $(eval p := $(in))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(verbose) mkdir -p $(APPS_DIR)/$p/src/
+ $(verbose) $(call core_render,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile)
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc_lib,$(APPS_DIR)/$p/src/$p.app.src)
+endif
+
+new:
+ifeq ($(wildcard src/)$(in),)
+ $(error Error: src/ directory does not exist)
+endif
+ifndef t
+ $(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP])
+endif
+ifndef n
+ $(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP])
+endif
+ifdef in
+ $(verbose) $(call core_render,tpl_$(t),$(APPS_DIR)/$(in)/src/$(n).erl)
+else
+ $(verbose) $(call core_render,tpl_$(t),src/$(n).erl)
+endif
+
+list-templates:
+	$(verbose) echo Available templates:
+ $(verbose) printf " %s\n" $(sort $(patsubst tpl_%,%,$(filter tpl_%,$(.VARIABLES))))
+
+# Copyright (c) 2014-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: clean-c_src distclean-c_src-env
+
+# Configuration.
+
+C_SRC_DIR ?= $(CURDIR)/c_src
+C_SRC_ENV ?= $(C_SRC_DIR)/env.mk
+C_SRC_OUTPUT ?= $(CURDIR)/priv/$(PROJECT)
+C_SRC_TYPE ?= shared
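+# C_SRC_TYPE selects the kind of artifact built below: "shared" yields a
+# shared library (e.g. a NIF); any other value, such as "executable",
+# yields an executable.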
+
+# System type and C compiler/flags.
+
+ifeq ($(PLATFORM),msys2)
+ C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?= .exe
+ C_SRC_OUTPUT_SHARED_EXTENSION ?= .dll
+else
+ C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?=
+ C_SRC_OUTPUT_SHARED_EXTENSION ?= .so
+endif
+
+ifeq ($(C_SRC_TYPE),shared)
+ C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_SHARED_EXTENSION)
+else
+ C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_EXECUTABLE_EXTENSION)
+endif
+
+ifeq ($(PLATFORM),msys2)
+# We hardcode the compiler used on MSYS2. The default CC=cc does
+# not produce working code. The "gcc" MSYS2 package also doesn't.
+ CC = /mingw64/bin/gcc
+ export CC
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),darwin)
+ CC ?= cc
+ CFLAGS ?= -O3 -std=c99 -arch x86_64 -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -arch x86_64 -Wall
+ LDFLAGS ?= -arch x86_64 -flat_namespace -undefined suppress
+else ifeq ($(PLATFORM),freebsd)
+ CC ?= cc
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),linux)
+ CC ?= gcc
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+endif
+
+ifneq ($(PLATFORM),msys2)
+ CFLAGS += -fPIC
+ CXXFLAGS += -fPIC
+endif
+
+CFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
+CXXFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
+
+LDLIBS += -L"$(ERL_INTERFACE_LIB_DIR)" -lei
+
+# Verbosity.
+
+c_verbose_0 = @echo " C " $(filter-out $(notdir $(MAKEFILE_LIST) $(C_SRC_ENV)),$(^F));
+c_verbose = $(c_verbose_$(V))
+
+cpp_verbose_0 = @echo " CPP " $(filter-out $(notdir $(MAKEFILE_LIST) $(C_SRC_ENV)),$(^F));
+cpp_verbose = $(cpp_verbose_$(V))
+
+link_verbose_0 = @echo " LD " $(@F);
+link_verbose = $(link_verbose_$(V))
+
+# Targets.
+
+ifeq ($(wildcard $(C_SRC_DIR)),)
+else ifneq ($(wildcard $(C_SRC_DIR)/Makefile),)
+app:: app-c_src
+
+test-build:: app-c_src
+
+app-c_src:
+ $(MAKE) -C $(C_SRC_DIR)
+
+clean::
+ $(MAKE) -C $(C_SRC_DIR) clean
+
+else
+
+ifeq ($(SOURCES),)
+SOURCES := $(sort $(foreach pat,*.c *.C *.cc *.cpp,$(call core_find,$(C_SRC_DIR)/,$(pat))))
+endif
+OBJECTS = $(addsuffix .o, $(basename $(SOURCES)))
+
+COMPILE_C = $(c_verbose) $(CC) $(CFLAGS) $(CPPFLAGS) -c
+COMPILE_CPP = $(cpp_verbose) $(CXX) $(CXXFLAGS) $(CPPFLAGS) -c
+
+app:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
+
+test-build:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
+
+$(C_SRC_OUTPUT_FILE): $(OBJECTS)
+ $(verbose) mkdir -p $(dir $@)
+ $(link_verbose) $(CC) $(OBJECTS) \
+ $(LDFLAGS) $(if $(filter $(C_SRC_TYPE),shared),-shared) $(LDLIBS) \
+ -o $(C_SRC_OUTPUT_FILE)
+
+$(OBJECTS): $(MAKEFILE_LIST) $(C_SRC_ENV)
+
+%.o: %.c
+ $(COMPILE_C) $(OUTPUT_OPTION) $<
+
+%.o: %.cc
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+%.o: %.C
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+%.o: %.cpp
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+clean:: clean-c_src
+
+clean-c_src:
+ $(gen_verbose) rm -f $(C_SRC_OUTPUT_FILE) $(OBJECTS)
+
+endif
+
+ifneq ($(wildcard $(C_SRC_DIR)),)
+ERL_ERTS_DIR = $(shell $(ERL) -eval 'io:format("~s~n", [code:lib_dir(erts)]), halt().')
+
+$(C_SRC_ENV):
+ $(verbose) $(ERL) -eval "file:write_file(\"$(call core_native_path,$(C_SRC_ENV))\", \
+ io_lib:format( \
+ \"# Generated by Erlang.mk. Edit at your own risk!~n~n\" \
+ \"ERTS_INCLUDE_DIR ?= ~s/erts-~s/include/~n\" \
+ \"ERL_INTERFACE_INCLUDE_DIR ?= ~s~n\" \
+ \"ERL_INTERFACE_LIB_DIR ?= ~s~n\" \
+ \"ERTS_DIR ?= $(ERL_ERTS_DIR)~n\", \
+ [code:root_dir(), erlang:system_info(version), \
+ code:lib_dir(erl_interface, include), \
+ code:lib_dir(erl_interface, lib)])), \
+ halt()."
+
+distclean:: distclean-c_src-env
+
+distclean-c_src-env:
+ $(gen_verbose) rm -f $(C_SRC_ENV)
+
+-include $(C_SRC_ENV)
+
+ifneq ($(ERL_ERTS_DIR),$(ERTS_DIR))
+$(shell rm -f $(C_SRC_ENV))
+endif
+endif
+
+# Templates.
+
+define bs_c_nif
+#include "erl_nif.h"
+
+static int loads = 0;
+
+static int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
+{
+ /* Initialize private data. */
+ *priv_data = NULL;
+
+ loads++;
+
+ return 0;
+}
+
+static int upgrade(ErlNifEnv* env, void** priv_data, void** old_priv_data, ERL_NIF_TERM load_info)
+{
+ /* Convert the private data to the new version. */
+ *priv_data = *old_priv_data;
+
+ loads++;
+
+ return 0;
+}
+
+static void unload(ErlNifEnv* env, void* priv_data)
+{
+ if (loads == 1) {
+ /* Destroy the private data. */
+ }
+
+ loads--;
+}
+
+static ERL_NIF_TERM hello(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ if (enif_is_atom(env, argv[0])) {
+ return enif_make_tuple2(env,
+ enif_make_atom(env, "hello"),
+ argv[0]);
+ }
+
+ return enif_make_tuple2(env,
+ enif_make_atom(env, "error"),
+ enif_make_atom(env, "badarg"));
+}
+
+static ErlNifFunc nif_funcs[] = {
+ {"hello", 1, hello}
+};
+
+ERL_NIF_INIT($n, nif_funcs, load, NULL, upgrade, unload)
+endef
+
+define bs_erl_nif
+-module($n).
+
+-export([hello/1]).
+
+-on_load(on_load/0).
+on_load() ->
+ PrivDir = case code:priv_dir(?MODULE) of
+ {error, _} ->
+ AppPath = filename:dirname(filename:dirname(code:which(?MODULE))),
+ filename:join(AppPath, "priv");
+ Path ->
+ Path
+ end,
+ erlang:load_nif(filename:join(PrivDir, atom_to_list(?MODULE)), 0).
+
+hello(_) ->
+ erlang:nif_error({not_loaded, ?MODULE}).
+endef
+
+new-nif:
+ifneq ($(wildcard $(C_SRC_DIR)/$n.c),)
+ $(error Error: $(C_SRC_DIR)/$n.c already exists)
+endif
+ifneq ($(wildcard src/$n.erl),)
+ $(error Error: src/$n.erl already exists)
+endif
+ifndef n
+ $(error Usage: $(MAKE) new-nif n=NAME [in=APP])
+endif
+ifdef in
+ $(verbose) $(MAKE) -C $(APPS_DIR)/$(in)/ new-nif n=$n in=
+else
+ $(verbose) mkdir -p $(C_SRC_DIR) src/
+ $(verbose) $(call core_render,bs_c_nif,$(C_SRC_DIR)/$n.c)
+ $(verbose) $(call core_render,bs_erl_nif,src/$n.erl)
+endif
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: ci ci-prepare ci-setup
+
+CI_OTP ?=
+CI_HIPE ?=
+CI_ERLLVM ?=
+
+ifeq ($(CI_VM),native)
+ERLC_OPTS += +native
+TEST_ERLC_OPTS += +native
+else ifeq ($(CI_VM),erllvm)
+ERLC_OPTS += +native +'{hipe, [to_llvm]}'
+TEST_ERLC_OPTS += +native +'{hipe, [to_llvm]}'
+endif
+
+ifeq ($(strip $(CI_OTP) $(CI_HIPE) $(CI_ERLLVM)),)
+ci::
+else
+
+ci:: $(addprefix ci-,$(CI_OTP) $(addsuffix -native,$(CI_HIPE)) $(addsuffix -erllvm,$(CI_ERLLVM)))
+
+ci-prepare: $(addprefix $(KERL_INSTALL_DIR)/,$(CI_OTP) $(addsuffix -native,$(CI_HIPE)))
+
+ci-setup::
+ $(verbose) :
+
+ci-extra::
+ $(verbose) :
+
+ci_verbose_0 = @echo " CI " $(1);
+ci_verbose = $(ci_verbose_$(V))
+
+define ci_target
+ci-$1: $(KERL_INSTALL_DIR)/$2
+ $(verbose) $(MAKE) --no-print-directory clean
+ $(ci_verbose) \
+ PATH="$(KERL_INSTALL_DIR)/$2/bin:$(PATH)" \
+ CI_OTP_RELEASE="$1" \
+ CT_OPTS="-label $1" \
+ CI_VM="$3" \
+ $(MAKE) ci-setup tests
+ $(verbose) $(MAKE) --no-print-directory ci-extra
+endef
+
+$(foreach otp,$(CI_OTP),$(eval $(call ci_target,$(otp),$(otp),otp)))
+$(foreach otp,$(CI_HIPE),$(eval $(call ci_target,$(otp)-native,$(otp)-native,native)))
+$(foreach otp,$(CI_ERLLVM),$(eval $(call ci_target,$(otp)-erllvm,$(otp)-native,erllvm)))
+
+$(foreach otp,$(filter-out $(ERLANG_OTP),$(CI_OTP)),$(eval $(call kerl_otp_target,$(otp))))
+$(foreach otp,$(filter-out $(ERLANG_HIPE),$(sort $(CI_HIPE) $(CI_ERLLVM))),$(eval $(call kerl_hipe_target,$(otp))))
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Continuous Integration targets:" \
+ " ci Run '$(MAKE) tests' on all configured Erlang versions." \
+ "" \
+ "The CI_OTP variable must be defined with the Erlang versions" \
+		"that are to be tested. For example: CI_OTP = OTP-17.3.4 OTP-17.5.3"
+
+endif
+
+# Copyright (c) 2020, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifdef CONCUERROR_TESTS
+
+.PHONY: concuerror distclean-concuerror
+
+# Configuration
+
+CONCUERROR_LOGS_DIR ?= $(CURDIR)/logs
+CONCUERROR_OPTS ?=
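+
+# Tests are declared as module:function pairs, each becoming its own
+# concuerror-MODULE-FUNCTION target (names below are illustrative):
+#
+#     CONCUERROR_TESTS = my_mod:test1 my_mod:test2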
+
+# Core targets.
+
+check:: concuerror
+
+ifndef KEEP_LOGS
+distclean:: distclean-concuerror
+endif
+
+# Plugin-specific targets.
+
+$(ERLANG_MK_TMP)/Concuerror/bin/concuerror: | $(ERLANG_MK_TMP)
+ $(verbose) git clone https://github.com/parapluu/Concuerror $(ERLANG_MK_TMP)/Concuerror
+ $(verbose) $(MAKE) -C $(ERLANG_MK_TMP)/Concuerror
+
+$(CONCUERROR_LOGS_DIR):
+ $(verbose) mkdir -p $(CONCUERROR_LOGS_DIR)
+
+define concuerror_html_report
+<!DOCTYPE html>
+<html lang="en">
+<head>
+<meta charset="utf-8">
+<title>Concuerror HTML report</title>
+</head>
+<body>
+<h1>Concuerror HTML report</h1>
+<p>Generated on $(concuerror_date)</p>
+<ul>
+$(foreach t,$(concuerror_targets),<li><a href="$(t).txt">$(t)</a></li>)
+</ul>
+</body>
+</html>
+endef
+
+concuerror: $(addprefix concuerror-,$(subst :,-,$(CONCUERROR_TESTS)))
+ $(eval concuerror_date := $(shell date))
+ $(eval concuerror_targets := $^)
+ $(verbose) $(call core_render,concuerror_html_report,$(CONCUERROR_LOGS_DIR)/concuerror.html)
+
+define concuerror_target
+.PHONY: concuerror-$1-$2
+
+concuerror-$1-$2: test-build | $(ERLANG_MK_TMP)/Concuerror/bin/concuerror $(CONCUERROR_LOGS_DIR)
+ $(ERLANG_MK_TMP)/Concuerror/bin/concuerror \
+ --pa $(CURDIR)/ebin --pa $(TEST_DIR) \
+ -o $(CONCUERROR_LOGS_DIR)/concuerror-$1-$2.txt \
+ $$(CONCUERROR_OPTS) -m $1 -t $2
+endef
+
+$(foreach test,$(CONCUERROR_TESTS),$(eval $(call concuerror_target,$(firstword $(subst :, ,$(test))),$(lastword $(subst :, ,$(test))))))
+
+distclean-concuerror:
+ $(gen_verbose) rm -rf $(CONCUERROR_LOGS_DIR)
+
+endif
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-edoc edoc
+
+# Configuration.
+
+EDOC_OPTS ?=
+EDOC_SRC_DIRS ?=
+EDOC_OUTPUT ?= doc
+
+define edoc.erl
+ SrcPaths = lists:foldl(fun(P, Acc) ->
+ filelib:wildcard(atom_to_list(P) ++ "/{src,c_src}") ++ Acc
+ end, [], [$(call comma_list,$(patsubst %,'%',$(call core_native_path,$(EDOC_SRC_DIRS))))]),
+ DefaultOpts = [{dir, "$(EDOC_OUTPUT)"}, {source_path, SrcPaths}, {subpackages, false}],
+ edoc:application($(1), ".", [$(2)] ++ DefaultOpts),
+ halt(0).
+endef
+
+# Core targets.
+
+ifneq ($(strip $(EDOC_SRC_DIRS)$(wildcard doc/overview.edoc)),)
+docs:: edoc
+endif
+
+distclean:: distclean-edoc
+
+# Plugin-specific targets.
+
+edoc: distclean-edoc doc-deps
+ $(gen_verbose) $(call erlang,$(call edoc.erl,$(PROJECT),$(EDOC_OPTS)))
+
+distclean-edoc:
+ $(gen_verbose) rm -f $(EDOC_OUTPUT)/*.css $(EDOC_OUTPUT)/*.html $(EDOC_OUTPUT)/*.png $(EDOC_OUTPUT)/edoc-info
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Configuration.
+
+DTL_FULL_PATH ?=
+DTL_PATH ?= templates/
+DTL_PREFIX ?=
+DTL_SUFFIX ?= _dtl
+DTL_OPTS ?=
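+
+# For example (illustrative paths): with the defaults, templates/users.dtl
+# compiles to the module users_dtl; with DTL_FULL_PATH set, a nested
+# templates/users/show.dtl compiles to users_show_dtl instead.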
+
+# Verbosity.
+
+dtl_verbose_0 = @echo " DTL " $(filter %.dtl,$(?F));
+dtl_verbose = $(dtl_verbose_$(V))
+
+# Core targets.
+
+DTL_PATH := $(abspath $(DTL_PATH))
+DTL_FILES := $(sort $(call core_find,$(DTL_PATH),*.dtl))
+
+ifneq ($(DTL_FILES),)
+
+DTL_NAMES = $(addprefix $(DTL_PREFIX),$(addsuffix $(DTL_SUFFIX),$(DTL_FILES:$(DTL_PATH)/%.dtl=%)))
+DTL_MODULES = $(if $(DTL_FULL_PATH),$(subst /,_,$(DTL_NAMES)),$(notdir $(DTL_NAMES)))
+BEAM_FILES += $(addsuffix .beam,$(addprefix ebin/,$(DTL_MODULES)))
+
+ifneq ($(words $(DTL_FILES)),0)
+# Rebuild templates when the Makefile changes.
+$(ERLANG_MK_TMP)/last-makefile-change-erlydtl: $(MAKEFILE_LIST) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(DTL_FILES); \
+ fi
+ $(verbose) touch $@
+
+ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change-erlydtl
+endif
+
+define erlydtl_compile.erl
+ [begin
+ Module0 = case "$(strip $(DTL_FULL_PATH))" of
+ "" ->
+ filename:basename(F, ".dtl");
+ _ ->
+ "$(call core_native_path,$(DTL_PATH))/" ++ F2 = filename:rootname(F, ".dtl"),
+ re:replace(F2, "/", "_", [{return, list}, global])
+ end,
+ Module = list_to_atom("$(DTL_PREFIX)" ++ string:to_lower(Module0) ++ "$(DTL_SUFFIX)"),
+ case erlydtl:compile(F, Module, [$(DTL_OPTS)] ++ [{out_dir, "ebin/"}, return_errors]) of
+ ok -> ok;
+ {ok, _} -> ok
+ end
+ end || F <- string:tokens("$(1)", " ")],
+ halt().
+endef
+
+ebin/$(PROJECT).app:: $(DTL_FILES) | ebin/
+ $(if $(strip $?),\
+ $(dtl_verbose) $(call erlang,$(call erlydtl_compile.erl,$(call core_native_path,$?)),\
+ -pa ebin/))
+
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, Dave Cottlehuber <dch@skunkwerks.at>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-escript escript escript-zip
+
+# Configuration.
+
+ESCRIPT_NAME ?= $(PROJECT)
+ESCRIPT_FILE ?= $(ESCRIPT_NAME)
+
+ESCRIPT_SHEBANG ?= /usr/bin/env escript
+ESCRIPT_COMMENT ?= This is an -*- erlang -*- file
+ESCRIPT_EMU_ARGS ?= -escript main $(ESCRIPT_NAME)
+
+ESCRIPT_ZIP ?= 7z a -tzip -mx=9 -mtc=off $(if $(filter-out 0,$(V)),,> /dev/null)
+ESCRIPT_ZIP_FILE ?= $(ERLANG_MK_TMP)/escript.zip
+
+# Core targets.
+
+distclean:: distclean-escript
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Escript targets:" \
+ " escript Build an executable escript archive" \
+		"  escript     Build an executable escript archive"
+# Plugin-specific targets.
+
+escript-zip:: FULL=1
+escript-zip:: deps app
+	$(verbose) mkdir -p $(dir $(ESCRIPT_ZIP_FILE))
+ $(verbose) rm -f $(ESCRIPT_ZIP_FILE)
+ $(gen_verbose) cd .. && $(ESCRIPT_ZIP) $(ESCRIPT_ZIP_FILE) $(PROJECT)/ebin/*
+ifneq ($(DEPS),)
+ $(verbose) cd $(DEPS_DIR) && $(ESCRIPT_ZIP) $(ESCRIPT_ZIP_FILE) \
+ $(subst $(DEPS_DIR)/,,$(addsuffix /*,$(wildcard \
+ $(addsuffix /ebin,$(shell cat $(ERLANG_MK_TMP)/deps.log)))))
+endif
+
+escript:: escript-zip
+ $(gen_verbose) printf "%s\n" \
+ "#!$(ESCRIPT_SHEBANG)" \
+ "%% $(ESCRIPT_COMMENT)" \
+ "%%! $(ESCRIPT_EMU_ARGS)" > $(ESCRIPT_FILE)
+ $(verbose) cat $(ESCRIPT_ZIP_FILE) >> $(ESCRIPT_FILE)
+ $(verbose) chmod +x $(ESCRIPT_FILE)
+
+distclean-escript:
+ $(gen_verbose) rm -f $(ESCRIPT_FILE)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Verbosity.
+
+proto_verbose_0 = @echo " PROTO " $(filter %.proto,$(?F));
+proto_verbose = $(proto_verbose_$(V))
+
+# Core targets.
+
+ifneq ($(wildcard src/),)
+ifneq ($(filter gpb protobuffs,$(BUILD_DEPS) $(DEPS)),)
+PROTO_FILES := $(filter %.proto,$(ALL_SRC_FILES))
+ERL_FILES += $(addprefix src/,$(patsubst %.proto,%_pb.erl,$(notdir $(PROTO_FILES))))
+
+ifeq ($(PROTO_FILES),)
+$(ERLANG_MK_TMP)/last-makefile-change-protobuffs:
+ $(verbose) :
+else
+# Rebuild proto files when the Makefile changes.
+# We exclude $(PROJECT).d to avoid a circular dependency.
+$(ERLANG_MK_TMP)/last-makefile-change-protobuffs: $(filter-out $(PROJECT).d,$(MAKEFILE_LIST)) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(PROTO_FILES); \
+ fi
+ $(verbose) touch $@
+
+$(PROJECT).d:: $(ERLANG_MK_TMP)/last-makefile-change-protobuffs
+endif
+
+ifeq ($(filter gpb,$(BUILD_DEPS) $(DEPS)),)
+define compile_proto.erl
+ [begin
+ protobuffs_compile:generate_source(F, [
+ {output_include_dir, "./include"},
+ {output_src_dir, "./src"}])
+ end || F <- string:tokens("$1", " ")],
+ halt().
+endef
+else
+define compile_proto.erl
+ [begin
+ gpb_compile:file(F, [
+ {include_as_lib, true},
+ {module_name_suffix, "_pb"},
+ {o_hrl, "./include"},
+ {o_erl, "./src"}])
+ end || F <- string:tokens("$1", " ")],
+ halt().
+endef
+endif
+
+ifneq ($(PROTO_FILES),)
+$(PROJECT).d:: $(PROTO_FILES)
+ $(verbose) mkdir -p ebin/ include/
+ $(if $(strip $?),$(proto_verbose) $(call erlang,$(call compile_proto.erl,$?)))
+endif
+endif
+endif
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: relx-rel relx-relup distclean-relx-rel run
+
+# Configuration.
+
+RELX ?= $(ERLANG_MK_TMP)/relx
+RELX_CONFIG ?= $(CURDIR)/relx.config
+
+RELX_URL ?= https://erlang.mk/res/relx-v3.27.0
+RELX_OPTS ?=
+RELX_OUTPUT_DIR ?= _rel
+RELX_REL_EXT ?=
+RELX_TAR ?= 1
+
+ifdef SFX
+ RELX_TAR = 1
+endif
+
+ifeq ($(firstword $(RELX_OPTS)),-o)
+ RELX_OUTPUT_DIR = $(word 2,$(RELX_OPTS))
+else
+ RELX_OPTS += -o $(RELX_OUTPUT_DIR)
+endif
+
+# Core targets.
+
+ifeq ($(IS_DEP),)
+ifneq ($(wildcard $(RELX_CONFIG)),)
+rel:: relx-rel
+
+relup:: relx-relup
+endif
+endif
+
+distclean:: distclean-relx-rel
+
+# Plugin-specific targets.
+
+$(RELX): | $(ERLANG_MK_TMP)
+ $(gen_verbose) $(call core_http_get,$(RELX),$(RELX_URL))
+ $(verbose) chmod +x $(RELX)
+
+relx-rel: $(RELX) rel-deps app
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) release
+ $(verbose) $(MAKE) relx-post-rel
+ifeq ($(RELX_TAR),1)
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) tar
+endif
+
+relx-relup: $(RELX) rel-deps app
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) release
+ $(MAKE) relx-post-rel
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) relup $(if $(filter 1,$(RELX_TAR)),tar)
+
+distclean-relx-rel:
+ $(gen_verbose) rm -rf $(RELX_OUTPUT_DIR)
+
+# Default hooks.
+relx-post-rel::
+ $(verbose) :
+
+# Run target.
+
+ifeq ($(wildcard $(RELX_CONFIG)),)
+run::
+else
+
+define get_relx_release.erl
+ {ok, Config} = file:consult("$(call core_native_path,$(RELX_CONFIG))"),
+ {release, {Name, Vsn0}, _} = lists:keyfind(release, 1, Config),
+ Vsn = case Vsn0 of
+ {cmd, Cmd} -> os:cmd(Cmd);
+ semver -> "";
+ {semver, _} -> "";
+ VsnStr -> Vsn0
+ end,
+ Extended = case lists:keyfind(extended_start_script, 1, Config) of
+ {_, true} -> "1";
+ _ -> ""
+ end,
+ io:format("~s ~s ~s", [Name, Vsn, Extended]),
+ halt(0).
+endef
+
+RELX_REL := $(shell $(call erlang,$(get_relx_release.erl)))
+RELX_REL_NAME := $(word 1,$(RELX_REL))
+RELX_REL_VSN := $(word 2,$(RELX_REL))
+RELX_REL_CMD := $(if $(word 3,$(RELX_REL)),console)
+
+ifeq ($(PLATFORM),msys2)
+RELX_REL_EXT := .cmd
+endif
+
+run:: all
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) $(RELX_REL_CMD)
+
+ifdef RELOAD
+rel::
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) ping
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) \
+ eval "io:format(\"~p~n\", [c:lm()])"
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Relx targets:" \
+ " run Compile the project, build the release and run it"
+
+endif
+
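+# Illustrative sketch (release name and version assumed): with a relx.config
+# containing
+#
+#   {release, {myrel, "1.0.0"}, [myapp]}.
+#   {extended_start_script, true}.
+#
+# get_relx_release.erl above prints "myrel 1.0.0 1", so "make run" builds the
+# release and executes _rel/myrel/bin/myrel console.
+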
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, M Robert Martin <rob@version2beta.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: shell
+
+# Configuration.
+
+SHELL_ERL ?= erl
+SHELL_PATHS ?= $(CURDIR)/ebin $(TEST_DIR)
+SHELL_OPTS ?=
+
+ALL_SHELL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(SHELL_DEPS))
+
+# Core targets
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Shell targets:" \
+ " shell Run an erlang shell with SHELL_OPTS or reasonable default"
+
+# Plugin-specific targets.
+
+$(foreach dep,$(SHELL_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+build-shell-deps:
+else
+build-shell-deps: $(ALL_SHELL_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_SHELL_DEPS_DIRS) ; do \
+ if [ -z "$(strip $(FULL))" ] && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ else \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ fi \
+ done
+endif
+
+shell:: build-shell-deps
+ $(gen_verbose) $(SHELL_ERL) -pa $(SHELL_PATHS) $(SHELL_OPTS)
+
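+# Illustrative sketch (dep and application names assumed): a Makefile can
+# provision shell-only deps and pass startup flags, e.g.
+#
+#   SHELL_DEPS = tddreloader
+#   SHELL_OPTS = -sname shell -eval 'application:ensure_all_started(my_app)'
+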
+# Copyright 2017, Stanislaw Klekot <dozzie@jarowit.net>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-sphinx sphinx
+
+# Configuration.
+
+SPHINX_BUILD ?= sphinx-build
+SPHINX_SOURCE ?= doc
+SPHINX_CONFDIR ?=
+SPHINX_FORMATS ?= html
+SPHINX_DOCTREES ?= $(ERLANG_MK_TMP)/sphinx.doctrees
+SPHINX_OPTS ?=
+
+#sphinx_html_opts =
+#sphinx_html_output = html
+#sphinx_man_opts =
+#sphinx_man_output = man
+#sphinx_latex_opts =
+#sphinx_latex_output = latex
+
+# Helpers.
+
+sphinx_build_0 = @echo " SPHINX" $1; $(SPHINX_BUILD) -N -q
+sphinx_build_1 = $(SPHINX_BUILD) -N
+sphinx_build_2 = set -x; $(SPHINX_BUILD)
+sphinx_build = $(sphinx_build_$(V))
+
+define sphinx.build
+$(call sphinx_build,$1) -b $1 -d $(SPHINX_DOCTREES) $(if $(SPHINX_CONFDIR),-c $(SPHINX_CONFDIR)) $(SPHINX_OPTS) $(sphinx_$1_opts) -- $(SPHINX_SOURCE) $(call sphinx.output,$1)
+
+endef
+
+define sphinx.output
+$(if $(sphinx_$1_output),$(sphinx_$1_output),$1)
+endef
+
+# Targets.
+
+ifneq ($(wildcard $(if $(SPHINX_CONFDIR),$(SPHINX_CONFDIR),$(SPHINX_SOURCE))/conf.py),)
+docs:: sphinx
+distclean:: distclean-sphinx
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Sphinx targets:" \
+ " sphinx Generate Sphinx documentation." \
+ "" \
+ "ReST sources and 'conf.py' file are expected in directory pointed by" \
+ "SPHINX_SOURCE ('doc' by default). SPHINX_FORMATS lists formats to build (only" \
+ "'html' format is generated by default); target directory can be specified by" \
+ 'setting sphinx_$${format}_output, for example: sphinx_html_output = output/html' \
+ "Additional Sphinx options can be set in SPHINX_OPTS."
+
+# Plugin-specific targets.
+
+sphinx:
+ $(foreach F,$(SPHINX_FORMATS),$(call sphinx.build,$F))
+
+distclean-sphinx:
+ $(gen_verbose) rm -rf $(filter-out $(SPHINX_SOURCE),$(foreach F,$(SPHINX_FORMATS),$(call sphinx.output,$F)))
+
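+# Illustrative sketch (output paths assumed): build HTML and man pages into
+# custom directories:
+#
+#   SPHINX_FORMATS = html man
+#   sphinx_html_output = priv/doc/html
+#   sphinx_man_output = priv/doc/man
+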
+# Copyright (c) 2017, Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: show-ERL_LIBS show-ERLC_OPTS show-TEST_ERLC_OPTS
+
+show-ERL_LIBS:
+ @echo $(ERL_LIBS)
+
+show-ERLC_OPTS:
+ @$(foreach opt,$(ERLC_OPTS) -pa ebin -I include,echo "$(opt)";)
+
+show-TEST_ERLC_OPTS:
+ @$(foreach opt,$(TEST_ERLC_OPTS) -pa ebin -I include,echo "$(opt)";)
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Erlang Solutions Ltd.
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: xref distclean-xref
+
+# Configuration.
+
+ifeq ($(XREF_CONFIG),)
+ XREFR_ARGS :=
+else
+ XREFR_ARGS := -c $(XREF_CONFIG)
+endif
+
+XREFR ?= $(CURDIR)/xrefr
+export XREFR
+
+XREFR_URL ?= https://github.com/inaka/xref_runner/releases/download/1.1.0/xrefr
+
+# Core targets.
+
+help::
+ $(verbose) printf '%s\n' '' \
+ 'Xref targets:' \
+ ' xref Run Xrefr using $$XREF_CONFIG as config file if defined'
+
+distclean:: distclean-xref
+
+# Plugin-specific targets.
+
+$(XREFR):
+ $(gen_verbose) $(call core_http_get,$(XREFR),$(XREFR_URL))
+ $(verbose) chmod +x $(XREFR)
+
+xref: deps app $(XREFR)
+ $(gen_verbose) $(XREFR) $(XREFR_ARGS)
+
+distclean-xref:
+ $(gen_verbose) rm -rf $(XREFR)
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: sfx
+
+ifdef RELX_REL
+ifdef SFX
+
+# Configuration.
+
+SFX_ARCHIVE ?= $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/$(RELX_REL_NAME)-$(RELX_REL_VSN).tar.gz
+SFX_OUTPUT_FILE ?= $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME).run
+
+# Core targets.
+
+rel:: sfx
+
+# Plugin-specific targets.
+
+define sfx_stub
+#!/bin/sh
+
+TMPDIR=`mktemp -d`
+ARCHIVE=`awk '/^__ARCHIVE_BELOW__$$/ {print NR + 1; exit 0;}' $$0`
+FILENAME=$$(basename $$0)
+REL=$${FILENAME%.*}
+
+tail -n+$$ARCHIVE $$0 | tar -xzf - -C $$TMPDIR
+
+$$TMPDIR/bin/$$REL console
+RET=$$?
+
+rm -rf $$TMPDIR
+
+exit $$RET
+
+__ARCHIVE_BELOW__
+endef
+
+sfx:
+ $(verbose) $(call core_render,sfx_stub,$(SFX_OUTPUT_FILE))
+ $(gen_verbose) cat $(SFX_ARCHIVE) >> $(SFX_OUTPUT_FILE)
+ $(verbose) chmod +x $(SFX_OUTPUT_FILE)
+
+endif
+endif
+
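+# Illustrative sketch: "make rel SFX=1" produces $(RELX_OUTPUT_DIR)/<name>.run,
+# a shell script whose payload after the __ARCHIVE_BELOW__ marker is the
+# release tarball; running it unpacks into a temp dir and boots the release
+# console.
+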
+# Copyright (c) 2013-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# External plugins.
+
+DEP_PLUGINS ?=
+
+$(foreach p,$(DEP_PLUGINS),\
+ $(eval $(if $(findstring /,$p),\
+ $(call core_dep_plugin,$p,$(firstword $(subst /, ,$p))),\
+ $(call core_dep_plugin,$p/plugins.mk,$p))))
+
+help:: help-plugins
+
+help-plugins::
+ $(verbose) :
+
+# Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015-2016, Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Fetch dependencies recursively (without building them).
+
+.PHONY: fetch-deps fetch-doc-deps fetch-rel-deps fetch-test-deps \
+ fetch-shell-deps
+
+.PHONY: $(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+fetch-deps: $(ERLANG_MK_RECURSIVE_DEPS_LIST)
+fetch-doc-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST)
+fetch-rel-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST)
+fetch-test-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST)
+fetch-shell-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+ifneq ($(SKIP_DEPS),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST):
+ $(verbose) :> $@
+else
+# By default, we fetch "normal" dependencies. They are also included no
+# matter the type of requested dependencies.
+#
+# $(ALL_DEPS_DIRS) includes $(BUILD_DEPS).
+
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_DOC_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_REL_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_TEST_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_SHELL_DEPS_DIRS)
+
+# Allow the use of fetch-deps together with $(DEP_TYPES) to fetch multiple
+# types of dependencies with a single target.
+ifneq ($(filter doc,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_DOC_DEPS_DIRS)
+endif
+ifneq ($(filter rel,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_REL_DEPS_DIRS)
+endif
+ifneq ($(filter test,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_TEST_DEPS_DIRS)
+endif
+ifneq ($(filter shell,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_SHELL_DEPS_DIRS)
+endif
+
+ERLANG_MK_RECURSIVE_TMP_LIST := $(abspath $(ERLANG_MK_TMP)/recursive-tmp-deps-$(shell echo $$PPID).log)
+
+$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): | $(ERLANG_MK_TMP)
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_RECURSIVE_TMP_LIST)
+endif
+ $(verbose) touch $(ERLANG_MK_RECURSIVE_TMP_LIST)
+ $(verbose) set -e; for dep in $^ ; do \
+ if ! grep -qs ^$$dep$$ $(ERLANG_MK_RECURSIVE_TMP_LIST); then \
+ echo $$dep >> $(ERLANG_MK_RECURSIVE_TMP_LIST); \
+ if grep -qs -E "^[[:blank:]]*include[[:blank:]]+(erlang\.mk|.*/erlang\.mk|.*ERLANG_MK_FILENAME.*)$$" \
+ $$dep/GNUmakefile $$dep/makefile $$dep/Makefile; then \
+ $(MAKE) -C $$dep fetch-deps \
+ IS_DEP=1 \
+ ERLANG_MK_RECURSIVE_TMP_LIST=$(ERLANG_MK_RECURSIVE_TMP_LIST); \
+ fi \
+ fi \
+ done
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) sort < $(ERLANG_MK_RECURSIVE_TMP_LIST) | \
+ uniq > $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted
+ $(verbose) cmp -s $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted $@ \
+ || mv $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted $@
+ $(verbose) rm -f $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted
+ $(verbose) rm $(ERLANG_MK_RECURSIVE_TMP_LIST)
+endif
+endif # ifneq ($(SKIP_DEPS),)
+
+# List dependencies recursively.
+
+.PHONY: list-deps list-doc-deps list-rel-deps list-test-deps \
+ list-shell-deps
+
+list-deps: $(ERLANG_MK_RECURSIVE_DEPS_LIST)
+list-doc-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST)
+list-rel-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST)
+list-test-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST)
+list-shell-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+list-deps list-doc-deps list-rel-deps list-test-deps list-shell-deps:
+ $(verbose) cat $^
+
+# Query dependencies recursively.
+
+.PHONY: query-deps query-doc-deps query-rel-deps query-test-deps \
+ query-shell-deps
+
+QUERY ?= name fetch_method repo version
+
+define query_target
+$(1): $(2) clean-tmp-query.log
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(4)
+endif
+ $(verbose) $(foreach dep,$(3),\
+ echo $(PROJECT): $(foreach q,$(QUERY),$(call query_$(q),$(dep))) >> $(4) ;)
+ $(if $(filter-out query-deps,$(1)),,\
+ $(verbose) set -e; for dep in $(3) ; do \
+ if grep -qs ^$$$$dep$$$$ $(ERLANG_MK_TMP)/query.log; then \
+ :; \
+ else \
+ echo $$$$dep >> $(ERLANG_MK_TMP)/query.log; \
+ $(MAKE) -C $(DEPS_DIR)/$$$$dep $$@ QUERY="$(QUERY)" IS_DEP=1 || true; \
+ fi \
+ done)
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) touch $(4)
+ $(verbose) cat $(4)
+endif
+endef
+
+clean-tmp-query.log:
+ifeq ($(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_TMP)/query.log
+endif
+
+$(eval $(call query_target,query-deps,$(ERLANG_MK_RECURSIVE_DEPS_LIST),$(BUILD_DEPS) $(DEPS),$(ERLANG_MK_QUERY_DEPS_FILE)))
+$(eval $(call query_target,query-doc-deps,$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST),$(DOC_DEPS),$(ERLANG_MK_QUERY_DOC_DEPS_FILE)))
+$(eval $(call query_target,query-rel-deps,$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST),$(REL_DEPS),$(ERLANG_MK_QUERY_REL_DEPS_FILE)))
+$(eval $(call query_target,query-test-deps,$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST),$(TEST_DEPS),$(ERLANG_MK_QUERY_TEST_DEPS_FILE)))
+$(eval $(call query_target,query-shell-deps,$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST),$(SHELL_DEPS),$(ERLANG_MK_QUERY_SHELL_DEPS_FILE)))
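+
+# Illustrative sketch: "make query-deps QUERY='name repo'" prints one line per
+# dependency, recursively, in the form "project: dep_name repo_url".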
diff --git a/deps/rabbitmq_cli/include/.gitkeep b/deps/rabbitmq_cli/include/.gitkeep
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/deps/rabbitmq_cli/include/.gitkeep
diff --git a/deps/rabbitmq_cli/lib/rabbit_common/records.ex b/deps/rabbitmq_cli/lib/rabbit_common/records.ex
new file mode 100644
index 0000000000..dc1503caf7
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbit_common/records.ex
@@ -0,0 +1,19 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2016-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitCommon.Records do
+ require Record
+ import Record, only: [defrecord: 2, extract: 2]
+
+ # Important: amqqueue records must not be used directly since they are versioned
+ # for mixed version cluster compatibility. Convert records
+ # to maps on the server end to access the fields of those records. MK.
+ defrecord :listener, extract(:listener, from_lib: "rabbit_common/include/rabbit.hrl")
+ defrecord :plugin, extract(:plugin, from_lib: "rabbit_common/include/rabbit.hrl")
+ defrecord :resource, extract(:resource, from_lib: "rabbit_common/include/rabbit.hrl")
+
+ defrecord :hostent, extract(:hostent, from_lib: "kernel/include/inet.hrl")
+end
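+
+# Usage sketch (not part of this file; field names come from rabbit.hrl):
+# importing the module makes the records matchable in Elixir, e.g.
+#
+#   import RabbitCommon.Records
+#   resource(virtual_host: vhost, name: name) = some_resource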
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/auto_complete.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/auto_complete.ex
new file mode 100644
index 0000000000..1f907b528d
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/auto_complete.ex
@@ -0,0 +1,118 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.AutoComplete do
+ alias RabbitMQ.CLI.Core.{CommandModules, Parser}
+
+  # Use the same Jaro distance limit as Elixir's "did you mean?" suggestions
+ @jaro_distance_limit 0.77
+
+ @spec complete(String.t(), [String.t()]) :: [String.t()]
+ def complete(_, []) do
+ []
+ end
+
+ def complete(script_name, args) do
+ case Parser.parse_global(args) do
+ {_, %{script_name: _args_script_name}, _} ->
+ complete(args)
+
+ _ ->
+ complete(["--script-name", script_name | args])
+ end
+ end
+
+ def suggest_command(_cmd_name, empty) when empty == %{} do
+ nil
+ end
+ def suggest_command(typed, module_map) do
+ suggestion =
+ module_map
+ |> Map.keys()
+ |> Enum.map(fn existing ->
+ {existing, String.jaro_distance(existing, typed)}
+ end)
+ |> Enum.max_by(fn {_, distance} -> distance end)
+
+ case suggestion do
+ {cmd, distance} when distance >= @jaro_distance_limit ->
+ {:suggest, cmd}
+ _ ->
+ nil
+ end
+ end
+
+ defp complete(tokens) do
+ {command, command_name, _, _, _} = Parser.parse(tokens)
+ last_token = List.last(tokens)
+
+ case {command, command_name} do
+ ## No command provided
+ {_, ""} ->
+ complete_default_opts(last_token)
+
+ ## Command is not found/incomplete
+ {:no_command, command_name} ->
+ complete_command_name(command_name)
+
+ {{:suggest, _}, command_name} ->
+ complete_command_name(command_name)
+
+ ## Command is found
+ {command, _} ->
+ complete_command_opts(command, last_token)
+ end
+ |> Enum.sort()
+ end
+
+ defp complete_default_opts(opt) do
+ Parser.default_switches()
+ |> Keyword.keys()
+ |> Enum.map(fn sw -> "--" <> to_string(sw) end)
+ |> select_starts_with(opt)
+ |> format_options
+ end
+
+ defp complete_command_name(command_name) do
+ module_map = CommandModules.module_map()
+
+ case module_map[command_name] do
+ nil ->
+ module_map
+ |> Map.keys()
+ |> select_starts_with(command_name)
+
+      _ ->
+        # wrapped in a list: complete/1 pipes this value into Enum.sort/1
+        [command_name]
+ end
+ end
+
+ defp complete_command_opts(command, <<"-", _::binary>> = opt) do
+ switches =
+ command.switches
+ |> Keyword.keys()
+ |> Enum.map(fn sw -> "--" <> to_string(sw) end)
+
+ # aliases = command.aliases
+ # |> Keyword.keys
+ # |> Enum.map(fn(al) -> "-" <> to_string(al) end)
+ select_starts_with(switches, opt)
+ |> format_options
+ end
+
+ defp complete_command_opts(_, _) do
+ []
+ end
+
+ defp select_starts_with(list, prefix) do
+ Enum.filter(list, fn el -> String.starts_with?(el, prefix) end)
+ end
+
+ defp format_options(options) do
+ options
+ |> Enum.map(fn opt -> String.replace(opt, "_", "-") end)
+ end
+end
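+
+# Usage sketch (command map contents assumed): suggest_command/2 picks the
+# closest known command name by Jaro distance:
+#
+#   iex> RabbitMQ.CLI.AutoComplete.suggest_command(
+#   ...>   "lst_queues", %{"list_queues" => ListQueuesCommand})
+#   {:suggest, "list_queues"}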
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/command_behaviour.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/command_behaviour.ex
new file mode 100644
index 0000000000..800d4d3227
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/command_behaviour.ex
@@ -0,0 +1,170 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.CommandBehaviour do
+ alias RabbitMQ.CLI.Core.Helpers
+
+ @type pair_of_strings :: nonempty_list(String.t())
+
+ @callback usage() :: String.t() | [String.t()]
+ # validates CLI arguments
+ @callback validate(list(), map()) :: :ok | {:validation_failure, atom() | {atom(), String.t()}}
+ @callback merge_defaults(list(), map()) :: {list(), map()}
+ @callback banner(list(), map()) :: [String.t()] | String.t() | nil
+ @callback run(list(), map()) :: any
+ # Coerces run/2 return value into the standard command output form
+ # that is then formatted, printed and returned as an exit code.
+ # There is a default implementation for this callback in DefaultOutput module
+ @callback output(any, map()) ::
+ :ok
+ | {:ok, any}
+ | {:stream, Enum.t()}
+ | {:error, RabbitMQ.CLI.Core.ExitCodes.exit_code(), [String.t()]}
+
+ @optional_callbacks formatter: 0,
+ printer: 0,
+ scopes: 0,
+ usage_additional: 0,
+ usage_doc_guides: 0,
+ description: 0,
+ help_section: 0,
+ switches: 0,
+ aliases: 0,
+ # validates execution environment, e.g. file presence,
+ # whether RabbitMQ is in an expected state on a node, etc
+ validate_execution_environment: 2,
+ distribution: 1
+
+ @callback validate_execution_environment(list(), map()) ::
+ :ok | {:validation_failure, atom() | {atom(), any}}
+ @callback switches() :: Keyword.t()
+ @callback aliases() :: Keyword.t()
+
+ @callback formatter() :: atom()
+ @callback printer() :: atom()
+ @callback scopes() :: [atom()] | nil
+ @callback description() :: String.t()
+ @callback help_section() :: String.t()
+ @callback usage_additional() :: String.t() | [String.t()] | nonempty_list(pair_of_strings()) | [{String.t(), String.t()}]
+ @callback usage_doc_guides() :: String.t() | [String.t()]
+ ## Erlang distribution control
+ ## :cli - default rabbitmqctl generated node name
+ ## :none - disable erlang distribution
+ ## {:fun, fun} - use a custom function to start distribution
+ @callback distribution(map()) :: :cli | :none | {:fun, (map() -> :ok | {:error, any()})}
+
+ defmacro defcmd(map) do
+ usage_q = case map[:usage] do
+ nil -> :ok
+ usage ->
+ quote do def usage(), do: unquote(usage) end
+ end
+ scopes_q = case map[:scopes] do
+ nil -> :ok
+ scopes ->
+ quote do def scopes(), do: unquote(scopes) end
+ end
+ description_q = case map[:description] do
+ nil -> :ok
+ description ->
+ quote do def description(), do: unquote(description) end
+ end
+ help_section_q = case map[:help_section] do
+ nil -> :ok
+ help_section ->
+ quote do def help_section(), do: unquote(help_section) end
+ end
+ usage_additional_q = case map[:usage_additional] do
+ nil -> :ok
+ usage_additional ->
+ quote do def usage_additional(), do: unquote(usage_additional) end
+ end
+ formatter_q = case map[:formatter] do
+ nil -> :ok
+ formatter ->
+ quote do def formatter(), do: unquote(formatter) end
+ end
+ switches_q = case map[:switches] do
+ nil -> :ok
+ switches ->
+ quote do def switches(), do: unquote(switches) end
+ end
+ aliases_q = case map[:aliases] do
+ nil -> :ok
+ aliases ->
+ quote do def aliases(), do: unquote(aliases) end
+ end
+
+ quote do
+ unquote(usage_q)
+ unquote(scopes_q)
+ unquote(description_q)
+ unquote(help_section_q)
+ unquote(usage_additional_q)
+ unquote(formatter_q)
+ unquote(switches_q)
+ unquote(aliases_q)
+ end
+ end
+
+ def usage(cmd) do
+ cmd.usage()
+ end
+
+ def scopes(cmd) do
+ Helpers.apply_if_exported(cmd, :scopes, [], nil)
+ end
+
+ def description(cmd) do
+ Helpers.apply_if_exported(cmd, :description, [], "")
+ end
+
+ def help_section(cmd) do
+ case Helpers.apply_if_exported(cmd, :help_section, [], :other) do
+ :other ->
+ case Application.get_application(cmd) do
+ :rabbitmqctl -> :other
+ plugin -> {:plugin, plugin}
+ end
+ section ->
+ section
+ end
+ end
+
+ def usage_additional(cmd) do
+ Helpers.apply_if_exported(cmd, :usage_additional, [], [])
+ end
+
+ def usage_doc_guides(cmd) do
+ Helpers.apply_if_exported(cmd, :usage_doc_guides, [], [])
+ end
+
+ def formatter(cmd, default) do
+ Helpers.apply_if_exported(cmd, :formatter, [], default)
+ end
+
+ def printer(cmd, default) do
+ Helpers.apply_if_exported(cmd, :printer, [], default)
+ end
+
+ def switches(cmd) do
+ Helpers.apply_if_exported(cmd, :switches, [], [])
+ end
+
+ def aliases(cmd) do
+ Helpers.apply_if_exported(cmd, :aliases, [], [])
+ end
+
+ def validate_execution_environment(cmd, args, options) do
+ Helpers.apply_if_exported(cmd,
+ :validate_execution_environment, [args, options],
+ :ok)
+ end
+
+ def distribution(cmd, options) do
+ Helpers.apply_if_exported(cmd, :distribution, [options], :cli)
+ end
+end
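+
+# Usage sketch (hypothetical command module): the required callbacks are
+# implemented directly, while defcmd/1 fills in the optional ones:
+#
+#   defmodule RabbitMQ.CLI.Ctl.Commands.EchoCommand do
+#     @behaviour RabbitMQ.CLI.CommandBehaviour
+#     import RabbitMQ.CLI.CommandBehaviour, only: [defcmd: 1]
+#
+#     defcmd(usage: "echo <text>", description: "Echoes its argument")
+#     def merge_defaults(args, opts), do: {args, opts}
+#     def validate([], _), do: {:validation_failure, :not_enough_args}
+#     def validate([_], _), do: :ok
+#     def validate(_, _), do: {:validation_failure, :too_many_args}
+#     def banner(_, _), do: nil
+#     def run([text], _opts), do: {:ok, text}
+#     def output({:ok, text}, _opts), do: {:ok, text}
+#   end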
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/accepts_default_switches_and_timeout.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/accepts_default_switches_and_timeout.ex
new file mode 100644
index 0000000000..3a78974b0c
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/accepts_default_switches_and_timeout.ex
@@ -0,0 +1,16 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+# Should be used by commands that accept only the default switches
+# plus a timeout, and need no other validators.
+defmodule RabbitMQ.CLI.Core.AcceptsDefaultSwitchesAndTimeout do
+ defmacro __using__(_) do
+ quote do
+ def switches(), do: [timeout: :integer]
+ def aliases(), do: [t: :timeout]
+ end
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/accepts_no_positional_arguments.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/accepts_no_positional_arguments.ex
new file mode 100644
index 0000000000..166c4f22f7
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/accepts_no_positional_arguments.ex
@@ -0,0 +1,21 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+# Should be used by commands that accept no positional arguments
+# and need no other validators.
+defmodule RabbitMQ.CLI.Core.AcceptsNoPositionalArguments do
+ defmacro __using__(_) do
+ quote do
+ def validate(args, _) when length(args) > 0 do
+ {:validation_failure, :too_many_args}
+ end
+
+ # Note: this will accept everything, so it must be the
+ # last validation clause defined!
+ def validate(_, _), do: :ok
+ end
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/accepts_one_positional_argument.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/accepts_one_positional_argument.ex
new file mode 100644
index 0000000000..3731775ed3
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/accepts_one_positional_argument.ex
@@ -0,0 +1,25 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+# Should be used by commands that accept exactly one positional argument
+# and need no other validators.
+defmodule RabbitMQ.CLI.Core.AcceptsOnePositionalArgument do
+ defmacro __using__(_) do
+ quote do
+ def validate(args, _) when length(args) == 0 do
+ {:validation_failure, :not_enough_args}
+ end
+
+ def validate(args, _) when length(args) > 1 do
+ {:validation_failure, :too_many_args}
+ end
+
+ # Note: this will accept everything, so it must be the
+ # last validation clause defined!
+ def validate(_, _), do: :ok
+ end
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/accepts_one_positive_integer_argument.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/accepts_one_positive_integer_argument.ex
new file mode 100644
index 0000000000..6b6fd28dfe
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/accepts_one_positive_integer_argument.ex
@@ -0,0 +1,34 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+# Should be used by commands that accept a single positive integer argument
+# and need no other validators.
+defmodule RabbitMQ.CLI.Core.AcceptsOnePositiveIntegerArgument do
+ defmacro __using__(_) do
+ quote do
+ def validate(args, _) when length(args) == 0 do
+ {:validation_failure, :not_enough_args}
+ end
+
+ def validate(args, _) when length(args) > 1 do
+ {:validation_failure, :too_many_args}
+ end
+
+ def validate([value], _) when is_integer(value) do
+ :ok
+ end
+
+ def validate([value], _) do
+ case Integer.parse(value) do
+ {n, _} when n >= 1 -> :ok
+          # the catch-all covers both unparseable values and integers < 1
+          _ -> {:validation_failure, {:bad_argument, "Argument must be a positive integer"}}
+ end
+ end
+
+ def validate(_, _), do: :ok
+ end
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/accepts_two_positional_arguments.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/accepts_two_positional_arguments.ex
new file mode 100644
index 0000000000..18b2740b42
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/accepts_two_positional_arguments.ex
@@ -0,0 +1,25 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+# Should be used by commands that require rabbit app to be stopped
+# but need no other execution environment validators.
+defmodule RabbitMQ.CLI.Core.AcceptsTwoPositionalArguments do
+ defmacro __using__(_) do
+ quote do
+ def validate(args, _) when length(args) < 2 do
+ {:validation_failure, :not_enough_args}
+ end
+
+ def validate(args, _) when length(args) > 2 do
+ {:validation_failure, :too_many_args}
+ end
+
+      # Matches exactly two positional arguments; any other
+      # argument count is rejected by the guards above.
+      def validate([_, _], _), do: :ok
+ end
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/alarms.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/alarms.ex
new file mode 100644
index 0000000000..15143f7f69
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/alarms.ex
@@ -0,0 +1,88 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Core.Alarms do
+ def alarm_lines(alarms, node_name) do
+ Enum.reduce(alarms, [], fn
+ :file_descriptor_limit, acc ->
+ ["File descriptor limit alarm on node #{node_name}" | acc]
+
+ {{:resource_limit, :memory, alarmed_node_name}, _}, acc ->
+ ["Memory alarm on node #{alarmed_node_name}" | acc]
+
+ {:resource_limit, :memory, alarmed_node_name}, acc ->
+ ["Memory alarm on node #{alarmed_node_name}" | acc]
+
+ {{:resource_limit, :disk, alarmed_node_name}, _}, acc ->
+ ["Free disk space alarm on node #{alarmed_node_name}" | acc]
+
+ {:resource_limit, :disk, alarmed_node_name}, acc ->
+ ["Free disk space alarm on node #{alarmed_node_name}" | acc]
+ end)
+ |> Enum.reverse()
+ end
+
+ def local_alarms(alarms, node_name) do
+ Enum.filter(
+ alarms,
+ fn
+ # local by definition
+ :file_descriptor_limit ->
+ true
+
+ {{:resource_limit, _, a_node}, _} ->
+ node_name == a_node
+ end
+ )
+ end
+
+ def clusterwide_alarms(alarms, node_name) do
+ alarms
+ |> Enum.reject(fn x -> x == :file_descriptor_limit end)
+ |> Enum.filter(fn {{:resource_limit, _, a_node}, _} ->
+ a_node != node_name
+ end)
+ end
+
+ def alarm_types(xs) do
+ Enum.map(xs, &alarm_type/1)
+ end
+
+ def alarm_type(val) when is_atom(val) do
+ val
+ end
+ def alarm_type({:resource_limit, val, _node}) do
+ val
+ end
+ def alarm_type({{:resource_limit, val, _node}, []}) do
+ val
+ end
+
+ def alarm_maps(xs) do
+ Enum.map(xs, &alarm_map/1)
+ end
+ def alarm_map(:file_descriptor_limit) do
+ %{
+ type: :resource_limit,
+ resource: :file_descriptors,
+ node: node()
+ }
+ end
+ def alarm_map({{:resource_limit, resource, node}, _}) do
+ %{
+ type: :resource_limit,
+ resource: resource,
+ node: node
+ }
+ end
+ def alarm_map({:resource_limit, resource, node}) do
+ %{
+ type: :resource_limit,
+ resource: resource,
+ node: node
+ }
+ end
+end
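+
+# Illustrative examples (node name assumed):
+#
+#   iex> RabbitMQ.CLI.Core.Alarms.alarm_map({{:resource_limit, :disk, :rabbit@host1}, []})
+#   %{type: :resource_limit, resource: :disk, node: :rabbit@host1}
+#
+#   iex> RabbitMQ.CLI.Core.Alarms.alarm_lines([:file_descriptor_limit], :rabbit@host1)
+#   ["File descriptor limit alarm on node rabbit@host1"]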
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/ansi.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/ansi.ex
new file mode 100644
index 0000000000..e541a632ff
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/ansi.ex
@@ -0,0 +1,35 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Core.ANSI do
+ def bright(string) do
+ "#{IO.ANSI.bright()}#{string}#{IO.ANSI.reset()}"
+ end
+
+ def red(string) do
+ "#{IO.ANSI.red()}#{string}#{IO.ANSI.reset()}"
+ end
+
+ def yellow(string) do
+ "#{IO.ANSI.yellow()}#{string}#{IO.ANSI.reset()}"
+ end
+
+ def magenta(string) do
+ "#{IO.ANSI.magenta()}#{string}#{IO.ANSI.reset()}"
+ end
+
+ def bright_red(string) do
+ "#{IO.ANSI.bright()}#{IO.ANSI.red()}#{string}#{IO.ANSI.reset()}"
+ end
+
+ def bright_yellow(string) do
+ "#{IO.ANSI.bright()}#{IO.ANSI.yellow()}#{string}#{IO.ANSI.reset()}"
+ end
+
+ def bright_magenta(string) do
+ "#{IO.ANSI.bright()}#{IO.ANSI.magenta()}#{string}#{IO.ANSI.reset()}"
+ end
+end
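+
+# Illustrative example: each helper wraps the string in the corresponding
+# escape sequence(s) and appends a reset:
+#
+#   iex> RabbitMQ.CLI.Core.ANSI.bright_red("alarm")
+#   "\e[1m\e[31malarm\e[0m"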
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/code_path.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/code_path.ex
new file mode 100644
index 0000000000..32636fac6f
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/code_path.ex
@@ -0,0 +1,108 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Core.CodePath do
+ alias RabbitMQ.CLI.Core.{Config, Paths, Platform}
+
+ def add_plugins_to_load_path(opts) do
+ with {:ok, plugins_dir} <- Paths.plugins_dir(opts) do
+ String.split(to_string(plugins_dir), Platform.path_separator())
+ |> Enum.map(&add_directory_plugins_to_load_path/1)
+
+ :ok
+ end
+ end
+
+ def add_directory_plugins_to_load_path(directory_with_plugins_inside_it) do
+ with {:ok, files} <- File.ls(directory_with_plugins_inside_it) do
+ Enum.map(
+ files,
+ fn filename ->
+ cond do
+ String.ends_with?(filename, [".ez"]) ->
+ Path.join([directory_with_plugins_inside_it, filename])
+ |> String.to_charlist()
+ |> add_archive_code_path()
+
+ File.dir?(filename) ->
+ Path.join([directory_with_plugins_inside_it, filename])
+ |> add_dir_code_path()
+
+ true ->
+ {:error, {:not_a_plugin, filename}}
+ end
+ end
+ )
+ end
+ end
+
+ defp add_archive_code_path(ez_dir) do
+ case :erl_prim_loader.list_dir(ez_dir) do
+ {:ok, [app_dir]} ->
+ app_in_ez = :filename.join(ez_dir, app_dir)
+ add_dir_code_path(app_in_ez)
+
+ _ ->
+ {:error, :no_app_dir}
+ end
+ end
+
+ defp add_dir_code_path(app_dir_0) do
+ app_dir = to_charlist(app_dir_0)
+
+ case :erl_prim_loader.list_dir(app_dir) do
+ {:ok, list} ->
+ case Enum.member?(list, 'ebin') do
+ true ->
+ ebin_dir = :filename.join(app_dir, 'ebin')
+ Code.append_path(ebin_dir)
+
+ false ->
+ {:error, :no_ebin}
+ end
+
+ _ ->
+ {:error, :app_dir_empty}
+ end
+ end
+
+ def require_rabbit_and_plugins(_, opts) do
+ require_rabbit_and_plugins(opts)
+ end
+
+ def require_rabbit_and_plugins(opts) do
+ with :ok <- require_rabbit(opts),
+ :ok <- add_plugins_to_load_path(opts),
+ do: :ok
+ end
+
+ def require_rabbit(_, opts) do
+ require_rabbit(opts)
+ end
+
+ def require_rabbit(opts) do
+ home = Config.get_option(:rabbitmq_home, opts)
+
+ case home do
+ nil ->
+ {:error, {:unable_to_load_rabbit, :rabbitmq_home_is_undefined}}
+
+ _ ->
+ case Application.load(:rabbit) do
+ :ok ->
+ Code.ensure_loaded(:rabbit_plugins)
+ :ok
+
+ {:error, {:already_loaded, :rabbit}} ->
+ Code.ensure_loaded(:rabbit_plugins)
+ :ok
+
+ {:error, err} ->
+ {:error, {:unable_to_load_rabbit, err}}
+ end
+ end
+ end
+end
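+
+# Usage sketch: commands that need server-side modules on the local code
+# path typically call
+#
+#   RabbitMQ.CLI.Core.CodePath.require_rabbit_and_plugins(opts)
+#
+# which loads the :rabbit application and appends each plugin's ebin
+# (plain directory or .ez archive) to the code path.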
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/command_modules.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/command_modules.ex
new file mode 100644
index 0000000000..a1b7fc9237
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/command_modules.ex
@@ -0,0 +1,196 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Core.CommandModules do
+ alias RabbitMQ.CLI.Core.Config
+ alias RabbitMQ.CLI.Plugins.Helpers, as: PluginsHelpers
+ alias RabbitMQ.CLI.CommandBehaviour
+
+ import RabbitMQ.CLI.Core.CodePath
+
+ @commands_ns ~r/RabbitMQ.CLI.(.*).Commands/
+
+ def module_map(opts \\ %{}) do
+ Application.get_env(:rabbitmqctl, :commands) || load(opts)
+ end
+
+ def module_map_core(opts \\ %{}) do
+ Application.get_env(:rabbitmqctl, :commands_core) || load_core(opts)
+ end
+
+ def load_core(opts) do
+ scope = script_scope(opts)
+ commands = load_commands_core(scope)
+ Application.put_env(:rabbitmqctl, :commands_core, commands)
+ commands
+ end
+
+ def load(opts) do
+ scope = script_scope(opts)
+ commands = load_commands(scope, opts)
+ Application.put_env(:rabbitmqctl, :commands, commands)
+ commands
+ end
+
+ def script_scope(opts) do
+ scopes = Application.get_env(:rabbitmqctl, :scopes, [])
+ scopes[Config.get_option(:script_name, opts)] || :none
+ end
+
+ def load_commands_core(scope) do
+ make_module_map(ctl_modules(), scope)
+ end
+
+ def load_commands(scope, opts) do
+ make_module_map(plugin_modules(opts) ++ ctl_modules(), scope)
+ end
+
+ def ctl_modules() do
+ Application.spec(:rabbitmqctl, :modules)
+ end
+
+ def plugin_modules(opts) do
+ require_rabbit(opts)
+
+ enabled_plugins =
+ try do
+ PluginsHelpers.read_enabled(opts)
+ catch
+ err ->
+ {:ok, enabled_plugins_file} = PluginsHelpers.enabled_plugins_file(opts)
+ require Logger
+
+ Logger.warn(
+ "Unable to read the enabled plugins file.\n" <>
+ " Reason: #{inspect(err)}\n" <>
+ " Commands provided by plugins will not be available.\n" <>
+ " Please make sure your user has sufficient permissions to read to\n" <>
+ " #{enabled_plugins_file}"
+ )
+
+ []
+ end
+
+ partitioned =
+ Enum.group_by(enabled_plugins, fn app ->
+ case Application.load(app) do
+ :ok -> :loaded
+ {:error, {:already_loaded, ^app}} -> :loaded
+ _ -> :not_found
+ end
+ end)
+
+ loaded = partitioned[:loaded] || []
+ missing = partitioned[:not_found] || []
+ ## If plugins are not in ERL_LIBS, they should be loaded from plugins_dir
+ case missing do
+ [] ->
+ :ok
+
+ _ ->
+ add_plugins_to_load_path(opts)
+ Enum.each(missing, fn app -> Application.load(app) end)
+ end
+
+ Enum.flat_map(loaded ++ missing, fn app ->
+ Application.spec(app, :modules) || []
+ end)
+ end
+
+ defp make_module_map(modules, scope) do
+ commands_ns = Regex.recompile!(@commands_ns)
+
+ modules
+ |> Enum.filter(fn mod ->
+ to_string(mod) =~ commands_ns and
+ module_exists?(mod) and
+ implements_command_behaviour?(mod) and
+ command_in_scope(mod, scope)
+ end)
+ |> Enum.map(&command_tuple/1)
+ |> Map.new()
+ end
+
+ defp module_exists?(nil) do
+ false
+ end
+
+ defp module_exists?(mod) do
+ Code.ensure_loaded?(mod)
+ end
+
+ defp implements_command_behaviour?(nil) do
+ false
+ end
+
+ defp implements_command_behaviour?(module) do
+ Enum.member?(
+ module.module_info(:attributes)[:behaviour] || [],
+ RabbitMQ.CLI.CommandBehaviour
+ )
+ end
+
+ def module_to_command(mod) do
+ mod
+ |> to_string
+ |> strip_namespace
+ |> to_snake_case
+ |> String.replace_suffix("_command", "")
+ end
+
+ defp command_tuple(cmd) do
+ {module_to_command(cmd), cmd}
+ end
+
+ def strip_namespace(str) do
+ str
+ |> String.split(".")
+ |> List.last()
+ end
+
+ def to_snake_case(<<c, str::binary>>) do
+ tail = for <<c <- str>>, into: "", do: snake(c)
+ <<to_lower_char(c), tail::binary>>
+ end
+
+ defp snake(c) do
+ if c >= ?A and c <= ?Z do
+ <<"_", c + 32>>
+ else
+ <<c>>
+ end
+ end
+
+ defp to_lower_char(c) do
+ if c >= ?A and c <= ?Z do
+ c + 32
+ else
+ c
+ end
+ end
+
+ defp command_in_scope(_cmd, :all) do
+ true
+ end
+
+ defp command_in_scope(cmd, scope) do
+ Enum.member?(command_scopes(cmd), scope)
+ end
+
+ defp command_scopes(cmd) do
+ case CommandBehaviour.scopes(cmd) do
+ nil ->
+ Regex.recompile!(@commands_ns)
+ |> Regex.run(to_string(cmd), capture: :all_but_first)
+ |> List.first()
+ |> to_snake_case
+ |> String.to_atom()
+ |> List.wrap()
+ scopes ->
+ scopes
+ end
+ end
+end
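+
+# Illustrative example of the module-to-command name mapping implemented
+# above:
+#
+#   iex> RabbitMQ.CLI.Core.CommandModules.module_to_command(
+#   ...>   RabbitMQ.CLI.Ctl.Commands.ListQueuesCommand)
+#   "list_queues"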
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/config.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/config.ex
new file mode 100644
index 0000000000..251f9e582f
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/config.ex
@@ -0,0 +1,200 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2016-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Core.Config do
+ alias RabbitMQ.CLI.{
+ CommandBehaviour,
+ FormatterBehaviour,
+ PrinterBehaviour
+ }
+
+ alias RabbitMQ.CLI.Core.Helpers
+
+ #
+ # Environment
+ #
+
+ def get_option(name, opts \\ %{}) do
+ raw_option =
+ opts[name] ||
+ get_system_option(name, opts) ||
+ default(name)
+
+ normalise(name, raw_option)
+ end
+
+ def output_less?(opts) do
+ Map.get(opts, :silent, false) || Map.get(opts, :quiet, false)
+ end
+
+ def normalise(:node, nil), do: nil
+
+ def normalise(:node, node) when not is_atom(node) do
+ RabbitMQ.CLI.Core.DataCoercion.to_atom(node)
+ end
+
+ def normalise(:erlang_cookie, nil), do: nil
+
+ def normalise(:erlang_cookie, c) when not is_atom(c) do
+ RabbitMQ.CLI.Core.DataCoercion.to_atom(c)
+ end
+
+ def normalise(:longnames, true), do: :longnames
+ def normalise(:longnames, "true"), do: :longnames
+ def normalise(:longnames, 'true'), do: :longnames
+ def normalise(:longnames, "\"true\""), do: :longnames
+ def normalise(:longnames, _val), do: :shortnames
+ def normalise(_, value), do: value
+
+ def get_system_option(:script_name, _) do
+ Path.basename(:escript.script_name())
+ |> Path.rootname()
+ |> String.to_atom()
+ end
+
+ def get_system_option(:aliases_file, _) do
+ System.get_env("RABBITMQ_CLI_ALIASES_FILE")
+ end
+
+ def get_system_option(:erlang_cookie, _) do
+ System.get_env("RABBITMQ_ERLANG_COOKIE")
+ end
+
+ def get_system_option(:node, %{offline: true} = opts) do
+ remote_node =
+ case opts[:node] do
+ nil -> nil
+ val -> Helpers.normalise_node_option(val, opts[:longnames], opts)
+ end
+
+ context = get_env_context(remote_node, true)
+ get_val_from_env_context(context, :node)
+ end
+
+ def get_system_option(:node, opts) do
+ remote_node =
+ case opts[:node] do
+ nil -> nil
+ val -> Helpers.normalise_node_option(val, opts[:longnames], opts)
+ end
+
+ context = get_env_context(remote_node, false)
+ get_val_from_env_context(context, :node)
+ end
+
+ def get_system_option(name, opts) do
+ work_offline = opts[:offline] == true
+
+ remote_node =
+ case name do
+ :longnames -> nil
+ :rabbitmq_home -> nil
+ _ -> node_flag_or_default(opts)
+ end
+
+ context = get_env_context(remote_node, work_offline)
+ val0 = get_val_from_env_context(context, name)
+
+ val =
+ cond do
+ remote_node != nil and
+ val0 == :undefined and
+ (name == :mnesia_dir or name == :feature_flags_file or name == :plugins_dir or
+ name == :enabled_plugins_file) ->
+ context1 = get_env_context(nil, true)
+ get_val_from_env_context(context1, name)
+
+ true ->
+ val0
+ end
+
+ case val do
+ :undefined -> nil
+ _ -> val
+ end
+ end
+
+ def get_env_context(nil, _) do
+ :rabbit_env.get_context()
+ end
+
+ def get_env_context(remote_node, work_offline) do
+ case work_offline do
+ true -> :rabbit_env.get_context(:offline)
+ false -> :rabbit_env.get_context(remote_node)
+ end
+ end
+
+ def get_val_from_env_context(context, name) do
+ case name do
+ :node -> context[:nodename]
+ :longnames -> context[:nodename_type] == :longnames
+ :rabbitmq_home -> context[:rabbitmq_home]
+ :mnesia_dir -> context[:mnesia_dir]
+ :plugins_dir -> context[:plugins_path]
+ :plugins_expand_dir -> context[:plugins_expand_dir]
+ :feature_flags_file -> context[:feature_flags_file]
+ :enabled_plugins_file -> context[:enabled_plugins_file]
+ end
+ end
+
+ def node_flag_or_default(opts) do
+ case opts[:node] do
+ nil ->
+ # Just in case `opts` was not normalized yet (to get the
+ # default node), we do it here as well.
+ case Helpers.normalise_node_option(opts) do
+ {:error, _} -> nil
+ {:ok, normalized_opts} -> normalized_opts[:node]
+ end
+
+ node ->
+ node
+ end
+ end
+
+ def default(:script_name), do: :rabbitmqctl
+ def default(:node), do: :rabbit
+ def default(_), do: nil
+
+ #
+ # Formatters and Printers
+ #
+
+ def get_formatter(command, %{formatter: formatter}) do
+ module_name = FormatterBehaviour.module_name(formatter)
+
+ case Code.ensure_loaded(module_name) do
+ {:module, _} -> module_name
+ {:error, :nofile} -> CommandBehaviour.formatter(command, default_formatter())
+ end
+ end
+
+ def get_formatter(command, _) do
+ CommandBehaviour.formatter(command, default_formatter())
+ end
+
+ def get_printer(command, %{printer: printer}) do
+ module_name = PrinterBehaviour.module_name(printer)
+
+ case Code.ensure_loaded(module_name) do
+ {:module, _} -> module_name
+ {:error, :nofile} -> CommandBehaviour.printer(command, default_printer())
+ end
+ end
+
+ def get_printer(command, _) do
+ CommandBehaviour.printer(command, default_printer())
+ end
+
+ def default_formatter() do
+ RabbitMQ.CLI.Formatters.String
+ end
+
+ def default_printer() do
+ RabbitMQ.CLI.Printers.StdIO
+ end
+end
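+
+# Illustrative example: an explicit option takes precedence over environment
+# and defaults, and the value is normalised (node names become atoms):
+#
+#   iex> RabbitMQ.CLI.Core.Config.get_option(:node, %{node: "rabbit@myhost"})
+#   :"rabbit@myhost"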
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/data_coercion.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/data_coercion.ex
new file mode 100644
index 0000000000..9c3d3e7344
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/data_coercion.ex
@@ -0,0 +1,21 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defprotocol RabbitMQ.CLI.Core.DataCoercion do
+ def to_atom(data)
+end
+
+defimpl RabbitMQ.CLI.Core.DataCoercion, for: Atom do
+ def to_atom(atom), do: atom
+end
+
+defimpl RabbitMQ.CLI.Core.DataCoercion, for: BitString do
+ def to_atom(string), do: String.to_atom(string)
+end
+
+defimpl RabbitMQ.CLI.Core.DataCoercion, for: List do
+ def to_atom(list), do: List.to_atom(list)
+end
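+
+# Illustrative examples: the protocol accepts atoms, binaries and charlists:
+#
+#   iex> RabbitMQ.CLI.Core.DataCoercion.to_atom(:rabbit)
+#   :rabbit
+#   iex> RabbitMQ.CLI.Core.DataCoercion.to_atom("rabbit@host")
+#   :"rabbit@host"
+#   iex> RabbitMQ.CLI.Core.DataCoercion.to_atom('rabbit@host')
+#   :"rabbit@host"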
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/distribution.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/distribution.ex
new file mode 100644
index 0000000000..403c9dd970
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/distribution.ex
@@ -0,0 +1,138 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2016-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Core.Distribution do
+ alias RabbitMQ.CLI.Core.{ANSI, Config, Helpers}
+
+ #
+ # API
+ #
+
+ def start() do
+ start(%{})
+ end
+
+ def start(options) do
+ node_name_type = Config.get_option(:longnames, options)
+ result = start(node_name_type, 10, :undefined)
+ ensure_cookie(options)
+ result
+ end
+
+ def stop, do: Node.stop()
+
+ def start_as(node_name, options) do
+ node_name_type = Config.get_option(:longnames, options)
+ result = start_with_epmd(node_name, node_name_type)
+ ensure_cookie(options)
+ result
+ end
+
+  ## Optimization: we only try to start EPMD when starting distribution fails
+ def start_with_epmd(node_name, node_name_type) do
+ case Node.start(node_name, node_name_type) do
+ {:ok, _} = ok ->
+ ok
+
+ {:error, {:already_started, _}} = started ->
+ started
+
+ {:error, {{:already_started, _}, _}} = started ->
+ started
+
+ ## EPMD can be stopped. Retry with EPMD
+ {:error, _} ->
+ :rabbit_nodes_common.ensure_epmd()
+ Node.start(node_name, node_name_type)
+ end
+ end
+
+ def per_node_timeout(:infinity, _) do
+ :infinity
+ end
+
+ def per_node_timeout(timeout, node_count) do
+ Kernel.trunc(timeout / node_count)
+ end
+
+ #
+ # Implementation
+ #
+
+ def ensure_cookie(options) do
+ case Config.get_option(:erlang_cookie, options) do
+ nil ->
+ :ok
+
+ cookie ->
+ Node.set_cookie(cookie)
+ maybe_warn_about_deprecated_rabbitmq_erlang_cookie_env_variable(options)
+ :ok
+ end
+ end
+
+ defp start(_opt, 0, last_err) do
+ {:error, last_err}
+ end
+
+ defp start(node_name_type, attempts, _last_err) do
+ candidate = generate_cli_node_name(node_name_type)
+
+ case start_with_epmd(candidate, node_name_type) do
+ {:ok, _} ->
+ :ok
+
+ {:error, {:already_started, pid}} ->
+ {:ok, pid}
+
+ {:error, {{:already_started, pid}, _}} ->
+ {:ok, pid}
+
+ {:error, reason} ->
+ start(node_name_type, attempts - 1, reason)
+ end
+ end
+
+ defp generate_cli_node_name(node_name_type) do
+ case Helpers.get_rabbit_hostname(node_name_type) do
+ {:error, _} = err ->
+ throw(err)
+
+ rmq_hostname ->
+        # This limits the number of possible unique node names used by CLI tools, to keep
+        # the atom table of the contacted node from growing past its limit. The IDs must
+        # still be reasonably unique to allow for concurrent CLI tool execution.
+ #
+ # Enum.random/1 is constant time and space with range arguments https://hexdocs.pm/elixir/Enum.html#random/1.
+ id = Enum.random(1..1024)
+ String.to_atom("rabbitmqcli-#{id}-#{rmq_hostname}")
+ end
+ end
+
+ defp maybe_warn_about_deprecated_rabbitmq_erlang_cookie_env_variable(options) do
+ case System.get_env("RABBITMQ_ERLANG_COOKIE") do
+ nil ->
+ :ok
+
+ _ ->
+ case Config.output_less?(options) do
+ true ->
+ :ok
+
+ false ->
+ warning =
+ ANSI.bright_red(
+ "RABBITMQ_ERLANG_COOKIE env variable support is deprecated and will be REMOVED in a future version. "
+ ) <>
+ ANSI.yellow(
+ "Use the $HOME/.erlang.cookie file or the --erlang-cookie switch instead."
+ )
+
+ IO.puts(warning)
+ end
+ end
+ end
+end
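+
+# Usage sketch: CLI entry points start distribution before making any RPC
+# calls, e.g.
+#
+#   RabbitMQ.CLI.Core.Distribution.start(%{longnames: false})
+#
+# which picks a name such as :"rabbitmqcli-123-rabbit@myhost", retrying up
+# to 10 times and starting epmd on demand.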
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/doc_guide.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/doc_guide.ex
new file mode 100644
index 0000000000..c75dcb0d7c
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/doc_guide.ex
@@ -0,0 +1,67 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2016-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Core.DocGuide.Macros do
+ @moduledoc """
+ Helper module that works around a compiler limitation: macros cannot
+ be used in a module that defines them.
+ """
+ @default_domain "rabbitmq.com"
+
+ defmacro defguide(name, opts \\ []) do
+ domain = Keyword.get(opts, :domain, @default_domain)
+ fn_name = String.to_atom(name)
+ path_segment = Keyword.get(opts, :path_segment, String.replace(name, "_", "-"))
+
+ quote do
+ def unquote(fn_name)() do
+ unquote("https://#{domain}/#{path_segment}.html")
+ end
+ end
+ end
+end
+
+defmodule RabbitMQ.CLI.Core.DocGuide do
+ require RabbitMQ.CLI.Core.DocGuide.Macros
+ alias RabbitMQ.CLI.Core.DocGuide.Macros
+
+ #
+ # API
+ #
+
+ Macros.defguide("access_control")
+ Macros.defguide("alarms")
+ Macros.defguide("disk_alarms")
+ Macros.defguide("alternate_exchange", path_segment: "ae")
+ Macros.defguide("channels")
+ Macros.defguide("cli")
+ Macros.defguide("clustering")
+ Macros.defguide("cluster_formation")
+ Macros.defguide("connections")
+ Macros.defguide("configuration", path_segment: "configure")
+ Macros.defguide("consumers")
+ Macros.defguide("definitions")
+ Macros.defguide("erlang_versions", path_segment: "which-erlang")
+ Macros.defguide("feature_flags")
+ Macros.defguide("firehose")
+ Macros.defguide("mirroring", path_segment: "ha")
+ Macros.defguide("logging")
+ Macros.defguide("management")
+ Macros.defguide("memory_use")
+ Macros.defguide("monitoring")
+ Macros.defguide("networking")
+ Macros.defguide("parameters")
+ Macros.defguide("publishers")
+ Macros.defguide("plugins")
+ Macros.defguide("queues")
+ Macros.defguide("quorum_queues", domain: "next.rabbitmq.com")
+ Macros.defguide("stream_queues", domain: "next.rabbitmq.com")
+ Macros.defguide("runtime_tuning", path_segment: "runtime")
+ Macros.defguide("tls", path_segment: "ssl")
+ Macros.defguide("troubleshooting")
+ Macros.defguide("virtual_hosts", path_segments: "vhosts")
+ Macros.defguide("upgrade")
+end
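+
+# Illustrative example: each defguide/2 call above expands to a zero-arity
+# function returning a documentation URL:
+#
+#   iex> RabbitMQ.CLI.Core.DocGuide.tls()
+#   "https://rabbitmq.com/ssl.html"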
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/erl_eval.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/erl_eval.ex
new file mode 100644
index 0000000000..8d2e3aff9e
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/erl_eval.ex
@@ -0,0 +1,26 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Core.ErlEval do
+ def parse_expr(expr) do
+ expr_str = to_charlist(expr)
+
+ case :erl_scan.string(expr_str) do
+ {:ok, scanned, _} ->
+ case :erl_parse.parse_exprs(scanned) do
+ {:ok, parsed} -> {:ok, parsed}
+ {:error, err} -> {:error, format_parse_error(err)}
+ end
+
+ {:error, err, _} ->
+ {:error, format_parse_error(err)}
+ end
+ end
+
+ defp format_parse_error({_line, mod, err}) do
+ to_string(:lists.flatten(mod.format_error(err)))
+ end
+end
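+
+# Illustrative example: the parsed forms can be evaluated with :erl_eval:
+#
+#   iex> {:ok, exprs} = RabbitMQ.CLI.Core.ErlEval.parse_expr("1 + 1.")
+#   iex> :erl_eval.exprs(exprs, [])
+#   {:value, 2, []}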
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/exit_codes.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/exit_codes.ex
new file mode 100644
index 0000000000..9e416d7153
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/exit_codes.ex
@@ -0,0 +1,56 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+# Lists predefined error exit codes used by RabbitMQ CLI tools.
+# The codes are adopted from [1], which (according to our team's research)
+# is possibly the most standardized set of command line tool exit codes there is.
+#
+# 1. https://www.freebsd.org/cgi/man.cgi?query=sysexits&apropos=0&sektion=0&manpath=FreeBSD+12.0-RELEASE&arch=default&format=html
+defmodule RabbitMQ.CLI.Core.ExitCodes do
+ @exit_ok 0
+ @exit_usage 64
+ @exit_dataerr 65
+ @exit_nouser 67
+ @exit_unavailable 69
+ @exit_software 70
+ @exit_tempfail 75
+ @exit_config 78
+
+ @type exit_code :: integer
+
+ def exit_ok, do: @exit_ok
+ def exit_usage, do: @exit_usage
+ def exit_dataerr, do: @exit_dataerr
+ def exit_nouser, do: @exit_nouser
+ def exit_unavailable, do: @exit_unavailable
+ def exit_software, do: @exit_software
+ def exit_tempfail, do: @exit_tempfail
+ def exit_config, do: @exit_config
+
+ def exit_code_for({:validation_failure, :not_enough_args}), do: exit_usage()
+ def exit_code_for({:validation_failure, :too_many_args}), do: exit_usage()
+ def exit_code_for({:validation_failure, {:not_enough_args, _}}), do: exit_usage()
+ def exit_code_for({:validation_failure, {:too_many_args, _}}), do: exit_usage()
+ def exit_code_for({:validation_failure, {:bad_argument, _}}), do: exit_dataerr()
+ def exit_code_for({:validation_failure, :bad_argument}), do: exit_dataerr()
+ def exit_code_for({:validation_failure, :eperm}), do: exit_dataerr()
+ def exit_code_for({:validation_failure, {:bad_option, _}}), do: exit_usage()
+ def exit_code_for({:validation_failure, _}), do: exit_usage()
+ # a special case of bad_argument
+ def exit_code_for({:no_such_vhost, _}), do: exit_dataerr()
+ def exit_code_for({:no_such_user, _}), do: exit_nouser()
+ def exit_code_for({:badrpc_multi, :timeout, _}), do: exit_tempfail()
+ def exit_code_for({:badrpc, :timeout}), do: exit_tempfail()
+ def exit_code_for({:badrpc, {:timeout, _}}), do: exit_tempfail()
+ def exit_code_for({:badrpc, {:timeout, _, _}}), do: exit_tempfail()
+ def exit_code_for(:timeout), do: exit_tempfail()
+ def exit_code_for({:timeout, _}), do: exit_tempfail()
+ def exit_code_for({:badrpc_multi, :nodedown, _}), do: exit_unavailable()
+ def exit_code_for({:badrpc, :nodedown}), do: exit_unavailable()
+ def exit_code_for({:node_name, _}), do: exit_dataerr()
+ def exit_code_for({:incompatible_version, _, _}), do: exit_unavailable()
+ def exit_code_for({:error, _}), do: exit_unavailable()
+end
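+
+# Illustrative examples:
+#
+#   iex> RabbitMQ.CLI.Core.ExitCodes.exit_code_for({:badrpc, :timeout})
+#   75
+#   iex> RabbitMQ.CLI.Core.ExitCodes.exit_code_for({:validation_failure, :too_many_args})
+#   64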
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/feature_flags.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/feature_flags.ex
new file mode 100644
index 0000000000..4b4b8c8d5d
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/feature_flags.ex
@@ -0,0 +1,19 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Core.FeatureFlags do
+
+ #
+ # API
+ #
+
+ def feature_flag_lines(feature_flags) do
+ feature_flags
+ |> Enum.map(fn %{name: name, state: state} ->
+ "Flag: #{name}, state: #{state}"
+ end)
+ end
+end
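+
+# Illustrative example (map shape as consumed above):
+#
+#   iex> RabbitMQ.CLI.Core.FeatureFlags.feature_flag_lines(
+#   ...>   [%{name: :quorum_queue, state: :enabled}])
+#   ["Flag: quorum_queue, state: enabled"]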
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/helpers.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/helpers.ex
new file mode 100644
index 0000000000..97bd7c7bd9
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/helpers.ex
@@ -0,0 +1,148 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Core.Helpers do
+ alias RabbitMQ.CLI.Core.{Config, NodeName}
+ require Record
+
+ def get_rabbit_hostname(node_name_type \\ :shortnames) do
+ normalise_node(Config.get_option(:node), node_name_type)
+ end
+
+ def normalise_node(nil, node_name_type) do
+ normalise_node(Config.get_option(:node), node_name_type)
+ end
+
+ def normalise_node(name, node_name_type) do
+ case NodeName.create(name, node_name_type) do
+ {:ok, node_name} -> node_name
+ other -> other
+ end
+ end
+
+ # rabbitmq/rabbitmq-cli#278
+ def normalise_node_option(options) do
+ node_opt = Config.get_option(:node, options)
+ longnames_opt = Config.get_option(:longnames, options)
+ case NodeName.create(node_opt, longnames_opt) do
+ {:error, _} = err ->
+ err
+ {:ok, val} ->
+ {:ok, Map.put(options, :node, val)}
+ end
+ end
+
+ def normalise_node_option(nil, _, _) do
+ nil
+ end
+ def normalise_node_option(node_opt, longnames_opt, options) do
+ case NodeName.create(node_opt, longnames_opt) do
+ {:error, _} = err ->
+ err
+ {:ok, val} ->
+ {:ok, Map.put(options, :node, val)}
+ end
+ end
+
+ def case_insensitive_format(%{format: format} = opts) do
+ %{opts | format: String.downcase(format)}
+ end
+ def case_insensitive_format(opts), do: opts
+
+ def nodes_in_cluster(node, timeout \\ :infinity) do
+ with_nodes_in_cluster(node, fn nodes -> nodes end, timeout)
+ end
+
+ def with_nodes_in_cluster(node, fun, timeout \\ :infinity) do
+ case :rpc.call(node, :rabbit_mnesia, :cluster_nodes, [:running], timeout) do
+ {:badrpc, _} = err -> err
+ value -> fun.(value)
+ end
+ end
+
+ def node_running?(node) do
+ :net_adm.ping(node) == :pong
+ end
+
+ # Convert function to stream
+ def defer(fun) do
+ Stream.iterate(:ok, fn _ -> fun.() end)
+ |> Stream.drop(1)
+ |> Stream.take(1)
+ end
+
+  # Streamify a function sequence, passing the result along.
+  # Execution can be terminated by an error {:error, _}.
+  # The error will be the last element in the stream.
+  # Functions can return {:ok, val}, so val will be passed
+  # to the next function, or {:ok, val, output}, where
+  # val will be passed on and output will be emitted into the stream.
+ def stream_until_error_parameterised(funs, init) do
+ Stream.transform(funs, {:just, init}, fn
+ f, {:just, val} ->
+ case f.(val) do
+ {:error, _} = err -> {[err], :nothing}
+ :ok -> {[], {:just, val}}
+ {:ok, new_val} -> {[], {:just, new_val}}
+ {:ok, new_val, out} -> {[out], {:just, new_val}}
+ end
+
+ _, :nothing ->
+ {:halt, :nothing}
+ end)
+ end
+
+ # Streamify function sequence.
+ # Execution can be terminated by an error {:error, _}.
+ # The error will be the last element in the stream.
+ def stream_until_error(funs) do
+ stream_until_error_parameterised(
+ Enum.map(
+ funs,
+ fn fun ->
+ fn :no_param ->
+ case fun.() do
+ {:error, _} = err -> err
+ other -> {:ok, :no_param, other}
+ end
+ end
+ end
+ ),
+ :no_param
+ )
+ end
+
+ def apply_if_exported(mod, fun, args, default) do
+ Code.ensure_loaded(mod)
+ case function_exported?(mod, fun, length(args)) do
+ true -> apply(mod, fun, args)
+ false -> default
+ end
+ end
+
+ def cli_acting_user, do: "rmq-cli"
+
+ def string_or_inspect(val) do
+ case String.Chars.impl_for(val) do
+ nil ->
+ inspect(val)
+
+ _ ->
+ try do
+ to_string(val)
+ catch
+ _, _ -> inspect(val)
+ end
+ end
+ end
+
+ def evaluate_input_as_term(input) do
+ {:ok, tokens, _end_line} = :erl_scan.string(to_charlist(input <> "."))
+ {:ok, abs_form} = :erl_parse.parse_exprs(tokens)
+ {:value, term_value, _bs} = :erl_eval.exprs(abs_form, :erl_eval.new_bindings())
+ term_value
+ end
+end
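
As a sketch of the streaming helpers above (the step functions are made up for illustration), stream_until_error/1 emits each function's result until the first {:error, _}, which becomes the last element of the stream:

    alias RabbitMQ.CLI.Core.Helpers

    funs = [
      fn -> "step 1 done" end,
      fn -> {:error, :boom} end,
      fn -> "never runs" end
    ]

    Helpers.stream_until_error(funs) |> Enum.to_list()
    # => ["step 1 done", {:error, :boom}]
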
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/input.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/input.ex
new file mode 100644
index 0000000000..5e1328be29
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/input.ex
@@ -0,0 +1,39 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Core.Input do
+ alias RabbitMQ.CLI.Core.Config
+
+ def consume_single_line_string_with_prompt(prompt, opts) do
+ val = case Config.output_less?(opts) do
+ true ->
+ IO.read(:stdio, :line)
+ false ->
+ IO.puts(prompt)
+ IO.read(:stdio, :line)
+ end
+
+ case val do
+ :eof -> :eof
+ "" -> :eof
+ s -> String.trim(s)
+ end
+ end
+
+ def consume_multiline_string() do
+ val = IO.read(:stdio, :all)
+
+ case val do
+ :eof -> :eof
+ "" -> :eof
+ s -> String.trim(s)
+ end
+ end
+
+ def infer_password(prompt, opts) do
+ consume_single_line_string_with_prompt(prompt, opts)
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/listeners.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/listeners.ex
new file mode 100644
index 0000000000..0bc162186c
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/listeners.ex
@@ -0,0 +1,312 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Core.Listeners do
+ import Record, only: [defrecord: 3, extract: 2]
+ import RabbitCommon.Records
+ import RabbitMQ.CLI.Core.DataCoercion
+
+ #
+ # API
+ #
+
+ defrecord :certificate, :Certificate, extract(:Certificate, from_lib: "public_key/include/public_key.hrl")
+ defrecord :tbscertificate, :TBSCertificate, extract(:TBSCertificate, from_lib: "public_key/include/public_key.hrl")
+ defrecord :validity, :Validity, extract(:Validity, from_lib: "public_key/include/public_key.hrl")
+
+ def listeners_on(listeners, target_node) do
+ Enum.filter(listeners, fn listener(node: node) ->
+ node == target_node
+ end)
+ end
+
+ def listeners_with_certificates(listeners) do
+ Enum.filter(listeners, fn listener(opts: opts) ->
+ Keyword.has_key?(opts, :cacertfile) or Keyword.has_key?(opts, :certfile)
+ end)
+ end
+
+ def listener_lines(listeners) do
+ listeners
+ |> listener_maps
+ |> Enum.map(fn %{interface: interface, port: port, protocol: protocol} ->
+ "Interface: #{interface}, port: #{port}, protocol: #{protocol}, purpose: #{
+ protocol_label(to_atom(protocol))
+ }"
+ end)
+ end
+ def listener_lines(listeners, node) do
+ listeners
+ |> listener_maps
+ |> Enum.map(fn %{interface: interface, port: port, protocol: protocol} ->
+ "Node: #{node}, interface: #{interface}, port: #{port}, protocol: #{protocol}, purpose: #{
+ protocol_label(to_atom(protocol))
+ }"
+ end)
+ end
+
+ def listener_map(listener) when is_map(listener) do
+ listener
+ end
+ def listener_map(listener) do
+ # Listener options are left out intentionally: they can contain deeply nested values
+ # that are impossible to serialise to JSON.
+ #
+    # The management plugin/HTTP API has had its fair share of bugs because of that
+    # and now filters out a lot of options. Raw listener data can be seen in
+    # rabbitmq-diagnostics status.
+ listener(node: node, protocol: protocol, ip_address: interface, port: port) = listener
+
+ %{
+ node: node,
+ protocol: protocol,
+ interface: :inet.ntoa(interface) |> to_string |> maybe_enquote_interface,
+ port: port,
+ purpose: protocol_label(to_atom(protocol))
+ }
+ end
+
+ def listener_maps(listeners) do
+ Enum.map(listeners, &listener_map/1)
+ end
+
+ def listener_certs(listener) do
+ listener(node: node, protocol: protocol, ip_address: interface, port: port, opts: opts) = listener
+
+ %{
+ node: node,
+ protocol: protocol,
+ interface: :inet.ntoa(interface) |> to_string |> maybe_enquote_interface,
+ port: port,
+ purpose: protocol_label(to_atom(protocol)),
+ certfile: read_cert(Keyword.get(opts, :certfile)),
+ cacertfile: read_cert(Keyword.get(opts, :cacertfile))
+ }
+ end
+
+ def read_cert(nil) do
+ nil
+ end
+ def read_cert({:pem, pem}) do
+ pem
+ end
+ def read_cert(path) do
+ case File.read(path) do
+ {:ok, bin} ->
+ bin
+ {:error, _} = err ->
+ err
+ end
+ end
+
+ def listener_expiring_within(listener, seconds) do
+ listener(node: node, protocol: protocol, ip_address: interface, port: port, opts: opts) = listener
+ certfile = Keyword.get(opts, :certfile)
+ cacertfile = Keyword.get(opts, :cacertfile)
+ now = :calendar.datetime_to_gregorian_seconds(:calendar.universal_time())
+ expiry_date = now + seconds
+ certfile_expires_on = expired(cert_validity(read_cert(certfile)), expiry_date)
+ cacertfile_expires_on = expired(cert_validity(read_cert(cacertfile)), expiry_date)
+ case {certfile_expires_on, cacertfile_expires_on} do
+ {[], []} ->
+ false
+ _ ->
+ %{
+ node: node,
+ protocol: protocol,
+ interface: interface,
+ port: port,
+ certfile: certfile,
+ cacertfile: cacertfile,
+ certfile_expires_on: certfile_expires_on,
+ cacertfile_expires_on: cacertfile_expires_on
+ }
+ end
+ end
+
+ def expired_listener_map(%{node: node, protocol: protocol, interface: interface, port: port, certfile_expires_on: certfile_expires_on, cacertfile_expires_on: cacertfile_expires_on, certfile: certfile, cacertfile: cacertfile}) do
+ %{
+ node: node,
+ protocol: protocol,
+ interface: :inet.ntoa(interface) |> to_string |> maybe_enquote_interface,
+ port: port,
+ purpose: protocol_label(to_atom(protocol)),
+ certfile: certfile |> to_string,
+ cacertfile: cacertfile |> to_string,
+ certfile_expires_on: expires_on_list(certfile_expires_on),
+ cacertfile_expires_on: expires_on_list(cacertfile_expires_on)
+ }
+ end
+
+ def expires_on_list({:error, _} = error) do
+ [error]
+ end
+ def expires_on_list(expires) do
+ Enum.map(expires, &expires_on/1)
+ end
+
+ def expires_on({:error, _} = error) do
+ error
+ end
+ def expires_on(seconds) do
+ {:ok, naive} = NaiveDateTime.from_erl(:calendar.gregorian_seconds_to_datetime(seconds))
+ NaiveDateTime.to_string(naive)
+ end
+
+ def expired(nil, _) do
+ []
+ end
+ def expired({:error, _} = error, _) do
+ error
+ end
+ def expired(expires, expiry_date) do
+    Enum.filter(expires, fn
+      {:error, _} -> true
+      seconds -> seconds < expiry_date
+    end)
+ end
+
+ def cert_validity(nil) do
+ nil
+ end
+ def cert_validity(cert) do
+ dsa_entries = :public_key.pem_decode(cert)
+ case dsa_entries do
+ [] ->
+ {:error, "The certificate file provided does not contain any PEM entry."}
+ _ ->
+ now = :calendar.datetime_to_gregorian_seconds(:calendar.universal_time())
+ Enum.map(dsa_entries, fn ({:Certificate, _, _} = dsa_entry) ->
+ certificate(tbsCertificate: tbs_certificate) = :public_key.pem_entry_decode(dsa_entry)
+ tbscertificate(validity: validity) = tbs_certificate
+ validity(notAfter: not_after, notBefore: not_before) = validity
+ start = :pubkey_cert.time_str_2_gregorian_sec(not_before)
+ case start > now do
+ true ->
+ {:ok, naive} = NaiveDateTime.from_erl(:calendar.gregorian_seconds_to_datetime(start))
+ startdate = NaiveDateTime.to_string(naive)
+ {:error, "Certificate is not yet valid. It starts on #{startdate}"}
+ false ->
+ :pubkey_cert.time_str_2_gregorian_sec(not_after)
+ end
+ ({type, _, _}) ->
+ {:error, "The certificate file provided contains a #{type} entry."}
+ end)
+ end
+ end
+
+ def listener_rows(listeners) do
+ for listener(node: node, protocol: protocol, ip_address: interface, port: port) <- listeners do
+ # Listener options are left out intentionally, see above
+ [
+ node: node,
+ protocol: protocol,
+ interface: :inet.ntoa(interface) |> to_string |> maybe_enquote_interface,
+ port: port,
+ purpose: protocol_label(to_atom(protocol))
+ ]
+ end
+ end
+
+ def protocol_label(:amqp), do: "AMQP 0-9-1 and AMQP 1.0"
+ def protocol_label(:'amqp/ssl'), do: "AMQP 0-9-1 and AMQP 1.0 over TLS"
+ def protocol_label(:mqtt), do: "MQTT"
+ def protocol_label(:'mqtt/ssl'), do: "MQTT over TLS"
+ def protocol_label(:stomp), do: "STOMP"
+ def protocol_label(:'stomp/ssl'), do: "STOMP over TLS"
+ def protocol_label(:http), do: "HTTP API"
+ def protocol_label(:https), do: "HTTP API over TLS (HTTPS)"
+ def protocol_label(:"http/web-mqtt"), do: "MQTT over WebSockets"
+ def protocol_label(:"https/web-mqtt"), do: "MQTT over WebSockets and TLS (HTTPS)"
+ def protocol_label(:"http/web-stomp"), do: "STOMP over WebSockets"
+ def protocol_label(:"https/web-stomp"), do: "STOMP over WebSockets and TLS (HTTPS)"
+ def protocol_label(:"http/prometheus"), do: "Prometheus exporter API over HTTP"
+ def protocol_label(:"https/prometheus"), do: "Prometheus exporter API over TLS (HTTPS)"
+ def protocol_label(:clustering), do: "inter-node and CLI tool communication"
+ def protocol_label(other), do: to_string(other)
+
+ def normalize_protocol(proto) do
+ val = proto |> to_string |> String.downcase()
+
+ case val do
+ "amqp091" -> "amqp"
+ "amqp0.9.1" -> "amqp"
+ "amqp0-9-1" -> "amqp"
+ "amqp0_9_1" -> "amqp"
+ "amqp10" -> "amqp"
+ "amqp1.0" -> "amqp"
+ "amqp1-0" -> "amqp"
+ "amqp1_0" -> "amqp"
+ "amqps" -> "amqp/ssl"
+ "mqtt3.1" -> "mqtt"
+ "mqtt3.1.1" -> "mqtt"
+ "mqtt31" -> "mqtt"
+ "mqtt311" -> "mqtt"
+ "mqtt3_1" -> "mqtt"
+ "mqtt3_1_1" -> "mqtt"
+ "mqtts" -> "mqtt/ssl"
+ "mqtt+tls" -> "mqtt/ssl"
+ "mqtt+ssl" -> "mqtt/ssl"
+ "stomp1.0" -> "stomp"
+ "stomp1.1" -> "stomp"
+ "stomp1.2" -> "stomp"
+ "stomp10" -> "stomp"
+ "stomp11" -> "stomp"
+ "stomp12" -> "stomp"
+ "stomp1_0" -> "stomp"
+ "stomp1_1" -> "stomp"
+ "stomp1_2" -> "stomp"
+ "stomps" -> "stomp/ssl"
+ "stomp+tls" -> "stomp/ssl"
+ "stomp+ssl" -> "stomp/ssl"
+ "https" -> "https"
+ "http1" -> "http"
+ "http1.1" -> "http"
+ "http_api" -> "http"
+ "management" -> "http"
+ "management_ui" -> "http"
+ "ui" -> "http"
+ "cli" -> "clustering"
+ "distribution" -> "clustering"
+ "webmqtt" -> "http/web-mqtt"
+ "web-mqtt" -> "http/web-mqtt"
+ "web_mqtt" -> "http/web-mqtt"
+ "webmqtt/tls" -> "https/web-mqtt"
+ "web-mqtt/tls" -> "https/web-mqtt"
+ "webmqtt/ssl" -> "https/web-mqtt"
+ "web-mqtt/ssl" -> "https/web-mqtt"
+ "webmqtt+tls" -> "https/web-mqtt"
+ "web-mqtt+tls" -> "https/web-mqtt"
+ "webmqtt+ssl" -> "https/web-mqtt"
+ "web-mqtt+ssl" -> "https/web-mqtt"
+ "webstomp" -> "http/web-stomp"
+ "web-stomp" -> "http/web-stomp"
+ "web_stomp" -> "http/web-stomp"
+ "webstomp/tls" -> "https/web-stomp"
+ "web-stomp/tls" -> "https/web-stomp"
+ "webstomp/ssl" -> "https/web-stomp"
+ "web-stomp/ssl" -> "https/web-stomp"
+ "webstomp+tls" -> "https/web-stomp"
+ "web-stomp+tls" -> "https/web-stomp"
+ "webstomp+ssl" -> "https/web-stomp"
+ "web-stomp+ssl" -> "https/web-stomp"
+ _ -> val
+ end
+ end
+
+ #
+ # Implementation
+ #
+
+ defp maybe_enquote_interface(value) do
+    # This simplistic way of distinguishing IPv6 addresses,
+    # network address ranges, etc. actually works better
+    # for the kind of values we can get here than :inet functions. MK.
+ regex = Regex.recompile!(~r/:/)
+ case value =~ regex do
+ true -> "[#{value}]"
+ false -> value
+ end
+ end
+end
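
A few illustrative calls to the pure helpers in this module (inputs assumed; results follow from the clauses above):

    alias RabbitMQ.CLI.Core.Listeners

    Listeners.normalize_protocol("amqp0-9-1")     # => "amqp"
    Listeners.normalize_protocol("mqtt+tls")      # => "mqtt/ssl"
    Listeners.protocol_label(:"https/web-stomp")  # => "STOMP over WebSockets and TLS (HTTPS)"
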
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/log_files.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/log_files.ex
new file mode 100644
index 0000000000..b6d104bff0
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/log_files.ex
@@ -0,0 +1,52 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2019-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Core.LogFiles do
+ @spec get_log_locations(atom, integer | :infinity) :: [String.t] | {:badrpc, term}
+ def get_log_locations(node_name, timeout) do
+ case :rabbit_misc.rpc_call(node_name,
+ :rabbit_lager, :log_locations, [],
+ timeout) do
+ {:badrpc, _} = error -> error;
+ list -> Enum.map(list, &to_string/1)
+ end
+ end
+
+ @spec get_default_log_location(atom, integer | :infinity) ::
+ {:ok, String.t} | {:badrpc, term} | {:error, term}
+ def get_default_log_location(node_name, timeout) do
+ case get_log_locations(node_name, timeout) do
+ {:badrpc, _} = error -> error;
+ [] -> {:error, "No log files configured on the node"};
+ [first_log | _] = log_locations ->
+ case get_log_config_file_location(node_name, timeout) do
+ {:badrpc, _} = error -> error;
+ nil -> {:ok, first_log};
+ location ->
+ case Enum.member?(log_locations, location) do
+ true -> {:ok, to_string(location)};
+ ## Configured location was not propagated to lager?
+ false -> {:ok, first_log}
+ end
+ end
+ end
+ end
+
+ defp get_log_config_file_location(node_name, timeout) do
+ case :rabbit_misc.rpc_call(node_name,
+ :application, :get_env, [:rabbit, :log, :none],
+ timeout) do
+ {:badrpc, _} = error -> error;
+ :none -> nil;
+ log_config ->
+ case log_config[:file] do
+ nil -> nil;
+ file_config ->
+ file_config[:file]
+ end
+ end
+ end
+end
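
A hedged usage sketch; the node name and timeout are placeholders and a reachable node is assumed:

    alias RabbitMQ.CLI.Core.LogFiles

    case LogFiles.get_default_log_location(:rabbit@localhost, 5_000) do
      {:ok, path}       -> IO.puts("Main log file: #{path}")
      {:badrpc, reason} -> IO.puts("Cannot contact node: #{inspect(reason)}")
      {:error, message} -> IO.puts(message)
    end
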
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/memory.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/memory.ex
new file mode 100644
index 0000000000..92db5b5502
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/memory.ex
@@ -0,0 +1,105 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Core.Memory do
+ alias RabbitMQ.CLI.InformationUnit, as: IU
+
+ def memory_units do
+ ["k", "kiB", "M", "MiB", "G", "GiB", "kB", "MB", "GB", ""]
+ end
+
+ def memory_unit_absolute(num, unit) when is_number(num) and num < 0,
+ do: {:bad_argument, [num, unit]}
+
+ def memory_unit_absolute(num, "k") when is_number(num), do: power_as_int(num, 2, 10)
+ def memory_unit_absolute(num, "kiB") when is_number(num), do: power_as_int(num, 2, 10)
+ def memory_unit_absolute(num, "M") when is_number(num), do: power_as_int(num, 2, 20)
+ def memory_unit_absolute(num, "MiB") when is_number(num), do: power_as_int(num, 2, 20)
+ def memory_unit_absolute(num, "G") when is_number(num), do: power_as_int(num, 2, 30)
+ def memory_unit_absolute(num, "GiB") when is_number(num), do: power_as_int(num, 2, 30)
+ def memory_unit_absolute(num, "kB") when is_number(num), do: power_as_int(num, 10, 3)
+ def memory_unit_absolute(num, "MB") when is_number(num), do: power_as_int(num, 10, 6)
+ def memory_unit_absolute(num, "GB") when is_number(num), do: power_as_int(num, 10, 9)
+ def memory_unit_absolute(num, "") when is_number(num), do: num
+ def memory_unit_absolute(num, unit) when is_number(num), do: {:bad_argument, [unit]}
+ def memory_unit_absolute(num, unit), do: {:bad_argument, [num, unit]}
+
+ def power_as_int(num, x, y), do: round(num * :math.pow(x, y))
+
+ def compute_relative_values(all_pairs) when is_map(all_pairs) do
+ compute_relative_values(Enum.into(all_pairs, []))
+ end
+ def compute_relative_values(all_pairs) do
+ num_pairs = Keyword.delete(all_pairs, :strategy)
+ # Includes RSS, allocated and runtime-used ("erlang") values.
+ # See https://github.com/rabbitmq/rabbitmq-server/pull/1404.
+ totals = Keyword.get(num_pairs, :total)
+ pairs = Keyword.delete(num_pairs, :total)
+    # Should not be necessary, but let's be more defensive.
+ total =
+ max_of(totals) ||
+ Keyword.get(totals, :rss) ||
+ Keyword.get(totals, :allocated) ||
+ Keyword.get(totals, :erlang)
+
+ pairs
+ |> Enum.map(fn {k, v} ->
+ pg = (v / total) |> fraction_to_percent()
+ {k, %{bytes: v, percentage: pg}}
+ end)
+ |> Enum.sort_by(fn {_key, %{bytes: bytes}} -> bytes end, &>=/2)
+ end
+
+ def formatted_watermark(val) when is_float(val) do
+ %{relative: val}
+ end
+ def formatted_watermark({:absolute, val}) do
+ %{absolute: parse_watermark(val)}
+ end
+ def formatted_watermark(val) when is_integer(val) do
+ %{absolute: parse_watermark(val)}
+ end
+ def formatted_watermark(val) when is_bitstring(val) do
+ %{absolute: parse_watermark(val)}
+ end
+ def formatted_watermark(val) when is_list(val) do
+ %{absolute: parse_watermark(val)}
+ end
+
+ def parse_watermark({:absolute, n}) do
+ case IU.parse(n) do
+ {:ok, parsed} -> parsed
+ err -> err
+ end
+ end
+ def parse_watermark(n) when is_bitstring(n) do
+ case IU.parse(n) do
+ {:ok, parsed} -> parsed
+ err -> err
+ end
+ end
+ def parse_watermark(n) when is_list(n) do
+ case IU.parse(n) do
+ {:ok, parsed} -> parsed
+ err -> err
+ end
+ end
+ def parse_watermark(n) when is_float(n) or is_integer(n) do
+ n
+ end
+
+ #
+ # Implementation
+ #
+
+ defp fraction_to_percent(x) do
+ Float.round(x * 100, 2)
+ end
+
+ defp max_of(m) do
+ Keyword.values(m) |> Enum.max()
+ end
+end
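
Worked examples of the unit conversions above (the values follow directly from power_as_int/3):

    alias RabbitMQ.CLI.Core.Memory

    Memory.memory_unit_absolute(10, "MiB")  # => 10_485_760 (10 * 2^20)
    Memory.memory_unit_absolute(1, "GB")    # => 1_000_000_000 (10^9)
    Memory.memory_unit_absolute(-1, "k")    # => {:bad_argument, [-1, "k"]}
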
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/merges_default_virtual_host.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/merges_default_virtual_host.ex
new file mode 100644
index 0000000000..94b1b768b6
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/merges_default_virtual_host.ex
@@ -0,0 +1,15 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+# Should be used by commands that accept a --vhost option and
+# fall back to the default virtual host ("/") when none is given.
+defmodule RabbitMQ.CLI.Core.MergesDefaultVirtualHost do
+ defmacro __using__(_) do
+ quote do
+ def merge_defaults(args, opts), do: {args, Map.merge(%{vhost: "/"}, opts)}
+ end
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/merges_no_defaults.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/merges_no_defaults.ex
new file mode 100644
index 0000000000..0ee6f3f05a
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/merges_no_defaults.ex
@@ -0,0 +1,15 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+# Should be used by commands that merge no default option values
+# into the ones provided by the user.
+defmodule RabbitMQ.CLI.Core.MergesNoDefaults do
+ defmacro __using__(_) do
+ quote do
+ def merge_defaults(args, opts), do: {args, opts}
+ end
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/networking.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/networking.ex
new file mode 100644
index 0000000000..12d99df7c1
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/networking.ex
@@ -0,0 +1,73 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Core.Networking do
+ @type address_family() :: :inet | :inet6
+
+ @spec address_family(String.t() | atom() | charlist() | binary()) :: address_family()
+ def address_family(value) do
+ val = RabbitMQ.CLI.Core.DataCoercion.to_atom(value)
+ case val do
+ :inet -> :inet
+ :inet4 -> :inet
+ :inet6 -> :inet6
+ :ipv4 -> :inet
+ :ipv6 -> :inet6
+ :IPv4 -> :inet
+ :IPv6 -> :inet6
+ end
+ end
+
+  @spec valid_address_family?(String.t() | atom()) :: boolean()
+ def valid_address_family?(value) when is_atom(value) do
+ valid_address_family?(to_string(value))
+ end
+ def valid_address_family?("inet"), do: true
+ def valid_address_family?("inet4"), do: true
+ def valid_address_family?("inet6"), do: true
+ def valid_address_family?("ipv4"), do: true
+ def valid_address_family?("ipv6"), do: true
+ def valid_address_family?("IPv4"), do: true
+ def valid_address_family?("IPv6"), do: true
+ def valid_address_family?(_other), do: false
+
+ @spec format_address(:inet.ip_address()) :: String.t()
+ def format_address(addr) do
+ to_string(:inet.ntoa(addr))
+ end
+
+ @spec format_addresses([:inet.ip_address()]) :: [String.t()]
+ def format_addresses(addrs) do
+ Enum.map(addrs, &format_address/1)
+ end
+
+ @spec inetrc_map(nonempty_list()) :: map()
+ def inetrc_map(list) do
+ Enum.reduce(list, %{},
+ fn hosts, acc when is_list(hosts) ->
+ Map.put(acc, "hosts", host_resolution_map(hosts))
+ {k, v}, acc when k == :domain or k == :resolv_conf or k == :hosts_file ->
+ Map.put(acc, to_string(k), to_string(v))
+ {k, v}, acc when is_list(v) when k == :search or k == :lookup ->
+ Map.put(acc, to_string(k), Enum.join(Enum.map(v, &to_string/1), ", "))
+ {k, v}, acc when is_integer(v) ->
+ Map.put(acc, to_string(k), v)
+ {k, v, v2}, acc when is_tuple(v) when k == :nameserver or k == :nameservers or k == :alt_nameserver ->
+ Map.put(acc, to_string(k), "#{:inet.ntoa(v)}:#{v2}")
+ {k, v}, acc when is_tuple(v) when k == :nameserver or k == :nameservers or k == :alt_nameserver ->
+ Map.put(acc, to_string(k), to_string(:inet.ntoa(v)))
+ {k, v}, acc ->
+ Map.put(acc, to_string(k), to_string(v))
+ end)
+ end
+
+ def host_resolution_map(hosts) do
+ Enum.reduce(hosts, %{},
+ fn {:host, address, hosts}, acc ->
+ Map.put(acc, to_string(:inet.ntoa(address)), Enum.map(hosts, &to_string/1))
+ end)
+ end
+end
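
Illustrative calls (inputs assumed):

    alias RabbitMQ.CLI.Core.Networking

    Networking.address_family("IPv6")          # => :inet6
    Networking.valid_address_family?("ip4")    # => false
    Networking.format_address({127, 0, 0, 1})  # => "127.0.0.1"
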
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/node_name.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/node_name.ex
new file mode 100644
index 0000000000..c39b215ca7
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/node_name.ex
@@ -0,0 +1,198 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Core.NodeName do
+ alias RabbitMQ.CLI.Core.Config
+
+ @moduledoc """
+ Provides functions for correctly constructing node names given a node type and optional base name.
+ """
+
+ @doc """
+ Constructs complete node name based on :longnames or :shortnames and
+ a base name, in the same manner as Erlang/OTP lib/kernel/src/net_kernel.erl
+ """
+ def create(base, type) do
+ create_name(base, type, 1)
+ end
+
+ @doc """
+ Get local hostname
+ """
+ def hostname, do: :inet_db.gethostname() |> List.to_string()
+
+ @doc """
+ Get hostname part of current node name
+ """
+ def hostname_from_node do
+ [_, hostname] = split_node(Node.self())
+ hostname
+ end
+
+ @doc """
+ Get hostname part of given node name
+ """
+ def hostname_from_node(name) do
+ [_, hostname] = split_node(name)
+ hostname
+ end
+
+ def split_node(name) when is_atom(name) do
+ split_node(to_string(name))
+ end
+
+ def split_node(name) do
+ case String.split(name, "@", parts: 2) do
+ ["", host] ->
+ default_name = to_string(Config.default(:node))
+ [default_name, host]
+
+ [_head, _host] = rslt ->
+ rslt
+
+ [head] ->
+ [head, ""]
+ end
+ end
+
+ @doc """
+ Get local domain. If unavailable, makes a good guess. We're using
+ :inet_db here because that's what Erlang/OTP uses when it creates a node
+ name:
+ https://github.com/erlang/otp/blob/8ca061c3006ad69c2a8d1c835d0d678438966dfc/lib/kernel/src/net_kernel.erl#L1363-L1445
+ """
+ def domain do
+ domain(1)
+ end
+
+ #
+ # Implementation
+ #
+
+ defp domain(attempt) do
+ case {attempt, :inet_db.res_option(:domain), :os.type()} do
+ {1, [], _} ->
+ do_load_resolv()
+ domain(0)
+
+ {0, [], {:unix, :darwin}} ->
+ "local"
+
+ {0, [], _} ->
+ "localdomain"
+
+ {_, domain, _} ->
+ List.to_string(domain)
+ end
+ end
+
+ defp create_name(name, long_or_short_names, attempt) do
+ {head, host1} = create_hostpart(name, long_or_short_names)
+
+ case host1 do
+ {:ok, host_part} ->
+ case valid_name_head(head) do
+ true ->
+ {:ok, String.to_atom(head <> "@" <> host_part)}
+
+ false ->
+ {:error, {:node_name, :invalid_node_name_head}}
+ end
+
+ {:error, :long} when attempt == 1 ->
+ do_load_resolv()
+ create_name(name, long_or_short_names, 0)
+
+ {:error, :long} when attempt == 0 ->
+ case valid_name_head(head) do
+ true ->
+ {:ok, String.to_atom(head <> "@" <> hostname() <> "." <> domain())}
+
+ false ->
+ {:error, {:node_name, :invalid_node_name_head}}
+ end
+
+ {:error, :hostname_not_allowed} ->
+ {:error, {:node_name, :hostname_not_allowed}}
+
+ {:error, err_type} ->
+ {:error, {:node_name, err_type}}
+ end
+ end
+
+ defp create_hostpart(name, long_or_short_names) do
+ [head, host] = split_node(name)
+
+ host1 =
+ case {host, long_or_short_names} do
+ {"", :shortnames} ->
+ case :inet_db.gethostname() do
+ inet_db_host when inet_db_host != [] ->
+ {:ok, to_string(inet_db_host)}
+
+ _ ->
+ {:error, :short}
+ end
+
+ {"", :longnames} ->
+ case {:inet_db.gethostname(), :inet_db.res_option(:domain)} do
+ {inet_db_host, inet_db_domain}
+ when inet_db_host != [] and inet_db_domain != [] ->
+ {:ok, to_string(inet_db_host) <> "." <> to_string(inet_db_domain)}
+
+ _ ->
+ {:error, :long}
+ end
+
+ {_, type} ->
+ validate_hostname(host, type)
+ end
+
+ {head, host1}
+ end
+
+ defp validate_hostname(host, :longnames) do
+ case String.contains?(host, ".") do
+ true ->
+ validate_hostname_rx(host)
+
+ _ ->
+ validate_hostname(host <> "." <> domain(), :longnames)
+ end
+ end
+
+ defp validate_hostname(host, :shortnames) do
+ case String.contains?(host, ".") do
+ true ->
+ {:error, :short}
+
+ _ ->
+ validate_hostname_rx(host)
+ end
+ end
+
+ defp validate_hostname_rx(host) do
+ rx = Regex.compile!("^[!-ÿ]*$", [:unicode])
+
+ case Regex.match?(rx, host) do
+ true ->
+ {:ok, host}
+
+ false ->
+ {:error, :hostname_not_allowed}
+ end
+ end
+
+ defp valid_name_head(head) do
+ rx = Regex.compile!("^[0-9A-Za-z_\\-]+$", [:unicode])
+ Regex.match?(rx, head)
+ end
+
+ defp do_load_resolv do
+    # It could be that we haven't read the domain name from the resolv file yet
+ :ok = :inet_config.do_load_resolv(:os.type(), :longnames)
+ end
+end
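
A sketch of create/2; the short hostname in the first result depends on the local machine:

    alias RabbitMQ.CLI.Core.NodeName

    NodeName.create("rabbit", :shortnames)
    # => {:ok, :rabbit@myhost}

    NodeName.create("ra bbit", :shortnames)
    # => {:error, {:node_name, :invalid_node_name_head}}
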
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/os_pid.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/os_pid.ex
new file mode 100644
index 0000000000..0b53d59748
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/os_pid.ex
@@ -0,0 +1,56 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Core.OsPid do
+ @external_process_check_interval 1000
+
+ @pid_regex ~r/^\s*(?<pid>\d+)/
+
+ #
+ # API
+ #
+
+ def wait_for_os_process_death(pid) do
+ case :rabbit_misc.is_os_process_alive(pid) do
+ true ->
+ :timer.sleep(@external_process_check_interval)
+ wait_for_os_process_death(pid)
+
+ false ->
+ :ok
+ end
+ end
+
+ def read_pid_from_file(pidfile_path, should_wait) do
+ case {:file.read_file(pidfile_path), should_wait} do
+ {{:ok, contents}, _} ->
+ pid_regex = Regex.recompile!(@pid_regex)
+
+ case Regex.named_captures(pid_regex, contents)["pid"] do
+ # e.g. the file is empty
+ nil ->
+ {:error, :could_not_read_pid_from_file, {:contents, contents}}
+
+ pid_string ->
+ try do
+ {pid, _remainder} = Integer.parse(pid_string)
+ pid
+ rescue
+ _e in ArgumentError ->
+ {:error, {:could_not_read_pid_from_file, {:contents, contents}}}
+ end
+ end
+
+ # file does not exist, wait and re-check
+ {{:error, :enoent}, true} ->
+ :timer.sleep(@external_process_check_interval)
+ read_pid_from_file(pidfile_path, should_wait)
+
+ {{:error, details}, _} ->
+ {:error, :could_not_read_pid_from_file, details}
+ end
+ end
+end
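
A usage sketch; the pidfile path is hypothetical. With should_wait set to false, a missing file is reported immediately rather than polled for:

    alias RabbitMQ.CLI.Core.OsPid

    case OsPid.read_pid_from_file("/var/run/rabbitmq/pid", false) do
      pid when is_integer(pid) -> OsPid.wait_for_os_process_death(pid)
      error                    -> IO.puts("Could not read pidfile: #{inspect(error)}")
    end
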
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/output.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/output.ex
new file mode 100644
index 0000000000..1b2436cba4
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/output.ex
@@ -0,0 +1,72 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Core.Output do
+ def format_output(:ok, _, _) do
+ :ok
+ end
+
+ # the command intends to produce no output
+ def format_output({:ok, nil}, _, _) do
+ :ok
+ end
+
+ def format_output({:ok, :check_passed}, _, _) do
+ :ok
+ end
+
+ def format_output({:ok, output}, formatter, options) do
+ {:ok, formatter.format_output(output, options)}
+ end
+
+ def format_output({:stream, stream}, formatter, options) do
+ {:stream, formatter.format_stream(stream, options)}
+ end
+
+ def print_output(output, printer, options) do
+ {:ok, printer_state} = printer.init(options)
+ exit_code = print_output_0(output, printer, printer_state)
+ printer.finish(printer_state)
+ exit_code
+ end
+
+ def print_output_0(:ok, printer, printer_state) do
+ printer.print_ok(printer_state)
+ :ok
+ end
+
+ # the command intends to produce no output
+ def print_output_0({:ok, nil}, _printer, _printer_state) do
+ :ok
+ end
+
+ def print_output_0({:ok, :check_passed}, _printer, _printer_state) do
+ :ok
+ end
+
+ def print_output_0({:ok, single_value}, printer, printer_state) do
+ printer.print_output(single_value, printer_state)
+ :ok
+ end
+
+ def print_output_0({:stream, stream}, printer, printer_state) do
+ case print_output_stream(stream, printer, printer_state) do
+ :ok -> :ok
+ {:error, _} = err -> err
+ end
+ end
+
+ def print_output_stream(stream, printer, printer_state) do
+ Enum.reduce_while(stream, :ok, fn
+ {:error, err}, _ ->
+ {:halt, {:error, err}}
+
+ val, _ ->
+ printer.print_output(val, printer_state)
+ {:cont, :ok}
+ end)
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/parser.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/parser.ex
new file mode 100644
index 0000000000..28c4df2aa4
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/parser.ex
@@ -0,0 +1,311 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Core.Parser do
+ alias RabbitMQ.CLI.{CommandBehaviour, FormatterBehaviour}
+ alias RabbitMQ.CLI.Core.{CommandModules, Config}
+
+ def default_switches() do
+ [
+ node: :atom,
+ quiet: :boolean,
+ silent: :boolean,
+ dry_run: :boolean,
+ vhost: :string,
+ # for backwards compatibility,
+ # not all commands support timeouts
+ timeout: :integer,
+ longnames: :boolean,
+ formatter: :string,
+ printer: :string,
+ file: :string,
+ script_name: :atom,
+ rabbitmq_home: :string,
+ mnesia_dir: :string,
+ plugins_dir: :string,
+ enabled_plugins_file: :string,
+ aliases_file: :string,
+ erlang_cookie: :atom,
+ help: :boolean,
+ print_stacktrace: :boolean
+ ]
+ end
+
+ def default_aliases() do
+ [
+ p: :vhost,
+ n: :node,
+ q: :quiet,
+ s: :silent,
+ l: :longnames,
+ # for backwards compatibility,
+ # not all commands support timeouts
+ t: :timeout,
+ "?": :help
+ ]
+ end
+
+ @spec parse([String.t()]) ::
+ {command :: :no_command | atom() | {:suggest, String.t()}, command_name :: String.t(),
+ arguments :: [String.t()], options :: map(),
+ invalid :: [{String.t(), String.t() | nil}]}
+
+ def parse(input) do
+ {parsed_args, options, invalid} = parse_global(input)
+ {command_name, command_module, arguments} = look_up_command(parsed_args, options)
+
+ case command_module do
+ nil ->
+ {:no_command, command_name, arguments, options, invalid}
+
+ {:suggest, _} = suggest ->
+ {suggest, command_name, arguments, options, invalid}
+
+ {:alias, alias_module, alias_content} ->
+ {[_alias_command_name | cmd_arguments], cmd_options, cmd_invalid} =
+ parse_alias(input, command_name, alias_module, alias_content, options)
+
+ {alias_module, command_name, cmd_arguments, cmd_options, cmd_invalid}
+
+ command_module when is_atom(command_module) ->
+ {[^command_name | cmd_arguments], cmd_options, cmd_invalid} =
+ parse_command_specific(input, command_module, options)
+
+ {command_module, command_name, cmd_arguments, cmd_options, cmd_invalid}
+ end
+ end
+
+ def command_suggestion(_cmd_name, empty) when empty == %{} do
+ nil
+ end
+ def command_suggestion(typed, module_map) do
+ RabbitMQ.CLI.AutoComplete.suggest_command(typed, module_map)
+ end
+
+ defp look_up_command(parsed_args, options) do
+ case parsed_args do
+ [cmd_name | arguments] ->
+          ## This is an optimisation for pluggable command discovery.
+          ## Most of the time a command will come from the rabbitmqctl application,
+          ## so there is no point in scanning plugins for potential commands.
+ CommandModules.load_core(options)
+ core_commands = CommandModules.module_map_core()
+
+ command =
+ case core_commands[cmd_name] do
+ nil ->
+ CommandModules.load(options)
+ module_map = CommandModules.module_map()
+
+ module_map[cmd_name] ||
+ command_alias(cmd_name, module_map, options) ||
+ command_suggestion(cmd_name, module_map)
+
+ c ->
+ c
+ end
+
+ {cmd_name, command, arguments}
+
+ [] ->
+ {"", nil, []}
+ end
+ end
+
+ defp command_alias(cmd_name, module_map, options) do
+ aliases = load_aliases(options)
+
+ case aliases[cmd_name] do
+ nil ->
+ nil
+
+ [alias_cmd_name | _] = alias_content ->
+ case module_map[alias_cmd_name] do
+ nil -> nil
+ module -> {:alias, module, alias_content}
+ end
+ end
+ end
+
+ defp load_aliases(options) do
+ aliases_file = Config.get_option(:aliases_file, options)
+
+ case aliases_file && File.read(aliases_file) do
+ ## No aliases file
+ nil ->
+ %{}
+
+ {:ok, content} ->
+ String.split(content, "\n")
+ |> Enum.reduce(
+ %{},
+ fn str, acc ->
+ case String.split(str, "=", parts: 2) do
+ [alias_name, alias_string] ->
+ Map.put(acc, String.trim(alias_name), OptionParser.split(alias_string))
+
+ _ ->
+ acc
+ end
+ end
+ )
+
+ {:error, err} ->
+ IO.puts(:stderr, "Error reading aliases file #{aliases_file}: #{err}")
+ %{}
+ end
+ end
+
+ defp parse_alias(input, command_name, module, alias_content, options) do
+ {pre_command_options, tail, invalid} = parse_global_head(input)
+ [^command_name | other] = tail
+ aliased_input = alias_content ++ other
+ {args, options, command_invalid} = parse_command_specific(aliased_input, module, options)
+ merged_options = Map.merge(options, pre_command_options)
+ {args, merged_options, command_invalid ++ invalid}
+ end
+
+ def parse_command_specific(input, command, options \\ %{}) do
+ formatter = Config.get_formatter(command, options)
+
+ switches = build_switches(default_switches(), command, formatter)
+ aliases = build_aliases(default_aliases(), command, formatter)
+ parse_generic(input, switches, aliases)
+ end
+
+ def parse_global_head(input) do
+ switches = default_switches()
+ aliases = default_aliases()
+
+ {options, tail, invalid} =
+ OptionParser.parse_head(
+ input,
+ strict: switches,
+ aliases: aliases,
+ allow_nonexistent_atoms: true
+ )
+
+ norm_options = normalize_options(options, switches) |> Map.new()
+ {norm_options, tail, invalid}
+ end
+
+ def parse_global(input) do
+ switches = default_switches()
+ aliases = default_aliases()
+ parse_generic(input, switches, aliases)
+ end
+
+ defp parse_generic(input, switches, aliases) do
+ {options, args, invalid} =
+ OptionParser.parse(
+ input,
+ strict: switches,
+ aliases: aliases,
+ allow_nonexistent_atoms: true
+ )
+
+ norm_options = normalize_options(options, switches) |> Map.new()
+ {args, norm_options, invalid}
+ end
+
+ defp build_switches(default, command, formatter) do
+ command_switches = CommandBehaviour.switches(command)
+ formatter_switches = FormatterBehaviour.switches(formatter)
+
+ assert_no_conflict(
+ command,
+ command_switches,
+ formatter_switches,
+ :redefining_formatter_switches
+ )
+
+ merge_if_different(
+ default,
+ formatter_switches,
+ {:formatter_invalid,
+ {formatter, {:redefining_global_switches, default, formatter_switches}}}
+ )
+ |> merge_if_different(
+ command_switches,
+ {:command_invalid, {command, {:redefining_global_switches, default, command_switches}}}
+ )
+ end
+
+ defp assert_no_conflict(command, command_fields, formatter_fields, err) do
+ merge_if_different(
+ formatter_fields,
+ command_fields,
+ {:command_invalid, {command, {err, formatter_fields, command_fields}}}
+ )
+
+ :ok
+ end
+
+ defp build_aliases(default, command, formatter) do
+ command_aliases = CommandBehaviour.aliases(command)
+ formatter_aliases = FormatterBehaviour.aliases(formatter)
+
+ assert_no_conflict(command, command_aliases, formatter_aliases, :redefining_formatter_aliases)
+
+ merge_if_different(
+ default,
+ formatter_aliases,
+ {:formatter_invalid, {command, {:redefining_global_aliases, default, formatter_aliases}}}
+ )
+ |> merge_if_different(
+ command_aliases,
+ {:command_invalid, {command, {:redefining_global_aliases, default, command_aliases}}}
+ )
+ end
+
+ defp merge_if_different(default, specific, error) do
+ case keyword_intersect(default, specific) do
+ [] ->
+ Keyword.merge(default, specific)
+
+ conflicts ->
+ # if all conflicting keys are of the same type,
+ # that's acceptable
+ case Enum.all?(
+ conflicts,
+ fn c ->
+ Keyword.get(default, c) == Keyword.get(specific, c)
+ end
+ ) do
+ true -> Keyword.merge(default, specific)
+ false -> exit(error)
+ end
+ end
+ end
+
+ defp keyword_intersect(one, two) do
+ one_keys = MapSet.new(Keyword.keys(one))
+ two_keys = MapSet.new(Keyword.keys(two))
+ intersection = MapSet.intersection(one_keys, two_keys)
+
+ case Enum.empty?(intersection) do
+ true -> []
+ false -> MapSet.to_list(intersection)
+ end
+ end
+
+ defp normalize_options(options, switches) do
+ Enum.map(
+ options,
+ fn {key, option} ->
+ {key, normalize_type(option, switches[key])}
+ end
+ )
+ end
+
+ defp normalize_type(value, :atom) when is_binary(value) do
+ String.to_atom(value)
+ end
+
+ defp normalize_type(value, _type) do
+ value
+ end
+end
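
A sketch of global option parsing in isolation (parse/1 additionally resolves the command module); the input is made up:

    alias RabbitMQ.CLI.Core.Parser

    {args, options, invalid} =
      Parser.parse_global(["list_queues", "--node", "rabbit@host", "--timeout", "10"])
    # args    => ["list_queues"]
    # options => %{node: :"rabbit@host", timeout: 10}
    # invalid => []
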
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/paths.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/paths.ex
new file mode 100644
index 0000000000..0e90834771
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/paths.ex
@@ -0,0 +1,55 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Core.Paths do
+ alias RabbitMQ.CLI.Core.Config
+ import RabbitMQ.CLI.Core.Platform
+
+ def plugins_dir(_, opts) do
+ plugins_dir(opts)
+ end
+
+ def plugins_dir(opts) do
+ case Config.get_option(:plugins_dir, opts) do
+ nil ->
+ {:error, :no_plugins_dir}
+
+ dir ->
+ paths = String.split(to_string(dir), path_separator())
+
+ case Enum.any?(paths, &File.dir?/1) do
+ true -> {:ok, dir}
+ false -> {:error, :plugins_dir_does_not_exist}
+ end
+ end
+ end
+
+ def require_mnesia_dir(opts) do
+ case Application.get_env(:mnesia, :dir) do
+ nil ->
+ case Config.get_option(:mnesia_dir, opts) do
+ nil -> {:error, :mnesia_dir_not_found}
+ val -> Application.put_env(:mnesia, :dir, to_charlist(val))
+ end
+
+ _ ->
+ :ok
+ end
+ end
+
+ def require_feature_flags_file(opts) do
+ case Application.get_env(:rabbit, :feature_flags_file) do
+ nil ->
+ case Config.get_option(:feature_flags_file, opts) do
+ nil -> {:error, :feature_flags_file_not_found}
+ val -> Application.put_env(:rabbit, :feature_flags_file, to_charlist(val))
+ end
+
+ _ ->
+ :ok
+ end
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/platform.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/platform.ex
new file mode 100644
index 0000000000..561b2adb58
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/platform.ex
@@ -0,0 +1,37 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Core.Platform do
+ def path_separator() do
+ case :os.type() do
+ {:unix, _} -> ":"
+ {:win32, _} -> ";"
+ end
+ end
+
+ def line_separator() do
+ case :os.type() do
+ {:unix, _} -> "\n"
+ {:win32, _} -> "\r\n"
+ end
+ end
+
+ def os_name({:unix, :linux}) do
+ "Linux"
+ end
+ def os_name({:unix, :darwin}) do
+ "macOS"
+ end
+ def os_name({:unix, :freebsd}) do
+ "FreeBSD"
+ end
+ def os_name({:unix, name}) do
+ name |> to_string |> String.capitalize
+ end
+ def os_name({:win32, _}) do
+ "Windows"
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/requires_rabbit_app_running.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/requires_rabbit_app_running.ex
new file mode 100644
index 0000000000..7f5337a6e7
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/requires_rabbit_app_running.ex
@@ -0,0 +1,17 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+# Should be used by commands that require rabbit app to be running
+# but need no other execution environment validators.
+defmodule RabbitMQ.CLI.Core.RequiresRabbitAppRunning do
+ defmacro __using__(_) do
+ quote do
+ def validate_execution_environment(args, opts) do
+ RabbitMQ.CLI.Core.Validators.rabbit_is_running(args, opts)
+ end
+ end
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/requires_rabbit_app_stopped.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/requires_rabbit_app_stopped.ex
new file mode 100644
index 0000000000..48b2b6dcd0
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/requires_rabbit_app_stopped.ex
@@ -0,0 +1,17 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+# Should be used by commands that require rabbit app to be stopped
+# but need no other execution environment validators.
+defmodule RabbitMQ.CLI.Core.RequiresRabbitAppStopped do
+ defmacro __using__(_) do
+ quote do
+ def validate_execution_environment(args, opts) do
+ RabbitMQ.CLI.Core.Validators.rabbit_is_not_running(args, opts)
+ end
+ end
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/validators.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/validators.ex
new file mode 100644
index 0000000000..666d7af065
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/validators.ex
@@ -0,0 +1,115 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+# Provides common validation functions.
+defmodule RabbitMQ.CLI.Core.Validators do
+ alias RabbitMQ.CLI.Core.Helpers
+ import RabbitMQ.CLI.Core.{CodePath, Paths}
+
+
+ def chain([validator | rest], args) do
+ case apply(validator, args) do
+ :ok -> chain(rest, args)
+ {:ok, _} -> chain(rest, args)
+ {:validation_failure, err} -> {:validation_failure, err}
+ {:error, err} -> {:validation_failure, err}
+ end
+ end
+
+ def chain([], _) do
+ :ok
+ end
+
+ def validate_step(:ok, step) do
+ case step.() do
+ {:error, err} -> {:validation_failure, err}
+ _ -> :ok
+ end
+ end
+
+ def validate_step({:validation_failure, err}, _) do
+ {:validation_failure, err}
+ end
+
+ def node_is_not_running(_, %{node: node_name}) do
+ case Helpers.node_running?(node_name) do
+ true -> {:validation_failure, :node_running}
+ false -> :ok
+ end
+ end
+
+ def node_is_running(_, %{node: node_name}) do
+ case Helpers.node_running?(node_name) do
+ false -> {:validation_failure, :node_not_running}
+ true -> :ok
+ end
+ end
+
+ def mnesia_dir_is_set(_, opts) do
+ case require_mnesia_dir(opts) do
+ :ok -> :ok
+ {:error, err} -> {:validation_failure, err}
+ end
+ end
+
+ def feature_flags_file_is_set(_, opts) do
+ case require_feature_flags_file(opts) do
+ :ok -> :ok
+ {:error, err} -> {:validation_failure, err}
+ end
+ end
+
+ def rabbit_is_loaded(_, opts) do
+ case require_rabbit(opts) do
+ :ok -> :ok
+ {:error, err} -> {:validation_failure, err}
+ end
+ end
+
+ def rabbit_app_running?(%{node: node, timeout: timeout}) do
+ case :rabbit_misc.rpc_call(node, :rabbit, :is_running, [], timeout) do
+ true -> true
+ false -> false
+ other -> {:error, other}
+ end
+ end
+
+ def rabbit_app_running?(_, opts) do
+ rabbit_app_running?(opts)
+ end
+
+ def rabbit_is_running(args, opts) do
+ case rabbit_app_state(args, opts) do
+ :running -> :ok
+ :stopped -> {:validation_failure, :rabbit_app_is_stopped}
+ other -> other
+ end
+ end
+
+ def rabbit_is_running_or_offline_flag_used(_args, %{offline: true}) do
+ :ok
+ end
+
+ def rabbit_is_running_or_offline_flag_used(args, opts) do
+ rabbit_is_running(args, opts)
+ end
+
+ def rabbit_is_not_running(args, opts) do
+ case rabbit_app_state(args, opts) do
+ :running -> {:validation_failure, :rabbit_app_is_running}
+ :stopped -> :ok
+ other -> other
+ end
+ end
+
+ def rabbit_app_state(_, opts) do
+ case rabbit_app_running?(opts) do
+ true -> :running
+ false -> :stopped
+ {:error, err} -> {:error, err}
+ end
+ end
+end
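
A sketch of chaining validators; the node and timeout are placeholders. chain/2 applies each validator to the same argument list and stops at the first failure:

    alias RabbitMQ.CLI.Core.Validators

    Validators.chain(
      [&Validators.node_is_running/2, &Validators.rabbit_is_running/2],
      [[], %{node: :rabbit@localhost, timeout: 5_000}]
    )
    # => :ok, or {:validation_failure, reason} from the first failing validator
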
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/core/version.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/version.ex
new file mode 100644
index 0000000000..bd5a24f9a0
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/core/version.ex
@@ -0,0 +1,24 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Core.Version do
+ @default_timeout 30_000
+
+ def local_version do
+ to_string(:rabbit_misc.version())
+ end
+
+
+ def remote_version(node_name) do
+ remote_version(node_name, @default_timeout)
+ end
+ def remote_version(node_name, timeout) do
+ case :rabbit_misc.rpc_call(node_name, :rabbit_misc, :version, [], timeout) do
+ {:badrpc, _} = err -> err
+ val -> val
+ end
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/add_user_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/add_user_command.ex
new file mode 100644
index 0000000000..514922cac9
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/add_user_command.ex
@@ -0,0 +1,98 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Ctl.Commands.AddUserCommand do
+ alias RabbitMQ.CLI.Core.{DocGuide, ExitCodes, Helpers, Input}
+ import RabbitMQ.CLI.Core.Config, only: [output_less?: 1]
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ use RabbitMQ.CLI.Core.MergesNoDefaults
+
+ def validate(args, _) when length(args) < 1, do: {:validation_failure, :not_enough_args}
+ def validate(args, _) when length(args) > 2, do: {:validation_failure, :too_many_args}
+ def validate([_], _), do: :ok
+ def validate(["", _], _) do
+ {:validation_failure, {:bad_argument, "user cannot be an empty string"}}
+ end
+ def validate([_, _], _), do: :ok
+
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+
+ def run([username], %{node: node_name} = opts) do
+    # note: blank passwords are currently allowed; they make sense
+    # e.g. when a user only authenticates using X.509 certificates.
+ # Credential validators can be used to require passwords of a certain length
+ # or following a certain pattern. This is a core server responsibility. MK.
+ case Input.infer_password("Password: ", opts) do
+ :eof -> {:error, :not_enough_args}
+ password -> :rabbit_misc.rpc_call(
+ node_name,
+ :rabbit_auth_backend_internal,
+ :add_user,
+ [username, password, Helpers.cli_acting_user()]
+ )
+ end
+ end
+ def run([username, password], %{node: node_name}) do
+ :rabbit_misc.rpc_call(
+ node_name,
+ :rabbit_auth_backend_internal,
+ :add_user,
+ [username, password, Helpers.cli_acting_user()]
+ )
+ end
+
+ def output({:error, :not_enough_args}, _) do
+ {:error, ExitCodes.exit_dataerr(), "Password is not provided via argument or stdin"}
+ end
+ def output({:error, {:user_already_exists, username}}, %{node: node_name, formatter: "json"}) do
+ {:error, %{"result" => "error", "node" => node_name, "message" => "User #{username} already exists"}}
+ end
+ def output({:error, {:user_already_exists, username}}, _) do
+ {:error, ExitCodes.exit_software(), "User \"#{username}\" already exists"}
+ end
+ def output(:ok, %{formatter: "json", node: node_name}) do
+ m = %{
+ "status" => "ok",
+ "node" => node_name,
+ "message" => "Done. Don't forget to grant the user permissions to some virtual hosts! See 'rabbitmqctl help set_permissions' to learn more."
+ }
+ {:ok, m}
+ end
+ def output(:ok, opts) do
+ case output_less?(opts) do
+ true ->
+ :ok
+ false ->
+ {:ok, "Done. Don't forget to grant the user permissions to some virtual hosts! See 'rabbitmqctl help set_permissions' to learn more."}
+ end
+ end
+ use RabbitMQ.CLI.DefaultOutput
+
+ def usage, do: "add_user <username> <password>"
+
+ def usage_additional() do
+ [
+ ["<username>", "Self-explanatory"],
+ ["<password>", "Password this user will authenticate with. Use a blank string to disable password-based authentication."]
+ ]
+ end
+
+ def usage_doc_guides() do
+ [
+ DocGuide.access_control()
+ ]
+ end
+
+ def help_section(), do: :user_management
+
+ def description() do
+ "Creates a new user in the internal database. This user will have no permissions for any virtual hosts by default."
+ end
+
+ def banner([username | _], _), do: "Adding user \"#{username}\" ..."
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/add_vhost_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/add_vhost_command.ex
new file mode 100644
index 0000000000..04c1e61106
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/add_vhost_command.ex
@@ -0,0 +1,62 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Ctl.Commands.AddVhostCommand do
+ alias RabbitMQ.CLI.Core.{DocGuide, Helpers}
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ def switches(), do: [description: :string,
+ tags: :string]
+ def aliases(), do: [d: :description]
+
+ def merge_defaults(args, opts) do
+ {args, Map.merge(%{description: "", tags: ""}, opts)}
+ end
+ use RabbitMQ.CLI.Core.AcceptsOnePositionalArgument
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+
+ def run([vhost], %{node: node_name, description: desc, tags: tags}) do
+ :rabbit_misc.rpc_call(node_name, :rabbit_vhost, :add, [vhost, desc, parse_tags(tags), Helpers.cli_acting_user()])
+ end
+ def run([vhost], %{node: node_name}) do
+ :rabbit_misc.rpc_call(node_name, :rabbit_vhost, :add, [vhost, Helpers.cli_acting_user()])
+ end
+
+ use RabbitMQ.CLI.DefaultOutput
+
+ def usage, do: "add_vhost <vhost> [--description <description> --tags \"<tag1>,<tag2>,<...>\"]"
+
+ def usage_additional() do
+ [
+ ["<vhost>", "Virtual host name"],
+ ["--description <description>", "Virtual host description"],
+ ["--tags <tags>", "Command separated list of tags"]
+ ]
+ end
+
+ def usage_doc_guides() do
+ [
+ DocGuide.virtual_hosts()
+ ]
+ end
+
+ def help_section(), do: :virtual_hosts
+
+ def description(), do: "Creates a virtual host"
+
+ def banner([vhost], _), do: "Adding vhost \"#{vhost}\" ..."
+
+ #
+ # Implementation
+ #
+
+ def parse_tags(tags) do
+ String.split(tags, ",", trim: true)
+ |> Enum.map(&String.trim/1)
+ |> Enum.map(&String.to_atom/1)
+ end
+end
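
For illustration, parse_tags/1 trims and atomises a comma-separated tag string:

    RabbitMQ.CLI.Ctl.Commands.AddVhostCommand.parse_tags("qa, project_a, featured")
    # => [:qa, :project_a, :featured]
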
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/authenticate_user_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/authenticate_user_command.ex
new file mode 100644
index 0000000000..9913633b84
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/authenticate_user_command.ex
@@ -0,0 +1,79 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Ctl.Commands.AuthenticateUserCommand do
+ alias RabbitMQ.CLI.Core.{DocGuide, ExitCodes, Input}
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ use RabbitMQ.CLI.Core.MergesNoDefaults
+
+ def validate(args, _) when length(args) < 1, do: {:validation_failure, :not_enough_args}
+ def validate(args, _) when length(args) > 2, do: {:validation_failure, :too_many_args}
+ def validate([_], _), do: :ok
+ def validate([_, _], _), do: :ok
+
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+
+ def run([user], %{node: node_name} = opts) do
+ # note: blank passwords are currently allowed; they make sense,
+ # e.g., when a user only authenticates using X.509 certificates.
+ # Credential validators can be used to require passwords of a certain length
+ # or following a certain pattern. This is a core server responsibility. MK.
+ case Input.infer_password("Password: ", opts) do
+ :eof -> {:error, :not_enough_args}
+ password -> :rabbit_misc.rpc_call(
+ node_name,
+ :rabbit_access_control,
+ :check_user_pass_login,
+ [user, password]
+ )
+ end
+ end
+ def run([user, password], %{node: node_name}) do
+ :rabbit_misc.rpc_call(
+ node_name,
+ :rabbit_access_control,
+ :check_user_pass_login,
+ [user, password]
+ )
+ end
+
+ def usage, do: "authenticate_user <username> <password>"
+
+ def usage_additional() do
+ [
+ ["<username>", "Username to use"],
+ ["<password>", "Password to use. Can be entered via stdin in interactive mode."]
+ ]
+ end
+
+ def usage_doc_guides() do
+ [
+ DocGuide.access_control()
+ ]
+ end
+
+ def help_section(), do: :user_management
+
+ def description(), do: "Attempts to authenticate a user. Exits with a non-zero code if authentication fails."
+
+ def banner([username | _], _), do: "Authenticating user \"#{username}\" ..."
+
+ def output({:error, :not_enough_args}, _) do
+ {:error, ExitCodes.exit_software(), "Password is not provided via argument or stdin"}
+ end
+ def output({:refused, user, msg, args}, _) do
+ {:error, RabbitMQ.CLI.Core.ExitCodes.exit_dataerr(),
+ "Error: failed to authenticate user \"#{user}\"\n" <>
+ to_string(:io_lib.format(msg, args))}
+ end
+ def output({:ok, _user}, _) do
+ {:ok, "Success"}
+ end
+
+ use RabbitMQ.CLI.DefaultOutput
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/autocomplete_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/autocomplete_command.ex
new file mode 100644
index 0000000000..19deb74f79
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/autocomplete_command.ex
@@ -0,0 +1,53 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2016-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Ctl.Commands.AutocompleteCommand do
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ alias RabbitMQ.CLI.Core.{Config, DocGuide}
+
+ def scopes(), do: [:ctl, :diagnostics, :plugins, :queues]
+
+ def distribution(_), do: :none
+
+ def merge_defaults(args, opts) do
+ # enforce --silent as shell completion does not
+ # expect to receive any additional output, so the command
+ # is not really interactive
+ {args, Map.merge(opts, %{silent: true})}
+ end
+
+ use RabbitMQ.CLI.Core.AcceptsDefaultSwitchesAndTimeout
+ use RabbitMQ.CLI.Core.AcceptsOnePositionalArgument
+
+ def run(args, %{script_name: script_name}) do
+ {:stream, RabbitMQ.CLI.AutoComplete.complete(script_name, args)}
+ end
+ def run(args, opts) do
+ script_name = Config.get_system_option(:script_name, opts)
+
+ {:stream, RabbitMQ.CLI.AutoComplete.complete(script_name, args)}
+ end
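+ # For example, `rabbitmqctl autocomplete list_` streams candidate command
+ # names such as list_queues or list_users; the exact set depends on the
+ # commands discovered at runtime.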
+ use RabbitMQ.CLI.DefaultOutput
+
+ def usage() do
+ "autocomplete [prefix]"
+ end
+
+ def banner(_args, _opts) do
+ nil
+ end
+
+ def usage_doc_guides() do
+ [
+ DocGuide.cli()
+ ]
+ end
+
+ def help_section(), do: :help
+
+ def description(), do: "Provides command name autocomplete variants"
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/await_online_nodes_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/await_online_nodes_command.ex
new file mode 100644
index 0000000000..f0d1df6a02
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/await_online_nodes_command.ex
@@ -0,0 +1,62 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2016-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Ctl.Commands.AwaitOnlineNodesCommand do
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ @default_timeout 300_000
+
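+ # Note: both an omitted timeout and an explicit infinity fall back to the
+ # 300 second default, so this command never waits unboundedly.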
+ def merge_defaults(args, opts) do
+ timeout =
+ case opts[:timeout] do
+ nil -> @default_timeout
+ :infinity -> @default_timeout
+ other -> other
+ end
+
+ {args, Map.merge(opts, %{timeout: timeout})}
+ end
+
+ use RabbitMQ.CLI.Core.AcceptsDefaultSwitchesAndTimeout
+ use RabbitMQ.CLI.Core.AcceptsOnePositiveIntegerArgument
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+
+ def run([count], %{node: node_name, timeout: timeout}) do
+ {n, _} = Integer.parse(count)
+ :rabbit_misc.rpc_call(node_name, :rabbit_nodes, :await_running_count, [n, timeout])
+ end
+
+ def output({:error, :timeout}, %{node: node_name}) do
+ {:error, RabbitMQ.CLI.Core.ExitCodes.exit_software(),
+ "Error: timed out while waiting. Not enough nodes joined #{node_name}'s cluster."}
+ end
+
+ use RabbitMQ.CLI.DefaultOutput
+
+ def banner([count], %{node: node_name, timeout: timeout}) when is_number(timeout) do
+ "Will wait for at least #{count} nodes to join the cluster of #{node_name}. Timeout: #{
+ trunc(timeout / 1000)
+ } seconds."
+ end
+
+ def banner([count], %{node: node_name, timeout: _timeout}) do
+ "Will wait for at least #{count} nodes to join the cluster of #{node_name}."
+ end
+
+ def usage() do
+ "await_online_nodes <count>"
+ end
+
+ def usage_additional() do
+ [
+ ["<count>", "how many cluster members must be up in order for this command to exit. When <count> is 1, always exits immediately."]
+ ]
+ end
+
+ def help_section(), do: :cluster_management
+
+ def description(), do: "Waits for <count> nodes to join the cluster"
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/await_startup_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/await_startup_command.ex
new file mode 100644
index 0000000000..9a898224ce
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/await_startup_command.ex
@@ -0,0 +1,45 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Ctl.Commands.AwaitStartupCommand do
+ @moduledoc """
+ Waits until target node is fully booted. If the node is already running,
+ returns immediately.
+
+ This command is meant to be used when automating deployments.
+ See also `AwaitOnlineNodesCommand`.
+ """
+
+ import RabbitMQ.CLI.Core.Config, only: [output_less?: 1]
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ @default_timeout 300_000
+
+ def merge_defaults(args, opts) do
+ {args, Map.merge(%{timeout: @default_timeout}, opts)}
+ end
+
+ use RabbitMQ.CLI.Core.AcceptsNoPositionalArguments
+
+ def run([], %{node: node_name, timeout: timeout} = opts) do
+ :rabbit_misc.rpc_call(node_name, :rabbit, :await_startup, [
+ node_name,
+ not output_less?(opts),
+ timeout
+ ])
+ end
+
+ use RabbitMQ.CLI.DefaultOutput
+
+ def usage, do: "await_startup"
+
+ def help_section(), do: :node_management
+
+ def description(), do: "Waits for the RabbitMQ application to start on the target node"
+
+ def banner(_, _), do: nil
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/cancel_sync_queue_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/cancel_sync_queue_command.ex
new file mode 100644
index 0000000000..2858040039
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/cancel_sync_queue_command.ex
@@ -0,0 +1,52 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2016-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Ctl.Commands.CancelSyncQueueCommand do
+ alias RabbitMQ.CLI.Core.DocGuide
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+ use RabbitMQ.CLI.DefaultOutput
+
+ def merge_defaults(args, opts) do
+ {args, Map.merge(%{vhost: "/"}, opts)}
+ end
+
+ use RabbitMQ.CLI.Core.AcceptsOnePositionalArgument
+
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+
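+ # :rabbit_misc.r/3 builds the #resource{} record the broker uses to
+ # identify the queue within the given vhost.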
+ def run([queue], %{vhost: vhost, node: node_name}) do
+ :rpc.call(
+ node_name,
+ :rabbit_mirror_queue_misc,
+ :cancel_sync_queue,
+ [:rabbit_misc.r(vhost, :queue, queue)],
+ :infinity
+ )
+ end
+
+ def usage, do: "cancel_sync_queue [--vhost <vhost>] <queue>"
+
+ def usage_additional() do
+ [
+ ["<queue>", "Queue name"]
+ ]
+ end
+
+ def usage_doc_guides() do
+ [
+ DocGuide.mirroring()
+ ]
+ end
+
+ def help_section(), do: :replication
+
+ def description(), do: "Instructs a synchronising mirrored queue to stop synchronising itself"
+
+ def banner([queue], %{vhost: vhost, node: _node}) do
+ "Stopping synchronising queue '#{queue}' in vhost '#{vhost}' ..."
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/change_cluster_node_type_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/change_cluster_node_type_command.ex
new file mode 100644
index 0000000000..93fc9c7da0
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/change_cluster_node_type_command.ex
@@ -0,0 +1,87 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2016-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Ctl.Commands.ChangeClusterNodeTypeCommand do
+ alias RabbitMQ.CLI.Core.DocGuide
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ def merge_defaults(args, opts) do
+ {args, opts}
+ end
+
+ def validate([], _), do: {:validation_failure, :not_enough_args}
+
+ # node type
+ def validate(["disc"], _), do: :ok
+ def validate(["disk"], _), do: :ok
+ def validate(["ram"], _), do: :ok
+
+ def validate([_], _),
+ do: {:validation_failure, {:bad_argument, "The node type must be either disc or ram."}}
+
+ def validate(_, _), do: {:validation_failure, :too_many_args}
+
+ use RabbitMQ.CLI.Core.RequiresRabbitAppStopped
+
+ def run([node_type_arg], %{node: node_name}) do
+ normalized_type = normalize_type(String.to_atom(node_type_arg))
+ current_type = :rabbit_misc.rpc_call(node_name, :rabbit_mnesia, :node_type, [])
+
+ case current_type do
+ ^normalized_type ->
+ {:ok, "Node type is already #{normalized_type}"}
+
+ _ ->
+ :rabbit_misc.rpc_call(node_name, :rabbit_mnesia, :change_cluster_node_type, [
+ normalized_type
+ ])
+ end
+ end
+
+ def usage() do
+ "change_cluster_node_type <disc | ram>"
+ end
+
+ def usage_additional() do
+ [
+ ["<disc | ram>", "New node type"]
+ ]
+ end
+
+ def usage_doc_guides() do
+ [
+ DocGuide.clustering()
+ ]
+ end
+
+ def help_section(), do: :cluster_management
+
+ def description(), do: "Changes the type of the cluster node"
+
+ def banner([node_type], %{node: node_name}) do
+ "Turning #{node_name} into a #{node_type} node"
+ end
+
+ def output({:error, :mnesia_unexpectedly_running}, %{node: node_name}) do
+ {:error, RabbitMQ.CLI.Core.ExitCodes.exit_software(),
+ RabbitMQ.CLI.DefaultOutput.mnesia_running_error(node_name)}
+ end
+
+ use RabbitMQ.CLI.DefaultOutput
+
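+ # "disk" is accepted as an alias and normalised to :disc, so both
+ # normalize_type(:disk) and normalize_type(:disc) return :disc.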
+ defp normalize_type(:ram) do
+ :ram
+ end
+
+ defp normalize_type(:disc) do
+ :disc
+ end
+
+ defp normalize_type(:disk) do
+ :disc
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/change_password_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/change_password_command.ex
new file mode 100644
index 0000000000..b0dec0a824
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/change_password_command.ex
@@ -0,0 +1,76 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Ctl.Commands.ChangePasswordCommand do
+ alias RabbitMQ.CLI.Core.{DocGuide, ExitCodes, Helpers, Input}
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ use RabbitMQ.CLI.Core.MergesNoDefaults
+
+ def validate(args, _) when length(args) < 1, do: {:validation_failure, :not_enough_args}
+ def validate(args, _) when length(args) > 2, do: {:validation_failure, :too_many_args}
+ def validate([_], _), do: :ok
+ def validate([_, _], _), do: :ok
+
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+
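+ # With a single argument the password is read via Input.infer_password/2:
+ # prompted for interactively, or taken from stdin when piped, e.g.
+ # (illustrative shell invocation):
+ #
+ #     echo "s3kr3t" | rabbitmqctl change_password bob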
+ def run([username], %{node: node_name} = opts) do
+ # note: blank passwords are currently allowed; they make sense,
+ # e.g., when a user only authenticates using X.509 certificates.
+ # Credential validators can be used to require passwords of a certain length
+ # or following a certain pattern. This is a core server responsibility. MK.
+ case Input.infer_password("Password: ", opts) do
+ :eof -> {:error, :not_enough_args}
+ password -> :rabbit_misc.rpc_call(
+ node_name,
+ :rabbit_auth_backend_internal,
+ :change_password,
+ [username, password, Helpers.cli_acting_user()]
+ )
+ end
+ end
+ def run([username, password], %{node: node_name}) do
+ :rabbit_misc.rpc_call(
+ node_name,
+ :rabbit_auth_backend_internal,
+ :change_password,
+ [username, password, Helpers.cli_acting_user()]
+ )
+ end
+
+ def output({:error, :not_enough_args}, _) do
+ {:error, ExitCodes.exit_software(), "Password is not provided via argument or stdin"}
+ end
+ def output({:error, {:no_such_user, username}}, %{node: node_name, formatter: "json"}) do
+ {:error, %{"result" => "error", "node" => node_name, "message" => "User #{username} does not exists"}}
+ end
+ def output({:error, {:no_such_user, username}}, _) do
+ {:error, ExitCodes.exit_nouser(), "User \"#{username}\" does not exist"}
+ end
+ use RabbitMQ.CLI.DefaultOutput
+
+ def usage, do: "change_password <username> <password>"
+
+ def usage_additional() do
+ [
+ ["<username>", "Name of the user whose password should be changed"],
+ ["<password>", "New password to set"]
+ ]
+ end
+
+ def usage_doc_guides() do
+ [
+ DocGuide.access_control()
+ ]
+ end
+
+ def help_section(), do: :user_management
+
+ def description(), do: "Changes the user password"
+
+ def banner([user | _], _), do: "Changing password for user \"#{user}\" ..."
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/clear_global_parameter_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/clear_global_parameter_command.ex
new file mode 100644
index 0000000000..c5cedeb96a
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/clear_global_parameter_command.ex
@@ -0,0 +1,48 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Ctl.Commands.ClearGlobalParameterCommand do
+ alias RabbitMQ.CLI.Core.{DocGuide, Helpers}
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ use RabbitMQ.CLI.Core.MergesNoDefaults
+ use RabbitMQ.CLI.Core.AcceptsOnePositionalArgument
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+
+ def run([key], %{node: node_name}) do
+ :rabbit_misc.rpc_call(
+ node_name,
+ :rabbit_runtime_parameters,
+ :clear_global,
+ [key, Helpers.cli_acting_user()]
+ )
+ end
+
+ use RabbitMQ.CLI.DefaultOutput
+
+ def usage, do: "clear_global_parameter <name>"
+
+ def usage_additional() do
+ [
+ ["<name>", "parameter name (identifier)"]
+ ]
+ end
+
+ def usage_doc_guides() do
+ [
+ DocGuide.parameters()
+ ]
+ end
+
+ def help_section(), do: :parameters
+
+ def description(), do: "Clears a global runtime parameter"
+
+ def banner([key], _) do
+ "Clearing global runtime parameter \"#{key}\" ..."
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/clear_operator_policy_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/clear_operator_policy_command.ex
new file mode 100644
index 0000000000..4b77d4cb38
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/clear_operator_policy_command.ex
@@ -0,0 +1,49 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Ctl.Commands.ClearOperatorPolicyCommand do
+ alias RabbitMQ.CLI.Core.{DocGuide, Helpers}
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+ use RabbitMQ.CLI.DefaultOutput
+
+ def merge_defaults(args, opts) do
+ {args, Map.merge(%{vhost: "/"}, opts)}
+ end
+
+ use RabbitMQ.CLI.Core.AcceptsOnePositionalArgument
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+
+ def run([key], %{node: node_name, vhost: vhost}) do
+ :rabbit_misc.rpc_call(node_name, :rabbit_policy, :delete_op, [
+ vhost,
+ key,
+ Helpers.cli_acting_user()
+ ])
+ end
+
+ def usage, do: "clear_operator_policy [--vhost <vhost>] <name>"
+
+ def usage_additional() do
+ [
+ ["<name>", "policy name (identifier)"]
+ ]
+ end
+
+ def usage_doc_guides() do
+ [
+ DocGuide.parameters()
+ ]
+ end
+
+ def help_section(), do: :policies
+
+ def description(), do: "Clears an operator policy"
+
+ def banner([key], %{vhost: vhost}) do
+ "Clearing operator policy \"#{key}\" on vhost \"#{vhost}\" ..."
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/clear_parameter_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/clear_parameter_command.ex
new file mode 100644
index 0000000000..3997b1b61f
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/clear_parameter_command.ex
@@ -0,0 +1,60 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Ctl.Commands.ClearParameterCommand do
+ alias RabbitMQ.CLI.Core.{DocGuide, Helpers}
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+ use RabbitMQ.CLI.DefaultOutput
+
+ def merge_defaults(args, opts) do
+ {args, Map.merge(%{vhost: "/"}, opts)}
+ end
+
+ def validate(args, _) when is_list(args) and length(args) < 2 do
+ {:validation_failure, :not_enough_args}
+ end
+
+ def validate([_ | _] = args, _) when length(args) > 2 do
+ {:validation_failure, :too_many_args}
+ end
+
+ def validate([_, _], _), do: :ok
+
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+
+ def run([component_name, key], %{node: node_name, vhost: vhost}) do
+ :rabbit_misc.rpc_call(
+ node_name,
+ :rabbit_runtime_parameters,
+ :clear,
+ [vhost, component_name, key, Helpers.cli_acting_user()]
+ )
+ end
+
+ def usage, do: "clear_parameter [--vhost <vhost>] <component_name> <name>"
+
+ def usage_additional() do
+ [
+ ["<component_name>", "component name"],
+ ["<name>", "parameter name (identifier)"]
+ ]
+ end
+
+ def usage_doc_guides() do
+ [
+ DocGuide.parameters()
+ ]
+ end
+
+ def help_section(), do: :parameters
+
+ def description(), do: "Clears a runtime parameter."
+
+ def banner([component_name, key], %{vhost: vhost}) do
+ "Clearing runtime parameter \"#{key}\" for component \"#{component_name}\" on vhost \"#{vhost}\" ..."
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/clear_password_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/clear_password_command.ex
new file mode 100644
index 0000000000..398af4813b
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/clear_password_command.ex
@@ -0,0 +1,46 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Ctl.Commands.ClearPasswordCommand do
+ alias RabbitMQ.CLI.Core.{DocGuide, Helpers}
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ use RabbitMQ.CLI.Core.MergesNoDefaults
+ use RabbitMQ.CLI.Core.AcceptsOnePositionalArgument
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+
+ def run([_user] = args, %{node: node_name}) do
+ :rabbit_misc.rpc_call(
+ node_name,
+ :rabbit_auth_backend_internal,
+ :clear_password,
+ args ++ [Helpers.cli_acting_user()]
+ )
+ end
+
+ use RabbitMQ.CLI.DefaultOutput
+
+ def usage, do: "clear_password <username>"
+
+ def usage_additional() do
+ [
+ ["<username>", "Name of the user whose password should be cleared"]
+ ]
+ end
+
+ def usage_doc_guides() do
+ [
+ DocGuide.access_control()
+ ]
+ end
+
+ def help_section(), do: :user_management
+
+ def description(), do: "Clears (resets) password and disables password login for a user"
+
+ def banner([user], _), do: "Clearing password for user \"#{user}\" ..."
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/clear_permissions_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/clear_permissions_command.ex
new file mode 100644
index 0000000000..2fd129fffa
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/clear_permissions_command.ex
@@ -0,0 +1,61 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Ctl.Commands.ClearPermissionsCommand do
+ alias RabbitMQ.CLI.Core.{DocGuide, ExitCodes, Helpers}
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ def merge_defaults(args, opts) do
+ {args, Map.merge(%{vhost: "/"}, opts)}
+ end
+
+ use RabbitMQ.CLI.Core.AcceptsOnePositionalArgument
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+
+ def run([username], %{node: node_name, vhost: vhost}) do
+ :rabbit_misc.rpc_call(node_name, :rabbit_auth_backend_internal, :clear_permissions, [
+ username,
+ vhost,
+ Helpers.cli_acting_user()
+ ])
+ end
+
+ def output({:error, {:no_such_user, username}}, %{node: node_name, formatter: "json"}) do
+ {:error, %{"result" => "error", "node" => node_name, "message" => "User #{username} does not exist"}}
+ end
+ def output({:error, {:no_such_vhost, vhost}}, %{node: node_name, formatter: "json"}) do
+ {:error, %{"result" => "error", "node" => node_name, "message" => "Virtual host #{vhost} does not exist"}}
+ end
+ def output({:error, {:no_such_user, username}}, _) do
+ {:error, ExitCodes.exit_nouser(), "User #{username} does not exist"}
+ end
+ def output({:error, {:no_such_vhost, vhost}}, _) do
+ {:error, "Virtual host #{vhost} does not exist"}
+ end
+ use RabbitMQ.CLI.DefaultOutput
+
+ def usage, do: "clear_permissions [--vhost <vhost>] <username>"
+
+ def usage_additional() do
+ [
+ ["<username>", "Name of the user whose permissions should be revoked"]
+ ]
+ end
+
+ def usage_doc_guides() do
+ [
+ DocGuide.access_control()
+ ]
+ end
+
+ def help_section(), do: :access_control
+ def description(), do: "Revokes user permissions for a vhost"
+
+ def banner([username], %{vhost: vhost}) do
+ "Clearing permissions for user \"#{username}\" in vhost \"#{vhost}\" ..."
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/clear_policy_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/clear_policy_command.ex
new file mode 100644
index 0000000000..057c2e8c24
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/clear_policy_command.ex
@@ -0,0 +1,49 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Ctl.Commands.ClearPolicyCommand do
+ alias RabbitMQ.CLI.Core.{Helpers, DocGuide}
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+ use RabbitMQ.CLI.DefaultOutput
+
+ def merge_defaults(args, opts) do
+ {args, Map.merge(%{vhost: "/"}, opts)}
+ end
+
+ use RabbitMQ.CLI.Core.AcceptsOnePositionalArgument
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+
+ def run([key], %{node: node_name, vhost: vhost}) do
+ :rabbit_misc.rpc_call(node_name, :rabbit_policy, :delete, [
+ vhost,
+ key,
+ Helpers.cli_acting_user()
+ ])
+ end
+
+ def usage, do: "clear_policy [--vhost <vhost>] <name>"
+
+ def usage_additional() do
+ [
+ ["<name>", "Name of policy to clear (remove)"]
+ ]
+ end
+
+ def usage_doc_guides() do
+ [
+ DocGuide.parameters()
+ ]
+ end
+
+ def help_section(), do: :policies
+
+ def description(), do: "Clears (removes) a policy"
+
+ def banner([key], %{vhost: vhost}) do
+ "Clearing policy \"#{key}\" on vhost \"#{vhost}\" ..."
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/clear_topic_permissions_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/clear_topic_permissions_command.ex
new file mode 100644
index 0000000000..5d0b249db6
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/clear_topic_permissions_command.ex
@@ -0,0 +1,85 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Ctl.Commands.ClearTopicPermissionsCommand do
+ alias RabbitMQ.CLI.Core.{DocGuide, ExitCodes, Helpers}
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ def merge_defaults(args, opts) do
+ {args, Map.merge(%{vhost: "/"}, opts)}
+ end
+
+ def validate([], _) do
+ {:validation_failure, :not_enough_args}
+ end
+
+ def validate([_ | _] = args, _) when length(args) > 2 do
+ {:validation_failure, :too_many_args}
+ end
+
+ def validate([_], _), do: :ok
+ def validate([_, _], _), do: :ok
+
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+
+ def run([username], %{node: node_name, vhost: vhost}) do
+ :rabbit_misc.rpc_call(node_name, :rabbit_auth_backend_internal, :clear_topic_permissions, [
+ username,
+ vhost,
+ Helpers.cli_acting_user()
+ ])
+ end
+
+ def run([username, exchange], %{node: node_name, vhost: vhost}) do
+ :rabbit_misc.rpc_call(node_name, :rabbit_auth_backend_internal, :clear_topic_permissions, [
+ username,
+ vhost,
+ exchange,
+ Helpers.cli_acting_user()
+ ])
+ end
+
+ def output({:error, {:no_such_user, username}}, %{node: node_name, formatter: "json"}) do
+ {:error, %{"result" => "error", "node" => node_name, "message" => "User #{username} does not exist"}}
+ end
+ def output({:error, {:no_such_vhost, vhost}}, %{node: node_name, formatter: "json"}) do
+ {:error, %{"result" => "error", "node" => node_name, "message" => "Virtual host #{vhost} does not exist"}}
+ end
+ def output({:error, {:no_such_user, username}}, _) do
+ {:error, ExitCodes.exit_nouser(), "User #{username} does not exist"}
+ end
+ def output({:error, {:no_such_vhost, vhost}}, _) do
+ {:error, "Virtual host #{vhost} does not exist"}
+ end
+ use RabbitMQ.CLI.DefaultOutput
+
+ def usage, do: "clear_topic_permissions [--vhost <vhost>] <username> [<exchange>]"
+
+ def usage_additional() do
+ [
+ ["<username>", "Name of the user whose topic permissions should be revoked"],
+ ["<exchange>", "Exchange the permissions are for"]
+ ]
+ end
+
+ def usage_doc_guides() do
+ [
+ DocGuide.access_control()
+ ]
+ end
+
+ def help_section(), do: :access_control
+ def description(), do: "Clears user topic permissions for a vhost or exchange"
+
+ def banner([username], %{vhost: vhost}) do
+ "Clearing topic permissions for user \"#{username}\" in vhost \"#{vhost}\" ..."
+ end
+
+ def banner([username, exchange], %{vhost: vhost}) do
+ "Clearing topic permissions on \"#{exchange}\" for user \"#{username}\" in vhost \"#{vhost}\" ..."
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/clear_user_limits_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/clear_user_limits_command.ex
new file mode 100644
index 0000000000..301de613bb
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/clear_user_limits_command.ex
@@ -0,0 +1,50 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Ctl.Commands.ClearUserLimitsCommand do
+ alias RabbitMQ.CLI.Core.{DocGuide, Helpers}
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+ use RabbitMQ.CLI.DefaultOutput
+
+ use RabbitMQ.CLI.Core.MergesNoDefaults
+ use RabbitMQ.CLI.Core.AcceptsTwoPositionalArguments
+
+ def run([username, limit_type], %{node: node_name}) do
+ :rabbit_misc.rpc_call(node_name, :rabbit_auth_backend_internal, :clear_user_limits, [
+ username,
+ limit_type,
+ Helpers.cli_acting_user()
+ ])
+ end
+
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+
+ def usage, do: "clear_user_limits username <limit_type> | all"
+
+ def usage_additional() do
+ [
+ ["<limit_type>", "Limit type, must be max-connections or max-channels"]
+ ]
+ end
+
+ def usage_doc_guides() do
+ [
+ DocGuide.access_control()
+ ]
+ end
+
+ def help_section(), do: :user_management
+
+ def description(), do: "Clears user connection/channel limits"
+
+ def banner([username, "all"], %{}) do
+ "Clearing all limits for user \"#{username}\" ..."
+ end
+ def banner([username, limit_type], %{}) do
+ "Clearing \"#{limit_type}\" limit for user \"#{username}\" ..."
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/clear_vhost_limits_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/clear_vhost_limits_command.ex
new file mode 100644
index 0000000000..a73f0ff670
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/clear_vhost_limits_command.ex
@@ -0,0 +1,43 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Ctl.Commands.ClearVhostLimitsCommand do
+ alias RabbitMQ.CLI.Core.{DocGuide, Helpers}
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+ use RabbitMQ.CLI.DefaultOutput
+
+ def merge_defaults(args, opts) do
+ {args, Map.merge(%{vhost: "/"}, opts)}
+ end
+
+ use RabbitMQ.CLI.Core.AcceptsNoPositionalArguments
+
+ def run([], %{node: node_name, vhost: vhost}) do
+ :rabbit_misc.rpc_call(node_name, :rabbit_vhost_limit, :clear, [
+ vhost,
+ Helpers.cli_acting_user()
+ ])
+ end
+
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+
+ def usage, do: "clear_vhost_limits [--vhost <vhost>]"
+
+ def usage_doc_guides() do
+ [
+ DocGuide.virtual_hosts()
+ ]
+ end
+
+ def help_section(), do: :virtual_hosts
+
+ def description(), do: "Clears virtual host limits"
+
+ def banner([], %{vhost: vhost}) do
+ "Clearing vhost \"#{vhost}\" limits ..."
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/close_all_connections_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/close_all_connections_command.ex
new file mode 100644
index 0000000000..d4c5b5f17a
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/close_all_connections_command.ex
@@ -0,0 +1,124 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Ctl.Commands.CloseAllConnectionsCommand do
+ alias RabbitMQ.CLI.Core.DocGuide
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ def switches(), do: [global: :boolean, per_connection_delay: :integer, limit: :integer]
+
+ def merge_defaults(args, opts) do
+ {args, Map.merge(%{global: false, vhost: "/", per_connection_delay: 0, limit: 0}, opts)}
+ end
+
+ use RabbitMQ.CLI.Core.AcceptsOnePositionalArgument
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+
+ def run([explanation], %{
+ node: node_name,
+ vhost: vhost,
+ global: global_opt,
+ per_connection_delay: delay,
+ limit: limit
+ }) do
+ conns =
+ case global_opt do
+ false ->
+ per_vhost =
+ :rabbit_misc.rpc_call(node_name, :rabbit_connection_tracking, :list, [vhost])
+
+ apply_limit(per_vhost, limit)
+
+ true ->
+ :rabbit_misc.rpc_call(node_name, :rabbit_connection_tracking, :list_on_node, [node_name])
+ end
+
+ case conns do
+ {:badrpc, _} = err ->
+ err
+
+ _ ->
+ :rabbit_misc.rpc_call(
+ node_name,
+ # As of 3.7.15, this function was moved to the rabbit_connection_tracking module.
+ # rabbit_connection_tracking_handler still contains a delegating function with the same name.
+ # Continue using this one for now for maximum CLI/server version compatibility. MK.
+ :rabbit_connection_tracking_handler,
+ :close_connections,
+ [conns, explanation, delay]
+ )
+
+ {:ok, "Closed #{length(conns)} connections"}
+ end
+ end
+
+ def run(args, %{
+ node: node_name,
+ global: global_opt,
+ per_connection_delay: delay,
+ limit: limit
+ }) do
+ run(args, %{
+ node: node_name,
+ vhost: nil,
+ global: global_opt,
+ per_connection_delay: delay,
+ limit: limit
+ })
+ end
+
+ def output({:stream, stream}, _opts) do
+ {:stream, Stream.filter(stream, fn x -> x != :ok end)}
+ end
+ use RabbitMQ.CLI.DefaultOutput
+
+ def banner([explanation], %{node: node_name, global: true}) do
+ "Closing all connections to node #{node_name} (across all vhosts), reason: #{explanation}..."
+ end
+
+ def banner([explanation], %{vhost: vhost, limit: 0}) do
+ "Closing all connections in vhost #{vhost}, reason: #{explanation}..."
+ end
+
+ def banner([explanation], %{vhost: vhost, limit: limit}) do
+ "Closing #{limit} connections in vhost #{vhost}, reason: #{explanation}..."
+ end
+
+ def usage do
+ "close_all_connections [--vhost <vhost> --limit <limit>] [-n <node> --global] [--per-connection-delay <delay>] <explanation>"
+ end
+
+ def usage_additional do
+ [
+ ["--global", "consider connections across all virtual hosts"],
+ ["--limit <number>", "close up to this many connections"],
+ ["--per-connection-delay <milliseconds>", "inject a delay between closures"]
+ ]
+ end
+
+ def usage_doc_guides() do
+ [
+ DocGuide.connections()
+ ]
+ end
+
+ def help_section(), do: :operations
+
+ def description(), do: "Instructs the broker to close all connections for the specified vhost or entire RabbitMQ node"
+
+ #
+ # Implementation
+ #
+
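+ # apply_limit/2 treats 0 as "no limit"; per :lists.sublist/2 semantics:
+ #
+ #     apply_limit([c1, c2, c3], 0)  #=> [c1, c2, c3]
+ #     apply_limit([c1, c2, c3], 2)  #=> [c1, c2]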
+ defp apply_limit(conns, 0) do
+ conns
+ end
+
+ defp apply_limit(conns, number) do
+ :lists.sublist(conns, number)
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/close_connection_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/close_connection_command.ex
new file mode 100644
index 0000000000..371c582b19
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/close_connection_command.ex
@@ -0,0 +1,46 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Ctl.Commands.CloseConnectionCommand do
+ alias RabbitMQ.CLI.Core.DocGuide
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ use RabbitMQ.CLI.Core.MergesNoDefaults
+ use RabbitMQ.CLI.Core.AcceptsTwoPositionalArguments
+
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+
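+ # :rabbit_misc.string_to_pid/1 turns the textual pid reported by
+ # list_connections (e.g. "<0.123.0>") back into an Erlang pid term.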
+ def run([pid, explanation], %{node: node_name}) do
+ :rabbit_misc.rpc_call(node_name, :rabbit_networking, :close_connection, [
+ :rabbit_misc.string_to_pid(pid),
+ explanation
+ ])
+ end
+
+ use RabbitMQ.CLI.DefaultOutput
+
+ def usage, do: "close_connection <connection pid> <explanation>"
+
+ def usage_additional do
+ [
+ ["<connection pid>", "connection identifier (Erlang PID), see list_connections"],
+ ["<explanation>", "reason for connection closure"]
+ ]
+ end
+
+ def usage_doc_guides() do
+ [
+ DocGuide.connections()
+ ]
+ end
+
+ def help_section(), do: :operations
+
+ def description(), do: "Instructs the broker to close the connection associated with the Erlang process id"
+
+ def banner([pid, explanation], _), do: "Closing connection #{pid}, reason: #{explanation}..."
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/cluster_status_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/cluster_status_command.ex
new file mode 100644
index 0000000000..20f96e8075
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/cluster_status_command.ex
@@ -0,0 +1,285 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Ctl.Commands.ClusterStatusCommand do
+ alias RabbitMQ.CLI.Core.DocGuide
+ import RabbitMQ.CLI.Core.{Alarms, ANSI, Listeners, Platform, FeatureFlags}
+ import RabbitMQ.CLI.Core.Distribution, only: [per_node_timeout: 2]
+ import RabbitMQ.CLI.Core.DataCoercion
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ @default_timeout 60_000
+
+ def scopes(), do: [:ctl, :diagnostics]
+
+ use RabbitMQ.CLI.Core.AcceptsDefaultSwitchesAndTimeout
+
+ def merge_defaults(args, opts) do
+ timeout =
+ case opts[:timeout] do
+ nil -> @default_timeout
+ :infinity -> @default_timeout
+ other -> other
+ end
+
+ {args, Map.merge(%{timeout: timeout}, opts)}
+ end
+
+ use RabbitMQ.CLI.Core.AcceptsNoPositionalArguments
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+
+ def run([], %{node: node_name, timeout: timeout}) do
+ case :rabbit_misc.rpc_call(node_name, :rabbit_mnesia, :status, []) do
+ {:badrpc, _} = err ->
+ err
+
+ status ->
+ case :rabbit_misc.rpc_call(node_name, :rabbit_mnesia, :cluster_nodes, [:running]) do
+ {:badrpc, _} = err ->
+ err
+
+ {:error, {:corrupt_or_missing_cluster_files, _, _}} ->
+ {:error, "Could not read mnesia files containing cluster status"}
+
+ nodes ->
+ count = length(nodes)
+ alarms_by_node = Enum.map(nodes, fn n -> alarms_by_node(n, per_node_timeout(timeout, count)) end)
+ listeners_by_node = Enum.map(nodes, fn n -> listeners_of(n, per_node_timeout(timeout, count)) end)
+ versions_by_node = Enum.map(nodes, fn n -> versions_by_node(n, per_node_timeout(timeout, count)) end)
+ maintenance_status_by_node = Enum.map(nodes,
+ fn n -> maintenance_status_by_node(n, per_node_timeout(timeout, count)) end)
+
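+ # An {:undef, _} badrpc means the remote node predates feature flags
+ # (no rabbit_ff_extra module); report an empty list rather than an error.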
+ feature_flags = case :rabbit_misc.rpc_call(node_name, :rabbit_ff_extra, :cli_info, [], timeout) do
+ {:badrpc, {:EXIT, {:undef, _}}} -> []
+ {:badrpc, _} = err -> err
+ val -> val
+ end
+
+ status ++
+ [{:alarms, alarms_by_node}] ++
+ [{:listeners, listeners_by_node}] ++
+ [{:versions, versions_by_node}] ++
+ [{:maintenance_status, maintenance_status_by_node}] ++
+ [{:feature_flags, feature_flags}]
+ end
+ end
+ end
+
+ def output({:error, :timeout}, %{node: node_name}) do
+ {:error, RabbitMQ.CLI.Core.ExitCodes.exit_software(),
+ "Error: timed out while waiting for a response from #{node_name}."}
+ end
+
+ def output(result, %{formatter: "erlang"}) do
+ {:ok, result}
+ end
+
+ def output(result, %{formatter: "json"}) when is_list(result) do
+ # format more data structures as map for sensible JSON output
+ m = result_map(result)
+ |> Map.update(:alarms, [], fn xs -> alarm_maps(xs) end)
+ |> Map.update(:listeners, %{}, fn m ->
+ Enum.map(m, fn {n, xs} -> {n, listener_maps(xs)} end) |> Enum.into(%{})
+ end)
+
+ {:ok, m}
+ end
+
+ def output(result, %{node: node_name}) when is_list(result) do
+ m = result_map(result)
+
+ cluster_name_section = [
+ "#{bright("Basics")}\n",
+ "Cluster name: #{m[:cluster_name]}"
+ ]
+
+ disk_nodes_section = [
+ "\n#{bright("Disk Nodes")}\n",
+ ] ++ node_lines(m[:disk_nodes])
+
+ ram_nodes_section = case m[:ram_nodes] do
+ [] -> []
+ xs -> [
+ "\n#{bright("RAM Nodes")}\n",
+ ] ++ node_lines(xs)
+ end
+
+ running_nodes_section = [
+ "\n#{bright("Running Nodes")}\n",
+ ] ++ node_lines(m[:running_nodes])
+
+ versions_section = [
+ "\n#{bright("Versions")}\n",
+ ] ++ version_lines(m[:versions])
+
+ alarms_section = [
+ "\n#{bright("Alarms")}\n",
+ ] ++ case m[:alarms] do
+ [] -> ["(none)"]
+ xs -> alarm_lines(xs, node_name)
+ end
+
+ partitions_section = [
+ "\n#{bright("Network Partitions")}\n"
+ ] ++ case map_size(m[:partitions]) do
+ 0 -> ["(none)"]
+ _ -> partition_lines(m[:partitions])
+ end
+
+ listeners_section = [
+ "\n#{bright("Listeners")}\n"
+ ] ++ case map_size(m[:listeners]) do
+ 0 -> ["(none)"]
+ _ -> Enum.reduce(m[:listeners], [], fn {node, listeners}, acc ->
+ acc ++ listener_lines(listeners, node)
+ end)
+ end
+
+ maintenance_section = [
+ "\n#{bright("Maintenance status")}\n",
+ ] ++ maintenance_lines(m[:maintenance_status])
+
+ feature_flags_section = [
+ "\n#{bright("Feature flags")}\n"
+ ] ++ case Enum.count(m[:feature_flags]) do
+ 0 -> ["(none)"]
+ _ -> feature_flag_lines(m[:feature_flags])
+ end
+
+ lines = cluster_name_section ++ disk_nodes_section ++ ram_nodes_section ++ running_nodes_section ++
+ versions_section ++ maintenance_section ++ alarms_section ++ partitions_section ++
+ listeners_section ++ feature_flags_section
+
+ {:ok, Enum.join(lines, line_separator())}
+ end
+
+ use RabbitMQ.CLI.DefaultOutput
+
+ def formatter(), do: RabbitMQ.CLI.Formatters.String
+
+ def usage, do: "cluster_status"
+
+ def usage_doc_guides() do
+ [
+ DocGuide.clustering(),
+ DocGuide.cluster_formation(),
+ DocGuide.monitoring()
+ ]
+ end
+
+ def help_section(), do: :cluster_management
+
+ def description(), do: "Displays all the nodes in the cluster grouped by node type, together with the currently running nodes"
+
+ def banner(_, %{node: node_name}), do: "Cluster status of node #{node_name} ..."
+
+ #
+ # Implementation
+ #
+
+ defp result_map(result) do
+ # [{nodes,[{disc,[hare@warp10,rabbit@warp10]},{ram,[flopsy@warp10]}]},
+ # {running_nodes,[flopsy@warp10,hare@warp10,rabbit@warp10]},
+ # {cluster_name,<<"rabbit@localhost">>},
+ # {partitions,[{flopsy@warp10,[rabbit@vagrant]},
+ # {hare@warp10,[rabbit@vagrant]}]},
+ # {alarms,[{flopsy@warp10,[]},
+ # {hare@warp10,[]},
+ # {rabbit@warp10,[{resource_limit,memory,rabbit@warp10}]}]}]
+ %{
+ cluster_name: Keyword.get(result, :cluster_name),
+ disk_nodes: result |> Keyword.get(:nodes, []) |> Keyword.get(:disc, []),
+ ram_nodes: result |> Keyword.get(:nodes, []) |> Keyword.get(:ram, []),
+ running_nodes: result |> Keyword.get(:running_nodes, []) |> Enum.map(&to_string/1),
+ alarms: Keyword.get(result, :alarms) |> Keyword.values |> Enum.concat |> Enum.uniq,
+ maintenance_status: Keyword.get(result, :maintenance_status, []) |> Enum.into(%{}),
+ partitions: Keyword.get(result, :partitions, []) |> Enum.into(%{}),
+ listeners: Keyword.get(result, :listeners, []) |> Enum.into(%{}),
+ versions: Keyword.get(result, :versions, []) |> Enum.into(%{}),
+ feature_flags: Keyword.get(result, :feature_flags, []) |> Enum.map(fn ff -> Enum.into(ff, %{}) end)
+ }
+ end
+
+ defp alarms_by_node(node, timeout) do
+ alarms = case :rabbit_misc.rpc_call(to_atom(node), :rabbit, :alarms, [], timeout) do
+ {:badrpc, _} -> []
+ xs -> xs
+ end
+
+ {node, alarms}
+ end
+
+ defp listeners_of(node, timeout) do
+ # This may seem inefficient since this call returns all known listeners
+ # in the cluster, so why do we run it on every node? See the badrpc clause:
+ # some nodes may be unavailable or partitioned from other nodes. This way we
+ # gather as complete a picture as possible. MK.
+ listeners = case :rabbit_misc.rpc_call(to_atom(node), :rabbit_networking, :active_listeners, [], timeout) do
+ {:badrpc, _} -> []
+ xs -> xs
+ end
+
+ {node, listeners_on(listeners, node)}
+ end
+
+ defp versions_by_node(node, timeout) do
+ {rmq_name, rmq_vsn, otp_vsn} = case :rabbit_misc.rpc_call(
+ to_atom(node), :rabbit, :product_info, [], timeout) do
+ {:badrpc, _} ->
+ {nil, nil, nil}
+ map ->
+ %{:otp_release => otp} = map
+ name = case map do
+ %{:product_name => v} -> v
+ %{:product_base_name => v} -> v
+ end
+ vsn = case map do
+ %{:product_version => v} -> v
+ %{:product_base_version => v} -> v
+ end
+ {name, vsn, otp}
+ end
+
+ {node, %{rabbitmq_name: to_string(rmq_name), rabbitmq_version: to_string(rmq_vsn), erlang_version: to_string(otp_vsn)}}
+ end
+
+ defp maintenance_status_by_node(node, timeout) do
+ target = to_atom(node)
+ result = case :rabbit_misc.rpc_call(target,
+ :rabbit_maintenance, :status_local_read, [target], timeout) do
+ {:badrpc, _} -> "unknown"
+ :regular -> "not under maintenance"
+ :draining -> magenta("marked for maintenance")
+ # forward compatibility: should we figure out a way to know when
+ # draining completes (it involves inherently asynchronous cluster
+ # operations such as quorum queue leader re-election), we'd introduce
+ # a new state
+ :drained -> magenta("marked for maintenance")
+ value -> to_string(value)
+ end
+
+ {node, result}
+ end
+
+ defp node_lines(nodes) do
+ Enum.map(nodes, &to_string/1) |> Enum.sort
+ end
+
+ defp version_lines(mapping) do
+ Enum.map(mapping, fn {node, %{rabbitmq_name: rmq_name, rabbitmq_version: rmq_vsn, erlang_version: otp_vsn}} ->
+ "#{node}: #{rmq_name} #{rmq_vsn} on Erlang #{otp_vsn}"
+ end)
+ end
+
+ defp partition_lines(mapping) do
+ Enum.map(mapping, fn {node, unreachable_peers} -> "Node #{node} cannot communicate with #{Enum.join(unreachable_peers, ", ")}" end)
+ end
+
+ defp maintenance_lines(mapping) do
+ Enum.map(mapping, fn {node, status} -> "Node: #{node}, status: #{status}" end)
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/decode_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/decode_command.ex
new file mode 100644
index 0000000000..015617b102
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/decode_command.ex
@@ -0,0 +1,116 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+alias RabbitMQ.CLI.Core.Helpers
+
+defmodule RabbitMQ.CLI.Ctl.Commands.DecodeCommand do
+ alias RabbitMQ.CLI.Core.DocGuide
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+ use RabbitMQ.CLI.DefaultOutput
+
+ def switches() do
+ [
+ cipher: :atom,
+ hash: :atom,
+ iterations: :integer
+ ]
+ end
+
+ def distribution(_), do: :none
+
+ def merge_defaults(args, opts) do
+ with_defaults = Map.merge(%{
+ cipher: :rabbit_pbe.default_cipher(),
+ hash: :rabbit_pbe.default_hash(),
+ iterations: :rabbit_pbe.default_iterations()
+ }, opts)
+ {args, with_defaults}
+ end
+
+ def validate(args, _) when length(args) < 2 do
+ {:validation_failure, {:not_enough_args, "Please provide a value to decode and a passphrase"}}
+ end
+
+ def validate(args, _) when length(args) > 2 do
+ {:validation_failure, :too_many_args}
+ end
+
+ def validate(args, opts) when length(args) === 2 do
+ case {supports_cipher(opts.cipher), supports_hash(opts.hash), opts.iterations > 0} do
+ {false, _, _} ->
+ {:validation_failure, {:bad_argument, "The requested cipher is not supported"}}
+
+ {_, false, _} ->
+ {:validation_failure, {:bad_argument, "The requested hash is not supported"}}
+
+ {_, _, false} ->
+ {:validation_failure,
+ {:bad_argument,
+ "The requested number of iterations is incorrect (must be a positive integer)"}}
+
+ {true, true, true} ->
+ :ok
+ end
+ end
+
+ def run([value, passphrase], %{cipher: cipher, hash: hash, iterations: iterations}) do
+ try do
+ term_value = Helpers.evaluate_input_as_term(value)
+
+ term_to_decrypt =
+ case term_value do
+ {:encrypted, _} = encrypted ->
+ encrypted
+ _ ->
+ {:encrypted, term_value}
+ end
+
+ result = :rabbit_pbe.decrypt_term(cipher, hash, iterations, passphrase, term_to_decrypt)
+ {:ok, result}
+ catch
+ _, _ ->
+ {:error,
+ "Failed to decrypt the value. Things to check: is the passphrase correct? Are the cipher and hash algorithms the same as those used for encryption?"}
+ end
+ end
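+ # A sketch of a typical invocation (the value shown is illustrative; the
+ # cipher and hash must match the ones used when the value was encrypted):
+ #
+ #     rabbitmqctl decode '{encrypted, <<"...">>}' mypassphrase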
+
+ def formatter(), do: RabbitMQ.CLI.Formatters.Erlang
+
+ def banner([_, _], _) do
+ "Decrypting value ..."
+ end
+
+ def usage, do: "decode value passphrase [--cipher <cipher>] [--hash <hash>] [--iterations <iterations>]"
+
+ def usage_additional() do
+ [
+ ["<value>", "config value to decode"],
+ ["<passphrase>", "passphrase to use with the config value encryption key"],
+ ["--cipher <cipher>", "cipher suite to use"],
+ ["--hash <hash>", "hashing function to use"],
+ ["--iterations <iterations>", "number of iteration to apply"]
+ ]
+ end
+
+ def usage_doc_guides() do
+ [
+ DocGuide.configuration()
+ ]
+ end
+
+ def help_section(), do: :configuration
+
+ def description(), do: "Decrypts an encrypted configuration value"
+
+ #
+ # Implementation
+ #
+
+ defp supports_cipher(cipher), do: Enum.member?(:rabbit_pbe.supported_ciphers(), cipher)
+
+ defp supports_hash(hash), do: Enum.member?(:rabbit_pbe.supported_hashes(), hash)
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/delete_queue_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/delete_queue_command.ex
new file mode 100644
index 0000000000..5e5fe9b9c0
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/delete_queue_command.ex
@@ -0,0 +1,125 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Ctl.Commands.DeleteQueueCommand do
+ alias RabbitMQ.CLI.Core.DocGuide
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ def switches(), do: [if_empty: :boolean, if_unused: :boolean, timeout: :integer]
+ def aliases(), do: [e: :if_empty, u: :if_unused, t: :timeout]
+
+ def merge_defaults(args, opts) do
+ {
+ args,
+ Map.merge(%{if_empty: false, if_unused: false, vhost: "/"}, opts)
+ }
+ end
+
+ def validate([], _opts) do
+ {:validation_failure, :not_enough_args}
+ end
+
+ def validate(args, _opts) when length(args) > 1 do
+ {:validation_failure, :too_many_args}
+ end
+
+ def validate([""], _opts) do
+ {
+ :validation_failure,
+ {:bad_argument, "queue name cannot be an empty string"}
+ }
+ end
+
+ def validate([_], _opts) do
+ :ok
+ end
+
+ ## Validate rabbit application is running
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+
+ def run([qname], %{
+ node: node,
+ vhost: vhost,
+ if_empty: if_empty,
+ if_unused: if_unused,
+ timeout: timeout
+ }) do
+ ## Generate queue resource name from queue name and vhost
+ queue_resource = :rabbit_misc.r(vhost, :queue, qname)
+ ## Lookup a queue on broker node using resource name
+ case :rabbit_misc.rpc_call(node, :rabbit_amqqueue, :lookup, [queue_resource]) do
+ {:ok, queue} ->
+ ## Delete queue
+ :rabbit_misc.rpc_call(
+ node,
+ :rabbit_amqqueue,
+ :delete,
+ [queue, if_unused, if_empty, "cli_user"],
+ timeout
+ )
+
+ {:error, _} = error ->
+ error
+ end
+ end
+
+ def output({:error, :not_found}, _options) do
+ {:error, RabbitMQ.CLI.Core.ExitCodes.exit_usage(), "Queue not found"}
+ end
+
+ def output({:error, :not_empty}, _options) do
+ {:error, RabbitMQ.CLI.Core.ExitCodes.exit_usage(), "Queue is not empty"}
+ end
+
+ def output({:error, :in_use}, _options) do
+ {:error, RabbitMQ.CLI.Core.ExitCodes.exit_usage(), "Queue is in use"}
+ end
+
+ def output({:ok, qlen}, _options) do
+ {:ok, "Queue was successfully deleted with #{qlen} messages"}
+ end
+
+ ## Use default output for all non-special case outputs
+ use RabbitMQ.CLI.DefaultOutput
+
+ def banner([qname], %{vhost: vhost, if_empty: if_empty, if_unused: if_unused}) do
+ if_empty_str =
+ case if_empty do
+ true -> ["if queue is empty "]
+ false -> []
+ end
+
+ if_unused_str =
+ case if_unused do
+ true -> ["if queue is unused "]
+ false -> []
+ end
+
+ "Deleting queue '#{qname}' on vhost '#{vhost}' " <>
+ Enum.join(Enum.concat([if_empty_str, if_unused_str]), "and ") <> "..."
+ end
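+ # e.g. with both flags set, the banner above reads:
+ # "Deleting queue 'q1' on vhost '/' if queue is empty and if queue is unused ..."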
+
+ def usage(), do: "delete_queue <queue_name> [--if-empty|-e] [--if-unused|-u]"
+
+ def usage_additional() do
+ [
+ ["<queue_name>", "name of the queue to delete"],
+ ["--if-empty", "delete the queue if it is empty (has no messages ready for delivery)"],
+ ["--if-unused", "delete the queue only if it has no consumers"]
+ ]
+ end
+
+ def usage_doc_guides() do
+ [
+ DocGuide.queues()
+ ]
+ end
+
+ def help_section(), do: :queues
+
+ def description(), do: "Deletes a queue"
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/delete_user_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/delete_user_command.ex
new file mode 100644
index 0000000000..84f00a96f4
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/delete_user_command.ex
@@ -0,0 +1,52 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Ctl.Commands.DeleteUserCommand do
+ alias RabbitMQ.CLI.Core.{DocGuide, ExitCodes, Helpers}
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ use RabbitMQ.CLI.Core.MergesNoDefaults
+ use RabbitMQ.CLI.Core.AcceptsOnePositionalArgument
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+
+ def run([username], %{node: node_name}) do
+ :rabbit_misc.rpc_call(
+ node_name,
+ :rabbit_auth_backend_internal,
+ :delete_user,
+ [username, Helpers.cli_acting_user()]
+ )
+ end
+
+ def output({:error, {:no_such_user, username}}, %{node: node_name, formatter: "json"}) do
+ {:error, %{"result" => "error", "node" => node_name, "message" => "User #{username} does not exists"}}
+ end
+ def output({:error, {:no_such_user, username}}, _) do
+ {:error, ExitCodes.exit_nouser(), "User \"#{username}\" does not exist"}
+ end
+ use RabbitMQ.CLI.DefaultOutput
+
+ def usage, do: "delete_user <username>"
+
+ def usage_additional() do
+ [
+ ["<username>", "Name of the user to delete."]
+ ]
+ end
+
+ def usage_doc_guides() do
+ [
+ DocGuide.access_control()
+ ]
+ end
+
+ def help_section(), do: :user_management
+
+ def description(), do: "Removes a user from the internal database. Has no effect on users provided by external backends such as LDAP"
+
+ def banner([arg], _), do: "Deleting user \"#{arg}\" ..."
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/delete_vhost_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/delete_vhost_command.ex
new file mode 100644
index 0000000000..8ff6e1f047
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/delete_vhost_command.ex
@@ -0,0 +1,41 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Ctl.Commands.DeleteVhostCommand do
+ alias RabbitMQ.CLI.Core.{DocGuide, Helpers}
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ use RabbitMQ.CLI.Core.MergesNoDefaults
+ use RabbitMQ.CLI.Core.AcceptsOnePositionalArgument
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+
+ def run([arg], %{node: node_name}) do
+ :rabbit_misc.rpc_call(node_name, :rabbit_vhost, :delete, [arg, Helpers.cli_acting_user()])
+ end
+
+ use RabbitMQ.CLI.DefaultOutput
+
+ def usage, do: "delete_vhost <vhost>"
+
+ def usage_additional() do
+ [
+ ["<vhost>", "Name of the virtual host to delete."]
+ ]
+ end
+
+ def usage_doc_guides() do
+ [
+ DocGuide.virtual_hosts()
+ ]
+ end
+
+ def help_section(), do: :virtual_hosts
+
+ def description(), do: "Deletes a virtual host"
+
+ def banner([arg], _), do: "Deleting vhost \"#{arg}\" ..."
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/enable_feature_flag_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/enable_feature_flag_command.ex
new file mode 100644
index 0000000000..6af5a79e49
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/enable_feature_flag_command.ex
@@ -0,0 +1,60 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2018-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+
+defmodule RabbitMQ.CLI.Ctl.Commands.EnableFeatureFlagCommand do
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ def merge_defaults(args, opts), do: {args, opts}
+
+ def validate([], _), do: {:validation_failure, :not_enough_args}
+ def validate([_|_] = args, _) when length(args) > 1, do: {:validation_failure, :too_many_args}
+ def validate([""], _), do: {:validation_failure, {:bad_argument, "feature_flag cannot be an empty string."}}
+ def validate([_], _), do: :ok
+
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+
+ def run(["all"], %{node: node_name}) do
+ case :rabbit_misc.rpc_call(node_name, :rabbit_feature_flags, :enable_all, []) do
+ # Server does not support feature flags, consider none are available.
+ # See rabbitmq/rabbitmq-cli#344 for context. MK.
+ {:badrpc, {:EXIT, {:undef, _}}} -> {:error, :unsupported}
+ {:badrpc, _} = err -> err
+ other -> other
+ end
+ end
+
+ def run([feature_flag], %{node: node_name}) do
+ case :rabbit_misc.rpc_call(node_name, :rabbit_feature_flags, :enable, [String.to_atom(feature_flag)]) do
+ # Server does not support feature flags, consider none are available.
+ # See rabbitmq/rabbitmq-cli#344 for context. MK.
+ {:badrpc, {:EXIT, {:undef, _}}} -> {:error, :unsupported}
+ {:badrpc, _} = err -> err
+ other -> other
+ end
+ end
+
+ def output({:error, :unsupported}, %{node: node_name}) do
+    {:error, RabbitMQ.CLI.Core.ExitCodes.exit_usage(), "This feature flag is not supported by node #{node_name}"}
+ end
+ use RabbitMQ.CLI.DefaultOutput
+
+ def usage, do: "enable_feature_flag <all | feature_flag>"
+
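+  # Example invocations (the flag name is illustrative; available flags
+  # depend on the server version):
+  #
+  #     rabbitmqctl enable_feature_flag quorum_queue
+  #     rabbitmqctl enable_feature_flag all
+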
+ def usage_additional() do
+ [
+ ["<feature_flag>", "name of the feature flag to enable, or \"all\" to enable all supported flags"]
+ ]
+  end
+
+ def help_section(), do: :feature_flags
+
+ def description(), do: "Enables a feature flag or all supported feature flags on the target node"
+
+ def banner(["all"], _), do: "Enabling all feature flags ..."
+
+ def banner([feature_flag], _), do: "Enabling feature flag \"#{feature_flag}\" ..."
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/encode_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/encode_command.ex
new file mode 100644
index 0000000000..c625b4a5f5
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/encode_command.ex
@@ -0,0 +1,102 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Ctl.Commands.EncodeCommand do
+ alias RabbitMQ.CLI.Core.{DocGuide, Helpers}
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+ use RabbitMQ.CLI.DefaultOutput
+
+ def switches() do
+ [
+ cipher: :atom,
+ hash: :atom,
+ iterations: :integer
+ ]
+ end
+
+ def distribution(_), do: :none
+
+ def merge_defaults(args, opts) do
+ with_defaults = Map.merge(%{
+ cipher: :rabbit_pbe.default_cipher(),
+ hash: :rabbit_pbe.default_hash(),
+ iterations: :rabbit_pbe.default_iterations()
+ }, opts)
+ {args, with_defaults}
+ end
+
+ def validate(args, _) when length(args) < 2 do
+    {:validation_failure, {:not_enough_args, "Please provide a value to encode and a passphrase."}}
+ end
+
+ def validate(args, _) when length(args) > 2 do
+ {:validation_failure, :too_many_args}
+ end
+
+ def validate(args, opts) when length(args) === 2 do
+ case {supports_cipher(opts.cipher), supports_hash(opts.hash), opts.iterations > 0} do
+ {false, _, _} ->
+ {:validation_failure, {:bad_argument, "The requested cipher is not supported."}}
+
+ {_, false, _} ->
+        {:validation_failure, {:bad_argument, "The requested hash is not supported."}}
+
+ {_, _, false} ->
+        {:validation_failure, {:bad_argument, "The requested number of iterations must be a positive integer."}}
+
+ {true, true, true} ->
+ :ok
+ end
+ end
+
+ def run([value, passphrase], %{cipher: cipher, hash: hash, iterations: iterations}) do
+ try do
+ term_value = Helpers.evaluate_input_as_term(value)
+ result = {:encrypted, _} = :rabbit_pbe.encrypt_term(cipher, hash, iterations, passphrase, term_value)
+ {:ok, result}
+ catch
+ _, _ ->
+ {:error, "Error during cipher operation."}
+ end
+ end
+
+ def formatter(), do: RabbitMQ.CLI.Formatters.Erlang
+
+ def banner([_, _], _) do
+ "Encrypting value ..."
+ end
+
+  def usage, do: "encode <value> <passphrase> [--cipher <cipher>] [--hash <hash>] [--iterations <iterations>]"
+
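+  # Example invocation (hypothetical value and passphrase; the default
+  # cipher, hash and iteration count are used when the options are omitted):
+  #
+  #     rabbitmqctl encode '<<"guest">>' mypassphrase
+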
+ def usage_additional() do
+ [
+ ["<value>", "config value to encode"],
+ ["<passphrase>", "passphrase to use with the config value encryption key"],
+ ["--cipher <cipher>", "cipher suite to use"],
+ ["--hash <hash>", "hashing function to use"],
+      ["--iterations <iterations>", "number of iterations to apply"]
+ ]
+ end
+
+ def usage_doc_guides() do
+ [
+ DocGuide.configuration()
+ ]
+ end
+
+ def help_section(), do: :configuration
+
+ def description(), do: "Encrypts a sensitive configuration value"
+
+ #
+ # Implementation
+ #
+
+ defp supports_cipher(cipher), do: Enum.member?(:rabbit_pbe.supported_ciphers(), cipher)
+
+ defp supports_hash(hash), do: Enum.member?(:rabbit_pbe.supported_hashes(), hash)
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/environment_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/environment_command.ex
new file mode 100644
index 0000000000..ac807512a9
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/environment_command.ex
@@ -0,0 +1,38 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Ctl.Commands.EnvironmentCommand do
+ alias RabbitMQ.CLI.Core.DocGuide
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ def scopes(), do: [:ctl, :diagnostics]
+
+ use RabbitMQ.CLI.Core.MergesNoDefaults
+ use RabbitMQ.CLI.Core.AcceptsNoPositionalArguments
+
+ def run([], %{node: node_name}) do
+ :rabbit_misc.rpc_call(node_name, :rabbit, :environment, [])
+ end
+
+ use RabbitMQ.CLI.DefaultOutput
+
+ def formatter(), do: RabbitMQ.CLI.Formatters.Erlang
+
+ def usage, do: "environment"
+
+ def usage_doc_guides() do
+ [
+ DocGuide.configuration(),
+ DocGuide.monitoring()
+ ]
+ end
+
+ def help_section(), do: :configuration
+
+ def description(), do: "Displays the name and value of each variable in the application environment for each running application"
+
+ def banner(_, %{node: node_name}), do: "Application environment of node #{node_name} ..."
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/eval_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/eval_command.ex
new file mode 100644
index 0000000000..35fe0a8803
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/eval_command.ex
@@ -0,0 +1,107 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Ctl.Commands.EvalCommand do
+ alias RabbitMQ.CLI.Core.{DocGuide, ErlEval, ExitCodes, Input}
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ use RabbitMQ.CLI.Core.MergesNoDefaults
+
+ def validate([], _) do
+ # value will be provided via standard input
+ :ok
+ end
+
+ def validate(["" | _], _) do
+ {:validation_failure, "Expression must not be blank"}
+ end
+
+ def validate([expr | _], _) do
+ case ErlEval.parse_expr(expr) do
+ {:ok, _} -> :ok
+ {:error, err} -> {:validation_failure, err}
+ end
+ end
+
+ def run([], %{node: node_name} = opts) do
+ case Input.consume_multiline_string() do
+ :eof -> {:error, :not_enough_args}
+ expr ->
+ case ErlEval.parse_expr(expr) do
+ {:ok, parsed} ->
+ bindings = make_bindings([], opts)
+
+ case :rabbit_misc.rpc_call(node_name, :erl_eval, :exprs, [parsed, bindings]) do
+ {:value, value, _} -> {:ok, value}
+ err -> err
+ end
+
+ {:error, msg} -> {:error, msg}
+ end
+ end
+ end
+ def run([expr | arguments], %{node: node_name} = opts) do
+ case ErlEval.parse_expr(expr) do
+ {:ok, parsed} ->
+ bindings = make_bindings(arguments, opts)
+
+ case :rabbit_misc.rpc_call(node_name, :erl_eval, :exprs, [parsed, bindings]) do
+ {:value, value, _} -> {:ok, value}
+ err -> err
+ end
+
+ {:error, msg} -> {:error, msg}
+ end
+ end
+
+ def output({:error, :not_enough_args}, _) do
+ {:error, ExitCodes.exit_dataerr(), "Expression to evaluate is not provided via argument or stdin"}
+ end
+ def output({:error, msg}, _) do
+ {:error, ExitCodes.exit_dataerr(), "Evaluation failed: #{msg}"}
+ end
+ use RabbitMQ.CLI.DefaultOutput
+
+ def formatter(), do: RabbitMQ.CLI.Formatters.Erlang
+
+ def usage, do: "eval <expression>"
+
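+  # Example invocation (returns the name of the target node):
+  #
+  #     rabbitmqctl eval 'node().'
+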
+ def usage_additional() do
+ [
+ ["<expression>", "Expression to evaluate"]
+ ]
+ end
+
+ def usage_doc_guides() do
+ [
+ DocGuide.cli(),
+ DocGuide.monitoring(),
+ DocGuide.troubleshooting()
+ ]
+ end
+
+ def help_section(), do: :operations
+
+ def description(), do: "Evaluates a snippet of Erlang code on the target node"
+
+ def banner(_, _), do: nil
+
+ #
+ # Implementation
+ #
+
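+  # Positional arguments are exposed to the evaluated expression as the
+  # bindings _1, _2, ... and each CLI option as a binding named after it
+  # (for example, the --node value is available as _node).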
+ defp make_bindings(args, opts) do
+ Enum.with_index(args, 1)
+ |> Enum.map(fn {val, index} -> {String.to_atom("_#{index}"), val} end)
+ |> Enum.concat(option_bindings(opts))
+ end
+
+ defp option_bindings(opts) do
+ Enum.to_list(opts)
+ |> Enum.map(fn {key, val} -> {String.to_atom("_#{key}"), val} end)
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/eval_file_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/eval_file_command.ex
new file mode 100644
index 0000000000..6f46abbf17
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/eval_file_command.ex
@@ -0,0 +1,75 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Ctl.Commands.EvalFileCommand do
+ alias RabbitMQ.CLI.Core.{DocGuide, ErlEval, ExitCodes}
+ alias RabbitMQ.CLI.Ctl.Commands.EvalCommand
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ use RabbitMQ.CLI.Core.MergesNoDefaults
+
+ def validate([], _) do
+ {:validation_failure, :not_enough_args}
+ end
+
+ def validate(["" | _], _) do
+ {:validation_failure, "File path must not be blank"}
+ end
+
+ def validate([file_path], _) do
+ case File.read(file_path) do
+ {:ok, expr} ->
+ case ErlEval.parse_expr(expr) do
+ {:ok, _} -> :ok
+ {:error, err} -> {:validation_failure, err}
+ end
+ {:error, :enoent} ->
+ {:validation_failure, "File #{file_path} does not exist"}
+ {:error, :eacces} ->
+ {:validation_failure, "Insufficient permissions to read file #{file_path}"}
+ {:error, err} ->
+ {:validation_failure, err}
+ end
+ end
+
+ def run([file_path], opts) do
+ case File.read(file_path) do
+ {:ok, expr} -> EvalCommand.run([expr], opts)
+ {:error, err} -> {:error, err}
+ end
+  end
+
+ def output({:error, msg}, _) do
+ {:error, ExitCodes.exit_dataerr(), "Evaluation failed: #{msg}"}
+ end
+ use RabbitMQ.CLI.DefaultOutput
+
+ def formatter(), do: RabbitMQ.CLI.Formatters.Erlang
+
+ def usage, do: "eval_file <file path>"
+
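+  # Example invocation (hypothetical path to a file containing an Erlang
+  # expression):
+  #
+  #     rabbitmqctl eval_file /path/to/expression.erl
+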
+ def usage_additional() do
+ [
+ ["<file path>", "Path to the file to evaluate"]
+ ]
+ end
+
+ def usage_doc_guides() do
+ [
+ DocGuide.cli(),
+ DocGuide.monitoring(),
+ DocGuide.troubleshooting()
+ ]
+ end
+
+ def help_section(), do: :operations
+
+ def description(), do: "Evaluates a file that contains a snippet of Erlang code on the target node"
+
+ def banner(_, _), do: nil
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/exec_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/exec_command.ex
new file mode 100644
index 0000000000..469047c1af
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/exec_command.ex
@@ -0,0 +1,80 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Ctl.Commands.ExecCommand do
+ alias RabbitMQ.CLI.Core.DocGuide
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ use RabbitMQ.CLI.Core.MergesNoDefaults
+
+ def switches(), do: [offline: :boolean]
+
+ def distribution(%{offline: true}), do: :none
+ def distribution(%{}), do: :cli
+
+ def validate([], _) do
+ {:validation_failure, :not_enough_args}
+ end
+
+ def validate(args, _) when length(args) > 1 do
+ {:validation_failure, :too_many_args}
+ end
+
+ def validate([""], _) do
+ {:validation_failure, "Expression must not be blank"}
+ end
+
+ def validate([string], _) do
+ try do
+ Code.compile_string(string)
+ :ok
+ rescue
+ ex in SyntaxError ->
+ {:validation_failure, "SyntaxError: " <> Exception.message(ex)}
+
+ _ ->
+ :ok
+ end
+ end
+
+ def run([expr], %{} = opts) do
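+    # the full map of parsed CLI options is exposed to the evaluated
+    # expression as the `options` binding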
+ try do
+ {val, _} = Code.eval_string(expr, [options: opts], __ENV__)
+ {:ok, val}
+ rescue
+ ex ->
+ {:error, Exception.message(ex)}
+ end
+ end
+
+ use RabbitMQ.CLI.DefaultOutput
+
+ def formatter(), do: RabbitMQ.CLI.Formatters.Inspect
+
+ def banner(_, _), do: nil
+
+ def usage, do: "exec <expression> [--offline]"
+
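+  # Example invocation (evaluates the Elixir expression on the CLI node
+  # itself, not on a RabbitMQ node):
+  #
+  #     rabbitmqctl exec '1 + 1'
+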
+ def usage_additional() do
+ [
+ ["<expression>", "Expression to evaluate"],
+ ["--offline", "disable inter-node communication"]
+ ]
+ end
+
+ def usage_doc_guides() do
+ [
+ DocGuide.cli(),
+ DocGuide.monitoring(),
+ DocGuide.troubleshooting()
+ ]
+ end
+
+ def help_section(), do: :operations
+
+ def description(), do: "Evaluates a snippet of Elixir code on the CLI node"
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/export_definitions_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/export_definitions_command.ex
new file mode 100644
index 0000000000..e4b026f160
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/export_definitions_command.ex
@@ -0,0 +1,143 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Ctl.Commands.ExportDefinitionsCommand do
+ alias RabbitMQ.CLI.Core.{DocGuide, ExitCodes, Helpers}
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ def merge_defaults(["-"] = args, opts) do
+ {args, Map.merge(%{format: "json", silent: true}, Helpers.case_insensitive_format(opts))}
+ end
+ def merge_defaults(args, opts) do
+ {args, Map.merge(%{format: "json"}, Helpers.case_insensitive_format(opts))}
+ end
+
+ def switches(), do: [timeout: :integer, format: :string]
+ def aliases(), do: [t: :timeout]
+
+ def validate(_, %{format: format})
+ when format != "json" and format != "JSON" and format != "erlang" do
+ {:validation_failure, {:bad_argument, "Format should be either json or erlang"}}
+ end
+ def validate([], _) do
+ {:validation_failure, :not_enough_args}
+ end
+ def validate(args, _) when length(args) > 1 do
+ {:validation_failure, :too_many_args}
+ end
+ # output to stdout
+ def validate(["-"], _) do
+ :ok
+ end
+ def validate([path], _) do
+ dir = Path.dirname(path)
+ case File.exists?(dir, [raw: true]) do
+ true -> :ok
+ false -> {:validation_failure, {:bad_argument, "Directory #{dir} does not exist"}}
+ end
+ end
+ def validate(_, _), do: :ok
+
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+
+ def run(["-"], %{node: node_name, timeout: timeout}) do
+ case :rabbit_misc.rpc_call(node_name, :rabbit_definitions, :all_definitions, [], timeout) do
+ {:error, _} = err -> err
+ {:error, _, _} = err -> err
+ result -> {:ok, result}
+ end
+ end
+ def run([path], %{node: node_name, timeout: timeout, format: format}) do
+ case :rabbit_misc.rpc_call(node_name, :rabbit_definitions, :all_definitions, [], timeout) do
+ {:badrpc, _} = err -> err
+ {:error, _} = err -> err
+ {:error, _, _} = err -> err
+ result ->
+ # write to the file in run/2 because output/2 is not meant to
+ # produce side effects
+ body = serialise(result, format)
+ abs_path = Path.absname(path)
+
+ File.rm(abs_path)
+ case File.write(abs_path, body) do
+ # no output
+ :ok -> {:ok, nil}
+ {:error, :enoent} ->
+ {:error, ExitCodes.exit_dataerr(), "Parent directory or file #{path} does not exist"}
+ {:error, :enotdir} ->
+ {:error, ExitCodes.exit_dataerr(), "Parent directory of file #{path} is not a directory"}
+ {:error, :enospc} ->
+ {:error, ExitCodes.exit_dataerr(), "No space left on device hosting #{path}"}
+ {:error, :eacces} ->
+ {:error, ExitCodes.exit_dataerr(), "No permissions to write to file #{path} or its parent directory"}
+ {:error, :eisdir} ->
+ {:error, ExitCodes.exit_dataerr(), "Path #{path} is a directory"}
+ {:error, err} ->
+ {:error, ExitCodes.exit_dataerr(), "Could not write to file #{path}: #{err}"}
+ end
+ end
+ end
+
+ def output({:ok, nil}, _) do
+ {:ok, nil}
+ end
+ def output({:ok, result}, %{format: "json"}) when is_map(result) do
+ {:ok, serialise(result, "json")}
+ end
+ def output({:ok, result}, %{format: "erlang"}) when is_map(result) do
+ {:ok, serialise(result, "erlang")}
+ end
+ use RabbitMQ.CLI.DefaultOutput
+
+ def printer(), do: RabbitMQ.CLI.Printers.StdIORaw
+
+ def usage, do: "export_definitions <file_path | \"-\"> [--format <json | erlang>]"
+
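+  # Example invocations (hypothetical paths):
+  #
+  #     rabbitmqctl export_definitions /path/to/definitions.json
+  #     rabbitmqctl export_definitions - --format erlang > definitions.bin
+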
+ def usage_additional() do
+ [
+      ["<file_path>", "Local file path to export to. Pass a dash (-) for stdout."],
+ ["--format", "output format to use: json or erlang"]
+ ]
+ end
+
+ def usage_doc_guides() do
+ [
+ DocGuide.definitions()
+ ]
+ end
+
+ def help_section(), do: :definitions
+
+ def description(), do: "Exports definitions in JSON or compressed Erlang Term Format."
+
+ def banner([path], %{format: fmt}), do: "Exporting definitions in #{human_friendly_format(fmt)} to a file at \"#{path}\" ..."
+
+ #
+ # Implementation
+ #
+
+ defp serialise(raw_map, "json") do
+ # make sure all runtime parameter values are maps, otherwise
+ # they will end up being a list of pairs (a keyword list/proplist)
+ # in the resulting JSON document
+ map = Map.update!(raw_map, :parameters, fn(params) ->
+ Enum.map(params, fn(param) ->
+ Map.update!(param, "value", &:rabbit_data_coercion.to_map/1)
+ end)
+ end)
+ {:ok, json} = JSON.encode(map)
+ json
+ end
+
+ defp serialise(map, "erlang") do
+ :erlang.term_to_binary(map, [{:compressed, 9}])
+ end
+
+ defp human_friendly_format("JSON"), do: "JSON"
+ defp human_friendly_format("json"), do: "JSON"
+ defp human_friendly_format("erlang"), do: "Erlang term format"
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/force_boot_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/force_boot_command.ex
new file mode 100644
index 0000000000..261f86c6c1
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/force_boot_command.ex
@@ -0,0 +1,57 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Ctl.Commands.ForceBootCommand do
+ alias RabbitMQ.CLI.Core.{Config, DocGuide}
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ use RabbitMQ.CLI.Core.MergesNoDefaults
+ use RabbitMQ.CLI.Core.AcceptsNoPositionalArguments
+
+ ##
+ def validate_execution_environment(args, opts) do
+ ## We don't use RequiresRabbitAppStopped helper because we don't want to fail
+ ## the validation if the node is not running.
+ case RabbitMQ.CLI.Core.Validators.rabbit_is_not_running(args, opts) do
+ :ok -> :ok
+ {:validation_failure, _} = failure -> failure
+ _other -> RabbitMQ.CLI.Core.Validators.node_is_not_running(args, opts)
+ end
+ end
+
+ def run([], %{node: node_name} = opts) do
+ case :rabbit_misc.rpc_call(node_name, :rabbit_mnesia, :force_load_next_boot, []) do
+ {:badrpc, :nodedown} ->
+ case Config.get_option(:mnesia_dir, opts) do
+ nil ->
+ {:error, :mnesia_dir_not_found}
+
+ dir ->
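+          # the node is down: create the force_load marker file in its
+          # Mnesia directory so the node force-loads tables on next boot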
+ File.write(Path.join(dir, "force_load"), "")
+ end
+
+ _ ->
+ :ok
+ end
+ end
+
+ use RabbitMQ.CLI.DefaultOutput
+
+ def usage, do: "force_boot"
+
+ def usage_doc_guides() do
+ [
+ DocGuide.clustering()
+ ]
+ end
+
+ def help_section(), do: :cluster_management
+
+ def description(), do: "Forces node to start even if it cannot contact or rejoin any of its previously known peers"
+
+ def banner(_, _), do: nil
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/force_gc_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/force_gc_command.ex
new file mode 100644
index 0000000000..975154be50
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/force_gc_command.ex
@@ -0,0 +1,35 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Ctl.Commands.ForceGcCommand do
+ alias RabbitMQ.CLI.Core.DocGuide
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ use RabbitMQ.CLI.Core.MergesNoDefaults
+ use RabbitMQ.CLI.Core.AcceptsDefaultSwitchesAndTimeout
+ use RabbitMQ.CLI.Core.AcceptsNoPositionalArguments
+
+ def run([], %{node: node_name, timeout: timeout}) do
+ :rabbit_misc.rpc_call(node_name, :rabbit_runtime, :gc_all_processes, [], timeout)
+ end
+
+ use RabbitMQ.CLI.DefaultOutput
+
+ def usage, do: "force_gc"
+
+ def usage_doc_guides() do
+ [
+ DocGuide.memory_use()
+ ]
+ end
+
+ def help_section(), do: :operations
+
+ def description, do: "Makes all Erlang processes on the target node perform/schedule a full sweep garbage collection"
+
+ def banner([], %{node: node_name}), do: "Will ask all processes on node #{node_name} to schedule a full sweep GC"
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/force_reset_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/force_reset_command.ex
new file mode 100644
index 0000000000..5f202f9d08
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/force_reset_command.ex
@@ -0,0 +1,40 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Ctl.Commands.ForceResetCommand do
+ alias RabbitMQ.CLI.Core.{DocGuide, ExitCodes}
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ use RabbitMQ.CLI.Core.MergesNoDefaults
+ use RabbitMQ.CLI.Core.AcceptsNoPositionalArguments
+ use RabbitMQ.CLI.Core.RequiresRabbitAppStopped
+
+ def run([], %{node: node_name}) do
+ :rabbit_misc.rpc_call(node_name, :rabbit_mnesia, :force_reset, [])
+ end
+
+ def output({:error, :mnesia_unexpectedly_running}, %{node: node_name}) do
+ {:error, ExitCodes.exit_software(),
+ RabbitMQ.CLI.DefaultOutput.mnesia_running_error(node_name)}
+ end
+
+ use RabbitMQ.CLI.DefaultOutput
+
+ def usage, do: "force_reset"
+
+ def usage_doc_guides() do
+ [
+ DocGuide.clustering()
+ ]
+ end
+
+ def help_section(), do: :cluster_management
+
+ def description(), do: "Forcefully returns a RabbitMQ node to its virgin state"
+
+ def banner(_, %{node: node_name}), do: "Forcefully resetting node #{node_name} ..."
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/forget_cluster_node_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/forget_cluster_node_command.ex
new file mode 100644
index 0000000000..cdf5ae7fbe
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/forget_cluster_node_command.ex
@@ -0,0 +1,122 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2016-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Ctl.Commands.ForgetClusterNodeCommand do
+ alias RabbitMQ.CLI.Core.{DocGuide, Distribution, Validators}
+ import RabbitMQ.CLI.Core.DataCoercion
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ def switches(), do: [offline: :boolean]
+
+ def merge_defaults(args, opts) do
+ {args, Map.merge(%{offline: false}, opts)}
+ end
+
+ use RabbitMQ.CLI.Core.AcceptsOnePositionalArgument
+
+ def validate_execution_environment([_node_to_remove] = args, %{offline: true} = opts) do
+ Validators.chain(
+ [
+ &Validators.node_is_not_running/2,
+ &Validators.mnesia_dir_is_set/2,
+ &Validators.feature_flags_file_is_set/2,
+ &Validators.rabbit_is_loaded/2
+ ],
+ [args, opts]
+ )
+ end
+
+ def validate_execution_environment([_], %{offline: false}) do
+ :ok
+ end
+
+ def run([node_to_remove], %{node: node_name, offline: true} = opts) do
+ Stream.concat([
+ become(node_name, opts),
+ RabbitMQ.CLI.Core.Helpers.defer(fn ->
+ :rabbit_event.start_link()
+ :rabbit_mnesia.forget_cluster_node(to_atom(node_to_remove), true)
+ end)
+ ])
+ end
+
+ def run([node_to_remove], %{node: node_name, offline: false}) do
+ atom_name = to_atom(node_to_remove)
+ args = [atom_name, false]
+ case :rabbit_misc.rpc_call(node_name, :rabbit_mnesia, :forget_cluster_node, args) do
+ {:error, {:failed_to_remove_node, ^atom_name, {:active, _, _}}} ->
+        {:error, "RabbitMQ on node #{node_to_remove} must be stopped with 'rabbitmqctl -n #{node_to_remove} stop_app' before it can be removed"}
+      {:error, _} = error -> error
+      {:badrpc, _} = error -> error
+ :ok ->
+ case :rabbit_misc.rpc_call(node_name, :rabbit_quorum_queue, :shrink_all, [atom_name]) do
+ {:error, _} ->
+          {:error, "RabbitMQ failed to shrink some of the quorum queues on node #{node_to_remove}"}
+ _ -> :ok
+ end
+ other -> other
+ end
+ end
+
+ use RabbitMQ.CLI.DefaultOutput
+
+ def usage() do
+ "forget_cluster_node [--offline] <existing_cluster_member_node>"
+ end
+
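+  # Example invocation (hypothetical node name; run against a remaining
+  # cluster member via -n):
+  #
+  #     rabbitmqctl -n rabbit@node1 forget_cluster_node rabbit@node2
+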
+ def usage_additional() do
+ [
+ ["--offline", "try to update cluster membership state directly. Use when target node is stopped. Only works for local nodes."]
+ ]
+ end
+
+ def usage_doc_guides() do
+ [
+ DocGuide.clustering(),
+ DocGuide.cluster_formation()
+ ]
+ end
+
+ def help_section(), do: :cluster_management
+
+ def description(), do: "Removes a node from the cluster"
+
+ def banner([node_to_remove], %{offline: true}) do
+ "Removing node #{node_to_remove} from the cluster. Warning: quorum queues cannot be shrunk in offline mode"
+ end
+ def banner([node_to_remove], _) do
+ "Removing node #{node_to_remove} from the cluster"
+ end
+
+ #
+ # Implementation
+ #
+
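+  # In --offline mode the CLI process temporarily assumes the name of the
+  # (stopped) target node so it can operate on the local Mnesia directory
+  # directly; it bails out if a node with that name is already running.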
+ defp become(node_name, opts) do
+ :error_logger.tty(false)
+
+ case :net_adm.ping(node_name) do
+ :pong ->
+ exit({:node_running, node_name})
+
+ :pang ->
+ :ok = :net_kernel.stop()
+
+ Stream.concat([
+ [" * Impersonating node: #{node_name}..."],
+ RabbitMQ.CLI.Core.Helpers.defer(fn ->
+ {:ok, _} = Distribution.start_as(node_name, opts)
+ " done"
+ end),
+ RabbitMQ.CLI.Core.Helpers.defer(fn ->
+ dir = :mnesia.system_info(:directory)
+ " * Mnesia directory: #{dir}..."
+ end)
+ ])
+ end
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/help_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/help_command.ex
new file mode 100644
index 0000000000..8f459cc83f
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/help_command.ex
@@ -0,0 +1,343 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+alias RabbitMQ.CLI.CommandBehaviour
+
+defmodule RabbitMQ.CLI.Ctl.Commands.HelpCommand do
+  alias RabbitMQ.CLI.Core.{CommandModules, Config, ExitCodes}
+
+ import RabbitMQ.CLI.Core.ANSI
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ def scopes(), do: [:ctl, :diagnostics, :plugins, :queues, :upgrade]
+ def switches(), do: [list_commands: :boolean]
+
+ def distribution(_), do: :none
+ use RabbitMQ.CLI.Core.MergesNoDefaults
+
+ def validate([], _), do: :ok
+ def validate([_command], _), do: :ok
+ def validate(args, _) when length(args) > 1 do
+ {:validation_failure, :too_many_args}
+ end
+
+ def run([command_name | _], opts) do
+ CommandModules.load(opts)
+
+ module_map = CommandModules.module_map()
+ case module_map[command_name] do
+ nil ->
+ # command not found
+ # {:error, all_usage(opts)}
+ case RabbitMQ.CLI.AutoComplete.suggest_command(command_name, module_map) do
+ {:suggest, suggested} ->
+ suggest_message = "\nCommand '#{command_name}' not found. \n" <>
+ "Did you mean '#{suggested}'? \n"
+ {:error, ExitCodes.exit_usage(), suggest_message}
+ nil ->
+ {:error, ExitCodes.exit_usage(), "\nCommand '#{command_name}' not found."}
+ end
+
+ command ->
+ {:ok, command_usage(command, opts)}
+ end
+ end
+
+ def run([], opts) do
+ CommandModules.load(opts)
+
+ case opts[:list_commands] do
+ true ->
+ {:ok, commands_description()}
+ _ ->
+ {:ok, all_usage(opts)}
+ end
+ end
+
+ def output({:ok, result}, _) do
+ {:ok, result}
+ end
+ def output({:error, result}, _) do
+ {:error, ExitCodes.exit_usage(), result}
+ end
+ use RabbitMQ.CLI.DefaultOutput
+
+ def banner(_, _), do: nil
+
+ def help_section(), do: :help
+
+ def description(), do: "Displays usage information for a command"
+
+ def usage(), do: "help (<command> | [--list-commands])"
+
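+  # Example invocations:
+  #
+  #     rabbitmqctl help delete_user
+  #     rabbitmqctl help --list-commands
+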
+ def usage_additional() do
+ [
+ ["--list-commands", "only output a list of discovered commands"]
+ ]
+ end
+
+
+ #
+ # Implementation
+ #
+
+ def all_usage(opts) do
+ tool_name = program_name(opts)
+ tool_usage(tool_name) ++
+ ["\n\nAvailable commands:\n"] ++ commands_description() ++
+ help_footer(tool_name)
+ end
+
+ def command_usage(command, opts) do
+ Enum.join([base_usage(command, opts)] ++
+ command_description(command) ++
+ additional_usage(command) ++
+ relevant_doc_guides(command) ++
+ general_options_usage(),
+ "\n\n") <> "\n"
+ end
+
+ defp tool_usage(tool_name) do
+ [
+ "\n#{bright("Usage")}\n\n" <>
+ "#{tool_name} [--node <node>] [--timeout <timeout>] [--longnames] [--quiet] <command> [<command options>]"
+ ]
+ end
+
+ def base_usage(command, opts) do
+ tool_name = program_name(opts)
+
+ maybe_timeout =
+ case command_supports_timeout(command) do
+ true -> " [--timeout <timeout>]"
+ false -> ""
+ end
+
+ Enum.join([
+ "\n#{bright("Usage")}\n\n",
+ "#{tool_name} [--node <node>] [--longnames] [--quiet] " <>
+ flatten_string(command.usage(), maybe_timeout)
+ ])
+ end
+
+ defp flatten_string(list, additional) when is_list(list) do
+ list
+ |> Enum.map(fn line -> line <> additional end)
+ |> Enum.join("\n")
+ end
+
+ defp flatten_string(str, additional) when is_binary(str) do
+ str <> additional
+ end
+
+ defp general_options_usage() do
+ [
+ "#{bright("General Options")}
+
+The following options are accepted by most or all commands.
+
+short     | long          | description
+----------|---------------|---------------------------------------------------
+-?        | --help        | displays command help
+-n <node> | --node <node> | connect to node <node>
+-l        | --longnames   | use long host names
+-t        | --timeout <n> | for commands that support it, operation timeout in seconds
+-q        | --quiet       | suppress informational messages
+-s        | --silent      | suppress informational messages
+          |               | and table header row
+-p        | --vhost       | for commands that are scoped to a virtual host,
+          |               | virtual host to use
+          | --formatter   | alternative result formatter to use
+          |               | if supported: json, pretty_table, table, csv, erlang
+
+Not all commands support all (or any) alternative formatters."]
+ end
+
+ defp command_description(command) do
+ case CommandBehaviour.description(command) do
+ "" -> []
+ other -> [other <> ".\n"]
+ end
+ end
+
+ defp list_item_formatter([option, description]) do
+ "#{option}\n\t#{description}\n"
+ end
+ defp list_item_formatter({option, description}) do
+ "#{option}\n\t#{description}\n"
+ end
+ defp list_item_formatter(line) do
+ "#{line}\n"
+ end
+
+ defp additional_usage(command) do
+ command_usage =
+ case CommandBehaviour.usage_additional(command) do
+ list when is_list(list) -> list |> Enum.map(&list_item_formatter/1)
+ bin when is_binary(bin) -> ["#{bin}\n"]
+ end
+ case command_usage do
+ [] -> []
+ usage ->
+ [flatten_string(["#{bright("Arguments and Options")}\n" | usage], "")]
+ end
+ end
+
+ defp relevant_doc_guides(command) do
+ guide_list =
+ case CommandBehaviour.usage_doc_guides(command) do
+ list when is_list(list) -> list |> Enum.map(fn ln -> " * #{ln}\n" end)
+ bin when is_binary(bin) -> [" * #{bin}\n"]
+ end
+ case guide_list do
+ [] -> []
+ usage ->
+ [flatten_string(["#{bright("Relevant Doc Guides")}\n" | usage], "")]
+ end
+ end
+
+ defp help_footer(tool_name) do
+ ["Use '#{tool_name} help <command>' to learn more about a specific command"]
+ end
+
+ defp command_supports_timeout(command) do
+ nil != CommandBehaviour.switches(command)[:timeout]
+ end
+
+ defp commands_description() do
+ module_map = CommandModules.module_map()
+
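+    # pad all command names to the longest one so that descriptions
+    # line up in a single column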
+ pad_commands_to = Enum.reduce(module_map, 0,
+ fn({name, _}, longest) ->
+ name_length = String.length(name)
+ case name_length > longest do
+ true -> name_length
+ false -> longest
+ end
+ end)
+
+ lines = module_map
+ |> Enum.map(
+ fn({name, cmd}) ->
+ description = CommandBehaviour.description(cmd)
+ help_section = CommandBehaviour.help_section(cmd)
+ {name, {description, help_section}}
+ end)
+ |> Enum.group_by(fn({_, {_, help_section}}) -> help_section end)
+ |> Enum.sort_by(
+ fn({help_section, _}) ->
+ case help_section do
+ :deprecated -> 999
+ :other -> 100
+ {:plugin, _} -> 99
+ :help -> 1
+ :node_management -> 2
+ :cluster_management -> 3
+ :replication -> 3
+ :user_management -> 4
+ :access_control -> 5
+ :observability_and_health_checks -> 6
+ :parameters -> 7
+ :policies -> 8
+ :virtual_hosts -> 9
+ _ -> 98
+ end
+ end)
+ |> Enum.map(
+ fn({help_section, section_helps}) ->
+ [
+ "\n" <> bright(section_head(help_section)) <> ":\n\n" |
+ Enum.sort(section_helps)
+ |> Enum.map(
+ fn({name, {description, _}}) ->
+ " #{String.pad_trailing(name, pad_commands_to)} #{description}\n"
+ end)
+ ]
+
+ end)
+ |> Enum.concat()
+
+ lines ++ ["\n"]
+ end
+
+ defp section_head(help_section) do
+ case help_section do
+ :help ->
+ "Help"
+ :user_management ->
+ "Users"
+ :cluster_management ->
+ "Cluster"
+ :replication ->
+ "Replication"
+ :node_management ->
+ "Nodes"
+ :queues ->
+ "Queues"
+ :observability_and_health_checks ->
+ "Monitoring, observability and health checks"
+ :virtual_hosts ->
+ "Virtual hosts"
+ :access_control ->
+ "Access Control"
+ :parameters ->
+ "Parameters"
+ :policies ->
+ "Policies"
+ :configuration ->
+ "Configuration and Environment"
+ :feature_flags ->
+ "Feature flags"
+ :other ->
+ "Other"
+ {:plugin, plugin} ->
+ plugin_section(plugin) <> " plugin"
+ custom ->
+ snake_case_to_capitalized_string(custom)
+ end
+ end
+
+ defp strip_rabbitmq_prefix(value, regex) do
+ Regex.replace(regex, value, "")
+ end
+
+ defp format_known_plugin_name_fragments(value) do
+ case value do
+ ["amqp1.0"] -> "AMQP 1.0"
+ ["amqp1", "0"] -> "AMQP 1.0"
+ ["management"] -> "Management"
+ ["management", "agent"] -> "Management"
+ ["mqtt"] -> "MQTT"
+ ["stomp"] -> "STOMP"
+ ["web", "mqtt"] -> "Web MQTT"
+ ["web", "stomp"] -> "Web STOMP"
+ [other] -> snake_case_to_capitalized_string(other)
+ fragments -> snake_case_to_capitalized_string(Enum.join(fragments, "_"))
+ end
+ end
+
+ defp plugin_section(plugin) do
+ regex = Regex.recompile!(~r/^rabbitmq_/)
+
+ to_string(plugin)
+ # drop rabbitmq_
+ |> strip_rabbitmq_prefix(regex)
+ |> String.split("_")
+ |> format_known_plugin_name_fragments()
+ end
+
+ defp snake_case_to_capitalized_string(value) do
+ to_string(value)
+ |> String.split("_")
+ |> Enum.map(&String.capitalize/1)
+ |> Enum.join(" ")
+ end
+
+ defp program_name(opts) do
+ Config.get_option(:script_name, opts)
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/hipe_compile_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/hipe_compile_command.ex
new file mode 100644
index 0000000000..13f3468cb6
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/hipe_compile_command.ex
@@ -0,0 +1,98 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2016-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Ctl.Commands.HipeCompileCommand do
+ @moduledoc """
+ HiPE support has been deprecated since Erlang/OTP 22 (mid-2019) and
+ won't be a part of Erlang/OTP 24.
+
+  Therefore this command is DEPRECATED and is a no-op.
+ """
+
+ alias RabbitMQ.CLI.Core.{DocGuide, Validators}
+ import RabbitMQ.CLI.Core.CodePath
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ #
+ # API
+ #
+
+ def distribution(_), do: :none
+
+ use RabbitMQ.CLI.Core.MergesNoDefaults
+
+ def validate([], _), do: {:validation_failure, :not_enough_args}
+
+ def validate([target_dir], opts) do
+ :ok
+ |> Validators.validate_step(fn ->
+ case acceptable_path?(target_dir) do
+ true -> :ok
+ false -> {:error, {:bad_argument, "Target directory path cannot be blank"}}
+ end
+ end)
+ |> Validators.validate_step(fn ->
+ case File.dir?(target_dir) do
+ true ->
+ :ok
+
+ false ->
+ case File.mkdir_p(target_dir) do
+ :ok ->
+ :ok
+
+ {:error, perm} when perm == :eperm or perm == :eacces ->
+ {:error,
+ {:bad_argument,
+ "Cannot create target directory #{target_dir}: insufficient permissions"}}
+ end
+ end
+ end)
+ |> Validators.validate_step(fn -> require_rabbit(opts) end)
+ end
+
+ def validate(_, _), do: {:validation_failure, :too_many_args}
+
+ def run([_target_dir], _opts) do
+ :ok
+ end
+ use RabbitMQ.CLI.DefaultOutput
+
+ def usage, do: "hipe_compile <directory>"
+
+ def usage_additional do
+ [
+ ["<directory>", "Target directory for HiPE-compiled modules"]
+ ]
+ end
+
+ def usage_doc_guides() do
+ [
+ DocGuide.configuration(),
+ DocGuide.erlang_versions()
+ ]
+ end
+
+ def help_section(), do: :deprecated
+
+ def description() do
+ "DEPRECATED. This command is a no-op. HiPE is no longer supported by modern Erlang versions"
+ end
+
+ def banner([_target_dir], _) do
+ "This command is a no-op. HiPE is no longer supported by modern Erlang versions"
+ end
+
+ #
+ # Implementation
+ #
+
+ # Accepts any non-blank path
+ defp acceptable_path?(value) do
+ String.length(String.trim(value)) != 0
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/import_definitions_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/import_definitions_command.ex
new file mode 100644
index 0000000000..45ca0074f3
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/import_definitions_command.ex
@@ -0,0 +1,136 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Ctl.Commands.ImportDefinitionsCommand do
+ alias RabbitMQ.CLI.Core.{Config, DocGuide, ExitCodes, Helpers}
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ def merge_defaults(["-"] = args, opts) do
+ {args, Map.merge(%{format: "json", silent: true}, Helpers.case_insensitive_format(opts))}
+ end
+ def merge_defaults(args, opts) do
+ {args, Map.merge(%{format: "json"}, Helpers.case_insensitive_format(opts))}
+ end
+
+ def switches(), do: [timeout: :integer, format: :string]
+ def aliases(), do: [t: :timeout]
+
+ def validate(_, %{format: format})
+ when format != "json" and format != "JSON" and format != "erlang" do
+ {:validation_failure, {:bad_argument, "Format should be either json or erlang"}}
+ end
+ def validate(args, _) when length(args) > 1 do
+ {:validation_failure, :too_many_args}
+ end
+ def validate([path], _) do
+ case File.exists?(path, [raw: true]) do
+ true -> :ok
+ false -> {:validation_failure, {:bad_argument, "File #{path} does not exist"}}
+ end
+ end
+ def validate(_, _), do: :ok
+
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+
+ def run([], %{node: node_name, format: format, timeout: timeout}) do
+ case IO.read(:stdio, :all) do
+ :eof -> {:error, :not_enough_args}
+ bin ->
+ case deserialise(bin, format) do
+ {:error, error} ->
+ {:error, ExitCodes.exit_dataerr(), "Failed to deserialise input (format: #{human_friendly_format(format)}) (error: #{inspect(error)})"}
+ {:ok, map} ->
+ :rabbit_misc.rpc_call(node_name, :rabbit_definitions, :import_parsed, [map], timeout)
+ end
+ end
+ end
+ def run([path], %{node: node_name, timeout: timeout, format: format}) do
+ abs_path = Path.absname(path)
+
+ case File.read(abs_path) do
+ {:ok, ""} ->
+ {:error, ExitCodes.exit_dataerr(), "File #{path} is zero-sized"}
+ {:ok, bin} ->
+ case deserialise(bin, format) do
+ {:error, error} ->
+ {:error, ExitCodes.exit_dataerr(), "Failed to deserialise input (format: #{human_friendly_format(format)}) (error: #{inspect(error)})"}
+ {:ok, map} ->
+ :rabbit_misc.rpc_call(node_name, :rabbit_definitions, :import_parsed, [map], timeout)
+ end
+ {:error, :enoent} ->
+ {:error, ExitCodes.exit_dataerr(), "Parent directory or file #{path} does not exist"}
+ {:error, :enotdir} ->
+ {:error, ExitCodes.exit_dataerr(), "Parent directory of file #{path} is not a directory"}
+ {:error, :eacces} ->
+ {:error, ExitCodes.exit_dataerr(), "No permissions to read from file #{path} or its parent directory"}
+ {:error, :eisdir} ->
+ {:error, ExitCodes.exit_dataerr(), "Path #{path} is a directory"}
+ {:error, err} ->
+ {:error, ExitCodes.exit_dataerr(), "Could not read from file #{path}: #{err}"}
+ end
+ end
+
+ def output(:ok, %{node: node_name, formatter: "json"}) do
+ {:ok, %{"result" => "ok", "node" => node_name}}
+ end
+ def output(:ok, opts) do
+ case Config.output_less?(opts) do
+ true -> :ok
+ false -> {:ok, "Successfully started definition import. " <>
+ "This process is asynchronous and can take some time.\n"}
+ end
+ end
+ use RabbitMQ.CLI.DefaultOutput
+
+ def printer(), do: RabbitMQ.CLI.Printers.StdIORaw
+
+ def usage, do: "import_definitions <file_path | \"-\"> [--format <json | erlang>]"
+
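+  # Example invocations (hypothetical path; with no argument the input is
+  # read from standard input):
+  #
+  #     rabbitmqctl import_definitions /path/to/definitions.json
+  #     rabbitmqctl import_definitions < /path/to/definitions.json
+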
+ def usage_additional() do
+ [
+      ["[file]", "Local file path to import from. If omitted, definitions will be read from standard input."],
+ ["--format", "input format to use: json or erlang"]
+ ]
+ end
+
+ def usage_doc_guides() do
+ [
+ DocGuide.definitions()
+ ]
+ end
+
+ def help_section(), do: :definitions
+
+ def description(), do: "Imports definitions in JSON or compressed Erlang Term Format."
+
+ def banner([], %{format: fmt}) do
+ "Importing definitions in #{human_friendly_format(fmt)} from standard input ..."
+ end
+ def banner([path], %{format: fmt}) do
+ "Importing definitions in #{human_friendly_format(fmt)} from a file at \"#{path}\" ..."
+ end
+
+ #
+ # Implementation
+ #
+
+ defp deserialise(bin, "json") do
+ JSON.decode(bin)
+ end
+
+ defp deserialise(bin, "erlang") do
+ try do
+ {:ok, :erlang.binary_to_term(bin)}
+    rescue
+      e in ArgumentError ->
+        {:error, e.message}
+ end
+ end
+
+ defp human_friendly_format("JSON"), do: "JSON"
+ defp human_friendly_format("json"), do: "JSON"
+ defp human_friendly_format("erlang"), do: "Erlang term format"
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/join_cluster_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/join_cluster_command.ex
new file mode 100644
index 0000000000..765fbd43f1
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/join_cluster_command.ex
@@ -0,0 +1,95 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2016-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Ctl.Commands.JoinClusterCommand do
+ alias RabbitMQ.CLI.Core.{Config, DocGuide, Helpers}
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ def switches() do
+ [
+ disc: :boolean,
+ ram: :boolean
+ ]
+ end
+
+ def merge_defaults(args, opts) do
+ {args, Map.merge(%{disc: false, ram: false}, opts)}
+ end
+
+ def validate(_, %{disc: true, ram: true}) do
+ {:validation_failure, {:bad_argument, "The node type must be either disc or ram."}}
+ end
+
+ def validate([], _), do: {:validation_failure, :not_enough_args}
+ def validate([_], _), do: :ok
+ def validate(_, _), do: {:validation_failure, :too_many_args}
+
+ use RabbitMQ.CLI.Core.RequiresRabbitAppStopped
+
+ def run([target_node], %{node: node_name, ram: ram, disc: disc} = opts) do
+ node_type =
+ case {ram, disc} do
+ {true, false} -> :ram
+ {false, true} -> :disc
+ ## disc is default
+ {false, false} -> :disc
+ end
+
+ long_or_short_names = Config.get_option(:longnames, opts)
+ target_node_normalised = Helpers.normalise_node(target_node, long_or_short_names)
+
+ :rabbit_misc.rpc_call(
+ node_name,
+ :rabbit_mnesia,
+ :join_cluster,
+ [target_node_normalised, node_type]
+ )
+ end
+
+ def output({:ok, :already_member}, _) do
+ {:ok, "The node is already a member of this cluster"}
+ end
+
+ def output({:error, :mnesia_unexpectedly_running}, %{node: node_name}) do
+ {:error, RabbitMQ.CLI.Core.ExitCodes.exit_software(),
+ RabbitMQ.CLI.DefaultOutput.mnesia_running_error(node_name)}
+ end
+
+ def output({:error, :cannot_cluster_node_with_itself}, %{node: node_name}) do
+ {:error, RabbitMQ.CLI.Core.ExitCodes.exit_software(),
+ "Error: cannot cluster node with itself: #{node_name}"}
+ end
+
+ use RabbitMQ.CLI.DefaultOutput
+
+ def banner([target_node], %{node: node_name}) do
+ "Clustering node #{node_name} with #{target_node}"
+ end
+
+ def usage() do
+ "join_cluster [--disc|--ram] <existing_cluster_member>"
+ end
+
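+  # Example invocation (hypothetical node names; clusters rabbit@node2
+  # with rabbit@node1):
+  #
+  #     rabbitmqctl -n rabbit@node2 join_cluster rabbit@node1
+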
+ def usage_additional() do
+ [
+ ["<existing_cluster_member>", "Existing cluster member (node) to join"],
+ ["--disc", "new node should be a disk one (stores its schema on disk). Highly recommended, used by default."],
+ ["--ram", "new node should be a RAM one (stores schema in RAM). Not recommended. Consult clustering doc guides first."]
+ ]
+ end
+
+ def usage_doc_guides() do
+ [
+ DocGuide.clustering(),
+ DocGuide.cluster_formation()
+ ]
+ end
+
+ def help_section(), do: :cluster_management
+
+ def description(), do: "Instructs the node to become a member of the cluster that the specified node is in"
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_bindings_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_bindings_command.ex
new file mode 100644
index 0000000000..19e8844089
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_bindings_command.ex
@@ -0,0 +1,74 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Ctl.Commands.ListBindingsCommand do
+ alias RabbitMQ.CLI.Ctl.{InfoKeys, RpcStream}
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ @info_keys ~w(source_name source_kind destination_name destination_kind routing_key arguments)a
+
+ def info_keys(), do: @info_keys
+
+ def scopes(), do: [:ctl, :diagnostics]
+
+ use RabbitMQ.CLI.Core.AcceptsDefaultSwitchesAndTimeout
+
+ def merge_defaults([], opts) do
+ merge_defaults(
+ ~w(source_name source_kind
+ destination_name destination_kind
+ routing_key arguments),
+ opts
+ )
+ end
+
+ def merge_defaults(args, opts) do
+ {args, Map.merge(%{vhost: "/", table_headers: true}, opts)}
+ end
+
+ def validate(args, _) do
+ case InfoKeys.validate_info_keys(args, @info_keys) do
+ {:ok, _} -> :ok
+ err -> err
+ end
+ end
+
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+
+ def run([_ | _] = args, %{node: node_name, timeout: timeout, vhost: vhost}) do
+ info_keys = InfoKeys.prepare_info_keys(args)
+
+ RpcStream.receive_list_items(
+ node_name,
+ :rabbit_binding,
+ :info_all,
+ [vhost, info_keys],
+ timeout,
+ info_keys
+ )
+ end
+
+ use RabbitMQ.CLI.DefaultOutput
+
+ def formatter(), do: RabbitMQ.CLI.Formatters.Table
+
+ def usage() do
+ "list_bindings [--vhost <vhost>] [--no-table-headers] [<column> ...]"
+ end
+
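+  # Example invocation (hypothetical vhost; columns default to all of the
+  # supported info keys when omitted):
+  #
+  #     rabbitmqctl list_bindings --vhost "/" source_name destination_name routing_key
+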
+ def usage_additional() do
+ [
+ ["<column>", "must be one of " <> Enum.join(Enum.sort(@info_keys), ", ")]
+ ]
+ end
+
+ def help_section(), do: :observability_and_health_checks
+
+ def description(), do: "Lists all bindings on a vhost"
+
+ def banner(_, %{vhost: vhost}), do: "Listing bindings for vhost #{vhost}..."
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_channels_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_channels_command.ex
new file mode 100644
index 0000000000..5ae7450da1
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_channels_command.ex
@@ -0,0 +1,85 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+##
+
+defmodule RabbitMQ.CLI.Ctl.Commands.ListChannelsCommand do
+ alias RabbitMQ.CLI.Core.{DocGuide, Helpers}
+ alias RabbitMQ.CLI.Ctl.{InfoKeys, RpcStream}
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ def scopes(), do: [:ctl, :diagnostics]
+
+ @info_keys ~w(pid connection name number user vhost transactional
+ confirm consumer_count messages_unacknowledged
+ messages_uncommitted acks_uncommitted messages_unconfirmed
+ prefetch_count global_prefetch_count)a
+
+ def info_keys(), do: @info_keys
+
+ def merge_defaults([], opts) do
+ merge_defaults(~w(pid user consumer_count messages_unacknowledged), opts)
+ end
+
+ def merge_defaults(args, opts) do
+ {args, Map.merge(%{table_headers: true}, opts)}
+ end
+
+ def validate(args, _) do
+ case InfoKeys.validate_info_keys(args, @info_keys) do
+ {:ok, _} -> :ok
+ err -> err
+ end
+ end
+
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+
+ def run([], opts) do
+ run(~w(pid user consumer_count messages_unacknowledged), opts)
+ end
+
+ def run([_ | _] = args, %{node: node_name, timeout: timeout}) do
+ info_keys = InfoKeys.prepare_info_keys(args)
+
+ Helpers.with_nodes_in_cluster(node_name, fn nodes ->
+ RpcStream.receive_list_items(
+ node_name,
+ :rabbit_channel,
+ :emit_info_all,
+ [nodes, info_keys],
+ timeout,
+ info_keys,
+ Kernel.length(nodes)
+ )
+ end)
+ end
+
+ use RabbitMQ.CLI.DefaultOutput
+
+ def formatter(), do: RabbitMQ.CLI.Formatters.Table
+
+ def banner(_, _), do: "Listing channels ..."
+
+ def usage() do
+ "list_channels [--no-table-headers] [<column> ...]"
+ end
+
+ def usage_additional() do
+ [
+ ["<column>", "must be one of " <> Enum.join(Enum.sort(@info_keys), ", ")]
+ ]
+ end
+
+ def usage_doc_guides() do
+ [
+ DocGuide.channels()
+ ]
+ end
+
+ def help_section(), do: :observability_and_health_checks
+
+ def description(), do: "Lists all channels in the node"
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_ciphers_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_ciphers_command.ex
new file mode 100644
index 0000000000..eb7075d261
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_ciphers_command.ex
@@ -0,0 +1,40 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Ctl.Commands.ListCiphersCommand do
+ alias RabbitMQ.CLI.Core.DocGuide
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+ use RabbitMQ.CLI.DefaultOutput
+
+ def scopes(), do: [:ctl, :diagnostics]
+
+ use RabbitMQ.CLI.Core.MergesNoDefaults
+ use RabbitMQ.CLI.Core.AcceptsNoPositionalArguments
+
+ def distribution(_), do: :none
+
+ def run(_, _) do
+ {:ok, :rabbit_pbe.supported_ciphers()}
+ end
+
+ def formatter(), do: RabbitMQ.CLI.Formatters.Erlang
+
+ def usage, do: "list_ciphers"
+
+ def usage_doc_guides() do
+ [
+ DocGuide.configuration(),
+ DocGuide.tls()
+ ]
+ end
+
+ def help_section(), do: :observability_and_health_checks
+
+ def description(), do: "Lists cipher suites supported by encoding commands"
+
+ def banner(_, _), do: "Listing supported ciphers ..."
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_connections_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_connections_command.ex
new file mode 100644
index 0000000000..0e28272ea8
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_connections_command.ex
@@ -0,0 +1,84 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Ctl.Commands.ListConnectionsCommand do
+ alias RabbitMQ.CLI.Core.{DocGuide, Helpers}
+ alias RabbitMQ.CLI.Ctl.{InfoKeys, RpcStream}
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ def scopes(), do: [:ctl, :diagnostics]
+
+ use RabbitMQ.CLI.Core.AcceptsDefaultSwitchesAndTimeout
+
+ @info_keys ~w(pid name port host peer_port peer_host ssl ssl_protocol
+ ssl_key_exchange ssl_cipher ssl_hash peer_cert_subject
+ peer_cert_issuer peer_cert_validity state
+ channels protocol auth_mechanism user vhost timeout frame_max
+ channel_max client_properties recv_oct recv_cnt send_oct
+ send_cnt send_pend connected_at)a
+
+ def info_keys(), do: @info_keys
+
+ def merge_defaults([], opts) do
+ merge_defaults(~w(user peer_host peer_port state), opts)
+ end
+
+ def merge_defaults(args, opts) do
+ {args, Map.merge(%{table_headers: true}, opts)}
+ end
+
+ def validate(args, _) do
+ case InfoKeys.validate_info_keys(args, @info_keys) do
+ {:ok, _} -> :ok
+ err -> err
+ end
+ end
+
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+
+ def run([_ | _] = args, %{node: node_name, timeout: timeout}) do
+ info_keys = InfoKeys.prepare_info_keys(args)
+
+ Helpers.with_nodes_in_cluster(node_name, fn nodes ->
+ RpcStream.receive_list_items(
+ node_name,
+ :rabbit_networking,
+ :emit_connection_info_all,
+ [nodes, info_keys],
+ timeout,
+ info_keys,
+ Kernel.length(nodes)
+ )
+ end)
+ end
+
+ use RabbitMQ.CLI.DefaultOutput
+
+ def formatter(), do: RabbitMQ.CLI.Formatters.Table
+
+ def usage() do
+ "list_connections [--no-table-headers] [<column> ...]"
+ end
+
+ def usage_additional() do
+ [
+ ["<column>", "must be one of " <> Enum.join(Enum.sort(@info_keys), ", ")]
+ ]
+ end
+
+ def usage_doc_guides() do
+ [
+ DocGuide.connections()
+ ]
+ end
+
+ def help_section(), do: :observability_and_health_checks
+
+ def description(), do: "Lists AMQP 0.9.1 connections for the node"
+
+ def banner(_, _), do: "Listing connections ..."
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_consumers_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_consumers_command.ex
new file mode 100644
index 0000000000..90c587cbe8
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_consumers_command.ex
@@ -0,0 +1,109 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Ctl.Commands.ListConsumersCommand do
+ alias RabbitMQ.CLI.Core.{DocGuide, Helpers}
+ alias RabbitMQ.CLI.Ctl.{InfoKeys, RpcStream}
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ def scopes(), do: [:ctl, :diagnostics]
+
+ use RabbitMQ.CLI.Core.AcceptsDefaultSwitchesAndTimeout
+
+ @info_keys ~w(queue_name channel_pid consumer_tag
+ ack_required prefetch_count active activity_status arguments)a
+
+ def info_keys(), do: @info_keys
+
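+  # when no columns are requested, list every consumer info key except activity_status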
+ def merge_defaults([], opts) do
+ {Enum.map(@info_keys -- [:activity_status], &Atom.to_string/1),
+ Map.merge(%{vhost: "/", table_headers: true}, opts)}
+ end
+
+ def merge_defaults(args, opts) do
+ {args, Map.merge(%{vhost: "/", table_headers: true}, opts)}
+ end
+
+ def validate(args, _) do
+ case InfoKeys.validate_info_keys(args, @info_keys) do
+ {:ok, _} -> :ok
+ err -> err
+ end
+ end
+
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+
+ def run([_ | _] = args, %{node: node_name, timeout: timeout, vhost: vhost}) do
+ info_keys = InfoKeys.prepare_info_keys(args)
+
+ Helpers.with_nodes_in_cluster(node_name, fn nodes ->
+ RpcStream.receive_list_items_with_fun(
+ node_name,
+ [{:rabbit_amqqueue,
+ :emit_consumers_all,
+ [nodes, vhost]}],
+ timeout,
+ info_keys,
+ Kernel.length(nodes),
+ fn item -> fill_consumer_active_fields(item) end
+ )
+ end)
+ end
+
+ use RabbitMQ.CLI.DefaultOutput
+
+ def formatter(), do: RabbitMQ.CLI.Formatters.Table
+
+ def usage() do
+ "list_consumers [--vhost <vhost>] [--no-table-headers] [<column> ...]"
+ end
+
+ def help_section(), do: :observability_and_health_checks
+
+ def description(), do: "Lists all consumers for a vhost"
+
+ def usage_additional() do
+ [
+ ["<column>", "must be one of " <> Enum.join(Enum.sort(@info_keys), ", ")]
+ ]
+ end
+
+ def usage_doc_guides() do
+ [
+ DocGuide.consumers()
+ ]
+ end
+
+ def banner(_, %{vhost: vhost}), do: "Listing consumers in vhost #{vhost} ..."
+
+ #
+ # Implementation
+ #
+
+  # add the missing fields if the response comes from a node older than 3.8
+ def fill_consumer_active_fields({[], {chunk, :continue}}) do
+ {[], {chunk, :continue}}
+ end
+
+ def fill_consumer_active_fields({items, {chunk, :continue}}) do
+ {Enum.map(items, fn item ->
+ case Keyword.has_key?(item, :active) do
+ true ->
+ item
+ false ->
+            Keyword.drop(item, [:arguments]) ++
+              [active: true, activity_status: :up] ++
+              [arguments: Keyword.get(item, :arguments, [])]
+ end
+ end), {chunk, :continue}}
+ end
+
+ def fill_consumer_active_fields(v) do
+ v
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_exchanges_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_exchanges_command.ex
new file mode 100644
index 0000000000..a3b8b3521b
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_exchanges_command.ex
@@ -0,0 +1,67 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Ctl.Commands.ListExchangesCommand do
+ alias RabbitMQ.CLI.Ctl.{InfoKeys, RpcStream}
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ @info_keys ~w(name type durable auto_delete internal arguments policy)a
+
+ def info_keys(), do: @info_keys
+
+ def scopes(), do: [:ctl, :diagnostics]
+
+ use RabbitMQ.CLI.Core.AcceptsDefaultSwitchesAndTimeout
+
+ def merge_defaults([], opts) do
+ merge_defaults(~w(name type), opts)
+ end
+
+ def merge_defaults(args, opts) do
+ {args, Map.merge(%{vhost: "/", table_headers: true}, opts)}
+ end
+
+ def validate(args, _) do
+ case InfoKeys.validate_info_keys(args, @info_keys) do
+ {:ok, _} -> :ok
+ err -> err
+ end
+ end
+
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+
+ def run([_ | _] = args, %{node: node_name, timeout: timeout, vhost: vhost}) do
+ info_keys = InfoKeys.prepare_info_keys(args)
+
+ RpcStream.receive_list_items(
+ node_name,
+ :rabbit_exchange,
+ :info_all,
+ [vhost, info_keys],
+ timeout,
+ info_keys
+ )
+ end
+
+ use RabbitMQ.CLI.DefaultOutput
+
+ def formatter(), do: RabbitMQ.CLI.Formatters.Table
+
+ def usage(), do: "list_exchanges [--vhost <vhost>] [--no-table-headers] [<column> ...]"
+
+ def usage_additional() do
+ [
+ ["<column>", "must be one of " <> Enum.join(Enum.sort(@info_keys), ", ")]
+ ]
+ end
+
+ def help_section(), do: :observability_and_health_checks
+
+ def description(), do: "Lists exchanges"
+
+ def banner(_, %{vhost: vhost}), do: "Listing exchanges for vhost #{vhost} ..."
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_feature_flags_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_feature_flags_command.ex
new file mode 100644
index 0000000000..46b4bc82c2
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_feature_flags_command.ex
@@ -0,0 +1,92 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2018-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Ctl.Commands.ListFeatureFlagsCommand do
+ alias RabbitMQ.CLI.Core.{DocGuide, Validators}
+ alias RabbitMQ.CLI.Ctl.InfoKeys
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+ use RabbitMQ.CLI.DefaultOutput
+
+ def formatter(), do: RabbitMQ.CLI.Formatters.Table
+
+ @info_keys ~w(name state stability provided_by desc doc_url)a
+
+ def info_keys(), do: @info_keys
+
+ def scopes(), do: [:ctl, :diagnostics]
+ use RabbitMQ.CLI.Core.AcceptsDefaultSwitchesAndTimeout
+
+ def merge_defaults([], opts), do: {["name", "state"], opts}
+ def merge_defaults(args, opts), do: {args, opts}
+
+ def validate(args, _) do
+ case InfoKeys.validate_info_keys(args, @info_keys) do
+ {:ok, _} -> :ok
+ err -> err
+ end
+ end
+
+ def validate_execution_environment(args, opts) do
+ Validators.chain(
+ [
+ &Validators.rabbit_is_loaded/2,
+ &Validators.rabbit_is_running/2
+ ],
+ [args, opts]
+ )
+ end
+
+ def run([_|_] = args, %{node: node_name, timeout: timeout}) do
+ case :rabbit_misc.rpc_call(node_name, :rabbit_ff_extra, :cli_info, [], timeout) do
+ # Server does not support feature flags, consider none are available.
+ # See rabbitmq/rabbitmq-cli#344 for context. MK.
+ {:badrpc, {:EXIT, {:undef, _}}} -> []
+ {:badrpc, _} = err -> err
+ val -> filter_by_arg(val, args)
+ end
+ end
+
+ def banner(_, _), do: "Listing feature flags ..."
+
+ def usage, do: "list_feature_flags [<column> ...]"
+
+ def usage_additional() do
+ [
+ ["<column>", "must be one of " <> Enum.join(Enum.sort(@info_keys), ", ")]
+ ]
+ end
+
+ def usage_doc_guides() do
+ [
+ DocGuide.feature_flags()
+ ]
+ end
+
+ def help_section(), do: :feature_flags
+
+ def description(), do: "Lists feature flags"
+
+ #
+ # Implementation
+ #
+
+ defp filter_by_arg(ff_info, _) when is_tuple(ff_info) do
+ # tuple means unexpected data
+ ff_info
+ end
+
+ defp filter_by_arg(ff_info, [_|_] = args) when is_list(ff_info) do
+ symbol_args = InfoKeys.prepare_info_keys(args)
+ Enum.map(ff_info,
+ fn(ff) ->
+ symbol_args
+ |> Enum.filter(fn(arg) -> ff[arg] != nil end)
+ |> Enum.map(fn(arg) -> {arg, ff[arg]} end)
+ end
+ )
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_global_parameters_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_global_parameters_command.ex
new file mode 100644
index 0000000000..8d3f2d795a
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_global_parameters_command.ex
@@ -0,0 +1,48 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Ctl.Commands.ListGlobalParametersCommand do
+ alias RabbitMQ.CLI.Core.DocGuide
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ def merge_defaults(args, opts) do
+ {args, Map.merge(%{table_headers: true}, opts)}
+ end
+
+ def scopes(), do: [:ctl, :diagnostics]
+
+ use RabbitMQ.CLI.Core.AcceptsDefaultSwitchesAndTimeout
+ use RabbitMQ.CLI.Core.AcceptsNoPositionalArguments
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+
+ def run([], %{node: node_name, timeout: timeout}) do
+ :rabbit_misc.rpc_call(
+ node_name,
+ :rabbit_runtime_parameters,
+ :list_global_formatted,
+ [],
+ timeout
+ )
+ end
+
+ use RabbitMQ.CLI.DefaultOutput
+
+ def formatter(), do: RabbitMQ.CLI.Formatters.Table
+
+ def usage, do: "list_global_parameters [--no-table-headers]"
+
+ def usage_doc_guides() do
+ [
+ DocGuide.parameters()
+ ]
+ end
+
+ def help_section(), do: :parameters
+
+ def description(), do: "Lists global runtime parameters"
+
+ def banner(_, _), do: "Listing global runtime parameters ..."
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_hashes_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_hashes_command.ex
new file mode 100644
index 0000000000..9e0f25e6dd
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_hashes_command.ex
@@ -0,0 +1,40 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Ctl.Commands.ListHashesCommand do
+ alias RabbitMQ.CLI.Core.DocGuide
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+ use RabbitMQ.CLI.DefaultOutput
+
+ def scopes(), do: [:ctl, :diagnostics]
+
+ use RabbitMQ.CLI.Core.MergesNoDefaults
+ use RabbitMQ.CLI.Core.AcceptsNoPositionalArguments
+
+ def distribution(_), do: :none
+
+ def run(_, _) do
+ {:ok, :rabbit_pbe.supported_hashes()}
+ end
+
+ def formatter(), do: RabbitMQ.CLI.Formatters.Erlang
+
+ def usage, do: "list_hashes"
+
+ def usage_doc_guides() do
+ [
+ DocGuide.configuration(),
+ DocGuide.tls()
+ ]
+ end
+
+ def help_section(), do: :observability_and_health_checks
+
+ def description(), do: "Lists hash functions supported by encoding commands"
+
+ def banner(_, _), do: "Listing supported hash algorithms ..."
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_operator_policies_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_operator_policies_command.ex
new file mode 100644
index 0000000000..dd2c54dfc0
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_operator_policies_command.ex
@@ -0,0 +1,49 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Ctl.Commands.ListOperatorPoliciesCommand do
+ alias RabbitMQ.CLI.Core.DocGuide
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ def scopes(), do: [:ctl, :diagnostics]
+ use RabbitMQ.CLI.Core.AcceptsDefaultSwitchesAndTimeout
+
+ def merge_defaults(args, opts) do
+ {args, Map.merge(%{vhost: "/", table_headers: true}, opts)}
+ end
+
+ use RabbitMQ.CLI.Core.AcceptsNoPositionalArguments
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+
+ def run([], %{node: node_name, timeout: timeout, vhost: vhost}) do
+ :rabbit_misc.rpc_call(
+ node_name,
+ :rabbit_policy,
+ :list_formatted_op,
+ [vhost],
+ timeout
+ )
+ end
+
+ use RabbitMQ.CLI.DefaultOutput
+
+ def formatter(), do: RabbitMQ.CLI.Formatters.Table
+
+ def usage, do: "list_operator_policies [--vhost <vhost>] [--no-table-headers]"
+
+ def usage_doc_guides() do
+ [
+ DocGuide.parameters()
+ ]
+ end
+
+ def help_section(), do: :policies
+
+ def description(), do: "Lists operator policy overrides for a virtual host"
+
+ def banner(_, %{vhost: vhost}),
+ do: "Listing operator policy overrides for vhost \"#{vhost}\" ..."
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_parameters_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_parameters_command.ex
new file mode 100644
index 0000000000..2d51f08527
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_parameters_command.ex
@@ -0,0 +1,49 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Ctl.Commands.ListParametersCommand do
+ alias RabbitMQ.CLI.Core.DocGuide
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ def scopes(), do: [:ctl, :diagnostics]
+ use RabbitMQ.CLI.Core.AcceptsDefaultSwitchesAndTimeout
+
+ def merge_defaults(args, opts) do
+ {args, Map.merge(%{vhost: "/", table_headers: true}, opts)}
+ end
+
+ use RabbitMQ.CLI.Core.AcceptsNoPositionalArguments
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+
+ def run([], %{node: node_name, timeout: timeout, vhost: vhost}) do
+ :rabbit_misc.rpc_call(
+ node_name,
+ :rabbit_runtime_parameters,
+ :list_formatted,
+ [vhost],
+ timeout
+ )
+ end
+
+ use RabbitMQ.CLI.DefaultOutput
+
+ def formatter(), do: RabbitMQ.CLI.Formatters.Table
+
+ def usage, do: "list_parameters [--vhost <vhost>] [--no-table-headers]"
+
+ def usage_doc_guides() do
+ [
+ DocGuide.parameters()
+ ]
+ end
+
+ def help_section(), do: :parameters
+
+ def description(), do: "Lists runtime parameters for a virtual host"
+
+ def banner(_, %{vhost: vhost}), do: "Listing runtime parameters for vhost \"#{vhost}\" ..."
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_permissions_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_permissions_command.ex
new file mode 100644
index 0000000000..feaf917cfa
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_permissions_command.ex
@@ -0,0 +1,49 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Ctl.Commands.ListPermissionsCommand do
+ alias RabbitMQ.CLI.Core.DocGuide
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ def scopes(), do: [:ctl, :diagnostics]
+ use RabbitMQ.CLI.Core.AcceptsDefaultSwitchesAndTimeout
+
+ def merge_defaults(args, opts) do
+ {args, Map.merge(%{vhost: "/", table_headers: true}, opts)}
+ end
+
+ use RabbitMQ.CLI.Core.AcceptsNoPositionalArguments
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+
+ def run([], %{node: node_name, timeout: timeout, vhost: vhost}) do
+ :rabbit_misc.rpc_call(
+ node_name,
+ :rabbit_auth_backend_internal,
+ :list_vhost_permissions,
+ [vhost],
+ timeout
+ )
+ end
+
+ use RabbitMQ.CLI.DefaultOutput
+
+ def formatter(), do: RabbitMQ.CLI.Formatters.Table
+
+ def usage, do: "list_permissions [--vhost <vhost>] [--no-table-headers]"
+
+ def usage_doc_guides() do
+ [
+ DocGuide.access_control(),
+ DocGuide.virtual_hosts()
+ ]
+ end
+
+ def help_section(), do: :access_control
+ def description(), do: "Lists user permissions in a virtual host"
+
+ def banner(_, %{vhost: vhost}), do: "Listing permissions for vhost \"#{vhost}\" ..."
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_policies_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_policies_command.ex
new file mode 100644
index 0000000000..9fe8e37dc1
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_policies_command.ex
@@ -0,0 +1,49 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Ctl.Commands.ListPoliciesCommand do
+ alias RabbitMQ.CLI.Core.DocGuide
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ def scopes(), do: [:ctl, :diagnostics]
+ use RabbitMQ.CLI.Core.AcceptsDefaultSwitchesAndTimeout
+
+ def merge_defaults(args, opts) do
+ {args, Map.merge(%{vhost: "/", table_headers: true}, opts)}
+ end
+
+ use RabbitMQ.CLI.Core.AcceptsNoPositionalArguments
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+
+ def run([], %{node: node_name, timeout: timeout, vhost: vhost}) do
+ :rabbit_misc.rpc_call(
+ node_name,
+ :rabbit_policy,
+ :list_formatted,
+ [vhost],
+ timeout
+ )
+ end
+
+ use RabbitMQ.CLI.DefaultOutput
+
+ def formatter(), do: RabbitMQ.CLI.Formatters.Table
+
+ def usage, do: "list_policies [--vhost <vhost>] [--no-table-headers]"
+
+ def usage_doc_guides() do
+ [
+ DocGuide.parameters()
+ ]
+ end
+
+ def help_section(), do: :policies
+
+ def description(), do: "Lists all policies in a virtual host"
+
+ def banner(_, %{vhost: vhost}), do: "Listing policies for vhost \"#{vhost}\" ..."
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_queues_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_queues_command.ex
new file mode 100644
index 0000000000..21f9fa78ec
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_queues_command.ex
@@ -0,0 +1,144 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Ctl.Commands.ListQueuesCommand do
+ require RabbitMQ.CLI.Ctl.InfoKeys
+ require RabbitMQ.CLI.Ctl.RpcStream
+
+ alias RabbitMQ.CLI.Ctl.{InfoKeys, RpcStream}
+ alias RabbitMQ.CLI.Core.{DocGuide, Helpers}
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ @default_timeout 60_000
+ @info_keys ~w(name durable auto_delete
+ arguments policy pid owner_pid exclusive exclusive_consumer_pid
+ exclusive_consumer_tag messages_ready messages_unacknowledged messages
+ messages_ready_ram messages_unacknowledged_ram messages_ram
+ messages_persistent message_bytes message_bytes_ready
+ message_bytes_unacknowledged message_bytes_ram message_bytes_persistent
+ head_message_timestamp disk_reads disk_writes consumers
+ consumer_utilisation memory slave_pids synchronised_slave_pids state type
+ leader members online)a
+
+ def description(), do: "Lists queues and their properties"
+ def usage(), do: "list_queues [--vhost <vhost>] [--online] [--offline] [--local] [--no-table-headers] [<column>, ...]"
+ def scopes(), do: [:ctl, :diagnostics]
+ def switches(), do: [offline: :boolean,
+ online: :boolean,
+ local: :boolean,
+ timeout: :integer]
+ def aliases(), do: [t: :timeout]
+
+ def info_keys(), do: @info_keys
+
+ defp default_opts() do
+ %{vhost: "/", offline: false, online: false, local: false, table_headers: true}
+ end
+
+ def merge_defaults([_ | _] = args, opts) do
+ timeout =
+ case opts[:timeout] do
+ nil -> @default_timeout
+ :infinity -> @default_timeout
+ other -> other
+ end
+
+ {args,
+ Map.merge(
+ default_opts(),
+ Map.merge(opts, %{timeout: timeout})
+ )}
+ end
+
+ def merge_defaults([], opts) do
+ merge_defaults(~w(name messages), opts)
+ end
+
+ def validate(args, _opts) do
+ case InfoKeys.validate_info_keys(args, @info_keys) do
+ {:ok, _} -> :ok
+ err -> err
+ end
+ end
+
+ # note that --offline for this command has a different meaning:
+ # it lists queues with unavailable masters
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+
+ def run([_ | _] = args, %{
+ node: node_name,
+ timeout: timeout,
+ vhost: vhost,
+ online: online_opt,
+ offline: offline_opt,
+ local: local_opt
+ }) do
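+    # when neither --online nor --offline is given, include queues in both states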
+ {online, offline} =
+ case {online_opt, offline_opt} do
+ {false, false} -> {true, true}
+ other -> other
+ end
+
+ info_keys = InfoKeys.prepare_info_keys(args)
+
+ Helpers.with_nodes_in_cluster(node_name, fn nodes ->
+ offline_mfa = {:rabbit_amqqueue, :emit_info_down, [vhost, info_keys]}
+ local_mfa = {:rabbit_amqqueue, :emit_info_local, [vhost, info_keys]}
+ online_mfa = {:rabbit_amqqueue, :emit_info_all, [nodes, vhost, info_keys]}
+
+ {chunks, mfas} =
+ case {local_opt, offline, online} do
+ # Local takes precedence
+ {true, _, _} -> {1, [local_mfa]}
+ {_, true, true} -> {Kernel.length(nodes) + 1, [offline_mfa, online_mfa]}
+ {_, false, true} -> {Kernel.length(nodes), [online_mfa]}
+ {_, true, false} -> {1, [offline_mfa]}
+ end
+
+ RpcStream.receive_list_items_with_fun(node_name, mfas, timeout, info_keys, chunks, fn
+ {{:error, {:badrpc, {:timeout, to}}}, :finished} ->
+ {{:error,
+ {:badrpc,
+ {:timeout, to,
+ "Some queue(s) are unresponsive, use list_unresponsive_queues command."}}},
+ :finished}
+
+ any ->
+ any
+ end)
+ end)
+ end
+
+ use RabbitMQ.CLI.DefaultOutput
+
+ def formatter(), do: RabbitMQ.CLI.Formatters.Table
+
+ def help_section(), do: :observability_and_health_checks
+
+ def usage_additional do
+ [
+ ["<column>", "must be one of " <> Enum.join(Enum.sort(@info_keys), ", ")],
+ ["--online", "lists only queues on online (reachable) nodes"],
+ ["--offline", "lists only queues on offline (unreachable) nodes"],
+ ["--local", "only return queues hosted on the target node"]
+ ]
+ end
+
+ def usage_doc_guides() do
+ [
+ DocGuide.queues()
+ ]
+ end
+
+ def banner(_, %{vhost: vhost, timeout: timeout}) do
+ [
+ "Timeout: #{timeout / 1000} seconds ...",
+ "Listing queues for vhost #{vhost} ..."
+ ]
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_topic_permissions_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_topic_permissions_command.ex
new file mode 100644
index 0000000000..1a22b3b26d
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_topic_permissions_command.ex
@@ -0,0 +1,48 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Ctl.Commands.ListTopicPermissionsCommand do
+ alias RabbitMQ.CLI.Core.DocGuide
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ def scopes(), do: [:ctl, :diagnostics]
+ use RabbitMQ.CLI.Core.AcceptsDefaultSwitchesAndTimeout
+
+ def merge_defaults(args, opts) do
+ {args, Map.merge(%{vhost: "/", table_headers: true}, opts)}
+ end
+
+ use RabbitMQ.CLI.Core.AcceptsNoPositionalArguments
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+
+ def run([], %{node: node_name, timeout: timeout, vhost: vhost}) do
+ :rabbit_misc.rpc_call(
+ node_name,
+ :rabbit_auth_backend_internal,
+ :list_vhost_topic_permissions,
+ [vhost],
+ timeout
+ )
+ end
+
+ use RabbitMQ.CLI.DefaultOutput
+
+ def formatter(), do: RabbitMQ.CLI.Formatters.Table
+
+ def usage, do: "list_topic_permissions [--vhost <vhost>] [--no-table-headers]"
+
+ def usage_doc_guides() do
+ [
+ DocGuide.access_control()
+ ]
+ end
+
+ def help_section(), do: :access_control
+ def description(), do: "Lists topic permissions in a virtual host"
+
+ def banner(_, %{vhost: vhost}), do: "Listing topic permissions for vhost \"#{vhost}\" ..."
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_unresponsive_queues_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_unresponsive_queues_command.ex
new file mode 100644
index 0000000000..91d6d624f5
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_unresponsive_queues_command.ex
@@ -0,0 +1,96 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Ctl.Commands.ListUnresponsiveQueuesCommand do
+ require RabbitMQ.CLI.Ctl.InfoKeys
+ require RabbitMQ.CLI.Ctl.RpcStream
+
+ alias RabbitMQ.CLI.Ctl.{InfoKeys, RpcStream}
+ alias RabbitMQ.CLI.Core.Helpers
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ @info_keys ~w(name durable auto_delete
+ arguments pid recoverable_slaves)a
+
+ def info_keys(), do: @info_keys
+
+ def scopes(), do: [:ctl, :diagnostics]
+
+ def switches() do
+ [queue_timeout: :integer, local: :boolean, timeout: :integer]
+ end
+
+ def aliases(), do: [t: :timeout]
+
+ defp default_opts() do
+ %{vhost: "/", local: false, queue_timeout: 15, table_headers: true}
+ end
+
+ def merge_defaults([_ | _] = args, opts) do
+ {args, Map.merge(default_opts(), opts)}
+ end
+
+ def merge_defaults([], opts) do
+ merge_defaults(~w(name), opts)
+ end
+
+ def validate(args, _opts) do
+ case InfoKeys.validate_info_keys(args, @info_keys) do
+ {:ok, _} -> :ok
+ err -> err
+ end
+ end
+
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+
+ def run(args, %{
+ node: node_name,
+ vhost: vhost,
+ timeout: timeout,
+ queue_timeout: qtimeout,
+ local: local_opt
+ }) do
+ info_keys = InfoKeys.prepare_info_keys(args)
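+    # --queue-timeout is given in seconds; convert it to milliseconds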
+ queue_timeout = qtimeout * 1000
+
+ Helpers.with_nodes_in_cluster(node_name, fn nodes ->
+ local_mfa = {:rabbit_amqqueue, :emit_unresponsive_local, [vhost, info_keys, queue_timeout]}
+ all_mfa = {:rabbit_amqqueue, :emit_unresponsive, [nodes, vhost, info_keys, queue_timeout]}
+
+ {chunks, mfas} =
+ case local_opt do
+ true -> {1, [local_mfa]}
+ false -> {Kernel.length(nodes), [all_mfa]}
+ end
+
+ RpcStream.receive_list_items(node_name, mfas, timeout, info_keys, chunks)
+ end)
+ end
+
+ use RabbitMQ.CLI.DefaultOutput
+
+ def formatter(), do: RabbitMQ.CLI.Formatters.Table
+
+ def banner(_, %{vhost: vhost}), do: "Listing unresponsive queues for vhost #{vhost} ..."
+
+ def usage() do
+ "list_unresponsive_queues [--local] [--queue-timeout <milliseconds>] [<column> ...] [--no-table-headers]"
+ end
+
+ def usage_additional() do
+ [
+ ["<column>", "must be one of " <> Enum.join(Enum.sort(@info_keys), ", ")],
+ ["--local", "only return queues hosted on the target node"],
+ ["--queue-timeout <milliseconds>", "per-queue timeout to use when checking for responsiveness"]
+ ]
+ end
+
+ def help_section(), do: :observability_and_health_checks
+
+ def description(), do: "Tests queues to respond within timeout. Lists those which did not respond"
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_user_limits_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_user_limits_command.ex
new file mode 100644
index 0000000000..5e0de38b3f
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_user_limits_command.ex
@@ -0,0 +1,92 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Ctl.Commands.ListUserLimitsCommand do
+ alias RabbitMQ.CLI.Core.DocGuide
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ def scopes(), do: [:ctl, :diagnostics]
+ def switches(), do: [global: :boolean, user: :string]
+
+ def merge_defaults(args, %{global: true} = opts) do
+ {args, Map.merge(%{table_headers: true}, opts)}
+ end
+
+ def merge_defaults(args, opts) do
+ {args, Map.merge(%{user: "guest", table_headers: true}, opts)}
+ end
+
+ use RabbitMQ.CLI.Core.AcceptsNoPositionalArguments
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+
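+  # with --global, the node returns {user, limits} pairs; render each limits list as JSON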
+ def run([], %{node: node_name, global: true}) do
+ case :rabbit_misc.rpc_call(node_name, :rabbit_auth_backend_internal, :get_user_limits, []) do
+ [] ->
+ []
+
+ {:error, err} ->
+ {:error, err}
+
+ {:badrpc, node} ->
+ {:badrpc, node}
+
+ val ->
+ Enum.map(val, fn {user, val} ->
+ {:ok, val_encoded} = JSON.encode(Map.new(val))
+ [user: user, limits: val_encoded]
+ end)
+ end
+ end
+
+ def run([], %{node: node_name, user: username}) do
+ case :rabbit_misc.rpc_call(node_name, :rabbit_auth_backend_internal, :get_user_limits, [username]) do
+ :undefined ->
+ {:error, {:no_such_user, username}}
+
+ {:error, err} ->
+ {:error, err}
+
+ {:badrpc, node} ->
+ {:badrpc, node}
+
+ val when is_list(val) or is_map(val) ->
+ {:ok, val_encoded} = JSON.encode(Map.new(val))
+ val_encoded
+ end
+ end
+
+ use RabbitMQ.CLI.DefaultOutput
+
+ def formatter(), do: RabbitMQ.CLI.Formatters.Table
+
+ def usage, do: "list_user_limits [--user <username>] [--global]"
+
+ def usage_additional() do
+ [
+ ["--global", "list limits for all the users"]
+ ]
+ end
+
+ def usage_doc_guides() do
+ [
+ DocGuide.access_control()
+ ]
+ end
+
+ def help_section(), do: :user_management
+
+ def description(), do: "Displays configured user limits"
+
+ def banner([], %{global: true}) do
+ "Listing limits for all users ..."
+ end
+
+ def banner([], %{user: username}) do
+ "Listing limits for user \"#{username}\" ..."
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_user_permissions_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_user_permissions_command.ex
new file mode 100644
index 0000000000..bd302eefd0
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_user_permissions_command.ex
@@ -0,0 +1,58 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Ctl.Commands.ListUserPermissionsCommand do
+ alias RabbitMQ.CLI.Core.DocGuide
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+ use RabbitMQ.CLI.DefaultOutput
+
+ def formatter(), do: RabbitMQ.CLI.Formatters.Table
+
+ def scopes(), do: [:ctl, :diagnostics]
+ def switches(), do: [timeout: :integer]
+ def aliases(), do: [t: :timeout]
+
+ def merge_defaults(args, opts) do
+ {args, Map.merge(%{table_headers: true}, opts)}
+ end
+
+ def validate([], _), do: {:validation_failure, :not_enough_args}
+ def validate([_ | _] = args, _) when length(args) > 1, do: {:validation_failure, :too_many_args}
+ def validate([_], _), do: :ok
+
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+
+ def run([username], %{node: node_name, timeout: timeout}) do
+ :rabbit_misc.rpc_call(
+ node_name,
+ :rabbit_auth_backend_internal,
+ :list_user_permissions,
+ [username],
+ timeout
+ )
+ end
+
+ def usage, do: "list_user_permissions [--no-table-headers] <username>"
+
+ def usage_additional do
+ [
+ ["<username>", "Name of the user"]
+ ]
+ end
+
+ def usage_doc_guides() do
+ [
+ DocGuide.access_control(),
+ DocGuide.virtual_hosts()
+ ]
+ end
+
+ def help_section(), do: :access_control
+ def description(), do: "Lists permissions of a user across all virtual hosts"
+
+ def banner([username], _), do: "Listing permissions for user \"#{username}\" ..."
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_user_topic_permissions_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_user_topic_permissions_command.ex
new file mode 100644
index 0000000000..48b7fee5e2
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_user_topic_permissions_command.ex
@@ -0,0 +1,54 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Ctl.Commands.ListUserTopicPermissionsCommand do
+ alias RabbitMQ.CLI.Core.DocGuide
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ def scopes(), do: [:ctl, :diagnostics]
+ use RabbitMQ.CLI.Core.AcceptsDefaultSwitchesAndTimeout
+
+ def merge_defaults(args, opts) do
+ {args, Map.merge(%{table_headers: false}, opts)}
+ end
+
+ use RabbitMQ.CLI.Core.AcceptsOnePositionalArgument
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+
+ def run([username], %{node: node_name, timeout: timeout}) do
+ :rabbit_misc.rpc_call(
+ node_name,
+ :rabbit_auth_backend_internal,
+ :list_user_topic_permissions,
+ [username],
+ timeout
+ )
+ end
+
+ use RabbitMQ.CLI.DefaultOutput
+
+ def formatter(), do: RabbitMQ.CLI.Formatters.Table
+
+ def usage, do: "list_user_topic_permissions [--no-table-headers] <username>"
+
+ def usage_additional do
+ [
+ ["<username>", "Name of the user"]
+ ]
+ end
+
+ def usage_doc_guides() do
+ [
+ DocGuide.access_control()
+ ]
+ end
+
+ def help_section(), do: :access_control
+ def description(), do: "Lists user topic permissions"
+
+ def banner([username], _), do: "Listing topic permissions for user \"#{username}\" ..."
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_users_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_users_command.ex
new file mode 100644
index 0000000000..e87ea386d0
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_users_command.ex
@@ -0,0 +1,43 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Ctl.Commands.ListUsersCommand do
+ alias RabbitMQ.CLI.Core.DocGuide
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ def scopes(), do: [:ctl, :diagnostics]
+ use RabbitMQ.CLI.Core.AcceptsDefaultSwitchesAndTimeout
+
+ def merge_defaults(args, opts) do
+ {args, Map.merge(%{table_headers: true}, opts)}
+ end
+
+ use RabbitMQ.CLI.Core.AcceptsNoPositionalArguments
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+
+ def run([], %{node: node_name, timeout: timeout}) do
+ :rabbit_misc.rpc_call(node_name, :rabbit_auth_backend_internal, :list_users, [], timeout)
+ end
+
+ use RabbitMQ.CLI.DefaultOutput
+
+ def formatter(), do: RabbitMQ.CLI.Formatters.Table
+
+ def usage, do: "list_users [--no-table-headers]"
+
+ def usage_doc_guides() do
+ [
+ DocGuide.access_control()
+ ]
+ end
+
+ def help_section(), do: :user_management
+
+ def description(), do: "List user names and tags"
+
+ def banner(_, _), do: "Listing users ..."
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_vhost_limits_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_vhost_limits_command.ex
new file mode 100644
index 0000000000..67b138f1e0
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_vhost_limits_command.ex
@@ -0,0 +1,91 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Ctl.Commands.ListVhostLimitsCommand do
+ alias RabbitMQ.CLI.Core.DocGuide
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ def scopes(), do: [:ctl, :diagnostics]
+ def switches(), do: [global: :boolean]
+
+ def merge_defaults(args, %{global: true} = opts) do
+ {args, Map.merge(%{table_headers: true}, opts)}
+ end
+
+ def merge_defaults(args, opts) do
+ {args, Map.merge(%{vhost: "/", table_headers: true}, opts)}
+ end
+
+ use RabbitMQ.CLI.Core.AcceptsNoPositionalArguments
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+
+ def run([], %{node: node_name, global: true}) do
+ case :rabbit_misc.rpc_call(node_name, :rabbit_vhost_limit, :list, []) do
+ [] ->
+ []
+
+ {:error, err} ->
+ {:error, err}
+
+ {:badrpc, node} ->
+ {:badrpc, node}
+
+ val ->
+ Enum.map(val, fn {vhost, val} ->
+ {:ok, val_encoded} = JSON.encode(Map.new(val))
+ [vhost: vhost, limits: val_encoded]
+ end)
+ end
+ end
+
+ def run([], %{node: node_name, vhost: vhost}) do
+ case :rabbit_misc.rpc_call(node_name, :rabbit_vhost_limit, :list, [vhost]) do
+ [] ->
+ []
+
+ {:error, err} ->
+ {:error, err}
+
+ {:badrpc, node} ->
+ {:badrpc, node}
+
+ val when is_list(val) or is_map(val) ->
+        {:ok, val_encoded} = JSON.encode(Map.new(val))
+        val_encoded
+ end
+ end
+
+ use RabbitMQ.CLI.DefaultOutput
+
+ def formatter(), do: RabbitMQ.CLI.Formatters.Table
+
+ def usage, do: "list_vhost_limits [--vhost <vhost>] [--global] [--no-table-headers]"
+
+ def usage_additional() do
+ [
+ ["--global", "list global limits (those not associated with a virtual host)"]
+ ]
+ end
+
+ def usage_doc_guides() do
+ [
+ DocGuide.virtual_hosts()
+ ]
+ end
+
+ def help_section(), do: :virtual_hosts
+
+ def description(), do: "Displays configured virtual host limits"
+
+ def banner([], %{global: true}) do
+ "Listing limits for all vhosts ..."
+ end
+
+ def banner([], %{vhost: vhost}) do
+ "Listing limits for vhost \"#{vhost}\" ..."
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_vhosts_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_vhosts_command.ex
new file mode 100644
index 0000000000..b570aa7486
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_vhosts_command.ex
@@ -0,0 +1,85 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Ctl.Commands.ListVhostsCommand do
+ alias RabbitMQ.CLI.Core.DocGuide
+ alias RabbitMQ.CLI.Ctl.InfoKeys
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+ use RabbitMQ.CLI.DefaultOutput
+
+ @info_keys ~w(name description tags tracing cluster_state)a
+
+ def info_keys(), do: @info_keys
+
+ def scopes(), do: [:ctl, :diagnostics]
+ use RabbitMQ.CLI.Core.AcceptsDefaultSwitchesAndTimeout
+
+ def merge_defaults([], opts) do
+ merge_defaults(["name"], opts)
+ end
+
+ def merge_defaults(args, opts) do
+ {args, Map.merge(%{table_headers: true}, opts)}
+ end
+
+ def validate(args, _) do
+ case InfoKeys.validate_info_keys(args, @info_keys) do
+ {:ok, _} -> :ok
+ err -> err
+ end
+ end
+
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+
+ def run([_ | _] = args, %{node: node_name, timeout: timeout}) do
+ :rabbit_misc.rpc_call(node_name, :rabbit_vhost, :info_all, [], timeout)
+ |> filter_by_arg(args)
+ end
+
+ def formatter(), do: RabbitMQ.CLI.Formatters.Table
+
+ def usage, do: "list_vhosts [--no-table-headers] [<column> ...]"
+
+ def usage_additional() do
+ [
+ ["<column>", "must be one of " <> Enum.join(Enum.sort(@info_keys), ", ")]
+ ]
+ end
+
+ def usage_doc_guides() do
+ [
+ DocGuide.virtual_hosts(),
+ DocGuide.access_control()
+ ]
+ end
+
+ def help_section(), do: :access_control
+
+ def description(), do: "Lists virtual hosts"
+
+ def banner(_, _), do: "Listing vhosts ..."
+
+ #
+ # Implementation
+ #
+
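+  # errors such as {:badrpc, _} reach this function as tuples; pass them through unchanged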
+ defp filter_by_arg(vhosts, _) when is_tuple(vhosts) do
+ vhosts
+ end
+
+ defp filter_by_arg(vhosts, [_ | _] = args) do
+ symbol_args = InfoKeys.prepare_info_keys(args)
+
+ vhosts
+ |> Enum.map(fn vhost ->
+ symbol_args
+ |> Enum.filter(fn arg -> vhost[arg] != nil end)
+ |> Enum.map(fn arg -> {arg, vhost[arg]} end)
+ end)
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/node_health_check_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/node_health_check_command.ex
new file mode 100644
index 0000000000..31ea748d9f
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/node_health_check_command.ex
@@ -0,0 +1,88 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Ctl.Commands.NodeHealthCheckCommand do
+ alias RabbitMQ.CLI.Core.DocGuide
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ @default_timeout 70_000
+
+ def scopes(), do: [:ctl, :diagnostics]
+ use RabbitMQ.CLI.Core.AcceptsDefaultSwitchesAndTimeout
+
+ def merge_defaults(args, opts) do
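+    # cap a missing or :infinity timeout at the default so the check cannot hang indefinitely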
+ timeout =
+ case opts[:timeout] do
+ nil -> @default_timeout
+ :infinity -> @default_timeout
+ other -> other
+ end
+
+ {args, Map.merge(opts, %{timeout: timeout})}
+ end
+
+ use RabbitMQ.CLI.Core.AcceptsNoPositionalArguments
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+
+ def run([], %{node: node_name, timeout: timeout}) do
+ case :rabbit_misc.rpc_call(node_name, :rabbit_health_check, :node, [node_name, timeout]) do
+ :ok ->
+ :ok
+
+ true ->
+ :ok
+
+ {:badrpc, _} = err ->
+ err
+
+ {:error_string, error_message} ->
+ {:healthcheck_failed, error_message}
+
+ {:node_is_ko, error_message, _exit_code} ->
+ {:healthcheck_failed, error_message}
+
+ other ->
+ other
+ end
+ end
+
+ def output(:ok, _) do
+ {:ok, "Health check passed"}
+ end
+
+ def output({:healthcheck_failed, message}, _) do
+ {:error, RabbitMQ.CLI.Core.ExitCodes.exit_software(),
+ "Error: health check failed. Message: #{message}"}
+ end
+
+ use RabbitMQ.CLI.DefaultOutput
+
+ def usage, do: "node_health_check"
+
+ def usage_doc_guides() do
+ [
+ DocGuide.monitoring()
+ ]
+ end
+
+ def help_section(), do: :deprecated
+ def description() do
+ "DEPRECATED. Performs intrusive, opinionated health checks on a fully booted node. " <>
+ "See https://www.rabbitmq.com/monitoring.html#health-checks instead"
+ end
+
+ def banner(_, %{node: node_name, timeout: timeout}) do
+ [
+ "This command is DEPRECATED and will be removed in a future version.",
+ "It performs intrusive, opinionated health checks and requires a fully booted node.",
+ "Use one of the options covered in https://www.rabbitmq.com/monitoring.html#health-checks instead.",
+ "Timeout: #{trunc(timeout / 1000)} seconds ...",
+ "Checking health of node #{node_name} ..."
+ ]
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/ping_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/ping_command.ex
new file mode 100644
index 0000000000..7efb3b39f3
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/ping_command.ex
@@ -0,0 +1,88 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2016-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Ctl.Commands.PingCommand do
+ alias RabbitMQ.CLI.Core.DocGuide
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ @default_timeout 60_000
+
+ def scopes(), do: [:ctl, :diagnostics]
+
+ def merge_defaults(args, opts) do
+ timeout =
+ case opts[:timeout] do
+ nil -> @default_timeout
+ :infinity -> @default_timeout
+ other -> other
+ end
+
+ {args, Map.merge(opts, %{timeout: timeout})}
+ end
+
+ use RabbitMQ.CLI.Core.AcceptsDefaultSwitchesAndTimeout
+ use RabbitMQ.CLI.Core.AcceptsNoPositionalArguments
+
+ def run([], %{node: node_name, timeout: timeout}) do
+    # this is very similar to what net_adm:ping/1 does, reimplemented with support
+    # for custom timeouts and the error values used by CLI commands
+ msg = "Failed to connect and authenticate to #{node_name} in #{timeout} ms"
+
+ try do
+ case :gen.call({:net_kernel, node_name}, :"$gen_call", {:is_auth, node()}, timeout) do
+ :ok ->
+ :ok
+
+ {:ok, _} ->
+ :ok
+
+ _ ->
+ :erlang.disconnect_node(node_name)
+ {:error, msg}
+ end
+ catch
+ :exit, _ ->
+ :erlang.disconnect_node(node_name)
+ {:error, msg}
+
+ _ ->
+ :erlang.disconnect_node(node_name)
+ {:error, msg}
+ end
+ end
+
+ def output(:ok, _) do
+ {:ok, "Ping succeeded"}
+ end
+
+ def output({:error, :timeout}, %{node: node_name}) do
+ {:error, RabbitMQ.CLI.Core.ExitCodes.exit_software(),
+ "Error: timed out while waiting for a response from #{node_name}."}
+ end
+ use RabbitMQ.CLI.DefaultOutput
+
+ def usage() do
+ "ping"
+ end
+
+ def usage_doc_guides() do
+ [
+ DocGuide.monitoring()
+ ]
+ end
+
+ def help_section(), do: :observability_and_health_checks
+
+ def description(), do: "Checks that the node OS process is up, registered with EPMD and CLI tools can authenticate with it"
+
+ def banner([], %{node: node_name, timeout: timeout}) when is_number(timeout) do
+ "Will ping #{node_name}. This only checks if the OS process is running and registered with epmd. Timeout: #{
+ timeout
+ } ms."
+ end
+
+ def banner([], %{node: node_name, timeout: _timeout}) do
+ "Will ping #{node_name}. This only checks if the OS process is running and registered with epmd."
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/purge_queue_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/purge_queue_command.ex
new file mode 100644
index 0000000000..1be25beb7d
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/purge_queue_command.ex
@@ -0,0 +1,74 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Ctl.Commands.PurgeQueueCommand do
+ alias RabbitMQ.CLI.Core.DocGuide
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ use RabbitMQ.CLI.Core.AcceptsDefaultSwitchesAndTimeout
+
+ def merge_defaults(args, opts) do
+ {args, Map.merge(%{vhost: "/"}, opts)}
+ end
+
+ use RabbitMQ.CLI.Core.AcceptsOnePositionalArgument
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+
+ def run([queue], %{node: node_name, vhost: vhost, timeout: timeout}) do
+ res =
+ :rabbit_misc.rpc_call(
+ node_name,
+ :rabbit_amqqueue,
+ :lookup,
+ [:rabbit_misc.r(vhost, :queue, queue)],
+ timeout
+ )
+
+ case res do
+ {:ok, q} -> purge(node_name, q, timeout)
+ _ -> res
+ end
+ end
+
+ use RabbitMQ.CLI.DefaultOutput
+
+ def usage, do: "purge_queue <queue>"
+
+ def usage_additional() do
+ [
+ ["<queue>", "Name of the queue to purge"]
+ ]
+ end
+
+ def usage_doc_guides() do
+ [
+ DocGuide.queues()
+ ]
+ end
+
+ def help_section(), do: :queues
+
+ def description(), do: "Purges a queue (removes all messages in it)"
+
+ def banner([queue], %{vhost: vhost}) do
+ "Purging queue '#{queue}' in vhost '#{vhost}' ..."
+ end
+
+ #
+ # Implementation
+ #
+
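+  # a successful purge returns {:ok, message_count}; collapse it to :ok and pass errors through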
+ defp purge(node_name, q, timeout) do
+ res = :rabbit_misc.rpc_call(node_name, :rabbit_amqqueue, :purge, [q], timeout)
+
+ case res do
+ {:ok, _message_count} -> :ok
+ _ -> res
+ end
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/rename_cluster_node_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/rename_cluster_node_command.ex
new file mode 100644
index 0000000000..7faa30d00b
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/rename_cluster_node_command.ex
@@ -0,0 +1,109 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2016-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Ctl.Commands.RenameClusterNodeCommand do
+ require Integer
+ alias RabbitMQ.CLI.Core.{DocGuide, Validators}
+ import RabbitMQ.CLI.Core.DataCoercion
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ def merge_defaults(args, opts), do: {args, opts}
+
+ def validate(args, _) when length(args) < 2 do
+ {:validation_failure, :not_enough_args}
+ end
+
+ def validate(_, _) do
+ :ok
+ end
+
+ def validate_execution_environment(args, opts) do
+ Validators.chain(
+ [
+ &validate_args_count_even/2,
+ &Validators.node_is_not_running/2,
+ &Validators.mnesia_dir_is_set/2,
+ &Validators.feature_flags_file_is_set/2,
+ &Validators.rabbit_is_loaded/2
+ ],
+ [args, opts]
+ )
+ end
+
+ def run(nodes, %{node: node_name}) do
+ node_pairs = make_node_pairs(nodes)
+
+ try do
+ :rabbit_mnesia_rename.rename(node_name, node_pairs)
+ catch
+ _, reason ->
+ {:rename_failed, reason}
+ end
+ end
+
+ use RabbitMQ.CLI.DefaultOutput
+
+ def usage() do
+ "rename_cluster_node <oldnode1> <newnode1> [oldnode2] [newnode2] ..."
+ end
+
+ def usage_additional() do
+ [
+ ["<oldnode>", "Original node name"],
+ ["<newnode>", "New node name"]
+ ]
+ end
+
+ def usage_doc_guides() do
+ [
+ DocGuide.clustering()
+ ]
+ end
+
+ def help_section(), do: :cluster_management
+
+ def description(), do: "Renames cluster nodes in the local database"
+
+ def banner(args, _) do
+ [
+ "Renaming cluster nodes: \n ",
+ for {node_from, node_to} <- make_node_pairs(args) do
+ "#{node_from} -> #{node_to} \n"
+ end
+ ]
+ |> List.flatten()
+ |> Enum.join()
+ end
+
+ #
+ # Implementation
+ #
+
+ defp validate_args_count_even(args, _) do
+    case args_count_even?(args) do
+ true ->
+ :ok
+
+ false ->
+ {:validation_failure,
+ {:bad_argument, "Argument list should contain even number of nodes"}}
+ end
+ end
+
+  defp args_count_even?(args) do
+ Integer.is_even(length(args))
+ end
+
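+  # pairs up consecutive arguments: [a, b, c, d] becomes [{a, b}, {c, d}]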
+ defp make_node_pairs([]) do
+ []
+ end
+
+ defp make_node_pairs([from, to | rest]) do
+ [{to_atom(from), to_atom(to)} | make_node_pairs(rest)]
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/report_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/report_command.ex
new file mode 100644
index 0000000000..c06497a7e6
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/report_command.ex
@@ -0,0 +1,118 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Ctl.Commands.ReportCommand do
+ alias RabbitMQ.CLI.Core.DocGuide
+ alias RabbitMQ.CLI.Ctl.Commands.{
+ ClusterStatusCommand,
+ EnvironmentCommand,
+ ListBindingsCommand,
+ ListChannelsCommand,
+ ListConnectionsCommand,
+ ListExchangesCommand,
+ ListGlobalParametersCommand,
+ ListParametersCommand,
+ ListPermissionsCommand,
+ ListPoliciesCommand,
+ ListQueuesCommand,
+ StatusCommand
+ }
+ alias RabbitMQ.CLI.Diagnostics.Commands.{
+ CommandLineArgumentsCommand,
+ OsEnvCommand
+ }
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ def scopes(), do: [:ctl, :diagnostics]
+
+ use RabbitMQ.CLI.Core.MergesNoDefaults
+
+ def validate([_ | _] = args, _) when length(args) != 0,
+ do: {:validation_failure, :too_many_args}
+
+ def validate([], %{formatter: formatter}) do
+ case formatter do
+ "report" -> :ok
+ _other -> {:validation_failure, "Only report formatter is supported"}
+ end
+ end
+
+ def validate([], _), do: :ok
+
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+
+ def run([], %{node: node_name} = opts) do
+ case :rabbit_misc.rpc_call(node_name, :rabbit_vhost, :list_names, []) do
+ {:badrpc, _} = err ->
+ err
+
+ vhosts ->
+ data = [
+ run_command(StatusCommand, [], opts),
+ run_command(ClusterStatusCommand, [], opts),
+ run_command(EnvironmentCommand, [], opts),
+ run_command(ListConnectionsCommand, info_keys(ListConnectionsCommand), opts),
+ run_command(ListChannelsCommand, info_keys(ListChannelsCommand), opts),
+ run_command(CommandLineArgumentsCommand, [], opts),
+ run_command(OsEnvCommand, [], opts)
+ ]
+
+ vhost_data =
+ vhosts
+ |> Enum.flat_map(fn v ->
+ opts = Map.put(opts, :vhost, v)
+
+ [
+ run_command(ListQueuesCommand, info_keys(ListQueuesCommand), opts),
+ run_command(ListExchangesCommand, info_keys(ListExchangesCommand), opts),
+ run_command(ListBindingsCommand, info_keys(ListBindingsCommand), opts),
+ run_command(ListPermissionsCommand, [], opts),
+ run_command(ListPoliciesCommand, [], opts),
+ run_command(ListGlobalParametersCommand, [], opts),
+            run_command(ListParametersCommand, [], opts)
+          ]
+ end)
+
+ data ++ vhost_data
+ end
+ end
+
+ use RabbitMQ.CLI.DefaultOutput
+
+ def formatter(), do: RabbitMQ.CLI.Formatters.Report
+
+ def usage, do: "report"
+
+ def usage_doc_guides() do
+ [
+ DocGuide.monitoring()
+ ]
+ end
+
+ def help_section(), do: :observability_and_health_checks
+
+ def description(), do: "Generate a server status report containing a concatenation of all server status information for support purposes"
+
+ def banner(_, %{node: node_name}), do: "Reporting server status of node #{node_name} ..."
+
+ #
+ # Implementation
+ #
+
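+  # runs a command with its own merged defaults, capturing {command, banner, formatted result} for the report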
+ defp run_command(command, args, opts) do
+ {args, opts} = command.merge_defaults(args, opts)
+ banner = command.banner(args, opts)
+ command_result = command.run(args, opts) |> command.output(opts)
+ {command, banner, command_result}
+ end
+
+ defp info_keys(command) do
+ command.info_keys()
+ |> Enum.map(&to_string/1)
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/reset_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/reset_command.ex
new file mode 100644
index 0000000000..575ef2491d
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/reset_command.ex
@@ -0,0 +1,40 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Ctl.Commands.ResetCommand do
+ alias RabbitMQ.CLI.Core.DocGuide
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ use RabbitMQ.CLI.Core.MergesNoDefaults
+ use RabbitMQ.CLI.Core.AcceptsNoPositionalArguments
+ use RabbitMQ.CLI.Core.RequiresRabbitAppStopped
+
+ def run([], %{node: node_name}) do
+ :rabbit_misc.rpc_call(node_name, :rabbit_mnesia, :reset, [])
+ end
+
+ def output({:error, :mnesia_unexpectedly_running}, %{node: node_name}) do
+ {:error, RabbitMQ.CLI.Core.ExitCodes.exit_software(),
+ RabbitMQ.CLI.DefaultOutput.mnesia_running_error(node_name)}
+ end
+
+ use RabbitMQ.CLI.DefaultOutput
+
+ def usage, do: "reset"
+
+ def usage_doc_guides() do
+ [
+ DocGuide.clustering()
+ ]
+ end
+
+ def help_section(), do: :node_management
+
+ def description(), do: "Instructs a RabbitMQ node to leave the cluster and return to its virgin state"
+
+ def banner(_, %{node: node_name}), do: "Resetting node #{node_name} ..."
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/restart_vhost_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/restart_vhost_command.ex
new file mode 100644
index 0000000000..36a7f702bc
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/restart_vhost_command.ex
@@ -0,0 +1,63 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+alias RabbitMQ.CLI.Core.ExitCodes
+
+defmodule RabbitMQ.CLI.Ctl.Commands.RestartVhostCommand do
+ alias RabbitMQ.CLI.Core.DocGuide
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ use RabbitMQ.CLI.Core.AcceptsDefaultSwitchesAndTimeout
+
+ def merge_defaults(args, opts), do: {args, Map.merge(%{vhost: "/"}, opts)}
+
+ use RabbitMQ.CLI.Core.AcceptsNoPositionalArguments
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+
+ def run([], %{node: node_name, vhost: vhost, timeout: timeout}) do
+ :rabbit_misc.rpc_call(node_name, :rabbit_vhost_sup_sup, :start_vhost, [vhost], timeout)
+ end
+
+ def output({:ok, _pid}, %{vhost: vhost, node: node_name}) do
+ {:ok, "Successfully restarted vhost '#{vhost}' on node '#{node_name}'"}
+ end
+
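+  # a vhost that is already running is treated as successfully restarted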
+ def output({:error, {:already_started, _pid}}, %{vhost: vhost, node: node_name}) do
+ {:ok, "Vhost '#{vhost}' is already running on node '#{node_name}'"}
+ end
+
+ def output({:error, err}, %{vhost: vhost, node: node_name}) do
+ {:error, ExitCodes.exit_software(),
+ ["Failed to start vhost '#{vhost}' on node '#{node_name}'", "Reason: #{inspect(err)}"]}
+ end
+
+ use RabbitMQ.CLI.DefaultOutput
+
+ def usage, do: "restart_vhost [--vhost <vhost>]"
+
+ def usage_additional() do
+ [
+ ["--vhost", "Virtual host name"]
+ ]
+ end
+
+ def usage_doc_guides() do
+ [
+ DocGuide.virtual_hosts(),
+ DocGuide.monitoring()
+ ]
+ end
+
+ def help_section(), do: :virtual_hosts
+
+ def description(), do: "Restarts a failed vhost data stores and queues"
+
+ def banner(_, %{node: node_name, vhost: vhost}) do
+ "Trying to restart vhost '#{vhost}' on node '#{node_name}' ..."
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/resume_listeners_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/resume_listeners_command.ex
new file mode 100644
index 0000000000..1f13660e0d
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/resume_listeners_command.ex
@@ -0,0 +1,45 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Ctl.Commands.ResumeListenersCommand do
+ @moduledoc """
+ Resumes all client connection listeners, making them accept new client
+ connections. This command is the opposite of `SuspendListenersCommand`.
+
+ This command is meant to be used when automating upgrades.
+ See also `SuspendListenersCommand`.
+ """
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ alias RabbitMQ.CLI.Core.DocGuide
+
+ use RabbitMQ.CLI.Core.MergesNoDefaults
+ use RabbitMQ.CLI.Core.AcceptsNoPositionalArguments
+
+ def run([], %{node: node_name, timeout: timeout}) do
+ :rabbit_misc.rpc_call(node_name, :rabbit_maintenance, :resume_all_client_listeners, [], timeout)
+ end
+
+ use RabbitMQ.CLI.DefaultOutput
+
+ def usage, do: "resume_listeners"
+
+ def usage_doc_guides() do
+ [
+ DocGuide.upgrade()
+ ]
+ end
+
+ def help_section(), do: :operations
+
+ def description(), do: "Resumes client connection listeners making them accept client connections again"
+
+ def banner(_, %{node: node_name}) do
+ "Will resume client connection listeners on node #{node_name}. "
+ <> "The node will now accept client connections"
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/rotate_logs_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/rotate_logs_command.ex
new file mode 100644
index 0000000000..f3de3671fc
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/rotate_logs_command.ex
@@ -0,0 +1,34 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Ctl.Commands.RotateLogsCommand do
+ alias RabbitMQ.CLI.Core.DocGuide
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ use RabbitMQ.CLI.Core.MergesNoDefaults
+ use RabbitMQ.CLI.Core.AcceptsNoPositionalArguments
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+
+ def run([], %{node: node_name}) do
+ :rabbit_misc.rpc_call(node_name, :rabbit, :rotate_logs, [])
+ end
+
+ use RabbitMQ.CLI.DefaultOutput
+
+ def usage, do: "rotate_logs"
+
+ def usage_doc_guides() do
+ [
+ DocGuide.logging()
+ ]
+ end
+
+ def help_section(), do: :node_management
+
+ def description(), do: "Instructs the RabbitMQ node to perform internal log rotation"
+
+ def banner(_, %{node: node_name}), do: "Rotating logs for node #{node_name} ..."
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_cluster_name_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_cluster_name_command.ex
new file mode 100644
index 0000000000..f919cb2ae6
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_cluster_name_command.ex
@@ -0,0 +1,47 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Ctl.Commands.SetClusterNameCommand do
+ alias RabbitMQ.CLI.Core.{DocGuide, Helpers}
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ use RabbitMQ.CLI.Core.MergesNoDefaults
+ use RabbitMQ.CLI.Core.AcceptsOnePositionalArgument
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+
+ def run([cluster_name], %{node: node_name}) do
+ :rabbit_misc.rpc_call(node_name, :rabbit_nodes, :set_cluster_name, [
+ cluster_name,
+ Helpers.cli_acting_user()
+ ])
+ end
+
+ use RabbitMQ.CLI.DefaultOutput
+
+ def banner([cluster_name], _) do
+ "Setting cluster name to #{cluster_name} ..."
+ end
+
+ def usage, do: "set_cluster_name <name>"
+
+ def usage_additional() do
+ [
+ ["<name>", "New cluster name"]
+ ]
+ end
+
+ def usage_doc_guides() do
+ [
+ DocGuide.virtual_hosts(),
+ DocGuide.access_control()
+ ]
+ end
+
+ def help_section(), do: :configuration
+
+ def description(), do: "Sets the cluster name"
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_disk_free_limit_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_disk_free_limit_command.ex
new file mode 100644
index 0000000000..cf97c4655e
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_disk_free_limit_command.ex
@@ -0,0 +1,140 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Ctl.Commands.SetDiskFreeLimitCommand do
+ alias RabbitMQ.CLI.Core.DocGuide
+ import RabbitMQ.CLI.Core.Memory
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ use RabbitMQ.CLI.Core.MergesNoDefaults
+
+ def validate([], _) do
+ {:validation_failure, :not_enough_args}
+ end
+
+ def validate(["mem_relative"], _) do
+ {:validation_failure, :not_enough_args}
+ end
+
+ def validate(["mem_relative" | _] = args, _) when length(args) != 2 do
+ {:validation_failure, :too_many_args}
+ end
+
+ def validate([limit], _) do
+ case Integer.parse(limit) do
+ {_, ""} ->
+ :ok
+
+ {limit_val, units} ->
+ case memory_unit_absolute(limit_val, units) do
+ scaled_limit when is_integer(scaled_limit) -> :ok
+ _ -> {:validation_failure, :bad_argument}
+ end
+
+ _ ->
+ {:validation_failure, :bad_argument}
+ end
+ end
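+
+ # For illustration (values arbitrary): "1000000" is accepted as a byte count,
+ # "2GB" is accepted once memory_unit_absolute/2 scales it, and an unknown
+ # unit suffix fails with {:validation_failure, :bad_argument}.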
+
+ def validate(["mem_relative", fraction], _) do
+ case Float.parse(fraction) do
+ {val, ""} when val >= 0.0 -> :ok
+ _ -> {:validation_failure, :bad_argument}
+ end
+ end
+
+ def validate([_ | rest], _) when length(rest) > 0 do
+ {:validation_failure, :too_many_args}
+ end
+
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+
+ def run(["mem_relative", _] = args, opts) do
+ set_disk_free_limit_relative(args, opts)
+ end
+
+ def run([limit], %{node: _} = opts) when is_binary(limit) do
+ case Integer.parse(limit) do
+ {limit_val, ""} -> set_disk_free_limit_absolute([limit_val], opts)
+ {limit_val, units} -> set_disk_free_limit_in_units([limit_val, units], opts)
+ end
+ end
+
+ def run([limit], opts) do
+ set_disk_free_limit_absolute([limit], opts)
+ end
+
+ use RabbitMQ.CLI.DefaultOutput
+
+ def banner(["mem_relative", arg], %{node: node_name}) do
+ "Setting disk free limit on #{node_name} to #{arg} times the total RAM ..."
+ end
+
+ def banner([arg], %{node: node_name}),
+ do: "Setting disk free limit on #{node_name} to #{arg} bytes ..."
+
+ def usage, do: "set_disk_free_limit <disk_limit> | mem_relative <fraction>"
+
+ def usage_additional() do
+ [
+ ["<disk_limit>", "New limit as an absolute value with units, e.g. 1GB"],
+ ["mem_relative <fraction>", "New limit as a fraction of total memory reported by the OS"]
+ ]
+ end
+
+ def usage_doc_guides() do
+ [
+ DocGuide.disk_alarms(),
+ DocGuide.alarms()
+ ]
+ end
+
+ def help_section(), do: :configuration
+
+ def description(), do: "Sets the disk_free_limit setting"
+
+ #
+ # Implementation
+ #
+
+ defp set_disk_free_limit_relative(["mem_relative", fraction], %{node: node_name})
+ when is_float(fraction) do
+ make_rpc_call(node_name, [{:mem_relative, fraction}])
+ end
+
+ defp set_disk_free_limit_relative(["mem_relative", integer_input], %{node: node_name})
+ when is_integer(integer_input) do
+ make_rpc_call(node_name, [{:mem_relative, integer_input * 1.0}])
+ end
+
+ defp set_disk_free_limit_relative(["mem_relative", fraction_str], %{node: _} = opts)
+ when is_binary(fraction_str) do
+ {fraction_val, ""} = Float.parse(fraction_str)
+ set_disk_free_limit_relative(["mem_relative", fraction_val], opts)
+ end
+
+ ## ------------------------ Absolute Size Call -----------------------------
+
+ defp set_disk_free_limit_absolute([limit], %{node: node_name}) when is_integer(limit) do
+ make_rpc_call(node_name, [limit])
+ end
+
+ defp set_disk_free_limit_absolute([limit], %{node: _} = opts) when is_float(limit) do
+ set_disk_free_limit_absolute([limit |> Float.floor() |> round], opts)
+ end
+
+ defp set_disk_free_limit_in_units([limit_val, units], opts) do
+ case memory_unit_absolute(limit_val, units) do
+ scaled_limit when is_integer(scaled_limit) ->
+ set_disk_free_limit_absolute([scaled_limit], opts)
+ end
+ end
+
+ defp make_rpc_call(node_name, args) do
+ :rabbit_misc.rpc_call(node_name, :rabbit_disk_monitor, :set_disk_free_limit, args)
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_global_parameter_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_global_parameter_command.ex
new file mode 100644
index 0000000000..8c46e9d592
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_global_parameter_command.ex
@@ -0,0 +1,57 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Ctl.Commands.SetGlobalParameterCommand do
+ alias RabbitMQ.CLI.Core.{DocGuide, Helpers}
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ use RabbitMQ.CLI.Core.MergesNoDefaults
+
+ def validate(args, _) when length(args) < 2 do
+ {:validation_failure, :not_enough_args}
+ end
+ def validate(args, _) when length(args) > 2 do
+ {:validation_failure, :too_many_args}
+ end
+ def validate(_, _), do: :ok
+
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+
+ def run([name, value], %{node: node_name}) do
+ :rabbit_misc.rpc_call(
+ node_name,
+ :rabbit_runtime_parameters,
+ :parse_set_global,
+ [name, value, Helpers.cli_acting_user()]
+ )
+ end
+
+ use RabbitMQ.CLI.DefaultOutput
+
+ def usage, do: "set_global_parameter <name> <value>"
+
+ def usage_additional() do
+ [
+ ["<name>", "global parameter name (identifier)"],
+ ["<value>", "parameter value"]
+ ]
+ end
+
+ def usage_doc_guides() do
+ [
+ DocGuide.parameters()
+ ]
+ end
+
+ def help_section(), do: :parameters
+
+ def description(), do: "Sets a runtime parameter."
+
+ def banner([name, value], _) do
+ "Setting global runtime parameter \"#{name}\" to \"#{value}\" ..."
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_log_level_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_log_level_command.ex
new file mode 100644
index 0000000000..f5a8eacbfc
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_log_level_command.ex
@@ -0,0 +1,74 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Ctl.Commands.SetLogLevelCommand do
+ alias RabbitMQ.CLI.Core.DocGuide
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+ @known_levels [
+ "debug",
+ "info",
+ "notice",
+ "warning",
+ "error",
+ "critical",
+ "alert",
+ "emergency",
+ "none"
+ ]
+
+ use RabbitMQ.CLI.Core.MergesNoDefaults
+
+ def validate([], _) do
+ {:validation_failure, :not_enough_args}
+ end
+ def validate(args, _) when length(args) > 1 do
+ {:validation_failure, :too_many_args}
+ end
+
+ def validate([level], _) do
+ case Enum.member?(@known_levels, level) do
+ true ->
+ :ok
+
+ false ->
+ {:error, "level #{level} is not supported. Try one of debug, info, warning, error, none"}
+ end
+ end
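+
+ # For illustration: validate(["notice"], %{}) returns :ok, while an unknown
+ # level such as "verbose" falls into the {:error, message} clause above.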
+
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+
+ def run([log_level], %{node: node_name}) do
+ arg = String.to_atom(log_level)
+ :rabbit_misc.rpc_call(node_name, :rabbit_lager, :set_log_level, [arg])
+ end
+
+ def usage, do: "set_log_level <log_level>"
+
+ def usage_additional() do
+ [
+ ["<log_level>", "new log level"]
+ ]
+ end
+
+ def usage_doc_guides() do
+ [
+ DocGuide.logging()
+ ]
+ end
+
+ def help_section(), do: :configuration
+
+ def description(), do: "Sets log level in the running node"
+
+ def banner([log_level], _), do: "Setting log level to \"#{log_level}\" ..."
+
+ def output({:error, {:invalid_log_level, level}}, _opts) do
+ {:error, RabbitMQ.CLI.Core.ExitCodes.exit_software(),
+ "level #{level} is not supported. Try one of debug, info, warning, error, none"}
+ end
+
+ use RabbitMQ.CLI.DefaultOutput
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_operator_policy_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_operator_policy_command.ex
new file mode 100644
index 0000000000..3118c125cb
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_operator_policy_command.ex
@@ -0,0 +1,79 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Ctl.Commands.SetOperatorPolicyCommand do
+ alias RabbitMQ.CLI.Core.{DocGuide, Helpers}
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ def switches(), do: [priority: :integer, apply_to: :string]
+
+ def merge_defaults(args, opts) do
+ {args, Map.merge(%{vhost: "/", priority: 0, apply_to: "all"}, opts)}
+ end
+
+ def validate([], _) do
+ {:validation_failure, :not_enough_args}
+ end
+
+ def validate([_ | _] = args, _) when length(args) < 3 do
+ {:validation_failure, :not_enough_args}
+ end
+
+ def validate([_ | _] = args, _) when length(args) > 3 do
+ {:validation_failure, :too_many_args}
+ end
+
+ def validate(_, _), do: :ok
+
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+
+ def run([name, pattern, definition], %{
+ node: node_name,
+ vhost: vhost,
+ priority: priority,
+ apply_to: apply_to
+ }) do
+ :rabbit_misc.rpc_call(
+ node_name,
+ :rabbit_policy,
+ :parse_set_op,
+ [vhost, name, pattern, definition, priority, apply_to, Helpers.cli_acting_user()]
+ )
+ end
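+
+ # Hypothetical invocation: a <definition> of '{"max-length": 100000}' with
+ # --apply-to queues is forwarded verbatim to :rabbit_policy.parse_set_op;
+ # the JSON document is parsed on the server side.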
+
+ use RabbitMQ.CLI.DefaultOutput
+
+ def usage() do
+ "set_operator_policy [--vhost <vhost>] [--priority <priority>] [--apply-to <apply-to>] <name> <pattern> <definition>"
+ end
+
+ def usage_additional() do
+ [
+ ["<name>", "policy name (identifier)"],
+ ["<pattern>", "a regular expression pattern that will be used to match queue, exchanges, etc"],
+ ["<definition>", "policy definition (arguments). Must be a valid JSON document"],
+ ["--priority <priority>", "policy priority"],
+ ["--apply-to <queues | exchanges | all>", "policy should only apply to queues, exchanges, or all entities (both of the above)"]
+ ]
+ end
+
+ def usage_doc_guides() do
+ [
+ DocGuide.parameters()
+ ]
+ end
+
+ def help_section(), do: :policies
+
+ def description(), do: "Sets an operator policy that overrides a subset of arguments in user policies"
+
+ def banner([name, pattern, definition], %{vhost: vhost, priority: priority}) do
+ "Setting operator policy override \"#{name}\" for pattern \"#{pattern}\" to \"#{definition}\" with priority \"#{
+ priority
+ }\" for vhost \"#{vhost}\" ..."
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_parameter_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_parameter_command.ex
new file mode 100644
index 0000000000..910cc6ef73
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_parameter_command.ex
@@ -0,0 +1,68 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Ctl.Commands.SetParameterCommand do
+ alias RabbitMQ.CLI.Core.{DocGuide, Helpers}
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ def merge_defaults(args, opts) do
+ {args, Map.merge(%{vhost: "/"}, opts)}
+ end
+
+ def validate([], _) do
+ {:validation_failure, :not_enough_args}
+ end
+
+ def validate([_ | _] = args, _) when length(args) < 3 do
+ {:validation_failure, :not_enough_args}
+ end
+
+ def validate([_ | _] = args, _) when length(args) > 3 do
+ {:validation_failure, :too_many_args}
+ end
+
+ def validate(_, _), do: :ok
+
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+
+ def run([component_name, name, value], %{node: node_name, vhost: vhost}) do
+ :rabbit_misc.rpc_call(
+ node_name,
+ :rabbit_runtime_parameters,
+ :parse_set,
+ [vhost, component_name, name, value, Helpers.cli_acting_user()]
+ )
+ end
+
+ use RabbitMQ.CLI.DefaultOutput
+
+ def usage, do: "set_parameter [--vhost <vhost>] <component_name> <name> <value>"
+
+ def usage_additional() do
+ [
+ ["<component_name>", "component name"],
+ ["<name>", "parameter name (identifier)"],
+ ["<value>", "parameter value"]
+ ]
+ end
+
+ def usage_doc_guides() do
+ [
+ DocGuide.parameters()
+ ]
+ end
+
+ def help_section(), do: :parameters
+
+ def description(), do: "Sets a runtime parameter."
+
+ def banner([component_name, name, value], %{vhost: vhost}) do
+ "Setting runtime parameter \"#{name}\" for component \"#{component_name}\" to \"#{value}\" in vhost \"#{
+ vhost
+ }\" ..."
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_permissions_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_permissions_command.ex
new file mode 100644
index 0000000000..c87969121c
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_permissions_command.ex
@@ -0,0 +1,78 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Ctl.Commands.SetPermissionsCommand do
+ alias RabbitMQ.CLI.Core.{DocGuide, ExitCodes, Helpers}
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ def merge_defaults(args, opts) do
+ {args, Map.merge(%{vhost: "/"}, opts)}
+ end
+
+ def validate([], _) do
+ {:validation_failure, :not_enough_args}
+ end
+
+ def validate([_ | _] = args, _) when length(args) < 4 do
+ {:validation_failure, :not_enough_args}
+ end
+
+ def validate([_ | _] = args, _) when length(args) > 4 do
+ {:validation_failure, :too_many_args}
+ end
+
+ def validate(_, _), do: :ok
+
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+
+ def run([user, conf, write, read], %{node: node_name, vhost: vhost}) do
+ :rabbit_misc.rpc_call(
+ node_name,
+ :rabbit_auth_backend_internal,
+ :set_permissions,
+ [user, vhost, conf, write, read, Helpers.cli_acting_user()]
+ )
+ end
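+
+ # Example (user name illustrative): set_permissions -p / user1 ".*" ".*" ".*"
+ # grants user1 unrestricted configure, write and read patterns on vhost "/".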
+
+ def output({:error, {:no_such_user, username}}, %{node: node_name, formatter: "json"}) do
+ {:error, %{"result" => "error", "node" => node_name, "message" => "User #{username} does not exist"}}
+ end
+ def output({:error, {:no_such_vhost, vhost}}, %{node: node_name, formatter: "json"}) do
+ {:error, %{"result" => "error", "node" => node_name, "message" => "Virtual host #{vhost} does not exist"}}
+ end
+ def output({:error, {:no_such_user, username}}, _) do
+ {:error, ExitCodes.exit_nouser(), "User #{username} does not exist"}
+ end
+ def output({:error, {:no_such_vhost, vhost}}, _) do
+ {:error, "Virtual host #{vhost} does not exist"}
+ end
+ use RabbitMQ.CLI.DefaultOutput
+
+ def usage, do: "set_permissions [--vhost <vhost>] <username> <conf> <write> <read>"
+
+ def usage_additional() do
+ [
+ ["<username>", "Self-explanatory"],
+ ["<conf>", "Configuration permission pattern"],
+ ["<write>", "Write permission pattern"],
+ ["<read>", "Read permission pattern"]
+ ]
+ end
+
+ def usage_doc_guides() do
+ [
+ DocGuide.access_control(),
+ DocGuide.virtual_hosts()
+ ]
+ end
+
+ def help_section(), do: :access_control
+ def description(), do: "Sets user permissions for a vhost"
+
+ def banner([user | _], %{vhost: vhost}),
+ do: "Setting permissions for user \"#{user}\" in vhost \"#{vhost}\" ..."
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_policy_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_policy_command.ex
new file mode 100644
index 0000000000..af34f3c659
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_policy_command.ex
@@ -0,0 +1,76 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Ctl.Commands.SetPolicyCommand do
+ alias RabbitMQ.CLI.Core.{Helpers, DocGuide}
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ def switches(), do: [priority: :integer, apply_to: :string]
+
+ def merge_defaults(args, opts) do
+ {args, Map.merge(%{vhost: "/", priority: 0, apply_to: "all"}, opts)}
+ end
+
+ def validate([], _) do
+ {:validation_failure, :not_enough_args}
+ end
+ def validate(args, _) when length(args) < 3 do
+ {:validation_failure, :not_enough_args}
+ end
+ def validate(args, _) when length(args) > 3 do
+ {:validation_failure, :too_many_args}
+ end
+ def validate(_, _), do: :ok
+
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+
+ def run([name, pattern, definition], %{
+ node: node_name,
+ vhost: vhost,
+ priority: priority,
+ apply_to: apply_to
+ }) do
+ :rabbit_misc.rpc_call(
+ node_name,
+ :rabbit_policy,
+ :parse_set,
+ [vhost, name, pattern, definition, priority, apply_to, Helpers.cli_acting_user()]
+ )
+ end
+
+ use RabbitMQ.CLI.DefaultOutput
+
+ def usage() do
+ "set_policy [--vhost <vhost>] [--priority <priority>] [--apply-to <apply-to>] <name> <pattern> <definition>"
+ end
+
+ def usage_additional() do
+ [
+ ["<name>", "policy name (identifier)"],
+ ["<pattern>", "regular expression pattern that will be used to match queues, exchanges, etc"],
+ ["<definition>", "policy definition (arguments). Must be a valid JSON document"],
+ ["--priority <priority>", "policy priority"],
+ ["--apply-to <queues | exchanges | all>", "policy should only apply to queues, exchanges, or all entities (both of the above)"]
+ ]
+ end
+
+ def usage_doc_guides() do
+ [
+ DocGuide.parameters()
+ ]
+ end
+
+ def help_section(), do: :policies
+
+ def description(), do: "Sets or updates a policy"
+
+ def banner([name, pattern, definition], %{vhost: vhost, priority: priority}) do
+ "Setting policy \"#{name}\" for pattern \"#{pattern}\" to \"#{definition}\" with priority \"#{
+ priority
+ }\" for vhost \"#{vhost}\" ..."
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_topic_permissions_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_topic_permissions_command.ex
new file mode 100644
index 0000000000..c57dc1659b
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_topic_permissions_command.ex
@@ -0,0 +1,75 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Ctl.Commands.SetTopicPermissionsCommand do
+ alias RabbitMQ.CLI.Core.{DocGuide, ExitCodes, Helpers}
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ def merge_defaults(args, opts) do
+ {args, Map.merge(%{vhost: "/"}, opts)}
+ end
+
+ def validate(args, _) when length(args) < 4 do
+ {:validation_failure, :not_enough_args}
+ end
+ def validate(args, _) when length(args) > 4 do
+ {:validation_failure, :too_many_args}
+ end
+ def validate(_, _), do: :ok
+
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+
+ def run([user, exchange, write_pattern, read_pattern], %{node: node_name, vhost: vhost}) do
+ :rabbit_misc.rpc_call(
+ node_name,
+ :rabbit_auth_backend_internal,
+ :set_topic_permissions,
+ [user, vhost, exchange, write_pattern, read_pattern, Helpers.cli_acting_user()]
+ )
+ end
+
+ def output({:error, {:no_such_user, username}}, %{node: node_name, formatter: "json"}) do
+ {:error, %{"result" => "error", "node" => node_name, "message" => "User #{username} does not exist"}}
+ end
+ def output({:error, {:no_such_vhost, vhost}}, %{node: node_name, formatter: "json"}) do
+ {:error, %{"result" => "error", "node" => node_name, "message" => "Virtual host #{vhost} does not exist"}}
+ end
+ def output({:error, {:no_such_user, username}}, _) do
+ {:error, ExitCodes.exit_nouser(), "User #{username} does not exist"}
+ end
+ def output({:error, {:no_such_vhost, vhost}}, _) do
+ {:error, "Virtual host #{vhost} does not exist"}
+ end
+ use RabbitMQ.CLI.DefaultOutput
+
+ def usage do
+ "set_topic_permissions [--vhost <vhost>] <username> <exchange> <write> <read>"
+ end
+
+ def usage_additional do
+ [
+ ["<username>", "Self-explanatory"],
+ ["<exchange>", "Topic exchange to set the permissions for"],
+ ["<write>", "Write permission pattern"],
+ ["<read>", "Read permission pattern"]
+ ]
+ end
+
+ def usage_doc_guides() do
+ [
+ DocGuide.access_control()
+ ]
+ end
+
+ def help_section(), do: :access_control
+
+ def description(), do: "Sets user topic permissions for an exchange"
+
+ def banner([user, exchange, _, _], %{vhost: vhost}),
+ do:
+ "Setting topic permissions on \"#{exchange}\" for user \"#{user}\" in vhost \"#{vhost}\" ..."
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_user_limits_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_user_limits_command.ex
new file mode 100644
index 0000000000..603a8008e7
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_user_limits_command.ex
@@ -0,0 +1,48 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Ctl.Commands.SetUserLimitsCommand do
+ alias RabbitMQ.CLI.Core.{DocGuide, Helpers}
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ use RabbitMQ.CLI.Core.MergesNoDefaults
+ use RabbitMQ.CLI.Core.AcceptsTwoPositionalArguments
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+
+ def run([username, definition], %{node: node_name}) do
+ :rabbit_misc.rpc_call(node_name, :rabbit_auth_backend_internal, :set_user_limits, [
+ username,
+ definition,
+ Helpers.cli_acting_user()
+ ])
+ end
+
+ use RabbitMQ.CLI.DefaultOutput
+
+ def usage, do: "set_user_limits <username> <definition>"
+
+ def usage_additional() do
+ [
+ ["<username>", "Self-explanatory"],
+ ["<definition>", "Limit definitions as a JSON document"]
+ ]
+ end
+
+ def usage_doc_guides() do
+ [
+ DocGuide.access_control()
+ ]
+ end
+
+ def help_section(), do: :user_management
+
+ def description(), do: "Sets user limits"
+
+ def banner([username, definition], %{}) do
+ "Setting user limits to \"#{definition}\" for user \"#{username}\" ..."
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_user_tags_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_user_tags_command.ex
new file mode 100644
index 0000000000..eba8ed6123
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_user_tags_command.ex
@@ -0,0 +1,61 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Ctl.Commands.SetUserTagsCommand do
+ alias RabbitMQ.CLI.Core.{DocGuide, ExitCodes, Helpers}
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ def merge_defaults(args, opts), do: {args, opts}
+
+ def validate([], _) do
+ {:validation_failure, :not_enough_args}
+ end
+ def validate(_, _), do: :ok
+
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+
+ def run([user | tags], %{node: node_name}) do
+ :rabbit_misc.rpc_call(
+ node_name,
+ :rabbit_auth_backend_internal,
+ :set_tags,
+ [user, tags, Helpers.cli_acting_user()]
+ )
+ end
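+
+ # Example (tags illustrative): set_user_tags user1 administrator monitoring
+ # replaces user1's whole tag set; passing no tags after the username clears it.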
+
+ def output({:error, {:no_such_user, username}}, %{node: node_name, formatter: "json"}) do
+ {:error, %{"result" => "error", "node" => node_name, "message" => "User #{username} does not exists"}}
+ end
+ def output({:error, {:no_such_user, username}}, _) do
+ {:error, ExitCodes.exit_nouser(), "User \"#{username}\" does not exist"}
+ end
+ use RabbitMQ.CLI.DefaultOutput
+
+ def usage, do: "set_user_tags <username> <tag> [...]"
+
+ def usage_additional() do
+ [
+ ["<username>", "Self-explanatory"],
+ ["<tags>", "Space separated list of tags"]
+ ]
+ end
+
+ def usage_doc_guides() do
+ [
+ DocGuide.management(),
+ DocGuide.access_control()
+ ]
+ end
+
+ def help_section(), do: :user_management
+
+ def description(), do: "Sets user tags"
+
+ def banner([user | tags], _) do
+ "Setting tags for user \"#{user}\" to [#{tags |> Enum.join(", ")}] ..."
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_vhost_limits_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_vhost_limits_command.ex
new file mode 100644
index 0000000000..f25f1c7bc4
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_vhost_limits_command.ex
@@ -0,0 +1,50 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Ctl.Commands.SetVhostLimitsCommand do
+ alias RabbitMQ.CLI.Core.{DocGuide, Helpers}
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ def merge_defaults(args, opts) do
+ {args, Map.merge(%{vhost: "/"}, opts)}
+ end
+
+ use RabbitMQ.CLI.Core.AcceptsOnePositionalArgument
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+
+ def run([definition], %{node: node_name, vhost: vhost}) do
+ :rabbit_misc.rpc_call(node_name, :rabbit_vhost_limit, :parse_set, [
+ vhost,
+ definition,
+ Helpers.cli_acting_user()
+ ])
+ end
+
+ use RabbitMQ.CLI.DefaultOutput
+
+ def usage, do: "set_vhost_limits [--vhost <vhost>] <definition>"
+
+ def usage_additional() do
+ [
+ ["<definition>", "Limit definitions, must be a valid JSON document"]
+ ]
+ end
+
+ def usage_doc_guides() do
+ [
+ DocGuide.virtual_hosts()
+ ]
+ end
+
+ def help_section(), do: :virtual_hosts
+
+ def description(), do: "Sets virtual host limits"
+
+ def banner([definition], %{vhost: vhost}) do
+ "Setting vhost limits to \"#{definition}\" for vhost \"#{vhost}\" ..."
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_vm_memory_high_watermark_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_vm_memory_high_watermark_command.ex
new file mode 100644
index 0000000000..a4e4527f8f
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/set_vm_memory_high_watermark_command.ex
@@ -0,0 +1,146 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Ctl.Commands.SetVmMemoryHighWatermarkCommand do
+ alias RabbitMQ.CLI.Core.DocGuide
+ import RabbitMQ.CLI.Core.Memory
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ use RabbitMQ.CLI.Core.MergesNoDefaults
+
+ def validate([], _) do
+ {:validation_failure, :not_enough_args}
+ end
+
+ def validate(["absolute"], _) do
+ {:validation_failure, :not_enough_args}
+ end
+
+ def validate(["absolute" | _] = args, _) when length(args) > 2 do
+ {:validation_failure, :too_many_args}
+ end
+
+ def validate(["absolute", arg], _) do
+ case Integer.parse(arg) do
+ :error ->
+ {:validation_failure, :bad_argument}
+
+ {_, rest} ->
+ case Enum.member?(memory_units(), rest) do
+ true ->
+ :ok
+
+ false ->
+ case Float.parse(arg) do
+ {_, orest} when orest == rest ->
+ {:validation_failure, {:bad_argument, "Invalid units."}}
+
+ _ ->
+ {:validation_failure, {:bad_argument, "The threshold should be an integer."}}
+ end
+ end
+ end
+ end
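+
+ # For illustration: an integer such as "1048576", with or without a suffix,
+ # validates only when the remainder after Integer.parse/1 is in memory_units();
+ # a fractional value such as "1.5GB" fails with "The threshold should be an integer."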
+
+ def validate([_ | _] = args, _) when length(args) > 1 do
+ {:validation_failure, :too_many_args}
+ end
+
+ def validate([arg], _) when is_number(arg) and (arg < 0.0 or arg > 1.0) do
+ {:validation_failure,
+ {:bad_argument, "The threshold should be a fraction between 0.0 and 1.0"}}
+ end
+
+ def validate([arg], %{}) when is_binary(arg) do
+ case Float.parse(arg) do
+ {arg, ""} when is_number(arg) and (arg < 0.0 or arg > 1.0) ->
+ {:validation_failure,
+ {:bad_argument, "The threshold should be a fraction between 0.0 and 1.0"}}
+
+ {_, ""} ->
+ :ok
+
+ _ ->
+ {:validation_failure, :bad_argument}
+ end
+ end
+
+ def validate(_, _), do: :ok
+
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+
+ def run(["absolute", arg], opts) do
+ case Integer.parse(arg) do
+ {num, rest} ->
+ valid_units = rest in memory_units()
+ set_vm_memory_high_watermark_absolute({num, rest}, valid_units, opts)
+ end
+ end
+
+ def run([arg], %{node: node_name}) when is_number(arg) and arg >= 0.0 do
+ :rabbit_misc.rpc_call(
+ node_name,
+ :vm_memory_monitor,
+ :set_vm_memory_high_watermark,
+ [arg]
+ )
+ end
+
+ def run([arg], %{} = opts) when is_binary(arg) do
+ case Float.parse(arg) do
+ {num, ""} -> run([num], opts)
+ end
+ end
+
+ use RabbitMQ.CLI.DefaultOutput
+
+ def usage do
+ "set_vm_memory_high_watermark <fraction> | absolute <value>"
+ end
+
+ def usage_additional() do
+ [
+ ["<fraction>", "New limit as a fraction of total memory reported by the OS"],
+ ["absolute <value>", "New limit as an absolute value with units, e.g. 1GB"]
+ ]
+ end
+
+ def usage_doc_guides() do
+ [
+ DocGuide.alarms(),
+ DocGuide.memory_use()
+ ]
+ end
+
+ def help_section(), do: :configuration
+
+ def description(), do: "Sets the vm_memory_high_watermark setting"
+
+ def banner(["absolute", arg], %{node: node_name}) do
+ "Setting memory threshold on #{node_name} to #{arg} bytes ..."
+ end
+
+ def banner([arg], %{node: node_name}) do
+ "Setting memory threshold on #{node_name} to #{arg} ..."
+ end
+
+ #
+ # Implementation
+ #
+
+ defp set_vm_memory_high_watermark_absolute({num, rest}, true, %{node: node_name})
+ when num > 0 do
+ val = memory_unit_absolute(num, rest)
+
+ :rabbit_misc.rpc_call(
+ node_name,
+ :vm_memory_monitor,
+ :set_vm_memory_high_watermark,
+ [{:absolute, val}]
+ )
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/shutdown_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/shutdown_command.ex
new file mode 100644
index 0000000000..10700bf309
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/shutdown_command.ex
@@ -0,0 +1,106 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Ctl.Commands.ShutdownCommand do
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+ alias RabbitMQ.CLI.Core.{OsPid, NodeName}
+
+ def switches() do
+ [timeout: :integer,
+ wait: :boolean]
+ end
+
+ def aliases(), do: [t: :timeout]
+
+ def merge_defaults(args, opts) do
+ {args, Map.merge(%{wait: true, timeout: 120}, opts)}
+ end
+
+ def validate([], %{wait: false}) do
+ :ok
+ end
+
+ def validate([], %{node: node_name, wait: true}) do
+ local_hostname = NodeName.hostname_from_node(Node.self())
+ remote_hostname = NodeName.hostname_from_node(node_name)
+ case addressing_local_node?(local_hostname, remote_hostname) do
+ true -> :ok
+ false ->
+ msg = "\nThis command can only --wait for shutdown of local nodes " <>
+ "but node #{node_name} seems to be remote " <>
+ "(local hostname: #{local_hostname}, remote: #{remote_hostname}).\n" <>
+ "Pass --no-wait to shut node #{node_name} down without waiting.\n"
+ {:validation_failure, {:unsupported_target, msg}}
+ end
+ end
+ use RabbitMQ.CLI.Core.AcceptsNoPositionalArguments
+
+ def run([], %{node: node_name, wait: false, timeout: timeout}) do
+ shut_down_node_without_waiting(node_name, timeout)
+ end
+
+ def run([], %{node: node_name, wait: true, timeout: timeout}) do
+ case :rabbit_misc.rpc_call(node_name, :os, :getpid, []) do
+ pid when is_list(pid) ->
+ shut_down_node_and_wait_pid_to_stop(node_name, pid, timeout)
+ other ->
+ other
+ end
+ end
+
+ use RabbitMQ.CLI.DefaultOutput
+
+ def usage, do: "shutdown [--wait]"
+
+ def usage_additional() do
+ [
+ ["--wait", "if set, will wait for target node to terminate (by inferring and monitoring its PID file). Only works for local nodes."],
+ ["--no-wait", "if set, will not wait for target node to terminate"]
+ ]
+ end
+
+ def help_section(), do: :node_management
+
+ def description(), do: "Stops RabbitMQ and its runtime (Erlang VM). Monitors progress for local nodes. Does not require a PID file path."
+
+ def banner(_, _), do: nil
+
+
+ #
+ # Implementation
+ #
+
+ def addressing_local_node?(_, :localhost), do: true
+ def addressing_local_node?(_, 'localhost'), do: true
+ def addressing_local_node?(_, "localhost"), do: true
+ def addressing_local_node?(local_hostname, remote_hostname) do
+ local_hostname == remote_hostname
+ end
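+
+ # For illustration: a remote hostname given as the atom, charlist or string
+ # form of "localhost" is always treated as local; any other hostname must
+ # equal the local one exactly for --wait to be allowed.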
+
+ defp shut_down_node_without_waiting(node_name, timeout) do
+ :rabbit_misc.rpc_call(node_name, :rabbit, :stop_and_halt, [], timeout)
+ end
+
+ defp shut_down_node_and_wait_pid_to_stop(node_name, pid, timeout) do
+ {:stream,
+ RabbitMQ.CLI.Core.Helpers.stream_until_error([
+ fn -> "Shutting down RabbitMQ node #{node_name} running at PID #{pid}" end,
+ fn ->
+ res = shut_down_node_without_waiting(node_name, timeout)
+
+ case res do
+ :ok -> "Waiting for PID #{pid} to terminate"
+ {:badrpc, err} -> {:error, err}
+ {:error, _} = err -> err
+ end
+ end,
+ fn ->
+ OsPid.wait_for_os_process_death(pid)
+ "RabbitMQ node #{node_name} running at PID #{pid} successfully shut down"
+ end
+ ])}
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/start_app_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/start_app_command.ex
new file mode 100644
index 0000000000..900bd762fa
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/start_app_command.ex
@@ -0,0 +1,25 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Ctl.Commands.StartAppCommand do
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+ use RabbitMQ.CLI.DefaultOutput
+
+ use RabbitMQ.CLI.Core.MergesNoDefaults
+ use RabbitMQ.CLI.Core.AcceptsNoPositionalArguments
+
+ def run([], %{node: node_name}) do
+ :rabbit_misc.rpc_call(node_name, :rabbit, :start, [])
+ end
+
+ def usage, do: "start_app"
+
+ def help_section(), do: :node_management
+
+ def description(), do: "Starts the RabbitMQ application but leaves the runtime (Erlang VM) running"
+
+ def banner(_, %{node: node_name}), do: "Starting node #{node_name} ..."
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/status_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/status_command.ex
new file mode 100644
index 0000000000..582f514f27
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/status_command.ex
@@ -0,0 +1,253 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Ctl.Commands.StatusCommand do
+ alias RabbitMQ.CLI.Core.DocGuide
+ alias RabbitMQ.CLI.InformationUnit, as: IU
+ import RabbitMQ.CLI.Core.{Alarms, ANSI, DataCoercion, Listeners, Memory, Platform}
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ @default_timeout 60_000
+
+ def scopes(), do: [:ctl, :diagnostics]
+
+ def switches(), do: [unit: :string, timeout: :integer]
+ def aliases(), do: [t: :timeout]
+
+ def merge_defaults(args, opts) do
+ timeout =
+ case opts[:timeout] do
+ nil -> @default_timeout
+ :infinity -> @default_timeout
+ other -> other
+ end
+
+ {args, Map.merge(%{unit: "gb", timeout: timeout}, opts)}
+ end
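+
+ # For illustration: with no --timeout (or an :infinity one) the effective
+ # timeout becomes 60_000 ms; an explicit finite value is passed through unchanged.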
+
+ def validate(args, _) when length(args) > 0 do
+ {:validation_failure, :too_many_args}
+ end
+ def validate(_, %{unit: unit}) do
+ case IU.known_unit?(unit) do
+ true ->
+ :ok
+
+ false ->
+ {:validation_failure, "unit '#{unit}' is not supported. Please use one of: bytes, mb, gb"}
+ end
+ end
+ def validate(_, _), do: :ok
+
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+
+ def run([], %{node: node_name, timeout: timeout}) do
+ :rabbit_misc.rpc_call(node_name, :rabbit, :status, [], timeout)
+ end
+
+ def output({:error, :timeout}, %{node: node_name}) do
+ {:error, RabbitMQ.CLI.Core.ExitCodes.exit_software(),
+ "Error: timed out while waiting for a response from #{node_name}."}
+ end
+
+ def output(result, %{formatter: "erlang"}) do
+ {:ok, result}
+ end
+
+ def output(result, %{formatter: "json"}) when is_list(result) do
+ m = result_map(result) |> Map.update(:alarms, [], fn xs -> alarm_maps(xs) end)
+
+ {:ok, m}
+ end
+
+ def output(result, %{node: node_name, unit: unit}) when is_list(result) do
+ m = result_map(result)
+
+ product_name_section = case m do
+ %{:product_name => product_name} when product_name != "" ->
+ ["Product name: #{product_name}"]
+ _ ->
+ []
+ end
+ product_version_section = case m do
+ %{:product_version => product_version} when product_version != "" ->
+ ["Product version: #{product_version}"]
+ _ ->
+ []
+ end
+
+ runtime_section = [
+ "#{bright("Runtime")}\n",
+ "OS PID: #{m[:pid]}",
+ "OS: #{m[:os]}",
+ # TODO: format
+ "Uptime (seconds): #{m[:uptime]}",
+ "Is under maintenance?: #{m[:is_under_maintenance]}"
+ ] ++
+ product_name_section ++
+ product_version_section ++
+ [
+ "RabbitMQ version: #{m[:rabbitmq_version]}",
+ "Node name: #{node_name}",
+ "Erlang configuration: #{m[:erlang_version]}",
+ "Erlang processes: #{m[:processes][:used]} used, #{m[:processes][:limit]} limit",
+ "Scheduler run queue: #{m[:run_queue]}",
+ "Cluster heartbeat timeout (net_ticktime): #{m[:net_ticktime]}"
+ ]
+
+ plugin_section = [
+ "\n#{bright("Plugins")}\n",
+ "Enabled plugin file: #{m[:enabled_plugin_file]}",
+ "Enabled plugins:\n"
+ ] ++ Enum.map(m[:active_plugins], fn pl -> " * #{pl}" end)
+
+ data_directory_section = [
+ "\n#{bright("Data directory")}\n",
+ "Node data directory: #{m[:data_directory]}",
+ "Raft data directory: #{m[:raft_data_directory]}"
+ ]
+
+ config_section = [
+ "\n#{bright("Config files")}\n"
+ ] ++ Enum.map(m[:config_files], fn path -> " * #{path}" end)
+
+ log_section = [
+ "\n#{bright("Log file(s)")}\n"
+ ] ++ Enum.map(m[:log_files], fn path -> " * #{path}" end)
+
+ alarms_section = [
+ "\n#{bright("Alarms")}\n",
+ ] ++ case m[:alarms] do
+ [] -> ["(none)"]
+ xs -> alarm_lines(xs, node_name)
+ end
+
+ breakdown = compute_relative_values(m[:memory])
+ memory_calculation_strategy = to_atom(m[:vm_memory_calculation_strategy])
+ total_memory = get_in(m[:memory], [:total, memory_calculation_strategy])
+
+ readable_watermark_setting = case m[:vm_memory_high_watermark_setting] do
+ %{:relative => val} -> "#{val} of available memory"
+ # absolute value
+ %{:absolute => val} -> "#{IU.convert(val, unit)} #{unit}"
+ end
+ memory_section = [
+ "\n#{bright("Memory")}\n",
+ "Total memory used: #{IU.convert(total_memory, unit)} #{unit}",
+ "Calculation strategy: #{memory_calculation_strategy}",
+ "Memory high watermark setting: #{readable_watermark_setting}, computed to: #{IU.convert(m[:vm_memory_high_watermark_limit], unit)} #{unit}\n"
+ ] ++ Enum.map(breakdown, fn({category, val}) -> "#{category}: #{IU.convert(val[:bytes], unit)} #{unit} (#{val[:percentage]} %)" end)
+
+ file_descriptors = [
+ "\n#{bright("File Descriptors")}\n",
+ "Total: #{m[:file_descriptors][:total_used]}, limit: #{m[:file_descriptors][:total_limit]}",
+ "Sockets: #{m[:file_descriptors][:sockets_used]}, limit: #{m[:file_descriptors][:sockets_limit]}"
+ ]
+
+ disk_space_section = [
+ "\n#{bright("Free Disk Space")}\n",
+ "Low free disk space watermark: #{IU.convert(m[:disk_free_limit], unit)} #{unit}",
+ "Free disk space: #{IU.convert(m[:disk_free], unit)} #{unit}"
+ ]
+
+ totals_section = [
+ "\n#{bright("Totals")}\n",
+ "Connection count: #{m[:totals][:connection_count]}",
+ "Queue count: #{m[:totals][:queue_count]}",
+ "Virtual host count: #{m[:totals][:virtual_host_count]}"
+ ]
+
+ listeners_section = [
+ "\n#{bright("Listeners")}\n",
+ ] ++ case m[:listeners] do
+ [] -> ["(none)"]
+ xs -> listener_lines(xs)
+ end
+
+ lines = runtime_section ++ plugin_section ++ data_directory_section ++
+ config_section ++ log_section ++ alarms_section ++ memory_section ++
+ file_descriptors ++ disk_space_section ++ totals_section ++ listeners_section
+
+ {:ok, Enum.join(lines, line_separator())}
+ end
+
+ use RabbitMQ.CLI.DefaultOutput
+
+ def formatter(), do: RabbitMQ.CLI.Formatters.String
+
+ def usage, do: "status [--unit <unit>]"
+
+ def usage_additional() do
+ [
+ ["--unit <bytes | mb | gb>", "byte multiple (bytes, megabytes, gigabytes) to use"],
+ ["--formatter <json | erlang>", "alternative formatter (JSON, Erlang terms)"]
+ ]
+ end
+
+ def usage_doc_guides() do
+ [
+ DocGuide.monitoring()
+ ]
+ end
+
+ def help_section(), do: :observability_and_health_checks
+
+ def description(), do: "Displays status of a node"
+
+ def banner(_, %{node: node_name}), do: "Status of node #{node_name} ..."
+
+ #
+ # Implementation
+ #
+
+ defp result_map(result) do
+ %{
+ os: os_name(Keyword.get(result, :os)),
+ pid: Keyword.get(result, :pid),
+ product_name: Keyword.get(result, :product_name) |> to_string,
+ product_version: Keyword.get(result, :product_version) |> to_string,
+ rabbitmq_version: Keyword.get(result, :rabbitmq_version) |> to_string,
+ erlang_version: Keyword.get(result, :erlang_version) |> to_string |> String.trim_trailing,
+ uptime: Keyword.get(result, :uptime),
+ is_under_maintenance: Keyword.get(result, :is_under_maintenance, false),
+ processes: Enum.into(Keyword.get(result, :processes), %{}),
+ run_queue: Keyword.get(result, :run_queue),
+ net_ticktime: net_ticktime(result),
+
+ vm_memory_calculation_strategy: Keyword.get(result, :vm_memory_calculation_strategy),
+ vm_memory_high_watermark_setting: Keyword.get(result, :vm_memory_high_watermark) |> formatted_watermark,
+ vm_memory_high_watermark_limit: Keyword.get(result, :vm_memory_limit),
+
+ disk_free_limit: Keyword.get(result, :disk_free_limit),
+ disk_free: Keyword.get(result, :disk_free),
+
+ file_descriptors: Enum.into(Keyword.get(result, :file_descriptors), %{}),
+
+ alarms: Keyword.get(result, :alarms),
+ listeners: listener_maps(Keyword.get(result, :listeners, [])),
+ memory: Keyword.get(result, :memory) |> Enum.into(%{}),
+
+ data_directory: Keyword.get(result, :data_directory) |> to_string,
+ raft_data_directory: Keyword.get(result, :raft_data_directory) |> to_string,
+
+ config_files: Keyword.get(result, :config_files) |> Enum.map(&to_string/1),
+ log_files: Keyword.get(result, :log_files) |> Enum.map(&to_string/1),
+
+ active_plugins: Keyword.get(result, :active_plugins) |> Enum.map(&to_string/1),
+ enabled_plugin_file: Keyword.get(result, :enabled_plugin_file) |> to_string,
+
+ totals: Keyword.get(result, :totals)
+ }
+ end
+
+ defp net_ticktime(result) do
+ case Keyword.get(result, :kernel) do
+ {:net_ticktime, n} -> n
+ n when is_integer(n) -> n
+ _ -> :undefined
+ end
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/stop_app_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/stop_app_command.ex
new file mode 100644
index 0000000000..c3cbe3b1fd
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/stop_app_command.ex
@@ -0,0 +1,26 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Ctl.Commands.StopAppCommand do
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ use RabbitMQ.CLI.Core.MergesNoDefaults
+ use RabbitMQ.CLI.Core.AcceptsNoPositionalArguments
+
+ def run([], %{node: node_name}) do
+ :rabbit_misc.rpc_call(node_name, :rabbit, :stop, [])
+ end
+
+ use RabbitMQ.CLI.DefaultOutput
+
+ def usage, do: "stop_app"
+
+ def help_section(), do: :node_management
+
+ def description(), do: "Stops the RabbitMQ application, leaving the runtime (Erlang VM) running"
+
+ def banner(_, %{node: node_name}), do: "Stopping rabbit application on node #{node_name} ..."
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/stop_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/stop_command.ex
new file mode 100644
index 0000000000..becb75a0b5
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/stop_command.ex
@@ -0,0 +1,72 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Ctl.Commands.StopCommand do
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+ alias RabbitMQ.CLI.Core.OsPid
+
+ def merge_defaults(args, opts) do
+ {args, Map.merge(%{idempotent: false}, opts)}
+ end
+
+ def switches(), do: [idempotent: :boolean]
+
+ def validate([], _), do: :ok
+ def validate([_pidfile_path], _), do: :ok
+ def validate([_ | _] = args, _) when length(args) > 1, do: {:validation_failure, :too_many_args}
+
+ def run([], %{node: node_name, idempotent: true}) do
+ case :rabbit_misc.rpc_call(node_name, :rabbit, :stop_and_halt, []) do
+ {:badrpc, :nodedown} -> {:ok, "Node #{node_name} is no longer running"}
+ any -> any
+ end
+ end
+
+ def run([], %{node: node_name, idempotent: false}) do
+ :rabbit_misc.rpc_call(node_name, :rabbit, :stop_and_halt, [])
+ end
+
+ def run([pidfile_path], %{node: node_name}) do
+ ret = OsPid.read_pid_from_file(pidfile_path, true)
+ :rabbit_misc.rpc_call(node_name, :rabbit, :stop_and_halt, [])
+
+ case ret do
+ {:error, details} ->
+ {:error, "could not read pid from file #{pidfile_path}. Error: #{details}"}
+
+ {:error, :could_not_read_pid_from_file, {:contents, s}} ->
+ {:error, "could not read pid from file #{pidfile_path}. File contents: #{s}"}
+
+ {:error, :could_not_read_pid_from_file, details} ->
+ {:error, "could not read pid from file #{pidfile_path}. Error: #{details}"}
+
+ pid ->
+ OsPid.wait_for_os_process_death(pid)
+ {:ok, "process #{pid} (take from pid file #{pidfile_path}) is no longer running"}
+ end
+ end
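+
+ # Note on ordering (illustrative, matches the code above): the pid file is
+ # read before the node is asked to stop, so that once the RPC returns the
+ # command can wait for that OS process to die before reporting success.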
+
+ use RabbitMQ.CLI.DefaultOutput
+
+ def usage, do: "stop [--idempotent] [<pidfile>]"
+
+ def usage_additional() do
+ [
+ ["<pidfile>", "node PID file path to monitor. To avoid using a PID file, use 'rabbitmqctl shutdown'"],
+ ["--idempotent", "return success if target node is not running (cannot be contacted)"]
+ ]
+ end
+
+ def description(), do: "Stops RabbitMQ and its runtime (Erlang VM). Requires a local node pid file path to monitor progress."
+
+ def help_section(), do: :node_management
+
+ def banner([pidfile_path], %{node: node_name}) do
+ "Stopping and halting node #{node_name} (will monitor pid file #{pidfile_path}) ..."
+ end
+
+ def banner(_, %{node: node_name}), do: "Stopping and halting node #{node_name} ..."
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/suspend_listeners_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/suspend_listeners_command.ex
new file mode 100644
index 0000000000..31fcf738b9
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/suspend_listeners_command.ex
@@ -0,0 +1,46 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Ctl.Commands.SuspendListenersCommand do
+ @moduledoc """
+ Suspends all client connection listeners. Suspended listeners will not
+ accept any new connections but already established ones will not be interrupted.
+ `ResumeListenersCommand` will undo the effect of this command.
+
+ This command is meant to be used when automating upgrades.
+ See also `ResumeListenersCommand`.
+ """
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ alias RabbitMQ.CLI.Core.DocGuide
+
+ use RabbitMQ.CLI.Core.MergesNoDefaults
+ use RabbitMQ.CLI.Core.AcceptsNoPositionalArguments
+
+ def run([], %{node: node_name, timeout: timeout}) do
+ :rabbit_misc.rpc_call(node_name, :rabbit_maintenance, :suspend_all_client_listeners, [], timeout)
+ end
+
+ use RabbitMQ.CLI.DefaultOutput
+
+ def usage, do: "suspend_listeners"
+
+ def usage_doc_guides() do
+ [
+ DocGuide.upgrade()
+ ]
+ end
+
+ def help_section(), do: :operations
+
+ def description(), do: "Suspends client connection listeners so that no new client connections are accepted"
+
+ def banner(_, %{node: node_name}) do
+ "Will suspend client connection listeners on node #{node_name}. "
+ <> "The node will no longer accept client connections!"
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/sync_queue_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/sync_queue_command.ex
new file mode 100644
index 0000000000..4b7112af57
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/sync_queue_command.ex
@@ -0,0 +1,54 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2016-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Ctl.Commands.SyncQueueCommand do
+ alias RabbitMQ.CLI.Core.DocGuide
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ def merge_defaults(args, opts) do
+ {args, Map.merge(%{vhost: "/"}, opts)}
+ end
+
+ use RabbitMQ.CLI.Core.AcceptsOnePositionalArgument
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+
+ def run([queue], %{vhost: vhost, node: node_name}) do
+ :rpc.call(
+ node_name,
+ :rabbit_mirror_queue_misc,
+ :sync_queue,
+ [:rabbit_misc.r(vhost, :queue, queue)],
+ :infinity
+ )
+ end
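+
+ # :rabbit_misc.r/3 builds the queue resource passed above; e.g. vhost "/" and
+ # queue "qq.1" (names illustrative) become {:resource, "/", :queue, "qq.1"}.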
+
+ use RabbitMQ.CLI.DefaultOutput
+
+ def usage do
+ "sync_queue [--vhost <vhost>] <queue>"
+ end
+
+ def usage_additional() do
+ [
+ ["<queue>", "Name of the queue to synchronise"]
+ ]
+ end
+
+ def usage_doc_guides() do
+ [
+ DocGuide.mirroring()
+ ]
+ end
+
+ def help_section(), do: :replication
+
+ def description(), do: "Instructs a mirrored queue with unsynchronised mirrors (follower replicas) to synchronise them"
+
+ def banner([queue], %{vhost: vhost, node: _node}) do
+ "Synchronising queue '#{queue}' in vhost '#{vhost}' ..."
+ end
+end
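
The `:rabbit_misc.r/3` call in `run/2` builds the `#resource{}` record the broker uses to address the queue; since Erlang records are tagged tuples, the shape it produces can be sketched as:

    # "orders" is an illustrative queue name, "/" the default vhost:
    :rabbit_misc.r("/", :queue, "orders")
    #=> {:resource, "/", :queue, "orders"}
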
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/trace_off_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/trace_off_command.ex
new file mode 100644
index 0000000000..f2b6cc217f
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/trace_off_command.ex
@@ -0,0 +1,42 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Ctl.Commands.TraceOffCommand do
+ alias RabbitMQ.CLI.Core.DocGuide
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ def merge_defaults(_, opts) do
+ {[], Map.merge(%{vhost: "/"}, opts)}
+ end
+
+ use RabbitMQ.CLI.Core.AcceptsNoPositionalArguments
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+
+ def run([], %{node: node_name, vhost: vhost}) do
+ case :rabbit_misc.rpc_call(node_name, :rabbit_trace, :stop, [vhost]) do
+ :ok -> {:ok, "Trace disabled for vhost #{vhost}"}
+ other -> other
+ end
+ end
+
+ use RabbitMQ.CLI.DefaultOutput
+
+ def usage do
+ "trace_off [--vhost <vhost>]"
+ end
+
+ def usage_doc_guides() do
+ [
+ DocGuide.firehose(),
+ DocGuide.virtual_hosts()
+ ]
+ end
+
+ def help_section(), do: :virtual_hosts
+
+ def banner(_, %{vhost: vhost}), do: "Stopping tracing for vhost \"#{vhost}\" ..."
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/trace_on_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/trace_on_command.ex
new file mode 100644
index 0000000000..33bb5a06d6
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/trace_on_command.ex
@@ -0,0 +1,42 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Ctl.Commands.TraceOnCommand do
+ alias RabbitMQ.CLI.Core.DocGuide
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ def merge_defaults(_, opts) do
+ {[], Map.merge(%{vhost: "/"}, opts)}
+ end
+
+ use RabbitMQ.CLI.Core.AcceptsNoPositionalArguments
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+
+ def run([], %{node: node_name, vhost: vhost}) do
+ case :rabbit_misc.rpc_call(node_name, :rabbit_trace, :start, [vhost]) do
+ :ok -> {:ok, "Trace enabled for vhost #{vhost}"}
+ other -> other
+ end
+ end
+
+ use RabbitMQ.CLI.DefaultOutput
+
+ def usage do
+ "trace_on [--vhost <vhost>]"
+ end
+
+ def usage_doc_guides() do
+ [
+ DocGuide.firehose(),
+ DocGuide.virtual_hosts()
+ ]
+ end
+
+ def help_section(), do: :virtual_hosts
+
+ def banner(_, %{vhost: vhost}), do: "Starting tracing for vhost \"#{vhost}\" ..."
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/update_cluster_nodes_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/update_cluster_nodes_command.ex
new file mode 100644
index 0000000000..94b218e2c9
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/update_cluster_nodes_command.ex
@@ -0,0 +1,62 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2016-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Ctl.Commands.UpdateClusterNodesCommand do
+ alias RabbitMQ.CLI.Core.{Config, DocGuide, Helpers}
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ use RabbitMQ.CLI.Core.MergesNoDefaults
+ use RabbitMQ.CLI.Core.AcceptsOnePositionalArgument
+ use RabbitMQ.CLI.Core.RequiresRabbitAppStopped
+
+ def run([seed_node], options=%{node: node_name}) do
+ long_or_short_names = Config.get_option(:longnames, options)
+ seed_node_normalised = Helpers.normalise_node(seed_node, long_or_short_names)
+ :rabbit_misc.rpc_call(
+ node_name,
+ :rabbit_mnesia,
+ :update_cluster_nodes,
+ [seed_node_normalised]
+ )
+ end
+
+ def usage() do
+ "update_cluster_nodes <seed_node>"
+ end
+
+ def usage_additional() do
+ [
+ ["<seed_node>", "Cluster node to seed known cluster members from"]
+ ]
+ end
+
+ def usage_doc_guides() do
+ [
+ DocGuide.clustering()
+ ]
+ end
+
+ def help_section(), do: :cluster_management
+
+ def description(), do: "Instructs a cluster member node to sync the list of known cluster members from <seed_node>"
+
+ def banner([seed_node], %{node: node_name}) do
+ "Will seed #{node_name} from #{seed_node} on next start"
+ end
+
+ def output({:error, :mnesia_unexpectedly_running}, %{node: node_name}) do
+ {:error, RabbitMQ.CLI.Core.ExitCodes.exit_software(),
+ RabbitMQ.CLI.DefaultOutput.mnesia_running_error(node_name)}
+ end
+
+ def output({:error, :cannot_cluster_node_with_itself}, %{node: node_name}) do
+ {:error, RabbitMQ.CLI.Core.ExitCodes.exit_software(),
+ "Error: cannot cluster node with itself: #{node_name}"}
+ end
+
+ use RabbitMQ.CLI.DefaultOutput
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/version_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/version_command.ex
new file mode 100644
index 0000000000..8028054932
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/version_command.ex
@@ -0,0 +1,40 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Ctl.Commands.VersionCommand do
+ alias RabbitMQ.CLI.Core.{Validators, Version}
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ def scopes(), do: [:ctl, :diagnostics, :plugins]
+
+ use RabbitMQ.CLI.Core.MergesNoDefaults
+ use RabbitMQ.CLI.Core.AcceptsNoPositionalArguments
+
+ def validate_execution_environment([] = args, opts) do
+ Validators.rabbit_is_loaded(args, opts)
+ end
+
+ def run([], %{formatter: "json"}) do
+ {:ok, %{version: Version.local_version()}}
+ end
+ def run([], %{formatter: "csv"}) do
+ row = [version: Version.local_version()]
+ {:ok, [row]}
+ end
+ def run([], _opts) do
+ {:ok, Version.local_version()}
+ end
+ use RabbitMQ.CLI.DefaultOutput
+
+ def help_section, do: :help
+
+ def description, do: "Displays CLI tools version"
+
+ def usage, do: "version"
+
+ def banner(_, _), do: nil
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/wait_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/wait_command.ex
new file mode 100644
index 0000000000..0699203de6
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/wait_command.ex
@@ -0,0 +1,269 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Ctl.Commands.WaitCommand do
+ alias RabbitMQ.CLI.Core.{Helpers, Validators}
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+ @default_timeout 10_000
+
+ def scopes(), do: [:ctl, :diagnostics]
+
+ def switches(), do: [pid: :integer, timeout: :integer]
+ def aliases(), do: [P: :pid, t: :timeout]
+
+ def merge_defaults(args, opts) do
+ timeout =
+ case opts[:timeout] do
+ nil -> @default_timeout
+ :infinity -> @default_timeout
+ val -> val
+ end
+
+ {args, Map.put(opts, :timeout, timeout)}
+ end
+
+ def validate([_ | _] = args, _) when length(args) > 1, do: {:validation_failure, :too_many_args}
+ def validate([_], %{pid: _}), do: {:validation_failure, "Cannot specify both pid and pidfile"}
+ def validate([_], _), do: :ok
+ def validate([], %{pid: _}), do: :ok
+ def validate([], _), do: {:validation_failure, "No pid or pidfile specified"}
+
+ def validate_execution_environment([], %{pid: _} = opts) do
+ Validators.rabbit_is_loaded([], opts)
+ end
+ def validate_execution_environment([_pid_file], opts) do
+ Validators.rabbit_is_loaded([], opts)
+ end
+
+ def run([pid_file], %{node: node_name, timeout: timeout} = opts) do
+ app_names = :rabbit_and_plugins
+ quiet = opts[:quiet] || false
+
+ Helpers.stream_until_error_parameterised(
+ [
+ log("Waiting for pid file '#{pid_file}' to appear", quiet),
+ fn _ -> wait_for_pid_file(pid_file, node_name, timeout) end,
+ log_param(fn pid -> "pid is #{pid}" end, quiet)
+ ] ++
+ wait_for_pid_funs(node_name, app_names, timeout, quiet),
+ :init
+ )
+ end
+
+ def run([], %{node: node_name, pid: pid, timeout: timeout} = opts) do
+ app_names = :rabbit_and_plugins
+ quiet = opts[:quiet] || false
+
+ Helpers.stream_until_error_parameterised(
+ wait_for_pid_funs(node_name, app_names, timeout, quiet),
+ pid
+ )
+ end
+
+ def output({:error, err}, opts) do
+ case format_error(err) do
+ :undefined -> RabbitMQ.CLI.DefaultOutput.output({:error, err}, opts)
+ error_str -> {:error, RabbitMQ.CLI.Core.ExitCodes.exit_software(), error_str}
+ end
+ end
+
+ def output({:stream, stream}, _opts) do
+ {:stream,
+ Stream.map(stream, fn
+ {:error, err} ->
+ {:error,
+ case format_error(err) do
+ :undefined -> err
+ error_str -> error_str
+ end}
+
+ other ->
+ other
+ end)}
+ end
+
+ use RabbitMQ.CLI.DefaultOutput
+
+ # Banner is printed in wait steps
+ def banner(_, _), do: nil
+
+ def usage, do: "wait [<pidfile>] [--pid|-P <pid>]"
+
+ def usage_additional() do
+ [
+ ["<pidfile>", "PID file path"],
+ ["--pid <pid>", "operating system PID to monitor"]
+ ]
+ end
+
+ def help_section(), do: :node_management
+
+ def description(), do: "Waits for RabbitMQ node startup by monitoring a local PID file. See also 'rabbitmqctl await_online_nodes'"
+
+ #
+ # Implementation
+ #
+
+ def wait_for(timeout, fun) do
+ sleep = 1000
+
+ case wait_for_loop(timeout, sleep, fun) do
+ {:error, :timeout} -> {:error, {:timeout, timeout}}
+ other -> other
+ end
+ end
+
+ def wait_for_loop(timeout, _, _) when timeout <= 0 do
+ {:error, :timeout}
+ end
+
+ def wait_for_loop(timeout, sleep, fun) do
+ time = :erlang.system_time(:milli_seconds)
+
+ case fun.() do
+ {:error, :loop} ->
+ time_to_fun = :erlang.system_time(:milli_seconds) - time
+
+ time_taken =
+ case {time_to_fun > timeout, time_to_fun > sleep} do
+ ## The function took longer than timeout
+ {true, _} ->
+ time_to_fun
+
+ ## The function took longer than sleep
+ {false, true} ->
+ time_to_fun
+
+ ## We need to sleep
+ {false, false} ->
+ :timer.sleep(sleep)
+ time_to_fun + sleep
+ end
+
+ wait_for_loop(timeout - time_taken, sleep, fun)
+
+ other ->
+ other
+ end
+ end
+
+ defp wait_for_pid_funs(node_name, app_names, timeout, quiet) do
+ app_names_formatted = :io_lib.format('~p', [app_names])
+
+ [
+ log_param(
+ fn pid ->
+ "Waiting for erlang distribution on node '#{node_name}' while OS process '#{pid}' is running"
+ end,
+ quiet
+ ),
+ fn pid -> wait_for_erlang_distribution(pid, node_name, timeout) end,
+ log(
+ "Waiting for applications '#{app_names_formatted}' to start on node '#{node_name}'",
+ quiet
+ ),
+ fn _ -> wait_for_application(node_name, app_names) end,
+ log("Applications '#{app_names_formatted}' are running on node '#{node_name}'", quiet)
+ ]
+ end
+
+ defp log(_string, _quiet = true) do
+ fn val -> {:ok, val} end
+ end
+
+ defp log(string, _quiet = false) do
+ fn val -> {:ok, val, string} end
+ end
+
+ defp log_param(_fun, _quiet = true) do
+ fn val -> {:ok, val} end
+ end
+
+ defp log_param(fun, _quiet = false) do
+ fn val -> {:ok, val, fun.(val)} end
+ end
+
+ defp format_error(:process_not_running) do
+ "Error: process is not running."
+ end
+
+ defp format_error({:garbage_in_pid_file, _}) do
+ "Error: garbage in pid file."
+ end
+
+ defp format_error({:could_not_read_pid, err}) do
+ "Error: could not read pid. Detail: #{err}"
+ end
+
+ defp format_error(_) do
+ :undefined
+ end
+
+ defp wait_for_application(node_name, :rabbit_and_plugins) do
+ case :rabbit.await_startup(node_name) do
+ {:badrpc, err} -> {:error, {:badrpc, err}}
+ other -> other
+ end
+ end
+
+ defp wait_for_erlang_distribution(pid, node_name, timeout) do
+ wait_for(
+ timeout,
+ fn ->
+ case check_distribution(pid, node_name) do
+          # Loop while the node is not yet reachable (ping returns :pang).
+ {:error, :pang} -> {:error, :loop}
+ other -> other
+ end
+ end
+ )
+ end
+
+ defp check_distribution(pid, node_name) do
+ case is_os_process_alive(pid) do
+ true ->
+ case Node.ping(node_name) do
+ :pong -> :ok
+ :pang -> {:error, :pang}
+ end
+
+ false ->
+ {:error, :process_not_running}
+ end
+ end
+
+ defp is_os_process_alive(pid) do
+ :rabbit_misc.is_os_process_alive(to_charlist(pid))
+ end
+
+ defp wait_for_pid_file(pid_file, node_name, timeout) do
+ wait_for(
+ timeout,
+ fn ->
+ case :file.read_file(pid_file) do
+ {:ok, bin} ->
+ case Integer.parse(bin) do
+ :error ->
+ {:error, {:garbage_in_pid_file, pid_file}}
+
+ {pid, _} ->
+ case check_distribution(pid, node_name) do
+ :ok -> {:ok, pid}
+ _ -> {:error, :loop}
+ end
+ end
+
+ {:error, :enoent} ->
+ {:error, :loop}
+
+ {:error, err} ->
+ {:error, {:could_not_read_pid, err}}
+ end
+ end
+ )
+ end
+end
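
`wait_for/2` above is a generic poll-with-budget loop: the probe fun is retried roughly once per second for as long as it returns `{:error, :loop}`, with the remaining budget reduced by the time each iteration actually took. A minimal sketch of calling it directly, with a made-up probe:

    alias RabbitMQ.CLI.Ctl.Commands.WaitCommand

    # Hypothetical probe: wait for a file to appear (the path is illustrative).
    probe = fn ->
      case File.exists?("/tmp/example.pid") do
        true -> {:ok, :found}
        false -> {:error, :loop}
      end
    end

    case WaitCommand.wait_for(15_000, probe) do
      {:ok, :found} -> IO.puts("appeared in time")
      {:error, {:timeout, ms}} -> IO.puts("gave up after #{ms} ms")
    end
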
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/info_keys.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/info_keys.ex
new file mode 100644
index 0000000000..26f86ae51e
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/info_keys.ex
@@ -0,0 +1,62 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Ctl.InfoKeys do
+ import RabbitCommon.Records
+ alias RabbitMQ.CLI.Core.DataCoercion
+
+ def validate_info_keys(args, valid_keys) do
+ info_keys = prepare_info_keys(args)
+
+ case invalid_info_keys(info_keys, Enum.map(valid_keys, &DataCoercion.to_atom/1)) do
+ [_ | _] = bad_info_keys ->
+ {:validation_failure, {:bad_info_key, bad_info_keys}}
+
+ [] ->
+ {:ok, info_keys}
+ end
+ end
+
+ def prepare_info_keys(args) do
+ args
+ |> Enum.flat_map(fn arg -> String.split(arg, ",", trim: true) end)
+ |> Enum.map(fn s -> String.replace(s, ",", "") end)
+ |> Enum.map(&String.trim/1)
+ |> Enum.map(&String.to_atom/1)
+ |> Enum.uniq()
+ end
+
+ def with_valid_info_keys(args, valid_keys, fun) do
+ case validate_info_keys(args, valid_keys) do
+ {:ok, info_keys} -> fun.(info_keys)
+ err -> err
+ end
+ end
+
+ defp invalid_info_keys(info_keys, valid_keys) do
+ MapSet.new(info_keys)
+ |> MapSet.difference(MapSet.new(valid_keys))
+ |> MapSet.to_list()
+ end
+
+ def info_for_keys(item, []) do
+ item
+ end
+
+ def info_for_keys([{_, _} | _] = item, info_keys) do
+ item
+ |> Enum.filter(fn {k, _} -> Enum.member?(info_keys, k) end)
+ |> Enum.map(fn {k, v} -> {k, format_info_item(v)} end)
+ end
+
+ defp format_info_item(resource(name: name)) do
+ name
+ end
+
+ defp format_info_item(any) do
+ any
+ end
+end
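
A quick behavior sketch with a hypothetical key set; `prepare_info_keys/1` splits comma-separated arguments, trims, and deduplicates before validation:

    alias RabbitMQ.CLI.Ctl.InfoKeys

    valid = [:name, :messages, :consumers]

    InfoKeys.prepare_info_keys(["name,messages", " consumers ", "name"])
    #=> [:name, :messages, :consumers]

    InfoKeys.validate_info_keys(["name", "nonexistent"], valid)
    #=> {:validation_failure, {:bad_info_key, [:nonexistent]}}
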
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/rpc_stream.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/rpc_stream.ex
new file mode 100644
index 0000000000..4b672a6d88
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/rpc_stream.ex
@@ -0,0 +1,124 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Ctl.RpcStream do
+ alias RabbitMQ.CLI.Ctl.InfoKeys
+
+ def receive_list_items(node, mod, fun, args, timeout, info_keys) do
+ receive_list_items(node, [{mod, fun, args}], timeout, info_keys, 1)
+ end
+
+ def receive_list_items(node, mod, fun, args, timeout, info_keys, chunks) do
+ receive_list_items(node, [{mod, fun, args}], timeout, info_keys, chunks)
+ end
+
+ def receive_list_items(_node, _mfas, _timeout, _info_keys, 0) do
+ nil
+ end
+
+ def receive_list_items(node, mfas, timeout, info_keys, chunks_init) do
+ receive_list_items_with_fun(node, mfas, timeout, info_keys, chunks_init, fn v -> v end)
+ end
+
+ def receive_list_items_with_fun(node, mfas, timeout, info_keys, chunks_init, response_fun) do
+ pid = Kernel.self()
+ ref = Kernel.make_ref()
+ for {m, f, a} <- mfas, do: init_items_stream(node, m, f, a, timeout, pid, ref)
+
+ Stream.unfold(
+ {chunks_init, :continue},
+ fn
+ :finished ->
+ response_fun.(nil)
+
+ {chunks, :continue} ->
+ received =
+ receive do
+ {^ref, :finished} when chunks === 1 ->
+ nil
+
+ {^ref, :finished} ->
+ {[], {chunks - 1, :continue}}
+
+ {^ref, {:timeout, t}} ->
+ {{:error, {:badrpc, {:timeout, t / 1000}}}, :finished}
+
+ {^ref, []} ->
+ {[], {chunks, :continue}}
+
+ {^ref, :error, {:badrpc, :timeout}} ->
+ {{:error, {:badrpc, {:timeout, timeout / 1000}}}, :finished}
+
+ {^ref, result, :continue} ->
+ {result, {chunks, :continue}}
+
+ {:error, _} = error ->
+ {error, :finished}
+
+ {^ref, :error, error} ->
+ {{:error, simplify_emission_error(error)}, :finished}
+
+ {:DOWN, _mref, :process, _pid, :normal} ->
+ {[], {chunks, :continue}}
+
+ {:DOWN, _mref, :process, _pid, reason} ->
+ {{:error, simplify_emission_error(reason)}, :finished}
+ end
+
+ response_fun.(received)
+ end
+ )
+ |> display_list_items(info_keys)
+ end
+
+ def simplify_emission_error({:badrpc, {:EXIT, {{:nocatch, error}, error_details}}}) do
+ {error, error_details}
+ end
+
+ def simplify_emission_error({{:nocatch, error}, error_details}) do
+ {error, error_details}
+ end
+
+ def simplify_emission_error(other) do
+ other
+ end
+
+ defp display_list_items(items, info_keys) do
+ items
+ |> Stream.filter(fn
+ [] -> false
+ _ -> true
+ end)
+ |> Stream.map(fn
+ {:error, error} ->
+ error
+
+ # here item is a list of keyword lists:
+ [[{_, _} | _] | _] = item ->
+ Enum.map(item, fn i -> InfoKeys.info_for_keys(i, info_keys) end)
+
+ item ->
+ InfoKeys.info_for_keys(item, info_keys)
+ end)
+ end
+
+ defp init_items_stream(_node, _mod, _fun, _args, 0, pid, ref) do
+ set_stream_timeout(pid, ref, 0)
+ end
+
+ defp init_items_stream(node, mod, fun, args, timeout, pid, ref) do
+ :rabbit_control_misc.spawn_emitter_caller(node, mod, fun, args, ref, pid, timeout)
+ set_stream_timeout(pid, ref, timeout)
+ end
+
+ defp set_stream_timeout(_, _, :infinity) do
+ :ok
+ end
+
+ defp set_stream_timeout(pid, ref, timeout) do
+ Process.send_after(pid, {ref, {:timeout, timeout}}, timeout)
+ end
+end
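
Most of this module needs a live emitter on the broker side, but `simplify_emission_error/1` is pure and illustrates the error shapes it unwraps. The sample payloads below are made up for illustration:

    alias RabbitMQ.CLI.Ctl.RpcStream

    # A throw inside an emitter arrives wrapped by :rpc as {:badrpc, {:EXIT, ...}}:
    RpcStream.simplify_emission_error(
      {:badrpc, {:EXIT, {{:nocatch, :access_refused}, [{:some_mod, :some_fun, 1, []}]}}})
    #=> {:access_refused, [{:some_mod, :some_fun, 1, []}]}

    # Anything else passes through unchanged:
    RpcStream.simplify_emission_error({:badrpc, :nodedown})
    #=> {:badrpc, :nodedown}
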
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/default_output.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/default_output.ex
new file mode 100644
index 0000000000..d5e3f94a15
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/default_output.ex
@@ -0,0 +1,94 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+alias RabbitMQ.CLI.Formatters.FormatterHelpers
+
+defmodule RabbitMQ.CLI.DefaultOutput do
+ # When `use RabbitMQ.CLI.DefaultOutput` is invoked,
+ # this will define output/2 that delegates to RabbitMQ.CLI.DefaultOutput.output/2.
+ defmacro __using__(_) do
+ quote do
+ def output(result, opts) do
+ RabbitMQ.CLI.DefaultOutput.output(result, opts)
+ end
+ end
+ end
+
+ def output(result, opts \\ %{}) do
+ format_output(normalize_output(result, opts))
+ end
+
+ def mnesia_running_error(node_name) do
+ "Mnesia is still running on node #{node_name}.\n" <>
+ "Please stop RabbitMQ with 'rabbitmqctl stop_app' first."
+ end
+
+ defp normalize_output(:ok, %{node: node_name, formatter: "json"}) do
+ {:ok, %{"result" => "ok", "node" => node_name}}
+ end
+ defp normalize_output(:ok, _opts), do: :ok
+ defp normalize_output({:ok, value}, %{node: node_name, formatter: "json"}) do
+ {:ok, %{"result" => "ok", "node" => node_name, "value" => value}}
+ end
+ defp normalize_output({:ok, _} = input, _opts), do: input
+ defp normalize_output({:stream, _} = input, _opts), do: input
+ defp normalize_output({:badrpc_multi, _, _} = input, _opts), do: {:error, input}
+ defp normalize_output({:badrpc, :nodedown} = input, _opts), do: {:error, input}
+ defp normalize_output({:badrpc, :timeout} = input, _opts), do: {:error, input}
+ defp normalize_output({:badrpc, {:timeout, _n}} = input, _opts), do: {:error, input}
+ defp normalize_output({:badrpc, {:timeout, _n, _msg}} = input, _opts), do: {:error, input}
+ defp normalize_output({:badrpc, {:EXIT, reason}}, _opts), do: {:error, reason}
+ defp normalize_output({:error, exit_code, string}, _opts) when is_integer(exit_code) do
+ {:error, exit_code, to_string(string)}
+ end
+ defp normalize_output({:error, format, args}, _opts)
+ when (is_list(format) or is_binary(format)) and is_list(args) do
+ {:error, to_string(:rabbit_misc.format(format, args))}
+ end
+ defp normalize_output({:error, _} = input, _opts), do: input
+ defp normalize_output({:error_string, string}, _opts) do
+ {:error, to_string(string)}
+ end
+ defp normalize_output(unknown, _opts) when is_atom(unknown), do: {:error, unknown}
+ defp normalize_output({unknown, _} = input, _opts) when is_atom(unknown), do: {:error, input}
+ defp normalize_output(result, _opts) when not is_atom(result), do: {:ok, result}
+
+
+ defp format_output({:error, _} = result) do
+ result
+ end
+ defp format_output({:error, _, _} = result) do
+ result
+ end
+
+ defp format_output(:ok) do
+ :ok
+ end
+
+ defp format_output({:ok, output}) do
+ case Enumerable.impl_for(output) do
+ nil ->
+ {:ok, output}
+
+ ## Do not streamify plain maps
+ Enumerable.Map ->
+ {:ok, output}
+
+ ## Do not streamify proplists
+ Enumerable.List ->
+ case FormatterHelpers.proplist?(output) do
+ true -> {:ok, output}
+ false -> {:stream, output}
+ end
+
+ _ ->
+ {:stream, output}
+ end
+ end
+
+ defp format_output({:stream, stream}) do
+ {:stream, stream}
+ end
+end
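
A behavior sketch for `output/2` following the clauses above (the options maps are illustrative):

    alias RabbitMQ.CLI.DefaultOutput

    DefaultOutput.output(:ok, %{})
    #=> :ok

    # badrpc results are normalized into errors:
    DefaultOutput.output({:badrpc, :nodedown}, %{})
    #=> {:error, {:badrpc, :nodedown}}

    # With the JSON formatter, :ok is wrapped with node metadata:
    DefaultOutput.output(:ok, %{node: :rabbit@host, formatter: "json"})
    #=> {:ok, %{"result" => "ok", "node" => :rabbit@host}}

    # Enumerables other than maps and proplists become streams:
    DefaultOutput.output({:ok, Stream.map([1, 2, 3], & &1)}, %{})
    #=> {:stream, ...}
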
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/alarms_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/alarms_command.ex
new file mode 100644
index 0000000000..7669a523eb
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/alarms_command.ex
@@ -0,0 +1,77 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Diagnostics.Commands.AlarmsCommand do
+ @moduledoc """
+ Displays all alarms reported by the target node.
+
+  Returns an exit code of 0 unless there are connectivity or authentication
+  errors. This command is not meant to be used in health checks.
+ """
+ import RabbitMQ.CLI.Core.Platform, only: [line_separator: 0]
+ import RabbitMQ.CLI.Core.Alarms
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ use RabbitMQ.CLI.Core.AcceptsDefaultSwitchesAndTimeout
+ use RabbitMQ.CLI.Core.MergesNoDefaults
+ use RabbitMQ.CLI.Core.AcceptsNoPositionalArguments
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+
+ def run([], %{node: node_name, timeout: timeout}) do
+ # Example response when there are alarms:
+ #
+ # [
+ # file_descriptor_limit,
+ # {{resource_limit,disk,hare@warp10},[]},
+ # {{resource_limit,memory,hare@warp10},[]},
+ # {{resource_limit,disk,rabbit@warp10},[]},
+ # {{resource_limit,memory,rabbit@warp10},[]}
+ # ]
+ #
+ # The topmost file_descriptor_limit alarm is node-local.
+ :rabbit_misc.rpc_call(node_name, :rabbit_alarm, :get_alarms, [], timeout)
+ end
+
+ def output([], %{node: node_name, formatter: "json"}) do
+ {:ok, %{"result" => "ok", "node" => node_name, "alarms" => []}}
+ end
+
+ def output([], %{node: node_name}) do
+ {:ok, "Node #{node_name} reported no alarms, local or clusterwide"}
+ end
+
+ def output(alarms, %{node: node_name, formatter: "json"}) do
+ local = local_alarms(alarms, node_name)
+ global = clusterwide_alarms(alarms, node_name)
+
+ {:ok,
+ %{
+ "result" => "ok",
+ "local" => alarm_lines(local, node_name),
+ "global" => alarm_lines(global, node_name),
+ "message" => "Node #{node_name} reported alarms"
+ }}
+ end
+
+ def output(alarms, %{node: node_name}) do
+ lines = alarm_lines(alarms, node_name)
+
+ {:ok, Enum.join(lines, line_separator())}
+ end
+
+ use RabbitMQ.CLI.DefaultOutput
+
+ def help_section(), do: :observability_and_health_checks
+
+ def description(), do: "Lists resource alarms (local or cluster-wide) in effect on the target node"
+
+ def usage, do: "alarms"
+
+ def banner([], %{node: node_name}) do
+ "Asking node #{node_name} to report any known resource alarms ..."
+ end
+end
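
The helpers imported from `RabbitMQ.CLI.Core.Alarms` split the raw alarm list by origin, as used in the JSON output clause above. A sketch using the example response from the `run/2` comment; the split described in the comments is inferred from how the helpers are used here:

    import RabbitMQ.CLI.Core.Alarms

    alarms = [
      :file_descriptor_limit,
      {{:resource_limit, :disk, :hare@warp10}, []},
      {{:resource_limit, :memory, :rabbit@warp10}, []}
    ]

    # file_descriptor_limit plus rabbit@warp10's own resource alarms:
    local_alarms(alarms, :rabbit@warp10)

    # alarms reported by other cluster members, here hare@warp10:
    clusterwide_alarms(alarms, :rabbit@warp10)
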
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/certificates_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/certificates_command.ex
new file mode 100644
index 0000000000..33320d8e37
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/certificates_command.ex
@@ -0,0 +1,55 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Diagnostics.Commands.CertificatesCommand do
+ alias RabbitMQ.CLI.Core.DocGuide
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ import RabbitMQ.CLI.Core.Listeners
+
+ use RabbitMQ.CLI.Core.MergesNoDefaults
+ use RabbitMQ.CLI.Core.AcceptsNoPositionalArguments
+
+ def run([], %{node: node_name, timeout: timeout}) do
+ case :rabbit_misc.rpc_call(node_name, :rabbit_networking, :active_listeners, [], timeout) do
+ {:error, _} = err ->
+ err
+
+ {:error, _, _} = err ->
+ err
+
+ xs when is_list(xs) ->
+ listeners = listeners_with_certificates(listeners_on(xs, node_name))
+
+ case listeners do
+ [] -> %{}
+ _ -> Enum.map(listeners, &listener_certs/1)
+ end
+
+ other ->
+ other
+ end
+ end
+
+ use RabbitMQ.CLI.DefaultOutput
+
+ def formatter(), do: RabbitMQ.CLI.Formatters.Erlang
+
+ def usage, do: "certificates"
+
+ def usage_doc_guides() do
+ [
+ DocGuide.configuration(),
+ DocGuide.tls()
+ ]
+ end
+
+ def help_section(), do: :configuration
+
+ def description(), do: "Displays certificates (public keys) for every listener on target node that is configured to use TLS"
+
+ def banner(_, %{node: node_name}), do: "Certificates of node #{node_name} ..."
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/check_alarms_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/check_alarms_command.ex
new file mode 100644
index 0000000000..04bb70317a
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/check_alarms_command.ex
@@ -0,0 +1,86 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Diagnostics.Commands.CheckAlarmsCommand do
+ @moduledoc """
+ Exits with a non-zero code if the target node reports any alarms,
+ local or clusterwide.
+
+ This command is meant to be used in health checks.
+ """
+
+ import RabbitMQ.CLI.Core.Alarms
+ import RabbitMQ.CLI.Core.Platform, only: [line_separator: 0]
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ use RabbitMQ.CLI.Core.AcceptsDefaultSwitchesAndTimeout
+ use RabbitMQ.CLI.Core.MergesNoDefaults
+ use RabbitMQ.CLI.Core.AcceptsNoPositionalArguments
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+
+ def run([], %{node: node_name, timeout: timeout}) do
+ # Example response when there are alarms:
+ #
+ # [
+ # file_descriptor_limit,
+ # {{resource_limit,disk,hare@warp10},[]},
+ # {{resource_limit,memory,hare@warp10},[]},
+ # {{resource_limit,disk,rabbit@warp10},[]},
+ # {{resource_limit,memory,rabbit@warp10},[]}
+ # ]
+ #
+ # The topmost file_descriptor_limit alarm is node-local.
+ :rabbit_misc.rpc_call(node_name, :rabbit_alarm, :get_alarms, [], timeout)
+ end
+
+ def output([], %{formatter: "json"}) do
+ {:ok, %{"result" => "ok"}}
+ end
+
+ def output([], %{silent: true}) do
+ {:ok, :check_passed}
+ end
+
+ def output([], %{node: node_name}) do
+ {:ok, "Node #{node_name} reported no alarms, local or clusterwide"}
+ end
+
+ def output(alarms, %{node: node_name, formatter: "json"}) do
+ local = local_alarms(alarms, node_name)
+ global = clusterwide_alarms(alarms, node_name)
+
+ {:error, :check_failed,
+ %{
+ "result" => "error",
+ "local" => alarm_lines(local, node_name),
+ "global" => alarm_lines(global, node_name),
+ "message" => "Node #{node_name} reported alarms"
+ }}
+ end
+
+ def output(alarms, %{silent: true} = _opts) when is_list(alarms) do
+ {:error, :check_failed}
+ end
+
+ def output(alarms, %{node: node_name}) when is_list(alarms) do
+ lines = alarm_lines(alarms, node_name)
+
+ {:error, :check_failed, Enum.join(lines, line_separator())}
+ end
+
+ use RabbitMQ.CLI.DefaultOutput
+
+ def help_section(), do: :observability_and_health_checks
+
+ def description(), do: "Health check that exits with a non-zero code if the target node reports any alarms, local or cluster-wide."
+
+ def usage, do: "check_alarms"
+
+ def banner([], %{node: node_name}) do
+ "Asking node #{node_name} to report any local resource alarms ..."
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/check_certificate_expiration_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/check_certificate_expiration_command.ex
new file mode 100644
index 0000000000..d14ade59f6
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/check_certificate_expiration_command.ex
@@ -0,0 +1,101 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Diagnostics.Commands.CheckCertificateExpirationCommand do
+ alias RabbitMQ.CLI.Core.DocGuide
+ alias RabbitMQ.CLI.TimeUnit, as: TU
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ import RabbitMQ.CLI.Core.Listeners
+
+ def switches(), do: [unit: :string, within: :integer]
+
+ def merge_defaults(args, opts) do
+ {args, Map.merge(%{unit: "weeks", within: 4}, opts)}
+ end
+
+ def validate(args, _) when length(args) > 0 do
+ {:validation_failure, :too_many_args}
+ end
+ def validate(_, %{unit: unit}) do
+ case TU.known_unit?(unit) do
+ true ->
+ :ok
+
+ false ->
+ {:validation_failure, "unit '#{unit}' is not supported. Please use one of: days, weeks, months, years"}
+ end
+ end
+ def validate(_, _), do: :ok
+
+ def run([], %{node: node_name, unit: unit, within: within, timeout: timeout}) do
+ case :rabbit_misc.rpc_call(node_name, :rabbit_networking, :active_listeners, [], timeout) do
+ {:error, _} = err ->
+ err
+
+ {:error, _, _} = err ->
+ err
+
+ {:badrpc, _} = err ->
+ err
+
+ xs when is_list(xs) ->
+ listeners = listeners_on(xs, node_name)
+ seconds = TU.convert(within, unit)
+        Enum.reduce(listeners, [], fn listener, acc ->
+          case listener_expiring_within(listener, seconds) do
+            false -> acc
+            expiring -> [expiring | acc]
+          end
+        end)
+ end
+ end
+
+ def output([], %{formatter: "json"}) do
+ {:ok, %{"result" => "ok"}}
+ end
+
+ def output([], %{unit: unit, within: within}) do
+ unit_label = unit_label(within, unit)
+ {:ok, "No certificates are expiring within #{within} #{unit_label}."}
+ end
+
+ def output(listeners, %{formatter: "json"}) do
+ {:error, :check_failed, %{"result" => "error", "expired" => Enum.map(listeners, &expired_listener_map/1)}}
+ end
+
+ def output(listeners, %{}) do
+ {:error, :check_failed, Enum.map(listeners, &expired_listener_map/1)}
+ end
+
+ def unit_label(1, unit) do
+ unit |> String.slice(0..-2)
+ end
+ def unit_label(_within, unit) do
+ unit
+ end
+
+ def usage, do: "check_certificate_expiration [--within <period>] [--unit <unit>]"
+
+ def usage_additional() do
+ [
+ ["<period>", "period of time to check. Default is four (weeks)."],
+ ["<unit>", "time unit for the period, can be days, weeks, months, years. Default is weeks."],
+ ]
+ end
+
+ def usage_doc_guides() do
+ [
+ DocGuide.tls(),
+ DocGuide.networking()
+ ]
+ end
+
+ def help_section(), do: :observability_and_health_checks
+
+ def description(), do: "Checks the expiration date on the certificates for every listener configured to use TLS"
+
+ def banner(_, %{node: node_name}), do: "Checking certificate expiration on node #{node_name} ..."
+end
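
A sketch of the defaults and validation above, assuming `TimeUnit.known_unit?/1` accepts only the units named in the error message:

    alias RabbitMQ.CLI.Diagnostics.Commands.CheckCertificateExpirationCommand, as: Check

    Check.merge_defaults([], %{})
    #=> {[], %{unit: "weeks", within: 4}}

    Check.validate([], %{unit: "fortnights"})
    #=> {:validation_failure,
    #    "unit 'fortnights' is not supported. Please use one of: days, weeks, months, years"}
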
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/check_local_alarms_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/check_local_alarms_command.ex
new file mode 100644
index 0000000000..1b11537793
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/check_local_alarms_command.ex
@@ -0,0 +1,85 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Diagnostics.Commands.CheckLocalAlarmsCommand do
+ @moduledoc """
+ Exits with a non-zero code if the target node reports any local alarms.
+
+ This command is meant to be used in health checks.
+ """
+
+ import RabbitMQ.CLI.Core.Alarms
+ import RabbitMQ.CLI.Core.Platform, only: [line_separator: 0]
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ use RabbitMQ.CLI.Core.AcceptsDefaultSwitchesAndTimeout
+ use RabbitMQ.CLI.Core.MergesNoDefaults
+ use RabbitMQ.CLI.Core.AcceptsNoPositionalArguments
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+
+ def run([], %{node: node_name, timeout: timeout}) do
+ # Example response when there are alarms:
+ #
+ # [
+ # file_descriptor_limit,
+ # {{resource_limit,disk,hare@warp10},[]},
+ # {{resource_limit,memory,hare@warp10},[]},
+ # {{resource_limit,disk,rabbit@warp10},[]},
+ # {{resource_limit,memory,rabbit@warp10},[]}
+ # ]
+ #
+ # The topmost file_descriptor_limit alarm is node-local.
+ case :rabbit_misc.rpc_call(node_name, :rabbit_alarm, :get_alarms, [], timeout) do
+ [] -> []
+ xs when is_list(xs) -> local_alarms(xs, node_name)
+ other -> other
+ end
+ end
+
+ def output([], %{formatter: "json"}) do
+ {:ok, %{"result" => "ok"}}
+ end
+
+ def output([], %{silent: true}) do
+ {:ok, :check_passed}
+ end
+
+ def output([], %{node: node_name}) do
+ {:ok, "Node #{node_name} reported no local alarms"}
+ end
+
+ def output(alarms, %{node: node_name, formatter: "json"}) do
+ {:error, :check_failed,
+ %{
+ "result" => "error",
+ "local" => alarm_lines(alarms, node_name),
+ "message" => "Node #{node_name} reported local alarms"
+ }}
+ end
+
+ def output(_alarms, %{silent: true}) do
+ {:error, :check_failed}
+ end
+
+ def output(alarms, %{node: node_name}) do
+ lines = alarm_lines(alarms, node_name)
+
+ {:error, Enum.join(lines, line_separator())}
+ end
+
+ use RabbitMQ.CLI.DefaultOutput
+
+ def help_section(), do: :observability_and_health_checks
+
+ def description(), do: "Health check that exits with a non-zero code if the target node reports any local alarms"
+
+ def usage, do: "check_local_alarms"
+
+ def banner([], %{node: node_name}) do
+ "Asking node #{node_name} to report any local resource alarms ..."
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/check_port_connectivity_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/check_port_connectivity_command.ex
new file mode 100644
index 0000000000..1c3d86ed83
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/check_port_connectivity_command.ex
@@ -0,0 +1,119 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Diagnostics.Commands.CheckPortConnectivityCommand do
+ @moduledoc """
+ Checks all listeners on the target node by opening a TCP connection to each
+ and immediately closing it.
+
+  Returns an exit code of 0 unless there are connectivity or authentication
+  errors. This command is meant to be used in health checks.
+ """
+
+ import RabbitMQ.CLI.Diagnostics.Helpers,
+ only: [check_listener_connectivity: 3]
+ import RabbitMQ.CLI.Core.Platform, only: [line_separator: 0]
+ import RabbitMQ.CLI.Core.Listeners
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ @default_timeout 30_000
+
+ use RabbitMQ.CLI.Core.AcceptsDefaultSwitchesAndTimeout
+
+ def merge_defaults(args, opts) do
+ timeout =
+ case opts[:timeout] do
+ nil -> @default_timeout
+ :infinity -> @default_timeout
+ other -> other
+ end
+
+ {args, Map.merge(opts, %{timeout: timeout})}
+ end
+
+ use RabbitMQ.CLI.Core.AcceptsNoPositionalArguments
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+
+ def run([], %{node: node_name, timeout: timeout}) do
+ case :rabbit_misc.rpc_call(node_name, :rabbit_networking, :active_listeners, [], timeout) do
+ {:error, _} = err ->
+ err
+
+ {:error, _, _} = err ->
+ err
+
+ xs when is_list(xs) ->
+ locals = listeners_on(xs, node_name)
+
+ case locals do
+ [] -> {true, locals}
+ _ -> check_connectivity_of(locals, node_name, timeout)
+ end
+
+ other ->
+ other
+ end
+ end
+
+ def output({true, listeners}, %{node: node_name, formatter: "json"}) do
+ {:ok, %{"result" => "ok", "node" => node_name, "listeners" => listener_maps(listeners)}}
+ end
+
+ def output({true, listeners}, %{node: node_name}) do
+ ports =
+ listeners
+ |> listener_maps
+ |> Enum.map(fn %{port: p} -> p end)
+ |> Enum.sort()
+ |> Enum.join(", ")
+
+ {:ok, "Successfully connected to ports #{ports} on node #{node_name}."}
+ end
+
+ def output({false, failures}, %{formatter: "json", node: node_name}) do
+ {:error, %{"result" => "error", "node" => node_name, "failures" => listener_maps(failures)}}
+ end
+
+ def output({false, failures}, %{node: node_name}) do
+ lines = [
+ "Connection to ports of the following listeners on node #{node_name} failed: "
+ | listener_lines(failures)
+ ]
+
+ {:error, Enum.join(lines, line_separator())}
+ end
+
+ def description(), do: "Basic TCP connectivity health check for each listener's port on the target node"
+
+ def help_section(), do: :observability_and_health_checks
+
+ def usage, do: "check_port_connectivity"
+
+ def banner([], %{node: node_name}) do
+ "Testing TCP connections to all active listeners on node #{node_name} ..."
+ end
+
+ #
+ # Implementation
+ #
+
+ defp check_connectivity_of(listeners, node_name, timeout) do
+ # per listener timeout
+ t = Kernel.trunc(timeout / (length(listeners) + 1))
+
+ failures =
+ Enum.reject(
+ listeners,
+ fn l -> check_listener_connectivity(listener_map(l), node_name, t) end
+ )
+
+ case failures do
+ [] -> {true, listeners}
+ fs -> {false, fs}
+ end
+ end
+end
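
The per-listener budget in `check_connectivity_of/3` divides the overall timeout by one more than the listener count, keeping the sum of per-listener timeouts strictly under the total. With the default 30-second budget and, say, five listeners:

    timeout = 30_000
    listener_count = 5
    Kernel.trunc(timeout / (listener_count + 1))
    #=> 5000
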
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/check_port_listener_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/check_port_listener_command.ex
new file mode 100644
index 0000000000..f321d444db
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/check_port_listener_command.ex
@@ -0,0 +1,82 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Diagnostics.Commands.CheckPortListenerCommand do
+ @moduledoc """
+ Exits with a non-zero code if there is no active listener
+ for the given port on the target node.
+
+ This command is meant to be used in health checks.
+ """
+
+ import RabbitMQ.CLI.Core.Listeners, only: [listeners_on: 2, listener_maps: 1]
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ use RabbitMQ.CLI.Core.AcceptsDefaultSwitchesAndTimeout
+ use RabbitMQ.CLI.Core.MergesNoDefaults
+ use RabbitMQ.CLI.Core.AcceptsOnePositiveIntegerArgument
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+
+ def run([port], %{node: node_name, timeout: timeout}) do
+ case :rabbit_misc.rpc_call(node_name, :rabbit_networking, :active_listeners, [], timeout) do
+ {:error, _} = err ->
+ err
+
+ {:error, _, _} = err ->
+ err
+
+ xs when is_list(xs) ->
+ locals = listeners_on(xs, node_name) |> listener_maps
+
+ found =
+ Enum.any?(locals, fn %{port: p} ->
+ to_string(port) == to_string(p)
+ end)
+
+ case found do
+ true -> {true, port}
+ false -> {false, port, locals}
+ end
+
+ other ->
+ other
+ end
+ end
+
+ def output({true, port}, %{node: node_name, formatter: "json"}) do
+ {:ok, %{"result" => "ok", "node" => node_name, "port" => port}}
+ end
+
+ def output({true, port}, %{node: node_name}) do
+ {:ok, "A listener for port #{port} is running on node #{node_name}."}
+ end
+
+ def output({false, port, listeners}, %{formatter: "json"}) do
+ ports = Enum.map(listeners, fn %{port: p} -> p end)
+
+ {:error, :check_failed,
+ %{"result" => "error", "missing" => port, "ports" => ports, "listeners" => listeners}}
+ end
+
+ def output({false, port, listeners}, %{node: node_name}) do
+ ports = Enum.map(listeners, fn %{port: p} -> p end) |> Enum.sort() |> Enum.join(", ")
+
+ {:error, :check_failed,
+ "No listener for port #{port} is active on node #{node_name}. " <>
+ "Found listeners that use the following ports: #{ports}"}
+ end
+
+ def help_section(), do: :observability_and_health_checks
+
+ def description(), do: "Health check that exits with a non-zero code if target node does not have an active listener for given port"
+
+ def usage, do: "check_port_listener <port>"
+
+ def banner([port], %{node: node_name}) do
+ "Asking node #{node_name} if there's an active listener on port #{port} ..."
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/check_protocol_listener_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/check_protocol_listener_command.ex
new file mode 100644
index 0000000000..10c81c971e
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/check_protocol_listener_command.ex
@@ -0,0 +1,90 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Diagnostics.Commands.CheckProtocolListenerCommand do
+ @moduledoc """
+ Exits with a non-zero code if there is no active listener
+ for the given protocol on the target node.
+
+ This command is meant to be used in health checks.
+ """
+
+ import RabbitMQ.CLI.Core.Listeners,
+ only: [listeners_on: 2, listener_maps: 1, normalize_protocol: 1]
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ use RabbitMQ.CLI.Core.AcceptsDefaultSwitchesAndTimeout
+ use RabbitMQ.CLI.Core.MergesNoDefaults
+ use RabbitMQ.CLI.Core.AcceptsOnePositionalArgument
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+
+ def run([proto], %{node: node_name, timeout: timeout}) do
+ proto = normalize_protocol(proto)
+
+ case :rabbit_misc.rpc_call(node_name, :rabbit_networking, :active_listeners, [], timeout) do
+ {:error, _} = err ->
+ err
+
+ {:error, _, _} = err ->
+ err
+
+ xs when is_list(xs) ->
+ locals = listeners_on(xs, node_name) |> listener_maps
+
+ found =
+ Enum.any?(locals, fn %{protocol: p} ->
+ to_string(proto) == to_string(p)
+ end)
+
+ case found do
+ true -> {true, proto}
+ false -> {false, proto, locals}
+ end
+
+ other ->
+ other
+ end
+ end
+
+ def output({true, proto}, %{node: node_name, formatter: "json"}) do
+ {:ok, %{"result" => "ok", "node" => node_name, "protocol" => proto}}
+ end
+
+ def output({true, proto}, %{node: node_name}) do
+ {:ok, "A listener for protocol #{proto} is running on node #{node_name}."}
+ end
+
+ def output({false, proto, listeners}, %{formatter: "json"}) do
+ protocols = Enum.map(listeners, fn %{protocol: p} -> p end)
+
+ {:error,
+ %{
+ "result" => "error",
+ "missing" => proto,
+ "protocols" => protocols,
+ "listeners" => listeners
+ }}
+ end
+
+ def output({false, proto, listeners}, %{node: node_name}) do
+ protocols = Enum.map(listeners, fn %{protocol: p} -> p end) |> Enum.sort() |> Enum.join(", ")
+
+ {:error,
+ "No listener for protocol #{proto} is active on node #{node_name}. " <>
+ "Found listeners for the following protocols: #{protocols}"}
+ end
+
+ def help_section(), do: :observability_and_health_checks
+
+ def description(), do: "Health check that exits with a non-zero code if target node does not have an active listener for given protocol"
+
+ def usage, do: "check_protocol_listener <protocol>"
+
+ def banner([proto], %{node: node_name}) do
+ "Asking node #{node_name} if there's an active listener for protocol #{proto} ..."
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/check_running_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/check_running_command.ex
new file mode 100644
index 0000000000..690f17e1e7
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/check_running_command.ex
@@ -0,0 +1,46 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Diagnostics.Commands.CheckRunningCommand do
+ @moduledoc """
+ Exits with a non-zero code if the RabbitMQ app on the target node is not running.
+
+ This command is meant to be used in health checks.
+ """
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ use RabbitMQ.CLI.Core.AcceptsDefaultSwitchesAndTimeout
+ use RabbitMQ.CLI.Core.MergesNoDefaults
+ use RabbitMQ.CLI.Core.AcceptsNoPositionalArguments
+
+ def run([], %{node: node_name, timeout: timeout}) do
+ # Note: we use is_booted/1 over is_running/1 to avoid
+ # returning a positive result when the node is still booting
+ :rabbit_misc.rpc_call(node_name, :rabbit, :is_booted, [node_name], timeout)
+ end
+
+ def output(true, %{node: node_name} = _options) do
+ {:ok, "RabbitMQ on node #{node_name} is fully booted and running"}
+ end
+
+ def output(false, %{node: node_name} = _options) do
+ {:error,
+ "RabbitMQ on node #{node_name} is not running or has not fully booted yet (check with is_booting)"}
+ end
+
+ use RabbitMQ.CLI.DefaultOutput
+
+ def help_section(), do: :observability_and_health_checks
+
+ def description(), do: "Health check that exits with a non-zero code if the RabbitMQ app on the target node is not running"
+
+ def usage, do: "check_running"
+
+ def banner([], %{node: node_name}) do
+ "Checking if RabbitMQ is running on node #{node_name} ..."
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/check_virtual_hosts_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/check_virtual_hosts_command.ex
new file mode 100644
index 0000000000..b3169b522d
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/check_virtual_hosts_command.ex
@@ -0,0 +1,71 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Diagnostics.Commands.CheckVirtualHostsCommand do
+ @moduledoc """
+ Exits with a non-zero code if the target node reports any vhost down.
+
+ This command is meant to be used in health checks.
+ """
+
+ import RabbitMQ.CLI.Core.Platform, only: [line_separator: 0]
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ use RabbitMQ.CLI.Core.AcceptsDefaultSwitchesAndTimeout
+ use RabbitMQ.CLI.Core.MergesNoDefaults
+ use RabbitMQ.CLI.Core.AcceptsNoPositionalArguments
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+
+ def run([], %{node: node_name, timeout: timeout}) do
+ :rabbit_misc.rpc_call(node_name, :rabbit_vhost_sup_sup, :check, [], timeout)
+ end
+
+ def output([], %{formatter: "json"}) do
+ {:ok, %{"result" => "ok"}}
+ end
+
+ def output([], %{silent: true}) do
+ {:ok, :check_passed}
+ end
+
+ def output([], %{formatter: "erlang"}) do
+ {:ok, :check_passed}
+ end
+
+ def output([], %{node: node_name}) do
+ {:ok, "Node #{node_name} reported all vhosts as running"}
+ end
+
+ def output(vhosts, %{formatter: "erlang"} = _opts) when is_list(vhosts) do
+ {:error, :check_failed, {:down_vhosts, vhosts}}
+ end
+
+ def output(vhosts, %{formatter: "json"} = _opts) when is_list(vhosts) do
+ {:error, :check_failed, %{"result" => "error", "down_vhosts" => vhosts}}
+ end
+
+ def output(vhosts, %{silent: true} = _opts) when is_list(vhosts) do
+ {:error, :check_failed}
+ end
+
+ def output(vhosts, %{node: node_name}) when is_list(vhosts) do
+ lines = Enum.join(vhosts, line_separator())
+ {:error, "Some virtual hosts on node #{node_name} are down:\n#{lines}"}
+ end
+
+ use RabbitMQ.CLI.DefaultOutput
+
+ def description(), do: "Health check that checks if all vhosts are running in the target node"
+
+ def help_section(), do: :observability_and_health_checks
+
+ def usage, do: "check_virtual_hosts"
+
+ def banner([], %{node: node_name}) do
+ "Checking if all vhosts are running on node #{node_name} ..."
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/cipher_suites_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/cipher_suites_command.ex
new file mode 100644
index 0000000000..86e8eee3a4
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/cipher_suites_command.ex
@@ -0,0 +1,122 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Diagnostics.Commands.CipherSuitesCommand do
+ alias RabbitMQ.CLI.Core.Helpers
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ def merge_defaults(args, opts) do
+ {args, Map.merge(%{all: false, format: "openssl"}, Helpers.case_insensitive_format(opts))}
+ end
+
+ def switches(), do: [timeout: :integer,
+ format: :string,
+ all: :boolean]
+ def aliases(), do: [t: :timeout]
+
+ def validate(_, %{format: format})
+ when format != "openssl" and format != "erlang" and format != "map" do
+ {:validation_failure, {:bad_argument, "Format should be either openssl, erlang or map"}}
+ end
+ def validate(args, _) when length(args) > 0 do
+ {:validation_failure, :too_many_args}
+ end
+
+ def validate(_, _), do: :ok
+
+ def run([], %{node: node_name, timeout: timeout, format: format} = opts) do
+    {mod, function} = case format do
+      "openssl" -> {:rabbit_ssl, :cipher_suites_openssl}
+      "erlang" -> {:rabbit_ssl, :cipher_suites_erlang}
+      "map" -> {:rabbit_ssl, :cipher_suites}
+    end
+    args = case opts do
+      %{all: true} -> [:all]
+      %{} -> [:default]
+    end
+ :rabbit_misc.rpc_call(node_name, mod, function, args, timeout)
+ end
+
+ use RabbitMQ.CLI.DefaultOutput
+
+ def banner([], %{format: "openssl"}), do: "Listing available cipher suites in OpenSSL format"
+ def banner([], %{format: "erlang"}), do: "Listing available cipher suites in Erlang term format"
+ def banner([], %{format: "map"}), do: "Listing available cipher suites in map format"
+
+ def help_section(), do: :observability_and_health_checks
+
+ def description(), do: "Lists cipher suites enabled by default. To list all available cipher suites, add the --all argument."
+
+ def usage, do: "cipher_suites [--format <openssl | erlang | map>] [--all]"
+
+ def usage_additional() do
+ [
+ ["--format", "output format to use: openssl, erlang or map"],
+ ["--all", "list all available suites"]
+ ]
+ end
+
+ defmodule Formatter do
+ alias RabbitMQ.CLI.Formatters.FormatterHelpers
+
+ @behaviour RabbitMQ.CLI.FormatterBehaviour
+
+ def format_output(item, %{format: "erlang"}) do
+ to_string(:io_lib.format("~p", [item]))
+ end
+
+ def format_output(item, %{format: "map"}) do
+ to_string(:io_lib.format("~p", [item]))
+ end
+
+ def format_output(item, %{format: "openssl"} = opts) do
+ RabbitMQ.CLI.Formatters.String.format_output(item, opts)
+ end
+
+ def format_stream(stream, %{format: "erlang"} = opts) do
+ comma_separated(stream, opts)
+ end
+
+ def format_stream(stream, %{format: "map"} = opts) do
+ comma_separated(stream, opts)
+ end
+
+ def format_stream(stream, %{format: "openssl"} = opts) do
+ Stream.map(
+ stream,
+ FormatterHelpers.without_errors_1(fn el ->
+ format_output(el, opts)
+ end)
+ )
+ end
+
+ defp comma_separated(stream, opts) do
+ elements =
+ Stream.scan(
+ stream,
+ :empty,
+ FormatterHelpers.without_errors_2(fn element, previous ->
+ separator =
+ case previous do
+ :empty -> ""
+ _ -> ","
+ end
+
+ format_element(element, separator, opts)
+ end)
+ )
+
+ Stream.concat([["["], elements, ["]"]])
+ end
+
+ defp format_element(val, separator, opts) do
+ separator <> format_output(val, opts)
+ end
+ end
+
+ def formatter(), do: Formatter
+end
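
The nested `Formatter` renders results incrementally: elements are joined with commas and wrapped in brackets without materializing the whole list first. A sketch with made-up suite atoms, assuming `without_errors_2/1` passes non-error elements through:

    alias RabbitMQ.CLI.Diagnostics.Commands.CipherSuitesCommand.Formatter

    [:aes_256_gcm, :chacha20_poly1305]
    |> Formatter.format_stream(%{format: "erlang"})
    |> Enum.join()
    #=> "[aes_256_gcm,chacha20_poly1305]"
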
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/command_line_arguments_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/command_line_arguments_command.ex
new file mode 100644
index 0000000000..adbf14cfc3
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/command_line_arguments_command.ex
@@ -0,0 +1,41 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Diagnostics.Commands.CommandLineArgumentsCommand do
+ alias RabbitMQ.CLI.Core.DocGuide
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ def scopes(), do: [:diagnostics]
+
+ use RabbitMQ.CLI.Core.MergesNoDefaults
+
+ def validate(_, %{formatter: "json"}) do
+ {:validation_failure, :unsupported_formatter}
+ end
+ use RabbitMQ.CLI.Core.AcceptsNoPositionalArguments
+
+ def run([], %{node: node_name}) do
+ :rabbit_misc.rpc_call(node_name, :init, :get_arguments, [])
+ end
+ use RabbitMQ.CLI.DefaultOutput
+
+ def formatter(), do: RabbitMQ.CLI.Formatters.Erlang
+
+ def usage, do: "command_line_arguments"
+
+ def usage_doc_guides() do
+ [
+ DocGuide.configuration(),
+ DocGuide.monitoring()
+ ]
+ end
+
+ def help_section(), do: :configuration
+
+ def description(), do: "Displays target node's command-line arguments and flags as reported by the runtime"
+
+ def banner(_, %{node: node_name}), do: "Command line arguments of node #{node_name} ..."
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/consume_event_stream_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/consume_event_stream_command.ex
new file mode 100644
index 0000000000..e7ad171d11
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/consume_event_stream_command.ex
@@ -0,0 +1,71 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2019-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Diagnostics.Commands.ConsumeEventStreamCommand do
+ @moduledoc """
+  Streams internal events from the target node, either for a fixed duration or indefinitely.
+ """
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ def switches(), do: [duration: :integer, pattern: :string, timeout: :integer]
+ def aliases(), do: [d: :duration, t: :timeout]
+
+ def merge_defaults(args, opts) do
+ {args, Map.merge(%{duration: :infinity, pattern: ".*", quiet: true}, opts)}
+ end
+
+ use RabbitMQ.CLI.Core.AcceptsNoPositionalArguments
+
+ def run([], %{node: node_name, timeout: timeout, duration: duration, pattern: pattern}) do
+ pid = self()
+ ref = make_ref()
+ subscribed = :rabbit_misc.rpc_call(
+ node_name,
+ :rabbit_event_consumer, :register,
+ [pid, ref, duration, pattern],
+ timeout)
+ case subscribed do
+ {:ok, ^ref} ->
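+ # The server-side event consumer sends {ref, event, :confinue}
+ # messages until the requested duration elapses, followed by a final
+ # {ref, event, :finished}; Stream.unfold turns that mailbox protocol
+ # into a lazy stream. Note that :confinue matches the atom spelling
+ # the server sends, so it cannot be renamed here alone.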
+ Stream.unfold(:confinue,
+ fn(:finished) -> nil
+ (:confinue) ->
+ receive do
+ {^ref, data, :finished} ->
+ {data, :finished};
+ {^ref, data, :confinue} ->
+ {data, :confinue}
+ end
+ end)
+ error -> error
+ end
+ end
+
+ use RabbitMQ.CLI.DefaultOutput
+
+ def formatter(), do: RabbitMQ.CLI.Formatters.JsonStream
+
+ def printer(), do: RabbitMQ.CLI.Printers.StdIORaw
+
+ def help_section(), do: :observability_and_health_checks
+
+ def description(), do: "Streams internal events from a running node. Output is jq-compatible."
+
+ def usage, do: "consume_event_stream [--duration|-d <seconds>] [--pattern <pattern>]"
+
+ def usage_additional() do
+ [
+ ["<duration_in_seconds>", "duration in seconds to stream log. Defaults to infinity"],
+ ["<pattern>", "regular expression to pick events"]
+ ]
+ end
+
+ def banner([], %{node: node_name, duration: :infinity}) do
+ "Streaming events from node #{node_name} ..."
+ end
+ def banner([], %{node: node_name, duration: duration}) do
+ "Streaming events from node #{node_name} for #{duration} seconds ..."
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/disable_auth_attempt_source_tracking_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/disable_auth_attempt_source_tracking_command.ex
new file mode 100644
index 0000000000..df182a0c97
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/disable_auth_attempt_source_tracking_command.ex
@@ -0,0 +1,35 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Diagnostics.Commands.DisableAuthAttemptSourceTrackingCommand do
+ alias RabbitMQ.CLI.Core.DocGuide
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ use RabbitMQ.CLI.Core.MergesNoDefaults
+ use RabbitMQ.CLI.Core.AcceptsNoPositionalArguments
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+
+ def run([], %{node: node_name}) do
+ :rabbit_misc.rpc_call(node_name, :application, :set_env,
+ [:rabbit, :track_auth_attempt_source, :false])
+ end
+ use RabbitMQ.CLI.DefaultOutput
+
+ def usage, do: "disable_track_auth_attempt_source"
+
+ def usage_doc_guides() do
+ [
+ DocGuide.access_control(),
+ DocGuide.monitoring()
+ ]
+ end
+
+ def help_section(), do: :configuration
+
+ def description(), do: "Disables the tracking of peer IP address and username of authentication attempts"
+
+ def banner([], _), do: "Disabling authentication attempt source tracking ..."
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/discover_peers_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/discover_peers_command.ex
new file mode 100644
index 0000000000..b23a13e370
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/discover_peers_command.ex
@@ -0,0 +1,36 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Diagnostics.Commands.DiscoverPeersCommand do
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ use RabbitMQ.CLI.Core.AcceptsDefaultSwitchesAndTimeout
+ use RabbitMQ.CLI.Core.MergesNoDefaults
+ use RabbitMQ.CLI.Core.AcceptsNoPositionalArguments
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+
+ def run([], %{node: node_name, timeout: timeout}) do
+ :rabbit_misc.rpc_call(node_name, :rabbit_peer_discovery, :discover_cluster_nodes, [], timeout)
+ end
+
+ def output({:ok, {[], _}}, _options) do
+ {:ok, "No peers discovered"}
+ end
+
+ def output({:ok, {nodes, _}}, _options) do
+ {:ok, nodes}
+ end
+
+ use RabbitMQ.CLI.DefaultOutput
+
+ def help_section(), do: :observability_and_health_checks
+
+ def description(), do: "Performs peer discovery and lists discovered nodes, if any"
+
+ def usage, do: "discover_peers"
+
+ def banner(_, _), do: "Discovering peers nodes ..."
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/enable_auth_attempt_source_tracking_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/enable_auth_attempt_source_tracking_command.ex
new file mode 100644
index 0000000000..832891094b
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/enable_auth_attempt_source_tracking_command.ex
@@ -0,0 +1,36 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Diagnostics.Commands.EnableAuthAttemptSourceTrackingCommand do
+ alias RabbitMQ.CLI.Core.DocGuide
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ use RabbitMQ.CLI.Core.MergesNoDefaults
+ use RabbitMQ.CLI.Core.AcceptsNoPositionalArguments
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+
+ def run([], %{node: node_name}) do
+ :rabbit_misc.rpc_call(node_name, :application, :set_env,
+ [:rabbit, :track_auth_attempt_source, :true])
+ end
+
+ use RabbitMQ.CLI.DefaultOutput
+
+ def usage, do: "enable_auth_attempt_source_tracking"
+
+ def usage_doc_guides() do
+ [
+ DocGuide.access_control(),
+ DocGuide.monitoring()
+ ]
+ end
+
+ def help_section(), do: :configuration
+
+ def description(), do: "Enables the tracking of peer IP address and username of authentication attempts"
+
+ def banner([], _), do: "Enabling authentication attempt source tracking ..."
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/erlang_cookie_hash_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/erlang_cookie_hash_command.ex
new file mode 100644
index 0000000000..b6e3186c94
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/erlang_cookie_hash_command.ex
@@ -0,0 +1,35 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Diagnostics.Commands.ErlangCookieHashCommand do
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ use RabbitMQ.CLI.Core.AcceptsDefaultSwitchesAndTimeout
+ use RabbitMQ.CLI.Core.MergesNoDefaults
+ use RabbitMQ.CLI.Core.AcceptsNoPositionalArguments
+
+ def run([], %{node: node_name, timeout: timeout}) do
+ :rabbit_data_coercion.to_binary(
+ :rabbit_misc.rpc_call(node_name, :rabbit_nodes_common, :cookie_hash, [], timeout))
+ end
+
+ def output(result, %{formatter: "json"}) do
+ {:ok, %{"result" => "ok", "value" => result}}
+ end
+ def output(result, _options) when is_bitstring(result) do
+ {:ok, result}
+ end
+
+ def help_section(), do: :configuration
+
+ def description(), do: "Displays a hash of the Erlang cookie (shared secret) used by the target node"
+
+ def usage, do: "erlang_cookie_hash"
+
+ def banner([], %{node: node_name}) do
+ "Asking node #{node_name} its Erlang cookie hash..."
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/erlang_cookie_sources_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/erlang_cookie_sources_command.ex
new file mode 100644
index 0000000000..578ba31c73
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/erlang_cookie_sources_command.ex
@@ -0,0 +1,116 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Diagnostics.Commands.ErlangCookieSourcesCommand do
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ import RabbitMQ.CLI.Core.ANSI
+
+ use RabbitMQ.CLI.Core.AcceptsDefaultSwitchesAndTimeout
+ use RabbitMQ.CLI.Core.MergesNoDefaults
+ use RabbitMQ.CLI.Core.AcceptsNoPositionalArguments
+
+ def distribution(_), do: :none
+
+ def run([], opts) do
+ switch_cookie = opts[:erlang_cookie]
+ home_dir = get_home_dir()
+ cookie_file_path = Path.join(home_dir, ".erlang.cookie")
+ cookie_file_stat = case File.stat(cookie_file_path) do
+ {:error, :enoent} -> nil
+ {:ok, value} -> value
+ end
+ cookie_file_type = case cookie_file_stat do
+ nil -> nil
+ value -> value.type
+ end
+ cookie_file_access = case cookie_file_stat do
+ nil -> nil
+ value -> value.access
+ end
+ cookie_file_size = case cookie_file_stat do
+ nil -> nil
+ value -> value.size
+ end
+
+ %{
+ os_env_cookie_set: System.get_env("RABBITMQ_ERLANG_COOKIE") != nil,
+ os_env_cookie_value_length: String.length(System.get_env("RABBITMQ_ERLANG_COOKIE") || ""),
+ switch_cookie_set: switch_cookie != nil,
+ switch_cookie_value_length: String.length(to_string(switch_cookie) || ""),
+ effective_user: System.get_env("USER"),
+ home_dir: home_dir,
+ cookie_file_path: cookie_file_path,
+ cookie_file_exists: File.exists?(cookie_file_path),
+ cookie_file_type: cookie_file_type,
+ cookie_file_access: cookie_file_access,
+ cookie_file_size: cookie_file_size
+ }
+ end
+
+ def banner([], %{}), do: "Listing Erlang cookie sources used by CLI tools..."
+
+ def output(result, %{formatter: "json"}) do
+ {:ok, result}
+ end
+
+ def output(result, _opts) do
+ cookie_file_lines = [
+ "#{bright("Cookie File")}\n",
+ "Effective user: #{result[:effective_user] || "(none)"}",
+ "Effective home directory: #{result[:home_dir] || "(none)"}",
+ "Cookie file path: #{result[:cookie_file_path]}",
+ "Cookie file exists? #{result[:cookie_file_exists]}",
+ "Cookie file type: #{result[:cookie_file_type] || "(n/a)"}",
+ "Cookie file access: #{result[:cookie_file_access] || "(n/a)"}",
+ "Cookie file size: #{result[:cookie_file_size] || "(n/a)"}",
+ ]
+
+ switch_lines = [
+ "\n#{bright("Cookie CLI Switch")}\n",
+ "--erlang-cookie value set? #{result[:switch_cookie_set]}",
+ "--erlang-cookie value length: #{result[:switch_cookie_value_length] || 0}"
+ ]
+
+ os_env_lines = [
+ "\n#{bright("Env variable ")} #{bright_red("(Deprecated)")}\n",
+ "RABBITMQ_ERLANG_COOKIE value set? #{result[:os_env_cookie_set]}",
+ "RABBITMQ_ERLANG_COOKIE value length: #{result[:os_env_cookie_value_length] || 0}"
+ ]
+
+ lines = cookie_file_lines ++ switch_lines ++ os_env_lines
+
+ {:ok, lines}
+ end
+
+ def help_section(), do: :configuration
+
+ def description() do
+ "Display Erlang cookie source (e.g. $HOME/.erlang.cookie file) information useful for troubleshooting"
+ end
+
+ def usage, do: "erlang_cookie_sources"
+
+ def formatter(), do: RabbitMQ.CLI.Formatters.StringPerLine
+
+ #
+ # Implementation
+ #
+
+ @doc """
+ Computes the HOME directory path the same way the Erlang VM/ei does,
+ including taking Windows-specific env variables into account.
+ """
+ def get_home_dir() do
+ homedrive = System.get_env("HOMEDRIVE")
+ homepath = System.get_env("HOMEPATH")
+
+ case {homedrive != nil, homepath != nil} do
+ {true, true} -> "#{homedrive}#{homepath}"
+ _ -> System.get_env("HOME")
+ end
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/erlang_version_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/erlang_version_command.ex
new file mode 100644
index 0000000000..053e0d142e
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/erlang_version_command.ex
@@ -0,0 +1,72 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Diagnostics.Commands.ErlangVersionCommand do
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ def switches() do
+ [details: :boolean, offline: :boolean, timeout: :integer]
+ end
+ def aliases(), do: [t: :timeout]
+
+ def merge_defaults(args, opts) do
+ {args, Map.merge(%{details: false, offline: false}, opts)}
+ end
+
+ use RabbitMQ.CLI.Core.AcceptsNoPositionalArguments
+
+ def run([], %{details: details, offline: true}) do
+ case details do
+ true ->
+ :rabbit_data_coercion.to_binary(
+ :rabbit_misc.otp_system_version())
+
+ false ->
+ :rabbit_data_coercion.to_binary(
+ :rabbit_misc.platform_and_version())
+ end
+ end
+ def run([], %{node: node_name, timeout: timeout, details: details}) do
+ case details do
+ true ->
+ :rabbit_data_coercion.to_binary(
+ :rabbit_misc.rpc_call(node_name, :rabbit_misc, :otp_system_version, [], timeout))
+
+ false ->
+ :rabbit_data_coercion.to_binary(
+ :rabbit_misc.rpc_call(node_name, :rabbit_misc, :platform_and_version, [], timeout))
+ end
+ end
+
+ def output(result, %{formatter: "json"}) do
+ {:ok, %{"result" => "ok", "value" => result}}
+ end
+ def output(result, _opts) when is_bitstring(result) do
+ {:ok, result}
+ end
+
+ use RabbitMQ.CLI.DefaultOutput
+
+ def help_section(), do: :observability_and_health_checks
+
+ def description(), do: "Displays Erlang/OTP version on the target node"
+
+ def usage, do: "erlang_version"
+
+ def usage_additional() do
+ [
+ ["--details", "when set, display additional Erlang/OTP system information"],
+ ["--offline", "when set, displays local Erlang/OTP version (that used by CLI tools)"]
+ ]
+ end
+
+ def banner([], %{offline: true}) do
+ "CLI Erlang/OTP version ..."
+ end
+ def banner([], %{node: node_name}) do
+ "Asking node #{node_name} for its Erlang/OTP version..."
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/is_booting_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/is_booting_command.ex
new file mode 100644
index 0000000000..56b2253c90
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/is_booting_command.ex
@@ -0,0 +1,53 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Diagnostics.Commands.IsBootingCommand do
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ use RabbitMQ.CLI.Core.AcceptsDefaultSwitchesAndTimeout
+ use RabbitMQ.CLI.Core.MergesNoDefaults
+ use RabbitMQ.CLI.Core.AcceptsNoPositionalArguments
+
+ def run([], %{node: node_name, timeout: timeout}) do
+ :rabbit_misc.rpc_call(node_name, :rabbit, :is_booting, [node_name], timeout)
+ end
+
+ def output(true, %{node: node_name, formatter: "json"}) do
+ m = %{
+ "result" => true,
+ "message" => "RabbitMQ on node #{node_name} is booting"
+ }
+ {:ok, m}
+ end
+
+ def output(false, %{node: node_name, formatter: "json"}) do
+ m = %{
+ "result" => false,
+ "message" => "RabbitMQ on node #{node_name} is fully booted (check with is_running), stopped or has not started booting yet"
+ }
+ {:ok, m}
+ end
+ def output(true, %{node: node_name}) do
+ {:ok, "RabbitMQ on node #{node_name} is booting"}
+ end
+
+ def output(false, %{node: node_name}) do
+ {:ok,
+ "RabbitMQ on node #{node_name} is fully booted (check with is_running), stopped or has not started booting yet"}
+ end
+
+ use RabbitMQ.CLI.DefaultOutput
+
+ def help_section(), do: :observability_and_health_checks
+
+ def description(), do: "Checks if RabbitMQ is still booting on the target node"
+
+ def usage, do: "is_booting"
+
+ def banner([], %{node: node_name}) do
+ "Asking node #{node_name} for its boot status ..."
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/is_running_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/is_running_command.ex
new file mode 100644
index 0000000000..ecf5ce9368
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/is_running_command.ex
@@ -0,0 +1,45 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Diagnostics.Commands.IsRunningCommand do
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ use RabbitMQ.CLI.Core.AcceptsDefaultSwitchesAndTimeout
+ use RabbitMQ.CLI.Core.MergesNoDefaults
+ use RabbitMQ.CLI.Core.AcceptsNoPositionalArguments
+
+ def run([], %{node: node_name, timeout: timeout}) do
+ # Note: we use is_booted/1 over is_running/1 to avoid
+ # returning a positive result when the node is still booting
+ :rabbit_misc.rpc_call(node_name, :rabbit, :is_booted, [node_name], timeout)
+ end
+
+ def output(true, %{node: node_name, formatter: "json"}) do
+ {:ok, %{"result" => true, "message" => "RabbitMQ on node #{node_name} is fully booted and running"}}
+ end
+ def output(false, %{node: node_name, formatter: "json"}) do
+ {:ok,
+ %{"result" => false, "message" => "RabbitMQ on node #{node_name} is not running or has not fully booted yet (check with is_booting)"}}
+ end
+ def output(true, %{node: node_name}) do
+ {:ok, "RabbitMQ on node #{node_name} is fully booted and running"}
+ end
+ def output(false, %{node: node_name}) do
+ {:ok,
+ "RabbitMQ on node #{node_name} is not running or has not fully booted yet (check with is_booting)"}
+ end
+ use RabbitMQ.CLI.DefaultOutput
+
+ def help_section(), do: :observability_and_health_checks
+
+ def description(), do: "Checks if RabbitMQ is fully booted and running on the target node"
+
+ def usage, do: "is_running"
+
+ def banner([], %{node: node_name}) do
+ "Asking node #{node_name} for its status ..."
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/list_network_interfaces_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/list_network_interfaces_command.ex
new file mode 100644
index 0000000000..d41409b8c4
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/list_network_interfaces_command.ex
@@ -0,0 +1,77 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Diagnostics.Commands.ListNetworkInterfacesCommand do
+ @moduledoc """
+ Displays all network interfaces (NICs) reported by the target node.
+ """
+ import RabbitMQ.CLI.Core.Platform, only: [line_separator: 0]
+ import RabbitMQ.CLI.Core.ANSI
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ def switches(), do: [timeout: :integer, offline: :boolean]
+ def aliases(), do: [t: :timeout]
+
+ use RabbitMQ.CLI.Core.MergesNoDefaults
+ use RabbitMQ.CLI.Core.AcceptsNoPositionalArguments
+
+ def run([], %{offline: true}) do
+ :rabbit_net.getifaddrs()
+ end
+ def run([], %{node: node_name, timeout: timeout}) do
+ :rabbit_misc.rpc_call(node_name, :rabbit_net, :getifaddrs, [], timeout)
+ end
+
+ def output(nic_map, %{node: node_name, formatter: "json"}) when map_size(nic_map) == 0 do
+ {:ok, %{"result" => "ok", "node" => node_name, "interfaces" => %{}}}
+ end
+ def output(nic_map, %{node: node_name}) when map_size(nic_map) == 0 do
+ {:ok, "Node #{node_name} reported no network interfaces"}
+ end
+ def output(nic_map0, %{node: node_name, formatter: "json"}) do
+ nic_map = Enum.map(nic_map0, fn ({k, v}) -> {to_string(k), v} end)
+ {:ok,
+ %{
+ "result" => "ok",
+ "interfaces" => Enum.into(nic_map, %{}),
+ "message" => "Node #{node_name} reported network interfaces"
+ }}
+ end
+ def output(nic_map, _) when is_map(nic_map) do
+ lines = nic_lines(nic_map)
+
+ {:ok, Enum.join(lines, line_separator())}
+ end
+ use RabbitMQ.CLI.DefaultOutput
+
+ def help_section(), do: :observability_and_health_checks
+
+ def description(), do: "Lists network interfaces (NICs) on the target node"
+
+ def usage, do: "list_network_interfaces"
+
+ def banner([], %{node: node_name}) do
+ "Asking node #{node_name} to report its network interfaces ..."
+ end
+
+ #
+ # Implementation
+ #
+
+ defp nic_lines(nic_map) do
+ Enum.reduce(nic_map, [],
+ fn({iface, props}, acc) ->
+ iface_lines = Enum.reduce(props, [],
+ fn({prop, val}, inner_acc) ->
+ ["#{prop}: #{val}" | inner_acc]
+ end)
+
+ header = "#{bright("Interface #{iface}")}\n"
+ acc ++ [header | iface_lines] ++ ["\n"]
+ end)
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/list_node_auth_attempt_stats_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/list_node_auth_attempt_stats_command.ex
new file mode 100644
index 0000000000..4793cf6c46
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/list_node_auth_attempt_stats_command.ex
@@ -0,0 +1,75 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Diagnostics.Commands.ListNodeAuthAttemptStatsCommand do
+ alias RabbitMQ.CLI.Core.DocGuide
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ def formatter(), do: RabbitMQ.CLI.Formatters.Table
+
+ def scopes(), do: [:ctl, :diagnostics]
+
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+
+ def switches(), do: [by_source: :boolean]
+
+ def merge_defaults(args, opts) do
+ {args, Map.merge(%{by_source: false}, opts)}
+ end
+
+ def validate([], _), do: :ok
+ def validate(_, _), do: {:validation_failure, :too_many_args}
+
+ def run([], %{node: node_name, timeout: timeout, by_source: by_source}) do
+ case by_source do
+ :true ->
+ :rabbit_misc.rpc_call(
+ node_name, :rabbit_core_metrics, :get_auth_attempts_by_source, [], timeout)
+ :false ->
+ :rabbit_misc.rpc_call(
+ node_name, :rabbit_core_metrics, :get_auth_attempts, [], timeout)
+ end
+ end
+
+ def output([], %{node: node_name, formatter: "json"}) do
+ {:ok, %{"result" => "ok", "node" => node_name, "attempts" => []}}
+ end
+ def output([], %{node: node_name}) do
+ {:ok, "Node #{node_name} reported no authentication attempt stats"}
+ end
+ def output(rows, %{node: node_name, formatter: "json"}) do
+ maps = Enum.map(rows, &Map.new/1)
+ {:ok,
+ %{
+ "result" => "ok",
+ "node" => node_name,
+ "attempts" => maps
+ }}
+ end
+ use RabbitMQ.CLI.DefaultOutput
+
+ def usage, do: "list_node_auth_attempts [--by-source]"
+
+ def usage_additional do
+ [
+ ["--by-source", "list authentication attempts by remote address and username"]
+ ]
+ end
+
+ def usage_doc_guides() do
+ [
+ DocGuide.access_control(),
+ DocGuide.monitoring()
+ ]
+ end
+
+ def help_section(), do: :observability_and_health_checks
+ def description(), do: "Lists authentication attempts on the target node"
+
+ def banner([], %{node: node_name}), do: "Listing authentication
+ attempts for node \"#{node_name}\" ..."
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/listeners_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/listeners_command.ex
new file mode 100644
index 0000000000..f54ce3775e
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/listeners_command.ex
@@ -0,0 +1,92 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Diagnostics.Commands.ListenersCommand do
+ @moduledoc """
+ Displays all listeners on a node.
+
+ Returns a code of 0 unless there were connectivity and authentication
+ errors. This command is not meant to be used in health checks.
+ """
+
+ import RabbitMQ.CLI.Core.Listeners,
+ only: [listeners_on: 2, listener_lines: 1, listener_maps: 1, listener_rows: 1]
+
+ import RabbitMQ.CLI.Core.Platform, only: [line_separator: 0]
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ use RabbitMQ.CLI.Core.AcceptsDefaultSwitchesAndTimeout
+ use RabbitMQ.CLI.Core.MergesNoDefaults
+ use RabbitMQ.CLI.Core.AcceptsNoPositionalArguments
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+
+ def run([], %{node: node_name, timeout: timeout}) do
+ # Example listener list:
+ #
+ # [{listener,rabbit@warp10,clustering,"localhost",
+ # {0,0,0,0,0,0,0,0},
+ # 25672,[]},
+ # {listener,rabbit@warp10,amqp,"localhost",
+ # {0,0,0,0,0,0,0,0},
+ # 5672,
+ # [{backlog,128},
+ # {nodelay,true},
+ # {linger,{true,0}},
+ # {exit_on_close,false}]},
+ # {listener,rabbit@warp10,stomp,"localhost",
+ # {0,0,0,0,0,0,0,0},
+ # 61613,
+ # [{backlog,128},{nodelay,true}]}]
+ case :rabbit_misc.rpc_call(node_name, :rabbit_networking, :active_listeners, [], timeout) do
+ {:error, _} = err -> err
+ {:error, _, _} = err -> err
+ xs when is_list(xs) -> listeners_on(xs, node_name)
+ other -> other
+ end
+ end
+
+ def output([], %{formatter: fmt}) when fmt == "csv" or fmt == "erlang" do
+ {:ok, []}
+ end
+
+ def output([], %{node: node_name, formatter: "json"}) do
+ {:ok, %{"result" => "ok", "node" => node_name, "listeners" => []}}
+ end
+
+ def output([], %{node: node_name}) do
+ {:ok, "Node #{node_name} reported no enabled listeners."}
+ end
+
+ def output(listeners, %{formatter: "erlang"}) do
+ {:ok, listener_rows(listeners)}
+ end
+
+ def output(listeners, %{node: node_name, formatter: "json"}) do
+ {:ok, %{"result" => "ok", "node" => node_name, "listeners" => listener_maps(listeners)}}
+ end
+
+ def output(listeners, %{formatter: "csv"}) do
+ {:stream, [listener_rows(listeners)]}
+ end
+
+ def output(listeners, _opts) do
+ lines = listener_lines(listeners)
+
+ {:ok, Enum.join(lines, line_separator())}
+ end
+
+ def help_section(), do: :observability_and_health_checks
+
+ def description(),
+ do: "Lists active connection listeners (bound interface, port, protocol) on the target node"
+
+ def usage, do: "listeners"
+
+ def banner([], %{node: node_name}) do
+ "Asking node #{node_name} to report its protocol listeners ..."
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/log_location_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/log_location_command.ex
new file mode 100644
index 0000000000..36ff562b41
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/log_location_command.ex
@@ -0,0 +1,56 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2019-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Diagnostics.Commands.LogLocationCommand do
+ @moduledoc """
+ Displays standard log file location on the target node
+ """
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ alias RabbitMQ.CLI.Core.LogFiles
+
+ def switches, do: [all: :boolean, timeout: :integer]
+ def aliases, do: [a: :all, t: :timeout]
+
+ def merge_defaults(args, opts) do
+ {args, Map.merge(%{all: false}, opts)}
+ end
+
+ use RabbitMQ.CLI.Core.AcceptsNoPositionalArguments
+
+ def run([], %{node: node_name, timeout: timeout, all: all}) do
+ case all do
+ true -> LogFiles.get_log_locations(node_name, timeout);
+ false -> LogFiles.get_default_log_location(node_name, timeout)
+ end
+ end
+
+ def output({:ok, location}, %{node: node_name, formatter: "json"}) do
+ {:ok, %{
+ "result" => "ok",
+ "node_name" => node_name,
+ "paths" => [location]
+ }}
+ end
+ def output(locations, %{node: node_name, formatter: "json"}) do
+ {:ok, %{
+ "result" => "ok",
+ "node_name" => node_name,
+ "paths" => locations
+ }}
+ end
+ use RabbitMQ.CLI.DefaultOutput
+
+ def help_section(), do: :configuration
+
+ def description(), do: "Shows log file location(s) on target node"
+
+ def usage, do: "log_location [--all|-a]"
+
+ def banner([], %{node: node_name}) do
+ "Log file location(s) on node #{node_name} ..."
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/log_tail_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/log_tail_command.ex
new file mode 100644
index 0000000000..9717908f60
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/log_tail_command.ex
@@ -0,0 +1,50 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2019-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Diagnostics.Commands.LogTailCommand do
+ @moduledoc """
+ Prints the last N lines of the default log file on the target node
+ """
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ alias RabbitMQ.CLI.Core.LogFiles
+
+ def switches, do: [number: :integer, timeout: :integer]
+ def aliases, do: ['N': :number, t: :timeout]
+
+ def merge_defaults(args, opts) do
+ {args, Map.merge(%{number: 50}, opts)}
+ end
+ use RabbitMQ.CLI.Core.AcceptsNoPositionalArguments
+
+ def run([], %{node: node_name, timeout: timeout, number: n}) do
+ case LogFiles.get_default_log_location(node_name, timeout) do
+ {:ok, file} ->
+ :rabbit_misc.rpc_call(node_name,
+ :rabbit_log_tail, :tail_n_lines, [file, n],
+ timeout)
+ error -> error
+ end
+ end
+
+ use RabbitMQ.CLI.DefaultOutput
+
+ def help_section(), do: :observability_and_health_checks
+
+ def description(), do: "Prints the last N lines of the log on the node"
+
+ def usage, do: "log_tail [--number|-N <number>]"
+
+ def usage_additional do
+ [
+ ["<number>", "number of lines to print. Defaults to 50"]
+ ]
+ end
+
+ def banner([], %{node: node_name, number: n}) do
+ "Last #{n} log lines on node #{node_name} ..."
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/log_tail_stream_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/log_tail_stream_command.ex
new file mode 100644
index 0000000000..5080fd0d1d
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/log_tail_stream_command.ex
@@ -0,0 +1,73 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2019-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Diagnostics.Commands.LogTailStreamCommand do
+ @moduledoc """
+ Streams the contents of the default log file from the target node for a period of time
+ """
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ alias RabbitMQ.CLI.Core.LogFiles
+
+
+ def switches(), do: [duration: :integer, timeout: :integer]
+ def aliases(), do: [d: :duration, t: :timeout]
+
+ def merge_defaults(args, opts) do
+ {args, Map.merge(%{duration: :infinity}, opts)}
+ end
+
+ use RabbitMQ.CLI.Core.AcceptsNoPositionalArguments
+
+ def printer(), do: RabbitMQ.CLI.Printers.StdIORaw
+
+ def run([], %{node: node_name, timeout: timeout, duration: duration}) do
+ case LogFiles.get_default_log_location(node_name, timeout) do
+ {:ok, file} ->
+ pid = self()
+ ref = make_ref()
+ subscribed = :rabbit_misc.rpc_call(
+ node_name,
+ :rabbit_log_tail, :init_tail_stream,
+ [file, pid, ref, duration],
+ timeout)
+ case subscribed do
+ {:ok, ^ref} ->
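+ # rabbit_log_tail sends {ref, data, :confinue} chunks until the
+ # requested duration elapses, then a final {ref, data, :finished};
+ # Stream.unfold converts those messages into a lazy stream. The
+ # :confinue spelling matches the atom used on the server side.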
+ Stream.unfold(:confinue,
+ fn(:finished) -> nil
+ (:confinue) ->
+ receive do
+ {^ref, data, :finished} -> {data, :finished};
+ {^ref, data, :confinue} -> {data, :confinue}
+ end
+ end)
+ error -> error
+ end
+ error -> error
+ end
+ end
+
+ use RabbitMQ.CLI.DefaultOutput
+
+ def help_section(), do: :observability_and_health_checks
+
+ def description(), do: "Streams logs from a running node for a period of time"
+
+ def usage, do: "log_tail_stream [--duration|-d <seconds>]"
+
+ def usage_additional() do
+ [
+ ["<duration_in_seconds>", "duration in seconds to stream log. Defaults to infinity"]
+ ]
+ end
+
+ def banner([], %{node: node_name, duration: :infinity}) do
+ "Streaming logs from node #{node_name} ..."
+ end
+ def banner([], %{node: node_name, duration: duration}) do
+ "Streaming logs from node #{node_name} for #{duration} seconds ..."
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/maybe_stuck_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/maybe_stuck_command.ex
new file mode 100644
index 0000000000..c241780f62
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/maybe_stuck_command.ex
@@ -0,0 +1,29 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Diagnostics.Commands.MaybeStuckCommand do
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+ use RabbitMQ.CLI.DefaultOutput
+
+ use RabbitMQ.CLI.Core.AcceptsDefaultSwitchesAndTimeout
+ use RabbitMQ.CLI.Core.MergesNoDefaults
+ use RabbitMQ.CLI.Core.AcceptsNoPositionalArguments
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+
+ def run([], %{node: node_name, timeout: timeout}) do
+ :rabbit_misc.rpc_call(node_name, :rabbit_diagnostics, :maybe_stuck, [], timeout)
+ end
+
+ def help_section(), do: :observability_and_health_checks
+
+ def description(), do: "Detects Erlang processes (\"lightweight threads\") potentially not making progress on the target node"
+
+ def usage, do: "maybe_stuck"
+
+ def banner(_, %{node: node_name}) do
+ "Asking node #{node_name} to detect potentially stuck Erlang processes..."
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/memory_breakdown_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/memory_breakdown_command.ex
new file mode 100644
index 0000000000..356358b7d7
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/memory_breakdown_command.ex
@@ -0,0 +1,103 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Diagnostics.Commands.MemoryBreakdownCommand do
+ alias RabbitMQ.CLI.InformationUnit, as: IU
+ import RabbitMQ.CLI.Core.Memory
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ def switches(), do: [unit: :string, timeout: :integer]
+ def aliases(), do: [t: :timeout]
+
+ def merge_defaults(args, opts) do
+ {args, Map.merge(%{unit: "gb"}, opts)}
+ end
+
+ def validate(args, _) when length(args) > 0 do
+ {:validation_failure, :too_many_args}
+ end
+
+ def validate(_, %{unit: unit}) do
+ case IU.known_unit?(unit) do
+ true ->
+ :ok
+
+ false ->
+ {:validation_failure, "unit '#{unit}' is not supported. Please use one of: bytes, mb, gb"}
+ end
+ end
+
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+
+ def run([], %{node: node_name, timeout: timeout}) do
+ :rabbit_misc.rpc_call(node_name, :rabbit_vm, :memory, [], timeout)
+ end
+
+ def output(result, %{formatter: "json"} = _opts) do
+ {:ok, compute_relative_values(result)}
+ end
+
+ def output(result, %{formatter: "csv"} = _opts) do
+ flattened =
+ compute_relative_values(result)
+ |> Enum.flat_map(fn {k, %{bytes: b, percentage: p}} ->
+ [{"#{k}.bytes", b}, {"#{k}.percentage", p}]
+ end)
+ |> Enum.sort_by(fn {key, _val} -> key end, &>=/2)
+
+ headers = Enum.map(flattened, fn {k, _v} -> k end)
+ values = Enum.map(flattened, fn {_k, v} -> v end)
+
+ {:stream, [headers, values]}
+ end
+
+ def output(result, _opts) do
+ {:ok, compute_relative_values(result)}
+ end
+
+ def help_section(), do: :observability_and_health_checks
+
+ def description(), do: "Provides a memory usage breakdown on the target node."
+
+ def usage, do: "memory_breakdown [--unit <unit>]"
+
+ def usage_additional() do
+ [
+ ["--unit <bytes | mb | gb>", "byte multiple (bytes, megabytes, gigabytes) to use"],
+ ["--formatter <json | csv | erlang>", "alternative formatter to use, JSON, CSV or Erlang terms"]
+ ]
+ end
+
+ def banner([], %{node: node_name}) do
+ "Reporting memory breakdown on node #{node_name}..."
+ end
+
+ defmodule Formatter do
+ alias RabbitMQ.CLI.Formatters.FormatterHelpers
+ alias RabbitMQ.CLI.InformationUnit, as: IU
+
+ @behaviour RabbitMQ.CLI.FormatterBehaviour
+
+ def format_output(output, %{unit: unit}) do
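+ # Render each category as "key: <value in requested unit> <unit> (<pct>%)"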
+ Enum.reduce(output, "", fn {key, %{bytes: bytes, percentage: percentage}}, acc ->
+ u = String.downcase(unit)
+ acc <> "#{key}: #{IU.convert(bytes, u)} #{u} (#{percentage}%)\n"
+ end)
+ end
+
+ def format_stream(stream, options) do
+ Stream.map(
+ stream,
+ FormatterHelpers.without_errors_1(fn el ->
+ format_output(el, options)
+ end)
+ )
+ end
+ end
+
+ def formatter(), do: Formatter
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/observer_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/observer_command.ex
new file mode 100644
index 0000000000..717e23e6b5
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/observer_command.ex
@@ -0,0 +1,48 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Diagnostics.Commands.ObserverCommand do
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+ use RabbitMQ.CLI.DefaultOutput
+
+ def switches(), do: [interval: :integer]
+ def aliases(), do: [i: :interval]
+
+ def merge_defaults(args, opts) do
+ {args, Map.merge(%{interval: 5}, opts)}
+ end
+
+
+ use RabbitMQ.CLI.Core.AcceptsNoPositionalArguments
+
+ def run([], %{node: node_name, interval: interval}) do
+ case :observer_cli.start(node_name, [{:interval, interval * 1000}]) do
+ # See zhongwencool/observer_cli#54
+ {:badrpc, _} = err -> err
+ {:error, _} = err -> err
+ {:error, _, _} = err -> err
+ :ok -> {:ok, "Disconnected from #{node_name}."}
+ :quit -> {:ok, "Disconnected from #{node_name}."}
+ other -> other
+ end
+ end
+
+ def help_section(), do: :observability_and_health_checks
+
+ def description(), do: "Starts a CLI observer interface on the target node"
+
+ def usage, do: "observer [--interval <seconds>]"
+
+ def usage_additional() do
+ [
+ ["--interval <seconds>", "Update interval to use, in seconds"]
+ ]
+ end
+
+ def banner(_, %{node: node_name}) do
+ "Starting a CLI observer interface on node #{node_name}..."
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/os_env_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/os_env_command.ex
new file mode 100644
index 0000000000..63e8c18beb
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/os_env_command.ex
@@ -0,0 +1,67 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Diagnostics.Commands.OsEnvCommand do
+ @moduledoc """
+ Lists RabbitMQ-specific environment variables defined on the target node
+ """
+
+ import RabbitMQ.CLI.Core.Platform, only: [line_separator: 0]
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ def scopes(), do: [:diagnostics]
+
+ use RabbitMQ.CLI.Core.AcceptsDefaultSwitchesAndTimeout
+ use RabbitMQ.CLI.Core.MergesNoDefaults
+ use RabbitMQ.CLI.Core.AcceptsNoPositionalArguments
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+
+ def run([], %{node: node_name, timeout: timeout}) do
+ case :rabbit_misc.rpc_call(node_name, :rabbit_env, :get_used_env_vars, [], timeout) do
+ {:error, _} = err -> err
+ {:error, _, _} = err -> err
+ xs when is_list(xs) ->
+ # convert keys and values to binaries (Elixir strings)
+ xs
+ |> Enum.map(fn {k, v} -> {:rabbit_data_coercion.to_binary(k), :rabbit_data_coercion.to_binary(v)} end)
+ |> :maps.from_list
+ other -> other
+ end
+ end
+
+ def output([], %{formatter: fmt}) when fmt == "csv" or fmt == "erlang" do
+ {:ok, []}
+ end
+ def output([], %{node: node_name, formatter: "json"}) do
+ {:ok, %{"result" => "ok", "node" => node_name, "variables" => []}}
+ end
+ def output([], %{node: node_name}) do
+ {:ok, "Node #{node_name} reported no relevant environment variables."}
+ end
+ def output(vars, %{node: node_name, formatter: "json"}) do
+ {:ok, %{"result" => "ok", "node" => node_name, "variables" => vars}}
+ end
+ def output(vars, %{formatter: "csv"}) do
+ {:stream, [Enum.map(vars, fn({k, v}) -> [variable: k, value: v] end)]}
+ end
+ def output(vars, _opts) do
+ lines = vars |> Enum.map(fn({k, v}) -> "#{k}=#{v}" end) |> Enum.join(line_separator())
+ {:ok, lines}
+ end
+
+ def usage() do
+ "os_env"
+ end
+
+ def help_section(), do: :configuration
+
+ def description(), do: "Lists RabbitMQ-specific environment variables set on target node"
+
+ def banner(_, %{node: node_name}) do
+ "Listing RabbitMQ-specific environment variables defined on node #{node_name}..."
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/reset_node_auth_attempt_metrics_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/reset_node_auth_attempt_metrics_command.ex
new file mode 100644
index 0000000000..e3b08c2ac8
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/reset_node_auth_attempt_metrics_command.ex
@@ -0,0 +1,37 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Diagnostics.Commands.ResetNodeAuthAttemptMetricsCommand do
+ alias RabbitMQ.CLI.Core.DocGuide
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ use RabbitMQ.CLI.Core.MergesNoDefaults
+ use RabbitMQ.CLI.Core.AcceptsNoPositionalArguments
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+
+ def run([], %{node: node_name}) do
+ :rabbit_misc.rpc_call(node_name, :rabbit_core_metrics, :reset_auth_attempt_metrics, [])
+ end
+
+ def usage, do: "reset_node_auth_attempt_metrics"
+
+ def usage_doc_guides() do
+ [
+ DocGuide.access_control(),
+ DocGuide.monitoring()
+ ]
+ end
+
+ def help_section(), do: :configuration
+
+ def description(), do: "Resets auth attempt metrics on the target node"
+
+ def banner([], %{node: node_name}) do
+ "Reset auth attempt metrics on node #{node_name} ..."
+ end
+
+ use RabbitMQ.CLI.DefaultOutput
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/resolve_hostname_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/resolve_hostname_command.ex
new file mode 100644
index 0000000000..349dbee513
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/resolve_hostname_command.ex
@@ -0,0 +1,94 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Diagnostics.Commands.ResolveHostnameCommand do
+ @moduledoc """
+ Resolves a hostname to one or more addresses of a given IP address family (IPv4 or IPv6).
+ This command is not meant to compete with `dig` but rather to provide a way
+ to perform basic resolution tests that take Erlang's inetrc file into account.
+ """
+
+ import RabbitCommon.Records
+ alias RabbitMQ.CLI.Core.Networking
+ alias RabbitMQ.CLI.Core.ExitCodes
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ def scopes(), do: [:diagnostics]
+
+ def switches(), do: [address_family: :string, offline: :boolean]
+ def aliases(), do: [a: :address_family]
+
+ def merge_defaults(args, opts) do
+ {args, Map.merge(%{address_family: "IPv4", offline: false}, opts)}
+ end
+
+ def validate(args, _) when length(args) < 1, do: {:validation_failure, :not_enough_args}
+ def validate(args, _) when length(args) > 1, do: {:validation_failure, :too_many_args}
+ def validate([_], %{address_family: family}) do
+ case Networking.valid_address_family?(family) do
+ true -> :ok
+ false -> {:validation_failure, {:bad_argument, "unsupported IP address family #{family}. Valid values are: ipv4, ipv6"}}
+ end
+ end
+ def validate([_], _), do: :ok
+
+ def run([hostname], %{address_family: family, offline: true}) do
+ :inet.gethostbyname(to_charlist(hostname), Networking.address_family(family))
+ end
+ def run([hostname], %{node: node_name, address_family: family, offline: false, timeout: timeout}) do
+ case :rabbit_misc.rpc_call(node_name, :inet, :gethostbyname,
+ [to_charlist(hostname), Networking.address_family(family)], timeout) do
+ {:error, _} = err -> err
+ {:error, _, _} = err -> err
+ {:ok, result} -> {:ok, result}
+ other -> other
+ end
+ end
+
+ def output({:error, :nxdomain}, %{node: node_name, formatter: "json"}) do
+ m = %{
+ "result" => "error",
+ "node" => node_name,
+ "message" => "Hostname does not resolve (resolution failed with an nxdomain)"
+ }
+ {:error, ExitCodes.exit_dataerr(), m}
+ end
+ def output({:error, :nxdomain}, _opts) do
+ {:error, ExitCodes.exit_dataerr(), "Hostname does not resolve (resolution failed with an nxdomain)"}
+ end
+ def output({:ok, result}, %{node: node_name, address_family: family, formatter: "json"}) do
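+ # hostent/2 is the #hostent{} record accessor imported from
+ # RabbitCommon.Records; the record is returned by :inet.gethostbyname/2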
+ hostname = hostent(result, :h_name)
+ addresses = hostent(result, :h_addr_list)
+ {:ok, %{
+ "result" => "ok",
+ "node" => node_name,
+ "hostname" => to_string(hostname),
+ "address_family" => family,
+ "addresses" => Networking.format_addresses(addresses)
+ }}
+ end
+ def output({:ok, result}, _opts) do
+ addresses = hostent(result, :h_addr_list)
+ {:ok, Enum.join(Networking.format_addresses(addresses), "\n")}
+ end
+ use RabbitMQ.CLI.DefaultOutput
+
+ def usage() do
+ "resolve_hostname <hostname> [--address-family <ipv4 | ipv6>]"
+ end
+
+ def help_section(), do: :configuration
+
+ def description(), do: "Resolves a hostname to a set of addresses. Takes Erlang's inetrc file into account."
+
+ def banner([hostname], %{offline: false, node: node_name, address_family: family}) do
+ "Asking node #{node_name} to resolve hostname #{hostname} to #{family} addresses..."
+ end
+ def banner([hostname], %{offline: true, address_family: family}) do
+ "Resolving hostname #{hostname} to #{family} addresses..."
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/resolver_info_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/resolver_info_command.ex
new file mode 100644
index 0000000000..a4f3d8d7d3
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/resolver_info_command.ex
@@ -0,0 +1,84 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Diagnostics.Commands.ResolverInfoCommand do
+ @moduledoc """
+ Displays effective hostname resolver (inetrc) configuration on the target node
+ """
+
+ import RabbitMQ.CLI.Core.Platform, only: [line_separator: 0]
+ import RabbitMQ.CLI.Core.ANSI, only: [bright: 1]
+ alias RabbitMQ.CLI.Core.Networking
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ def scopes(), do: [:diagnostics]
+
+ def switches(), do: [offline: :boolean]
+ def aliases(), do: []
+
+ def merge_defaults(args, opts) do
+ {args, Map.merge(%{offline: false}, opts)}
+ end
+
+ def validate(args, _) when length(args) > 0, do: {:validation_failure, :too_many_args}
+ def validate([], _), do: :ok
+
+ def run([], %{offline: true}) do
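+ # :inet.get_rc/0 returns the resolver configuration the local VM
+ # loaded from its inetrc file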
+ Networking.inetrc_map(:inet.get_rc())
+ end
+ def run([], %{node: node_name, timeout: timeout, offline: false}) do
+ case :rabbit_misc.rpc_call(node_name, :inet, :get_rc, [], timeout) do
+ {:error, _} = err -> err
+ {:error, _, _} = err -> err
+ xs when is_list(xs) -> Networking.inetrc_map(xs)
+ other -> other
+ end
+ end
+
+ def output(info, %{node: node_name, formatter: "json"}) do
+ {:ok, %{
+ "result" => "ok",
+ "node" => node_name,
+ "resolver" => info
+ }}
+ end
+ def output(info, _opts) do
+ main_section = [
+ "#{bright("Runtime Hostname Resolver (inetrc) Settings")}\n",
+ "Lookup order: #{info["lookup"]}",
+ "Hosts file: #{info["hosts_file"]}",
+ "Resolver conf file: #{info["resolv_conf"]}",
+ "Cache size: #{info["cache_size"]}"
+ ]
+ hosts_section = [
+ "\n#{bright("inetrc File Host Entries")}\n"
+ ] ++ case info["hosts"] do
+ [] -> ["(none)"]
+ nil -> ["(none)"]
+ hs -> Enum.reduce(hs, [], fn {k, v}, acc -> ["#{k} #{Enum.join(v, ", ")}" | acc] end)
+ end
+
+ lines = main_section ++ hosts_section
+
+ {:ok, Enum.join(lines, line_separator())}
+ end
+
+ def usage() do
+ "resolver_info"
+ end
+
+ def help_section(), do: :configuration
+
+ def description(), do: "Displays effective hostname resolver (inetrc) configuration on target node"
+
+ def banner(_, %{node: node_name, offline: false}) do
+ "Asking node #{node_name} for its effective hostname resolver (inetrc) configuration..."
+ end
+ def banner(_, %{offline: true}) do
+ "Displaying effective hostname resolver (inetrc) configuration used by CLI tools..."
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/runtime_thread_stats_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/runtime_thread_stats_command.ex
new file mode 100644
index 0000000000..ee5bb56566
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/runtime_thread_stats_command.ex
@@ -0,0 +1,70 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Diagnostics.Commands.RuntimeThreadStatsCommand do
+ alias RabbitMQ.CLI.Core.DocGuide
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ def switches(), do: [sample_interval: :integer]
+ def aliases(), do: [i: :sample_interval]
+
+ def merge_defaults(args, opts) do
+ {args, Map.merge(%{sample_interval: 5}, opts)}
+ end
+
+ use RabbitMQ.CLI.Core.AcceptsNoPositionalArguments
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+
+ def run([], %{node: node_name, timeout: timeout, sample_interval: interval}) do
+ case :rabbit_misc.rpc_call(
+ node_name,
+ :rabbit_runtime,
+ :msacc_stats,
+ [interval * 1000],
+ timeout
+ ) do
+ {:ok, stats} -> stats
+ other -> other
+ end
+ end
+
+ def output(result, %{formatter: "json"}) when is_list(result) do
+ {:error, "JSON formatter is not supported by this command"}
+ end
+
+ def output(result, %{formatter: "csv"}) when is_list(result) do
+ {:error, "CSV formatter is not supported by this command"}
+ end
+
+ def output(result, _options) when is_list(result) do
+ {:ok, result}
+ end
+
+ def help_section(), do: :observability_and_health_checks
+
+ def description(), do: "Provides a breakdown of runtime thread activity stats on the target node"
+
+ def usage, do: "runtime_thread_stats [--sample-interval <interval>]"
+
+ def usage_additional() do
+ [
+ ["--sample-interval <seconds>", "sampling interval to use in seconds"]
+ ]
+ end
+
+ def usage_doc_guides() do
+ [
+ DocGuide.runtime_tuning()
+ ]
+ end
+
+ def banner([], %{node: node_name, sample_interval: interval}) do
+ "Will collect runtime thread stats on #{node_name} for #{interval} seconds..."
+ end
+
+ def formatter(), do: RabbitMQ.CLI.Formatters.Msacc
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/schema_info_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/schema_info_command.ex
new file mode 100644
index 0000000000..50b750c772
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/schema_info_command.ex
@@ -0,0 +1,73 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Diagnostics.Commands.SchemaInfoCommand do
+ @moduledoc """
+ Lists all tables on the mnesia schema
+ """
+
+ alias RabbitMQ.CLI.Ctl.InfoKeys
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ @info_keys ~w(name snmp load_order active_replicas all_nodes attributes checkpoints disc_copies
+ disc_only_copies external_copies frag_properties master_nodes ram_copies
+ storage_properties subscribers user_properties cstruct local_content
+ where_to_commit where_to_read name access_mode cookie load_by_force
+ load_node record_name size storage_type type where_to_write index arity
+ majority memory commit_work where_to_wlock load_reason record_validation
+ version wild_pattern index_info)a
+
+ def info_keys(), do: @info_keys
+
+ def scopes(), do: [:ctl, :diagnostics]
+
+ use RabbitMQ.CLI.Core.AcceptsDefaultSwitchesAndTimeout
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+
+ def merge_defaults([], opts) do
+ merge_defaults(
+ ~w(name cookie active_replicas user_properties),
+ opts
+ )
+ end
+
+ def merge_defaults(args, opts) do
+ {args, Map.merge(%{table_headers: true}, opts)}
+ end
+
+ def validate(args, _) do
+ case InfoKeys.validate_info_keys(args, @info_keys) do
+ {:ok, _} -> :ok
+ err -> err
+ end
+ end
+
+ def run([_ | _] = args, %{node: node_name, timeout: timeout}) do
+ info_keys = InfoKeys.prepare_info_keys(args)
+ :rabbit_misc.rpc_call(node_name, :rabbit_mnesia, :schema_info, [info_keys], timeout)
+ end
+
+ use RabbitMQ.CLI.DefaultOutput
+
+ def formatter(), do: RabbitMQ.CLI.Formatters.Table
+
+ def usage() do
+ "schema_info [--no-table-headers] [<column> ...]"
+ end
+
+ def usage_additional() do
+ [
+ ["<column>", "must be one of " <> Enum.join(Enum.sort(@info_keys), ", ")]
+ ]
+ end
+
+ def help_section(), do: :observability_and_health_checks
+
+ def description(), do: "Lists schema database tables and their properties"
+
+ def banner(_, %{node: node_name}), do: "Asking node #{node_name} to report its schema..."
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/server_version_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/server_version_command.ex
new file mode 100644
index 0000000000..9f4068e459
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/server_version_command.ex
@@ -0,0 +1,37 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Diagnostics.Commands.ServerVersionCommand do
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ use RabbitMQ.CLI.Core.AcceptsDefaultSwitchesAndTimeout
+ use RabbitMQ.CLI.Core.MergesNoDefaults
+ use RabbitMQ.CLI.Core.AcceptsNoPositionalArguments
+
+ def run([], %{node: node_name, timeout: timeout}) do
+ :rabbit_data_coercion.to_binary(
+ :rabbit_misc.rpc_call(node_name, :rabbit_misc, :version, [], timeout))
+ end
+
+ def output(result, %{formatter: "json"}) do
+ {:ok, %{"result" => "ok", "value" => result}}
+ end
+ def output(result, _options) when is_bitstring(result) do
+ {:ok, result}
+ end
+
+ use RabbitMQ.CLI.DefaultOutput
+
+ def help_section(), do: :observability_and_health_checks
+
+ def description(), do: "Displays server version on the target node"
+
+ def usage, do: "server_version"
+
+ def banner([], %{node: node_name}) do
+ "Asking node #{node_name} for its RabbitMQ version..."
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/tls_versions_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/tls_versions_command.ex
new file mode 100644
index 0000000000..2f81bad889
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/tls_versions_command.ex
@@ -0,0 +1,39 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Diagnostics.Commands.TlsVersionsCommand do
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ use RabbitMQ.CLI.Core.AcceptsDefaultSwitchesAndTimeout
+ use RabbitMQ.CLI.Core.MergesNoDefaults
+ use RabbitMQ.CLI.Core.AcceptsNoPositionalArguments
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+
+ def run([], %{node: node_name, timeout: timeout} = _opts) do
+ :rabbit_misc.rpc_call(node_name, :ssl, :versions, [], timeout)
+ end
+
+ def banner([], %{}), do: "Listing all TLS versions supported by the runtime..."
+
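+  # `:ssl.versions/0` returns a property list; its `:available` entry
+  # lists every TLS version the runtime can support.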
+ def output(result, %{formatter: "json"}) do
+ vs = Map.new(result) |> Map.get(:available)
+
+ {:ok, %{versions: vs}}
+ end
+
+ def output(result, _opts) do
+ vs = Map.new(result) |> Map.get(:available)
+ {:ok, vs}
+ end
+
+ def help_section(), do: :observability_and_health_checks
+
+ def description(), do: "Lists TLS versions supported (but not necessarily allowed) on the target node"
+
+ def usage, do: "tls_versions"
+
+ def formatter(), do: RabbitMQ.CLI.Formatters.StringPerLine
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/diagnostics_helpers.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/diagnostics_helpers.ex
new file mode 100644
index 0000000000..601cc842cb
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/diagnostics_helpers.ex
@@ -0,0 +1,38 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Diagnostics.Helpers do
+ def test_connection(hostname, port, timeout) do
+ case :gen_tcp.connect(hostname, port, [], timeout) do
+ {:error, _} -> :gen_tcp.connect(hostname, port, [:inet6], timeout)
+ r -> r
+ end
+ end
+
+ def check_port_connectivity(port, node_name, timeout) do
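+    # extract the host part of the node name by stripping the 'name@' prefix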
+ regex = Regex.recompile!(~r/^(.+)@/)
+ hostname = Regex.replace(regex, to_string(node_name), "") |> to_charlist
+ try do
+ case test_connection(hostname, port, timeout) do
+ {:error, _} ->
+ false
+
+        {:ok, socket} ->
+          :ok = :gen_tcp.close(socket)
+ true
+ end
+
+      # `:gen_tcp.connect/4` exits if the port is outside of the valid
+      # range, hence the catch clause below
+ catch
+ :exit, _ -> false
+ end
+ end
+
+ def check_listener_connectivity(%{port: port}, node_name, timeout) do
+ check_port_connectivity(port, node_name, timeout)
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/formatter_behaviour.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/formatter_behaviour.ex
new file mode 100644
index 0000000000..498ba114b9
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/formatter_behaviour.ex
@@ -0,0 +1,42 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+# Formats returned values, e.g. into human-readable text or JSON.
+defmodule RabbitMQ.CLI.FormatterBehaviour do
+ alias RabbitMQ.CLI.Core.Helpers
+
+ @callback format_output(any, map()) :: String.t() | [String.t()]
+ @callback format_stream(Enumerable.t(), map()) :: Enumerable.t()
+
+ @optional_callbacks switches: 0,
+ aliases: 0
+
+ @callback switches() :: Keyword.t()
+ @callback aliases() :: Keyword.t()
+
+ def switches(formatter) do
+ Helpers.apply_if_exported(formatter, :switches, [], [])
+ end
+
+ def aliases(formatter) do
+ Helpers.apply_if_exported(formatter, :aliases, [], [])
+ end
+
+ def module_name(nil) do
+ nil
+ end
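+  # e.g. "pretty_table" resolves to RabbitMQ.CLI.Formatters.PrettyTable:
+  # the formatter name is downcased, then camelized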
+ def module_name(formatter) do
+ mod = formatter |> String.downcase |> Macro.camelize
+ Module.safe_concat("RabbitMQ.CLI.Formatters", mod)
+ end
+
+ def machine_readable?(nil) do
+ false
+ end
+ def machine_readable?(formatter) do
+ Helpers.apply_if_exported(module_name(formatter), :machine_readable?, [], false)
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/csv.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/csv.ex
new file mode 100644
index 0000000000..ab9acd613f
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/csv.ex
@@ -0,0 +1,127 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+alias RabbitMQ.CLI.Formatters.FormatterHelpers
+
+defmodule RabbitMQ.CLI.Formatters.Csv do
+ @behaviour RabbitMQ.CLI.FormatterBehaviour
+
+ def format_stream(stream, _) do
+ ## Flatten list_consumers
+ Stream.flat_map(
+ stream,
+ fn
+ [first | _] = element ->
+ case FormatterHelpers.proplist?(first) or is_map(first) do
+ true -> element
+ false -> [element]
+ end
+
+ other ->
+ [other]
+ end
+ )
+ ## Add info_items names
+ |> Stream.transform(
+ :init,
+ FormatterHelpers.without_errors_2(fn
+ element, :init ->
+ {
+ case keys(element) do
+ nil -> [values(element)]
+ ks -> [ks, values(element)]
+ end,
+ :next
+ }
+
+ element, :next ->
+ {[values(element)], :next}
+ end)
+ )
+ |> CSV.encode(delimiter: "")
+ end
+
+ def format_output(output, _) do
+ case keys(output) do
+ nil -> [values(output)]
+ ks -> [ks, values(output)]
+ end
+ |> CSV.encode()
+ |> Enum.join()
+ end
+
+ def machine_readable?, do: true
+
+ #
+ # Implementation
+ #
+
+ defp keys(map) when is_map(map) do
+ Map.keys(map)
+ end
+
+ defp keys(list) when is_list(list) do
+ case FormatterHelpers.proplist?(list) do
+ true -> Keyword.keys(list)
+ false -> nil
+ end
+ end
+
+ defp keys(_other) do
+ nil
+ end
+
+ defp values(map) when is_map(map) do
+ Map.values(map)
+ end
+
+ defp values([]) do
+ []
+ end
+
+ defp values(list) when is_list(list) do
+ case FormatterHelpers.proplist?(list) do
+ true -> Keyword.values(list)
+ false -> list
+ end
+ end
+
+ defp values(other) do
+ other
+ end
+end
+
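+# The `CSV` library does not know how to encode arbitrary Erlang terms, so the
+# protocol implementations below first render pids, lists, tuples and maps as strings.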
+defimpl CSV.Encode, for: PID do
+ def encode(pid, env \\ []) do
+ FormatterHelpers.format_info_item(pid)
+ |> to_string
+ |> CSV.Encode.encode(env)
+ end
+end
+
+defimpl CSV.Encode, for: List do
+ def encode(list, env \\ []) do
+ FormatterHelpers.format_info_item(list)
+ |> to_string
+ |> CSV.Encode.encode(env)
+ end
+end
+
+defimpl CSV.Encode, for: Tuple do
+ def encode(tuple, env \\ []) do
+ FormatterHelpers.format_info_item(tuple)
+ |> to_string
+ |> CSV.Encode.encode(env)
+ end
+end
+
+defimpl CSV.Encode, for: Map do
+ def encode(map, env \\ []) do
+ FormatterHelpers.format_info_item(map)
+ |> to_string
+ |> CSV.Encode.encode(env)
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/erlang.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/erlang.ex
new file mode 100644
index 0000000000..0a8a78249f
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/erlang.ex
@@ -0,0 +1,18 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Formatters.Erlang do
+ @behaviour RabbitMQ.CLI.FormatterBehaviour
+
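+  # Renders any term using Erlang's `~p` pretty-printing format directive.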
+ def format_output(output, _) do
+ :io_lib.format("~p", [output])
+ |> to_string
+ end
+
+ def format_stream(stream, options) do
+ [format_output(Enum.to_list(stream), options)]
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/formatter_helpers.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/formatter_helpers.ex
new file mode 100644
index 0000000000..2ec4edc3d9
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/formatter_helpers.ex
@@ -0,0 +1,182 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Formatters.FormatterHelpers do
+ import RabbitCommon.Records
+ use Bitwise
+
+ @type error :: {:error, term()} | {:error, integer(), String.t() | [String.t()]}
+
+  @spec without_errors_1((el -> result)) :: (el | error() -> error() | result)
+        when el: term(), result: term()
+ def without_errors_1(fun) do
+ fn
+ {:error, _} = err -> err
+ {:error, _, _} = err -> err
+ other -> fun.(other)
+ end
+ end
+
+  @spec without_errors_2((el, acc -> result)) :: (el | error(), acc -> error() | result)
+        when el: term(), result: term(), acc: term()
+ def without_errors_2(fun) do
+ fn
+ {:error, _} = err, _acc -> err
+ {:error, _, _} = err, _acc -> err
+ other, acc -> fun.(other, acc)
+ end
+ end
+
+ def proplist?([{_key, _value} | rest]), do: proplist?(rest)
+ def proplist?([]), do: true
+ def proplist?(_other), do: false
+
+
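+  # Guard-safe range checks used below to recognise IPv4 (four u8 elements)
+  # and IPv6 (eight u16 elements) address tuples.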
+ defmacro is_u8(x) do
+ quote do
+ unquote(x) >= 0 and unquote(x) <= 255
+ end
+ end
+
+ defmacro is_u16(x) do
+ quote do
+ unquote(x) >= 0 and unquote(x) <= 65_535
+ end
+ end
+
+ def format_info_item(item, escaped \\ true)
+
+ def format_info_item(map, escaped) when is_map(map) do
+ [
+ "\#\{",
+ Enum.map(
+ map,
+ fn {k, v} ->
+ ["#{escape(k, escaped)} => ", format_info_item(v, escaped)]
+ end
+ )
+ |> Enum.join(", "),
+ "}"
+ ]
+ end
+
+  # matches the `resource` record imported from RabbitCommon.Records
+  def format_info_item(resource(name: name), escaped) do
+    escape(name, escaped)
+  end
+
+ def format_info_item({n1, n2, n3, n4} = value, _escaped)
+ when is_u8(n1) and is_u8(n2) and is_u8(n3) and is_u8(n4) do
+ :rabbit_misc.ntoa(value)
+ end
+
+ def format_info_item({k1, k2, k3, k4, k5, k6, k7, k8} = value, _escaped)
+ when is_u16(k1) and is_u16(k2) and is_u16(k3) and is_u16(k4) and
+ is_u16(k5) and is_u16(k6) and is_u16(k7) and is_u16(k8) do
+ :rabbit_misc.ntoa(value)
+ end
+
+ def format_info_item(value, _escaped) when is_pid(value) do
+ :rabbit_misc.pid_to_string(value)
+ end
+
+ def format_info_item(value, escaped) when is_binary(value) do
+ escape(value, escaped)
+ end
+
+ def format_info_item(value, escaped) when is_atom(value) do
+ escape(to_charlist(value), escaped)
+ end
+
+ def format_info_item(
+ [{key, type, _table_entry_value} | _] = value,
+ escaped
+ )
+ when is_binary(key) and
+ is_atom(type) do
+ :io_lib.format(
+ "~1000000000000tp",
+ [prettify_amqp_table(value, escaped)]
+ )
+ end
+
+ def format_info_item([t | _] = value, escaped)
+ when is_tuple(t) or is_pid(t) or is_binary(t) or is_atom(t) or is_list(t) do
+ [
+ "[",
+ Enum.map(
+ value,
+ fn el ->
+ format_info_item(el, escaped)
+ end
+ )
+ |> Enum.join(", "),
+ "]"
+ ]
+ end
+
+ def format_info_item({key, value}, escaped) do
+ ["{", :io_lib.format("~p", [key]), ", ", format_info_item(value, escaped), "}"]
+ end
+
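+  # the very large width in `~tp` effectively disables line wrapping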
+ def format_info_item(value, _escaped) do
+ :io_lib.format("~1000000000000tp", [value])
+ end
+
+ defp prettify_amqp_table(table, escaped) do
+ for {k, t, v} <- table do
+ {escape(k, escaped), prettify_typed_amqp_value(t, v, escaped)}
+ end
+ end
+
+ defp prettify_typed_amqp_value(:longstr, value, escaped) do
+ escape(value, escaped)
+ end
+
+ defp prettify_typed_amqp_value(:table, value, escaped) do
+ prettify_amqp_table(value, escaped)
+ end
+
+ defp prettify_typed_amqp_value(:array, value, escaped) do
+ for {t, v} <- value, do: prettify_typed_amqp_value(t, v, escaped)
+ end
+
+ defp prettify_typed_amqp_value(_type, value, _escaped) do
+ value
+ end
+
+ defp escape(atom, escaped) when is_atom(atom) do
+ escape(to_charlist(atom), escaped)
+ end
+
+ defp escape(bin, escaped) when is_binary(bin) do
+ escape(to_charlist(bin), escaped)
+ end
+
+ defp escape(l, false) when is_list(l) do
+ escape_char(:lists.reverse(l), [])
+ end
+
+ defp escape(l, true) when is_list(l) do
+ l
+ end
+
+ defp escape_char([?\\ | t], acc) do
+ escape_char(t, [?\\, ?\\ | acc])
+ end
+
+ defp escape_char([x | t], acc) when x >= 32 and x != 127 do
+ escape_char(t, [x | acc])
+ end
+
+  defp escape_char([x | t], acc) do
+    # render the character as a three-digit octal escape, e.g. `\012`;
+    # the parentheses matter: in Elixir `>>>` binds tighter than `&&&`
+    escape_char(t, [?\\, ?0 + (x >>> 6), ?0 + ((x &&& 0o070) >>> 3), ?0 + (x &&& 7) | acc])
+  end
+
+ defp escape_char([], acc) do
+ acc
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/inspect.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/inspect.ex
new file mode 100644
index 0000000000..5939007cfe
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/inspect.ex
@@ -0,0 +1,40 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+alias RabbitMQ.CLI.Formatters.FormatterHelpers
+
+defmodule RabbitMQ.CLI.Formatters.Inspect do
+ @behaviour RabbitMQ.CLI.FormatterBehaviour
+
+ def format_output(output, _) do
+ case is_binary(output) do
+ true -> output
+ false -> inspect(output)
+ end
+ end
+
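+  # Renders the stream as one bracketed, comma-separated sequence,
+  # inspecting each element as it arrives.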
+ def format_stream(stream, options) do
+ elements =
+ Stream.scan(
+ stream,
+ :empty,
+ FormatterHelpers.without_errors_2(fn element, previous ->
+ separator =
+ case previous do
+ :empty -> ""
+ _ -> ","
+ end
+
+ format_element(element, separator, options)
+ end)
+ )
+
+ Stream.concat([["["], elements, ["]"]])
+ end
+
+ def format_element(val, separator, options) do
+ separator <> format_output(val, options)
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/json.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/json.ex
new file mode 100644
index 0000000000..eb55038715
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/json.ex
@@ -0,0 +1,69 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+# Basic JSON formatter. Supports a single level of collections.
+# Its primary purpose is to translate streams coming from CTL commands,
+# so there is no need for multiple collection levels.
+alias RabbitMQ.CLI.Formatters.FormatterHelpers
+
+defmodule RabbitMQ.CLI.Formatters.Json do
+ @behaviour RabbitMQ.CLI.FormatterBehaviour
+
+ def format_output(output, opts) when is_bitstring(output) do
+ format_output(%{"message" => output}, opts)
+ end
+ def format_output(output, _opts) do
+ {:ok, json} = JSON.encode(keys_to_atoms(output))
+ json
+ end
+
+ def format_stream(stream, options) do
+ ## Flatten list_consumers
+ elements =
+ Stream.flat_map(
+ stream,
+ fn
+ [first | _] = element ->
+ case FormatterHelpers.proplist?(first) or is_map(first) do
+ true -> element
+ false -> [element]
+ end
+
+ other ->
+ [other]
+ end
+ )
+ |> Stream.scan(
+ :empty,
+ FormatterHelpers.without_errors_2(fn element, previous ->
+ separator =
+ case previous do
+ :empty -> ""
+ _ -> ","
+ end
+
+ format_element(element, separator, options)
+ end)
+ )
+
+ Stream.concat([["["], elements, ["]"]])
+ end
+
+  def keys_to_atoms(enum) do
+    Enum.map(enum, fn
+      {k, v} when is_binary(k) -> {String.to_atom(k), v}
+      # String.to_atom/1 only accepts binaries; charlist keys need List.to_atom/1
+      {k, v} when is_list(k) -> {List.to_atom(k), v}
+      other -> other
+    end)
+  end
+
+ def format_element(val, separator, options) do
+ separator <> format_output(val, options)
+ end
+
+ def machine_readable?, do: true
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/json_stream.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/json_stream.ex
new file mode 100644
index 0000000000..a1bea3fc11
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/json_stream.ex
@@ -0,0 +1,75 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Formatters.JsonStream do
+ @moduledoc """
+  Formats a potentially infinite stream of maps, proplists, keyword lists
+  and other map-like structures, emitting one JSON document per line.
+
+  The output excludes JSON array boundaries, so it can be fed
+  to `jq` for pretty-printing, filtering and querying.
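+
+  An illustrative invocation, assuming a command that streams results:
+
+      rabbitmqctl list_queues --formatter=json_stream | jq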
+ """
+
+ @behaviour RabbitMQ.CLI.FormatterBehaviour
+
+ alias RabbitMQ.CLI.Formatters.FormatterHelpers
+ alias RabbitMQ.CLI.Core.Platform
+
+ def format_output("", _opts) do
+    # an empty string can be emitted together with the finishing marker that
+    # ends the stream (e.g. by commands that take a duration argument);
+    # pass it through unchanged as the stream's last value
+ ""
+ end
+ def format_output(output, _opts) do
+ {:ok, json} = JSON.encode(keys_to_atoms(output))
+ json
+ end
+
+ def format_stream(stream, options) do
+ elements =
+ Stream.flat_map(
+ stream,
+ fn
+ [first | _] = element ->
+ case FormatterHelpers.proplist?(first) or is_map(first) do
+ true -> element
+ false -> [element]
+ end
+
+ other ->
+ [other]
+ end
+ )
+ |> Stream.scan(
+ :empty,
+ FormatterHelpers.without_errors_2(fn element, _previous ->
+ format_element(element, options)
+ end)
+ )
+
+ elements
+ end
+
+  def keys_to_atoms(enum) do
+    Enum.map(enum, fn
+      {k, v} when is_binary(k) -> {String.to_atom(k), v}
+      # String.to_atom/1 only accepts binaries; charlist keys need List.to_atom/1
+      {k, v} when is_list(k) -> {List.to_atom(k), v}
+      other -> other
+    end)
+  end
+
+ def format_element(val, options) do
+ format_output(val, options) <> Platform.line_separator()
+ end
+
+ def machine_readable?, do: true
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/msacc.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/msacc.ex
new file mode 100644
index 0000000000..992475a2d0
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/msacc.ex
@@ -0,0 +1,19 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Formatters.Msacc do
+ @behaviour RabbitMQ.CLI.FormatterBehaviour
+
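+  # Delegates rendering to OTP's `msacc` (microstate accounting) module,
+  # capturing its console output via a StringIO device.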
+ def format_output(output, _) do
+ {:ok, io} = StringIO.open("")
+ :msacc.print(io, output, %{})
+ StringIO.flush(io)
+ end
+
+ def format_stream(stream, options) do
+ [format_output(Enum.to_list(stream), options)]
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/plugins.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/plugins.ex
new file mode 100644
index 0000000000..54881cc32f
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/plugins.ex
@@ -0,0 +1,247 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+alias RabbitMQ.CLI.Formatters.FormatterHelpers
+
+defmodule RabbitMQ.CLI.Formatters.Plugins do
+ @behaviour RabbitMQ.CLI.FormatterBehaviour
+
+ def format_output(
+ %{status: status, format: format, plugins: plugins},
+ options
+ ) do
+ legend(status, format, options) ++ format_plugins(plugins, format)
+ end
+
+ def format_output(%{enabled: enabled, mode: _} = output, options) do
+ case length(enabled) do
+ 0 ->
+ ["Plugin configuration unchanged."]
+
+ _ ->
+ [
+ "The following plugins have been enabled:"
+ | for plugin <- enabled do
+ " #{plugin}"
+ end
+ ] ++
+ [""] ++
+ applying(output, options) ++
+ log_offline(output)
+ end
+ end
+
+ def format_output(%{disabled: disabled, mode: _} = output, options) do
+ case length(disabled) do
+ 0 ->
+ ["Plugin configuration unchanged."]
+
+ _ ->
+ [
+ "The following plugins have been disabled:"
+ | for plugin <- disabled do
+ " #{plugin}"
+ end
+ ] ++
+ [""] ++
+ applying(output, options) ++
+ log_offline(output)
+ end
+ end
+
+ ## Do not print enabled/disabled for set command
+ def format_output(%{} = output, options) do
+ applying(output, options)
+ end
+
+ def format_output([], %{node: node}) do
+ ["All plugins have been disabled.", "Applying plugin configuration to #{node}..."]
+ end
+
+ def format_output(plugins, %{node: node}) when is_list(plugins) do
+ [
+ "The following plugins have been configured:"
+ | for plugin <- plugins do
+ " #{plugin}"
+ end
+ ] ++
+ ["Applying plugin configuration to #{node}..."]
+ end
+
+ def format_output(output, _) do
+ :io_lib.format("~p", [output])
+ |> to_string
+ end
+
+ def format_stream(stream, options) do
+ Stream.map(
+ stream,
+ FormatterHelpers.without_errors_1(fn element ->
+ format_output(element, options)
+ end)
+ )
+ end
+
+ defp format_plugins(plugins, format) do
+ max_name_length =
+ Enum.reduce(plugins, 0, fn %{name: name}, len ->
+ max(String.length(to_string(name)), len)
+ end)
+
+ for plugin <- plugins do
+ format_plugin(plugin, format, max_name_length)
+ end
+ |> List.flatten()
+ end
+
+ defp format_plugin(%{name: name}, :minimal, _) do
+ to_string(name)
+ end
+
+ defp format_plugin(plugin, :normal, max_name_length) do
+ [summary(plugin) <> inline_version(plugin, max_name_length)]
+ end
+
+ defp format_plugin(plugin, :verbose, _) do
+ [summary(plugin) | verbose(plugin)]
+ end
+
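+  # Renders the two-character status column, e.g. "[E*] rabbitmq_management"
+  # means explicitly enabled and currently running.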
+ defp summary(%{name: name, enabled: enabled, running: running}) do
+ enabled_sign =
+ case enabled do
+ :implicit -> "e"
+ :enabled -> "E"
+ :not_enabled -> " "
+ end
+
+ running_sign =
+ case running do
+ true -> "*"
+ false -> " "
+ end
+
+ "[#{enabled_sign}#{running_sign}] #{name}"
+ end
+
+ defp inline_version(%{name: name} = plugin, max_name_length) do
+ spacing =
+ String.duplicate(
+ " ",
+ max_name_length -
+ String.length(to_string(name))
+ )
+
+ spacing <> " " <> augment_version(plugin)
+ end
+
+ defp verbose(%{dependencies: dependencies, description: description} = plugin) do
+ prettified = to_string(:io_lib.format("~p", [dependencies]))
+
+ [
+ " Version: \t#{augment_version(plugin)}",
+ " Dependencies:\t#{prettified}",
+ " Description: \t#{description}"
+ ]
+ end
+
+ defp augment_version(%{version: version, running_version: nil}) do
+ to_string(version)
+ end
+
+ defp augment_version(%{version: version, running_version: version}) do
+ to_string(version)
+ end
+
+ defp augment_version(%{version: version, running_version: running_version}) do
+ "#{running_version} (pending upgrade to #{version})"
+ end
+
+ ## Do not print legend in minimal, quiet or silent mode
+ defp legend(_, :minimal, _) do
+ []
+ end
+ defp legend(_, _, %{quiet: true}) do
+ []
+ end
+ defp legend(_, _, %{silent: true}) do
+ []
+ end
+
+ defp legend(status, _, %{node: node}) do
+ [
+ " Configured: E = explicitly enabled; e = implicitly enabled",
+ " | Status: #{status_message(status, node)}",
+ " |/"
+ ]
+ end
+
+ defp status_message(:running, node) do
+ "* = running on #{node}"
+ end
+
+ defp status_message(:node_down, node) do
+ "[failed to contact #{node} - status not shown]"
+ end
+
+ defp applying(%{mode: :offline, set: set_plugins}, _) do
+ set_plugins_message =
+ case length(set_plugins) do
+ 0 -> "nothing to do"
+ len -> "set #{len} plugins"
+ end
+
+ [set_plugins_message <> "."]
+ end
+
+ defp applying(%{mode: :offline, enabled: enabled}, _) do
+ enabled_message =
+ case length(enabled) do
+ 0 -> "nothing to do"
+ len -> "enabled #{len} plugins"
+ end
+
+ [enabled_message <> "."]
+ end
+
+ defp applying(%{mode: :offline, disabled: disabled}, _) do
+ disabled_message =
+ case length(disabled) do
+ 0 -> "nothing to do"
+ len -> "disabled #{len} plugins"
+ end
+
+ [disabled_message <> "."]
+ end
+
+ defp applying(%{mode: :online, started: started, stopped: stopped}, _) do
+ stopped_message =
+ case length(stopped) do
+ 0 -> []
+ len -> ["stopped #{len} plugins"]
+ end
+
+ started_message =
+ case length(started) do
+ 0 -> []
+ len -> ["started #{len} plugins"]
+ end
+
+ change_message =
+ case Enum.join(started_message ++ stopped_message, " and ") do
+ "" -> "nothing to do"
+ msg -> msg
+ end
+
+ [change_message <> "."]
+ end
+
+ defp log_offline(%{mode: :offline}) do
+ ["Offline change; changes will take effect at broker restart."]
+ end
+
+ defp log_offline(_) do
+ []
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/pretty_table.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/pretty_table.ex
new file mode 100644
index 0000000000..6b9b7ed9fd
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/pretty_table.ex
@@ -0,0 +1,87 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Formatters.PrettyTable do
+ @behaviour RabbitMQ.CLI.FormatterBehaviour
+
+ alias RabbitMQ.CLI.Formatters.FormatterHelpers
+
+ require Record
+ import Record
+
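+  # extract record definitions from the `stdout_formatter` dependency at
+  # compile time so they can be used as Elixir records below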
+  defrecord :table, extract(:table,
+    from_lib: "stdout_formatter/include/stdout_formatter.hrl")
+ defrecord :cell, extract(:cell,
+ from_lib: "stdout_formatter/include/stdout_formatter.hrl")
+ defrecord :paragraph, extract(:paragraph,
+ from_lib: "stdout_formatter/include/stdout_formatter.hrl")
+
+ def format_stream(stream, _opts) do
+ # Flatten for list_consumers
+ entries_with_keys = Stream.flat_map(stream,
+ fn([first | _] = element) ->
+ case FormatterHelpers.proplist?(first) or is_map(first) do
+            true -> element
+ false -> [element]
+ end
+ (other) ->
+ [other]
+ end)
+ |> Enum.to_list()
+
+ # Use stdout_formatter library to format the table.
+ case entries_with_keys do
+ [first_entry | _] ->
+ col_headers = Stream.map(first_entry,
+ fn({key, _}) ->
+ cell(content: key, props: %{:title => true})
+ end)
+ |> Enum.to_list()
+ rows = Stream.map(entries_with_keys,
+ fn(element) ->
+ Stream.map(element,
+ fn({_, value}) ->
+ cell(content: value, props: %{})
+ end)
+ |> Enum.to_list()
+ end)
+ |> Enum.to_list()
+ ret = :stdout_formatter.to_string(
+ table(
+ rows: [col_headers | rows],
+ props: %{:cell_padding => {0, 1}}))
+        [to_string(ret)]
+ [] ->
+ entries_with_keys
+ end
+ end
+
+ def format_output(output, _opts) do
+ format = case is_binary(output) do
+ true -> "~s"
+ false -> "~p"
+ end
+ ret = :stdout_formatter.to_string(
+ table(
+ rows: [
+ [cell(content: "Output", props: %{:title => true})],
+ [cell(
+ content: paragraph(content: output,
+ props: %{:format => format}))]],
+ props: %{:cell_padding => {0, 1}}))
+    to_string(ret)
+ end
+
+ def format_value(value) do
+ case is_binary(value) do
+ true -> value
+ false -> case is_atom(value) do
+ true -> to_string(value)
+ false -> inspect(value)
+ end
+ end
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/report.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/report.ex
new file mode 100644
index 0000000000..4db89d611f
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/report.ex
@@ -0,0 +1,52 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2017-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Formatters.Report do
+ alias RabbitMQ.CLI.Formatters.FormatterHelpers
+ alias RabbitMQ.CLI.Core.{Output, Config}
+
+ @behaviour RabbitMQ.CLI.FormatterBehaviour
+ def format_output(_, _) do
+ raise "format_output is not implemented for report formatter"
+ end
+
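+  # A report is a stream of {command, banner, result} tuples; each result is
+  # rendered with the formatter configured for the command that produced it.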
+ def format_stream(stream, options) do
+ quiet = options[:quiet] || options[:silent] || false
+
+ Stream.flat_map(
+ stream,
+ FormatterHelpers.without_errors_1(fn
+ {_command, _banner, {:error, _} = err} ->
+ err
+
+ {_command, _banner, {:error, _, _} = err} ->
+ err
+
+ {command, banner, result} ->
+ case quiet do
+ true ->
+ Stream.concat([""], format_result(command, result, options))
+
+ false ->
+ Stream.concat(["" | banner_list(banner)], format_result(command, result, options))
+ end
+ end)
+ )
+ end
+
+ def format_result(command, output, options) do
+ formatter = Config.get_formatter(command, options)
+
+ case Output.format_output(output, formatter, options) do
+ :ok -> []
+ {:ok, val} -> [val]
+ {:stream, stream} -> stream
+ end
+ end
+
+ def banner_list([_ | _] = list), do: list
+ def banner_list(val), do: [val]
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/string.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/string.ex
new file mode 100644
index 0000000000..6fd7f2e0e3
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/string.ex
@@ -0,0 +1,26 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+## Prints values from a command as strings (if possible)
+defmodule RabbitMQ.CLI.Formatters.String do
+ alias RabbitMQ.CLI.Core.Helpers
+ alias RabbitMQ.CLI.Formatters.FormatterHelpers
+
+ @behaviour RabbitMQ.CLI.FormatterBehaviour
+
+ def format_output(output, _) do
+ Helpers.string_or_inspect(output)
+ end
+
+ def format_stream(stream, options) do
+ Stream.map(
+ stream,
+ FormatterHelpers.without_errors_1(fn el ->
+ format_output(el, options)
+ end)
+ )
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/string_per_line.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/string_per_line.ex
new file mode 100644
index 0000000000..4761b9a555
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/string_per_line.ex
@@ -0,0 +1,42 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Formatters.StringPerLine do
+  @moduledoc """
+  Use this to output one stream (collection) element per line,
+  using the string formatter. Flattens the stream.
+  """
+
+ alias RabbitMQ.CLI.Formatters.FormatterHelpers
+ alias RabbitMQ.CLI.Core.Helpers
+ import RabbitMQ.CLI.Core.Platform, only: [line_separator: 0]
+
+ @behaviour RabbitMQ.CLI.FormatterBehaviour
+
+  def format_output(output, _) do
+    # join with the platform line separator so that each element lands on its
+    # own line and `format_element/3` below concatenates binaries
+    Enum.map(output, fn el -> Helpers.string_or_inspect(el) end)
+    |> Enum.join(line_separator())
+  end
+
+ def format_stream(stream, options) do
+ Stream.scan(
+ stream,
+ :empty,
+ FormatterHelpers.without_errors_2(fn element, previous ->
+ separator =
+ case previous do
+ :empty -> ""
+ _ -> line_separator()
+ end
+
+ format_element(element, separator, options)
+ end)
+ )
+ end
+
+ def format_element(val, separator, options) do
+ separator <> format_output(val, options)
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/table.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/table.ex
new file mode 100644
index 0000000000..72d1682202
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/table.ex
@@ -0,0 +1,138 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+alias RabbitMQ.CLI.Formatters.FormatterHelpers
+
+defmodule RabbitMQ.CLI.Formatters.Table do
+ @behaviour RabbitMQ.CLI.FormatterBehaviour
+
+ def switches(), do: [table_headers: :boolean, pad_to_header: :boolean]
+
+ def format_stream(stream, options) do
+ # Flatten for list_consumers
+ Stream.flat_map(
+ stream,
+ fn
+ [first | _] = element ->
+ case FormatterHelpers.proplist?(first) or is_map(first) do
+ true -> element
+ false -> [element]
+ end
+
+ other ->
+ [other]
+ end
+ )
+ |> Stream.transform(
+ :init,
+ FormatterHelpers.without_errors_2(fn
+ element, :init ->
+ {maybe_header(element, options), :next}
+
+ element, :next ->
+ {[format_output_1(element, options)], :next}
+ end)
+ )
+ end
+
+ def format_output(output, options) do
+ maybe_header(output, options)
+ end
+
+ defp maybe_header(output, options) do
+ opt_table_headers = Map.get(options, :table_headers, true)
+ opt_silent = Map.get(options, :silent, false)
+
+ case {opt_silent, opt_table_headers} do
+ {true, _} ->
+ [format_output_1(output, options)]
+
+ {false, false} ->
+ [format_output_1(output, options)]
+
+ {false, true} ->
+ format_header(output) ++ [format_output_1(output, options)]
+ end
+ end
+
+ defp format_output_1(output, options) when is_map(output) do
+ escaped = escaped?(options)
+ pad_to_header = pad_to_header?(options)
+ format_line(output, escaped, pad_to_header)
+ end
+
+ defp format_output_1([], _) do
+ ""
+ end
+
+ defp format_output_1(output, options) do
+ escaped = escaped?(options)
+ pad_to_header = pad_to_header?(options)
+
+ case FormatterHelpers.proplist?(output) do
+ true -> format_line(output, escaped, pad_to_header)
+ false -> format_inspect(output)
+ end
+ end
+
+ defp escaped?(_), do: true
+
+ defp pad_to_header?(%{pad_to_header: pad}), do: pad
+ defp pad_to_header?(_), do: false
+
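+  # Pads each value to the width of its column header so that values line up
+  # with the header row when both are printed with tab separators.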
+  defp format_line(line, escaped, pad_to_header) do
+    values =
+      Enum.map(
+        line,
+        fn {k, v} ->
+          formatted = FormatterHelpers.format_info_item(v, escaped)
+
+          case pad_to_header do
+            true ->
+              String.pad_trailing(
+                to_string(formatted),
+                String.length(to_string(k))
+              )
+
+            false ->
+              formatted
+          end
+        end
+      )
+
+    Enum.join(values, "\t")
+  end
+
+ defp format_inspect(output) do
+ case is_binary(output) do
+ true -> output
+ false -> inspect(output)
+ end
+ end
+
+ @spec format_header(term()) :: [String.t()]
+ defp format_header(output) do
+ keys =
+ case output do
+ map when is_map(map) ->
+ Map.keys(map)
+
+ keyword when is_list(keyword) ->
+ case FormatterHelpers.proplist?(keyword) do
+ true -> Keyword.keys(keyword)
+ false -> []
+ end
+
+ _ ->
+ []
+ end
+
+ case keys do
+ [] -> []
+ _ -> [Enum.join(keys, "\t")]
+ end
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/information_unit.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/information_unit.ex
new file mode 100644
index 0000000000..ebef8de0ba
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/information_unit.ex
@@ -0,0 +1,68 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.InformationUnit do
+
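+  # unit multipliers are decimal (SI): 1 kb is 1000 bytes, not 1024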
+ @kilobyte_bytes 1000
+ @megabyte_bytes @kilobyte_bytes * 1000
+ @gigabyte_bytes @megabyte_bytes * 1000
+ @terabyte_bytes @gigabyte_bytes * 1000
+
+ def known_units() do
+ MapSet.new([
+ "bytes",
+ "kb",
+ "kilobytes",
+ "mb",
+ "megabytes",
+ "gb",
+ "gigabytes",
+ "tb",
+ "terabytes"
+ ])
+ end
+
+ def parse(val) do
+ :rabbit_resource_monitor_misc.parse_information_unit(val)
+ end
+
+ def convert(bytes, "bytes") do
+ bytes
+ end
+
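+  # e.g. convert(2_000_000, "mb") #=> 2.0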
+ def convert(bytes, unit) do
+ do_convert(bytes, String.downcase(unit))
+ end
+
+ def known_unit?(val) do
+ MapSet.member?(known_units(), String.downcase(val))
+ end
+
+ defp do_convert(bytes, "kb") do
+ Float.round(bytes / @kilobyte_bytes, 4)
+ end
+
+ defp do_convert(bytes, "kilobytes"), do: do_convert(bytes, "kb")
+
+ defp do_convert(bytes, "mb") do
+ Float.round(bytes / @megabyte_bytes, 4)
+ end
+
+ defp do_convert(bytes, "megabytes"), do: do_convert(bytes, "mb")
+
+ defp do_convert(bytes, "gb") do
+ Float.round(bytes / @gigabyte_bytes, 4)
+ end
+
+ defp do_convert(bytes, "gigabytes"), do: do_convert(bytes, "gb")
+
+ defp do_convert(bytes, "tb") do
+ Float.round(bytes / @terabyte_bytes, 4)
+ end
+
+ defp do_convert(bytes, "terabytes"), do: do_convert(bytes, "tb")
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/plugins/commands/directories_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/plugins/commands/directories_command.ex
new file mode 100644
index 0000000000..c3b6aecc0d
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/plugins/commands/directories_command.ex
@@ -0,0 +1,134 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Plugins.Commands.DirectoriesCommand do
+ alias RabbitMQ.CLI.Plugins.Helpers, as: PluginHelpers
+ alias RabbitMQ.CLI.Core.{DocGuide, Validators, Config}
+ import RabbitMQ.CLI.Core.{CodePath, Paths}
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ def merge_defaults(args, %{offline: true} = opts) do
+ {args, opts}
+ end
+
+ def merge_defaults(args, opts) do
+ {args, Map.merge(%{online: true, offline: false}, opts)}
+ end
+
+ def distribution(%{offline: true}), do: :none
+ def distribution(%{offline: false}), do: :cli
+
+ def switches(), do: [online: :boolean, offline: :boolean]
+
+ def validate(_, %{online: true, offline: true}) do
+ {:validation_failure, {:bad_argument, "Cannot set both online and offline"}}
+ end
+
+ def validate(_, %{online: false, offline: false}) do
+ {:validation_failure, {:bad_argument, "Cannot set online and offline to false"}}
+ end
+
+ def validate([_ | _], _) do
+ {:validation_failure, :too_many_args}
+ end
+
+ def validate([], _) do
+ :ok
+ end
+
+ def validate_execution_environment(args, %{offline: true} = opts) do
+ Validators.chain(
+ [
+ &require_rabbit_and_plugins/2,
+ &PluginHelpers.enabled_plugins_file/2,
+ &plugins_dir/2
+ ],
+ [args, opts]
+ )
+ end
+
+ def validate_execution_environment(args, %{online: true} = opts) do
+ Validators.node_is_running(args, opts)
+ end
+
+ def run([], %{online: true, node: node_name}) do
+ do_run(fn key ->
+ :rabbit_misc.rpc_call(node_name, :rabbit_plugins, key, [])
+ end)
+ end
+
+ def run([], %{offline: true} = opts) do
+ do_run(fn key ->
+ Config.get_option(key, opts)
+ end)
+ end
+
+ def output({:ok, _map} = res, %{formatter: "json"}) do
+ res
+ end
+
+ def output({:ok, map}, _opts) do
+ s = """
+ Plugin archives directory: #{Map.get(map, :plugins_dir)}
+ Plugin expansion directory: #{Map.get(map, :plugins_expand_dir)}
+ Enabled plugins file: #{Map.get(map, :enabled_plugins_file)}
+ """
+
+ {:ok, String.trim_trailing(s)}
+ end
+
+ def output({:error, err}, _opts) do
+ {:error, RabbitMQ.CLI.Core.ExitCodes.exit_software(), err}
+ end
+
+ use RabbitMQ.CLI.DefaultOutput
+
+ def banner([], %{offline: true}) do
+ "Listing plugin directories inferred from local environment..."
+ end
+
+ def banner([], %{online: true, node: node}) do
+    "Listing plugin directories used by node #{node}..."
+ end
+
+ def usage, do: "directories [--offline] [--online]"
+
+ def usage_additional() do
+ [
+ ["--offline", "do not contact target node. Try to infer directories from the environment."],
+ ["--online", "infer directories from target node."]
+ ]
+ end
+
+ def usage_doc_guides() do
+ [
+ DocGuide.plugins()
+ ]
+ end
+
+ def help_section(), do: :observability_and_health_checks
+
+ def description(), do: "Displays plugin directory and enabled plugin file paths"
+
+ #
+ # Implementation
+ #
+
+ defp do_run(fun) do
+ # return an error or an {:ok, map} tuple
+ Enum.reduce([:plugins_dir, :plugins_expand_dir, :enabled_plugins_file], {:ok, %{}}, fn
+ _, {:error, err} ->
+ {:error, err}
+
+ key, {:ok, acc} ->
+ case fun.(key) do
+ {:error, err} -> {:error, err}
+ val -> {:ok, Map.put(acc, key, :rabbit_data_coercion.to_binary(val))}
+ end
+ end)
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/plugins/commands/disable_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/plugins/commands/disable_command.ex
new file mode 100644
index 0000000000..4fea2ad34e
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/plugins/commands/disable_command.ex
@@ -0,0 +1,146 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Plugins.Commands.DisableCommand do
+ alias RabbitMQ.CLI.Plugins.Helpers, as: PluginHelpers
+ alias RabbitMQ.CLI.Core.{DocGuide, Validators}
+ import RabbitMQ.CLI.Core.{CodePath, Paths}
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ def formatter(), do: RabbitMQ.CLI.Formatters.Plugins
+
+ def merge_defaults(args, opts) do
+ {args, Map.merge(%{online: false, offline: false, all: false}, opts)}
+ end
+
+ def distribution(%{offline: true}), do: :none
+ def distribution(%{offline: false}), do: :cli
+
+ def switches(), do: [online: :boolean, offline: :boolean, all: :boolean]
+
+ def validate([], %{all: false}) do
+ {:validation_failure, :not_enough_args}
+ end
+
+ def validate([_ | _], %{all: true}) do
+ {:validation_failure, {:bad_argument, "Cannot set both --all and a list of plugins"}}
+ end
+
+ def validate(_, %{online: true, offline: true}) do
+ {:validation_failure, {:bad_argument, "Cannot set both online and offline"}}
+ end
+
+ def validate(_args, _opts) do
+ :ok
+ end
+
+ def validate_execution_environment(args, opts) do
+ Validators.chain(
+ [
+ &PluginHelpers.can_set_plugins_with_mode/2,
+ &require_rabbit_and_plugins/2,
+ &PluginHelpers.enabled_plugins_file/2,
+ &plugins_dir/2
+ ],
+ [args, opts]
+ )
+ end
+
+ def run(plugin_names, %{all: all_flag, node: node_name} = opts) do
+ plugins =
+ case all_flag do
+ false -> for s <- plugin_names, do: String.to_atom(s)
+ true -> PluginHelpers.plugin_names(PluginHelpers.list(opts))
+ end
+
+ enabled = PluginHelpers.read_enabled(opts)
+ all = PluginHelpers.list(opts)
+ implicit = :rabbit_plugins.dependencies(false, enabled, all)
+ to_disable_deps = :rabbit_plugins.dependencies(true, plugins, all)
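+    # a plugin cannot remain enabled when a plugin it depends on is disabled,
+    # so every dependent of the given plugins is disabled as well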
+ plugins_to_set = MapSet.difference(MapSet.new(enabled), MapSet.new(to_disable_deps))
+
+ mode = PluginHelpers.mode(opts)
+
+ case PluginHelpers.set_enabled_plugins(MapSet.to_list(plugins_to_set), opts) do
+ {:ok, enabled_plugins} ->
+ {:stream,
+ Stream.concat([
+ [:rabbit_plugins.strictly_plugins(enabled_plugins, all)],
+ RabbitMQ.CLI.Core.Helpers.defer(fn ->
+ :timer.sleep(5000)
+
+ case PluginHelpers.update_enabled_plugins(enabled_plugins, mode, node_name, opts) do
+ %{set: new_enabled} = result ->
+ disabled = implicit -- new_enabled
+
+ filter_strictly_plugins(
+ Map.put(result, :disabled, :rabbit_plugins.strictly_plugins(disabled, all)),
+ all,
+ [:set, :started, :stopped]
+ )
+
+ other ->
+ other
+ end
+ end)
+ ])}
+
+ {:error, _} = err ->
+ err
+ end
+ end
+
+ use RabbitMQ.CLI.Plugins.ErrorOutput
+
+ def banner([], %{all: true, node: node_name}) do
+ "Disabling ALL plugins on node #{node_name}"
+ end
+
+ def banner(plugins, %{node: node_name}) do
+ ["Disabling plugins on node #{node_name}:" | plugins]
+ end
+
+ def usage, do: "disable <plugin1> [ <plugin2>] | --all [--offline] [--online]"
+
+ def usage_additional() do
+ [
+ ["<plugin1> [ <plugin2>]", "names of plugins to disable separated by a space"],
+ ["--online", "contact target node to disable the plugins. Changes are applied immediately."],
+ ["--offline", "update enabled plugins file directly without contacting target node. Changes will be delayed until the node is restarted."],
+ ["--all", "disable all currently enabled plugins"]
+ ]
+ end
+
+ def usage_doc_guides() do
+ [
+ DocGuide.plugins()
+ ]
+ end
+
+ def help_section(), do: :plugin_management
+
+ def description(), do: "Disables one or more plugins"
+
+ #
+ # Implementation
+ #
+
+ defp filter_strictly_plugins(map, _all, []) do
+ map
+ end
+
+ defp filter_strictly_plugins(map, all, [head | tail]) do
+ case map[head] do
+ nil ->
+ filter_strictly_plugins(map, all, tail)
+
+ other ->
+ value = :rabbit_plugins.strictly_plugins(other, all)
+ filter_strictly_plugins(Map.put(map, head, value), all, tail)
+ end
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/plugins/commands/enable_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/plugins/commands/enable_command.ex
new file mode 100644
index 0000000000..530a2cbb6a
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/plugins/commands/enable_command.ex
@@ -0,0 +1,156 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Plugins.Commands.EnableCommand do
+ alias RabbitMQ.CLI.Plugins.Helpers, as: PluginHelpers
+ alias RabbitMQ.CLI.Core.{DocGuide, Validators}
+ import RabbitMQ.CLI.Core.{CodePath, Paths}
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ def formatter(), do: RabbitMQ.CLI.Formatters.Plugins
+
+ def merge_defaults(args, opts) do
+ {args, Map.merge(%{online: false, offline: false, all: false}, opts)}
+ end
+
+ def distribution(%{offline: true}), do: :none
+ def distribution(%{offline: false}), do: :cli
+
+ def switches(), do: [online: :boolean, offline: :boolean, all: :boolean]
+
+ def validate([], %{all: false}) do
+ {:validation_failure, :not_enough_args}
+ end
+
+ def validate([_ | _], %{all: true}) do
+ {:validation_failure, {:bad_argument, "Cannot set both --all and a list of plugins"}}
+ end
+
+ def validate(_, %{online: true, offline: true}) do
+ {:validation_failure, {:bad_argument, "Cannot set both online and offline"}}
+ end
+
+ def validate(_, _) do
+ :ok
+ end
+
+ def validate_execution_environment(args, opts) do
+ Validators.chain(
+ [
+ &PluginHelpers.can_set_plugins_with_mode/2,
+ &require_rabbit_and_plugins/2,
+ &PluginHelpers.enabled_plugins_file/2,
+ &plugins_dir/2
+ ],
+ [args, opts]
+ )
+ end
+
+ def run(plugin_names, %{all: all_flag} = opts) do
+ plugins =
+ case all_flag do
+ false -> for s <- plugin_names, do: String.to_atom(s)
+ true -> PluginHelpers.plugin_names(PluginHelpers.list(opts))
+ end
+
+ case PluginHelpers.validate_plugins(plugins, opts) do
+ :ok -> do_run(plugins, opts)
+ other -> other
+ end
+ end
+
+ use RabbitMQ.CLI.Plugins.ErrorOutput
+
+ def banner([], %{all: true, node: node_name}) do
+ "Enabling ALL plugins on node #{node_name}"
+ end
+
+ def banner(plugins, %{node: node_name}) do
+ ["Enabling plugins on node #{node_name}:" | plugins]
+ end
+
+ def usage, do: "enable <plugin1> [ <plugin2>] | --all [--offline] [--online]"
+
+ def usage_additional() do
+ [
+ ["<plugin1> [ <plugin2>]", "names of plugins to enable separated by a space"],
+ ["--online", "contact target node to enable the plugins. Changes are applied immediately."],
+ ["--offline", "update enabled plugins file directly without contacting target node. Changes will be delayed until the node is restarted."],
+ ["--all", "enable all available plugins. Not recommended as some plugins may conflict or otherwise be incompatible!"]
+ ]
+ end
+
+ def usage_doc_guides() do
+ [
+ DocGuide.plugins()
+ ]
+ end
+
+ def help_section(), do: :plugin_management
+
+ def description(), do: "Enables one or more plugins"
+
+ #
+ # Implementation
+ #
+
+ def do_run(plugins, %{node: node_name} = opts) do
+ enabled = PluginHelpers.read_enabled(opts)
+ all = PluginHelpers.list(opts)
+ implicit = :rabbit_plugins.dependencies(false, enabled, all)
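+    # enabled_implicitly: plugins pulled in purely as dependencies of
+    # explicitly enabled plugins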
+ enabled_implicitly = MapSet.difference(MapSet.new(implicit), MapSet.new(enabled))
+
+ plugins_to_set =
+ MapSet.union(
+ MapSet.new(enabled),
+ MapSet.difference(MapSet.new(plugins), enabled_implicitly)
+ )
+
+ mode = PluginHelpers.mode(opts)
+
+ case PluginHelpers.set_enabled_plugins(MapSet.to_list(plugins_to_set), opts) do
+ {:ok, enabled_plugins} ->
+ {:stream,
+ Stream.concat([
+ [:rabbit_plugins.strictly_plugins(enabled_plugins, all)],
+ RabbitMQ.CLI.Core.Helpers.defer(fn ->
+ case PluginHelpers.update_enabled_plugins(enabled_plugins, mode, node_name, opts) do
+ %{set: new_enabled} = result ->
+ enabled = new_enabled -- implicit
+
+ filter_strictly_plugins(
+ Map.put(result, :enabled, :rabbit_plugins.strictly_plugins(enabled, all)),
+ all,
+ [:set, :started, :stopped]
+ )
+
+ other ->
+ other
+ end
+ end)
+ ])}
+
+ {:error, _} = err ->
+ err
+ end
+ end
+
+ defp filter_strictly_plugins(map, _all, []) do
+ map
+ end
+
+ defp filter_strictly_plugins(map, all, [head | tail]) do
+ case map[head] do
+ nil ->
+ filter_strictly_plugins(map, all, tail)
+
+ other ->
+ value = :rabbit_plugins.strictly_plugins(other, all)
+ filter_strictly_plugins(Map.put(map, head, value), all, tail)
+ end
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/plugins/commands/is_enabled.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/plugins/commands/is_enabled.ex
new file mode 100644
index 0000000000..fa54b1eee3
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/plugins/commands/is_enabled.ex
@@ -0,0 +1,154 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Plugins.Commands.IsEnabledCommand do
+ alias RabbitMQ.CLI.Plugins.Helpers, as: PluginHelpers
+ alias RabbitMQ.CLI.Core.{DocGuide, Validators}
+ import RabbitMQ.CLI.Core.{CodePath, Paths}
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ def merge_defaults(args, %{offline: true} = opts) do
+ {args, opts}
+ end
+
+ def merge_defaults(args, opts) do
+ {args, Map.merge(%{online: true, offline: false}, opts)}
+ end
+
+ def distribution(%{offline: true}), do: :none
+ def distribution(%{offline: false}), do: :cli
+
+ def switches(), do: [online: :boolean, offline: :boolean]
+
+ def validate(_, %{online: true, offline: true}) do
+ {:validation_failure, {:bad_argument, "Cannot set both online and offline"}}
+ end
+
+ def validate(_, %{online: false, offline: false}) do
+ {:validation_failure, {:bad_argument, "Cannot set online and offline to false"}}
+ end
+
+ def validate([], _) do
+ {:validation_failure, :not_enough_args}
+ end
+
+ def validate([_ | _], _) do
+ :ok
+ end
+
+ def validate_execution_environment(args, %{offline: true} = opts) do
+ Validators.chain(
+ [
+ &require_rabbit_and_plugins/2,
+ &PluginHelpers.enabled_plugins_file/2,
+ &plugins_dir/2
+ ],
+ [args, opts]
+ )
+ end
+
+ def validate_execution_environment(args, %{online: true} = opts) do
+ Validators.node_is_running(args, opts)
+ end
+
+ def run(args, %{online: true, node: node_name} = opts) do
+ case :rabbit_misc.rpc_call(node_name, :rabbit_plugins, :active, []) do
+ {:error, _} = e ->
+ e
+
+ plugins ->
+ plugins = Enum.map(plugins, &Atom.to_string/1) |> Enum.sort()
+
+ case Enum.filter(args, fn x -> not Enum.member?(plugins, x) end) do
+ [] -> {:ok, positive_result_message(args, opts)}
+ xs -> {:error, negative_result_message(xs, opts, plugins)}
+ end
+ end
+ end
+
+ def run(args, %{offline: true} = opts) do
+ plugins = PluginHelpers.list_names(opts) |> Enum.map(&Atom.to_string/1) |> Enum.sort()
+
+ case Enum.filter(args, fn x -> not Enum.member?(plugins, x) end) do
+ [] -> {:ok, positive_result_message(args, opts)}
+ xs -> {:error, negative_result_message(xs, opts, plugins)}
+ end
+ end
+
+ def output({:ok, msg}, %{formatter: "json"}) do
+ {:ok, %{"result" => "ok", "message" => msg}}
+ end
+
+ def output({:error, msg}, %{formatter: "json"}) do
+ {:error, RabbitMQ.CLI.Core.ExitCodes.exit_unavailable(),
+ %{"result" => "error", "message" => msg}}
+ end
+
+ def output({:error, err}, _opts) do
+ {:error, RabbitMQ.CLI.Core.ExitCodes.exit_unavailable(), err}
+ end
+
+ use RabbitMQ.CLI.DefaultOutput
+
+ def usage, do: "is_enabled <plugin1> [ <plugin2>] [--offline] [--online]"
+
+ def usage_additional() do
+ [
+ ["<plugin1> [ <plugin2>]", "names of plugins to check separated by a space"],
+ ["--online", "contact target node to perform the check. Requires the node to be running and reachable."],
+ ["--offline", "check enabled plugins file directly without contacting target node."]
+ ]
+ end
+
+ def usage_doc_guides() do
+ [
+ DocGuide.plugins()
+ ]
+ end
+
+ def banner(args, %{offline: true}) do
+    "Inferring if #{plugin_or_plugins(args)} enabled from local environment..."
+ end
+
+ def banner(args, %{online: true, node: node}) do
+ "Asking node #{node} if #{plugin_or_plugins(args)} enabled..."
+ end
+
+ def help_section(), do: :observability_and_health_checks
+
+ def description(), do: "Health check that exits with a non-zero code if provided plugins are not enabled on target node"
+
+ #
+ # Implementation
+ #
+
+ def plugin_or_plugins(args) when length(args) == 1 do
+ "plugin #{PluginHelpers.comma_separated_names(args)} is"
+ end
+
+ def plugin_or_plugins(args) when length(args) > 1 do
+ "plugins #{PluginHelpers.comma_separated_names(args)} are"
+ end
+
+ defp positive_result_message(args, %{online: true, node: node_name}) do
+ String.capitalize("#{plugin_or_plugins(args)} enabled on node #{node_name}")
+ end
+
+ defp positive_result_message(args, %{offline: true}) do
+ String.capitalize("#{plugin_or_plugins(args)} enabled")
+ end
+
+ defp negative_result_message(missing, %{online: true, node: node_name}, plugins) do
+ String.capitalize("#{plugin_or_plugins(missing)} not enabled on node #{node_name}. ") <>
+ "Enabled plugins and dependencies: #{PluginHelpers.comma_separated_names(plugins)}"
+ end
+
+ defp negative_result_message(missing, %{offline: true}, plugins) do
+ String.capitalize("#{plugin_or_plugins(missing)} not enabled. ") <>
+ "Enabled plugins and dependencies: #{PluginHelpers.comma_separated_names(plugins)}"
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/plugins/commands/list_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/plugins/commands/list_command.ex
new file mode 100644
index 0000000000..a4e943a149
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/plugins/commands/list_command.ex
@@ -0,0 +1,188 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Plugins.Commands.ListCommand do
+ import RabbitCommon.Records
+
+ alias RabbitMQ.CLI.Core.{DocGuide, Validators}
+ alias RabbitMQ.CLI.Plugins.Helpers, as: PluginHelpers
+ import RabbitMQ.CLI.Core.{CodePath, Paths}
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+ use RabbitMQ.CLI.DefaultOutput
+
+ def formatter(), do: RabbitMQ.CLI.Formatters.Plugins
+
+ def merge_defaults([], opts), do: merge_defaults([".*"], opts)
+ def merge_defaults(args, opts), do: {args, Map.merge(default_opts(), opts)}
+
+ def switches(),
+ do: [verbose: :boolean, minimal: :boolean, enabled: :boolean, implicitly_enabled: :boolean]
+
+ def aliases(), do: [v: :verbose, m: :minimal, E: :enabled, e: :implicitly_enabled]
+
+ def validate(args, _) when length(args) > 1 do
+ {:validation_failure, :too_many_args}
+ end
+
+ def validate(_, %{verbose: true, minimal: true}) do
+ {:validation_failure, {:bad_argument, "Cannot set both verbose and minimal"}}
+ end
+
+ def validate(_, _) do
+ :ok
+ end
+
+ def validate_execution_environment(args, opts) do
+ Validators.chain(
+ [
+ &require_rabbit_and_plugins/2,
+ &PluginHelpers.enabled_plugins_file/2,
+ &plugins_dir/2
+ ],
+ [args, opts]
+ )
+ end
+
+ def run([pattern], %{node: node_name} = opts) do
+ %{verbose: verbose, minimal: minimal, enabled: only_enabled, implicitly_enabled: all_enabled} =
+ opts
+
+ all = PluginHelpers.list(opts)
+ enabled = PluginHelpers.read_enabled(opts)
+
+ missing = MapSet.difference(MapSet.new(enabled), MapSet.new(PluginHelpers.plugin_names(all)))
+
+ case Enum.empty?(missing) do
+ true ->
+ :ok
+
+ false ->
+ names = Enum.join(Enum.to_list(missing), ", ")
+ IO.puts("WARNING - plugins currently enabled but missing: #{names}\n")
+ end
+
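+    # dependencies/3 returns the enabled plugins together with everything they
+    # depend on; subtracting the explicit list leaves the implicitly enabled ones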
+ implicit = :rabbit_plugins.dependencies(false, enabled, all)
+ enabled_implicitly = implicit -- enabled
+
+ {status, running} =
+ case remote_running_plugins(node_name) do
+ :error -> {:node_down, []}
+ {:ok, active} -> {:running, active}
+ end
+
+ {:ok, re} = Regex.compile(pattern)
+
+ format =
+ case {verbose, minimal} do
+ {true, false} -> :verbose
+ {false, true} -> :minimal
+ {false, false} -> :normal
+ end
+
+ plugins =
+ Enum.filter(
+ all,
+ fn plugin ->
+ name = PluginHelpers.plugin_name(plugin)
+
+ :rabbit_plugins.is_strictly_plugin(plugin) and
+ Regex.match?(re, to_string(name)) and
+ cond do
+ only_enabled -> Enum.member?(enabled, name)
+ all_enabled -> Enum.member?(enabled ++ enabled_implicitly, name)
+ true -> true
+ end
+ end
+ )
+
+ %{
+ status: status,
+ format: format,
+ plugins: format_plugins(plugins, format, enabled, enabled_implicitly, running)
+ }
+ end
+
+ def banner([pattern], _), do: "Listing plugins with pattern \"#{pattern}\" ..."
+
+ def usage, do: "list [pattern] [--verbose] [--minimal] [--enabled] [--implicitly-enabled]"
+
+ def usage_additional() do
+ [
+ ["<pattern>", "only list plugins that match a regular expression pattern"],
+ ["--verbose", "output more information"],
+ ["--minimal", "only print plugin names. Most useful in compbination with --silent and --enabled."],
+ ["--enabled", "only list enabled plugins"],
+ ["--implicitly-enabled", "include plugins enabled as dependencies of other plugins"]
+ ]
+ end
+
+ def usage_doc_guides() do
+ [
+ DocGuide.plugins()
+ ]
+ end
+
+ def help_section(), do: :plugin_management
+
+ def description(), do: "Lists plugins and their state"
+
+ #
+ # Implementation
+ #
+
+ defp remote_running_plugins(node) do
+ case :rabbit_misc.rpc_call(node, :rabbit_plugins, :running_plugins, []) do
+ {:badrpc, _} -> :error
+ active_with_version -> active_with_version
+ end
+ end
+
+ defp format_plugins(plugins, format, enabled, enabled_implicitly, running) do
+ plugins
+ |> sort_plugins
+ |> Enum.map(fn plugin ->
+ format_plugin(plugin, format, enabled, enabled_implicitly, running)
+ end)
+ end
+
+ defp sort_plugins(plugins) do
+ Enum.sort_by(plugins, &PluginHelpers.plugin_name/1)
+ end
+
+ defp format_plugin(plugin, :minimal, _, _, _) do
+ %{name: PluginHelpers.plugin_name(plugin)}
+ end
+
+ defp format_plugin(plugin, :normal, enabled, enabled_implicitly, running) do
+ plugin(name: name, version: version) = plugin
+
+ enabled_mode =
+ case {Enum.member?(enabled, name), Enum.member?(enabled_implicitly, name)} do
+ {true, false} -> :enabled
+ {false, true} -> :implicit
+ {false, false} -> :not_enabled
+ end
+
+ %{
+ name: name,
+ version: version,
+ running_version: running[name],
+ enabled: enabled_mode,
+ running: Keyword.has_key?(running, name)
+ }
+ end
+
+ defp format_plugin(plugin, :verbose, enabled, enabled_implicitly, running) do
+ normal = format_plugin(plugin, :normal, enabled, enabled_implicitly, running)
+ plugin(dependencies: dependencies, description: description) = plugin
+ Map.merge(normal, %{dependencies: dependencies, description: description})
+ end
+
+ defp default_opts() do
+ %{minimal: false, verbose: false, enabled: false, implicitly_enabled: false}
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/plugins/commands/set_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/plugins/commands/set_command.ex
new file mode 100644
index 0000000000..68b442a547
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/plugins/commands/set_command.ex
@@ -0,0 +1,133 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Plugins.Commands.SetCommand do
+ alias RabbitMQ.CLI.Plugins.Helpers, as: PluginHelpers
+ alias RabbitMQ.CLI.Core.{DocGuide, ExitCodes, Validators}
+ import RabbitMQ.CLI.Core.{CodePath, Paths}
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ def formatter(), do: RabbitMQ.CLI.Formatters.Plugins
+
+ def merge_defaults(args, opts) do
+ {args, Map.merge(%{online: false, offline: false}, opts)}
+ end
+
+ def distribution(%{offline: true}), do: :none
+ def distribution(%{offline: false}), do: :cli
+
+ def switches(), do: [online: :boolean, offline: :boolean]
+
+ def validate(_, %{online: true, offline: true}) do
+ {:validation_failure, {:bad_argument, "Cannot set both online and offline"}}
+ end
+
+ def validate(_, _) do
+ :ok
+ end
+
+ def validate_execution_environment(args, opts) do
+ Validators.chain(
+ [
+ &PluginHelpers.can_set_plugins_with_mode/2,
+ &require_rabbit_and_plugins/2,
+ &PluginHelpers.enabled_plugins_file/2,
+ &plugins_dir/2
+ ],
+ [args, opts]
+ )
+ end
+
+ def run(plugin_names, opts) do
+ plugins = for s <- plugin_names, do: String.to_atom(s)
+
+ case PluginHelpers.validate_plugins(plugins, opts) do
+ :ok -> do_run(plugins, opts)
+ other -> other
+ end
+ end
+
+ def output({:error, {:plugins_not_found, missing}}, _opts) do
+ {:error, ExitCodes.exit_dataerr(),
+ "The following plugins were not found: #{Enum.join(Enum.to_list(missing), ", ")}"}
+ end
+
+ use RabbitMQ.CLI.Plugins.ErrorOutput
+
+ def banner(plugins, %{node: node_name}) do
+ ["Enabling plugins on node #{node_name}:" | plugins]
+ end
+
+ def usage, do: "set <plugin1> [ <plugin2>] [--offline] [--online]"
+
+ def usage_additional() do
+ [
+ ["<plugin1> [ <plugin2>]", "names of plugins to enable separated by a space. All other plugins will be disabled."],
+ ["--online", "contact target node to enable the plugins. Changes are applied immediately."],
+ ["--offline", "update enabled plugins file directly without contacting target node. Changes will be delayed until the node is restarted."]
+ ]
+ end
+
+ def usage_doc_guides() do
+ [
+ DocGuide.plugins()
+ ]
+ end
+
+ def help_section(), do: :plugin_management
+
+ def description(), do: "Enables one or more plugins, disables the rest"
+
+ #
+ # Implementation
+ #
+
+ def do_run(plugins, %{node: node_name} = opts) do
+ all = PluginHelpers.list(opts)
+ mode = PluginHelpers.mode(opts)
+
+ case PluginHelpers.set_enabled_plugins(plugins, opts) do
+ {:ok, enabled_plugins} ->
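+        # the first stream element is the effective plugin list; the deferred
+        # closure below applies the change and reports what was started/stopped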
+ {:stream,
+ Stream.concat([
+ [:rabbit_plugins.strictly_plugins(enabled_plugins, all)],
+ RabbitMQ.CLI.Core.Helpers.defer(fn ->
+ case PluginHelpers.update_enabled_plugins(
+ enabled_plugins,
+ mode,
+ node_name,
+ opts
+ ) do
+ %{set: _} = map ->
+ filter_strictly_plugins(map, all, [:set, :started, :stopped])
+
+ {:error, _} = err ->
+ err
+ end
+ end)
+ ])}
+
+ {:error, _} = err ->
+ err
+ end
+ end
+
+ defp filter_strictly_plugins(map, _all, []) do
+ map
+ end
+
+ defp filter_strictly_plugins(map, all, [head | tail]) do
+ case map[head] do
+ nil ->
+ filter_strictly_plugins(map, all, tail)
+
+ other ->
+ value = :rabbit_plugins.strictly_plugins(other, all)
+ filter_strictly_plugins(Map.put(map, head, value), all, tail)
+ end
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/plugins/error_output.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/plugins/error_output.ex
new file mode 100644
index 0000000000..51c75ed99a
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/plugins/error_output.ex
@@ -0,0 +1,55 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+# Default output implementation for plugin commands
+defmodule RabbitMQ.CLI.Plugins.ErrorOutput do
+ alias RabbitMQ.CLI.Core.ExitCodes
+
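+  # Commands opt into these clauses with `use RabbitMQ.CLI.Plugins.ErrorOutput`
+  # (see SetCommand above)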
+ defmacro __using__(_) do
+ quote do
+ def output({:error, {:enabled_plugins_mismatch, cli_path, node_path}}, opts) do
+ {:error, ExitCodes.exit_dataerr(),
+ "Could not update enabled plugins file at #{cli_path}: target node #{opts[:node]} uses a different path (#{
+ node_path
+ })"}
+ end
+
+ def output({:error, {:cannot_read_enabled_plugins_file, path, :eacces}}, _opts) do
+ {:error, ExitCodes.exit_dataerr(),
+ "Could not read enabled plugins file at #{path}: the file does not exist or permission was denied (EACCES)"}
+ end
+
+ def output({:error, {:cannot_read_enabled_plugins_file, path, :enoent}}, _opts) do
+ {:error, ExitCodes.exit_dataerr(),
+ "Could not read enabled plugins file at #{path}: the file does not exist (ENOENT)"}
+ end
+
+ def output({:error, {:cannot_write_enabled_plugins_file, path, :eacces}}, _opts) do
+ {:error, ExitCodes.exit_dataerr(),
+ "Could not update enabled plugins file at #{path}: the file does not exist or permission was denied (EACCES)"}
+ end
+
+ def output({:error, {:cannot_write_enabled_plugins_file, path, :enoent}}, _opts) do
+ {:error, ExitCodes.exit_dataerr(),
+ "Could not update enabled plugins file at #{path}: the file does not exist (ENOENT)"}
+ end
+
+ def output({:error, err}, _opts) do
+ {:error, ExitCodes.exit_software(), err}
+ end
+
+ def output({:stream, stream}, _opts) do
+ {:stream, stream}
+ end
+ end
+
+ # quote
+ end
+
+ # defmacro
+end
+
+# defmodule
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/plugins/plugins_helpers.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/plugins/plugins_helpers.ex
new file mode 100644
index 0000000000..bf8b4f772b
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/plugins/plugins_helpers.ex
@@ -0,0 +1,232 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Plugins.Helpers do
+ import RabbitMQ.CLI.Core.DataCoercion
+ import RabbitCommon.Records
+ import RabbitMQ.CLI.Core.Platform, only: [path_separator: 0]
+ import RabbitMQ.CLI.Core.{CodePath, Paths}
+ alias RabbitMQ.CLI.Core.{Config, Validators}
+
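+  # Maps the --online/--offline flags to an update mode; with neither flag set,
+  # :best_effort tries the node first and falls back to an offline update.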
+ def mode(opts) do
+ %{online: online, offline: offline} = opts
+
+ case {online, offline} do
+ {true, false} -> :online
+ {false, true} -> :offline
+ {false, false} -> :best_effort
+ end
+ end
+
+ def can_set_plugins_with_mode(args, opts) do
+ case mode(opts) do
+ # can always set offline plugins list
+ :offline ->
+ :ok
+
+ # assume online mode, fall back to offline mode in case of errors
+ :best_effort ->
+ :ok
+
+ # a running node is required
+ :online ->
+ Validators.chain(
+ [&Validators.node_is_running/2, &Validators.rabbit_is_running/2],
+ [args, opts]
+ )
+ end
+ end
+
+ def list(opts) do
+ {:ok, dir} = plugins_dir(opts)
+ add_all_to_path(dir)
+ :lists.usort(:rabbit_plugins.list(to_charlist(dir)))
+ end
+
+ def list_names(opts) do
+ list(opts) |> plugin_names
+ end
+
+ def read_enabled(opts) do
+ case enabled_plugins_file(opts) do
+ {:ok, enabled} ->
+ :rabbit_plugins.read_enabled(to_charlist(enabled))
+
+ # Existence of enabled_plugins_file should be validated separately
+ {:error, :no_plugins_file} ->
+ []
+ end
+ end
+
+ def enabled_plugins_file(opts) do
+ case Config.get_option(:enabled_plugins_file, opts) do
+ nil -> {:error, :no_plugins_file}
+ file -> {:ok, file}
+ end
+ end
+
+ def enabled_plugins_file(_, opts) do
+ enabled_plugins_file(opts)
+ end
+
+ def set_enabled_plugins(plugins, opts) do
+ plugin_atoms = :lists.usort(for plugin <- plugins, do: to_atom(plugin))
+ require_rabbit_and_plugins(opts)
+ {:ok, plugins_file} = enabled_plugins_file(opts)
+ write_enabled_plugins(plugin_atoms, plugins_file, opts)
+ end
+
+ @spec update_enabled_plugins(
+ [atom()],
+ :online | :offline | :best_effort,
+ node(),
+ map()
+ ) :: map() | {:error, any()}
+ def update_enabled_plugins(enabled_plugins, mode, node_name, opts) do
+ {:ok, plugins_file} = enabled_plugins_file(opts)
+
+ case mode do
+ :online ->
+ case update_enabled_plugins(node_name, plugins_file) do
+ {:ok, started, stopped} ->
+ %{
+ mode: :online,
+ started: Enum.sort(started),
+ stopped: Enum.sort(stopped),
+ set: Enum.sort(enabled_plugins)
+ }
+
+ {:error, _} = err ->
+ err
+ end
+
+ :best_effort ->
+ case update_enabled_plugins(node_name, plugins_file) do
+ {:ok, started, stopped} ->
+ %{
+ mode: :online,
+ started: Enum.sort(started),
+ stopped: Enum.sort(stopped),
+ set: Enum.sort(enabled_plugins)
+ }
+
+ {:error, :offline} ->
+ %{mode: :offline, set: Enum.sort(enabled_plugins)}
+
+ {:error, {:enabled_plugins_mismatch, _, _}} = err ->
+ err
+ end
+
+ :offline ->
+ %{mode: :offline, set: Enum.sort(enabled_plugins)}
+ end
+ end
+
+ def validate_plugins(requested_plugins, opts) do
+ ## Maybe check all plugins
+ plugins =
+ case opts do
+ %{all: true} -> plugin_names(list(opts))
+ _ -> requested_plugins
+ end
+
+ all = list(opts)
+ deps = :rabbit_plugins.dependencies(false, plugins, all)
+
+ deps_plugins =
+ Enum.filter(all, fn plugin ->
+ name = plugin_name(plugin)
+ Enum.member?(deps, name)
+ end)
+
+ case :rabbit_plugins.validate_plugins(deps_plugins) do
+ {_, []} -> :ok
+ {_, invalid} -> {:error, :rabbit_plugins.format_invalid_plugins(invalid)}
+ end
+ end
+
+ def plugin_name(plugin) when is_binary(plugin) do
+ plugin
+ end
+
+ def plugin_name(plugin) when is_atom(plugin) do
+ Atom.to_string(plugin)
+ end
+
+ def plugin_name(plugin) do
+ plugin(name: name) = plugin
+ name
+ end
+
+ def plugin_names(plugins) do
+ for plugin <- plugins, do: plugin_name(plugin)
+ end
+
+ def comma_separated_names(plugins) do
+ Enum.join(plugin_names(plugins), ", ")
+ end
+
+ #
+ # Implementation
+ #
+
+ defp to_list(str) when is_binary(str) do
+ :erlang.binary_to_list(str)
+ end
+
+ defp to_list(lst) when is_list(lst) do
+ lst
+ end
+
+ defp to_list(atm) when is_atom(atm) do
+ to_list(Atom.to_string(atm))
+ end
+
+ defp write_enabled_plugins(plugins, plugins_file, opts) do
+ all = list(opts)
+ all_plugin_names = Enum.map(all, &plugin_name/1)
+ missing = MapSet.difference(MapSet.new(plugins), MapSet.new(all_plugin_names))
+
+ case Enum.empty?(missing) do
+ true ->
+ case :rabbit_file.write_term_file(to_charlist(plugins_file), [plugins]) do
+ :ok ->
+ all_enabled = :rabbit_plugins.dependencies(false, plugins, all)
+ {:ok, Enum.sort(all_enabled)}
+
+ {:error, reason} ->
+ {:error, {:cannot_write_enabled_plugins_file, plugins_file, reason}}
+ end
+
+ false ->
+ {:error, {:plugins_not_found, Enum.to_list(missing)}}
+ end
+ end
+
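+  # Asks the node to re-read the enabled plugins file; returns the plugins it
+  # started and stopped, or {:error, :offline} when the node cannot apply it.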
+ defp update_enabled_plugins(node_name, plugins_file) do
+ case :rabbit_misc.rpc_call(node_name, :rabbit_plugins, :ensure, [to_list(plugins_file)]) do
+ {:badrpc, :nodedown} -> {:error, :offline}
+ {:error, :rabbit_not_running} -> {:error, :offline}
+ {:ok, start, stop} -> {:ok, start, stop}
+ {:error, _} = err -> err
+ end
+ end
+
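+  # the plugins dir value may contain several directories separated by the
+  # platform-specific path separator; each plugin's ebin dir joins the code path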
+ defp add_all_to_path(plugins_directories) do
+ directories = String.split(to_string(plugins_directories), path_separator())
+
+ Enum.map(directories, fn directory ->
+ with {:ok, subdirs} <- File.ls(directory) do
+ for subdir <- subdirs do
+ Path.join([directory, subdir, "ebin"])
+ |> Code.append_path()
+ end
+ end
+ end)
+
+ :ok
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/printer_behaviour.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/printer_behaviour.ex
new file mode 100644
index 0000000000..b2bedfdaad
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/printer_behaviour.ex
@@ -0,0 +1,21 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.PrinterBehaviour do
+ @callback init(options :: map()) :: {:ok, printer_state :: any} | {:error, error :: any}
+ @callback finish(printer_state :: any) :: :ok
+
+ @callback print_output(output :: String.t() | [String.t()], printer_state :: any) :: :ok
+ @callback print_ok(printer_state :: any) :: :ok
+
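+  # Resolves a user-provided printer name to a module, e.g. "file" ->
+  # RabbitMQ.CLI.Printers.File.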
+ def module_name(nil) do
+ nil
+ end
+ def module_name(printer) do
+ mod = printer |> String.downcase |> Macro.camelize
+ String.to_atom("RabbitMQ.CLI.Printers." <> mod)
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/printers/file.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/printers/file.ex
new file mode 100644
index 0000000000..ba0daaeebb
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/printers/file.ex
@@ -0,0 +1,36 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Printers.File do
+ @behaviour RabbitMQ.CLI.PrinterBehaviour
+
+ def init(options) do
+ file = options[:file]
+
+ case File.open(file) do
+ {:ok, io_device} -> {:ok, %{device: io_device}}
+ {:error, err} -> {:error, err}
+ end
+ end
+
+ def finish(%{device: io_device}) do
+ :ok = File.close(io_device)
+ end
+
+ def print_output(output, %{device: io_device}) when is_list(output) do
+ for line <- output do
+ IO.puts(io_device, line)
+ end
+ end
+
+ def print_output(output, %{device: io_device}) do
+ IO.puts(io_device, output)
+ end
+
+ def print_ok(_) do
+ :ok
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/printers/std_io.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/printers/std_io.ex
new file mode 100644
index 0000000000..206feff56d
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/printers/std_io.ex
@@ -0,0 +1,28 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Printers.StdIO do
+ @behaviour RabbitMQ.CLI.PrinterBehaviour
+
+ def init(_), do: {:ok, :ok}
+ def finish(_), do: :ok
+
+ def print_output(nil, _), do: :ok
+
+ def print_output(output, _) when is_list(output) do
+ for line <- output do
+ IO.puts(line)
+ end
+ end
+
+ def print_output(output, _) do
+ IO.puts(output)
+ end
+
+ def print_ok(_) do
+ :ok
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/printers/std_io_raw.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/printers/std_io_raw.ex
new file mode 100644
index 0000000000..16846907b4
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/printers/std_io_raw.ex
@@ -0,0 +1,28 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Printers.StdIORaw do
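+  # Like StdIO, but writes output verbatim with IO.write/1 instead of
+  # appending newlines with IO.puts/1.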
+ @behaviour RabbitMQ.CLI.PrinterBehaviour
+
+ def init(_), do: {:ok, :ok}
+ def finish(_), do: :ok
+
+ def print_output(nil, _), do: :ok
+
+ def print_output(output, _) when is_list(output) do
+ for line <- output do
+ IO.write(line)
+ end
+ end
+
+ def print_output(output, _) do
+ IO.write(output)
+ end
+
+ def print_ok(_) do
+ :ok
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/queues/commands/add_member_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/queues/commands/add_member_command.ex
new file mode 100644
index 0000000000..e789d00343
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/queues/commands/add_member_command.ex
@@ -0,0 +1,70 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Queues.Commands.AddMemberCommand do
+ alias RabbitMQ.CLI.Core.DocGuide
+ import RabbitMQ.CLI.Core.DataCoercion
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ @default_timeout 5_000
+
+ def merge_defaults(args, opts) do
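+    # treat :infinity as the default so the add_member RPC below stays bounded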
+ timeout =
+ case opts[:timeout] do
+ nil -> @default_timeout
+ :infinity -> @default_timeout
+ other -> other
+ end
+ {args, Map.merge(%{vhost: "/", timeout: timeout}, opts)}
+ end
+
+ use RabbitMQ.CLI.Core.AcceptsDefaultSwitchesAndTimeout
+ use RabbitMQ.CLI.Core.AcceptsTwoPositionalArguments
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+
+ def run([name, node] = _args, %{vhost: vhost, node: node_name, timeout: timeout}) do
+ case :rabbit_misc.rpc_call(node_name, :rabbit_quorum_queue, :add_member, [
+ vhost,
+ name,
+ to_atom(node),
+ timeout
+ ]) do
+ {:error, :classic_queue_not_supported} ->
+ {:error, "Cannot add members to a classic queue"}
+
+ other ->
+ other
+ end
+ end
+
+ use RabbitMQ.CLI.DefaultOutput
+
+ def usage, do: "add_member [--vhost <vhost>] <queue> <node>"
+
+ def usage_additional do
+ [
+ ["<queue>", "quorum queue name"],
+ ["<node>", "node to add a new replica on"]
+ ]
+ end
+
+ def usage_doc_guides() do
+ [
+ DocGuide.quorum_queues()
+ ]
+ end
+
+ def help_section, do: :replication
+
+ def description, do: "Adds a quorum queue member (replica) on the given node."
+
+ def banner([name, node], _) do
+ [
+ "Adding a replica for queue #{name} on node #{node}..."
+ ]
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/queues/commands/check_if_node_is_mirror_sync_critical_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/queues/commands/check_if_node_is_mirror_sync_critical_command.ex
new file mode 100644
index 0000000000..c31b83d29c
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/queues/commands/check_if_node_is_mirror_sync_critical_command.ex
@@ -0,0 +1,105 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Queues.Commands.CheckIfNodeIsMirrorSyncCriticalCommand do
+ @moduledoc """
+ Exits with a non-zero code if there are classic mirrored queues that don't
+ have any in sync mirrors online and would potentially lose data
+ if the target node is shut down.
+
+ This command is meant to be used as a pre-upgrade (pre-shutdown) check.
+ """
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ import RabbitMQ.CLI.Core.Platform, only: [line_separator: 0]
+
+ def scopes(), do: [:diagnostics, :queues]
+
+ use RabbitMQ.CLI.Core.AcceptsDefaultSwitchesAndTimeout
+ use RabbitMQ.CLI.Core.MergesNoDefaults
+ use RabbitMQ.CLI.Core.AcceptsNoPositionalArguments
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+
+ def run([], %{node: node_name, timeout: timeout}) do
+ case :rabbit_misc.rpc_call(node_name,
+ :rabbit_nodes, :is_single_node_cluster, [], timeout) do
+ # if target node is the only one in the cluster, the check makes little sense
+ # and false positives can be misleading
+ true -> {:ok, :single_node_cluster}
+ false ->
+ case :rabbit_misc.rpc_call(node_name,
+ :rabbit_amqqueue, :list_local_mirrored_classic_without_synchronised_mirrors_for_cli, [], timeout) do
+ [] -> {:ok, []}
+ qs when is_list(qs) -> {:ok, qs}
+ other -> other
+ end
+ other -> other
+ end
+ end
+
+ def output({:ok, :single_node_cluster}, %{formatter: "json"}) do
+ {:ok, %{
+ "result" => "ok",
+ "message" => "Target node seems to be the only one in a single node cluster, the check does not apply"
+ }}
+ end
+ def output({:ok, []}, %{formatter: "json"}) do
+ {:ok, %{"result" => "ok"}}
+ end
+ def output({:ok, :single_node_cluster}, %{silent: true}) do
+ {:ok, :check_passed}
+ end
+ def output({:ok, []}, %{silent: true}) do
+ {:ok, :check_passed}
+ end
+ def output({:ok, :single_node_cluster}, %{node: node_name}) do
+ {:ok, "Node #{node_name} seems to be the only one in a single node cluster, the check does not apply"}
+ end
+ def output({:ok, []}, %{node: node_name}) do
+ {:ok, "Node #{node_name} reported no classic mirrored queues without online synchronised mirrors"}
+ end
+ def output({:ok, qs}, %{node: node_name, formatter: "json"}) when is_list(qs) do
+ {:error, :check_failed,
+ %{
+ "result" => "error",
+ "queues" => qs,
+ "message" => "Node #{node_name} reported local classic mirrored queues without online synchronised mirrors"
+ }}
+ end
+ def output({:ok, qs}, %{silent: true}) when is_list(qs) do
+ {:error, :check_failed}
+ end
+ def output({:ok, qs}, %{node: node_name}) when is_list(qs) do
+ lines = queue_lines(qs, node_name)
+
+ {:error, :check_failed, Enum.join(lines, line_separator())}
+ end
+ use RabbitMQ.CLI.DefaultOutput
+
+ def help_section(), do: :observability_and_health_checks
+
+ def description() do
+ "Health check that exits with a non-zero code if there are classic mirrored queues " <>
+ "without online synchronised mirrors (queues that would potentially lose data if the target node is shut down)"
+ end
+
+ def usage, do: "check_if_node_is_mirror_sync_critical"
+
+ def banner([], %{node: node_name}) do
+ "Checking if node #{node_name} is critical for data safety of any classic mirrored queues ..."
+ end
+
+ #
+ # Implementation
+ #
+
+ def queue_lines(qs, node_name) do
+ for q <- qs do
+ "#{q["readable_name"]} would lose its only synchronised replica (master) if node #{node_name} is stopped"
+ end
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/queues/commands/check_if_node_is_quorum_critical_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/queues/commands/check_if_node_is_quorum_critical_command.ex
new file mode 100644
index 0000000000..d8f4a34c1c
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/queues/commands/check_if_node_is_quorum_critical_command.ex
@@ -0,0 +1,118 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Queues.Commands.CheckIfNodeIsQuorumCriticalCommand do
+ @moduledoc """
+ Exits with a non-zero code if there are quorum queues that would lose their quorum
+ if the target node is shut down.
+
+ This command is meant to be used as a pre-upgrade (pre-shutdown) check.
+ """
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ import RabbitMQ.CLI.Core.Platform, only: [line_separator: 0]
+
+ def scopes(), do: [:diagnostics, :queues]
+
+ use RabbitMQ.CLI.Core.AcceptsDefaultSwitchesAndTimeout
+ use RabbitMQ.CLI.Core.MergesNoDefaults
+ use RabbitMQ.CLI.Core.AcceptsNoPositionalArguments
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+
+ def run([], %{node: node_name, timeout: timeout}) do
+ case :rabbit_misc.rpc_call(node_name, :rabbit_nodes, :is_single_node_cluster, [], timeout) do
+ # if target node is the only one in the cluster, the check makes little sense
+ # and false positives can be misleading
+ true -> {:ok, :single_node_cluster}
+ false ->
+ case :rabbit_misc.rpc_call(node_name, :rabbit_maintenance, :is_being_drained_local_read, [node_name]) do
+ # if target node is under maintenance, it has already transferred all of its quorum queue
+ # replicas. Don't consider it to be quorum critical. See rabbitmq/rabbitmq-server#2469
+ true -> {:ok, :under_maintenance}
+ false ->
+ case :rabbit_misc.rpc_call(node_name, :rabbit_quorum_queue, :list_with_minimum_quorum_for_cli, [], timeout) do
+ [] -> {:ok, []}
+ qs when is_list(qs) -> {:ok, qs}
+ other -> other
+ end
+ end
+ other -> other
+ end
+ end
+
+ def output({:ok, :single_node_cluster}, %{formatter: "json"}) do
+ {:ok, %{
+ "result" => "ok",
+ "message" => "Target node seems to be the only one in a single node cluster, the check does not apply"
+ }}
+ end
+ def output({:ok, :under_maintenance}, %{formatter: "json"}) do
+ {:ok, %{
+ "result" => "ok",
+ "message" => "Target node seems to be in maintenance mode, the check does not apply"
+ }}
+ end
+ def output({:ok, []}, %{formatter: "json"}) do
+ {:ok, %{"result" => "ok"}}
+ end
+ def output({:ok, :single_node_cluster}, %{silent: true}) do
+ {:ok, :check_passed}
+ end
+ def output({:ok, :under_maintenance}, %{silent: true}) do
+ {:ok, :check_passed}
+ end
+ def output({:ok, []}, %{silent: true}) do
+ {:ok, :check_passed}
+ end
+ def output({:ok, :single_node_cluster}, %{node: node_name}) do
+ {:ok, "Node #{node_name} seems to be the only one in a single node cluster, the check does not apply"}
+ end
+ def output({:ok, :under_maintenance}, %{node: node_name}) do
+ {:ok, "Node #{node_name} seems to be in maintenance mode, the check does not apply"}
+ end
+ def output({:ok, []}, %{node: node_name}) do
+ {:ok, "Node #{node_name} reported no quorum queues with minimum quorum"}
+ end
+ def output({:ok, qs}, %{node: node_name, formatter: "json"}) when is_list(qs) do
+ {:error, :check_failed,
+ %{
+ "result" => "error",
+ "queues" => qs,
+ "message" => "Node #{node_name} reported local queues with minimum online quorum"
+ }}
+ end
+ def output({:ok, qs}, %{silent: true}) when is_list(qs) do
+ {:error, :check_failed}
+ end
+ def output({:ok, qs}, %{node: node_name}) when is_list(qs) do
+ lines = queue_lines(qs, node_name)
+
+ {:error, :check_failed, Enum.join(lines, line_separator())}
+ end
+ use RabbitMQ.CLI.DefaultOutput
+
+ def help_section(), do: :observability_and_health_checks
+
+ def description() do
+ "Health check that exits with a non-zero code if there are queues " <>
+ "with minimum online quorum (queues that would lose their quorum if the target node is shut down)"
+ end
+
+ def usage, do: "check_if_node_is_quorum_critical"
+
+ def banner([], %{node: node_name}) do
+ "Checking if node #{node_name} is critical for quorum of any quorum queues ..."
+ end
+
+ #
+ # Implementation
+ #
+
+ def queue_lines(qs, node_name) do
+ for q <- qs, do: "#{q["readable_name"]} would lose quorum if node #{node_name} is stopped"
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/queues/commands/delete_member_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/queues/commands/delete_member_command.ex
new file mode 100644
index 0000000000..1579bf6809
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/queues/commands/delete_member_command.ex
@@ -0,0 +1,58 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Queues.Commands.DeleteMemberCommand do
+ alias RabbitMQ.CLI.Core.DocGuide
+ import RabbitMQ.CLI.Core.DataCoercion
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ def merge_defaults(args, opts) do
+ {args, Map.merge(%{vhost: "/"}, opts)}
+ end
+
+ use RabbitMQ.CLI.Core.AcceptsTwoPositionalArguments
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+
+ def run([name, node] = _args, %{vhost: vhost, node: node_name}) do
+ case :rabbit_misc.rpc_call(node_name, :rabbit_quorum_queue, :delete_member, [
+ vhost,
+ name,
+ to_atom(node)
+ ]) do
+ {:error, :classic_queue_not_supported} ->
+ {:error, "Cannot add members to a classic queue"}
+
+ other ->
+ other
+ end
+ end
+
+ use RabbitMQ.CLI.DefaultOutput
+
+ def usage, do: "delete_member [--vhost <vhost>] <queue> <node>"
+
+ def usage_additional do
+ [
+ ["<queue>", "quorum queue name"],
+ ["<node>", "node to remove a new replica on"]
+ ]
+ end
+
+ def usage_doc_guides() do
+ [
+ DocGuide.quorum_queues()
+ ]
+ end
+
+ def help_section, do: :replication
+
+ def description, do: "Removes a quorum queue member (replica) on the given node."
+
+ def banner([name, node], _) do
+ "Removing a replica of queue #{name} on node #{node}..."
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/queues/commands/grow_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/queues/commands/grow_command.ex
new file mode 100644
index 0000000000..4e0ce903fe
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/queues/commands/grow_command.ex
@@ -0,0 +1,126 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Queues.Commands.GrowCommand do
+ alias RabbitMQ.CLI.Core.DocGuide
+ import RabbitMQ.CLI.Core.DataCoercion
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ defp default_opts, do: %{vhost_pattern: ".*",
+ queue_pattern: ".*",
+ errors_only: false}
+
+ def switches(),
+ do: [
+ vhost_pattern: :string,
+ queue_pattern: :string,
+ errors_only: :boolean
+ ]
+
+ def merge_defaults(args, opts) do
+ {args, Map.merge(default_opts(), opts)}
+ end
+
+ def validate(args, _) when length(args) < 2 do
+ {:validation_failure, :not_enough_args}
+ end
+
+ def validate(args, _) when length(args) > 2 do
+ {:validation_failure, :too_many_args}
+ end
+ def validate([_, s], _) do
+ case s do
+ "all" -> :ok
+ "even" -> :ok
+ _ ->
+ {:validation_failure, "strategy '#{s}' is not recognised."}
+ end
+ end
+
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+
+ def run([node, strategy], %{node: node_name,
+ vhost_pattern: vhost_pat,
+ queue_pattern: queue_pat,
+ errors_only: errors_only}) do
+ case :rabbit_misc.rpc_call(node_name, :rabbit_quorum_queue, :grow, [
+ to_atom(node),
+ vhost_pat,
+ queue_pat,
+ to_atom(strategy)]) do
+ {:error, _} = error -> error;
+ {:badrpc, _} = error -> error;
+ results when errors_only ->
+        for {{:resource, vhost, _kind, name}, {:error, _, _} = res} <- results,
+ do: [{:vhost, vhost},
+ {:name, name},
+ {:size, format_size res},
+ {:result, format_result res}]
+ results ->
+ for {{:resource, vhost, _kind, name}, res} <- results,
+ do: [{:vhost, vhost},
+ {:name, name},
+ {:size, format_size res},
+ {:result, format_result res}]
+ end
+ end
+
+ use RabbitMQ.CLI.DefaultOutput
+
+ def formatter(), do: RabbitMQ.CLI.Formatters.Table
+
+ def usage, do: "grow <node> <all | even> [--vhost-pattern <pattern>] [--queue-pattern <pattern>]"
+
+ def usage_additional do
+ [
+ ["<node>", "node name to place replicas on"],
+ ["<all | even>", "how many matching quorum queues should have a replica added on this node: all or half (evenly numbered)?"],
+ ["--queue-pattern <pattern>", "regular expression to match queue names"],
+ ["--vhost-pattern <pattern>", "regular expression to match virtual host names"],
+ ["--errors-only", "only list queues which reported an error"]
+ ]
+ end
+
+ def usage_doc_guides() do
+ [
+ DocGuide.quorum_queues()
+ ]
+ end
+
+ def help_section, do: :cluster_management
+
+ def description, do: "Grows quorum queue clusters by adding a member (replica) to all or half of matching quorum queues on the given node."
+
+ def banner([node, strategy], _) do
+ "Growing #{strategy} quorum queues on #{node}..."
+ end
+
+ #
+ # Implementation
+ #
+
+ defp format_size({:ok, size}) do
+ size
+ end
+ defp format_size({:error, _size, :timeout}) do
+ # the actual size is uncertain here
+ "?"
+ end
+ defp format_size({:error, size, _}) do
+ size
+ end
+
+ defp format_result({:ok, _size}) do
+ "ok"
+ end
+ defp format_result({:error, _size, :timeout}) do
+ "error: the operation timed out and may not have been completed"
+ end
+ defp format_result({:error, _size, err}) do
+ :io.format "error: ~W", [err, 10]
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/queues/commands/peek_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/queues/commands/peek_command.ex
new file mode 100644
index 0000000000..a159c119e9
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/queues/commands/peek_command.ex
@@ -0,0 +1,112 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Queues.Commands.PeekCommand do
+ alias RabbitMQ.CLI.Core.DocGuide
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+ def scopes(), do: [:queues]
+
+ use RabbitMQ.CLI.Core.AcceptsDefaultSwitchesAndTimeout
+
+ use RabbitMQ.CLI.Core.MergesDefaultVirtualHost
+
+ def validate(args, _) when length(args) < 2 do
+ {:validation_failure, :not_enough_args}
+ end
+ def validate(args, _) when length(args) > 2 do
+ {:validation_failure, :too_many_args}
+ end
+ def validate([_, raw_pos], _) do
+ pos = case Integer.parse(raw_pos) do
+ {n, _} -> n
+ :error -> :error
+ _ -> :error
+ end
+
+ invalid_pos = {:validation_failure, "position value must be a positive integer"}
+ case pos do
+ :error -> invalid_pos
+ num when num < 1 -> invalid_pos
+ num when num >= 1 -> :ok
+ end
+ end
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+
+ def run([name, pos] = _args, %{node: node_name, vhost: vhost}) do
+ {pos, _} = Integer.parse(pos)
+ case :rabbit_misc.rpc_call(node_name, :rabbit_quorum_queue, :peek, [vhost, name, pos]) do
+ {:error, :classic_queue_not_supported} ->
+ {:error, "Cannot peek into a classic queue"}
+ {:ok, msg} ->
+ {:ok, msg}
+ err ->
+ err
+ end
+ end
+
+ def output({:error, :not_found}, %{vhost: vhost, formatter: "json"}) do
+ {:error,
+ %{
+ "result" => "error",
+ "message" => "Target queue was not found in virtual host '#{vhost}'"
+ }}
+ end
+ def output({:error, :no_message_at_pos}, %{formatter: "json"}) do
+ {:error,
+ %{
+ "result" => "error",
+ "message" => "Target queue does not have a message at that position"
+ }}
+ end
+ def output({:error, error}, %{formatter: "json"}) do
+ {:error,
+ %{
+ "result" => "error",
+ "message" => "Failed to perform the operation: #{error}"
+ }}
+ end
+ def output({:error, :not_found}, %{vhost: vhost}) do
+ {:error, "Target queue was not found in virtual host '#{vhost}'"}
+ end
+ def output({:error, :no_message_at_pos}, _) do
+ {:error, "Target queue does not have a message at that position"}
+ end
+ def output({:ok, msg}, %{formatter: "json"}) do
+ {:ok, %{"result" => "ok", "message" => Enum.into(msg, %{})}}
+ end
+ def output({:ok, msg}, _) do
+ res = Enum.map(msg, fn {k,v} -> [{"keys", k}, {"values", v}] end)
+ {:stream, res}
+ end
+ use RabbitMQ.CLI.DefaultOutput
+
+ def formatter(), do: RabbitMQ.CLI.Formatters.PrettyTable
+
+ def usage() do
+ "peek [--vhost <vhost>] <queue> <position>"
+ end
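+
+  # Example invocation (hypothetical): rabbitmq-queues peek --vhost "/" my-qq 1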
+
+  def usage_additional do
+    [
+      ["<queue>", "Name of the queue"],
+      ["<position>", "Position in the queue, starts at 1"]
+    ]
+  end
+
+ def help_section(), do: :observability_and_health_checks
+
+ def usage_doc_guides() do
+ [
+ DocGuide.quorum_queues()
+ ]
+ end
+
+ def description(), do: "Peeks at the given position of a quorum queue"
+
+ def banner([name, pos], %{node: node_name}),
+ do: "Peeking at quorum queue #{name} at position #{pos} on node #{node_name} ..."
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/queues/commands/quorum_status_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/queues/commands/quorum_status_command.ex
new file mode 100644
index 0000000000..01a61b2536
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/queues/commands/quorum_status_command.ex
@@ -0,0 +1,54 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Queues.Commands.QuorumStatusCommand do
+ alias RabbitMQ.CLI.Core.DocGuide
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+ def scopes(), do: [:diagnostics, :queues]
+
+ def merge_defaults(args, opts), do: {args, Map.merge(%{vhost: "/"}, opts)}
+
+ use RabbitMQ.CLI.Core.AcceptsOnePositionalArgument
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+
+ def run([name] = _args, %{node: node_name, vhost: vhost}) do
+ case :rabbit_misc.rpc_call(node_name, :rabbit_quorum_queue, :status, [vhost, name]) do
+ {:error, :classic_queue_not_supported} ->
+ {:error, "Cannot get quorum status of a classic queue"}
+
+ other ->
+ other
+ end
+ end
+
+ use RabbitMQ.CLI.DefaultOutput
+
+ def formatter(), do: RabbitMQ.CLI.Formatters.PrettyTable
+
+ def usage() do
+ "quorum_status [--vhost <vhost>] <queue>"
+ end
+
+ def usage_additional do
+ [
+ ["<queue>", "Name of the queue"]
+ ]
+ end
+
+ def usage_doc_guides() do
+ [
+ DocGuide.quorum_queues()
+ ]
+ end
+
+ def help_section(), do: :observability_and_health_checks
+
+ def description(), do: "Displays quorum status of a quorum queue"
+
+ def banner([name], %{node: node_name}),
+ do: "Status of quorum queue #{name} on node #{node_name} ..."
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/queues/commands/rebalance_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/queues/commands/rebalance_command.ex
new file mode 100644
index 0000000000..1416a7c570
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/queues/commands/rebalance_command.ex
@@ -0,0 +1,84 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Queues.Commands.RebalanceCommand do
+ alias RabbitMQ.CLI.Core.DocGuide
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+ use RabbitMQ.CLI.DefaultOutput
+
+ @known_types [
+ "all",
+ "classic",
+ "quorum"
+ ]
+
+ defp default_opts, do: %{vhost_pattern: ".*",
+ queue_pattern: ".*"}
+
+ def switches(),
+ do: [
+ vhost_pattern: :string,
+ queue_pattern: :string
+ ]
+
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+
+ def merge_defaults(args, opts) do
+ {args, Map.merge(default_opts(), opts)}
+ end
+
+ def validate([], _) do
+ {:validation_failure, :not_enough_args}
+ end
+ def validate(args, _) when length(args) > 1 do
+ {:validation_failure, :too_many_args}
+ end
+
+ def validate([type], _) do
+ case Enum.member?(@known_types, type) do
+ true ->
+ :ok
+
+ false ->
+ {:error, "type #{type} is not supported. Try one of all, classic, quorum."}
+ end
+ end
+
+ def run([type], %{node: node_name,
+ vhost_pattern: vhost_pat,
+ queue_pattern: queue_pat}) do
+ arg = String.to_atom(type)
+ :rabbit_misc.rpc_call(node_name, :rabbit_amqqueue, :rebalance, [arg, vhost_pat, queue_pat])
+ end
+
+ def formatter(), do: RabbitMQ.CLI.Formatters.PrettyTable
+
+ def usage, do: "rebalance < all | classic | quorum > [--vhost-pattern <pattern>] [--queue-pattern <pattern>]"
+
+ def usage_additional do
+ [
+ ["<type>", "queue type, must be one of: all, classic, quorum"],
+ ["--queue-pattern <pattern>", "regular expression to match queue names"],
+ ["--vhost-pattern <pattern>", "regular expression to match virtual host names"]
+ ]
+ end
+
+ def usage_doc_guides() do
+ [
+ DocGuide.quorum_queues()
+ ]
+ end
+
+ def help_section, do: :cluster_management
+
+ def description, do: "Rebalances queues."
+
+ def banner([type], _) do
+ "Rebalancing #{type} queues..."
+ end
+
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/queues/commands/reclaim_quorum_memory_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/queues/commands/reclaim_quorum_memory_command.ex
new file mode 100644
index 0000000000..3452cc8741
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/queues/commands/reclaim_quorum_memory_command.ex
@@ -0,0 +1,69 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Queues.Commands.ReclaimQuorumMemoryCommand do
+ alias RabbitMQ.CLI.Core.DocGuide
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+ def scopes(), do: [:diagnostics, :queues]
+
+ use RabbitMQ.CLI.Core.MergesDefaultVirtualHost
+ use RabbitMQ.CLI.Core.AcceptsOnePositionalArgument
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+
+ def run([name] = _args, %{node: node_name, vhost: vhost}) do
+ case :rabbit_misc.rpc_call(node_name, :rabbit_quorum_queue, :reclaim_memory, [vhost, name]) do
+ {:error, :classic_queue_not_supported} ->
+ {:error, "This operation is not applicable to classic queues"}
+
+ other ->
+ other
+ end
+ end
+
+ def output({:error, :not_found}, %{vhost: vhost, formatter: "json"}) do
+ {:error,
+ %{
+ "result" => "error",
+ "message" => "Target queue was not found in virtual host '#{vhost}'"
+ }}
+ end
+ def output({:error, error}, %{formatter: "json"}) do
+ {:error,
+ %{
+ "result" => "error",
+ "message" => "Failed to perform the operation: #{error}"
+ }}
+ end
+ def output({:error, :not_found}, %{vhost: vhost}) do
+ {:error, "Target queue was not found in virtual host '#{vhost}'"}
+ end
+ use RabbitMQ.CLI.DefaultOutput
+
+ def usage() do
+ "reclaim_quorum_memory [--vhost <vhost>] <quorum queue>"
+ end
+
+ def usage_additional do
+ [
+ ["<queue>", "Name of the quorum queue"]
+ ]
+ end
+
+ def usage_doc_guides() do
+ [
+ DocGuide.quorum_queues(),
+ DocGuide.memory_use()
+ ]
+ end
+
+ def help_section(), do: :operations
+
+ def description(), do: "Flushes quorum queue processes WAL, performs a full sweep GC on all of its local Erlang processes"
+
+ def banner([name], %{}),
+ do: "Will flush Raft WAL of quorum queue #{name} ..."
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/queues/commands/shrink_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/queues/commands/shrink_command.ex
new file mode 100644
index 0000000000..1bff2b9a1c
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/queues/commands/shrink_command.ex
@@ -0,0 +1,97 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Queues.Commands.ShrinkCommand do
+ alias RabbitMQ.CLI.Core.DocGuide
+ import RabbitMQ.CLI.Core.DataCoercion
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ def switches() do
+ [errors_only: :boolean]
+ end
+
+ def merge_defaults(args, opts) do
+ {args, Map.merge(%{errors_only: false}, opts)}
+ end
+
+ use RabbitMQ.CLI.Core.AcceptsOnePositionalArgument
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+
+ def run([node], %{node: node_name, errors_only: errs}) do
+ case :rabbit_misc.rpc_call(node_name, :rabbit_quorum_queue, :shrink_all, [to_atom(node)]) do
+ {:error, _} = error -> error;
+ {:badrpc, _} = error -> error;
+ results when errs ->
+ for {{:resource, vhost, _kind, name}, {:error, _, _} = res} <- results,
+ do: [{:vhost, vhost},
+ {:name, name},
+ {:size, format_size(res)},
+ {:result, format_result(res)}]
+ results ->
+ for {{:resource, vhost, _kind, name}, res} <- results,
+ do: [{:vhost, vhost},
+ {:name, name},
+ {:size, format_size(res)},
+ {:result, format_result(res)}]
+ end
+ end
+
+ use RabbitMQ.CLI.DefaultOutput
+
+ def formatter(), do: RabbitMQ.CLI.Formatters.Table
+
+ def banner([node], _) do
+ "Shrinking quorum queues on #{node}..."
+ end
+
+ def usage, do: "shrink <node> [--errors-only]"
+
+ def usage_additional() do
+ [
+ ["<node>", "node name to remove replicas from"],
+ ["--errors-only", "only list queues which reported an error"]
+ ]
+ end
+
+ def usage_doc_guides() do
+ [
+ DocGuide.quorum_queues()
+ ]
+ end
+
+ def help_section, do: :cluster_management
+
+ def description, do: "Shrinks quorum queue clusters by removing any members (replicas) on the given node."
+
+ #
+ # Implementation
+ #
+
+ defp format_size({:ok, size}) do
+ size
+ end
+ defp format_size({:error, _size, :timeout}) do
+ # the actual size is uncertain here
+ "?"
+ end
+ defp format_size({:error, size, _}) do
+ size
+ end
+
+ defp format_result({:ok, _size}) do
+ "ok"
+ end
+ defp format_result({:error, _size, :last_node}) do
+ "error: the last node cannot be removed"
+ end
+ defp format_result({:error, _size, :timeout}) do
+ "error: the operation timed out and may not have been completed"
+ end
+ defp format_result({:error, _size, err}) do
+ :io.format "error: ~W", [err, 10]
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/streams/commands/add_replica_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/streams/commands/add_replica_command.ex
new file mode 100644
index 0000000000..8dc6da0281
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/streams/commands/add_replica_command.ex
@@ -0,0 +1,64 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Streams.Commands.AddReplicaCommand do
+ alias RabbitMQ.CLI.Core.DocGuide
+ import RabbitMQ.CLI.Core.DataCoercion
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ def merge_defaults(args, opts) do
+ {args, Map.merge(%{vhost: "/"}, opts)}
+ end
+
+ use RabbitMQ.CLI.Core.AcceptsDefaultSwitchesAndTimeout
+ use RabbitMQ.CLI.Core.AcceptsTwoPositionalArguments
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+
+ def run([name, node] = _args, %{vhost: vhost, node: node_name}) do
+ case :rabbit_misc.rpc_call(node_name, :rabbit_stream_queue, :add_replica, [
+ vhost,
+ name,
+ to_atom(node)
+ ]) do
+ {:error, :classic_queue_not_supported} ->
+ {:error, "Cannot add replicas to a classic queue"}
+
+ {:error, :quorum_queue_not_supported} ->
+ {:error, "Cannot add replicas to a quorum queue"}
+
+ other ->
+ other
+ end
+ end
+
+ use RabbitMQ.CLI.DefaultOutput
+
+ def usage, do: "add_replica [--vhost <vhost>] <queue> <node>"
+
+ def usage_additional do
+ [
+ ["<queue>", "stream queue name"],
+ ["<node>", "node to add a new replica on"]
+ ]
+ end
+
+ def usage_doc_guides() do
+ [
+ DocGuide.stream_queues()
+ ]
+ end
+
+ def help_section, do: :replication
+
+ def description, do: "Adds a stream queue replica on the given node."
+
+ def banner([name, node], _) do
+ [
+ "Adding a replica for queue #{name} on node #{node}..."
+ ]
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/streams/commands/delete_replica_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/streams/commands/delete_replica_command.ex
new file mode 100644
index 0000000000..ca15282949
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/streams/commands/delete_replica_command.ex
@@ -0,0 +1,61 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Streams.Commands.DeleteReplicaCommand do
+ alias RabbitMQ.CLI.Core.DocGuide
+ import RabbitMQ.CLI.Core.DataCoercion
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ def merge_defaults(args, opts) do
+ {args, Map.merge(%{vhost: "/"}, opts)}
+ end
+
+ use RabbitMQ.CLI.Core.AcceptsTwoPositionalArguments
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+
+ def run([name, node] = _args, %{vhost: vhost, node: node_name}) do
+ case :rabbit_misc.rpc_call(node_name, :rabbit_stream_queue, :delete_replica, [
+ vhost,
+ name,
+ to_atom(node)
+ ]) do
+ {:error, :classic_queue_not_supported} ->
+ {:error, "Cannot delete replicas from a classic queue"}
+
+ {:error, :quorum_queue_not_supported} ->
+ {:error, "Cannot delete replicas from a quorum queue"}
+
+ other ->
+ other
+ end
+ end
+
+ use RabbitMQ.CLI.DefaultOutput
+
+ def usage, do: "delete_replica [--vhost <vhost>] <queue> <node>"
+
+ def usage_additional do
+ [
+ ["<queue>", "stream queue name"],
+ ["<node>", "node to remove a replica from"]
+ ]
+ end
+
+ def usage_doc_guides() do
+ [
+ DocGuide.stream_queues()
+ ]
+ end
+
+ def help_section, do: :replication
+
+ def description, do: "Removes a stream queue replica on the given node."
+
+ def banner([name, node], _) do
+ "Removing a replica of queue #{name} on node #{node}..."
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/streams/commands/set_stream_retention_policy_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/streams/commands/set_stream_retention_policy_command.ex
new file mode 100644
index 0000000000..ee89a1ec57
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/streams/commands/set_stream_retention_policy_command.ex
@@ -0,0 +1,58 @@
+## The contents of this file are subject to the Mozilla Public License
+## Version 1.1 (the "License"); you may not use this file except in
+## compliance with the License. You may obtain a copy of the License
+## at https://www.mozilla.org/MPL/
+##
+## Software distributed under the License is distributed on an "AS IS"
+## basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+## the License for the specific language governing rights and
+## limitations under the License.
+##
+## The Original Code is RabbitMQ.
+##
+## The Initial Developer of the Original Code is GoPivotal, Inc.
+## Copyright (c) 2007-2020 Pivotal Software, Inc. All rights reserved.
+
+defmodule RabbitMQ.CLI.Streams.Commands.SetStreamRetentionPolicyCommand do
+ alias RabbitMQ.CLI.Core.DocGuide
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ def merge_defaults(args, opts), do: {args, Map.merge(%{vhost: "/"}, opts)}
+
+ use RabbitMQ.CLI.Core.AcceptsTwoPositionalArguments
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+
+ def run([name, retention_policy], %{node: node_name, vhost: vhost}) do
+ :rabbit_misc.rpc_call(node_name, :rabbit_stream_queue, :set_retention_policy, [
+ name,
+ vhost,
+ retention_policy
+ ])
+ end
+
+ use RabbitMQ.CLI.DefaultOutput
+
+ def banner([name, retention_policy], _) do
+ "Setting retention policy of stream queue #{name} to #{retention_policy} ..."
+ end
+
+ def usage, do: "set_stream_retention_policy [--vhost <vhost>] <name> <policy>"
+
+ def usage_additional() do
+ [
+ ["<name>", "stream queue name"],
+ ["<policy>", "retention policy"]
+ ]
+ end
+
+ def usage_doc_guides() do
+ [
+ DocGuide.stream_queues()
+ ]
+ end
+
+ def help_section(), do: :policies
+
+ def description(), do: "Sets the retention policy of a stream queue"
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/time_unit.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/time_unit.ex
new file mode 100644
index 0000000000..fa08c4befe
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/time_unit.ex
@@ -0,0 +1,48 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.TimeUnit do
+ require MapSet
+
+ @days_seconds 86400
+ @weeks_seconds @days_seconds * 7
+ @months_seconds @days_seconds * (365 / 12)
+ @years_seconds @days_seconds * 365
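+
+  # a month is approximated as 365/12 days, so month conversions yield floats,
+  # e.g. convert(2, "weeks") #=> 1_209_600 and convert(1, "months") #=> 2_628_000.0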
+
+ def known_units() do
+ MapSet.new([
+ "days",
+ "weeks",
+ "months",
+ "years"
+ ])
+ end
+
+ def convert(time, unit) do
+ do_convert(time, String.downcase(unit))
+ end
+
+ def known_unit?(val) do
+ MapSet.member?(known_units(), String.downcase(val))
+ end
+
+ defp do_convert(time, "days") do
+ time * @days_seconds
+ end
+
+ defp do_convert(time, "weeks") do
+ time * @weeks_seconds
+ end
+
+ defp do_convert(time, "months") do
+ time * @months_seconds
+ end
+
+ defp do_convert(time, "years") do
+ time * @years_seconds
+ end
+
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/upgrade/commands/await_online_quorum_plus_one_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/upgrade/commands/await_online_quorum_plus_one_command.ex
new file mode 100644
index 0000000000..ca00ddbbb7
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/upgrade/commands/await_online_quorum_plus_one_command.ex
@@ -0,0 +1,65 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Upgrade.Commands.AwaitOnlineQuorumPlusOneCommand do
+ alias RabbitMQ.CLI.Core.DocGuide
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ @default_timeout 120_000
+
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+ use RabbitMQ.CLI.Core.AcceptsNoPositionalArguments
+
+ def merge_defaults(args, opts) do
+ timeout =
+ case opts[:timeout] do
+ nil -> @default_timeout
+ :infinity -> @default_timeout
+ val -> val
+ end
+
+ {args, Map.put(opts, :timeout, timeout)}
+ end
+
+
+ def run([], %{node: node_name, timeout: timeout}) do
+ rpc_timeout = timeout + 500
+ case :rabbit_misc.rpc_call(node_name, :rabbit_upgrade_preparation, :await_online_quorum_plus_one, [timeout], rpc_timeout) do
+ {:error, _} = err -> err
+ {:error, _, _} = err -> err
+ {:badrpc, _} = err -> err
+
+ true -> :ok
+      false -> {:error, "timed out: some quorum queues still did not have a quorum + 1 of replicas online"}
+ end
+ end
+
+ def output({:error, msg}, %{node: node_name, formatter: "json"}) do
+ {:error, %{"result" => "error", "node" => node_name, "message" => msg}}
+ end
+ use RabbitMQ.CLI.DefaultOutput
+
+ def usage, do: "await_online_quorum_plus_one"
+
+ def usage_doc_guides() do
+ [
+ DocGuide.quorum_queues(),
+ DocGuide.upgrade()
+ ]
+ end
+
+ def help_section, do: :upgrade
+
+ def description() do
+ "Waits for all quorum queues to have an above minimum online quorum. " <>
+ "This makes sure that no queues would lose their quorum if the target node is shut down"
+ end
+
+ def banner([], %{timeout: timeout}) do
+ "Will wait for a quorum + 1 of nodes to be online for all quorum queues for #{round(timeout/1000)} seconds..."
+ end
+end
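
The timeout handling above deserves a note: merge_defaults/2 bounds the wait (a missing or :infinity timeout becomes the 120-second default, since the server-side check needs a finite deadline), and run/2 pads the RPC timeout by 500 ms so the remote call outlives the server-side wait. A sketch, assuming the command module is loaded:

    alias RabbitMQ.CLI.Upgrade.Commands.AwaitOnlineQuorumPlusOneCommand, as: Cmd

    # no timeout given: the 120s default is applied
    {[], %{timeout: 120_000}} = Cmd.merge_defaults([], %{})
    # :infinity is replaced as well, to keep the deadline finite
    {[], %{timeout: 120_000}} = Cmd.merge_defaults([], %{timeout: :infinity})
    # an explicit value is kept as is
    {[], %{timeout: 30_000}} = Cmd.merge_defaults([], %{timeout: 30_000})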
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/upgrade/commands/await_online_synchronized_mirror_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/upgrade/commands/await_online_synchronized_mirror_command.ex
new file mode 100644
index 0000000000..d6fb40bad2
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/upgrade/commands/await_online_synchronized_mirror_command.ex
@@ -0,0 +1,65 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Upgrade.Commands.AwaitOnlineSynchronizedMirrorCommand do
+ alias RabbitMQ.CLI.Core.DocGuide
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ @default_timeout 120_000
+
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+ use RabbitMQ.CLI.Core.AcceptsNoPositionalArguments
+
+ def merge_defaults(args, opts) do
+ timeout =
+ case opts[:timeout] do
+ nil -> @default_timeout
+ :infinity -> @default_timeout
+ val -> val
+ end
+
+ {args, Map.put(opts, :timeout, timeout)}
+ end
+
+
+ def run([], %{node: node_name, timeout: timeout}) do
+ rpc_timeout = timeout + 500
+ case :rabbit_misc.rpc_call(node_name, :rabbit_upgrade_preparation, :await_online_synchronised_mirrors, [timeout], rpc_timeout) do
+ {:error, _} = err -> err
+ {:error, _, _} = err -> err
+ {:badrpc, _} = err -> err
+
+ true -> :ok
+      false -> {:error, "timed out: some classic mirrored queues still did not have a synchronised mirror online"}
+ end
+ end
+
+ def output({:error, msg}, %{node: node_name, formatter: "json"}) do
+ {:error, %{"result" => "error", "node" => node_name, "message" => msg}}
+ end
+ use RabbitMQ.CLI.DefaultOutput
+
+ def usage, do: "await_online_synchronized_mirror"
+
+ def usage_doc_guides() do
+ [
+ DocGuide.mirroring(),
+ DocGuide.upgrade()
+ ]
+ end
+
+ def help_section, do: :upgrade
+
+ def description() do
+ "Waits for all classic mirrored queues hosted on the target node to have at least one synchronized mirror online. " <>
+ "This makes sure that if target node is shut down, there will be an up-to-date mirror to promote."
+ end
+
+ def banner([], %{timeout: timeout}) do
+ "Will wait for a synchronised mirror be online for all classic mirrored queues for #{round(timeout/1000)} seconds..."
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/upgrade/commands/drain_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/upgrade/commands/drain_command.ex
new file mode 100644
index 0000000000..c6d2fc86eb
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/upgrade/commands/drain_command.ex
@@ -0,0 +1,54 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Upgrade.Commands.DrainCommand do
+ @moduledoc """
+  Puts the node in maintenance mode. A node in maintenance mode does not
+  accept any new client connections, closes its existing client connections,
+  transfers leadership of locally hosted queues, and is not considered
+  for primary queue replica placement.
+
+ This command is meant to be used when automating upgrades.
+ """
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ alias RabbitMQ.CLI.Core.DocGuide
+
+ use RabbitMQ.CLI.Core.MergesNoDefaults
+ use RabbitMQ.CLI.Core.AcceptsNoPositionalArguments
+
+ def run([], %{node: node_name, timeout: timeout}) do
+ case :rabbit_misc.rpc_call(node_name, :rabbit_maintenance, :drain, [], timeout) do
+ # Server does not support maintenance mode
+ {:badrpc, {:EXIT, {:undef, _}}} -> {:error, :unsupported}
+ {:badrpc, _} = err -> err
+ other -> other
+ end
+ end
+
+ def output({:error, :unsupported}, %{node: node_name}) do
+ {:error, RabbitMQ.CLI.Core.ExitCodes.exit_usage, "Maintenance mode is not supported by node #{node_name}"}
+ end
+ use RabbitMQ.CLI.DefaultOutput
+
+ def usage, do: "drain"
+
+ def usage_doc_guides() do
+ [
+ DocGuide.upgrade()
+ ]
+ end
+
+ def help_section(), do: :upgrade
+
+ def description(), do: "Puts the node in maintenance mode. Such nodes will not serve any client traffic or host any primary queue replicas"
+
+ def banner(_, %{node: node_name}) do
+ "Will put node #{node_name} into maintenance mode. "
+ <> "The node will no longer serve any client traffic!"
+ end
+end
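
The first clause of the case expression in run/2 above is how the command detects a server that predates maintenance mode: the remote function does not exist, so the RPC exits with :undef, which run/2 maps to {:error, :unsupported} and output/2 turns into a usage-level error. A self-contained sketch of that mapping, using a stubbed RPC result (no running node required):

    # hypothetical stand-in for :rabbit_misc.rpc_call/5 against an older node
    rpc_result = {:badrpc, {:EXIT, {:undef, [{:rabbit_maintenance, :drain, [], []}]}}}

    {:error, :unsupported} =
      case rpc_result do
        {:badrpc, {:EXIT, {:undef, _}}} -> {:error, :unsupported}
        {:badrpc, _} = err -> err
        other -> other
      end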
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/upgrade/commands/post_upgrade_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/upgrade/commands/post_upgrade_command.ex
new file mode 100644
index 0000000000..76453ce9f3
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/upgrade/commands/post_upgrade_command.ex
@@ -0,0 +1,39 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Upgrade.Commands.PostUpgradeCommand do
+ alias RabbitMQ.CLI.Core.DocGuide
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ use RabbitMQ.CLI.Core.RequiresRabbitAppRunning
+ use RabbitMQ.CLI.Core.MergesNoDefaults
+ use RabbitMQ.CLI.Core.AcceptsNoPositionalArguments
+
+ def run([], %{node: node_name}) do
+ :rabbit_misc.rpc_call(node_name, :rabbit_amqqueue, :rebalance, [:all, ".*", ".*"])
+ end
+
+ use RabbitMQ.CLI.DefaultOutput
+
+ def usage, do: "post_upgrade"
+
+ def usage_doc_guides() do
+ [
+ DocGuide.upgrade()
+ ]
+ end
+
+ def help_section, do: :upgrade
+
+ def description, do: "Runs post-upgrade tasks"
+
+ def banner([], _) do
+ "Executing post upgrade tasks...\n" <>
+ "Rebalancing queue masters..."
+ end
+
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/upgrade/commands/revive_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/upgrade/commands/revive_command.ex
new file mode 100644
index 0000000000..a594561a55
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/upgrade/commands/revive_command.ex
@@ -0,0 +1,56 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Upgrade.Commands.ReviveCommand do
+ @moduledoc """
+ Puts the node out of maintenance and into regular operating mode.
+ Such nodes will again serve client traffic and be considered for
+ primary queue replica placement.
+
+ A node will automatically go into regular operational mode
+ after a restart.
+
+ This command is meant to be used when automating upgrades.
+ """
+
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+
+ alias RabbitMQ.CLI.Core.DocGuide
+
+ use RabbitMQ.CLI.Core.MergesNoDefaults
+ use RabbitMQ.CLI.Core.AcceptsNoPositionalArguments
+
+ def run([], %{node: node_name, timeout: timeout}) do
+ case :rabbit_misc.rpc_call(node_name, :rabbit_maintenance, :revive, [], timeout) do
+ # Server does not support maintenance mode
+ {:badrpc, {:EXIT, {:undef, _}}} -> {:error, :unsupported}
+ {:badrpc, _} = err -> err
+ other -> other
+ end
+ end
+
+ def output({:error, :unsupported}, %{node: node_name}) do
+ {:error, RabbitMQ.CLI.Core.ExitCodes.exit_usage, "Maintenance mode is not supported by node #{node_name}"}
+ end
+ use RabbitMQ.CLI.DefaultOutput
+
+ def usage, do: "revive"
+
+ def usage_doc_guides() do
+ [
+ DocGuide.upgrade()
+ ]
+ end
+
+ def help_section(), do: :upgrade
+
+ def description(), do: "Puts the node out of maintenance and into regular operating mode. Such nodes will again serve client traffic and host primary queue replicas"
+
+ def banner(_, %{node: node_name}) do
+ "Will put node #{node_name} back into regular operating mode. "
+ <> "The node will again serve client traffic and host primary queue replicas."
+ end
+end
diff --git a/deps/rabbitmq_cli/lib/rabbitmqctl.ex b/deps/rabbitmq_cli/lib/rabbitmqctl.ex
new file mode 100644
index 0000000000..42a0f20434
--- /dev/null
+++ b/deps/rabbitmq_cli/lib/rabbitmqctl.ex
@@ -0,0 +1,620 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQCtl do
+ alias RabbitMQ.CLI.Core.{
+ CommandModules,
+ Config,
+ Distribution,
+ ExitCodes,
+ Helpers,
+ Output,
+ Parser
+ }
+ alias RabbitMQ.CLI.{CommandBehaviour, FormatterBehaviour}
+ alias RabbitMQ.CLI.Ctl.Commands.HelpCommand
+
+ # Enable unit tests for private functions
+ @compile if Mix.env() == :test, do: :export_all
+
+ @type options() :: map()
+ @type command_result() :: {:error, ExitCodes.exit_code(), term()} | term()
+
+ def main(["--auto-complete" | []]) do
+ handle_shutdown(:ok)
+ end
+
+ def main(unparsed_command) do
+ exec_command(unparsed_command, &process_output/3)
+ |> handle_shutdown
+ end
+
+ def exec_command([] = unparsed_command, _) do
+ {_args, parsed_options, _} = Parser.parse_global(unparsed_command)
+
+ # this invocation is considered to be invalid. curl and grep do the
+ # same thing.
+ {:error, ExitCodes.exit_usage(), Enum.join(HelpCommand.all_usage(parsed_options), "")};
+ end
+ def exec_command(["--help"] = unparsed_command, _) do
+ {_args, parsed_options, _} = Parser.parse_global(unparsed_command)
+
+  # the user asked for --help and we are displaying it to them,
+ # reporting a success
+ {:ok, ExitCodes.exit_ok(), Enum.join(HelpCommand.all_usage(parsed_options), "")};
+ end
+ def exec_command(["--version"] = _unparsed_command, opts) do
+ # rewrite `--version` as `version`
+ exec_command(["version"], opts)
+ end
+ def exec_command(["--auto-complete" | args], opts) do
+ # rewrite `--auto-complete` as `autocomplete`
+ exec_command(["autocomplete" | args], opts)
+ end
+ def exec_command(unparsed_command, output_fun) do
+ {command, command_name, arguments, parsed_options, invalid} = Parser.parse(unparsed_command)
+
+ case {command, invalid} do
+ {:no_command, _} ->
+ command_not_found_string =
+ case command_name do
+ "" -> ""
+ _ -> "\nCommand '#{command_name}' not found. \n"
+ end
+
+ usage_string =
+ command_not_found_string <>
+ Enum.join(HelpCommand.all_usage(parsed_options), "")
+
+ {:error, ExitCodes.exit_usage(), usage_string}
+
+ {{:suggest, suggested}, _} ->
+ suggest_message =
+ "\nCommand '#{command_name}' not found. \n" <>
+ "Did you mean '#{suggested}'? \n"
+
+ {:error, ExitCodes.exit_usage(), suggest_message}
+
+ {_, [_ | _]} ->
+ argument_validation_error_output(
+ {:bad_option, invalid},
+ command,
+ unparsed_command,
+ parsed_options
+ )
+
+ _ ->
+ options = parsed_options |> merge_all_defaults |> normalise_options
+
+ try do
+ do_exec_parsed_command(unparsed_command, output_fun, arguments, command, options)
+ catch error_type, error ->
+ maybe_print_stacktrace(error_type, error, __STACKTRACE__, options)
+ format_error(error, options, command)
+ end
+ end
+ end
+
+ def do_exec_parsed_command(unparsed_command, output_fun, arguments, command, options) do
+ case options[:help] do
+ true ->
+ {:ok, ExitCodes.exit_ok(), HelpCommand.command_usage(command, options)};
+ _ ->
+ {arguments, options} = command.merge_defaults(arguments, options)
+
+ maybe_with_distribution(command, options, fn ->
+ # rabbitmq/rabbitmq-cli#278
+ case Helpers.normalise_node_option(options) do
+ {:error, _} = err ->
+ format_error(err, options, command)
+ {:ok, options} ->
+ # The code below implements a tiny decision tree that has
+ # to do with CLI argument and environment state validation.
+ case command.validate(arguments, options) do
+ :ok ->
+ # then optionally validate execution environment
+ case CommandBehaviour.validate_execution_environment(command, arguments, options) do
+ :ok ->
+ result = proceed_to_execution(command, arguments, options)
+ handle_command_output(result, command, options, output_fun)
+
+ {:validation_failure, err} ->
+ environment_validation_error_output(err, command, unparsed_command, options)
+
+ {:error, _} = err ->
+ format_error(err, options, command)
+ end
+
+ {:validation_failure, err} ->
+ argument_validation_error_output(err, command, unparsed_command, options)
+
+ {:error, _} = err ->
+ format_error(err, options, command)
+ end
+ end
+ end)
+ end
+ end
+
+ defp proceed_to_execution(command, arguments, options) do
+ maybe_print_banner(command, arguments, options)
+ maybe_run_command(command, arguments, options)
+ end
+
+ defp maybe_run_command(_, _, %{dry_run: true}) do
+ :ok
+ end
+
+ defp maybe_run_command(command, arguments, options) do
+ try do
+ command.run(arguments, options) |> command.output(options)
+ catch error_type, error ->
+ maybe_print_stacktrace(error_type, error, __STACKTRACE__, options)
+ format_error(error, options, command)
+ end
+ end
+
+ def maybe_print_stacktrace(error_type, :undef = error, stacktrace, _opts) do
+ do_print_stacktrace(error_type, error, stacktrace)
+ end
+ def maybe_print_stacktrace(error_type, :function_clause = error, stacktrace, _opts) do
+ do_print_stacktrace(error_type, error, stacktrace)
+ end
+ def maybe_print_stacktrace(error_type, :badarg = error, stacktrace, _opts) do
+ do_print_stacktrace(error_type, error, stacktrace)
+ end
+ def maybe_print_stacktrace(error_type, {:case_clause, _val} = error, stacktrace, _opts) do
+ do_print_stacktrace(error_type, error, stacktrace)
+ end
+ def maybe_print_stacktrace(error_type, error, stacktrace, %{print_stacktrace: true}) do
+ do_print_stacktrace(error_type, error, stacktrace)
+ end
+ def maybe_print_stacktrace(_error_type, _error, _stacktrace, %{print_stacktrace: false}) do
+ nil
+ end
+ def maybe_print_stacktrace(_error_type, _error, _stacktrace, _opts) do
+ nil
+ end
+
+ defp do_print_stacktrace(error_type, error, stacktrace) do
+ IO.puts("Stack trace: \n")
+ IO.puts(Exception.format(error_type, error, stacktrace))
+ end
+
+ def handle_command_output(output, command, options, output_fun) do
+ case output do
+ {:error, _, _} = err ->
+ format_error(err, options, command)
+
+ {:error, _} = err ->
+ format_error(err, options, command)
+
+ _ ->
+ output_fun.(output, command, options)
+ end
+ end
+
+ defp process_output(output, command, options) do
+ formatter = Config.get_formatter(command, options)
+ printer = Config.get_printer(command, options)
+
+ output
+ |> Output.format_output(formatter, options)
+ |> Output.print_output(printer, options)
+ |> case do
+ {:error, _} = err -> format_error(err, options, command)
+ other -> other
+ end
+ end
+
+ defp output_device(exit_code) do
+ case exit_code == ExitCodes.exit_ok() do
+ true -> :stdio
+ false -> :stderr
+ end
+ end
+
+ defp handle_shutdown({:error, exit_code, nil}) do
+ exit_program(exit_code)
+ end
+
+ defp handle_shutdown({_, exit_code, output}) do
+ device = output_device(exit_code)
+
+ for line <- List.flatten([output]) do
+ IO.puts(device, Helpers.string_or_inspect(line))
+ end
+
+ exit_program(exit_code)
+ end
+
+ defp handle_shutdown(_) do
+ exit_program(ExitCodes.exit_ok())
+ end
+
+ def merge_all_defaults(%{} = options) do
+ options
+ |> merge_defaults_node
+ |> merge_defaults_timeout
+ |> merge_defaults_longnames
+ end
+
+ defp merge_defaults_node(%{} = opts) do
+ longnames_opt = Config.get_option(:longnames, opts)
+ try do
+ default_rabbit_nodename = Helpers.get_rabbit_hostname(longnames_opt)
+ Map.merge(%{node: default_rabbit_nodename}, opts)
+ catch _error_type, _err ->
+ opts
+ end
+ end
+
+ defp merge_defaults_timeout(%{} = opts), do: Map.merge(%{timeout: :infinity}, opts)
+
+ defp merge_defaults_longnames(%{} = opts), do: Map.merge(%{longnames: false}, opts)
+
+ defp normalise_options(opts) do
+ opts |> normalise_timeout
+ end
+
+ defp normalise_timeout(%{timeout: timeout} = opts)
+ when is_integer(timeout) do
+ Map.put(opts, :timeout, timeout * 1000)
+ end
+
+ defp normalise_timeout(opts) do
+ opts
+ end
+
+ # Suppress banner if --quiet option is provided
+ defp maybe_print_banner(_, _, %{quiet: true}) do
+ nil
+ end
+
+ # Suppress banner if --silent option is provided
+ defp maybe_print_banner(_, _, %{silent: true}) do
+ nil
+ end
+
+ defp maybe_print_banner(command, args, opts) do
+ # Suppress banner if a machine-readable formatter is used
+ formatter = Map.get(opts, :formatter)
+ case FormatterBehaviour.machine_readable?(formatter) do
+ true -> nil
+ false ->
+ case command.banner(args, opts) do
+ nil ->
+ nil
+
+ banner ->
+ case banner do
+ list when is_list(list) ->
+ for line <- list, do: IO.puts(line)
+
+ binary when is_binary(binary) ->
+ IO.puts(binary)
+ end
+ end
+ end
+ end
+
+ def argument_validation_error_output(err_detail, command, unparsed_command, options) do
+ err = format_validation_error(err_detail)
+
+ base_error =
+ "Error (argument validation): #{err}\nArguments given:\n\t#{
+ unparsed_command |> Enum.join(" ")
+ }"
+
+ validation_error_output(err_detail, base_error, command, options)
+ end
+
+ def environment_validation_error_output(err_detail, command, unparsed_command, options) do
+ err = format_validation_error(err_detail)
+ base_error = "Error: #{err}\nArguments given:\n\t#{unparsed_command |> Enum.join(" ")}"
+ validation_error_output(err_detail, base_error, command, options)
+ end
+
+ defp validation_error_output(err_detail, base_error, command, options) do
+ usage = HelpCommand.base_usage(command, options)
+ message = base_error <> "\n" <> usage
+ {:error, ExitCodes.exit_code_for({:validation_failure, err_detail}), message}
+ end
+
+ defp format_validation_error(:not_enough_args), do: "not enough arguments."
+ defp format_validation_error({:not_enough_args, detail}), do: "not enough arguments: #{detail}"
+ defp format_validation_error(:too_many_args), do: "too many arguments."
+ defp format_validation_error({:too_many_args, detail}), do: "too many arguments: #{detail}"
+  defp format_validation_error(:bad_argument), do: "bad argument."
+  defp format_validation_error({:bad_argument, detail}), do: "bad argument: #{detail}"
+
+ defp format_validation_error({:unsupported_target, details}) do
+ details
+ end
+
+ defp format_validation_error({:bad_option, opts}) do
+ header = "Invalid options for this command:"
+
+ Enum.join(
+ [
+ header
+ | for {key, val} <- opts do
+ "#{key} : #{val}"
+ end
+ ],
+ "\n"
+ )
+ end
+
+ defp format_validation_error({:bad_info_key, keys}),
+ do: "Info key(s) #{Enum.join(keys, ",")} are not supported"
+
+ defp format_validation_error(:rabbit_app_is_stopped),
+ do:
+ "this command requires the 'rabbit' app to be running on the target node. Start it with 'rabbitmqctl start_app'."
+
+ defp format_validation_error(:rabbit_app_is_running),
+ do:
+ "this command requires the 'rabbit' app to be stopped on the target node. Stop it with 'rabbitmqctl stop_app'."
+
+ defp format_validation_error(:node_running),
+ do: "this command requires the target node to be stopped."
+
+ defp format_validation_error(:node_not_running),
+ do: "this command requires the target node to be running."
+
+ defp format_validation_error(:unsupported_formatter),
+ do: "the requested formatter is not supported by this command"
+
+ defp format_validation_error(err), do: inspect(err)
+
+ defp exit_program(code) do
+ :net_kernel.stop()
+ exit({:shutdown, code})
+ end
+
+ defp format_error({:error, {:node_name, :hostname_not_allowed}}, _, _) do
+ {:error, ExitCodes.exit_dataerr(),
+ "Unsupported node name: hostname is invalid (possibly contains unsupported characters).\nIf using FQDN node names, use the -l / --longnames argument."}
+ end
+ defp format_error({:error, {:node_name, :invalid_node_name_head}}, _, _) do
+ {:error, ExitCodes.exit_dataerr(),
+ "Unsupported node name: node name head (the part before the @) is invalid. Only alphanumerics, _ and - characters are allowed.\nIf using FQDN node names, use the -l / --longnames argument"}
+ end
+ defp format_error({:error, {:node_name, err_reason} = result}, opts, module) do
+ op = CommandModules.module_to_command(module)
+ node = opts[:node] || "(failed to parse or compute default value)"
+ {:error, ExitCodes.exit_code_for(result),
+ "Error: operation #{op} failed due to invalid node name (node: #{node}, reason: #{err_reason}).\nIf using FQDN node names, use the -l / --longnames argument"}
+ end
+
+ defp format_error({:error, {:badrpc_multi, :nodedown, [node | _]} = result}, opts, _) do
+ diagnostics = get_node_diagnostics(node)
+
+ {:error, ExitCodes.exit_code_for(result),
+ badrpc_error_message_header(node, opts) <> diagnostics}
+ end
+
+ defp format_error({:error, {:badrpc_multi, :timeout, [node | _]} = result}, opts, module) do
+ op = CommandModules.module_to_command(module)
+
+ {:error, ExitCodes.exit_code_for(result),
+ "Error: operation #{op} on node #{node} timed out. Timeout value used: #{opts[:timeout]}"}
+ end
+
+ defp format_error({:error, {:badrpc, :nodedown} = result}, opts, _) do
+ diagnostics = get_node_diagnostics(opts[:node])
+
+ {:error, ExitCodes.exit_code_for(result),
+ badrpc_error_message_header(opts[:node], opts) <> diagnostics}
+ end
+
+ defp format_error({:error, {:badrpc, :timeout} = result}, opts, module) do
+ op = CommandModules.module_to_command(module)
+
+ {:error, ExitCodes.exit_code_for(result),
+ "Error: operation #{op} on node #{opts[:node]} timed out. Timeout value used: #{
+ opts[:timeout]
+ }"}
+ end
+
+ defp format_error({:error, {:badrpc, {:timeout, to}} = result}, opts, module) do
+ op = CommandModules.module_to_command(module)
+
+ {:error, ExitCodes.exit_code_for(result),
+ "Error: operation #{op} on node #{opts[:node]} timed out. Timeout value used: #{to}"}
+ end
+
+ defp format_error({:error, {:badrpc, {:timeout, to, warning}}}, opts, module) do
+ op = CommandModules.module_to_command(module)
+
+ {:error, ExitCodes.exit_code_for({:timeout, to}),
+ "Error: operation #{op} on node #{opts[:node]} timed out. Timeout value used: #{to}. #{
+ warning
+ }"}
+ end
+
+ defp format_error({:error, {:no_such_vhost, vhost} = result}, _opts, _) do
+ {:error, ExitCodes.exit_code_for(result), "Virtual host '#{vhost}' does not exist"}
+ end
+
+ defp format_error({:error, {:incompatible_version, local_version, remote_version} = result}, _opts, _) do
+ {:error, ExitCodes.exit_code_for(result), "Detected potential version incompatibility. CLI tool version: #{local_version}, server: #{remote_version}"}
+ end
+
+ defp format_error({:error, {:timeout, to} = result}, opts, module) do
+ op = CommandModules.module_to_command(module)
+
+ {:error, ExitCodes.exit_code_for(result),
+ "Error: operation #{op} on node #{opts[:node]} timed out. Timeout value used: #{to}"}
+ end
+
+ defp format_error({:error, :timeout = result}, opts, module) do
+ op = CommandModules.module_to_command(module)
+
+ {:error, ExitCodes.exit_code_for(result),
+ "Error: operation #{op} on node #{opts[:node]} timed out. Timeout value used: #{opts[:timeout]}"}
+ end
+
+ defp format_error({:error, :timeout, msg}, opts, module) do
+ op = CommandModules.module_to_command(module)
+
+ {:error, ExitCodes.exit_code_for(:timeout),
+ "Error: operation #{op} on node #{opts[:node]} timed out: #{msg}. Timeout value used: #{opts[:timeout]}"}
+ end
+
+ # Plugins
+ defp format_error({:error, {:enabled_plugins_mismatch, cli_path, node_path}}, opts, _module) do
+ {:error, ExitCodes.exit_dataerr(),
+ "Could not update enabled plugins file at #{cli_path}: target node #{opts[:node]} uses a different path (#{
+ node_path
+ })"}
+ end
+
+ defp format_error({:error, {:cannot_read_enabled_plugins_file, path, :eacces}}, _opts, _module) do
+ {:error, ExitCodes.exit_dataerr(),
+ "Could not read enabled plugins file at #{path}: the file does not exist or permission was denied (EACCES)"}
+ end
+
+ defp format_error({:error, {:cannot_read_enabled_plugins_file, path, :enoent}}, _opts, _module) do
+ {:error, ExitCodes.exit_dataerr(),
+ "Could not read enabled plugins file at #{path}: the file does not exist (ENOENT)"}
+ end
+
+ defp format_error({:error, {:cannot_write_enabled_plugins_file, path, :eacces}}, _opts, _module) do
+ {:error, ExitCodes.exit_dataerr(),
+ "Could not update enabled plugins file at #{path}: the file does not exist or permission was denied (EACCES)"}
+ end
+
+ defp format_error({:error, {:cannot_write_enabled_plugins_file, path, :enoent}}, _opts, _module) do
+ {:error, ExitCodes.exit_dataerr(),
+ "Could not update enabled plugins file at #{path}: the file does not exist (ENOENT)"}
+ end
+
+ # Special case health checks. This makes it easier to change
+ # output of all health checks at once.
+ defp format_error({:error, :check_failed}, %{formatter: "json"}, _) do
+ {:error, ExitCodes.exit_unavailable(), nil}
+ end
+ defp format_error({:error, :check_failed}, _, _) do
+ {:error, ExitCodes.exit_unavailable(), nil}
+ end
+ defp format_error({:error, :check_failed, err}, %{formatter: "json"}, _) when is_map(err) do
+ {:ok, res} = JSON.encode(err)
+ {:error, ExitCodes.exit_unavailable(), res}
+ end
+ defp format_error({:error, :check_failed, err}, %{formatter: "json"}, _) do
+ {:error, ExitCodes.exit_unavailable(), err}
+ end
+ defp format_error({:error, :check_failed, err}, _, _) do
+ {:error, ExitCodes.exit_unavailable(), err}
+ end
+
+ defp format_error({:error, nil}, _, _) do
+ # the command intends to produce no output, e.g. a return code
+ # is sufficient
+ {:error, ExitCodes.exit_unavailable(), nil}
+ end
+
+ # Catch all clauses
+ defp format_error({:error, err}, %{formatter: "json"}, _) when is_map(err) do
+ {:ok, res} = JSON.encode(err)
+ {:error, ExitCodes.exit_unavailable(), res}
+ end
+ defp format_error({:error, exit_code, err}, %{formatter: "json"}, _) when is_map(err) do
+ {:ok, res} = JSON.encode(err)
+ {:error, exit_code, res}
+ end
+
+ defp format_error({:error, exit_code, err}, _, _) do
+ string_err = Helpers.string_or_inspect(err)
+
+ {:error, exit_code, "Error:\n#{string_err}"}
+ end
+
+ defp format_error({:error, err} = result, _, _) do
+ string_err = Helpers.string_or_inspect(err)
+
+ {:error, ExitCodes.exit_code_for(result), "Error:\n#{string_err}"}
+ end
+
+ defp format_error(error, _opts, _module) do
+ {:error, ExitCodes.exit_software(), "#{inspect(error)}\n"}
+ end
+
+ defp get_node_diagnostics(nil) do
+ "Target node is not defined"
+ end
+
+ defp get_node_diagnostics(node_name) do
+ to_string(:rabbit_nodes_common.diagnostics([node_name]))
+ end
+
+ defp badrpc_error_message_header(node, _opts) do
+ """
+ Error: unable to perform an operation on node '#{node}'. Please see diagnostics information and suggestions below.
+
+ Most common reasons for this are:
+
+ * Target node is unreachable (e.g. due to hostname resolution, TCP connection or firewall issues)
+ * CLI tool fails to authenticate with the server (e.g. due to CLI tool's Erlang cookie not matching that of the server)
+ * Target node is not running
+
+ In addition to the diagnostics info below:
+
+ * See the CLI, clustering and networking guides on https://rabbitmq.com/documentation.html to learn more
+ * Consult server logs on node #{node}
+ * If target node is configured to use long node names, don't forget to use --longnames with CLI tools
+ """
+ end
+
+  ## Tries to enable Erlang distribution, which can be configured
+  ## via the distribution callback in the command as :cli, :none or {:fun, fun}.
+  ## :cli - use the default rabbitmqctl node name
+  ## :none - do not start distribution (e.g. for offline commands)
+  ## {:fun, fun} - run a custom function to enable distribution;
+  ## this is useful for commands that need a specific node name.
+  ## Runs the given code if distribution is successful, or not needed.
+ @spec maybe_with_distribution(module(), options(), (() -> command_result())) :: command_result()
+ defp maybe_with_distribution(command, options, code) do
+ try do
+ maybe_with_distribution_without_catch(command, options, code)
+ catch error_type, error ->
+ maybe_print_stacktrace(error_type, error, __STACKTRACE__, options)
+ format_error(error, options, command)
+ end
+ end
+
+ defp maybe_with_distribution_without_catch(command, options, code) do
+ distribution_type = CommandBehaviour.distribution(command, options)
+
+ case distribution_type do
+ :none ->
+ code.()
+
+ :cli ->
+ case Distribution.start(options) do
+ :ok ->
+ code.()
+
+ {:ok, _} ->
+ code.()
+
+ {:error, reason} ->
+ {:error, ExitCodes.exit_config(), "Distribution failed: #{inspect(reason)}"}
+ end
+
+ {:fun, fun} ->
+ case fun.(options) do
+ :ok ->
+ code.()
+
+ {:error, reason} ->
+ {:error, ExitCodes.exit_config(), "Distribution failed: #{inspect(reason)}"}
+ end
+ end
+ end
+end
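
One subtlety in the option pipeline above: merge_all_defaults/1 fills in the node name, a :timeout of :infinity and longnames: false, after which normalise_options/1 converts an integer --timeout, given in seconds on the command line, into milliseconds. A self-contained sketch mirroring normalise_timeout/1:

    # integer timeouts arrive in seconds and are converted to milliseconds;
    # the merged default of :infinity passes through untouched
    normalise = fn
      %{timeout: t} = opts when is_integer(t) -> Map.put(opts, :timeout, t * 1000)
      opts -> opts
    end

    %{timeout: 60_000} = normalise.(%{timeout: 60})
    %{timeout: :infinity} = normalise.(%{timeout: :infinity})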
diff --git a/deps/rabbitmq_cli/mix.exs b/deps/rabbitmq_cli/mix.exs
new file mode 100644
index 0000000000..09bbda3846
--- /dev/null
+++ b/deps/rabbitmq_cli/mix.exs
@@ -0,0 +1,209 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQCtl.MixfileBase do
+ use Mix.Project
+
+ def project do
+ [
+ app: :rabbitmqctl,
+ version: "3.8.0-dev",
+ elixir: ">= 1.10.4 and < 1.12.0",
+ build_embedded: Mix.env == :prod,
+ start_permanent: Mix.env == :prod,
+ escript: [main_module: RabbitMQCtl,
+ emu_args: "-hidden",
+ path: "escript/rabbitmqctl"],
+ deps: deps(),
+ aliases: aliases(),
+ xref: [
+ exclude: [
+ CSV,
+ CSV.Encode,
+ JSON,
+ :mnesia,
+ :msacc,
+ :observer_cli,
+ :public_key,
+ :pubkey_cert,
+ :rabbit,
+ :rabbit_control_misc,
+ :rabbit_data_coercion,
+ :rabbit_env,
+ :rabbit_event,
+ :rabbit_file,
+ :rabbit_net,
+ :rabbit_lager,
+ :rabbit_log,
+ :rabbit_misc,
+ :rabbit_mnesia,
+ :rabbit_mnesia_rename,
+ :rabbit_nodes_common,
+ :rabbit_pbe,
+ :rabbit_plugins,
+ :rabbit_resource_monitor_misc,
+ :stdout_formatter
+ ]
+ ]
+ ]
+ end
+
+ # Configuration for the OTP application
+ #
+ # Type "mix help compile.app" for more information
+ def application do
+ [applications: [:logger],
+ env: [scopes: ['rabbitmq-plugins': :plugins,
+ rabbitmqctl: :ctl,
+ 'rabbitmq-diagnostics': :diagnostics,
+ 'rabbitmq-queues': :queues,
+ 'rabbitmq-streams': :streams,
+ 'rabbitmq-upgrade': :upgrade]]
+ ]
+ |> add_modules(Mix.env)
+ end
+
+
+ defp add_modules(app, :test) do
+ # There are issues with building a package without this line ¯\_(ツ)_/¯
+ Mix.Project.get
+ path = Mix.Project.compile_path
+ mods = modules_from(Path.wildcard("#{path}/*.beam"))
+ test_modules = [RabbitMQ.CLI.Ctl.Commands.DuckCommand,
+ RabbitMQ.CLI.Ctl.Commands.GrayGooseCommand,
+ RabbitMQ.CLI.Ctl.Commands.UglyDucklingCommand,
+ RabbitMQ.CLI.Plugins.Commands.StorkCommand,
+ RabbitMQ.CLI.Plugins.Commands.HeronCommand,
+ RabbitMQ.CLI.Custom.Commands.CrowCommand,
+ RabbitMQ.CLI.Custom.Commands.RavenCommand,
+ RabbitMQ.CLI.Seagull.Commands.SeagullCommand,
+ RabbitMQ.CLI.Seagull.Commands.PacificGullCommand,
+ RabbitMQ.CLI.Seagull.Commands.HerringGullCommand,
+ RabbitMQ.CLI.Seagull.Commands.HermannGullCommand,
+ RabbitMQ.CLI.Wolf.Commands.CanisLupusCommand,
+ RabbitMQ.CLI.Wolf.Commands.CanisLatransCommand,
+ RabbitMQ.CLI.Wolf.Commands.CanisAureusCommand
+ ]
+ [{:modules, mods ++ test_modules |> Enum.sort} | app]
+ end
+ defp add_modules(app, _) do
+ app
+ end
+
+ defp modules_from(beams) do
+ Enum.map beams, &(&1 |> Path.basename |> Path.rootname(".beam") |> String.to_atom)
+ end
+
+ # Dependencies can be Hex packages:
+ #
+ # {:mydep, "~> 0.3.0"}
+ #
+ # Or git/path repositories:
+ #
+ # {:mydep, git: "https://github.com/elixir-lang/mydep.git", tag: "0.1.0"}
+ #
+ # Type "mix help deps" for more examples and options
+ #
+  # CAUTION: Dependencies which are shipped with RabbitMQ *MUST* come
+  # from Hex.pm! It is ok to fetch dependencies from Git only if they
+  # are test dependencies or fetched temporarily while testing a patch.
+  # But that's about it. If in doubt, use Hex.pm!
+ #
+ # The reason is that we have some Makefile code to put dependencies
+ # from Hex.pm in RabbitMQ source archive (the source archive must be
+ # self-contained and RabbitMQ must be buildable offline). However, we
+ # don't have the equivalent for other methods.
+ defp deps() do
+ elixir_deps = [
+ {:json, "~> 1.2.0"},
+ {:csv, "~> 2.3.0"},
+ {:stdout_formatter, "~> 0.2.3"},
+ {:observer_cli, "~> 1.5.0"},
+
+ {:amqp, "~> 1.2.0", only: :test},
+ {:dialyxir, "~> 0.5", only: :test, runtime: false},
+ {:temp, "~> 0.4", only: :test},
+ {:x509, "~> 0.7", only: :test}
+ ]
+
+ rabbitmq_deps = case System.get_env("DEPS_DIR") do
+ nil ->
+ # rabbitmq_cli is built as a standalone Elixir application.
+ [
+ {:rabbit_common, "~> 3.7.0"},
+ {:amqp_client, "~> 3.7.0", only: :test}
+ ]
+ deps_dir ->
+ # rabbitmq_cli is built as part of RabbitMQ.
+
+ # Mix is confused by any `rebar.{config,lock}` we might have left in
+ # `rabbit_common` or `amqp_client`. So just remove those files to be
+ # safe, as they are generated when we publish to Hex.pm only.
+ for dir <- ["rabbit_common", "amqp_client"] do
+ for file <- ["rebar.config", "rebar.lock"] do
+ File.rm(Path.join([deps_dir, dir, file]))
+ end
+ end
+
+ # We disable compilation for rabbit_common and amqp_client
+ # because Erlang.mk already built them.
+ [
+ {
+ :rabbit_common,
+ path: Path.join(deps_dir, "rabbit_common"),
+ compile: false,
+ override: true
+ },
+ {
+ :goldrush,
+ path: Path.join(deps_dir, "goldrush"),
+ compile: false,
+ override: true
+ },
+ {
+ :lager,
+ path: Path.join(deps_dir, "lager"),
+ compile: false,
+ override: true
+ },
+ {
+ :amqp_client,
+ path: Path.join(deps_dir, "amqp_client"),
+ compile: false,
+ override: true,
+ only: :test
+ },
+ ]
+ end
+
+ elixir_deps ++ rabbitmq_deps
+ end
+
+ defp aliases do
+ [
+ make_deps: [
+ "deps.get",
+ "deps.compile",
+ ],
+ make_app: [
+ "compile",
+ "escript.build",
+ ],
+ make_all: [
+ "deps.get",
+ "deps.compile",
+ "compile",
+ "escript.build",
+ ],
+ make_all_in_src_archive: [
+ "deps.get --only prod",
+ "deps.compile",
+ "compile",
+ "escript.build",
+ ],
+ ]
+ end
+end
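
The :scopes application environment declared in application/0 above maps each escript name to a command scope; this is how a single code base backs rabbitmqctl, rabbitmq-plugins, rabbitmq-streams and the other tools. A sketch of the lookup, assuming the :rabbitmqctl application environment is loaded:

    scopes = Application.get_env(:rabbitmqctl, :scopes, [])
    :streams = scopes[:"rabbitmq-streams"]
    :ctl = scopes[:rabbitmqctl]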
diff --git a/deps/rabbitmq_cli/rabbitmq-components.mk b/deps/rabbitmq_cli/rabbitmq-components.mk
new file mode 100644
index 0000000000..b2a3be8b35
--- /dev/null
+++ b/deps/rabbitmq_cli/rabbitmq-components.mk
@@ -0,0 +1,359 @@
+ifeq ($(.DEFAULT_GOAL),)
+# Define default goal to `all` because this file defines some targets
+# before the inclusion of erlang.mk leading to the wrong target becoming
+# the default.
+.DEFAULT_GOAL = all
+endif
+
+# PROJECT_VERSION defaults to:
+# 1. the version exported by rabbitmq-server-release;
+# 2. the version stored in `git-revisions.txt`, if it exists;
+# 3. a version based on git-describe(1), if it is a Git clone;
+# 4. 0.0.0
+
+PROJECT_VERSION := $(RABBITMQ_VERSION)
+
+ifeq ($(PROJECT_VERSION),)
+PROJECT_VERSION := $(shell \
+if test -f git-revisions.txt; then \
+ head -n1 git-revisions.txt | \
+ awk '{print $$$(words $(PROJECT_DESCRIPTION) version);}'; \
+else \
+ (git describe --dirty --abbrev=7 --tags --always --first-parent \
+ 2>/dev/null || echo rabbitmq_v0_0_0) | \
+ sed -e 's/^rabbitmq_v//' -e 's/^v//' -e 's/_/./g' -e 's/-/+/' \
+ -e 's/-/./g'; \
+fi)
+endif
+
+# --------------------------------------------------------------------
+# RabbitMQ components.
+# --------------------------------------------------------------------
+
+# For RabbitMQ repositories, we want to checkout branches which match
+# the parent project. For instance, if the parent project is on a
+# release tag, dependencies must be on the same release tag. If the
+# parent project is on a topic branch, dependencies must be on the same
+# topic branch or fallback to `stable` or `master` whichever was the
+# base of the topic branch.
+
+dep_amqp_client = git_rmq rabbitmq-erlang-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_amqp10_client = git_rmq rabbitmq-amqp1.0-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_amqp10_common = git_rmq rabbitmq-amqp1.0-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbit = git_rmq rabbitmq-server $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbit_common = git_rmq rabbitmq-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_amqp1_0 = git_rmq rabbitmq-amqp1.0 $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_amqp = git_rmq rabbitmq-auth-backend-amqp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_cache = git_rmq rabbitmq-auth-backend-cache $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_http = git_rmq rabbitmq-auth-backend-http $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_ldap = git_rmq rabbitmq-auth-backend-ldap $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_oauth2 = git_rmq rabbitmq-auth-backend-oauth2 $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_mechanism_ssl = git_rmq rabbitmq-auth-mechanism-ssl $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_aws = git_rmq rabbitmq-aws $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_boot_steps_visualiser = git_rmq rabbitmq-boot-steps-visualiser $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_cli = git_rmq rabbitmq-cli $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_codegen = git_rmq rabbitmq-codegen $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_consistent_hash_exchange = git_rmq rabbitmq-consistent-hash-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_ct_client_helpers = git_rmq rabbitmq-ct-client-helpers $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_ct_helpers = git_rmq rabbitmq-ct-helpers $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_delayed_message_exchange = git_rmq rabbitmq-delayed-message-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_dotnet_client = git_rmq rabbitmq-dotnet-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_event_exchange = git_rmq rabbitmq-event-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_federation = git_rmq rabbitmq-federation $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_federation_management = git_rmq rabbitmq-federation-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_java_client = git_rmq rabbitmq-java-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_client = git_rmq rabbitmq-jms-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_cts = git_rmq rabbitmq-jms-cts $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_topic_exchange = git_rmq rabbitmq-jms-topic-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_lvc_exchange = git_rmq rabbitmq-lvc-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management = git_rmq rabbitmq-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_agent = git_rmq rabbitmq-management-agent $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_exchange = git_rmq rabbitmq-management-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_themes = git_rmq rabbitmq-management-themes $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_message_timestamp = git_rmq rabbitmq-message-timestamp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_metronome = git_rmq rabbitmq-metronome $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_mqtt = git_rmq rabbitmq-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_objc_client = git_rmq rabbitmq-objc-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_aws = git_rmq rabbitmq-peer-discovery-aws $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_common = git_rmq rabbitmq-peer-discovery-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_consul = git_rmq rabbitmq-peer-discovery-consul $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_etcd = git_rmq rabbitmq-peer-discovery-etcd $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_k8s = git_rmq rabbitmq-peer-discovery-k8s $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_prometheus = git_rmq rabbitmq-prometheus $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_random_exchange = git_rmq rabbitmq-random-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_recent_history_exchange = git_rmq rabbitmq-recent-history-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_routing_node_stamp = git_rmq rabbitmq-routing-node-stamp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_rtopic_exchange = git_rmq rabbitmq-rtopic-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_server_release = git_rmq rabbitmq-server-release $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_sharding = git_rmq rabbitmq-sharding $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_shovel = git_rmq rabbitmq-shovel $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_shovel_management = git_rmq rabbitmq-shovel-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_stomp = git_rmq rabbitmq-stomp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_stream = git_rmq rabbitmq-stream $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_toke = git_rmq rabbitmq-toke $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_top = git_rmq rabbitmq-top $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_tracing = git_rmq rabbitmq-tracing $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_trust_store = git_rmq rabbitmq-trust-store $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_test = git_rmq rabbitmq-test $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_dispatch = git_rmq rabbitmq-web-dispatch $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_stomp = git_rmq rabbitmq-web-stomp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_stomp_examples = git_rmq rabbitmq-web-stomp-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt = git_rmq rabbitmq-web-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt_examples = git_rmq rabbitmq-web-mqtt-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_website = git_rmq rabbitmq-website $(current_rmq_ref) $(base_rmq_ref) live master
+dep_toke = git_rmq toke $(current_rmq_ref) $(base_rmq_ref) master
+
+dep_rabbitmq_public_umbrella = git_rmq rabbitmq-public-umbrella $(current_rmq_ref) $(base_rmq_ref) master
+
+# Third-party dependencies version pinning.
+#
+# We do that in this file, which is copied in all projects, to ensure
+# all projects use the same versions. It avoids conflicts and makes it
+# possible to work with rabbitmq-public-umbrella.
+
+dep_accept = hex 0.3.5
+dep_cowboy = hex 2.8.0
+dep_cowlib = hex 2.9.1
+dep_jsx = hex 2.11.0
+dep_lager = hex 3.8.0
+dep_prometheus = git https://github.com/deadtrickster/prometheus.erl.git master
+dep_ra = git https://github.com/rabbitmq/ra.git master
+dep_ranch = hex 1.7.1
+dep_recon = hex 2.5.1
+dep_observer_cli = hex 1.5.4
+dep_stdout_formatter = hex 0.2.4
+dep_sysmon_handler = hex 1.3.0
+
+RABBITMQ_COMPONENTS = amqp_client \
+ amqp10_common \
+ amqp10_client \
+ rabbit \
+ rabbit_common \
+ rabbitmq_amqp1_0 \
+ rabbitmq_auth_backend_amqp \
+ rabbitmq_auth_backend_cache \
+ rabbitmq_auth_backend_http \
+ rabbitmq_auth_backend_ldap \
+ rabbitmq_auth_backend_oauth2 \
+ rabbitmq_auth_mechanism_ssl \
+ rabbitmq_aws \
+ rabbitmq_boot_steps_visualiser \
+ rabbitmq_cli \
+ rabbitmq_codegen \
+ rabbitmq_consistent_hash_exchange \
+ rabbitmq_ct_client_helpers \
+ rabbitmq_ct_helpers \
+ rabbitmq_delayed_message_exchange \
+ rabbitmq_dotnet_client \
+ rabbitmq_event_exchange \
+ rabbitmq_federation \
+ rabbitmq_federation_management \
+ rabbitmq_java_client \
+ rabbitmq_jms_client \
+ rabbitmq_jms_cts \
+ rabbitmq_jms_topic_exchange \
+ rabbitmq_lvc_exchange \
+ rabbitmq_management \
+ rabbitmq_management_agent \
+ rabbitmq_management_exchange \
+ rabbitmq_management_themes \
+ rabbitmq_message_timestamp \
+ rabbitmq_metronome \
+ rabbitmq_mqtt \
+ rabbitmq_objc_client \
+ rabbitmq_peer_discovery_aws \
+ rabbitmq_peer_discovery_common \
+ rabbitmq_peer_discovery_consul \
+ rabbitmq_peer_discovery_etcd \
+ rabbitmq_peer_discovery_k8s \
+ rabbitmq_prometheus \
+ rabbitmq_random_exchange \
+ rabbitmq_recent_history_exchange \
+ rabbitmq_routing_node_stamp \
+ rabbitmq_rtopic_exchange \
+ rabbitmq_server_release \
+ rabbitmq_sharding \
+ rabbitmq_shovel \
+ rabbitmq_shovel_management \
+ rabbitmq_stomp \
+ rabbitmq_stream \
+ rabbitmq_toke \
+ rabbitmq_top \
+ rabbitmq_tracing \
+ rabbitmq_trust_store \
+ rabbitmq_web_dispatch \
+ rabbitmq_web_mqtt \
+ rabbitmq_web_mqtt_examples \
+ rabbitmq_web_stomp \
+ rabbitmq_web_stomp_examples \
+ rabbitmq_website
+
+# Erlang.mk does not rebuild dependencies by default, once they were
+# compiled once, except for those listed in the `$(FORCE_REBUILD)`
+# variable.
+#
+# We want all RabbitMQ components to always be rebuilt: this eases
+# the work on several components at the same time.
+
+FORCE_REBUILD = $(RABBITMQ_COMPONENTS)
+
+# Several components have a custom erlang.mk/build.config, mainly
+# to disable eunit. Therefore, we can't use the top-level project's
+# erlang.mk copy.
+NO_AUTOPATCH += $(RABBITMQ_COMPONENTS)
+
+ifeq ($(origin current_rmq_ref),undefined)
+ifneq ($(wildcard .git),)
+current_rmq_ref := $(shell (\
+ ref=$$(LANG=C git branch --list | awk '/^\* \(.*detached / {ref=$$0; sub(/.*detached [^ ]+ /, "", ref); sub(/\)$$/, "", ref); print ref; exit;} /^\* / {ref=$$0; sub(/^\* /, "", ref); print ref; exit}');\
+ if test "$$(git rev-parse --short HEAD)" != "$$ref"; then echo "$$ref"; fi))
+else
+current_rmq_ref := master
+endif
+endif
+export current_rmq_ref
+
+ifeq ($(origin base_rmq_ref),undefined)
+ifneq ($(wildcard .git),)
+possible_base_rmq_ref := master
+ifeq ($(possible_base_rmq_ref),$(current_rmq_ref))
+base_rmq_ref := $(current_rmq_ref)
+else
+base_rmq_ref := $(shell \
+ (git rev-parse --verify -q master >/dev/null && \
+ git rev-parse --verify -q $(possible_base_rmq_ref) >/dev/null && \
+ git merge-base --is-ancestor $$(git merge-base master HEAD) $(possible_base_rmq_ref) && \
+ echo $(possible_base_rmq_ref)) || \
+ echo master)
+endif
+else
+base_rmq_ref := master
+endif
+endif
+export base_rmq_ref
+
+# Repository URL selection.
+#
+# First, we infer other components' location from the current project
+# repository URL, if it's a Git repository:
+# - We take the "origin" remote URL as the base
+# - The current project name and repository name is replaced by the
+# target's properties:
+# eg. rabbitmq-common is replaced by rabbitmq-codegen
+# eg. rabbit_common is replaced by rabbitmq_codegen
+#
+# If cloning from this computed location fails, we fallback to RabbitMQ
+# upstream which is GitHub.
+
+# Macro to transform eg. "rabbit_common" to "rabbitmq-common".
+rmq_cmp_repo_name = $(word 2,$(dep_$(1)))
+
+# Upstream URL for the current project.
+RABBITMQ_COMPONENT_REPO_NAME := $(call rmq_cmp_repo_name,$(PROJECT))
+RABBITMQ_UPSTREAM_FETCH_URL ?= https://github.com/rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git
+RABBITMQ_UPSTREAM_PUSH_URL ?= git@github.com:rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git
+
+# Current URL for the current project. If this is not a Git clone,
+# default to the upstream Git repository.
+ifneq ($(wildcard .git),)
+git_origin_fetch_url := $(shell git config remote.origin.url)
+git_origin_push_url := $(shell git config remote.origin.pushurl || git config remote.origin.url)
+RABBITMQ_CURRENT_FETCH_URL ?= $(git_origin_fetch_url)
+RABBITMQ_CURRENT_PUSH_URL ?= $(git_origin_push_url)
+else
+RABBITMQ_CURRENT_FETCH_URL ?= $(RABBITMQ_UPSTREAM_FETCH_URL)
+RABBITMQ_CURRENT_PUSH_URL ?= $(RABBITMQ_UPSTREAM_PUSH_URL)
+endif
+
+# Macro to replace the following pattern:
+# 1. /foo.git -> /bar.git
+# 2. /foo -> /bar
+# 3. /foo/ -> /bar/
+subst_repo_name = $(patsubst %/$(1)/%,%/$(2)/%,$(patsubst %/$(1),%/$(2),$(patsubst %/$(1).git,%/$(2).git,$(3))))
+
+# Macro to replace both the project's name (eg. "rabbit_common") and
+# repository name (eg. "rabbitmq-common") by the target's equivalent.
+#
+# This macro is kept on one line because we don't want whitespaces in
+# the returned value, as it's used in $(dep_fetch_git_rmq) in a shell
+# single-quoted string.
+dep_rmq_repo = $(if $(dep_$(2)),$(call subst_repo_name,$(PROJECT),$(2),$(call subst_repo_name,$(RABBITMQ_COMPONENT_REPO_NAME),$(call rmq_cmp_repo_name,$(2)),$(1))),$(pkg_$(1)_repo))
+
+dep_rmq_commits = $(if $(dep_$(1)), \
+ $(wordlist 3,$(words $(dep_$(1))),$(dep_$(1))), \
+ $(pkg_$(1)_commit))
+
+define dep_fetch_git_rmq
+ fetch_url1='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_FETCH_URL),$(1))'; \
+ fetch_url2='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_FETCH_URL),$(1))'; \
+ if test "$$$$fetch_url1" != '$(RABBITMQ_CURRENT_FETCH_URL)' && \
+ git clone -q -n -- "$$$$fetch_url1" $(DEPS_DIR)/$(call dep_name,$(1)); then \
+ fetch_url="$$$$fetch_url1"; \
+ push_url='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_PUSH_URL),$(1))'; \
+ elif git clone -q -n -- "$$$$fetch_url2" $(DEPS_DIR)/$(call dep_name,$(1)); then \
+ fetch_url="$$$$fetch_url2"; \
+ push_url='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_PUSH_URL),$(1))'; \
+ fi; \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && ( \
+ $(foreach ref,$(call dep_rmq_commits,$(1)), \
+ git checkout -q $(ref) >/dev/null 2>&1 || \
+ ) \
+ (echo "error: no valid pathspec among: $(call dep_rmq_commits,$(1))" \
+ 1>&2 && false) ) && \
+ (test "$$$$fetch_url" = "$$$$push_url" || \
+ git remote set-url --push origin "$$$$push_url")
+endef
+
+# --------------------------------------------------------------------
+# Component distribution.
+# --------------------------------------------------------------------
+
+list-dist-deps::
+ @:
+
+prepare-dist::
+ @:
+
+# --------------------------------------------------------------------
+# Umbrella-specific settings.
+# --------------------------------------------------------------------
+
+# If the top-level project is a RabbitMQ component, we override
+# $(DEPS_DIR) for this project to point to the top-level's one.
+#
+# We also verify that the guessed DEPS_DIR is actually named `deps`,
+# to rule out any situation where it is a coincidence that we found a
+# `rabbitmq-components.mk` in upper directories.
+
+possible_deps_dir_1 = $(abspath ..)
+possible_deps_dir_2 = $(abspath ../../..)
+
+ifeq ($(notdir $(possible_deps_dir_1)),deps)
+ifneq ($(wildcard $(possible_deps_dir_1)/../rabbitmq-components.mk),)
+deps_dir_overriden = 1
+DEPS_DIR ?= $(possible_deps_dir_1)
+DISABLE_DISTCLEAN = 1
+endif
+endif
+
+ifeq ($(deps_dir_overriden),)
+ifeq ($(notdir $(possible_deps_dir_2)),deps)
+ifneq ($(wildcard $(possible_deps_dir_2)/../rabbitmq-components.mk),)
+deps_dir_overriden = 1
+DEPS_DIR ?= $(possible_deps_dir_2)
+DISABLE_DISTCLEAN = 1
+endif
+endif
+endif
+
+ifneq ($(wildcard UMBRELLA.md),)
+DISABLE_DISTCLEAN = 1
+endif
+
+# We disable `make distclean` so $(DEPS_DIR) is not accidentally removed.
+
+ifeq ($(DISABLE_DISTCLEAN),1)
+ifneq ($(filter distclean distclean-deps,$(MAKECMDGOALS)),)
+SKIP_DEPS = 1
+endif
+endif
diff --git a/deps/rabbitmq_cli/test/core/args_processing_test.exs b/deps/rabbitmq_cli/test/core/args_processing_test.exs
new file mode 100644
index 0000000000..18c67d3a4a
--- /dev/null
+++ b/deps/rabbitmq_cli/test/core/args_processing_test.exs
@@ -0,0 +1,90 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule ArgsProcessingTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ defp list_commands() do
+ [
+ RabbitMQ.CLI.Ctl.Commands.ListBindingsCommand,
+ RabbitMQ.CLI.Ctl.Commands.ListChannelsCommand,
+ RabbitMQ.CLI.Ctl.Commands.ListConnectionsCommand,
+ RabbitMQ.CLI.Ctl.Commands.ListConsumersCommand,
+ RabbitMQ.CLI.Ctl.Commands.ListExchangesCommand,
+ RabbitMQ.CLI.Ctl.Commands.ListQueuesCommand,
+ RabbitMQ.CLI.Ctl.Commands.ListVhostsCommand
+ ]
+ end
+
+ defp all_commands() do
+ RabbitMQ.CLI.Core.CommandModules.load_commands(:all, %{})
+ |> Map.values
+ end
+
+ defp line_filter([_, description]) do
+ Regex.match?(~r/must be one of/, description)
+ end
+ defp line_filter(line) do
+ Regex.match?(~r/must be one of/, line)
+ end
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+ :ok
+ end
+
+ setup context do
+ on_exit(context, fn -> delete_user(context[:user]) end)
+ {:ok, opts: %{node: get_rabbit_hostname(), timeout: 50_000, vhost: "/"}}
+ end
+
+ test "defaults are merged with positinal args", _context do
+ commands = all_commands()
+ Enum.each(commands,
+ fn(command) ->
+ command.merge_defaults([], %{})
+ command.merge_defaults(["arg"], %{})
+ command.merge_defaults(["two", "args"], %{})
+ command.merge_defaults(["even", "more", "args"], %{})
+
+ command.merge_defaults([], %{unknown: "option"})
+ command.merge_defaults(["arg"], %{unknown: "option"})
+ end)
+ end
+
+ # this test parses info keys mentioned in the usage_additional section
+ # and makes sure they pass validation, including when separated by a comma
+ # or a mix of commas and spaces
+ test "comma-separated info items are supported", context do
+ commands = list_commands()
+ Enum.each(commands, fn(command) ->
+ items_usage = case command.usage_additional() do
+ # find the line with info items, ignore the rest
+ list when is_list(list) ->
+ # list items can be strings or pairs
+ Enum.filter(list, &line_filter/1) |> List.first |> Enum.join(" ")
+ string ->
+ string
+ end
+ # info_item, info_item2, …
+ case Regex.run(~r/.*one of (.*)$/, items_usage, [capture: :all_but_first]) do
+ nil ->
+ throw "Command #{command} does not list info items in usage_additional or the format has changed. Output: #{items_usage}"
+ [info_items] ->
+ :ok = command.validate([info_items], context[:opts])
+ :ok = command.validate(String.split(info_items, " "), context[:opts])
+ run_command_ok(command, [info_items], context[:opts])
+ run_command_ok(command, String.split(info_items, " "), context[:opts])
+ end
+ end)
+ end
+
+ def run_command_ok(command, args_init, options_init) do
+ {args, options} = command.merge_defaults(args_init, options_init)
+ assert_stream_without_errors(command.run(args, options))
+ end
+end
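
The Regex.run/3 call in the test above extracts everything after "one of" on the usage line that lists valid info items. A standalone check of that extraction, on a made-up usage line (hypothetical):

    line = "<column> must be one of name, durable, auto_delete"
    ["name, durable, auto_delete"] =
      Regex.run(~r/.*one of (.*)$/, line, capture: :all_but_first)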
diff --git a/deps/rabbitmq_cli/test/core/auto_complete_test.exs b/deps/rabbitmq_cli/test/core/auto_complete_test.exs
new file mode 100644
index 0000000000..d410ec6640
--- /dev/null
+++ b/deps/rabbitmq_cli/test/core/auto_complete_test.exs
@@ -0,0 +1,85 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+
+defmodule AutoCompleteTest do
+ use ExUnit.Case, async: false
+
+ @subject RabbitMQ.CLI.AutoComplete
+
+
+ test "Auto-completes a command" do
+ ["canis_aureus", "canis_latrans", "canis_lupus"] = @subject.complete("rabbitmqctl", ["canis"])
+ ["canis_aureus", "canis_latrans", "canis_lupus"] = @subject.complete("rabbitmqctl", ["canis_"])
+ ["canis_latrans", "canis_lupus"] = @subject.complete("rabbitmqctl", ["canis_l"])
+ ["canis_latrans"] = @subject.complete("rabbitmqctl", ["canis_la"])
+ ["canis_aureus"] = @subject.complete("rabbitmqctl", ["canis_a"])
+ ["canis_aureus"] = @subject.complete("rabbitmqctl", ["--node", "foo", "--quet", "canis_a"])
+ end
+
+ test "Auto-completes default options if command is not specified" do
+ ["--vhost"] = @subject.complete("rabbitmqctl", ["--vh"])
+ ## The script_name option is printed in its CLI form, --script-name
+ ["--script-name"] = @subject.complete("rabbitmqctl", ["--script"])
+ ["--script-name"] = @subject.complete("rabbitmqctl", ["--node", "foo", "--script"])
+ end
+
+ test "Auto-completes the command options if full command is specified" do
+ ["--colour", "--dingo", "--dog"] = @subject.complete("rabbitmqctl", ["canis_lupus", "-"])
+ ["--colour", "--dingo", "--dog"] = @subject.complete("rabbitmqctl", ["canis_lupus", "--"])
+ ["--dingo", "--dog"] = @subject.complete("rabbitmqctl", ["canis_lupus", "--d"])
+ end
+
+ test "Auto-completes scoped command" do
+ ["enable"] = @subject.complete("rabbitmq-plugins", ["enab"])
+ scopes = Application.get_env(:rabbitmqctl, :scopes)
+ scopes_with_wolf = Keyword.put(scopes, :rabbitmq_wolf, :wolf)
+ Application.put_env(:rabbitmqctl, :scopes, scopes_with_wolf)
+ on_exit(fn() ->
+ Application.put_env(:rabbitmqctl, :scopes, scopes)
+ end)
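+ # Registering the test-only :rabbitmq_wolf script name under the :wolf scope
+ # makes the Canis* commands defined below (scopes [:ctl, :wolf]) available
+ # for completion under that script name.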
+
+ ["canis_aureus", "canis_latrans", "canis_lupus"] = @subject.complete("rabbitmq_wolf", ["canis"])
+ end
+
+ test "Auto-completes scoped command with --script-name flag" do
+ ["enable"] = @subject.complete("rabbitmqctl", ["--script-name", "rabbitmq-plugins", "enab"])
+ end
+end
+
+defmodule RabbitMQ.CLI.Wolf.Commands.CanisLupusCommand do
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+ use RabbitMQ.CLI.DefaultOutput
+ def usage(), do: ["canis_lupus"]
+ def validate(_,_), do: :ok
+ def merge_defaults(_,_), do: {[], %{}}
+ def banner(_,_), do: ""
+ def run(_,_), do: :ok
+ def switches(), do: [colour: :string, dingo: :boolean, dog: :boolean]
+ def scopes, do: [:ctl, :wolf]
+end
+
+defmodule RabbitMQ.CLI.Wolf.Commands.CanisLatransCommand do
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+ use RabbitMQ.CLI.DefaultOutput
+ def usage(), do: ["canis_latrans"]
+ def validate(_,_), do: :ok
+ def merge_defaults(_,_), do: {[], %{}}
+ def banner(_,_), do: ""
+ def run(_,_), do: :ok
+ def scopes, do: [:ctl, :wolf]
+end
+
+defmodule RabbitMQ.CLI.Wolf.Commands.CanisAureusCommand do
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+ use RabbitMQ.CLI.DefaultOutput
+ def usage(), do: ["canis_aureus"]
+ def validate(_,_), do: :ok
+ def merge_defaults(_,_), do: {[], %{}}
+ def banner(_,_), do: ""
+ def run(_,_), do: :ok
+ def scopes, do: [:ctl, :wolf]
+end
diff --git a/deps/rabbitmq_cli/test/core/command_modules_test.exs b/deps/rabbitmq_cli/test/core/command_modules_test.exs
new file mode 100644
index 0000000000..8617415a22
--- /dev/null
+++ b/deps/rabbitmq_cli/test/core/command_modules_test.exs
@@ -0,0 +1,202 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule CommandModulesTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @subject RabbitMQ.CLI.Core.CommandModules
+
+ setup_all do
+ on_exit(fn ->
+ set_scope(:none)
+ Application.put_env(:rabbitmqctl, :commands, nil)
+ end)
+ :ok
+ end
+
+ test "command modules has existing commands" do
+ assert @subject.load_commands(:all, %{})["duck"] ==
+ RabbitMQ.CLI.Ctl.Commands.DuckCommand
+ end
+
+ test "command with multiple underscores shows up in map" do
+ assert @subject.load_commands(:all, %{})["gray_goose"] ==
+ RabbitMQ.CLI.Ctl.Commands.GrayGooseCommand
+ end
+
+ test "command modules does not have non-existent commands" do
+ assert @subject.load_commands(:all, %{})["usurper"] == nil
+ end
+
+ test "non command modules do not show in command map" do
+ assert @subject.load_commands(:all, %{})["ugly_duckling"] == nil
+ end
+
+ test "loaded commands are saved in env variable" do
+ set_scope(:ctl)
+ commands = @subject.module_map
+ assert commands == @subject.module_map
+ assert commands == Application.get_env(:rabbitmqctl, :commands)
+ end
+
+ test "load commands for current scope" do
+ set_scope(:ctl)
+ commands = @subject.load(%{})
+ assert commands == @subject.load_commands(:ctl, %{})
+
+ assert commands["duck"] == RabbitMQ.CLI.Ctl.Commands.DuckCommand
+ assert commands["gray_goose"] == RabbitMQ.CLI.Ctl.Commands.GrayGooseCommand
+
+ assert commands["stork"] == nil
+ assert commands["heron"] == nil
+
+ assert commands["crow"] == nil
+ assert commands["raven"] == nil
+
+ set_scope(:plugins)
+ commands = @subject.load(%{})
+ assert commands == @subject.load_commands(:plugins, %{})
+ assert commands["duck"] == nil
+ assert commands["gray_goose"] == nil
+
+ assert commands["stork"] == RabbitMQ.CLI.Plugins.Commands.StorkCommand
+ assert commands["heron"] == RabbitMQ.CLI.Plugins.Commands.HeronCommand
+
+ assert commands["crow"] == nil
+ assert commands["raven"] == nil
+ end
+
+ test "can set scopes inside command" do
+ plugin_commands = @subject.load_commands(:plugins, %{})
+
+ assert plugin_commands["duck"] == nil
+ assert plugin_commands["gray_goose"] == nil
+
+ assert plugin_commands["stork"] == RabbitMQ.CLI.Plugins.Commands.StorkCommand
+ assert plugin_commands["heron"] == RabbitMQ.CLI.Plugins.Commands.HeronCommand
+
+ assert plugin_commands["crow"] == nil
+ assert plugin_commands["raven"] == nil
+
+ # SeagullCommand has scopes() defined as [:plugins, :custom]
+ assert plugin_commands["seagull"] == RabbitMQ.CLI.Seagull.Commands.SeagullCommand
+
+ custom_commands = @subject.load_commands(:custom, %{})
+
+ assert custom_commands["duck"] == nil
+ assert custom_commands["gray_goose"] == nil
+
+ assert custom_commands["stork"] == nil
+ assert custom_commands["heron"] == nil
+
+ assert custom_commands["crow"] == RabbitMQ.CLI.Custom.Commands.CrowCommand
+ assert custom_commands["raven"] == RabbitMQ.CLI.Custom.Commands.RavenCommand
+
+ # SeagullCommand has scopes() defined as [:plugins, :custom]
+ assert custom_commands["seagull"] == RabbitMQ.CLI.Seagull.Commands.SeagullCommand
+ end
+
+ ## ------------------- commands/0 tests --------------------
+
+ test "command_modules has existing commands" do
+ set_scope(:ctl)
+ @subject.load(%{})
+ assert @subject.module_map["status"] == RabbitMQ.CLI.Ctl.Commands.StatusCommand
+ assert @subject.module_map["environment"] == RabbitMQ.CLI.Ctl.Commands.EnvironmentCommand
+ end
+
+ test "command_modules does not have non-existent commands" do
+ set_scope(:ctl)
+ @subject.load(%{})
+ assert @subject.module_map[:p_equals_np_proof] == nil
+ end
+end
+
+# Mock command modules for Ctl
+
+defmodule RabbitMQ.CLI.Ctl.Commands.DuckCommand do
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+ use RabbitMQ.CLI.DefaultOutput
+ def usage(), do: ["duck"]
+ def validate(_,_), do: :ok
+ def merge_defaults(_,_), do: {[], %{}}
+ def banner(_,_), do: ""
+ def run(_,_), do: :ok
+end
+
+defmodule RabbitMQ.CLI.Ctl.Commands.GrayGooseCommand do
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+ use RabbitMQ.CLI.DefaultOutput
+ def usage(), do: ["gray_goose"]
+ def validate(_,_), do: :ok
+ def merge_defaults(_,_), do: {[], %{}}
+ def banner(_,_), do: ""
+ def run(_,_), do: :ok
+end
+
+defmodule RabbitMQ.CLI.Ctl.Commands.UglyDucklingCommand do
+end
+
+
+# Mock command modules for Plugins
+
+defmodule RabbitMQ.CLI.Plugins.Commands.StorkCommand do
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+ use RabbitMQ.CLI.DefaultOutput
+ def usage(), do: ["stork"]
+ def validate(_,_), do: :ok
+ def merge_defaults(_,_), do: {[], %{}}
+ def banner(_,_), do: ""
+ def run(_,_), do: :ok
+end
+
+defmodule RabbitMQ.CLI.Plugins.Commands.HeronCommand do
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+ use RabbitMQ.CLI.DefaultOutput
+ def usage(), do: ["heron"]
+ def validate(_,_), do: :ok
+ def merge_defaults(_,_), do: {[], %{}}
+ def banner(_,_), do: ""
+ def run(_,_), do: :ok
+end
+
+# Mock command modules for Custom
+
+defmodule RabbitMQ.CLI.Custom.Commands.CrowCommand do
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+ use RabbitMQ.CLI.DefaultOutput
+ def usage(), do: ["crow"]
+ def validate(_,_), do: :ok
+ def merge_defaults(_,_), do: {[], %{}}
+ def banner(_,_), do: ""
+ def run(_,_), do: :ok
+ def scopes(), do: [:custom]
+end
+
+defmodule RabbitMQ.CLI.Custom.Commands.RavenCommand do
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+ use RabbitMQ.CLI.DefaultOutput
+ def usage(), do: ["raven"]
+ def validate(_,_), do: :ok
+ def merge_defaults(_,_), do: {[], %{}}
+ def banner(_,_), do: ""
+ def run(_,_), do: :ok
+end
+
+defmodule RabbitMQ.CLI.Seagull.Commands.SeagullCommand do
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+ use RabbitMQ.CLI.DefaultOutput
+ def usage(), do: ["seagull"]
+ def validate(_,_), do: :ok
+ def merge_defaults(_,_), do: {[], %{}}
+ def banner(_,_), do: ""
+ def run(_,_), do: :ok
+ def scopes(), do: [:plugins, :custom]
+end
diff --git a/deps/rabbitmq_cli/test/core/default_output_test.exs b/deps/rabbitmq_cli/test/core/default_output_test.exs
new file mode 100644
index 0000000000..f567c5cc96
--- /dev/null
+++ b/deps/rabbitmq_cli/test/core/default_output_test.exs
@@ -0,0 +1,114 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+
+defmodule DefaultOutputTest do
+ use ExUnit.Case, async: false
+
+ test "ok is passed as is" do
+ assert match?(:ok, ExampleCommand.output(:ok, %{}))
+ end
+
+ test "ok with message is passed as is" do
+ assert match?({:ok, :message}, ExampleCommand.output({:ok, :message}, %{}))
+ assert match?({:ok, {:complex, "message"}}, ExampleCommand.output({:ok, {:complex, "message"}}, %{}))
+ end
+
+ test "enumerable is passed as stream" do
+ assert match?({:stream, 'list'}, ExampleCommand.output({:ok, 'list'}, %{}))
+ assert match?({:stream, 'list'}, ExampleCommand.output('list', %{}))
+
+ assert match?({:stream, [1,2,3]}, ExampleCommand.output({:ok, [1,2,3]}, %{}))
+ assert match?({:stream, [1,2,3]}, ExampleCommand.output([1,2,3], %{}))
+
+ stream = Stream.timer(10000)
+ assert match?({:stream, ^stream}, ExampleCommand.output({:ok, stream}, %{}))
+ assert match?({:stream, ^stream}, ExampleCommand.output(stream, %{}))
+ end
+
+ test "badrpc is an error" do
+ {:error, {:badrpc, :nodedown}} =
+ ExampleCommand.output({:badrpc, :nodedown}, %{})
+
+ {:error, {:badrpc, :timeout}} =
+ ExampleCommand.output({:badrpc, :timeout}, %{})
+ end
+
+ test "unknown atom is error" do
+ {:error, :error_message} = ExampleCommand.output(:error_message, %{})
+ end
+
+ test "unknown tuple is error" do
+ {:error, {:left, :right}} = ExampleCommand.output({:left, :right}, %{})
+ end
+
+ test "error_string is error" do
+ assert {:error, "I am string"} == ExampleCommand.output({:error_string, "I am string"}, %{})
+ end
+
+ test "error_string is converted to string" do
+ assert match?({:error, "I am charlist"},
+ ExampleCommand.output({:error_string, 'I am charlist'}, %{}))
+ end
+
+ test "error is formatted" do
+ {:error, "I am formatted \"string\""} =
+ ExampleCommand.output({:error, 'I am formatted ~p', ['string']}, %{})
+ end
+
+ test "non atom value is ok" do
+ val = "foo"
+ assert match?({:ok, ^val}, ExampleCommand.output(val, %{}))
+ val = 125
+ assert match?({:ok, ^val}, ExampleCommand.output(val, %{}))
+ val = 100.2
+ assert match?({:ok, ^val}, ExampleCommand.output(val, %{}))
+ val = {:one, :two, :three}
+ assert match?({:ok, ^val}, ExampleCommand.output(val, %{}))
+ end
+
+ test "custom output function can be defined" do
+ assert {:error, 125, "Non standard"} == ExampleCommandWithCustomOutput.output(:non_standard_output, %{})
+ end
+
+ test "default output works even if custom output is defined" do
+ assert :ok == ExampleCommandWithCustomOutput.output(:ok, %{})
+ assert {:ok, {:complex, "message"}} == ExampleCommandWithCustomOutput.output({:ok, {:complex, "message"}}, %{})
+
+ assert {:stream, [1,2,3]} == ExampleCommandWithCustomOutput.output({:ok, [1,2,3]}, %{})
+ assert {:stream, [1,2,3]} == ExampleCommandWithCustomOutput.output([1,2,3], %{})
+
+ assert {:error, {:badrpc, :nodedown}} ==
+ ExampleCommandWithCustomOutput.output({:badrpc, :nodedown}, %{})
+ assert {:error, {:badrpc, :timeout}} ==
+ ExampleCommandWithCustomOutput.output({:badrpc, :timeout}, %{})
+
+ error = %{i: [am: "arbitrary", error: 1]}
+ {:error, ^error} = ExampleCommandWithCustomOutput.output({:error, error}, %{})
+
+ assert {:error, "I am string"} == ExampleCommandWithCustomOutput.output({:error_string, "I am string"}, %{})
+
+ val = "foo"
+ assert match?({:ok, ^val}, ExampleCommandWithCustomOutput.output(val, %{}))
+ val = 125
+ assert match?({:ok, ^val}, ExampleCommandWithCustomOutput.output(val, %{}))
+ val = 100.2
+ assert match?({:ok, ^val}, ExampleCommandWithCustomOutput.output(val, %{}))
+ val = {:one, :two, :three}
+ assert match?({:ok, ^val}, ExampleCommandWithCustomOutput.output(val, %{}))
+ end
+end
+
+defmodule ExampleCommand do
+ use RabbitMQ.CLI.DefaultOutput
+end
+
+defmodule ExampleCommandWithCustomOutput do
+ def output(:non_standard_output, _) do
+ {:error, 125, "Non standard"}
+ end
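+ # The specific output/2 clause above matches first; `use RabbitMQ.CLI.DefaultOutput`
+ # injects fallback clauses for all other values, as the
+ # "default output works even if custom output is defined" test verifies.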
+ use RabbitMQ.CLI.DefaultOutput
+end
diff --git a/deps/rabbitmq_cli/test/core/distribution_test.exs b/deps/rabbitmq_cli/test/core/distribution_test.exs
new file mode 100644
index 0000000000..00dd872ab4
--- /dev/null
+++ b/deps/rabbitmq_cli/test/core/distribution_test.exs
@@ -0,0 +1,48 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+alias RabbitMQ.CLI.Core.Distribution
+
+defmodule DistributionTest do
+ use ExUnit.Case, async: false
+
+ setup_all do
+ :net_kernel.stop()
+ :ok
+ end
+
+ test "set cookie via environment variable" do
+ on_exit(fn ->
+ :net_kernel.stop()
+ System.delete_env("RABBITMQ_ERLANG_COOKIE")
+ end)
+ try do
+ :nocookie = Node.get_cookie()
+ catch
+ # one of net_kernel processes is not running ¯\_(ツ)_/¯
+ :exit, _ -> :ok
+ end
+ System.put_env("RABBITMQ_ERLANG_COOKIE", "mycookie")
+ opts = %{}
+ Distribution.start(opts)
+ :mycookie = Node.get_cookie()
+ end
+
+ test "set cookie via argument" do
+ on_exit(fn ->
+ :net_kernel.stop()
+ end)
+ try do
+ :nocookie = Node.get_cookie()
+ catch
+ # one of net_kernel processes is not running ¯\_(ツ)_/¯
+ :exit, _ -> :ok
+ end
+ opts = %{erlang_cookie: :mycookie}
+ Distribution.start(opts)
+ :mycookie = Node.get_cookie()
+ end
+end
diff --git a/deps/rabbitmq_cli/test/core/helpers_test.exs b/deps/rabbitmq_cli/test/core/helpers_test.exs
new file mode 100644
index 0000000000..71d107bef8
--- /dev/null
+++ b/deps/rabbitmq_cli/test/core/helpers_test.exs
@@ -0,0 +1,140 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule HelpersTest do
+ alias RabbitMQ.CLI.Core.{Config, Helpers}
+ import RabbitMQ.CLI.Core.{CodePath, Memory}
+
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ ## --------------------- get_rabbit_hostname()/0 tests -------------------------
+
+ test "RabbitMQ hostname is properly formed" do
+ assert Helpers.get_rabbit_hostname() |> Atom.to_string =~ ~r/rabbit@\w+/
+ end
+
+ ## ------------------- memory_unit* tests --------------------
+
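+ # As asserted below, bare and IEC suffixes ("k", "kiB", "M", "MiB", "G", "GiB")
+ # are binary (1 k = 1024 bytes) while SI suffixes ("kB", "MB", "GB") are
+ # decimal (1 kB = 1000 bytes); an empty unit means plain bytes.
+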
+ test "an invalid memory unit fails " do
+ assert memory_unit_absolute(10, "gigantibytes") == {:bad_argument, ["gigantibytes"]}
+ end
+
+ test "an invalid number fails " do
+ assert memory_unit_absolute("lots", "gigantibytes") == {:bad_argument, ["lots", "gigantibytes"]}
+ assert memory_unit_absolute(-1, "gigantibytes") == {:bad_argument, [-1, "gigantibytes"]}
+ end
+
+ test "valid number and unit returns a valid result " do
+ assert memory_unit_absolute(10, "k") == 10240
+ assert memory_unit_absolute(10, "kiB") == 10240
+ assert memory_unit_absolute(10, "M") == 10485760
+ assert memory_unit_absolute(10, "MiB") == 10485760
+ assert memory_unit_absolute(10, "G") == 10737418240
+ assert memory_unit_absolute(10, "GiB")== 10737418240
+ assert memory_unit_absolute(10, "kB")== 10000
+ assert memory_unit_absolute(10, "MB")== 10000000
+ assert memory_unit_absolute(10, "GB")== 10000000000
+ assert memory_unit_absolute(10, "") == 10
+ end
+
+ ## ------------------- Helpers.normalise_node_option tests --------------------
+
+ test "longnames: 'rabbit' as node name, correct domain is used" do
+ default_name = Config.default(:node)
+ options = %{node: default_name, longnames: true}
+ {:ok, options} = Helpers.normalise_node_option(options)
+ assert options[:node] == :"rabbit@#{hostname()}.#{domain()}"
+ end
+
+ test "shortnames: 'rabbit' as node name, no domain is used" do
+ options = %{node: :rabbit, longnames: false}
+ {:ok, options} = Helpers.normalise_node_option(options)
+ assert options[:node] == :"rabbit@#{hostname()}"
+ end
+
+ ## ------------------- normalise_node tests (:shortnames) --------------------
+
+ test "shortnames: if nil input, retrieve standard rabbit hostname" do
+ assert Helpers.normalise_node(nil, :shortnames) == get_rabbit_hostname()
+ end
+
+ test "shortnames: if input is an atom short name, return the atom with hostname" do
+ want = String.to_atom("rabbit_test@#{hostname()}")
+ got = Helpers.normalise_node(:rabbit_test, :shortnames)
+ assert want == got
+ end
+
+ test "shortnames: if input is a string fully qualified node name, return an atom" do
+ want = String.to_atom("rabbit_test@#{hostname()}")
+ got = Helpers.normalise_node("rabbit_test@#{hostname()}", :shortnames)
+ assert want == got
+ end
+
+ test "shortnames: if input is a short node name, host name is added" do
+ want = String.to_atom("rabbit_test@#{hostname()}")
+ got = Helpers.normalise_node("rabbit_test", :shortnames)
+ assert want == got
+ end
+
+ test "shortnames: if input is a hostname without a node name, default node name is added" do
+ default_name = Config.default(:node)
+ want = String.to_atom("#{default_name}@#{hostname()}")
+ got = Helpers.normalise_node("@#{hostname()}", :shortnames)
+ assert want == got
+ end
+
+ test "shortnames: if input is a short node name with an @ and no hostname, local host name is added" do
+ want = String.to_atom("rabbit_test@#{hostname()}")
+ got = Helpers.normalise_node("rabbit_test@", :shortnames)
+ assert want == got
+ end
+
+ test "shortnames: if input contains more than one @, return an atom" do
+ want = String.to_atom("rabbit@rabbit_test@#{hostname()}")
+ got = Helpers.normalise_node("rabbit@rabbit_test@#{hostname()}", :shortnames)
+ assert want == got
+ end
+
+ ## ------------------- normalise_node tests (:longnames) --------------------
+
+ test "longnames: if nil input, retrieve standard rabbit hostname" do
+ want = get_rabbit_hostname(:longnames)
+ got = Helpers.normalise_node(nil, :longnames)
+ assert want == got
+ end
+
+ test "longnames: if input is an atom short name, return the atom with full hostname" do
+ want = String.to_atom("rabbit_test@#{hostname()}.#{domain()}")
+ got = Helpers.normalise_node(:rabbit_test, :longnames)
+ assert want == got
+ end
+
+ ## ------------------- require_rabbit/1 tests --------------------
+
+ test "locate plugin with version number in filename" do
+ plugins_directory_03 = fixture_plugins_path("plugins-subdirectory-03")
+ rabbitmq_home = :rabbit_misc.rpc_call(node(), :code, :lib_dir, [:rabbit])
+ opts = %{plugins_dir: to_string(plugins_directory_03),
+ rabbitmq_home: rabbitmq_home}
+ assert Enum.member?(Application.loaded_applications(), {:mock_rabbitmq_plugins_03, 'New project', '0.1.0'}) == false
+ require_rabbit_and_plugins(opts)
+ Application.load(:mock_rabbitmq_plugins_03)
+ assert Enum.member?(Application.loaded_applications(), {:mock_rabbitmq_plugins_03, 'New project', '0.1.0'})
+ end
+
+ test "locate plugin without version number in filename" do
+ plugins_directory_04 = fixture_plugins_path("plugins-subdirectory-04")
+ rabbitmq_home = :rabbit_misc.rpc_call(node(), :code, :lib_dir, [:rabbit])
+ opts = %{plugins_dir: to_string(plugins_directory_04),
+ rabbitmq_home: rabbitmq_home}
+ assert Enum.member?(Application.loaded_applications(), {:mock_rabbitmq_plugins_04, 'New project', 'rolling'}) == false
+ require_rabbit_and_plugins(opts)
+ Application.load(:mock_rabbitmq_plugins_04)
+ assert Enum.member?(Application.loaded_applications(), {:mock_rabbitmq_plugins_04, 'New project', 'rolling'})
+ end
+
+end
diff --git a/deps/rabbitmq_cli/test/core/information_unit_test.exs b/deps/rabbitmq_cli/test/core/information_unit_test.exs
new file mode 100644
index 0000000000..568b687b2d
--- /dev/null
+++ b/deps/rabbitmq_cli/test/core/information_unit_test.exs
@@ -0,0 +1,44 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+
+defmodule InformationUnitTest do
+ use ExUnit.Case, async: true
+
+ alias RabbitMQ.CLI.InformationUnit, as: IU
+
+ test "bytes, MB, GB, TB are known units" do
+ Enum.each(["bytes", "mb", "MB", "gb", "GB", "tb", "TB"],
+ fn x -> assert IU.known_unit?(x) end)
+ end
+
+ test "glip-glops, millibars, gold pressed latinum bars and looney and are not known units" do
+ Enum.each(["glip-glops", "millibars", "gold pressed latinum bars", "looney"],
+ fn x -> assert not IU.known_unit?(x) end)
+ end
+
+ test "conversion to bytes" do
+ assert IU.convert(0, "bytes") == 0
+ assert IU.convert(100, "bytes") == 100
+ assert IU.convert(9988, "bytes") == 9988
+ end
+
+ test "conversion to MB" do
+ assert IU.convert(1000000, "mb") == 1.0
+ assert IU.convert(9500000, "mb") == 9.5
+ assert IU.convert(97893000, "mb") == 97.893
+ assert IU.convert(978930000, "mb") == 978.93
+ end
+
+ test "conversion to GB" do
+ assert IU.convert(978930000, "gb") == 0.9789
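+ # 978930000 / 10^9 = 0.97893; results appear to be rounded to four decimal places.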
+
+ assert IU.convert(1000000000, "gb") == 1.0
+ assert IU.convert(9500000000, "gb") == 9.5
+ assert IU.convert(97893000000, "gb") == 97.893
+ assert IU.convert(978930000000, "gb") == 978.93
+ end
+end
diff --git a/deps/rabbitmq_cli/test/core/json_stream_test.exs b/deps/rabbitmq_cli/test/core/json_stream_test.exs
new file mode 100644
index 0000000000..ab3bebd62c
--- /dev/null
+++ b/deps/rabbitmq_cli/test/core/json_stream_test.exs
@@ -0,0 +1,24 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2019-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule JsonStreamTest do
+ use ExUnit.Case, async: false
+
+ @formatter RabbitMQ.CLI.Formatters.JsonStream
+
+ test "format_output map with atom keys is converted to JSON object" do
+ assert @formatter.format_output(%{a: :apple, b: :beer}, %{}) == "{\"a\":\"apple\",\"b\":\"beer\"}"
+ end
+
+ test "format_output map with binary keys is converted to JSON object" do
+ assert @formatter.format_output(%{"a" => :apple, "b" => :beer}, %{}) == "{\"a\":\"apple\",\"b\":\"beer\"}"
+ end
+
+ test "format_output empty binary is converted to empty JSON array" do
+ assert @formatter.format_output("", %{}) == ""
+ end
+
+end
diff --git a/deps/rabbitmq_cli/test/core/listeners_test.exs b/deps/rabbitmq_cli/test/core/listeners_test.exs
new file mode 100644
index 0000000000..266413c6fa
--- /dev/null
+++ b/deps/rabbitmq_cli/test/core/listeners_test.exs
@@ -0,0 +1,64 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule CoreListenersTest do
+ use ExUnit.Case, async: true
+
+ import RabbitMQ.CLI.Core.Listeners
+ import RabbitCommon.Records
+
+ test "listener record translation to a map" do
+ assert listener_map(listener(node: :rabbit@mercurio,
+ protocol: :stomp,
+ ip_address: {0,0,0,0,0,0,0,0},
+ port: 61613)) ==
+ %{
+ interface: "[::]",
+ node: :rabbit@mercurio,
+ port: 61613,
+ protocol: :stomp,
+ purpose: "STOMP"
+ }
+ end
+
+ test "[human-readable] protocol labels" do
+ assert protocol_label(:amqp) == "AMQP 0-9-1 and AMQP 1.0"
+ assert protocol_label(:'amqp/ssl') == "AMQP 0-9-1 and AMQP 1.0 over TLS"
+ assert protocol_label(:mqtt) == "MQTT"
+ assert protocol_label(:'mqtt/ssl') == "MQTT over TLS"
+ assert protocol_label(:stomp) == "STOMP"
+ assert protocol_label(:'stomp/ssl') == "STOMP over TLS"
+ assert protocol_label(:http) == "HTTP API"
+ assert protocol_label(:https) == "HTTP API over TLS (HTTPS)"
+ assert protocol_label(:'https/web-stomp') == "STOMP over WebSockets and TLS (HTTPS)"
+ assert protocol_label(:'https/web-mqtt') == "MQTT over WebSockets and TLS (HTTPS)"
+
+ assert protocol_label(:'http/prometheus') == "Prometheus exporter API over HTTP"
+ assert protocol_label(:'https/prometheus') == "Prometheus exporter API over TLS (HTTPS)"
+ end
+
+ test "listener expiring within" do
+ validity_in_days = 10
+ validity = X509.Certificate.Validity.days_from_now(validity_in_days)
+ ca_key = X509.PrivateKey.new_ec(:secp256r1)
+ ca = X509.Certificate.self_signed(ca_key,
+ "/C=US/ST=CA/L=San Francisco/O=Megacorp/CN=Megacorp Intermediate CA",
+ template: :root_ca,
+ validity: validity
+ )
+ pem = X509.Certificate.to_pem(ca)
+
+ opts = [{:certfile, {:pem, pem}}, {:cacertfile, {:pem, pem}}]
+ listener = listener(node: :rabbit@mercurio,
+ protocol: :stomp,
+ ip_address: {0,0,0,0,0,0,0,0},
+ port: 61613,
+ opts: opts)
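+ # The CA above is valid for 10 days, so the listener should not be reported
+ # as expiring within a 5-day horizon but should be within a 15-day one
+ # (86400 seconds per day).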
+
+ assert not listener_expiring_within(listener, 86400 * (validity_in_days - 5))
+ assert listener_expiring_within(listener, 86400 * (validity_in_days + 5))
+ end
+end
diff --git a/deps/rabbitmq_cli/test/core/node_name_test.exs b/deps/rabbitmq_cli/test/core/node_name_test.exs
new file mode 100644
index 0000000000..89bc1484bc
--- /dev/null
+++ b/deps/rabbitmq_cli/test/core/node_name_test.exs
@@ -0,0 +1,73 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule NodeNameTest do
+ use ExUnit.Case, async: true
+
+ @subject RabbitMQ.CLI.Core.NodeName
+
+ test "shortnames: RabbitMQ nodename is properly formed from atom" do
+ want = String.to_atom("rabbit@#{:inet_db.gethostname()}")
+ {:ok, got} = @subject.create(:rabbit, :shortnames)
+ assert want == got
+ end
+
+ test "shortnames: RabbitMQ nodename is properly formed from string" do
+ want = String.to_atom("rabbit@#{:inet_db.gethostname()}")
+ {:ok, got} = @subject.create("rabbit", :shortnames)
+ assert want == got
+ end
+
+ test "shortnames: RabbitMQ nodename is properly formed with trailing @" do
+ want = String.to_atom("rabbit@#{:inet_db.gethostname()}")
+ {:ok, got} = @subject.create(:rabbit@, :shortnames)
+ assert want == got
+ end
+
+ test "shortnames: RabbitMQ nodename is properly formed with host part" do
+ want = :rabbit@foofoo
+ {:ok, got} = @subject.create(want, :shortnames)
+ assert want == got
+ end
+
+ test "shortnames: nodename head only supports alphanumerics, underscores and hyphens in name head" do
+ {:error, {:node_name, :invalid_node_name_head}} = @subject.create("кириллица", :shortnames)
+ end
+
+ test "longnames: RabbitMQ nodename is properly formed from atom" do
+ {:ok, got} = @subject.create(:rabbit, :longnames)
+ assert Atom.to_string(got) =~ ~r/rabbit@[\w\-]+\.\w+/
+ end
+
+ test "longnames: RabbitMQ nodename is properly formed from string" do
+ {:ok, got} = @subject.create("rabbit", :longnames)
+ assert Atom.to_string(got) =~ ~r/rabbit@[\w\-]+\.\w+/
+ end
+
+ test "longnames: RabbitMQ nodename is properly formed from atom with domain" do
+ want = :"rabbit@localhost.localdomain"
+ {:ok, got} = @subject.create(want, :longnames)
+ assert want == got
+ end
+
+ test "longnames: RabbitMQ nodename is properly formed from string with domain" do
+ name_str = "rabbit@localhost.localdomain"
+ want = String.to_atom(name_str)
+ {:ok, got} = @subject.create(name_str, :longnames)
+ assert want == got
+ end
+
+ test "longnames: RabbitMQ nodename is properly formed from string with partial domain" do
+ name_str = "rabbit@localhost"
+ want = String.to_atom(name_str <> "." <> @subject.domain())
+ {:ok, got} = @subject.create(name_str, :longnames)
+ assert want == got
+ end
+
+ test "longnames: nodename head only supports alphanumerics, underscores and hyphens in name head" do
+ {:error, {:node_name, :invalid_node_name_head}} = @subject.create("кириллица", :longnames)
+ end
+end
diff --git a/deps/rabbitmq_cli/test/core/os_pid_test.exs b/deps/rabbitmq_cli/test/core/os_pid_test.exs
new file mode 100644
index 0000000000..2d110f591f
--- /dev/null
+++ b/deps/rabbitmq_cli/test/core/os_pid_test.exs
@@ -0,0 +1,54 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+
+defmodule OsPidTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @subject RabbitMQ.CLI.Core.OsPid
+
+ #
+ # Tests
+ #
+
+ describe "#read_pid_from_file with should_wait = false" do
+ test "with a valid pid file returns an integer value" do
+ path = fixture_file_path("valid_pidfile.pid")
+
+ assert (File.exists?(path) and File.regular?(path))
+ assert @subject.read_pid_from_file(path, false) == 13566
+ end
+
+ test "with a valid pid file that includes spaces returns an integer value" do
+ path = fixture_file_path("valid_pidfile_with_spaces.pid")
+
+ assert (File.exists?(path) and File.regular?(path))
+ assert @subject.read_pid_from_file(path, false) == 83777
+ end
+
+ test "with an empty file" do
+ path = fixture_file_path("empty_pidfile.pid")
+
+ assert (File.exists?(path) and File.regular?(path))
+ assert match?({:error, :could_not_read_pid_from_file, _}, @subject.read_pid_from_file(path, false))
+ end
+
+ test "with a non-empty file full of garbage (that doesn't parse)" do
+ path = fixture_file_path("invalid_pidfile.pid")
+
+ assert (File.exists?(path) and File.regular?(path))
+ assert match?({:error, :could_not_read_pid_from_file, _}, @subject.read_pid_from_file(path, false))
+ end
+
+ test "with a file that does not exist" do
+ path = fixture_file_path("pidfile_that_does_not_exist_128787df8s7f8%4&^.pid")
+
+ assert !File.exists?(path)
+ assert match?({:error, :could_not_read_pid_from_file, _}, @subject.read_pid_from_file(path, false))
+ end
+ end
+end
diff --git a/deps/rabbitmq_cli/test/core/parser_test.exs b/deps/rabbitmq_cli/test/core/parser_test.exs
new file mode 100644
index 0000000000..b483db1fdd
--- /dev/null
+++ b/deps/rabbitmq_cli/test/core/parser_test.exs
@@ -0,0 +1,369 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+## Mock command for command specific parser
+defmodule RabbitMQ.CLI.Seagull.Commands.HerringGullCommand do
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+ use RabbitMQ.CLI.DefaultOutput
+ def usage(), do: ["herring_gull"]
+ def validate(_,_), do: :ok
+ def merge_defaults(_,_), do: {[], %{}}
+ def banner(_,_), do: ""
+ def run(_,_), do: :ok
+ def switches(), do: [herring: :string, garbage: :boolean]
+ def aliases(), do: [h: :herring, g: :garbage]
+end
+
+defmodule RabbitMQ.CLI.Seagull.Commands.PacificGullCommand do
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+ use RabbitMQ.CLI.DefaultOutput
+ def usage(), do: ["pacific_gull"]
+ def validate(_,_), do: :ok
+ def merge_defaults(_,_), do: {[], %{}}
+ def banner(_,_), do: ""
+ def run(_,_), do: :ok
+end
+
+defmodule RabbitMQ.CLI.Seagull.Commands.HermannGullCommand do
+ @behaviour RabbitMQ.CLI.CommandBehaviour
+ use RabbitMQ.CLI.DefaultOutput
+ def usage(), do: ["hermann_gull"]
+ def validate(_,_), do: :ok
+ def merge_defaults(_,_), do: {[], %{}}
+ def banner(_,_), do: ""
+ def run(_,_), do: :ok
+end
+
+defmodule ParserTest do
+ use ExUnit.Case, async: true
+ import ExUnit.CaptureIO
+ import TestHelper
+
+ @subject RabbitMQ.CLI.Core.Parser
+
+ setup_all do
+ Code.ensure_loaded(RabbitMQ.CLI.Seagull.Commands.HerringGullCommand)
+ Code.ensure_loaded(RabbitMQ.CLI.Seagull.Commands.PacificGullCommand)
+ set_scope(:seagull)
+ on_exit(fn ->
+ set_scope(:none)
+ end)
+ :ok
+ end
+
+ test "one arity 0 command, no options" do
+ assert @subject.parse_global(["sandwich"]) == {["sandwich"], %{}, []}
+ end
+
+ test "one arity 1 command, no options" do
+ assert @subject.parse_global(["sandwich", "pastrami"]) == {["sandwich", "pastrami"], %{}, []}
+ end
+
+ test "no commands, no options (empty string)" do
+ assert @subject.parse_global([""]) == {[""], %{}, []}
+ end
+
+ test "no commands, no options (empty array)" do
+ assert @subject.parse_global([]) == {[],%{}, []}
+ end
+
+ test "one arity 1 command, one double-dash quiet flag" do
+ assert @subject.parse_global(["sandwich", "pastrami", "--quiet"]) ==
+ {["sandwich", "pastrami"], %{quiet: true}, []}
+ end
+
+ test "one arity 1 command, one single-dash quiet flag" do
+ assert @subject.parse_global(["sandwich", "pastrami", "-q"]) ==
+ {["sandwich", "pastrami"], %{quiet: true}, []}
+ end
+
+ test "one arity 1 command, one double-dash silent flag" do
+ assert @subject.parse_global(["sandwich", "pastrami", "--silent"]) ==
+ {["sandwich", "pastrami"], %{silent: true}, []}
+ end
+
+ test "one arity 1 command, one single-dash silent flag" do
+ assert @subject.parse_global(["sandwich", "pastrami", "-s"]) ==
+ {["sandwich", "pastrami"], %{silent: true}, []}
+ end
+
+ test "one arity 0 command, one single-dash node option" do
+ assert @subject.parse_global(["sandwich", "-n", "rabbitmq@localhost"]) ==
+ {["sandwich"], %{node: :rabbitmq@localhost}, []}
+ end
+
+ test "one arity 1 command, one single-dash node option" do
+ assert @subject.parse_global(["sandwich", "pastrami", "-n", "rabbitmq@localhost"]) ==
+ {["sandwich", "pastrami"], %{node: :rabbitmq@localhost}, []}
+ end
+
+ test "one arity 1 command, one single-dash node option and one quiet flag" do
+ assert @subject.parse_global(["sandwich", "pastrami", "-n", "rabbitmq@localhost", "--quiet"]) ==
+ {["sandwich", "pastrami"], %{node: :rabbitmq@localhost, quiet: true}, []}
+ end
+
+ test "single-dash node option before command" do
+ assert @subject.parse_global(["-n", "rabbitmq@localhost", "sandwich", "pastrami"]) ==
+ {["sandwich", "pastrami"], %{node: :rabbitmq@localhost}, []}
+ end
+
+ test "no commands, one double-dash node option" do
+ assert @subject.parse_global(["--node=rabbitmq@localhost"]) == {[], %{node: :rabbitmq@localhost}, []}
+ end
+
+ test "no commands, one single-dash -p option" do
+ assert @subject.parse_global(["-p", "sandwich"]) == {[], %{vhost: "sandwich"}, []}
+ end
+
+ test "global parse treats command-specific arguments as invalid (ignores them)" do
+ command_line = ["seagull", "--herring", "atlantic", "-g", "-p", "my_vhost"]
+ {args, options, invalid} = @subject.parse_global(command_line)
+ assert {args, options, invalid} ==
+ {["seagull", "atlantic"], %{vhost: "my_vhost"}, [{"--herring", nil}, {"-g", nil}]}
+ end
+
+ test "global parse treats command-specific arguments that are separated by an equals sign as invalid (ignores them)" do
+ command_line = ["seagull", "--herring=atlantic", "-g", "-p", "my_vhost"]
+ {args, options, invalid} = @subject.parse_global(command_line)
+ assert {args, options, invalid} ==
+ {["seagull"], %{vhost: "my_vhost"}, [{"--herring", nil}, {"-g", nil}]}
+ end
+
+ test "command-specific parse recognizes command switches" do
+ command_line = ["seagull", "--herring", "atlantic", "-g", "-p", "my_vhost"]
+ command = RabbitMQ.CLI.Seagull.Commands.HerringGullCommand
+ assert @subject.parse_command_specific(command_line, command) ==
+ {["seagull"], %{vhost: "my_vhost", herring: "atlantic", garbage: true}, []}
+ end
+
+ test "command-specific parse recognizes command switches that are separated by an equals sign" do
+ command_line = ["seagull", "--herring=atlantic", "-g", "-p", "my_vhost"]
+ command = RabbitMQ.CLI.Seagull.Commands.HerringGullCommand
+ assert @subject.parse_command_specific(command_line, command) ==
+ {["seagull"], %{vhost: "my_vhost", herring: "atlantic", garbage: true}, []}
+ end
+
+ test "command-specific switches and aliases are optional" do
+ command_line = ["seagull", "-p", "my_vhost"]
+ command = RabbitMQ.CLI.Seagull.Commands.PacificGullCommand
+ assert @subject.parse_command_specific(command_line, command) ==
+ {["seagull"], %{vhost: "my_vhost"}, []}
+ end
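+
+ # Taken together, parsing is two-phase: parse_global/1 knows only the global
+ # switches and reports command-specific ones as invalid, while
+ # parse_command_specific/2 re-parses with the command's switches() and
+ # aliases(). parse/1, exercised below, resolves the command first and then
+ # applies that command's switches and aliases.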
+
+ test "--timeout can be specified before command" do
+ # for backwards compatibility
+ assert @subject.parse_global(["-n", "rabbitmq@localhost", "--timeout", "5", "sandwich", "pastrami"]) ==
+ {["sandwich", "pastrami"], %{node: :rabbitmq@localhost, timeout: 5}, []}
+ end
+
+ test "-t can be specified before command" do
+ # for backwards compatibility
+ assert @subject.parse_global(["-n", "rabbitmq@localhost", "-t", "5", "sandwich", "pastrami"]) ==
+ {["sandwich", "pastrami"], %{node: :rabbitmq@localhost, timeout: 5}, []}
+ end
+
+ test "parse/1 returns command name" do
+ command_line = ["pacific_gull", "fly", "-p", "my_vhost"]
+ command = RabbitMQ.CLI.Seagull.Commands.PacificGullCommand
+ assert @subject.parse(command_line) ==
+ {command, "pacific_gull", ["fly"], %{vhost: "my_vhost"}, []}
+ end
+
+ test "parse/1 returns command name when a global flag comes before the command" do
+ command_line = ["-p", "my_vhost", "pacific_gull", "fly"]
+ command = RabbitMQ.CLI.Seagull.Commands.PacificGullCommand
+ assert @subject.parse(command_line) ==
+ {command, "pacific_gull", ["fly"], %{vhost: "my_vhost"}, []}
+ end
+
+ test "parse/1 returns command name when a global flag separated by an equals sign comes before the command" do
+ command_line = ["-p=my_vhost", "pacific_gull", "fly"]
+ command = RabbitMQ.CLI.Seagull.Commands.PacificGullCommand
+ assert @subject.parse(command_line) ==
+ {command, "pacific_gull", ["fly"], %{vhost: "my_vhost"}, []}
+ end
+
+ test "parse/1 returns :no_command when given an empty argument list" do
+ command_line = ["-p", "my_vhost"]
+ assert @subject.parse(command_line) ==
+ {:no_command, "", [], %{vhost: "my_vhost"}, []}
+ end
+
+ test "parse/1 returns :no_command and command name when command isn't known" do
+ command_line = ["atlantic_gull", "-p", "my_vhost"]
+ assert @subject.parse(command_line) ==
+ {:no_command, "atlantic_gull", [], %{vhost: "my_vhost"}, []}
+ end
+
+ test "parse/1 returns :no command if command-specific options come before the command" do
+ command_line = ["--herring", "atlantic", "herring_gull", "-p", "my_vhost"]
+ assert @subject.parse(command_line) ==
+ {:no_command, "atlantic", ["herring_gull"],
+ %{vhost: "my_vhost"}, [{"--herring", nil}]}
+ end
+
+ test "parse/1 returns command name if a global option comes before the command" do
+ command_line = ["-p", "my_vhost", "herring_gull"]
+ command = RabbitMQ.CLI.Seagull.Commands.HerringGullCommand
+ assert @subject.parse(command_line) ==
+ {command, "herring_gull", [], %{vhost: "my_vhost"}, []}
+ end
+
+ test "parse/1 returns command name if multiple global options come before the command" do
+ command_line = ["-p", "my_vhost", "-q", "-n", "rabbit@test", "herring_gull"]
+ command = RabbitMQ.CLI.Seagull.Commands.HerringGullCommand
+ assert @subject.parse(command_line) ==
+ {command, "herring_gull", [], %{vhost: "my_vhost", node: :rabbit@test, quiet: true}, []}
+ end
+
+ test "parse/1 returns command name if multiple global options separated by an equals sign come before the command" do
+ command_line = ["-p=my_vhost", "-q", "--node=rabbit@test", "herring_gull"]
+ command = RabbitMQ.CLI.Seagull.Commands.HerringGullCommand
+ assert @subject.parse(command_line) ==
+ {command, "herring_gull", [], %{vhost: "my_vhost", node: :rabbit@test, quiet: true}, []}
+ end
+
+ test "parse/1 returns command with command specific options" do
+ command_line = ["herring_gull", "--herring", "atlantic",
+ "-g", "fly", "-p", "my_vhost"]
+ command = RabbitMQ.CLI.Seagull.Commands.HerringGullCommand
+ assert @subject.parse(command_line) ==
+ {command, "herring_gull", ["fly"],
+ %{vhost: "my_vhost", herring: "atlantic", garbage: true}, []}
+ end
+
+ test "parse/1 returns command with command specific options that are separated by an equals sign" do
+ command_line = ["herring_gull", "--herring=atlantic",
+ "-g", "fly", "-p=my_vhost"]
+ command = RabbitMQ.CLI.Seagull.Commands.HerringGullCommand
+ assert @subject.parse(command_line) ==
+ {command, "herring_gull", ["fly"],
+ %{vhost: "my_vhost", herring: "atlantic", garbage: true}, []}
+ end
+
+ test "parse/1 expands command-defined aliases" do
+ command_line = ["herring_gull", "fly", "-g"]
+ command = RabbitMQ.CLI.Seagull.Commands.HerringGullCommand
+ assert @subject.parse(command_line) ==
+ {command, "herring_gull", ["fly"], %{garbage: true}, []}
+ end
+
+ test "parse/1 returns invalid/extra options for command" do
+ command_line = ["pacific_gull", "fly",
+ "--herring", "atlantic",
+ "-p", "my_vhost"]
+ pacific_gull = RabbitMQ.CLI.Seagull.Commands.PacificGullCommand
+ assert @subject.parse(command_line) ==
+ {pacific_gull, "pacific_gull", ["fly", "atlantic"],
+ %{vhost: "my_vhost"},
+ [{"--herring", nil}]}
+ end
+
+ test "parse/1 suggests similar command" do
+ # One letter difference
+ assert @subject.parse(["pacific_gulf"]) ==
+ {{:suggest, "pacific_gull"}, "pacific_gulf", [], %{}, []}
+
+ # One letter missing
+ assert @subject.parse(["pacific_gul"]) ==
+ {{:suggest, "pacific_gull"}, "pacific_gul", [], %{}, []}
+
+ # One letter extra
+ assert @subject.parse(["pacific_gulll"]) ==
+ {{:suggest, "pacific_gull"}, "pacific_gulll", [], %{}, []}
+
+ # Five letter difference
+ assert @subject.parse(["pacifistcatl"]) ==
+ {{:suggest, "pacific_gull"}, "pacifistcatl", [], %{}, []}
+
+ # Five letters missing
+ assert @subject.parse(["pacific"]) ==
+ {{:suggest, "pacific_gull"}, "pacific", [], %{}, []}
+
+ # Closest to similar
+ assert @subject.parse(["herrdog_gull"]) ==
+ {{:suggest, "herring_gull"}, "herrdog_gull", [], %{}, []}
+
+ # Closest to similar
+ assert @subject.parse(["hermaug_gull"]) ==
+ {{:suggest, "hermann_gull"}, "hermaug_gull", [], %{}, []}
+ end
+
+ @tag cd: "fixtures"
+ test "parse/1 supports aliases" do
+ aliases = """
+ larus_pacificus = pacific_gull
+ gull_with_herring = herring_gull --herring atlantic
+ flying_gull = herring_gull fly
+ garbage_gull = herring_gull -g
+ complex_gull = herring_gull --herring pacific -g --formatter=erlang eat
+ invalid_gull = herring_gull --invalid
+ unknown_gull = mysterious_gull
+ """
+
+ aliases_file_name = "aliases.ini"
+ File.write(aliases_file_name, aliases)
+ on_exit(fn() ->
+ File.rm(aliases_file_name)
+ end)
+
+ assert @subject.parse(["larus_pacificus", "--aliases-file", aliases_file_name]) ==
+ {RabbitMQ.CLI.Seagull.Commands.PacificGullCommand,
+ "larus_pacificus",
+ [],
+ %{aliases_file: aliases_file_name},
+ []}
+
+ assert @subject.parse(["gull_with_herring", "--aliases-file", aliases_file_name]) ==
+ {RabbitMQ.CLI.Seagull.Commands.HerringGullCommand,
+ "gull_with_herring",
+ [],
+ %{aliases_file: aliases_file_name, herring: "atlantic"},
+ []}
+
+ assert @subject.parse(["flying_gull", "--aliases-file", aliases_file_name]) ==
+ {RabbitMQ.CLI.Seagull.Commands.HerringGullCommand,
+ "flying_gull",
+ ["fly"],
+ %{aliases_file: aliases_file_name},
+ []}
+
+ assert @subject.parse(["garbage_gull", "--aliases-file", aliases_file_name]) ==
+ {RabbitMQ.CLI.Seagull.Commands.HerringGullCommand,
+ "garbage_gull",
+ [],
+ %{aliases_file: aliases_file_name, garbage: true},
+ []}
+
+ assert @subject.parse(["complex_gull", "--aliases-file", aliases_file_name]) ==
+ {RabbitMQ.CLI.Seagull.Commands.HerringGullCommand,
+ "complex_gull",
+ ["eat"],
+ %{aliases_file: aliases_file_name, garbage: true, herring: "pacific", formatter: "erlang"},
+ []}
+
+ assert @subject.parse(["invalid_gull", "--aliases-file", aliases_file_name]) ==
+ {RabbitMQ.CLI.Seagull.Commands.HerringGullCommand,
+ "invalid_gull",
+ [],
+ %{aliases_file: aliases_file_name},
+ [{"--invalid", nil}]}
+
+ assert @subject.parse(["unknown_gull", "--aliases-file", aliases_file_name]) ==
+ {:no_command, "unknown_gull", [], %{aliases_file: aliases_file_name}, []}
+
+ File.rm(aliases_file_name)
+
+
+ assert capture_io(:stderr,
+ fn ->
+ assert @subject.parse(["larus_pacificus", "--aliases-file", aliases_file_name]) ==
+ {:no_command, "larus_pacificus", [], %{aliases_file: aliases_file_name}, []}
+ end) =~ "Error reading aliases file"
+
+ end
+
+end
diff --git a/deps/rabbitmq_cli/test/core/rpc_stream_test.exs b/deps/rabbitmq_cli/test/core/rpc_stream_test.exs
new file mode 100644
index 0000000000..cadd303f23
--- /dev/null
+++ b/deps/rabbitmq_cli/test/core/rpc_stream_test.exs
@@ -0,0 +1,94 @@
+defmodule RpcStreamTest do
+ use ExUnit.Case, async: false
+
+ require RabbitMQ.CLI.Ctl.RpcStream
+ alias RabbitMQ.CLI.Ctl.RpcStream
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ :ok
+ end
+
+ test "emit empty list" do
+ items = receive_list_items_to_list([Kernel.node, TestHelper, :emit_list, [[]], :infinity, []])
+
+ assert [] == items
+ end
+
+ test "emit list without filters" do
+ list = [:one, :two, :three]
+ items = receive_list_items_to_list([Kernel.node, TestHelper, :emit_list, [list], :infinity, []])
+
+ assert list == items
+ end
+
+
+ test "emit list with filters" do
+ list = [[one: 1, two: 2, three: 3], [one: 11, two: 12, three: 13]]
+ items = receive_list_items_to_list([Kernel.node, TestHelper, :emit_list, [list], :infinity, [:one, :two]])
+
+ assert [[one: 1, two: 2], [one: 11, two: 12]] == items
+ end
+
+ test "emit list of lists with filters" do
+ list = [[[one: 1, two: 2, three: 3], [one: 11, two: 12, three: 13]],
+ [[one: 21, two: 22, three: 23], [one: 31, two: 32, three: 33]]]
+ items = receive_list_items_to_list([Kernel.node, TestHelper, :emit_list, [list], :infinity, [:one, :two]])
+
+ assert [[[one: 1, two: 2], [one: 11, two: 12]], [[one: 21, two: 22], [one: 31, two: 32]]] == items
+ end
+
+ test "emission timeout 0 return badrpc" do
+ items = receive_list_items_to_list([Kernel.node, TestHelper, :emit_list, [[]], 0, []])
+
+ assert [{:badrpc, {:timeout, 0.0}}] == items
+ end
+
+ test "emission timeout return badrpc with timeout value in seconds" do
+ timeout_fun = fn(x) -> :timer.sleep(1000); x end
+ items = receive_list_items_to_list([Kernel.node, TestHelper, :emit_list_map, [[1,2,3], timeout_fun], 100, []])
+ assert [{:badrpc, {:timeout, 0.1}}] == items
+ end
+
+ test "emission timeout in progress return badrpc with timeout value in seconds as last element" do
+ timeout_fun = fn(x) -> :timer.sleep(100); x end
+ items = receive_list_items_to_list([Kernel.node, TestHelper, :emit_list_map, [[1,2,3], timeout_fun], 150, []])
+ assert [1, {:badrpc, {:timeout, 0.15}}] == items
+ end
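+
+ # In the timeout tests above, timeouts are passed in milliseconds and
+ # reported back inside the badrpc tuple in seconds (100 ms -> 0.1, 150 ms -> 0.15).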
+
+ test "parallel emission do not mix values" do
+ {:ok, agent} = Agent.start_link(fn() -> :init end)
+ list1 = [:one, :two, :three]
+ list2 = [:dog, :cat, :pig]
+ # Adding timeout to make sure emissions are executed in parallel
+ timeout_fun = fn(x) -> :timer.sleep(10); x end
+ Agent.update(agent,
+ fn(:init) ->
+ receive_list_items_to_list([Kernel.node, TestHelper, :emit_list_map, [list2, timeout_fun], :infinity, []])
+ end)
+ items1 = receive_list_items_to_list([Kernel.node, TestHelper, :emit_list_map, [list1, timeout_fun], :infinity, []])
+ items2 = Agent.get(agent, fn(x) -> x end)
+
+ assert items1 == list1
+ assert items2 == list2
+ end
+
+ test "can receive from multiple emission sources in parallel" do
+ list1 = [:one, :two, :three]
+ list2 = [:dog, :cat, :pig]
+ items = receive_list_items_to_list([Kernel.node, TestHelper, :emit_list_multiple_sources, [list1, list2], :infinity, []], 2)
+ assert Kernel.length(list1 ++ list2) == Kernel.length(items)
+ assert MapSet.new(list1 ++ list2) == MapSet.new(items)
+ end
+
+ def receive_list_items_to_list(args, chunks \\ 1) do
+ res = Kernel.apply(RpcStream, :receive_list_items, args ++ [chunks])
+ case Enumerable.impl_for(res) do
+ nil -> res;
+ _ -> Enum.to_list(res)
+ end
+ end
+end
diff --git a/deps/rabbitmq_cli/test/core/table_formatter_test.exs b/deps/rabbitmq_cli/test/core/table_formatter_test.exs
new file mode 100644
index 0000000000..60bf2060f1
--- /dev/null
+++ b/deps/rabbitmq_cli/test/core/table_formatter_test.exs
@@ -0,0 +1,46 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+
+defmodule TableFormatterTest do
+ use ExUnit.Case, async: false
+
+ @formatter RabbitMQ.CLI.Formatters.Table
+
+ test "format_output tab-separates map values" do
+ assert @formatter.format_output(%{a: :apple, b: :beer}, %{}) == ["a\tb", "apple\tbeer"]
+ assert @formatter.format_output(%{a: :apple, b: :beer, c: 1}, %{}) == ["a\tb\tc", "apple\tbeer\t1"]
+ assert @formatter.format_output(%{a: "apple", b: 'beer', c: 1}, %{}) == ["a\tb\tc", "apple\t\"beer\"\t1"]
+ end
+
+ test "format_output tab-separates keyword values" do
+ assert @formatter.format_output([a: :apple, b: :beer], %{}) == ["a\tb", "apple\tbeer"]
+ assert @formatter.format_output([a: :apple, b: :beer, c: 1], %{}) == ["a\tb\tc", "apple\tbeer\t1"]
+ assert @formatter.format_output([a: "apple", b: 'beer', c: 1], %{}) == ["a\tb\tc", "apple\t\"beer\"\t1"]
+ end
+
+ test "format_stream tab-separates map values" do
+ assert @formatter.format_stream([%{a: :apple, b: :beer, c: 1},
+ %{a: "aadvark", b: 'bee', c: 2}], %{})
+ |> Enum.to_list ==
+ ["a\tb\tc", "apple\tbeer\t1", "aadvark\t\"bee\"\t2"]
+ end
+
+ test "format_stream tab-separates keyword values" do
+ assert @formatter.format_stream([[a: :apple, b: :beer, c: 1],
+ [a: "aadvark", b: 'bee', c: 2]], %{})
+ |> Enum.to_list ==
+ ["a\tb\tc", "apple\tbeer\t1", "aadvark\t\"bee\"\t2"]
+ end
+
+ test "format_output formats non-string values with inspect recursively" do
+ assert @formatter.format_output(%{a: :apple, b: "beer", c: {:carp, "fish"}, d: [door: :way], e: %{elk: "horn", for: :you}}, %{}) ==
+ ["a\tb\tc\td\te", "apple\tbeer\t{carp, fish}\t[{door, way}]\t\#{elk => horn, for => you}"]
+
+ assert @formatter.format_output(%{a: :apple, b: "beer", c: {:carp, {:small, :fish}}, d: [door: {:way, "big"}], e: %{elk: [horn: :big]}}, %{}) ==
+ ["a\tb\tc\td\te", "apple\tbeer\t{carp, {small, fish}}\t[{door, {way, big}}]\t\#{elk => [{horn, big}]}"]
+ end
+end
diff --git a/deps/rabbitmq_cli/test/ctl/add_user_command_test.exs b/deps/rabbitmq_cli/test/ctl/add_user_command_test.exs
new file mode 100644
index 0000000000..ec21691da9
--- /dev/null
+++ b/deps/rabbitmq_cli/test/ctl/add_user_command_test.exs
@@ -0,0 +1,86 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+
+defmodule AddUserCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Ctl.Commands.AddUserCommand
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ :ok
+ end
+
+ setup context do
+ on_exit(context, fn -> delete_user(context[:user]) end)
+ {:ok, opts: %{node: get_rabbit_hostname()}}
+ end
+
+ test "validate: no positional arguments fails" do
+ assert @command.validate([], %{}) == {:validation_failure, :not_enough_args}
+ end
+
+ test "validate: too many positional arguments fails" do
+ assert @command.validate(["user", "password", "extra"], %{}) ==
+ {:validation_failure, :too_many_args}
+ end
+
+ test "validate: two arguments passes" do
+ assert @command.validate(["user", "password"], %{}) == :ok
+ end
+
+ test "validate: one argument passes" do
+ assert @command.validate(["user"], %{}) == :ok
+ end
+
+ @tag user: "", password: "password"
+ test "validate: an empty username fails", context do
+ assert match?({:validation_failure, {:bad_argument, _}}, @command.validate([context[:user], context[:password]], context[:opts]))
+ end
+
+ # Blank passwords are currently allowed, they make sense
+ # e.g. when a user only authenticates using X.509 certificates.
+ # Credential validators can be used to require passwords of a certain length
+ # or following a certain pattern. This is a core server responsibility. MK.
+ @tag user: "some_rando", password: ""
+ test "validate: an empty password is allowed", context do
+ assert @command.validate([context[:user], context[:password]], context[:opts]) == :ok
+ end
+
+ @tag user: "someone", password: "password"
+ test "run: request to a non-existent node returns a badrpc", context do
+ opts = %{node: :jake@thedog, timeout: 200}
+ assert match?({:badrpc, _}, @command.run([context[:user], context[:password]], opts))
+ end
+
+ @tag user: "someone", password: "password"
+ test "run: default case completes successfully", context do
+ assert @command.run([context[:user], context[:password]], context[:opts]) == :ok
+ assert list_users() |> Enum.count(fn(record) -> record[:user] == context[:user] end) == 1
+ end
+
+ @tag user: "someone", password: "password"
+ test "run: adding an existing user returns an error", context do
+ add_user(context[:user], context[:password])
+ assert @command.run([context[:user], context[:password]], context[:opts]) == {:error, {:user_already_exists, context[:user]}}
+ assert list_users() |> Enum.count(fn(record) -> record[:user] == context[:user] end) == 1
+ end
+
+ @tag user: "someone", password: "password"
+ test "banner", context do
+ assert @command.banner([context[:user], context[:password]], context[:opts])
+ =~ ~r/Adding user \"#{context[:user]}\" \.\.\./
+ end
+
+ @tag user: "someone"
+ test "output: formats a user_already_exists error", context do
+ {:error, 70, "User \"someone\" already exists"} =
+ @command.output({:error, {:user_already_exists, context[:user]}}, %{})
+ end
+end
diff --git a/deps/rabbitmq_cli/test/ctl/add_vhost_command_test.exs b/deps/rabbitmq_cli/test/ctl/add_vhost_command_test.exs
new file mode 100644
index 0000000000..f9f6362c19
--- /dev/null
+++ b/deps/rabbitmq_cli/test/ctl/add_vhost_command_test.exs
@@ -0,0 +1,68 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+
+defmodule AddVhostCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Ctl.Commands.AddVhostCommand
+ @vhost "test"
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+ {:ok, opts: %{node: get_rabbit_hostname()}}
+ end
+
+ setup context do
+ on_exit(context, fn -> delete_vhost(context[:vhost]) end)
+ :ok
+ end
+
+ test "validate: no arguments fails validation" do
+ assert @command.validate([], %{}) == {:validation_failure, :not_enough_args}
+ end
+
+ test "validate: too many arguments fails validation" do
+ assert @command.validate(["test", "extra"], %{}) == {:validation_failure, :too_many_args}
+ end
+
+ test "validate: one argument passes validation" do
+ assert @command.validate(["new-vhost"], %{}) == :ok
+ assert @command.validate(["new-vhost"], %{description: "Used by team A"}) == :ok
+ assert @command.validate(["new-vhost"], %{description: "Used by team A for QA purposes", tags: "qa,team-a"}) == :ok
+ end
+
+ @tag vhost: @vhost
+ test "run: passing a valid vhost name to a running RabbitMQ node succeeds", context do
+ assert @command.run([context[:vhost]], context[:opts]) == :ok
+ assert list_vhosts() |> Enum.count(fn(record) -> record[:name] == context[:vhost] end) == 1
+ end
+
+ @tag vhost: ""
+ test "run: passing an empty string for vhost name with a running RabbitMQ node still succeeds", context do
+ assert @command.run([context[:vhost]], context[:opts]) == :ok
+ assert list_vhosts() |> Enum.count(fn(record) -> record[:name] == context[:vhost] end) == 1
+ end
+
+ test "run: attempt to use an unreachable node returns a nodedown" do
+ opts = %{node: :jake@thedog, timeout: 200}
+ assert match?({:badrpc, _}, @command.run(["na"], opts))
+ end
+
+  test "run: adding the same vhost twice is idempotent", context do
+ add_vhost context[:vhost]
+
+ assert @command.run([context[:vhost]], context[:opts]) == :ok
+ assert list_vhosts() |> Enum.count(fn(record) -> record[:name] == context[:vhost] end) == 1
+ end
+
+ @tag vhost: @vhost
+ test "banner", context do
+ assert @command.banner([context[:vhost]], context[:opts])
+ =~ ~r/Adding vhost \"#{context[:vhost]}\" \.\.\./
+ end
+end
diff --git a/deps/rabbitmq_cli/test/ctl/authenticate_user_command_test.exs b/deps/rabbitmq_cli/test/ctl/authenticate_user_command_test.exs
new file mode 100644
index 0000000000..506dfad367
--- /dev/null
+++ b/deps/rabbitmq_cli/test/ctl/authenticate_user_command_test.exs
@@ -0,0 +1,81 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+
+defmodule AuthenticateUserCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+  @command RabbitMQ.CLI.Ctl.Commands.AuthenticateUserCommand
+ @user "user1"
+ @password "password"
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ :ok
+ end
+
+ setup context do
+ add_user(@user, @password)
+ on_exit(context, fn -> delete_user(@user) end)
+ {:ok, opts: %{node: get_rabbit_hostname()}}
+ end
+
+ test "validate: no positional arguments fails" do
+ assert @command.validate([], %{}) == {:validation_failure, :not_enough_args}
+ end
+
+ test "validate: too many positional arguments fails" do
+ assert @command.validate(["user", "password", "extra"], %{}) ==
+ {:validation_failure, :too_many_args}
+ end
+
+ test "validate: one argument passes" do
+ assert @command.validate(["user"], %{}) == :ok
+ end
+
+ test "validate: two arguments passes" do
+ assert @command.validate(["user", "password"], %{}) == :ok
+ end
+
+ @tag user: @user, password: @password
+ test "run: a valid username and password returns okay", context do
+ assert {:ok, _} = @command.run([context[:user], context[:password]], context[:opts])
+ end
+
+ test "run: throws a badrpc when instructed to contact an unreachable RabbitMQ node" do
+ opts = %{node: :jake@thedog, timeout: 200}
+ assert match?({:badrpc, _}, @command.run(["user", "password"], opts))
+ end
+
+ @tag user: @user, password: "treachery"
+ test "run: a valid username and invalid password returns refused", context do
+ assert {:refused, _, _, _} = @command.run([context[:user], context[:password]], context[:opts])
+ end
+
+ @tag user: "interloper", password: @password
+ test "run: an invalid username returns refused", context do
+ assert {:refused, _, _, _} = @command.run([context[:user], context[:password]], context[:opts])
+ end
+
+ @tag user: @user, password: @password
+ test "banner", context do
+ assert @command.banner([context[:user], context[:password]], context[:opts])
+ =~ ~r/Authenticating user/
+ assert @command.banner([context[:user], context[:password]], context[:opts])
+ =~ ~r/"#{context[:user]}"/
+ end
+
+ test "output: refused error", context do
+ user = "example_user"
+ exit_code = RabbitMQ.CLI.Core.ExitCodes.exit_dataerr
+ assert match?({:error, ^exit_code,
+ "Error: failed to authenticate user \"example_user\"\n" <>
+ "Unable to foo"},
+ @command.output({:refused, user, "Unable to ~s", ["foo"]}, context[:opts]))
+ end
+end
diff --git a/deps/rabbitmq_cli/test/ctl/autocomplete_command_test.exs b/deps/rabbitmq_cli/test/ctl/autocomplete_command_test.exs
new file mode 100644
index 0000000000..52b3c8d026
--- /dev/null
+++ b/deps/rabbitmq_cli/test/ctl/autocomplete_command_test.exs
@@ -0,0 +1,52 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+
+defmodule AutocompleteCommandTest do
+ use ExUnit.Case, async: true
+ import TestHelper
+
+ @command RabbitMQ.CLI.Ctl.Commands.AutocompleteCommand
+ setup do
+ {:ok, opts: %{
+ script_name: "rabbitmqctl",
+ node: get_rabbit_hostname()
+ }}
+ end
+
+ test "shows up in help" do
+ s = @command.usage()
+ assert s =~ ~r/autocomplete/
+ end
+
+ test "enforces --silent" do
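+    # --silent is forced so the output stays free of banners and easy for shell completion scripts to parse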
+ assert @command.merge_defaults(["list_"], %{}) == {["list_"], %{silent: true}}
+ end
+
+ test "validate: providing no arguments fails validation" do
+ assert @command.validate([], %{}) == {:validation_failure, :not_enough_args}
+ end
+
+ test "validate: providing two or more arguments fails validation" do
+ assert @command.validate(["list_", "extra"], %{}) == {:validation_failure, :too_many_args}
+ end
+
+ test "validate: providing a single argument passes validation" do
+ assert @command.validate(["list_c"], %{}) == :ok
+ end
+
+ test "run: lists completion options", context do
+ {:stream, completion_options} = @command.run(["list_c"], context[:opts])
+
+ assert Enum.member?(completion_options, "list_channels")
+ assert Enum.member?(completion_options, "list_connections")
+ assert Enum.member?(completion_options, "list_consumers")
+ end
+
+  test "banner returns nil", context do
+ assert @command.banner(["list_"], context[:opts]) == nil
+ end
+end
diff --git a/deps/rabbitmq_cli/test/ctl/await_online_nodes_command_test.exs b/deps/rabbitmq_cli/test/ctl/await_online_nodes_command_test.exs
new file mode 100644
index 0000000000..bf9eeb574d
--- /dev/null
+++ b/deps/rabbitmq_cli/test/ctl/await_online_nodes_command_test.exs
@@ -0,0 +1,44 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+
+defmodule AwaitOnlineNodesCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Ctl.Commands.AwaitOnlineNodesCommand
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ {:ok, opts: %{node: get_rabbit_hostname(), timeout: 300_000}}
+ end
+
+ setup context do
+ on_exit(context, fn -> delete_vhost(context[:vhost]) end)
+ :ok
+ end
+
+ test "validate: wrong number of arguments results in arg count errors" do
+ assert @command.validate([], %{}) == {:validation_failure, :not_enough_args}
+ assert @command.validate(["1", "1"], %{}) == {:validation_failure, :too_many_args}
+ end
+
+ test "run: a call with node count of 1 with a running RabbitMQ node succeeds", context do
+ assert @command.run(["1"], context[:opts]) == :ok
+ end
+
+ test "run: a call to an unreachable RabbitMQ node returns a nodedown" do
+ opts = %{node: :jake@thedog, timeout: 200}
+ assert match?({:badrpc, _}, @command.run(["1"], opts))
+ end
+
+ test "banner", context do
+ assert @command.banner(["1"], context[:opts])
+ =~ ~r/Will wait for at least 1 nodes to join the cluster of #{context[:opts][:node]}. Timeout: 300 seconds./
+ end
+
+end
diff --git a/deps/rabbitmq_cli/test/ctl/await_startup_command_test.exs b/deps/rabbitmq_cli/test/ctl/await_startup_command_test.exs
new file mode 100644
index 0000000000..554ec5ee77
--- /dev/null
+++ b/deps/rabbitmq_cli/test/ctl/await_startup_command_test.exs
@@ -0,0 +1,49 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule AwaitStartupCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Ctl.Commands.AwaitStartupCommand
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ {:ok, opts: %{node: get_rabbit_hostname(), timeout: 300_000}}
+ end
+
+ setup do
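+    # note: these per-test opts replace the ones from setup_all, so the 5-minute timeout is dropped here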
+ {:ok, opts: %{node: get_rabbit_hostname()}}
+ end
+
+ test "merge_defaults: default timeout is 5 minutes" do
+ assert @command.merge_defaults([], %{}) == {[], %{timeout: 300_000}}
+ end
+
+ test "validate: accepts no arguments", context do
+ assert @command.validate([], context[:opts]) == :ok
+ end
+
+ test "validate: with extra arguments returns an arg count error", context do
+ assert @command.validate(["extra"], context[:opts]) ==
+ {:validation_failure, :too_many_args}
+ end
+
+ test "run: request to a non-existent node returns a badrpc" do
+ opts = %{node: :jake@thedog, timeout: 200}
+ assert match?({:badrpc, _}, @command.run([], opts))
+ end
+
+ test "run: request to a fully booted node succeeds", context do
+ # this timeout value is in seconds
+ assert @command.run([], Map.merge(context[:opts], %{timeout: 5})) == :ok
+ end
+
+ test "empty banner", context do
+ nil = @command.banner([], context[:opts])
+ end
+end
diff --git a/deps/rabbitmq_cli/test/ctl/cancel_sync_command_test.exs b/deps/rabbitmq_cli/test/ctl/cancel_sync_command_test.exs
new file mode 100644
index 0000000000..8503e6ab5f
--- /dev/null
+++ b/deps/rabbitmq_cli/test/ctl/cancel_sync_command_test.exs
@@ -0,0 +1,64 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2016-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule CancelSyncQueueCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Ctl.Commands.CancelSyncQueueCommand
+
+ @vhost "/"
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ start_rabbitmq_app()
+
+ on_exit([], fn ->
+ start_rabbitmq_app()
+ end)
+
+ :ok
+ end
+
+ setup do
+ {:ok, opts: %{
+ node: get_rabbit_hostname(),
+ vhost: @vhost
+ }}
+ end
+
+ test "validate: specifying no queue name is reported as an error", context do
+ assert @command.validate([], context[:opts]) ==
+ {:validation_failure, :not_enough_args}
+ end
+
+ test "validate: specifying two queue names is reported as an error", context do
+ assert @command.validate(["q1", "q2"], context[:opts]) ==
+ {:validation_failure, :too_many_args}
+ end
+
+ test "validate: specifying three queue names is reported as an error", context do
+ assert @command.validate(["q1", "q2", "q3"], context[:opts]) ==
+ {:validation_failure, :too_many_args}
+ end
+
+ test "validate: specifying one queue name succeeds", context do
+ assert @command.validate(["q1"], context[:opts]) == :ok
+ end
+
+ test "run: request to a non-existent RabbitMQ node returns a nodedown" do
+ opts = %{node: :jake@thedog, vhost: @vhost, timeout: 200}
+ assert match?({:badrpc, _}, @command.run(["q1"], opts))
+ end
+
+ test "banner", context do
+ s = @command.banner(["q1"], context[:opts])
+
+ assert s =~ ~r/Stopping synchronising queue/
+ assert s =~ ~r/q1/
+ end
+end
diff --git a/deps/rabbitmq_cli/test/ctl/change_cluster_node_type_command_test.exs b/deps/rabbitmq_cli/test/ctl/change_cluster_node_type_command_test.exs
new file mode 100644
index 0000000000..8fcb7de3ae
--- /dev/null
+++ b/deps/rabbitmq_cli/test/ctl/change_cluster_node_type_command_test.exs
@@ -0,0 +1,84 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2016-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+
+defmodule ChangeClusterNodeTypeCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Ctl.Commands.ChangeClusterNodeTypeCommand
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ start_rabbitmq_app()
+
+ on_exit([], fn ->
+ start_rabbitmq_app()
+ end)
+
+ :ok
+ end
+
+ setup do
+ {:ok, opts: %{
+ node: get_rabbit_hostname()
+ }}
+ end
+
+  test "validate: node types of disc, disk, and ram pass validation", context do
+ assert match?(
+ {:validation_failure, {:bad_argument, _}},
+ @command.validate(["foo"], context[:opts]))
+
+ assert :ok == @command.validate(["ram"], context[:opts])
+ assert :ok == @command.validate(["disc"], context[:opts])
+ assert :ok == @command.validate(["disk"], context[:opts])
+ end
+
+ test "validate: providing no arguments fails validation", context do
+ assert @command.validate([], context[:opts]) ==
+ {:validation_failure, :not_enough_args}
+  end
+
+  test "validate: providing too many arguments fails validation", context do
+ assert @command.validate(["a", "b", "c"], context[:opts]) ==
+ {:validation_failure, :too_many_args}
+ end
+
+ # TODO
+ #test "run: change ram node to disc node", context do
+ #end
+
+ # TODO
+ #test "run: change disk node to ram node", context do
+ #end
+
+ test "run: request to a node with running RabbitMQ app fails", context do
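+    # the node type can only be changed while the RabbitMQ app (and thus Mnesia) is stopped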
+ assert match?(
+ {:error, :mnesia_unexpectedly_running},
+ @command.run(["ram"], context[:opts]))
+ end
+
+ test "run: request to an unreachable node returns a badrpc", _context do
+ opts = %{node: :jake@thedog, timeout: 200}
+ assert match?(
+ {:badrpc, :nodedown},
+ @command.run(["ram"], opts))
+ end
+
+ test "banner", context do
+ assert @command.banner(["ram"], context[:opts]) =~
+ ~r/Turning #{get_rabbit_hostname()} into a ram node/
+ end
+
+  test "output: mnesia is running error", context do
+    exit_code = RabbitMQ.CLI.Core.ExitCodes.exit_software
+    assert match?({:error, ^exit_code,
+                   "Mnesia is still running on node " <> _},
+                  @command.output({:error, :mnesia_unexpectedly_running}, context[:opts]))
+  end
+end
diff --git a/deps/rabbitmq_cli/test/ctl/change_password_command_test.exs b/deps/rabbitmq_cli/test/ctl/change_password_command_test.exs
new file mode 100644
index 0000000000..3a415085dd
--- /dev/null
+++ b/deps/rabbitmq_cli/test/ctl/change_password_command_test.exs
@@ -0,0 +1,80 @@
+## The contents of this file are subject to the Mozilla Public License
+## Version 1.1 (the "License"); you may not use this file except in
+## compliance with the License. You may obtain a copy of the License
+## at https://www.mozilla.org/MPL/
+##
+## Software distributed under the License is distributed on an "AS IS"
+## basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+## the License for the specific language governing rights and
+## limitations under the License.
+##
+## The Original Code is RabbitMQ.
+##
+## The Initial Developer of the Original Code is GoPivotal, Inc.
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+
+defmodule ChangePasswordCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+  @command RabbitMQ.CLI.Ctl.Commands.ChangePasswordCommand
+ @user "user1"
+ @password "password"
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ :ok
+ end
+
+ setup context do
+ add_user(@user, @password)
+ on_exit(context, fn -> delete_user(@user) end)
+ {:ok, opts: %{node: get_rabbit_hostname()}}
+ end
+
+ test "validate: no positional arguments fails" do
+ assert @command.validate([], %{}) == {:validation_failure, :not_enough_args}
+ end
+
+ test "validate: too many positional arguments fails" do
+ assert @command.validate(["user", "password", "extra"], %{}) ==
+ {:validation_failure, :too_many_args}
+ end
+
+ test "validate: two arguments passes" do
+ assert @command.validate(["user", "password"], %{}) == :ok
+ end
+
+ test "validate: one argument passes" do
+ assert @command.validate(["user"], %{}) == :ok
+ end
+
+ @tag user: @user, password: "new_password"
+ test "run: a valid username and new password return ok", context do
+ assert @command.run([context[:user], context[:password]], context[:opts]) == :ok
+ assert {:ok, _} = authenticate_user(context[:user], context[:password])
+ end
+
+ test "run: throws a badrpc when instructed to contact an unreachable RabbitMQ node" do
+ opts = %{node: :jake@thedog, timeout: 200}
+ assert match?({:badrpc, _}, @command.run(["user", "password"], opts))
+ end
+
+ @tag user: @user, password: @password
+ test "run: changing password to the same thing is ok", context do
+ assert @command.run([context[:user], context[:password]], context[:opts]) == :ok
+ assert {:ok, _} = authenticate_user(context[:user], context[:password])
+ end
+
+ @tag user: "interloper", password: "new_password"
+ test "run: an invalid user returns an error", context do
+ assert @command.run([context[:user], context[:password]], context[:opts]) == {:error, {:no_such_user, "interloper"}}
+ end
+
+ @tag user: @user, password: @password
+ test "banner", context do
+ assert @command.banner([context[:user], context[:password]], context[:opts])
+ =~ ~r/Changing password for user/
+ assert @command.banner([context[:user], context[:password]], context[:opts])
+ =~ ~r/"#{context[:user]}"/
+ end
+end
diff --git a/deps/rabbitmq_cli/test/ctl/clear_global_parameter_command_test.exs b/deps/rabbitmq_cli/test/ctl/clear_global_parameter_command_test.exs
new file mode 100644
index 0000000000..adadc3c223
--- /dev/null
+++ b/deps/rabbitmq_cli/test/ctl/clear_global_parameter_command_test.exs
@@ -0,0 +1,86 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+
+defmodule ClearGlobalParameterCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Ctl.Commands.ClearGlobalParameterCommand
+ @key :mqtt_default_vhosts
+ @value "{\"O=client,CN=dummy\":\"somevhost\"}"
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ :ok
+ end
+
+ setup context do
+ on_exit(context, fn ->
+ clear_global_parameter context[:key]
+ end)
+
+ {
+ :ok,
+ opts: %{
+ node: get_rabbit_hostname()
+ }
+ }
+ end
+
+ test "validate: expects a single argument" do
+ assert @command.validate(["one"], %{}) == :ok
+ assert @command.validate([], %{}) == {:validation_failure, :not_enough_args}
+ assert @command.validate(["this is", "too many"], %{}) == {:validation_failure, :too_many_args}
+ end
+
+ @tag key: @key
+ test "run: when global parameter does not exist, returns an error", context do
+ assert @command.run(
+ [context[:key]],
+ context[:opts]
+ ) == {:error_string, 'Parameter does not exist'}
+ end
+
+ test "run: throws a badrpc when instructed to contact an unreachable RabbitMQ node" do
+ opts = %{node: :jake@thedog, timeout: 200}
+ assert match?({:badrpc, _}, @command.run([@key], opts))
+ end
+
+ @tag key: @key
+ test "run: clears the parameter", context do
+ set_global_parameter(context[:key], @value)
+
+ assert @command.run(
+ [context[:key]],
+ context[:opts]
+ ) == :ok
+
+ assert_parameter_empty(context)
+ end
+
+ @tag key: @key, value: @value
+ test "banner", context do
+ set_global_parameter(context[:key], @value)
+
+ s = @command.banner(
+ [context[:key]],
+ context[:opts]
+ )
+
+ assert s =~ ~r/Clearing global runtime parameter/
+ assert s =~ ~r/"#{context[:key]}"/
+ end
+
+ defp assert_parameter_empty(context) do
+ parameter = list_global_parameters()
+ |> Enum.filter(fn(param) ->
+ param[:key] == context[:key]
+ end)
+ assert parameter === []
+ end
+end
diff --git a/deps/rabbitmq_cli/test/ctl/clear_operator_policy_command_test.exs b/deps/rabbitmq_cli/test/ctl/clear_operator_policy_command_test.exs
new file mode 100644
index 0000000000..834caf89f7
--- /dev/null
+++ b/deps/rabbitmq_cli/test/ctl/clear_operator_policy_command_test.exs
@@ -0,0 +1,127 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+
+defmodule ClearOperatorPolicyCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Ctl.Commands.ClearOperatorPolicyCommand
+ @vhost "test1"
+ @key "message-expiry"
+ @pattern "^queue\."
+ @value "{\"message-ttl\":10}"
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ add_vhost @vhost
+
+ on_exit([], fn ->
+ delete_vhost @vhost
+ end)
+
+ :ok
+ end
+
+ setup context do
+ on_exit(context, fn ->
+ clear_operator_policy(context[:vhost], context[:key])
+ end)
+
+ {
+ :ok,
+ opts: %{
+ node: get_rabbit_hostname()
+ }
+ }
+ end
+
+ test "merge_defaults: adds default vhost if missing" do
+ assert @command.merge_defaults([], %{}) == {[], %{vhost: "/"}}
+ end
+
+ test "merge_defaults: does not change provided vhost" do
+ assert @command.merge_defaults([], %{vhost: "test_vhost"}) == {[], %{vhost: "test_vhost"}}
+ end
+
+ test "validate: providing too few arguments fails validation" do
+ assert @command.validate([], %{}) == {:validation_failure, :not_enough_args}
+ end
+
+ test "validate: providing too many arguments fails validation" do
+ assert @command.validate(["too", "many"], %{}) == {:validation_failure, :too_many_args}
+ assert @command.validate(["this", "is", "many"], %{}) == {:validation_failure, :too_many_args}
+ end
+
+ test "validate: providing one argument and no options passes validation" do
+ assert @command.validate(["a-policy"], %{}) == :ok
+ end
+
+ @tag pattern: @pattern, key: @key, vhost: @vhost
+ test "run: if policy does not exist, returns an error", context do
+ vhost_opts = Map.merge(context[:opts], %{vhost: context[:vhost]})
+
+ assert @command.run(
+ [context[:key]],
+ vhost_opts
+ ) == {:error_string, 'Parameter does not exist'}
+ end
+
+ test "run: an unreachable node throws a badrpc" do
+ opts = %{node: :jake@thedog, vhost: "/", timeout: 200}
+ assert match?({:badrpc, _}, @command.run([@key], opts))
+ end
+
+
+ @tag pattern: @pattern, key: @key, vhost: @vhost
+ test "run: if policy exists, returns ok and removes it", context do
+ vhost_opts = Map.merge(context[:opts], %{vhost: context[:vhost]})
+
+ set_operator_policy(context[:vhost], context[:key], context[:pattern], @value)
+
+ assert @command.run(
+ [context[:key]],
+ vhost_opts
+ ) == :ok
+
+ assert_operator_policy_does_not_exist(context)
+ end
+
+ @tag pattern: @pattern, key: @key, value: @value, vhost: "bad-vhost"
+ test "run: a non-existent vhost returns an error", context do
+ vhost_opts = Map.merge(context[:opts], %{vhost: context[:vhost]})
+
+ assert @command.run(
+ [context[:key]],
+ vhost_opts
+ ) == {:error_string, 'Parameter does not exist'}
+ end
+
+ @tag key: @key, pattern: @pattern, value: @value, vhost: @vhost
+ test "banner", context do
+ vhost_opts = Map.merge(context[:opts], %{vhost: context[:vhost]})
+ set_operator_policy(context[:vhost], context[:key], context[:pattern], @value)
+
+ s = @command.banner(
+ [context[:key]],
+ vhost_opts
+ )
+
+ assert s =~ ~r/Clearing operator policy/
+ assert s =~ ~r/"#{context[:key]}"/
+ end
+
+ defp assert_operator_policy_does_not_exist(context) do
+ policy = context[:vhost]
+ |> list_operator_policies
+ |> Enum.filter(fn(param) ->
+ param[:pattern] == context[:pattern] and
+ param[:key] == context[:key]
+ end)
+ assert policy === []
+ end
+end
diff --git a/deps/rabbitmq_cli/test/ctl/clear_parameter_command_test.exs b/deps/rabbitmq_cli/test/ctl/clear_parameter_command_test.exs
new file mode 100644
index 0000000000..4f08234cb6
--- /dev/null
+++ b/deps/rabbitmq_cli/test/ctl/clear_parameter_command_test.exs
@@ -0,0 +1,138 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+
+defmodule ClearParameterCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Ctl.Commands.ClearParameterCommand
+ @vhost "test1"
+ @root "/"
+ @component_name "federation-upstream"
+ @key "reconnect-delay"
+ @value "{\"uri\":\"amqp://127.0.0.1:5672\"}"
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ add_vhost @vhost
+
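+    # the "federation-upstream" component used below only exists once the federation plugin is enabled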
+ enable_federation_plugin()
+
+ on_exit([], fn ->
+ delete_vhost @vhost
+ end)
+
+ :ok
+ end
+
+ setup context do
+ on_exit(context, fn ->
+ clear_parameter context[:vhost], context[:component_name], context[:key]
+ end)
+
+ {
+ :ok,
+ opts: %{
+ node: get_rabbit_hostname()
+ }
+ }
+ end
+
+ test "merge_defaults: adds default vhost if missing" do
+ assert @command.merge_defaults([], %{}) == {[], %{vhost: "/"}}
+ end
+
+ test "merge_defaults: defaults can be overridden" do
+ assert @command.merge_defaults([], %{}) == {[], %{vhost: "/"}}
+ assert @command.merge_defaults([], %{vhost: "non_default"}) == {[], %{vhost: "non_default"}}
+ end
+
+ test "validate: argument validation" do
+ assert @command.validate(["one", "two"], %{}) == :ok
+ assert @command.validate([], %{}) == {:validation_failure, :not_enough_args}
+ assert @command.validate(["insufficient"], %{}) == {:validation_failure, :not_enough_args}
+ assert @command.validate(["this", "is", "many"], %{}) == {:validation_failure, :too_many_args}
+ end
+
+ @tag component_name: @component_name, key: @key, vhost: @vhost
+  test "run: returns an error if the parameter does not exist", context do
+ vhost_opts = Map.merge(context[:opts], %{vhost: context[:vhost]})
+
+ assert @command.run(
+ [context[:component_name], context[:key]],
+ vhost_opts
+ ) == {:error_string, 'Parameter does not exist'}
+ end
+
+ test "run: throws a badrpc when instructed to contact an unreachable RabbitMQ node" do
+ opts = %{node: :jake@thedog, vhost: "/", timeout: 200}
+ assert match?({:badrpc, _}, @command.run([@component_name, @key], opts))
+ end
+
+
+ @tag component_name: @component_name, key: @key, vhost: @vhost
+  test "run: returns ok and clears the parameter if it exists", context do
+ vhost_opts = Map.merge(context[:opts], %{vhost: context[:vhost]})
+
+ set_parameter(context[:vhost], context[:component_name], context[:key], @value)
+
+ assert @command.run(
+ [context[:component_name], context[:key]],
+ vhost_opts
+ ) == :ok
+
+ assert_parameter_empty(context)
+ end
+
+ @tag component_name: "bad-component-name", key: @key, value: @value, vhost: @root
+ test "run: an invalid component_name returns a 'parameter does not exist' error", context do
+ vhost_opts = Map.merge(context[:opts], %{vhost: context[:vhost]})
+ assert @command.run(
+ [context[:component_name], context[:key]],
+ vhost_opts
+ ) == {:error_string, 'Parameter does not exist'}
+
+ assert list_parameters(context[:vhost]) == []
+ end
+
+ @tag component_name: @component_name, key: @key, value: @value, vhost: "bad-vhost"
+ test "run: an invalid vhost returns a 'parameter does not exist' error", context do
+ vhost_opts = Map.merge(context[:opts], %{vhost: context[:vhost]})
+
+ assert @command.run(
+ [context[:component_name], context[:key]],
+ vhost_opts
+ ) == {:error_string, 'Parameter does not exist'}
+ end
+
+ @tag component_name: @component_name, key: @key, value: @value, vhost: @vhost
+ test "banner", context do
+ vhost_opts = Map.merge(context[:opts], %{vhost: context[:vhost]})
+ set_parameter(context[:vhost], context[:component_name], context[:key], @value)
+
+ s = @command.banner(
+ [context[:component_name], context[:key]],
+ vhost_opts
+ )
+
+ assert s =~ ~r/Clearing runtime parameter/
+ assert s =~ ~r/"#{context[:key]}"/
+ assert s =~ ~r/"#{context[:component_name]}"/
+ assert s =~ ~r/"#{context[:vhost]}"/
+ end
+
+ defp assert_parameter_empty(context) do
+ parameter = context[:vhost]
+ |> list_parameters
+ |> Enum.filter(fn(param) ->
+ param[:component_name] == context[:component_name] and
+ param[:key] == context[:key]
+ end)
+ assert parameter === []
+ end
+end
diff --git a/deps/rabbitmq_cli/test/ctl/clear_password_command_test.exs b/deps/rabbitmq_cli/test/ctl/clear_password_command_test.exs
new file mode 100644
index 0000000000..0843ca3970
--- /dev/null
+++ b/deps/rabbitmq_cli/test/ctl/clear_password_command_test.exs
@@ -0,0 +1,64 @@
+## The contents of this file are subject to the Mozilla Public License
+## Version 1.1 (the "License"); you may not use this file except in
+## compliance with the License. You may obtain a copy of the License
+## at https://www.mozilla.org/MPL/
+##
+## Software distributed under the License is distributed on an "AS IS"
+## basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+## the License for the specific language governing rights and
+## limitations under the License.
+##
+## The Original Code is RabbitMQ.
+##
+## The Initial Developer of the Original Code is GoPivotal, Inc.
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+
+defmodule ClearPasswordCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+  @command RabbitMQ.CLI.Ctl.Commands.ClearPasswordCommand
+ @user "user1"
+ @password "password"
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ :ok
+ end
+
+ setup context do
+ add_user(@user, @password)
+ on_exit(context, fn -> delete_user(@user) end)
+ {:ok, opts: %{node: get_rabbit_hostname()}}
+ end
+
+ test "validate: argument count is correct" do
+ assert @command.validate(["username"], %{}) == :ok
+ assert @command.validate([], %{}) == {:validation_failure, :not_enough_args}
+ assert @command.validate(["username", "extra"], %{}) ==
+ {:validation_failure, :too_many_args}
+ end
+
+ @tag user: @user, password: @password
+ test "run: a valid username clears the password and returns okay", context do
+ assert @command.run([context[:user]], context[:opts]) == :ok
+ assert {:refused, _, _, _} = authenticate_user(context[:user], context[:password])
+ end
+
+ test "run: throws a badrpc when instructed to contact an unreachable RabbitMQ node" do
+ opts = %{node: :jake@thedog, timeout: 200}
+ assert match?({:badrpc, _}, @command.run(["user"], opts))
+ end
+
+ @tag user: "interloper"
+  test "run: an invalid username returns a no-such-user error message", context do
+ assert @command.run([context[:user]], context[:opts]) == {:error, {:no_such_user, "interloper"}}
+ end
+
+ @tag user: @user
+ test "banner", context do
+ s = @command.banner([context[:user]], context[:opts])
+
+ assert s =~ ~r/Clearing password/
+ assert s =~ ~r/"#{context[:user]}"/
+ end
+end
diff --git a/deps/rabbitmq_cli/test/ctl/clear_permissions_command_test.exs b/deps/rabbitmq_cli/test/ctl/clear_permissions_command_test.exs
new file mode 100644
index 0000000000..89bfe8c457
--- /dev/null
+++ b/deps/rabbitmq_cli/test/ctl/clear_permissions_command_test.exs
@@ -0,0 +1,100 @@
+## The contents of this file are subject to the Mozilla Public License
+## Version 1.1 (the "License"); you may not use this file except in
+## compliance with the License. You may obtain a copy of the License
+## at https://www.mozilla.org/MPL/
+##
+## Software distributed under the License is distributed on an "AS IS"
+## basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+## the License for the specific language governing rights and
+## limitations under the License.
+##
+## The Original Code is RabbitMQ.
+##
+## The Initial Developer of the Original Code is GoPivotal, Inc.
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+
+defmodule ClearPermissionsTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+  @command RabbitMQ.CLI.Ctl.Commands.ClearPermissionsCommand
+ @user "user1"
+ @password "password"
+ @default_vhost "/"
+ @specific_vhost "vhost1"
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ add_user(@user, @password)
+ add_vhost(@specific_vhost)
+
+ on_exit([], fn ->
+ delete_user(@user)
+ delete_vhost(@specific_vhost)
+ end)
+
+ :ok
+ end
+
+ setup context do
+ set_permissions(@user, @default_vhost, ["^#{@user}-.*", ".*", ".*"])
+ set_permissions(@user, @specific_vhost, ["^#{@user}-.*", ".*", ".*"])
+
+ {
+ :ok,
+ opts: %{node: get_rabbit_hostname(), vhost: context[:vhost]}
+ }
+ end
+
+ test "merge_defaults: defaults can be overridden" do
+ assert @command.merge_defaults([], %{}) == {[], %{vhost: "/"}}
+ assert @command.merge_defaults([], %{vhost: "non_default"}) == {[], %{vhost: "non_default"}}
+ end
+
+ test "validate: argument count validates" do
+ assert @command.validate(["one"], %{}) == :ok
+ assert @command.validate([], %{}) == {:validation_failure, :not_enough_args}
+ assert @command.validate(["too", "many"], %{}) == {:validation_failure, :too_many_args}
+ end
+
+ @tag user: "fake_user"
+  test "run: can't clear permissions for a non-existent user", context do
+ assert @command.run([context[:user]], context[:opts]) == {:error, {:no_such_user, context[:user]}}
+ end
+
+ @tag user: @user, vhost: @default_vhost
+ test "run: a valid username clears permissions", context do
+ assert @command.run([context[:user]], context[:opts]) == :ok
+
+ assert list_permissions(@default_vhost)
+ |> Enum.filter(fn(record) -> record[:user] == context[:user] end) == []
+ end
+
+  test "run: on an invalid node, returns a badrpc message" do
+ arg = ["some_name"]
+ opts = %{node: :jake@thedog, vhost: "/", timeout: 200}
+
+ assert match?({:badrpc, _}, @command.run(arg, opts))
+ end
+
+ @tag user: @user, vhost: @specific_vhost
+  test "run: on a valid specified vhost, clears permissions", context do
+ assert @command.run([context[:user]], context[:opts]) == :ok
+
+ assert list_permissions(context[:vhost])
+ |> Enum.filter(fn(record) -> record[:user] == context[:user] end) == []
+ end
+
+ @tag user: @user, vhost: "bad_vhost"
+  test "run: on an invalid vhost, returns a no_such_vhost error", context do
+ assert @command.run([context[:user]], context[:opts]) == {:error, {:no_such_vhost, context[:vhost]}}
+ end
+
+ @tag user: @user, vhost: @specific_vhost
+ test "banner", context do
+ s = @command.banner([context[:user]], context[:opts])
+
+ assert s =~ ~r/Clearing permissions/
+ assert s =~ ~r/\"#{context[:user]}\"/
+ assert s =~ ~r/\"#{context[:vhost]}\"/
+ end
+end
diff --git a/deps/rabbitmq_cli/test/ctl/clear_policy_command_test.exs b/deps/rabbitmq_cli/test/ctl/clear_policy_command_test.exs
new file mode 100644
index 0000000000..f36f65d25f
--- /dev/null
+++ b/deps/rabbitmq_cli/test/ctl/clear_policy_command_test.exs
@@ -0,0 +1,129 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+
+defmodule ClearPolicyCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Ctl.Commands.ClearPolicyCommand
+ @vhost "test1"
+ @key "federate"
+ @pattern "^fed\."
+ @value "{\"federation-upstream-set\":\"all\"}"
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ add_vhost @vhost
+
+ enable_federation_plugin()
+
+ on_exit([], fn ->
+ delete_vhost @vhost
+ end)
+
+ :ok
+ end
+
+ setup context do
+ on_exit(context, fn ->
+ clear_policy context[:vhost], context[:key]
+ end)
+
+ {
+ :ok,
+ opts: %{
+ node: get_rabbit_hostname()
+ }
+ }
+ end
+
+ test "merge_defaults: adds default vhost if missing" do
+ assert @command.merge_defaults([], %{}) == {[], %{vhost: "/"}}
+ end
+
+ test "merge_defaults: does not change defined vhost" do
+ assert @command.merge_defaults([], %{vhost: "test_vhost"}) == {[], %{vhost: "test_vhost"}}
+ end
+
+ test "validate: providing too few arguments fails validation" do
+ assert @command.validate([], %{}) == {:validation_failure, :not_enough_args}
+ end
+
+ test "validate: providing too many arguments fails validation" do
+ assert @command.validate(["too", "many"], %{}) == {:validation_failure, :too_many_args}
+ assert @command.validate(["this", "is", "many"], %{}) == {:validation_failure, :too_many_args}
+ end
+
+ test "validate: providing one argument and no options passes validation" do
+ assert @command.validate(["a-policy"], %{}) == :ok
+ end
+
+ @tag pattern: @pattern, key: @key, vhost: @vhost
+ test "run: if policy does not exist, returns an error", context do
+ vhost_opts = Map.merge(context[:opts], %{vhost: context[:vhost]})
+
+ assert @command.run(
+ [context[:key]],
+ vhost_opts
+ ) == {:error_string, 'Parameter does not exist'}
+ end
+
+ test "run: an unreachable node throws a badrpc" do
+ opts = %{node: :jake@thedog, vhost: "/", timeout: 200}
+ assert match?({:badrpc, _}, @command.run([@key], opts))
+ end
+
+
+ @tag pattern: @pattern, key: @key, vhost: @vhost
+ test "run: if policy exists, returns ok and removes it", context do
+ vhost_opts = Map.merge(context[:opts], %{vhost: context[:vhost]})
+
+ set_policy(context[:vhost], context[:key], context[:pattern], @value)
+
+ assert @command.run(
+ [context[:key]],
+ vhost_opts
+ ) == :ok
+
+ assert_policy_does_not_exist(context)
+ end
+
+ @tag pattern: @pattern, key: @key, value: @value, vhost: "bad-vhost"
+ test "run: a non-existent vhost returns an error", context do
+ vhost_opts = Map.merge(context[:opts], %{vhost: context[:vhost]})
+
+ assert @command.run(
+ [context[:key]],
+ vhost_opts
+ ) == {:error_string, 'Parameter does not exist'}
+ end
+
+ @tag key: @key, pattern: @pattern, value: @value, vhost: @vhost
+ test "banner", context do
+ vhost_opts = Map.merge(context[:opts], %{vhost: context[:vhost]})
+ set_policy(context[:vhost], context[:key], context[:pattern], @value)
+
+ s = @command.banner(
+ [context[:key]],
+ vhost_opts
+ )
+
+ assert s =~ ~r/Clearing policy/
+ assert s =~ ~r/"#{context[:key]}"/
+ end
+
+ defp assert_policy_does_not_exist(context) do
+ policy = context[:vhost]
+ |> list_policies
+ |> Enum.filter(fn(param) ->
+ param[:pattern] == context[:pattern] and
+ param[:key] == context[:key]
+ end)
+ assert policy === []
+ end
+end
diff --git a/deps/rabbitmq_cli/test/ctl/clear_topic_permissions_command_test.exs b/deps/rabbitmq_cli/test/ctl/clear_topic_permissions_command_test.exs
new file mode 100644
index 0000000000..2b5fb6e12a
--- /dev/null
+++ b/deps/rabbitmq_cli/test/ctl/clear_topic_permissions_command_test.exs
@@ -0,0 +1,107 @@
+## The contents of this file are subject to the Mozilla Public License
+## Version 1.1 (the "License"); you may not use this file except in
+## compliance with the License. You may obtain a copy of the License
+## at https://www.mozilla.org/MPL/
+##
+## Software distributed under the License is distributed on an "AS IS"
+## basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+## the License for the specific language governing rights and
+## limitations under the License.
+##
+## The Original Code is RabbitMQ.
+##
+## The Initial Developer of the Original Code is GoPivotal, Inc.
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+
+defmodule ClearTopicPermissionsTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+  @command RabbitMQ.CLI.Ctl.Commands.ClearTopicPermissionsCommand
+ @user "user1"
+ @password "password"
+ @specific_vhost "vhost1"
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ add_user(@user, @password)
+ add_vhost(@specific_vhost)
+
+ on_exit([], fn ->
+ clear_topic_permissions(@user, @specific_vhost)
+ delete_user(@user)
+ delete_vhost(@specific_vhost)
+ end)
+
+ :ok
+ end
+
+ setup context do
+ set_topic_permissions(@user, @specific_vhost, "amq.topic", "^a", "^b")
+ set_topic_permissions(@user, @specific_vhost, "topic1", "^a", "^b")
+ {
+ :ok,
+ opts: %{node: get_rabbit_hostname(), vhost: context[:vhost]}
+ }
+ end
+
+ test "merge_defaults: defaults can be overridden" do
+ assert @command.merge_defaults([], %{}) == {[], %{vhost: "/"}}
+ assert @command.merge_defaults([], %{vhost: "non_default"}) == {[], %{vhost: "non_default"}}
+ end
+
+ test "validate: expects username and optional exchange" do
+ assert @command.validate(["username"], %{}) == :ok
+ assert @command.validate(["username", "exchange"], %{}) == :ok
+ assert @command.validate([], %{}) == {:validation_failure, :not_enough_args}
+ assert @command.validate(["this is", "too", "many"], %{}) == {:validation_failure, :too_many_args}
+ end
+
+ @tag user: "fake_user"
+  test "run: can't clear topic permissions for a non-existent user", context do
+ assert @command.run([context[:user]], context[:opts]) == {:error, {:no_such_user, context[:user]}}
+ end
+
+ @tag user: @user, vhost: "bad_vhost"
+  test "run: on an invalid vhost, returns a no_such_vhost error", context do
+ assert @command.run([context[:user]], context[:opts]) == {:error, {:no_such_vhost, context[:vhost]}}
+ end
+
+ @tag user: @user, vhost: @specific_vhost
+ test "run: with a valid username clears all permissions for vhost", context do
+ assert Enum.count(list_user_topic_permissions(@user)) == 2
+ assert @command.run([context[:user]], context[:opts]) == :ok
+
+ assert Enum.count(list_user_topic_permissions(@user)) == 0
+ end
+
+ @tag user: @user, vhost: @specific_vhost
+ test "run: with a valid username and exchange clears only exchange permissions", context do
+ assert Enum.count(list_user_topic_permissions(@user)) == 2
+ assert @command.run([context[:user], "amq.topic"], context[:opts]) == :ok
+
+ assert Enum.count(list_user_topic_permissions(@user)) == 1
+ end
+
+ test "run: throws a badrpc when instructed to contact an unreachable RabbitMQ node" do
+ arg = ["some_name"]
+ opts = %{node: :jake@thedog, vhost: "/", timeout: 200}
+
+ assert match?({:badrpc, _}, @command.run(arg, opts))
+ end
+
+ @tag user: @user, vhost: @specific_vhost
+ test "banner with username only", context do
+ vhost_opts = Map.merge(context[:opts], %{vhost: context[:vhost]})
+
+ assert @command.banner([context[:user]], vhost_opts)
+ =~ ~r/Clearing topic permissions for user \"#{context[:user]}\" in vhost \"#{context[:vhost]}\" \.\.\./
+ end
+
+ @tag user: @user, vhost: @specific_vhost
+ test "banner with username and exchange name", context do
+ vhost_opts = Map.merge(context[:opts], %{vhost: context[:vhost]})
+
+ assert @command.banner([context[:user], "amq.topic"], vhost_opts)
+ =~ ~r/Clearing topic permissions on \"amq.topic\" for user \"#{context[:user]}\" in vhost \"#{context[:vhost]}\" \.\.\./
+ end
+end
diff --git a/deps/rabbitmq_cli/test/ctl/clear_user_limits_command_test.exs b/deps/rabbitmq_cli/test/ctl/clear_user_limits_command_test.exs
new file mode 100644
index 0000000000..eb05a875bc
--- /dev/null
+++ b/deps/rabbitmq_cli/test/ctl/clear_user_limits_command_test.exs
@@ -0,0 +1,115 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule ClearUserLimitsCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Ctl.Commands.ClearUserLimitsCommand
+
+ @user "someone"
+ @password "password"
+ @limittype "max-channels"
+ @channel_definition "{\"max-channels\":100}"
+ @definition "{\"max-channels\":500, \"max-connections\":100}"
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ add_user @user, @password
+
+ on_exit([], fn ->
+ delete_user @user
+ end)
+
+ :ok
+ end
+
+ setup context do
+ on_exit(context, fn ->
+ clear_user_limits(context[:user])
+ end)
+
+ {
+ :ok,
+ opts: %{
+ node: get_rabbit_hostname()
+ }
+ }
+ end
+
+ test "validate: providing too few arguments fails validation" do
+ assert @command.validate([], %{}) == {:validation_failure, :not_enough_args}
+ assert @command.validate(["not-enough"], %{}) == {:validation_failure, :not_enough_args}
+ end
+
+ test "validate: providing too many arguments fails validation" do
+ assert @command.validate(["is", "too", "many"], %{}) == {:validation_failure, :too_many_args}
+ assert @command.validate(["this", "is", "too", "many"], %{}) == {:validation_failure, :too_many_args}
+ end
+
+ test "run: an unreachable node throws a badrpc" do
+ opts = %{node: :jake@thedog, timeout: 200}
+
+ assert match?({:badrpc, _}, @command.run([@user, @limittype], opts))
+ end
+
+ test "run: if limit exists, returns ok and clears it", context do
+ :ok = set_user_limits(@user, @channel_definition)
+
+ assert get_user_limits(@user) != []
+
+ assert @command.run(
+ [@user, @limittype],
+ context[:opts]
+ ) == :ok
+
+ assert get_user_limits(@user) == %{}
+ end
+
+ test "run: if limit exists, returns ok and clears all limits for the given user", context do
+ :ok = set_user_limits(@user, @definition)
+
+ assert get_user_limits(@user) != []
+
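+    # "all" is a special limit type: it clears every limit set for the user in one call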
+ assert @command.run(
+ [@user, "all"],
+ context[:opts]
+ ) == :ok
+
+ assert get_user_limits(@user) == %{}
+ end
+
+ @tag user: "bad-user"
+ test "run: a non-existent user returns an error", context do
+
+ assert @command.run(
+ [context[:user], @limittype],
+ context[:opts]
+ ) == {:error, {:no_such_user, "bad-user"}}
+ end
+
+ test "banner: for a limit type", context do
+
+ s = @command.banner(
+ [@user, @limittype],
+ context[:opts]
+ )
+
+ assert s == "Clearing \"#{@limittype}\" limit for user \"#{@user}\" ..."
+ end
+
+ test "banner: for all", context do
+
+ s = @command.banner(
+ [@user, "all"],
+ context[:opts]
+ )
+
+ assert s == "Clearing all limits for user \"#{@user}\" ..."
+ end
+
+end
diff --git a/deps/rabbitmq_cli/test/ctl/clear_vhost_limits_command_test.exs b/deps/rabbitmq_cli/test/ctl/clear_vhost_limits_command_test.exs
new file mode 100644
index 0000000000..4dd681c901
--- /dev/null
+++ b/deps/rabbitmq_cli/test/ctl/clear_vhost_limits_command_test.exs
@@ -0,0 +1,103 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+
+defmodule ClearVhostLimitsCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Ctl.Commands.ClearVhostLimitsCommand
+ @vhost "test1"
+ @definition "{\"max-connections\":100}"
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ add_vhost @vhost
+
+ on_exit([], fn ->
+ delete_vhost @vhost
+ end)
+
+ :ok
+ end
+
+ setup context do
+ on_exit(context, fn ->
+ clear_vhost_limits(context[:vhost])
+ end)
+
+ {
+ :ok,
+ opts: %{
+ node: get_rabbit_hostname()
+ }
+ }
+ end
+
+ test "merge_defaults: adds default vhost if missing" do
+ assert @command.merge_defaults([], %{}) == {[], %{vhost: "/"}}
+ end
+
+ test "merge_defaults: does not change defined vhost" do
+ assert @command.merge_defaults([], %{vhost: "test_vhost"}) == {[], %{vhost: "test_vhost"}}
+ end
+
+ test "validate: providing too many arguments fails validation" do
+ assert @command.validate(["many"], %{}) == {:validation_failure, :too_many_args}
+ assert @command.validate(["too", "many"], %{}) == {:validation_failure, :too_many_args}
+ assert @command.validate(["this", "is", "many"], %{}) == {:validation_failure, :too_many_args}
+ end
+
+ test "validate: providing zero arguments and no options passes validation" do
+ assert @command.validate([], %{}) == :ok
+ end
+
+ test "run: an unreachable node throws a badrpc" do
+ opts = %{node: :jake@thedog, vhost: "/", timeout: 200}
+ assert match?({:badrpc, _}, @command.run([], opts))
+ end
+
+
+ @tag vhost: @vhost
+ test "run: if limits exist, returns ok and clears them", context do
+ vhost_opts = Map.merge(context[:opts], %{vhost: context[:vhost]})
+
+ :ok = set_vhost_limits(context[:vhost], @definition)
+
+ assert get_vhost_limits(context[:vhost]) != []
+
+ assert @command.run(
+ [],
+ vhost_opts
+ ) == :ok
+
+ assert get_vhost_limits(context[:vhost]) == %{}
+ end
+
+ @tag vhost: "bad-vhost"
+ test "run: a non-existent vhost returns an error", context do
+ vhost_opts = Map.merge(context[:opts], %{vhost: context[:vhost]})
+
+ assert @command.run(
+ [],
+ vhost_opts
+ ) == {:error_string, 'Parameter does not exist'}
+ end
+
+ @tag vhost: @vhost
+ test "banner", context do
+ vhost_opts = Map.merge(context[:opts], %{vhost: context[:vhost]})
+
+ s = @command.banner(
+ [],
+ vhost_opts
+ )
+
+ assert s =~ ~r/Clearing vhost \"#{context[:vhost]}\" limits .../
+ end
+
+end
diff --git a/deps/rabbitmq_cli/test/ctl/close_all_connections_command_test.exs b/deps/rabbitmq_cli/test/ctl/close_all_connections_command_test.exs
new file mode 100644
index 0000000000..f08969f319
--- /dev/null
+++ b/deps/rabbitmq_cli/test/ctl/close_all_connections_command_test.exs
@@ -0,0 +1,147 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+
+defmodule CloseAllConnectionsCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ alias RabbitMQ.CLI.Ctl.RpcStream
+ @helpers RabbitMQ.CLI.Core.Helpers
+ @command RabbitMQ.CLI.Ctl.Commands.CloseAllConnectionsCommand
+
+ @vhost "/"
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ close_all_connections(get_rabbit_hostname())
+
+ on_exit([], fn ->
+ close_all_connections(get_rabbit_hostname())
+ end)
+
+ :ok
+ end
+
+ setup context do
+ node_name = get_rabbit_hostname()
+ close_all_connections(node_name)
+ await_no_client_connections(node_name, 5_000)
+
+ {:ok, context}
+ end
+
+ test "validate: with an invalid number of arguments returns an arg count error", context do
+ assert @command.validate(["random", "explanation"], context[:opts]) == {:validation_failure, :too_many_args}
+ assert @command.validate([], context[:opts]) == {:validation_failure, :not_enough_args}
+ end
+
+ test "validate: with the correct number of arguments returns ok", context do
+ assert @command.validate(["explanation"], context[:opts]) == :ok
+ end
+
+ test "run: a close connections request in an existing vhost with all defaults closes all connections", context do
+ with_connection(@vhost, fn(_) ->
+ node = @helpers.normalise_node(context[:node], :shortnames)
+ nodes = @helpers.nodes_in_cluster(node)
+ [[vhost: @vhost]] = fetch_connection_vhosts(node, nodes)
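+      # limit: 0 means no cap: every connection in the vhost is closed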
+ opts = %{node: node, vhost: @vhost, global: false, per_connection_delay: 0, limit: 0}
+ assert {:ok, "Closed 1 connections"} == @command.run(["test"], opts)
+
+ await_no_client_connections(node, 5_000)
+ assert fetch_connection_vhosts(node, nodes) == []
+ end)
+ end
+
+  test "run: a close connections request with a limit closes only that many connections", context do
+ with_connections([@vhost, @vhost, @vhost], fn(_) ->
+ node = @helpers.normalise_node(context[:node], :shortnames)
+ nodes = @helpers.nodes_in_cluster(node)
+ [[vhost: @vhost], [vhost: @vhost], [vhost: @vhost]] = fetch_connection_vhosts(node, nodes)
+ opts = %{node: node, vhost: @vhost, global: false, per_connection_delay: 0, limit: 2}
+ assert {:ok, "Closed 2 connections"} == @command.run(["test"], opts)
+ Process.sleep(100)
+ assert fetch_connection_vhosts(node, nodes) == [[vhost: @vhost]]
+ end)
+ end
+
+  test "run: a close connections request for a non-existent vhost does nothing", context do
+ with_connection(@vhost, fn(_) ->
+ node = @helpers.normalise_node(context[:node], :shortnames)
+ nodes = @helpers.nodes_in_cluster(node)
+ [[vhost: @vhost]] = fetch_connection_vhosts(node, nodes)
+ opts = %{node: node, vhost: "non_existent-9288737", global: false, per_connection_delay: 0, limit: 0}
+ assert {:ok, "Closed 0 connections"} == @command.run(["test"], opts)
+ assert fetch_connection_vhosts(node, nodes) == [[vhost: @vhost]]
+ end)
+ end
+
+  test "run: a close connections request with --global closes connections in all vhosts", context do
+ with_connection(@vhost, fn(_) ->
+ node = @helpers.normalise_node(context[:node], :shortnames)
+ nodes = @helpers.nodes_in_cluster(node)
+ [[vhost: @vhost]] = fetch_connection_vhosts(node, nodes)
+ opts = %{node: node, global: true, per_connection_delay: 0, limit: 0}
+ assert {:ok, "Closed 1 connections"} == @command.run(["test"], opts)
+ await_no_client_connections(node, 5_000)
+ assert fetch_connection_vhosts(node, nodes) == []
+ end)
+ end
+
+  test "run: a close_all_connections request to a non-existent RabbitMQ node returns a badrpc" do
+ opts = %{node: :jake@thedog, vhost: @vhost, global: true, per_connection_delay: 0, limit: 0, timeout: 200}
+ assert match?({:badrpc, _}, @command.run(["test"], opts))
+ end
+
+ test "banner for vhost option", context do
+ node = @helpers.normalise_node(context[:node], :shortnames)
+ opts = %{node: node, vhost: "burrow", global: false, per_connection_delay: 0, limit: 0}
+ s = @command.banner(["some reason"], opts)
+ assert s =~ ~r/Closing all connections in vhost burrow/
+ assert s =~ ~r/some reason/
+ end
+
+ test "banner for vhost option with limit", context do
+ node = @helpers.normalise_node(context[:node], :shortnames)
+ opts = %{node: node, vhost: "burrow", global: false, per_connection_delay: 0, limit: 2}
+ s = @command.banner(["some reason"], opts)
+ assert s =~ ~r/Closing 2 connections in vhost burrow/
+ assert s =~ ~r/some reason/
+ end
+
+ test "banner for global option" do
+ opts = %{node: :test@localhost, vhost: "burrow", global: true, per_connection_delay: 0, limit: 0}
+ s = @command.banner(["some reason"], opts)
+ assert s =~ ~r/Closing all connections to node test@localhost/
+ assert s =~ ~r/some reason/
+ end
+
+ defp fetch_connection_vhosts(node, nodes) do
+ fetch_connection_vhosts(node, nodes, 50)
+ end
+
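+  # connection tracking is asynchronous, so poll the cluster via an RPC stream,
+  # retrying while the result is still empty (up to `retries` attempts)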
+ defp fetch_connection_vhosts(node, nodes, retries) do
+ stream = RpcStream.receive_list_items(node,
+ :rabbit_networking,
+ :emit_connection_info_all,
+ [nodes, [:vhost]],
+ :infinity,
+ [:vhost],
+ Kernel.length(nodes))
+ xs = Enum.to_list(stream)
+
+ case {xs, retries} do
+ {xs, 0} ->
+ xs
+ {[], n} when n >= 0 ->
+ Process.sleep(10)
+ fetch_connection_vhosts(node, nodes, retries - 1)
+ _ ->
+ xs
+ end
+ end
+end
diff --git a/deps/rabbitmq_cli/test/ctl/close_connection_command_test.exs b/deps/rabbitmq_cli/test/ctl/close_connection_command_test.exs
new file mode 100644
index 0000000000..0d1271a67f
--- /dev/null
+++ b/deps/rabbitmq_cli/test/ctl/close_connection_command_test.exs
@@ -0,0 +1,96 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+
+defmodule CloseConnectionCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ alias RabbitMQ.CLI.Ctl.RpcStream
+
+ @helpers RabbitMQ.CLI.Core.Helpers
+
+ @command RabbitMQ.CLI.Ctl.Commands.CloseConnectionCommand
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ close_all_connections(get_rabbit_hostname())
+
+ on_exit([], fn ->
+ close_all_connections(get_rabbit_hostname())
+ end)
+
+ :ok
+ end
+
+ setup do
+ {:ok, opts: %{node: get_rabbit_hostname(), timeout: :infinity}}
+ end
+
+ test "validate: with an invalid number of arguments returns an arg count error", context do
+ assert @command.validate(["pid", "explanation", "extra"], context[:opts]) == {:validation_failure, :too_many_args}
+ assert @command.validate(["pid"], context[:opts]) == {:validation_failure, :not_enough_args}
+ end
+
+ test "validate: with the correct number of arguments returns ok", context do
+ assert @command.validate(["pid", "test"], context[:opts]) == :ok
+ end
+
+ test "run: a close connection request on an existing connection", context do
+ with_connection("/", fn(_) ->
+ Process.sleep(500)
+ node = @helpers.normalise_node(context[:node], :shortnames)
+ nodes = @helpers.nodes_in_cluster(node)
+ [[pid: pid]] = fetch_connection_pids(node, nodes)
+ assert :ok == @command.run([:rabbit_misc.pid_to_string(pid), "test"], %{node: node})
+ Process.sleep(500)
+ assert fetch_connection_pids(node, nodes) == []
+ end)
+ end
+
+  test "run: a close connection request for a non-existent connection returns successfully", context do
+ assert match?(:ok,
+ @command.run(["<#{node()}.2.121.12>", "test"], %{node: @helpers.normalise_node(context[:node], :shortnames)}))
+ end
+
+  test "run: a close_connection request on a nonexistent RabbitMQ node returns a badrpc" do
+ opts = %{node: :jake@thedog, timeout: 200}
+ assert match?({:badrpc, _}, @command.run(["<rabbit@localhost.1.2.1>", "test"], opts))
+ end
+
+ test "banner", context do
+ s = @command.banner(["<rabbit@bananas.1.2.3>", "some reason"], context[:opts])
+ assert s =~ ~r/Closing connection/
+ assert s =~ ~r/<rabbit@bananas.1.2.3>/
+ end
+
+ defp fetch_connection_pids(node, nodes) do
+ fetch_connection_pids(node, nodes, 10)
+ end
+
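+  # same polling approach as the vhost fetcher in CloseAllConnectionsCommandTest:
+  # retry while the connection list is empty to let connection tracking catch up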
+ defp fetch_connection_pids(node, nodes, retries) do
+ stream = RpcStream.receive_list_items(node,
+ :rabbit_networking,
+ :emit_connection_info_all,
+ [nodes, [:pid]],
+ :infinity,
+ [:pid],
+ Kernel.length(nodes))
+ xs = Enum.to_list(stream)
+
+ case {xs, retries} do
+ {xs, 0} ->
+ xs
+ {[], n} when n >= 0 ->
+ Process.sleep(100)
+ fetch_connection_pids(node, nodes, retries - 1)
+ _ ->
+ xs
+ end
+ end
+
+end
diff --git a/deps/rabbitmq_cli/test/ctl/cluster_status_command_test.exs b/deps/rabbitmq_cli/test/ctl/cluster_status_command_test.exs
new file mode 100644
index 0000000000..e582355e7a
--- /dev/null
+++ b/deps/rabbitmq_cli/test/ctl/cluster_status_command_test.exs
@@ -0,0 +1,50 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+
+defmodule ClusterStatusCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Ctl.Commands.ClusterStatusCommand
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ :ok
+ end
+
+ setup do
+ {:ok, opts: %{node: get_rabbit_hostname(), timeout: 12000}}
+ end
+
+ test "validate: argument count validates", context do
+ assert @command.validate([], context[:opts]) == :ok
+ assert @command.validate(["extra"], context[:opts]) == {:validation_failure, :too_many_args}
+ end
+
+ test "run: status request to a reachable node returns cluster information", context do
+ n = context[:opts][:node]
+ res = @command.run([], context[:opts])
+
+ assert Enum.member?(res[:nodes][:disc], n)
+ assert res[:partitions] == []
+ assert res[:alarms][n] == []
+ end
+
+ test "run: status request on nonexistent RabbitMQ node returns a badrpc" do
+ opts = %{node: :jake@thedog, timeout: 200}
+
+ assert match?({:badrpc, _}, @command.run([], opts))
+ end
+
+ test "banner", context do
+ s = @command.banner([], context[:opts])
+
+ assert s =~ ~r/Cluster status of node/
+ assert s =~ ~r/#{get_rabbit_hostname()}/
+ end
+end
diff --git a/deps/rabbitmq_cli/test/ctl/decode_command_test.exs b/deps/rabbitmq_cli/test/ctl/decode_command_test.exs
new file mode 100644
index 0000000000..79850d7786
--- /dev/null
+++ b/deps/rabbitmq_cli/test/ctl/decode_command_test.exs
@@ -0,0 +1,95 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule DecodeCommandTest do
+ use ExUnit.Case, async: false
+ @command RabbitMQ.CLI.Ctl.Commands.DecodeCommand
+
+ setup _context do
+ {:ok, opts: %{
+ cipher: :rabbit_pbe.default_cipher,
+ hash: :rabbit_pbe.default_hash,
+ iterations: :rabbit_pbe.default_iterations
+ }}
+ end
+
+ test "validate: providing exactly 2 positional arguments passes", context do
+ assert :ok == @command.validate(["value", "secret"], context[:opts])
+ end
+
+ test "validate: providing zero or one positional argument fails", context do
+ assert match?({:validation_failure, {:not_enough_args, _}},
+ @command.validate([], context[:opts]))
+ assert match?({:validation_failure, {:not_enough_args, _}},
+ @command.validate(["value"], context[:opts]))
+ end
+
+ test "validate: providing three or more positional argument fails", context do
+ assert match?({:validation_failure, :too_many_args},
+ @command.validate(["value", "secret", "incorrect"], context[:opts]))
+ end
+
+ test "validate: hash and cipher must be supported", context do
+ assert match?(
+ {:validation_failure, {:bad_argument, _}},
+ @command.validate(["value", "secret"], Map.merge(context[:opts], %{cipher: :funny_cipher}))
+ )
+ assert match?(
+ {:validation_failure, {:bad_argument, _}},
+ @command.validate(["value", "secret"], Map.merge(context[:opts], %{hash: :funny_hash}))
+ )
+ assert match?(
+ {:validation_failure, {:bad_argument, _}},
+ @command.validate(["value", "secret"], Map.merge(context[:opts], %{cipher: :funny_cipher, hash: :funny_hash}))
+ )
+ assert :ok == @command.validate(["value", "secret"], context[:opts])
+ end
+
+ test "validate: number of iterations must greater than 0", context do
+ assert match?(
+ {:validation_failure, {:bad_argument, _}},
+ @command.validate(["value", "secret"], Map.merge(context[:opts], %{iterations: 0}))
+ )
+ assert match?(
+ {:validation_failure, {:bad_argument, _}},
+ @command.validate(["value", "secret"], Map.merge(context[:opts], %{iterations: -1}))
+ )
+ assert :ok == @command.validate(["value", "secret"], context[:opts])
+ end
+
+ test "run: encrypt/decrypt", context do
+ # an Erlang list/bitstring
+ encrypt_decrypt(to_charlist("foobar"), context)
+ # a binary
+ encrypt_decrypt("foobar", context)
+ # a tuple
+ encrypt_decrypt({:password, "secret"}, context)
+ end
+
+ defp encrypt_decrypt(secret, context) do
+ passphrase = "passphrase"
+ cipher = context[:opts][:cipher]
+ hash = context[:opts][:hash]
+ iterations = context[:opts][:iterations]
+ output = {:encrypted, _encrypted} = :rabbit_pbe.encrypt_term(cipher, hash, iterations, passphrase, secret)
+
+ {:encrypted, encrypted} = output
+    # decode the bare encrypted payload
+ assert {:ok, secret} === @command.run([format_as_erlang_term(encrypted), passphrase], context[:opts])
+ # decode {encrypted, ...} tuple form
+ assert {:ok, secret} === @command.run([format_as_erlang_term(output), passphrase], context[:opts])
+
+ # wrong passphrase
+ assert match?({:error, _},
+ @command.run([format_as_erlang_term(encrypted), "wrong/passphrase"], context[:opts]))
+ assert match?({:error, _},
+ @command.run([format_as_erlang_term(output), "wrong passphrase"], context[:opts]))
+ end
+
+ defp format_as_erlang_term(value) do
+ :io_lib.format("~p", [value]) |> :lists.flatten() |> to_string()
+ end
+end
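
The decode command is a thin wrapper over :rabbit_pbe, whose contract the helper above exercises. A minimal round-trip sketch using the same calls (this assumes the rabbit code is on the code path, for example in a remote shell on the node):

    cipher = :rabbit_pbe.default_cipher()
    hash = :rabbit_pbe.default_hash()
    iterations = :rabbit_pbe.default_iterations()
    passphrase = "passphrase"

    {:encrypted, enc} =
      :rabbit_pbe.encrypt_term(cipher, hash, iterations, passphrase, {:password, "secret"})

    # decrypt_term/5 reverses encrypt_term/5 given the same parameters
    {:password, "secret"} =
      :rabbit_pbe.decrypt_term(cipher, hash, iterations, passphrase, {:encrypted, enc})
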
diff --git a/deps/rabbitmq_cli/test/ctl/delete_queue_command_test.exs b/deps/rabbitmq_cli/test/ctl/delete_queue_command_test.exs
new file mode 100644
index 0000000000..b0971b8961
--- /dev/null
+++ b/deps/rabbitmq_cli/test/ctl/delete_queue_command_test.exs
@@ -0,0 +1,119 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+
+defmodule DeleteQueueCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Ctl.Commands.DeleteQueueCommand
+ @user "guest"
+ @vhost "delete-queue-vhost"
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ :ok
+ end
+
+ setup context do
+ {:ok, opts: %{
+ node: get_rabbit_hostname(),
+ vhost: @vhost,
+ timeout: context[:test_timeout],
+ if_empty: false,
+ if_unused: false
+ }}
+ end
+
+ test "merge_defaults: defaults can be overridden" do
+ assert @command.merge_defaults([], %{}) == {[], %{vhost: "/", if_empty: false, if_unused: false}}
+ assert @command.merge_defaults([], %{vhost: "non_default", if_empty: true}) ==
+ {[], %{vhost: "non_default", if_empty: true, if_unused: false}}
+ end
+
+ test "validate: providing no queue name fails validation", context do
+ assert match?(
+ {:validation_failure, :not_enough_args},
+ @command.validate([], context[:opts])
+ )
+ end
+
+ test "validate: providing an empty queue name fails validation", context do
+ assert match?(
+ {:validation_failure, {:bad_argument, "queue name cannot be an empty string"}},
+ @command.validate([""], context[:opts])
+ )
+ end
+
+ test "validate: providing a non-blank queue name and -u succeeds", context do
+ assert @command.validate(["a-queue"], %{
+ node: get_rabbit_hostname(),
+ vhost: @vhost,
+ timeout: context[:test_timeout],
+ if_unused: false
+ }) == :ok
+ end
+
+ @tag test_timeout: 30000
+ test "run: request to an existent queue on active node succeeds", context do
+ add_vhost @vhost
+ set_permissions @user, @vhost, [".*", ".*", ".*"]
+ on_exit(context, fn -> delete_vhost(@vhost) end)
+
+ q = "foo"
+ n = 20
+
+ declare_queue(q, @vhost)
+ publish_messages(@vhost, q, n)
+
+ assert @command.run([q], context[:opts]) == {:ok, n}
+ {:error, :not_found} = lookup_queue(q, @vhost)
+ end
+
+ @tag test_timeout: 30000
+ test "run: request to a non-existent queue on active node returns not found", context do
+ assert @command.run(["non-existent"], context[:opts]) == {:error, :not_found}
+ end
+
+ @tag test_timeout: 0
+ test "run: timeout causes command to return a bad RPC", context do
+ add_vhost @vhost
+ set_permissions @user, @vhost, [".*", ".*", ".*"]
+ on_exit(context, fn -> delete_vhost(@vhost) end)
+
+ q = "foo"
+ declare_queue(q, @vhost)
+ assert @command.run([q], context[:opts]) == {:badrpc, :timeout}
+ end
+
+ test "shows up in help" do
+ s = @command.usage()
+ assert s =~ ~r/delete_queue/
+ end
+
+ test "defaults to vhost /" do
+ assert @command.merge_defaults(["foo"], %{bar: "baz"}) == {["foo"], %{bar: "baz", vhost: "/", if_unused: false, if_empty: false}}
+ end
+
+ test "validate: with extra arguments returns an arg count error" do
+ assert @command.validate(["queue-name", "extra"], %{}) == {:validation_failure, :too_many_args}
+ end
+
+ test "validate: with no arguments returns an arg count error" do
+ assert @command.validate([], %{}) == {:validation_failure, :not_enough_args}
+ end
+
+ test "validate: with correct args returns ok" do
+ assert @command.validate(["q"], %{}) == :ok
+ end
+
+ test "banner informs that vhost's queue is deleted" do
+ assert @command.banner(["my-q"], %{vhost: "/foo", if_empty: false, if_unused: false}) == "Deleting queue 'my-q' on vhost '/foo' ..."
+ assert @command.banner(["my-q"], %{vhost: "/foo", if_empty: true, if_unused: false}) == "Deleting queue 'my-q' on vhost '/foo' if queue is empty ..."
+ assert @command.banner(["my-q"], %{vhost: "/foo", if_empty: true, if_unused: true}) == "Deleting queue 'my-q' on vhost '/foo' if queue is empty and if queue is unused ..."
+ end
+end
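
The banner assertions above pin down exact strings. A hedged sketch of branching that would satisfy them (not the actual DeleteQueueCommand implementation; the if_unused-only wording is an assumption, since the tests do not cover that combination):

    defmodule DeleteQueueBannerSketch do
      def banner(q, %{vhost: vhost} = opts) do
        "Deleting queue '#{q}' on vhost '#{vhost}'#{suffix(opts)} ..."
      end

      defp suffix(%{if_empty: true, if_unused: true}),
        do: " if queue is empty and if queue is unused"
      defp suffix(%{if_empty: true}), do: " if queue is empty"
      defp suffix(%{if_unused: true}), do: " if queue is unused"
      defp suffix(_), do: ""
    end
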
diff --git a/deps/rabbitmq_cli/test/ctl/delete_user_command_test.exs b/deps/rabbitmq_cli/test/ctl/delete_user_command_test.exs
new file mode 100644
index 0000000000..97f09654a9
--- /dev/null
+++ b/deps/rabbitmq_cli/test/ctl/delete_user_command_test.exs
@@ -0,0 +1,59 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+
+defmodule DeleteUserCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Ctl.Commands.DeleteUserCommand
+ @user "username"
+ @password "password"
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+ :ok
+ end
+
+ setup context do
+ add_user(context[:user], @password)
+ on_exit(context, fn -> delete_user(context[:user]) end)
+
+ {:ok, opts: %{node: get_rabbit_hostname()}}
+ end
+
+ @tag user: @user
+ test "validate: argument count validates" do
+ assert @command.validate(["one"], %{}) == :ok
+ assert @command.validate([], %{}) == {:validation_failure, :not_enough_args}
+ assert @command.validate(["too", "many"], %{}) == {:validation_failure, :too_many_args}
+ end
+
+ @tag user: @user
+ test "run: A valid username returns ok", context do
+ assert @command.run([context[:user]], context[:opts]) == :ok
+
+ assert list_users() |> Enum.count(fn(record) -> record[:user] == context[:user] end) == 0
+ end
+
+ test "run: An invalid Rabbit node returns a bad rpc message" do
+ opts = %{node: :jake@thedog, timeout: 200}
+
+ assert match?({:badrpc, _}, @command.run(["username"], opts))
+ end
+
+ @tag user: @user
+ test "run: An invalid username returns an error", context do
+ assert @command.run(["no_one"], context[:opts]) == {:error, {:no_such_user, "no_one"}}
+ end
+
+ @tag user: @user
+ test "banner", context do
+ s = @command.banner([context[:user]], context[:opts])
+ assert s =~ ~r/Deleting user/
+ assert s =~ ~r/\"#{context[:user]}\"/
+ end
+end
diff --git a/deps/rabbitmq_cli/test/ctl/delete_vhost_command_test.exs b/deps/rabbitmq_cli/test/ctl/delete_vhost_command_test.exs
new file mode 100644
index 0000000000..057f0789dc
--- /dev/null
+++ b/deps/rabbitmq_cli/test/ctl/delete_vhost_command_test.exs
@@ -0,0 +1,67 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+
+defmodule DeleteVhostCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Ctl.Commands.DeleteVhostCommand
+ @vhost "test"
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ :ok
+ end
+
+ setup context do
+ add_vhost(context[:vhost])
+ on_exit(context, fn -> delete_vhost(context[:vhost]) end)
+
+ {:ok, opts: %{node: get_rabbit_hostname()}}
+ end
+
+ test "validate: argument count validates" do
+ assert @command.validate(["tst"], %{}) == :ok
+ assert @command.validate([], %{}) == {:validation_failure, :not_enough_args}
+ assert @command.validate(["test", "extra"], %{}) == {:validation_failure, :too_many_args}
+ end
+
+ @tag vhost: @vhost
+ test "run: A valid name to an active RabbitMQ node is successful", context do
+ assert @command.run([context[:vhost]], context[:opts]) == :ok
+
+ assert list_vhosts() |> Enum.count(fn(record) -> record[:name] == context[:vhost] end) == 0
+ end
+
+ @tag vhost: ""
+ test "run: An empty string to an active RabbitMQ node is successful", context do
+ assert @command.run([context[:vhost]], context[:opts]) == :ok
+
+ assert list_vhosts() |> Enum.count(fn(record) -> record[:name] == context[:vhost] end) == 0
+ end
+
+ test "run: A call to invalid or inactive RabbitMQ node returns a nodedown" do
+ opts = %{node: :jake@thedog, timeout: 200}
+
+ assert match?({:badrpc, _}, @command.run(["na"], opts))
+ end
+
+ @tag vhost: @vhost
+ test "run: Deleting the same host twice results in a host not found message", context do
+ @command.run([context[:vhost]], context[:opts])
+ assert @command.run([context[:vhost]], context[:opts]) ==
+ {:error, {:no_such_vhost, context[:vhost]}}
+ end
+
+ @tag vhost: @vhost
+ test "banner", context do
+ s = @command.banner([context[:vhost]], context[:opts])
+ assert s =~ ~r/Deleting vhost/
+ assert s =~ ~r/\"#{context[:vhost]}\"/
+ end
+end
diff --git a/deps/rabbitmq_cli/test/ctl/enable_feature_flag_test.exs b/deps/rabbitmq_cli/test/ctl/enable_feature_flag_test.exs
new file mode 100644
index 0000000000..f8a3e62920
--- /dev/null
+++ b/deps/rabbitmq_cli/test/ctl/enable_feature_flag_test.exs
@@ -0,0 +1,70 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2018-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+
+defmodule EnableFeatureFlagCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Ctl.Commands.EnableFeatureFlagCommand
+ @feature_flag :ff_from_enable_ff_testsuite
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ # Define an arbitrary feature flag for the test.
+ node = get_rabbit_hostname()
+ new_feature_flags = %{
+ @feature_flag =>
+ %{desc: "My feature flag",
+ provided_by: :EnableFeatureFlagCommandTest,
+ stability: :stable}}
+ :ok = :rabbit_misc.rpc_call(
+ node, :rabbit_feature_flags, :initialize_registry, [new_feature_flags])
+
+ {
+ :ok,
+ opts: %{node: get_rabbit_hostname()},
+ feature_flag: @feature_flag
+ }
+ end
+
+ test "validate: wrong number of arguments results in arg count errors" do
+ assert @command.validate([], %{}) == {:validation_failure, :not_enough_args}
+ assert @command.validate(["ff_from_enable_ff_testsuite", "whoops"], %{}) == {:validation_failure, :too_many_args}
+ end
+
+ test "validate: passing an empty string for feature_flag name is an arg error", context do
+ assert match?({:validation_failure, {:bad_argument, _}}, @command.validate([""], context[:opts]))
+ end
+
+ test "run: passing a valid feature_flag name to a running RabbitMQ node succeeds", context do
+ assert @command.run([Atom.to_string(context[:feature_flag])], context[:opts]) == :ok
+ assert list_feature_flags(:enabled) |> Map.has_key?(context[:feature_flag])
+ end
+
+ test "run: attempt to use an unreachable node returns a nodedown" do
+ opts = %{node: :jake@thedog, timeout: 200}
+ assert match?({:badrpc, _}, @command.run(["na"], opts))
+ end
+
+ test "run: enabling the same feature flag twice is idempotent", context do
+ enable_feature_flag context[:feature_flag]
+ assert @command.run([Atom.to_string(context[:feature_flag])], context[:opts]) == :ok
+ assert list_feature_flags(:enabled) |> Map.has_key?(context[:feature_flag])
+ end
+
+ test "run: enabling all feature flags succeeds", context do
+ enable_feature_flag context[:feature_flag]
+ assert @command.run(["all"], context[:opts]) == :ok
+ assert list_feature_flags(:enabled) |> Map.has_key?(context[:feature_flag])
+ end
+
+ test "banner", context do
+ assert @command.banner([context[:feature_flag]], context[:opts])
+ =~ ~r/Enabling feature flag \"#{context[:feature_flag]}\" \.\.\./
+ end
+end
diff --git a/deps/rabbitmq_cli/test/ctl/encode_command_test.exs b/deps/rabbitmq_cli/test/ctl/encode_command_test.exs
new file mode 100644
index 0000000000..550e4b24da
--- /dev/null
+++ b/deps/rabbitmq_cli/test/ctl/encode_command_test.exs
@@ -0,0 +1,92 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule EncodeCommandTest do
+ use ExUnit.Case, async: false
+
+ @command RabbitMQ.CLI.Ctl.Commands.EncodeCommand
+
+ setup _context do
+ {:ok, opts: %{
+ cipher: :rabbit_pbe.default_cipher,
+ hash: :rabbit_pbe.default_hash,
+ iterations: :rabbit_pbe.default_iterations
+ }}
+ end
+
+ test "validate: providing exactly 2 positional arguments passes", context do
+ assert :ok == @command.validate(["value", "secret"], context[:opts])
+ end
+
+ test "validate: providing zero or one positional argument fails", context do
+ assert match?({:validation_failure, {:not_enough_args, _}},
+ @command.validate([], context[:opts]))
+ assert match?({:validation_failure, {:not_enough_args, _}},
+ @command.validate(["value"], context[:opts]))
+ end
+
+ test "validate: providing three or more positional argument fails", context do
+ assert match?({:validation_failure, :too_many_args},
+ @command.validate(["value", "secret", "incorrect"], context[:opts]))
+ end
+
+ test "validate: hash and cipher must be supported", context do
+ assert match?(
+ {:validation_failure, {:bad_argument, _}},
+ @command.validate(["value", "secret"], Map.merge(context[:opts], %{cipher: :funny_cipher}))
+ )
+ assert match?(
+ {:validation_failure, {:bad_argument, _}},
+ @command.validate(["value", "secret"], Map.merge(context[:opts], %{hash: :funny_hash}))
+ )
+ assert match?(
+ {:validation_failure, {:bad_argument, _}},
+ @command.validate(["value", "secret"], Map.merge(context[:opts], %{cipher: :funny_cipher, hash: :funny_hash}))
+ )
+ assert :ok == @command.validate(["value", "secret"], context[:opts])
+ end
+
+ test "validate: number of iterations must greater than 0", context do
+ assert match?(
+ {:validation_failure, {:bad_argument, _}},
+ @command.validate(["value", "secret"], Map.merge(context[:opts], %{iterations: 0}))
+ )
+ assert match?(
+ {:validation_failure, {:bad_argument, _}},
+ @command.validate(["value", "secret"], Map.merge(context[:opts], %{iterations: -1}))
+ )
+ assert :ok == @command.validate(["value", "secret"], context[:opts])
+ end
+
+ test "run: encrypt/decrypt", context do
+ # an Erlang list/bitstring
+ encrypt_decrypt(to_charlist("foobar"), context)
+ # a binary
+ encrypt_decrypt("foobar", context)
+ # a tuple
+ encrypt_decrypt({:password, "secret"}, context)
+ end
+
+ defp encrypt_decrypt(secret, context) do
+ secret_as_erlang_term = format_as_erlang_term(secret)
+ passphrase = "passphrase"
+
+ cipher = context[:opts][:cipher]
+ hash = context[:opts][:hash]
+ iterations = context[:opts][:iterations]
+
+ {:ok, output} = @command.run([secret_as_erlang_term, passphrase], context[:opts])
+ {:encrypted, encrypted} = output
+    # decrypting the {:plaintext, ...} form returns the secret as-is
+ assert secret === :rabbit_pbe.decrypt_term(cipher, hash, iterations, passphrase, {:plaintext, secret})
+    # decrypting the {:encrypted, ...} form recovers the secret
+ assert secret === :rabbit_pbe.decrypt_term(cipher, hash, iterations, passphrase, {:encrypted, encrypted})
+ end
+
+ defp format_as_erlang_term(value) do
+ :io_lib.format("~p", [value]) |> :lists.flatten() |> to_string()
+ end
+end
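
format_as_erlang_term/1, shared by the encode and decode suites, renders any term with Erlang's ~p format so the CLI receives valid Erlang syntax. For illustration, the pipeline applied to one of the tuples used above (the printed form is how ~p renders an Elixir binary):

    iex> :io_lib.format("~p", [{:password, "secret"}]) |> :lists.flatten() |> to_string()
    "{password,<<\"secret\">>}"
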
diff --git a/deps/rabbitmq_cli/test/ctl/environment_command_test.exs b/deps/rabbitmq_cli/test/ctl/environment_command_test.exs
new file mode 100644
index 0000000000..7f801d54dc
--- /dev/null
+++ b/deps/rabbitmq_cli/test/ctl/environment_command_test.exs
@@ -0,0 +1,45 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+
+defmodule EnvironmentCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Ctl.Commands.EnvironmentCommand
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ :ok
+ end
+
+ setup do
+ {:ok, opts: %{node: get_rabbit_hostname()}}
+ end
+
+ test "validate: argument count validates" do
+ assert @command.validate([], %{}) == :ok
+ assert @command.validate(["extra"], %{}) == {:validation_failure, :too_many_args}
+ end
+
+ @tag target: get_rabbit_hostname()
+ test "run: environment request on a named, active RMQ node is successful", context do
+ assert @command.run([], context[:opts])[:kernel] != nil
+ assert @command.run([], context[:opts])[:rabbit] != nil
+ end
+
+ test "run: environment request on nonexistent RabbitMQ node returns a badrpc" do
+ opts = %{node: :jake@thedog, timeout: 200}
+
+ assert match?({:badrpc, _}, @command.run([], opts))
+ end
+
+ test "banner", context do
+ assert @command.banner([], context[:opts])
+ =~ ~r/Application environment of node #{get_rabbit_hostname()}/
+ end
+end
diff --git a/deps/rabbitmq_cli/test/ctl/eval_command_test.exs b/deps/rabbitmq_cli/test/ctl/eval_command_test.exs
new file mode 100644
index 0000000000..92a2d77667
--- /dev/null
+++ b/deps/rabbitmq_cli/test/ctl/eval_command_test.exs
@@ -0,0 +1,74 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+
+defmodule EvalCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+ import ExUnit.CaptureIO
+
+ @command RabbitMQ.CLI.Ctl.Commands.EvalCommand
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+ :ok
+ end
+
+ setup _ do
+ {:ok, opts: %{node: get_rabbit_hostname()}}
+ end
+
+ test "validate: providing no arguments succeeds" do
+ # expression is expected to be provided via standard input
+ assert @command.validate([], %{}) == :ok
+ end
+
+ test "validate: empty expression to eval fails validation" do
+ assert @command.validate([""], %{}) == {:validation_failure, "Expression must not be blank"}
+ assert @command.validate(["", "foo"], %{}) == {:validation_failure, "Expression must not be blank"}
+ end
+
+ test "validate: syntax error in expression to eval fails validation" do
+ assert @command.validate(["foo bar"], %{}) == {:validation_failure, "syntax error before: bar"}
+ assert @command.validate(["foo bar", "foo"], %{}) == {:validation_failure, "syntax error before: bar"}
+ end
+
+ test "run: request to a non-existent node returns a badrpc", _context do
+ opts = %{node: :jake@thedog, timeout: 200}
+
+ assert match?({:badrpc, _}, @command.run(["ok."], opts))
+ end
+
+ test "run: evaluates provided Erlang expression", context do
+ assert @command.run(["foo."], context[:opts]) == {:ok, :foo}
+ assert @command.run(["length([1,2,3])."], context[:opts]) == {:ok, 3}
+ assert @command.run(["lists:sum([1,2,3])."], context[:opts]) == {:ok, 6}
+ {:ok, apps} = @command.run(["application:loaded_applications()."], context[:opts])
+ assert is_list(apps)
+ end
+
+ test "run: evaluates provided expression on the target server node", context do
+ {:ok, apps} = @command.run(["application:loaded_applications()."], context[:opts])
+ assert is_list(apps)
+ assert List.keymember?(apps, :rabbit, 0)
+ end
+
+ test "run: returns stdout output", context do
+ assert capture_io(fn ->
+ assert @command.run(["io:format(\"output\")."], context[:opts]) == {:ok, :ok}
+ end) == "output"
+ end
+
+ test "run: passes parameters to the expression as positional/numerical variables", context do
+ assert @command.run(["binary_to_atom(_1, utf8).", "foo"], context[:opts]) == {:ok, :foo}
+ assert @command.run(["{_1, _2}.", "foo", "bar"], context[:opts]) == {:ok, {"foo", "bar"}}
+ end
+
+ test "run: passes globally recognised options as named variables", context do
+ assert @command.run(["{_vhost, _node}."], Map.put(context[:opts], :vhost, "a-node")) ==
+ {:ok, {"a-node", context[:opts][:node]}}
+ end
+end
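
The eval command consumes Erlang source strings such as "lists:sum([1,2,3]).". A sketch of the standard OTP scan/parse/eval pipeline that such a command can build on (an illustration of the mechanism, not the EvalCommand source):

    expr = String.to_charlist("lists:sum([1,2,3]).")
    {:ok, tokens, _end_location} = :erl_scan.string(expr)
    {:ok, exprs} = :erl_parse.parse_exprs(tokens)
    # the bindings list is where _1, _2, ... positional variables would go
    {:value, 6, _bindings} = :erl_eval.exprs(exprs, [])
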
diff --git a/deps/rabbitmq_cli/test/ctl/eval_file_command_test.exs b/deps/rabbitmq_cli/test/ctl/eval_file_command_test.exs
new file mode 100644
index 0000000000..74cb272f98
--- /dev/null
+++ b/deps/rabbitmq_cli/test/ctl/eval_file_command_test.exs
@@ -0,0 +1,72 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+
+defmodule EvalFileCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Ctl.Commands.EvalFileCommand
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+ :ok
+ end
+
+ setup _ do
+ {:ok, opts: %{node: get_rabbit_hostname()}}
+ end
+
+ test "validate: providing no arguments fails validation" do
+    # a file path argument is required
+ assert @command.validate([], %{}) == {:validation_failure, :not_enough_args}
+ end
+
+ test "validate: empty file path fails validation" do
+ assert @command.validate([""], %{}) == {:validation_failure, "File path must not be blank"}
+ end
+
+ test "validate: path to a non-existent file fails validation" do
+ path = "/tmp/rabbitmq/cli-tests/12937293782368263726.lolz.escript"
+ assert @command.validate([path], %{}) == {:validation_failure, "File #{path} does not exist"}
+ end
+
+ test "run: request to a non-existent node returns a badrpc", _context do
+ opts = %{node: :jake@thedog, timeout: 200}
+
+ assert match?({:badrpc, _}, @command.run([valid_file_path()], opts))
+ end
+
+ test "run: evaluates expressions in the file on the target server node", context do
+ {:ok, apps} = @command.run([loaded_applications_file_path()], context[:opts])
+ assert is_list(apps)
+ assert List.keymember?(apps, :rabbit, 0)
+ end
+
+ test "run: returns evaluation result", context do
+ assert {:ok, 2} == @command.run([valid_file_path()], context[:opts])
+ end
+
+ test "run: reports invalid syntax errors", context do
+ assert match?({:error, _}, @command.run([invalid_file_path()], context[:opts]))
+ end
+
+ #
+ # Implementation
+ #
+
+ defp valid_file_path() do
+ Path.join([File.cwd!(), "test", "fixtures", "files", "valid_erl_expression.escript"])
+ end
+
+ defp invalid_file_path() do
+ Path.join([File.cwd!(), "test", "fixtures", "files", "invalid_erl_expression.escript"])
+ end
+
+ defp loaded_applications_file_path() do
+ Path.join([File.cwd!(), "test", "fixtures", "files", "loaded_applications.escript"])
+ end
+end
diff --git a/deps/rabbitmq_cli/test/ctl/exec_command_test.exs b/deps/rabbitmq_cli/test/ctl/exec_command_test.exs
new file mode 100644
index 0000000000..bb839f5434
--- /dev/null
+++ b/deps/rabbitmq_cli/test/ctl/exec_command_test.exs
@@ -0,0 +1,47 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+
+defmodule ExecCommandTest do
+ use ExUnit.Case, async: false
+
+ @command RabbitMQ.CLI.Ctl.Commands.ExecCommand
+
+ setup _ do
+ {:ok, opts: %{}}
+ end
+
+ test "validate: providing too few arguments fails validation" do
+ assert @command.validate([], %{}) == {:validation_failure, :not_enough_args}
+ end
+
+ test "validate: there should be only one argument" do
+ assert @command.validate(["foo", "bar"], %{}) == {:validation_failure, :too_many_args}
+ assert @command.validate(["", "bar"], %{}) == {:validation_failure, :too_many_args}
+ end
+
+ test "validate: empty expression to exec fails validation" do
+ assert @command.validate([""], %{}) == {:validation_failure, "Expression must not be blank"}
+ end
+
+ test "validate: success" do
+ :ok = @command.validate([":ok"], %{})
+ end
+
+ test "run: executes elixir code" do
+ {:ok, :ok} = @command.run([":ok"], %{})
+ node = Node.self()
+ {:ok, ^node} = @command.run(["Node.self()"], %{})
+ {:ok, 3} = @command.run(["1 + 2"], %{})
+ end
+
+ test "run: binds options variable" do
+ opts = %{my: :custom, option: 123}
+ {:ok, ^opts} = @command.run(["options"], opts)
+ {:ok, 123} = @command.run(["options[:option]"], opts)
+ end
+
+end
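
The last test implies the command evaluates the Elixir snippet with an `options` variable in scope. A minimal sketch of that behaviour using Code.eval_string/2 (an assumption about the mechanism, not the ExecCommand source):

    opts = %{my: :custom, option: 123}
    {result, _binding} = Code.eval_string("options[:option]", options: opts)
    123 = result
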
diff --git a/deps/rabbitmq_cli/test/ctl/export_definitions_command_test.exs b/deps/rabbitmq_cli/test/ctl/export_definitions_command_test.exs
new file mode 100644
index 0000000000..3506b1ea80
--- /dev/null
+++ b/deps/rabbitmq_cli/test/ctl/export_definitions_command_test.exs
@@ -0,0 +1,138 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+
+defmodule ExportDefinitionsCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Ctl.Commands.ExportDefinitionsCommand
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ :ok
+ end
+
+ setup context do
+ {:ok, opts: %{
+ node: get_rabbit_hostname(),
+ timeout: context[:test_timeout] || 30000,
+ format: context[:format] || "json"
+ }}
+ end
+
+ test "merge_defaults: defaults to JSON for format" do
+ assert @command.merge_defaults([valid_file_path()], %{}) ==
+ {[valid_file_path()], %{format: "json"}}
+ end
+
+ test "merge_defaults: defaults to --silent if target is stdout" do
+ assert @command.merge_defaults(["-"], %{}) == {["-"], %{format: "json", silent: true}}
+ end
+
+ test "merge_defaults: format is case insensitive" do
+ assert @command.merge_defaults([valid_file_path()], %{format: "JSON"}) ==
+ {[valid_file_path()], %{format: "json"}}
+ assert @command.merge_defaults([valid_file_path()], %{format: "Erlang"}) ==
+ {[valid_file_path()], %{format: "erlang"}}
+ end
+
+ test "merge_defaults: format can be overridden" do
+ assert @command.merge_defaults([valid_file_path()], %{format: "erlang"}) ==
+ {[valid_file_path()], %{format: "erlang"}}
+ end
+
+ test "validate: accepts a file path argument", context do
+ assert @command.validate([valid_file_path()], context[:opts]) == :ok
+ end
+
+ test "validate: accepts a dash for stdout", context do
+ assert @command.validate(["-"], context[:opts]) == :ok
+ end
+
+ test "validate: unsupported format fails validation", context do
+ assert match?({:validation_failure, {:bad_argument, _}},
+ @command.validate([valid_file_path()], Map.merge(context[:opts], %{format: "yolo"})))
+ end
+
+ test "validate: no positional arguments fails validation", context do
+ assert @command.validate([], context[:opts]) ==
+ {:validation_failure, :not_enough_args}
+ end
+
+ test "validate: more than one positional argument fails validation", context do
+ assert @command.validate([valid_file_path(), "extra-arg"], context[:opts]) ==
+ {:validation_failure, :too_many_args}
+ end
+
+ test "validate: supports JSON and Erlang formats", context do
+ assert @command.validate([valid_file_path()], Map.merge(context[:opts], %{format: "json"})) == :ok
+ assert @command.validate([valid_file_path()], Map.merge(context[:opts], %{format: "erlang"})) == :ok
+ end
+
+ @tag test_timeout: 3000
+ test "run: targeting an unreachable node throws a badrpc", context do
+ result = @command.run([valid_file_path()],
+ %{node: :jake@thedog,
+ timeout: context[:test_timeout],
+ format: "json"})
+ assert match?({:badrpc, _}, result)
+ end
+
+ @tag format: "json"
+ test "run: returns a list of definitions when target is stdout and format is JSON", context do
+ {:ok, map} = @command.run(["-"], context[:opts])
+ assert Map.has_key?(map, :rabbitmq_version)
+ end
+
+ @tag format: "erlang"
+ test "run: returns a list of definitions when target is stdout and format is Erlang Terms", context do
+ {:ok, map} = @command.run(["-"], context[:opts])
+ assert Map.has_key?(map, :rabbitmq_version)
+ end
+
+ @tag format: "json"
+ test "run: writes to a file and returns nil when target is a file and format is JSON", context do
+ File.rm(valid_file_path())
+ {:ok, nil} = @command.run([valid_file_path()], context[:opts])
+
+ {:ok, bin} = File.read(valid_file_path())
+ {:ok, map} = JSON.decode(bin)
+ assert Map.has_key?(map, "rabbitmq_version")
+ end
+
+ @tag format: "json"
+ test "run: correctly formats runtime parameter values", context do
+ File.rm(valid_file_path())
+ imported_file_path = Path.join([File.cwd!(), "test", "fixtures", "files", "definitions.json"])
+ # prepopulate some runtime parameters
+ RabbitMQ.CLI.Ctl.Commands.ImportDefinitionsCommand.run([imported_file_path], context[:opts])
+
+ {:ok, nil} = @command.run([valid_file_path()], context[:opts])
+
+ # clean up the state we've modified
+ clear_parameter("/", "federation-upstream", "up-1")
+
+ {:ok, bin} = File.read(valid_file_path())
+ {:ok, map} = JSON.decode(bin)
+ assert Map.has_key?(map, "rabbitmq_version")
+ params = map["parameters"]
+ assert is_map(hd(params)["value"])
+ end
+
+ @tag format: "erlang"
+ test "run: writes to a file and returns nil when target is a file and format is Erlang Terms", context do
+ File.rm(valid_file_path())
+ {:ok, nil} = @command.run([valid_file_path()], context[:opts])
+
+ {:ok, bin} = File.read(valid_file_path())
+ map = :erlang.binary_to_term(bin)
+ assert Map.has_key?(map, :rabbitmq_version)
+ end
+
+ defp valid_file_path(), do: "#{System.tmp_dir()}/definitions"
+end
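
The Erlang-format assertion above relies on :erlang.binary_to_term/1 undoing :erlang.term_to_binary/1. A self-contained round trip with illustrative keys (the real export contains the full definitions map):

    map = %{rabbitmq_version: "3.8.x", parameters: []}
    bin = :erlang.term_to_binary(map)
    ^map = :erlang.binary_to_term(bin)
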
diff --git a/deps/rabbitmq_cli/test/ctl/force_boot_command_test.exs b/deps/rabbitmq_cli/test/ctl/force_boot_command_test.exs
new file mode 100644
index 0000000000..a33d7b2e89
--- /dev/null
+++ b/deps/rabbitmq_cli/test/ctl/force_boot_command_test.exs
@@ -0,0 +1,63 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule ForceBootCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Ctl.Commands.ForceBootCommand
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ :ok
+ end
+
+ setup _ do
+ {
+ :ok,
+ opts: %{
+ node: get_rabbit_hostname(),
+ timeout: 1000
+ }
+ }
+ end
+
+ test "validate: providing too many arguments fails validation" do
+ assert @command.validate(["many"], %{}) == {:validation_failure, :too_many_args}
+ assert @command.validate(["too", "many"], %{}) == {:validation_failure, :too_many_args}
+ end
+
+ test "validate: the rabbit app running on target node fails validation", context do
+ assert @command.validate_execution_environment([], context[:opts]) ==
+ {:validation_failure, :rabbit_app_is_running}
+ end
+
+ test "run: sets a force boot marker file on target node", context do
+ stop_rabbitmq_app()
+ on_exit(fn -> start_rabbitmq_app() end)
+ assert @command.run([], context[:opts]) == :ok
+ mnesia_dir = :rpc.call(get_rabbit_hostname(), :rabbit_mnesia, :dir, [])
+
+ path = Path.join(mnesia_dir, "force_load")
+ assert File.exists?(path)
+ File.rm(path)
+ end
+
+ test "run: if RABBITMQ_MNESIA_DIR is defined, creates a force boot marker file" do
+ node = :unknown@localhost
+ temp_dir = "#{Mix.Project.config()[:elixirc_paths]}/tmp"
+ File.mkdir_p!(temp_dir)
+ on_exit(fn -> File.rm_rf!(temp_dir) end)
+ System.put_env("RABBITMQ_MNESIA_DIR", temp_dir)
+
+ assert @command.run([], %{node: node}) == :ok
+ assert File.exists?(Path.join(temp_dir, "force_load"))
+
+ System.delete_env("RABBITMQ_MNESIA_DIR")
+ File.rm_rf(temp_dir)
+ end
+end
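
Both tests treat a file named force_load in the node's Mnesia directory as the command's observable effect. The equivalent manual step, sketched with an illustrative fallback path (normally the directory comes from RABBITMQ_MNESIA_DIR or from the node itself):

    mnesia_dir =
      System.get_env("RABBITMQ_MNESIA_DIR") || "/var/lib/rabbitmq/mnesia/rabbit@localhost"
    File.write!(Path.join(mnesia_dir, "force_load"), "")
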
diff --git a/deps/rabbitmq_cli/test/ctl/force_gc_command_test.exs b/deps/rabbitmq_cli/test/ctl/force_gc_command_test.exs
new file mode 100644
index 0000000000..b9583931d3
--- /dev/null
+++ b/deps/rabbitmq_cli/test/ctl/force_gc_command_test.exs
@@ -0,0 +1,46 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+
+defmodule ForceGcCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Ctl.Commands.ForceGcCommand
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ reset_vm_memory_high_watermark()
+
+ on_exit([], fn ->
+ reset_vm_memory_high_watermark()
+ end)
+
+ :ok
+ end
+
+ setup do
+ {:ok, opts: %{node: get_rabbit_hostname(), timeout: 200}}
+ end
+
+
+ test "merge_defaults: merge not defaults" do
+ assert @command.merge_defaults([], %{}) == {[], %{}}
+ end
+
+ test "validate: with extra arguments returns an error", context do
+ assert @command.validate(["extra"], context[:opts]) == {:validation_failure, :too_many_args}
+ end
+
+ test "run: request to a non-existent node returns a badrpc" do
+ assert match?({:badrpc, _}, @command.run([], %{node: :jake@thedog, timeout: 200}))
+ end
+
+ test "run: request to a named, active node succeeds", context do
+ assert @command.run([], context[:opts]) == :ok
+ end
+end
diff --git a/deps/rabbitmq_cli/test/ctl/force_reset_command_test.exs b/deps/rabbitmq_cli/test/ctl/force_reset_command_test.exs
new file mode 100644
index 0000000000..5b695302f4
--- /dev/null
+++ b/deps/rabbitmq_cli/test/ctl/force_reset_command_test.exs
@@ -0,0 +1,68 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+
+defmodule ForceResetCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Ctl.Commands.ForceResetCommand
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ start_rabbitmq_app()
+
+ on_exit([], fn ->
+ start_rabbitmq_app()
+ end)
+
+ :ok
+ end
+
+ setup do
+ {:ok, opts: %{node: get_rabbit_hostname()}}
+ end
+
+ test "validate: with extra arguments returns an arg count error", context do
+ assert @command.validate(["extra"], context[:opts]) == {:validation_failure, :too_many_args}
+ end
+
+ test "run: force reset request to an active node with a stopped rabbit app succeeds", context do
+ add_vhost "some_vhost"
+ # ensure the vhost really does exist
+ assert vhost_exists? "some_vhost"
+ stop_rabbitmq_app()
+ assert :ok == @command.run([], context[:opts])
+ start_rabbitmq_app()
+ # check that the created vhost no longer exists
+ assert match?([_], list_vhosts())
+ end
+
+ test "run: reset request to an active node with a running rabbit app fails", context do
+ add_vhost "some_vhost"
+ assert vhost_exists? "some_vhost"
+ assert match?({:error, :mnesia_unexpectedly_running}, @command.run([], context[:opts]))
+ assert vhost_exists? "some_vhost"
+ end
+
+ test "run: request to a non-existent node returns a badrpc" do
+ opts = %{node: :jake@thedog, timeout: 200}
+ assert match?({:badrpc, _}, @command.run([], opts))
+ end
+
+ test "banner", context do
+ assert @command.banner([], context[:opts]) =~ ~r/Forcefully resetting node #{get_rabbit_hostname()}/
+ end
+
+ test "output mnesia is running error", context do
+ exit_code = RabbitMQ.CLI.Core.ExitCodes.exit_software
+ assert match?({:error, ^exit_code,
+ "Mnesia is still running on node " <> _},
+ @command.output({:error, :mnesia_unexpectedly_running}, context[:opts]))
+
+ end
+end
diff --git a/deps/rabbitmq_cli/test/ctl/forget_cluster_node_command_test.exs b/deps/rabbitmq_cli/test/ctl/forget_cluster_node_command_test.exs
new file mode 100644
index 0000000000..0f09e4fee8
--- /dev/null
+++ b/deps/rabbitmq_cli/test/ctl/forget_cluster_node_command_test.exs
@@ -0,0 +1,132 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2016-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule ForgetClusterNodeCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Ctl.Commands.ForgetClusterNodeCommand
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+ node = get_rabbit_hostname()
+
+ start_rabbitmq_app()
+
+ {:ok, plugins_dir} =
+ :rabbit_misc.rpc_call(node, :application, :get_env, [:rabbit, :plugins_dir])
+
+ rabbitmq_home = :rabbit_misc.rpc_call(node, :code, :lib_dir, [:rabbit])
+ mnesia_dir = :rabbit_misc.rpc_call(node, :rabbit_mnesia, :dir, [])
+
+ feature_flags_file =
+ :rabbit_misc.rpc_call(node, :rabbit_feature_flags, :enabled_feature_flags_list_file, [])
+
+ on_exit([], fn ->
+ start_rabbitmq_app()
+ end)
+
+ {:ok,
+ opts: %{
+ rabbitmq_home: rabbitmq_home,
+ plugins_dir: plugins_dir,
+ mnesia_dir: mnesia_dir,
+ feature_flags_file: feature_flags_file,
+ offline: false
+ }}
+ end
+
+ setup context do
+ {:ok, opts: Map.merge(context[:opts], %{node: get_rabbit_hostname()})}
+ end
+
+ test "validate: specifying no target node is reported as an error", context do
+ assert @command.validate([], context[:opts]) ==
+ {:validation_failure, :not_enough_args}
+ end
+
+ test "validate: specifying multiple target nodes is reported as an error", context do
+ assert @command.validate(["a", "b", "c"], context[:opts]) ==
+ {:validation_failure, :too_many_args}
+ end
+
+ test "validate_execution_environment: offline request to a running node fails", context do
+ assert match?(
+ {:validation_failure, :node_running},
+ @command.validate_execution_environment(
+ ["other_node@localhost"],
+ Map.merge(context[:opts], %{offline: true})
+ )
+ )
+ end
+
+ test "validate_execution_environment: offline forget without mnesia dir fails", context do
+ offline_opts =
+ Map.merge(
+ context[:opts],
+ %{offline: true, node: :non_exist@localhost}
+ )
+
+ opts_without_mnesia = Map.delete(offline_opts, :mnesia_dir)
+ Application.put_env(:mnesia, :dir, "/tmp")
+ on_exit(fn -> Application.delete_env(:mnesia, :dir) end)
+
+ assert match?(
+ :ok,
+ @command.validate_execution_environment(
+ ["other_node@localhost"],
+ opts_without_mnesia
+ )
+ )
+
+ Application.delete_env(:mnesia, :dir)
+ System.put_env("RABBITMQ_MNESIA_DIR", "/tmp")
+ on_exit(fn -> System.delete_env("RABBITMQ_MNESIA_DIR") end)
+
+ assert match?(
+ :ok,
+ @command.validate_execution_environment(
+ ["other_node@localhost"],
+ opts_without_mnesia
+ )
+ )
+
+ System.delete_env("RABBITMQ_MNESIA_DIR")
+
+ assert match?(
+ :ok,
+ @command.validate_execution_environment(["other_node@localhost"], offline_opts)
+ )
+ end
+
+ test "validate_execution_environment: online mode does not fail is mnesia is not loaded",
+ context do
+ opts_without_mnesia = Map.delete(context[:opts], :mnesia_dir)
+
+ assert match?(
+ :ok,
+ @command.validate_execution_environment(
+ ["other_node@localhost"],
+ opts_without_mnesia
+ )
+ )
+ end
+
+ test "run: online request to a non-existent node returns a badrpc", context do
+ assert match?(
+ {:badrpc, :nodedown},
+ @command.run(
+ [context[:opts][:node]],
+ Map.merge(context[:opts], %{node: :non_exist@localhost})
+ )
+ )
+ end
+
+ test "banner", context do
+ assert @command.banner(["a"], context[:opts]) =~
+ ~r/Removing node a from the cluster/
+ end
+end
diff --git a/deps/rabbitmq_cli/test/ctl/help_command_test.exs b/deps/rabbitmq_cli/test/ctl/help_command_test.exs
new file mode 100644
index 0000000000..d30a4d98c7
--- /dev/null
+++ b/deps/rabbitmq_cli/test/ctl/help_command_test.exs
@@ -0,0 +1,76 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+
+defmodule HelpCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ alias RabbitMQ.CLI.Core.{CommandModules}
+
+ @command RabbitMQ.CLI.Ctl.Commands.HelpCommand
+
+ setup_all do
+ set_scope(:all)
+ :ok
+ end
+
+ test "validate: providing no position arguments passes validation" do
+ assert @command.validate([], %{}) == :ok
+ end
+
+ test "validate: providing one position argument passes validation" do
+ assert @command.validate(["status"], %{}) == :ok
+ end
+
+ test "validate: providing two or more position arguments fails validation" do
+ assert @command.validate(["extra1", "extra2"], %{}) ==
+ {:validation_failure, :too_many_args}
+ end
+
+ test "run: prints basic usage info" do
+ {:ok, lines} = @command.run([], %{})
+ output = Enum.join(lines, "\n")
+ assert output =~ ~r/[-n <node>] [-t <timeout>]/
+ assert output =~ ~r/commands/i
+ end
+
+ test "run: ctl command usage info is printed if command is specified" do
+ ctl_commands = CommandModules.module_map
+ |> Enum.filter(fn({_name, command_mod}) ->
+ to_string(command_mod) =~ ~r/^RabbitMQ\.CLI\.Ctl\.Commands/
+ end)
+ |> Enum.map(fn({name, _}) -> name end)
+
+ Enum.each(
+ ctl_commands,
+ fn(command) ->
+ assert @command.run([command], %{}) =~ ~r/#{command}/
+ end)
+ end
+
+ test "run prints command info" do
+ ctl_commands = CommandModules.module_map
+ |> Enum.filter(fn({_name, command_mod}) ->
+ to_string(command_mod) =~ ~r/^RabbitMQ\.CLI\.Ctl\.Commands/
+ end)
+ |> Enum.map(fn({name, _}) -> name end)
+
+ Enum.each(
+ ctl_commands,
+ fn(command) ->
+ {:ok, lines} = @command.run([], %{})
+ output = Enum.join(lines, "\n")
+ assert output =~ ~r/\n\s+#{command}.*\n/
+ end)
+ end
+
+ test "run: exits with the code of OK" do
+ assert @command.output({:ok, "Help string"}, %{}) ==
+ {:ok, "Help string"}
+ end
+end
diff --git a/deps/rabbitmq_cli/test/ctl/import_definitions_command_test.exs b/deps/rabbitmq_cli/test/ctl/import_definitions_command_test.exs
new file mode 100644
index 0000000000..fb7f975ec5
--- /dev/null
+++ b/deps/rabbitmq_cli/test/ctl/import_definitions_command_test.exs
@@ -0,0 +1,88 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+
+defmodule ImportDefinitionsCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Ctl.Commands.ImportDefinitionsCommand
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ :ok
+ end
+
+ setup context do
+ {:ok, opts: %{
+ node: get_rabbit_hostname(),
+ timeout: context[:test_timeout] || 30000,
+ format: context[:format] || "json"
+ }}
+ end
+
+ test "merge_defaults: defaults to JSON for format" do
+ assert @command.merge_defaults([valid_file_path()], %{}) ==
+ {[valid_file_path()], %{format: "json"}}
+ end
+
+ test "merge_defaults: defaults to --silent if target is stdout" do
+ assert @command.merge_defaults(["-"], %{}) == {["-"], %{format: "json", silent: true}}
+ end
+
+ test "merge_defaults: format is case insensitive" do
+ assert @command.merge_defaults([valid_file_path()], %{format: "JSON"}) ==
+ {[valid_file_path()], %{format: "json"}}
+ assert @command.merge_defaults([valid_file_path()], %{format: "Erlang"}) ==
+ {[valid_file_path()], %{format: "erlang"}}
+ end
+
+ test "merge_defaults: format can be overridden" do
+ assert @command.merge_defaults([valid_file_path()], %{format: "erlang"}) ==
+ {[valid_file_path()], %{format: "erlang"}}
+ end
+
+ test "validate: accepts a file path argument", context do
+ assert @command.validate([valid_file_path()], context[:opts]) == :ok
+ end
+
+ test "validate: unsupported format fails validation", context do
+ assert match?({:validation_failure, {:bad_argument, _}},
+ @command.validate([valid_file_path()], Map.merge(context[:opts], %{format: "yolo"})))
+ end
+
+ test "validate: more than one positional argument fails validation", context do
+ assert @command.validate([valid_file_path(), "extra-arg"], context[:opts]) ==
+ {:validation_failure, :too_many_args}
+ end
+
+ test "validate: supports JSON and Erlang formats", context do
+ assert @command.validate([valid_file_path()], Map.merge(context[:opts], %{format: "json"})) == :ok
+ assert @command.validate([valid_file_path()], Map.merge(context[:opts], %{format: "erlang"})) == :ok
+ end
+
+ @tag test_timeout: 3000
+ test "run: targeting an unreachable node throws a badrpc", context do
+ result = @command.run([valid_file_path()],
+ %{node: :jake@thedog,
+ timeout: context[:test_timeout],
+ format: "json"})
+ assert match?({:badrpc, _}, result)
+ end
+
+ @tag format: "json"
+ test "run: imports definitions from a file", context do
+ assert :ok == @command.run([valid_file_path()], context[:opts])
+
+ # clean up the state we've modified
+ clear_parameter("/", "federation-upstream", "up-1")
+ end
+
+ defp valid_file_path() do
+ Path.join([File.cwd!(), "test", "fixtures", "files", "definitions.json"])
+ end
+end
diff --git a/deps/rabbitmq_cli/test/ctl/join_cluster_command_test.exs b/deps/rabbitmq_cli/test/ctl/join_cluster_command_test.exs
new file mode 100644
index 0000000000..2a9c7ec861
--- /dev/null
+++ b/deps/rabbitmq_cli/test/ctl/join_cluster_command_test.exs
@@ -0,0 +1,104 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2016-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+
+defmodule JoinClusterCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Ctl.Commands.JoinClusterCommand
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ start_rabbitmq_app()
+
+ on_exit([], fn ->
+ start_rabbitmq_app()
+ end)
+
+ :ok
+ end
+
+ setup do
+ {:ok, opts: %{
+ node: get_rabbit_hostname(),
+ disc: true,
+ ram: false,
+ }}
+ end
+
+ test "validate: specifying both --disc and --ram is reported as invalid", context do
+ assert match?(
+ {:validation_failure, {:bad_argument, _}},
+ @command.validate(["a"], Map.merge(context[:opts], %{disc: true, ram: true}))
+ )
+ end
+ test "validate: specifying no target node is reported as an error", context do
+ assert @command.validate([], context[:opts]) ==
+ {:validation_failure, :not_enough_args}
+ end
+ test "validate: specifying multiple target nodes is reported as an error", context do
+ assert @command.validate(["a", "b", "c"], context[:opts]) ==
+ {:validation_failure, :too_many_args}
+ end
+
+ # TODO
+ #test "run: successful join as a disc node", context do
+ #end
+
+ # TODO
+ #test "run: successful join as a RAM node", context do
+ #end
+
+ test "run: joining self is invalid", context do
+ stop_rabbitmq_app()
+ assert match?(
+ {:error, :cannot_cluster_node_with_itself},
+ @command.run([context[:opts][:node]], context[:opts]))
+ start_rabbitmq_app()
+ end
+
+ # TODO
+ test "run: request to an active node fails", context do
+ assert match?(
+ {:error, :mnesia_unexpectedly_running},
+ @command.run([context[:opts][:node]], context[:opts]))
+ end
+
+ test "run: request to a non-existent node returns a badrpc", context do
+ opts = %{
+ node: :jake@thedog,
+ disc: true,
+ ram: false,
+ timeout: 200
+ }
+ assert match?(
+ {:badrpc, _},
+ @command.run([context[:opts][:node]], opts))
+ end
+
+ test "run: joining a non-existent node returns a badrpc", context do
+ stop_rabbitmq_app()
+ assert match?(
+ {:badrpc_multi, _, [_]},
+ @command.run([:jake@thedog], context[:opts]))
+ start_rabbitmq_app()
+ end
+
+ test "banner", context do
+ assert @command.banner(["a"], context[:opts]) =~
+ ~r/Clustering node #{get_rabbit_hostname()} with a/
+ end
+
+ test "output mnesia is running error", context do
+ exit_code = RabbitMQ.CLI.Core.ExitCodes.exit_software
+ assert match?({:error, ^exit_code,
+ "Mnesia is still running on node " <> _},
+ @command.output({:error, :mnesia_unexpectedly_running}, context[:opts]))
+
+ end
+end
diff --git a/deps/rabbitmq_cli/test/ctl/list_bindings_command_test.exs b/deps/rabbitmq_cli/test/ctl/list_bindings_command_test.exs
new file mode 100644
index 0000000000..dae2377322
--- /dev/null
+++ b/deps/rabbitmq_cli/test/ctl/list_bindings_command_test.exs
@@ -0,0 +1,85 @@
+defmodule ListBindingsCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Ctl.Commands.ListBindingsCommand
+ @vhost "test1"
+ @user "guest"
+ @default_timeout :infinity
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ :ok
+ end
+
+ setup context do
+ add_vhost @vhost
+ set_permissions @user, @vhost, [".*", ".*", ".*"]
+ on_exit(fn ->
+ delete_vhost @vhost
+ end)
+ {
+ :ok,
+ opts: %{
+ node: get_rabbit_hostname(),
+ timeout: context[:test_timeout] || @default_timeout,
+ vhost: @vhost
+ }
+ }
+ end
+
+ test "merge_defaults: adds all keys if none specificed", context do
+ default_keys = ~w(source_name source_kind destination_name destination_kind routing_key arguments)
+ declare_queue("test_queue", @vhost)
+ :timer.sleep(100)
+
+ {keys, _} = @command.merge_defaults([], context[:opts])
+ assert default_keys == keys
+ end
+
+ test "merge_defaults: includes table headers by default", _context do
+ {_, opts} = @command.merge_defaults([], %{})
+ assert opts[:table_headers]
+ end
+
+ test "validate: returns bad_info_key on a single bad arg", context do
+ assert @command.validate(["quack"], context[:opts]) ==
+ {:validation_failure, {:bad_info_key, [:quack]}}
+ end
+
+ test "validate: returns multiple bad args return a list of bad info key values", context do
+ assert @command.validate(["quack", "oink"], context[:opts]) ==
+ {:validation_failure, {:bad_info_key, [:oink, :quack]}}
+ end
+
+ test "validate: return bad_info_key on mix of good and bad args", context do
+ assert @command.validate(["quack", "source_name"], context[:opts]) ==
+ {:validation_failure, {:bad_info_key, [:quack]}}
+ assert @command.validate(["source_name", "oink"], context[:opts]) ==
+ {:validation_failure, {:bad_info_key, [:oink]}}
+ assert @command.validate(["source_kind", "oink", "source_name"], context[:opts]) ==
+ {:validation_failure, {:bad_info_key, [:oink]}}
+ end
+
+ @tag test_timeout: 0
+ test "run: timeout causes command to return badrpc", context do
+ assert run_command_to_list(@command, [["source_name"], context[:opts]]) ==
+ [{:badrpc, {:timeout, 0.0}}]
+ end
+
+ test "run: no bindings for no queues", context do
+ [] = run_command_to_list(@command, [["source_name"], context[:opts]])
+ end
+
+ test "run: can filter info keys", context do
+ wanted_keys = ~w(source_name destination_name routing_key)
+ declare_queue("test_queue", @vhost)
+ assert run_command_to_list(@command, [wanted_keys, context[:opts]]) ==
+ [[source_name: "", destination_name: "test_queue", routing_key: "test_queue"]]
+ end
+
+ test "banner" do
+ assert String.starts_with?(@command.banner([], %{vhost: "some_vhost"}), "Listing bindings")
+ end
+end
diff --git a/deps/rabbitmq_cli/test/ctl/list_channels_command_test.exs b/deps/rabbitmq_cli/test/ctl/list_channels_command_test.exs
new file mode 100644
index 0000000000..6ccf602211
--- /dev/null
+++ b/deps/rabbitmq_cli/test/ctl/list_channels_command_test.exs
@@ -0,0 +1,118 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule ListChannelsCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Ctl.Commands.ListChannelsCommand
+ @default_timeout :infinity
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ close_all_connections(get_rabbit_hostname())
+
+ on_exit([], fn ->
+ close_all_connections(get_rabbit_hostname())
+ end)
+
+ :ok
+ end
+
+ setup context do
+ {
+ :ok,
+ opts: %{
+ node: get_rabbit_hostname(),
+ timeout: context[:test_timeout] || @default_timeout
+ }
+ }
+ end
+
+ test "merge_defaults: default channel info keys are pid, user, consumer_count, and messages_unacknowledged", context do
+ assert match?({~w(pid user consumer_count messages_unacknowledged), _}, @command.merge_defaults([], context[:opts]))
+ end
+
+ test "validate: returns bad_info_key on a single bad arg", context do
+ assert @command.validate(["quack"], context[:opts]) ==
+ {:validation_failure, {:bad_info_key, [:quack]}}
+ end
+
+ test "validate: returns multiple bad args return a list of bad info key values", context do
+ assert @command.validate(["quack", "oink"], context[:opts]) ==
+ {:validation_failure, {:bad_info_key, [:oink, :quack]}}
+ end
+
+ test "validate: returns bad_info_key on mix of good and bad args", context do
+ assert @command.validate(["quack", "pid"], context[:opts]) ==
+ {:validation_failure, {:bad_info_key, [:quack]}}
+ assert @command.validate(["user", "oink"], context[:opts]) ==
+ {:validation_failure, {:bad_info_key, [:oink]}}
+ assert @command.validate(["user", "oink", "pid"], context[:opts]) ==
+ {:validation_failure, {:bad_info_key, [:oink]}}
+ end
+
+ @tag test_timeout: 0
+ test "run: zero timeout causes command to return badrpc", context do
+ assert run_command_to_list(@command, [["user"], context[:opts]]) ==
+ [{:badrpc, {:timeout, 0.0}}]
+ end
+
+ test "run: multiple channels on multiple connections", context do
+ node_name = get_rabbit_hostname()
+ close_all_connections(node_name)
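+ # snapshot channels that already exist so the assertions below only consider the ones opened here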
+ existent_channels = :rabbit_misc.rpc_call(node_name, :rabbit_channel, :list, [])
+ with_channel("/", fn(_channel1) ->
+ with_channel("/", fn(_channel2) ->
+ all_channels = run_command_to_list(@command, [["pid", "user", "connection"], context[:opts]])
+ channels = Enum.filter(all_channels,
+ fn(ch) ->
+ not Enum.member?(existent_channels, ch[:pid])
+ end)
+ chan1 = Enum.at(channels, 0)
+ chan2 = Enum.at(channels, 1)
+ assert Keyword.keys(chan1) == ~w(pid user connection)a
+ assert Keyword.keys(chan2) == ~w(pid user connection)a
+ assert "guest" == chan1[:user]
+ assert "guest" == chan2[:user]
+ assert chan1[:pid] !== chan2[:pid]
+ end)
+ end)
+ end
+
+ test "run: multiple channels on single connection", context do
+ node_name = get_rabbit_hostname()
+ close_all_connections(get_rabbit_hostname())
+ with_connection("/", fn(conn) ->
+ existent_channels = :rabbit_misc.rpc_call(node_name, :rabbit_channel, :list, [])
+ {:ok, _} = AMQP.Channel.open(conn)
+ {:ok, _} = AMQP.Channel.open(conn)
+ all_channels = run_command_to_list(@command, [["pid", "user", "connection"], context[:opts]])
+ channels = Enum.filter(all_channels,
+ fn(ch) ->
+ not Enum.member?(existent_channels, ch[:pid])
+ end)
+
+ chan1 = Enum.at(channels, 0)
+ chan2 = Enum.at(channels, 1)
+ assert Keyword.keys(chan1) == ~w(pid user connection)a
+ assert Keyword.keys(chan2) == ~w(pid user connection)a
+ assert "guest" == chan1[:user]
+ assert "guest" == chan2[:user]
+ assert chan1[:pid] !== chan2[:pid]
+ end)
+ end
+
+ test "run: info keys order is preserved", context do
+ close_all_connections(get_rabbit_hostname())
+ with_channel("/", fn(_channel) ->
+ channels = run_command_to_list(@command, [~w(connection vhost name pid number user), context[:opts]])
+ chan = Enum.at(channels, 0)
+ assert Keyword.keys(chan) == ~w(connection vhost name pid number user)a
+ end)
+ end
+end
diff --git a/deps/rabbitmq_cli/test/ctl/list_ciphers_command_test.exs b/deps/rabbitmq_cli/test/ctl/list_ciphers_command_test.exs
new file mode 100644
index 0000000000..6f600ba5d8
--- /dev/null
+++ b/deps/rabbitmq_cli/test/ctl/list_ciphers_command_test.exs
@@ -0,0 +1,29 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule ListCiphersCommandTest do
+ use ExUnit.Case
+ @command RabbitMQ.CLI.Ctl.Commands.ListCiphersCommand
+
+ test "merge_defaults: nothing to do" do
+ assert @command.merge_defaults([], %{}) == {[], %{}}
+ end
+
+ test "validate: treats positional arguments as a failure" do
+ assert @command.validate(["extra-arg"], %{}) == {:validation_failure, :too_many_args}
+ end
+
+ test "validate: treats empty positional arguments and default switches as a success" do
+ assert @command.validate([], %{}) == :ok
+ end
+
+ test "run: lists ciphers", _context do
+ assert match?(
+ {:ok, _},
+ @command.run([], %{})
+ )
+ end
+end
diff --git a/deps/rabbitmq_cli/test/ctl/list_connections_command_test.exs b/deps/rabbitmq_cli/test/ctl/list_connections_command_test.exs
new file mode 100644
index 0000000000..9cfcb8787f
--- /dev/null
+++ b/deps/rabbitmq_cli/test/ctl/list_connections_command_test.exs
@@ -0,0 +1,90 @@
+defmodule ListConnectionsCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Ctl.Commands.ListConnectionsCommand
+ @user "guest"
+ @default_timeout 15000
+ @default_options %{table_headers: true}
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ close_all_connections(get_rabbit_hostname())
+
+ on_exit([], fn ->
+ close_all_connections(get_rabbit_hostname())
+ end)
+
+ :ok
+ end
+
+ setup context do
+ {
+ :ok,
+ opts: %{
+ node: get_rabbit_hostname(),
+ timeout: context[:test_timeout] || @default_timeout
+ }
+ }
+ end
+
+ test "merge_defaults: user, peer_host, peer_port and state by default" do
+ assert @command.merge_defaults([], %{}) == {~w(user peer_host peer_port state), @default_options}
+ end
+
+ test "merge_defaults: includes table headers by default", _context do
+ {_, opts} = @command.merge_defaults([], %{})
+ assert opts[:table_headers]
+ end
+
+ test "validate: returns bad_info_key on a single bad arg", context do
+ assert @command.validate(["quack"], context[:opts]) ==
+ {:validation_failure, {:bad_info_key, [:quack]}}
+ end
+
+ test "validate: multiple bad args return a list of bad info key values", context do
+ assert @command.validate(["quack", "oink"], context[:opts]) ==
+ {:validation_failure, {:bad_info_key, [:oink, :quack]}}
+ end
+
+ test "validate: return bad_info_key on mix of good and bad args", context do
+ assert @command.validate(["quack", "peer_host"], context[:opts]) ==
+ {:validation_failure, {:bad_info_key, [:quack]}}
+ assert @command.validate(["user", "oink"], context[:opts]) ==
+ {:validation_failure, {:bad_info_key, [:oink]}}
+ assert @command.validate(["user", "oink", "peer_host"], context[:opts]) ==
+ {:validation_failure, {:bad_info_key, [:oink]}}
+ end
+
+ @tag test_timeout: 0
+ test "run: timeout causes command to return badrpc", context do
+ assert run_command_to_list(@command, [["name"], context[:opts]]) ==
+ [{:badrpc, {:timeout, 0.0}}]
+ end
+
+ test "run: filter single key", context do
+ vhost = "/"
+ with_connection(vhost, fn(_conn) ->
+ conns = run_command_to_list(@command, [["name"], context[:opts]])
+ assert (Enum.map(conns, &Keyword.keys/1) |> Enum.uniq) == [[:name]]
+ assert Enum.any?(conns, fn(conn) -> conn[:name] != nil end)
+ end)
+ end
+
+ test "run: show connection vhost", context do
+ vhost = "custom_vhost"
+ add_vhost vhost
+ set_permissions @user, vhost, [".*", ".*", ".*"]
+ on_exit(fn ->
+ delete_vhost vhost
+ end)
+ with_connection(vhost, fn(_conn) ->
+ conns = run_command_to_list(@command, [["vhost"], context[:opts]])
+ assert (Enum.map(conns, &Keyword.keys/1) |> Enum.uniq) == [[:vhost]]
+ assert Enum.any?(conns, fn(conn) -> conn[:vhost] == vhost end)
+ end)
+ end
+
+end
diff --git a/deps/rabbitmq_cli/test/ctl/list_consumers_command_test.exs b/deps/rabbitmq_cli/test/ctl/list_consumers_command_test.exs
new file mode 100644
index 0000000000..d49313162a
--- /dev/null
+++ b/deps/rabbitmq_cli/test/ctl/list_consumers_command_test.exs
@@ -0,0 +1,213 @@
+defmodule ListConsumersCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Ctl.Commands.ListConsumersCommand
+
+ @vhost "test1"
+ @user "guest"
+ @default_timeout :infinity
+ @info_keys ~w(queue_name channel_pid consumer_tag ack_required prefetch_count active arguments)
+ @default_options %{vhost: "/", table_headers: true}
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ :ok
+ end
+
+ setup context do
+ add_vhost @vhost
+ set_permissions @user, @vhost, [".*", ".*", ".*"]
+ on_exit(fn ->
+ delete_vhost @vhost
+ end)
+ {
+ :ok,
+ opts: %{
+ node: get_rabbit_hostname(),
+ timeout: context[:test_timeout] || @default_timeout,
+ vhost: @vhost
+ }
+ }
+ end
+
+ test "merge_defaults: defaults can be overridden" do
+ assert @command.merge_defaults([], %{}) == {@info_keys, @default_options}
+ assert @command.merge_defaults([], %{vhost: "non_default"}) == {@info_keys, %{vhost: "non_default",
+ table_headers: true}}
+ end
+
+ test "validate: returns bad_info_key on a single bad arg", context do
+ assert @command.validate(["quack"], context[:opts]) ==
+ {:validation_failure, {:bad_info_key, [:quack]}}
+ end
+
+ test "validate: returns multiple bad args return a list of bad info key values", context do
+ assert @command.validate(["quack", "oink"], context[:opts]) ==
+ {:validation_failure, {:bad_info_key, [:oink, :quack]}}
+ end
+
+ test "validate: return bad_info_key on mix of good and bad args", context do
+ assert @command.validate(["quack", "queue_name"], context[:opts]) ==
+ {:validation_failure, {:bad_info_key, [:quack]}}
+ assert @command.validate(["queue_name", "oink"], context[:opts]) ==
+ {:validation_failure, {:bad_info_key, [:oink]}}
+ assert @command.validate(["channel_pid", "oink", "queue_name"], context[:opts]) ==
+ {:validation_failure, {:bad_info_key, [:oink]}}
+ end
+
+ @tag test_timeout: 0
+ test "run: zero timeout causes command to return badrpc", context do
+ assert run_command_to_list(@command, [["queue_name"], context[:opts]]) ==
+ [{:badrpc, {:timeout, 0.0}}]
+ end
+
+ test "run: no consumers for no open connections", context do
+ close_all_connections(get_rabbit_hostname())
+ [] = run_command_to_list(@command, [["queue_name"], context[:opts]])
+ end
+
+ test "run: defaults test", context do
+ queue_name = "test_queue1"
+ consumer_tag = "i_am_consumer"
+ info_keys_s = ~w(queue_name channel_pid consumer_tag ack_required prefetch_count arguments)
+ info_keys_a = Enum.map(info_keys_s, &String.to_atom/1)
+ declare_queue(queue_name, @vhost)
+ with_channel(@vhost, fn(channel) ->
+ {:ok, _} = AMQP.Basic.consume(channel, queue_name, nil, [consumer_tag: consumer_tag])
+ :timer.sleep(100)
+ [[consumer]] = run_command_to_list(@command, [info_keys_s, context[:opts]])
+ assert info_keys_a == Keyword.keys(consumer)
+ assert consumer[:consumer_tag] == consumer_tag
+ assert consumer[:queue_name] == queue_name
+ assert Keyword.delete(consumer, :channel_pid) ==
+ [queue_name: queue_name, consumer_tag: consumer_tag,
+ ack_required: true, prefetch_count: 0, arguments: []]
+
+ end)
+ end
+
+ test "run: consumers are grouped by queues (multiple consumer per queue)", context do
+ queue_name1 = "test_queue1"
+ queue_name2 = "test_queue2"
+ declare_queue("test_queue1", @vhost)
+ declare_queue("test_queue2", @vhost)
+ with_channel(@vhost, fn(channel) ->
+ {:ok, tag1} = AMQP.Basic.consume(channel, queue_name1)
+ {:ok, tag2} = AMQP.Basic.consume(channel, queue_name2)
+ {:ok, tag3} = AMQP.Basic.consume(channel, queue_name2)
+ :timer.sleep(100)
+ try do
+ consumers = run_command_to_list(@command, [["queue_name", "consumer_tag"], context[:opts]])
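+ # results are grouped per queue: a one-element list for queue1's consumer, a two-element list for queue2's pair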
+ {[[consumer1]], [consumers2]} = Enum.split_with(consumers, fn([_]) -> true; ([_,_]) -> false end)
+ assert [queue_name: queue_name1, consumer_tag: tag1] == consumer1
+ assert Keyword.equal?([{tag2, queue_name2}, {tag3, queue_name2}],
+ for([queue_name: q, consumer_tag: t] <- consumers2, do: {t, q}))
+ after
+ AMQP.Basic.cancel(channel, tag1)
+ AMQP.Basic.cancel(channel, tag2)
+ AMQP.Basic.cancel(channel, tag3)
+ end
+ end)
+ end
+
+ test "run: active and activity status fields are set properly when requested", context do
+ queue_types = ["classic", "quorum"]
+ Enum.each queue_types, fn queue_type ->
+ queue_name = "active-activity-status-fields-" <> queue_type
+ declare_queue(queue_name, @vhost, true, false, [{"x-queue-type", :longstr, queue_type}])
+ :timer.sleep(200)
+ with_channel(@vhost, fn(channel) ->
+ {:ok, tag1} = AMQP.Basic.consume(channel, queue_name)
+ {:ok, tag2} = AMQP.Basic.consume(channel, queue_name)
+ {:ok, tag3} = AMQP.Basic.consume(channel, queue_name)
+ :timer.sleep(100)
+ try do
+ consumers = List.first(run_command_to_list(@command, [["queue_name", "consumer_tag", "active", "activity_status"], context[:opts]]))
+ assert Keyword.equal?([{tag1, queue_name, true, :up},
+ {tag2, queue_name, true, :up}, {tag3, queue_name, true, :up}],
+ for([queue_name: q, consumer_tag: t, active: a, activity_status: as] <- consumers, do: {t, q, a, as}))
+ after
+ AMQP.Basic.cancel(channel, tag1)
+ AMQP.Basic.cancel(channel, tag2)
+ AMQP.Basic.cancel(channel, tag3)
+ :timer.sleep(100)
+ delete_queue(queue_name, @vhost)
+ end
+ end)
+ end
+ end
+
+ test "run: active and activity status fields are set properly when requested and single active consumer is enabled", context do
+ queue_types = ["classic", "quorum"]
+ Enum.each queue_types, fn queue_type ->
+ queue_name = "single-active-consumer-" <> queue_type
+ declare_queue(queue_name, @vhost, true, false,
+ [{"x-single-active-consumer", :bool, true}, {"x-queue-type", :longstr, queue_type}])
+ :timer.sleep(200)
+ with_channel(@vhost, fn(channel) ->
+ {:ok, tag1} = AMQP.Basic.consume(channel, queue_name)
+ {:ok, tag2} = AMQP.Basic.consume(channel, queue_name)
+ {:ok, tag3} = AMQP.Basic.consume(channel, queue_name)
+ :timer.sleep(100)
+ try do
+ consumers = List.first(run_command_to_list(@command, [["queue_name", "consumer_tag", "active", "activity_status"], context[:opts]]))
+ assert Keyword.equal?([{tag1, queue_name, true, :single_active},
+ {tag2, queue_name, false, :waiting}, {tag3, queue_name, false, :waiting}],
+ for([queue_name: q, consumer_tag: t, active: a, activity_status: as] <- consumers, do: {t, q, a, as}))
+ AMQP.Basic.cancel(channel, tag1)
+ :timer.sleep(100)
+ consumers = List.first(run_command_to_list(@command, [["queue_name", "consumer_tag", "active", "activity_status"], context[:opts]]))
+ assert Keyword.equal?([{tag2, queue_name, true, :single_active}, {tag3, queue_name, false, :waiting}],
+ for([queue_name: q, consumer_tag: t, active: a, activity_status: as] <- consumers, do: {t, q, a, as}))
+ after
+ AMQP.Basic.cancel(channel, tag2)
+ AMQP.Basic.cancel(channel, tag3)
+ :timer.sleep(100)
+ delete_queue(queue_name, @vhost)
+ end
+ end)
+ end
+ end
+
+ test "fill_consumer_active_fields: add missing fields if necessary" do
+ consumer38 = [
+ queue_name: {:resource, "/", :queue, "queue1"},
+ channel_pid: "",
+ consumer_tag: "ctag1",
+ ack_required: false,
+ prefetch_count: 0,
+ active: true,
+ activity_status: :up,
+ arguments: []
+ ]
+ assert @command.fill_consumer_active_fields({[
+ consumer38
+ ], {1, :continue}}) == {[consumer38], {1, :continue}}
+
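+ # a record missing those fields (presumably from an older node) is backfilled with active: true and activity_status: :up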
+ assert @command.fill_consumer_active_fields({[
+ [
+ queue_name: {:resource, "/", :queue, "queue2"},
+ channel_pid: "",
+ consumer_tag: "ctag2",
+ ack_required: false,
+ prefetch_count: 0,
+ arguments: []
+ ]
+ ], {1, :continue}}) == {[
+ [
+ queue_name: {:resource, "/", :queue, "queue2"},
+ channel_pid: "",
+ consumer_tag: "ctag2",
+ ack_required: false,
+ prefetch_count: 0,
+ active: true,
+ activity_status: :up,
+ arguments: []
+ ]
+ ], {1, :continue}}
+
+ end
+
+end
diff --git a/deps/rabbitmq_cli/test/ctl/list_exchanges_command_test.exs b/deps/rabbitmq_cli/test/ctl/list_exchanges_command_test.exs
new file mode 100644
index 0000000000..fd89cfd066
--- /dev/null
+++ b/deps/rabbitmq_cli/test/ctl/list_exchanges_command_test.exs
@@ -0,0 +1,160 @@
+defmodule ListExchangesCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Ctl.Commands.ListExchangesCommand
+
+ @vhost "test1"
+ @user "guest"
+ @default_timeout :infinity
+ @default_exchanges [{"amq.direct", :direct},
+ {"amq.fanout", :fanout},
+ {"amq.match", :headers},
+ {"amq.rabbitmq.trace", :topic},
+ {"amq.headers", :headers},
+ {"amq.topic", :topic},
+ {"", :direct}]
+ @default_options %{vhost: "/", table_headers: true}
+
+ defp default_exchange_names() do
+ {names, _types} = Enum.unzip(@default_exchanges)
+ names
+ end
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ :ok
+ end
+
+ setup context do
+ add_vhost @vhost
+ set_permissions @user, @vhost, [".*", ".*", ".*"]
+ on_exit(fn ->
+ delete_vhost @vhost
+ end)
+ {
+ :ok,
+ opts: %{
+ quiet: true,
+ node: get_rabbit_hostname(),
+ timeout: context[:test_timeout] || @default_timeout,
+ vhost: @vhost
+ }
+ }
+ end
+
+ test "merge_defaults: should include name and type when no arguments provided and add default vhost to opts" do
+ assert @command.merge_defaults([], %{})
+ == {["name", "type"], @default_options}
+ end
+
+ test "merge_defaults: defaults can be overridden" do
+ assert @command.merge_defaults([], %{}) == {["name", "type"], @default_options}
+ assert @command.merge_defaults([], %{vhost: "non_default"}) == {["name", "type"], %{vhost: "non_default",
+ table_headers: true}}
+ end
+
+ test "validate: returns bad_info_key on a single bad arg", context do
+ assert @command.validate(["quack"], context[:opts]) ==
+ {:validation_failure, {:bad_info_key, [:quack]}}
+ end
+
+ test "validate: returns multiple bad args return a list of bad info key values", context do
+ assert @command.validate(["quack", "oink"], context[:opts]) ==
+ {:validation_failure, {:bad_info_key, [:oink, :quack]}}
+ end
+
+ test "validate: return bad_info_key on mix of good and bad args", context do
+ assert @command.validate(["quack", "type"], context[:opts]) ==
+ {:validation_failure, {:bad_info_key, [:quack]}}
+ assert @command.validate(["name", "oink"], context[:opts]) ==
+ {:validation_failure, {:bad_info_key, [:oink]}}
+ assert @command.validate(["name", "oink", "type"], context[:opts]) ==
+ {:validation_failure, {:bad_info_key, [:oink]}}
+ end
+
+ @tag test_timeout: 0
+ test "run: zero timeout causes command to return badrpc", context do
+ assert run_command_to_list(@command, [["name"], context[:opts]]) ==
+ [{:badrpc, {:timeout, 0.0}}]
+ end
+
+ test "run: show default exchanges by default", context do
+ assert MapSet.new(run_command_to_list(@command, [["name"], context[:opts]])) ==
+ MapSet.new(for {ex_name, _ex_type} <- @default_exchanges, do: [name: ex_name])
+ end
+
+ test "run: default options test", context do
+ exchange_name = "test_exchange"
+ declare_exchange(exchange_name, @vhost)
+ assert MapSet.new(run_command_to_list(@command, [["name", "type"], context[:opts]])) ==
+ MapSet.new(
+ for({ex_name, ex_type} <- @default_exchanges, do: [name: ex_name, type: ex_type]) ++
+ [[name: exchange_name, type: :direct]])
+ end
+
+ test "run: list multiple exchanges", context do
+ declare_exchange("test_exchange_1", @vhost, :direct)
+ declare_exchange("test_exchange_2", @vhost, :fanout)
+ non_default_exchanges = run_command_to_list(@command, [["name", "type"], context[:opts]])
+ |> without_default_exchanges
+ assert_set_equal(
+ non_default_exchanges,
+ [[name: "test_exchange_1", type: :direct],
+ [name: "test_exchange_2", type: :fanout]])
+ end
+
+ def assert_set_equal(one, two) do
+ assert MapSet.new(one) == MapSet.new(two)
+ end
+
+ test "run: info keys filter single key", context do
+ declare_exchange("test_exchange_1", @vhost)
+ declare_exchange("test_exchange_2", @vhost)
+ non_default_exchanges = run_command_to_list(@command, [["name"], context[:opts]])
+ |> without_default_exchanges
+ assert_set_equal(
+ non_default_exchanges,
+ [[name: "test_exchange_1"],
+ [name: "test_exchange_2"]])
+ end
+
+ test "run: info keys add additional keys", context do
+ declare_exchange("durable_exchange", @vhost, :direct, true)
+ declare_exchange("auto_delete_exchange", @vhost, :fanout, false, true)
+ non_default_exchanges = run_command_to_list(@command, [["name", "type", "durable", "auto_delete"], context[:opts]])
+ |> without_default_exchanges
+ assert_set_equal(
+ non_default_exchanges,
+ [[name: "auto_delete_exchange", type: :fanout, durable: false, auto_delete: true],
+ [name: "durable_exchange", type: :direct, durable: true, auto_delete: false]])
+ end
+
+ test "run: specifying a vhost returns the targeted vhost exchanges", context do
+ other_vhost = "other_vhost"
+ add_vhost other_vhost
+ on_exit(fn ->
+ delete_vhost other_vhost
+ end)
+ declare_exchange("test_exchange_1", @vhost)
+ declare_exchange("test_exchange_2", other_vhost)
+ non_default_exchanges1 = run_command_to_list(@command, [["name"], context[:opts]])
+ |> without_default_exchanges
+
+ non_default_exchanges2 = run_command_to_list(@command, [["name"], %{context[:opts] | :vhost => other_vhost}])
+ |> without_default_exchanges
+
+ assert non_default_exchanges1 == [[name: "test_exchange_1"]]
+ assert non_default_exchanges2 == [[name: "test_exchange_2"]]
+ end
+
+ defp without_default_exchanges(xs) do
+ Enum.filter(xs,
+ fn(x) ->
+ not Enum.member?(default_exchange_names(), x[:name])
+ end)
+ end
+
+end
diff --git a/deps/rabbitmq_cli/test/ctl/list_feature_flags_command_test.exs b/deps/rabbitmq_cli/test/ctl/list_feature_flags_command_test.exs
new file mode 100644
index 0000000000..b2cf1ad52a
--- /dev/null
+++ b/deps/rabbitmq_cli/test/ctl/list_feature_flags_command_test.exs
@@ -0,0 +1,122 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2018-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule ListFeatureFlagsCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Ctl.Commands.ListFeatureFlagsCommand
+
+ @flag1 :ff1_from_list_ff_testsuite
+ @flag2 :ff2_from_list_ff_testsuite
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ # Define an arbitrary feature flag for the test.
+ node = get_rabbit_hostname()
+ new_feature_flags = %{
+ @flag1 =>
+ %{desc: "My feature flag #1",
+ provided_by: :ListFeatureFlagsCommandTest,
+ stability: :stable},
+ @flag2 =>
+ %{desc: "My feature flag #2",
+ provided_by: :ListFeatureFlagsCommandTest,
+ stability: :stable}}
+ :ok = :rabbit_misc.rpc_call(
+ node, :rabbit_feature_flags, :initialize_registry, [new_feature_flags])
+ :ok = :rabbit_misc.rpc_call(
+ node, :rabbit_feature_flags, :enable_all, [])
+
+ name_result = [
+ [{:name, @flag1}],
+ [{:name, @flag2}]
+ ]
+
+ full_result = [
+ [{:name, @flag1}, {:state, :enabled}],
+ [{:name, @flag2}, {:state, :enabled}]
+ ]
+
+ {
+ :ok,
+ name_result: name_result,
+ full_result: full_result
+ }
+ end
+
+ setup context do
+ {
+ :ok,
+ opts: %{node: get_rabbit_hostname(), timeout: context[:test_timeout]}
+ }
+ end
+
+ test "merge_defaults with no command, print just use the names" do
+ assert match?({["name", "state"], %{}}, @command.merge_defaults([], %{}))
+ end
+
+ test "validate: return bad_info_key on a single bad arg", context do
+ assert @command.validate(["quack"], context[:opts]) ==
+ {:validation_failure, {:bad_info_key, [:quack]}}
+ end
+
+ test "validate: multiple bad args return a list of bad info key values", context do
+ assert @command.validate(["quack", "oink"], context[:opts]) ==
+ {:validation_failure, {:bad_info_key, [:oink, :quack]}}
+ end
+
+ test "validate: return bad_info_key on mix of good and bad args", context do
+ assert @command.validate(["quack", "name"], context[:opts]) ==
+ {:validation_failure, {:bad_info_key, [:quack]}}
+ assert @command.validate(["name", "oink"], context[:opts]) ==
+ {:validation_failure, {:bad_info_key, [:oink]}}
+ assert @command.validate(["name", "oink", "state"], context[:opts]) ==
+ {:validation_failure, {:bad_info_key, [:oink]}}
+ end
+
+ test "run: on a bad RabbitMQ node, return a badrpc" do
+ opts = %{node: :jake@thedog, timeout: 200}
+ assert match?({:badrpc, _}, @command.run(["name"], opts))
+ end
+
+ @tag test_timeout: :infinity
+ test "run: with the name tag, print just the names", context do
+ matches_found = @command.run(["name"], context[:opts])
+ assert Enum.all?(context[:name_result], fn(feature_name) ->
+ Enum.find(matches_found, fn(found) -> found == feature_name end)
+ end)
+ end
+
+ @tag test_timeout: :infinity
+ test "run: duplicate args do not produce duplicate entries", context do
+ # checks to ensure that all expected feature flags are in the results
+ matches_found = @command.run(["name", "name"], context[:opts])
+ assert Enum.all?(context[:name_result], fn(feature_name) ->
+ Enum.find(matches_found, fn(found) -> found == feature_name end)
+ end)
+ end
+
+ @tag test_timeout: 30000
+ test "run: sufficiently long timeouts don't interfere with results", context do
+ matches_found = @command.run(["name", "state"], context[:opts])
+ assert Enum.all?(context[:full_result], fn(feature_name) ->
+ Enum.find(matches_found, fn(found) -> found == feature_name end)
+ end)
+ end
+
+ @tag test_timeout: 0, username: "guest"
+ test "run: timeout causes command to return a bad RPC", context do
+ assert @command.run(["name", "state"], context[:opts]) ==
+ {:badrpc, :timeout}
+ end
+
+ @tag test_timeout: :infinity
+ test "banner", context do
+ assert @command.banner([], context[:opts]) =~ ~r/Listing feature flags \.\.\./
+ end
+end
diff --git a/deps/rabbitmq_cli/test/ctl/list_global_parameters_command_test.exs b/deps/rabbitmq_cli/test/ctl/list_global_parameters_command_test.exs
new file mode 100644
index 0000000000..eabd6a3628
--- /dev/null
+++ b/deps/rabbitmq_cli/test/ctl/list_global_parameters_command_test.exs
@@ -0,0 +1,86 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+
+defmodule ListGlobalParametersCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Ctl.Commands.ListGlobalParametersCommand
+
+ @key :mqtt_default_vhosts
+ @value "{\"O=client,CN=dummy\":\"somevhost\"}"
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+ :ok
+ end
+
+ setup context do
+ on_exit(fn ->
+ clear_global_parameter context[:key]
+ end)
+
+ {
+ :ok,
+ opts: %{
+ node: get_rabbit_hostname(),
+ timeout: (context[:timeout] || :infinity),
+ }
+ }
+ end
+
+ test "validate: wrong number of arguments leads to an arg count error" do
+ assert @command.validate(["this", "is", "too", "many"], %{}) == {:validation_failure, :too_many_args}
+ end
+
+ @tag key: @key, value: @value
+ test "run: a well-formed command returns list of global parameters", context do
+ set_global_parameter(context[:key], @value)
+ @command.run([], context[:opts])
+ |> assert_parameter_list(context)
+ end
+
+ @tag key: @key, value: @value
+ test "run: zero timeout return badrpc", context do
+ set_global_parameter(context[:key], @value)
+ assert @command.run([], Map.put(context[:opts], :timeout, 0)) == {:badrpc, :timeout}
+ end
+
+ test "run: multiple parameters returned in list", context do
+ initial = for param <- @command.run([], context[:opts]), do: Map.new(param)
+ parameters = [
+ %{name: :global_param_1, value: "{\"key1\":\"value1\"}"},
+ %{name: :global_param_2, value: "{\"key2\":\"value2\"}"}
+ ]
+
+ Enum.each(parameters, fn(%{name: name, value: value}) ->
+ set_global_parameter(name, value)
+ on_exit(fn ->
+ clear_global_parameter(name)
+ end)
+ end)
+
+ parameters = initial ++ parameters
+ params = for param <- @command.run([], context[:opts]), do: Map.new(param)
+
+ assert MapSet.new(params) == MapSet.new(parameters)
+ end
+
+ @tag key: @key, value: @value
+ test "banner", context do
+ assert @command.banner([], context[:opts])
+ =~ ~r/Listing global runtime parameters \.\.\./
+ end
+
+ # Checks each element of the first parameter against the expected context values
+ defp assert_parameter_list(params, context) do
+ [param | _] = params
+ assert MapSet.new(param) == MapSet.new([name: context[:key],
+ value: context[:value]])
+ end
+end
diff --git a/deps/rabbitmq_cli/test/ctl/list_hashes_command_test.exs b/deps/rabbitmq_cli/test/ctl/list_hashes_command_test.exs
new file mode 100644
index 0000000000..2869479a8a
--- /dev/null
+++ b/deps/rabbitmq_cli/test/ctl/list_hashes_command_test.exs
@@ -0,0 +1,29 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule ListHashesCommandTest do
+ use ExUnit.Case
+ @command RabbitMQ.CLI.Ctl.Commands.ListHashesCommand
+
+ test "merge_defaults: nothing to do" do
+ assert @command.merge_defaults([], %{}) == {[], %{}}
+ end
+
+ test "validate: treats positional arguments as a failure" do
+ assert @command.validate(["extra-arg"], %{}) == {:validation_failure, :too_many_args}
+ end
+
+ test "validate: treats empty positional arguments and default switches as a success" do
+ assert @command.validate([], %{}) == :ok
+ end
+
+ test "run: lists hashes", _context do
+ assert match?(
+ {:ok, _},
+ @command.run([], %{})
+ )
+ end
+end
diff --git a/deps/rabbitmq_cli/test/ctl/list_operator_policies_command_test.exs b/deps/rabbitmq_cli/test/ctl/list_operator_policies_command_test.exs
new file mode 100644
index 0000000000..6c86fe8441
--- /dev/null
+++ b/deps/rabbitmq_cli/test/ctl/list_operator_policies_command_test.exs
@@ -0,0 +1,142 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+
+defmodule ListOperatorPoliciesCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Ctl.Commands.ListOperatorPoliciesCommand
+
+ @vhost "test1"
+ @root "/"
+ @key "message-expiry"
+ @pattern "^queue\."
+ @value "{\"message-ttl\":10}"
+ @apply_to "all"
+ @default_options %{vhost: "/", table_headers: true}
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ add_vhost @vhost
+
+ on_exit(fn ->
+ delete_vhost @vhost
+ end)
+
+ :ok
+ end
+
+ setup context do
+ on_exit(fn ->
+ clear_operator_policy context[:vhost], context[:key]
+ end)
+ {
+ :ok,
+ opts: %{
+ node: get_rabbit_hostname(),
+ timeout: (context[:timeout] || :infinity),
+ vhost: context[:vhost],
+ apply_to: @apply_to,
+ priority: 0
+ }
+ }
+ end
+
+ test "merge_defaults: default vhost is '/'" do
+ assert @command.merge_defaults([], %{}) == {[], @default_options}
+ assert @command.merge_defaults([], %{vhost: "non_default"}) == {[], %{vhost: "non_default",
+ table_headers: true}}
+ end
+
+ test "validate: providing too many arguments fails validation" do
+ assert @command.validate(["many"], %{}) == {:validation_failure, :too_many_args}
+ assert @command.validate(["too", "many"], %{}) == {:validation_failure, :too_many_args}
+ assert @command.validate(["this", "is", "too", "many"], %{}) == {:validation_failure, :too_many_args}
+ end
+
+ @tag key: @key, pattern: @pattern, value: @value, vhost: @vhost
+ test "run: a well-formed, host-specific command returns list of policies", context do
+ vhost_opts = Map.merge(context[:opts], %{vhost: context[:vhost]})
+ set_operator_policy(context[:vhost], context[:key], context[:pattern], @value)
+ @command.run([], vhost_opts)
+ |> assert_operator_policy_list(context)
+ end
+
+ test "run: an unreachable node throws a badrpc" do
+ opts = %{node: :jake@thedog, vhost: @vhost, timeout: 200}
+
+ assert match?({:badrpc, _}, @command.run([], opts))
+ end
+
+ @tag key: @key, pattern: @pattern, value: @value, vhost: @root
+ test "run: a well-formed command with no vhost runs against the default one", context do
+
+ set_operator_policy("/", context[:key], context[:pattern], @value)
+ on_exit(fn ->
+ clear_operator_policy("/", context[:key])
+ end)
+
+ @command.run([], context[:opts])
+ |> assert_operator_policy_list(context)
+ end
+
+ @tag key: @key, pattern: @pattern, value: @value, vhost: @vhost
+ test "run: providing a timeout of 0 returns a badrpc", context do
+ set_operator_policy(context[:vhost], context[:key], context[:pattern], @value)
+ assert @command.run([], Map.put(context[:opts], :timeout, 0)) == {:badrpc, :timeout}
+ end
+
+ @tag key: @key, pattern: @pattern, value: @value, vhost: "bad-vhost"
+ test "run: providing a non-existent vhost returns an error", context do
+ vhost_opts = Map.merge(context[:opts], %{vhost: context[:vhost]})
+
+ assert @command.run(
+ [],
+ vhost_opts
+ ) == {:error, {:no_such_vhost, context[:vhost]}}
+ end
+
+ @tag vhost: @vhost
+ test "run: when multiple policies exist in the vhost, returns them all", context do
+ policies = [
+ %{vhost: @vhost, name: "some-policy", pattern: "foo", definition: "{\"message-ttl\":10}", 'apply-to': "all", priority: 0},
+ %{vhost: @vhost, name: "other-policy", pattern: "bar", definition: "{\"expires\":20}", 'apply-to': "all", priority: 0}
+ ]
+ policies
+ |> Enum.map(
+ fn(%{name: name, pattern: pattern, definition: value}) ->
+ set_operator_policy(context[:vhost], name, pattern, value)
+ on_exit(fn ->
+ clear_operator_policy(context[:vhost], name)
+ end)
+ end)
+
+ pols = for policy <- @command.run([], context[:opts]), do: Map.new(policy)
+
+ assert MapSet.new(pols) == MapSet.new(policies)
+ end
+
+ @tag key: @key, pattern: @pattern, value: @value, vhost: @vhost
+ test "banner", context do
+ vhost_opts = Map.merge(context[:opts], %{vhost: context[:vhost]})
+
+ assert @command.banner([], vhost_opts)
+ =~ ~r/Listing operator policy overrides for vhost \"#{context[:vhost]}\" \.\.\./
+ end
+
+ # Checks each element of the first policy against the expected context values
+ defp assert_operator_policy_list(policies, context) do
+ [policy] = policies
+ assert MapSet.new(policy) == MapSet.new([name: context[:key],
+ pattern: context[:pattern],
+ definition: context[:value],
+ vhost: context[:vhost],
+ priority: context[:opts][:priority],
+ "apply-to": context[:opts][:apply_to]])
+ end
+end
diff --git a/deps/rabbitmq_cli/test/ctl/list_parameters_command_test.exs b/deps/rabbitmq_cli/test/ctl/list_parameters_command_test.exs
new file mode 100644
index 0000000000..f42e55353a
--- /dev/null
+++ b/deps/rabbitmq_cli/test/ctl/list_parameters_command_test.exs
@@ -0,0 +1,154 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+
+defmodule ListParametersCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Ctl.Commands.ListParametersCommand
+
+ @vhost "test1"
+ @root "/"
+ @component_name "federation-upstream"
+ @key "reconnect-delay"
+ @value "{\"uri\":\"amqp://\"}"
+ @default_options %{vhost: "/", table_headers: true}
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+ node = get_rabbit_hostname()
+
+ {:ok, plugins_file} = :rabbit_misc.rpc_call(node,
+ :application, :get_env,
+ [:rabbit, :enabled_plugins_file])
+ {:ok, plugins_dir} = :rabbit_misc.rpc_call(node,
+ :application, :get_env,
+ [:rabbit, :plugins_dir])
+ rabbitmq_home = :rabbit_misc.rpc_call(node, :code, :lib_dir, [:rabbit])
+
+ {:ok, [enabled_plugins]} = :file.consult(plugins_file)
+
+ opts = %{enabled_plugins_file: plugins_file,
+ plugins_dir: plugins_dir,
+ rabbitmq_home: rabbitmq_home}
+
+ set_enabled_plugins([:rabbitmq_stomp, :rabbitmq_federation], :online, node, opts)
+
+ add_vhost @vhost
+
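+ # federation must be enabled so the "federation-upstream" component referenced below accepts parameters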
+ enable_federation_plugin()
+
+ on_exit(fn ->
+ set_enabled_plugins(enabled_plugins, :online, get_rabbit_hostname(), opts)
+ delete_vhost @vhost
+ end)
+
+ :ok
+ end
+
+ setup context do
+ on_exit(fn ->
+ clear_parameter context[:vhost], context[:component_name], context[:key]
+ end)
+ {
+ :ok,
+ opts: %{
+ node: get_rabbit_hostname(),
+ timeout: (context[:timeout] || :infinity),
+ vhost: context[:vhost]
+ }
+ }
+ end
+
+ test "merge_defaults: defaults can be overridden" do
+ assert @command.merge_defaults([], %{}) == {[], @default_options}
+ assert @command.merge_defaults([], %{vhost: "non_default"}) == {[], %{vhost: "non_default",
+ table_headers: true}}
+ end
+
+ test "validate: wrong number of arguments leads to an arg count error" do
+ assert @command.validate(["this", "is", "too", "many"], %{}) == {:validation_failure, :too_many_args}
+ end
+
+ @tag component_name: @component_name, key: @key, value: @value, vhost: @vhost
+ test "run: a well-formed, host-specific command returns list of parameters", context do
+ vhost_opts = Map.merge(context[:opts], %{vhost: context[:vhost]})
+ set_parameter(context[:vhost], context[:component_name], context[:key], @value)
+ @command.run([], vhost_opts)
+ |> assert_parameter_list(context)
+ end
+
+ test "run: throws a badrpc when instructed to contact an unreachable RabbitMQ node" do
+ opts = %{node: :jake@thedog, vhost: @vhost, timeout: 200}
+
+ assert match?({:badrpc, _}, @command.run([], opts))
+ end
+
+ @tag component_name: @component_name, key: @key, value: @value, vhost: @root
+ test "run: a well-formed command with no vhost runs against the default", context do
+
+ set_parameter("/", context[:component_name], context[:key], @value)
+ on_exit(fn ->
+ clear_parameter("/", context[:component_name], context[:key])
+ end)
+
+ @command.run([], context[:opts])
+ |> assert_parameter_list(context)
+ end
+
+ @tag component_name: @component_name, key: @key, value: @value, vhost: @vhost
+ test "run: zero timeout return badrpc", context do
+ set_parameter(context[:vhost], context[:component_name], context[:key], @value)
+ assert @command.run([], Map.put(context[:opts], :timeout, 0)) == {:badrpc, :timeout}
+ end
+
+ @tag component_name: @component_name, key: @key, value: @value, vhost: "bad-vhost"
+ test "run: an invalid vhost returns a no-such-vhost error", context do
+ vhost_opts = Map.merge(context[:opts], %{vhost: context[:vhost]})
+
+ assert @command.run(
+ [],
+ vhost_opts
+ ) == {:error, {:no_such_vhost, context[:vhost]}}
+ end
+
+ @tag vhost: @vhost
+ test "run: multiple parameters returned in list", context do
+ parameters = [
+ %{component: "federation-upstream", name: "my-upstream", value: "{\"uri\":\"amqp://\"}"},
+ %{component: "exchange-delete-in-progress", name: "my-key", value: "{\"foo\":\"bar\"}"}
+ ]
+ parameters
+ |> Enum.map(
+ fn(%{component: component, name: name, value: value}) ->
+ set_parameter(context[:vhost], component, name, value)
+ on_exit(fn ->
+ clear_parameter(context[:vhost], component, name)
+ end)
+ end)
+
+ params = for param <- @command.run([], context[:opts]), do: Map.new(param)
+
+ assert MapSet.new(params) == MapSet.new(parameters)
+ end
+
+ @tag component_name: @component_name, key: @key, value: @value, vhost: @vhost
+ test "banner", context do
+ vhost_opts = Map.merge(context[:opts], %{vhost: context[:vhost]})
+
+ assert @command.banner([], vhost_opts)
+ =~ ~r/Listing runtime parameters for vhost \"#{context[:vhost]}\" \.\.\./
+ end
+
+ # Checks each element of the first parameter against the expected context values
+ defp assert_parameter_list(params, context) do
+ [param] = params
+ assert MapSet.new(param) == MapSet.new([component: context[:component_name],
+ name: context[:key],
+ value: context[:value]])
+ end
+end
diff --git a/deps/rabbitmq_cli/test/ctl/list_permissions_command_test.exs b/deps/rabbitmq_cli/test/ctl/list_permissions_command_test.exs
new file mode 100644
index 0000000000..eda8f001af
--- /dev/null
+++ b/deps/rabbitmq_cli/test/ctl/list_permissions_command_test.exs
@@ -0,0 +1,92 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+
+defmodule ListPermissionsCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Ctl.Commands.ListPermissionsCommand
+
+ @vhost "test1"
+ @user "guest"
+ @root "/"
+ @default_timeout :infinity
+ @default_options %{vhost: "/", table_headers: true}
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ add_vhost @vhost
+ set_permissions @user, @vhost, ["^guest-.*", ".*", ".*"]
+
+ on_exit([], fn ->
+ delete_vhost @vhost
+ end)
+
+ :ok
+ end
+
+ setup context do
+ {
+ :ok,
+ opts: %{
+ node: get_rabbit_hostname(),
+ timeout: context[:test_timeout],
+ vhost: "/"
+ }
+ }
+ end
+
+ test "merge_defaults adds default options" do
+ assert @command.merge_defaults([], %{}) == {[], @default_options}
+ end
+
+ test "merge_defaults: defaults can be overridden" do
+ assert @command.merge_defaults([], %{}) == {[], @default_options}
+ assert @command.merge_defaults([], %{vhost: "non_default"}) == {[], %{vhost: "non_default",
+ table_headers: true}}
+ end
+
+ test "validate: invalid parameters yield an arg count error" do
+ assert @command.validate(["extra"], %{}) == {:validation_failure, :too_many_args}
+ end
+
+ test "run: on a bad RabbitMQ node, return a badrpc" do
+ opts = %{node: :jake@thedog, vhost: "/", timeout: 200}
+
+ assert match?({:badrpc, _}, @command.run([], opts))
+ end
+
+ @tag test_timeout: @default_timeout, vhost: @vhost
+ test "run: specifying a vhost returns the targeted vhost permissions", context do
+ assert @command.run(
+ [],
+ Map.merge(context[:opts], %{vhost: @vhost})
+ ) == [[user: "guest", configure: "^guest-.*", write: ".*", read: ".*"]]
+ end
+
+ @tag test_timeout: 30000
+ test "run: sufficiently long timeouts don't interfere with results", context do
+ results = @command.run([], context[:opts])
+ Enum.all?([[user: "guest", configure: ".*", write: ".*", read: ".*"]], fn(perm) ->
+ Enum.find(results, fn(found) -> found == perm end)
+ end)
+ end
+
+ @tag test_timeout: 0
+ test "run: timeout causes command to return a bad RPC", context do
+ assert @command.run([], context[:opts]) ==
+ {:badrpc, :timeout}
+ end
+
+ @tag vhost: @root
+ test "banner", context do
+ ctx = Map.merge(context[:opts], %{vhost: @vhost})
+ assert @command.banner([], ctx)
+ =~ ~r/Listing permissions for vhost \"#{Regex.escape(ctx[:vhost])}\" \.\.\./
+ end
+end
diff --git a/deps/rabbitmq_cli/test/ctl/list_policies_command_test.exs b/deps/rabbitmq_cli/test/ctl/list_policies_command_test.exs
new file mode 100644
index 0000000000..49ef6ee856
--- /dev/null
+++ b/deps/rabbitmq_cli/test/ctl/list_policies_command_test.exs
@@ -0,0 +1,144 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+
+defmodule ListPoliciesCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Ctl.Commands.ListPoliciesCommand
+
+ @vhost "test1"
+ @default_vhost "/"
+ @key "federate"
+ @pattern "^fed\."
+ @value "{\"federation-upstream-set\":\"all\"}"
+ @apply_to "all"
+ @default_options %{vhost: "/", table_headers: true}
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ add_vhost @vhost
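+ # enabling federation lets the federation-upstream-set policy definitions used below validate (assumed; mirrors the parameters suite)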
+ enable_federation_plugin()
+
+ on_exit(fn ->
+ delete_vhost @vhost
+ end)
+
+ :ok
+ end
+
+ setup context do
+
+ on_exit(fn ->
+ clear_policy context[:vhost], context[:key]
+ end)
+
+ {
+ :ok,
+ opts: %{
+ node: get_rabbit_hostname(),
+ timeout: (context[:timeout] || :infinity),
+ vhost: context[:vhost],
+ apply_to: @apply_to,
+ priority: 0
+ }
+ }
+ end
+
+ test "merge_defaults: default vhost is '/'" do
+ assert @command.merge_defaults([], %{}) == {[], @default_options}
+ assert @command.merge_defaults([], %{vhost: "non_default"}) == {[], %{vhost: "non_default",
+ table_headers: true}}
+ end
+
+ test "validate: providing too many arguments fails validation" do
+ assert @command.validate(["many"], %{}) == {:validation_failure, :too_many_args}
+ assert @command.validate(["too", "many"], %{}) == {:validation_failure, :too_many_args}
+ assert @command.validate(["this", "is", "too", "many"], %{}) == {:validation_failure, :too_many_args}
+ end
+
+ @tag key: @key, pattern: @pattern, value: @value, vhost: @vhost
+ test "run: a well-formed, host-specific command returns list of policies", context do
+ vhost_opts = Map.merge(context[:opts], %{vhost: context[:vhost]})
+ set_policy(context[:vhost], context[:key], context[:pattern], @value)
+ @command.run([], vhost_opts)
+ |> assert_policy_list(context)
+ end
+
+ test "run: an unreachable node throws a badrpc" do
+ opts = %{node: :jake@thedog, vhost: @vhost, timeout: 200}
+
+ assert match?({:badrpc, _}, @command.run([], opts))
+ end
+
+ @tag key: @key, pattern: @pattern, value: @value, vhost: @default_vhost
+ test "run: a well-formed command with no vhost runs against the default one", context do
+ set_policy("/", context[:key], context[:pattern], @value)
+ on_exit(fn ->
+ clear_policy("/", context[:key])
+ end)
+
+ @command.run([], context[:opts])
+ |> assert_policy_list(context)
+ end
+
+ @tag key: @key, pattern: @pattern, value: @value, vhost: @vhost
+ test "run: providing a timeout of 0 returns a badrpc", context do
+ set_policy(context[:vhost], context[:key], context[:pattern], @value)
+ assert @command.run([], Map.put(context[:opts], :timeout, 0)) == {:badrpc, :timeout}
+ end
+
+ @tag key: @key, pattern: @pattern, value: @value, vhost: "bad-vhost"
+ test "run: providing a non-existent vhost returns an error", context do
+ vhost_opts = Map.merge(context[:opts], %{vhost: context[:vhost]})
+
+ assert @command.run(
+ [],
+ vhost_opts
+ ) == {:error, {:no_such_vhost, context[:vhost]}}
+ end
+
+ @tag vhost: @vhost
+ test "run: when multiple policies exist in the vhost, returns them all", context do
+ policies = [
+ %{vhost: @vhost, name: "some-policy", pattern: "foo", definition: "{\"federation-upstream-set\":\"all\"}", 'apply-to': "all", priority: 0},
+ %{vhost: @vhost, name: "other-policy", pattern: "bar", definition: "{\"ha-mode\":\"all\"}", 'apply-to': "all", priority: 0}
+ ]
+ policies
+ |> Enum.map(
+ fn(%{name: name, pattern: pattern, definition: value}) ->
+ set_policy(context[:vhost], name, pattern, value)
+ on_exit(fn ->
+ clear_policy(context[:vhost], name)
+ end)
+ end)
+
+ pols = for policy <- @command.run([], context[:opts]), do: Map.new(policy)
+
+ assert MapSet.new(pols) == MapSet.new(policies)
+ end
+
+ @tag key: @key, pattern: @pattern, value: @value, vhost: @vhost
+ test "banner", context do
+ vhost_opts = Map.merge(context[:opts], %{vhost: context[:vhost]})
+
+ assert @command.banner([], vhost_opts)
+ =~ ~r/Listing policies for vhost \"#{context[:vhost]}\" \.\.\./
+ end
+
+ # Checks each element of the first policy against the expected context values
+ defp assert_policy_list(policies, context) do
+ [policy | _] = policies
+ assert MapSet.new(policy) == MapSet.new([name: context[:key],
+ pattern: context[:pattern],
+ definition: context[:value],
+ vhost: context[:vhost],
+ priority: context[:opts][:priority],
+ "apply-to": context[:opts][:apply_to]])
+ end
+end
diff --git a/deps/rabbitmq_cli/test/ctl/list_queues_command_test.exs b/deps/rabbitmq_cli/test/ctl/list_queues_command_test.exs
new file mode 100644
index 0000000000..a6635c7933
--- /dev/null
+++ b/deps/rabbitmq_cli/test/ctl/list_queues_command_test.exs
@@ -0,0 +1,145 @@
+defmodule ListQueuesCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Ctl.Commands.ListQueuesCommand
+
+ @vhost "test1"
+ @user "guest"
+ @default_timeout 15000
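+ # 15s default; the timeout-behaviour tests override this via the :test_timeout tag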
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ reset_vm_memory_high_watermark()
+ delete_all_queues()
+ close_all_connections(get_rabbit_hostname())
+
+ on_exit([], fn ->
+ delete_all_queues()
+ close_all_connections(get_rabbit_hostname())
+ end)
+
+ :ok
+ end
+
+ setup context do
+ add_vhost @vhost
+ set_permissions @user, @vhost, [".*", ".*", ".*"]
+ on_exit(fn ->
+ delete_vhost @vhost
+ end)
+ {
+ :ok,
+ opts: %{
+ quiet: true,
+ node: get_rabbit_hostname(),
+ timeout: context[:test_timeout] || @default_timeout,
+ vhost: @vhost,
+ offline: false,
+ online: false,
+ local: false
+ }
+ }
+ end
+
+ test "merge_defaults: no info keys returns names and message count" do
+ assert match?({["name", "messages"], _}, @command.merge_defaults([], %{}))
+ end
+
+ test "validate: returns bad_info_key on a single bad arg", context do
+ assert @command.validate(["quack"], context[:opts]) ==
+ {:validation_failure, {:bad_info_key, [:quack]}}
+ end
+
+ test "validate: multiple bad args return a list of bad info key values", context do
+ assert @command.validate(["quack", "oink"], context[:opts]) ==
+ {:validation_failure, {:bad_info_key, [:oink, :quack]}}
+ end
+
+ test "validate: return bad_info_key on mix of good and bad args", context do
+ assert @command.validate(["quack", "messages"], context[:opts]) ==
+ {:validation_failure, {:bad_info_key, [:quack]}}
+ assert @command.validate(["name", "oink"], context[:opts]) ==
+ {:validation_failure, {:bad_info_key, [:oink]}}
+ assert @command.validate(["name", "oink", "messages"], context[:opts]) ==
+ {:validation_failure, {:bad_info_key, [:oink]}}
+ end
+
+ @tag test_timeout: 0
+ test "run: timeout causes command to return badrpc", context do
+ assert run_command_to_list(@command, [["name"], context[:opts]]) ==
+ [{:badrpc, {:timeout, 0.0, "Some queue(s) are unresponsive, use list_unresponsive_queues command."}}]
+ end
+
+ @tag test_timeout: 1
+ test "run: command timeout (several thousands queues in 1ms) return badrpc with timeout value in seconds", context do
+ # we assume it will take longer than 1 ms to list thousands of queues
+ n = 5000
+ for i <- 1..n do
+ declare_queue("test_queue_" <> Integer.to_string(i), @vhost)
+ end
+ assert run_command_to_list(@command, [["name"], context[:opts]]) ==
+ [{:badrpc, {:timeout, 0.001, "Some queue(s) are unresponsive, use list_unresponsive_queues command."}}]
+ for i <- 1..n do
+ delete_queue("test_queue_" <> Integer.to_string(i), @vhost)
+ end
+ end
+
+ @tag test_timeout: 5000
+ test "run: return multiple queues", context do
+ declare_queue("test_queue_1", @vhost)
+ publish_messages(@vhost, "test_queue_1", 3)
+ declare_queue("test_queue_2", @vhost)
+ publish_messages(@vhost, "test_queue_2", 1)
+ assert Keyword.equal?(run_command_to_list(@command, [["name", "messages"], context[:opts]]),
+ [[name: "test_queue_1", messages: 3],
+ [name: "test_queue_2", messages: 1]])
+ end
+
+ @tag test_timeout: 5000
+ test "run: info keys filter single key", context do
+ declare_queue("test_queue_1", @vhost)
+ declare_queue("test_queue_2", @vhost)
+ assert Keyword.equal?(run_command_to_list(@command, [["name"], context[:opts]]),
+ [[name: "test_queue_1"],
+ [name: "test_queue_2"]])
+ end
+
+ @tag test_timeout: 5000
+ test "run: info keys add additional keys", context do
+ declare_queue("durable_queue", @vhost, true)
+ publish_messages(@vhost, "durable_queue", 3)
+ declare_queue("auto_delete_queue", @vhost, false, true)
+ publish_messages(@vhost, "auto_delete_queue", 1)
+ assert Keyword.equal?(
+ run_command_to_list(@command, [["name", "messages", "durable", "auto_delete"], context[:opts]]),
+ [[name: "durable_queue", messages: 3, durable: true, auto_delete: false],
+ [name: "auto_delete_queue", messages: 1, durable: false, auto_delete: true]])
+ end
+
+ @tag test_timeout: 5000
+ test "run: info keys order is preserved", context do
+ declare_queue("durable_queue", @vhost, true)
+ publish_messages(@vhost, "durable_queue", 3)
+ declare_queue("auto_delete_queue", @vhost, false, true)
+ publish_messages(@vhost, "auto_delete_queue", 1)
+ assert Keyword.equal?(
+ run_command_to_list(@command, [["messages", "durable", "name", "auto_delete"], context[:opts]]),
+ [[messages: 3, durable: true, name: "durable_queue", auto_delete: false],
+ [messages: 1, durable: false, name: "auto_delete_queue", auto_delete: true]])
+ end
+
+ @tag test_timeout: 5000
+ test "run: specifying a vhost returns the targeted vhost queues", context do
+ other_vhost = "other_vhost"
+ add_vhost other_vhost
+ on_exit(fn ->
+ delete_vhost other_vhost
+ end)
+ declare_queue("test_queue_1", @vhost)
+ declare_queue("test_queue_2", other_vhost)
+ assert run_command_to_list(@command, [["name"], context[:opts]]) == [[name: "test_queue_1"]]
+ assert run_command_to_list(@command, [["name"], %{context[:opts] | :vhost => other_vhost}]) == [[name: "test_queue_2"]]
+ end
+end
diff --git a/deps/rabbitmq_cli/test/ctl/list_topic_permissions_command_test.exs b/deps/rabbitmq_cli/test/ctl/list_topic_permissions_command_test.exs
new file mode 100644
index 0000000000..8de1f2536a
--- /dev/null
+++ b/deps/rabbitmq_cli/test/ctl/list_topic_permissions_command_test.exs
@@ -0,0 +1,85 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+
+defmodule ListTopicPermissionsCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Ctl.Commands.ListTopicPermissionsCommand
+
+ @vhost "test1"
+ @user "user1"
+ @password "password"
+ @root "/"
+ @default_timeout :infinity
+ @default_options %{vhost: "/", table_headers: true}
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ add_vhost(@vhost)
+ add_user(@user, @password)
+ set_topic_permissions(@user, @vhost, "amq.topic", "^a", "^b")
+ set_topic_permissions(@user, @vhost, "topic1", "^a", "^b")
+
+ on_exit([], fn ->
+ clear_topic_permissions(@user, @vhost)
+ delete_user(@user)
+ delete_vhost @vhost
+ end)
+
+ :ok
+ end
+
+ setup context do
+ {
+ :ok,
+ opts: %{
+ node: get_rabbit_hostname(),
+ timeout: context[:test_timeout],
+ vhost: "/"
+ }
+ }
+ end
+
+ test "merge_defaults adds default vhost" do
+ assert @command.merge_defaults([], %{}) == {[], @default_options}
+ end
+
+ test "merge_defaults: defaults can be overridden" do
+ assert @command.merge_defaults([], %{}) == {[], @default_options}
+ assert @command.merge_defaults([], %{vhost: "non_default"}) == {[], %{vhost: "non_default",
+ table_headers: true}}
+ end
+
+ test "validate: does not expect any parameter" do
+ assert @command.validate(["extra"], %{}) == {:validation_failure, :too_many_args}
+ end
+
+ test "run: throws a badrpc when instructed to contact an unreachable RabbitMQ node" do
+ opts = %{node: :jake@thedog, vhost: "/", timeout: 200}
+
+ assert match?({:badrpc, _}, @command.run([], opts))
+ end
+
+ @tag test_timeout: @default_timeout, vhost: @vhost
+ test "run: specifying a vhost returns the topic permissions for the targeted vhost", context do
+ permissions = @command.run([], Map.merge(context[:opts], %{vhost: @vhost}))
+ assert Enum.count(permissions) == 2
+ assert Enum.sort(permissions) == [
+ [user: @user, exchange: "amq.topic", write: "^a", read: "^b"],
+ [user: @user, exchange: "topic1", write: "^a", read: "^b"]
+ ]
+ end
+
+ @tag vhost: @root
+ test "banner", context do
+ ctx = Map.merge(context[:opts], %{vhost: @vhost})
+ assert @command.banner([], ctx)
+ =~ ~r/Listing topic permissions for vhost \"#{Regex.escape(ctx[:vhost])}\" \.\.\./
+ end
+end
diff --git a/deps/rabbitmq_cli/test/ctl/list_user_limits_command_test.exs b/deps/rabbitmq_cli/test/ctl/list_user_limits_command_test.exs
new file mode 100644
index 0000000000..7b0370f940
--- /dev/null
+++ b/deps/rabbitmq_cli/test/ctl/list_user_limits_command_test.exs
@@ -0,0 +1,103 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule ListUserLimitsCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Ctl.Commands.ListUserLimitsCommand
+
+ @user "guest"
+ @user1 "test_user1"
+ @password1 "password1"
+ @connection_limit_defn "{\"max-connections\":100}"
+ @channel_limit_defn "{\"max-channels\":1000}"
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ :ok
+ end
+
+ setup context do
+ user = context[:user] || @user
+
+ clear_user_limits(user)
+
+ on_exit(context, fn ->
+ clear_user_limits(user)
+ end)
+
+ {
+ :ok,
+ opts: %{
+ node: get_rabbit_hostname(),
+ global: true
+ },
+ user: user
+ }
+ end
+
+ test "merge_defaults: does not change defined user" do
+ assert match?({[], %{user: "test_user"}}, @command.merge_defaults([], %{user: "test_user"}))
+ end
+
+ test "validate: providing arguments fails validation" do
+ assert @command.validate(["many"], %{}) == {:validation_failure, :too_many_args}
+ assert @command.validate(["too", "many"], %{}) == {:validation_failure, :too_many_args}
+ end
+
+ test "run: a well-formed command returns an empty list if there are no limits", context do
+ assert @command.run([], context[:opts]) == []
+ end
+
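+  # note: a per-user query returns a JSON object string, while the global
+  # query returns a list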
+ test "run: a well-formed user specific command returns an empty json object if there are no limits" do
+ assert @command.run([], %{node: get_rabbit_hostname(),
+ user: @user}) == "{}"
+ end
+
+ test "run: list limits for all users", context do
+ add_user(@user1, @password1)
+ on_exit(fn() ->
+ delete_user(@user1)
+ end)
+ set_user_limits(@user, @connection_limit_defn)
+ set_user_limits(@user1, @channel_limit_defn)
+
+ assert Enum.sort(@command.run([], context[:opts])) ==
+ Enum.sort([[user: @user, limits: @connection_limit_defn],
+ [user: @user1, limits: @channel_limit_defn]])
+ end
+
+ test "run: list limits for a single user", context do
+ user_opts = Map.put(context[:opts], :user, @user)
+ set_user_limits(@user, @connection_limit_defn)
+
+ assert @command.run([], user_opts) ==
+ [[user: @user, limits: @connection_limit_defn]]
+ end
+
+ test "run: an unreachable node throws a badrpc" do
+ opts = %{node: :jake@thedog, user: "guest", timeout: 200}
+
+ assert match?({:badrpc, _}, @command.run([], opts))
+ end
+
+ @tag user: "user"
+ test "run: providing a non-existent user reports an error", _context do
+ s = "non-existent-user"
+
+ assert @command.run([], %{node: get_rabbit_hostname(),
+ user: s}) == {:error, {:no_such_user, s}}
+ end
+
+ test "banner", context do
+ assert @command.banner([], %{user: context[:user]})
+ == "Listing limits for user \"#{context[:user]}\" ..."
+ assert @command.banner([], %{global: true})
+ == "Listing limits for all users ..."
+ end
+end
diff --git a/deps/rabbitmq_cli/test/ctl/list_user_permissions_command_test.exs b/deps/rabbitmq_cli/test/ctl/list_user_permissions_command_test.exs
new file mode 100644
index 0000000000..ddd44c0e01
--- /dev/null
+++ b/deps/rabbitmq_cli/test/ctl/list_user_permissions_command_test.exs
@@ -0,0 +1,91 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+
+defmodule ListUserPermissionsCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Ctl.Commands.ListUserPermissionsCommand
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ :ok
+ end
+
+ setup context do
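+    # guest's default permissions on the root vhost: full configure, write
+    # and read rights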
+ default_result = [
+ [
+ {:vhost,<<"/">>},
+ {:configure,<<".*">>},
+ {:write,<<".*">>},
+ {:read,<<".*">>}
+ ]
+ ]
+
+ no_such_user_result = {:error, {:no_such_user, context[:username]}}
+
+ {
+ :ok,
+ opts: %{node: get_rabbit_hostname(), timeout: context[:test_timeout]},
+ result: default_result,
+ no_such_user: no_such_user_result,
+ timeout: {:badrpc, :timeout}
+ }
+ end
+
+## -------------------------------- Usage -------------------------------------
+
+ test "validate: wrong number of arguments results in an arg count error" do
+ assert @command.validate([], %{}) == {:validation_failure, :not_enough_args}
+ assert @command.validate(["guest", "extra"], %{}) == {:validation_failure, :too_many_args}
+ end
+
+## ------------------------------- Username -----------------------------------
+
+ @tag test_timeout: :infinity, username: "guest"
+ test "run: valid user returns a list of permissions", context do
+ results = @command.run([context[:username]], context[:opts])
+ assert Enum.all?(context[:result], fn(perm) ->
+ Enum.find(results, fn(found) -> found == perm end)
+ end)
+ end
+
+ @tag test_timeout: :infinity, username: "interloper"
+ test "run: invalid user returns a no-such-user error", context do
+ assert @command.run(
+ [context[:username]], context[:opts]) == context[:no_such_user]
+ end
+
+## --------------------------------- Flags ------------------------------------
+
+ test "run: unreachable RabbitMQ node returns a badrpc" do
+ assert match?({:badrpc, _}, @command.run(["guest"], %{node: :jake@thedog, timeout: 200}))
+ end
+
+ @tag test_timeout: 30000, username: "guest"
+ test "run: long user-defined timeout doesn't interfere with operation", context do
+ results = @command.run([context[:username]], context[:opts])
+    assert Enum.all?(context[:result], fn(perm) ->
+ Enum.find(results, fn(found) -> found == perm end)
+ end)
+ end
+
+ @tag test_timeout: 0, username: "guest"
+ test "run: timeout causes command to return a bad RPC", context do
+ assert @command.run(
+ [context[:username]],
+ context[:opts]
+ ) == context[:timeout]
+ end
+
+ @tag test_timeout: :infinity
+ test "banner", context do
+    assert @command.banner([context[:username]], context[:opts])
+ =~ ~r/Listing permissions for user \"#{context[:username]}\" \.\.\./
+ end
+end
diff --git a/deps/rabbitmq_cli/test/ctl/list_user_topic_permissions_command_test.exs b/deps/rabbitmq_cli/test/ctl/list_user_topic_permissions_command_test.exs
new file mode 100644
index 0000000000..edf935de77
--- /dev/null
+++ b/deps/rabbitmq_cli/test/ctl/list_user_topic_permissions_command_test.exs
@@ -0,0 +1,72 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+
+defmodule ListUserTopicPermissionsCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Ctl.Commands.ListUserTopicPermissionsCommand
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ set_topic_permissions("guest", "/", "amq.topic", "^a", "^b")
+ set_topic_permissions("guest", "/", "topic1", "^a", "^b")
+
+ on_exit([], fn ->
+ clear_topic_permissions("guest", "/")
+ end)
+
+ :ok
+ end
+
+ setup context do
+ no_such_user_result = {:error, {:no_such_user, context[:username]}}
+
+ {
+ :ok,
+ opts: %{node: get_rabbit_hostname(), timeout: context[:test_timeout]},
+ no_such_user: no_such_user_result,
+ timeout: {:badrpc, :timeout}
+ }
+ end
+
+## -------------------------------- Usage -------------------------------------
+
+ test "validate: expect username argument" do
+ assert @command.validate([], %{}) == {:validation_failure, :not_enough_args}
+ assert @command.validate(["guest", "extra"], %{}) == {:validation_failure, :too_many_args}
+ end
+
+## ------------------------------- Username -----------------------------------
+
+ @tag test_timeout: :infinity, username: "guest"
+ test "run: valid user returns a list of topic permissions", context do
+ results = @command.run([context[:username]], context[:opts])
+ assert Enum.count(results) == 2
+ end
+
+ @tag test_timeout: :infinity, username: "interloper"
+ test "run: invalid user returns a no-such-user error", context do
+ assert @command.run(
+ [context[:username]], context[:opts]) == context[:no_such_user]
+ end
+
+## --------------------------------- Flags ------------------------------------
+
+ test "run: throws a badrpc when instructed to contact an unreachable RabbitMQ node" do
+ opts = %{node: :jake@thedog, timeout: 200}
+
+ assert match?({:badrpc, _}, @command.run(["guest"], opts))
+ end
+
+ @tag test_timeout: :infinity
+ test "banner", context do
+    assert @command.banner([context[:username]], context[:opts])
+ =~ ~r/Listing topic permissions for user \"#{context[:username]}\" \.\.\./
+ end
+end
diff --git a/deps/rabbitmq_cli/test/ctl/list_users_command_test.exs b/deps/rabbitmq_cli/test/ctl/list_users_command_test.exs
new file mode 100644
index 0000000000..bcfdb84b2b
--- /dev/null
+++ b/deps/rabbitmq_cli/test/ctl/list_users_command_test.exs
@@ -0,0 +1,74 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+
+defmodule ListUsersCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Ctl.Commands.ListUsersCommand
+
+ @user "user1"
+ @password "password"
+ @guest "guest"
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ std_result = [
+ [{:user,@guest},{:tags,[:administrator]}],
+ [{:user,@user},{:tags,[]}]
+ ]
+
+ {:ok, std_result: std_result}
+ end
+
+ setup context do
+ add_user @user, @password
+ on_exit([], fn -> delete_user @user end)
+
+ {:ok, opts: %{node: get_rabbit_hostname(), timeout: context[:test_timeout]}}
+ end
+
+ test "validate: On incorrect number of commands, return an arg count error" do
+ assert @command.validate(["extra"], %{}) == {:validation_failure, :too_many_args}
+ end
+
+ @tag test_timeout: 15000
+ test "run: On a successful query, return an array of lists of tuples", context do
+ matches_found = @command.run([], context[:opts])
+
+ assert Enum.all?(context[:std_result], fn(user) ->
+ Enum.find(matches_found, fn(found) -> found == user end)
+ end)
+ end
+
+ test "run: On an invalid rabbitmq node, return a bad rpc" do
+ assert match?({:badrpc, _}, @command.run([], %{node: :jake@thedog, timeout: 200}))
+ end
+
+ @tag test_timeout: 30000
+ test "run: sufficiently long timeouts don't interfere with results", context do
+ # checks to ensure that all expected users are in the results
+ matches_found = @command.run([], context[:opts])
+
+ assert Enum.all?(context[:std_result], fn(user) ->
+ Enum.find(matches_found, fn(found) -> found == user end)
+ end)
+ end
+
+ @tag test_timeout: 0
+ test "run: timeout causes command to return a bad RPC", context do
+ assert @command.run([], context[:opts]) ==
+ {:badrpc, :timeout}
+ end
+
+ @tag test_timeout: :infinity
+ test "banner", context do
+ assert @command.banner([], context[:opts])
+ =~ ~r/Listing users \.\.\./
+ end
+end
diff --git a/deps/rabbitmq_cli/test/ctl/list_vhost_limits_command_test.exs b/deps/rabbitmq_cli/test/ctl/list_vhost_limits_command_test.exs
new file mode 100644
index 0000000000..f07d40672a
--- /dev/null
+++ b/deps/rabbitmq_cli/test/ctl/list_vhost_limits_command_test.exs
@@ -0,0 +1,111 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+
+defmodule ListVhostLimitsCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Ctl.Commands.ListVhostLimitsCommand
+
+ @vhost "test_vhost"
+ @vhost1 "test_vhost1"
+ @connection_limit_defn "{\"max-connections\":100}"
+ @queue_limit_defn "{\"max-queues\":1000}"
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ add_vhost @vhost
+
+ on_exit([], fn ->
+ delete_vhost @vhost
+ end)
+
+ :ok
+ end
+
+ setup context do
+ vhost = context[:vhost] || @vhost
+
+ clear_vhost_limits(vhost)
+
+ on_exit(context, fn ->
+ clear_vhost_limits(vhost)
+ end)
+
+ {
+ :ok,
+ opts: %{
+ node: get_rabbit_hostname(),
+ global: true
+ },
+ vhost: vhost
+ }
+ end
+
+ test "merge_defaults: does not change defined vhost" do
+ assert match?({[], %{vhost: "test_vhost"}}, @command.merge_defaults([], %{vhost: "test_vhost"}))
+ end
+
+ test "validate: providing arguments fails validation" do
+ assert @command.validate(["many"], %{}) == {:validation_failure, :too_many_args}
+ assert @command.validate(["too", "many"], %{}) == {:validation_failure, :too_many_args}
+ assert @command.validate(["is", "too", "many"], %{}) == {:validation_failure, :too_many_args}
+ assert @command.validate(["this", "is", "too", "many"], %{}) == {:validation_failure, :too_many_args}
+ end
+
+ test "run: a well-formed command returns an empty list if there are no limits", context do
+ assert @command.run([], context[:opts]) == []
+ end
+
+ test "run: a well-formed vhost specific command returns an empty list if there are no limits", context do
+ vhost_opts = Map.put(context[:opts], :vhost, @vhost)
+ assert @command.run([], vhost_opts) == []
+ end
+
+ test "run: list limits for all vhosts", context do
+ add_vhost(@vhost1)
+ on_exit(fn() ->
+ delete_vhost(@vhost1)
+ end)
+ set_vhost_limits(@vhost, @connection_limit_defn)
+ set_vhost_limits(@vhost1, @queue_limit_defn)
+
+ assert Enum.sort(@command.run([], context[:opts])) ==
+ Enum.sort([[vhost: @vhost, limits: @connection_limit_defn],
+ [vhost: @vhost1, limits: @queue_limit_defn]])
+ end
+
+ test "run: list limits for a single vhost", context do
+ vhost_opts = Map.put(context[:opts], :vhost, @vhost)
+ set_vhost_limits(@vhost, @connection_limit_defn)
+
+ assert @command.run([], vhost_opts) ==
+ [[vhost: @vhost, limits: @connection_limit_defn]]
+ end
+
+ test "run: an unreachable node throws a badrpc" do
+ opts = %{node: :jake@thedog, vhost: "/", timeout: 200}
+
+ assert match?({:badrpc, _}, @command.run([], opts))
+ end
+
+ @tag vhost: "bad-vhost"
+ test "run: providing a non-existent vhost reports an error", _context do
+ s = "non-existent-vhost-a9sd89"
+
+ assert @command.run([], %{node: get_rabbit_hostname(),
+ vhost: s}) == {:error, {:no_such_vhost, s}}
+ end
+
+ test "banner", context do
+ assert @command.banner([], %{vhost: context[:vhost]})
+ == "Listing limits for vhost \"#{context[:vhost]}\" ..."
+ assert @command.banner([], %{global: true})
+ == "Listing limits for all vhosts ..."
+ end
+end
diff --git a/deps/rabbitmq_cli/test/ctl/list_vhosts_command_test.exs b/deps/rabbitmq_cli/test/ctl/list_vhosts_command_test.exs
new file mode 100644
index 0000000000..76f46af422
--- /dev/null
+++ b/deps/rabbitmq_cli/test/ctl/list_vhosts_command_test.exs
@@ -0,0 +1,160 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+
+defmodule ListVhostsCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Ctl.Commands.ListVhostsCommand
+
+ @vhost1 "test1"
+ @vhost2 "test2"
+ @root "/"
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ add_vhost @vhost1
+ add_vhost @vhost2
+ trace_off @root
+
+ on_exit([], fn ->
+ delete_vhost @vhost1
+ delete_vhost @vhost2
+ end)
+
+ name_result = [
+ [{:name, @vhost1}],
+ [{:name, @vhost2}],
+ [{:name, @root}]
+ ]
+
+ tracing_result = [
+ [{:tracing, false}],
+ [{:tracing, false}],
+ [{:tracing, false}]
+ ]
+
+ full_result = [
+ [{:name, @vhost1}, {:tracing, false}],
+ [{:name, @vhost2}, {:tracing, false}],
+ [{:name, @root}, {:tracing, false}]
+ ]
+
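+    # requesting the same keys in the opposite order flips the key order
+    # within each row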
+ transposed_result = [
+ [{:tracing, false}, {:name, @vhost1}],
+ [{:tracing, false}, {:name, @vhost2}],
+ [{:tracing, false}, {:name, @root}]
+ ]
+
+ {
+ :ok,
+ name_result: name_result,
+ tracing_result: tracing_result,
+ full_result: full_result,
+ transposed_result: transposed_result
+ }
+ end
+
+ setup context do
+ {
+ :ok,
+ opts: %{node: get_rabbit_hostname(), timeout: context[:test_timeout]}
+ }
+ end
+
+ test "merge_defaults with no command, print just use the names" do
+ assert match?({["name"], %{}}, @command.merge_defaults([], %{}))
+ end
+
+ test "validate: return bad_info_key on a single bad arg", context do
+ assert @command.validate(["quack"], context[:opts]) ==
+ {:validation_failure, {:bad_info_key, [:quack]}}
+ end
+
+ test "validate: multiple bad args return a list of bad info key values", context do
+ assert @command.validate(["quack", "oink"], context[:opts]) ==
+ {:validation_failure, {:bad_info_key, [:oink, :quack]}}
+ end
+
+ test "validate: return bad_info_key on mix of good and bad args", context do
+ assert @command.validate(["quack", "tracing"], context[:opts]) ==
+ {:validation_failure, {:bad_info_key, [:quack]}}
+ assert @command.validate(["name", "oink"], context[:opts]) ==
+ {:validation_failure, {:bad_info_key, [:oink]}}
+ assert @command.validate(["name", "oink", "tracing"], context[:opts]) ==
+ {:validation_failure, {:bad_info_key, [:oink]}}
+ end
+
+ test "run: on a bad RabbitMQ node, return a badrpc" do
+ opts = %{node: :jake@thedog, timeout: 200}
+
+ assert match?({:badrpc, _}, @command.run(["name"], opts))
+ end
+
+ @tag test_timeout: :infinity
+ test "run: with the name tag, print just the names", context do
+ # checks to ensure that all expected vhosts are in the results
+ matches_found = @command.run(["name"], context[:opts])
+ assert Enum.all?(context[:name_result], fn(vhost) ->
+ Enum.find(matches_found, fn(found) -> found == vhost end)
+ end)
+ end
+
+ @tag test_timeout: :infinity
+ test "run: with the tracing tag, print just say if tracing is on", context do
+ # checks to ensure that all expected vhosts are in the results
+ matches_found = @command.run(["tracing"], context[:opts])
+ assert Enum.all?(context[:tracing_result], fn(vhost) ->
+ Enum.find(matches_found, fn(found) -> found == vhost end)
+ end)
+ end
+
+ @tag test_timeout: :infinity
+ test "run: with name and tracing keys, print both", context do
+ # checks to ensure that all expected vhosts are in the results
+ matches_found = @command.run(["name", "tracing"], context[:opts])
+ assert Enum.all?(context[:full_result], fn(vhost) ->
+ Enum.find(matches_found, fn(found) -> found == vhost end)
+ end)
+
+ # checks to ensure that all expected vhosts are in the results
+ matches_found = @command.run(["tracing", "name"], context[:opts])
+ assert Enum.all?(context[:transposed_result], fn(vhost) ->
+ Enum.find(matches_found, fn(found) -> found == vhost end)
+ end)
+ end
+
+ @tag test_timeout: :infinity
+ test "run: duplicate args do not produce duplicate entries", context do
+ # checks to ensure that all expected vhosts are in the results
+ matches_found = @command.run(["name", "name"], context[:opts])
+ assert Enum.all?(context[:name_result], fn(vhost) ->
+ Enum.find(matches_found, fn(found) -> found == vhost end)
+ end)
+ end
+
+ @tag test_timeout: 30000
+ test "run: sufficiently long timeouts don't interfere with results", context do
+ # checks to ensure that all expected vhosts are in the results
+ matches_found = @command.run(["name", "tracing"], context[:opts])
+ assert Enum.all?(context[:full_result], fn(vhost) ->
+ Enum.find(matches_found, fn(found) -> found == vhost end)
+ end)
+ end
+
+ @tag test_timeout: 0, username: "guest"
+ test "run: timeout causes command to return a bad RPC", context do
+ assert @command.run(["name", "tracing"], context[:opts]) ==
+ {:badrpc, :timeout}
+ end
+
+ @tag test_timeout: :infinity
+ test "banner", context do
+ assert @command.banner([], context[:opts]) =~ ~r/Listing vhosts \.\.\./
+ end
+end
diff --git a/deps/rabbitmq_cli/test/ctl/node_health_check_command_test.exs b/deps/rabbitmq_cli/test/ctl/node_health_check_command_test.exs
new file mode 100644
index 0000000000..12ff786bfb
--- /dev/null
+++ b/deps/rabbitmq_cli/test/ctl/node_health_check_command_test.exs
@@ -0,0 +1,65 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+
+defmodule NodeHealthCheckCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Ctl.Commands.NodeHealthCheckCommand
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ reset_vm_memory_high_watermark()
+
+ on_exit([], fn ->
+ reset_vm_memory_high_watermark()
+ end)
+
+ :ok
+ end
+
+ setup do
+ {:ok, opts: %{node: get_rabbit_hostname(), timeout: 20000}}
+ end
+
+ test "validate: with extra arguments returns an arg count error", context do
+ assert @command.validate(["extra"], context[:opts]) == {:validation_failure, :too_many_args}
+ end
+
+ test "validate: with no arguments succeeds", _context do
+ assert @command.validate([], []) == :ok
+ end
+
+ test "validate: with a named, active node argument succeeds", context do
+ assert @command.validate([], context[:opts]) == :ok
+ end
+
+ test "run: request to a named, active node succeeds", context do
+ assert @command.run([], context[:opts])
+ end
+
+ test "run: request to a named, active node with an alarm in effect fails", context do
+ set_vm_memory_high_watermark(0.0000000000001)
+ # give VM memory monitor check some time to kick in
+ :timer.sleep(1500)
+ {:healthcheck_failed, _message} = @command.run([], context[:opts])
+
+ reset_vm_memory_high_watermark()
+ :timer.sleep(1500)
+ assert @command.run([], context[:opts]) == :ok
+ end
+
+ test "run: request to a non-existent node returns a badrpc" do
+ assert match?({:badrpc, _}, @command.run([], %{node: :jake@thedog, timeout: 200}))
+ end
+
+ test "banner", context do
+ assert @command.banner([], context[:opts]) |> Enum.join("\n") =~ ~r/Checking health/
+ assert @command.banner([], context[:opts]) |> Enum.join("\n") =~ ~r/#{get_rabbit_hostname()}/
+ end
+end
diff --git a/deps/rabbitmq_cli/test/ctl/ping_command_test.exs b/deps/rabbitmq_cli/test/ctl/ping_command_test.exs
new file mode 100644
index 0000000000..347013a4a8
--- /dev/null
+++ b/deps/rabbitmq_cli/test/ctl/ping_command_test.exs
@@ -0,0 +1,56 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+
+defmodule PingCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Ctl.Commands.PingCommand
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ reset_vm_memory_high_watermark()
+
+ on_exit([], fn ->
+ reset_vm_memory_high_watermark()
+ end)
+
+ :ok
+ end
+
+ setup do
+ {:ok, opts: %{node: get_rabbit_hostname(), timeout: 200}}
+ end
+
+ test "validate: with extra arguments returns an arg count error", context do
+ assert @command.validate(["extra"], context[:opts]) == {:validation_failure, :too_many_args}
+ end
+
+ test "validate: with no arguments succeeds", _context do
+ assert @command.validate([], []) == :ok
+ end
+
+ test "validate: with a named, active node argument succeeds", context do
+ assert @command.validate([], context[:opts]) == :ok
+ end
+
+ test "run: request to a named, active node succeeds", context do
+ assert @command.run([], context[:opts])
+ end
+
+ test "run: request to a non-existent node returns a badrpc" do
+ assert match?({:error, _}, @command.run([], %{node: :jake@thedog, timeout: 200}))
+ end
+
+ test "banner", context do
+ banner = @command.banner([], context[:opts])
+
+ assert banner =~ ~r/Will ping/
+ assert banner =~ ~r/#{get_rabbit_hostname()}/
+ end
+end
diff --git a/deps/rabbitmq_cli/test/ctl/purge_queue_command_test.exs b/deps/rabbitmq_cli/test/ctl/purge_queue_command_test.exs
new file mode 100644
index 0000000000..9891175f15
--- /dev/null
+++ b/deps/rabbitmq_cli/test/ctl/purge_queue_command_test.exs
@@ -0,0 +1,88 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+
+defmodule PurgeQueueCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Ctl.Commands.PurgeQueueCommand
+ @user "guest"
+ @vhost "purge-queue-vhost"
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ :ok
+ end
+
+ setup context do
+ {:ok, opts: %{
+ node: get_rabbit_hostname(),
+ vhost: @vhost,
+ timeout: context[:test_timeout]
+ }}
+ end
+
+ test "merge_defaults: defaults can be overridden" do
+ assert @command.merge_defaults([], %{}) == {[], %{vhost: "/"}}
+ assert @command.merge_defaults([], %{vhost: "non_default"}) == {[], %{vhost: "non_default"}}
+ end
+
+ @tag test_timeout: 30000
+ test "request to an existent queue on active node succeeds", context do
+ add_vhost @vhost
+ set_permissions @user, @vhost, [".*", ".*", ".*"]
+ on_exit(context, fn -> delete_vhost(@vhost) end)
+
+ q = "foo"
+ n = 20
+
+ declare_queue(q, @vhost)
+ assert message_count(@vhost, q) == 0
+
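+    # publish a batch, confirm the count, purge, and expect the queue to be
+    # empty again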
+ publish_messages(@vhost, q, n)
+ assert message_count(@vhost, q) == n
+
+ assert @command.run([q], context[:opts]) == :ok
+ assert message_count(@vhost, q) == 0
+ end
+
+ @tag test_timeout: 30000
+ test "request to a non-existent queue on active node returns not found", context do
+ assert @command.run(["non-existent"], context[:opts]) == {:error, :not_found}
+ end
+
+ @tag test_timeout: 0
+ test "run: timeout causes command to return a bad RPC", context do
+ assert @command.run(["foo"], context[:opts]) == {:badrpc, :timeout}
+ end
+
+ test "shows up in help" do
+ s = @command.usage()
+ assert s =~ ~r/purge_queue/
+ end
+
+ test "defaults to vhost /" do
+ assert @command.merge_defaults(["foo"], %{bar: "baz"}) == {["foo"], %{bar: "baz", vhost: "/"}}
+ end
+
+ test "validate: with extra arguments returns an arg count error" do
+ assert @command.validate(["queue-name", "extra"], %{}) == {:validation_failure, :too_many_args}
+ end
+
+ test "validate: with no arguments returns an arg count error" do
+ assert @command.validate([], %{}) == {:validation_failure, :not_enough_args}
+ end
+
+ test "validate: with correct args returns ok" do
+ assert @command.validate(["q"], %{}) == :ok
+ end
+
+ test "banner informs that vhost's queue is purged" do
+ assert @command.banner(["my-q"], %{vhost: "/foo"}) == "Purging queue 'my-q' in vhost '/foo' ..."
+ end
+end
diff --git a/deps/rabbitmq_cli/test/ctl/rename_cluster_node_command_test.exs b/deps/rabbitmq_cli/test/ctl/rename_cluster_node_command_test.exs
new file mode 100644
index 0000000000..02bf2ad795
--- /dev/null
+++ b/deps/rabbitmq_cli/test/ctl/rename_cluster_node_command_test.exs
@@ -0,0 +1,102 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2016-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RenameClusterNodeCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Ctl.Commands.RenameClusterNodeCommand
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+ node = get_rabbit_hostname()
+
+ start_rabbitmq_app()
+
+ {:ok, plugins_dir} =
+ :rabbit_misc.rpc_call(node, :application, :get_env, [:rabbit, :plugins_dir])
+
+ rabbitmq_home = :rabbit_misc.rpc_call(node, :code, :lib_dir, [:rabbit])
+ mnesia_dir = :rabbit_misc.rpc_call(node, :rabbit_mnesia, :dir, [])
+
+ on_exit([], fn ->
+ start_rabbitmq_app()
+ end)
+
+ {:ok, opts: %{rabbitmq_home: rabbitmq_home, plugins_dir: plugins_dir, mnesia_dir: mnesia_dir}}
+ end
+
+ setup context do
+ {:ok,
+ opts:
+ Map.merge(
+ context[:opts],
+ %{node: :not_running@localhost}
+ )}
+ end
+
+ test "validate: specifying no nodes fails validation", context do
+ assert @command.validate([], context[:opts]) ==
+ {:validation_failure, :not_enough_args}
+ end
+
+ test "validate: specifying one node only fails validation", context do
+ assert @command.validate(["a"], context[:opts]) ==
+ {:validation_failure, :not_enough_args}
+ end
+
+ test "validate_execution_environment: specifying an uneven number of arguments fails validation",
+ context do
+ assert match?(
+ {:validation_failure, {:bad_argument, _}},
+ @command.validate_execution_environment(["a", "b", "c"], context[:opts])
+ )
+ end
+
+ test "validate_execution_environment: request to a running node fails", _context do
+ node = get_rabbit_hostname()
+
+ assert match?(
+ {:validation_failure, :node_running},
+ @command.validate_execution_environment([to_string(node), "other_node@localhost"], %{
+ node: node
+ })
+ )
+ end
+
+ test "validate_execution_environment: not providing node mnesia dir fails validation",
+ context do
+ opts_without_mnesia = Map.delete(context[:opts], :mnesia_dir)
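+    # the mnesia dir may come from the :mnesia application env, the
+    # RABBITMQ_MNESIA_DIR environment variable, or the command options;
+    # all three sources are exercised in turn below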
+ Application.put_env(:mnesia, :dir, "/tmp")
+ on_exit(fn -> Application.delete_env(:mnesia, :dir) end)
+
+ assert :ok ==
+ @command.validate(
+ ["some_node@localhost", "other_node@localhost"],
+ opts_without_mnesia
+ )
+
+ Application.delete_env(:mnesia, :dir)
+ System.put_env("RABBITMQ_MNESIA_DIR", "/tmp")
+ on_exit(fn -> System.delete_env("RABBITMQ_MNESIA_DIR") end)
+
+ assert :ok ==
+ @command.validate(
+ ["some_node@localhost", "other_node@localhost"],
+ opts_without_mnesia
+ )
+
+ System.delete_env("RABBITMQ_MNESIA_DIR")
+
+ assert :ok ==
+ @command.validate(["some_node@localhost", "other_node@localhost"], context[:opts])
+ end
+
+ test "banner", context do
+ assert @command.banner(["a", "b"], context[:opts]) =~
+ ~r/Renaming cluster nodes: \n a -> b/
+ end
+end
diff --git a/deps/rabbitmq_cli/test/ctl/report_command_test.exs b/deps/rabbitmq_cli/test/ctl/report_command_test.exs
new file mode 100644
index 0000000000..f207ab8c2b
--- /dev/null
+++ b/deps/rabbitmq_cli/test/ctl/report_command_test.exs
@@ -0,0 +1,44 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+
+defmodule ReportTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Ctl.Commands.ReportCommand
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ :ok
+ end
+
+ setup do
+ {:ok, opts: %{node: get_rabbit_hostname(), timeout: :infinity}}
+ end
+
+ test "validate: with extra arguments, status returns an arg count error", context do
+ assert @command.validate(["extra"], context[:opts]) ==
+ {:validation_failure, :too_many_args}
+ end
+
+ test "run: report request to a reachable node succeeds", context do
+ output = @command.run([], context[:opts]) |> Enum.to_list
+
+ assert_stream_without_errors(output)
+ end
+
+ test "run: report request on nonexistent RabbitMQ node returns a badrpc" do
+ opts = %{node: :jake@thedog, timeout: 200}
+ assert match?({:badrpc, _}, @command.run([], opts))
+ end
+
+ test "banner", context do
+ assert @command.banner([], context[:opts])
+ =~ ~r/Reporting server status of node #{get_rabbit_hostname()}/
+ end
+end
diff --git a/deps/rabbitmq_cli/test/ctl/reset_command_test.exs b/deps/rabbitmq_cli/test/ctl/reset_command_test.exs
new file mode 100644
index 0000000000..8bded47377
--- /dev/null
+++ b/deps/rabbitmq_cli/test/ctl/reset_command_test.exs
@@ -0,0 +1,68 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+
+defmodule ResetCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Ctl.Commands.ResetCommand
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ start_rabbitmq_app()
+
+ on_exit([], fn ->
+ start_rabbitmq_app()
+ end)
+
+ :ok
+ end
+
+ setup do
+ {:ok, opts: %{node: get_rabbit_hostname()}}
+ end
+
+ test "validate: with extra arguments returns an arg count error", context do
+ assert @command.validate(["extra"], context[:opts]) == {:validation_failure, :too_many_args}
+ end
+
+ test "run: reset request to an active node with a stopped rabbit app succeeds", context do
+ add_vhost "some_vhost"
+    # ensure the vhost really does exist
+ assert vhost_exists? "some_vhost"
+ stop_rabbitmq_app()
+ assert :ok == @command.run([], context[:opts])
+ start_rabbitmq_app()
+    # check that the created vhost no longer exists
+ assert match?([_], list_vhosts())
+ end
+
+ test "run: reset request to an active node with a running rabbit app fails", context do
+ add_vhost "some_vhost"
+ assert vhost_exists? "some_vhost"
+ assert match?({:error, :mnesia_unexpectedly_running}, @command.run([], context[:opts]))
+ assert vhost_exists? "some_vhost"
+ end
+
+ test "run: request to a non-existent node returns a badrpc" do
+ opts = %{node: :jake@thedog, timeout: 200}
+ assert match?({:badrpc, _}, @command.run([], opts))
+ end
+
+ test "banner", context do
+ assert @command.banner([], context[:opts]) =~ ~r/Resetting node #{get_rabbit_hostname()}/
+ end
+
+ test "output mnesia is running error", context do
+ exit_code = RabbitMQ.CLI.Core.ExitCodes.exit_software
+ assert match?({:error, ^exit_code,
+ "Mnesia is still running on node " <> _},
+ @command.output({:error, :mnesia_unexpectedly_running}, context[:opts]))
+
+ end
+end
diff --git a/deps/rabbitmq_cli/test/ctl/restart_vhost_command_test.exs b/deps/rabbitmq_cli/test/ctl/restart_vhost_command_test.exs
new file mode 100644
index 0000000000..c8d2fe7c48
--- /dev/null
+++ b/deps/rabbitmq_cli/test/ctl/restart_vhost_command_test.exs
@@ -0,0 +1,95 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2016-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+
+defmodule RestartVhostCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Ctl.Commands.RestartVhostCommand
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+ :ok
+ end
+
+ @vhost "vhost_to_restart"
+ @timeout 10000
+
+ setup do
+ {:ok, opts: %{
+ node: get_rabbit_hostname(),
+ vhost: @vhost,
+ timeout: @timeout
+ }}
+ end
+
+ test "validate: specifying arguments is reported as an error", context do
+ assert @command.validate(["a"], context[:opts]) ==
+ {:validation_failure, :too_many_args}
+ assert @command.validate(["a", "b"], context[:opts]) ==
+ {:validation_failure, :too_many_args}
+ assert @command.validate(["a", "b", "c"], context[:opts]) ==
+ {:validation_failure, :too_many_args}
+ end
+
+ test "run: request to a non-existent node returns a badrpc", _context do
+ opts = %{node: :jake@thedog, vhost: @vhost, timeout: @timeout}
+ assert match?(
+ {:badrpc, _},
+ @command.run([], opts))
+ end
+
+ test "banner", context do
+ expected = "Trying to restart vhost '#{@vhost}' on node '#{get_rabbit_hostname()}' ..."
+ ^expected = @command.banner([], context[:opts])
+ end
+
+ test "run: restarting an existing vhost returns already_started", context do
+ setup_vhosts()
+ {:error, {:already_started, _}} = @command.run([], context[:opts])
+ end
+
+ test "run: restarting an failed vhost returns ok", context do
+ setup_vhosts()
+ vhost = context[:opts][:vhost]
+ node_name = context[:opts][:node]
+ force_vhost_failure(node_name, vhost)
+ {:ok, _} = @command.run([], context[:opts])
+ {:ok, _} = :rpc.call(node_name, :rabbit_vhost_sup_sup, :get_vhost_sup, [vhost])
+ end
+
+ #
+ # Implementation
+ #
+
+ defp setup_vhosts do
+ add_vhost @vhost
+ # give the vhost a chance to fully start and initialise
+ :timer.sleep(1000)
+ on_exit(fn ->
+ delete_vhost @vhost
+ end)
+ end
+
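+  # repeatedly kills the vhost's message store (or its supervisor) and
+  # retries until the vhost supervisor reports it is no longer running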
+ defp force_vhost_failure(node_name, vhost) do
+ case :rpc.call(node_name, :rabbit_vhost_sup_sup, :get_vhost_sup, [vhost]) do
+ {:ok, sup} ->
+ case :lists.keyfind(:msg_store_persistent, 1, :supervisor.which_children(sup)) do
+ {_, pid, _, _} ->
+ Process.exit(pid, :foo)
+ :timer.sleep(5000)
+          force_vhost_failure(node_name, vhost)
+ false ->
+ Process.exit(sup, :foo)
+ :timer.sleep(5000)
+ force_vhost_failure(node_name, vhost)
+      end
+ {:error, {:vhost_supervisor_not_running, _}} ->
+ :ok
+ end
+ end
+end
diff --git a/deps/rabbitmq_cli/test/ctl/resume_listeners_command_test.exs b/deps/rabbitmq_cli/test/ctl/resume_listeners_command_test.exs
new file mode 100644
index 0000000000..3aad0b355b
--- /dev/null
+++ b/deps/rabbitmq_cli/test/ctl/resume_listeners_command_test.exs
@@ -0,0 +1,67 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule ResumeListenersCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Ctl.Commands.ResumeListenersCommand
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+ resume_all_client_listeners()
+
+ node_name = get_rabbit_hostname()
+ on_exit(fn ->
+ resume_all_client_listeners()
+ close_all_connections(node_name)
+ end)
+
+ {:ok, opts: %{node: node_name, timeout: 30_000}}
+ end
+
+ setup do
+ {:ok, opts: %{node: get_rabbit_hostname()}}
+ end
+
+ test "merge_defaults: merges no defaults" do
+ assert @command.merge_defaults([], %{}) == {[], %{}}
+ end
+
+ test "validate: accepts no arguments", context do
+ assert @command.validate([], context[:opts]) == :ok
+ end
+
+ test "validate: with extra arguments returns an arg count error", context do
+ assert @command.validate(["extra"], context[:opts]) ==
+ {:validation_failure, :too_many_args}
+ end
+
+ test "run: request to a non-existent node returns a badrpc" do
+ opts = %{node: :jake@thedog, timeout: 200}
+ assert match?({:badrpc, _}, @command.run([], opts))
+ end
+
+ test "run: resumes all client TCP listeners so new client connects are accepted", context do
+ suspend_all_client_listeners()
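+    # with listeners suspended, a new client connection attempt must fail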
+ expect_client_connection_failure()
+
+ assert @command.run([], Map.merge(context[:opts], %{timeout: 5_000})) == :ok
+
+ # implies a successful connection
+ with_channel("/", fn _ -> :ok end)
+ close_all_connections(get_rabbit_hostname())
+ end
+end
diff --git a/deps/rabbitmq_cli/test/ctl/rotate_logs_command_test.exs b/deps/rabbitmq_cli/test/ctl/rotate_logs_command_test.exs
new file mode 100644
index 0000000000..13eed87d43
--- /dev/null
+++ b/deps/rabbitmq_cli/test/ctl/rotate_logs_command_test.exs
@@ -0,0 +1,40 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+
+defmodule RotateLogsCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Ctl.Commands.RotateLogsCommand
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ :ok
+ end
+
+ setup do
+ {:ok, opts: %{node: get_rabbit_hostname()}}
+ end
+
+ test "validate: with extra arguments returns an arg count error", context do
+ assert @command.validate(["extra"], context[:opts]) == {:validation_failure, :too_many_args}
+ end
+
+ test "run: request to a named, active node succeeds", context do
+ assert @command.run([], context[:opts]) == :ok
+ end
+
+ test "run: request to a non-existent node returns a badrpc" do
+ opts = %{node: :jake@thedog, timeout: 200}
+ assert match?({:badrpc, _}, @command.run([], opts))
+ end
+
+ test "banner", context do
+ assert @command.banner([], context[:opts]) =~ ~r/Rotating logs for node #{get_rabbit_hostname()}/
+ end
+end
diff --git a/deps/rabbitmq_cli/test/ctl/set_cluster_name_command_test.exs b/deps/rabbitmq_cli/test/ctl/set_cluster_name_command_test.exs
new file mode 100644
index 0000000000..a0852522e4
--- /dev/null
+++ b/deps/rabbitmq_cli/test/ctl/set_cluster_name_command_test.exs
@@ -0,0 +1,63 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+
+defmodule SetClusterNameCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Ctl.Commands.SetClusterNameCommand
+
+ setup_all do
+ :net_kernel.start([:rabbitmqctl, :shortnames])
+
+ :ok
+ end
+
+ setup do
+ {:ok, opts: %{node: get_rabbit_hostname()}}
+ end
+
+ test "shows up in help" do
+ s = @command.usage()
+ assert s =~ ~r/set_cluster_name/
+ end
+
+ test "has no defaults" do
+ assert @command.merge_defaults(["foo"], %{bar: "baz"}) == {["foo"], %{bar: "baz"}}
+ end
+
+ test "validate: with insufficient number of arguments, return arg count error" do
+ assert @command.validate([], %{}) == {:validation_failure, :not_enough_args}
+ end
+
+ test "validate: with too many arguments, return arg count error" do
+ assert @command.validate(["foo", "bar"], %{}) == {:validation_failure, :too_many_args}
+ end
+
+ test "validate: with correct number of arguments, return ok" do
+ assert @command.validate(["mynewname"], %{}) == :ok
+ end
+
+ test "run: valid name returns ok", context do
+ s = get_cluster_name()
+ assert @command.run(["agoodname"], context[:opts]) == :ok
+ # restore original name
+ @command.run([s], context[:opts])
+ end
+
+ test "run: An invalid Rabbit node returns a bad rpc message" do
+ opts = %{node: :jake@thedog, timeout: 200}
+
+ assert match?({:badrpc, _}, @command.run(["clustername"], opts))
+ end
+
+ test "banner shows that the name is being set" do
+ s = @command.banner(["annoyyou"], %{})
+ assert s == "Setting cluster name to annoyyou ..."
+ end
+
+end
diff --git a/deps/rabbitmq_cli/test/ctl/set_disk_free_limit_command_test.exs b/deps/rabbitmq_cli/test/ctl/set_disk_free_limit_command_test.exs
new file mode 100644
index 0000000000..80f0e1511f
--- /dev/null
+++ b/deps/rabbitmq_cli/test/ctl/set_disk_free_limit_command_test.exs
@@ -0,0 +1,173 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+
+defmodule SetDiskFreeLimitCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Ctl.Commands.SetDiskFreeLimitCommand
+
+ @default_limit 1048576
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ set_disk_free_limit(@default_limit)
+
+ on_exit([], fn ->
+ set_disk_free_limit(@default_limit)
+ end)
+
+ end
+
+ setup context do
+ context[:tag] # silences warnings
+ on_exit([], fn -> set_disk_free_limit(@default_limit) end)
+
+ {:ok, opts: %{node: get_rabbit_hostname()}}
+ end
+
+ test "validate: an invalid number of arguments results in arg count errors" do
+ assert @command.validate([], %{}) == {:validation_failure, :not_enough_args}
+ assert @command.validate(["too", "many"], %{}) == {:validation_failure, :too_many_args}
+ end
+
+ @tag limit: "2097152bytes"
+ test "run: an invalid string input returns a bad arg and does not change the limit", context do
+ assert @command.validate([context[:limit]], context[:opts]) ==
+ {:validation_failure, :bad_argument}
+ end
+
+ test "validate: valid fractional inputs return an ok", context do
+ assert @command.validate(
+ ["mem_relative", "0.0"],
+ context[:opts]
+ ) == :ok
+
+ assert @command.validate(
+ ["mem_relative", "0.5"],
+ context[:opts]
+ ) == :ok
+
+ assert @command.validate(
+ ["mem_relative", "1.8"],
+ context[:opts]
+ ) == :ok
+ end
+
+ test "validate: a value outside the accepted range returns an error", context do
+ assert @command.validate(
+ ["mem_relative", "-1.0"],
+ context[:opts]
+ ) == {:validation_failure, :bad_argument}
+ end
+
+ @tag fraction: "1.3"
+ test "validate: a valid float string input returns ok", context do
+ assert @command.validate(
+ ["mem_relative", context[:fraction]],
+ context[:opts]
+ ) == :ok
+ end
+
+ @tag fraction: "1.3salt"
+ test "validate: an invalid string input returns a bad argument", context do
+ assert @command.validate(
+ ["mem_relative", context[:fraction]],
+ context[:opts]
+ ) == {:validation_failure, :bad_argument}
+ end
+
+## ------------------------ validate mem_relative command -------------------------------------------
+
+ test "validate: an invalid number of mem_relative arguments results in an arg count error" do
+ assert @command.validate(["mem_relative"], %{}) == {:validation_failure, :not_enough_args}
+ assert @command.validate(["mem_relative", 1.3, "extra"], %{}) == {:validation_failure, :too_many_args}
+ end
+
+
+## ------------------------ run absolute command -------------------------------------------
+
+ @tag test_timeout: 3000
+ test "run: an invalid node returns a bad rpc" do
+ args = [@default_limit]
+ opts = %{node: :jake@thedog}
+
+ assert match?({:badrpc, _}, @command.run(args, opts))
+ end
+
+ @tag limit: 2097152
+ test "run: a valid integer input returns an ok and sets the disk free limit", context do
+ assert @command.run([context[:limit]], context[:opts]) == :ok
+ assert status()[:disk_free_limit] === context[:limit]
+ end
+
+ @tag limit: 2097152.0
+ test "run: a valid non-fractional float input returns an ok and sets the disk free limit", context do
+ assert @command.run([context[:limit]], context[:opts]) == :ok
+ assert status()[:disk_free_limit] === round(context[:limit])
+ end
+
+ @tag limit: 2097152.9
+ test "run: a valid fractional float input returns an ok and sets the disk free limit", context do
+ assert @command.run([context[:limit]], context[:opts]) == :ok
+ assert status()[:disk_free_limit] === context[:limit] |> Float.floor |> round
+ end
+
+ @tag limit: "2097152"
+ test "run: an integer string input returns an ok and sets the disk free limit", context do
+ assert @command.run([context[:limit]], context[:opts]) == :ok
+ assert status()[:disk_free_limit] === String.to_integer(context[:limit])
+ end
+
+ @tag limit: "2MB"
+ test "run: an valid unit string input returns an ok and changes the limit", context do
+ assert @command.run([context[:limit]], context[:opts]) == :ok
+ assert status()[:disk_free_limit] === 2000000
+ end
+
+## ------------------------ run relative command -------------------------------------------
+
+ @tag fraction: 1
+ test "run: an integer input returns ok", context do
+ assert @command.run(
+ ["mem_relative", context[:fraction]],
+ context[:opts]
+ ) == :ok
+ end
+
+ @tag fraction: 1.1
+ test "run: a factional input returns ok", context do
+ assert @command.run(
+ ["mem_relative", context[:fraction]],
+ context[:opts]
+ ) == :ok
+ end
+
+
+ test "banner: returns absolute message", context do
+ assert @command.banner(["10"], context[:opts])
+ =~ ~r/Setting disk free limit on #{get_rabbit_hostname()} to 10 bytes .../
+
+ assert @command.banner(["-10"], context[:opts])
+ =~ ~r/Setting disk free limit on #{get_rabbit_hostname()} to -10 bytes .../
+
+ assert @command.banner(["sandwich"], context[:opts])
+ =~ ~r/Setting disk free limit on #{get_rabbit_hostname()} to sandwich bytes .../
+ end
+
+ test "banner: returns memory-relative message", context do
+ assert @command.banner(["mem_relative", "1.3"], context[:opts])
+ =~ ~r/Setting disk free limit on #{get_rabbit_hostname()} to 1\.3 times the total RAM \.\.\./
+
+ assert @command.banner(["mem_relative", "-1.3"], context[:opts])
+ =~ ~r/Setting disk free limit on #{get_rabbit_hostname()} to -1\.3 times the total RAM \.\.\./
+
+ assert @command.banner(["mem_relative", "sandwich"], context[:opts])
+ =~ ~r/Setting disk free limit on #{get_rabbit_hostname()} to sandwich times the total RAM \.\.\./
+ end
+end
diff --git a/deps/rabbitmq_cli/test/ctl/set_global_parameter_command_test.exs b/deps/rabbitmq_cli/test/ctl/set_global_parameter_command_test.exs
new file mode 100644
index 0000000000..848f29a0b8
--- /dev/null
+++ b/deps/rabbitmq_cli/test/ctl/set_global_parameter_command_test.exs
@@ -0,0 +1,82 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+
+defmodule SetGlobalParameterCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Ctl.Commands.SetGlobalParameterCommand
+
+ @key :mqtt_default_vhosts
+ @value "{\"O=client,CN=dummy\":\"somevhost\"}"
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ :ok
+ end
+
+ setup context do
+ on_exit(context, fn ->
+ clear_global_parameter context[:key]
+ end)
+
+ {
+ :ok,
+ opts: %{
+ node: get_rabbit_hostname(),
+ }
+ }
+ end
+
+ test "validate: expects a key and a value" do
+ assert @command.validate([], %{}) == {:validation_failure, :not_enough_args}
+ assert @command.validate(["insufficient"], %{}) == {:validation_failure, :not_enough_args}
+ assert @command.validate(["this is", "too", "many"], %{}) == {:validation_failure, :too_many_args}
+ end
+
+ @tag key: @key, value: @value
+ test "run: expects a key and a value", context do
+ assert @command.run(
+ [context[:key], context[:value]],
+ context[:opts]
+ ) == :ok
+
+ assert_parameter_fields(context)
+ end
+
+ test "run: throws a badrpc when instructed to contact an unreachable RabbitMQ node" do
+ opts = %{node: :jake@thedog, timeout: 200}
+
+ assert match?({:badrpc, _}, @command.run([@key, @value], opts))
+ end
+
+ @tag key: @key, value: "bad-value"
+ test "run: a value that fails to parse as JSON returns a decoding error", context do
+ initial = list_global_parameters()
+ assert match?({:error_string, _},
+ @command.run([context[:key], context[:value]],
+ context[:opts]))
+
+ assert list_global_parameters() == initial
+ end
+
+ @tag key: @key, value: @value
+ test "banner", context do
+ assert @command.banner([context[:key], context[:value]], context[:opts])
+ =~ ~r/Setting global runtime parameter \"#{context[:key]}\" to \"#{context[:value]}\" \.\.\./
+ end
+
+ # Checks each element of the first parameter against the expected context values
+ defp assert_parameter_fields(context) do
+ result_param = list_global_parameters() |> List.first
+
+ assert result_param[:value] == context[:value]
+ assert result_param[:name] == context[:key]
+ end
+
+end
diff --git a/deps/rabbitmq_cli/test/ctl/set_log_level_command_test.exs b/deps/rabbitmq_cli/test/ctl/set_log_level_command_test.exs
new file mode 100644
index 0000000000..b4108219ba
--- /dev/null
+++ b/deps/rabbitmq_cli/test/ctl/set_log_level_command_test.exs
@@ -0,0 +1,44 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule SetLogLevelCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Ctl.Commands.SetLogLevelCommand
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+ {:ok,
+ log_level: "debug",
+ opts: %{node: get_rabbit_hostname()}}
+ end
+
+ test "validate: with a single known level succeeds", context do
+ assert @command.validate([context[:log_level]], context[:opts]) == :ok
+ end
+
+ test "validate: with a single unsupported level fails", context do
+ assert match?({:error, _}, @command.validate(["lolwut"], context[:opts]))
+ end
+
+ test "validate: with extra arguments returns an arg count error", context do
+ assert @command.validate([context[:log_level], "whoops"], context[:opts]) == {:validation_failure, :too_many_args}
+ end
+
+ test "run: request to a named, active node succeeds", context do
+ assert @command.run([context[:log_level]], context[:opts]) == :ok
+ end
+
+ test "run: request to a non-existent node returns a badrpc", context do
+ opts = %{node: :jake@thedog, timeout: 200}
+ assert match?({:badrpc, _}, @command.run([context[:log_level]], opts))
+ end
+
+ test "banner", context do
+ assert @command.banner([context[:log_level]], context[:opts]) == "Setting log level to \"debug\" ..."
+ end
+end
diff --git a/deps/rabbitmq_cli/test/ctl/set_operator_policy_command_test.exs b/deps/rabbitmq_cli/test/ctl/set_operator_policy_command_test.exs
new file mode 100644
index 0000000000..5911132a32
--- /dev/null
+++ b/deps/rabbitmq_cli/test/ctl/set_operator_policy_command_test.exs
@@ -0,0 +1,153 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+
+defmodule SetOperatorPolicyCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Ctl.Commands.SetOperatorPolicyCommand
+
+ @vhost "test1"
+ @root "/"
+ @key "message-expiry"
+ @pattern "^queue\."
+ @value "{\"message-ttl\":10}"
+ @apply_to "all"
+ @priority 0
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ add_vhost @vhost
+
+ on_exit([], fn ->
+ delete_vhost @vhost
+ end)
+
+ :ok
+ end
+
+ setup context do
+
+ on_exit(context, fn ->
+ clear_operator_policy(context[:vhost], context[:key])
+ end)
+
+ {
+ :ok,
+ opts: %{
+ node: get_rabbit_hostname(),
+ vhost: "/",
+ apply_to: @apply_to,
+ priority: @priority
+ }
+ }
+ end
+
+ @tag pattern: @pattern, key: @key, value: @value, vhost: @root
+ test "merge_defaults: a well-formed command with no vhost runs against the default" do
+ assert match?({_, %{vhost: "/"}}, @command.merge_defaults([], %{}))
+ end
+
+ test "merge_defaults: does not change defined vhost" do
+ assert match?({[], %{vhost: "test_vhost"}}, @command.merge_defaults([], %{vhost: "test_vhost"}))
+ end
+
+ test "merge_defaults: default apply_to is \"all\"" do
+ assert match?({_, %{apply_to: "all"}}, @command.merge_defaults([], %{}))
+ assert match?({_, %{apply_to: "custom"}}, @command.merge_defaults([], %{apply_to: "custom"}))
+ end
+
+ test "merge_defaults: default priority is 0" do
+ assert match?({_, %{priority: 0}}, @command.merge_defaults([], %{}))
+ assert match?({_, %{priority: 3}}, @command.merge_defaults([], %{priority: 3}))
+ end
+
+ test "validate: providing too few arguments fails validation" do
+ assert @command.validate([], %{}) == {:validation_failure, :not_enough_args}
+ assert @command.validate(["insufficient"], %{}) == {:validation_failure, :not_enough_args}
+ assert @command.validate(["not", "enough"], %{}) == {:validation_failure, :not_enough_args}
+ end
+
+ test "validate: providing too many arguments fails validation" do
+ assert @command.validate(["this", "is", "too", "many"], %{}) == {:validation_failure, :too_many_args}
+ end
+
+ @tag pattern: @pattern, key: @key, value: @value, vhost: @vhost
+ test "run: a well-formed, host-specific command returns okay", context do
+ vhost_opts = Map.merge(context[:opts], %{vhost: context[:vhost]})
+
+ assert @command.run(
+ [context[:key], context[:pattern], context[:value]],
+ vhost_opts
+ ) == :ok
+
+ assert_operator_policy_fields(context)
+ end
+
+ test "run: an unreachable node throws a badrpc" do
+ opts = %{node: :jake@thedog, vhost: "/", priority: 0, apply_to: "all", timeout: 200}
+
+ assert match?({:badrpc, _}, @command.run([@key, @pattern, @value], opts))
+ end
+
+ @tag pattern: @pattern, key: @key, value: @value, vhost: "bad-vhost"
+ test "run: providing a non-existent vhost reports an error", context do
+ vhost_opts = Map.merge(context[:opts], %{vhost: context[:vhost]})
+
+ assert @command.run(
+ [context[:key], context[:pattern], context[:value]],
+ vhost_opts
+ ) == {:error, {:no_such_vhost, context[:vhost]}}
+ end
+
+ @tag pattern: @pattern, key: @key, value: "bad-value", vhost: @root
+ test "run: an invalid value returns a JSON decoding error", context do
+ assert match?({:error_string, _},
+ @command.run([context[:key], context[:pattern], context[:value]],
+ context[:opts]))
+
+ assert list_operator_policies(context[:vhost]) == []
+ end
+
+ @tag pattern: @pattern, key: @key, value: "{\"foo\":\"bar\"}", vhost: @root
+ test "run: invalid policy returns an error", context do
+ assert @command.run(
+ [context[:key], context[:pattern], context[:value]],
+ context[:opts]
+ ) == {:error_string, 'Validation failed\n\n[{<<"foo">>,<<"bar">>}] are not recognised policy settings\n'}
+
+ assert list_operator_policies(context[:vhost]) == []
+ end
+
+ @tag pattern: @pattern, key: @key, value: "{}", vhost: @root
+ test "run: an empty JSON object value returns an error", context do
+ assert @command.run(
+ [context[:key], context[:pattern], context[:value]],
+ context[:opts]
+ ) == {:error_string, 'Validation failed\n\nno policy provided\n'}
+
+ assert list_operator_policies(context[:vhost]) == []
+ end
+
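+  # the banner says "override": operator policies are overlaid on top of regular user policies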
+ @tag pattern: @pattern, key: @key, value: @value, vhost: @vhost
+ test "banner", context do
+ vhost_opts = Map.merge(context[:opts], %{vhost: context[:vhost]})
+
+ assert @command.banner([context[:key], context[:pattern], context[:value]], vhost_opts)
+ == "Setting operator policy override \"#{context[:key]}\" for pattern \"#{context[:pattern]}\" to \"#{context[:value]}\" with priority \"#{context[:opts][:priority]}\" for vhost \"#{context[:vhost]}\" \.\.\."
+ end
+
+ # Checks each element of the first policy against the expected context values
+ defp assert_operator_policy_fields(context) do
+ result_policy = context[:vhost] |> list_operator_policies |> List.first
+ assert result_policy[:definition] == context[:value]
+ assert result_policy[:vhost] == context[:vhost]
+ assert result_policy[:pattern] == context[:pattern]
+ assert result_policy[:name] == context[:key]
+ end
+end
diff --git a/deps/rabbitmq_cli/test/ctl/set_parameter_command_test.exs b/deps/rabbitmq_cli/test/ctl/set_parameter_command_test.exs
new file mode 100644
index 0000000000..50a2543dee
--- /dev/null
+++ b/deps/rabbitmq_cli/test/ctl/set_parameter_command_test.exs
@@ -0,0 +1,137 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+
+defmodule SetParameterCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Ctl.Commands.SetParameterCommand
+
+ @vhost "test1"
+ @root "/"
+ @component_name "federation-upstream"
+ @key "reconnect-delay"
+ @value "{\"uri\":\"amqp://127.0.0.1:5672\"}"
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ add_vhost @vhost
+
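+    # the federation-upstream parameter component is only registered once the federation plugin is enabled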
+ enable_federation_plugin()
+
+ on_exit([], fn ->
+ delete_vhost @vhost
+ end)
+
+ # featured in a definitions file imported by other tests
+ clear_parameter("/", "federation-upstream", "up-1")
+
+ :ok
+ end
+
+ setup context do
+ on_exit(context, fn ->
+ clear_parameter context[:vhost], context[:component_name], context[:key]
+ end)
+ {
+ :ok,
+ opts: %{
+ node: get_rabbit_hostname(),
+ vhost: context[:vhost]
+ }
+ }
+ end
+
+ @tag component_name: @component_name, key: @key, value: @value, vhost: @root
+ test "merge_defaults: a well-formed command with no vhost runs against the default" do
+ assert match?({_, %{vhost: "/"}}, @command.merge_defaults([], %{}))
+ assert match?({_, %{vhost: "non_default"}}, @command.merge_defaults([], %{vhost: "non_default"}))
+ end
+
+ test "validate: wrong number of arguments leads to an arg count error" do
+ assert @command.validate([], %{}) == {:validation_failure, :not_enough_args}
+ assert @command.validate(["insufficient"], %{}) == {:validation_failure, :not_enough_args}
+ assert @command.validate(["not", "enough"], %{}) == {:validation_failure, :not_enough_args}
+ assert @command.validate(["this", "is", "too", "many"], %{}) == {:validation_failure, :too_many_args}
+ end
+
+ @tag component_name: @component_name, key: @key, value: @value, vhost: @vhost
+ test "run: a well-formed, host-specific command returns okay", context do
+ vhost_opts = Map.merge(context[:opts], %{vhost: context[:vhost]})
+
+ assert @command.run(
+ [context[:component_name], context[:key], context[:value]],
+ vhost_opts
+ ) == :ok
+
+ assert_parameter_fields(context)
+ end
+
+ test "run: throws a badrpc when instructed to contact an unreachable RabbitMQ node" do
+ opts = %{node: :jake@thedog, vhost: "/", timeout: 200}
+
+ assert match?({:badrpc, _}, @command.run([@component_name, @key, @value], opts))
+ end
+
+ @tag component_name: "bad-component-name", key: @key, value: @value, vhost: @root
+ test "run: an invalid component_name returns a validation failed error", context do
+ assert @command.run(
+ [context[:component_name], context[:key], context[:value]],
+ context[:opts]
+ ) == {:error_string, 'Validation failed\n\ncomponent #{context[:component_name]} not found\n'}
+
+ assert list_parameters(context[:vhost]) == []
+ end
+
+ @tag component_name: @component_name, key: @key, value: @value, vhost: "bad-vhost"
+ test "run: an invalid vhost returns a no-such-vhost error", context do
+ vhost_opts = Map.merge(context[:opts], %{vhost: context[:vhost]})
+
+ assert @command.run(
+ [context[:component_name], context[:key], context[:value]],
+ vhost_opts
+ ) == {:error, {:no_such_vhost, context[:vhost]}}
+ end
+
+ @tag component_name: @component_name, key: @key, value: "bad-value", vhost: @root
+ test "run: an invalid value returns a JSON decoding error", context do
+ assert match?({:error_string, _},
+ @command.run([context[:component_name], context[:key], context[:value]],
+ context[:opts]))
+
+ assert list_parameters(context[:vhost]) == []
+ end
+
+ @tag component_name: @component_name, key: @key, value: "{}", vhost: @root
+ test "run: an empty JSON object value returns a key \"uri\" not found error", context do
+ assert @command.run(
+ [context[:component_name], context[:key], context[:value]],
+ context[:opts]
+ ) == {:error_string, 'Validation failed\n\nKey "uri" not found in reconnect-delay\n'}
+
+ assert list_parameters(context[:vhost]) == []
+ end
+
+ @tag component_name: @component_name, key: @key, value: @value, vhost: @vhost
+ test "banner", context do
+ vhost_opts = Map.merge(context[:opts], %{vhost: context[:vhost]})
+
+ assert @command.banner([context[:component_name], context[:key], context[:value]], vhost_opts)
+ =~ ~r/Setting runtime parameter \"#{context[:key]}\" for component \"#{context[:component_name]}\" to \"#{context[:value]}\" in vhost \"#{context[:vhost]}\" \.\.\./
+ end
+
+ # Checks each element of the first parameter against the expected context values
+ defp assert_parameter_fields(context) do
+ result_param = context[:vhost] |> list_parameters |> List.first
+
+ assert result_param[:value] == context[:value]
+ assert result_param[:component] == context[:component_name]
+ assert result_param[:name] == context[:key]
+ end
+end
diff --git a/deps/rabbitmq_cli/test/ctl/set_permissions_command_test.exs b/deps/rabbitmq_cli/test/ctl/set_permissions_command_test.exs
new file mode 100644
index 0000000000..c2628f2728
--- /dev/null
+++ b/deps/rabbitmq_cli/test/ctl/set_permissions_command_test.exs
@@ -0,0 +1,115 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+
+defmodule SetPermissionsCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Ctl.Commands.SetPermissionsCommand
+
+ @vhost "test1"
+ @user "guest"
+ @root "/"
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ add_vhost @vhost
+
+ on_exit([], fn ->
+ delete_vhost @vhost
+ end)
+
+ :ok
+ end
+
+ setup context do
+
+ on_exit(context, fn ->
+ set_permissions context[:user], context[:vhost], [".*", ".*", ".*"]
+ end)
+
+ {
+ :ok,
+ opts: %{
+ node: get_rabbit_hostname(),
+ vhost: context[:vhost]
+ }
+ }
+ end
+
+ test "merge_defaults: defaults can be overridden" do
+ assert @command.merge_defaults([], %{}) == {[], %{vhost: "/"}}
+ assert @command.merge_defaults([], %{vhost: "non_default"}) == {[], %{vhost: "non_default"}}
+ end
+
+ test "validate: wrong number of arguments leads to an arg count error" do
+ assert @command.validate([], %{}) == {:validation_failure, :not_enough_args}
+ assert @command.validate(["insufficient"], %{}) == {:validation_failure, :not_enough_args}
+ assert @command.validate(["not", "enough"], %{}) == {:validation_failure, :not_enough_args}
+ assert @command.validate(["not", "quite", "enough"], %{}) == {:validation_failure, :not_enough_args}
+ assert @command.validate(["this", "is", "way", "too", "many"], %{}) == {:validation_failure, :too_many_args}
+ end
+
+ @tag user: @user, vhost: @vhost
+ test "run: a well-formed, host-specific command returns okay", context do
+ vhost_opts = Map.merge(context[:opts], %{vhost: context[:vhost]})
+
+ assert @command.run(
+ [context[:user], "^#{context[:user]}-.*", ".*", ".*"],
+ vhost_opts
+ ) == :ok
+
+ u = Enum.find(list_permissions(context[:vhost]), fn(x) -> x[:user] == context[:user] end)
+ assert u[:configure] == "^#{context[:user]}-.*"
+ end
+
+ test "run: throws a badrpc when instructed to contact an unreachable RabbitMQ node" do
+ opts = %{node: :jake@thedog, vhost: @vhost, timeout: 200}
+
+ assert match?({:badrpc, _}, @command.run([@user, ".*", ".*", ".*"], opts))
+ end
+
+ @tag user: "interloper", vhost: @root
+ test "run: an invalid user returns a no-such-user error", context do
+ assert @command.run(
+ [context[:user], "^#{context[:user]}-.*", ".*", ".*"],
+ context[:opts]
+ ) == {:error, {:no_such_user, context[:user]}}
+ end
+
+ @tag user: @user, vhost: "wintermute"
+ test "run: an invalid vhost returns a no-such-vhost error", context do
+ vhost_opts = Map.merge(context[:opts], %{vhost: context[:vhost]})
+
+ assert @command.run(
+ [context[:user], "^#{context[:user]}-.*", ".*", ".*"],
+ vhost_opts
+ ) == {:error, {:no_such_vhost, context[:vhost]}}
+ end
+
+ @tag user: @user, vhost: @root
+ test "run: invalid regex patterns returns an error", context do
+ assert @command.run(
+ [context[:user], "^#{context[:user]}-.*", ".*", "*"],
+ context[:opts]
+ ) == {:error, {:invalid_regexp, '*', {'nothing to repeat', 0}}}
+
+ # asserts that the failed command didn't change anything
+ u = Enum.find(list_permissions(context[:vhost]), fn(x) -> x[:user] == context[:user] end)
+ assert u == [user: context[:user], configure: ".*", write: ".*", read: ".*"]
+ end
+
+ @tag user: @user, vhost: @vhost
+ test "banner", context do
+ vhost_opts = Map.merge(context[:opts], %{vhost: context[:vhost]})
+
+ assert @command.banner([context[:user], "^#{context[:user]}-.*", ".*", ".*"], vhost_opts)
+ =~ ~r/Setting permissions for user \"#{context[:user]}\" in vhost \"#{context[:vhost]}\" \.\.\./
+ end
+end
diff --git a/deps/rabbitmq_cli/test/ctl/set_policy_command_test.exs b/deps/rabbitmq_cli/test/ctl/set_policy_command_test.exs
new file mode 100644
index 0000000000..0422933ecb
--- /dev/null
+++ b/deps/rabbitmq_cli/test/ctl/set_policy_command_test.exs
@@ -0,0 +1,218 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+
+defmodule SetPolicyCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Ctl.Commands.SetPolicyCommand
+
+ @vhost "test1"
+ @root "/"
+ @key "federate"
+ @pattern "^fed\."
+ @value "{\"federation-upstream-set\":\"all\"}"
+ @apply_to "all"
+ @priority 0
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ add_vhost @vhost
+
+ enable_federation_plugin()
+
+ on_exit([], fn ->
+ delete_vhost @vhost
+ end)
+
+ :ok
+ end
+
+ setup context do
+
+ on_exit(context, fn ->
+ clear_policy context[:vhost], context[:key]
+ end)
+
+ {
+ :ok,
+ opts: %{
+ node: get_rabbit_hostname(),
+ vhost: "/",
+ apply_to: @apply_to,
+ priority: @priority
+ }
+ }
+ end
+
+ @tag pattern: @pattern, key: @key, value: @value, vhost: @root
+ test "merge_defaults: a well-formed command with no vhost runs against the default" do
+ assert match?({_, %{vhost: "/"}}, @command.merge_defaults([], %{}))
+ end
+
+ test "merge_defaults: does not change defined vhost" do
+ assert match?({[], %{vhost: "test_vhost"}}, @command.merge_defaults([], %{vhost: "test_vhost"}))
+ end
+
+ test "merge_defaults: default apply_to is \"all\"" do
+ assert match?({_, %{apply_to: "all"}}, @command.merge_defaults([], %{}))
+ assert match?({_, %{apply_to: "custom"}}, @command.merge_defaults([], %{apply_to: "custom"}))
+ end
+
+ test "merge_defaults: default priority is 0" do
+ assert match?({_, %{priority: 0}}, @command.merge_defaults([], %{}))
+ assert match?({_, %{priority: 3}}, @command.merge_defaults([], %{priority: 3}))
+ end
+
+ test "validate: providing too few arguments fails validation" do
+ assert @command.validate([], %{}) == {:validation_failure, :not_enough_args}
+ assert @command.validate(["insufficient"], %{}) == {:validation_failure, :not_enough_args}
+ assert @command.validate(["not", "enough"], %{}) == {:validation_failure, :not_enough_args}
+ end
+
+ test "validate: providing too many arguments fails validation" do
+ assert @command.validate(["this", "is", "too", "many"], %{}) == {:validation_failure, :too_many_args}
+ end
+
+ @tag pattern: @pattern, key: @key, value: @value, vhost: @vhost
+ test "run: a well-formed, host-specific command returns okay", context do
+ vhost_opts = Map.merge(context[:opts], %{vhost: context[:vhost]})
+
+ assert @command.run(
+ [context[:key], context[:pattern], context[:value]],
+ vhost_opts
+ ) == :ok
+
+ assert_policy_fields(context)
+ end
+
+ test "run: an unreachable node throws a badrpc" do
+ opts = %{node: :jake@thedog, vhost: "/", priority: 0, apply_to: "all", timeout: 200}
+
+ assert match?({:badrpc, _}, @command.run([@key, @pattern, @value], opts))
+ end
+
+ @tag pattern: @pattern, key: @key, value: @value, vhost: "bad-vhost"
+ test "run: providing a non-existent vhost reports an error", context do
+ vhost_opts = Map.merge(context[:opts], %{vhost: context[:vhost]})
+
+ assert @command.run(
+ [context[:key], context[:pattern], context[:value]],
+ vhost_opts
+ ) == {:error, {:no_such_vhost, context[:vhost]}}
+ end
+
+ @tag pattern: @pattern, key: @key, value: "bad-value", vhost: @root
+ test "run: an invalid value returns a JSON decoding error", context do
+ assert match?({:error_string, _},
+ @command.run([context[:key], context[:pattern], context[:value]],
+ context[:opts]))
+
+ assert list_policies(context[:vhost]) == []
+ end
+
+ @tag pattern: @pattern, key: @key, value: "{\"foo\":\"bar\"}", vhost: @root
+ test "run: invalid policy returns an error", context do
+ assert @command.run(
+ [context[:key], context[:pattern], context[:value]],
+ context[:opts]
+ ) == {:error_string, 'Validation failed\n\n[{<<"foo">>,<<"bar">>}] are not recognised policy settings\n'}
+
+ assert list_policies(context[:vhost]) == []
+ end
+
+ @tag pattern: @pattern, key: @key, value: "{}", vhost: @root
+ test "run: an empty JSON object value returns an error", context do
+ assert @command.run(
+ [context[:key], context[:pattern], context[:value]],
+ context[:opts]
+ ) == {:error_string, 'Validation failed\n\nno policy provided\n'}
+
+ assert list_policies(context[:vhost]) == []
+ end
+
+ @tag pattern: @pattern, key: @key, value: @value, vhost: @vhost
+ test "banner", context do
+ vhost_opts = Map.merge(context[:opts], %{vhost: context[:vhost]})
+
+ assert @command.banner([context[:key], context[:pattern], context[:value]], vhost_opts)
+ == "Setting policy \"#{context[:key]}\" for pattern \"#{context[:pattern]}\" to \"#{context[:value]}\" with priority \"#{context[:opts][:priority]}\" for vhost \"#{context[:vhost]}\" \.\.\."
+ end
+
+ @tag pattern: "ha_", key: "ha_policy_test", vhost: @vhost
+ test "ha policy validation", context do
+ vhost_opts = Map.merge(context[:opts], %{vhost: context[:vhost]})
+ context = Map.put(context, :opts, vhost_opts)
+ pass_validation(context, "{\"ha-mode\":\"all\"}")
+ fail_validation(context, "{\"ha-mode\":\"made_up\"}")
+
+ fail_validation(context, "{\"ha-mode\":\"nodes\"}")
+ fail_validation(context, "{\"ha-mode\":\"nodes\",\"ha-params\":2}")
+ fail_validation(context, "{\"ha-mode\":\"nodes\",\"ha-params\":[\"a\",2]}")
+ pass_validation(context, "{\"ha-mode\":\"nodes\",\"ha-params\":[\"a\",\"b\"]}")
+ fail_validation(context, "{\"ha-params\":[\"a\",\"b\"]}")
+
+ fail_validation(context, "{\"ha-mode\":\"exactly\"}")
+ fail_validation(context, "{\"ha-mode\":\"exactly\",\"ha-params\":[\"a\",\"b\"]}")
+ pass_validation(context, "{\"ha-mode\":\"exactly\",\"ha-params\":2}")
+ fail_validation(context, "{\"ha-params\":2}")
+
+ pass_validation(context, "{\"ha-mode\":\"all\",\"ha-sync-mode\":\"manual\"}")
+ pass_validation(context, "{\"ha-mode\":\"all\",\"ha-sync-mode\":\"automatic\"}")
+ fail_validation(context, "{\"ha-mode\":\"all\",\"ha-sync-mode\":\"made_up\"}")
+ fail_validation(context, "{\"ha-sync-mode\":\"manual\"}")
+ fail_validation(context, "{\"ha-sync-mode\":\"automatic\"}")
+ end
+
+ @tag pattern: "ha_", key: "ha_policy_test", vhost: @vhost
+ test "queue master locator policy validation", context do
+ vhost_opts = Map.merge(context[:opts], %{vhost: context[:vhost]})
+ context = Map.put(context, :opts, vhost_opts)
+ pass_validation(context, "{\"queue-master-locator\":\"min-masters\"}")
+ pass_validation(context, "{\"queue-master-locator\":\"client-local\"}")
+ pass_validation(context, "{\"queue-master-locator\":\"random\"}")
+ fail_validation(context, "{\"queue-master-locator\":\"made_up\"}")
+ end
+
+ @tag pattern: "ha_", key: "ha_policy_test", vhost: @vhost
+ test "queue modes policy validation", context do
+ vhost_opts = Map.merge(context[:opts], %{vhost: context[:vhost]})
+ context = Map.put(context, :opts, vhost_opts)
+ pass_validation(context, "{\"queue-mode\":\"lazy\"}")
+ pass_validation(context, "{\"queue-mode\":\"default\"}")
+ fail_validation(context, "{\"queue-mode\":\"wrong\"}")
+ end
+
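+  # helpers: run the command with the given definition and assert that server-side validation accepts or rejects it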
+ def pass_validation(context, value) do
+ assert @command.run(
+ [context[:key], context[:pattern], value],
+ context[:opts]
+ ) == :ok
+ assert_policy_fields(Map.merge(context, %{value: value}))
+ end
+
+ def fail_validation(context, value) do
+ result = @command.run(
+ [context[:key], context[:pattern], value],
+ context[:opts]
+ )
+ assert {:error_string, _} = result
+ {:error_string, msg} = result
+ assert "Validation failed"<>_ = to_string(msg)
+ end
+
+ # Checks each element of the first policy against the expected context values
+ defp assert_policy_fields(context) do
+ result_policy = context[:vhost] |> list_policies |> List.first
+ assert result_policy[:definition] == context[:value]
+ assert result_policy[:vhost] == context[:vhost]
+ assert result_policy[:pattern] == context[:pattern]
+ assert result_policy[:name] == context[:key]
+ end
+end
diff --git a/deps/rabbitmq_cli/test/ctl/set_topic_permissions_command_test.exs b/deps/rabbitmq_cli/test/ctl/set_topic_permissions_command_test.exs
new file mode 100644
index 0000000000..f117f5a789
--- /dev/null
+++ b/deps/rabbitmq_cli/test/ctl/set_topic_permissions_command_test.exs
@@ -0,0 +1,115 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+
+defmodule SetTopicPermissionsCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Ctl.Commands.SetTopicPermissionsCommand
+
+ @vhost "test1"
+ @user "guest"
+ @root "/"
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ add_vhost @vhost
+
+ on_exit([], fn ->
+ delete_vhost @vhost
+ end)
+
+ :ok
+ end
+
+ setup context do
+
+ on_exit(context, fn ->
+ clear_topic_permissions context[:user], context[:vhost]
+ end)
+
+ {
+ :ok,
+ opts: %{
+ node: get_rabbit_hostname(),
+ vhost: context[:vhost]
+ }
+ }
+ end
+
+ test "merge_defaults: defaults can be overridden" do
+ assert @command.merge_defaults([], %{}) == {[], %{vhost: "/"}}
+ assert @command.merge_defaults([], %{vhost: "non_default"}) == {[], %{vhost: "non_default"}}
+ end
+
+ test "validate: expects username, exchange, and pattern arguments" do
+ assert @command.validate([], %{}) == {:validation_failure, :not_enough_args}
+ assert @command.validate(["insufficient"], %{}) == {:validation_failure, :not_enough_args}
+ assert @command.validate(["not", "enough"], %{}) == {:validation_failure, :not_enough_args}
+ assert @command.validate(["still", "not", "enough"], %{}) == {:validation_failure, :not_enough_args}
+ assert @command.validate(["this", "is", "way", "too", "many"], %{}) == {:validation_failure, :too_many_args}
+ end
+
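+  # argument order: username, exchange, write pattern, read pattern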
+ @tag user: @user, vhost: @vhost
+ test "run: a well-formed, host-specific command returns okay", context do
+ vhost_opts = Map.merge(context[:opts], %{vhost: context[:vhost]})
+
+ assert @command.run(
+ [context[:user], "amq.topic", "^a", "^b"],
+ vhost_opts
+ ) == :ok
+
+ assert List.first(list_user_topic_permissions(context[:user]))[:write] == "^a"
+ assert List.first(list_user_topic_permissions(context[:user]))[:read] == "^b"
+ end
+
+ test "run: throws a badrpc when instructed to contact an unreachable RabbitMQ node" do
+ opts = %{node: :jake@thedog, vhost: @vhost, timeout: 200}
+
+ assert match?({:badrpc, _}, @command.run([@user, "amq.topic", "^a", "^b"], opts))
+ end
+
+ @tag user: "interloper", vhost: @root
+ test "run: an invalid user returns a no-such-user error", context do
+ assert @command.run(
+ [context[:user], "amq.topic", "^a", "^b"],
+ context[:opts]
+ ) == {:error, {:no_such_user, context[:user]}}
+ end
+
+ @tag user: @user, vhost: "wintermute"
+ test "run: an invalid vhost returns a no-such-vhost error", context do
+ vhost_opts = Map.merge(context[:opts], %{vhost: context[:vhost]})
+
+ assert @command.run(
+ [context[:user], "amq.topic", "^a", "^b"],
+ vhost_opts
+ ) == {:error, {:no_such_vhost, context[:vhost]}}
+
+ assert Enum.count(list_user_topic_permissions(context[:user])) == 0
+ end
+
+ @tag user: @user, vhost: @root
+ test "run: invalid regex patterns return error", context do
+ n = Enum.count(list_user_topic_permissions(context[:user]))
+ {:error, {:invalid_regexp, _, _}} = @command.run(
+ [context[:user], "amq.topic", "[", "^b"],
+ context[:opts]
+ )
+ assert Enum.count(list_user_topic_permissions(context[:user])) == n
+ end
+
+ @tag user: @user, vhost: @vhost
+ test "banner", context do
+ vhost_opts = Map.merge(context[:opts], %{vhost: context[:vhost]})
+
+ assert @command.banner([context[:user], "amq.topic", "^a", "^b"], vhost_opts)
+ =~ ~r/Setting topic permissions on \"amq.topic\" for user \"#{context[:user]}\" in vhost \"#{context[:vhost]}\" \.\.\./
+ end
+end
diff --git a/deps/rabbitmq_cli/test/ctl/set_user_limits_command_test.exs b/deps/rabbitmq_cli/test/ctl/set_user_limits_command_test.exs
new file mode 100644
index 0000000000..6179267396
--- /dev/null
+++ b/deps/rabbitmq_cli/test/ctl/set_user_limits_command_test.exs
@@ -0,0 +1,138 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule SetUserLimitsCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Ctl.Commands.SetUserLimitsCommand
+
+ @user "someone"
+ @password "password"
+ @conn_definition "{\"max-connections\":100}"
+ @channel_definition "{\"max-channels\":200}"
+ @definition "{\"max-connections\":50, \"max-channels\":500}"
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ add_user @user, @password
+
+ on_exit([], fn ->
+ delete_user @user
+ end)
+
+ :ok
+ end
+
+ setup context do
+ user = context[:user] || @user
+
+ clear_user_limits(user)
+
+ on_exit(context, fn ->
+ clear_user_limits(user)
+ end)
+
+ {
+ :ok,
+ opts: %{
+ node: get_rabbit_hostname()
+ },
+ user: user
+ }
+ end
+
+ test "validate: providing too few arguments fails validation" do
+ assert @command.validate([], %{}) == {:validation_failure, :not_enough_args}
+ assert @command.validate(["not-enough"], %{}) == {:validation_failure, :not_enough_args}
+ end
+
+ test "validate: providing too many arguments fails validation" do
+ assert @command.validate(["is", "too", "many"], %{}) == {:validation_failure, :too_many_args}
+ assert @command.validate(["this", "is", "too", "many"], %{}) == {:validation_failure, :too_many_args}
+ end
+
+ test "run: a well-formed, host-specific command returns okay", context do
+ assert @command.run(
+ [context[:user],
+ @conn_definition],
+ context[:opts]
+ ) == :ok
+
+ assert_limits(context, @conn_definition)
+ clear_user_limits(context[:user])
+
+ assert @command.run(
+ [context[:user],
+ @channel_definition],
+ context[:opts]
+ ) == :ok
+
+ assert_limits(context, @channel_definition)
+ end
+
+ test "run: a well-formed command to set both max-connections and max-channels returns okay", context do
+ assert @command.run(
+ [context[:user],
+ @definition],
+ context[:opts]
+ ) == :ok
+
+ assert_limits(context, @definition)
+ end
+
+ test "run: an unreachable node throws a badrpc" do
+ opts = %{node: :jake@thedog, timeout: 200}
+
+ assert match?({:badrpc, _}, @command.run([@user, @conn_definition], opts))
+ end
+
+ @tag user: "non-existent-user"
+ test "run: providing a non-existent user reports an error", context do
+
+ assert @command.run(
+ [context[:user],
+ @conn_definition],
+ context[:opts]
+ ) == {:error, {:no_such_user, context[:user]}}
+ end
+
+ test "run: an invalid definition returns a JSON decoding error", context do
+ assert match?({:error_string, _},
+ @command.run(
+ [context[:user],
+ ["this_is_not_json"]],
+ context[:opts]))
+
+ assert get_user_limits(context[:user]) == %{}
+ end
+
+ test "run: invalid limit returns an error", context do
+ assert @command.run(
+ [context[:user],
+ "{\"foo\":\"bar\"}"],
+ context[:opts]
+ ) == {:error_string, 'Unrecognised terms [{<<"foo">>,<<"bar">>}] in user-limits'}
+
+ assert get_user_limits(context[:user]) == %{}
+ end
+
+ test "banner", context do
+ assert @command.banner([context[:user], context[:conn_definition]], context[:opts])
+ == "Setting user limits to \"#{context[:conn_definition]}\" for user \"#{context[:user]}\" ..."
+ end
+
+ #
+ # Implementation
+ #
+
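+  # fetches the effective limits from the node and compares them against the decoded JSON definition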
+ defp assert_limits(context, definition) do
+ limits = get_user_limits(context[:user])
+ assert {:ok, limits} == JSON.decode(definition)
+ end
+end
diff --git a/deps/rabbitmq_cli/test/ctl/set_user_tags_command_test.exs b/deps/rabbitmq_cli/test/ctl/set_user_tags_command_test.exs
new file mode 100644
index 0000000000..cdc51e673f
--- /dev/null
+++ b/deps/rabbitmq_cli/test/ctl/set_user_tags_command_test.exs
@@ -0,0 +1,145 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+
+defmodule SetUserTagsCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Ctl.Commands.SetUserTagsCommand
+
+ @user "user1"
+ @password "password"
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ add_user @user, @password
+
+ on_exit([], fn ->
+ delete_user(@user)
+ end)
+
+ :ok
+ end
+
+ setup context do
+ context[:user] # silences warnings
+ on_exit([], fn -> set_user_tags(context[:user], []) end)
+
+ {:ok, opts: %{node: get_rabbit_hostname()}}
+ end
+
+ test "validate: on an incorrect number of arguments, return an arg count error" do
+ assert @command.validate([], %{}) == {:validation_failure, :not_enough_args}
+ end
+
+ test "run: throws a badrpc when instructed to contact an unreachable RabbitMQ node" do
+ opts = %{node: :jake@thedog, timeout: 200}
+
+ assert match?({:badrpc, _}, @command.run([@user, :imperator], opts))
+ end
+
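+  # tags are passed as extra positional arguments after the username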
+ @tag user: @user, tags: [:imperator]
+ test "run: on a single optional argument, add a flag to the user", context do
+ @command.run(
+ [context[:user] | context[:tags]],
+ context[:opts]
+ )
+
+ result = Enum.find(
+ list_users(),
+ fn(record) -> record[:user] == context[:user] end
+ )
+
+ assert result[:tags] == context[:tags]
+ end
+
+ @tag user: "interloper", tags: [:imperator]
+ test "run: on an invalid user, get a no such user error", context do
+ assert @command.run(
+ [context[:user] | context[:tags]],
+ context[:opts]
+ ) == {:error, {:no_such_user, context[:user]}}
+ end
+
+ @tag user: @user, tags: [:imperator, :generalissimo]
+ test "run: on multiple optional arguments, add all flags to the user", context do
+ @command.run(
+ [context[:user] | context[:tags]],
+ context[:opts]
+ )
+
+ result = Enum.find(
+ list_users(),
+ fn(record) -> record[:user] == context[:user] end
+ )
+
+ assert result[:tags] == context[:tags]
+ end
+
+ @tag user: @user, tags: [:imperator]
+ test "run: with no optional arguments, clear user tags", context do
+
+ set_user_tags(context[:user], context[:tags])
+
+ @command.run([context[:user]], context[:opts])
+
+ result = Enum.find(
+ list_users(),
+ fn(record) -> record[:user] == context[:user] end
+ )
+
+ assert result[:tags] == []
+ end
+
+ @tag user: @user, tags: [:imperator]
+ test "run: identical calls are idempotent", context do
+
+ set_user_tags(context[:user], context[:tags])
+
+ assert @command.run(
+ [context[:user] | context[:tags]],
+ context[:opts]
+ ) == :ok
+
+ result = Enum.find(
+ list_users(),
+ fn(record) -> record[:user] == context[:user] end
+ )
+
+ assert result[:tags] == context[:tags]
+ end
+
+ @tag user: @user, old_tags: [:imperator], new_tags: [:generalissimo]
+ test "run: if different tags exist, overwrite them", context do
+
+ set_user_tags(context[:user], context[:old_tags])
+
+ assert @command.run(
+ [context[:user] | context[:new_tags]],
+ context[:opts]
+ ) == :ok
+
+ result = Enum.find(
+ list_users(),
+ fn(record) -> record[:user] == context[:user] end
+ )
+
+ assert result[:tags] == context[:new_tags]
+ end
+
+ @tag user: @user, tags: ["imperator"]
+ test "banner", context do
+ assert @command.banner(
+ [context[:user] | context[:tags]],
+ context[:opts]
+ )
+ =~ ~r/Setting tags for user \"#{context[:user]}\" to \[#{context[:tags]}\] \.\.\./
+ end
+
+end
diff --git a/deps/rabbitmq_cli/test/ctl/set_vhost_limits_command_test.exs b/deps/rabbitmq_cli/test/ctl/set_vhost_limits_command_test.exs
new file mode 100644
index 0000000000..b5c679b02f
--- /dev/null
+++ b/deps/rabbitmq_cli/test/ctl/set_vhost_limits_command_test.exs
@@ -0,0 +1,138 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+
+defmodule SetVhostLimitsCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Ctl.Commands.SetVhostLimitsCommand
+
+ @vhost "test1"
+ @definition "{\"max-connections\":100}"
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ add_vhost @vhost
+
+ on_exit([], fn ->
+ delete_vhost @vhost
+ end)
+
+ :ok
+ end
+
+ setup context do
+
+ vhost = context[:vhost] || @vhost
+
+ clear_vhost_limits(vhost)
+
+ on_exit(context, fn ->
+ clear_vhost_limits(vhost)
+ end)
+
+ {
+ :ok,
+ opts: %{
+ node: get_rabbit_hostname(),
+ vhost: vhost
+ },
+ definition: context[:definition] || @definition,
+ vhost: vhost
+ }
+ end
+
+ test "merge_defaults: a well-formed command with no vhost runs against the default" do
+ assert match?({_, %{vhost: "/"}}, @command.merge_defaults([], %{}))
+ end
+
+ test "merge_defaults: does not change defined vhost" do
+ assert match?({[], %{vhost: "test_vhost"}}, @command.merge_defaults([], %{vhost: "test_vhost"}))
+ end
+
+ test "validate: providing too few arguments fails validation" do
+ assert @command.validate([], %{}) == {:validation_failure, :not_enough_args}
+ end
+
+ test "validate: providing too many arguments fails validation" do
+ assert @command.validate(["too", "many"], %{}) == {:validation_failure, :too_many_args}
+ assert @command.validate(["is", "too", "many"], %{}) == {:validation_failure, :too_many_args}
+ assert @command.validate(["this", "is", "too", "many"], %{}) == {:validation_failure, :too_many_args}
+ end
+
+ test "run: a well-formed, host-specific command returns okay", context do
+ assert @command.run(
+ [context[:definition]],
+ context[:opts]
+ ) == :ok
+
+ assert_limits(context)
+ end
+
+ test "run: an unreachable node throws a badrpc" do
+ opts = %{node: :jake@thedog, vhost: "/", timeout: 200}
+
+ assert match?({:badrpc, _}, @command.run([@definition], opts))
+ end
+
+ @tag vhost: "bad-vhost"
+ test "run: providing a non-existent vhost reports an error", context do
+ vhost_opts = Map.merge(context[:opts], %{vhost: context[:vhost]})
+
+ assert @command.run(
+ [context[:definition]],
+ vhost_opts
+ ) == {:error, {:no_such_vhost, context[:vhost]}}
+ end
+
+ test "run: an invalid definition returns a JSON decoding error", context do
+ assert match?({:error_string, _},
+ @command.run(["bad_value"], context[:opts]))
+
+ assert get_vhost_limits(context[:vhost]) == %{}
+ end
+
+ test "run: invalid limit returns an error", context do
+ assert @command.run(
+ ["{\"foo\":\"bar\"}"],
+ context[:opts]
+ ) == {:error_string, 'Validation failed\n\nUnrecognised terms [{<<"foo">>,<<"bar">>}] in limits\n'}
+
+ assert get_vhost_limits(context[:vhost]) == %{}
+ end
+
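+  # setting a definition of "{}" clears any previously configured limits for the vhost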
+ test "run: an empty JSON object definition unsets all limits for vhost", context do
+
+ assert @command.run(
+ [@definition],
+ context[:opts]
+ ) == :ok
+
+ assert_limits(context)
+
+ assert @command.run(
+ ["{}"],
+ context[:opts]
+ ) == :ok
+
+ assert get_vhost_limits(context[:vhost]) == %{}
+ end
+
+ test "banner", context do
+ vhost_opts = Map.merge(context[:opts], %{vhost: context[:vhost]})
+
+ assert @command.banner([context[:definition]], vhost_opts)
+ == "Setting vhost limits to \"#{context[:definition]}\" for vhost \"#{context[:vhost]}\" ..."
+ end
+
+ defp assert_limits(context) do
+ limits = get_vhost_limits(context[:vhost])
+ assert {:ok, limits} == JSON.decode(context[:definition])
+ end
+end
diff --git a/deps/rabbitmq_cli/test/ctl/set_vm_memory_high_watermark_command_test.exs b/deps/rabbitmq_cli/test/ctl/set_vm_memory_high_watermark_command_test.exs
new file mode 100644
index 0000000000..bd9719ab40
--- /dev/null
+++ b/deps/rabbitmq_cli/test/ctl/set_vm_memory_high_watermark_command_test.exs
@@ -0,0 +1,163 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule SetVmMemoryHighWatermarkCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+ import RabbitMQ.CLI.Core.{Alarms, Memory}
+
+ @command RabbitMQ.CLI.Ctl.Commands.SetVmMemoryHighWatermarkCommand
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+    start_rabbitmq_app()
+ reset_vm_memory_high_watermark()
+
+ on_exit([], fn ->
+ start_rabbitmq_app()
+ reset_vm_memory_high_watermark()
+ end)
+
+ {:ok, opts: %{node: get_rabbit_hostname()}}
+ end
+
+ test "validate: a string returns an error", context do
+ assert @command.validate(["sandwich"], context[:opts]) == {:validation_failure, :bad_argument}
+ assert @command.validate(["0.4sandwich"], context[:opts]) == {:validation_failure, :bad_argument}
+ end
+
+ test "validate: valid numerical value returns valid", context do
+ assert @command.validate(["0.7"], context[:opts]) == :ok
+ assert @command.validate(["1"], context[:opts]) == :ok
+ end
+
+ test "run: valid numerical value returns valid", context do
+ assert @command.run([0.7], context[:opts]) == :ok
+ assert status()[:vm_memory_high_watermark] == 0.7
+
+ assert @command.run([1], context[:opts]) == :ok
+ assert status()[:vm_memory_high_watermark] == 1
+ end
+
+ test "validate: validate a valid numerical string value returns valid", context do
+ assert @command.validate(["0.7"], context[:opts]) == :ok
+ assert @command.validate(["1"], context[:opts]) == :ok
+ end
+
+ test "validate: the wrong number of arguments returns an arg count error" do
+ assert @command.validate([], %{}) == {:validation_failure, :not_enough_args}
+ assert @command.validate(["too", "many"], %{}) == {:validation_failure, :too_many_args}
+ end
+
+ test "validate: a negative number returns a bad argument", context do
+ assert @command.validate(["-0.1"], context[:opts]) == {:validation_failure, {:bad_argument, "The threshold should be a fraction between 0.0 and 1.0"}}
+ end
+
+ test "validate: a percentage returns a bad argument", context do
+ assert @command.validate(["40"], context[:opts]) == {:validation_failure, {:bad_argument, "The threshold should be a fraction between 0.0 and 1.0"}}
+ end
+
+ test "validate: a value greater than 1.0 returns a bad argument", context do
+ assert @command.validate(["1.1"], context[:opts]) == {:validation_failure, {:bad_argument, "The threshold should be a fraction between 0.0 and 1.0"}}
+ end
+
+ @tag test_timeout: 3000
+ test "run: on an invalid node, return a bad rpc" do
+ args = [0.7]
+ opts = %{node: :jake@thedog, timeout: 200}
+
+ assert match?({:badrpc, _}, @command.run(args, opts))
+ end
+
+## ---------------------------- Absolute tests --------------------------------
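+## "absolute" sets the watermark as a byte count (optionally with a unit suffix such as MB) rather than a fraction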
+
+ test "validate: an absolute call without an argument returns not enough args" do
+ assert @command.validate(["absolute"], %{}) == {:validation_failure, :not_enough_args}
+ end
+
+ test "validate: an absolute call with too many arguments returns too many args" do
+ assert @command.validate(["absolute", "too", "many"], %{}) ==
+ {:validation_failure, :too_many_args}
+ end
+
+ test "validate: a single absolute integer return valid", context do
+ assert @command.validate(["absolute","10"], context[:opts]) == :ok
+ end
+ test "run: a single absolute integer return ok", context do
+ assert @command.run(["absolute","10"], context[:opts]) == :ok
+ assert status()[:vm_memory_high_watermark] == {:absolute, memory_unit_absolute(10, "")}
+ end
+
+ test "validate: a single absolute integer with an invalid memory unit fails ", context do
+ assert @command.validate(["absolute","10bytes"], context[:opts]) == {:validation_failure, {:bad_argument, "Invalid units."}}
+ end
+
+ test "validate: a single absolute float with a valid memory unit fails ", context do
+ assert @command.validate(["absolute","10.0MB"], context[:opts]) == {:validation_failure, {:bad_argument, "The threshold should be an integer."}}
+ end
+
+ test "validate: a single absolute float with an invalid memory unit fails ", context do
+ assert @command.validate(["absolute","10.0bytes"], context[:opts]) == {:validation_failure, {:bad_argument, "The threshold should be an integer."}}
+ end
+
+ test "validate: a single absolute string fails ", context do
+ assert @command.validate(["absolute","large"], context[:opts]) == {:validation_failure, :bad_argument}
+ end
+
+ test "validate: a single absolute string with a valid unit fails ", context do
+ assert @command.validate(["absolute","manyGB"], context[:opts]) == {:validation_failure, :bad_argument}
+ end
+
+ test "run: a single absolute integer with memory units return ok", context do
+ memory_units()
+ |> Enum.each(fn mu ->
+ arg = "10#{mu}"
+ assert @command.run(["absolute",arg], context[:opts]) == :ok
+ assert status()[:vm_memory_high_watermark] == {:absolute, memory_unit_absolute(10, mu)}
+ end)
+ end
+
+ test "run: low watermark sets alarm", context do
+ old_watermark = status()[:vm_memory_high_watermark]
+ on_exit(fn() ->
+ args = case old_watermark do
+ {:absolute, val} -> ["absolute", to_string(val)];
+ other -> [to_string(other)]
+ end
+ @command.run(args, context[:opts])
+ end)
+ ## this will trigger an alarm
+ @command.run(["absolute", "2000"], context[:opts])
+
+ assert [:memory] == alarm_types(status()[:alarms])
+ end
+
+ test "banner: absolute memory request prints info message", context do
+ assert @command.banner(["absolute", "10"], context[:opts])
+ =~ ~r/Setting memory threshold on #{get_rabbit_hostname()} to 10 bytes .../
+
+ assert @command.banner(["absolute", "-10"], context[:opts])
+ =~ ~r/Setting memory threshold on #{get_rabbit_hostname()} to -10 bytes .../
+
+ assert @command.banner(["absolute", "sandwich"], context[:opts])
+ =~ ~r/Setting memory threshold on #{get_rabbit_hostname()} to sandwich bytes .../
+ end
+
+ test "banner, relative memory", context do
+ assert @command.banner(["0.7"], context[:opts])
+ =~ ~r/Setting memory threshold on #{get_rabbit_hostname()} to 0.7 .../
+
+ assert @command.banner(["-0.7"], context[:opts])
+ =~ ~r/Setting memory threshold on #{get_rabbit_hostname()} to -0.7 .../
+
+ assert @command.banner(["sandwich"], context[:opts])
+ =~ ~r/Setting memory threshold on #{get_rabbit_hostname()} to sandwich .../
+ end
+end
diff --git a/deps/rabbitmq_cli/test/ctl/shutdown_command_test.exs b/deps/rabbitmq_cli/test/ctl/shutdown_command_test.exs
new file mode 100644
index 0000000000..153c136c4b
--- /dev/null
+++ b/deps/rabbitmq_cli/test/ctl/shutdown_command_test.exs
@@ -0,0 +1,54 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+
+defmodule ShutdownCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Ctl.Commands.ShutdownCommand
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ :ok
+ end
+
+ setup do
+ {:ok, opts: %{node: get_rabbit_hostname(), timeout: 15}}
+ end
+
+ test "validate: accepts no arguments", context do
+ assert @command.validate([], context[:opts]) == :ok
+ end
+
+ test "validate: with extra arguments returns an arg count error", context do
+ assert @command.validate(["extra"], context[:opts]) == {:validation_failure, :too_many_args}
+ end
+
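+  # wait mode only supports shutting down a node that runs on the same host as the CLI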
+ test "validate: in wait mode, checks if local and target node hostnames match" do
+ assert match?({:validation_failure, {:unsupported_target, _}},
+ @command.validate([], %{wait: true, node: :'rabbit@some.remote.hostname'}))
+ end
+
+ test "validate: in wait mode, always assumes @localhost nodes are local" do
+ assert @command.validate([], %{wait: true, node: :rabbit@localhost}) == :ok
+ end
+
+ test "validate: in no wait mode, passes unconditionally", context do
+ assert @command.validate([], Map.merge(%{wait: false}, context[:opts])) == :ok
+ end
+
+ test "run: request to a non-existent node returns a badrpc" do
+ opts = %{node: :jake@thedog, wait: false, timeout: 200}
+ assert match?({:badrpc, _}, @command.run([], opts))
+ end
+
+ test "empty banner", context do
+ nil = @command.banner([], context[:opts])
+ end
+end
diff --git a/deps/rabbitmq_cli/test/ctl/start_app_command_test.exs b/deps/rabbitmq_cli/test/ctl/start_app_command_test.exs
new file mode 100644
index 0000000000..bdd8632842
--- /dev/null
+++ b/deps/rabbitmq_cli/test/ctl/start_app_command_test.exs
@@ -0,0 +1,50 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+
+defmodule StartAppCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Ctl.Commands.StartAppCommand
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ start_rabbitmq_app()
+
+ on_exit([], fn ->
+ start_rabbitmq_app()
+ end)
+
+ :ok
+ end
+
+ setup do
+ {:ok, opts: %{node: get_rabbit_hostname()}}
+ end
+
+ test "validate: with extra arguments returns an arg count error", context do
+ assert @command.validate(["extra"], context[:opts]) == {:validation_failure, :too_many_args}
+ end
+
+ test "run: request to an active node succeeds", context do
+ node = RabbitMQ.CLI.Core.Helpers.normalise_node(context[:node], :shortnames)
+ stop_rabbitmq_app()
+ refute :rabbit_misc.rpc_call(node, :rabbit, :is_running, [])
+ assert @command.run([], context[:opts])
+ assert :rabbit_misc.rpc_call(node, :rabbit, :is_running, [])
+ end
+
+ test "run: request to a non-existent node returns a badrpc" do
+ opts = %{node: :jake@thedog, timeout: 200}
+ assert match?({:badrpc, _}, @command.run([], opts))
+ end
+
+ test "banner", context do
+ assert @command.banner([], context[:opts]) =~ ~r/Starting node #{get_rabbit_hostname()}/
+ end
+end
diff --git a/deps/rabbitmq_cli/test/ctl/status_command_test.exs b/deps/rabbitmq_cli/test/ctl/status_command_test.exs
new file mode 100644
index 0000000000..03ab6cb8fc
--- /dev/null
+++ b/deps/rabbitmq_cli/test/ctl/status_command_test.exs
@@ -0,0 +1,40 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+
+defmodule StatusCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Ctl.Commands.StatusCommand
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ :ok
+ end
+
+ setup do
+ {:ok, opts: %{node: get_rabbit_hostname(), timeout: 60_000}}
+ end
+
+ test "validate: with extra arguments returns an arg count error", context do
+ assert @command.validate(["extra"], context[:opts]) == {:validation_failure, :too_many_args}
+ end
+
+ test "run: request to a named, active node succeeds", context do
+ assert @command.run([], context[:opts])[:pid] != nil
+ end
+
+ test "run: request to a non-existent node returns a badrpc" do
+ opts = %{node: :jake@thedog, timeout: 200}
+ assert match?({:badrpc, _}, @command.run([], opts))
+ end
+
+ test "banner", context do
+ assert @command.banner([], context[:opts]) =~ ~r/Status of node #{get_rabbit_hostname()}/
+ end
+end
diff --git a/deps/rabbitmq_cli/test/ctl/stop_app_command_test.exs b/deps/rabbitmq_cli/test/ctl/stop_app_command_test.exs
new file mode 100644
index 0000000000..60551b2189
--- /dev/null
+++ b/deps/rabbitmq_cli/test/ctl/stop_app_command_test.exs
@@ -0,0 +1,49 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+
+defmodule StopAppCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Ctl.Commands.StopAppCommand
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ start_rabbitmq_app()
+
+ on_exit([], fn ->
+ start_rabbitmq_app()
+ end)
+
+ :ok
+ end
+
+ setup do
+ {:ok, opts: %{node: get_rabbit_hostname()}}
+ end
+
+ test "validate: with extra arguments returns an arg count error", context do
+ assert @command.validate(["extra"], context[:opts]) == {:validation_failure, :too_many_args}
+ end
+
+ test "run: request to an active node succeeds", context do
+ node = RabbitMQ.CLI.Core.Helpers.normalise_node(context[:node], :shortnames)
+ assert :rabbit_misc.rpc_call(node, :rabbit, :is_running, [])
+ assert @command.run([], context[:opts])
+ refute :rabbit_misc.rpc_call(node, :rabbit, :is_running, [])
+ end
+
+ test "run: request to a non-existent node returns a badrpc" do
+ opts = %{node: :jake@thedog, timeout: 200}
+ assert match?({:badrpc, _}, @command.run([], opts))
+ end
+
+ test "banner", context do
+ assert @command.banner([], context[:opts]) =~ ~r/Stopping rabbit application on node #{get_rabbit_hostname()}/
+ end
+end
diff --git a/deps/rabbitmq_cli/test/ctl/stop_command_test.exs b/deps/rabbitmq_cli/test/ctl/stop_command_test.exs
new file mode 100644
index 0000000000..2f1dca2eae
--- /dev/null
+++ b/deps/rabbitmq_cli/test/ctl/stop_command_test.exs
@@ -0,0 +1,53 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+
+defmodule StopCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Ctl.Commands.StopCommand
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ :ok
+ end
+
+ setup do
+ {:ok, opts: %{node: get_rabbit_hostname(),
+ idempotent: false}}
+ end
+
+ test "validate accepts no arguments", context do
+ assert @command.validate([], context[:opts]) == :ok
+ end
+
+ test "validate accepts a PID file path", context do
+ assert @command.validate(["/path/to/pidfile.pid"], context[:opts]) == :ok
+ end
+
+ test "validate: with extra arguments returns an arg count error", context do
+ assert @command.validate(["/path/to/pidfile.pid", "extra"], context[:opts]) == {:validation_failure, :too_many_args}
+ end
+
+  # NB: as this command shuts down the Erlang VM, it isn't really practical to test it here
+
+ test "run: request to a non-existent node with --idempotent=false returns a badrpc" do
+ opts = %{node: :jake@thedog, idempotent: false, timeout: 200}
+ assert match?({:badrpc, _}, @command.run([], opts))
+ end
+
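+  # with --idempotent, stopping a node that is already down or unreachable is treated as success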
+ test "run: request to a non-existent node with --idempotent returns ok" do
+ opts = %{node: :jake@thedog, idempotent: true, timeout: 200}
+ assert match?({:ok, _}, @command.run([], opts))
+ end
+
+ test "banner", context do
+ assert @command.banner([], context[:opts]) =~ ~r/Stopping and halting node #{get_rabbit_hostname()}/
+ end
+end
diff --git a/deps/rabbitmq_cli/test/ctl/suspend_listeners_command_test.exs b/deps/rabbitmq_cli/test/ctl/suspend_listeners_command_test.exs
new file mode 100644
index 0000000000..602cdf9f8b
--- /dev/null
+++ b/deps/rabbitmq_cli/test/ctl/suspend_listeners_command_test.exs
@@ -0,0 +1,68 @@
+## The contents of this file are subject to the Mozilla Public License
+## Version 1.1 (the "License"); you may not use this file except in
+## compliance with the License. You may obtain a copy of the License
+## at https://www.mozilla.org/MPL/
+##
+## Software distributed under the License is distributed on an "AS IS"
+## basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+## the License for the specific language governing rights and
+## limitations under the License.
+##
+## The Original Code is RabbitMQ.
+##
+## The Initial Developer of the Original Code is GoPivotal, Inc.
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule SuspendListenersCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Ctl.Commands.SuspendListenersCommand
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+ resume_all_client_listeners()
+
+ node_name = get_rabbit_hostname()
+ on_exit(fn ->
+ resume_all_client_listeners()
+ close_all_connections(node_name)
+ end)
+
+ {:ok, opts: %{node: node_name, timeout: 30_000}}
+ end
+
+ setup do
+ {:ok, opts: %{node: get_rabbit_hostname()}}
+ end
+
+ test "merge_defaults: merges no defaults" do
+ assert @command.merge_defaults([], %{}) == {[], %{}}
+ end
+
+ test "validate: accepts no arguments", context do
+ assert @command.validate([], context[:opts]) == :ok
+ end
+
+ test "validate: with extra arguments returns an arg count error", context do
+ assert @command.validate(["extra"], context[:opts]) ==
+ {:validation_failure, :too_many_args}
+ end
+
+ test "run: request to a non-existent node returns a badrpc" do
+ opts = %{node: :jake@thedog, timeout: 200}
+ assert match?({:badrpc, _}, @command.run([], opts))
+ end
+
+ test "run: suspends all client TCP listeners so no new client connects are accepted", context do
+ assert @command.run([], Map.merge(context[:opts], %{timeout: 5_000})) == :ok
+
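+    # while suspended, new client connections must be refused; resuming restores connectivity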
+ expect_client_connection_failure()
+ resume_all_client_listeners()
+
+ # implies a successful connection
+ with_channel("/", fn _ -> :ok end)
+ close_all_connections(get_rabbit_hostname())
+ end
+end
diff --git a/deps/rabbitmq_cli/test/ctl/sync_queue_command_test.exs b/deps/rabbitmq_cli/test/ctl/sync_queue_command_test.exs
new file mode 100644
index 0000000000..3d3f866dd0
--- /dev/null
+++ b/deps/rabbitmq_cli/test/ctl/sync_queue_command_test.exs
@@ -0,0 +1,65 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2016-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule SyncQueueCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Ctl.Commands.SyncQueueCommand
+
+ @vhost "/"
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ start_rabbitmq_app()
+
+ on_exit([], fn ->
+ start_rabbitmq_app()
+ end)
+
+ :ok
+ end
+
+ setup do
+ {:ok, opts: %{
+ node: get_rabbit_hostname(),
+ vhost: @vhost
+ }}
+ end
+
+ test "validate: specifying no queue name is reported as an error", context do
+ assert @command.validate([], context[:opts]) ==
+ {:validation_failure, :not_enough_args}
+ end
+
+ test "validate: specifying two queue names is reported as an error", context do
+ assert @command.validate(["q1", "q2"], context[:opts]) ==
+ {:validation_failure, :too_many_args}
+ end
+
+ test "validate: specifying three queue names is reported as an error", context do
+ assert @command.validate(["q1", "q2", "q3"], context[:opts]) ==
+ {:validation_failure, :too_many_args}
+ end
+
+ test "validate: specifying one queue name succeeds", context do
+ assert @command.validate(["q1"], context[:opts]) == :ok
+ end
+
+ test "run: request to a non-existent RabbitMQ node returns a nodedown" do
+ opts = %{node: :jake@thedog, vhost: @vhost, timeout: 200}
+ assert match?({:badrpc, _}, @command.run(["q1"], opts))
+ end
+
+ test "banner", context do
+ s = @command.banner(["q1"], context[:opts])
+
+ assert s =~ ~r/Synchronising queue/
+ assert s =~ ~r/q1/
+ end
+end
diff --git a/deps/rabbitmq_cli/test/ctl/trace_off_command_test.exs b/deps/rabbitmq_cli/test/ctl/trace_off_command_test.exs
new file mode 100644
index 0000000000..0ea53774cb
--- /dev/null
+++ b/deps/rabbitmq_cli/test/ctl/trace_off_command_test.exs
@@ -0,0 +1,79 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+
+defmodule TraceOffCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Ctl.Commands.TraceOffCommand
+
+ @test_vhost "test"
+ @default_vhost "/"
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ add_vhost(@test_vhost)
+
+ on_exit([], fn ->
+ delete_vhost(@test_vhost)
+ end)
+
+ :ok
+ end
+
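+  # switch tracing on before each test so there is something for trace_off to disable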
+ setup context do
+ trace_on(context[:vhost])
+ on_exit(context, fn -> trace_off(context[:vhost]) end)
+ {:ok, opts: %{node: get_rabbit_hostname(), vhost: context[:vhost]}}
+ end
+
+ test "merge_defaults: defaults can be overridden" do
+ assert @command.merge_defaults([], %{}) == {[], %{vhost: "/"}}
+ assert @command.merge_defaults([], %{vhost: "non_default"}) == {[], %{vhost: "non_default"}}
+ end
+
+ test "validate: wrong number of arguments triggers arg count error" do
+ assert @command.validate(["extra"], %{}) == {:validation_failure, :too_many_args}
+ end
+
+ test "run: on an active node, trace_off command works on default" do
+ opts = %{node: get_rabbit_hostname()}
+ opts_with_vhost = %{node: get_rabbit_hostname(), vhost: "/"}
+ trace_on(@default_vhost)
+
+ assert @command.merge_defaults([], opts) == {[], opts_with_vhost}
+ end
+
+ test "run: on an invalid RabbitMQ node, return a nodedown" do
+ opts = %{node: :jake@thedog, vhost: "/", timeout: 200}
+ assert match?({:badrpc, _}, @command.run([], opts))
+ end
+
+ @tag target: get_rabbit_hostname(), vhost: @default_vhost
+ test "run: calls to trace_off are idempotent", context do
+ @command.run([], context[:opts])
+ assert @command.run([], context[:opts]) == {:ok, "Trace disabled for vhost #{@default_vhost}"}
+ end
+
+ @tag vhost: @test_vhost
+ test "run: on an active node, trace_off command works on named vhost", context do
+ assert @command.run([], context[:opts]) == {:ok, "Trace disabled for vhost #{@test_vhost}"}
+ end
+
+ @tag vhost: "toast"
+ test "run: Turning tracing off on invalid host returns successfully", context do
+ assert @command.run([], context[:opts]) == {:ok, "Trace disabled for vhost toast"}
+ end
+
+ @tag vhost: @default_vhost
+ test "banner", context do
+ assert @command.banner([], context[:opts])
+ =~ ~r/Stopping tracing for vhost "#{context[:vhost]}" .../
+ end
+end
diff --git a/deps/rabbitmq_cli/test/ctl/trace_on_command_test.exs b/deps/rabbitmq_cli/test/ctl/trace_on_command_test.exs
new file mode 100644
index 0000000000..4db58772a1
--- /dev/null
+++ b/deps/rabbitmq_cli/test/ctl/trace_on_command_test.exs
@@ -0,0 +1,79 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+
+defmodule TraceOnCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Ctl.Commands.TraceOnCommand
+
+ @test_vhost "test"
+ @default_vhost "/"
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ add_vhost(@test_vhost)
+
+ on_exit([], fn ->
+ delete_vhost(@test_vhost)
+ end)
+
+ :ok
+ end
+
+ setup context do
+ on_exit(context, fn -> trace_off(context[:vhost]) end)
+ {:ok, opts: %{node: get_rabbit_hostname(), vhost: context[:vhost]}}
+ end
+
+ test "merge_defaults: on an active node, trace_on command works on default" do
+ opts = %{node: get_rabbit_hostname()}
+ opts_with_vhost = %{node: get_rabbit_hostname(), vhost: "/"}
+
+ assert @command.merge_defaults([], opts) == {[], opts_with_vhost}
+
+ trace_off(@default_vhost)
+ end
+
+ test "merge_defaults: defaults can be overridden" do
+ assert @command.merge_defaults([], %{}) == {[], %{vhost: "/"}}
+ assert @command.merge_defaults([], %{vhost: "non_default"}) == {[], %{vhost: "non_default"}}
+ end
+
+ test "validate: wrong number of arguments triggers arg count error" do
+ assert @command.validate(["extra"], %{}) == {:validation_failure, :too_many_args}
+ end
+
+ test "run: on an invalid RabbitMQ node, return a nodedown" do
+ opts = %{node: :jake@thedog, vhost: "/", timeout: 200}
+
+ assert match?({:badrpc, _}, @command.run([], opts))
+ end
+
+ @tag vhost: @default_vhost
+ test "run: calls to trace_on are idempotent", context do
+ @command.run([], context[:opts])
+ assert @command.run([], context[:opts]) == {:ok, "Trace enabled for vhost #{@default_vhost}"}
+ end
+
+ @tag vhost: @test_vhost
+ test "run: on an active node, trace_on command works on named vhost", context do
+ assert @command.run([], context[:opts]) == {:ok, "Trace enabled for vhost #{@test_vhost}"}
+ end
+
+ @tag vhost: "toast"
+ test "run: Turning tracing on on invalid host returns successfully", context do
+ assert @command.run([], context[:opts]) == {:ok, "Trace enabled for vhost toast"}
+ end
+
+ @tag vhost: @default_vhost
+ test "banner", context do
+ assert @command.banner([], context[:opts])
+ =~ ~r/Starting tracing for vhost "#{context[:vhost]}" .../
+ end
+end
diff --git a/deps/rabbitmq_cli/test/ctl/update_cluster_nodes_command_test.exs b/deps/rabbitmq_cli/test/ctl/update_cluster_nodes_command_test.exs
new file mode 100644
index 0000000000..b94c21f1be
--- /dev/null
+++ b/deps/rabbitmq_cli/test/ctl/update_cluster_nodes_command_test.exs
@@ -0,0 +1,80 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2016-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+
+defmodule UpdateClusterNodesCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Ctl.Commands.UpdateClusterNodesCommand
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ start_rabbitmq_app()
+
+ on_exit([], fn ->
+ start_rabbitmq_app()
+ end)
+
+ :ok
+ end
+
+ setup do
+ {:ok, opts: %{
+ node: get_rabbit_hostname()
+ }}
+ end
+
+ test "validate: providing too few arguments fails validation", context do
+ assert @command.validate([], context[:opts]) ==
+ {:validation_failure, :not_enough_args}
+ end
+
+ test "validate: providing too many arguments fails validation", context do
+ assert @command.validate(["a", "b", "c"], context[:opts]) ==
+ {:validation_failure, :too_many_args}
+ end
+
+ test "run: specifying self as seed node fails validation", context do
+ stop_rabbitmq_app()
+ assert match?(
+ {:error, :cannot_cluster_node_with_itself},
+ @command.run([context[:opts][:node]], context[:opts]))
+ start_rabbitmq_app()
+ end
+
+ test "run: request to an unreachable node returns a badrpc", context do
+ opts = %{
+ node: :jake@thedog,
+ timeout: 200
+ }
+ assert match?(
+ {:badrpc, :nodedown},
+ @command.run([context[:opts][:node]], opts))
+ end
+
+ test "run: specifying an unreachable node as seed returns a badrpc", context do
+ stop_rabbitmq_app()
+ assert match?(
+ {:badrpc_multi, _, [_]},
+ @command.run([:jake@thedog], context[:opts]))
+ start_rabbitmq_app()
+ end
+
+ test "banner", context do
+ assert @command.banner(["a"], context[:opts]) =~
+ ~r/Will seed #{get_rabbit_hostname()} from a on next start/
+ end
+
+ test "output mnesia is running error", context do
+ exit_code = RabbitMQ.CLI.Core.ExitCodes.exit_software
+ assert match?({:error, ^exit_code,
+ "Mnesia is still running on node " <> _},
+ @command.output({:error, :mnesia_unexpectedly_running}, context[:opts]))
+
+ end
+end
diff --git a/deps/rabbitmq_cli/test/ctl/version_command_test.exs b/deps/rabbitmq_cli/test/ctl/version_command_test.exs
new file mode 100644
index 0000000000..76216b6cf0
--- /dev/null
+++ b/deps/rabbitmq_cli/test/ctl/version_command_test.exs
@@ -0,0 +1,24 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+
+defmodule VersionCommandTest do
+ use ExUnit.Case
+
+ @command RabbitMQ.CLI.Ctl.Commands.VersionCommand
+
+ test "merge_defaults: merges no defaults" do
+ assert @command.merge_defaults([], %{}) == {[], %{}}
+ end
+
+ test "validate: treats positional arguments as a failure" do
+ assert @command.validate(["extra-arg"], %{}) == {:validation_failure, :too_many_args}
+ end
+
+ test "validate: treats empty positional arguments and default switches as a success" do
+ assert @command.validate([], %{}) == :ok
+ end
+end
diff --git a/deps/rabbitmq_cli/test/ctl/wait_command_test.exs b/deps/rabbitmq_cli/test/ctl/wait_command_test.exs
new file mode 100644
index 0000000000..c1fd604245
--- /dev/null
+++ b/deps/rabbitmq_cli/test/ctl/wait_command_test.exs
@@ -0,0 +1,114 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+
+defmodule WaitCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Ctl.Commands.WaitCommand
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+ start_rabbitmq_app()
+
+ on_exit([], fn ->
+ start_rabbitmq_app()
+ end)
+
+ rabbitmq_home = :rabbit_misc.rpc_call(get_rabbit_hostname(), :code, :lib_dir, [:rabbit])
+
+ {:ok, opts: %{node: get_rabbit_hostname(),
+ rabbitmq_home: rabbitmq_home,
+ timeout: 500}}
+ end
+
+
+ test "validate: cannot have both pid and pidfile", context do
+ {:validation_failure, "Cannot specify both pid and pidfile"} =
+ @command.validate(["pid_file"], Map.merge(context[:opts], %{pid: 123}))
+ end
+
+ test "validate: should have either pid or pidfile", context do
+ {:validation_failure, "No pid or pidfile specified"} =
+ @command.validate([], context[:opts])
+ end
+
+ test "validate: with more than one argument returns an arg count error", context do
+ assert @command.validate(["pid_file", "extra"], context[:opts]) == {:validation_failure, :too_many_args}
+ end
+
+ test "run: times out waiting for non-existent pid file", context do
+ {:error, {:timeout, _}} = @command.run(["pid_file"], context[:opts]) |> Enum.to_list |> List.last
+ end
+
+ test "run: fails if pid process does not exist", context do
+ non_existent_pid = get_non_existent_os_pid()
+ {:error, :process_not_running} =
+ @command.run([], Map.merge(context[:opts], %{pid: non_existent_pid}))
+ |> Enum.to_list
+ |> List.last
+ end
+
+ test "run: times out if unable to communicate with the node", context do
+ pid = String.to_integer(System.get_pid())
+ {:error, {:timeout, _}} =
+ @command.run([], Map.merge(context[:opts], %{pid: pid, node: :nonode@nohost}))
+ |> Enum.to_list
+ |> List.last
+ end
+
+ test "run: happy path", context do
+ pid = :erlang.list_to_integer(:rpc.call(context[:opts][:node], :os, :getpid, []))
+ output = @command.run([], Map.merge(context[:opts], %{pid: pid}))
+ assert_stream_without_errors(output)
+ end
+
+ test "run: happy path in quiet mode", context do
+ pid = :erlang.list_to_integer(:rpc.call(context[:opts][:node], :os, :getpid, []))
+ output = @command.run([], Map.merge(context[:opts], %{pid: pid, quiet: true}))
+ [] = Enum.to_list(output)
+ end
+
+ test "no banner", context do
+ nil = @command.banner([], context[:opts])
+ end
+
+ test "output: process not running error", context do
+ exit_code = RabbitMQ.CLI.Core.ExitCodes.exit_software
+ assert match?({:error, ^exit_code, "Error: process is not running."},
+ @command.output({:error, :process_not_running}, context[:opts]))
+ end
+
+ test "output: garbage in pid file error", context do
+ exit_code = RabbitMQ.CLI.Core.ExitCodes.exit_software
+ assert match?({:error, ^exit_code, "Error: garbage in pid file."},
+ @command.output({:error, {:garbage_in_pid_file, "somefile"}}, context[:opts]))
+ end
+
+ test "output: could not read pid error", context do
+ exit_code = RabbitMQ.CLI.Core.ExitCodes.exit_software
+ assert match?({:error, ^exit_code, "Error: could not read pid. Detail: something wrong"},
+ @command.output({:error, {:could_not_read_pid, "something wrong"}}, context[:opts]))
+ end
+
+ test "output: default output is fine", context do
+ assert match?({:error, "message"}, @command.output({:error, "message"}, context[:opts]))
+ assert match?({:error, :message}, @command.output({:error, :message}, context[:opts]))
+ assert match?({:error, :message}, @command.output(:message, context[:opts]))
+ assert match?({:ok, "ok"}, @command.output({:ok, "ok"}, context[:opts]))
+ assert match?(:ok, @command.output(:ok, context[:opts]))
+ assert match?({:ok, "ok"}, @command.output("ok", context[:opts]))
+ end
+
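+ # walks OS pids upward from 2 until it finds one that no process on this host is using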
+ def get_non_existent_os_pid(pid \\ 2) do
+ case :rabbit_misc.is_os_process_alive(to_charlist(pid)) do
+ true -> get_non_existent_os_pid(pid + 1)
+ false -> pid
+ end
+ end
+end
diff --git a/deps/rabbitmq_cli/test/diagnostics/alarms_command_test.exs b/deps/rabbitmq_cli/test/diagnostics/alarms_command_test.exs
new file mode 100644
index 0000000000..70a2bfda64
--- /dev/null
+++ b/deps/rabbitmq_cli/test/diagnostics/alarms_command_test.exs
@@ -0,0 +1,69 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule AlarmsCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+ import RabbitMQ.CLI.Core.Alarms, only: [alarm_types: 1]
+
+ @command RabbitMQ.CLI.Diagnostics.Commands.AlarmsCommand
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ start_rabbitmq_app()
+
+ on_exit([], fn ->
+ start_rabbitmq_app()
+ end)
+
+ :ok
+ end
+
+ setup context do
+ {:ok, opts: %{
+ node: get_rabbit_hostname(),
+ timeout: context[:test_timeout] || 30000
+ }}
+ end
+
+ test "merge_defaults: nothing to do" do
+ assert @command.merge_defaults([], %{}) == {[], %{}}
+ end
+
+ test "validate: treats positional arguments as a failure" do
+ assert @command.validate(["extra-arg"], %{}) == {:validation_failure, :too_many_args}
+ end
+
+ test "validate: treats empty positional arguments and default switches as a success" do
+ assert @command.validate([], %{}) == :ok
+ end
+
+ @tag test_timeout: 3000
+ test "run: targeting an unreachable node throws a badrpc", context do
+ assert match?({:badrpc, _}, @command.run([], Map.merge(context[:opts], %{node: :jake@thedog, timeout: 100})))
+ end
+
+ test "run: when target node has no alarms in effect, returns an empty list", context do
+ assert [] == status()[:alarms]
+
+ assert @command.run([], context[:opts]) == []
+ end
+
+ test "run: when target node has an alarm in effect, returns it", context do
+ old_watermark = status()[:vm_memory_high_watermark]
+ on_exit(fn() ->
+ set_vm_memory_high_watermark(old_watermark)
+ end)
+ # 2000 bytes will trigger an alarm
+ set_vm_memory_high_watermark({:absolute, 2000})
+
+ assert [:memory] == alarm_types(status()[:alarms])
+ assert length(@command.run([], context[:opts])) == 1
+
+ set_vm_memory_high_watermark(old_watermark)
+ end
+end
diff --git a/deps/rabbitmq_cli/test/diagnostics/check_alarms_command_test.exs b/deps/rabbitmq_cli/test/diagnostics/check_alarms_command_test.exs
new file mode 100644
index 0000000000..f5b64282e3
--- /dev/null
+++ b/deps/rabbitmq_cli/test/diagnostics/check_alarms_command_test.exs
@@ -0,0 +1,119 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule CheckAlarmsCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+ import RabbitMQ.CLI.Core.Alarms, only: [alarm_types: 1]
+
+ @command RabbitMQ.CLI.Diagnostics.Commands.CheckAlarmsCommand
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ start_rabbitmq_app()
+
+ on_exit([], fn ->
+ start_rabbitmq_app()
+ end)
+
+ :ok
+ end
+
+ setup context do
+ {:ok, opts: %{
+ node: get_rabbit_hostname(),
+ timeout: context[:test_timeout] || 30000
+ }}
+ end
+
+ test "merge_defaults: nothing to do" do
+ assert @command.merge_defaults([], %{}) == {[], %{}}
+ end
+
+ test "validate: treats positional arguments as a failure" do
+ assert @command.validate(["extra-arg"], %{}) == {:validation_failure, :too_many_args}
+ end
+
+ test "validate: treats empty positional arguments and default switches as a success" do
+ assert @command.validate([], %{}) == :ok
+ end
+
+ @tag test_timeout: 3000
+ test "run: targeting an unreachable node throws a badrpc", context do
+ assert match?({:badrpc, _}, @command.run([], Map.merge(context[:opts], %{node: :jake@thedog})))
+ end
+
+ test "run: when target node has no alarms in effect, returns an empty list", context do
+ assert [] == status()[:alarms]
+
+ assert @command.run([], context[:opts]) == []
+ end
+
+ test "run: when target node has an alarm in effect, returns it", context do
+ old_watermark = status()[:vm_memory_high_watermark]
+ on_exit(fn() ->
+ set_vm_memory_high_watermark(old_watermark)
+ end)
+ # 2000 bytes will trigger an alarm
+ set_vm_memory_high_watermark({:absolute, 2000})
+
+ assert [:memory] == alarm_types(status()[:alarms])
+ assert length(@command.run([], context[:opts])) == 1
+
+ set_vm_memory_high_watermark(old_watermark)
+ end
+
+
+ test "output: when target node has no alarms in effect, returns a success", context do
+ assert [] == status()[:alarms]
+
+ assert match?({:ok, _}, @command.output([], context[:opts]))
+ end
+
+ test "output: when target node has an alarm in effect, returns a failure", context do
+ for input <- [
+ [
+ :file_descriptor_limit
+ ],
+ [
+ :file_descriptor_limit,
+ {{:resource_limit, :disk, :hare@warp10}, []}
+ ],
+ [
+ :file_descriptor_limit,
+ {{:resource_limit, :disk, :hare@warp10}, []},
+ {{:resource_limit, :memory, :hare@warp10}, []},
+ {{:resource_limit, :disk, :rabbit@warp10}, []},
+ {{:resource_limit, :memory, :rabbit@warp10}, []}
+ ]
+ ] do
+ assert match?({:error, _, _}, @command.output(input, context[:opts]))
+ end
+ end
+
+ test "output: when target node has an alarm in effect and --silent is passed, returns a silent failure", _context do
+ for input <- [
+ [
+ :file_descriptor_limit
+ ],
+ [
+ :file_descriptor_limit,
+ {{:resource_limit, :disk, :hare@warp10}, []}
+ ],
+ [
+ :file_descriptor_limit,
+ {{:resource_limit, :disk, :hare@warp10}, []},
+ {{:resource_limit, :memory, :hare@warp10}, []},
+ {{:resource_limit, :disk, :rabbit@warp10}, []},
+ {{:resource_limit, :memory, :rabbit@warp10}, []}
+ ]
+ ] do
+ assert {:error, :check_failed} == @command.output(input, %{silent: true})
+ end
+ end
+end
diff --git a/deps/rabbitmq_cli/test/diagnostics/check_local_alarms_command_test.exs b/deps/rabbitmq_cli/test/diagnostics/check_local_alarms_command_test.exs
new file mode 100644
index 0000000000..0aaf66c707
--- /dev/null
+++ b/deps/rabbitmq_cli/test/diagnostics/check_local_alarms_command_test.exs
@@ -0,0 +1,111 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule CheckLocalAlarmsCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+ import RabbitMQ.CLI.Core.Alarms, only: [alarm_types: 1]
+
+ @command RabbitMQ.CLI.Diagnostics.Commands.CheckLocalAlarmsCommand
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ start_rabbitmq_app()
+
+ on_exit([], fn ->
+ start_rabbitmq_app()
+ end)
+
+ :ok
+ end
+
+ setup context do
+ {:ok, opts: %{
+ node: get_rabbit_hostname(),
+ timeout: context[:test_timeout] || 30000
+ }}
+ end
+
+ test "merge_defaults: nothing to do" do
+ assert @command.merge_defaults([], %{}) == {[], %{}}
+ end
+
+ test "validate: treats positional arguments as a failure" do
+ assert @command.validate(["extra-arg"], %{}) == {:validation_failure, :too_many_args}
+ end
+
+ test "validate: treats empty positional arguments and default switches as a success" do
+ assert @command.validate([], %{}) == :ok
+ end
+
+ @tag test_timeout: 3000
+ test "run: targeting an unreachable node throws a badrpc", context do
+ assert match?({:badrpc, _}, @command.run([], Map.merge(context[:opts], %{node: :jake@thedog})))
+ end
+
+ test "run: when target node has no alarms in effect, returns an empty list", context do
+ assert [] == status()[:alarms]
+
+ assert @command.run([], context[:opts]) == []
+ end
+
+ test "run: when target node has a local alarm in effect, returns it", context do
+ old_watermark = status()[:vm_memory_high_watermark]
+ on_exit(fn() ->
+ set_vm_memory_high_watermark(old_watermark)
+ end)
+ # 2000 bytes will trigger an alarm
+ set_vm_memory_high_watermark({:absolute, 2000})
+
+ assert [:memory] == alarm_types(status()[:alarms])
+ assert length(@command.run([], context[:opts])) == 1
+
+ set_vm_memory_high_watermark(old_watermark)
+ end
+
+ test "output: when target node has no local alarms in effect, returns a success", context do
+ assert [] == status()[:alarms]
+
+ assert match?({:ok, _}, @command.output([], context[:opts]))
+ end
+
+ # note: it's run/2 that filters out non-local alarms
+ test "output: when target node has a local alarm in effect, returns a failure", context do
+ for input <- [
+ [
+ :file_descriptor_limit
+ ],
+ [
+ :file_descriptor_limit,
+ {{:resource_limit, :disk, get_rabbit_hostname()}, []},
+ {{:resource_limit, :memory, get_rabbit_hostname()}, []}
+ ]
+ ] do
+ assert match?({:error, _}, @command.output(input, context[:opts]))
+ end
+ end
+
+ # note: it's run/2 that filters out non-local alarms
+ test "output: when target node has an alarm in effect and --silent is passed, returns a silent failure", _context do
+ for input <- [
+ [
+ :file_descriptor_limit
+ ],
+ [
+ :file_descriptor_limit,
+ {{:resource_limit, :disk, :hare@warp10}, []}
+ ],
+ [
+ :file_descriptor_limit,
+ {{:resource_limit, :disk, get_rabbit_hostname()}, []},
+ {{:resource_limit, :memory, get_rabbit_hostname()}, []}
+ ]
+ ] do
+ assert {:error, :check_failed} == @command.output(input, %{silent: true})
+ end
+ end
+end
diff --git a/deps/rabbitmq_cli/test/diagnostics/check_port_connectivity_command_test.exs b/deps/rabbitmq_cli/test/diagnostics/check_port_connectivity_command_test.exs
new file mode 100644
index 0000000000..845a7b6f1d
--- /dev/null
+++ b/deps/rabbitmq_cli/test/diagnostics/check_port_connectivity_command_test.exs
@@ -0,0 +1,59 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule CheckPortConnectivityCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Diagnostics.Commands.CheckPortConnectivityCommand
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ :ok
+ end
+
+ setup context do
+ {:ok, opts: %{
+ node: get_rabbit_hostname(),
+ timeout: context[:test_timeout] || 30000
+ }}
+ end
+
+ test "merge_defaults: provides a default timeout" do
+ assert @command.merge_defaults([], %{}) == {[], %{timeout: 30000}}
+ end
+
+ test "validate: treats positional arguments as a failure" do
+ assert @command.validate(["extra-arg"], %{}) == {:validation_failure, :too_many_args}
+ end
+
+ test "validate: treats empty positional arguments and default switches as a success" do
+ assert @command.validate([], %{}) == :ok
+ end
+
+ @tag test_timeout: 3000
+ test "run: targeting an unreachable node throws a badrpc", context do
+ assert match?({:badrpc, _}, @command.run([], Map.merge(context[:opts], %{node: :jake@thedog})))
+ end
+
+ test "run: tries to connect to every inferred active listener", context do
+ assert match?({true, _}, @command.run([], context[:opts]))
+ end
+
+
+ test "output: when all connections succeeded, returns a success", context do
+ assert match?({:ok, _}, @command.output({true, []}, context[:opts]))
+ end
+
+ # a failed connection attempt is reported as a listener tuple carrying the endpoint details
+ test "output: when at least one connection failed, returns a failure", context do
+ failure = {:listener, :rabbit@mercurio, :lolz, 'mercurio',
+ {0, 0, 0, 0, 0, 0, 0, 0}, 7761613,
+ [backlog: 128, nodelay: true]}
+ assert match?({:error, _}, @command.output({false, [failure]}, context[:opts]))
+ end
+end
diff --git a/deps/rabbitmq_cli/test/diagnostics/check_port_listener_command_test.exs b/deps/rabbitmq_cli/test/diagnostics/check_port_listener_command_test.exs
new file mode 100644
index 0000000000..7c0428c190
--- /dev/null
+++ b/deps/rabbitmq_cli/test/diagnostics/check_port_listener_command_test.exs
@@ -0,0 +1,63 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule CheckPortListenerCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Diagnostics.Commands.CheckPortListenerCommand
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ :ok
+ end
+
+ setup context do
+ {:ok, opts: %{
+ node: get_rabbit_hostname(),
+ timeout: context[:test_timeout] || 30000
+ }}
+ end
+
+ test "merge_defaults: nothing to do" do
+ assert @command.merge_defaults([], %{}) == {[], %{}}
+ end
+
+ test "validate: when no arguments are provided, returns a failure" do
+ assert @command.validate([], %{}) == {:validation_failure, :not_enough_args}
+ end
+
+ test "validate: when two or more arguments are provided, returns a failure" do
+ assert @command.validate([5672, 61613], %{}) == {:validation_failure, :too_many_args}
+ end
+
+ test "validate: treats a single positional argument and default switches as a success" do
+ assert @command.validate([1883], %{}) == :ok
+ end
+
+ @tag test_timeout: 3000
+ test "run: targeting an unreachable node throws a badrpc", context do
+ assert match?({:badrpc, _}, @command.run([61613], Map.merge(context[:opts], %{node: :jake@thedog})))
+ end
+
+ test "run: when a listener for the protocol is active, returns a success", context do
+ assert match?({true, _}, @command.run([5672], context[:opts]))
+ end
+
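+ # 47777 is assumed to be a port with no active listener in the test environment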
+ test "run: when a listener on the port is not active or unknown, returns an error", context do
+ assert match?({false, _, _}, @command.run([47777], context[:opts]))
+ end
+
+ test "output: when a listener for the port is active, returns a success", context do
+ assert match?({:ok, _}, @command.output({true, 5672}, context[:opts]))
+ end
+
+ test "output: when a listener for the port is not active, returns an error", context do
+ assert match?({:error, _, _}, @command.output({false, 15672, []}, context[:opts]))
+ end
+end
diff --git a/deps/rabbitmq_cli/test/diagnostics/check_protocol_listener_command_test.exs b/deps/rabbitmq_cli/test/diagnostics/check_protocol_listener_command_test.exs
new file mode 100644
index 0000000000..a6aef88bc1
--- /dev/null
+++ b/deps/rabbitmq_cli/test/diagnostics/check_protocol_listener_command_test.exs
@@ -0,0 +1,68 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule CheckProtocolListenerCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Diagnostics.Commands.CheckProtocolListenerCommand
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ :ok
+ end
+
+ setup context do
+ {:ok, opts: %{
+ node: get_rabbit_hostname(),
+ timeout: context[:test_timeout] || 30000
+ }}
+ end
+
+ test "merge_defaults: nothing to do" do
+ assert @command.merge_defaults([], %{}) == {[], %{}}
+ end
+
+ test "validate: when no arguments are provided, returns a failure" do
+ assert @command.validate([], %{}) == {:validation_failure, :not_enough_args}
+ end
+
+ test "validate: when two or more arguments are provided, returns a failure" do
+ assert @command.validate(["amqp", "stomp"], %{}) == {:validation_failure, :too_many_args}
+ end
+
+ test "validate: treats a single positional argument and default switches as a success" do
+ assert @command.validate(["mqtt"], %{}) == :ok
+ end
+
+ @tag test_timeout: 3000
+ test "run: targeting an unreachable node throws a badrpc", context do
+ assert match?({:badrpc, _}, @command.run(["stomp"], Map.merge(context[:opts], %{node: :jake@thedog})))
+ end
+
+ test "run: when a listener for the protocol is active, returns a success", context do
+ assert match?({true, _}, @command.run(["amqp"], context[:opts]))
+ end
+
+ test "run: accepts a number of alternative protocol names/spellings", context do
+ for p <- ["amqp", "amqp1.0", "amqp10", "amqp091", "stomp1.2", "distribution"] do
+ assert match?({true, _}, @command.run([p], context[:opts]))
+ end
+ end
+
+ test "run: when a listener for the protocol is not active or unknown, returns an error", context do
+ assert match?({false, _, _}, @command.run(["non-existent-proto"], context[:opts]))
+ end
+
+ test "output: when a listener for the protocol is active, returns a success", context do
+ assert match?({:ok, _}, @command.output({true, "amqp"}, context[:opts]))
+ end
+
+ test "output: when a listener for the protocol is not active, returns an error", context do
+ assert match?({:error, _}, @command.output({false, "http", []}, context[:opts]))
+ end
+end
diff --git a/deps/rabbitmq_cli/test/diagnostics/check_running_command_test.exs b/deps/rabbitmq_cli/test/diagnostics/check_running_command_test.exs
new file mode 100644
index 0000000000..ab89d1e89e
--- /dev/null
+++ b/deps/rabbitmq_cli/test/diagnostics/check_running_command_test.exs
@@ -0,0 +1,72 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule CheckRunningCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Diagnostics.Commands.CheckRunningCommand
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ start_rabbitmq_app()
+
+ on_exit([], fn ->
+ start_rabbitmq_app()
+ end)
+
+ :ok
+ end
+
+ setup context do
+ {:ok, opts: %{
+ node: get_rabbit_hostname(),
+ timeout: context[:test_timeout] || 30000
+ }}
+ end
+
+ test "merge_defaults: nothing to do" do
+ assert @command.merge_defaults([], %{}) == {[], %{}}
+ end
+
+ test "validate: treats positional arguments as a failure" do
+ assert @command.validate(["extra-arg"], %{}) == {:validation_failure, :too_many_args}
+ end
+
+ test "validate: treats empty positional arguments and default switches as a success" do
+ assert @command.validate([], %{}) == :ok
+ end
+
+ @tag test_timeout: 3000
+ test "run: targeting an unreachable node throws a badrpc", context do
+ assert match?({:badrpc, _}, @command.run([], Map.merge(context[:opts], %{node: :jake@thedog})))
+ end
+
+ test "run: when the RabbitMQ app is booted and started, returns true", context do
+ await_rabbitmq_startup()
+
+ assert @command.run([], context[:opts])
+ end
+
+ test "run: when the RabbitMQ app is stopped, returns false", context do
+ stop_rabbitmq_app()
+
+ refute is_rabbitmq_app_running()
+ refute @command.run([], context[:opts])
+
+ start_rabbitmq_app()
+ end
+
+ test "output: when the result is true, returns successfully", context do
+ assert match?({:ok, _}, @command.output(true, context[:opts]))
+ end
+
+ # this is a check command
+ test "output: when the result is false, returns an error", context do
+ assert match?({:error, _}, @command.output(false, context[:opts]))
+ end
+end
diff --git a/deps/rabbitmq_cli/test/diagnostics/check_virtual_hosts_command_test.exs b/deps/rabbitmq_cli/test/diagnostics/check_virtual_hosts_command_test.exs
new file mode 100644
index 0000000000..2fab76ae9b
--- /dev/null
+++ b/deps/rabbitmq_cli/test/diagnostics/check_virtual_hosts_command_test.exs
@@ -0,0 +1,50 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule CheckVirtualHostsCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Diagnostics.Commands.CheckVirtualHostsCommand
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ :ok
+ end
+
+ setup context do
+ {:ok, opts: %{
+ node: get_rabbit_hostname(),
+ timeout: context[:test_timeout] || 30000
+ }}
+ end
+
+ test "merge_defaults: is a no-op" do
+ assert @command.merge_defaults([], %{}) == {[], %{}}
+ end
+
+ test "validate: treats positional arguments as a failure" do
+ assert @command.validate(["extra-arg"], %{}) == {:validation_failure, :too_many_args}
+ end
+
+ test "validate: treats empty positional arguments and default switches as a success" do
+ assert @command.validate([], %{}) == :ok
+ end
+
+ @tag test_timeout: 3000
+ test "run: targeting an unreachable node throws a badrpc", context do
+ assert match?({:badrpc, _}, @command.run([], Map.merge(context[:opts], %{node: :jake@thedog})))
+ end
+
+ test "output: when all virtual hosts are reported as up, returns a success", context do
+ assert match?({:ok, _}, @command.output([], context[:opts]))
+ end
+
+ test "output: when target node reports a virtual host as down, returns a failure", context do
+ assert match?({:error, _}, @command.output(["a-down-vhost"], context[:opts]))
+ end
+end
diff --git a/deps/rabbitmq_cli/test/diagnostics/cipher_suites_command_test.exs b/deps/rabbitmq_cli/test/diagnostics/cipher_suites_command_test.exs
new file mode 100644
index 0000000000..2ee5edddb8
--- /dev/null
+++ b/deps/rabbitmq_cli/test/diagnostics/cipher_suites_command_test.exs
@@ -0,0 +1,102 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+
+defmodule CipherSuitesCommandTest do
+ use ExUnit.Case
+ import TestHelper
+
+ @command RabbitMQ.CLI.Diagnostics.Commands.CipherSuitesCommand
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ :ok
+ end
+
+ setup context do
+ {:ok, opts: %{
+ node: get_rabbit_hostname(),
+ timeout: context[:test_timeout] || 30000,
+ format: context[:format] || "openssl",
+ all: false
+ }}
+ end
+
+ test "merge_defaults: defaults to the OpenSSL format" do
+ assert @command.merge_defaults([], %{}) == {[], %{format: "openssl", all: false}}
+ end
+
+ test "merge_defaults: format is case insensitive" do
+ assert @command.merge_defaults([], %{format: "OpenSSL"}) == {[], %{format: "openssl", all: false}}
+ assert @command.merge_defaults([], %{format: "Erlang"}) == {[], %{format: "erlang", all: false}}
+ assert @command.merge_defaults([], %{format: "Map"}) == {[], %{format: "map", all: false}}
+ end
+
+ test "merge_defaults: format can be overridden" do
+ assert @command.merge_defaults([], %{format: "map"}) == {[], %{format: "map", all: false}}
+ end
+
+ test "validate: treats positional arguments as a failure", context do
+ assert @command.validate(["extra-arg"], context[:opts]) == {:validation_failure, :too_many_args}
+ end
+
+ test "validate: treats empty positional arguments and default switches as a success", context do
+ assert @command.validate([], context[:opts]) == :ok
+ end
+
+ test "validate: supports openssl, erlang and map formats", context do
+ assert @command.validate([], Map.merge(context[:opts], %{format: "openssl"})) == :ok
+ assert @command.validate([], Map.merge(context[:opts], %{format: "erlang"})) == :ok
+ assert @command.validate([], Map.merge(context[:opts], %{format: "map"})) == :ok
+ end
+
+ @tag test_timeout: 3000
+ test "run: targeting an unreachable node throws a badrpc", context do
+ assert match?({:badrpc, _}, @command.run([], Map.merge(context[:opts], %{node: :jake@thedog})))
+ end
+
+ @tag format: "openssl"
+ test "run: returns a list of cipher suites in OpenSSL format", context do
+ res = @command.run([], context[:opts])
+ for cipher <- res, do: assert is_list(cipher)
+ # the list is long and its values are environment-specific,
+ # so we simply assert that it is non-empty. MK.
+ assert length(res) > 0
+ end
+
+ @tag format: "erlang"
+ test "run: returns a list of cipher suites in erlang format", context do
+ res = @command.run([], context[:opts])
+
+ for cipher <- res, do: assert is_tuple(cipher)
+ # the list is long and its values are environment-specific,
+ # so we simply assert that it is non-empty. MK.
+ assert length(res) > 0
+ end
+
+ @tag format: "map"
+ test "run: returns a list of cipher suites in map format", context do
+ res = @command.run([], context[:opts])
+ for cipher <- res, do: assert is_map(cipher)
+ # the list is long and its values are environment-specific,
+ # so we simply assert that it is non-empty. MK.
+ assert length(res) > 0
+ end
+
+ test "run: returns more cipher suites when all suites requested", context do
+ default_suites_opts = Map.merge(context[:opts], %{all: false})
+ default_suites = @command.run([], default_suites_opts)
+
+ all_suites_opts = Map.merge(context[:opts], %{all: true})
+ all_suites = @command.run([], all_suites_opts)
+
+ assert length(all_suites) > length(default_suites)
+ assert length(default_suites -- all_suites) == 0
+ end
+
+end
diff --git a/deps/rabbitmq_cli/test/diagnostics/command_line_arguments_command_test.exs b/deps/rabbitmq_cli/test/diagnostics/command_line_arguments_command_test.exs
new file mode 100644
index 0000000000..caa959ce44
--- /dev/null
+++ b/deps/rabbitmq_cli/test/diagnostics/command_line_arguments_command_test.exs
@@ -0,0 +1,44 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+
+defmodule CommandLineArgumentsCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Diagnostics.Commands.CommandLineArgumentsCommand
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ :ok
+ end
+
+ setup do
+ {:ok, opts: %{node: get_rabbit_hostname(), timeout: :infinity}}
+ end
+
+ test "validate: with extra arguments, command line arguments returns an arg count error", context do
+ assert @command.validate(["extra"], context[:opts]) ==
+ {:validation_failure, :too_many_args}
+ end
+
+ test "run: command line arguments request to a reachable node succeeds", context do
+ output = @command.run([], context[:opts]) |> Enum.to_list
+
+ assert_stream_without_errors(output)
+ end
+
+ test "run: command line arguments request on nonexistent RabbitMQ node returns a badrpc" do
+ opts = %{node: :jake@thedog, timeout: 200}
+ assert match?({:badrpc, _}, @command.run([], opts))
+ end
+
+ test "banner", context do
+ assert @command.banner([], context[:opts])
+ =~ ~r/Command line arguments of node #{get_rabbit_hostname()}/
+ end
+end
diff --git a/deps/rabbitmq_cli/test/diagnostics/consume_event_stream_command_test.exs b/deps/rabbitmq_cli/test/diagnostics/consume_event_stream_command_test.exs
new file mode 100644
index 0000000000..b11cdb38c2
--- /dev/null
+++ b/deps/rabbitmq_cli/test/diagnostics/consume_event_stream_command_test.exs
@@ -0,0 +1,74 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule ConsumeEventStreamCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Diagnostics.Commands.ConsumeEventStreamCommand
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ start_rabbitmq_app()
+
+ ExUnit.configure([max_cases: 1])
+
+ on_exit([], fn ->
+ start_rabbitmq_app()
+ end)
+
+ :ok
+ end
+
+ setup context do
+ {:ok, opts: %{
+ node: get_rabbit_hostname(),
+ timeout: context[:test_timeout] || 30000,
+ duration: :infinity,
+ pattern: ".*"
+ }}
+ end
+
+ test "merge_defaults: duration defaults to infinity, pattern to anything" do
+ assert @command.merge_defaults([], %{}) == {[], %{duration: :infinity,
+ pattern: ".*",
+ quiet: true}}
+ end
+
+ test "validate: treats positional arguments as a failure" do
+ assert @command.validate(["extra-arg"], %{}) == {:validation_failure, :too_many_args}
+ end
+
+ test "validate: treats empty positional arguments and default switches as a success", context do
+ assert @command.validate([], context[:opts]) == :ok
+ end
+
+ @tag test_timeout: 3000
+ test "run: targeting an unreachable node throws a badrpc", context do
+ assert match?({:badrpc, _}, @command.run([], Map.merge(context[:opts], %{node: :jake@thedog, timeout: 100})))
+ end
+
+ test "run: consumes events for N seconds", context do
+
+ stream = @command.run([], Map.merge(context[:opts], %{duration: 5}))
+ :rpc.call(get_rabbit_hostname(), :rabbit_event, :notify, [String.to_atom("event_type1"),
+ [{String.to_atom("args"), 1}]])
+ :rpc.call(get_rabbit_hostname(), :rabbit_event, :notify, [String.to_atom("event_type2"),
+ [{String.to_atom("pid"), self()}]])
+
+
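+ # events are consumed as keyword lists; atom names such as :event_type1 come back as "event.type1" and pids are converted to strings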
+ event1 = Enum.find(stream, nil, fn x -> Keyword.get(x, :event, nil) == "event.type1" end)
+ event2 = Enum.find(stream, nil, fn x -> Keyword.get(x, :event, nil) == "event.type2" end)
+ assert event1 != nil
+ assert event2 != nil
+ assert Keyword.get(event1, :args, nil) == 1
+ assert is_binary(Keyword.get(event2, :pid, nil))
+
+ end
+
+end
diff --git a/deps/rabbitmq_cli/test/diagnostics/disable_auth_attempt_source_tracking_command_test.exs b/deps/rabbitmq_cli/test/diagnostics/disable_auth_attempt_source_tracking_command_test.exs
new file mode 100644
index 0000000000..7a2b4295c7
--- /dev/null
+++ b/deps/rabbitmq_cli/test/diagnostics/disable_auth_attempt_source_tracking_command_test.exs
@@ -0,0 +1,39 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule DisableAuthAttemptSourceTrackingCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Diagnostics.Commands.DisableAuthAttemptSourceTrackingCommand
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+ :ok
+ end
+
+ setup context do
+ {:ok, opts: %{node: get_rabbit_hostname(), timeout: context[:test_timeout]}}
+ end
+
+ test "validate: providing no arguments passes validation", context do
+ assert @command.validate([], context[:opts]) == :ok
+ end
+
+ test "validate: providing any arguments fails validation", context do
+ assert @command.validate(["a"], context[:opts]) ==
+ {:validation_failure, :too_many_args}
+ end
+
+ @tag test_timeout: 3000
+ test "run: targeting an unreachable node throws a badrpc", context do
+ assert match?({:badrpc, _}, @command.run([], Map.merge(context[:opts], %{node: :jake@thedog})))
+ end
+
+ @tag test_timeout: 15000
+ test "run: disables source tracking for auth attempt stats", context do
+ assert :ok = @command.run([], context[:opts])
+ end
+end
diff --git a/deps/rabbitmq_cli/test/diagnostics/discover_peers_command_test.exs b/deps/rabbitmq_cli/test/diagnostics/discover_peers_command_test.exs
new file mode 100644
index 0000000000..dd54d6eed9
--- /dev/null
+++ b/deps/rabbitmq_cli/test/diagnostics/discover_peers_command_test.exs
@@ -0,0 +1,39 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule DiscoverPeersCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Diagnostics.Commands.DiscoverPeersCommand
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+ :ok
+ end
+
+ setup context do
+ {:ok, opts: %{node: get_rabbit_hostname(), timeout: context[:test_timeout]}}
+ end
+
+ test "validate: providing no arguments passes validation", context do
+ assert @command.validate([], context[:opts]) == :ok
+ end
+
+ test "validate: providing any arguments fails validation", context do
+ assert @command.validate(["a"], context[:opts]) ==
+ {:validation_failure, :too_many_args}
+ end
+
+ @tag test_timeout: 3000
+ test "run: targeting an unreachable node throws a badrpc", context do
+ assert match?({:badrpc, _}, @command.run([], Map.merge(context[:opts], %{node: :jake@thedog})))
+ end
+
+ @tag test_timeout: 15000
+ test "run: returns a list of nodes when the backend isn't configured", context do
+ assert match?({:ok, {[], _}}, @command.run([], context[:opts]))
+ end
+end
diff --git a/deps/rabbitmq_cli/test/diagnostics/enable_auth_attempt_source_tracking_command_test.exs b/deps/rabbitmq_cli/test/diagnostics/enable_auth_attempt_source_tracking_command_test.exs
new file mode 100644
index 0000000000..c55ac6134b
--- /dev/null
+++ b/deps/rabbitmq_cli/test/diagnostics/enable_auth_attempt_source_tracking_command_test.exs
@@ -0,0 +1,39 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule EnableAuthAttemptSourceTrackingCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Diagnostics.Commands.EnableAuthAttemptSourceTrackingCommand
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+ :ok
+ end
+
+ setup context do
+ {:ok, opts: %{node: get_rabbit_hostname(), timeout: context[:test_timeout]}}
+ end
+
+ test "validate: providing no arguments passes validation", context do
+ assert @command.validate([], context[:opts]) == :ok
+ end
+
+ test "validate: providing any arguments fails validation", context do
+ assert @command.validate(["a"], context[:opts]) ==
+ {:validation_failure, :too_many_args}
+ end
+
+ @tag test_timeout: 3000
+ test "run: targeting an unreachable node throws a badrpc", context do
+ assert match?({:badrpc, _}, @command.run([], Map.merge(context[:opts], %{node: :jake@thedog})))
+ end
+
+ @tag test_timeout: 15000
+ test "run: enables source tracking for auth attempt stats", context do
+ assert :ok = @command.run([], context[:opts])
+ end
+end
diff --git a/deps/rabbitmq_cli/test/diagnostics/erlang_cookie_hash_command_test.exs b/deps/rabbitmq_cli/test/diagnostics/erlang_cookie_hash_command_test.exs
new file mode 100644
index 0000000000..5dff653989
--- /dev/null
+++ b/deps/rabbitmq_cli/test/diagnostics/erlang_cookie_hash_command_test.exs
@@ -0,0 +1,50 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+
+defmodule ErlangCookieHashCommandTest do
+ use ExUnit.Case
+ import TestHelper
+
+ @command RabbitMQ.CLI.Diagnostics.Commands.ErlangCookieHashCommand
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ :ok
+ end
+
+ setup context do
+ {:ok, opts: %{
+ node: get_rabbit_hostname(),
+ timeout: context[:test_timeout] || 5000
+ }}
+ end
+
+ test "merge_defaults: nothing to do" do
+ assert @command.merge_defaults([], %{}) == {[], %{}}
+ end
+
+ test "validate: treats positional arguments as a failure" do
+ assert @command.validate(["extra-arg"], %{}) == {:validation_failure, :too_many_args}
+ end
+
+ test "validate: treats empty positional arguments and default switches as a success" do
+ assert @command.validate([], %{}) == :ok
+ end
+
+ @tag test_timeout: 3000
+ test "run: targeting an unreachable node throws a badrpc", context do
+ opts = %{node: :jake@thedog, timeout: 200}
+ assert match?({:badrpc, _}, @command.run([], Map.merge(context[:opts], opts)))
+ end
+
+ test "run: returns the erlang cookie hash", context do
+ res = @command.run([], context[:opts])
+ assert is_bitstring(res)
+ end
+
+end
diff --git a/deps/rabbitmq_cli/test/diagnostics/erlang_cookie_sources_command_test.exs b/deps/rabbitmq_cli/test/diagnostics/erlang_cookie_sources_command_test.exs
new file mode 100644
index 0000000000..794dd52a44
--- /dev/null
+++ b/deps/rabbitmq_cli/test/diagnostics/erlang_cookie_sources_command_test.exs
@@ -0,0 +1,37 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule ErlangCookieSourcesCommandTest do
+ use ExUnit.Case, async: true
+
+ @command RabbitMQ.CLI.Diagnostics.Commands.ErlangCookieSourcesCommand
+
+ setup _context do
+ {:ok, opts: %{}}
+ end
+
+ test "merge_defaults: merges no defaults" do
+ assert @command.merge_defaults([], %{}) == {[], %{}}
+ end
+
+ test "validate: treats positional arguments as a failure" do
+ assert @command.validate(["extra-arg"], %{}) == {:validation_failure, :too_many_args}
+ end
+
+ test "validate: treats empty positional arguments and default switches as a success" do
+ assert @command.validate([], %{}) == :ok
+ end
+
+ test "run: returns Erlang cookie sources info", context do
+ result = @command.run([], context[:opts])
+
+ assert result[:effective_user] != nil
+ assert result[:home_dir] != nil
+ assert result[:cookie_file_path] != nil
+ assert result[:cookie_file_exists] != nil
+ assert result[:cookie_file_access] != nil
+ end
+end
diff --git a/deps/rabbitmq_cli/test/diagnostics/erlang_version_command_test.exs b/deps/rabbitmq_cli/test/diagnostics/erlang_version_command_test.exs
new file mode 100644
index 0000000000..3bdaa645e2
--- /dev/null
+++ b/deps/rabbitmq_cli/test/diagnostics/erlang_version_command_test.exs
@@ -0,0 +1,72 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+
+defmodule ErlangVersionCommandTest do
+ use ExUnit.Case
+ import TestHelper
+
+ @command RabbitMQ.CLI.Diagnostics.Commands.ErlangVersionCommand
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ :ok
+ end
+
+ setup context do
+ {:ok, opts: %{
+ node: get_rabbit_hostname(),
+ timeout: context[:test_timeout] || 30000,
+ details: false,
+ offline: false
+ }}
+ end
+
+ test "merge_defaults: defaults to remote version and abbreviated output" do
+ assert @command.merge_defaults([], %{}) == {[], %{details: false, offline: false}}
+ end
+
+ test "validate: treats positional arguments as a failure" do
+ assert @command.validate(["extra-arg"], %{}) == {:validation_failure, :too_many_args}
+ end
+
+ test "validate: treats empty positional arguments and default switches as a success" do
+ assert @command.validate([], %{}) == :ok
+ end
+
+ test "validate: treats empty positional arguments and --details as a success" do
+ assert @command.validate([], %{details: true}) == :ok
+ end
+
+ test "validate: treats empty positional arguments and --offline as a success" do
+ assert @command.validate([], %{offline: true}) == :ok
+ end
+
+ test "validate: treats empty positional arguments, --details and --offline as a success" do
+ assert @command.validate([], %{details: true, offline: true}) == :ok
+ end
+
+ @tag test_timeout: 3000
+ test "run: targeting an unreachable node throws a badrpc", context do
+ assert match?({:badrpc, _}, @command.run([], Map.merge(context[:opts], %{node: :jake@thedog, details: false})))
+ end
+
+ test "run: returns Erlang/OTP version on the target node", context do
+ res = @command.run([], context[:opts])
+ assert is_bitstring(res)
+ end
+
+ test "run with --details: returns Erlang/OTP version on the target node", context do
+ res = @command.run([], Map.merge(context[:opts], %{details: true}))
+ assert is_bitstring(res)
+ end
+
+ test "run: when --offline is used, returns local Erlang/OTP version", context do
+ res = @command.run([], Map.merge(context[:opts], %{offline: true}))
+ assert is_bitstring(res)
+ end
+end
diff --git a/deps/rabbitmq_cli/test/diagnostics/is_booting_command_test.exs b/deps/rabbitmq_cli/test/diagnostics/is_booting_command_test.exs
new file mode 100644
index 0000000000..fc7c2595a9
--- /dev/null
+++ b/deps/rabbitmq_cli/test/diagnostics/is_booting_command_test.exs
@@ -0,0 +1,72 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule IsBootingCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Diagnostics.Commands.IsBootingCommand
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ start_rabbitmq_app()
+
+ on_exit([], fn ->
+ start_rabbitmq_app()
+ end)
+
+ :ok
+ end
+
+ setup context do
+ {:ok, opts: %{
+ node: get_rabbit_hostname(),
+ timeout: context[:test_timeout] || 30000
+ }}
+ end
+
+ test "merge_defaults: nothing to do" do
+ assert @command.merge_defaults([], %{}) == {[], %{}}
+ end
+
+ test "validate: treats positional arguments as a failure" do
+ assert @command.validate(["extra-arg"], %{}) == {:validation_failure, :too_many_args}
+ end
+
+ test "validate: treats empty positional arguments and default switches as a success" do
+ assert @command.validate([], %{}) == :ok
+ end
+
+ @tag test_timeout: 3000
+ test "run: targeting an unreachable node throws a badrpc", context do
+ assert match?({:badrpc, _}, @command.run([], Map.merge(context[:opts], %{node: :jake@thedog})))
+ end
+
+ test "run: when the RabbitMQ app is fully booted and running, returns false", context do
+ await_rabbitmq_startup()
+
+ refute @command.run([], context[:opts])
+ end
+
+ test "run: when the RabbitMQ app is stopped, returns false", context do
+ stop_rabbitmq_app()
+
+ refute is_rabbitmq_app_running()
+ refute @command.run([], context[:opts])
+
+ start_rabbitmq_app()
+ end
+
+ test "output: when the result is true, returns successfully", context do
+ assert match?({:ok, _}, @command.output(true, context[:opts]))
+ end
+
+ # this is an info command and not a check one
+ test "output: when the result is false, returns successfully", context do
+ assert match?({:ok, _}, @command.output(false, context[:opts]))
+ end
+end
diff --git a/deps/rabbitmq_cli/test/diagnostics/is_running_command_test.exs b/deps/rabbitmq_cli/test/diagnostics/is_running_command_test.exs
new file mode 100644
index 0000000000..120af9d7d7
--- /dev/null
+++ b/deps/rabbitmq_cli/test/diagnostics/is_running_command_test.exs
@@ -0,0 +1,72 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule IsRunningCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Diagnostics.Commands.IsRunningCommand
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ start_rabbitmq_app()
+
+ on_exit([], fn ->
+ start_rabbitmq_app()
+ end)
+
+ :ok
+ end
+
+ setup context do
+ {:ok, opts: %{
+ node: get_rabbit_hostname(),
+ timeout: context[:test_timeout] || 30000
+ }}
+ end
+
+ test "merge_defaults: nothing to do" do
+ assert @command.merge_defaults([], %{}) == {[], %{}}
+ end
+
+ test "validate: treats positional arguments as a failure" do
+ assert @command.validate(["extra-arg"], %{}) == {:validation_failure, :too_many_args}
+ end
+
+ test "validate: treats empty positional arguments and default switches as a success" do
+ assert @command.validate([], %{}) == :ok
+ end
+
+ @tag test_timeout: 3000
+ test "run: targeting an unreachable node throws a badrpc", context do
+ assert match?({:badrpc, _}, @command.run([], Map.merge(context[:opts], %{node: :jake@thedog})))
+ end
+
+ test "run: when the RabbitMQ app is booted and started, returns true", context do
+ await_rabbitmq_startup()
+
+ assert @command.run([], context[:opts])
+ end
+
+ test "run: when the RabbitMQ app is stopped, returns false", context do
+ stop_rabbitmq_app()
+
+ refute is_rabbitmq_app_running()
+ refute @command.run([], context[:opts])
+
+ start_rabbitmq_app()
+ end
+
+ test "output: when the result is true, returns successfully", context do
+ assert match?({:ok, _}, @command.output(true, context[:opts]))
+ end
+
+ # this is an info command and not a check one
+ test "output: when the result is false, returns successfully", context do
+ assert match?({:ok, _}, @command.output(false, context[:opts]))
+ end
+end
diff --git a/deps/rabbitmq_cli/test/diagnostics/list_network_interfaces_command_test.exs b/deps/rabbitmq_cli/test/diagnostics/list_network_interfaces_command_test.exs
new file mode 100644
index 0000000000..ccaac33d9b
--- /dev/null
+++ b/deps/rabbitmq_cli/test/diagnostics/list_network_interfaces_command_test.exs
@@ -0,0 +1,39 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule ListNetworkInterfacesCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Diagnostics.Commands.ListNetworkInterfacesCommand
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+ :ok
+ end
+
+ setup context do
+ {:ok, opts: %{node: get_rabbit_hostname(), timeout: context[:test_timeout]}}
+ end
+
+ test "validate: providing no arguments passes validation", context do
+ assert @command.validate([], context[:opts]) == :ok
+ end
+
+ test "validate: providing any arguments fails validation", context do
+ assert @command.validate(["a"], context[:opts]) ==
+ {:validation_failure, :too_many_args}
+ end
+
+ @tag test_timeout: 3000
+ test "run: targeting an unreachable node throws a badrpc", context do
+ assert match?({:badrpc, _}, @command.run([], Map.merge(context[:opts], %{node: :jake@thedog})))
+ end
+
+ @tag test_timeout: 15000
+ test "run: returns a map of interfaces", context do
+ assert match?(%{}, @command.run([], context[:opts]))
+ end
+end
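For a local point of comparison, the interface map checked above resembles what OTP reports via `:inet.getifaddrs/0` (an assumption about the data source; the command may post-process the result):

    # Returns {:ok, [{interface_name_charlist, properties}]}.
    {:ok, ifs} = :inet.getifaddrs()
    names = for {name, _props} <- ifs, do: List.to_string(name)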
diff --git a/deps/rabbitmq_cli/test/diagnostics/list_node_auth_attempt_stats_command_test.exs b/deps/rabbitmq_cli/test/diagnostics/list_node_auth_attempt_stats_command_test.exs
new file mode 100644
index 0000000000..c6ac28a340
--- /dev/null
+++ b/deps/rabbitmq_cli/test/diagnostics/list_node_auth_attempt_stats_command_test.exs
@@ -0,0 +1,39 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule ListNodeAuthAttemptStatsCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Diagnostics.Commands.ListNodeAuthAttemptStatsCommand
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+ :ok
+ end
+
+ setup context do
+ {:ok, opts: %{node: get_rabbit_hostname(), timeout: context[:test_timeout], by_source: false}}
+ end
+
+ test "validate: providing no arguments passes validation", context do
+ assert @command.validate([], context[:opts]) == :ok
+ end
+
+ test "validate: providing any arguments fails validation", context do
+ assert @command.validate(["a"], context[:opts]) ==
+ {:validation_failure, :too_many_args}
+ end
+
+ @tag test_timeout: 3000
+ test "run: targeting an unreachable node throws a badrpc", context do
+ assert match?({:badrpc, _}, @command.run([], Map.merge(context[:opts], %{node: :jake@thedog})))
+ end
+
+ @tag test_timeout: 15000
+ test "run: returns auth attempt stats", context do
+ assert is_list(@command.run([], context[:opts]))
+ end
+end
diff --git a/deps/rabbitmq_cli/test/diagnostics/listeners_command_test.exs b/deps/rabbitmq_cli/test/diagnostics/listeners_command_test.exs
new file mode 100644
index 0000000000..fc20cae7fc
--- /dev/null
+++ b/deps/rabbitmq_cli/test/diagnostics/listeners_command_test.exs
@@ -0,0 +1,78 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule ListenersCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+ import RabbitMQ.CLI.Core.Listeners, only: [listener_maps: 1]
+
+ @command RabbitMQ.CLI.Diagnostics.Commands.ListenersCommand
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ start_rabbitmq_app()
+
+ on_exit([], fn ->
+ start_rabbitmq_app()
+ end)
+
+ :ok
+ end
+
+ setup context do
+ {:ok, opts: %{
+ node: get_rabbit_hostname(),
+ timeout: context[:test_timeout] || 30000
+ }}
+ end
+
+ test "merge_defaults: nothing to do" do
+ assert @command.merge_defaults([], %{}) == {[], %{}}
+ end
+
+ test "validate: treats positional arguments as a failure" do
+ assert @command.validate(["extra-arg"], %{}) == {:validation_failure, :too_many_args}
+ end
+
+ test "validate: treats empty positional arguments and default switches as a success" do
+ assert @command.validate([], %{}) == :ok
+ end
+
+ @tag test_timeout: 3000
+ test "run: targeting an unreachable node throws a badrpc", context do
+ assert match?({:badrpc, _}, @command.run([], Map.merge(context[:opts], %{node: :jake@thedog})))
+ end
+
+ test "run: returns a list of node-local listeners", context do
+ xs = @command.run([], context[:opts]) |> listener_maps
+
+ assert length(xs) >= 3
+ for p <- [5672, 61613, 25672] do
+ assert Enum.any?(xs, fn %{port: port} -> port == p end)
+ end
+ end
+
+ test "output: returns a formatted list of node-local listeners", context do
+ raw = @command.run([], context[:opts])
+ {:ok, msg} = @command.output(raw, context[:opts])
+
+ for p <- [5672, 61613, 25672] do
+ assert msg =~ ~r/#{p}/
+ end
+ end
+
+ test "output: when formatter is JSON, returns an array of listener maps", context do
+ raw = @command.run([], context[:opts])
+ {:ok, doc} = @command.output(raw, Map.merge(%{formatter: "json"}, context[:opts]))
+ xs = doc["listeners"]
+
+ assert length(xs) >= 3
+ for p <- [5672, 61613, 25672] do
+ assert Enum.any?(xs, fn %{port: port} -> port == p end)
+ end
+ end
+end
diff --git a/deps/rabbitmq_cli/test/diagnostics/log_location_command_test.exs b/deps/rabbitmq_cli/test/diagnostics/log_location_command_test.exs
new file mode 100644
index 0000000000..64a85fc519
--- /dev/null
+++ b/deps/rabbitmq_cli/test/diagnostics/log_location_command_test.exs
@@ -0,0 +1,98 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule LogLocationCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Diagnostics.Commands.LogLocationCommand
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ start_rabbitmq_app()
+
+ ExUnit.configure([max_cases: 1])
+
+ on_exit([], fn ->
+ start_rabbitmq_app()
+ end)
+
+ :ok
+ end
+
+ setup context do
+ {:ok, opts: %{
+ node: get_rabbit_hostname(),
+ timeout: context[:test_timeout] || 30000,
+ all: false
+ }}
+ end
+
+ test "merge_defaults: all is false" do
+ assert @command.merge_defaults([], %{}) == {[], %{all: false}}
+ end
+
+ test "validate: treats positional arguments as a failure" do
+ assert @command.validate(["extra-arg"], %{}) == {:validation_failure, :too_many_args}
+ end
+
+ test "validate: treats empty positional arguments and default switches as a success" do
+ assert @command.validate([], %{all: false}) == :ok
+ end
+
+ @tag test_timeout: 3000
+ test "run: targeting an unreachable node throws a badrpc", context do
+ assert match?({:badrpc, _}, @command.run([], Map.merge(context[:opts], %{node: :jake@thedog, timeout: 100})))
+ end
+
+ test "run: prints default log location", context do
+ # Let Lager's log message rate limit lapse, or else some messages
+ # we assert on might be dropped. MK.
+ Process.sleep(1000)
+ {:ok, logfile} = @command.run([], context[:opts])
+ log_message = "file location"
+ :rpc.call(get_rabbit_hostname(), :rabbit_log, :error, [log_message])
+ wait_for_log_message(log_message, logfile)
+ {:ok, log_file_data} = File.read(logfile)
+ assert String.match?(log_file_data, Regex.compile!(log_message))
+ end
+
+ test "run: shows all log locations", context do
+ # Let Lager's log message rate limit lapse, or else some messages
+ # we assert on might be dropped. MK.
+ Process.sleep(1000)
+ # This assumes default configuration
+ [logfile, upgrade_log_file] =
+ @command.run([], Map.merge(context[:opts], %{all: true}))
+
+ log_message = "checking the default log file when checking all"
+ :rpc.call(get_rabbit_hostname(), :rabbit_log, :error, [log_message])
+ wait_for_log_message(log_message, logfile)
+
+ log_message_upgrade = "checking the upgrade log file when checking all"
+ :rpc.call(get_rabbit_hostname(),
+ :rabbit_log, :log, [:upgrade, :error, log_message_upgrade, []])
+ wait_for_log_message(log_message_upgrade, upgrade_log_file)
+ end
+
+ test "run: fails if there is no log file configured", context do
+ {:ok, upgrade_file} = :rpc.call(get_rabbit_hostname(), :application, :get_env, [:rabbit, :lager_upgrade_file])
+ {:ok, default_file} = :rpc.call(get_rabbit_hostname(), :application, :get_env, [:rabbit, :lager_default_file])
+ on_exit([], fn ->
+ :rpc.call(get_rabbit_hostname(), :application, :set_env, [:rabbit, :lager_upgrade_file, upgrade_file])
+ :rpc.call(get_rabbit_hostname(), :application, :set_env, [:rabbit, :lager_default_file, default_file])
+ :rpc.call(get_rabbit_hostname(), :rabbit_lager, :configure_lager, [])
+ start_rabbitmq_app()
+ end)
+ stop_rabbitmq_app()
+ :rpc.call(get_rabbit_hostname(), :application, :unset_env, [:rabbit, :lager_upgrade_file])
+ :rpc.call(get_rabbit_hostname(), :application, :unset_env, [:rabbit, :lager_default_file])
+ :rpc.call(get_rabbit_hostname(), :application, :unset_env, [:rabbit, :log])
+ :rpc.call(get_rabbit_hostname(), :rabbit_lager, :configure_lager, [])
+ {:error, "No log files configured on the node"} = @command.run([], context[:opts])
+ end
+end
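The last test above saves two Lager settings over RPC, unsets them to force the error path, and restores them in the exit handler. The same save/modify/restore shape, condensed to a local node and assuming the key is set (a sketch, not the suite's helper):

    {:ok, saved} = :application.get_env(:rabbit, :lager_default_file)

    try do
      :application.unset_env(:rabbit, :lager_default_file)
      # exercise the code path that requires the unset state here
    after
      # always restore, even if the block above raises
      :application.set_env(:rabbit, :lager_default_file, saved)
    end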
diff --git a/deps/rabbitmq_cli/test/diagnostics/log_tail_command_test.exs b/deps/rabbitmq_cli/test/diagnostics/log_tail_command_test.exs
new file mode 100644
index 0000000000..fb19821d55
--- /dev/null
+++ b/deps/rabbitmq_cli/test/diagnostics/log_tail_command_test.exs
@@ -0,0 +1,115 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule LogTailCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Diagnostics.Commands.LogTailCommand
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ start_rabbitmq_app()
+
+ ExUnit.configure([max_cases: 1])
+
+ on_exit([], fn ->
+ start_rabbitmq_app()
+ end)
+
+ :ok
+ end
+
+ setup context do
+ {:ok, opts: %{
+ node: get_rabbit_hostname(),
+ timeout: context[:test_timeout] || 30000,
+ number: 50
+ }}
+ end
+
+ test "merge_defaults: number is 50" do
+ assert @command.merge_defaults([], %{}) == {[], %{number: 50}}
+ end
+
+ test "validate: treats positional arguments as a failure" do
+ assert @command.validate(["extra-arg"], %{}) == {:validation_failure, :too_many_args}
+ end
+
+ test "validate: treats empty positional arguments and default switches as a success", context do
+ assert @command.validate([], context[:opts]) == :ok
+ end
+
+ @tag test_timeout: 3000
+ test "run: targeting an unreachable node throws a badrpc", context do
+ assert match?({:badrpc, _}, @command.run([], Map.merge(context[:opts], %{node: :jake@thedog, timeout: 100})))
+ end
+
+ test "run: shows last 50 lines from the log by default", context do
+ # Let Lager's log message rate limit lapse, or else some messages
+ # we assert on might be dropped. MK.
+ Process.sleep(1000)
+ clear_log_files()
+ log_messages =
+ Enum.map(:lists.seq(1, 50),
+ fn(n) ->
+ message = "Getting log tail #{n}"
+ :rpc.call(get_rabbit_hostname(), :rabbit_log, :error, [message])
+ message
+ end)
+ wait_for_log_message("Getting log tail 50")
+ lines = @command.run([], context[:opts])
+ assert Enum.count(lines) == 50
+
+ Enum.map(Enum.zip(log_messages, lines),
+ fn({message, line}) ->
+ assert String.match?(line, Regex.compile!(message))
+ end)
+ end
+
+ test "run: returns N lines", context do
+ # Let Lager's log message rate limit lapse, or else some messages
+ # we assert on might be dropped. MK.
+ Process.sleep(1000)
+
+ ## Log a bunch of lines
+ Enum.map(:lists.seq(1, 50),
+ fn(n) ->
+ message = "More lines #{n}"
+ :rpc.call(get_rabbit_hostname(), :rabbit_log, :error, [message])
+ message
+ end)
+ wait_for_log_message("More lines 50")
+ assert Enum.count(@command.run([], Map.merge(context[:opts], %{number: 20}))) == 20
+ assert Enum.count(@command.run([], Map.merge(context[:opts], %{number: 30}))) == 30
+ assert Enum.count(@command.run([], Map.merge(context[:opts], %{number: 40}))) == 40
+ end
+
+ test "run: may return less than N lines if N is high", context do
+ # Let Lager's log message rate limit lapse, or else some messages
+ # we assert on might be dropped. MK.
+ Process.sleep(1000)
+ clear_log_files()
+ ## Log a bunch of lines
+ Enum.map(:lists.seq(1, 100),
+ fn(n) ->
+ message = "More lines #{n}"
+ :rpc.call(get_rabbit_hostname(), :rabbit_log, :error, [message])
+ message
+ end)
+ wait_for_log_message("More lines 50")
+ assert Enum.count(@command.run([], Map.merge(context[:opts], %{number: 50}))) == 50
+ assert Enum.count(@command.run([], Map.merge(context[:opts], %{number: 200}))) < 200
+ end
+
+ def clear_log_files() do
+ [_|_] = logs = :rpc.call(get_rabbit_hostname(), :rabbit_lager, :log_locations, [])
+ Enum.map(logs, fn(log) ->
+ File.write(log, "")
+ end)
+ end
+end
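The first `run` test pairs each logged message with the matching returned line through `Enum.zip/2`, which makes the order-sensitivity of the assertion explicit. The same idea over canned data:

    messages = for n <- 1..3, do: "Getting log tail #{n}"
    lines = for m <- messages, do: "2020-01-01 12:00:00 [error] #{m}"

    Enum.each(Enum.zip(messages, lines), fn {message, line} ->
      true = String.match?(line, Regex.compile!(message))
    end)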
diff --git a/deps/rabbitmq_cli/test/diagnostics/log_tail_stream_command_test.exs b/deps/rabbitmq_cli/test/diagnostics/log_tail_stream_command_test.exs
new file mode 100644
index 0000000000..4ad2785604
--- /dev/null
+++ b/deps/rabbitmq_cli/test/diagnostics/log_tail_stream_command_test.exs
@@ -0,0 +1,107 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule LogTailStreamCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Diagnostics.Commands.LogTailStreamCommand
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ start_rabbitmq_app()
+
+ ExUnit.configure([max_cases: 1])
+
+ on_exit([], fn ->
+ start_rabbitmq_app()
+ end)
+
+ :ok
+ end
+
+ setup context do
+ {:ok, opts: %{
+ node: get_rabbit_hostname(),
+ timeout: context[:test_timeout] || 30000,
+ duration: :infinity
+ }}
+ end
+
+ test "merge_defaults: duration defaults to infinity" do
+ assert @command.merge_defaults([], %{}) == {[], %{duration: :infinity}}
+ end
+
+ test "validate: treats positional arguments as a failure" do
+ assert @command.validate(["extra-arg"], %{}) == {:validation_failure, :too_many_args}
+ end
+
+ test "validate: treats empty positional arguments and default switches as a success", context do
+ assert @command.validate([], context[:opts]) == :ok
+ end
+
+ @tag test_timeout: 3000
+ test "run: targeting an unreachable node throws a badrpc", context do
+ assert match?({:badrpc, _}, @command.run([], Map.merge(context[:opts], %{node: :jake@thedog, timeout: 100})))
+ end
+
+ test "run: streams messages for N seconds", context do
+ ensure_log_file()
+ time_before = System.system_time(:second)
+
+ stream = @command.run([], Map.merge(context[:opts], %{duration: 15}))
+ :rpc.call(get_rabbit_hostname(), :rabbit_log, :error, ["Message"])
+ :rpc.call(get_rabbit_hostname(), :rabbit_log, :error, ["Message1"])
+ :rpc.call(get_rabbit_hostname(), :rabbit_log, :error, ["Message2"])
+ :rpc.call(get_rabbit_hostname(), :rabbit_log, :error, ["Message3"])
+
+ # This may take a long time and fail with an ExUnit timeout
+ data = Enum.join(stream)
+
+ time_after = System.system_time(:second)
+
+ assert String.match?(data, ~r/Message/)
+ assert String.match?(data, ~r/Message1/)
+ assert String.match?(data, ~r/Message2/)
+ assert String.match?(data, ~r/Message3/)
+
+ time_spent = time_after - time_before
+ assert time_spent > 15
+ # This may take longer than the duration, but not too long
+ assert time_spent < 45
+ end
+
+ test "run: may return an error if there is no log", context do
+ delete_log_files()
+ {:error, :enoent} = @command.run([], Map.merge(context[:opts], %{duration: 5}))
+ end
+
+ def ensure_log_file() do
+ [log|_] = :rpc.call(get_rabbit_hostname(), :rabbit_lager, :log_locations, [])
+ ensure_file(log, 100)
+ end
+
+ def ensure_file(log, 0) do
+ flunk("timed out trying to ensure the log file #{log}")
+ end
+ def ensure_file(log, attempts) do
+ case File.exists?(log) do
+ true -> :ok
+ false ->
+ :rpc.call(get_rabbit_hostname(), :rabbit_log, :error, ["Ping"])
+ :timer.sleep(100)
+ ensure_file(log, attempts - 1)
+ end
+ end
+
+ def delete_log_files() do
+ [_|_] = logs = :rpc.call(get_rabbit_hostname(), :rabbit_lager, :log_locations, [])
+ Enum.map(logs, fn(log) ->
+ File.rm(log)
+ end)
+ end
+end
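The stream consumed above terminates on its own once the requested duration elapses, so `Enum.join/1` blocks for roughly that long; that is what the elapsed-time assertions measure. A rough approximation with plain `Stream` combinators (a sketch assuming a poll-based tail, not the command's actual implementation):

    duration_ms = 1_000
    deadline = System.monotonic_time(:millisecond) + duration_ms

    stream =
      Stream.repeatedly(fn ->
        Process.sleep(100)  # poll interval
        "a line read from the log\n"
      end)
      |> Stream.take_while(fn _ -> System.monotonic_time(:millisecond) < deadline end)

    # Blocks until the deadline passes, then yields everything read so far.
    data = Enum.join(stream)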
diff --git a/deps/rabbitmq_cli/test/diagnostics/maybe_stuck_command_test.exs b/deps/rabbitmq_cli/test/diagnostics/maybe_stuck_command_test.exs
new file mode 100644
index 0000000000..3b70966d1c
--- /dev/null
+++ b/deps/rabbitmq_cli/test/diagnostics/maybe_stuck_command_test.exs
@@ -0,0 +1,48 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+
+defmodule MaybeStuckCommandTest do
+ use ExUnit.Case
+ import TestHelper
+
+ @command RabbitMQ.CLI.Diagnostics.Commands.MaybeStuckCommand
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ :ok
+ end
+
+ setup context do
+ {:ok, opts: %{
+ node: get_rabbit_hostname(),
+ timeout: context[:test_timeout] || 15000
+ }}
+ end
+
+ test "merge_defaults: returns inputs" do
+ assert @command.merge_defaults([], %{timeout: 30}) == {[], %{timeout: 30}}
+ end
+
+ test "validate: treats positional arguments as a failure" do
+ assert @command.validate(["extra-arg"], %{}) == {:validation_failure, :too_many_args}
+ end
+
+ test "validate: treats empty positional arguments and default switches as a success" do
+ assert @command.validate([], %{}) == :ok
+ end
+
+ @tag test_timeout: 3000
+ test "run: targeting an unreachable node throws a badrpc", context do
+ assert match?({:badrpc, _}, @command.run([], Map.merge(context[:opts], %{node: :jake@thedog})))
+ end
+
+ @tag test_timeout: 0
+ test "run: timeout throws a badrpc", context do
+ assert @command.run([], context[:opts]) == {:badrpc, :timeout}
+ end
+end
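The zero-timeout case works because the underlying RPC machinery converts an expired deadline into a value rather than an exception:

    # :rpc.call/5 takes a timeout in milliseconds and returns
    # {:badrpc, :timeout} when it expires instead of raising.
    {:badrpc, :timeout} = :rpc.call(node(), :timer, :sleep, [100], 0)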
diff --git a/deps/rabbitmq_cli/test/diagnostics/memory_breakdown_command_test.exs b/deps/rabbitmq_cli/test/diagnostics/memory_breakdown_command_test.exs
new file mode 100644
index 0000000000..8f7ffb14dc
--- /dev/null
+++ b/deps/rabbitmq_cli/test/diagnostics/memory_breakdown_command_test.exs
@@ -0,0 +1,72 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2016-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule MemoryBreakdownCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Diagnostics.Commands.MemoryBreakdownCommand
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ start_rabbitmq_app()
+
+ on_exit([], fn ->
+ start_rabbitmq_app()
+ end)
+
+ :ok
+ end
+
+ setup do
+ {:ok, opts: %{
+ node: get_rabbit_hostname(),
+ timeout: 5000,
+ unit: "gb"
+ }}
+ end
+
+ test "validate: specifying a positional argument fails validation", context do
+ assert @command.validate(["abc"], context[:opts]) ==
+ {:validation_failure, :too_many_args}
+
+ assert @command.validate(["abc", "def"], context[:opts]) ==
+ {:validation_failure, :too_many_args}
+ end
+
+ test "validate: specifying no positional arguments and no options succeeds", context do
+ assert @command.validate([], context[:opts]) == :ok
+ end
+
+ test "validate: specifying gigabytes as a --unit succeeds", context do
+ assert @command.validate([], Map.merge(context[:opts], %{unit: "gb"})) == :ok
+ end
+
+ test "validate: specifying bytes as a --unit succeeds", context do
+ assert @command.validate([], Map.merge(context[:opts], %{unit: "bytes"})) == :ok
+ end
+
+ test "validate: specifying megabytes as a --unit succeeds", context do
+ assert @command.validate([], Map.merge(context[:opts], %{unit: "mb"})) == :ok
+ end
+
+ test "validate: specifying glip-glops as a --unit fails validation", context do
+ assert @command.validate([], Map.merge(context[:opts], %{unit: "glip-glops"})) ==
+ {:validation_failure, "unit 'glip-glops' is not supported. Please use one of: bytes, mb, gb"}
+ end
+
+ test "run: request to a non-existent RabbitMQ node returns a nodedown" do
+ opts = %{node: :jake@thedog, timeout: 200, unit: "gb"}
+ assert match?({:badrpc, _}, @command.run([], opts))
+ end
+
+ test "banner", context do
+ s = @command.banner([], context[:opts])
+
+ assert s =~ ~r/Reporting memory breakdown on node/
+ end
+end
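The `--unit` checks above reduce to membership in a small allow-list. A hypothetical sketch of such a validator (not the command's actual code, though the failure string mirrors the one asserted above):

    defmodule HypotheticalUnitValidator do
      @supported ["bytes", "mb", "gb"]

      def validate_unit(unit) when unit in @supported, do: :ok

      def validate_unit(unit) do
        {:validation_failure,
         "unit '#{unit}' is not supported. Please use one of: bytes, mb, gb"}
      end
    end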
diff --git a/deps/rabbitmq_cli/test/diagnostics/observer_command_test.exs b/deps/rabbitmq_cli/test/diagnostics/observer_command_test.exs
new file mode 100644
index 0000000000..8ff97abb0b
--- /dev/null
+++ b/deps/rabbitmq_cli/test/diagnostics/observer_command_test.exs
@@ -0,0 +1,44 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+
+defmodule ObserverCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Diagnostics.Commands.ObserverCommand
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ :ok
+ end
+
+ setup context do
+ {:ok, opts: %{
+ node: get_rabbit_hostname(),
+ interval: 5,
+ timeout: context[:test_timeout] || 15000
+ }}
+ end
+
+ test "merge_defaults: injects a default interval of 5s" do
+ assert @command.merge_defaults([], %{}) == {[], %{interval: 5}}
+ end
+
+ test "validate: treats positional arguments as a failure" do
+ assert @command.validate(["extra-arg"], %{}) == {:validation_failure, :too_many_args}
+ end
+
+ test "validate: treats empty positional arguments and default switches as a success" do
+ assert @command.validate([], %{}) == :ok
+ end
+
+ @tag test_timeout: 3000
+ test "run: targeting an unreachable node throws a badrpc" do
+ assert match?({:badrpc, _}, @command.run([], %{node: :jake@thedog, interval: 5}))
+ end
+end
diff --git a/deps/rabbitmq_cli/test/diagnostics/os_env_command_test.exs b/deps/rabbitmq_cli/test/diagnostics/os_env_command_test.exs
new file mode 100644
index 0000000000..254b41c9f2
--- /dev/null
+++ b/deps/rabbitmq_cli/test/diagnostics/os_env_command_test.exs
@@ -0,0 +1,62 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule OsEnvCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Diagnostics.Commands.OsEnvCommand
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ start_rabbitmq_app()
+
+ ExUnit.configure([max_cases: 1])
+
+ on_exit([], fn ->
+ start_rabbitmq_app()
+ end)
+
+ :ok
+ end
+
+ setup context do
+ {:ok, opts: %{
+ node: get_rabbit_hostname(),
+ timeout: context[:test_timeout] || 30000,
+ all: false
+ }}
+ end
+
+ test "merge_defaults: merges no defaults" do
+ assert @command.merge_defaults([], %{}) == {[], %{}}
+ end
+
+ test "validate: treats positional arguments as a failure" do
+ assert @command.validate(["extra-arg"], %{}) == {:validation_failure, :too_many_args}
+ end
+
+ test "validate: treats empty positional arguments and default switches as a success" do
+ assert @command.validate([], %{}) == :ok
+ end
+
+ @tag test_timeout: 3000
+ test "run: targeting an unreachable node throws a badrpc", context do
+ assert match?({:badrpc, _}, @command.run([], Map.merge(context[:opts], %{node: :jake@thedog, timeout: 100})))
+ end
+
+ test "run: returns defined RabbitMQ-specific environment variables", context do
+ vars = @command.run([], context[:opts])
+
+ # Only variables that are used by RabbitMQ are returned.
+ # They may or may not be prefixed with RABBITMQ_; rabbit_env tries both
+ # forms when filtering environment variables down.
+ assert Enum.any?(vars, fn({k, _v}) ->
+ String.starts_with?(k, "RABBITMQ_") or String.starts_with?(k, "rabbitmq_")
+ end)
+ end
+end
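The filtering described in the comment above can be reproduced with `System.get_env/0`; this shows the idea only, not `rabbit_env`'s actual logic:

    rabbitmq_vars =
      System.get_env()
      |> Enum.filter(fn {k, _v} ->
        String.starts_with?(k, "RABBITMQ_") or String.starts_with?(k, "rabbitmq_")
      end)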
diff --git a/deps/rabbitmq_cli/test/diagnostics/resolve_hostname_command_test.exs b/deps/rabbitmq_cli/test/diagnostics/resolve_hostname_command_test.exs
new file mode 100644
index 0000000000..2019154f0c
--- /dev/null
+++ b/deps/rabbitmq_cli/test/diagnostics/resolve_hostname_command_test.exs
@@ -0,0 +1,85 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule ResolveHostnameCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Diagnostics.Commands.ResolveHostnameCommand
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+ start_rabbitmq_app()
+
+ ExUnit.configure([max_cases: 1])
+ on_exit([], fn ->
+ start_rabbitmq_app()
+ end)
+
+ :ok
+ end
+
+ setup context do
+ {:ok, opts: %{
+ node: get_rabbit_hostname(),
+ timeout: context[:test_timeout] || 30000,
+ address_family: "ipv4",
+ offline: false
+ }}
+ end
+
+ test "merge_defaults: defaults to IPv4 address family" do
+ assert @command.merge_defaults([], %{}) == {[], %{address_family: "IPv4", offline: false}}
+ end
+
+ test "validate: a single positional argument passes validation" do
+ assert @command.validate(["rabbitmq.com"], %{}) == :ok
+ end
+
+ test "validate: treats positional arguments as a failure" do
+ assert @command.validate(["elixir-lang.org", "extra-arg"], %{}) == {:validation_failure, :too_many_args}
+ end
+
+ test "validate: address family other than IPv4 or IPv6 fails validation" do
+ assert match?({:validation_failure, {:bad_argument, _}},
+ @command.validate(["elixir-lang.org"], %{address_family: "ipv5"}))
+
+ assert match?({:validation_failure, {:bad_argument, _}},
+ @command.validate(["elixir-lang.org"], %{address_family: "IPv5"}))
+ end
+
+ test "validate: IPv4 for address family passes validation" do
+ assert @command.validate(["elixir-lang.org"], %{address_family: "ipv4"}) == :ok
+ assert @command.validate(["elixir-lang.org"], %{address_family: "IPv4"}) == :ok
+ end
+
+ test "validate: IPv6 for address family passes validation" do
+ assert @command.validate(["elixir-lang.org"], %{address_family: "ipv6"}) == :ok
+ assert @command.validate(["elixir-lang.org"], %{address_family: "IPv6"}) == :ok
+ end
+
+ @tag test_timeout: 3000
+ test "run: targeting an unreachable node throws a badrpc", context do
+ opts = Map.merge(context[:opts], %{node: :jake@thedog, timeout: 100})
+ assert match?({:badrpc, _}, @command.run(["elixir-lang.org"], opts))
+ end
+
+ test "run: returns a resolution result", context do
+ case @command.run(["github.com"], context[:opts]) do
+ {:ok, _hostent} -> :ok
+ {:error, :nxdomain} -> :ok
+ other -> flunk("hostname resolution returned #{inspect(other)}")
+ end
+ end
+
+ test "run with --offline: returns a resolution result", context do
+ case @command.run(["github.com"], Map.merge(context[:opts], %{offline: true})) do
+ {:ok, _hostent} -> :ok
+ {:error, :nxdomain} -> :ok
+ other -> flunk("hostname resolution returned #{inspect(other)}")
+ end
+ end
+end
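Both accepted outcomes above can be reproduced against OTP's resolver directly (an assumption that the command resolves via `:inet`; the exact call is illustrative):

    case :inet.gethostbyname(~c"github.com", :inet) do
      {:ok, _hostent} -> :ok
      {:error, :nxdomain} -> :ok
    end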
diff --git a/deps/rabbitmq_cli/test/diagnostics/resolver_info_command_test.exs b/deps/rabbitmq_cli/test/diagnostics/resolver_info_command_test.exs
new file mode 100644
index 0000000000..001371ed37
--- /dev/null
+++ b/deps/rabbitmq_cli/test/diagnostics/resolver_info_command_test.exs
@@ -0,0 +1,65 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule ResolverInfoCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Diagnostics.Commands.ResolverInfoCommand
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ start_rabbitmq_app()
+
+ ExUnit.configure([max_cases: 1])
+
+ on_exit([], fn ->
+ start_rabbitmq_app()
+ end)
+
+ :ok
+ end
+
+ setup context do
+ {:ok, opts: %{
+ node: get_rabbit_hostname(),
+ timeout: context[:test_timeout] || 30000,
+ offline: false
+ }}
+ end
+
+ test "merge_defaults: defaults to offline mode" do
+ assert @command.merge_defaults([], %{}) == {[], %{offline: false}}
+ end
+
+ test "validate: treats positional arguments as a failure" do
+ assert @command.validate(["extra-arg"], %{}) == {:validation_failure, :too_many_args}
+ end
+
+ test "validate: treats empty positional arguments and default switches as a success" do
+ assert @command.validate([], %{}) == :ok
+ end
+
+ @tag test_timeout: 3000
+ test "run: targeting an unreachable node throws a badrpc", context do
+ assert match?({:badrpc, _}, @command.run([], Map.merge(context[:opts], %{node: :jake@thedog, timeout: 100})))
+ end
+
+ test "run: returns host resolver (inetrc) information", context do
+ result = @command.run([], context[:opts])
+
+ assert result["lookup"] != nil
+ assert result["hosts_file"] != nil
+ end
+
+ test "run: returns host resolver (inetrc) information with --offline", context do
+ result = @command.run([], Map.merge(context[:opts], %{offline: true}))
+
+ assert result["lookup"] != nil
+ assert result["hosts_file"] != nil
+ end
+end
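The "lookup" and "hosts_file" keys asserted above mirror OTP's resolver configuration, which is inspectable locally (assuming the command sources its data from the `inet` configuration):

    # A keyword list such as [lookup: [:file, :dns], hosts_file: ..., ...];
    # the exact keys depend on the host's configuration.
    rc = :inet.get_rc()
    lookup = Keyword.get(rc, :lookup, [])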
diff --git a/deps/rabbitmq_cli/test/diagnostics/runtime_thread_stats_command_test.exs b/deps/rabbitmq_cli/test/diagnostics/runtime_thread_stats_command_test.exs
new file mode 100644
index 0000000000..34ab7b9c63
--- /dev/null
+++ b/deps/rabbitmq_cli/test/diagnostics/runtime_thread_stats_command_test.exs
@@ -0,0 +1,50 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+
+defmodule RuntimeThreadStatsCommandTest do
+ use ExUnit.Case
+ import TestHelper
+
+ @command RabbitMQ.CLI.Diagnostics.Commands.RuntimeThreadStatsCommand
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ :ok
+ end
+
+ setup context do
+ {:ok, opts: %{
+ node: get_rabbit_hostname(),
+ timeout: context[:test_timeout] || 10000,
+ sample_interval: 1
+ }}
+ end
+
+
+ test "validate: providing no arguments passes validation", context do
+ assert @command.validate([], context[:opts]) == :ok
+ end
+
+ test "validate: providing any arguments fails validation", context do
+ assert @command.validate(["a"], context[:opts]) ==
+ {:validation_failure, :too_many_args}
+ end
+
+ @tag test_timeout: 2000
+ test "run: targeting an unreachable node throws a badrpc", context do
+ assert match?({:badrpc, _}, @command.run([], Map.merge(context[:opts], %{node: :jake@thedog})))
+ end
+
+ @tag test_timeout: 6000
+ test "run: returns msacc-formatted output", context do
+ res = @command.run([], context[:opts])
+ # the output is long and its values are environment-specific,
+ # so we simply assert that it is non-empty. MK.
+ assert length(res) > 0
+ end
+end
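The msacc-formatted output asserted on comes from the runtime's microstate accounting, which can be sampled directly with standard OTP calls (an assumption about the mechanism the command uses):

    # Collect microstate accounting data for one second, then read it.
    :msacc.start(1_000)
    stats = :msacc.stats()
    true = length(stats) > 0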
diff --git a/deps/rabbitmq_cli/test/diagnostics/schema_info_command_test.exs b/deps/rabbitmq_cli/test/diagnostics/schema_info_command_test.exs
new file mode 100644
index 0000000000..369592522a
--- /dev/null
+++ b/deps/rabbitmq_cli/test/diagnostics/schema_info_command_test.exs
@@ -0,0 +1,69 @@
+defmodule SchemaInfoCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Diagnostics.Commands.SchemaInfoCommand
+ @default_timeout :infinity
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ :ok
+ end
+
+ setup context do
+ {
+ :ok,
+ opts: %{
+ node: get_rabbit_hostname(),
+ timeout: context[:test_timeout] || @default_timeout
+ }
+ }
+ end
+
+ test "merge_defaults: adds all keys if none specificed", context do
+ default_keys = ~w(name cookie active_replicas user_properties)
+
+ {keys, _} = @command.merge_defaults([], context[:opts])
+ assert default_keys == keys
+ end
+
+ test "merge_defaults: includes table headers by default", _context do
+ {_, opts} = @command.merge_defaults([], %{})
+ assert opts[:table_headers]
+ end
+
+ test "validate: returns bad_info_key on a single bad arg", context do
+ assert @command.validate(["quack"], context[:opts]) ==
+ {:validation_failure, {:bad_info_key, [:quack]}}
+ end
+
+ test "validate: returns multiple bad args return a list of bad info key values", context do
+ assert @command.validate(["quack", "oink"], context[:opts]) ==
+ {:validation_failure, {:bad_info_key, [:oink, :quack]}}
+ end
+
+ test "validate: return bad_info_key on mix of good and bad args", context do
+ assert @command.validate(["quack", "cookie"], context[:opts]) ==
+ {:validation_failure, {:bad_info_key, [:quack]}}
+ assert @command.validate(["access_mode", "oink"], context[:opts]) ==
+ {:validation_failure, {:bad_info_key, [:oink]}}
+ assert @command.validate(["access_mode", "oink", "name"], context[:opts]) ==
+ {:validation_failure, {:bad_info_key, [:oink]}}
+ end
+
+ @tag test_timeout: 0
+ test "run: timeout causes command to return badrpc", context do
+ assert run_command_to_list(@command, [["source_name"], context[:opts]]) ==
+ {:badrpc, :timeout}
+ end
+
+ test "run: can filter info keys", context do
+ wanted_keys = ~w(name access_mode)
+ assert match?([[name: _, access_mode: _] | _], run_command_to_list(@command, [wanted_keys, context[:opts]]))
+ end
+
+ test "banner" do
+ assert String.starts_with?(@command.banner([], %{node: "node@node"}), "Asking node")
+ end
+end
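The `bad_info_key` failures above follow a filter-against-known-keys shape: requested columns are checked against the known set and the rejects are reported sorted. A hypothetical sketch (the key list is invented for illustration):

    defmodule HypotheticalInfoKeyValidator do
      @info_keys [:name, :cookie, :active_replicas, :user_properties, :access_mode]

      def validate(args) do
        case args |> Enum.map(&String.to_atom/1) |> Enum.reject(&(&1 in @info_keys)) do
          []  -> :ok
          bad -> {:validation_failure, {:bad_info_key, Enum.sort(bad)}}
        end
      end
    end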
diff --git a/deps/rabbitmq_cli/test/diagnostics/server_version_command_test.exs b/deps/rabbitmq_cli/test/diagnostics/server_version_command_test.exs
new file mode 100644
index 0000000000..72c32e32f1
--- /dev/null
+++ b/deps/rabbitmq_cli/test/diagnostics/server_version_command_test.exs
@@ -0,0 +1,48 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+
+defmodule ServerVersionCommandTest do
+ use ExUnit.Case
+ import TestHelper
+
+ @command RabbitMQ.CLI.Diagnostics.Commands.ServerVersionCommand
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ :ok
+ end
+
+ setup context do
+ {:ok, opts: %{
+ node: get_rabbit_hostname(),
+ timeout: context[:test_timeout] || 30000
+ }}
+ end
+
+ test "merge_defaults: nothing to do" do
+ assert @command.merge_defaults([], %{}) == {[], %{}}
+ end
+
+ test "validate: treats positional arguments as a failure" do
+ assert @command.validate(["extra-arg"], %{}) == {:validation_failure, :too_many_args}
+ end
+
+ test "validate: treats empty positional arguments and default switches as a success" do
+ assert @command.validate([], %{}) == :ok
+ end
+
+ @tag test_timeout: 3000
+ test "run: targeting an unreachable node throws a badrpc", context do
+ assert match?({:badrpc, _}, @command.run([], Map.merge(context[:opts], %{node: :jake@thedog})))
+ end
+
+ test "run: returns RabbitMQ version on the target node", context do
+ res = @command.run([], context[:opts])
+ assert is_bitstring(res)
+ end
+end
diff --git a/deps/rabbitmq_cli/test/diagnostics/tls_versions_command_test.exs b/deps/rabbitmq_cli/test/diagnostics/tls_versions_command_test.exs
new file mode 100644
index 0000000000..0e38a0461e
--- /dev/null
+++ b/deps/rabbitmq_cli/test/diagnostics/tls_versions_command_test.exs
@@ -0,0 +1,60 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule TlsVersionsCommandTest do
+ use ExUnit.Case
+ import TestHelper
+
+ @command RabbitMQ.CLI.Diagnostics.Commands.TlsVersionsCommand
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ :ok
+ end
+
+ setup context do
+ {:ok, opts: %{
+ node: get_rabbit_hostname(),
+ timeout: context[:test_timeout] || 30000
+ }}
+ end
+
+ test "merge_defaults: is a no-op" do
+ assert @command.merge_defaults([], %{}) == {[], %{}}
+ end
+
+ test "validate: treats positional arguments as a failure" do
+ assert @command.validate(["extra-arg"], %{}) == {:validation_failure, :too_many_args}
+ end
+
+ test "validate: treats empty positional arguments and default switches as a success" do
+ assert @command.validate([], %{}) == :ok
+ end
+
+ @tag test_timeout: 3000
+ test "run: targeting an unreachable node throws a badrpc", context do
+ assert match?({:badrpc, _}, @command.run([], Map.merge(context[:opts], %{node: :jake@thedog})))
+ end
+
+ test "run when formatter is set to JSON: return a document with a list of supported TLS versions", context do
+ m = @command.run([], Map.merge(context[:opts], %{formatter: "json"})) |> Map.new
+ xs = Map.get(m, :available)
+
+ # assert that we have a list and tlsv1.2 is included
+ assert length(xs) > 0
+ assert Enum.member?(xs, :"tlsv1.2")
+ end
+
+ test "run and output: return a list of supported TLS versions", context do
+ m = @command.run([], context[:opts])
+ {:ok, res} = @command.output(m, context[:opts])
+
+ # assert that we have a list and tlsv1.2 is included
+ assert length(res) > 0
+ assert Enum.member?(res, :"tlsv1.2")
+ end
+end
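For a local cross-check, the available-versions list asserted above matches what the `:ssl` application reports (assuming the command ultimately queries `:ssl.versions/0` on the target node):

    {:ok, _} = :application.ensure_all_started(:ssl)
    available = :ssl.versions() |> Keyword.get(:available, [])
    true = Enum.member?(available, :"tlsv1.2")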
diff --git a/deps/rabbitmq_cli/test/fixtures/files/definitions.json b/deps/rabbitmq_cli/test/fixtures/files/definitions.json
new file mode 100644
index 0000000000..1391870028
--- /dev/null
+++ b/deps/rabbitmq_cli/test/fixtures/files/definitions.json
@@ -0,0 +1,40 @@
+{
+ "rabbit_version": "3.7.21",
+ "vhosts": [
+ {
+ "name": "\/"
+ }
+ ],
+ "queues": [
+
+ ],
+ "exchanges": [
+ {
+ "name": "project.topic.default",
+ "vhost": "\/",
+ "type": "topic",
+ "durable": true,
+ "auto_delete": false,
+ "internal": false,
+ "arguments": {
+
+ }
+ }
+ ],
+ "bindings": [
+
+ ],
+
+ "parameters": [
+ {
+ "component": "federation-upstream",
+ "name": "up-1",
+ "value": {
+ "ack-mode": "on-confirm",
+ "trust-user-id": false,
+ "uri": "amqp://127.0.0.1:5672"
+ },
+ "vhost": "/"
+ }
+ ]
+}
diff --git a/deps/rabbitmq_cli/test/fixtures/files/empty_pidfile.pid b/deps/rabbitmq_cli/test/fixtures/files/empty_pidfile.pid
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/deps/rabbitmq_cli/test/fixtures/files/empty_pidfile.pid
diff --git a/deps/rabbitmq_cli/test/fixtures/files/invalid_erl_expression.escript b/deps/rabbitmq_cli/test/fixtures/files/invalid_erl_expression.escript
new file mode 100644
index 0000000000..6802266390
--- /dev/null
+++ b/deps/rabbitmq_cli/test/fixtures/files/invalid_erl_expression.escript
@@ -0,0 +1 @@
+1 + . $$$ ///\\\
diff --git a/deps/rabbitmq_cli/test/fixtures/files/invalid_pidfile.pid b/deps/rabbitmq_cli/test/fixtures/files/invalid_pidfile.pid
new file mode 100644
index 0000000000..a4fa4cf4e4
--- /dev/null
+++ b/deps/rabbitmq_cli/test/fixtures/files/invalid_pidfile.pid
@@ -0,0 +1 @@
+invalid///&
diff --git a/deps/rabbitmq_cli/test/fixtures/files/loaded_applications.escript b/deps/rabbitmq_cli/test/fixtures/files/loaded_applications.escript
new file mode 100644
index 0000000000..6db0ab52d3
--- /dev/null
+++ b/deps/rabbitmq_cli/test/fixtures/files/loaded_applications.escript
@@ -0,0 +1 @@
+application:loaded_applications().
diff --git a/deps/rabbitmq_cli/test/fixtures/files/valid_erl_expression.escript b/deps/rabbitmq_cli/test/fixtures/files/valid_erl_expression.escript
new file mode 100644
index 0000000000..fb18c2507a
--- /dev/null
+++ b/deps/rabbitmq_cli/test/fixtures/files/valid_erl_expression.escript
@@ -0,0 +1 @@
+1 + 1.
diff --git a/deps/rabbitmq_cli/test/fixtures/files/valid_pidfile.pid b/deps/rabbitmq_cli/test/fixtures/files/valid_pidfile.pid
new file mode 100644
index 0000000000..8b64142ea1
--- /dev/null
+++ b/deps/rabbitmq_cli/test/fixtures/files/valid_pidfile.pid
@@ -0,0 +1 @@
+13566
diff --git a/deps/rabbitmq_cli/test/fixtures/files/valid_pidfile_with_spaces.pid b/deps/rabbitmq_cli/test/fixtures/files/valid_pidfile_with_spaces.pid
new file mode 100644
index 0000000000..83a97170f4
--- /dev/null
+++ b/deps/rabbitmq_cli/test/fixtures/files/valid_pidfile_with_spaces.pid
@@ -0,0 +1 @@
+ 83777
diff --git a/deps/rabbitmq_cli/test/fixtures/plugins/.gitignore b/deps/rabbitmq_cli/test/fixtures/plugins/.gitignore
new file mode 100644
index 0000000000..8f884eb3ab
--- /dev/null
+++ b/deps/rabbitmq_cli/test/fixtures/plugins/.gitignore
@@ -0,0 +1 @@
+!*.ez
diff --git a/deps/rabbitmq_cli/test/fixtures/plugins/plugins-subdirectory-01/mock_rabbitmq_plugins_01-0.1.0.ez b/deps/rabbitmq_cli/test/fixtures/plugins/plugins-subdirectory-01/mock_rabbitmq_plugins_01-0.1.0.ez
new file mode 100644
index 0000000000..8cbe118971
--- /dev/null
+++ b/deps/rabbitmq_cli/test/fixtures/plugins/plugins-subdirectory-01/mock_rabbitmq_plugins_01-0.1.0.ez
Binary files differ
diff --git a/deps/rabbitmq_cli/test/fixtures/plugins/plugins-subdirectory-01/mock_rabbitmq_plugins_01-0.2.0.ez b/deps/rabbitmq_cli/test/fixtures/plugins/plugins-subdirectory-01/mock_rabbitmq_plugins_01-0.2.0.ez
new file mode 100644
index 0000000000..57e93ba7a0
--- /dev/null
+++ b/deps/rabbitmq_cli/test/fixtures/plugins/plugins-subdirectory-01/mock_rabbitmq_plugins_01-0.2.0.ez
Binary files differ
diff --git a/deps/rabbitmq_cli/test/fixtures/plugins/plugins-subdirectory-01/mock_rabbitmq_plugins_02-0.1.0.ez b/deps/rabbitmq_cli/test/fixtures/plugins/plugins-subdirectory-01/mock_rabbitmq_plugins_02-0.1.0.ez
new file mode 100644
index 0000000000..c9cbef855c
--- /dev/null
+++ b/deps/rabbitmq_cli/test/fixtures/plugins/plugins-subdirectory-01/mock_rabbitmq_plugins_02-0.1.0.ez
Binary files differ
diff --git a/deps/rabbitmq_cli/test/fixtures/plugins/plugins-subdirectory-02/mock_rabbitmq_plugins_02-0.2.0.ez b/deps/rabbitmq_cli/test/fixtures/plugins/plugins-subdirectory-02/mock_rabbitmq_plugins_02-0.2.0.ez
new file mode 100644
index 0000000000..c68a17d33b
--- /dev/null
+++ b/deps/rabbitmq_cli/test/fixtures/plugins/plugins-subdirectory-02/mock_rabbitmq_plugins_02-0.2.0.ez
Binary files differ
diff --git a/deps/rabbitmq_cli/test/fixtures/plugins/plugins-subdirectory-03/mock_rabbitmq_plugins_03-0.1.0.ez b/deps/rabbitmq_cli/test/fixtures/plugins/plugins-subdirectory-03/mock_rabbitmq_plugins_03-0.1.0.ez
new file mode 100644
index 0000000000..448518b9d6
--- /dev/null
+++ b/deps/rabbitmq_cli/test/fixtures/plugins/plugins-subdirectory-03/mock_rabbitmq_plugins_03-0.1.0.ez
Binary files differ
diff --git a/deps/rabbitmq_cli/test/fixtures/plugins/plugins-subdirectory-04/mock_rabbitmq_plugins_04.ez b/deps/rabbitmq_cli/test/fixtures/plugins/plugins-subdirectory-04/mock_rabbitmq_plugins_04.ez
new file mode 100644
index 0000000000..8d48fe534a
--- /dev/null
+++ b/deps/rabbitmq_cli/test/fixtures/plugins/plugins-subdirectory-04/mock_rabbitmq_plugins_04.ez
Binary files differ
diff --git a/deps/rabbitmq_cli/test/fixtures/plugins/plugins_with_version_requirements/mock_rabbitmq_plugin_for_3_7-0.1.0.ez b/deps/rabbitmq_cli/test/fixtures/plugins/plugins_with_version_requirements/mock_rabbitmq_plugin_for_3_7-0.1.0.ez
new file mode 100644
index 0000000000..1596be2d90
--- /dev/null
+++ b/deps/rabbitmq_cli/test/fixtures/plugins/plugins_with_version_requirements/mock_rabbitmq_plugin_for_3_7-0.1.0.ez
Binary files differ
diff --git a/deps/rabbitmq_cli/test/fixtures/plugins/plugins_with_version_requirements/mock_rabbitmq_plugin_for_3_8-0.1.0.ez b/deps/rabbitmq_cli/test/fixtures/plugins/plugins_with_version_requirements/mock_rabbitmq_plugin_for_3_8-0.1.0.ez
new file mode 100644
index 0000000000..d5d32bd490
--- /dev/null
+++ b/deps/rabbitmq_cli/test/fixtures/plugins/plugins_with_version_requirements/mock_rabbitmq_plugin_for_3_8-0.1.0.ez
Binary files differ
diff --git a/deps/rabbitmq_cli/test/fixtures/plugins/plugins_with_version_requirements/mock_rabbitmq_plugin_for_3_8-0.1.0/ebin/mock_rabbitmq_plugin_for_3_8.app b/deps/rabbitmq_cli/test/fixtures/plugins/plugins_with_version_requirements/mock_rabbitmq_plugin_for_3_8-0.1.0/ebin/mock_rabbitmq_plugin_for_3_8.app
new file mode 100644
index 0000000000..dae70550b6
--- /dev/null
+++ b/deps/rabbitmq_cli/test/fixtures/plugins/plugins_with_version_requirements/mock_rabbitmq_plugin_for_3_8-0.1.0/ebin/mock_rabbitmq_plugin_for_3_8.app
@@ -0,0 +1,10 @@
+{application, mock_rabbitmq_plugin_for_3_8, [
+ {description, "New project"},
+ {vsn, "0.1.0"},
+ {modules, ['mock_rabbitmq_plugins_01_app','mock_rabbitmq_plugins_01_sup']},
+ {registered, [mock_rabbitmq_plugins_01_sup]},
+ {applications, [kernel,stdlib,rabbit]},
+ {mod, {mock_rabbitmq_plugins_01_app, []}},
+ {env, []},
+ {broker_version_requirements, ["3.8.0", "3.9.0"]}
+]}.
diff --git a/deps/rabbitmq_cli/test/fixtures/plugins/plugins_with_version_requirements/mock_rabbitmq_plugin_for_3_8-0.1.0/ebin/mock_rabbitmq_plugins_01_app.beam b/deps/rabbitmq_cli/test/fixtures/plugins/plugins_with_version_requirements/mock_rabbitmq_plugin_for_3_8-0.1.0/ebin/mock_rabbitmq_plugins_01_app.beam
new file mode 100644
index 0000000000..903e1c3f6e
--- /dev/null
+++ b/deps/rabbitmq_cli/test/fixtures/plugins/plugins_with_version_requirements/mock_rabbitmq_plugin_for_3_8-0.1.0/ebin/mock_rabbitmq_plugins_01_app.beam
Binary files differ
diff --git a/deps/rabbitmq_cli/test/fixtures/plugins/plugins_with_version_requirements/mock_rabbitmq_plugin_for_3_8-0.1.0/ebin/mock_rabbitmq_plugins_01_sup.beam b/deps/rabbitmq_cli/test/fixtures/plugins/plugins_with_version_requirements/mock_rabbitmq_plugin_for_3_8-0.1.0/ebin/mock_rabbitmq_plugins_01_sup.beam
new file mode 100644
index 0000000000..7d6dd6820c
--- /dev/null
+++ b/deps/rabbitmq_cli/test/fixtures/plugins/plugins_with_version_requirements/mock_rabbitmq_plugin_for_3_8-0.1.0/ebin/mock_rabbitmq_plugins_01_sup.beam
Binary files differ
diff --git a/deps/rabbitmq_cli/test/fixtures/plugins/plugins_with_version_requirements/mock_rabbitmq_plugins_01-0.1.0.ez b/deps/rabbitmq_cli/test/fixtures/plugins/plugins_with_version_requirements/mock_rabbitmq_plugins_01-0.1.0.ez
new file mode 100644
index 0000000000..6eacd2cd1e
--- /dev/null
+++ b/deps/rabbitmq_cli/test/fixtures/plugins/plugins_with_version_requirements/mock_rabbitmq_plugins_01-0.1.0.ez
Binary files differ
diff --git a/deps/rabbitmq_cli/test/fixtures/plugins/plugins_with_version_requirements/mock_rabbitmq_plugins_02-0.1.0.ez b/deps/rabbitmq_cli/test/fixtures/plugins/plugins_with_version_requirements/mock_rabbitmq_plugins_02-0.1.0.ez
new file mode 100644
index 0000000000..1c085e6104
--- /dev/null
+++ b/deps/rabbitmq_cli/test/fixtures/plugins/plugins_with_version_requirements/mock_rabbitmq_plugins_02-0.1.0.ez
Binary files differ
diff --git a/deps/rabbitmq_cli/test/json_formatting.exs b/deps/rabbitmq_cli/test/json_formatting.exs
new file mode 100644
index 0000000000..c0e35e2ad3
--- /dev/null
+++ b/deps/rabbitmq_cli/test/json_formatting.exs
@@ -0,0 +1,59 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+
+defmodule JSONFormattingTest do
+ use ExUnit.Case, async: false
+ import ExUnit.CaptureIO
+ import RabbitMQ.CLI.Core.ExitCodes
+ import TestHelper
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ set_scope(:all)
+
+ :ok
+ end
+
+ test "JSON output of status" do
+ set_scope(:ctl)
+
+ node = to_string(get_rabbit_hostname())
+ command = ["status", "-n", node, "--formatter=json"]
+ output = capture_io(:stdio, fn ->
+ error_check(command, exit_ok())
+ end)
+ {:ok, doc} = JSON.decode(output)
+
+ assert Map.has_key?(doc, "memory")
+ assert Map.has_key?(doc, "file_descriptors")
+ assert Map.has_key?(doc, "listeners")
+ assert Map.has_key?(doc, "processes")
+ assert Map.has_key?(doc, "os")
+ assert Map.has_key?(doc, "pid")
+ assert Map.has_key?(doc, "rabbitmq_version")
+
+ assert doc["alarms"] == []
+ end
+
+ test "JSON output of cluster_status" do
+ set_scope(:ctl)
+
+ node = to_string(get_rabbit_hostname())
+ command = ["cluster_status", "-n", node, "--formatter=json"]
+ output = capture_io(:stdio, fn ->
+ error_check(command, exit_ok())
+ end)
+ {:ok, doc} = JSON.decode(output)
+
+ assert Enum.member?(doc["disk_nodes"], node)
+ assert Map.has_key?(doc["listeners"], node)
+ assert Map.has_key?(doc["versions"], node)
+ assert doc["alarms"] == []
+ assert doc["partitions"] == %{}
+ end
+end
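The assertions above go through the CLI's bundled `JSON` module; the same checks can be run against a canned document using the `JSON.decode/1` call this suite already relies on:

    {:ok, doc} = JSON.decode(~s({"alarms": [], "memory": {"total": 1024}}))
    true = Map.has_key?(doc, "memory")
    [] = doc["alarms"]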
diff --git a/deps/rabbitmq_cli/test/plugins/directories_command_test.exs b/deps/rabbitmq_cli/test/plugins/directories_command_test.exs
new file mode 100644
index 0000000000..cae418717a
--- /dev/null
+++ b/deps/rabbitmq_cli/test/plugins/directories_command_test.exs
@@ -0,0 +1,103 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule DirectoriesCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Plugins.Commands.DirectoriesCommand
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+ node = get_rabbit_hostname()
+
+ {:ok, plugins_file} = :rabbit_misc.rpc_call(node,
+ :application, :get_env,
+ [:rabbit, :enabled_plugins_file])
+ {:ok, plugins_dir} = :rabbit_misc.rpc_call(node,
+ :application, :get_env,
+ [:rabbit, :plugins_dir])
+ {:ok, plugins_expand_dir} = :rabbit_misc.rpc_call(node,
+ :application, :get_env,
+ [:rabbit, :plugins_expand_dir])
+
+ rabbitmq_home = :rabbit_misc.rpc_call(node, :code, :lib_dir, [:rabbit])
+
+ {:ok, opts: %{
+ plugins_file: plugins_file,
+ plugins_dir: plugins_dir,
+ plugins_expand_dir: plugins_expand_dir,
+ rabbitmq_home: rabbitmq_home,
+ }}
+ end
+
+ setup context do
+ {
+ :ok,
+ opts: Map.merge(context[:opts], %{
+ node: get_rabbit_hostname(),
+ timeout: 1000
+ })
+ }
+ end
+
+ test "validate: providing no arguments passes validation", context do
+ assert @command.validate([], context[:opts]) == :ok
+ end
+
+ test "validate: providing --online passes validation", context do
+ assert @command.validate([], Map.merge(%{online: true}, context[:opts])) == :ok
+ end
+
+ test "validate: providing --offline passes validation", context do
+ assert @command.validate([], Map.merge(%{offline: true}, context[:opts])) == :ok
+ end
+
+ test "validate: providing any arguments fails validation", context do
+ assert @command.validate(["a", "b", "c"], context[:opts]) ==
+ {:validation_failure, :too_many_args}
+ end
+
+ test "validate: setting both --online and --offline to false fails validation", context do
+ assert @command.validate([], Map.merge(context[:opts], %{online: false, offline: false})) ==
+ {:validation_failure, {:bad_argument, "Cannot set online and offline to false"}}
+ end
+
+ test "validate: setting both --online and --offline to true fails validation", context do
+ assert @command.validate([], Map.merge(context[:opts], %{online: true, offline: true})) ==
+ {:validation_failure, {:bad_argument, "Cannot set both online and offline"}}
+ end
+
+ test "validate_execution_environment: when --offline is used, specifying a non-existent enabled_plugins_file passes validation", context do
+ opts = context[:opts] |> Map.merge(%{offline: true, enabled_plugins_file: "none"})
+ assert @command.validate_execution_environment([], opts) == :ok
+ end
+
+ test "validate_execution_environment: when --offline is used, specifying a non-existent plugins_dir fails validation", context do
+ opts = context[:opts] |> Map.merge(%{offline: true, plugins_dir: "none"})
+ assert @command.validate_execution_environment([], opts) == {:validation_failure, :plugins_dir_does_not_exist}
+ end
+
+ test "validate_execution_environment: when --online is used, specifying a non-existent enabled_plugins_file passes validation", context do
+ opts = context[:opts] |> Map.merge(%{online: true, enabled_plugins_file: "none"})
+ assert @command.validate_execution_environment([], opts) == :ok
+ end
+
+ test "validate_execution_environment: when --online is used, specifying a non-existent plugins_dir passes validation", context do
+ opts = context[:opts] |> Map.merge(%{online: true, plugins_dir: "none"})
+ assert @command.validate_execution_environment([], opts) == :ok
+ end
+
+
+ test "run: when --online is used, lists plugin directories", context do
+ opts = Map.merge(context[:opts], %{online: true})
+ dirs = %{plugins_dir: to_string(Map.get(opts, :plugins_dir)),
+ plugins_expand_dir: to_string(Map.get(opts, :plugins_expand_dir)),
+ enabled_plugins_file: to_string(Map.get(opts, :plugins_file))}
+
+ assert @command.run([], opts) == {:ok, dirs}
+ end
+end
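The online/offline matrix above rejects exactly two combinations. A hypothetical sketch of such a validator (error strings copied from the assertions above):

    defmodule HypotheticalModeValidator do
      def validate_mode(%{online: true, offline: true}),
        do: {:validation_failure, {:bad_argument, "Cannot set both online and offline"}}

      def validate_mode(%{online: false, offline: false}),
        do: {:validation_failure, {:bad_argument, "Cannot set online and offline to false"}}

      def validate_mode(_opts), do: :ok
    end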
diff --git a/deps/rabbitmq_cli/test/plugins/disable_plugins_command_test.exs b/deps/rabbitmq_cli/test/plugins/disable_plugins_command_test.exs
new file mode 100644
index 0000000000..dc5d92e090
--- /dev/null
+++ b/deps/rabbitmq_cli/test/plugins/disable_plugins_command_test.exs
@@ -0,0 +1,187 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule DisablePluginsCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ alias RabbitMQ.CLI.Core.ExitCodes
+
+ @command RabbitMQ.CLI.Plugins.Commands.DisableCommand
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+ node = get_rabbit_hostname()
+
+ {:ok, plugins_file} = :rabbit_misc.rpc_call(node,
+ :application, :get_env,
+ [:rabbit, :enabled_plugins_file])
+ {:ok, plugins_dir} = :rabbit_misc.rpc_call(node,
+ :application, :get_env,
+ [:rabbit, :plugins_dir])
+ rabbitmq_home = :rabbit_misc.rpc_call(node, :code, :lib_dir, [:rabbit])
+
+ IO.puts("plugins disable tests default env: enabled plugins = #{plugins_file}, plugins dir = #{plugins_dir}, RabbitMQ home directory = #{rabbitmq_home}")
+
+ {:ok, [enabled_plugins]} = :file.consult(plugins_file)
+ IO.puts("plugins disable tests will assume tnat #{Enum.join(enabled_plugins, ",")} is the list of enabled plugins to revert to")
+
+ opts = %{enabled_plugins_file: plugins_file,
+ plugins_dir: plugins_dir,
+ rabbitmq_home: rabbitmq_home,
+ online: false, offline: false,
+ all: false}
+
+ on_exit(fn ->
+ set_enabled_plugins(enabled_plugins, :online, get_rabbit_hostname(), opts)
+ end)
+
+ {:ok, opts: opts}
+ end
+
+ setup context do
+ set_enabled_plugins([:rabbitmq_stomp, :rabbitmq_federation],
+ :online,
+ get_rabbit_hostname(),
+ context[:opts])
+
+
+ {
+ :ok,
+ opts: Map.merge(context[:opts], %{
+ node: get_rabbit_hostname(),
+ timeout: 1000
+ })
+ }
+ end
+
+ test "validate: specifying both --online and --offline is reported as invalid", context do
+ assert match?(
+ {:validation_failure, {:bad_argument, _}},
+ @command.validate(["a"], Map.merge(context[:opts], %{online: true, offline: true}))
+ )
+ end
+
+ test "validate: not specifying plugins to enable is reported as invalid", context do
+ assert match?(
+ {:validation_failure, :not_enough_args},
+ @command.validate([], Map.merge(context[:opts], %{online: true, offline: false}))
+ )
+ end
+
+ test "validate_execution_environment: specifying a non-existent enabled_plugins_file is fine", context do
+ assert @command.validate_execution_environment(["a"], Map.merge(context[:opts], %{enabled_plugins_file: "none"})) == :ok
+ end
+
+ test "validate_execution_environment: specifying a non-existent plugins_dir is reported as an error", context do
+ assert @command.validate_execution_environment(["a"], Map.merge(context[:opts], %{plugins_dir: "none"})) ==
+ {:validation_failure, :plugins_dir_does_not_exist}
+ end
+
+ test "node is inaccessible, writes out enabled plugins file and returns implicitly enabled plugin list", context do
+ assert {:stream, test_stream} =
+ @command.run(["rabbitmq_stomp"], Map.merge(context[:opts], %{node: :nonode}))
+ assert [[:rabbitmq_federation],
+ %{mode: :offline, disabled: [:rabbitmq_stomp], set: [:rabbitmq_federation]}] ==
+ Enum.to_list(test_stream)
+ assert {:ok, [[:rabbitmq_federation]]} == :file.consult(context[:opts][:enabled_plugins_file])
+ assert [:amqp_client, :rabbitmq_federation, :rabbitmq_stomp] ==
+ Enum.sort(:rabbit_misc.rpc_call(context[:opts][:node], :rabbit_plugins, :active, []))
+ end
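+
+ # Note: run/2 returns its results as a {:stream, _}; the tests consume it
+ # with Enum.to_list/1 before inspecting the enabled plugins file, since the
+ # command's effects may be interleaved with stream evaluation.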
+
+ test "in offline mode, writes out enabled plugins and reports implicitly enabled plugin list", context do
+ assert {:stream, test_stream} = @command.run(["rabbitmq_stomp"], Map.merge(context[:opts], %{offline: true, online: false}))
+ assert [[:rabbitmq_federation],
+ %{mode: :offline, disabled: [:rabbitmq_stomp], set: [:rabbitmq_federation]}] == Enum.to_list(test_stream)
+ assert {:ok, [[:rabbitmq_federation]]} == :file.consult(context[:opts][:enabled_plugins_file])
+ assert [:amqp_client, :rabbitmq_federation, :rabbitmq_stomp] ==
+ Enum.sort(:rabbit_misc.rpc_call(context[:opts][:node], :rabbit_plugins, :active, []))
+ end
+
+ test "in offline mode , removes implicitly enabled plugins when last explicitly enabled one is removed", context do
+ assert {:stream, test_stream0} =
+ @command.run(["rabbitmq_federation"], Map.merge(context[:opts], %{offline: true, online: false}))
+ assert [[:rabbitmq_stomp],
+ %{mode: :offline, disabled: [:rabbitmq_federation], set: [:rabbitmq_stomp]}] == Enum.to_list(test_stream0)
+ assert {:ok, [[:rabbitmq_stomp]]} == :file.consult(context[:opts][:enabled_plugins_file])
+
+ assert {:stream, test_stream1} =
+ @command.run(["rabbitmq_stomp"], Map.merge(context[:opts], %{offline: true, online: false}))
+ assert [[],
+ %{mode: :offline, disabled: [:rabbitmq_stomp], set: []}] ==
+ Enum.to_list(test_stream1)
+ assert {:ok, [[]]} = :file.consult(context[:opts][:enabled_plugins_file])
+ end
+
+ test "updates plugin list and stops disabled plugins", context do
+ assert {:stream, test_stream0} =
+ @command.run(["rabbitmq_stomp"], context[:opts])
+ assert [[:rabbitmq_federation],
+ %{mode: :online,
+ started: [], stopped: [:rabbitmq_stomp],
+ disabled: [:rabbitmq_stomp],
+ set: [:rabbitmq_federation]}] ==
+ Enum.to_list(test_stream0)
+ assert {:ok, [[:rabbitmq_federation]]} == :file.consult(context[:opts][:enabled_plugins_file])
+ assert [:amqp_client, :rabbitmq_federation] ==
+ Enum.sort(:rabbit_misc.rpc_call(context[:opts][:node], :rabbit_plugins, :active, []))
+
+ assert {:stream, test_stream1} =
+ @command.run(["rabbitmq_federation"], context[:opts])
+ assert [[],
+ %{mode: :online,
+ started: [], stopped: [:rabbitmq_federation],
+ disabled: [:rabbitmq_federation],
+ set: []}] ==
+ Enum.to_list(test_stream1)
+ assert {:ok, [[]]} == :file.consult(context[:opts][:enabled_plugins_file])
+ assert Enum.empty?(Enum.sort(:rabbit_misc.rpc_call(context[:opts][:node], :rabbit_plugins, :active, [])))
+ end
+
+ test "can disable multiple plugins at once", context do
+ assert {:stream, test_stream} =
+ @command.run(["rabbitmq_stomp", "rabbitmq_federation"], context[:opts])
+ assert [[],
+ %{mode: :online,
+ started: [], stopped: [:rabbitmq_federation, :rabbitmq_stomp],
+ disabled: [:rabbitmq_federation, :rabbitmq_stomp],
+ set: []}] ==
+ Enum.to_list(test_stream)
+ assert {:ok, [[]]} == :file.consult(context[:opts][:enabled_plugins_file])
+ assert Enum.empty?(Enum.sort(:rabbit_misc.rpc_call(context[:opts][:node], :rabbit_plugins, :active, [])))
+ end
+
+ test "disabling a dependency disables all plugins that depend on it", context do
+ assert {:stream, test_stream} = @command.run(["amqp_client"], context[:opts])
+ assert [[],
+ %{mode: :online,
+ started: [], stopped: [:rabbitmq_federation, :rabbitmq_stomp],
+ disabled: [:rabbitmq_federation, :rabbitmq_stomp],
+ set: []}] ==
+ Enum.to_list(test_stream)
+ assert {:ok, [[]]} == :file.consult(context[:opts][:enabled_plugins_file])
+ assert Enum.empty?(Enum.sort(:rabbit_misc.rpc_call(context[:opts][:node], :rabbit_plugins, :active, [])))
+ end
+
+ test "formats enabled plugins mismatch errors", context do
+ err = {:enabled_plugins_mismatch, '/tmp/a/cli/path', '/tmp/a/server/path'}
+ assert {:error, ExitCodes.exit_dataerr(),
+ "Could not update enabled plugins file at /tmp/a/cli/path: target node #{context[:opts][:node]} uses a different path (/tmp/a/server/path)"}
+ == @command.output({:error, err}, context[:opts])
+ end
+
+ test "formats enabled plugins write errors", context do
+ err1 = {:cannot_write_enabled_plugins_file, "/tmp/a/path", :eacces}
+ assert {:error, ExitCodes.exit_dataerr(),
+ "Could not update enabled plugins file at /tmp/a/path: the file does not exist or permission was denied (EACCES)"} ==
+ @command.output({:error, err1}, context[:opts])
+
+ err2 = {:cannot_write_enabled_plugins_file, "/tmp/a/path", :enoent}
+ assert {:error, ExitCodes.exit_dataerr(),
+ "Could not update enabled plugins file at /tmp/a/path: the file does not exist (ENOENT)"} ==
+ @command.output({:error, err2}, context[:opts])
+ end
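+
+ # For reference, a rough CLI equivalent of the flows exercised above
+ # (a sketch; assumes a standard broker install):
+ #
+ #   rabbitmq-plugins disable rabbitmq_stomp --offline
+ #   rabbitmq-plugins disable rabbitmq_stomp rabbitmq_federation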
+end
diff --git a/deps/rabbitmq_cli/test/plugins/enable_plugins_command_test.exs b/deps/rabbitmq_cli/test/plugins/enable_plugins_command_test.exs
new file mode 100644
index 0000000000..09aaf38351
--- /dev/null
+++ b/deps/rabbitmq_cli/test/plugins/enable_plugins_command_test.exs
@@ -0,0 +1,243 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule EnablePluginsCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ alias RabbitMQ.CLI.Core.ExitCodes
+
+ @command RabbitMQ.CLI.Plugins.Commands.EnableCommand
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+ node = get_rabbit_hostname()
+
+ {:ok, plugins_file} = :rabbit_misc.rpc_call(node,
+ :application, :get_env,
+ [:rabbit, :enabled_plugins_file])
+ {:ok, plugins_dir} = :rabbit_misc.rpc_call(node,
+ :application, :get_env,
+ [:rabbit, :plugins_dir])
+ rabbitmq_home = :rabbit_misc.rpc_call(node, :code, :lib_dir, [:rabbit])
+
+ IO.puts("plugins enable tests default env: enabled plugins = #{plugins_file}, plugins dir = #{plugins_dir}, RabbitMQ home directory = #{rabbitmq_home}")
+
+ {:ok, [enabled_plugins]} = :file.consult(plugins_file)
+ IO.puts("plugins enable tests will assume tnat #{Enum.join(enabled_plugins, ",")} is the list of enabled plugins to revert to")
+
+ opts = %{enabled_plugins_file: plugins_file,
+ plugins_dir: plugins_dir,
+ rabbitmq_home: rabbitmq_home,
+ online: false, offline: false,
+ all: false}
+
+ on_exit(fn ->
+ set_enabled_plugins(enabled_plugins, :online, get_rabbit_hostname(), opts)
+ end)
+
+
+ {:ok, opts: opts}
+ end
+
+ setup context do
+ set_enabled_plugins([:rabbitmq_stomp, :rabbitmq_federation],
+ :online,
+ get_rabbit_hostname(),
+ context[:opts])
+
+ {
+ :ok,
+ opts: Map.merge(context[:opts], %{
+ node: get_rabbit_hostname(),
+ timeout: 1000
+ })
+ }
+ end
+
+ test "validate: specifying both --online and --offline is reported as invalid", context do
+ assert match?(
+ {:validation_failure, {:bad_argument, _}},
+ @command.validate(["a"], Map.merge(context[:opts], %{online: true, offline: true}))
+ )
+ end
+
+ test "validate: not specifying any plugins to enable is reported as invalid", context do
+ assert match?(
+ {:validation_failure, :not_enough_args},
+ @command.validate([], Map.merge(context[:opts], %{online: true, offline: false}))
+ )
+ end
+
+ test "validate_execution_environment: specifying a non-existent enabled_plugins_file is fine", context do
+ assert @command.validate_execution_environment(["a"], Map.merge(context[:opts], %{enabled_plugins_file: "none"})) == :ok
+ end
+
+ test "validate_execution_environment: specifying a non-existent plugins_dir is reported as an error", context do
+ assert @command.validate_execution_environment(["a"], Map.merge(context[:opts], %{plugins_dir: "none"})) ==
+ {:validation_failure, :plugins_dir_does_not_exist}
+ end
+
+ test "if node is inaccessible, writes enabled plugins file and reports implicitly enabled plugin list", context do
+ # Clears enabled plugins file
+ set_enabled_plugins([], :offline, :nonode, context[:opts])
+
+ assert {:stream, test_stream} =
+ @command.run(["rabbitmq_stomp"], Map.merge(context[:opts], %{node: :nonode}))
+ assert [[:rabbitmq_stomp],
+ %{mode: :offline, enabled: [:rabbitmq_stomp], set: [:rabbitmq_stomp]}] ==
+ Enum.to_list(test_stream)
+ check_plugins_enabled([:rabbitmq_stomp], context)
+ assert [:amqp_client, :rabbitmq_federation, :rabbitmq_stomp] ==
+ currently_active_plugins(context)
+ end
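+
+ # The node still reports federation and stomp as active above because
+ # :nonode/offline runs only rewrite the enabled plugins file; they never
+ # touch the live broker, which keeps whatever the per-test setup enabled.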
+
+ test "in offline mode, writes enabled plugins and reports implicitly enabled plugin list", context do
+ # Clears enabled plugins file
+ set_enabled_plugins([], :offline, :nonode, context[:opts])
+
+ assert {:stream, test_stream} =
+ @command.run(["rabbitmq_stomp"], Map.merge(context[:opts], %{offline: true, online: false}))
+ assert [[:rabbitmq_stomp],
+ %{mode: :offline, enabled: [:rabbitmq_stomp], set: [:rabbitmq_stomp]}] ==
+ Enum.to_list(test_stream)
+ check_plugins_enabled([:rabbitmq_stomp], context)
+ assert_equal_sets(
+ [:amqp_client, :rabbitmq_federation, :rabbitmq_stomp],
+ currently_active_plugins(context))
+ end
+
+ test "adds additional plugins to those already enabled", context do
+ # Clears enabled plugins file
+ set_enabled_plugins([], :offline, :nonode, context[:opts])
+
+ assert {:stream, test_stream0} =
+ @command.run(["rabbitmq_stomp"], Map.merge(context[:opts], %{offline: true, online: false}))
+ assert [[:rabbitmq_stomp],
+ %{mode: :offline, enabled: [:rabbitmq_stomp], set: [:rabbitmq_stomp]}] ==
+ Enum.to_list(test_stream0)
+ check_plugins_enabled([:rabbitmq_stomp], context)
+ assert {:stream, test_stream1} =
+ @command.run(["rabbitmq_federation"], Map.merge(context[:opts], %{offline: true, online: false}))
+ assert [[:rabbitmq_federation, :rabbitmq_stomp],
+ %{mode: :offline, enabled: [:rabbitmq_federation],
+ set: [:rabbitmq_federation, :rabbitmq_stomp]}] ==
+ Enum.to_list(test_stream1)
+ check_plugins_enabled([:rabbitmq_stomp, :rabbitmq_federation], context)
+ end
+
+ test "updates plugin list and starts newly enabled plugins", context do
+ # Clears the enabled plugins file and stops all plugins
+ set_enabled_plugins([], :online, context[:opts][:node], context[:opts])
+
+ assert {:stream, test_stream0} =
+ @command.run(["rabbitmq_stomp"], context[:opts])
+ assert [[:rabbitmq_stomp],
+ %{mode: :online,
+ started: [:rabbitmq_stomp], stopped: [],
+ enabled: [:rabbitmq_stomp],
+ set: [:rabbitmq_stomp]}] ==
+ Enum.to_list(test_stream0)
+
+ check_plugins_enabled([:rabbitmq_stomp], context)
+ assert_equal_sets([:amqp_client, :rabbitmq_stomp], currently_active_plugins(context))
+
+ {:stream, test_stream1} =
+ @command.run(["rabbitmq_federation"], context[:opts])
+ assert [[:rabbitmq_federation, :rabbitmq_stomp],
+ %{mode: :online,
+ started: [:rabbitmq_federation], stopped: [],
+ enabled: [:rabbitmq_federation],
+ set: [:rabbitmq_federation, :rabbitmq_stomp]}] ==
+ Enum.to_list(test_stream1)
+
+ check_plugins_enabled([:rabbitmq_stomp, :rabbitmq_federation], context)
+ assert_equal_sets([:amqp_client, :rabbitmq_federation, :rabbitmq_stomp], currently_active_plugins(context))
+ end
+
+ test "can enable multiple plugins at once", context do
+ # Clears the enabled plugins file and stops all plugins
+ set_enabled_plugins([], :online, context[:opts][:node], context[:opts])
+
+ assert {:stream, test_stream} =
+ @command.run(["rabbitmq_stomp", "rabbitmq_federation"], context[:opts])
+ assert [[:rabbitmq_federation, :rabbitmq_stomp],
+ %{mode: :online,
+ started: [:rabbitmq_federation, :rabbitmq_stomp], stopped: [],
+ enabled: [:rabbitmq_federation, :rabbitmq_stomp],
+ set: [:rabbitmq_federation, :rabbitmq_stomp]}] ==
+ Enum.to_list(test_stream)
+ check_plugins_enabled([:rabbitmq_stomp, :rabbitmq_federation], context)
+
+ assert_equal_sets([:amqp_client, :rabbitmq_federation, :rabbitmq_stomp], currently_active_plugins(context))
+ end
+
+ test "does not enable an already implicitly enabled plugin", context do
+ # Resets the enabled plugins file to rabbitmq_federation only
+ set_enabled_plugins([:rabbitmq_federation], :online, context[:opts][:node], context[:opts])
+ assert {:stream, test_stream} =
+ @command.run(["amqp_client"], context[:opts])
+ assert [[:rabbitmq_federation],
+ %{mode: :online,
+ started: [], stopped: [],
+ enabled: [],
+ set: [:rabbitmq_federation]}] ==
+ Enum.to_list(test_stream)
+ check_plugins_enabled([:rabbitmq_federation], context)
+ assert [:amqp_client, :rabbitmq_federation] ==
+ currently_active_plugins(context)
+
+ end
+
+ test "run: does not enable plugins with unmet version requirements", context do
+ set_enabled_plugins([], :online, context[:opts][:node], context[:opts])
+
+ plugins_directory = fixture_plugins_path("plugins_with_version_requirements")
+ opts = get_opts_with_plugins_directories(context, [plugins_directory])
+ switch_plugins_directories(context[:opts][:plugins_dir], opts[:plugins_dir])
+
+ {:stream, _} =
+ @command.run(["mock_rabbitmq_plugin_for_3_8"], opts)
+ check_plugins_enabled([:mock_rabbitmq_plugin_for_3_8], context)
+
+ # Not changed
+ {:error, _version_error} = @command.run(["mock_rabbitmq_plugin_for_3_7"], opts)
+ check_plugins_enabled([:mock_rabbitmq_plugin_for_3_8], context)
+
+ end
+
+ test "run: does not enable plugins with unmet version requirements even when enabling all plugins", context do
+ set_enabled_plugins([], :online, context[:opts][:node], context[:opts])
+
+ plugins_directory = fixture_plugins_path("plugins_with_version_requirements")
+ opts = get_opts_with_plugins_directories(context, [plugins_directory])
+ opts = Map.merge(opts, %{all: true})
+ switch_plugins_directories(context[:opts][:plugins_dir], opts[:plugins_dir])
+
+ {:error, _version_error} = @command.run([], opts)
+
+ check_plugins_enabled([], context)
+ end
+
+ test "formats enabled plugins mismatch errors", context do
+ err = {:enabled_plugins_mismatch, '/tmp/a/cli/path', '/tmp/a/server/path'}
+ assert {:error, ExitCodes.exit_dataerr(),
+ "Could not update enabled plugins file at /tmp/a/cli/path: target node #{context[:opts][:node]} uses a different path (/tmp/a/server/path)"}
+ == @command.output({:error, err}, context[:opts])
+ end
+
+ test "formats enabled plugins write errors", context do
+ err1 = {:cannot_write_enabled_plugins_file, "/tmp/a/path", :eacces}
+ assert {:error, ExitCodes.exit_dataerr(),
+ "Could not update enabled plugins file at /tmp/a/path: the file does not exist or permission was denied (EACCES)"} ==
+ @command.output({:error, err1}, context[:opts])
+
+ err2 = {:cannot_write_enabled_plugins_file, "/tmp/a/path", :enoent}
+ assert {:error, ExitCodes.exit_dataerr(),
+ "Could not update enabled plugins file at /tmp/a/path: the file does not exist (ENOENT)"} ==
+ @command.output({:error, err2}, context[:opts])
+ end
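+
+ # Rough CLI equivalents of the happy paths above (a sketch; assumes a
+ # standard broker install):
+ #
+ #   rabbitmq-plugins enable rabbitmq_stomp --online
+ #   rabbitmq-plugins enable rabbitmq_stomp rabbitmq_federation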
+end
diff --git a/deps/rabbitmq_cli/test/plugins/is_enabled_command_test.exs b/deps/rabbitmq_cli/test/plugins/is_enabled_command_test.exs
new file mode 100644
index 0000000000..af2900228b
--- /dev/null
+++ b/deps/rabbitmq_cli/test/plugins/is_enabled_command_test.exs
@@ -0,0 +1,103 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule PluginIsEnabledCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Plugins.Commands.IsEnabledCommand
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+ node = get_rabbit_hostname()
+
+ {:ok, plugins_file} = :rabbit_misc.rpc_call(node,
+ :application, :get_env,
+ [:rabbit, :enabled_plugins_file])
+ {:ok, plugins_dir} = :rabbit_misc.rpc_call(node,
+ :application, :get_env,
+ [:rabbit, :plugins_dir])
+ rabbitmq_home = :rabbit_misc.rpc_call(node, :code, :lib_dir, [:rabbit])
+
+ {:ok, [enabled_plugins]} = :file.consult(plugins_file)
+
+ opts = %{enabled_plugins_file: plugins_file,
+ plugins_dir: plugins_dir,
+ rabbitmq_home: rabbitmq_home,
+ online: false, offline: false}
+
+ on_exit(fn ->
+ set_enabled_plugins(enabled_plugins, :online, get_rabbit_hostname(), opts)
+ end)
+
+
+ {:ok, opts: opts}
+ end
+
+ setup context do
+ {
+ :ok,
+ opts: Map.merge(context[:opts], %{
+ node: get_rabbit_hostname(),
+ timeout: 1000
+ })
+ }
+ end
+
+
+
+ test "validate: specifying both --online and --offline is reported as invalid", context do
+ assert match?(
+ {:validation_failure, {:bad_argument, _}},
+ @command.validate(["rabbitmq_stomp"], Map.merge(context[:opts], %{online: true, offline: true}))
+ )
+ end
+
+ test "validate: not specifying any plugins to check is reported as invalid", context do
+ opts = context[:opts] |> Map.merge(%{online: true, offline: false})
+ assert match?({:validation_failure, :not_enough_args}, @command.validate([], opts))
+ end
+
+ test "validate_execution_environment: specifying a non-existent enabled_plugins_file is fine", context do
+ assert @command.validate_execution_environment(["rabbitmq_stomp"],
+ Map.merge(context[:opts], %{online: false,
+ offline: true,
+ enabled_plugins_file: "none"})) == :ok
+ end
+
+ test "validate_execution_environment: specifying a non-existent plugins_dir is reported as an error", context do
+ opts = context[:opts] |> Map.merge(%{online: false,
+ offline: true,
+ plugins_dir: "none"})
+
+ assert @command.validate_execution_environment(["rabbitmq_stomp"], opts) ==
+ {:validation_failure, :plugins_dir_does_not_exist}
+ end
+
+ test "run: when given a single enabled plugin, reports it as such", context do
+ opts = context[:opts] |> Map.merge(%{online: true, offline: false})
+ assert match?({:ok, _},
+ assert @command.run(["rabbitmq_stomp"], opts))
+ end
+
+ test "run: when given a list of actually enabled plugins, reports them as such", context do
+ opts = context[:opts] |> Map.merge(%{online: true, offline: false})
+ assert match?({:ok, _},
+ assert @command.run(["rabbitmq_stomp", "rabbitmq_federation"], opts))
+ end
+
+ test "run: when given a list of non-existent plugins, reports them as not enabled", context do
+ opts = context[:opts] |> Map.merge(%{online: true, offline: false})
+ assert match?({:error, _},
+ assert @command.run(["rabbitmq_xyz", "abc_xyz"], opts))
+ end
+
+ test "run: when given a list with one non-existent plugin, reports the group as not [all] enabled", context do
+ opts = context[:opts] |> Map.merge(%{online: true, offline: false})
+ assert match?({:error, _},
+ assert @command.run(["rabbitmq_stomp", "abc_xyz"], opts))
+ end
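+
+ # Rough CLI equivalent (a sketch): `rabbitmq-plugins is_enabled rabbitmq_stomp`.
+ # The command only succeeds when every named plugin is enabled, which is why
+ # a single unknown name fails the whole group above.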
+end
diff --git a/deps/rabbitmq_cli/test/plugins/list_plugins_command_test.exs b/deps/rabbitmq_cli/test/plugins/list_plugins_command_test.exs
new file mode 100644
index 0000000000..33d9420435
--- /dev/null
+++ b/deps/rabbitmq_cli/test/plugins/list_plugins_command_test.exs
@@ -0,0 +1,235 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule ListPluginsCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Plugins.Commands.ListCommand
+
+ def reset_enabled_plugins_to_preconfigured_defaults(context) do
+ set_enabled_plugins([:rabbitmq_stomp, :rabbitmq_federation],
+ :online,
+ get_rabbit_hostname(), context[:opts])
+ end
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+ node = get_rabbit_hostname()
+
+ {:ok, plugins_file} = :rabbit_misc.rpc_call(node,
+ :application, :get_env,
+ [:rabbit, :enabled_plugins_file])
+ {:ok, plugins_dir} = :rabbit_misc.rpc_call(node,
+ :application, :get_env,
+ [:rabbit, :plugins_dir])
+ rabbitmq_home = :rabbit_misc.rpc_call(node, :code, :lib_dir, [:rabbit])
+ IO.puts("plugins list tests default env: enabled plugins = #{plugins_file}, plugins dir = #{plugins_dir}, RabbitMQ home directory = #{rabbitmq_home}")
+
+ {:ok, [enabled_plugins]} = :file.consult(plugins_file)
+ IO.puts("plugins list tests will assume tnat #{Enum.join(enabled_plugins, ",")} is the list of enabled plugins to revert to")
+
+ opts = %{enabled_plugins_file: plugins_file,
+ plugins_dir: plugins_dir,
+ rabbitmq_home: rabbitmq_home,
+ minimal: false, verbose: false,
+ enabled: false, implicitly_enabled: false}
+
+ on_exit(fn ->
+ set_enabled_plugins(enabled_plugins, :online, get_rabbit_hostname(), opts)
+ end)
+
+
+ {:ok, opts: opts}
+ end
+
+ setup context do
+ reset_enabled_plugins_to_preconfigured_defaults(context)
+
+ {
+ :ok,
+ opts: Map.merge(context[:opts], %{
+ node: get_rabbit_hostname(),
+ timeout: 1000
+ })
+ }
+ end
+
+ test "validate: specifying both --minimal and --verbose is reported as invalid", context do
+ assert match?(
+ {:validation_failure, {:bad_argument, _}},
+ @command.validate([], Map.merge(context[:opts], %{minimal: true, verbose: true}))
+ )
+ end
+
+ test "validate: specifying multiple patterns is reported as an error", context do
+ assert @command.validate(["a", "b", "c"], context[:opts]) ==
+ {:validation_failure, :too_many_args}
+ end
+
+ test "validate_execution_environment: specifying a non-existent enabled_plugins_file is fine", context do
+ assert @command.validate_execution_environment(["a"], Map.merge(context[:opts], %{enabled_plugins_file: "none"})) == :ok
+ end
+
+ test "validate_execution_environment: specifying non existent plugins_dir is reported as an error", context do
+ assert @command.validate_execution_environment(["a"], Map.merge(context[:opts], %{plugins_dir: "none"})) ==
+ {:validation_failure, :plugins_dir_does_not_exist}
+ end
+
+ test "will report list of plugins from file for stopped node", context do
+ node = context[:opts][:node]
+ :ok = :rabbit_misc.rpc_call(node, :application, :stop, [:rabbitmq_stomp])
+ on_exit(fn ->
+ :rabbit_misc.rpc_call(node, :application, :start, [:rabbitmq_stomp])
+ end)
+ assert %{status: :node_down,
+ plugins: [%{name: :rabbitmq_federation, enabled: :enabled, running: false},
+ %{name: :rabbitmq_stomp, enabled: :enabled, running: false}]} =
+ @command.run([".*"], Map.merge(context[:opts], %{node: :nonode}))
+ end
+
+ test "will report list of started plugins for started node", context do
+ node = context[:opts][:node]
+ :ok = :rabbit_misc.rpc_call(node, :application, :stop, [:rabbitmq_stomp])
+ on_exit(fn ->
+ :rabbit_misc.rpc_call(node, :application, :start, [:rabbitmq_stomp])
+ end)
+ assert %{status: :running,
+ plugins: [%{name: :rabbitmq_federation, enabled: :enabled, running: true},
+ %{name: :rabbitmq_stomp, enabled: :enabled, running: false}]} =
+ @command.run([".*"], context[:opts])
+ end
+
+ test "will report description and dependencies for verbose mode", context do
+ assert %{status: :running,
+ plugins: [%{name: :rabbitmq_federation, enabled: :enabled, running: true, description: _, dependencies: [:amqp_client]},
+ %{name: :rabbitmq_stomp, enabled: :enabled, running: true, description: _, dependencies: [:amqp_client]}]} =
+ @command.run([".*"], Map.merge(context[:opts], %{verbose: true}))
+ end
+
+ test "will report plugin names in minimal mode", context do
+ assert %{status: :running,
+ plugins: [%{name: :rabbitmq_federation}, %{name: :rabbitmq_stomp}]} =
+ @command.run([".*"], Map.merge(context[:opts], %{minimal: true}))
+ end
+
+ test "by default lists all plugins", context do
+ set_enabled_plugins([:rabbitmq_federation], :online, context[:opts][:node], context[:opts])
+ on_exit(fn ->
+ set_enabled_plugins([:rabbitmq_stomp, :rabbitmq_federation], :online, context[:opts][:node], context[:opts])
+ end)
+ assert %{status: :running,
+ plugins: [%{name: :rabbitmq_federation, enabled: :enabled, running: true},
+ %{name: :rabbitmq_stomp, enabled: :not_enabled, running: false}]} =
+ @command.run([".*"], context[:opts])
+ end
+
+ test "with enabled flag lists only explicitly enabled plugins", context do
+ set_enabled_plugins([:rabbitmq_federation], :online, context[:opts][:node], context[:opts])
+ on_exit(fn ->
+ set_enabled_plugins([:rabbitmq_stomp, :rabbitmq_federation], :online, context[:opts][:node], context[:opts])
+ end)
+ assert %{status: :running,
+ plugins: [%{name: :rabbitmq_federation, enabled: :enabled, running: true}]} =
+ @command.run([".*"], Map.merge(context[:opts], %{enabled: true}))
+ end
+
+ test "with implicitly_enabled flag lists explicitly and implicitly enabled plugins", context do
+ set_enabled_plugins([:rabbitmq_federation], :online, context[:opts][:node], context[:opts])
+ on_exit(fn ->
+ set_enabled_plugins([:rabbitmq_stomp, :rabbitmq_federation], :online, context[:opts][:node], context[:opts])
+ end)
+ assert %{status: :running,
+ plugins: [%{name: :rabbitmq_federation, enabled: :enabled, running: true}]} =
+ @command.run([".*"], Map.merge(context[:opts], %{implicitly_enabled: true}))
+ end
+
+ test "will filter plugins by name with pattern provided", context do
+ set_enabled_plugins([:rabbitmq_federation], :online, context[:opts][:node], context[:opts])
+ on_exit(fn ->
+ set_enabled_plugins([:rabbitmq_stomp, :rabbitmq_federation], :online, context[:opts][:node], context[:opts])
+ end)
+ assert %{status: :running,
+ plugins: [%{name: :rabbitmq_federation}]} =
+ @command.run(["fede"], Map.merge(context[:opts], %{minimal: true}))
+ assert %{status: :running,
+ plugins: [%{name: :rabbitmq_stomp}]} =
+ @command.run(["stomp$"], Map.merge(context[:opts], %{minimal: true}))
+ end
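+
+ # The positional argument is a regular expression matched against plugin
+ # names: "fede" matches anywhere inside :rabbitmq_federation, while "stomp$"
+ # anchors the match at the end of :rabbitmq_stomp.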
+
+ test "validate: validation is OK when we use multiple plugins directories, one of them does not exist", context do
+ opts = get_opts_with_non_existing_plugins_directory(context)
+ assert @command.validate([], opts) == :ok
+ end
+
+ test "validate: validation is OK when we use multiple plugins directories, directories do exist", context do
+ opts = get_opts_with_existing_plugins_directory(context)
+ assert @command.validate([], opts) == :ok
+ end
+
+ test "should succeed when using multiple plugins directories, one of them does not exist", context do
+ opts = get_opts_with_non_existing_plugins_directory(context)
+ assert %{status: :running,
+ plugins: [%{name: :rabbitmq_federation}, %{name: :rabbitmq_stomp}]} =
+ @command.run([".*"], Map.merge(opts, %{minimal: true}))
+ end
+
+
+ test "should succeed when using multiple plugins directories, directories do exist and do contain plugins", context do
+ opts = get_opts_with_existing_plugins_directory(context)
+ assert %{status: :running,
+ plugins: [%{name: :rabbitmq_federation}, %{name: :rabbitmq_stomp}]} =
+ @command.run([".*"], Map.merge(opts, %{minimal: true}))
+ end
+
+ test "should list plugins when using multiple plugins directories", context do
+ plugins_directory = fixture_plugins_path("plugins-subdirectory-01")
+ opts = get_opts_with_plugins_directories(context, [plugins_directory])
+ switch_plugins_directories(context[:opts][:plugins_dir], opts[:plugins_dir])
+ reset_enabled_plugins_to_preconfigured_defaults(context)
+ assert %{status: :running,
+ plugins: [%{name: :mock_rabbitmq_plugins_01}, %{name: :mock_rabbitmq_plugins_02},
+ %{name: :rabbitmq_federation}, %{name: :rabbitmq_stomp}]} =
+ @command.run([".*"], Map.merge(opts, %{minimal: true}))
+ end
+
+ test "will report list of plugins with latest version picked", context do
+ plugins_directory_01 = fixture_plugins_path("plugins-subdirectory-01")
+ plugins_directory_02 = fixture_plugins_path("plugins-subdirectory-02")
+ opts = get_opts_with_plugins_directories(context, [plugins_directory_01, plugins_directory_02])
+ switch_plugins_directories(context[:opts][:plugins_dir], opts[:plugins_dir])
+ reset_enabled_plugins_to_preconfigured_defaults(context)
+ assert %{status: :running,
+ plugins: [%{name: :mock_rabbitmq_plugins_01, enabled: :not_enabled, running: false, version: '0.2.0'},
+ %{name: :mock_rabbitmq_plugins_02, enabled: :not_enabled, running: false, version: '0.2.0'},
+ %{name: :rabbitmq_federation, enabled: :enabled, running: true},
+ %{name: :rabbitmq_stomp, enabled: :enabled, running: true}]} =
+ @command.run([".*"], opts)
+ end
+
+ test "will report both running and pending upgrade versions", context do
+ plugins_directory_01 = fixture_plugins_path("plugins-subdirectory-01")
+ plugins_directory_02 = fixture_plugins_path("plugins-subdirectory-02")
+ opts = get_opts_with_plugins_directories(context, [plugins_directory_01])
+ switch_plugins_directories(context[:opts][:plugins_dir], opts[:plugins_dir])
+ set_enabled_plugins([:mock_rabbitmq_plugins_02, :rabbitmq_federation, :rabbitmq_stomp],
+ :online, get_rabbit_hostname(), opts)
+ assert %{status: :running,
+ plugins: [%{name: :mock_rabbitmq_plugins_01, enabled: :not_enabled, running: false, version: '0.2.0'},
+ %{name: :mock_rabbitmq_plugins_02, enabled: :enabled, running: true, version: '0.1.0', running_version: '0.1.0'},
+ %{name: :rabbitmq_federation, enabled: :enabled, running: true},
+ %{name: :rabbitmq_stomp, enabled: :enabled, running: true}]} =
+ @command.run([".*"], opts)
+ opts = get_opts_with_plugins_directories(context, [plugins_directory_01, plugins_directory_02])
+ switch_plugins_directories(context[:opts][:plugins_dir], opts[:plugins_dir])
+ assert %{status: :running,
+ plugins: [%{name: :mock_rabbitmq_plugins_01, enabled: :not_enabled, running: false, version: '0.2.0'},
+ %{name: :mock_rabbitmq_plugins_02, enabled: :enabled, running: true, version: '0.2.0', running_version: '0.1.0'},
+ %{name: :rabbitmq_federation, enabled: :enabled, running: true},
+ %{name: :rabbitmq_stomp, enabled: :enabled, running: true}]} =
+ @command.run([".*"], opts)
+ end
+end
diff --git a/deps/rabbitmq_cli/test/plugins/plugins_formatter_test.exs b/deps/rabbitmq_cli/test/plugins/plugins_formatter_test.exs
new file mode 100644
index 0000000000..9bf185d7e0
--- /dev/null
+++ b/deps/rabbitmq_cli/test/plugins/plugins_formatter_test.exs
@@ -0,0 +1,45 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+
+defmodule PluginsFormatterTest do
+ use ExUnit.Case, async: false
+
+ @formatter RabbitMQ.CLI.Formatters.Plugins
+
+ test "format_output with --silent and --minimal" do
+ result = @formatter.format_output(
+ %{status: :running,
+ plugins: [%{name: :amqp_client, enabled: :implicit, running: true, version: '3.7.0', running_version: nil},
+ %{name: :mock_rabbitmq_plugins_01, enabled: :not_enabled, running: false, version: '0.2.0', running_version: nil},
+ %{name: :mock_rabbitmq_plugins_02, enabled: :enabled, running: true, version: '0.2.0', running_version: '0.1.0'},
+ %{name: :rabbitmq_federation, enabled: :enabled, running: true, version: '3.7.0', running_version: nil},
+ %{name: :rabbitmq_stomp, enabled: :enabled, running: true, version: '3.7.0', running_version: nil}],
+ format: :minimal}, %{node: "rabbit@localhost", silent: true})
+ assert result == ["amqp_client",
+ "mock_rabbitmq_plugins_01",
+ "mock_rabbitmq_plugins_02",
+ "rabbitmq_federation",
+ "rabbitmq_stomp"]
+ end
+
+ test "format_output pending upgrade version message" do
+ result = @formatter.format_output(
+ %{status: :running,
+ plugins: [%{name: :amqp_client, enabled: :implicit, running: true, version: '3.7.0', running_version: nil},
+ %{name: :mock_rabbitmq_plugins_01, enabled: :not_enabled, running: false, version: '0.2.0', running_version: nil},
+ %{name: :mock_rabbitmq_plugins_02, enabled: :enabled, running: true, version: '0.2.0', running_version: '0.1.0'},
+ %{name: :rabbitmq_federation, enabled: :enabled, running: true, version: '3.7.0', running_version: nil},
+ %{name: :rabbitmq_stomp, enabled: :enabled, running: true, version: '3.7.0', running_version: nil}],
+ format: :normal}, %{node: "rabbit@localhost"})
+ assert result == [" Configured: E = explicitly enabled; e = implicitly enabled",
+ " | Status: * = running on rabbit@localhost", " |/",
+ "[e*] amqp_client 3.7.0", "[ ] mock_rabbitmq_plugins_01 0.2.0",
+ "[E*] mock_rabbitmq_plugins_02 0.1.0 (pending upgrade to 0.2.0)",
+ "[E*] rabbitmq_federation 3.7.0", "[E*] rabbitmq_stomp 3.7.0"]
+ end
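+
+ # Legend for the header lines asserted above: "E" = explicitly enabled,
+ # "e" = implicitly enabled, "*" = running on the target node; a
+ # "(pending upgrade to ...)" suffix marks a plugin still running an older
+ # version than the one on disk.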
+
+end
diff --git a/deps/rabbitmq_cli/test/plugins/set_plugins_command_test.exs b/deps/rabbitmq_cli/test/plugins/set_plugins_command_test.exs
new file mode 100644
index 0000000000..3ebc3dfc98
--- /dev/null
+++ b/deps/rabbitmq_cli/test/plugins/set_plugins_command_test.exs
@@ -0,0 +1,157 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule SetPluginsCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Plugins.Commands.SetCommand
+
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+ node = get_rabbit_hostname()
+
+ {:ok, plugins_file} = :rabbit_misc.rpc_call(node,
+ :application, :get_env,
+ [:rabbit, :enabled_plugins_file])
+ {:ok, plugins_dir} = :rabbit_misc.rpc_call(node,
+ :application, :get_env,
+ [:rabbit, :plugins_dir])
+ rabbitmq_home = :rabbit_misc.rpc_call(node, :code, :lib_dir, [:rabbit])
+
+ {:ok, [enabled_plugins]} = :file.consult(plugins_file)
+
+ opts = %{enabled_plugins_file: plugins_file,
+ plugins_dir: plugins_dir,
+ rabbitmq_home: rabbitmq_home,
+ online: false, offline: false}
+
+ on_exit(fn ->
+ set_enabled_plugins(enabled_plugins, :online, get_rabbit_hostname(), opts)
+ end)
+
+ {:ok, opts: opts}
+ end
+
+ setup context do
+
+ set_enabled_plugins([:rabbitmq_stomp, :rabbitmq_federation],
+ :online,
+ get_rabbit_hostname(),
+ context[:opts])
+
+ {
+ :ok,
+ opts: Map.merge(context[:opts], %{
+ node: get_rabbit_hostname(),
+ timeout: 1000
+ })
+ }
+ end
+
+ test "validate: specifying both --online and --offline is reported as invalid", context do
+ assert match?(
+ {:validation_failure, {:bad_argument, _}},
+ @command.validate([], Map.merge(context[:opts], %{online: true, offline: true}))
+ )
+ end
+
+ test "validate_execution_environment: specifying a non-existent enabled_plugins_file is fine", context do
+ assert @command.validate_execution_environment([], Map.merge(context[:opts], %{enabled_plugins_file: "none"})) ==
+ :ok
+ end
+
+ test "validate_execution_environment: specifying non existent plugins_dir is reported as an error", context do
+ assert @command.validate_execution_environment([], Map.merge(context[:opts], %{plugins_dir: "none"})) ==
+ {:validation_failure, :plugins_dir_does_not_exist}
+ end
+
+ test "will write enabled plugins file if node is inaccessible and report implicitly enabled list", context do
+ assert {:stream, test_stream} =
+ @command.run(["rabbitmq_stomp"], Map.merge(context[:opts], %{node: :nonode}))
+ assert [[:rabbitmq_stomp],
+ %{mode: :offline, set: [:rabbitmq_stomp]}] =
+ Enum.to_list(test_stream)
+ assert {:ok, [[:rabbitmq_stomp]]} = :file.consult(context[:opts][:enabled_plugins_file])
+ assert [:amqp_client, :rabbitmq_federation, :rabbitmq_stomp] =
+ Enum.sort(:rabbit_misc.rpc_call(context[:opts][:node], :rabbit_plugins, :active, []))
+ end
+
+ test "will write enabled plugins in offline mode and report implicitly enabled list", context do
+ assert {:stream, test_stream} =
+ @command.run(["rabbitmq_stomp"], Map.merge(context[:opts], %{offline: true, online: false}))
+ assert [[:rabbitmq_stomp],
+ %{mode: :offline, set: [:rabbitmq_stomp]}] =
+ Enum.to_list(test_stream)
+ assert {:ok, [[:rabbitmq_stomp]]} = :file.consult(context[:opts][:enabled_plugins_file])
+ assert [:amqp_client, :rabbitmq_federation, :rabbitmq_stomp] =
+ Enum.sort(:rabbit_misc.rpc_call(context[:opts][:node], :rabbit_plugins, :active, []))
+ end
+
+ test "will update list of plugins and start/stop enabled/disabled plugins", context do
+ assert {:stream, test_stream0} = @command.run(["rabbitmq_stomp"], context[:opts])
+ assert [[:rabbitmq_stomp],
+ %{mode: :online,
+ started: [], stopped: [:rabbitmq_federation],
+ set: [:rabbitmq_stomp]}] =
+ Enum.to_list(test_stream0)
+ assert {:ok, [[:rabbitmq_stomp]]} = :file.consult(context[:opts][:enabled_plugins_file])
+ assert [:amqp_client, :rabbitmq_stomp] =
+ Enum.sort(:rabbit_misc.rpc_call(context[:opts][:node], :rabbit_plugins, :active, []))
+ assert {:stream, test_stream1} = @command.run(["rabbitmq_federation"], context[:opts])
+ assert [[:rabbitmq_federation],
+ %{mode: :online,
+ started: [:rabbitmq_federation], stopped: [:rabbitmq_stomp],
+ set: [:rabbitmq_federation]}] =
+ Enum.to_list(test_stream1)
+ assert {:ok, [[:rabbitmq_federation]]} = :file.consult(context[:opts][:enabled_plugins_file])
+ assert [:amqp_client, :rabbitmq_federation] =
+ Enum.sort(:rabbit_misc.rpc_call(context[:opts][:node], :rabbit_plugins, :active, []))
+ end
+
+ test "can disable all plugins", context do
+ assert {:stream, test_stream} = @command.run([], context[:opts])
+ assert [[],
+ %{mode: :online,
+ started: [], stopped: [:rabbitmq_federation, :rabbitmq_stomp],
+ set: []}] =
+ Enum.to_list(test_stream)
+ assert {:ok, [[]]} = :file.consult(context[:opts][:enabled_plugins_file])
+ assert [] = Enum.sort(:rabbit_misc.rpc_call(context[:opts][:node], :rabbit_plugins, :active, []))
+ end
+
+ test "can set multiple plugins", context do
+ set_enabled_plugins([], :online, get_rabbit_hostname(), context[:opts])
+ assert {:stream, test_stream} =
+ @command.run(["rabbitmq_federation", "rabbitmq_stomp"], context[:opts])
+ assert [[:rabbitmq_federation, :rabbitmq_stomp],
+ %{mode: :online,
+ started: [:rabbitmq_federation, :rabbitmq_stomp],
+ stopped: [],
+ set: [:rabbitmq_federation, :rabbitmq_stomp]}] =
+ Enum.to_list(test_stream)
+ assert {:ok, [[:rabbitmq_federation, :rabbitmq_stomp]]} =
+ :file.consult(context[:opts][:enabled_plugins_file])
+ assert [:amqp_client, :rabbitmq_federation, :rabbitmq_stomp] =
+ Enum.sort(:rabbit_misc.rpc_call(context[:opts][:node], :rabbit_plugins, :active, []))
+ end
+
+ test "run: does not enable plugins with unmet version requirements", context do
+ set_enabled_plugins([], :online, context[:opts][:node], context[:opts])
+
+ plugins_directory = fixture_plugins_path("plugins_with_version_requirements")
+ opts = get_opts_with_plugins_directories(context, [plugins_directory])
+ switch_plugins_directories(context[:opts][:plugins_dir], opts[:plugins_dir])
+
+
+ {:stream, _} = @command.run(["mock_rabbitmq_plugin_for_3_8"], opts)
+ check_plugins_enabled([:mock_rabbitmq_plugin_for_3_8], context)
+
+ {:error, _version_error} = @command.run(["mock_rabbitmq_plugin_for_3_7"], opts)
+ check_plugins_enabled([:mock_rabbitmq_plugin_for_3_8], context)
+ end
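+
+ # Rough CLI equivalent (a sketch): `rabbitmq-plugins set rabbitmq_federation
+ # rabbitmq_stomp` replaces the enabled set wholesale; `set` with no arguments
+ # disables everything, as the "can disable all plugins" case above verifies.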
+end
diff --git a/deps/rabbitmq_cli/test/queues/add_member_command_test.exs b/deps/rabbitmq_cli/test/queues/add_member_command_test.exs
new file mode 100644
index 0000000000..71705ccb2c
--- /dev/null
+++ b/deps/rabbitmq_cli/test/queues/add_member_command_test.exs
@@ -0,0 +1,49 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Queues.Commands.AddMemberCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Queues.Commands.AddMemberCommand
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ :ok
+ end
+
+ setup context do
+ {:ok, opts: %{
+ node: get_rabbit_hostname(),
+ timeout: context[:test_timeout] || 30000
+ }}
+ end
+
+
+ test "validate: when no arguments are provided, returns a failure" do
+ assert @command.validate([], %{}) == {:validation_failure, :not_enough_args}
+ end
+
+ test "validate: when one argument is provided, returns a failure" do
+ assert @command.validate(["quorum-queue-a"], %{}) == {:validation_failure, :not_enough_args}
+ end
+
+ test "validate: when three or more arguments are provided, returns a failure" do
+ assert @command.validate(["quorum-queue-a", "rabbit@new-node", "one-extra-arg"], %{}) == {:validation_failure, :too_many_args}
+ assert @command.validate(["quorum-queue-a", "rabbit@new-node", "extra-arg", "another-extra-arg"], %{}) == {:validation_failure, :too_many_args}
+ end
+
+ test "validate: treats two positional arguments and default switches as a success" do
+ assert @command.validate(["quorum-queue-a", "rabbit@new-node"], %{}) == :ok
+ end
+
+ @tag test_timeout: 3000
+ test "run: targeting an unreachable node throws a badrpc" do
+ assert match?({:badrpc, _}, @command.run(["quorum-queue-a", "rabbit@new-node"],
+ %{node: :jake@thedog, vhost: "/", timeout: 200}))
+ end
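+
+ # Shape of the CLI invocation exercised here (a sketch):
+ # `rabbitmq-queues add_member [--vhost <vhost>] <queue> <node>` -- exactly
+ # two positional arguments, which is what the validation cases above pin down.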
+end
diff --git a/deps/rabbitmq_cli/test/queues/check_if_node_is_mirror_sync_critical_command_test.exs b/deps/rabbitmq_cli/test/queues/check_if_node_is_mirror_sync_critical_command_test.exs
new file mode 100644
index 0000000000..cbab5a6470
--- /dev/null
+++ b/deps/rabbitmq_cli/test/queues/check_if_node_is_mirror_sync_critical_command_test.exs
@@ -0,0 +1,40 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Queues.Commands.CheckIfNodeIsMirrorSyncCriticalCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Queues.Commands.CheckIfNodeIsMirrorSyncCriticalCommand
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ :ok
+ end
+
+ setup context do
+ {:ok, opts: %{
+ node: get_rabbit_hostname(),
+ timeout: context[:test_timeout] || 30000
+ }}
+ end
+
+ test "validate: accepts no positional arguments" do
+ assert @command.validate([], %{}) == :ok
+ end
+
+ test "validate: any positional arguments fail validation" do
+ assert @command.validate(["quorum-queue-a"], %{}) == {:validation_failure, :too_many_args}
+ assert @command.validate(["quorum-queue-a", "two"], %{}) == {:validation_failure, :too_many_args}
+ assert @command.validate(["quorum-queue-a", "two", "three"], %{}) == {:validation_failure, :too_many_args}
+ end
+
+ @tag test_timeout: 3000
+ test "run: targeting an unreachable node throws a badrpc" do
+ assert match?({:badrpc, _}, @command.run([], %{node: :jake@thedog, vhost: "/", timeout: 200}))
+ end
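+
+ # Context: this health check reports classic mirrored queues whose only
+ # synchronised replica lives on the target node, i.e. queues for which
+ # stopping this node would be unsafe.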
+end
diff --git a/deps/rabbitmq_cli/test/queues/check_if_node_is_quorum_critical_command_test.exs b/deps/rabbitmq_cli/test/queues/check_if_node_is_quorum_critical_command_test.exs
new file mode 100644
index 0000000000..3a1b8abf34
--- /dev/null
+++ b/deps/rabbitmq_cli/test/queues/check_if_node_is_quorum_critical_command_test.exs
@@ -0,0 +1,40 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Queues.Commands.CheckIfNodeIsQuorumCriticalCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Queues.Commands.CheckIfNodeIsQuorumCriticalCommand
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ :ok
+ end
+
+ setup context do
+ {:ok, opts: %{
+ node: get_rabbit_hostname(),
+ timeout: context[:test_timeout] || 30000
+ }}
+ end
+
+ test "validate: accepts no positional arguments" do
+ assert @command.validate([], %{}) == :ok
+ end
+
+ test "validate: any positional arguments fail validation" do
+ assert @command.validate(["quorum-queue-a"], %{}) == {:validation_failure, :too_many_args}
+ assert @command.validate(["quorum-queue-a", "two"], %{}) == {:validation_failure, :too_many_args}
+ assert @command.validate(["quorum-queue-a", "two", "three"], %{}) == {:validation_failure, :too_many_args}
+ end
+
+ @tag test_timeout: 3000
+ test "run: targeting an unreachable node throws a badrpc" do
+ assert match?({:badrpc, _}, @command.run([], %{node: :jake@thedog, vhost: "/", timeout: 200}))
+ end
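+
+ # Context: the quorum-queue sibling of the mirror sync check; it reports
+ # queues that would lose their online quorum if the target node shut down.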
+end
diff --git a/deps/rabbitmq_cli/test/queues/delete_member_command_test.exs b/deps/rabbitmq_cli/test/queues/delete_member_command_test.exs
new file mode 100644
index 0000000000..6880de6399
--- /dev/null
+++ b/deps/rabbitmq_cli/test/queues/delete_member_command_test.exs
@@ -0,0 +1,49 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Queues.Commands.DeleteMemberCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Queues.Commands.DeleteMemberCommand
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ :ok
+ end
+
+ setup context do
+ {:ok, opts: %{
+ node: get_rabbit_hostname(),
+ timeout: context[:test_timeout] || 30000
+ }}
+ end
+
+
+ test "validate: when no arguments are provided, returns a failure" do
+ assert @command.validate([], %{}) == {:validation_failure, :not_enough_args}
+ end
+
+ test "validate: when one argument is provided, returns a failure" do
+ assert @command.validate(["quorum-queue-a"], %{}) == {:validation_failure, :not_enough_args}
+ end
+
+ test "validate: when three or more arguments are provided, returns a failure" do
+ assert @command.validate(["quorum-queue-a", "rabbit@new-node", "one-extra-arg"], %{}) == {:validation_failure, :too_many_args}
+ assert @command.validate(["quorum-queue-a", "rabbit@new-node", "extra-arg", "another-extra-arg"], %{}) == {:validation_failure, :too_many_args}
+ end
+
+ test "validate: treats two positional arguments and default switches as a success" do
+ assert @command.validate(["quorum-queue-a", "rabbit@new-node"], %{}) == :ok
+ end
+
+ @tag test_timeout: 3000
+ test "run: targeting an unreachable node throws a badrpc" do
+ assert match?({:badrpc, _}, @command.run(["quorum-queue-a", "rabbit@new-node"],
+ %{node: :jake@thedog, vhost: "/", timeout: 200}))
+ end
+end
diff --git a/deps/rabbitmq_cli/test/queues/grow_command_test.exs b/deps/rabbitmq_cli/test/queues/grow_command_test.exs
new file mode 100644
index 0000000000..d0d459065b
--- /dev/null
+++ b/deps/rabbitmq_cli/test/queues/grow_command_test.exs
@@ -0,0 +1,67 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Queues.Commands.GrowCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Queues.Commands.GrowCommand
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ :ok
+ end
+
+ setup context do
+ {:ok, opts: %{
+ node: get_rabbit_hostname(),
+ timeout: context[:test_timeout] || 30000,
+ vhost_pattern: ".*",
+ queue_pattern: ".*",
+ errors_only: false
+ }}
+ end
+
+ test "merge_defaults: defaults to reporting complete results" do
+ assert @command.merge_defaults([], %{}) ==
+ {[], %{vhost_pattern: ".*",
+ queue_pattern: ".*",
+ errors_only: false}}
+ end
+
+ test "validate: when no arguments are provided, returns a failure" do
+ assert @command.validate([], %{}) == {:validation_failure, :not_enough_args}
+ end
+
+ test "validate: when one argument is provided, returns a failure" do
+ assert @command.validate(["quorum-queue-a"], %{}) == {:validation_failure, :not_enough_args}
+ end
+
+ test "validate: when a node and even are provided, returns a success" do
+ assert @command.validate(["quorum-queue-a", "even"], %{}) == :ok
+ end
+
+ test "validate: when a node and all are provided, returns a success" do
+ assert @command.validate(["quorum-queue-a", "all"], %{}) == :ok
+ end
+
+ test "validate: when a node and something else is provided, returns a failure" do
+ assert @command.validate(["quorum-queue-a", "banana"], %{}) ==
+ {:validation_failure, "strategy 'banana' is not recognised."}
+ end
+
+ test "validate: when three arguments are provided, returns a failure" do
+ assert @command.validate(["quorum-queue-a", "extra-arg", "another-extra-arg"], %{}) ==
+ {:validation_failure, :too_many_args}
+ end
+
+ @tag test_timeout: 3000
+ test "run: targeting an unreachable node throws a badrpc", context do
+ assert match?({:badrpc, _}, @command.run(["quorum-queue-a", "all"],
+ Map.merge(context[:opts], %{node: :jake@thedog, timeout: 200})))
+ end
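+
+ # Strategy note (our reading of the command): "all" grows every matching
+ # quorum queue onto the given node, while "even" only grows queues that
+ # currently have an even number of replicas, keeping replica counts odd.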
+end
diff --git a/deps/rabbitmq_cli/test/queues/peek_command_test.exs b/deps/rabbitmq_cli/test/queues/peek_command_test.exs
new file mode 100644
index 0000000000..567de4f4d2
--- /dev/null
+++ b/deps/rabbitmq_cli/test/queues/peek_command_test.exs
@@ -0,0 +1,59 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Queues.Commands.PeekCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Queues.Commands.PeekCommand
+ @invalid_position {:validation_failure, "position value must be a positive integer"}
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ :ok
+ end
+
+ setup context do
+ {:ok, opts: %{
+ node: get_rabbit_hostname(),
+ timeout: context[:test_timeout] || 30000
+ }}
+ end
+
+
+ test "validate: treats no arguments as a failure" do
+ assert @command.validate([], %{}) == {:validation_failure, :not_enough_args}
+ end
+
+ test "validate: treats a single positional argument as a failure" do
+ assert @command.validate(["quorum-queue-a"], %{}) == {:validation_failure, :not_enough_args}
+ end
+
+ test "validate: when two or more arguments are provided, returns a failure" do
+ assert @command.validate(["quorum-queue-a", "1"], %{}) == :ok
+ assert @command.validate(["quorum-queue-a", "extra-arg", "another-extra-arg"], %{}) ==
+ {:validation_failure, :too_many_args}
+ end
+
+ test "validate: when position is a negative number, returns a failure" do
+ assert @command.validate(["quorum-queue-a", "-1"], %{}) == @invalid_position
+ end
+
+ test "validate: when position is zero, returns a failure" do
+ assert @command.validate(["quorum-queue-a", "0"], %{}) == @invalid_position
+ end
+
+ test "validate: when position cannot be parsed to an integer, returns a failure" do
+ assert @command.validate(["quorum-queue-a", "third"], %{}) == @invalid_position
+ end
+
+ @tag test_timeout: 3000
+ test "run: targeting an unreachable node throws a badrpc" do
+ assert match?({:badrpc, _}, @command.run(["quorum-queue-a", "1"],
+ %{node: :jake@thedog, vhost: "/", timeout: 200}))
+ end
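+
+ # Positions are 1-based offsets from the head of the queue, e.g.
+ # `rabbitmq-queues peek <queue> 1` addresses the first message -- hence the
+ # strictly positive integer validation above.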
+end
diff --git a/deps/rabbitmq_cli/test/queues/quorum_status_command_test.exs b/deps/rabbitmq_cli/test/queues/quorum_status_command_test.exs
new file mode 100644
index 0000000000..ec694db9ba
--- /dev/null
+++ b/deps/rabbitmq_cli/test/queues/quorum_status_command_test.exs
@@ -0,0 +1,45 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Queues.Commands.QuorumStatusCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Queues.Commands.QuorumStatusCommand
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ :ok
+ end
+
+ setup context do
+ {:ok, opts: %{
+ node: get_rabbit_hostname(),
+ timeout: context[:test_timeout] || 30000
+ }}
+ end
+
+
+ test "validate: treats no arguments as a failure" do
+ assert @command.validate([], %{}) == {:validation_failure, :not_enough_args}
+ end
+
+ test "validate: accepts a single positional argument" do
+ assert @command.validate(["quorum-queue-a"], %{}) == :ok
+ end
+
+ test "validate: when two or more arguments are provided, returns a failure" do
+ assert @command.validate(["quorum-queue-a", "one-extra-arg"], %{}) == {:validation_failure, :too_many_args}
+ assert @command.validate(["quorum-queue-a", "extra-arg", "another-extra-arg"], %{}) == {:validation_failure, :too_many_args}
+ end
+
+ @tag test_timeout: 3000
+ test "run: targeting an unreachable node throws a badrpc" do
+ assert match?({:badrpc, _}, @command.run(["quorum-queue-a"],
+ %{node: :jake@thedog, vhost: "/", timeout: 200}))
+ end
+end
diff --git a/deps/rabbitmq_cli/test/queues/reclaim_quorum_memory_command_test.exs b/deps/rabbitmq_cli/test/queues/reclaim_quorum_memory_command_test.exs
new file mode 100644
index 0000000000..bec7bab50d
--- /dev/null
+++ b/deps/rabbitmq_cli/test/queues/reclaim_quorum_memory_command_test.exs
@@ -0,0 +1,45 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Queues.Commands.ReclaimQuorumMemoryCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Queues.Commands.ReclaimQuorumMemoryCommand
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ :ok
+ end
+
+ setup context do
+ {:ok, opts: %{
+ node: get_rabbit_hostname(),
+ timeout: context[:test_timeout] || 30000
+ }}
+ end
+
+
+ test "validate: treats no arguments as a failure" do
+ assert @command.validate([], %{}) == {:validation_failure, :not_enough_args}
+ end
+
+ test "validate: accepts a single positional argument" do
+ assert @command.validate(["quorum-queue-a"], %{}) == :ok
+ end
+
+ test "validate: when two or more arguments are provided, returns a failure" do
+ assert @command.validate(["quorum-queue-a", "one-extra-arg"], %{}) == {:validation_failure, :too_many_args}
+ assert @command.validate(["quorum-queue-a", "extra-arg", "another-extra-arg"], %{}) == {:validation_failure, :too_many_args}
+ end
+
+ @tag test_timeout: 3000
+ test "run: targeting an unreachable node throws a badrpc" do
+ assert match?({:badrpc, _}, @command.run(["quorum-queue-a"],
+ %{node: :jake@thedog, vhost: "/", timeout: 200}))
+ end
+end
diff --git a/deps/rabbitmq_cli/test/queues/shrink_command_test.exs b/deps/rabbitmq_cli/test/queues/shrink_command_test.exs
new file mode 100644
index 0000000000..8441deaeb0
--- /dev/null
+++ b/deps/rabbitmq_cli/test/queues/shrink_command_test.exs
@@ -0,0 +1,55 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Queues.Commands.ShrinkCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Queues.Commands.ShrinkCommand
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ :ok
+ end
+
+ setup context do
+ {:ok, opts: %{
+ node: get_rabbit_hostname(),
+ timeout: context[:test_timeout] || 30000,
+ errors_only: false
+ }}
+ end
+
+ test "merge_defaults: defaults to reporting complete results" do
+ assert @command.merge_defaults([], %{}) == {[], %{errors_only: false}}
+ end
+
+ test "validate: when no arguments are provided, returns a failure" do
+ assert @command.validate([], %{}) == {:validation_failure, :not_enough_args}
+ end
+
+ test "validate: when one argument is provided, returns a success" do
+ assert @command.validate(["quorum-queue-a"], %{}) == :ok
+ end
+
+ test "validate: when three or more arguments are provided, returns a failure" do
+ assert @command.validate(["quorum-queue-a", "one-extra-arg"], %{}) ==
+ {:validation_failure, :too_many_args}
+ assert @command.validate(["quorum-queue-a", "extra-arg", "another-extra-arg"], %{}) ==
+ {:validation_failure, :too_many_args}
+ end
+
+ test "validate: treats one positional arguments and default switches as a success" do
+ assert @command.validate(["quorum-queue-a"], %{}) == :ok
+ end
+
+ @tag test_timeout: 3000
+ test "run: targeting an unreachable node throws a badrpc", context do
+ assert match?({:badrpc, _}, @command.run(["quorum-queue-a"],
+ Map.merge(context[:opts], %{node: :jake@thedog, vhost: "/", timeout: 200})))
+ end
+end
diff --git a/deps/rabbitmq_cli/test/rabbitmqctl_test.exs b/deps/rabbitmq_cli/test/rabbitmqctl_test.exs
new file mode 100644
index 0000000000..c6b085daad
--- /dev/null
+++ b/deps/rabbitmq_cli/test/rabbitmqctl_test.exs
@@ -0,0 +1,301 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+
+defmodule RabbitMQCtlTest do
+ use ExUnit.Case, async: false
+ import ExUnit.CaptureIO
+ import RabbitMQ.CLI.Core.ExitCodes
+ import TestHelper
+
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ set_scope(:all)
+
+ :ok
+ end
+
+ #
+ # --help and `help [command]`
+ #
+
+ test "--help option prints help for the command and exits with an OK" do
+ command = ["status", "--help"]
+ assert capture_io(fn ->
+ error_check(command, exit_ok())
+ end) =~ ~r/Usage/
+ end
+
+ test "bare --help prints general help and exits with an OK" do
+ command = ["--help"]
+ assert capture_io(fn ->
+ error_check(command, exit_ok())
+ end) =~ ~r/Usage/
+ end
+
+ test "help [command] prints help for the command and exits with an OK" do
+ command = ["help", "status"]
+ assert capture_io(fn ->
+ error_check(command, exit_ok())
+ end) =~ ~r/Usage/
+ end
+
+ test "bare help command prints general help and exits with an OK" do
+ command = ["help"]
+ assert capture_io(fn ->
+ error_check(command, exit_ok())
+ end) =~ ~r/Usage/
+ end
+
+ #
+ # Validation and Error Handling
+ #
+
+ test "print error message on a bad connection" do
+ command = ["status", "-n", "sandwich@pastrami"]
+ assert capture_io(:stderr, fn ->
+ error_check(command, exit_unavailable())
+ end) =~ ~r/unable to perform an operation on node 'sandwich@pastrami'/
+ end
+
+ test "when an RPC call times out, prints a timeout message" do
+ command = ["list_users", "-t", "0"]
+ assert capture_io(:stderr, fn ->
+ error_check(command, exit_tempfail())
+ end) =~ ~r/Error: operation list_users on node #{get_rabbit_hostname()} timed out. Timeout value used: 0/
+ end
+
+ test "when authentication fails, prints an authentication error message" do
+ add_user "kirk", "khaaaaaan"
+ command = ["authenticate_user", "kirk", "makeitso"]
+ assert capture_io(:stderr,
+ fn -> error_check(command, exit_dataerr())
+ end) =~ ~r/Error: failed to authenticate user \"kirk\"/
+ delete_user "kirk"
+ end
+
+ test "when invoked without arguments, displays a generic usage message and exits with a non-zero code" do
+ command = []
+ assert capture_io(:stderr, fn ->
+ error_check(command, exit_usage())
+ end) =~ ~r/usage/i
+ end
+
+ test "when invoked with only a --help, shows a generic usage message and exits with a success" do
+ command = ["--help"]
+ assert capture_io(:stdio, fn ->
+ error_check(command, exit_ok())
+ end) =~ ~r/usage/i
+ end
+
+ test "when invoked with --help [command], shows a generic usage message and exits with a success" do
+ command = ["--help", "status"]
+ assert capture_io(:stdio, fn ->
+ error_check(command, exit_ok())
+ end) =~ ~r/usage/i
+ end
+
+ test "when no command name is provided, displays usage" do
+ command = ["-n", "sandwich@pastrami"]
+ assert capture_io(:stderr, fn ->
+ error_check(command, exit_usage())
+ end) =~ ~r/usage/i
+ end
+
+ test "short node name without the host part connects properly" do
+ command = ["status", "-n", "rabbit"]
+ capture_io(:stderr, fn -> error_check(command, exit_ok()) end)
+ end
+
+ test "a non-existent command results in help message displayed" do
+ command = ["not_real"]
+ assert capture_io(:stderr, fn ->
+ error_check(command, exit_usage())
+ end) =~ ~r/Usage/
+ end
+
+ test "a command that's been provided extra arguments exits with a reasonable error code" do
+ command = ["status", "extra"]
+ output = capture_io(:stderr, fn ->
+ error_check(command, exit_usage())
+ end)
+ assert output =~ ~r/too many arguments/
+ assert output =~ ~r/Usage/
+ assert output =~ ~r/status/
+ end
+
+ test "a command that's been provided insufficient arguments exits with a reasonable error code" do
+ command = ["list_user_permissions"]
+ output = capture_io(:stderr, fn ->
+ error_check(command, exit_usage())
+ end)
+ assert output =~ ~r/not enough arguments/
+ assert output =~ ~r/Usage/
+ assert output =~ ~r/list_user_permissions/
+ end
+
+ test "a command that's provided an invalid argument exits a reasonable error" do
+ command = ["set_disk_free_limit", "2097152bytes"]
+ capture_io(:stderr, fn -> error_check(command, exit_dataerr()) end)
+ end
+
+ test "a command that fails with an error exits with a reasonable error code" do
+ command = ["delete_user", "voldemort"]
+ capture_io(:stderr, fn -> error_check(command, exit_nouser()) end)
+ end
+
+ test "a mcommand with an unsupported option as the first command-line arg fails gracefully" do
+ command1 = ["--invalid=true", "list_permissions", "-p", "/"]
+ assert capture_io(:stderr, fn ->
+ error_check(command1, exit_usage())
+ end) =~ ~r/Invalid options for this command/
+
+ command2 = ["--node", "rabbit", "status", "quack"]
+ assert capture_io(:stderr, fn ->
+ error_check(command2, exit_usage())
+ end) =~ ~r/too many arguments./
+
+ command3 = ["--node", "rabbit", "add_user"]
+ assert capture_io(:stderr, fn ->
+ error_check(command3, exit_usage())
+ end) =~ ~r/not enough arguments./
+ end
+
+## ------------------------- Default Flags ------------------------------------
+
+ test "an empty node option is filled with the default rabbit node" do
+ assert RabbitMQCtl.merge_all_defaults(%{})[:node] ==
+ TestHelper.get_rabbit_hostname()
+ end
+
+ test "a non-empty node option is not overwritten" do
+ assert RabbitMQCtl.merge_all_defaults(%{node: :jake@thedog})[:node] ==
+ :jake@thedog
+ end
+
+ test "an empty timeout option is set to infinity" do
+ assert RabbitMQCtl.merge_all_defaults(%{})[:timeout] == :infinity
+ end
+
+ test "a non-empty timeout option is not overridden" do
+ assert RabbitMQCtl.merge_all_defaults(%{timeout: 60})[:timeout] == 60
+ end
+
+ test "other parameters are not overridden by the default" do
+ assert RabbitMQCtl.merge_all_defaults(%{vhost: "quack"})[:vhost] == "quack"
+ end
+
+ test "any flags that aren't global or command-specific cause a bad option" do
+ command1 = ["status", "--nod=rabbit"]
+ assert capture_io(:stderr, fn ->
+ error_check(command1, exit_usage())
+ end) =~ ~r/Invalid options for this command/
+
+ command2 = ["list_permissions", "-o", "/"]
+ assert capture_io(:stderr, fn ->
+ error_check(command2, exit_usage())
+ end) =~ ~r/Invalid options for this command/
+ end
+
+ #
+ # --auto-complete and `autocomplete [command]`
+ #
+
+ test "--auto-complete delegates to the autocomplete command" do
+    # Note: these are not script name (scope) aware without --script-name,
+    # but the actual command invoked in a shell will be
+ check_output(["--auto-complete", "list_q"], "list_queues\n")
+ check_output(["--auto-complete", "list_con", "--script-name", "rabbitmq-diagnostics"], "list_connections\nlist_consumers\n")
+ check_output(["--auto-complete", "--script-name", "rabbitmq-diagnostics", "mem"], "memory_breakdown\n")
+ check_output(["--auto-complete", "--script-name", "rabbitmq-queues", "add_m"], "add_member\n")
+ end
+
+ test "autocompletion command used directly" do
+    # Note: these are not script name (scope) aware without --script-name,
+    # but the actual command invoked in a shell will be
+ check_output(["autocomplete", "list_q"], "list_queues\n")
+ check_output(["autocomplete", "list_con", "--script-name", "rabbitmq-diagnostics"], "list_connections\nlist_consumers\n")
+ check_output(["autocomplete", "--script-name", "rabbitmq-diagnostics", "mem"], "memory_breakdown\n")
+ check_output(["autocomplete", "--script-name", "rabbitmq-queues", "add_m"], "add_member\n")
+ end
+
+ defp check_output(cmd, out) do
+ assert capture_io(fn ->
+ error_check(cmd, exit_ok())
+ end) == out
+ end
+
+
+## ------------------------- Error formatting ---------------------------------
+
+ test "badrpc nodedown error" do
+ exit_code = exit_unavailable()
+ node = :example@node
+ {:error, ^exit_code, message} =
+ RabbitMQCtl.handle_command_output(
+ {:error, {:badrpc, :nodedown}},
+ :no_command, %{node: node},
+ fn(output, _, _) -> output end)
+
+ assert message =~ ~r/Error: unable to perform an operation on node/
+ assert message =~ ~r/DIAGNOSTICS/
+ assert message =~ ~r/attempted to contact/
+
+ localnode = :non_existent_node@localhost
+ {:error, ^exit_code, message} =
+ RabbitMQCtl.handle_command_output(
+ {:error, {:badrpc, :nodedown}},
+ :no_command, %{node: localnode},
+ fn(output, _, _) -> output end)
+ assert message =~ ~r/DIAGNOSTICS/
+ assert message =~ ~r/attempted to contact/
+ assert message =~ ~r/suggestion: start the node/
+ end
+
+ test "badrpc timeout error" do
+ exit_code = exit_tempfail()
+ timeout = 1000
+ nodename = :node@host
+ err_msg = "Error: operation example on node node@host timed out. Timeout value used: #{timeout}"
+ {:error, ^exit_code, ^err_msg} =
+ RabbitMQCtl.handle_command_output(
+ {:error, {:badrpc, :timeout}},
+ ExampleCommand, %{timeout: timeout, node: nodename},
+ fn(output, _, _) -> output end)
+ end
+
+ test "generic error" do
+ exit_code = exit_unavailable()
+ {:error, ^exit_code, "Error:\nerror message"} =
+ RabbitMQCtl.handle_command_output(
+ {:error, "error message"},
+ :no_command, %{},
+ fn(output, _, _) -> output end)
+ end
+
+ test "inspect arbitrary error" do
+ exit_code = exit_unavailable()
+ error = %{i: [am: "arbitrary", error: 1]}
+ inspected = inspect(error)
+ {:error, ^exit_code, "Error:\n" <> ^inspected} =
+ RabbitMQCtl.handle_command_output(
+ {:error, error},
+ :no_command, %{},
+ fn(output, _, _) -> output end)
+ end
+
+ test "atom error" do
+ exit_code = exit_unavailable()
+ {:error, ^exit_code, "Error:\nerror_message"} =
+ RabbitMQCtl.handle_command_output(
+ {:error, :error_message},
+ :no_command, %{},
+ fn(output, _, _) -> output end)
+ end
+
+end
diff --git a/deps/rabbitmq_cli/test/streams/add_replica_command_test.exs b/deps/rabbitmq_cli/test/streams/add_replica_command_test.exs
new file mode 100644
index 0000000000..cffcd2e34d
--- /dev/null
+++ b/deps/rabbitmq_cli/test/streams/add_replica_command_test.exs
@@ -0,0 +1,57 @@
+## The contents of this file are subject to the Mozilla Public License
+## Version 1.1 (the "License"); you may not use this file except in
+## compliance with the License. You may obtain a copy of the License
+## at https://www.mozilla.org/MPL/
+##
+## Software distributed under the License is distributed on an "AS IS"
+## basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+## the License for the specific language governing rights and
+## limitations under the License.
+##
+## The Original Code is RabbitMQ.
+##
+## Copyright (c) 2012-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Streams.Commands.AddReplicaCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Streams.Commands.AddReplicaCommand
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ :ok
+ end
+
+ setup context do
+ {:ok, opts: %{
+ node: get_rabbit_hostname(),
+ timeout: context[:test_timeout] || 30000
+ }}
+ end
+
+
+ test "validate: when no arguments are provided, returns a failure" do
+ assert @command.validate([], %{}) == {:validation_failure, :not_enough_args}
+ end
+
+ test "validate: when one argument is provided, returns a failure" do
+ assert @command.validate(["stream-queue-a"], %{}) == {:validation_failure, :not_enough_args}
+ end
+
+ test "validate: when three or more arguments are provided, returns a failure" do
+ assert @command.validate(["stream-queue-a", "rabbit@new-node", "one-extra-arg"], %{}) == {:validation_failure, :too_many_args}
+ assert @command.validate(["stream-queue-a", "rabbit@new-node", "extra-arg", "another-extra-arg"], %{}) == {:validation_failure, :too_many_args}
+ end
+
+ test "validate: treats two positional arguments and default switches as a success" do
+ assert @command.validate(["stream-queue-a", "rabbit@new-node"], %{}) == :ok
+ end
+
+ @tag test_timeout: 3000
+ test "run: targeting an unreachable node throws a badrpc" do
+ assert match?({:badrpc, _}, @command.run(["stream-queue-a", "rabbit@new-node"],
+ %{node: :jake@thedog, vhost: "/", timeout: 200}))
+ end
+end
diff --git a/deps/rabbitmq_cli/test/streams/delete_replica_command_test.exs b/deps/rabbitmq_cli/test/streams/delete_replica_command_test.exs
new file mode 100644
index 0000000000..cf6bcbe20d
--- /dev/null
+++ b/deps/rabbitmq_cli/test/streams/delete_replica_command_test.exs
@@ -0,0 +1,57 @@
+## The contents of this file are subject to the Mozilla Public License
+## Version 1.1 (the "License"); you may not use this file except in
+## compliance with the License. You may obtain a copy of the License
+## at https://www.mozilla.org/MPL/
+##
+## Software distributed under the License is distributed on an "AS IS"
+## basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+## the License for the specific language governing rights and
+## limitations under the License.
+##
+## The Original Code is RabbitMQ.
+##
+## Copyright (c) 2012-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule RabbitMQ.CLI.Streams.Commands.DeleteReplicaCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Streams.Commands.DeleteReplicaCommand
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ :ok
+ end
+
+ setup context do
+ {:ok, opts: %{
+ node: get_rabbit_hostname(),
+ timeout: context[:test_timeout] || 30000
+ }}
+ end
+
+
+ test "validate: when no arguments are provided, returns a failure" do
+ assert @command.validate([], %{}) == {:validation_failure, :not_enough_args}
+ end
+
+ test "validate: when one argument is provided, returns a failure" do
+ assert @command.validate(["stream-queue-a"], %{}) == {:validation_failure, :not_enough_args}
+ end
+
+ test "validate: when three or more arguments are provided, returns a failure" do
+ assert @command.validate(["stream-queue-a", "rabbit@new-node", "one-extra-arg"], %{}) == {:validation_failure, :too_many_args}
+ assert @command.validate(["stream-queue-a", "rabbit@new-node", "extra-arg", "another-extra-arg"], %{}) == {:validation_failure, :too_many_args}
+ end
+
+ test "validate: treats two positional arguments and default switches as a success" do
+ assert @command.validate(["stream-queue-a", "rabbit@new-node"], %{}) == :ok
+ end
+
+ @tag test_timeout: 3000
+ test "run: targeting an unreachable node throws a badrpc" do
+ assert match?({:badrpc, _}, @command.run(["stream-queue-a", "rabbit@new-node"],
+ %{node: :jake@thedog, vhost: "/", timeout: 200}))
+ end
+end
diff --git a/deps/rabbitmq_cli/test/streams/set_stream_retention_policy_command_test.exs b/deps/rabbitmq_cli/test/streams/set_stream_retention_policy_command_test.exs
new file mode 100644
index 0000000000..56f960320b
--- /dev/null
+++ b/deps/rabbitmq_cli/test/streams/set_stream_retention_policy_command_test.exs
@@ -0,0 +1,63 @@
+## The contents of this file are subject to the Mozilla Public License
+## Version 1.1 (the "License"); you may not use this file except in
+## compliance with the License. You may obtain a copy of the License
+## at https://www.mozilla.org/MPL/
+##
+## Software distributed under the License is distributed on an "AS IS"
+## basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+## the License for the specific language governing rights and
+## limitations under the License.
+##
+## The Original Code is RabbitMQ.
+##
+## The Initial Developer of the Original Code is GoPivotal, Inc.
+## Copyright (c) 2007-2020 Pivotal Software, Inc. All rights reserved.
+
+defmodule RabbitMQ.CLI.Streams.Commands.SetStreamRetentionPolicyCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Streams.Commands.SetStreamRetentionPolicyCommand
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ :ok
+ end
+
+ setup context do
+ {:ok, opts: %{
+ node: get_rabbit_hostname(),
+ timeout: context[:test_timeout] || 30000
+ }}
+ end
+
+
+ test "validate: when no arguments are provided, returns a failure" do
+ assert @command.validate([], %{}) == {:validation_failure, :not_enough_args}
+ end
+
+ test "validate: when one argument is provided, returns a failure" do
+ assert @command.validate(["stream-queue-a"], %{}) == {:validation_failure, :not_enough_args}
+ end
+
+ test "validate: when three or more arguments are provided, returns a failure" do
+ assert @command.validate(["stream-queue-a", "1D", "one-extra-arg"], %{}) == {:validation_failure, :too_many_args}
+ assert @command.validate(["stream-queue-a", "1D", "extra-arg", "another-extra-arg"], %{}) == {:validation_failure, :too_many_args}
+ end
+
+ test "validate: treats two positional arguments and default switches as a success" do
+ assert @command.validate(["stream-queue-a", "2Y"], %{}) == :ok
+ end
+
+ @tag test_timeout: 3000
+ test "run: targeting an unreachable node throws a badrpc" do
+ assert match?({:badrpc, _}, @command.run(["stream-queue-a", "1Y"],
+ %{node: :jake@thedog, vhost: "/", timeout: 200}))
+ end
+
+ test "run: targeting an unknown queue returns an error", context do
+ assert match?({:error, _}, @command.run(["stream-queue-a", "1Y"],
+ Map.merge(context[:opts], %{vhost: "/"})))
+ end
+end
diff --git a/deps/rabbitmq_cli/test/test_helper.exs b/deps/rabbitmq_cli/test/test_helper.exs
new file mode 100644
index 0000000000..fca68e57bd
--- /dev/null
+++ b/deps/rabbitmq_cli/test/test_helper.exs
@@ -0,0 +1,620 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+four_hours = 240 * 60 * 1000
+ExUnit.configure(
+ exclude: [disabled: true],
+ module_load_timeout: four_hours,
+ timeout: four_hours)
+
+ExUnit.start()
+
+defmodule TestHelper do
+ import ExUnit.Assertions
+ alias RabbitMQ.CLI.Plugins.Helpers, as: PluginHelpers
+
+ alias RabbitMQ.CLI.Core.{CommandModules, Config, Helpers, NodeName}
+ import RabbitMQ.CLI.Core.Platform
+
+ def get_rabbit_hostname(node_name_type \\ :shortnames) do
+ Helpers.get_rabbit_hostname(node_name_type)
+ end
+
+ def hostname, do: NodeName.hostname()
+
+ def domain, do: NodeName.domain()
+
+ def fixture_file_path(filename) do
+ Path.join([File.cwd!(), "test", "fixtures", "files", filename])
+ end
+
+ def fixture_plugins_path(plugins_directory) do
+ Path.join([File.cwd!(), "test", "fixtures", "plugins", plugins_directory])
+ end
+
+ def get_cluster_name() do
+ :rpc.call(get_rabbit_hostname(), :rabbit_nodes, :cluster_name, [])
+ end
+
+ def add_vhost(name) do
+ :rpc.call(get_rabbit_hostname(), :rabbit_vhost, :add, [name, "acting-user"])
+ end
+
+ def delete_vhost(name) do
+ # some quick tests create and delete a vhost immediately, resulting
+ # in a high enough restart intensity in rabbit_vhost_sup_wrapper to
+ # make the rabbit app terminate. See https://github.com/rabbitmq/rabbitmq-server/issues/1280.
+ :timer.sleep(250)
+ :rpc.call(get_rabbit_hostname(), :rabbit_vhost, :delete, [name, "acting-user"])
+ end
+
+ def list_vhosts() do
+ :rpc.call(get_rabbit_hostname(), :rabbit_vhost, :info_all, [])
+ end
+
+ def enable_feature_flag(feature_flag) do
+ :rpc.call(get_rabbit_hostname(), :rabbit_feature_flags, :enable, [feature_flag])
+ end
+
+ def list_feature_flags(arg) do
+ :rpc.call(get_rabbit_hostname(), :rabbit_feature_flags, :list, [arg])
+ end
+
+ def add_user(name, password) do
+ :rpc.call(get_rabbit_hostname(), :rabbit_auth_backend_internal, :add_user,
+ [name, password, "acting-user"])
+ end
+
+ def delete_user(name) do
+ :rpc.call(get_rabbit_hostname(), :rabbit_auth_backend_internal, :delete_user,
+ [name, "acting-user"])
+ end
+
+ def list_users() do
+ :rpc.call(get_rabbit_hostname(), :rabbit_auth_backend_internal, :list_users, [])
+ end
+
+ def trace_on(vhost) do
+ :rpc.call(get_rabbit_hostname(), :rabbit_trace, :start, [vhost])
+ end
+
+ def trace_off(vhost) do
+ :rpc.call(get_rabbit_hostname(), :rabbit_trace, :stop, [vhost])
+ end
+
+ def set_user_tags(name, tags) do
+ :rpc.call(get_rabbit_hostname(), :rabbit_auth_backend_internal, :set_tags,
+ [name, tags, "acting-user"])
+ end
+
+ def authenticate_user(name, password) do
+    :rpc.call(get_rabbit_hostname(), :rabbit_access_control, :check_user_pass_login, [name, password])
+ end
+
+ def set_parameter(vhost, component_name, key, value) do
+ :ok = :rpc.call(get_rabbit_hostname(), :rabbit_runtime_parameters, :parse_set, [vhost, component_name, key, value, :nouser])
+ end
+
+ def clear_parameter(vhost, component_name, key) do
+ :rpc.call(get_rabbit_hostname(), :rabbit_runtime_parameters, :clear, [vhost, component_name, key, <<"acting-user">>])
+ end
+
+ def list_parameters(vhost) do
+ :rpc.call(get_rabbit_hostname(), :rabbit_runtime_parameters, :list_formatted, [vhost])
+ end
+
+ def set_global_parameter(key, value) do
+ :ok = :rpc.call(get_rabbit_hostname(), :rabbit_runtime_parameters, :parse_set_global,
+ [key, value, "acting-user"])
+ end
+
+ def clear_global_parameter(key) do
+ :rpc.call(get_rabbit_hostname(), :rabbit_runtime_parameters, :clear_global,
+ [key, "acting-user"])
+ end
+
+ def list_global_parameters() do
+ :rpc.call(get_rabbit_hostname(), :rabbit_runtime_parameters, :list_global_formatted, [])
+ end
+
+ def set_permissions(user, vhost, [conf, write, read]) do
+ :rpc.call(get_rabbit_hostname(), :rabbit_auth_backend_internal, :set_permissions, [user, vhost, conf, write, read, "acting-user"])
+ end
+
+ def list_policies(vhost) do
+ :rpc.call(get_rabbit_hostname(), :rabbit_policy, :list_formatted, [vhost])
+ end
+
+ def set_policy(vhost, name, pattern, value) do
+ {:ok, decoded} = :rabbit_json.try_decode(value)
+ parsed = :maps.to_list(decoded)
+ :ok = :rpc.call(get_rabbit_hostname(), :rabbit_policy, :set, [vhost, name, pattern, parsed, 0, "all", "acting-user"])
+ end
+
+ def clear_policy(vhost, key) do
+ :rpc.call(get_rabbit_hostname(), :rabbit_policy, :delete, [vhost, key, "acting-user"])
+ end
+
+ def list_operator_policies(vhost) do
+ :rpc.call(get_rabbit_hostname(), :rabbit_policy, :list_formatted_op, [vhost])
+ end
+
+ def set_operator_policy(vhost, name, pattern, value) do
+ {:ok, decoded} = :rabbit_json.try_decode(value)
+ parsed = :maps.to_list(decoded)
+ :ok = :rpc.call(get_rabbit_hostname(), :rabbit_policy, :set_op, [vhost, name, pattern, parsed, 0, "all", "acting-user"])
+ end
+
+ def clear_operator_policy(vhost, key) do
+ :rpc.call(get_rabbit_hostname(), :rabbit_policy, :delete_op, [vhost, key, "acting-user"])
+ end
+
+ def declare_queue(name, vhost, durable \\ false, auto_delete \\ false, args \\ [], owner \\ :none) do
+ queue_name = :rabbit_misc.r(vhost, :queue, name)
+ :rpc.call(get_rabbit_hostname(),
+ :rabbit_amqqueue, :declare,
+ [queue_name, durable, auto_delete, args, owner, "acting-user"])
+ end
+
+ def delete_queue(name, vhost) do
+ queue_name = :rabbit_misc.r(vhost, :queue, name)
+ :rpc.call(get_rabbit_hostname(),
+ :rabbit_amqqueue, :delete,
+ [queue_name, false, false, "acting-user"])
+ end
+
+ def lookup_queue(name, vhost) do
+ queue_name = :rabbit_misc.r(vhost, :queue, name)
+ :rpc.call(get_rabbit_hostname(),
+ :rabbit_amqqueue, :lookup,
+ [queue_name])
+ end
+
+ def declare_exchange(name, vhost, type \\ :direct, durable \\ false, auto_delete \\ false, internal \\ false, args \\ []) do
+ exchange_name = :rabbit_misc.r(vhost, :exchange, name)
+ :rpc.call(get_rabbit_hostname(),
+ :rabbit_exchange, :declare,
+ [exchange_name, type, durable, auto_delete, internal, args, "acting-user"])
+ end
+
+ def list_permissions(vhost) do
+ :rpc.call(
+ get_rabbit_hostname(),
+ :rabbit_auth_backend_internal,
+ :list_vhost_permissions,
+ [vhost],
+ :infinity
+ )
+ end
+
+ def set_topic_permissions(user, vhost, exchange, writePerm, readPerm) do
+ :rpc.call(
+ get_rabbit_hostname(),
+ :rabbit_auth_backend_internal,
+ :set_topic_permissions,
+ [user, vhost, exchange, writePerm, readPerm, "acting-user"],
+ :infinity
+ )
+ end
+
+ def list_user_topic_permissions(user) do
+ :rpc.call(
+ get_rabbit_hostname(),
+ :rabbit_auth_backend_internal,
+ :list_user_topic_permissions,
+ [user],
+ :infinity
+ )
+ end
+
+ def clear_topic_permissions(user, vhost) do
+ :rpc.call(
+ get_rabbit_hostname(),
+ :rabbit_auth_backend_internal,
+ :clear_topic_permissions,
+ [user, vhost, "acting-user"],
+ :infinity
+ )
+ end
+
+ def set_vm_memory_high_watermark(limit) do
+ :rpc.call(get_rabbit_hostname(), :vm_memory_monitor, :set_vm_memory_high_watermark, [limit])
+ end
+
+ def set_disk_free_limit(limit) do
+ :rpc.call(get_rabbit_hostname(), :rabbit_disk_monitor, :set_disk_free_limit, [limit])
+ end
+
+
+ #
+ # App lifecycle
+ #
+
+ def await_rabbitmq_startup() do
+ await_rabbitmq_startup_with_retries(100)
+ end
+
+ def await_rabbitmq_startup_with_retries(0) do
+ throw({:error, "Failed to call rabbit.await_startup/0 with retries: node #{get_rabbit_hostname()} was down"})
+ end
+ def await_rabbitmq_startup_with_retries(retries_left) do
+ case :rabbit_misc.rpc_call(get_rabbit_hostname(), :rabbit, :await_startup, []) do
+ :ok ->
+ :ok
+ {:badrpc, :nodedown} ->
+ :timer.sleep(50)
+ await_rabbitmq_startup_with_retries(retries_left - 1)
+ end
+ end
+
+ def await_condition(fun, timeout) do
+ retries = Integer.floor_div(timeout, 50)
+ await_condition_with_retries(fun, retries)
+ end
+
+ def await_condition_with_retries(_fun, 0) do
+ throw({:error, "Condition did not materialize"})
+ end
+ def await_condition_with_retries(fun, retries_left) do
+ case fun.() do
+ true -> :ok
+ _ ->
+ :timer.sleep(50)
+ await_condition_with_retries(fun, retries_left - 1)
+ end
+ end
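+
+  # A minimal usage sketch (the condition and timeout below are illustrative):
+  #
+  #   await_condition(fn -> is_rabbitmq_app_running() end, 5000)
+  #
+  # polls the given zero-arity function every 50ms until it returns true
+  # or the retry budget derived from the timeout is exhausted.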
+
+ def is_rabbitmq_app_running() do
+ :rabbit_misc.rpc_call(get_rabbit_hostname(), :rabbit, :is_booted, [])
+ end
+
+ def start_rabbitmq_app do
+ :rabbit_misc.rpc_call(get_rabbit_hostname(), :rabbit, :start, [])
+ await_rabbitmq_startup()
+ :timer.sleep(250)
+ end
+
+ def stop_rabbitmq_app do
+ :rabbit_misc.rpc_call(get_rabbit_hostname(), :rabbit, :stop, [])
+ :timer.sleep(1200)
+ end
+
+ def drain_node() do
+ :rpc.call(get_rabbit_hostname(), :rabbit_maintenance, :drain, [])
+ end
+
+ def revive_node() do
+ :rpc.call(get_rabbit_hostname(), :rabbit_maintenance, :revive, [])
+ end
+
+ def is_draining_node() do
+ node = get_rabbit_hostname()
+ :rpc.call(node, :rabbit_maintenance, :is_being_drained_local_read, [node])
+ end
+
+ def status do
+ :rpc.call(get_rabbit_hostname(), :rabbit, :status, [])
+ end
+
+ def error_check(cmd_line, code) do
+ assert catch_exit(RabbitMQCtl.main(cmd_line)) == {:shutdown, code}
+ end
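+
+  # error_check/2 is how these tests assert on CLI exit codes: RabbitMQCtl.main/1
+  # exits the process, so we catch the exit and compare it against the expected
+  # code. A sketch of typical use from a test module (command and expected code
+  # are illustrative):
+  #
+  #   assert capture_io(fn -> error_check(["help"], exit_ok()) end) =~ ~r/Usage/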
+
+ def with_channel(vhost, fun) do
+ with_connection(vhost,
+ fn(conn) ->
+ {:ok, chan} = AMQP.Channel.open(conn)
+ AMQP.Confirm.select(chan)
+ fun.(chan)
+ end)
+ end
+
+ def with_connection(vhost, fun) do
+ Application.ensure_all_started(:amqp)
+ {:ok, conn} = AMQP.Connection.open(virtual_host: vhost)
+ ExUnit.Callbacks.on_exit(fn ->
+ try do
+ :amqp_connection.close(conn, 1000)
+ catch
+ :exit, _ -> :ok
+ end
+ end)
+ fun.(conn)
+ end
+
+ def with_connections(vhosts, fun) do
+ Application.ensure_all_started(:amqp)
+ conns = for v <- vhosts do
+ {:ok, conn} = AMQP.Connection.open(virtual_host: v)
+ conn
+ end
+ ExUnit.Callbacks.on_exit(fn ->
+ try do
+ for c <- conns, do: :amqp_connection.close(c, 1000)
+ catch
+ :exit, _ -> :ok
+ end
+ end)
+ fun.(conns)
+ end
+
+ def message_count(vhost, queue_name) do
+ with_channel(vhost, fn(channel) ->
+ {:ok, %{message_count: mc}} = AMQP.Queue.declare(channel, queue_name)
+ mc
+ end)
+ end
+
+ def publish_messages(vhost, queue_name, count) do
+ with_channel(vhost, fn(channel) ->
+ AMQP.Queue.purge(channel, queue_name)
+ for i <- 1..count do
+ AMQP.Basic.publish(channel, "", queue_name,
+ "test_message" <> Integer.to_string(i))
+ end
+ AMQP.Confirm.wait_for_confirms(channel, 30)
+ end)
+ end
+
+ def await_no_client_connections(node, timeout) do
+    iterations = Integer.floor_div(timeout, 10)
+ await_no_client_connections_with_iterations(node, iterations)
+ end
+
+ def await_no_client_connections_with_iterations(_node, n) when n <= 0 do
+ flunk "Ran out of retries, still have active client connections"
+ end
+ def await_no_client_connections_with_iterations(node, n) when n > 0 do
+ case :rpc.call(node, :rabbit_networking, :connections_local, []) do
+ [] -> :ok
+ _xs ->
+ :timer.sleep(10)
+ await_no_client_connections_with_iterations(node, n - 1)
+ end
+ end
+
+ def close_all_connections(node) do
+ # we intentionally use connections_local/0 here because connections/0,
+ # the cluster-wide version, loads some bits around cluster membership
+ # that are not normally ready with a single node. MK.
+ #
+ # when/if we decide to test
+ # this project against a cluster of nodes this will need revisiting. MK.
+ for pid <- :rpc.call(node, :rabbit_networking, :connections_local, []) do
+ :rpc.call(node, :rabbit_networking, :close_connection, [pid, :force_closed])
+ end
+ await_no_client_connections(node, 5000)
+ end
+
+ def expect_client_connection_failure() do
+ expect_client_connection_failure("/")
+ end
+ def expect_client_connection_failure(vhost) do
+ Application.ensure_all_started(:amqp)
+ assert {:error, :econnrefused} == AMQP.Connection.open(virtual_host: vhost)
+ end
+
+ def delete_all_queues() do
+ try do
+ immediately_delete_all_queues(:rabbit_amqqueue.list())
+ catch
+ _, _ -> :ok
+ end
+ end
+
+ def delete_all_queues(vhost) do
+ try do
+ immediately_delete_all_queues(:rabbit_amqqueue.list(vhost))
+ catch
+ _, _ -> :ok
+ end
+ end
+
+ defp immediately_delete_all_queues(qs) do
+ for q <- qs do
+ try do
+ :rpc.call(
+ get_rabbit_hostname(),
+        :rabbit_amqqueue,
+ :delete,
+ [q, false, false],
+ 5000
+ )
+ catch
+ _, _ -> :ok
+ end
+ end
+ end
+
+ def reset_vm_memory_high_watermark() do
+ try do
+ :rpc.call(
+ get_rabbit_hostname(),
+ :vm_memory_monitor,
+ :set_vm_memory_high_watermark,
+ [0.4],
+ 5000
+ )
+ catch
+ _, _ -> :ok
+ end
+ end
+
+ def emit_list_multiple_sources(list1, list2, ref, pid) do
+ pids = for list <- [list1, list2], do: Kernel.spawn_link(TestHelper, :emit_list, [list, ref, pid])
+ :rabbit_control_misc.await_emitters_termination(pids)
+ end
+
+ def emit_list(list, ref, pid) do
+ emit_list_map(list, &(&1), ref, pid)
+ end
+
+ def emit_list_map(list, fun, ref, pid) do
+ :rabbit_control_misc.emitting_map(pid, ref, fun, list)
+ end
+
+ def run_command_to_list(command, args) do
+ res = Kernel.apply(command, :run, args)
+ case Enumerable.impl_for(res) do
+      nil -> res
+ _ -> Enum.to_list(res)
+ end
+ end
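+
+  # Commands that stream results return a lazy enumerable; run_command_to_list/2
+  # realizes it so assertions can work on a plain list. Hypothetical example:
+  #
+  #   run_command_to_list(@command, [[], %{node: get_rabbit_hostname()}])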
+
+ def vhost_exists?(vhost) do
+ Enum.any?(list_vhosts(), fn(v) -> v[:name] == vhost end)
+ end
+
+ def set_enabled_plugins(plugins, mode, node, opts) do
+ {:ok, enabled} = PluginHelpers.set_enabled_plugins(plugins, opts)
+
+ PluginHelpers.update_enabled_plugins(enabled, mode, node, opts)
+ end
+
+ def currently_active_plugins(context) do
+ Enum.sort(:rabbit_misc.rpc_call(context[:opts][:node], :rabbit_plugins, :active, []))
+ end
+
+ def enable_federation_plugin() do
+ node = get_rabbit_hostname()
+ {:ok, plugins_file} = :rabbit_misc.rpc_call(node,
+ :application, :get_env,
+ [:rabbit, :enabled_plugins_file])
+ {:ok, plugins_dir} = :rabbit_misc.rpc_call(node,
+ :application, :get_env,
+ [:rabbit, :plugins_dir])
+ rabbitmq_home = :rabbit_misc.rpc_call(node, :code, :lib_dir, [:rabbit])
+ {:ok, [_enabled_plugins]} = :file.consult(plugins_file)
+
+ opts = %{enabled_plugins_file: plugins_file,
+ plugins_dir: plugins_dir,
+ rabbitmq_home: rabbitmq_home,
+ online: true, offline: false}
+
+ plugins = currently_active_plugins(%{opts: %{node: node}})
+ case Enum.member?(plugins, :rabbitmq_federation) do
+ true -> :ok
+ false ->
+ set_enabled_plugins(plugins ++ [:rabbitmq_federation], :online, get_rabbit_hostname(), opts)
+ end
+ end
+
+ def set_vhost_limits(vhost, limits) do
+ :rpc.call(get_rabbit_hostname(),
+ :rabbit_vhost_limit, :parse_set, [vhost, limits, <<"acting-user">>])
+ end
+ def get_vhost_limits(vhost) do
+ :rpc.call(get_rabbit_hostname(), :rabbit_vhost_limit, :list, [vhost])
+ |> Map.new
+ end
+
+ def clear_vhost_limits(vhost) do
+ :rpc.call(get_rabbit_hostname(), :rabbit_vhost_limit, :clear, [vhost, <<"acting-user">>])
+ end
+
+ def resume_all_client_listeners() do
+ :rpc.call(get_rabbit_hostname(), :rabbit_maintenance, :resume_all_client_listeners, [])
+ end
+
+ def suspend_all_client_listeners() do
+ :rpc.call(get_rabbit_hostname(), :rabbit_maintenance, :suspend_all_client_listeners, [])
+ end
+
+ def set_user_limits(user, limits) do
+ :rpc.call(get_rabbit_hostname(),
+ :rabbit_auth_backend_internal, :set_user_limits, [user, limits, <<"acting-user">>])
+ end
+
+ def get_user_limits(user) do
+ :rpc.call(get_rabbit_hostname(), :rabbit_auth_backend_internal, :get_user_limits, [user])
+ |> Map.new
+ end
+
+ def clear_user_limits(user) do
+ clear_user_limits user, "max-connections"
+ clear_user_limits user, "max-channels"
+ end
+
+ def clear_user_limits(user, limittype) do
+ :rpc.call(get_rabbit_hostname(),
+ :rabbit_auth_backend_internal, :clear_user_limits, [user, limittype, <<"acting-user">>])
+ end
+
+ def set_scope(scope) do
+ script_name = Config.get_option(:script_name, %{})
+ scopes = Keyword.put(Application.get_env(:rabbitmqctl, :scopes), script_name, scope)
+ Application.put_env(:rabbitmqctl, :scopes, scopes)
+ CommandModules.load(%{})
+ end
+
+ def switch_plugins_directories(old_value, new_value) do
+ :rabbit_misc.rpc_call(get_rabbit_hostname(), :application, :set_env,
+ [:rabbit, :plugins_dir, new_value])
+ ExUnit.Callbacks.on_exit(fn ->
+ :rabbit_misc.rpc_call(get_rabbit_hostname(), :application, :set_env,
+ [:rabbit, :plugins_dir, old_value])
+ end)
+ end
+
+ def get_opts_with_non_existing_plugins_directory(context) do
+ get_opts_with_plugins_directories(context, ["/tmp/non_existing_rabbitmq_dummy_plugins"])
+ end
+
+ def get_opts_with_plugins_directories(context, plugins_directories) do
+ opts = context[:opts]
+ plugins_dir = opts[:plugins_dir]
+ all_directories = Enum.join([to_string(plugins_dir) | plugins_directories], path_separator())
+ %{opts | plugins_dir: to_charlist(all_directories)}
+ end
+
+ def get_opts_with_existing_plugins_directory(context) do
+ extra_plugin_directory = System.tmp_dir!() |> Path.join("existing_rabbitmq_dummy_plugins")
+ File.mkdir!(extra_plugin_directory)
+ ExUnit.Callbacks.on_exit(fn ->
+ File.rm_rf(extra_plugin_directory)
+ end)
+ get_opts_with_plugins_directories(context, [extra_plugin_directory])
+ end
+
+ def check_plugins_enabled(plugins, context) do
+ {:ok, [xs]} = :file.consult(context[:opts][:enabled_plugins_file])
+ assert_equal_sets(plugins, xs)
+ end
+
+ def assert_equal_sets(a, b) do
+ asorted = Enum.sort(a)
+ bsorted = Enum.sort(b)
+ assert asorted == bsorted
+ end
+
+ def assert_stream_without_errors(stream) do
+ true = Enum.all?(stream, fn({:error, _}) -> false;
+ ({:error, _, _}) -> false;
+ (_) -> true end)
+ end
+
+  def wait_for_log_message(message, file \\ nil, attempts \\ 100)
+  # Give up once the attempt budget is exhausted instead of recursing forever
+  def wait_for_log_message(_message, _file, 0) do
+    flunk "Ran out of attempts waiting for a log message to appear"
+  end
+  def wait_for_log_message(message, file, attempts) do
+    ## Assume default log is the first one
+    log_file = case file do
+      nil ->
+        [default_log | _] = :rpc.call(get_rabbit_hostname(), :rabbit_lager, :log_locations, [])
+        default_log
+      _ -> file
+    end
+    case File.read(log_file) do
+      {:ok, data} ->
+        case String.match?(data, Regex.compile!(message)) do
+          true -> :ok
+          false ->
+            :timer.sleep(100)
+            wait_for_log_message(message, log_file, attempts - 1)
+        end
+      _ ->
+        :timer.sleep(100)
+        wait_for_log_message(message, log_file, attempts - 1)
+    end
+  end
+end
diff --git a/deps/rabbitmq_cli/test/upgrade/await_online_quorum_plus_one_command_test.exs b/deps/rabbitmq_cli/test/upgrade/await_online_quorum_plus_one_command_test.exs
new file mode 100644
index 0000000000..c169f9ff5d
--- /dev/null
+++ b/deps/rabbitmq_cli/test/upgrade/await_online_quorum_plus_one_command_test.exs
@@ -0,0 +1,45 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+
+defmodule AwaitOnlineQuorumPlusOneCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Upgrade.Commands.AwaitOnlineQuorumPlusOneCommand
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ :ok
+ end
+
+ setup context do
+ {:ok, opts: %{
+ node: get_rabbit_hostname(),
+ timeout: context[:test_timeout] || 5000
+ }}
+ end
+
+ test "merge_defaults: overrides a timeout" do
+ assert @command.merge_defaults([], %{}) == {[], %{timeout: 120_000}}
+ end
+
+ test "validate: accepts no positional arguments" do
+ assert @command.validate(["extra-arg"], %{}) == {:validation_failure, :too_many_args}
+ end
+
+ test "validate: succeeds with no positional arguments" do
+ assert @command.validate([], %{}) == :ok
+ end
+
+ @tag test_timeout: 3000
+ test "run: targeting an unreachable node throws a badrpc", context do
+ opts = %{node: :jake@thedog, timeout: 200}
+ assert match?({:badrpc, _}, @command.run([], Map.merge(context[:opts], opts)))
+ end
+
+end
diff --git a/deps/rabbitmq_cli/test/upgrade/await_online_synchronized_mirror_command_test.exs b/deps/rabbitmq_cli/test/upgrade/await_online_synchronized_mirror_command_test.exs
new file mode 100644
index 0000000000..7089dada2c
--- /dev/null
+++ b/deps/rabbitmq_cli/test/upgrade/await_online_synchronized_mirror_command_test.exs
@@ -0,0 +1,45 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+
+defmodule AwaitOnlineSynchronizedMirrorsCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Upgrade.Commands.AwaitOnlineSynchronizedMirrorCommand
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ :ok
+ end
+
+ setup context do
+ {:ok, opts: %{
+ node: get_rabbit_hostname(),
+ timeout: context[:test_timeout] || 5000
+ }}
+ end
+
+ test "merge_defaults: overrides a timeout" do
+ assert @command.merge_defaults([], %{}) == {[], %{timeout: 120_000}}
+ end
+
+ test "validate: accepts no positional arguments" do
+ assert @command.validate(["extra-arg"], %{}) == {:validation_failure, :too_many_args}
+ end
+
+ test "validate: succeeds with no positional arguments" do
+ assert @command.validate([], %{}) == :ok
+ end
+
+ @tag test_timeout: 3000
+ test "run: targeting an unreachable node throws a badrpc", context do
+ opts = %{node: :jake@thedog, timeout: 200}
+ assert match?({:badrpc, _}, @command.run([], Map.merge(context[:opts], opts)))
+ end
+
+end
diff --git a/deps/rabbitmq_cli/test/upgrade/drain_command_test.exs b/deps/rabbitmq_cli/test/upgrade/drain_command_test.exs
new file mode 100644
index 0000000000..3533f7feff
--- /dev/null
+++ b/deps/rabbitmq_cli/test/upgrade/drain_command_test.exs
@@ -0,0 +1,57 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule DrainCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Upgrade.Commands.DrainCommand
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ revive_node()
+
+ on_exit(fn ->
+ revive_node()
+ end)
+
+ :ok
+ end
+
+ setup context do
+ {:ok, opts: %{
+ node: get_rabbit_hostname(),
+ timeout: context[:test_timeout] || 5000
+ }}
+ end
+
+ test "merge_defaults: nothing to do" do
+ assert @command.merge_defaults([], %{}) == {[], %{}}
+ end
+
+ test "validate: accepts no positional arguments" do
+ assert @command.validate(["extra-arg"], %{}) == {:validation_failure, :too_many_args}
+ end
+
+ test "validate: succeeds with no positional arguments" do
+ assert @command.validate([], %{}) == :ok
+ end
+
+ @tag test_timeout: 3000
+ test "run: targeting an unreachable node throws a badrpc", context do
+ opts = %{node: :jake@thedog, timeout: 200}
+ assert match?({:badrpc, _}, @command.run([], Map.merge(context[:opts], opts)))
+ end
+
+ test "run: puts target node into maintenance mode", context do
+ assert not is_draining_node()
+ assert :ok == @command.run([], context[:opts])
+
+ await_condition(fn -> is_draining_node() end, 7000)
+ revive_node()
+ end
+end
diff --git a/deps/rabbitmq_cli/test/upgrade/post_upgrade_command_test.exs b/deps/rabbitmq_cli/test/upgrade/post_upgrade_command_test.exs
new file mode 100644
index 0000000000..e77390ecf0
--- /dev/null
+++ b/deps/rabbitmq_cli/test/upgrade/post_upgrade_command_test.exs
@@ -0,0 +1,49 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+
+defmodule PostUpgradeCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Upgrade.Commands.PostUpgradeCommand
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ :ok
+ end
+
+ setup context do
+ {:ok, opts: %{
+ node: get_rabbit_hostname(),
+ timeout: context[:test_timeout] || 5000
+ }}
+ end
+
+ test "merge_defaults: nothing to do" do
+ assert @command.merge_defaults([], %{}) == {[], %{}}
+ end
+
+ test "validate: accepts no positional arguments" do
+ assert @command.validate(["extra-arg"], %{}) == {:validation_failure, :too_many_args}
+ end
+
+ test "validate: succeeds with no positional arguments" do
+ assert @command.validate([], %{}) == :ok
+ end
+
+ @tag test_timeout: 3000
+ test "run: targeting an unreachable node throws a badrpc", context do
+ opts = %{node: :jake@thedog, timeout: 200}
+ assert match?({:badrpc, _}, @command.run([], Map.merge(context[:opts], opts)))
+ end
+
+ test "run: returns an OK", context do
+ assert match?({:ok, _}, @command.run([], context[:opts]))
+ end
+
+end
diff --git a/deps/rabbitmq_cli/test/upgrade/revive_command_test.exs b/deps/rabbitmq_cli/test/upgrade/revive_command_test.exs
new file mode 100644
index 0000000000..6d43d59b83
--- /dev/null
+++ b/deps/rabbitmq_cli/test/upgrade/revive_command_test.exs
@@ -0,0 +1,57 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+defmodule ReviveCommandTest do
+ use ExUnit.Case, async: false
+ import TestHelper
+
+ @command RabbitMQ.CLI.Upgrade.Commands.ReviveCommand
+
+ setup_all do
+ RabbitMQ.CLI.Core.Distribution.start()
+
+ revive_node()
+
+ on_exit(fn ->
+ revive_node()
+ end)
+
+ :ok
+ end
+
+ setup context do
+ {:ok, opts: %{
+ node: get_rabbit_hostname(),
+ timeout: context[:test_timeout] || 5000
+ }}
+ end
+
+ test "merge_defaults: nothing to do" do
+ assert @command.merge_defaults([], %{}) == {[], %{}}
+ end
+
+ test "validate: accepts no positional arguments" do
+ assert @command.validate(["extra-arg"], %{}) == {:validation_failure, :too_many_args}
+ end
+
+ test "validate: succeeds with no positional arguments" do
+ assert @command.validate([], %{}) == :ok
+ end
+
+ @tag test_timeout: 3000
+ test "run: targeting an unreachable node throws a badrpc", context do
+ opts = %{node: :jake@thedog, timeout: 200}
+ assert match?({:badrpc, _}, @command.run([], Map.merge(context[:opts], opts)))
+ end
+
+ test "run: puts target node into regular operating mode", context do
+ assert not is_draining_node()
+ drain_node()
+ await_condition(fn -> is_draining_node() end, 7000)
+ assert :ok == @command.run([], context[:opts])
+ await_condition(fn -> not is_draining_node() end, 7000)
+ end
+end
diff --git a/deps/rabbitmq_codegen/.gitignore b/deps/rabbitmq_codegen/.gitignore
new file mode 100644
index 0000000000..7ced2f9af7
--- /dev/null
+++ b/deps/rabbitmq_codegen/.gitignore
@@ -0,0 +1,11 @@
+*~
+.sw?
+.*.sw?
+*.beam
+*.pyc
+erl_crash.dump
+/build/
+/cover/
+/dist/
+/ebin/
+/tmp/
diff --git a/deps/rabbitmq_codegen/CODE_OF_CONDUCT.md b/deps/rabbitmq_codegen/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000000..08697906fd
--- /dev/null
+++ b/deps/rabbitmq_codegen/CODE_OF_CONDUCT.md
@@ -0,0 +1,44 @@
+# Contributor Code of Conduct
+
+As contributors and maintainers of this project, and in the interest of fostering an open
+and welcoming community, we pledge to respect all people who contribute through reporting
+issues, posting feature requests, updating documentation, submitting pull requests or
+patches, and other activities.
+
+We are committed to making participation in this project a harassment-free experience for
+everyone, regardless of level of experience, gender, gender identity and expression,
+sexual orientation, disability, personal appearance, body size, race, ethnicity, age,
+religion, or nationality.
+
+Examples of unacceptable behavior by participants include:
+
+ * The use of sexualized language or imagery
+ * Personal attacks
+ * Trolling or insulting/derogatory comments
+ * Public or private harassment
+ * Publishing others' private information, such as physical or electronic addresses,
+ without explicit permission
+ * Other unethical or unprofessional conduct
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments,
+commits, code, wiki edits, issues, and other contributions that are not aligned to this
+Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors
+that they deem inappropriate, threatening, offensive, or harmful.
+
+By adopting this Code of Conduct, project maintainers commit themselves to fairly and
+consistently applying these principles to every aspect of managing this project. Project
+maintainers who do not follow or enforce the Code of Conduct may be permanently removed
+from the project team.
+
+This Code of Conduct applies both within project spaces and in public spaces when an
+individual is representing the project or its community.
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by
+contacting a project maintainer at [info@rabbitmq.com](mailto:info@rabbitmq.com). All complaints will
+be reviewed and investigated and will result in a response that is deemed necessary and
+appropriate to the circumstances. Maintainers are obligated to maintain confidentiality
+with regard to the reporter of an incident.
+
+This Code of Conduct is adapted from the
+[Contributor Covenant](https://contributor-covenant.org), version 1.3.0, available at
+[contributor-covenant.org/version/1/3/0/](https://contributor-covenant.org/version/1/3/0/)
diff --git a/deps/rabbitmq_codegen/CONTRIBUTING.md b/deps/rabbitmq_codegen/CONTRIBUTING.md
new file mode 100644
index 0000000000..23a92fef9c
--- /dev/null
+++ b/deps/rabbitmq_codegen/CONTRIBUTING.md
@@ -0,0 +1,38 @@
+## Overview
+
+RabbitMQ projects use pull requests to discuss, collaborate on and accept code contributions.
+Pull requests are the primary place for discussing code changes.
+
+## How to Contribute
+
+The process is fairly standard:
+
+ * Fork the repository or repositories you plan on contributing to
+ * Clone [RabbitMQ umbrella repository](https://github.com/rabbitmq/rabbitmq-public-umbrella)
+ * `cd umbrella`, `make co`
+ * Create a branch with a descriptive name in the relevant repositories
+ * Make your changes, run tests, commit with a [descriptive message](https://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html), push to your fork
+ * Submit pull requests with an explanation of what has been changed and **why**
+ * Submit a filled out and signed [Contributor Agreement](https://github.com/rabbitmq/ca#how-to-submit) if needed (see below)
+ * Be patient. We will get to your pull request eventually
+
+If what you are going to work on is a substantial change, please first ask the core team
+for their opinion on the [RabbitMQ mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
+
+
+## Code of Conduct
+
+See [CODE_OF_CONDUCT.md](./CODE_OF_CONDUCT.md).
+
+
+## Contributor Agreement
+
+If you want to contribute a non-trivial change, please submit a signed copy of our
+[Contributor Agreement](https://github.com/rabbitmq/ca#how-to-submit) around the time
+you submit your pull request. This will make it much easier (in some cases, possible)
+for the RabbitMQ team at Pivotal to merge your contribution.
+
+
+## Where to Ask Questions
+
+If something isn't clear, feel free to ask on our [mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
diff --git a/deps/rabbitmq_codegen/LICENSE b/deps/rabbitmq_codegen/LICENSE
new file mode 100644
index 0000000000..06ac104533
--- /dev/null
+++ b/deps/rabbitmq_codegen/LICENSE
@@ -0,0 +1,6 @@
+This package, the RabbitMQ code generation library and associated
+files, is licensed under the MPL 2.0. For the MPL 2.0, please see
+LICENSE-MPL-RabbitMQ.
+
+If you have any questions regarding licensing, please contact us at
+info@rabbitmq.com.
diff --git a/deps/rabbitmq_codegen/LICENSE-MPL-RabbitMQ b/deps/rabbitmq_codegen/LICENSE-MPL-RabbitMQ
new file mode 100644
index 0000000000..14e2f777f6
--- /dev/null
+++ b/deps/rabbitmq_codegen/LICENSE-MPL-RabbitMQ
@@ -0,0 +1,373 @@
+Mozilla Public License Version 2.0
+==================================
+
+1. Definitions
+--------------
+
+1.1. "Contributor"
+ means each individual or legal entity that creates, contributes to
+ the creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+ means the combination of the Contributions of others (if any) used
+ by a Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+ means Source Code Form to which the initial Contributor has attached
+ the notice in Exhibit A, the Executable Form of such Source Code
+ Form, and Modifications of such Source Code Form, in each case
+ including portions thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ (a) that the initial Contributor has attached the notice described
+ in Exhibit B to the Covered Software; or
+
+ (b) that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the
+ terms of a Secondary License.
+
+1.6. "Executable Form"
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+ means a work that combines Covered Software with other material, in
+ a separate file or files, that is not Covered Software.
+
+1.8. "License"
+ means this document.
+
+1.9. "Licensable"
+ means having the right to grant, to the maximum extent possible,
+ whether at the time of the initial grant or subsequently, any and
+ all of the rights conveyed by this License.
+
+1.10. "Modifications"
+ means any of the following:
+
+ (a) any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered
+ Software; or
+
+ (b) any new file in Source Code Form that contains any Covered
+ Software.
+
+1.11. "Patent Claims" of a Contributor
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the
+ License, by the making, using, selling, offering for sale, having
+ made, import, or transfer of either its Contributions or its
+ Contributor Version.
+
+1.12. "Secondary License"
+ means either the GNU General Public License, Version 2.0, the GNU
+ Lesser General Public License, Version 2.1, the GNU Affero General
+ Public License, Version 3.0, or any later versions of those
+ licenses.
+
+1.13. "Source Code Form"
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that
+ controls, is controlled by, or is under common control with You. For
+ purposes of this definition, "control" means (a) the power, direct
+ or indirect, to cause the direction or management of such entity,
+ whether by contract or otherwise, or (b) ownership of more than
+ fifty percent (50%) of the outstanding shares or beneficial
+ ownership of such entity.
+
+2. License Grants and Conditions
+--------------------------------
+
+2.1. Grants
+
+Each Contributor hereby grants You a world-wide, royalty-free,
+non-exclusive license:
+
+(a) under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+(b) under Patent Claims of such Contributor to make, use, sell, offer
+ for sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+The licenses granted in Section 2.1 with respect to any Contribution
+become effective for each Contribution on the date the Contributor first
+distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+The licenses granted in this Section 2 are the only rights granted under
+this License. No additional rights or licenses will be implied from the
+distribution or licensing of Covered Software under this License.
+Notwithstanding Section 2.1(b) above, no patent license is granted by a
+Contributor:
+
+(a) for any code that a Contributor has removed from Covered Software;
+ or
+
+(b) for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+(c) under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+This License does not grant any rights in the trademarks, service marks,
+or logos of any Contributor (except as may be necessary to comply with
+the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+No Contributor makes additional grants as a result of Your choice to
+distribute the Covered Software under a subsequent version of this
+License (see Section 10.2) or under the terms of a Secondary License (if
+permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+Each Contributor represents that the Contributor believes its
+Contributions are its original creation(s) or it has sufficient rights
+to grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+This License is not intended to limit any rights You have under
+applicable copyright doctrines of fair use, fair dealing, or other
+equivalents.
+
+2.7. Conditions
+
+Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
+in Section 2.1.
+
+3. Responsibilities
+-------------------
+
+3.1. Distribution of Source Form
+
+All distribution of Covered Software in Source Code Form, including any
+Modifications that You create or to which You contribute, must be under
+the terms of this License. You must inform recipients that the Source
+Code Form of the Covered Software is governed by the terms of this
+License, and how they can obtain a copy of this License. You may not
+attempt to alter or restrict the recipients' rights in the Source Code
+Form.
+
+3.2. Distribution of Executable Form
+
+If You distribute Covered Software in Executable Form then:
+
+(a) such Covered Software must also be made available in Source Code
+ Form, as described in Section 3.1, and You must inform recipients of
+ the Executable Form how they can obtain a copy of such Source Code
+ Form by reasonable means in a timely manner, at a charge no more
+ than the cost of distribution to the recipient; and
+
+(b) You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter
+ the recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+You may create and distribute a Larger Work under terms of Your choice,
+provided that You also comply with the requirements of this License for
+the Covered Software. If the Larger Work is a combination of Covered
+Software with a work governed by one or more Secondary Licenses, and the
+Covered Software is not Incompatible With Secondary Licenses, this
+License permits You to additionally distribute such Covered Software
+under the terms of such Secondary License(s), so that the recipient of
+the Larger Work may, at their option, further distribute the Covered
+Software under the terms of either this License or such Secondary
+License(s).
+
+3.4. Notices
+
+You may not remove or alter the substance of any license notices
+(including copyright notices, patent notices, disclaimers of warranty,
+or limitations of liability) contained within the Source Code Form of
+the Covered Software, except that You may alter any license notices to
+the extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+You may choose to offer, and to charge a fee for, warranty, support,
+indemnity or liability obligations to one or more recipients of Covered
+Software. However, You may do so only on Your own behalf, and not on
+behalf of any Contributor. You must make it absolutely clear that any
+such warranty, support, indemnity, or liability obligation is offered by
+You alone, and You hereby agree to indemnify every Contributor for any
+liability incurred by such Contributor as a result of warranty, support,
+indemnity or liability terms You offer. You may include additional
+disclaimers of warranty and limitations of liability specific to any
+jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+---------------------------------------------------
+
+If it is impossible for You to comply with any of the terms of this
+License with respect to some or all of the Covered Software due to
+statute, judicial order, or regulation then You must: (a) comply with
+the terms of this License to the maximum extent possible; and (b)
+describe the limitations and the code they affect. Such description must
+be placed in a text file included with all distributions of the Covered
+Software under this License. Except to the extent prohibited by statute
+or regulation, such description must be sufficiently detailed for a
+recipient of ordinary skill to be able to understand it.
+
+5. Termination
+--------------
+
+5.1. The rights granted under this License will terminate automatically
+if You fail to comply with any of its terms. However, if You become
+compliant, then the rights granted under this License from a particular
+Contributor are reinstated (a) provisionally, unless and until such
+Contributor explicitly and finally terminates Your grants, and (b) on an
+ongoing basis, if such Contributor fails to notify You of the
+non-compliance by some reasonable means prior to 60 days after You have
+come back into compliance. Moreover, Your grants from a particular
+Contributor are reinstated on an ongoing basis if such Contributor
+notifies You of the non-compliance by some reasonable means, this is the
+first time You have received notice of non-compliance with this License
+from such Contributor, and You become compliant prior to 30 days after
+Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+infringement claim (excluding declaratory judgment actions,
+counter-claims, and cross-claims) alleging that a Contributor Version
+directly or indirectly infringes any patent, then the rights granted to
+You by any and all Contributors for the Covered Software under Section
+2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all
+end user license agreements (excluding distributors and resellers) which
+have been validly granted by You or Your distributors under this License
+prior to termination shall survive termination.
+
+************************************************************************
+* *
+* 6. Disclaimer of Warranty *
+* ------------------------- *
+* *
+* Covered Software is provided under this License on an "as is" *
+* basis, without warranty of any kind, either expressed, implied, or *
+* statutory, including, without limitation, warranties that the *
+* Covered Software is free of defects, merchantable, fit for a *
+* particular purpose or non-infringing. The entire risk as to the *
+* quality and performance of the Covered Software is with You. *
+* Should any Covered Software prove defective in any respect, You *
+* (not any Contributor) assume the cost of any necessary servicing, *
+* repair, or correction. This disclaimer of warranty constitutes an *
+* essential part of this License. No use of any Covered Software is *
+* authorized under this License except under this disclaimer. *
+* *
+************************************************************************
+
+************************************************************************
+* *
+* 7. Limitation of Liability *
+* -------------------------- *
+* *
+* Under no circumstances and under no legal theory, whether tort *
+* (including negligence), contract, or otherwise, shall any *
+* Contributor, or anyone who distributes Covered Software as *
+* permitted above, be liable to You for any direct, indirect, *
+* special, incidental, or consequential damages of any character *
+* including, without limitation, damages for lost profits, loss of *
+* goodwill, work stoppage, computer failure or malfunction, or any *
+* and all other commercial damages or losses, even if such party *
+* shall have been informed of the possibility of such damages. This *
+* limitation of liability shall not apply to liability for death or *
+* personal injury resulting from such party's negligence to the *
+* extent applicable law prohibits such limitation. Some *
+* jurisdictions do not allow the exclusion or limitation of *
+* incidental or consequential damages, so this exclusion and *
+* limitation may not apply to You. *
+* *
+************************************************************************
+
+8. Litigation
+-------------
+
+Any litigation relating to this License may be brought only in the
+courts of a jurisdiction where the defendant maintains its principal
+place of business and such litigation shall be governed by laws of that
+jurisdiction, without reference to its conflict-of-law provisions.
+Nothing in this Section shall prevent a party's ability to bring
+cross-claims or counter-claims.
+
+9. Miscellaneous
+----------------
+
+This License represents the complete agreement concerning the subject
+matter hereof. If any provision of this License is held to be
+unenforceable, such provision shall be reformed only to the extent
+necessary to make it enforceable. Any law or regulation which provides
+that the language of a contract shall be construed against the drafter
+shall not be used to construe this License against a Contributor.
+
+10. Versions of the License
+---------------------------
+
+10.1. New Versions
+
+Mozilla Foundation is the license steward. Except as provided in Section
+10.3, no one other than the license steward has the right to modify or
+publish new versions of this License. Each version will be given a
+distinguishing version number.
+
+10.2. Effect of New Versions
+
+You may distribute the Covered Software under the terms of the version
+of the License under which You originally received the Covered Software,
+or under the terms of any subsequent version published by the license
+steward.
+
+10.3. Modified Versions
+
+If you create software not governed by this License, and you want to
+create a new license for such software, you may create and use a
+modified version of this License if you rename the license and remove
+any references to the name of the license steward (except to note that
+such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+Licenses
+
+If You choose to distribute Source Code Form that is Incompatible With
+Secondary Licenses under the terms of this version of the License, the
+notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+-------------------------------------------
+
+ This Source Code Form is subject to the terms of the Mozilla Public
+ License, v. 2.0. If a copy of the MPL was not distributed with this
+ file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular
+file, then You may include the notice in a location (such as a LICENSE
+file in a relevant directory) where a recipient would be likely to look
+for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+---------------------------------------------------------
+
+ This Source Code Form is "Incompatible With Secondary Licenses", as
+ defined by the Mozilla Public License, v. 2.0.
diff --git a/deps/rabbitmq_codegen/Makefile b/deps/rabbitmq_codegen/Makefile
new file mode 100644
index 0000000000..55d72ed88a
--- /dev/null
+++ b/deps/rabbitmq_codegen/Makefile
@@ -0,0 +1,42 @@
+.PHONY: all clean distclean
+
+all:
+ @:
+
+clean:
+ rm -f *.pyc
+
+distclean: clean
+ find . -regex '.*\(~\|#\|\.swp\)' -exec rm {} \;
+
+# Upstream URL for the current project.
+RABBITMQ_COMPONENT_REPO_NAME := rabbitmq-codegen
+RABBITMQ_UPSTREAM_FETCH_URL ?= https://github.com/rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git
+RABBITMQ_UPSTREAM_PUSH_URL ?= git@github.com:rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git
+
+# Current URL for the current project. If this is not a Git clone,
+# default to the upstream Git repository.
+ifneq ($(wildcard .git),)
+git_origin_fetch_url := $(shell git config remote.origin.url)
+git_origin_push_url := $(shell git config remote.origin.pushurl || git config remote.origin.url)
+RABBITMQ_CURRENT_FETCH_URL ?= $(git_origin_fetch_url)
+RABBITMQ_CURRENT_PUSH_URL ?= $(git_origin_push_url)
+else
+RABBITMQ_CURRENT_FETCH_URL ?= $(RABBITMQ_UPSTREAM_FETCH_URL)
+RABBITMQ_CURRENT_PUSH_URL ?= $(RABBITMQ_UPSTREAM_PUSH_URL)
+endif
+
+.PHONY: show-upstream-git-fetch-url show-upstream-git-push-url \
+ show-current-git-fetch-url show-current-git-push-url
+
+show-upstream-git-fetch-url:
+ @echo $(RABBITMQ_UPSTREAM_FETCH_URL)
+
+show-upstream-git-push-url:
+ @echo $(RABBITMQ_UPSTREAM_PUSH_URL)
+
+show-current-git-fetch-url:
+ @echo $(RABBITMQ_CURRENT_FETCH_URL)
+
+show-current-git-push-url:
+ @echo $(RABBITMQ_CURRENT_PUSH_URL)
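+
+# Usage sketch (comments only, not build logic): each show-* target
+# above simply echoes the corresponding computed URL, e.g.
+#
+#   $ make show-current-git-fetch-url
+#   https://github.com/rabbitmq/rabbitmq-codegen.git
+#
+# (Example output assumes the origin remote points at the upstream
+# repository; in a non-Git checkout the current URLs fall back to the
+# upstream ones.)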
diff --git a/deps/rabbitmq_codegen/README.extensions.md b/deps/rabbitmq_codegen/README.extensions.md
new file mode 100644
index 0000000000..0a22d41913
--- /dev/null
+++ b/deps/rabbitmq_codegen/README.extensions.md
@@ -0,0 +1,189 @@
+# Protocol extensions
+
+The `amqp_codegen.py` AMQP specification compiler has recently been
+enhanced to take more than a single specification file, which allows
+AMQP library authors to include extensions to the core protocol
+without needing to modify the core AMQP specification file as
+distributed.
+
+The compiler is invoked with the path to a single "main" specification
+document and zero or more paths to "extension" documents.
+
+The order of the extensions matters: any later class property
+definitions, for instance, are added to the list of definitions in
+order of appearance, so two extensions that each append a property to
+the same class produce different property orders depending on which
+extension is listed first. In general, composition of extensions with
+a core specification document is therefore non-commutative.
+
+## The main document
+
+Written in the style of a
+[json-shapes](https://github.com/tonyg/json-shapes) schema:
+
+ DomainDefinition = _and(array_of(string()), array_length_equals(2));
+
+ ConstantDefinition = {
+ "name": string(),
+ "value": number(),
+ "class": optional(_or("soft-error", "hard-error"))
+ };
+
+ FieldDefinition = {
+ "name": string(),
+ "type": string(),
+ "default-value": optional(anything())
+ };
+
+ MethodDefinition = {
+ "name": string(),
+ "id": number(),
+ "arguments": array_of(FieldDefinition),
+ "synchronous": optional(boolean()),
+ "content": optional(boolean())
+ };
+
+ ClassDefinition = {
+ "name": string(),
+ "id": number(),
+ "methods": array_of(MethodDefinition),
+ "properties": optional(array_of(FieldDefinition))
+ };
+
+ MainDocument = {
+ "major-version": number(),
+ "minor-version": number(),
+ "revision": optional(number()),
+ "port": number(),
+ "domains": array_of(DomainDefinition),
+ "constants": array_of(ConstantDefinition),
+ "classes": array_of(ClassDefinition),
+ }
+
+Within a `FieldDefinition`, the keyword `domain` can be used instead
+of `type`, but `type` is preferred and `domain` is deprecated.
+
+Type names can either be a defined `domain` name or a built-in name
+from the following list:
+
+ - octet
+ - shortstr
+ - longstr
+ - short
+ - long
+ - longlong
+ - bit
+ - table
+ - timestamp
+
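+For example (an illustrative field, not taken from any real class), a
+`FieldDefinition` using a built-in type might be:
+
+    {"name": "ticket", "type": "short", "default-value": 1}
+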
+Method and class IDs must be integers between 0 and 65535,
+inclusive. Note that there is no specific subset of the space reserved
+for experimental or site-local extensions, so be careful not to
+conflict with IDs used by the AMQP core specification.
+
+If the `synchronous` field of a `MethodDefinition` is missing, it is
+assumed to be `false`; the same applies to the `content` field.
+
+A `ConstantDefinition` with a `class` attribute is considered to be an
+error-code definition; otherwise, it is considered to be a
+straightforward numeric constant.
+
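+For illustration, a minimal main document conforming to the shapes
+above might look like the following (a made-up fragment, not the real
+AMQP specification file):
+
+    {
+        "major-version": 0,
+        "minor-version": 8,
+        "port": 5672,
+        "domains": [["queue-name", "shortstr"]],
+        "constants": [{"name": "FRAME-END", "value": 206}],
+        "classes": [{"name": "ping",
+                     "id": 200,
+                     "methods": [{"name": "ping",
+                                  "id": 10,
+                                  "arguments": [],
+                                  "synchronous": true}]}]
+    }
+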
+## Extensions
+
+Written in the style of a
+[json-shapes](https://github.com/tonyg/json-shapes) schema, and
+referencing some of the type definitions given above:
+
+ ExtensionDocument = {
+ "extension": anything(),
+ "domains": array_of(DomainDefinition),
+ "constants": array_of(ConstantDefinition),
+ "classes": array_of(ClassDefinition)
+ };
+
+The `extension` keyword is used to describe the extension informally
+for human readers. Typically it will be a dictionary, with members
+such as:
+
+ {
+ "name": "The name of the extension",
+ "version": "1.0",
+ "copyright": "Copyright (C) 1234 Yoyodyne, Inc."
+ }
+
+## Merge behaviour
+
+In the case of conflicts between values specified in the main document
+and in any extension documents, type-specific merge operators are
+invoked, as sketched after this list.
+
+ - Any doubly-defined domain names are regarded as true
+ conflicts. Otherwise, all the domain definitions from all the main
+ and extension documents supplied to the compiler are merged into a
+ single dictionary.
+
+ - Constant definitions are treated as per domain names above,
+ *mutatis mutandis*.
+
+ - Classes and their methods are a little trickier: if an extension
+ defines a class with the same name as one previously defined, then
+ only the `methods` and `properties` fields of the extension's class
+ definition are attended to.
+
+ - Any doubly-defined method names or property names within a class
+ are treated as true conflicts.
+
+ - Properties defined in an extension are added to the end of the
+ extant property list for the class.
+
+ (Extensions are of course permitted to define brand new classes as
+ well as to extend existing ones.)
+
+ - Any other kind of conflict leads to a raised
+ `AmqpSpecFileMergeConflict` exception.
+
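+The domain/constant half of this behaviour can be sketched in Python
+roughly as follows (illustrative names only; this is not the actual
+`amqp_codegen` implementation):
+
+    class AmqpSpecFileMergeConflict(Exception):
+        pass
+
+    def merge_by_name(kind, main_items, ext_items, name_of):
+        """Merge definitions from a main document and its extensions,
+        raising on any doubly-defined name."""
+        merged = {}
+        for item in main_items + ext_items:
+            name = name_of(item)
+            if name in merged:
+                raise AmqpSpecFileMergeConflict(
+                    "%s %r defined more than once" % (kind, name))
+            merged[name] = item
+        return merged
+
+    # Domains are [name, type] pairs; constants are dicts with a "name".
+    domains = merge_by_name("domain",
+                            [["octet", "octet"]], [["uuid", "longstr"]],
+                            name_of=lambda d: d[0])
+    constants = merge_by_name("constant",
+                              [{"name": "FRAME-END", "value": 206}], [],
+                              name_of=lambda c: c["name"])
+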
+## Invoking the spec compiler
+
+Your code generation module should invoke `amqp_codegen.do_main_dict`
+with a dictionary of functions as the sole argument. Each function is
+used to generate a separate file. The `do_main_dict` function will
+parse the command-line arguments supplied when Python was invoked.
+
+The command line is parsed as:
+
+ python your_codegen.py <action> <mainspec> [<extspec> ...] <outfile>
+
+where `<action>` is a key into the dictionary supplied to
+`do_main_dict` and is used to select which generation function is
+called. The `<mainspec>` and `<extspec>` arguments are file names of
+specification documents containing expressions in the syntax given
+above. The *final* argument on the command line, `<outfile>`, is the
+name of the source-code file to generate.
+
+Here's a tiny example of the layout of a code generation module that
+uses `amqp_codegen`:
+
+ import amqp_codegen
+
+ def generateHeader(specPath):
+ spec = amqp_codegen.AmqpSpec(specPath)
+ ...
+
+ def generateImpl(specPath):
+ spec = amqp_codegen.AmqpSpec(specPath)
+ ...
+
+ if __name__ == "__main__":
+ amqp_codegen.do_main_dict({"header": generateHeader,
+ "body": generateImpl})
+
+The reasons for allowing more than one action are that
+
+ - many languages have separate "header"-type files (C and Erlang, to
+   name two)
+ - `Makefile`s often require separate rules for generating the two
+   kinds of file, but it's convenient to keep the generation code
+   together in a single Python module
+
+The main reason things are laid out this way, however, is simply that
+it's an accident of the history of the code. We may change the API to
+`amqp_codegen` in future to clean things up a little.
diff --git a/deps/rabbitmq_codegen/amqp-1.0/messaging.xml b/deps/rabbitmq_codegen/amqp-1.0/messaging.xml
new file mode 100644
index 0000000000..f3b9b3a18f
--- /dev/null
+++ b/deps/rabbitmq_codegen/amqp-1.0/messaging.xml
@@ -0,0 +1,168 @@
+<?xml version="1.0"?>
+
+<!--
+Copyright Bank of America, N.A., Barclays Bank PLC, Cisco Systems, Credit
+Suisse, Deutsche Boerse, Envoy Technologies Inc., Goldman Sachs, HCL
+Technologies Ltd, IIT Software GmbH, iMatix Corporation, INETCO Systems Limited,
+Informatica Corporation, JPMorgan Chase & Co., Kaazing Corporation, N.A,
+Microsoft Corporation, my-Channels, Novell, Progress Software, Red Hat Inc.,
+Software AG, Solace Systems Inc., StormMQ Ltd., Tervela Inc., TWIST Process
+Innovations Ltd, GoPivotal, Inc., and WS02 Inc. 2006-2011. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+1. Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+3. The name of the author may not be used to endorse or promote products
+derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+-->
+
+<amqp name="messaging" xmlns="https://www.amqp.org/schema/amqp.xsd">
+ <section name="message-format">
+ <type name="header" class="composite" source="list" provides="section">
+ <descriptor name="amqp:header:list" code="0x00000000:0x00000070"/>
+ <field name="durable" type="boolean"/>
+ <field name="priority" type="ubyte"/>
+ <field name="ttl" type="milliseconds"/>
+ <field name="first-acquirer" type="boolean"/>
+ <field name="delivery-count" type="uint"/>
+ </type>
+ <type name="delivery-annotations" class="restricted" source="annotations" provides="section">
+ <descriptor name="amqp:delivery-annotations:map" code="0x00000000:0x00000071"/>
+ </type>
+ <type name="message-annotations" class="restricted" source="annotations" provides="section">
+ <descriptor name="amqp:message-annotations:map" code="0x00000000:0x00000072"/>
+ </type>
+ <type name="properties" class="composite" source="list" provides="section">
+ <descriptor name="amqp:properties:list" code="0x00000000:0x00000073"/>
+ <field name="message-id" type="*" requires="message-id"/>
+ <field name="user-id" type="binary"/>
+ <field name="to" type="*" requires="address"/>
+ <field name="subject" type="string"/>
+ <field name="reply-to" type="*" requires="address"/>
+ <field name="correlation-id" type="*" requires="message-id"/>
+ <field name="content-type" type="symbol"/>
+ <field name="content-encoding" type="symbol"/>
+ <field name="absolute-expiry-time" type="timestamp"/>
+ <field name="creation-time" type="timestamp"/>
+ <field name="group-id" type="string"/>
+ <field name="group-sequence" type="sequence-no"/>
+ <field name="reply-to-group-id" type="string"/>
+ </type>
+ <type name="application-properties" class="restricted" source="map" provides="section">
+ <descriptor name="amqp:application-properties:map" code="0x00000000:0x00000074"/>
+ </type>
+ <type name="data" class="restricted" source="binary" provides="section">
+ <descriptor name="amqp:data:binary" code="0x00000000:0x00000075"/>
+ </type>
+ <type name="amqp-sequence" class="restricted" source="list" provides="section">
+ <descriptor name="amqp:amqp-sequence:list" code="0x00000000:0x00000076"/>
+ </type>
+ <type name="amqp-value" class="restricted" source="*" provides="section">
+ <descriptor name="amqp:amqp-value:*" code="0x00000000:0x00000077"/>
+ </type>
+ <type name="footer" class="restricted" source="annotations" provides="section">
+ <descriptor name="amqp:footer:map" code="0x00000000:0x00000078"/>
+ </type>
+ <type name="annotations" class="restricted" source="map"/>
+ <type name="message-id-ulong" class="restricted" source="ulong" provides="message-id"/>
+ <type name="message-id-uuid" class="restricted" source="uuid" provides="message-id"/>
+ <type name="message-id-binary" class="restricted" source="binary" provides="message-id"/>
+ <type name="message-id-string" class="restricted" source="string" provides="message-id"/>
+ <type name="address-string" class="restricted" source="string" provides="address"/>
+ <definition name="MESSAGE-FORMAT" value="0"/>
+ </section>
+ <section name="delivery-state">
+ <type name="received" class="composite" source="list" provides="delivery-state">
+ <descriptor name="amqp:received:list" code="0x00000000:0x00000023"/>
+ <field name="section-number" type="uint" mandatory="true"/>
+ <field name="section-offset" type="ulong" mandatory="true"/>
+ </type>
+ <type name="accepted" class="composite" source="list" provides="delivery-state, outcome">
+ <descriptor name="amqp:accepted:list" code="0x00000000:0x00000024"/>
+ </type>
+ <type name="rejected" class="composite" source="list" provides="delivery-state, outcome">
+ <descriptor name="amqp:rejected:list" code="0x00000000:0x00000025"/>
+ <field name="error" type="error"/>
+ </type>
+ <type name="released" class="composite" source="list" provides="delivery-state, outcome">
+ <descriptor name="amqp:released:list" code="0x00000000:0x00000026"/>
+ </type>
+ <type name="modified" class="composite" source="list" provides="delivery-state, outcome">
+ <descriptor name="amqp:modified:list" code="0x00000000:0x00000027"/>
+ <field name="delivery-failed" type="boolean"/>
+ <field name="undeliverable-here" type="boolean"/>
+ <field name="message-annotations" type="fields"/>
+ </type>
+ </section>
+ <section name="addressing">
+ <type name="source" class="composite" source="list" provides="source">
+ <descriptor name="amqp:source:list" code="0x00000000:0x00000028"/>
+ <field name="address" type="*" requires="address"/>
+ <field name="durable" type="terminus-durability" default="none"/>
+ <field name="expiry-policy" type="terminus-expiry-policy" default="session-end"/>
+ <field name="timeout" type="seconds" default="0"/>
+ <field name="dynamic" type="boolean" default="false"/>
+ <field name="dynamic-node-properties" type="node-properties"/>
+ <field name="distribution-mode" type="symbol" requires="distribution-mode"/>
+ <field name="filter" type="filter-set"/>
+ <field name="default-outcome" type="*" requires="outcome"/>
+ <field name="outcomes" type="symbol" multiple="true"/>
+ <field name="capabilities" type="symbol" multiple="true"/>
+ </type>
+ <type name="target" class="composite" source="list" provides="target">
+ <descriptor name="amqp:target:list" code="0x00000000:0x00000029"/>
+ <field name="address" type="*" requires="address"/>
+ <field name="durable" type="terminus-durability" default="none"/>
+ <field name="expiry-policy" type="terminus-expiry-policy" default="session-end"/>
+ <field name="timeout" type="seconds" default="0"/>
+ <field name="dynamic" type="boolean" default="false"/>
+ <field name="dynamic-node-properties" type="node-properties"/>
+ <field name="capabilities" type="symbol" multiple="true"/>
+ </type>
+ <type name="terminus-durability" class="restricted" source="uint">
+ <choice name="none" value="0"/>
+ <choice name="configuration" value="1"/>
+ <choice name="unsettled-state" value="2"/>
+ </type>
+ <type name="terminus-expiry-policy" class="restricted" source="symbol">
+ <choice name="link-detach" value="link-detach"/>
+ <choice name="session-end" value="session-end"/>
+ <choice name="connection-close" value="connection-close"/>
+ <choice name="never" value="never"/>
+ </type>
+ <type name="std-dist-mode" class="restricted" source="symbol" provides="distribution-mode">
+ <choice name="move" value="move"/>
+ <choice name="copy" value="copy"/>
+ </type>
+ <type name="filter-set" class="restricted" source="map"/>
+ <type name="node-properties" class="restricted" source="fields"/>
+ <type name="delete-on-close" class="composite" source="list" provides="lifetime-policy">
+ <descriptor name="amqp:delete-on-close:list" code="0x00000000:0x0000002b"/>
+ </type>
+ <type name="delete-on-no-links" class="composite" source="list" provides="lifetime-policy">
+ <descriptor name="amqp:delete-on-no-links:list" code="0x00000000:0x0000002c"/>
+ </type>
+ <type name="delete-on-no-messages" class="composite" source="list" provides="lifetime-policy">
+ <descriptor name="amqp:delete-on-no-messages:list" code="0x00000000:0x0000002d"/>
+ </type>
+ <type name="delete-on-no-links-or-messages" class="composite" source="list" provides="lifetime-policy">
+ <descriptor name="amqp:delete-on-no-links-or-messages:list" code="0x00000000:0x0000002e"/>
+ </type>
+ </section>
+</amqp>
diff --git a/deps/rabbitmq_codegen/amqp-1.0/security.xml b/deps/rabbitmq_codegen/amqp-1.0/security.xml
new file mode 100644
index 0000000000..22b2bb3839
--- /dev/null
+++ b/deps/rabbitmq_codegen/amqp-1.0/security.xml
@@ -0,0 +1,76 @@
+<?xml version="1.0"?>
+
+<!--
+Copyright Bank of America, N.A., Barclays Bank PLC, Cisco Systems, Credit
+Suisse, Deutsche Boerse, Envoy Technologies Inc., Goldman Sachs, HCL
+Technologies Ltd, IIT Software GmbH, iMatix Corporation, INETCO Systems Limited,
+Informatica Corporation, JPMorgan Chase & Co., Kaazing Corporation, N.A,
+Microsoft Corporation, my-Channels, Novell, Progress Software, Red Hat Inc.,
+Software AG, Solace Systems Inc., StormMQ Ltd., Tervela Inc., TWIST Process
+Innovations Ltd, GoPivotal, Inc., and WS02 Inc. 2006-2011. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+1. Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+3. The name of the author may not be used to endorse or promote products
+derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+-->
+
+<amqp name="security" xmlns="https://www.amqp.org/schema/amqp.xsd">
+ <section name="tls">
+ <definition name="TLS-MAJOR" value="1"/>
+ <definition name="TLS-MINOR" value="0"/>
+ <definition name="TLS-REVISION" value="0"/>
+ </section>
+ <section name="sasl">
+ <type name="sasl-mechanisms" class="composite" source="list" provides="sasl-frame">
+ <descriptor name="amqp:sasl-mechanisms:list" code="0x00000000:0x00000040"/>
+ <field name="sasl-server-mechanisms" type="symbol" mandatory="true" multiple="true"/>
+ </type>
+ <type name="sasl-init" class="composite" source="list" provides="sasl-frame">
+ <descriptor name="amqp:sasl-init:list" code="0x00000000:0x00000041"/>
+ <field name="mechanism" type="symbol" mandatory="true"/>
+ <field name="initial-response" type="binary"/>
+ <field name="hostname" type="string"/>
+ </type>
+ <type name="sasl-challenge" class="composite" source="list" provides="sasl-frame">
+ <descriptor name="amqp:sasl-challenge:list" code="0x00000000:0x00000042"/>
+ <field name="challenge" type="binary" mandatory="true"/>
+ </type>
+ <type name="sasl-response" class="composite" source="list" provides="sasl-frame">
+ <descriptor name="amqp:sasl-response:list" code="0x00000000:0x00000043"/>
+ <field name="response" type="binary" mandatory="true"/>
+ </type>
+ <type name="sasl-outcome" class="composite" source="list" provides="sasl-frame">
+ <descriptor name="amqp:sasl-outcome:list" code="0x00000000:0x00000044"/>
+ <field name="code" type="sasl-code" mandatory="true"/>
+ <field name="additional-data" type="binary"/>
+ </type>
+ <type name="sasl-code" class="restricted" source="ubyte">
+ <choice name="ok" value="0"/>
+ <choice name="auth" value="1"/>
+ <choice name="sys" value="2"/>
+ <choice name="sys-perm" value="3"/>
+ <choice name="sys-temp" value="4"/>
+ </type>
+ <definition name="SASL-MAJOR" value="1"/>
+ <definition name="SASL-MINOR" value="0"/>
+ <definition name="SASL-REVISION" value="0"/>
+ </section>
+</amqp>
diff --git a/deps/rabbitmq_codegen/amqp-1.0/transactions.xml b/deps/rabbitmq_codegen/amqp-1.0/transactions.xml
new file mode 100644
index 0000000000..c562d75643
--- /dev/null
+++ b/deps/rabbitmq_codegen/amqp-1.0/transactions.xml
@@ -0,0 +1,73 @@
+<?xml version="1.0"?>
+
+<!--
+Copyright Bank of America, N.A., Barclays Bank PLC, Cisco Systems, Credit
+Suisse, Deutsche Boerse, Envoy Technologies Inc., Goldman Sachs, HCL
+Technologies Ltd, IIT Software GmbH, iMatix Corporation, INETCO Systems Limited,
+Informatica Corporation, JPMorgan Chase & Co., Kaazing Corporation, N.A,
+Microsoft Corporation, my-Channels, Novell, Progress Software, Red Hat Inc.,
+Software AG, Solace Systems Inc., StormMQ Ltd., Tervela Inc., TWIST Process
+Innovations Ltd, GoPivotal, Inc., and WS02 Inc. 2006-2011. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+1. Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+3. The name of the author may not be used to endorse or promote products
+derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+-->
+
+<amqp name="transactions" xmlns="https://www.amqp.org/schema/amqp.xsd">
+ <section name="coordination">
+ <type name="coordinator" class="composite" source="list" provides="target">
+ <descriptor name="amqp:coordinator:list" code="0x00000000:0x00000030"/>
+ <field name="capabilities" type="symbol" requires="txn-capability" multiple="true"/>
+ </type>
+ <type name="declare" class="composite" source="list">
+ <descriptor name="amqp:declare:list" code="0x00000000:0x00000031"/>
+ <field name="global-id" type="*" requires="global-tx-id"/>
+ </type>
+ <type name="discharge" class="composite" source="list">
+ <descriptor name="amqp:discharge:list" code="0x00000000:0x00000032"/>
+ <field name="txn-id" type="*" mandatory="true" requires="txn-id"/>
+ <field name="fail" type="boolean"/>
+ </type>
+ <type name="transaction-id" class="restricted" source="binary" provides="txn-id"/>
+ <type name="declared" class="composite" source="list" provides="delivery-state, outcome">
+ <descriptor name="amqp:declared:list" code="0x00000000:0x00000033"/>
+ <field name="txn-id" type="*" mandatory="true" requires="txn-id"/>
+ </type>
+ <type name="transactional-state" class="composite" source="list" provides="delivery-state">
+ <descriptor name="amqp:transactional-state:list" code="0x00000000:0x00000034"/>
+ <field name="txn-id" type="*" mandatory="true" requires="txn-id"/>
+ <field name="outcome" type="*" requires="outcome"/>
+ </type>
+ <type name="txn-capability" class="restricted" source="symbol" provides="txn-capability">
+ <choice name="local-transactions" value="amqp:local-transactions"/>
+ <choice name="distributed-transactions" value="amqp:distributed-transactions"/>
+ <choice name="promotable-transactions" value="amqp:promotable-transactions"/>
+ <choice name="multi-txns-per-ssn" value="amqp:multi-txns-per-ssn"/>
+ <choice name="multi-ssns-per-txn" value="amqp:multi-ssns-per-txn"/>
+ </type>
+ <type name="transaction-error" class="restricted" source="symbol" provides="error-condition">
+ <choice name="unknown-id" value="amqp:transaction:unknown-id"/>
+ <choice name="transaction-rollback" value="amqp:transaction:rollback"/>
+ <choice name="transaction-timeout" value="amqp:transaction:timeout"/>
+ </type>
+ </section>
+</amqp>
diff --git a/deps/rabbitmq_codegen/amqp-1.0/transport.xml b/deps/rabbitmq_codegen/amqp-1.0/transport.xml
new file mode 100644
index 0000000000..34c1cf25fd
--- /dev/null
+++ b/deps/rabbitmq_codegen/amqp-1.0/transport.xml
@@ -0,0 +1,200 @@
+<?xml version="1.0"?>
+
+<!--
+Copyright Bank of America, N.A., Barclays Bank PLC, Cisco Systems, Credit
+Suisse, Deutsche Boerse, Envoy Technologies Inc., Goldman Sachs, HCL
+Technologies Ltd, IIT Software GmbH, iMatix Corporation, INETCO Systems Limited,
+Informatica Corporation, JPMorgan Chase & Co., Kaazing Corporation, N.A,
+Microsoft Corporation, my-Channels, Novell, Progress Software, Red Hat Inc.,
+Software AG, Solace Systems Inc., StormMQ Ltd., Tervela Inc., TWIST Process
+Innovations Ltd, GoPivotal, Inc., and WS02 Inc. 2006-2011. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+1. Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+3. The name of the author may not be used to endorse or promote products
+derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+-->
+
+<amqp name="transport" xmlns="https://www.amqp.org/schema/amqp.xsd">
+ <section name="performatives">
+ <type name="open" class="composite" source="list" provides="frame">
+ <descriptor name="amqp:open:list" code="0x00000000:0x00000010"/>
+ <field name="container-id" type="string" mandatory="true"/>
+ <field name="hostname" type="string"/>
+ <field name="max-frame-size" type="uint" default="4294967295"/>
+ <field name="channel-max" type="ushort" default="65535"/>
+ <field name="idle-time-out" type="milliseconds"/>
+ <field name="outgoing-locales" type="ietf-language-tag" multiple="true"/>
+ <field name="incoming-locales" type="ietf-language-tag" multiple="true"/>
+ <field name="offered-capabilities" type="symbol" multiple="true"/>
+ <field name="desired-capabilities" type="symbol" multiple="true"/>
+ <field name="properties" type="fields"/>
+ </type>
+ <type name="begin" class="composite" source="list" provides="frame">
+ <descriptor name="amqp:begin:list" code="0x00000000:0x00000011"/>
+ <field name="remote-channel" type="ushort"/>
+ <field name="next-outgoing-id" type="transfer-number" mandatory="true"/>
+ <field name="incoming-window" type="uint" mandatory="true"/>
+ <field name="outgoing-window" type="uint" mandatory="true"/>
+ <field name="handle-max" type="handle" default="4294967295"/>
+ <field name="offered-capabilities" type="symbol" multiple="true"/>
+ <field name="desired-capabilities" type="symbol" multiple="true"/>
+ <field name="properties" type="fields"/>
+ </type>
+ <type name="attach" class="composite" source="list" provides="frame">
+ <descriptor name="amqp:attach:list" code="0x00000000:0x00000012"/>
+ <field name="name" type="string" mandatory="true"/>
+ <field name="handle" type="handle" mandatory="true"/>
+ <field name="role" type="role" mandatory="true"/>
+ <field name="snd-settle-mode" type="sender-settle-mode" default="mixed"/>
+ <field name="rcv-settle-mode" type="receiver-settle-mode" default="first"/>
+ <field name="source" type="*" requires="source"/>
+ <field name="target" type="*" requires="target"/>
+ <field name="unsettled" type="map"/>
+ <field name="incomplete-unsettled" type="boolean" default="false"/>
+ <field name="initial-delivery-count" type="sequence-no"/>
+ <field name="max-message-size" type="ulong"/>
+ <field name="offered-capabilities" type="symbol" multiple="true"/>
+ <field name="desired-capabilities" type="symbol" multiple="true"/>
+ <field name="properties" type="fields"/>
+ </type>
+ <type name="flow" class="composite" source="list" provides="frame">
+ <descriptor name="amqp:flow:list" code="0x00000000:0x00000013"/>
+ <field name="next-incoming-id" type="transfer-number"/>
+ <field name="incoming-window" type="uint" mandatory="true"/>
+ <field name="next-outgoing-id" type="transfer-number" mandatory="true"/>
+ <field name="outgoing-window" type="uint" mandatory="true"/>
+ <field name="handle" type="handle"/>
+ <field name="delivery-count" type="sequence-no"/>
+ <field name="link-credit" type="uint"/>
+ <field name="available" type="uint"/>
+ <field name="drain" type="boolean" default="false"/>
+ <field name="echo" type="boolean" default="false"/>
+ <field name="properties" type="fields"/>
+ </type>
+ <type name="transfer" class="composite" source="list" provides="frame">
+ <descriptor name="amqp:transfer:list" code="0x00000000:0x00000014"/>
+ <field name="handle" type="handle" mandatory="true"/>
+ <field name="delivery-id" type="delivery-number"/>
+ <field name="delivery-tag" type="delivery-tag"/>
+ <field name="message-format" type="message-format"/>
+ <field name="settled" type="boolean"/>
+ <field name="more" type="boolean" default="false"/>
+ <field name="rcv-settle-mode" type="receiver-settle-mode"/>
+ <field name="state" type="*" requires="delivery-state"/>
+ <field name="resume" type="boolean" default="false"/>
+ <field name="aborted" type="boolean" default="false"/>
+ <field name="batchable" type="boolean" default="false"/>
+ </type>
+ <type name="disposition" class="composite" source="list" provides="frame">
+ <descriptor name="amqp:disposition:list" code="0x00000000:0x00000015"/>
+ <field name="role" type="role" mandatory="true"/>
+ <field name="first" type="delivery-number" mandatory="true"/>
+ <field name="last" type="delivery-number"/>
+ <field name="settled" type="boolean" default="false"/>
+ <field name="state" type="*" requires="delivery-state"/>
+ <field name="batchable" type="boolean" default="false"/>
+ </type>
+ <type name="detach" class="composite" source="list" provides="frame">
+ <descriptor name="amqp:detach:list" code="0x00000000:0x00000016"/>
+ <field name="handle" type="handle" mandatory="true"/>
+ <field name="closed" type="boolean" default="false"/>
+ <field name="error" type="error"/>
+ </type>
+ <type name="end" class="composite" source="list" provides="frame">
+ <descriptor name="amqp:end:list" code="0x00000000:0x00000017"/>
+ <field name="error" type="error"/>
+ </type>
+ <type name="close" class="composite" source="list" provides="frame">
+ <descriptor name="amqp:close:list" code="0x00000000:0x00000018"/>
+ <field name="error" type="error"/>
+ </type>
+ </section>
+ <section name="definitions">
+ <type name="role" class="restricted" source="boolean">
+ <choice name="sender" value="false"/>
+ <choice name="receiver" value="true"/>
+ </type>
+ <type name="sender-settle-mode" class="restricted" source="ubyte">
+ <choice name="unsettled" value="0"/>
+ <choice name="settled" value="1"/>
+ <choice name="mixed" value="2"/>
+ </type>
+ <type name="receiver-settle-mode" class="restricted" source="ubyte">
+ <choice name="first" value="0"/>
+ <choice name="second" value="1"/>
+ </type>
+ <type name="handle" class="restricted" source="uint"/>
+ <type name="seconds" class="restricted" source="uint"/>
+ <type name="milliseconds" class="restricted" source="uint"/>
+ <type name="delivery-tag" class="restricted" source="binary"/>
+ <type name="delivery-number" class="restricted" source="sequence-no"/>
+ <type name="transfer-number" class="restricted" source="sequence-no"/>
+ <type name="sequence-no" class="restricted" source="uint"/>
+ <type name="message-format" class="restricted" source="uint"/>
+ <type name="ietf-language-tag" class="restricted" source="symbol"/>
+ <type name="fields" class="restricted" source="map"/>
+ <type name="error" class="composite" source="list">
+ <descriptor name="amqp:error:list" code="0x00000000:0x0000001d"/>
+ <field name="condition" type="symbol" mandatory="true" requires="error-condition"/>
+ <field name="description" type="string"/>
+ <field name="info" type="fields"/>
+ </type>
+ <type name="amqp-error" class="restricted" source="symbol" provides="error-condition">
+ <choice name="internal-error" value="amqp:internal-error"/>
+ <choice name="not-found" value="amqp:not-found"/>
+ <choice name="unauthorized-access" value="amqp:unauthorized-access"/>
+ <choice name="decode-error" value="amqp:decode-error"/>
+ <choice name="resource-limit-exceeded" value="amqp:resource-limit-exceeded"/>
+ <choice name="not-allowed" value="amqp:not-allowed"/>
+ <choice name="invalid-field" value="amqp:invalid-field"/>
+ <choice name="not-implemented" value="amqp:not-implemented"/>
+ <choice name="resource-locked" value="amqp:resource-locked"/>
+ <choice name="precondition-failed" value="amqp:precondition-failed"/>
+ <choice name="resource-deleted" value="amqp:resource-deleted"/>
+ <choice name="illegal-state" value="amqp:illegal-state"/>
+ <choice name="frame-size-too-small" value="amqp:frame-size-too-small"/>
+ </type>
+ <type name="connection-error" class="restricted" source="symbol" provides="error-condition">
+ <choice name="connection-forced" value="amqp:connection:forced"/>
+ <choice name="framing-error" value="amqp:connection:framing-error"/>
+ <choice name="redirect" value="amqp:connection:redirect"/>
+ </type>
+ <type name="session-error" class="restricted" source="symbol" provides="error-condition">
+ <choice name="window-violation" value="amqp:session:window-violation"/>
+ <choice name="errant-link" value="amqp:session:errant-link"/>
+ <choice name="handle-in-use" value="amqp:session:handle-in-use"/>
+ <choice name="unattached-handle" value="amqp:session:unattached-handle"/>
+ </type>
+ <type name="link-error" class="restricted" source="symbol" provides="error-condition">
+ <choice name="detach-forced" value="amqp:link:detach-forced"/>
+ <choice name="transfer-limit-exceeded" value="amqp:link:transfer-limit-exceeded"/>
+ <choice name="message-size-exceeded" value="amqp:link:message-size-exceeded"/>
+ <choice name="redirect" value="amqp:link:redirect"/>
+ <choice name="stolen" value="amqp:link:stolen"/>
+ </type>
+ <definition name="PORT" value="5672"/>
+ <definition name="SECURE-PORT" value="5671"/>
+ <definition name="MAJOR" value="1"/>
+ <definition name="MINOR" value="0"/>
+ <definition name="REVISION" value="0"/>
+ <definition name="MIN-MAX-FRAME-SIZE" value="512"/>
+ </section>
+</amqp>
diff --git a/deps/rabbitmq_codegen/amqp-1.0/types.xml b/deps/rabbitmq_codegen/amqp-1.0/types.xml
new file mode 100644
index 0000000000..f91baa3d59
--- /dev/null
+++ b/deps/rabbitmq_codegen/amqp-1.0/types.xml
@@ -0,0 +1,125 @@
+<?xml version="1.0"?>
+
+<!--
+Copyright Bank of America, N.A., Barclays Bank PLC, Cisco Systems, Credit
+Suisse, Deutsche Boerse, Envoy Technologies Inc., Goldman Sachs, HCL
+Technologies Ltd, IIT Software GmbH, iMatix Corporation, INETCO Systems Limited,
+Informatica Corporation, JPMorgan Chase & Co., Kaazing Corporation, N.A,
+Microsoft Corporation, my-Channels, Novell, Progress Software, Red Hat Inc.,
+Software AG, Solace Systems Inc., StormMQ Ltd., Tervela Inc., TWIST Process
+Innovations Ltd, GoPivotal, Inc., and WS02 Inc. 2006-2011. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+1. Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+3. The name of the author may not be used to endorse or promote products
+derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+-->
+
+<amqp name="types" xmlns="https://www.amqp.org/schema/amqp.xsd">
+ <section name="encodings">
+ <type name="null" class="primitive">
+ <encoding code="0x40" category="fixed" width="0"/>
+ </type>
+ <type name="boolean" class="primitive">
+ <encoding code="0x56" category="fixed" width="1"/>
+ <encoding name="true" code="0x41" category="fixed" width="0"/>
+ <encoding name="false" code="0x42" category="fixed" width="0"/>
+ </type>
+ <type name="ubyte" class="primitive">
+ <encoding code="0x50" category="fixed" width="1"/>
+ </type>
+ <type name="ushort" class="primitive">
+ <encoding code="0x60" category="fixed" width="2"/>
+ </type>
+ <type name="uint" class="primitive">
+ <encoding code="0x70" category="fixed" width="4"/>
+ <encoding name="smalluint" code="0x52" category="fixed" width="1"/>
+ <encoding name="uint0" code="0x43" category="fixed" width="0"/>
+ </type>
+ <type name="ulong" class="primitive">
+ <encoding code="0x80" category="fixed" width="8"/>
+ <encoding name="smallulong" code="0x53" category="fixed" width="1"/>
+ <encoding name="ulong0" code="0x44" category="fixed" width="0"/>
+ </type>
+ <type name="byte" class="primitive">
+ <encoding code="0x51" category="fixed" width="1"/>
+ </type>
+ <type name="short" class="primitive">
+ <encoding code="0x61" category="fixed" width="2"/>
+ </type>
+ <type name="int" class="primitive">
+ <encoding code="0x71" category="fixed" width="4"/>
+ <encoding name="smallint" code="0x54" category="fixed" width="1"/>
+ </type>
+ <type name="long" class="primitive">
+ <encoding code="0x81" category="fixed" width="8"/>
+ <encoding name="smalllong" code="0x55" category="fixed" width="1"/>
+ </type>
+ <type name="float" class="primitive">
+ <encoding name="ieee-754" code="0x72" category="fixed" width="4"/>
+ </type>
+ <type name="double" class="primitive">
+ <encoding name="ieee-754" code="0x82" category="fixed" width="8"/>
+ </type>
+ <type name="decimal32" class="primitive">
+ <encoding name="ieee-754" code="0x74" category="fixed" width="4"/>
+ </type>
+ <type name="decimal64" class="primitive">
+ <encoding name="ieee-754" code="0x84" category="fixed" width="8"/>
+ </type>
+ <type name="decimal128" class="primitive">
+ <encoding name="ieee-754" code="0x94" category="fixed" width="16"/>
+ </type>
+ <type name="char" class="primitive">
+ <encoding name="utf32" code="0x73" category="fixed" width="4"/>
+ </type>
+ <type name="timestamp" class="primitive">
+ <encoding name="ms64" code="0x83" category="fixed" width="8"/>
+ </type>
+ <type name="uuid" class="primitive">
+ <encoding code="0x98" category="fixed" width="16"/>
+ </type>
+ <type name="binary" class="primitive">
+ <encoding name="vbin8" code="0xa0" category="variable" width="1"/>
+ <encoding name="vbin32" code="0xb0" category="variable" width="4"/>
+ </type>
+ <type name="string" class="primitive">
+ <encoding name="str8-utf8" code="0xa1" category="variable" width="1"/>
+ <encoding name="str32-utf8" code="0xb1" category="variable" width="4"/>
+ </type>
+ <type name="symbol" class="primitive">
+ <encoding name="sym8" code="0xa3" category="variable" width="1"/>
+ <encoding name="sym32" code="0xb3" category="variable" width="4"/>
+ </type>
+ <type name="list" class="primitive">
+ <encoding name="list0" code="0x45" category="fixed" width="0"/>
+ <encoding name="list8" code="0xc0" category="compound" width="1"/>
+ <encoding name="list32" code="0xd0" category="compound" width="4"/>
+ </type>
+ <type name="map" class="primitive">
+ <encoding name="map8" code="0xc1" category="compound" width="1"/>
+ <encoding name="map32" code="0xd1" category="compound" width="4"/>
+ </type>
+ <type name="array" class="primitive">
+ <encoding name="array8" code="0xe0" category="array" width="1"/>
+ <encoding name="array32" code="0xf0" category="array" width="4"/>
+ </type>
+ </section>
+</amqp>
diff --git a/deps/rabbitmq_codegen/amqp-rabbitmq-0.8.json b/deps/rabbitmq_codegen/amqp-rabbitmq-0.8.json
new file mode 100644
index 0000000000..c0b30a5d44
--- /dev/null
+++ b/deps/rabbitmq_codegen/amqp-rabbitmq-0.8.json
@@ -0,0 +1,659 @@
+{
+ "name": "AMQP",
+ "major-version": 8,
+ "minor-version": 0,
+ "port": 5672,
+ "copyright": [
+ "Copyright (C) 2008-2020 VMware, Inc. or its affiliates.\n",
+ "\n",
+ "Permission is hereby granted, free of charge, to any person\n",
+ "obtaining a copy of this file (the \"Software\"), to deal in the\n",
+ "Software without restriction, including without limitation the \n",
+ "rights to use, copy, modify, merge, publish, distribute, \n",
+ "sublicense, and/or sell copies of the Software, and to permit \n",
+ "persons to whom the Software is furnished to do so, subject to \n",
+ "the following conditions:\n",
+ "\n",
+ "The above copyright notice and this permission notice shall be\n",
+ "included in all copies or substantial portions of the Software.\n",
+ "\n",
+ "THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n",
+ "EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES\n",
+ "OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n",
+ "NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n",
+ "HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,\n",
+ "WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n",
+ "FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR\n",
+ "OTHER DEALINGS IN THE SOFTWARE.\n",
+ "\n",
+ "Class information entered from amqp_xml0-8.pdf and domain types from amqp-xml-doc0-9.pdf\n",
+ "\n",
+ "b3cb053f15e7b98808c0ccc67f23cb3e amqp_xml0-8.pdf\n",
+ "http://twiststandards.org/?option=com_docman&task=cat_view&gid=28&Itemid=90\n",
+ "8444db91e2949dbecfb2585e9eef6d64 amqp-xml-doc0-9.pdf\n",
+ "https://jira.amqp.org/confluence/download/attachments/720900/amqp-xml-doc0-9.pdf?version=1\n"],
+
+ "domains": [
+ ["access-ticket", "short"],
+ ["bit", "bit"],
+ ["channel-id", "longstr"],
+ ["class-id", "short"],
+ ["consumer-tag", "shortstr"],
+ ["delivery-tag", "longlong"],
+ ["destination", "shortstr"],
+ ["duration", "longlong"],
+ ["exchange-name", "shortstr"],
+ ["known-hosts", "shortstr"],
+ ["long", "long"],
+ ["longlong", "longlong"],
+ ["longstr", "longstr"],
+ ["method-id", "short"],
+ ["no-ack", "bit"],
+ ["no-local", "bit"],
+ ["octet", "octet"],
+ ["offset", "longlong"],
+ ["path", "shortstr"],
+ ["peer-properties", "table"],
+ ["queue-name", "shortstr"],
+ ["redelivered", "bit"],
+ ["reference", "longstr"],
+ ["reject-code", "short"],
+ ["reject-text", "shortstr"],
+ ["reply-code", "short"],
+ ["reply-text", "shortstr"],
+ ["security-token", "longstr"],
+ ["short", "short"],
+ ["shortstr", "shortstr"],
+ ["table", "table"],
+ ["timestamp", "timestamp"]
+ ],
+
+ "constants": [
+ {"name": "FRAME-METHOD", "value": 1},
+ {"name": "FRAME-HEADER", "value": 2},
+ {"name": "FRAME-BODY", "value": 3},
+ {"name": "FRAME-OOB-METHOD", "value": 4},
+ {"name": "FRAME-OOB-HEADER", "value": 5},
+ {"name": "FRAME-OOB-BODY", "value": 6},
+ {"name": "FRAME-TRACE", "value": 7},
+ {"name": "FRAME-HEARTBEAT", "value": 8},
+ {"name": "FRAME-MIN-SIZE", "value": 4096},
+ {"name": "FRAME-END", "value": 206},
+ {"name": "REPLY-SUCCESS", "value": 200},
+ {"name": "NOT-DELIVERED", "value": 310, "class": "soft-error"},
+ {"name": "CONTENT-TOO-LARGE", "value": 311, "class": "soft-error"},
+ {"name": "NO-ROUTE", "value": 312, "class": "soft-error"},
+ {"name": "NO-CONSUMERS", "value": 313, "class": "soft-error"},
+ {"name": "ACCESS-REFUSED", "value": 403, "class": "soft-error"},
+ {"name": "NOT-FOUND", "value": 404, "class": "soft-error"},
+ {"name": "RESOURCE-LOCKED", "value": 405, "class": "soft-error"},
+ {"name": "PRECONDITION-FAILED", "value": 406, "class": "soft-error"},
+ {"name": "CONNECTION-FORCED", "value": 320, "class": "hard-error"},
+ {"name": "INVALID-PATH", "value": 402, "class": "hard-error"},
+ {"name": "FRAME-ERROR", "value": 501, "class": "hard-error"},
+ {"name": "SYNTAX-ERROR", "value": 502, "class": "hard-error"},
+ {"name": "COMMAND-INVALID", "value": 503, "class": "hard-error"},
+ {"name": "CHANNEL-ERROR", "value": 504, "class": "hard-error"},
+ {"name": "UNEXPECTED-FRAME", "value": 505, "class": "hard-error"},
+ {"name": "RESOURCE-ERROR", "value": 506, "class": "hard-error"},
+ {"name": "NOT-ALLOWED", "value": 530, "class": "hard-error"},
+ {"name": "NOT-IMPLEMENTED", "value": 540, "class": "hard-error"},
+ {"name": "INTERNAL-ERROR", "value": 541, "class": "hard-error"}
+ ],
+
+ "classes": [
+ {
+ "id": 10,
+ "methods": [{"id": 10,
+ "arguments": [{"type": "octet", "name": "version-major", "default-value": 0},
+ {"type": "octet", "name": "version-minor", "default-value": 8},
+ {"domain": "peer-properties", "name": "server-properties"},
+ {"type": "longstr", "name": "mechanisms", "default-value": "PLAIN"},
+ {"type": "longstr", "name": "locales", "default-value": "en_US"}],
+ "name": "start",
+ "synchronous" : true},
+ {"id": 11,
+ "arguments": [{"domain": "peer-properties", "name": "client-properties"},
+ {"type": "shortstr", "name": "mechanism", "default-value": "PLAIN"},
+ {"type": "longstr", "name": "response"},
+ {"type": "shortstr", "name": "locale", "default-value": "en_US"}],
+ "name": "start-ok"},
+ {"id": 20,
+ "arguments": [{"type": "longstr", "name": "challenge"}],
+ "name": "secure",
+ "synchronous" : true},
+ {"id": 21,
+ "arguments": [{"type": "longstr", "name": "response"}],
+ "name": "secure-ok"},
+ {"id": 30,
+ "arguments": [{"type": "short", "name": "channel-max", "default-value": 0},
+ {"type": "long", "name": "frame-max", "default-value": 0},
+ {"type": "short", "name": "heartbeat", "default-value": 0}],
+ "name": "tune",
+ "synchronous" : true},
+ {"id": 31,
+ "arguments": [{"type": "short", "name": "channel-max", "default-value": 0},
+ {"type": "long", "name": "frame-max", "default-value": 0},
+ {"type": "short", "name": "heartbeat", "default-value": 0}],
+ "name": "tune-ok"},
+ {"id": 40,
+ "arguments": [{"type": "shortstr", "name": "virtual-host", "default-value": "/"},
+ {"type": "shortstr", "name": "capabilities", "default-value": ""},
+ {"type": "bit", "name": "insist", "default-value": false}],
+ "name": "open",
+ "synchronous" : true},
+ {"id": 41,
+ "arguments": [{"type": "shortstr", "name": "known-hosts", "default-value": ""}],
+ "name": "open-ok"},
+ {"id": 50,
+ "arguments": [{"type": "shortstr", "name": "host"},
+ {"type": "shortstr", "name": "known-hosts", "default-value": ""}],
+ "name": "redirect"},
+ {"id": 60,
+ "arguments": [{"type": "short", "name": "reply-code"},
+ {"type": "shortstr", "name": "reply-text", "default-value": ""},
+ {"type": "short", "name": "class-id"},
+ {"type": "short", "name": "method-id"}],
+ "name": "close",
+ "synchronous" : true},
+ {"id": 61,
+ "arguments": [],
+ "name": "close-ok"}],
+ "name": "connection",
+ "properties": []
+ },
+ {
+ "id": 20,
+ "methods": [{"id": 10,
+ "arguments": [{"type": "shortstr", "name": "out-of-band", "default-value": ""}],
+ "name": "open",
+ "synchronous" : true},
+ {"id": 11,
+ "arguments": [],
+ "name": "open-ok"},
+ {"id": 20,
+ "arguments": [{"type": "bit", "name": "active"}],
+ "name": "flow",
+ "synchronous" : true},
+ {"id": 21,
+ "arguments": [{"type": "bit", "name": "active"}],
+ "name": "flow-ok"},
+ {"id": 30,
+ "arguments": [{"type": "short", "name": "reply-code"},
+ {"type": "shortstr", "name": "reply-text", "default-value": ""},
+ {"type": "table", "name": "details", "default-value": {}}],
+ "name": "alert"},
+ {"id": 40,
+ "arguments": [{"type": "short", "name": "reply-code"},
+ {"type": "shortstr", "name": "reply-text", "default-value": ""},
+ {"type": "short", "name": "class-id"},
+ {"type": "short", "name": "method-id"}],
+ "name": "close",
+ "synchronous" : true},
+ {"id": 41,
+ "arguments": [],
+ "name": "close-ok"}],
+ "name": "channel"
+ },
+ {
+ "id": 30,
+ "methods": [{"id": 10,
+ "arguments": [{"type": "shortstr", "name": "realm", "default-value": "/data"},
+ {"type": "bit", "name": "exclusive", "default-value": false},
+ {"type": "bit", "name": "passive", "default-value": true},
+ {"type": "bit", "name": "active", "default-value": true},
+ {"type": "bit", "name": "write", "default-value": true},
+ {"type": "bit", "name": "read", "default-value": true}],
+ "name": "request",
+ "synchronous" : true},
+ {"id": 11,
+ "arguments": [{"type": "short", "name": "ticket", "default-value": 1}],
+ "name": "request-ok"}],
+ "name": "access"
+ },
+ {
+ "id": 40,
+ "methods": [{"id": 10,
+ "arguments": [{"type": "short", "name": "ticket", "default-value": 1},
+ {"type": "shortstr", "name": "exchange"},
+ {"type": "shortstr", "name": "type", "default-value": "direct"},
+ {"type": "bit", "name": "passive", "default-value": false},
+ {"type": "bit", "name": "durable", "default-value": false},
+ {"type": "bit", "name": "auto-delete", "default-value": false},
+ {"type": "bit", "name": "internal", "default-value": false},
+ {"type": "bit", "name": "nowait", "default-value": false},
+ {"type": "table", "name": "arguments", "default-value": {}}],
+ "name": "declare",
+ "synchronous" : true},
+ {"id": 11,
+ "arguments": [],
+ "name": "declare-ok"},
+ {"id": 20,
+ "arguments": [{"type": "short", "name": "ticket", "default-value": 1},
+ {"type": "shortstr", "name": "exchange"},
+ {"type": "bit", "name": "if-unused", "default-value": false},
+ {"type": "bit", "name": "nowait", "default-value": false}],
+ "name": "delete",
+ "synchronous" : true},
+ {"id": 21,
+ "arguments": [],
+ "name": "delete-ok"}],
+ "name": "exchange"
+ },
+ {
+ "id": 50,
+ "methods": [{"id": 10,
+ "arguments": [{"type": "short", "name": "ticket", "default-value": 1},
+ {"type": "shortstr", "name": "queue", "default-value": ""},
+ {"type": "bit", "name": "passive", "default-value": false},
+ {"type": "bit", "name": "durable", "default-value": false},
+ {"type": "bit", "name": "exclusive", "default-value": false},
+ {"type": "bit", "name": "auto-delete", "default-value": false},
+ {"type": "bit", "name": "nowait", "default-value": false},
+ {"type": "table", "name": "arguments", "default-value": {}}],
+ "name": "declare",
+ "synchronous" : true},
+ {"id": 11,
+ "arguments": [{"type": "shortstr", "name": "queue"},
+ {"type": "long", "name": "message-count"},
+ {"type": "long", "name": "consumer-count"}],
+ "name": "declare-ok"},
+ {"id": 20,
+ "arguments": [{"type": "short", "name": "ticket", "default-value": 1},
+ {"type": "shortstr", "name": "queue", "default-value": ""},
+ {"type": "shortstr", "name": "exchange"},
+ {"type": "shortstr", "name": "routing-key", "default-value": ""},
+ {"type": "bit", "name": "nowait", "default-value": false},
+ {"type": "table", "name": "arguments", "default-value": {}}],
+ "name": "bind",
+ "synchronous" : true},
+ {"id": 21,
+ "arguments": [],
+ "name": "bind-ok"},
+ {"id": 30,
+ "arguments": [{"type": "short", "name": "ticket", "default-value": 1},
+ {"type": "shortstr", "name": "queue", "default-value": ""},
+ {"type": "bit", "name": "nowait", "default-value": false}],
+ "name": "purge",
+ "synchronous" : true},
+ {"id": 31,
+ "arguments": [{"type": "long", "name": "message-count"}],
+ "name": "purge-ok"},
+ {"id": 40,
+ "arguments": [{"type": "short", "name": "ticket", "default-value": 1},
+ {"type": "shortstr", "name": "queue", "default-value": ""},
+ {"type": "bit", "name": "if-unused", "default-value": false},
+ {"type": "bit", "name": "if-empty", "default-value": false},
+ {"type": "bit", "name": "nowait", "default-value": false}],
+ "name": "delete",
+ "synchronous" : true},
+ {"id": 41,
+ "arguments": [{"type": "long", "name": "message-count"}],
+ "name": "delete-ok"},
+ {"id": 50,
+ "arguments": [{"type": "short", "name": "ticket", "default-value": 1},
+ {"type": "shortstr", "name": "queue", "default-value": ""},
+ {"type": "shortstr", "name": "exchange"},
+ {"type": "shortstr", "name": "routing-key", "default-value": ""},
+ {"type": "table", "name": "arguments", "default-value": {}}],
+ "name": "unbind",
+ "synchronous" : true},
+ {"id": 51,
+ "arguments": [],
+ "name": "unbind-ok"}
+ ],
+ "name": "queue"
+ },
+ {
+ "id": 60,
+ "methods": [{"id": 10,
+ "arguments": [{"type": "long", "name": "prefetch-size", "default-value": 0},
+ {"type": "short", "name": "prefetch-count", "default-value": 0},
+ {"type": "bit", "name": "global", "default-value": false}],
+ "name": "qos",
+ "synchronous" : true},
+ {"id": 11,
+ "arguments": [],
+ "name": "qos-ok"},
+ {"id": 20,
+ "arguments": [{"domain": "access-ticket", "name": "ticket", "default-value": 1},
+ {"type": "shortstr", "name": "queue", "default-value": ""},
+ {"type": "shortstr", "name": "consumer-tag", "default-value": ""},
+ {"type": "bit", "name": "no-local", "default-value": false},
+ {"type": "bit", "name": "no-ack", "default-value": false},
+ {"type": "bit", "name": "exclusive", "default-value": false},
+ {"type": "bit", "name": "nowait", "default-value": false}],
+ "name": "consume",
+ "synchronous" : true},
+ {"id": 21,
+ "arguments": [{"type": "shortstr", "name": "consumer-tag"}],
+ "name": "consume-ok"},
+ {"id": 30,
+ "arguments": [{"type": "shortstr", "name": "consumer-tag"},
+ {"type": "bit", "name": "nowait", "default-value": false}],
+ "name": "cancel",
+ "synchronous" : true},
+ {"id": 31,
+ "arguments": [{"type": "shortstr", "name": "consumer-tag"}],
+ "name": "cancel-ok"},
+ {"content": true,
+ "id": 40,
+ "arguments": [{"type": "short", "name": "ticket", "default-value": 1},
+ {"type": "shortstr", "name": "exchange", "default-value": ""},
+ {"type": "shortstr", "name": "routing-key", "default-value": ""},
+ {"type": "bit", "name": "mandatory", "default-value": false},
+ {"type": "bit", "name": "immediate", "default-value": false}],
+ "name": "publish"},
+ {"content": true,
+ "id": 50,
+ "arguments": [{"type": "short", "name": "reply-code"},
+ {"type": "shortstr", "name": "reply-text", "default-value": ""},
+ {"type": "shortstr", "name": "exchange"},
+ {"type": "shortstr", "name": "routing-key"}],
+ "name": "return"},
+ {"content": true,
+ "id": 60,
+ "arguments": [{"type": "shortstr", "name": "consumer-tag"},
+ {"type": "longlong", "name": "delivery-tag"},
+ {"type": "bit", "name": "redelivered", "default-value": false},
+ {"type": "shortstr", "name": "exchange"},
+ {"type": "shortstr", "name": "routing-key"}],
+ "name": "deliver"},
+ {"id": 70,
+ "arguments": [{"type": "short", "name": "ticket", "default-value": 1},
+ {"type": "shortstr", "name": "queue", "default-value": ""},
+ {"type": "bit", "name": "no-ack", "default-value": false}],
+ "name": "get",
+ "synchronous" : true},
+ {"content": true,
+ "id": 71,
+ "arguments": [{"type": "longlong", "name": "delivery-tag"},
+ {"type": "bit", "name": "redelivered", "default-value": false},
+ {"type": "shortstr", "name": "exchange"},
+ {"type": "shortstr", "name": "routing-key"},
+ {"type": "long", "name": "message-count"}],
+ "name": "get-ok"},
+ {"id": 72,
+ "arguments": [{"type": "shortstr", "name": "cluster-id", "default-value": ""}],
+ "name": "get-empty"},
+ {"id": 80,
+ "arguments": [{"type": "longlong", "name": "delivery-tag", "default-value": 0},
+ {"type": "bit", "name": "multiple", "default-value": false}],
+ "name": "ack"},
+ {"id": 90,
+ "arguments": [{"type": "longlong", "name": "delivery-tag"},
+ {"type": "bit", "name": "requeue", "default-value": true}],
+ "name": "reject"},
+ {"id": 100,
+ "arguments": [{"type": "bit", "name": "requeue", "default-value": false}],
+ "name": "recover-async"},
+ {"id": 110,
+ "arguments": [{"type": "bit", "name": "requeue", "default-value": false}],
+ "name": "recover",
+ "synchronous" : true},
+ {"id": 111,
+ "arguments": [],
+ "name": "recover-ok"}],
+ "name": "basic",
+ "properties": [{"type": "shortstr", "name": "content-type"},
+ {"type": "shortstr", "name": "content-encoding"},
+ {"type": "table", "name": "headers"},
+ {"type": "octet", "name": "delivery-mode"},
+ {"type": "octet", "name": "priority"},
+ {"type": "shortstr", "name": "correlation-id"},
+ {"type": "shortstr", "name": "reply-to"},
+ {"type": "shortstr", "name": "expiration"},
+ {"type": "shortstr", "name": "message-id"},
+ {"type": "timestamp", "name": "timestamp"},
+ {"type": "shortstr", "name": "type"},
+ {"type": "shortstr", "name": "user-id"},
+ {"type": "shortstr", "name": "app-id"},
+ {"type": "shortstr", "name": "cluster-id"}]
+ },
+ {
+ "id": 70,
+ "methods": [{"id": 10,
+ "arguments": [{"type": "long", "name": "prefetch-size", "default-value": 0},
+ {"type": "short", "name": "prefetch-count", "default-value": 0},
+ {"type": "bit", "name": "global", "default-value": false}],
+ "name": "qos",
+ "synchronous" : true},
+ {"id": 11,
+ "arguments": [],
+ "name": "qos-ok"},
+ {"id": 20,
+ "arguments": [{"type": "short", "name": "ticket", "default-value": 1},
+ {"type": "shortstr", "name": "queue", "default-value": ""},
+ {"type": "shortstr", "name": "consumer-tag", "default-value": ""},
+ {"type": "bit", "name": "no-local", "default-value": false},
+ {"type": "bit", "name": "no-ack", "default-value": false},
+ {"type": "bit", "name": "exclusive", "default-value": false},
+ {"type": "bit", "name": "nowait", "default-value": false}],
+ "name": "consume",
+ "synchronous" : true},
+ {"id": 21,
+ "arguments": [{"type": "shortstr", "name": "consumer-tag"}],
+ "name": "consume-ok"},
+ {"id": 30,
+ "arguments": [{"type": "shortstr", "name": "consumer-tag"},
+ {"type": "bit", "name": "nowait", "default-value": false}],
+ "name": "cancel",
+ "synchronous" : true},
+ {"id": 31,
+ "arguments": [{"type": "shortstr", "name": "consumer-tag"}],
+ "name": "cancel-ok"},
+ {"id": 40,
+ "arguments": [{"type": "shortstr", "name": "identifier"},
+ {"type": "longlong", "name": "content-size"}],
+ "name": "open",
+ "synchronous" : true},
+ {"id": 41,
+ "arguments": [{"type": "longlong", "name": "staged-size"}],
+ "name": "open-ok"},
+ {"content": true,
+ "id": 50,
+ "arguments": [],
+ "name": "stage"},
+ {"id": 60,
+ "arguments": [{"type": "short", "name": "ticket", "default-value": 1},
+ {"type": "shortstr", "name": "exchange", "default-value": ""},
+ {"type": "shortstr", "name": "routing-key", "default-value": ""},
+ {"type": "bit", "name": "mandatory", "default-value": false},
+ {"type": "bit", "name": "immediate", "default-value": false},
+ {"type": "shortstr", "name": "identifier"}],
+ "name": "publish"},
+ {"content": true,
+ "id": 70,
+ "arguments": [{"type": "short", "name": "reply-code", "default-value": 200},
+ {"type": "shortstr", "name": "reply-text", "default-value": ""},
+ {"type": "shortstr", "name": "exchange"},
+ {"type": "shortstr", "name": "routing-key"}],
+ "name": "return"},
+ {"id": 80,
+ "arguments": [{"type": "shortstr", "name": "consumer-tag"},
+ {"type": "longlong", "name": "delivery-tag"},
+ {"type": "bit", "name": "redelivered", "default-value": false},
+ {"type": "shortstr", "name": "exchange"},
+ {"type": "shortstr", "name": "routing-key"},
+ {"type": "shortstr", "name": "identifier"}],
+ "name": "deliver"},
+ {"id": 90,
+ "arguments": [{"type": "longlong", "name": "delivery-tag", "default-value": 0},
+ {"type": "bit", "name": "multiple", "default-value": false}],
+ "name": "ack"},
+ {"id": 100,
+ "arguments": [{"type": "longlong", "name": "delivery-tag"},
+ {"type": "bit", "name": "requeue", "default-value": true}],
+ "name": "reject"}],
+ "name": "file",
+ "properties": [{"type": "shortstr", "name": "content-type"},
+ {"type": "shortstr", "name": "content-encoding"},
+ {"type": "table", "name": "headers"},
+ {"type": "octet", "name": "priority"},
+ {"type": "shortstr", "name": "reply-to"},
+ {"type": "shortstr", "name": "message-id"},
+ {"type": "shortstr", "name": "filename"},
+ {"type": "timestamp", "name": "timestamp"},
+ {"type": "shortstr", "name": "cluster-id"}]
+ },
+ {
+ "id": 80,
+ "methods": [{"id": 10,
+ "arguments": [{"type": "long", "name": "prefetch-size", "default-value": 0},
+ {"type": "short", "name": "prefetch-count", "default-value": 0},
+ {"type": "long", "name": "consume-rate", "default-value": 0},
+ {"type": "bit", "name": "global", "default-value": false}],
+ "name": "qos",
+ "synchronous" : true},
+ {"id": 11,
+ "arguments": [],
+ "name": "qos-ok"},
+ {"id": 20,
+ "arguments": [{"type": "short", "name": "ticket", "default-value": 1},
+ {"type": "shortstr", "name": "queue", "default-value": ""},
+ {"type": "shortstr", "name": "consumer-tag", "default-value": ""},
+ {"type": "bit", "name": "no-local", "default-value": false},
+ {"type": "bit", "name": "exclusive", "default-value": false},
+ {"type": "bit", "name": "nowait", "default-value": false}],
+ "name": "consume",
+ "synchronous" : true},
+ {"id": 21,
+ "arguments": [{"type": "shortstr", "name": "consumer-tag"}],
+ "name": "consume-ok"},
+ {"id": 30,
+ "arguments": [{"type": "shortstr", "name": "consumer-tag"},
+ {"type": "bit", "name": "nowait", "default-value": false}],
+ "name": "cancel",
+ "synchronous" : true},
+ {"id": 31,
+ "arguments": [{"type": "shortstr", "name": "consumer-tag"}],
+ "name": "cancel-ok"},
+ {"content": true,
+ "id": 40,
+ "arguments": [{"type": "short", "name": "ticket", "default-value": 1},
+ {"type": "shortstr", "name": "exchange", "default-value": ""},
+ {"type": "shortstr", "name": "routing-key", "default-value": ""},
+ {"type": "bit", "name": "mandatory", "default-value": false},
+ {"type": "bit", "name": "immediate", "default-value": false}],
+ "name": "publish"},
+ {"content": true,
+ "id": 50,
+ "arguments": [{"type": "short", "name": "reply-code", "default-value": 200},
+ {"type": "shortstr", "name": "reply-text", "default-value": ""},
+ {"type": "shortstr", "name": "exchange"},
+ {"type": "shortstr", "name": "routing-key"}],
+ "name": "return"},
+ {"content": true,
+ "id": 60,
+ "arguments": [{"type": "shortstr", "name": "consumer-tag"},
+ {"type": "longlong", "name": "delivery-tag"},
+ {"type": "shortstr", "name": "exchange"},
+ {"type": "shortstr", "name": "queue"}],
+ "name": "deliver"}],
+ "name": "stream",
+ "properties": [{"type": "shortstr", "name": "content-type"},
+ {"type": "shortstr", "name": "content-encoding"},
+ {"type": "table", "name": "headers"},
+ {"type": "octet", "name": "priority"},
+ {"type": "timestamp", "name": "timestamp"}]
+ },
+ {
+ "id": 90,
+ "methods": [{"id": 10,
+ "arguments": [],
+ "name": "select",
+ "synchronous" : true},
+ {"id": 11,
+ "arguments": [],
+ "name": "select-ok"},
+ {"id": 20,
+ "arguments": [],
+ "name": "commit",
+ "synchronous" : true},
+ {"id": 21,
+ "arguments": [],
+ "name": "commit-ok"},
+ {"id": 30,
+ "arguments": [],
+ "name": "rollback",
+ "synchronous" : true},
+ {"id": 31,
+ "arguments": [],
+ "name": "rollback-ok"}],
+ "name": "tx"
+ },
+ {
+ "id": 100,
+ "methods": [{"id": 10,
+ "arguments": [],
+ "name": "select",
+ "synchronous" : true},
+ {"id": 11,
+ "arguments": [],
+ "name": "select-ok"},
+ {"id": 20,
+ "arguments": [{"type": "shortstr", "name": "dtx-identifier"}],
+ "name": "start",
+ "synchronous" : true},
+ {"id": 21,
+ "arguments": [], "name": "start-ok"}],
+ "name": "dtx"
+ },
+ {
+ "id": 110,
+ "methods": [{"content": true,
+ "id": 10,
+ "arguments": [{"type": "table", "name": "meta-data"}],
+ "name": "request"}],
+ "name": "tunnel",
+ "properties": [{"type": "table", "name": "headers"},
+ {"type": "shortstr", "name": "proxy-name"},
+ {"type": "shortstr", "name": "data-name"},
+ {"type": "octet", "name": "durable"},
+ {"type": "octet", "name": "broadcast"}]
+ },
+ {
+ "id": 120,
+ "methods": [{"id": 10,
+ "arguments": [{"type": "octet", "name": "integer-1"},
+ {"type": "short", "name": "integer-2"},
+ {"type": "long", "name": "integer-3"},
+ {"type": "longlong", "name": "integer-4"},
+ {"type": "octet", "name": "operation"}],
+ "name": "integer",
+ "synchronous" : true},
+ {"id": 11,
+ "arguments": [{"type": "longlong", "name": "result"}],
+ "name": "integer-ok"},
+ {"id": 20,
+ "arguments": [{"type": "shortstr", "name": "string-1"},
+ {"type": "longstr", "name": "string-2"},
+ {"type": "octet", "name": "operation"}],
+ "name": "string",
+ "synchronous" : true},
+ {"id": 21,
+ "arguments": [{"type": "longstr", "name": "result"}],
+ "name": "string-ok"},
+ {"id": 30,
+ "arguments": [{"type": "table", "name": "table"},
+ {"type": "octet", "name": "integer-op"},
+ {"type": "octet", "name": "string-op"}],
+ "name": "table",
+ "synchronous" : true},
+ {"id": 31,
+ "arguments": [{"type": "longlong", "name": "integer-result"},
+ {"type": "longstr", "name": "string-result"}],
+ "name": "table-ok"},
+ {"content": true,
+ "id": 40,
+ "arguments": [],
+ "name": "content",
+ "synchronous" : true},
+ {"content": true,
+ "id": 41,
+ "arguments": [{"type": "long", "name": "content-checksum"}],
+ "name": "content-ok"}],
+ "name": "test"
+ }
+ ]
+}
diff --git a/deps/rabbitmq_codegen/amqp-rabbitmq-0.9.1.json b/deps/rabbitmq_codegen/amqp-rabbitmq-0.9.1.json
new file mode 100644
index 0000000000..329d63ee97
--- /dev/null
+++ b/deps/rabbitmq_codegen/amqp-rabbitmq-0.9.1.json
@@ -0,0 +1,483 @@
+{
+ "name": "AMQP",
+ "major-version": 0,
+ "minor-version": 9,
+ "revision": 1,
+ "port": 5672,
+ "copyright": [
+ "Copyright (C) 2008-2020 VMware, Inc. or its affiliates.\n",
+ "\n",
+ "Permission is hereby granted, free of charge, to any person\n",
+ "obtaining a copy of this file (the \"Software\"), to deal in the\n",
+ "Software without restriction, including without limitation the \n",
+ "rights to use, copy, modify, merge, publish, distribute, \n",
+ "sublicense, and/or sell copies of the Software, and to permit \n",
+ "persons to whom the Software is furnished to do so, subject to \n",
+ "the following conditions:\n",
+ "\n",
+ "The above copyright notice and this permission notice shall be\n",
+ "included in all copies or substantial portions of the Software.\n",
+ "\n",
+ "THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n",
+ "EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES\n",
+ "OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n",
+ "NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n",
+ "HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,\n",
+ "WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n",
+ "FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR\n",
+ "OTHER DEALINGS IN THE SOFTWARE.\n",
+ "\n",
+ "Class information entered from amqp_xml0-8.pdf and domain types from amqp-xml-doc0-9.pdf\n",
+ "Updated for 0-9-1 by Tony Garnock-Jones\n",
+ "\n",
+ "b3cb053f15e7b98808c0ccc67f23cb3e amqp_xml0-8.pdf\n",
+ "http://twiststandards.org/?option=com_docman&task=cat_view&gid=28&Itemid=90\n",
+ "8444db91e2949dbecfb2585e9eef6d64 amqp-xml-doc0-9.pdf\n",
+ "https://jira.amqp.org/confluence/download/attachments/720900/amqp-xml-doc0-9.pdf?version=1\n"],
+
+ "domains": [
+ ["bit", "bit"],
+ ["channel-id", "longstr"],
+ ["class-id", "short"],
+ ["consumer-tag", "shortstr"],
+ ["delivery-tag", "longlong"],
+ ["destination", "shortstr"],
+ ["duration", "longlong"],
+ ["exchange-name", "shortstr"],
+ ["long", "long"],
+ ["longlong", "longlong"],
+ ["longstr", "longstr"],
+ ["message-count", "long"],
+ ["method-id", "short"],
+ ["no-ack", "bit"],
+ ["no-local", "bit"],
+ ["octet", "octet"],
+ ["offset", "longlong"],
+ ["path", "shortstr"],
+ ["peer-properties", "table"],
+ ["queue-name", "shortstr"],
+ ["redelivered", "bit"],
+ ["reference", "longstr"],
+ ["reject-code", "short"],
+ ["reject-text", "shortstr"],
+ ["reply-code", "short"],
+ ["reply-text", "shortstr"],
+ ["security-token", "longstr"],
+ ["short", "short"],
+ ["shortstr", "shortstr"],
+ ["table", "table"],
+ ["timestamp", "timestamp"]
+ ],
+
+ "constants": [
+ {"name": "FRAME-METHOD", "value": 1},
+ {"name": "FRAME-HEADER", "value": 2},
+ {"name": "FRAME-BODY", "value": 3},
+ {"name": "FRAME-HEARTBEAT", "value": 8},
+ {"name": "FRAME-MIN-SIZE", "value": 4096},
+ {"name": "FRAME-END", "value": 206},
+ {"name": "REPLY-SUCCESS", "value": 200},
+ {"name": "CONTENT-TOO-LARGE", "value": 311, "class": "soft-error"},
+ {"name": "NO-ROUTE", "value": 312, "class": "soft-error"},
+ {"name": "NO-CONSUMERS", "value": 313, "class": "soft-error"},
+ {"name": "ACCESS-REFUSED", "value": 403, "class": "soft-error"},
+ {"name": "NOT-FOUND", "value": 404, "class": "soft-error"},
+ {"name": "RESOURCE-LOCKED", "value": 405, "class": "soft-error"},
+ {"name": "PRECONDITION-FAILED", "value": 406, "class": "soft-error"},
+ {"name": "CONNECTION-FORCED", "value": 320, "class": "hard-error"},
+ {"name": "INVALID-PATH", "value": 402, "class": "hard-error"},
+ {"name": "FRAME-ERROR", "value": 501, "class": "hard-error"},
+ {"name": "SYNTAX-ERROR", "value": 502, "class": "hard-error"},
+ {"name": "COMMAND-INVALID", "value": 503, "class": "hard-error"},
+ {"name": "CHANNEL-ERROR", "value": 504, "class": "hard-error"},
+ {"name": "UNEXPECTED-FRAME", "value": 505, "class": "hard-error"},
+ {"name": "RESOURCE-ERROR", "value": 506, "class": "hard-error"},
+ {"name": "NOT-ALLOWED", "value": 530, "class": "hard-error"},
+ {"name": "NOT-IMPLEMENTED", "value": 540, "class": "hard-error"},
+ {"name": "INTERNAL-ERROR", "value": 541, "class": "hard-error"}
+ ],
+
+ "classes": [
+ {
+ "id": 10,
+ "methods": [{"id": 10,
+ "arguments": [{"type": "octet", "name": "version-major", "default-value": 0},
+ {"type": "octet", "name": "version-minor", "default-value": 9},
+ {"domain": "peer-properties", "name": "server-properties"},
+ {"type": "longstr", "name": "mechanisms", "default-value": "PLAIN"},
+ {"type": "longstr", "name": "locales", "default-value": "en_US"}],
+ "name": "start",
+ "synchronous" : true},
+ {"id": 11,
+ "arguments": [{"domain": "peer-properties", "name": "client-properties"},
+ {"type": "shortstr", "name": "mechanism", "default-value": "PLAIN"},
+ {"type": "longstr", "name": "response"},
+ {"type": "shortstr", "name": "locale", "default-value": "en_US"}],
+ "name": "start-ok"},
+ {"id": 20,
+ "arguments": [{"type": "longstr", "name": "challenge"}],
+ "name": "secure",
+ "synchronous" : true},
+ {"id": 21,
+ "arguments": [{"type": "longstr", "name": "response"}],
+ "name": "secure-ok"},
+ {"id": 30,
+ "arguments": [{"type": "short", "name": "channel-max", "default-value": 0},
+ {"type": "long", "name": "frame-max", "default-value": 0},
+ {"type": "short", "name": "heartbeat", "default-value": 0}],
+ "name": "tune",
+ "synchronous" : true},
+ {"id": 31,
+ "arguments": [{"type": "short", "name": "channel-max", "default-value": 0},
+ {"type": "long", "name": "frame-max", "default-value": 0},
+ {"type": "short", "name": "heartbeat", "default-value": 0}],
+ "name": "tune-ok"},
+ {"id": 40,
+ "arguments": [{"type": "shortstr", "name": "virtual-host", "default-value": "/"},
+ {"type": "shortstr", "name": "capabilities", "default-value": ""},
+ {"type": "bit", "name": "insist", "default-value": false}],
+ "name": "open",
+ "synchronous" : true},
+ {"id": 41,
+ "arguments": [{"type": "shortstr", "name": "known-hosts", "default-value": ""}],
+ "name": "open-ok"},
+ {"id": 50,
+ "arguments": [{"type": "short", "name": "reply-code"},
+ {"type": "shortstr", "name": "reply-text", "default-value": ""},
+ {"type": "short", "name": "class-id"},
+ {"type": "short", "name": "method-id"}],
+ "name": "close",
+ "synchronous" : true},
+ {"id": 51,
+ "arguments": [],
+ "name": "close-ok"},
+ {"id": 60,
+ "arguments": [{"type": "shortstr", "name": "reason", "default-value": ""}],
+ "name": "blocked"},
+ {"id": 61,
+ "arguments": [],
+ "name": "unblocked"},
+ {"id": 70,
+ "arguments": [{"type": "longstr", "name": "new-secret"},
+ {"type": "shortstr", "name": "reason"}],
+ "name": "update-secret",
+ "synchronous" : true},
+ {"id": 71,
+ "arguments": [],
+ "name": "update-secret-ok"}
+ ],
+ "name": "connection",
+ "properties": []
+ },
+ {
+ "id": 20,
+ "methods": [{"id": 10,
+ "arguments": [{"type": "shortstr", "name": "out-of-band", "default-value": ""}],
+ "name": "open",
+ "synchronous" : true},
+ {"id": 11,
+ "arguments": [{"type": "longstr", "name": "channel-id", "default-value": ""}],
+ "name": "open-ok"},
+ {"id": 20,
+ "arguments": [{"type": "bit", "name": "active"}],
+ "name": "flow",
+ "synchronous" : true},
+ {"id": 21,
+ "arguments": [{"type": "bit", "name": "active"}],
+ "name": "flow-ok"},
+ {"id": 40,
+ "arguments": [{"type": "short", "name": "reply-code"},
+ {"type": "shortstr", "name": "reply-text", "default-value": ""},
+ {"type": "short", "name": "class-id"},
+ {"type": "short", "name": "method-id"}],
+ "name": "close",
+ "synchronous" : true},
+ {"id": 41,
+ "arguments": [],
+ "name": "close-ok"}],
+ "name": "channel"
+ },
+ {
+ "id": 30,
+ "methods": [{"id": 10,
+ "arguments": [{"type": "shortstr", "name": "realm", "default-value": "/data"},
+ {"type": "bit", "name": "exclusive", "default-value": false},
+ {"type": "bit", "name": "passive", "default-value": true},
+ {"type": "bit", "name": "active", "default-value": true},
+ {"type": "bit", "name": "write", "default-value": true},
+ {"type": "bit", "name": "read", "default-value": true}],
+ "name": "request",
+ "synchronous" : true},
+ {"id": 11,
+ "arguments": [{"type": "short", "name": "ticket", "default-value": 1}],
+ "name": "request-ok"}],
+ "name": "access"
+ },
+ {
+ "id": 40,
+ "methods": [{"id": 10,
+ "arguments": [{"type": "short", "name": "ticket", "default-value": 0},
+ {"domain": "exchange-name", "name": "exchange"},
+ {"type": "shortstr", "name": "type", "default-value": "direct"},
+ {"type": "bit", "name": "passive", "default-value": false},
+ {"type": "bit", "name": "durable", "default-value": false},
+ {"type": "bit", "name": "auto-delete", "default-value": false},
+ {"type": "bit", "name": "internal", "default-value": false},
+ {"type": "bit", "name": "nowait", "default-value": false},
+ {"type": "table", "name": "arguments", "default-value": {}}],
+ "name": "declare",
+ "synchronous" : true},
+ {"id": 11,
+ "arguments": [],
+ "name": "declare-ok"},
+ {"id": 20,
+ "arguments": [{"type": "short", "name": "ticket", "default-value": 0},
+ {"domain": "exchange-name", "name": "exchange"},
+ {"type": "bit", "name": "if-unused", "default-value": false},
+ {"type": "bit", "name": "nowait", "default-value": false}],
+ "name": "delete",
+ "synchronous" : true},
+ {"id": 21,
+ "arguments": [],
+ "name": "delete-ok"},
+ {"id": 30,
+ "arguments": [{"type": "short", "name": "ticket", "default-value": 0},
+ {"domain": "exchange-name", "name": "destination"},
+ {"domain": "exchange-name", "name": "source"},
+ {"type": "shortstr", "name": "routing-key", "default-value": ""},
+ {"type": "bit", "name": "nowait", "default-value": false},
+ {"type": "table", "name": "arguments", "default-value": {}}],
+ "name": "bind",
+ "synchronous" : true},
+ {"id": 31,
+ "arguments": [],
+ "name": "bind-ok"},
+ {"id": 40,
+ "arguments": [{"type": "short", "name": "ticket", "default-value": 0},
+ {"domain": "exchange-name", "name": "destination"},
+ {"domain": "exchange-name", "name": "source"},
+ {"type": "shortstr", "name": "routing-key", "default-value": ""},
+ {"type": "bit", "name": "nowait", "default-value": false},
+ {"type": "table", "name": "arguments", "default-value": {}}],
+ "name": "unbind",
+ "synchronous" : true},
+ {"id": 51,
+ "arguments": [],
+ "name": "unbind-ok"}],
+ "name": "exchange"
+ },
+ {
+ "id": 50,
+ "methods": [{"id": 10,
+ "arguments": [{"type": "short", "name": "ticket", "default-value": 0},
+ {"domain": "queue-name", "name": "queue", "default-value": ""},
+ {"type": "bit", "name": "passive", "default-value": false},
+ {"type": "bit", "name": "durable", "default-value": false},
+ {"type": "bit", "name": "exclusive", "default-value": false},
+ {"type": "bit", "name": "auto-delete", "default-value": false},
+ {"type": "bit", "name": "nowait", "default-value": false},
+ {"type": "table", "name": "arguments", "default-value": {}}],
+ "name": "declare",
+ "synchronous" : true},
+ {"id": 11,
+ "arguments": [{"domain": "queue-name", "name": "queue"},
+ {"domain": "message-count", "name": "message-count"},
+ {"type": "long", "name": "consumer-count"}],
+ "name": "declare-ok"},
+ {"id": 20,
+ "arguments": [{"type": "short", "name": "ticket", "default-value": 0},
+ {"domain": "queue-name", "name": "queue", "default-value": ""},
+ {"domain": "exchange-name", "name": "exchange"},
+ {"type": "shortstr", "name": "routing-key", "default-value": ""},
+ {"type": "bit", "name": "nowait", "default-value": false},
+ {"type": "table", "name": "arguments", "default-value": {}}],
+ "name": "bind",
+ "synchronous" : true},
+ {"id": 21,
+ "arguments": [],
+ "name": "bind-ok"},
+ {"id": 30,
+ "arguments": [{"type": "short", "name": "ticket", "default-value": 0},
+ {"domain": "queue-name", "name": "queue", "default-value": ""},
+ {"type": "bit", "name": "nowait", "default-value": false}],
+ "name": "purge",
+ "synchronous" : true},
+ {"id": 31,
+ "arguments": [{"domain": "message-count", "name": "message-count"}],
+ "name": "purge-ok"},
+ {"id": 40,
+ "arguments": [{"type": "short", "name": "ticket", "default-value": 0},
+ {"domain": "queue-name", "name": "queue", "default-value": ""},
+ {"type": "bit", "name": "if-unused", "default-value": false},
+ {"type": "bit", "name": "if-empty", "default-value": false},
+ {"type": "bit", "name": "nowait", "default-value": false}],
+ "name": "delete",
+ "synchronous" : true},
+ {"id": 41,
+ "arguments": [{"domain": "message-count", "name": "message-count"}],
+ "name": "delete-ok"},
+ {"id": 50,
+ "arguments": [{"type": "short", "name": "ticket", "default-value": 0},
+ {"domain": "queue-name", "name": "queue", "default-value": ""},
+ {"domain": "exchange-name", "name": "exchange"},
+ {"type": "shortstr", "name": "routing-key", "default-value": ""},
+ {"type": "table", "name": "arguments", "default-value": {}}],
+ "name": "unbind",
+ "synchronous" : true},
+ {"id": 51,
+ "arguments": [],
+ "name": "unbind-ok"}
+ ],
+ "name": "queue"
+ },
+ {
+ "id": 60,
+ "methods": [{"id": 10,
+ "arguments": [{"type": "long", "name": "prefetch-size", "default-value": 0},
+ {"type": "short", "name": "prefetch-count", "default-value": 0},
+ {"type": "bit", "name": "global", "default-value": false}],
+ "name": "qos",
+ "synchronous" : true},
+ {"id": 11,
+ "arguments": [],
+ "name": "qos-ok"},
+ {"id": 20,
+ "arguments": [{"domain": "short", "name": "ticket", "default-value": 0},
+ {"domain": "queue-name", "name": "queue", "default-value": ""},
+ {"type": "shortstr", "name": "consumer-tag", "default-value": ""},
+ {"type": "bit", "name": "no-local", "default-value": false},
+ {"type": "bit", "name": "no-ack", "default-value": false},
+ {"type": "bit", "name": "exclusive", "default-value": false},
+ {"type": "bit", "name": "nowait", "default-value": false},
+ {"type": "table", "name": "arguments", "default-value": {}}],
+ "name": "consume",
+ "synchronous" : true},
+ {"id": 21,
+ "arguments": [{"type": "shortstr", "name": "consumer-tag"}],
+ "name": "consume-ok"},
+ {"id": 30,
+ "arguments": [{"type": "shortstr", "name": "consumer-tag"},
+ {"type": "bit", "name": "nowait", "default-value": false}],
+ "name": "cancel",
+ "synchronous" : true},
+ {"id": 31,
+ "arguments": [{"type": "shortstr", "name": "consumer-tag"}],
+ "name": "cancel-ok"},
+ {"content": true,
+ "id": 40,
+ "arguments": [{"type": "short", "name": "ticket", "default-value": 0},
+ {"domain": "exchange-name", "name": "exchange", "default-value": ""},
+ {"type": "shortstr", "name": "routing-key", "default-value": ""},
+ {"type": "bit", "name": "mandatory", "default-value": false},
+ {"type": "bit", "name": "immediate", "default-value": false}],
+ "name": "publish"},
+ {"content": true,
+ "id": 50,
+ "arguments": [{"type": "short", "name": "reply-code"},
+ {"type": "shortstr", "name": "reply-text", "default-value": ""},
+ {"domain": "exchange-name", "name": "exchange"},
+ {"type": "shortstr", "name": "routing-key"}],
+ "name": "return"},
+ {"content": true,
+ "id": 60,
+ "arguments": [{"type": "shortstr", "name": "consumer-tag"},
+ {"type": "longlong", "name": "delivery-tag"},
+ {"type": "bit", "name": "redelivered", "default-value": false},
+ {"domain": "exchange-name", "name": "exchange"},
+ {"type": "shortstr", "name": "routing-key"}],
+ "name": "deliver"},
+ {"id": 70,
+ "arguments": [{"type": "short", "name": "ticket", "default-value": 0},
+ {"domain": "queue-name", "name": "queue", "default-value": ""},
+ {"type": "bit", "name": "no-ack", "default-value": false}],
+ "name": "get",
+ "synchronous" : true},
+ {"content": true,
+ "id": 71,
+ "arguments": [{"type": "longlong", "name": "delivery-tag"},
+ {"type": "bit", "name": "redelivered", "default-value": false},
+ {"domain": "exchange-name", "name": "exchange"},
+ {"type": "shortstr", "name": "routing-key"},
+ {"domain": "message-count", "name": "message-count"}],
+ "name": "get-ok"},
+ {"id": 72,
+ "arguments": [{"type": "shortstr", "name": "cluster-id", "default-value": ""}],
+ "name": "get-empty"},
+ {"id": 80,
+ "arguments": [{"type": "longlong", "name": "delivery-tag", "default-value": 0},
+ {"type": "bit", "name": "multiple", "default-value": false}],
+ "name": "ack"},
+ {"id": 90,
+ "arguments": [{"type": "longlong", "name": "delivery-tag"},
+ {"type": "bit", "name": "requeue", "default-value": true}],
+ "name": "reject"},
+ {"id": 100,
+ "arguments": [{"type": "bit", "name": "requeue", "default-value": false}],
+ "name": "recover-async"},
+ {"id": 110,
+ "arguments": [{"type": "bit", "name": "requeue", "default-value": false}],
+ "name": "recover",
+ "synchronous" : true},
+ {"id": 111,
+ "arguments": [],
+ "name": "recover-ok"},
+ {"id": 120,
+ "arguments": [{"type": "longlong", "name": "delivery-tag", "default-value": 0},
+ {"type": "bit", "name": "multiple", "default-value": false},
+ {"type": "bit", "name": "requeue", "default-value": true}],
+ "name": "nack"}],
+ "name": "basic",
+ "properties": [{"type": "shortstr", "name": "content-type"},
+ {"type": "shortstr", "name": "content-encoding"},
+ {"type": "table", "name": "headers"},
+ {"type": "octet", "name": "delivery-mode"},
+ {"type": "octet", "name": "priority"},
+ {"type": "shortstr", "name": "correlation-id"},
+ {"type": "shortstr", "name": "reply-to"},
+ {"type": "shortstr", "name": "expiration"},
+ {"type": "shortstr", "name": "message-id"},
+ {"type": "timestamp", "name": "timestamp"},
+ {"type": "shortstr", "name": "type"},
+ {"type": "shortstr", "name": "user-id"},
+ {"type": "shortstr", "name": "app-id"},
+ {"type": "shortstr", "name": "cluster-id"}]
+ },
+ {
+ "id": 90,
+ "methods": [{"id": 10,
+ "arguments": [],
+ "name": "select",
+ "synchronous" : true},
+ {"id": 11,
+ "arguments": [],
+ "name": "select-ok"},
+ {"id": 20,
+ "arguments": [],
+ "name": "commit",
+ "synchronous" : true},
+ {"id": 21,
+ "arguments": [],
+ "name": "commit-ok"},
+ {"id": 30,
+ "arguments": [],
+ "name": "rollback",
+ "synchronous" : true},
+ {"id": 31,
+ "arguments": [],
+ "name": "rollback-ok"}],
+ "name": "tx"
+ },
+ {
+ "id": 85,
+ "methods": [{"id": 10,
+ "arguments": [
+ {"type": "bit", "name": "nowait", "default-value": false}],
+ "name": "select",
+ "synchronous": true},
+ {"id": 11,
+ "arguments": [],
+ "name": "select-ok"}],
+ "name": "confirm"
+ }
+ ]
+}
diff --git a/deps/rabbitmq_codegen/amqp_codegen.py b/deps/rabbitmq_codegen/amqp_codegen.py
new file mode 100644
index 0000000000..39fb825239
--- /dev/null
+++ b/deps/rabbitmq_codegen/amqp_codegen.py
@@ -0,0 +1,287 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+##
+
+from __future__ import nested_scopes, print_function
+import errno
+import re
+import sys
+import os
+from optparse import OptionParser
+
+try:
+ try:
+ import simplejson as json
+ except ImportError as e:
+ if sys.hexversion >= 0x20600f0:
+ import json
+ else:
+ raise e
+except ImportError:
+ print(" You don't appear to have simplejson.py installed", file = sys.stderr)
+ print(" (an implementation of a JSON reader and writer in Python).", file = sys.stderr)
+ print(" You can install it:", file = sys.stderr)
+ print(" - by running 'apt-get install python-simplejson' on Debian-based systems,", file = sys.stderr)
+ print(" - by running 'yum install python-simplejson' on Fedora/Red Hat system,", file = sys.stderr)
+ print(" - by running 'port install py25-simplejson' on Macports on OS X", file = sys.stderr)
+ print(" (you may need to say 'make PYTHON=python2.5', as well),", file = sys.stderr)
+ print(" - from sources from 'https://pypi.python.org/pypi/simplejson'", file = sys.stderr)
+ print(" - simplejson is a standard json library in the Python core since 2.6", file = sys.stderr)
+ sys.exit(1)
+
+def insert_base_types(d):
+ for t in ['octet', 'shortstr', 'longstr', 'short', 'long',
+ 'longlong', 'bit', 'table', 'timestamp']:
+ d[t] = t
+
+class AmqpSpecFileMergeConflict(Exception): pass
+
+# If ignore_conflicts is true, then we allow acc and new to conflict: the
+# list-based mergers below keep whatever is already in acc and ignore new,
+# while the default scalar merger returns new. If ignore_conflicts is
+# false, acc and new must not conflict.
+
+def default_spec_value_merger(key, acc, new, ignore_conflicts):
+ if acc is None or acc == new or ignore_conflicts:
+ return new
+ else:
+ raise AmqpSpecFileMergeConflict(key, acc, new)
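+
+# Illustrative behaviour of the scalar merger above (editor's sketch, not
+# part of the original file):
+#   default_spec_value_merger("port", None, 5672, False)  # => 5672 (no prior value)
+#   default_spec_value_merger("port", 5672, 5672, False)  # => 5672 (values agree)
+#   default_spec_value_merger("port", 5671, 5672, True)   # => 5672 (conflict tolerated;
+#                                                         #    the new value is returned)
+#   default_spec_value_merger("port", 5671, 5672, False)  # raises AmqpSpecFileMergeConflict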
+
+def extension_info_merger(key, acc, new, ignore_conflicts):
+ return acc + [new]
+
+def domains_merger(key, acc, new, ignore_conflicts):
+ merged = dict((k, v) for [k, v] in acc)
+ for [k, v] in new:
+ if k in merged:
+ if not ignore_conflicts:
+ raise AmqpSpecFileMergeConflict(key, acc, new)
+ else:
+ merged[k] = v
+
+ return [[k, v] for (k, v) in merged.items()]
+
+def merge_dict_lists_by(dict_key, acc, new, ignore_conflicts):
+ acc_index = set(v[dict_key] for v in acc)
+ result = list(acc) # shallow copy
+ for v in new:
+ if v[dict_key] in acc_index:
+ if not ignore_conflicts:
+ raise AmqpSpecFileMergeConflict(dict_key, acc, new)
+ else:
+ result.append(v)
+ return result
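+
+# Illustrative behaviour (editor's sketch, not part of the original file):
+# when a dict_key value collides and ignore_conflicts is true, the entry
+# already in acc is kept and the new one is dropped:
+#   merge_dict_lists_by("name",
+#                       [{"name": "FRAME-END", "value": 206}],
+#                       [{"name": "FRAME-END", "value": 207}], True)
+#   # => [{"name": "FRAME-END", "value": 206}]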
+
+def constants_merger(key, acc, new, ignore_conflicts):
+ return merge_dict_lists_by("name", acc, new, ignore_conflicts)
+
+def methods_merger(classname, acc, new, ignore_conflicts):
+ return merge_dict_lists_by("name", acc, new, ignore_conflicts)
+
+def properties_merger(classname, acc, new, ignore_conflicts):
+ return merge_dict_lists_by("name", acc, new, ignore_conflicts)
+
+def class_merger(acc, new, ignore_conflicts):
+ acc["methods"] = methods_merger(acc["name"],
+ acc["methods"],
+ new["methods"],
+ ignore_conflicts)
+ acc["properties"] = properties_merger(acc["name"],
+ acc.get("properties", []),
+ new.get("properties", []),
+ ignore_conflicts)
+
+def classes_merger(key, acc, new, ignore_conflicts):
+ acc_dict = dict((v["name"], v) for v in acc)
+ result = list(acc) # shallow copy
+ for w in new:
+ if w["name"] in acc_dict:
+ class_merger(acc_dict[w["name"]], w, ignore_conflicts)
+ else:
+ result.append(w)
+ return result
+
+mergers = {
+ "extension": (extension_info_merger, []),
+ "domains": (domains_merger, []),
+ "constants": (constants_merger, []),
+ "classes": (classes_merger, []),
+}
+
+def merge_load_specs(filenames, ignore_conflicts):
+ handles = [open(filename) for filename in filenames]
+ docs = [json.load(handle) for handle in handles]
+ spec = {}
+ for doc in docs:
+ for (key, value) in doc.items():
+ (merger, default_value) = mergers.get(key, (default_spec_value_merger, None))
+ spec[key] = merger(key, spec.get(key, default_value), value, ignore_conflicts)
+ for handle in handles: handle.close()
+ return spec
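+
+# Illustrative result (editor's sketch): loading the base spec together with
+# an extension file folds the extension's classes, constants and domains into
+# the base document via the mergers table above:
+#   spec = merge_load_specs(["amqp-rabbitmq-0.9.1.json",
+#                            "credit_extension.json"], False)
+#   spec["extension"]  # => [{"name": "credit", ...}]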
+
+class AmqpSpec:
+ # Slight wart: use a class member rather than change the ctor signature
+ # to avoid breaking everyone else's code.
+ ignore_conflicts = False
+
+ def __init__(self, filenames):
+ self.spec = merge_load_specs(filenames, AmqpSpec.ignore_conflicts)
+
+ self.major = self.spec['major-version']
+ self.minor = self.spec['minor-version']
+ self.revision = self.spec.get('revision') or 0
+ self.port = self.spec['port']
+
+ self.domains = {}
+ insert_base_types(self.domains)
+ for entry in self.spec['domains']:
+ self.domains[ entry[0] ] = entry[1]
+
+ self.constants = []
+ for d in self.spec['constants']:
+ if 'class' in d:
+ klass = d['class']
+ else:
+ klass = ''
+ self.constants.append((d['name'], d['value'], klass))
+
+ self.classes = []
+ for element in self.spec['classes']:
+ self.classes.append(AmqpClass(self, element))
+
+ def allClasses(self):
+ return self.classes
+
+ def allMethods(self):
+ return [m for c in self.classes for m in c.allMethods()]
+
+ def resolveDomain(self, n):
+ return self.domains[n]
+
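+# Illustrative use of AmqpSpec, defined above (editor's sketch; the spec
+# file name is one of the files added in this commit):
+#   AmqpSpec.ignore_conflicts = True
+#   spec = AmqpSpec(["amqp-rabbitmq-0.9.1.json"])
+#   spec.resolveDomain("queue-name")      # => "shortstr"
+#   [c.name for c in spec.allClasses()]   # => ["connection", "channel", ...]
+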
+class AmqpEntity:
+ def __init__(self, element):
+ self.element = element
+ self.name = element['name']
+
+class AmqpClass(AmqpEntity):
+ def __init__(self, spec, element):
+ AmqpEntity.__init__(self, element)
+ self.spec = spec
+ self.index = int(self.element['id'])
+
+ self.methods = []
+ for method_element in self.element['methods']:
+ self.methods.append(AmqpMethod(self, method_element))
+
+ self.hasContentProperties = False
+ for method in self.methods:
+ if method.hasContent:
+ self.hasContentProperties = True
+ break
+
+ self.fields = []
+ if 'properties' in self.element:
+ index = 0
+ for e in self.element['properties']:
+ self.fields.append(AmqpField(self, e, index))
+ index = index + 1
+
+ def allMethods(self):
+ return self.methods
+
+ def __repr__(self):
+ return 'AmqpClass("' + self.name + '")'
+
+class AmqpMethod(AmqpEntity):
+ def __init__(self, klass, element):
+ AmqpEntity.__init__(self, element)
+ self.klass = klass
+ self.index = int(self.element['id'])
+ if 'synchronous' in self.element:
+ self.isSynchronous = self.element['synchronous']
+ else:
+ self.isSynchronous = False
+ if 'content' in self.element:
+ self.hasContent = self.element['content']
+ else:
+ self.hasContent = False
+ self.arguments = []
+
+ index = 0
+ for argument in element['arguments']:
+ self.arguments.append(AmqpField(self, argument, index))
+ index = index + 1
+
+ def __repr__(self):
+ return 'AmqpMethod("' + self.klass.name + "." + self.name + '" ' + repr(self.arguments) + ')'
+
+class AmqpField(AmqpEntity):
+ def __init__(self, method, element, index):
+ AmqpEntity.__init__(self, element)
+ self.method = method
+ self.index = index
+
+ if 'type' in self.element:
+ self.domain = self.element['type']
+ else:
+ self.domain = self.element['domain']
+ if 'default-value' in self.element:
+ self.defaultvalue = self.element['default-value']
+ else:
+ self.defaultvalue = None
+
+ def __repr__(self):
+ return 'AmqpField("' + self.name + '")'
+
+def do_main(header_fn, body_fn):
+ do_main_dict({"header": header_fn, "body": body_fn})
+
+def do_main_dict(funcDict):
+ def usage():
+ print("Usage:", file = sys.stderr)
+ print(" {0} <function> <path_to_amqp_spec.json>... <path_to_output_file>".format(sys.argv[0]), file = sys.stderr)
+ print(" where <function> is one of: {0}".format(", ".join([k for k in funcDict.keys()])), file = sys.stderr)
+
+ def mkdir_p(path):
+ try:
+ os.makedirs(path)
+ except OSError as exc: # Python >2.5
+ if exc.errno == errno.EEXIST and os.path.isdir(path):
+ pass
+ else:
+ raise
+
+ def execute(fn, amqp_specs, out_file):
+ stdout = sys.stdout
+ mkdir_p(os.path.dirname(out_file))
+ f = open(out_file, 'w')
+ success = False
+ try:
+ sys.stdout = f
+ fn(amqp_specs)
+ success = True
+ finally:
+ sys.stdout = stdout
+ f.close()
+ if not success:
+ os.remove(out_file)
+
+ parser = OptionParser()
+ parser.add_option("--ignore-conflicts", action="store_true", dest="ignore_conflicts", default=False)
+ (options, args) = parser.parse_args()
+
+ if len(args) < 3:
+ usage()
+ sys.exit(1)
+ else:
+ function = args[0]
+ sources = args[1:-1]
+ dest = args[-1]
+ AmqpSpec.ignore_conflicts = options.ignore_conflicts
+ if function in funcDict:
+ execute(funcDict[function], sources, dest)
+ else:
+ usage()
+ sys.exit(1)
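+
+# Illustrative invocation of a generator built on do_main/do_main_dict
+# (editor's sketch; "my_codegen.py" stands for a hypothetical script that
+# defines header/body generator functions and passes them to do_main):
+#   python my_codegen.py header amqp-rabbitmq-0.9.1.json gen/spec_header.hrl
+#   python my_codegen.py body --ignore-conflicts \
+#       amqp-rabbitmq-0.9.1.json credit_extension.json gen/spec_body.erl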
diff --git a/deps/rabbitmq_codegen/credit_extension.json b/deps/rabbitmq_codegen/credit_extension.json
new file mode 100644
index 0000000000..0fedebb0e2
--- /dev/null
+++ b/deps/rabbitmq_codegen/credit_extension.json
@@ -0,0 +1,54 @@
+{
+ "extension": {
+ "name": "credit",
+ "version": "0.1",
+ "status": [
+ "This extension is used internally by the broker and plugins. ",
+ "It is NOT intended to be used by regular clients over the ",
+ "network. This extension is subject to change without notice; ",
+ "hence you are strongly discouraged from building clients ",
+ "which use it."],
+ "copyright": [
+ "Copyright (C) 2008-2020 VMware, Inc. or its affiliates.\n",
+ "\n",
+ "Permission is hereby granted, free of charge, to any person\n",
+ "obtaining a copy of this file (the \"Software\"), to deal in the\n",
+ "Software without restriction, including without limitation the \n",
+ "rights to use, copy, modify, merge, publish, distribute, \n",
+ "sublicense, and/or sell copies of the Software, and to permit \n",
+ "persons to whom the Software is furnished to do so, subject to \n",
+ "the following conditions:\n",
+ "\n",
+ "The above copyright notice and this permission notice shall be\n",
+ "included in all copies or substantial portions of the Software.\n",
+ "\n",
+ "THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n",
+ "EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES\n",
+ "OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n",
+ "NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n",
+ "HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,\n",
+ "WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n",
+ "FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR\n",
+ "OTHER DEALINGS IN THE SOFTWARE.\n"]
+ },
+
+ "classes": [
+ {
+ "id": 60,
+ "methods": [{"id": 200,
+ "arguments": [{"type": "shortstr", "name": "consumer-tag", "default-value": ""},
+ {"type": "long", "name": "credit"},
+ {"type": "bit", "name": "drain"}],
+ "name": "credit",
+ "synchronous" : true},
+ {"id": 201,
+ "arguments": [{"type": "long", "name": "available"}],
+ "name": "credit-ok"},
+ {"id": 202,
+ "arguments": [{"type": "shortstr", "name": "consumer-tag", "default-value": ""},
+ {"type": "long", "name": "credit-drained"}],
+ "name": "credit-drained"}],
+ "name": "basic"
+ }
+ ]
+}
diff --git a/deps/rabbitmq_codegen/demo_extension.json b/deps/rabbitmq_codegen/demo_extension.json
new file mode 100644
index 0000000000..8eff39474e
--- /dev/null
+++ b/deps/rabbitmq_codegen/demo_extension.json
@@ -0,0 +1,18 @@
+{
+ "extension": {
+ "name": "demo",
+ "version": "1.0",
+ "copyright": "Copyright (C) 2009-2020 VMware, Inc. or its affiliates."
+ },
+ "domains": [
+ ["foo-domain", "shortstr"]
+ ],
+ "constants": [
+ {"name": "FOO-CONSTANT", "value": 121212}
+ ],
+ "classes": [
+ {"name": "demo",
+ "id": 555,
+ "methods": [{"name": "one", "id": 1, "arguments": []}]}
+ ]
+}
diff --git a/deps/rabbitmq_codegen/license_info b/deps/rabbitmq_codegen/license_info
new file mode 100644
index 0000000000..b64ea5bec9
--- /dev/null
+++ b/deps/rabbitmq_codegen/license_info
@@ -0,0 +1,4 @@
+The files amqp-rabbitmq-0.8.json and amqp-rabbitmq-0.9.1.json are
+"Copyright (C) 2008-2020 VMware, Inc. or its affiliates. and are covered by the MIT
+license.
+
diff --git a/deps/rabbitmq_consistent_hash_exchange/.gitignore b/deps/rabbitmq_consistent_hash_exchange/.gitignore
new file mode 100644
index 0000000000..f39007c3cb
--- /dev/null
+++ b/deps/rabbitmq_consistent_hash_exchange/.gitignore
@@ -0,0 +1,19 @@
+.sw?
+.*.sw?
+*.beam
+/.erlang.mk/
+/cover/
+/debug/
+/deps/
+/doc/
+/ebin/
+/escript/
+/escript.lock
+/logs/
+/plugins/
+/plugins.lock
+/sbin/
+/sbin.lock
+/xrefr
+
+rabbitmq_consistent_hash_exchange.d
diff --git a/deps/rabbitmq_consistent_hash_exchange/.travis.yml b/deps/rabbitmq_consistent_hash_exchange/.travis.yml
new file mode 100644
index 0000000000..c77454ed7d
--- /dev/null
+++ b/deps/rabbitmq_consistent_hash_exchange/.travis.yml
@@ -0,0 +1,61 @@
+# vim:sw=2:et:
+
+os: linux
+dist: xenial
+language: elixir
+notifications:
+ email:
+ recipients:
+ - alerts@rabbitmq.com
+ on_success: never
+ on_failure: always
+addons:
+ apt:
+ packages:
+ - awscli
+cache:
+ apt: true
+env:
+ global:
+ - secure: gP1vHQMh1avpsW+alOoGMWz1+oAdUW2MFbOLLInmkW2woP/TXxDM6ZgZMxHPWt1d9uzdWR3IaIh0Jq7dIMOG7UgeHKu5RP3YUXjjS7rKChDTH/zTNapHahn5dJwL9NAbOGcqvYTJhJtEA4CzfqiFlgEUH0ckDDDN8UeMiYvRWMY=
+ - secure: WlvV5Vlid2EiEZsVZ8VI8WSvwO5vevIsRNH2W2oiMI89q2qqJA2Ec0JTiqb0ve1KMUNnpdHs66fCCMN8g0TFNLErureFiVh+zobYRfDAJecSrXEdlZdJ+af9zG1H6aNOux01jLq4wLGAoppSVBDsOgtu3d1pwXiOuKezsm7LmDA=
+
+ # $base_rmq_ref is used by rabbitmq-components.mk to select the
+ # appropriate branch for dependencies.
+ - base_rmq_ref=master
+
+elixir:
+ - '1.9'
+otp_release:
+ - '21.3'
+ - '22.2'
+
+install:
+ # This project being an Erlang one (we just set language to Elixir
+ # to ensure it is installed), we don't want Travis to run mix(1)
+ # automatically as it will break.
+ skip
+
+script:
+ # $current_rmq_ref is also used by rabbitmq-components.mk to select
+ # the appropriate branch for dependencies.
+ - make check-rabbitmq-components.mk
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+ - make xref
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+ - make tests
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+
+after_failure:
+ - |
+ cd "$TRAVIS_BUILD_DIR"
+ if test -d logs && test "$AWS_ACCESS_KEY_ID" && test "$AWS_SECRET_ACCESS_KEY"; then
+ archive_name="$(basename "$TRAVIS_REPO_SLUG")-$TRAVIS_JOB_NUMBER"
+
+ tar -c --transform "s/^logs/${archive_name}/" -f - logs | \
+ xz > "${archive_name}.tar.xz"
+
+ aws s3 cp "${archive_name}.tar.xz" s3://server-release-pipeline/travis-ci-logs/ \
+ --region eu-west-1 \
+ --acl public-read
+ fi
diff --git a/deps/rabbitmq_consistent_hash_exchange/CODE_OF_CONDUCT.md b/deps/rabbitmq_consistent_hash_exchange/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000000..08697906fd
--- /dev/null
+++ b/deps/rabbitmq_consistent_hash_exchange/CODE_OF_CONDUCT.md
@@ -0,0 +1,44 @@
+# Contributor Code of Conduct
+
+As contributors and maintainers of this project, and in the interest of fostering an open
+and welcoming community, we pledge to respect all people who contribute through reporting
+issues, posting feature requests, updating documentation, submitting pull requests or
+patches, and other activities.
+
+We are committed to making participation in this project a harassment-free experience for
+everyone, regardless of level of experience, gender, gender identity and expression,
+sexual orientation, disability, personal appearance, body size, race, ethnicity, age,
+religion, or nationality.
+
+Examples of unacceptable behavior by participants include:
+
+ * The use of sexualized language or imagery
+ * Personal attacks
+ * Trolling or insulting/derogatory comments
+ * Public or private harassment
+ * Publishing others' private information, such as physical or electronic addresses,
+ without explicit permission
+ * Other unethical or unprofessional conduct
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments,
+commits, code, wiki edits, issues, and other contributions that are not aligned to this
+Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors
+that they deem inappropriate, threatening, offensive, or harmful.
+
+By adopting this Code of Conduct, project maintainers commit themselves to fairly and
+consistently applying these principles to every aspect of managing this project. Project
+maintainers who do not follow or enforce the Code of Conduct may be permanently removed
+from the project team.
+
+This Code of Conduct applies both within project spaces and in public spaces when an
+individual is representing the project or its community.
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by
+contacting a project maintainer at [info@rabbitmq.com](mailto:info@rabbitmq.com). All complaints will
+be reviewed and investigated and will result in a response that is deemed necessary and
+appropriate to the circumstances. Maintainers are obligated to maintain confidentiality
+with regard to the reporter of an incident.
+
+This Code of Conduct is adapted from the
+[Contributor Covenant](https://contributor-covenant.org), version 1.3.0, available at
+[contributor-covenant.org/version/1/3/0/](https://contributor-covenant.org/version/1/3/0/)
diff --git a/deps/rabbitmq_consistent_hash_exchange/CONTRIBUTING.md b/deps/rabbitmq_consistent_hash_exchange/CONTRIBUTING.md
new file mode 100644
index 0000000000..592e7ced57
--- /dev/null
+++ b/deps/rabbitmq_consistent_hash_exchange/CONTRIBUTING.md
@@ -0,0 +1,99 @@
+Thank you for using RabbitMQ and for taking the time to contribute to the project.
+This document has two main parts:
+
+ * when and how to file GitHub issues for RabbitMQ projects
+ * how to submit pull requests
+
+They are intended to save you and the RabbitMQ maintainers some time, so please
+take a moment to read through them.
+
+## Overview
+
+### GitHub issues
+
+The RabbitMQ team uses GitHub issues for _specific actionable items_ that
+engineers can work on. This assumes the following:
+
+* GitHub issues are not used for questions, investigations, root cause
+ analysis, discussions of potential issues, etc (as defined by this team)
+* Enough information is provided by the reporter for maintainers to work with
+
+The team receives many questions through various venues every single
+day. Frequently, these questions do not include the necessary details
+the team needs to begin useful work. GitHub issues can very quickly
+turn into something impossible to navigate and make sense
+of. Because of this, questions, investigations, root cause analysis,
+and discussions of potential features are all considered to be
+[mailing list][rmq-users] material. If you are unsure where to begin,
+the [RabbitMQ users mailing list][rmq-users] is the right place.
+
+Getting all the details necessary to reproduce an issue, make a
+conclusion or even form a hypothesis about what's happening can take a
+fair amount of time. Please help others help you by providing a way to
+reproduce the behavior you're observing, or at least sharing as much
+relevant information as possible on the [RabbitMQ users mailing
+list][rmq-users].
+
+Please provide versions of the software used:
+
+ * RabbitMQ server
+ * Erlang
+ * Operating system version (and distribution, if applicable)
+ * All client libraries used
+ * RabbitMQ plugins (if applicable)
+
+The following information greatly helps in investigating and reproducing issues:
+
+ * RabbitMQ server logs
+ * A code example or terminal transcript that can be used to reproduce
+ * Full exception stack traces (a single line message is not enough!)
+ * `rabbitmqctl report` and `rabbitmqctl environment` output
+ * Other relevant details about the environment and workload, e.g. a traffic capture
+ * Feel free to edit out hostnames and other potentially sensitive information.
+
+To make collecting much of this and other environment information easier, use
+the [`rabbitmq-collect-env`][rmq-collect-env] script. It will produce an archive with
+server logs, operating system logs, output of certain diagnostics commands and so on.
+Please note that **no effort is made to scrub any information that may be sensitive**.
+
+### Pull Requests
+
+RabbitMQ projects use pull requests to discuss, collaborate on and accept code contributions.
+Pull requests are the primary place for discussing code changes.
+
+Here's the recommended workflow:
+
+ * [Fork the repository][github-fork] or repositories you plan on contributing to. If multiple
+ repositories are involved in addressing the same issue, please use the same branch name
+ in each repository
+ * Create a branch with a descriptive name in the relevant repositories
+ * Make your changes, run tests (usually with `make tests`), commit with a
+ [descriptive message][git-commit-msgs], push to your fork
+ * Submit pull requests with an explanation of what has been changed and **why**
+ * Submit a filled out and signed [Contributor Agreement][ca-agreement] if needed (see below)
+ * Be patient. We will get to your pull request eventually
+
+If what you are going to work on is a substantial change, please first
+ask the core team for their opinion on the [RabbitMQ users mailing list][rmq-users].
+
+## Code of Conduct
+
+See [CODE_OF_CONDUCT.md](./CODE_OF_CONDUCT.md).
+
+## Contributor Agreement
+
+If you want to contribute a non-trivial change, please submit a signed
+copy of our [Contributor Agreement][ca-agreement] around the time you
+submit your pull request. This will make it much easier (in some
+cases, possible) for the RabbitMQ team at Pivotal to merge your
+contribution.
+
+## Where to Ask Questions
+
+If something isn't clear, feel free to ask on our [mailing list][rmq-users].
+
+[rmq-collect-env]: https://github.com/rabbitmq/support-tools/blob/master/scripts/rabbitmq-collect-env
+[git-commit-msgs]: https://chris.beams.io/posts/git-commit/
+[rmq-users]: https://groups.google.com/forum/#!forum/rabbitmq-users
+[ca-agreement]: https://cla.pivotal.io/sign/rabbitmq
+[github-fork]: https://help.github.com/articles/fork-a-repo/
diff --git a/deps/rabbitmq_consistent_hash_exchange/LICENSE b/deps/rabbitmq_consistent_hash_exchange/LICENSE
new file mode 100644
index 0000000000..f2da65d175
--- /dev/null
+++ b/deps/rabbitmq_consistent_hash_exchange/LICENSE
@@ -0,0 +1,4 @@
+This package is licensed under the MPL 2.0. For the MPL 2.0, please see LICENSE-MPL-RabbitMQ.
+
+If you have any questions regarding licensing, please contact us at
+info@rabbitmq.com.
diff --git a/deps/rabbitmq_consistent_hash_exchange/LICENSE-MPL-RabbitMQ b/deps/rabbitmq_consistent_hash_exchange/LICENSE-MPL-RabbitMQ
new file mode 100644
index 0000000000..14e2f777f6
--- /dev/null
+++ b/deps/rabbitmq_consistent_hash_exchange/LICENSE-MPL-RabbitMQ
@@ -0,0 +1,373 @@
+Mozilla Public License Version 2.0
+==================================
+
+1. Definitions
+--------------
+
+1.1. "Contributor"
+ means each individual or legal entity that creates, contributes to
+ the creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+ means the combination of the Contributions of others (if any) used
+ by a Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+ means Source Code Form to which the initial Contributor has attached
+ the notice in Exhibit A, the Executable Form of such Source Code
+ Form, and Modifications of such Source Code Form, in each case
+ including portions thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ (a) that the initial Contributor has attached the notice described
+ in Exhibit B to the Covered Software; or
+
+ (b) that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the
+ terms of a Secondary License.
+
+1.6. "Executable Form"
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+ means a work that combines Covered Software with other material, in
+ a separate file or files, that is not Covered Software.
+
+1.8. "License"
+ means this document.
+
+1.9. "Licensable"
+ means having the right to grant, to the maximum extent possible,
+ whether at the time of the initial grant or subsequently, any and
+ all of the rights conveyed by this License.
+
+1.10. "Modifications"
+ means any of the following:
+
+ (a) any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered
+ Software; or
+
+ (b) any new file in Source Code Form that contains any Covered
+ Software.
+
+1.11. "Patent Claims" of a Contributor
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the
+ License, by the making, using, selling, offering for sale, having
+ made, import, or transfer of either its Contributions or its
+ Contributor Version.
+
+1.12. "Secondary License"
+ means either the GNU General Public License, Version 2.0, the GNU
+ Lesser General Public License, Version 2.1, the GNU Affero General
+ Public License, Version 3.0, or any later versions of those
+ licenses.
+
+1.13. "Source Code Form"
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that
+ controls, is controlled by, or is under common control with You. For
+ purposes of this definition, "control" means (a) the power, direct
+ or indirect, to cause the direction or management of such entity,
+ whether by contract or otherwise, or (b) ownership of more than
+ fifty percent (50%) of the outstanding shares or beneficial
+ ownership of such entity.
+
+2. License Grants and Conditions
+--------------------------------
+
+2.1. Grants
+
+Each Contributor hereby grants You a world-wide, royalty-free,
+non-exclusive license:
+
+(a) under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+(b) under Patent Claims of such Contributor to make, use, sell, offer
+ for sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+The licenses granted in Section 2.1 with respect to any Contribution
+become effective for each Contribution on the date the Contributor first
+distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+The licenses granted in this Section 2 are the only rights granted under
+this License. No additional rights or licenses will be implied from the
+distribution or licensing of Covered Software under this License.
+Notwithstanding Section 2.1(b) above, no patent license is granted by a
+Contributor:
+
+(a) for any code that a Contributor has removed from Covered Software;
+ or
+
+(b) for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+(c) under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+This License does not grant any rights in the trademarks, service marks,
+or logos of any Contributor (except as may be necessary to comply with
+the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+No Contributor makes additional grants as a result of Your choice to
+distribute the Covered Software under a subsequent version of this
+License (see Section 10.2) or under the terms of a Secondary License (if
+permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+Each Contributor represents that the Contributor believes its
+Contributions are its original creation(s) or it has sufficient rights
+to grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+This License is not intended to limit any rights You have under
+applicable copyright doctrines of fair use, fair dealing, or other
+equivalents.
+
+2.7. Conditions
+
+Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
+in Section 2.1.
+
+3. Responsibilities
+-------------------
+
+3.1. Distribution of Source Form
+
+All distribution of Covered Software in Source Code Form, including any
+Modifications that You create or to which You contribute, must be under
+the terms of this License. You must inform recipients that the Source
+Code Form of the Covered Software is governed by the terms of this
+License, and how they can obtain a copy of this License. You may not
+attempt to alter or restrict the recipients' rights in the Source Code
+Form.
+
+3.2. Distribution of Executable Form
+
+If You distribute Covered Software in Executable Form then:
+
+(a) such Covered Software must also be made available in Source Code
+ Form, as described in Section 3.1, and You must inform recipients of
+ the Executable Form how they can obtain a copy of such Source Code
+ Form by reasonable means in a timely manner, at a charge no more
+ than the cost of distribution to the recipient; and
+
+(b) You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter
+ the recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+You may create and distribute a Larger Work under terms of Your choice,
+provided that You also comply with the requirements of this License for
+the Covered Software. If the Larger Work is a combination of Covered
+Software with a work governed by one or more Secondary Licenses, and the
+Covered Software is not Incompatible With Secondary Licenses, this
+License permits You to additionally distribute such Covered Software
+under the terms of such Secondary License(s), so that the recipient of
+the Larger Work may, at their option, further distribute the Covered
+Software under the terms of either this License or such Secondary
+License(s).
+
+3.4. Notices
+
+You may not remove or alter the substance of any license notices
+(including copyright notices, patent notices, disclaimers of warranty,
+or limitations of liability) contained within the Source Code Form of
+the Covered Software, except that You may alter any license notices to
+the extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+You may choose to offer, and to charge a fee for, warranty, support,
+indemnity or liability obligations to one or more recipients of Covered
+Software. However, You may do so only on Your own behalf, and not on
+behalf of any Contributor. You must make it absolutely clear that any
+such warranty, support, indemnity, or liability obligation is offered by
+You alone, and You hereby agree to indemnify every Contributor for any
+liability incurred by such Contributor as a result of warranty, support,
+indemnity or liability terms You offer. You may include additional
+disclaimers of warranty and limitations of liability specific to any
+jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+---------------------------------------------------
+
+If it is impossible for You to comply with any of the terms of this
+License with respect to some or all of the Covered Software due to
+statute, judicial order, or regulation then You must: (a) comply with
+the terms of this License to the maximum extent possible; and (b)
+describe the limitations and the code they affect. Such description must
+be placed in a text file included with all distributions of the Covered
+Software under this License. Except to the extent prohibited by statute
+or regulation, such description must be sufficiently detailed for a
+recipient of ordinary skill to be able to understand it.
+
+5. Termination
+--------------
+
+5.1. The rights granted under this License will terminate automatically
+if You fail to comply with any of its terms. However, if You become
+compliant, then the rights granted under this License from a particular
+Contributor are reinstated (a) provisionally, unless and until such
+Contributor explicitly and finally terminates Your grants, and (b) on an
+ongoing basis, if such Contributor fails to notify You of the
+non-compliance by some reasonable means prior to 60 days after You have
+come back into compliance. Moreover, Your grants from a particular
+Contributor are reinstated on an ongoing basis if such Contributor
+notifies You of the non-compliance by some reasonable means, this is the
+first time You have received notice of non-compliance with this License
+from such Contributor, and You become compliant prior to 30 days after
+Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+infringement claim (excluding declaratory judgment actions,
+counter-claims, and cross-claims) alleging that a Contributor Version
+directly or indirectly infringes any patent, then the rights granted to
+You by any and all Contributors for the Covered Software under Section
+2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all
+end user license agreements (excluding distributors and resellers) which
+have been validly granted by You or Your distributors under this License
+prior to termination shall survive termination.
+
+************************************************************************
+* *
+* 6. Disclaimer of Warranty *
+* ------------------------- *
+* *
+* Covered Software is provided under this License on an "as is" *
+* basis, without warranty of any kind, either expressed, implied, or *
+* statutory, including, without limitation, warranties that the *
+* Covered Software is free of defects, merchantable, fit for a *
+* particular purpose or non-infringing. The entire risk as to the *
+* quality and performance of the Covered Software is with You. *
+* Should any Covered Software prove defective in any respect, You *
+* (not any Contributor) assume the cost of any necessary servicing, *
+* repair, or correction. This disclaimer of warranty constitutes an *
+* essential part of this License. No use of any Covered Software is *
+* authorized under this License except under this disclaimer. *
+* *
+************************************************************************
+
+************************************************************************
+* *
+* 7. Limitation of Liability *
+* -------------------------- *
+* *
+* Under no circumstances and under no legal theory, whether tort *
+* (including negligence), contract, or otherwise, shall any *
+* Contributor, or anyone who distributes Covered Software as *
+* permitted above, be liable to You for any direct, indirect, *
+* special, incidental, or consequential damages of any character *
+* including, without limitation, damages for lost profits, loss of *
+* goodwill, work stoppage, computer failure or malfunction, or any *
+* and all other commercial damages or losses, even if such party *
+* shall have been informed of the possibility of such damages. This *
+* limitation of liability shall not apply to liability for death or *
+* personal injury resulting from such party's negligence to the *
+* extent applicable law prohibits such limitation. Some *
+* jurisdictions do not allow the exclusion or limitation of *
+* incidental or consequential damages, so this exclusion and *
+* limitation may not apply to You. *
+* *
+************************************************************************
+
+8. Litigation
+-------------
+
+Any litigation relating to this License may be brought only in the
+courts of a jurisdiction where the defendant maintains its principal
+place of business and such litigation shall be governed by laws of that
+jurisdiction, without reference to its conflict-of-law provisions.
+Nothing in this Section shall prevent a party's ability to bring
+cross-claims or counter-claims.
+
+9. Miscellaneous
+----------------
+
+This License represents the complete agreement concerning the subject
+matter hereof. If any provision of this License is held to be
+unenforceable, such provision shall be reformed only to the extent
+necessary to make it enforceable. Any law or regulation which provides
+that the language of a contract shall be construed against the drafter
+shall not be used to construe this License against a Contributor.
+
+10. Versions of the License
+---------------------------
+
+10.1. New Versions
+
+Mozilla Foundation is the license steward. Except as provided in Section
+10.3, no one other than the license steward has the right to modify or
+publish new versions of this License. Each version will be given a
+distinguishing version number.
+
+10.2. Effect of New Versions
+
+You may distribute the Covered Software under the terms of the version
+of the License under which You originally received the Covered Software,
+or under the terms of any subsequent version published by the license
+steward.
+
+10.3. Modified Versions
+
+If you create software not governed by this License, and you want to
+create a new license for such software, you may create and use a
+modified version of this License if you rename the license and remove
+any references to the name of the license steward (except to note that
+such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+Licenses
+
+If You choose to distribute Source Code Form that is Incompatible With
+Secondary Licenses under the terms of this version of the License, the
+notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+-------------------------------------------
+
+ This Source Code Form is subject to the terms of the Mozilla Public
+ License, v. 2.0. If a copy of the MPL was not distributed with this
+ file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular
+file, then You may include the notice in a location (such as a LICENSE
+file in a relevant directory) where a recipient would be likely to look
+for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+---------------------------------------------------------
+
+ This Source Code Form is "Incompatible With Secondary Licenses", as
+ defined by the Mozilla Public License, v. 2.0.
diff --git a/deps/rabbitmq_consistent_hash_exchange/Makefile b/deps/rabbitmq_consistent_hash_exchange/Makefile
new file mode 100644
index 0000000000..c63fd515a5
--- /dev/null
+++ b/deps/rabbitmq_consistent_hash_exchange/Makefile
@@ -0,0 +1,21 @@
+PROJECT = rabbitmq_consistent_hash_exchange
+PROJECT_DESCRIPTION = Consistent Hash Exchange Type
+
+define PROJECT_APP_EXTRA_KEYS
+ {broker_version_requirements, []}
+endef
+
+DEPS = rabbit_common rabbit
+TEST_DEPS = rabbitmq_ct_helpers rabbitmq_ct_client_helpers amqp_client
+
+DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-plugin.mk
+DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk
+
+# FIXME: Use erlang.mk patched for RabbitMQ, while waiting for PRs to be
+# reviewed and merged.
+
+ERLANG_MK_REPO = https://github.com/rabbitmq/erlang.mk.git
+ERLANG_MK_COMMIT = rabbitmq-tmp
+
+include rabbitmq-components.mk
+include erlang.mk
diff --git a/deps/rabbitmq_consistent_hash_exchange/README.md b/deps/rabbitmq_consistent_hash_exchange/README.md
new file mode 100644
index 0000000000..1b288faab1
--- /dev/null
+++ b/deps/rabbitmq_consistent_hash_exchange/README.md
@@ -0,0 +1,821 @@
+# RabbitMQ Consistent Hash Exchange Type
+
+## Introduction
+
+This plugin adds a consistent-hash exchange type to RabbitMQ. This
+exchange type uses consistent hashing (intro blog posts: [one](http://www.martinbroadhurst.com/Consistent-Hash-Ring.html), [two](http://michaelnielsen.org/blog/consistent-hashing/), [three](https://akshatm.svbtle.com/consistent-hash-rings-theory-and-implementation)) to distribute
+messages between the bound queues. It is recommended to get a basic understanding of the
+concept before evaluating this plugin and its alternatives.
+
+[rabbitmq-sharding](https://github.com/rabbitmq/rabbitmq-sharding) is another plugin
+that provides a way to partition a stream of messages among a set of consumers
+while trading off total stream ordering for processing parallelism.
+
+## Problem Definition
+
+In various scenarios it may be desired to ensure that messages sent to an
+exchange are reasonably [uniformly distributed](https://en.wikipedia.org/wiki/Uniform_distribution_(discrete)) across a number of
+queues based on the routing key of the message, a [nominated
+header](#routing-on-a-header), or a [message property](#routing-on-a-message-property).
+Technically this can be accomplished using a direct or topic exchange,
+binding queues to that exchange and then publishing messages to that exchange that
+match the various binding keys.
+
+However, arranging things this way can be problematic:
+
+1. It is difficult to ensure that all queues bound to the exchange
+will receive a (roughly) equal number of messages (distribution uniformity)
+without baking into the publishers quite a lot of knowledge about the number of queues and
+their bindings.
+
+2. When the number of queues changes, it is not easy to ensure that the
+new topology still distributes messages between the different queues
+evenly.
+
+[Consistent Hashing](https://en.wikipedia.org/wiki/Consistent_hashing)
+is a hashing technique whereby each bucket appears at multiple points
+throughout the hash space, and the bucket selected is the nearest
+higher (or lower, it doesn't matter, provided it's consistent) bucket
+to the computed hash (and the hash space wraps around). The effect of
+this is that when a new bucket is added or an existing bucket removed,
+only a very few hashes change which bucket they are routed to.
+
+## Supported RabbitMQ Versions
+
+This plugin ships with RabbitMQ.
+
+## Supported Erlang Versions
+
+This plugin supports the same [Erlang versions](https://rabbitmq.com/which-erlang.html) as RabbitMQ core.
+
+## Enabling the Plugin
+
+This plugin ships with RabbitMQ. Like all other [RabbitMQ plugins](https://www.rabbitmq.com/plugins.html),
+it has to be enabled before it can be used:
+
+``` sh
+rabbitmq-plugins enable rabbitmq_consistent_hash_exchange
+```
+
+## Provided Exchange Type
+
+The exchange type is `"x-consistent-hash"`.
+
+## How It Works
+
+In the case of Consistent Hashing as an exchange type, the hash is
+calculated from a message property (most commonly the routing key).
+
+When a queue is bound to this exchange, it is assigned one or more
+partitions on the consistent hashing ring depending on its binding weight
+(covered below).
+
+For every hashed property value (e.g. the routing key), a hash position is computed
+and a corresponding hash ring partition is picked. That partition corresponds
+to a bound queue, and the message is routed to that queue.
+
+Assuming a reasonably even routing key distribution of inbound messages,
+routed messages should be reasonably evenly distributed across all
+ring partitions, and thus queues according to their binding weights.
+
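+To make the mechanics above concrete, below is a minimal, illustrative
+Python model of a weighted hash ring. It is a sketch only, not the
+plugin's actual implementation, and its queue names, ring size, and
+hash function are arbitrary choices:
+
+``` python
+# Illustrative toy model of a weighted consistent hash ring; not the
+# plugin's implementation.
+import bisect
+import hashlib
+from collections import Counter
+
+RING_SIZE = 2 ** 32
+# A classic ring needs many points per unit of weight to smooth out
+# placement variance; the plugin gets even spreads from a jump
+# consistent hash instead (see Implementation Details below).
+POINTS_PER_WEIGHT_UNIT = 200
+
+def position(value):
+    # Hash a string to a stable position on the ring.
+    return int(hashlib.md5(value.encode()).hexdigest(), 16) % RING_SIZE
+
+class Ring:
+    def __init__(self):
+        self.positions = []  # sorted ring positions
+        self.queue_at = {}   # ring position -> queue name
+
+    def bind(self, queue, weight):
+        for i in range(weight * POINTS_PER_WEIGHT_UNIT):
+            p = position("%s:%d" % (queue, i))
+            bisect.insort(self.positions, p)
+            self.queue_at[p] = queue
+
+    def route(self, routing_key):
+        # The nearest bucket at or after the key's hash wins; the ring
+        # wraps around at the end.
+        i = bisect.bisect_left(self.positions, position(routing_key))
+        return self.queue_at[self.positions[i % len(self.positions)]]
+
+ring = Ring()
+for q, w in [("q0", 1), ("q1", 1), ("q2", 2), ("q3", 2)]:
+    ring.bind(q, w)
+
+# q2 and q3 each receive roughly twice as many keys as q0 and q1.
+print(Counter(ring.route(str(i)) for i in range(100000)))
+```
+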
+### Binding Weights
+
+When a queue is bound to a Consistent Hash exchange, the binding key
+is a number-as-a-string which indicates the binding weight: the number
+of buckets (sections of the range) that will be associated with the
+target queue.
+
+### Consistent Hashing-based Routing
+
+The hashing distributes *routing keys* among queues, not *message payloads*
+among queues; all messages with the same routing key will go to the
+same queue. So, if you wish for queue A to receive twice as many
+routing keys as are routed to queue B, then you bind
+queue A with a binding key that is twice the number (as a string --
+binding keys are always strings) used in the binding
+to queue B. Note this is only the case if your routing keys are
+evenly distributed in the hash space. If, for example, only two
+distinct routing keys are used on all the messages, there's a chance
+both keys will route (consistently!) to the same queue, even though
+other queues have higher values in their binding key. With a larger
+set of routing keys used, the statistical distribution of routing
+keys approaches the ratios of the binding keys.
+
+Each message gets delivered to at most one queue. On average, a
+message gets delivered to exactly one queue. Concurrent binding changes
+and queue primary replica failures can affect this, but only transiently;
+the average holds.
+
+### Node Restart Effects
+
+The consistent hashing ring is stored in memory and will be re-populated
+from exchange bindings when the node boots. Relative positioning of queues
+on the ring is not guaranteed to be the same between restarts. In practice
+this means that after a restart, all queues will still receive roughly
+the same number of messages routed to them (assuming routing key distribution
+does not change) but a given routing key now **may route to a different queue**.
+
+In other words, this exchange type provides consistent message distribution
+between queues but cannot guarantee stable routing [queue] locality for a message
+with a fixed routing key.
+
+
+## Usage Example
+
+### The Topology
+
+In the example below, the queues `q0` and `q1` are each bound to the
+exchange `e` with a weight of 1, which means they'll each get
+roughly the same number of routing keys. The queues `q2` and `q3`,
+however, get 2 buckets each (their weight is 2), so they'll each get roughly the
+same number of routing keys too, but approximately twice
+as many as `q0` and `q1`.
+
+Note the `routing_key`s in the bindings are numbers-as-strings. This
+is because AMQP 0-9-1 specifies the `routing_key` field must be a string.
+
+### Choosing Appropriate Weight Values
+
+The example uses low weight values intentionally.
+Higher values will reduce throughput of the exchange, primarily for
+workloads that experience a high binding churn (queues are bound to
+and unbound from a consistent hash exchange frequently).
+Single digit weight values are recommended (and usually sufficient).
+
+### Inspecting Message Counts
+
+The example then publishes 100,000 messages to the
+exchange with random routing keys; the queues will get a share of
+messages roughly proportional to their binding key ratios. After this has
+completed, message distribution between queues can be inspected using
+RabbitMQ's management UI and `rabbitmqctl list_queues`.
+
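+As a scriptable alternative to the management UI, per-queue message
+counts can also be read with a passive `queue.declare`. A short sketch
+using Pika (the queue names are assumed to match the Python example
+below):
+
+``` python
+import pika
+
+conn = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
+ch = conn.channel()
+
+for q in ["q1", "q2", "q3", "q4"]:
+    # passive=True only checks the queue and returns its current depth
+    ok = ch.queue_declare(queue=q, passive=True)
+    print(q, ok.method.message_count)
+
+conn.close()
+```
+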
+## Routing Keys and Uniformity of Distribution
+
+It is important to ensure that the messages being published
+to the exchange have varying routing keys: if a very
+small set of routing keys are being used then there's a possibility of
+messages not being evenly distributed between the bound queues. With a
+large number of bound queues some queues may get no messages routed to
+them at all.
+
+If pseudo-random or unique values such as client/session/request identifiers
+are used for routing keys (or another property used for hashing) then
+reasonably uniform distribution should be observed.
+
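+For instance, unique identifiers make good hash inputs. A minimal
+sketch with Pika (it assumes the exchange `e` from the Python example
+below has already been declared and bound):
+
+``` python
+import uuid
+import pika
+
+conn = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
+ch = conn.channel()
+
+for _ in range(1000):
+    # a fresh UUID per message spreads hashes evenly across the ring
+    ch.basic_publish(exchange="e", routing_key=str(uuid.uuid4()), body="")
+
+conn.close()
+```
+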
+### Executable Versions
+
+Executable versions of some of the code examples can be found under [./examples](./examples).
+
+### Code Example in Python
+
+This version of the example uses [Pika](https://pika.readthedocs.io/en/stable/), the most widely used Python client for RabbitMQ:
+
+``` python
+#!/usr/bin/env python
+
+import pika
+import time
+
+conn = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
+ch = conn.channel()
+
+ch.exchange_declare(exchange="e", exchange_type="x-consistent-hash", durable=True)
+
+for q in ["q1", "q2", "q3", "q4"]:
+ ch.queue_declare(queue=q, durable=True)
+ ch.queue_purge(queue=q)
+
+for q in ["q1", "q2"]:
+ ch.queue_bind(exchange="e", queue=q, routing_key="1")
+
+for q in ["q3", "q4"]:
+ ch.queue_bind(exchange="e", queue=q, routing_key="2")
+
+n = 100000
+
+for rk in map(str, range(n)):
+ ch.basic_publish(exchange="e", routing_key=rk, body="")
+print("Done publishing.")
+
+print("Waiting for routing to finish...")
+# in order to keep this example simpler and focused,
+# wait for a few seconds instead of using publisher confirms and waiting for those
+time.sleep(5)
+
+print("Done.")
+conn.close()
+```
+
+### Code Example in Java
+
+Below is a version of the example that uses
+the official [RabbitMQ Java client](https://www.rabbitmq.com/api-guide.html):
+
+``` java
+package com.rabbitmq.examples;
+
+import com.rabbitmq.client.*;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.concurrent.TimeoutException;
+
+public class ConsistentHashExchangeExample1 {
+ private static String CONSISTENT_HASH_EXCHANGE_TYPE = "x-consistent-hash";
+
+ public static void main(String[] argv) throws IOException, TimeoutException, InterruptedException {
+ ConnectionFactory cf = new ConnectionFactory();
+ Connection conn = cf.newConnection();
+ Channel ch = conn.createChannel();
+
+ for (String q : Arrays.asList("q1", "q2", "q3", "q4")) {
+ ch.queueDeclare(q, true, false, false, null);
+ ch.queuePurge(q);
+ }
+
+ ch.exchangeDeclare("e1", CONSISTENT_HASH_EXCHANGE_TYPE, true, false, null);
+
+ for (String q : Arrays.asList("q1", "q2")) {
+ ch.queueBind(q, "e1", "1");
+ }
+
+ for (String q : Arrays.asList("q3", "q4")) {
+ ch.queueBind(q, "e1", "2");
+ }
+
+ ch.confirmSelect();
+
+ AMQP.BasicProperties.Builder bldr = new AMQP.BasicProperties.Builder();
+ for (int i = 0; i < 100000; i++) {
+ ch.basicPublish("e1", String.valueOf(i), bldr.build(), "".getBytes("UTF-8"));
+ }
+
+ ch.waitForConfirmsOrDie(10000);
+
+ System.out.println("Done publishing!");
+ System.out.println("Evaluating results...");
+ // wait for one stats emission interval so that queue counters
+ // are up-to-date in the management UI
+    Thread.sleep(5000);
+
+ System.out.println("Done.");
+ conn.close();
+ }
+}
+```
+
+### Code Example in Ruby
+
+Below is a version that uses [Bunny](http://rubybunny.info), the most widely used
+Ruby client for RabbitMQ:
+
+``` ruby
+#!/usr/bin/env ruby
+
+require 'bunny'
+
+conn = Bunny.new
+conn.start
+
+ch = conn.create_channel
+ch.confirm_select
+
+q1 = ch.queue("q1", durable: true)
+q2 = ch.queue("q2", durable: true)
+q3 = ch.queue("q3", durable: true)
+q4 = ch.queue("q4", durable: true)
+
+[q1, q2, q3, q4].each(&:purge)
+
+x = ch.exchange("chx", type: "x-consistent-hash", durable: true)
+
+[q1, q2].each { |q| q.bind(x, routing_key: "1") }
+[q3, q4].each { |q| q.bind(x, routing_key: "2") }
+
+n = 100_000
+n.times do |i|
+ x.publish(i.to_s, routing_key: i.to_s)
+end
+
+ch.wait_for_confirms
+puts "Done publishing!"
+
+# wait for queue stats to be emitted so that management UI numbers
+# are up-to-date
+sleep 5
+conn.close
+puts "Done"
+```
+
+
+### Code Example in Erlang
+
+Below is a version of the example that uses
+the [RabbitMQ Erlang client](https://www.rabbitmq.com/erlang-client-user-guide.html):
+
+``` erlang
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+test() ->
+ {ok, Conn} = amqp_connection:start(#amqp_params_network{}),
+ {ok, Chan} = amqp_connection:open_channel(Conn),
+ Queues = [<<"q0">>, <<"q1">>, <<"q2">>, <<"q3">>],
+ amqp_channel:call(Chan,
+ #'exchange.declare'{
+ exchange = <<"e">>, type = <<"x-consistent-hash">>
+ }),
+ [amqp_channel:call(Chan, #'queue.declare'{queue = Q}) || Q <- Queues],
+ [amqp_channel:call(Chan, #'queue.bind'{queue = Q,
+ exchange = <<"e">>,
+ routing_key = <<"1">>})
+ || Q <- [<<"q0">>, <<"q1">>]],
+ [amqp_channel:call(Chan, #'queue.bind' {queue = Q,
+ exchange = <<"e">>,
+ routing_key = <<"2">>})
+ || Q <- [<<"q2">>, <<"q3">>]],
+    Msg = #amqp_msg{props = #'P_basic'{}, payload = <<>>},
+    [amqp_channel:call(Chan,
+                       #'basic.publish'{
+                         exchange = <<"e">>,
+                         %% a fresh pseudo-random routing key for every message
+                         routing_key = integer_to_binary(rand:uniform(1000000))
+                       }, Msg) || _ <- lists:seq(1, 100000)],
+    amqp_connection:close(Conn),
+    ok.
+```
+
+## Configuration
+
+### Routing on a Header
+
+Under most circumstances the routing key is a good choice for something to
+hash. However, in some cases it is necessary to use the routing key for some other
+purpose (for example with more complex routing involving exchange to
+exchange bindings). In this case it is possible to configure the consistent hash
+exchange to route based on a named header instead. To do this, declare the
+exchange with a string argument called "hash-header" naming the header to
+be used.
+
+When a `"hash-header"` is specified, the chosen header **must be provided**.
+If published messages do not contain the header, they will all get
+routed to the same **arbitrarily chosen** queue.
+
+#### Code Example in Python
+
+``` python
+#!/usr/bin/env python
+
+import pika
+import time
+
+conn = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
+ch = conn.channel()
+
+args = {u'hash-header': u'hash-on'}
+ch.exchange_declare(exchange='e2',
+ exchange_type='x-consistent-hash',
+ arguments=args,
+ durable=True)
+
+for q in ['q1', 'q2', 'q3', 'q4']:
+ ch.queue_declare(queue=q, durable=True)
+ ch.queue_purge(queue=q)
+
+for q in ['q1', 'q2']:
+ ch.queue_bind(exchange='e2', queue=q, routing_key='1')
+
+for q in ['q3', 'q4']:
+ ch.queue_bind(exchange='e2', queue=q, routing_key='2')
+
+n = 100000
+
+for rk in map(str, range(n)):
+ hdrs = {u'hash-on': rk}
+ ch.basic_publish(exchange='e2',
+ routing_key='',
+ body='',
+ properties=pika.BasicProperties(content_type='text/plain',
+ delivery_mode=2,
+ headers=hdrs))
+print('Done publishing.')
+
+print('Waiting for routing to finish...')
+# in order to keep this example simpler and focused,
+# wait for a few seconds instead of using publisher confirms and waiting for those
+time.sleep(5)
+
+print('Done.')
+conn.close()
+```
+
+#### Code Example in Java
+
+``` java
+package com.rabbitmq.examples;
+
+import com.rabbitmq.client.*;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.TimeoutException;
+
+public class ConsistentHashExchangeExample2 {
+ public static final String EXCHANGE = "e2";
+ private static String EXCHANGE_TYPE = "x-consistent-hash";
+
+ public static void main(String[] argv) throws IOException, TimeoutException, InterruptedException {
+ ConnectionFactory cf = new ConnectionFactory();
+ Connection conn = cf.newConnection();
+ Channel ch = conn.createChannel();
+
+ for (String q : Arrays.asList("q1", "q2", "q3", "q4")) {
+ ch.queueDeclare(q, true, false, false, null);
+ ch.queuePurge(q);
+ }
+
+ Map<String, Object> args = new HashMap<>();
+ args.put("hash-header", "hash-on");
+ ch.exchangeDeclare(EXCHANGE, EXCHANGE_TYPE, true, false, args);
+
+ for (String q : Arrays.asList("q1", "q2")) {
+ ch.queueBind(q, EXCHANGE, "1");
+ }
+
+ for (String q : Arrays.asList("q3", "q4")) {
+ ch.queueBind(q, EXCHANGE, "2");
+ }
+
+ ch.confirmSelect();
+
+
+ for (int i = 0; i < 100000; i++) {
+ AMQP.BasicProperties.Builder bldr = new AMQP.BasicProperties.Builder();
+ Map<String, Object> hdrs = new HashMap<>();
+ hdrs.put("hash-on", String.valueOf(i));
+ ch.basicPublish(EXCHANGE, "", bldr.headers(hdrs).build(), "".getBytes("UTF-8"));
+ }
+
+ ch.waitForConfirmsOrDie(10000);
+
+ System.out.println("Done publishing!");
+ System.out.println("Evaluating results...");
+ // wait for one stats emission interval so that queue counters
+ // are up-to-date in the management UI
+    Thread.sleep(5000);
+
+ System.out.println("Done.");
+ conn.close();
+ }
+}
+```
+
+#### Code Example in Ruby
+
+``` ruby
+#!/usr/bin/env ruby
+
+require 'bundler'
+Bundler.setup(:default, :test)
+require 'bunny'
+
+conn = Bunny.new
+conn.start
+
+ch = conn.create_channel
+ch.confirm_select
+
+q1 = ch.queue("q1", durable: true)
+q2 = ch.queue("q2", durable: true)
+q3 = ch.queue("q3", durable: true)
+q4 = ch.queue("q4", durable: true)
+
+[q1, q2, q3, q4].each(&:purge)
+
+x = ch.exchange("x2", type: "x-consistent-hash", durable: true, arguments: {"hash-header" => "hash-on"})
+
+[q1, q2].each { |q| q.bind(x, routing_key: "1") }
+[q3, q4].each { |q| q.bind(x, routing_key: "2") }
+
+n = 100_000
+(0..n).map(&:to_s).each do |i|
+ x.publish(i.to_s, routing_key: rand.to_s, headers: {"hash-on": i})
+end
+
+ch.wait_for_confirms
+puts "Done publishing!"
+
+# wait for queue stats to be emitted so that management UI numbers
+# are up-to-date
+sleep 5
+conn.close
+puts "Done"
+```
+
+#### Code Example in Erlang
+
+With RabbitMQ Erlang client:
+
+``` erlang
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+test() ->
+ {ok, Conn} = amqp_connection:start(#amqp_params_network{}),
+ {ok, Chan} = amqp_connection:open_channel(Conn),
+ Queues = [<<"q0">>, <<"q1">>, <<"q2">>, <<"q3">>],
+ amqp_channel:call(
+ Chan, #'exchange.declare'{
+ exchange = <<"e">>,
+ type = <<"x-consistent-hash">>,
+ arguments = [{<<"hash-header">>, longstr, <<"hash-on">>}]
+ }),
+ [amqp_channel:call(Chan, #'queue.declare'{queue = Q}) || Q <- Queues],
+ [amqp_channel:call(Chan, #'queue.bind' {queue = Q,
+ exchange = <<"e">>,
+ routing_key = <<"1">>})
+ || Q <- [<<"q0">>, <<"q1">>]],
+ [amqp_channel:call(Chan, #'queue.bind' {queue = Q,
+ exchange = <<"e">>,
+ routing_key = <<"2">>})
+ || Q <- [<<"q2">>, <<"q3">>]],
+    [begin
+         %% a fresh pseudo-random header value for every message
+         RK = integer_to_binary(rand:uniform(1000000)),
+         Msg = #amqp_msg{props = #'P_basic'{headers = [{<<"hash-on">>, longstr, RK}]},
+                         payload = <<>>},
+         amqp_channel:call(Chan,
+                           #'basic.publish'{exchange = <<"e">>,
+                                            routing_key = <<"">>},
+                           Msg)
+     end || _ <- lists:seq(1, 100000)],
+    amqp_connection:close(Conn),
+    ok.
+```
+
+
+### Routing on a Message Property
+
+In addition to a value in the header property, you can also route on the
+``message_id``, ``correlation_id``, or ``timestamp`` message properties. To do so,
+declare the exchange with a string argument called ``"hash-property"`` naming the
+property to be used.
+
+When a `"hash-property"` is specified, the chosen property **must be provided**.
+If published messages do not contain the property, they will all get
+routed to the same **arbitrarily chosen** queue.
+
+#### Code Example in Python
+
+``` python
+#!/usr/bin/env python
+
+import pika
+import time
+
+conn = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
+ch = conn.channel()
+
+args = {u'hash-property': u'message_id'}
+ch.exchange_declare(exchange='e3',
+ exchange_type='x-consistent-hash',
+ arguments=args,
+ durable=True)
+
+for q in ['q1', 'q2', 'q3', 'q4']:
+ ch.queue_declare(queue=q, durable=True)
+ ch.queue_purge(queue=q)
+
+for q in ['q1', 'q2']:
+ ch.queue_bind(exchange='e3', queue=q, routing_key='1')
+
+for q in ['q3', 'q4']:
+ ch.queue_bind(exchange='e3', queue=q, routing_key='2')
+
+n = 100000
+
+for rk in map(str, range(n)):
+ ch.basic_publish(exchange='e3',
+ routing_key='',
+ body='',
+ properties=pika.BasicProperties(content_type='text/plain',
+ delivery_mode=2,
+ message_id=rk))
+print('Done publishing.')
+
+print('Waiting for routing to finish...')
+# in order to keep this example simpler and focused,
+# wait for a few seconds instead of using publisher confirms and waiting for those
+time.sleep(5)
+
+print('Done.')
+conn.close()
+```
+
+#### Code Example in Java
+
+``` java
+package com.rabbitmq.examples;
+
+import com.rabbitmq.client.*;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.TimeoutException;
+
+public class ConsistentHashExchangeExample3 {
+ public static final String EXCHANGE = "e3";
+ private static String EXCHANGE_TYPE = "x-consistent-hash";
+
+ public static void main(String[] argv) throws IOException, TimeoutException, InterruptedException {
+ ConnectionFactory cf = new ConnectionFactory();
+ Connection conn = cf.newConnection();
+ Channel ch = conn.createChannel();
+
+ for (String q : Arrays.asList("q1", "q2", "q3", "q4")) {
+ ch.queueDeclare(q, true, false, false, null);
+ ch.queuePurge(q);
+ }
+
+ Map<String, Object> args = new HashMap<>();
+ args.put("hash-property", "message_id");
+ ch.exchangeDeclare(EXCHANGE, EXCHANGE_TYPE, true, false, args);
+
+ for (String q : Arrays.asList("q1", "q2")) {
+ ch.queueBind(q, EXCHANGE, "1");
+ }
+
+ for (String q : Arrays.asList("q3", "q4")) {
+ ch.queueBind(q, EXCHANGE, "2");
+ }
+
+ ch.confirmSelect();
+
+
+ for (int i = 0; i < 100000; i++) {
+ AMQP.BasicProperties.Builder bldr = new AMQP.BasicProperties.Builder();
+ ch.basicPublish(EXCHANGE, "", bldr.messageId(String.valueOf(i)).build(), "".getBytes("UTF-8"));
+ }
+
+ ch.waitForConfirmsOrDie(10000);
+
+ System.out.println("Done publishing!");
+ System.out.println("Evaluating results...");
+ // wait for one stats emission interval so that queue counters
+ // are up-to-date in the management UI
+    Thread.sleep(5000);
+
+ System.out.println("Done.");
+ conn.close();
+ }
+}
+```
+
+#### Code Example in Ruby
+
+``` ruby
+#!/usr/bin/env ruby
+
+require 'bundler'
+Bundler.setup(:default, :test)
+require 'bunny'
+
+conn = Bunny.new
+conn.start
+
+ch = conn.create_channel
+ch.confirm_select
+
+q1 = ch.queue("q1", durable: true)
+q2 = ch.queue("q2", durable: true)
+q3 = ch.queue("q3", durable: true)
+q4 = ch.queue("q4", durable: true)
+
+[q1, q2, q3, q4].each(&:purge)
+
+x = ch.exchange("x3", type: "x-consistent-hash", durable: true, arguments: {"hash-property" => "message_id"})
+
+[q1, q2].each { |q| q.bind(x, routing_key: "1") }
+[q3, q4].each { |q| q.bind(x, routing_key: "2") }
+
+n = 100_000
+(0..n).map(&:to_s).each do |i|
+ x.publish(i.to_s, routing_key: rand.to_s, message_id: i)
+end
+
+ch.wait_for_confirms
+puts "Done publishing!"
+
+# wait for queue stats to be emitted so that management UI numbers
+# are up-to-date
+sleep 5
+conn.close
+puts "Done"
+```
+
+#### Code Example in Erlang
+
+``` erlang
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+test() ->
+ {ok, Conn} = amqp_connection:start(#amqp_params_network{}),
+ {ok, Chan} = amqp_connection:open_channel(Conn),
+ Queues = [<<"q0">>, <<"q1">>, <<"q2">>, <<"q3">>],
+ amqp_channel:call(Chan,
+ #'exchange.declare'{
+ exchange = <<"e">>, type = <<"x-consistent-hash">>,
+          arguments = [{<<"hash-property">>, longstr, <<"message_id">>}]
+ }),
+ [amqp_channel:call(Chan, #'queue.declare'{queue = Q}) || Q <- Queues],
+ [amqp_channel:call(Chan, #'queue.bind'{queue = Q,
+ exchange = <<"e">>,
+ routing_key = <<"1">>})
+ || Q <- [<<"q0">>, <<"q1">>]],
+ [amqp_channel:call(Chan, #'queue.bind' {queue = Q,
+ exchange = <<"e">>,
+ routing_key = <<"2">>})
+ || Q <- [<<"q2">>, <<"q3">>]],
+    [begin
+         %% a fresh pseudo-random message id for every message
+         RK = integer_to_binary(rand:uniform(1000000)),
+         Msg = #amqp_msg{props = #'P_basic'{message_id = RK}, payload = <<>>},
+         amqp_channel:call(Chan,
+                           #'basic.publish'{exchange = <<"e">>,
+                                            routing_key = <<"">>},
+                           Msg)
+     end || _ <- lists:seq(1, 100000)],
+    amqp_connection:close(Conn),
+    ok.
+```
+
+
+## Getting Help
+
+If you have questions or need help, feel free to ask on the
+[RabbitMQ mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
+
+
+## Implementation Details
+
+The hash function used in this plugin as of RabbitMQ 3.7.8
+is [A Fast, Minimal Memory, Consistent Hash Algorithm](https://arxiv.org/abs/1406.2294) by Lamping and Veach. Erlang's `phash2` function is used to convert non-integer values to
+integers that the jump consistent hash function can operate on.
+
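+For reference, the jump consistent hash from the paper is only a few
+lines long. Below is a direct Python transcription, shown for
+illustration (the plugin's actual implementation is in Erlang):
+
+``` python
+def jump_consistent_hash(key, num_buckets):
+    # Lamping & Veach, "A Fast, Minimal Memory, Consistent Hash
+    # Algorithm": maps a 64-bit integer key to a bucket in
+    # [0, num_buckets) such that almost no keys move when the
+    # bucket count changes.
+    b, j = -1, 0
+    while j < num_buckets:
+        b = j
+        key = (key * 2862933555777941757 + 1) % 2 ** 64
+        j = int((b + 1) * ((1 << 31) / ((key >> 33) + 1)))
+    return b
+
+# Non-integer values such as routing keys are first reduced to an
+# integer (the plugin uses Erlang's phash2 for this).
+print(jump_consistent_hash(1234567, 4))
+```
+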
+### Distribution Uniformity
+
+A Chi-squared test was used to evaluate distribution uniformity. Below are the
+results for 19 bucket counts (2 through 20) and how they compare to two commonly used `p-value`
+thresholds:
+
+|Number of buckets|Chi-squared statistic|Degrees of freedom|Critical value at p = 0.05|Critical value at p = 0.01|
+|-|-----------|------------------|--------|--------|
+|2|0.5|1|3.84|6.64|
+|3|0.946|2|5.99|9.21|
+|4|2.939|3|7.81|11.35|
+|5|2.163|4|9.49|13.28|
+|6|2.592|5|11.07|15.09|
+|7|4.654|6|12.59|16.81|
+|8|7.566|7|14.07|18.48|
+|9|5.847|8|15.51|20.09|
+|10|9.790|9|16.92|21.67|
+|11|13.448|10|18.31|23.21|
+|12|12.432|11|19.68|24.73|
+|13|12.338|12|21.02|26.22|
+|14|9.898|13|22.36|27.69|
+|15|8.513|14|23.69|29.14|
+|16|6.997|15|24.99|30.58|
+|17|6.279|16|26.30|32.00|
+|18|10.373|17|27.59|33.41|
+|19|12.935|18|28.87|34.81|
+|20|11.895|19|30.14|36.19|
+
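+The statistic itself is straightforward to reproduce from per-queue
+message counts. A minimal sketch, assuming equally weighted bindings
+and made-up counts:
+
+``` python
+def chi_squared(observed):
+    # Pearson's chi-squared statistic against a uniform expectation.
+    expected = sum(observed) / len(observed)
+    return sum((o - expected) ** 2 / expected for o in observed)
+
+# Hypothetical counts for 4 queues after publishing 100,000 messages;
+# compare the result to the critical value for k - 1 = 3 degrees of
+# freedom in the table above (7.81 at p = 0.05).
+print(chi_squared([24890, 25110, 25060, 24940]))  # => 1.256
+```
+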
+### Binding Operations and Bucket Management
+
+When a queue is bound to a consistent hash exchange, the protocol method, `queue.bind`,
+carries a weight in the routing (binding) key. The binding is given
+a number of buckets on the hash ring (hash space) equal to the weight.
+When a queue is unbound, the buckets added for the binding are deleted.
+These two operations use linear algorithms to update the ring.
+
+To perform routing, the exchange extracts the appropriate value for hashing,
+hashes it, retrieves a bucket number from the ring, and then resolves the
+bucket to its associated queue.
+
+The implementation assumes there is only one binding between a consistent hash
+exchange and a queue. Having more than one binding is unnecessary because
+queue weight can be provided at the time of binding.
+
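+Because the weight lives in the binding key, changing a queue's weight
+amounts to replacing its single binding. A hypothetical Pika sketch
+(exchange and queue names are arbitrary):
+
+``` python
+import pika
+
+conn = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
+ch = conn.channel()
+
+ch.exchange_declare(exchange="e", exchange_type="x-consistent-hash", durable=True)
+ch.queue_declare(queue="q1", durable=True)
+ch.queue_bind(exchange="e", queue="q1", routing_key="1")
+
+# To change q1's weight from 1 to 3, replace the binding rather than
+# adding a second one:
+ch.queue_unbind(exchange="e", queue="q1", routing_key="1")
+ch.queue_bind(exchange="e", queue="q1", routing_key="3")
+
+conn.close()
+```
+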
+### Clustered Environments
+
+The state of the hash space is distributed across all cluster nodes.
+
+
+## Continuous Integration
+
+[![Build Status](https://travis-ci.org/rabbitmq/rabbitmq-consistent-hash-exchange.svg?branch=master)](https://travis-ci.org/rabbitmq/rabbitmq-consistent-hash-exchange)
+
+## Copyright and License
+
+(c) 2013-2020 VMware, Inc. or its affiliates.
+
+Released under the Mozilla Public License 2.0, same as RabbitMQ.
+See [LICENSE](./LICENSE) for details.
diff --git a/deps/rabbitmq_consistent_hash_exchange/erlang.mk b/deps/rabbitmq_consistent_hash_exchange/erlang.mk
new file mode 100644
index 0000000000..fce4be0b0a
--- /dev/null
+++ b/deps/rabbitmq_consistent_hash_exchange/erlang.mk
@@ -0,0 +1,7808 @@
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+.PHONY: all app apps deps search rel relup docs install-docs check tests clean distclean help erlang-mk
+
+ERLANG_MK_FILENAME := $(realpath $(lastword $(MAKEFILE_LIST)))
+export ERLANG_MK_FILENAME
+
+ERLANG_MK_VERSION = 2019.07.01-40-geb3e4b0
+ERLANG_MK_WITHOUT =
+
+# Make 3.81 and 3.82 are deprecated.
+
+ifeq ($(MAKELEVEL)$(MAKE_VERSION),03.81)
+$(warning Please upgrade to GNU Make 4 or later: https://erlang.mk/guide/installation.html)
+endif
+
+ifeq ($(MAKELEVEL)$(MAKE_VERSION),03.82)
+$(warning Please upgrade to GNU Make 4 or later: https://erlang.mk/guide/installation.html)
+endif
+
+# Core configuration.
+
+PROJECT ?= $(notdir $(CURDIR))
+PROJECT := $(strip $(PROJECT))
+
+PROJECT_VERSION ?= rolling
+PROJECT_MOD ?= $(PROJECT)_app
+PROJECT_ENV ?= []
+
+# Verbosity.
+
+V ?= 0
+
+verbose_0 = @
+verbose_2 = set -x;
+verbose = $(verbose_$(V))
+
+ifeq ($(V),3)
+SHELL := $(SHELL) -x
+endif
+
+gen_verbose_0 = @echo " GEN " $@;
+gen_verbose_2 = set -x;
+gen_verbose = $(gen_verbose_$(V))
+
+gen_verbose_esc_0 = @echo " GEN " $$@;
+gen_verbose_esc_2 = set -x;
+gen_verbose_esc = $(gen_verbose_esc_$(V))
+
+# Temporary files directory.
+
+ERLANG_MK_TMP ?= $(CURDIR)/.erlang.mk
+export ERLANG_MK_TMP
+
+# "erl" command.
+
+ERL = erl +A1 -noinput -boot no_dot_erlang
+
+# Platform detection.
+
+ifeq ($(PLATFORM),)
+UNAME_S := $(shell uname -s)
+
+ifeq ($(UNAME_S),Linux)
+PLATFORM = linux
+else ifeq ($(UNAME_S),Darwin)
+PLATFORM = darwin
+else ifeq ($(UNAME_S),SunOS)
+PLATFORM = solaris
+else ifeq ($(UNAME_S),GNU)
+PLATFORM = gnu
+else ifeq ($(UNAME_S),FreeBSD)
+PLATFORM = freebsd
+else ifeq ($(UNAME_S),NetBSD)
+PLATFORM = netbsd
+else ifeq ($(UNAME_S),OpenBSD)
+PLATFORM = openbsd
+else ifeq ($(UNAME_S),DragonFly)
+PLATFORM = dragonfly
+else ifeq ($(shell uname -o),Msys)
+PLATFORM = msys2
+else
+$(error Unable to detect platform. Please open a ticket with the output of uname -a.)
+endif
+
+export PLATFORM
+endif
+
+# Core targets.
+
+all:: deps app rel
+
+# Noop to avoid a Make warning when there's nothing to do.
+rel::
+ $(verbose) :
+
+relup:: deps app
+
+check:: tests
+
+clean:: clean-crashdump
+
+clean-crashdump:
+ifneq ($(wildcard erl_crash.dump),)
+ $(gen_verbose) rm -f erl_crash.dump
+endif
+
+distclean:: clean distclean-tmp
+
+$(ERLANG_MK_TMP):
+ $(verbose) mkdir -p $(ERLANG_MK_TMP)
+
+distclean-tmp:
+ $(gen_verbose) rm -rf $(ERLANG_MK_TMP)
+
+help::
+ $(verbose) printf "%s\n" \
+ "erlang.mk (version $(ERLANG_MK_VERSION)) is distributed under the terms of the ISC License." \
+ "Copyright (c) 2013-2016 Loïc Hoguin <essen@ninenines.eu>" \
+ "" \
+ "Usage: [V=1] $(MAKE) [target]..." \
+ "" \
+ "Core targets:" \
+ " all Run deps, app and rel targets in that order" \
+ " app Compile the project" \
+ " deps Fetch dependencies (if needed) and compile them" \
+ " fetch-deps Fetch dependencies recursively (if needed) without compiling them" \
+ " list-deps List dependencies recursively on stdout" \
+ " search q=... Search for a package in the built-in index" \
+ " rel Build a release for this project, if applicable" \
+ " docs Build the documentation for this project" \
+ " install-docs Install the man pages for this project" \
+ " check Compile and run all tests and analysis for this project" \
+ " tests Run the tests for this project" \
+ " clean Delete temporary and output files from most targets" \
+ " distclean Delete all temporary and output files" \
+ " help Display this help and exit" \
+ " erlang-mk Update erlang.mk to the latest version"
+
+# Core functions.
+
+empty :=
+space := $(empty) $(empty)
+tab := $(empty) $(empty)
+comma := ,
+
+define newline
+
+
+endef
+
+define comma_list
+$(subst $(space),$(comma),$(strip $(1)))
+endef
+
+define escape_dquotes
+$(subst ",\",$1)
+endef
+
+# Adding erlang.mk to make Erlang scripts that call init:get_plain_arguments() happy.
+define erlang
+$(ERL) $2 -pz $(ERLANG_MK_TMP)/rebar/ebin -eval "$(subst $(newline),,$(call escape_dquotes,$1))" -- erlang.mk
+endef
+
+ifeq ($(PLATFORM),msys2)
+core_native_path = $(shell cygpath -m $1)
+else
+core_native_path = $1
+endif
+
+core_http_get = curl -Lf$(if $(filter-out 0,$(V)),,s)o $(call core_native_path,$1) $2
+
+core_eq = $(and $(findstring $(1),$(2)),$(findstring $(2),$(1)))
+
+# We skip files that contain spaces because they end up causing issues.
+core_find = $(if $(wildcard $1),$(shell find $(1:%/=%) \( -type l -o -type f \) -name $(subst *,\*,$2) | grep -v " "))
+
+core_lc = $(subst A,a,$(subst B,b,$(subst C,c,$(subst D,d,$(subst E,e,$(subst F,f,$(subst G,g,$(subst H,h,$(subst I,i,$(subst J,j,$(subst K,k,$(subst L,l,$(subst M,m,$(subst N,n,$(subst O,o,$(subst P,p,$(subst Q,q,$(subst R,r,$(subst S,s,$(subst T,t,$(subst U,u,$(subst V,v,$(subst W,w,$(subst X,x,$(subst Y,y,$(subst Z,z,$(1)))))))))))))))))))))))))))
+
+core_ls = $(filter-out $(1),$(shell echo $(1)))
+
+# @todo Use a solution that does not require using perl.
+core_relpath = $(shell perl -e 'use File::Spec; print File::Spec->abs2rel(@ARGV) . "\n"' $1 $2)
+
+define core_render
+ printf -- '$(subst $(newline),\n,$(subst %,%%,$(subst ','\'',$(subst $(tab),$(WS),$(call $(1))))))\n' > $(2)
+endef
+
+# Automated update.
+
+ERLANG_MK_REPO ?= https://github.com/ninenines/erlang.mk
+ERLANG_MK_COMMIT ?=
+ERLANG_MK_BUILD_CONFIG ?= build.config
+ERLANG_MK_BUILD_DIR ?= .erlang.mk.build
+
+erlang-mk: WITHOUT ?= $(ERLANG_MK_WITHOUT)
+erlang-mk:
+ifdef ERLANG_MK_COMMIT
+ $(verbose) git clone $(ERLANG_MK_REPO) $(ERLANG_MK_BUILD_DIR)
+ $(verbose) cd $(ERLANG_MK_BUILD_DIR) && git checkout $(ERLANG_MK_COMMIT)
+else
+ $(verbose) git clone --depth 1 $(ERLANG_MK_REPO) $(ERLANG_MK_BUILD_DIR)
+endif
+ $(verbose) if [ -f $(ERLANG_MK_BUILD_CONFIG) ]; then cp $(ERLANG_MK_BUILD_CONFIG) $(ERLANG_MK_BUILD_DIR)/build.config; fi
+ $(gen_verbose) $(MAKE) --no-print-directory -C $(ERLANG_MK_BUILD_DIR) WITHOUT='$(strip $(WITHOUT))' UPGRADE=1
+ $(verbose) cp $(ERLANG_MK_BUILD_DIR)/erlang.mk ./erlang.mk
+ $(verbose) rm -rf $(ERLANG_MK_BUILD_DIR)
+ $(verbose) rm -rf $(ERLANG_MK_TMP)
+
+# The erlang.mk package index is bundled in the default erlang.mk build.
+# Search for the string "copyright" to skip to the rest of the code.
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-kerl
+
+KERL_INSTALL_DIR ?= $(HOME)/erlang
+
+ifeq ($(strip $(KERL)),)
+KERL := $(ERLANG_MK_TMP)/kerl/kerl
+endif
+
+KERL_DIR = $(ERLANG_MK_TMP)/kerl
+
+export KERL
+
+KERL_GIT ?= https://github.com/kerl/kerl
+KERL_COMMIT ?= master
+
+KERL_MAKEFLAGS ?=
+
+OTP_GIT ?= https://github.com/erlang/otp
+
+define kerl_otp_target
+$(KERL_INSTALL_DIR)/$(1): $(KERL)
+ $(verbose) if [ ! -d $$@ ]; then \
+ MAKEFLAGS="$(KERL_MAKEFLAGS)" $(KERL) build git $(OTP_GIT) $(1) $(1); \
+ $(KERL) install $(1) $(KERL_INSTALL_DIR)/$(1); \
+ fi
+endef
+
+define kerl_hipe_target
+$(KERL_INSTALL_DIR)/$1-native: $(KERL)
+ $(verbose) if [ ! -d $$@ ]; then \
+ KERL_CONFIGURE_OPTIONS=--enable-native-libs \
+ MAKEFLAGS="$(KERL_MAKEFLAGS)" $(KERL) build git $(OTP_GIT) $1 $1-native; \
+ $(KERL) install $1-native $(KERL_INSTALL_DIR)/$1-native; \
+ fi
+endef
+
+$(KERL): $(KERL_DIR)
+
+$(KERL_DIR): | $(ERLANG_MK_TMP)
+ $(gen_verbose) git clone --depth 1 $(KERL_GIT) $(ERLANG_MK_TMP)/kerl
+ $(verbose) cd $(ERLANG_MK_TMP)/kerl && git checkout $(KERL_COMMIT)
+ $(verbose) chmod +x $(KERL)
+
+distclean:: distclean-kerl
+
+distclean-kerl:
+ $(gen_verbose) rm -rf $(KERL_DIR)
+
+# Allow users to select which version of Erlang/OTP to use for a project.
+
+ifneq ($(strip $(LATEST_ERLANG_OTP)),)
+# In some environments it is necessary to filter out master.
+ERLANG_OTP := $(notdir $(lastword $(sort\
+ $(filter-out $(KERL_INSTALL_DIR)/master $(KERL_INSTALL_DIR)/OTP_R%,\
+ $(filter-out %-rc1 %-rc2 %-rc3,$(wildcard $(KERL_INSTALL_DIR)/*[^-native]))))))
+endif
+
+ERLANG_OTP ?=
+ERLANG_HIPE ?=
+
+# Use kerl to enforce a specific Erlang/OTP version for a project.
+ifneq ($(strip $(ERLANG_OTP)),)
+export PATH := $(KERL_INSTALL_DIR)/$(ERLANG_OTP)/bin:$(PATH)
+SHELL := env PATH=$(PATH) $(SHELL)
+$(eval $(call kerl_otp_target,$(ERLANG_OTP)))
+
+# Build Erlang/OTP only if it doesn't already exist.
+ifeq ($(wildcard $(KERL_INSTALL_DIR)/$(ERLANG_OTP))$(BUILD_ERLANG_OTP),)
+$(info Building Erlang/OTP $(ERLANG_OTP)... Please wait...)
+$(shell $(MAKE) $(KERL_INSTALL_DIR)/$(ERLANG_OTP) ERLANG_OTP=$(ERLANG_OTP) BUILD_ERLANG_OTP=1 >&2)
+endif
+
+else
+# Same for a HiPE enabled VM.
+ifneq ($(strip $(ERLANG_HIPE)),)
+export PATH := $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native/bin:$(PATH)
+SHELL := env PATH=$(PATH) $(SHELL)
+$(eval $(call kerl_hipe_target,$(ERLANG_HIPE)))
+
+# Build Erlang/OTP only if it doesn't already exist.
+ifeq ($(wildcard $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native)$(BUILD_ERLANG_OTP),)
+$(info Building HiPE-enabled Erlang/OTP $(ERLANG_HIPE)... Please wait...)
+$(shell $(MAKE) $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native ERLANG_HIPE=$(ERLANG_HIPE) BUILD_ERLANG_OTP=1 >&2)
+endif
+
+endif
+endif
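+
+# Example (illustrative): enforce a specific OTP build from the command line
+# or from the project Makefile; the version numbers here are placeholders.
+#
+#     $ make ERLANG_OTP=22.3
+#     $ make tests ERLANG_HIPE=21.3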
+
+PACKAGES += aberth
+pkg_aberth_name = aberth
+pkg_aberth_description = Generic BERT-RPC server in Erlang
+pkg_aberth_homepage = https://github.com/a13x/aberth
+pkg_aberth_fetch = git
+pkg_aberth_repo = https://github.com/a13x/aberth
+pkg_aberth_commit = master
+
+PACKAGES += active
+pkg_active_name = active
+pkg_active_description = Active development for Erlang: rebuild and reload source/binary files while the VM is running
+pkg_active_homepage = https://github.com/proger/active
+pkg_active_fetch = git
+pkg_active_repo = https://github.com/proger/active
+pkg_active_commit = master
+
+PACKAGES += actordb_core
+pkg_actordb_core_name = actordb_core
+pkg_actordb_core_description = ActorDB main source
+pkg_actordb_core_homepage = http://www.actordb.com/
+pkg_actordb_core_fetch = git
+pkg_actordb_core_repo = https://github.com/biokoda/actordb_core
+pkg_actordb_core_commit = master
+
+PACKAGES += actordb_thrift
+pkg_actordb_thrift_name = actordb_thrift
+pkg_actordb_thrift_description = Thrift API for ActorDB
+pkg_actordb_thrift_homepage = http://www.actordb.com/
+pkg_actordb_thrift_fetch = git
+pkg_actordb_thrift_repo = https://github.com/biokoda/actordb_thrift
+pkg_actordb_thrift_commit = master
+
+PACKAGES += aleppo
+pkg_aleppo_name = aleppo
+pkg_aleppo_description = Alternative Erlang Pre-Processor
+pkg_aleppo_homepage = https://github.com/ErlyORM/aleppo
+pkg_aleppo_fetch = git
+pkg_aleppo_repo = https://github.com/ErlyORM/aleppo
+pkg_aleppo_commit = master
+
+PACKAGES += alog
+pkg_alog_name = alog
+pkg_alog_description = Simply the best logging framework for Erlang
+pkg_alog_homepage = https://github.com/siberian-fast-food/alogger
+pkg_alog_fetch = git
+pkg_alog_repo = https://github.com/siberian-fast-food/alogger
+pkg_alog_commit = master
+
+PACKAGES += amqp_client
+pkg_amqp_client_name = amqp_client
+pkg_amqp_client_description = RabbitMQ Erlang AMQP client
+pkg_amqp_client_homepage = https://www.rabbitmq.com/erlang-client-user-guide.html
+pkg_amqp_client_fetch = git
+pkg_amqp_client_repo = https://github.com/rabbitmq/rabbitmq-erlang-client.git
+pkg_amqp_client_commit = master
+
+PACKAGES += annotations
+pkg_annotations_name = annotations
+pkg_annotations_description = Simple code instrumentation utilities
+pkg_annotations_homepage = https://github.com/hyperthunk/annotations
+pkg_annotations_fetch = git
+pkg_annotations_repo = https://github.com/hyperthunk/annotations
+pkg_annotations_commit = master
+
+PACKAGES += antidote
+pkg_antidote_name = antidote
+pkg_antidote_description = Large-scale computation without synchronisation
+pkg_antidote_homepage = https://syncfree.lip6.fr/
+pkg_antidote_fetch = git
+pkg_antidote_repo = https://github.com/SyncFree/antidote
+pkg_antidote_commit = master
+
+PACKAGES += apns
+pkg_apns_name = apns
+pkg_apns_description = Apple Push Notification Server for Erlang
+pkg_apns_homepage = http://inaka.github.com/apns4erl
+pkg_apns_fetch = git
+pkg_apns_repo = https://github.com/inaka/apns4erl
+pkg_apns_commit = master
+
+PACKAGES += asciideck
+pkg_asciideck_name = asciideck
+pkg_asciideck_description = Asciidoc for Erlang.
+pkg_asciideck_homepage = https://ninenines.eu
+pkg_asciideck_fetch = git
+pkg_asciideck_repo = https://github.com/ninenines/asciideck
+pkg_asciideck_commit = master
+
+PACKAGES += azdht
+pkg_azdht_name = azdht
+pkg_azdht_description = Azureus Distributed Hash Table (DHT) in Erlang
+pkg_azdht_homepage = https://github.com/arcusfelis/azdht
+pkg_azdht_fetch = git
+pkg_azdht_repo = https://github.com/arcusfelis/azdht
+pkg_azdht_commit = master
+
+PACKAGES += backoff
+pkg_backoff_name = backoff
+pkg_backoff_description = Simple exponential backoffs in Erlang
+pkg_backoff_homepage = https://github.com/ferd/backoff
+pkg_backoff_fetch = git
+pkg_backoff_repo = https://github.com/ferd/backoff
+pkg_backoff_commit = master
+
+PACKAGES += barrel_tcp
+pkg_barrel_tcp_name = barrel_tcp
+pkg_barrel_tcp_description = barrel is a generic TCP acceptor pool with low latency in Erlang.
+pkg_barrel_tcp_homepage = https://github.com/benoitc-attic/barrel_tcp
+pkg_barrel_tcp_fetch = git
+pkg_barrel_tcp_repo = https://github.com/benoitc-attic/barrel_tcp
+pkg_barrel_tcp_commit = master
+
+PACKAGES += basho_bench
+pkg_basho_bench_name = basho_bench
+pkg_basho_bench_description = A load-generation and testing tool for basically whatever you can write a returning Erlang function for.
+pkg_basho_bench_homepage = https://github.com/basho/basho_bench
+pkg_basho_bench_fetch = git
+pkg_basho_bench_repo = https://github.com/basho/basho_bench
+pkg_basho_bench_commit = master
+
+PACKAGES += bcrypt
+pkg_bcrypt_name = bcrypt
+pkg_bcrypt_description = Bcrypt Erlang / C library
+pkg_bcrypt_homepage = https://github.com/erlangpack/bcrypt
+pkg_bcrypt_fetch = git
+pkg_bcrypt_repo = https://github.com/erlangpack/bcrypt.git
+pkg_bcrypt_commit = master
+
+PACKAGES += beam
+pkg_beam_name = beam
+pkg_beam_description = BEAM emulator written in Erlang
+pkg_beam_homepage = https://github.com/tonyrog/beam
+pkg_beam_fetch = git
+pkg_beam_repo = https://github.com/tonyrog/beam
+pkg_beam_commit = master
+
+PACKAGES += beanstalk
+pkg_beanstalk_name = beanstalk
+pkg_beanstalk_description = An Erlang client for beanstalkd
+pkg_beanstalk_homepage = https://github.com/tim/erlang-beanstalk
+pkg_beanstalk_fetch = git
+pkg_beanstalk_repo = https://github.com/tim/erlang-beanstalk
+pkg_beanstalk_commit = master
+
+PACKAGES += bear
+pkg_bear_name = bear
+pkg_bear_description = a set of statistics functions for erlang
+pkg_bear_homepage = https://github.com/boundary/bear
+pkg_bear_fetch = git
+pkg_bear_repo = https://github.com/boundary/bear
+pkg_bear_commit = master
+
+PACKAGES += bertconf
+pkg_bertconf_name = bertconf
+pkg_bertconf_description = Make ETS tables out of static BERT files that are auto-reloaded
+pkg_bertconf_homepage = https://github.com/ferd/bertconf
+pkg_bertconf_fetch = git
+pkg_bertconf_repo = https://github.com/ferd/bertconf
+pkg_bertconf_commit = master
+
+PACKAGES += bifrost
+pkg_bifrost_name = bifrost
+pkg_bifrost_description = Erlang FTP Server Framework
+pkg_bifrost_homepage = https://github.com/thorstadt/bifrost
+pkg_bifrost_fetch = git
+pkg_bifrost_repo = https://github.com/thorstadt/bifrost
+pkg_bifrost_commit = master
+
+PACKAGES += binpp
+pkg_binpp_name = binpp
+pkg_binpp_description = Erlang Binary Pretty Printer
+pkg_binpp_homepage = https://github.com/jtendo/binpp
+pkg_binpp_fetch = git
+pkg_binpp_repo = https://github.com/jtendo/binpp
+pkg_binpp_commit = master
+
+PACKAGES += bisect
+pkg_bisect_name = bisect
+pkg_bisect_description = Ordered fixed-size binary dictionary in Erlang
+pkg_bisect_homepage = https://github.com/knutin/bisect
+pkg_bisect_fetch = git
+pkg_bisect_repo = https://github.com/knutin/bisect
+pkg_bisect_commit = master
+
+PACKAGES += bitcask
+pkg_bitcask_name = bitcask
+pkg_bitcask_description = because you need another key/value storage engine
+pkg_bitcask_homepage = https://github.com/basho/bitcask
+pkg_bitcask_fetch = git
+pkg_bitcask_repo = https://github.com/basho/bitcask
+pkg_bitcask_commit = develop
+
+PACKAGES += bitstore
+pkg_bitstore_name = bitstore
+pkg_bitstore_description = A document based ontology development environment
+pkg_bitstore_homepage = https://github.com/bdionne/bitstore
+pkg_bitstore_fetch = git
+pkg_bitstore_repo = https://github.com/bdionne/bitstore
+pkg_bitstore_commit = master
+
+PACKAGES += bootstrap
+pkg_bootstrap_name = bootstrap
+pkg_bootstrap_description = A simple, yet powerful Erlang cluster bootstrapping application.
+pkg_bootstrap_homepage = https://github.com/schlagert/bootstrap
+pkg_bootstrap_fetch = git
+pkg_bootstrap_repo = https://github.com/schlagert/bootstrap
+pkg_bootstrap_commit = master
+
+PACKAGES += boss
+pkg_boss_name = boss
+pkg_boss_description = Erlang web MVC, now featuring Comet
+pkg_boss_homepage = https://github.com/ChicagoBoss/ChicagoBoss
+pkg_boss_fetch = git
+pkg_boss_repo = https://github.com/ChicagoBoss/ChicagoBoss
+pkg_boss_commit = master
+
+PACKAGES += boss_db
+pkg_boss_db_name = boss_db
+pkg_boss_db_description = BossDB: a sharded, caching, pooling, evented ORM for Erlang
+pkg_boss_db_homepage = https://github.com/ErlyORM/boss_db
+pkg_boss_db_fetch = git
+pkg_boss_db_repo = https://github.com/ErlyORM/boss_db
+pkg_boss_db_commit = master
+
+PACKAGES += brod
+pkg_brod_name = brod
+pkg_brod_description = Kafka client in Erlang
+pkg_brod_homepage = https://github.com/klarna/brod
+pkg_brod_fetch = git
+pkg_brod_repo = https://github.com/klarna/brod.git
+pkg_brod_commit = master
+
+PACKAGES += bson
+pkg_bson_name = bson
+pkg_bson_description = BSON documents in Erlang, see bsonspec.org
+pkg_bson_homepage = https://github.com/comtihon/bson-erlang
+pkg_bson_fetch = git
+pkg_bson_repo = https://github.com/comtihon/bson-erlang
+pkg_bson_commit = master
+
+PACKAGES += bullet
+pkg_bullet_name = bullet
+pkg_bullet_description = Simple, reliable, efficient streaming for Cowboy.
+pkg_bullet_homepage = http://ninenines.eu
+pkg_bullet_fetch = git
+pkg_bullet_repo = https://github.com/ninenines/bullet
+pkg_bullet_commit = master
+
+PACKAGES += cache
+pkg_cache_name = cache
+pkg_cache_description = Erlang in-memory cache
+pkg_cache_homepage = https://github.com/fogfish/cache
+pkg_cache_fetch = git
+pkg_cache_repo = https://github.com/fogfish/cache
+pkg_cache_commit = master
+
+PACKAGES += cake
+pkg_cake_name = cake
+pkg_cake_description = Really simple terminal colorization
+pkg_cake_homepage = https://github.com/darach/cake-erl
+pkg_cake_fetch = git
+pkg_cake_repo = https://github.com/darach/cake-erl
+pkg_cake_commit = master
+
+PACKAGES += carotene
+pkg_carotene_name = carotene
+pkg_carotene_description = Real-time server
+pkg_carotene_homepage = https://github.com/carotene/carotene
+pkg_carotene_fetch = git
+pkg_carotene_repo = https://github.com/carotene/carotene
+pkg_carotene_commit = master
+
+PACKAGES += cberl
+pkg_cberl_name = cberl
+pkg_cberl_description = NIF based Erlang bindings for Couchbase
+pkg_cberl_homepage = https://github.com/chitika/cberl
+pkg_cberl_fetch = git
+pkg_cberl_repo = https://github.com/chitika/cberl
+pkg_cberl_commit = master
+
+PACKAGES += cecho
+pkg_cecho_name = cecho
+pkg_cecho_description = An ncurses library for Erlang
+pkg_cecho_homepage = https://github.com/mazenharake/cecho
+pkg_cecho_fetch = git
+pkg_cecho_repo = https://github.com/mazenharake/cecho
+pkg_cecho_commit = master
+
+PACKAGES += cferl
+pkg_cferl_name = cferl
+pkg_cferl_description = Rackspace / Open Stack Cloud Files Erlang Client
+pkg_cferl_homepage = https://github.com/ddossot/cferl
+pkg_cferl_fetch = git
+pkg_cferl_repo = https://github.com/ddossot/cferl
+pkg_cferl_commit = master
+
+PACKAGES += chaos_monkey
+pkg_chaos_monkey_name = chaos_monkey
+pkg_chaos_monkey_description = This is The CHAOS MONKEY. It will kill your processes.
+pkg_chaos_monkey_homepage = https://github.com/dLuna/chaos_monkey
+pkg_chaos_monkey_fetch = git
+pkg_chaos_monkey_repo = https://github.com/dLuna/chaos_monkey
+pkg_chaos_monkey_commit = master
+
+PACKAGES += check_node
+pkg_check_node_name = check_node
+pkg_check_node_description = Nagios Scripts for monitoring Riak
+pkg_check_node_homepage = https://github.com/basho-labs/riak_nagios
+pkg_check_node_fetch = git
+pkg_check_node_repo = https://github.com/basho-labs/riak_nagios
+pkg_check_node_commit = master
+
+PACKAGES += chronos
+pkg_chronos_name = chronos
+pkg_chronos_description = Timer module for Erlang that makes it easy to abstract time out of the tests.
+pkg_chronos_homepage = https://github.com/lehoff/chronos
+pkg_chronos_fetch = git
+pkg_chronos_repo = https://github.com/lehoff/chronos
+pkg_chronos_commit = master
+
+PACKAGES += chumak
+pkg_chumak_name = chumak
+pkg_chumak_description = Pure Erlang implementation of ZeroMQ Message Transport Protocol.
+pkg_chumak_homepage = http://choven.ca
+pkg_chumak_fetch = git
+pkg_chumak_repo = https://github.com/chovencorp/chumak
+pkg_chumak_commit = master
+
+PACKAGES += cl
+pkg_cl_name = cl
+pkg_cl_description = OpenCL binding for Erlang
+pkg_cl_homepage = https://github.com/tonyrog/cl
+pkg_cl_fetch = git
+pkg_cl_repo = https://github.com/tonyrog/cl
+pkg_cl_commit = master
+
+PACKAGES += clique
+pkg_clique_name = clique
+pkg_clique_description = CLI Framework for Erlang
+pkg_clique_homepage = https://github.com/basho/clique
+pkg_clique_fetch = git
+pkg_clique_repo = https://github.com/basho/clique
+pkg_clique_commit = develop
+
+PACKAGES += cloudi_core
+pkg_cloudi_core_name = cloudi_core
+pkg_cloudi_core_description = CloudI internal service runtime
+pkg_cloudi_core_homepage = http://cloudi.org/
+pkg_cloudi_core_fetch = git
+pkg_cloudi_core_repo = https://github.com/CloudI/cloudi_core
+pkg_cloudi_core_commit = master
+
+PACKAGES += cloudi_service_api_requests
+pkg_cloudi_service_api_requests_name = cloudi_service_api_requests
+pkg_cloudi_service_api_requests_description = CloudI Service API requests (JSON-RPC/Erlang-term support)
+pkg_cloudi_service_api_requests_homepage = http://cloudi.org/
+pkg_cloudi_service_api_requests_fetch = git
+pkg_cloudi_service_api_requests_repo = https://github.com/CloudI/cloudi_service_api_requests
+pkg_cloudi_service_api_requests_commit = master
+
+PACKAGES += cloudi_service_db
+pkg_cloudi_service_db_name = cloudi_service_db
+pkg_cloudi_service_db_description = CloudI Database (in-memory/testing/generic)
+pkg_cloudi_service_db_homepage = http://cloudi.org/
+pkg_cloudi_service_db_fetch = git
+pkg_cloudi_service_db_repo = https://github.com/CloudI/cloudi_service_db
+pkg_cloudi_service_db_commit = master
+
+PACKAGES += cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_name = cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_description = Cassandra CloudI Service
+pkg_cloudi_service_db_cassandra_homepage = http://cloudi.org/
+pkg_cloudi_service_db_cassandra_fetch = git
+pkg_cloudi_service_db_cassandra_repo = https://github.com/CloudI/cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_commit = master
+
+PACKAGES += cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_name = cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_description = Cassandra CQL CloudI Service
+pkg_cloudi_service_db_cassandra_cql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_cassandra_cql_fetch = git
+pkg_cloudi_service_db_cassandra_cql_repo = https://github.com/CloudI/cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_commit = master
+
+PACKAGES += cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_name = cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_description = CouchDB CloudI Service
+pkg_cloudi_service_db_couchdb_homepage = http://cloudi.org/
+pkg_cloudi_service_db_couchdb_fetch = git
+pkg_cloudi_service_db_couchdb_repo = https://github.com/CloudI/cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_commit = master
+
+PACKAGES += cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_name = cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_description = elasticsearch CloudI Service
+pkg_cloudi_service_db_elasticsearch_homepage = http://cloudi.org/
+pkg_cloudi_service_db_elasticsearch_fetch = git
+pkg_cloudi_service_db_elasticsearch_repo = https://github.com/CloudI/cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_commit = master
+
+PACKAGES += cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_name = cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_description = memcached CloudI Service
+pkg_cloudi_service_db_memcached_homepage = http://cloudi.org/
+pkg_cloudi_service_db_memcached_fetch = git
+pkg_cloudi_service_db_memcached_repo = https://github.com/CloudI/cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_commit = master
+
+PACKAGES += cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_name = cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_description = MySQL CloudI Service
+pkg_cloudi_service_db_mysql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_mysql_fetch = git
+pkg_cloudi_service_db_mysql_repo = https://github.com/CloudI/cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_commit = master
+
+PACKAGES += cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_name = cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_description = PostgreSQL CloudI Service
+pkg_cloudi_service_db_pgsql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_pgsql_fetch = git
+pkg_cloudi_service_db_pgsql_repo = https://github.com/CloudI/cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_commit = master
+
+PACKAGES += cloudi_service_db_riak
+pkg_cloudi_service_db_riak_name = cloudi_service_db_riak
+pkg_cloudi_service_db_riak_description = Riak CloudI Service
+pkg_cloudi_service_db_riak_homepage = http://cloudi.org/
+pkg_cloudi_service_db_riak_fetch = git
+pkg_cloudi_service_db_riak_repo = https://github.com/CloudI/cloudi_service_db_riak
+pkg_cloudi_service_db_riak_commit = master
+
+PACKAGES += cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_name = cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_description = Tokyo Tyrant CloudI Service
+pkg_cloudi_service_db_tokyotyrant_homepage = http://cloudi.org/
+pkg_cloudi_service_db_tokyotyrant_fetch = git
+pkg_cloudi_service_db_tokyotyrant_repo = https://github.com/CloudI/cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_commit = master
+
+PACKAGES += cloudi_service_filesystem
+pkg_cloudi_service_filesystem_name = cloudi_service_filesystem
+pkg_cloudi_service_filesystem_description = Filesystem CloudI Service
+pkg_cloudi_service_filesystem_homepage = http://cloudi.org/
+pkg_cloudi_service_filesystem_fetch = git
+pkg_cloudi_service_filesystem_repo = https://github.com/CloudI/cloudi_service_filesystem
+pkg_cloudi_service_filesystem_commit = master
+
+PACKAGES += cloudi_service_http_client
+pkg_cloudi_service_http_client_name = cloudi_service_http_client
+pkg_cloudi_service_http_client_description = HTTP client CloudI Service
+pkg_cloudi_service_http_client_homepage = http://cloudi.org/
+pkg_cloudi_service_http_client_fetch = git
+pkg_cloudi_service_http_client_repo = https://github.com/CloudI/cloudi_service_http_client
+pkg_cloudi_service_http_client_commit = master
+
+PACKAGES += cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_name = cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_description = cowboy HTTP/HTTPS CloudI Service
+pkg_cloudi_service_http_cowboy_homepage = http://cloudi.org/
+pkg_cloudi_service_http_cowboy_fetch = git
+pkg_cloudi_service_http_cowboy_repo = https://github.com/CloudI/cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_commit = master
+
+PACKAGES += cloudi_service_http_elli
+pkg_cloudi_service_http_elli_name = cloudi_service_http_elli
+pkg_cloudi_service_http_elli_description = elli HTTP CloudI Service
+pkg_cloudi_service_http_elli_homepage = http://cloudi.org/
+pkg_cloudi_service_http_elli_fetch = git
+pkg_cloudi_service_http_elli_repo = https://github.com/CloudI/cloudi_service_http_elli
+pkg_cloudi_service_http_elli_commit = master
+
+PACKAGES += cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_name = cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_description = Map/Reduce CloudI Service
+pkg_cloudi_service_map_reduce_homepage = http://cloudi.org/
+pkg_cloudi_service_map_reduce_fetch = git
+pkg_cloudi_service_map_reduce_repo = https://github.com/CloudI/cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_commit = master
+
+PACKAGES += cloudi_service_oauth1
+pkg_cloudi_service_oauth1_name = cloudi_service_oauth1
+pkg_cloudi_service_oauth1_description = OAuth v1.0 CloudI Service
+pkg_cloudi_service_oauth1_homepage = http://cloudi.org/
+pkg_cloudi_service_oauth1_fetch = git
+pkg_cloudi_service_oauth1_repo = https://github.com/CloudI/cloudi_service_oauth1
+pkg_cloudi_service_oauth1_commit = master
+
+PACKAGES += cloudi_service_queue
+pkg_cloudi_service_queue_name = cloudi_service_queue
+pkg_cloudi_service_queue_description = Persistent Queue Service
+pkg_cloudi_service_queue_homepage = http://cloudi.org/
+pkg_cloudi_service_queue_fetch = git
+pkg_cloudi_service_queue_repo = https://github.com/CloudI/cloudi_service_queue
+pkg_cloudi_service_queue_commit = master
+
+PACKAGES += cloudi_service_quorum
+pkg_cloudi_service_quorum_name = cloudi_service_quorum
+pkg_cloudi_service_quorum_description = CloudI Quorum Service
+pkg_cloudi_service_quorum_homepage = http://cloudi.org/
+pkg_cloudi_service_quorum_fetch = git
+pkg_cloudi_service_quorum_repo = https://github.com/CloudI/cloudi_service_quorum
+pkg_cloudi_service_quorum_commit = master
+
+PACKAGES += cloudi_service_router
+pkg_cloudi_service_router_name = cloudi_service_router
+pkg_cloudi_service_router_description = CloudI Router Service
+pkg_cloudi_service_router_homepage = http://cloudi.org/
+pkg_cloudi_service_router_fetch = git
+pkg_cloudi_service_router_repo = https://github.com/CloudI/cloudi_service_router
+pkg_cloudi_service_router_commit = master
+
+PACKAGES += cloudi_service_tcp
+pkg_cloudi_service_tcp_name = cloudi_service_tcp
+pkg_cloudi_service_tcp_description = TCP CloudI Service
+pkg_cloudi_service_tcp_homepage = http://cloudi.org/
+pkg_cloudi_service_tcp_fetch = git
+pkg_cloudi_service_tcp_repo = https://github.com/CloudI/cloudi_service_tcp
+pkg_cloudi_service_tcp_commit = master
+
+PACKAGES += cloudi_service_timers
+pkg_cloudi_service_timers_name = cloudi_service_timers
+pkg_cloudi_service_timers_description = Timers CloudI Service
+pkg_cloudi_service_timers_homepage = http://cloudi.org/
+pkg_cloudi_service_timers_fetch = git
+pkg_cloudi_service_timers_repo = https://github.com/CloudI/cloudi_service_timers
+pkg_cloudi_service_timers_commit = master
+
+PACKAGES += cloudi_service_udp
+pkg_cloudi_service_udp_name = cloudi_service_udp
+pkg_cloudi_service_udp_description = UDP CloudI Service
+pkg_cloudi_service_udp_homepage = http://cloudi.org/
+pkg_cloudi_service_udp_fetch = git
+pkg_cloudi_service_udp_repo = https://github.com/CloudI/cloudi_service_udp
+pkg_cloudi_service_udp_commit = master
+
+PACKAGES += cloudi_service_validate
+pkg_cloudi_service_validate_name = cloudi_service_validate
+pkg_cloudi_service_validate_description = CloudI Validate Service
+pkg_cloudi_service_validate_homepage = http://cloudi.org/
+pkg_cloudi_service_validate_fetch = git
+pkg_cloudi_service_validate_repo = https://github.com/CloudI/cloudi_service_validate
+pkg_cloudi_service_validate_commit = master
+
+PACKAGES += cloudi_service_zeromq
+pkg_cloudi_service_zeromq_name = cloudi_service_zeromq
+pkg_cloudi_service_zeromq_description = ZeroMQ CloudI Service
+pkg_cloudi_service_zeromq_homepage = http://cloudi.org/
+pkg_cloudi_service_zeromq_fetch = git
+pkg_cloudi_service_zeromq_repo = https://github.com/CloudI/cloudi_service_zeromq
+pkg_cloudi_service_zeromq_commit = master
+
+PACKAGES += cluster_info
+pkg_cluster_info_name = cluster_info
+pkg_cluster_info_description = Fork of Hibari's nifty cluster_info OTP app
+pkg_cluster_info_homepage = https://github.com/basho/cluster_info
+pkg_cluster_info_fetch = git
+pkg_cluster_info_repo = https://github.com/basho/cluster_info
+pkg_cluster_info_commit = master
+
+PACKAGES += color
+pkg_color_name = color
+pkg_color_description = ANSI colors for your Erlang
+pkg_color_homepage = https://github.com/julianduque/erlang-color
+pkg_color_fetch = git
+pkg_color_repo = https://github.com/julianduque/erlang-color
+pkg_color_commit = master
+
+PACKAGES += confetti
+pkg_confetti_name = confetti
+pkg_confetti_description = Erlang configuration provider / application:get_env/2 on steroids
+pkg_confetti_homepage = https://github.com/jtendo/confetti
+pkg_confetti_fetch = git
+pkg_confetti_repo = https://github.com/jtendo/confetti
+pkg_confetti_commit = master
+
+PACKAGES += couchbeam
+pkg_couchbeam_name = couchbeam
+pkg_couchbeam_description = Apache CouchDB client in Erlang
+pkg_couchbeam_homepage = https://github.com/benoitc/couchbeam
+pkg_couchbeam_fetch = git
+pkg_couchbeam_repo = https://github.com/benoitc/couchbeam
+pkg_couchbeam_commit = master
+
+PACKAGES += covertool
+pkg_covertool_name = covertool
+pkg_covertool_description = Tool to convert Erlang cover data files into Cobertura XML reports
+pkg_covertool_homepage = https://github.com/idubrov/covertool
+pkg_covertool_fetch = git
+pkg_covertool_repo = https://github.com/idubrov/covertool
+pkg_covertool_commit = master
+
+PACKAGES += cowboy
+pkg_cowboy_name = cowboy
+pkg_cowboy_description = Small, fast and modular HTTP server.
+pkg_cowboy_homepage = http://ninenines.eu
+pkg_cowboy_fetch = git
+pkg_cowboy_repo = https://github.com/ninenines/cowboy
+pkg_cowboy_commit = 1.0.4
+
+PACKAGES += cowdb
+pkg_cowdb_name = cowdb
+pkg_cowdb_description = Pure Key/Value database library for Erlang Applications
+pkg_cowdb_homepage = https://github.com/refuge/cowdb
+pkg_cowdb_fetch = git
+pkg_cowdb_repo = https://github.com/refuge/cowdb
+pkg_cowdb_commit = master
+
+PACKAGES += cowlib
+pkg_cowlib_name = cowlib
+pkg_cowlib_description = Support library for manipulating Web protocols.
+pkg_cowlib_homepage = http://ninenines.eu
+pkg_cowlib_fetch = git
+pkg_cowlib_repo = https://github.com/ninenines/cowlib
+pkg_cowlib_commit = 1.0.2
+
+PACKAGES += cpg
+pkg_cpg_name = cpg
+pkg_cpg_description = CloudI Process Groups
+pkg_cpg_homepage = https://github.com/okeuday/cpg
+pkg_cpg_fetch = git
+pkg_cpg_repo = https://github.com/okeuday/cpg
+pkg_cpg_commit = master
+
+PACKAGES += cqerl
+pkg_cqerl_name = cqerl
+pkg_cqerl_description = Native Erlang CQL client for Cassandra
+pkg_cqerl_homepage = https://matehat.github.io/cqerl/
+pkg_cqerl_fetch = git
+pkg_cqerl_repo = https://github.com/matehat/cqerl
+pkg_cqerl_commit = master
+
+PACKAGES += cr
+pkg_cr_name = cr
+pkg_cr_description = Chain Replication
+pkg_cr_homepage = https://synrc.com/apps/cr/doc/cr.htm
+pkg_cr_fetch = git
+pkg_cr_repo = https://github.com/spawnproc/cr
+pkg_cr_commit = master
+
+PACKAGES += cuttlefish
+pkg_cuttlefish_name = cuttlefish
+pkg_cuttlefish_description = never lose your childlike sense of wonder baby cuttlefish, promise me?
+pkg_cuttlefish_homepage = https://github.com/basho/cuttlefish
+pkg_cuttlefish_fetch = git
+pkg_cuttlefish_repo = https://github.com/basho/cuttlefish
+pkg_cuttlefish_commit = master
+
+PACKAGES += damocles
+pkg_damocles_name = damocles
+pkg_damocles_description = Erlang library for generating adversarial network conditions for QAing distributed applications/systems on a single Linux box.
+pkg_damocles_homepage = https://github.com/lostcolony/damocles
+pkg_damocles_fetch = git
+pkg_damocles_repo = https://github.com/lostcolony/damocles
+pkg_damocles_commit = master
+
+PACKAGES += debbie
+pkg_debbie_name = debbie
+pkg_debbie_description = .DEB Built In Erlang
+pkg_debbie_homepage = https://github.com/crownedgrouse/debbie
+pkg_debbie_fetch = git
+pkg_debbie_repo = https://github.com/crownedgrouse/debbie
+pkg_debbie_commit = master
+
+PACKAGES += decimal
+pkg_decimal_name = decimal
+pkg_decimal_description = An Erlang decimal arithmetic library
+pkg_decimal_homepage = https://github.com/tim/erlang-decimal
+pkg_decimal_fetch = git
+pkg_decimal_repo = https://github.com/tim/erlang-decimal
+pkg_decimal_commit = master
+
+PACKAGES += detergent
+pkg_detergent_name = detergent
+pkg_detergent_description = An emulsifying Erlang SOAP library
+pkg_detergent_homepage = https://github.com/devinus/detergent
+pkg_detergent_fetch = git
+pkg_detergent_repo = https://github.com/devinus/detergent
+pkg_detergent_commit = master
+
+PACKAGES += detest
+pkg_detest_name = detest
+pkg_detest_description = Tool for running tests on a cluster of erlang nodes
+pkg_detest_homepage = https://github.com/biokoda/detest
+pkg_detest_fetch = git
+pkg_detest_repo = https://github.com/biokoda/detest
+pkg_detest_commit = master
+
+PACKAGES += dh_date
+pkg_dh_date_name = dh_date
+pkg_dh_date_description = Date formatting / parsing library for erlang
+pkg_dh_date_homepage = https://github.com/daleharvey/dh_date
+pkg_dh_date_fetch = git
+pkg_dh_date_repo = https://github.com/daleharvey/dh_date
+pkg_dh_date_commit = master
+
+PACKAGES += dirbusterl
+pkg_dirbusterl_name = dirbusterl
+pkg_dirbusterl_description = DirBuster successor in Erlang
+pkg_dirbusterl_homepage = https://github.com/silentsignal/DirBustErl
+pkg_dirbusterl_fetch = git
+pkg_dirbusterl_repo = https://github.com/silentsignal/DirBustErl
+pkg_dirbusterl_commit = master
+
+PACKAGES += dispcount
+pkg_dispcount_name = dispcount
+pkg_dispcount_description = Erlang task dispatcher based on ETS counters.
+pkg_dispcount_homepage = https://github.com/ferd/dispcount
+pkg_dispcount_fetch = git
+pkg_dispcount_repo = https://github.com/ferd/dispcount
+pkg_dispcount_commit = master
+
+PACKAGES += dlhttpc
+pkg_dlhttpc_name = dlhttpc
+pkg_dlhttpc_description = dispcount-based lhttpc fork for massive amounts of requests to limited endpoints
+pkg_dlhttpc_homepage = https://github.com/ferd/dlhttpc
+pkg_dlhttpc_fetch = git
+pkg_dlhttpc_repo = https://github.com/ferd/dlhttpc
+pkg_dlhttpc_commit = master
+
+PACKAGES += dns
+pkg_dns_name = dns
+pkg_dns_description = Erlang DNS library
+pkg_dns_homepage = https://github.com/aetrion/dns_erlang
+pkg_dns_fetch = git
+pkg_dns_repo = https://github.com/aetrion/dns_erlang
+pkg_dns_commit = master
+
+PACKAGES += dnssd
+pkg_dnssd_name = dnssd
+pkg_dnssd_description = Erlang interface to Apple's Bonjour DNS Service Discovery implementation
+pkg_dnssd_homepage = https://github.com/benoitc/dnssd_erlang
+pkg_dnssd_fetch = git
+pkg_dnssd_repo = https://github.com/benoitc/dnssd_erlang
+pkg_dnssd_commit = master
+
+PACKAGES += dynamic_compile
+pkg_dynamic_compile_name = dynamic_compile
+pkg_dynamic_compile_description = compile and load erlang modules from string input
+pkg_dynamic_compile_homepage = https://github.com/jkvor/dynamic_compile
+pkg_dynamic_compile_fetch = git
+pkg_dynamic_compile_repo = https://github.com/jkvor/dynamic_compile
+pkg_dynamic_compile_commit = master
+
+PACKAGES += e2
+pkg_e2_name = e2
+pkg_e2_description = Library to simplify writing correct OTP applications.
+pkg_e2_homepage = http://e2project.org
+pkg_e2_fetch = git
+pkg_e2_repo = https://github.com/gar1t/e2
+pkg_e2_commit = master
+
+PACKAGES += eamf
+pkg_eamf_name = eamf
+pkg_eamf_description = eAMF provides Action Message Format (AMF) support for Erlang
+pkg_eamf_homepage = https://github.com/mrinalwadhwa/eamf
+pkg_eamf_fetch = git
+pkg_eamf_repo = https://github.com/mrinalwadhwa/eamf
+pkg_eamf_commit = master
+
+PACKAGES += eavro
+pkg_eavro_name = eavro
+pkg_eavro_description = Apache Avro encoder/decoder
+pkg_eavro_homepage = https://github.com/SIfoxDevTeam/eavro
+pkg_eavro_fetch = git
+pkg_eavro_repo = https://github.com/SIfoxDevTeam/eavro
+pkg_eavro_commit = master
+
+PACKAGES += ecapnp
+pkg_ecapnp_name = ecapnp
+pkg_ecapnp_description = Cap'n Proto library for Erlang
+pkg_ecapnp_homepage = https://github.com/kaos/ecapnp
+pkg_ecapnp_fetch = git
+pkg_ecapnp_repo = https://github.com/kaos/ecapnp
+pkg_ecapnp_commit = master
+
+PACKAGES += econfig
+pkg_econfig_name = econfig
+pkg_econfig_description = simple Erlang config handler using INI files
+pkg_econfig_homepage = https://github.com/benoitc/econfig
+pkg_econfig_fetch = git
+pkg_econfig_repo = https://github.com/benoitc/econfig
+pkg_econfig_commit = master
+
+PACKAGES += edate
+pkg_edate_name = edate
+pkg_edate_description = date manipulation library for erlang
+pkg_edate_homepage = https://github.com/dweldon/edate
+pkg_edate_fetch = git
+pkg_edate_repo = https://github.com/dweldon/edate
+pkg_edate_commit = master
+
+PACKAGES += edgar
+pkg_edgar_name = edgar
+pkg_edgar_description = Erlang Does GNU AR
+pkg_edgar_homepage = https://github.com/crownedgrouse/edgar
+pkg_edgar_fetch = git
+pkg_edgar_repo = https://github.com/crownedgrouse/edgar
+pkg_edgar_commit = master
+
+PACKAGES += edis
+pkg_edis_name = edis
+pkg_edis_description = An Erlang implementation of Redis KV Store
+pkg_edis_homepage = http://inaka.github.com/edis/
+pkg_edis_fetch = git
+pkg_edis_repo = https://github.com/inaka/edis
+pkg_edis_commit = master
+
+PACKAGES += edns
+pkg_edns_name = edns
+pkg_edns_description = Erlang/OTP DNS server
+pkg_edns_homepage = https://github.com/hcvst/erlang-dns
+pkg_edns_fetch = git
+pkg_edns_repo = https://github.com/hcvst/erlang-dns
+pkg_edns_commit = master
+
+PACKAGES += edown
+pkg_edown_name = edown
+pkg_edown_description = EDoc extension for generating Github-flavored Markdown
+pkg_edown_homepage = https://github.com/uwiger/edown
+pkg_edown_fetch = git
+pkg_edown_repo = https://github.com/uwiger/edown
+pkg_edown_commit = master
+
+PACKAGES += eep
+pkg_eep_name = eep
+pkg_eep_description = Erlang Easy Profiling (eep) application provides a way to analyze application performance and call hierarchy
+pkg_eep_homepage = https://github.com/virtan/eep
+pkg_eep_fetch = git
+pkg_eep_repo = https://github.com/virtan/eep
+pkg_eep_commit = master
+
+PACKAGES += eep_app
+pkg_eep_app_name = eep_app
+pkg_eep_app_description = Embedded Event Processing
+pkg_eep_app_homepage = https://github.com/darach/eep-erl
+pkg_eep_app_fetch = git
+pkg_eep_app_repo = https://github.com/darach/eep-erl
+pkg_eep_app_commit = master
+
+PACKAGES += efene
+pkg_efene_name = efene
+pkg_efene_description = Alternative syntax for the Erlang Programming Language focusing on simplicity, ease of use and programmer UX
+pkg_efene_homepage = https://github.com/efene/efene
+pkg_efene_fetch = git
+pkg_efene_repo = https://github.com/efene/efene
+pkg_efene_commit = master
+
+PACKAGES += egeoip
+pkg_egeoip_name = egeoip
+pkg_egeoip_description = Erlang IP Geolocation module, currently supporting the MaxMind GeoLite City Database.
+pkg_egeoip_homepage = https://github.com/mochi/egeoip
+pkg_egeoip_fetch = git
+pkg_egeoip_repo = https://github.com/mochi/egeoip
+pkg_egeoip_commit = master
+
+PACKAGES += ehsa
+pkg_ehsa_name = ehsa
+pkg_ehsa_description = Erlang HTTP server basic and digest authentication modules
+pkg_ehsa_homepage = https://bitbucket.org/a12n/ehsa
+pkg_ehsa_fetch = hg
+pkg_ehsa_repo = https://bitbucket.org/a12n/ehsa
+pkg_ehsa_commit = default
+
+PACKAGES += ej
+pkg_ej_name = ej
+pkg_ej_description = Helper module for working with Erlang terms representing JSON
+pkg_ej_homepage = https://github.com/seth/ej
+pkg_ej_fetch = git
+pkg_ej_repo = https://github.com/seth/ej
+pkg_ej_commit = master
+
+PACKAGES += ejabberd
+pkg_ejabberd_name = ejabberd
+pkg_ejabberd_description = Robust, ubiquitous and massively scalable Jabber / XMPP Instant Messaging platform
+pkg_ejabberd_homepage = https://github.com/processone/ejabberd
+pkg_ejabberd_fetch = git
+pkg_ejabberd_repo = https://github.com/processone/ejabberd
+pkg_ejabberd_commit = master
+
+PACKAGES += ejwt
+pkg_ejwt_name = ejwt
+pkg_ejwt_description = Erlang library for JSON Web Tokens
+pkg_ejwt_homepage = https://github.com/artefactop/ejwt
+pkg_ejwt_fetch = git
+pkg_ejwt_repo = https://github.com/artefactop/ejwt
+pkg_ejwt_commit = master
+
+PACKAGES += ekaf
+pkg_ekaf_name = ekaf
+pkg_ekaf_description = A minimal, high-performance Kafka client in Erlang.
+pkg_ekaf_homepage = https://github.com/helpshift/ekaf
+pkg_ekaf_fetch = git
+pkg_ekaf_repo = https://github.com/helpshift/ekaf
+pkg_ekaf_commit = master
+
+PACKAGES += elarm
+pkg_elarm_name = elarm
+pkg_elarm_description = Alarm Manager for Erlang.
+pkg_elarm_homepage = https://github.com/esl/elarm
+pkg_elarm_fetch = git
+pkg_elarm_repo = https://github.com/esl/elarm
+pkg_elarm_commit = master
+
+PACKAGES += eleveldb
+pkg_eleveldb_name = eleveldb
+pkg_eleveldb_description = Erlang LevelDB API
+pkg_eleveldb_homepage = https://github.com/basho/eleveldb
+pkg_eleveldb_fetch = git
+pkg_eleveldb_repo = https://github.com/basho/eleveldb
+pkg_eleveldb_commit = master
+
+PACKAGES += elixir
+pkg_elixir_name = elixir
+pkg_elixir_description = Elixir is a dynamic, functional language designed for building scalable and maintainable applications
+pkg_elixir_homepage = https://elixir-lang.org/
+pkg_elixir_fetch = git
+pkg_elixir_repo = https://github.com/elixir-lang/elixir
+pkg_elixir_commit = master
+
+PACKAGES += elli
+pkg_elli_name = elli
+pkg_elli_description = Simple, robust and performant Erlang web server
+pkg_elli_homepage = https://github.com/elli-lib/elli
+pkg_elli_fetch = git
+pkg_elli_repo = https://github.com/elli-lib/elli
+pkg_elli_commit = master
+
+PACKAGES += elvis
+pkg_elvis_name = elvis
+pkg_elvis_description = Erlang Style Reviewer
+pkg_elvis_homepage = https://github.com/inaka/elvis
+pkg_elvis_fetch = git
+pkg_elvis_repo = https://github.com/inaka/elvis
+pkg_elvis_commit = master
+
+PACKAGES += emagick
+pkg_emagick_name = emagick
+pkg_emagick_description = Wrapper for Graphics/ImageMagick command line tool.
+pkg_emagick_homepage = https://github.com/kivra/emagick
+pkg_emagick_fetch = git
+pkg_emagick_repo = https://github.com/kivra/emagick
+pkg_emagick_commit = master
+
+PACKAGES += emysql
+pkg_emysql_name = emysql
+pkg_emysql_description = Stable, pure Erlang MySQL driver.
+pkg_emysql_homepage = https://github.com/Eonblast/Emysql
+pkg_emysql_fetch = git
+pkg_emysql_repo = https://github.com/Eonblast/Emysql
+pkg_emysql_commit = master
+
+PACKAGES += enm
+pkg_enm_name = enm
+pkg_enm_description = Erlang driver for nanomsg
+pkg_enm_homepage = https://github.com/basho/enm
+pkg_enm_fetch = git
+pkg_enm_repo = https://github.com/basho/enm
+pkg_enm_commit = master
+
+PACKAGES += entop
+pkg_entop_name = entop
+pkg_entop_description = A top-like tool for monitoring an Erlang node
+pkg_entop_homepage = https://github.com/mazenharake/entop
+pkg_entop_fetch = git
+pkg_entop_repo = https://github.com/mazenharake/entop
+pkg_entop_commit = master
+
+PACKAGES += epcap
+pkg_epcap_name = epcap
+pkg_epcap_description = Erlang packet capture interface using pcap
+pkg_epcap_homepage = https://github.com/msantos/epcap
+pkg_epcap_fetch = git
+pkg_epcap_repo = https://github.com/msantos/epcap
+pkg_epcap_commit = master
+
+PACKAGES += eper
+pkg_eper_name = eper
+pkg_eper_description = Erlang performance and debugging tools.
+pkg_eper_homepage = https://github.com/massemanet/eper
+pkg_eper_fetch = git
+pkg_eper_repo = https://github.com/massemanet/eper
+pkg_eper_commit = master
+
+PACKAGES += epgsql
+pkg_epgsql_name = epgsql
+pkg_epgsql_description = Erlang PostgreSQL client library.
+pkg_epgsql_homepage = https://github.com/epgsql/epgsql
+pkg_epgsql_fetch = git
+pkg_epgsql_repo = https://github.com/epgsql/epgsql
+pkg_epgsql_commit = master
+
+PACKAGES += episcina
+pkg_episcina_name = episcina
+pkg_episcina_description = A simple non intrusive resource pool for connections
+pkg_episcina_homepage = https://github.com/erlware/episcina
+pkg_episcina_fetch = git
+pkg_episcina_repo = https://github.com/erlware/episcina
+pkg_episcina_commit = master
+
+PACKAGES += eplot
+pkg_eplot_name = eplot
+pkg_eplot_description = A plot engine written in erlang.
+pkg_eplot_homepage = https://github.com/psyeugenic/eplot
+pkg_eplot_fetch = git
+pkg_eplot_repo = https://github.com/psyeugenic/eplot
+pkg_eplot_commit = master
+
+PACKAGES += epocxy
+pkg_epocxy_name = epocxy
+pkg_epocxy_description = Erlang Patterns of Concurrency
+pkg_epocxy_homepage = https://github.com/duomark/epocxy
+pkg_epocxy_fetch = git
+pkg_epocxy_repo = https://github.com/duomark/epocxy
+pkg_epocxy_commit = master
+
+PACKAGES += epubnub
+pkg_epubnub_name = epubnub
+pkg_epubnub_description = Erlang PubNub API
+pkg_epubnub_homepage = https://github.com/tsloughter/epubnub
+pkg_epubnub_fetch = git
+pkg_epubnub_repo = https://github.com/tsloughter/epubnub
+pkg_epubnub_commit = master
+
+PACKAGES += eqm
+pkg_eqm_name = eqm
+pkg_eqm_description = Erlang pub sub with supply-demand channels
+pkg_eqm_homepage = https://github.com/loucash/eqm
+pkg_eqm_fetch = git
+pkg_eqm_repo = https://github.com/loucash/eqm
+pkg_eqm_commit = master
+
+PACKAGES += eredis
+pkg_eredis_name = eredis
+pkg_eredis_description = Erlang Redis client
+pkg_eredis_homepage = https://github.com/wooga/eredis
+pkg_eredis_fetch = git
+pkg_eredis_repo = https://github.com/wooga/eredis
+pkg_eredis_commit = master
+
+PACKAGES += eredis_pool
+pkg_eredis_pool_name = eredis_pool
+pkg_eredis_pool_description = eredis_pool is a pool of Redis clients, using eredis and poolboy.
+pkg_eredis_pool_homepage = https://github.com/hiroeorz/eredis_pool
+pkg_eredis_pool_fetch = git
+pkg_eredis_pool_repo = https://github.com/hiroeorz/eredis_pool
+pkg_eredis_pool_commit = master
+
+PACKAGES += erl_streams
+pkg_erl_streams_name = erl_streams
+pkg_erl_streams_description = Streams in Erlang
+pkg_erl_streams_homepage = https://github.com/epappas/erl_streams
+pkg_erl_streams_fetch = git
+pkg_erl_streams_repo = https://github.com/epappas/erl_streams
+pkg_erl_streams_commit = master
+
+PACKAGES += erlang_cep
+pkg_erlang_cep_name = erlang_cep
+pkg_erlang_cep_description = A basic CEP package written in erlang
+pkg_erlang_cep_homepage = https://github.com/danmacklin/erlang_cep
+pkg_erlang_cep_fetch = git
+pkg_erlang_cep_repo = https://github.com/danmacklin/erlang_cep
+pkg_erlang_cep_commit = master
+
+PACKAGES += erlang_js
+pkg_erlang_js_name = erlang_js
+pkg_erlang_js_description = A linked-in driver for Erlang to Mozilla's SpiderMonkey JavaScript runtime.
+pkg_erlang_js_homepage = https://github.com/basho/erlang_js
+pkg_erlang_js_fetch = git
+pkg_erlang_js_repo = https://github.com/basho/erlang_js
+pkg_erlang_js_commit = master
+
+PACKAGES += erlang_localtime
+pkg_erlang_localtime_name = erlang_localtime
+pkg_erlang_localtime_description = Erlang library for conversion from one local time to another
+pkg_erlang_localtime_homepage = https://github.com/dmitryme/erlang_localtime
+pkg_erlang_localtime_fetch = git
+pkg_erlang_localtime_repo = https://github.com/dmitryme/erlang_localtime
+pkg_erlang_localtime_commit = master
+
+PACKAGES += erlang_smtp
+pkg_erlang_smtp_name = erlang_smtp
+pkg_erlang_smtp_description = Erlang SMTP and POP3 server code.
+pkg_erlang_smtp_homepage = https://github.com/tonyg/erlang-smtp
+pkg_erlang_smtp_fetch = git
+pkg_erlang_smtp_repo = https://github.com/tonyg/erlang-smtp
+pkg_erlang_smtp_commit = master
+
+PACKAGES += erlang_term
+pkg_erlang_term_name = erlang_term
+pkg_erlang_term_description = Erlang Term Info
+pkg_erlang_term_homepage = https://github.com/okeuday/erlang_term
+pkg_erlang_term_fetch = git
+pkg_erlang_term_repo = https://github.com/okeuday/erlang_term
+pkg_erlang_term_commit = master
+
+PACKAGES += erlastic_search
+pkg_erlastic_search_name = erlastic_search
+pkg_erlastic_search_description = An Erlang app for communicating with Elasticsearch's REST interface.
+pkg_erlastic_search_homepage = https://github.com/tsloughter/erlastic_search
+pkg_erlastic_search_fetch = git
+pkg_erlastic_search_repo = https://github.com/tsloughter/erlastic_search
+pkg_erlastic_search_commit = master
+
+PACKAGES += erlasticsearch
+pkg_erlasticsearch_name = erlasticsearch
+pkg_erlasticsearch_description = Erlang thrift interface to elastic_search
+pkg_erlasticsearch_homepage = https://github.com/dieswaytoofast/erlasticsearch
+pkg_erlasticsearch_fetch = git
+pkg_erlasticsearch_repo = https://github.com/dieswaytoofast/erlasticsearch
+pkg_erlasticsearch_commit = master
+
+PACKAGES += erlbrake
+pkg_erlbrake_name = erlbrake
+pkg_erlbrake_description = Erlang Airbrake notification client
+pkg_erlbrake_homepage = https://github.com/kenpratt/erlbrake
+pkg_erlbrake_fetch = git
+pkg_erlbrake_repo = https://github.com/kenpratt/erlbrake
+pkg_erlbrake_commit = master
+
+PACKAGES += erlcloud
+pkg_erlcloud_name = erlcloud
+pkg_erlcloud_description = Cloud Computing library for erlang (Amazon EC2, S3, SQS, SimpleDB, Mechanical Turk, ELB)
+pkg_erlcloud_homepage = https://github.com/gleber/erlcloud
+pkg_erlcloud_fetch = git
+pkg_erlcloud_repo = https://github.com/gleber/erlcloud
+pkg_erlcloud_commit = master
+
+PACKAGES += erlcron
+pkg_erlcron_name = erlcron
+pkg_erlcron_description = Erlang cronish system
+pkg_erlcron_homepage = https://github.com/erlware/erlcron
+pkg_erlcron_fetch = git
+pkg_erlcron_repo = https://github.com/erlware/erlcron
+pkg_erlcron_commit = master
+
+PACKAGES += erldb
+pkg_erldb_name = erldb
+pkg_erldb_description = ORM (Object-relational mapping) application implemented in Erlang
+pkg_erldb_homepage = http://erldb.org
+pkg_erldb_fetch = git
+pkg_erldb_repo = https://github.com/erldb/erldb
+pkg_erldb_commit = master
+
+PACKAGES += erldis
+pkg_erldis_name = erldis
+pkg_erldis_description = redis erlang client library
+pkg_erldis_homepage = https://github.com/cstar/erldis
+pkg_erldis_fetch = git
+pkg_erldis_repo = https://github.com/cstar/erldis
+pkg_erldis_commit = master
+
+PACKAGES += erldns
+pkg_erldns_name = erldns
+pkg_erldns_description = DNS server, in erlang.
+pkg_erldns_homepage = https://github.com/aetrion/erl-dns
+pkg_erldns_fetch = git
+pkg_erldns_repo = https://github.com/aetrion/erl-dns
+pkg_erldns_commit = master
+
+PACKAGES += erldocker
+pkg_erldocker_name = erldocker
+pkg_erldocker_description = Docker Remote API client for Erlang
+pkg_erldocker_homepage = https://github.com/proger/erldocker
+pkg_erldocker_fetch = git
+pkg_erldocker_repo = https://github.com/proger/erldocker
+pkg_erldocker_commit = master
+
+PACKAGES += erlfsmon
+pkg_erlfsmon_name = erlfsmon
+pkg_erlfsmon_description = Erlang filesystem event watcher for Linux and OSX
+pkg_erlfsmon_homepage = https://github.com/proger/erlfsmon
+pkg_erlfsmon_fetch = git
+pkg_erlfsmon_repo = https://github.com/proger/erlfsmon
+pkg_erlfsmon_commit = master
+
+PACKAGES += erlgit
+pkg_erlgit_name = erlgit
+pkg_erlgit_description = Erlang convenience wrapper around git executable
+pkg_erlgit_homepage = https://github.com/gleber/erlgit
+pkg_erlgit_fetch = git
+pkg_erlgit_repo = https://github.com/gleber/erlgit
+pkg_erlgit_commit = master
+
+PACKAGES += erlguten
+pkg_erlguten_name = erlguten
+pkg_erlguten_description = ErlGuten is a system for high-quality typesetting, written purely in Erlang.
+pkg_erlguten_homepage = https://github.com/richcarl/erlguten
+pkg_erlguten_fetch = git
+pkg_erlguten_repo = https://github.com/richcarl/erlguten
+pkg_erlguten_commit = master
+
+PACKAGES += erlmc
+pkg_erlmc_name = erlmc
+pkg_erlmc_description = Erlang memcached binary protocol client
+pkg_erlmc_homepage = https://github.com/jkvor/erlmc
+pkg_erlmc_fetch = git
+pkg_erlmc_repo = https://github.com/jkvor/erlmc
+pkg_erlmc_commit = master
+
+PACKAGES += erlmongo
+pkg_erlmongo_name = erlmongo
+pkg_erlmongo_description = Record based Erlang driver for MongoDB with gridfs support
+pkg_erlmongo_homepage = https://github.com/SergejJurecko/erlmongo
+pkg_erlmongo_fetch = git
+pkg_erlmongo_repo = https://github.com/SergejJurecko/erlmongo
+pkg_erlmongo_commit = master
+
+PACKAGES += erlog
+pkg_erlog_name = erlog
+pkg_erlog_description = Prolog interpreter in and for Erlang
+pkg_erlog_homepage = https://github.com/rvirding/erlog
+pkg_erlog_fetch = git
+pkg_erlog_repo = https://github.com/rvirding/erlog
+pkg_erlog_commit = master
+
+PACKAGES += erlpass
+pkg_erlpass_name = erlpass
+pkg_erlpass_description = A library to handle password hashing and changing in a safe manner, independent from any kind of storage whatsoever.
+pkg_erlpass_homepage = https://github.com/ferd/erlpass
+pkg_erlpass_fetch = git
+pkg_erlpass_repo = https://github.com/ferd/erlpass
+pkg_erlpass_commit = master
+
+PACKAGES += erlport
+pkg_erlport_name = erlport
+pkg_erlport_description = ErlPort - connect Erlang to other languages
+pkg_erlport_homepage = https://github.com/hdima/erlport
+pkg_erlport_fetch = git
+pkg_erlport_repo = https://github.com/hdima/erlport
+pkg_erlport_commit = master
+
+PACKAGES += erlsh
+pkg_erlsh_name = erlsh
+pkg_erlsh_description = Erlang shell tools
+pkg_erlsh_homepage = https://github.com/proger/erlsh
+pkg_erlsh_fetch = git
+pkg_erlsh_repo = https://github.com/proger/erlsh
+pkg_erlsh_commit = master
+
+PACKAGES += erlsha2
+pkg_erlsha2_name = erlsha2
+pkg_erlsha2_description = SHA-224, SHA-256, SHA-384, SHA-512 implemented in Erlang NIFs.
+pkg_erlsha2_homepage = https://github.com/vinoski/erlsha2
+pkg_erlsha2_fetch = git
+pkg_erlsha2_repo = https://github.com/vinoski/erlsha2
+pkg_erlsha2_commit = master
+
+PACKAGES += erlsom
+pkg_erlsom_name = erlsom
+pkg_erlsom_description = XML parser for Erlang
+pkg_erlsom_homepage = https://github.com/willemdj/erlsom
+pkg_erlsom_fetch = git
+pkg_erlsom_repo = https://github.com/willemdj/erlsom
+pkg_erlsom_commit = master
+
+PACKAGES += erlubi
+pkg_erlubi_name = erlubi
+pkg_erlubi_description = Ubigraph Erlang Client (and Process Visualizer)
+pkg_erlubi_homepage = https://github.com/krestenkrab/erlubi
+pkg_erlubi_fetch = git
+pkg_erlubi_repo = https://github.com/krestenkrab/erlubi
+pkg_erlubi_commit = master
+
+PACKAGES += erlvolt
+pkg_erlvolt_name = erlvolt
+pkg_erlvolt_description = VoltDB Erlang Client Driver
+pkg_erlvolt_homepage = https://github.com/VoltDB/voltdb-client-erlang
+pkg_erlvolt_fetch = git
+pkg_erlvolt_repo = https://github.com/VoltDB/voltdb-client-erlang
+pkg_erlvolt_commit = master
+
+PACKAGES += erlware_commons
+pkg_erlware_commons_name = erlware_commons
+pkg_erlware_commons_description = Erlware Commons is an Erlware project focused on all aspects of reusable Erlang components.
+pkg_erlware_commons_homepage = https://github.com/erlware/erlware_commons
+pkg_erlware_commons_fetch = git
+pkg_erlware_commons_repo = https://github.com/erlware/erlware_commons
+pkg_erlware_commons_commit = master
+
+PACKAGES += erlydtl
+pkg_erlydtl_name = erlydtl
+pkg_erlydtl_description = Django Template Language for Erlang.
+pkg_erlydtl_homepage = https://github.com/erlydtl/erlydtl
+pkg_erlydtl_fetch = git
+pkg_erlydtl_repo = https://github.com/erlydtl/erlydtl
+pkg_erlydtl_commit = master
+
+PACKAGES += errd
+pkg_errd_name = errd
+pkg_errd_description = Erlang RRDTool library
+pkg_errd_homepage = https://github.com/archaelus/errd
+pkg_errd_fetch = git
+pkg_errd_repo = https://github.com/archaelus/errd
+pkg_errd_commit = master
+
+PACKAGES += erserve
+pkg_erserve_name = erserve
+pkg_erserve_description = Erlang/Rserve communication interface
+pkg_erserve_homepage = https://github.com/del/erserve
+pkg_erserve_fetch = git
+pkg_erserve_repo = https://github.com/del/erserve
+pkg_erserve_commit = master
+
+PACKAGES += erwa
+pkg_erwa_name = erwa
+pkg_erwa_description = A WAMP router and client written in Erlang.
+pkg_erwa_homepage = https://github.com/bwegh/erwa
+pkg_erwa_fetch = git
+pkg_erwa_repo = https://github.com/bwegh/erwa
+pkg_erwa_commit = master
+
+PACKAGES += escalus
+pkg_escalus_name = escalus
+pkg_escalus_description = An XMPP client library in Erlang for conveniently testing XMPP servers
+pkg_escalus_homepage = https://github.com/esl/escalus
+pkg_escalus_fetch = git
+pkg_escalus_repo = https://github.com/esl/escalus
+pkg_escalus_commit = master
+
+PACKAGES += esh_mk
+pkg_esh_mk_name = esh_mk
+pkg_esh_mk_description = esh template engine plugin for erlang.mk
+pkg_esh_mk_homepage = https://github.com/crownedgrouse/esh.mk
+pkg_esh_mk_fetch = git
+pkg_esh_mk_repo = https://github.com/crownedgrouse/esh.mk.git
+pkg_esh_mk_commit = master
+
+PACKAGES += espec
+pkg_espec_name = espec
+pkg_espec_description = ESpec: Behaviour driven development framework for Erlang
+pkg_espec_homepage = https://github.com/lucaspiller/espec
+pkg_espec_fetch = git
+pkg_espec_repo = https://github.com/lucaspiller/espec
+pkg_espec_commit = master
+
+PACKAGES += estatsd
+pkg_estatsd_name = estatsd
+pkg_estatsd_description = Erlang stats aggregation app that periodically flushes data to graphite
+pkg_estatsd_homepage = https://github.com/RJ/estatsd
+pkg_estatsd_fetch = git
+pkg_estatsd_repo = https://github.com/RJ/estatsd
+pkg_estatsd_commit = master
+
+PACKAGES += etap
+pkg_etap_name = etap
+pkg_etap_description = etap is a simple erlang testing library that provides TAP compliant output.
+pkg_etap_homepage = https://github.com/ngerakines/etap
+pkg_etap_fetch = git
+pkg_etap_repo = https://github.com/ngerakines/etap
+pkg_etap_commit = master
+
+PACKAGES += etest
+pkg_etest_name = etest
+pkg_etest_description = A lightweight, convention over configuration test framework for Erlang
+pkg_etest_homepage = https://github.com/wooga/etest
+pkg_etest_fetch = git
+pkg_etest_repo = https://github.com/wooga/etest
+pkg_etest_commit = master
+
+PACKAGES += etest_http
+pkg_etest_http_name = etest_http
+pkg_etest_http_description = etest Assertions around HTTP (client-side)
+pkg_etest_http_homepage = https://github.com/wooga/etest_http
+pkg_etest_http_fetch = git
+pkg_etest_http_repo = https://github.com/wooga/etest_http
+pkg_etest_http_commit = master
+
+PACKAGES += etoml
+pkg_etoml_name = etoml
+pkg_etoml_description = TOML language erlang parser
+pkg_etoml_homepage = https://github.com/kalta/etoml
+pkg_etoml_fetch = git
+pkg_etoml_repo = https://github.com/kalta/etoml
+pkg_etoml_commit = master
+
+PACKAGES += eunit
+pkg_eunit_name = eunit
+pkg_eunit_description = The EUnit lightweight unit testing framework for Erlang - this is the canonical development repository.
+pkg_eunit_homepage = https://github.com/richcarl/eunit
+pkg_eunit_fetch = git
+pkg_eunit_repo = https://github.com/richcarl/eunit
+pkg_eunit_commit = master
+
+PACKAGES += eunit_formatters
+pkg_eunit_formatters_name = eunit_formatters
+pkg_eunit_formatters_description = Because eunit's output sucks. Let's make it better.
+pkg_eunit_formatters_homepage = https://github.com/seancribbs/eunit_formatters
+pkg_eunit_formatters_fetch = git
+pkg_eunit_formatters_repo = https://github.com/seancribbs/eunit_formatters
+pkg_eunit_formatters_commit = master
+
+PACKAGES += euthanasia
+pkg_euthanasia_name = euthanasia
+pkg_euthanasia_description = Merciful killer for your Erlang processes
+pkg_euthanasia_homepage = https://github.com/doubleyou/euthanasia
+pkg_euthanasia_fetch = git
+pkg_euthanasia_repo = https://github.com/doubleyou/euthanasia
+pkg_euthanasia_commit = master
+
+PACKAGES += evum
+pkg_evum_name = evum
+pkg_evum_description = Spawn Linux VMs as Erlang processes in the Erlang VM
+pkg_evum_homepage = https://github.com/msantos/evum
+pkg_evum_fetch = git
+pkg_evum_repo = https://github.com/msantos/evum
+pkg_evum_commit = master
+
+PACKAGES += exec
+pkg_exec_name = erlexec
+pkg_exec_description = Execute and control OS processes from Erlang/OTP.
+pkg_exec_homepage = http://saleyn.github.com/erlexec
+pkg_exec_fetch = git
+pkg_exec_repo = https://github.com/saleyn/erlexec
+pkg_exec_commit = master
+
+PACKAGES += exml
+pkg_exml_name = exml
+pkg_exml_description = XML parsing library in Erlang
+pkg_exml_homepage = https://github.com/paulgray/exml
+pkg_exml_fetch = git
+pkg_exml_repo = https://github.com/paulgray/exml
+pkg_exml_commit = master
+
+PACKAGES += exometer
+pkg_exometer_name = exometer
+pkg_exometer_description = Basic measurement objects and probe behavior
+pkg_exometer_homepage = https://github.com/Feuerlabs/exometer
+pkg_exometer_fetch = git
+pkg_exometer_repo = https://github.com/Feuerlabs/exometer
+pkg_exometer_commit = master
+
+PACKAGES += exs1024
+pkg_exs1024_name = exs1024
+pkg_exs1024_description = Xorshift1024star pseudo random number generator for Erlang.
+pkg_exs1024_homepage = https://github.com/jj1bdx/exs1024
+pkg_exs1024_fetch = git
+pkg_exs1024_repo = https://github.com/jj1bdx/exs1024
+pkg_exs1024_commit = master
+
+PACKAGES += exs64
+pkg_exs64_name = exs64
+pkg_exs64_description = Xorshift64star pseudo random number generator for Erlang.
+pkg_exs64_homepage = https://github.com/jj1bdx/exs64
+pkg_exs64_fetch = git
+pkg_exs64_repo = https://github.com/jj1bdx/exs64
+pkg_exs64_commit = master
+
+PACKAGES += exsplus116
+pkg_exsplus116_name = exsplus116
+pkg_exsplus116_description = Xorshift116plus for Erlang
+pkg_exsplus116_homepage = https://github.com/jj1bdx/exsplus116
+pkg_exsplus116_fetch = git
+pkg_exsplus116_repo = https://github.com/jj1bdx/exsplus116
+pkg_exsplus116_commit = master
+
+PACKAGES += exsplus128
+pkg_exsplus128_name = exsplus128
+pkg_exsplus128_description = Xorshift128plus pseudo random number generator for Erlang.
+pkg_exsplus128_homepage = https://github.com/jj1bdx/exsplus128
+pkg_exsplus128_fetch = git
+pkg_exsplus128_repo = https://github.com/jj1bdx/exsplus128
+pkg_exsplus128_commit = master
+
+PACKAGES += ezmq
+pkg_ezmq_name = ezmq
+pkg_ezmq_description = zMQ implemented in Erlang
+pkg_ezmq_homepage = https://github.com/RoadRunnr/ezmq
+pkg_ezmq_fetch = git
+pkg_ezmq_repo = https://github.com/RoadRunnr/ezmq
+pkg_ezmq_commit = master
+
+PACKAGES += ezmtp
+pkg_ezmtp_name = ezmtp
+pkg_ezmtp_description = ZMTP protocol in pure Erlang.
+pkg_ezmtp_homepage = https://github.com/a13x/ezmtp
+pkg_ezmtp_fetch = git
+pkg_ezmtp_repo = https://github.com/a13x/ezmtp
+pkg_ezmtp_commit = master
+
+PACKAGES += fast_disk_log
+pkg_fast_disk_log_name = fast_disk_log
+pkg_fast_disk_log_description = Pool-based asynchronous Erlang disk logger
+pkg_fast_disk_log_homepage = https://github.com/lpgauth/fast_disk_log
+pkg_fast_disk_log_fetch = git
+pkg_fast_disk_log_repo = https://github.com/lpgauth/fast_disk_log
+pkg_fast_disk_log_commit = master
+
+PACKAGES += feeder
+pkg_feeder_name = feeder
+pkg_feeder_description = Stream parser for RSS and Atom formatted XML feeds.
+pkg_feeder_homepage = https://github.com/michaelnisi/feeder
+pkg_feeder_fetch = git
+pkg_feeder_repo = https://github.com/michaelnisi/feeder
+pkg_feeder_commit = master
+
+PACKAGES += find_crate
+pkg_find_crate_name = find_crate
+pkg_find_crate_description = Find Rust libs and exes in Erlang application priv directory
+pkg_find_crate_homepage = https://github.com/goertzenator/find_crate
+pkg_find_crate_fetch = git
+pkg_find_crate_repo = https://github.com/goertzenator/find_crate
+pkg_find_crate_commit = master
+
+PACKAGES += fix
+pkg_fix_name = fix
+pkg_fix_description = Implementation of the FIX protocol (http://fixprotocol.org/).
+pkg_fix_homepage = https://github.com/maxlapshin/fix
+pkg_fix_fetch = git
+pkg_fix_repo = https://github.com/maxlapshin/fix
+pkg_fix_commit = master
+
+PACKAGES += flower
+pkg_flower_name = flower
+pkg_flower_description = FlowER - an Erlang OpenFlow development platform
+pkg_flower_homepage = https://github.com/travelping/flower
+pkg_flower_fetch = git
+pkg_flower_repo = https://github.com/travelping/flower
+pkg_flower_commit = master
+
+PACKAGES += fn
+pkg_fn_name = fn
+pkg_fn_description = Function utilities for Erlang
+pkg_fn_homepage = https://github.com/reiddraper/fn
+pkg_fn_fetch = git
+pkg_fn_repo = https://github.com/reiddraper/fn
+pkg_fn_commit = master
+
+PACKAGES += folsom
+pkg_folsom_name = folsom
+pkg_folsom_description = Expose Erlang Events and Metrics
+pkg_folsom_homepage = https://github.com/boundary/folsom
+pkg_folsom_fetch = git
+pkg_folsom_repo = https://github.com/boundary/folsom
+pkg_folsom_commit = master
+
+PACKAGES += folsom_cowboy
+pkg_folsom_cowboy_name = folsom_cowboy
+pkg_folsom_cowboy_description = A Cowboy based Folsom HTTP Wrapper.
+pkg_folsom_cowboy_homepage = https://github.com/boundary/folsom_cowboy
+pkg_folsom_cowboy_fetch = git
+pkg_folsom_cowboy_repo = https://github.com/boundary/folsom_cowboy
+pkg_folsom_cowboy_commit = master
+
+PACKAGES += folsomite
+pkg_folsomite_name = folsomite
+pkg_folsomite_description = blow up your graphite / riemann server with folsom metrics
+pkg_folsomite_homepage = https://github.com/campanja/folsomite
+pkg_folsomite_fetch = git
+pkg_folsomite_repo = https://github.com/campanja/folsomite
+pkg_folsomite_commit = master
+
+PACKAGES += fs
+pkg_fs_name = fs
+pkg_fs_description = Erlang FileSystem Listener
+pkg_fs_homepage = https://github.com/synrc/fs
+pkg_fs_fetch = git
+pkg_fs_repo = https://github.com/synrc/fs
+pkg_fs_commit = master
+
+PACKAGES += fuse
+pkg_fuse_name = fuse
+pkg_fuse_description = A Circuit Breaker for Erlang
+pkg_fuse_homepage = https://github.com/jlouis/fuse
+pkg_fuse_fetch = git
+pkg_fuse_repo = https://github.com/jlouis/fuse
+pkg_fuse_commit = master
+
+PACKAGES += gcm
+pkg_gcm_name = gcm
+pkg_gcm_description = An Erlang application for Google Cloud Messaging
+pkg_gcm_homepage = https://github.com/pdincau/gcm-erlang
+pkg_gcm_fetch = git
+pkg_gcm_repo = https://github.com/pdincau/gcm-erlang
+pkg_gcm_commit = master
+
+PACKAGES += gcprof
+pkg_gcprof_name = gcprof
+pkg_gcprof_description = Garbage Collection profiler for Erlang
+pkg_gcprof_homepage = https://github.com/knutin/gcprof
+pkg_gcprof_fetch = git
+pkg_gcprof_repo = https://github.com/knutin/gcprof
+pkg_gcprof_commit = master
+
+PACKAGES += geas
+pkg_geas_name = geas
+pkg_geas_description = Guess Erlang Application Scattering
+pkg_geas_homepage = https://github.com/crownedgrouse/geas
+pkg_geas_fetch = git
+pkg_geas_repo = https://github.com/crownedgrouse/geas
+pkg_geas_commit = master
+
+PACKAGES += geef
+pkg_geef_name = geef
+pkg_geef_description = Git NEEEEF (Erlang NIF)
+pkg_geef_homepage = https://github.com/carlosmn/geef
+pkg_geef_fetch = git
+pkg_geef_repo = https://github.com/carlosmn/geef
+pkg_geef_commit = master
+
+PACKAGES += gen_coap
+pkg_gen_coap_name = gen_coap
+pkg_gen_coap_description = Generic Erlang CoAP Client/Server
+pkg_gen_coap_homepage = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_fetch = git
+pkg_gen_coap_repo = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_commit = master
+
+PACKAGES += gen_cycle
+pkg_gen_cycle_name = gen_cycle
+pkg_gen_cycle_description = Simple, generic OTP behaviour for recurring tasks
+pkg_gen_cycle_homepage = https://github.com/aerosol/gen_cycle
+pkg_gen_cycle_fetch = git
+pkg_gen_cycle_repo = https://github.com/aerosol/gen_cycle
+pkg_gen_cycle_commit = develop
+
+PACKAGES += gen_icmp
+pkg_gen_icmp_name = gen_icmp
+pkg_gen_icmp_description = Erlang interface to ICMP sockets
+pkg_gen_icmp_homepage = https://github.com/msantos/gen_icmp
+pkg_gen_icmp_fetch = git
+pkg_gen_icmp_repo = https://github.com/msantos/gen_icmp
+pkg_gen_icmp_commit = master
+
+PACKAGES += gen_leader
+pkg_gen_leader_name = gen_leader
+pkg_gen_leader_description = leader election behavior
+pkg_gen_leader_homepage = https://github.com/garret-smith/gen_leader_revival
+pkg_gen_leader_fetch = git
+pkg_gen_leader_repo = https://github.com/garret-smith/gen_leader_revival
+pkg_gen_leader_commit = master
+
+PACKAGES += gen_nb_server
+pkg_gen_nb_server_name = gen_nb_server
+pkg_gen_nb_server_description = OTP behavior for writing non-blocking servers
+pkg_gen_nb_server_homepage = https://github.com/kevsmith/gen_nb_server
+pkg_gen_nb_server_fetch = git
+pkg_gen_nb_server_repo = https://github.com/kevsmith/gen_nb_server
+pkg_gen_nb_server_commit = master
+
+PACKAGES += gen_paxos
+pkg_gen_paxos_name = gen_paxos
+pkg_gen_paxos_description = An Erlang/OTP-style implementation of the PAXOS distributed consensus protocol
+pkg_gen_paxos_homepage = https://github.com/gburd/gen_paxos
+pkg_gen_paxos_fetch = git
+pkg_gen_paxos_repo = https://github.com/gburd/gen_paxos
+pkg_gen_paxos_commit = master
+
+PACKAGES += gen_rpc
+pkg_gen_rpc_name = gen_rpc
+pkg_gen_rpc_description = A scalable RPC library for Erlang-VM based languages
+pkg_gen_rpc_homepage = https://github.com/priestjim/gen_rpc.git
+pkg_gen_rpc_fetch = git
+pkg_gen_rpc_repo = https://github.com/priestjim/gen_rpc.git
+pkg_gen_rpc_commit = master
+
+PACKAGES += gen_smtp
+pkg_gen_smtp_name = gen_smtp
+pkg_gen_smtp_description = A generic Erlang SMTP server and client that can be extended via callback modules
+pkg_gen_smtp_homepage = https://github.com/Vagabond/gen_smtp
+pkg_gen_smtp_fetch = git
+pkg_gen_smtp_repo = https://github.com/Vagabond/gen_smtp
+pkg_gen_smtp_commit = master
+
+PACKAGES += gen_tracker
+pkg_gen_tracker_name = gen_tracker
+pkg_gen_tracker_description = supervisor with ETS handling of children and their metadata
+pkg_gen_tracker_homepage = https://github.com/erlyvideo/gen_tracker
+pkg_gen_tracker_fetch = git
+pkg_gen_tracker_repo = https://github.com/erlyvideo/gen_tracker
+pkg_gen_tracker_commit = master
+
+PACKAGES += gen_unix
+pkg_gen_unix_name = gen_unix
+pkg_gen_unix_description = Erlang Unix socket interface
+pkg_gen_unix_homepage = https://github.com/msantos/gen_unix
+pkg_gen_unix_fetch = git
+pkg_gen_unix_repo = https://github.com/msantos/gen_unix
+pkg_gen_unix_commit = master
+
+PACKAGES += geode
+pkg_geode_name = geode
+pkg_geode_description = geohash/proximity lookup in pure, uncut erlang.
+pkg_geode_homepage = https://github.com/bradfordw/geode
+pkg_geode_fetch = git
+pkg_geode_repo = https://github.com/bradfordw/geode
+pkg_geode_commit = master
+
+PACKAGES += getopt
+pkg_getopt_name = getopt
+pkg_getopt_description = Module to parse command line arguments using the GNU getopt syntax
+pkg_getopt_homepage = https://github.com/jcomellas/getopt
+pkg_getopt_fetch = git
+pkg_getopt_repo = https://github.com/jcomellas/getopt
+pkg_getopt_commit = master
+
+PACKAGES += gettext
+pkg_gettext_name = gettext
+pkg_gettext_description = Erlang internationalization library.
+pkg_gettext_homepage = https://github.com/etnt/gettext
+pkg_gettext_fetch = git
+pkg_gettext_repo = https://github.com/etnt/gettext
+pkg_gettext_commit = master
+
+PACKAGES += giallo
+pkg_giallo_name = giallo
+pkg_giallo_description = Small and flexible web framework on top of Cowboy
+pkg_giallo_homepage = https://github.com/kivra/giallo
+pkg_giallo_fetch = git
+pkg_giallo_repo = https://github.com/kivra/giallo
+pkg_giallo_commit = master
+
+PACKAGES += gin
+pkg_gin_name = gin
+pkg_gin_description = Guard helpers for Erlang, implemented as a parse_transform
+pkg_gin_homepage = https://github.com/mad-cocktail/gin
+pkg_gin_fetch = git
+pkg_gin_repo = https://github.com/mad-cocktail/gin
+pkg_gin_commit = master
+
+PACKAGES += gitty
+pkg_gitty_name = gitty
+pkg_gitty_description = Git access in Erlang
+pkg_gitty_homepage = https://github.com/maxlapshin/gitty
+pkg_gitty_fetch = git
+pkg_gitty_repo = https://github.com/maxlapshin/gitty
+pkg_gitty_commit = master
+
+PACKAGES += gold_fever
+pkg_gold_fever_name = gold_fever
+pkg_gold_fever_description = A Treasure Hunt for Erlangers
+pkg_gold_fever_homepage = https://github.com/inaka/gold_fever
+pkg_gold_fever_fetch = git
+pkg_gold_fever_repo = https://github.com/inaka/gold_fever
+pkg_gold_fever_commit = master
+
+PACKAGES += gpb
+pkg_gpb_name = gpb
+pkg_gpb_description = A Google Protobuf implementation for Erlang
+pkg_gpb_homepage = https://github.com/tomas-abrahamsson/gpb
+pkg_gpb_fetch = git
+pkg_gpb_repo = https://github.com/tomas-abrahamsson/gpb
+pkg_gpb_commit = master
+
+PACKAGES += gproc
+pkg_gproc_name = gproc
+pkg_gproc_description = Extended process registry for Erlang
+pkg_gproc_homepage = https://github.com/uwiger/gproc
+pkg_gproc_fetch = git
+pkg_gproc_repo = https://github.com/uwiger/gproc
+pkg_gproc_commit = master
+
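+# A minimal usage sketch, assuming the standard erlang.mk workflow: a project
+# Makefile lists an index entry such as gproc by name in DEPS, and the
+# pkg_*_fetch, pkg_*_repo and pkg_*_commit values above supply the fetch
+# defaults ("my_app" below is a hypothetical project name).
+#
+#     PROJECT = my_app
+#     DEPS = gproc
+#     include erlang.mk
+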
+PACKAGES += grapherl
+pkg_grapherl_name = grapherl
+pkg_grapherl_description = Create graphs of Erlang systems and programs
+pkg_grapherl_homepage = https://github.com/eproxus/grapherl
+pkg_grapherl_fetch = git
+pkg_grapherl_repo = https://github.com/eproxus/grapherl
+pkg_grapherl_commit = master
+
+PACKAGES += grpc
+pkg_grpc_name = grpc
+pkg_grpc_description = gRPC server in Erlang
+pkg_grpc_homepage = https://github.com/Bluehouse-Technology/grpc
+pkg_grpc_fetch = git
+pkg_grpc_repo = https://github.com/Bluehouse-Technology/grpc
+pkg_grpc_commit = master
+
+PACKAGES += grpc_client
+pkg_grpc_client_name = grpc_client
+pkg_grpc_client_description = gRPC client in Erlang
+pkg_grpc_client_homepage = https://github.com/Bluehouse-Technology/grpc_client
+pkg_grpc_client_fetch = git
+pkg_grpc_client_repo = https://github.com/Bluehouse-Technology/grpc_client
+pkg_grpc_client_commit = master
+
+PACKAGES += gun
+pkg_gun_name = gun
+pkg_gun_description = Asynchronous SPDY, HTTP and Websocket client written in Erlang.
+pkg_gun_homepage = http://ninenines.eu
+pkg_gun_fetch = git
+pkg_gun_repo = https://github.com/ninenines/gun
+pkg_gun_commit = master
+
+PACKAGES += gut
+pkg_gut_name = gut
+pkg_gut_description = gut is a template printing, aka scaffolding, tool for Erlang. Like rails generate or yeoman.
+pkg_gut_homepage = https://github.com/unbalancedparentheses/gut
+pkg_gut_fetch = git
+pkg_gut_repo = https://github.com/unbalancedparentheses/gut
+pkg_gut_commit = master
+
+PACKAGES += hackney
+pkg_hackney_name = hackney
+pkg_hackney_description = simple HTTP client in Erlang
+pkg_hackney_homepage = https://github.com/benoitc/hackney
+pkg_hackney_fetch = git
+pkg_hackney_repo = https://github.com/benoitc/hackney
+pkg_hackney_commit = master
+
+PACKAGES += hamcrest
+pkg_hamcrest_name = hamcrest
+pkg_hamcrest_description = Erlang port of Hamcrest
+pkg_hamcrest_homepage = https://github.com/hyperthunk/hamcrest-erlang
+pkg_hamcrest_fetch = git
+pkg_hamcrest_repo = https://github.com/hyperthunk/hamcrest-erlang
+pkg_hamcrest_commit = master
+
+PACKAGES += hanoidb
+pkg_hanoidb_name = hanoidb
+pkg_hanoidb_description = Erlang LSM BTree Storage
+pkg_hanoidb_homepage = https://github.com/krestenkrab/hanoidb
+pkg_hanoidb_fetch = git
+pkg_hanoidb_repo = https://github.com/krestenkrab/hanoidb
+pkg_hanoidb_commit = master
+
+PACKAGES += hottub
+pkg_hottub_name = hottub
+pkg_hottub_description = Permanent Erlang Worker Pool
+pkg_hottub_homepage = https://github.com/bfrog/hottub
+pkg_hottub_fetch = git
+pkg_hottub_repo = https://github.com/bfrog/hottub
+pkg_hottub_commit = master
+
+PACKAGES += hpack
+pkg_hpack_name = hpack
+pkg_hpack_description = HPACK Implementation for Erlang
+pkg_hpack_homepage = https://github.com/joedevivo/hpack
+pkg_hpack_fetch = git
+pkg_hpack_repo = https://github.com/joedevivo/hpack
+pkg_hpack_commit = master
+
+PACKAGES += hyper
+pkg_hyper_name = hyper
+pkg_hyper_description = Erlang implementation of HyperLogLog
+pkg_hyper_homepage = https://github.com/GameAnalytics/hyper
+pkg_hyper_fetch = git
+pkg_hyper_repo = https://github.com/GameAnalytics/hyper
+pkg_hyper_commit = master
+
+PACKAGES += i18n
+pkg_i18n_name = i18n
+pkg_i18n_description = International components for unicode from Erlang (unicode, date, string, number, format, locale, localization, transliteration, icu4e)
+pkg_i18n_homepage = https://github.com/erlang-unicode/i18n
+pkg_i18n_fetch = git
+pkg_i18n_repo = https://github.com/erlang-unicode/i18n
+pkg_i18n_commit = master
+
+PACKAGES += ibrowse
+pkg_ibrowse_name = ibrowse
+pkg_ibrowse_description = Erlang HTTP client
+pkg_ibrowse_homepage = https://github.com/cmullaparthi/ibrowse
+pkg_ibrowse_fetch = git
+pkg_ibrowse_repo = https://github.com/cmullaparthi/ibrowse
+pkg_ibrowse_commit = master
+
+PACKAGES += idna
+pkg_idna_name = idna
+pkg_idna_description = Erlang IDNA lib
+pkg_idna_homepage = https://github.com/benoitc/erlang-idna
+pkg_idna_fetch = git
+pkg_idna_repo = https://github.com/benoitc/erlang-idna
+pkg_idna_commit = master
+
+PACKAGES += ierlang
+pkg_ierlang_name = ierlang
+pkg_ierlang_description = An Erlang language kernel for IPython.
+pkg_ierlang_homepage = https://github.com/robbielynch/ierlang
+pkg_ierlang_fetch = git
+pkg_ierlang_repo = https://github.com/robbielynch/ierlang
+pkg_ierlang_commit = master
+
+PACKAGES += iota
+pkg_iota_name = iota
+pkg_iota_description = iota (Inter-dependency Objective Testing Apparatus) - a tool to enforce clean separation of responsibilities in Erlang code
+pkg_iota_homepage = https://github.com/jpgneves/iota
+pkg_iota_fetch = git
+pkg_iota_repo = https://github.com/jpgneves/iota
+pkg_iota_commit = master
+
+PACKAGES += irc_lib
+pkg_irc_lib_name = irc_lib
+pkg_irc_lib_description = Erlang IRC client library
+pkg_irc_lib_homepage = https://github.com/OtpChatBot/irc_lib
+pkg_irc_lib_fetch = git
+pkg_irc_lib_repo = https://github.com/OtpChatBot/irc_lib
+pkg_irc_lib_commit = master
+
+PACKAGES += ircd
+pkg_ircd_name = ircd
+pkg_ircd_description = A pluggable IRC daemon application/library for Erlang.
+pkg_ircd_homepage = https://github.com/tonyg/erlang-ircd
+pkg_ircd_fetch = git
+pkg_ircd_repo = https://github.com/tonyg/erlang-ircd
+pkg_ircd_commit = master
+
+PACKAGES += iris
+pkg_iris_name = iris
+pkg_iris_description = Iris Erlang binding
+pkg_iris_homepage = https://github.com/project-iris/iris-erl
+pkg_iris_fetch = git
+pkg_iris_repo = https://github.com/project-iris/iris-erl
+pkg_iris_commit = master
+
+PACKAGES += iso8601
+pkg_iso8601_name = iso8601
+pkg_iso8601_description = Erlang ISO 8601 date formatter/parser
+pkg_iso8601_homepage = https://github.com/seansawyer/erlang_iso8601
+pkg_iso8601_fetch = git
+pkg_iso8601_repo = https://github.com/seansawyer/erlang_iso8601
+pkg_iso8601_commit = master
+
+PACKAGES += jamdb_sybase
+pkg_jamdb_sybase_name = jamdb_sybase
+pkg_jamdb_sybase_description = Erlang driver for SAP Sybase ASE
+pkg_jamdb_sybase_homepage = https://github.com/erlangbureau/jamdb_sybase
+pkg_jamdb_sybase_fetch = git
+pkg_jamdb_sybase_repo = https://github.com/erlangbureau/jamdb_sybase
+pkg_jamdb_sybase_commit = master
+
+PACKAGES += jerg
+pkg_jerg_name = jerg
+pkg_jerg_description = JSON Schema to Erlang Records Generator
+pkg_jerg_homepage = https://github.com/ddossot/jerg
+pkg_jerg_fetch = git
+pkg_jerg_repo = https://github.com/ddossot/jerg
+pkg_jerg_commit = master
+
+PACKAGES += jesse
+pkg_jesse_name = jesse
+pkg_jesse_description = jesse (JSon Schema Erlang) is an implementation of a JSON schema validator for Erlang.
+pkg_jesse_homepage = https://github.com/for-GET/jesse
+pkg_jesse_fetch = git
+pkg_jesse_repo = https://github.com/for-GET/jesse
+pkg_jesse_commit = master
+
+PACKAGES += jiffy
+pkg_jiffy_name = jiffy
+pkg_jiffy_description = JSON NIFs for Erlang.
+pkg_jiffy_homepage = https://github.com/davisp/jiffy
+pkg_jiffy_fetch = git
+pkg_jiffy_repo = https://github.com/davisp/jiffy
+pkg_jiffy_commit = master
+
+PACKAGES += jiffy_v
+pkg_jiffy_v_name = jiffy_v
+pkg_jiffy_v_description = JSON validation utility
+pkg_jiffy_v_homepage = https://github.com/shizzard/jiffy-v
+pkg_jiffy_v_fetch = git
+pkg_jiffy_v_repo = https://github.com/shizzard/jiffy-v
+pkg_jiffy_v_commit = master
+
+PACKAGES += jobs
+pkg_jobs_name = jobs
+pkg_jobs_description = A job scheduler for load regulation
+pkg_jobs_homepage = https://github.com/esl/jobs
+pkg_jobs_fetch = git
+pkg_jobs_repo = https://github.com/esl/jobs
+pkg_jobs_commit = master
+
+PACKAGES += joxa
+pkg_joxa_name = joxa
+pkg_joxa_description = A Modern Lisp for the Erlang VM
+pkg_joxa_homepage = https://github.com/joxa/joxa
+pkg_joxa_fetch = git
+pkg_joxa_repo = https://github.com/joxa/joxa
+pkg_joxa_commit = master
+
+PACKAGES += json
+pkg_json_name = json
+pkg_json_description = A high-level JSON library for Erlang (17.0+)
+pkg_json_homepage = https://github.com/talentdeficit/json
+pkg_json_fetch = git
+pkg_json_repo = https://github.com/talentdeficit/json
+pkg_json_commit = master
+
+PACKAGES += json_rec
+pkg_json_rec_name = json_rec
+pkg_json_rec_description = JSON to Erlang record
+pkg_json_rec_homepage = https://github.com/justinkirby/json_rec
+pkg_json_rec_fetch = git
+pkg_json_rec_repo = https://github.com/justinkirby/json_rec
+pkg_json_rec_commit = master
+
+PACKAGES += jsone
+pkg_jsone_name = jsone
+pkg_jsone_description = An Erlang library for encoding and decoding JSON data.
+pkg_jsone_homepage = https://github.com/sile/jsone.git
+pkg_jsone_fetch = git
+pkg_jsone_repo = https://github.com/sile/jsone.git
+pkg_jsone_commit = master
+
+PACKAGES += jsonerl
+pkg_jsonerl_name = jsonerl
+pkg_jsonerl_description = Yet another, but slightly different, Erlang <-> JSON encoder/decoder
+pkg_jsonerl_homepage = https://github.com/lambder/jsonerl
+pkg_jsonerl_fetch = git
+pkg_jsonerl_repo = https://github.com/lambder/jsonerl
+pkg_jsonerl_commit = master
+
+PACKAGES += jsonpath
+pkg_jsonpath_name = jsonpath
+pkg_jsonpath_description = Fast Erlang JSON data retrieval and updates via javascript-like notation
+pkg_jsonpath_homepage = https://github.com/GeneStevens/jsonpath
+pkg_jsonpath_fetch = git
+pkg_jsonpath_repo = https://github.com/GeneStevens/jsonpath
+pkg_jsonpath_commit = master
+
+PACKAGES += jsonx
+pkg_jsonx_name = jsonx
+pkg_jsonx_description = JSONX is an Erlang library, written in C, for efficiently decoding and encoding JSON.
+pkg_jsonx_homepage = https://github.com/iskra/jsonx
+pkg_jsonx_fetch = git
+pkg_jsonx_repo = https://github.com/iskra/jsonx
+pkg_jsonx_commit = master
+
+PACKAGES += jsx
+pkg_jsx_name = jsx
+pkg_jsx_description = An Erlang application for consuming, producing and manipulating JSON.
+pkg_jsx_homepage = https://github.com/talentdeficit/jsx
+pkg_jsx_fetch = git
+pkg_jsx_repo = https://github.com/talentdeficit/jsx
+pkg_jsx_commit = master
+
+PACKAGES += kafka
+pkg_kafka_name = kafka
+pkg_kafka_description = Kafka consumer and producer in Erlang
+pkg_kafka_homepage = https://github.com/wooga/kafka-erlang
+pkg_kafka_fetch = git
+pkg_kafka_repo = https://github.com/wooga/kafka-erlang
+pkg_kafka_commit = master
+
+PACKAGES += kafka_protocol
+pkg_kafka_protocol_name = kafka_protocol
+pkg_kafka_protocol_description = Kafka protocol Erlang library
+pkg_kafka_protocol_homepage = https://github.com/klarna/kafka_protocol
+pkg_kafka_protocol_fetch = git
+pkg_kafka_protocol_repo = https://github.com/klarna/kafka_protocol.git
+pkg_kafka_protocol_commit = master
+
+PACKAGES += kai
+pkg_kai_name = kai
+pkg_kai_description = DHT storage by Takeshi Inoue
+pkg_kai_homepage = https://github.com/synrc/kai
+pkg_kai_fetch = git
+pkg_kai_repo = https://github.com/synrc/kai
+pkg_kai_commit = master
+
+PACKAGES += katja
+pkg_katja_name = katja
+pkg_katja_description = A simple Riemann client written in Erlang.
+pkg_katja_homepage = https://github.com/nifoc/katja
+pkg_katja_fetch = git
+pkg_katja_repo = https://github.com/nifoc/katja
+pkg_katja_commit = master
+
+PACKAGES += kdht
+pkg_kdht_name = kdht
+pkg_kdht_description = kdht is an Erlang DHT implementation
+pkg_kdht_homepage = https://github.com/kevinlynx/kdht
+pkg_kdht_fetch = git
+pkg_kdht_repo = https://github.com/kevinlynx/kdht
+pkg_kdht_commit = master
+
+PACKAGES += key2value
+pkg_key2value_name = key2value
+pkg_key2value_description = Erlang 2-way map
+pkg_key2value_homepage = https://github.com/okeuday/key2value
+pkg_key2value_fetch = git
+pkg_key2value_repo = https://github.com/okeuday/key2value
+pkg_key2value_commit = master
+
+PACKAGES += keys1value
+pkg_keys1value_name = keys1value
+pkg_keys1value_description = Erlang set associative map for key lists
+pkg_keys1value_homepage = https://github.com/okeuday/keys1value
+pkg_keys1value_fetch = git
+pkg_keys1value_repo = https://github.com/okeuday/keys1value
+pkg_keys1value_commit = master
+
+PACKAGES += kinetic
+pkg_kinetic_name = kinetic
+pkg_kinetic_description = Erlang Kinesis Client
+pkg_kinetic_homepage = https://github.com/AdRoll/kinetic
+pkg_kinetic_fetch = git
+pkg_kinetic_repo = https://github.com/AdRoll/kinetic
+pkg_kinetic_commit = master
+
+PACKAGES += kjell
+pkg_kjell_name = kjell
+pkg_kjell_description = Erlang Shell
+pkg_kjell_homepage = https://github.com/karlll/kjell
+pkg_kjell_fetch = git
+pkg_kjell_repo = https://github.com/karlll/kjell
+pkg_kjell_commit = master
+
+PACKAGES += kraken
+pkg_kraken_name = kraken
+pkg_kraken_description = Distributed Pubsub Server for Realtime Apps
+pkg_kraken_homepage = https://github.com/Asana/kraken
+pkg_kraken_fetch = git
+pkg_kraken_repo = https://github.com/Asana/kraken
+pkg_kraken_commit = master
+
+PACKAGES += kucumberl
+pkg_kucumberl_name = kucumberl
+pkg_kucumberl_description = A pure-Erlang, open-source implementation of Cucumber
+pkg_kucumberl_homepage = https://github.com/openshine/kucumberl
+pkg_kucumberl_fetch = git
+pkg_kucumberl_repo = https://github.com/openshine/kucumberl
+pkg_kucumberl_commit = master
+
+PACKAGES += kvc
+pkg_kvc_name = kvc
+pkg_kvc_description = KVC - Key Value Coding for Erlang data structures
+pkg_kvc_homepage = https://github.com/etrepum/kvc
+pkg_kvc_fetch = git
+pkg_kvc_repo = https://github.com/etrepum/kvc
+pkg_kvc_commit = master
+
+PACKAGES += kvlists
+pkg_kvlists_name = kvlists
+pkg_kvlists_description = Lists of key-value pairs (decoded JSON) in Erlang
+pkg_kvlists_homepage = https://github.com/jcomellas/kvlists
+pkg_kvlists_fetch = git
+pkg_kvlists_repo = https://github.com/jcomellas/kvlists
+pkg_kvlists_commit = master
+
+PACKAGES += kvs
+pkg_kvs_name = kvs
+pkg_kvs_description = Container and Iterator
+pkg_kvs_homepage = https://github.com/synrc/kvs
+pkg_kvs_fetch = git
+pkg_kvs_repo = https://github.com/synrc/kvs
+pkg_kvs_commit = master
+
+PACKAGES += lager
+pkg_lager_name = lager
+pkg_lager_description = A logging framework for Erlang/OTP.
+pkg_lager_homepage = https://github.com/erlang-lager/lager
+pkg_lager_fetch = git
+pkg_lager_repo = https://github.com/erlang-lager/lager
+pkg_lager_commit = master
+
+PACKAGES += lager_amqp_backend
+pkg_lager_amqp_backend_name = lager_amqp_backend
+pkg_lager_amqp_backend_description = AMQP RabbitMQ Lager backend
+pkg_lager_amqp_backend_homepage = https://github.com/jbrisbin/lager_amqp_backend
+pkg_lager_amqp_backend_fetch = git
+pkg_lager_amqp_backend_repo = https://github.com/jbrisbin/lager_amqp_backend
+pkg_lager_amqp_backend_commit = master
+
+PACKAGES += lager_syslog
+pkg_lager_syslog_name = lager_syslog
+pkg_lager_syslog_description = Syslog backend for lager
+pkg_lager_syslog_homepage = https://github.com/erlang-lager/lager_syslog
+pkg_lager_syslog_fetch = git
+pkg_lager_syslog_repo = https://github.com/erlang-lager/lager_syslog
+pkg_lager_syslog_commit = master
+
+PACKAGES += lambdapad
+pkg_lambdapad_name = lambdapad
+pkg_lambdapad_description = Static site generator using Erlang. Yes, Erlang.
+pkg_lambdapad_homepage = https://github.com/gar1t/lambdapad
+pkg_lambdapad_fetch = git
+pkg_lambdapad_repo = https://github.com/gar1t/lambdapad
+pkg_lambdapad_commit = master
+
+PACKAGES += lasp
+pkg_lasp_name = lasp
+pkg_lasp_description = A Language for Distributed, Eventually Consistent Computations
+pkg_lasp_homepage = http://lasp-lang.org/
+pkg_lasp_fetch = git
+pkg_lasp_repo = https://github.com/lasp-lang/lasp
+pkg_lasp_commit = master
+
+PACKAGES += lasse
+pkg_lasse_name = lasse
+pkg_lasse_description = SSE handler for Cowboy
+pkg_lasse_homepage = https://github.com/inaka/lasse
+pkg_lasse_fetch = git
+pkg_lasse_repo = https://github.com/inaka/lasse
+pkg_lasse_commit = master
+
+PACKAGES += ldap
+pkg_ldap_name = ldap
+pkg_ldap_description = LDAP server written in Erlang
+pkg_ldap_homepage = https://github.com/spawnproc/ldap
+pkg_ldap_fetch = git
+pkg_ldap_repo = https://github.com/spawnproc/ldap
+pkg_ldap_commit = master
+
+PACKAGES += lethink
+pkg_lethink_name = lethink
+pkg_lethink_description = Erlang driver for RethinkDB
+pkg_lethink_homepage = https://github.com/taybin/lethink
+pkg_lethink_fetch = git
+pkg_lethink_repo = https://github.com/taybin/lethink
+pkg_lethink_commit = master
+
+PACKAGES += lfe
+pkg_lfe_name = lfe
+pkg_lfe_description = Lisp Flavoured Erlang (LFE)
+pkg_lfe_homepage = https://github.com/rvirding/lfe
+pkg_lfe_fetch = git
+pkg_lfe_repo = https://github.com/rvirding/lfe
+pkg_lfe_commit = master
+
+PACKAGES += ling
+pkg_ling_name = ling
+pkg_ling_description = Erlang on Xen
+pkg_ling_homepage = https://github.com/cloudozer/ling
+pkg_ling_fetch = git
+pkg_ling_repo = https://github.com/cloudozer/ling
+pkg_ling_commit = master
+
+PACKAGES += live
+pkg_live_name = live
+pkg_live_description = Automated module and configuration reloader.
+pkg_live_homepage = http://ninenines.eu
+pkg_live_fetch = git
+pkg_live_repo = https://github.com/ninenines/live
+pkg_live_commit = master
+
+PACKAGES += lmq
+pkg_lmq_name = lmq
+pkg_lmq_description = Lightweight Message Queue
+pkg_lmq_homepage = https://github.com/iij/lmq
+pkg_lmq_fetch = git
+pkg_lmq_repo = https://github.com/iij/lmq
+pkg_lmq_commit = master
+
+PACKAGES += locker
+pkg_locker_name = locker
+pkg_locker_description = Atomic distributed 'check and set' for short-lived keys
+pkg_locker_homepage = https://github.com/wooga/locker
+pkg_locker_fetch = git
+pkg_locker_repo = https://github.com/wooga/locker
+pkg_locker_commit = master
+
+PACKAGES += locks
+pkg_locks_name = locks
+pkg_locks_description = A scalable, deadlock-resolving resource locker
+pkg_locks_homepage = https://github.com/uwiger/locks
+pkg_locks_fetch = git
+pkg_locks_repo = https://github.com/uwiger/locks
+pkg_locks_commit = master
+
+PACKAGES += log4erl
+pkg_log4erl_name = log4erl
+pkg_log4erl_description = A logger for erlang in the spirit of Log4J.
+pkg_log4erl_homepage = https://github.com/ahmednawras/log4erl
+pkg_log4erl_fetch = git
+pkg_log4erl_repo = https://github.com/ahmednawras/log4erl
+pkg_log4erl_commit = master
+
+PACKAGES += lol
+pkg_lol_name = lol
+pkg_lol_description = Lisp on erLang, and programming is fun again
+pkg_lol_homepage = https://github.com/b0oh/lol
+pkg_lol_fetch = git
+pkg_lol_repo = https://github.com/b0oh/lol
+pkg_lol_commit = master
+
+PACKAGES += lucid
+pkg_lucid_name = lucid
+pkg_lucid_description = HTTP/2 server written in Erlang
+pkg_lucid_homepage = https://github.com/tatsuhiro-t/lucid
+pkg_lucid_fetch = git
+pkg_lucid_repo = https://github.com/tatsuhiro-t/lucid
+pkg_lucid_commit = master
+
+PACKAGES += luerl
+pkg_luerl_name = luerl
+pkg_luerl_description = Lua in Erlang
+pkg_luerl_homepage = https://github.com/rvirding/luerl
+pkg_luerl_fetch = git
+pkg_luerl_repo = https://github.com/rvirding/luerl
+pkg_luerl_commit = develop
+
+PACKAGES += luwak
+pkg_luwak_name = luwak
+pkg_luwak_description = Large-object storage interface for Riak
+pkg_luwak_homepage = https://github.com/basho/luwak
+pkg_luwak_fetch = git
+pkg_luwak_repo = https://github.com/basho/luwak
+pkg_luwak_commit = master
+
+PACKAGES += lux
+pkg_lux_name = lux
+pkg_lux_description = Lux (LUcid eXpect scripting) simplifies test automation and provides an Expect-style execution of commands
+pkg_lux_homepage = https://github.com/hawk/lux
+pkg_lux_fetch = git
+pkg_lux_repo = https://github.com/hawk/lux
+pkg_lux_commit = master
+
+PACKAGES += machi
+pkg_machi_name = machi
+pkg_machi_description = Machi file store
+pkg_machi_homepage = https://github.com/basho/machi
+pkg_machi_fetch = git
+pkg_machi_repo = https://github.com/basho/machi
+pkg_machi_commit = master
+
+PACKAGES += mad
+pkg_mad_name = mad
+pkg_mad_description = Small and Fast Rebar Replacement
+pkg_mad_homepage = https://github.com/synrc/mad
+pkg_mad_fetch = git
+pkg_mad_repo = https://github.com/synrc/mad
+pkg_mad_commit = master
+
+PACKAGES += marina
+pkg_marina_name = marina
+pkg_marina_description = Non-blocking Erlang Cassandra CQL3 client
+pkg_marina_homepage = https://github.com/lpgauth/marina
+pkg_marina_fetch = git
+pkg_marina_repo = https://github.com/lpgauth/marina
+pkg_marina_commit = master
+
+PACKAGES += mavg
+pkg_mavg_name = mavg
+pkg_mavg_description = Erlang :: Exponential moving average library
+pkg_mavg_homepage = https://github.com/EchoTeam/mavg
+pkg_mavg_fetch = git
+pkg_mavg_repo = https://github.com/EchoTeam/mavg
+pkg_mavg_commit = master
+
+PACKAGES += mc_erl
+pkg_mc_erl_name = mc_erl
+pkg_mc_erl_description = mc-erl is a server for Minecraft 1.4.7 written in Erlang.
+pkg_mc_erl_homepage = https://github.com/clonejo/mc-erl
+pkg_mc_erl_fetch = git
+pkg_mc_erl_repo = https://github.com/clonejo/mc-erl
+pkg_mc_erl_commit = master
+
+PACKAGES += mcd
+pkg_mcd_name = mcd
+pkg_mcd_description = Fast memcached protocol client in pure Erlang
+pkg_mcd_homepage = https://github.com/EchoTeam/mcd
+pkg_mcd_fetch = git
+pkg_mcd_repo = https://github.com/EchoTeam/mcd
+pkg_mcd_commit = master
+
+PACKAGES += mcerlang
+pkg_mcerlang_name = mcerlang
+pkg_mcerlang_description = The McErlang model checker for Erlang
+pkg_mcerlang_homepage = https://github.com/fredlund/McErlang
+pkg_mcerlang_fetch = git
+pkg_mcerlang_repo = https://github.com/fredlund/McErlang
+pkg_mcerlang_commit = master
+
+PACKAGES += meck
+pkg_meck_name = meck
+pkg_meck_description = A mocking library for Erlang
+pkg_meck_homepage = https://github.com/eproxus/meck
+pkg_meck_fetch = git
+pkg_meck_repo = https://github.com/eproxus/meck
+pkg_meck_commit = master
+
+PACKAGES += mekao
+pkg_mekao_name = mekao
+pkg_mekao_description = SQL constructor
+pkg_mekao_homepage = https://github.com/ddosia/mekao
+pkg_mekao_fetch = git
+pkg_mekao_repo = https://github.com/ddosia/mekao
+pkg_mekao_commit = master
+
+PACKAGES += memo
+pkg_memo_name = memo
+pkg_memo_description = Erlang memoization server
+pkg_memo_homepage = https://github.com/tuncer/memo
+pkg_memo_fetch = git
+pkg_memo_repo = https://github.com/tuncer/memo
+pkg_memo_commit = master
+
+PACKAGES += merge_index
+pkg_merge_index_name = merge_index
+pkg_merge_index_description = MergeIndex is an Erlang library for storing ordered sets on disk. It is very similar to an SSTable (in Google's Bigtable) or an HFile (in Hadoop).
+pkg_merge_index_homepage = https://github.com/basho/merge_index
+pkg_merge_index_fetch = git
+pkg_merge_index_repo = https://github.com/basho/merge_index
+pkg_merge_index_commit = master
+
+PACKAGES += merl
+pkg_merl_name = merl
+pkg_merl_description = Metaprogramming in Erlang
+pkg_merl_homepage = https://github.com/richcarl/merl
+pkg_merl_fetch = git
+pkg_merl_repo = https://github.com/richcarl/merl
+pkg_merl_commit = master
+
+PACKAGES += mimerl
+pkg_mimerl_name = mimerl
+pkg_mimerl_description = library to handle mimetypes
+pkg_mimerl_homepage = https://github.com/benoitc/mimerl
+pkg_mimerl_fetch = git
+pkg_mimerl_repo = https://github.com/benoitc/mimerl
+pkg_mimerl_commit = master
+
+PACKAGES += mimetypes
+pkg_mimetypes_name = mimetypes
+pkg_mimetypes_description = Erlang MIME types library
+pkg_mimetypes_homepage = https://github.com/spawngrid/mimetypes
+pkg_mimetypes_fetch = git
+pkg_mimetypes_repo = https://github.com/spawngrid/mimetypes
+pkg_mimetypes_commit = master
+
+PACKAGES += mixer
+pkg_mixer_name = mixer
+pkg_mixer_description = Mix in functions from other modules
+pkg_mixer_homepage = https://github.com/chef/mixer
+pkg_mixer_fetch = git
+pkg_mixer_repo = https://github.com/chef/mixer
+pkg_mixer_commit = master
+
+PACKAGES += mochiweb
+pkg_mochiweb_name = mochiweb
+pkg_mochiweb_description = MochiWeb is an Erlang library for building lightweight HTTP servers.
+pkg_mochiweb_homepage = https://github.com/mochi/mochiweb
+pkg_mochiweb_fetch = git
+pkg_mochiweb_repo = https://github.com/mochi/mochiweb
+pkg_mochiweb_commit = master
+
+PACKAGES += mochiweb_xpath
+pkg_mochiweb_xpath_name = mochiweb_xpath
+pkg_mochiweb_xpath_description = XPath support for mochiweb's HTML parser
+pkg_mochiweb_xpath_homepage = https://github.com/retnuh/mochiweb_xpath
+pkg_mochiweb_xpath_fetch = git
+pkg_mochiweb_xpath_repo = https://github.com/retnuh/mochiweb_xpath
+pkg_mochiweb_xpath_commit = master
+
+PACKAGES += mockgyver
+pkg_mockgyver_name = mockgyver
+pkg_mockgyver_description = A mocking library for Erlang
+pkg_mockgyver_homepage = https://github.com/klajo/mockgyver
+pkg_mockgyver_fetch = git
+pkg_mockgyver_repo = https://github.com/klajo/mockgyver
+pkg_mockgyver_commit = master
+
+PACKAGES += modlib
+pkg_modlib_name = modlib
+pkg_modlib_description = Web framework based on Erlang's inets httpd
+pkg_modlib_homepage = https://github.com/gar1t/modlib
+pkg_modlib_fetch = git
+pkg_modlib_repo = https://github.com/gar1t/modlib
+pkg_modlib_commit = master
+
+PACKAGES += mongodb
+pkg_mongodb_name = mongodb
+pkg_mongodb_description = MongoDB driver for Erlang
+pkg_mongodb_homepage = https://github.com/comtihon/mongodb-erlang
+pkg_mongodb_fetch = git
+pkg_mongodb_repo = https://github.com/comtihon/mongodb-erlang
+pkg_mongodb_commit = master
+
+PACKAGES += mongooseim
+pkg_mongooseim_name = mongooseim
+pkg_mongooseim_description = Jabber / XMPP server with a focus on performance and scalability, by Erlang Solutions
+pkg_mongooseim_homepage = https://www.erlang-solutions.com/products/mongooseim-massively-scalable-ejabberd-platform
+pkg_mongooseim_fetch = git
+pkg_mongooseim_repo = https://github.com/esl/MongooseIM
+pkg_mongooseim_commit = master
+
+PACKAGES += moyo
+pkg_moyo_name = moyo
+pkg_moyo_description = Erlang utility functions library
+pkg_moyo_homepage = https://github.com/dwango/moyo
+pkg_moyo_fetch = git
+pkg_moyo_repo = https://github.com/dwango/moyo
+pkg_moyo_commit = master
+
+PACKAGES += msgpack
+pkg_msgpack_name = msgpack
+pkg_msgpack_description = MessagePack (de)serializer implementation for Erlang
+pkg_msgpack_homepage = https://github.com/msgpack/msgpack-erlang
+pkg_msgpack_fetch = git
+pkg_msgpack_repo = https://github.com/msgpack/msgpack-erlang
+pkg_msgpack_commit = master
+
+PACKAGES += mu2
+pkg_mu2_name = mu2
+pkg_mu2_description = Erlang mutation testing tool
+pkg_mu2_homepage = https://github.com/ramsay-t/mu2
+pkg_mu2_fetch = git
+pkg_mu2_repo = https://github.com/ramsay-t/mu2
+pkg_mu2_commit = master
+
+PACKAGES += mustache
+pkg_mustache_name = mustache
+pkg_mustache_description = Mustache template engine for Erlang.
+pkg_mustache_homepage = https://github.com/mojombo/mustache.erl
+pkg_mustache_fetch = git
+pkg_mustache_repo = https://github.com/mojombo/mustache.erl
+pkg_mustache_commit = master
+
+PACKAGES += myproto
+pkg_myproto_name = myproto
+pkg_myproto_description = MySQL Server Protocol in Erlang
+pkg_myproto_homepage = https://github.com/altenwald/myproto
+pkg_myproto_fetch = git
+pkg_myproto_repo = https://github.com/altenwald/myproto
+pkg_myproto_commit = master
+
+PACKAGES += mysql
+pkg_mysql_name = mysql
+pkg_mysql_description = MySQL client library for Erlang/OTP
+pkg_mysql_homepage = https://github.com/mysql-otp/mysql-otp
+pkg_mysql_fetch = git
+pkg_mysql_repo = https://github.com/mysql-otp/mysql-otp
+pkg_mysql_commit = 1.5.1
+
+PACKAGES += n2o
+pkg_n2o_name = n2o
+pkg_n2o_description = WebSocket Application Server
+pkg_n2o_homepage = https://github.com/5HT/n2o
+pkg_n2o_fetch = git
+pkg_n2o_repo = https://github.com/5HT/n2o
+pkg_n2o_commit = master
+
+PACKAGES += nat_upnp
+pkg_nat_upnp_name = nat_upnp
+pkg_nat_upnp_description = Erlang library to map your internal port to an external one using UPnP IGD
+pkg_nat_upnp_homepage = https://github.com/benoitc/nat_upnp
+pkg_nat_upnp_fetch = git
+pkg_nat_upnp_repo = https://github.com/benoitc/nat_upnp
+pkg_nat_upnp_commit = master
+
+PACKAGES += neo4j
+pkg_neo4j_name = neo4j
+pkg_neo4j_description = Erlang client library for Neo4J.
+pkg_neo4j_homepage = https://github.com/dmitriid/neo4j-erlang
+pkg_neo4j_fetch = git
+pkg_neo4j_repo = https://github.com/dmitriid/neo4j-erlang
+pkg_neo4j_commit = master
+
+PACKAGES += neotoma
+pkg_neotoma_name = neotoma
+pkg_neotoma_description = Erlang library and packrat parser-generator for parsing expression grammars.
+pkg_neotoma_homepage = https://github.com/seancribbs/neotoma
+pkg_neotoma_fetch = git
+pkg_neotoma_repo = https://github.com/seancribbs/neotoma
+pkg_neotoma_commit = master
+
+PACKAGES += newrelic
+pkg_newrelic_name = newrelic
+pkg_newrelic_description = Erlang library for sending metrics to New Relic
+pkg_newrelic_homepage = https://github.com/wooga/newrelic-erlang
+pkg_newrelic_fetch = git
+pkg_newrelic_repo = https://github.com/wooga/newrelic-erlang
+pkg_newrelic_commit = master
+
+PACKAGES += nifty
+pkg_nifty_name = nifty
+pkg_nifty_description = Erlang NIF wrapper generator
+pkg_nifty_homepage = https://github.com/parapluu/nifty
+pkg_nifty_fetch = git
+pkg_nifty_repo = https://github.com/parapluu/nifty
+pkg_nifty_commit = master
+
+PACKAGES += nitrogen_core
+pkg_nitrogen_core_name = nitrogen_core
+pkg_nitrogen_core_description = The core Nitrogen library.
+pkg_nitrogen_core_homepage = http://nitrogenproject.com/
+pkg_nitrogen_core_fetch = git
+pkg_nitrogen_core_repo = https://github.com/nitrogen/nitrogen_core
+pkg_nitrogen_core_commit = master
+
+PACKAGES += nkbase
+pkg_nkbase_name = nkbase
+pkg_nkbase_description = NkBASE distributed database
+pkg_nkbase_homepage = https://github.com/Nekso/nkbase
+pkg_nkbase_fetch = git
+pkg_nkbase_repo = https://github.com/Nekso/nkbase
+pkg_nkbase_commit = develop
+
+PACKAGES += nkdocker
+pkg_nkdocker_name = nkdocker
+pkg_nkdocker_description = Erlang Docker client
+pkg_nkdocker_homepage = https://github.com/Nekso/nkdocker
+pkg_nkdocker_fetch = git
+pkg_nkdocker_repo = https://github.com/Nekso/nkdocker
+pkg_nkdocker_commit = master
+
+PACKAGES += nkpacket
+pkg_nkpacket_name = nkpacket
+pkg_nkpacket_description = Generic Erlang transport layer
+pkg_nkpacket_homepage = https://github.com/Nekso/nkpacket
+pkg_nkpacket_fetch = git
+pkg_nkpacket_repo = https://github.com/Nekso/nkpacket
+pkg_nkpacket_commit = master
+
+PACKAGES += nksip
+pkg_nksip_name = nksip
+pkg_nksip_description = Erlang SIP application server
+pkg_nksip_homepage = https://github.com/kalta/nksip
+pkg_nksip_fetch = git
+pkg_nksip_repo = https://github.com/kalta/nksip
+pkg_nksip_commit = master
+
+PACKAGES += nodefinder
+pkg_nodefinder_name = nodefinder
+pkg_nodefinder_description = automatic node discovery via UDP multicast
+pkg_nodefinder_homepage = https://github.com/erlanger/nodefinder
+pkg_nodefinder_fetch = git
+pkg_nodefinder_repo = https://github.com/okeuday/nodefinder
+pkg_nodefinder_commit = master
+
+PACKAGES += nprocreg
+pkg_nprocreg_name = nprocreg
+pkg_nprocreg_description = Minimal Distributed Erlang Process Registry
+pkg_nprocreg_homepage = http://nitrogenproject.com/
+pkg_nprocreg_fetch = git
+pkg_nprocreg_repo = https://github.com/nitrogen/nprocreg
+pkg_nprocreg_commit = master
+
+PACKAGES += oauth
+pkg_oauth_name = oauth
+pkg_oauth_description = An Erlang OAuth 1.0 implementation
+pkg_oauth_homepage = https://github.com/tim/erlang-oauth
+pkg_oauth_fetch = git
+pkg_oauth_repo = https://github.com/tim/erlang-oauth
+pkg_oauth_commit = master
+
+PACKAGES += oauth2
+pkg_oauth2_name = oauth2
+pkg_oauth2_description = Erlang OAuth2 implementation
+pkg_oauth2_homepage = https://github.com/kivra/oauth2
+pkg_oauth2_fetch = git
+pkg_oauth2_repo = https://github.com/kivra/oauth2
+pkg_oauth2_commit = master
+
+PACKAGES += observer_cli
+pkg_observer_cli_name = observer_cli
+pkg_observer_cli_description = Visualize Erlang/Elixir Nodes On The Command Line
+pkg_observer_cli_homepage = http://zhongwencool.github.io/observer_cli
+pkg_observer_cli_fetch = git
+pkg_observer_cli_repo = https://github.com/zhongwencool/observer_cli
+pkg_observer_cli_commit = master
+
+PACKAGES += octopus
+pkg_octopus_name = octopus
+pkg_octopus_description = Small and flexible pool manager written in Erlang
+pkg_octopus_homepage = https://github.com/erlangbureau/octopus
+pkg_octopus_fetch = git
+pkg_octopus_repo = https://github.com/erlangbureau/octopus
+pkg_octopus_commit = master
+
+PACKAGES += of_protocol
+pkg_of_protocol_name = of_protocol
+pkg_of_protocol_description = OpenFlow Protocol Library for Erlang
+pkg_of_protocol_homepage = https://github.com/FlowForwarding/of_protocol
+pkg_of_protocol_fetch = git
+pkg_of_protocol_repo = https://github.com/FlowForwarding/of_protocol
+pkg_of_protocol_commit = master
+
+PACKAGES += opencouch
+pkg_opencouch_name = couch
+pkg_opencouch_description = An embeddable document-oriented database compatible with Apache CouchDB
+pkg_opencouch_homepage = https://github.com/benoitc/opencouch
+pkg_opencouch_fetch = git
+pkg_opencouch_repo = https://github.com/benoitc/opencouch
+pkg_opencouch_commit = master
+
+PACKAGES += openflow
+pkg_openflow_name = openflow
+pkg_openflow_description = An OpenFlow controller written in pure Erlang
+pkg_openflow_homepage = https://github.com/renatoaguiar/erlang-openflow
+pkg_openflow_fetch = git
+pkg_openflow_repo = https://github.com/renatoaguiar/erlang-openflow
+pkg_openflow_commit = master
+
+PACKAGES += openid
+pkg_openid_name = openid
+pkg_openid_description = Erlang OpenID
+pkg_openid_homepage = https://github.com/brendonh/erl_openid
+pkg_openid_fetch = git
+pkg_openid_repo = https://github.com/brendonh/erl_openid
+pkg_openid_commit = master
+
+PACKAGES += openpoker
+pkg_openpoker_name = openpoker
+pkg_openpoker_description = Genesis Texas hold'em Game Server
+pkg_openpoker_homepage = https://github.com/hpyhacking/openpoker
+pkg_openpoker_fetch = git
+pkg_openpoker_repo = https://github.com/hpyhacking/openpoker
+pkg_openpoker_commit = master
+
+PACKAGES += otpbp
+pkg_otpbp_name = otpbp
+pkg_otpbp_description = Parse transformer for using new OTP functions in old Erlang/OTP releases (R15, R16, 17, 18, 19)
+pkg_otpbp_homepage = https://github.com/Ledest/otpbp
+pkg_otpbp_fetch = git
+pkg_otpbp_repo = https://github.com/Ledest/otpbp
+pkg_otpbp_commit = master
+
+PACKAGES += pal
+pkg_pal_name = pal
+pkg_pal_description = Pragmatic Authentication Library
+pkg_pal_homepage = https://github.com/manifest/pal
+pkg_pal_fetch = git
+pkg_pal_repo = https://github.com/manifest/pal
+pkg_pal_commit = master
+
+PACKAGES += parse_trans
+pkg_parse_trans_name = parse_trans
+pkg_parse_trans_description = Parse transform utilities for Erlang
+pkg_parse_trans_homepage = https://github.com/uwiger/parse_trans
+pkg_parse_trans_fetch = git
+pkg_parse_trans_repo = https://github.com/uwiger/parse_trans
+pkg_parse_trans_commit = master
+
+PACKAGES += parsexml
+pkg_parsexml_name = parsexml
+pkg_parsexml_description = Simple DOM XML parser with a convenient and very simple API
+pkg_parsexml_homepage = https://github.com/maxlapshin/parsexml
+pkg_parsexml_fetch = git
+pkg_parsexml_repo = https://github.com/maxlapshin/parsexml
+pkg_parsexml_commit = master
+
+PACKAGES += partisan
+pkg_partisan_name = partisan
+pkg_partisan_description = High-performance, high-scalability distributed computing with Erlang and Elixir.
+pkg_partisan_homepage = http://partisan.cloud
+pkg_partisan_fetch = git
+pkg_partisan_repo = https://github.com/lasp-lang/partisan
+pkg_partisan_commit = master
+
+PACKAGES += pegjs
+pkg_pegjs_name = pegjs
+pkg_pegjs_description = An implementation of the PEG.js grammar for Erlang.
+pkg_pegjs_homepage = https://github.com/dmitriid/pegjs
+pkg_pegjs_fetch = git
+pkg_pegjs_repo = https://github.com/dmitriid/pegjs
+pkg_pegjs_commit = master
+
+PACKAGES += percept2
+pkg_percept2_name = percept2
+pkg_percept2_description = Concurrent profiling tool for Erlang
+pkg_percept2_homepage = https://github.com/huiqing/percept2
+pkg_percept2_fetch = git
+pkg_percept2_repo = https://github.com/huiqing/percept2
+pkg_percept2_commit = master
+
+PACKAGES += pgo
+pkg_pgo_name = pgo
+pkg_pgo_description = Erlang Postgres client and connection pool
+pkg_pgo_homepage = https://github.com/erleans/pgo.git
+pkg_pgo_fetch = git
+pkg_pgo_repo = https://github.com/erleans/pgo.git
+pkg_pgo_commit = master
+
+PACKAGES += pgsql
+pkg_pgsql_name = pgsql
+pkg_pgsql_description = Erlang PostgreSQL driver
+pkg_pgsql_homepage = https://github.com/semiocast/pgsql
+pkg_pgsql_fetch = git
+pkg_pgsql_repo = https://github.com/semiocast/pgsql
+pkg_pgsql_commit = master
+
+PACKAGES += pkgx
+pkg_pkgx_name = pkgx
+pkg_pkgx_description = Build .deb packages from Erlang releases
+pkg_pkgx_homepage = https://github.com/arjan/pkgx
+pkg_pkgx_fetch = git
+pkg_pkgx_repo = https://github.com/arjan/pkgx
+pkg_pkgx_commit = master
+
+PACKAGES += pkt
+pkg_pkt_name = pkt
+pkg_pkt_description = Erlang network protocol library
+pkg_pkt_homepage = https://github.com/msantos/pkt
+pkg_pkt_fetch = git
+pkg_pkt_repo = https://github.com/msantos/pkt
+pkg_pkt_commit = master
+
+PACKAGES += plain_fsm
+pkg_plain_fsm_name = plain_fsm
+pkg_plain_fsm_description = A behaviour/support library for writing plain Erlang FSMs.
+pkg_plain_fsm_homepage = https://github.com/uwiger/plain_fsm
+pkg_plain_fsm_fetch = git
+pkg_plain_fsm_repo = https://github.com/uwiger/plain_fsm
+pkg_plain_fsm_commit = master
+
+PACKAGES += plumtree
+pkg_plumtree_name = plumtree
+pkg_plumtree_description = Epidemic Broadcast Trees
+pkg_plumtree_homepage = https://github.com/helium/plumtree
+pkg_plumtree_fetch = git
+pkg_plumtree_repo = https://github.com/helium/plumtree
+pkg_plumtree_commit = master
+
+PACKAGES += pmod_transform
+pkg_pmod_transform_name = pmod_transform
+pkg_pmod_transform_description = Parse transform for parameterized modules
+pkg_pmod_transform_homepage = https://github.com/erlang/pmod_transform
+pkg_pmod_transform_fetch = git
+pkg_pmod_transform_repo = https://github.com/erlang/pmod_transform
+pkg_pmod_transform_commit = master
+
+PACKAGES += pobox
+pkg_pobox_name = pobox
+pkg_pobox_description = External buffer processes to protect against mailbox overflow in Erlang
+pkg_pobox_homepage = https://github.com/ferd/pobox
+pkg_pobox_fetch = git
+pkg_pobox_repo = https://github.com/ferd/pobox
+pkg_pobox_commit = master
+
+PACKAGES += ponos
+pkg_ponos_name = ponos
+pkg_ponos_description = ponos is a simple yet powerful load generator written in Erlang
+pkg_ponos_homepage = https://github.com/klarna/ponos
+pkg_ponos_fetch = git
+pkg_ponos_repo = https://github.com/klarna/ponos
+pkg_ponos_commit = master
+
+PACKAGES += poolboy
+pkg_poolboy_name = poolboy
+pkg_poolboy_description = A hunky Erlang worker pool factory
+pkg_poolboy_homepage = https://github.com/devinus/poolboy
+pkg_poolboy_fetch = git
+pkg_poolboy_repo = https://github.com/devinus/poolboy
+pkg_poolboy_commit = master
+
+PACKAGES += pooler
+pkg_pooler_name = pooler
+pkg_pooler_description = An OTP Process Pool Application
+pkg_pooler_homepage = https://github.com/seth/pooler
+pkg_pooler_fetch = git
+pkg_pooler_repo = https://github.com/seth/pooler
+pkg_pooler_commit = master
+
+PACKAGES += pqueue
+pkg_pqueue_name = pqueue
+pkg_pqueue_description = Erlang Priority Queues
+pkg_pqueue_homepage = https://github.com/okeuday/pqueue
+pkg_pqueue_fetch = git
+pkg_pqueue_repo = https://github.com/okeuday/pqueue
+pkg_pqueue_commit = master
+
+PACKAGES += procket
+pkg_procket_name = procket
+pkg_procket_description = Erlang interface to low-level socket operations
+pkg_procket_homepage = http://blog.listincomprehension.com/search/label/procket
+pkg_procket_fetch = git
+pkg_procket_repo = https://github.com/msantos/procket
+pkg_procket_commit = master
+
+PACKAGES += prometheus
+pkg_prometheus_name = prometheus
+pkg_prometheus_description = Prometheus.io client in Erlang
+pkg_prometheus_homepage = https://github.com/deadtrickster/prometheus.erl
+pkg_prometheus_fetch = git
+pkg_prometheus_repo = https://github.com/deadtrickster/prometheus.erl
+pkg_prometheus_commit = master
+
+PACKAGES += prop
+pkg_prop_name = prop
+pkg_prop_description = An Erlang code scaffolding and generator system.
+pkg_prop_homepage = https://github.com/nuex/prop
+pkg_prop_fetch = git
+pkg_prop_repo = https://github.com/nuex/prop
+pkg_prop_commit = master
+
+PACKAGES += proper
+pkg_proper_name = proper
+pkg_proper_description = PropEr: a QuickCheck-inspired property-based testing tool for Erlang.
+pkg_proper_homepage = http://proper.softlab.ntua.gr
+pkg_proper_fetch = git
+pkg_proper_repo = https://github.com/manopapad/proper
+pkg_proper_commit = master
+
+PACKAGES += props
+pkg_props_name = props
+pkg_props_description = Property structure library
+pkg_props_homepage = https://github.com/greyarea/props
+pkg_props_fetch = git
+pkg_props_repo = https://github.com/greyarea/props
+pkg_props_commit = master
+
+PACKAGES += protobuffs
+pkg_protobuffs_name = protobuffs
+pkg_protobuffs_description = An implementation of Google's Protocol Buffers for Erlang, based on ngerakines/erlang_protobuffs.
+pkg_protobuffs_homepage = https://github.com/basho/erlang_protobuffs
+pkg_protobuffs_fetch = git
+pkg_protobuffs_repo = https://github.com/basho/erlang_protobuffs
+pkg_protobuffs_commit = master
+
+PACKAGES += psycho
+pkg_psycho_name = psycho
+pkg_psycho_description = HTTP server that provides a WSGI-like interface for applications and middleware.
+pkg_psycho_homepage = https://github.com/gar1t/psycho
+pkg_psycho_fetch = git
+pkg_psycho_repo = https://github.com/gar1t/psycho
+pkg_psycho_commit = master
+
+PACKAGES += purity
+pkg_purity_name = purity
+pkg_purity_description = A side-effect analyzer for Erlang
+pkg_purity_homepage = https://github.com/mpitid/purity
+pkg_purity_fetch = git
+pkg_purity_repo = https://github.com/mpitid/purity
+pkg_purity_commit = master
+
+PACKAGES += push_service
+pkg_push_service_name = push_service
+pkg_push_service_description = Push service
+pkg_push_service_homepage = https://github.com/hairyhum/push_service
+pkg_push_service_fetch = git
+pkg_push_service_repo = https://github.com/hairyhum/push_service
+pkg_push_service_commit = master
+
+PACKAGES += qdate
+pkg_qdate_name = qdate
+pkg_qdate_description = Date, time, and timezone parsing, formatting, and conversion for Erlang.
+pkg_qdate_homepage = https://github.com/choptastic/qdate
+pkg_qdate_fetch = git
+pkg_qdate_repo = https://github.com/choptastic/qdate
+pkg_qdate_commit = master
+
+PACKAGES += qrcode
+pkg_qrcode_name = qrcode
+pkg_qrcode_description = QR Code encoder in Erlang
+pkg_qrcode_homepage = https://github.com/komone/qrcode
+pkg_qrcode_fetch = git
+pkg_qrcode_repo = https://github.com/komone/qrcode
+pkg_qrcode_commit = master
+
+PACKAGES += quest
+pkg_quest_name = quest
+pkg_quest_description = Learn Erlang through this set of challenges. An interactive system for getting to know Erlang.
+pkg_quest_homepage = https://github.com/eriksoe/ErlangQuest
+pkg_quest_fetch = git
+pkg_quest_repo = https://github.com/eriksoe/ErlangQuest
+pkg_quest_commit = master
+
+PACKAGES += quickrand
+pkg_quickrand_name = quickrand
+pkg_quickrand_description = Quick Erlang Random Number Generation
+pkg_quickrand_homepage = https://github.com/okeuday/quickrand
+pkg_quickrand_fetch = git
+pkg_quickrand_repo = https://github.com/okeuday/quickrand
+pkg_quickrand_commit = master
+
+PACKAGES += rabbit
+pkg_rabbit_name = rabbit
+pkg_rabbit_description = RabbitMQ Server
+pkg_rabbit_homepage = https://www.rabbitmq.com/
+pkg_rabbit_fetch = git
+pkg_rabbit_repo = https://github.com/rabbitmq/rabbitmq-server.git
+pkg_rabbit_commit = master
+
+PACKAGES += rabbit_exchange_type_riak
+pkg_rabbit_exchange_type_riak_name = rabbit_exchange_type_riak
+pkg_rabbit_exchange_type_riak_description = Custom RabbitMQ exchange type for sticking messages in Riak
+pkg_rabbit_exchange_type_riak_homepage = https://github.com/jbrisbin/riak-exchange
+pkg_rabbit_exchange_type_riak_fetch = git
+pkg_rabbit_exchange_type_riak_repo = https://github.com/jbrisbin/riak-exchange
+pkg_rabbit_exchange_type_riak_commit = master
+
+PACKAGES += rack
+pkg_rack_name = rack
+pkg_rack_description = Rack handler for Erlang
+pkg_rack_homepage = https://github.com/erlyvideo/rack
+pkg_rack_fetch = git
+pkg_rack_repo = https://github.com/erlyvideo/rack
+pkg_rack_commit = master
+
+PACKAGES += radierl
+pkg_radierl_name = radierl
+pkg_radierl_description = RADIUS protocol stack implemented in Erlang.
+pkg_radierl_homepage = https://github.com/vances/radierl
+pkg_radierl_fetch = git
+pkg_radierl_repo = https://github.com/vances/radierl
+pkg_radierl_commit = master
+
+PACKAGES += rafter
+pkg_rafter_name = rafter
+pkg_rafter_description = An Erlang library application which implements the Raft consensus protocol
+pkg_rafter_homepage = https://github.com/andrewjstone/rafter
+pkg_rafter_fetch = git
+pkg_rafter_repo = https://github.com/andrewjstone/rafter
+pkg_rafter_commit = master
+
+PACKAGES += ranch
+pkg_ranch_name = ranch
+pkg_ranch_description = Socket acceptor pool for TCP protocols.
+pkg_ranch_homepage = http://ninenines.eu
+pkg_ranch_fetch = git
+pkg_ranch_repo = https://github.com/ninenines/ranch
+pkg_ranch_commit = 1.2.1
+
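+# Most index entries above track "master", while a few (such as ranch at
+# 1.2.1 here) pin a release. A sketch of the usual per-project override,
+# assuming the dep_* convention: a dep_<name>_commit set in the project
+# Makefile takes precedence over the pkg_<name>_commit default ("1.7.1" is
+# only an example tag).
+#
+#     DEPS = ranch
+#     dep_ranch_commit = 1.7.1
+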
+PACKAGES += rbeacon
+pkg_rbeacon_name = rbeacon
+pkg_rbeacon_description = LAN discovery and presence in Erlang.
+pkg_rbeacon_homepage = https://github.com/refuge/rbeacon
+pkg_rbeacon_fetch = git
+pkg_rbeacon_repo = https://github.com/refuge/rbeacon
+pkg_rbeacon_commit = master
+
+PACKAGES += rebar
+pkg_rebar_name = rebar
+pkg_rebar_description = Erlang build tool that makes it easy to compile and test Erlang applications, port drivers and releases.
+pkg_rebar_homepage = http://www.rebar3.org
+pkg_rebar_fetch = git
+pkg_rebar_repo = https://github.com/rebar/rebar3
+pkg_rebar_commit = master
+
+PACKAGES += rebus
+pkg_rebus_name = rebus
+pkg_rebus_description = A stupid simple, internal pub/sub event bus written in and for Erlang.
+pkg_rebus_homepage = https://github.com/olle/rebus
+pkg_rebus_fetch = git
+pkg_rebus_repo = https://github.com/olle/rebus
+pkg_rebus_commit = master
+
+PACKAGES += rec2json
+pkg_rec2json_name = rec2json
+pkg_rec2json_description = Compile Erlang record definitions into modules to convert them to/from JSON easily.
+pkg_rec2json_homepage = https://github.com/lordnull/rec2json
+pkg_rec2json_fetch = git
+pkg_rec2json_repo = https://github.com/lordnull/rec2json
+pkg_rec2json_commit = master
+
+PACKAGES += recon
+pkg_recon_name = recon
+pkg_recon_description = Collection of functions and scripts to debug Erlang in production.
+pkg_recon_homepage = https://github.com/ferd/recon
+pkg_recon_fetch = git
+pkg_recon_repo = https://github.com/ferd/recon
+pkg_recon_commit = master
+
+PACKAGES += record_info
+pkg_record_info_name = record_info
+pkg_record_info_description = Convert between record and proplist
+pkg_record_info_homepage = https://github.com/bipthelin/erlang-record_info
+pkg_record_info_fetch = git
+pkg_record_info_repo = https://github.com/bipthelin/erlang-record_info
+pkg_record_info_commit = master
+
+PACKAGES += redgrid
+pkg_redgrid_name = redgrid
+pkg_redgrid_description = automatic Erlang node discovery via Redis
+pkg_redgrid_homepage = https://github.com/jkvor/redgrid
+pkg_redgrid_fetch = git
+pkg_redgrid_repo = https://github.com/jkvor/redgrid
+pkg_redgrid_commit = master
+
+PACKAGES += redo
+pkg_redo_name = redo
+pkg_redo_description = pipelined Erlang Redis client
+pkg_redo_homepage = https://github.com/jkvor/redo
+pkg_redo_fetch = git
+pkg_redo_repo = https://github.com/jkvor/redo
+pkg_redo_commit = master
+
+PACKAGES += reload_mk
+pkg_reload_mk_name = reload_mk
+pkg_reload_mk_description = Live reload plugin for erlang.mk.
+pkg_reload_mk_homepage = https://github.com/bullno1/reload.mk
+pkg_reload_mk_fetch = git
+pkg_reload_mk_repo = https://github.com/bullno1/reload.mk
+pkg_reload_mk_commit = master
+
+PACKAGES += reltool_util
+pkg_reltool_util_name = reltool_util
+pkg_reltool_util_description = Erlang reltool utility functionality application
+pkg_reltool_util_homepage = https://github.com/okeuday/reltool_util
+pkg_reltool_util_fetch = git
+pkg_reltool_util_repo = https://github.com/okeuday/reltool_util
+pkg_reltool_util_commit = master
+
+PACKAGES += relx
+pkg_relx_name = relx
+pkg_relx_description = Sane, simple release creation for Erlang
+pkg_relx_homepage = https://github.com/erlware/relx
+pkg_relx_fetch = git
+pkg_relx_repo = https://github.com/erlware/relx
+pkg_relx_commit = master
+
+PACKAGES += resource_discovery
+pkg_resource_discovery_name = resource_discovery
+pkg_resource_discovery_description = An application used to dynamically discover resources present in an Erlang node cluster.
+pkg_resource_discovery_homepage = http://erlware.org/
+pkg_resource_discovery_fetch = git
+pkg_resource_discovery_repo = https://github.com/erlware/resource_discovery
+pkg_resource_discovery_commit = master
+
+PACKAGES += restc
+pkg_restc_name = restc
+pkg_restc_description = Erlang Rest Client
+pkg_restc_homepage = https://github.com/kivra/restclient
+pkg_restc_fetch = git
+pkg_restc_repo = https://github.com/kivra/restclient
+pkg_restc_commit = master
+
+PACKAGES += rfc4627_jsonrpc
+pkg_rfc4627_jsonrpc_name = rfc4627_jsonrpc
+pkg_rfc4627_jsonrpc_description = Erlang RFC4627 (JSON) codec and JSON-RPC server implementation.
+pkg_rfc4627_jsonrpc_homepage = https://github.com/tonyg/erlang-rfc4627
+pkg_rfc4627_jsonrpc_fetch = git
+pkg_rfc4627_jsonrpc_repo = https://github.com/tonyg/erlang-rfc4627
+pkg_rfc4627_jsonrpc_commit = master
+
+PACKAGES += riak_control
+pkg_riak_control_name = riak_control
+pkg_riak_control_description = Webmachine-based administration interface for Riak.
+pkg_riak_control_homepage = https://github.com/basho/riak_control
+pkg_riak_control_fetch = git
+pkg_riak_control_repo = https://github.com/basho/riak_control
+pkg_riak_control_commit = master
+
+PACKAGES += riak_core
+pkg_riak_core_name = riak_core
+pkg_riak_core_description = Distributed systems infrastructure used by Riak.
+pkg_riak_core_homepage = https://github.com/basho/riak_core
+pkg_riak_core_fetch = git
+pkg_riak_core_repo = https://github.com/basho/riak_core
+pkg_riak_core_commit = master
+
+PACKAGES += riak_dt
+pkg_riak_dt_name = riak_dt
+pkg_riak_dt_description = Convergent replicated datatypes in Erlang
+pkg_riak_dt_homepage = https://github.com/basho/riak_dt
+pkg_riak_dt_fetch = git
+pkg_riak_dt_repo = https://github.com/basho/riak_dt
+pkg_riak_dt_commit = master
+
+PACKAGES += riak_ensemble
+pkg_riak_ensemble_name = riak_ensemble
+pkg_riak_ensemble_description = Multi-Paxos framework in Erlang
+pkg_riak_ensemble_homepage = https://github.com/basho/riak_ensemble
+pkg_riak_ensemble_fetch = git
+pkg_riak_ensemble_repo = https://github.com/basho/riak_ensemble
+pkg_riak_ensemble_commit = master
+
+PACKAGES += riak_kv
+pkg_riak_kv_name = riak_kv
+pkg_riak_kv_description = Riak Key/Value Store
+pkg_riak_kv_homepage = https://github.com/basho/riak_kv
+pkg_riak_kv_fetch = git
+pkg_riak_kv_repo = https://github.com/basho/riak_kv
+pkg_riak_kv_commit = master
+
+PACKAGES += riak_pg
+pkg_riak_pg_name = riak_pg
+pkg_riak_pg_description = Distributed process groups with riak_core.
+pkg_riak_pg_homepage = https://github.com/cmeiklejohn/riak_pg
+pkg_riak_pg_fetch = git
+pkg_riak_pg_repo = https://github.com/cmeiklejohn/riak_pg
+pkg_riak_pg_commit = master
+
+PACKAGES += riak_pipe
+pkg_riak_pipe_name = riak_pipe
+pkg_riak_pipe_description = Riak Pipelines
+pkg_riak_pipe_homepage = https://github.com/basho/riak_pipe
+pkg_riak_pipe_fetch = git
+pkg_riak_pipe_repo = https://github.com/basho/riak_pipe
+pkg_riak_pipe_commit = master
+
+PACKAGES += riak_sysmon
+pkg_riak_sysmon_name = riak_sysmon
+pkg_riak_sysmon_description = Simple OTP app for managing Erlang VM system_monitor event messages
+pkg_riak_sysmon_homepage = https://github.com/basho/riak_sysmon
+pkg_riak_sysmon_fetch = git
+pkg_riak_sysmon_repo = https://github.com/basho/riak_sysmon
+pkg_riak_sysmon_commit = master
+
+PACKAGES += riak_test
+pkg_riak_test_name = riak_test
+pkg_riak_test_description = I'm in your cluster, testing your riaks
+pkg_riak_test_homepage = https://github.com/basho/riak_test
+pkg_riak_test_fetch = git
+pkg_riak_test_repo = https://github.com/basho/riak_test
+pkg_riak_test_commit = master
+
+PACKAGES += riakc
+pkg_riakc_name = riakc
+pkg_riakc_description = Erlang clients for Riak.
+pkg_riakc_homepage = https://github.com/basho/riak-erlang-client
+pkg_riakc_fetch = git
+pkg_riakc_repo = https://github.com/basho/riak-erlang-client
+pkg_riakc_commit = master
+
+PACKAGES += riakhttpc
+pkg_riakhttpc_name = riakhttpc
+pkg_riakhttpc_description = Riak Erlang client using the HTTP interface
+pkg_riakhttpc_homepage = https://github.com/basho/riak-erlang-http-client
+pkg_riakhttpc_fetch = git
+pkg_riakhttpc_repo = https://github.com/basho/riak-erlang-http-client
+pkg_riakhttpc_commit = master
+
+PACKAGES += riaknostic
+pkg_riaknostic_name = riaknostic
+pkg_riaknostic_description = A diagnostic tool for Riak installations, to find common errors asap
+pkg_riaknostic_homepage = https://github.com/basho/riaknostic
+pkg_riaknostic_fetch = git
+pkg_riaknostic_repo = https://github.com/basho/riaknostic
+pkg_riaknostic_commit = master
+
+PACKAGES += riakpool
+pkg_riakpool_name = riakpool
+pkg_riakpool_description = erlang riak client pool
+pkg_riakpool_homepage = https://github.com/dweldon/riakpool
+pkg_riakpool_fetch = git
+pkg_riakpool_repo = https://github.com/dweldon/riakpool
+pkg_riakpool_commit = master
+
+PACKAGES += rivus_cep
+pkg_rivus_cep_name = rivus_cep
+pkg_rivus_cep_description = Complex event processing in Erlang
+pkg_rivus_cep_homepage = https://github.com/vascokk/rivus_cep
+pkg_rivus_cep_fetch = git
+pkg_rivus_cep_repo = https://github.com/vascokk/rivus_cep
+pkg_rivus_cep_commit = master
+
+PACKAGES += rlimit
+pkg_rlimit_name = rlimit
+pkg_rlimit_description = Magnus Klaar's rate limiter code from etorrent
+pkg_rlimit_homepage = https://github.com/jlouis/rlimit
+pkg_rlimit_fetch = git
+pkg_rlimit_repo = https://github.com/jlouis/rlimit
+pkg_rlimit_commit = master
+
+PACKAGES += rust_mk
+pkg_rust_mk_name = rust_mk
+pkg_rust_mk_description = Build Rust crates in an Erlang application
+pkg_rust_mk_homepage = https://github.com/goertzenator/rust.mk
+pkg_rust_mk_fetch = git
+pkg_rust_mk_repo = https://github.com/goertzenator/rust.mk
+pkg_rust_mk_commit = master
+
+PACKAGES += safetyvalve
+pkg_safetyvalve_name = safetyvalve
+pkg_safetyvalve_description = A safety valve for your erlang node
+pkg_safetyvalve_homepage = https://github.com/jlouis/safetyvalve
+pkg_safetyvalve_fetch = git
+pkg_safetyvalve_repo = https://github.com/jlouis/safetyvalve
+pkg_safetyvalve_commit = master
+
+PACKAGES += seestar
+pkg_seestar_name = seestar
+pkg_seestar_description = The Erlang client for Cassandra 1.2+ binary protocol
+pkg_seestar_homepage = https://github.com/iamaleksey/seestar
+pkg_seestar_fetch = git
+pkg_seestar_repo = https://github.com/iamaleksey/seestar
+pkg_seestar_commit = master
+
+PACKAGES += service
+pkg_service_name = service
+pkg_service_description = A minimal Erlang behavior for creating CloudI internal services
+pkg_service_homepage = http://cloudi.org/
+pkg_service_fetch = git
+pkg_service_repo = https://github.com/CloudI/service
+pkg_service_commit = master
+
+PACKAGES += setup
+pkg_setup_name = setup
+pkg_setup_description = Generic setup utility for Erlang-based systems
+pkg_setup_homepage = https://github.com/uwiger/setup
+pkg_setup_fetch = git
+pkg_setup_repo = https://github.com/uwiger/setup
+pkg_setup_commit = master
+
+PACKAGES += sext
+pkg_sext_name = sext
+pkg_sext_description = Sortable Erlang Term Serialization
+pkg_sext_homepage = https://github.com/uwiger/sext
+pkg_sext_fetch = git
+pkg_sext_repo = https://github.com/uwiger/sext
+pkg_sext_commit = master
+
+PACKAGES += sfmt
+pkg_sfmt_name = sfmt
+pkg_sfmt_description = SFMT pseudo random number generator for Erlang.
+pkg_sfmt_homepage = https://github.com/jj1bdx/sfmt-erlang
+pkg_sfmt_fetch = git
+pkg_sfmt_repo = https://github.com/jj1bdx/sfmt-erlang
+pkg_sfmt_commit = master
+
+PACKAGES += sgte
+pkg_sgte_name = sgte
+pkg_sgte_description = A simple Erlang Template Engine
+pkg_sgte_homepage = https://github.com/filippo/sgte
+pkg_sgte_fetch = git
+pkg_sgte_repo = https://github.com/filippo/sgte
+pkg_sgte_commit = master
+
+PACKAGES += sheriff
+pkg_sheriff_name = sheriff
+pkg_sheriff_description = Parse transform for type based validation.
+pkg_sheriff_homepage = http://ninenines.eu
+pkg_sheriff_fetch = git
+pkg_sheriff_repo = https://github.com/extend/sheriff
+pkg_sheriff_commit = master
+
+PACKAGES += shotgun
+pkg_shotgun_name = shotgun
+pkg_shotgun_description = better than just a gun
+pkg_shotgun_homepage = https://github.com/inaka/shotgun
+pkg_shotgun_fetch = git
+pkg_shotgun_repo = https://github.com/inaka/shotgun
+pkg_shotgun_commit = master
+
+PACKAGES += sidejob
+pkg_sidejob_name = sidejob
+pkg_sidejob_description = Parallel worker and capacity limiting library for Erlang
+pkg_sidejob_homepage = https://github.com/basho/sidejob
+pkg_sidejob_fetch = git
+pkg_sidejob_repo = https://github.com/basho/sidejob
+pkg_sidejob_commit = master
+
+PACKAGES += sieve
+pkg_sieve_name = sieve
+pkg_sieve_description = sieve is a simple TCP routing proxy (layer 7) in erlang
+pkg_sieve_homepage = https://github.com/benoitc/sieve
+pkg_sieve_fetch = git
+pkg_sieve_repo = https://github.com/benoitc/sieve
+pkg_sieve_commit = master
+
+PACKAGES += sighandler
+pkg_sighandler_name = sighandler
+pkg_sighandler_description = Handle UNIX signals in Erlang
+pkg_sighandler_homepage = https://github.com/jkingsbery/sighandler
+pkg_sighandler_fetch = git
+pkg_sighandler_repo = https://github.com/jkingsbery/sighandler
+pkg_sighandler_commit = master
+
+PACKAGES += simhash
+pkg_simhash_name = simhash
+pkg_simhash_description = Simhashing for Erlang -- hashing algorithm to find near-duplicates in binary data.
+pkg_simhash_homepage = https://github.com/ferd/simhash
+pkg_simhash_fetch = git
+pkg_simhash_repo = https://github.com/ferd/simhash
+pkg_simhash_commit = master
+
+PACKAGES += simple_bridge
+pkg_simple_bridge_name = simple_bridge
+pkg_simple_bridge_description = A simple, standardized interface library to Erlang HTTP Servers.
+pkg_simple_bridge_homepage = https://github.com/nitrogen/simple_bridge
+pkg_simple_bridge_fetch = git
+pkg_simple_bridge_repo = https://github.com/nitrogen/simple_bridge
+pkg_simple_bridge_commit = master
+
+PACKAGES += simple_oauth2
+pkg_simple_oauth2_name = simple_oauth2
+pkg_simple_oauth2_description = Simple erlang OAuth2 client module for any http server framework (Google, Facebook, Yandex, Vkontakte are preconfigured)
+pkg_simple_oauth2_homepage = https://github.com/virtan/simple_oauth2
+pkg_simple_oauth2_fetch = git
+pkg_simple_oauth2_repo = https://github.com/virtan/simple_oauth2
+pkg_simple_oauth2_commit = master
+
+PACKAGES += skel
+pkg_skel_name = skel
+pkg_skel_description = A Streaming Process-based Skeleton Library for Erlang
+pkg_skel_homepage = https://github.com/ParaPhrase/skel
+pkg_skel_fetch = git
+pkg_skel_repo = https://github.com/ParaPhrase/skel
+pkg_skel_commit = master
+
+PACKAGES += slack
+pkg_slack_name = slack
+pkg_slack_description = Minimal slack notification OTP library.
+pkg_slack_homepage = https://github.com/DonBranson/slack
+pkg_slack_fetch = git
+pkg_slack_repo = https://github.com/DonBranson/slack.git
+pkg_slack_commit = master
+
+PACKAGES += smother
+pkg_smother_name = smother
+pkg_smother_description = Extended code coverage metrics for Erlang.
+pkg_smother_homepage = https://ramsay-t.github.io/Smother/
+pkg_smother_fetch = git
+pkg_smother_repo = https://github.com/ramsay-t/Smother
+pkg_smother_commit = master
+
+PACKAGES += snappyer
+pkg_snappyer_name = snappyer
+pkg_snappyer_description = Snappy as nif for Erlang
+pkg_snappyer_homepage = https://github.com/zmstone/snappyer
+pkg_snappyer_fetch = git
+pkg_snappyer_repo = https://github.com/zmstone/snappyer.git
+pkg_snappyer_commit = master
+
+PACKAGES += social
+pkg_social_name = social
+pkg_social_description = Cowboy handler for social login via OAuth2 providers
+pkg_social_homepage = https://github.com/dvv/social
+pkg_social_fetch = git
+pkg_social_repo = https://github.com/dvv/social
+pkg_social_commit = master
+
+PACKAGES += spapi_router
+pkg_spapi_router_name = spapi_router
+pkg_spapi_router_description = Partially-connected Erlang clustering
+pkg_spapi_router_homepage = https://github.com/spilgames/spapi-router
+pkg_spapi_router_fetch = git
+pkg_spapi_router_repo = https://github.com/spilgames/spapi-router
+pkg_spapi_router_commit = master
+
+PACKAGES += sqerl
+pkg_sqerl_name = sqerl
+pkg_sqerl_description = An Erlang-flavoured SQL DSL
+pkg_sqerl_homepage = https://github.com/hairyhum/sqerl
+pkg_sqerl_fetch = git
+pkg_sqerl_repo = https://github.com/hairyhum/sqerl
+pkg_sqerl_commit = master
+
+PACKAGES += srly
+pkg_srly_name = srly
+pkg_srly_description = Native Erlang Unix serial interface
+pkg_srly_homepage = https://github.com/msantos/srly
+pkg_srly_fetch = git
+pkg_srly_repo = https://github.com/msantos/srly
+pkg_srly_commit = master
+
+PACKAGES += sshrpc
+pkg_sshrpc_name = sshrpc
+pkg_sshrpc_description = Erlang SSH RPC module (experimental)
+pkg_sshrpc_homepage = https://github.com/jj1bdx/sshrpc
+pkg_sshrpc_fetch = git
+pkg_sshrpc_repo = https://github.com/jj1bdx/sshrpc
+pkg_sshrpc_commit = master
+
+PACKAGES += stable
+pkg_stable_name = stable
+pkg_stable_description = Library of assorted helpers for Cowboy web server.
+pkg_stable_homepage = https://github.com/dvv/stable
+pkg_stable_fetch = git
+pkg_stable_repo = https://github.com/dvv/stable
+pkg_stable_commit = master
+
+PACKAGES += statebox
+pkg_statebox_name = statebox
+pkg_statebox_description = Erlang state monad with merge/conflict-resolution capabilities. Useful for Riak.
+pkg_statebox_homepage = https://github.com/mochi/statebox
+pkg_statebox_fetch = git
+pkg_statebox_repo = https://github.com/mochi/statebox
+pkg_statebox_commit = master
+
+PACKAGES += statebox_riak
+pkg_statebox_riak_name = statebox_riak
+pkg_statebox_riak_description = Convenience library that makes it easier to use statebox with riak, extracted from best practices in our production code at Mochi Media.
+pkg_statebox_riak_homepage = https://github.com/mochi/statebox_riak
+pkg_statebox_riak_fetch = git
+pkg_statebox_riak_repo = https://github.com/mochi/statebox_riak
+pkg_statebox_riak_commit = master
+
+PACKAGES += statman
+pkg_statman_name = statman
+pkg_statman_description = Efficiently collect massive volumes of metrics inside the Erlang VM
+pkg_statman_homepage = https://github.com/knutin/statman
+pkg_statman_fetch = git
+pkg_statman_repo = https://github.com/knutin/statman
+pkg_statman_commit = master
+
+PACKAGES += statsderl
+pkg_statsderl_name = statsderl
+pkg_statsderl_description = StatsD client (erlang)
+pkg_statsderl_homepage = https://github.com/lpgauth/statsderl
+pkg_statsderl_fetch = git
+pkg_statsderl_repo = https://github.com/lpgauth/statsderl
+pkg_statsderl_commit = master
+
+PACKAGES += stdinout_pool
+pkg_stdinout_pool_name = stdinout_pool
+pkg_stdinout_pool_description = stdinout_pool : stuff goes in, stuff goes out. there's never any miscommunication.
+pkg_stdinout_pool_homepage = https://github.com/mattsta/erlang-stdinout-pool
+pkg_stdinout_pool_fetch = git
+pkg_stdinout_pool_repo = https://github.com/mattsta/erlang-stdinout-pool
+pkg_stdinout_pool_commit = master
+
+PACKAGES += stockdb
+pkg_stockdb_name = stockdb
+pkg_stockdb_description = Database for storing Stock Exchange quotes in erlang
+pkg_stockdb_homepage = https://github.com/maxlapshin/stockdb
+pkg_stockdb_fetch = git
+pkg_stockdb_repo = https://github.com/maxlapshin/stockdb
+pkg_stockdb_commit = master
+
+PACKAGES += stripe
+pkg_stripe_name = stripe
+pkg_stripe_description = Erlang interface to the stripe.com API
+pkg_stripe_homepage = https://github.com/mattsta/stripe-erlang
+pkg_stripe_fetch = git
+pkg_stripe_repo = https://github.com/mattsta/stripe-erlang
+pkg_stripe_commit = v1
+
+PACKAGES += subproc
+pkg_subproc_name = subproc
+pkg_subproc_description = unix subprocess manager with {active,once|false} modes
+pkg_subproc_homepage = http://dozzie.jarowit.net/trac/wiki/subproc
+pkg_subproc_fetch = git
+pkg_subproc_repo = https://github.com/dozzie/subproc
+pkg_subproc_commit = v0.1.0
+
+PACKAGES += supervisor3
+pkg_supervisor3_name = supervisor3
+pkg_supervisor3_description = OTP supervisor with additional strategies
+pkg_supervisor3_homepage = https://github.com/klarna/supervisor3
+pkg_supervisor3_fetch = git
+pkg_supervisor3_repo = https://github.com/klarna/supervisor3.git
+pkg_supervisor3_commit = master
+
+PACKAGES += surrogate
+pkg_surrogate_name = surrogate
+pkg_surrogate_description = Proxy server written in erlang. Supports reverse proxy load balancing and forward proxy with http (including CONNECT), socks4, socks5, and transparent proxy modes.
+pkg_surrogate_homepage = https://github.com/skruger/Surrogate
+pkg_surrogate_fetch = git
+pkg_surrogate_repo = https://github.com/skruger/Surrogate
+pkg_surrogate_commit = master
+
+PACKAGES += swab
+pkg_swab_name = swab
+pkg_swab_description = General purpose buffer handling module
+pkg_swab_homepage = https://github.com/crownedgrouse/swab
+pkg_swab_fetch = git
+pkg_swab_repo = https://github.com/crownedgrouse/swab
+pkg_swab_commit = master
+
+PACKAGES += swarm
+pkg_swarm_name = swarm
+pkg_swarm_description = Fast and simple acceptor pool for Erlang
+pkg_swarm_homepage = https://github.com/jeremey/swarm
+pkg_swarm_fetch = git
+pkg_swarm_repo = https://github.com/jeremey/swarm
+pkg_swarm_commit = master
+
+PACKAGES += switchboard
+pkg_switchboard_name = switchboard
+pkg_switchboard_description = A framework for processing email using worker plugins.
+pkg_switchboard_homepage = https://github.com/thusfresh/switchboard
+pkg_switchboard_fetch = git
+pkg_switchboard_repo = https://github.com/thusfresh/switchboard
+pkg_switchboard_commit = master
+
+PACKAGES += syn
+pkg_syn_name = syn
+pkg_syn_description = A global Process Registry and Process Group manager for Erlang.
+pkg_syn_homepage = https://github.com/ostinelli/syn
+pkg_syn_fetch = git
+pkg_syn_repo = https://github.com/ostinelli/syn
+pkg_syn_commit = master
+
+PACKAGES += sync
+pkg_sync_name = sync
+pkg_sync_description = On-the-fly recompiling and reloading in Erlang.
+pkg_sync_homepage = https://github.com/rustyio/sync
+pkg_sync_fetch = git
+pkg_sync_repo = https://github.com/rustyio/sync
+pkg_sync_commit = master
+
+PACKAGES += syntaxerl
+pkg_syntaxerl_name = syntaxerl
+pkg_syntaxerl_description = Syntax checker for Erlang
+pkg_syntaxerl_homepage = https://github.com/ten0s/syntaxerl
+pkg_syntaxerl_fetch = git
+pkg_syntaxerl_repo = https://github.com/ten0s/syntaxerl
+pkg_syntaxerl_commit = master
+
+PACKAGES += syslog
+pkg_syslog_name = syslog
+pkg_syslog_description = Erlang port driver for interacting with syslog via syslog(3)
+pkg_syslog_homepage = https://github.com/Vagabond/erlang-syslog
+pkg_syslog_fetch = git
+pkg_syslog_repo = https://github.com/Vagabond/erlang-syslog
+pkg_syslog_commit = master
+
+PACKAGES += taskforce
+pkg_taskforce_name = taskforce
+pkg_taskforce_description = Erlang worker pools for controlled parallelisation of arbitrary tasks.
+pkg_taskforce_homepage = https://github.com/g-andrade/taskforce
+pkg_taskforce_fetch = git
+pkg_taskforce_repo = https://github.com/g-andrade/taskforce
+pkg_taskforce_commit = master
+
+PACKAGES += tddreloader
+pkg_tddreloader_name = tddreloader
+pkg_tddreloader_description = Shell utility for recompiling, reloading, and testing code as it changes
+pkg_tddreloader_homepage = https://github.com/version2beta/tddreloader
+pkg_tddreloader_fetch = git
+pkg_tddreloader_repo = https://github.com/version2beta/tddreloader
+pkg_tddreloader_commit = master
+
+PACKAGES += tempo
+pkg_tempo_name = tempo
+pkg_tempo_description = NIF-based date and time parsing and formatting for Erlang.
+pkg_tempo_homepage = https://github.com/selectel/tempo
+pkg_tempo_fetch = git
+pkg_tempo_repo = https://github.com/selectel/tempo
+pkg_tempo_commit = master
+
+PACKAGES += ticktick
+pkg_ticktick_name = ticktick
+pkg_ticktick_description = Ticktick is an ID generator for message services.
+pkg_ticktick_homepage = https://github.com/ericliang/ticktick
+pkg_ticktick_fetch = git
+pkg_ticktick_repo = https://github.com/ericliang/ticktick
+pkg_ticktick_commit = master
+
+PACKAGES += tinymq
+pkg_tinymq_name = tinymq
+pkg_tinymq_description = TinyMQ - a diminutive, in-memory message queue
+pkg_tinymq_homepage = https://github.com/ChicagoBoss/tinymq
+pkg_tinymq_fetch = git
+pkg_tinymq_repo = https://github.com/ChicagoBoss/tinymq
+pkg_tinymq_commit = master
+
+PACKAGES += tinymt
+pkg_tinymt_name = tinymt
+pkg_tinymt_description = TinyMT pseudo random number generator for Erlang.
+pkg_tinymt_homepage = https://github.com/jj1bdx/tinymt-erlang
+pkg_tinymt_fetch = git
+pkg_tinymt_repo = https://github.com/jj1bdx/tinymt-erlang
+pkg_tinymt_commit = master
+
+PACKAGES += tirerl
+pkg_tirerl_name = tirerl
+pkg_tirerl_description = Erlang interface to Elastic Search
+pkg_tirerl_homepage = https://github.com/inaka/tirerl
+pkg_tirerl_fetch = git
+pkg_tirerl_repo = https://github.com/inaka/tirerl
+pkg_tirerl_commit = master
+
+PACKAGES += toml
+pkg_toml_name = toml
+pkg_toml_description = TOML (0.4.0) config parser
+pkg_toml_homepage = http://dozzie.jarowit.net/trac/wiki/TOML
+pkg_toml_fetch = git
+pkg_toml_repo = https://github.com/dozzie/toml
+pkg_toml_commit = v0.2.0
+
+PACKAGES += traffic_tools
+pkg_traffic_tools_name = traffic_tools
+pkg_traffic_tools_description = Simple traffic limiting library
+pkg_traffic_tools_homepage = https://github.com/systra/traffic_tools
+pkg_traffic_tools_fetch = git
+pkg_traffic_tools_repo = https://github.com/systra/traffic_tools
+pkg_traffic_tools_commit = master
+
+PACKAGES += trails
+pkg_trails_name = trails
+pkg_trails_description = A couple of improvements over Cowboy Routes
+pkg_trails_homepage = http://inaka.github.io/cowboy-trails/
+pkg_trails_fetch = git
+pkg_trails_repo = https://github.com/inaka/cowboy-trails
+pkg_trails_commit = master
+
+PACKAGES += trane
+pkg_trane_name = trane
+pkg_trane_description = SAX-style parser for broken HTML, in Erlang
+pkg_trane_homepage = https://github.com/massemanet/trane
+pkg_trane_fetch = git
+pkg_trane_repo = https://github.com/massemanet/trane
+pkg_trane_commit = master
+
+PACKAGES += transit
+pkg_transit_name = transit
+pkg_transit_description = transit format for erlang
+pkg_transit_homepage = https://github.com/isaiah/transit-erlang
+pkg_transit_fetch = git
+pkg_transit_repo = https://github.com/isaiah/transit-erlang
+pkg_transit_commit = master
+
+PACKAGES += trie
+pkg_trie_name = trie
+pkg_trie_description = Erlang Trie Implementation
+pkg_trie_homepage = https://github.com/okeuday/trie
+pkg_trie_fetch = git
+pkg_trie_repo = https://github.com/okeuday/trie
+pkg_trie_commit = master
+
+PACKAGES += triq
+pkg_triq_name = triq
+pkg_triq_description = Trifork QuickCheck
+pkg_triq_homepage = https://triq.gitlab.io
+pkg_triq_fetch = git
+pkg_triq_repo = https://gitlab.com/triq/triq.git
+pkg_triq_commit = master
+
+PACKAGES += tunctl
+pkg_tunctl_name = tunctl
+pkg_tunctl_description = Erlang TUN/TAP interface
+pkg_tunctl_homepage = https://github.com/msantos/tunctl
+pkg_tunctl_fetch = git
+pkg_tunctl_repo = https://github.com/msantos/tunctl
+pkg_tunctl_commit = master
+
+PACKAGES += twerl
+pkg_twerl_name = twerl
+pkg_twerl_description = Erlang client for the Twitter Streaming API
+pkg_twerl_homepage = https://github.com/lucaspiller/twerl
+pkg_twerl_fetch = git
+pkg_twerl_repo = https://github.com/lucaspiller/twerl
+pkg_twerl_commit = oauth
+
+PACKAGES += twitter_erlang
+pkg_twitter_erlang_name = twitter_erlang
+pkg_twitter_erlang_description = An Erlang twitter client
+pkg_twitter_erlang_homepage = https://github.com/ngerakines/erlang_twitter
+pkg_twitter_erlang_fetch = git
+pkg_twitter_erlang_repo = https://github.com/ngerakines/erlang_twitter
+pkg_twitter_erlang_commit = master
+
+PACKAGES += ucol_nif
+pkg_ucol_nif_name = ucol_nif
+pkg_ucol_nif_description = ICU based collation Erlang module
+pkg_ucol_nif_homepage = https://github.com/refuge/ucol_nif
+pkg_ucol_nif_fetch = git
+pkg_ucol_nif_repo = https://github.com/refuge/ucol_nif
+pkg_ucol_nif_commit = master
+
+PACKAGES += unicorn
+pkg_unicorn_name = unicorn
+pkg_unicorn_description = Generic configuration server
+pkg_unicorn_homepage = https://github.com/shizzard/unicorn
+pkg_unicorn_fetch = git
+pkg_unicorn_repo = https://github.com/shizzard/unicorn
+pkg_unicorn_commit = master
+
+PACKAGES += unsplit
+pkg_unsplit_name = unsplit
+pkg_unsplit_description = Resolves conflicts in Mnesia after network splits
+pkg_unsplit_homepage = https://github.com/uwiger/unsplit
+pkg_unsplit_fetch = git
+pkg_unsplit_repo = https://github.com/uwiger/unsplit
+pkg_unsplit_commit = master
+
+PACKAGES += uuid
+pkg_uuid_name = uuid
+pkg_uuid_description = Erlang UUID Implementation
+pkg_uuid_homepage = https://github.com/okeuday/uuid
+pkg_uuid_fetch = git
+pkg_uuid_repo = https://github.com/okeuday/uuid
+pkg_uuid_commit = master
+
+PACKAGES += ux
+pkg_ux_name = ux
+pkg_ux_description = Unicode eXtension for Erlang (Strings, Collation)
+pkg_ux_homepage = https://github.com/erlang-unicode/ux
+pkg_ux_fetch = git
+pkg_ux_repo = https://github.com/erlang-unicode/ux
+pkg_ux_commit = master
+
+PACKAGES += vert
+pkg_vert_name = vert
+pkg_vert_description = Erlang binding to libvirt virtualization API
+pkg_vert_homepage = https://github.com/msantos/erlang-libvirt
+pkg_vert_fetch = git
+pkg_vert_repo = https://github.com/msantos/erlang-libvirt
+pkg_vert_commit = master
+
+PACKAGES += verx
+pkg_verx_name = verx
+pkg_verx_description = Erlang implementation of the libvirtd remote protocol
+pkg_verx_homepage = https://github.com/msantos/verx
+pkg_verx_fetch = git
+pkg_verx_repo = https://github.com/msantos/verx
+pkg_verx_commit = master
+
+PACKAGES += vmq_acl
+pkg_vmq_acl_name = vmq_acl
+pkg_vmq_acl_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_acl_homepage = https://verne.mq/
+pkg_vmq_acl_fetch = git
+pkg_vmq_acl_repo = https://github.com/erlio/vmq_acl
+pkg_vmq_acl_commit = master
+
+PACKAGES += vmq_bridge
+pkg_vmq_bridge_name = vmq_bridge
+pkg_vmq_bridge_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_bridge_homepage = https://verne.mq/
+pkg_vmq_bridge_fetch = git
+pkg_vmq_bridge_repo = https://github.com/erlio/vmq_bridge
+pkg_vmq_bridge_commit = master
+
+PACKAGES += vmq_graphite
+pkg_vmq_graphite_name = vmq_graphite
+pkg_vmq_graphite_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_graphite_homepage = https://verne.mq/
+pkg_vmq_graphite_fetch = git
+pkg_vmq_graphite_repo = https://github.com/erlio/vmq_graphite
+pkg_vmq_graphite_commit = master
+
+PACKAGES += vmq_passwd
+pkg_vmq_passwd_name = vmq_passwd
+pkg_vmq_passwd_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_passwd_homepage = https://verne.mq/
+pkg_vmq_passwd_fetch = git
+pkg_vmq_passwd_repo = https://github.com/erlio/vmq_passwd
+pkg_vmq_passwd_commit = master
+
+PACKAGES += vmq_server
+pkg_vmq_server_name = vmq_server
+pkg_vmq_server_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_server_homepage = https://verne.mq/
+pkg_vmq_server_fetch = git
+pkg_vmq_server_repo = https://github.com/erlio/vmq_server
+pkg_vmq_server_commit = master
+
+PACKAGES += vmq_snmp
+pkg_vmq_snmp_name = vmq_snmp
+pkg_vmq_snmp_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_snmp_homepage = https://verne.mq/
+pkg_vmq_snmp_fetch = git
+pkg_vmq_snmp_repo = https://github.com/erlio/vmq_snmp
+pkg_vmq_snmp_commit = master
+
+PACKAGES += vmq_systree
+pkg_vmq_systree_name = vmq_systree
+pkg_vmq_systree_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_systree_homepage = https://verne.mq/
+pkg_vmq_systree_fetch = git
+pkg_vmq_systree_repo = https://github.com/erlio/vmq_systree
+pkg_vmq_systree_commit = master
+
+PACKAGES += vmstats
+pkg_vmstats_name = vmstats
+pkg_vmstats_description = tiny Erlang app that works in conjunction with statsderl in order to generate information on the Erlang VM for graphite logs.
+pkg_vmstats_homepage = https://github.com/ferd/vmstats
+pkg_vmstats_fetch = git
+pkg_vmstats_repo = https://github.com/ferd/vmstats
+pkg_vmstats_commit = master
+
+PACKAGES += walrus
+pkg_walrus_name = walrus
+pkg_walrus_description = Walrus - Mustache-like Templating
+pkg_walrus_homepage = https://github.com/devinus/walrus
+pkg_walrus_fetch = git
+pkg_walrus_repo = https://github.com/devinus/walrus
+pkg_walrus_commit = master
+
+PACKAGES += webmachine
+pkg_webmachine_name = webmachine
+pkg_webmachine_description = A REST-based system for building web applications.
+pkg_webmachine_homepage = https://github.com/basho/webmachine
+pkg_webmachine_fetch = git
+pkg_webmachine_repo = https://github.com/basho/webmachine
+pkg_webmachine_commit = master
+
+PACKAGES += websocket_client
+pkg_websocket_client_name = websocket_client
+pkg_websocket_client_description = Erlang websocket client (ws and wss supported)
+pkg_websocket_client_homepage = https://github.com/jeremyong/websocket_client
+pkg_websocket_client_fetch = git
+pkg_websocket_client_repo = https://github.com/jeremyong/websocket_client
+pkg_websocket_client_commit = master
+
+PACKAGES += worker_pool
+pkg_worker_pool_name = worker_pool
+pkg_worker_pool_description = a simple erlang worker pool
+pkg_worker_pool_homepage = https://github.com/inaka/worker_pool
+pkg_worker_pool_fetch = git
+pkg_worker_pool_repo = https://github.com/inaka/worker_pool
+pkg_worker_pool_commit = master
+
+PACKAGES += wrangler
+pkg_wrangler_name = wrangler
+pkg_wrangler_description = Import of the Wrangler svn repository.
+pkg_wrangler_homepage = http://www.cs.kent.ac.uk/projects/wrangler/Home.html
+pkg_wrangler_fetch = git
+pkg_wrangler_repo = https://github.com/RefactoringTools/wrangler
+pkg_wrangler_commit = master
+
+PACKAGES += wsock
+pkg_wsock_name = wsock
+pkg_wsock_description = Erlang library to build WebSocket clients and servers
+pkg_wsock_homepage = https://github.com/madtrick/wsock
+pkg_wsock_fetch = git
+pkg_wsock_repo = https://github.com/madtrick/wsock
+pkg_wsock_commit = master
+
+PACKAGES += xhttpc
+pkg_xhttpc_name = xhttpc
+pkg_xhttpc_description = Extensible HTTP Client for Erlang
+pkg_xhttpc_homepage = https://github.com/seriyps/xhttpc
+pkg_xhttpc_fetch = git
+pkg_xhttpc_repo = https://github.com/seriyps/xhttpc
+pkg_xhttpc_commit = master
+
+PACKAGES += xref_runner
+pkg_xref_runner_name = xref_runner
+pkg_xref_runner_description = Erlang Xref Runner (inspired in rebar xref)
+pkg_xref_runner_homepage = https://github.com/inaka/xref_runner
+pkg_xref_runner_fetch = git
+pkg_xref_runner_repo = https://github.com/inaka/xref_runner
+pkg_xref_runner_commit = master
+
+PACKAGES += yamerl
+pkg_yamerl_name = yamerl
+pkg_yamerl_description = YAML 1.2 parser in pure Erlang
+pkg_yamerl_homepage = https://github.com/yakaz/yamerl
+pkg_yamerl_fetch = git
+pkg_yamerl_repo = https://github.com/yakaz/yamerl
+pkg_yamerl_commit = master
+
+PACKAGES += yamler
+pkg_yamler_name = yamler
+pkg_yamler_description = libyaml-based yaml loader for Erlang
+pkg_yamler_homepage = https://github.com/goertzenator/yamler
+pkg_yamler_fetch = git
+pkg_yamler_repo = https://github.com/goertzenator/yamler
+pkg_yamler_commit = master
+
+PACKAGES += yaws
+pkg_yaws_name = yaws
+pkg_yaws_description = Yaws webserver
+pkg_yaws_homepage = http://yaws.hyber.org
+pkg_yaws_fetch = git
+pkg_yaws_repo = https://github.com/klacke/yaws
+pkg_yaws_commit = master
+
+PACKAGES += zab_engine
+pkg_zab_engine_name = zab_engine
+pkg_zab_engine_description = Zab protocol implemented in Erlang
+pkg_zab_engine_homepage = https://github.com/xinmingyao/zab_engine
+pkg_zab_engine_fetch = git
+pkg_zab_engine_repo = https://github.com/xinmingyao/zab_engine
+pkg_zab_engine_commit = master
+
+PACKAGES += zabbix_sender
+pkg_zabbix_sender_name = zabbix_sender
+pkg_zabbix_sender_description = Zabbix trapper for sending data to Zabbix in pure Erlang
+pkg_zabbix_sender_homepage = https://github.com/stalkermn/zabbix_sender
+pkg_zabbix_sender_fetch = git
+pkg_zabbix_sender_repo = https://github.com/stalkermn/zabbix_sender.git
+pkg_zabbix_sender_commit = master
+
+PACKAGES += zeta
+pkg_zeta_name = zeta
+pkg_zeta_description = HTTP access log parser in Erlang
+pkg_zeta_homepage = https://github.com/s1n4/zeta
+pkg_zeta_fetch = git
+pkg_zeta_repo = https://github.com/s1n4/zeta
+pkg_zeta_commit = master
+
+PACKAGES += zippers
+pkg_zippers_name = zippers
+pkg_zippers_description = A library for functional zipper data structures in Erlang.
+pkg_zippers_homepage = https://github.com/ferd/zippers
+pkg_zippers_fetch = git
+pkg_zippers_repo = https://github.com/ferd/zippers
+pkg_zippers_commit = master
+
+PACKAGES += zlists
+pkg_zlists_name = zlists
+pkg_zlists_description = Erlang lazy lists library.
+pkg_zlists_homepage = https://github.com/vjache/erlang-zlists
+pkg_zlists_fetch = git
+pkg_zlists_repo = https://github.com/vjache/erlang-zlists
+pkg_zlists_commit = master
+
+PACKAGES += zraft_lib
+pkg_zraft_lib_name = zraft_lib
+pkg_zraft_lib_description = Erlang raft consensus protocol implementation
+pkg_zraft_lib_homepage = https://github.com/dreyk/zraft_lib
+pkg_zraft_lib_fetch = git
+pkg_zraft_lib_repo = https://github.com/dreyk/zraft_lib
+pkg_zraft_lib_commit = master
+
+PACKAGES += zucchini
+pkg_zucchini_name = zucchini
+pkg_zucchini_description = An Erlang INI parser
+pkg_zucchini_homepage = https://github.com/devinus/zucchini
+pkg_zucchini_fetch = git
+pkg_zucchini_repo = https://github.com/devinus/zucchini
+pkg_zucchini_commit = master
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: search
+
+define pkg_print
+ $(verbose) printf "%s\n" \
+ $(if $(call core_eq,$(1),$(pkg_$(1)_name)),,"Pkg name: $(1)") \
+ "App name: $(pkg_$(1)_name)" \
+ "Description: $(pkg_$(1)_description)" \
+ "Home page: $(pkg_$(1)_homepage)" \
+ "Fetch with: $(pkg_$(1)_fetch)" \
+ "Repository: $(pkg_$(1)_repo)" \
+ "Commit: $(pkg_$(1)_commit)" \
+ ""
+
+endef
+
+search:
+ifdef q
+ $(foreach p,$(PACKAGES), \
+ $(if $(findstring $(call core_lc,$(q)),$(call core_lc,$(pkg_$(p)_name) $(pkg_$(p)_description))), \
+ $(call pkg_print,$(p))))
+else
+ $(foreach p,$(PACKAGES),$(call pkg_print,$(p)))
+endif
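+
+# Usage sketch for the search target above (assuming a project that
+# includes this erlang.mk); "cowboy" is just an illustrative query:
+#   make search            # print every known package
+#   make search q=cowboy   # case-insensitive match on name and description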
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-deps clean-tmp-deps.log
+
+# Configuration.
+
+ifdef OTP_DEPS
+$(warning The variable OTP_DEPS is deprecated in favor of LOCAL_DEPS.)
+endif
+
+IGNORE_DEPS ?=
+export IGNORE_DEPS
+
+APPS_DIR ?= $(CURDIR)/apps
+export APPS_DIR
+
+DEPS_DIR ?= $(CURDIR)/deps
+export DEPS_DIR
+
+REBAR_DEPS_DIR = $(DEPS_DIR)
+export REBAR_DEPS_DIR
+
+REBAR_GIT ?= https://github.com/rebar/rebar
+REBAR_COMMIT ?= 576e12171ab8d69b048b827b92aa65d067deea01
+
+# External "early" plugins (see core/plugins.mk for regular plugins).
+# They both use the core_dep_plugin macro.
+
+define core_dep_plugin
+ifeq ($(2),$(PROJECT))
+-include $$(patsubst $(PROJECT)/%,%,$(1))
+else
+-include $(DEPS_DIR)/$(1)
+
+$(DEPS_DIR)/$(1): $(DEPS_DIR)/$(2) ;
+endif
+endef
+
+DEP_EARLY_PLUGINS ?=
+
+$(foreach p,$(DEP_EARLY_PLUGINS),\
+ $(eval $(if $(findstring /,$p),\
+ $(call core_dep_plugin,$p,$(firstword $(subst /, ,$p))),\
+ $(call core_dep_plugin,$p/early-plugins.mk,$p))))
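+
+# Usage sketch (hypothetical dependency named "my_dep"):
+#   DEP_EARLY_PLUGINS = my_dep              # includes $(DEPS_DIR)/my_dep/early-plugins.mk
+#   DEP_EARLY_PLUGINS = my_dep/mk/early.mk  # includes that specific file from the dependency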
+
+# Query functions.
+
+query_fetch_method = $(if $(dep_$(1)),$(call _qfm_dep,$(word 1,$(dep_$(1)))),$(call _qfm_pkg,$(1)))
+_qfm_dep = $(if $(dep_fetch_$(1)),$(1),$(if $(IS_DEP),legacy,fail))
+_qfm_pkg = $(if $(pkg_$(1)_fetch),$(pkg_$(1)_fetch),fail)
+
+query_name = $(if $(dep_$(1)),$(1),$(if $(pkg_$(1)_name),$(pkg_$(1)_name),$(1)))
+
+query_repo = $(call _qr,$(1),$(call query_fetch_method,$(1)))
+_qr = $(if $(query_repo_$(2)),$(call query_repo_$(2),$(1)),$(call dep_repo,$(1)))
+
+query_repo_default = $(if $(dep_$(1)),$(word 2,$(dep_$(1))),$(pkg_$(1)_repo))
+query_repo_git = $(patsubst git://github.com/%,https://github.com/%,$(call query_repo_default,$(1)))
+query_repo_git-subfolder = $(call query_repo_git,$(1))
+query_repo_git-submodule = -
+query_repo_hg = $(call query_repo_default,$(1))
+query_repo_svn = $(call query_repo_default,$(1))
+query_repo_cp = $(call query_repo_default,$(1))
+query_repo_ln = $(call query_repo_default,$(1))
+query_repo_hex = https://hex.pm/packages/$(if $(word 3,$(dep_$(1))),$(word 3,$(dep_$(1))),$(1))
+query_repo_fail = -
+query_repo_legacy = -
+
+query_version = $(call _qv,$(1),$(call query_fetch_method,$(1)))
+_qv = $(if $(query_version_$(2)),$(call query_version_$(2),$(1)),$(call dep_commit,$(1)))
+
+query_version_default = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(word 3,$(dep_$(1))),$(pkg_$(1)_commit)))
+query_version_git = $(call query_version_default,$(1))
+query_version_git-subfolder = $(call query_version_git,$(1))
+query_version_git-submodule = -
+query_version_hg = $(call query_version_default,$(1))
+query_version_svn = -
+query_version_cp = -
+query_version_ln = -
+query_version_hex = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(word 2,$(dep_$(1))),$(pkg_$(1)_commit)))
+query_version_fail = -
+query_version_legacy = -
+
+query_extra = $(call _qe,$(1),$(call query_fetch_method,$(1)))
+_qe = $(if $(query_extra_$(2)),$(call query_extra_$(2),$(1)),-)
+
+query_extra_git = -
+query_extra_git-subfolder = $(if $(dep_$(1)),subfolder=$(word 4,$(dep_$(1))),-)
+query_extra_git-submodule = -
+query_extra_hg = -
+query_extra_svn = -
+query_extra_cp = -
+query_extra_ln = -
+query_extra_hex = $(if $(dep_$(1)),package-name=$(word 3,$(dep_$(1))),-)
+query_extra_fail = -
+query_extra_legacy = -
+
+query_absolute_path = $(addprefix $(DEPS_DIR)/,$(call query_name,$(1)))
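+
+# Example queries, assuming a hypothetical dependency declared as
+# "dep_my_dep = git https://github.com/user/my_dep 1.0.0":
+#   $(call query_fetch_method,my_dep)  -> git
+#   $(call query_repo,my_dep)          -> https://github.com/user/my_dep
+#   $(call query_version,my_dep)       -> 1.0.0
+#   $(call query_absolute_path,my_dep) -> $(DEPS_DIR)/my_dep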
+
+# Deprecated legacy query functions.
+dep_fetch = $(call query_fetch_method,$(1))
+dep_name = $(call query_name,$(1))
+dep_repo = $(call query_repo_git,$(1))
+dep_commit = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(if $(filter hex,$(word 1,$(dep_$(1)))),$(word 2,$(dep_$(1))),$(word 3,$(dep_$(1)))),$(pkg_$(1)_commit)))
+
+LOCAL_DEPS_DIRS = $(foreach a,$(LOCAL_DEPS),$(if $(wildcard $(APPS_DIR)/$(a)),$(APPS_DIR)/$(a)))
+ALL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(foreach dep,$(filter-out $(IGNORE_DEPS),$(BUILD_DEPS) $(DEPS)),$(call dep_name,$(dep))))
+
+# When we are calling an app directly we don't want to include it here;
+# otherwise it'll be treated both as an app and as the top-level project.
+ALL_APPS_DIRS = $(if $(wildcard $(APPS_DIR)/),$(filter-out $(APPS_DIR),$(shell find $(APPS_DIR) -maxdepth 1 -type d)))
+ifdef ROOT_DIR
+ifndef IS_APP
+ALL_APPS_DIRS := $(filter-out $(APPS_DIR)/$(notdir $(CURDIR)),$(ALL_APPS_DIRS))
+endif
+endif
+
+ifeq ($(filter $(APPS_DIR) $(DEPS_DIR),$(subst :, ,$(ERL_LIBS))),)
+ifeq ($(ERL_LIBS),)
+ ERL_LIBS = $(APPS_DIR):$(DEPS_DIR)
+else
+ ERL_LIBS := $(ERL_LIBS):$(APPS_DIR):$(DEPS_DIR)
+endif
+endif
+export ERL_LIBS
+
+export NO_AUTOPATCH
+
+# Verbosity.
+
+dep_verbose_0 = @echo " DEP $1 ($(call dep_commit,$1))";
+dep_verbose_2 = set -x;
+dep_verbose = $(dep_verbose_$(V))
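+
+# For example, "make V=2" traces each dependency command via "set -x",
+# while the default V=0 prints only terse "DEP name (commit)" lines.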
+
+# Optimization: don't recompile deps unless truly necessary.
+
+ifndef IS_DEP
+ifneq ($(MAKELEVEL),0)
+$(shell rm -f ebin/dep_built)
+endif
+endif
+
+# Core targets.
+
+ALL_APPS_DIRS_TO_BUILD = $(if $(LOCAL_DEPS_DIRS)$(IS_APP),$(LOCAL_DEPS_DIRS),$(ALL_APPS_DIRS))
+
+apps:: $(ALL_APPS_DIRS) clean-tmp-deps.log | $(ERLANG_MK_TMP)
+# Create an ebin directory for each app to make sure Erlang recognizes
+# them as proper OTP applications when using -include_lib. This is a
+# temporary fix; a proper fix would be to compile apps/* in the right order.
+ifndef IS_APP
+ifneq ($(ALL_APPS_DIRS),)
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ mkdir -p $$dep/ebin; \
+ done
+endif
+endif
+# At the toplevel: if LOCAL_DEPS is defined with at least one local app, only
+# compile that list of apps. Otherwise, compile everything.
+# Within an app: compile all LOCAL_DEPS that are (uncompiled) local apps.
+ifneq ($(ALL_APPS_DIRS_TO_BUILD),)
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS_TO_BUILD); do \
+ if grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/apps.log; then \
+ :; \
+ else \
+ echo $$dep >> $(ERLANG_MK_TMP)/apps.log; \
+ $(MAKE) -C $$dep $(if $(IS_TEST),test-build-app) IS_APP=1; \
+ fi \
+ done
+endif
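+
+# Example (hypothetical application living in $(APPS_DIR)/my_app):
+#   LOCAL_DEPS = my_app   # at the top level, only my_app is compiled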
+
+clean-tmp-deps.log:
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_TMP)/apps.log $(ERLANG_MK_TMP)/deps.log
+endif
+
+# Erlang.mk does not rebuild dependencies after they were compiled
+# once. Developers working on the top-level project and some of its
+# dependencies at the same time may want to change this behavior.
+# There are two solutions:
+# 1. Set `FULL=1` so that all dependencies are visited and
+# recursively recompiled if necessary.
+# 2. Set `FORCE_REBUILD=` to the specific list of dependencies that
+# should be recompiled (instead of the whole set).
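+#
+# Example invocations (dependency names are illustrative):
+#   make FULL=1
+#   make FORCE_REBUILD="my_dep other_dep"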
+
+FORCE_REBUILD ?=
+
+ifeq ($(origin FULL),undefined)
+ifneq ($(strip $(force_rebuild_dep)$(FORCE_REBUILD)),)
+define force_rebuild_dep
+echo "$(FORCE_REBUILD)" | grep -qw "$$(basename "$1")"
+endef
+endif
+endif
+
+ifneq ($(SKIP_DEPS),)
+deps::
+else
+deps:: $(ALL_DEPS_DIRS) apps clean-tmp-deps.log | $(ERLANG_MK_TMP)
+ifneq ($(ALL_DEPS_DIRS),)
+ $(verbose) set -e; for dep in $(ALL_DEPS_DIRS); do \
+ if grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/deps.log; then \
+ :; \
+ else \
+ echo $$dep >> $(ERLANG_MK_TMP)/deps.log; \
+ if [ -z "$(strip $(FULL))" ] $(if $(force_rebuild_dep),&& ! ($(call force_rebuild_dep,$$dep)),) && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ elif [ -f $$dep/GNUmakefile ] || [ -f $$dep/makefile ] || [ -f $$dep/Makefile ]; then \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ else \
+ echo "Error: No Makefile to build dependency $$dep." >&2; \
+ exit 2; \
+ fi \
+ fi \
+ done
+endif
+endif
+
+# Deps related targets.
+
+# @todo rename GNUmakefile and makefile into Makefile first, if they exist
+# While the Makefile could also be named GNUmakefile or makefile,
+# in practice only Makefile is needed so far.
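+#
+# Rough decision tree for dep_autopatch below: deps shipping their own
+# erlang.mk only get that include patched; deps whose Makefile mentions
+# rebar (or includes a parent ../*.mk), and deps without any Makefile
+# but with a src/ directory, go through dep_autopatch2; deps with
+# neither a Makefile nor src/ get a generated no-op Makefile.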
+define dep_autopatch
+ if [ -f $(DEPS_DIR)/$(1)/erlang.mk ]; then \
+ rm -rf $(DEPS_DIR)/$1/ebin/; \
+ $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+ $(call dep_autopatch_erlang_mk,$(1)); \
+ elif [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+ if [ -f $(DEPS_DIR)/$1/rebar.lock ]; then \
+ $(call dep_autopatch2,$1); \
+ elif [ 0 != `grep -c "include ../\w*\.mk" $(DEPS_DIR)/$(1)/Makefile` ]; then \
+ $(call dep_autopatch2,$(1)); \
+ elif [ 0 != `grep -ci "^[^#].*rebar" $(DEPS_DIR)/$(1)/Makefile` ]; then \
+ $(call dep_autopatch2,$(1)); \
+ elif [ -n "`find $(DEPS_DIR)/$(1)/ -type f -name \*.mk -not -name erlang.mk -exec grep -i "^[^#].*rebar" '{}' \;`" ]; then \
+ $(call dep_autopatch2,$(1)); \
+ fi \
+ else \
+ if [ ! -d $(DEPS_DIR)/$(1)/src/ ]; then \
+ $(call dep_autopatch_noop,$(1)); \
+ else \
+ $(call dep_autopatch2,$(1)); \
+ fi \
+ fi
+endef
+
+define dep_autopatch2
+ ! test -f $(DEPS_DIR)/$1/ebin/$1.app || \
+ mv -n $(DEPS_DIR)/$1/ebin/$1.app $(DEPS_DIR)/$1/src/$1.app.src; \
+ rm -f $(DEPS_DIR)/$1/ebin/$1.app; \
+ if [ -f $(DEPS_DIR)/$1/src/$1.app.src.script ]; then \
+ $(call erlang,$(call dep_autopatch_appsrc_script.erl,$(1))); \
+ fi; \
+ $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+ if [ -f $(DEPS_DIR)/$(1)/rebar -o -f $(DEPS_DIR)/$(1)/rebar.config -o -f $(DEPS_DIR)/$(1)/rebar.config.script -o -f $(DEPS_DIR)/$1/rebar.lock ]; then \
+ $(call dep_autopatch_fetch_rebar); \
+ $(call dep_autopatch_rebar,$(1)); \
+ else \
+ $(call dep_autopatch_gen,$(1)); \
+ fi
+endef
+
+define dep_autopatch_noop
+ printf "noop:\n" > $(DEPS_DIR)/$(1)/Makefile
+endef
+
+# Replace "include erlang.mk" with a line that will load the parent Erlang.mk
+# if given. Do it for all 3 possible Makefile file names.
+ifeq ($(NO_AUTOPATCH_ERLANG_MK),)
+define dep_autopatch_erlang_mk
+ for f in Makefile makefile GNUmakefile; do \
+ if [ -f $(DEPS_DIR)/$1/$$f ]; then \
+ sed -i.bak s/'include *erlang.mk'/'include $$(if $$(ERLANG_MK_FILENAME),$$(ERLANG_MK_FILENAME),erlang.mk)'/ $(DEPS_DIR)/$1/$$f; \
+ fi \
+ done
+endef
+else
+define dep_autopatch_erlang_mk
+ :
+endef
+endif
+
+define dep_autopatch_gen
+ printf "%s\n" \
+ "ERLC_OPTS = +debug_info" \
+ "include ../../erlang.mk" > $(DEPS_DIR)/$(1)/Makefile
+endef
+
+# We use flock/lockf when available to avoid concurrency issues.
+define dep_autopatch_fetch_rebar
+ if command -v flock >/dev/null; then \
+ flock $(ERLANG_MK_TMP)/rebar.lock sh -c "$(call dep_autopatch_fetch_rebar2)"; \
+ elif command -v lockf >/dev/null; then \
+ lockf $(ERLANG_MK_TMP)/rebar.lock sh -c "$(call dep_autopatch_fetch_rebar2)"; \
+ else \
+ $(call dep_autopatch_fetch_rebar2); \
+ fi
+endef
+
+define dep_autopatch_fetch_rebar2
+ if [ ! -d $(ERLANG_MK_TMP)/rebar ]; then \
+ git clone -q -n -- $(REBAR_GIT) $(ERLANG_MK_TMP)/rebar; \
+ cd $(ERLANG_MK_TMP)/rebar; \
+ git checkout -q $(REBAR_COMMIT); \
+ ./bootstrap; \
+ cd -; \
+ fi
+endef
+
+define dep_autopatch_rebar
+ if [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+ mv $(DEPS_DIR)/$(1)/Makefile $(DEPS_DIR)/$(1)/Makefile.orig.mk; \
+ fi; \
+ $(call erlang,$(call dep_autopatch_rebar.erl,$(1))); \
+ rm -f $(DEPS_DIR)/$(1)/ebin/$(1).app
+endef
+
+define dep_autopatch_rebar.erl
+ application:load(rebar),
+ application:set_env(rebar, log_level, debug),
+ rmemo:start(),
+ Conf1 = case file:consult("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config)") of
+ {ok, Conf0} -> Conf0;
+ _ -> []
+ end,
+ {Conf, OsEnv} = fun() ->
+ case filelib:is_file("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)") of
+ false -> {Conf1, []};
+ true ->
+ Bindings0 = erl_eval:new_bindings(),
+ Bindings1 = erl_eval:add_binding('CONFIG', Conf1, Bindings0),
+ Bindings = erl_eval:add_binding('SCRIPT', "$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings1),
+ Before = os:getenv(),
+ {ok, Conf2} = file:script("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings),
+ {Conf2, lists:foldl(fun(E, Acc) -> lists:delete(E, Acc) end, os:getenv(), Before)}
+ end
+ end(),
+ Write = fun (Text) ->
+ file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/Makefile)", Text, [append])
+ end,
+ Escape = fun (Text) ->
+ re:replace(Text, "\\\\$$", "\$$$$", [global, {return, list}])
+ end,
+ Write("IGNORE_DEPS += edown eper eunit_formatters meck node_package "
+ "rebar_lock_deps_plugin rebar_vsn_plugin reltool_util\n"),
+ Write("C_SRC_DIR = /path/do/not/exist\n"),
+ Write("C_SRC_TYPE = rebar\n"),
+ Write("DRV_CFLAGS = -fPIC\nexport DRV_CFLAGS\n"),
+ Write(["ERLANG_ARCH = ", rebar_utils:wordsize(), "\nexport ERLANG_ARCH\n"]),
+ ToList = fun
+ (V) when is_atom(V) -> atom_to_list(V);
+ (V) when is_list(V) -> "'\\"" ++ V ++ "\\"'"
+ end,
+ fun() ->
+ Write("ERLC_OPTS = +debug_info\nexport ERLC_OPTS\n"),
+ case lists:keyfind(erl_opts, 1, Conf) of
+ false -> ok;
+ {_, ErlOpts} ->
+ lists:foreach(fun
+ ({d, D}) ->
+ Write("ERLC_OPTS += -D" ++ ToList(D) ++ "=1\n");
+ ({d, DKey, DVal}) ->
+ Write("ERLC_OPTS += -D" ++ ToList(DKey) ++ "=" ++ ToList(DVal) ++ "\n");
+ ({i, I}) ->
+ Write(["ERLC_OPTS += -I ", I, "\n"]);
+ ({platform_define, Regex, D}) ->
+ case rebar_utils:is_arch(Regex) of
+ true -> Write("ERLC_OPTS += -D" ++ ToList(D) ++ "=1\n");
+ false -> ok
+ end;
+ ({parse_transform, PT}) ->
+ Write("ERLC_OPTS += +'{parse_transform, " ++ ToList(PT) ++ "}'\n");
+ (_) -> ok
+ end, ErlOpts)
+ end,
+ Write("\n")
+ end(),
+ GetHexVsn = fun(N, NP) ->
+ case file:consult("$(call core_native_path,$(DEPS_DIR)/$1/rebar.lock)") of
+ {ok, Lock} ->
+ io:format("~p~n", [Lock]),
+ case lists:keyfind("1.1.0", 1, Lock) of
+ {_, LockPkgs} ->
+ io:format("~p~n", [LockPkgs]),
+ case lists:keyfind(atom_to_binary(N, latin1), 1, LockPkgs) of
+ {_, {pkg, _, Vsn}, _} ->
+ io:format("~p~n", [Vsn]),
+ {N, {hex, NP, binary_to_list(Vsn)}};
+ _ ->
+ false
+ end;
+ _ ->
+ false
+ end;
+ _ ->
+ false
+ end
+ end,
+ SemVsn = fun
+ ("~>" ++ S0) ->
+ S = case S0 of
+ " " ++ S1 -> S1;
+ _ -> S0
+ end,
+ case length([ok || $$. <- S]) of
+ 0 -> S ++ ".0.0";
+ 1 -> S ++ ".0";
+ _ -> S
+ end;
+ (S) -> S
+ end,
+ fun() ->
+ File = case lists:keyfind(deps, 1, Conf) of
+ false -> [];
+ {_, Deps} ->
+ [begin case case Dep of
+ N when is_atom(N) -> GetHexVsn(N, N);
+ {N, S} when is_atom(N), is_list(S) -> {N, {hex, N, SemVsn(S)}};
+ {N, {pkg, NP}} when is_atom(N) -> GetHexVsn(N, NP);
+ {N, S, {pkg, NP}} -> {N, {hex, NP, S}};
+ {N, S} when is_tuple(S) -> {N, S};
+ {N, _, S} -> {N, S};
+ {N, _, S, _} -> {N, S};
+ _ -> false
+ end of
+ false -> ok;
+ {Name, Source} ->
+ {Method, Repo, Commit} = case Source of
+ {hex, NPV, V} -> {hex, V, NPV};
+ {git, R} -> {git, R, master};
+ {M, R, {branch, C}} -> {M, R, C};
+ {M, R, {ref, C}} -> {M, R, C};
+ {M, R, {tag, C}} -> {M, R, C};
+ {M, R, C} -> {M, R, C}
+ end,
+ Write(io_lib:format("DEPS += ~s\ndep_~s = ~s ~s ~s~n", [Name, Name, Method, Repo, Commit]))
+ end end || Dep <- Deps]
+ end
+ end(),
+ fun() ->
+ case lists:keyfind(erl_first_files, 1, Conf) of
+ false -> ok;
+ {_, Files} ->
+ Names = [[" ", case lists:reverse(F) of
+ "lre." ++ Elif -> lists:reverse(Elif);
+ "lrx." ++ Elif -> lists:reverse(Elif);
+ "lry." ++ Elif -> lists:reverse(Elif);
+ Elif -> lists:reverse(Elif)
+ end] || "src/" ++ F <- Files],
+ Write(io_lib:format("COMPILE_FIRST +=~s\n", [Names]))
+ end
+ end(),
+ Write("\n\nrebar_dep: preprocess pre-deps deps pre-app app\n"),
+ Write("\npreprocess::\n"),
+ Write("\npre-deps::\n"),
+ Write("\npre-app::\n"),
+ PatchHook = fun(Cmd) ->
+ Cmd2 = re:replace(Cmd, "^([g]?make)(.*)( -C.*)", "\\\\1\\\\3\\\\2", [{return, list}]),
+ case Cmd2 of
+ "make -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1);
+ "gmake -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1);
+ "make " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1);
+ "gmake " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1);
+ _ -> Escape(Cmd)
+ end
+ end,
+ fun() ->
+ case lists:keyfind(pre_hooks, 1, Conf) of
+ false -> ok;
+ {_, Hooks} ->
+ [case H of
+ {'get-deps', Cmd} ->
+ Write("\npre-deps::\n\t" ++ PatchHook(Cmd) ++ "\n");
+ {compile, Cmd} ->
+ Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n");
+ {Regex, compile, Cmd} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n");
+ false -> ok
+ end;
+ _ -> ok
+ end || H <- Hooks]
+ end
+ end(),
+ ShellToMk = fun(V0) ->
+ V1 = re:replace(V0, "[$$][(]", "$$\(shell ", [global]),
+ V = re:replace(V1, "([$$])(?![(])(\\\\w*)", "\\\\1(\\\\2)", [global]),
+ re:replace(V, "-Werror\\\\b", "", [{return, list}, global])
+ end,
+ PortSpecs = fun() ->
+ case lists:keyfind(port_specs, 1, Conf) of
+ false ->
+ case filelib:is_dir("$(call core_native_path,$(DEPS_DIR)/$1/c_src)") of
+ false -> [];
+ true ->
+ [{"priv/" ++ proplists:get_value(so_name, Conf, "$(1)_drv.so"),
+ proplists:get_value(port_sources, Conf, ["c_src/*.c"]), []}]
+ end;
+ {_, Specs} ->
+ lists:flatten([case S of
+ {Output, Input} -> {ShellToMk(Output), Input, []};
+ {Regex, Output, Input} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {ShellToMk(Output), Input, []};
+ false -> []
+ end;
+ {Regex, Output, Input, [{env, Env}]} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {ShellToMk(Output), Input, Env};
+ false -> []
+ end
+ end || S <- Specs])
+ end
+ end(),
+ PortSpecWrite = fun (Text) ->
+ file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/c_src/Makefile.erlang.mk)", Text, [append])
+ end,
+ case PortSpecs of
+ [] -> ok;
+ _ ->
+ Write("\npre-app::\n\t@$$\(MAKE) --no-print-directory -f c_src/Makefile.erlang.mk\n"),
+ PortSpecWrite(io_lib:format("ERL_CFLAGS ?= -finline-functions -Wall -fPIC -I \\"~s/erts-~s/include\\" -I \\"~s\\"\n",
+ [code:root_dir(), erlang:system_info(version), code:lib_dir(erl_interface, include)])),
+ PortSpecWrite(io_lib:format("ERL_LDFLAGS ?= -L \\"~s\\" -lei\n",
+ [code:lib_dir(erl_interface, lib)])),
+ [PortSpecWrite(["\n", E, "\n"]) || E <- OsEnv],
+ FilterEnv = fun(Env) ->
+ lists:flatten([case E of
+ {_, _} -> E;
+ {Regex, K, V} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {K, V};
+ false -> []
+ end
+ end || E <- Env])
+ end,
+ MergeEnv = fun(Env) ->
+ lists:foldl(fun ({K, V}, Acc) ->
+ case lists:keyfind(K, 1, Acc) of
+ false -> [{K, rebar_utils:expand_env_variable(V, K, "")}|Acc];
+ {_, V0} -> [{K, rebar_utils:expand_env_variable(V, K, V0)}|Acc]
+ end
+ end, [], Env)
+ end,
+ PortEnv = case lists:keyfind(port_env, 1, Conf) of
+ false -> [];
+ {_, PortEnv0} -> FilterEnv(PortEnv0)
+ end,
+ PortSpec = fun ({Output, Input0, Env}) ->
+ filelib:ensure_dir("$(call core_native_path,$(DEPS_DIR)/$1/)" ++ Output),
+ Input = [[" ", I] || I <- Input0],
+ PortSpecWrite([
+ [["\n", K, " = ", ShellToMk(V)] || {K, V} <- lists:reverse(MergeEnv(PortEnv))],
+ case $(PLATFORM) of
+ darwin -> "\n\nLDFLAGS += -flat_namespace -undefined suppress";
+ _ -> ""
+ end,
+ "\n\nall:: ", Output, "\n\t@:\n\n",
+ "%.o: %.c\n\t$$\(CC) -c -o $$\@ $$\< $$\(CFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.C\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.cc\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.cpp\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ [[Output, ": ", K, " += ", ShellToMk(V), "\n"] || {K, V} <- lists:reverse(MergeEnv(FilterEnv(Env)))],
+ Output, ": $$\(foreach ext,.c .C .cc .cpp,",
+ "$$\(patsubst %$$\(ext),%.o,$$\(filter %$$\(ext),$$\(wildcard", Input, "))))\n",
+ "\t$$\(CC) -o $$\@ $$\? $$\(LDFLAGS) $$\(ERL_LDFLAGS) $$\(DRV_LDFLAGS) $$\(EXE_LDFLAGS)",
+ case {filename:extension(Output), $(PLATFORM)} of
+ {[], _} -> "\n";
+ {_, darwin} -> "\n";
+ _ -> " -shared\n"
+ end])
+ end,
+ [PortSpec(S) || S <- PortSpecs]
+ end,
+ fun() ->
+ case lists:keyfind(plugins, 1, Conf) of
+ false -> ok;
+ {_, Plugins0} ->
+ Plugins = [P || P <- Plugins0, is_tuple(P)],
+ case lists:keyfind('lfe-compile', 1, Plugins) of
+ false -> ok;
+ _ -> Write("\nBUILD_DEPS = lfe lfe.mk\ndep_lfe.mk = git https://github.com/ninenines/lfe.mk master\nDEP_PLUGINS = lfe.mk\n")
+ end
+ end
+ end(),
+ Write("\ninclude $$\(if $$\(ERLANG_MK_FILENAME),$$\(ERLANG_MK_FILENAME),erlang.mk)"),
+ RunPlugin = fun(Plugin, Step) ->
+ case erlang:function_exported(Plugin, Step, 2) of
+ false -> ok;
+ true ->
+ c:cd("$(call core_native_path,$(DEPS_DIR)/$1/)"),
+ Ret = Plugin:Step({config, "", Conf, dict:new(), dict:new(), dict:new(),
+ dict:store(base_dir, "", dict:new())}, undefined),
+ io:format("rebar plugin ~p step ~p ret ~p~n", [Plugin, Step, Ret])
+ end
+ end,
+ fun() ->
+ case lists:keyfind(plugins, 1, Conf) of
+ false -> ok;
+ {_, Plugins0} ->
+ Plugins = [P || P <- Plugins0, is_atom(P)],
+ [begin
+ case lists:keyfind(deps, 1, Conf) of
+ false -> ok;
+ {_, Deps} ->
+ case lists:keyfind(P, 1, Deps) of
+ false -> ok;
+ _ ->
+ Path = "$(call core_native_path,$(DEPS_DIR)/)" ++ atom_to_list(P),
+ io:format("~s", [os:cmd("$(MAKE) -C $(call core_native_path,$(DEPS_DIR)/$1) " ++ Path)]),
+ io:format("~s", [os:cmd("$(MAKE) -C " ++ Path ++ " IS_DEP=1")]),
+ code:add_patha(Path ++ "/ebin")
+ end
+ end
+ end || P <- Plugins],
+ [case code:load_file(P) of
+ {module, P} -> ok;
+ _ ->
+ case lists:keyfind(plugin_dir, 1, Conf) of
+ false -> ok;
+ {_, PluginsDir} ->
+ ErlFile = "$(call core_native_path,$(DEPS_DIR)/$1/)" ++ PluginsDir ++ "/" ++ atom_to_list(P) ++ ".erl",
+ {ok, P, Bin} = compile:file(ErlFile, [binary]),
+ {module, P} = code:load_binary(P, ErlFile, Bin)
+ end
+ end || P <- Plugins],
+ [RunPlugin(P, preprocess) || P <- Plugins],
+ [RunPlugin(P, pre_compile) || P <- Plugins],
+ [RunPlugin(P, compile) || P <- Plugins]
+ end
+ end(),
+ halt()
+endef
+
+define dep_autopatch_appsrc_script.erl
+ AppSrc = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+ AppSrcScript = AppSrc ++ ".script",
+ {ok, Conf0} = file:consult(AppSrc),
+ Bindings0 = erl_eval:new_bindings(),
+ Bindings1 = erl_eval:add_binding('CONFIG', Conf0, Bindings0),
+ Bindings = erl_eval:add_binding('SCRIPT', AppSrcScript, Bindings1),
+ Conf = case file:script(AppSrcScript, Bindings) of
+ {ok, [C]} -> C;
+ {ok, C} -> C
+ end,
+ ok = file:write_file(AppSrc, io_lib:format("~p.~n", [Conf])),
+ halt()
+endef
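+
+# Note: the script is evaluated with CONFIG bound to the terms consulted from
+# the .app.src file, mirroring the rebar convention, so a script that simply
+# returns CONFIG rewrites the file unchanged. A minimal sketch (hypothetical
+# file and version):
+#
+#   %% src/my_app.app.src.script
+#   [{application, App, Props}] = CONFIG,
+#   [{application, App, lists:keystore(vsn, 1, Props, {vsn, "1.2.3"})}].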
+
+define dep_autopatch_appsrc.erl
+ AppSrcOut = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+ AppSrcIn = case filelib:is_regular(AppSrcOut) of false -> "$(call core_native_path,$(DEPS_DIR)/$1/ebin/$1.app)"; true -> AppSrcOut end,
+ case filelib:is_regular(AppSrcIn) of
+ false -> ok;
+ true ->
+ {ok, [{application, $(1), L0}]} = file:consult(AppSrcIn),
+ L1 = lists:keystore(modules, 1, L0, {modules, []}),
+ L2 = case lists:keyfind(vsn, 1, L1) of
+ {_, git} -> lists:keyreplace(vsn, 1, L1, {vsn, lists:droplast(os:cmd("git -C $(DEPS_DIR)/$1 describe --dirty --tags --always"))});
+ {_, {cmd, _}} -> lists:keyreplace(vsn, 1, L1, {vsn, "cmd"});
+ _ -> L1
+ end,
+ L3 = case lists:keyfind(registered, 1, L2) of false -> [{registered, []}|L2]; _ -> L2 end,
+ ok = file:write_file(AppSrcOut, io_lib:format("~p.~n", [{application, $(1), L3}])),
+ case AppSrcOut of AppSrcIn -> ok; _ -> ok = file:delete(AppSrcIn) end
+ end,
+ halt()
+endef
+
+define dep_fetch_git
+ git clone -q -n -- $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && git checkout -q $(call dep_commit,$(1));
+endef
+
+define dep_fetch_git-subfolder
+ mkdir -p $(ERLANG_MK_TMP)/git-subfolder; \
+ git clone -q -n -- $(call dep_repo,$1) \
+ $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1); \
+ cd $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1) \
+ && git checkout -q $(call dep_commit,$1); \
+ ln -s $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1)/$(word 4,$(dep_$(1))) \
+ $(DEPS_DIR)/$(call dep_name,$1);
+endef
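+
+# A minimal sketch of the configuration this expects, given the usual dep_*
+# word layout (fetch method, repository, commit, then the subfolder to link):
+#
+#   DEPS = my_app
+#   dep_my_app = git-subfolder https://example.com/monorepo.git master apps/my_app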
+
+define dep_fetch_git-submodule
+ git submodule update --init -- $(DEPS_DIR)/$1;
+endef
+
+define dep_fetch_hg
+ hg clone -q -U $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && hg update -q $(call dep_commit,$(1));
+endef
+
+define dep_fetch_svn
+ svn checkout -q $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+define dep_fetch_cp
+ cp -R $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+define dep_fetch_ln
+ ln -s $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+# A Hex dependency only carries a package version, so there is no need to
+# look it up in the Erlang.mk package index.
+define dep_fetch_hex
+ mkdir -p $(ERLANG_MK_TMP)/hex $(DEPS_DIR)/$1; \
+ $(call core_http_get,$(ERLANG_MK_TMP)/hex/$1.tar,\
+ https://repo.hex.pm/tarballs/$(if $(word 3,$(dep_$1)),$(word 3,$(dep_$1)),$1)-$(strip $(word 2,$(dep_$1))).tar); \
+ tar -xOf $(ERLANG_MK_TMP)/hex/$1.tar contents.tar.gz | tar -C $(DEPS_DIR)/$1 -xzf -;
+endef
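+
+# The second word of dep_* is the version and the optional third word is the
+# Hex package name, for packages published under a different name than the
+# application. A sketch with a hypothetical dependency:
+#
+#   DEPS = my_dep
+#   dep_my_dep = hex 1.0.0
+#   dep_my_dep = hex 1.0.0 my_dep_pkg  # when the package name differs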
+
+define dep_fetch_fail
+ echo "Error: Unknown or invalid dependency: $(1)." >&2; \
+ exit 78;
+endef
+
+# Kept for compatibility with the older Erlang.mk configuration format.
+define dep_fetch_legacy
+	$(warning WARNING: '$(1)' dependency configuration uses a deprecated format.) \
+ git clone -q -n -- $(word 1,$(dep_$(1))) $(DEPS_DIR)/$(1); \
+ cd $(DEPS_DIR)/$(1) && git checkout -q $(if $(word 2,$(dep_$(1))),$(word 2,$(dep_$(1))),master);
+endef
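+
+# The deprecated format is simply "repository [commit]", with the commit
+# defaulting to master, e.g. (hypothetical dependency):
+#
+#   dep_my_dep = https://example.com/my_dep.git v1.0.0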
+
+define dep_target
+$(DEPS_DIR)/$(call dep_name,$1): | $(ERLANG_MK_TMP)
+ $(eval DEP_NAME := $(call dep_name,$1))
+ $(eval DEP_STR := $(if $(filter $1,$(DEP_NAME)),$1,"$1 ($(DEP_NAME))"))
+ $(verbose) if test -d $(APPS_DIR)/$(DEP_NAME); then \
+ echo "Error: Dependency" $(DEP_STR) "conflicts with application found in $(APPS_DIR)/$(DEP_NAME)." >&2; \
+ exit 17; \
+ fi
+ $(verbose) mkdir -p $(DEPS_DIR)
+ $(dep_verbose) $(call dep_fetch_$(strip $(call dep_fetch,$(1))),$(1))
+ $(verbose) if [ -f $(DEPS_DIR)/$(1)/configure.ac -o -f $(DEPS_DIR)/$(1)/configure.in ] \
+ && [ ! -f $(DEPS_DIR)/$(1)/configure ]; then \
+ echo " AUTO " $(DEP_STR); \
+ cd $(DEPS_DIR)/$(1) && autoreconf -Wall -vif -I m4; \
+ fi
+ - $(verbose) if [ -f $(DEPS_DIR)/$(DEP_NAME)/configure ]; then \
+ echo " CONF " $(DEP_STR); \
+ cd $(DEPS_DIR)/$(DEP_NAME) && ./configure; \
+ fi
+ifeq ($(filter $(1),$(NO_AUTOPATCH)),)
+ $(verbose) $$(MAKE) --no-print-directory autopatch-$(DEP_NAME)
+endif
+
+.PHONY: autopatch-$(call dep_name,$1)
+
+autopatch-$(call dep_name,$1)::
+ $(verbose) if [ "$(1)" = "amqp_client" -a "$(RABBITMQ_CLIENT_PATCH)" ]; then \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \
+ echo " PATCH Downloading rabbitmq-codegen"; \
+ git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \
+ fi; \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-server ]; then \
+ echo " PATCH Downloading rabbitmq-server"; \
+ git clone https://github.com/rabbitmq/rabbitmq-server.git $(DEPS_DIR)/rabbitmq-server; \
+ fi; \
+ ln -s $(DEPS_DIR)/amqp_client/deps/rabbit_common-0.0.0 $(DEPS_DIR)/rabbit_common; \
+ elif [ "$(1)" = "rabbit" -a "$(RABBITMQ_SERVER_PATCH)" ]; then \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \
+ echo " PATCH Downloading rabbitmq-codegen"; \
+ git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \
+		fi; \
+ elif [ "$1" = "elixir" -a "$(ELIXIR_PATCH)" ]; then \
+ ln -s lib/elixir/ebin $(DEPS_DIR)/elixir/; \
+ else \
+ $$(call dep_autopatch,$(call dep_name,$1)) \
+ fi
+endef
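+
+# Autopatching of a given dependency can be skipped by listing its name in
+# NO_AUTOPATCH, e.g. NO_AUTOPATCH = my_dep (hypothetical name).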
+
+$(foreach dep,$(BUILD_DEPS) $(DEPS),$(eval $(call dep_target,$(dep))))
+
+ifndef IS_APP
+clean:: clean-apps
+
+clean-apps:
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ $(MAKE) -C $$dep clean IS_APP=1; \
+ done
+
+distclean:: distclean-apps
+
+distclean-apps:
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ $(MAKE) -C $$dep distclean IS_APP=1; \
+ done
+endif
+
+ifndef SKIP_DEPS
+distclean:: distclean-deps
+
+distclean-deps:
+ $(gen_verbose) rm -rf $(DEPS_DIR)
+endif
+
+# Forward-declare variables used in core/deps-tools.mk. This is required
+# in case plugins use them.
+
+ERLANG_MK_RECURSIVE_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-deps-list.log
+ERLANG_MK_RECURSIVE_DOC_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-doc-deps-list.log
+ERLANG_MK_RECURSIVE_REL_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-rel-deps-list.log
+ERLANG_MK_RECURSIVE_TEST_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-test-deps-list.log
+ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-shell-deps-list.log
+
+ERLANG_MK_QUERY_DEPS_FILE = $(ERLANG_MK_TMP)/query-deps.log
+ERLANG_MK_QUERY_DOC_DEPS_FILE = $(ERLANG_MK_TMP)/query-doc-deps.log
+ERLANG_MK_QUERY_REL_DEPS_FILE = $(ERLANG_MK_TMP)/query-rel-deps.log
+ERLANG_MK_QUERY_TEST_DEPS_FILE = $(ERLANG_MK_TMP)/query-test-deps.log
+ERLANG_MK_QUERY_SHELL_DEPS_FILE = $(ERLANG_MK_TMP)/query-shell-deps.log
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: clean-app
+
+# Configuration.
+
+ERLC_OPTS ?= -Werror +debug_info +warn_export_vars +warn_shadow_vars \
+ +warn_obsolete_guard # +bin_opt_info +warn_export_all +warn_missing_spec
+COMPILE_FIRST ?=
+COMPILE_FIRST_PATHS = $(addprefix src/,$(addsuffix .erl,$(COMPILE_FIRST)))
+ERLC_EXCLUDE ?=
+ERLC_EXCLUDE_PATHS = $(addprefix src/,$(addsuffix .erl,$(ERLC_EXCLUDE)))
+
+ERLC_ASN1_OPTS ?=
+
+ERLC_MIB_OPTS ?=
+COMPILE_MIB_FIRST ?=
+COMPILE_MIB_FIRST_PATHS = $(addprefix mibs/,$(addsuffix .mib,$(COMPILE_MIB_FIRST)))
+
+# Verbosity.
+
+app_verbose_0 = @echo " APP " $(PROJECT);
+app_verbose_2 = set -x;
+app_verbose = $(app_verbose_$(V))
+
+appsrc_verbose_0 = @echo " APP " $(PROJECT).app.src;
+appsrc_verbose_2 = set -x;
+appsrc_verbose = $(appsrc_verbose_$(V))
+
+makedep_verbose_0 = @echo " DEPEND" $(PROJECT).d;
+makedep_verbose_2 = set -x;
+makedep_verbose = $(makedep_verbose_$(V))
+
+erlc_verbose_0 = @echo " ERLC " $(filter-out $(patsubst %,%.erl,$(ERLC_EXCLUDE)),\
+ $(filter %.erl %.core,$(?F)));
+erlc_verbose_2 = set -x;
+erlc_verbose = $(erlc_verbose_$(V))
+
+xyrl_verbose_0 = @echo " XYRL " $(filter %.xrl %.yrl,$(?F));
+xyrl_verbose_2 = set -x;
+xyrl_verbose = $(xyrl_verbose_$(V))
+
+asn1_verbose_0 = @echo " ASN1 " $(filter %.asn1,$(?F));
+asn1_verbose_2 = set -x;
+asn1_verbose = $(asn1_verbose_$(V))
+
+mib_verbose_0 = @echo " MIB " $(filter %.bin %.mib,$(?F));
+mib_verbose_2 = set -x;
+mib_verbose = $(mib_verbose_$(V))
+
+ifneq ($(wildcard src/),)
+
+# Targets.
+
+app:: $(if $(wildcard ebin/test),clean) deps
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d
+ $(verbose) $(MAKE) --no-print-directory app-build
+
+ifeq ($(wildcard src/$(PROJECT_MOD).erl),)
+define app_file
+{application, '$(PROJECT)', [
+ {description, "$(PROJECT_DESCRIPTION)"},
+ {vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP),
+ {id$(comma)$(space)"$(1)"}$(comma))
+ {modules, [$(call comma_list,$(2))]},
+ {registered, []},
+ {applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(foreach dep,$(DEPS),$(call dep_name,$(dep))))]},
+ {env, $(subst \,\\,$(PROJECT_ENV))}$(if $(findstring {,$(PROJECT_APP_EXTRA_KEYS)),$(comma)$(newline)$(tab)$(subst \,\\,$(PROJECT_APP_EXTRA_KEYS)),)
+]}.
+endef
+else
+define app_file
+{application, '$(PROJECT)', [
+ {description, "$(PROJECT_DESCRIPTION)"},
+ {vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP),
+ {id$(comma)$(space)"$(1)"}$(comma))
+ {modules, [$(call comma_list,$(2))]},
+ {registered, [$(call comma_list,$(PROJECT)_sup $(PROJECT_REGISTERED))]},
+ {applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(foreach dep,$(DEPS),$(call dep_name,$(dep))))]},
+ {mod, {$(PROJECT_MOD), []}},
+ {env, $(subst \,\\,$(PROJECT_ENV))}$(if $(findstring {,$(PROJECT_APP_EXTRA_KEYS)),$(comma)$(newline)$(tab)$(subst \,\\,$(PROJECT_APP_EXTRA_KEYS)),)
+]}.
+endef
+endif
+
+app-build: ebin/$(PROJECT).app
+ $(verbose) :
+
+# Source files.
+
+ALL_SRC_FILES := $(sort $(call core_find,src/,*))
+
+ERL_FILES := $(filter %.erl,$(ALL_SRC_FILES))
+CORE_FILES := $(filter %.core,$(ALL_SRC_FILES))
+
+# ASN.1 files.
+
+ifneq ($(wildcard asn1/),)
+ASN1_FILES = $(sort $(call core_find,asn1/,*.asn1))
+ERL_FILES += $(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES))))
+
+define compile_asn1
+ $(verbose) mkdir -p include/
+ $(asn1_verbose) erlc -v -I include/ -o asn1/ +noobj $(ERLC_ASN1_OPTS) $(1)
+ $(verbose) mv asn1/*.erl src/
+ -$(verbose) mv asn1/*.hrl include/
+ $(verbose) mv asn1/*.asn1db include/
+endef
+
+$(PROJECT).d:: $(ASN1_FILES)
+ $(if $(strip $?),$(call compile_asn1,$?))
+endif
+
+# SNMP MIB files.
+
+ifneq ($(wildcard mibs/),)
+MIB_FILES = $(sort $(call core_find,mibs/,*.mib))
+
+$(PROJECT).d:: $(COMPILE_MIB_FIRST_PATHS) $(MIB_FILES)
+ $(verbose) mkdir -p include/ priv/mibs/
+ $(mib_verbose) erlc -v $(ERLC_MIB_OPTS) -o priv/mibs/ -I priv/mibs/ $?
+ $(mib_verbose) erlc -o include/ -- $(addprefix priv/mibs/,$(patsubst %.mib,%.bin,$(notdir $?)))
+endif
+
+# Leex and Yecc files.
+
+XRL_FILES := $(filter %.xrl,$(ALL_SRC_FILES))
+XRL_ERL_FILES = $(addprefix src/,$(patsubst %.xrl,%.erl,$(notdir $(XRL_FILES))))
+ERL_FILES += $(XRL_ERL_FILES)
+
+YRL_FILES := $(filter %.yrl,$(ALL_SRC_FILES))
+YRL_ERL_FILES = $(addprefix src/,$(patsubst %.yrl,%.erl,$(notdir $(YRL_FILES))))
+ERL_FILES += $(YRL_ERL_FILES)
+
+$(PROJECT).d:: $(XRL_FILES) $(YRL_FILES)
+ $(if $(strip $?),$(xyrl_verbose) erlc -v -o src/ $(YRL_ERLC_OPTS) $?)
+
+# Erlang and Core Erlang files.
+
+define makedep.erl
+ E = ets:new(makedep, [bag]),
+ G = digraph:new([acyclic]),
+ ErlFiles = lists:usort(string:tokens("$(ERL_FILES)", " ")),
+ DepsDir = "$(call core_native_path,$(DEPS_DIR))",
+ AppsDir = "$(call core_native_path,$(APPS_DIR))",
+ DepsDirsSrc = "$(if $(wildcard $(DEPS_DIR)/*/src), $(call core_native_path,$(wildcard $(DEPS_DIR)/*/src)))",
+ DepsDirsInc = "$(if $(wildcard $(DEPS_DIR)/*/include), $(call core_native_path,$(wildcard $(DEPS_DIR)/*/include)))",
+ AppsDirsSrc = "$(if $(wildcard $(APPS_DIR)/*/src), $(call core_native_path,$(wildcard $(APPS_DIR)/*/src)))",
+ AppsDirsInc = "$(if $(wildcard $(APPS_DIR)/*/include), $(call core_native_path,$(wildcard $(APPS_DIR)/*/include)))",
+ DepsDirs = lists:usort(string:tokens(DepsDirsSrc++DepsDirsInc, " ")),
+ AppsDirs = lists:usort(string:tokens(AppsDirsSrc++AppsDirsInc, " ")),
+ Modules = [{list_to_atom(filename:basename(F, ".erl")), F} || F <- ErlFiles],
+ Add = fun (Mod, Dep) ->
+ case lists:keyfind(Dep, 1, Modules) of
+ false -> ok;
+ {_, DepFile} ->
+ {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+ ets:insert(E, {ModFile, DepFile}),
+ digraph:add_vertex(G, Mod),
+ digraph:add_vertex(G, Dep),
+ digraph:add_edge(G, Mod, Dep)
+ end
+ end,
+ AddHd = fun (F, Mod, DepFile) ->
+ case file:open(DepFile, [read]) of
+ {error, enoent} ->
+ ok;
+ {ok, Fd} ->
+ {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+ case ets:match(E, {ModFile, DepFile}) of
+ [] ->
+ ets:insert(E, {ModFile, DepFile}),
+				F(F, Fd, Mod, 0);
+ _ -> ok
+ end
+ end
+ end,
+ SearchHrl = fun
+		F(_Hrl, []) -> {error, enoent};
+ F(Hrl, [Dir|Dirs]) ->
+ HrlF = filename:join([Dir,Hrl]),
+ case filelib:is_file(HrlF) of
+ true ->
+ {ok, HrlF};
+ false -> F(Hrl,Dirs)
+ end
+ end,
+ Attr = fun
+ (_F, Mod, behavior, Dep) ->
+ Add(Mod, Dep);
+ (_F, Mod, behaviour, Dep) ->
+ Add(Mod, Dep);
+ (_F, Mod, compile, {parse_transform, Dep}) ->
+ Add(Mod, Dep);
+ (_F, Mod, compile, Opts) when is_list(Opts) ->
+ case proplists:get_value(parse_transform, Opts) of
+ undefined -> ok;
+ Dep -> Add(Mod, Dep)
+ end;
+ (F, Mod, include, Hrl) ->
+ case SearchHrl(Hrl, ["src", "include",AppsDir,DepsDir]++AppsDirs++DepsDirs) of
+ {ok, FoundHrl} -> AddHd(F, Mod, FoundHrl);
+ {error, _} -> false
+ end;
+ (F, Mod, include_lib, Hrl) ->
+ case SearchHrl(Hrl, ["src", "include",AppsDir,DepsDir]++AppsDirs++DepsDirs) of
+ {ok, FoundHrl} -> AddHd(F, Mod, FoundHrl);
+ {error, _} -> false
+ end;
+ (F, Mod, import, {Imp, _}) ->
+ IsFile =
+ case lists:keyfind(Imp, 1, Modules) of
+ false -> false;
+ {_, FilePath} -> filelib:is_file(FilePath)
+ end,
+ case IsFile of
+ false -> ok;
+ true -> Add(Mod, Imp)
+ end;
+ (_, _, _, _) -> ok
+ end,
+ MakeDepend = fun
+ (F, Fd, Mod, StartLocation) ->
+			{ok, _Filename} = file:pid2name(Fd),
+ case io:parse_erl_form(Fd, undefined, StartLocation) of
+ {ok, AbsData, EndLocation} ->
+ case AbsData of
+ {attribute, _, Key, Value} ->
+ Attr(F, Mod, Key, Value),
+ F(F, Fd, Mod, EndLocation);
+ _ -> F(F, Fd, Mod, EndLocation)
+ end;
+			{eof, _} -> file:close(Fd);
+			{error, _ErrorDescription} ->
+				file:close(Fd);
+			{error, _ErrorInfo, ErrorLocation} ->
+				F(F, Fd, Mod, ErrorLocation)
+ end,
+ ok
+ end,
+ [begin
+ Mod = list_to_atom(filename:basename(F, ".erl")),
+ case file:open(F, [read]) of
+		{ok, Fd} -> MakeDepend(MakeDepend, Fd, Mod, 0);
+ {error, enoent} -> ok
+ end
+ end || F <- ErlFiles],
+ Depend = sofs:to_external(sofs:relation_to_family(sofs:relation(ets:tab2list(E)))),
+ CompileFirst = [X || X <- lists:reverse(digraph_utils:topsort(G)), [] =/= digraph:in_neighbours(G, X)],
+ TargetPath = fun(Target) ->
+ case lists:keyfind(Target, 1, Modules) of
+ false -> "";
+ {_, DepFile} ->
+ DirSubname = tl(string:tokens(filename:dirname(DepFile), "/")),
+ string:join(DirSubname ++ [atom_to_list(Target)], "/")
+ end
+ end,
+ Output0 = [
+ "# Generated by Erlang.mk. Edit at your own risk!\n\n",
+ [[F, "::", [[" ", D] || D <- Deps], "; @touch \$$@\n"] || {F, Deps} <- Depend],
+ "\nCOMPILE_FIRST +=", [[" ", TargetPath(CF)] || CF <- CompileFirst], "\n"
+ ],
+ Output = case "é" of
+ [233] -> unicode:characters_to_binary(Output0);
+ _ -> Output0
+ end,
+ ok = file:write_file("$(1)", Output),
+ halt()
+endef
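+
+# The script above generates $(PROJECT).d: one "file:: deps ; @touch $@" rule
+# per source file plus a COMPILE_FIRST += line obtained from a topological
+# sort of the behaviour/parse_transform dependency graph. The "é" test is an
+# encoding probe: if the literal was decoded as the single code point 233,
+# the script source was read as Unicode, so the output is converted with
+# unicode:characters_to_binary/1 before being written.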
+
+ifeq ($(if $(NO_MAKEDEP),$(wildcard $(PROJECT).d),),)
+$(PROJECT).d:: $(ERL_FILES) $(call core_find,include/,*.hrl) $(MAKEFILE_LIST)
+ $(makedep_verbose) $(call erlang,$(call makedep.erl,$@))
+endif
+
+ifeq ($(IS_APP)$(IS_DEP),)
+ifneq ($(words $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES)),0)
+# Rebuild everything when the Makefile changes.
+$(ERLANG_MK_TMP)/last-makefile-change: $(MAKEFILE_LIST) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES); \
+ touch -c $(PROJECT).d; \
+ fi
+ $(verbose) touch $@
+
+$(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES):: $(ERLANG_MK_TMP)/last-makefile-change
+ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change
+endif
+endif
+
+$(PROJECT).d::
+ $(verbose) :
+
+include $(wildcard $(PROJECT).d)
+
+ebin/$(PROJECT).app:: ebin/
+
+ebin/:
+ $(verbose) mkdir -p ebin/
+
+define compile_erl
+ $(erlc_verbose) erlc -v $(if $(IS_DEP),$(filter-out -Werror,$(ERLC_OPTS)),$(ERLC_OPTS)) -o ebin/ \
+ -pa ebin/ -I include/ $(filter-out $(ERLC_EXCLUDE_PATHS),$(COMPILE_FIRST_PATHS) $(1))
+endef
+
+define validate_app_file
+ case file:consult("ebin/$(PROJECT).app") of
+ {ok, _} -> halt();
+ _ -> halt(1)
+ end
+endef
+
+ebin/$(PROJECT).app:: $(ERL_FILES) $(CORE_FILES) $(wildcard src/$(PROJECT).app.src)
+ $(eval FILES_TO_COMPILE := $(filter-out src/$(PROJECT).app.src,$?))
+ $(if $(strip $(FILES_TO_COMPILE)),$(call compile_erl,$(FILES_TO_COMPILE)))
+# Older git versions do not have the --first-parent flag. Do without in that case.
+ $(eval GITDESCRIBE := $(shell git describe --dirty --abbrev=7 --tags --always --first-parent 2>/dev/null \
+ || git describe --dirty --abbrev=7 --tags --always 2>/dev/null || true))
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(filter-out $(ERLC_EXCLUDE_PATHS),$(ERL_FILES) $(CORE_FILES) $(BEAM_FILES)))))))
+ifeq ($(wildcard src/$(PROJECT).app.src),)
+ $(app_verbose) printf '$(subst %,%%,$(subst $(newline),\n,$(subst ','\'',$(call app_file,$(GITDESCRIBE),$(MODULES)))))' \
+ > ebin/$(PROJECT).app
+ $(verbose) if ! $(call erlang,$(call validate_app_file)); then \
+ echo "The .app file produced is invalid. Please verify the value of PROJECT_ENV." >&2; \
+ exit 1; \
+ fi
+else
+ $(verbose) if [ -z "$$(grep -e '^[^%]*{\s*modules\s*,' src/$(PROJECT).app.src)" ]; then \
+ echo "Empty modules entry not found in $(PROJECT).app.src. Please consult the erlang.mk documentation for instructions." >&2; \
+ exit 1; \
+ fi
+ $(appsrc_verbose) cat src/$(PROJECT).app.src \
+ | sed "s/{[[:space:]]*modules[[:space:]]*,[[:space:]]*\[\]}/{modules, \[$(call comma_list,$(MODULES))\]}/" \
+ | sed "s/{id,[[:space:]]*\"git\"}/{id, \"$(subst /,\/,$(GITDESCRIBE))\"}/" \
+ > ebin/$(PROJECT).app
+endif
+ifneq ($(wildcard src/$(PROJECT).appup),)
+ $(verbose) cp src/$(PROJECT).appup ebin/
+endif
+
+clean:: clean-app
+
+clean-app:
+ $(gen_verbose) rm -rf $(PROJECT).d ebin/ priv/mibs/ $(XRL_ERL_FILES) $(YRL_ERL_FILES) \
+ $(addprefix include/,$(patsubst %.mib,%.hrl,$(notdir $(MIB_FILES)))) \
+ $(addprefix include/,$(patsubst %.asn1,%.hrl,$(notdir $(ASN1_FILES)))) \
+ $(addprefix include/,$(patsubst %.asn1,%.asn1db,$(notdir $(ASN1_FILES)))) \
+ $(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES))))
+
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Viktor Söderqvist <viktor@zuiderkwast.se>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: docs-deps
+
+# Configuration.
+
+ALL_DOC_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(DOC_DEPS))
+
+# Targets.
+
+$(foreach dep,$(DOC_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+doc-deps:
+else
+doc-deps: $(ALL_DOC_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_DOC_DEPS_DIRS) ; do $(MAKE) -C $$dep IS_DEP=1; done
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: rel-deps
+
+# Configuration.
+
+ALL_REL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(REL_DEPS))
+
+# Targets.
+
+$(foreach dep,$(REL_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+rel-deps:
+else
+rel-deps: $(ALL_REL_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_REL_DEPS_DIRS) ; do $(MAKE) -C $$dep; done
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: test-deps test-dir test-build clean-test-dir
+
+# Configuration.
+
+TEST_DIR ?= $(CURDIR)/test
+
+ALL_TEST_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(TEST_DEPS))
+
+TEST_ERLC_OPTS ?= +debug_info +warn_export_vars +warn_shadow_vars +warn_obsolete_guard
+TEST_ERLC_OPTS += -DTEST=1
+
+# Targets.
+
+$(foreach dep,$(TEST_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+test-deps:
+else
+test-deps: $(ALL_TEST_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_TEST_DEPS_DIRS) ; do \
+ if [ -z "$(strip $(FULL))" ] && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ else \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ fi \
+ done
+endif
+
+ifneq ($(wildcard $(TEST_DIR)),)
+test-dir: $(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build
+ @:
+
+test_erlc_verbose_0 = @echo " ERLC " $(filter-out $(patsubst %,%.erl,$(ERLC_EXCLUDE)),\
+ $(filter %.erl %.core,$(notdir $(FILES_TO_COMPILE))));
+test_erlc_verbose_2 = set -x;
+test_erlc_verbose = $(test_erlc_verbose_$(V))
+
+define compile_test_erl
+ $(test_erlc_verbose) erlc -v $(TEST_ERLC_OPTS) -o $(TEST_DIR) \
+ -pa ebin/ -I include/ $(1)
+endef
+
+ERL_TEST_FILES = $(call core_find,$(TEST_DIR)/,*.erl)
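+# If the Makefile itself changed, recompile every test module; otherwise only
+# compile the modules that are newer than the last build ($?).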
+$(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build: $(ERL_TEST_FILES) $(MAKEFILE_LIST)
+ $(eval FILES_TO_COMPILE := $(if $(filter $(MAKEFILE_LIST),$?),$(filter $(ERL_TEST_FILES),$^),$?))
+ $(if $(strip $(FILES_TO_COMPILE)),$(call compile_test_erl,$(FILES_TO_COMPILE)) && touch $@)
+endif
+
+test-build:: IS_TEST=1
+test-build:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build:: $(if $(wildcard src),$(if $(wildcard ebin/test),,clean)) $(if $(IS_APP),,deps test-deps)
+# We already compiled everything when IS_APP=1.
+ifndef IS_APP
+ifneq ($(wildcard src),)
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(verbose) $(MAKE) --no-print-directory app-build ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(gen_verbose) touch ebin/test
+endif
+ifneq ($(wildcard $(TEST_DIR)),)
+ $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+endif
+endif
+
+# Roughly the same as test-build, but when IS_APP=1.
+# We only care about compiling the current application.
+ifdef IS_APP
+test-build-app:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build-app:: deps test-deps
+ifneq ($(wildcard src),)
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(verbose) $(MAKE) --no-print-directory app-build ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(gen_verbose) touch ebin/test
+endif
+ifneq ($(wildcard $(TEST_DIR)),)
+ $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+endif
+endif
+
+clean:: clean-test-dir
+
+clean-test-dir:
+ifneq ($(wildcard $(TEST_DIR)/*.beam),)
+ $(gen_verbose) rm -f $(TEST_DIR)/*.beam $(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: rebar.config
+
+# We strip out -Werror because we don't want to fail due to
+# warnings when used as a dependency.
+
+compat_prepare_erlc_opts = $(shell echo "$1" | sed 's/, */,/g')
+
+define compat_convert_erlc_opts
+$(if $(filter-out -Werror,$1),\
+ $(if $(findstring +,$1),\
+ $(shell echo $1 | cut -b 2-)))
+endef
+
+define compat_erlc_opts_to_list
+[$(call comma_list,$(foreach o,$(call compat_prepare_erlc_opts,$1),$(call compat_convert_erlc_opts,$o)))]
+endef
+
+define compat_rebar_config
+{deps, [
+$(call comma_list,$(foreach d,$(DEPS),\
+ $(if $(filter hex,$(call dep_fetch,$d)),\
+ {$(call dep_name,$d)$(comma)"$(call dep_repo,$d)"},\
+ {$(call dep_name,$d)$(comma)".*"$(comma){git,"$(call dep_repo,$d)"$(comma)"$(call dep_commit,$d)"}})))
+]}.
+{erl_opts, $(call compat_erlc_opts_to_list,$(ERLC_OPTS))}.
+endef
+
+rebar.config:
+ $(gen_verbose) $(call core_render,compat_rebar_config,rebar.config)
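+
+# As a sketch, a hypothetical "dep_my_dep = git https://example.com/my_dep master"
+# with the default ERLC_OPTS renders roughly as:
+#
+#   {deps, [
+#   {my_dep,".*",{git,"https://example.com/my_dep","master"}}
+#   ]}.
+#   {erl_opts, [debug_info, warn_export_vars, warn_shadow_vars, warn_obsolete_guard]}.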
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter asciideck,$(DEPS) $(DOC_DEPS)),asciideck)
+
+.PHONY: asciidoc asciidoc-guide asciidoc-manual install-asciidoc distclean-asciidoc-guide distclean-asciidoc-manual
+
+# Core targets.
+
+docs:: asciidoc
+
+distclean:: distclean-asciidoc-guide distclean-asciidoc-manual
+
+# Plugin-specific targets.
+
+asciidoc: asciidoc-guide asciidoc-manual
+
+# User guide.
+
+ifeq ($(wildcard doc/src/guide/book.asciidoc),)
+asciidoc-guide:
+else
+asciidoc-guide: distclean-asciidoc-guide doc-deps
+ a2x -v -f pdf doc/src/guide/book.asciidoc && mv doc/src/guide/book.pdf doc/guide.pdf
+ a2x -v -f chunked doc/src/guide/book.asciidoc && mv doc/src/guide/book.chunked/ doc/html/
+
+distclean-asciidoc-guide:
+ $(gen_verbose) rm -rf doc/html/ doc/guide.pdf
+endif
+
+# Man pages.
+
+ASCIIDOC_MANUAL_FILES := $(wildcard doc/src/manual/*.asciidoc)
+
+ifeq ($(ASCIIDOC_MANUAL_FILES),)
+asciidoc-manual:
+else
+
+# Configuration.
+
+MAN_INSTALL_PATH ?= /usr/local/share/man
+MAN_SECTIONS ?= 3 7
+MAN_PROJECT ?= $(shell echo $(PROJECT) | sed 's/^./\U&\E/')
+MAN_VERSION ?= $(PROJECT_VERSION)
+
+# Plugin-specific targets.
+
+define asciidoc2man.erl
+try
+ [begin
+ io:format(" ADOC ~s~n", [F]),
+ ok = asciideck:to_manpage(asciideck:parse_file(F), #{
+ compress => gzip,
+ outdir => filename:dirname(F),
+ extra2 => "$(MAN_PROJECT) $(MAN_VERSION)",
+ extra3 => "$(MAN_PROJECT) Function Reference"
+ })
+ end || F <- [$(shell echo $(addprefix $(comma)\",$(addsuffix \",$1)) | sed 's/^.//')]],
+ halt(0)
+catch C:E ->
+ io:format("Exception ~p:~p~nStacktrace: ~p~n", [C, E, erlang:get_stacktrace()]),
+ halt(1)
+end.
+endef
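+
+# The $(shell echo ... | sed 's/^.//') above builds a double-quoted,
+# comma-separated Erlang list from the space-separated file names: each file
+# is wrapped as ,"FILE" and sed then strips the leading comma.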
+
+asciidoc-manual:: doc-deps
+
+asciidoc-manual:: $(ASCIIDOC_MANUAL_FILES)
+ $(gen_verbose) $(call erlang,$(call asciidoc2man.erl,$?))
+ $(verbose) $(foreach s,$(MAN_SECTIONS),mkdir -p doc/man$s/ && mv doc/src/manual/*.$s.gz doc/man$s/;)
+
+install-docs:: install-asciidoc
+
+install-asciidoc: asciidoc-manual
+ $(foreach s,$(MAN_SECTIONS),\
+ mkdir -p $(MAN_INSTALL_PATH)/man$s/ && \
+ install -g `id -g` -o `id -u` -m 0644 doc/man$s/*.gz $(MAN_INSTALL_PATH)/man$s/;)
+
+distclean-asciidoc-manual:
+ $(gen_verbose) rm -rf $(addprefix doc/man,$(MAN_SECTIONS))
+endif
+endif
+
+# Copyright (c) 2014-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: bootstrap bootstrap-lib bootstrap-rel new list-templates
+
+# Core targets.
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Bootstrap targets:" \
+ " bootstrap Generate a skeleton of an OTP application" \
+ " bootstrap-lib Generate a skeleton of an OTP library" \
+ " bootstrap-rel Generate the files needed to build a release" \
+ " new-app in=NAME Create a new local OTP application NAME" \
+ " new-lib in=NAME Create a new local OTP library NAME" \
+ " new t=TPL n=NAME Generate a module NAME based on the template TPL" \
+ " new t=T n=N in=APP Generate a module NAME based on the template TPL in APP" \
+ " list-templates List available templates"
+
+# Bootstrap templates.
+
+define bs_appsrc
+{application, $p, [
+ {description, ""},
+ {vsn, "0.1.0"},
+ {id, "git"},
+ {modules, []},
+ {registered, []},
+ {applications, [
+ kernel,
+ stdlib
+ ]},
+ {mod, {$p_app, []}},
+ {env, []}
+]}.
+endef
+
+define bs_appsrc_lib
+{application, $p, [
+ {description, ""},
+ {vsn, "0.1.0"},
+ {id, "git"},
+ {modules, []},
+ {registered, []},
+ {applications, [
+ kernel,
+ stdlib
+ ]}
+]}.
+endef
+
+# To prevent autocompletion issues with ZSH, we add "include erlang.mk"
+# separately during the actual bootstrap.
+define bs_Makefile
+PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.1.0
+$(if $(SP),
+# Whitespace to be used when creating files from templates.
+SP = $(SP)
+)
+endef
+
+define bs_apps_Makefile
+PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.1.0
+$(if $(SP),
+# Whitespace to be used when creating files from templates.
+SP = $(SP)
+)
+# Make sure we know where the applications are located.
+ROOT_DIR ?= $(call core_relpath,$(dir $(ERLANG_MK_FILENAME)),$(APPS_DIR)/app)
+APPS_DIR ?= ..
+DEPS_DIR ?= $(call core_relpath,$(DEPS_DIR),$(APPS_DIR)/app)
+
+include $$(ROOT_DIR)/erlang.mk
+endef
+
+define bs_app
+-module($p_app).
+-behaviour(application).
+
+-export([start/2]).
+-export([stop/1]).
+
+start(_Type, _Args) ->
+ $p_sup:start_link().
+
+stop(_State) ->
+ ok.
+endef
+
+define bs_relx_config
+{release, {$p_release, "1"}, [$p, sasl, runtime_tools]}.
+{extended_start_script, true}.
+{sys_config, "config/sys.config"}.
+{vm_args, "config/vm.args"}.
+endef
+
+define bs_sys_config
+[
+].
+endef
+
+define bs_vm_args
+-name $p@127.0.0.1
+-setcookie $p
+-heart
+endef
+
+# Normal templates.
+
+define tpl_supervisor
+-module($(n)).
+-behaviour(supervisor).
+
+-export([start_link/0]).
+-export([init/1]).
+
+start_link() ->
+ supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+init([]) ->
+ Procs = [],
+ {ok, {{one_for_one, 1, 5}, Procs}}.
+endef
+
+define tpl_gen_server
+-module($(n)).
+-behaviour(gen_server).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_server.
+-export([init/1]).
+-export([handle_call/3]).
+-export([handle_cast/2]).
+-export([handle_info/2]).
+-export([terminate/2]).
+-export([code_change/3]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_server:start_link(?MODULE, [], []).
+
+%% gen_server.
+
+init([]) ->
+ {ok, #state{}}.
+
+handle_call(_Request, _From, State) ->
+ {reply, ignored, State}.
+
+handle_cast(_Msg, State) ->
+ {noreply, State}.
+
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+endef
+
+define tpl_module
+-module($(n)).
+-export([]).
+endef
+
+define tpl_cowboy_http
+-module($(n)).
+-behaviour(cowboy_http_handler).
+
+-export([init/3]).
+-export([handle/2]).
+-export([terminate/3]).
+
+-record(state, {
+}).
+
+init(_, Req, _Opts) ->
+ {ok, Req, #state{}}.
+
+handle(Req, State=#state{}) ->
+ {ok, Req2} = cowboy_req:reply(200, Req),
+ {ok, Req2, State}.
+
+terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_gen_fsm
+-module($(n)).
+-behaviour(gen_fsm).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_fsm.
+-export([init/1]).
+-export([state_name/2]).
+-export([handle_event/3]).
+-export([state_name/3]).
+-export([handle_sync_event/4]).
+-export([handle_info/3]).
+-export([terminate/3]).
+-export([code_change/4]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_fsm:start_link(?MODULE, [], []).
+
+%% gen_fsm.
+
+init([]) ->
+ {ok, state_name, #state{}}.
+
+state_name(_Event, StateData) ->
+ {next_state, state_name, StateData}.
+
+handle_event(_Event, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+state_name(_Event, _From, StateData) ->
+ {reply, ignored, state_name, StateData}.
+
+handle_sync_event(_Event, _From, StateName, StateData) ->
+ {reply, ignored, StateName, StateData}.
+
+handle_info(_Info, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+terminate(_Reason, _StateName, _StateData) ->
+ ok.
+
+code_change(_OldVsn, StateName, StateData, _Extra) ->
+ {ok, StateName, StateData}.
+endef
+
+define tpl_gen_statem
+-module($(n)).
+-behaviour(gen_statem).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_statem.
+-export([callback_mode/0]).
+-export([init/1]).
+-export([state_name/3]).
+-export([handle_event/4]).
+-export([terminate/3]).
+-export([code_change/4]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_statem:start_link(?MODULE, [], []).
+
+%% gen_statem.
+
+callback_mode() ->
+ state_functions.
+
+init([]) ->
+ {ok, state_name, #state{}}.
+
+state_name(_EventType, _EventData, StateData) ->
+ {next_state, state_name, StateData}.
+
+handle_event(_EventType, _EventData, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+terminate(_Reason, _StateName, _StateData) ->
+ ok.
+
+code_change(_OldVsn, StateName, StateData, _Extra) ->
+ {ok, StateName, StateData}.
+endef
+
+define tpl_cowboy_loop
+-module($(n)).
+-behaviour(cowboy_loop_handler).
+
+-export([init/3]).
+-export([info/3]).
+-export([terminate/3]).
+
+-record(state, {
+}).
+
+init(_, Req, _Opts) ->
+ {loop, Req, #state{}, 5000, hibernate}.
+
+info(_Info, Req, State) ->
+ {loop, Req, State, hibernate}.
+
+terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_cowboy_rest
+-module($(n)).
+
+-export([init/3]).
+-export([content_types_provided/2]).
+-export([get_html/2]).
+
+init(_, _Req, _Opts) ->
+ {upgrade, protocol, cowboy_rest}.
+
+content_types_provided(Req, State) ->
+ {[{{<<"text">>, <<"html">>, '*'}, get_html}], Req, State}.
+
+get_html(Req, State) ->
+ {<<"<html><body>This is REST!</body></html>">>, Req, State}.
+endef
+
+define tpl_cowboy_ws
+-module($(n)).
+-behaviour(cowboy_websocket_handler).
+
+-export([init/3]).
+-export([websocket_init/3]).
+-export([websocket_handle/3]).
+-export([websocket_info/3]).
+-export([websocket_terminate/3]).
+
+-record(state, {
+}).
+
+init(_, _, _) ->
+ {upgrade, protocol, cowboy_websocket}.
+
+websocket_init(_, Req, _Opts) ->
+ Req2 = cowboy_req:compact(Req),
+ {ok, Req2, #state{}}.
+
+websocket_handle({text, Data}, Req, State) ->
+ {reply, {text, Data}, Req, State};
+websocket_handle({binary, Data}, Req, State) ->
+ {reply, {binary, Data}, Req, State};
+websocket_handle(_Frame, Req, State) ->
+ {ok, Req, State}.
+
+websocket_info(_Info, Req, State) ->
+ {ok, Req, State}.
+
+websocket_terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_ranch_protocol
+-module($(n)).
+-behaviour(ranch_protocol).
+
+-export([start_link/4]).
+-export([init/4]).
+
+-type opts() :: [].
+-export_type([opts/0]).
+
+-record(state, {
+ socket :: inet:socket(),
+ transport :: module()
+}).
+
+start_link(Ref, Socket, Transport, Opts) ->
+ Pid = spawn_link(?MODULE, init, [Ref, Socket, Transport, Opts]),
+ {ok, Pid}.
+
+-spec init(ranch:ref(), inet:socket(), module(), opts()) -> ok.
+init(Ref, Socket, Transport, _Opts) ->
+ ok = ranch:accept_ack(Ref),
+ loop(#state{socket=Socket, transport=Transport}).
+
+loop(State) ->
+ loop(State).
+endef
+
+# Plugin-specific targets.
+
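+# WS holds the whitespace written into generated files: when SP is set, the
+# subst trick below expands to exactly $(SP) spaces; otherwise a tab is used.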
+ifndef WS
+ifdef SP
+WS = $(subst a,,a $(wordlist 1,$(SP),a a a a a a a a a a a a a a a a a a a a))
+else
+WS = $(tab)
+endif
+endif
+
+bootstrap:
+ifneq ($(wildcard src/),)
+ $(error Error: src/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(eval n := $(PROJECT)_sup)
+ $(verbose) $(call core_render,bs_Makefile,Makefile)
+ $(verbose) echo "include erlang.mk" >> Makefile
+ $(verbose) mkdir src/
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc,src/$(PROJECT).app.src)
+endif
+ $(verbose) $(call core_render,bs_app,src/$(PROJECT)_app.erl)
+ $(verbose) $(call core_render,tpl_supervisor,src/$(PROJECT)_sup.erl)
+
+bootstrap-lib:
+ifneq ($(wildcard src/),)
+ $(error Error: src/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(verbose) $(call core_render,bs_Makefile,Makefile)
+ $(verbose) echo "include erlang.mk" >> Makefile
+ $(verbose) mkdir src/
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc_lib,src/$(PROJECT).app.src)
+endif
+
+bootstrap-rel:
+ifneq ($(wildcard relx.config),)
+ $(error Error: relx.config already exists)
+endif
+ifneq ($(wildcard config/),)
+ $(error Error: config/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(verbose) $(call core_render,bs_relx_config,relx.config)
+ $(verbose) mkdir config/
+ $(verbose) $(call core_render,bs_sys_config,config/sys.config)
+ $(verbose) $(call core_render,bs_vm_args,config/vm.args)
+
+new-app:
+ifndef in
+ $(error Usage: $(MAKE) new-app in=APP)
+endif
+ifneq ($(wildcard $(APPS_DIR)/$in),)
+ $(error Error: Application $in already exists)
+endif
+ $(eval p := $(in))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(eval n := $(in)_sup)
+ $(verbose) mkdir -p $(APPS_DIR)/$p/src/
+ $(verbose) $(call core_render,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile)
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc,$(APPS_DIR)/$p/src/$p.app.src)
+endif
+ $(verbose) $(call core_render,bs_app,$(APPS_DIR)/$p/src/$p_app.erl)
+ $(verbose) $(call core_render,tpl_supervisor,$(APPS_DIR)/$p/src/$p_sup.erl)
+
+new-lib:
+ifndef in
+ $(error Usage: $(MAKE) new-lib in=APP)
+endif
+ifneq ($(wildcard $(APPS_DIR)/$in),)
+ $(error Error: Application $in already exists)
+endif
+ $(eval p := $(in))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(verbose) mkdir -p $(APPS_DIR)/$p/src/
+ $(verbose) $(call core_render,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile)
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc_lib,$(APPS_DIR)/$p/src/$p.app.src)
+endif
+
+new:
+ifeq ($(wildcard src/)$(in),)
+ $(error Error: src/ directory does not exist)
+endif
+ifndef t
+ $(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP])
+endif
+ifndef n
+ $(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP])
+endif
+ifdef in
+ $(verbose) $(call core_render,tpl_$(t),$(APPS_DIR)/$(in)/src/$(n).erl)
+else
+ $(verbose) $(call core_render,tpl_$(t),src/$(n).erl)
+endif
+
+list-templates:
+	$(verbose) echo Available templates:
+ $(verbose) printf " %s\n" $(sort $(patsubst tpl_%,%,$(filter tpl_%,$(.VARIABLES))))
+
+# Copyright (c) 2014-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: clean-c_src distclean-c_src-env
+
+# Configuration.
+
+C_SRC_DIR ?= $(CURDIR)/c_src
+C_SRC_ENV ?= $(C_SRC_DIR)/env.mk
+C_SRC_OUTPUT ?= $(CURDIR)/priv/$(PROJECT)
+C_SRC_TYPE ?= shared
+
+# System type and C compiler/flags.
+
+ifeq ($(PLATFORM),msys2)
+ C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?= .exe
+ C_SRC_OUTPUT_SHARED_EXTENSION ?= .dll
+else
+ C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?=
+ C_SRC_OUTPUT_SHARED_EXTENSION ?= .so
+endif
+
+ifeq ($(C_SRC_TYPE),shared)
+ C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_SHARED_EXTENSION)
+else
+ C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_EXECUTABLE_EXTENSION)
+endif
+
+ifeq ($(PLATFORM),msys2)
+# We hardcode the compiler used on MSYS2. The default CC=cc does
+# not produce working code. The "gcc" MSYS2 package also doesn't.
+ CC = /mingw64/bin/gcc
+ export CC
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),darwin)
+ CC ?= cc
+ CFLAGS ?= -O3 -std=c99 -arch x86_64 -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -arch x86_64 -Wall
+ LDFLAGS ?= -arch x86_64 -flat_namespace -undefined suppress
+else ifeq ($(PLATFORM),freebsd)
+ CC ?= cc
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),linux)
+ CC ?= gcc
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+endif
+
+ifneq ($(PLATFORM),msys2)
+ CFLAGS += -fPIC
+ CXXFLAGS += -fPIC
+endif
+
+CFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
+CXXFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
+
+LDLIBS += -L"$(ERL_INTERFACE_LIB_DIR)" -lei
+
+# Verbosity.
+
+c_verbose_0 = @echo " C " $(filter-out $(notdir $(MAKEFILE_LIST) $(C_SRC_ENV)),$(^F));
+c_verbose = $(c_verbose_$(V))
+
+cpp_verbose_0 = @echo " CPP " $(filter-out $(notdir $(MAKEFILE_LIST) $(C_SRC_ENV)),$(^F));
+cpp_verbose = $(cpp_verbose_$(V))
+
+link_verbose_0 = @echo " LD " $(@F);
+link_verbose = $(link_verbose_$(V))
+
+# Targets.
+
+ifeq ($(wildcard $(C_SRC_DIR)),)
+else ifneq ($(wildcard $(C_SRC_DIR)/Makefile),)
+app:: app-c_src
+
+test-build:: app-c_src
+
+app-c_src:
+ $(MAKE) -C $(C_SRC_DIR)
+
+clean::
+ $(MAKE) -C $(C_SRC_DIR) clean
+
+else
+
+ifeq ($(SOURCES),)
+SOURCES := $(sort $(foreach pat,*.c *.C *.cc *.cpp,$(call core_find,$(C_SRC_DIR)/,$(pat))))
+endif
+OBJECTS = $(addsuffix .o, $(basename $(SOURCES)))
+
+COMPILE_C = $(c_verbose) $(CC) $(CFLAGS) $(CPPFLAGS) -c
+COMPILE_CPP = $(cpp_verbose) $(CXX) $(CXXFLAGS) $(CPPFLAGS) -c
+
+app:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
+
+test-build:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
+
+$(C_SRC_OUTPUT_FILE): $(OBJECTS)
+ $(verbose) mkdir -p $(dir $@)
+ $(link_verbose) $(CC) $(OBJECTS) \
+ $(LDFLAGS) $(if $(filter $(C_SRC_TYPE),shared),-shared) $(LDLIBS) \
+ -o $(C_SRC_OUTPUT_FILE)
+
+$(OBJECTS): $(MAKEFILE_LIST) $(C_SRC_ENV)
+
+%.o: %.c
+ $(COMPILE_C) $(OUTPUT_OPTION) $<
+
+%.o: %.cc
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+%.o: %.C
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+%.o: %.cpp
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+clean:: clean-c_src
+
+clean-c_src:
+ $(gen_verbose) rm -f $(C_SRC_OUTPUT_FILE) $(OBJECTS)
+
+endif
+
+ifneq ($(wildcard $(C_SRC_DIR)),)
+ERL_ERTS_DIR = $(shell $(ERL) -eval 'io:format("~s~n", [code:lib_dir(erts)]), halt().')
+
+$(C_SRC_ENV):
+ $(verbose) $(ERL) -eval "file:write_file(\"$(call core_native_path,$(C_SRC_ENV))\", \
+ io_lib:format( \
+ \"# Generated by Erlang.mk. Edit at your own risk!~n~n\" \
+ \"ERTS_INCLUDE_DIR ?= ~s/erts-~s/include/~n\" \
+ \"ERL_INTERFACE_INCLUDE_DIR ?= ~s~n\" \
+ \"ERL_INTERFACE_LIB_DIR ?= ~s~n\" \
+ \"ERTS_DIR ?= $(ERL_ERTS_DIR)~n\", \
+ [code:root_dir(), erlang:system_info(version), \
+ code:lib_dir(erl_interface, include), \
+ code:lib_dir(erl_interface, lib)])), \
+ halt()."
+
+distclean:: distclean-c_src-env
+
+distclean-c_src-env:
+ $(gen_verbose) rm -f $(C_SRC_ENV)
+
+-include $(C_SRC_ENV)
+
+ifneq ($(ERL_ERTS_DIR),$(ERTS_DIR))
+$(shell rm -f $(C_SRC_ENV))
+endif
+endif
+
+# Templates.
+
+define bs_c_nif
+#include "erl_nif.h"
+
+static int loads = 0;
+
+static int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
+{
+ /* Initialize private data. */
+ *priv_data = NULL;
+
+ loads++;
+
+ return 0;
+}
+
+static int upgrade(ErlNifEnv* env, void** priv_data, void** old_priv_data, ERL_NIF_TERM load_info)
+{
+ /* Convert the private data to the new version. */
+ *priv_data = *old_priv_data;
+
+ loads++;
+
+ return 0;
+}
+
+static void unload(ErlNifEnv* env, void* priv_data)
+{
+ if (loads == 1) {
+ /* Destroy the private data. */
+ }
+
+ loads--;
+}
+
+static ERL_NIF_TERM hello(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ if (enif_is_atom(env, argv[0])) {
+ return enif_make_tuple2(env,
+ enif_make_atom(env, "hello"),
+ argv[0]);
+ }
+
+ return enif_make_tuple2(env,
+ enif_make_atom(env, "error"),
+ enif_make_atom(env, "badarg"));
+}
+
+static ErlNifFunc nif_funcs[] = {
+ {"hello", 1, hello}
+};
+
+ERL_NIF_INIT($n, nif_funcs, load, NULL, upgrade, unload)
+endef
+
+define bs_erl_nif
+-module($n).
+
+-export([hello/1]).
+
+-on_load(on_load/0).
+on_load() ->
+ PrivDir = case code:priv_dir(?MODULE) of
+ {error, _} ->
+ AppPath = filename:dirname(filename:dirname(code:which(?MODULE))),
+ filename:join(AppPath, "priv");
+ Path ->
+ Path
+ end,
+ erlang:load_nif(filename:join(PrivDir, atom_to_list(?MODULE)), 0).
+
+hello(_) ->
+ erlang:nif_error({not_loaded, ?MODULE}).
+endef
+
+new-nif:
+ifneq ($(wildcard $(C_SRC_DIR)/$n.c),)
+ $(error Error: $(C_SRC_DIR)/$n.c already exists)
+endif
+ifneq ($(wildcard src/$n.erl),)
+ $(error Error: src/$n.erl already exists)
+endif
+ifndef n
+ $(error Usage: $(MAKE) new-nif n=NAME [in=APP])
+endif
+ifdef in
+ $(verbose) $(MAKE) -C $(APPS_DIR)/$(in)/ new-nif n=$n in=
+else
+ $(verbose) mkdir -p $(C_SRC_DIR) src/
+ $(verbose) $(call core_render,bs_c_nif,$(C_SRC_DIR)/$n.c)
+ $(verbose) $(call core_render,bs_erl_nif,src/$n.erl)
+endif
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: ci ci-prepare ci-setup
+
+CI_OTP ?=
+CI_HIPE ?=
+CI_ERLLVM ?=
+
+ifeq ($(CI_VM),native)
+ERLC_OPTS += +native
+TEST_ERLC_OPTS += +native
+else ifeq ($(CI_VM),erllvm)
+ERLC_OPTS += +native +'{hipe, [to_llvm]}'
+TEST_ERLC_OPTS += +native +'{hipe, [to_llvm]}'
+endif
+
+ifeq ($(strip $(CI_OTP) $(CI_HIPE) $(CI_ERLLVM)),)
+ci::
+else
+
+ci:: $(addprefix ci-,$(CI_OTP) $(addsuffix -native,$(CI_HIPE)) $(addsuffix -erllvm,$(CI_ERLLVM)))
+
+ci-prepare: $(addprefix $(KERL_INSTALL_DIR)/,$(CI_OTP) $(addsuffix -native,$(CI_HIPE)))
+
+ci-setup::
+ $(verbose) :
+
+ci-extra::
+ $(verbose) :
+
+ci_verbose_0 = @echo " CI " $(1);
+ci_verbose = $(ci_verbose_$(V))
+
+define ci_target
+ci-$1: $(KERL_INSTALL_DIR)/$2
+ $(verbose) $(MAKE) --no-print-directory clean
+ $(ci_verbose) \
+ PATH="$(KERL_INSTALL_DIR)/$2/bin:$(PATH)" \
+ CI_OTP_RELEASE="$1" \
+ CT_OPTS="-label $1" \
+ CI_VM="$3" \
+ $(MAKE) ci-setup tests
+ $(verbose) $(MAKE) --no-print-directory ci-extra
+endef
+
+$(foreach otp,$(CI_OTP),$(eval $(call ci_target,$(otp),$(otp),otp)))
+$(foreach otp,$(CI_HIPE),$(eval $(call ci_target,$(otp)-native,$(otp)-native,native)))
+$(foreach otp,$(CI_ERLLVM),$(eval $(call ci_target,$(otp)-erllvm,$(otp)-native,erllvm)))
+
+$(foreach otp,$(filter-out $(ERLANG_OTP),$(CI_OTP)),$(eval $(call kerl_otp_target,$(otp))))
+$(foreach otp,$(filter-out $(ERLANG_HIPE),$(sort $(CI_HIPE) $(CI_ERLLVM))),$(eval $(call kerl_hipe_target,$(otp))))
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Continuous Integration targets:" \
+ " ci Run '$(MAKE) tests' on all configured Erlang versions." \
+ "" \
+ "The CI_OTP variable must be defined with the Erlang versions" \
+ "that must be tested. For example: CI_OTP = OTP-17.3.4 OTP-17.5.3"
+
+endif
+
+# Copyright (c) 2020, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifdef CONCUERROR_TESTS
+
+.PHONY: concuerror distclean-concuerror
+
+# Configuration
+
+CONCUERROR_LOGS_DIR ?= $(CURDIR)/logs
+CONCUERROR_OPTS ?=
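+
+# CONCUERROR_TESTS takes module:function pairs. A hypothetical
+# "CONCUERROR_TESTS = my_mod:my_test" yields the target
+# concuerror-my_mod-my_test and the log file concuerror-my_mod-my_test.txt.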
+
+# Core targets.
+
+check:: concuerror
+
+ifndef KEEP_LOGS
+distclean:: distclean-concuerror
+endif
+
+# Plugin-specific targets.
+
+$(ERLANG_MK_TMP)/Concuerror/bin/concuerror: | $(ERLANG_MK_TMP)
+ $(verbose) git clone https://github.com/parapluu/Concuerror $(ERLANG_MK_TMP)/Concuerror
+ $(verbose) $(MAKE) -C $(ERLANG_MK_TMP)/Concuerror
+
+$(CONCUERROR_LOGS_DIR):
+ $(verbose) mkdir -p $(CONCUERROR_LOGS_DIR)
+
+define concuerror_html_report
+<!DOCTYPE html>
+<html lang="en">
+<head>
+<meta charset="utf-8">
+<title>Concuerror HTML report</title>
+</head>
+<body>
+<h1>Concuerror HTML report</h1>
+<p>Generated on $(concuerror_date)</p>
+<ul>
+$(foreach t,$(concuerror_targets),<li><a href="$(t).txt">$(t)</a></li>)
+</ul>
+</body>
+</html>
+endef
+
+concuerror: $(addprefix concuerror-,$(subst :,-,$(CONCUERROR_TESTS)))
+ $(eval concuerror_date := $(shell date))
+ $(eval concuerror_targets := $^)
+ $(verbose) $(call core_render,concuerror_html_report,$(CONCUERROR_LOGS_DIR)/concuerror.html)
+
+define concuerror_target
+.PHONY: concuerror-$1-$2
+
+concuerror-$1-$2: test-build | $(ERLANG_MK_TMP)/Concuerror/bin/concuerror $(CONCUERROR_LOGS_DIR)
+ $(ERLANG_MK_TMP)/Concuerror/bin/concuerror \
+ --pa $(CURDIR)/ebin --pa $(TEST_DIR) \
+ -o $(CONCUERROR_LOGS_DIR)/concuerror-$1-$2.txt \
+ $$(CONCUERROR_OPTS) -m $1 -t $2
+endef
+
+$(foreach test,$(CONCUERROR_TESTS),$(eval $(call concuerror_target,$(firstword $(subst :, ,$(test))),$(lastword $(subst :, ,$(test))))))
+
+distclean-concuerror:
+ $(gen_verbose) rm -rf $(CONCUERROR_LOGS_DIR)
+
+endif
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: ct apps-ct distclean-ct
+
+# Configuration.
+
+CT_OPTS ?=
+
+ifneq ($(wildcard $(TEST_DIR)),)
+ifndef CT_SUITES
+CT_SUITES := $(sort $(subst _SUITE.erl,,$(notdir $(call core_find,$(TEST_DIR)/,*_SUITE.erl))))
+endif
+endif
+CT_SUITES ?=
+CT_LOGS_DIR ?= $(CURDIR)/logs
+
+# Core targets.
+
+tests:: ct
+
+ifndef KEEP_LOGS
+distclean:: distclean-ct
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Common_test targets:" \
+ " ct Run all the common_test suites for this project" \
+ "" \
+		"Each of your common_test suites has an associated target." \
+		"A suite named http_SUITE can be run using the ct-http target."
+
+# Plugin-specific targets.
+
+CT_RUN = ct_run \
+ -no_auto_compile \
+ -noinput \
+ -pa $(CURDIR)/ebin $(TEST_DIR) \
+ -dir $(TEST_DIR) \
+ -logdir $(CT_LOGS_DIR)
+
+ifeq ($(CT_SUITES),)
+ct: $(if $(IS_APP)$(ROOT_DIR),,apps-ct)
+else
+# We do not run tests if we are in an apps/* with no test directory.
+ifneq ($(IS_APP)$(wildcard $(TEST_DIR)),1)
+ct: test-build $(if $(IS_APP)$(ROOT_DIR),,apps-ct)
+ $(verbose) mkdir -p $(CT_LOGS_DIR)
+ $(gen_verbose) $(CT_RUN) -sname ct_$(PROJECT) -suite $(addsuffix _SUITE,$(CT_SUITES)) $(CT_OPTS)
+endif
+endif
+
+ifneq ($(ALL_APPS_DIRS),)
+define ct_app_target
+apps-ct-$1: test-build
+ $$(MAKE) -C $1 ct IS_APP=1
+endef
+
+$(foreach app,$(ALL_APPS_DIRS),$(eval $(call ct_app_target,$(app))))
+
+apps-ct: $(addprefix apps-ct-,$(ALL_APPS_DIRS))
+endif
+
+ifdef t
+ifeq (,$(findstring :,$t))
+CT_EXTRA = -group $t
+else
+t_words = $(subst :, ,$t)
+CT_EXTRA = -group $(firstword $(t_words)) -case $(lastword $(t_words))
+endif
+else
+ifdef c
+CT_EXTRA = -case $c
+else
+CT_EXTRA =
+endif
+endif
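+
+# For example, with a suite named http_SUITE (hypothetical group/case names):
+#
+#   make ct-http t=my_group
+#   make ct-http t=my_group:my_case
+#   make ct-http c=my_case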
+
+define ct_suite_target
+ct-$(1): test-build
+ $(verbose) mkdir -p $(CT_LOGS_DIR)
+ $(gen_verbose_esc) $(CT_RUN) -sname ct_$(PROJECT) -suite $(addsuffix _SUITE,$(1)) $(CT_EXTRA) $(CT_OPTS)
+endef
+
+$(foreach test,$(CT_SUITES),$(eval $(call ct_suite_target,$(test))))
+
+distclean-ct:
+ $(gen_verbose) rm -rf $(CT_LOGS_DIR)
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: plt distclean-plt dialyze
+
+# Configuration.
+
+DIALYZER_PLT ?= $(CURDIR)/.$(PROJECT).plt
+export DIALYZER_PLT
+
+PLT_APPS ?=
+DIALYZER_DIRS ?= --src -r $(wildcard src) $(ALL_APPS_DIRS)
+DIALYZER_OPTS ?= -Werror_handling -Wrace_conditions -Wunmatched_returns # -Wunderspecs
+DIALYZER_PLT_OPTS ?=
+
+# Core targets.
+
+check:: dialyze
+
+distclean:: distclean-plt
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Dialyzer targets:" \
+ " plt Build a PLT file for this project" \
+ " dialyze Analyze the project using Dialyzer"
+
+# Plugin-specific targets.
+
+define filter_opts.erl
+ Opts = init:get_plain_arguments(),
+ {Filtered, _} = lists:foldl(fun
+ (O, {Os, true}) -> {[O|Os], false};
+ (O = "-D", {Os, _}) -> {[O|Os], true};
+ (O = [\\$$-, \\$$D, _ | _], {Os, _}) -> {[O|Os], false};
+ (O = "-I", {Os, _}) -> {[O|Os], true};
+ (O = [\\$$-, \\$$I, _ | _], {Os, _}) -> {[O|Os], false};
+ (O = "-pa", {Os, _}) -> {[O|Os], true};
+ (_, Acc) -> Acc
+ end, {[], false}, Opts),
+ io:format("~s~n", [string:join(lists:reverse(Filtered), " ")]),
+ halt().
+endef
+
+# DIALYZER_PLT is a variable understood directly by Dialyzer.
+#
+# We append the path to erts at the end of the PLT. This works
+# because the PLT file is in the external term format and the
+# function binary_to_term/1 ignores any trailing data.
+$(DIALYZER_PLT): deps app
+ $(eval DEPS_LOG := $(shell test -f $(ERLANG_MK_TMP)/deps.log && \
+ while read p; do test -d $$p/ebin && echo $$p/ebin; done <$(ERLANG_MK_TMP)/deps.log))
+ $(verbose) dialyzer --build_plt $(DIALYZER_PLT_OPTS) --apps \
+ erts kernel stdlib $(PLT_APPS) $(OTP_DEPS) $(LOCAL_DEPS) $(DEPS_LOG) || test $$? -eq 2
+ $(verbose) $(ERL) -eval 'io:format("~n~s~n", [code:lib_dir(erts)]), halt().' >> $@
+
+plt: $(DIALYZER_PLT)
+
+distclean-plt:
+ $(gen_verbose) rm -f $(DIALYZER_PLT)
+
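+# Rebuild the PLT when the erts path appended above no longer matches the
+# running Erlang/OTP installation, i.e. it was built against a different one.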
+ifneq ($(wildcard $(DIALYZER_PLT)),)
+dialyze: $(if $(filter --src,$(DIALYZER_DIRS)),,deps app)
+ $(verbose) if ! tail -n1 $(DIALYZER_PLT) | \
+ grep -q "^`$(ERL) -eval 'io:format("~s", [code:lib_dir(erts)]), halt().'`$$"; then \
+ rm $(DIALYZER_PLT); \
+ $(MAKE) plt; \
+ fi
+else
+dialyze: $(DIALYZER_PLT)
+endif
+ $(verbose) dialyzer --no_native `$(ERL) \
+ -eval "$(subst $(newline),,$(call escape_dquotes,$(call filter_opts.erl)))" \
+ -extra $(ERLC_OPTS)` $(DIALYZER_DIRS) $(DIALYZER_OPTS) $(if $(wildcard ebin/),-pa ebin/)
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-edoc edoc
+
+# Configuration.
+
+EDOC_OPTS ?=
+EDOC_SRC_DIRS ?=
+EDOC_OUTPUT ?= doc
+
+define edoc.erl
+ SrcPaths = lists:foldl(fun(P, Acc) ->
+ filelib:wildcard(atom_to_list(P) ++ "/{src,c_src}") ++ Acc
+ end, [], [$(call comma_list,$(patsubst %,'%',$(call core_native_path,$(EDOC_SRC_DIRS))))]),
+ DefaultOpts = [{dir, "$(EDOC_OUTPUT)"}, {source_path, SrcPaths}, {subpackages, false}],
+ edoc:application($(1), ".", [$(2)] ++ DefaultOpts),
+ halt(0).
+endef
+
+# Core targets.
+
+ifneq ($(strip $(EDOC_SRC_DIRS)$(wildcard doc/overview.edoc)),)
+docs:: edoc
+endif
+
+distclean:: distclean-edoc
+
+# Plugin-specific targets.
+
+edoc: distclean-edoc doc-deps
+ $(gen_verbose) $(call erlang,$(call edoc.erl,$(PROJECT),$(EDOC_OPTS)))
+
+distclean-edoc:
+ $(gen_verbose) rm -f $(EDOC_OUTPUT)/*.css $(EDOC_OUTPUT)/*.html $(EDOC_OUTPUT)/*.png $(EDOC_OUTPUT)/edoc-info
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Configuration.
+
+DTL_FULL_PATH ?=
+DTL_PATH ?= templates/
+DTL_PREFIX ?=
+DTL_SUFFIX ?= _dtl
+DTL_OPTS ?=
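+
+# With the defaults above, a template such as templates/my_page.dtl compiles
+# to the module my_page_dtl (ebin/my_page_dtl.beam); with DTL_FULL_PATH set,
+# subdirectory separators in the path become underscores in the module name.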
+
+# Verbosity.
+
+dtl_verbose_0 = @echo " DTL " $(filter %.dtl,$(?F));
+dtl_verbose = $(dtl_verbose_$(V))
+
+# Core targets.
+
+DTL_PATH := $(abspath $(DTL_PATH))
+DTL_FILES := $(sort $(call core_find,$(DTL_PATH),*.dtl))
+
+ifneq ($(DTL_FILES),)
+
+DTL_NAMES = $(addprefix $(DTL_PREFIX),$(addsuffix $(DTL_SUFFIX),$(DTL_FILES:$(DTL_PATH)/%.dtl=%)))
+DTL_MODULES = $(if $(DTL_FULL_PATH),$(subst /,_,$(DTL_NAMES)),$(notdir $(DTL_NAMES)))
+BEAM_FILES += $(addsuffix .beam,$(addprefix ebin/,$(DTL_MODULES)))
+
+ifneq ($(words $(DTL_FILES)),0)
+# Rebuild templates when the Makefile changes.
+$(ERLANG_MK_TMP)/last-makefile-change-erlydtl: $(MAKEFILE_LIST) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(DTL_FILES); \
+ fi
+ $(verbose) touch $@
+
+ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change-erlydtl
+endif
+
+define erlydtl_compile.erl
+ [begin
+ Module0 = case "$(strip $(DTL_FULL_PATH))" of
+ "" ->
+ filename:basename(F, ".dtl");
+ _ ->
+ "$(call core_native_path,$(DTL_PATH))/" ++ F2 = filename:rootname(F, ".dtl"),
+ re:replace(F2, "/", "_", [{return, list}, global])
+ end,
+ Module = list_to_atom("$(DTL_PREFIX)" ++ string:to_lower(Module0) ++ "$(DTL_SUFFIX)"),
+ case erlydtl:compile(F, Module, [$(DTL_OPTS)] ++ [{out_dir, "ebin/"}, return_errors]) of
+ ok -> ok;
+ {ok, _} -> ok
+ end
+ end || F <- string:tokens("$(1)", " ")],
+ halt().
+endef
+
+ebin/$(PROJECT).app:: $(DTL_FILES) | ebin/
+ $(if $(strip $?),\
+ $(dtl_verbose) $(call erlang,$(call erlydtl_compile.erl,$(call core_native_path,$?)),\
+ -pa ebin/))
+
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, Dave Cottlehuber <dch@skunkwerks.at>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-escript escript escript-zip
+
+# Configuration.
+
+ESCRIPT_NAME ?= $(PROJECT)
+ESCRIPT_FILE ?= $(ESCRIPT_NAME)
+
+ESCRIPT_SHEBANG ?= /usr/bin/env escript
+ESCRIPT_COMMENT ?= This is an -*- erlang -*- file
+ESCRIPT_EMU_ARGS ?= -escript main $(ESCRIPT_NAME)
+
+ESCRIPT_ZIP ?= 7z a -tzip -mx=9 -mtc=off $(if $(filter-out 0,$(V)),,> /dev/null)
+ESCRIPT_ZIP_FILE ?= $(ERLANG_MK_TMP)/escript.zip
+
+# Core targets.
+
+distclean:: distclean-escript
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Escript targets:" \
+		" escript Build an executable escript archive"
+
+# Plugin-specific targets.
+
+escript-zip:: FULL=1
+escript-zip:: deps app
+	$(verbose) mkdir -p $(dir $(ESCRIPT_ZIP_FILE))
+ $(verbose) rm -f $(ESCRIPT_ZIP_FILE)
+ $(gen_verbose) cd .. && $(ESCRIPT_ZIP) $(ESCRIPT_ZIP_FILE) $(PROJECT)/ebin/*
+ifneq ($(DEPS),)
+ $(verbose) cd $(DEPS_DIR) && $(ESCRIPT_ZIP) $(ESCRIPT_ZIP_FILE) \
+ $(subst $(DEPS_DIR)/,,$(addsuffix /*,$(wildcard \
+ $(addsuffix /ebin,$(shell cat $(ERLANG_MK_TMP)/deps.log)))))
+endif
+
+escript:: escript-zip
+ $(gen_verbose) printf "%s\n" \
+ "#!$(ESCRIPT_SHEBANG)" \
+ "%% $(ESCRIPT_COMMENT)" \
+ "%%! $(ESCRIPT_EMU_ARGS)" > $(ESCRIPT_FILE)
+ $(verbose) cat $(ESCRIPT_ZIP_FILE) >> $(ESCRIPT_FILE)
+ $(verbose) chmod +x $(ESCRIPT_FILE)
+
+distclean-escript:
+ $(gen_verbose) rm -f $(ESCRIPT_FILE)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, Enrique Fernandez <enrique.fernandez@erlang-solutions.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: eunit apps-eunit
+
+# Configuration
+
+EUNIT_OPTS ?=
+EUNIT_ERL_OPTS ?=
+
+# Core targets.
+
+tests:: eunit
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "EUnit targets:" \
+ " eunit Run all the EUnit tests for this project"
+
+# Plugin-specific targets.
+
+define eunit.erl
+ $(call cover.erl)
+ CoverSetup(),
+ case eunit:test($1, [$(EUNIT_OPTS)]) of
+ ok -> ok;
+ error -> halt(2)
+ end,
+ CoverExport("$(call core_native_path,$(COVER_DATA_DIR))/eunit.coverdata"),
+ halt()
+endef
+
+EUNIT_ERL_OPTS += -pa $(TEST_DIR) $(CURDIR)/ebin
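+
+# Usage sketch (assuming a test module my_module_tests that exports my_test/0):
+#   make eunit                            # run all EUnit tests
+#   make eunit t=my_module_tests          # run one module's tests
+#   make eunit t=my_module_tests:my_test  # run a single test function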
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+eunit: test-build cover-data-dir
+ $(gen_verbose) $(call erlang,$(call eunit.erl,['$(t)']),$(EUNIT_ERL_OPTS))
+else
+eunit: test-build cover-data-dir
+ $(gen_verbose) $(call erlang,$(call eunit.erl,fun $(t)/0),$(EUNIT_ERL_OPTS))
+endif
+else
+EUNIT_EBIN_MODS = $(notdir $(basename $(ERL_FILES) $(BEAM_FILES)))
+EUNIT_TEST_MODS = $(notdir $(basename $(call core_find,$(TEST_DIR)/,*.erl)))
+
+EUNIT_MODS = $(foreach mod,$(EUNIT_EBIN_MODS) $(filter-out \
+ $(patsubst %,%_tests,$(EUNIT_EBIN_MODS)),$(EUNIT_TEST_MODS)),'$(mod)')
+
+eunit: test-build $(if $(IS_APP)$(ROOT_DIR),,apps-eunit) cover-data-dir
+ifneq ($(wildcard src/ $(TEST_DIR)),)
+ $(gen_verbose) $(call erlang,$(call eunit.erl,[$(call comma_list,$(EUNIT_MODS))]),$(EUNIT_ERL_OPTS))
+endif
+
+ifneq ($(ALL_APPS_DIRS),)
+apps-eunit: test-build
+ $(verbose) eunit_retcode=0 ; for app in $(ALL_APPS_DIRS); do $(MAKE) -C $$app eunit IS_APP=1; \
+ [ $$? -ne 0 ] && eunit_retcode=1 ; done ; \
+ exit $$eunit_retcode
+endif
+endif
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter proper,$(DEPS) $(TEST_DEPS)),proper)
+.PHONY: proper
+
+# Targets.
+
+tests:: proper
+
+define proper_check.erl
+ $(call cover.erl)
+ code:add_pathsa([
+ "$(call core_native_path,$(CURDIR)/ebin)",
+ "$(call core_native_path,$(DEPS_DIR)/*/ebin)",
+ "$(call core_native_path,$(TEST_DIR))"]),
+ Module = fun(M) ->
+ [true] =:= lists:usort([
+ case atom_to_list(F) of
+ "prop_" ++ _ ->
+ io:format("Testing ~p:~p/0~n", [M, F]),
+ proper:quickcheck(M:F(), nocolors);
+ _ ->
+ true
+ end
+ || {F, 0} <- M:module_info(exports)])
+ end,
+ try begin
+ CoverSetup(),
+ Res = case $(1) of
+ all -> [true] =:= lists:usort([Module(M) || M <- [$(call comma_list,$(3))]]);
+ module -> Module($(2));
+ function -> proper:quickcheck($(2), nocolors)
+ end,
+ CoverExport("$(COVER_DATA_DIR)/proper.coverdata"),
+ Res
+ end of
+ true -> halt(0);
+ _ -> halt(1)
+ catch error:undef ->
+ io:format("Undefined property or module?~n~p~n", [erlang:get_stacktrace()]),
+ halt(0)
+ end.
+endef
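+
+# Usage sketch (assuming a module prop_my_app that exports prop_* functions):
+#   make proper                           # check every prop_* in every module
+#   make proper t=prop_my_app             # check a single module
+#   make proper t=prop_my_app:prop_foo    # check a single property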
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+proper: test-build cover-data-dir
+ $(verbose) $(call erlang,$(call proper_check.erl,module,$(t)))
+else
+proper: test-build cover-data-dir
+ $(verbose) echo Testing $(t)/0
+ $(verbose) $(call erlang,$(call proper_check.erl,function,$(t)()))
+endif
+else
+proper: test-build cover-data-dir
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(wildcard ebin/*.beam) $(call core_find,$(TEST_DIR)/,*.beam))))))
+ $(gen_verbose) $(call erlang,$(call proper_check.erl,all,undefined,$(MODULES)))
+endif
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Verbosity.
+
+proto_verbose_0 = @echo " PROTO " $(filter %.proto,$(?F));
+proto_verbose = $(proto_verbose_$(V))
+
+# Core targets.
+
+ifneq ($(wildcard src/),)
+ifneq ($(filter gpb protobuffs,$(BUILD_DEPS) $(DEPS)),)
+PROTO_FILES := $(filter %.proto,$(ALL_SRC_FILES))
+ERL_FILES += $(addprefix src/,$(patsubst %.proto,%_pb.erl,$(notdir $(PROTO_FILES))))
+
+ifeq ($(PROTO_FILES),)
+$(ERLANG_MK_TMP)/last-makefile-change-protobuffs:
+ $(verbose) :
+else
+# Rebuild proto files when the Makefile changes.
+# We exclude $(PROJECT).d to avoid a circular dependency.
+$(ERLANG_MK_TMP)/last-makefile-change-protobuffs: $(filter-out $(PROJECT).d,$(MAKEFILE_LIST)) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(PROTO_FILES); \
+ fi
+ $(verbose) touch $@
+
+$(PROJECT).d:: $(ERLANG_MK_TMP)/last-makefile-change-protobuffs
+endif
+
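+# Listing gpb in $(DEPS) or $(BUILD_DEPS) selects the gpb_compile branch of
+# compile_proto.erl below; otherwise protobuffs_compile is used.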
+ifeq ($(filter gpb,$(BUILD_DEPS) $(DEPS)),)
+define compile_proto.erl
+ [begin
+ protobuffs_compile:generate_source(F, [
+ {output_include_dir, "./include"},
+ {output_src_dir, "./src"}])
+ end || F <- string:tokens("$1", " ")],
+ halt().
+endef
+else
+define compile_proto.erl
+ [begin
+ gpb_compile:file(F, [
+ {include_as_lib, true},
+ {module_name_suffix, "_pb"},
+ {o_hrl, "./include"},
+ {o_erl, "./src"}])
+ end || F <- string:tokens("$1", " ")],
+ halt().
+endef
+endif
+
+ifneq ($(PROTO_FILES),)
+$(PROJECT).d:: $(PROTO_FILES)
+ $(verbose) mkdir -p ebin/ include/
+ $(if $(strip $?),$(proto_verbose) $(call erlang,$(call compile_proto.erl,$?)))
+endif
+endif
+endif
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: relx-rel relx-relup distclean-relx-rel run
+
+# Configuration.
+
+RELX ?= $(ERLANG_MK_TMP)/relx
+RELX_CONFIG ?= $(CURDIR)/relx.config
+
+RELX_URL ?= https://erlang.mk/res/relx-v3.27.0
+RELX_OPTS ?=
+RELX_OUTPUT_DIR ?= _rel
+RELX_REL_EXT ?=
+RELX_TAR ?= 1
+
+ifdef SFX
+ RELX_TAR = 1
+endif
+
+ifeq ($(firstword $(RELX_OPTS)),-o)
+ RELX_OUTPUT_DIR = $(word 2,$(RELX_OPTS))
+else
+ RELX_OPTS += -o $(RELX_OUTPUT_DIR)
+endif
+
+# Core targets.
+
+ifeq ($(IS_DEP),)
+ifneq ($(wildcard $(RELX_CONFIG)),)
+rel:: relx-rel
+
+relup:: relx-relup
+endif
+endif
+
+distclean:: distclean-relx-rel
+
+# Plugin-specific targets.
+
+$(RELX): | $(ERLANG_MK_TMP)
+ $(gen_verbose) $(call core_http_get,$(RELX),$(RELX_URL))
+ $(verbose) chmod +x $(RELX)
+
+relx-rel: $(RELX) rel-deps app
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) release
+ $(verbose) $(MAKE) relx-post-rel
+ifeq ($(RELX_TAR),1)
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) tar
+endif
+
+relx-relup: $(RELX) rel-deps app
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) release
+ $(MAKE) relx-post-rel
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) relup $(if $(filter 1,$(RELX_TAR)),tar)
+
+distclean-relx-rel:
+ $(gen_verbose) rm -rf $(RELX_OUTPUT_DIR)
+
+# Default hooks.
+relx-post-rel::
+ $(verbose) :
+
+# Run target.
+
+ifeq ($(wildcard $(RELX_CONFIG)),)
+run::
+else
+
+define get_relx_release.erl
+ {ok, Config} = file:consult("$(call core_native_path,$(RELX_CONFIG))"),
+ {release, {Name, Vsn0}, _} = lists:keyfind(release, 1, Config),
+ Vsn = case Vsn0 of
+ {cmd, Cmd} -> os:cmd(Cmd);
+ semver -> "";
+ {semver, _} -> "";
+ VsnStr -> Vsn0
+ end,
+ Extended = case lists:keyfind(extended_start_script, 1, Config) of
+ {_, true} -> "1";
+ _ -> ""
+ end,
+ io:format("~s ~s ~s", [Name, Vsn, Extended]),
+ halt(0).
+endef
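+
+# A minimal relx.config the snippet above can parse (hypothetical release):
+#   {release, {my_rel, "1.0.0"}, [my_app]}.
+#   {extended_start_script, true}.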
+
+RELX_REL := $(shell $(call erlang,$(get_relx_release.erl)))
+RELX_REL_NAME := $(word 1,$(RELX_REL))
+RELX_REL_VSN := $(word 2,$(RELX_REL))
+RELX_REL_CMD := $(if $(word 3,$(RELX_REL)),console)
+
+ifeq ($(PLATFORM),msys2)
+RELX_REL_EXT := .cmd
+endif
+
+run:: all
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) $(RELX_REL_CMD)
+
+ifdef RELOAD
+rel::
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) ping
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) \
+ eval "io:format(\"~p~n\", [c:lm()])"
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Relx targets:" \
+ " run Compile the project, build the release and run it"
+
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, M Robert Martin <rob@version2beta.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: shell
+
+# Configuration.
+
+SHELL_ERL ?= erl
+SHELL_PATHS ?= $(CURDIR)/ebin $(TEST_DIR)
+SHELL_OPTS ?=
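+
+# Example override (hypothetical): start a named node and boot the app.
+# SHELL_OPTS = -sname dev -s my_app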
+
+ALL_SHELL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(SHELL_DEPS))
+
+# Core targets
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Shell targets:" \
+ " shell Run an erlang shell with SHELL_OPTS or reasonable default"
+
+# Plugin-specific targets.
+
+$(foreach dep,$(SHELL_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+build-shell-deps:
+else
+build-shell-deps: $(ALL_SHELL_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_SHELL_DEPS_DIRS) ; do \
+ if [ -z "$(strip $(FULL))" ] && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ else \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ fi \
+ done
+endif
+
+shell:: build-shell-deps
+ $(gen_verbose) $(SHELL_ERL) -pa $(SHELL_PATHS) $(SHELL_OPTS)
+
+# Copyright 2017, Stanislaw Klekot <dozzie@jarowit.net>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-sphinx sphinx
+
+# Configuration.
+
+SPHINX_BUILD ?= sphinx-build
+SPHINX_SOURCE ?= doc
+SPHINX_CONFDIR ?=
+SPHINX_FORMATS ?= html
+SPHINX_DOCTREES ?= $(ERLANG_MK_TMP)/sphinx.doctrees
+SPHINX_OPTS ?=
+
+#sphinx_html_opts =
+#sphinx_html_output = html
+#sphinx_man_opts =
+#sphinx_man_output = man
+#sphinx_latex_opts =
+#sphinx_latex_output = latex
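+
+# Example overrides (hypothetical): build HTML and man pages, with the HTML
+# output going to a custom directory.
+# SPHINX_FORMATS = html man
+# sphinx_html_output = output/html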
+
+# Helpers.
+
+sphinx_build_0 = @echo " SPHINX" $1; $(SPHINX_BUILD) -N -q
+sphinx_build_1 = $(SPHINX_BUILD) -N
+sphinx_build_2 = set -x; $(SPHINX_BUILD)
+sphinx_build = $(sphinx_build_$(V))
+
+define sphinx.build
+$(call sphinx_build,$1) -b $1 -d $(SPHINX_DOCTREES) $(if $(SPHINX_CONFDIR),-c $(SPHINX_CONFDIR)) $(SPHINX_OPTS) $(sphinx_$1_opts) -- $(SPHINX_SOURCE) $(call sphinx.output,$1)
+
+endef
+
+define sphinx.output
+$(if $(sphinx_$1_output),$(sphinx_$1_output),$1)
+endef
+
+# Targets.
+
+ifneq ($(wildcard $(if $(SPHINX_CONFDIR),$(SPHINX_CONFDIR),$(SPHINX_SOURCE))/conf.py),)
+docs:: sphinx
+distclean:: distclean-sphinx
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Sphinx targets:" \
+ " sphinx Generate Sphinx documentation." \
+ "" \
+ "ReST sources and 'conf.py' file are expected in directory pointed by" \
+ "SPHINX_SOURCE ('doc' by default). SPHINX_FORMATS lists formats to build (only" \
+ "'html' format is generated by default); target directory can be specified by" \
+ 'setting sphinx_$${format}_output, for example: sphinx_html_output = output/html' \
+ "Additional Sphinx options can be set in SPHINX_OPTS."
+
+# Plugin-specific targets.
+
+sphinx:
+ $(foreach F,$(SPHINX_FORMATS),$(call sphinx.build,$F))
+
+distclean-sphinx:
+ $(gen_verbose) rm -rf $(filter-out $(SPHINX_SOURCE),$(foreach F,$(SPHINX_FORMATS),$(call sphinx.output,$F)))
+
+# Copyright (c) 2017, Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: show-ERL_LIBS show-ERLC_OPTS show-TEST_ERLC_OPTS
+
+show-ERL_LIBS:
+ @echo $(ERL_LIBS)
+
+show-ERLC_OPTS:
+ @$(foreach opt,$(ERLC_OPTS) -pa ebin -I include,echo "$(opt)";)
+
+show-TEST_ERLC_OPTS:
+ @$(foreach opt,$(TEST_ERLC_OPTS) -pa ebin -I include,echo "$(opt)";)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter triq,$(DEPS) $(TEST_DEPS)),triq)
+.PHONY: triq
+
+# Targets.
+
+tests:: triq
+
+define triq_check.erl
+ $(call cover.erl)
+ code:add_pathsa([
+ "$(call core_native_path,$(CURDIR)/ebin)",
+ "$(call core_native_path,$(DEPS_DIR)/*/ebin)",
+ "$(call core_native_path,$(TEST_DIR))"]),
+ try begin
+ CoverSetup(),
+ Res = case $(1) of
+ all -> [true] =:= lists:usort([triq:check(M) || M <- [$(call comma_list,$(3))]]);
+ module -> triq:check($(2));
+ function -> triq:check($(2))
+ end,
+ CoverExport("$(COVER_DATA_DIR)/triq.coverdata"),
+ Res
+ end of
+ true -> halt(0);
+ _ -> halt(1)
+ catch error:undef ->
+ io:format("Undefined property or module?~n~p~n", [erlang:get_stacktrace()]),
+ halt(0)
+ end.
+endef
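+
+# Usage mirrors the proper plugin above: "make triq" checks everything,
+# "make triq t=module" one module, "make triq t=module:property" one property.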
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+triq: test-build cover-data-dir
+ $(verbose) $(call erlang,$(call triq_check.erl,module,$(t)))
+else
+triq: test-build cover-data-dir
+ $(verbose) echo Testing $(t)/0
+ $(verbose) $(call erlang,$(call triq_check.erl,function,$(t)()))
+endif
+else
+triq: test-build cover-data-dir
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(wildcard ebin/*.beam) $(call core_find,$(TEST_DIR)/,*.beam))))))
+ $(gen_verbose) $(call erlang,$(call triq_check.erl,all,undefined,$(MODULES)))
+endif
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Erlang Solutions Ltd.
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: xref distclean-xref
+
+# Configuration.
+
+ifeq ($(XREF_CONFIG),)
+ XREFR_ARGS :=
+else
+ XREFR_ARGS := -c $(XREF_CONFIG)
+endif
+
+XREFR ?= $(CURDIR)/xrefr
+export XREFR
+
+XREFR_URL ?= https://github.com/inaka/xref_runner/releases/download/1.1.0/xrefr
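+
+# Example (hypothetical): use a custom xref_runner configuration file.
+# make xref XREF_CONFIG=xref.config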
+
+# Core targets.
+
+help::
+ $(verbose) printf '%s\n' '' \
+ 'Xref targets:' \
+ ' xref Run Xrefr using $$XREF_CONFIG as the config file, if defined'
+
+distclean:: distclean-xref
+
+# Plugin-specific targets.
+
+$(XREFR):
+ $(gen_verbose) $(call core_http_get,$(XREFR),$(XREFR_URL))
+ $(verbose) chmod +x $(XREFR)
+
+xref: deps app $(XREFR)
+ $(gen_verbose) $(XREFR) $(XREFR_ARGS)
+
+distclean-xref:
+ $(gen_verbose) rm -rf $(XREFR)
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Viktor Söderqvist <viktor@zuiderkwast.se>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+COVER_REPORT_DIR ?= cover
+COVER_DATA_DIR ?= $(COVER_REPORT_DIR)
+
+ifdef COVER
+COVER_APPS ?= $(notdir $(ALL_APPS_DIRS))
+COVER_DEPS ?=
+endif
+
+# Code coverage for Common Test.
+
+ifdef COVER
+ifdef CT_RUN
+ifneq ($(wildcard $(TEST_DIR)),)
+test-build:: $(TEST_DIR)/ct.cover.spec
+
+$(TEST_DIR)/ct.cover.spec: cover-data-dir
+ $(gen_verbose) printf "%s\n" \
+ "{incl_app, '$(PROJECT)', details}." \
+ "{incl_dirs, '$(PROJECT)', [\"$(call core_native_path,$(CURDIR)/ebin)\" \
+ $(foreach a,$(COVER_APPS),$(comma) \"$(call core_native_path,$(APPS_DIR)/$a/ebin)\") \
+ $(foreach d,$(COVER_DEPS),$(comma) \"$(call core_native_path,$(DEPS_DIR)/$d/ebin)\")]}." \
+ '{export,"$(call core_native_path,$(abspath $(COVER_DATA_DIR))/ct.coverdata)"}.' > $@
+
+CT_RUN += -cover $(TEST_DIR)/ct.cover.spec
+endif
+endif
+endif
+
+# Code coverage for other tools.
+
+ifdef COVER
+define cover.erl
+ CoverSetup = fun() ->
+ Dirs = ["$(call core_native_path,$(CURDIR)/ebin)"
+ $(foreach a,$(COVER_APPS),$(comma) "$(call core_native_path,$(APPS_DIR)/$a/ebin)")
+ $(foreach d,$(COVER_DEPS),$(comma) "$(call core_native_path,$(DEPS_DIR)/$d/ebin)")],
+ [begin
+ case filelib:is_dir(Dir) of
+ false -> false;
+ true ->
+ case cover:compile_beam_directory(Dir) of
+ {error, _} -> halt(1);
+ _ -> true
+ end
+ end
+ end || Dir <- Dirs]
+ end,
+ CoverExport = fun(Filename) -> cover:export(Filename) end,
+endef
+else
+define cover.erl
+ CoverSetup = fun() -> ok end,
+ CoverExport = fun(_) -> ok end,
+endef
+endif
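+
+# The eunit.erl, proper_check.erl and triq_check.erl snippets above all
+# start with $(call cover.erl), then call CoverSetup() before running the
+# tests and CoverExport("<tool>.coverdata") once they are done.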
+
+# Core targets
+
+ifdef COVER
+ifneq ($(COVER_REPORT_DIR),)
+tests::
+ $(verbose) $(MAKE) --no-print-directory cover-report
+endif
+
+cover-data-dir: | $(COVER_DATA_DIR)
+
+$(COVER_DATA_DIR):
+ $(verbose) mkdir -p $(COVER_DATA_DIR)
+else
+cover-data-dir:
+endif
+
+clean:: coverdata-clean
+
+ifneq ($(COVER_REPORT_DIR),)
+distclean:: cover-report-clean
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Cover targets:" \
+ " cover-report Generate a HTML coverage report from previously collected" \
+ " cover data." \
+ " all.coverdata Merge all coverdata files into all.coverdata." \
+ "" \
+ "If COVER=1 is set, coverage data is generated by the targets eunit and ct. The" \
+ "target tests additionally generates a HTML coverage report from the combined" \
+ "coverdata files from each of these testing tools. HTML reports can be disabled" \
+ "by setting COVER_REPORT_DIR to empty."
+
+# Plugin specific targets
+
+COVERDATA = $(filter-out $(COVER_DATA_DIR)/all.coverdata,$(wildcard $(COVER_DATA_DIR)/*.coverdata))
+
+.PHONY: coverdata-clean
+coverdata-clean:
+ $(gen_verbose) rm -f $(COVER_DATA_DIR)/*.coverdata $(TEST_DIR)/ct.cover.spec
+
+# Merge all coverdata files into one.
+define cover_export.erl
+ $(foreach f,$(COVERDATA),cover:import("$(f)") == ok orelse halt(1),)
+ cover:export("$(COVER_DATA_DIR)/$@"), halt(0).
+endef
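+
+# e.g. "make tests COVER=1" followed by "make all.coverdata" merges
+# eunit.coverdata, ct.coverdata, etc. into $(COVER_DATA_DIR)/all.coverdata.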
+
+all.coverdata: $(COVERDATA) cover-data-dir
+ $(gen_verbose) $(call erlang,$(cover_export.erl))
+
+# These are only defined if COVER_REPORT_DIR is non-empty. Set COVER_REPORT_DIR to
+# empty if you want the coverdata files but not the HTML report.
+ifneq ($(COVER_REPORT_DIR),)
+
+.PHONY: cover-report-clean cover-report
+
+cover-report-clean:
+ $(gen_verbose) rm -rf $(COVER_REPORT_DIR)
+ifneq ($(COVER_REPORT_DIR),$(COVER_DATA_DIR))
+ $(if $(shell ls -A $(COVER_DATA_DIR)/),,$(verbose) rmdir $(COVER_DATA_DIR))
+endif
+
+ifeq ($(COVERDATA),)
+cover-report:
+else
+
+# Modules which include eunit.hrl always contain one line without coverage
+# because eunit defines test/0 which is never called. We compensate for this.
+EUNIT_HRL_MODS = $(subst $(space),$(comma),$(shell \
+ grep -H -e '^\s*-include.*include/eunit\.hrl"' src/*.erl \
+ | sed "s/^src\/\(.*\)\.erl:.*/'\1'/" | uniq))
+
+define cover_report.erl
+ $(foreach f,$(COVERDATA),cover:import("$(f)") == ok orelse halt(1),)
+ Ms = cover:imported_modules(),
+ [cover:analyse_to_file(M, "$(COVER_REPORT_DIR)/" ++ atom_to_list(M)
+ ++ ".COVER.html", [html]) || M <- Ms],
+ Report = [begin {ok, R} = cover:analyse(M, module), R end || M <- Ms],
+ EunitHrlMods = [$(EUNIT_HRL_MODS)],
+ Report1 = [{M, {Y, case lists:member(M, EunitHrlMods) of
+ true -> N - 1; false -> N end}} || {M, {Y, N}} <- Report],
+ TotalY = lists:sum([Y || {_, {Y, _}} <- Report1]),
+ TotalN = lists:sum([N || {_, {_, N}} <- Report1]),
+ Perc = fun(Y, N) -> case Y + N of 0 -> 100; S -> round(100 * Y / S) end end,
+ TotalPerc = Perc(TotalY, TotalN),
+ {ok, F} = file:open("$(COVER_REPORT_DIR)/index.html", [write]),
+ io:format(F, "<!DOCTYPE html><html>~n"
+ "<head><meta charset=\"UTF-8\">~n"
+ "<title>Coverage report</title></head>~n"
+ "<body>~n", []),
+ io:format(F, "<h1>Coverage</h1>~n<p>Total: ~p%</p>~n", [TotalPerc]),
+ io:format(F, "<table><tr><th>Module</th><th>Coverage</th></tr>~n", []),
+ [io:format(F, "<tr><td><a href=\"~p.COVER.html\">~p</a></td>"
+ "<td>~p%</td></tr>~n",
+ [M, M, Perc(Y, N)]) || {M, {Y, N}} <- Report1],
+ How = "$(subst $(space),$(comma)$(space),$(basename $(COVERDATA)))",
+ Date = "$(shell date -u "+%Y-%m-%dT%H:%M:%SZ")",
+ io:format(F, "</table>~n"
+ "<p>Generated using ~s and erlang.mk on ~s.</p>~n"
+ "</body></html>", [How, Date]),
+ halt().
+endef
+
+cover-report:
+ $(verbose) mkdir -p $(COVER_REPORT_DIR)
+ $(gen_verbose) $(call erlang,$(cover_report.erl))
+
+endif
+endif # ifneq ($(COVER_REPORT_DIR),)
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: sfx
+
+ifdef RELX_REL
+ifdef SFX
+
+# Configuration.
+
+SFX_ARCHIVE ?= $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/$(RELX_REL_NAME)-$(RELX_REL_VSN).tar.gz
+SFX_OUTPUT_FILE ?= $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME).run
+
+# Core targets.
+
+rel:: sfx
+
+# Plugin-specific targets.
+
+define sfx_stub
+#!/bin/sh
+
+TMPDIR=`mktemp -d`
+ARCHIVE=`awk '/^__ARCHIVE_BELOW__$$/ {print NR + 1; exit 0;}' $$0`
+FILENAME=$$(basename $$0)
+REL=$${FILENAME%.*}
+
+tail -n+$$ARCHIVE $$0 | tar -xzf - -C $$TMPDIR
+
+$$TMPDIR/bin/$$REL console
+RET=$$?
+
+rm -rf $$TMPDIR
+
+exit $$RET
+
+__ARCHIVE_BELOW__
+endef
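+
+# The generated $(SFX_OUTPUT_FILE) is the stub above with the release
+# tarball appended after the __ARCHIVE_BELOW__ marker: running it unpacks
+# the archive into a temporary directory, starts the release console, and
+# exits with the console's exit code.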
+
+sfx:
+ $(verbose) $(call core_render,sfx_stub,$(SFX_OUTPUT_FILE))
+ $(gen_verbose) cat $(SFX_ARCHIVE) >> $(SFX_OUTPUT_FILE)
+ $(verbose) chmod +x $(SFX_OUTPUT_FILE)
+
+endif
+endif
+
+# Copyright (c) 2013-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# External plugins.
+
+DEP_PLUGINS ?=
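+
+# Examples (hypothetical dependency names):
+# DEP_PLUGINS = cowboy              # loads $(DEPS_DIR)/cowboy/plugins.mk
+# DEP_PLUGINS = cowboy/mk/dist.mk   # loads that specific file instead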
+
+$(foreach p,$(DEP_PLUGINS),\
+ $(eval $(if $(findstring /,$p),\
+ $(call core_dep_plugin,$p,$(firstword $(subst /, ,$p))),\
+ $(call core_dep_plugin,$p/plugins.mk,$p))))
+
+help:: help-plugins
+
+help-plugins::
+ $(verbose) :
+
+# Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015-2016, Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Fetch dependencies recursively (without building them).
+
+.PHONY: fetch-deps fetch-doc-deps fetch-rel-deps fetch-test-deps \
+ fetch-shell-deps
+
+.PHONY: $(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+fetch-deps: $(ERLANG_MK_RECURSIVE_DEPS_LIST)
+fetch-doc-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST)
+fetch-rel-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST)
+fetch-test-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST)
+fetch-shell-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+ifneq ($(SKIP_DEPS),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST):
+ $(verbose) :> $@
+else
+# By default, we fetch "normal" dependencies. They are also included no
+# matter the type of requested dependencies.
+#
+# $(ALL_DEPS_DIRS) includes $(BUILD_DEPS).
+
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_DOC_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_REL_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_TEST_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_SHELL_DEPS_DIRS)
+
+# Allow using fetch-deps together with $(DEP_TYPES) to fetch multiple
+# types of dependencies with a single target.
+ifneq ($(filter doc,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_DOC_DEPS_DIRS)
+endif
+ifneq ($(filter rel,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_REL_DEPS_DIRS)
+endif
+ifneq ($(filter test,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_TEST_DEPS_DIRS)
+endif
+ifneq ($(filter shell,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_SHELL_DEPS_DIRS)
+endif
+
+ERLANG_MK_RECURSIVE_TMP_LIST := $(abspath $(ERLANG_MK_TMP)/recursive-tmp-deps-$(shell echo $$PPID).log)
+
+$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): | $(ERLANG_MK_TMP)
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_RECURSIVE_TMP_LIST)
+endif
+ $(verbose) touch $(ERLANG_MK_RECURSIVE_TMP_LIST)
+ $(verbose) set -e; for dep in $^ ; do \
+ if ! grep -qs ^$$dep$$ $(ERLANG_MK_RECURSIVE_TMP_LIST); then \
+ echo $$dep >> $(ERLANG_MK_RECURSIVE_TMP_LIST); \
+ if grep -qs -E "^[[:blank:]]*include[[:blank:]]+(erlang\.mk|.*/erlang\.mk|.*ERLANG_MK_FILENAME.*)$$" \
+ $$dep/GNUmakefile $$dep/makefile $$dep/Makefile; then \
+ $(MAKE) -C $$dep fetch-deps \
+ IS_DEP=1 \
+ ERLANG_MK_RECURSIVE_TMP_LIST=$(ERLANG_MK_RECURSIVE_TMP_LIST); \
+ fi \
+ fi \
+ done
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) sort < $(ERLANG_MK_RECURSIVE_TMP_LIST) | \
+ uniq > $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted
+ $(verbose) cmp -s $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted $@ \
+ || mv $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted $@
+ $(verbose) rm -f $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted
+ $(verbose) rm $(ERLANG_MK_RECURSIVE_TMP_LIST)
+endif
+endif # ifneq ($(SKIP_DEPS),)
+
+# List dependencies recursively.
+
+.PHONY: list-deps list-doc-deps list-rel-deps list-test-deps \
+ list-shell-deps
+
+list-deps: $(ERLANG_MK_RECURSIVE_DEPS_LIST)
+list-doc-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST)
+list-rel-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST)
+list-test-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST)
+list-shell-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+list-deps list-doc-deps list-rel-deps list-test-deps list-shell-deps:
+ $(verbose) cat $^
+
+# Query dependencies recursively.
+
+.PHONY: query-deps query-doc-deps query-rel-deps query-test-deps \
+ query-shell-deps
+
+QUERY ?= name fetch_method repo version
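+
+# Example: "make query-deps QUERY='name repo'" prints one
+# "$(PROJECT): <name> <repo>" line per dependency, recursively.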
+
+define query_target
+$(1): $(2) clean-tmp-query.log
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(4)
+endif
+ $(verbose) $(foreach dep,$(3),\
+ echo $(PROJECT): $(foreach q,$(QUERY),$(call query_$(q),$(dep))) >> $(4) ;)
+ $(if $(filter-out query-deps,$(1)),,\
+ $(verbose) set -e; for dep in $(3) ; do \
+ if grep -qs ^$$$$dep$$$$ $(ERLANG_MK_TMP)/query.log; then \
+ :; \
+ else \
+ echo $$$$dep >> $(ERLANG_MK_TMP)/query.log; \
+ $(MAKE) -C $(DEPS_DIR)/$$$$dep $$@ QUERY="$(QUERY)" IS_DEP=1 || true; \
+ fi \
+ done)
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) touch $(4)
+ $(verbose) cat $(4)
+endif
+endef
+
+clean-tmp-query.log:
+ifeq ($(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_TMP)/query.log
+endif
+
+$(eval $(call query_target,query-deps,$(ERLANG_MK_RECURSIVE_DEPS_LIST),$(BUILD_DEPS) $(DEPS),$(ERLANG_MK_QUERY_DEPS_FILE)))
+$(eval $(call query_target,query-doc-deps,$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST),$(DOC_DEPS),$(ERLANG_MK_QUERY_DOC_DEPS_FILE)))
+$(eval $(call query_target,query-rel-deps,$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST),$(REL_DEPS),$(ERLANG_MK_QUERY_REL_DEPS_FILE)))
+$(eval $(call query_target,query-test-deps,$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST),$(TEST_DEPS),$(ERLANG_MK_QUERY_TEST_DEPS_FILE)))
+$(eval $(call query_target,query-shell-deps,$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST),$(SHELL_DEPS),$(ERLANG_MK_QUERY_SHELL_DEPS_FILE)))
diff --git a/deps/rabbitmq_consistent_hash_exchange/examples/python/README.md b/deps/rabbitmq_consistent_hash_exchange/examples/python/README.md
new file mode 100644
index 0000000000..033b1017f0
--- /dev/null
+++ b/deps/rabbitmq_consistent_hash_exchange/examples/python/README.md
@@ -0,0 +1,50 @@
+# RabbitMQ Consistent Hash Exchange Examples in Python
+
+This directory contains runnable Python examples for the [RabbitMQ Consistent Hash Exchange plugin](https://github.com/rabbitmq/rabbitmq-consistent-hash-exchange/).
+They are the same examples as in the [plugin's README](../../README.md) file.
+
+## Prerequisites
+
+The examples assume a RabbitMQ node with the `rabbitmq_consistent_hash_exchange` plugin
+enabled is running on `localhost`, and that the default user credentials and
+virtual host have not been deleted.
+
+## Dependency Installation
+
+``` sh
+pip install -r ./requirements.txt
+```
+
+## Workload Details
+
+This example uses four queues, `q1` through `q4`. The first two are bound to a
+consistent hashing exchange with a weight of `1`, while the last two use a
+weight of `2`. This means that roughly twice as many published messages will be
+routed to `q3` and `q4` as to either `q1` or `q2`.
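+
+As a minimal sketch (assuming a `pika` channel `ch` and the exchange `e`
+declared as in `example1.py`), the weight is carried by the binding's routing
+key:
+
+``` python
+# the routing key of a binding to an x-consistent-hash exchange is its weight
+ch.queue_bind(exchange="e", queue="q1", routing_key="1")  # weight 1
+ch.queue_bind(exchange="e", queue="q3", routing_key="2")  # weight 2
+```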
+
+## Running the Example
+
+``` sh
+# hashing on the routing key
+python ./example1.py
+
+# hashing on a custom header
+python ./example2.py
+
+# hashing on a message property
+python ./example3.py
+```
+
+## Inspecting Queue States
+
+To list bindings to the exchanges used by these examples (`e`, `e2`, `e3`)
+and their weights, use
+
+``` shell
+rabbitmqctl list_bindings | grep -E '^e[23]?[[:space:]]'
+```
+
+To list queues and the number of ready messages in them:
+
+``` shell
+rabbitmqctl list_queues name messages_ready
+``` \ No newline at end of file
diff --git a/deps/rabbitmq_consistent_hash_exchange/examples/python/example1.py b/deps/rabbitmq_consistent_hash_exchange/examples/python/example1.py
new file mode 100644
index 0000000000..6cf67d6c82
--- /dev/null
+++ b/deps/rabbitmq_consistent_hash_exchange/examples/python/example1.py
@@ -0,0 +1,33 @@
+#!/usr/bin/env python
+
+import pika
+import time
+
+conn = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
+ch = conn.channel()
+
+ch.exchange_declare(exchange="e", exchange_type="x-consistent-hash", durable=True)
+
+for q in ["q1", "q2", "q3", "q4"]:
+ ch.queue_declare(queue=q, durable=True)
+ ch.queue_purge(queue=q)
+
+for q in ["q1", "q2"]:
+ ch.queue_bind(exchange="e", queue=q, routing_key="1")
+
+for q in ["q3", "q4"]:
+ ch.queue_bind(exchange="e", queue=q, routing_key="2")
+
+n = 100000
+
+for rk in map(str, range(n)):
+ ch.basic_publish(exchange="e", routing_key=rk, body="")
+print("Done publishing.")
+
+print("Waiting for routing to finish...")
+# in order to keep this example simpler and focused,
+# wait for a few seconds instead of using publisher confirms and waiting for those
+time.sleep(5)
+
+print("Done.")
+conn.close()
diff --git a/deps/rabbitmq_consistent_hash_exchange/examples/python/example2.py b/deps/rabbitmq_consistent_hash_exchange/examples/python/example2.py
new file mode 100644
index 0000000000..8c1ac15d38
--- /dev/null
+++ b/deps/rabbitmq_consistent_hash_exchange/examples/python/example2.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python
+
+import pika
+import time
+
+conn = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
+ch = conn.channel()
+
+args = {u'hash-header': u'hash-on'}
+ch.exchange_declare(exchange='e2',
+ exchange_type='x-consistent-hash',
+ arguments=args,
+ durable=True)
+
+for q in ['q1', 'q2', 'q3', 'q4']:
+ ch.queue_declare(queue=q, durable=True)
+ ch.queue_purge(queue=q)
+
+for q in ['q1', 'q2']:
+ ch.queue_bind(exchange='e2', queue=q, routing_key='1')
+
+for q in ['q3', 'q4']:
+ ch.queue_bind(exchange='e2', queue=q, routing_key='2')
+
+n = 100000
+
+for rk in map(str, range(n)):
+ hdrs = {u'hash-on': rk}
+ ch.basic_publish(exchange='e2',
+ routing_key='',
+ body='',
+ properties=pika.BasicProperties(content_type='text/plain',
+ delivery_mode=2,
+ headers=hdrs))
+print('Done publishing.')
+
+print('Waiting for routing to finish...')
+# in order to keep this example simpler and focused,
+# wait for a few seconds instead of using publisher confirms and waiting for those
+time.sleep(5)
+
+print('Done.')
+conn.close()
diff --git a/deps/rabbitmq_consistent_hash_exchange/examples/python/example3.py b/deps/rabbitmq_consistent_hash_exchange/examples/python/example3.py
new file mode 100644
index 0000000000..0b7450171c
--- /dev/null
+++ b/deps/rabbitmq_consistent_hash_exchange/examples/python/example3.py
@@ -0,0 +1,42 @@
+#!/usr/bin/env python
+
+import pika
+import time
+
+conn = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
+ch = conn.channel()
+
+args = {u'hash-property': u'message_id'}
+ch.exchange_declare(exchange='e3',
+ exchange_type='x-consistent-hash',
+ arguments=args,
+ durable=True)
+
+for q in ['q1', 'q2', 'q3', 'q4']:
+ ch.queue_declare(queue=q, durable=True)
+ ch.queue_purge(queue=q)
+
+for q in ['q1', 'q2']:
+ ch.queue_bind(exchange='e3', queue=q, routing_key='1')
+
+for q in ['q3', 'q4']:
+ ch.queue_bind(exchange='e3', queue=q, routing_key='2')
+
+n = 100000
+
+for rk in map(str, range(n)):
+ ch.basic_publish(exchange='e3',
+ routing_key='',
+ body='',
+ properties=pika.BasicProperties(content_type='text/plain',
+ delivery_mode=2,
+ message_id=rk))
+print('Done publishing.')
+
+print('Waiting for routing to finish...')
+# in order to keep this example simpler and focused,
+# wait for a few seconds instead of using publisher confirms and waiting for those
+time.sleep(5)
+
+print('Done.')
+conn.close()
diff --git a/deps/rabbitmq_consistent_hash_exchange/examples/python/requirements.txt b/deps/rabbitmq_consistent_hash_exchange/examples/python/requirements.txt
new file mode 100644
index 0000000000..abccec9e65
--- /dev/null
+++ b/deps/rabbitmq_consistent_hash_exchange/examples/python/requirements.txt
@@ -0,0 +1 @@
+pika>=1.1.0
diff --git a/deps/rabbitmq_consistent_hash_exchange/examples/ruby/Gemfile b/deps/rabbitmq_consistent_hash_exchange/examples/ruby/Gemfile
new file mode 100644
index 0000000000..0c86b3b7f9
--- /dev/null
+++ b/deps/rabbitmq_consistent_hash_exchange/examples/ruby/Gemfile
@@ -0,0 +1,5 @@
+# encoding: utf-8
+
+source "https://rubygems.org"
+
+gem "bunny", ">= 2.15.0", "< 3.0"
diff --git a/deps/rabbitmq_consistent_hash_exchange/examples/ruby/Gemfile.lock b/deps/rabbitmq_consistent_hash_exchange/examples/ruby/Gemfile.lock
new file mode 100644
index 0000000000..04835be092
--- /dev/null
+++ b/deps/rabbitmq_consistent_hash_exchange/examples/ruby/Gemfile.lock
@@ -0,0 +1,15 @@
+GEM
+ remote: https://rubygems.org/
+ specs:
+ amq-protocol (2.3.1)
+ bunny (2.15.0)
+ amq-protocol (~> 2.3, >= 2.3.1)
+
+PLATFORMS
+ ruby
+
+DEPENDENCIES
+ bunny (>= 2.15.0, < 3.0)
+
+BUNDLED WITH
+ 2.1.4
diff --git a/deps/rabbitmq_consistent_hash_exchange/examples/ruby/README.md b/deps/rabbitmq_consistent_hash_exchange/examples/ruby/README.md
new file mode 100644
index 0000000000..e50acf4b28
--- /dev/null
+++ b/deps/rabbitmq_consistent_hash_exchange/examples/ruby/README.md
@@ -0,0 +1,50 @@
+# RabbitMQ Consistent Hash Exchange Examples in Ruby
+
+This directory contains runnable Ruby examples for the [RabbitMQ Consistent Hash Exchange plugin](https://github.com/rabbitmq/rabbitmq-consistent-hash-exchange/).
+They are the same examples as in the [plugin's README](../../README.md) file.
+
+## Prerequisites
+
+The examples assume a RabbitMQ node with the `rabbitmq_consistent_hash_exchange` plugin
+enabled is running on `localhost`, and that the default user credentials and
+virtual host have not been deleted.
+
+## Dependency Installation
+
+``` sh
+bundle install
+```
+
+## Workload Details
+
+This example uses four queues, `q1` through `q4`. The first two are bound to a
+consistent hashing exchange with a weight of `1`, while the last two use a
+weight of `2`. This means that roughly twice as many published messages will be
+routed to `q3` and `q4` as to either `q1` or `q2`.
+
+## Running the Example
+
+``` sh
+# hashing on the routing key
+bundle exec ruby ./example1.rb
+
+# hashing on a custom header
+bundle exec ruby ./example2.rb
+
+# hashing on a message property
+bundle exec ruby ./example3.rb
+```
+
+## Inspecting Queue States
+
+To list bindings to the exchanges used by these examples (`chx`, `x2`, `x3`)
+and their weights, use
+
+``` shell
+rabbitmqctl list_bindings | grep -E '^(chx|x2|x3)[[:space:]]'
+```
+
+To list queues and the number of ready messages in them:
+
+``` shell
+rabbitmqctl list_queues name messages_ready
+``` \ No newline at end of file
diff --git a/deps/rabbitmq_consistent_hash_exchange/examples/ruby/example1.rb b/deps/rabbitmq_consistent_hash_exchange/examples/ruby/example1.rb
new file mode 100644
index 0000000000..e6d0a3d201
--- /dev/null
+++ b/deps/rabbitmq_consistent_hash_exchange/examples/ruby/example1.rb
@@ -0,0 +1,35 @@
+#!/usr/bin/env ruby
+
+require 'bunny'
+
+conn = Bunny.new
+conn.start
+
+ch = conn.create_channel
+ch.confirm_select
+
+q1 = ch.queue("q1", durable: true)
+q2 = ch.queue("q2", durable: true)
+q3 = ch.queue("q3", durable: true)
+q4 = ch.queue("q4", durable: true)
+
+[q1, q2, q3, q4].each(&:purge)
+
+x = ch.exchange("chx", type: "x-consistent-hash", durable: true)
+
+[q1, q2].each { |q| q.bind(x, routing_key: "1") }
+[q3, q4].each { |q| q.bind(x, routing_key: "2") }
+
+n = 100_000
+n.times do |i|
+ x.publish(i.to_s, routing_key: i.to_s)
+end
+
+ch.wait_for_confirms
+puts "Done publishing!"
+
+# wait for queue stats to be emitted so that management UI numbers
+# are up-to-date
+sleep 5
+conn.close
+puts "Done"
diff --git a/deps/rabbitmq_consistent_hash_exchange/examples/ruby/example2.rb b/deps/rabbitmq_consistent_hash_exchange/examples/ruby/example2.rb
new file mode 100644
index 0000000000..65536a9e47
--- /dev/null
+++ b/deps/rabbitmq_consistent_hash_exchange/examples/ruby/example2.rb
@@ -0,0 +1,37 @@
+#!/usr/bin/env ruby
+
+require 'bundler'
+Bundler.setup(:default, :test)
+require 'bunny'
+
+conn = Bunny.new
+conn.start
+
+ch = conn.create_channel
+ch.confirm_select
+
+q1 = ch.queue("q1", durable: true)
+q2 = ch.queue("q2", durable: true)
+q3 = ch.queue("q3", durable: true)
+q4 = ch.queue("q4", durable: true)
+
+[q1, q2, q3, q4].each(&:purge)
+
+x = ch.exchange("x2", type: "x-consistent-hash", durable: true, arguments: {"hash-header" => "hash-on"})
+
+[q1, q2].each { |q| q.bind(x, routing_key: "1") }
+[q3, q4].each { |q| q.bind(x, routing_key: "2") }
+
+n = 100_000
+(0..n).map(&:to_s).each do |i|
+ x.publish(i, routing_key: rand.to_s, headers: {"hash-on" => i})
+end
+
+ch.wait_for_confirms
+puts "Done publishing!"
+
+# wait for queue stats to be emitted so that management UI numbers
+# are up-to-date
+sleep 5
+conn.close
+puts "Done"
diff --git a/deps/rabbitmq_consistent_hash_exchange/examples/ruby/example3.rb b/deps/rabbitmq_consistent_hash_exchange/examples/ruby/example3.rb
new file mode 100644
index 0000000000..dbd7956cf7
--- /dev/null
+++ b/deps/rabbitmq_consistent_hash_exchange/examples/ruby/example3.rb
@@ -0,0 +1,37 @@
+#!/usr/bin/env ruby
+
+require 'bundler'
+Bundler.setup(:default, :test)
+require 'bunny'
+
+conn = Bunny.new
+conn.start
+
+ch = conn.create_channel
+ch.confirm_select
+
+q1 = ch.queue("q1", durable: true)
+q2 = ch.queue("q2", durable: true)
+q3 = ch.queue("q3", durable: true)
+q4 = ch.queue("q4", durable: true)
+
+[q1, q2, q3, q4].each(&:purge)
+
+x = ch.exchange("x3", type: "x-consistent-hash", durable: true, arguments: {"hash-property" => "message_id"})
+
+[q1, q2].each { |q| q.bind(x, routing_key: "1") }
+[q3, q4].each { |q| q.bind(x, routing_key: "2") }
+
+n = 100_000
+(0..n).map(&:to_s).each do |i|
+ x.publish(i, routing_key: rand.to_s, message_id: i)
+end
+
+ch.wait_for_confirms
+puts "Done publishing!"
+
+# wait for queue stats to be emitted so that management UI numbers
+# are up-to-date
+sleep 5
+conn.close
+puts "Done"
diff --git a/deps/rabbitmq_consistent_hash_exchange/include/rabbitmq_consistent_hash_exchange.hrl b/deps/rabbitmq_consistent_hash_exchange/include/rabbitmq_consistent_hash_exchange.hrl
new file mode 100644
index 0000000000..3fac35eb5e
--- /dev/null
+++ b/deps/rabbitmq_consistent_hash_exchange/include/rabbitmq_consistent_hash_exchange.hrl
@@ -0,0 +1,7 @@
+-record(chx_hash_ring, {
+ %% a resource
+ exchange,
+ %% a map of bucket => queue | exchange
+ bucket_map,
+ next_bucket_number
+}).
diff --git a/deps/rabbitmq_consistent_hash_exchange/rabbitmq-components.mk b/deps/rabbitmq_consistent_hash_exchange/rabbitmq-components.mk
new file mode 100644
index 0000000000..b2a3be8b35
--- /dev/null
+++ b/deps/rabbitmq_consistent_hash_exchange/rabbitmq-components.mk
@@ -0,0 +1,359 @@
+ifeq ($(.DEFAULT_GOAL),)
+# Define the default goal as `all` because this file defines some targets
+# before the inclusion of erlang.mk, which would otherwise make the wrong
+# target the default.
+.DEFAULT_GOAL = all
+endif
+
+# PROJECT_VERSION defaults to:
+# 1. the version exported by rabbitmq-server-release;
+# 2. the version stored in `git-revisions.txt`, if it exists;
+# 3. a version based on git-describe(1), if it is a Git clone;
+# 4. 0.0.0
+
+PROJECT_VERSION := $(RABBITMQ_VERSION)
+
+ifeq ($(PROJECT_VERSION),)
+PROJECT_VERSION := $(shell \
+if test -f git-revisions.txt; then \
+ head -n1 git-revisions.txt | \
+ awk '{print $$$(words $(PROJECT_DESCRIPTION) version);}'; \
+else \
+ (git describe --dirty --abbrev=7 --tags --always --first-parent \
+ 2>/dev/null || echo rabbitmq_v0_0_0) | \
+ sed -e 's/^rabbitmq_v//' -e 's/^v//' -e 's/_/./g' -e 's/-/+/' \
+ -e 's/-/./g'; \
+fi)
+endif
+
+# --------------------------------------------------------------------
+# RabbitMQ components.
+# --------------------------------------------------------------------
+
+# For RabbitMQ repositories, we want to check out branches which match
+# the parent project. For instance, if the parent project is on a
+# release tag, dependencies must be on the same release tag. If the
+# parent project is on a topic branch, dependencies must be on the same
+# topic branch, or fall back to `stable` or `master`, whichever was the
+# base of the topic branch.
+
+dep_amqp_client = git_rmq rabbitmq-erlang-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_amqp10_client = git_rmq rabbitmq-amqp1.0-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_amqp10_common = git_rmq rabbitmq-amqp1.0-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbit = git_rmq rabbitmq-server $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbit_common = git_rmq rabbitmq-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_amqp1_0 = git_rmq rabbitmq-amqp1.0 $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_amqp = git_rmq rabbitmq-auth-backend-amqp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_cache = git_rmq rabbitmq-auth-backend-cache $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_http = git_rmq rabbitmq-auth-backend-http $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_ldap = git_rmq rabbitmq-auth-backend-ldap $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_oauth2 = git_rmq rabbitmq-auth-backend-oauth2 $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_mechanism_ssl = git_rmq rabbitmq-auth-mechanism-ssl $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_aws = git_rmq rabbitmq-aws $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_boot_steps_visualiser = git_rmq rabbitmq-boot-steps-visualiser $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_cli = git_rmq rabbitmq-cli $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_codegen = git_rmq rabbitmq-codegen $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_consistent_hash_exchange = git_rmq rabbitmq-consistent-hash-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_ct_client_helpers = git_rmq rabbitmq-ct-client-helpers $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_ct_helpers = git_rmq rabbitmq-ct-helpers $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_delayed_message_exchange = git_rmq rabbitmq-delayed-message-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_dotnet_client = git_rmq rabbitmq-dotnet-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_event_exchange = git_rmq rabbitmq-event-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_federation = git_rmq rabbitmq-federation $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_federation_management = git_rmq rabbitmq-federation-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_java_client = git_rmq rabbitmq-java-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_client = git_rmq rabbitmq-jms-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_cts = git_rmq rabbitmq-jms-cts $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_topic_exchange = git_rmq rabbitmq-jms-topic-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_lvc_exchange = git_rmq rabbitmq-lvc-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management = git_rmq rabbitmq-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_agent = git_rmq rabbitmq-management-agent $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_exchange = git_rmq rabbitmq-management-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_themes = git_rmq rabbitmq-management-themes $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_message_timestamp = git_rmq rabbitmq-message-timestamp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_metronome = git_rmq rabbitmq-metronome $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_mqtt = git_rmq rabbitmq-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_objc_client = git_rmq rabbitmq-objc-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_aws = git_rmq rabbitmq-peer-discovery-aws $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_common = git_rmq rabbitmq-peer-discovery-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_consul = git_rmq rabbitmq-peer-discovery-consul $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_etcd = git_rmq rabbitmq-peer-discovery-etcd $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_k8s = git_rmq rabbitmq-peer-discovery-k8s $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_prometheus = git_rmq rabbitmq-prometheus $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_random_exchange = git_rmq rabbitmq-random-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_recent_history_exchange = git_rmq rabbitmq-recent-history-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_routing_node_stamp = git_rmq rabbitmq-routing-node-stamp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_rtopic_exchange = git_rmq rabbitmq-rtopic-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_server_release = git_rmq rabbitmq-server-release $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_sharding = git_rmq rabbitmq-sharding $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_shovel = git_rmq rabbitmq-shovel $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_shovel_management = git_rmq rabbitmq-shovel-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_stomp = git_rmq rabbitmq-stomp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_stream = git_rmq rabbitmq-stream $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_toke = git_rmq rabbitmq-toke $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_top = git_rmq rabbitmq-top $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_tracing = git_rmq rabbitmq-tracing $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_trust_store = git_rmq rabbitmq-trust-store $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_test = git_rmq rabbitmq-test $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_dispatch = git_rmq rabbitmq-web-dispatch $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_stomp = git_rmq rabbitmq-web-stomp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_stomp_examples = git_rmq rabbitmq-web-stomp-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt = git_rmq rabbitmq-web-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt_examples = git_rmq rabbitmq-web-mqtt-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_website = git_rmq rabbitmq-website $(current_rmq_ref) $(base_rmq_ref) live master
+dep_toke = git_rmq toke $(current_rmq_ref) $(base_rmq_ref) master
+
+dep_rabbitmq_public_umbrella = git_rmq rabbitmq-public-umbrella $(current_rmq_ref) $(base_rmq_ref) master
+
+# Third-party dependencies version pinning.
+#
+# We do this in this file, which is copied into all projects, to ensure
+# all projects use the same versions. It avoids conflicts and makes it
+# possible to work with rabbitmq-public-umbrella.
+
+dep_accept = hex 0.3.5
+dep_cowboy = hex 2.8.0
+dep_cowlib = hex 2.9.1
+dep_jsx = hex 2.11.0
+dep_lager = hex 3.8.0
+dep_prometheus = git https://github.com/deadtrickster/prometheus.erl.git master
+dep_ra = git https://github.com/rabbitmq/ra.git master
+dep_ranch = hex 1.7.1
+dep_recon = hex 2.5.1
+dep_observer_cli = hex 1.5.4
+dep_stdout_formatter = hex 0.2.4
+dep_sysmon_handler = hex 1.3.0
+
+RABBITMQ_COMPONENTS = amqp_client \
+ amqp10_common \
+ amqp10_client \
+ rabbit \
+ rabbit_common \
+ rabbitmq_amqp1_0 \
+ rabbitmq_auth_backend_amqp \
+ rabbitmq_auth_backend_cache \
+ rabbitmq_auth_backend_http \
+ rabbitmq_auth_backend_ldap \
+ rabbitmq_auth_backend_oauth2 \
+ rabbitmq_auth_mechanism_ssl \
+ rabbitmq_aws \
+ rabbitmq_boot_steps_visualiser \
+ rabbitmq_cli \
+ rabbitmq_codegen \
+ rabbitmq_consistent_hash_exchange \
+ rabbitmq_ct_client_helpers \
+ rabbitmq_ct_helpers \
+ rabbitmq_delayed_message_exchange \
+ rabbitmq_dotnet_client \
+ rabbitmq_event_exchange \
+ rabbitmq_federation \
+ rabbitmq_federation_management \
+ rabbitmq_java_client \
+ rabbitmq_jms_client \
+ rabbitmq_jms_cts \
+ rabbitmq_jms_topic_exchange \
+ rabbitmq_lvc_exchange \
+ rabbitmq_management \
+ rabbitmq_management_agent \
+ rabbitmq_management_exchange \
+ rabbitmq_management_themes \
+ rabbitmq_message_timestamp \
+ rabbitmq_metronome \
+ rabbitmq_mqtt \
+ rabbitmq_objc_client \
+ rabbitmq_peer_discovery_aws \
+ rabbitmq_peer_discovery_common \
+ rabbitmq_peer_discovery_consul \
+ rabbitmq_peer_discovery_etcd \
+ rabbitmq_peer_discovery_k8s \
+ rabbitmq_prometheus \
+ rabbitmq_random_exchange \
+ rabbitmq_recent_history_exchange \
+ rabbitmq_routing_node_stamp \
+ rabbitmq_rtopic_exchange \
+ rabbitmq_server_release \
+ rabbitmq_sharding \
+ rabbitmq_shovel \
+ rabbitmq_shovel_management \
+ rabbitmq_stomp \
+ rabbitmq_stream \
+ rabbitmq_toke \
+ rabbitmq_top \
+ rabbitmq_tracing \
+ rabbitmq_trust_store \
+ rabbitmq_web_dispatch \
+ rabbitmq_web_mqtt \
+ rabbitmq_web_mqtt_examples \
+ rabbitmq_web_stomp \
+ rabbitmq_web_stomp_examples \
+ rabbitmq_website
+
+# By default, Erlang.mk does not rebuild dependencies once they have
+# been compiled, except for those listed in the `$(FORCE_REBUILD)`
+# variable.
+#
+# We want all RabbitMQ components to always be rebuilt: this eases
+# the work on several components at the same time.
+
+FORCE_REBUILD = $(RABBITMQ_COMPONENTS)
+
+# Several components have a custom erlang.mk/build.config, mainly
+# to disable eunit. Therefore, we can't use the top-level project's
+# erlang.mk copy.
+NO_AUTOPATCH += $(RABBITMQ_COMPONENTS)
+
+ifeq ($(origin current_rmq_ref),undefined)
+ifneq ($(wildcard .git),)
+current_rmq_ref := $(shell (\
+ ref=$$(LANG=C git branch --list | awk '/^\* \(.*detached / {ref=$$0; sub(/.*detached [^ ]+ /, "", ref); sub(/\)$$/, "", ref); print ref; exit;} /^\* / {ref=$$0; sub(/^\* /, "", ref); print ref; exit}');\
+ if test "$$(git rev-parse --short HEAD)" != "$$ref"; then echo "$$ref"; fi))
+else
+current_rmq_ref := master
+endif
+endif
+export current_rmq_ref
+
+ifeq ($(origin base_rmq_ref),undefined)
+ifneq ($(wildcard .git),)
+possible_base_rmq_ref := master
+ifeq ($(possible_base_rmq_ref),$(current_rmq_ref))
+base_rmq_ref := $(current_rmq_ref)
+else
+base_rmq_ref := $(shell \
+ (git rev-parse --verify -q master >/dev/null && \
+ git rev-parse --verify -q $(possible_base_rmq_ref) >/dev/null && \
+ git merge-base --is-ancestor $$(git merge-base master HEAD) $(possible_base_rmq_ref) && \
+ echo $(possible_base_rmq_ref)) || \
+ echo master)
+endif
+else
+base_rmq_ref := master
+endif
+endif
+export base_rmq_ref
+
+# Repository URL selection.
+#
+# First, we infer other components' location from the current project
+# repository URL, if it's a Git repository:
+# - We take the "origin" remote URL as the base
+# - The current project name and repository name are replaced by the
+# target's properties:
+# eg. rabbitmq-common is replaced by rabbitmq-codegen
+# eg. rabbit_common is replaced by rabbitmq_codegen
+#
+# If cloning from this computed location fails, we fall back to the
+# RabbitMQ upstream, which is on GitHub.
+
+# Macro to transform eg. "rabbit_common" to "rabbitmq-common".
+rmq_cmp_repo_name = $(word 2,$(dep_$(1)))
+
+# Upstream URL for the current project.
+RABBITMQ_COMPONENT_REPO_NAME := $(call rmq_cmp_repo_name,$(PROJECT))
+RABBITMQ_UPSTREAM_FETCH_URL ?= https://github.com/rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git
+RABBITMQ_UPSTREAM_PUSH_URL ?= git@github.com:rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git
+
+# Current URL for the current project. If this is not a Git clone,
+# default to the upstream Git repository.
+ifneq ($(wildcard .git),)
+git_origin_fetch_url := $(shell git config remote.origin.url)
+git_origin_push_url := $(shell git config remote.origin.pushurl || git config remote.origin.url)
+RABBITMQ_CURRENT_FETCH_URL ?= $(git_origin_fetch_url)
+RABBITMQ_CURRENT_PUSH_URL ?= $(git_origin_push_url)
+else
+RABBITMQ_CURRENT_FETCH_URL ?= $(RABBITMQ_UPSTREAM_FETCH_URL)
+RABBITMQ_CURRENT_PUSH_URL ?= $(RABBITMQ_UPSTREAM_PUSH_URL)
+endif
+
+# Macro to replace the following pattern:
+# 1. /foo.git -> /bar.git
+# 2. /foo -> /bar
+# 3. /foo/ -> /bar/
+subst_repo_name = $(patsubst %/$(1)/%,%/$(2)/%,$(patsubst %/$(1),%/$(2),$(patsubst %/$(1).git,%/$(2).git,$(3))))
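+
+# e.g. $(call subst_repo_name,rabbit_common,rabbitmq_codegen,
+#        https://example.com/rabbit_common.git)
+#      yields https://example.com/rabbitmq_codegen.git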
+
+# Macro to replace both the project's name (eg. "rabbit_common") and
+# repository name (eg. "rabbitmq-common") by the target's equivalent.
+#
+# This macro is kept on one line because we don't want whitespace in
+# the returned value, as it's used in $(dep_fetch_git_rmq) in a shell
+# single-quoted string.
+dep_rmq_repo = $(if $(dep_$(2)),$(call subst_repo_name,$(PROJECT),$(2),$(call subst_repo_name,$(RABBITMQ_COMPONENT_REPO_NAME),$(call rmq_cmp_repo_name,$(2)),$(1))),$(pkg_$(1)_repo))
+
+dep_rmq_commits = $(if $(dep_$(1)), \
+ $(wordlist 3,$(words $(dep_$(1))),$(dep_$(1))), \
+ $(pkg_$(1)_commit))
+
+define dep_fetch_git_rmq
+ fetch_url1='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_FETCH_URL),$(1))'; \
+ fetch_url2='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_FETCH_URL),$(1))'; \
+ if test "$$$$fetch_url1" != '$(RABBITMQ_CURRENT_FETCH_URL)' && \
+ git clone -q -n -- "$$$$fetch_url1" $(DEPS_DIR)/$(call dep_name,$(1)); then \
+ fetch_url="$$$$fetch_url1"; \
+ push_url='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_PUSH_URL),$(1))'; \
+ elif git clone -q -n -- "$$$$fetch_url2" $(DEPS_DIR)/$(call dep_name,$(1)); then \
+ fetch_url="$$$$fetch_url2"; \
+ push_url='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_PUSH_URL),$(1))'; \
+ fi; \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && ( \
+ $(foreach ref,$(call dep_rmq_commits,$(1)), \
+ git checkout -q $(ref) >/dev/null 2>&1 || \
+ ) \
+ (echo "error: no valid pathspec among: $(call dep_rmq_commits,$(1))" \
+ 1>&2 && false) ) && \
+ (test "$$$$fetch_url" = "$$$$push_url" || \
+ git remote set-url --push origin "$$$$push_url")
+endef
+
+# --------------------------------------------------------------------
+# Component distribution.
+# --------------------------------------------------------------------
+
+list-dist-deps::
+ @:
+
+prepare-dist::
+ @:
+
+# --------------------------------------------------------------------
+# Umbrella-specific settings.
+# --------------------------------------------------------------------
+
+# If the top-level project is a RabbitMQ component, we override
+# $(DEPS_DIR) for this project to point to the top-level's one.
+#
+# We also verify that the guessed DEPS_DIR is actually named `deps`,
+# to rule out any situation where it is a coincidence that we found a
+# `rabbitmq-components.mk` in upper directories.
+
+possible_deps_dir_1 = $(abspath ..)
+possible_deps_dir_2 = $(abspath ../../..)
+
+ifeq ($(notdir $(possible_deps_dir_1)),deps)
+ifneq ($(wildcard $(possible_deps_dir_1)/../rabbitmq-components.mk),)
+deps_dir_overriden = 1
+DEPS_DIR ?= $(possible_deps_dir_1)
+DISABLE_DISTCLEAN = 1
+endif
+endif
+
+ifeq ($(deps_dir_overriden),)
+ifeq ($(notdir $(possible_deps_dir_2)),deps)
+ifneq ($(wildcard $(possible_deps_dir_2)/../rabbitmq-components.mk),)
+deps_dir_overriden = 1
+DEPS_DIR ?= $(possible_deps_dir_2)
+DISABLE_DISTCLEAN = 1
+endif
+endif
+endif
+
+ifneq ($(wildcard UMBRELLA.md),)
+DISABLE_DISTCLEAN = 1
+endif
+
+# We disable `make distclean` so $(DEPS_DIR) is not accidentally removed.
+
+ifeq ($(DISABLE_DISTCLEAN),1)
+ifneq ($(filter distclean distclean-deps,$(MAKECMDGOALS)),)
+SKIP_DEPS = 1
+endif
+endif
diff --git a/deps/rabbitmq_consistent_hash_exchange/src/Elixir.RabbitMQ.CLI.Diagnostics.Commands.ConsistentHashExchangeRingStateCommand.erl b/deps/rabbitmq_consistent_hash_exchange/src/Elixir.RabbitMQ.CLI.Diagnostics.Commands.ConsistentHashExchangeRingStateCommand.erl
new file mode 100644
index 0000000000..8f61b9726c
--- /dev/null
+++ b/deps/rabbitmq_consistent_hash_exchange/src/Elixir.RabbitMQ.CLI.Diagnostics.Commands.ConsistentHashExchangeRingStateCommand.erl
@@ -0,0 +1,134 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module('Elixir.RabbitMQ.CLI.Diagnostics.Commands.ConsistentHashExchangeRingStateCommand').
+
+-include_lib("rabbit_common/include/resource.hrl").
+-include("rabbitmq_consistent_hash_exchange.hrl").
+
+-behaviour('Elixir.RabbitMQ.CLI.CommandBehaviour').
+
+-export([
+ usage/0,
+ usage_additional/0,
+ usage_doc_guides/0,
+ flags/0,
+ validate/2,
+ merge_defaults/2,
+ banner/2,
+ run/2,
+ switches/0,
+ aliases/0,
+ output/2,
+ scopes/0,
+ formatter/0,
+ help_section/0,
+ description/0
+ ]).
+
+-import(rabbit_data_coercion, [to_binary/1]).
+
+-define(NOT_FOUND_MESSAGE, <<"Exchange does not exist or is of a different type">>).
+
+%%----------------------------------------------------------------------------
+%% Callbacks
+%%----------------------------------------------------------------------------
+
+scopes() ->
+ ['diagnostics'].
+
+switches() ->
+ [].
+
+aliases() ->
+ [].
+
+flags() ->
+ [].
+
+merge_defaults(Args, Opts) ->
+ {Args, maps:merge(#{vhost => <<"/">>}, Opts)}.
+
+validate([], _Opts) ->
+ {validation_failure, not_enough_args};
+validate([_Exchange], _Opts) ->
+ ok;
+validate(_, _Opts) ->
+ {validation_failure, too_many_args}.
+
+run([Exchange], #{node := Node, vhost := VirtualHost}) ->
+ case rabbit_misc:rpc_call(Node, rabbit_exchange_type_consistent_hash, ring_state, [VirtualHost, Exchange]) of
+ {badrpc, _} = Error ->
+ Error;
+ {badrpc, _, _} = Error ->
+ Error;
+ {error, _} = Error ->
+ Error;
+ {ok, State} ->
+ {ok, State}
+ end.
+
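+%% With the JSON formatter, the success payload is a map rendered along
+%% the lines of (values below are illustrative):
+%%   {"result":"ok","node":"rabbit@host","exchange":"chx",
+%%    "message":"Consistent hashing ring state for ...",
+%%    "buckets":{"0":"some-queue","1":"another-queue"}}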
+output({error, not_found}, #{node := Node, formatter := <<"json">>}) ->
+ {error, #{
+ <<"result">> => <<"error">>,
+ <<"node">> => Node,
+ <<"message">> => ?NOT_FOUND_MESSAGE
+ }};
+output({ok, #chx_hash_ring{exchange = Resource = #resource{name = Exchange}, bucket_map = Buckets}}, #{node := Node, formatter := <<"json">>}) ->
+ {ok, #{
+ <<"result">> => <<"ok">>,
+ <<"node">> => Node,
+ <<"exchange">> => Exchange,
+ <<"message">> => to_binary(rabbit_misc:format("Consistent hashing ring state for ~s",
+ [rabbit_misc:rs(Resource)])),
+ <<"buckets">> =>
+ maps:from_list(lists:map(fun ({Key, #resource{kind = queue, name = Queue}}) ->
+ {to_binary(Key), Queue}
+ end, maps:to_list(Buckets)))
+ }};
+output({error, not_found}, _Opts) ->
+ {error, 'Elixir.RabbitMQ.CLI.Core.ExitCodes':exit_dataerr(), ?NOT_FOUND_MESSAGE};
+output({ok, #chx_hash_ring{bucket_map = Buckets0}}, _Opts) ->
+ Buckets = maps:map(fun(_Key, #resource{kind = queue, name = Queue}) -> Queue end, Buckets0),
+    {ok, ring_state_lines(Buckets)};
+output(Result, _Opts) ->
+ 'Elixir.RabbitMQ.CLI.DefaultOutput':output(Result).
+
+usage() ->
+ <<"consistent_hash_exchange_ring_state <exchange>">>.
+
+usage_additional() ->
+ [].
+
+formatter() ->
+ 'Elixir.RabbitMQ.CLI.Formatters.String'.
+
+usage_doc_guides() ->
+ [].
+
+help_section() ->
+ {plugin, consistent_hash_exchange}.
+
+description() ->
+ <<"Displays consistent hashing exchange ring state">>.
+
+banner([Exchange], #{vhost := VirtualHost}) ->
+ erlang:iolist_to_binary([<<"Inspecting consistent hashing ring state for exchange ">>,
+ to_binary(Exchange),
+ <<" in virtual host ">>,
+ to_binary(rabbit_misc:format("'~s'", [VirtualHost])),
+ <<"...">>]).
+
+%%
+%% Implementation
+%%
+
+ring_state_lines(Buckets) ->
+ Fun = fun (Key, QName, Acc) ->
+ [to_binary(rabbit_misc:format("Ring index: ~b, queue: '~s'~n", [Key, QName])) | Acc]
+ end,
+ lists:usort(maps:fold(Fun, [], Buckets)).
diff --git a/deps/rabbitmq_consistent_hash_exchange/src/rabbit_exchange_type_consistent_hash.erl b/deps/rabbitmq_consistent_hash_exchange/src/rabbit_exchange_type_consistent_hash.erl
new file mode 100644
index 0000000000..d33a79db2d
--- /dev/null
+++ b/deps/rabbitmq_consistent_hash_exchange/src/rabbit_exchange_type_consistent_hash.erl
@@ -0,0 +1,343 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_exchange_type_consistent_hash).
+-include_lib("rabbit_common/include/rabbit.hrl").
+-include_lib("rabbit_common/include/rabbit_framing.hrl").
+
+-include("rabbitmq_consistent_hash_exchange.hrl").
+
+-behaviour(rabbit_exchange_type).
+
+-export([description/0, serialise_events/0, route/2]).
+-export([validate/1, validate_binding/2,
+ create/2, delete/3, policy_changed/2,
+ add_binding/3, remove_bindings/3, assert_args_equivalence/2]).
+-export([init/0]).
+-export([info/1, info/2]).
+-export([ring_state/2]).
+
+-rabbit_boot_step(
+ {rabbit_exchange_type_consistent_hash_registry,
+ [{description, "exchange type x-consistent-hash: registry"},
+ {mfa, {rabbit_registry, register,
+ [exchange, <<"x-consistent-hash">>, ?MODULE]}},
+ {requires, rabbit_registry},
+ {enables, kernel_ready},
+ {cleanup, {rabbit_registry, unregister,
+ [exchange, <<"x-consistent-hash">>]}}]}).
+
+-rabbit_boot_step(
+ {rabbit_exchange_type_consistent_hash_mnesia,
+ [{description, "exchange type x-consistent-hash: shared state"},
+ {mfa, {?MODULE, init, []}},
+ {requires, database},
+ {enables, external_infrastructure}]}).
+
+%% This data model allows for efficient routing and exchange deletion
+%% at the cost of less efficient (linear) binding management.
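+%%
+%% Each exchange maps to a single #chx_hash_ring{} row whose bucket_map
+%% is a map from bucket number to destination queue: routing is one hash
+%% computation plus a map lookup, while adding or removing a binding has
+%% to rewrite the map.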
+
+-define(HASH_RING_STATE_TABLE, rabbit_exchange_type_consistent_hash_ring_state).
+
+-define(PROPERTIES, [<<"correlation_id">>, <<"message_id">>, <<"timestamp">>]).
+
+%% OTP 19.3 does not support exs1024s
+-define(SEED_ALGORITHM, exs1024).
+
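+%% Creates the ring state table in RAM; there is no disc copy because the
+%% state is rebuilt from recovered durable exchanges and their bindings by
+%% recover/0 below.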
+init() ->
+ mnesia:create_table(?HASH_RING_STATE_TABLE, [{record_name, chx_hash_ring},
+ {attributes, record_info(fields, chx_hash_ring)},
+ {type, ordered_set}]),
+ mnesia:add_table_copy(?HASH_RING_STATE_TABLE, node(), ram_copies),
+ mnesia:wait_for_tables([?HASH_RING_STATE_TABLE], 30000),
+ recover(),
+ ok.
+
+info(_X) -> [].
+info(_X, _) -> [].
+
+description() ->
+ [{description, <<"Consistent Hashing Exchange">>}].
+
+serialise_events() -> false.
+
+route(#exchange {name = Name,
+ arguments = Args},
+ #delivery {message = Msg}) ->
+ case ets:lookup(?HASH_RING_STATE_TABLE, Name) of
+ [] ->
+ [];
+ [#chx_hash_ring{bucket_map = BM}] ->
+ case maps:size(BM) of
+ 0 -> [];
+ N ->
+ K = value_to_hash(hash_on(Args), Msg),
+ SelectedBucket = jump_consistent_hash(K, N),
+
+ case maps:get(SelectedBucket, BM, undefined) of
+ undefined ->
+ rabbit_log:warning("Bucket ~p not found", [SelectedBucket]),
+ [];
+ Queue -> [Queue]
+ end
+ end
+ end.
+
+validate(#exchange{arguments = Args}) ->
+ case hash_args(Args) of
+ {undefined, undefined} ->
+ ok;
+ {undefined, {_Type, Value}} ->
+ case lists:member(Value, ?PROPERTIES) of
+ true -> ok;
+ false ->
+ rabbit_misc:protocol_error(precondition_failed,
+ "Unsupported property: ~s",
+ [Value])
+ end;
+ {_, undefined} ->
+ ok;
+ {_, _} ->
+ rabbit_misc:protocol_error(precondition_failed,
+ "hash-header and hash-property are mutually exclusive",
+ [])
+ end.
+
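+%% The binding's routing key is interpreted as its weight, i.e. the number
+%% of hash ring buckets the bound queue will claim (see add_binding/3), so
+%% it must parse as a positive integer.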
+validate_binding(_X, #binding { key = K }) ->
+ try
+ V = list_to_integer(binary_to_list(K)),
+ case V < 1 of
+ true -> {error, {binding_invalid, "The binding key must be greater than 0", []}};
+ false -> ok
+ end
+ catch error:badarg ->
+ {error, {binding_invalid, "The binding key must be an integer: ~p", [K]}}
+ end.
+
+maybe_initialise_hash_ring_state(transaction, #exchange{name = Name}) ->
+ maybe_initialise_hash_ring_state(transaction, Name);
+maybe_initialise_hash_ring_state(transaction, X = #resource{}) ->
+ case mnesia:read(?HASH_RING_STATE_TABLE, X) of
+ [_] -> ok;
+ [] ->
+ rabbit_log:debug("Consistent hashing exchange: will initialise hashing ring schema database record"),
+ mnesia:write_lock_table(?HASH_RING_STATE_TABLE),
+ ok = mnesia:write(?HASH_RING_STATE_TABLE, #chx_hash_ring{
+ exchange = X,
+ next_bucket_number = 0,
+ bucket_map = #{}}, write)
+ end;
+
+maybe_initialise_hash_ring_state(_, X) ->
+ rabbit_misc:execute_mnesia_transaction(
+ fun() -> maybe_initialise_hash_ring_state(transaction, X) end).
+
+recover() ->
+    %% Topology recovery has already happened by this point, so we have to
+    %% recover ring state for any durable consistent hash exchanges ourselves:
+    %% starting with RabbitMQ 3.8.4, plugin activation was moved to a later
+    %% point in the boot process
+ case list_exchanges() of
+ {ok, Xs} ->
+ rabbit_log:debug("Consistent hashing exchange: have ~b durable exchanges to recover", [length(Xs)]),
+ [recover_exchange_and_bindings(X) || X <- lists:usort(Xs)];
+ {aborted, Reason} ->
+ rabbit_log:error(
+ "Consistent hashing exchange: failed to recover durable exchange ring state, reason: ~p",
+ [Reason])
+ end.
+
+list_exchanges() ->
+ case mnesia:transaction(
+ fun () ->
+ mnesia:match_object(rabbit_exchange,
+ #exchange{durable = true, type = 'x-consistent-hash', _ = '_'}, write)
+ end) of
+ {atomic, Xs} ->
+ {ok, Xs};
+ {aborted, Reason} ->
+ {aborted, Reason}
+ end.
+
+recover_exchange_and_bindings(#exchange{name = XName} = X) ->
+ mnesia:transaction(
+ fun () ->
+ rabbit_log:debug("Consistent hashing exchange: will recover exchange ~s", [rabbit_misc:rs(XName)]),
+ create(transaction, X),
+ rabbit_log:debug("Consistent hashing exchange: recovered exchange ~s", [rabbit_misc:rs(XName)]),
+ Bindings = rabbit_binding:list_for_source(XName),
+ rabbit_log:debug("Consistent hashing exchange: have ~b bindings to recover for exchange ~s",
+ [length(Bindings), rabbit_misc:rs(XName)]),
+ [add_binding(transaction, X, B) || B <- lists:usort(Bindings)],
+ rabbit_log:debug("Consistent hashing exchange: recovered bindings for exchange ~s",
+ [rabbit_misc:rs(XName)])
+ end).
+
+create(transaction, X) ->
+ maybe_initialise_hash_ring_state(transaction, X);
+create(Tx, X) ->
+ maybe_initialise_hash_ring_state(Tx, X).
+
+delete(transaction, #exchange{name = Name}, _Bs) ->
+ mnesia:write_lock_table(?HASH_RING_STATE_TABLE),
+
+ ok = mnesia:delete({?HASH_RING_STATE_TABLE, Name});
+delete(_Tx, _X, _Bs) ->
+ ok.
+
+policy_changed(_X1, _X2) -> ok.
+
+add_binding(transaction, X,
+ B = #binding{source = S, destination = D, key = K}) ->
+ Weight = rabbit_data_coercion:to_integer(K),
+ rabbit_log:debug("Consistent hashing exchange: adding binding from "
+ "exchange ~s to destination ~s with routing key '~s'", [rabbit_misc:rs(S), rabbit_misc:rs(D), K]),
+
+ case mnesia:read(?HASH_RING_STATE_TABLE, S) of
+ [State0 = #chx_hash_ring{bucket_map = BM0,
+ next_bucket_number = NexN0}] ->
+ NextN = NexN0 + Weight,
+ %% hi/lo bucket counters are 0-based but weight is 1-based
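+            %% e.g. with next_bucket_number = 3 and a binding weight of 2,
+            %% Range is [3, 4] and next_bucket_number advances to 5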
+ Range = lists:seq(NexN0, (NextN - 1)),
+ BM = lists:foldl(fun(Key, Acc) ->
+ maps:put(Key, D, Acc)
+ end, BM0, Range),
+ State = State0#chx_hash_ring{bucket_map = BM,
+ next_bucket_number = NextN},
+
+ ok = mnesia:write(?HASH_RING_STATE_TABLE, State, write),
+ ok;
+ [] ->
+ maybe_initialise_hash_ring_state(transaction, S),
+ add_binding(transaction, X, B)
+ end;
+add_binding(none, _X, _B) ->
+ ok.
+
+remove_bindings(transaction, _X, Bindings) ->
+ [remove_binding(B) || B <- Bindings],
+
+ ok;
+remove_bindings(none, X, Bindings) ->
+ rabbit_misc:execute_mnesia_transaction(
+ fun() -> remove_bindings(transaction, X, Bindings) end),
+ ok.
+
+remove_binding(#binding{source = S, destination = D, key = RK}) ->
+ Weight = rabbit_data_coercion:to_integer(RK),
+ rabbit_log:debug("Consistent hashing exchange: removing binding "
+ "from exchange '~p' to destination '~p' with routing key '~s'",
+ [rabbit_misc:rs(S), rabbit_misc:rs(D), RK]),
+
+ case mnesia:read(?HASH_RING_STATE_TABLE, S) of
+ [State0 = #chx_hash_ring{bucket_map = BM0,
+ next_bucket_number = NexN0}] ->
+ %% Buckets with lower numbers stay as is; buckets that
+ %% belong to this binding are removed; buckets with
+ %% greater numbers are updated (their numbers are adjusted downwards by weight)
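+            %%
+            %% e.g. removing a weight-2 binding that owns buckets 2 and 3
+            %% from a 6-bucket ring keeps buckets 0 and 1 as they are and
+            %% renumbers buckets 4 and 5 down to 2 and 3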
+ BucketsOfThisBinding = maps:filter(fun (_K, V) -> V =:= D end, BM0),
+ case maps:size(BucketsOfThisBinding) of
+ 0 -> ok;
+ N when N >= 1 ->
+ KeysOfThisBinding = lists:usort(maps:keys(BucketsOfThisBinding)),
+ LastBucket = lists:last(KeysOfThisBinding),
+ FirstBucket = hd(KeysOfThisBinding),
+ BucketsDownTheRing = maps:filter(fun (K, _) -> K > LastBucket end, BM0),
+ UnchangedBuckets = maps:filter(fun (K, _) -> K < FirstBucket end, BM0),
+
+ %% final state with "down the ring" buckets updated
+ NewBucketsDownTheRing = maps:fold(
+ fun(K0, V, Acc) ->
+ maps:put(K0 - Weight, V, Acc)
+ end, #{}, BucketsDownTheRing),
+ BM1 = maps:merge(UnchangedBuckets, NewBucketsDownTheRing),
+ NextN = NexN0 - Weight,
+ State = State0#chx_hash_ring{bucket_map = BM1,
+ next_bucket_number = NextN},
+
+ ok = mnesia:write(?HASH_RING_STATE_TABLE, State, write)
+ end;
+ [] ->
+ rabbit_log:warning("Can't remove binding: hash ring state for exchange ~s wasn't found",
+ [rabbit_misc:rs(S)]),
+ ok
+ end.
+
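+%% Exposed over RPC for the consistent_hash_exchange_ring_state CLI
+%% diagnostics command; a dirty read is sufficient for inspection purposes.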
+ring_state(VirtualHost, Exchange) ->
+ Resource = rabbit_misc:r(VirtualHost, exchange, Exchange),
+ case mnesia:dirty_read(?HASH_RING_STATE_TABLE, Resource) of
+ [] -> {error, not_found};
+ [Row] -> {ok, Row}
+ end.
+
+assert_args_equivalence(X, Args) ->
+ rabbit_exchange:assert_args_equivalence(X, Args).
+
+%%
+%% Jump-consistent hashing.
+%%
+
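+%% Maps an integer key onto one of NumberOfBuckets buckets (0-based),
+%% following Lamping and Veach's "A Fast, Minimal Memory, Consistent Hash
+%% Algorithm": when a ring grows from N to N + 1 buckets, only about
+%% 1/(N + 1) of the keys map to a different bucket. For example,
+%% jump_consistent_hash(erlang:phash2(<<"msg-1">>), 6) returns a bucket
+%% number in [0, 5].
+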
+jump_consistent_hash(_Key, 1) ->
+ 0;
+jump_consistent_hash(KeyList, NumberOfBuckets) when is_list(KeyList) ->
+ jump_consistent_hash(hd(KeyList), NumberOfBuckets);
+jump_consistent_hash(Key, NumberOfBuckets) when is_integer(Key) ->
+ SeedState = rand:seed_s(?SEED_ALGORITHM, {Key, Key, Key}),
+ jump_consistent_hash_value(-1, 0, NumberOfBuckets, SeedState);
+jump_consistent_hash(Key, NumberOfBuckets) ->
+ jump_consistent_hash(erlang:phash2(Key), NumberOfBuckets).
+
+jump_consistent_hash_value(B, J, NumberOfBuckets, _SeedState) when J >= NumberOfBuckets ->
+ B;
+
+jump_consistent_hash_value(_B0, J0, NumberOfBuckets, SeedState0) ->
+ B = J0,
+ {R, SeedState} = rand:uniform_s(SeedState0),
+ J = trunc((B + 1) / R),
+ jump_consistent_hash_value(B, J, NumberOfBuckets, SeedState).
+
+value_to_hash(undefined, #basic_message { routing_keys = Routes }) ->
+ Routes;
+value_to_hash({header, Header}, #basic_message { content = Content }) ->
+ Headers = rabbit_basic:extract_headers(Content),
+ case Headers of
+ undefined -> undefined;
+ _ -> rabbit_misc:table_lookup(Headers, Header)
+ end;
+value_to_hash({property, Property}, #basic_message { content = Content }) ->
+ #content{properties = #'P_basic'{ correlation_id = CorrId,
+ message_id = MsgId,
+ timestamp = Timestamp }} =
+ rabbit_binary_parser:ensure_content_decoded(Content),
+ case Property of
+ <<"correlation_id">> -> CorrId;
+ <<"message_id">> -> MsgId;
+ <<"timestamp">> ->
+ case Timestamp of
+ undefined -> undefined;
+ _ -> integer_to_binary(Timestamp)
+ end
+ end.
+
+hash_args(Args) ->
+ Header =
+ case rabbit_misc:table_lookup(Args, <<"hash-header">>) of
+ undefined -> undefined;
+ {longstr, V1} -> {header, V1}
+ end,
+ Property =
+ case rabbit_misc:table_lookup(Args, <<"hash-property">>) of
+ undefined -> undefined;
+ {longstr, V2} -> {property, V2}
+ end,
+ {Header, Property}.
+
+hash_on(Args) ->
+ case hash_args(Args) of
+ {undefined, undefined} -> undefined;
+ {Header, undefined} -> Header;
+ {undefined, Property} -> Property
+ end.
diff --git a/deps/rabbitmq_consistent_hash_exchange/test/rabbit_exchange_type_consistent_hash_SUITE.erl b/deps/rabbitmq_consistent_hash_exchange/test/rabbit_exchange_type_consistent_hash_SUITE.erl
new file mode 100644
index 0000000000..644f15a38a
--- /dev/null
+++ b/deps/rabbitmq_consistent_hash_exchange/test/rabbit_exchange_type_consistent_hash_SUITE.erl
@@ -0,0 +1,605 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_exchange_type_consistent_hash_SUITE).
+
+-compile(export_all).
+
+-include("rabbitmq_consistent_hash_exchange.hrl").
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+all() ->
+ [
+ {group, routing_tests},
+ {group, hash_ring_management_tests}
+ ].
+
+groups() ->
+ [
+ {routing_tests, [], [
+ routing_key_hashing_test,
+ custom_header_hashing_test,
+ message_id_hashing_test,
+ correlation_id_hashing_test,
+ timestamp_hashing_test,
+ other_routing_test
+ ]},
+ {hash_ring_management_tests, [], [
+ test_durable_exchange_hash_ring_recovery_between_node_restarts,
+ test_hash_ring_updates_when_queue_is_deleted,
+ test_hash_ring_updates_when_multiple_queues_are_deleted,
+ test_hash_ring_updates_when_exclusive_queues_are_deleted_due_to_connection_closure,
+ test_hash_ring_updates_when_exclusive_queues_are_deleted_due_to_connection_closure_case2,
+ test_hash_ring_updates_when_exclusive_queues_are_deleted_due_to_connection_closure_case3,
+ test_hash_ring_updates_when_exclusive_queues_are_deleted_due_to_connection_closure_case4,
+ test_hash_ring_updates_when_exclusive_queues_are_deleted_due_to_connection_closure_case5,
+ test_hash_ring_updates_when_exclusive_queues_are_deleted_due_to_connection_closure_case6,
+ test_hash_ring_updates_when_exchange_is_deleted,
+ test_hash_ring_updates_when_queue_is_unbound
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Test suite setup/teardown
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, ?MODULE}
+ ]),
+ rabbit_ct_helpers:run_setup_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_group(_, Config) ->
+ Config.
+
+end_per_group(_, Config) ->
+ Config.
+
+init_per_testcase(Testcase, Config) ->
+ clean_up_test_topology(Config),
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+%% -------------------------------------------------------------------
+%% Test cases
+%% -------------------------------------------------------------------
+
+-define(AllQs, [<<"q0">>, <<"q1">>, <<"q2">>, <<"q3">>, <<"q4">>, <<"q5">>, <<"q6">>,
+ <<"e-q0">>, <<"e-q1">>, <<"e-q2">>, <<"e-q3">>, <<"e-q4">>, <<"e-q5">>, <<"e-q6">>,
+ <<"d-q0">>, <<"d-q1">>, <<"d-q2">>, <<"d-q3">>, <<"d-q4">>, <<"d-q5">>, <<"d-q6">>]).
+-define(RoutingTestQs, [<<"q0">>, <<"q1">>, <<"q2">>, <<"q3">>]).
+
+%% N.B. lowering this value below 100K increases the probability
+%% of failing the Chi squared test in some environments
+-define(DEFAULT_SAMPLE_COUNT, 150000).
+
+routing_key_hashing_test(Config) ->
+ ok = test_with_rk(Config, ?RoutingTestQs).
+
+custom_header_hashing_test(Config) ->
+ ok = test_with_header(Config, ?RoutingTestQs).
+
+message_id_hashing_test(Config) ->
+ ok = test_with_message_id(Config, ?RoutingTestQs).
+
+correlation_id_hashing_test(Config) ->
+ ok = test_with_correlation_id(Config, ?RoutingTestQs).
+
+timestamp_hashing_test(Config) ->
+ ok = test_with_timestamp(Config, ?RoutingTestQs).
+
+other_routing_test(Config) ->
+ ok = test_binding_with_negative_routing_key(Config),
+ ok = test_binding_with_non_numeric_routing_key(Config),
+ ok = test_non_supported_property(Config),
+ ok = test_mutually_exclusive_arguments(Config),
+ ok.
+
+
+%% -------------------------------------------------------------------
+%% Implementation
+%% -------------------------------------------------------------------
+
+test_with_rk(Config, Qs) ->
+ test0(Config, fun (E) ->
+ #'basic.publish'{exchange = E, routing_key = rnd()}
+ end,
+ fun() ->
+ #amqp_msg{props = #'P_basic'{}, payload = <<>>}
+ end, [], Qs).
+
+test_with_header(Config, Qs) ->
+ test0(Config, fun (E) ->
+ #'basic.publish'{exchange = E}
+ end,
+ fun() ->
+ H = [{<<"hashme">>, longstr, rnd()}],
+ #amqp_msg{props = #'P_basic'{headers = H}, payload = <<>>}
+ end, [{<<"hash-header">>, longstr, <<"hashme">>}], Qs).
+
+test_with_correlation_id(Config, Qs) ->
+ test0(Config, fun(E) ->
+ #'basic.publish'{exchange = E}
+ end,
+ fun() ->
+ #amqp_msg{props = #'P_basic'{correlation_id = rnd()}, payload = <<>>}
+ end, [{<<"hash-property">>, longstr, <<"correlation_id">>}], Qs).
+
+test_with_message_id(Config, Qs) ->
+ test0(Config, fun(E) ->
+ #'basic.publish'{exchange = E}
+ end,
+ fun() ->
+ #amqp_msg{props = #'P_basic'{message_id = rnd()}, payload = <<>>}
+ end, [{<<"hash-property">>, longstr, <<"message_id">>}], Qs).
+
+test_with_timestamp(Config, Qs) ->
+ test0(Config, fun(E) ->
+ #'basic.publish'{exchange = E}
+ end,
+ fun() ->
+ #amqp_msg{props = #'P_basic'{timestamp = rnd_int()}, payload = <<>>}
+ end, [{<<"hash-property">>, longstr, <<"timestamp">>}], Qs).
+
+test_mutually_exclusive_arguments(Config) ->
+ Chan = rabbit_ct_client_helpers:open_channel(Config, 0),
+
+ process_flag(trap_exit, true),
+ Cmd = #'exchange.declare'{
+ exchange = <<"fail">>,
+ type = <<"x-consistent-hash">>,
+ arguments = [{<<"hash-header">>, longstr, <<"foo">>},
+ {<<"hash-property">>, longstr, <<"bar">>}]
+ },
+ ?assertExit(_, amqp_channel:call(Chan, Cmd)),
+
+ rabbit_ct_client_helpers:close_channel(Chan),
+ ok.
+
+test_non_supported_property(Config) ->
+ Chan = rabbit_ct_client_helpers:open_channel(Config, 0),
+
+ process_flag(trap_exit, true),
+ Cmd = #'exchange.declare'{
+ exchange = <<"fail">>,
+ type = <<"x-consistent-hash">>,
+ arguments = [{<<"hash-property">>, longstr, <<"app_id">>}]
+ },
+ ?assertExit(_, amqp_channel:call(Chan, Cmd)),
+
+ rabbit_ct_client_helpers:close_channel(Chan),
+ ok.
+
+rnd() ->
+ list_to_binary(integer_to_list(rnd_int())).
+
+rnd_int() ->
+ rand:uniform(10000000).
+
+test0(Config, MakeMethod, MakeMsg, DeclareArgs, Queues) ->
+ test0(Config, MakeMethod, MakeMsg, DeclareArgs, Queues, ?DEFAULT_SAMPLE_COUNT).
+
+test0(Config, MakeMethod, MakeMsg, DeclareArgs, [Q1, Q2, Q3, Q4] = Queues, IterationCount) ->
+ Chan = rabbit_ct_client_helpers:open_channel(Config, 0),
+ #'confirm.select_ok'{} = amqp_channel:call(Chan, #'confirm.select'{}),
+
+ CHX = <<"e">>,
+
+ clean_up_test_topology(Config, CHX, Queues),
+
+ #'exchange.declare_ok'{} =
+ amqp_channel:call(Chan,
+ #'exchange.declare' {
+ exchange = CHX,
+ type = <<"x-consistent-hash">>,
+ auto_delete = true,
+ arguments = DeclareArgs
+ }),
+ [#'queue.declare_ok'{} =
+ amqp_channel:call(Chan, #'queue.declare' {
+ queue = Q, exclusive = true }) || Q <- Queues],
+ [#'queue.bind_ok'{} =
+ amqp_channel:call(Chan, #'queue.bind' {queue = Q,
+ exchange = CHX,
+ routing_key = <<"1">>})
+ || Q <- [Q1, Q2]],
+ [#'queue.bind_ok'{} =
+ amqp_channel:call(Chan, #'queue.bind' {queue = Q,
+ exchange = CHX,
+ routing_key = <<"2">>})
+ || Q <- [Q3, Q4]],
+
+ [amqp_channel:call(Chan,
+ MakeMethod(CHX),
+ MakeMsg()) || _ <- lists:duplicate(IterationCount, const)],
+ amqp_channel:wait_for_confirms(Chan, 300),
+ timer:sleep(500),
+
+ Counts =
+ [begin
+ #'queue.declare_ok'{message_count = M} =
+ amqp_channel:call(Chan, #'queue.declare' {queue = Q,
+ exclusive = true}),
+ M
+ end || Q <- Queues],
+ ?assertEqual(IterationCount, lists:sum(Counts)), %% All messages got routed
+
+    %% Chi-squared goodness-of-fit test.
+    %% H0: messages are distributed across the queues according to the
+    %% binding weights (routing keys "1", "1", "2", "2" above, i.e.
+    %% expected shares of 1/6, 1/6, 2/6 and 2/6)
+ Expected = [IterationCount div 6, IterationCount div 6, (IterationCount div 6) * 2, (IterationCount div 6) * 2],
+ Obs = lists:zip(Counts, Expected),
+ Chi = lists:sum([((O - E) * (O - E)) / E || {O, E} <- Obs]),
+ ct:pal("Chi-square test for 3 degrees of freedom is ~p, p = 0.01 is 11.35, observations (counts, expected): ~p",
+ [Chi, Obs]),
+
+ clean_up_test_topology(Config, CHX, Queues),
+ rabbit_ct_client_helpers:close_channel(Chan),
+ ok.
+
+test_binding_with_negative_routing_key(Config) ->
+ Chan = rabbit_ct_client_helpers:open_channel(Config, 0),
+ X = <<"bind-fail">>,
+ amqp_channel:call(Chan, #'exchange.delete' {exchange = X}),
+
+ Declare1 = #'exchange.declare'{exchange = X,
+ type = <<"x-consistent-hash">>},
+ #'exchange.declare_ok'{} = amqp_channel:call(Chan, Declare1),
+ Q = <<"test-queue">>,
+ Declare2 = #'queue.declare'{queue = Q},
+ #'queue.declare_ok'{} = amqp_channel:call(Chan, Declare2),
+ process_flag(trap_exit, true),
+ Cmd = #'queue.bind'{exchange = <<"bind-fail">>,
+ routing_key = <<"-1">>},
+ ?assertExit(_, amqp_channel:call(Chan, Cmd)),
+ Ch2 = rabbit_ct_client_helpers:open_channel(Config, 0),
+ amqp_channel:call(Ch2, #'queue.delete'{queue = Q}),
+
+ rabbit_ct_client_helpers:close_channel(Chan),
+ rabbit_ct_client_helpers:close_channel(Ch2),
+ ok.
+
+test_binding_with_non_numeric_routing_key(Config) ->
+ Chan = rabbit_ct_client_helpers:open_channel(Config, 0),
+ X = <<"bind-fail">>,
+ amqp_channel:call(Chan, #'exchange.delete' {exchange = X}),
+
+ Declare1 = #'exchange.declare'{exchange = X,
+ type = <<"x-consistent-hash">>},
+ #'exchange.declare_ok'{} = amqp_channel:call(Chan, Declare1),
+ Q = <<"test-queue">>,
+ Declare2 = #'queue.declare'{queue = Q},
+ #'queue.declare_ok'{} = amqp_channel:call(Chan, Declare2),
+ process_flag(trap_exit, true),
+ Cmd = #'queue.bind'{exchange = <<"bind-fail">>,
+ routing_key = <<"not-a-number">>},
+ ?assertExit(_, amqp_channel:call(Chan, Cmd)),
+
+ Ch2 = rabbit_ct_client_helpers:open_channel(Config, 0),
+ amqp_channel:call(Ch2, #'queue.delete'{queue = Q}),
+
+ rabbit_ct_client_helpers:close_channel(Chan),
+ ok.
+
+%%
+%% Hash Ring management
+%%
+
+test_durable_exchange_hash_ring_recovery_between_node_restarts(Config) ->
+ Chan = rabbit_ct_client_helpers:open_channel(Config, 0),
+
+ X = <<"test_hash_ring_recovery_between_node_restarts">>,
+ amqp_channel:call(Chan, #'exchange.delete' {exchange = X}),
+
+ Declare = #'exchange.declare'{exchange = X,
+ durable = true,
+ type = <<"x-consistent-hash">>},
+ #'exchange.declare_ok'{} = amqp_channel:call(Chan, Declare),
+
+ Queues = [<<"d-q1">>, <<"d-q2">>, <<"d-q3">>],
+ [#'queue.declare_ok'{} =
+ amqp_channel:call(Chan, #'queue.declare'{
+ queue = Q, durable = true, exclusive = false}) || Q <- Queues],
+ [#'queue.bind_ok'{} =
+ amqp_channel:call(Chan, #'queue.bind'{queue = Q,
+ exchange = X,
+ routing_key = <<"3">>})
+ || Q <- Queues],
+
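+    %% 3 queues, each bound once with weight (routing key) 3 => 9 buckets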
+ ?assertEqual(9, count_buckets_of_exchange(Config, X)),
+ assert_ring_consistency(Config, X),
+
+ rabbit_ct_broker_helpers:restart_node(Config, 0),
+ rabbit_ct_helpers:await_condition(
+ fun () -> count_buckets_of_exchange(Config, X) > 0 end, 15000),
+
+ ?assertEqual(9, count_buckets_of_exchange(Config, X)),
+ assert_ring_consistency(Config, X),
+
+ clean_up_test_topology(Config, X, Queues),
+ rabbit_ct_client_helpers:close_channel(Chan),
+ ok.
+
+test_hash_ring_updates_when_queue_is_deleted(Config) ->
+ Chan = rabbit_ct_client_helpers:open_channel(Config, 0),
+
+ X = <<"test_hash_ring_updates_when_queue_is_deleted">>,
+ amqp_channel:call(Chan, #'exchange.delete' {exchange = X}),
+
+ Declare = #'exchange.declare'{exchange = X,
+ type = <<"x-consistent-hash">>},
+ #'exchange.declare_ok'{} = amqp_channel:call(Chan, Declare),
+
+ Q = <<"d-q">>,
+ #'queue.declare_ok'{} =
+ amqp_channel:call(Chan, #'queue.declare'{
+ queue = Q, durable = true, exclusive = false}),
+ #'queue.bind_ok'{} =
+ amqp_channel:call(Chan, #'queue.bind'{queue = Q,
+ exchange = X,
+ routing_key = <<"1">>}),
+
+ ?assertEqual(1, count_buckets_of_exchange(Config, X)),
+ assert_ring_consistency(Config, X),
+
+ amqp_channel:call(Chan, #'queue.delete' {queue = Q}),
+ ?assertEqual(0, count_buckets_of_exchange(Config, X)),
+
+ clean_up_test_topology(Config, X, [Q]),
+ rabbit_ct_client_helpers:close_channel(Chan),
+ ok.
+
+test_hash_ring_updates_when_multiple_queues_are_deleted(Config) ->
+ Chan = rabbit_ct_client_helpers:open_channel(Config, 0),
+
+ X = <<"test_hash_ring_updates_when_multiple_queues_are_deleted">>,
+ amqp_channel:call(Chan, #'exchange.delete' {exchange = X}),
+
+ Declare = #'exchange.declare'{exchange = X,
+ type = <<"x-consistent-hash">>},
+ #'exchange.declare_ok'{} = amqp_channel:call(Chan, Declare),
+
+ Queues = [<<"d-q1">>, <<"d-q2">>, <<"d-q3">>],
+ [#'queue.declare_ok'{} =
+ amqp_channel:call(Chan, #'queue.declare'{
+ queue = Q, durable = true, exclusive = false}) || Q <- Queues],
+ [#'queue.bind_ok'{} =
+ amqp_channel:call(Chan, #'queue.bind'{queue = Q,
+ exchange = X,
+ routing_key = <<"3">>})
+ || Q <- Queues],
+
+ ?assertEqual(9, count_buckets_of_exchange(Config, X)),
+ assert_ring_consistency(Config, X),
+
+ amqp_channel:call(Chan, #'queue.delete' {queue = <<"d-q1">>}),
+ ?assertEqual(6, count_buckets_of_exchange(Config, X)),
+ assert_ring_consistency(Config, X),
+
+ amqp_channel:call(Chan, #'queue.delete' {queue = <<"d-q2">>}),
+ amqp_channel:call(Chan, #'queue.delete' {queue = <<"d-q3">>}),
+ ?assertEqual(0, count_buckets_of_exchange(Config, X)),
+
+ clean_up_test_topology(Config, X, Queues),
+ rabbit_ct_client_helpers:close_channel(Chan),
+ ok.
+
+test_hash_ring_updates_when_exclusive_queues_are_deleted_due_to_connection_closure(Config) ->
+ Conn = rabbit_ct_client_helpers:open_unmanaged_connection(Config),
+ {ok, Chan} = amqp_connection:open_channel(Conn),
+
+ X = <<"test_hash_ring_updates_when_exclusive_queues_are_deleted_due_to_connection_closure">>,
+ amqp_channel:call(Chan, #'exchange.delete' {exchange = X}),
+
+ Declare = #'exchange.declare'{exchange = X,
+ type = <<"x-consistent-hash">>},
+ #'exchange.declare_ok'{} = amqp_channel:call(Chan, Declare),
+
+ Queues = [<<"e-q1">>, <<"e-q2">>, <<"e-q3">>, <<"e-q4">>, <<"e-q5">>, <<"e-q6">>],
+ [#'queue.declare_ok'{} =
+ amqp_channel:call(Chan, #'queue.declare' {
+ queue = Q, exclusive = true }) || Q <- Queues],
+ [#'queue.bind_ok'{} =
+ amqp_channel:call(Chan, #'queue.bind' {queue = Q,
+ exchange = X,
+ routing_key = <<"3">>})
+ || Q <- Queues],
+
+ ct:pal("all hash ring rows: ~p", [hash_ring_rows(Config)]),
+
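+    %% 6 queues, each bound once with weight (routing key) 3 => 18 buckets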
+ ?assertEqual(18, count_buckets_of_exchange(Config, X)),
+ assert_ring_consistency(Config, X),
+ ok = amqp_connection:close(Conn),
+ timer:sleep(500),
+
+ ct:pal("all hash ring rows after connection closure: ~p", [hash_ring_rows(Config)]),
+
+ ?assertEqual(0, count_buckets_of_exchange(Config, X)),
+ clean_up_test_topology(Config, X, []),
+ ok.
+
+%% rabbitmq/rabbitmq-consistent-hash-exchange#40, uses higher weights
+test_hash_ring_updates_when_exclusive_queues_are_deleted_due_to_connection_closure_case2(Config) ->
+ test_hash_ring_updates_when_exclusive_queues_are_deleted_due_to_connection_closure_case(Config, ?FUNCTION_NAME, 50).
+
+%% rabbitmq/rabbitmq-consistent-hash-exchange#40
+test_hash_ring_updates_when_exclusive_queues_are_deleted_due_to_connection_closure_case3(Config) ->
+ test_hash_ring_updates_when_exclusive_queues_are_deleted_due_to_connection_closure_case(Config, ?FUNCTION_NAME, 34).
+
+%% rabbitmq/rabbitmq-consistent-hash-exchange#40
+test_hash_ring_updates_when_exclusive_queues_are_deleted_due_to_connection_closure_case4(Config) ->
+ test_hash_ring_updates_when_exclusive_queues_are_deleted_due_to_connection_closure_case(Config, ?FUNCTION_NAME, 100).
+
+%% rabbitmq/rabbitmq-consistent-hash-exchange#40
+test_hash_ring_updates_when_exclusive_queues_are_deleted_due_to_connection_closure_case5(Config) ->
+ test_hash_ring_updates_when_exclusive_queues_are_deleted_due_to_connection_closure_case(Config, ?FUNCTION_NAME, 268).
+
+%% rabbitmq/rabbitmq-consistent-hash-exchange#40
+test_hash_ring_updates_when_exclusive_queues_are_deleted_due_to_connection_closure_case6(Config) ->
+ test_hash_ring_updates_when_exclusive_queues_are_deleted_due_to_connection_closure_case(Config, ?FUNCTION_NAME, 1937).
+
+test_hash_ring_updates_when_exclusive_queues_are_deleted_due_to_connection_closure_case(Config, XAsAtom, Key) ->
+ Conn = rabbit_ct_client_helpers:open_unmanaged_connection(Config),
+ {ok, Chan} = amqp_connection:open_channel(Conn),
+
+    X = atom_to_binary(XAsAtom, utf8),
+ amqp_channel:call(Chan, #'exchange.delete' {exchange = X}),
+
+ Declare = #'exchange.declare'{exchange = X,
+ type = <<"x-consistent-hash">>},
+ #'exchange.declare_ok'{} = amqp_channel:call(Chan, Declare),
+
+ NumQueues = 6,
+ Queues = [begin
+ #'queue.declare_ok'{queue = Q} =
+ amqp_channel:call(Chan, #'queue.declare' {exclusive = true }),
+ Q
+ end || _ <- lists:seq(1, NumQueues)],
+ [#'queue.bind_ok'{} =
+ amqp_channel:call(Chan, #'queue.bind' {queue = Q,
+ exchange = X,
+ routing_key = integer_to_binary(Key)})
+ || Q <- Queues],
+
+ ct:pal("all hash ring rows: ~p", [hash_ring_rows(Config)]),
+
+    %% 'Key' buckets per binding, NumQueues bindings => NumQueues * Key buckets in total
+ ?assertEqual(NumQueues * Key, count_buckets_of_exchange(Config, X)),
+ assert_ring_consistency(Config, X),
+ ok = amqp_connection:close(Conn),
+ timer:sleep(1000),
+
+ ct:pal("all hash ring rows after connection closure (~p): ~p", [XAsList, hash_ring_rows(Config)]),
+
+ ?assertEqual(0, count_buckets_of_exchange(Config, X)),
+ clean_up_test_topology(Config, X, []),
+ ok.
+
+test_hash_ring_updates_when_exchange_is_deleted(Config) ->
+ Chan = rabbit_ct_client_helpers:open_channel(Config, 0),
+
+ X = <<"test_hash_ring_updates_when_exchange_is_deleted">>,
+ amqp_channel:call(Chan, #'exchange.delete' {exchange = X}),
+
+ Declare = #'exchange.declare'{exchange = X,
+ type = <<"x-consistent-hash">>},
+ #'exchange.declare_ok'{} = amqp_channel:call(Chan, Declare),
+
+ Queues = [<<"e-q1">>, <<"e-q2">>, <<"e-q3">>, <<"e-q4">>, <<"e-q5">>, <<"e-q6">>],
+ [#'queue.declare_ok'{} =
+ amqp_channel:call(Chan, #'queue.declare' {
+ queue = Q, exclusive = true }) || Q <- Queues],
+ [#'queue.bind_ok'{} =
+ amqp_channel:call(Chan, #'queue.bind' {queue = Q,
+ exchange = X,
+ routing_key = <<"2">>})
+ || Q <- Queues],
+
+ ?assertEqual(12, count_buckets_of_exchange(Config, X)),
+ amqp_channel:call(Chan, #'exchange.delete' {exchange = X}),
+
+ ?assertEqual(0, count_buckets_of_exchange(Config, X)),
+
+ amqp_channel:call(Chan, #'queue.delete' {queue = <<"e-q1">>}),
+ ?assertEqual(0, count_buckets_of_exchange(Config, X)),
+
+ clean_up_test_topology(Config, X, Queues),
+ rabbit_ct_client_helpers:close_channel(Chan),
+ ok.
+
+test_hash_ring_updates_when_queue_is_unbound(Config) ->
+ Chan = rabbit_ct_client_helpers:open_channel(Config, 0),
+
+ X = <<"test_hash_ring_updates_when_queue_is_unbound">>,
+ amqp_channel:call(Chan, #'exchange.delete' {exchange = X}),
+
+ Declare = #'exchange.declare'{exchange = X,
+ type = <<"x-consistent-hash">>},
+ #'exchange.declare_ok'{} = amqp_channel:call(Chan, Declare),
+
+ Queues = [<<"e-q1">>, <<"e-q2">>, <<"e-q3">>, <<"e-q4">>, <<"e-q5">>, <<"e-q6">>],
+ [#'queue.declare_ok'{} =
+ amqp_channel:call(Chan, #'queue.declare' {
+ queue = Q, exclusive = true }) || Q <- Queues],
+ [#'queue.bind_ok'{} =
+ amqp_channel:call(Chan, #'queue.bind' {queue = Q,
+ exchange = X,
+ routing_key = <<"2">>})
+ || Q <- Queues],
+
+ ?assertEqual(12, count_buckets_of_exchange(Config, X)),
+ amqp_channel:call(Chan, #'queue.unbind'{exchange = X,
+ queue = <<"e-q2">>,
+ routing_key = <<"2">>}),
+
+ ?assertEqual(10, count_buckets_of_exchange(Config, X)),
+ amqp_channel:call(Chan, #'queue.unbind'{exchange = X,
+ queue = <<"e-q6">>,
+ routing_key = <<"2">>}),
+ ?assertEqual(8, count_buckets_of_exchange(Config, X)),
+
+ clean_up_test_topology(Config, X, Queues),
+ rabbit_ct_client_helpers:close_channel(Chan),
+ ok.
+
+
+%%
+%% Helpers
+%%
+
+hash_ring_state(Config, X) ->
+ rabbit_ct_broker_helpers:rpc(
+ Config, 0, ets, lookup,
+ [rabbit_exchange_type_consistent_hash_ring_state,
+ rabbit_misc:r(<<"/">>, exchange, X)]).
+
+hash_ring_rows(Config) ->
+ rabbit_ct_broker_helpers:rpc(
+ Config, 0, ets, tab2list, [rabbit_exchange_type_consistent_hash_ring_state]).
+
+assert_ring_consistency(Config, X) ->
+ [#chx_hash_ring{bucket_map = M}] = hash_ring_state(Config, X),
+ Buckets = lists:usort(maps:keys(M)),
+ Hi = lists:last(Buckets),
+
+ %% bucket numbers form a sequence without gaps or duplicates
+ ?assertEqual(lists:seq(0, Hi), lists:usort(Buckets)).
+
+count_buckets_of_exchange(Config, X) ->
+ case hash_ring_state(Config, X) of
+ [#chx_hash_ring{bucket_map = M}] -> maps:size(M);
+ [] -> 0
+ end.
+
+count_all_hash_ring_buckets(Config) ->
+ Rows = hash_ring_rows(Config),
+ lists:foldl(fun(#chx_hash_ring{bucket_map = M}, Acc) -> Acc + maps:size(M) end, 0, Rows).
+
+clean_up_test_topology(Config) ->
+ clean_up_test_topology(Config, none, ?AllQs).
+
+clean_up_test_topology(Config, none, Qs) ->
+ Ch = rabbit_ct_client_helpers:open_channel(Config, 0),
+ [amqp_channel:call(Ch, #'queue.delete' {queue = Q}) || Q <- Qs],
+ rabbit_ct_client_helpers:close_channel(Ch);
+
+clean_up_test_topology(Config, X, Qs) ->
+ Ch = rabbit_ct_client_helpers:open_channel(Config, 0),
+ amqp_channel:call(Ch, #'exchange.delete' {exchange = X}),
+ [amqp_channel:call(Ch, #'queue.delete' {queue = Q}) || Q <- Qs],
+ rabbit_ct_client_helpers:close_channel(Ch).
diff --git a/deps/rabbitmq_event_exchange/.gitignore b/deps/rabbitmq_event_exchange/.gitignore
new file mode 100644
index 0000000000..17df3b3932
--- /dev/null
+++ b/deps/rabbitmq_event_exchange/.gitignore
@@ -0,0 +1,19 @@
+.sw?
+.*.sw?
+*.beam
+/.erlang.mk/
+/cover/
+/deps/
+/doc/
+/ebin/
+/escript/
+/escript.lock
+/logs/
+/plugins/
+/plugins.lock
+/sbin/
+/sbin.lock
+
+test/config_schema_SUITE_data/schema/rabbit.schema
+
+rabbitmq_event_exchange.d
diff --git a/deps/rabbitmq_event_exchange/.travis.yml b/deps/rabbitmq_event_exchange/.travis.yml
new file mode 100644
index 0000000000..7b7176904b
--- /dev/null
+++ b/deps/rabbitmq_event_exchange/.travis.yml
@@ -0,0 +1,61 @@
+# vim:sw=2:et:
+
+os: linux
+dist: xenial
+language: elixir
+notifications:
+ email:
+ recipients:
+ - alerts@rabbitmq.com
+ on_success: never
+ on_failure: always
+addons:
+ apt:
+ packages:
+ - awscli
+cache:
+ apt: true
+env:
+ global:
+ - secure: i3JOb6jEw0a4DbGi9y+MaG34u1BY7cXKYGrI209VDLX23iD28Aa/9BkI9R0Brd2g+RxNZR5DPpYTtewO+zFBXOqFNpoeP7QVvVATQVZl7cMtSDmlcul3QwuKfng3wfZ417BgioSqkm+gVN6eS8mvn0ngt35Jfq0eIPledLlAY3Y=
+ - secure: jWod/Y/C/Eaj0u9hqLm98YrheE3ttm0LyqzNVpdLrkyIYrFpZOSfpvl3CKfZdIESZ8H3OAX8YP40wxwn7EBmTnFR03mwKRCu8zKvDgdiIc0+MaCXq7c1RNkwcGUlj2jlZ4XnxMPvimCeozPGyPuO4XMZAuOyhekHKDcg+hQD6Ko=
+
+ # $base_rmq_ref is used by rabbitmq-components.mk to select the
+ # appropriate branch for dependencies.
+ - base_rmq_ref=master
+
+elixir:
+ - '1.9'
+otp_release:
+ - '21.3'
+ - '22.2'
+
+install:
+ # This project being an Erlang one (we just set language to Elixir
+ # to ensure it is installed), we don't want Travis to run mix(1)
+ # automatically as it will break.
+ skip
+
+script:
+ # $current_rmq_ref is also used by rabbitmq-components.mk to select
+ # the appropriate branch for dependencies.
+ - make check-rabbitmq-components.mk
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+ - make xref
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+ - make tests
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+
+after_failure:
+ - |
+ cd "$TRAVIS_BUILD_DIR"
+ if test -d logs && test "$AWS_ACCESS_KEY_ID" && test "$AWS_SECRET_ACCESS_KEY"; then
+ archive_name="$(basename "$TRAVIS_REPO_SLUG")-$TRAVIS_JOB_NUMBER"
+
+ tar -c --transform "s/^logs/${archive_name}/" -f - logs | \
+ xz > "${archive_name}.tar.xz"
+
+ aws s3 cp "${archive_name}.tar.xz" s3://server-release-pipeline/travis-ci-logs/ \
+ --region eu-west-1 \
+ --acl public-read
+ fi
diff --git a/deps/rabbitmq_event_exchange/CODE_OF_CONDUCT.md b/deps/rabbitmq_event_exchange/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000000..08697906fd
--- /dev/null
+++ b/deps/rabbitmq_event_exchange/CODE_OF_CONDUCT.md
@@ -0,0 +1,44 @@
+# Contributor Code of Conduct
+
+As contributors and maintainers of this project, and in the interest of fostering an open
+and welcoming community, we pledge to respect all people who contribute through reporting
+issues, posting feature requests, updating documentation, submitting pull requests or
+patches, and other activities.
+
+We are committed to making participation in this project a harassment-free experience for
+everyone, regardless of level of experience, gender, gender identity and expression,
+sexual orientation, disability, personal appearance, body size, race, ethnicity, age,
+religion, or nationality.
+
+Examples of unacceptable behavior by participants include:
+
+ * The use of sexualized language or imagery
+ * Personal attacks
+ * Trolling or insulting/derogatory comments
+ * Public or private harassment
+ * Publishing others' private information, such as physical or electronic addresses,
+ without explicit permission
+ * Other unethical or unprofessional conduct
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments,
+commits, code, wiki edits, issues, and other contributions that are not aligned to this
+Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors
+that they deem inappropriate, threatening, offensive, or harmful.
+
+By adopting this Code of Conduct, project maintainers commit themselves to fairly and
+consistently applying these principles to every aspect of managing this project. Project
+maintainers who do not follow or enforce the Code of Conduct may be permanently removed
+from the project team.
+
+This Code of Conduct applies both within project spaces and in public spaces when an
+individual is representing the project or its community.
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by
+contacting a project maintainer at [info@rabbitmq.com](mailto:info@rabbitmq.com). All complaints will
+be reviewed and investigated and will result in a response that is deemed necessary and
+appropriate to the circumstances. Maintainers are obligated to maintain confidentiality
+with regard to the reporter of an incident.
+
+This Code of Conduct is adapted from the
+[Contributor Covenant](https://contributor-covenant.org), version 1.3.0, available at
+[contributor-covenant.org/version/1/3/0/](https://contributor-covenant.org/version/1/3/0/)
diff --git a/deps/rabbitmq_event_exchange/CONTRIBUTING.md b/deps/rabbitmq_event_exchange/CONTRIBUTING.md
new file mode 100644
index 0000000000..42af1f7517
--- /dev/null
+++ b/deps/rabbitmq_event_exchange/CONTRIBUTING.md
@@ -0,0 +1,123 @@
+Thank you for using RabbitMQ and for taking the time to contribute to the project.
+This document has two main parts:
+
+ * when and how to file GitHub issues for RabbitMQ projects
+ * how to submit pull requests
+
+They are intended to save you and the RabbitMQ maintainers some time, so
+please take a moment to read through them.
+
+## Overview
+
+### GitHub issues
+
+The RabbitMQ team uses GitHub issues for _specific actionable items_ that
+engineers can work on. This assumes the following:
+
+* GitHub issues are not used for questions, investigations, root cause
+  analysis, discussions of potential issues, etc. (as defined by this team)
+* Enough information is provided by the reporter for maintainers to work with
+
+The team receives many questions through various venues every single
+day. Frequently, these questions do not include the necessary details
+the team needs to begin useful work. GitHub issues can very quickly
+turn into a something impossible to navigate and make sense
+of. Because of this, questions, investigations, root cause analysis,
+and discussions of potential features are all considered to be
+[mailing list][rmq-users] material. If you are unsure where to begin,
+the [RabbitMQ users mailing list][rmq-users] is the right place.
+
+Getting all the details necessary to reproduce an issue, make a
+conclusion or even form a hypothesis about what's happening can take a
+fair amount of time. Please help others help you by providing a way to
+reproduce the behavior you're observing, or at least sharing as much
+relevant information as possible on the [RabbitMQ users mailing
+list][rmq-users].
+
+Please provide versions of the software used:
+
+ * RabbitMQ server
+ * Erlang
+ * Operating system version (and distribution, if applicable)
+ * All client libraries used
+ * RabbitMQ plugins (if applicable)
+
+The following information greatly helps in investigating and reproducing issues:
+
+ * RabbitMQ server logs
+ * A code example or terminal transcript that can be used to reproduce
+ * Full exception stack traces (a single line message is not enough!)
+ * `rabbitmqctl report` and `rabbitmqctl environment` output
+ * Other relevant details about the environment and workload, e.g. a traffic capture
+ * Feel free to edit out hostnames and other potentially sensitive information.
+
+To make collecting much of this and other environment information easier, use
+the [`rabbitmq-collect-env`][rmq-collect-env] script. It will produce an archive with
+server logs, operating system logs, output of certain diagnostics commands and so on.
+Please note that **no effort is made to scrub any information that may be sensitive**.
+
+### Pull Requests
+
+RabbitMQ projects use pull requests to discuss, collaborate on and accept code contributions.
+Pull requests are the primary place for discussing code changes.
+
+Here's the recommended workflow:
+
+ * [Fork the repository][github-fork] or repositories you plan on contributing to. If multiple
+ repositories are involved in addressing the same issue, please use the same branch name
+ in each repository
+ * Create a branch with a descriptive name in the relevant repositories
+ * Make your changes, run tests (usually with `make tests`), commit with a
+ [descriptive message][git-commit-msgs], push to your fork
+ * Submit pull requests with an explanation of what has been changed and **why**
+ * Submit a filled out and signed [Contributor Agreement][ca-agreement] if needed (see below)
+ * Be patient. We will get to your pull request eventually
+
+If what you are going to work on is a substantial change, please first
+ask the core team for their opinion on the [RabbitMQ users mailing list][rmq-users].
+
+## Running Tests
+
+To run a "fast suite" (a subset of tests):
+
+ make ct-fast
+
+To run a "slow suite" (a subset of tests that take much longer to run):
+
+ make ct-slow
+
+To run a particular suite:
+
+ make ct-$suite_name
+
+for example, to run the `backing_queue` suite:
+
+ make ct-backing_queue
+
+Finally,
+
+ make tests
+
+will run all suites.
+
+## Code of Conduct
+
+See [CODE_OF_CONDUCT.md](./CODE_OF_CONDUCT.md).
+
+## Contributor Agreement
+
+If you want to contribute a non-trivial change, please submit a signed
+copy of our [Contributor Agreement][ca-agreement] around the time you
+submit your pull request. This will make it much easier (in some
+cases, possible) for the RabbitMQ team at Pivotal to merge your
+contribution.
+
+## Where to Ask Questions
+
+If something isn't clear, feel free to ask on our [mailing list][rmq-users].
+
+[rmq-collect-env]: https://github.com/rabbitmq/support-tools/blob/master/scripts/rabbitmq-collect-env
+[git-commit-msgs]: https://chris.beams.io/posts/git-commit/
+[rmq-users]: https://groups.google.com/forum/#!forum/rabbitmq-users
+[ca-agreement]: https://cla.pivotal.io/sign/rabbitmq
+[github-fork]: https://help.github.com/articles/fork-a-repo/
diff --git a/deps/rabbitmq_event_exchange/LICENSE b/deps/rabbitmq_event_exchange/LICENSE
new file mode 100644
index 0000000000..f2da65d175
--- /dev/null
+++ b/deps/rabbitmq_event_exchange/LICENSE
@@ -0,0 +1,4 @@
+This package is licensed under the MPL 2.0. For the MPL 2.0, please see LICENSE-MPL-RabbitMQ.
+
+If you have any questions regarding licensing, please contact us at
+info@rabbitmq.com.
diff --git a/deps/rabbitmq_event_exchange/LICENSE-MPL-RabbitMQ b/deps/rabbitmq_event_exchange/LICENSE-MPL-RabbitMQ
new file mode 100644
index 0000000000..14e2f777f6
--- /dev/null
+++ b/deps/rabbitmq_event_exchange/LICENSE-MPL-RabbitMQ
@@ -0,0 +1,373 @@
+Mozilla Public License Version 2.0
+==================================
+
+1. Definitions
+--------------
+
+1.1. "Contributor"
+ means each individual or legal entity that creates, contributes to
+ the creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+ means the combination of the Contributions of others (if any) used
+ by a Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+ means Source Code Form to which the initial Contributor has attached
+ the notice in Exhibit A, the Executable Form of such Source Code
+ Form, and Modifications of such Source Code Form, in each case
+ including portions thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ (a) that the initial Contributor has attached the notice described
+ in Exhibit B to the Covered Software; or
+
+ (b) that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the
+ terms of a Secondary License.
+
+1.6. "Executable Form"
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+ means a work that combines Covered Software with other material, in
+ a separate file or files, that is not Covered Software.
+
+1.8. "License"
+ means this document.
+
+1.9. "Licensable"
+ means having the right to grant, to the maximum extent possible,
+ whether at the time of the initial grant or subsequently, any and
+ all of the rights conveyed by this License.
+
+1.10. "Modifications"
+ means any of the following:
+
+ (a) any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered
+ Software; or
+
+ (b) any new file in Source Code Form that contains any Covered
+ Software.
+
+1.11. "Patent Claims" of a Contributor
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the
+ License, by the making, using, selling, offering for sale, having
+ made, import, or transfer of either its Contributions or its
+ Contributor Version.
+
+1.12. "Secondary License"
+ means either the GNU General Public License, Version 2.0, the GNU
+ Lesser General Public License, Version 2.1, the GNU Affero General
+ Public License, Version 3.0, or any later versions of those
+ licenses.
+
+1.13. "Source Code Form"
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that
+ controls, is controlled by, or is under common control with You. For
+ purposes of this definition, "control" means (a) the power, direct
+ or indirect, to cause the direction or management of such entity,
+ whether by contract or otherwise, or (b) ownership of more than
+ fifty percent (50%) of the outstanding shares or beneficial
+ ownership of such entity.
+
+2. License Grants and Conditions
+--------------------------------
+
+2.1. Grants
+
+Each Contributor hereby grants You a world-wide, royalty-free,
+non-exclusive license:
+
+(a) under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+(b) under Patent Claims of such Contributor to make, use, sell, offer
+ for sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+The licenses granted in Section 2.1 with respect to any Contribution
+become effective for each Contribution on the date the Contributor first
+distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+The licenses granted in this Section 2 are the only rights granted under
+this License. No additional rights or licenses will be implied from the
+distribution or licensing of Covered Software under this License.
+Notwithstanding Section 2.1(b) above, no patent license is granted by a
+Contributor:
+
+(a) for any code that a Contributor has removed from Covered Software;
+ or
+
+(b) for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+(c) under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+This License does not grant any rights in the trademarks, service marks,
+or logos of any Contributor (except as may be necessary to comply with
+the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+No Contributor makes additional grants as a result of Your choice to
+distribute the Covered Software under a subsequent version of this
+License (see Section 10.2) or under the terms of a Secondary License (if
+permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+Each Contributor represents that the Contributor believes its
+Contributions are its original creation(s) or it has sufficient rights
+to grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+This License is not intended to limit any rights You have under
+applicable copyright doctrines of fair use, fair dealing, or other
+equivalents.
+
+2.7. Conditions
+
+Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
+in Section 2.1.
+
+3. Responsibilities
+-------------------
+
+3.1. Distribution of Source Form
+
+All distribution of Covered Software in Source Code Form, including any
+Modifications that You create or to which You contribute, must be under
+the terms of this License. You must inform recipients that the Source
+Code Form of the Covered Software is governed by the terms of this
+License, and how they can obtain a copy of this License. You may not
+attempt to alter or restrict the recipients' rights in the Source Code
+Form.
+
+3.2. Distribution of Executable Form
+
+If You distribute Covered Software in Executable Form then:
+
+(a) such Covered Software must also be made available in Source Code
+ Form, as described in Section 3.1, and You must inform recipients of
+ the Executable Form how they can obtain a copy of such Source Code
+ Form by reasonable means in a timely manner, at a charge no more
+ than the cost of distribution to the recipient; and
+
+(b) You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter
+ the recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+You may create and distribute a Larger Work under terms of Your choice,
+provided that You also comply with the requirements of this License for
+the Covered Software. If the Larger Work is a combination of Covered
+Software with a work governed by one or more Secondary Licenses, and the
+Covered Software is not Incompatible With Secondary Licenses, this
+License permits You to additionally distribute such Covered Software
+under the terms of such Secondary License(s), so that the recipient of
+the Larger Work may, at their option, further distribute the Covered
+Software under the terms of either this License or such Secondary
+License(s).
+
+3.4. Notices
+
+You may not remove or alter the substance of any license notices
+(including copyright notices, patent notices, disclaimers of warranty,
+or limitations of liability) contained within the Source Code Form of
+the Covered Software, except that You may alter any license notices to
+the extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+You may choose to offer, and to charge a fee for, warranty, support,
+indemnity or liability obligations to one or more recipients of Covered
+Software. However, You may do so only on Your own behalf, and not on
+behalf of any Contributor. You must make it absolutely clear that any
+such warranty, support, indemnity, or liability obligation is offered by
+You alone, and You hereby agree to indemnify every Contributor for any
+liability incurred by such Contributor as a result of warranty, support,
+indemnity or liability terms You offer. You may include additional
+disclaimers of warranty and limitations of liability specific to any
+jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+---------------------------------------------------
+
+If it is impossible for You to comply with any of the terms of this
+License with respect to some or all of the Covered Software due to
+statute, judicial order, or regulation then You must: (a) comply with
+the terms of this License to the maximum extent possible; and (b)
+describe the limitations and the code they affect. Such description must
+be placed in a text file included with all distributions of the Covered
+Software under this License. Except to the extent prohibited by statute
+or regulation, such description must be sufficiently detailed for a
+recipient of ordinary skill to be able to understand it.
+
+5. Termination
+--------------
+
+5.1. The rights granted under this License will terminate automatically
+if You fail to comply with any of its terms. However, if You become
+compliant, then the rights granted under this License from a particular
+Contributor are reinstated (a) provisionally, unless and until such
+Contributor explicitly and finally terminates Your grants, and (b) on an
+ongoing basis, if such Contributor fails to notify You of the
+non-compliance by some reasonable means prior to 60 days after You have
+come back into compliance. Moreover, Your grants from a particular
+Contributor are reinstated on an ongoing basis if such Contributor
+notifies You of the non-compliance by some reasonable means, this is the
+first time You have received notice of non-compliance with this License
+from such Contributor, and You become compliant prior to 30 days after
+Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+infringement claim (excluding declaratory judgment actions,
+counter-claims, and cross-claims) alleging that a Contributor Version
+directly or indirectly infringes any patent, then the rights granted to
+You by any and all Contributors for the Covered Software under Section
+2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all
+end user license agreements (excluding distributors and resellers) which
+have been validly granted by You or Your distributors under this License
+prior to termination shall survive termination.
+
+************************************************************************
+* *
+* 6. Disclaimer of Warranty *
+* ------------------------- *
+* *
+* Covered Software is provided under this License on an "as is" *
+* basis, without warranty of any kind, either expressed, implied, or *
+* statutory, including, without limitation, warranties that the *
+* Covered Software is free of defects, merchantable, fit for a *
+* particular purpose or non-infringing. The entire risk as to the *
+* quality and performance of the Covered Software is with You. *
+* Should any Covered Software prove defective in any respect, You *
+* (not any Contributor) assume the cost of any necessary servicing, *
+* repair, or correction. This disclaimer of warranty constitutes an *
+* essential part of this License. No use of any Covered Software is *
+* authorized under this License except under this disclaimer. *
+* *
+************************************************************************
+
+************************************************************************
+* *
+* 7. Limitation of Liability *
+* -------------------------- *
+* *
+* Under no circumstances and under no legal theory, whether tort *
+* (including negligence), contract, or otherwise, shall any *
+* Contributor, or anyone who distributes Covered Software as *
+* permitted above, be liable to You for any direct, indirect, *
+* special, incidental, or consequential damages of any character *
+* including, without limitation, damages for lost profits, loss of *
+* goodwill, work stoppage, computer failure or malfunction, or any *
+* and all other commercial damages or losses, even if such party *
+* shall have been informed of the possibility of such damages. This *
+* limitation of liability shall not apply to liability for death or *
+* personal injury resulting from such party's negligence to the *
+* extent applicable law prohibits such limitation. Some *
+* jurisdictions do not allow the exclusion or limitation of *
+* incidental or consequential damages, so this exclusion and *
+* limitation may not apply to You. *
+* *
+************************************************************************
+
+8. Litigation
+-------------
+
+Any litigation relating to this License may be brought only in the
+courts of a jurisdiction where the defendant maintains its principal
+place of business and such litigation shall be governed by laws of that
+jurisdiction, without reference to its conflict-of-law provisions.
+Nothing in this Section shall prevent a party's ability to bring
+cross-claims or counter-claims.
+
+9. Miscellaneous
+----------------
+
+This License represents the complete agreement concerning the subject
+matter hereof. If any provision of this License is held to be
+unenforceable, such provision shall be reformed only to the extent
+necessary to make it enforceable. Any law or regulation which provides
+that the language of a contract shall be construed against the drafter
+shall not be used to construe this License against a Contributor.
+
+10. Versions of the License
+---------------------------
+
+10.1. New Versions
+
+Mozilla Foundation is the license steward. Except as provided in Section
+10.3, no one other than the license steward has the right to modify or
+publish new versions of this License. Each version will be given a
+distinguishing version number.
+
+10.2. Effect of New Versions
+
+You may distribute the Covered Software under the terms of the version
+of the License under which You originally received the Covered Software,
+or under the terms of any subsequent version published by the license
+steward.
+
+10.3. Modified Versions
+
+If you create software not governed by this License, and you want to
+create a new license for such software, you may create and use a
+modified version of this License if you rename the license and remove
+any references to the name of the license steward (except to note that
+such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+Licenses
+
+If You choose to distribute Source Code Form that is Incompatible With
+Secondary Licenses under the terms of this version of the License, the
+notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+-------------------------------------------
+
+ This Source Code Form is subject to the terms of the Mozilla Public
+ License, v. 2.0. If a copy of the MPL was not distributed with this
+ file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular
+file, then You may include the notice in a location (such as a LICENSE
+file in a relevant directory) where a recipient would be likely to look
+for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+---------------------------------------------------------
+
+ This Source Code Form is "Incompatible With Secondary Licenses", as
+ defined by the Mozilla Public License, v. 2.0.
diff --git a/deps/rabbitmq_event_exchange/Makefile b/deps/rabbitmq_event_exchange/Makefile
new file mode 100644
index 0000000000..3e04c49c34
--- /dev/null
+++ b/deps/rabbitmq_event_exchange/Makefile
@@ -0,0 +1,20 @@
+PROJECT = rabbitmq_event_exchange
+PROJECT_DESCRIPTION = Event Exchange Type
+
+define PROJECT_APP_EXTRA_KEYS
+ {broker_version_requirements, []}
+endef
+
+DEPS = rabbit_common rabbit
+TEST_DEPS = rabbitmq_ct_helpers rabbitmq_ct_client_helpers amqp_client
+
+DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-plugin.mk
+DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk
+
+# FIXME: Use erlang.mk patched for RabbitMQ, while waiting for PRs to be
+# reviewed and merged.
+ERLANG_MK_REPO = https://github.com/rabbitmq/erlang.mk.git
+ERLANG_MK_COMMIT = rabbitmq-tmp
+
+include rabbitmq-components.mk
+include erlang.mk
diff --git a/deps/rabbitmq_event_exchange/README.md b/deps/rabbitmq_event_exchange/README.md
new file mode 100644
index 0000000000..3869cb23aa
--- /dev/null
+++ b/deps/rabbitmq_event_exchange/README.md
@@ -0,0 +1,169 @@
+# RabbitMQ Event Exchange
+
+## Overview
+
+This plugin exposes the internal RabbitMQ event mechanism as messages that clients
+can consume. It is useful if you want to keep track of certain events, e.g. when
+queues, exchanges, bindings, users, connections, or channels are created and
+deleted. The plugin filters out stats events, so for statistics you are almost
+certainly better off using the management plugin.
+
+## How it Works
+
+It declares a topic exchange called 'amq.rabbitmq.event' in the default
+virtual host. All events are published to this exchange with routing
+keys such as 'exchange.created' and 'binding.deleted', so you can
+subscribe to only the events you're interested in.
+
+The exchange behaves similarly to 'amq.rabbitmq.log': everything gets
+published there; if you don't trust a user with the information that
+gets published, don't allow them access.
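+
+As a minimal sketch, a consumer binds its own queue to the exchange with a
+routing-key pattern and receives events as deliveries. This uses the Erlang
+`amqp_client` library; the `queue.*` binding, the module name, and the local
+default credentials are illustrative assumptions, not requirements:
+
+```erlang
+%% Sketch: consume queue.* events from amq.rabbitmq.event.
+%% Assumes a local broker with default credentials.
+-module(event_consumer).
+-export([start/0]).
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+start() ->
+    {ok, Conn} = amqp_connection:start(#amqp_params_network{}),
+    {ok, Ch} = amqp_connection:open_channel(Conn),
+    %% Declare an exclusive, server-named queue for this consumer.
+    #'queue.declare_ok'{queue = Q} =
+        amqp_channel:call(Ch, #'queue.declare'{exclusive = true}),
+    %% 'queue.*' matches queue.created, queue.deleted and so on.
+    #'queue.bind_ok'{} =
+        amqp_channel:call(Ch, #'queue.bind'{queue       = Q,
+                                            exchange    = <<"amq.rabbitmq.event">>,
+                                            routing_key = <<"queue.*">>}),
+    #'basic.consume_ok'{} =
+        amqp_channel:subscribe(Ch, #'basic.consume'{queue = Q, no_ack = true},
+                               self()),
+    loop().
+
+loop() ->
+    receive
+        #'basic.consume_ok'{} ->
+            loop();
+        {#'basic.deliver'{routing_key = Key}, #amqp_msg{}} ->
+            io:format("event: ~s~n", [Key]),
+            loop()
+    end.
+```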
+
+
+## Installation
+
+### With RabbitMQ 3.6.0 or Later
+
+As of RabbitMQ `3.6.0`, this plugin is included in the RabbitMQ distribution.
+
+Enable it with the following command:
+
+```bash
+rabbitmq-plugins enable rabbitmq_event_exchange
+```
+
+### With RabbitMQ 3.5.x
+
+You can download a pre-built binary of this plugin from
+the [RabbitMQ Community Plugins](https://www.rabbitmq.com/community-plugins.html) page.
+
+Then run the following command:
+
+```bash
+rabbitmq-plugins enable rabbitmq_event_exchange
+```
+
+## Event format
+
+Each event has various properties associated with it. These are
+translated into AMQP 0-9-1 data encoding and inserted in the message headers. The
+**message body is always blank**.
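+
+For example, with the Erlang client the properties arrive as a standard
+AMQP 0-9-1 header table, i.e. a list of `{Name, Type, Value}` tuples. A
+minimal sketch (the `<<"name">>` key is an assumption for illustration;
+the exact header names depend on the event type):
+
+```erlang
+%% Sketch: read event properties from a delivered message's headers.
+%% Uses the same amqp_client records as the consumer sketch above.
+handle_delivery({#'basic.deliver'{routing_key = Key},
+                 #amqp_msg{props = #'P_basic'{headers = Headers}}}) ->
+    %% Headers is an AMQP table: [{Name, Type, Value}, ...].
+    case lists:keyfind(<<"name">>, 1, Headers) of
+        {<<"name">>, longstr, Name} ->
+            io:format("~s: ~s~n", [Key, Name]);
+        false ->
+            io:format("~s (no name header)~n", [Key])
+    end.
+```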
+
+## Events
+
+So far RabbitMQ and related plugins emit events with the following routing keys:
+
+### RabbitMQ Broker
+
+Queue, Exchange and Binding events:
+
+ * `queue.deleted`
+ * `queue.created`
+ * `exchange.created`
+ * `exchange.deleted`
+ * `binding.created`
+ * `binding.deleted`
+
+Connection and Channel events:
+
+ * `connection.created`
+ * `connection.closed`
+ * `channel.created`
+ * `channel.closed`
+
+Consumer events:
+
+ * `consumer.created`
+ * `consumer.deleted`
+
+Policy and Parameter events:
+
+ * `policy.set`
+ * `policy.cleared`
+ * `parameter.set`
+ * `parameter.cleared`
+
+Virtual host events:
+
+ * `vhost.created`
+ * `vhost.deleted`
+ * `vhost.limits.set`
+ * `vhost.limits.cleared`
+
+User related events:
+
+ * `user.authentication.success`
+ * `user.authentication.failure`
+ * `user.created`
+ * `user.deleted`
+ * `user.password.changed`
+ * `user.password.cleared`
+ * `user.tags.set`
+
+Permission events:
+
+ * `permission.created`
+ * `permission.deleted`
+ * `topic.permission.created`
+ * `topic.permission.deleted`
+
+Alarm events:
+
+ * `alarm.set`
+ * `alarm.cleared`
+
+### Shovel Plugin
+
+Worker events:
+
+ * `shovel.worker.status`
+ * `shovel.worker.removed`
+
+### Federation Plugin
+
+Link events:
+
+ * `federation.link.status`
+ * `federation.link.removed`
+
+## Example
+
+There is a usage example using the Java client in `examples/java`.
+
+
+## Configuration
+
+ * `rabbitmq_event_exchange.vhost`: the vhost in which the `amq.rabbitmq.event` exchange is declared. Default: `rabbit.default_vhost` (`<<"/">>`). See the sketch below.
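+
+A minimal sketch of setting this in the classic Erlang-term config file
+(`accounting` is a hypothetical vhost name used for illustration):
+
+```erlang
+%% advanced.config / rabbitmq.config sketch: declare amq.rabbitmq.event
+%% in the "accounting" vhost instead of the default one.
+[
+  {rabbitmq_event_exchange, [
+    {vhost, <<"accounting">>}
+  ]}
+].
+```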
+
+
+## Uninstalling
+
+If you want to remove the exchange which this plugin creates, first
+disable the plugin and restart the broker. Then you can delete the exchange,
+e.g. with:
+
+ rabbitmqctl eval 'rabbit_exchange:delete(rabbit_misc:r(<<"/">>, exchange, <<"amq.rabbitmq.event">>), false).'
+
+
+## Building from Source
+
+Building is no different from [building other RabbitMQ plugins](https://www.rabbitmq.com/plugin-development.html).
+
+TL;DR:
+
+    git clone https://github.com/rabbitmq/rabbitmq-public-umbrella.git umbrella
+ cd umbrella
+ make co
+ make up BRANCH=stable
+ cd deps
+ git clone https://github.com/rabbitmq/rabbitmq-event-exchange.git rabbitmq_event_exchange
+ cd rabbitmq_event_exchange
+ make dist
+
+
+## License
+
+Released under the Mozilla Public License 2.0,
+the same as RabbitMQ.
diff --git a/deps/rabbitmq_event_exchange/erlang.mk b/deps/rabbitmq_event_exchange/erlang.mk
new file mode 100644
index 0000000000..fce4be0b0a
--- /dev/null
+++ b/deps/rabbitmq_event_exchange/erlang.mk
@@ -0,0 +1,7808 @@
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+.PHONY: all app apps deps search rel relup docs install-docs check tests clean distclean help erlang-mk
+
+ERLANG_MK_FILENAME := $(realpath $(lastword $(MAKEFILE_LIST)))
+export ERLANG_MK_FILENAME
+
+ERLANG_MK_VERSION = 2019.07.01-40-geb3e4b0
+ERLANG_MK_WITHOUT =
+
+# Make 3.81 and 3.82 are deprecated.
+
+ifeq ($(MAKELEVEL)$(MAKE_VERSION),03.81)
+$(warning Please upgrade to GNU Make 4 or later: https://erlang.mk/guide/installation.html)
+endif
+
+ifeq ($(MAKELEVEL)$(MAKE_VERSION),03.82)
+$(warning Please upgrade to GNU Make 4 or later: https://erlang.mk/guide/installation.html)
+endif
+
+# Core configuration.
+
+PROJECT ?= $(notdir $(CURDIR))
+PROJECT := $(strip $(PROJECT))
+
+PROJECT_VERSION ?= rolling
+PROJECT_MOD ?= $(PROJECT)_app
+PROJECT_ENV ?= []
+
+# Verbosity.
+
+V ?= 0
+
+verbose_0 = @
+verbose_2 = set -x;
+verbose = $(verbose_$(V))
+
+ifeq ($(V),3)
+SHELL := $(SHELL) -x
+endif
+
+gen_verbose_0 = @echo " GEN " $@;
+gen_verbose_2 = set -x;
+gen_verbose = $(gen_verbose_$(V))
+
+gen_verbose_esc_0 = @echo " GEN " $$@;
+gen_verbose_esc_2 = set -x;
+gen_verbose_esc = $(gen_verbose_esc_$(V))
+
+# Temporary files directory.
+
+ERLANG_MK_TMP ?= $(CURDIR)/.erlang.mk
+export ERLANG_MK_TMP
+
+# "erl" command.
+
+ERL = erl +A1 -noinput -boot no_dot_erlang
+
+# Platform detection.
+
+ifeq ($(PLATFORM),)
+UNAME_S := $(shell uname -s)
+
+ifeq ($(UNAME_S),Linux)
+PLATFORM = linux
+else ifeq ($(UNAME_S),Darwin)
+PLATFORM = darwin
+else ifeq ($(UNAME_S),SunOS)
+PLATFORM = solaris
+else ifeq ($(UNAME_S),GNU)
+PLATFORM = gnu
+else ifeq ($(UNAME_S),FreeBSD)
+PLATFORM = freebsd
+else ifeq ($(UNAME_S),NetBSD)
+PLATFORM = netbsd
+else ifeq ($(UNAME_S),OpenBSD)
+PLATFORM = openbsd
+else ifeq ($(UNAME_S),DragonFly)
+PLATFORM = dragonfly
+else ifeq ($(shell uname -o),Msys)
+PLATFORM = msys2
+else
+$(error Unable to detect platform. Please open a ticket with the output of uname -a.)
+endif
+
+export PLATFORM
+endif
+
+# Core targets.
+
+all:: deps app rel
+
+# Noop to avoid a Make warning when there's nothing to do.
+rel::
+ $(verbose) :
+
+relup:: deps app
+
+check:: tests
+
+clean:: clean-crashdump
+
+clean-crashdump:
+ifneq ($(wildcard erl_crash.dump),)
+ $(gen_verbose) rm -f erl_crash.dump
+endif
+
+distclean:: clean distclean-tmp
+
+$(ERLANG_MK_TMP):
+ $(verbose) mkdir -p $(ERLANG_MK_TMP)
+
+distclean-tmp:
+ $(gen_verbose) rm -rf $(ERLANG_MK_TMP)
+
+help::
+ $(verbose) printf "%s\n" \
+ "erlang.mk (version $(ERLANG_MK_VERSION)) is distributed under the terms of the ISC License." \
+ "Copyright (c) 2013-2016 Loïc Hoguin <essen@ninenines.eu>" \
+ "" \
+ "Usage: [V=1] $(MAKE) [target]..." \
+ "" \
+ "Core targets:" \
+ " all Run deps, app and rel targets in that order" \
+ " app Compile the project" \
+ " deps Fetch dependencies (if needed) and compile them" \
+ " fetch-deps Fetch dependencies recursively (if needed) without compiling them" \
+ " list-deps List dependencies recursively on stdout" \
+ " search q=... Search for a package in the built-in index" \
+ " rel Build a release for this project, if applicable" \
+ " docs Build the documentation for this project" \
+ " install-docs Install the man pages for this project" \
+ " check Compile and run all tests and analysis for this project" \
+ " tests Run the tests for this project" \
+ " clean Delete temporary and output files from most targets" \
+ " distclean Delete all temporary and output files" \
+ " help Display this help and exit" \
+ " erlang-mk Update erlang.mk to the latest version"
+
+# Core functions.
+
+empty :=
+space := $(empty) $(empty)
+tab := $(empty) $(empty)
+comma := ,
+
+define newline
+
+
+endef
+
+define comma_list
+$(subst $(space),$(comma),$(strip $(1)))
+endef
+
+define escape_dquotes
+$(subst ",\",$1)
+endef
+
+# Adding erlang.mk to make Erlang scripts that call init:get_plain_arguments() happy.
+define erlang
+$(ERL) $2 -pz $(ERLANG_MK_TMP)/rebar/ebin -eval "$(subst $(newline),,$(call escape_dquotes,$1))" -- erlang.mk
+endef
+
+ifeq ($(PLATFORM),msys2)
+core_native_path = $(shell cygpath -m $1)
+else
+core_native_path = $1
+endif
+
+core_http_get = curl -Lf$(if $(filter-out 0,$(V)),,s)o $(call core_native_path,$1) $2
+
+core_eq = $(and $(findstring $(1),$(2)),$(findstring $(2),$(1)))
+
+# We skip files that contain spaces because they end up causing issues.
+core_find = $(if $(wildcard $1),$(shell find $(1:%/=%) \( -type l -o -type f \) -name $(subst *,\*,$2) | grep -v " "))
+
+core_lc = $(subst A,a,$(subst B,b,$(subst C,c,$(subst D,d,$(subst E,e,$(subst F,f,$(subst G,g,$(subst H,h,$(subst I,i,$(subst J,j,$(subst K,k,$(subst L,l,$(subst M,m,$(subst N,n,$(subst O,o,$(subst P,p,$(subst Q,q,$(subst R,r,$(subst S,s,$(subst T,t,$(subst U,u,$(subst V,v,$(subst W,w,$(subst X,x,$(subst Y,y,$(subst Z,z,$(1)))))))))))))))))))))))))))
+
+core_ls = $(filter-out $(1),$(shell echo $(1)))
+
+# @todo Use a solution that does not require using perl.
+core_relpath = $(shell perl -e 'use File::Spec; print File::Spec->abs2rel(@ARGV) . "\n"' $1 $2)
+
+define core_render
+ printf -- '$(subst $(newline),\n,$(subst %,%%,$(subst ','\'',$(subst $(tab),$(WS),$(call $(1))))))\n' > $(2)
+endef
+
+# Automated update.
+
+ERLANG_MK_REPO ?= https://github.com/ninenines/erlang.mk
+ERLANG_MK_COMMIT ?=
+ERLANG_MK_BUILD_CONFIG ?= build.config
+ERLANG_MK_BUILD_DIR ?= .erlang.mk.build
+
+erlang-mk: WITHOUT ?= $(ERLANG_MK_WITHOUT)
+erlang-mk:
+ifdef ERLANG_MK_COMMIT
+ $(verbose) git clone $(ERLANG_MK_REPO) $(ERLANG_MK_BUILD_DIR)
+ $(verbose) cd $(ERLANG_MK_BUILD_DIR) && git checkout $(ERLANG_MK_COMMIT)
+else
+ $(verbose) git clone --depth 1 $(ERLANG_MK_REPO) $(ERLANG_MK_BUILD_DIR)
+endif
+ $(verbose) if [ -f $(ERLANG_MK_BUILD_CONFIG) ]; then cp $(ERLANG_MK_BUILD_CONFIG) $(ERLANG_MK_BUILD_DIR)/build.config; fi
+ $(gen_verbose) $(MAKE) --no-print-directory -C $(ERLANG_MK_BUILD_DIR) WITHOUT='$(strip $(WITHOUT))' UPGRADE=1
+ $(verbose) cp $(ERLANG_MK_BUILD_DIR)/erlang.mk ./erlang.mk
+ $(verbose) rm -rf $(ERLANG_MK_BUILD_DIR)
+ $(verbose) rm -rf $(ERLANG_MK_TMP)
+
+# The erlang.mk package index is bundled in the default erlang.mk build.
+# Search for the string "copyright" to skip to the rest of the code.
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-kerl
+
+KERL_INSTALL_DIR ?= $(HOME)/erlang
+
+ifeq ($(strip $(KERL)),)
+KERL := $(ERLANG_MK_TMP)/kerl/kerl
+endif
+
+KERL_DIR = $(ERLANG_MK_TMP)/kerl
+
+export KERL
+
+KERL_GIT ?= https://github.com/kerl/kerl
+KERL_COMMIT ?= master
+
+KERL_MAKEFLAGS ?=
+
+OTP_GIT ?= https://github.com/erlang/otp
+
+define kerl_otp_target
+$(KERL_INSTALL_DIR)/$(1): $(KERL)
+ $(verbose) if [ ! -d $$@ ]; then \
+ MAKEFLAGS="$(KERL_MAKEFLAGS)" $(KERL) build git $(OTP_GIT) $(1) $(1); \
+ $(KERL) install $(1) $(KERL_INSTALL_DIR)/$(1); \
+ fi
+endef
+
+define kerl_hipe_target
+$(KERL_INSTALL_DIR)/$1-native: $(KERL)
+ $(verbose) if [ ! -d $$@ ]; then \
+ KERL_CONFIGURE_OPTIONS=--enable-native-libs \
+ MAKEFLAGS="$(KERL_MAKEFLAGS)" $(KERL) build git $(OTP_GIT) $1 $1-native; \
+ $(KERL) install $1-native $(KERL_INSTALL_DIR)/$1-native; \
+ fi
+endef
+
+$(KERL): $(KERL_DIR)
+
+$(KERL_DIR): | $(ERLANG_MK_TMP)
+ $(gen_verbose) git clone --depth 1 $(KERL_GIT) $(ERLANG_MK_TMP)/kerl
+ $(verbose) cd $(ERLANG_MK_TMP)/kerl && git checkout $(KERL_COMMIT)
+ $(verbose) chmod +x $(KERL)
+
+distclean:: distclean-kerl
+
+distclean-kerl:
+ $(gen_verbose) rm -rf $(KERL_DIR)
+
+# Allow users to select which version of Erlang/OTP to use for a project.
+
+ifneq ($(strip $(LATEST_ERLANG_OTP)),)
+# In some environments it is necessary to filter out master.
+ERLANG_OTP := $(notdir $(lastword $(sort\
+ $(filter-out $(KERL_INSTALL_DIR)/master $(KERL_INSTALL_DIR)/OTP_R%,\
+ $(filter-out %-rc1 %-rc2 %-rc3,$(wildcard $(KERL_INSTALL_DIR)/*[^-native]))))))
+endif
+
+ERLANG_OTP ?=
+ERLANG_HIPE ?=
+
+# Use kerl to enforce a specific Erlang/OTP version for a project.
+ifneq ($(strip $(ERLANG_OTP)),)
+export PATH := $(KERL_INSTALL_DIR)/$(ERLANG_OTP)/bin:$(PATH)
+SHELL := env PATH=$(PATH) $(SHELL)
+$(eval $(call kerl_otp_target,$(ERLANG_OTP)))
+
+# Build Erlang/OTP only if it doesn't already exist.
+ifeq ($(wildcard $(KERL_INSTALL_DIR)/$(ERLANG_OTP))$(BUILD_ERLANG_OTP),)
+$(info Building Erlang/OTP $(ERLANG_OTP)... Please wait...)
+$(shell $(MAKE) $(KERL_INSTALL_DIR)/$(ERLANG_OTP) ERLANG_OTP=$(ERLANG_OTP) BUILD_ERLANG_OTP=1 >&2)
+endif
+
+else
+# Same for a HiPE enabled VM.
+ifneq ($(strip $(ERLANG_HIPE)),)
+export PATH := $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native/bin:$(PATH)
+SHELL := env PATH=$(PATH) $(SHELL)
+$(eval $(call kerl_hipe_target,$(ERLANG_HIPE)))
+
+# Build Erlang/OTP only if it doesn't already exist.
+ifeq ($(wildcard $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native)$(BUILD_ERLANG_OTP),)
+$(info Building HiPE-enabled Erlang/OTP $(ERLANG_HIPE)... Please wait...)
+$(shell $(MAKE) $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native ERLANG_HIPE=$(ERLANG_HIPE) BUILD_ERLANG_OTP=1 >&2)
+endif
+
+endif
+endif
+
+PACKAGES += aberth
+pkg_aberth_name = aberth
+pkg_aberth_description = Generic BERT-RPC server in Erlang
+pkg_aberth_homepage = https://github.com/a13x/aberth
+pkg_aberth_fetch = git
+pkg_aberth_repo = https://github.com/a13x/aberth
+pkg_aberth_commit = master
+
+PACKAGES += active
+pkg_active_name = active
+pkg_active_description = Active development for Erlang: rebuild and reload source/binary files while the VM is running
+pkg_active_homepage = https://github.com/proger/active
+pkg_active_fetch = git
+pkg_active_repo = https://github.com/proger/active
+pkg_active_commit = master
+
+PACKAGES += actordb_core
+pkg_actordb_core_name = actordb_core
+pkg_actordb_core_description = ActorDB main source
+pkg_actordb_core_homepage = http://www.actordb.com/
+pkg_actordb_core_fetch = git
+pkg_actordb_core_repo = https://github.com/biokoda/actordb_core
+pkg_actordb_core_commit = master
+
+PACKAGES += actordb_thrift
+pkg_actordb_thrift_name = actordb_thrift
+pkg_actordb_thrift_description = Thrift API for ActorDB
+pkg_actordb_thrift_homepage = http://www.actordb.com/
+pkg_actordb_thrift_fetch = git
+pkg_actordb_thrift_repo = https://github.com/biokoda/actordb_thrift
+pkg_actordb_thrift_commit = master
+
+PACKAGES += aleppo
+pkg_aleppo_name = aleppo
+pkg_aleppo_description = Alternative Erlang Pre-Processor
+pkg_aleppo_homepage = https://github.com/ErlyORM/aleppo
+pkg_aleppo_fetch = git
+pkg_aleppo_repo = https://github.com/ErlyORM/aleppo
+pkg_aleppo_commit = master
+
+PACKAGES += alog
+pkg_alog_name = alog
+pkg_alog_description = Simply the best logging framework for Erlang
+pkg_alog_homepage = https://github.com/siberian-fast-food/alogger
+pkg_alog_fetch = git
+pkg_alog_repo = https://github.com/siberian-fast-food/alogger
+pkg_alog_commit = master
+
+PACKAGES += amqp_client
+pkg_amqp_client_name = amqp_client
+pkg_amqp_client_description = RabbitMQ Erlang AMQP client
+pkg_amqp_client_homepage = https://www.rabbitmq.com/erlang-client-user-guide.html
+pkg_amqp_client_fetch = git
+pkg_amqp_client_repo = https://github.com/rabbitmq/rabbitmq-erlang-client.git
+pkg_amqp_client_commit = master
+
+PACKAGES += annotations
+pkg_annotations_name = annotations
+pkg_annotations_description = Simple code instrumentation utilities
+pkg_annotations_homepage = https://github.com/hyperthunk/annotations
+pkg_annotations_fetch = git
+pkg_annotations_repo = https://github.com/hyperthunk/annotations
+pkg_annotations_commit = master
+
+PACKAGES += antidote
+pkg_antidote_name = antidote
+pkg_antidote_description = Large-scale computation without synchronisation
+pkg_antidote_homepage = https://syncfree.lip6.fr/
+pkg_antidote_fetch = git
+pkg_antidote_repo = https://github.com/SyncFree/antidote
+pkg_antidote_commit = master
+
+PACKAGES += apns
+pkg_apns_name = apns
+pkg_apns_description = Apple Push Notification Server for Erlang
+pkg_apns_homepage = http://inaka.github.com/apns4erl
+pkg_apns_fetch = git
+pkg_apns_repo = https://github.com/inaka/apns4erl
+pkg_apns_commit = master
+
+PACKAGES += asciideck
+pkg_asciideck_name = asciideck
+pkg_asciideck_description = Asciidoc for Erlang.
+pkg_asciideck_homepage = https://ninenines.eu
+pkg_asciideck_fetch = git
+pkg_asciideck_repo = https://github.com/ninenines/asciideck
+pkg_asciideck_commit = master
+
+PACKAGES += azdht
+pkg_azdht_name = azdht
+pkg_azdht_description = Azureus Distributed Hash Table (DHT) in Erlang
+pkg_azdht_homepage = https://github.com/arcusfelis/azdht
+pkg_azdht_fetch = git
+pkg_azdht_repo = https://github.com/arcusfelis/azdht
+pkg_azdht_commit = master
+
+PACKAGES += backoff
+pkg_backoff_name = backoff
+pkg_backoff_description = Simple exponential backoffs in Erlang
+pkg_backoff_homepage = https://github.com/ferd/backoff
+pkg_backoff_fetch = git
+pkg_backoff_repo = https://github.com/ferd/backoff
+pkg_backoff_commit = master
+
+PACKAGES += barrel_tcp
+pkg_barrel_tcp_name = barrel_tcp
+pkg_barrel_tcp_description = barrel is a generic TCP acceptor pool with low latency in Erlang.
+pkg_barrel_tcp_homepage = https://github.com/benoitc-attic/barrel_tcp
+pkg_barrel_tcp_fetch = git
+pkg_barrel_tcp_repo = https://github.com/benoitc-attic/barrel_tcp
+pkg_barrel_tcp_commit = master
+
+PACKAGES += basho_bench
+pkg_basho_bench_name = basho_bench
+pkg_basho_bench_description = A load-generation and testing tool for basically whatever you can write a returning Erlang function for.
+pkg_basho_bench_homepage = https://github.com/basho/basho_bench
+pkg_basho_bench_fetch = git
+pkg_basho_bench_repo = https://github.com/basho/basho_bench
+pkg_basho_bench_commit = master
+
+PACKAGES += bcrypt
+pkg_bcrypt_name = bcrypt
+pkg_bcrypt_description = Bcrypt Erlang / C library
+pkg_bcrypt_homepage = https://github.com/erlangpack/bcrypt
+pkg_bcrypt_fetch = git
+pkg_bcrypt_repo = https://github.com/erlangpack/bcrypt.git
+pkg_bcrypt_commit = master
+
+PACKAGES += beam
+pkg_beam_name = beam
+pkg_beam_description = BEAM emulator written in Erlang
+pkg_beam_homepage = https://github.com/tonyrog/beam
+pkg_beam_fetch = git
+pkg_beam_repo = https://github.com/tonyrog/beam
+pkg_beam_commit = master
+
+PACKAGES += beanstalk
+pkg_beanstalk_name = beanstalk
+pkg_beanstalk_description = An Erlang client for beanstalkd
+pkg_beanstalk_homepage = https://github.com/tim/erlang-beanstalk
+pkg_beanstalk_fetch = git
+pkg_beanstalk_repo = https://github.com/tim/erlang-beanstalk
+pkg_beanstalk_commit = master
+
+PACKAGES += bear
+pkg_bear_name = bear
+pkg_bear_description = a set of statistics functions for erlang
+pkg_bear_homepage = https://github.com/boundary/bear
+pkg_bear_fetch = git
+pkg_bear_repo = https://github.com/boundary/bear
+pkg_bear_commit = master
+
+PACKAGES += bertconf
+pkg_bertconf_name = bertconf
+pkg_bertconf_description = Make ETS tables out of static BERT files that are auto-reloaded
+pkg_bertconf_homepage = https://github.com/ferd/bertconf
+pkg_bertconf_fetch = git
+pkg_bertconf_repo = https://github.com/ferd/bertconf
+pkg_bertconf_commit = master
+
+PACKAGES += bifrost
+pkg_bifrost_name = bifrost
+pkg_bifrost_description = Erlang FTP Server Framework
+pkg_bifrost_homepage = https://github.com/thorstadt/bifrost
+pkg_bifrost_fetch = git
+pkg_bifrost_repo = https://github.com/thorstadt/bifrost
+pkg_bifrost_commit = master
+
+PACKAGES += binpp
+pkg_binpp_name = binpp
+pkg_binpp_description = Erlang Binary Pretty Printer
+pkg_binpp_homepage = https://github.com/jtendo/binpp
+pkg_binpp_fetch = git
+pkg_binpp_repo = https://github.com/jtendo/binpp
+pkg_binpp_commit = master
+
+PACKAGES += bisect
+pkg_bisect_name = bisect
+pkg_bisect_description = Ordered fixed-size binary dictionary in Erlang
+pkg_bisect_homepage = https://github.com/knutin/bisect
+pkg_bisect_fetch = git
+pkg_bisect_repo = https://github.com/knutin/bisect
+pkg_bisect_commit = master
+
+PACKAGES += bitcask
+pkg_bitcask_name = bitcask
+pkg_bitcask_description = because you need another key/value storage engine
+pkg_bitcask_homepage = https://github.com/basho/bitcask
+pkg_bitcask_fetch = git
+pkg_bitcask_repo = https://github.com/basho/bitcask
+pkg_bitcask_commit = develop
+
+PACKAGES += bitstore
+pkg_bitstore_name = bitstore
+pkg_bitstore_description = A document based ontology development environment
+pkg_bitstore_homepage = https://github.com/bdionne/bitstore
+pkg_bitstore_fetch = git
+pkg_bitstore_repo = https://github.com/bdionne/bitstore
+pkg_bitstore_commit = master
+
+PACKAGES += bootstrap
+pkg_bootstrap_name = bootstrap
+pkg_bootstrap_description = A simple, yet powerful Erlang cluster bootstrapping application.
+pkg_bootstrap_homepage = https://github.com/schlagert/bootstrap
+pkg_bootstrap_fetch = git
+pkg_bootstrap_repo = https://github.com/schlagert/bootstrap
+pkg_bootstrap_commit = master
+
+PACKAGES += boss
+pkg_boss_name = boss
+pkg_boss_description = Erlang web MVC, now featuring Comet
+pkg_boss_homepage = https://github.com/ChicagoBoss/ChicagoBoss
+pkg_boss_fetch = git
+pkg_boss_repo = https://github.com/ChicagoBoss/ChicagoBoss
+pkg_boss_commit = master
+
+PACKAGES += boss_db
+pkg_boss_db_name = boss_db
+pkg_boss_db_description = BossDB: a sharded, caching, pooling, evented ORM for Erlang
+pkg_boss_db_homepage = https://github.com/ErlyORM/boss_db
+pkg_boss_db_fetch = git
+pkg_boss_db_repo = https://github.com/ErlyORM/boss_db
+pkg_boss_db_commit = master
+
+PACKAGES += brod
+pkg_brod_name = brod
+pkg_brod_description = Kafka client in Erlang
+pkg_brod_homepage = https://github.com/klarna/brod
+pkg_brod_fetch = git
+pkg_brod_repo = https://github.com/klarna/brod.git
+pkg_brod_commit = master
+
+PACKAGES += bson
+pkg_bson_name = bson
+pkg_bson_description = BSON documents in Erlang, see bsonspec.org
+pkg_bson_homepage = https://github.com/comtihon/bson-erlang
+pkg_bson_fetch = git
+pkg_bson_repo = https://github.com/comtihon/bson-erlang
+pkg_bson_commit = master
+
+PACKAGES += bullet
+pkg_bullet_name = bullet
+pkg_bullet_description = Simple, reliable, efficient streaming for Cowboy.
+pkg_bullet_homepage = http://ninenines.eu
+pkg_bullet_fetch = git
+pkg_bullet_repo = https://github.com/ninenines/bullet
+pkg_bullet_commit = master
+
+PACKAGES += cache
+pkg_cache_name = cache
+pkg_cache_description = Erlang in-memory cache
+pkg_cache_homepage = https://github.com/fogfish/cache
+pkg_cache_fetch = git
+pkg_cache_repo = https://github.com/fogfish/cache
+pkg_cache_commit = master
+
+PACKAGES += cake
+pkg_cake_name = cake
+pkg_cake_description = Really simple terminal colorization
+pkg_cake_homepage = https://github.com/darach/cake-erl
+pkg_cake_fetch = git
+pkg_cake_repo = https://github.com/darach/cake-erl
+pkg_cake_commit = master
+
+PACKAGES += carotene
+pkg_carotene_name = carotene
+pkg_carotene_description = Real-time server
+pkg_carotene_homepage = https://github.com/carotene/carotene
+pkg_carotene_fetch = git
+pkg_carotene_repo = https://github.com/carotene/carotene
+pkg_carotene_commit = master
+
+PACKAGES += cberl
+pkg_cberl_name = cberl
+pkg_cberl_description = NIF based Erlang bindings for Couchbase
+pkg_cberl_homepage = https://github.com/chitika/cberl
+pkg_cberl_fetch = git
+pkg_cberl_repo = https://github.com/chitika/cberl
+pkg_cberl_commit = master
+
+PACKAGES += cecho
+pkg_cecho_name = cecho
+pkg_cecho_description = An ncurses library for Erlang
+pkg_cecho_homepage = https://github.com/mazenharake/cecho
+pkg_cecho_fetch = git
+pkg_cecho_repo = https://github.com/mazenharake/cecho
+pkg_cecho_commit = master
+
+PACKAGES += cferl
+pkg_cferl_name = cferl
+pkg_cferl_description = Rackspace / Open Stack Cloud Files Erlang Client
+pkg_cferl_homepage = https://github.com/ddossot/cferl
+pkg_cferl_fetch = git
+pkg_cferl_repo = https://github.com/ddossot/cferl
+pkg_cferl_commit = master
+
+PACKAGES += chaos_monkey
+pkg_chaos_monkey_name = chaos_monkey
+pkg_chaos_monkey_description = This is The CHAOS MONKEY. It will kill your processes.
+pkg_chaos_monkey_homepage = https://github.com/dLuna/chaos_monkey
+pkg_chaos_monkey_fetch = git
+pkg_chaos_monkey_repo = https://github.com/dLuna/chaos_monkey
+pkg_chaos_monkey_commit = master
+
+PACKAGES += check_node
+pkg_check_node_name = check_node
+pkg_check_node_description = Nagios Scripts for monitoring Riak
+pkg_check_node_homepage = https://github.com/basho-labs/riak_nagios
+pkg_check_node_fetch = git
+pkg_check_node_repo = https://github.com/basho-labs/riak_nagios
+pkg_check_node_commit = master
+
+PACKAGES += chronos
+pkg_chronos_name = chronos
+pkg_chronos_description = Timer module for Erlang that makes it easy to abstract time out of the tests.
+pkg_chronos_homepage = https://github.com/lehoff/chronos
+pkg_chronos_fetch = git
+pkg_chronos_repo = https://github.com/lehoff/chronos
+pkg_chronos_commit = master
+
+PACKAGES += chumak
+pkg_chumak_name = chumak
+pkg_chumak_description = Pure Erlang implementation of ZeroMQ Message Transport Protocol.
+pkg_chumak_homepage = http://choven.ca
+pkg_chumak_fetch = git
+pkg_chumak_repo = https://github.com/chovencorp/chumak
+pkg_chumak_commit = master
+
+PACKAGES += cl
+pkg_cl_name = cl
+pkg_cl_description = OpenCL binding for Erlang
+pkg_cl_homepage = https://github.com/tonyrog/cl
+pkg_cl_fetch = git
+pkg_cl_repo = https://github.com/tonyrog/cl
+pkg_cl_commit = master
+
+PACKAGES += clique
+pkg_clique_name = clique
+pkg_clique_description = CLI Framework for Erlang
+pkg_clique_homepage = https://github.com/basho/clique
+pkg_clique_fetch = git
+pkg_clique_repo = https://github.com/basho/clique
+pkg_clique_commit = develop
+
+PACKAGES += cloudi_core
+pkg_cloudi_core_name = cloudi_core
+pkg_cloudi_core_description = CloudI internal service runtime
+pkg_cloudi_core_homepage = http://cloudi.org/
+pkg_cloudi_core_fetch = git
+pkg_cloudi_core_repo = https://github.com/CloudI/cloudi_core
+pkg_cloudi_core_commit = master
+
+PACKAGES += cloudi_service_api_requests
+pkg_cloudi_service_api_requests_name = cloudi_service_api_requests
+pkg_cloudi_service_api_requests_description = CloudI Service API requests (JSON-RPC/Erlang-term support)
+pkg_cloudi_service_api_requests_homepage = http://cloudi.org/
+pkg_cloudi_service_api_requests_fetch = git
+pkg_cloudi_service_api_requests_repo = https://github.com/CloudI/cloudi_service_api_requests
+pkg_cloudi_service_api_requests_commit = master
+
+PACKAGES += cloudi_service_db
+pkg_cloudi_service_db_name = cloudi_service_db
+pkg_cloudi_service_db_description = CloudI Database (in-memory/testing/generic)
+pkg_cloudi_service_db_homepage = http://cloudi.org/
+pkg_cloudi_service_db_fetch = git
+pkg_cloudi_service_db_repo = https://github.com/CloudI/cloudi_service_db
+pkg_cloudi_service_db_commit = master
+
+PACKAGES += cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_name = cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_description = Cassandra CloudI Service
+pkg_cloudi_service_db_cassandra_homepage = http://cloudi.org/
+pkg_cloudi_service_db_cassandra_fetch = git
+pkg_cloudi_service_db_cassandra_repo = https://github.com/CloudI/cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_commit = master
+
+PACKAGES += cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_name = cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_description = Cassandra CQL CloudI Service
+pkg_cloudi_service_db_cassandra_cql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_cassandra_cql_fetch = git
+pkg_cloudi_service_db_cassandra_cql_repo = https://github.com/CloudI/cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_commit = master
+
+PACKAGES += cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_name = cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_description = CouchDB CloudI Service
+pkg_cloudi_service_db_couchdb_homepage = http://cloudi.org/
+pkg_cloudi_service_db_couchdb_fetch = git
+pkg_cloudi_service_db_couchdb_repo = https://github.com/CloudI/cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_commit = master
+
+PACKAGES += cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_name = cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_description = elasticsearch CloudI Service
+pkg_cloudi_service_db_elasticsearch_homepage = http://cloudi.org/
+pkg_cloudi_service_db_elasticsearch_fetch = git
+pkg_cloudi_service_db_elasticsearch_repo = https://github.com/CloudI/cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_commit = master
+
+PACKAGES += cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_name = cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_description = memcached CloudI Service
+pkg_cloudi_service_db_memcached_homepage = http://cloudi.org/
+pkg_cloudi_service_db_memcached_fetch = git
+pkg_cloudi_service_db_memcached_repo = https://github.com/CloudI/cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_commit = master
+
+PACKAGES += cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_name = cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_description = MySQL CloudI Service
+pkg_cloudi_service_db_mysql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_mysql_fetch = git
+pkg_cloudi_service_db_mysql_repo = https://github.com/CloudI/cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_commit = master
+
+PACKAGES += cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_name = cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_description = PostgreSQL CloudI Service
+pkg_cloudi_service_db_pgsql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_pgsql_fetch = git
+pkg_cloudi_service_db_pgsql_repo = https://github.com/CloudI/cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_commit = master
+
+PACKAGES += cloudi_service_db_riak
+pkg_cloudi_service_db_riak_name = cloudi_service_db_riak
+pkg_cloudi_service_db_riak_description = Riak CloudI Service
+pkg_cloudi_service_db_riak_homepage = http://cloudi.org/
+pkg_cloudi_service_db_riak_fetch = git
+pkg_cloudi_service_db_riak_repo = https://github.com/CloudI/cloudi_service_db_riak
+pkg_cloudi_service_db_riak_commit = master
+
+PACKAGES += cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_name = cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_description = Tokyo Tyrant CloudI Service
+pkg_cloudi_service_db_tokyotyrant_homepage = http://cloudi.org/
+pkg_cloudi_service_db_tokyotyrant_fetch = git
+pkg_cloudi_service_db_tokyotyrant_repo = https://github.com/CloudI/cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_commit = master
+
+PACKAGES += cloudi_service_filesystem
+pkg_cloudi_service_filesystem_name = cloudi_service_filesystem
+pkg_cloudi_service_filesystem_description = Filesystem CloudI Service
+pkg_cloudi_service_filesystem_homepage = http://cloudi.org/
+pkg_cloudi_service_filesystem_fetch = git
+pkg_cloudi_service_filesystem_repo = https://github.com/CloudI/cloudi_service_filesystem
+pkg_cloudi_service_filesystem_commit = master
+
+PACKAGES += cloudi_service_http_client
+pkg_cloudi_service_http_client_name = cloudi_service_http_client
+pkg_cloudi_service_http_client_description = HTTP client CloudI Service
+pkg_cloudi_service_http_client_homepage = http://cloudi.org/
+pkg_cloudi_service_http_client_fetch = git
+pkg_cloudi_service_http_client_repo = https://github.com/CloudI/cloudi_service_http_client
+pkg_cloudi_service_http_client_commit = master
+
+PACKAGES += cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_name = cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_description = cowboy HTTP/HTTPS CloudI Service
+pkg_cloudi_service_http_cowboy_homepage = http://cloudi.org/
+pkg_cloudi_service_http_cowboy_fetch = git
+pkg_cloudi_service_http_cowboy_repo = https://github.com/CloudI/cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_commit = master
+
+PACKAGES += cloudi_service_http_elli
+pkg_cloudi_service_http_elli_name = cloudi_service_http_elli
+pkg_cloudi_service_http_elli_description = elli HTTP CloudI Service
+pkg_cloudi_service_http_elli_homepage = http://cloudi.org/
+pkg_cloudi_service_http_elli_fetch = git
+pkg_cloudi_service_http_elli_repo = https://github.com/CloudI/cloudi_service_http_elli
+pkg_cloudi_service_http_elli_commit = master
+
+PACKAGES += cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_name = cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_description = Map/Reduce CloudI Service
+pkg_cloudi_service_map_reduce_homepage = http://cloudi.org/
+pkg_cloudi_service_map_reduce_fetch = git
+pkg_cloudi_service_map_reduce_repo = https://github.com/CloudI/cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_commit = master
+
+PACKAGES += cloudi_service_oauth1
+pkg_cloudi_service_oauth1_name = cloudi_service_oauth1
+pkg_cloudi_service_oauth1_description = OAuth v1.0 CloudI Service
+pkg_cloudi_service_oauth1_homepage = http://cloudi.org/
+pkg_cloudi_service_oauth1_fetch = git
+pkg_cloudi_service_oauth1_repo = https://github.com/CloudI/cloudi_service_oauth1
+pkg_cloudi_service_oauth1_commit = master
+
+PACKAGES += cloudi_service_queue
+pkg_cloudi_service_queue_name = cloudi_service_queue
+pkg_cloudi_service_queue_description = Persistent Queue Service
+pkg_cloudi_service_queue_homepage = http://cloudi.org/
+pkg_cloudi_service_queue_fetch = git
+pkg_cloudi_service_queue_repo = https://github.com/CloudI/cloudi_service_queue
+pkg_cloudi_service_queue_commit = master
+
+PACKAGES += cloudi_service_quorum
+pkg_cloudi_service_quorum_name = cloudi_service_quorum
+pkg_cloudi_service_quorum_description = CloudI Quorum Service
+pkg_cloudi_service_quorum_homepage = http://cloudi.org/
+pkg_cloudi_service_quorum_fetch = git
+pkg_cloudi_service_quorum_repo = https://github.com/CloudI/cloudi_service_quorum
+pkg_cloudi_service_quorum_commit = master
+
+PACKAGES += cloudi_service_router
+pkg_cloudi_service_router_name = cloudi_service_router
+pkg_cloudi_service_router_description = CloudI Router Service
+pkg_cloudi_service_router_homepage = http://cloudi.org/
+pkg_cloudi_service_router_fetch = git
+pkg_cloudi_service_router_repo = https://github.com/CloudI/cloudi_service_router
+pkg_cloudi_service_router_commit = master
+
+PACKAGES += cloudi_service_tcp
+pkg_cloudi_service_tcp_name = cloudi_service_tcp
+pkg_cloudi_service_tcp_description = TCP CloudI Service
+pkg_cloudi_service_tcp_homepage = http://cloudi.org/
+pkg_cloudi_service_tcp_fetch = git
+pkg_cloudi_service_tcp_repo = https://github.com/CloudI/cloudi_service_tcp
+pkg_cloudi_service_tcp_commit = master
+
+PACKAGES += cloudi_service_timers
+pkg_cloudi_service_timers_name = cloudi_service_timers
+pkg_cloudi_service_timers_description = Timers CloudI Service
+pkg_cloudi_service_timers_homepage = http://cloudi.org/
+pkg_cloudi_service_timers_fetch = git
+pkg_cloudi_service_timers_repo = https://github.com/CloudI/cloudi_service_timers
+pkg_cloudi_service_timers_commit = master
+
+PACKAGES += cloudi_service_udp
+pkg_cloudi_service_udp_name = cloudi_service_udp
+pkg_cloudi_service_udp_description = UDP CloudI Service
+pkg_cloudi_service_udp_homepage = http://cloudi.org/
+pkg_cloudi_service_udp_fetch = git
+pkg_cloudi_service_udp_repo = https://github.com/CloudI/cloudi_service_udp
+pkg_cloudi_service_udp_commit = master
+
+PACKAGES += cloudi_service_validate
+pkg_cloudi_service_validate_name = cloudi_service_validate
+pkg_cloudi_service_validate_description = CloudI Validate Service
+pkg_cloudi_service_validate_homepage = http://cloudi.org/
+pkg_cloudi_service_validate_fetch = git
+pkg_cloudi_service_validate_repo = https://github.com/CloudI/cloudi_service_validate
+pkg_cloudi_service_validate_commit = master
+
+PACKAGES += cloudi_service_zeromq
+pkg_cloudi_service_zeromq_name = cloudi_service_zeromq
+pkg_cloudi_service_zeromq_description = ZeroMQ CloudI Service
+pkg_cloudi_service_zeromq_homepage = http://cloudi.org/
+pkg_cloudi_service_zeromq_fetch = git
+pkg_cloudi_service_zeromq_repo = https://github.com/CloudI/cloudi_service_zeromq
+pkg_cloudi_service_zeromq_commit = master
+
+PACKAGES += cluster_info
+pkg_cluster_info_name = cluster_info
+pkg_cluster_info_description = Fork of Hibari's nifty cluster_info OTP app
+pkg_cluster_info_homepage = https://github.com/basho/cluster_info
+pkg_cluster_info_fetch = git
+pkg_cluster_info_repo = https://github.com/basho/cluster_info
+pkg_cluster_info_commit = master
+
+PACKAGES += color
+pkg_color_name = color
+pkg_color_description = ANSI colors for your Erlang
+pkg_color_homepage = https://github.com/julianduque/erlang-color
+pkg_color_fetch = git
+pkg_color_repo = https://github.com/julianduque/erlang-color
+pkg_color_commit = master
+
+PACKAGES += confetti
+pkg_confetti_name = confetti
+pkg_confetti_description = Erlang configuration provider / application:get_env/2 on steroids
+pkg_confetti_homepage = https://github.com/jtendo/confetti
+pkg_confetti_fetch = git
+pkg_confetti_repo = https://github.com/jtendo/confetti
+pkg_confetti_commit = master
+
+PACKAGES += couchbeam
+pkg_couchbeam_name = couchbeam
+pkg_couchbeam_description = Apache CouchDB client in Erlang
+pkg_couchbeam_homepage = https://github.com/benoitc/couchbeam
+pkg_couchbeam_fetch = git
+pkg_couchbeam_repo = https://github.com/benoitc/couchbeam
+pkg_couchbeam_commit = master
+
+PACKAGES += covertool
+pkg_covertool_name = covertool
+pkg_covertool_description = Tool to convert Erlang cover data files into Cobertura XML reports
+pkg_covertool_homepage = https://github.com/idubrov/covertool
+pkg_covertool_fetch = git
+pkg_covertool_repo = https://github.com/idubrov/covertool
+pkg_covertool_commit = master
+
+PACKAGES += cowboy
+pkg_cowboy_name = cowboy
+pkg_cowboy_description = Small, fast and modular HTTP server.
+pkg_cowboy_homepage = http://ninenines.eu
+pkg_cowboy_fetch = git
+pkg_cowboy_repo = https://github.com/ninenines/cowboy
+pkg_cowboy_commit = 1.0.4
+
+PACKAGES += cowdb
+pkg_cowdb_name = cowdb
+pkg_cowdb_description = Pure Key/Value database library for Erlang Applications
+pkg_cowdb_homepage = https://github.com/refuge/cowdb
+pkg_cowdb_fetch = git
+pkg_cowdb_repo = https://github.com/refuge/cowdb
+pkg_cowdb_commit = master
+
+PACKAGES += cowlib
+pkg_cowlib_name = cowlib
+pkg_cowlib_description = Support library for manipulating Web protocols.
+pkg_cowlib_homepage = http://ninenines.eu
+pkg_cowlib_fetch = git
+pkg_cowlib_repo = https://github.com/ninenines/cowlib
+pkg_cowlib_commit = 1.0.2
+
+PACKAGES += cpg
+pkg_cpg_name = cpg
+pkg_cpg_description = CloudI Process Groups
+pkg_cpg_homepage = https://github.com/okeuday/cpg
+pkg_cpg_fetch = git
+pkg_cpg_repo = https://github.com/okeuday/cpg
+pkg_cpg_commit = master
+
+PACKAGES += cqerl
+pkg_cqerl_name = cqerl
+pkg_cqerl_description = Native Erlang CQL client for Cassandra
+pkg_cqerl_homepage = https://matehat.github.io/cqerl/
+pkg_cqerl_fetch = git
+pkg_cqerl_repo = https://github.com/matehat/cqerl
+pkg_cqerl_commit = master
+
+PACKAGES += cr
+pkg_cr_name = cr
+pkg_cr_description = Chain Replication
+pkg_cr_homepage = https://synrc.com/apps/cr/doc/cr.htm
+pkg_cr_fetch = git
+pkg_cr_repo = https://github.com/spawnproc/cr
+pkg_cr_commit = master
+
+PACKAGES += cuttlefish
+pkg_cuttlefish_name = cuttlefish
+pkg_cuttlefish_description = never lose your childlike sense of wonder baby cuttlefish, promise me?
+pkg_cuttlefish_homepage = https://github.com/basho/cuttlefish
+pkg_cuttlefish_fetch = git
+pkg_cuttlefish_repo = https://github.com/basho/cuttlefish
+pkg_cuttlefish_commit = master
+
+PACKAGES += damocles
+pkg_damocles_name = damocles
+pkg_damocles_description = Erlang library for generating adversarial network conditions for QAing distributed applications/systems on a single Linux box.
+pkg_damocles_homepage = https://github.com/lostcolony/damocles
+pkg_damocles_fetch = git
+pkg_damocles_repo = https://github.com/lostcolony/damocles
+pkg_damocles_commit = master
+
+PACKAGES += debbie
+pkg_debbie_name = debbie
+pkg_debbie_description = .DEB Built In Erlang
+pkg_debbie_homepage = https://github.com/crownedgrouse/debbie
+pkg_debbie_fetch = git
+pkg_debbie_repo = https://github.com/crownedgrouse/debbie
+pkg_debbie_commit = master
+
+PACKAGES += decimal
+pkg_decimal_name = decimal
+pkg_decimal_description = An Erlang decimal arithmetic library
+pkg_decimal_homepage = https://github.com/tim/erlang-decimal
+pkg_decimal_fetch = git
+pkg_decimal_repo = https://github.com/tim/erlang-decimal
+pkg_decimal_commit = master
+
+PACKAGES += detergent
+pkg_detergent_name = detergent
+pkg_detergent_description = An emulsifying Erlang SOAP library
+pkg_detergent_homepage = https://github.com/devinus/detergent
+pkg_detergent_fetch = git
+pkg_detergent_repo = https://github.com/devinus/detergent
+pkg_detergent_commit = master
+
+PACKAGES += detest
+pkg_detest_name = detest
+pkg_detest_description = Tool for running tests on a cluster of erlang nodes
+pkg_detest_homepage = https://github.com/biokoda/detest
+pkg_detest_fetch = git
+pkg_detest_repo = https://github.com/biokoda/detest
+pkg_detest_commit = master
+
+PACKAGES += dh_date
+pkg_dh_date_name = dh_date
+pkg_dh_date_description = Date formatting / parsing library for erlang
+pkg_dh_date_homepage = https://github.com/daleharvey/dh_date
+pkg_dh_date_fetch = git
+pkg_dh_date_repo = https://github.com/daleharvey/dh_date
+pkg_dh_date_commit = master
+
+PACKAGES += dirbusterl
+pkg_dirbusterl_name = dirbusterl
+pkg_dirbusterl_description = DirBuster successor in Erlang
+pkg_dirbusterl_homepage = https://github.com/silentsignal/DirBustErl
+pkg_dirbusterl_fetch = git
+pkg_dirbusterl_repo = https://github.com/silentsignal/DirBustErl
+pkg_dirbusterl_commit = master
+
+PACKAGES += dispcount
+pkg_dispcount_name = dispcount
+pkg_dispcount_description = Erlang task dispatcher based on ETS counters.
+pkg_dispcount_homepage = https://github.com/ferd/dispcount
+pkg_dispcount_fetch = git
+pkg_dispcount_repo = https://github.com/ferd/dispcount
+pkg_dispcount_commit = master
+
+PACKAGES += dlhttpc
+pkg_dlhttpc_name = dlhttpc
+pkg_dlhttpc_description = dispcount-based lhttpc fork for massive amounts of requests to limited endpoints
+pkg_dlhttpc_homepage = https://github.com/ferd/dlhttpc
+pkg_dlhttpc_fetch = git
+pkg_dlhttpc_repo = https://github.com/ferd/dlhttpc
+pkg_dlhttpc_commit = master
+
+PACKAGES += dns
+pkg_dns_name = dns
+pkg_dns_description = Erlang DNS library
+pkg_dns_homepage = https://github.com/aetrion/dns_erlang
+pkg_dns_fetch = git
+pkg_dns_repo = https://github.com/aetrion/dns_erlang
+pkg_dns_commit = master
+
+PACKAGES += dnssd
+pkg_dnssd_name = dnssd
+pkg_dnssd_description = Erlang interface to Apple's Bonjour DNS Service Discovery implementation
+pkg_dnssd_homepage = https://github.com/benoitc/dnssd_erlang
+pkg_dnssd_fetch = git
+pkg_dnssd_repo = https://github.com/benoitc/dnssd_erlang
+pkg_dnssd_commit = master
+
+PACKAGES += dynamic_compile
+pkg_dynamic_compile_name = dynamic_compile
+pkg_dynamic_compile_description = compile and load erlang modules from string input
+pkg_dynamic_compile_homepage = https://github.com/jkvor/dynamic_compile
+pkg_dynamic_compile_fetch = git
+pkg_dynamic_compile_repo = https://github.com/jkvor/dynamic_compile
+pkg_dynamic_compile_commit = master
+
+PACKAGES += e2
+pkg_e2_name = e2
+pkg_e2_description = Library to simplify writing correct OTP applications.
+pkg_e2_homepage = http://e2project.org
+pkg_e2_fetch = git
+pkg_e2_repo = https://github.com/gar1t/e2
+pkg_e2_commit = master
+
+PACKAGES += eamf
+pkg_eamf_name = eamf
+pkg_eamf_description = eAMF provides Action Message Format (AMF) support for Erlang
+pkg_eamf_homepage = https://github.com/mrinalwadhwa/eamf
+pkg_eamf_fetch = git
+pkg_eamf_repo = https://github.com/mrinalwadhwa/eamf
+pkg_eamf_commit = master
+
+PACKAGES += eavro
+pkg_eavro_name = eavro
+pkg_eavro_description = Apache Avro encoder/decoder
+pkg_eavro_homepage = https://github.com/SIfoxDevTeam/eavro
+pkg_eavro_fetch = git
+pkg_eavro_repo = https://github.com/SIfoxDevTeam/eavro
+pkg_eavro_commit = master
+
+PACKAGES += ecapnp
+pkg_ecapnp_name = ecapnp
+pkg_ecapnp_description = Cap'n Proto library for Erlang
+pkg_ecapnp_homepage = https://github.com/kaos/ecapnp
+pkg_ecapnp_fetch = git
+pkg_ecapnp_repo = https://github.com/kaos/ecapnp
+pkg_ecapnp_commit = master
+
+PACKAGES += econfig
+pkg_econfig_name = econfig
+pkg_econfig_description = simple Erlang config handler using INI files
+pkg_econfig_homepage = https://github.com/benoitc/econfig
+pkg_econfig_fetch = git
+pkg_econfig_repo = https://github.com/benoitc/econfig
+pkg_econfig_commit = master
+
+PACKAGES += edate
+pkg_edate_name = edate
+pkg_edate_description = date manipulation library for erlang
+pkg_edate_homepage = https://github.com/dweldon/edate
+pkg_edate_fetch = git
+pkg_edate_repo = https://github.com/dweldon/edate
+pkg_edate_commit = master
+
+PACKAGES += edgar
+pkg_edgar_name = edgar
+pkg_edgar_description = Erlang Does GNU AR
+pkg_edgar_homepage = https://github.com/crownedgrouse/edgar
+pkg_edgar_fetch = git
+pkg_edgar_repo = https://github.com/crownedgrouse/edgar
+pkg_edgar_commit = master
+
+PACKAGES += edis
+pkg_edis_name = edis
+pkg_edis_description = An Erlang implementation of Redis KV Store
+pkg_edis_homepage = http://inaka.github.com/edis/
+pkg_edis_fetch = git
+pkg_edis_repo = https://github.com/inaka/edis
+pkg_edis_commit = master
+
+PACKAGES += edns
+pkg_edns_name = edns
+pkg_edns_description = Erlang/OTP DNS server
+pkg_edns_homepage = https://github.com/hcvst/erlang-dns
+pkg_edns_fetch = git
+pkg_edns_repo = https://github.com/hcvst/erlang-dns
+pkg_edns_commit = master
+
+PACKAGES += edown
+pkg_edown_name = edown
+pkg_edown_description = EDoc extension for generating Github-flavored Markdown
+pkg_edown_homepage = https://github.com/uwiger/edown
+pkg_edown_fetch = git
+pkg_edown_repo = https://github.com/uwiger/edown
+pkg_edown_commit = master
+
+PACKAGES += eep
+pkg_eep_name = eep
+pkg_eep_description = Erlang Easy Profiling (eep) application provides a way to analyze application performance and call hierarchy
+pkg_eep_homepage = https://github.com/virtan/eep
+pkg_eep_fetch = git
+pkg_eep_repo = https://github.com/virtan/eep
+pkg_eep_commit = master
+
+PACKAGES += eep_app
+pkg_eep_app_name = eep_app
+pkg_eep_app_description = Embedded Event Processing
+pkg_eep_app_homepage = https://github.com/darach/eep-erl
+pkg_eep_app_fetch = git
+pkg_eep_app_repo = https://github.com/darach/eep-erl
+pkg_eep_app_commit = master
+
+PACKAGES += efene
+pkg_efene_name = efene
+pkg_efene_description = Alternative syntax for the Erlang Programming Language focusing on simplicity, ease of use and programmer UX
+pkg_efene_homepage = https://github.com/efene/efene
+pkg_efene_fetch = git
+pkg_efene_repo = https://github.com/efene/efene
+pkg_efene_commit = master
+
+PACKAGES += egeoip
+pkg_egeoip_name = egeoip
+pkg_egeoip_description = Erlang IP Geolocation module, currently supporting the MaxMind GeoLite City Database.
+pkg_egeoip_homepage = https://github.com/mochi/egeoip
+pkg_egeoip_fetch = git
+pkg_egeoip_repo = https://github.com/mochi/egeoip
+pkg_egeoip_commit = master
+
+PACKAGES += ehsa
+pkg_ehsa_name = ehsa
+pkg_ehsa_description = Erlang HTTP server basic and digest authentication modules
+pkg_ehsa_homepage = https://bitbucket.org/a12n/ehsa
+pkg_ehsa_fetch = hg
+pkg_ehsa_repo = https://bitbucket.org/a12n/ehsa
+pkg_ehsa_commit = default
+
+PACKAGES += ej
+pkg_ej_name = ej
+pkg_ej_description = Helper module for working with Erlang terms representing JSON
+pkg_ej_homepage = https://github.com/seth/ej
+pkg_ej_fetch = git
+pkg_ej_repo = https://github.com/seth/ej
+pkg_ej_commit = master
+
+PACKAGES += ejabberd
+pkg_ejabberd_name = ejabberd
+pkg_ejabberd_description = Robust, ubiquitous and massively scalable Jabber / XMPP Instant Messaging platform
+pkg_ejabberd_homepage = https://github.com/processone/ejabberd
+pkg_ejabberd_fetch = git
+pkg_ejabberd_repo = https://github.com/processone/ejabberd
+pkg_ejabberd_commit = master
+
+PACKAGES += ejwt
+pkg_ejwt_name = ejwt
+pkg_ejwt_description = erlang library for JSON Web Token
+pkg_ejwt_homepage = https://github.com/artefactop/ejwt
+pkg_ejwt_fetch = git
+pkg_ejwt_repo = https://github.com/artefactop/ejwt
+pkg_ejwt_commit = master
+
+PACKAGES += ekaf
+pkg_ekaf_name = ekaf
+pkg_ekaf_description = A minimal, high-performance Kafka client in Erlang.
+pkg_ekaf_homepage = https://github.com/helpshift/ekaf
+pkg_ekaf_fetch = git
+pkg_ekaf_repo = https://github.com/helpshift/ekaf
+pkg_ekaf_commit = master
+
+PACKAGES += elarm
+pkg_elarm_name = elarm
+pkg_elarm_description = Alarm Manager for Erlang.
+pkg_elarm_homepage = https://github.com/esl/elarm
+pkg_elarm_fetch = git
+pkg_elarm_repo = https://github.com/esl/elarm
+pkg_elarm_commit = master
+
+PACKAGES += eleveldb
+pkg_eleveldb_name = eleveldb
+pkg_eleveldb_description = Erlang LevelDB API
+pkg_eleveldb_homepage = https://github.com/basho/eleveldb
+pkg_eleveldb_fetch = git
+pkg_eleveldb_repo = https://github.com/basho/eleveldb
+pkg_eleveldb_commit = master
+
+PACKAGES += elixir
+pkg_elixir_name = elixir
+pkg_elixir_description = Elixir is a dynamic, functional language designed for building scalable and maintainable applications
+pkg_elixir_homepage = https://elixir-lang.org/
+pkg_elixir_fetch = git
+pkg_elixir_repo = https://github.com/elixir-lang/elixir
+pkg_elixir_commit = master
+
+PACKAGES += elli
+pkg_elli_name = elli
+pkg_elli_description = Simple, robust and performant Erlang web server
+pkg_elli_homepage = https://github.com/elli-lib/elli
+pkg_elli_fetch = git
+pkg_elli_repo = https://github.com/elli-lib/elli
+pkg_elli_commit = master
+
+PACKAGES += elvis
+pkg_elvis_name = elvis
+pkg_elvis_description = Erlang Style Reviewer
+pkg_elvis_homepage = https://github.com/inaka/elvis
+pkg_elvis_fetch = git
+pkg_elvis_repo = https://github.com/inaka/elvis
+pkg_elvis_commit = master
+
+PACKAGES += emagick
+pkg_emagick_name = emagick
+pkg_emagick_description = Wrapper for Graphics/ImageMagick command line tool.
+pkg_emagick_homepage = https://github.com/kivra/emagick
+pkg_emagick_fetch = git
+pkg_emagick_repo = https://github.com/kivra/emagick
+pkg_emagick_commit = master
+
+PACKAGES += emysql
+pkg_emysql_name = emysql
+pkg_emysql_description = Stable, pure Erlang MySQL driver.
+pkg_emysql_homepage = https://github.com/Eonblast/Emysql
+pkg_emysql_fetch = git
+pkg_emysql_repo = https://github.com/Eonblast/Emysql
+pkg_emysql_commit = master
+
+PACKAGES += enm
+pkg_enm_name = enm
+pkg_enm_description = Erlang driver for nanomsg
+pkg_enm_homepage = https://github.com/basho/enm
+pkg_enm_fetch = git
+pkg_enm_repo = https://github.com/basho/enm
+pkg_enm_commit = master
+
+PACKAGES += entop
+pkg_entop_name = entop
+pkg_entop_description = A top-like tool for monitoring an Erlang node
+pkg_entop_homepage = https://github.com/mazenharake/entop
+pkg_entop_fetch = git
+pkg_entop_repo = https://github.com/mazenharake/entop
+pkg_entop_commit = master
+
+PACKAGES += epcap
+pkg_epcap_name = epcap
+pkg_epcap_description = Erlang packet capture interface using pcap
+pkg_epcap_homepage = https://github.com/msantos/epcap
+pkg_epcap_fetch = git
+pkg_epcap_repo = https://github.com/msantos/epcap
+pkg_epcap_commit = master
+
+PACKAGES += eper
+pkg_eper_name = eper
+pkg_eper_description = Erlang performance and debugging tools.
+pkg_eper_homepage = https://github.com/massemanet/eper
+pkg_eper_fetch = git
+pkg_eper_repo = https://github.com/massemanet/eper
+pkg_eper_commit = master
+
+PACKAGES += epgsql
+pkg_epgsql_name = epgsql
+pkg_epgsql_description = Erlang PostgreSQL client library.
+pkg_epgsql_homepage = https://github.com/epgsql/epgsql
+pkg_epgsql_fetch = git
+pkg_epgsql_repo = https://github.com/epgsql/epgsql
+pkg_epgsql_commit = master
+
+PACKAGES += episcina
+pkg_episcina_name = episcina
+pkg_episcina_description = A simple non-intrusive resource pool for connections
+pkg_episcina_homepage = https://github.com/erlware/episcina
+pkg_episcina_fetch = git
+pkg_episcina_repo = https://github.com/erlware/episcina
+pkg_episcina_commit = master
+
+PACKAGES += eplot
+pkg_eplot_name = eplot
+pkg_eplot_description = A plot engine written in erlang.
+pkg_eplot_homepage = https://github.com/psyeugenic/eplot
+pkg_eplot_fetch = git
+pkg_eplot_repo = https://github.com/psyeugenic/eplot
+pkg_eplot_commit = master
+
+PACKAGES += epocxy
+pkg_epocxy_name = epocxy
+pkg_epocxy_description = Erlang Patterns of Concurrency
+pkg_epocxy_homepage = https://github.com/duomark/epocxy
+pkg_epocxy_fetch = git
+pkg_epocxy_repo = https://github.com/duomark/epocxy
+pkg_epocxy_commit = master
+
+PACKAGES += epubnub
+pkg_epubnub_name = epubnub
+pkg_epubnub_description = Erlang PubNub API
+pkg_epubnub_homepage = https://github.com/tsloughter/epubnub
+pkg_epubnub_fetch = git
+pkg_epubnub_repo = https://github.com/tsloughter/epubnub
+pkg_epubnub_commit = master
+
+PACKAGES += eqm
+pkg_eqm_name = eqm
+pkg_eqm_description = Erlang pub sub with supply-demand channels
+pkg_eqm_homepage = https://github.com/loucash/eqm
+pkg_eqm_fetch = git
+pkg_eqm_repo = https://github.com/loucash/eqm
+pkg_eqm_commit = master
+
+PACKAGES += eredis
+pkg_eredis_name = eredis
+pkg_eredis_description = Erlang Redis client
+pkg_eredis_homepage = https://github.com/wooga/eredis
+pkg_eredis_fetch = git
+pkg_eredis_repo = https://github.com/wooga/eredis
+pkg_eredis_commit = master
+
+PACKAGES += eredis_pool
+pkg_eredis_pool_name = eredis_pool
+pkg_eredis_pool_description = eredis_pool is a pool of Redis clients, using eredis and poolboy.
+pkg_eredis_pool_homepage = https://github.com/hiroeorz/eredis_pool
+pkg_eredis_pool_fetch = git
+pkg_eredis_pool_repo = https://github.com/hiroeorz/eredis_pool
+pkg_eredis_pool_commit = master
+
+PACKAGES += erl_streams
+pkg_erl_streams_name = erl_streams
+pkg_erl_streams_description = Streams in Erlang
+pkg_erl_streams_homepage = https://github.com/epappas/erl_streams
+pkg_erl_streams_fetch = git
+pkg_erl_streams_repo = https://github.com/epappas/erl_streams
+pkg_erl_streams_commit = master
+
+PACKAGES += erlang_cep
+pkg_erlang_cep_name = erlang_cep
+pkg_erlang_cep_description = A basic CEP package written in erlang
+pkg_erlang_cep_homepage = https://github.com/danmacklin/erlang_cep
+pkg_erlang_cep_fetch = git
+pkg_erlang_cep_repo = https://github.com/danmacklin/erlang_cep
+pkg_erlang_cep_commit = master
+
+PACKAGES += erlang_js
+pkg_erlang_js_name = erlang_js
+pkg_erlang_js_description = A linked-in driver for Erlang to Mozilla's Spidermonkey Javascript runtime.
+pkg_erlang_js_homepage = https://github.com/basho/erlang_js
+pkg_erlang_js_fetch = git
+pkg_erlang_js_repo = https://github.com/basho/erlang_js
+pkg_erlang_js_commit = master
+
+PACKAGES += erlang_localtime
+pkg_erlang_localtime_name = erlang_localtime
+pkg_erlang_localtime_description = Erlang library for conversion from one local time to another
+pkg_erlang_localtime_homepage = https://github.com/dmitryme/erlang_localtime
+pkg_erlang_localtime_fetch = git
+pkg_erlang_localtime_repo = https://github.com/dmitryme/erlang_localtime
+pkg_erlang_localtime_commit = master
+
+PACKAGES += erlang_smtp
+pkg_erlang_smtp_name = erlang_smtp
+pkg_erlang_smtp_description = Erlang SMTP and POP3 server code.
+pkg_erlang_smtp_homepage = https://github.com/tonyg/erlang-smtp
+pkg_erlang_smtp_fetch = git
+pkg_erlang_smtp_repo = https://github.com/tonyg/erlang-smtp
+pkg_erlang_smtp_commit = master
+
+PACKAGES += erlang_term
+pkg_erlang_term_name = erlang_term
+pkg_erlang_term_description = Erlang Term Info
+pkg_erlang_term_homepage = https://github.com/okeuday/erlang_term
+pkg_erlang_term_fetch = git
+pkg_erlang_term_repo = https://github.com/okeuday/erlang_term
+pkg_erlang_term_commit = master
+
+PACKAGES += erlastic_search
+pkg_erlastic_search_name = erlastic_search
+pkg_erlastic_search_description = An Erlang app for communicating with Elastic Search's REST interface.
+pkg_erlastic_search_homepage = https://github.com/tsloughter/erlastic_search
+pkg_erlastic_search_fetch = git
+pkg_erlastic_search_repo = https://github.com/tsloughter/erlastic_search
+pkg_erlastic_search_commit = master
+
+PACKAGES += erlasticsearch
+pkg_erlasticsearch_name = erlasticsearch
+pkg_erlasticsearch_description = Erlang thrift interface to elastic_search
+pkg_erlasticsearch_homepage = https://github.com/dieswaytoofast/erlasticsearch
+pkg_erlasticsearch_fetch = git
+pkg_erlasticsearch_repo = https://github.com/dieswaytoofast/erlasticsearch
+pkg_erlasticsearch_commit = master
+
+PACKAGES += erlbrake
+pkg_erlbrake_name = erlbrake
+pkg_erlbrake_description = Erlang Airbrake notification client
+pkg_erlbrake_homepage = https://github.com/kenpratt/erlbrake
+pkg_erlbrake_fetch = git
+pkg_erlbrake_repo = https://github.com/kenpratt/erlbrake
+pkg_erlbrake_commit = master
+
+PACKAGES += erlcloud
+pkg_erlcloud_name = erlcloud
+pkg_erlcloud_description = Cloud Computing library for erlang (Amazon EC2, S3, SQS, SimpleDB, Mechanical Turk, ELB)
+pkg_erlcloud_homepage = https://github.com/gleber/erlcloud
+pkg_erlcloud_fetch = git
+pkg_erlcloud_repo = https://github.com/gleber/erlcloud
+pkg_erlcloud_commit = master
+
+PACKAGES += erlcron
+pkg_erlcron_name = erlcron
+pkg_erlcron_description = Erlang cronish system
+pkg_erlcron_homepage = https://github.com/erlware/erlcron
+pkg_erlcron_fetch = git
+pkg_erlcron_repo = https://github.com/erlware/erlcron
+pkg_erlcron_commit = master
+
+PACKAGES += erldb
+pkg_erldb_name = erldb
+pkg_erldb_description = ORM (Object-relational mapping) application implemented in Erlang
+pkg_erldb_homepage = http://erldb.org
+pkg_erldb_fetch = git
+pkg_erldb_repo = https://github.com/erldb/erldb
+pkg_erldb_commit = master
+
+PACKAGES += erldis
+pkg_erldis_name = erldis
+pkg_erldis_description = redis erlang client library
+pkg_erldis_homepage = https://github.com/cstar/erldis
+pkg_erldis_fetch = git
+pkg_erldis_repo = https://github.com/cstar/erldis
+pkg_erldis_commit = master
+
+PACKAGES += erldns
+pkg_erldns_name = erldns
+pkg_erldns_description = DNS server, in erlang.
+pkg_erldns_homepage = https://github.com/aetrion/erl-dns
+pkg_erldns_fetch = git
+pkg_erldns_repo = https://github.com/aetrion/erl-dns
+pkg_erldns_commit = master
+
+PACKAGES += erldocker
+pkg_erldocker_name = erldocker
+pkg_erldocker_description = Docker Remote API client for Erlang
+pkg_erldocker_homepage = https://github.com/proger/erldocker
+pkg_erldocker_fetch = git
+pkg_erldocker_repo = https://github.com/proger/erldocker
+pkg_erldocker_commit = master
+
+PACKAGES += erlfsmon
+pkg_erlfsmon_name = erlfsmon
+pkg_erlfsmon_description = Erlang filesystem event watcher for Linux and OSX
+pkg_erlfsmon_homepage = https://github.com/proger/erlfsmon
+pkg_erlfsmon_fetch = git
+pkg_erlfsmon_repo = https://github.com/proger/erlfsmon
+pkg_erlfsmon_commit = master
+
+PACKAGES += erlgit
+pkg_erlgit_name = erlgit
+pkg_erlgit_description = Erlang convenience wrapper around git executable
+pkg_erlgit_homepage = https://github.com/gleber/erlgit
+pkg_erlgit_fetch = git
+pkg_erlgit_repo = https://github.com/gleber/erlgit
+pkg_erlgit_commit = master
+
+PACKAGES += erlguten
+pkg_erlguten_name = erlguten
+pkg_erlguten_description = ErlGuten is a system for high-quality typesetting, written purely in Erlang.
+pkg_erlguten_homepage = https://github.com/richcarl/erlguten
+pkg_erlguten_fetch = git
+pkg_erlguten_repo = https://github.com/richcarl/erlguten
+pkg_erlguten_commit = master
+
+PACKAGES += erlmc
+pkg_erlmc_name = erlmc
+pkg_erlmc_description = Erlang memcached binary protocol client
+pkg_erlmc_homepage = https://github.com/jkvor/erlmc
+pkg_erlmc_fetch = git
+pkg_erlmc_repo = https://github.com/jkvor/erlmc
+pkg_erlmc_commit = master
+
+PACKAGES += erlmongo
+pkg_erlmongo_name = erlmongo
+pkg_erlmongo_description = Record based Erlang driver for MongoDB with gridfs support
+pkg_erlmongo_homepage = https://github.com/SergejJurecko/erlmongo
+pkg_erlmongo_fetch = git
+pkg_erlmongo_repo = https://github.com/SergejJurecko/erlmongo
+pkg_erlmongo_commit = master
+
+PACKAGES += erlog
+pkg_erlog_name = erlog
+pkg_erlog_description = Prolog interpreter in and for Erlang
+pkg_erlog_homepage = https://github.com/rvirding/erlog
+pkg_erlog_fetch = git
+pkg_erlog_repo = https://github.com/rvirding/erlog
+pkg_erlog_commit = master
+
+PACKAGES += erlpass
+pkg_erlpass_name = erlpass
+pkg_erlpass_description = A library to handle password hashing and changing in a safe manner, independent from any kind of storage whatsoever.
+pkg_erlpass_homepage = https://github.com/ferd/erlpass
+pkg_erlpass_fetch = git
+pkg_erlpass_repo = https://github.com/ferd/erlpass
+pkg_erlpass_commit = master
+
+PACKAGES += erlport
+pkg_erlport_name = erlport
+pkg_erlport_description = ErlPort - connect Erlang to other languages
+pkg_erlport_homepage = https://github.com/hdima/erlport
+pkg_erlport_fetch = git
+pkg_erlport_repo = https://github.com/hdima/erlport
+pkg_erlport_commit = master
+
+PACKAGES += erlsh
+pkg_erlsh_name = erlsh
+pkg_erlsh_description = Erlang shell tools
+pkg_erlsh_homepage = https://github.com/proger/erlsh
+pkg_erlsh_fetch = git
+pkg_erlsh_repo = https://github.com/proger/erlsh
+pkg_erlsh_commit = master
+
+PACKAGES += erlsha2
+pkg_erlsha2_name = erlsha2
+pkg_erlsha2_description = SHA-224, SHA-256, SHA-384, SHA-512 implemented in Erlang NIFs.
+pkg_erlsha2_homepage = https://github.com/vinoski/erlsha2
+pkg_erlsha2_fetch = git
+pkg_erlsha2_repo = https://github.com/vinoski/erlsha2
+pkg_erlsha2_commit = master
+
+PACKAGES += erlsom
+pkg_erlsom_name = erlsom
+pkg_erlsom_description = XML parser for Erlang
+pkg_erlsom_homepage = https://github.com/willemdj/erlsom
+pkg_erlsom_fetch = git
+pkg_erlsom_repo = https://github.com/willemdj/erlsom
+pkg_erlsom_commit = master
+
+PACKAGES += erlubi
+pkg_erlubi_name = erlubi
+pkg_erlubi_description = Ubigraph Erlang Client (and Process Visualizer)
+pkg_erlubi_homepage = https://github.com/krestenkrab/erlubi
+pkg_erlubi_fetch = git
+pkg_erlubi_repo = https://github.com/krestenkrab/erlubi
+pkg_erlubi_commit = master
+
+PACKAGES += erlvolt
+pkg_erlvolt_name = erlvolt
+pkg_erlvolt_description = VoltDB Erlang Client Driver
+pkg_erlvolt_homepage = https://github.com/VoltDB/voltdb-client-erlang
+pkg_erlvolt_fetch = git
+pkg_erlvolt_repo = https://github.com/VoltDB/voltdb-client-erlang
+pkg_erlvolt_commit = master
+
+PACKAGES += erlware_commons
+pkg_erlware_commons_name = erlware_commons
+pkg_erlware_commons_description = Erlware Commons is an Erlware project focused on all aspects of reusable Erlang components.
+pkg_erlware_commons_homepage = https://github.com/erlware/erlware_commons
+pkg_erlware_commons_fetch = git
+pkg_erlware_commons_repo = https://github.com/erlware/erlware_commons
+pkg_erlware_commons_commit = master
+
+PACKAGES += erlydtl
+pkg_erlydtl_name = erlydtl
+pkg_erlydtl_description = Django Template Language for Erlang.
+pkg_erlydtl_homepage = https://github.com/erlydtl/erlydtl
+pkg_erlydtl_fetch = git
+pkg_erlydtl_repo = https://github.com/erlydtl/erlydtl
+pkg_erlydtl_commit = master
+
+PACKAGES += errd
+pkg_errd_name = errd
+pkg_errd_description = Erlang RRDTool library
+pkg_errd_homepage = https://github.com/archaelus/errd
+pkg_errd_fetch = git
+pkg_errd_repo = https://github.com/archaelus/errd
+pkg_errd_commit = master
+
+PACKAGES += erserve
+pkg_erserve_name = erserve
+pkg_erserve_description = Erlang/Rserve communication interface
+pkg_erserve_homepage = https://github.com/del/erserve
+pkg_erserve_fetch = git
+pkg_erserve_repo = https://github.com/del/erserve
+pkg_erserve_commit = master
+
+PACKAGES += erwa
+pkg_erwa_name = erwa
+pkg_erwa_description = A WAMP router and client written in Erlang.
+pkg_erwa_homepage = https://github.com/bwegh/erwa
+pkg_erwa_fetch = git
+pkg_erwa_repo = https://github.com/bwegh/erwa
+pkg_erwa_commit = master
+
+PACKAGES += escalus
+pkg_escalus_name = escalus
+pkg_escalus_description = An XMPP client library in Erlang for conveniently testing XMPP servers
+pkg_escalus_homepage = https://github.com/esl/escalus
+pkg_escalus_fetch = git
+pkg_escalus_repo = https://github.com/esl/escalus
+pkg_escalus_commit = master
+
+PACKAGES += esh_mk
+pkg_esh_mk_name = esh_mk
+pkg_esh_mk_description = esh template engine plugin for erlang.mk
+pkg_esh_mk_homepage = https://github.com/crownedgrouse/esh.mk
+pkg_esh_mk_fetch = git
+pkg_esh_mk_repo = https://github.com/crownedgrouse/esh.mk.git
+pkg_esh_mk_commit = master
+
+PACKAGES += espec
+pkg_espec_name = espec
+pkg_espec_description = ESpec: Behaviour driven development framework for Erlang
+pkg_espec_homepage = https://github.com/lucaspiller/espec
+pkg_espec_fetch = git
+pkg_espec_repo = https://github.com/lucaspiller/espec
+pkg_espec_commit = master
+
+PACKAGES += estatsd
+pkg_estatsd_name = estatsd
+pkg_estatsd_description = Erlang stats aggregation app that periodically flushes data to graphite
+pkg_estatsd_homepage = https://github.com/RJ/estatsd
+pkg_estatsd_fetch = git
+pkg_estatsd_repo = https://github.com/RJ/estatsd
+pkg_estatsd_commit = master
+
+PACKAGES += etap
+pkg_etap_name = etap
+pkg_etap_description = etap is a simple erlang testing library that provides TAP compliant output.
+pkg_etap_homepage = https://github.com/ngerakines/etap
+pkg_etap_fetch = git
+pkg_etap_repo = https://github.com/ngerakines/etap
+pkg_etap_commit = master
+
+PACKAGES += etest
+pkg_etest_name = etest
+pkg_etest_description = A lightweight, convention over configuration test framework for Erlang
+pkg_etest_homepage = https://github.com/wooga/etest
+pkg_etest_fetch = git
+pkg_etest_repo = https://github.com/wooga/etest
+pkg_etest_commit = master
+
+PACKAGES += etest_http
+pkg_etest_http_name = etest_http
+pkg_etest_http_description = etest Assertions around HTTP (client-side)
+pkg_etest_http_homepage = https://github.com/wooga/etest_http
+pkg_etest_http_fetch = git
+pkg_etest_http_repo = https://github.com/wooga/etest_http
+pkg_etest_http_commit = master
+
+PACKAGES += etoml
+pkg_etoml_name = etoml
+pkg_etoml_description = TOML language erlang parser
+pkg_etoml_homepage = https://github.com/kalta/etoml
+pkg_etoml_fetch = git
+pkg_etoml_repo = https://github.com/kalta/etoml
+pkg_etoml_commit = master
+
+PACKAGES += eunit
+pkg_eunit_name = eunit
+pkg_eunit_description = The EUnit lightweight unit testing framework for Erlang - this is the canonical development repository.
+pkg_eunit_homepage = https://github.com/richcarl/eunit
+pkg_eunit_fetch = git
+pkg_eunit_repo = https://github.com/richcarl/eunit
+pkg_eunit_commit = master
+
+PACKAGES += eunit_formatters
+pkg_eunit_formatters_name = eunit_formatters
+pkg_eunit_formatters_description = Because eunit's output sucks. Let's make it better.
+pkg_eunit_formatters_homepage = https://github.com/seancribbs/eunit_formatters
+pkg_eunit_formatters_fetch = git
+pkg_eunit_formatters_repo = https://github.com/seancribbs/eunit_formatters
+pkg_eunit_formatters_commit = master
+
+PACKAGES += euthanasia
+pkg_euthanasia_name = euthanasia
+pkg_euthanasia_description = Merciful killer for your Erlang processes
+pkg_euthanasia_homepage = https://github.com/doubleyou/euthanasia
+pkg_euthanasia_fetch = git
+pkg_euthanasia_repo = https://github.com/doubleyou/euthanasia
+pkg_euthanasia_commit = master
+
+PACKAGES += evum
+pkg_evum_name = evum
+pkg_evum_description = Spawn Linux VMs as Erlang processes in the Erlang VM
+pkg_evum_homepage = https://github.com/msantos/evum
+pkg_evum_fetch = git
+pkg_evum_repo = https://github.com/msantos/evum
+pkg_evum_commit = master
+
+PACKAGES += exec
+pkg_exec_name = erlexec
+pkg_exec_description = Execute and control OS processes from Erlang/OTP.
+pkg_exec_homepage = http://saleyn.github.com/erlexec
+pkg_exec_fetch = git
+pkg_exec_repo = https://github.com/saleyn/erlexec
+pkg_exec_commit = master
+
+PACKAGES += exml
+pkg_exml_name = exml
+pkg_exml_description = XML parsing library in Erlang
+pkg_exml_homepage = https://github.com/paulgray/exml
+pkg_exml_fetch = git
+pkg_exml_repo = https://github.com/paulgray/exml
+pkg_exml_commit = master
+
+PACKAGES += exometer
+pkg_exometer_name = exometer
+pkg_exometer_description = Basic measurement objects and probe behavior
+pkg_exometer_homepage = https://github.com/Feuerlabs/exometer
+pkg_exometer_fetch = git
+pkg_exometer_repo = https://github.com/Feuerlabs/exometer
+pkg_exometer_commit = master
+
+PACKAGES += exs1024
+pkg_exs1024_name = exs1024
+pkg_exs1024_description = Xorshift1024star pseudo random number generator for Erlang.
+pkg_exs1024_homepage = https://github.com/jj1bdx/exs1024
+pkg_exs1024_fetch = git
+pkg_exs1024_repo = https://github.com/jj1bdx/exs1024
+pkg_exs1024_commit = master
+
+PACKAGES += exs64
+pkg_exs64_name = exs64
+pkg_exs64_description = Xorshift64star pseudo random number generator for Erlang.
+pkg_exs64_homepage = https://github.com/jj1bdx/exs64
+pkg_exs64_fetch = git
+pkg_exs64_repo = https://github.com/jj1bdx/exs64
+pkg_exs64_commit = master
+
+PACKAGES += exsplus116
+pkg_exsplus116_name = exsplus116
+pkg_exsplus116_description = Xorshift116plus for Erlang
+pkg_exsplus116_homepage = https://github.com/jj1bdx/exsplus116
+pkg_exsplus116_fetch = git
+pkg_exsplus116_repo = https://github.com/jj1bdx/exsplus116
+pkg_exsplus116_commit = master
+
+PACKAGES += exsplus128
+pkg_exsplus128_name = exsplus128
+pkg_exsplus128_description = Xorshift128plus pseudo random number generator for Erlang.
+pkg_exsplus128_homepage = https://github.com/jj1bdx/exsplus128
+pkg_exsplus128_fetch = git
+pkg_exsplus128_repo = https://github.com/jj1bdx/exsplus128
+pkg_exsplus128_commit = master
+
+PACKAGES += ezmq
+pkg_ezmq_name = ezmq
+pkg_ezmq_description = ZeroMQ implemented in Erlang
+pkg_ezmq_homepage = https://github.com/RoadRunnr/ezmq
+pkg_ezmq_fetch = git
+pkg_ezmq_repo = https://github.com/RoadRunnr/ezmq
+pkg_ezmq_commit = master
+
+PACKAGES += ezmtp
+pkg_ezmtp_name = ezmtp
+pkg_ezmtp_description = ZMTP protocol in pure Erlang.
+pkg_ezmtp_homepage = https://github.com/a13x/ezmtp
+pkg_ezmtp_fetch = git
+pkg_ezmtp_repo = https://github.com/a13x/ezmtp
+pkg_ezmtp_commit = master
+
+PACKAGES += fast_disk_log
+pkg_fast_disk_log_name = fast_disk_log
+pkg_fast_disk_log_description = Pool-based asynchronous Erlang disk logger
+pkg_fast_disk_log_homepage = https://github.com/lpgauth/fast_disk_log
+pkg_fast_disk_log_fetch = git
+pkg_fast_disk_log_repo = https://github.com/lpgauth/fast_disk_log
+pkg_fast_disk_log_commit = master
+
+PACKAGES += feeder
+pkg_feeder_name = feeder
+pkg_feeder_description = Stream parse RSS and Atom formatted XML feeds.
+pkg_feeder_homepage = https://github.com/michaelnisi/feeder
+pkg_feeder_fetch = git
+pkg_feeder_repo = https://github.com/michaelnisi/feeder
+pkg_feeder_commit = master
+
+PACKAGES += find_crate
+pkg_find_crate_name = find_crate
+pkg_find_crate_description = Find Rust libs and exes in Erlang application priv directory
+pkg_find_crate_homepage = https://github.com/goertzenator/find_crate
+pkg_find_crate_fetch = git
+pkg_find_crate_repo = https://github.com/goertzenator/find_crate
+pkg_find_crate_commit = master
+
+PACKAGES += fix
+pkg_fix_name = fix
+pkg_fix_description = http://fixprotocol.org/ implementation.
+pkg_fix_homepage = https://github.com/maxlapshin/fix
+pkg_fix_fetch = git
+pkg_fix_repo = https://github.com/maxlapshin/fix
+pkg_fix_commit = master
+
+PACKAGES += flower
+pkg_flower_name = flower
+pkg_flower_description = FlowER - an Erlang OpenFlow development platform
+pkg_flower_homepage = https://github.com/travelping/flower
+pkg_flower_fetch = git
+pkg_flower_repo = https://github.com/travelping/flower
+pkg_flower_commit = master
+
+PACKAGES += fn
+pkg_fn_name = fn
+pkg_fn_description = Function utilities for Erlang
+pkg_fn_homepage = https://github.com/reiddraper/fn
+pkg_fn_fetch = git
+pkg_fn_repo = https://github.com/reiddraper/fn
+pkg_fn_commit = master
+
+PACKAGES += folsom
+pkg_folsom_name = folsom
+pkg_folsom_description = Expose Erlang Events and Metrics
+pkg_folsom_homepage = https://github.com/boundary/folsom
+pkg_folsom_fetch = git
+pkg_folsom_repo = https://github.com/boundary/folsom
+pkg_folsom_commit = master
+
+PACKAGES += folsom_cowboy
+pkg_folsom_cowboy_name = folsom_cowboy
+pkg_folsom_cowboy_description = A Cowboy based Folsom HTTP Wrapper.
+pkg_folsom_cowboy_homepage = https://github.com/boundary/folsom_cowboy
+pkg_folsom_cowboy_fetch = git
+pkg_folsom_cowboy_repo = https://github.com/boundary/folsom_cowboy
+pkg_folsom_cowboy_commit = master
+
+PACKAGES += folsomite
+pkg_folsomite_name = folsomite
+pkg_folsomite_description = blow up your graphite / riemann server with folsom metrics
+pkg_folsomite_homepage = https://github.com/campanja/folsomite
+pkg_folsomite_fetch = git
+pkg_folsomite_repo = https://github.com/campanja/folsomite
+pkg_folsomite_commit = master
+
+PACKAGES += fs
+pkg_fs_name = fs
+pkg_fs_description = Erlang FileSystem Listener
+pkg_fs_homepage = https://github.com/synrc/fs
+pkg_fs_fetch = git
+pkg_fs_repo = https://github.com/synrc/fs
+pkg_fs_commit = master
+
+PACKAGES += fuse
+pkg_fuse_name = fuse
+pkg_fuse_description = A Circuit Breaker for Erlang
+pkg_fuse_homepage = https://github.com/jlouis/fuse
+pkg_fuse_fetch = git
+pkg_fuse_repo = https://github.com/jlouis/fuse
+pkg_fuse_commit = master
+
+PACKAGES += gcm
+pkg_gcm_name = gcm
+pkg_gcm_description = An Erlang application for Google Cloud Messaging
+pkg_gcm_homepage = https://github.com/pdincau/gcm-erlang
+pkg_gcm_fetch = git
+pkg_gcm_repo = https://github.com/pdincau/gcm-erlang
+pkg_gcm_commit = master
+
+PACKAGES += gcprof
+pkg_gcprof_name = gcprof
+pkg_gcprof_description = Garbage Collection profiler for Erlang
+pkg_gcprof_homepage = https://github.com/knutin/gcprof
+pkg_gcprof_fetch = git
+pkg_gcprof_repo = https://github.com/knutin/gcprof
+pkg_gcprof_commit = master
+
+PACKAGES += geas
+pkg_geas_name = geas
+pkg_geas_description = Guess Erlang Application Scattering
+pkg_geas_homepage = https://github.com/crownedgrouse/geas
+pkg_geas_fetch = git
+pkg_geas_repo = https://github.com/crownedgrouse/geas
+pkg_geas_commit = master
+
+PACKAGES += geef
+pkg_geef_name = geef
+pkg_geef_description = Git NEEEEF (Erlang NIF)
+pkg_geef_homepage = https://github.com/carlosmn/geef
+pkg_geef_fetch = git
+pkg_geef_repo = https://github.com/carlosmn/geef
+pkg_geef_commit = master
+
+PACKAGES += gen_coap
+pkg_gen_coap_name = gen_coap
+pkg_gen_coap_description = Generic Erlang CoAP Client/Server
+pkg_gen_coap_homepage = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_fetch = git
+pkg_gen_coap_repo = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_commit = master
+
+PACKAGES += gen_cycle
+pkg_gen_cycle_name = gen_cycle
+pkg_gen_cycle_description = Simple, generic OTP behaviour for recurring tasks
+pkg_gen_cycle_homepage = https://github.com/aerosol/gen_cycle
+pkg_gen_cycle_fetch = git
+pkg_gen_cycle_repo = https://github.com/aerosol/gen_cycle
+pkg_gen_cycle_commit = develop
+
+PACKAGES += gen_icmp
+pkg_gen_icmp_name = gen_icmp
+pkg_gen_icmp_description = Erlang interface to ICMP sockets
+pkg_gen_icmp_homepage = https://github.com/msantos/gen_icmp
+pkg_gen_icmp_fetch = git
+pkg_gen_icmp_repo = https://github.com/msantos/gen_icmp
+pkg_gen_icmp_commit = master
+
+PACKAGES += gen_leader
+pkg_gen_leader_name = gen_leader
+pkg_gen_leader_description = leader election behavior
+pkg_gen_leader_homepage = https://github.com/garret-smith/gen_leader_revival
+pkg_gen_leader_fetch = git
+pkg_gen_leader_repo = https://github.com/garret-smith/gen_leader_revival
+pkg_gen_leader_commit = master
+
+PACKAGES += gen_nb_server
+pkg_gen_nb_server_name = gen_nb_server
+pkg_gen_nb_server_description = OTP behavior for writing non-blocking servers
+pkg_gen_nb_server_homepage = https://github.com/kevsmith/gen_nb_server
+pkg_gen_nb_server_fetch = git
+pkg_gen_nb_server_repo = https://github.com/kevsmith/gen_nb_server
+pkg_gen_nb_server_commit = master
+
+PACKAGES += gen_paxos
+pkg_gen_paxos_name = gen_paxos
+pkg_gen_paxos_description = An Erlang/OTP-style implementation of the PAXOS distributed consensus protocol
+pkg_gen_paxos_homepage = https://github.com/gburd/gen_paxos
+pkg_gen_paxos_fetch = git
+pkg_gen_paxos_repo = https://github.com/gburd/gen_paxos
+pkg_gen_paxos_commit = master
+
+PACKAGES += gen_rpc
+pkg_gen_rpc_name = gen_rpc
+pkg_gen_rpc_description = A scalable RPC library for Erlang-VM based languages
+pkg_gen_rpc_homepage = https://github.com/priestjim/gen_rpc.git
+pkg_gen_rpc_fetch = git
+pkg_gen_rpc_repo = https://github.com/priestjim/gen_rpc.git
+pkg_gen_rpc_commit = master
+
+PACKAGES += gen_smtp
+pkg_gen_smtp_name = gen_smtp
+pkg_gen_smtp_description = A generic Erlang SMTP server and client that can be extended via callback modules
+pkg_gen_smtp_homepage = https://github.com/Vagabond/gen_smtp
+pkg_gen_smtp_fetch = git
+pkg_gen_smtp_repo = https://github.com/Vagabond/gen_smtp
+pkg_gen_smtp_commit = master
+
+PACKAGES += gen_tracker
+pkg_gen_tracker_name = gen_tracker
+pkg_gen_tracker_description = supervisor with ets handling of children and their metadata
+pkg_gen_tracker_homepage = https://github.com/erlyvideo/gen_tracker
+pkg_gen_tracker_fetch = git
+pkg_gen_tracker_repo = https://github.com/erlyvideo/gen_tracker
+pkg_gen_tracker_commit = master
+
+PACKAGES += gen_unix
+pkg_gen_unix_name = gen_unix
+pkg_gen_unix_description = Erlang Unix socket interface
+pkg_gen_unix_homepage = https://github.com/msantos/gen_unix
+pkg_gen_unix_fetch = git
+pkg_gen_unix_repo = https://github.com/msantos/gen_unix
+pkg_gen_unix_commit = master
+
+PACKAGES += geode
+pkg_geode_name = geode
+pkg_geode_description = geohash/proximity lookup in pure, uncut erlang.
+pkg_geode_homepage = https://github.com/bradfordw/geode
+pkg_geode_fetch = git
+pkg_geode_repo = https://github.com/bradfordw/geode
+pkg_geode_commit = master
+
+PACKAGES += getopt
+pkg_getopt_name = getopt
+pkg_getopt_description = Module to parse command line arguments using the GNU getopt syntax
+pkg_getopt_homepage = https://github.com/jcomellas/getopt
+pkg_getopt_fetch = git
+pkg_getopt_repo = https://github.com/jcomellas/getopt
+pkg_getopt_commit = master
+
+PACKAGES += gettext
+pkg_gettext_name = gettext
+pkg_gettext_description = Erlang internationalization library.
+pkg_gettext_homepage = https://github.com/etnt/gettext
+pkg_gettext_fetch = git
+pkg_gettext_repo = https://github.com/etnt/gettext
+pkg_gettext_commit = master
+
+PACKAGES += giallo
+pkg_giallo_name = giallo
+pkg_giallo_description = Small and flexible web framework on top of Cowboy
+pkg_giallo_homepage = https://github.com/kivra/giallo
+pkg_giallo_fetch = git
+pkg_giallo_repo = https://github.com/kivra/giallo
+pkg_giallo_commit = master
+
+PACKAGES += gin
+pkg_gin_name = gin
+pkg_gin_description = The guards and for Erlang parse_transform
+pkg_gin_homepage = https://github.com/mad-cocktail/gin
+pkg_gin_fetch = git
+pkg_gin_repo = https://github.com/mad-cocktail/gin
+pkg_gin_commit = master
+
+PACKAGES += gitty
+pkg_gitty_name = gitty
+pkg_gitty_description = Git access in erlang
+pkg_gitty_homepage = https://github.com/maxlapshin/gitty
+pkg_gitty_fetch = git
+pkg_gitty_repo = https://github.com/maxlapshin/gitty
+pkg_gitty_commit = master
+
+PACKAGES += gold_fever
+pkg_gold_fever_name = gold_fever
+pkg_gold_fever_description = A Treasure Hunt for Erlangers
+pkg_gold_fever_homepage = https://github.com/inaka/gold_fever
+pkg_gold_fever_fetch = git
+pkg_gold_fever_repo = https://github.com/inaka/gold_fever
+pkg_gold_fever_commit = master
+
+PACKAGES += gpb
+pkg_gpb_name = gpb
+pkg_gpb_description = A Google Protobuf implementation for Erlang
+pkg_gpb_homepage = https://github.com/tomas-abrahamsson/gpb
+pkg_gpb_fetch = git
+pkg_gpb_repo = https://github.com/tomas-abrahamsson/gpb
+pkg_gpb_commit = master
+
+PACKAGES += gproc
+pkg_gproc_name = gproc
+pkg_gproc_description = Extended process registry for Erlang
+pkg_gproc_homepage = https://github.com/uwiger/gproc
+pkg_gproc_fetch = git
+pkg_gproc_repo = https://github.com/uwiger/gproc
+pkg_gproc_commit = master
+
+PACKAGES += grapherl
+pkg_grapherl_name = grapherl
+pkg_grapherl_description = Create graphs of Erlang systems and programs
+pkg_grapherl_homepage = https://github.com/eproxus/grapherl
+pkg_grapherl_fetch = git
+pkg_grapherl_repo = https://github.com/eproxus/grapherl
+pkg_grapherl_commit = master
+
+PACKAGES += grpc
+pkg_grpc_name = grpc
+pkg_grpc_description = gRPC server in Erlang
+pkg_grpc_homepage = https://github.com/Bluehouse-Technology/grpc
+pkg_grpc_fetch = git
+pkg_grpc_repo = https://github.com/Bluehouse-Technology/grpc
+pkg_grpc_commit = master
+
+PACKAGES += grpc_client
+pkg_grpc_client_name = grpc_client
+pkg_grpc_client_description = gRPC client in Erlang
+pkg_grpc_client_homepage = https://github.com/Bluehouse-Technology/grpc_client
+pkg_grpc_client_fetch = git
+pkg_grpc_client_repo = https://github.com/Bluehouse-Technology/grpc_client
+pkg_grpc_client_commit = master
+
+PACKAGES += gun
+pkg_gun_name = gun
+pkg_gun_description = Asynchronous SPDY, HTTP and Websocket client written in Erlang.
+pkg_gun_homepage = http://ninenines.eu
+pkg_gun_fetch = git
+pkg_gun_repo = https://github.com/ninenines/gun
+pkg_gun_commit = master
+
+PACKAGES += gut
+pkg_gut_name = gut
+pkg_gut_description = gut is a template printing (aka scaffolding) tool for Erlang, like rails generate or yeoman
+pkg_gut_homepage = https://github.com/unbalancedparentheses/gut
+pkg_gut_fetch = git
+pkg_gut_repo = https://github.com/unbalancedparentheses/gut
+pkg_gut_commit = master
+
+PACKAGES += hackney
+pkg_hackney_name = hackney
+pkg_hackney_description = simple HTTP client in Erlang
+pkg_hackney_homepage = https://github.com/benoitc/hackney
+pkg_hackney_fetch = git
+pkg_hackney_repo = https://github.com/benoitc/hackney
+pkg_hackney_commit = master
+
+PACKAGES += hamcrest
+pkg_hamcrest_name = hamcrest
+pkg_hamcrest_description = Erlang port of Hamcrest
+pkg_hamcrest_homepage = https://github.com/hyperthunk/hamcrest-erlang
+pkg_hamcrest_fetch = git
+pkg_hamcrest_repo = https://github.com/hyperthunk/hamcrest-erlang
+pkg_hamcrest_commit = master
+
+PACKAGES += hanoidb
+pkg_hanoidb_name = hanoidb
+pkg_hanoidb_description = Erlang LSM BTree Storage
+pkg_hanoidb_homepage = https://github.com/krestenkrab/hanoidb
+pkg_hanoidb_fetch = git
+pkg_hanoidb_repo = https://github.com/krestenkrab/hanoidb
+pkg_hanoidb_commit = master
+
+PACKAGES += hottub
+pkg_hottub_name = hottub
+pkg_hottub_description = Permanent Erlang Worker Pool
+pkg_hottub_homepage = https://github.com/bfrog/hottub
+pkg_hottub_fetch = git
+pkg_hottub_repo = https://github.com/bfrog/hottub
+pkg_hottub_commit = master
+
+PACKAGES += hpack
+pkg_hpack_name = hpack
+pkg_hpack_description = HPACK Implementation for Erlang
+pkg_hpack_homepage = https://github.com/joedevivo/hpack
+pkg_hpack_fetch = git
+pkg_hpack_repo = https://github.com/joedevivo/hpack
+pkg_hpack_commit = master
+
+PACKAGES += hyper
+pkg_hyper_name = hyper
+pkg_hyper_description = Erlang implementation of HyperLogLog
+pkg_hyper_homepage = https://github.com/GameAnalytics/hyper
+pkg_hyper_fetch = git
+pkg_hyper_repo = https://github.com/GameAnalytics/hyper
+pkg_hyper_commit = master
+
+PACKAGES += i18n
+pkg_i18n_name = i18n
+pkg_i18n_description = International components for unicode from Erlang (unicode, date, string, number, format, locale, localization, transliteration, icu4e)
+pkg_i18n_homepage = https://github.com/erlang-unicode/i18n
+pkg_i18n_fetch = git
+pkg_i18n_repo = https://github.com/erlang-unicode/i18n
+pkg_i18n_commit = master
+
+PACKAGES += ibrowse
+pkg_ibrowse_name = ibrowse
+pkg_ibrowse_description = Erlang HTTP client
+pkg_ibrowse_homepage = https://github.com/cmullaparthi/ibrowse
+pkg_ibrowse_fetch = git
+pkg_ibrowse_repo = https://github.com/cmullaparthi/ibrowse
+pkg_ibrowse_commit = master
+
+PACKAGES += idna
+pkg_idna_name = idna
+pkg_idna_description = Erlang IDNA lib
+pkg_idna_homepage = https://github.com/benoitc/erlang-idna
+pkg_idna_fetch = git
+pkg_idna_repo = https://github.com/benoitc/erlang-idna
+pkg_idna_commit = master
+
+PACKAGES += ierlang
+pkg_ierlang_name = ierlang
+pkg_ierlang_description = An Erlang language kernel for IPython.
+pkg_ierlang_homepage = https://github.com/robbielynch/ierlang
+pkg_ierlang_fetch = git
+pkg_ierlang_repo = https://github.com/robbielynch/ierlang
+pkg_ierlang_commit = master
+
+PACKAGES += iota
+pkg_iota_name = iota
+pkg_iota_description = iota (Inter-dependency Objective Testing Apparatus) - a tool to enforce clean separation of responsibilities in Erlang code
+pkg_iota_homepage = https://github.com/jpgneves/iota
+pkg_iota_fetch = git
+pkg_iota_repo = https://github.com/jpgneves/iota
+pkg_iota_commit = master
+
+PACKAGES += irc_lib
+pkg_irc_lib_name = irc_lib
+pkg_irc_lib_description = Erlang IRC client library
+pkg_irc_lib_homepage = https://github.com/OtpChatBot/irc_lib
+pkg_irc_lib_fetch = git
+pkg_irc_lib_repo = https://github.com/OtpChatBot/irc_lib
+pkg_irc_lib_commit = master
+
+PACKAGES += ircd
+pkg_ircd_name = ircd
+pkg_ircd_description = A pluggable IRC daemon application/library for Erlang.
+pkg_ircd_homepage = https://github.com/tonyg/erlang-ircd
+pkg_ircd_fetch = git
+pkg_ircd_repo = https://github.com/tonyg/erlang-ircd
+pkg_ircd_commit = master
+
+PACKAGES += iris
+pkg_iris_name = iris
+pkg_iris_description = Iris Erlang binding
+pkg_iris_homepage = https://github.com/project-iris/iris-erl
+pkg_iris_fetch = git
+pkg_iris_repo = https://github.com/project-iris/iris-erl
+pkg_iris_commit = master
+
+PACKAGES += iso8601
+pkg_iso8601_name = iso8601
+pkg_iso8601_description = Erlang ISO 8601 date formatter/parser
+pkg_iso8601_homepage = https://github.com/seansawyer/erlang_iso8601
+pkg_iso8601_fetch = git
+pkg_iso8601_repo = https://github.com/seansawyer/erlang_iso8601
+pkg_iso8601_commit = master
+
+PACKAGES += jamdb_sybase
+pkg_jamdb_sybase_name = jamdb_sybase
+pkg_jamdb_sybase_description = Erlang driver for SAP Sybase ASE
+pkg_jamdb_sybase_homepage = https://github.com/erlangbureau/jamdb_sybase
+pkg_jamdb_sybase_fetch = git
+pkg_jamdb_sybase_repo = https://github.com/erlangbureau/jamdb_sybase
+pkg_jamdb_sybase_commit = master
+
+PACKAGES += jerg
+pkg_jerg_name = jerg
+pkg_jerg_description = JSON Schema to Erlang Records Generator
+pkg_jerg_homepage = https://github.com/ddossot/jerg
+pkg_jerg_fetch = git
+pkg_jerg_repo = https://github.com/ddossot/jerg
+pkg_jerg_commit = master
+
+PACKAGES += jesse
+pkg_jesse_name = jesse
+pkg_jesse_description = jesse (JSon Schema Erlang) is an implementation of a json schema validator for Erlang.
+pkg_jesse_homepage = https://github.com/for-GET/jesse
+pkg_jesse_fetch = git
+pkg_jesse_repo = https://github.com/for-GET/jesse
+pkg_jesse_commit = master
+
+PACKAGES += jiffy
+pkg_jiffy_name = jiffy
+pkg_jiffy_description = JSON NIFs for Erlang.
+pkg_jiffy_homepage = https://github.com/davisp/jiffy
+pkg_jiffy_fetch = git
+pkg_jiffy_repo = https://github.com/davisp/jiffy
+pkg_jiffy_commit = master
+
+PACKAGES += jiffy_v
+pkg_jiffy_v_name = jiffy_v
+pkg_jiffy_v_description = JSON validation utility
+pkg_jiffy_v_homepage = https://github.com/shizzard/jiffy-v
+pkg_jiffy_v_fetch = git
+pkg_jiffy_v_repo = https://github.com/shizzard/jiffy-v
+pkg_jiffy_v_commit = master
+
+PACKAGES += jobs
+pkg_jobs_name = jobs
+pkg_jobs_description = a Job scheduler for load regulation
+pkg_jobs_homepage = https://github.com/esl/jobs
+pkg_jobs_fetch = git
+pkg_jobs_repo = https://github.com/esl/jobs
+pkg_jobs_commit = master
+
+PACKAGES += joxa
+pkg_joxa_name = joxa
+pkg_joxa_description = A Modern Lisp for the Erlang VM
+pkg_joxa_homepage = https://github.com/joxa/joxa
+pkg_joxa_fetch = git
+pkg_joxa_repo = https://github.com/joxa/joxa
+pkg_joxa_commit = master
+
+PACKAGES += json
+pkg_json_name = json
+pkg_json_description = a high level json library for erlang (17.0+)
+pkg_json_homepage = https://github.com/talentdeficit/json
+pkg_json_fetch = git
+pkg_json_repo = https://github.com/talentdeficit/json
+pkg_json_commit = master
+
+PACKAGES += json_rec
+pkg_json_rec_name = json_rec
+pkg_json_rec_description = JSON to erlang record
+pkg_json_rec_homepage = https://github.com/justinkirby/json_rec
+pkg_json_rec_fetch = git
+pkg_json_rec_repo = https://github.com/justinkirby/json_rec
+pkg_json_rec_commit = master
+
+PACKAGES += jsone
+pkg_jsone_name = jsone
+pkg_jsone_description = An Erlang library for encoding and decoding JSON data.
+pkg_jsone_homepage = https://github.com/sile/jsone.git
+pkg_jsone_fetch = git
+pkg_jsone_repo = https://github.com/sile/jsone.git
+pkg_jsone_commit = master
+
+PACKAGES += jsonerl
+pkg_jsonerl_name = jsonerl
+pkg_jsonerl_description = yet another but slightly different erlang <-> json encoder/decoder
+pkg_jsonerl_homepage = https://github.com/lambder/jsonerl
+pkg_jsonerl_fetch = git
+pkg_jsonerl_repo = https://github.com/lambder/jsonerl
+pkg_jsonerl_commit = master
+
+PACKAGES += jsonpath
+pkg_jsonpath_name = jsonpath
+pkg_jsonpath_description = Fast Erlang JSON data retrieval and updates via javascript-like notation
+pkg_jsonpath_homepage = https://github.com/GeneStevens/jsonpath
+pkg_jsonpath_fetch = git
+pkg_jsonpath_repo = https://github.com/GeneStevens/jsonpath
+pkg_jsonpath_commit = master
+
+PACKAGES += jsonx
+pkg_jsonx_name = jsonx
+pkg_jsonx_description = JSONX is an Erlang library for efficiently decoding and encoding JSON, written in C.
+pkg_jsonx_homepage = https://github.com/iskra/jsonx
+pkg_jsonx_fetch = git
+pkg_jsonx_repo = https://github.com/iskra/jsonx
+pkg_jsonx_commit = master
+
+PACKAGES += jsx
+pkg_jsx_name = jsx
+pkg_jsx_description = An Erlang application for consuming, producing and manipulating JSON.
+pkg_jsx_homepage = https://github.com/talentdeficit/jsx
+pkg_jsx_fetch = git
+pkg_jsx_repo = https://github.com/talentdeficit/jsx
+pkg_jsx_commit = master
+
+PACKAGES += kafka
+pkg_kafka_name = kafka
+pkg_kafka_description = Kafka consumer and producer in Erlang
+pkg_kafka_homepage = https://github.com/wooga/kafka-erlang
+pkg_kafka_fetch = git
+pkg_kafka_repo = https://github.com/wooga/kafka-erlang
+pkg_kafka_commit = master
+
+PACKAGES += kafka_protocol
+pkg_kafka_protocol_name = kafka_protocol
+pkg_kafka_protocol_description = Kafka protocol Erlang library
+pkg_kafka_protocol_homepage = https://github.com/klarna/kafka_protocol
+pkg_kafka_protocol_fetch = git
+pkg_kafka_protocol_repo = https://github.com/klarna/kafka_protocol.git
+pkg_kafka_protocol_commit = master
+
+PACKAGES += kai
+pkg_kai_name = kai
+pkg_kai_description = DHT storage by Takeshi Inoue
+pkg_kai_homepage = https://github.com/synrc/kai
+pkg_kai_fetch = git
+pkg_kai_repo = https://github.com/synrc/kai
+pkg_kai_commit = master
+
+PACKAGES += katja
+pkg_katja_name = katja
+pkg_katja_description = A simple Riemann client written in Erlang.
+pkg_katja_homepage = https://github.com/nifoc/katja
+pkg_katja_fetch = git
+pkg_katja_repo = https://github.com/nifoc/katja
+pkg_katja_commit = master
+
+PACKAGES += kdht
+pkg_kdht_name = kdht
+pkg_kdht_description = kdht is an erlang DHT implementation
+pkg_kdht_homepage = https://github.com/kevinlynx/kdht
+pkg_kdht_fetch = git
+pkg_kdht_repo = https://github.com/kevinlynx/kdht
+pkg_kdht_commit = master
+
+PACKAGES += key2value
+pkg_key2value_name = key2value
+pkg_key2value_description = Erlang 2-way map
+pkg_key2value_homepage = https://github.com/okeuday/key2value
+pkg_key2value_fetch = git
+pkg_key2value_repo = https://github.com/okeuday/key2value
+pkg_key2value_commit = master
+
+PACKAGES += keys1value
+pkg_keys1value_name = keys1value
+pkg_keys1value_description = Erlang set associative map for key lists
+pkg_keys1value_homepage = https://github.com/okeuday/keys1value
+pkg_keys1value_fetch = git
+pkg_keys1value_repo = https://github.com/okeuday/keys1value
+pkg_keys1value_commit = master
+
+PACKAGES += kinetic
+pkg_kinetic_name = kinetic
+pkg_kinetic_description = Erlang Kinesis Client
+pkg_kinetic_homepage = https://github.com/AdRoll/kinetic
+pkg_kinetic_fetch = git
+pkg_kinetic_repo = https://github.com/AdRoll/kinetic
+pkg_kinetic_commit = master
+
+PACKAGES += kjell
+pkg_kjell_name = kjell
+pkg_kjell_description = Erlang Shell
+pkg_kjell_homepage = https://github.com/karlll/kjell
+pkg_kjell_fetch = git
+pkg_kjell_repo = https://github.com/karlll/kjell
+pkg_kjell_commit = master
+
+PACKAGES += kraken
+pkg_kraken_name = kraken
+pkg_kraken_description = Distributed Pubsub Server for Realtime Apps
+pkg_kraken_homepage = https://github.com/Asana/kraken
+pkg_kraken_fetch = git
+pkg_kraken_repo = https://github.com/Asana/kraken
+pkg_kraken_commit = master
+
+PACKAGES += kucumberl
+pkg_kucumberl_name = kucumberl
+pkg_kucumberl_description = A pure-erlang, open-source, implementation of Cucumber
+pkg_kucumberl_homepage = https://github.com/openshine/kucumberl
+pkg_kucumberl_fetch = git
+pkg_kucumberl_repo = https://github.com/openshine/kucumberl
+pkg_kucumberl_commit = master
+
+PACKAGES += kvc
+pkg_kvc_name = kvc
+pkg_kvc_description = KVC - Key Value Coding for Erlang data structures
+pkg_kvc_homepage = https://github.com/etrepum/kvc
+pkg_kvc_fetch = git
+pkg_kvc_repo = https://github.com/etrepum/kvc
+pkg_kvc_commit = master
+
+PACKAGES += kvlists
+pkg_kvlists_name = kvlists
+pkg_kvlists_description = Lists of key-value pairs (decoded JSON) in Erlang
+pkg_kvlists_homepage = https://github.com/jcomellas/kvlists
+pkg_kvlists_fetch = git
+pkg_kvlists_repo = https://github.com/jcomellas/kvlists
+pkg_kvlists_commit = master
+
+PACKAGES += kvs
+pkg_kvs_name = kvs
+pkg_kvs_description = Container and Iterator
+pkg_kvs_homepage = https://github.com/synrc/kvs
+pkg_kvs_fetch = git
+pkg_kvs_repo = https://github.com/synrc/kvs
+pkg_kvs_commit = master
+
+PACKAGES += lager
+pkg_lager_name = lager
+pkg_lager_description = A logging framework for Erlang/OTP.
+pkg_lager_homepage = https://github.com/erlang-lager/lager
+pkg_lager_fetch = git
+pkg_lager_repo = https://github.com/erlang-lager/lager
+pkg_lager_commit = master
+
+PACKAGES += lager_amqp_backend
+pkg_lager_amqp_backend_name = lager_amqp_backend
+pkg_lager_amqp_backend_description = AMQP RabbitMQ Lager backend
+pkg_lager_amqp_backend_homepage = https://github.com/jbrisbin/lager_amqp_backend
+pkg_lager_amqp_backend_fetch = git
+pkg_lager_amqp_backend_repo = https://github.com/jbrisbin/lager_amqp_backend
+pkg_lager_amqp_backend_commit = master
+
+PACKAGES += lager_syslog
+pkg_lager_syslog_name = lager_syslog
+pkg_lager_syslog_description = Syslog backend for lager
+pkg_lager_syslog_homepage = https://github.com/erlang-lager/lager_syslog
+pkg_lager_syslog_fetch = git
+pkg_lager_syslog_repo = https://github.com/erlang-lager/lager_syslog
+pkg_lager_syslog_commit = master
+
+PACKAGES += lambdapad
+pkg_lambdapad_name = lambdapad
+pkg_lambdapad_description = Static site generator using Erlang. Yes, Erlang.
+pkg_lambdapad_homepage = https://github.com/gar1t/lambdapad
+pkg_lambdapad_fetch = git
+pkg_lambdapad_repo = https://github.com/gar1t/lambdapad
+pkg_lambdapad_commit = master
+
+PACKAGES += lasp
+pkg_lasp_name = lasp
+pkg_lasp_description = A Language for Distributed, Eventually Consistent Computations
+pkg_lasp_homepage = http://lasp-lang.org/
+pkg_lasp_fetch = git
+pkg_lasp_repo = https://github.com/lasp-lang/lasp
+pkg_lasp_commit = master
+
+PACKAGES += lasse
+pkg_lasse_name = lasse
+pkg_lasse_description = SSE handler for Cowboy
+pkg_lasse_homepage = https://github.com/inaka/lasse
+pkg_lasse_fetch = git
+pkg_lasse_repo = https://github.com/inaka/lasse
+pkg_lasse_commit = master
+
+PACKAGES += ldap
+pkg_ldap_name = ldap
+pkg_ldap_description = LDAP server written in Erlang
+pkg_ldap_homepage = https://github.com/spawnproc/ldap
+pkg_ldap_fetch = git
+pkg_ldap_repo = https://github.com/spawnproc/ldap
+pkg_ldap_commit = master
+
+PACKAGES += lethink
+pkg_lethink_name = lethink
+pkg_lethink_description = erlang driver for rethinkdb
+pkg_lethink_homepage = https://github.com/taybin/lethink
+pkg_lethink_fetch = git
+pkg_lethink_repo = https://github.com/taybin/lethink
+pkg_lethink_commit = master
+
+PACKAGES += lfe
+pkg_lfe_name = lfe
+pkg_lfe_description = Lisp Flavoured Erlang (LFE)
+pkg_lfe_homepage = https://github.com/rvirding/lfe
+pkg_lfe_fetch = git
+pkg_lfe_repo = https://github.com/rvirding/lfe
+pkg_lfe_commit = master
+
+PACKAGES += ling
+pkg_ling_name = ling
+pkg_ling_description = Erlang on Xen
+pkg_ling_homepage = https://github.com/cloudozer/ling
+pkg_ling_fetch = git
+pkg_ling_repo = https://github.com/cloudozer/ling
+pkg_ling_commit = master
+
+PACKAGES += live
+pkg_live_name = live
+pkg_live_description = Automated module and configuration reloader.
+pkg_live_homepage = http://ninenines.eu
+pkg_live_fetch = git
+pkg_live_repo = https://github.com/ninenines/live
+pkg_live_commit = master
+
+PACKAGES += lmq
+pkg_lmq_name = lmq
+pkg_lmq_description = Lightweight Message Queue
+pkg_lmq_homepage = https://github.com/iij/lmq
+pkg_lmq_fetch = git
+pkg_lmq_repo = https://github.com/iij/lmq
+pkg_lmq_commit = master
+
+PACKAGES += locker
+pkg_locker_name = locker
+pkg_locker_description = Atomic distributed 'check and set' for short-lived keys
+pkg_locker_homepage = https://github.com/wooga/locker
+pkg_locker_fetch = git
+pkg_locker_repo = https://github.com/wooga/locker
+pkg_locker_commit = master
+
+PACKAGES += locks
+pkg_locks_name = locks
+pkg_locks_description = A scalable, deadlock-resolving resource locker
+pkg_locks_homepage = https://github.com/uwiger/locks
+pkg_locks_fetch = git
+pkg_locks_repo = https://github.com/uwiger/locks
+pkg_locks_commit = master
+
+PACKAGES += log4erl
+pkg_log4erl_name = log4erl
+pkg_log4erl_description = A logger for erlang in the spirit of Log4J.
+pkg_log4erl_homepage = https://github.com/ahmednawras/log4erl
+pkg_log4erl_fetch = git
+pkg_log4erl_repo = https://github.com/ahmednawras/log4erl
+pkg_log4erl_commit = master
+
+PACKAGES += lol
+pkg_lol_name = lol
+pkg_lol_description = Lisp on erLang, and programming is fun again
+pkg_lol_homepage = https://github.com/b0oh/lol
+pkg_lol_fetch = git
+pkg_lol_repo = https://github.com/b0oh/lol
+pkg_lol_commit = master
+
+PACKAGES += lucid
+pkg_lucid_name = lucid
+pkg_lucid_description = HTTP/2 server written in Erlang
+pkg_lucid_homepage = https://github.com/tatsuhiro-t/lucid
+pkg_lucid_fetch = git
+pkg_lucid_repo = https://github.com/tatsuhiro-t/lucid
+pkg_lucid_commit = master
+
+PACKAGES += luerl
+pkg_luerl_name = luerl
+pkg_luerl_description = Lua in Erlang
+pkg_luerl_homepage = https://github.com/rvirding/luerl
+pkg_luerl_fetch = git
+pkg_luerl_repo = https://github.com/rvirding/luerl
+pkg_luerl_commit = develop
+
+PACKAGES += luwak
+pkg_luwak_name = luwak
+pkg_luwak_description = Large-object storage interface for Riak
+pkg_luwak_homepage = https://github.com/basho/luwak
+pkg_luwak_fetch = git
+pkg_luwak_repo = https://github.com/basho/luwak
+pkg_luwak_commit = master
+
+PACKAGES += lux
+pkg_lux_name = lux
+pkg_lux_description = Lux (LUcid eXpect scripting) simplifies test automation and provides an Expect-style execution of commands
+pkg_lux_homepage = https://github.com/hawk/lux
+pkg_lux_fetch = git
+pkg_lux_repo = https://github.com/hawk/lux
+pkg_lux_commit = master
+
+PACKAGES += machi
+pkg_machi_name = machi
+pkg_machi_description = Machi file store
+pkg_machi_homepage = https://github.com/basho/machi
+pkg_machi_fetch = git
+pkg_machi_repo = https://github.com/basho/machi
+pkg_machi_commit = master
+
+PACKAGES += mad
+pkg_mad_name = mad
+pkg_mad_description = Small and Fast Rebar Replacement
+pkg_mad_homepage = https://github.com/synrc/mad
+pkg_mad_fetch = git
+pkg_mad_repo = https://github.com/synrc/mad
+pkg_mad_commit = master
+
+PACKAGES += marina
+pkg_marina_name = marina
+pkg_marina_description = Non-blocking Erlang Cassandra CQL3 client
+pkg_marina_homepage = https://github.com/lpgauth/marina
+pkg_marina_fetch = git
+pkg_marina_repo = https://github.com/lpgauth/marina
+pkg_marina_commit = master
+
+PACKAGES += mavg
+pkg_mavg_name = mavg
+pkg_mavg_description = Erlang :: Exponential moving average library
+pkg_mavg_homepage = https://github.com/EchoTeam/mavg
+pkg_mavg_fetch = git
+pkg_mavg_repo = https://github.com/EchoTeam/mavg
+pkg_mavg_commit = master
+
+PACKAGES += mc_erl
+pkg_mc_erl_name = mc_erl
+pkg_mc_erl_description = mc-erl is a server for Minecraft 1.4.7 written in Erlang.
+pkg_mc_erl_homepage = https://github.com/clonejo/mc-erl
+pkg_mc_erl_fetch = git
+pkg_mc_erl_repo = https://github.com/clonejo/mc-erl
+pkg_mc_erl_commit = master
+
+PACKAGES += mcd
+pkg_mcd_name = mcd
+pkg_mcd_description = Fast memcached protocol client in pure Erlang
+pkg_mcd_homepage = https://github.com/EchoTeam/mcd
+pkg_mcd_fetch = git
+pkg_mcd_repo = https://github.com/EchoTeam/mcd
+pkg_mcd_commit = master
+
+PACKAGES += mcerlang
+pkg_mcerlang_name = mcerlang
+pkg_mcerlang_description = The McErlang model checker for Erlang
+pkg_mcerlang_homepage = https://github.com/fredlund/McErlang
+pkg_mcerlang_fetch = git
+pkg_mcerlang_repo = https://github.com/fredlund/McErlang
+pkg_mcerlang_commit = master
+
+PACKAGES += meck
+pkg_meck_name = meck
+pkg_meck_description = A mocking library for Erlang
+pkg_meck_homepage = https://github.com/eproxus/meck
+pkg_meck_fetch = git
+pkg_meck_repo = https://github.com/eproxus/meck
+pkg_meck_commit = master
+
+PACKAGES += mekao
+pkg_mekao_name = mekao
+pkg_mekao_description = SQL constructor
+pkg_mekao_homepage = https://github.com/ddosia/mekao
+pkg_mekao_fetch = git
+pkg_mekao_repo = https://github.com/ddosia/mekao
+pkg_mekao_commit = master
+
+PACKAGES += memo
+pkg_memo_name = memo
+pkg_memo_description = Erlang memoization server
+pkg_memo_homepage = https://github.com/tuncer/memo
+pkg_memo_fetch = git
+pkg_memo_repo = https://github.com/tuncer/memo
+pkg_memo_commit = master
+
+PACKAGES += merge_index
+pkg_merge_index_name = merge_index
+pkg_merge_index_description = MergeIndex is an Erlang library for storing ordered sets on disk. It is very similar to an SSTable (in Google's Bigtable) or an HFile (in Hadoop).
+pkg_merge_index_homepage = https://github.com/basho/merge_index
+pkg_merge_index_fetch = git
+pkg_merge_index_repo = https://github.com/basho/merge_index
+pkg_merge_index_commit = master
+
+PACKAGES += merl
+pkg_merl_name = merl
+pkg_merl_description = Metaprogramming in Erlang
+pkg_merl_homepage = https://github.com/richcarl/merl
+pkg_merl_fetch = git
+pkg_merl_repo = https://github.com/richcarl/merl
+pkg_merl_commit = master
+
+PACKAGES += mimerl
+pkg_mimerl_name = mimerl
+pkg_mimerl_description = library to handle mimetypes
+pkg_mimerl_homepage = https://github.com/benoitc/mimerl
+pkg_mimerl_fetch = git
+pkg_mimerl_repo = https://github.com/benoitc/mimerl
+pkg_mimerl_commit = master
+
+PACKAGES += mimetypes
+pkg_mimetypes_name = mimetypes
+pkg_mimetypes_description = Erlang MIME types library
+pkg_mimetypes_homepage = https://github.com/spawngrid/mimetypes
+pkg_mimetypes_fetch = git
+pkg_mimetypes_repo = https://github.com/spawngrid/mimetypes
+pkg_mimetypes_commit = master
+
+PACKAGES += mixer
+pkg_mixer_name = mixer
+pkg_mixer_description = Mix in functions from other modules
+pkg_mixer_homepage = https://github.com/chef/mixer
+pkg_mixer_fetch = git
+pkg_mixer_repo = https://github.com/chef/mixer
+pkg_mixer_commit = master
+
+PACKAGES += mochiweb
+pkg_mochiweb_name = mochiweb
+pkg_mochiweb_description = MochiWeb is an Erlang library for building lightweight HTTP servers.
+pkg_mochiweb_homepage = https://github.com/mochi/mochiweb
+pkg_mochiweb_fetch = git
+pkg_mochiweb_repo = https://github.com/mochi/mochiweb
+pkg_mochiweb_commit = master
+
+PACKAGES += mochiweb_xpath
+pkg_mochiweb_xpath_name = mochiweb_xpath
+pkg_mochiweb_xpath_description = XPath support for mochiweb's html parser
+pkg_mochiweb_xpath_homepage = https://github.com/retnuh/mochiweb_xpath
+pkg_mochiweb_xpath_fetch = git
+pkg_mochiweb_xpath_repo = https://github.com/retnuh/mochiweb_xpath
+pkg_mochiweb_xpath_commit = master
+
+PACKAGES += mockgyver
+pkg_mockgyver_name = mockgyver
+pkg_mockgyver_description = A mocking library for Erlang
+pkg_mockgyver_homepage = https://github.com/klajo/mockgyver
+pkg_mockgyver_fetch = git
+pkg_mockgyver_repo = https://github.com/klajo/mockgyver
+pkg_mockgyver_commit = master
+
+PACKAGES += modlib
+pkg_modlib_name = modlib
+pkg_modlib_description = Web framework based on Erlang's inets httpd
+pkg_modlib_homepage = https://github.com/gar1t/modlib
+pkg_modlib_fetch = git
+pkg_modlib_repo = https://github.com/gar1t/modlib
+pkg_modlib_commit = master
+
+PACKAGES += mongodb
+pkg_mongodb_name = mongodb
+pkg_mongodb_description = MongoDB driver for Erlang
+pkg_mongodb_homepage = https://github.com/comtihon/mongodb-erlang
+pkg_mongodb_fetch = git
+pkg_mongodb_repo = https://github.com/comtihon/mongodb-erlang
+pkg_mongodb_commit = master
+
+PACKAGES += mongooseim
+pkg_mongooseim_name = mongooseim
+pkg_mongooseim_description = Jabber / XMPP server with focus on performance and scalability, by Erlang Solutions
+pkg_mongooseim_homepage = https://www.erlang-solutions.com/products/mongooseim-massively-scalable-ejabberd-platform
+pkg_mongooseim_fetch = git
+pkg_mongooseim_repo = https://github.com/esl/MongooseIM
+pkg_mongooseim_commit = master
+
+PACKAGES += moyo
+pkg_moyo_name = moyo
+pkg_moyo_description = Erlang utility functions library
+pkg_moyo_homepage = https://github.com/dwango/moyo
+pkg_moyo_fetch = git
+pkg_moyo_repo = https://github.com/dwango/moyo
+pkg_moyo_commit = master
+
+PACKAGES += msgpack
+pkg_msgpack_name = msgpack
+pkg_msgpack_description = MessagePack (de)serializer implementation for Erlang
+pkg_msgpack_homepage = https://github.com/msgpack/msgpack-erlang
+pkg_msgpack_fetch = git
+pkg_msgpack_repo = https://github.com/msgpack/msgpack-erlang
+pkg_msgpack_commit = master
+
+PACKAGES += mu2
+pkg_mu2_name = mu2
+pkg_mu2_description = Erlang mutation testing tool
+pkg_mu2_homepage = https://github.com/ramsay-t/mu2
+pkg_mu2_fetch = git
+pkg_mu2_repo = https://github.com/ramsay-t/mu2
+pkg_mu2_commit = master
+
+PACKAGES += mustache
+pkg_mustache_name = mustache
+pkg_mustache_description = Mustache template engine for Erlang.
+pkg_mustache_homepage = https://github.com/mojombo/mustache.erl
+pkg_mustache_fetch = git
+pkg_mustache_repo = https://github.com/mojombo/mustache.erl
+pkg_mustache_commit = master
+
+PACKAGES += myproto
+pkg_myproto_name = myproto
+pkg_myproto_description = MySQL Server Protocol in Erlang
+pkg_myproto_homepage = https://github.com/altenwald/myproto
+pkg_myproto_fetch = git
+pkg_myproto_repo = https://github.com/altenwald/myproto
+pkg_myproto_commit = master
+
+PACKAGES += mysql
+pkg_mysql_name = mysql
+pkg_mysql_description = MySQL client library for Erlang/OTP
+pkg_mysql_homepage = https://github.com/mysql-otp/mysql-otp
+pkg_mysql_fetch = git
+pkg_mysql_repo = https://github.com/mysql-otp/mysql-otp
+pkg_mysql_commit = 1.5.1
+
+PACKAGES += n2o
+pkg_n2o_name = n2o
+pkg_n2o_description = WebSocket Application Server
+pkg_n2o_homepage = https://github.com/5HT/n2o
+pkg_n2o_fetch = git
+pkg_n2o_repo = https://github.com/5HT/n2o
+pkg_n2o_commit = master
+
+PACKAGES += nat_upnp
+pkg_nat_upnp_name = nat_upnp
+pkg_nat_upnp_description = Erlang library to map an internal port to an external one using UPnP IGD
+pkg_nat_upnp_homepage = https://github.com/benoitc/nat_upnp
+pkg_nat_upnp_fetch = git
+pkg_nat_upnp_repo = https://github.com/benoitc/nat_upnp
+pkg_nat_upnp_commit = master
+
+PACKAGES += neo4j
+pkg_neo4j_name = neo4j
+pkg_neo4j_description = Erlang client library for Neo4J.
+pkg_neo4j_homepage = https://github.com/dmitriid/neo4j-erlang
+pkg_neo4j_fetch = git
+pkg_neo4j_repo = https://github.com/dmitriid/neo4j-erlang
+pkg_neo4j_commit = master
+
+PACKAGES += neotoma
+pkg_neotoma_name = neotoma
+pkg_neotoma_description = Erlang library and packrat parser-generator for parsing expression grammars.
+pkg_neotoma_homepage = https://github.com/seancribbs/neotoma
+pkg_neotoma_fetch = git
+pkg_neotoma_repo = https://github.com/seancribbs/neotoma
+pkg_neotoma_commit = master
+
+PACKAGES += newrelic
+pkg_newrelic_name = newrelic
+pkg_newrelic_description = Erlang library for sending metrics to New Relic
+pkg_newrelic_homepage = https://github.com/wooga/newrelic-erlang
+pkg_newrelic_fetch = git
+pkg_newrelic_repo = https://github.com/wooga/newrelic-erlang
+pkg_newrelic_commit = master
+
+PACKAGES += nifty
+pkg_nifty_name = nifty
+pkg_nifty_description = Erlang NIF wrapper generator
+pkg_nifty_homepage = https://github.com/parapluu/nifty
+pkg_nifty_fetch = git
+pkg_nifty_repo = https://github.com/parapluu/nifty
+pkg_nifty_commit = master
+
+PACKAGES += nitrogen_core
+pkg_nitrogen_core_name = nitrogen_core
+pkg_nitrogen_core_description = The core Nitrogen library.
+pkg_nitrogen_core_homepage = http://nitrogenproject.com/
+pkg_nitrogen_core_fetch = git
+pkg_nitrogen_core_repo = https://github.com/nitrogen/nitrogen_core
+pkg_nitrogen_core_commit = master
+
+PACKAGES += nkbase
+pkg_nkbase_name = nkbase
+pkg_nkbase_description = NkBASE distributed database
+pkg_nkbase_homepage = https://github.com/Nekso/nkbase
+pkg_nkbase_fetch = git
+pkg_nkbase_repo = https://github.com/Nekso/nkbase
+pkg_nkbase_commit = develop
+
+PACKAGES += nkdocker
+pkg_nkdocker_name = nkdocker
+pkg_nkdocker_description = Erlang Docker client
+pkg_nkdocker_homepage = https://github.com/Nekso/nkdocker
+pkg_nkdocker_fetch = git
+pkg_nkdocker_repo = https://github.com/Nekso/nkdocker
+pkg_nkdocker_commit = master
+
+PACKAGES += nkpacket
+pkg_nkpacket_name = nkpacket
+pkg_nkpacket_description = Generic Erlang transport layer
+pkg_nkpacket_homepage = https://github.com/Nekso/nkpacket
+pkg_nkpacket_fetch = git
+pkg_nkpacket_repo = https://github.com/Nekso/nkpacket
+pkg_nkpacket_commit = master
+
+PACKAGES += nksip
+pkg_nksip_name = nksip
+pkg_nksip_description = Erlang SIP application server
+pkg_nksip_homepage = https://github.com/kalta/nksip
+pkg_nksip_fetch = git
+pkg_nksip_repo = https://github.com/kalta/nksip
+pkg_nksip_commit = master
+
+PACKAGES += nodefinder
+pkg_nodefinder_name = nodefinder
+pkg_nodefinder_description = Automatic node discovery via UDP multicast
+pkg_nodefinder_homepage = https://github.com/erlanger/nodefinder
+pkg_nodefinder_fetch = git
+pkg_nodefinder_repo = https://github.com/okeuday/nodefinder
+pkg_nodefinder_commit = master
+
+PACKAGES += nprocreg
+pkg_nprocreg_name = nprocreg
+pkg_nprocreg_description = Minimal Distributed Erlang Process Registry
+pkg_nprocreg_homepage = http://nitrogenproject.com/
+pkg_nprocreg_fetch = git
+pkg_nprocreg_repo = https://github.com/nitrogen/nprocreg
+pkg_nprocreg_commit = master
+
+PACKAGES += oauth
+pkg_oauth_name = oauth
+pkg_oauth_description = An Erlang OAuth 1.0 implementation
+pkg_oauth_homepage = https://github.com/tim/erlang-oauth
+pkg_oauth_fetch = git
+pkg_oauth_repo = https://github.com/tim/erlang-oauth
+pkg_oauth_commit = master
+
+PACKAGES += oauth2
+pkg_oauth2_name = oauth2
+pkg_oauth2_description = Erlang OAuth2 implementation
+pkg_oauth2_homepage = https://github.com/kivra/oauth2
+pkg_oauth2_fetch = git
+pkg_oauth2_repo = https://github.com/kivra/oauth2
+pkg_oauth2_commit = master
+
+PACKAGES += observer_cli
+pkg_observer_cli_name = observer_cli
+pkg_observer_cli_description = Visualize Erlang/Elixir Nodes On The Command Line
+pkg_observer_cli_homepage = http://zhongwencool.github.io/observer_cli
+pkg_observer_cli_fetch = git
+pkg_observer_cli_repo = https://github.com/zhongwencool/observer_cli
+pkg_observer_cli_commit = master
+
+PACKAGES += octopus
+pkg_octopus_name = octopus
+pkg_octopus_description = Small and flexible pool manager written in Erlang
+pkg_octopus_homepage = https://github.com/erlangbureau/octopus
+pkg_octopus_fetch = git
+pkg_octopus_repo = https://github.com/erlangbureau/octopus
+pkg_octopus_commit = master
+
+PACKAGES += of_protocol
+pkg_of_protocol_name = of_protocol
+pkg_of_protocol_description = OpenFlow Protocol Library for Erlang
+pkg_of_protocol_homepage = https://github.com/FlowForwarding/of_protocol
+pkg_of_protocol_fetch = git
+pkg_of_protocol_repo = https://github.com/FlowForwarding/of_protocol
+pkg_of_protocol_commit = master
+
+PACKAGES += opencouch
+pkg_opencouch_name = couch
+pkg_opencouch_description = An embeddable document-oriented database compatible with Apache CouchDB
+pkg_opencouch_homepage = https://github.com/benoitc/opencouch
+pkg_opencouch_fetch = git
+pkg_opencouch_repo = https://github.com/benoitc/opencouch
+pkg_opencouch_commit = master
+
+PACKAGES += openflow
+pkg_openflow_name = openflow
+pkg_openflow_description = An OpenFlow controller written in pure erlang
+pkg_openflow_homepage = https://github.com/renatoaguiar/erlang-openflow
+pkg_openflow_fetch = git
+pkg_openflow_repo = https://github.com/renatoaguiar/erlang-openflow
+pkg_openflow_commit = master
+
+PACKAGES += openid
+pkg_openid_name = openid
+pkg_openid_description = Erlang OpenID
+pkg_openid_homepage = https://github.com/brendonh/erl_openid
+pkg_openid_fetch = git
+pkg_openid_repo = https://github.com/brendonh/erl_openid
+pkg_openid_commit = master
+
+PACKAGES += openpoker
+pkg_openpoker_name = openpoker
+pkg_openpoker_description = Genesis Texas hold'em Game Server
+pkg_openpoker_homepage = https://github.com/hpyhacking/openpoker
+pkg_openpoker_fetch = git
+pkg_openpoker_repo = https://github.com/hpyhacking/openpoker
+pkg_openpoker_commit = master
+
+PACKAGES += otpbp
+pkg_otpbp_name = otpbp
+pkg_otpbp_description = Parse transformer for using new OTP functions in old Erlang/OTP releases (R15, R16, 17, 18, 19)
+pkg_otpbp_homepage = https://github.com/Ledest/otpbp
+pkg_otpbp_fetch = git
+pkg_otpbp_repo = https://github.com/Ledest/otpbp
+pkg_otpbp_commit = master
+
+PACKAGES += pal
+pkg_pal_name = pal
+pkg_pal_description = Pragmatic Authentication Library
+pkg_pal_homepage = https://github.com/manifest/pal
+pkg_pal_fetch = git
+pkg_pal_repo = https://github.com/manifest/pal
+pkg_pal_commit = master
+
+PACKAGES += parse_trans
+pkg_parse_trans_name = parse_trans
+pkg_parse_trans_description = Parse transform utilities for Erlang
+pkg_parse_trans_homepage = https://github.com/uwiger/parse_trans
+pkg_parse_trans_fetch = git
+pkg_parse_trans_repo = https://github.com/uwiger/parse_trans
+pkg_parse_trans_commit = master
+
+PACKAGES += parsexml
+pkg_parsexml_name = parsexml
+pkg_parsexml_description = Simple DOM XML parser with a convenient and very simple API
+pkg_parsexml_homepage = https://github.com/maxlapshin/parsexml
+pkg_parsexml_fetch = git
+pkg_parsexml_repo = https://github.com/maxlapshin/parsexml
+pkg_parsexml_commit = master
+
+PACKAGES += partisan
+pkg_partisan_name = partisan
+pkg_partisan_description = High-performance, high-scalability distributed computing with Erlang and Elixir.
+pkg_partisan_homepage = http://partisan.cloud
+pkg_partisan_fetch = git
+pkg_partisan_repo = https://github.com/lasp-lang/partisan
+pkg_partisan_commit = master
+
+PACKAGES += pegjs
+pkg_pegjs_name = pegjs
+pkg_pegjs_description = An implementation of PEG.js grammar for Erlang.
+pkg_pegjs_homepage = https://github.com/dmitriid/pegjs
+pkg_pegjs_fetch = git
+pkg_pegjs_repo = https://github.com/dmitriid/pegjs
+pkg_pegjs_commit = master
+
+PACKAGES += percept2
+pkg_percept2_name = percept2
+pkg_percept2_description = Concurrent profiling tool for Erlang
+pkg_percept2_homepage = https://github.com/huiqing/percept2
+pkg_percept2_fetch = git
+pkg_percept2_repo = https://github.com/huiqing/percept2
+pkg_percept2_commit = master
+
+PACKAGES += pgo
+pkg_pgo_name = pgo
+pkg_pgo_description = Erlang Postgres client and connection pool
+pkg_pgo_homepage = https://github.com/erleans/pgo.git
+pkg_pgo_fetch = git
+pkg_pgo_repo = https://github.com/erleans/pgo.git
+pkg_pgo_commit = master
+
+PACKAGES += pgsql
+pkg_pgsql_name = pgsql
+pkg_pgsql_description = Erlang PostgreSQL driver
+pkg_pgsql_homepage = https://github.com/semiocast/pgsql
+pkg_pgsql_fetch = git
+pkg_pgsql_repo = https://github.com/semiocast/pgsql
+pkg_pgsql_commit = master
+
+PACKAGES += pkgx
+pkg_pkgx_name = pkgx
+pkg_pkgx_description = Build .deb packages from Erlang releases
+pkg_pkgx_homepage = https://github.com/arjan/pkgx
+pkg_pkgx_fetch = git
+pkg_pkgx_repo = https://github.com/arjan/pkgx
+pkg_pkgx_commit = master
+
+PACKAGES += pkt
+pkg_pkt_name = pkt
+pkg_pkt_description = Erlang network protocol library
+pkg_pkt_homepage = https://github.com/msantos/pkt
+pkg_pkt_fetch = git
+pkg_pkt_repo = https://github.com/msantos/pkt
+pkg_pkt_commit = master
+
+PACKAGES += plain_fsm
+pkg_plain_fsm_name = plain_fsm
+pkg_plain_fsm_description = A behaviour/support library for writing plain Erlang FSMs.
+pkg_plain_fsm_homepage = https://github.com/uwiger/plain_fsm
+pkg_plain_fsm_fetch = git
+pkg_plain_fsm_repo = https://github.com/uwiger/plain_fsm
+pkg_plain_fsm_commit = master
+
+PACKAGES += plumtree
+pkg_plumtree_name = plumtree
+pkg_plumtree_description = Epidemic Broadcast Trees
+pkg_plumtree_homepage = https://github.com/helium/plumtree
+pkg_plumtree_fetch = git
+pkg_plumtree_repo = https://github.com/helium/plumtree
+pkg_plumtree_commit = master
+
+PACKAGES += pmod_transform
+pkg_pmod_transform_name = pmod_transform
+pkg_pmod_transform_description = Parse transform for parameterized modules
+pkg_pmod_transform_homepage = https://github.com/erlang/pmod_transform
+pkg_pmod_transform_fetch = git
+pkg_pmod_transform_repo = https://github.com/erlang/pmod_transform
+pkg_pmod_transform_commit = master
+
+PACKAGES += pobox
+pkg_pobox_name = pobox
+pkg_pobox_description = External buffer processes to protect against mailbox overflow in Erlang
+pkg_pobox_homepage = https://github.com/ferd/pobox
+pkg_pobox_fetch = git
+pkg_pobox_repo = https://github.com/ferd/pobox
+pkg_pobox_commit = master
+
+PACKAGES += ponos
+pkg_ponos_name = ponos
+pkg_ponos_description = ponos is a simple yet powerful load generator written in Erlang
+pkg_ponos_homepage = https://github.com/klarna/ponos
+pkg_ponos_fetch = git
+pkg_ponos_repo = https://github.com/klarna/ponos
+pkg_ponos_commit = master
+
+PACKAGES += poolboy
+pkg_poolboy_name = poolboy
+pkg_poolboy_description = A hunky Erlang worker pool factory
+pkg_poolboy_homepage = https://github.com/devinus/poolboy
+pkg_poolboy_fetch = git
+pkg_poolboy_repo = https://github.com/devinus/poolboy
+pkg_poolboy_commit = master
+
+PACKAGES += pooler
+pkg_pooler_name = pooler
+pkg_pooler_description = An OTP Process Pool Application
+pkg_pooler_homepage = https://github.com/seth/pooler
+pkg_pooler_fetch = git
+pkg_pooler_repo = https://github.com/seth/pooler
+pkg_pooler_commit = master
+
+PACKAGES += pqueue
+pkg_pqueue_name = pqueue
+pkg_pqueue_description = Erlang Priority Queues
+pkg_pqueue_homepage = https://github.com/okeuday/pqueue
+pkg_pqueue_fetch = git
+pkg_pqueue_repo = https://github.com/okeuday/pqueue
+pkg_pqueue_commit = master
+
+PACKAGES += procket
+pkg_procket_name = procket
+pkg_procket_description = Erlang interface to low-level socket operations
+pkg_procket_homepage = http://blog.listincomprehension.com/search/label/procket
+pkg_procket_fetch = git
+pkg_procket_repo = https://github.com/msantos/procket
+pkg_procket_commit = master
+
+PACKAGES += prometheus
+pkg_prometheus_name = prometheus
+pkg_prometheus_description = Prometheus.io client in Erlang
+pkg_prometheus_homepage = https://github.com/deadtrickster/prometheus.erl
+pkg_prometheus_fetch = git
+pkg_prometheus_repo = https://github.com/deadtrickster/prometheus.erl
+pkg_prometheus_commit = master
+
+PACKAGES += prop
+pkg_prop_name = prop
+pkg_prop_description = An Erlang code scaffolding and generator system.
+pkg_prop_homepage = https://github.com/nuex/prop
+pkg_prop_fetch = git
+pkg_prop_repo = https://github.com/nuex/prop
+pkg_prop_commit = master
+
+PACKAGES += proper
+pkg_proper_name = proper
+pkg_proper_description = PropEr: a QuickCheck-inspired property-based testing tool for Erlang.
+pkg_proper_homepage = http://proper.softlab.ntua.gr
+pkg_proper_fetch = git
+pkg_proper_repo = https://github.com/manopapad/proper
+pkg_proper_commit = master
+
+PACKAGES += props
+pkg_props_name = props
+pkg_props_description = Property structure library
+pkg_props_homepage = https://github.com/greyarea/props
+pkg_props_fetch = git
+pkg_props_repo = https://github.com/greyarea/props
+pkg_props_commit = master
+
+PACKAGES += protobuffs
+pkg_protobuffs_name = protobuffs
+pkg_protobuffs_description = An implementation of Google's Protocol Buffers for Erlang, based on ngerakines/erlang_protobuffs.
+pkg_protobuffs_homepage = https://github.com/basho/erlang_protobuffs
+pkg_protobuffs_fetch = git
+pkg_protobuffs_repo = https://github.com/basho/erlang_protobuffs
+pkg_protobuffs_commit = master
+
+PACKAGES += psycho
+pkg_psycho_name = psycho
+pkg_psycho_description = HTTP server that provides a WSGI-like interface for applications and middleware.
+pkg_psycho_homepage = https://github.com/gar1t/psycho
+pkg_psycho_fetch = git
+pkg_psycho_repo = https://github.com/gar1t/psycho
+pkg_psycho_commit = master
+
+PACKAGES += purity
+pkg_purity_name = purity
+pkg_purity_description = A side-effect analyzer for Erlang
+pkg_purity_homepage = https://github.com/mpitid/purity
+pkg_purity_fetch = git
+pkg_purity_repo = https://github.com/mpitid/purity
+pkg_purity_commit = master
+
+PACKAGES += push_service
+pkg_push_service_name = push_service
+pkg_push_service_description = Push service
+pkg_push_service_homepage = https://github.com/hairyhum/push_service
+pkg_push_service_fetch = git
+pkg_push_service_repo = https://github.com/hairyhum/push_service
+pkg_push_service_commit = master
+
+PACKAGES += qdate
+pkg_qdate_name = qdate
+pkg_qdate_description = Date, time, and timezone parsing, formatting, and conversion for Erlang.
+pkg_qdate_homepage = https://github.com/choptastic/qdate
+pkg_qdate_fetch = git
+pkg_qdate_repo = https://github.com/choptastic/qdate
+pkg_qdate_commit = master
+
+PACKAGES += qrcode
+pkg_qrcode_name = qrcode
+pkg_qrcode_description = QR Code encoder in Erlang
+pkg_qrcode_homepage = https://github.com/komone/qrcode
+pkg_qrcode_fetch = git
+pkg_qrcode_repo = https://github.com/komone/qrcode
+pkg_qrcode_commit = master
+
+PACKAGES += quest
+pkg_quest_name = quest
+pkg_quest_description = Learn Erlang through this set of challenges. An interactive system for getting to know Erlang.
+pkg_quest_homepage = https://github.com/eriksoe/ErlangQuest
+pkg_quest_fetch = git
+pkg_quest_repo = https://github.com/eriksoe/ErlangQuest
+pkg_quest_commit = master
+
+PACKAGES += quickrand
+pkg_quickrand_name = quickrand
+pkg_quickrand_description = Quick Erlang Random Number Generation
+pkg_quickrand_homepage = https://github.com/okeuday/quickrand
+pkg_quickrand_fetch = git
+pkg_quickrand_repo = https://github.com/okeuday/quickrand
+pkg_quickrand_commit = master
+
+PACKAGES += rabbit
+pkg_rabbit_name = rabbit
+pkg_rabbit_description = RabbitMQ Server
+pkg_rabbit_homepage = https://www.rabbitmq.com/
+pkg_rabbit_fetch = git
+pkg_rabbit_repo = https://github.com/rabbitmq/rabbitmq-server.git
+pkg_rabbit_commit = master
+
+PACKAGES += rabbit_exchange_type_riak
+pkg_rabbit_exchange_type_riak_name = rabbit_exchange_type_riak
+pkg_rabbit_exchange_type_riak_description = Custom RabbitMQ exchange type for sticking messages in Riak
+pkg_rabbit_exchange_type_riak_homepage = https://github.com/jbrisbin/riak-exchange
+pkg_rabbit_exchange_type_riak_fetch = git
+pkg_rabbit_exchange_type_riak_repo = https://github.com/jbrisbin/riak-exchange
+pkg_rabbit_exchange_type_riak_commit = master
+
+PACKAGES += rack
+pkg_rack_name = rack
+pkg_rack_description = Rack handler for Erlang
+pkg_rack_homepage = https://github.com/erlyvideo/rack
+pkg_rack_fetch = git
+pkg_rack_repo = https://github.com/erlyvideo/rack
+pkg_rack_commit = master
+
+PACKAGES += radierl
+pkg_radierl_name = radierl
+pkg_radierl_description = RADIUS protocol stack implemented in Erlang.
+pkg_radierl_homepage = https://github.com/vances/radierl
+pkg_radierl_fetch = git
+pkg_radierl_repo = https://github.com/vances/radierl
+pkg_radierl_commit = master
+
+PACKAGES += rafter
+pkg_rafter_name = rafter
+pkg_rafter_description = An Erlang library application which implements the Raft consensus protocol
+pkg_rafter_homepage = https://github.com/andrewjstone/rafter
+pkg_rafter_fetch = git
+pkg_rafter_repo = https://github.com/andrewjstone/rafter
+pkg_rafter_commit = master
+
+PACKAGES += ranch
+pkg_ranch_name = ranch
+pkg_ranch_description = Socket acceptor pool for TCP protocols.
+pkg_ranch_homepage = http://ninenines.eu
+pkg_ranch_fetch = git
+pkg_ranch_repo = https://github.com/ninenines/ranch
+pkg_ranch_commit = 1.2.1
+
+PACKAGES += rbeacon
+pkg_rbeacon_name = rbeacon
+pkg_rbeacon_description = LAN discovery and presence in Erlang.
+pkg_rbeacon_homepage = https://github.com/refuge/rbeacon
+pkg_rbeacon_fetch = git
+pkg_rbeacon_repo = https://github.com/refuge/rbeacon
+pkg_rbeacon_commit = master
+
+PACKAGES += rebar
+pkg_rebar_name = rebar
+pkg_rebar_description = Erlang build tool that makes it easy to compile and test Erlang applications, port drivers and releases.
+pkg_rebar_homepage = http://www.rebar3.org
+pkg_rebar_fetch = git
+pkg_rebar_repo = https://github.com/rebar/rebar3
+pkg_rebar_commit = master
+
+PACKAGES += rebus
+pkg_rebus_name = rebus
+pkg_rebus_description = A stupid-simple internal pub/sub event bus written in and for Erlang.
+pkg_rebus_homepage = https://github.com/olle/rebus
+pkg_rebus_fetch = git
+pkg_rebus_repo = https://github.com/olle/rebus
+pkg_rebus_commit = master
+
+PACKAGES += rec2json
+pkg_rec2json_name = rec2json
+pkg_rec2json_description = Compile Erlang record definitions into modules to convert them to/from JSON easily.
+pkg_rec2json_homepage = https://github.com/lordnull/rec2json
+pkg_rec2json_fetch = git
+pkg_rec2json_repo = https://github.com/lordnull/rec2json
+pkg_rec2json_commit = master
+
+PACKAGES += recon
+pkg_recon_name = recon
+pkg_recon_description = Collection of functions and scripts to debug Erlang in production.
+pkg_recon_homepage = https://github.com/ferd/recon
+pkg_recon_fetch = git
+pkg_recon_repo = https://github.com/ferd/recon
+pkg_recon_commit = master
+
+PACKAGES += record_info
+pkg_record_info_name = record_info
+pkg_record_info_description = Convert between records and proplists
+pkg_record_info_homepage = https://github.com/bipthelin/erlang-record_info
+pkg_record_info_fetch = git
+pkg_record_info_repo = https://github.com/bipthelin/erlang-record_info
+pkg_record_info_commit = master
+
+PACKAGES += redgrid
+pkg_redgrid_name = redgrid
+pkg_redgrid_description = Automatic Erlang node discovery via Redis
+pkg_redgrid_homepage = https://github.com/jkvor/redgrid
+pkg_redgrid_fetch = git
+pkg_redgrid_repo = https://github.com/jkvor/redgrid
+pkg_redgrid_commit = master
+
+PACKAGES += redo
+pkg_redo_name = redo
+pkg_redo_description = Pipelined Erlang Redis client
+pkg_redo_homepage = https://github.com/jkvor/redo
+pkg_redo_fetch = git
+pkg_redo_repo = https://github.com/jkvor/redo
+pkg_redo_commit = master
+
+PACKAGES += reload_mk
+pkg_reload_mk_name = reload_mk
+pkg_reload_mk_description = Live reload plugin for erlang.mk.
+pkg_reload_mk_homepage = https://github.com/bullno1/reload.mk
+pkg_reload_mk_fetch = git
+pkg_reload_mk_repo = https://github.com/bullno1/reload.mk
+pkg_reload_mk_commit = master
+
+PACKAGES += reltool_util
+pkg_reltool_util_name = reltool_util
+pkg_reltool_util_description = Erlang reltool utility functionality application
+pkg_reltool_util_homepage = https://github.com/okeuday/reltool_util
+pkg_reltool_util_fetch = git
+pkg_reltool_util_repo = https://github.com/okeuday/reltool_util
+pkg_reltool_util_commit = master
+
+PACKAGES += relx
+pkg_relx_name = relx
+pkg_relx_description = Sane, simple release creation for Erlang
+pkg_relx_homepage = https://github.com/erlware/relx
+pkg_relx_fetch = git
+pkg_relx_repo = https://github.com/erlware/relx
+pkg_relx_commit = master
+
+PACKAGES += resource_discovery
+pkg_resource_discovery_name = resource_discovery
+pkg_resource_discovery_description = An application used to dynamically discover resources present in an Erlang node cluster.
+pkg_resource_discovery_homepage = http://erlware.org/
+pkg_resource_discovery_fetch = git
+pkg_resource_discovery_repo = https://github.com/erlware/resource_discovery
+pkg_resource_discovery_commit = master
+
+PACKAGES += restc
+pkg_restc_name = restc
+pkg_restc_description = Erlang REST client
+pkg_restc_homepage = https://github.com/kivra/restclient
+pkg_restc_fetch = git
+pkg_restc_repo = https://github.com/kivra/restclient
+pkg_restc_commit = master
+
+PACKAGES += rfc4627_jsonrpc
+pkg_rfc4627_jsonrpc_name = rfc4627_jsonrpc
+pkg_rfc4627_jsonrpc_description = Erlang RFC4627 (JSON) codec and JSON-RPC server implementation.
+pkg_rfc4627_jsonrpc_homepage = https://github.com/tonyg/erlang-rfc4627
+pkg_rfc4627_jsonrpc_fetch = git
+pkg_rfc4627_jsonrpc_repo = https://github.com/tonyg/erlang-rfc4627
+pkg_rfc4627_jsonrpc_commit = master
+
+PACKAGES += riak_control
+pkg_riak_control_name = riak_control
+pkg_riak_control_description = Webmachine-based administration interface for Riak.
+pkg_riak_control_homepage = https://github.com/basho/riak_control
+pkg_riak_control_fetch = git
+pkg_riak_control_repo = https://github.com/basho/riak_control
+pkg_riak_control_commit = master
+
+PACKAGES += riak_core
+pkg_riak_core_name = riak_core
+pkg_riak_core_description = Distributed systems infrastructure used by Riak.
+pkg_riak_core_homepage = https://github.com/basho/riak_core
+pkg_riak_core_fetch = git
+pkg_riak_core_repo = https://github.com/basho/riak_core
+pkg_riak_core_commit = master
+
+PACKAGES += riak_dt
+pkg_riak_dt_name = riak_dt
+pkg_riak_dt_description = Convergent replicated datatypes in Erlang
+pkg_riak_dt_homepage = https://github.com/basho/riak_dt
+pkg_riak_dt_fetch = git
+pkg_riak_dt_repo = https://github.com/basho/riak_dt
+pkg_riak_dt_commit = master
+
+PACKAGES += riak_ensemble
+pkg_riak_ensemble_name = riak_ensemble
+pkg_riak_ensemble_description = Multi-Paxos framework in Erlang
+pkg_riak_ensemble_homepage = https://github.com/basho/riak_ensemble
+pkg_riak_ensemble_fetch = git
+pkg_riak_ensemble_repo = https://github.com/basho/riak_ensemble
+pkg_riak_ensemble_commit = master
+
+PACKAGES += riak_kv
+pkg_riak_kv_name = riak_kv
+pkg_riak_kv_description = Riak Key/Value Store
+pkg_riak_kv_homepage = https://github.com/basho/riak_kv
+pkg_riak_kv_fetch = git
+pkg_riak_kv_repo = https://github.com/basho/riak_kv
+pkg_riak_kv_commit = master
+
+PACKAGES += riak_pg
+pkg_riak_pg_name = riak_pg
+pkg_riak_pg_description = Distributed process groups with riak_core.
+pkg_riak_pg_homepage = https://github.com/cmeiklejohn/riak_pg
+pkg_riak_pg_fetch = git
+pkg_riak_pg_repo = https://github.com/cmeiklejohn/riak_pg
+pkg_riak_pg_commit = master
+
+PACKAGES += riak_pipe
+pkg_riak_pipe_name = riak_pipe
+pkg_riak_pipe_description = Riak Pipelines
+pkg_riak_pipe_homepage = https://github.com/basho/riak_pipe
+pkg_riak_pipe_fetch = git
+pkg_riak_pipe_repo = https://github.com/basho/riak_pipe
+pkg_riak_pipe_commit = master
+
+PACKAGES += riak_sysmon
+pkg_riak_sysmon_name = riak_sysmon
+pkg_riak_sysmon_description = Simple OTP app for managing Erlang VM system_monitor event messages
+pkg_riak_sysmon_homepage = https://github.com/basho/riak_sysmon
+pkg_riak_sysmon_fetch = git
+pkg_riak_sysmon_repo = https://github.com/basho/riak_sysmon
+pkg_riak_sysmon_commit = master
+
+PACKAGES += riak_test
+pkg_riak_test_name = riak_test
+pkg_riak_test_description = I'm in your cluster, testing your riaks
+pkg_riak_test_homepage = https://github.com/basho/riak_test
+pkg_riak_test_fetch = git
+pkg_riak_test_repo = https://github.com/basho/riak_test
+pkg_riak_test_commit = master
+
+PACKAGES += riakc
+pkg_riakc_name = riakc
+pkg_riakc_description = Erlang clients for Riak.
+pkg_riakc_homepage = https://github.com/basho/riak-erlang-client
+pkg_riakc_fetch = git
+pkg_riakc_repo = https://github.com/basho/riak-erlang-client
+pkg_riakc_commit = master
+
+PACKAGES += riakhttpc
+pkg_riakhttpc_name = riakhttpc
+pkg_riakhttpc_description = Riak Erlang client using the HTTP interface
+pkg_riakhttpc_homepage = https://github.com/basho/riak-erlang-http-client
+pkg_riakhttpc_fetch = git
+pkg_riakhttpc_repo = https://github.com/basho/riak-erlang-http-client
+pkg_riakhttpc_commit = master
+
+PACKAGES += riaknostic
+pkg_riaknostic_name = riaknostic
+pkg_riaknostic_description = A diagnostic tool for Riak installations, to find common errors ASAP
+pkg_riaknostic_homepage = https://github.com/basho/riaknostic
+pkg_riaknostic_fetch = git
+pkg_riaknostic_repo = https://github.com/basho/riaknostic
+pkg_riaknostic_commit = master
+
+PACKAGES += riakpool
+pkg_riakpool_name = riakpool
+pkg_riakpool_description = Erlang Riak client pool
+pkg_riakpool_homepage = https://github.com/dweldon/riakpool
+pkg_riakpool_fetch = git
+pkg_riakpool_repo = https://github.com/dweldon/riakpool
+pkg_riakpool_commit = master
+
+PACKAGES += rivus_cep
+pkg_rivus_cep_name = rivus_cep
+pkg_rivus_cep_description = Complex event processing in Erlang
+pkg_rivus_cep_homepage = https://github.com/vascokk/rivus_cep
+pkg_rivus_cep_fetch = git
+pkg_rivus_cep_repo = https://github.com/vascokk/rivus_cep
+pkg_rivus_cep_commit = master
+
+PACKAGES += rlimit
+pkg_rlimit_name = rlimit
+pkg_rlimit_description = Magnus Klaar's rate limiter code from etorrent
+pkg_rlimit_homepage = https://github.com/jlouis/rlimit
+pkg_rlimit_fetch = git
+pkg_rlimit_repo = https://github.com/jlouis/rlimit
+pkg_rlimit_commit = master
+
+PACKAGES += rust_mk
+pkg_rust_mk_name = rust_mk
+pkg_rust_mk_description = Build Rust crates in an Erlang application
+pkg_rust_mk_homepage = https://github.com/goertzenator/rust.mk
+pkg_rust_mk_fetch = git
+pkg_rust_mk_repo = https://github.com/goertzenator/rust.mk
+pkg_rust_mk_commit = master
+
+PACKAGES += safetyvalve
+pkg_safetyvalve_name = safetyvalve
+pkg_safetyvalve_description = A safety valve for your Erlang node
+pkg_safetyvalve_homepage = https://github.com/jlouis/safetyvalve
+pkg_safetyvalve_fetch = git
+pkg_safetyvalve_repo = https://github.com/jlouis/safetyvalve
+pkg_safetyvalve_commit = master
+
+PACKAGES += seestar
+pkg_seestar_name = seestar
+pkg_seestar_description = The Erlang client for the Cassandra 1.2+ binary protocol
+pkg_seestar_homepage = https://github.com/iamaleksey/seestar
+pkg_seestar_fetch = git
+pkg_seestar_repo = https://github.com/iamaleksey/seestar
+pkg_seestar_commit = master
+
+PACKAGES += service
+pkg_service_name = service
+pkg_service_description = A minimal Erlang behavior for creating CloudI internal services
+pkg_service_homepage = http://cloudi.org/
+pkg_service_fetch = git
+pkg_service_repo = https://github.com/CloudI/service
+pkg_service_commit = master
+
+PACKAGES += setup
+pkg_setup_name = setup
+pkg_setup_description = Generic setup utility for Erlang-based systems
+pkg_setup_homepage = https://github.com/uwiger/setup
+pkg_setup_fetch = git
+pkg_setup_repo = https://github.com/uwiger/setup
+pkg_setup_commit = master
+
+PACKAGES += sext
+pkg_sext_name = sext
+pkg_sext_description = Sortable Erlang Term Serialization
+pkg_sext_homepage = https://github.com/uwiger/sext
+pkg_sext_fetch = git
+pkg_sext_repo = https://github.com/uwiger/sext
+pkg_sext_commit = master
+
+PACKAGES += sfmt
+pkg_sfmt_name = sfmt
+pkg_sfmt_description = SFMT pseudo random number generator for Erlang.
+pkg_sfmt_homepage = https://github.com/jj1bdx/sfmt-erlang
+pkg_sfmt_fetch = git
+pkg_sfmt_repo = https://github.com/jj1bdx/sfmt-erlang
+pkg_sfmt_commit = master
+
+PACKAGES += sgte
+pkg_sgte_name = sgte
+pkg_sgte_description = A simple Erlang Template Engine
+pkg_sgte_homepage = https://github.com/filippo/sgte
+pkg_sgte_fetch = git
+pkg_sgte_repo = https://github.com/filippo/sgte
+pkg_sgte_commit = master
+
+PACKAGES += sheriff
+pkg_sheriff_name = sheriff
+pkg_sheriff_description = Parse transform for type-based validation.
+pkg_sheriff_homepage = http://ninenines.eu
+pkg_sheriff_fetch = git
+pkg_sheriff_repo = https://github.com/extend/sheriff
+pkg_sheriff_commit = master
+
+PACKAGES += shotgun
+pkg_shotgun_name = shotgun
+pkg_shotgun_description = better than just a gun
+pkg_shotgun_homepage = https://github.com/inaka/shotgun
+pkg_shotgun_fetch = git
+pkg_shotgun_repo = https://github.com/inaka/shotgun
+pkg_shotgun_commit = master
+
+PACKAGES += sidejob
+pkg_sidejob_name = sidejob
+pkg_sidejob_description = Parallel worker and capacity limiting library for Erlang
+pkg_sidejob_homepage = https://github.com/basho/sidejob
+pkg_sidejob_fetch = git
+pkg_sidejob_repo = https://github.com/basho/sidejob
+pkg_sidejob_commit = master
+
+PACKAGES += sieve
+pkg_sieve_name = sieve
+pkg_sieve_description = sieve is a simple TCP routing proxy (layer 7) in Erlang
+pkg_sieve_homepage = https://github.com/benoitc/sieve
+pkg_sieve_fetch = git
+pkg_sieve_repo = https://github.com/benoitc/sieve
+pkg_sieve_commit = master
+
+PACKAGES += sighandler
+pkg_sighandler_name = sighandler
+pkg_sighandler_description = Handle UNIX signals in Erlang
+pkg_sighandler_homepage = https://github.com/jkingsbery/sighandler
+pkg_sighandler_fetch = git
+pkg_sighandler_repo = https://github.com/jkingsbery/sighandler
+pkg_sighandler_commit = master
+
+PACKAGES += simhash
+pkg_simhash_name = simhash
+pkg_simhash_description = Simhashing for Erlang -- hashing algorithm to find near-duplicates in binary data.
+pkg_simhash_homepage = https://github.com/ferd/simhash
+pkg_simhash_fetch = git
+pkg_simhash_repo = https://github.com/ferd/simhash
+pkg_simhash_commit = master
+
+PACKAGES += simple_bridge
+pkg_simple_bridge_name = simple_bridge
+pkg_simple_bridge_description = A simple, standardized interface library to Erlang HTTP Servers.
+pkg_simple_bridge_homepage = https://github.com/nitrogen/simple_bridge
+pkg_simple_bridge_fetch = git
+pkg_simple_bridge_repo = https://github.com/nitrogen/simple_bridge
+pkg_simple_bridge_commit = master
+
+PACKAGES += simple_oauth2
+pkg_simple_oauth2_name = simple_oauth2
+pkg_simple_oauth2_description = Simple erlang OAuth2 client module for any http server framework (Google, Facebook, Yandex, Vkontakte are preconfigured)
+pkg_simple_oauth2_homepage = https://github.com/virtan/simple_oauth2
+pkg_simple_oauth2_fetch = git
+pkg_simple_oauth2_repo = https://github.com/virtan/simple_oauth2
+pkg_simple_oauth2_commit = master
+
+PACKAGES += skel
+pkg_skel_name = skel
+pkg_skel_description = A Streaming Process-based Skeleton Library for Erlang
+pkg_skel_homepage = https://github.com/ParaPhrase/skel
+pkg_skel_fetch = git
+pkg_skel_repo = https://github.com/ParaPhrase/skel
+pkg_skel_commit = master
+
+PACKAGES += slack
+pkg_slack_name = slack
+pkg_slack_description = Minimal slack notification OTP library.
+pkg_slack_homepage = https://github.com/DonBranson/slack
+pkg_slack_fetch = git
+pkg_slack_repo = https://github.com/DonBranson/slack.git
+pkg_slack_commit = master
+
+PACKAGES += smother
+pkg_smother_name = smother
+pkg_smother_description = Extended code coverage metrics for Erlang.
+pkg_smother_homepage = https://ramsay-t.github.io/Smother/
+pkg_smother_fetch = git
+pkg_smother_repo = https://github.com/ramsay-t/Smother
+pkg_smother_commit = master
+
+PACKAGES += snappyer
+pkg_snappyer_name = snappyer
+pkg_snappyer_description = Snappy compression as a NIF for Erlang
+pkg_snappyer_homepage = https://github.com/zmstone/snappyer
+pkg_snappyer_fetch = git
+pkg_snappyer_repo = https://github.com/zmstone/snappyer.git
+pkg_snappyer_commit = master
+
+PACKAGES += social
+pkg_social_name = social
+pkg_social_description = Cowboy handler for social login via OAuth2 providers
+pkg_social_homepage = https://github.com/dvv/social
+pkg_social_fetch = git
+pkg_social_repo = https://github.com/dvv/social
+pkg_social_commit = master
+
+PACKAGES += spapi_router
+pkg_spapi_router_name = spapi_router
+pkg_spapi_router_description = Partially-connected Erlang clustering
+pkg_spapi_router_homepage = https://github.com/spilgames/spapi-router
+pkg_spapi_router_fetch = git
+pkg_spapi_router_repo = https://github.com/spilgames/spapi-router
+pkg_spapi_router_commit = master
+
+PACKAGES += sqerl
+pkg_sqerl_name = sqerl
+pkg_sqerl_description = An Erlang-flavoured SQL DSL
+pkg_sqerl_homepage = https://github.com/hairyhum/sqerl
+pkg_sqerl_fetch = git
+pkg_sqerl_repo = https://github.com/hairyhum/sqerl
+pkg_sqerl_commit = master
+
+PACKAGES += srly
+pkg_srly_name = srly
+pkg_srly_description = Native Erlang Unix serial interface
+pkg_srly_homepage = https://github.com/msantos/srly
+pkg_srly_fetch = git
+pkg_srly_repo = https://github.com/msantos/srly
+pkg_srly_commit = master
+
+PACKAGES += sshrpc
+pkg_sshrpc_name = sshrpc
+pkg_sshrpc_description = Erlang SSH RPC module (experimental)
+pkg_sshrpc_homepage = https://github.com/jj1bdx/sshrpc
+pkg_sshrpc_fetch = git
+pkg_sshrpc_repo = https://github.com/jj1bdx/sshrpc
+pkg_sshrpc_commit = master
+
+PACKAGES += stable
+pkg_stable_name = stable
+pkg_stable_description = Library of assorted helpers for Cowboy web server.
+pkg_stable_homepage = https://github.com/dvv/stable
+pkg_stable_fetch = git
+pkg_stable_repo = https://github.com/dvv/stable
+pkg_stable_commit = master
+
+PACKAGES += statebox
+pkg_statebox_name = statebox
+pkg_statebox_description = Erlang state monad with merge/conflict-resolution capabilities. Useful for Riak.
+pkg_statebox_homepage = https://github.com/mochi/statebox
+pkg_statebox_fetch = git
+pkg_statebox_repo = https://github.com/mochi/statebox
+pkg_statebox_commit = master
+
+PACKAGES += statebox_riak
+pkg_statebox_riak_name = statebox_riak
+pkg_statebox_riak_description = Convenience library that makes it easier to use statebox with riak, extracted from best practices in our production code at Mochi Media.
+pkg_statebox_riak_homepage = https://github.com/mochi/statebox_riak
+pkg_statebox_riak_fetch = git
+pkg_statebox_riak_repo = https://github.com/mochi/statebox_riak
+pkg_statebox_riak_commit = master
+
+PACKAGES += statman
+pkg_statman_name = statman
+pkg_statman_description = Efficiently collect massive volumes of metrics inside the Erlang VM
+pkg_statman_homepage = https://github.com/knutin/statman
+pkg_statman_fetch = git
+pkg_statman_repo = https://github.com/knutin/statman
+pkg_statman_commit = master
+
+PACKAGES += statsderl
+pkg_statsderl_name = statsderl
+pkg_statsderl_description = StatsD client (Erlang)
+pkg_statsderl_homepage = https://github.com/lpgauth/statsderl
+pkg_statsderl_fetch = git
+pkg_statsderl_repo = https://github.com/lpgauth/statsderl
+pkg_statsderl_commit = master
+
+PACKAGES += stdinout_pool
+pkg_stdinout_pool_name = stdinout_pool
+pkg_stdinout_pool_description = stdinout_pool : stuff goes in, stuff goes out. there's never any miscommunication.
+pkg_stdinout_pool_homepage = https://github.com/mattsta/erlang-stdinout-pool
+pkg_stdinout_pool_fetch = git
+pkg_stdinout_pool_repo = https://github.com/mattsta/erlang-stdinout-pool
+pkg_stdinout_pool_commit = master
+
+PACKAGES += stockdb
+pkg_stockdb_name = stockdb
+pkg_stockdb_description = Database for storing stock exchange quotes in Erlang
+pkg_stockdb_homepage = https://github.com/maxlapshin/stockdb
+pkg_stockdb_fetch = git
+pkg_stockdb_repo = https://github.com/maxlapshin/stockdb
+pkg_stockdb_commit = master
+
+PACKAGES += stripe
+pkg_stripe_name = stripe
+pkg_stripe_description = Erlang interface to the stripe.com API
+pkg_stripe_homepage = https://github.com/mattsta/stripe-erlang
+pkg_stripe_fetch = git
+pkg_stripe_repo = https://github.com/mattsta/stripe-erlang
+pkg_stripe_commit = v1
+
+PACKAGES += subproc
+pkg_subproc_name = subproc
+pkg_subproc_description = UNIX subprocess manager with {active,once|false} modes
+pkg_subproc_homepage = http://dozzie.jarowit.net/trac/wiki/subproc
+pkg_subproc_fetch = git
+pkg_subproc_repo = https://github.com/dozzie/subproc
+pkg_subproc_commit = v0.1.0
+
+PACKAGES += supervisor3
+pkg_supervisor3_name = supervisor3
+pkg_supervisor3_description = OTP supervisor with additional strategies
+pkg_supervisor3_homepage = https://github.com/klarna/supervisor3
+pkg_supervisor3_fetch = git
+pkg_supervisor3_repo = https://github.com/klarna/supervisor3.git
+pkg_supervisor3_commit = master
+
+PACKAGES += surrogate
+pkg_surrogate_name = surrogate
+pkg_surrogate_description = Proxy server written in Erlang. Supports reverse proxy load balancing and forward proxy with HTTP (including CONNECT), SOCKS4, SOCKS5, and transparent proxy modes.
+pkg_surrogate_homepage = https://github.com/skruger/Surrogate
+pkg_surrogate_fetch = git
+pkg_surrogate_repo = https://github.com/skruger/Surrogate
+pkg_surrogate_commit = master
+
+PACKAGES += swab
+pkg_swab_name = swab
+pkg_swab_description = General purpose buffer handling module
+pkg_swab_homepage = https://github.com/crownedgrouse/swab
+pkg_swab_fetch = git
+pkg_swab_repo = https://github.com/crownedgrouse/swab
+pkg_swab_commit = master
+
+PACKAGES += swarm
+pkg_swarm_name = swarm
+pkg_swarm_description = Fast and simple acceptor pool for Erlang
+pkg_swarm_homepage = https://github.com/jeremey/swarm
+pkg_swarm_fetch = git
+pkg_swarm_repo = https://github.com/jeremey/swarm
+pkg_swarm_commit = master
+
+PACKAGES += switchboard
+pkg_switchboard_name = switchboard
+pkg_switchboard_description = A framework for processing email using worker plugins.
+pkg_switchboard_homepage = https://github.com/thusfresh/switchboard
+pkg_switchboard_fetch = git
+pkg_switchboard_repo = https://github.com/thusfresh/switchboard
+pkg_switchboard_commit = master
+
+PACKAGES += syn
+pkg_syn_name = syn
+pkg_syn_description = A global Process Registry and Process Group manager for Erlang.
+pkg_syn_homepage = https://github.com/ostinelli/syn
+pkg_syn_fetch = git
+pkg_syn_repo = https://github.com/ostinelli/syn
+pkg_syn_commit = master
+
+PACKAGES += sync
+pkg_sync_name = sync
+pkg_sync_description = On-the-fly recompiling and reloading in Erlang.
+pkg_sync_homepage = https://github.com/rustyio/sync
+pkg_sync_fetch = git
+pkg_sync_repo = https://github.com/rustyio/sync
+pkg_sync_commit = master
+
+PACKAGES += syntaxerl
+pkg_syntaxerl_name = syntaxerl
+pkg_syntaxerl_description = Syntax checker for Erlang
+pkg_syntaxerl_homepage = https://github.com/ten0s/syntaxerl
+pkg_syntaxerl_fetch = git
+pkg_syntaxerl_repo = https://github.com/ten0s/syntaxerl
+pkg_syntaxerl_commit = master
+
+PACKAGES += syslog
+pkg_syslog_name = syslog
+pkg_syslog_description = Erlang port driver for interacting with syslog via syslog(3)
+pkg_syslog_homepage = https://github.com/Vagabond/erlang-syslog
+pkg_syslog_fetch = git
+pkg_syslog_repo = https://github.com/Vagabond/erlang-syslog
+pkg_syslog_commit = master
+
+PACKAGES += taskforce
+pkg_taskforce_name = taskforce
+pkg_taskforce_description = Erlang worker pools for controlled parallelisation of arbitrary tasks.
+pkg_taskforce_homepage = https://github.com/g-andrade/taskforce
+pkg_taskforce_fetch = git
+pkg_taskforce_repo = https://github.com/g-andrade/taskforce
+pkg_taskforce_commit = master
+
+PACKAGES += tddreloader
+pkg_tddreloader_name = tddreloader
+pkg_tddreloader_description = Shell utility for recompiling, reloading, and testing code as it changes
+pkg_tddreloader_homepage = https://github.com/version2beta/tddreloader
+pkg_tddreloader_fetch = git
+pkg_tddreloader_repo = https://github.com/version2beta/tddreloader
+pkg_tddreloader_commit = master
+
+PACKAGES += tempo
+pkg_tempo_name = tempo
+pkg_tempo_description = NIF-based date and time parsing and formatting for Erlang.
+pkg_tempo_homepage = https://github.com/selectel/tempo
+pkg_tempo_fetch = git
+pkg_tempo_repo = https://github.com/selectel/tempo
+pkg_tempo_commit = master
+
+PACKAGES += ticktick
+pkg_ticktick_name = ticktick
+pkg_ticktick_description = Ticktick is an ID generator for message services.
+pkg_ticktick_homepage = https://github.com/ericliang/ticktick
+pkg_ticktick_fetch = git
+pkg_ticktick_repo = https://github.com/ericliang/ticktick
+pkg_ticktick_commit = master
+
+PACKAGES += tinymq
+pkg_tinymq_name = tinymq
+pkg_tinymq_description = TinyMQ - a diminutive, in-memory message queue
+pkg_tinymq_homepage = https://github.com/ChicagoBoss/tinymq
+pkg_tinymq_fetch = git
+pkg_tinymq_repo = https://github.com/ChicagoBoss/tinymq
+pkg_tinymq_commit = master
+
+PACKAGES += tinymt
+pkg_tinymt_name = tinymt
+pkg_tinymt_description = TinyMT pseudo random number generator for Erlang.
+pkg_tinymt_homepage = https://github.com/jj1bdx/tinymt-erlang
+pkg_tinymt_fetch = git
+pkg_tinymt_repo = https://github.com/jj1bdx/tinymt-erlang
+pkg_tinymt_commit = master
+
+PACKAGES += tirerl
+pkg_tirerl_name = tirerl
+pkg_tirerl_description = Erlang interface to Elasticsearch
+pkg_tirerl_homepage = https://github.com/inaka/tirerl
+pkg_tirerl_fetch = git
+pkg_tirerl_repo = https://github.com/inaka/tirerl
+pkg_tirerl_commit = master
+
+PACKAGES += toml
+pkg_toml_name = toml
+pkg_toml_description = TOML (0.4.0) config parser
+pkg_toml_homepage = http://dozzie.jarowit.net/trac/wiki/TOML
+pkg_toml_fetch = git
+pkg_toml_repo = https://github.com/dozzie/toml
+pkg_toml_commit = v0.2.0
+
+PACKAGES += traffic_tools
+pkg_traffic_tools_name = traffic_tools
+pkg_traffic_tools_description = Simple traffic limiting library
+pkg_traffic_tools_homepage = https://github.com/systra/traffic_tools
+pkg_traffic_tools_fetch = git
+pkg_traffic_tools_repo = https://github.com/systra/traffic_tools
+pkg_traffic_tools_commit = master
+
+PACKAGES += trails
+pkg_trails_name = trails
+pkg_trails_description = A couple of improvements over Cowboy Routes
+pkg_trails_homepage = http://inaka.github.io/cowboy-trails/
+pkg_trails_fetch = git
+pkg_trails_repo = https://github.com/inaka/cowboy-trails
+pkg_trails_commit = master
+
+PACKAGES += trane
+pkg_trane_name = trane
+pkg_trane_description = SAX-style broken-HTML parser in Erlang
+pkg_trane_homepage = https://github.com/massemanet/trane
+pkg_trane_fetch = git
+pkg_trane_repo = https://github.com/massemanet/trane
+pkg_trane_commit = master
+
+PACKAGES += transit
+pkg_transit_name = transit
+pkg_transit_description = Transit format for Erlang
+pkg_transit_homepage = https://github.com/isaiah/transit-erlang
+pkg_transit_fetch = git
+pkg_transit_repo = https://github.com/isaiah/transit-erlang
+pkg_transit_commit = master
+
+PACKAGES += trie
+pkg_trie_name = trie
+pkg_trie_description = Erlang Trie Implementation
+pkg_trie_homepage = https://github.com/okeuday/trie
+pkg_trie_fetch = git
+pkg_trie_repo = https://github.com/okeuday/trie
+pkg_trie_commit = master
+
+PACKAGES += triq
+pkg_triq_name = triq
+pkg_triq_description = Trifork QuickCheck
+pkg_triq_homepage = https://triq.gitlab.io
+pkg_triq_fetch = git
+pkg_triq_repo = https://gitlab.com/triq/triq.git
+pkg_triq_commit = master
+
+PACKAGES += tunctl
+pkg_tunctl_name = tunctl
+pkg_tunctl_description = Erlang TUN/TAP interface
+pkg_tunctl_homepage = https://github.com/msantos/tunctl
+pkg_tunctl_fetch = git
+pkg_tunctl_repo = https://github.com/msantos/tunctl
+pkg_tunctl_commit = master
+
+PACKAGES += twerl
+pkg_twerl_name = twerl
+pkg_twerl_description = Erlang client for the Twitter Streaming API
+pkg_twerl_homepage = https://github.com/lucaspiller/twerl
+pkg_twerl_fetch = git
+pkg_twerl_repo = https://github.com/lucaspiller/twerl
+pkg_twerl_commit = oauth
+
+PACKAGES += twitter_erlang
+pkg_twitter_erlang_name = twitter_erlang
+pkg_twitter_erlang_description = An Erlang Twitter client
+pkg_twitter_erlang_homepage = https://github.com/ngerakines/erlang_twitter
+pkg_twitter_erlang_fetch = git
+pkg_twitter_erlang_repo = https://github.com/ngerakines/erlang_twitter
+pkg_twitter_erlang_commit = master
+
+PACKAGES += ucol_nif
+pkg_ucol_nif_name = ucol_nif
+pkg_ucol_nif_description = ICU based collation Erlang module
+pkg_ucol_nif_homepage = https://github.com/refuge/ucol_nif
+pkg_ucol_nif_fetch = git
+pkg_ucol_nif_repo = https://github.com/refuge/ucol_nif
+pkg_ucol_nif_commit = master
+
+PACKAGES += unicorn
+pkg_unicorn_name = unicorn
+pkg_unicorn_description = Generic configuration server
+pkg_unicorn_homepage = https://github.com/shizzard/unicorn
+pkg_unicorn_fetch = git
+pkg_unicorn_repo = https://github.com/shizzard/unicorn
+pkg_unicorn_commit = master
+
+PACKAGES += unsplit
+pkg_unsplit_name = unsplit
+pkg_unsplit_description = Resolves conflicts in Mnesia after network splits
+pkg_unsplit_homepage = https://github.com/uwiger/unsplit
+pkg_unsplit_fetch = git
+pkg_unsplit_repo = https://github.com/uwiger/unsplit
+pkg_unsplit_commit = master
+
+PACKAGES += uuid
+pkg_uuid_name = uuid
+pkg_uuid_description = Erlang UUID Implementation
+pkg_uuid_homepage = https://github.com/okeuday/uuid
+pkg_uuid_fetch = git
+pkg_uuid_repo = https://github.com/okeuday/uuid
+pkg_uuid_commit = master
+
+PACKAGES += ux
+pkg_ux_name = ux
+pkg_ux_description = Unicode eXtension for Erlang (Strings, Collation)
+pkg_ux_homepage = https://github.com/erlang-unicode/ux
+pkg_ux_fetch = git
+pkg_ux_repo = https://github.com/erlang-unicode/ux
+pkg_ux_commit = master
+
+PACKAGES += vert
+pkg_vert_name = vert
+pkg_vert_description = Erlang binding to libvirt virtualization API
+pkg_vert_homepage = https://github.com/msantos/erlang-libvirt
+pkg_vert_fetch = git
+pkg_vert_repo = https://github.com/msantos/erlang-libvirt
+pkg_vert_commit = master
+
+PACKAGES += verx
+pkg_verx_name = verx
+pkg_verx_description = Erlang implementation of the libvirtd remote protocol
+pkg_verx_homepage = https://github.com/msantos/verx
+pkg_verx_fetch = git
+pkg_verx_repo = https://github.com/msantos/verx
+pkg_verx_commit = master
+
+PACKAGES += vmq_acl
+pkg_vmq_acl_name = vmq_acl
+pkg_vmq_acl_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_acl_homepage = https://verne.mq/
+pkg_vmq_acl_fetch = git
+pkg_vmq_acl_repo = https://github.com/erlio/vmq_acl
+pkg_vmq_acl_commit = master
+
+PACKAGES += vmq_bridge
+pkg_vmq_bridge_name = vmq_bridge
+pkg_vmq_bridge_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_bridge_homepage = https://verne.mq/
+pkg_vmq_bridge_fetch = git
+pkg_vmq_bridge_repo = https://github.com/erlio/vmq_bridge
+pkg_vmq_bridge_commit = master
+
+PACKAGES += vmq_graphite
+pkg_vmq_graphite_name = vmq_graphite
+pkg_vmq_graphite_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_graphite_homepage = https://verne.mq/
+pkg_vmq_graphite_fetch = git
+pkg_vmq_graphite_repo = https://github.com/erlio/vmq_graphite
+pkg_vmq_graphite_commit = master
+
+PACKAGES += vmq_passwd
+pkg_vmq_passwd_name = vmq_passwd
+pkg_vmq_passwd_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_passwd_homepage = https://verne.mq/
+pkg_vmq_passwd_fetch = git
+pkg_vmq_passwd_repo = https://github.com/erlio/vmq_passwd
+pkg_vmq_passwd_commit = master
+
+PACKAGES += vmq_server
+pkg_vmq_server_name = vmq_server
+pkg_vmq_server_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_server_homepage = https://verne.mq/
+pkg_vmq_server_fetch = git
+pkg_vmq_server_repo = https://github.com/erlio/vmq_server
+pkg_vmq_server_commit = master
+
+PACKAGES += vmq_snmp
+pkg_vmq_snmp_name = vmq_snmp
+pkg_vmq_snmp_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_snmp_homepage = https://verne.mq/
+pkg_vmq_snmp_fetch = git
+pkg_vmq_snmp_repo = https://github.com/erlio/vmq_snmp
+pkg_vmq_snmp_commit = master
+
+PACKAGES += vmq_systree
+pkg_vmq_systree_name = vmq_systree
+pkg_vmq_systree_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_systree_homepage = https://verne.mq/
+pkg_vmq_systree_fetch = git
+pkg_vmq_systree_repo = https://github.com/erlio/vmq_systree
+pkg_vmq_systree_commit = master
+
+PACKAGES += vmstats
+pkg_vmstats_name = vmstats
+pkg_vmstats_description = Tiny Erlang app that works in conjunction with statsderl in order to generate information on the Erlang VM for Graphite logs.
+pkg_vmstats_homepage = https://github.com/ferd/vmstats
+pkg_vmstats_fetch = git
+pkg_vmstats_repo = https://github.com/ferd/vmstats
+pkg_vmstats_commit = master
+
+PACKAGES += walrus
+pkg_walrus_name = walrus
+pkg_walrus_description = Walrus - Mustache-like Templating
+pkg_walrus_homepage = https://github.com/devinus/walrus
+pkg_walrus_fetch = git
+pkg_walrus_repo = https://github.com/devinus/walrus
+pkg_walrus_commit = master
+
+PACKAGES += webmachine
+pkg_webmachine_name = webmachine
+pkg_webmachine_description = A REST-based system for building web applications.
+pkg_webmachine_homepage = https://github.com/basho/webmachine
+pkg_webmachine_fetch = git
+pkg_webmachine_repo = https://github.com/basho/webmachine
+pkg_webmachine_commit = master
+
+PACKAGES += websocket_client
+pkg_websocket_client_name = websocket_client
+pkg_websocket_client_description = Erlang websocket client (ws and wss supported)
+pkg_websocket_client_homepage = https://github.com/jeremyong/websocket_client
+pkg_websocket_client_fetch = git
+pkg_websocket_client_repo = https://github.com/jeremyong/websocket_client
+pkg_websocket_client_commit = master
+
+PACKAGES += worker_pool
+pkg_worker_pool_name = worker_pool
+pkg_worker_pool_description = A simple Erlang worker pool
+pkg_worker_pool_homepage = https://github.com/inaka/worker_pool
+pkg_worker_pool_fetch = git
+pkg_worker_pool_repo = https://github.com/inaka/worker_pool
+pkg_worker_pool_commit = master
+
+PACKAGES += wrangler
+pkg_wrangler_name = wrangler
+pkg_wrangler_description = Import of the Wrangler SVN repository.
+pkg_wrangler_homepage = http://www.cs.kent.ac.uk/projects/wrangler/Home.html
+pkg_wrangler_fetch = git
+pkg_wrangler_repo = https://github.com/RefactoringTools/wrangler
+pkg_wrangler_commit = master
+
+PACKAGES += wsock
+pkg_wsock_name = wsock
+pkg_wsock_description = Erlang library to build WebSocket clients and servers
+pkg_wsock_homepage = https://github.com/madtrick/wsock
+pkg_wsock_fetch = git
+pkg_wsock_repo = https://github.com/madtrick/wsock
+pkg_wsock_commit = master
+
+PACKAGES += xhttpc
+pkg_xhttpc_name = xhttpc
+pkg_xhttpc_description = Extensible HTTP Client for Erlang
+pkg_xhttpc_homepage = https://github.com/seriyps/xhttpc
+pkg_xhttpc_fetch = git
+pkg_xhttpc_repo = https://github.com/seriyps/xhttpc
+pkg_xhttpc_commit = master
+
+PACKAGES += xref_runner
+pkg_xref_runner_name = xref_runner
+pkg_xref_runner_description = Erlang Xref Runner (inspired by rebar xref)
+pkg_xref_runner_homepage = https://github.com/inaka/xref_runner
+pkg_xref_runner_fetch = git
+pkg_xref_runner_repo = https://github.com/inaka/xref_runner
+pkg_xref_runner_commit = master
+
+PACKAGES += yamerl
+pkg_yamerl_name = yamerl
+pkg_yamerl_description = YAML 1.2 parser in pure Erlang
+pkg_yamerl_homepage = https://github.com/yakaz/yamerl
+pkg_yamerl_fetch = git
+pkg_yamerl_repo = https://github.com/yakaz/yamerl
+pkg_yamerl_commit = master
+
+PACKAGES += yamler
+pkg_yamler_name = yamler
+pkg_yamler_description = libyaml-based YAML loader for Erlang
+pkg_yamler_homepage = https://github.com/goertzenator/yamler
+pkg_yamler_fetch = git
+pkg_yamler_repo = https://github.com/goertzenator/yamler
+pkg_yamler_commit = master
+
+PACKAGES += yaws
+pkg_yaws_name = yaws
+pkg_yaws_description = Yaws webserver
+pkg_yaws_homepage = http://yaws.hyber.org
+pkg_yaws_fetch = git
+pkg_yaws_repo = https://github.com/klacke/yaws
+pkg_yaws_commit = master
+
+PACKAGES += zab_engine
+pkg_zab_engine_name = zab_engine
+pkg_zab_engine_description = ZAB protocol implementation in Erlang
+pkg_zab_engine_homepage = https://github.com/xinmingyao/zab_engine
+pkg_zab_engine_fetch = git
+pkg_zab_engine_repo = https://github.com/xinmingyao/zab_engine
+pkg_zab_engine_commit = master
+
+PACKAGES += zabbix_sender
+pkg_zabbix_sender_name = zabbix_sender
+pkg_zabbix_sender_description = Zabbix trapper for sending data to Zabbix in pure Erlang
+pkg_zabbix_sender_homepage = https://github.com/stalkermn/zabbix_sender
+pkg_zabbix_sender_fetch = git
+pkg_zabbix_sender_repo = https://github.com/stalkermn/zabbix_sender.git
+pkg_zabbix_sender_commit = master
+
+PACKAGES += zeta
+pkg_zeta_name = zeta
+pkg_zeta_description = HTTP access log parser in Erlang
+pkg_zeta_homepage = https://github.com/s1n4/zeta
+pkg_zeta_fetch = git
+pkg_zeta_repo = https://github.com/s1n4/zeta
+pkg_zeta_commit = master
+
+PACKAGES += zippers
+pkg_zippers_name = zippers
+pkg_zippers_description = A library for functional zipper data structures in Erlang.
+pkg_zippers_homepage = https://github.com/ferd/zippers
+pkg_zippers_fetch = git
+pkg_zippers_repo = https://github.com/ferd/zippers
+pkg_zippers_commit = master
+
+PACKAGES += zlists
+pkg_zlists_name = zlists
+pkg_zlists_description = Erlang lazy lists library.
+pkg_zlists_homepage = https://github.com/vjache/erlang-zlists
+pkg_zlists_fetch = git
+pkg_zlists_repo = https://github.com/vjache/erlang-zlists
+pkg_zlists_commit = master
+
+PACKAGES += zraft_lib
+pkg_zraft_lib_name = zraft_lib
+pkg_zraft_lib_description = Erlang Raft consensus protocol implementation
+pkg_zraft_lib_homepage = https://github.com/dreyk/zraft_lib
+pkg_zraft_lib_fetch = git
+pkg_zraft_lib_repo = https://github.com/dreyk/zraft_lib
+pkg_zraft_lib_commit = master
+
+PACKAGES += zucchini
+pkg_zucchini_name = zucchini
+pkg_zucchini_description = An Erlang INI parser
+pkg_zucchini_homepage = https://github.com/devinus/zucchini
+pkg_zucchini_fetch = git
+pkg_zucchini_repo = https://github.com/devinus/zucchini
+pkg_zucchini_commit = master
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: search
+
+define pkg_print
+ $(verbose) printf "%s\n" \
+ $(if $(call core_eq,$(1),$(pkg_$(1)_name)),,"Pkg name: $(1)") \
+ "App name: $(pkg_$(1)_name)" \
+ "Description: $(pkg_$(1)_description)" \
+ "Home page: $(pkg_$(1)_homepage)" \
+ "Fetch with: $(pkg_$(1)_fetch)" \
+ "Repository: $(pkg_$(1)_repo)" \
+ "Commit: $(pkg_$(1)_commit)" \
+ ""
+
+endef
+
+search:
+ifdef q
+ $(foreach p,$(PACKAGES), \
+ $(if $(findstring $(call core_lc,$(q)),$(call core_lc,$(pkg_$(p)_name) $(pkg_$(p)_description))), \
+ $(call pkg_print,$(p))))
+else
+ $(foreach p,$(PACKAGES),$(call pkg_print,$(p)))
+endif
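+
+# Usage illustration (not part of the build): the search target above
+# is driven from the command line; q is an optional, case-insensitive
+# query matched against package names and descriptions. The query
+# string "pool" below is an arbitrary example.
+#
+#   make search           # print every known package
+#   make search q=pool    # print only packages matching "pool"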
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-deps clean-tmp-deps.log
+
+# Configuration.
+
+ifdef OTP_DEPS
+$(warning The variable OTP_DEPS is deprecated in favor of LOCAL_DEPS.)
+endif
+
+IGNORE_DEPS ?=
+export IGNORE_DEPS
+
+APPS_DIR ?= $(CURDIR)/apps
+export APPS_DIR
+
+DEPS_DIR ?= $(CURDIR)/deps
+export DEPS_DIR
+
+REBAR_DEPS_DIR = $(DEPS_DIR)
+export REBAR_DEPS_DIR
+
+REBAR_GIT ?= https://github.com/rebar/rebar
+REBAR_COMMIT ?= 576e12171ab8d69b048b827b92aa65d067deea01
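+
+# Illustration only: since the directories above are assigned with ?=,
+# a project Makefile can override them before including erlang.mk. The
+# path below is a hypothetical example.
+#
+# DEPS_DIR = $(CURDIR)/vendor/deps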
+
+# External "early" plugins (see core/plugins.mk for regular plugins).
+# They both use the core_dep_plugin macro.
+
+define core_dep_plugin
+ifeq ($(2),$(PROJECT))
+-include $$(patsubst $(PROJECT)/%,%,$(1))
+else
+-include $(DEPS_DIR)/$(1)
+
+$(DEPS_DIR)/$(1): $(DEPS_DIR)/$(2) ;
+endif
+endef
+
+DEP_EARLY_PLUGINS ?=
+
+$(foreach p,$(DEP_EARLY_PLUGINS),\
+ $(eval $(if $(findstring /,$p),\
+ $(call core_dep_plugin,$p,$(firstword $(subst /, ,$p))),\
+ $(call core_dep_plugin,$p/early-plugins.mk,$p))))
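+
+# Example (illustrative, hypothetical dep name): an entry without a slash
+# loads $(DEPS_DIR)/<entry>/early-plugins.mk; an entry with a slash names
+# a file relative to $(DEPS_DIR).
+#   DEP_EARLY_PLUGINS = mydep
+#   DEP_EARLY_PLUGINS = mydep/mk/early-plugins.mk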
+
+# Query functions.
+
+query_fetch_method = $(if $(dep_$(1)),$(call _qfm_dep,$(word 1,$(dep_$(1)))),$(call _qfm_pkg,$(1)))
+_qfm_dep = $(if $(dep_fetch_$(1)),$(1),$(if $(IS_DEP),legacy,fail))
+_qfm_pkg = $(if $(pkg_$(1)_fetch),$(pkg_$(1)_fetch),fail)
+
+query_name = $(if $(dep_$(1)),$(1),$(if $(pkg_$(1)_name),$(pkg_$(1)_name),$(1)))
+
+query_repo = $(call _qr,$(1),$(call query_fetch_method,$(1)))
+_qr = $(if $(query_repo_$(2)),$(call query_repo_$(2),$(1)),$(call dep_repo,$(1)))
+
+query_repo_default = $(if $(dep_$(1)),$(word 2,$(dep_$(1))),$(pkg_$(1)_repo))
+query_repo_git = $(patsubst git://github.com/%,https://github.com/%,$(call query_repo_default,$(1)))
+query_repo_git-subfolder = $(call query_repo_git,$(1))
+query_repo_git-submodule = -
+query_repo_hg = $(call query_repo_default,$(1))
+query_repo_svn = $(call query_repo_default,$(1))
+query_repo_cp = $(call query_repo_default,$(1))
+query_repo_ln = $(call query_repo_default,$(1))
+query_repo_hex = https://hex.pm/packages/$(if $(word 3,$(dep_$(1))),$(word 3,$(dep_$(1))),$(1))
+query_repo_fail = -
+query_repo_legacy = -
+
+query_version = $(call _qv,$(1),$(call query_fetch_method,$(1)))
+_qv = $(if $(query_version_$(2)),$(call query_version_$(2),$(1)),$(call dep_commit,$(1)))
+
+query_version_default = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(word 3,$(dep_$(1))),$(pkg_$(1)_commit)))
+query_version_git = $(call query_version_default,$(1))
+query_version_git-subfolder = $(call query_version_git,$(1))
+query_version_git-submodule = -
+query_version_hg = $(call query_version_default,$(1))
+query_version_svn = -
+query_version_cp = -
+query_version_ln = -
+query_version_hex = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(word 2,$(dep_$(1))),$(pkg_$(1)_commit)))
+query_version_fail = -
+query_version_legacy = -
+
+query_extra = $(call _qe,$(1),$(call query_fetch_method,$(1)))
+_qe = $(if $(query_extra_$(2)),$(call query_extra_$(2),$(1)),-)
+
+query_extra_git = -
+query_extra_git-subfolder = $(if $(dep_$(1)),subfolder=$(word 4,$(dep_$(1))),-)
+query_extra_git-submodule = -
+query_extra_hg = -
+query_extra_svn = -
+query_extra_cp = -
+query_extra_ln = -
+query_extra_hex = $(if $(dep_$(1)),package-name=$(word 3,$(dep_$(1))),-)
+query_extra_fail = -
+query_extra_legacy = -
+
+query_absolute_path = $(addprefix $(DEPS_DIR)/,$(call query_name,$(1)))
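+
+# Example (illustrative, hypothetical dependency):
+# given "dep_cowboy = git git://github.com/ninenines/cowboy 2.9.0",
+#   $(call query_fetch_method,cowboy) -> git
+#   $(call query_repo,cowboy)         -> https://github.com/ninenines/cowboy
+#   $(call query_version,cowboy)      -> 2.9.0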
+
+# Deprecated legacy query functions.
+dep_fetch = $(call query_fetch_method,$(1))
+dep_name = $(call query_name,$(1))
+dep_repo = $(call query_repo_git,$(1))
+dep_commit = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(if $(filter hex,$(word 1,$(dep_$(1)))),$(word 2,$(dep_$(1))),$(word 3,$(dep_$(1)))),$(pkg_$(1)_commit)))
+
+LOCAL_DEPS_DIRS = $(foreach a,$(LOCAL_DEPS),$(if $(wildcard $(APPS_DIR)/$(a)),$(APPS_DIR)/$(a)))
+ALL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(foreach dep,$(filter-out $(IGNORE_DEPS),$(BUILD_DEPS) $(DEPS)),$(call dep_name,$(dep))))
+
+# When we are calling an app directly we don't want to include it here
+# otherwise it'll be treated both as an app and as the top-level project.
+ALL_APPS_DIRS = $(if $(wildcard $(APPS_DIR)/),$(filter-out $(APPS_DIR),$(shell find $(APPS_DIR) -maxdepth 1 -type d)))
+ifdef ROOT_DIR
+ifndef IS_APP
+ALL_APPS_DIRS := $(filter-out $(APPS_DIR)/$(notdir $(CURDIR)),$(ALL_APPS_DIRS))
+endif
+endif
+
+ifeq ($(filter $(APPS_DIR) $(DEPS_DIR),$(subst :, ,$(ERL_LIBS))),)
+ifeq ($(ERL_LIBS),)
+ ERL_LIBS = $(APPS_DIR):$(DEPS_DIR)
+else
+ ERL_LIBS := $(ERL_LIBS):$(APPS_DIR):$(DEPS_DIR)
+endif
+endif
+export ERL_LIBS
+
+export NO_AUTOPATCH
+
+# Verbosity.
+
+dep_verbose_0 = @echo " DEP $1 ($(call dep_commit,$1))";
+dep_verbose_2 = set -x;
+dep_verbose = $(dep_verbose_$(V))
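+
+# Example (illustrative): V selects how much each recipe echoes;
+#   $ make         # V=0 (the usual default): terse " DEP <name> (<commit>)" lines
+#   $ make V=2     # trace every shell command with set -x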
+
+# Optimization: don't recompile deps unless truly necessary.
+
+ifndef IS_DEP
+ifneq ($(MAKELEVEL),0)
+$(shell rm -f ebin/dep_built)
+endif
+endif
+
+# Core targets.
+
+ALL_APPS_DIRS_TO_BUILD = $(if $(LOCAL_DEPS_DIRS)$(IS_APP),$(LOCAL_DEPS_DIRS),$(ALL_APPS_DIRS))
+
+apps:: $(ALL_APPS_DIRS) clean-tmp-deps.log | $(ERLANG_MK_TMP)
+# Create ebin directory for all apps to make sure Erlang recognizes them
+# as proper OTP applications when using -include_lib. This is a temporary
+# fix; a proper fix would be to compile apps/* in the right order.
+ifndef IS_APP
+ifneq ($(ALL_APPS_DIRS),)
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ mkdir -p $$dep/ebin; \
+ done
+endif
+endif
+# At the top level: if LOCAL_DEPS is defined with at least one local app, only
+# compile that list of apps. Otherwise, compile everything.
+# Within an app: compile all LOCAL_DEPS that are (uncompiled) local apps.
+ifneq ($(ALL_APPS_DIRS_TO_BUILD),)
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS_TO_BUILD); do \
+ if grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/apps.log; then \
+ :; \
+ else \
+ echo $$dep >> $(ERLANG_MK_TMP)/apps.log; \
+ $(MAKE) -C $$dep $(if $(IS_TEST),test-build-app) IS_APP=1; \
+ fi \
+ done
+endif
+
+clean-tmp-deps.log:
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_TMP)/apps.log $(ERLANG_MK_TMP)/deps.log
+endif
+
+# Erlang.mk does not rebuild dependencies after they have been compiled
+# once. Developers working on the top-level project and on some of its
+# dependencies at the same time may want to change this behavior.
+# There are two solutions:
+#     1. Set `FULL=1` so that all dependencies are visited and
+#        recursively recompiled if necessary.
+#     2. Set `FORCE_REBUILD=` to the specific list of dependencies that
+#        should be recompiled (instead of the whole set).
+
+FORCE_REBUILD ?=
+
+ifeq ($(origin FULL),undefined)
+ifneq ($(strip $(force_rebuild_dep)$(FORCE_REBUILD)),)
+define force_rebuild_dep
+echo "$(FORCE_REBUILD)" | grep -qw "$$(basename "$1")"
+endef
+endif
+endif
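+
+# Example (illustrative dependency names):
+#   $ make FULL=1                        # visit and rebuild all deps as needed
+#   $ make FORCE_REBUILD="cowboy ranch"  # force only these deps to rebuild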
+
+ifneq ($(SKIP_DEPS),)
+deps::
+else
+deps:: $(ALL_DEPS_DIRS) apps clean-tmp-deps.log | $(ERLANG_MK_TMP)
+ifneq ($(ALL_DEPS_DIRS),)
+ $(verbose) set -e; for dep in $(ALL_DEPS_DIRS); do \
+ if grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/deps.log; then \
+ :; \
+ else \
+ echo $$dep >> $(ERLANG_MK_TMP)/deps.log; \
+ if [ -z "$(strip $(FULL))" ] $(if $(force_rebuild_dep),&& ! ($(call force_rebuild_dep,$$dep)),) && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ elif [ -f $$dep/GNUmakefile ] || [ -f $$dep/makefile ] || [ -f $$dep/Makefile ]; then \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ else \
+ echo "Error: No Makefile to build dependency $$dep." >&2; \
+ exit 2; \
+ fi \
+ fi \
+ done
+endif
+endif
+
+# Deps related targets.
+
+# @todo rename GNUmakefile and makefile into Makefile first, if they exist
+# While the Makefile could also be named GNUmakefile or makefile,
+# in practice only Makefile has been needed so far.
+define dep_autopatch
+ if [ -f $(DEPS_DIR)/$(1)/erlang.mk ]; then \
+ rm -rf $(DEPS_DIR)/$1/ebin/; \
+ $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+ $(call dep_autopatch_erlang_mk,$(1)); \
+ elif [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+ if [ -f $(DEPS_DIR)/$1/rebar.lock ]; then \
+ $(call dep_autopatch2,$1); \
+ elif [ 0 != `grep -c "include ../\w*\.mk" $(DEPS_DIR)/$(1)/Makefile` ]; then \
+ $(call dep_autopatch2,$(1)); \
+ elif [ 0 != `grep -ci "^[^#].*rebar" $(DEPS_DIR)/$(1)/Makefile` ]; then \
+ $(call dep_autopatch2,$(1)); \
+ elif [ -n "`find $(DEPS_DIR)/$(1)/ -type f -name \*.mk -not -name erlang.mk -exec grep -i "^[^#].*rebar" '{}' \;`" ]; then \
+ $(call dep_autopatch2,$(1)); \
+ fi \
+ else \
+ if [ ! -d $(DEPS_DIR)/$(1)/src/ ]; then \
+ $(call dep_autopatch_noop,$(1)); \
+ else \
+ $(call dep_autopatch2,$(1)); \
+ fi \
+ fi
+endef
+
+define dep_autopatch2
+ ! test -f $(DEPS_DIR)/$1/ebin/$1.app || \
+ mv -n $(DEPS_DIR)/$1/ebin/$1.app $(DEPS_DIR)/$1/src/$1.app.src; \
+ rm -f $(DEPS_DIR)/$1/ebin/$1.app; \
+ if [ -f $(DEPS_DIR)/$1/src/$1.app.src.script ]; then \
+ $(call erlang,$(call dep_autopatch_appsrc_script.erl,$(1))); \
+ fi; \
+ $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+ if [ -f $(DEPS_DIR)/$(1)/rebar -o -f $(DEPS_DIR)/$(1)/rebar.config -o -f $(DEPS_DIR)/$(1)/rebar.config.script -o -f $(DEPS_DIR)/$1/rebar.lock ]; then \
+ $(call dep_autopatch_fetch_rebar); \
+ $(call dep_autopatch_rebar,$(1)); \
+ else \
+ $(call dep_autopatch_gen,$(1)); \
+ fi
+endef
+
+define dep_autopatch_noop
+ printf "noop:\n" > $(DEPS_DIR)/$(1)/Makefile
+endef
+
+# Replace "include erlang.mk" with a line that will load the parent Erlang.mk
+# if given. Do it for all 3 possible Makefile file names.
+ifeq ($(NO_AUTOPATCH_ERLANG_MK),)
+define dep_autopatch_erlang_mk
+ for f in Makefile makefile GNUmakefile; do \
+ if [ -f $(DEPS_DIR)/$1/$$f ]; then \
+ sed -i.bak s/'include *erlang.mk'/'include $$(if $$(ERLANG_MK_FILENAME),$$(ERLANG_MK_FILENAME),erlang.mk)'/ $(DEPS_DIR)/$1/$$f; \
+ fi \
+ done
+endef
+else
+define dep_autopatch_erlang_mk
+ :
+endef
+endif
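+
+# After patching, the dependency's include line reads:
+#   include $(if $(ERLANG_MK_FILENAME),$(ERLANG_MK_FILENAME),erlang.mk)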
+
+define dep_autopatch_gen
+ printf "%s\n" \
+ "ERLC_OPTS = +debug_info" \
+ "include ../../erlang.mk" > $(DEPS_DIR)/$(1)/Makefile
+endef
+
+# We use flock/lockf when available to avoid concurrency issues.
+define dep_autopatch_fetch_rebar
+ if command -v flock >/dev/null; then \
+ flock $(ERLANG_MK_TMP)/rebar.lock sh -c "$(call dep_autopatch_fetch_rebar2)"; \
+ elif command -v lockf >/dev/null; then \
+ lockf $(ERLANG_MK_TMP)/rebar.lock sh -c "$(call dep_autopatch_fetch_rebar2)"; \
+ else \
+ $(call dep_autopatch_fetch_rebar2); \
+ fi
+endef
+
+define dep_autopatch_fetch_rebar2
+ if [ ! -d $(ERLANG_MK_TMP)/rebar ]; then \
+ git clone -q -n -- $(REBAR_GIT) $(ERLANG_MK_TMP)/rebar; \
+ cd $(ERLANG_MK_TMP)/rebar; \
+ git checkout -q $(REBAR_COMMIT); \
+ ./bootstrap; \
+ cd -; \
+ fi
+endef
+
+define dep_autopatch_rebar
+ if [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+ mv $(DEPS_DIR)/$(1)/Makefile $(DEPS_DIR)/$(1)/Makefile.orig.mk; \
+ fi; \
+ $(call erlang,$(call dep_autopatch_rebar.erl,$(1))); \
+ rm -f $(DEPS_DIR)/$(1)/ebin/$(1).app
+endef
+
+define dep_autopatch_rebar.erl
+ application:load(rebar),
+ application:set_env(rebar, log_level, debug),
+ rmemo:start(),
+ Conf1 = case file:consult("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config)") of
+ {ok, Conf0} -> Conf0;
+ _ -> []
+ end,
+ {Conf, OsEnv} = fun() ->
+ case filelib:is_file("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)") of
+ false -> {Conf1, []};
+ true ->
+ Bindings0 = erl_eval:new_bindings(),
+ Bindings1 = erl_eval:add_binding('CONFIG', Conf1, Bindings0),
+ Bindings = erl_eval:add_binding('SCRIPT', "$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings1),
+ Before = os:getenv(),
+ {ok, Conf2} = file:script("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings),
+ {Conf2, lists:foldl(fun(E, Acc) -> lists:delete(E, Acc) end, os:getenv(), Before)}
+ end
+ end(),
+ Write = fun (Text) ->
+ file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/Makefile)", Text, [append])
+ end,
+ Escape = fun (Text) ->
+ re:replace(Text, "\\\\$$", "\$$$$", [global, {return, list}])
+ end,
+ Write("IGNORE_DEPS += edown eper eunit_formatters meck node_package "
+ "rebar_lock_deps_plugin rebar_vsn_plugin reltool_util\n"),
+ Write("C_SRC_DIR = /path/do/not/exist\n"),
+ Write("C_SRC_TYPE = rebar\n"),
+ Write("DRV_CFLAGS = -fPIC\nexport DRV_CFLAGS\n"),
+ Write(["ERLANG_ARCH = ", rebar_utils:wordsize(), "\nexport ERLANG_ARCH\n"]),
+ ToList = fun
+ (V) when is_atom(V) -> atom_to_list(V);
+ (V) when is_list(V) -> "'\\"" ++ V ++ "\\"'"
+ end,
+ fun() ->
+ Write("ERLC_OPTS = +debug_info\nexport ERLC_OPTS\n"),
+ case lists:keyfind(erl_opts, 1, Conf) of
+ false -> ok;
+ {_, ErlOpts} ->
+ lists:foreach(fun
+ ({d, D}) ->
+ Write("ERLC_OPTS += -D" ++ ToList(D) ++ "=1\n");
+ ({d, DKey, DVal}) ->
+ Write("ERLC_OPTS += -D" ++ ToList(DKey) ++ "=" ++ ToList(DVal) ++ "\n");
+ ({i, I}) ->
+ Write(["ERLC_OPTS += -I ", I, "\n"]);
+ ({platform_define, Regex, D}) ->
+ case rebar_utils:is_arch(Regex) of
+ true -> Write("ERLC_OPTS += -D" ++ ToList(D) ++ "=1\n");
+ false -> ok
+ end;
+ ({parse_transform, PT}) ->
+ Write("ERLC_OPTS += +'{parse_transform, " ++ ToList(PT) ++ "}'\n");
+ (_) -> ok
+ end, ErlOpts)
+ end,
+ Write("\n")
+ end(),
+ GetHexVsn = fun(N, NP) ->
+ case file:consult("$(call core_native_path,$(DEPS_DIR)/$1/rebar.lock)") of
+ {ok, Lock} ->
+ io:format("~p~n", [Lock]),
+ case lists:keyfind("1.1.0", 1, Lock) of
+ {_, LockPkgs} ->
+ io:format("~p~n", [LockPkgs]),
+ case lists:keyfind(atom_to_binary(N, latin1), 1, LockPkgs) of
+ {_, {pkg, _, Vsn}, _} ->
+ io:format("~p~n", [Vsn]),
+ {N, {hex, NP, binary_to_list(Vsn)}};
+ _ ->
+ false
+ end;
+ _ ->
+ false
+ end;
+ _ ->
+ false
+ end
+ end,
+ SemVsn = fun
+ ("~>" ++ S0) ->
+ S = case S0 of
+ " " ++ S1 -> S1;
+ _ -> S0
+ end,
+ case length([ok || $$. <- S]) of
+ 0 -> S ++ ".0.0";
+ 1 -> S ++ ".0";
+ _ -> S
+ end;
+ (S) -> S
+ end,
+ fun() ->
+ File = case lists:keyfind(deps, 1, Conf) of
+ false -> [];
+ {_, Deps} ->
+ [begin case case Dep of
+ N when is_atom(N) -> GetHexVsn(N, N);
+ {N, S} when is_atom(N), is_list(S) -> {N, {hex, N, SemVsn(S)}};
+ {N, {pkg, NP}} when is_atom(N) -> GetHexVsn(N, NP);
+ {N, S, {pkg, NP}} -> {N, {hex, NP, S}};
+ {N, S} when is_tuple(S) -> {N, S};
+ {N, _, S} -> {N, S};
+ {N, _, S, _} -> {N, S};
+ _ -> false
+ end of
+ false -> ok;
+ {Name, Source} ->
+ {Method, Repo, Commit} = case Source of
+ {hex, NPV, V} -> {hex, V, NPV};
+ {git, R} -> {git, R, master};
+ {M, R, {branch, C}} -> {M, R, C};
+ {M, R, {ref, C}} -> {M, R, C};
+ {M, R, {tag, C}} -> {M, R, C};
+ {M, R, C} -> {M, R, C}
+ end,
+ Write(io_lib:format("DEPS += ~s\ndep_~s = ~s ~s ~s~n", [Name, Name, Method, Repo, Commit]))
+ end end || Dep <- Deps]
+ end
+ end(),
+ fun() ->
+ case lists:keyfind(erl_first_files, 1, Conf) of
+ false -> ok;
+ {_, Files} ->
+ Names = [[" ", case lists:reverse(F) of
+ "lre." ++ Elif -> lists:reverse(Elif);
+ "lrx." ++ Elif -> lists:reverse(Elif);
+ "lry." ++ Elif -> lists:reverse(Elif);
+ Elif -> lists:reverse(Elif)
+ end] || "src/" ++ F <- Files],
+ Write(io_lib:format("COMPILE_FIRST +=~s\n", [Names]))
+ end
+ end(),
+ Write("\n\nrebar_dep: preprocess pre-deps deps pre-app app\n"),
+ Write("\npreprocess::\n"),
+ Write("\npre-deps::\n"),
+ Write("\npre-app::\n"),
+ PatchHook = fun(Cmd) ->
+ Cmd2 = re:replace(Cmd, "^([g]?make)(.*)( -C.*)", "\\\\1\\\\3\\\\2", [{return, list}]),
+ case Cmd2 of
+ "make -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1);
+ "gmake -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1);
+ "make " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1);
+ "gmake " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1);
+ _ -> Escape(Cmd)
+ end
+ end,
+ fun() ->
+ case lists:keyfind(pre_hooks, 1, Conf) of
+ false -> ok;
+ {_, Hooks} ->
+ [case H of
+ {'get-deps', Cmd} ->
+ Write("\npre-deps::\n\t" ++ PatchHook(Cmd) ++ "\n");
+ {compile, Cmd} ->
+ Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n");
+ {Regex, compile, Cmd} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n");
+ false -> ok
+ end;
+ _ -> ok
+ end || H <- Hooks]
+ end
+ end(),
+ ShellToMk = fun(V0) ->
+ V1 = re:replace(V0, "[$$][(]", "$$\(shell ", [global]),
+ V = re:replace(V1, "([$$])(?![(])(\\\\w*)", "\\\\1(\\\\2)", [global]),
+ re:replace(V, "-Werror\\\\b", "", [{return, list}, global])
+ end,
+ PortSpecs = fun() ->
+ case lists:keyfind(port_specs, 1, Conf) of
+ false ->
+ case filelib:is_dir("$(call core_native_path,$(DEPS_DIR)/$1/c_src)") of
+ false -> [];
+ true ->
+ [{"priv/" ++ proplists:get_value(so_name, Conf, "$(1)_drv.so"),
+ proplists:get_value(port_sources, Conf, ["c_src/*.c"]), []}]
+ end;
+ {_, Specs} ->
+ lists:flatten([case S of
+ {Output, Input} -> {ShellToMk(Output), Input, []};
+ {Regex, Output, Input} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {ShellToMk(Output), Input, []};
+ false -> []
+ end;
+ {Regex, Output, Input, [{env, Env}]} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {ShellToMk(Output), Input, Env};
+ false -> []
+ end
+ end || S <- Specs])
+ end
+ end(),
+ PortSpecWrite = fun (Text) ->
+ file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/c_src/Makefile.erlang.mk)", Text, [append])
+ end,
+ case PortSpecs of
+ [] -> ok;
+ _ ->
+ Write("\npre-app::\n\t@$$\(MAKE) --no-print-directory -f c_src/Makefile.erlang.mk\n"),
+ PortSpecWrite(io_lib:format("ERL_CFLAGS ?= -finline-functions -Wall -fPIC -I \\"~s/erts-~s/include\\" -I \\"~s\\"\n",
+ [code:root_dir(), erlang:system_info(version), code:lib_dir(erl_interface, include)])),
+ PortSpecWrite(io_lib:format("ERL_LDFLAGS ?= -L \\"~s\\" -lei\n",
+ [code:lib_dir(erl_interface, lib)])),
+ [PortSpecWrite(["\n", E, "\n"]) || E <- OsEnv],
+ FilterEnv = fun(Env) ->
+ lists:flatten([case E of
+ {_, _} -> E;
+ {Regex, K, V} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {K, V};
+ false -> []
+ end
+ end || E <- Env])
+ end,
+ MergeEnv = fun(Env) ->
+ lists:foldl(fun ({K, V}, Acc) ->
+ case lists:keyfind(K, 1, Acc) of
+ false -> [{K, rebar_utils:expand_env_variable(V, K, "")}|Acc];
+ {_, V0} -> [{K, rebar_utils:expand_env_variable(V, K, V0)}|Acc]
+ end
+ end, [], Env)
+ end,
+ PortEnv = case lists:keyfind(port_env, 1, Conf) of
+ false -> [];
+ {_, PortEnv0} -> FilterEnv(PortEnv0)
+ end,
+ PortSpec = fun ({Output, Input0, Env}) ->
+ filelib:ensure_dir("$(call core_native_path,$(DEPS_DIR)/$1/)" ++ Output),
+ Input = [[" ", I] || I <- Input0],
+ PortSpecWrite([
+ [["\n", K, " = ", ShellToMk(V)] || {K, V} <- lists:reverse(MergeEnv(PortEnv))],
+ case $(PLATFORM) of
+ darwin -> "\n\nLDFLAGS += -flat_namespace -undefined suppress";
+ _ -> ""
+ end,
+ "\n\nall:: ", Output, "\n\t@:\n\n",
+ "%.o: %.c\n\t$$\(CC) -c -o $$\@ $$\< $$\(CFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.C\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.cc\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.cpp\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ [[Output, ": ", K, " += ", ShellToMk(V), "\n"] || {K, V} <- lists:reverse(MergeEnv(FilterEnv(Env)))],
+ Output, ": $$\(foreach ext,.c .C .cc .cpp,",
+ "$$\(patsubst %$$\(ext),%.o,$$\(filter %$$\(ext),$$\(wildcard", Input, "))))\n",
+ "\t$$\(CC) -o $$\@ $$\? $$\(LDFLAGS) $$\(ERL_LDFLAGS) $$\(DRV_LDFLAGS) $$\(EXE_LDFLAGS)",
+ case {filename:extension(Output), $(PLATFORM)} of
+ {[], _} -> "\n";
+ {_, darwin} -> "\n";
+ _ -> " -shared\n"
+ end])
+ end,
+ [PortSpec(S) || S <- PortSpecs]
+ end,
+ fun() ->
+ case lists:keyfind(plugins, 1, Conf) of
+ false -> ok;
+ {_, Plugins0} ->
+ Plugins = [P || P <- Plugins0, is_tuple(P)],
+ case lists:keyfind('lfe-compile', 1, Plugins) of
+ false -> ok;
+ _ -> Write("\nBUILD_DEPS = lfe lfe.mk\ndep_lfe.mk = git https://github.com/ninenines/lfe.mk master\nDEP_PLUGINS = lfe.mk\n")
+ end
+ end
+ end(),
+ Write("\ninclude $$\(if $$\(ERLANG_MK_FILENAME),$$\(ERLANG_MK_FILENAME),erlang.mk)"),
+ RunPlugin = fun(Plugin, Step) ->
+ case erlang:function_exported(Plugin, Step, 2) of
+ false -> ok;
+ true ->
+ c:cd("$(call core_native_path,$(DEPS_DIR)/$1/)"),
+ Ret = Plugin:Step({config, "", Conf, dict:new(), dict:new(), dict:new(),
+ dict:store(base_dir, "", dict:new())}, undefined),
+ io:format("rebar plugin ~p step ~p ret ~p~n", [Plugin, Step, Ret])
+ end
+ end,
+ fun() ->
+ case lists:keyfind(plugins, 1, Conf) of
+ false -> ok;
+ {_, Plugins0} ->
+ Plugins = [P || P <- Plugins0, is_atom(P)],
+ [begin
+ case lists:keyfind(deps, 1, Conf) of
+ false -> ok;
+ {_, Deps} ->
+ case lists:keyfind(P, 1, Deps) of
+ false -> ok;
+ _ ->
+ Path = "$(call core_native_path,$(DEPS_DIR)/)" ++ atom_to_list(P),
+ io:format("~s", [os:cmd("$(MAKE) -C $(call core_native_path,$(DEPS_DIR)/$1) " ++ Path)]),
+ io:format("~s", [os:cmd("$(MAKE) -C " ++ Path ++ " IS_DEP=1")]),
+ code:add_patha(Path ++ "/ebin")
+ end
+ end
+ end || P <- Plugins],
+ [case code:load_file(P) of
+ {module, P} -> ok;
+ _ ->
+ case lists:keyfind(plugin_dir, 1, Conf) of
+ false -> ok;
+ {_, PluginsDir} ->
+ ErlFile = "$(call core_native_path,$(DEPS_DIR)/$1/)" ++ PluginsDir ++ "/" ++ atom_to_list(P) ++ ".erl",
+ {ok, P, Bin} = compile:file(ErlFile, [binary]),
+ {module, P} = code:load_binary(P, ErlFile, Bin)
+ end
+ end || P <- Plugins],
+ [RunPlugin(P, preprocess) || P <- Plugins],
+ [RunPlugin(P, pre_compile) || P <- Plugins],
+ [RunPlugin(P, compile) || P <- Plugins]
+ end
+ end(),
+ halt()
+endef
+
+define dep_autopatch_appsrc_script.erl
+ AppSrc = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+ AppSrcScript = AppSrc ++ ".script",
+ {ok, Conf0} = file:consult(AppSrc),
+ Bindings0 = erl_eval:new_bindings(),
+ Bindings1 = erl_eval:add_binding('CONFIG', Conf0, Bindings0),
+ Bindings = erl_eval:add_binding('SCRIPT', AppSrcScript, Bindings1),
+ Conf = case file:script(AppSrcScript, Bindings) of
+ {ok, [C]} -> C;
+ {ok, C} -> C
+ end,
+ ok = file:write_file(AppSrc, io_lib:format("~p.~n", [Conf])),
+ halt()
+endef
+
+define dep_autopatch_appsrc.erl
+ AppSrcOut = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+ AppSrcIn = case filelib:is_regular(AppSrcOut) of false -> "$(call core_native_path,$(DEPS_DIR)/$1/ebin/$1.app)"; true -> AppSrcOut end,
+ case filelib:is_regular(AppSrcIn) of
+ false -> ok;
+ true ->
+ {ok, [{application, $(1), L0}]} = file:consult(AppSrcIn),
+ L1 = lists:keystore(modules, 1, L0, {modules, []}),
+ L2 = case lists:keyfind(vsn, 1, L1) of
+ {_, git} -> lists:keyreplace(vsn, 1, L1, {vsn, lists:droplast(os:cmd("git -C $(DEPS_DIR)/$1 describe --dirty --tags --always"))});
+ {_, {cmd, _}} -> lists:keyreplace(vsn, 1, L1, {vsn, "cmd"});
+ _ -> L1
+ end,
+ L3 = case lists:keyfind(registered, 1, L2) of false -> [{registered, []}|L2]; _ -> L2 end,
+ ok = file:write_file(AppSrcOut, io_lib:format("~p.~n", [{application, $(1), L3}])),
+ case AppSrcOut of AppSrcIn -> ok; _ -> ok = file:delete(AppSrcIn) end
+ end,
+ halt()
+endef
+
+define dep_fetch_git
+ git clone -q -n -- $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && git checkout -q $(call dep_commit,$(1));
+endef
+
+define dep_fetch_git-subfolder
+ mkdir -p $(ERLANG_MK_TMP)/git-subfolder; \
+ git clone -q -n -- $(call dep_repo,$1) \
+ $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1); \
+ cd $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1) \
+ && git checkout -q $(call dep_commit,$1); \
+ ln -s $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1)/$(word 4,$(dep_$(1))) \
+ $(DEPS_DIR)/$(call dep_name,$1);
+endef
+
+define dep_fetch_git-submodule
+ git submodule update --init -- $(DEPS_DIR)/$1;
+endef
+
+define dep_fetch_hg
+ hg clone -q -U $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && hg update -q $(call dep_commit,$(1));
+endef
+
+define dep_fetch_svn
+ svn checkout -q $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+define dep_fetch_cp
+ cp -R $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+define dep_fetch_ln
+ ln -s $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+# Hex dependencies only carry a package version, so there is no need to
+# look them up in the Erlang.mk package index.
+define dep_fetch_hex
+ mkdir -p $(ERLANG_MK_TMP)/hex $(DEPS_DIR)/$1; \
+ $(call core_http_get,$(ERLANG_MK_TMP)/hex/$1.tar,\
+ https://repo.hex.pm/tarballs/$(if $(word 3,$(dep_$1)),$(word 3,$(dep_$1)),$1)-$(strip $(word 2,$(dep_$1))).tar); \
+ tar -xOf $(ERLANG_MK_TMP)/hex/$1.tar contents.tar.gz | tar -C $(DEPS_DIR)/$1 -xzf -;
+endef
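+
+# Example (illustrative): word 2 is the version; an optional word 3 gives
+# the Hex package name when it differs from the application name.
+#   DEPS = jsx
+#   dep_jsx = hex 3.1.0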
+
+define dep_fetch_fail
+ echo "Error: Unknown or invalid dependency: $(1)." >&2; \
+ exit 78;
+endef
+
+# Kept for compatibility with older Erlang.mk dependency configurations.
+define dep_fetch_legacy
+ $(warning WARNING: '$(1)' dependency configuration uses deprecated format.) \
+ git clone -q -n -- $(word 1,$(dep_$(1))) $(DEPS_DIR)/$(1); \
+ cd $(DEPS_DIR)/$(1) && git checkout -q $(if $(word 2,$(dep_$(1))),$(word 2,$(dep_$(1))),master);
+endef
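+
+# The legacy format handled above is repository-then-commit with no
+# fetch method word (illustrative):
+#   dep_cowboy = https://github.com/ninenines/cowboy master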
+
+define dep_target
+$(DEPS_DIR)/$(call dep_name,$1): | $(ERLANG_MK_TMP)
+ $(eval DEP_NAME := $(call dep_name,$1))
+ $(eval DEP_STR := $(if $(filter $1,$(DEP_NAME)),$1,"$1 ($(DEP_NAME))"))
+ $(verbose) if test -d $(APPS_DIR)/$(DEP_NAME); then \
+ echo "Error: Dependency" $(DEP_STR) "conflicts with application found in $(APPS_DIR)/$(DEP_NAME)." >&2; \
+ exit 17; \
+ fi
+ $(verbose) mkdir -p $(DEPS_DIR)
+ $(dep_verbose) $(call dep_fetch_$(strip $(call dep_fetch,$(1))),$(1))
+ $(verbose) if [ -f $(DEPS_DIR)/$(1)/configure.ac -o -f $(DEPS_DIR)/$(1)/configure.in ] \
+ && [ ! -f $(DEPS_DIR)/$(1)/configure ]; then \
+ echo " AUTO " $(DEP_STR); \
+ cd $(DEPS_DIR)/$(1) && autoreconf -Wall -vif -I m4; \
+ fi
+ - $(verbose) if [ -f $(DEPS_DIR)/$(DEP_NAME)/configure ]; then \
+ echo " CONF " $(DEP_STR); \
+ cd $(DEPS_DIR)/$(DEP_NAME) && ./configure; \
+ fi
+ifeq ($(filter $(1),$(NO_AUTOPATCH)),)
+ $(verbose) $$(MAKE) --no-print-directory autopatch-$(DEP_NAME)
+endif
+
+.PHONY: autopatch-$(call dep_name,$1)
+
+autopatch-$(call dep_name,$1)::
+ $(verbose) if [ "$(1)" = "amqp_client" -a "$(RABBITMQ_CLIENT_PATCH)" ]; then \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \
+ echo " PATCH Downloading rabbitmq-codegen"; \
+ git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \
+ fi; \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-server ]; then \
+ echo " PATCH Downloading rabbitmq-server"; \
+ git clone https://github.com/rabbitmq/rabbitmq-server.git $(DEPS_DIR)/rabbitmq-server; \
+ fi; \
+ ln -s $(DEPS_DIR)/amqp_client/deps/rabbit_common-0.0.0 $(DEPS_DIR)/rabbit_common; \
+ elif [ "$(1)" = "rabbit" -a "$(RABBITMQ_SERVER_PATCH)" ]; then \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \
+ echo " PATCH Downloading rabbitmq-codegen"; \
+ git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \
+ fi \
+ elif [ "$1" = "elixir" -a "$(ELIXIR_PATCH)" ]; then \
+ ln -s lib/elixir/ebin $(DEPS_DIR)/elixir/; \
+ else \
+ $$(call dep_autopatch,$(call dep_name,$1)) \
+ fi
+endef
+
+$(foreach dep,$(BUILD_DEPS) $(DEPS),$(eval $(call dep_target,$(dep))))
+
+ifndef IS_APP
+clean:: clean-apps
+
+clean-apps:
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ $(MAKE) -C $$dep clean IS_APP=1; \
+ done
+
+distclean:: distclean-apps
+
+distclean-apps:
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ $(MAKE) -C $$dep distclean IS_APP=1; \
+ done
+endif
+
+ifndef SKIP_DEPS
+distclean:: distclean-deps
+
+distclean-deps:
+ $(gen_verbose) rm -rf $(DEPS_DIR)
+endif
+
+# Forward-declare variables used in core/deps-tools.mk. This is required
+# in case plugins use them.
+
+ERLANG_MK_RECURSIVE_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-deps-list.log
+ERLANG_MK_RECURSIVE_DOC_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-doc-deps-list.log
+ERLANG_MK_RECURSIVE_REL_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-rel-deps-list.log
+ERLANG_MK_RECURSIVE_TEST_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-test-deps-list.log
+ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-shell-deps-list.log
+
+ERLANG_MK_QUERY_DEPS_FILE = $(ERLANG_MK_TMP)/query-deps.log
+ERLANG_MK_QUERY_DOC_DEPS_FILE = $(ERLANG_MK_TMP)/query-doc-deps.log
+ERLANG_MK_QUERY_REL_DEPS_FILE = $(ERLANG_MK_TMP)/query-rel-deps.log
+ERLANG_MK_QUERY_TEST_DEPS_FILE = $(ERLANG_MK_TMP)/query-test-deps.log
+ERLANG_MK_QUERY_SHELL_DEPS_FILE = $(ERLANG_MK_TMP)/query-shell-deps.log
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: clean-app
+
+# Configuration.
+
+ERLC_OPTS ?= -Werror +debug_info +warn_export_vars +warn_shadow_vars \
+ +warn_obsolete_guard # +bin_opt_info +warn_export_all +warn_missing_spec
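+
+# Example (illustrative): projects typically extend rather than replace
+# these defaults from their own Makefile:
+#   ERLC_OPTS += +warn_missing_spec
+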
+COMPILE_FIRST ?=
+COMPILE_FIRST_PATHS = $(addprefix src/,$(addsuffix .erl,$(COMPILE_FIRST)))
+ERLC_EXCLUDE ?=
+ERLC_EXCLUDE_PATHS = $(addprefix src/,$(addsuffix .erl,$(ERLC_EXCLUDE)))
+
+ERLC_ASN1_OPTS ?=
+
+ERLC_MIB_OPTS ?=
+COMPILE_MIB_FIRST ?=
+COMPILE_MIB_FIRST_PATHS = $(addprefix mibs/,$(addsuffix .mib,$(COMPILE_MIB_FIRST)))
+
+# Verbosity.
+
+app_verbose_0 = @echo " APP " $(PROJECT);
+app_verbose_2 = set -x;
+app_verbose = $(app_verbose_$(V))
+
+appsrc_verbose_0 = @echo " APP " $(PROJECT).app.src;
+appsrc_verbose_2 = set -x;
+appsrc_verbose = $(appsrc_verbose_$(V))
+
+makedep_verbose_0 = @echo " DEPEND" $(PROJECT).d;
+makedep_verbose_2 = set -x;
+makedep_verbose = $(makedep_verbose_$(V))
+
+erlc_verbose_0 = @echo " ERLC " $(filter-out $(patsubst %,%.erl,$(ERLC_EXCLUDE)),\
+ $(filter %.erl %.core,$(?F)));
+erlc_verbose_2 = set -x;
+erlc_verbose = $(erlc_verbose_$(V))
+
+xyrl_verbose_0 = @echo " XYRL " $(filter %.xrl %.yrl,$(?F));
+xyrl_verbose_2 = set -x;
+xyrl_verbose = $(xyrl_verbose_$(V))
+
+asn1_verbose_0 = @echo " ASN1 " $(filter %.asn1,$(?F));
+asn1_verbose_2 = set -x;
+asn1_verbose = $(asn1_verbose_$(V))
+
+mib_verbose_0 = @echo " MIB " $(filter %.bin %.mib,$(?F));
+mib_verbose_2 = set -x;
+mib_verbose = $(mib_verbose_$(V))
+
+ifneq ($(wildcard src/),)
+
+# Targets.
+
+app:: $(if $(wildcard ebin/test),clean) deps
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d
+ $(verbose) $(MAKE) --no-print-directory app-build
+
+ifeq ($(wildcard src/$(PROJECT_MOD).erl),)
+define app_file
+{application, '$(PROJECT)', [
+ {description, "$(PROJECT_DESCRIPTION)"},
+ {vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP),
+ {id$(comma)$(space)"$(1)"}$(comma))
+ {modules, [$(call comma_list,$(2))]},
+ {registered, []},
+ {applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(foreach dep,$(DEPS),$(call dep_name,$(dep))))]},
+ {env, $(subst \,\\,$(PROJECT_ENV))}$(if $(findstring {,$(PROJECT_APP_EXTRA_KEYS)),$(comma)$(newline)$(tab)$(subst \,\\,$(PROJECT_APP_EXTRA_KEYS)),)
+]}.
+endef
+else
+define app_file
+{application, '$(PROJECT)', [
+ {description, "$(PROJECT_DESCRIPTION)"},
+ {vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP),
+ {id$(comma)$(space)"$(1)"}$(comma))
+ {modules, [$(call comma_list,$(2))]},
+ {registered, [$(call comma_list,$(PROJECT)_sup $(PROJECT_REGISTERED))]},
+ {applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(foreach dep,$(DEPS),$(call dep_name,$(dep))))]},
+ {mod, {$(PROJECT_MOD), []}},
+ {env, $(subst \,\\,$(PROJECT_ENV))}$(if $(findstring {,$(PROJECT_APP_EXTRA_KEYS)),$(comma)$(newline)$(tab)$(subst \,\\,$(PROJECT_APP_EXTRA_KEYS)),)
+]}.
+endef
+endif
+
+app-build: ebin/$(PROJECT).app
+ $(verbose) :
+
+# Source files.
+
+ALL_SRC_FILES := $(sort $(call core_find,src/,*))
+
+ERL_FILES := $(filter %.erl,$(ALL_SRC_FILES))
+CORE_FILES := $(filter %.core,$(ALL_SRC_FILES))
+
+# ASN.1 files.
+
+ifneq ($(wildcard asn1/),)
+ASN1_FILES = $(sort $(call core_find,asn1/,*.asn1))
+ERL_FILES += $(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES))))
+
+define compile_asn1
+ $(verbose) mkdir -p include/
+ $(asn1_verbose) erlc -v -I include/ -o asn1/ +noobj $(ERLC_ASN1_OPTS) $(1)
+ $(verbose) mv asn1/*.erl src/
+ -$(verbose) mv asn1/*.hrl include/
+ $(verbose) mv asn1/*.asn1db include/
+endef
+
+$(PROJECT).d:: $(ASN1_FILES)
+ $(if $(strip $?),$(call compile_asn1,$?))
+endif
+
+# SNMP MIB files.
+
+ifneq ($(wildcard mibs/),)
+MIB_FILES = $(sort $(call core_find,mibs/,*.mib))
+
+$(PROJECT).d:: $(COMPILE_MIB_FIRST_PATHS) $(MIB_FILES)
+ $(verbose) mkdir -p include/ priv/mibs/
+ $(mib_verbose) erlc -v $(ERLC_MIB_OPTS) -o priv/mibs/ -I priv/mibs/ $?
+ $(mib_verbose) erlc -o include/ -- $(addprefix priv/mibs/,$(patsubst %.mib,%.bin,$(notdir $?)))
+endif
+
+# Leex and Yecc files.
+
+XRL_FILES := $(filter %.xrl,$(ALL_SRC_FILES))
+XRL_ERL_FILES = $(addprefix src/,$(patsubst %.xrl,%.erl,$(notdir $(XRL_FILES))))
+ERL_FILES += $(XRL_ERL_FILES)
+
+YRL_FILES := $(filter %.yrl,$(ALL_SRC_FILES))
+YRL_ERL_FILES = $(addprefix src/,$(patsubst %.yrl,%.erl,$(notdir $(YRL_FILES))))
+ERL_FILES += $(YRL_ERL_FILES)
+
+$(PROJECT).d:: $(XRL_FILES) $(YRL_FILES)
+ $(if $(strip $?),$(xyrl_verbose) erlc -v -o src/ $(YRL_ERLC_OPTS) $?)
+
+# Erlang and Core Erlang files.
+
+define makedep.erl
+ E = ets:new(makedep, [bag]),
+ G = digraph:new([acyclic]),
+ ErlFiles = lists:usort(string:tokens("$(ERL_FILES)", " ")),
+ DepsDir = "$(call core_native_path,$(DEPS_DIR))",
+ AppsDir = "$(call core_native_path,$(APPS_DIR))",
+ DepsDirsSrc = "$(if $(wildcard $(DEPS_DIR)/*/src), $(call core_native_path,$(wildcard $(DEPS_DIR)/*/src)))",
+ DepsDirsInc = "$(if $(wildcard $(DEPS_DIR)/*/include), $(call core_native_path,$(wildcard $(DEPS_DIR)/*/include)))",
+ AppsDirsSrc = "$(if $(wildcard $(APPS_DIR)/*/src), $(call core_native_path,$(wildcard $(APPS_DIR)/*/src)))",
+ AppsDirsInc = "$(if $(wildcard $(APPS_DIR)/*/include), $(call core_native_path,$(wildcard $(APPS_DIR)/*/include)))",
+ DepsDirs = lists:usort(string:tokens(DepsDirsSrc++DepsDirsInc, " ")),
+ AppsDirs = lists:usort(string:tokens(AppsDirsSrc++AppsDirsInc, " ")),
+ Modules = [{list_to_atom(filename:basename(F, ".erl")), F} || F <- ErlFiles],
+ Add = fun (Mod, Dep) ->
+ case lists:keyfind(Dep, 1, Modules) of
+ false -> ok;
+ {_, DepFile} ->
+ {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+ ets:insert(E, {ModFile, DepFile}),
+ digraph:add_vertex(G, Mod),
+ digraph:add_vertex(G, Dep),
+ digraph:add_edge(G, Mod, Dep)
+ end
+ end,
+ AddHd = fun (F, Mod, DepFile) ->
+ case file:open(DepFile, [read]) of
+ {error, enoent} ->
+ ok;
+ {ok, Fd} ->
+ {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+ case ets:match(E, {ModFile, DepFile}) of
+ [] ->
+ ets:insert(E, {ModFile, DepFile}),
+ F(F, Fd, Mod,0);
+ _ -> ok
+ end
+ end
+ end,
+ SearchHrl = fun
+ F(_Hrl, []) -> {error,enoent};
+ F(Hrl, [Dir|Dirs]) ->
+ HrlF = filename:join([Dir,Hrl]),
+ case filelib:is_file(HrlF) of
+ true ->
+ {ok, HrlF};
+ false -> F(Hrl,Dirs)
+ end
+ end,
+ Attr = fun
+ (_F, Mod, behavior, Dep) ->
+ Add(Mod, Dep);
+ (_F, Mod, behaviour, Dep) ->
+ Add(Mod, Dep);
+ (_F, Mod, compile, {parse_transform, Dep}) ->
+ Add(Mod, Dep);
+ (_F, Mod, compile, Opts) when is_list(Opts) ->
+ case proplists:get_value(parse_transform, Opts) of
+ undefined -> ok;
+ Dep -> Add(Mod, Dep)
+ end;
+ (F, Mod, include, Hrl) ->
+ case SearchHrl(Hrl, ["src", "include",AppsDir,DepsDir]++AppsDirs++DepsDirs) of
+ {ok, FoundHrl} -> AddHd(F, Mod, FoundHrl);
+ {error, _} -> false
+ end;
+ (F, Mod, include_lib, Hrl) ->
+ case SearchHrl(Hrl, ["src", "include",AppsDir,DepsDir]++AppsDirs++DepsDirs) of
+ {ok, FoundHrl} -> AddHd(F, Mod, FoundHrl);
+ {error, _} -> false
+ end;
+ (F, Mod, import, {Imp, _}) ->
+ IsFile =
+ case lists:keyfind(Imp, 1, Modules) of
+ false -> false;
+ {_, FilePath} -> filelib:is_file(FilePath)
+ end,
+ case IsFile of
+ false -> ok;
+ true -> Add(Mod, Imp)
+ end;
+ (_, _, _, _) -> ok
+ end,
+ MakeDepend = fun
+ (F, Fd, Mod, StartLocation) ->
+ {ok, Filename} = file:pid2name(Fd),
+ case io:parse_erl_form(Fd, undefined, StartLocation) of
+ {ok, AbsData, EndLocation} ->
+ case AbsData of
+ {attribute, _, Key, Value} ->
+ Attr(F, Mod, Key, Value),
+ F(F, Fd, Mod, EndLocation);
+ _ -> F(F, Fd, Mod, EndLocation)
+ end;
+ {eof, _ } -> file:close(Fd);
+			{error, _ErrorDescription} ->
+				file:close(Fd);
+ {error, ErrorInfo, ErrorLocation} ->
+ F(F, Fd, Mod, ErrorLocation)
+ end,
+ ok
+ end,
+ [begin
+ Mod = list_to_atom(filename:basename(F, ".erl")),
+ case file:open(F, [read]) of
+ {ok, Fd} -> MakeDepend(MakeDepend, Fd, Mod,0);
+ {error, enoent} -> ok
+ end
+ end || F <- ErlFiles],
+ Depend = sofs:to_external(sofs:relation_to_family(sofs:relation(ets:tab2list(E)))),
+ CompileFirst = [X || X <- lists:reverse(digraph_utils:topsort(G)), [] =/= digraph:in_neighbours(G, X)],
+ TargetPath = fun(Target) ->
+ case lists:keyfind(Target, 1, Modules) of
+ false -> "";
+ {_, DepFile} ->
+ DirSubname = tl(string:tokens(filename:dirname(DepFile), "/")),
+ string:join(DirSubname ++ [atom_to_list(Target)], "/")
+ end
+ end,
+ Output0 = [
+ "# Generated by Erlang.mk. Edit at your own risk!\n\n",
+ [[F, "::", [[" ", D] || D <- Deps], "; @touch \$$@\n"] || {F, Deps} <- Depend],
+ "\nCOMPILE_FIRST +=", [[" ", TargetPath(CF)] || CF <- CompileFirst], "\n"
+ ],
+ Output = case "é" of
+ [233] -> unicode:characters_to_binary(Output0);
+ _ -> Output0
+ end,
+ ok = file:write_file("$(1)", Output),
+ halt()
+endef
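+
+# The generated $(PROJECT).d contains rules of this shape (illustrative,
+# with hypothetical module names):
+#   src/my_mod.erl:: src/my_behaviour.erl include/my.hrl; @touch $@
+#   COMPILE_FIRST += my_behaviour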
+
+ifeq ($(if $(NO_MAKEDEP),$(wildcard $(PROJECT).d),),)
+$(PROJECT).d:: $(ERL_FILES) $(call core_find,include/,*.hrl) $(MAKEFILE_LIST)
+ $(makedep_verbose) $(call erlang,$(call makedep.erl,$@))
+endif
+
+ifeq ($(IS_APP)$(IS_DEP),)
+ifneq ($(words $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES)),0)
+# Rebuild everything when the Makefile changes.
+$(ERLANG_MK_TMP)/last-makefile-change: $(MAKEFILE_LIST) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES); \
+ touch -c $(PROJECT).d; \
+ fi
+ $(verbose) touch $@
+
+$(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES):: $(ERLANG_MK_TMP)/last-makefile-change
+ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change
+endif
+endif
+
+$(PROJECT).d::
+ $(verbose) :
+
+include $(wildcard $(PROJECT).d)
+
+ebin/$(PROJECT).app:: ebin/
+
+ebin/:
+ $(verbose) mkdir -p ebin/
+
+define compile_erl
+ $(erlc_verbose) erlc -v $(if $(IS_DEP),$(filter-out -Werror,$(ERLC_OPTS)),$(ERLC_OPTS)) -o ebin/ \
+ -pa ebin/ -I include/ $(filter-out $(ERLC_EXCLUDE_PATHS),$(COMPILE_FIRST_PATHS) $(1))
+endef
+
+define validate_app_file
+ case file:consult("ebin/$(PROJECT).app") of
+ {ok, _} -> halt();
+ _ -> halt(1)
+ end
+endef
+
+ebin/$(PROJECT).app:: $(ERL_FILES) $(CORE_FILES) $(wildcard src/$(PROJECT).app.src)
+ $(eval FILES_TO_COMPILE := $(filter-out src/$(PROJECT).app.src,$?))
+ $(if $(strip $(FILES_TO_COMPILE)),$(call compile_erl,$(FILES_TO_COMPILE)))
+# Older git versions do not have the --first-parent flag. Do without in that case.
+ $(eval GITDESCRIBE := $(shell git describe --dirty --abbrev=7 --tags --always --first-parent 2>/dev/null \
+ || git describe --dirty --abbrev=7 --tags --always 2>/dev/null || true))
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(filter-out $(ERLC_EXCLUDE_PATHS),$(ERL_FILES) $(CORE_FILES) $(BEAM_FILES)))))))
+ifeq ($(wildcard src/$(PROJECT).app.src),)
+ $(app_verbose) printf '$(subst %,%%,$(subst $(newline),\n,$(subst ','\'',$(call app_file,$(GITDESCRIBE),$(MODULES)))))' \
+ > ebin/$(PROJECT).app
+ $(verbose) if ! $(call erlang,$(call validate_app_file)); then \
+ echo "The .app file produced is invalid. Please verify the value of PROJECT_ENV." >&2; \
+ exit 1; \
+ fi
+else
+ $(verbose) if [ -z "$$(grep -e '^[^%]*{\s*modules\s*,' src/$(PROJECT).app.src)" ]; then \
+ echo "Empty modules entry not found in $(PROJECT).app.src. Please consult the erlang.mk documentation for instructions." >&2; \
+ exit 1; \
+ fi
+ $(appsrc_verbose) cat src/$(PROJECT).app.src \
+ | sed "s/{[[:space:]]*modules[[:space:]]*,[[:space:]]*\[\]}/{modules, \[$(call comma_list,$(MODULES))\]}/" \
+ | sed "s/{id,[[:space:]]*\"git\"}/{id, \"$(subst /,\/,$(GITDESCRIBE))\"}/" \
+ > ebin/$(PROJECT).app
+endif
+ifneq ($(wildcard src/$(PROJECT).appup),)
+ $(verbose) cp src/$(PROJECT).appup ebin/
+endif
+
+clean:: clean-app
+
+clean-app:
+ $(gen_verbose) rm -rf $(PROJECT).d ebin/ priv/mibs/ $(XRL_ERL_FILES) $(YRL_ERL_FILES) \
+ $(addprefix include/,$(patsubst %.mib,%.hrl,$(notdir $(MIB_FILES)))) \
+ $(addprefix include/,$(patsubst %.asn1,%.hrl,$(notdir $(ASN1_FILES)))) \
+ $(addprefix include/,$(patsubst %.asn1,%.asn1db,$(notdir $(ASN1_FILES)))) \
+ $(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES))))
+
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Viktor Söderqvist <viktor@zuiderkwast.se>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: docs-deps
+
+# Configuration.
+
+ALL_DOC_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(DOC_DEPS))
+
+# Targets.
+
+$(foreach dep,$(DOC_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+doc-deps:
+else
+doc-deps: $(ALL_DOC_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_DOC_DEPS_DIRS) ; do $(MAKE) -C $$dep IS_DEP=1; done
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: rel-deps
+
+# Configuration.
+
+ALL_REL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(REL_DEPS))
+
+# Targets.
+
+$(foreach dep,$(REL_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+rel-deps:
+else
+rel-deps: $(ALL_REL_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_REL_DEPS_DIRS) ; do $(MAKE) -C $$dep; done
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: test-deps test-dir test-build clean-test-dir
+
+# Configuration.
+
+TEST_DIR ?= $(CURDIR)/test
+
+ALL_TEST_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(TEST_DEPS))
+
+TEST_ERLC_OPTS ?= +debug_info +warn_export_vars +warn_shadow_vars +warn_obsolete_guard
+TEST_ERLC_OPTS += -DTEST=1
+
+# Targets.
+
+$(foreach dep,$(TEST_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+test-deps:
+else
+test-deps: $(ALL_TEST_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_TEST_DEPS_DIRS) ; do \
+ if [ -z "$(strip $(FULL))" ] && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ else \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ fi \
+ done
+endif
+
+ifneq ($(wildcard $(TEST_DIR)),)
+test-dir: $(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build
+ @:
+
+test_erlc_verbose_0 = @echo " ERLC " $(filter-out $(patsubst %,%.erl,$(ERLC_EXCLUDE)),\
+ $(filter %.erl %.core,$(notdir $(FILES_TO_COMPILE))));
+test_erlc_verbose_2 = set -x;
+test_erlc_verbose = $(test_erlc_verbose_$(V))
+
+define compile_test_erl
+ $(test_erlc_verbose) erlc -v $(TEST_ERLC_OPTS) -o $(TEST_DIR) \
+ -pa ebin/ -I include/ $(1)
+endef
+
+ERL_TEST_FILES = $(call core_find,$(TEST_DIR)/,*.erl)
+$(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build: $(ERL_TEST_FILES) $(MAKEFILE_LIST)
+ $(eval FILES_TO_COMPILE := $(if $(filter $(MAKEFILE_LIST),$?),$(filter $(ERL_TEST_FILES),$^),$?))
+ $(if $(strip $(FILES_TO_COMPILE)),$(call compile_test_erl,$(FILES_TO_COMPILE)) && touch $@)
+endif
+
+test-build:: IS_TEST=1
+test-build:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build:: $(if $(wildcard src),$(if $(wildcard ebin/test),,clean)) $(if $(IS_APP),,deps test-deps)
+# We already compiled everything when IS_APP=1.
+ifndef IS_APP
+ifneq ($(wildcard src),)
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(verbose) $(MAKE) --no-print-directory app-build ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(gen_verbose) touch ebin/test
+endif
+ifneq ($(wildcard $(TEST_DIR)),)
+ $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+endif
+endif
+
+# Roughly the same as test-build, but when IS_APP=1.
+# We only care about compiling the current application.
+ifdef IS_APP
+test-build-app:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build-app:: deps test-deps
+ifneq ($(wildcard src),)
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(verbose) $(MAKE) --no-print-directory app-build ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(gen_verbose) touch ebin/test
+endif
+ifneq ($(wildcard $(TEST_DIR)),)
+ $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+endif
+endif
+
+clean:: clean-test-dir
+
+clean-test-dir:
+ifneq ($(wildcard $(TEST_DIR)/*.beam),)
+ $(gen_verbose) rm -f $(TEST_DIR)/*.beam $(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: rebar.config
+
+# We strip out -Werror because we don't want to fail due to
+# warnings when used as a dependency.
+
+compat_prepare_erlc_opts = $(shell echo "$1" | sed 's/, */,/g')
+
+define compat_convert_erlc_opts
+$(if $(filter-out -Werror,$1),\
+ $(if $(findstring +,$1),\
+ $(shell echo $1 | cut -b 2-)))
+endef
+
+define compat_erlc_opts_to_list
+[$(call comma_list,$(foreach o,$(call compat_prepare_erlc_opts,$1),$(call compat_convert_erlc_opts,$o)))]
+endef
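+
+# Example (illustrative) conversion:
+#   ERLC_OPTS = -Werror +debug_info +warn_export_vars
+# produces: {erl_opts, [debug_info,warn_export_vars]}.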
+
+define compat_rebar_config
+{deps, [
+$(call comma_list,$(foreach d,$(DEPS),\
+ $(if $(filter hex,$(call dep_fetch,$d)),\
+ {$(call dep_name,$d)$(comma)"$(call dep_repo,$d)"},\
+ {$(call dep_name,$d)$(comma)".*"$(comma){git,"$(call dep_repo,$d)"$(comma)"$(call dep_commit,$d)"}})))
+]}.
+{erl_opts, $(call compat_erlc_opts_to_list,$(ERLC_OPTS))}.
+endef
+
+rebar.config:
+ $(gen_verbose) $(call core_render,compat_rebar_config,rebar.config)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter asciideck,$(DEPS) $(DOC_DEPS)),asciideck)
+
+.PHONY: asciidoc asciidoc-guide asciidoc-manual install-asciidoc distclean-asciidoc-guide distclean-asciidoc-manual
+
+# Core targets.
+
+docs:: asciidoc
+
+distclean:: distclean-asciidoc-guide distclean-asciidoc-manual
+
+# Plugin-specific targets.
+
+asciidoc: asciidoc-guide asciidoc-manual
+
+# User guide.
+
+ifeq ($(wildcard doc/src/guide/book.asciidoc),)
+asciidoc-guide:
+else
+asciidoc-guide: distclean-asciidoc-guide doc-deps
+ a2x -v -f pdf doc/src/guide/book.asciidoc && mv doc/src/guide/book.pdf doc/guide.pdf
+ a2x -v -f chunked doc/src/guide/book.asciidoc && mv doc/src/guide/book.chunked/ doc/html/
+
+distclean-asciidoc-guide:
+ $(gen_verbose) rm -rf doc/html/ doc/guide.pdf
+endif
+
+# Man pages.
+
+ASCIIDOC_MANUAL_FILES := $(wildcard doc/src/manual/*.asciidoc)
+
+ifeq ($(ASCIIDOC_MANUAL_FILES),)
+asciidoc-manual:
+else
+
+# Configuration.
+
+MAN_INSTALL_PATH ?= /usr/local/share/man
+MAN_SECTIONS ?= 3 7
+MAN_PROJECT ?= $(shell echo $(PROJECT) | sed 's/^./\U&\E/')
+MAN_VERSION ?= $(PROJECT_VERSION)
+
+# Plugin-specific targets.
+
+define asciidoc2man.erl
+try
+ [begin
+ io:format(" ADOC ~s~n", [F]),
+ ok = asciideck:to_manpage(asciideck:parse_file(F), #{
+ compress => gzip,
+ outdir => filename:dirname(F),
+ extra2 => "$(MAN_PROJECT) $(MAN_VERSION)",
+ extra3 => "$(MAN_PROJECT) Function Reference"
+ })
+ end || F <- [$(shell echo $(addprefix $(comma)\",$(addsuffix \",$1)) | sed 's/^.//')]],
+ halt(0)
+catch C:E ->
+ io:format("Exception ~p:~p~nStacktrace: ~p~n", [C, E, erlang:get_stacktrace()]),
+ halt(1)
+end.
+endef
+
+asciidoc-manual:: doc-deps
+
+asciidoc-manual:: $(ASCIIDOC_MANUAL_FILES)
+ $(gen_verbose) $(call erlang,$(call asciidoc2man.erl,$?))
+ $(verbose) $(foreach s,$(MAN_SECTIONS),mkdir -p doc/man$s/ && mv doc/src/manual/*.$s.gz doc/man$s/;)
+
+install-docs:: install-asciidoc
+
+install-asciidoc: asciidoc-manual
+ $(foreach s,$(MAN_SECTIONS),\
+ mkdir -p $(MAN_INSTALL_PATH)/man$s/ && \
+ install -g `id -g` -o `id -u` -m 0644 doc/man$s/*.gz $(MAN_INSTALL_PATH)/man$s/;)
+
+distclean-asciidoc-manual:
+ $(gen_verbose) rm -rf $(addprefix doc/man,$(MAN_SECTIONS))
+endif
+endif
+
+# Copyright (c) 2014-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: bootstrap bootstrap-lib bootstrap-rel new list-templates
+
+# Core targets.
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Bootstrap targets:" \
+ " bootstrap Generate a skeleton of an OTP application" \
+ " bootstrap-lib Generate a skeleton of an OTP library" \
+ " bootstrap-rel Generate the files needed to build a release" \
+ " new-app in=NAME Create a new local OTP application NAME" \
+ " new-lib in=NAME Create a new local OTP library NAME" \
+ " new t=TPL n=NAME Generate a module NAME based on the template TPL" \
+ " new t=T n=N in=APP Generate a module NAME based on the template TPL in APP" \
+ " list-templates List available templates"
+
+# Bootstrap templates.
+
+define bs_appsrc
+{application, $p, [
+ {description, ""},
+ {vsn, "0.1.0"},
+ {id, "git"},
+ {modules, []},
+ {registered, []},
+ {applications, [
+ kernel,
+ stdlib
+ ]},
+ {mod, {$p_app, []}},
+ {env, []}
+]}.
+endef
+
+define bs_appsrc_lib
+{application, $p, [
+ {description, ""},
+ {vsn, "0.1.0"},
+ {id, "git"},
+ {modules, []},
+ {registered, []},
+ {applications, [
+ kernel,
+ stdlib
+ ]}
+]}.
+endef
+
+# To prevent autocompletion issues with ZSH, we add "include erlang.mk"
+# separately during the actual bootstrap.
+define bs_Makefile
+PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.1.0
+$(if $(SP),
+# Whitespace to be used when creating files from templates.
+SP = $(SP)
+)
+endef
+
+define bs_apps_Makefile
+PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.1.0
+$(if $(SP),
+# Whitespace to be used when creating files from templates.
+SP = $(SP)
+)
+# Make sure we know where the applications are located.
+ROOT_DIR ?= $(call core_relpath,$(dir $(ERLANG_MK_FILENAME)),$(APPS_DIR)/app)
+APPS_DIR ?= ..
+DEPS_DIR ?= $(call core_relpath,$(DEPS_DIR),$(APPS_DIR)/app)
+
+include $$(ROOT_DIR)/erlang.mk
+endef
+
+define bs_app
+-module($p_app).
+-behaviour(application).
+
+-export([start/2]).
+-export([stop/1]).
+
+start(_Type, _Args) ->
+ $p_sup:start_link().
+
+stop(_State) ->
+ ok.
+endef
+
+define bs_relx_config
+{release, {$p_release, "1"}, [$p, sasl, runtime_tools]}.
+{extended_start_script, true}.
+{sys_config, "config/sys.config"}.
+{vm_args, "config/vm.args"}.
+endef
+
+define bs_sys_config
+[
+].
+endef
+
+define bs_vm_args
+-name $p@127.0.0.1
+-setcookie $p
+-heart
+endef
+
+# Normal templates.
+
+define tpl_supervisor
+-module($(n)).
+-behaviour(supervisor).
+
+-export([start_link/0]).
+-export([init/1]).
+
+start_link() ->
+ supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+init([]) ->
+ Procs = [],
+ {ok, {{one_for_one, 1, 5}, Procs}}.
+endef
+
+define tpl_gen_server
+-module($(n)).
+-behaviour(gen_server).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_server.
+-export([init/1]).
+-export([handle_call/3]).
+-export([handle_cast/2]).
+-export([handle_info/2]).
+-export([terminate/2]).
+-export([code_change/3]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_server:start_link(?MODULE, [], []).
+
+%% gen_server.
+
+init([]) ->
+ {ok, #state{}}.
+
+handle_call(_Request, _From, State) ->
+ {reply, ignored, State}.
+
+handle_cast(_Msg, State) ->
+ {noreply, State}.
+
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+endef
+
+define tpl_module
+-module($(n)).
+-export([]).
+endef
+
+define tpl_cowboy_http
+-module($(n)).
+-behaviour(cowboy_http_handler).
+
+-export([init/3]).
+-export([handle/2]).
+-export([terminate/3]).
+
+-record(state, {
+}).
+
+init(_, Req, _Opts) ->
+ {ok, Req, #state{}}.
+
+handle(Req, State=#state{}) ->
+ {ok, Req2} = cowboy_req:reply(200, Req),
+ {ok, Req2, State}.
+
+terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_gen_fsm
+-module($(n)).
+-behaviour(gen_fsm).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_fsm.
+-export([init/1]).
+-export([state_name/2]).
+-export([handle_event/3]).
+-export([state_name/3]).
+-export([handle_sync_event/4]).
+-export([handle_info/3]).
+-export([terminate/3]).
+-export([code_change/4]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_fsm:start_link(?MODULE, [], []).
+
+%% gen_fsm.
+
+init([]) ->
+ {ok, state_name, #state{}}.
+
+state_name(_Event, StateData) ->
+ {next_state, state_name, StateData}.
+
+handle_event(_Event, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+state_name(_Event, _From, StateData) ->
+ {reply, ignored, state_name, StateData}.
+
+handle_sync_event(_Event, _From, StateName, StateData) ->
+ {reply, ignored, StateName, StateData}.
+
+handle_info(_Info, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+terminate(_Reason, _StateName, _StateData) ->
+ ok.
+
+code_change(_OldVsn, StateName, StateData, _Extra) ->
+ {ok, StateName, StateData}.
+endef
+
+define tpl_gen_statem
+-module($(n)).
+-behaviour(gen_statem).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_statem.
+-export([callback_mode/0]).
+-export([init/1]).
+-export([state_name/3]).
+-export([handle_event/4]).
+-export([terminate/3]).
+-export([code_change/4]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_statem:start_link(?MODULE, [], []).
+
+%% gen_statem.
+
+callback_mode() ->
+ state_functions.
+
+init([]) ->
+ {ok, state_name, #state{}}.
+
+state_name(_EventType, _EventData, StateData) ->
+ {next_state, state_name, StateData}.
+
+handle_event(_EventType, _EventData, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+terminate(_Reason, _StateName, _StateData) ->
+ ok.
+
+code_change(_OldVsn, StateName, StateData, _Extra) ->
+ {ok, StateName, StateData}.
+endef
+
+define tpl_cowboy_loop
+-module($(n)).
+-behaviour(cowboy_loop_handler).
+
+-export([init/3]).
+-export([info/3]).
+-export([terminate/3]).
+
+-record(state, {
+}).
+
+init(_, Req, _Opts) ->
+ {loop, Req, #state{}, 5000, hibernate}.
+
+info(_Info, Req, State) ->
+ {loop, Req, State, hibernate}.
+
+terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_cowboy_rest
+-module($(n)).
+
+-export([init/3]).
+-export([content_types_provided/2]).
+-export([get_html/2]).
+
+init(_, _Req, _Opts) ->
+ {upgrade, protocol, cowboy_rest}.
+
+content_types_provided(Req, State) ->
+ {[{{<<"text">>, <<"html">>, '*'}, get_html}], Req, State}.
+
+get_html(Req, State) ->
+ {<<"<html><body>This is REST!</body></html>">>, Req, State}.
+endef
+
+define tpl_cowboy_ws
+-module($(n)).
+-behaviour(cowboy_websocket_handler).
+
+-export([init/3]).
+-export([websocket_init/3]).
+-export([websocket_handle/3]).
+-export([websocket_info/3]).
+-export([websocket_terminate/3]).
+
+-record(state, {
+}).
+
+init(_, _, _) ->
+ {upgrade, protocol, cowboy_websocket}.
+
+websocket_init(_, Req, _Opts) ->
+ Req2 = cowboy_req:compact(Req),
+ {ok, Req2, #state{}}.
+
+websocket_handle({text, Data}, Req, State) ->
+ {reply, {text, Data}, Req, State};
+websocket_handle({binary, Data}, Req, State) ->
+ {reply, {binary, Data}, Req, State};
+websocket_handle(_Frame, Req, State) ->
+ {ok, Req, State}.
+
+websocket_info(_Info, Req, State) ->
+ {ok, Req, State}.
+
+websocket_terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_ranch_protocol
+-module($(n)).
+-behaviour(ranch_protocol).
+
+-export([start_link/4]).
+-export([init/4]).
+
+-type opts() :: [].
+-export_type([opts/0]).
+
+-record(state, {
+ socket :: inet:socket(),
+ transport :: module()
+}).
+
+start_link(Ref, Socket, Transport, Opts) ->
+ Pid = spawn_link(?MODULE, init, [Ref, Socket, Transport, Opts]),
+ {ok, Pid}.
+
+-spec init(ranch:ref(), inet:socket(), module(), opts()) -> ok.
+init(Ref, Socket, Transport, _Opts) ->
+ ok = ranch:accept_ack(Ref),
+ loop(#state{socket=Socket, transport=Transport}).
+
+loop(State) ->
+ loop(State).
+endef
+
+# Plugin-specific targets.
+
+ifndef WS
+ifdef SP
+WS = $(subst a,,a $(wordlist 1,$(SP),a a a a a a a a a a a a a a a a a a a a))
+else
+WS = $(tab)
+endif
+endif
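+
+# Example (illustrative): render the generated files with 4 spaces
+# instead of tabs.
+#   $ make bootstrap SP=4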
+
+bootstrap:
+ifneq ($(wildcard src/),)
+ $(error Error: src/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(eval n := $(PROJECT)_sup)
+ $(verbose) $(call core_render,bs_Makefile,Makefile)
+ $(verbose) echo "include erlang.mk" >> Makefile
+ $(verbose) mkdir src/
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc,src/$(PROJECT).app.src)
+endif
+ $(verbose) $(call core_render,bs_app,src/$(PROJECT)_app.erl)
+ $(verbose) $(call core_render,tpl_supervisor,src/$(PROJECT)_sup.erl)
+
+bootstrap-lib:
+ifneq ($(wildcard src/),)
+ $(error Error: src/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(verbose) $(call core_render,bs_Makefile,Makefile)
+ $(verbose) echo "include erlang.mk" >> Makefile
+ $(verbose) mkdir src/
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc_lib,src/$(PROJECT).app.src)
+endif
+
+bootstrap-rel:
+ifneq ($(wildcard relx.config),)
+ $(error Error: relx.config already exists)
+endif
+ifneq ($(wildcard config/),)
+ $(error Error: config/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(verbose) $(call core_render,bs_relx_config,relx.config)
+ $(verbose) mkdir config/
+ $(verbose) $(call core_render,bs_sys_config,config/sys.config)
+ $(verbose) $(call core_render,bs_vm_args,config/vm.args)
+
+new-app:
+ifndef in
+ $(error Usage: $(MAKE) new-app in=APP)
+endif
+ifneq ($(wildcard $(APPS_DIR)/$in),)
+ $(error Error: Application $in already exists)
+endif
+ $(eval p := $(in))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(eval n := $(in)_sup)
+ $(verbose) mkdir -p $(APPS_DIR)/$p/src/
+ $(verbose) $(call core_render,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile)
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc,$(APPS_DIR)/$p/src/$p.app.src)
+endif
+ $(verbose) $(call core_render,bs_app,$(APPS_DIR)/$p/src/$p_app.erl)
+ $(verbose) $(call core_render,tpl_supervisor,$(APPS_DIR)/$p/src/$p_sup.erl)
+
+new-lib:
+ifndef in
+ $(error Usage: $(MAKE) new-lib in=APP)
+endif
+ifneq ($(wildcard $(APPS_DIR)/$in),)
+ $(error Error: Application $in already exists)
+endif
+ $(eval p := $(in))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(verbose) mkdir -p $(APPS_DIR)/$p/src/
+ $(verbose) $(call core_render,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile)
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc_lib,$(APPS_DIR)/$p/src/$p.app.src)
+endif
+
+new:
+ifeq ($(wildcard src/)$(in),)
+ $(error Error: src/ directory does not exist)
+endif
+ifndef t
+ $(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP])
+endif
+ifndef n
+ $(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP])
+endif
+ifdef in
+ $(verbose) $(call core_render,tpl_$(t),$(APPS_DIR)/$(in)/src/$(n).erl)
+else
+ $(verbose) $(call core_render,tpl_$(t),src/$(n).erl)
+endif
+
+list-templates:
+	$(verbose) echo Available templates:
+ $(verbose) printf " %s\n" $(sort $(patsubst tpl_%,%,$(filter tpl_%,$(.VARIABLES))))
+
+# Copyright (c) 2014-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: clean-c_src distclean-c_src-env
+
+# Configuration.
+
+C_SRC_DIR ?= $(CURDIR)/c_src
+C_SRC_ENV ?= $(C_SRC_DIR)/env.mk
+C_SRC_OUTPUT ?= $(CURDIR)/priv/$(PROJECT)
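+# C_SRC_TYPE selects the artifact to build: "shared" produces a shared
+# library (.so/.dll, see the extensions below), any other value produces
+# a standalone executable.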
+C_SRC_TYPE ?= shared
+
+# System type and C compiler/flags.
+
+ifeq ($(PLATFORM),msys2)
+ C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?= .exe
+ C_SRC_OUTPUT_SHARED_EXTENSION ?= .dll
+else
+ C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?=
+ C_SRC_OUTPUT_SHARED_EXTENSION ?= .so
+endif
+
+ifeq ($(C_SRC_TYPE),shared)
+ C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_SHARED_EXTENSION)
+else
+ C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_EXECUTABLE_EXTENSION)
+endif
+
+ifeq ($(PLATFORM),msys2)
+# We hardcode the compiler used on MSYS2. The default CC=cc does
+# not produce working code. The "gcc" MSYS2 package also doesn't.
+ CC = /mingw64/bin/gcc
+ export CC
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),darwin)
+ CC ?= cc
+ CFLAGS ?= -O3 -std=c99 -arch x86_64 -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -arch x86_64 -Wall
+ LDFLAGS ?= -arch x86_64 -flat_namespace -undefined suppress
+else ifeq ($(PLATFORM),freebsd)
+ CC ?= cc
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),linux)
+ CC ?= gcc
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+endif
+
+ifneq ($(PLATFORM),msys2)
+ CFLAGS += -fPIC
+ CXXFLAGS += -fPIC
+endif
+
+CFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
+CXXFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
+
+LDLIBS += -L"$(ERL_INTERFACE_LIB_DIR)" -lei
+
+# Verbosity.
+
+c_verbose_0 = @echo " C " $(filter-out $(notdir $(MAKEFILE_LIST) $(C_SRC_ENV)),$(^F));
+c_verbose = $(c_verbose_$(V))
+
+cpp_verbose_0 = @echo " CPP " $(filter-out $(notdir $(MAKEFILE_LIST) $(C_SRC_ENV)),$(^F));
+cpp_verbose = $(cpp_verbose_$(V))
+
+link_verbose_0 = @echo " LD " $(@F);
+link_verbose = $(link_verbose_$(V))
+
+# Targets.
+
+ifeq ($(wildcard $(C_SRC_DIR)),)
+else ifneq ($(wildcard $(C_SRC_DIR)/Makefile),)
+app:: app-c_src
+
+test-build:: app-c_src
+
+app-c_src:
+ $(MAKE) -C $(C_SRC_DIR)
+
+clean::
+ $(MAKE) -C $(C_SRC_DIR) clean
+
+else
+
+ifeq ($(SOURCES),)
+SOURCES := $(sort $(foreach pat,*.c *.C *.cc *.cpp,$(call core_find,$(C_SRC_DIR)/,$(pat))))
+endif
+OBJECTS = $(addsuffix .o, $(basename $(SOURCES)))
+
+COMPILE_C = $(c_verbose) $(CC) $(CFLAGS) $(CPPFLAGS) -c
+COMPILE_CPP = $(cpp_verbose) $(CXX) $(CXXFLAGS) $(CPPFLAGS) -c
+
+app:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
+
+test-build:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
+
+$(C_SRC_OUTPUT_FILE): $(OBJECTS)
+ $(verbose) mkdir -p $(dir $@)
+ $(link_verbose) $(CC) $(OBJECTS) \
+ $(LDFLAGS) $(if $(filter $(C_SRC_TYPE),shared),-shared) $(LDLIBS) \
+ -o $(C_SRC_OUTPUT_FILE)
+
+$(OBJECTS): $(MAKEFILE_LIST) $(C_SRC_ENV)
+
+%.o: %.c
+ $(COMPILE_C) $(OUTPUT_OPTION) $<
+
+%.o: %.cc
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+%.o: %.C
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+%.o: %.cpp
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+clean:: clean-c_src
+
+clean-c_src:
+ $(gen_verbose) rm -f $(C_SRC_OUTPUT_FILE) $(OBJECTS)
+
+endif
+
+ifneq ($(wildcard $(C_SRC_DIR)),)
+ERL_ERTS_DIR = $(shell $(ERL) -eval 'io:format("~s~n", [code:lib_dir(erts)]), halt().')
+
+$(C_SRC_ENV):
+ $(verbose) $(ERL) -eval "file:write_file(\"$(call core_native_path,$(C_SRC_ENV))\", \
+ io_lib:format( \
+ \"# Generated by Erlang.mk. Edit at your own risk!~n~n\" \
+ \"ERTS_INCLUDE_DIR ?= ~s/erts-~s/include/~n\" \
+ \"ERL_INTERFACE_INCLUDE_DIR ?= ~s~n\" \
+ \"ERL_INTERFACE_LIB_DIR ?= ~s~n\" \
+ \"ERTS_DIR ?= $(ERL_ERTS_DIR)~n\", \
+ [code:root_dir(), erlang:system_info(version), \
+ code:lib_dir(erl_interface, include), \
+ code:lib_dir(erl_interface, lib)])), \
+ halt()."
+
+distclean:: distclean-c_src-env
+
+distclean-c_src-env:
+ $(gen_verbose) rm -f $(C_SRC_ENV)
+
+-include $(C_SRC_ENV)
+
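+# If the erts path recorded in env.mk (ERTS_DIR) no longer matches the one
+# detected from the current Erlang installation (ERL_ERTS_DIR), remove
+# env.mk so that it is regenerated on the next run.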
+ifneq ($(ERL_ERTS_DIR),$(ERTS_DIR))
+$(shell rm -f $(C_SRC_ENV))
+endif
+endif
+
+# Templates.
+
+define bs_c_nif
+#include "erl_nif.h"
+
+static int loads = 0;
+
+static int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
+{
+ /* Initialize private data. */
+ *priv_data = NULL;
+
+ loads++;
+
+ return 0;
+}
+
+static int upgrade(ErlNifEnv* env, void** priv_data, void** old_priv_data, ERL_NIF_TERM load_info)
+{
+ /* Convert the private data to the new version. */
+ *priv_data = *old_priv_data;
+
+ loads++;
+
+ return 0;
+}
+
+static void unload(ErlNifEnv* env, void* priv_data)
+{
+ if (loads == 1) {
+ /* Destroy the private data. */
+ }
+
+ loads--;
+}
+
+static ERL_NIF_TERM hello(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ if (enif_is_atom(env, argv[0])) {
+ return enif_make_tuple2(env,
+ enif_make_atom(env, "hello"),
+ argv[0]);
+ }
+
+ return enif_make_tuple2(env,
+ enif_make_atom(env, "error"),
+ enif_make_atom(env, "badarg"));
+}
+
+static ErlNifFunc nif_funcs[] = {
+ {"hello", 1, hello}
+};
+
+ERL_NIF_INIT($n, nif_funcs, load, NULL, upgrade, unload)
+endef
+
+define bs_erl_nif
+-module($n).
+
+-export([hello/1]).
+
+-on_load(on_load/0).
+on_load() ->
+ PrivDir = case code:priv_dir(?MODULE) of
+ {error, _} ->
+ AppPath = filename:dirname(filename:dirname(code:which(?MODULE))),
+ filename:join(AppPath, "priv");
+ Path ->
+ Path
+ end,
+ erlang:load_nif(filename:join(PrivDir, atom_to_list(?MODULE)), 0).
+
+hello(_) ->
+ erlang:nif_error({not_loaded, ?MODULE}).
+endef
+
+new-nif:
+ifneq ($(wildcard $(C_SRC_DIR)/$n.c),)
+ $(error Error: $(C_SRC_DIR)/$n.c already exists)
+endif
+ifneq ($(wildcard src/$n.erl),)
+ $(error Error: src/$n.erl already exists)
+endif
+ifndef n
+ $(error Usage: $(MAKE) new-nif n=NAME [in=APP])
+endif
+ifdef in
+ $(verbose) $(MAKE) -C $(APPS_DIR)/$(in)/ new-nif n=$n in=
+else
+ $(verbose) mkdir -p $(C_SRC_DIR) src/
+ $(verbose) $(call core_render,bs_c_nif,$(C_SRC_DIR)/$n.c)
+ $(verbose) $(call core_render,bs_erl_nif,src/$n.erl)
+endif
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: ci ci-prepare ci-setup
+
+CI_OTP ?=
+CI_HIPE ?=
+CI_ERLLVM ?=
+
+ifeq ($(CI_VM),native)
+ERLC_OPTS += +native
+TEST_ERLC_OPTS += +native
+else ifeq ($(CI_VM),erllvm)
+ERLC_OPTS += +native +'{hipe, [to_llvm]}'
+TEST_ERLC_OPTS += +native +'{hipe, [to_llvm]}'
+endif
+
+ifeq ($(strip $(CI_OTP) $(CI_HIPE) $(CI_ERLLVM)),)
+ci::
+else
+
+ci:: $(addprefix ci-,$(CI_OTP) $(addsuffix -native,$(CI_HIPE)) $(addsuffix -erllvm,$(CI_ERLLVM)))
+
+ci-prepare: $(addprefix $(KERL_INSTALL_DIR)/,$(CI_OTP) $(addsuffix -native,$(CI_HIPE)))
+
+ci-setup::
+ $(verbose) :
+
+ci-extra::
+ $(verbose) :
+
+ci_verbose_0 = @echo " CI " $(1);
+ci_verbose = $(ci_verbose_$(V))
+
+define ci_target
+ci-$1: $(KERL_INSTALL_DIR)/$2
+ $(verbose) $(MAKE) --no-print-directory clean
+ $(ci_verbose) \
+ PATH="$(KERL_INSTALL_DIR)/$2/bin:$(PATH)" \
+ CI_OTP_RELEASE="$1" \
+ CT_OPTS="-label $1" \
+ CI_VM="$3" \
+ $(MAKE) ci-setup tests
+ $(verbose) $(MAKE) --no-print-directory ci-extra
+endef
+
+$(foreach otp,$(CI_OTP),$(eval $(call ci_target,$(otp),$(otp),otp)))
+$(foreach otp,$(CI_HIPE),$(eval $(call ci_target,$(otp)-native,$(otp)-native,native)))
+$(foreach otp,$(CI_ERLLVM),$(eval $(call ci_target,$(otp)-erllvm,$(otp)-native,erllvm)))
+
+$(foreach otp,$(filter-out $(ERLANG_OTP),$(CI_OTP)),$(eval $(call kerl_otp_target,$(otp))))
+$(foreach otp,$(filter-out $(ERLANG_HIPE),$(sort $(CI_HIPE) $(CI_ERLLVM))),$(eval $(call kerl_hipe_target,$(otp))))
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Continuous Integration targets:" \
+ " ci Run '$(MAKE) tests' on all configured Erlang versions." \
+ "" \
+ "The CI_OTP variable must be defined with the Erlang versions" \
+ "that must be tested. For example: CI_OTP = OTP-17.3.4 OTP-17.5.3"
+
+endif
+
+# Copyright (c) 2020, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifdef CONCUERROR_TESTS
+
+.PHONY: concuerror distclean-concuerror
+
+# Configuration
+
+CONCUERROR_LOGS_DIR ?= $(CURDIR)/logs
+CONCUERROR_OPTS ?=
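+# CONCUERROR_TESTS is expected to be a list of module:function pairs, for
+# example (hypothetical names): CONCUERROR_TESTS = my_mod:my_test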
+
+# Core targets.
+
+check:: concuerror
+
+ifndef KEEP_LOGS
+distclean:: distclean-concuerror
+endif
+
+# Plugin-specific targets.
+
+$(ERLANG_MK_TMP)/Concuerror/bin/concuerror: | $(ERLANG_MK_TMP)
+ $(verbose) git clone https://github.com/parapluu/Concuerror $(ERLANG_MK_TMP)/Concuerror
+ $(verbose) $(MAKE) -C $(ERLANG_MK_TMP)/Concuerror
+
+$(CONCUERROR_LOGS_DIR):
+ $(verbose) mkdir -p $(CONCUERROR_LOGS_DIR)
+
+define concuerror_html_report
+<!DOCTYPE html>
+<html lang="en">
+<head>
+<meta charset="utf-8">
+<title>Concuerror HTML report</title>
+</head>
+<body>
+<h1>Concuerror HTML report</h1>
+<p>Generated on $(concuerror_date)</p>
+<ul>
+$(foreach t,$(concuerror_targets),<li><a href="$(t).txt">$(t)</a></li>)
+</ul>
+</body>
+</html>
+endef
+
+concuerror: $(addprefix concuerror-,$(subst :,-,$(CONCUERROR_TESTS)))
+ $(eval concuerror_date := $(shell date))
+ $(eval concuerror_targets := $^)
+ $(verbose) $(call core_render,concuerror_html_report,$(CONCUERROR_LOGS_DIR)/concuerror.html)
+
+define concuerror_target
+.PHONY: concuerror-$1-$2
+
+concuerror-$1-$2: test-build | $(ERLANG_MK_TMP)/Concuerror/bin/concuerror $(CONCUERROR_LOGS_DIR)
+ $(ERLANG_MK_TMP)/Concuerror/bin/concuerror \
+ --pa $(CURDIR)/ebin --pa $(TEST_DIR) \
+ -o $(CONCUERROR_LOGS_DIR)/concuerror-$1-$2.txt \
+ $$(CONCUERROR_OPTS) -m $1 -t $2
+endef
+
+$(foreach test,$(CONCUERROR_TESTS),$(eval $(call concuerror_target,$(firstword $(subst :, ,$(test))),$(lastword $(subst :, ,$(test))))))
+
+distclean-concuerror:
+ $(gen_verbose) rm -rf $(CONCUERROR_LOGS_DIR)
+
+endif
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: ct apps-ct distclean-ct
+
+# Configuration.
+
+CT_OPTS ?=
+
+ifneq ($(wildcard $(TEST_DIR)),)
+ifndef CT_SUITES
+CT_SUITES := $(sort $(subst _SUITE.erl,,$(notdir $(call core_find,$(TEST_DIR)/,*_SUITE.erl))))
+endif
+endif
+CT_SUITES ?=
+CT_LOGS_DIR ?= $(CURDIR)/logs
+
+# Core targets.
+
+tests:: ct
+
+ifndef KEEP_LOGS
+distclean:: distclean-ct
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Common_test targets:" \
+ " ct Run all the common_test suites for this project" \
+ "" \
+ "All your common_test suites have their associated targets." \
+	"A suite named http_SUITE can be run using the ct-http target."
+
+# Plugin-specific targets.
+
+CT_RUN = ct_run \
+ -no_auto_compile \
+ -noinput \
+ -pa $(CURDIR)/ebin $(TEST_DIR) \
+ -dir $(TEST_DIR) \
+ -logdir $(CT_LOGS_DIR)
+
+ifeq ($(CT_SUITES),)
+ct: $(if $(IS_APP)$(ROOT_DIR),,apps-ct)
+else
+# We do not run tests if we are in an apps/* with no test directory.
+ifneq ($(IS_APP)$(wildcard $(TEST_DIR)),1)
+ct: test-build $(if $(IS_APP)$(ROOT_DIR),,apps-ct)
+ $(verbose) mkdir -p $(CT_LOGS_DIR)
+ $(gen_verbose) $(CT_RUN) -sname ct_$(PROJECT) -suite $(addsuffix _SUITE,$(CT_SUITES)) $(CT_OPTS)
+endif
+endif
+
+ifneq ($(ALL_APPS_DIRS),)
+define ct_app_target
+apps-ct-$1: test-build
+ $$(MAKE) -C $1 ct IS_APP=1
+endef
+
+$(foreach app,$(ALL_APPS_DIRS),$(eval $(call ct_app_target,$(app))))
+
+apps-ct: $(addprefix apps-ct-,$(ALL_APPS_DIRS))
+endif
+
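+# t and c select a subset of a suite to run, e.g. with a hypothetical
+# http_SUITE: "make ct-http t=admin" (one group), "make ct-http
+# t=admin:get_ok" (one case within a group) or "make ct-http c=get_ok".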
+ifdef t
+ifeq (,$(findstring :,$t))
+CT_EXTRA = -group $t
+else
+t_words = $(subst :, ,$t)
+CT_EXTRA = -group $(firstword $(t_words)) -case $(lastword $(t_words))
+endif
+else
+ifdef c
+CT_EXTRA = -case $c
+else
+CT_EXTRA =
+endif
+endif
+
+define ct_suite_target
+ct-$(1): test-build
+ $(verbose) mkdir -p $(CT_LOGS_DIR)
+ $(gen_verbose_esc) $(CT_RUN) -sname ct_$(PROJECT) -suite $(addsuffix _SUITE,$(1)) $(CT_EXTRA) $(CT_OPTS)
+endef
+
+$(foreach test,$(CT_SUITES),$(eval $(call ct_suite_target,$(test))))
+
+distclean-ct:
+ $(gen_verbose) rm -rf $(CT_LOGS_DIR)
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: plt distclean-plt dialyze
+
+# Configuration.
+
+DIALYZER_PLT ?= $(CURDIR)/.$(PROJECT).plt
+export DIALYZER_PLT
+
+PLT_APPS ?=
+DIALYZER_DIRS ?= --src -r $(wildcard src) $(ALL_APPS_DIRS)
+DIALYZER_OPTS ?= -Werror_handling -Wrace_conditions -Wunmatched_returns # -Wunderspecs
+DIALYZER_PLT_OPTS ?=
+
+# Core targets.
+
+check:: dialyze
+
+distclean:: distclean-plt
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Dialyzer targets:" \
+ " plt Build a PLT file for this project" \
+ " dialyze Analyze the project using Dialyzer"
+
+# Plugin-specific targets.
+
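+# filter_opts.erl keeps only the -D, -I and -pa options (and their values)
+# from the erlc options passed via -extra, as those are the ones passed on
+# to Dialyzer below.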
+define filter_opts.erl
+ Opts = init:get_plain_arguments(),
+ {Filtered, _} = lists:foldl(fun
+ (O, {Os, true}) -> {[O|Os], false};
+ (O = "-D", {Os, _}) -> {[O|Os], true};
+ (O = [\\$$-, \\$$D, _ | _], {Os, _}) -> {[O|Os], false};
+ (O = "-I", {Os, _}) -> {[O|Os], true};
+ (O = [\\$$-, \\$$I, _ | _], {Os, _}) -> {[O|Os], false};
+ (O = "-pa", {Os, _}) -> {[O|Os], true};
+ (_, Acc) -> Acc
+ end, {[], false}, Opts),
+ io:format("~s~n", [string:join(lists:reverse(Filtered), " ")]),
+ halt().
+endef
+
+# DIALYZER_PLT is a variable understood directly by Dialyzer.
+#
+# We append the path to erts at the end of the PLT. This works
+# because the PLT file is in the external term format and the
+# function binary_to_term/1 ignores any trailing data.
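+#
+# As a quick illustration of that property (an Erlang shell sketch using a
+# made-up term and path):
+#   Bin = term_to_binary(sample_plt),
+#   sample_plt = binary_to_term(<<Bin/binary, "/usr/lib/erlang\n">>).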
+$(DIALYZER_PLT): deps app
+ $(eval DEPS_LOG := $(shell test -f $(ERLANG_MK_TMP)/deps.log && \
+ while read p; do test -d $$p/ebin && echo $$p/ebin; done <$(ERLANG_MK_TMP)/deps.log))
+ $(verbose) dialyzer --build_plt $(DIALYZER_PLT_OPTS) --apps \
+ erts kernel stdlib $(PLT_APPS) $(OTP_DEPS) $(LOCAL_DEPS) $(DEPS_LOG) || test $$? -eq 2
+ $(verbose) $(ERL) -eval 'io:format("~n~s~n", [code:lib_dir(erts)]), halt().' >> $@
+
+plt: $(DIALYZER_PLT)
+
+distclean-plt:
+ $(gen_verbose) rm -f $(DIALYZER_PLT)
+
+ifneq ($(wildcard $(DIALYZER_PLT)),)
+dialyze: $(if $(filter --src,$(DIALYZER_DIRS)),,deps app)
+ $(verbose) if ! tail -n1 $(DIALYZER_PLT) | \
+ grep -q "^`$(ERL) -eval 'io:format("~s", [code:lib_dir(erts)]), halt().'`$$"; then \
+ rm $(DIALYZER_PLT); \
+ $(MAKE) plt; \
+ fi
+else
+dialyze: $(DIALYZER_PLT)
+endif
+ $(verbose) dialyzer --no_native `$(ERL) \
+ -eval "$(subst $(newline),,$(call escape_dquotes,$(call filter_opts.erl)))" \
+ -extra $(ERLC_OPTS)` $(DIALYZER_DIRS) $(DIALYZER_OPTS) $(if $(wildcard ebin/),-pa ebin/)
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-edoc edoc
+
+# Configuration.
+
+EDOC_OPTS ?=
+EDOC_SRC_DIRS ?=
+EDOC_OUTPUT ?= doc
+
+define edoc.erl
+ SrcPaths = lists:foldl(fun(P, Acc) ->
+ filelib:wildcard(atom_to_list(P) ++ "/{src,c_src}") ++ Acc
+ end, [], [$(call comma_list,$(patsubst %,'%',$(call core_native_path,$(EDOC_SRC_DIRS))))]),
+ DefaultOpts = [{dir, "$(EDOC_OUTPUT)"}, {source_path, SrcPaths}, {subpackages, false}],
+ edoc:application($(1), ".", [$(2)] ++ DefaultOpts),
+ halt(0).
+endef
+
+# Core targets.
+
+ifneq ($(strip $(EDOC_SRC_DIRS)$(wildcard doc/overview.edoc)),)
+docs:: edoc
+endif
+
+distclean:: distclean-edoc
+
+# Plugin-specific targets.
+
+edoc: distclean-edoc doc-deps
+ $(gen_verbose) $(call erlang,$(call edoc.erl,$(PROJECT),$(EDOC_OPTS)))
+
+distclean-edoc:
+ $(gen_verbose) rm -f $(EDOC_OUTPUT)/*.css $(EDOC_OUTPUT)/*.html $(EDOC_OUTPUT)/*.png $(EDOC_OUTPUT)/edoc-info
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Configuration.
+
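+# When DTL_FULL_PATH is set, a template at $(DTL_PATH)/foo/bar.dtl compiles
+# to a module named foo_bar_dtl rather than bar_dtl (illustrative names).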
+DTL_FULL_PATH ?=
+DTL_PATH ?= templates/
+DTL_PREFIX ?=
+DTL_SUFFIX ?= _dtl
+DTL_OPTS ?=
+
+# Verbosity.
+
+dtl_verbose_0 = @echo " DTL " $(filter %.dtl,$(?F));
+dtl_verbose = $(dtl_verbose_$(V))
+
+# Core targets.
+
+DTL_PATH := $(abspath $(DTL_PATH))
+DTL_FILES := $(sort $(call core_find,$(DTL_PATH),*.dtl))
+
+ifneq ($(DTL_FILES),)
+
+DTL_NAMES = $(addprefix $(DTL_PREFIX),$(addsuffix $(DTL_SUFFIX),$(DTL_FILES:$(DTL_PATH)/%.dtl=%)))
+DTL_MODULES = $(if $(DTL_FULL_PATH),$(subst /,_,$(DTL_NAMES)),$(notdir $(DTL_NAMES)))
+BEAM_FILES += $(addsuffix .beam,$(addprefix ebin/,$(DTL_MODULES)))
+
+ifneq ($(words $(DTL_FILES)),0)
+# Rebuild templates when the Makefile changes.
+$(ERLANG_MK_TMP)/last-makefile-change-erlydtl: $(MAKEFILE_LIST) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(DTL_FILES); \
+ fi
+ $(verbose) touch $@
+
+ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change-erlydtl
+endif
+
+define erlydtl_compile.erl
+ [begin
+ Module0 = case "$(strip $(DTL_FULL_PATH))" of
+ "" ->
+ filename:basename(F, ".dtl");
+ _ ->
+ "$(call core_native_path,$(DTL_PATH))/" ++ F2 = filename:rootname(F, ".dtl"),
+ re:replace(F2, "/", "_", [{return, list}, global])
+ end,
+ Module = list_to_atom("$(DTL_PREFIX)" ++ string:to_lower(Module0) ++ "$(DTL_SUFFIX)"),
+ case erlydtl:compile(F, Module, [$(DTL_OPTS)] ++ [{out_dir, "ebin/"}, return_errors]) of
+ ok -> ok;
+ {ok, _} -> ok
+ end
+ end || F <- string:tokens("$(1)", " ")],
+ halt().
+endef
+
+ebin/$(PROJECT).app:: $(DTL_FILES) | ebin/
+ $(if $(strip $?),\
+ $(dtl_verbose) $(call erlang,$(call erlydtl_compile.erl,$(call core_native_path,$?)),\
+ -pa ebin/))
+
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, Dave Cottlehuber <dch@skunkwerks.at>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-escript escript escript-zip
+
+# Configuration.
+
+ESCRIPT_NAME ?= $(PROJECT)
+ESCRIPT_FILE ?= $(ESCRIPT_NAME)
+
+ESCRIPT_SHEBANG ?= /usr/bin/env escript
+ESCRIPT_COMMENT ?= This is an -*- erlang -*- file
+ESCRIPT_EMU_ARGS ?= -escript main $(ESCRIPT_NAME)
+
+ESCRIPT_ZIP ?= 7z a -tzip -mx=9 -mtc=off $(if $(filter-out 0,$(V)),,> /dev/null)
+ESCRIPT_ZIP_FILE ?= $(ERLANG_MK_TMP)/escript.zip
+
+# Core targets.
+
+distclean:: distclean-escript
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Escript targets:" \
+	" escript Build an executable escript archive"
+
+# Plugin-specific targets.
+
+escript-zip:: FULL=1
+escript-zip:: deps app
+	$(verbose) mkdir -p $(dir $(ESCRIPT_ZIP_FILE))
+ $(verbose) rm -f $(ESCRIPT_ZIP_FILE)
+ $(gen_verbose) cd .. && $(ESCRIPT_ZIP) $(ESCRIPT_ZIP_FILE) $(PROJECT)/ebin/*
+ifneq ($(DEPS),)
+ $(verbose) cd $(DEPS_DIR) && $(ESCRIPT_ZIP) $(ESCRIPT_ZIP_FILE) \
+ $(subst $(DEPS_DIR)/,,$(addsuffix /*,$(wildcard \
+ $(addsuffix /ebin,$(shell cat $(ERLANG_MK_TMP)/deps.log)))))
+endif
+
+escript:: escript-zip
+ $(gen_verbose) printf "%s\n" \
+ "#!$(ESCRIPT_SHEBANG)" \
+ "%% $(ESCRIPT_COMMENT)" \
+ "%%! $(ESCRIPT_EMU_ARGS)" > $(ESCRIPT_FILE)
+ $(verbose) cat $(ESCRIPT_ZIP_FILE) >> $(ESCRIPT_FILE)
+ $(verbose) chmod +x $(ESCRIPT_FILE)
+
+distclean-escript:
+ $(gen_verbose) rm -f $(ESCRIPT_FILE)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, Enrique Fernandez <enrique.fernandez@erlang-solutions.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: eunit apps-eunit
+
+# Configuration
+
+EUNIT_OPTS ?=
+EUNIT_ERL_OPTS ?=
+
+# Core targets.
+
+tests:: eunit
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "EUnit targets:" \
+ " eunit Run all the EUnit tests for this project"
+
+# Plugin-specific targets.
+
+define eunit.erl
+ $(call cover.erl)
+ CoverSetup(),
+ case eunit:test($1, [$(EUNIT_OPTS)]) of
+ ok -> ok;
+ error -> halt(2)
+ end,
+ CoverExport("$(call core_native_path,$(COVER_DATA_DIR))/eunit.coverdata"),
+ halt()
+endef
+
+EUNIT_ERL_OPTS += -pa $(TEST_DIR) $(CURDIR)/ebin
+
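+# t=MODULE runs a single module's EUnit tests, while t=MODULE:FUNCTION runs
+# one zero-arity test function, e.g. (hypothetical names)
+# "make eunit t=my_mod:my_test".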
+ifdef t
+ifeq (,$(findstring :,$(t)))
+eunit: test-build cover-data-dir
+ $(gen_verbose) $(call erlang,$(call eunit.erl,['$(t)']),$(EUNIT_ERL_OPTS))
+else
+eunit: test-build cover-data-dir
+ $(gen_verbose) $(call erlang,$(call eunit.erl,fun $(t)/0),$(EUNIT_ERL_OPTS))
+endif
+else
+EUNIT_EBIN_MODS = $(notdir $(basename $(ERL_FILES) $(BEAM_FILES)))
+EUNIT_TEST_MODS = $(notdir $(basename $(call core_find,$(TEST_DIR)/,*.erl)))
+
+EUNIT_MODS = $(foreach mod,$(EUNIT_EBIN_MODS) $(filter-out \
+ $(patsubst %,%_tests,$(EUNIT_EBIN_MODS)),$(EUNIT_TEST_MODS)),'$(mod)')
+
+eunit: test-build $(if $(IS_APP)$(ROOT_DIR),,apps-eunit) cover-data-dir
+ifneq ($(wildcard src/ $(TEST_DIR)),)
+ $(gen_verbose) $(call erlang,$(call eunit.erl,[$(call comma_list,$(EUNIT_MODS))]),$(EUNIT_ERL_OPTS))
+endif
+
+ifneq ($(ALL_APPS_DIRS),)
+apps-eunit: test-build
+ $(verbose) eunit_retcode=0 ; for app in $(ALL_APPS_DIRS); do $(MAKE) -C $$app eunit IS_APP=1; \
+ [ $$? -ne 0 ] && eunit_retcode=1 ; done ; \
+ exit $$eunit_retcode
+endif
+endif
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter proper,$(DEPS) $(TEST_DEPS)),proper)
+.PHONY: proper
+
+# Targets.
+
+tests:: proper
+
+define proper_check.erl
+ $(call cover.erl)
+ code:add_pathsa([
+ "$(call core_native_path,$(CURDIR)/ebin)",
+ "$(call core_native_path,$(DEPS_DIR)/*/ebin)",
+ "$(call core_native_path,$(TEST_DIR))"]),
+ Module = fun(M) ->
+ [true] =:= lists:usort([
+ case atom_to_list(F) of
+ "prop_" ++ _ ->
+ io:format("Testing ~p:~p/0~n", [M, F]),
+ proper:quickcheck(M:F(), nocolors);
+ _ ->
+ true
+ end
+ || {F, 0} <- M:module_info(exports)])
+ end,
+ try begin
+ CoverSetup(),
+ Res = case $(1) of
+ all -> [true] =:= lists:usort([Module(M) || M <- [$(call comma_list,$(3))]]);
+ module -> Module($(2));
+ function -> proper:quickcheck($(2), nocolors)
+ end,
+ CoverExport("$(COVER_DATA_DIR)/proper.coverdata"),
+ Res
+ end of
+ true -> halt(0);
+ _ -> halt(1)
+ catch error:undef ->
+ io:format("Undefined property or module?~n~p~n", [erlang:get_stacktrace()]),
+ halt(0)
+ end.
+endef
+
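+# t=MODULE checks every prop_* property exported by MODULE, while
+# t=MODULE:FUNCTION checks one property, e.g. (hypothetical)
+# "make proper t=my_mod:prop_roundtrip".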
+ifdef t
+ifeq (,$(findstring :,$(t)))
+proper: test-build cover-data-dir
+ $(verbose) $(call erlang,$(call proper_check.erl,module,$(t)))
+else
+proper: test-build cover-data-dir
+ $(verbose) echo Testing $(t)/0
+ $(verbose) $(call erlang,$(call proper_check.erl,function,$(t)()))
+endif
+else
+proper: test-build cover-data-dir
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(wildcard ebin/*.beam) $(call core_find,$(TEST_DIR)/,*.beam))))))
+ $(gen_verbose) $(call erlang,$(call proper_check.erl,all,undefined,$(MODULES)))
+endif
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Verbosity.
+
+proto_verbose_0 = @echo " PROTO " $(filter %.proto,$(?F));
+proto_verbose = $(proto_verbose_$(V))
+
+# Core targets.
+
+ifneq ($(wildcard src/),)
+ifneq ($(filter gpb protobuffs,$(BUILD_DEPS) $(DEPS)),)
+PROTO_FILES := $(filter %.proto,$(ALL_SRC_FILES))
+ERL_FILES += $(addprefix src/,$(patsubst %.proto,%_pb.erl,$(notdir $(PROTO_FILES))))
+
+ifeq ($(PROTO_FILES),)
+$(ERLANG_MK_TMP)/last-makefile-change-protobuffs:
+ $(verbose) :
+else
+# Rebuild proto files when the Makefile changes.
+# We exclude $(PROJECT).d to avoid a circular dependency.
+$(ERLANG_MK_TMP)/last-makefile-change-protobuffs: $(filter-out $(PROJECT).d,$(MAKEFILE_LIST)) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(PROTO_FILES); \
+ fi
+ $(verbose) touch $@
+
+$(PROJECT).d:: $(ERLANG_MK_TMP)/last-makefile-change-protobuffs
+endif
+
+ifeq ($(filter gpb,$(BUILD_DEPS) $(DEPS)),)
+define compile_proto.erl
+ [begin
+ protobuffs_compile:generate_source(F, [
+ {output_include_dir, "./include"},
+ {output_src_dir, "./src"}])
+ end || F <- string:tokens("$1", " ")],
+ halt().
+endef
+else
+define compile_proto.erl
+ [begin
+ gpb_compile:file(F, [
+ {include_as_lib, true},
+ {module_name_suffix, "_pb"},
+ {o_hrl, "./include"},
+ {o_erl, "./src"}])
+ end || F <- string:tokens("$1", " ")],
+ halt().
+endef
+endif
+
+ifneq ($(PROTO_FILES),)
+$(PROJECT).d:: $(PROTO_FILES)
+ $(verbose) mkdir -p ebin/ include/
+ $(if $(strip $?),$(proto_verbose) $(call erlang,$(call compile_proto.erl,$?)))
+endif
+endif
+endif
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: relx-rel relx-relup distclean-relx-rel run
+
+# Configuration.
+
+RELX ?= $(ERLANG_MK_TMP)/relx
+RELX_CONFIG ?= $(CURDIR)/relx.config
+
+RELX_URL ?= https://erlang.mk/res/relx-v3.27.0
+RELX_OPTS ?=
+RELX_OUTPUT_DIR ?= _rel
+RELX_REL_EXT ?=
+RELX_TAR ?= 1
+
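+# A self-extracting release (SFX) embeds the release tarball, so the tar
+# step is forced on below.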
+ifdef SFX
+ RELX_TAR = 1
+endif
+
+ifeq ($(firstword $(RELX_OPTS)),-o)
+ RELX_OUTPUT_DIR = $(word 2,$(RELX_OPTS))
+else
+ RELX_OPTS += -o $(RELX_OUTPUT_DIR)
+endif
+
+# Core targets.
+
+ifeq ($(IS_DEP),)
+ifneq ($(wildcard $(RELX_CONFIG)),)
+rel:: relx-rel
+
+relup:: relx-relup
+endif
+endif
+
+distclean:: distclean-relx-rel
+
+# Plugin-specific targets.
+
+$(RELX): | $(ERLANG_MK_TMP)
+ $(gen_verbose) $(call core_http_get,$(RELX),$(RELX_URL))
+ $(verbose) chmod +x $(RELX)
+
+relx-rel: $(RELX) rel-deps app
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) release
+ $(verbose) $(MAKE) relx-post-rel
+ifeq ($(RELX_TAR),1)
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) tar
+endif
+
+relx-relup: $(RELX) rel-deps app
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) release
+ $(MAKE) relx-post-rel
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) relup $(if $(filter 1,$(RELX_TAR)),tar)
+
+distclean-relx-rel:
+ $(gen_verbose) rm -rf $(RELX_OUTPUT_DIR)
+
+# Default hooks.
+relx-post-rel::
+ $(verbose) :
+
+# Run target.
+
+ifeq ($(wildcard $(RELX_CONFIG)),)
+run::
+else
+
+define get_relx_release.erl
+ {ok, Config} = file:consult("$(call core_native_path,$(RELX_CONFIG))"),
+ {release, {Name, Vsn0}, _} = lists:keyfind(release, 1, Config),
+ Vsn = case Vsn0 of
+ {cmd, Cmd} -> os:cmd(Cmd);
+ semver -> "";
+ {semver, _} -> "";
+		VsnStr -> VsnStr
+ end,
+ Extended = case lists:keyfind(extended_start_script, 1, Config) of
+ {_, true} -> "1";
+ _ -> ""
+ end,
+ io:format("~s ~s ~s", [Name, Vsn, Extended]),
+ halt(0).
+endef
+
+RELX_REL := $(shell $(call erlang,$(get_relx_release.erl)))
+RELX_REL_NAME := $(word 1,$(RELX_REL))
+RELX_REL_VSN := $(word 2,$(RELX_REL))
+RELX_REL_CMD := $(if $(word 3,$(RELX_REL)),console)
+
+ifeq ($(PLATFORM),msys2)
+RELX_REL_EXT := .cmd
+endif
+
+run:: all
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) $(RELX_REL_CMD)
+
+ifdef RELOAD
+rel::
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) ping
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) \
+ eval "io:format(\"~p~n\", [c:lm()])"
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Relx targets:" \
+ " run Compile the project, build the release and run it"
+
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, M Robert Martin <rob@version2beta.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: shell
+
+# Configuration.
+
+SHELL_ERL ?= erl
+SHELL_PATHS ?= $(CURDIR)/ebin $(TEST_DIR)
+SHELL_OPTS ?=
+
+ALL_SHELL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(SHELL_DEPS))
+
+# Core targets
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Shell targets:" \
+	" shell Run an Erlang shell with SHELL_OPTS or reasonable defaults"
+
+# Plugin-specific targets.
+
+$(foreach dep,$(SHELL_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+build-shell-deps:
+else
+build-shell-deps: $(ALL_SHELL_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_SHELL_DEPS_DIRS) ; do \
+ if [ -z "$(strip $(FULL))" ] && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ else \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ fi \
+ done
+endif
+
+shell:: build-shell-deps
+ $(gen_verbose) $(SHELL_ERL) -pa $(SHELL_PATHS) $(SHELL_OPTS)
+
+# Copyright 2017, Stanislaw Klekot <dozzie@jarowit.net>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-sphinx sphinx
+
+# Configuration.
+
+SPHINX_BUILD ?= sphinx-build
+SPHINX_SOURCE ?= doc
+SPHINX_CONFDIR ?=
+SPHINX_FORMATS ?= html
+SPHINX_DOCTREES ?= $(ERLANG_MK_TMP)/sphinx.doctrees
+SPHINX_OPTS ?=
+
+#sphinx_html_opts =
+#sphinx_html_output = html
+#sphinx_man_opts =
+#sphinx_man_output = man
+#sphinx_latex_opts =
+#sphinx_latex_output = latex
+
+# Helpers.
+
+sphinx_build_0 = @echo " SPHINX" $1; $(SPHINX_BUILD) -N -q
+sphinx_build_1 = $(SPHINX_BUILD) -N
+sphinx_build_2 = set -x; $(SPHINX_BUILD)
+sphinx_build = $(sphinx_build_$(V))
+
+define sphinx.build
+$(call sphinx_build,$1) -b $1 -d $(SPHINX_DOCTREES) $(if $(SPHINX_CONFDIR),-c $(SPHINX_CONFDIR)) $(SPHINX_OPTS) $(sphinx_$1_opts) -- $(SPHINX_SOURCE) $(call sphinx.output,$1)
+
+endef
+
+define sphinx.output
+$(if $(sphinx_$1_output),$(sphinx_$1_output),$1)
+endef
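+
+# For example, $(call sphinx.output,html) expands to "html" unless
+# sphinx_html_output has been set to override it.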
+
+# Targets.
+
+ifneq ($(wildcard $(if $(SPHINX_CONFDIR),$(SPHINX_CONFDIR),$(SPHINX_SOURCE))/conf.py),)
+docs:: sphinx
+distclean:: distclean-sphinx
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Sphinx targets:" \
+ " sphinx Generate Sphinx documentation." \
+ "" \
+	"ReST sources and the 'conf.py' file are expected in the directory pointed to" \
+	"by SPHINX_SOURCE ('doc' by default). SPHINX_FORMATS lists the formats to build" \
+	"(only the 'html' format is generated by default); the target directory can be specified by" \
+ 'setting sphinx_$${format}_output, for example: sphinx_html_output = output/html' \
+ "Additional Sphinx options can be set in SPHINX_OPTS."
+
+# Plugin-specific targets.
+
+sphinx:
+ $(foreach F,$(SPHINX_FORMATS),$(call sphinx.build,$F))
+
+distclean-sphinx:
+ $(gen_verbose) rm -rf $(filter-out $(SPHINX_SOURCE),$(foreach F,$(SPHINX_FORMATS),$(call sphinx.output,$F)))
+
+# Copyright (c) 2017, Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: show-ERL_LIBS show-ERLC_OPTS show-TEST_ERLC_OPTS
+
+show-ERL_LIBS:
+ @echo $(ERL_LIBS)
+
+show-ERLC_OPTS:
+ @$(foreach opt,$(ERLC_OPTS) -pa ebin -I include,echo "$(opt)";)
+
+show-TEST_ERLC_OPTS:
+ @$(foreach opt,$(TEST_ERLC_OPTS) -pa ebin -I include,echo "$(opt)";)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter triq,$(DEPS) $(TEST_DEPS)),triq)
+.PHONY: triq
+
+# Targets.
+
+tests:: triq
+
+define triq_check.erl
+ $(call cover.erl)
+ code:add_pathsa([
+ "$(call core_native_path,$(CURDIR)/ebin)",
+ "$(call core_native_path,$(DEPS_DIR)/*/ebin)",
+ "$(call core_native_path,$(TEST_DIR))"]),
+ try begin
+ CoverSetup(),
+ Res = case $(1) of
+ all -> [true] =:= lists:usort([triq:check(M) || M <- [$(call comma_list,$(3))]]);
+ module -> triq:check($(2));
+ function -> triq:check($(2))
+ end,
+ CoverExport("$(COVER_DATA_DIR)/triq.coverdata"),
+ Res
+ end of
+ true -> halt(0);
+ _ -> halt(1)
+ catch error:undef ->
+ io:format("Undefined property or module?~n~p~n", [erlang:get_stacktrace()]),
+ halt(0)
+ end.
+endef
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+triq: test-build cover-data-dir
+ $(verbose) $(call erlang,$(call triq_check.erl,module,$(t)))
+else
+triq: test-build cover-data-dir
+ $(verbose) echo Testing $(t)/0
+ $(verbose) $(call erlang,$(call triq_check.erl,function,$(t)()))
+endif
+else
+triq: test-build cover-data-dir
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(wildcard ebin/*.beam) $(call core_find,$(TEST_DIR)/,*.beam))))))
+ $(gen_verbose) $(call erlang,$(call triq_check.erl,all,undefined,$(MODULES)))
+endif
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Erlang Solutions Ltd.
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: xref distclean-xref
+
+# Configuration.
+
+ifeq ($(XREF_CONFIG),)
+ XREFR_ARGS :=
+else
+ XREFR_ARGS := -c $(XREF_CONFIG)
+endif
+
+XREFR ?= $(CURDIR)/xrefr
+export XREFR
+
+XREFR_URL ?= https://github.com/inaka/xref_runner/releases/download/1.1.0/xrefr
+
+# Core targets.
+
+help::
+ $(verbose) printf '%s\n' '' \
+ 'Xref targets:' \
+	' xref Run Xrefr using $$XREF_CONFIG as the config file if defined'
+
+distclean:: distclean-xref
+
+# Plugin-specific targets.
+
+$(XREFR):
+ $(gen_verbose) $(call core_http_get,$(XREFR),$(XREFR_URL))
+ $(verbose) chmod +x $(XREFR)
+
+xref: deps app $(XREFR)
+ $(gen_verbose) $(XREFR) $(XREFR_ARGS)
+
+distclean-xref:
+ $(gen_verbose) rm -rf $(XREFR)
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Viktor Söderqvist <viktor@zuiderkwast.se>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+COVER_REPORT_DIR ?= cover
+COVER_DATA_DIR ?= $(COVER_REPORT_DIR)
+
+ifdef COVER
+COVER_APPS ?= $(notdir $(ALL_APPS_DIRS))
+COVER_DEPS ?=
+endif
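+# COVER_APPS and COVER_DEPS select which applications and dependencies are
+# cover-compiled in addition to the project's own ebin directory.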
+
+# Code coverage for Common Test.
+
+ifdef COVER
+ifdef CT_RUN
+ifneq ($(wildcard $(TEST_DIR)),)
+test-build:: $(TEST_DIR)/ct.cover.spec
+
+$(TEST_DIR)/ct.cover.spec: cover-data-dir
+ $(gen_verbose) printf "%s\n" \
+ "{incl_app, '$(PROJECT)', details}." \
+ "{incl_dirs, '$(PROJECT)', [\"$(call core_native_path,$(CURDIR)/ebin)\" \
+ $(foreach a,$(COVER_APPS),$(comma) \"$(call core_native_path,$(APPS_DIR)/$a/ebin)\") \
+ $(foreach d,$(COVER_DEPS),$(comma) \"$(call core_native_path,$(DEPS_DIR)/$d/ebin)\")]}." \
+ '{export,"$(call core_native_path,$(abspath $(COVER_DATA_DIR))/ct.coverdata)"}.' > $@
+
+CT_RUN += -cover $(TEST_DIR)/ct.cover.spec
+endif
+endif
+endif
+
+# Code coverage for other tools.
+
+ifdef COVER
+define cover.erl
+ CoverSetup = fun() ->
+ Dirs = ["$(call core_native_path,$(CURDIR)/ebin)"
+ $(foreach a,$(COVER_APPS),$(comma) "$(call core_native_path,$(APPS_DIR)/$a/ebin)")
+ $(foreach d,$(COVER_DEPS),$(comma) "$(call core_native_path,$(DEPS_DIR)/$d/ebin)")],
+ [begin
+ case filelib:is_dir(Dir) of
+ false -> false;
+ true ->
+ case cover:compile_beam_directory(Dir) of
+ {error, _} -> halt(1);
+ _ -> true
+ end
+ end
+ end || Dir <- Dirs]
+ end,
+ CoverExport = fun(Filename) -> cover:export(Filename) end,
+endef
+else
+define cover.erl
+ CoverSetup = fun() -> ok end,
+ CoverExport = fun(_) -> ok end,
+endef
+endif
+
+# Core targets
+
+ifdef COVER
+ifneq ($(COVER_REPORT_DIR),)
+tests::
+ $(verbose) $(MAKE) --no-print-directory cover-report
+endif
+
+cover-data-dir: | $(COVER_DATA_DIR)
+
+$(COVER_DATA_DIR):
+ $(verbose) mkdir -p $(COVER_DATA_DIR)
+else
+cover-data-dir:
+endif
+
+clean:: coverdata-clean
+
+ifneq ($(COVER_REPORT_DIR),)
+distclean:: cover-report-clean
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Cover targets:" \
+	" cover-report Generate an HTML coverage report from previously collected" \
+ " cover data." \
+ " all.coverdata Merge all coverdata files into all.coverdata." \
+ "" \
+ "If COVER=1 is set, coverage data is generated by the targets eunit and ct. The" \
+	"target tests additionally generates an HTML coverage report from the combined" \
+ "coverdata files from each of these testing tools. HTML reports can be disabled" \
+ "by setting COVER_REPORT_DIR to empty."
+
+# Plugin specific targets
+
+COVERDATA = $(filter-out $(COVER_DATA_DIR)/all.coverdata,$(wildcard $(COVER_DATA_DIR)/*.coverdata))
+
+.PHONY: coverdata-clean
+coverdata-clean:
+ $(gen_verbose) rm -f $(COVER_DATA_DIR)/*.coverdata $(TEST_DIR)/ct.cover.spec
+
+# Merge all coverdata files into one.
+define cover_export.erl
+ $(foreach f,$(COVERDATA),cover:import("$(f)") == ok orelse halt(1),)
+ cover:export("$(COVER_DATA_DIR)/$@"), halt(0).
+endef
+
+all.coverdata: $(COVERDATA) cover-data-dir
+ $(gen_verbose) $(call erlang,$(cover_export.erl))
+
+# These are only defined if COVER_REPORT_DIR is non-empty. Set COVER_REPORT_DIR to
+# empty if you want the coverdata files but not the HTML report.
+ifneq ($(COVER_REPORT_DIR),)
+
+.PHONY: cover-report-clean cover-report
+
+cover-report-clean:
+ $(gen_verbose) rm -rf $(COVER_REPORT_DIR)
+ifneq ($(COVER_REPORT_DIR),$(COVER_DATA_DIR))
+ $(if $(shell ls -A $(COVER_DATA_DIR)/),,$(verbose) rmdir $(COVER_DATA_DIR))
+endif
+
+ifeq ($(COVERDATA),)
+cover-report:
+else
+
+# Modules which include eunit.hrl always contain one line without coverage
+# because eunit defines test/0 which is never called. We compensate for this.
+EUNIT_HRL_MODS = $(subst $(space),$(comma),$(shell \
+ grep -H -e '^\s*-include.*include/eunit\.hrl"' src/*.erl \
+ | sed "s/^src\/\(.*\)\.erl:.*/'\1'/" | uniq))
+
+define cover_report.erl
+ $(foreach f,$(COVERDATA),cover:import("$(f)") == ok orelse halt(1),)
+ Ms = cover:imported_modules(),
+ [cover:analyse_to_file(M, "$(COVER_REPORT_DIR)/" ++ atom_to_list(M)
+ ++ ".COVER.html", [html]) || M <- Ms],
+ Report = [begin {ok, R} = cover:analyse(M, module), R end || M <- Ms],
+ EunitHrlMods = [$(EUNIT_HRL_MODS)],
+ Report1 = [{M, {Y, case lists:member(M, EunitHrlMods) of
+ true -> N - 1; false -> N end}} || {M, {Y, N}} <- Report],
+ TotalY = lists:sum([Y || {_, {Y, _}} <- Report1]),
+ TotalN = lists:sum([N || {_, {_, N}} <- Report1]),
+ Perc = fun(Y, N) -> case Y + N of 0 -> 100; S -> round(100 * Y / S) end end,
+ TotalPerc = Perc(TotalY, TotalN),
+ {ok, F} = file:open("$(COVER_REPORT_DIR)/index.html", [write]),
+ io:format(F, "<!DOCTYPE html><html>~n"
+ "<head><meta charset=\"UTF-8\">~n"
+ "<title>Coverage report</title></head>~n"
+ "<body>~n", []),
+ io:format(F, "<h1>Coverage</h1>~n<p>Total: ~p%</p>~n", [TotalPerc]),
+ io:format(F, "<table><tr><th>Module</th><th>Coverage</th></tr>~n", []),
+ [io:format(F, "<tr><td><a href=\"~p.COVER.html\">~p</a></td>"
+ "<td>~p%</td></tr>~n",
+ [M, M, Perc(Y, N)]) || {M, {Y, N}} <- Report1],
+ How = "$(subst $(space),$(comma)$(space),$(basename $(COVERDATA)))",
+ Date = "$(shell date -u "+%Y-%m-%dT%H:%M:%SZ")",
+ io:format(F, "</table>~n"
+ "<p>Generated using ~s and erlang.mk on ~s.</p>~n"
+ "</body></html>", [How, Date]),
+ halt().
+endef
+
+cover-report:
+ $(verbose) mkdir -p $(COVER_REPORT_DIR)
+ $(gen_verbose) $(call erlang,$(cover_report.erl))
+
+endif
+endif # ifneq ($(COVER_REPORT_DIR),)
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: sfx
+
+ifdef RELX_REL
+ifdef SFX
+
+# Configuration.
+
+SFX_ARCHIVE ?= $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/$(RELX_REL_NAME)-$(RELX_REL_VSN).tar.gz
+SFX_OUTPUT_FILE ?= $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME).run
+
+# Core targets.
+
+rel:: sfx
+
+# Plugin-specific targets.
+
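+# The stub below locates the line following the __ARCHIVE_BELOW__ marker,
+# extracts the tarball appended after it into a temporary directory, runs
+# the release in console mode, then cleans up and propagates the exit code.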
+define sfx_stub
+#!/bin/sh
+
+TMPDIR=`mktemp -d`
+ARCHIVE=`awk '/^__ARCHIVE_BELOW__$$/ {print NR + 1; exit 0;}' $$0`
+FILENAME=$$(basename $$0)
+REL=$${FILENAME%.*}
+
+tail -n+$$ARCHIVE $$0 | tar -xzf - -C $$TMPDIR
+
+$$TMPDIR/bin/$$REL console
+RET=$$?
+
+rm -rf $$TMPDIR
+
+exit $$RET
+
+__ARCHIVE_BELOW__
+endef
+
+sfx:
+ $(verbose) $(call core_render,sfx_stub,$(SFX_OUTPUT_FILE))
+ $(gen_verbose) cat $(SFX_ARCHIVE) >> $(SFX_OUTPUT_FILE)
+ $(verbose) chmod +x $(SFX_OUTPUT_FILE)
+
+endif
+endif
+
+# Copyright (c) 2013-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# External plugins.
+
+DEP_PLUGINS ?=
+
+$(foreach p,$(DEP_PLUGINS),\
+ $(eval $(if $(findstring /,$p),\
+ $(call core_dep_plugin,$p,$(firstword $(subst /, ,$p))),\
+ $(call core_dep_plugin,$p/plugins.mk,$p))))
+
+help:: help-plugins
+
+help-plugins::
+ $(verbose) :
+
+# Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015-2016, Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Fetch dependencies recursively (without building them).
+
+.PHONY: fetch-deps fetch-doc-deps fetch-rel-deps fetch-test-deps \
+ fetch-shell-deps
+
+.PHONY: $(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+fetch-deps: $(ERLANG_MK_RECURSIVE_DEPS_LIST)
+fetch-doc-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST)
+fetch-rel-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST)
+fetch-test-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST)
+fetch-shell-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+ifneq ($(SKIP_DEPS),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST):
+ $(verbose) :> $@
+else
+# By default, we fetch "normal" dependencies. They are also included no
+# matter which type of dependencies is requested.
+#
+# $(ALL_DEPS_DIRS) includes $(BUILD_DEPS).
+
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_DOC_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_REL_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_TEST_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_SHELL_DEPS_DIRS)
+
+# Allow using fetch-deps together with $(DEP_TYPES) to fetch multiple types of
+# dependencies with a single target.
+ifneq ($(filter doc,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_DOC_DEPS_DIRS)
+endif
+ifneq ($(filter rel,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_REL_DEPS_DIRS)
+endif
+ifneq ($(filter test,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_TEST_DEPS_DIRS)
+endif
+ifneq ($(filter shell,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_SHELL_DEPS_DIRS)
+endif
+
+ERLANG_MK_RECURSIVE_TMP_LIST := $(abspath $(ERLANG_MK_TMP)/recursive-tmp-deps-$(shell echo $$PPID).log)
+
+$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): | $(ERLANG_MK_TMP)
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_RECURSIVE_TMP_LIST)
+endif
+ $(verbose) touch $(ERLANG_MK_RECURSIVE_TMP_LIST)
+ $(verbose) set -e; for dep in $^ ; do \
+ if ! grep -qs ^$$dep$$ $(ERLANG_MK_RECURSIVE_TMP_LIST); then \
+ echo $$dep >> $(ERLANG_MK_RECURSIVE_TMP_LIST); \
+ if grep -qs -E "^[[:blank:]]*include[[:blank:]]+(erlang\.mk|.*/erlang\.mk|.*ERLANG_MK_FILENAME.*)$$" \
+ $$dep/GNUmakefile $$dep/makefile $$dep/Makefile; then \
+ $(MAKE) -C $$dep fetch-deps \
+ IS_DEP=1 \
+ ERLANG_MK_RECURSIVE_TMP_LIST=$(ERLANG_MK_RECURSIVE_TMP_LIST); \
+ fi \
+ fi \
+ done
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) sort < $(ERLANG_MK_RECURSIVE_TMP_LIST) | \
+ uniq > $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted
+ $(verbose) cmp -s $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted $@ \
+ || mv $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted $@
+ $(verbose) rm -f $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted
+ $(verbose) rm $(ERLANG_MK_RECURSIVE_TMP_LIST)
+endif
+endif # ifneq ($(SKIP_DEPS),)
+
+# List dependencies recursively.
+
+.PHONY: list-deps list-doc-deps list-rel-deps list-test-deps \
+ list-shell-deps
+
+list-deps: $(ERLANG_MK_RECURSIVE_DEPS_LIST)
+list-doc-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST)
+list-rel-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST)
+list-test-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST)
+list-shell-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+list-deps list-doc-deps list-rel-deps list-test-deps list-shell-deps:
+ $(verbose) cat $^
+
+# Query dependencies recursively.
+
+.PHONY: query-deps query-doc-deps query-rel-deps query-test-deps \
+ query-shell-deps
+
+QUERY ?= name fetch_method repo version
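+# QUERY lists the fields printed for each dependency; for example,
+# "make query-deps QUERY='name version'" prints only those two fields.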
+
+define query_target
+$(1): $(2) clean-tmp-query.log
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(4)
+endif
+ $(verbose) $(foreach dep,$(3),\
+ echo $(PROJECT): $(foreach q,$(QUERY),$(call query_$(q),$(dep))) >> $(4) ;)
+ $(if $(filter-out query-deps,$(1)),,\
+ $(verbose) set -e; for dep in $(3) ; do \
+ if grep -qs ^$$$$dep$$$$ $(ERLANG_MK_TMP)/query.log; then \
+ :; \
+ else \
+ echo $$$$dep >> $(ERLANG_MK_TMP)/query.log; \
+ $(MAKE) -C $(DEPS_DIR)/$$$$dep $$@ QUERY="$(QUERY)" IS_DEP=1 || true; \
+ fi \
+ done)
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) touch $(4)
+ $(verbose) cat $(4)
+endif
+endef
+
+clean-tmp-query.log:
+ifeq ($(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_TMP)/query.log
+endif
+
+$(eval $(call query_target,query-deps,$(ERLANG_MK_RECURSIVE_DEPS_LIST),$(BUILD_DEPS) $(DEPS),$(ERLANG_MK_QUERY_DEPS_FILE)))
+$(eval $(call query_target,query-doc-deps,$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST),$(DOC_DEPS),$(ERLANG_MK_QUERY_DOC_DEPS_FILE)))
+$(eval $(call query_target,query-rel-deps,$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST),$(REL_DEPS),$(ERLANG_MK_QUERY_REL_DEPS_FILE)))
+$(eval $(call query_target,query-test-deps,$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST),$(TEST_DEPS),$(ERLANG_MK_QUERY_TEST_DEPS_FILE)))
+$(eval $(call query_target,query-shell-deps,$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST),$(SHELL_DEPS),$(ERLANG_MK_QUERY_SHELL_DEPS_FILE)))
diff --git a/deps/rabbitmq_event_exchange/examples/java/QueueEvents.java b/deps/rabbitmq_event_exchange/examples/java/QueueEvents.java
new file mode 100644
index 0000000000..7f69c4bb9a
--- /dev/null
+++ b/deps/rabbitmq_event_exchange/examples/java/QueueEvents.java
@@ -0,0 +1,44 @@
+
+import com.rabbitmq.client.AMQP;
+import com.rabbitmq.client.Channel;
+import com.rabbitmq.client.Connection;
+import com.rabbitmq.client.ConnectionFactory;
+import com.rabbitmq.client.Consumer;
+import com.rabbitmq.client.DefaultConsumer;
+import com.rabbitmq.client.Envelope;
+
+import java.io.IOException;
+import java.util.Map;
+import java.util.concurrent.TimeoutException;
+
+public class QueueEvents {
+ public static void main(String[] args) throws IOException, InterruptedException, TimeoutException {
+ ConnectionFactory f = new ConnectionFactory();
+ Connection c = f.newConnection();
+ Channel ch = c.createChannel();
+ String q = ch.queueDeclare().getQueue();
+ ch.queueBind(q, "amq.rabbitmq.event", "queue.*");
+
+ Consumer consumer = new DefaultConsumer(ch) {
+ @Override
+ public void handleDelivery(String consumerTag, Envelope envelope, AMQP.BasicProperties properties, byte[] body) throws IOException {
+ String event = envelope.getRoutingKey();
+ Map<String, Object> headers = properties.getHeaders();
+ String name = headers.get("name").toString();
+ String vhost = headers.get("vhost").toString();
+
+ if (event.equals("queue.created")) {
+ boolean durable = (Boolean) headers.get("durable");
+ String durableString = durable ? " (durable)" : " (transient)";
+ System.out.println("Created: " + name + " in " + vhost + durableString);
+ }
+ else /* queue.deleted is the only other possibility */ {
+ System.out.println("Deleted: " + name + " in " + vhost);
+ }
+ }
+ };
+ ch.basicConsume(q, true, consumer);
+ System.out.println("QUEUE EVENTS");
+ System.out.println("============\n");
+ }
+}
diff --git a/deps/rabbitmq_event_exchange/include/rabbit_event_exchange.hrl b/deps/rabbitmq_event_exchange/include/rabbit_event_exchange.hrl
new file mode 100644
index 0000000000..b977474d44
--- /dev/null
+++ b/deps/rabbitmq_event_exchange/include/rabbit_event_exchange.hrl
@@ -0,0 +1 @@
+-define(EXCH_NAME, <<"amq.rabbitmq.event">>).
diff --git a/deps/rabbitmq_event_exchange/priv/schema/rabbitmq_event_exchange.schema b/deps/rabbitmq_event_exchange/priv/schema/rabbitmq_event_exchange.schema
new file mode 100644
index 0000000000..c8b2efe5ac
--- /dev/null
+++ b/deps/rabbitmq_event_exchange/priv/schema/rabbitmq_event_exchange.schema
@@ -0,0 +1,7 @@
+{mapping, "event_exchange.vhost", "rabbitmq_event_exchange.vhost",
+ [{datatype, string}]}.
+
+{translation, "rabbitmq_event_exchange.vhost",
+fun(Conf) ->
+ list_to_binary(cuttlefish:conf_get("event_exchange.vhost", Conf))
+end}.
diff --git a/deps/rabbitmq_event_exchange/rabbitmq-components.mk b/deps/rabbitmq_event_exchange/rabbitmq-components.mk
new file mode 100644
index 0000000000..b2a3be8b35
--- /dev/null
+++ b/deps/rabbitmq_event_exchange/rabbitmq-components.mk
@@ -0,0 +1,359 @@
+ifeq ($(.DEFAULT_GOAL),)
+# Set the default goal to `all` because this file defines some targets
+# before erlang.mk is included, which would otherwise cause the wrong
+# target to become the default.
+.DEFAULT_GOAL = all
+endif
+
+# PROJECT_VERSION defaults to:
+# 1. the version exported by rabbitmq-server-release;
+# 2. the version stored in `git-revisions.txt`, if it exists;
+# 3. a version based on git-describe(1), if it is a Git clone;
+# 4. 0.0.0
+
+PROJECT_VERSION := $(RABBITMQ_VERSION)
+
+ifeq ($(PROJECT_VERSION),)
+PROJECT_VERSION := $(shell \
+if test -f git-revisions.txt; then \
+ head -n1 git-revisions.txt | \
+ awk '{print $$$(words $(PROJECT_DESCRIPTION) version);}'; \
+else \
+ (git describe --dirty --abbrev=7 --tags --always --first-parent \
+ 2>/dev/null || echo rabbitmq_v0_0_0) | \
+ sed -e 's/^rabbitmq_v//' -e 's/^v//' -e 's/_/./g' -e 's/-/+/' \
+ -e 's/-/./g'; \
+fi)
+endif
+
+# --------------------------------------------------------------------
+# RabbitMQ components.
+# --------------------------------------------------------------------
+
+# For RabbitMQ repositories, we want to checkout branches which match
+# the parent project. For instance, if the parent project is on a
+# release tag, dependencies must be on the same release tag. If the
+# parent project is on a topic branch, dependencies must be on the same
+# topic branch or fallback to `stable` or `master` whichever was the
+# base of the topic branch.
+
+dep_amqp_client = git_rmq rabbitmq-erlang-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_amqp10_client = git_rmq rabbitmq-amqp1.0-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_amqp10_common = git_rmq rabbitmq-amqp1.0-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbit = git_rmq rabbitmq-server $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbit_common = git_rmq rabbitmq-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_amqp1_0 = git_rmq rabbitmq-amqp1.0 $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_amqp = git_rmq rabbitmq-auth-backend-amqp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_cache = git_rmq rabbitmq-auth-backend-cache $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_http = git_rmq rabbitmq-auth-backend-http $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_ldap = git_rmq rabbitmq-auth-backend-ldap $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_oauth2 = git_rmq rabbitmq-auth-backend-oauth2 $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_mechanism_ssl = git_rmq rabbitmq-auth-mechanism-ssl $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_aws = git_rmq rabbitmq-aws $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_boot_steps_visualiser = git_rmq rabbitmq-boot-steps-visualiser $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_cli = git_rmq rabbitmq-cli $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_codegen = git_rmq rabbitmq-codegen $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_consistent_hash_exchange = git_rmq rabbitmq-consistent-hash-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_ct_client_helpers = git_rmq rabbitmq-ct-client-helpers $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_ct_helpers = git_rmq rabbitmq-ct-helpers $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_delayed_message_exchange = git_rmq rabbitmq-delayed-message-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_dotnet_client = git_rmq rabbitmq-dotnet-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_event_exchange = git_rmq rabbitmq-event-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_federation = git_rmq rabbitmq-federation $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_federation_management = git_rmq rabbitmq-federation-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_java_client = git_rmq rabbitmq-java-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_client = git_rmq rabbitmq-jms-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_cts = git_rmq rabbitmq-jms-cts $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_topic_exchange = git_rmq rabbitmq-jms-topic-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_lvc_exchange = git_rmq rabbitmq-lvc-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management = git_rmq rabbitmq-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_agent = git_rmq rabbitmq-management-agent $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_exchange = git_rmq rabbitmq-management-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_themes = git_rmq rabbitmq-management-themes $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_message_timestamp = git_rmq rabbitmq-message-timestamp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_metronome = git_rmq rabbitmq-metronome $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_mqtt = git_rmq rabbitmq-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_objc_client = git_rmq rabbitmq-objc-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_aws = git_rmq rabbitmq-peer-discovery-aws $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_common = git_rmq rabbitmq-peer-discovery-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_consul = git_rmq rabbitmq-peer-discovery-consul $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_etcd = git_rmq rabbitmq-peer-discovery-etcd $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_k8s = git_rmq rabbitmq-peer-discovery-k8s $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_prometheus = git_rmq rabbitmq-prometheus $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_random_exchange = git_rmq rabbitmq-random-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_recent_history_exchange = git_rmq rabbitmq-recent-history-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_routing_node_stamp = git_rmq rabbitmq-routing-node-stamp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_rtopic_exchange = git_rmq rabbitmq-rtopic-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_server_release = git_rmq rabbitmq-server-release $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_sharding = git_rmq rabbitmq-sharding $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_shovel = git_rmq rabbitmq-shovel $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_shovel_management = git_rmq rabbitmq-shovel-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_stomp = git_rmq rabbitmq-stomp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_stream = git_rmq rabbitmq-stream $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_toke = git_rmq rabbitmq-toke $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_top = git_rmq rabbitmq-top $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_tracing = git_rmq rabbitmq-tracing $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_trust_store = git_rmq rabbitmq-trust-store $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_test = git_rmq rabbitmq-test $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_dispatch = git_rmq rabbitmq-web-dispatch $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_stomp = git_rmq rabbitmq-web-stomp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_stomp_examples = git_rmq rabbitmq-web-stomp-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt = git_rmq rabbitmq-web-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt_examples = git_rmq rabbitmq-web-mqtt-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_website = git_rmq rabbitmq-website $(current_rmq_ref) $(base_rmq_ref) live master
+dep_toke = git_rmq toke $(current_rmq_ref) $(base_rmq_ref) master
+
+dep_rabbitmq_public_umbrella = git_rmq rabbitmq-public-umbrella $(current_rmq_ref) $(base_rmq_ref) master
+
+# Third-party dependency version pinning.
+#
+# We pin them in this file, which is copied into all projects, to
+# ensure they all use the same versions. This avoids conflicts and
+# makes it possible to work with rabbitmq-public-umbrella.
+
+dep_accept = hex 0.3.5
+dep_cowboy = hex 2.8.0
+dep_cowlib = hex 2.9.1
+dep_jsx = hex 2.11.0
+dep_lager = hex 3.8.0
+dep_prometheus = git https://github.com/deadtrickster/prometheus.erl.git master
+dep_ra = git https://github.com/rabbitmq/ra.git master
+dep_ranch = hex 1.7.1
+dep_recon = hex 2.5.1
+dep_observer_cli = hex 1.5.4
+dep_stdout_formatter = hex 0.2.4
+dep_sysmon_handler = hex 1.3.0
+
+RABBITMQ_COMPONENTS = amqp_client \
+ amqp10_common \
+ amqp10_client \
+ rabbit \
+ rabbit_common \
+ rabbitmq_amqp1_0 \
+ rabbitmq_auth_backend_amqp \
+ rabbitmq_auth_backend_cache \
+ rabbitmq_auth_backend_http \
+ rabbitmq_auth_backend_ldap \
+ rabbitmq_auth_backend_oauth2 \
+ rabbitmq_auth_mechanism_ssl \
+ rabbitmq_aws \
+ rabbitmq_boot_steps_visualiser \
+ rabbitmq_cli \
+ rabbitmq_codegen \
+ rabbitmq_consistent_hash_exchange \
+ rabbitmq_ct_client_helpers \
+ rabbitmq_ct_helpers \
+ rabbitmq_delayed_message_exchange \
+ rabbitmq_dotnet_client \
+ rabbitmq_event_exchange \
+ rabbitmq_federation \
+ rabbitmq_federation_management \
+ rabbitmq_java_client \
+ rabbitmq_jms_client \
+ rabbitmq_jms_cts \
+ rabbitmq_jms_topic_exchange \
+ rabbitmq_lvc_exchange \
+ rabbitmq_management \
+ rabbitmq_management_agent \
+ rabbitmq_management_exchange \
+ rabbitmq_management_themes \
+ rabbitmq_message_timestamp \
+ rabbitmq_metronome \
+ rabbitmq_mqtt \
+ rabbitmq_objc_client \
+ rabbitmq_peer_discovery_aws \
+ rabbitmq_peer_discovery_common \
+ rabbitmq_peer_discovery_consul \
+ rabbitmq_peer_discovery_etcd \
+ rabbitmq_peer_discovery_k8s \
+ rabbitmq_prometheus \
+ rabbitmq_random_exchange \
+ rabbitmq_recent_history_exchange \
+ rabbitmq_routing_node_stamp \
+ rabbitmq_rtopic_exchange \
+ rabbitmq_server_release \
+ rabbitmq_sharding \
+ rabbitmq_shovel \
+ rabbitmq_shovel_management \
+ rabbitmq_stomp \
+ rabbitmq_stream \
+ rabbitmq_toke \
+ rabbitmq_top \
+ rabbitmq_tracing \
+ rabbitmq_trust_store \
+ rabbitmq_web_dispatch \
+ rabbitmq_web_mqtt \
+ rabbitmq_web_mqtt_examples \
+ rabbitmq_web_stomp \
+ rabbitmq_web_stomp_examples \
+ rabbitmq_website
+
+# Erlang.mk does not rebuild dependencies once they have been
+# compiled, except for those listed in the `$(FORCE_REBUILD)`
+# variable.
+#
+# We want all RabbitMQ components to always be rebuilt: this eases
+# the work on several components at the same time.
+
+FORCE_REBUILD = $(RABBITMQ_COMPONENTS)
+
+# Several components have a custom erlang.mk/build.config, mainly
+# to disable eunit. Therefore, we can't use the top-level project's
+# erlang.mk copy.
+NO_AUTOPATCH += $(RABBITMQ_COMPONENTS)
+
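+# Compute $(current_rmq_ref): the Git branch currently checked out, or
+# "master" outside of a Git clone. Dependencies fetched with git_rmq
+# use it to pick a matching branch (see dep_fetch_git_rmq below).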
+ifeq ($(origin current_rmq_ref),undefined)
+ifneq ($(wildcard .git),)
+current_rmq_ref := $(shell (\
+ ref=$$(LANG=C git branch --list | awk '/^\* \(.*detached / {ref=$$0; sub(/.*detached [^ ]+ /, "", ref); sub(/\)$$/, "", ref); print ref; exit;} /^\* / {ref=$$0; sub(/^\* /, "", ref); print ref; exit}');\
+ if test "$$(git rev-parse --short HEAD)" != "$$ref"; then echo "$$ref"; fi))
+else
+current_rmq_ref := master
+endif
+endif
+export current_rmq_ref
+
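+# Compute $(base_rmq_ref): the branch (master by default) that git_rmq
+# dependencies fall back to when $(current_rmq_ref) does not exist in
+# their repositories.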
+ifeq ($(origin base_rmq_ref),undefined)
+ifneq ($(wildcard .git),)
+possible_base_rmq_ref := master
+ifeq ($(possible_base_rmq_ref),$(current_rmq_ref))
+base_rmq_ref := $(current_rmq_ref)
+else
+base_rmq_ref := $(shell \
+ (git rev-parse --verify -q master >/dev/null && \
+ git rev-parse --verify -q $(possible_base_rmq_ref) >/dev/null && \
+ git merge-base --is-ancestor $$(git merge-base master HEAD) $(possible_base_rmq_ref) && \
+ echo $(possible_base_rmq_ref)) || \
+ echo master)
+endif
+else
+base_rmq_ref := master
+endif
+endif
+export base_rmq_ref
+
+# Repository URL selection.
+#
+# First, we infer other components' location from the current project
+# repository URL, if it's a Git repository:
+# - We take the "origin" remote URL as the base
+# - The current project name and repository name are replaced by the
+#   target's equivalents:
+# eg. rabbitmq-common is replaced by rabbitmq-codegen
+# eg. rabbit_common is replaced by rabbitmq_codegen
+#
+# If cloning from this computed location fails, we fall back to the
+# RabbitMQ upstream repository on GitHub.
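+#
+# A sketch with hypothetical URLs: if this clone's "origin" remote is
+#   https://github.com/me/rabbitmq-federation.git
+# then fetching the rabbitmq_codegen component first tries
+#   https://github.com/me/rabbitmq-codegen.git
+# and, if that clone fails, falls back to
+#   https://github.com/rabbitmq/rabbitmq-codegen.git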
+
+# Macro to transform eg. "rabbit_common" to "rabbitmq-common".
+rmq_cmp_repo_name = $(word 2,$(dep_$(1)))
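+# For example, given the dep_* entries above,
+#   $(call rmq_cmp_repo_name,rabbitmq_federation)
+# returns the second word of $(dep_rabbitmq_federation), i.e.
+# "rabbitmq-federation".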
+
+# Upstream URL for the current project.
+RABBITMQ_COMPONENT_REPO_NAME := $(call rmq_cmp_repo_name,$(PROJECT))
+RABBITMQ_UPSTREAM_FETCH_URL ?= https://github.com/rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git
+RABBITMQ_UPSTREAM_PUSH_URL ?= git@github.com:rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git
+
+# Current URL for the current project. If this is not a Git clone,
+# default to the upstream Git repository.
+ifneq ($(wildcard .git),)
+git_origin_fetch_url := $(shell git config remote.origin.url)
+git_origin_push_url := $(shell git config remote.origin.pushurl || git config remote.origin.url)
+RABBITMQ_CURRENT_FETCH_URL ?= $(git_origin_fetch_url)
+RABBITMQ_CURRENT_PUSH_URL ?= $(git_origin_push_url)
+else
+RABBITMQ_CURRENT_FETCH_URL ?= $(RABBITMQ_UPSTREAM_FETCH_URL)
+RABBITMQ_CURRENT_PUSH_URL ?= $(RABBITMQ_UPSTREAM_PUSH_URL)
+endif
+
+# Macro to replace the following pattern:
+# 1. /foo.git -> /bar.git
+# 2. /foo -> /bar
+# 3. /foo/ -> /bar/
+subst_repo_name = $(patsubst %/$(1)/%,%/$(2)/%,$(patsubst %/$(1),%/$(2),$(patsubst %/$(1).git,%/$(2).git,$(3))))
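+#
+# A sketch with a hypothetical URL:
+#   $(call subst_repo_name,rabbitmq-server,rabbitmq-codegen,https://github.com/me/rabbitmq-server.git)
+# matches pattern 1 and yields
+#   https://github.com/me/rabbitmq-codegen.git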
+
+# Macro to replace both the project's name (eg. "rabbit_common") and
+# repository name (eg. "rabbitmq-common") with the target's equivalents.
+#
+# This macro is kept on one line because we don't want whitespace in
+# the returned value, as it's used in $(dep_fetch_git_rmq) in a shell
+# single-quoted string.
+dep_rmq_repo = $(if $(dep_$(2)),$(call subst_repo_name,$(PROJECT),$(2),$(call subst_repo_name,$(RABBITMQ_COMPONENT_REPO_NAME),$(call rmq_cmp_repo_name,$(2)),$(1))),$(pkg_$(1)_repo))
+
+dep_rmq_commits = $(if $(dep_$(1)), \
+ $(wordlist 3,$(words $(dep_$(1))),$(dep_$(1))), \
+ $(pkg_$(1)_commit))
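+# For example, dep_rabbitmq_website above ends with
+# "$(current_rmq_ref) $(base_rmq_ref) live master", so
+# $(call dep_rmq_commits,rabbitmq_website) returns those four refs,
+# which dep_fetch_git_rmq below tries to check out in order.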
+
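+# Fetch recipe for the git_rmq dependency type: clone from the URL
+# computed from this clone's "origin" first, fall back to the RabbitMQ
+# upstream URL, check out the first ref among the dep_rmq_commits list
+# that exists, and point the push URL at the matching repository.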
+define dep_fetch_git_rmq
+ fetch_url1='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_FETCH_URL),$(1))'; \
+ fetch_url2='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_FETCH_URL),$(1))'; \
+ if test "$$$$fetch_url1" != '$(RABBITMQ_CURRENT_FETCH_URL)' && \
+ git clone -q -n -- "$$$$fetch_url1" $(DEPS_DIR)/$(call dep_name,$(1)); then \
+ fetch_url="$$$$fetch_url1"; \
+ push_url='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_PUSH_URL),$(1))'; \
+ elif git clone -q -n -- "$$$$fetch_url2" $(DEPS_DIR)/$(call dep_name,$(1)); then \
+ fetch_url="$$$$fetch_url2"; \
+ push_url='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_PUSH_URL),$(1))'; \
+ fi; \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && ( \
+ $(foreach ref,$(call dep_rmq_commits,$(1)), \
+ git checkout -q $(ref) >/dev/null 2>&1 || \
+ ) \
+ (echo "error: no valid pathspec among: $(call dep_rmq_commits,$(1))" \
+ 1>&2 && false) ) && \
+ (test "$$$$fetch_url" = "$$$$push_url" || \
+ git remote set-url --push origin "$$$$push_url")
+endef
+
+# --------------------------------------------------------------------
+# Component distribution.
+# --------------------------------------------------------------------
+
+list-dist-deps::
+ @:
+
+prepare-dist::
+ @:
+
+# --------------------------------------------------------------------
+# Umbrella-specific settings.
+# --------------------------------------------------------------------
+
+# If the top-level project is a RabbitMQ component, we override
+# $(DEPS_DIR) for this project to point to the top-level's one.
+#
+# We also verify that the guessed DEPS_DIR is actually named `deps`,
+# to rule out any situation where finding a `rabbitmq-components.mk`
+# in a parent directory is just a coincidence.
+
+possible_deps_dir_1 = $(abspath ..)
+possible_deps_dir_2 = $(abspath ../../..)
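+
+# A sketch with a hypothetical layout: for a plugin checked out at
+#   /path/to/umbrella/deps/rabbitmq_federation
+# possible_deps_dir_1 is /path/to/umbrella/deps, and DEPS_DIR is
+# overridden to it when the umbrella root (its parent) contains a
+# rabbitmq-components.mk.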
+
+ifeq ($(notdir $(possible_deps_dir_1)),deps)
+ifneq ($(wildcard $(possible_deps_dir_1)/../rabbitmq-components.mk),)
+deps_dir_overriden = 1
+DEPS_DIR ?= $(possible_deps_dir_1)
+DISABLE_DISTCLEAN = 1
+endif
+endif
+
+ifeq ($(deps_dir_overriden),)
+ifeq ($(notdir $(possible_deps_dir_2)),deps)
+ifneq ($(wildcard $(possible_deps_dir_2)/../rabbitmq-components.mk),)
+deps_dir_overriden = 1
+DEPS_DIR ?= $(possible_deps_dir_2)
+DISABLE_DISTCLEAN = 1
+endif
+endif
+endif
+
+ifneq ($(wildcard UMBRELLA.md),)
+DISABLE_DISTCLEAN = 1
+endif
+
+# We disable `make distclean` so $(DEPS_DIR) is not accidentally removed.
+
+ifeq ($(DISABLE_DISTCLEAN),1)
+ifneq ($(filter distclean distclean-deps,$(MAKECMDGOALS)),)
+SKIP_DEPS = 1
+endif
+endif
diff --git a/deps/rabbitmq_event_exchange/src/rabbit_event_exchange_decorator.erl b/deps/rabbitmq_event_exchange/src/rabbit_event_exchange_decorator.erl
new file mode 100644
index 0000000000..ea7fffafcd
--- /dev/null
+++ b/deps/rabbitmq_event_exchange/src/rabbit_event_exchange_decorator.erl
@@ -0,0 +1,68 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2018-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_event_exchange_decorator).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+-include("rabbit_event_exchange.hrl").
+
+-rabbit_boot_step({?MODULE,
+ [{description, "event exchange decorator"},
+ {mfa, {rabbit_registry, register,
+ [exchange_decorator, <<"event">>, ?MODULE]}},
+ {requires, rabbit_registry},
+ {cleanup, {rabbit_registry, unregister,
+ [exchange_decorator, <<"event">>]}},
+ {enables, recovery}]}).
+
+-behaviour(rabbit_exchange_decorator).
+
+-export([description/0, serialise_events/1]).
+-export([create/2, delete/3, policy_changed/2,
+ add_binding/3, remove_bindings/3, route/2, active_for/1]).
+
+description() ->
+ [{description, <<"Event exchange decorator">>}].
+
+serialise_events(_) -> false.
+
+create(_, _) ->
+ ok.
+
+delete(_, _, _) ->
+ ok.
+
+policy_changed(_, _) ->
+ ok.
+
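+%% When the first binding to the event exchange is added (or the last
+%% one removed, below), broadcast a message to all rabbit_event
+%% handlers; rabbit_exchange_type_event:handle_info/2 uses it to
+%% toggle its has_any_bindings flag.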
+add_binding(transaction, #exchange{name = #resource{name = ?EXCH_NAME} = Name},
+ _Bs) ->
+ case rabbit_binding:list_for_source(Name) of
+ [_] ->
+ rpc:abcast(rabbit_event, {event_exchange, added_first_binding}),
+ ok;
+ _ ->
+ ok
+ end;
+add_binding(_, _X, _Bs) ->
+ ok.
+
+remove_bindings(transaction, #exchange{name = #resource{name = ?EXCH_NAME} = Name},
+ _Bs) ->
+ case rabbit_binding:list_for_source(Name) of
+ [] ->
+ rpc:abcast(rabbit_event, {event_exchange, removed_last_binding}),
+ ok;
+ _ ->
+ ok
+ end;
+remove_bindings(_, _X, _Bs) ->
+ ok.
+
+route(_, _) -> [].
+
+active_for(_) -> noroute.
diff --git a/deps/rabbitmq_event_exchange/src/rabbit_exchange_type_event.erl b/deps/rabbitmq_event_exchange/src/rabbit_exchange_type_event.erl
new file mode 100644
index 0000000000..eb511abaec
--- /dev/null
+++ b/deps/rabbitmq_event_exchange/src/rabbit_exchange_type_event.erl
@@ -0,0 +1,247 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_exchange_type_event).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+-include_lib("rabbit_common/include/rabbit_framing.hrl").
+-include("rabbit_event_exchange.hrl").
+
+-export([register/0, unregister/0]).
+-export([init/1, handle_call/2, handle_event/2, handle_info/2,
+ terminate/2, code_change/3]).
+-export([info/1, info/2]).
+
+-export([fmt_proplist/1]). %% testing
+
+-record(state, {vhost,
+ has_any_bindings
+ }).
+
+-rabbit_boot_step({rabbit_event_exchange,
+ [{description, "event exchange"},
+ {mfa, {?MODULE, register, []}},
+ {cleanup, {?MODULE, unregister, []}},
+ {requires, recovery},
+ {enables, routing_ready}]}).
+
+%%----------------------------------------------------------------------------
+
+info(_X) -> [].
+
+info(_X, _) -> [].
+
+register() ->
+ rabbit_exchange:declare(exchange(), topic, true, false, true, [],
+ ?INTERNAL_USER),
+ gen_event:add_handler(rabbit_event, ?MODULE, []).
+
+unregister() ->
+ rabbit_exchange:delete(exchange(), false, ?INTERNAL_USER),
+ gen_event:delete_handler(rabbit_event, ?MODULE, []).
+
+exchange() ->
+ exchange(get_vhost()).
+
+exchange(VHost) ->
+ _ = ensure_vhost_exists(VHost),
+ rabbit_misc:r(VHost, exchange, ?EXCH_NAME).
+
+%%----------------------------------------------------------------------------
+
+init([]) ->
+ VHost = get_vhost(),
+ X = rabbit_misc:r(VHost, exchange, ?EXCH_NAME),
+ HasBindings = case rabbit_binding:list_for_source(X) of
+ [] -> false;
+ _ -> true
+ end,
+ {ok, #state{vhost = VHost,
+ has_any_bindings = HasBindings}}.
+
+handle_call(_Request, State) -> {ok, not_understood, State}.
+
+handle_event(_, #state{has_any_bindings = false} = State) ->
+ {ok, State};
+handle_event(#event{type = Type,
+ props = Props,
+ timestamp = TS,
+ reference = none}, #state{vhost = VHost} = State) ->
+ case key(Type) of
+ ignore -> ok;
+ Key ->
+ Props2 = [{<<"timestamp_in_ms">>, TS} | Props],
+ PBasic = #'P_basic'{delivery_mode = 2,
+ headers = fmt_proplist(Props2),
+ %% 0-9-1 says the timestamp is a
+ %% "64 bit POSIX
+ %% timestamp". That's second
+ %% resolution, not millisecond.
+ timestamp = erlang:convert_time_unit(
+ TS, milli_seconds, seconds)},
+ Msg = rabbit_basic:message(exchange(VHost), Key, PBasic, <<>>),
+ rabbit_basic:publish(
+ rabbit_basic:delivery(false, false, Msg, undefined))
+ end,
+ {ok, State};
+handle_event(_Event, State) ->
+ {ok, State}.
+
+handle_info({event_exchange, added_first_binding}, State) ->
+ {ok, State#state{has_any_bindings = true}};
+handle_info({event_exchange, removed_last_binding}, State) ->
+ {ok, State#state{has_any_bindings = false}};
+handle_info(_Info, State) -> {ok, State}.
+
+terminate(_Arg, _State) -> ok.
+
+code_change(_OldVsn, State, _Extra) -> {ok, State}.
+
+%%----------------------------------------------------------------------------
+
+ensure_vhost_exists(VHost) ->
+ case rabbit_vhost:exists(VHost) of
+ false -> rabbit_vhost:add(VHost, ?INTERNAL_USER);
+ _ -> ok
+ end.
+
+%% Pattern matching is far more efficient than the string operations,
+%% so we enumerate all the keys we are aware of to speed up the handler.
+%% Any unknown or new key is processed as before (see the last function clause).
+key(queue_deleted) ->
+ <<"queue.deleted">>;
+key(queue_created) ->
+ <<"queue.created">>;
+key(exchange_created) ->
+ <<"exchange.created">>;
+key(exchange_deleted) ->
+ <<"exchange.deleted">>;
+key(binding_created) ->
+ <<"binding.created">>;
+key(connection_created) ->
+ <<"connection.created">>;
+key(connection_closed) ->
+ <<"connection.closed">>;
+key(channel_created) ->
+ <<"channel.created">>;
+key(channel_closed) ->
+ <<"channel.closed">>;
+key(consumer_created) ->
+ <<"consumer.created">>;
+key(consumer_deleted) ->
+ <<"consumer.deleted">>;
+key(queue_stats) ->
+ ignore;
+key(connection_stats) ->
+ ignore;
+key(policy_set) ->
+ <<"policy.set">>;
+key(policy_cleared) ->
+ <<"policy.cleared">>;
+key(parameter_set) ->
+ <<"parameter.set">>;
+key(parameter_cleared) ->
+ <<"parameter.cleared">>;
+key(vhost_created) ->
+ <<"vhost.created">>;
+key(vhost_deleted) ->
+ <<"vhost.deleted">>;
+key(vhost_limits_set) ->
+ <<"vhost.limits.set">>;
+key(vhost_limits_cleared) ->
+ <<"vhost.limits.cleared">>;
+key(user_authentication_success) ->
+ <<"user.authentication.success">>;
+key(user_authentication_failure) ->
+ <<"user.authentication.failure">>;
+key(user_created) ->
+ <<"user.created">>;
+key(user_deleted) ->
+ <<"user.deleted">>;
+key(user_password_changed) ->
+ <<"user.password.changed">>;
+key(user_password_cleared) ->
+ <<"user.password.cleared">>;
+key(user_tags_set) ->
+ <<"user.tags.set">>;
+key(permission_created) ->
+ <<"permission.created">>;
+key(permission_deleted) ->
+ <<"permission.deleted">>;
+key(topic_permission_created) ->
+ <<"topic.permission.created">>;
+key(topic_permission_deleted) ->
+ <<"topic.permission.deleted">>;
+key(alarm_set) ->
+ <<"alarm.set">>;
+key(alarm_cleared) ->
+ <<"alarm.cleared">>;
+key(shovel_worker_status) ->
+ <<"shovel.worker.status">>;
+key(shovel_worker_removed) ->
+ <<"shovel.worker.removed">>;
+key(federation_link_status) ->
+ <<"federation.link.status">>;
+key(federation_link_removed) ->
+ <<"federation.link.removed">>;
+key(S) ->
+ case string:tokens(atom_to_list(S), "_") of
+ [_, "stats"] -> ignore;
+ Tokens -> list_to_binary(string:join(Tokens, "."))
+ end.
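+%% For example: key(node_stats) matches [_, "stats"] above and returns
+%% 'ignore', while an unknown key such as key(some_new_event) yields
+%% <<"some.new.event">>.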
+
+fmt_proplist(Props) ->
+ lists:foldl(fun({K, V}, Acc) ->
+ case fmt(a2b(K), V) of
+ L when is_list(L) -> lists:append(L, Acc);
+ T -> [T | Acc]
+ end
+ end, [], Props).
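+%% For example (a sketch):
+%%   fmt_proplist([{name, <<"q1">>}, {durable, true}])
+%% returns [{<<"durable">>, bool, true}, {<<"name">>, longstr, <<"q1">>}];
+%% lists:foldl/3 prepends to the accumulator, so the output order is
+%% reversed relative to the input.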
+
+fmt(K, #resource{virtual_host = VHost,
+ name = Name}) -> [{K, longstr, Name},
+ {<<"vhost">>, longstr, VHost}];
+fmt(K, true) -> {K, bool, true};
+fmt(K, false) -> {K, bool, false};
+fmt(K, V) when is_atom(V) -> {K, longstr, atom_to_binary(V, utf8)};
+fmt(K, V) when is_integer(V) -> {K, long, V};
+fmt(K, V) when is_number(V) -> {K, float, V};
+fmt(K, V) when is_binary(V) -> {K, longstr, V};
+fmt(K, [{_, _}|_] = Vs) -> {K, table, fmt_proplist(Vs)};
+fmt(K, Vs) when is_list(Vs) -> {K, array, [fmt(V) || V <- Vs]};
+fmt(K, V) when is_pid(V) -> {K, longstr,
+ list_to_binary(rabbit_misc:pid_to_string(V))};
+fmt(K, V) -> {K, longstr,
+ list_to_binary(
+ rabbit_misc:format("~1000000000p", [V]))}.
+
+%% Exactly the same as fmt/2, duplicated only for performance reasons.
+fmt(true) -> {bool, true};
+fmt(false) -> {bool, false};
+fmt(V) when is_atom(V) -> {longstr, atom_to_binary(V, utf8)};
+fmt(V) when is_integer(V) -> {long, V};
+fmt(V) when is_number(V) -> {float, V};
+fmt(V) when is_binary(V) -> {longstr, V};
+fmt([{_, _}|_] = Vs) -> {table, fmt_proplist(Vs)};
+fmt(Vs) when is_list(Vs) -> {array, [fmt(V) || V <- Vs]};
+fmt(V) when is_pid(V) -> {longstr,
+ list_to_binary(rabbit_misc:pid_to_string(V))};
+fmt(V) -> {longstr,
+ list_to_binary(
+ rabbit_misc:format("~1000000000p", [V]))}.
+
+a2b(A) when is_atom(A) -> atom_to_binary(A, utf8);
+a2b(B) when is_binary(B) -> B.
+
+get_vhost() ->
+ case application:get_env(rabbitmq_event_exchange, vhost) of
+ undefined ->
+ {ok, V} = application:get_env(rabbit, default_vhost),
+ V;
+ {ok, V} ->
+ V
+ end.
diff --git a/deps/rabbitmq_event_exchange/test/config_schema_SUITE.erl b/deps/rabbitmq_event_exchange/test/config_schema_SUITE.erl
new file mode 100644
index 0000000000..ac20e857ab
--- /dev/null
+++ b/deps/rabbitmq_event_exchange/test/config_schema_SUITE.erl
@@ -0,0 +1,54 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2018-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(config_schema_SUITE).
+
+-compile(export_all).
+
+all() ->
+ [
+ run_snippets
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ Config1 = rabbit_ct_helpers:run_setup_steps(Config),
+ rabbit_ct_config_schema:init_schemas(rabbitmq_event_exchange, Config1).
+
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, Testcase}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_testcase(Testcase, Config) ->
+ Config1 = rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()),
+ rabbit_ct_helpers:testcase_finished(Config1, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+run_snippets(Config) ->
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, run_snippets1, [Config]).
+
+run_snippets1(Config) ->
+ rabbit_ct_config_schema:run_snippets(Config).
diff --git a/deps/rabbitmq_event_exchange/test/config_schema_SUITE_data/rabbitmq_event_exchange.snippets b/deps/rabbitmq_event_exchange/test/config_schema_SUITE_data/rabbitmq_event_exchange.snippets
new file mode 100644
index 0000000000..2fceed017a
--- /dev/null
+++ b/deps/rabbitmq_event_exchange/test/config_schema_SUITE_data/rabbitmq_event_exchange.snippets
@@ -0,0 +1,19 @@
+[
+ {virtual_host1,
+ "event_exchange.vhost = /",
+ [
+ {rabbitmq_event_exchange, [
+ {vhost, <<"/">>}
+ ]}
+ ], [rabbitmq_event_exchange]
+ },
+
+ {virtual_host2,
+ "event_exchange.vhost = dev",
+ [
+ {rabbitmq_event_exchange, [
+ {vhost, <<"dev">>}
+ ]}
+ ], [rabbitmq_event_exchange]
+ }
+].
diff --git a/deps/rabbitmq_event_exchange/test/config_schema_SUITE_data/schema/rabbitmq_event_exchange.schema b/deps/rabbitmq_event_exchange/test/config_schema_SUITE_data/schema/rabbitmq_event_exchange.schema
new file mode 100644
index 0000000000..c8b2efe5ac
--- /dev/null
+++ b/deps/rabbitmq_event_exchange/test/config_schema_SUITE_data/schema/rabbitmq_event_exchange.schema
@@ -0,0 +1,7 @@
+{mapping, "event_exchange.vhost", "rabbitmq_event_exchange.vhost",
+ [{datatype, string}]}.
+
+{translation, "rabbitmq_event_exchange.vhost",
+fun(Conf) ->
+ list_to_binary(cuttlefish:conf_get("event_exchange.vhost", Conf))
+end}.
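+
+%% For example, the line "event_exchange.vhost = dev" in the
+%% configuration file becomes {rabbitmq_event_exchange,
+%% [{vhost, <<"dev">>}]}, as exercised by the snippets above.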
diff --git a/deps/rabbitmq_event_exchange/test/system_SUITE.erl b/deps/rabbitmq_event_exchange/test/system_SUITE.erl
new file mode 100644
index 0000000000..79b819b962
--- /dev/null
+++ b/deps/rabbitmq_event_exchange/test/system_SUITE.erl
@@ -0,0 +1,490 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(system_SUITE).
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-compile(export_all).
+
+-define(TAG, <<"user_who_performed_action">>).
+
+all() ->
+ [
+ queue_created,
+ authentication,
+ audit_queue,
+ audit_exchange,
+ audit_binding,
+ audit_vhost,
+ audit_vhost_deletion,
+ audit_channel,
+ audit_connection,
+ audit_direct_connection,
+ audit_consumer,
+ audit_vhost_internal_parameter,
+ audit_parameter,
+ audit_policy,
+ audit_vhost_limit,
+ audit_user,
+ audit_user_password,
+ audit_user_tags,
+ audit_permission,
+ audit_topic_permission,
+ resource_alarm,
+ unregister
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, ?MODULE}
+ ]),
+ Config2 = rabbit_ct_helpers:run_setup_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()),
+ Config2.
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_group(_, Config) ->
+ Config.
+
+end_per_group(_, Config) ->
+ Config.
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+
+%% -------------------------------------------------------------------
+%% Testsuite cases
+%% -------------------------------------------------------------------
+
+%% Only really tests that we're not completely broken.
+queue_created(Config) ->
+ Now = os:system_time(seconds),
+
+ Ch = declare_event_queue(Config, <<"queue.*">>),
+
+ #'queue.declare_ok'{queue = Q2} =
+ amqp_channel:call(Ch, #'queue.declare'{exclusive = true}),
+
+ receive
+ {#'basic.deliver'{routing_key = Key},
+ #amqp_msg{props = #'P_basic'{headers = Headers, timestamp = TS}}} ->
+ %% timestamp is within the last 5 seconds
+ true = ((TS - Now) =< 5),
+ <<"queue.created">> = Key,
+ {longstr, Q2} = rabbit_misc:table_lookup(Headers, <<"name">>)
+ end,
+
+ rabbit_ct_client_helpers:close_channel(Ch),
+ ok.
+
+
+authentication(Config) ->
+ Ch = declare_event_queue(Config, <<"user.#">>),
+ Conn2 = rabbit_ct_client_helpers:open_unmanaged_connection(Config, 0),
+
+ receive
+ {#'basic.deliver'{routing_key = Key},
+ #amqp_msg{props = #'P_basic'{headers = Headers}}} ->
+ <<"user.authentication.success">> = Key,
+ undefined = rabbit_misc:table_lookup(Headers, <<"vhost">>),
+ {longstr, _PeerHost} = rabbit_misc:table_lookup(Headers, <<"peer_host">>),
+ {bool, false} = rabbit_misc:table_lookup(Headers, <<"ssl">>)
+ end,
+
+ amqp_connection:close(Conn2),
+ rabbit_ct_client_helpers:close_channel(Ch),
+ ok.
+
+audit_queue(Config) ->
+ Ch = declare_event_queue(Config, <<"queue.*">>),
+
+ #'queue.declare_ok'{queue = Q} =
+ amqp_channel:call(Ch, #'queue.declare'{exclusive = true}),
+
+ User = proplists:get_value(rmq_username, Config),
+ receive_user_in_event(<<"queue.created">>, User),
+
+ #'queue.delete_ok'{} =
+ amqp_channel:call(Ch, #'queue.delete'{queue = Q}),
+
+ receive_user_in_event(<<"queue.deleted">>, User),
+
+ rabbit_ct_client_helpers:close_channel(Ch),
+ ok.
+
+audit_exchange(Config) ->
+ Ch = declare_event_queue(Config, <<"exchange.*">>),
+
+ X = <<"exchange.audited">>,
+ #'exchange.declare_ok'{} =
+ amqp_channel:call(Ch, #'exchange.declare'{exchange = X,
+ type = <<"topic">>}),
+
+ User = proplists:get_value(rmq_username, Config),
+ receive_user_in_event(<<"exchange.created">>, User),
+
+ #'exchange.delete_ok'{} =
+ amqp_channel:call(Ch, #'exchange.delete'{exchange = X}),
+
+ receive_user_in_event(<<"exchange.deleted">>, User),
+
+ rabbit_ct_client_helpers:close_channel(Ch),
+ ok.
+
+audit_binding(Config) ->
+ Ch = declare_event_queue(Config, <<"binding.*">>),
+ %% The binding to the event exchange itself is the first queued event
+ User = proplists:get_value(rmq_username, Config),
+ receive_user_in_event(<<"binding.created">>, User),
+
+ #'queue.declare_ok'{queue = Q} =
+ amqp_channel:call(Ch, #'queue.declare'{exclusive = true}),
+
+ #'queue.bind_ok'{} =
+ amqp_channel:call(Ch, #'queue.bind'{queue = Q,
+ exchange = <<"amq.direct">>,
+ routing_key = <<"test">>}),
+ receive_user_in_event(<<"binding.created">>, User),
+
+ #'queue.unbind_ok'{} =
+ amqp_channel:call(Ch, #'queue.unbind'{queue = Q,
+ exchange = <<"amq.direct">>,
+ routing_key = <<"test">>}),
+ receive_user_in_event(<<"binding.deleted">>, User),
+
+ rabbit_ct_client_helpers:close_channel(Ch),
+ ok.
+
+audit_vhost(Config) ->
+ Ch = declare_event_queue(Config, <<"vhost.*">>),
+ User = <<"Bugs Bunny">>,
+
+ rabbit_ct_broker_helpers:add_vhost(Config, 0, <<"test-vhost">>, User),
+ receive_user_in_event(<<"vhost.created">>, User),
+
+ rabbit_ct_broker_helpers:delete_vhost(Config, 0, <<"test-vhost">>, User),
+ receive_user_in_event(<<"vhost.deleted">>, User),
+
+ rabbit_ct_client_helpers:close_channel(Ch),
+ ok.
+
+audit_vhost_deletion(Config) ->
+ Ch = declare_event_queue(Config, <<"queue.*">>),
+ ConnUser = proplists:get_value(rmq_username, Config),
+ User = <<"Bugs Bunny">>,
+ Vhost = <<"test-vhost">>,
+
+ rabbit_ct_broker_helpers:add_vhost(Config, 0, Vhost, User),
+ rabbit_ct_broker_helpers:set_full_permissions(Config, ConnUser, Vhost),
+ Conn = rabbit_ct_client_helpers:open_unmanaged_connection(Config, 0, Vhost),
+ {ok, Ch2} = amqp_connection:open_channel(Conn),
+
+    %% The user that creates the queue is the connection's user, not the vhost creator
+ #'queue.declare_ok'{queue = _Q} = amqp_channel:call(Ch2, #'queue.declare'{}),
+ receive_user_in_event(<<"queue.created">>, ConnUser),
+ ok = rabbit_ct_client_helpers:close_connection_and_channel(Conn, Ch2),
+
+ %% Validate that the user deleting the queue is the one used to delete the vhost,
+ %% not the original user that created the queue (the connection one)
+ rabbit_ct_broker_helpers:delete_vhost(Config, 0, Vhost, User),
+ receive_user_in_event(<<"queue.deleted">>, User),
+
+ rabbit_ct_client_helpers:close_channel(Ch),
+ ok.
+
+audit_channel(Config) ->
+ Ch = declare_event_queue(Config, <<"channel.*">>),
+ User = proplists:get_value(rmq_username, Config),
+
+ Conn = rabbit_ct_client_helpers:open_unmanaged_connection(Config),
+ {ok, Ch2} = amqp_connection:open_channel(Conn),
+ receive_user_in_event(<<"channel.created">>, User),
+
+ rabbit_ct_client_helpers:close_channel(Ch2),
+ receive_user_in_event(<<"channel.closed">>, User),
+
+ rabbit_ct_client_helpers:close_channel(Ch),
+ ok.
+
+audit_connection(Config) ->
+ Ch = declare_event_queue(Config, <<"connection.*">>),
+ User = proplists:get_value(rmq_username, Config),
+
+ Conn = rabbit_ct_client_helpers:open_unmanaged_connection(Config),
+ receive_user_in_event(<<"connection.created">>, User),
+
+ %% Username is not available in connection_close
+ rabbit_ct_client_helpers:close_connection(Conn),
+ receive_event(<<"connection.closed">>, ?TAG, undefined),
+
+ rabbit_ct_client_helpers:close_channel(Ch),
+ ok.
+
+audit_direct_connection(Config) ->
+ Ch = declare_event_queue(Config, <<"connection.*">>),
+ User = proplists:get_value(rmq_username, Config),
+
+ Conn = rabbit_ct_client_helpers:open_unmanaged_connection_direct(Config),
+ receive_user_in_event(<<"connection.created">>, User),
+
+ rabbit_ct_client_helpers:close_connection(Conn),
+ receive_event(<<"connection.closed">>, ?TAG, undefined),
+
+ rabbit_ct_client_helpers:close_channel(Ch),
+ ok.
+
+audit_consumer(Config) ->
+ Ch = declare_event_queue(Config, <<"consumer.*">>),
+ User = proplists:get_value(rmq_username, Config),
+ receive_user_in_event(<<"consumer.created">>, User),
+
+ #'queue.declare_ok'{queue = Q} =
+ amqp_channel:call(Ch, #'queue.declare'{exclusive = true}),
+ amqp_channel:subscribe(Ch, #'basic.consume'{queue = Q, no_ack = true},
+ self()),
+ CTag = receive #'basic.consume_ok'{consumer_tag = C} -> C end,
+ receive_user_in_event(<<"consumer.created">>, User),
+
+ amqp_channel:call(Ch, #'basic.cancel'{consumer_tag = CTag}),
+ receive_user_in_event(<<"consumer.deleted">>, User),
+
+ rabbit_ct_client_helpers:close_channel(Ch),
+ ok.
+
+audit_vhost_internal_parameter(Config) ->
+ Ch = declare_event_queue(Config, <<"parameter.*">>),
+ User = <<"Bugs Bunny">>,
+ Vhost = <<"test-vhost">>,
+
+ rabbit_ct_broker_helpers:add_vhost(Config, 0, Vhost, User),
+ rabbit_ct_broker_helpers:delete_vhost(Config, 0, Vhost, User),
+ receive_user_in_event(<<"parameter.set">>, User),
+ receive_user_in_event(<<"parameter.cleared">>, User),
+
+ rabbit_ct_client_helpers:close_channel(Ch),
+ ok.
+
+audit_parameter(Config) ->
+ Ch = declare_event_queue(Config, <<"parameter.*">>),
+ VHost = proplists:get_value(rmq_vhost, Config),
+ User = <<"Bugs Bunny">>,
+
+ ok = rabbit_ct_broker_helpers:set_parameter(
+ Config, 0, VHost, <<"vhost-limits">>, <<"limits">>,
+ [{<<"max-connections">>, 200}], User),
+ receive_user_in_event(<<"parameter.set">>, User),
+
+ ok = rabbit_ct_broker_helpers:clear_parameter(
+ Config, 0, VHost, <<"vhost-limits">>, <<"limits">>, User),
+ receive_user_in_event(<<"parameter.cleared">>, User),
+
+ rabbit_ct_client_helpers:close_channel(Ch),
+ ok.
+
+audit_policy(Config) ->
+ Ch = declare_event_queue(Config, <<"policy.*">>),
+ User = <<"Bugs Bunny">>,
+
+ rabbit_ct_broker_helpers:set_policy(Config, 0, <<".*">>, <<"all">>, <<"queues">>,
+ [{<<"ha-mode">>, <<"all">>}], User),
+ receive_user_in_event(<<"policy.set">>, User),
+
+ ok = rabbit_ct_broker_helpers:clear_policy(Config, 0, <<".*">>, User),
+ receive_user_in_event(<<"policy.cleared">>, User),
+
+ rabbit_ct_client_helpers:close_channel(Ch),
+ ok.
+
+audit_vhost_limit(Config) ->
+ Ch = declare_event_queue(Config, <<"vhost.limits.*">>),
+ VHost = proplists:get_value(rmq_vhost, Config),
+ User = <<"Bugs Bunny">>,
+
+ ok = rabbit_ct_broker_helpers:set_parameter(
+ Config, 0, VHost, <<"vhost-limits">>, <<"limits">>,
+ [{<<"max-connections">>, 200}], User),
+ receive_user_in_event(<<"vhost.limits.set">>, User),
+
+ ok = rabbit_ct_broker_helpers:clear_parameter(
+ Config, 0, VHost, <<"vhost-limits">>, <<"limits">>, User),
+ receive_user_in_event(<<"vhost.limits.cleared">>, User),
+
+ rabbit_ct_client_helpers:close_channel(Ch),
+ ok.
+
+audit_user(Config) ->
+ Ch = declare_event_queue(Config, <<"user.*">>),
+ ActingUser = <<"Bugs Bunny">>,
+ User = <<"Wabbit">>,
+
+ rabbit_ct_broker_helpers:add_user(Config, 0, User, User, ActingUser),
+ receive_user_in_event(<<"user.created">>, ActingUser),
+
+ rabbit_ct_broker_helpers:delete_user(Config, 0, User, ActingUser),
+ receive_user_in_event(<<"user.deleted">>, ActingUser),
+
+ rabbit_ct_client_helpers:close_channel(Ch),
+ ok.
+
+audit_user_password(Config) ->
+ Ch = declare_event_queue(Config, <<"user.password.*">>),
+ ActingUser = <<"Bugs Bunny">>,
+ User = <<"Wabbit">>,
+
+ rabbit_ct_broker_helpers:add_user(Config, 0, User, User, ActingUser),
+ rabbit_ct_broker_helpers:change_password(Config, 0, User, <<"pass">>, ActingUser),
+ receive_user_in_event(<<"user.password.changed">>, ActingUser),
+
+ rabbit_ct_broker_helpers:clear_password(Config, 0, User, ActingUser),
+ receive_user_in_event(<<"user.password.cleared">>, ActingUser),
+ rabbit_ct_broker_helpers:delete_user(Config, 0, User, ActingUser),
+
+ rabbit_ct_client_helpers:close_channel(Ch),
+ ok.
+
+audit_user_tags(Config) ->
+ Ch = declare_event_queue(Config, <<"user.tags.*">>),
+ ActingUser = <<"Bugs Bunny">>,
+ User = <<"Wabbit">>,
+
+ rabbit_ct_broker_helpers:add_user(Config, 0, User, User, ActingUser),
+ rabbit_ct_broker_helpers:set_user_tags(Config, 0, User, [management], ActingUser),
+ receive_user_in_event(<<"user.tags.set">>, ActingUser),
+
+ rabbit_ct_broker_helpers:delete_user(Config, 0, User, ActingUser),
+
+ rabbit_ct_client_helpers:close_channel(Ch),
+ ok.
+
+audit_permission(Config) ->
+ Ch = declare_event_queue(Config, <<"permission.*">>),
+ VHost = proplists:get_value(rmq_vhost, Config),
+ ActingUser = <<"Bugs Bunny">>,
+ User = <<"Wabbit">>,
+
+ rabbit_ct_broker_helpers:add_user(Config, 0, User, User, ActingUser),
+ rabbit_ct_broker_helpers:set_permissions(Config, 0, User, VHost, <<".*">>,
+ <<".*">>, <<".*">>, ActingUser),
+ receive_user_in_event(<<"permission.created">>, ActingUser),
+
+ rabbit_ct_broker_helpers:clear_permissions(Config, 0, User, VHost, ActingUser),
+ receive_user_in_event(<<"permission.deleted">>, ActingUser),
+ rabbit_ct_broker_helpers:delete_user(Config, 0, User, ActingUser),
+
+ rabbit_ct_client_helpers:close_channel(Ch),
+ ok.
+
+audit_topic_permission(Config) ->
+ Ch = declare_event_queue(Config, <<"topic.permission.*">>),
+ VHost = proplists:get_value(rmq_vhost, Config),
+ ActingUser = <<"Bugs Bunny">>,
+ User = <<"Wabbit">>,
+
+ rabbit_ct_broker_helpers:add_user(Config, 0, User, User, ActingUser),
+ rabbit_ct_broker_helpers:rpc(
+ Config, 0, rabbit_auth_backend_internal, set_topic_permissions,
+ [User, VHost, <<"amq.topic">>, "^a", "^a", ActingUser]),
+ receive_user_in_event(<<"topic.permission.created">>, ActingUser),
+
+ rabbit_ct_broker_helpers:rpc(
+ Config, 0, rabbit_auth_backend_internal, clear_topic_permissions,
+ [User, VHost, ActingUser]),
+ receive_user_in_event(<<"topic.permission.deleted">>, ActingUser),
+ rabbit_ct_broker_helpers:delete_user(Config, 0, User, ActingUser),
+
+ rabbit_ct_client_helpers:close_channel(Ch),
+ ok.
+
+resource_alarm(Config) ->
+ Ch = declare_event_queue(Config, <<"alarm.*">>),
+
+ Source = disk,
+ Node = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+
+ rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_alarm, set_alarm,
+ [{{resource_limit, Source, Node}, []}]),
+ receive_event(<<"alarm.set">>),
+
+ rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_alarm, clear_alarm,
+ [{resource_limit, Source, Node}]),
+ receive_event(<<"alarm.cleared">>),
+ rabbit_ct_client_helpers:close_channel(Ch),
+ ok.
+
+unregister(Config) ->
+ X = rabbit_misc:r(<<"/">>, exchange, <<"amq.rabbitmq.event">>),
+
+ ?assertMatch({ok, _},
+ rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_exchange,
+ lookup, [X])),
+
+ rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_exchange_type_event,
+ unregister, []),
+
+ ?assertEqual({error, not_found},
+ rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_exchange,
+ lookup, [X])),
+ ok.
+
+%% -------------------------------------------------------------------
+%% Helpers
+%% -------------------------------------------------------------------
+
+declare_event_queue(Config, RoutingKey) ->
+ Ch = rabbit_ct_client_helpers:open_channel(Config, 0),
+ #'queue.declare_ok'{queue = Q} =
+ amqp_channel:call(Ch, #'queue.declare'{exclusive = true}),
+ amqp_channel:call(Ch, #'queue.bind'{queue = Q,
+ exchange = <<"amq.rabbitmq.event">>,
+ routing_key = RoutingKey}),
+ amqp_channel:subscribe(Ch, #'basic.consume'{queue = Q, no_ack = true},
+ self()),
+ receive
+ #'basic.consume_ok'{} -> ok
+ end,
+ Ch.
+
+receive_user_in_event(Event, User) ->
+ receive_event(Event, ?TAG, {longstr, User}).
+
+receive_event(Event, Key, Value) ->
+ receive
+ {#'basic.deliver'{routing_key = RoutingKey},
+ #amqp_msg{props = #'P_basic'{headers = Headers}}} ->
+ Event = RoutingKey,
+ Value = rabbit_misc:table_lookup(Headers, Key)
+ after
+ 60000 ->
+ throw({receive_event_timeout, Event, Key, Value})
+ end.
+
+receive_event(Event) ->
+ receive
+ {#'basic.deliver'{routing_key = RoutingKey},
+ #amqp_msg{props = #'P_basic'{}}} ->
+ Event = RoutingKey
+ after
+ 60000 ->
+ throw({receive_event_timeout, Event})
+ end.
diff --git a/deps/rabbitmq_event_exchange/test/unit_SUITE.erl b/deps/rabbitmq_event_exchange/test/unit_SUITE.erl
new file mode 100644
index 0000000000..02f0ab143c
--- /dev/null
+++ b/deps/rabbitmq_event_exchange/test/unit_SUITE.erl
@@ -0,0 +1,54 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(unit_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-compile(export_all).
+
+all() -> [ encoding ].
+
+encoding(_) ->
+ T = fun (In, Exp) ->
+ ?assertEqual(
+ lists:sort(rabbit_exchange_type_event:fmt_proplist(In)),
+ lists:sort(Exp))
+ end,
+ T([{name, <<"test">>}],
+ [{<<"name">>, longstr, <<"test">>}]),
+ T([{name, rabbit_misc:r(<<"/">>, exchange, <<"test">>)}],
+ [{<<"name">>, longstr, <<"test">>},
+ {<<"vhost">>, longstr, <<"/">>}]),
+ T([{name, <<"test">>},
+ {number, 1},
+ {real, 1.0},
+ {bool, true},
+ {atom, hydrogen},
+ {weird, {1,2,3,[a|1],"a"}},
+ {list, [1,2,[a,b]]},
+ {proplist, [{foo, a},
+ {bar, [{baz, b},
+ {bash, c}]}]}
+ ],
+ [{<<"name">>, longstr, <<"test">>},
+ {<<"number">>, long, 1},
+ {<<"real">>, float, 1.0},
+ {<<"bool">>, bool, true},
+ {<<"atom">>, longstr, <<"hydrogen">>},
+ {<<"weird">>, longstr, <<"{1,2,3,[a|1],\"a\"}">>},
+ {<<"list">>, array, [{long, 1},
+ {long, 2},
+ {array, [{longstr, <<"a">>},
+ {longstr, <<"b">>}]}]},
+ {<<"proplist">>, table,
+ [{<<"bar">>, table, [{<<"bash">>, longstr, <<"c">>},
+ {<<"baz">>, longstr, <<"b">>}]},
+ {<<"foo">>, longstr, <<"a">>}]}
+ ]),
+ ok.
diff --git a/deps/rabbitmq_federation/.gitignore b/deps/rabbitmq_federation/.gitignore
new file mode 100644
index 0000000000..f05d110de3
--- /dev/null
+++ b/deps/rabbitmq_federation/.gitignore
@@ -0,0 +1,18 @@
+.sw?
+.*.sw?
+*.beam
+/.erlang.mk/
+/cover/
+/deps/
+/doc/
+/ebin/
+/escript/
+/escript.lock
+/logs/
+/plugins/
+/plugins.lock
+/sbin/
+/sbin.lock
+/xrefr
+
+rabbitmq_federation.d
diff --git a/deps/rabbitmq_federation/.travis.yml b/deps/rabbitmq_federation/.travis.yml
new file mode 100644
index 0000000000..489729b1cc
--- /dev/null
+++ b/deps/rabbitmq_federation/.travis.yml
@@ -0,0 +1,61 @@
+# vim:sw=2:et:
+
+os: linux
+dist: xenial
+language: elixir
+notifications:
+ email:
+ recipients:
+ - alerts@rabbitmq.com
+ on_success: never
+ on_failure: always
+addons:
+ apt:
+ packages:
+ - awscli
+cache:
+ apt: true
+env:
+ global:
+ - secure: NPk7yFzpxl8ZhheQvf1MG3dWTQq+8o1Fp+XvrBWr3PiQAn8TWHmlmxAt74LlTqRpu8CXINCOpVUYNWF9Gl4gZ0HerV1Gobj0hrF8/PVO6Qz7bqRhcnXj1ax1q4CMjzqyBtt09Pd06Vmx/hgEOQys8+kimpaftIWfBREKyjlYSss=
+ - secure: PjgBRIvkFXBaty94tOf4oomMSg9B9qdVLKmlb1o8DzE4KkCt2Cb3u00tqHYkvSzzgizSYsdff21TLrdQxw04aRtSVUIk2hqMt4tHbE9+sZL1Y8Cd75ZpjiSOYqnJtG0RbNxhTplCoyDOjaJWc9pjWgafHwHrZdo1wEgfzWDPWic=
+
+ # $base_rmq_ref is used by rabbitmq-components.mk to select the
+ # appropriate branch for dependencies.
+ - base_rmq_ref=master
+
+elixir:
+ - '1.9'
+otp_release:
+ - '21.3'
+ - '22.2'
+
+install:
+  # This being an Erlang project (we set the language to Elixir only
+  # to ensure it is installed), we don't want Travis to run mix(1)
+  # automatically, as it would break.
+ skip
+
+script:
+ # $current_rmq_ref is also used by rabbitmq-components.mk to select
+ # the appropriate branch for dependencies.
+ - make check-rabbitmq-components.mk
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+ - make xref
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+ - make tests
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+
+after_failure:
+ - |
+ cd "$TRAVIS_BUILD_DIR"
+ if test -d logs && test "$AWS_ACCESS_KEY_ID" && test "$AWS_SECRET_ACCESS_KEY"; then
+ archive_name="$(basename "$TRAVIS_REPO_SLUG")-$TRAVIS_JOB_NUMBER"
+
+ tar -c --transform "s/^logs/${archive_name}/" -f - logs | \
+ xz > "${archive_name}.tar.xz"
+
+ aws s3 cp "${archive_name}.tar.xz" s3://server-release-pipeline/travis-ci-logs/ \
+ --region eu-west-1 \
+ --acl public-read
+ fi
diff --git a/deps/rabbitmq_federation/CODE_OF_CONDUCT.md b/deps/rabbitmq_federation/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000000..08697906fd
--- /dev/null
+++ b/deps/rabbitmq_federation/CODE_OF_CONDUCT.md
@@ -0,0 +1,44 @@
+# Contributor Code of Conduct
+
+As contributors and maintainers of this project, and in the interest of fostering an open
+and welcoming community, we pledge to respect all people who contribute through reporting
+issues, posting feature requests, updating documentation, submitting pull requests or
+patches, and other activities.
+
+We are committed to making participation in this project a harassment-free experience for
+everyone, regardless of level of experience, gender, gender identity and expression,
+sexual orientation, disability, personal appearance, body size, race, ethnicity, age,
+religion, or nationality.
+
+Examples of unacceptable behavior by participants include:
+
+ * The use of sexualized language or imagery
+ * Personal attacks
+ * Trolling or insulting/derogatory comments
+ * Public or private harassment
+ * Publishing others' private information, such as physical or electronic addresses,
+ without explicit permission
+ * Other unethical or unprofessional conduct
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments,
+commits, code, wiki edits, issues, and other contributions that are not aligned to this
+Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors
+that they deem inappropriate, threatening, offensive, or harmful.
+
+By adopting this Code of Conduct, project maintainers commit themselves to fairly and
+consistently applying these principles to every aspect of managing this project. Project
+maintainers who do not follow or enforce the Code of Conduct may be permanently removed
+from the project team.
+
+This Code of Conduct applies both within project spaces and in public spaces when an
+individual is representing the project or its community.
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by
+contacting a project maintainer at [info@rabbitmq.com](mailto:info@rabbitmq.com). All complaints will
+be reviewed and investigated and will result in a response that is deemed necessary and
+appropriate to the circumstances. Maintainers are obligated to maintain confidentiality
+with regard to the reporter of an incident.
+
+This Code of Conduct is adapted from the
+[Contributor Covenant](https://contributor-covenant.org), version 1.3.0, available at
+[contributor-covenant.org/version/1/3/0/](https://contributor-covenant.org/version/1/3/0/)
diff --git a/deps/rabbitmq_federation/CONTRIBUTING.md b/deps/rabbitmq_federation/CONTRIBUTING.md
new file mode 100644
index 0000000000..592e7ced57
--- /dev/null
+++ b/deps/rabbitmq_federation/CONTRIBUTING.md
@@ -0,0 +1,99 @@
+Thank you for using RabbitMQ and for taking the time to contribute to the project.
+This document has two main parts:
+
+ * when and how to file GitHub issues for RabbitMQ projects
+ * how to submit pull requests
+
+They are intended to save you and RabbitMQ maintainers some time, so please
+take a moment to read through them.
+
+## Overview
+
+### GitHub issues
+
+The RabbitMQ team uses GitHub issues for _specific actionable items_ that
+engineers can work on. This assumes the following:
+
+* GitHub issues are not used for questions, investigations, root cause
+  analysis, discussions of potential issues, etc. (as defined by this team)
+* Enough information is provided by the reporter for maintainers to work with
+
+The team receives many questions through various venues every single
+day. Frequently, these questions do not include the necessary details
+the team needs to begin useful work. GitHub issues can very quickly
+turn into something impossible to navigate and make sense
+of. Because of this, questions, investigations, root cause analysis,
+and discussions of potential features are all considered to be
+[mailing list][rmq-users] material. If you are unsure where to begin,
+the [RabbitMQ users mailing list][rmq-users] is the right place.
+
+Getting all the details necessary to reproduce an issue, draw a
+conclusion or even form a hypothesis about what's happening can take a
+fair amount of time. Please help others help you by providing a way to
+reproduce the behavior you're observing, or at least sharing as much
+relevant information as possible on the [RabbitMQ users mailing
+list][rmq-users].
+
+Please provide versions of the software used:
+
+ * RabbitMQ server
+ * Erlang
+ * Operating system version (and distribution, if applicable)
+ * All client libraries used
+ * RabbitMQ plugins (if applicable)
+
+The following information greatly helps in investigating and reproducing issues:
+
+ * RabbitMQ server logs
+ * A code example or terminal transcript that can be used to reproduce
+ * Full exception stack traces (a single line message is not enough!)
+ * `rabbitmqctl report` and `rabbitmqctl environment` output
+ * Other relevant details about the environment and workload, e.g. a traffic capture
+ * Feel free to edit out hostnames and other potentially sensitive information.
+
+To make collecting much of this and other environment information easier, use
+the [`rabbitmq-collect-env`][rmq-collect-env] script. It will produce an archive with
+server logs, operating system logs, output of certain diagnostics commands and so on.
+Please note that **no effort is made to scrub any information that may be sensitive**.
+
+### Pull Requests
+
+RabbitMQ projects use pull requests to discuss, collaborate on and accept code contributions.
+Pull requests are the primary place for discussing code changes.
+
+Here's the recommended workflow:
+
+ * [Fork the repository][github-fork] or repositories you plan on contributing to. If multiple
+ repositories are involved in addressing the same issue, please use the same branch name
+ in each repository
+ * Create a branch with a descriptive name in the relevant repositories
+ * Make your changes, run tests (usually with `make tests`), commit with a
+ [descriptive message][git-commit-msgs], push to your fork
+ * Submit pull requests with an explanation of what has been changed and **why**
+ * Submit a filled out and signed [Contributor Agreement][ca-agreement] if needed (see below)
+ * Be patient. We will get to your pull request eventually
+
+If what you are going to work on is a substantial change, please first
+ask the core team for their opinion on the [RabbitMQ users mailing list][rmq-users].
+
+## Code of Conduct
+
+See [CODE_OF_CONDUCT.md](./CODE_OF_CONDUCT.md).
+
+## Contributor Agreement
+
+If you want to contribute a non-trivial change, please submit a signed
+copy of our [Contributor Agreement][ca-agreement] around the time you
+submit your pull request. This will make it much easier (in some
+cases, possible) for the RabbitMQ team at Pivotal to merge your
+contribution.
+
+## Where to Ask Questions
+
+If something isn't clear, feel free to ask on our [mailing list][rmq-users].
+
+[rmq-collect-env]: https://github.com/rabbitmq/support-tools/blob/master/scripts/rabbitmq-collect-env
+[git-commit-msgs]: https://chris.beams.io/posts/git-commit/
+[rmq-users]: https://groups.google.com/forum/#!forum/rabbitmq-users
+[ca-agreement]: https://cla.pivotal.io/sign/rabbitmq
+[github-fork]: https://help.github.com/articles/fork-a-repo/
diff --git a/deps/rabbitmq_federation/LICENSE b/deps/rabbitmq_federation/LICENSE
new file mode 100644
index 0000000000..f2da65d175
--- /dev/null
+++ b/deps/rabbitmq_federation/LICENSE
@@ -0,0 +1,4 @@
+This package is licensed under the MPL 2.0. For the MPL 2.0, please see LICENSE-MPL-RabbitMQ.
+
+If you have any questions regarding licensing, please contact us at
+info@rabbitmq.com.
diff --git a/deps/rabbitmq_federation/LICENSE-MPL-RabbitMQ b/deps/rabbitmq_federation/LICENSE-MPL-RabbitMQ
new file mode 100644
index 0000000000..14e2f777f6
--- /dev/null
+++ b/deps/rabbitmq_federation/LICENSE-MPL-RabbitMQ
@@ -0,0 +1,373 @@
+Mozilla Public License Version 2.0
+==================================
+
+1. Definitions
+--------------
+
+1.1. "Contributor"
+ means each individual or legal entity that creates, contributes to
+ the creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+ means the combination of the Contributions of others (if any) used
+ by a Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+ means Source Code Form to which the initial Contributor has attached
+ the notice in Exhibit A, the Executable Form of such Source Code
+ Form, and Modifications of such Source Code Form, in each case
+ including portions thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ (a) that the initial Contributor has attached the notice described
+ in Exhibit B to the Covered Software; or
+
+ (b) that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the
+ terms of a Secondary License.
+
+1.6. "Executable Form"
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+ means a work that combines Covered Software with other material, in
+ a separate file or files, that is not Covered Software.
+
+1.8. "License"
+ means this document.
+
+1.9. "Licensable"
+ means having the right to grant, to the maximum extent possible,
+ whether at the time of the initial grant or subsequently, any and
+ all of the rights conveyed by this License.
+
+1.10. "Modifications"
+ means any of the following:
+
+ (a) any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered
+ Software; or
+
+ (b) any new file in Source Code Form that contains any Covered
+ Software.
+
+1.11. "Patent Claims" of a Contributor
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the
+ License, by the making, using, selling, offering for sale, having
+ made, import, or transfer of either its Contributions or its
+ Contributor Version.
+
+1.12. "Secondary License"
+ means either the GNU General Public License, Version 2.0, the GNU
+ Lesser General Public License, Version 2.1, the GNU Affero General
+ Public License, Version 3.0, or any later versions of those
+ licenses.
+
+1.13. "Source Code Form"
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that
+ controls, is controlled by, or is under common control with You. For
+ purposes of this definition, "control" means (a) the power, direct
+ or indirect, to cause the direction or management of such entity,
+ whether by contract or otherwise, or (b) ownership of more than
+ fifty percent (50%) of the outstanding shares or beneficial
+ ownership of such entity.
+
+2. License Grants and Conditions
+--------------------------------
+
+2.1. Grants
+
+Each Contributor hereby grants You a world-wide, royalty-free,
+non-exclusive license:
+
+(a) under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+(b) under Patent Claims of such Contributor to make, use, sell, offer
+ for sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+The licenses granted in Section 2.1 with respect to any Contribution
+become effective for each Contribution on the date the Contributor first
+distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+The licenses granted in this Section 2 are the only rights granted under
+this License. No additional rights or licenses will be implied from the
+distribution or licensing of Covered Software under this License.
+Notwithstanding Section 2.1(b) above, no patent license is granted by a
+Contributor:
+
+(a) for any code that a Contributor has removed from Covered Software;
+ or
+
+(b) for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+(c) under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+This License does not grant any rights in the trademarks, service marks,
+or logos of any Contributor (except as may be necessary to comply with
+the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+No Contributor makes additional grants as a result of Your choice to
+distribute the Covered Software under a subsequent version of this
+License (see Section 10.2) or under the terms of a Secondary License (if
+permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+Each Contributor represents that the Contributor believes its
+Contributions are its original creation(s) or it has sufficient rights
+to grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+This License is not intended to limit any rights You have under
+applicable copyright doctrines of fair use, fair dealing, or other
+equivalents.
+
+2.7. Conditions
+
+Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
+in Section 2.1.
+
+3. Responsibilities
+-------------------
+
+3.1. Distribution of Source Form
+
+All distribution of Covered Software in Source Code Form, including any
+Modifications that You create or to which You contribute, must be under
+the terms of this License. You must inform recipients that the Source
+Code Form of the Covered Software is governed by the terms of this
+License, and how they can obtain a copy of this License. You may not
+attempt to alter or restrict the recipients' rights in the Source Code
+Form.
+
+3.2. Distribution of Executable Form
+
+If You distribute Covered Software in Executable Form then:
+
+(a) such Covered Software must also be made available in Source Code
+ Form, as described in Section 3.1, and You must inform recipients of
+ the Executable Form how they can obtain a copy of such Source Code
+ Form by reasonable means in a timely manner, at a charge no more
+ than the cost of distribution to the recipient; and
+
+(b) You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter
+ the recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+You may create and distribute a Larger Work under terms of Your choice,
+provided that You also comply with the requirements of this License for
+the Covered Software. If the Larger Work is a combination of Covered
+Software with a work governed by one or more Secondary Licenses, and the
+Covered Software is not Incompatible With Secondary Licenses, this
+License permits You to additionally distribute such Covered Software
+under the terms of such Secondary License(s), so that the recipient of
+the Larger Work may, at their option, further distribute the Covered
+Software under the terms of either this License or such Secondary
+License(s).
+
+3.4. Notices
+
+You may not remove or alter the substance of any license notices
+(including copyright notices, patent notices, disclaimers of warranty,
+or limitations of liability) contained within the Source Code Form of
+the Covered Software, except that You may alter any license notices to
+the extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+You may choose to offer, and to charge a fee for, warranty, support,
+indemnity or liability obligations to one or more recipients of Covered
+Software. However, You may do so only on Your own behalf, and not on
+behalf of any Contributor. You must make it absolutely clear that any
+such warranty, support, indemnity, or liability obligation is offered by
+You alone, and You hereby agree to indemnify every Contributor for any
+liability incurred by such Contributor as a result of warranty, support,
+indemnity or liability terms You offer. You may include additional
+disclaimers of warranty and limitations of liability specific to any
+jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+---------------------------------------------------
+
+If it is impossible for You to comply with any of the terms of this
+License with respect to some or all of the Covered Software due to
+statute, judicial order, or regulation then You must: (a) comply with
+the terms of this License to the maximum extent possible; and (b)
+describe the limitations and the code they affect. Such description must
+be placed in a text file included with all distributions of the Covered
+Software under this License. Except to the extent prohibited by statute
+or regulation, such description must be sufficiently detailed for a
+recipient of ordinary skill to be able to understand it.
+
+5. Termination
+--------------
+
+5.1. The rights granted under this License will terminate automatically
+if You fail to comply with any of its terms. However, if You become
+compliant, then the rights granted under this License from a particular
+Contributor are reinstated (a) provisionally, unless and until such
+Contributor explicitly and finally terminates Your grants, and (b) on an
+ongoing basis, if such Contributor fails to notify You of the
+non-compliance by some reasonable means prior to 60 days after You have
+come back into compliance. Moreover, Your grants from a particular
+Contributor are reinstated on an ongoing basis if such Contributor
+notifies You of the non-compliance by some reasonable means, this is the
+first time You have received notice of non-compliance with this License
+from such Contributor, and You become compliant prior to 30 days after
+Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+infringement claim (excluding declaratory judgment actions,
+counter-claims, and cross-claims) alleging that a Contributor Version
+directly or indirectly infringes any patent, then the rights granted to
+You by any and all Contributors for the Covered Software under Section
+2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all
+end user license agreements (excluding distributors and resellers) which
+have been validly granted by You or Your distributors under this License
+prior to termination shall survive termination.
+
+************************************************************************
+* *
+* 6. Disclaimer of Warranty *
+* ------------------------- *
+* *
+* Covered Software is provided under this License on an "as is" *
+* basis, without warranty of any kind, either expressed, implied, or *
+* statutory, including, without limitation, warranties that the *
+* Covered Software is free of defects, merchantable, fit for a *
+* particular purpose or non-infringing. The entire risk as to the *
+* quality and performance of the Covered Software is with You. *
+* Should any Covered Software prove defective in any respect, You *
+* (not any Contributor) assume the cost of any necessary servicing, *
+* repair, or correction. This disclaimer of warranty constitutes an *
+* essential part of this License. No use of any Covered Software is *
+* authorized under this License except under this disclaimer. *
+* *
+************************************************************************
+
+************************************************************************
+* *
+* 7. Limitation of Liability *
+* -------------------------- *
+* *
+* Under no circumstances and under no legal theory, whether tort *
+* (including negligence), contract, or otherwise, shall any *
+* Contributor, or anyone who distributes Covered Software as *
+* permitted above, be liable to You for any direct, indirect, *
+* special, incidental, or consequential damages of any character *
+* including, without limitation, damages for lost profits, loss of *
+* goodwill, work stoppage, computer failure or malfunction, or any *
+* and all other commercial damages or losses, even if such party *
+* shall have been informed of the possibility of such damages. This *
+* limitation of liability shall not apply to liability for death or *
+* personal injury resulting from such party's negligence to the *
+* extent applicable law prohibits such limitation. Some *
+* jurisdictions do not allow the exclusion or limitation of *
+* incidental or consequential damages, so this exclusion and *
+* limitation may not apply to You. *
+* *
+************************************************************************
+
+8. Litigation
+-------------
+
+Any litigation relating to this License may be brought only in the
+courts of a jurisdiction where the defendant maintains its principal
+place of business and such litigation shall be governed by laws of that
+jurisdiction, without reference to its conflict-of-law provisions.
+Nothing in this Section shall prevent a party's ability to bring
+cross-claims or counter-claims.
+
+9. Miscellaneous
+----------------
+
+This License represents the complete agreement concerning the subject
+matter hereof. If any provision of this License is held to be
+unenforceable, such provision shall be reformed only to the extent
+necessary to make it enforceable. Any law or regulation which provides
+that the language of a contract shall be construed against the drafter
+shall not be used to construe this License against a Contributor.
+
+10. Versions of the License
+---------------------------
+
+10.1. New Versions
+
+Mozilla Foundation is the license steward. Except as provided in Section
+10.3, no one other than the license steward has the right to modify or
+publish new versions of this License. Each version will be given a
+distinguishing version number.
+
+10.2. Effect of New Versions
+
+You may distribute the Covered Software under the terms of the version
+of the License under which You originally received the Covered Software,
+or under the terms of any subsequent version published by the license
+steward.
+
+10.3. Modified Versions
+
+If you create software not governed by this License, and you want to
+create a new license for such software, you may create and use a
+modified version of this License if you rename the license and remove
+any references to the name of the license steward (except to note that
+such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+Licenses
+
+If You choose to distribute Source Code Form that is Incompatible With
+Secondary Licenses under the terms of this version of the License, the
+notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+-------------------------------------------
+
+ This Source Code Form is subject to the terms of the Mozilla Public
+ License, v. 2.0. If a copy of the MPL was not distributed with this
+ file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular
+file, then You may include the notice in a location (such as a LICENSE
+file in a relevant directory) where a recipient would be likely to look
+for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+---------------------------------------------------------
+
+ This Source Code Form is "Incompatible With Secondary Licenses", as
+ defined by the Mozilla Public License, v. 2.0.
diff --git a/deps/rabbitmq_federation/Makefile b/deps/rabbitmq_federation/Makefile
new file mode 100644
index 0000000000..ebe2eea6ba
--- /dev/null
+++ b/deps/rabbitmq_federation/Makefile
@@ -0,0 +1,29 @@
+PROJECT = rabbitmq_federation
+PROJECT_DESCRIPTION = RabbitMQ Federation
+PROJECT_MOD = rabbit_federation_app
+
+define PROJECT_ENV
+[
+ {pgroup_name_cluster_id, false},
+ {internal_exchange_check_interval, 90000}
+ ]
+endef
+
+define PROJECT_APP_EXTRA_KEYS
+ {broker_version_requirements, []}
+endef
+
+DEPS = rabbit_common rabbit amqp_client
+TEST_DEPS = rabbitmq_ct_helpers rabbitmq_ct_client_helpers
+
+DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-plugin.mk
+DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk
+
+# FIXME: Use erlang.mk patched for RabbitMQ, while waiting for PRs to be
+# reviewed and merged.
+
+ERLANG_MK_REPO = https://github.com/rabbitmq/erlang.mk.git
+ERLANG_MK_COMMIT = rabbitmq-tmp
+
+include rabbitmq-components.mk
+include erlang.mk
diff --git a/deps/rabbitmq_federation/README-hacking b/deps/rabbitmq_federation/README-hacking
new file mode 100644
index 0000000000..6432552fe3
--- /dev/null
+++ b/deps/rabbitmq_federation/README-hacking
@@ -0,0 +1,143 @@
+This file is intended to tell you How It All Works, concentrating on
+the things you might not expect.
+
+The theory
+==========
+
+The 'x-federation' exchange is defined in
+rabbit_federation_exchange. This starts up a bunch of link processes
+(one for each upstream) which:
+
+ * Connect to the upstream broker
+ * Create a queue and bind it to the upstream exchange
+ * Keep bindings in sync with the downstream exchange
+ * Consume messages from the upstream queue and republish them to the
+ downstream exchange (matching confirms with acks)
+
+Each link process monitors the connections / channels it opens, and
+exits if any of them dies. We use a supervisor2 to ensure that we get
+some backoff when restarting.
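+
+For illustration, here is a minimal sketch of such a link process
+(the function names are made up; the real code lives in the
+rabbit_federation_* modules and is a proper gen_server):
+
+  start_link_process(UpstreamParams) ->
+      {ok, Conn} = amqp_connection:start(UpstreamParams),
+      {ok, Ch} = amqp_connection:open_channel(Conn),
+      erlang:monitor(process, Conn),
+      erlang:monitor(process, Ch),
+      loop(Conn, Ch).
+
+  loop(Conn, Ch) ->
+      receive
+          {'DOWN', _MRef, process, _Pid, Reason} ->
+              %% supervisor2 restarts us, with backoff
+              exit({upstream_down, Reason});
+          _Other ->
+              loop(Conn, Ch)
+      end.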
+
+We use process groups to identify all link processes for a certain
+exchange, as well as all link processes together.
+
+However, there are a bunch of wrinkles:
+
+
+Wrinkle: The exchange will be recovered when the Erlang client is not available
+===============================================================================
+
+Exchange recovery happens within the rabbit application - therefore at
+the time that the exchange is recovered, we can't make any connections
+since the amqp_client application has not yet started. Each link
+therefore initially has a state 'not_started'. When it is created it
+checks to see if the rabbitmq_federation application is running. If
+so, it starts fully. If not, it goes into the 'not_started'
+state. When rabbitmq_federation starts, it sends a 'go' message to all
+links, prodding them to bring up the link.
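+
+A sketch of that dance (the #state{} record and function names are
+illustrative, not the real module's):
+
+  -record(state, {status = not_started}).
+
+  %% At recovery time amqp_client may not be running yet, so only
+  %% start the link if the rabbitmq_federation application is up.
+  maybe_go(State = #state{}) ->
+      case lists:keymember(rabbitmq_federation, 1,
+                           application:which_applications()) of
+          true  -> go(State);
+          false -> State                     % stay in 'not_started'
+      end.
+
+  %% rabbitmq_federation sends 'go' to every link once it has booted.
+  handle_info(go, State = #state{status = not_started}) ->
+      {noreply, go(State)};
+  handle_info(go, State) ->
+      {noreply, State}.
+
+  go(State) ->
+      %% connect to the upstream, declare resources, start consuming
+      State#state{status = running}.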
+
+
+Wrinkle: On reconnect we want to assert bindings atomically
+===========================================================
+
+If the link goes down for whatever reason, then by the time it comes
+up again the bindings downstream may no longer be in sync with those
+upstream. Therefore on link establishment we want to ensure that a
+certain set of bindings exists. (Of course bringing up a link for the
+first time is a simple case of this.) And we want to do this with AMQP
+methods. But if we were to tear down all bindings and recreate them,
+we would have a time period when messages would not be forwarded for
+bindings that *do* still exist before and after.
+
+We use exchange-to-exchange bindings to work around this:
+
+We bind the upstream exchange (X) to the upstream queue (Q) via an
+internal fanout exchange (IXA), like so (with routing keys R1 and R2):
+
+ X----R1,R2--->IXA---->Q
+
+This has the same effect as binding the queue to the exchange directly.
+
+Now imagine the link has gone down, and is about to be
+reestablished. In the meantime, routing has changed downstream so
+that we now want routing keys R1 and R3. On link reconnection we can
+create and bind another internal fanout exchange IXB:
+
+ X----R1,R2--->IXA---->Q
+ | ^
+ | |
+ \----R1,R3--->IXB-----/
+
+and then delete the original exchange IXA:
+
+ X Q
+ | ^
+ | |
+ \----R1,R3--->IXB-----/
+
+This means that messages matching R1 are always routed during the
+switchover. Messages for R3 will start being routed as soon as we bind
+the second exchange, and messages for R2 will be stopped in a timely
+way. Of course this could lag the downstream situation somewhat, in
+which case some R2 messages will get thrown away downstream since they
+are unroutable. However this lag is inevitable when the link goes
+down.
+
+This means that the downstream only needs to keep track of whether the
+upstream is currently going via internal exchange A or B. This is
+held in the exchange scratch space in Mnesia.
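+
+In terms of the Erlang client, the switchover looks roughly like this
+(the variable names match the diagrams above; everything else is a
+sketch, not the real code):
+
+  -include_lib("amqp_client/include/amqp_client.hrl").
+
+  switch_internal_exchange(Ch, X, Q, IXA, IXB, Keys) ->
+      %% 1. declare the new internal fanout exchange and attach it
+      #'exchange.declare_ok'{} =
+          amqp_channel:call(Ch, #'exchange.declare'{exchange = IXB,
+                                                    type     = <<"fanout">>,
+                                                    durable  = true,
+                                                    internal = true}),
+      #'queue.bind_ok'{} =
+          amqp_channel:call(Ch, #'queue.bind'{queue    = Q,
+                                              exchange = IXB}),
+      %% 2. bind X to IXB with the *current* key set (R1, R3)
+      [#'exchange.bind_ok'{} =
+           amqp_channel:call(Ch, #'exchange.bind'{destination = IXB,
+                                                  source      = X,
+                                                  routing_key = Key})
+       || Key <- Keys],
+      %% 3. only now drop IXA; keys in both sets (R1) stay routable
+      #'exchange.delete_ok'{} =
+          amqp_channel:call(Ch, #'exchange.delete'{exchange = IXA}),
+      ok.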
+
+
+Wrinkle: We need to amalgamate bindings
+=======================================
+
+Since we only bind to one exchange upstream, but the downstream
+exchange can be bound to many queues, we can have duplicated bindings
+downstream (same source, routing key and args but different
+destination) that cannot be duplicated upstream (since the destination
+is the same). The link therefore maintains a mapping of (Key, Args) to
+set(Dest). Duplicated bindings do not get repeated upstream, and are
+only unbound upstream when the last one goes away downstream.
+
+Furthermore, this works as an optimisation since this will tend to
+reduce upstream binding count and churn.
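+
+A sketch of that bookkeeping as a map of sets (function names are made
+up; bind_upstream/unbind_upstream stand in for the real AMQP calls):
+
+  add_binding(KeyArgs, Dest, Map) ->
+      case maps:find(KeyArgs, Map) of
+          error ->
+              bind_upstream(KeyArgs),  % first for this key: bind upstream
+              Map#{KeyArgs => sets:from_list([Dest])};
+          {ok, Dests} ->
+              %% duplicate: record it, no upstream traffic
+              Map#{KeyArgs => sets:add_element(Dest, Dests)}
+      end.
+
+  remove_binding(KeyArgs, Dest, Map) ->
+      Dests = sets:del_element(Dest, maps:get(KeyArgs, Map)),
+      case sets:size(Dests) of
+          0 -> unbind_upstream(KeyArgs),  % last one gone: unbind upstream
+               maps:remove(KeyArgs, Map);
+          _ -> Map#{KeyArgs := Dests}
+      end.
+
+  bind_upstream(_KeyArgs)   -> ok.   % placeholder
+  unbind_upstream(_KeyArgs) -> ok.   % placeholder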
+
+
+Wrinkle: We may receive binding events out of order
+===================================================
+
+The rabbit_federation_exchange callbacks are invoked by channel
+processes within rabbit. Therefore they can be executed concurrently,
+and can arrive at the link processes in an order that does not
+correspond to the wall clock.
+
+We need to keep the state of the link in sync with Mnesia. Therefore
+not only do we need to impose an ordering on these events, we need to
+impose Mnesia's ordering on them. We therefore added a function to the
+callback interface, serialise_events. When this returns true, the
+callback mechanism inside rabbit increments a per-exchange counter
+within an Mnesia transaction, and returns the value as part of the
+add_binding and remove_binding callbacks. The link process then queues
+up these events, and replays them in order. The link process's state
+thus always follows Mnesia (it may be delayed, but the effects happen
+in the same order).
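+
+A sketch of the replay logic, buffering events keyed by their Mnesia
+serial and applying them only when contiguous (illustrative only):
+
+  %% State is {NextExpectedSerial, Buffer}, Buffer a gb_tree keyed by
+  %% the serial returned via the add_binding/remove_binding callbacks.
+  enqueue(Serial, Event, {Next, Buf}) ->
+      drain({Next, gb_trees:insert(Serial, Event, Buf)}).
+
+  drain({Next, Buf} = State) ->
+      case gb_trees:is_empty(Buf) of
+          true  -> State;
+          false ->
+              case gb_trees:smallest(Buf) of
+                  {Next, Event} ->               % next expected: apply it
+                      apply_event(Event),
+                      {_, _, Buf1} = gb_trees:take_smallest(Buf),
+                      drain({Next + 1, Buf1});
+                  _OutOfOrder ->
+                      State                      % wait for the gap to fill
+              end
+      end.
+
+  apply_event(_Event) -> ok.                     % placeholder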
+
+
+Other issues
+============
+
+Since links are implemented in terms of AMQP, link failure may cause
+messages to be redelivered. If you're unlucky this could lead to
+duplication.
+
+Message duplication can also happen with some topologies. In some
+cases it may not be possible to set max_hops such that messages arrive
+once at every node.
+
+While we correctly order bind / unbind events, we don't do the same
+thing for exchange creation / deletion. (This is harder - if you
+delete and recreate an exchange with the same name, is it the same
+exchange? What about if its type changes?) This would only be an issue
+if exchanges churn rapidly; however we could get into a state where
+Mnesia sees CDCD but we see CDDC and leave a process running when we
+shouldn't.
diff --git a/deps/rabbitmq_federation/README.md b/deps/rabbitmq_federation/README.md
new file mode 100644
index 0000000000..efebf43d3a
--- /dev/null
+++ b/deps/rabbitmq_federation/README.md
@@ -0,0 +1,25 @@
+## RabbitMQ Federation
+
+[![Build Status](https://travis-ci.org/rabbitmq/rabbitmq-federation.svg?branch=master)](https://travis-ci.org/rabbitmq/rabbitmq-federation)
+
+RabbitMQ federation offers a group of features for loosely
+coupled and WAN-friendly distributed RabbitMQ setups. Note that
+this is not an alternative to queue mirroring.
+
+
+## Supported RabbitMQ Versions
+
+This plugin ships with RabbitMQ; there is no need to
+install it separately.
+
+
+## Documentation
+
+See [RabbitMQ federation plugin](https://www.rabbitmq.com/federation.html) on rabbitmq.com.
+
+
+## License and Copyright
+
+Released under [the same license as RabbitMQ](https://www.rabbitmq.com/mpl.html).
+
+(c) 2007-2020 VMware, Inc. or its affiliates.
diff --git a/deps/rabbitmq_federation/erlang.mk b/deps/rabbitmq_federation/erlang.mk
new file mode 100644
index 0000000000..fce4be0b0a
--- /dev/null
+++ b/deps/rabbitmq_federation/erlang.mk
@@ -0,0 +1,7808 @@
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+.PHONY: all app apps deps search rel relup docs install-docs check tests clean distclean help erlang-mk
+
+ERLANG_MK_FILENAME := $(realpath $(lastword $(MAKEFILE_LIST)))
+export ERLANG_MK_FILENAME
+
+ERLANG_MK_VERSION = 2019.07.01-40-geb3e4b0
+ERLANG_MK_WITHOUT =
+
+# Make 3.81 and 3.82 are deprecated.
+
+ifeq ($(MAKELEVEL)$(MAKE_VERSION),03.81)
+$(warning Please upgrade to GNU Make 4 or later: https://erlang.mk/guide/installation.html)
+endif
+
+ifeq ($(MAKELEVEL)$(MAKE_VERSION),03.82)
+$(warning Please upgrade to GNU Make 4 or later: https://erlang.mk/guide/installation.html)
+endif
+
+# Core configuration.
+
+PROJECT ?= $(notdir $(CURDIR))
+PROJECT := $(strip $(PROJECT))
+
+PROJECT_VERSION ?= rolling
+PROJECT_MOD ?= $(PROJECT)_app
+PROJECT_ENV ?= []
+
+# Verbosity.
+
+V ?= 0
+
+verbose_0 = @
+verbose_2 = set -x;
+verbose = $(verbose_$(V))
+
+ifeq ($(V),3)
+SHELL := $(SHELL) -x
+endif
+
+gen_verbose_0 = @echo " GEN " $@;
+gen_verbose_2 = set -x;
+gen_verbose = $(gen_verbose_$(V))
+
+gen_verbose_esc_0 = @echo " GEN " $$@;
+gen_verbose_esc_2 = set -x;
+gen_verbose_esc = $(gen_verbose_esc_$(V))
+
+# Temporary files directory.
+
+ERLANG_MK_TMP ?= $(CURDIR)/.erlang.mk
+export ERLANG_MK_TMP
+
+# "erl" command.
+
+ERL = erl +A1 -noinput -boot no_dot_erlang
+
+# Platform detection.
+
+ifeq ($(PLATFORM),)
+UNAME_S := $(shell uname -s)
+
+ifeq ($(UNAME_S),Linux)
+PLATFORM = linux
+else ifeq ($(UNAME_S),Darwin)
+PLATFORM = darwin
+else ifeq ($(UNAME_S),SunOS)
+PLATFORM = solaris
+else ifeq ($(UNAME_S),GNU)
+PLATFORM = gnu
+else ifeq ($(UNAME_S),FreeBSD)
+PLATFORM = freebsd
+else ifeq ($(UNAME_S),NetBSD)
+PLATFORM = netbsd
+else ifeq ($(UNAME_S),OpenBSD)
+PLATFORM = openbsd
+else ifeq ($(UNAME_S),DragonFly)
+PLATFORM = dragonfly
+else ifeq ($(shell uname -o),Msys)
+PLATFORM = msys2
+else
+$(error Unable to detect platform. Please open a ticket with the output of uname -a.)
+endif
+
+export PLATFORM
+endif
+
+# Core targets.
+
+all:: deps app rel
+
+# Noop to avoid a Make warning when there's nothing to do.
+rel::
+ $(verbose) :
+
+relup:: deps app
+
+check:: tests
+
+clean:: clean-crashdump
+
+clean-crashdump:
+ifneq ($(wildcard erl_crash.dump),)
+ $(gen_verbose) rm -f erl_crash.dump
+endif
+
+distclean:: clean distclean-tmp
+
+$(ERLANG_MK_TMP):
+ $(verbose) mkdir -p $(ERLANG_MK_TMP)
+
+distclean-tmp:
+ $(gen_verbose) rm -rf $(ERLANG_MK_TMP)
+
+help::
+ $(verbose) printf "%s\n" \
+ "erlang.mk (version $(ERLANG_MK_VERSION)) is distributed under the terms of the ISC License." \
+ "Copyright (c) 2013-2016 Loïc Hoguin <essen@ninenines.eu>" \
+ "" \
+ "Usage: [V=1] $(MAKE) [target]..." \
+ "" \
+ "Core targets:" \
+ " all Run deps, app and rel targets in that order" \
+ " app Compile the project" \
+ " deps Fetch dependencies (if needed) and compile them" \
+ " fetch-deps Fetch dependencies recursively (if needed) without compiling them" \
+ " list-deps List dependencies recursively on stdout" \
+ " search q=... Search for a package in the built-in index" \
+ " rel Build a release for this project, if applicable" \
+ " docs Build the documentation for this project" \
+ " install-docs Install the man pages for this project" \
+ " check Compile and run all tests and analysis for this project" \
+ " tests Run the tests for this project" \
+ " clean Delete temporary and output files from most targets" \
+ " distclean Delete all temporary and output files" \
+ " help Display this help and exit" \
+ " erlang-mk Update erlang.mk to the latest version"
+
+# Core functions.
+
+empty :=
+space := $(empty) $(empty)
+tab := $(empty) $(empty)
+comma := ,
+
+define newline
+
+
+endef
+
+define comma_list
+$(subst $(space),$(comma),$(strip $(1)))
+endef
+
+define escape_dquotes
+$(subst ",\",$1)
+endef
+
+# Adding erlang.mk to make Erlang scripts that call init:get_plain_arguments() happy.
+define erlang
+$(ERL) $2 -pz $(ERLANG_MK_TMP)/rebar/ebin -eval "$(subst $(newline),,$(call escape_dquotes,$1))" -- erlang.mk
+endef
+
+ifeq ($(PLATFORM),msys2)
+core_native_path = $(shell cygpath -m $1)
+else
+core_native_path = $1
+endif
+
+core_http_get = curl -Lf$(if $(filter-out 0,$(V)),,s)o $(call core_native_path,$1) $2
+
+core_eq = $(and $(findstring $(1),$(2)),$(findstring $(2),$(1)))
+
+# We skip files that contain spaces because they end up causing issues.
+core_find = $(if $(wildcard $1),$(shell find $(1:%/=%) \( -type l -o -type f \) -name $(subst *,\*,$2) | grep -v " "))
+
+core_lc = $(subst A,a,$(subst B,b,$(subst C,c,$(subst D,d,$(subst E,e,$(subst F,f,$(subst G,g,$(subst H,h,$(subst I,i,$(subst J,j,$(subst K,k,$(subst L,l,$(subst M,m,$(subst N,n,$(subst O,o,$(subst P,p,$(subst Q,q,$(subst R,r,$(subst S,s,$(subst T,t,$(subst U,u,$(subst V,v,$(subst W,w,$(subst X,x,$(subst Y,y,$(subst Z,z,$(1)))))))))))))))))))))))))))
+
+core_ls = $(filter-out $(1),$(shell echo $(1)))
+
+# @todo Use a solution that does not require using perl.
+core_relpath = $(shell perl -e 'use File::Spec; print File::Spec->abs2rel(@ARGV) . "\n"' $1 $2)
+
+define core_render
+ printf -- '$(subst $(newline),\n,$(subst %,%%,$(subst ','\'',$(subst $(tab),$(WS),$(call $(1))))))\n' > $(2)
+endef
+
+# Automated update.
+
+ERLANG_MK_REPO ?= https://github.com/ninenines/erlang.mk
+ERLANG_MK_COMMIT ?=
+ERLANG_MK_BUILD_CONFIG ?= build.config
+ERLANG_MK_BUILD_DIR ?= .erlang.mk.build
+
+erlang-mk: WITHOUT ?= $(ERLANG_MK_WITHOUT)
+erlang-mk:
+ifdef ERLANG_MK_COMMIT
+ $(verbose) git clone $(ERLANG_MK_REPO) $(ERLANG_MK_BUILD_DIR)
+ $(verbose) cd $(ERLANG_MK_BUILD_DIR) && git checkout $(ERLANG_MK_COMMIT)
+else
+ $(verbose) git clone --depth 1 $(ERLANG_MK_REPO) $(ERLANG_MK_BUILD_DIR)
+endif
+ $(verbose) if [ -f $(ERLANG_MK_BUILD_CONFIG) ]; then cp $(ERLANG_MK_BUILD_CONFIG) $(ERLANG_MK_BUILD_DIR)/build.config; fi
+ $(gen_verbose) $(MAKE) --no-print-directory -C $(ERLANG_MK_BUILD_DIR) WITHOUT='$(strip $(WITHOUT))' UPGRADE=1
+ $(verbose) cp $(ERLANG_MK_BUILD_DIR)/erlang.mk ./erlang.mk
+ $(verbose) rm -rf $(ERLANG_MK_BUILD_DIR)
+ $(verbose) rm -rf $(ERLANG_MK_TMP)
+
+# The erlang.mk package index is bundled in the default erlang.mk build.
+# Search for the string "copyright" to skip to the rest of the code.
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-kerl
+
+KERL_INSTALL_DIR ?= $(HOME)/erlang
+
+ifeq ($(strip $(KERL)),)
+KERL := $(ERLANG_MK_TMP)/kerl/kerl
+endif
+
+KERL_DIR = $(ERLANG_MK_TMP)/kerl
+
+export KERL
+
+KERL_GIT ?= https://github.com/kerl/kerl
+KERL_COMMIT ?= master
+
+KERL_MAKEFLAGS ?=
+
+OTP_GIT ?= https://github.com/erlang/otp
+
+define kerl_otp_target
+$(KERL_INSTALL_DIR)/$(1): $(KERL)
+ $(verbose) if [ ! -d $$@ ]; then \
+ MAKEFLAGS="$(KERL_MAKEFLAGS)" $(KERL) build git $(OTP_GIT) $(1) $(1); \
+ $(KERL) install $(1) $(KERL_INSTALL_DIR)/$(1); \
+ fi
+endef
+
+define kerl_hipe_target
+$(KERL_INSTALL_DIR)/$1-native: $(KERL)
+ $(verbose) if [ ! -d $$@ ]; then \
+ KERL_CONFIGURE_OPTIONS=--enable-native-libs \
+ MAKEFLAGS="$(KERL_MAKEFLAGS)" $(KERL) build git $(OTP_GIT) $1 $1-native; \
+ $(KERL) install $1-native $(KERL_INSTALL_DIR)/$1-native; \
+ fi
+endef
+
+$(KERL): $(KERL_DIR)
+
+$(KERL_DIR): | $(ERLANG_MK_TMP)
+ $(gen_verbose) git clone --depth 1 $(KERL_GIT) $(ERLANG_MK_TMP)/kerl
+ $(verbose) cd $(ERLANG_MK_TMP)/kerl && git checkout $(KERL_COMMIT)
+ $(verbose) chmod +x $(KERL)
+
+distclean:: distclean-kerl
+
+distclean-kerl:
+ $(gen_verbose) rm -rf $(KERL_DIR)
+
+# Allow users to select which version of Erlang/OTP to use for a project.
+
+ifneq ($(strip $(LATEST_ERLANG_OTP)),)
+# In some environments it is necessary to filter out master.
+ERLANG_OTP := $(notdir $(lastword $(sort\
+ $(filter-out $(KERL_INSTALL_DIR)/master $(KERL_INSTALL_DIR)/OTP_R%,\
+ $(filter-out %-rc1 %-rc2 %-rc3,$(wildcard $(KERL_INSTALL_DIR)/*[^-native]))))))
+endif
+
+ERLANG_OTP ?=
+ERLANG_HIPE ?=
+
+# Use kerl to enforce a specific Erlang/OTP version for a project.
+ifneq ($(strip $(ERLANG_OTP)),)
+export PATH := $(KERL_INSTALL_DIR)/$(ERLANG_OTP)/bin:$(PATH)
+SHELL := env PATH=$(PATH) $(SHELL)
+$(eval $(call kerl_otp_target,$(ERLANG_OTP)))
+
+# Build Erlang/OTP only if it doesn't already exist.
+ifeq ($(wildcard $(KERL_INSTALL_DIR)/$(ERLANG_OTP))$(BUILD_ERLANG_OTP),)
+$(info Building Erlang/OTP $(ERLANG_OTP)... Please wait...)
+$(shell $(MAKE) $(KERL_INSTALL_DIR)/$(ERLANG_OTP) ERLANG_OTP=$(ERLANG_OTP) BUILD_ERLANG_OTP=1 >&2)
+endif
+
+else
+# Same for a HiPE enabled VM.
+ifneq ($(strip $(ERLANG_HIPE)),)
+export PATH := $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native/bin:$(PATH)
+SHELL := env PATH=$(PATH) $(SHELL)
+$(eval $(call kerl_hipe_target,$(ERLANG_HIPE)))
+
+# Build Erlang/OTP only if it doesn't already exist.
+ifeq ($(wildcard $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native)$(BUILD_ERLANG_OTP),)
+$(info Building HiPE-enabled Erlang/OTP $(ERLANG_OTP)... Please wait...)
+$(shell $(MAKE) $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native ERLANG_HIPE=$(ERLANG_HIPE) BUILD_ERLANG_OTP=1 >&2)
+endif
+
+endif
+endif
+
+PACKAGES += aberth
+pkg_aberth_name = aberth
+pkg_aberth_description = Generic BERT-RPC server in Erlang
+pkg_aberth_homepage = https://github.com/a13x/aberth
+pkg_aberth_fetch = git
+pkg_aberth_repo = https://github.com/a13x/aberth
+pkg_aberth_commit = master
+
+PACKAGES += active
+pkg_active_name = active
+pkg_active_description = Active development for Erlang: rebuild and reload source/binary files while the VM is running
+pkg_active_homepage = https://github.com/proger/active
+pkg_active_fetch = git
+pkg_active_repo = https://github.com/proger/active
+pkg_active_commit = master
+
+PACKAGES += actordb_core
+pkg_actordb_core_name = actordb_core
+pkg_actordb_core_description = ActorDB main source
+pkg_actordb_core_homepage = http://www.actordb.com/
+pkg_actordb_core_fetch = git
+pkg_actordb_core_repo = https://github.com/biokoda/actordb_core
+pkg_actordb_core_commit = master
+
+PACKAGES += actordb_thrift
+pkg_actordb_thrift_name = actordb_thrift
+pkg_actordb_thrift_description = Thrift API for ActorDB
+pkg_actordb_thrift_homepage = http://www.actordb.com/
+pkg_actordb_thrift_fetch = git
+pkg_actordb_thrift_repo = https://github.com/biokoda/actordb_thrift
+pkg_actordb_thrift_commit = master
+
+PACKAGES += aleppo
+pkg_aleppo_name = aleppo
+pkg_aleppo_description = Alternative Erlang Pre-Processor
+pkg_aleppo_homepage = https://github.com/ErlyORM/aleppo
+pkg_aleppo_fetch = git
+pkg_aleppo_repo = https://github.com/ErlyORM/aleppo
+pkg_aleppo_commit = master
+
+PACKAGES += alog
+pkg_alog_name = alog
+pkg_alog_description = Simply the best logging framework for Erlang
+pkg_alog_homepage = https://github.com/siberian-fast-food/alogger
+pkg_alog_fetch = git
+pkg_alog_repo = https://github.com/siberian-fast-food/alogger
+pkg_alog_commit = master
+
+PACKAGES += amqp_client
+pkg_amqp_client_name = amqp_client
+pkg_amqp_client_description = RabbitMQ Erlang AMQP client
+pkg_amqp_client_homepage = https://www.rabbitmq.com/erlang-client-user-guide.html
+pkg_amqp_client_fetch = git
+pkg_amqp_client_repo = https://github.com/rabbitmq/rabbitmq-erlang-client.git
+pkg_amqp_client_commit = master
+
+PACKAGES += annotations
+pkg_annotations_name = annotations
+pkg_annotations_description = Simple code instrumentation utilities
+pkg_annotations_homepage = https://github.com/hyperthunk/annotations
+pkg_annotations_fetch = git
+pkg_annotations_repo = https://github.com/hyperthunk/annotations
+pkg_annotations_commit = master
+
+PACKAGES += antidote
+pkg_antidote_name = antidote
+pkg_antidote_description = Large-scale computation without synchronisation
+pkg_antidote_homepage = https://syncfree.lip6.fr/
+pkg_antidote_fetch = git
+pkg_antidote_repo = https://github.com/SyncFree/antidote
+pkg_antidote_commit = master
+
+PACKAGES += apns
+pkg_apns_name = apns
+pkg_apns_description = Apple Push Notification Server for Erlang
+pkg_apns_homepage = http://inaka.github.com/apns4erl
+pkg_apns_fetch = git
+pkg_apns_repo = https://github.com/inaka/apns4erl
+pkg_apns_commit = master
+
+PACKAGES += asciideck
+pkg_asciideck_name = asciideck
+pkg_asciideck_description = Asciidoc for Erlang.
+pkg_asciideck_homepage = https://ninenines.eu
+pkg_asciideck_fetch = git
+pkg_asciideck_repo = https://github.com/ninenines/asciideck
+pkg_asciideck_commit = master
+
+PACKAGES += azdht
+pkg_azdht_name = azdht
+pkg_azdht_description = Azureus Distributed Hash Table (DHT) in Erlang
+pkg_azdht_homepage = https://github.com/arcusfelis/azdht
+pkg_azdht_fetch = git
+pkg_azdht_repo = https://github.com/arcusfelis/azdht
+pkg_azdht_commit = master
+
+PACKAGES += backoff
+pkg_backoff_name = backoff
+pkg_backoff_description = Simple exponential backoffs in Erlang
+pkg_backoff_homepage = https://github.com/ferd/backoff
+pkg_backoff_fetch = git
+pkg_backoff_repo = https://github.com/ferd/backoff
+pkg_backoff_commit = master
+
+PACKAGES += barrel_tcp
+pkg_barrel_tcp_name = barrel_tcp
+pkg_barrel_tcp_description = barrel is a generic TCP acceptor pool with low latency in Erlang.
+pkg_barrel_tcp_homepage = https://github.com/benoitc-attic/barrel_tcp
+pkg_barrel_tcp_fetch = git
+pkg_barrel_tcp_repo = https://github.com/benoitc-attic/barrel_tcp
+pkg_barrel_tcp_commit = master
+
+PACKAGES += basho_bench
+pkg_basho_bench_name = basho_bench
+pkg_basho_bench_description = A load-generation and testing tool for basically whatever you can write a returning Erlang function for.
+pkg_basho_bench_homepage = https://github.com/basho/basho_bench
+pkg_basho_bench_fetch = git
+pkg_basho_bench_repo = https://github.com/basho/basho_bench
+pkg_basho_bench_commit = master
+
+PACKAGES += bcrypt
+pkg_bcrypt_name = bcrypt
+pkg_bcrypt_description = Bcrypt Erlang / C library
+pkg_bcrypt_homepage = https://github.com/erlangpack/bcrypt
+pkg_bcrypt_fetch = git
+pkg_bcrypt_repo = https://github.com/erlangpack/bcrypt.git
+pkg_bcrypt_commit = master
+
+PACKAGES += beam
+pkg_beam_name = beam
+pkg_beam_description = BEAM emulator written in Erlang
+pkg_beam_homepage = https://github.com/tonyrog/beam
+pkg_beam_fetch = git
+pkg_beam_repo = https://github.com/tonyrog/beam
+pkg_beam_commit = master
+
+PACKAGES += beanstalk
+pkg_beanstalk_name = beanstalk
+pkg_beanstalk_description = An Erlang client for beanstalkd
+pkg_beanstalk_homepage = https://github.com/tim/erlang-beanstalk
+pkg_beanstalk_fetch = git
+pkg_beanstalk_repo = https://github.com/tim/erlang-beanstalk
+pkg_beanstalk_commit = master
+
+PACKAGES += bear
+pkg_bear_name = bear
+pkg_bear_description = a set of statistics functions for erlang
+pkg_bear_homepage = https://github.com/boundary/bear
+pkg_bear_fetch = git
+pkg_bear_repo = https://github.com/boundary/bear
+pkg_bear_commit = master
+
+PACKAGES += bertconf
+pkg_bertconf_name = bertconf
+pkg_bertconf_description = Make ETS tables out of static BERT files that are auto-reloaded
+pkg_bertconf_homepage = https://github.com/ferd/bertconf
+pkg_bertconf_fetch = git
+pkg_bertconf_repo = https://github.com/ferd/bertconf
+pkg_bertconf_commit = master
+
+PACKAGES += bifrost
+pkg_bifrost_name = bifrost
+pkg_bifrost_description = Erlang FTP Server Framework
+pkg_bifrost_homepage = https://github.com/thorstadt/bifrost
+pkg_bifrost_fetch = git
+pkg_bifrost_repo = https://github.com/thorstadt/bifrost
+pkg_bifrost_commit = master
+
+PACKAGES += binpp
+pkg_binpp_name = binpp
+pkg_binpp_description = Erlang Binary Pretty Printer
+pkg_binpp_homepage = https://github.com/jtendo/binpp
+pkg_binpp_fetch = git
+pkg_binpp_repo = https://github.com/jtendo/binpp
+pkg_binpp_commit = master
+
+PACKAGES += bisect
+pkg_bisect_name = bisect
+pkg_bisect_description = Ordered fixed-size binary dictionary in Erlang
+pkg_bisect_homepage = https://github.com/knutin/bisect
+pkg_bisect_fetch = git
+pkg_bisect_repo = https://github.com/knutin/bisect
+pkg_bisect_commit = master
+
+PACKAGES += bitcask
+pkg_bitcask_name = bitcask
+pkg_bitcask_description = because you need another key/value storage engine
+pkg_bitcask_homepage = https://github.com/basho/bitcask
+pkg_bitcask_fetch = git
+pkg_bitcask_repo = https://github.com/basho/bitcask
+pkg_bitcask_commit = develop
+
+PACKAGES += bitstore
+pkg_bitstore_name = bitstore
+pkg_bitstore_description = A document based ontology development environment
+pkg_bitstore_homepage = https://github.com/bdionne/bitstore
+pkg_bitstore_fetch = git
+pkg_bitstore_repo = https://github.com/bdionne/bitstore
+pkg_bitstore_commit = master
+
+PACKAGES += bootstrap
+pkg_bootstrap_name = bootstrap
+pkg_bootstrap_description = A simple, yet powerful Erlang cluster bootstrapping application.
+pkg_bootstrap_homepage = https://github.com/schlagert/bootstrap
+pkg_bootstrap_fetch = git
+pkg_bootstrap_repo = https://github.com/schlagert/bootstrap
+pkg_bootstrap_commit = master
+
+PACKAGES += boss
+pkg_boss_name = boss
+pkg_boss_description = Erlang web MVC, now featuring Comet
+pkg_boss_homepage = https://github.com/ChicagoBoss/ChicagoBoss
+pkg_boss_fetch = git
+pkg_boss_repo = https://github.com/ChicagoBoss/ChicagoBoss
+pkg_boss_commit = master
+
+PACKAGES += boss_db
+pkg_boss_db_name = boss_db
+pkg_boss_db_description = BossDB: a sharded, caching, pooling, evented ORM for Erlang
+pkg_boss_db_homepage = https://github.com/ErlyORM/boss_db
+pkg_boss_db_fetch = git
+pkg_boss_db_repo = https://github.com/ErlyORM/boss_db
+pkg_boss_db_commit = master
+
+PACKAGES += brod
+pkg_brod_name = brod
+pkg_brod_description = Kafka client in Erlang
+pkg_brod_homepage = https://github.com/klarna/brod
+pkg_brod_fetch = git
+pkg_brod_repo = https://github.com/klarna/brod.git
+pkg_brod_commit = master
+
+PACKAGES += bson
+pkg_bson_name = bson
+pkg_bson_description = BSON documents in Erlang, see bsonspec.org
+pkg_bson_homepage = https://github.com/comtihon/bson-erlang
+pkg_bson_fetch = git
+pkg_bson_repo = https://github.com/comtihon/bson-erlang
+pkg_bson_commit = master
+
+PACKAGES += bullet
+pkg_bullet_name = bullet
+pkg_bullet_description = Simple, reliable, efficient streaming for Cowboy.
+pkg_bullet_homepage = http://ninenines.eu
+pkg_bullet_fetch = git
+pkg_bullet_repo = https://github.com/ninenines/bullet
+pkg_bullet_commit = master
+
+PACKAGES += cache
+pkg_cache_name = cache
+pkg_cache_description = Erlang in-memory cache
+pkg_cache_homepage = https://github.com/fogfish/cache
+pkg_cache_fetch = git
+pkg_cache_repo = https://github.com/fogfish/cache
+pkg_cache_commit = master
+
+PACKAGES += cake
+pkg_cake_name = cake
+pkg_cake_description = Really simple terminal colorization
+pkg_cake_homepage = https://github.com/darach/cake-erl
+pkg_cake_fetch = git
+pkg_cake_repo = https://github.com/darach/cake-erl
+pkg_cake_commit = master
+
+PACKAGES += carotene
+pkg_carotene_name = carotene
+pkg_carotene_description = Real-time server
+pkg_carotene_homepage = https://github.com/carotene/carotene
+pkg_carotene_fetch = git
+pkg_carotene_repo = https://github.com/carotene/carotene
+pkg_carotene_commit = master
+
+PACKAGES += cberl
+pkg_cberl_name = cberl
+pkg_cberl_description = NIF based Erlang bindings for Couchbase
+pkg_cberl_homepage = https://github.com/chitika/cberl
+pkg_cberl_fetch = git
+pkg_cberl_repo = https://github.com/chitika/cberl
+pkg_cberl_commit = master
+
+PACKAGES += cecho
+pkg_cecho_name = cecho
+pkg_cecho_description = An ncurses library for Erlang
+pkg_cecho_homepage = https://github.com/mazenharake/cecho
+pkg_cecho_fetch = git
+pkg_cecho_repo = https://github.com/mazenharake/cecho
+pkg_cecho_commit = master
+
+PACKAGES += cferl
+pkg_cferl_name = cferl
+pkg_cferl_description = Rackspace / Open Stack Cloud Files Erlang Client
+pkg_cferl_homepage = https://github.com/ddossot/cferl
+pkg_cferl_fetch = git
+pkg_cferl_repo = https://github.com/ddossot/cferl
+pkg_cferl_commit = master
+
+PACKAGES += chaos_monkey
+pkg_chaos_monkey_name = chaos_monkey
+pkg_chaos_monkey_description = This is The CHAOS MONKEY. It will kill your processes.
+pkg_chaos_monkey_homepage = https://github.com/dLuna/chaos_monkey
+pkg_chaos_monkey_fetch = git
+pkg_chaos_monkey_repo = https://github.com/dLuna/chaos_monkey
+pkg_chaos_monkey_commit = master
+
+PACKAGES += check_node
+pkg_check_node_name = check_node
+pkg_check_node_description = Nagios Scripts for monitoring Riak
+pkg_check_node_homepage = https://github.com/basho-labs/riak_nagios
+pkg_check_node_fetch = git
+pkg_check_node_repo = https://github.com/basho-labs/riak_nagios
+pkg_check_node_commit = master
+
+PACKAGES += chronos
+pkg_chronos_name = chronos
+pkg_chronos_description = Timer module for Erlang that makes it easy to abstract time out of the tests.
+pkg_chronos_homepage = https://github.com/lehoff/chronos
+pkg_chronos_fetch = git
+pkg_chronos_repo = https://github.com/lehoff/chronos
+pkg_chronos_commit = master
+
+PACKAGES += chumak
+pkg_chumak_name = chumak
+pkg_chumak_description = Pure Erlang implementation of ZeroMQ Message Transport Protocol.
+pkg_chumak_homepage = http://choven.ca
+pkg_chumak_fetch = git
+pkg_chumak_repo = https://github.com/chovencorp/chumak
+pkg_chumak_commit = master
+
+PACKAGES += cl
+pkg_cl_name = cl
+pkg_cl_description = OpenCL binding for Erlang
+pkg_cl_homepage = https://github.com/tonyrog/cl
+pkg_cl_fetch = git
+pkg_cl_repo = https://github.com/tonyrog/cl
+pkg_cl_commit = master
+
+PACKAGES += clique
+pkg_clique_name = clique
+pkg_clique_description = CLI Framework for Erlang
+pkg_clique_homepage = https://github.com/basho/clique
+pkg_clique_fetch = git
+pkg_clique_repo = https://github.com/basho/clique
+pkg_clique_commit = develop
+
+PACKAGES += cloudi_core
+pkg_cloudi_core_name = cloudi_core
+pkg_cloudi_core_description = CloudI internal service runtime
+pkg_cloudi_core_homepage = http://cloudi.org/
+pkg_cloudi_core_fetch = git
+pkg_cloudi_core_repo = https://github.com/CloudI/cloudi_core
+pkg_cloudi_core_commit = master
+
+PACKAGES += cloudi_service_api_requests
+pkg_cloudi_service_api_requests_name = cloudi_service_api_requests
+pkg_cloudi_service_api_requests_description = CloudI Service API requests (JSON-RPC/Erlang-term support)
+pkg_cloudi_service_api_requests_homepage = http://cloudi.org/
+pkg_cloudi_service_api_requests_fetch = git
+pkg_cloudi_service_api_requests_repo = https://github.com/CloudI/cloudi_service_api_requests
+pkg_cloudi_service_api_requests_commit = master
+
+PACKAGES += cloudi_service_db
+pkg_cloudi_service_db_name = cloudi_service_db
+pkg_cloudi_service_db_description = CloudI Database (in-memory/testing/generic)
+pkg_cloudi_service_db_homepage = http://cloudi.org/
+pkg_cloudi_service_db_fetch = git
+pkg_cloudi_service_db_repo = https://github.com/CloudI/cloudi_service_db
+pkg_cloudi_service_db_commit = master
+
+PACKAGES += cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_name = cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_description = Cassandra CloudI Service
+pkg_cloudi_service_db_cassandra_homepage = http://cloudi.org/
+pkg_cloudi_service_db_cassandra_fetch = git
+pkg_cloudi_service_db_cassandra_repo = https://github.com/CloudI/cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_commit = master
+
+PACKAGES += cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_name = cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_description = Cassandra CQL CloudI Service
+pkg_cloudi_service_db_cassandra_cql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_cassandra_cql_fetch = git
+pkg_cloudi_service_db_cassandra_cql_repo = https://github.com/CloudI/cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_commit = master
+
+PACKAGES += cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_name = cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_description = CouchDB CloudI Service
+pkg_cloudi_service_db_couchdb_homepage = http://cloudi.org/
+pkg_cloudi_service_db_couchdb_fetch = git
+pkg_cloudi_service_db_couchdb_repo = https://github.com/CloudI/cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_commit = master
+
+PACKAGES += cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_name = cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_description = elasticsearch CloudI Service
+pkg_cloudi_service_db_elasticsearch_homepage = http://cloudi.org/
+pkg_cloudi_service_db_elasticsearch_fetch = git
+pkg_cloudi_service_db_elasticsearch_repo = https://github.com/CloudI/cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_commit = master
+
+PACKAGES += cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_name = cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_description = memcached CloudI Service
+pkg_cloudi_service_db_memcached_homepage = http://cloudi.org/
+pkg_cloudi_service_db_memcached_fetch = git
+pkg_cloudi_service_db_memcached_repo = https://github.com/CloudI/cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_commit = master
+
+PACKAGES += cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_name = cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_description = MySQL CloudI Service
+pkg_cloudi_service_db_mysql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_mysql_fetch = git
+pkg_cloudi_service_db_mysql_repo = https://github.com/CloudI/cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_commit = master
+
+PACKAGES += cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_name = cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_description = PostgreSQL CloudI Service
+pkg_cloudi_service_db_pgsql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_pgsql_fetch = git
+pkg_cloudi_service_db_pgsql_repo = https://github.com/CloudI/cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_commit = master
+
+PACKAGES += cloudi_service_db_riak
+pkg_cloudi_service_db_riak_name = cloudi_service_db_riak
+pkg_cloudi_service_db_riak_description = Riak CloudI Service
+pkg_cloudi_service_db_riak_homepage = http://cloudi.org/
+pkg_cloudi_service_db_riak_fetch = git
+pkg_cloudi_service_db_riak_repo = https://github.com/CloudI/cloudi_service_db_riak
+pkg_cloudi_service_db_riak_commit = master
+
+PACKAGES += cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_name = cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_description = Tokyo Tyrant CloudI Service
+pkg_cloudi_service_db_tokyotyrant_homepage = http://cloudi.org/
+pkg_cloudi_service_db_tokyotyrant_fetch = git
+pkg_cloudi_service_db_tokyotyrant_repo = https://github.com/CloudI/cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_commit = master
+
+PACKAGES += cloudi_service_filesystem
+pkg_cloudi_service_filesystem_name = cloudi_service_filesystem
+pkg_cloudi_service_filesystem_description = Filesystem CloudI Service
+pkg_cloudi_service_filesystem_homepage = http://cloudi.org/
+pkg_cloudi_service_filesystem_fetch = git
+pkg_cloudi_service_filesystem_repo = https://github.com/CloudI/cloudi_service_filesystem
+pkg_cloudi_service_filesystem_commit = master
+
+PACKAGES += cloudi_service_http_client
+pkg_cloudi_service_http_client_name = cloudi_service_http_client
+pkg_cloudi_service_http_client_description = HTTP client CloudI Service
+pkg_cloudi_service_http_client_homepage = http://cloudi.org/
+pkg_cloudi_service_http_client_fetch = git
+pkg_cloudi_service_http_client_repo = https://github.com/CloudI/cloudi_service_http_client
+pkg_cloudi_service_http_client_commit = master
+
+PACKAGES += cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_name = cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_description = cowboy HTTP/HTTPS CloudI Service
+pkg_cloudi_service_http_cowboy_homepage = http://cloudi.org/
+pkg_cloudi_service_http_cowboy_fetch = git
+pkg_cloudi_service_http_cowboy_repo = https://github.com/CloudI/cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_commit = master
+
+PACKAGES += cloudi_service_http_elli
+pkg_cloudi_service_http_elli_name = cloudi_service_http_elli
+pkg_cloudi_service_http_elli_description = elli HTTP CloudI Service
+pkg_cloudi_service_http_elli_homepage = http://cloudi.org/
+pkg_cloudi_service_http_elli_fetch = git
+pkg_cloudi_service_http_elli_repo = https://github.com/CloudI/cloudi_service_http_elli
+pkg_cloudi_service_http_elli_commit = master
+
+PACKAGES += cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_name = cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_description = Map/Reduce CloudI Service
+pkg_cloudi_service_map_reduce_homepage = http://cloudi.org/
+pkg_cloudi_service_map_reduce_fetch = git
+pkg_cloudi_service_map_reduce_repo = https://github.com/CloudI/cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_commit = master
+
+PACKAGES += cloudi_service_oauth1
+pkg_cloudi_service_oauth1_name = cloudi_service_oauth1
+pkg_cloudi_service_oauth1_description = OAuth v1.0 CloudI Service
+pkg_cloudi_service_oauth1_homepage = http://cloudi.org/
+pkg_cloudi_service_oauth1_fetch = git
+pkg_cloudi_service_oauth1_repo = https://github.com/CloudI/cloudi_service_oauth1
+pkg_cloudi_service_oauth1_commit = master
+
+PACKAGES += cloudi_service_queue
+pkg_cloudi_service_queue_name = cloudi_service_queue
+pkg_cloudi_service_queue_description = Persistent Queue Service
+pkg_cloudi_service_queue_homepage = http://cloudi.org/
+pkg_cloudi_service_queue_fetch = git
+pkg_cloudi_service_queue_repo = https://github.com/CloudI/cloudi_service_queue
+pkg_cloudi_service_queue_commit = master
+
+PACKAGES += cloudi_service_quorum
+pkg_cloudi_service_quorum_name = cloudi_service_quorum
+pkg_cloudi_service_quorum_description = CloudI Quorum Service
+pkg_cloudi_service_quorum_homepage = http://cloudi.org/
+pkg_cloudi_service_quorum_fetch = git
+pkg_cloudi_service_quorum_repo = https://github.com/CloudI/cloudi_service_quorum
+pkg_cloudi_service_quorum_commit = master
+
+PACKAGES += cloudi_service_router
+pkg_cloudi_service_router_name = cloudi_service_router
+pkg_cloudi_service_router_description = CloudI Router Service
+pkg_cloudi_service_router_homepage = http://cloudi.org/
+pkg_cloudi_service_router_fetch = git
+pkg_cloudi_service_router_repo = https://github.com/CloudI/cloudi_service_router
+pkg_cloudi_service_router_commit = master
+
+PACKAGES += cloudi_service_tcp
+pkg_cloudi_service_tcp_name = cloudi_service_tcp
+pkg_cloudi_service_tcp_description = TCP CloudI Service
+pkg_cloudi_service_tcp_homepage = http://cloudi.org/
+pkg_cloudi_service_tcp_fetch = git
+pkg_cloudi_service_tcp_repo = https://github.com/CloudI/cloudi_service_tcp
+pkg_cloudi_service_tcp_commit = master
+
+PACKAGES += cloudi_service_timers
+pkg_cloudi_service_timers_name = cloudi_service_timers
+pkg_cloudi_service_timers_description = Timers CloudI Service
+pkg_cloudi_service_timers_homepage = http://cloudi.org/
+pkg_cloudi_service_timers_fetch = git
+pkg_cloudi_service_timers_repo = https://github.com/CloudI/cloudi_service_timers
+pkg_cloudi_service_timers_commit = master
+
+PACKAGES += cloudi_service_udp
+pkg_cloudi_service_udp_name = cloudi_service_udp
+pkg_cloudi_service_udp_description = UDP CloudI Service
+pkg_cloudi_service_udp_homepage = http://cloudi.org/
+pkg_cloudi_service_udp_fetch = git
+pkg_cloudi_service_udp_repo = https://github.com/CloudI/cloudi_service_udp
+pkg_cloudi_service_udp_commit = master
+
+PACKAGES += cloudi_service_validate
+pkg_cloudi_service_validate_name = cloudi_service_validate
+pkg_cloudi_service_validate_description = CloudI Validate Service
+pkg_cloudi_service_validate_homepage = http://cloudi.org/
+pkg_cloudi_service_validate_fetch = git
+pkg_cloudi_service_validate_repo = https://github.com/CloudI/cloudi_service_validate
+pkg_cloudi_service_validate_commit = master
+
+PACKAGES += cloudi_service_zeromq
+pkg_cloudi_service_zeromq_name = cloudi_service_zeromq
+pkg_cloudi_service_zeromq_description = ZeroMQ CloudI Service
+pkg_cloudi_service_zeromq_homepage = http://cloudi.org/
+pkg_cloudi_service_zeromq_fetch = git
+pkg_cloudi_service_zeromq_repo = https://github.com/CloudI/cloudi_service_zeromq
+pkg_cloudi_service_zeromq_commit = master
+
+PACKAGES += cluster_info
+pkg_cluster_info_name = cluster_info
+pkg_cluster_info_description = Fork of Hibari's nifty cluster_info OTP app
+pkg_cluster_info_homepage = https://github.com/basho/cluster_info
+pkg_cluster_info_fetch = git
+pkg_cluster_info_repo = https://github.com/basho/cluster_info
+pkg_cluster_info_commit = master
+
+PACKAGES += color
+pkg_color_name = color
+pkg_color_description = ANSI colors for your Erlang
+pkg_color_homepage = https://github.com/julianduque/erlang-color
+pkg_color_fetch = git
+pkg_color_repo = https://github.com/julianduque/erlang-color
+pkg_color_commit = master
+
+PACKAGES += confetti
+pkg_confetti_name = confetti
+pkg_confetti_description = Erlang configuration provider / application:get_env/2 on steroids
+pkg_confetti_homepage = https://github.com/jtendo/confetti
+pkg_confetti_fetch = git
+pkg_confetti_repo = https://github.com/jtendo/confetti
+pkg_confetti_commit = master
+
+PACKAGES += couchbeam
+pkg_couchbeam_name = couchbeam
+pkg_couchbeam_description = Apache CouchDB client in Erlang
+pkg_couchbeam_homepage = https://github.com/benoitc/couchbeam
+pkg_couchbeam_fetch = git
+pkg_couchbeam_repo = https://github.com/benoitc/couchbeam
+pkg_couchbeam_commit = master
+
+PACKAGES += covertool
+pkg_covertool_name = covertool
+pkg_covertool_description = Tool to convert Erlang cover data files into Cobertura XML reports
+pkg_covertool_homepage = https://github.com/idubrov/covertool
+pkg_covertool_fetch = git
+pkg_covertool_repo = https://github.com/idubrov/covertool
+pkg_covertool_commit = master
+
+PACKAGES += cowboy
+pkg_cowboy_name = cowboy
+pkg_cowboy_description = Small, fast and modular HTTP server.
+pkg_cowboy_homepage = http://ninenines.eu
+pkg_cowboy_fetch = git
+pkg_cowboy_repo = https://github.com/ninenines/cowboy
+pkg_cowboy_commit = 1.0.4
+
+PACKAGES += cowdb
+pkg_cowdb_name = cowdb
+pkg_cowdb_description = Pure Key/Value database library for Erlang Applications
+pkg_cowdb_homepage = https://github.com/refuge/cowdb
+pkg_cowdb_fetch = git
+pkg_cowdb_repo = https://github.com/refuge/cowdb
+pkg_cowdb_commit = master
+
+PACKAGES += cowlib
+pkg_cowlib_name = cowlib
+pkg_cowlib_description = Support library for manipulating Web protocols.
+pkg_cowlib_homepage = http://ninenines.eu
+pkg_cowlib_fetch = git
+pkg_cowlib_repo = https://github.com/ninenines/cowlib
+pkg_cowlib_commit = 1.0.2
+
+PACKAGES += cpg
+pkg_cpg_name = cpg
+pkg_cpg_description = CloudI Process Groups
+pkg_cpg_homepage = https://github.com/okeuday/cpg
+pkg_cpg_fetch = git
+pkg_cpg_repo = https://github.com/okeuday/cpg
+pkg_cpg_commit = master
+
+PACKAGES += cqerl
+pkg_cqerl_name = cqerl
+pkg_cqerl_description = Native Erlang CQL client for Cassandra
+pkg_cqerl_homepage = https://matehat.github.io/cqerl/
+pkg_cqerl_fetch = git
+pkg_cqerl_repo = https://github.com/matehat/cqerl
+pkg_cqerl_commit = master
+
+PACKAGES += cr
+pkg_cr_name = cr
+pkg_cr_description = Chain Replication
+pkg_cr_homepage = https://synrc.com/apps/cr/doc/cr.htm
+pkg_cr_fetch = git
+pkg_cr_repo = https://github.com/spawnproc/cr
+pkg_cr_commit = master
+
+PACKAGES += cuttlefish
+pkg_cuttlefish_name = cuttlefish
+pkg_cuttlefish_description = never lose your childlike sense of wonder baby cuttlefish, promise me?
+pkg_cuttlefish_homepage = https://github.com/basho/cuttlefish
+pkg_cuttlefish_fetch = git
+pkg_cuttlefish_repo = https://github.com/basho/cuttlefish
+pkg_cuttlefish_commit = master
+
+PACKAGES += damocles
+pkg_damocles_name = damocles
+pkg_damocles_description = Erlang library for generating adversarial network conditions for QAing distributed applications/systems on a single Linux box.
+pkg_damocles_homepage = https://github.com/lostcolony/damocles
+pkg_damocles_fetch = git
+pkg_damocles_repo = https://github.com/lostcolony/damocles
+pkg_damocles_commit = master
+
+PACKAGES += debbie
+pkg_debbie_name = debbie
+pkg_debbie_description = .DEB Built In Erlang
+pkg_debbie_homepage = https://github.com/crownedgrouse/debbie
+pkg_debbie_fetch = git
+pkg_debbie_repo = https://github.com/crownedgrouse/debbie
+pkg_debbie_commit = master
+
+PACKAGES += decimal
+pkg_decimal_name = decimal
+pkg_decimal_description = An Erlang decimal arithmetic library
+pkg_decimal_homepage = https://github.com/tim/erlang-decimal
+pkg_decimal_fetch = git
+pkg_decimal_repo = https://github.com/tim/erlang-decimal
+pkg_decimal_commit = master
+
+PACKAGES += detergent
+pkg_detergent_name = detergent
+pkg_detergent_description = An emulsifying Erlang SOAP library
+pkg_detergent_homepage = https://github.com/devinus/detergent
+pkg_detergent_fetch = git
+pkg_detergent_repo = https://github.com/devinus/detergent
+pkg_detergent_commit = master
+
+PACKAGES += detest
+pkg_detest_name = detest
+pkg_detest_description = Tool for running tests on a cluster of erlang nodes
+pkg_detest_homepage = https://github.com/biokoda/detest
+pkg_detest_fetch = git
+pkg_detest_repo = https://github.com/biokoda/detest
+pkg_detest_commit = master
+
+PACKAGES += dh_date
+pkg_dh_date_name = dh_date
+pkg_dh_date_description = Date formatting / parsing library for erlang
+pkg_dh_date_homepage = https://github.com/daleharvey/dh_date
+pkg_dh_date_fetch = git
+pkg_dh_date_repo = https://github.com/daleharvey/dh_date
+pkg_dh_date_commit = master
+
+PACKAGES += dirbusterl
+pkg_dirbusterl_name = dirbusterl
+pkg_dirbusterl_description = DirBuster successor in Erlang
+pkg_dirbusterl_homepage = https://github.com/silentsignal/DirBustErl
+pkg_dirbusterl_fetch = git
+pkg_dirbusterl_repo = https://github.com/silentsignal/DirBustErl
+pkg_dirbusterl_commit = master
+
+PACKAGES += dispcount
+pkg_dispcount_name = dispcount
+pkg_dispcount_description = Erlang task dispatcher based on ETS counters.
+pkg_dispcount_homepage = https://github.com/ferd/dispcount
+pkg_dispcount_fetch = git
+pkg_dispcount_repo = https://github.com/ferd/dispcount
+pkg_dispcount_commit = master
+
+PACKAGES += dlhttpc
+pkg_dlhttpc_name = dlhttpc
+pkg_dlhttpc_description = dispcount-based lhttpc fork for massive amounts of requests to limited endpoints
+pkg_dlhttpc_homepage = https://github.com/ferd/dlhttpc
+pkg_dlhttpc_fetch = git
+pkg_dlhttpc_repo = https://github.com/ferd/dlhttpc
+pkg_dlhttpc_commit = master
+
+PACKAGES += dns
+pkg_dns_name = dns
+pkg_dns_description = Erlang DNS library
+pkg_dns_homepage = https://github.com/aetrion/dns_erlang
+pkg_dns_fetch = git
+pkg_dns_repo = https://github.com/aetrion/dns_erlang
+pkg_dns_commit = master
+
+PACKAGES += dnssd
+pkg_dnssd_name = dnssd
+pkg_dnssd_description = Erlang interface to Apple's Bonjour DNS Service Discovery implementation
+pkg_dnssd_homepage = https://github.com/benoitc/dnssd_erlang
+pkg_dnssd_fetch = git
+pkg_dnssd_repo = https://github.com/benoitc/dnssd_erlang
+pkg_dnssd_commit = master
+
+PACKAGES += dynamic_compile
+pkg_dynamic_compile_name = dynamic_compile
+pkg_dynamic_compile_description = compile and load erlang modules from string input
+pkg_dynamic_compile_homepage = https://github.com/jkvor/dynamic_compile
+pkg_dynamic_compile_fetch = git
+pkg_dynamic_compile_repo = https://github.com/jkvor/dynamic_compile
+pkg_dynamic_compile_commit = master
+
+PACKAGES += e2
+pkg_e2_name = e2
+pkg_e2_description = Library to simplify writing correct OTP applications.
+pkg_e2_homepage = http://e2project.org
+pkg_e2_fetch = git
+pkg_e2_repo = https://github.com/gar1t/e2
+pkg_e2_commit = master
+
+PACKAGES += eamf
+pkg_eamf_name = eamf
+pkg_eamf_description = eAMF provides Action Message Format (AMF) support for Erlang
+pkg_eamf_homepage = https://github.com/mrinalwadhwa/eamf
+pkg_eamf_fetch = git
+pkg_eamf_repo = https://github.com/mrinalwadhwa/eamf
+pkg_eamf_commit = master
+
+PACKAGES += eavro
+pkg_eavro_name = eavro
+pkg_eavro_description = Apache Avro encoder/decoder
+pkg_eavro_homepage = https://github.com/SIfoxDevTeam/eavro
+pkg_eavro_fetch = git
+pkg_eavro_repo = https://github.com/SIfoxDevTeam/eavro
+pkg_eavro_commit = master
+
+PACKAGES += ecapnp
+pkg_ecapnp_name = ecapnp
+pkg_ecapnp_description = Cap'n Proto library for Erlang
+pkg_ecapnp_homepage = https://github.com/kaos/ecapnp
+pkg_ecapnp_fetch = git
+pkg_ecapnp_repo = https://github.com/kaos/ecapnp
+pkg_ecapnp_commit = master
+
+PACKAGES += econfig
+pkg_econfig_name = econfig
+pkg_econfig_description = simple Erlang config handler using INI files
+pkg_econfig_homepage = https://github.com/benoitc/econfig
+pkg_econfig_fetch = git
+pkg_econfig_repo = https://github.com/benoitc/econfig
+pkg_econfig_commit = master
+
+PACKAGES += edate
+pkg_edate_name = edate
+pkg_edate_description = date manipulation library for erlang
+pkg_edate_homepage = https://github.com/dweldon/edate
+pkg_edate_fetch = git
+pkg_edate_repo = https://github.com/dweldon/edate
+pkg_edate_commit = master
+
+PACKAGES += edgar
+pkg_edgar_name = edgar
+pkg_edgar_description = Erlang Does GNU AR
+pkg_edgar_homepage = https://github.com/crownedgrouse/edgar
+pkg_edgar_fetch = git
+pkg_edgar_repo = https://github.com/crownedgrouse/edgar
+pkg_edgar_commit = master
+
+PACKAGES += edis
+pkg_edis_name = edis
+pkg_edis_description = An Erlang implementation of Redis KV Store
+pkg_edis_homepage = http://inaka.github.com/edis/
+pkg_edis_fetch = git
+pkg_edis_repo = https://github.com/inaka/edis
+pkg_edis_commit = master
+
+PACKAGES += edns
+pkg_edns_name = edns
+pkg_edns_description = Erlang/OTP DNS server
+pkg_edns_homepage = https://github.com/hcvst/erlang-dns
+pkg_edns_fetch = git
+pkg_edns_repo = https://github.com/hcvst/erlang-dns
+pkg_edns_commit = master
+
+PACKAGES += edown
+pkg_edown_name = edown
+pkg_edown_description = EDoc extension for generating Github-flavored Markdown
+pkg_edown_homepage = https://github.com/uwiger/edown
+pkg_edown_fetch = git
+pkg_edown_repo = https://github.com/uwiger/edown
+pkg_edown_commit = master
+
+PACKAGES += eep
+pkg_eep_name = eep
+pkg_eep_description = Erlang Easy Profiling (eep) application provides a way to analyze application performance and call hierarchy
+pkg_eep_homepage = https://github.com/virtan/eep
+pkg_eep_fetch = git
+pkg_eep_repo = https://github.com/virtan/eep
+pkg_eep_commit = master
+
+PACKAGES += eep_app
+pkg_eep_app_name = eep_app
+pkg_eep_app_description = Embedded Event Processing
+pkg_eep_app_homepage = https://github.com/darach/eep-erl
+pkg_eep_app_fetch = git
+pkg_eep_app_repo = https://github.com/darach/eep-erl
+pkg_eep_app_commit = master
+
+PACKAGES += efene
+pkg_efene_name = efene
+pkg_efene_description = Alternative syntax for the Erlang Programming Language focusing on simplicity, ease of use and programmer UX
+pkg_efene_homepage = https://github.com/efene/efene
+pkg_efene_fetch = git
+pkg_efene_repo = https://github.com/efene/efene
+pkg_efene_commit = master
+
+PACKAGES += egeoip
+pkg_egeoip_name = egeoip
+pkg_egeoip_description = Erlang IP Geolocation module, currently supporting the MaxMind GeoLite City Database.
+pkg_egeoip_homepage = https://github.com/mochi/egeoip
+pkg_egeoip_fetch = git
+pkg_egeoip_repo = https://github.com/mochi/egeoip
+pkg_egeoip_commit = master
+
+PACKAGES += ehsa
+pkg_ehsa_name = ehsa
+pkg_ehsa_description = Erlang HTTP server basic and digest authentication modules
+pkg_ehsa_homepage = https://bitbucket.org/a12n/ehsa
+pkg_ehsa_fetch = hg
+pkg_ehsa_repo = https://bitbucket.org/a12n/ehsa
+pkg_ehsa_commit = default
+
+PACKAGES += ej
+pkg_ej_name = ej
+pkg_ej_description = Helper module for working with Erlang terms representing JSON
+pkg_ej_homepage = https://github.com/seth/ej
+pkg_ej_fetch = git
+pkg_ej_repo = https://github.com/seth/ej
+pkg_ej_commit = master
+
+PACKAGES += ejabberd
+pkg_ejabberd_name = ejabberd
+pkg_ejabberd_description = Robust, ubiquitous and massively scalable Jabber / XMPP Instant Messaging platform
+pkg_ejabberd_homepage = https://github.com/processone/ejabberd
+pkg_ejabberd_fetch = git
+pkg_ejabberd_repo = https://github.com/processone/ejabberd
+pkg_ejabberd_commit = master
+
+PACKAGES += ejwt
+pkg_ejwt_name = ejwt
+pkg_ejwt_description = erlang library for JSON Web Token
+pkg_ejwt_homepage = https://github.com/artefactop/ejwt
+pkg_ejwt_fetch = git
+pkg_ejwt_repo = https://github.com/artefactop/ejwt
+pkg_ejwt_commit = master
+
+PACKAGES += ekaf
+pkg_ekaf_name = ekaf
+pkg_ekaf_description = A minimal, high-performance Kafka client in Erlang.
+pkg_ekaf_homepage = https://github.com/helpshift/ekaf
+pkg_ekaf_fetch = git
+pkg_ekaf_repo = https://github.com/helpshift/ekaf
+pkg_ekaf_commit = master
+
+PACKAGES += elarm
+pkg_elarm_name = elarm
+pkg_elarm_description = Alarm Manager for Erlang.
+pkg_elarm_homepage = https://github.com/esl/elarm
+pkg_elarm_fetch = git
+pkg_elarm_repo = https://github.com/esl/elarm
+pkg_elarm_commit = master
+
+PACKAGES += eleveldb
+pkg_eleveldb_name = eleveldb
+pkg_eleveldb_description = Erlang LevelDB API
+pkg_eleveldb_homepage = https://github.com/basho/eleveldb
+pkg_eleveldb_fetch = git
+pkg_eleveldb_repo = https://github.com/basho/eleveldb
+pkg_eleveldb_commit = master
+
+PACKAGES += elixir
+pkg_elixir_name = elixir
+pkg_elixir_description = Elixir is a dynamic, functional language designed for building scalable and maintainable applications
+pkg_elixir_homepage = https://elixir-lang.org/
+pkg_elixir_fetch = git
+pkg_elixir_repo = https://github.com/elixir-lang/elixir
+pkg_elixir_commit = master
+
+PACKAGES += elli
+pkg_elli_name = elli
+pkg_elli_description = Simple, robust and performant Erlang web server
+pkg_elli_homepage = https://github.com/elli-lib/elli
+pkg_elli_fetch = git
+pkg_elli_repo = https://github.com/elli-lib/elli
+pkg_elli_commit = master
+
+PACKAGES += elvis
+pkg_elvis_name = elvis
+pkg_elvis_description = Erlang Style Reviewer
+pkg_elvis_homepage = https://github.com/inaka/elvis
+pkg_elvis_fetch = git
+pkg_elvis_repo = https://github.com/inaka/elvis
+pkg_elvis_commit = master
+
+PACKAGES += emagick
+pkg_emagick_name = emagick
+pkg_emagick_description = Wrapper for Graphics/ImageMagick command line tool.
+pkg_emagick_homepage = https://github.com/kivra/emagick
+pkg_emagick_fetch = git
+pkg_emagick_repo = https://github.com/kivra/emagick
+pkg_emagick_commit = master
+
+PACKAGES += emysql
+pkg_emysql_name = emysql
+pkg_emysql_description = Stable, pure Erlang MySQL driver.
+pkg_emysql_homepage = https://github.com/Eonblast/Emysql
+pkg_emysql_fetch = git
+pkg_emysql_repo = https://github.com/Eonblast/Emysql
+pkg_emysql_commit = master
+
+PACKAGES += enm
+pkg_enm_name = enm
+pkg_enm_description = Erlang driver for nanomsg
+pkg_enm_homepage = https://github.com/basho/enm
+pkg_enm_fetch = git
+pkg_enm_repo = https://github.com/basho/enm
+pkg_enm_commit = master
+
+PACKAGES += entop
+pkg_entop_name = entop
+pkg_entop_description = A top-like tool for monitoring an Erlang node
+pkg_entop_homepage = https://github.com/mazenharake/entop
+pkg_entop_fetch = git
+pkg_entop_repo = https://github.com/mazenharake/entop
+pkg_entop_commit = master
+
+PACKAGES += epcap
+pkg_epcap_name = epcap
+pkg_epcap_description = Erlang packet capture interface using pcap
+pkg_epcap_homepage = https://github.com/msantos/epcap
+pkg_epcap_fetch = git
+pkg_epcap_repo = https://github.com/msantos/epcap
+pkg_epcap_commit = master
+
+PACKAGES += eper
+pkg_eper_name = eper
+pkg_eper_description = Erlang performance and debugging tools.
+pkg_eper_homepage = https://github.com/massemanet/eper
+pkg_eper_fetch = git
+pkg_eper_repo = https://github.com/massemanet/eper
+pkg_eper_commit = master
+
+PACKAGES += epgsql
+pkg_epgsql_name = epgsql
+pkg_epgsql_description = Erlang PostgreSQL client library.
+pkg_epgsql_homepage = https://github.com/epgsql/epgsql
+pkg_epgsql_fetch = git
+pkg_epgsql_repo = https://github.com/epgsql/epgsql
+pkg_epgsql_commit = master
+
+PACKAGES += episcina
+pkg_episcina_name = episcina
+pkg_episcina_description = A simple, non-intrusive resource pool for connections
+pkg_episcina_homepage = https://github.com/erlware/episcina
+pkg_episcina_fetch = git
+pkg_episcina_repo = https://github.com/erlware/episcina
+pkg_episcina_commit = master
+
+PACKAGES += eplot
+pkg_eplot_name = eplot
+pkg_eplot_description = A plot engine written in erlang.
+pkg_eplot_homepage = https://github.com/psyeugenic/eplot
+pkg_eplot_fetch = git
+pkg_eplot_repo = https://github.com/psyeugenic/eplot
+pkg_eplot_commit = master
+
+PACKAGES += epocxy
+pkg_epocxy_name = epocxy
+pkg_epocxy_description = Erlang Patterns of Concurrency
+pkg_epocxy_homepage = https://github.com/duomark/epocxy
+pkg_epocxy_fetch = git
+pkg_epocxy_repo = https://github.com/duomark/epocxy
+pkg_epocxy_commit = master
+
+PACKAGES += epubnub
+pkg_epubnub_name = epubnub
+pkg_epubnub_description = Erlang PubNub API
+pkg_epubnub_homepage = https://github.com/tsloughter/epubnub
+pkg_epubnub_fetch = git
+pkg_epubnub_repo = https://github.com/tsloughter/epubnub
+pkg_epubnub_commit = master
+
+PACKAGES += eqm
+pkg_eqm_name = eqm
+pkg_eqm_description = Erlang pub sub with supply-demand channels
+pkg_eqm_homepage = https://github.com/loucash/eqm
+pkg_eqm_fetch = git
+pkg_eqm_repo = https://github.com/loucash/eqm
+pkg_eqm_commit = master
+
+PACKAGES += eredis
+pkg_eredis_name = eredis
+pkg_eredis_description = Erlang Redis client
+pkg_eredis_homepage = https://github.com/wooga/eredis
+pkg_eredis_fetch = git
+pkg_eredis_repo = https://github.com/wooga/eredis
+pkg_eredis_commit = master
+
+PACKAGES += eredis_pool
+pkg_eredis_pool_name = eredis_pool
+pkg_eredis_pool_description = eredis_pool is a pool of Redis clients, using eredis and poolboy.
+pkg_eredis_pool_homepage = https://github.com/hiroeorz/eredis_pool
+pkg_eredis_pool_fetch = git
+pkg_eredis_pool_repo = https://github.com/hiroeorz/eredis_pool
+pkg_eredis_pool_commit = master
+
+PACKAGES += erl_streams
+pkg_erl_streams_name = erl_streams
+pkg_erl_streams_description = Streams in Erlang
+pkg_erl_streams_homepage = https://github.com/epappas/erl_streams
+pkg_erl_streams_fetch = git
+pkg_erl_streams_repo = https://github.com/epappas/erl_streams
+pkg_erl_streams_commit = master
+
+PACKAGES += erlang_cep
+pkg_erlang_cep_name = erlang_cep
+pkg_erlang_cep_description = A basic CEP package written in erlang
+pkg_erlang_cep_homepage = https://github.com/danmacklin/erlang_cep
+pkg_erlang_cep_fetch = git
+pkg_erlang_cep_repo = https://github.com/danmacklin/erlang_cep
+pkg_erlang_cep_commit = master
+
+PACKAGES += erlang_js
+pkg_erlang_js_name = erlang_js
+pkg_erlang_js_description = A linked-in driver for Erlang to Mozilla's Spidermonkey Javascript runtime.
+pkg_erlang_js_homepage = https://github.com/basho/erlang_js
+pkg_erlang_js_fetch = git
+pkg_erlang_js_repo = https://github.com/basho/erlang_js
+pkg_erlang_js_commit = master
+
+PACKAGES += erlang_localtime
+pkg_erlang_localtime_name = erlang_localtime
+pkg_erlang_localtime_description = Erlang library for conversion from one local time to another
+pkg_erlang_localtime_homepage = https://github.com/dmitryme/erlang_localtime
+pkg_erlang_localtime_fetch = git
+pkg_erlang_localtime_repo = https://github.com/dmitryme/erlang_localtime
+pkg_erlang_localtime_commit = master
+
+PACKAGES += erlang_smtp
+pkg_erlang_smtp_name = erlang_smtp
+pkg_erlang_smtp_description = Erlang SMTP and POP3 server code.
+pkg_erlang_smtp_homepage = https://github.com/tonyg/erlang-smtp
+pkg_erlang_smtp_fetch = git
+pkg_erlang_smtp_repo = https://github.com/tonyg/erlang-smtp
+pkg_erlang_smtp_commit = master
+
+PACKAGES += erlang_term
+pkg_erlang_term_name = erlang_term
+pkg_erlang_term_description = Erlang Term Info
+pkg_erlang_term_homepage = https://github.com/okeuday/erlang_term
+pkg_erlang_term_fetch = git
+pkg_erlang_term_repo = https://github.com/okeuday/erlang_term
+pkg_erlang_term_commit = master
+
+PACKAGES += erlastic_search
+pkg_erlastic_search_name = erlastic_search
+pkg_erlastic_search_description = An Erlang app for communicating with Elastic Search's REST interface.
+pkg_erlastic_search_homepage = https://github.com/tsloughter/erlastic_search
+pkg_erlastic_search_fetch = git
+pkg_erlastic_search_repo = https://github.com/tsloughter/erlastic_search
+pkg_erlastic_search_commit = master
+
+PACKAGES += erlasticsearch
+pkg_erlasticsearch_name = erlasticsearch
+pkg_erlasticsearch_description = Erlang thrift interface to elastic_search
+pkg_erlasticsearch_homepage = https://github.com/dieswaytoofast/erlasticsearch
+pkg_erlasticsearch_fetch = git
+pkg_erlasticsearch_repo = https://github.com/dieswaytoofast/erlasticsearch
+pkg_erlasticsearch_commit = master
+
+PACKAGES += erlbrake
+pkg_erlbrake_name = erlbrake
+pkg_erlbrake_description = Erlang Airbrake notification client
+pkg_erlbrake_homepage = https://github.com/kenpratt/erlbrake
+pkg_erlbrake_fetch = git
+pkg_erlbrake_repo = https://github.com/kenpratt/erlbrake
+pkg_erlbrake_commit = master
+
+PACKAGES += erlcloud
+pkg_erlcloud_name = erlcloud
+pkg_erlcloud_description = Cloud Computing library for erlang (Amazon EC2, S3, SQS, SimpleDB, Mechanical Turk, ELB)
+pkg_erlcloud_homepage = https://github.com/gleber/erlcloud
+pkg_erlcloud_fetch = git
+pkg_erlcloud_repo = https://github.com/gleber/erlcloud
+pkg_erlcloud_commit = master
+
+PACKAGES += erlcron
+pkg_erlcron_name = erlcron
+pkg_erlcron_description = Erlang cronish system
+pkg_erlcron_homepage = https://github.com/erlware/erlcron
+pkg_erlcron_fetch = git
+pkg_erlcron_repo = https://github.com/erlware/erlcron
+pkg_erlcron_commit = master
+
+PACKAGES += erldb
+pkg_erldb_name = erldb
+pkg_erldb_description = ORM (Object-relational mapping) application implemented in Erlang
+pkg_erldb_homepage = http://erldb.org
+pkg_erldb_fetch = git
+pkg_erldb_repo = https://github.com/erldb/erldb
+pkg_erldb_commit = master
+
+PACKAGES += erldis
+pkg_erldis_name = erldis
+pkg_erldis_description = redis erlang client library
+pkg_erldis_homepage = https://github.com/cstar/erldis
+pkg_erldis_fetch = git
+pkg_erldis_repo = https://github.com/cstar/erldis
+pkg_erldis_commit = master
+
+PACKAGES += erldns
+pkg_erldns_name = erldns
+pkg_erldns_description = DNS server, in erlang.
+pkg_erldns_homepage = https://github.com/aetrion/erl-dns
+pkg_erldns_fetch = git
+pkg_erldns_repo = https://github.com/aetrion/erl-dns
+pkg_erldns_commit = master
+
+PACKAGES += erldocker
+pkg_erldocker_name = erldocker
+pkg_erldocker_description = Docker Remote API client for Erlang
+pkg_erldocker_homepage = https://github.com/proger/erldocker
+pkg_erldocker_fetch = git
+pkg_erldocker_repo = https://github.com/proger/erldocker
+pkg_erldocker_commit = master
+
+PACKAGES += erlfsmon
+pkg_erlfsmon_name = erlfsmon
+pkg_erlfsmon_description = Erlang filesystem event watcher for Linux and OSX
+pkg_erlfsmon_homepage = https://github.com/proger/erlfsmon
+pkg_erlfsmon_fetch = git
+pkg_erlfsmon_repo = https://github.com/proger/erlfsmon
+pkg_erlfsmon_commit = master
+
+PACKAGES += erlgit
+pkg_erlgit_name = erlgit
+pkg_erlgit_description = Erlang convenience wrapper around git executable
+pkg_erlgit_homepage = https://github.com/gleber/erlgit
+pkg_erlgit_fetch = git
+pkg_erlgit_repo = https://github.com/gleber/erlgit
+pkg_erlgit_commit = master
+
+PACKAGES += erlguten
+pkg_erlguten_name = erlguten
+pkg_erlguten_description = ErlGuten is a system for high-quality typesetting, written purely in Erlang.
+pkg_erlguten_homepage = https://github.com/richcarl/erlguten
+pkg_erlguten_fetch = git
+pkg_erlguten_repo = https://github.com/richcarl/erlguten
+pkg_erlguten_commit = master
+
+PACKAGES += erlmc
+pkg_erlmc_name = erlmc
+pkg_erlmc_description = Erlang memcached binary protocol client
+pkg_erlmc_homepage = https://github.com/jkvor/erlmc
+pkg_erlmc_fetch = git
+pkg_erlmc_repo = https://github.com/jkvor/erlmc
+pkg_erlmc_commit = master
+
+PACKAGES += erlmongo
+pkg_erlmongo_name = erlmongo
+pkg_erlmongo_description = Record based Erlang driver for MongoDB with gridfs support
+pkg_erlmongo_homepage = https://github.com/SergejJurecko/erlmongo
+pkg_erlmongo_fetch = git
+pkg_erlmongo_repo = https://github.com/SergejJurecko/erlmongo
+pkg_erlmongo_commit = master
+
+PACKAGES += erlog
+pkg_erlog_name = erlog
+pkg_erlog_description = Prolog interpreter in and for Erlang
+pkg_erlog_homepage = https://github.com/rvirding/erlog
+pkg_erlog_fetch = git
+pkg_erlog_repo = https://github.com/rvirding/erlog
+pkg_erlog_commit = master
+
+PACKAGES += erlpass
+pkg_erlpass_name = erlpass
+pkg_erlpass_description = A library to handle password hashing and changing in a safe manner, independent from any kind of storage whatsoever.
+pkg_erlpass_homepage = https://github.com/ferd/erlpass
+pkg_erlpass_fetch = git
+pkg_erlpass_repo = https://github.com/ferd/erlpass
+pkg_erlpass_commit = master
+
+PACKAGES += erlport
+pkg_erlport_name = erlport
+pkg_erlport_description = ErlPort - connect Erlang to other languages
+pkg_erlport_homepage = https://github.com/hdima/erlport
+pkg_erlport_fetch = git
+pkg_erlport_repo = https://github.com/hdima/erlport
+pkg_erlport_commit = master
+
+PACKAGES += erlsh
+pkg_erlsh_name = erlsh
+pkg_erlsh_description = Erlang shell tools
+pkg_erlsh_homepage = https://github.com/proger/erlsh
+pkg_erlsh_fetch = git
+pkg_erlsh_repo = https://github.com/proger/erlsh
+pkg_erlsh_commit = master
+
+PACKAGES += erlsha2
+pkg_erlsha2_name = erlsha2
+pkg_erlsha2_description = SHA-224, SHA-256, SHA-384, SHA-512 implemented in Erlang NIFs.
+pkg_erlsha2_homepage = https://github.com/vinoski/erlsha2
+pkg_erlsha2_fetch = git
+pkg_erlsha2_repo = https://github.com/vinoski/erlsha2
+pkg_erlsha2_commit = master
+
+PACKAGES += erlsom
+pkg_erlsom_name = erlsom
+pkg_erlsom_description = XML parser for Erlang
+pkg_erlsom_homepage = https://github.com/willemdj/erlsom
+pkg_erlsom_fetch = git
+pkg_erlsom_repo = https://github.com/willemdj/erlsom
+pkg_erlsom_commit = master
+
+PACKAGES += erlubi
+pkg_erlubi_name = erlubi
+pkg_erlubi_description = Ubigraph Erlang Client (and Process Visualizer)
+pkg_erlubi_homepage = https://github.com/krestenkrab/erlubi
+pkg_erlubi_fetch = git
+pkg_erlubi_repo = https://github.com/krestenkrab/erlubi
+pkg_erlubi_commit = master
+
+PACKAGES += erlvolt
+pkg_erlvolt_name = erlvolt
+pkg_erlvolt_description = VoltDB Erlang Client Driver
+pkg_erlvolt_homepage = https://github.com/VoltDB/voltdb-client-erlang
+pkg_erlvolt_fetch = git
+pkg_erlvolt_repo = https://github.com/VoltDB/voltdb-client-erlang
+pkg_erlvolt_commit = master
+
+PACKAGES += erlware_commons
+pkg_erlware_commons_name = erlware_commons
+pkg_erlware_commons_description = Erlware Commons is an Erlware project focused on all aspects of reusable Erlang components.
+pkg_erlware_commons_homepage = https://github.com/erlware/erlware_commons
+pkg_erlware_commons_fetch = git
+pkg_erlware_commons_repo = https://github.com/erlware/erlware_commons
+pkg_erlware_commons_commit = master
+
+PACKAGES += erlydtl
+pkg_erlydtl_name = erlydtl
+pkg_erlydtl_description = Django Template Language for Erlang.
+pkg_erlydtl_homepage = https://github.com/erlydtl/erlydtl
+pkg_erlydtl_fetch = git
+pkg_erlydtl_repo = https://github.com/erlydtl/erlydtl
+pkg_erlydtl_commit = master
+
+PACKAGES += errd
+pkg_errd_name = errd
+pkg_errd_description = Erlang RRDTool library
+pkg_errd_homepage = https://github.com/archaelus/errd
+pkg_errd_fetch = git
+pkg_errd_repo = https://github.com/archaelus/errd
+pkg_errd_commit = master
+
+PACKAGES += erserve
+pkg_erserve_name = erserve
+pkg_erserve_description = Erlang/Rserve communication interface
+pkg_erserve_homepage = https://github.com/del/erserve
+pkg_erserve_fetch = git
+pkg_erserve_repo = https://github.com/del/erserve
+pkg_erserve_commit = master
+
+PACKAGES += erwa
+pkg_erwa_name = erwa
+pkg_erwa_description = A WAMP router and client written in Erlang.
+pkg_erwa_homepage = https://github.com/bwegh/erwa
+pkg_erwa_fetch = git
+pkg_erwa_repo = https://github.com/bwegh/erwa
+pkg_erwa_commit = master
+
+PACKAGES += escalus
+pkg_escalus_name = escalus
+pkg_escalus_description = An XMPP client library in Erlang for conveniently testing XMPP servers
+pkg_escalus_homepage = https://github.com/esl/escalus
+pkg_escalus_fetch = git
+pkg_escalus_repo = https://github.com/esl/escalus
+pkg_escalus_commit = master
+
+PACKAGES += esh_mk
+pkg_esh_mk_name = esh_mk
+pkg_esh_mk_description = esh template engine plugin for erlang.mk
+pkg_esh_mk_homepage = https://github.com/crownedgrouse/esh.mk
+pkg_esh_mk_fetch = git
+pkg_esh_mk_repo = https://github.com/crownedgrouse/esh.mk.git
+pkg_esh_mk_commit = master
+
+PACKAGES += espec
+pkg_espec_name = espec
+pkg_espec_description = ESpec: Behaviour driven development framework for Erlang
+pkg_espec_homepage = https://github.com/lucaspiller/espec
+pkg_espec_fetch = git
+pkg_espec_repo = https://github.com/lucaspiller/espec
+pkg_espec_commit = master
+
+PACKAGES += estatsd
+pkg_estatsd_name = estatsd
+pkg_estatsd_description = Erlang stats aggregation app that periodically flushes data to graphite
+pkg_estatsd_homepage = https://github.com/RJ/estatsd
+pkg_estatsd_fetch = git
+pkg_estatsd_repo = https://github.com/RJ/estatsd
+pkg_estatsd_commit = master
+
+PACKAGES += etap
+pkg_etap_name = etap
+pkg_etap_description = etap is a simple erlang testing library that provides TAP compliant output.
+pkg_etap_homepage = https://github.com/ngerakines/etap
+pkg_etap_fetch = git
+pkg_etap_repo = https://github.com/ngerakines/etap
+pkg_etap_commit = master
+
+PACKAGES += etest
+pkg_etest_name = etest
+pkg_etest_description = A lightweight, convention over configuration test framework for Erlang
+pkg_etest_homepage = https://github.com/wooga/etest
+pkg_etest_fetch = git
+pkg_etest_repo = https://github.com/wooga/etest
+pkg_etest_commit = master
+
+PACKAGES += etest_http
+pkg_etest_http_name = etest_http
+pkg_etest_http_description = etest Assertions around HTTP (client-side)
+pkg_etest_http_homepage = https://github.com/wooga/etest_http
+pkg_etest_http_fetch = git
+pkg_etest_http_repo = https://github.com/wooga/etest_http
+pkg_etest_http_commit = master
+
+PACKAGES += etoml
+pkg_etoml_name = etoml
+pkg_etoml_description = TOML parser for Erlang
+pkg_etoml_homepage = https://github.com/kalta/etoml
+pkg_etoml_fetch = git
+pkg_etoml_repo = https://github.com/kalta/etoml
+pkg_etoml_commit = master
+
+PACKAGES += eunit
+pkg_eunit_name = eunit
+pkg_eunit_description = The EUnit lightweight unit testing framework for Erlang - this is the canonical development repository.
+pkg_eunit_homepage = https://github.com/richcarl/eunit
+pkg_eunit_fetch = git
+pkg_eunit_repo = https://github.com/richcarl/eunit
+pkg_eunit_commit = master
+
+PACKAGES += eunit_formatters
+pkg_eunit_formatters_name = eunit_formatters
+pkg_eunit_formatters_description = Because eunit's output sucks. Let's make it better.
+pkg_eunit_formatters_homepage = https://github.com/seancribbs/eunit_formatters
+pkg_eunit_formatters_fetch = git
+pkg_eunit_formatters_repo = https://github.com/seancribbs/eunit_formatters
+pkg_eunit_formatters_commit = master
+
+PACKAGES += euthanasia
+pkg_euthanasia_name = euthanasia
+pkg_euthanasia_description = Merciful killer for your Erlang processes
+pkg_euthanasia_homepage = https://github.com/doubleyou/euthanasia
+pkg_euthanasia_fetch = git
+pkg_euthanasia_repo = https://github.com/doubleyou/euthanasia
+pkg_euthanasia_commit = master
+
+PACKAGES += evum
+pkg_evum_name = evum
+pkg_evum_description = Spawn Linux VMs as Erlang processes in the Erlang VM
+pkg_evum_homepage = https://github.com/msantos/evum
+pkg_evum_fetch = git
+pkg_evum_repo = https://github.com/msantos/evum
+pkg_evum_commit = master
+
+PACKAGES += exec
+pkg_exec_name = erlexec
+pkg_exec_description = Execute and control OS processes from Erlang/OTP.
+pkg_exec_homepage = http://saleyn.github.com/erlexec
+pkg_exec_fetch = git
+pkg_exec_repo = https://github.com/saleyn/erlexec
+pkg_exec_commit = master
+
+PACKAGES += exml
+pkg_exml_name = exml
+pkg_exml_description = XML parsing library in Erlang
+pkg_exml_homepage = https://github.com/paulgray/exml
+pkg_exml_fetch = git
+pkg_exml_repo = https://github.com/paulgray/exml
+pkg_exml_commit = master
+
+PACKAGES += exometer
+pkg_exometer_name = exometer
+pkg_exometer_description = Basic measurement objects and probe behavior
+pkg_exometer_homepage = https://github.com/Feuerlabs/exometer
+pkg_exometer_fetch = git
+pkg_exometer_repo = https://github.com/Feuerlabs/exometer
+pkg_exometer_commit = master
+
+PACKAGES += exs1024
+pkg_exs1024_name = exs1024
+pkg_exs1024_description = Xorshift1024star pseudo random number generator for Erlang.
+pkg_exs1024_homepage = https://github.com/jj1bdx/exs1024
+pkg_exs1024_fetch = git
+pkg_exs1024_repo = https://github.com/jj1bdx/exs1024
+pkg_exs1024_commit = master
+
+PACKAGES += exs64
+pkg_exs64_name = exs64
+pkg_exs64_description = Xorshift64star pseudo random number generator for Erlang.
+pkg_exs64_homepage = https://github.com/jj1bdx/exs64
+pkg_exs64_fetch = git
+pkg_exs64_repo = https://github.com/jj1bdx/exs64
+pkg_exs64_commit = master
+
+PACKAGES += exsplus116
+pkg_exsplus116_name = exsplus116
+pkg_exsplus116_description = Xorshift116plus for Erlang
+pkg_exsplus116_homepage = https://github.com/jj1bdx/exsplus116
+pkg_exsplus116_fetch = git
+pkg_exsplus116_repo = https://github.com/jj1bdx/exsplus116
+pkg_exsplus116_commit = master
+
+PACKAGES += exsplus128
+pkg_exsplus128_name = exsplus128
+pkg_exsplus128_description = Xorshift128plus pseudo random number generator for Erlang.
+pkg_exsplus128_homepage = https://github.com/jj1bdx/exsplus128
+pkg_exsplus128_fetch = git
+pkg_exsplus128_repo = https://github.com/jj1bdx/exsplus128
+pkg_exsplus128_commit = master
+
+PACKAGES += ezmq
+pkg_ezmq_name = ezmq
+pkg_ezmq_description = zMQ implemented in Erlang
+pkg_ezmq_homepage = https://github.com/RoadRunnr/ezmq
+pkg_ezmq_fetch = git
+pkg_ezmq_repo = https://github.com/RoadRunnr/ezmq
+pkg_ezmq_commit = master
+
+PACKAGES += ezmtp
+pkg_ezmtp_name = ezmtp
+pkg_ezmtp_description = ZMTP protocol in pure Erlang.
+pkg_ezmtp_homepage = https://github.com/a13x/ezmtp
+pkg_ezmtp_fetch = git
+pkg_ezmtp_repo = https://github.com/a13x/ezmtp
+pkg_ezmtp_commit = master
+
+PACKAGES += fast_disk_log
+pkg_fast_disk_log_name = fast_disk_log
+pkg_fast_disk_log_description = Pool-based asynchronous Erlang disk logger
+pkg_fast_disk_log_homepage = https://github.com/lpgauth/fast_disk_log
+pkg_fast_disk_log_fetch = git
+pkg_fast_disk_log_repo = https://github.com/lpgauth/fast_disk_log
+pkg_fast_disk_log_commit = master
+
+PACKAGES += feeder
+pkg_feeder_name = feeder
+pkg_feeder_description = Stream parse RSS and Atom formatted XML feeds.
+pkg_feeder_homepage = https://github.com/michaelnisi/feeder
+pkg_feeder_fetch = git
+pkg_feeder_repo = https://github.com/michaelnisi/feeder
+pkg_feeder_commit = master
+
+PACKAGES += find_crate
+pkg_find_crate_name = find_crate
+pkg_find_crate_description = Find Rust libs and exes in Erlang application priv directory
+pkg_find_crate_homepage = https://github.com/goertzenator/find_crate
+pkg_find_crate_fetch = git
+pkg_find_crate_repo = https://github.com/goertzenator/find_crate
+pkg_find_crate_commit = master
+
+PACKAGES += fix
+pkg_fix_name = fix
+pkg_fix_description = Implementation of the FIX protocol (http://fixprotocol.org/).
+pkg_fix_homepage = https://github.com/maxlapshin/fix
+pkg_fix_fetch = git
+pkg_fix_repo = https://github.com/maxlapshin/fix
+pkg_fix_commit = master
+
+PACKAGES += flower
+pkg_flower_name = flower
+pkg_flower_description = FlowER - an Erlang OpenFlow development platform
+pkg_flower_homepage = https://github.com/travelping/flower
+pkg_flower_fetch = git
+pkg_flower_repo = https://github.com/travelping/flower
+pkg_flower_commit = master
+
+PACKAGES += fn
+pkg_fn_name = fn
+pkg_fn_description = Function utilities for Erlang
+pkg_fn_homepage = https://github.com/reiddraper/fn
+pkg_fn_fetch = git
+pkg_fn_repo = https://github.com/reiddraper/fn
+pkg_fn_commit = master
+
+PACKAGES += folsom
+pkg_folsom_name = folsom
+pkg_folsom_description = Expose Erlang Events and Metrics
+pkg_folsom_homepage = https://github.com/boundary/folsom
+pkg_folsom_fetch = git
+pkg_folsom_repo = https://github.com/boundary/folsom
+pkg_folsom_commit = master
+
+PACKAGES += folsom_cowboy
+pkg_folsom_cowboy_name = folsom_cowboy
+pkg_folsom_cowboy_description = A Cowboy-based Folsom HTTP wrapper.
+pkg_folsom_cowboy_homepage = https://github.com/boundary/folsom_cowboy
+pkg_folsom_cowboy_fetch = git
+pkg_folsom_cowboy_repo = https://github.com/boundary/folsom_cowboy
+pkg_folsom_cowboy_commit = master
+
+PACKAGES += folsomite
+pkg_folsomite_name = folsomite
+pkg_folsomite_description = blow up your graphite / riemann server with folsom metrics
+pkg_folsomite_homepage = https://github.com/campanja/folsomite
+pkg_folsomite_fetch = git
+pkg_folsomite_repo = https://github.com/campanja/folsomite
+pkg_folsomite_commit = master
+
+PACKAGES += fs
+pkg_fs_name = fs
+pkg_fs_description = Erlang FileSystem Listener
+pkg_fs_homepage = https://github.com/synrc/fs
+pkg_fs_fetch = git
+pkg_fs_repo = https://github.com/synrc/fs
+pkg_fs_commit = master
+
+PACKAGES += fuse
+pkg_fuse_name = fuse
+pkg_fuse_description = A Circuit Breaker for Erlang
+pkg_fuse_homepage = https://github.com/jlouis/fuse
+pkg_fuse_fetch = git
+pkg_fuse_repo = https://github.com/jlouis/fuse
+pkg_fuse_commit = master
+
+PACKAGES += gcm
+pkg_gcm_name = gcm
+pkg_gcm_description = An Erlang application for Google Cloud Messaging
+pkg_gcm_homepage = https://github.com/pdincau/gcm-erlang
+pkg_gcm_fetch = git
+pkg_gcm_repo = https://github.com/pdincau/gcm-erlang
+pkg_gcm_commit = master
+
+PACKAGES += gcprof
+pkg_gcprof_name = gcprof
+pkg_gcprof_description = Garbage Collection profiler for Erlang
+pkg_gcprof_homepage = https://github.com/knutin/gcprof
+pkg_gcprof_fetch = git
+pkg_gcprof_repo = https://github.com/knutin/gcprof
+pkg_gcprof_commit = master
+
+PACKAGES += geas
+pkg_geas_name = geas
+pkg_geas_description = Guess Erlang Application Scattering
+pkg_geas_homepage = https://github.com/crownedgrouse/geas
+pkg_geas_fetch = git
+pkg_geas_repo = https://github.com/crownedgrouse/geas
+pkg_geas_commit = master
+
+PACKAGES += geef
+pkg_geef_name = geef
+pkg_geef_description = Git NEEEEF (Erlang NIF)
+pkg_geef_homepage = https://github.com/carlosmn/geef
+pkg_geef_fetch = git
+pkg_geef_repo = https://github.com/carlosmn/geef
+pkg_geef_commit = master
+
+PACKAGES += gen_coap
+pkg_gen_coap_name = gen_coap
+pkg_gen_coap_description = Generic Erlang CoAP Client/Server
+pkg_gen_coap_homepage = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_fetch = git
+pkg_gen_coap_repo = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_commit = master
+
+PACKAGES += gen_cycle
+pkg_gen_cycle_name = gen_cycle
+pkg_gen_cycle_description = Simple, generic OTP behaviour for recurring tasks
+pkg_gen_cycle_homepage = https://github.com/aerosol/gen_cycle
+pkg_gen_cycle_fetch = git
+pkg_gen_cycle_repo = https://github.com/aerosol/gen_cycle
+pkg_gen_cycle_commit = develop
+
+PACKAGES += gen_icmp
+pkg_gen_icmp_name = gen_icmp
+pkg_gen_icmp_description = Erlang interface to ICMP sockets
+pkg_gen_icmp_homepage = https://github.com/msantos/gen_icmp
+pkg_gen_icmp_fetch = git
+pkg_gen_icmp_repo = https://github.com/msantos/gen_icmp
+pkg_gen_icmp_commit = master
+
+PACKAGES += gen_leader
+pkg_gen_leader_name = gen_leader
+pkg_gen_leader_description = leader election behavior
+pkg_gen_leader_homepage = https://github.com/garret-smith/gen_leader_revival
+pkg_gen_leader_fetch = git
+pkg_gen_leader_repo = https://github.com/garret-smith/gen_leader_revival
+pkg_gen_leader_commit = master
+
+PACKAGES += gen_nb_server
+pkg_gen_nb_server_name = gen_nb_server
+pkg_gen_nb_server_description = OTP behavior for writing non-blocking servers
+pkg_gen_nb_server_homepage = https://github.com/kevsmith/gen_nb_server
+pkg_gen_nb_server_fetch = git
+pkg_gen_nb_server_repo = https://github.com/kevsmith/gen_nb_server
+pkg_gen_nb_server_commit = master
+
+PACKAGES += gen_paxos
+pkg_gen_paxos_name = gen_paxos
+pkg_gen_paxos_description = An Erlang/OTP-style implementation of the PAXOS distributed consensus protocol
+pkg_gen_paxos_homepage = https://github.com/gburd/gen_paxos
+pkg_gen_paxos_fetch = git
+pkg_gen_paxos_repo = https://github.com/gburd/gen_paxos
+pkg_gen_paxos_commit = master
+
+PACKAGES += gen_rpc
+pkg_gen_rpc_name = gen_rpc
+pkg_gen_rpc_description = A scalable RPC library for Erlang-VM based languages
+pkg_gen_rpc_homepage = https://github.com/priestjim/gen_rpc
+pkg_gen_rpc_fetch = git
+pkg_gen_rpc_repo = https://github.com/priestjim/gen_rpc.git
+pkg_gen_rpc_commit = master
+
+PACKAGES += gen_smtp
+pkg_gen_smtp_name = gen_smtp
+pkg_gen_smtp_description = A generic Erlang SMTP server and client that can be extended via callback modules
+pkg_gen_smtp_homepage = https://github.com/Vagabond/gen_smtp
+pkg_gen_smtp_fetch = git
+pkg_gen_smtp_repo = https://github.com/Vagabond/gen_smtp
+pkg_gen_smtp_commit = master
+
+PACKAGES += gen_tracker
+pkg_gen_tracker_name = gen_tracker
+pkg_gen_tracker_description = Supervisor with ETS-based handling of children and their metadata
+pkg_gen_tracker_homepage = https://github.com/erlyvideo/gen_tracker
+pkg_gen_tracker_fetch = git
+pkg_gen_tracker_repo = https://github.com/erlyvideo/gen_tracker
+pkg_gen_tracker_commit = master
+
+PACKAGES += gen_unix
+pkg_gen_unix_name = gen_unix
+pkg_gen_unix_description = Erlang Unix socket interface
+pkg_gen_unix_homepage = https://github.com/msantos/gen_unix
+pkg_gen_unix_fetch = git
+pkg_gen_unix_repo = https://github.com/msantos/gen_unix
+pkg_gen_unix_commit = master
+
+PACKAGES += geode
+pkg_geode_name = geode
+pkg_geode_description = geohash/proximity lookup in pure, uncut erlang.
+pkg_geode_homepage = https://github.com/bradfordw/geode
+pkg_geode_fetch = git
+pkg_geode_repo = https://github.com/bradfordw/geode
+pkg_geode_commit = master
+
+PACKAGES += getopt
+pkg_getopt_name = getopt
+pkg_getopt_description = Module to parse command line arguments using the GNU getopt syntax
+pkg_getopt_homepage = https://github.com/jcomellas/getopt
+pkg_getopt_fetch = git
+pkg_getopt_repo = https://github.com/jcomellas/getopt
+pkg_getopt_commit = master
+
+PACKAGES += gettext
+pkg_gettext_name = gettext
+pkg_gettext_description = Erlang internationalization library.
+pkg_gettext_homepage = https://github.com/etnt/gettext
+pkg_gettext_fetch = git
+pkg_gettext_repo = https://github.com/etnt/gettext
+pkg_gettext_commit = master
+
+PACKAGES += giallo
+pkg_giallo_name = giallo
+pkg_giallo_description = Small and flexible web framework on top of Cowboy
+pkg_giallo_homepage = https://github.com/kivra/giallo
+pkg_giallo_fetch = git
+pkg_giallo_repo = https://github.com/kivra/giallo
+pkg_giallo_commit = master
+
+PACKAGES += gin
+pkg_gin_name = gin
+pkg_gin_description = Guard helper macros for Erlang, implemented as a parse_transform
+pkg_gin_homepage = https://github.com/mad-cocktail/gin
+pkg_gin_fetch = git
+pkg_gin_repo = https://github.com/mad-cocktail/gin
+pkg_gin_commit = master
+
+PACKAGES += gitty
+pkg_gitty_name = gitty
+pkg_gitty_description = Git access in erlang
+pkg_gitty_homepage = https://github.com/maxlapshin/gitty
+pkg_gitty_fetch = git
+pkg_gitty_repo = https://github.com/maxlapshin/gitty
+pkg_gitty_commit = master
+
+PACKAGES += gold_fever
+pkg_gold_fever_name = gold_fever
+pkg_gold_fever_description = A Treasure Hunt for Erlangers
+pkg_gold_fever_homepage = https://github.com/inaka/gold_fever
+pkg_gold_fever_fetch = git
+pkg_gold_fever_repo = https://github.com/inaka/gold_fever
+pkg_gold_fever_commit = master
+
+PACKAGES += gpb
+pkg_gpb_name = gpb
+pkg_gpb_description = A Google Protobuf implementation for Erlang
+pkg_gpb_homepage = https://github.com/tomas-abrahamsson/gpb
+pkg_gpb_fetch = git
+pkg_gpb_repo = https://github.com/tomas-abrahamsson/gpb
+pkg_gpb_commit = master
+
+PACKAGES += gproc
+pkg_gproc_name = gproc
+pkg_gproc_description = Extended process registry for Erlang
+pkg_gproc_homepage = https://github.com/uwiger/gproc
+pkg_gproc_fetch = git
+pkg_gproc_repo = https://github.com/uwiger/gproc
+pkg_gproc_commit = master
+
+PACKAGES += grapherl
+pkg_grapherl_name = grapherl
+pkg_grapherl_description = Create graphs of Erlang systems and programs
+pkg_grapherl_homepage = https://github.com/eproxus/grapherl
+pkg_grapherl_fetch = git
+pkg_grapherl_repo = https://github.com/eproxus/grapherl
+pkg_grapherl_commit = master
+
+PACKAGES += grpc
+pkg_grpc_name = grpc
+pkg_grpc_description = gRPC server in Erlang
+pkg_grpc_homepage = https://github.com/Bluehouse-Technology/grpc
+pkg_grpc_fetch = git
+pkg_grpc_repo = https://github.com/Bluehouse-Technology/grpc
+pkg_grpc_commit = master
+
+PACKAGES += grpc_client
+pkg_grpc_client_name = grpc_client
+pkg_grpc_client_description = gRPC client in Erlang
+pkg_grpc_client_homepage = https://github.com/Bluehouse-Technology/grpc_client
+pkg_grpc_client_fetch = git
+pkg_grpc_client_repo = https://github.com/Bluehouse-Technology/grpc_client
+pkg_grpc_client_commit = master
+
+PACKAGES += gun
+pkg_gun_name = gun
+pkg_gun_description = Asynchronous SPDY, HTTP and Websocket client written in Erlang.
+pkg_gun_homepage = http://ninenines.eu
+pkg_gun_fetch = git
+pkg_gun_repo = https://github.com/ninenines/gun
+pkg_gun_commit = master
+
+PACKAGES += gut
+pkg_gut_name = gut
+pkg_gut_description = gut is a template printing, aka scaffolding, tool for Erlang. Like rails generate or yeoman
+pkg_gut_homepage = https://github.com/unbalancedparentheses/gut
+pkg_gut_fetch = git
+pkg_gut_repo = https://github.com/unbalancedparentheses/gut
+pkg_gut_commit = master
+
+PACKAGES += hackney
+pkg_hackney_name = hackney
+pkg_hackney_description = simple HTTP client in Erlang
+pkg_hackney_homepage = https://github.com/benoitc/hackney
+pkg_hackney_fetch = git
+pkg_hackney_repo = https://github.com/benoitc/hackney
+pkg_hackney_commit = master
+
+PACKAGES += hamcrest
+pkg_hamcrest_name = hamcrest
+pkg_hamcrest_description = Erlang port of Hamcrest
+pkg_hamcrest_homepage = https://github.com/hyperthunk/hamcrest-erlang
+pkg_hamcrest_fetch = git
+pkg_hamcrest_repo = https://github.com/hyperthunk/hamcrest-erlang
+pkg_hamcrest_commit = master
+
+PACKAGES += hanoidb
+pkg_hanoidb_name = hanoidb
+pkg_hanoidb_description = Erlang LSM BTree Storage
+pkg_hanoidb_homepage = https://github.com/krestenkrab/hanoidb
+pkg_hanoidb_fetch = git
+pkg_hanoidb_repo = https://github.com/krestenkrab/hanoidb
+pkg_hanoidb_commit = master
+
+PACKAGES += hottub
+pkg_hottub_name = hottub
+pkg_hottub_description = Permanent Erlang Worker Pool
+pkg_hottub_homepage = https://github.com/bfrog/hottub
+pkg_hottub_fetch = git
+pkg_hottub_repo = https://github.com/bfrog/hottub
+pkg_hottub_commit = master
+
+PACKAGES += hpack
+pkg_hpack_name = hpack
+pkg_hpack_description = HPACK Implementation for Erlang
+pkg_hpack_homepage = https://github.com/joedevivo/hpack
+pkg_hpack_fetch = git
+pkg_hpack_repo = https://github.com/joedevivo/hpack
+pkg_hpack_commit = master
+
+PACKAGES += hyper
+pkg_hyper_name = hyper
+pkg_hyper_description = Erlang implementation of HyperLogLog
+pkg_hyper_homepage = https://github.com/GameAnalytics/hyper
+pkg_hyper_fetch = git
+pkg_hyper_repo = https://github.com/GameAnalytics/hyper
+pkg_hyper_commit = master
+
+PACKAGES += i18n
+pkg_i18n_name = i18n
+pkg_i18n_description = International components for unicode from Erlang (unicode, date, string, number, format, locale, localization, transliteration, icu4e)
+pkg_i18n_homepage = https://github.com/erlang-unicode/i18n
+pkg_i18n_fetch = git
+pkg_i18n_repo = https://github.com/erlang-unicode/i18n
+pkg_i18n_commit = master
+
+PACKAGES += ibrowse
+pkg_ibrowse_name = ibrowse
+pkg_ibrowse_description = Erlang HTTP client
+pkg_ibrowse_homepage = https://github.com/cmullaparthi/ibrowse
+pkg_ibrowse_fetch = git
+pkg_ibrowse_repo = https://github.com/cmullaparthi/ibrowse
+pkg_ibrowse_commit = master
+
+PACKAGES += idna
+pkg_idna_name = idna
+pkg_idna_description = Erlang IDNA lib
+pkg_idna_homepage = https://github.com/benoitc/erlang-idna
+pkg_idna_fetch = git
+pkg_idna_repo = https://github.com/benoitc/erlang-idna
+pkg_idna_commit = master
+
+PACKAGES += ierlang
+pkg_ierlang_name = ierlang
+pkg_ierlang_description = An Erlang language kernel for IPython.
+pkg_ierlang_homepage = https://github.com/robbielynch/ierlang
+pkg_ierlang_fetch = git
+pkg_ierlang_repo = https://github.com/robbielynch/ierlang
+pkg_ierlang_commit = master
+
+PACKAGES += iota
+pkg_iota_name = iota
+pkg_iota_description = iota (Inter-dependency Objective Testing Apparatus) - a tool to enforce clean separation of responsibilities in Erlang code
+pkg_iota_homepage = https://github.com/jpgneves/iota
+pkg_iota_fetch = git
+pkg_iota_repo = https://github.com/jpgneves/iota
+pkg_iota_commit = master
+
+PACKAGES += irc_lib
+pkg_irc_lib_name = irc_lib
+pkg_irc_lib_description = Erlang irc client library
+pkg_irc_lib_homepage = https://github.com/OtpChatBot/irc_lib
+pkg_irc_lib_fetch = git
+pkg_irc_lib_repo = https://github.com/OtpChatBot/irc_lib
+pkg_irc_lib_commit = master
+
+PACKAGES += ircd
+pkg_ircd_name = ircd
+pkg_ircd_description = A pluggable IRC daemon application/library for Erlang.
+pkg_ircd_homepage = https://github.com/tonyg/erlang-ircd
+pkg_ircd_fetch = git
+pkg_ircd_repo = https://github.com/tonyg/erlang-ircd
+pkg_ircd_commit = master
+
+PACKAGES += iris
+pkg_iris_name = iris
+pkg_iris_description = Iris Erlang binding
+pkg_iris_homepage = https://github.com/project-iris/iris-erl
+pkg_iris_fetch = git
+pkg_iris_repo = https://github.com/project-iris/iris-erl
+pkg_iris_commit = master
+
+PACKAGES += iso8601
+pkg_iso8601_name = iso8601
+pkg_iso8601_description = Erlang ISO 8601 date formatter/parser
+pkg_iso8601_homepage = https://github.com/seansawyer/erlang_iso8601
+pkg_iso8601_fetch = git
+pkg_iso8601_repo = https://github.com/seansawyer/erlang_iso8601
+pkg_iso8601_commit = master
+
+PACKAGES += jamdb_sybase
+pkg_jamdb_sybase_name = jamdb_sybase
+pkg_jamdb_sybase_description = Erlang driver for SAP Sybase ASE
+pkg_jamdb_sybase_homepage = https://github.com/erlangbureau/jamdb_sybase
+pkg_jamdb_sybase_fetch = git
+pkg_jamdb_sybase_repo = https://github.com/erlangbureau/jamdb_sybase
+pkg_jamdb_sybase_commit = master
+
+PACKAGES += jerg
+pkg_jerg_name = jerg
+pkg_jerg_description = JSON Schema to Erlang Records Generator
+pkg_jerg_homepage = https://github.com/ddossot/jerg
+pkg_jerg_fetch = git
+pkg_jerg_repo = https://github.com/ddossot/jerg
+pkg_jerg_commit = master
+
+PACKAGES += jesse
+pkg_jesse_name = jesse
+pkg_jesse_description = jesse (JSon Schema Erlang) is an implementation of a json schema validator for Erlang.
+pkg_jesse_homepage = https://github.com/for-GET/jesse
+pkg_jesse_fetch = git
+pkg_jesse_repo = https://github.com/for-GET/jesse
+pkg_jesse_commit = master
+
+PACKAGES += jiffy
+pkg_jiffy_name = jiffy
+pkg_jiffy_description = JSON NIFs for Erlang.
+pkg_jiffy_homepage = https://github.com/davisp/jiffy
+pkg_jiffy_fetch = git
+pkg_jiffy_repo = https://github.com/davisp/jiffy
+pkg_jiffy_commit = master
+
+PACKAGES += jiffy_v
+pkg_jiffy_v_name = jiffy_v
+pkg_jiffy_v_description = JSON validation utility
+pkg_jiffy_v_homepage = https://github.com/shizzard/jiffy-v
+pkg_jiffy_v_fetch = git
+pkg_jiffy_v_repo = https://github.com/shizzard/jiffy-v
+pkg_jiffy_v_commit = master
+
+PACKAGES += jobs
+pkg_jobs_name = jobs
+pkg_jobs_description = a job scheduler for load regulation
+pkg_jobs_homepage = https://github.com/esl/jobs
+pkg_jobs_fetch = git
+pkg_jobs_repo = https://github.com/esl/jobs
+pkg_jobs_commit = master
+
+PACKAGES += joxa
+pkg_joxa_name = joxa
+pkg_joxa_description = A Modern Lisp for the Erlang VM
+pkg_joxa_homepage = https://github.com/joxa/joxa
+pkg_joxa_fetch = git
+pkg_joxa_repo = https://github.com/joxa/joxa
+pkg_joxa_commit = master
+
+PACKAGES += json
+pkg_json_name = json
+pkg_json_description = a high level json library for erlang (17.0+)
+pkg_json_homepage = https://github.com/talentdeficit/json
+pkg_json_fetch = git
+pkg_json_repo = https://github.com/talentdeficit/json
+pkg_json_commit = master
+
+PACKAGES += json_rec
+pkg_json_rec_name = json_rec
+pkg_json_rec_description = JSON to erlang record
+pkg_json_rec_homepage = https://github.com/justinkirby/json_rec
+pkg_json_rec_fetch = git
+pkg_json_rec_repo = https://github.com/justinkirby/json_rec
+pkg_json_rec_commit = master
+
+PACKAGES += jsone
+pkg_jsone_name = jsone
+pkg_jsone_description = An Erlang library for encoding and decoding JSON data.
+pkg_jsone_homepage = https://github.com/sile/jsone
+pkg_jsone_fetch = git
+pkg_jsone_repo = https://github.com/sile/jsone.git
+pkg_jsone_commit = master
+
+PACKAGES += jsonerl
+pkg_jsonerl_name = jsonerl
+pkg_jsonerl_description = yet another but slightly different erlang <-> json encoder/decoder
+pkg_jsonerl_homepage = https://github.com/lambder/jsonerl
+pkg_jsonerl_fetch = git
+pkg_jsonerl_repo = https://github.com/lambder/jsonerl
+pkg_jsonerl_commit = master
+
+PACKAGES += jsonpath
+pkg_jsonpath_name = jsonpath
+pkg_jsonpath_description = Fast Erlang JSON data retrieval and updates via javascript-like notation
+pkg_jsonpath_homepage = https://github.com/GeneStevens/jsonpath
+pkg_jsonpath_fetch = git
+pkg_jsonpath_repo = https://github.com/GeneStevens/jsonpath
+pkg_jsonpath_commit = master
+
+PACKAGES += jsonx
+pkg_jsonx_name = jsonx
+pkg_jsonx_description = JSONX is an Erlang library for efficiently decoding and encoding JSON, written in C.
+pkg_jsonx_homepage = https://github.com/iskra/jsonx
+pkg_jsonx_fetch = git
+pkg_jsonx_repo = https://github.com/iskra/jsonx
+pkg_jsonx_commit = master
+
+PACKAGES += jsx
+pkg_jsx_name = jsx
+pkg_jsx_description = An Erlang application for consuming, producing and manipulating JSON.
+pkg_jsx_homepage = https://github.com/talentdeficit/jsx
+pkg_jsx_fetch = git
+pkg_jsx_repo = https://github.com/talentdeficit/jsx
+pkg_jsx_commit = master
+
+PACKAGES += kafka
+pkg_kafka_name = kafka
+pkg_kafka_description = Kafka consumer and producer in Erlang
+pkg_kafka_homepage = https://github.com/wooga/kafka-erlang
+pkg_kafka_fetch = git
+pkg_kafka_repo = https://github.com/wooga/kafka-erlang
+pkg_kafka_commit = master
+
+PACKAGES += kafka_protocol
+pkg_kafka_protocol_name = kafka_protocol
+pkg_kafka_protocol_description = Kafka protocol Erlang library
+pkg_kafka_protocol_homepage = https://github.com/klarna/kafka_protocol
+pkg_kafka_protocol_fetch = git
+pkg_kafka_protocol_repo = https://github.com/klarna/kafka_protocol.git
+pkg_kafka_protocol_commit = master
+
+PACKAGES += kai
+pkg_kai_name = kai
+pkg_kai_description = DHT storage by Takeshi Inoue
+pkg_kai_homepage = https://github.com/synrc/kai
+pkg_kai_fetch = git
+pkg_kai_repo = https://github.com/synrc/kai
+pkg_kai_commit = master
+
+PACKAGES += katja
+pkg_katja_name = katja
+pkg_katja_description = A simple Riemann client written in Erlang.
+pkg_katja_homepage = https://github.com/nifoc/katja
+pkg_katja_fetch = git
+pkg_katja_repo = https://github.com/nifoc/katja
+pkg_katja_commit = master
+
+PACKAGES += kdht
+pkg_kdht_name = kdht
+pkg_kdht_description = kdht is an erlang DHT implementation
+pkg_kdht_homepage = https://github.com/kevinlynx/kdht
+pkg_kdht_fetch = git
+pkg_kdht_repo = https://github.com/kevinlynx/kdht
+pkg_kdht_commit = master
+
+PACKAGES += key2value
+pkg_key2value_name = key2value
+pkg_key2value_description = Erlang 2-way map
+pkg_key2value_homepage = https://github.com/okeuday/key2value
+pkg_key2value_fetch = git
+pkg_key2value_repo = https://github.com/okeuday/key2value
+pkg_key2value_commit = master
+
+PACKAGES += keys1value
+pkg_keys1value_name = keys1value
+pkg_keys1value_description = Erlang set associative map for key lists
+pkg_keys1value_homepage = https://github.com/okeuday/keys1value
+pkg_keys1value_fetch = git
+pkg_keys1value_repo = https://github.com/okeuday/keys1value
+pkg_keys1value_commit = master
+
+PACKAGES += kinetic
+pkg_kinetic_name = kinetic
+pkg_kinetic_description = Erlang Kinesis Client
+pkg_kinetic_homepage = https://github.com/AdRoll/kinetic
+pkg_kinetic_fetch = git
+pkg_kinetic_repo = https://github.com/AdRoll/kinetic
+pkg_kinetic_commit = master
+
+PACKAGES += kjell
+pkg_kjell_name = kjell
+pkg_kjell_description = Erlang Shell
+pkg_kjell_homepage = https://github.com/karlll/kjell
+pkg_kjell_fetch = git
+pkg_kjell_repo = https://github.com/karlll/kjell
+pkg_kjell_commit = master
+
+PACKAGES += kraken
+pkg_kraken_name = kraken
+pkg_kraken_description = Distributed Pubsub Server for Realtime Apps
+pkg_kraken_homepage = https://github.com/Asana/kraken
+pkg_kraken_fetch = git
+pkg_kraken_repo = https://github.com/Asana/kraken
+pkg_kraken_commit = master
+
+PACKAGES += kucumberl
+pkg_kucumberl_name = kucumberl
+pkg_kucumberl_description = A pure-erlang, open-source implementation of Cucumber
+pkg_kucumberl_homepage = https://github.com/openshine/kucumberl
+pkg_kucumberl_fetch = git
+pkg_kucumberl_repo = https://github.com/openshine/kucumberl
+pkg_kucumberl_commit = master
+
+PACKAGES += kvc
+pkg_kvc_name = kvc
+pkg_kvc_description = KVC - Key Value Coding for Erlang data structures
+pkg_kvc_homepage = https://github.com/etrepum/kvc
+pkg_kvc_fetch = git
+pkg_kvc_repo = https://github.com/etrepum/kvc
+pkg_kvc_commit = master
+
+PACKAGES += kvlists
+pkg_kvlists_name = kvlists
+pkg_kvlists_description = Lists of key-value pairs (decoded JSON) in Erlang
+pkg_kvlists_homepage = https://github.com/jcomellas/kvlists
+pkg_kvlists_fetch = git
+pkg_kvlists_repo = https://github.com/jcomellas/kvlists
+pkg_kvlists_commit = master
+
+PACKAGES += kvs
+pkg_kvs_name = kvs
+pkg_kvs_description = Container and Iterator
+pkg_kvs_homepage = https://github.com/synrc/kvs
+pkg_kvs_fetch = git
+pkg_kvs_repo = https://github.com/synrc/kvs
+pkg_kvs_commit = master
+
+PACKAGES += lager
+pkg_lager_name = lager
+pkg_lager_description = A logging framework for Erlang/OTP.
+pkg_lager_homepage = https://github.com/erlang-lager/lager
+pkg_lager_fetch = git
+pkg_lager_repo = https://github.com/erlang-lager/lager
+pkg_lager_commit = master
+
+PACKAGES += lager_amqp_backend
+pkg_lager_amqp_backend_name = lager_amqp_backend
+pkg_lager_amqp_backend_description = AMQP RabbitMQ Lager backend
+pkg_lager_amqp_backend_homepage = https://github.com/jbrisbin/lager_amqp_backend
+pkg_lager_amqp_backend_fetch = git
+pkg_lager_amqp_backend_repo = https://github.com/jbrisbin/lager_amqp_backend
+pkg_lager_amqp_backend_commit = master
+
+PACKAGES += lager_syslog
+pkg_lager_syslog_name = lager_syslog
+pkg_lager_syslog_description = Syslog backend for lager
+pkg_lager_syslog_homepage = https://github.com/erlang-lager/lager_syslog
+pkg_lager_syslog_fetch = git
+pkg_lager_syslog_repo = https://github.com/erlang-lager/lager_syslog
+pkg_lager_syslog_commit = master
+
+PACKAGES += lambdapad
+pkg_lambdapad_name = lambdapad
+pkg_lambdapad_description = Static site generator using Erlang. Yes, Erlang.
+pkg_lambdapad_homepage = https://github.com/gar1t/lambdapad
+pkg_lambdapad_fetch = git
+pkg_lambdapad_repo = https://github.com/gar1t/lambdapad
+pkg_lambdapad_commit = master
+
+PACKAGES += lasp
+pkg_lasp_name = lasp
+pkg_lasp_description = A Language for Distributed, Eventually Consistent Computations
+pkg_lasp_homepage = http://lasp-lang.org/
+pkg_lasp_fetch = git
+pkg_lasp_repo = https://github.com/lasp-lang/lasp
+pkg_lasp_commit = master
+
+PACKAGES += lasse
+pkg_lasse_name = lasse
+pkg_lasse_description = SSE handler for Cowboy
+pkg_lasse_homepage = https://github.com/inaka/lasse
+pkg_lasse_fetch = git
+pkg_lasse_repo = https://github.com/inaka/lasse
+pkg_lasse_commit = master
+
+PACKAGES += ldap
+pkg_ldap_name = ldap
+pkg_ldap_description = LDAP server written in Erlang
+pkg_ldap_homepage = https://github.com/spawnproc/ldap
+pkg_ldap_fetch = git
+pkg_ldap_repo = https://github.com/spawnproc/ldap
+pkg_ldap_commit = master
+
+PACKAGES += lethink
+pkg_lethink_name = lethink
+pkg_lethink_description = erlang driver for rethinkdb
+pkg_lethink_homepage = https://github.com/taybin/lethink
+pkg_lethink_fetch = git
+pkg_lethink_repo = https://github.com/taybin/lethink
+pkg_lethink_commit = master
+
+PACKAGES += lfe
+pkg_lfe_name = lfe
+pkg_lfe_description = Lisp Flavoured Erlang (LFE)
+pkg_lfe_homepage = https://github.com/rvirding/lfe
+pkg_lfe_fetch = git
+pkg_lfe_repo = https://github.com/rvirding/lfe
+pkg_lfe_commit = master
+
+PACKAGES += ling
+pkg_ling_name = ling
+pkg_ling_description = Erlang on Xen
+pkg_ling_homepage = https://github.com/cloudozer/ling
+pkg_ling_fetch = git
+pkg_ling_repo = https://github.com/cloudozer/ling
+pkg_ling_commit = master
+
+PACKAGES += live
+pkg_live_name = live
+pkg_live_description = Automated module and configuration reloader.
+pkg_live_homepage = http://ninenines.eu
+pkg_live_fetch = git
+pkg_live_repo = https://github.com/ninenines/live
+pkg_live_commit = master
+
+PACKAGES += lmq
+pkg_lmq_name = lmq
+pkg_lmq_description = Lightweight Message Queue
+pkg_lmq_homepage = https://github.com/iij/lmq
+pkg_lmq_fetch = git
+pkg_lmq_repo = https://github.com/iij/lmq
+pkg_lmq_commit = master
+
+PACKAGES += locker
+pkg_locker_name = locker
+pkg_locker_description = Atomic distributed 'check and set' for short-lived keys
+pkg_locker_homepage = https://github.com/wooga/locker
+pkg_locker_fetch = git
+pkg_locker_repo = https://github.com/wooga/locker
+pkg_locker_commit = master
+
+PACKAGES += locks
+pkg_locks_name = locks
+pkg_locks_description = A scalable, deadlock-resolving resource locker
+pkg_locks_homepage = https://github.com/uwiger/locks
+pkg_locks_fetch = git
+pkg_locks_repo = https://github.com/uwiger/locks
+pkg_locks_commit = master
+
+PACKAGES += log4erl
+pkg_log4erl_name = log4erl
+pkg_log4erl_description = A logger for erlang in the spirit of Log4J.
+pkg_log4erl_homepage = https://github.com/ahmednawras/log4erl
+pkg_log4erl_fetch = git
+pkg_log4erl_repo = https://github.com/ahmednawras/log4erl
+pkg_log4erl_commit = master
+
+PACKAGES += lol
+pkg_lol_name = lol
+pkg_lol_description = Lisp on erLang, and programming is fun again
+pkg_lol_homepage = https://github.com/b0oh/lol
+pkg_lol_fetch = git
+pkg_lol_repo = https://github.com/b0oh/lol
+pkg_lol_commit = master
+
+PACKAGES += lucid
+pkg_lucid_name = lucid
+pkg_lucid_description = HTTP/2 server written in Erlang
+pkg_lucid_homepage = https://github.com/tatsuhiro-t/lucid
+pkg_lucid_fetch = git
+pkg_lucid_repo = https://github.com/tatsuhiro-t/lucid
+pkg_lucid_commit = master
+
+PACKAGES += luerl
+pkg_luerl_name = luerl
+pkg_luerl_description = Lua in Erlang
+pkg_luerl_homepage = https://github.com/rvirding/luerl
+pkg_luerl_fetch = git
+pkg_luerl_repo = https://github.com/rvirding/luerl
+pkg_luerl_commit = develop
+
+PACKAGES += luwak
+pkg_luwak_name = luwak
+pkg_luwak_description = Large-object storage interface for Riak
+pkg_luwak_homepage = https://github.com/basho/luwak
+pkg_luwak_fetch = git
+pkg_luwak_repo = https://github.com/basho/luwak
+pkg_luwak_commit = master
+
+PACKAGES += lux
+pkg_lux_name = lux
+pkg_lux_description = Lux (LUcid eXpect scripting) simplifies test automation and provides an Expect-style execution of commands
+pkg_lux_homepage = https://github.com/hawk/lux
+pkg_lux_fetch = git
+pkg_lux_repo = https://github.com/hawk/lux
+pkg_lux_commit = master
+
+PACKAGES += machi
+pkg_machi_name = machi
+pkg_machi_description = Machi file store
+pkg_machi_homepage = https://github.com/basho/machi
+pkg_machi_fetch = git
+pkg_machi_repo = https://github.com/basho/machi
+pkg_machi_commit = master
+
+PACKAGES += mad
+pkg_mad_name = mad
+pkg_mad_description = Small and Fast Rebar Replacement
+pkg_mad_homepage = https://github.com/synrc/mad
+pkg_mad_fetch = git
+pkg_mad_repo = https://github.com/synrc/mad
+pkg_mad_commit = master
+
+PACKAGES += marina
+pkg_marina_name = marina
+pkg_marina_description = Non-blocking Erlang Cassandra CQL3 client
+pkg_marina_homepage = https://github.com/lpgauth/marina
+pkg_marina_fetch = git
+pkg_marina_repo = https://github.com/lpgauth/marina
+pkg_marina_commit = master
+
+PACKAGES += mavg
+pkg_mavg_name = mavg
+pkg_mavg_description = Erlang :: Exponential moving average library
+pkg_mavg_homepage = https://github.com/EchoTeam/mavg
+pkg_mavg_fetch = git
+pkg_mavg_repo = https://github.com/EchoTeam/mavg
+pkg_mavg_commit = master
+
+PACKAGES += mc_erl
+pkg_mc_erl_name = mc_erl
+pkg_mc_erl_description = mc-erl is a server for Minecraft 1.4.7 written in Erlang.
+pkg_mc_erl_homepage = https://github.com/clonejo/mc-erl
+pkg_mc_erl_fetch = git
+pkg_mc_erl_repo = https://github.com/clonejo/mc-erl
+pkg_mc_erl_commit = master
+
+PACKAGES += mcd
+pkg_mcd_name = mcd
+pkg_mcd_description = Fast memcached protocol client in pure Erlang
+pkg_mcd_homepage = https://github.com/EchoTeam/mcd
+pkg_mcd_fetch = git
+pkg_mcd_repo = https://github.com/EchoTeam/mcd
+pkg_mcd_commit = master
+
+PACKAGES += mcerlang
+pkg_mcerlang_name = mcerlang
+pkg_mcerlang_description = The McErlang model checker for Erlang
+pkg_mcerlang_homepage = https://github.com/fredlund/McErlang
+pkg_mcerlang_fetch = git
+pkg_mcerlang_repo = https://github.com/fredlund/McErlang
+pkg_mcerlang_commit = master
+
+PACKAGES += meck
+pkg_meck_name = meck
+pkg_meck_description = A mocking library for Erlang
+pkg_meck_homepage = https://github.com/eproxus/meck
+pkg_meck_fetch = git
+pkg_meck_repo = https://github.com/eproxus/meck
+pkg_meck_commit = master
+
+PACKAGES += mekao
+pkg_mekao_name = mekao
+pkg_mekao_description = SQL constructor
+pkg_mekao_homepage = https://github.com/ddosia/mekao
+pkg_mekao_fetch = git
+pkg_mekao_repo = https://github.com/ddosia/mekao
+pkg_mekao_commit = master
+
+PACKAGES += memo
+pkg_memo_name = memo
+pkg_memo_description = Erlang memoization server
+pkg_memo_homepage = https://github.com/tuncer/memo
+pkg_memo_fetch = git
+pkg_memo_repo = https://github.com/tuncer/memo
+pkg_memo_commit = master
+
+PACKAGES += merge_index
+pkg_merge_index_name = merge_index
+pkg_merge_index_description = MergeIndex is an Erlang library for storing ordered sets on disk. It is very similar to an SSTable (in Google's Bigtable) or an HFile (in Hadoop).
+pkg_merge_index_homepage = https://github.com/basho/merge_index
+pkg_merge_index_fetch = git
+pkg_merge_index_repo = https://github.com/basho/merge_index
+pkg_merge_index_commit = master
+
+PACKAGES += merl
+pkg_merl_name = merl
+pkg_merl_description = Metaprogramming in Erlang
+pkg_merl_homepage = https://github.com/richcarl/merl
+pkg_merl_fetch = git
+pkg_merl_repo = https://github.com/richcarl/merl
+pkg_merl_commit = master
+
+PACKAGES += mimerl
+pkg_mimerl_name = mimerl
+pkg_mimerl_description = library to handle mimetypes
+pkg_mimerl_homepage = https://github.com/benoitc/mimerl
+pkg_mimerl_fetch = git
+pkg_mimerl_repo = https://github.com/benoitc/mimerl
+pkg_mimerl_commit = master
+
+PACKAGES += mimetypes
+pkg_mimetypes_name = mimetypes
+pkg_mimetypes_description = Erlang MIME types library
+pkg_mimetypes_homepage = https://github.com/spawngrid/mimetypes
+pkg_mimetypes_fetch = git
+pkg_mimetypes_repo = https://github.com/spawngrid/mimetypes
+pkg_mimetypes_commit = master
+
+PACKAGES += mixer
+pkg_mixer_name = mixer
+pkg_mixer_description = Mix in functions from other modules
+pkg_mixer_homepage = https://github.com/chef/mixer
+pkg_mixer_fetch = git
+pkg_mixer_repo = https://github.com/chef/mixer
+pkg_mixer_commit = master
+
+PACKAGES += mochiweb
+pkg_mochiweb_name = mochiweb
+pkg_mochiweb_description = MochiWeb is an Erlang library for building lightweight HTTP servers.
+pkg_mochiweb_homepage = https://github.com/mochi/mochiweb
+pkg_mochiweb_fetch = git
+pkg_mochiweb_repo = https://github.com/mochi/mochiweb
+pkg_mochiweb_commit = master
+
+PACKAGES += mochiweb_xpath
+pkg_mochiweb_xpath_name = mochiweb_xpath
+pkg_mochiweb_xpath_description = XPath support for mochiweb's html parser
+pkg_mochiweb_xpath_homepage = https://github.com/retnuh/mochiweb_xpath
+pkg_mochiweb_xpath_fetch = git
+pkg_mochiweb_xpath_repo = https://github.com/retnuh/mochiweb_xpath
+pkg_mochiweb_xpath_commit = master
+
+PACKAGES += mockgyver
+pkg_mockgyver_name = mockgyver
+pkg_mockgyver_description = A mocking library for Erlang
+pkg_mockgyver_homepage = https://github.com/klajo/mockgyver
+pkg_mockgyver_fetch = git
+pkg_mockgyver_repo = https://github.com/klajo/mockgyver
+pkg_mockgyver_commit = master
+
+PACKAGES += modlib
+pkg_modlib_name = modlib
+pkg_modlib_description = Web framework based on Erlang's inets httpd
+pkg_modlib_homepage = https://github.com/gar1t/modlib
+pkg_modlib_fetch = git
+pkg_modlib_repo = https://github.com/gar1t/modlib
+pkg_modlib_commit = master
+
+PACKAGES += mongodb
+pkg_mongodb_name = mongodb
+pkg_mongodb_description = MongoDB driver for Erlang
+pkg_mongodb_homepage = https://github.com/comtihon/mongodb-erlang
+pkg_mongodb_fetch = git
+pkg_mongodb_repo = https://github.com/comtihon/mongodb-erlang
+pkg_mongodb_commit = master
+
+PACKAGES += mongooseim
+pkg_mongooseim_name = mongooseim
+pkg_mongooseim_description = Jabber / XMPP server with focus on performance and scalability, by Erlang Solutions
+pkg_mongooseim_homepage = https://www.erlang-solutions.com/products/mongooseim-massively-scalable-ejabberd-platform
+pkg_mongooseim_fetch = git
+pkg_mongooseim_repo = https://github.com/esl/MongooseIM
+pkg_mongooseim_commit = master
+
+PACKAGES += moyo
+pkg_moyo_name = moyo
+pkg_moyo_description = Erlang utility functions library
+pkg_moyo_homepage = https://github.com/dwango/moyo
+pkg_moyo_fetch = git
+pkg_moyo_repo = https://github.com/dwango/moyo
+pkg_moyo_commit = master
+
+PACKAGES += msgpack
+pkg_msgpack_name = msgpack
+pkg_msgpack_description = MessagePack (de)serializer implementation for Erlang
+pkg_msgpack_homepage = https://github.com/msgpack/msgpack-erlang
+pkg_msgpack_fetch = git
+pkg_msgpack_repo = https://github.com/msgpack/msgpack-erlang
+pkg_msgpack_commit = master
+
+PACKAGES += mu2
+pkg_mu2_name = mu2
+pkg_mu2_description = Erlang mutation testing tool
+pkg_mu2_homepage = https://github.com/ramsay-t/mu2
+pkg_mu2_fetch = git
+pkg_mu2_repo = https://github.com/ramsay-t/mu2
+pkg_mu2_commit = master
+
+PACKAGES += mustache
+pkg_mustache_name = mustache
+pkg_mustache_description = Mustache template engine for Erlang.
+pkg_mustache_homepage = https://github.com/mojombo/mustache.erl
+pkg_mustache_fetch = git
+pkg_mustache_repo = https://github.com/mojombo/mustache.erl
+pkg_mustache_commit = master
+
+PACKAGES += myproto
+pkg_myproto_name = myproto
+pkg_myproto_description = MySQL Server Protocol in Erlang
+pkg_myproto_homepage = https://github.com/altenwald/myproto
+pkg_myproto_fetch = git
+pkg_myproto_repo = https://github.com/altenwald/myproto
+pkg_myproto_commit = master
+
+PACKAGES += mysql
+pkg_mysql_name = mysql
+pkg_mysql_description = MySQL client library for Erlang/OTP
+pkg_mysql_homepage = https://github.com/mysql-otp/mysql-otp
+pkg_mysql_fetch = git
+pkg_mysql_repo = https://github.com/mysql-otp/mysql-otp
+pkg_mysql_commit = 1.5.1
+
+PACKAGES += n2o
+pkg_n2o_name = n2o
+pkg_n2o_description = WebSocket Application Server
+pkg_n2o_homepage = https://github.com/5HT/n2o
+pkg_n2o_fetch = git
+pkg_n2o_repo = https://github.com/5HT/n2o
+pkg_n2o_commit = master
+
+PACKAGES += nat_upnp
+pkg_nat_upnp_name = nat_upnp
+pkg_nat_upnp_description = Erlang library to map an internal port to an external one using UPnP IGD
+pkg_nat_upnp_homepage = https://github.com/benoitc/nat_upnp
+pkg_nat_upnp_fetch = git
+pkg_nat_upnp_repo = https://github.com/benoitc/nat_upnp
+pkg_nat_upnp_commit = master
+
+PACKAGES += neo4j
+pkg_neo4j_name = neo4j
+pkg_neo4j_description = Erlang client library for Neo4j.
+pkg_neo4j_homepage = https://github.com/dmitriid/neo4j-erlang
+pkg_neo4j_fetch = git
+pkg_neo4j_repo = https://github.com/dmitriid/neo4j-erlang
+pkg_neo4j_commit = master
+
+PACKAGES += neotoma
+pkg_neotoma_name = neotoma
+pkg_neotoma_description = Erlang library and packrat parser-generator for parsing expression grammars.
+pkg_neotoma_homepage = https://github.com/seancribbs/neotoma
+pkg_neotoma_fetch = git
+pkg_neotoma_repo = https://github.com/seancribbs/neotoma
+pkg_neotoma_commit = master
+
+PACKAGES += newrelic
+pkg_newrelic_name = newrelic
+pkg_newrelic_description = Erlang library for sending metrics to New Relic
+pkg_newrelic_homepage = https://github.com/wooga/newrelic-erlang
+pkg_newrelic_fetch = git
+pkg_newrelic_repo = https://github.com/wooga/newrelic-erlang
+pkg_newrelic_commit = master
+
+PACKAGES += nifty
+pkg_nifty_name = nifty
+pkg_nifty_description = Erlang NIF wrapper generator
+pkg_nifty_homepage = https://github.com/parapluu/nifty
+pkg_nifty_fetch = git
+pkg_nifty_repo = https://github.com/parapluu/nifty
+pkg_nifty_commit = master
+
+PACKAGES += nitrogen_core
+pkg_nitrogen_core_name = nitrogen_core
+pkg_nitrogen_core_description = The core Nitrogen library.
+pkg_nitrogen_core_homepage = http://nitrogenproject.com/
+pkg_nitrogen_core_fetch = git
+pkg_nitrogen_core_repo = https://github.com/nitrogen/nitrogen_core
+pkg_nitrogen_core_commit = master
+
+PACKAGES += nkbase
+pkg_nkbase_name = nkbase
+pkg_nkbase_description = NkBASE distributed database
+pkg_nkbase_homepage = https://github.com/Nekso/nkbase
+pkg_nkbase_fetch = git
+pkg_nkbase_repo = https://github.com/Nekso/nkbase
+pkg_nkbase_commit = develop
+
+PACKAGES += nkdocker
+pkg_nkdocker_name = nkdocker
+pkg_nkdocker_description = Erlang Docker client
+pkg_nkdocker_homepage = https://github.com/Nekso/nkdocker
+pkg_nkdocker_fetch = git
+pkg_nkdocker_repo = https://github.com/Nekso/nkdocker
+pkg_nkdocker_commit = master
+
+PACKAGES += nkpacket
+pkg_nkpacket_name = nkpacket
+pkg_nkpacket_description = Generic Erlang transport layer
+pkg_nkpacket_homepage = https://github.com/Nekso/nkpacket
+pkg_nkpacket_fetch = git
+pkg_nkpacket_repo = https://github.com/Nekso/nkpacket
+pkg_nkpacket_commit = master
+
+PACKAGES += nksip
+pkg_nksip_name = nksip
+pkg_nksip_description = Erlang SIP application server
+pkg_nksip_homepage = https://github.com/kalta/nksip
+pkg_nksip_fetch = git
+pkg_nksip_repo = https://github.com/kalta/nksip
+pkg_nksip_commit = master
+
+PACKAGES += nodefinder
+pkg_nodefinder_name = nodefinder
+pkg_nodefinder_description = automatic node discovery via UDP multicast
+pkg_nodefinder_homepage = https://github.com/erlanger/nodefinder
+pkg_nodefinder_fetch = git
+pkg_nodefinder_repo = https://github.com/okeuday/nodefinder
+pkg_nodefinder_commit = master
+
+PACKAGES += nprocreg
+pkg_nprocreg_name = nprocreg
+pkg_nprocreg_description = Minimal Distributed Erlang Process Registry
+pkg_nprocreg_homepage = http://nitrogenproject.com/
+pkg_nprocreg_fetch = git
+pkg_nprocreg_repo = https://github.com/nitrogen/nprocreg
+pkg_nprocreg_commit = master
+
+PACKAGES += oauth
+pkg_oauth_name = oauth
+pkg_oauth_description = An Erlang OAuth 1.0 implementation
+pkg_oauth_homepage = https://github.com/tim/erlang-oauth
+pkg_oauth_fetch = git
+pkg_oauth_repo = https://github.com/tim/erlang-oauth
+pkg_oauth_commit = master
+
+PACKAGES += oauth2
+pkg_oauth2_name = oauth2
+pkg_oauth2_description = Erlang OAuth2 implementation
+pkg_oauth2_homepage = https://github.com/kivra/oauth2
+pkg_oauth2_fetch = git
+pkg_oauth2_repo = https://github.com/kivra/oauth2
+pkg_oauth2_commit = master
+
+PACKAGES += observer_cli
+pkg_observer_cli_name = observer_cli
+pkg_observer_cli_description = Visualize Erlang/Elixir Nodes On The Command Line
+pkg_observer_cli_homepage = http://zhongwencool.github.io/observer_cli
+pkg_observer_cli_fetch = git
+pkg_observer_cli_repo = https://github.com/zhongwencool/observer_cli
+pkg_observer_cli_commit = master
+
+PACKAGES += octopus
+pkg_octopus_name = octopus
+pkg_octopus_description = Small and flexible pool manager written in Erlang
+pkg_octopus_homepage = https://github.com/erlangbureau/octopus
+pkg_octopus_fetch = git
+pkg_octopus_repo = https://github.com/erlangbureau/octopus
+pkg_octopus_commit = master
+
+PACKAGES += of_protocol
+pkg_of_protocol_name = of_protocol
+pkg_of_protocol_description = OpenFlow Protocol Library for Erlang
+pkg_of_protocol_homepage = https://github.com/FlowForwarding/of_protocol
+pkg_of_protocol_fetch = git
+pkg_of_protocol_repo = https://github.com/FlowForwarding/of_protocol
+pkg_of_protocol_commit = master
+
+PACKAGES += opencouch
+pkg_opencouch_name = couch
+pkg_opencouch_description = An embeddable document-oriented database compatible with Apache CouchDB
+pkg_opencouch_homepage = https://github.com/benoitc/opencouch
+pkg_opencouch_fetch = git
+pkg_opencouch_repo = https://github.com/benoitc/opencouch
+pkg_opencouch_commit = master
+
+PACKAGES += openflow
+pkg_openflow_name = openflow
+pkg_openflow_description = An OpenFlow controller written in pure Erlang
+pkg_openflow_homepage = https://github.com/renatoaguiar/erlang-openflow
+pkg_openflow_fetch = git
+pkg_openflow_repo = https://github.com/renatoaguiar/erlang-openflow
+pkg_openflow_commit = master
+
+PACKAGES += openid
+pkg_openid_name = openid
+pkg_openid_description = Erlang OpenID
+pkg_openid_homepage = https://github.com/brendonh/erl_openid
+pkg_openid_fetch = git
+pkg_openid_repo = https://github.com/brendonh/erl_openid
+pkg_openid_commit = master
+
+PACKAGES += openpoker
+pkg_openpoker_name = openpoker
+pkg_openpoker_description = Genesis Texas hold'em Game Server
+pkg_openpoker_homepage = https://github.com/hpyhacking/openpoker
+pkg_openpoker_fetch = git
+pkg_openpoker_repo = https://github.com/hpyhacking/openpoker
+pkg_openpoker_commit = master
+
+PACKAGES += otpbp
+pkg_otpbp_name = otpbp
+pkg_otpbp_description = Parse transform for using new OTP functions in old Erlang/OTP releases (R15, R16, 17, 18, 19)
+pkg_otpbp_homepage = https://github.com/Ledest/otpbp
+pkg_otpbp_fetch = git
+pkg_otpbp_repo = https://github.com/Ledest/otpbp
+pkg_otpbp_commit = master
+
+PACKAGES += pal
+pkg_pal_name = pal
+pkg_pal_description = Pragmatic Authentication Library
+pkg_pal_homepage = https://github.com/manifest/pal
+pkg_pal_fetch = git
+pkg_pal_repo = https://github.com/manifest/pal
+pkg_pal_commit = master
+
+PACKAGES += parse_trans
+pkg_parse_trans_name = parse_trans
+pkg_parse_trans_description = Parse transform utilities for Erlang
+pkg_parse_trans_homepage = https://github.com/uwiger/parse_trans
+pkg_parse_trans_fetch = git
+pkg_parse_trans_repo = https://github.com/uwiger/parse_trans
+pkg_parse_trans_commit = master
+
+PACKAGES += parsexml
+pkg_parsexml_name = parsexml
+pkg_parsexml_description = Simple DOM XML parser with a convenient and very simple API
+pkg_parsexml_homepage = https://github.com/maxlapshin/parsexml
+pkg_parsexml_fetch = git
+pkg_parsexml_repo = https://github.com/maxlapshin/parsexml
+pkg_parsexml_commit = master
+
+PACKAGES += partisan
+pkg_partisan_name = partisan
+pkg_partisan_description = High-performance, high-scalability distributed computing with Erlang and Elixir.
+pkg_partisan_homepage = http://partisan.cloud
+pkg_partisan_fetch = git
+pkg_partisan_repo = https://github.com/lasp-lang/partisan
+pkg_partisan_commit = master
+
+PACKAGES += pegjs
+pkg_pegjs_name = pegjs
+pkg_pegjs_description = An implementation of PEG.js grammar for Erlang.
+pkg_pegjs_homepage = https://github.com/dmitriid/pegjs
+pkg_pegjs_fetch = git
+pkg_pegjs_repo = https://github.com/dmitriid/pegjs
+pkg_pegjs_commit = master
+
+PACKAGES += percept2
+pkg_percept2_name = percept2
+pkg_percept2_description = Concurrent profiling tool for Erlang
+pkg_percept2_homepage = https://github.com/huiqing/percept2
+pkg_percept2_fetch = git
+pkg_percept2_repo = https://github.com/huiqing/percept2
+pkg_percept2_commit = master
+
+PACKAGES += pgo
+pkg_pgo_name = pgo
+pkg_pgo_description = Erlang Postgres client and connection pool
+pkg_pgo_homepage = https://github.com/erleans/pgo.git
+pkg_pgo_fetch = git
+pkg_pgo_repo = https://github.com/erleans/pgo.git
+pkg_pgo_commit = master
+
+PACKAGES += pgsql
+pkg_pgsql_name = pgsql
+pkg_pgsql_description = Erlang PostgreSQL driver
+pkg_pgsql_homepage = https://github.com/semiocast/pgsql
+pkg_pgsql_fetch = git
+pkg_pgsql_repo = https://github.com/semiocast/pgsql
+pkg_pgsql_commit = master
+
+PACKAGES += pkgx
+pkg_pkgx_name = pkgx
+pkg_pkgx_description = Build .deb packages from Erlang releases
+pkg_pkgx_homepage = https://github.com/arjan/pkgx
+pkg_pkgx_fetch = git
+pkg_pkgx_repo = https://github.com/arjan/pkgx
+pkg_pkgx_commit = master
+
+PACKAGES += pkt
+pkg_pkt_name = pkt
+pkg_pkt_description = Erlang network protocol library
+pkg_pkt_homepage = https://github.com/msantos/pkt
+pkg_pkt_fetch = git
+pkg_pkt_repo = https://github.com/msantos/pkt
+pkg_pkt_commit = master
+
+PACKAGES += plain_fsm
+pkg_plain_fsm_name = plain_fsm
+pkg_plain_fsm_description = A behaviour/support library for writing plain Erlang FSMs.
+pkg_plain_fsm_homepage = https://github.com/uwiger/plain_fsm
+pkg_plain_fsm_fetch = git
+pkg_plain_fsm_repo = https://github.com/uwiger/plain_fsm
+pkg_plain_fsm_commit = master
+
+PACKAGES += plumtree
+pkg_plumtree_name = plumtree
+pkg_plumtree_description = Epidemic Broadcast Trees
+pkg_plumtree_homepage = https://github.com/helium/plumtree
+pkg_plumtree_fetch = git
+pkg_plumtree_repo = https://github.com/helium/plumtree
+pkg_plumtree_commit = master
+
+PACKAGES += pmod_transform
+pkg_pmod_transform_name = pmod_transform
+pkg_pmod_transform_description = Parse transform for parameterized modules
+pkg_pmod_transform_homepage = https://github.com/erlang/pmod_transform
+pkg_pmod_transform_fetch = git
+pkg_pmod_transform_repo = https://github.com/erlang/pmod_transform
+pkg_pmod_transform_commit = master
+
+PACKAGES += pobox
+pkg_pobox_name = pobox
+pkg_pobox_description = External buffer processes to protect against mailbox overflow in Erlang
+pkg_pobox_homepage = https://github.com/ferd/pobox
+pkg_pobox_fetch = git
+pkg_pobox_repo = https://github.com/ferd/pobox
+pkg_pobox_commit = master
+
+PACKAGES += ponos
+pkg_ponos_name = ponos
+pkg_ponos_description = ponos is a simple yet powerful load generator written in Erlang
+pkg_ponos_homepage = https://github.com/klarna/ponos
+pkg_ponos_fetch = git
+pkg_ponos_repo = https://github.com/klarna/ponos
+pkg_ponos_commit = master
+
+PACKAGES += poolboy
+pkg_poolboy_name = poolboy
+pkg_poolboy_description = A hunky Erlang worker pool factory
+pkg_poolboy_homepage = https://github.com/devinus/poolboy
+pkg_poolboy_fetch = git
+pkg_poolboy_repo = https://github.com/devinus/poolboy
+pkg_poolboy_commit = master
+
+PACKAGES += pooler
+pkg_pooler_name = pooler
+pkg_pooler_description = An OTP Process Pool Application
+pkg_pooler_homepage = https://github.com/seth/pooler
+pkg_pooler_fetch = git
+pkg_pooler_repo = https://github.com/seth/pooler
+pkg_pooler_commit = master
+
+PACKAGES += pqueue
+pkg_pqueue_name = pqueue
+pkg_pqueue_description = Erlang Priority Queues
+pkg_pqueue_homepage = https://github.com/okeuday/pqueue
+pkg_pqueue_fetch = git
+pkg_pqueue_repo = https://github.com/okeuday/pqueue
+pkg_pqueue_commit = master
+
+PACKAGES += procket
+pkg_procket_name = procket
+pkg_procket_description = Erlang interface to low level socket operations
+pkg_procket_homepage = http://blog.listincomprehension.com/search/label/procket
+pkg_procket_fetch = git
+pkg_procket_repo = https://github.com/msantos/procket
+pkg_procket_commit = master
+
+PACKAGES += prometheus
+pkg_prometheus_name = prometheus
+pkg_prometheus_description = Prometheus.io client in Erlang
+pkg_prometheus_homepage = https://github.com/deadtrickster/prometheus.erl
+pkg_prometheus_fetch = git
+pkg_prometheus_repo = https://github.com/deadtrickster/prometheus.erl
+pkg_prometheus_commit = master
+
+PACKAGES += prop
+pkg_prop_name = prop
+pkg_prop_description = An Erlang code scaffolding and generator system.
+pkg_prop_homepage = https://github.com/nuex/prop
+pkg_prop_fetch = git
+pkg_prop_repo = https://github.com/nuex/prop
+pkg_prop_commit = master
+
+PACKAGES += proper
+pkg_proper_name = proper
+pkg_proper_description = PropEr: a QuickCheck-inspired property-based testing tool for Erlang.
+pkg_proper_homepage = http://proper.softlab.ntua.gr
+pkg_proper_fetch = git
+pkg_proper_repo = https://github.com/manopapad/proper
+pkg_proper_commit = master
+
+PACKAGES += props
+pkg_props_name = props
+pkg_props_description = Property structure library
+pkg_props_homepage = https://github.com/greyarea/props
+pkg_props_fetch = git
+pkg_props_repo = https://github.com/greyarea/props
+pkg_props_commit = master
+
+PACKAGES += protobuffs
+pkg_protobuffs_name = protobuffs
+pkg_protobuffs_description = An implementation of Google's Protocol Buffers for Erlang, based on ngerakines/erlang_protobuffs.
+pkg_protobuffs_homepage = https://github.com/basho/erlang_protobuffs
+pkg_protobuffs_fetch = git
+pkg_protobuffs_repo = https://github.com/basho/erlang_protobuffs
+pkg_protobuffs_commit = master
+
+PACKAGES += psycho
+pkg_psycho_name = psycho
+pkg_psycho_description = HTTP server that provides a WSGI-like interface for applications and middleware.
+pkg_psycho_homepage = https://github.com/gar1t/psycho
+pkg_psycho_fetch = git
+pkg_psycho_repo = https://github.com/gar1t/psycho
+pkg_psycho_commit = master
+
+PACKAGES += purity
+pkg_purity_name = purity
+pkg_purity_description = A side-effect analyzer for Erlang
+pkg_purity_homepage = https://github.com/mpitid/purity
+pkg_purity_fetch = git
+pkg_purity_repo = https://github.com/mpitid/purity
+pkg_purity_commit = master
+
+PACKAGES += push_service
+pkg_push_service_name = push_service
+pkg_push_service_description = Push service
+pkg_push_service_homepage = https://github.com/hairyhum/push_service
+pkg_push_service_fetch = git
+pkg_push_service_repo = https://github.com/hairyhum/push_service
+pkg_push_service_commit = master
+
+PACKAGES += qdate
+pkg_qdate_name = qdate
+pkg_qdate_description = Date, time, and timezone parsing, formatting, and conversion for Erlang.
+pkg_qdate_homepage = https://github.com/choptastic/qdate
+pkg_qdate_fetch = git
+pkg_qdate_repo = https://github.com/choptastic/qdate
+pkg_qdate_commit = master
+
+PACKAGES += qrcode
+pkg_qrcode_name = qrcode
+pkg_qrcode_description = QR Code encoder in Erlang
+pkg_qrcode_homepage = https://github.com/komone/qrcode
+pkg_qrcode_fetch = git
+pkg_qrcode_repo = https://github.com/komone/qrcode
+pkg_qrcode_commit = master
+
+PACKAGES += quest
+pkg_quest_name = quest
+pkg_quest_description = Learn Erlang through this set of challenges. An interactive system for getting to know Erlang.
+pkg_quest_homepage = https://github.com/eriksoe/ErlangQuest
+pkg_quest_fetch = git
+pkg_quest_repo = https://github.com/eriksoe/ErlangQuest
+pkg_quest_commit = master
+
+PACKAGES += quickrand
+pkg_quickrand_name = quickrand
+pkg_quickrand_description = Quick Erlang Random Number Generation
+pkg_quickrand_homepage = https://github.com/okeuday/quickrand
+pkg_quickrand_fetch = git
+pkg_quickrand_repo = https://github.com/okeuday/quickrand
+pkg_quickrand_commit = master
+
+PACKAGES += rabbit
+pkg_rabbit_name = rabbit
+pkg_rabbit_description = RabbitMQ Server
+pkg_rabbit_homepage = https://www.rabbitmq.com/
+pkg_rabbit_fetch = git
+pkg_rabbit_repo = https://github.com/rabbitmq/rabbitmq-server.git
+pkg_rabbit_commit = master
+
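+# Illustrative usage note (an assumption based on standard erlang.mk
+# dependency handling, not part of the upstream index): a project Makefile
+# selects packages from this index by bare name in DEPS, and erlang.mk then
+# fetches them using the pkg_*_fetch, pkg_*_repo and pkg_*_commit values
+# defined above. A minimal sketch, with a hypothetical project name:
+#
+#   PROJECT = my_app
+#   DEPS = rabbit ranch
+#   include erlang.mk
+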
+PACKAGES += rabbit_exchange_type_riak
+pkg_rabbit_exchange_type_riak_name = rabbit_exchange_type_riak
+pkg_rabbit_exchange_type_riak_description = Custom RabbitMQ exchange type for sticking messages in Riak
+pkg_rabbit_exchange_type_riak_homepage = https://github.com/jbrisbin/riak-exchange
+pkg_rabbit_exchange_type_riak_fetch = git
+pkg_rabbit_exchange_type_riak_repo = https://github.com/jbrisbin/riak-exchange
+pkg_rabbit_exchange_type_riak_commit = master
+
+PACKAGES += rack
+pkg_rack_name = rack
+pkg_rack_description = Rack handler for Erlang
+pkg_rack_homepage = https://github.com/erlyvideo/rack
+pkg_rack_fetch = git
+pkg_rack_repo = https://github.com/erlyvideo/rack
+pkg_rack_commit = master
+
+PACKAGES += radierl
+pkg_radierl_name = radierl
+pkg_radierl_description = RADIUS protocol stack implemented in Erlang.
+pkg_radierl_homepage = https://github.com/vances/radierl
+pkg_radierl_fetch = git
+pkg_radierl_repo = https://github.com/vances/radierl
+pkg_radierl_commit = master
+
+PACKAGES += rafter
+pkg_rafter_name = rafter
+pkg_rafter_description = An Erlang library application which implements the Raft consensus protocol
+pkg_rafter_homepage = https://github.com/andrewjstone/rafter
+pkg_rafter_fetch = git
+pkg_rafter_repo = https://github.com/andrewjstone/rafter
+pkg_rafter_commit = master
+
+PACKAGES += ranch
+pkg_ranch_name = ranch
+pkg_ranch_description = Socket acceptor pool for TCP protocols.
+pkg_ranch_homepage = http://ninenines.eu
+pkg_ranch_fetch = git
+pkg_ranch_repo = https://github.com/ninenines/ranch
+pkg_ranch_commit = 1.2.1
+
+PACKAGES += rbeacon
+pkg_rbeacon_name = rbeacon
+pkg_rbeacon_description = LAN discovery and presence in Erlang.
+pkg_rbeacon_homepage = https://github.com/refuge/rbeacon
+pkg_rbeacon_fetch = git
+pkg_rbeacon_repo = https://github.com/refuge/rbeacon
+pkg_rbeacon_commit = master
+
+PACKAGES += rebar
+pkg_rebar_name = rebar
+pkg_rebar_description = Erlang build tool that makes it easy to compile and test Erlang applications, port drivers and releases.
+pkg_rebar_homepage = http://www.rebar3.org
+pkg_rebar_fetch = git
+pkg_rebar_repo = https://github.com/rebar/rebar3
+pkg_rebar_commit = master
+
+PACKAGES += rebus
+pkg_rebus_name = rebus
+pkg_rebus_description = A stupid-simple, internal pub/sub event bus written in and for Erlang.
+pkg_rebus_homepage = https://github.com/olle/rebus
+pkg_rebus_fetch = git
+pkg_rebus_repo = https://github.com/olle/rebus
+pkg_rebus_commit = master
+
+PACKAGES += rec2json
+pkg_rec2json_name = rec2json
+pkg_rec2json_description = Compile Erlang record definitions into modules to convert them to/from JSON easily.
+pkg_rec2json_homepage = https://github.com/lordnull/rec2json
+pkg_rec2json_fetch = git
+pkg_rec2json_repo = https://github.com/lordnull/rec2json
+pkg_rec2json_commit = master
+
+PACKAGES += recon
+pkg_recon_name = recon
+pkg_recon_description = Collection of functions and scripts to debug Erlang in production.
+pkg_recon_homepage = https://github.com/ferd/recon
+pkg_recon_fetch = git
+pkg_recon_repo = https://github.com/ferd/recon
+pkg_recon_commit = master
+
+PACKAGES += record_info
+pkg_record_info_name = record_info
+pkg_record_info_description = Convert between records and proplists
+pkg_record_info_homepage = https://github.com/bipthelin/erlang-record_info
+pkg_record_info_fetch = git
+pkg_record_info_repo = https://github.com/bipthelin/erlang-record_info
+pkg_record_info_commit = master
+
+PACKAGES += redgrid
+pkg_redgrid_name = redgrid
+pkg_redgrid_description = Automatic Erlang node discovery via Redis
+pkg_redgrid_homepage = https://github.com/jkvor/redgrid
+pkg_redgrid_fetch = git
+pkg_redgrid_repo = https://github.com/jkvor/redgrid
+pkg_redgrid_commit = master
+
+PACKAGES += redo
+pkg_redo_name = redo
+pkg_redo_description = Pipelined Erlang Redis client
+pkg_redo_homepage = https://github.com/jkvor/redo
+pkg_redo_fetch = git
+pkg_redo_repo = https://github.com/jkvor/redo
+pkg_redo_commit = master
+
+PACKAGES += reload_mk
+pkg_reload_mk_name = reload_mk
+pkg_reload_mk_description = Live reload plugin for erlang.mk.
+pkg_reload_mk_homepage = https://github.com/bullno1/reload.mk
+pkg_reload_mk_fetch = git
+pkg_reload_mk_repo = https://github.com/bullno1/reload.mk
+pkg_reload_mk_commit = master
+
+PACKAGES += reltool_util
+pkg_reltool_util_name = reltool_util
+pkg_reltool_util_description = Utility application for Erlang's reltool
+pkg_reltool_util_homepage = https://github.com/okeuday/reltool_util
+pkg_reltool_util_fetch = git
+pkg_reltool_util_repo = https://github.com/okeuday/reltool_util
+pkg_reltool_util_commit = master
+
+PACKAGES += relx
+pkg_relx_name = relx
+pkg_relx_description = Sane, simple release creation for Erlang
+pkg_relx_homepage = https://github.com/erlware/relx
+pkg_relx_fetch = git
+pkg_relx_repo = https://github.com/erlware/relx
+pkg_relx_commit = master
+
+PACKAGES += resource_discovery
+pkg_resource_discovery_name = resource_discovery
+pkg_resource_discovery_description = An application used to dynamically discover resources present in an Erlang node cluster.
+pkg_resource_discovery_homepage = http://erlware.org/
+pkg_resource_discovery_fetch = git
+pkg_resource_discovery_repo = https://github.com/erlware/resource_discovery
+pkg_resource_discovery_commit = master
+
+PACKAGES += restc
+pkg_restc_name = restc
+pkg_restc_description = Erlang REST client
+pkg_restc_homepage = https://github.com/kivra/restclient
+pkg_restc_fetch = git
+pkg_restc_repo = https://github.com/kivra/restclient
+pkg_restc_commit = master
+
+PACKAGES += rfc4627_jsonrpc
+pkg_rfc4627_jsonrpc_name = rfc4627_jsonrpc
+pkg_rfc4627_jsonrpc_description = Erlang RFC4627 (JSON) codec and JSON-RPC server implementation.
+pkg_rfc4627_jsonrpc_homepage = https://github.com/tonyg/erlang-rfc4627
+pkg_rfc4627_jsonrpc_fetch = git
+pkg_rfc4627_jsonrpc_repo = https://github.com/tonyg/erlang-rfc4627
+pkg_rfc4627_jsonrpc_commit = master
+
+PACKAGES += riak_control
+pkg_riak_control_name = riak_control
+pkg_riak_control_description = Webmachine-based administration interface for Riak.
+pkg_riak_control_homepage = https://github.com/basho/riak_control
+pkg_riak_control_fetch = git
+pkg_riak_control_repo = https://github.com/basho/riak_control
+pkg_riak_control_commit = master
+
+PACKAGES += riak_core
+pkg_riak_core_name = riak_core
+pkg_riak_core_description = Distributed systems infrastructure used by Riak.
+pkg_riak_core_homepage = https://github.com/basho/riak_core
+pkg_riak_core_fetch = git
+pkg_riak_core_repo = https://github.com/basho/riak_core
+pkg_riak_core_commit = master
+
+PACKAGES += riak_dt
+pkg_riak_dt_name = riak_dt
+pkg_riak_dt_description = Convergent replicated datatypes in Erlang
+pkg_riak_dt_homepage = https://github.com/basho/riak_dt
+pkg_riak_dt_fetch = git
+pkg_riak_dt_repo = https://github.com/basho/riak_dt
+pkg_riak_dt_commit = master
+
+PACKAGES += riak_ensemble
+pkg_riak_ensemble_name = riak_ensemble
+pkg_riak_ensemble_description = Multi-Paxos framework in Erlang
+pkg_riak_ensemble_homepage = https://github.com/basho/riak_ensemble
+pkg_riak_ensemble_fetch = git
+pkg_riak_ensemble_repo = https://github.com/basho/riak_ensemble
+pkg_riak_ensemble_commit = master
+
+PACKAGES += riak_kv
+pkg_riak_kv_name = riak_kv
+pkg_riak_kv_description = Riak Key/Value Store
+pkg_riak_kv_homepage = https://github.com/basho/riak_kv
+pkg_riak_kv_fetch = git
+pkg_riak_kv_repo = https://github.com/basho/riak_kv
+pkg_riak_kv_commit = master
+
+PACKAGES += riak_pg
+pkg_riak_pg_name = riak_pg
+pkg_riak_pg_description = Distributed process groups with riak_core.
+pkg_riak_pg_homepage = https://github.com/cmeiklejohn/riak_pg
+pkg_riak_pg_fetch = git
+pkg_riak_pg_repo = https://github.com/cmeiklejohn/riak_pg
+pkg_riak_pg_commit = master
+
+PACKAGES += riak_pipe
+pkg_riak_pipe_name = riak_pipe
+pkg_riak_pipe_description = Riak Pipelines
+pkg_riak_pipe_homepage = https://github.com/basho/riak_pipe
+pkg_riak_pipe_fetch = git
+pkg_riak_pipe_repo = https://github.com/basho/riak_pipe
+pkg_riak_pipe_commit = master
+
+PACKAGES += riak_sysmon
+pkg_riak_sysmon_name = riak_sysmon
+pkg_riak_sysmon_description = Simple OTP app for managing Erlang VM system_monitor event messages
+pkg_riak_sysmon_homepage = https://github.com/basho/riak_sysmon
+pkg_riak_sysmon_fetch = git
+pkg_riak_sysmon_repo = https://github.com/basho/riak_sysmon
+pkg_riak_sysmon_commit = master
+
+PACKAGES += riak_test
+pkg_riak_test_name = riak_test
+pkg_riak_test_description = I'm in your cluster, testing your riaks
+pkg_riak_test_homepage = https://github.com/basho/riak_test
+pkg_riak_test_fetch = git
+pkg_riak_test_repo = https://github.com/basho/riak_test
+pkg_riak_test_commit = master
+
+PACKAGES += riakc
+pkg_riakc_name = riakc
+pkg_riakc_description = Erlang clients for Riak.
+pkg_riakc_homepage = https://github.com/basho/riak-erlang-client
+pkg_riakc_fetch = git
+pkg_riakc_repo = https://github.com/basho/riak-erlang-client
+pkg_riakc_commit = master
+
+PACKAGES += riakhttpc
+pkg_riakhttpc_name = riakhttpc
+pkg_riakhttpc_description = Riak Erlang client using the HTTP interface
+pkg_riakhttpc_homepage = https://github.com/basho/riak-erlang-http-client
+pkg_riakhttpc_fetch = git
+pkg_riakhttpc_repo = https://github.com/basho/riak-erlang-http-client
+pkg_riakhttpc_commit = master
+
+PACKAGES += riaknostic
+pkg_riaknostic_name = riaknostic
+pkg_riaknostic_description = A diagnostic tool for Riak installations, to find common errors ASAP
+pkg_riaknostic_homepage = https://github.com/basho/riaknostic
+pkg_riaknostic_fetch = git
+pkg_riaknostic_repo = https://github.com/basho/riaknostic
+pkg_riaknostic_commit = master
+
+PACKAGES += riakpool
+pkg_riakpool_name = riakpool
+pkg_riakpool_description = Erlang Riak client pool
+pkg_riakpool_homepage = https://github.com/dweldon/riakpool
+pkg_riakpool_fetch = git
+pkg_riakpool_repo = https://github.com/dweldon/riakpool
+pkg_riakpool_commit = master
+
+PACKAGES += rivus_cep
+pkg_rivus_cep_name = rivus_cep
+pkg_rivus_cep_description = Complex event processing in Erlang
+pkg_rivus_cep_homepage = https://github.com/vascokk/rivus_cep
+pkg_rivus_cep_fetch = git
+pkg_rivus_cep_repo = https://github.com/vascokk/rivus_cep
+pkg_rivus_cep_commit = master
+
+PACKAGES += rlimit
+pkg_rlimit_name = rlimit
+pkg_rlimit_description = Magnus Klaar's rate limiter code from etorrent
+pkg_rlimit_homepage = https://github.com/jlouis/rlimit
+pkg_rlimit_fetch = git
+pkg_rlimit_repo = https://github.com/jlouis/rlimit
+pkg_rlimit_commit = master
+
+PACKAGES += rust_mk
+pkg_rust_mk_name = rust_mk
+pkg_rust_mk_description = Build Rust crates in an Erlang application
+pkg_rust_mk_homepage = https://github.com/goertzenator/rust.mk
+pkg_rust_mk_fetch = git
+pkg_rust_mk_repo = https://github.com/goertzenator/rust.mk
+pkg_rust_mk_commit = master
+
+PACKAGES += safetyvalve
+pkg_safetyvalve_name = safetyvalve
+pkg_safetyvalve_description = A safety valve for your Erlang node
+pkg_safetyvalve_homepage = https://github.com/jlouis/safetyvalve
+pkg_safetyvalve_fetch = git
+pkg_safetyvalve_repo = https://github.com/jlouis/safetyvalve
+pkg_safetyvalve_commit = master
+
+PACKAGES += seestar
+pkg_seestar_name = seestar
+pkg_seestar_description = The Erlang client for Cassandra 1.2+ binary protocol
+pkg_seestar_homepage = https://github.com/iamaleksey/seestar
+pkg_seestar_fetch = git
+pkg_seestar_repo = https://github.com/iamaleksey/seestar
+pkg_seestar_commit = master
+
+PACKAGES += service
+pkg_service_name = service
+pkg_service_description = A minimal Erlang behavior for creating CloudI internal services
+pkg_service_homepage = http://cloudi.org/
+pkg_service_fetch = git
+pkg_service_repo = https://github.com/CloudI/service
+pkg_service_commit = master
+
+PACKAGES += setup
+pkg_setup_name = setup
+pkg_setup_description = Generic setup utility for Erlang-based systems
+pkg_setup_homepage = https://github.com/uwiger/setup
+pkg_setup_fetch = git
+pkg_setup_repo = https://github.com/uwiger/setup
+pkg_setup_commit = master
+
+PACKAGES += sext
+pkg_sext_name = sext
+pkg_sext_description = Sortable Erlang Term Serialization
+pkg_sext_homepage = https://github.com/uwiger/sext
+pkg_sext_fetch = git
+pkg_sext_repo = https://github.com/uwiger/sext
+pkg_sext_commit = master
+
+PACKAGES += sfmt
+pkg_sfmt_name = sfmt
+pkg_sfmt_description = SFMT pseudo random number generator for Erlang.
+pkg_sfmt_homepage = https://github.com/jj1bdx/sfmt-erlang
+pkg_sfmt_fetch = git
+pkg_sfmt_repo = https://github.com/jj1bdx/sfmt-erlang
+pkg_sfmt_commit = master
+
+PACKAGES += sgte
+pkg_sgte_name = sgte
+pkg_sgte_description = A simple Erlang Template Engine
+pkg_sgte_homepage = https://github.com/filippo/sgte
+pkg_sgte_fetch = git
+pkg_sgte_repo = https://github.com/filippo/sgte
+pkg_sgte_commit = master
+
+PACKAGES += sheriff
+pkg_sheriff_name = sheriff
+pkg_sheriff_description = Parse transform for type based validation.
+pkg_sheriff_homepage = http://ninenines.eu
+pkg_sheriff_fetch = git
+pkg_sheriff_repo = https://github.com/extend/sheriff
+pkg_sheriff_commit = master
+
+PACKAGES += shotgun
+pkg_shotgun_name = shotgun
+pkg_shotgun_description = better than just a gun
+pkg_shotgun_homepage = https://github.com/inaka/shotgun
+pkg_shotgun_fetch = git
+pkg_shotgun_repo = https://github.com/inaka/shotgun
+pkg_shotgun_commit = master
+
+PACKAGES += sidejob
+pkg_sidejob_name = sidejob
+pkg_sidejob_description = Parallel worker and capacity limiting library for Erlang
+pkg_sidejob_homepage = https://github.com/basho/sidejob
+pkg_sidejob_fetch = git
+pkg_sidejob_repo = https://github.com/basho/sidejob
+pkg_sidejob_commit = master
+
+PACKAGES += sieve
+pkg_sieve_name = sieve
+pkg_sieve_description = sieve is a simple TCP routing proxy (layer 7) in Erlang
+pkg_sieve_homepage = https://github.com/benoitc/sieve
+pkg_sieve_fetch = git
+pkg_sieve_repo = https://github.com/benoitc/sieve
+pkg_sieve_commit = master
+
+PACKAGES += sighandler
+pkg_sighandler_name = sighandler
+pkg_sighandler_description = Handle UNIX signals in Erlang
+pkg_sighandler_homepage = https://github.com/jkingsbery/sighandler
+pkg_sighandler_fetch = git
+pkg_sighandler_repo = https://github.com/jkingsbery/sighandler
+pkg_sighandler_commit = master
+
+PACKAGES += simhash
+pkg_simhash_name = simhash
+pkg_simhash_description = Simhashing for Erlang -- hashing algorithm to find near-duplicates in binary data.
+pkg_simhash_homepage = https://github.com/ferd/simhash
+pkg_simhash_fetch = git
+pkg_simhash_repo = https://github.com/ferd/simhash
+pkg_simhash_commit = master
+
+PACKAGES += simple_bridge
+pkg_simple_bridge_name = simple_bridge
+pkg_simple_bridge_description = A simple, standardized interface library to Erlang HTTP Servers.
+pkg_simple_bridge_homepage = https://github.com/nitrogen/simple_bridge
+pkg_simple_bridge_fetch = git
+pkg_simple_bridge_repo = https://github.com/nitrogen/simple_bridge
+pkg_simple_bridge_commit = master
+
+PACKAGES += simple_oauth2
+pkg_simple_oauth2_name = simple_oauth2
+pkg_simple_oauth2_description = Simple Erlang OAuth2 client module for any HTTP server framework (Google, Facebook, Yandex, Vkontakte are preconfigured)
+pkg_simple_oauth2_homepage = https://github.com/virtan/simple_oauth2
+pkg_simple_oauth2_fetch = git
+pkg_simple_oauth2_repo = https://github.com/virtan/simple_oauth2
+pkg_simple_oauth2_commit = master
+
+PACKAGES += skel
+pkg_skel_name = skel
+pkg_skel_description = A Streaming Process-based Skeleton Library for Erlang
+pkg_skel_homepage = https://github.com/ParaPhrase/skel
+pkg_skel_fetch = git
+pkg_skel_repo = https://github.com/ParaPhrase/skel
+pkg_skel_commit = master
+
+PACKAGES += slack
+pkg_slack_name = slack
+pkg_slack_description = Minimal slack notification OTP library.
+pkg_slack_homepage = https://github.com/DonBranson/slack
+pkg_slack_fetch = git
+pkg_slack_repo = https://github.com/DonBranson/slack.git
+pkg_slack_commit = master
+
+PACKAGES += smother
+pkg_smother_name = smother
+pkg_smother_description = Extended code coverage metrics for Erlang.
+pkg_smother_homepage = https://ramsay-t.github.io/Smother/
+pkg_smother_fetch = git
+pkg_smother_repo = https://github.com/ramsay-t/Smother
+pkg_smother_commit = master
+
+PACKAGES += snappyer
+pkg_snappyer_name = snappyer
+pkg_snappyer_description = Snappy as a NIF for Erlang
+pkg_snappyer_homepage = https://github.com/zmstone/snappyer
+pkg_snappyer_fetch = git
+pkg_snappyer_repo = https://github.com/zmstone/snappyer.git
+pkg_snappyer_commit = master
+
+PACKAGES += social
+pkg_social_name = social
+pkg_social_description = Cowboy handler for social login via OAuth2 providers
+pkg_social_homepage = https://github.com/dvv/social
+pkg_social_fetch = git
+pkg_social_repo = https://github.com/dvv/social
+pkg_social_commit = master
+
+PACKAGES += spapi_router
+pkg_spapi_router_name = spapi_router
+pkg_spapi_router_description = Partially-connected Erlang clustering
+pkg_spapi_router_homepage = https://github.com/spilgames/spapi-router
+pkg_spapi_router_fetch = git
+pkg_spapi_router_repo = https://github.com/spilgames/spapi-router
+pkg_spapi_router_commit = master
+
+PACKAGES += sqerl
+pkg_sqerl_name = sqerl
+pkg_sqerl_description = An Erlang-flavoured SQL DSL
+pkg_sqerl_homepage = https://github.com/hairyhum/sqerl
+pkg_sqerl_fetch = git
+pkg_sqerl_repo = https://github.com/hairyhum/sqerl
+pkg_sqerl_commit = master
+
+PACKAGES += srly
+pkg_srly_name = srly
+pkg_srly_description = Native Erlang Unix serial interface
+pkg_srly_homepage = https://github.com/msantos/srly
+pkg_srly_fetch = git
+pkg_srly_repo = https://github.com/msantos/srly
+pkg_srly_commit = master
+
+PACKAGES += sshrpc
+pkg_sshrpc_name = sshrpc
+pkg_sshrpc_description = Erlang SSH RPC module (experimental)
+pkg_sshrpc_homepage = https://github.com/jj1bdx/sshrpc
+pkg_sshrpc_fetch = git
+pkg_sshrpc_repo = https://github.com/jj1bdx/sshrpc
+pkg_sshrpc_commit = master
+
+PACKAGES += stable
+pkg_stable_name = stable
+pkg_stable_description = Library of assorted helpers for Cowboy web server.
+pkg_stable_homepage = https://github.com/dvv/stable
+pkg_stable_fetch = git
+pkg_stable_repo = https://github.com/dvv/stable
+pkg_stable_commit = master
+
+PACKAGES += statebox
+pkg_statebox_name = statebox
+pkg_statebox_description = Erlang state monad with merge/conflict-resolution capabilities. Useful for Riak.
+pkg_statebox_homepage = https://github.com/mochi/statebox
+pkg_statebox_fetch = git
+pkg_statebox_repo = https://github.com/mochi/statebox
+pkg_statebox_commit = master
+
+PACKAGES += statebox_riak
+pkg_statebox_riak_name = statebox_riak
+pkg_statebox_riak_description = Convenience library that makes it easier to use statebox with riak, extracted from best practices in our production code at Mochi Media.
+pkg_statebox_riak_homepage = https://github.com/mochi/statebox_riak
+pkg_statebox_riak_fetch = git
+pkg_statebox_riak_repo = https://github.com/mochi/statebox_riak
+pkg_statebox_riak_commit = master
+
+PACKAGES += statman
+pkg_statman_name = statman
+pkg_statman_description = Efficiently collect massive volumes of metrics inside the Erlang VM
+pkg_statman_homepage = https://github.com/knutin/statman
+pkg_statman_fetch = git
+pkg_statman_repo = https://github.com/knutin/statman
+pkg_statman_commit = master
+
+PACKAGES += statsderl
+pkg_statsderl_name = statsderl
+pkg_statsderl_description = StatsD client (Erlang)
+pkg_statsderl_homepage = https://github.com/lpgauth/statsderl
+pkg_statsderl_fetch = git
+pkg_statsderl_repo = https://github.com/lpgauth/statsderl
+pkg_statsderl_commit = master
+
+PACKAGES += stdinout_pool
+pkg_stdinout_pool_name = stdinout_pool
+pkg_stdinout_pool_description = stdinout_pool : stuff goes in, stuff goes out. there's never any miscommunication.
+pkg_stdinout_pool_homepage = https://github.com/mattsta/erlang-stdinout-pool
+pkg_stdinout_pool_fetch = git
+pkg_stdinout_pool_repo = https://github.com/mattsta/erlang-stdinout-pool
+pkg_stdinout_pool_commit = master
+
+PACKAGES += stockdb
+pkg_stockdb_name = stockdb
+pkg_stockdb_description = Database for storing stock exchange quotes in Erlang
+pkg_stockdb_homepage = https://github.com/maxlapshin/stockdb
+pkg_stockdb_fetch = git
+pkg_stockdb_repo = https://github.com/maxlapshin/stockdb
+pkg_stockdb_commit = master
+
+PACKAGES += stripe
+pkg_stripe_name = stripe
+pkg_stripe_description = Erlang interface to the stripe.com API
+pkg_stripe_homepage = https://github.com/mattsta/stripe-erlang
+pkg_stripe_fetch = git
+pkg_stripe_repo = https://github.com/mattsta/stripe-erlang
+pkg_stripe_commit = v1
+
+PACKAGES += subproc
+pkg_subproc_name = subproc
+pkg_subproc_description = UNIX subprocess manager with {active,once|false} modes
+pkg_subproc_homepage = http://dozzie.jarowit.net/trac/wiki/subproc
+pkg_subproc_fetch = git
+pkg_subproc_repo = https://github.com/dozzie/subproc
+pkg_subproc_commit = v0.1.0
+
+PACKAGES += supervisor3
+pkg_supervisor3_name = supervisor3
+pkg_supervisor3_description = OTP supervisor with additional strategies
+pkg_supervisor3_homepage = https://github.com/klarna/supervisor3
+pkg_supervisor3_fetch = git
+pkg_supervisor3_repo = https://github.com/klarna/supervisor3.git
+pkg_supervisor3_commit = master
+
+PACKAGES += surrogate
+pkg_surrogate_name = surrogate
+pkg_surrogate_description = Proxy server written in Erlang. Supports reverse proxy load balancing and forward proxy with HTTP (including CONNECT), SOCKS4, SOCKS5, and transparent proxy modes.
+pkg_surrogate_homepage = https://github.com/skruger/Surrogate
+pkg_surrogate_fetch = git
+pkg_surrogate_repo = https://github.com/skruger/Surrogate
+pkg_surrogate_commit = master
+
+PACKAGES += swab
+pkg_swab_name = swab
+pkg_swab_description = General purpose buffer handling module
+pkg_swab_homepage = https://github.com/crownedgrouse/swab
+pkg_swab_fetch = git
+pkg_swab_repo = https://github.com/crownedgrouse/swab
+pkg_swab_commit = master
+
+PACKAGES += swarm
+pkg_swarm_name = swarm
+pkg_swarm_description = Fast and simple acceptor pool for Erlang
+pkg_swarm_homepage = https://github.com/jeremey/swarm
+pkg_swarm_fetch = git
+pkg_swarm_repo = https://github.com/jeremey/swarm
+pkg_swarm_commit = master
+
+PACKAGES += switchboard
+pkg_switchboard_name = switchboard
+pkg_switchboard_description = A framework for processing email using worker plugins.
+pkg_switchboard_homepage = https://github.com/thusfresh/switchboard
+pkg_switchboard_fetch = git
+pkg_switchboard_repo = https://github.com/thusfresh/switchboard
+pkg_switchboard_commit = master
+
+PACKAGES += syn
+pkg_syn_name = syn
+pkg_syn_description = A global Process Registry and Process Group manager for Erlang.
+pkg_syn_homepage = https://github.com/ostinelli/syn
+pkg_syn_fetch = git
+pkg_syn_repo = https://github.com/ostinelli/syn
+pkg_syn_commit = master
+
+PACKAGES += sync
+pkg_sync_name = sync
+pkg_sync_description = On-the-fly recompiling and reloading in Erlang.
+pkg_sync_homepage = https://github.com/rustyio/sync
+pkg_sync_fetch = git
+pkg_sync_repo = https://github.com/rustyio/sync
+pkg_sync_commit = master
+
+PACKAGES += syntaxerl
+pkg_syntaxerl_name = syntaxerl
+pkg_syntaxerl_description = Syntax checker for Erlang
+pkg_syntaxerl_homepage = https://github.com/ten0s/syntaxerl
+pkg_syntaxerl_fetch = git
+pkg_syntaxerl_repo = https://github.com/ten0s/syntaxerl
+pkg_syntaxerl_commit = master
+
+PACKAGES += syslog
+pkg_syslog_name = syslog
+pkg_syslog_description = Erlang port driver for interacting with syslog via syslog(3)
+pkg_syslog_homepage = https://github.com/Vagabond/erlang-syslog
+pkg_syslog_fetch = git
+pkg_syslog_repo = https://github.com/Vagabond/erlang-syslog
+pkg_syslog_commit = master
+
+PACKAGES += taskforce
+pkg_taskforce_name = taskforce
+pkg_taskforce_description = Erlang worker pools for controlled parallelisation of arbitrary tasks.
+pkg_taskforce_homepage = https://github.com/g-andrade/taskforce
+pkg_taskforce_fetch = git
+pkg_taskforce_repo = https://github.com/g-andrade/taskforce
+pkg_taskforce_commit = master
+
+PACKAGES += tddreloader
+pkg_tddreloader_name = tddreloader
+pkg_tddreloader_description = Shell utility for recompiling, reloading, and testing code as it changes
+pkg_tddreloader_homepage = https://github.com/version2beta/tddreloader
+pkg_tddreloader_fetch = git
+pkg_tddreloader_repo = https://github.com/version2beta/tddreloader
+pkg_tddreloader_commit = master
+
+PACKAGES += tempo
+pkg_tempo_name = tempo
+pkg_tempo_description = NIF-based date and time parsing and formatting for Erlang.
+pkg_tempo_homepage = https://github.com/selectel/tempo
+pkg_tempo_fetch = git
+pkg_tempo_repo = https://github.com/selectel/tempo
+pkg_tempo_commit = master
+
+PACKAGES += ticktick
+pkg_ticktick_name = ticktick
+pkg_ticktick_description = Ticktick is an ID generator for messaging services.
+pkg_ticktick_homepage = https://github.com/ericliang/ticktick
+pkg_ticktick_fetch = git
+pkg_ticktick_repo = https://github.com/ericliang/ticktick
+pkg_ticktick_commit = master
+
+PACKAGES += tinymq
+pkg_tinymq_name = tinymq
+pkg_tinymq_description = TinyMQ - a diminutive, in-memory message queue
+pkg_tinymq_homepage = https://github.com/ChicagoBoss/tinymq
+pkg_tinymq_fetch = git
+pkg_tinymq_repo = https://github.com/ChicagoBoss/tinymq
+pkg_tinymq_commit = master
+
+PACKAGES += tinymt
+pkg_tinymt_name = tinymt
+pkg_tinymt_description = TinyMT pseudo random number generator for Erlang.
+pkg_tinymt_homepage = https://github.com/jj1bdx/tinymt-erlang
+pkg_tinymt_fetch = git
+pkg_tinymt_repo = https://github.com/jj1bdx/tinymt-erlang
+pkg_tinymt_commit = master
+
+PACKAGES += tirerl
+pkg_tirerl_name = tirerl
+pkg_tirerl_description = Erlang interface to Elasticsearch
+pkg_tirerl_homepage = https://github.com/inaka/tirerl
+pkg_tirerl_fetch = git
+pkg_tirerl_repo = https://github.com/inaka/tirerl
+pkg_tirerl_commit = master
+
+PACKAGES += toml
+pkg_toml_name = toml
+pkg_toml_description = TOML (0.4.0) config parser
+pkg_toml_homepage = http://dozzie.jarowit.net/trac/wiki/TOML
+pkg_toml_fetch = git
+pkg_toml_repo = https://github.com/dozzie/toml
+pkg_toml_commit = v0.2.0
+
+PACKAGES += traffic_tools
+pkg_traffic_tools_name = traffic_tools
+pkg_traffic_tools_description = Simple traffic limiting library
+pkg_traffic_tools_homepage = https://github.com/systra/traffic_tools
+pkg_traffic_tools_fetch = git
+pkg_traffic_tools_repo = https://github.com/systra/traffic_tools
+pkg_traffic_tools_commit = master
+
+PACKAGES += trails
+pkg_trails_name = trails
+pkg_trails_description = A couple of improvements over Cowboy Routes
+pkg_trails_homepage = http://inaka.github.io/cowboy-trails/
+pkg_trails_fetch = git
+pkg_trails_repo = https://github.com/inaka/cowboy-trails
+pkg_trails_commit = master
+
+PACKAGES += trane
+pkg_trane_name = trane
+pkg_trane_description = SAX-style parser for broken HTML in Erlang
+pkg_trane_homepage = https://github.com/massemanet/trane
+pkg_trane_fetch = git
+pkg_trane_repo = https://github.com/massemanet/trane
+pkg_trane_commit = master
+
+PACKAGES += transit
+pkg_transit_name = transit
+pkg_transit_description = Transit format for Erlang
+pkg_transit_homepage = https://github.com/isaiah/transit-erlang
+pkg_transit_fetch = git
+pkg_transit_repo = https://github.com/isaiah/transit-erlang
+pkg_transit_commit = master
+
+PACKAGES += trie
+pkg_trie_name = trie
+pkg_trie_description = Erlang Trie Implementation
+pkg_trie_homepage = https://github.com/okeuday/trie
+pkg_trie_fetch = git
+pkg_trie_repo = https://github.com/okeuday/trie
+pkg_trie_commit = master
+
+PACKAGES += triq
+pkg_triq_name = triq
+pkg_triq_description = Trifork QuickCheck
+pkg_triq_homepage = https://triq.gitlab.io
+pkg_triq_fetch = git
+pkg_triq_repo = https://gitlab.com/triq/triq.git
+pkg_triq_commit = master
+
+PACKAGES += tunctl
+pkg_tunctl_name = tunctl
+pkg_tunctl_description = Erlang TUN/TAP interface
+pkg_tunctl_homepage = https://github.com/msantos/tunctl
+pkg_tunctl_fetch = git
+pkg_tunctl_repo = https://github.com/msantos/tunctl
+pkg_tunctl_commit = master
+
+PACKAGES += twerl
+pkg_twerl_name = twerl
+pkg_twerl_description = Erlang client for the Twitter Streaming API
+pkg_twerl_homepage = https://github.com/lucaspiller/twerl
+pkg_twerl_fetch = git
+pkg_twerl_repo = https://github.com/lucaspiller/twerl
+pkg_twerl_commit = oauth
+
+PACKAGES += twitter_erlang
+pkg_twitter_erlang_name = twitter_erlang
+pkg_twitter_erlang_description = An Erlang Twitter client
+pkg_twitter_erlang_homepage = https://github.com/ngerakines/erlang_twitter
+pkg_twitter_erlang_fetch = git
+pkg_twitter_erlang_repo = https://github.com/ngerakines/erlang_twitter
+pkg_twitter_erlang_commit = master
+
+PACKAGES += ucol_nif
+pkg_ucol_nif_name = ucol_nif
+pkg_ucol_nif_description = ICU-based collation module for Erlang
+pkg_ucol_nif_homepage = https://github.com/refuge/ucol_nif
+pkg_ucol_nif_fetch = git
+pkg_ucol_nif_repo = https://github.com/refuge/ucol_nif
+pkg_ucol_nif_commit = master
+
+PACKAGES += unicorn
+pkg_unicorn_name = unicorn
+pkg_unicorn_description = Generic configuration server
+pkg_unicorn_homepage = https://github.com/shizzard/unicorn
+pkg_unicorn_fetch = git
+pkg_unicorn_repo = https://github.com/shizzard/unicorn
+pkg_unicorn_commit = master
+
+PACKAGES += unsplit
+pkg_unsplit_name = unsplit
+pkg_unsplit_description = Resolves conflicts in Mnesia after network splits
+pkg_unsplit_homepage = https://github.com/uwiger/unsplit
+pkg_unsplit_fetch = git
+pkg_unsplit_repo = https://github.com/uwiger/unsplit
+pkg_unsplit_commit = master
+
+PACKAGES += uuid
+pkg_uuid_name = uuid
+pkg_uuid_description = Erlang UUID Implementation
+pkg_uuid_homepage = https://github.com/okeuday/uuid
+pkg_uuid_fetch = git
+pkg_uuid_repo = https://github.com/okeuday/uuid
+pkg_uuid_commit = master
+
+PACKAGES += ux
+pkg_ux_name = ux
+pkg_ux_description = Unicode eXtension for Erlang (strings, collation)
+pkg_ux_homepage = https://github.com/erlang-unicode/ux
+pkg_ux_fetch = git
+pkg_ux_repo = https://github.com/erlang-unicode/ux
+pkg_ux_commit = master
+
+PACKAGES += vert
+pkg_vert_name = vert
+pkg_vert_description = Erlang binding to the libvirt virtualization API
+pkg_vert_homepage = https://github.com/msantos/erlang-libvirt
+pkg_vert_fetch = git
+pkg_vert_repo = https://github.com/msantos/erlang-libvirt
+pkg_vert_commit = master
+
+PACKAGES += verx
+pkg_verx_name = verx
+pkg_verx_description = Erlang implementation of the libvirtd remote protocol
+pkg_verx_homepage = https://github.com/msantos/verx
+pkg_verx_fetch = git
+pkg_verx_repo = https://github.com/msantos/verx
+pkg_verx_commit = master
+
+PACKAGES += vmq_acl
+pkg_vmq_acl_name = vmq_acl
+pkg_vmq_acl_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_acl_homepage = https://verne.mq/
+pkg_vmq_acl_fetch = git
+pkg_vmq_acl_repo = https://github.com/erlio/vmq_acl
+pkg_vmq_acl_commit = master
+
+PACKAGES += vmq_bridge
+pkg_vmq_bridge_name = vmq_bridge
+pkg_vmq_bridge_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_bridge_homepage = https://verne.mq/
+pkg_vmq_bridge_fetch = git
+pkg_vmq_bridge_repo = https://github.com/erlio/vmq_bridge
+pkg_vmq_bridge_commit = master
+
+PACKAGES += vmq_graphite
+pkg_vmq_graphite_name = vmq_graphite
+pkg_vmq_graphite_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_graphite_homepage = https://verne.mq/
+pkg_vmq_graphite_fetch = git
+pkg_vmq_graphite_repo = https://github.com/erlio/vmq_graphite
+pkg_vmq_graphite_commit = master
+
+PACKAGES += vmq_passwd
+pkg_vmq_passwd_name = vmq_passwd
+pkg_vmq_passwd_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_passwd_homepage = https://verne.mq/
+pkg_vmq_passwd_fetch = git
+pkg_vmq_passwd_repo = https://github.com/erlio/vmq_passwd
+pkg_vmq_passwd_commit = master
+
+PACKAGES += vmq_server
+pkg_vmq_server_name = vmq_server
+pkg_vmq_server_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_server_homepage = https://verne.mq/
+pkg_vmq_server_fetch = git
+pkg_vmq_server_repo = https://github.com/erlio/vmq_server
+pkg_vmq_server_commit = master
+
+PACKAGES += vmq_snmp
+pkg_vmq_snmp_name = vmq_snmp
+pkg_vmq_snmp_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_snmp_homepage = https://verne.mq/
+pkg_vmq_snmp_fetch = git
+pkg_vmq_snmp_repo = https://github.com/erlio/vmq_snmp
+pkg_vmq_snmp_commit = master
+
+PACKAGES += vmq_systree
+pkg_vmq_systree_name = vmq_systree
+pkg_vmq_systree_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_systree_homepage = https://verne.mq/
+pkg_vmq_systree_fetch = git
+pkg_vmq_systree_repo = https://github.com/erlio/vmq_systree
+pkg_vmq_systree_commit = master
+
+PACKAGES += vmstats
+pkg_vmstats_name = vmstats
+pkg_vmstats_description = Tiny Erlang app that works in conjunction with statsderl to generate Erlang VM metrics for Graphite.
+pkg_vmstats_homepage = https://github.com/ferd/vmstats
+pkg_vmstats_fetch = git
+pkg_vmstats_repo = https://github.com/ferd/vmstats
+pkg_vmstats_commit = master
+
+PACKAGES += walrus
+pkg_walrus_name = walrus
+pkg_walrus_description = Walrus - Mustache-like Templating
+pkg_walrus_homepage = https://github.com/devinus/walrus
+pkg_walrus_fetch = git
+pkg_walrus_repo = https://github.com/devinus/walrus
+pkg_walrus_commit = master
+
+PACKAGES += webmachine
+pkg_webmachine_name = webmachine
+pkg_webmachine_description = A REST-based system for building web applications.
+pkg_webmachine_homepage = https://github.com/basho/webmachine
+pkg_webmachine_fetch = git
+pkg_webmachine_repo = https://github.com/basho/webmachine
+pkg_webmachine_commit = master
+
+PACKAGES += websocket_client
+pkg_websocket_client_name = websocket_client
+pkg_websocket_client_description = Erlang websocket client (ws and wss supported)
+pkg_websocket_client_homepage = https://github.com/jeremyong/websocket_client
+pkg_websocket_client_fetch = git
+pkg_websocket_client_repo = https://github.com/jeremyong/websocket_client
+pkg_websocket_client_commit = master
+
+PACKAGES += worker_pool
+pkg_worker_pool_name = worker_pool
+pkg_worker_pool_description = A simple Erlang worker pool
+pkg_worker_pool_homepage = https://github.com/inaka/worker_pool
+pkg_worker_pool_fetch = git
+pkg_worker_pool_repo = https://github.com/inaka/worker_pool
+pkg_worker_pool_commit = master
+
+PACKAGES += wrangler
+pkg_wrangler_name = wrangler
+pkg_wrangler_description = Import of the Wrangler svn repository.
+pkg_wrangler_homepage = http://www.cs.kent.ac.uk/projects/wrangler/Home.html
+pkg_wrangler_fetch = git
+pkg_wrangler_repo = https://github.com/RefactoringTools/wrangler
+pkg_wrangler_commit = master
+
+PACKAGES += wsock
+pkg_wsock_name = wsock
+pkg_wsock_description = Erlang library to build WebSocket clients and servers
+pkg_wsock_homepage = https://github.com/madtrick/wsock
+pkg_wsock_fetch = git
+pkg_wsock_repo = https://github.com/madtrick/wsock
+pkg_wsock_commit = master
+
+PACKAGES += xhttpc
+pkg_xhttpc_name = xhttpc
+pkg_xhttpc_description = Extensible HTTP Client for Erlang
+pkg_xhttpc_homepage = https://github.com/seriyps/xhttpc
+pkg_xhttpc_fetch = git
+pkg_xhttpc_repo = https://github.com/seriyps/xhttpc
+pkg_xhttpc_commit = master
+
+PACKAGES += xref_runner
+pkg_xref_runner_name = xref_runner
+pkg_xref_runner_description = Erlang Xref Runner (inspired by rebar xref)
+pkg_xref_runner_homepage = https://github.com/inaka/xref_runner
+pkg_xref_runner_fetch = git
+pkg_xref_runner_repo = https://github.com/inaka/xref_runner
+pkg_xref_runner_commit = master
+
+PACKAGES += yamerl
+pkg_yamerl_name = yamerl
+pkg_yamerl_description = YAML 1.2 parser in pure Erlang
+pkg_yamerl_homepage = https://github.com/yakaz/yamerl
+pkg_yamerl_fetch = git
+pkg_yamerl_repo = https://github.com/yakaz/yamerl
+pkg_yamerl_commit = master
+
+PACKAGES += yamler
+pkg_yamler_name = yamler
+pkg_yamler_description = libyaml-based yaml loader for Erlang
+pkg_yamler_homepage = https://github.com/goertzenator/yamler
+pkg_yamler_fetch = git
+pkg_yamler_repo = https://github.com/goertzenator/yamler
+pkg_yamler_commit = master
+
+PACKAGES += yaws
+pkg_yaws_name = yaws
+pkg_yaws_description = Yaws webserver
+pkg_yaws_homepage = http://yaws.hyber.org
+pkg_yaws_fetch = git
+pkg_yaws_repo = https://github.com/klacke/yaws
+pkg_yaws_commit = master
+
+PACKAGES += zab_engine
+pkg_zab_engine_name = zab_engine
+pkg_zab_engine_description = ZAB protocol implementation in Erlang
+pkg_zab_engine_homepage = https://github.com/xinmingyao/zab_engine
+pkg_zab_engine_fetch = git
+pkg_zab_engine_repo = https://github.com/xinmingyao/zab_engine
+pkg_zab_engine_commit = master
+
+PACKAGES += zabbix_sender
+pkg_zabbix_sender_name = zabbix_sender
+pkg_zabbix_sender_description = Zabbix trapper for sending data to Zabbix in pure Erlang
+pkg_zabbix_sender_homepage = https://github.com/stalkermn/zabbix_sender
+pkg_zabbix_sender_fetch = git
+pkg_zabbix_sender_repo = https://github.com/stalkermn/zabbix_sender.git
+pkg_zabbix_sender_commit = master
+
+PACKAGES += zeta
+pkg_zeta_name = zeta
+pkg_zeta_description = HTTP access log parser in Erlang
+pkg_zeta_homepage = https://github.com/s1n4/zeta
+pkg_zeta_fetch = git
+pkg_zeta_repo = https://github.com/s1n4/zeta
+pkg_zeta_commit = master
+
+PACKAGES += zippers
+pkg_zippers_name = zippers
+pkg_zippers_description = A library for functional zipper data structures in Erlang.
+pkg_zippers_homepage = https://github.com/ferd/zippers
+pkg_zippers_fetch = git
+pkg_zippers_repo = https://github.com/ferd/zippers
+pkg_zippers_commit = master
+
+PACKAGES += zlists
+pkg_zlists_name = zlists
+pkg_zlists_description = Erlang lazy lists library.
+pkg_zlists_homepage = https://github.com/vjache/erlang-zlists
+pkg_zlists_fetch = git
+pkg_zlists_repo = https://github.com/vjache/erlang-zlists
+pkg_zlists_commit = master
+
+PACKAGES += zraft_lib
+pkg_zraft_lib_name = zraft_lib
+pkg_zraft_lib_description = Erlang raft consensus protocol implementation
+pkg_zraft_lib_homepage = https://github.com/dreyk/zraft_lib
+pkg_zraft_lib_fetch = git
+pkg_zraft_lib_repo = https://github.com/dreyk/zraft_lib
+pkg_zraft_lib_commit = master
+
+PACKAGES += zucchini
+pkg_zucchini_name = zucchini
+pkg_zucchini_description = An Erlang INI parser
+pkg_zucchini_homepage = https://github.com/devinus/zucchini
+pkg_zucchini_fetch = git
+pkg_zucchini_repo = https://github.com/devinus/zucchini
+pkg_zucchini_commit = master
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: search
+
+define pkg_print
+ $(verbose) printf "%s\n" \
+ $(if $(call core_eq,$(1),$(pkg_$(1)_name)),,"Pkg name: $(1)") \
+ "App name: $(pkg_$(1)_name)" \
+ "Description: $(pkg_$(1)_description)" \
+ "Home page: $(pkg_$(1)_homepage)" \
+ "Fetch with: $(pkg_$(1)_fetch)" \
+ "Repository: $(pkg_$(1)_repo)" \
+ "Commit: $(pkg_$(1)_commit)" \
+ ""
+
+endef
+
+search:
+ifdef q
+ $(foreach p,$(PACKAGES), \
+ $(if $(findstring $(call core_lc,$(q)),$(call core_lc,$(pkg_$(p)_name) $(pkg_$(p)_description))), \
+ $(call pkg_print,$(p))))
+else
+ $(foreach p,$(PACKAGES),$(call pkg_print,$(p)))
+endif
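+
+# Usage sketch (illustrative; the keyword is arbitrary): 'q' filters the
+# PACKAGES index by name and description, e.g.
+#
+#     make search q=pool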
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-deps clean-tmp-deps.log
+
+# Configuration.
+
+ifdef OTP_DEPS
+$(warning The variable OTP_DEPS is deprecated in favor of LOCAL_DEPS.)
+endif
+
+IGNORE_DEPS ?=
+export IGNORE_DEPS
+
+APPS_DIR ?= $(CURDIR)/apps
+export APPS_DIR
+
+DEPS_DIR ?= $(CURDIR)/deps
+export DEPS_DIR
+
+REBAR_DEPS_DIR = $(DEPS_DIR)
+export REBAR_DEPS_DIR
+
+REBAR_GIT ?= https://github.com/rebar/rebar
+REBAR_COMMIT ?= 576e12171ab8d69b048b827b92aa65d067deea01
+
+# External "early" plugins (see core/plugins.mk for regular plugins).
+# They both use the core_dep_plugin macro.
+
+define core_dep_plugin
+ifeq ($(2),$(PROJECT))
+-include $$(patsubst $(PROJECT)/%,%,$(1))
+else
+-include $(DEPS_DIR)/$(1)
+
+$(DEPS_DIR)/$(1): $(DEPS_DIR)/$(2) ;
+endif
+endef
+
+DEP_EARLY_PLUGINS ?=
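+
+# Illustrative values ('mydep' is a placeholder dependency name):
+#     DEP_EARLY_PLUGINS = mydep               # includes $(DEPS_DIR)/mydep/early-plugins.mk
+#     DEP_EARLY_PLUGINS = mydep/mk/early.mk   # includes $(DEPS_DIR)/mydep/mk/early.mk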
+
+$(foreach p,$(DEP_EARLY_PLUGINS),\
+ $(eval $(if $(findstring /,$p),\
+ $(call core_dep_plugin,$p,$(firstword $(subst /, ,$p))),\
+ $(call core_dep_plugin,$p/early-plugins.mk,$p))))
+
+# Query functions.
+
+query_fetch_method = $(if $(dep_$(1)),$(call _qfm_dep,$(word 1,$(dep_$(1)))),$(call _qfm_pkg,$(1)))
+_qfm_dep = $(if $(dep_fetch_$(1)),$(1),$(if $(IS_DEP),legacy,fail))
+_qfm_pkg = $(if $(pkg_$(1)_fetch),$(pkg_$(1)_fetch),fail)
+
+query_name = $(if $(dep_$(1)),$(1),$(if $(pkg_$(1)_name),$(pkg_$(1)_name),$(1)))
+
+query_repo = $(call _qr,$(1),$(call query_fetch_method,$(1)))
+_qr = $(if $(query_repo_$(2)),$(call query_repo_$(2),$(1)),$(call dep_repo,$(1)))
+
+query_repo_default = $(if $(dep_$(1)),$(word 2,$(dep_$(1))),$(pkg_$(1)_repo))
+query_repo_git = $(patsubst git://github.com/%,https://github.com/%,$(call query_repo_default,$(1)))
+query_repo_git-subfolder = $(call query_repo_git,$(1))
+query_repo_git-submodule = -
+query_repo_hg = $(call query_repo_default,$(1))
+query_repo_svn = $(call query_repo_default,$(1))
+query_repo_cp = $(call query_repo_default,$(1))
+query_repo_ln = $(call query_repo_default,$(1))
+query_repo_hex = https://hex.pm/packages/$(if $(word 3,$(dep_$(1))),$(word 3,$(dep_$(1))),$(1))
+query_repo_fail = -
+query_repo_legacy = -
+
+query_version = $(call _qv,$(1),$(call query_fetch_method,$(1)))
+_qv = $(if $(query_version_$(2)),$(call query_version_$(2),$(1)),$(call dep_commit,$(1)))
+
+query_version_default = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(word 3,$(dep_$(1))),$(pkg_$(1)_commit)))
+query_version_git = $(call query_version_default,$(1))
+query_version_git-subfolder = $(call query_version_git,$(1))
+query_version_git-submodule = -
+query_version_hg = $(call query_version_default,$(1))
+query_version_svn = -
+query_version_cp = -
+query_version_ln = -
+query_version_hex = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(word 2,$(dep_$(1))),$(pkg_$(1)_commit)))
+query_version_fail = -
+query_version_legacy = -
+
+query_extra = $(call _qe,$(1),$(call query_fetch_method,$(1)))
+_qe = $(if $(query_extra_$(2)),$(call query_extra_$(2),$(1)),-)
+
+query_extra_git = -
+query_extra_git-subfolder = $(if $(dep_$(1)),subfolder=$(word 4,$(dep_$(1))),-)
+query_extra_git-submodule = -
+query_extra_hg = -
+query_extra_svn = -
+query_extra_cp = -
+query_extra_ln = -
+query_extra_hex = $(if $(dep_$(1)),package-name=$(word 3,$(dep_$(1))),-)
+query_extra_fail = -
+query_extra_legacy = -
+
+query_absolute_path = $(addprefix $(DEPS_DIR)/,$(call query_name,$(1)))
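+
+# Illustrative resolution ('cowboy' is a placeholder) for a dependency
+# declared as: dep_cowboy = git https://github.com/ninenines/cowboy master
+#     $(call query_fetch_method,cowboy) -> git
+#     $(call query_repo,cowboy)         -> https://github.com/ninenines/cowboy
+#     $(call query_version,cowboy)      -> master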
+
+# Deprecated legacy query functions.
+dep_fetch = $(call query_fetch_method,$(1))
+dep_name = $(call query_name,$(1))
+dep_repo = $(call query_repo_git,$(1))
+dep_commit = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(if $(filter hex,$(word 1,$(dep_$(1)))),$(word 2,$(dep_$(1))),$(word 3,$(dep_$(1)))),$(pkg_$(1)_commit)))
+
+LOCAL_DEPS_DIRS = $(foreach a,$(LOCAL_DEPS),$(if $(wildcard $(APPS_DIR)/$(a)),$(APPS_DIR)/$(a)))
+ALL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(foreach dep,$(filter-out $(IGNORE_DEPS),$(BUILD_DEPS) $(DEPS)),$(call dep_name,$(dep))))
+
+# When we are calling an app directly we don't want to include it here,
+# otherwise it'll be treated both as an app and a top-level project.
+ALL_APPS_DIRS = $(if $(wildcard $(APPS_DIR)/),$(filter-out $(APPS_DIR),$(shell find $(APPS_DIR) -maxdepth 1 -type d)))
+ifdef ROOT_DIR
+ifndef IS_APP
+ALL_APPS_DIRS := $(filter-out $(APPS_DIR)/$(notdir $(CURDIR)),$(ALL_APPS_DIRS))
+endif
+endif
+
+ifeq ($(filter $(APPS_DIR) $(DEPS_DIR),$(subst :, ,$(ERL_LIBS))),)
+ifeq ($(ERL_LIBS),)
+ ERL_LIBS = $(APPS_DIR):$(DEPS_DIR)
+else
+ ERL_LIBS := $(ERL_LIBS):$(APPS_DIR):$(DEPS_DIR)
+endif
+endif
+export ERL_LIBS
+
+export NO_AUTOPATCH
+
+# Verbosity.
+
+dep_verbose_0 = @echo " DEP $1 ($(call dep_commit,$1))";
+dep_verbose_2 = set -x;
+dep_verbose = $(dep_verbose_$(V))
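+
+# Illustrative usage: the default V=0 prints the short DEP labels above,
+# while V=2 traces the full shell commands via 'set -x':
+#     make deps V=2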
+
+# Optimization: don't recompile deps unless truly necessary.
+
+ifndef IS_DEP
+ifneq ($(MAKELEVEL),0)
+$(shell rm -f ebin/dep_built)
+endif
+endif
+
+# Core targets.
+
+ALL_APPS_DIRS_TO_BUILD = $(if $(LOCAL_DEPS_DIRS)$(IS_APP),$(LOCAL_DEPS_DIRS),$(ALL_APPS_DIRS))
+
+apps:: $(ALL_APPS_DIRS) clean-tmp-deps.log | $(ERLANG_MK_TMP)
+# Create ebin directory for all apps to make sure Erlang recognizes them
+# as proper OTP applications when using -include_lib. This is a temporary
+# fix; a proper fix would be to compile apps/* in the right order.
+ifndef IS_APP
+ifneq ($(ALL_APPS_DIRS),)
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ mkdir -p $$dep/ebin; \
+ done
+endif
+endif
+# At the toplevel: if LOCAL_DEPS is defined with at least one local app, only
+# compile that list of apps. Otherwise, compile everything.
+# Within an app: compile all LOCAL_DEPS that are (uncompiled) local apps.
+ifneq ($(ALL_APPS_DIRS_TO_BUILD),)
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS_TO_BUILD); do \
+ if grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/apps.log; then \
+ :; \
+ else \
+ echo $$dep >> $(ERLANG_MK_TMP)/apps.log; \
+ $(MAKE) -C $$dep $(if $(IS_TEST),test-build-app) IS_APP=1; \
+ fi \
+ done
+endif
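+
+# Illustrative example ('my_app' is a placeholder application located
+# under $(APPS_DIR)):
+#     LOCAL_DEPS = my_app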
+
+clean-tmp-deps.log:
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_TMP)/apps.log $(ERLANG_MK_TMP)/deps.log
+endif
+
+# Erlang.mk does not rebuild dependencies once they have been compiled.
+# A developer working on the top-level project and some of its
+# dependencies at the same time may want to change this behavior.
+# There are two solutions:
+# 1. Set `FULL=1` so that all dependencies are visited and
+# recursively recompiled if necessary.
+# 2. Set `FORCE_REBUILD=` to the specific list of dependencies that
+# should be recompiled (instead of the whole set).
+
+FORCE_REBUILD ?=
+
+ifeq ($(origin FULL),undefined)
+ifneq ($(strip $(force_rebuild_dep)$(FORCE_REBUILD)),)
+define force_rebuild_dep
+echo "$(FORCE_REBUILD)" | grep -qw "$$(basename "$1")"
+endef
+endif
+endif
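+
+# Illustrative usage (dependency names are placeholders):
+#     make FULL=1
+#     make FORCE_REBUILD="cowboy ranch"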
+
+ifneq ($(SKIP_DEPS),)
+deps::
+else
+deps:: $(ALL_DEPS_DIRS) apps clean-tmp-deps.log | $(ERLANG_MK_TMP)
+ifneq ($(ALL_DEPS_DIRS),)
+ $(verbose) set -e; for dep in $(ALL_DEPS_DIRS); do \
+ if grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/deps.log; then \
+ :; \
+ else \
+ echo $$dep >> $(ERLANG_MK_TMP)/deps.log; \
+ if [ -z "$(strip $(FULL))" ] $(if $(force_rebuild_dep),&& ! ($(call force_rebuild_dep,$$dep)),) && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ elif [ -f $$dep/GNUmakefile ] || [ -f $$dep/makefile ] || [ -f $$dep/Makefile ]; then \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ else \
+ echo "Error: No Makefile to build dependency $$dep." >&2; \
+ exit 2; \
+ fi \
+ fi \
+ done
+endif
+endif
+
+# Deps related targets.
+
+# @todo rename GNUmakefile and makefile into Makefile first, if they exist
+# While the Makefile could also be named GNUmakefile or makefile,
+# in practice only Makefile has been needed so far.
+define dep_autopatch
+ if [ -f $(DEPS_DIR)/$(1)/erlang.mk ]; then \
+ rm -rf $(DEPS_DIR)/$1/ebin/; \
+ $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+ $(call dep_autopatch_erlang_mk,$(1)); \
+ elif [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+ if [ -f $(DEPS_DIR)/$1/rebar.lock ]; then \
+ $(call dep_autopatch2,$1); \
+ elif [ 0 != `grep -c "include ../\w*\.mk" $(DEPS_DIR)/$(1)/Makefile` ]; then \
+ $(call dep_autopatch2,$(1)); \
+ elif [ 0 != `grep -ci "^[^#].*rebar" $(DEPS_DIR)/$(1)/Makefile` ]; then \
+ $(call dep_autopatch2,$(1)); \
+ elif [ -n "`find $(DEPS_DIR)/$(1)/ -type f -name \*.mk -not -name erlang.mk -exec grep -i "^[^#].*rebar" '{}' \;`" ]; then \
+ $(call dep_autopatch2,$(1)); \
+ fi \
+ else \
+ if [ ! -d $(DEPS_DIR)/$(1)/src/ ]; then \
+ $(call dep_autopatch_noop,$(1)); \
+ else \
+ $(call dep_autopatch2,$(1)); \
+ fi \
+ fi
+endef
+
+define dep_autopatch2
+ ! test -f $(DEPS_DIR)/$1/ebin/$1.app || \
+ mv -n $(DEPS_DIR)/$1/ebin/$1.app $(DEPS_DIR)/$1/src/$1.app.src; \
+ rm -f $(DEPS_DIR)/$1/ebin/$1.app; \
+ if [ -f $(DEPS_DIR)/$1/src/$1.app.src.script ]; then \
+ $(call erlang,$(call dep_autopatch_appsrc_script.erl,$(1))); \
+ fi; \
+ $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+ if [ -f $(DEPS_DIR)/$(1)/rebar -o -f $(DEPS_DIR)/$(1)/rebar.config -o -f $(DEPS_DIR)/$(1)/rebar.config.script -o -f $(DEPS_DIR)/$1/rebar.lock ]; then \
+ $(call dep_autopatch_fetch_rebar); \
+ $(call dep_autopatch_rebar,$(1)); \
+ else \
+ $(call dep_autopatch_gen,$(1)); \
+ fi
+endef
+
+define dep_autopatch_noop
+ printf "noop:\n" > $(DEPS_DIR)/$(1)/Makefile
+endef
+
+# Replace "include erlang.mk" with a line that will load the parent Erlang.mk
+# if given. Do it for all 3 possible Makefile file names.
+ifeq ($(NO_AUTOPATCH_ERLANG_MK),)
+define dep_autopatch_erlang_mk
+ for f in Makefile makefile GNUmakefile; do \
+ if [ -f $(DEPS_DIR)/$1/$$f ]; then \
+ sed -i.bak s/'include *erlang.mk'/'include $$(if $$(ERLANG_MK_FILENAME),$$(ERLANG_MK_FILENAME),erlang.mk)'/ $(DEPS_DIR)/$1/$$f; \
+ fi \
+ done
+endef
+else
+define dep_autopatch_erlang_mk
+ :
+endef
+endif
+
+define dep_autopatch_gen
+ printf "%s\n" \
+ "ERLC_OPTS = +debug_info" \
+ "include ../../erlang.mk" > $(DEPS_DIR)/$(1)/Makefile
+endef
+
+# We use flock/lockf when available to avoid concurrency issues.
+define dep_autopatch_fetch_rebar
+ if command -v flock >/dev/null; then \
+ flock $(ERLANG_MK_TMP)/rebar.lock sh -c "$(call dep_autopatch_fetch_rebar2)"; \
+ elif command -v lockf >/dev/null; then \
+ lockf $(ERLANG_MK_TMP)/rebar.lock sh -c "$(call dep_autopatch_fetch_rebar2)"; \
+ else \
+ $(call dep_autopatch_fetch_rebar2); \
+ fi
+endef
+
+define dep_autopatch_fetch_rebar2
+ if [ ! -d $(ERLANG_MK_TMP)/rebar ]; then \
+ git clone -q -n -- $(REBAR_GIT) $(ERLANG_MK_TMP)/rebar; \
+ cd $(ERLANG_MK_TMP)/rebar; \
+ git checkout -q $(REBAR_COMMIT); \
+ ./bootstrap; \
+ cd -; \
+ fi
+endef
+
+define dep_autopatch_rebar
+ if [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+ mv $(DEPS_DIR)/$(1)/Makefile $(DEPS_DIR)/$(1)/Makefile.orig.mk; \
+ fi; \
+ $(call erlang,$(call dep_autopatch_rebar.erl,$(1))); \
+ rm -f $(DEPS_DIR)/$(1)/ebin/$(1).app
+endef
+
+define dep_autopatch_rebar.erl
+ application:load(rebar),
+ application:set_env(rebar, log_level, debug),
+ rmemo:start(),
+ Conf1 = case file:consult("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config)") of
+ {ok, Conf0} -> Conf0;
+ _ -> []
+ end,
+ {Conf, OsEnv} = fun() ->
+ case filelib:is_file("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)") of
+ false -> {Conf1, []};
+ true ->
+ Bindings0 = erl_eval:new_bindings(),
+ Bindings1 = erl_eval:add_binding('CONFIG', Conf1, Bindings0),
+ Bindings = erl_eval:add_binding('SCRIPT', "$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings1),
+ Before = os:getenv(),
+ {ok, Conf2} = file:script("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings),
+ {Conf2, lists:foldl(fun(E, Acc) -> lists:delete(E, Acc) end, os:getenv(), Before)}
+ end
+ end(),
+ Write = fun (Text) ->
+ file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/Makefile)", Text, [append])
+ end,
+ Escape = fun (Text) ->
+ re:replace(Text, "\\\\$$", "\$$$$", [global, {return, list}])
+ end,
+ Write("IGNORE_DEPS += edown eper eunit_formatters meck node_package "
+ "rebar_lock_deps_plugin rebar_vsn_plugin reltool_util\n"),
+ Write("C_SRC_DIR = /path/do/not/exist\n"),
+ Write("C_SRC_TYPE = rebar\n"),
+ Write("DRV_CFLAGS = -fPIC\nexport DRV_CFLAGS\n"),
+ Write(["ERLANG_ARCH = ", rebar_utils:wordsize(), "\nexport ERLANG_ARCH\n"]),
+ ToList = fun
+ (V) when is_atom(V) -> atom_to_list(V);
+ (V) when is_list(V) -> "'\\"" ++ V ++ "\\"'"
+ end,
+ fun() ->
+ Write("ERLC_OPTS = +debug_info\nexport ERLC_OPTS\n"),
+ case lists:keyfind(erl_opts, 1, Conf) of
+ false -> ok;
+ {_, ErlOpts} ->
+ lists:foreach(fun
+ ({d, D}) ->
+ Write("ERLC_OPTS += -D" ++ ToList(D) ++ "=1\n");
+ ({d, DKey, DVal}) ->
+ Write("ERLC_OPTS += -D" ++ ToList(DKey) ++ "=" ++ ToList(DVal) ++ "\n");
+ ({i, I}) ->
+ Write(["ERLC_OPTS += -I ", I, "\n"]);
+ ({platform_define, Regex, D}) ->
+ case rebar_utils:is_arch(Regex) of
+ true -> Write("ERLC_OPTS += -D" ++ ToList(D) ++ "=1\n");
+ false -> ok
+ end;
+ ({parse_transform, PT}) ->
+ Write("ERLC_OPTS += +'{parse_transform, " ++ ToList(PT) ++ "}'\n");
+ (_) -> ok
+ end, ErlOpts)
+ end,
+ Write("\n")
+ end(),
+ GetHexVsn = fun(N, NP) ->
+ case file:consult("$(call core_native_path,$(DEPS_DIR)/$1/rebar.lock)") of
+ {ok, Lock} ->
+ io:format("~p~n", [Lock]),
+ case lists:keyfind("1.1.0", 1, Lock) of
+ {_, LockPkgs} ->
+ io:format("~p~n", [LockPkgs]),
+ case lists:keyfind(atom_to_binary(N, latin1), 1, LockPkgs) of
+ {_, {pkg, _, Vsn}, _} ->
+ io:format("~p~n", [Vsn]),
+ {N, {hex, NP, binary_to_list(Vsn)}};
+ _ ->
+ false
+ end;
+ _ ->
+ false
+ end;
+ _ ->
+ false
+ end
+ end,
+ SemVsn = fun
+ ("~>" ++ S0) ->
+ S = case S0 of
+ " " ++ S1 -> S1;
+ _ -> S0
+ end,
+ case length([ok || $$. <- S]) of
+ 0 -> S ++ ".0.0";
+ 1 -> S ++ ".0";
+ _ -> S
+ end;
+ (S) -> S
+ end,
+ fun() ->
+ File = case lists:keyfind(deps, 1, Conf) of
+ false -> [];
+ {_, Deps} ->
+ [begin case case Dep of
+ N when is_atom(N) -> GetHexVsn(N, N);
+ {N, S} when is_atom(N), is_list(S) -> {N, {hex, N, SemVsn(S)}};
+ {N, {pkg, NP}} when is_atom(N) -> GetHexVsn(N, NP);
+ {N, S, {pkg, NP}} -> {N, {hex, NP, S}};
+ {N, S} when is_tuple(S) -> {N, S};
+ {N, _, S} -> {N, S};
+ {N, _, S, _} -> {N, S};
+ _ -> false
+ end of
+ false -> ok;
+ {Name, Source} ->
+ {Method, Repo, Commit} = case Source of
+ {hex, NPV, V} -> {hex, V, NPV};
+ {git, R} -> {git, R, master};
+ {M, R, {branch, C}} -> {M, R, C};
+ {M, R, {ref, C}} -> {M, R, C};
+ {M, R, {tag, C}} -> {M, R, C};
+ {M, R, C} -> {M, R, C}
+ end,
+ Write(io_lib:format("DEPS += ~s\ndep_~s = ~s ~s ~s~n", [Name, Name, Method, Repo, Commit]))
+ end end || Dep <- Deps]
+ end
+ end(),
+ fun() ->
+ case lists:keyfind(erl_first_files, 1, Conf) of
+ false -> ok;
+ {_, Files} ->
+ Names = [[" ", case lists:reverse(F) of
+ "lre." ++ Elif -> lists:reverse(Elif);
+ "lrx." ++ Elif -> lists:reverse(Elif);
+ "lry." ++ Elif -> lists:reverse(Elif);
+ Elif -> lists:reverse(Elif)
+ end] || "src/" ++ F <- Files],
+ Write(io_lib:format("COMPILE_FIRST +=~s\n", [Names]))
+ end
+ end(),
+ Write("\n\nrebar_dep: preprocess pre-deps deps pre-app app\n"),
+ Write("\npreprocess::\n"),
+ Write("\npre-deps::\n"),
+ Write("\npre-app::\n"),
+ PatchHook = fun(Cmd) ->
+ Cmd2 = re:replace(Cmd, "^([g]?make)(.*)( -C.*)", "\\\\1\\\\3\\\\2", [{return, list}]),
+ case Cmd2 of
+ "make -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1);
+ "gmake -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1);
+ "make " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1);
+ "gmake " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1);
+ _ -> Escape(Cmd)
+ end
+ end,
+ fun() ->
+ case lists:keyfind(pre_hooks, 1, Conf) of
+ false -> ok;
+ {_, Hooks} ->
+ [case H of
+ {'get-deps', Cmd} ->
+ Write("\npre-deps::\n\t" ++ PatchHook(Cmd) ++ "\n");
+ {compile, Cmd} ->
+ Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n");
+ {Regex, compile, Cmd} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n");
+ false -> ok
+ end;
+ _ -> ok
+ end || H <- Hooks]
+ end
+ end(),
+ ShellToMk = fun(V0) ->
+ V1 = re:replace(V0, "[$$][(]", "$$\(shell ", [global]),
+ V = re:replace(V1, "([$$])(?![(])(\\\\w*)", "\\\\1(\\\\2)", [global]),
+ re:replace(V, "-Werror\\\\b", "", [{return, list}, global])
+ end,
+ PortSpecs = fun() ->
+ case lists:keyfind(port_specs, 1, Conf) of
+ false ->
+ case filelib:is_dir("$(call core_native_path,$(DEPS_DIR)/$1/c_src)") of
+ false -> [];
+ true ->
+ [{"priv/" ++ proplists:get_value(so_name, Conf, "$(1)_drv.so"),
+ proplists:get_value(port_sources, Conf, ["c_src/*.c"]), []}]
+ end;
+ {_, Specs} ->
+ lists:flatten([case S of
+ {Output, Input} -> {ShellToMk(Output), Input, []};
+ {Regex, Output, Input} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {ShellToMk(Output), Input, []};
+ false -> []
+ end;
+ {Regex, Output, Input, [{env, Env}]} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {ShellToMk(Output), Input, Env};
+ false -> []
+ end
+ end || S <- Specs])
+ end
+ end(),
+ PortSpecWrite = fun (Text) ->
+ file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/c_src/Makefile.erlang.mk)", Text, [append])
+ end,
+ case PortSpecs of
+ [] -> ok;
+ _ ->
+ Write("\npre-app::\n\t@$$\(MAKE) --no-print-directory -f c_src/Makefile.erlang.mk\n"),
+ PortSpecWrite(io_lib:format("ERL_CFLAGS ?= -finline-functions -Wall -fPIC -I \\"~s/erts-~s/include\\" -I \\"~s\\"\n",
+ [code:root_dir(), erlang:system_info(version), code:lib_dir(erl_interface, include)])),
+ PortSpecWrite(io_lib:format("ERL_LDFLAGS ?= -L \\"~s\\" -lei\n",
+ [code:lib_dir(erl_interface, lib)])),
+ [PortSpecWrite(["\n", E, "\n"]) || E <- OsEnv],
+ FilterEnv = fun(Env) ->
+ lists:flatten([case E of
+ {_, _} -> E;
+ {Regex, K, V} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {K, V};
+ false -> []
+ end
+ end || E <- Env])
+ end,
+ MergeEnv = fun(Env) ->
+ lists:foldl(fun ({K, V}, Acc) ->
+ case lists:keyfind(K, 1, Acc) of
+ false -> [{K, rebar_utils:expand_env_variable(V, K, "")}|Acc];
+ {_, V0} -> [{K, rebar_utils:expand_env_variable(V, K, V0)}|Acc]
+ end
+ end, [], Env)
+ end,
+ PortEnv = case lists:keyfind(port_env, 1, Conf) of
+ false -> [];
+ {_, PortEnv0} -> FilterEnv(PortEnv0)
+ end,
+ PortSpec = fun ({Output, Input0, Env}) ->
+ filelib:ensure_dir("$(call core_native_path,$(DEPS_DIR)/$1/)" ++ Output),
+ Input = [[" ", I] || I <- Input0],
+ PortSpecWrite([
+ [["\n", K, " = ", ShellToMk(V)] || {K, V} <- lists:reverse(MergeEnv(PortEnv))],
+ case $(PLATFORM) of
+ darwin -> "\n\nLDFLAGS += -flat_namespace -undefined suppress";
+ _ -> ""
+ end,
+ "\n\nall:: ", Output, "\n\t@:\n\n",
+ "%.o: %.c\n\t$$\(CC) -c -o $$\@ $$\< $$\(CFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.C\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.cc\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.cpp\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ [[Output, ": ", K, " += ", ShellToMk(V), "\n"] || {K, V} <- lists:reverse(MergeEnv(FilterEnv(Env)))],
+ Output, ": $$\(foreach ext,.c .C .cc .cpp,",
+ "$$\(patsubst %$$\(ext),%.o,$$\(filter %$$\(ext),$$\(wildcard", Input, "))))\n",
+ "\t$$\(CC) -o $$\@ $$\? $$\(LDFLAGS) $$\(ERL_LDFLAGS) $$\(DRV_LDFLAGS) $$\(EXE_LDFLAGS)",
+ case {filename:extension(Output), $(PLATFORM)} of
+ {[], _} -> "\n";
+ {_, darwin} -> "\n";
+ _ -> " -shared\n"
+ end])
+ end,
+ [PortSpec(S) || S <- PortSpecs]
+ end,
+ fun() ->
+ case lists:keyfind(plugins, 1, Conf) of
+ false -> ok;
+ {_, Plugins0} ->
+ Plugins = [P || P <- Plugins0, is_tuple(P)],
+ case lists:keyfind('lfe-compile', 1, Plugins) of
+ false -> ok;
+ _ -> Write("\nBUILD_DEPS = lfe lfe.mk\ndep_lfe.mk = git https://github.com/ninenines/lfe.mk master\nDEP_PLUGINS = lfe.mk\n")
+ end
+ end
+ end(),
+ Write("\ninclude $$\(if $$\(ERLANG_MK_FILENAME),$$\(ERLANG_MK_FILENAME),erlang.mk)"),
+ RunPlugin = fun(Plugin, Step) ->
+ case erlang:function_exported(Plugin, Step, 2) of
+ false -> ok;
+ true ->
+ c:cd("$(call core_native_path,$(DEPS_DIR)/$1/)"),
+ Ret = Plugin:Step({config, "", Conf, dict:new(), dict:new(), dict:new(),
+ dict:store(base_dir, "", dict:new())}, undefined),
+ io:format("rebar plugin ~p step ~p ret ~p~n", [Plugin, Step, Ret])
+ end
+ end,
+ fun() ->
+ case lists:keyfind(plugins, 1, Conf) of
+ false -> ok;
+ {_, Plugins0} ->
+ Plugins = [P || P <- Plugins0, is_atom(P)],
+ [begin
+ case lists:keyfind(deps, 1, Conf) of
+ false -> ok;
+ {_, Deps} ->
+ case lists:keyfind(P, 1, Deps) of
+ false -> ok;
+ _ ->
+ Path = "$(call core_native_path,$(DEPS_DIR)/)" ++ atom_to_list(P),
+ io:format("~s", [os:cmd("$(MAKE) -C $(call core_native_path,$(DEPS_DIR)/$1) " ++ Path)]),
+ io:format("~s", [os:cmd("$(MAKE) -C " ++ Path ++ " IS_DEP=1")]),
+ code:add_patha(Path ++ "/ebin")
+ end
+ end
+ end || P <- Plugins],
+ [case code:load_file(P) of
+ {module, P} -> ok;
+ _ ->
+ case lists:keyfind(plugin_dir, 1, Conf) of
+ false -> ok;
+ {_, PluginsDir} ->
+ ErlFile = "$(call core_native_path,$(DEPS_DIR)/$1/)" ++ PluginsDir ++ "/" ++ atom_to_list(P) ++ ".erl",
+ {ok, P, Bin} = compile:file(ErlFile, [binary]),
+ {module, P} = code:load_binary(P, ErlFile, Bin)
+ end
+ end || P <- Plugins],
+ [RunPlugin(P, preprocess) || P <- Plugins],
+ [RunPlugin(P, pre_compile) || P <- Plugins],
+ [RunPlugin(P, compile) || P <- Plugins]
+ end
+ end(),
+ halt()
+endef
+
+define dep_autopatch_appsrc_script.erl
+ AppSrc = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+ AppSrcScript = AppSrc ++ ".script",
+ {ok, Conf0} = file:consult(AppSrc),
+ Bindings0 = erl_eval:new_bindings(),
+ Bindings1 = erl_eval:add_binding('CONFIG', Conf0, Bindings0),
+ Bindings = erl_eval:add_binding('SCRIPT', AppSrcScript, Bindings1),
+ Conf = case file:script(AppSrcScript, Bindings) of
+ {ok, [C]} -> C;
+ {ok, C} -> C
+ end,
+ ok = file:write_file(AppSrc, io_lib:format("~p.~n", [Conf])),
+ halt()
+endef
+
+define dep_autopatch_appsrc.erl
+ AppSrcOut = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+ AppSrcIn = case filelib:is_regular(AppSrcOut) of false -> "$(call core_native_path,$(DEPS_DIR)/$1/ebin/$1.app)"; true -> AppSrcOut end,
+ case filelib:is_regular(AppSrcIn) of
+ false -> ok;
+ true ->
+ {ok, [{application, $(1), L0}]} = file:consult(AppSrcIn),
+ L1 = lists:keystore(modules, 1, L0, {modules, []}),
+ L2 = case lists:keyfind(vsn, 1, L1) of
+ {_, git} -> lists:keyreplace(vsn, 1, L1, {vsn, lists:droplast(os:cmd("git -C $(DEPS_DIR)/$1 describe --dirty --tags --always"))});
+ {_, {cmd, _}} -> lists:keyreplace(vsn, 1, L1, {vsn, "cmd"});
+ _ -> L1
+ end,
+ L3 = case lists:keyfind(registered, 1, L2) of false -> [{registered, []}|L2]; _ -> L2 end,
+ ok = file:write_file(AppSrcOut, io_lib:format("~p.~n", [{application, $(1), L3}])),
+ case AppSrcOut of AppSrcIn -> ok; _ -> ok = file:delete(AppSrcIn) end
+ end,
+ halt()
+endef
+
+define dep_fetch_git
+ git clone -q -n -- $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && git checkout -q $(call dep_commit,$(1));
+endef
+
+define dep_fetch_git-subfolder
+ mkdir -p $(ERLANG_MK_TMP)/git-subfolder; \
+ git clone -q -n -- $(call dep_repo,$1) \
+ $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1); \
+ cd $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1) \
+ && git checkout -q $(call dep_commit,$1); \
+ ln -s $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1)/$(word 4,$(dep_$(1))) \
+ $(DEPS_DIR)/$(call dep_name,$1);
+endef
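+
+# Illustrative git-subfolder specification (all values are placeholders);
+# the fourth word names the subfolder that gets linked into $(DEPS_DIR):
+#     dep_my_app = git-subfolder https://github.com/user/monorepo master apps/my_app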
+
+define dep_fetch_git-submodule
+ git submodule update --init -- $(DEPS_DIR)/$1;
+endef
+
+define dep_fetch_hg
+ hg clone -q -U $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && hg update -q $(call dep_commit,$(1));
+endef
+
+define dep_fetch_svn
+ svn checkout -q $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+define dep_fetch_cp
+ cp -R $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+define dep_fetch_ln
+ ln -s $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+# Hex only has a package version. No need to look in the Erlang.mk packages.
+define dep_fetch_hex
+ mkdir -p $(ERLANG_MK_TMP)/hex $(DEPS_DIR)/$1; \
+ $(call core_http_get,$(ERLANG_MK_TMP)/hex/$1.tar,\
+ https://repo.hex.pm/tarballs/$(if $(word 3,$(dep_$1)),$(word 3,$(dep_$1)),$1)-$(strip $(word 2,$(dep_$1))).tar); \
+ tar -xOf $(ERLANG_MK_TMP)/hex/$1.tar contents.tar.gz | tar -C $(DEPS_DIR)/$1 -xzf -;
+endef
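+
+# Illustrative hex specifications (names and versions are placeholders);
+# the optional third word overrides the package name on hex.pm:
+#     dep_jsx = hex 3.0.0
+#     dep_my_app = hex 1.0.0 hexpm_package_name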
+
+define dep_fetch_fail
+ echo "Error: Unknown or invalid dependency: $(1)." >&2; \
+ exit 78;
+endef
+
+# Kept for compatibility with older Erlang.mk configurations.
+define dep_fetch_legacy
+ $(warning WARNING: '$(1)' dependency configuration uses deprecated format.) \
+ git clone -q -n -- $(word 1,$(dep_$(1))) $(DEPS_DIR)/$(1); \
+ cd $(DEPS_DIR)/$(1) && git checkout -q $(if $(word 2,$(dep_$(1))),$(word 2,$(dep_$(1))),master);
+endef
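+
+# Illustrative legacy specification (repository then commit, both
+# placeholders); prefer the fetch-method format shown above:
+#     dep_cowboy = https://github.com/ninenines/cowboy master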
+
+define dep_target
+$(DEPS_DIR)/$(call dep_name,$1): | $(ERLANG_MK_TMP)
+ $(eval DEP_NAME := $(call dep_name,$1))
+ $(eval DEP_STR := $(if $(filter $1,$(DEP_NAME)),$1,"$1 ($(DEP_NAME))"))
+ $(verbose) if test -d $(APPS_DIR)/$(DEP_NAME); then \
+ echo "Error: Dependency" $(DEP_STR) "conflicts with application found in $(APPS_DIR)/$(DEP_NAME)." >&2; \
+ exit 17; \
+ fi
+ $(verbose) mkdir -p $(DEPS_DIR)
+ $(dep_verbose) $(call dep_fetch_$(strip $(call dep_fetch,$(1))),$(1))
+ $(verbose) if [ -f $(DEPS_DIR)/$(1)/configure.ac -o -f $(DEPS_DIR)/$(1)/configure.in ] \
+ && [ ! -f $(DEPS_DIR)/$(1)/configure ]; then \
+ echo " AUTO " $(DEP_STR); \
+ cd $(DEPS_DIR)/$(1) && autoreconf -Wall -vif -I m4; \
+ fi
+ - $(verbose) if [ -f $(DEPS_DIR)/$(DEP_NAME)/configure ]; then \
+ echo " CONF " $(DEP_STR); \
+ cd $(DEPS_DIR)/$(DEP_NAME) && ./configure; \
+ fi
+ifeq ($(filter $(1),$(NO_AUTOPATCH)),)
+ $(verbose) $$(MAKE) --no-print-directory autopatch-$(DEP_NAME)
+endif
+
+.PHONY: autopatch-$(call dep_name,$1)
+
+autopatch-$(call dep_name,$1)::
+ $(verbose) if [ "$(1)" = "amqp_client" -a "$(RABBITMQ_CLIENT_PATCH)" ]; then \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \
+ echo " PATCH Downloading rabbitmq-codegen"; \
+ git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \
+ fi; \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-server ]; then \
+ echo " PATCH Downloading rabbitmq-server"; \
+ git clone https://github.com/rabbitmq/rabbitmq-server.git $(DEPS_DIR)/rabbitmq-server; \
+ fi; \
+ ln -s $(DEPS_DIR)/amqp_client/deps/rabbit_common-0.0.0 $(DEPS_DIR)/rabbit_common; \
+ elif [ "$(1)" = "rabbit" -a "$(RABBITMQ_SERVER_PATCH)" ]; then \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \
+ echo " PATCH Downloading rabbitmq-codegen"; \
+ git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \
+ fi \
+ elif [ "$1" = "elixir" -a "$(ELIXIR_PATCH)" ]; then \
+ ln -s lib/elixir/ebin $(DEPS_DIR)/elixir/; \
+ else \
+ $$(call dep_autopatch,$(call dep_name,$1)) \
+ fi
+endef
+
+$(foreach dep,$(BUILD_DEPS) $(DEPS),$(eval $(call dep_target,$(dep))))
+
+ifndef IS_APP
+clean:: clean-apps
+
+clean-apps:
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ $(MAKE) -C $$dep clean IS_APP=1; \
+ done
+
+distclean:: distclean-apps
+
+distclean-apps:
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ $(MAKE) -C $$dep distclean IS_APP=1; \
+ done
+endif
+
+ifndef SKIP_DEPS
+distclean:: distclean-deps
+
+distclean-deps:
+ $(gen_verbose) rm -rf $(DEPS_DIR)
+endif
+
+# Forward-declare variables used in core/deps-tools.mk. This is required
+# in case plugins use them.
+
+ERLANG_MK_RECURSIVE_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-deps-list.log
+ERLANG_MK_RECURSIVE_DOC_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-doc-deps-list.log
+ERLANG_MK_RECURSIVE_REL_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-rel-deps-list.log
+ERLANG_MK_RECURSIVE_TEST_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-test-deps-list.log
+ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-shell-deps-list.log
+
+ERLANG_MK_QUERY_DEPS_FILE = $(ERLANG_MK_TMP)/query-deps.log
+ERLANG_MK_QUERY_DOC_DEPS_FILE = $(ERLANG_MK_TMP)/query-doc-deps.log
+ERLANG_MK_QUERY_REL_DEPS_FILE = $(ERLANG_MK_TMP)/query-rel-deps.log
+ERLANG_MK_QUERY_TEST_DEPS_FILE = $(ERLANG_MK_TMP)/query-test-deps.log
+ERLANG_MK_QUERY_SHELL_DEPS_FILE = $(ERLANG_MK_TMP)/query-shell-deps.log
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: clean-app
+
+# Configuration.
+
+ERLC_OPTS ?= -Werror +debug_info +warn_export_vars +warn_shadow_vars \
+ +warn_obsolete_guard # +bin_opt_info +warn_export_all +warn_missing_spec
+COMPILE_FIRST ?=
+COMPILE_FIRST_PATHS = $(addprefix src/,$(addsuffix .erl,$(COMPILE_FIRST)))
+ERLC_EXCLUDE ?=
+ERLC_EXCLUDE_PATHS = $(addprefix src/,$(addsuffix .erl,$(ERLC_EXCLUDE)))
+
+ERLC_ASN1_OPTS ?=
+
+ERLC_MIB_OPTS ?=
+COMPILE_MIB_FIRST ?=
+COMPILE_MIB_FIRST_PATHS = $(addprefix mibs/,$(addsuffix .mib,$(COMPILE_MIB_FIRST)))
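+
+# Illustrative overrides from a project Makefile (values are examples only):
+#     ERLC_OPTS += +warn_missing_spec
+#     COMPILE_FIRST = my_behaviour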
+
+# Verbosity.
+
+app_verbose_0 = @echo " APP " $(PROJECT);
+app_verbose_2 = set -x;
+app_verbose = $(app_verbose_$(V))
+
+appsrc_verbose_0 = @echo " APP " $(PROJECT).app.src;
+appsrc_verbose_2 = set -x;
+appsrc_verbose = $(appsrc_verbose_$(V))
+
+makedep_verbose_0 = @echo " DEPEND" $(PROJECT).d;
+makedep_verbose_2 = set -x;
+makedep_verbose = $(makedep_verbose_$(V))
+
+erlc_verbose_0 = @echo " ERLC " $(filter-out $(patsubst %,%.erl,$(ERLC_EXCLUDE)),\
+ $(filter %.erl %.core,$(?F)));
+erlc_verbose_2 = set -x;
+erlc_verbose = $(erlc_verbose_$(V))
+
+xyrl_verbose_0 = @echo " XYRL " $(filter %.xrl %.yrl,$(?F));
+xyrl_verbose_2 = set -x;
+xyrl_verbose = $(xyrl_verbose_$(V))
+
+asn1_verbose_0 = @echo " ASN1 " $(filter %.asn1,$(?F));
+asn1_verbose_2 = set -x;
+asn1_verbose = $(asn1_verbose_$(V))
+
+mib_verbose_0 = @echo " MIB " $(filter %.bin %.mib,$(?F));
+mib_verbose_2 = set -x;
+mib_verbose = $(mib_verbose_$(V))
+
+ifneq ($(wildcard src/),)
+
+# Targets.
+
+app:: $(if $(wildcard ebin/test),clean) deps
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d
+ $(verbose) $(MAKE) --no-print-directory app-build
+
+ifeq ($(wildcard src/$(PROJECT_MOD).erl),)
+define app_file
+{application, '$(PROJECT)', [
+ {description, "$(PROJECT_DESCRIPTION)"},
+ {vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP),
+ {id$(comma)$(space)"$(1)"}$(comma))
+ {modules, [$(call comma_list,$(2))]},
+ {registered, []},
+ {applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(foreach dep,$(DEPS),$(call dep_name,$(dep))))]},
+ {env, $(subst \,\\,$(PROJECT_ENV))}$(if $(findstring {,$(PROJECT_APP_EXTRA_KEYS)),$(comma)$(newline)$(tab)$(subst \,\\,$(PROJECT_APP_EXTRA_KEYS)),)
+]}.
+endef
+else
+define app_file
+{application, '$(PROJECT)', [
+ {description, "$(PROJECT_DESCRIPTION)"},
+ {vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP),
+ {id$(comma)$(space)"$(1)"}$(comma))
+ {modules, [$(call comma_list,$(2))]},
+ {registered, [$(call comma_list,$(PROJECT)_sup $(PROJECT_REGISTERED))]},
+ {applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(foreach dep,$(DEPS),$(call dep_name,$(dep))))]},
+ {mod, {$(PROJECT_MOD), []}},
+ {env, $(subst \,\\,$(PROJECT_ENV))}$(if $(findstring {,$(PROJECT_APP_EXTRA_KEYS)),$(comma)$(newline)$(tab)$(subst \,\\,$(PROJECT_APP_EXTRA_KEYS)),)
+]}.
+endef
+endif
+
+app-build: ebin/$(PROJECT).app
+ $(verbose) :
+
+# Source files.
+
+ALL_SRC_FILES := $(sort $(call core_find,src/,*))
+
+ERL_FILES := $(filter %.erl,$(ALL_SRC_FILES))
+CORE_FILES := $(filter %.core,$(ALL_SRC_FILES))
+
+# ASN.1 files.
+
+ifneq ($(wildcard asn1/),)
+ASN1_FILES = $(sort $(call core_find,asn1/,*.asn1))
+ERL_FILES += $(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES))))
+
+define compile_asn1
+ $(verbose) mkdir -p include/
+ $(asn1_verbose) erlc -v -I include/ -o asn1/ +noobj $(ERLC_ASN1_OPTS) $(1)
+ $(verbose) mv asn1/*.erl src/
+ -$(verbose) mv asn1/*.hrl include/
+ $(verbose) mv asn1/*.asn1db include/
+endef
+
+$(PROJECT).d:: $(ASN1_FILES)
+ $(if $(strip $?),$(call compile_asn1,$?))
+endif
+
+# SNMP MIB files.
+
+ifneq ($(wildcard mibs/),)
+MIB_FILES = $(sort $(call core_find,mibs/,*.mib))
+
+$(PROJECT).d:: $(COMPILE_MIB_FIRST_PATHS) $(MIB_FILES)
+ $(verbose) mkdir -p include/ priv/mibs/
+ $(mib_verbose) erlc -v $(ERLC_MIB_OPTS) -o priv/mibs/ -I priv/mibs/ $?
+ $(mib_verbose) erlc -o include/ -- $(addprefix priv/mibs/,$(patsubst %.mib,%.bin,$(notdir $?)))
+endif
+
+# Leex and Yecc files.
+
+XRL_FILES := $(filter %.xrl,$(ALL_SRC_FILES))
+XRL_ERL_FILES = $(addprefix src/,$(patsubst %.xrl,%.erl,$(notdir $(XRL_FILES))))
+ERL_FILES += $(XRL_ERL_FILES)
+
+YRL_FILES := $(filter %.yrl,$(ALL_SRC_FILES))
+YRL_ERL_FILES = $(addprefix src/,$(patsubst %.yrl,%.erl,$(notdir $(YRL_FILES))))
+ERL_FILES += $(YRL_ERL_FILES)
+
+$(PROJECT).d:: $(XRL_FILES) $(YRL_FILES)
+ $(if $(strip $?),$(xyrl_verbose) erlc -v -o src/ $(YRL_ERLC_OPTS) $?)
+
+# Erlang and Core Erlang files.
+
+define makedep.erl
+ E = ets:new(makedep, [bag]),
+ G = digraph:new([acyclic]),
+ ErlFiles = lists:usort(string:tokens("$(ERL_FILES)", " ")),
+ DepsDir = "$(call core_native_path,$(DEPS_DIR))",
+ AppsDir = "$(call core_native_path,$(APPS_DIR))",
+ DepsDirsSrc = "$(if $(wildcard $(DEPS_DIR)/*/src), $(call core_native_path,$(wildcard $(DEPS_DIR)/*/src)))",
+ DepsDirsInc = "$(if $(wildcard $(DEPS_DIR)/*/include), $(call core_native_path,$(wildcard $(DEPS_DIR)/*/include)))",
+ AppsDirsSrc = "$(if $(wildcard $(APPS_DIR)/*/src), $(call core_native_path,$(wildcard $(APPS_DIR)/*/src)))",
+ AppsDirsInc = "$(if $(wildcard $(APPS_DIR)/*/include), $(call core_native_path,$(wildcard $(APPS_DIR)/*/include)))",
+ DepsDirs = lists:usort(string:tokens(DepsDirsSrc++DepsDirsInc, " ")),
+ AppsDirs = lists:usort(string:tokens(AppsDirsSrc++AppsDirsInc, " ")),
+ Modules = [{list_to_atom(filename:basename(F, ".erl")), F} || F <- ErlFiles],
+ Add = fun (Mod, Dep) ->
+ case lists:keyfind(Dep, 1, Modules) of
+ false -> ok;
+ {_, DepFile} ->
+ {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+ ets:insert(E, {ModFile, DepFile}),
+ digraph:add_vertex(G, Mod),
+ digraph:add_vertex(G, Dep),
+ digraph:add_edge(G, Mod, Dep)
+ end
+ end,
+ AddHd = fun (F, Mod, DepFile) ->
+ case file:open(DepFile, [read]) of
+ {error, enoent} ->
+ ok;
+ {ok, Fd} ->
+ {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+ case ets:match(E, {ModFile, DepFile}) of
+ [] ->
+ ets:insert(E, {ModFile, DepFile}),
+ F(F, Fd, Mod,0);
+ _ -> ok
+ end
+ end
+ end,
+ SearchHrl = fun
+ F(_Hrl, []) -> {error,enoent};
+ F(Hrl, [Dir|Dirs]) ->
+ HrlF = filename:join([Dir,Hrl]),
+ case filelib:is_file(HrlF) of
+ true ->
+ {ok, HrlF};
+ false -> F(Hrl,Dirs)
+ end
+ end,
+ Attr = fun
+ (_F, Mod, behavior, Dep) ->
+ Add(Mod, Dep);
+ (_F, Mod, behaviour, Dep) ->
+ Add(Mod, Dep);
+ (_F, Mod, compile, {parse_transform, Dep}) ->
+ Add(Mod, Dep);
+ (_F, Mod, compile, Opts) when is_list(Opts) ->
+ case proplists:get_value(parse_transform, Opts) of
+ undefined -> ok;
+ Dep -> Add(Mod, Dep)
+ end;
+ (F, Mod, include, Hrl) ->
+ case SearchHrl(Hrl, ["src", "include",AppsDir,DepsDir]++AppsDirs++DepsDirs) of
+ {ok, FoundHrl} -> AddHd(F, Mod, FoundHrl);
+ {error, _} -> false
+ end;
+ (F, Mod, include_lib, Hrl) ->
+ case SearchHrl(Hrl, ["src", "include",AppsDir,DepsDir]++AppsDirs++DepsDirs) of
+ {ok, FoundHrl} -> AddHd(F, Mod, FoundHrl);
+ {error, _} -> false
+ end;
+ (F, Mod, import, {Imp, _}) ->
+ IsFile =
+ case lists:keyfind(Imp, 1, Modules) of
+ false -> false;
+ {_, FilePath} -> filelib:is_file(FilePath)
+ end,
+ case IsFile of
+ false -> ok;
+ true -> Add(Mod, Imp)
+ end;
+ (_, _, _, _) -> ok
+ end,
+ MakeDepend = fun
+ (F, Fd, Mod, StartLocation) ->
+ {ok, Filename} = file:pid2name(Fd),
+ case io:parse_erl_form(Fd, undefined, StartLocation) of
+ {ok, AbsData, EndLocation} ->
+ case AbsData of
+ {attribute, _, Key, Value} ->
+ Attr(F, Mod, Key, Value),
+ F(F, Fd, Mod, EndLocation);
+ _ -> F(F, Fd, Mod, EndLocation)
+ end;
+ {eof, _ } -> file:close(Fd);
+ {error, ErrorDescription } ->
+ file:close(Fd);
+ {error, ErrorInfo, ErrorLocation} ->
+ F(F, Fd, Mod, ErrorLocation)
+ end,
+ ok
+ end,
+ [begin
+ Mod = list_to_atom(filename:basename(F, ".erl")),
+ case file:open(F, [read]) of
+ {ok, Fd} -> MakeDepend(MakeDepend, Fd, Mod,0);
+ {error, enoent} -> ok
+ end
+ end || F <- ErlFiles],
+ Depend = sofs:to_external(sofs:relation_to_family(sofs:relation(ets:tab2list(E)))),
+ CompileFirst = [X || X <- lists:reverse(digraph_utils:topsort(G)), [] =/= digraph:in_neighbours(G, X)],
+ TargetPath = fun(Target) ->
+ case lists:keyfind(Target, 1, Modules) of
+ false -> "";
+ {_, DepFile} ->
+ DirSubname = tl(string:tokens(filename:dirname(DepFile), "/")),
+ string:join(DirSubname ++ [atom_to_list(Target)], "/")
+ end
+ end,
+ Output0 = [
+ "# Generated by Erlang.mk. Edit at your own risk!\n\n",
+ [[F, "::", [[" ", D] || D <- Deps], "; @touch \$$@\n"] || {F, Deps} <- Depend],
+ "\nCOMPILE_FIRST +=", [[" ", TargetPath(CF)] || CF <- CompileFirst], "\n"
+ ],
+ Output = case "é" of
+ [233] -> unicode:characters_to_binary(Output0);
+ _ -> Output0
+ end,
+ ok = file:write_file("$(1)", Output),
+ halt()
+endef
+
+ifeq ($(if $(NO_MAKEDEP),$(wildcard $(PROJECT).d),),)
+$(PROJECT).d:: $(ERL_FILES) $(call core_find,include/,*.hrl) $(MAKEFILE_LIST)
+ $(makedep_verbose) $(call erlang,$(call makedep.erl,$@))
+endif
+
+ifeq ($(IS_APP)$(IS_DEP),)
+ifneq ($(words $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES)),0)
+# Rebuild everything when the Makefile changes.
+$(ERLANG_MK_TMP)/last-makefile-change: $(MAKEFILE_LIST) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES); \
+ touch -c $(PROJECT).d; \
+ fi
+ $(verbose) touch $@
+
+$(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES):: $(ERLANG_MK_TMP)/last-makefile-change
+ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change
+endif
+endif
+
+$(PROJECT).d::
+ $(verbose) :
+
+include $(wildcard $(PROJECT).d)
+
+ebin/$(PROJECT).app:: ebin/
+
+ebin/:
+ $(verbose) mkdir -p ebin/
+
+define compile_erl
+ $(erlc_verbose) erlc -v $(if $(IS_DEP),$(filter-out -Werror,$(ERLC_OPTS)),$(ERLC_OPTS)) -o ebin/ \
+ -pa ebin/ -I include/ $(filter-out $(ERLC_EXCLUDE_PATHS),$(COMPILE_FIRST_PATHS) $(1))
+endef
+
+define validate_app_file
+ case file:consult("ebin/$(PROJECT).app") of
+ {ok, _} -> halt();
+ _ -> halt(1)
+ end
+endef
+
+ebin/$(PROJECT).app:: $(ERL_FILES) $(CORE_FILES) $(wildcard src/$(PROJECT).app.src)
+ $(eval FILES_TO_COMPILE := $(filter-out src/$(PROJECT).app.src,$?))
+ $(if $(strip $(FILES_TO_COMPILE)),$(call compile_erl,$(FILES_TO_COMPILE)))
+# Older git versions do not have the --first-parent flag. Do without in that case.
+ $(eval GITDESCRIBE := $(shell git describe --dirty --abbrev=7 --tags --always --first-parent 2>/dev/null \
+ || git describe --dirty --abbrev=7 --tags --always 2>/dev/null || true))
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(filter-out $(ERLC_EXCLUDE_PATHS),$(ERL_FILES) $(CORE_FILES) $(BEAM_FILES)))))))
+ifeq ($(wildcard src/$(PROJECT).app.src),)
+ $(app_verbose) printf '$(subst %,%%,$(subst $(newline),\n,$(subst ','\'',$(call app_file,$(GITDESCRIBE),$(MODULES)))))' \
+ > ebin/$(PROJECT).app
+ $(verbose) if ! $(call erlang,$(call validate_app_file)); then \
+ echo "The .app file produced is invalid. Please verify the value of PROJECT_ENV." >&2; \
+ exit 1; \
+ fi
+else
+ $(verbose) if [ -z "$$(grep -e '^[^%]*{\s*modules\s*,' src/$(PROJECT).app.src)" ]; then \
+ echo "Empty modules entry not found in $(PROJECT).app.src. Please consult the erlang.mk documentation for instructions." >&2; \
+ exit 1; \
+ fi
+ $(appsrc_verbose) cat src/$(PROJECT).app.src \
+ | sed "s/{[[:space:]]*modules[[:space:]]*,[[:space:]]*\[\]}/{modules, \[$(call comma_list,$(MODULES))\]}/" \
+ | sed "s/{id,[[:space:]]*\"git\"}/{id, \"$(subst /,\/,$(GITDESCRIBE))\"}/" \
+ > ebin/$(PROJECT).app
+endif
+ifneq ($(wildcard src/$(PROJECT).appup),)
+ $(verbose) cp src/$(PROJECT).appup ebin/
+endif
+
+clean:: clean-app
+
+clean-app:
+ $(gen_verbose) rm -rf $(PROJECT).d ebin/ priv/mibs/ $(XRL_ERL_FILES) $(YRL_ERL_FILES) \
+ $(addprefix include/,$(patsubst %.mib,%.hrl,$(notdir $(MIB_FILES)))) \
+ $(addprefix include/,$(patsubst %.asn1,%.hrl,$(notdir $(ASN1_FILES)))) \
+ $(addprefix include/,$(patsubst %.asn1,%.asn1db,$(notdir $(ASN1_FILES)))) \
+ $(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES))))
+
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Viktor Söderqvist <viktor@zuiderkwast.se>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: docs-deps
+
+# Configuration.
+
+ALL_DOC_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(DOC_DEPS))
+
+# Targets.
+
+$(foreach dep,$(DOC_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+doc-deps:
+else
+doc-deps: $(ALL_DOC_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_DOC_DEPS_DIRS) ; do $(MAKE) -C $$dep IS_DEP=1; done
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: rel-deps
+
+# Configuration.
+
+ALL_REL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(REL_DEPS))
+
+# Targets.
+
+$(foreach dep,$(REL_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+rel-deps:
+else
+rel-deps: $(ALL_REL_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_REL_DEPS_DIRS) ; do $(MAKE) -C $$dep; done
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: test-deps test-dir test-build clean-test-dir
+
+# Configuration.
+
+TEST_DIR ?= $(CURDIR)/test
+
+ALL_TEST_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(TEST_DEPS))
+
+TEST_ERLC_OPTS ?= +debug_info +warn_export_vars +warn_shadow_vars +warn_obsolete_guard
+TEST_ERLC_OPTS += -DTEST=1
+
+# Targets.
+
+$(foreach dep,$(TEST_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+test-deps:
+else
+test-deps: $(ALL_TEST_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_TEST_DEPS_DIRS) ; do \
+ if [ -z "$(strip $(FULL))" ] && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ else \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ fi \
+ done
+endif
+
+ifneq ($(wildcard $(TEST_DIR)),)
+test-dir: $(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build
+ @:
+
+test_erlc_verbose_0 = @echo " ERLC " $(filter-out $(patsubst %,%.erl,$(ERLC_EXCLUDE)),\
+ $(filter %.erl %.core,$(notdir $(FILES_TO_COMPILE))));
+test_erlc_verbose_2 = set -x;
+test_erlc_verbose = $(test_erlc_verbose_$(V))
+
+define compile_test_erl
+ $(test_erlc_verbose) erlc -v $(TEST_ERLC_OPTS) -o $(TEST_DIR) \
+ -pa ebin/ -I include/ $(1)
+endef
+
+ERL_TEST_FILES = $(call core_find,$(TEST_DIR)/,*.erl)
+$(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build: $(ERL_TEST_FILES) $(MAKEFILE_LIST)
+ $(eval FILES_TO_COMPILE := $(if $(filter $(MAKEFILE_LIST),$?),$(filter $(ERL_TEST_FILES),$^),$?))
+ $(if $(strip $(FILES_TO_COMPILE)),$(call compile_test_erl,$(FILES_TO_COMPILE)) && touch $@)
+endif
+
+test-build:: IS_TEST=1
+test-build:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build:: $(if $(wildcard src),$(if $(wildcard ebin/test),,clean)) $(if $(IS_APP),,deps test-deps)
+# We already compiled everything when IS_APP=1.
+ifndef IS_APP
+ifneq ($(wildcard src),)
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(verbose) $(MAKE) --no-print-directory app-build ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(gen_verbose) touch ebin/test
+endif
+ifneq ($(wildcard $(TEST_DIR)),)
+ $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+endif
+endif
+
+# Roughly the same as test-build, but when IS_APP=1.
+# We only care about compiling the current application.
+ifdef IS_APP
+test-build-app:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build-app:: deps test-deps
+ifneq ($(wildcard src),)
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(verbose) $(MAKE) --no-print-directory app-build ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(gen_verbose) touch ebin/test
+endif
+ifneq ($(wildcard $(TEST_DIR)),)
+ $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+endif
+endif
+
+clean:: clean-test-dir
+
+clean-test-dir:
+ifneq ($(wildcard $(TEST_DIR)/*.beam),)
+ $(gen_verbose) rm -f $(TEST_DIR)/*.beam $(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: rebar.config
+
+# We strip out -Werror because we don't want to fail due to
+# warnings when used as a dependency.
+
+compat_prepare_erlc_opts = $(shell echo "$1" | sed 's/, */,/g')
+
+define compat_convert_erlc_opts
+$(if $(filter-out -Werror,$1),\
+ $(if $(findstring +,$1),\
+ $(shell echo $1 | cut -b 2-)))
+endef
+
+define compat_erlc_opts_to_list
+[$(call comma_list,$(foreach o,$(call compat_prepare_erlc_opts,$1),$(call compat_convert_erlc_opts,$o)))]
+endef
+
+define compat_rebar_config
+{deps, [
+$(call comma_list,$(foreach d,$(DEPS),\
+ $(if $(filter hex,$(call dep_fetch,$d)),\
+ {$(call dep_name,$d)$(comma)"$(call dep_repo,$d)"},\
+ {$(call dep_name,$d)$(comma)".*"$(comma){git,"$(call dep_repo,$d)"$(comma)"$(call dep_commit,$d)"}})))
+]}.
+{erl_opts, $(call compat_erlc_opts_to_list,$(ERLC_OPTS))}.
+endef
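+
+# Usage sketch: render the template above into a rebar-compatible
+# configuration file:
+#     make rebar.config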
+
+rebar.config:
+ $(gen_verbose) $(call core_render,compat_rebar_config,rebar.config)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter asciideck,$(DEPS) $(DOC_DEPS)),asciideck)
+
+.PHONY: asciidoc asciidoc-guide asciidoc-manual install-asciidoc distclean-asciidoc-guide distclean-asciidoc-manual
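+
+# Illustrative activation from a project Makefile:
+#     DOC_DEPS = asciideck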
+
+# Core targets.
+
+docs:: asciidoc
+
+distclean:: distclean-asciidoc-guide distclean-asciidoc-manual
+
+# Plugin-specific targets.
+
+asciidoc: asciidoc-guide asciidoc-manual
+
+# User guide.
+
+ifeq ($(wildcard doc/src/guide/book.asciidoc),)
+asciidoc-guide:
+else
+asciidoc-guide: distclean-asciidoc-guide doc-deps
+ a2x -v -f pdf doc/src/guide/book.asciidoc && mv doc/src/guide/book.pdf doc/guide.pdf
+ a2x -v -f chunked doc/src/guide/book.asciidoc && mv doc/src/guide/book.chunked/ doc/html/
+
+distclean-asciidoc-guide:
+ $(gen_verbose) rm -rf doc/html/ doc/guide.pdf
+endif
+
+# Man pages.
+
+ASCIIDOC_MANUAL_FILES := $(wildcard doc/src/manual/*.asciidoc)
+
+ifeq ($(ASCIIDOC_MANUAL_FILES),)
+asciidoc-manual:
+else
+
+# Configuration.
+
+MAN_INSTALL_PATH ?= /usr/local/share/man
+MAN_SECTIONS ?= 3 7
+MAN_PROJECT ?= $(shell echo $(PROJECT) | sed 's/^./\U&\E/')
+MAN_VERSION ?= $(PROJECT_VERSION)
+
+# Plugin-specific targets.
+
+define asciidoc2man.erl
+try
+ [begin
+ io:format(" ADOC ~s~n", [F]),
+ ok = asciideck:to_manpage(asciideck:parse_file(F), #{
+ compress => gzip,
+ outdir => filename:dirname(F),
+ extra2 => "$(MAN_PROJECT) $(MAN_VERSION)",
+ extra3 => "$(MAN_PROJECT) Function Reference"
+ })
+ end || F <- [$(shell echo $(addprefix $(comma)\",$(addsuffix \",$1)) | sed 's/^.//')]],
+ halt(0)
+catch C:E ->
+ io:format("Exception ~p:~p~nStacktrace: ~p~n", [C, E, erlang:get_stacktrace()]),
+ halt(1)
+end.
+endef
+
+asciidoc-manual:: doc-deps
+
+asciidoc-manual:: $(ASCIIDOC_MANUAL_FILES)
+ $(gen_verbose) $(call erlang,$(call asciidoc2man.erl,$?))
+ $(verbose) $(foreach s,$(MAN_SECTIONS),mkdir -p doc/man$s/ && mv doc/src/manual/*.$s.gz doc/man$s/;)
+
+install-docs:: install-asciidoc
+
+install-asciidoc: asciidoc-manual
+ $(foreach s,$(MAN_SECTIONS),\
+ mkdir -p $(MAN_INSTALL_PATH)/man$s/ && \
+ install -g `id -g` -o `id -u` -m 0644 doc/man$s/*.gz $(MAN_INSTALL_PATH)/man$s/;)
+
+distclean-asciidoc-manual:
+ $(gen_verbose) rm -rf $(addprefix doc/man,$(MAN_SECTIONS))
+endif
+endif
+
+# Copyright (c) 2014-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: bootstrap bootstrap-lib bootstrap-rel new list-templates
+
+# Core targets.
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Bootstrap targets:" \
+ " bootstrap Generate a skeleton of an OTP application" \
+ " bootstrap-lib Generate a skeleton of an OTP library" \
+ " bootstrap-rel Generate the files needed to build a release" \
+ " new-app in=NAME Create a new local OTP application NAME" \
+ " new-lib in=NAME Create a new local OTP library NAME" \
+ " new t=TPL n=NAME Generate a module NAME based on the template TPL" \
+ " new t=T n=N in=APP Generate a module NAME based on the template TPL in APP" \
+ " list-templates List available templates"
+
+# Bootstrap templates.
+
+define bs_appsrc
+{application, $p, [
+ {description, ""},
+ {vsn, "0.1.0"},
+ {id, "git"},
+ {modules, []},
+ {registered, []},
+ {applications, [
+ kernel,
+ stdlib
+ ]},
+ {mod, {$p_app, []}},
+ {env, []}
+]}.
+endef
+
+define bs_appsrc_lib
+{application, $p, [
+ {description, ""},
+ {vsn, "0.1.0"},
+ {id, "git"},
+ {modules, []},
+ {registered, []},
+ {applications, [
+ kernel,
+ stdlib
+ ]}
+]}.
+endef
+
+# To prevent autocompletion issues with ZSH, we add "include erlang.mk"
+# separately during the actual bootstrap.
+define bs_Makefile
+PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.1.0
+$(if $(SP),
+# Whitespace to be used when creating files from templates.
+SP = $(SP)
+)
+endef
+
+define bs_apps_Makefile
+PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.1.0
+$(if $(SP),
+# Whitespace to be used when creating files from templates.
+SP = $(SP)
+)
+# Make sure we know where the applications are located.
+ROOT_DIR ?= $(call core_relpath,$(dir $(ERLANG_MK_FILENAME)),$(APPS_DIR)/app)
+APPS_DIR ?= ..
+DEPS_DIR ?= $(call core_relpath,$(DEPS_DIR),$(APPS_DIR)/app)
+
+include $$(ROOT_DIR)/erlang.mk
+endef
+
+define bs_app
+-module($p_app).
+-behaviour(application).
+
+-export([start/2]).
+-export([stop/1]).
+
+start(_Type, _Args) ->
+ $p_sup:start_link().
+
+stop(_State) ->
+ ok.
+endef
+
+define bs_relx_config
+{release, {$p_release, "1"}, [$p, sasl, runtime_tools]}.
+{extended_start_script, true}.
+{sys_config, "config/sys.config"}.
+{vm_args, "config/vm.args"}.
+endef
+
+define bs_sys_config
+[
+].
+endef
+
+define bs_vm_args
+-name $p@127.0.0.1
+-setcookie $p
+-heart
+endef
+
+# Normal templates.
+
+define tpl_supervisor
+-module($(n)).
+-behaviour(supervisor).
+
+-export([start_link/0]).
+-export([init/1]).
+
+start_link() ->
+ supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+init([]) ->
+ Procs = [],
+ {ok, {{one_for_one, 1, 5}, Procs}}.
+endef
+
+define tpl_gen_server
+-module($(n)).
+-behaviour(gen_server).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_server.
+-export([init/1]).
+-export([handle_call/3]).
+-export([handle_cast/2]).
+-export([handle_info/2]).
+-export([terminate/2]).
+-export([code_change/3]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_server:start_link(?MODULE, [], []).
+
+%% gen_server.
+
+init([]) ->
+ {ok, #state{}}.
+
+handle_call(_Request, _From, State) ->
+ {reply, ignored, State}.
+
+handle_cast(_Msg, State) ->
+ {noreply, State}.
+
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+endef
+
+define tpl_module
+-module($(n)).
+-export([]).
+endef
+
+define tpl_cowboy_http
+-module($(n)).
+-behaviour(cowboy_http_handler).
+
+-export([init/3]).
+-export([handle/2]).
+-export([terminate/3]).
+
+-record(state, {
+}).
+
+init(_, Req, _Opts) ->
+ {ok, Req, #state{}}.
+
+handle(Req, State=#state{}) ->
+ {ok, Req2} = cowboy_req:reply(200, Req),
+ {ok, Req2, State}.
+
+terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_gen_fsm
+-module($(n)).
+-behaviour(gen_fsm).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_fsm.
+-export([init/1]).
+-export([state_name/2]).
+-export([handle_event/3]).
+-export([state_name/3]).
+-export([handle_sync_event/4]).
+-export([handle_info/3]).
+-export([terminate/3]).
+-export([code_change/4]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_fsm:start_link(?MODULE, [], []).
+
+%% gen_fsm.
+
+init([]) ->
+ {ok, state_name, #state{}}.
+
+state_name(_Event, StateData) ->
+ {next_state, state_name, StateData}.
+
+handle_event(_Event, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+state_name(_Event, _From, StateData) ->
+ {reply, ignored, state_name, StateData}.
+
+handle_sync_event(_Event, _From, StateName, StateData) ->
+ {reply, ignored, StateName, StateData}.
+
+handle_info(_Info, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+terminate(_Reason, _StateName, _StateData) ->
+ ok.
+
+code_change(_OldVsn, StateName, StateData, _Extra) ->
+ {ok, StateName, StateData}.
+endef
+
+define tpl_gen_statem
+-module($(n)).
+-behaviour(gen_statem).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_statem.
+-export([callback_mode/0]).
+-export([init/1]).
+-export([state_name/3]).
+-export([handle_event/4]).
+-export([terminate/3]).
+-export([code_change/4]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_statem:start_link(?MODULE, [], []).
+
+%% gen_statem.
+
+callback_mode() ->
+ state_functions.
+
+init([]) ->
+ {ok, state_name, #state{}}.
+
+state_name(_EventType, _EventData, StateData) ->
+ {next_state, state_name, StateData}.
+
+handle_event(_EventType, _EventData, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+terminate(_Reason, _StateName, _StateData) ->
+ ok.
+
+code_change(_OldVsn, StateName, StateData, _Extra) ->
+ {ok, StateName, StateData}.
+endef
+
+define tpl_cowboy_loop
+-module($(n)).
+-behaviour(cowboy_loop_handler).
+
+-export([init/3]).
+-export([info/3]).
+-export([terminate/3]).
+
+-record(state, {
+}).
+
+init(_, Req, _Opts) ->
+ {loop, Req, #state{}, 5000, hibernate}.
+
+info(_Info, Req, State) ->
+ {loop, Req, State, hibernate}.
+
+terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_cowboy_rest
+-module($(n)).
+
+-export([init/3]).
+-export([content_types_provided/2]).
+-export([get_html/2]).
+
+init(_, _Req, _Opts) ->
+ {upgrade, protocol, cowboy_rest}.
+
+content_types_provided(Req, State) ->
+ {[{{<<"text">>, <<"html">>, '*'}, get_html}], Req, State}.
+
+get_html(Req, State) ->
+ {<<"<html><body>This is REST!</body></html>">>, Req, State}.
+endef
+
+define tpl_cowboy_ws
+-module($(n)).
+-behaviour(cowboy_websocket_handler).
+
+-export([init/3]).
+-export([websocket_init/3]).
+-export([websocket_handle/3]).
+-export([websocket_info/3]).
+-export([websocket_terminate/3]).
+
+-record(state, {
+}).
+
+init(_, _, _) ->
+ {upgrade, protocol, cowboy_websocket}.
+
+websocket_init(_, Req, _Opts) ->
+ Req2 = cowboy_req:compact(Req),
+ {ok, Req2, #state{}}.
+
+websocket_handle({text, Data}, Req, State) ->
+ {reply, {text, Data}, Req, State};
+websocket_handle({binary, Data}, Req, State) ->
+ {reply, {binary, Data}, Req, State};
+websocket_handle(_Frame, Req, State) ->
+ {ok, Req, State}.
+
+websocket_info(_Info, Req, State) ->
+ {ok, Req, State}.
+
+websocket_terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_ranch_protocol
+-module($(n)).
+-behaviour(ranch_protocol).
+
+-export([start_link/4]).
+-export([init/4]).
+
+-type opts() :: [].
+-export_type([opts/0]).
+
+-record(state, {
+ socket :: inet:socket(),
+ transport :: module()
+}).
+
+start_link(Ref, Socket, Transport, Opts) ->
+ Pid = spawn_link(?MODULE, init, [Ref, Socket, Transport, Opts]),
+ {ok, Pid}.
+
+-spec init(ranch:ref(), inet:socket(), module(), opts()) -> ok.
+init(Ref, Socket, Transport, _Opts) ->
+ ok = ranch:accept_ack(Ref),
+ loop(#state{socket=Socket, transport=Transport}).
+
+loop(State) ->
+ loop(State).
+endef
+
+# Plugin-specific targets.
+
+ifndef WS
+ifdef SP
+WS = $(subst a,,a $(wordlist 1,$(SP),a a a a a a a a a a a a a a a a a a a a))
+else
+WS = $(tab)
+endif
+endif
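+
+# Illustrative: SP=4 makes generated files use four-space indentation
+# instead of the default tab.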
+
+bootstrap:
+ifneq ($(wildcard src/),)
+ $(error Error: src/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(eval n := $(PROJECT)_sup)
+ $(verbose) $(call core_render,bs_Makefile,Makefile)
+ $(verbose) echo "include erlang.mk" >> Makefile
+ $(verbose) mkdir src/
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc,src/$(PROJECT).app.src)
+endif
+ $(verbose) $(call core_render,bs_app,src/$(PROJECT)_app.erl)
+ $(verbose) $(call core_render,tpl_supervisor,src/$(PROJECT)_sup.erl)
+
+bootstrap-lib:
+ifneq ($(wildcard src/),)
+ $(error Error: src/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(verbose) $(call core_render,bs_Makefile,Makefile)
+ $(verbose) echo "include erlang.mk" >> Makefile
+ $(verbose) mkdir src/
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc_lib,src/$(PROJECT).app.src)
+endif
+
+bootstrap-rel:
+ifneq ($(wildcard relx.config),)
+ $(error Error: relx.config already exists)
+endif
+ifneq ($(wildcard config/),)
+ $(error Error: config/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(verbose) $(call core_render,bs_relx_config,relx.config)
+ $(verbose) mkdir config/
+ $(verbose) $(call core_render,bs_sys_config,config/sys.config)
+ $(verbose) $(call core_render,bs_vm_args,config/vm.args)
+
+new-app:
+ifndef in
+ $(error Usage: $(MAKE) new-app in=APP)
+endif
+ifneq ($(wildcard $(APPS_DIR)/$in),)
+ $(error Error: Application $in already exists)
+endif
+ $(eval p := $(in))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(eval n := $(in)_sup)
+ $(verbose) mkdir -p $(APPS_DIR)/$p/src/
+ $(verbose) $(call core_render,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile)
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc,$(APPS_DIR)/$p/src/$p.app.src)
+endif
+ $(verbose) $(call core_render,bs_app,$(APPS_DIR)/$p/src/$p_app.erl)
+ $(verbose) $(call core_render,tpl_supervisor,$(APPS_DIR)/$p/src/$p_sup.erl)
+
+new-lib:
+ifndef in
+ $(error Usage: $(MAKE) new-lib in=APP)
+endif
+ifneq ($(wildcard $(APPS_DIR)/$in),)
+ $(error Error: Application $in already exists)
+endif
+ $(eval p := $(in))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(verbose) mkdir -p $(APPS_DIR)/$p/src/
+ $(verbose) $(call core_render,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile)
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc_lib,$(APPS_DIR)/$p/src/$p.app.src)
+endif
+
+new:
+ifeq ($(wildcard src/)$(in),)
+ $(error Error: src/ directory does not exist)
+endif
+ifndef t
+ $(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP])
+endif
+ifndef n
+ $(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP])
+endif
+ifdef in
+ $(verbose) $(call core_render,tpl_$(t),$(APPS_DIR)/$(in)/src/$(n).erl)
+else
+ $(verbose) $(call core_render,tpl_$(t),src/$(n).erl)
+endif
+
+list-templates:
+	$(verbose) echo Available templates:
+ $(verbose) printf " %s\n" $(sort $(patsubst tpl_%,%,$(filter tpl_%,$(.VARIABLES))))
+
+# Copyright (c) 2014-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: clean-c_src distclean-c_src-env
+
+# Configuration.
+
+C_SRC_DIR ?= $(CURDIR)/c_src
+C_SRC_ENV ?= $(C_SRC_DIR)/env.mk
+C_SRC_OUTPUT ?= $(CURDIR)/priv/$(PROJECT)
+C_SRC_TYPE ?= shared
+
+# System type and C compiler/flags.
+
+ifeq ($(PLATFORM),msys2)
+ C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?= .exe
+ C_SRC_OUTPUT_SHARED_EXTENSION ?= .dll
+else
+ C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?=
+ C_SRC_OUTPUT_SHARED_EXTENSION ?= .so
+endif
+
+ifeq ($(C_SRC_TYPE),shared)
+ C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_SHARED_EXTENSION)
+else
+ C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_EXECUTABLE_EXTENSION)
+endif
+
+ifeq ($(PLATFORM),msys2)
+# We hardcode the compiler used on MSYS2. The default CC=cc does
+# not produce working code. The "gcc" MSYS2 package also doesn't.
+ CC = /mingw64/bin/gcc
+ export CC
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),darwin)
+ CC ?= cc
+ CFLAGS ?= -O3 -std=c99 -arch x86_64 -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -arch x86_64 -Wall
+ LDFLAGS ?= -arch x86_64 -flat_namespace -undefined suppress
+else ifeq ($(PLATFORM),freebsd)
+ CC ?= cc
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),linux)
+ CC ?= gcc
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+endif
+
+ifneq ($(PLATFORM),msys2)
+ CFLAGS += -fPIC
+ CXXFLAGS += -fPIC
+endif
+
+CFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
+CXXFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
+
+LDLIBS += -L"$(ERL_INTERFACE_LIB_DIR)" -lei
+
+# Verbosity.
+
+c_verbose_0 = @echo " C " $(filter-out $(notdir $(MAKEFILE_LIST) $(C_SRC_ENV)),$(^F));
+c_verbose = $(c_verbose_$(V))
+
+cpp_verbose_0 = @echo " CPP " $(filter-out $(notdir $(MAKEFILE_LIST) $(C_SRC_ENV)),$(^F));
+cpp_verbose = $(cpp_verbose_$(V))
+
+link_verbose_0 = @echo " LD " $(@F);
+link_verbose = $(link_verbose_$(V))
+
+# Targets.
+
+ifeq ($(wildcard $(C_SRC_DIR)),)
+else ifneq ($(wildcard $(C_SRC_DIR)/Makefile),)
+app:: app-c_src
+
+test-build:: app-c_src
+
+app-c_src:
+ $(MAKE) -C $(C_SRC_DIR)
+
+clean::
+ $(MAKE) -C $(C_SRC_DIR) clean
+
+else
+
+ifeq ($(SOURCES),)
+SOURCES := $(sort $(foreach pat,*.c *.C *.cc *.cpp,$(call core_find,$(C_SRC_DIR)/,$(pat))))
+endif
+OBJECTS = $(addsuffix .o, $(basename $(SOURCES)))
+
+COMPILE_C = $(c_verbose) $(CC) $(CFLAGS) $(CPPFLAGS) -c
+COMPILE_CPP = $(cpp_verbose) $(CXX) $(CXXFLAGS) $(CPPFLAGS) -c
+
+app:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
+
+test-build:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
+
+$(C_SRC_OUTPUT_FILE): $(OBJECTS)
+ $(verbose) mkdir -p $(dir $@)
+ $(link_verbose) $(CC) $(OBJECTS) \
+ $(LDFLAGS) $(if $(filter $(C_SRC_TYPE),shared),-shared) $(LDLIBS) \
+ -o $(C_SRC_OUTPUT_FILE)
+
+$(OBJECTS): $(MAKEFILE_LIST) $(C_SRC_ENV)
+
+%.o: %.c
+ $(COMPILE_C) $(OUTPUT_OPTION) $<
+
+%.o: %.cc
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+%.o: %.C
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+%.o: %.cpp
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+clean:: clean-c_src
+
+clean-c_src:
+ $(gen_verbose) rm -f $(C_SRC_OUTPUT_FILE) $(OBJECTS)
+
+endif
+
+ifneq ($(wildcard $(C_SRC_DIR)),)
+ERL_ERTS_DIR = $(shell $(ERL) -eval 'io:format("~s~n", [code:lib_dir(erts)]), halt().')
+
+$(C_SRC_ENV):
+ $(verbose) $(ERL) -eval "file:write_file(\"$(call core_native_path,$(C_SRC_ENV))\", \
+ io_lib:format( \
+ \"# Generated by Erlang.mk. Edit at your own risk!~n~n\" \
+ \"ERTS_INCLUDE_DIR ?= ~s/erts-~s/include/~n\" \
+ \"ERL_INTERFACE_INCLUDE_DIR ?= ~s~n\" \
+ \"ERL_INTERFACE_LIB_DIR ?= ~s~n\" \
+ \"ERTS_DIR ?= $(ERL_ERTS_DIR)~n\", \
+ [code:root_dir(), erlang:system_info(version), \
+ code:lib_dir(erl_interface, include), \
+ code:lib_dir(erl_interface, lib)])), \
+ halt()."
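+
+# The generated env.mk looks like this (paths are illustrative):
+#   ERTS_INCLUDE_DIR ?= /usr/lib/erlang/erts-12.3/include/
+#   ERL_INTERFACE_INCLUDE_DIR ?= /usr/lib/erlang/lib/erl_interface-5.2.2/include
+#   ERL_INTERFACE_LIB_DIR ?= /usr/lib/erlang/lib/erl_interface-5.2.2/lib
+#   ERTS_DIR ?= /usr/lib/erlang/erts-12.3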
+
+distclean:: distclean-c_src-env
+
+distclean-c_src-env:
+ $(gen_verbose) rm -f $(C_SRC_ENV)
+
+-include $(C_SRC_ENV)
+
+ifneq ($(ERL_ERTS_DIR),$(ERTS_DIR))
+$(shell rm -f $(C_SRC_ENV))
+endif
+endif
+
+# Templates.
+
+define bs_c_nif
+#include "erl_nif.h"
+
+static int loads = 0;
+
+static int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
+{
+ /* Initialize private data. */
+ *priv_data = NULL;
+
+ loads++;
+
+ return 0;
+}
+
+static int upgrade(ErlNifEnv* env, void** priv_data, void** old_priv_data, ERL_NIF_TERM load_info)
+{
+ /* Convert the private data to the new version. */
+ *priv_data = *old_priv_data;
+
+ loads++;
+
+ return 0;
+}
+
+static void unload(ErlNifEnv* env, void* priv_data)
+{
+ if (loads == 1) {
+ /* Destroy the private data. */
+ }
+
+ loads--;
+}
+
+static ERL_NIF_TERM hello(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ if (enif_is_atom(env, argv[0])) {
+ return enif_make_tuple2(env,
+ enif_make_atom(env, "hello"),
+ argv[0]);
+ }
+
+ return enif_make_tuple2(env,
+ enif_make_atom(env, "error"),
+ enif_make_atom(env, "badarg"));
+}
+
+static ErlNifFunc nif_funcs[] = {
+ {"hello", 1, hello}
+};
+
+ERL_NIF_INIT($n, nif_funcs, load, NULL, upgrade, unload)
+endef
+
+define bs_erl_nif
+-module($n).
+
+-export([hello/1]).
+
+-on_load(on_load/0).
+on_load() ->
+ PrivDir = case code:priv_dir(?MODULE) of
+ {error, _} ->
+ AppPath = filename:dirname(filename:dirname(code:which(?MODULE))),
+ filename:join(AppPath, "priv");
+ Path ->
+ Path
+ end,
+ erlang:load_nif(filename:join(PrivDir, atom_to_list(?MODULE)), 0).
+
+hello(_) ->
+ erlang:nif_error({not_loaded, ?MODULE}).
+endef
+
+new-nif:
+ifneq ($(wildcard $(C_SRC_DIR)/$n.c),)
+ $(error Error: $(C_SRC_DIR)/$n.c already exists)
+endif
+ifneq ($(wildcard src/$n.erl),)
+ $(error Error: src/$n.erl already exists)
+endif
+ifndef n
+ $(error Usage: $(MAKE) new-nif n=NAME [in=APP])
+endif
+ifdef in
+ $(verbose) $(MAKE) -C $(APPS_DIR)/$(in)/ new-nif n=$n in=
+else
+ $(verbose) mkdir -p $(C_SRC_DIR) src/
+ $(verbose) $(call core_render,bs_c_nif,$(C_SRC_DIR)/$n.c)
+ $(verbose) $(call core_render,bs_erl_nif,src/$n.erl)
+endif
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: ci ci-prepare ci-setup
+
+CI_OTP ?=
+CI_HIPE ?=
+CI_ERLLVM ?=
+
+ifeq ($(CI_VM),native)
+ERLC_OPTS += +native
+TEST_ERLC_OPTS += +native
+else ifeq ($(CI_VM),erllvm)
+ERLC_OPTS += +native +'{hipe, [to_llvm]}'
+TEST_ERLC_OPTS += +native +'{hipe, [to_llvm]}'
+endif
+
+ifeq ($(strip $(CI_OTP) $(CI_HIPE) $(CI_ERLLVM)),)
+ci::
+else
+
+ci:: $(addprefix ci-,$(CI_OTP) $(addsuffix -native,$(CI_HIPE)) $(addsuffix -erllvm,$(CI_ERLLVM)))
+
+ci-prepare: $(addprefix $(KERL_INSTALL_DIR)/,$(CI_OTP) $(addsuffix -native,$(CI_HIPE)))
+
+ci-setup::
+ $(verbose) :
+
+ci-extra::
+ $(verbose) :
+
+ci_verbose_0 = @echo " CI " $(1);
+ci_verbose = $(ci_verbose_$(V))
+
+define ci_target
+ci-$1: $(KERL_INSTALL_DIR)/$2
+ $(verbose) $(MAKE) --no-print-directory clean
+ $(ci_verbose) \
+ PATH="$(KERL_INSTALL_DIR)/$2/bin:$(PATH)" \
+ CI_OTP_RELEASE="$1" \
+ CT_OPTS="-label $1" \
+ CI_VM="$3" \
+ $(MAKE) ci-setup tests
+ $(verbose) $(MAKE) --no-print-directory ci-extra
+endef
+
+$(foreach otp,$(CI_OTP),$(eval $(call ci_target,$(otp),$(otp),otp)))
+$(foreach otp,$(CI_HIPE),$(eval $(call ci_target,$(otp)-native,$(otp)-native,native)))
+$(foreach otp,$(CI_ERLLVM),$(eval $(call ci_target,$(otp)-erllvm,$(otp)-native,erllvm)))
+
+$(foreach otp,$(filter-out $(ERLANG_OTP),$(CI_OTP)),$(eval $(call kerl_otp_target,$(otp))))
+$(foreach otp,$(filter-out $(ERLANG_HIPE),$(sort $(CI_HIPE) $(CI_ERLLVM))),$(eval $(call kerl_hipe_target,$(otp))))
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Continuous Integration targets:" \
+ " ci Run '$(MAKE) tests' on all configured Erlang versions." \
+ "" \
+		"The CI_OTP variable must list the Erlang versions to be tested." \
+		"For example: CI_OTP = OTP-17.3.4 OTP-17.5.3"
+
+endif
+
+# Copyright (c) 2020, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifdef CONCUERROR_TESTS
+
+.PHONY: concuerror distclean-concuerror
+
+# Configuration
+
+CONCUERROR_LOGS_DIR ?= $(CURDIR)/logs
+CONCUERROR_OPTS ?=
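+
+# CONCUERROR_TESTS takes module:function pairs, e.g. (illustrative names):
+#   CONCUERROR_TESTS = my_mod:my_test other_mod:other_test
+# which yields the targets concuerror-my_mod-my_test and
+# concuerror-other_mod-other_test.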
+
+# Core targets.
+
+check:: concuerror
+
+ifndef KEEP_LOGS
+distclean:: distclean-concuerror
+endif
+
+# Plugin-specific targets.
+
+$(ERLANG_MK_TMP)/Concuerror/bin/concuerror: | $(ERLANG_MK_TMP)
+ $(verbose) git clone https://github.com/parapluu/Concuerror $(ERLANG_MK_TMP)/Concuerror
+ $(verbose) $(MAKE) -C $(ERLANG_MK_TMP)/Concuerror
+
+$(CONCUERROR_LOGS_DIR):
+ $(verbose) mkdir -p $(CONCUERROR_LOGS_DIR)
+
+define concuerror_html_report
+<!DOCTYPE html>
+<html lang="en">
+<head>
+<meta charset="utf-8">
+<title>Concuerror HTML report</title>
+</head>
+<body>
+<h1>Concuerror HTML report</h1>
+<p>Generated on $(concuerror_date)</p>
+<ul>
+$(foreach t,$(concuerror_targets),<li><a href="$(t).txt">$(t)</a></li>)
+</ul>
+</body>
+</html>
+endef
+
+concuerror: $(addprefix concuerror-,$(subst :,-,$(CONCUERROR_TESTS)))
+ $(eval concuerror_date := $(shell date))
+ $(eval concuerror_targets := $^)
+ $(verbose) $(call core_render,concuerror_html_report,$(CONCUERROR_LOGS_DIR)/concuerror.html)
+
+define concuerror_target
+.PHONY: concuerror-$1-$2
+
+concuerror-$1-$2: test-build | $(ERLANG_MK_TMP)/Concuerror/bin/concuerror $(CONCUERROR_LOGS_DIR)
+ $(ERLANG_MK_TMP)/Concuerror/bin/concuerror \
+ --pa $(CURDIR)/ebin --pa $(TEST_DIR) \
+ -o $(CONCUERROR_LOGS_DIR)/concuerror-$1-$2.txt \
+ $$(CONCUERROR_OPTS) -m $1 -t $2
+endef
+
+$(foreach test,$(CONCUERROR_TESTS),$(eval $(call concuerror_target,$(firstword $(subst :, ,$(test))),$(lastword $(subst :, ,$(test))))))
+
+distclean-concuerror:
+ $(gen_verbose) rm -rf $(CONCUERROR_LOGS_DIR)
+
+endif
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: ct apps-ct distclean-ct
+
+# Configuration.
+
+CT_OPTS ?=
+
+ifneq ($(wildcard $(TEST_DIR)),)
+ifndef CT_SUITES
+CT_SUITES := $(sort $(subst _SUITE.erl,,$(notdir $(call core_find,$(TEST_DIR)/,*_SUITE.erl))))
+endif
+endif
+CT_SUITES ?=
+CT_LOGS_DIR ?= $(CURDIR)/logs
+
+# Core targets.
+
+tests:: ct
+
+ifndef KEEP_LOGS
+distclean:: distclean-ct
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Common_test targets:" \
+ " ct Run all the common_test suites for this project" \
+ "" \
+		"Each of your common_test suites has an associated target." \
+		"A suite named http_SUITE can be run using the ct-http target."
+
+# Plugin-specific targets.
+
+CT_RUN = ct_run \
+ -no_auto_compile \
+ -noinput \
+ -pa $(CURDIR)/ebin $(TEST_DIR) \
+ -dir $(TEST_DIR) \
+ -logdir $(CT_LOGS_DIR)
+
+ifeq ($(CT_SUITES),)
+ct: $(if $(IS_APP)$(ROOT_DIR),,apps-ct)
+else
+# We do not run tests if we are in an apps/* directory with no test directory.
+ifneq ($(IS_APP)$(wildcard $(TEST_DIR)),1)
+ct: test-build $(if $(IS_APP)$(ROOT_DIR),,apps-ct)
+ $(verbose) mkdir -p $(CT_LOGS_DIR)
+ $(gen_verbose) $(CT_RUN) -sname ct_$(PROJECT) -suite $(addsuffix _SUITE,$(CT_SUITES)) $(CT_OPTS)
+endif
+endif
+
+ifneq ($(ALL_APPS_DIRS),)
+define ct_app_target
+apps-ct-$1: test-build
+ $$(MAKE) -C $1 ct IS_APP=1
+endef
+
+$(foreach app,$(ALL_APPS_DIRS),$(eval $(call ct_app_target,$(app))))
+
+apps-ct: $(addprefix apps-ct-,$(ALL_APPS_DIRS))
+endif
+
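+# The t and c variables narrow a suite run to a group and/or case, e.g.
+# (illustrative names):
+#   make ct-http t=my_group         # -group my_group
+#   make ct-http t=my_group:my_case # -group my_group -case my_case
+#   make ct-http c=my_case          # -case my_case
+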
+ifdef t
+ifeq (,$(findstring :,$t))
+CT_EXTRA = -group $t
+else
+t_words = $(subst :, ,$t)
+CT_EXTRA = -group $(firstword $(t_words)) -case $(lastword $(t_words))
+endif
+else
+ifdef c
+CT_EXTRA = -case $c
+else
+CT_EXTRA =
+endif
+endif
+
+define ct_suite_target
+ct-$(1): test-build
+ $(verbose) mkdir -p $(CT_LOGS_DIR)
+ $(gen_verbose_esc) $(CT_RUN) -sname ct_$(PROJECT) -suite $(addsuffix _SUITE,$(1)) $(CT_EXTRA) $(CT_OPTS)
+endef
+
+$(foreach test,$(CT_SUITES),$(eval $(call ct_suite_target,$(test))))
+
+distclean-ct:
+ $(gen_verbose) rm -rf $(CT_LOGS_DIR)
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: plt distclean-plt dialyze
+
+# Configuration.
+
+DIALYZER_PLT ?= $(CURDIR)/.$(PROJECT).plt
+export DIALYZER_PLT
+
+PLT_APPS ?=
+DIALYZER_DIRS ?= --src -r $(wildcard src) $(ALL_APPS_DIRS)
+DIALYZER_OPTS ?= -Werror_handling -Wrace_conditions -Wunmatched_returns # -Wunderspecs
+DIALYZER_PLT_OPTS ?=
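+
+# Illustrative: extra applications can be added to the PLT with, e.g.,
+#   PLT_APPS = mnesia inets ssl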
+
+# Core targets.
+
+check:: dialyze
+
+distclean:: distclean-plt
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Dialyzer targets:" \
+ " plt Build a PLT file for this project" \
+ " dialyze Analyze the project using Dialyzer"
+
+# Plugin-specific targets.
+
+define filter_opts.erl
+ Opts = init:get_plain_arguments(),
+ {Filtered, _} = lists:foldl(fun
+ (O, {Os, true}) -> {[O|Os], false};
+ (O = "-D", {Os, _}) -> {[O|Os], true};
+ (O = [\\$$-, \\$$D, _ | _], {Os, _}) -> {[O|Os], false};
+ (O = "-I", {Os, _}) -> {[O|Os], true};
+ (O = [\\$$-, \\$$I, _ | _], {Os, _}) -> {[O|Os], false};
+ (O = "-pa", {Os, _}) -> {[O|Os], true};
+ (_, Acc) -> Acc
+ end, {[], false}, Opts),
+ io:format("~s~n", [string:join(lists:reverse(Filtered), " ")]),
+ halt().
+endef
+
+# DIALYZER_PLT is a variable understood directly by Dialyzer.
+#
+# We append the path to erts at the end of the PLT. This works
+# because the PLT file is in the external term format and the
+# function binary_to_term/1 ignores any trailing data.
+$(DIALYZER_PLT): deps app
+ $(eval DEPS_LOG := $(shell test -f $(ERLANG_MK_TMP)/deps.log && \
+ while read p; do test -d $$p/ebin && echo $$p/ebin; done <$(ERLANG_MK_TMP)/deps.log))
+ $(verbose) dialyzer --build_plt $(DIALYZER_PLT_OPTS) --apps \
+ erts kernel stdlib $(PLT_APPS) $(OTP_DEPS) $(LOCAL_DEPS) $(DEPS_LOG) || test $$? -eq 2
+ $(verbose) $(ERL) -eval 'io:format("~n~s~n", [code:lib_dir(erts)]), halt().' >> $@
+
+plt: $(DIALYZER_PLT)
+
+distclean-plt:
+ $(gen_verbose) rm -f $(DIALYZER_PLT)
+
+ifneq ($(wildcard $(DIALYZER_PLT)),)
+dialyze: $(if $(filter --src,$(DIALYZER_DIRS)),,deps app)
+ $(verbose) if ! tail -n1 $(DIALYZER_PLT) | \
+ grep -q "^`$(ERL) -eval 'io:format("~s", [code:lib_dir(erts)]), halt().'`$$"; then \
+ rm $(DIALYZER_PLT); \
+ $(MAKE) plt; \
+ fi
+else
+dialyze: $(DIALYZER_PLT)
+endif
+ $(verbose) dialyzer --no_native `$(ERL) \
+ -eval "$(subst $(newline),,$(call escape_dquotes,$(call filter_opts.erl)))" \
+ -extra $(ERLC_OPTS)` $(DIALYZER_DIRS) $(DIALYZER_OPTS) $(if $(wildcard ebin/),-pa ebin/)
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-edoc edoc
+
+# Configuration.
+
+EDOC_OPTS ?=
+EDOC_SRC_DIRS ?=
+EDOC_OUTPUT ?= doc
+
+define edoc.erl
+ SrcPaths = lists:foldl(fun(P, Acc) ->
+ filelib:wildcard(atom_to_list(P) ++ "/{src,c_src}") ++ Acc
+ end, [], [$(call comma_list,$(patsubst %,'%',$(call core_native_path,$(EDOC_SRC_DIRS))))]),
+ DefaultOpts = [{dir, "$(EDOC_OUTPUT)"}, {source_path, SrcPaths}, {subpackages, false}],
+ edoc:application($(1), ".", [$(2)] ++ DefaultOpts),
+ halt(0).
+endef
+
+# Core targets.
+
+ifneq ($(strip $(EDOC_SRC_DIRS)$(wildcard doc/overview.edoc)),)
+docs:: edoc
+endif
+
+distclean:: distclean-edoc
+
+# Plugin-specific targets.
+
+edoc: distclean-edoc doc-deps
+ $(gen_verbose) $(call erlang,$(call edoc.erl,$(PROJECT),$(EDOC_OPTS)))
+
+distclean-edoc:
+ $(gen_verbose) rm -f $(EDOC_OUTPUT)/*.css $(EDOC_OUTPUT)/*.html $(EDOC_OUTPUT)/*.png $(EDOC_OUTPUT)/edoc-info
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Configuration.
+
+DTL_FULL_PATH ?=
+DTL_PATH ?= templates/
+DTL_PREFIX ?=
+DTL_SUFFIX ?= _dtl
+DTL_OPTS ?=
+
+# Verbosity.
+
+dtl_verbose_0 = @echo " DTL " $(filter %.dtl,$(?F));
+dtl_verbose = $(dtl_verbose_$(V))
+
+# Core targets.
+
+DTL_PATH := $(abspath $(DTL_PATH))
+DTL_FILES := $(sort $(call core_find,$(DTL_PATH),*.dtl))
+
+ifneq ($(DTL_FILES),)
+
+DTL_NAMES = $(addprefix $(DTL_PREFIX),$(addsuffix $(DTL_SUFFIX),$(DTL_FILES:$(DTL_PATH)/%.dtl=%)))
+DTL_MODULES = $(if $(DTL_FULL_PATH),$(subst /,_,$(DTL_NAMES)),$(notdir $(DTL_NAMES)))
+BEAM_FILES += $(addsuffix .beam,$(addprefix ebin/,$(DTL_MODULES)))
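+
+# Module naming with the default DTL_SUFFIX (paths are illustrative):
+#   templates/foo.dtl     -> foo_dtl
+#   templates/sub/foo.dtl -> sub_foo_dtl if DTL_FULL_PATH is set,
+#                            foo_dtl otherwise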
+
+ifneq ($(words $(DTL_FILES)),0)
+# Rebuild templates when the Makefile changes.
+$(ERLANG_MK_TMP)/last-makefile-change-erlydtl: $(MAKEFILE_LIST) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(DTL_FILES); \
+ fi
+ $(verbose) touch $@
+
+ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change-erlydtl
+endif
+
+define erlydtl_compile.erl
+ [begin
+ Module0 = case "$(strip $(DTL_FULL_PATH))" of
+ "" ->
+ filename:basename(F, ".dtl");
+ _ ->
+ "$(call core_native_path,$(DTL_PATH))/" ++ F2 = filename:rootname(F, ".dtl"),
+ re:replace(F2, "/", "_", [{return, list}, global])
+ end,
+ Module = list_to_atom("$(DTL_PREFIX)" ++ string:to_lower(Module0) ++ "$(DTL_SUFFIX)"),
+ case erlydtl:compile(F, Module, [$(DTL_OPTS)] ++ [{out_dir, "ebin/"}, return_errors]) of
+ ok -> ok;
+ {ok, _} -> ok
+ end
+ end || F <- string:tokens("$(1)", " ")],
+ halt().
+endef
+
+ebin/$(PROJECT).app:: $(DTL_FILES) | ebin/
+ $(if $(strip $?),\
+ $(dtl_verbose) $(call erlang,$(call erlydtl_compile.erl,$(call core_native_path,$?)),\
+ -pa ebin/))
+
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, Dave Cottlehuber <dch@skunkwerks.at>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-escript escript escript-zip
+
+# Configuration.
+
+ESCRIPT_NAME ?= $(PROJECT)
+ESCRIPT_FILE ?= $(ESCRIPT_NAME)
+
+ESCRIPT_SHEBANG ?= /usr/bin/env escript
+ESCRIPT_COMMENT ?= This is an -*- erlang -*- file
+ESCRIPT_EMU_ARGS ?= -escript main $(ESCRIPT_NAME)
+
+ESCRIPT_ZIP ?= 7z a -tzip -mx=9 -mtc=off $(if $(filter-out 0,$(V)),,> /dev/null)
+ESCRIPT_ZIP_FILE ?= $(ERLANG_MK_TMP)/escript.zip
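+
+# With the defaults above, the generated escript begins with:
+#   #!/usr/bin/env escript
+#   %% This is an -*- erlang -*- file
+#   %%! -escript main $(PROJECT)
+# followed by the zip archive produced by escript-zip.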
+
+# Core targets.
+
+distclean:: distclean-escript
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Escript targets:" \
+ " escript Build an executable escript archive" \
+		"  escript     Build an executable escript archive"
+# Plugin-specific targets.
+
+escript-zip:: FULL=1
+escript-zip:: deps app
+ $(verbose) mkdir -p $(dir $(ESCRIPT_ZIP))
+ $(verbose) rm -f $(ESCRIPT_ZIP_FILE)
+ $(gen_verbose) cd .. && $(ESCRIPT_ZIP) $(ESCRIPT_ZIP_FILE) $(PROJECT)/ebin/*
+ifneq ($(DEPS),)
+ $(verbose) cd $(DEPS_DIR) && $(ESCRIPT_ZIP) $(ESCRIPT_ZIP_FILE) \
+ $(subst $(DEPS_DIR)/,,$(addsuffix /*,$(wildcard \
+ $(addsuffix /ebin,$(shell cat $(ERLANG_MK_TMP)/deps.log)))))
+endif
+
+escript:: escript-zip
+ $(gen_verbose) printf "%s\n" \
+ "#!$(ESCRIPT_SHEBANG)" \
+ "%% $(ESCRIPT_COMMENT)" \
+ "%%! $(ESCRIPT_EMU_ARGS)" > $(ESCRIPT_FILE)
+ $(verbose) cat $(ESCRIPT_ZIP_FILE) >> $(ESCRIPT_FILE)
+ $(verbose) chmod +x $(ESCRIPT_FILE)
+
+distclean-escript:
+ $(gen_verbose) rm -f $(ESCRIPT_FILE)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, Enrique Fernandez <enrique.fernandez@erlang-solutions.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: eunit apps-eunit
+
+# Configuration
+
+EUNIT_OPTS ?=
+EUNIT_ERL_OPTS ?=
+
+# Core targets.
+
+tests:: eunit
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "EUnit targets:" \
+ " eunit Run all the EUnit tests for this project"
+
+# Plugin-specific targets.
+
+define eunit.erl
+ $(call cover.erl)
+ CoverSetup(),
+ case eunit:test($1, [$(EUNIT_OPTS)]) of
+ ok -> ok;
+ error -> halt(2)
+ end,
+ CoverExport("$(call core_native_path,$(COVER_DATA_DIR))/eunit.coverdata"),
+ halt()
+endef
+
+EUNIT_ERL_OPTS += -pa $(TEST_DIR) $(CURDIR)/ebin
+
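+# The t variable narrows the run to one module or one test function, e.g.
+# (illustrative names):
+#   make eunit t=my_mod          # eunit:test(['my_mod'], ...)
+#   make eunit t=my_mod:my_test  # eunit:test(fun my_mod:my_test/0, ...)
+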
+ifdef t
+ifeq (,$(findstring :,$(t)))
+eunit: test-build cover-data-dir
+ $(gen_verbose) $(call erlang,$(call eunit.erl,['$(t)']),$(EUNIT_ERL_OPTS))
+else
+eunit: test-build cover-data-dir
+ $(gen_verbose) $(call erlang,$(call eunit.erl,fun $(t)/0),$(EUNIT_ERL_OPTS))
+endif
+else
+EUNIT_EBIN_MODS = $(notdir $(basename $(ERL_FILES) $(BEAM_FILES)))
+EUNIT_TEST_MODS = $(notdir $(basename $(call core_find,$(TEST_DIR)/,*.erl)))
+
+EUNIT_MODS = $(foreach mod,$(EUNIT_EBIN_MODS) $(filter-out \
+ $(patsubst %,%_tests,$(EUNIT_EBIN_MODS)),$(EUNIT_TEST_MODS)),'$(mod)')
+
+eunit: test-build $(if $(IS_APP)$(ROOT_DIR),,apps-eunit) cover-data-dir
+ifneq ($(wildcard src/ $(TEST_DIR)),)
+ $(gen_verbose) $(call erlang,$(call eunit.erl,[$(call comma_list,$(EUNIT_MODS))]),$(EUNIT_ERL_OPTS))
+endif
+
+ifneq ($(ALL_APPS_DIRS),)
+apps-eunit: test-build
+ $(verbose) eunit_retcode=0 ; for app in $(ALL_APPS_DIRS); do $(MAKE) -C $$app eunit IS_APP=1; \
+ [ $$? -ne 0 ] && eunit_retcode=1 ; done ; \
+ exit $$eunit_retcode
+endif
+endif
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter proper,$(DEPS) $(TEST_DEPS)),proper)
+.PHONY: proper
+
+# Targets.
+
+tests:: proper
+
+define proper_check.erl
+ $(call cover.erl)
+ code:add_pathsa([
+ "$(call core_native_path,$(CURDIR)/ebin)",
+ "$(call core_native_path,$(DEPS_DIR)/*/ebin)",
+ "$(call core_native_path,$(TEST_DIR))"]),
+ Module = fun(M) ->
+ [true] =:= lists:usort([
+ case atom_to_list(F) of
+ "prop_" ++ _ ->
+ io:format("Testing ~p:~p/0~n", [M, F]),
+ proper:quickcheck(M:F(), nocolors);
+ _ ->
+ true
+ end
+ || {F, 0} <- M:module_info(exports)])
+ end,
+ try begin
+ CoverSetup(),
+ Res = case $(1) of
+ all -> [true] =:= lists:usort([Module(M) || M <- [$(call comma_list,$(3))]]);
+ module -> Module($(2));
+ function -> proper:quickcheck($(2), nocolors)
+ end,
+ CoverExport("$(COVER_DATA_DIR)/proper.coverdata"),
+ Res
+ end of
+ true -> halt(0);
+ _ -> halt(1)
+ catch error:undef ->
+ io:format("Undefined property or module?~n~p~n", [erlang:get_stacktrace()]),
+ halt(0)
+ end.
+endef
+
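+# As with eunit, t= narrows the run, e.g. (illustrative names):
+#   make proper t=prop_mod          # checks every prop_* in the module
+#   make proper t=prop_mod:prop_ok  # checks that single property
+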
+ifdef t
+ifeq (,$(findstring :,$(t)))
+proper: test-build cover-data-dir
+ $(verbose) $(call erlang,$(call proper_check.erl,module,$(t)))
+else
+proper: test-build cover-data-dir
+ $(verbose) echo Testing $(t)/0
+ $(verbose) $(call erlang,$(call proper_check.erl,function,$(t)()))
+endif
+else
+proper: test-build cover-data-dir
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(wildcard ebin/*.beam) $(call core_find,$(TEST_DIR)/,*.beam))))))
+ $(gen_verbose) $(call erlang,$(call proper_check.erl,all,undefined,$(MODULES)))
+endif
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Verbosity.
+
+proto_verbose_0 = @echo " PROTO " $(filter %.proto,$(?F));
+proto_verbose = $(proto_verbose_$(V))
+
+# Core targets.
+
+ifneq ($(wildcard src/),)
+ifneq ($(filter gpb protobuffs,$(BUILD_DEPS) $(DEPS)),)
+PROTO_FILES := $(filter %.proto,$(ALL_SRC_FILES))
+ERL_FILES += $(addprefix src/,$(patsubst %.proto,%_pb.erl,$(notdir $(PROTO_FILES))))
+
+ifeq ($(PROTO_FILES),)
+$(ERLANG_MK_TMP)/last-makefile-change-protobuffs:
+ $(verbose) :
+else
+# Rebuild proto files when the Makefile changes.
+# We exclude $(PROJECT).d to avoid a circular dependency.
+$(ERLANG_MK_TMP)/last-makefile-change-protobuffs: $(filter-out $(PROJECT).d,$(MAKEFILE_LIST)) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(PROTO_FILES); \
+ fi
+ $(verbose) touch $@
+
+$(PROJECT).d:: $(ERLANG_MK_TMP)/last-makefile-change-protobuffs
+endif
+
+ifeq ($(filter gpb,$(BUILD_DEPS) $(DEPS)),)
+define compile_proto.erl
+ [begin
+ protobuffs_compile:generate_source(F, [
+ {output_include_dir, "./include"},
+ {output_src_dir, "./src"}])
+ end || F <- string:tokens("$1", " ")],
+ halt().
+endef
+else
+define compile_proto.erl
+ [begin
+ gpb_compile:file(F, [
+ {include_as_lib, true},
+ {module_name_suffix, "_pb"},
+ {o_hrl, "./include"},
+ {o_erl, "./src"}])
+ end || F <- string:tokens("$1", " ")],
+ halt().
+endef
+endif
+
+ifneq ($(PROTO_FILES),)
+$(PROJECT).d:: $(PROTO_FILES)
+ $(verbose) mkdir -p ebin/ include/
+ $(if $(strip $?),$(proto_verbose) $(call erlang,$(call compile_proto.erl,$?)))
+endif
+endif
+endif
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: relx-rel relx-relup distclean-relx-rel run
+
+# Configuration.
+
+RELX ?= $(ERLANG_MK_TMP)/relx
+RELX_CONFIG ?= $(CURDIR)/relx.config
+
+RELX_URL ?= https://erlang.mk/res/relx-v3.27.0
+RELX_OPTS ?=
+RELX_OUTPUT_DIR ?= _rel
+RELX_REL_EXT ?=
+RELX_TAR ?= 1
+
+ifdef SFX
+ RELX_TAR = 1
+endif
+
+ifeq ($(firstword $(RELX_OPTS)),-o)
+ RELX_OUTPUT_DIR = $(word 2,$(RELX_OPTS))
+else
+ RELX_OPTS += -o $(RELX_OUTPUT_DIR)
+endif
+
+# Core targets.
+
+ifeq ($(IS_DEP),)
+ifneq ($(wildcard $(RELX_CONFIG)),)
+rel:: relx-rel
+
+relup:: relx-relup
+endif
+endif
+
+distclean:: distclean-relx-rel
+
+# Plugin-specific targets.
+
+$(RELX): | $(ERLANG_MK_TMP)
+ $(gen_verbose) $(call core_http_get,$(RELX),$(RELX_URL))
+ $(verbose) chmod +x $(RELX)
+
+relx-rel: $(RELX) rel-deps app
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) release
+ $(verbose) $(MAKE) relx-post-rel
+ifeq ($(RELX_TAR),1)
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) tar
+endif
+
+relx-relup: $(RELX) rel-deps app
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) release
+ $(MAKE) relx-post-rel
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) relup $(if $(filter 1,$(RELX_TAR)),tar)
+
+distclean-relx-rel:
+ $(gen_verbose) rm -rf $(RELX_OUTPUT_DIR)
+
+# Default hooks.
+relx-post-rel::
+ $(verbose) :
+
+# Run target.
+
+ifeq ($(wildcard $(RELX_CONFIG)),)
+run::
+else
+
+define get_relx_release.erl
+ {ok, Config} = file:consult("$(call core_native_path,$(RELX_CONFIG))"),
+ {release, {Name, Vsn0}, _} = lists:keyfind(release, 1, Config),
+ Vsn = case Vsn0 of
+ {cmd, Cmd} -> os:cmd(Cmd);
+ semver -> "";
+ {semver, _} -> "";
+ VsnStr -> Vsn0
+ end,
+ Extended = case lists:keyfind(extended_start_script, 1, Config) of
+ {_, true} -> "1";
+ _ -> ""
+ end,
+ io:format("~s ~s ~s", [Name, Vsn, Extended]),
+ halt(0).
+endef
+
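+# Illustrative: with the bs_relx_config template shown earlier, this
+# prints something like "myapp_release 1 1" (name, version, extended).
+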
+RELX_REL := $(shell $(call erlang,$(get_relx_release.erl)))
+RELX_REL_NAME := $(word 1,$(RELX_REL))
+RELX_REL_VSN := $(word 2,$(RELX_REL))
+RELX_REL_CMD := $(if $(word 3,$(RELX_REL)),console)
+
+ifeq ($(PLATFORM),msys2)
+RELX_REL_EXT := .cmd
+endif
+
+run:: all
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) $(RELX_REL_CMD)
+
+ifdef RELOAD
+rel::
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) ping
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) \
+ eval "io:format(\"~p~n\", [c:lm()])"
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Relx targets:" \
+ " run Compile the project, build the release and run it"
+
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, M Robert Martin <rob@version2beta.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: shell
+
+# Configuration.
+
+SHELL_ERL ?= erl
+SHELL_PATHS ?= $(CURDIR)/ebin $(TEST_DIR)
+SHELL_OPTS ?=
+
+ALL_SHELL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(SHELL_DEPS))
+
+# Core targets
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Shell targets:" \
+		"  shell              Run an Erlang shell with SHELL_OPTS or reasonable defaults"
+
+# Plugin-specific targets.
+
+$(foreach dep,$(SHELL_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+build-shell-deps:
+else
+build-shell-deps: $(ALL_SHELL_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_SHELL_DEPS_DIRS) ; do \
+ if [ -z "$(strip $(FULL))" ] && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ else \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ fi \
+ done
+endif
+
+shell:: build-shell-deps
+ $(gen_verbose) $(SHELL_ERL) -pa $(SHELL_PATHS) $(SHELL_OPTS)
+
+# Copyright 2017, Stanislaw Klekot <dozzie@jarowit.net>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-sphinx sphinx
+
+# Configuration.
+
+SPHINX_BUILD ?= sphinx-build
+SPHINX_SOURCE ?= doc
+SPHINX_CONFDIR ?=
+SPHINX_FORMATS ?= html
+SPHINX_DOCTREES ?= $(ERLANG_MK_TMP)/sphinx.doctrees
+SPHINX_OPTS ?=
+
+#sphinx_html_opts =
+#sphinx_html_output = html
+#sphinx_man_opts =
+#sphinx_man_output = man
+#sphinx_latex_opts =
+#sphinx_latex_output = latex
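+
+# Illustrative: to also build man pages into doc/man, set
+#   SPHINX_FORMATS = html man
+#   sphinx_man_output = doc/man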
+
+# Helpers.
+
+sphinx_build_0 = @echo " SPHINX" $1; $(SPHINX_BUILD) -N -q
+sphinx_build_1 = $(SPHINX_BUILD) -N
+sphinx_build_2 = set -x; $(SPHINX_BUILD)
+sphinx_build = $(sphinx_build_$(V))
+
+define sphinx.build
+$(call sphinx_build,$1) -b $1 -d $(SPHINX_DOCTREES) $(if $(SPHINX_CONFDIR),-c $(SPHINX_CONFDIR)) $(SPHINX_OPTS) $(sphinx_$1_opts) -- $(SPHINX_SOURCE) $(call sphinx.output,$1)
+
+endef
+
+define sphinx.output
+$(if $(sphinx_$1_output),$(sphinx_$1_output),$1)
+endef
+
+# Targets.
+
+ifneq ($(wildcard $(if $(SPHINX_CONFDIR),$(SPHINX_CONFDIR),$(SPHINX_SOURCE))/conf.py),)
+docs:: sphinx
+distclean:: distclean-sphinx
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Sphinx targets:" \
+ " sphinx Generate Sphinx documentation." \
+ "" \
+		"ReST sources and the 'conf.py' file are expected in the directory pointed" \
+		"to by SPHINX_SOURCE ('doc' by default). SPHINX_FORMATS lists the formats to" \
+		"build (only 'html' is generated by default); the output directory can be" \
+		'set via sphinx_$${format}_output, for example: sphinx_html_output = output/html' \
+		"Additional Sphinx options can be set in SPHINX_OPTS."
+
+# Plugin-specific targets.
+
+sphinx:
+ $(foreach F,$(SPHINX_FORMATS),$(call sphinx.build,$F))
+
+distclean-sphinx:
+ $(gen_verbose) rm -rf $(filter-out $(SPHINX_SOURCE),$(foreach F,$(SPHINX_FORMATS),$(call sphinx.output,$F)))
+
+# Copyright (c) 2017, Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: show-ERL_LIBS show-ERLC_OPTS show-TEST_ERLC_OPTS
+
+show-ERL_LIBS:
+ @echo $(ERL_LIBS)
+
+show-ERLC_OPTS:
+ @$(foreach opt,$(ERLC_OPTS) -pa ebin -I include,echo "$(opt)";)
+
+show-TEST_ERLC_OPTS:
+ @$(foreach opt,$(TEST_ERLC_OPTS) -pa ebin -I include,echo "$(opt)";)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter triq,$(DEPS) $(TEST_DEPS)),triq)
+.PHONY: triq
+
+# Targets.
+
+tests:: triq
+
+define triq_check.erl
+ $(call cover.erl)
+ code:add_pathsa([
+ "$(call core_native_path,$(CURDIR)/ebin)",
+ "$(call core_native_path,$(DEPS_DIR)/*/ebin)",
+ "$(call core_native_path,$(TEST_DIR))"]),
+ try begin
+ CoverSetup(),
+ Res = case $(1) of
+ all -> [true] =:= lists:usort([triq:check(M) || M <- [$(call comma_list,$(3))]]);
+ module -> triq:check($(2));
+ function -> triq:check($(2))
+ end,
+ CoverExport("$(COVER_DATA_DIR)/triq.coverdata"),
+ Res
+ end of
+ true -> halt(0);
+ _ -> halt(1)
+ catch error:undef ->
+ io:format("Undefined property or module?~n~p~n", [erlang:get_stacktrace()]),
+ halt(0)
+ end.
+endef
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+triq: test-build cover-data-dir
+ $(verbose) $(call erlang,$(call triq_check.erl,module,$(t)))
+else
+triq: test-build cover-data-dir
+ $(verbose) echo Testing $(t)/0
+ $(verbose) $(call erlang,$(call triq_check.erl,function,$(t)()))
+endif
+else
+triq: test-build cover-data-dir
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(wildcard ebin/*.beam) $(call core_find,$(TEST_DIR)/,*.beam))))))
+ $(gen_verbose) $(call erlang,$(call triq_check.erl,all,undefined,$(MODULES)))
+endif
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Erlang Solutions Ltd.
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: xref distclean-xref
+
+# Configuration.
+
+ifeq ($(XREF_CONFIG),)
+ XREFR_ARGS :=
+else
+ XREFR_ARGS := -c $(XREF_CONFIG)
+endif
+
+XREFR ?= $(CURDIR)/xrefr
+export XREFR
+
+XREFR_URL ?= https://github.com/inaka/xref_runner/releases/download/1.1.0/xrefr
+
+# Core targets.
+
+help::
+ $(verbose) printf '%s\n' '' \
+ 'Xref targets:' \
+		'  xref        Run Xrefr using $$XREF_CONFIG as the config file, if defined'
+
+distclean:: distclean-xref
+
+# Plugin-specific targets.
+
+$(XREFR):
+ $(gen_verbose) $(call core_http_get,$(XREFR),$(XREFR_URL))
+ $(verbose) chmod +x $(XREFR)
+
+xref: deps app $(XREFR)
+ $(gen_verbose) $(XREFR) $(XREFR_ARGS)
+
+distclean-xref:
+ $(gen_verbose) rm -rf $(XREFR)
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Viktor Söderqvist <viktor@zuiderkwast.se>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+COVER_REPORT_DIR ?= cover
+COVER_DATA_DIR ?= $(COVER_REPORT_DIR)
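+
+# Illustrative: `make tests COVER=1` collects coverdata and builds the
+# HTML report; add COVER_REPORT_DIR= to keep the data but skip the report.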
+
+ifdef COVER
+COVER_APPS ?= $(notdir $(ALL_APPS_DIRS))
+COVER_DEPS ?=
+endif
+
+# Code coverage for Common Test.
+
+ifdef COVER
+ifdef CT_RUN
+ifneq ($(wildcard $(TEST_DIR)),)
+test-build:: $(TEST_DIR)/ct.cover.spec
+
+$(TEST_DIR)/ct.cover.spec: cover-data-dir
+ $(gen_verbose) printf "%s\n" \
+ "{incl_app, '$(PROJECT)', details}." \
+ "{incl_dirs, '$(PROJECT)', [\"$(call core_native_path,$(CURDIR)/ebin)\" \
+ $(foreach a,$(COVER_APPS),$(comma) \"$(call core_native_path,$(APPS_DIR)/$a/ebin)\") \
+ $(foreach d,$(COVER_DEPS),$(comma) \"$(call core_native_path,$(DEPS_DIR)/$d/ebin)\")]}." \
+ '{export,"$(call core_native_path,$(abspath $(COVER_DATA_DIR))/ct.coverdata)"}.' > $@
+
+CT_RUN += -cover $(TEST_DIR)/ct.cover.spec
+endif
+endif
+endif
+
+# Code coverage for other tools.
+
+ifdef COVER
+define cover.erl
+ CoverSetup = fun() ->
+ Dirs = ["$(call core_native_path,$(CURDIR)/ebin)"
+ $(foreach a,$(COVER_APPS),$(comma) "$(call core_native_path,$(APPS_DIR)/$a/ebin)")
+ $(foreach d,$(COVER_DEPS),$(comma) "$(call core_native_path,$(DEPS_DIR)/$d/ebin)")],
+ [begin
+ case filelib:is_dir(Dir) of
+ false -> false;
+ true ->
+ case cover:compile_beam_directory(Dir) of
+ {error, _} -> halt(1);
+ _ -> true
+ end
+ end
+ end || Dir <- Dirs]
+ end,
+ CoverExport = fun(Filename) -> cover:export(Filename) end,
+endef
+else
+define cover.erl
+ CoverSetup = fun() -> ok end,
+ CoverExport = fun(_) -> ok end,
+endef
+endif
+
+# Core targets
+
+ifdef COVER
+ifneq ($(COVER_REPORT_DIR),)
+tests::
+ $(verbose) $(MAKE) --no-print-directory cover-report
+endif
+
+cover-data-dir: | $(COVER_DATA_DIR)
+
+$(COVER_DATA_DIR):
+ $(verbose) mkdir -p $(COVER_DATA_DIR)
+else
+cover-data-dir:
+endif
+
+clean:: coverdata-clean
+
+ifneq ($(COVER_REPORT_DIR),)
+distclean:: cover-report-clean
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Cover targets:" \
+		"  cover-report  Generate an HTML coverage report from previously collected" \
+ " cover data." \
+ " all.coverdata Merge all coverdata files into all.coverdata." \
+ "" \
+		"If COVER=1 is set, coverage data is generated by the eunit and ct targets." \
+		"The tests target additionally generates an HTML coverage report from the" \
+		"combined coverdata files from each of these testing tools. HTML reports" \
+		"can be disabled by setting COVER_REPORT_DIR to empty."
+
+# Plugin specific targets
+
+COVERDATA = $(filter-out $(COVER_DATA_DIR)/all.coverdata,$(wildcard $(COVER_DATA_DIR)/*.coverdata))
+
+.PHONY: coverdata-clean
+coverdata-clean:
+ $(gen_verbose) rm -f $(COVER_DATA_DIR)/*.coverdata $(TEST_DIR)/ct.cover.spec
+
+# Merge all coverdata files into one.
+define cover_export.erl
+ $(foreach f,$(COVERDATA),cover:import("$(f)") == ok orelse halt(1),)
+ cover:export("$(COVER_DATA_DIR)/$@"), halt(0).
+endef
+
+all.coverdata: $(COVERDATA) cover-data-dir
+ $(gen_verbose) $(call erlang,$(cover_export.erl))
+
+# These are only defined if COVER_REPORT_DIR is non-empty. Set COVER_REPORT_DIR to
+# empty if you want the coverdata files but not the HTML report.
+ifneq ($(COVER_REPORT_DIR),)
+
+.PHONY: cover-report-clean cover-report
+
+cover-report-clean:
+ $(gen_verbose) rm -rf $(COVER_REPORT_DIR)
+ifneq ($(COVER_REPORT_DIR),$(COVER_DATA_DIR))
+ $(if $(shell ls -A $(COVER_DATA_DIR)/),,$(verbose) rmdir $(COVER_DATA_DIR))
+endif
+
+ifeq ($(COVERDATA),)
+cover-report:
+else
+
+# Modules which include eunit.hrl always contain one line without coverage
+# because eunit defines test/0 which is never called. We compensate for this.
+EUNIT_HRL_MODS = $(subst $(space),$(comma),$(shell \
+ grep -H -e '^\s*-include.*include/eunit\.hrl"' src/*.erl \
+ | sed "s/^src\/\(.*\)\.erl:.*/'\1'/" | uniq))
+
+define cover_report.erl
+ $(foreach f,$(COVERDATA),cover:import("$(f)") == ok orelse halt(1),)
+ Ms = cover:imported_modules(),
+ [cover:analyse_to_file(M, "$(COVER_REPORT_DIR)/" ++ atom_to_list(M)
+ ++ ".COVER.html", [html]) || M <- Ms],
+ Report = [begin {ok, R} = cover:analyse(M, module), R end || M <- Ms],
+ EunitHrlMods = [$(EUNIT_HRL_MODS)],
+ Report1 = [{M, {Y, case lists:member(M, EunitHrlMods) of
+ true -> N - 1; false -> N end}} || {M, {Y, N}} <- Report],
+ TotalY = lists:sum([Y || {_, {Y, _}} <- Report1]),
+ TotalN = lists:sum([N || {_, {_, N}} <- Report1]),
+ Perc = fun(Y, N) -> case Y + N of 0 -> 100; S -> round(100 * Y / S) end end,
+ TotalPerc = Perc(TotalY, TotalN),
+ {ok, F} = file:open("$(COVER_REPORT_DIR)/index.html", [write]),
+ io:format(F, "<!DOCTYPE html><html>~n"
+ "<head><meta charset=\"UTF-8\">~n"
+ "<title>Coverage report</title></head>~n"
+ "<body>~n", []),
+ io:format(F, "<h1>Coverage</h1>~n<p>Total: ~p%</p>~n", [TotalPerc]),
+ io:format(F, "<table><tr><th>Module</th><th>Coverage</th></tr>~n", []),
+ [io:format(F, "<tr><td><a href=\"~p.COVER.html\">~p</a></td>"
+ "<td>~p%</td></tr>~n",
+ [M, M, Perc(Y, N)]) || {M, {Y, N}} <- Report1],
+ How = "$(subst $(space),$(comma)$(space),$(basename $(COVERDATA)))",
+ Date = "$(shell date -u "+%Y-%m-%dT%H:%M:%SZ")",
+ io:format(F, "</table>~n"
+ "<p>Generated using ~s and erlang.mk on ~s.</p>~n"
+ "</body></html>", [How, Date]),
+ halt().
+endef
+
+cover-report:
+ $(verbose) mkdir -p $(COVER_REPORT_DIR)
+ $(gen_verbose) $(call erlang,$(cover_report.erl))
+
+endif
+endif # ifneq ($(COVER_REPORT_DIR),)
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: sfx
+
+ifdef RELX_REL
+ifdef SFX
+
+# Configuration.
+
+SFX_ARCHIVE ?= $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/$(RELX_REL_NAME)-$(RELX_REL_VSN).tar.gz
+SFX_OUTPUT_FILE ?= $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME).run
+
+# Core targets.
+
+rel:: sfx
+
+# Plugin-specific targets.
+
+define sfx_stub
+#!/bin/sh
+
+TMPDIR=`mktemp -d`
+ARCHIVE=`awk '/^__ARCHIVE_BELOW__$$/ {print NR + 1; exit 0;}' $$0`
+FILENAME=$$(basename $$0)
+REL=$${FILENAME%.*}
+
+tail -n+$$ARCHIVE $$0 | tar -xzf - -C $$TMPDIR
+
+$$TMPDIR/bin/$$REL console
+RET=$$?
+
+rm -rf $$TMPDIR
+
+exit $$RET
+
+__ARCHIVE_BELOW__
+endef
+
+sfx:
+ $(verbose) $(call core_render,sfx_stub,$(SFX_OUTPUT_FILE))
+ $(gen_verbose) cat $(SFX_ARCHIVE) >> $(SFX_OUTPUT_FILE)
+ $(verbose) chmod +x $(SFX_OUTPUT_FILE)
+
+endif
+endif
+
+# Copyright (c) 2013-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# External plugins.
+
+DEP_PLUGINS ?=
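+
+# Illustrative: DEP_PLUGINS = cowboy loads cowboy's plugins.mk from
+# $(DEPS_DIR), while DEP_PLUGINS = cowboy/mk/dist.mk loads that one file.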
+
+$(foreach p,$(DEP_PLUGINS),\
+ $(eval $(if $(findstring /,$p),\
+ $(call core_dep_plugin,$p,$(firstword $(subst /, ,$p))),\
+ $(call core_dep_plugin,$p/plugins.mk,$p))))
+
+help:: help-plugins
+
+help-plugins::
+ $(verbose) :
+
+# Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015-2016, Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Fetch dependencies recursively (without building them).
+
+.PHONY: fetch-deps fetch-doc-deps fetch-rel-deps fetch-test-deps \
+ fetch-shell-deps
+
+.PHONY: $(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+fetch-deps: $(ERLANG_MK_RECURSIVE_DEPS_LIST)
+fetch-doc-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST)
+fetch-rel-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST)
+fetch-test-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST)
+fetch-shell-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+ifneq ($(SKIP_DEPS),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST):
+ $(verbose) :> $@
+else
+# By default, we fetch "normal" dependencies. They are also included no
+# matter the type of requested dependencies.
+#
+# $(ALL_DEPS_DIRS) includes $(BUILD_DEPS).
+
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_DOC_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_REL_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_TEST_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_SHELL_DEPS_DIRS)
+
+# Allow the use of fetch-deps together with $(DEP_TYPES) to fetch
+# multiple types of dependencies with a single target.
+ifneq ($(filter doc,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_DOC_DEPS_DIRS)
+endif
+ifneq ($(filter rel,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_REL_DEPS_DIRS)
+endif
+ifneq ($(filter test,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_TEST_DEPS_DIRS)
+endif
+ifneq ($(filter shell,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_SHELL_DEPS_DIRS)
+endif
+
+ERLANG_MK_RECURSIVE_TMP_LIST := $(abspath $(ERLANG_MK_TMP)/recursive-tmp-deps-$(shell echo $$PPID).log)
+
+$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): | $(ERLANG_MK_TMP)
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_RECURSIVE_TMP_LIST)
+endif
+ $(verbose) touch $(ERLANG_MK_RECURSIVE_TMP_LIST)
+ $(verbose) set -e; for dep in $^ ; do \
+ if ! grep -qs ^$$dep$$ $(ERLANG_MK_RECURSIVE_TMP_LIST); then \
+ echo $$dep >> $(ERLANG_MK_RECURSIVE_TMP_LIST); \
+ if grep -qs -E "^[[:blank:]]*include[[:blank:]]+(erlang\.mk|.*/erlang\.mk|.*ERLANG_MK_FILENAME.*)$$" \
+ $$dep/GNUmakefile $$dep/makefile $$dep/Makefile; then \
+ $(MAKE) -C $$dep fetch-deps \
+ IS_DEP=1 \
+ ERLANG_MK_RECURSIVE_TMP_LIST=$(ERLANG_MK_RECURSIVE_TMP_LIST); \
+ fi \
+ fi \
+ done
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) sort < $(ERLANG_MK_RECURSIVE_TMP_LIST) | \
+ uniq > $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted
+ $(verbose) cmp -s $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted $@ \
+ || mv $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted $@
+ $(verbose) rm -f $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted
+ $(verbose) rm $(ERLANG_MK_RECURSIVE_TMP_LIST)
+endif
+endif # ifneq ($(SKIP_DEPS),)
+
+# List dependencies recursively.
+
+.PHONY: list-deps list-doc-deps list-rel-deps list-test-deps \
+ list-shell-deps
+
+list-deps: $(ERLANG_MK_RECURSIVE_DEPS_LIST)
+list-doc-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST)
+list-rel-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST)
+list-test-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST)
+list-shell-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+list-deps list-doc-deps list-rel-deps list-test-deps list-shell-deps:
+ $(verbose) cat $^
+
+# Query dependencies recursively.
+
+.PHONY: query-deps query-doc-deps query-rel-deps query-test-deps \
+ query-shell-deps
+
+QUERY ?= name fetch_method repo version
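+
+# Illustrative: `make query-deps QUERY="name repo"` prints one
+# "$(PROJECT): <name> <repo>" line per dependency, recursively.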
+
+define query_target
+$(1): $(2) clean-tmp-query.log
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(4)
+endif
+ $(verbose) $(foreach dep,$(3),\
+ echo $(PROJECT): $(foreach q,$(QUERY),$(call query_$(q),$(dep))) >> $(4) ;)
+ $(if $(filter-out query-deps,$(1)),,\
+ $(verbose) set -e; for dep in $(3) ; do \
+ if grep -qs ^$$$$dep$$$$ $(ERLANG_MK_TMP)/query.log; then \
+ :; \
+ else \
+ echo $$$$dep >> $(ERLANG_MK_TMP)/query.log; \
+ $(MAKE) -C $(DEPS_DIR)/$$$$dep $$@ QUERY="$(QUERY)" IS_DEP=1 || true; \
+ fi \
+ done)
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) touch $(4)
+ $(verbose) cat $(4)
+endif
+endef
+
+clean-tmp-query.log:
+ifeq ($(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_TMP)/query.log
+endif
+
+$(eval $(call query_target,query-deps,$(ERLANG_MK_RECURSIVE_DEPS_LIST),$(BUILD_DEPS) $(DEPS),$(ERLANG_MK_QUERY_DEPS_FILE)))
+$(eval $(call query_target,query-doc-deps,$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST),$(DOC_DEPS),$(ERLANG_MK_QUERY_DOC_DEPS_FILE)))
+$(eval $(call query_target,query-rel-deps,$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST),$(REL_DEPS),$(ERLANG_MK_QUERY_REL_DEPS_FILE)))
+$(eval $(call query_target,query-test-deps,$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST),$(TEST_DEPS),$(ERLANG_MK_QUERY_TEST_DEPS_FILE)))
+$(eval $(call query_target,query-shell-deps,$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST),$(SHELL_DEPS),$(ERLANG_MK_QUERY_SHELL_DEPS_FILE)))
diff --git a/deps/rabbitmq_federation/etc/rabbit-test.sh b/deps/rabbitmq_federation/etc/rabbit-test.sh
new file mode 100644
index 0000000000..5697412c26
--- /dev/null
+++ b/deps/rabbitmq_federation/etc/rabbit-test.sh
@@ -0,0 +1,24 @@
+#!/bin/sh
+CTL=$1
+
+# Test direct connections
+$CTL set_parameter federation-upstream localhost '{"uri": "amqp://"}'
+# We will test that guest:guest gets stripped out in user_id_test
+$CTL set_parameter federation-upstream local5673 '{"uri": "amqp://guest:guest@localhost:5673"}'
+
+$CTL set_parameter federation-upstream-set upstream '[{"upstream": "localhost", "exchange": "upstream", "queue": "upstream"}]'
+$CTL set_parameter federation-upstream-set upstream2 '[{"upstream": "localhost", "exchange": "upstream2", "queue": "upstream2"}]'
+$CTL set_parameter federation-upstream-set localhost '[{"upstream": "localhost"}]'
+$CTL set_parameter federation-upstream-set upstream12 '[{"upstream": "localhost", "exchange": "upstream", "queue": "upstream"},
+ {"upstream": "localhost", "exchange": "upstream2", "queue": "upstream2"}]'
+$CTL set_parameter federation-upstream-set one '[{"upstream": "localhost", "exchange": "one", "queue": "one"}]'
+$CTL set_parameter federation-upstream-set two '[{"upstream": "localhost", "exchange": "two", "queue": "two"}]'
+$CTL set_parameter federation-upstream-set upstream5673 '[{"upstream": "local5673", "exchange": "upstream"}]'
+
+$CTL set_policy fed "^fed\." '{"federation-upstream-set": "upstream"}'
+$CTL set_policy fed12 "^fed12\." '{"federation-upstream-set": "upstream12"}'
+$CTL set_policy one "^two$" '{"federation-upstream-set": "one"}'
+$CTL set_policy two "^one$" '{"federation-upstream-set": "two"}'
+$CTL set_policy hare "^hare\." '{"federation-upstream-set": "upstream5673"}'
+$CTL set_policy all "^all\." '{"federation-upstream-set": "all"}'
+$CTL set_policy new "^new\." '{"federation-upstream-set": "new-set"}'
diff --git a/deps/rabbitmq_federation/etc/setup-rabbit-test.sh b/deps/rabbitmq_federation/etc/setup-rabbit-test.sh
new file mode 100755
index 0000000000..2e2282ee07
--- /dev/null
+++ b/deps/rabbitmq_federation/etc/setup-rabbit-test.sh
@@ -0,0 +1,2 @@
+#!/bin/sh -e
+sh -e `dirname $0`/rabbit-test.sh "$DEPS_DIR/rabbit/scripts/rabbitmqctl -n $RABBITMQ_NODENAME"
diff --git a/deps/rabbitmq_federation/include/rabbit_federation.hrl b/deps/rabbitmq_federation/include/rabbit_federation.hrl
new file mode 100644
index 0000000000..af92e1aa25
--- /dev/null
+++ b/deps/rabbitmq_federation/include/rabbit_federation.hrl
@@ -0,0 +1,44 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-record(upstream, {uris,
+ exchange_name,
+ queue_name,
+ consumer_tag,
+ prefetch_count,
+ max_hops,
+ reconnect_delay,
+ expires,
+ message_ttl,
+ trust_user_id,
+ ack_mode,
+ ha_policy,
+ name,
+ bind_nowait,
+ resource_cleanup_mode}).
+
+-record(upstream_params,
+ {uri,
+ params,
+ x_or_q,
+ %% The next two can be derived from the above three, but we don't
+ %% want to do that every time we forward a message.
+ safe_uri,
+ table}).
+
+%% Name of the message header used to collect the hop (forwarding) path
+%% metadata as the message is forwarded by exchange federation.
+-define(ROUTING_HEADER, <<"x-received-from">>).
+-define(BINDING_HEADER, <<"x-bound-from">>).
+-define(MAX_HOPS_ARG, <<"x-max-hops">>).
+%% Identifies a cluster, used by exchange federation cycle detection
+-define(DOWNSTREAM_NAME_ARG, <<"x-downstream-name">>).
+%% Identifies a virtual host, used by exchange federation cycle detection
+-define(DOWNSTREAM_VHOST_ARG, <<"x-downstream-vhost">>).
+-define(DEF_PREFETCH, 1000).
+
+-define(FEDERATION_GUIDE_URL, <<"https://rabbitmq.com/federation.html">>).
diff --git a/deps/rabbitmq_federation/rabbitmq-components.mk b/deps/rabbitmq_federation/rabbitmq-components.mk
new file mode 100644
index 0000000000..b2a3be8b35
--- /dev/null
+++ b/deps/rabbitmq_federation/rabbitmq-components.mk
@@ -0,0 +1,359 @@
+ifeq ($(.DEFAULT_GOAL),)
+# Define the default goal as `all` because this file defines some
+# targets before the inclusion of erlang.mk, which would otherwise make
+# the wrong target the default.
+.DEFAULT_GOAL = all
+endif
+
+# PROJECT_VERSION defaults to:
+# 1. the version exported by rabbitmq-server-release;
+# 2. the version stored in `git-revisions.txt`, if it exists;
+# 3. a version based on git-describe(1), if it is a Git clone;
+# 4. 0.0.0
+
+PROJECT_VERSION := $(RABBITMQ_VERSION)
+
+ifeq ($(PROJECT_VERSION),)
+PROJECT_VERSION := $(shell \
+if test -f git-revisions.txt; then \
+ head -n1 git-revisions.txt | \
+ awk '{print $$$(words $(PROJECT_DESCRIPTION) version);}'; \
+else \
+ (git describe --dirty --abbrev=7 --tags --always --first-parent \
+ 2>/dev/null || echo rabbitmq_v0_0_0) | \
+ sed -e 's/^rabbitmq_v//' -e 's/^v//' -e 's/_/./g' -e 's/-/+/' \
+ -e 's/-/./g'; \
+fi)
+endif
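+
+# For example, a git-describe(1) result of "v3.8.9-12-gabc1234" becomes
+# the version string "3.8.9+12.gabc1234" after the sed substitutions
+# above (the version shown here is illustrative).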
+
+# --------------------------------------------------------------------
+# RabbitMQ components.
+# --------------------------------------------------------------------
+
+# For RabbitMQ repositories, we want to check out branches which match
+# the parent project. For instance, if the parent project is on a
+# release tag, dependencies must be on the same release tag. If the
+# parent project is on a topic branch, dependencies must be on the same
+# topic branch, or fall back to `stable` or `master`, whichever was the
+# base of the topic branch.
+
+dep_amqp_client = git_rmq rabbitmq-erlang-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_amqp10_client = git_rmq rabbitmq-amqp1.0-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_amqp10_common = git_rmq rabbitmq-amqp1.0-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbit = git_rmq rabbitmq-server $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbit_common = git_rmq rabbitmq-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_amqp1_0 = git_rmq rabbitmq-amqp1.0 $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_amqp = git_rmq rabbitmq-auth-backend-amqp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_cache = git_rmq rabbitmq-auth-backend-cache $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_http = git_rmq rabbitmq-auth-backend-http $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_ldap = git_rmq rabbitmq-auth-backend-ldap $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_oauth2 = git_rmq rabbitmq-auth-backend-oauth2 $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_mechanism_ssl = git_rmq rabbitmq-auth-mechanism-ssl $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_aws = git_rmq rabbitmq-aws $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_boot_steps_visualiser = git_rmq rabbitmq-boot-steps-visualiser $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_cli = git_rmq rabbitmq-cli $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_codegen = git_rmq rabbitmq-codegen $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_consistent_hash_exchange = git_rmq rabbitmq-consistent-hash-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_ct_client_helpers = git_rmq rabbitmq-ct-client-helpers $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_ct_helpers = git_rmq rabbitmq-ct-helpers $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_delayed_message_exchange = git_rmq rabbitmq-delayed-message-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_dotnet_client = git_rmq rabbitmq-dotnet-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_event_exchange = git_rmq rabbitmq-event-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_federation = git_rmq rabbitmq-federation $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_federation_management = git_rmq rabbitmq-federation-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_java_client = git_rmq rabbitmq-java-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_client = git_rmq rabbitmq-jms-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_cts = git_rmq rabbitmq-jms-cts $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_topic_exchange = git_rmq rabbitmq-jms-topic-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_lvc_exchange = git_rmq rabbitmq-lvc-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management = git_rmq rabbitmq-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_agent = git_rmq rabbitmq-management-agent $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_exchange = git_rmq rabbitmq-management-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_themes = git_rmq rabbitmq-management-themes $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_message_timestamp = git_rmq rabbitmq-message-timestamp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_metronome = git_rmq rabbitmq-metronome $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_mqtt = git_rmq rabbitmq-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_objc_client = git_rmq rabbitmq-objc-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_aws = git_rmq rabbitmq-peer-discovery-aws $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_common = git_rmq rabbitmq-peer-discovery-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_consul = git_rmq rabbitmq-peer-discovery-consul $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_etcd = git_rmq rabbitmq-peer-discovery-etcd $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_k8s = git_rmq rabbitmq-peer-discovery-k8s $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_prometheus = git_rmq rabbitmq-prometheus $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_random_exchange = git_rmq rabbitmq-random-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_recent_history_exchange = git_rmq rabbitmq-recent-history-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_routing_node_stamp = git_rmq rabbitmq-routing-node-stamp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_rtopic_exchange = git_rmq rabbitmq-rtopic-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_server_release = git_rmq rabbitmq-server-release $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_sharding = git_rmq rabbitmq-sharding $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_shovel = git_rmq rabbitmq-shovel $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_shovel_management = git_rmq rabbitmq-shovel-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_stomp = git_rmq rabbitmq-stomp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_stream = git_rmq rabbitmq-stream $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_toke = git_rmq rabbitmq-toke $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_top = git_rmq rabbitmq-top $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_tracing = git_rmq rabbitmq-tracing $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_trust_store = git_rmq rabbitmq-trust-store $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_test = git_rmq rabbitmq-test $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_dispatch = git_rmq rabbitmq-web-dispatch $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_stomp = git_rmq rabbitmq-web-stomp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_stomp_examples = git_rmq rabbitmq-web-stomp-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt = git_rmq rabbitmq-web-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt_examples = git_rmq rabbitmq-web-mqtt-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_website = git_rmq rabbitmq-website $(current_rmq_ref) $(base_rmq_ref) live master
+dep_toke = git_rmq toke $(current_rmq_ref) $(base_rmq_ref) master
+
+dep_rabbitmq_public_umbrella = git_rmq rabbitmq-public-umbrella $(current_rmq_ref) $(base_rmq_ref) master
+
+# Third-party dependencies version pinning.
+#
+# We pin them in this file, which is copied into all projects, to
+# ensure all projects use the same versions. This avoids conflicts and
+# makes it possible to work with rabbitmq-public-umbrella.
+
+dep_accept = hex 0.3.5
+dep_cowboy = hex 2.8.0
+dep_cowlib = hex 2.9.1
+dep_jsx = hex 2.11.0
+dep_lager = hex 3.8.0
+dep_prometheus = git https://github.com/deadtrickster/prometheus.erl.git master
+dep_ra = git https://github.com/rabbitmq/ra.git master
+dep_ranch = hex 1.7.1
+dep_recon = hex 2.5.1
+dep_observer_cli = hex 1.5.4
+dep_stdout_formatter = hex 0.2.4
+dep_sysmon_handler = hex 1.3.0
+
+RABBITMQ_COMPONENTS = amqp_client \
+ amqp10_common \
+ amqp10_client \
+ rabbit \
+ rabbit_common \
+ rabbitmq_amqp1_0 \
+ rabbitmq_auth_backend_amqp \
+ rabbitmq_auth_backend_cache \
+ rabbitmq_auth_backend_http \
+ rabbitmq_auth_backend_ldap \
+ rabbitmq_auth_backend_oauth2 \
+ rabbitmq_auth_mechanism_ssl \
+ rabbitmq_aws \
+ rabbitmq_boot_steps_visualiser \
+ rabbitmq_cli \
+ rabbitmq_codegen \
+ rabbitmq_consistent_hash_exchange \
+ rabbitmq_ct_client_helpers \
+ rabbitmq_ct_helpers \
+ rabbitmq_delayed_message_exchange \
+ rabbitmq_dotnet_client \
+ rabbitmq_event_exchange \
+ rabbitmq_federation \
+ rabbitmq_federation_management \
+ rabbitmq_java_client \
+ rabbitmq_jms_client \
+ rabbitmq_jms_cts \
+ rabbitmq_jms_topic_exchange \
+ rabbitmq_lvc_exchange \
+ rabbitmq_management \
+ rabbitmq_management_agent \
+ rabbitmq_management_exchange \
+ rabbitmq_management_themes \
+ rabbitmq_message_timestamp \
+ rabbitmq_metronome \
+ rabbitmq_mqtt \
+ rabbitmq_objc_client \
+ rabbitmq_peer_discovery_aws \
+ rabbitmq_peer_discovery_common \
+ rabbitmq_peer_discovery_consul \
+ rabbitmq_peer_discovery_etcd \
+ rabbitmq_peer_discovery_k8s \
+ rabbitmq_prometheus \
+ rabbitmq_random_exchange \
+ rabbitmq_recent_history_exchange \
+ rabbitmq_routing_node_stamp \
+ rabbitmq_rtopic_exchange \
+ rabbitmq_server_release \
+ rabbitmq_sharding \
+ rabbitmq_shovel \
+ rabbitmq_shovel_management \
+ rabbitmq_stomp \
+ rabbitmq_stream \
+ rabbitmq_toke \
+ rabbitmq_top \
+ rabbitmq_tracing \
+ rabbitmq_trust_store \
+ rabbitmq_web_dispatch \
+ rabbitmq_web_mqtt \
+ rabbitmq_web_mqtt_examples \
+ rabbitmq_web_stomp \
+ rabbitmq_web_stomp_examples \
+ rabbitmq_website
+
+# Erlang.mk does not rebuild dependencies by default, once they were
+# compiled once, except for those listed in the `$(FORCE_REBUILD)`
+# variable.
+#
+# We want all RabbitMQ components to always be rebuilt: this eases
+# the work on several components at the same time.
+
+FORCE_REBUILD = $(RABBITMQ_COMPONENTS)
+
+# Several components have a custom erlang.mk/build.config, mainly
+# to disable eunit. Therefore, we can't use the top-level project's
+# erlang.mk copy.
+NO_AUTOPATCH += $(RABBITMQ_COMPONENTS)
+
+ifeq ($(origin current_rmq_ref),undefined)
+ifneq ($(wildcard .git),)
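+# The shell below prints the current branch name, or the ref a detached
+# HEAD points at; the result is empty when HEAD is just a bare commit
+# hash, in which case no meaningful ref can be inferred.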
+current_rmq_ref := $(shell (\
+ ref=$$(LANG=C git branch --list | awk '/^\* \(.*detached / {ref=$$0; sub(/.*detached [^ ]+ /, "", ref); sub(/\)$$/, "", ref); print ref; exit;} /^\* / {ref=$$0; sub(/^\* /, "", ref); print ref; exit}');\
+ if test "$$(git rev-parse --short HEAD)" != "$$ref"; then echo "$$ref"; fi))
+else
+current_rmq_ref := master
+endif
+endif
+export current_rmq_ref
+
+ifeq ($(origin base_rmq_ref),undefined)
+ifneq ($(wildcard .git),)
+possible_base_rmq_ref := master
+ifeq ($(possible_base_rmq_ref),$(current_rmq_ref))
+base_rmq_ref := $(current_rmq_ref)
+else
+base_rmq_ref := $(shell \
+ (git rev-parse --verify -q master >/dev/null && \
+ git rev-parse --verify -q $(possible_base_rmq_ref) >/dev/null && \
+ git merge-base --is-ancestor $$(git merge-base master HEAD) $(possible_base_rmq_ref) && \
+ echo $(possible_base_rmq_ref)) || \
+ echo master)
+endif
+else
+base_rmq_ref := master
+endif
+endif
+export base_rmq_ref
+
+# Repository URL selection.
+#
+# First, we infer other components' locations from the current project's
+# repository URL, if it is a Git repository:
+# - We take the "origin" remote URL as the base.
+# - The current project's name and repository name are replaced by the
+#   target's properties:
+#       eg. rabbitmq-common is replaced by rabbitmq-codegen
+#       eg. rabbit_common is replaced by rabbitmq_codegen
+#
+# If cloning from this computed location fails, we fall back to the
+# RabbitMQ upstream, which is hosted on GitHub.
+
+# Macro to transform eg. "rabbit_common" to "rabbitmq-common".
+rmq_cmp_repo_name = $(word 2,$(dep_$(1)))
+
+# Upstream URL for the current project.
+RABBITMQ_COMPONENT_REPO_NAME := $(call rmq_cmp_repo_name,$(PROJECT))
+RABBITMQ_UPSTREAM_FETCH_URL ?= https://github.com/rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git
+RABBITMQ_UPSTREAM_PUSH_URL ?= git@github.com:rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git
+
+# Current URL for the current project. If this is not a Git clone,
+# default to the upstream Git repository.
+ifneq ($(wildcard .git),)
+git_origin_fetch_url := $(shell git config remote.origin.url)
+git_origin_push_url := $(shell git config remote.origin.pushurl || git config remote.origin.url)
+RABBITMQ_CURRENT_FETCH_URL ?= $(git_origin_fetch_url)
+RABBITMQ_CURRENT_PUSH_URL ?= $(git_origin_push_url)
+else
+RABBITMQ_CURRENT_FETCH_URL ?= $(RABBITMQ_UPSTREAM_FETCH_URL)
+RABBITMQ_CURRENT_PUSH_URL ?= $(RABBITMQ_UPSTREAM_PUSH_URL)
+endif
+
+# Macro to replace the following pattern:
+# 1. /foo.git -> /bar.git
+# 2. /foo -> /bar
+# 3. /foo/ -> /bar/
+subst_repo_name = $(patsubst %/$(1)/%,%/$(2)/%,$(patsubst %/$(1),%/$(2),$(patsubst %/$(1).git,%/$(2).git,$(3))))
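+# For example (the URL is illustrative):
+#   $(call subst_repo_name,rabbitmq-common,rabbitmq-codegen,https://github.com/rabbitmq/rabbitmq-common.git)
+# expands to https://github.com/rabbitmq/rabbitmq-codegen.git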
+
+# Macro to replace both the project's name (eg. "rabbit_common") and
+# repository name (eg. "rabbitmq-common") by the target's equivalent.
+#
+# This macro is kept on one line because we don't want whitespace in
+# the returned value, as it's used in $(dep_fetch_git_rmq) in a shell
+# single-quoted string.
+dep_rmq_repo = $(if $(dep_$(2)),$(call subst_repo_name,$(PROJECT),$(2),$(call subst_repo_name,$(RABBITMQ_COMPONENT_REPO_NAME),$(call rmq_cmp_repo_name,$(2)),$(1))),$(pkg_$(1)_repo))
+
+dep_rmq_commits = $(if $(dep_$(1)), \
+ $(wordlist 3,$(words $(dep_$(1))),$(dep_$(1))), \
+ $(pkg_$(1)_commit))
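+
+# For example, $(call dep_rmq_commits,rabbit) yields the candidate refs
+# from the dep_rabbit definition above: the current ref, the base ref,
+# then master.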
+
+define dep_fetch_git_rmq
+ fetch_url1='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_FETCH_URL),$(1))'; \
+ fetch_url2='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_FETCH_URL),$(1))'; \
+ if test "$$$$fetch_url1" != '$(RABBITMQ_CURRENT_FETCH_URL)' && \
+ git clone -q -n -- "$$$$fetch_url1" $(DEPS_DIR)/$(call dep_name,$(1)); then \
+ fetch_url="$$$$fetch_url1"; \
+ push_url='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_PUSH_URL),$(1))'; \
+ elif git clone -q -n -- "$$$$fetch_url2" $(DEPS_DIR)/$(call dep_name,$(1)); then \
+ fetch_url="$$$$fetch_url2"; \
+ push_url='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_PUSH_URL),$(1))'; \
+ fi; \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && ( \
+ $(foreach ref,$(call dep_rmq_commits,$(1)), \
+ git checkout -q $(ref) >/dev/null 2>&1 || \
+ ) \
+ (echo "error: no valid pathspec among: $(call dep_rmq_commits,$(1))" \
+ 1>&2 && false) ) && \
+ (test "$$$$fetch_url" = "$$$$push_url" || \
+ git remote set-url --push origin "$$$$push_url")
+endef
+
+# --------------------------------------------------------------------
+# Component distribution.
+# --------------------------------------------------------------------
+
+list-dist-deps::
+ @:
+
+prepare-dist::
+ @:
+
+# --------------------------------------------------------------------
+# Umbrella-specific settings.
+# --------------------------------------------------------------------
+
+# If the top-level project is a RabbitMQ component, we override
+# $(DEPS_DIR) for this project to point to the top-level's one.
+#
+# We also verify that the guessed DEPS_DIR is actually named `deps`,
+# to rule out any situation where it is a coincidence that we found a
+# `rabbitmq-components.mk` in upper directories.
+
+possible_deps_dir_1 = $(abspath ..)
+possible_deps_dir_2 = $(abspath ../../..)
+
+ifeq ($(notdir $(possible_deps_dir_1)),deps)
+ifneq ($(wildcard $(possible_deps_dir_1)/../rabbitmq-components.mk),)
+deps_dir_overriden = 1
+DEPS_DIR ?= $(possible_deps_dir_1)
+DISABLE_DISTCLEAN = 1
+endif
+endif
+
+ifeq ($(deps_dir_overriden),)
+ifeq ($(notdir $(possible_deps_dir_2)),deps)
+ifneq ($(wildcard $(possible_deps_dir_2)/../rabbitmq-components.mk),)
+deps_dir_overriden = 1
+DEPS_DIR ?= $(possible_deps_dir_2)
+DISABLE_DISTCLEAN = 1
+endif
+endif
+endif
+
+ifneq ($(wildcard UMBRELLA.md),)
+DISABLE_DISTCLEAN = 1
+endif
+
+# We disable `make distclean` so $(DEPS_DIR) is not accidentally removed.
+
+ifeq ($(DISABLE_DISTCLEAN),1)
+ifneq ($(filter distclean distclean-deps,$(MAKECMDGOALS)),)
+SKIP_DEPS = 1
+endif
+endif
diff --git a/deps/rabbitmq_federation/src/Elixir.RabbitMQ.CLI.Ctl.Commands.FederationStatusCommand.erl b/deps/rabbitmq_federation/src/Elixir.RabbitMQ.CLI.Ctl.Commands.FederationStatusCommand.erl
new file mode 100644
index 0000000000..bab4dddeec
--- /dev/null
+++ b/deps/rabbitmq_federation/src/Elixir.RabbitMQ.CLI.Ctl.Commands.FederationStatusCommand.erl
@@ -0,0 +1,117 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module('Elixir.RabbitMQ.CLI.Ctl.Commands.FederationStatusCommand').
+
+-include("rabbit_federation.hrl").
+
+-behaviour('Elixir.RabbitMQ.CLI.CommandBehaviour').
+
+-export([
+ usage/0,
+ usage_additional/0,
+ usage_doc_guides/0,
+ flags/0,
+ validate/2,
+ merge_defaults/2,
+ banner/2,
+ run/2,
+ switches/0,
+ aliases/0,
+ output/2,
+ scopes/0,
+ formatter/0,
+ help_section/0,
+ description/0
+ ]).
+
+
+%%----------------------------------------------------------------------------
+%% Callbacks
+%%----------------------------------------------------------------------------
+usage() ->
+ <<"federation_status [--only-down]">>.
+
+usage_additional() ->
+ [
+ {<<"--only-down">>, <<"only display links that failed or are not currently connected">>}
+ ].
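+
+%% Typical invocation (the plugin provides this rabbitmqctl command):
+%%   rabbitmqctl federation_status --only-down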
+
+usage_doc_guides() ->
+ [?FEDERATION_GUIDE_URL].
+
+help_section() ->
+ {plugin, federation}.
+
+description() ->
+ <<"Displays federation link status">>.
+
+flags() ->
+ [].
+
+validate(_,_) ->
+ ok.
+
+formatter() ->
+ 'Elixir.RabbitMQ.CLI.Formatters.Erlang'.
+
+merge_defaults(A, Opts) ->
+ {A, maps:merge(#{only_down => false}, Opts)}.
+
+banner(_, #{node := Node, only_down := true}) ->
+ erlang:iolist_to_binary([<<"Listing federation links which are down on node ">>,
+ atom_to_binary(Node, utf8), <<"...">>]);
+banner(_, #{node := Node, only_down := false}) ->
+ erlang:iolist_to_binary([<<"Listing federation links on node ">>,
+ atom_to_binary(Node, utf8), <<"...">>]).
+
+run(_Args, #{node := Node, only_down := OnlyDown}) ->
+ case rabbit_misc:rpc_call(Node, rabbit_federation_status, status, []) of
+ {badrpc, _} = Error ->
+ Error;
+ Status ->
+ {stream, filter(Status, OnlyDown)}
+ end.
+
+switches() ->
+ [{only_down, boolean}].
+
+aliases() ->
+ [].
+
+output({stream, FederationStatus}, _) ->
+ Formatted = [begin
+ Timestamp = proplists:get_value(timestamp, St),
+ Map0 = maps:remove(timestamp, maps:from_list(St)),
+ Map1 = maps:merge(#{queue => <<>>,
+ exchange => <<>>,
+ upstream_queue => <<>>,
+ upstream_exchange => <<>>,
+ local_connection => <<>>,
+ error => <<>>}, Map0),
+ Map1#{last_changed => fmt_ts(Timestamp)}
+ end || St <- FederationStatus],
+ {stream, Formatted};
+output(E, _Opts) ->
+ 'Elixir.RabbitMQ.CLI.DefaultOutput':output(E).
+
+scopes() ->
+ ['ctl', 'diagnostics'].
+
+%%----------------------------------------------------------------------------
+%% Formatting
+%%----------------------------------------------------------------------------
+fmt_ts({{YY, MM, DD}, {Hour, Min, Sec}}) ->
+ erlang:list_to_binary(
+ io_lib:format("~4..0w-~2..0w-~2..0w ~2..0w:~2..0w:~2..0w",
+ [YY, MM, DD, Hour, Min, Sec])).
+
+filter(Status, _OnlyDown = false) ->
+ Status;
+filter(Status, _OnlyDown = true) ->
+ [St || St <- Status,
+ not lists:member(proplists:get_value(status, St), [running, starting])].
diff --git a/deps/rabbitmq_federation/src/Elixir.RabbitMQ.CLI.Ctl.Commands.RestartFederationLinkCommand.erl b/deps/rabbitmq_federation/src/Elixir.RabbitMQ.CLI.Ctl.Commands.RestartFederationLinkCommand.erl
new file mode 100644
index 0000000000..8d062c692c
--- /dev/null
+++ b/deps/rabbitmq_federation/src/Elixir.RabbitMQ.CLI.Ctl.Commands.RestartFederationLinkCommand.erl
@@ -0,0 +1,84 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module('Elixir.RabbitMQ.CLI.Ctl.Commands.RestartFederationLinkCommand').
+
+-include("rabbit_federation.hrl").
+
+-behaviour('Elixir.RabbitMQ.CLI.CommandBehaviour').
+
+-export([
+ usage/0,
+ usage_additional/0,
+ usage_doc_guides/0,
+ flags/0,
+ validate/2,
+ merge_defaults/2,
+ banner/2,
+ run/2,
+ aliases/0,
+ output/2,
+ help_section/0,
+ description/0
+ ]).
+
+
+%%----------------------------------------------------------------------------
+%% Callbacks
+%%----------------------------------------------------------------------------
+usage() ->
+ <<"restart_federation_link <link_id>">>.
+
+usage_additional() ->
+ [
+ {<<"<link_id>">>, <<"ID of the link to restart">>}
+ ].
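+
+%% Typical invocation (link IDs can be obtained from federation_status):
+%%   rabbitmqctl restart_federation_link <link_id>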
+
+usage_doc_guides() ->
+ [?FEDERATION_GUIDE_URL].
+
+help_section() ->
+ {plugin, federation}.
+
+description() ->
+ <<"Restarts a running federation link">>.
+
+flags() ->
+ [].
+
+validate([], _Opts) ->
+ {validation_failure, not_enough_args};
+validate([_, _ | _], _Opts) ->
+ {validation_failure, too_many_args};
+validate([_], _) ->
+ ok.
+
+merge_defaults(A, O) ->
+ {A, O}.
+
+banner([Link], #{node := Node}) ->
+ erlang:iolist_to_binary([<<"Restarting federation link ">>, Link, << " on node ">>,
+ atom_to_binary(Node, utf8)]).
+
+run([Id], #{node := Node}) ->
+ case rabbit_misc:rpc_call(Node, rabbit_federation_status, lookup, [Id]) of
+ {badrpc, _} = Error ->
+ Error;
+ not_found ->
+ {error, <<"Link with the given ID was not found">>};
+ Obj ->
+ Upstream = proplists:get_value(upstream, Obj),
+ Supervisor = proplists:get_value(supervisor, Obj),
+ rabbit_misc:rpc_call(Node, rabbit_federation_link_sup, restart,
+ [Supervisor, Upstream])
+ end.
+
+aliases() ->
+ [].
+
+output(Output, _Opts) ->
+ 'Elixir.RabbitMQ.CLI.DefaultOutput':output(Output).
diff --git a/deps/rabbitmq_federation/src/rabbit_federation_app.erl b/deps/rabbitmq_federation/src/rabbit_federation_app.erl
new file mode 100644
index 0000000000..ee7ba91e5f
--- /dev/null
+++ b/deps/rabbitmq_federation/src/rabbit_federation_app.erl
@@ -0,0 +1,38 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_federation_app).
+
+-behaviour(application).
+-export([start/2, stop/1]).
+
+%% Dummy supervisor - see Ulf Wiger's comment at
+%% http://erlang.2086793.n4.nabble.com/initializing-library-applications-without-processes-td2094473.html
+
+%% All of our actual server processes are supervised by
+%% rabbit_federation_sup, which is started by a rabbit_boot_step
+%% (since it needs to start up before queue / exchange recovery, so it
+%% can't be part of our application).
+%%
+%% However, we still need an application behaviour: we need to know
+%% when our application has started, because by then the Erlang client
+%% will have started too and we can therefore get our links going.
+%% Since the application behaviour needs a tree of processes to
+%% supervise, this is it...
+-behaviour(supervisor).
+-export([init/1]).
+
+start(_Type, _StartArgs) ->
+ rabbit_federation_exchange_link:go(),
+ rabbit_federation_queue_link:go(),
+ supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+stop(_State) ->
+ ok.
+
+%%----------------------------------------------------------------------------
+
+init([]) -> {ok, {{one_for_one, 3, 10}, []}}.
diff --git a/deps/rabbitmq_federation/src/rabbit_federation_db.erl b/deps/rabbitmq_federation/src/rabbit_federation_db.erl
new file mode 100644
index 0000000000..e35e3646a8
--- /dev/null
+++ b/deps/rabbitmq_federation/src/rabbit_federation_db.erl
@@ -0,0 +1,47 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_federation_db).
+
+-include("rabbit_federation.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-define(DICT, orddict).
+
+-export([get_active_suffix/3, set_active_suffix/3, prune_scratch/2]).
+
+%%----------------------------------------------------------------------------
+
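+%% The active suffix (<<"A">> or <<"B">>) records which of the two
+%% alternating internal upstream exchanges a federation link currently
+%% uses; see rabbit_federation_exchange_link:ensure_upstream_bindings/2.
+%% It is stored in the exchange's scratch space under the 'federation'
+%% key.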
+get_active_suffix(XName, Upstream, Default) ->
+ case rabbit_exchange:lookup_scratch(XName, federation) of
+ {ok, Dict} ->
+ case ?DICT:find(key(Upstream), Dict) of
+ {ok, Suffix} -> Suffix;
+ error -> Default
+ end;
+ {error, not_found} ->
+ Default
+ end.
+
+set_active_suffix(XName, Upstream, Suffix) ->
+ ok = rabbit_exchange:update_scratch(
+ XName, federation,
+ fun(D) -> ?DICT:store(key(Upstream), Suffix, ensure(D)) end).
+
+prune_scratch(XName, Upstreams) ->
+ ok = rabbit_exchange:update_scratch(
+ XName, federation,
+ fun(D) -> Keys = [key(U) || U <- Upstreams],
+ ?DICT:filter(
+ fun(K, _V) -> lists:member(K, Keys) end, ensure(D))
+ end).
+
+key(#upstream{name = UpstreamName, exchange_name = XNameBin}) ->
+ {UpstreamName, XNameBin}.
+
+ensure(undefined) -> ?DICT:new();
+ensure(D) -> D.
diff --git a/deps/rabbitmq_federation/src/rabbit_federation_event.erl b/deps/rabbitmq_federation/src/rabbit_federation_event.erl
new file mode 100644
index 0000000000..417b8ecba3
--- /dev/null
+++ b/deps/rabbitmq_federation/src/rabbit_federation_event.erl
@@ -0,0 +1,54 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_federation_event).
+-behaviour(gen_event).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-export([add_handler/0, remove_handler/0]).
+
+-export([init/1, handle_call/2, handle_event/2, handle_info/2,
+ terminate/2, code_change/3]).
+
+-import(rabbit_misc, [pget/2]).
+
+%%----------------------------------------------------------------------------
+
+add_handler() ->
+ gen_event:add_handler(rabbit_event, ?MODULE, []).
+
+remove_handler() ->
+ gen_event:delete_handler(rabbit_event, ?MODULE, []).
+
+init([]) ->
+ {ok, []}.
+
+handle_call(_Request, State) ->
+ {ok, not_understood, State}.
+
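+%% The cluster name is embedded in the headers federation uses for
+%% cycle detection, so when it changes, all links are readjusted to
+%% pick up the new value.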
+handle_event(#event{type = parameter_set,
+ props = Props0}, State) ->
+ Props = rabbit_data_coercion:to_list(Props0),
+ case {pget(component, Props), pget(name, Props)} of
+ {global, cluster_name} ->
+ rabbit_federation_parameters:adjust(everything);
+ _ ->
+ ok
+ end,
+ {ok, State};
+handle_event(_Event, State) ->
+ {ok, State}.
+
+handle_info(_Info, State) ->
+ {ok, State}.
+
+terminate(_Arg, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
diff --git a/deps/rabbitmq_federation/src/rabbit_federation_exchange.erl b/deps/rabbitmq_federation/src/rabbit_federation_exchange.erl
new file mode 100644
index 0000000000..6b85b6756b
--- /dev/null
+++ b/deps/rabbitmq_federation/src/rabbit_federation_exchange.erl
@@ -0,0 +1,105 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+%% TODO rename this
+-module(rabbit_federation_exchange).
+
+-rabbit_boot_step({?MODULE,
+ [{description, "federation exchange decorator"},
+ {mfa, {rabbit_registry, register,
+ [exchange_decorator, <<"federation">>, ?MODULE]}},
+ {requires, rabbit_registry},
+ {cleanup, {rabbit_registry, unregister,
+ [exchange_decorator, <<"federation">>]}},
+ {enables, recovery}]}).
+
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-behaviour(rabbit_exchange_decorator).
+
+-export([description/0, serialise_events/1]).
+-export([create/2, delete/3, policy_changed/2,
+ add_binding/3, remove_bindings/3, route/2, active_for/1]).
+
+%%----------------------------------------------------------------------------
+
+description() ->
+ [{description, <<"Federation exchange decorator">>}].
+
+serialise_events(X) -> federate(X).
+
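+%% Decorator callbacks are invoked both inside the Mnesia transaction
+%% and after it (with 'none' or a binding serial). We only start or
+%% stop links in the post-transaction phase, where side effects are
+%% safe.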
+create(transaction, _X) ->
+ ok;
+create(none, X) ->
+ maybe_start(X).
+
+delete(transaction, _X, _Bs) ->
+ ok;
+delete(none, X, _Bs) ->
+ maybe_stop(X).
+
+policy_changed(OldX, NewX) ->
+ maybe_stop(OldX),
+ maybe_start(NewX).
+
+add_binding(transaction, _X, _B) ->
+ ok;
+add_binding(Serial, X = #exchange{name = XName}, B) ->
+ case federate(X) of
+ true -> rabbit_federation_exchange_link:add_binding(Serial, XName, B),
+ ok;
+ false -> ok
+ end.
+
+remove_bindings(transaction, _X, _Bs) ->
+ ok;
+remove_bindings(Serial, X = #exchange{name = XName}, Bs) ->
+ case federate(X) of
+ true -> rabbit_federation_exchange_link:remove_bindings(Serial, XName, Bs),
+ ok;
+ false -> ok
+ end.
+
+route(_, _) -> [].
+
+active_for(X) ->
+ case federate(X) of
+ true -> noroute;
+ false -> none
+ end.
+
+%%----------------------------------------------------------------------------
+
+%% Don't federate the default exchange; we can't bind to it.
+federate(#exchange{name = #resource{name = <<"">>}}) ->
+ false;
+
+%% Don't federate any of our intermediate exchanges. Note that we match
+%% on internal=true rather than on the exchange type, since older
+%% brokers may not declare the x-federation-upstream type on us. Other
+%% internal exchanges should probably not be federated either.
+federate(#exchange{internal = true}) ->
+ false;
+
+federate(X) ->
+ rabbit_federation_upstream:federate(X).
+
+maybe_start(X = #exchange{name = XName})->
+ case federate(X) of
+ true -> ok = rabbit_federation_db:prune_scratch(
+ XName, rabbit_federation_upstream:for(X)),
+ ok = rabbit_federation_exchange_link_sup_sup:start_child(X),
+ ok;
+ false -> ok
+ end.
+
+maybe_stop(X = #exchange{name = XName}) ->
+ case federate(X) of
+ true -> ok = rabbit_federation_exchange_link_sup_sup:stop_child(X),
+ rabbit_federation_status:remove_exchange_or_queue(XName);
+ false -> ok
+ end.
diff --git a/deps/rabbitmq_federation/src/rabbit_federation_exchange_link.erl b/deps/rabbitmq_federation/src/rabbit_federation_exchange_link.erl
new file mode 100644
index 0000000000..869ab047ae
--- /dev/null
+++ b/deps/rabbitmq_federation/src/rabbit_federation_exchange_link.erl
@@ -0,0 +1,696 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_federation_exchange_link).
+
+%% pg2 is deprecated in OTP 23.
+-compile(nowarn_deprecated_function).
+
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include("rabbit_federation.hrl").
+
+-behaviour(gen_server2).
+
+-export([go/0, add_binding/3, remove_bindings/3]).
+-export([list_routing_keys/1]). %% For testing
+
+-export([start_link/1]).
+
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
+ terminate/2, code_change/3]).
+
+-import(rabbit_misc, [pget/2]).
+-import(rabbit_federation_util, [name/1, vhost/1, pgname/1]).
+
+-record(state, {upstream,
+ upstream_params,
+ upstream_name,
+ connection,
+ channel,
+ cmd_channel,
+ consumer_tag,
+ queue,
+ internal_exchange,
+ waiting_cmds = gb_trees:empty(),
+ next_serial,
+ bindings = #{},
+ downstream_connection,
+ downstream_channel,
+ downstream_exchange,
+ unacked,
+ internal_exchange_timer,
+ internal_exchange_interval}).
+
+%%----------------------------------------------------------------------------
+
+%% We start off in a state where we do not connect, since we may first
+%% be started during exchange recovery, when rabbit is not fully
+%% started and the Erlang client is not running. go/0 is then invoked
+%% when the federation app starts.
+go() -> cast(go).
+
+add_binding(S, XN, B) -> cast(XN, {enqueue, S, {add_binding, B}}).
+remove_bindings(S, XN, Bs) -> cast(XN, {enqueue, S, {remove_bindings, Bs}}).
+
+list_routing_keys(XN) -> call(XN, list_routing_keys).
+
+%%----------------------------------------------------------------------------
+
+start_link(Args) ->
+ gen_server2:start_link(?MODULE, Args, [{timeout, infinity}]).
+
+init({Upstream, XName}) ->
+    %% If we are starting up due to a policy change then it's possible
+    %% for the exchange to have been deleted before we got here, in
+    %% which case the delete callback may also have been called before
+    %% we got here. So check that the exchange still exists.
+ case rabbit_exchange:lookup(XName) of
+ {ok, X} ->
+ DeobfuscatedUpstream = rabbit_federation_util:deobfuscate_upstream(Upstream),
+ DeobfuscatedUParams = rabbit_federation_upstream:to_params(DeobfuscatedUpstream, X),
+ UParams = rabbit_federation_util:obfuscate_upstream_params(DeobfuscatedUParams),
+ rabbit_federation_status:report(Upstream, UParams, XName, starting),
+ join(rabbit_federation_exchanges),
+ join({rabbit_federation_exchange, XName}),
+ gen_server2:cast(self(), maybe_go),
+ {ok, {not_started, {Upstream, UParams, XName}}};
+ {error, not_found} ->
+ rabbit_federation_link_util:log_warning(XName, "not found, stopping link~n", []),
+ {stop, gone}
+ end.
+
+handle_call(list_routing_keys, _From, State = #state{bindings = Bindings}) ->
+ {reply, lists:sort([K || {K, _} <- maps:keys(Bindings)]), State};
+
+handle_call(Msg, _From, State) ->
+ {stop, {unexpected_call, Msg}, State}.
+
+handle_cast(maybe_go, S0 = {not_started, _Args}) ->
+ case federation_up() of
+ true -> go(S0);
+ false -> {noreply, S0}
+ end;
+
+handle_cast(go, S0 = {not_started, _Args}) ->
+ go(S0);
+
+%% There's a small race - we may realise federation is up before 'go'
+%% gets invoked. Ignore it.
+handle_cast(go, State) ->
+ {noreply, State};
+
+handle_cast({enqueue, _, _}, State = {not_started, _}) ->
+ {noreply, State};
+
+handle_cast({enqueue, Serial, Cmd},
+ State = #state{waiting_cmds = Waiting,
+ downstream_exchange = XName}) ->
+ Waiting1 = gb_trees:insert(Serial, Cmd, Waiting),
+ try
+ {noreply, play_back_commands(State#state{waiting_cmds = Waiting1})}
+ catch exit:{{shutdown, {server_initiated_close, 404, Text}}, _} ->
+ rabbit_federation_link_util:log_warning(
+ XName, "detected upstream changes, restarting link: ~p~n", [Text]),
+ {stop, {shutdown, restart}, State}
+ end;
+
+handle_cast(Msg, State) ->
+ {stop, {unexpected_cast, Msg}, State}.
+
+handle_info(#'basic.consume_ok'{}, State) ->
+ {noreply, State};
+
+handle_info(#'basic.ack'{} = Ack, State = #state{channel = Ch,
+ unacked = Unacked}) ->
+ Unacked1 = rabbit_federation_link_util:ack(Ack, Ch, Unacked),
+ {noreply, State#state{unacked = Unacked1}};
+
+handle_info(#'basic.nack'{} = Nack, State = #state{channel = Ch,
+ unacked = Unacked}) ->
+ Unacked1 = rabbit_federation_link_util:nack(Nack, Ch, Unacked),
+ {noreply, State#state{unacked = Unacked1}};
+
+handle_info({#'basic.deliver'{routing_key = Key,
+ redelivered = Redelivered} = DeliverMethod, Msg},
+ State = #state{
+ upstream = Upstream = #upstream{max_hops = MaxH},
+ upstream_params = UParams = #upstream_params{x_or_q = UpstreamX},
+ upstream_name = UName,
+ downstream_exchange = #resource{name = XNameBin, virtual_host = DVhost},
+ downstream_channel = DCh,
+ channel = Ch,
+ unacked = Unacked}) ->
+ UVhost = vhost(UpstreamX),
+ PublishMethod = #'basic.publish'{exchange = XNameBin,
+ routing_key = Key},
+ HeadersFun = fun (H) -> update_routing_headers(UParams, UName, UVhost, Redelivered, H) end,
+    %% We need to check should_forward/4 here in case the upstream
+ %% does not have federation and thus is using a fanout exchange.
+ ForwardFun = fun (H) ->
+ DName = rabbit_nodes:cluster_name(),
+ rabbit_federation_util:should_forward(H, MaxH, DName, DVhost)
+ end,
+ Unacked1 = rabbit_federation_link_util:forward(
+ Upstream, DeliverMethod, Ch, DCh, PublishMethod,
+ HeadersFun, ForwardFun, Msg, Unacked),
+ {noreply, State#state{unacked = Unacked1}};
+
+handle_info(#'basic.cancel'{}, State = #state{upstream = Upstream,
+ upstream_params = UParams,
+ downstream_exchange = XName}) ->
+ rabbit_federation_link_util:connection_error(
+ local, basic_cancel, Upstream, UParams, XName, State);
+
+handle_info({'DOWN', _Ref, process, Pid, Reason},
+ State = #state{downstream_channel = DCh,
+ channel = Ch,
+ cmd_channel = CmdCh,
+ upstream = Upstream,
+ upstream_params = UParams,
+ downstream_exchange = XName}) ->
+ handle_down(Pid, Reason, Ch, CmdCh, DCh,
+ {Upstream, UParams, XName}, State);
+
+handle_info(check_internal_exchange, State = #state{internal_exchange = IntXNameBin,
+ internal_exchange_interval = Interval}) ->
+ case check_internal_exchange(IntXNameBin, State) of
+ upstream_not_found ->
+ rabbit_log_federation:warning("Federation link could not find upstream exchange '~s' and will restart",
+ [IntXNameBin]),
+ {stop, {shutdown, restart}, State};
+ _ ->
+ TRef = erlang:send_after(Interval, self(), check_internal_exchange),
+ {noreply, State#state{internal_exchange_timer = TRef}}
+ end;
+
+handle_info(Msg, State) ->
+ {stop, {unexpected_info, Msg}, State}.
+
+terminate(_Reason, {not_started, _}) ->
+ ok;
+terminate(Reason, #state{downstream_connection = DConn,
+ connection = Conn,
+ upstream = Upstream,
+ upstream_params = UParams,
+ downstream_exchange = XName,
+ internal_exchange_timer = TRef,
+ internal_exchange = IntExchange,
+ queue = Queue}) when Reason =:= shutdown;
+ Reason =:= {shutdown, restart};
+ Reason =:= gone ->
+ timer:cancel(TRef),
+ rabbit_federation_link_util:ensure_connection_closed(DConn),
+
+ rabbit_log:debug("Exchange federation: link is shutting down, resource cleanup mode: ~p", [Upstream#upstream.resource_cleanup_mode]),
+ case Upstream#upstream.resource_cleanup_mode of
+ never -> ok;
+ _ ->
+ %% This is a normal shutdown and we are allowed to clean up the internally used queue and exchange
+ rabbit_log:debug("Federated exchange '~s' link will delete its internal queue '~s'", [Upstream#upstream.exchange_name, Queue]),
+ delete_upstream_queue(Conn, Queue),
+ rabbit_log:debug("Federated exchange '~s' link will delete its upstream exchange", [Upstream#upstream.exchange_name]),
+ delete_upstream_exchange(Conn, IntExchange)
+ end,
+
+ rabbit_federation_link_util:ensure_connection_closed(Conn),
+ rabbit_federation_link_util:log_terminate(Reason, Upstream, UParams, XName),
+ ok;
+%% unexpected shutdown
+terminate(Reason, #state{downstream_connection = DConn,
+ connection = Conn,
+ upstream = Upstream,
+ upstream_params = UParams,
+ downstream_exchange = XName,
+ internal_exchange_timer = TRef}) ->
+ timer:cancel(TRef),
+
+ rabbit_federation_link_util:ensure_connection_closed(DConn),
+
+ %% unlike in the clean shutdown case above, we keep the queue
+ %% and exchange around
+
+ rabbit_federation_link_util:ensure_connection_closed(Conn),
+ rabbit_federation_link_util:log_terminate(Reason, Upstream, UParams, XName),
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+%%----------------------------------------------------------------------------
+
+call(XName, Msg) -> [gen_server2:call(Pid, Msg, infinity) || Pid <- x(XName)].
+cast(Msg) -> [gen_server2:cast(Pid, Msg) || Pid <- all()].
+cast(XName, Msg) -> [gen_server2:cast(Pid, Msg) || Pid <- x(XName)].
+
+join(Name) ->
+ pg2:create(pgname(Name)),
+ ok = pg2:join(pgname(Name), self()).
+
+all() ->
+ pg2:create(pgname(rabbit_federation_exchanges)),
+ pg2:get_members(pgname(rabbit_federation_exchanges)).
+
+x(XName) ->
+ pg2:create(pgname({rabbit_federation_exchange, XName})),
+ pg2:get_members(pgname({rabbit_federation_exchange, XName})).
+
+%%----------------------------------------------------------------------------
+
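+%% rabbit_federation_app registers its (dummy) supervisor under the
+%% application's name, so a registered pid means the app has started.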
+federation_up() -> is_pid(whereis(rabbit_federation_app)).
+
+handle_command({add_binding, Binding}, State) ->
+ add_binding(Binding, State);
+
+handle_command({remove_bindings, Bindings}, State) ->
+ lists:foldl(fun remove_binding/2, State, Bindings).
+
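+%% Binding commands are serially numbered and may arrive out of order,
+%% so they are buffered in a gb_tree keyed on serial. We execute a
+%% command only when its serial matches next_serial, and drop any whose
+%% serial predates the initial binding snapshot taken in go/1.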
+play_back_commands(State = #state{waiting_cmds = Waiting,
+ next_serial = Next}) ->
+ case gb_trees:is_empty(Waiting) of
+ false -> case gb_trees:take_smallest(Waiting) of
+ {Next, Cmd, Waiting1} ->
+ %% The next one. Just execute it.
+ play_back_commands(
+ handle_command(Cmd, State#state{
+ waiting_cmds = Waiting1,
+ next_serial = Next + 1}));
+ {Serial, _Cmd, Waiting1} when Serial < Next ->
+ %% This command came from before we executed
+                         %% rabbit_binding:list_for_source/1. Ignore it.
+ play_back_commands(State#state{
+ waiting_cmds = Waiting1});
+ _ ->
+ %% Some future command. Don't do anything.
+ State
+ end;
+ true -> State
+ end.
+
+add_binding(B, State) ->
+ binding_op(fun record_binding/2, bind_cmd(bind, B, State), B, State).
+
+remove_binding(B, State) ->
+ binding_op(fun forget_binding/2, bind_cmd(unbind, B, State), B, State).
+
+record_binding(B = #binding{destination = Dest},
+ State = #state{bindings = Bs}) ->
+ {DoIt, Set} = case maps:find(key(B), Bs) of
+ error -> {true, sets:from_list([Dest])};
+ {ok, Dests} -> {false, sets:add_element(
+ Dest, Dests)}
+ end,
+ {DoIt, State#state{bindings = maps:put(key(B), Set, Bs)}}.
+
+forget_binding(B = #binding{destination = Dest},
+ State = #state{bindings = Bs}) ->
+ Dests = sets:del_element(Dest, maps:get(key(B), Bs)),
+ {DoIt, Bs1} = case sets:size(Dests) of
+ 0 -> {true, maps:remove(key(B), Bs)};
+ _ -> {false, maps:put(key(B), Dests, Bs)}
+ end,
+ {DoIt, State#state{bindings = Bs1}}.
+
+binding_op(UpdateFun, Cmd, B = #binding{args = Args},
+ State = #state{cmd_channel = Ch}) ->
+ {DoIt, State1} =
+ case rabbit_misc:table_lookup(Args, ?BINDING_HEADER) of
+ undefined -> UpdateFun(B, State);
+ {array, _} -> {Cmd =/= ignore, State}
+ end,
+ case DoIt of
+ true -> amqp_channel:call(Ch, Cmd);
+ false -> ok
+ end,
+ State1.
+
+bind_cmd(Type, #binding{key = Key, args = Args},
+ State = #state{internal_exchange = IntXNameBin,
+ upstream_params = UpstreamParams,
+ upstream = Upstream}) ->
+ #upstream_params{x_or_q = X} = UpstreamParams,
+ #upstream{bind_nowait = Nowait} = Upstream,
+ case update_binding(Args, State) of
+ ignore -> ignore;
+ NewArgs -> bind_cmd0(Type, name(X), IntXNameBin, Key, NewArgs, Nowait)
+ end.
+
+bind_cmd0(bind, Source, Destination, RoutingKey, Arguments, Nowait) ->
+ #'exchange.bind'{source = Source,
+ destination = Destination,
+ routing_key = RoutingKey,
+ arguments = Arguments,
+ nowait = Nowait};
+
+bind_cmd0(unbind, Source, Destination, RoutingKey, Arguments, Nowait) ->
+ #'exchange.unbind'{source = Source,
+ destination = Destination,
+ routing_key = RoutingKey,
+ arguments = Arguments,
+ nowait = Nowait}.
+
+%% This function adds information about the current node to the
+%% binding arguments, or returns 'ignore' if it determines the binding
+%% should propagate no further. The interesting part is the latter.
+%%
+%% We want bindings to propagate in the same way as messages
+%% w.r.t. max_hops - if we determine that a message can get from node
+%% A to B (assuming bindings are in place) then it follows that a
+%% binding at B should propagate back to A, and no further. There is
+%% no point in propagating bindings past the point where messages
+%% would propagate, and we will lose messages if bindings don't
+%% propagate as far.
+%%
+%% Note that we still want to have limits on how far messages can
+%% propagate: limiting our bindings is not enough, since other
+%% bindings from other nodes can overlap.
+%%
+%% So in short we want bindings to obey max_hops. However, they can't
+%% just obey the max_hops of the current link, since they are
+%% travelling in the opposite direction to messages! Consider the
+%% following federation:
+%%
+%% A -----------> B -----------> C
+%% max_hops=1 max_hops=2
+%%
+%% where the arrows indicate message flow. A binding created at C
+%% should propagate to B, then to A, and no further. Therefore every
+%% time we traverse a link, we keep a count of the number of hops that
+%% a message could have made so far to reach this point, and still be
+%% able to propagate. When this number ("hops" below) reaches 0 we
+%% propagate no further.
+%%
+%% hops(link(N)) is given by:
+%%
+%% min(hops(link(N-1))-1, max_hops(link(N)))
+%%
+%% where link(N) is the link that bindings propagate over after N
+%% steps (e.g. link(1) is CB above, link(2) is BA).
+%%
+%% In other words, we count down to 0 from the link with the most
+%% restrictive max_hops we have yet passed through.
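+%%
+%% Worked through for the diagram above: a binding created at C first
+%% crosses link CB, so hops(link(1)) = max_hops(CB) = 2. It then
+%% crosses BA, giving hops(link(2)) = min(2 - 1, max_hops(BA)) =
+%% min(1, 1) = 1. Any further hop would reach 0, so the binding stops
+%% at A, exactly as far as messages can travel in the other direction.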
+
+update_binding(Args, #state{downstream_exchange = X,
+ upstream = Upstream,
+ upstream_params = #upstream_params{x_or_q = UpstreamX},
+ upstream_name = UName}) ->
+ #upstream{max_hops = MaxHops} = Upstream,
+ UVhost = vhost(UpstreamX),
+ Hops = case rabbit_misc:table_lookup(Args, ?BINDING_HEADER) of
+ undefined -> MaxHops;
+ {array, All} -> [{table, Prev} | _] = All,
+ PrevHops = get_hops(Prev),
+ case rabbit_federation_util:already_seen(
+ UName, UVhost, All) of
+ true -> 0;
+ false -> lists:min([PrevHops - 1, MaxHops])
+ end
+ end,
+ case Hops of
+ 0 -> ignore;
+ _ -> Cluster = rabbit_nodes:cluster_name(),
+ ABSuffix = rabbit_federation_db:get_active_suffix(
+ X, Upstream, <<"A">>),
+ DVhost = vhost(X),
+ DName = name(X),
+ Down = <<DVhost/binary,":", DName/binary, " ", ABSuffix/binary>>,
+ Info = [{<<"cluster-name">>, longstr, Cluster},
+ {<<"vhost">>, longstr, DVhost},
+ {<<"exchange">>, longstr, Down},
+ {<<"hops">>, short, Hops}],
+ rabbit_basic:prepend_table_header(?BINDING_HEADER, Info, Args)
+ end.
+
+key(#binding{key = Key, args = Args}) -> {Key, Args}.
+
+go(S0 = {not_started, {Upstream, UParams, DownXName}}) ->
+ Unacked = rabbit_federation_link_util:unacked_new(),
+
+ log_link_startup_attempt(Upstream, DownXName),
+ rabbit_federation_link_util:start_conn_ch(
+ fun (Conn, Ch, DConn, DCh) ->
+ {ok, CmdCh} = open_cmd_channel(Conn, Upstream, UParams, DownXName, S0),
+ erlang:monitor(process, CmdCh),
+ Props = pget(server_properties,
+ amqp_connection:info(Conn, [server_properties])),
+ UName = case rabbit_misc:table_lookup(
+ Props, <<"cluster_name">>) of
+ {longstr, N} -> N;
+ _ -> unknown
+ end,
+ {Serial, Bindings} =
+ rabbit_misc:execute_mnesia_transaction(
+ fun () ->
+ {rabbit_exchange:peek_serial(DownXName),
+ rabbit_binding:list_for_source(DownXName)}
+ end),
+ true = is_integer(Serial),
+                  %% If we are very short lived, Serial can be 'undefined'
+                  %% at this point (the deletion of the X could have
+                  %% overtaken the creation of this process). That case
+                  %% used to be harmless: 'undefined' compares larger than
+                  %% any number, so no command would ever be executed and
+                  %% we would soon be told to stop anyway. The assertion
+                  %% above makes it fail fast instead.
+ {ok, Interval} = application:get_env(rabbitmq_federation,
+ internal_exchange_check_interval),
+ State = ensure_upstream_bindings(
+ consume_from_upstream_queue(
+ #state{upstream = Upstream,
+ upstream_params = UParams,
+ upstream_name = UName,
+ connection = Conn,
+ channel = Ch,
+ cmd_channel = CmdCh,
+ next_serial = Serial,
+ downstream_connection = DConn,
+ downstream_channel = DCh,
+ downstream_exchange = DownXName,
+ unacked = Unacked,
+ internal_exchange_interval = Interval}),
+ Bindings),
+ rabbit_log_federation:info("Federation link for ~s (upstream: ~s) will perform internal exchange checks "
+ "every ~b seconds", [rabbit_misc:rs(DownXName), UName, round(Interval / 1000)]),
+ TRef = erlang:send_after(Interval, self(), check_internal_exchange),
+ {noreply, State#state{internal_exchange_timer = TRef}}
+ end, Upstream, UParams, DownXName, S0).
+
+log_link_startup_attempt(OUpstream, DownXName) ->
+ rabbit_log_federation:debug("Will try to start a federation link for ~s, upstream: '~s'",
+ [rabbit_misc:rs(DownXName), OUpstream#upstream.name]).
+
+open_cmd_channel(Conn, Upstream = #upstream{name = UName}, UParams, DownXName, S0) ->
+ rabbit_log_federation:debug("Will open a command channel to upstream '~s' for downstream federated ~s",
+ [UName, rabbit_misc:rs(DownXName)]),
+ case amqp_connection:open_channel(Conn) of
+ {ok, CCh} ->
+ erlang:monitor(process, CCh),
+ {ok, CCh};
+ E ->
+ rabbit_federation_link_util:ensure_connection_closed(Conn),
+ rabbit_federation_link_util:connection_error(command_channel, E,
+ Upstream, UParams, DownXName, S0),
+ E
+ end.
+
+consume_from_upstream_queue(
+ State = #state{upstream = Upstream,
+ upstream_params = UParams,
+ channel = Ch,
+ downstream_exchange = DownXName}) ->
+ #upstream{prefetch_count = Prefetch,
+ expires = Expiry,
+ message_ttl = TTL,
+ ha_policy = HA} = Upstream,
+ #upstream_params{x_or_q = X,
+ params = Params} = UParams,
+ Q = upstream_queue_name(name(X), vhost(Params), DownXName),
+ Args = [A || {_K, _T, V} = A
+ <- [{<<"x-expires">>, long, Expiry},
+ {<<"x-message-ttl">>, long, TTL},
+ {<<"x-ha-policy">>, longstr, HA},
+ {<<"x-internal-purpose">>, longstr, <<"federation">>}],
+ V =/= none],
+ amqp_channel:call(Ch, #'queue.declare'{queue = Q,
+ durable = true,
+ arguments = Args}),
+ NoAck = Upstream#upstream.ack_mode =:= 'no-ack',
+ case NoAck of
+ false -> amqp_channel:call(Ch, #'basic.qos'{prefetch_count = Prefetch});
+ true -> ok
+ end,
+ #'basic.consume_ok'{consumer_tag = CTag} =
+ amqp_channel:subscribe(Ch, #'basic.consume'{queue = Q,
+ no_ack = NoAck}, self()),
+ State#state{consumer_tag = CTag,
+ queue = Q}.
+
+ensure_upstream_bindings(State = #state{upstream = Upstream,
+ connection = Conn,
+ channel = Ch,
+ downstream_exchange = DownXName,
+ queue = Q}, Bindings) ->
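+    %% Alternate the internal exchange between an "A"- and a "B"-suffixed
+    %% name: declare the new one, rebind, record it as active, then
+    %% delete the old one, so existing bindings are swapped over without
+    %% leaving a window with no internal exchange.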
+ OldSuffix = rabbit_federation_db:get_active_suffix(
+ DownXName, Upstream, <<"A">>),
+ Suffix = case OldSuffix of
+ <<"A">> -> <<"B">>;
+ <<"B">> -> <<"A">>
+ end,
+ IntXNameBin = upstream_exchange_name(Q, Suffix),
+ ensure_upstream_exchange(State),
+ ensure_internal_exchange(IntXNameBin, State),
+ amqp_channel:call(Ch, #'queue.bind'{exchange = IntXNameBin, queue = Q}),
+ State1 = State#state{internal_exchange = IntXNameBin},
+ rabbit_federation_db:set_active_suffix(DownXName, Upstream, Suffix),
+ State2 = lists:foldl(fun add_binding/2, State1, Bindings),
+ OldIntXNameBin = upstream_exchange_name(Q, OldSuffix),
+ delete_upstream_exchange(Conn, OldIntXNameBin),
+ State2.
+
+ensure_upstream_exchange(#state{upstream_params = UParams,
+ connection = Conn,
+ channel = Ch}) ->
+ #upstream_params{x_or_q = X} = UParams,
+ #exchange{type = Type,
+ durable = Durable,
+ auto_delete = AutoDelete,
+ internal = Internal,
+ arguments = Arguments} = X,
+ Decl = #'exchange.declare'{exchange = name(X),
+ type = list_to_binary(atom_to_list(Type)),
+ durable = Durable,
+ auto_delete = AutoDelete,
+ internal = Internal,
+ arguments = Arguments},
+ rabbit_federation_link_util:disposable_channel_call(
+ Conn, Decl#'exchange.declare'{passive = true},
+ fun(?NOT_FOUND, _Text) ->
+ amqp_channel:call(Ch, Decl)
+ end).
+
+ensure_internal_exchange(IntXNameBin,
+ #state{upstream = #upstream{max_hops = MaxHops, name = UName},
+ upstream_params = UParams,
+ connection = Conn,
+ channel = Ch,
+ downstream_exchange = #resource{virtual_host = DVhost}}) ->
+ rabbit_log_federation:debug("Exchange federation will set up exchange '~s' in upstream '~s'",
+ [IntXNameBin, UName]),
+ #upstream_params{params = Params} = rabbit_federation_util:deobfuscate_upstream_params(UParams),
+ rabbit_log_federation:debug("Will delete upstream exchange '~s'", [IntXNameBin]),
+ delete_upstream_exchange(Conn, IntXNameBin),
+ rabbit_log_federation:debug("Will declare an internal upstream exchange '~s'", [IntXNameBin]),
+ Base = #'exchange.declare'{exchange = IntXNameBin,
+ durable = true,
+ internal = true,
+ auto_delete = true},
+ Purpose = [{<<"x-internal-purpose">>, longstr, <<"federation">>}],
+ XFUArgs = [{?MAX_HOPS_ARG, long, MaxHops},
+ {?DOWNSTREAM_NAME_ARG, longstr, cycle_detection_node_identifier()},
+ {?DOWNSTREAM_VHOST_ARG, longstr, DVhost}
+ | Purpose],
+ XFU = Base#'exchange.declare'{type = <<"x-federation-upstream">>,
+ arguments = XFUArgs},
+ Fan = Base#'exchange.declare'{type = <<"fanout">>,
+ arguments = Purpose},
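+    %% Old upstreams without the federation plugin reject the
+    %% x-federation-upstream type with 'command_invalid'; in that case
+    %% we fall back to a plain fanout exchange.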
+ rabbit_federation_link_util:disposable_connection_call(
+ Params, XFU, fun(?COMMAND_INVALID, _Text) ->
+ amqp_channel:call(Ch, Fan)
+ end).
+
+check_internal_exchange(IntXNameBin,
+ #state{upstream = #upstream{max_hops = MaxHops, name = UName},
+ upstream_params = UParams,
+ downstream_exchange = XName = #resource{virtual_host = DVhost}}) ->
+ #upstream_params{params = Params} =
+ rabbit_federation_util:deobfuscate_upstream_params(UParams),
+ rabbit_log_federation:debug("Exchange federation will check on exchange '~s' in upstream '~s'",
+ [IntXNameBin, UName]),
+ Base = #'exchange.declare'{exchange = IntXNameBin,
+ passive = true,
+ durable = true,
+ internal = true,
+ auto_delete = true},
+ Purpose = [{<<"x-internal-purpose">>, longstr, <<"federation">>}],
+ XFUArgs = [{?MAX_HOPS_ARG, long, MaxHops},
+ {?DOWNSTREAM_NAME_ARG, longstr, cycle_detection_node_identifier()},
+ {?DOWNSTREAM_VHOST_ARG, longstr, DVhost}
+ | Purpose],
+ XFU = Base#'exchange.declare'{type = <<"x-federation-upstream">>,
+ arguments = XFUArgs},
+ rabbit_federation_link_util:disposable_connection_call(
+ Params, XFU, fun(404, Text) ->
+ rabbit_federation_link_util:log_warning(
+ XName, "detected internal upstream exchange changes,"
+ " restarting link: ~p~n", [Text]),
+ upstream_not_found;
+ (Code, Text) ->
+ rabbit_federation_link_util:log_warning(
+ XName, "internal upstream exchange check failed: ~p ~p~n",
+ [Code, Text]),
+ error
+ end).
+
+upstream_queue_name(XNameBin, VHost, #resource{name = DownXNameBin,
+ virtual_host = DownVHost}) ->
+ Node = rabbit_nodes:cluster_name(),
+ DownPart = case DownVHost of
+ VHost -> case DownXNameBin of
+ XNameBin -> <<"">>;
+ _ -> <<":", DownXNameBin/binary>>
+ end;
+ _ -> <<":", DownVHost/binary,
+ ":", DownXNameBin/binary>>
+ end,
+ <<"federation: ", XNameBin/binary, " -> ", Node/binary, DownPart/binary>>.
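+
+%% The result looks like "federation: ex1 -> my-cluster" when the
+%% downstream vhost and exchange name match the upstream's, or
+%% "federation: ex1 -> my-cluster:vh2:ex2" when they differ (names
+%% here are illustrative).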
+
+cycle_detection_node_identifier() ->
+ rabbit_nodes:cluster_name().
+
+upstream_exchange_name(UpstreamQName, Suffix) ->
+ <<UpstreamQName/binary, " ", Suffix/binary>>.
+
+delete_upstream_exchange(Conn, XNameBin) ->
+ rabbit_federation_link_util:disposable_channel_call(
+ Conn, #'exchange.delete'{exchange = XNameBin}).
+
+delete_upstream_queue(Conn, Queue) ->
+ rabbit_federation_link_util:disposable_channel_call(
+ Conn, #'queue.delete'{queue = Queue}).
+
+update_routing_headers(#upstream_params{table = Table}, UpstreamName, UVhost, Redelivered, Headers) ->
+ NewValue = Table ++
+ [{<<"redelivered">>, bool, Redelivered}] ++
+ header_for_upstream_name(UpstreamName) ++
+ header_for_upstream_vhost(UVhost),
+ rabbit_basic:prepend_table_header(?ROUTING_HEADER, NewValue, Headers).
+
+header_for_upstream_name(unknown) -> [];
+header_for_upstream_name(Name) -> [{<<"cluster-name">>, longstr, Name}].
+
+header_for_upstream_vhost(unknown) -> [];
+header_for_upstream_vhost(Name) -> [{<<"vhost">>, longstr, Name}].
+
+get_hops(Table) ->
+ case rabbit_misc:table_lookup(Table, <<"hops">>) of
+ %% see rabbit_binary_generator
+ {short, N} -> N;
+ {long, N} -> N;
+ {byte, N} -> N;
+ {signedint, N} -> N;
+ {unsignedbyte, N} -> N;
+ {unsignedshort, N} -> N;
+ {unsignedint, N} -> N;
+ {_, N} when is_integer(N) andalso N >= 0 -> N
+ end.
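+%% A minimal sketch of the lookup above:
+%%   get_hops([{<<"hops">>, long, 2}]) =:= 2
+%% i.e. any of the integral AMQP 0-9-1 field types is accepted.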
+
+handle_down(DCh, Reason, _Ch, _CmdCh, DCh, Args, State) ->
+ rabbit_federation_link_util:handle_downstream_down(Reason, Args, State);
+handle_down(ChPid, Reason, Ch, CmdCh, _DCh, Args, State)
+ when ChPid =:= Ch; ChPid =:= CmdCh ->
+ rabbit_federation_link_util:handle_upstream_down(Reason, Args, State).
diff --git a/deps/rabbitmq_federation/src/rabbit_federation_exchange_link_sup_sup.erl b/deps/rabbitmq_federation/src/rabbit_federation_exchange_link_sup_sup.erl
new file mode 100644
index 0000000000..fda76a5070
--- /dev/null
+++ b/deps/rabbitmq_federation/src/rabbit_federation_exchange_link_sup_sup.erl
@@ -0,0 +1,75 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_federation_exchange_link_sup_sup).
+
+-behaviour(mirrored_supervisor).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+-define(SUPERVISOR, ?MODULE).
+
+%% Supervises the upstream links for all exchanges (but not queues). We need
+%% different handling here since exchanges want a mirrored sup.
+
+-export([start_link/0, start_child/1, adjust/1, stop_child/1]).
+-export([init/1]).
+
+%%----------------------------------------------------------------------------
+
+start_link() ->
+ mirrored_supervisor:start_link({local, ?SUPERVISOR}, ?SUPERVISOR,
+ fun rabbit_misc:execute_mnesia_transaction/1,
+ ?MODULE, []).
+
+%% Note that the next supervisor down, rabbit_federation_link_sup, is common
+%% between exchanges and queues.
+start_child(X) ->
+ case mirrored_supervisor:start_child(
+ ?SUPERVISOR,
+ {id(X), {rabbit_federation_link_sup, start_link, [X]},
+ transient, ?SUPERVISOR_WAIT, supervisor,
+ [rabbit_federation_link_sup]}) of
+ {ok, _Pid} -> ok;
+ {error, {already_started, _Pid}} ->
+ #exchange{name = ExchangeName} = X,
+ rabbit_log_federation:debug("Federation link for exchange ~p was already started",
+ [rabbit_misc:rs(ExchangeName)]),
+ ok;
+        %% A link returned {stop, gone} and the link_sup shut down; that's OK.
+ {error, {shutdown, _}} -> ok
+ end.
+
+adjust({clear_upstream, VHost, UpstreamName}) ->
+ [rabbit_federation_link_sup:adjust(Pid, X, {clear_upstream, UpstreamName}) ||
+ {#exchange{name = Name} = X, Pid, _, _} <- mirrored_supervisor:which_children(?SUPERVISOR),
+ Name#resource.virtual_host == VHost],
+ ok;
+adjust(Reason) ->
+ [rabbit_federation_link_sup:adjust(Pid, X, Reason) ||
+ {X, Pid, _, _} <- mirrored_supervisor:which_children(?SUPERVISOR)],
+ ok.
+
+stop_child(X) ->
+ case mirrored_supervisor:terminate_child(?SUPERVISOR, id(X)) of
+ ok -> ok;
+ {error, Err} ->
+ #exchange{name = ExchangeName} = X,
+ rabbit_log_federation:warning(
+ "Attempt to stop a federation link for exchange ~p failed: ~p",
+ [rabbit_misc:rs(ExchangeName), Err]),
+ ok
+ end,
+ ok = mirrored_supervisor:delete_child(?SUPERVISOR, id(X)).
+
+%%----------------------------------------------------------------------------
+
+init([]) ->
+ {ok, {{one_for_one, 1200, 60}, []}}.
+
+%% See comment in rabbit_federation_queue_link_sup_sup:id/1
+id(X = #exchange{policy = Policy}) ->
+    X1 = rabbit_exchange:immutable(X),
+    X1#exchange{policy = Policy}.
diff --git a/deps/rabbitmq_federation/src/rabbit_federation_link_sup.erl b/deps/rabbitmq_federation/src/rabbit_federation_link_sup.erl
new file mode 100644
index 0000000000..27d1b50277
--- /dev/null
+++ b/deps/rabbitmq_federation/src/rabbit_federation_link_sup.erl
@@ -0,0 +1,109 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_federation_link_sup).
+
+-behaviour(supervisor2).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+-include_lib("rabbit/include/amqqueue.hrl").
+-include("rabbit_federation.hrl").
+
+%% Supervises the upstream links for an exchange or queue.
+
+-export([start_link/1, adjust/3, restart/2]).
+-export([init/1]).
+
+start_link(XorQ) ->
+ supervisor2:start_link(?MODULE, XorQ).
+
+adjust(Sup, XorQ, everything) ->
+ [stop(Sup, Upstream, XorQ) ||
+ {Upstream, _, _, _} <- supervisor2:which_children(Sup)],
+ [{ok, _Pid} = supervisor2:start_child(Sup, Spec) || Spec <- specs(XorQ)];
+
+adjust(Sup, XorQ, {upstream, UpstreamName}) ->
+ OldUpstreams0 = children(Sup, UpstreamName),
+ NewUpstreams0 = rabbit_federation_upstream:for(XorQ, UpstreamName),
+    %% If any upstreams haven't changed, don't restart them. The broker
+    %% avoids telling us about connections whose definitions have not
+    %% changed syntactically, but even if one has changed, this XorQ
+    %% may not use that connection in any of its upstreams, so we still
+    %% need to check here.
+ {OldUpstreams, NewUpstreams} =
+ lists:foldl(
+ fun (OldU, {OldUs, NewUs}) ->
+ case lists:member(OldU, NewUs) of
+ true -> {OldUs -- [OldU], NewUs -- [OldU]};
+ false -> {OldUs, NewUs}
+ end
+ end, {OldUpstreams0, NewUpstreams0}, OldUpstreams0),
+ [stop(Sup, OldUpstream, XorQ) || OldUpstream <- OldUpstreams],
+ [start(Sup, NewUpstream, XorQ) || NewUpstream <- NewUpstreams];
+
+adjust(Sup, XorQ, {clear_upstream, UpstreamName}) ->
+ ok = rabbit_federation_db:prune_scratch(
+ name(XorQ), rabbit_federation_upstream:for(XorQ)),
+ [stop(Sup, Upstream, XorQ) || Upstream <- children(Sup, UpstreamName)];
+
+adjust(Sup, X = #exchange{name = XName}, {upstream_set, _Set}) ->
+ adjust(Sup, X, everything),
+ case rabbit_federation_upstream:federate(X) of
+ false -> ok;
+ true -> ok = rabbit_federation_db:prune_scratch(
+ XName, rabbit_federation_upstream:for(X))
+ end;
+adjust(Sup, Q, {upstream_set, _}) when ?is_amqqueue(Q) ->
+ adjust(Sup, Q, everything);
+adjust(Sup, XorQ, {clear_upstream_set, _}) ->
+ adjust(Sup, XorQ, everything).
+
+restart(Sup, Upstream) ->
+ ok = supervisor2:terminate_child(Sup, Upstream),
+ {ok, _Pid} = supervisor2:restart_child(Sup, Upstream),
+ ok.
+
+start(Sup, Upstream, XorQ) ->
+ {ok, _Pid} = supervisor2:start_child(Sup, spec(rabbit_federation_util:obfuscate_upstream(Upstream), XorQ)),
+ ok.
+
+stop(Sup, Upstream, XorQ) ->
+ ok = supervisor2:terminate_child(Sup, Upstream),
+ ok = supervisor2:delete_child(Sup, Upstream),
+ %% While the link will report its own removal, that only works if
+ %% the link was actually up. If the link was broken and failing to
+ %% come up, the possibility exists that there *is* no link
+ %% process, but we still have a report in the status table. So
+ %% remove it here too.
+ rabbit_federation_status:remove(Upstream, name(XorQ)).
+
+children(Sup, UpstreamName) ->
+ rabbit_federation_util:find_upstreams(
+ UpstreamName, [U || {U, _, _, _} <- supervisor2:which_children(Sup)]).
+
+%%----------------------------------------------------------------------------
+
+init(XorQ) ->
+ %% 1, ?MAX_WAIT so that we always give up after one fast retry and get
+ %% into the reconnect delay.
+ {ok, {{one_for_one, 1, ?MAX_WAIT}, specs(XorQ)}}.
+
+specs(XorQ) ->
+ [spec(rabbit_federation_util:obfuscate_upstream(Upstream), XorQ)
+ || Upstream <- rabbit_federation_upstream:for(XorQ)].
+
+spec(U = #upstream{reconnect_delay = Delay}, #exchange{name = XName}) ->
+ {U, {rabbit_federation_exchange_link, start_link, [{U, XName}]},
+ {permanent, Delay}, ?WORKER_WAIT, worker,
+ [rabbit_federation_exchange_link]};
+
+spec(Upstream = #upstream{reconnect_delay = Delay}, Q) when ?is_amqqueue(Q) ->
+ {Upstream, {rabbit_federation_queue_link, start_link, [{Upstream, Q}]},
+ {permanent, Delay}, ?WORKER_WAIT, worker,
+ [rabbit_federation_queue_link]}.
+
+name(#exchange{name = XName}) -> XName;
+name(Q) when ?is_amqqueue(Q) -> amqqueue:get_name(Q).
diff --git a/deps/rabbitmq_federation/src/rabbit_federation_link_util.erl b/deps/rabbitmq_federation/src/rabbit_federation_link_util.erl
new file mode 100644
index 0000000000..a5fd560f0b
--- /dev/null
+++ b/deps/rabbitmq_federation/src/rabbit_federation_link_util.erl
@@ -0,0 +1,364 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_federation_link_util).
+
+-include_lib("rabbit/include/amqqueue.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include("rabbit_federation.hrl").
+
+%% real
+-export([start_conn_ch/5, disposable_channel_call/2, disposable_channel_call/3,
+ disposable_connection_call/3, ensure_connection_closed/1,
+ log_terminate/4, unacked_new/0, ack/3, nack/3, forward/9,
+ handle_downstream_down/3, handle_upstream_down/3,
+ get_connection_name/2, log_debug/3, log_info/3, log_warning/3,
+ log_error/3]).
+
+%% temp
+-export([connection_error/6]).
+
+-import(rabbit_misc, [pget/2]).
+
+-define(MAX_CONNECTION_CLOSE_TIMEOUT, 10000).
+
+%%----------------------------------------------------------------------------
+
+start_conn_ch(Fun, OUpstream, OUParams,
+ XorQName = #resource{virtual_host = DownVHost}, State) ->
+
+ Upstream = rabbit_federation_util:deobfuscate_upstream(OUpstream),
+ UParams = rabbit_federation_util:deobfuscate_upstream_params(OUParams),
+
+ ConnName = get_connection_name(Upstream, UParams),
+ case open_monitor(#amqp_params_direct{virtual_host = DownVHost}, ConnName) of
+ {ok, DConn, DCh} ->
+ case Upstream#upstream.ack_mode of
+ 'on-confirm' ->
+ #'confirm.select_ok'{} =
+ amqp_channel:call(DCh, #'confirm.select'{}),
+ amqp_channel:register_confirm_handler(DCh, self());
+ _ ->
+ ok
+ end,
+ case open_monitor(UParams#upstream_params.params, ConnName) of
+ {ok, Conn, Ch} ->
+                    %% Don't trap exits until both connections are
+                    %% established: otherwise, an attempt to delete a
+                    %% federation upstream while we are still waiting
+                    %% for the connection would block.
+ process_flag(trap_exit, true),
+ try
+ R = Fun(Conn, Ch, DConn, DCh),
+ log_info(
+ XorQName, "connected to ~s~n",
+ [rabbit_federation_upstream:params_to_string(
+ UParams)]),
+ Name = pget(name, amqp_connection:info(DConn, [name])),
+ rabbit_federation_status:report(
+ OUpstream, OUParams, XorQName, {running, Name}),
+ R
+ catch exit:E ->
+ %% terminate/2 will not get this, as we
+ %% have not put them in our state yet
+ ensure_connection_closed(DConn),
+ ensure_connection_closed(Conn),
+ connection_error(remote_start, E,
+ OUpstream, OUParams, XorQName, State)
+ end;
+ E ->
+ ensure_connection_closed(DConn),
+ connection_error(remote_start, E,
+ OUpstream, OUParams, XorQName, State)
+ end;
+ E ->
+ connection_error(local_start, E,
+ OUpstream, OUParams, XorQName, State)
+ end.
+
+get_connection_name(#upstream{name = UpstreamName},
+                    #upstream_params{x_or_q = Resource}) when is_record(Resource, exchange) ->
+ Policy = Resource#exchange.policy,
+ PolicyName = proplists:get_value(name, Policy),
+ connection_name(UpstreamName, PolicyName);
+
+get_connection_name(#upstream{name = UpstreamName},
+ #upstream_params{x_or_q = Resource}) when ?is_amqqueue(Resource) ->
+ Policy = amqqueue:get_policy(Resource),
+ PolicyName = proplists:get_value(name, Policy),
+ connection_name(UpstreamName, PolicyName);
+
+get_connection_name(_, _) ->
+ connection_name(undefined, undefined).
+
+connection_name(Upstream, Policy) when is_binary(Upstream), is_binary(Policy) ->
+    <<"Federation link (upstream: ", Upstream/binary, ", policy: ", Policy/binary, ")">>;
+connection_name(_, _) ->
+ <<"Federation link">>.
+
+open_monitor(Params, Name) ->
+ case open(Params, Name) of
+ {ok, Conn, Ch} -> erlang:monitor(process, Ch),
+ {ok, Conn, Ch};
+ E -> E
+ end.
+
+open(Params, Name) ->
+ try
+ amqp_connection:start(Params, Name)
+ of
+ {ok, Conn} ->
+ try
+ amqp_connection:open_channel(Conn)
+ of
+ {ok, Ch} -> {ok, Conn, Ch};
+ E -> ensure_connection_closed(Conn),
+ E
+ catch
+ _:E ->
+ ensure_connection_closed(Conn),
+ E
+ end;
+ E -> E
+ catch
+ _:E -> E
+ end.
+
+ensure_channel_closed(Ch) -> catch amqp_channel:close(Ch).
+
+ensure_connection_closed(Conn) ->
+ catch amqp_connection:close(Conn, ?MAX_CONNECTION_CLOSE_TIMEOUT).
+
+connection_error(remote_start, {{shutdown, {server_initiated_close, Code, Message}}, _} = E,
+ Upstream, UParams, XorQName, State) ->
+ rabbit_federation_status:report(
+ Upstream, UParams, XorQName, clean_reason(E)),
+ log_warning(XorQName,
+ "did not connect to ~s. Server has closed the connection due to an error, code: ~p, "
+ "message: ~s",
+ [rabbit_federation_upstream:params_to_string(UParams),
+ Code, Message]),
+ {stop, {shutdown, restart}, State};
+
+connection_error(remote_start, E, Upstream, UParams, XorQName, State) ->
+ rabbit_federation_status:report(
+ Upstream, UParams, XorQName, clean_reason(E)),
+ log_warning(XorQName, "did not connect to ~s. Reason: ~p",
+ [rabbit_federation_upstream:params_to_string(UParams),
+ E]),
+ {stop, {shutdown, restart}, State};
+
+connection_error(remote, E, Upstream, UParams, XorQName, State) ->
+ rabbit_federation_status:report(
+ Upstream, UParams, XorQName, clean_reason(E)),
+ log_info(XorQName, "disconnected from ~s~n~p",
+ [rabbit_federation_upstream:params_to_string(UParams), E]),
+ {stop, {shutdown, restart}, State};
+
+connection_error(command_channel, E, Upstream, UParams, XorQName, State) ->
+ rabbit_federation_status:report(
+ Upstream, UParams, XorQName, clean_reason(E)),
+ log_info(XorQName, "failed to open a command channel for upstream ~s~n~p",
+ [rabbit_federation_upstream:params_to_string(UParams), E]),
+ {stop, {shutdown, restart}, State};
+
+connection_error(local, basic_cancel, Upstream, UParams, XorQName, State) ->
+ rabbit_federation_status:report(
+ Upstream, UParams, XorQName, {error, basic_cancel}),
+ log_info(XorQName, "received a 'basic.cancel'", []),
+ {stop, {shutdown, restart}, State};
+
+connection_error(local_start, E, Upstream, UParams, XorQName, State) ->
+ rabbit_federation_status:report(
+ Upstream, UParams, XorQName, clean_reason(E)),
+ log_warning(XorQName, "did not connect locally~n~p", [E]),
+ {stop, {shutdown, restart}, State}.
+
+%% If we terminate due to a gen_server call exploding (almost
+%% certainly due to an amqp_channel:call() exploding) then we do not
+%% want to report the gen_server call in our status.
+clean_reason({E = {shutdown, _}, _}) -> E;
+clean_reason(E) -> E.
+
+%% local / disconnected never gets invoked, see handle_info({'DOWN', ...
+
+%%----------------------------------------------------------------------------
+
+unacked_new() -> gb_trees:empty().
+
+ack(#'basic.ack'{delivery_tag = Seq,
+ multiple = Multiple}, Ch, Unack) ->
+ amqp_channel:cast(Ch, #'basic.ack'{delivery_tag = gb_trees:get(Seq, Unack),
+ multiple = Multiple}),
+ remove_delivery_tags(Seq, Multiple, Unack).
+
+
+%% Note: at the time of writing the broker will never send requeue=false,
+%% and it's hard to imagine why it would. But we may as well handle it.
+nack(#'basic.nack'{delivery_tag = Seq,
+ multiple = Multiple,
+ requeue = Requeue}, Ch, Unack) ->
+ amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = gb_trees:get(Seq, Unack),
+ multiple = Multiple,
+ requeue = Requeue}),
+ remove_delivery_tags(Seq, Multiple, Unack).
+
+remove_delivery_tags(Seq, false, Unacked) ->
+ gb_trees:delete(Seq, Unacked);
+remove_delivery_tags(Seq, true, Unacked) ->
+ case gb_trees:is_empty(Unacked) of
+ true -> Unacked;
+ false -> {Smallest, _Val, Unacked1} = gb_trees:take_smallest(Unacked),
+ case Smallest > Seq of
+ true -> Unacked;
+ false -> remove_delivery_tags(Seq, true, Unacked1)
+ end
+ end.
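+%% A worked example (illustrative tags only): if Unacked maps downstream
+%% publish seqnos to upstream delivery tags as {1 => 101, 2 => 102,
+%% 3 => 103}, then remove_delivery_tags(2, true, Unacked) deletes the
+%% entries for seqnos 1 and 2 and keeps 3, mirroring a multiple ack up
+%% to and including seqno 2.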
+
+forward(#upstream{ack_mode = AckMode,
+ trust_user_id = Trust},
+ #'basic.deliver'{delivery_tag = DT},
+ Ch, DCh, PublishMethod, HeadersFun, ForwardFun, Msg, Unacked) ->
+ Headers = extract_headers(Msg),
+ case ForwardFun(Headers) of
+ true -> Msg1 = maybe_clear_user_id(
+ Trust, update_headers(HeadersFun(Headers), Msg)),
+ Seq = case AckMode of
+ 'on-confirm' -> amqp_channel:next_publish_seqno(DCh);
+ _ -> ignore
+ end,
+ amqp_channel:cast(DCh, PublishMethod, Msg1),
+ case AckMode of
+ 'on-confirm' ->
+ gb_trees:insert(Seq, DT, Unacked);
+ 'on-publish' ->
+ amqp_channel:cast(Ch, #'basic.ack'{delivery_tag = DT}),
+ Unacked;
+ 'no-ack' ->
+ Unacked
+ end;
+ false -> amqp_channel:cast(Ch, #'basic.ack'{delivery_tag = DT}),
+ %% Drop it, but acknowledge it!
+ Unacked
+ end.
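+%% To summarise the ack modes handled above: 'on-confirm' delays the
+%% upstream ack until the downstream confirm arrives (the Seq -> DT
+%% mapping is parked in Unacked), 'on-publish' acks as soon as the
+%% publish has been cast downstream, and 'no-ack' never sends an
+%% explicit ack at all.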
+
+maybe_clear_user_id(false, Msg = #amqp_msg{props = Props}) ->
+ Msg#amqp_msg{props = Props#'P_basic'{user_id = undefined}};
+maybe_clear_user_id(true, Msg) ->
+ Msg.
+
+extract_headers(#amqp_msg{props = #'P_basic'{headers = Headers}}) ->
+ Headers.
+
+update_headers(Headers, Msg = #amqp_msg{props = Props}) ->
+ Msg#amqp_msg{props = Props#'P_basic'{headers = Headers}}.
+
+%%----------------------------------------------------------------------------
+
+%% If the downstream channel shuts down cleanly, we can just ignore it
+%% - we're the same node, we're presumably about to go down too.
+handle_downstream_down(shutdown, _Args, State) ->
+ {noreply, State};
+
+handle_downstream_down(Reason, _Args, State) ->
+ {stop, {downstream_channel_down, Reason}, State}.
+
+%% If the upstream channel goes down for an intelligible reason, just
+%% log it and die quietly.
+handle_upstream_down({shutdown, Reason}, {Upstream, UParams, XName}, State) ->
+ rabbit_federation_link_util:connection_error(
+ remote, {upstream_channel_down, Reason}, Upstream, UParams, XName, State);
+
+handle_upstream_down(Reason, _Args, State) ->
+ {stop, {upstream_channel_down, Reason}, State}.
+
+%%----------------------------------------------------------------------------
+
+log_terminate(gone, _Upstream, _UParams, _XorQName) ->
+ %% the link cannot start, this has been logged already
+ ok;
+log_terminate({shutdown, restart}, _Upstream, _UParams, _XorQName) ->
+ %% We've already logged this before munging the reason
+ ok;
+log_terminate(shutdown, Upstream, UParams, XorQName) ->
+ %% The supervisor is shutting us down; we are probably restarting
+ %% the link because configuration has changed. So try to shut down
+ %% nicely so that we do not cause unacked messages to be
+ %% redelivered.
+ log_info(XorQName, "disconnecting from ~s~n",
+ [rabbit_federation_upstream:params_to_string(UParams)]),
+ rabbit_federation_status:remove(Upstream, XorQName);
+
+log_terminate(Reason, Upstream, UParams, XorQName) ->
+ %% Unexpected death. sasl will log it, but we should update
+ %% rabbit_federation_status.
+ rabbit_federation_status:report(
+ Upstream, UParams, XorQName, clean_reason(Reason)).
+
+log_debug(XorQName, Fmt, Args) -> log(debug, XorQName, Fmt, Args).
+log_info(XorQName, Fmt, Args) -> log(info, XorQName, Fmt, Args).
+log_warning(XorQName, Fmt, Args) -> log(warning, XorQName, Fmt, Args).
+log_error(XorQName, Fmt, Args) -> log(error, XorQName, Fmt, Args).
+
+log(Level, XorQName, Fmt0, Args0) ->
+ Fmt = "Federation ~s " ++ Fmt0,
+ Args = [rabbit_misc:rs(XorQName) | Args0],
+ case Level of
+ debug -> rabbit_log_federation:debug(Fmt, Args);
+ info -> rabbit_log_federation:info(Fmt, Args);
+ warning -> rabbit_log_federation:warning(Fmt, Args);
+ error -> rabbit_log_federation:error(Fmt, Args)
+ end.
+
+%%----------------------------------------------------------------------------
+
+disposable_channel_call(Conn, Method) ->
+ disposable_channel_call(Conn, Method, fun(_, _) -> ok end).
+
+disposable_channel_call(Conn, Method, ErrFun) ->
+ try
+ {ok, Ch} = amqp_connection:open_channel(Conn),
+ try
+ amqp_channel:call(Ch, Method)
+ catch exit:{{shutdown, {server_initiated_close, Code, Message}}, _} ->
+ ErrFun(Code, Message)
+ after
+ ensure_channel_closed(Ch)
+ end
+ catch
+ Exception:Reason ->
+ rabbit_log_federation:error("Federation link could not create a disposable (one-off) channel due to an error ~p: ~p~n", [Exception, Reason])
+ end.
+
+disposable_connection_call(Params, Method, ErrFun) ->
+ try
+ rabbit_log_federation:debug("Disposable connection parameters: ~p", [Params]),
+ case open(Params, <<"Disposable exchange federation link connection">>) of
+ {ok, Conn, Ch} ->
+ try
+ amqp_channel:call(Ch, Method)
+ catch exit:{{shutdown, {connection_closing, {server_initiated_close, Code, Message}}}, _} ->
+ ErrFun(Code, Message);
+ exit:{{shutdown, {server_initiated_close, Code, Message}}, _} ->
+ ErrFun(Code, Message)
+ after
+ ensure_connection_closed(Conn)
+ end;
+ {error, {auth_failure, Message}} ->
+ rabbit_log_federation:error("Federation link could not open a disposable (one-off) connection "
+ "due to an authentication failure: ~s~n", [Message]);
+ Error ->
+ rabbit_log_federation:error("Federation link could not open a disposable (one-off) connection, "
+ "reason: ~p~n", [Error]),
+ Error
+ end
+ catch
+ Exception:Reason ->
+ rabbit_log_federation:error("Federation link could not create a disposable (one-off) connection "
+ "due to an error ~p: ~p~n", [Exception, Reason])
+ end.
diff --git a/deps/rabbitmq_federation/src/rabbit_federation_parameters.erl b/deps/rabbitmq_federation/src/rabbit_federation_parameters.erl
new file mode 100644
index 0000000000..928e41dc0f
--- /dev/null
+++ b/deps/rabbitmq_federation/src/rabbit_federation_parameters.erl
@@ -0,0 +1,139 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_federation_parameters).
+-behaviour(rabbit_runtime_parameter).
+-behaviour(rabbit_policy_validator).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-export([validate/5, notify/5, notify_clear/4]).
+-export([register/0, unregister/0, validate_policy/1, adjust/1]).
+
+-define(RUNTIME_PARAMETERS,
+ [{runtime_parameter, <<"federation">>},
+ {runtime_parameter, <<"federation-upstream">>},
+ {runtime_parameter, <<"federation-upstream-set">>},
+ {policy_validator, <<"federation-upstream">>},
+ {policy_validator, <<"federation-upstream-pattern">>},
+ {policy_validator, <<"federation-upstream-set">>}]).
+
+-rabbit_boot_step({?MODULE,
+ [{description, "federation parameters"},
+ {mfa, {rabbit_federation_parameters, register, []}},
+ {requires, rabbit_registry},
+ {cleanup, {rabbit_federation_parameters, unregister, []}},
+ {enables, recovery}]}).
+
+register() ->
+ [rabbit_registry:register(Class, Name, ?MODULE) ||
+ {Class, Name} <- ?RUNTIME_PARAMETERS],
+ ok.
+
+unregister() ->
+ [rabbit_registry:unregister(Class, Name) ||
+ {Class, Name} <- ?RUNTIME_PARAMETERS],
+ ok.
+
+validate(_VHost, <<"federation-upstream-set">>, Name, Term0, _User) ->
+ Term = [rabbit_data_coercion:to_proplist(Upstream) || Upstream <- Term0],
+ [rabbit_parameter_validation:proplist(
+ Name,
+ [{<<"upstream">>, fun rabbit_parameter_validation:binary/2, mandatory} |
+ shared_validation()], Upstream)
+ || Upstream <- Term];
+
+validate(_VHost, <<"federation-upstream">>, Name, Term0, _User) ->
+ Term = rabbit_data_coercion:to_proplist(Term0),
+ rabbit_parameter_validation:proplist(
+ Name, [{<<"uri">>, fun validate_uri/2, mandatory} |
+ shared_validation()], Term);
+
+validate(_VHost, _Component, Name, _Term, _User) ->
+ {error, "name not recognised: ~p", [Name]}.
+
+notify(_VHost, <<"federation-upstream-set">>, Name, _Term, _Username) ->
+ adjust({upstream_set, Name});
+
+notify(_VHost, <<"federation-upstream">>, Name, _Term, _Username) ->
+ adjust({upstream, Name}).
+
+notify_clear(_VHost, <<"federation-upstream-set">>, Name, _Username) ->
+ adjust({clear_upstream_set, Name});
+
+notify_clear(VHost, <<"federation-upstream">>, Name, _Username) ->
+ rabbit_federation_exchange_link_sup_sup:adjust({clear_upstream, VHost, Name}),
+ rabbit_federation_queue_link_sup_sup:adjust({clear_upstream, VHost, Name}).
+
+adjust(Thing) ->
+ rabbit_federation_exchange_link_sup_sup:adjust(Thing),
+ rabbit_federation_queue_link_sup_sup:adjust(Thing).
+
+%%----------------------------------------------------------------------------
+
+shared_validation() ->
+ [{<<"exchange">>, fun rabbit_parameter_validation:binary/2, optional},
+ {<<"queue">>, fun rabbit_parameter_validation:binary/2, optional},
+ {<<"consumer-tag">>, fun rabbit_parameter_validation:binary/2, optional},
+ {<<"prefetch-count">>, fun rabbit_parameter_validation:number/2, optional},
+ {<<"reconnect-delay">>,fun rabbit_parameter_validation:number/2, optional},
+ {<<"max-hops">>, fun rabbit_parameter_validation:number/2, optional},
+ {<<"expires">>, fun rabbit_parameter_validation:number/2, optional},
+ {<<"message-ttl">>, fun rabbit_parameter_validation:number/2, optional},
+ {<<"trust-user-id">>, fun rabbit_parameter_validation:boolean/2, optional},
+ {<<"ack-mode">>, rabbit_parameter_validation:enum(
+ ['no-ack', 'on-publish', 'on-confirm']), optional},
+ {<<"resource-cleanup-mode">>, rabbit_parameter_validation:enum(['default', 'never']), optional},
+ {<<"ha-policy">>, fun rabbit_parameter_validation:binary/2, optional},
+ {<<"bind-nowait">>, fun rabbit_parameter_validation:boolean/2, optional}].
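+%% For illustration, a minimal upstream definition that passes the
+%% validation above (the upstream name and URI are placeholders):
+%%
+%%   rabbitmqctl set_parameter federation-upstream my-upstream \
+%%     '{"uri":"amqp://remote.host","max-hops":1,"ack-mode":"on-confirm"}'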
+
+validate_uri(Name, Term) when is_binary(Term) ->
+ case rabbit_parameter_validation:binary(Name, Term) of
+ ok -> case amqp_uri:parse(binary_to_list(Term)) of
+ {ok, _} -> ok;
+ {error, E} -> {error, "\"~s\" not a valid URI: ~p", [Term, E]}
+ end;
+ E -> E
+ end;
+validate_uri(Name, Term) ->
+ case rabbit_parameter_validation:list(Name, Term) of
+ ok -> case [V || U <- Term,
+ V <- [validate_uri(Name, U)],
+ element(1, V) =:= error] of
+ [] -> ok;
+ [E | _] -> E
+ end;
+ E -> E
+ end.
+
+%%----------------------------------------------------------------------------
+
+validate_policy([{<<"federation-upstream-set">>, Value}])
+ when is_binary(Value) ->
+ ok;
+validate_policy([{<<"federation-upstream-set">>, Value}]) ->
+ {error, "~p is not a valid federation upstream set name", [Value]};
+
+validate_policy([{<<"federation-upstream-pattern">>, Value}])
+ when is_binary(Value) ->
+ case re:compile(Value) of
+ {ok, _} -> ok;
+ {error, Reason} -> {error, "could not compile pattern ~s to a regular expression. "
+ "Error: ~p", [Value, Reason]}
+ end;
+validate_policy([{<<"federation-upstream-pattern">>, Value}]) ->
+ {error, "~p is not a valid federation upstream pattern name", [Value]};
+
+validate_policy([{<<"federation-upstream">>, Value}])
+ when is_binary(Value) ->
+ ok;
+validate_policy([{<<"federation-upstream">>, Value}]) ->
+ {error, "~p is not a valid federation upstream name", [Value]};
+
+validate_policy(L) when length(L) >= 2 ->
+ {error, "cannot specify federation-upstream, federation-upstream-set "
+ "or federation-upstream-pattern together", []}.
diff --git a/deps/rabbitmq_federation/src/rabbit_federation_queue.erl b/deps/rabbitmq_federation/src/rabbit_federation_queue.erl
new file mode 100644
index 0000000000..3117792589
--- /dev/null
+++ b/deps/rabbitmq_federation/src/rabbit_federation_queue.erl
@@ -0,0 +1,111 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_federation_queue).
+
+-rabbit_boot_step({?MODULE,
+ [{description, "federation queue decorator"},
+ {mfa, {rabbit_queue_decorator, register,
+ [<<"federation">>, ?MODULE]}},
+ {requires, rabbit_registry},
+ {cleanup, {rabbit_queue_decorator, unregister,
+ [<<"federation">>]}},
+ {enables, recovery}]}).
+
+-include_lib("rabbit/include/amqqueue.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include("rabbit_federation.hrl").
+
+-behaviour(rabbit_queue_decorator).
+
+-export([startup/1, shutdown/1, policy_changed/2, active_for/1,
+ consumer_state_changed/3]).
+-export([policy_changed_local/2]).
+
+%%----------------------------------------------------------------------------
+
+startup(Q) ->
+ case active_for(Q) of
+ true -> rabbit_federation_queue_link_sup_sup:start_child(Q);
+ false -> ok
+ end,
+ ok.
+
+shutdown(Q) when ?is_amqqueue(Q) ->
+ QName = amqqueue:get_name(Q),
+ case active_for(Q) of
+ true -> rabbit_federation_queue_link_sup_sup:stop_child(Q),
+ rabbit_federation_status:remove_exchange_or_queue(QName);
+ false -> ok
+ end,
+ ok.
+
+policy_changed(Q1, Q2) when ?is_amqqueue(Q1) ->
+ QName = amqqueue:get_name(Q1),
+ case rabbit_amqqueue:lookup(QName) of
+ {ok, Q0} when ?is_amqqueue(Q0) ->
+ QPid = amqqueue:get_pid(Q0),
+ rpc:call(node(QPid), rabbit_federation_queue,
+ policy_changed_local, [Q1, Q2]);
+ {error, not_found} ->
+ ok
+ end.
+
+policy_changed_local(Q1, Q2) ->
+ shutdown(Q1),
+ startup(Q2).
+
+active_for(Q) ->
+ Args = amqqueue:get_arguments(Q),
+ case rabbit_misc:table_lookup(Args, <<"x-internal-purpose">>) of
+ {longstr, _} -> false; %% [0]
+ _ -> rabbit_federation_upstream:federate(Q)
+ end.
+%% [0] Currently the only "internal purpose" is federation, but I
+%% suspect if we introduce another one it will also be for something
+%% that doesn't want to be federated.
+
+%% We need to reconsider whether we need to run or pause every time
+%% the consumer state changes in the queue. But why can the state
+%% change?
+%%
+%% consumer blocked | We may have no more active consumers, and thus need to
+%% | pause
+%% |
+%% consumer unblocked | We don't care
+%% |
+%% queue empty | The queue has become empty therefore we need to run to
+%% | get more messages
+%% |
+%% basic consume | We don't care
+%% |
+%% basic cancel | We may have no more active consumers, and thus need to
+%% | pause
+%% |
+%% refresh | We asked for it (we have started a new link after
+%% | failover and need something to prod us into action
+%% | (or not)).
+%%
+%% In the cases where we don't care it's not prohibitively expensive
+%% for us to be here anyway, so never mind.
+%%
+%% Note that there is no "queue became non-empty" state change - that's
+%% because of the queue invariant. If the queue transitions from empty to
+%% non-empty then it must have no active consumers - in which case it stays
+%% the same from our POV.
+
+consumer_state_changed(Q, MaxActivePriority, IsEmpty) ->
+ QName = amqqueue:get_name(Q),
+ case IsEmpty andalso active_unfederated(MaxActivePriority) of
+ true -> rabbit_federation_queue_link:run(QName);
+ false -> rabbit_federation_queue_link:pause(QName)
+ end,
+ ok.
+
+active_unfederated(empty) -> false;
+active_unfederated(P) when P >= 0 -> true;
+active_unfederated(_P) -> false.
diff --git a/deps/rabbitmq_federation/src/rabbit_federation_queue_link.erl b/deps/rabbitmq_federation/src/rabbit_federation_queue_link.erl
new file mode 100644
index 0000000000..97389cb8f6
--- /dev/null
+++ b/deps/rabbitmq_federation/src/rabbit_federation_queue_link.erl
@@ -0,0 +1,330 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_federation_queue_link).
+
+%% pg2 is deprecated in OTP 23.
+-compile(nowarn_deprecated_function).
+
+-include_lib("rabbit/include/amqqueue.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include("rabbit_federation.hrl").
+
+-behaviour(gen_server2).
+
+-export([start_link/1, go/0, run/1, pause/1]).
+
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
+ terminate/2, code_change/3]).
+
+-import(rabbit_misc, [pget/2]).
+-import(rabbit_federation_util, [name/1, pgname/1]).
+
+-record(not_started, {queue, run, upstream, upstream_params}).
+-record(state, {queue, run, conn, ch, dconn, dch, upstream, upstream_params,
+ unacked}).
+
+start_link(Args) ->
+ gen_server2:start_link(?MODULE, Args, [{timeout, infinity}]).
+
+run(QName) -> cast(QName, run).
+pause(QName) -> cast(QName, pause).
+go() -> cast(go).
+
+%%----------------------------------------------------------------------------
+%%call(QName, Msg) -> [gen_server2:call(Pid, Msg, infinity) || Pid <- q(QName)].
+cast(Msg) -> [gen_server2:cast(Pid, Msg) || Pid <- all()].
+cast(QName, Msg) -> [gen_server2:cast(Pid, Msg) || Pid <- q(QName)].
+
+join(Name) ->
+ pg2:create(pgname(Name)),
+ ok = pg2:join(pgname(Name), self()).
+
+all() ->
+ pg2:create(pgname(rabbit_federation_queues)),
+ pg2:get_members(pgname(rabbit_federation_queues)).
+
+q(QName) ->
+ pg2:create(pgname({rabbit_federation_queue, QName})),
+ pg2:get_members(pgname({rabbit_federation_queue, QName})).
+
+federation_up() ->
+ proplists:is_defined(rabbitmq_federation,
+ application:which_applications(infinity)).
+
+%%----------------------------------------------------------------------------
+
+init({Upstream, Queue}) when ?is_amqqueue(Queue) ->
+ QName = amqqueue:get_name(Queue),
+ case rabbit_amqqueue:lookup(QName) of
+ {ok, Q} ->
+ DeobfuscatedUpstream = rabbit_federation_util:deobfuscate_upstream(Upstream),
+ DeobfuscatedUParams = rabbit_federation_upstream:to_params(DeobfuscatedUpstream, Queue),
+ UParams = rabbit_federation_util:obfuscate_upstream_params(DeobfuscatedUParams),
+ rabbit_federation_status:report(Upstream, UParams, QName, starting),
+ join(rabbit_federation_queues),
+ join({rabbit_federation_queue, QName}),
+ gen_server2:cast(self(), maybe_go),
+ rabbit_amqqueue:notify_decorators(Q),
+ {ok, #not_started{queue = Queue,
+ run = false,
+ upstream = Upstream,
+ upstream_params = UParams}};
+ {error, not_found} ->
+ rabbit_federation_link_util:log_warning(QName, "not found, stopping link~n", []),
+ {stop, gone}
+ end.
+
+handle_call(Msg, _From, State) ->
+ {stop, {unexpected_call, Msg}, State}.
+
+handle_cast(maybe_go, State) ->
+ case federation_up() of
+ true -> go(State);
+ false -> {noreply, State}
+ end;
+
+handle_cast(go, State = #not_started{}) ->
+ go(State);
+
+handle_cast(go, State) ->
+ {noreply, State};
+
+handle_cast(run, State = #state{upstream = Upstream,
+ upstream_params = UParams,
+ ch = Ch,
+ run = false}) ->
+ consume(Ch, Upstream, UParams#upstream_params.x_or_q),
+ {noreply, State#state{run = true}};
+
+handle_cast(run, State = #not_started{}) ->
+ {noreply, State#not_started{run = true}};
+
+handle_cast(run, State) ->
+ %% Already started
+ {noreply, State};
+
+handle_cast(pause, State = #state{run = false}) ->
+ %% Already paused
+ {noreply, State};
+
+handle_cast(pause, State = #not_started{}) ->
+ {noreply, State#not_started{run = false}};
+
+handle_cast(pause, State = #state{ch = Ch, upstream = Upstream}) ->
+ cancel(Ch, Upstream),
+ {noreply, State#state{run = false}};
+
+handle_cast(Msg, State) ->
+ {stop, {unexpected_cast, Msg}, State}.
+
+handle_info(#'basic.consume_ok'{}, State) ->
+ {noreply, State};
+
+handle_info(#'basic.ack'{} = Ack, State = #state{ch = Ch,
+ unacked = Unacked}) ->
+ Unacked1 = rabbit_federation_link_util:ack(Ack, Ch, Unacked),
+ {noreply, State#state{unacked = Unacked1}};
+
+handle_info(#'basic.nack'{} = Nack, State = #state{ch = Ch,
+ unacked = Unacked}) ->
+ Unacked1 = rabbit_federation_link_util:nack(Nack, Ch, Unacked),
+ {noreply, State#state{unacked = Unacked1}};
+
+handle_info({#'basic.deliver'{redelivered = Redelivered,
+ exchange = X,
+ routing_key = K} = DeliverMethod, Msg},
+ State = #state{queue = Q,
+ upstream = Upstream,
+ upstream_params = UParams,
+ ch = Ch,
+ dch = DCh,
+ unacked = Unacked}) when ?is_amqqueue(Q) ->
+ QName = amqqueue:get_name(Q),
+ PublishMethod = #'basic.publish'{exchange = <<"">>,
+ routing_key = QName#resource.name},
+ HeadersFun = fun (H) -> update_headers(UParams, Redelivered, X, K, H) end,
+ ForwardFun = fun (_H) -> true end,
+ Unacked1 = rabbit_federation_link_util:forward(
+ Upstream, DeliverMethod, Ch, DCh, PublishMethod,
+ HeadersFun, ForwardFun, Msg, Unacked),
+ %% TODO actually we could reject when 'stopped'
+ {noreply, State#state{unacked = Unacked1}};
+
+handle_info(#'basic.cancel'{},
+ State = #state{queue = Q,
+ upstream = Upstream,
+ upstream_params = UParams}) when ?is_amqqueue(Q) ->
+ QName = amqqueue:get_name(Q),
+ rabbit_federation_link_util:connection_error(
+ local, basic_cancel, Upstream, UParams, QName, State);
+
+handle_info({'DOWN', _Ref, process, Pid, Reason},
+ State = #state{dch = DCh,
+ ch = Ch,
+ upstream = Upstream,
+ upstream_params = UParams,
+ queue = Q}) when ?is_amqqueue(Q) ->
+ QName = amqqueue:get_name(Q),
+ handle_down(Pid, Reason, Ch, DCh, {Upstream, UParams, QName}, State);
+
+handle_info(Msg, State) ->
+ {stop, {unexpected_info, Msg}, State}.
+
+terminate(Reason, #not_started{upstream = Upstream,
+ upstream_params = UParams,
+ queue = Q}) when ?is_amqqueue(Q) ->
+ QName = amqqueue:get_name(Q),
+ rabbit_federation_link_util:log_terminate(Reason, Upstream, UParams, QName),
+ ok;
+
+terminate(Reason, #state{dconn = DConn,
+ conn = Conn,
+ upstream = Upstream,
+ upstream_params = UParams,
+ queue = Q}) when ?is_amqqueue(Q) ->
+ QName = amqqueue:get_name(Q),
+ rabbit_federation_link_util:ensure_connection_closed(DConn),
+ rabbit_federation_link_util:ensure_connection_closed(Conn),
+ rabbit_federation_link_util:log_terminate(Reason, Upstream, UParams, QName),
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+%%----------------------------------------------------------------------------
+
+go(S0 = #not_started{run = Run,
+ upstream = Upstream = #upstream{
+ prefetch_count = Prefetch},
+ upstream_params = UParams,
+ queue = Queue}) when ?is_amqqueue(Queue) ->
+ QName = amqqueue:get_name(Queue),
+ #upstream_params{x_or_q = UQueue} = UParams,
+ Durable = amqqueue:is_durable(UQueue),
+ AutoDelete = amqqueue:is_auto_delete(UQueue),
+ Args = amqqueue:get_arguments(UQueue),
+ Unacked = rabbit_federation_link_util:unacked_new(),
+ rabbit_federation_link_util:start_conn_ch(
+ fun (Conn, Ch, DConn, DCh) ->
+ check_upstream_suitable(Conn),
+ amqp_channel:call(Ch, #'queue.declare'{queue = name(UQueue),
+ durable = Durable,
+ auto_delete = AutoDelete,
+ arguments = Args}),
+ case Upstream#upstream.ack_mode of
+ 'no-ack' -> ok;
+ _ -> amqp_channel:call(
+ Ch, #'basic.qos'{prefetch_count = Prefetch})
+ end,
+ amqp_selective_consumer:register_default_consumer(Ch, self()),
+ case Run of
+ true -> consume(Ch, Upstream, UQueue);
+ false -> ok
+ end,
+ {noreply, #state{queue = Queue,
+ run = Run,
+ conn = Conn,
+ ch = Ch,
+ dconn = DConn,
+ dch = DCh,
+ upstream = Upstream,
+ upstream_params = UParams,
+ unacked = Unacked}}
+ end, Upstream, UParams, QName, S0).
+
+check_upstream_suitable(Conn) ->
+ Props = pget(server_properties,
+ amqp_connection:info(Conn, [server_properties])),
+ {table, Caps} = rabbit_misc:table_lookup(Props, <<"capabilities">>),
+ case rabbit_misc:table_lookup(Caps, <<"consumer_priorities">>) of
+ {bool, true} -> ok;
+ _ -> exit({error, upstream_lacks_consumer_priorities})
+ end.
+
+update_headers(UParams, Redelivered, X, K, undefined) ->
+ update_headers(UParams, Redelivered, X, K, []);
+
+update_headers(#upstream_params{table = Table}, Redelivered, X, K, Headers) ->
+ {Headers1, Count} =
+ case rabbit_misc:table_lookup(Headers, ?ROUTING_HEADER) of
+ undefined ->
+ %% We only want to record the original exchange and
+ %% routing key the first time a message gets
+ %% forwarded; after that it's known that they were
+ %% <<>> and QueueName respectively.
+ {init_x_original_source_headers(Headers, X, K), 0};
+ {array, Been} ->
+ update_visit_count(Table, Been, Headers);
+ %% this means the header comes from the client
+ %% which re-published the message, most likely unintentionally.
+ %% We can't assume much about the value, so we simply ignore it.
+ _Other ->
+ {init_x_original_source_headers(Headers, X, K), 0}
+ end,
+ rabbit_basic:prepend_table_header(
+ ?ROUTING_HEADER, Table ++ [{<<"redelivered">>, bool, Redelivered},
+ {<<"visit-count">>, long, Count + 1}],
+ swap_cc_header(Headers1)).
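+%% For illustration: the first time a message is forwarded, its headers
+%% gain x-original-exchange / x-original-routing-key plus a
+%% ?ROUTING_HEADER entry whose visit-count is 1; if the same message
+%% passes through again, update_visit_count/3 finds that entry and the
+%% count is bumped to 2.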
+
+init_x_original_source_headers(Headers, X, K) ->
+ rabbit_misc:set_table_value(
+ rabbit_misc:set_table_value(
+ Headers, <<"x-original-exchange">>, longstr, X),
+ <<"x-original-routing-key">>, longstr, K).
+
+update_visit_count(Table, Been, Headers) ->
+ {Found, Been1} = lists:partition(
+ fun(I) -> visit_match(I, Table) end,
+ Been),
+ C = case Found of
+ [] -> 0;
+ [{table, T}] -> case rabbit_misc:table_lookup(
+ T, <<"visit-count">>) of
+ {_, I} when is_number(I) -> I;
+ _ -> 0
+ end
+ end,
+ {rabbit_misc:set_table_value(
+ Headers, ?ROUTING_HEADER, array, Been1), C}.
+
+swap_cc_header(Table) ->
+ [{case K of
+ <<"CC">> -> <<"x-original-cc">>;
+ _ -> K
+ end, T, V} || {K, T, V} <- Table].
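+%% e.g. swap_cc_header([{<<"CC">>, array, RKs}, {<<"k">>, long, 1}])
+%% =:= [{<<"x-original-cc">>, array, RKs}, {<<"k">>, long, 1}], so the
+%% upstream's CC routing is preserved for reference but not re-applied
+%% when the message is published downstream.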
+
+visit_match({table, T}, Info) ->
+ lists:all(fun (K) ->
+ rabbit_misc:table_lookup(T, K) =:=
+ rabbit_misc:table_lookup(Info, K)
+ end, [<<"uri">>, <<"virtual_host">>, <<"queue">>]);
+visit_match(_ ,_) ->
+ false.
+
+consumer_tag(#upstream{consumer_tag = ConsumerTag}) ->
+ ConsumerTag.
+
+consume(Ch, Upstream, UQueue) ->
+ ConsumerTag = consumer_tag(Upstream),
+ NoAck = Upstream#upstream.ack_mode =:= 'no-ack',
+ amqp_channel:cast(
+ Ch, #'basic.consume'{queue = name(UQueue),
+ no_ack = NoAck,
+ nowait = true,
+ consumer_tag = ConsumerTag,
+ arguments = [{<<"x-priority">>, long, -1}]}).
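+%% The x-priority of -1 above registers the federation consumer below
+%% the default consumer priority of 0, so local consumers on the
+%% upstream queue are served first; check_upstream_suitable/1 has
+%% already verified that the upstream advertises the
+%% consumer_priorities capability this relies on.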
+
+cancel(Ch, Upstream) ->
+ ConsumerTag = consumer_tag(Upstream),
+ amqp_channel:cast(Ch, #'basic.cancel'{nowait = true,
+ consumer_tag = ConsumerTag}).
+
+handle_down(DCh, Reason, _Ch, DCh, Args, State) ->
+ rabbit_federation_link_util:handle_downstream_down(Reason, Args, State);
+handle_down(Ch, Reason, Ch, _DCh, Args, State) ->
+ rabbit_federation_link_util:handle_upstream_down(Reason, Args, State).
diff --git a/deps/rabbitmq_federation/src/rabbit_federation_queue_link_sup_sup.erl b/deps/rabbitmq_federation/src/rabbit_federation_queue_link_sup_sup.erl
new file mode 100644
index 0000000000..1f6ec2b88f
--- /dev/null
+++ b/deps/rabbitmq_federation/src/rabbit_federation_queue_link_sup_sup.erl
@@ -0,0 +1,87 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_federation_queue_link_sup_sup).
+
+-behaviour(mirrored_supervisor).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+-include_lib("rabbit/include/amqqueue.hrl").
+-define(SUPERVISOR, ?MODULE).
+
+%% Supervises the upstream links for all queues (but not exchanges). We need
+%% different handling here since queues do not want a mirrored sup.
+
+-export([start_link/0, start_child/1, adjust/1, stop_child/1]).
+-export([init/1]).
+
+%%----------------------------------------------------------------------------
+
+start_link() ->
+ mirrored_supervisor:start_link({local, ?SUPERVISOR}, ?SUPERVISOR,
+ fun rabbit_misc:execute_mnesia_transaction/1,
+ ?MODULE, []).
+
+%% Note that the next supervisor down, rabbit_federation_link_sup, is common
+%% between exchanges and queues.
+start_child(Q) ->
+ case mirrored_supervisor:start_child(
+ ?SUPERVISOR,
+ {id(Q), {rabbit_federation_link_sup, start_link, [Q]},
+ transient, ?SUPERVISOR_WAIT, supervisor,
+ [rabbit_federation_link_sup]}) of
+ {ok, _Pid} -> ok;
+ {error, {already_started, _Pid}} ->
+ QueueName = amqqueue:get_name(Q),
+ rabbit_log_federation:warning("Federation link for queue ~p was already started",
+ [rabbit_misc:rs(QueueName)]),
+ ok;
+        %% A link returned {stop, gone} and the link_sup shut down; that's OK.
+ {error, {shutdown, _}} -> ok
+ end.
+
+
+adjust({clear_upstream, VHost, UpstreamName}) ->
+ [rabbit_federation_link_sup:adjust(Pid, Q, {clear_upstream, UpstreamName}) ||
+ {Q, Pid, _, _} <- mirrored_supervisor:which_children(?SUPERVISOR),
+ ?amqqueue_vhost_equals(Q, VHost)],
+ ok;
+adjust(Reason) ->
+ [rabbit_federation_link_sup:adjust(Pid, Q, Reason) ||
+ {Q, Pid, _, _} <- mirrored_supervisor:which_children(?SUPERVISOR)],
+ ok.
+
+stop_child(Q) ->
+ case mirrored_supervisor:terminate_child(?SUPERVISOR, id(Q)) of
+ ok -> ok;
+ {error, Err} ->
+ QueueName = amqqueue:get_name(Q),
+ rabbit_log_federation:warning(
+ "Attempt to stop a federation link for queue ~p failed: ~p",
+ [rabbit_misc:rs(QueueName), Err]),
+ ok
+ end,
+ ok = mirrored_supervisor:delete_child(?SUPERVISOR, id(Q)).
+
+%%----------------------------------------------------------------------------
+
+init([]) ->
+ {ok, {{one_for_one, 1200, 60}, []}}.
+
+%% Clean out all mutable aspects of the queue except policy. We need
+%% to keep the entire queue around rather than just take its name
+%% since we will want to know its policy to determine how to federate
+%% it, and its immutable properties in case we want to redeclare it
+%% upstream. We don't just take its name and look it up again since
+%% that would introduce race conditions when policies change
+%% frequently. Note that since we take down all the links and start
+%% again when policies change, the policy will always be correct, so
+%% we don't clear it out here and can trust it.
+id(Q) when ?is_amqqueue(Q) ->
+ Policy = amqqueue:get_policy(Q),
+ Q1 = rabbit_amqqueue:immutable(Q),
+ amqqueue:set_policy(Q1, Policy).
diff --git a/deps/rabbitmq_federation/src/rabbit_federation_status.erl b/deps/rabbitmq_federation/src/rabbit_federation_status.erl
new file mode 100644
index 0000000000..04afec990d
--- /dev/null
+++ b/deps/rabbitmq_federation/src/rabbit_federation_status.erl
@@ -0,0 +1,175 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_federation_status).
+-behaviour(gen_server).
+
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include("rabbit_federation.hrl").
+
+-export([start_link/0]).
+
+-export([report/4, remove_exchange_or_queue/1, remove/2, status/0, lookup/1]).
+
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
+ terminate/2, code_change/3]).
+
+-import(rabbit_federation_util, [name/1]).
+
+-define(SERVER, ?MODULE).
+-define(ETS_NAME, ?MODULE).
+
+-record(state, {}).
+-record(entry, {key, uri, status, timestamp, id, supervisor, upstream}).
+
+start_link() ->
+ gen_server:start_link({local, ?SERVER}, ?MODULE, [], []).
+
+report(Upstream, UParams, XorQName, Status) ->
+ [Supervisor | _] = get('$ancestors'),
+ gen_server:cast(?SERVER, {report, Supervisor, Upstream, UParams, XorQName,
+ Status, calendar:local_time()}).
+
+remove_exchange_or_queue(XorQName) ->
+ gen_server:call(?SERVER, {remove_exchange_or_queue, XorQName}, infinity).
+
+remove(Upstream, XorQName) ->
+ gen_server:call(?SERVER, {remove, Upstream, XorQName}, infinity).
+
+status() ->
+ gen_server:call(?SERVER, status, infinity).
+
+lookup(Id) ->
+ gen_server:call(?SERVER, {lookup, Id}, infinity).
+
+init([]) ->
+ ?ETS_NAME = ets:new(?ETS_NAME,
+ [named_table, {keypos, #entry.key}, private]),
+ {ok, #state{}}.
+
+handle_call({remove_exchange_or_queue, XorQName}, _From, State) ->
+ [link_gone(Entry)
+ || Entry <- ets:match_object(?ETS_NAME, match_entry(xorqkey(XorQName)))],
+ {reply, ok, State};
+
+handle_call({remove, Upstream, XorQName}, _From, State) ->
+ case ets:match_object(?ETS_NAME, match_entry(key(XorQName, Upstream))) of
+ [Entry] -> link_gone(Entry);
+ [] -> ok
+ end,
+ {reply, ok, State};
+
+handle_call({lookup, Id}, _From, State) ->
+ Link = case ets:match_object(?ETS_NAME, match_id(Id)) of
+ [Entry] ->
+ [{key, Entry#entry.key},
+ {uri, Entry#entry.uri},
+ {status, Entry#entry.status},
+ {timestamp, Entry#entry.timestamp},
+ {id, Entry#entry.id},
+ {supervisor, Entry#entry.supervisor},
+ {upstream, Entry#entry.upstream}];
+ [] -> not_found
+ end,
+ {reply, Link, State};
+
+handle_call(status, _From, State) ->
+ Entries = ets:tab2list(?ETS_NAME),
+ {reply, [format(Entry) || Entry <- Entries], State}.
+
+handle_cast({report, Supervisor, Upstream, #upstream_params{safe_uri = URI},
+ XorQName, Status, Timestamp}, State) ->
+ Key = key(XorQName, Upstream),
+ Entry = #entry{key = Key,
+ status = Status,
+ uri = URI,
+ timestamp = Timestamp,
+ supervisor = Supervisor,
+ upstream = Upstream,
+ id = unique_id(Key)},
+ true = ets:insert(?ETS_NAME, Entry),
+ rabbit_event:notify(federation_link_status, format(Entry)),
+ {noreply, State}.
+
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+format(#entry{status = Status,
+ uri = URI,
+ timestamp = Timestamp} = Entry) ->
+ identity(Entry) ++ split_status(Status) ++ [{uri, URI},
+ {timestamp, Timestamp}].
+
+identity(#entry{key = {#resource{virtual_host = VHost,
+ kind = Type,
+ name = XorQNameBin},
+ UpstreamName, UXorQNameBin},
+ id = Id,
+ upstream = #upstream{consumer_tag = ConsumerTag}}) ->
+ case Type of
+ exchange -> [{exchange, XorQNameBin},
+ {upstream_exchange, UXorQNameBin}];
+ queue -> [{queue, XorQNameBin},
+ {upstream_queue, UXorQNameBin},
+ {consumer_tag, ConsumerTag}]
+ end ++ [{type, Type},
+ {vhost, VHost},
+ {upstream, UpstreamName},
+ {id, Id}].
+
+unique_id(Key = {#resource{}, UpName, ResName}) when is_binary(UpName), is_binary(ResName) ->
+ PHash = erlang:phash2(Key, 1 bsl 32),
+ << << case N >= 10 of
+ true -> N - 10 + $a;
+ false -> N + $0 end >>
+ || <<N:4>> <= <<PHash:32>> >>.
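+%% A sketch of the encoding above: the 32-bit phash2 of the key is
+%% rendered nibble by nibble as 8 lowercase hex characters, e.g.
+%% <<"3f9c2a17">> (an illustrative value; the digits depend on the key).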
+
+split_status({running, ConnName}) -> [{status, running},
+ {local_connection, ConnName}];
+split_status({Status, Error}) -> [{status, Status},
+ {error, Error}];
+split_status(Status) when is_atom(Status) -> [{status, Status}].
+
+link_gone(Entry) ->
+ rabbit_event:notify(federation_link_removed, identity(Entry)),
+ true = ets:delete_object(?ETS_NAME, Entry).
+
+%% We don't want to key off the entire upstream, since bits of it may change
+key(XName = #resource{kind = exchange}, #upstream{name = UpstreamName,
+ exchange_name = UXNameBin}) ->
+ {XName, UpstreamName, UXNameBin};
+
+key(QName = #resource{kind = queue}, #upstream{name = UpstreamName,
+ queue_name = UQNameBin}) ->
+ {QName, UpstreamName, UQNameBin}.
+
+xorqkey(XorQName) ->
+ {XorQName, '_', '_'}.
+
+match_entry(Key) ->
+ #entry{key = Key,
+ uri = '_',
+ status = '_',
+ timestamp = '_',
+ id = '_',
+ supervisor = '_',
+ upstream = '_'}.
+
+match_id(Id) ->
+ #entry{key = '_',
+ uri = '_',
+ status = '_',
+ timestamp = '_',
+ id = Id,
+ supervisor = '_',
+ upstream = '_'}.
diff --git a/deps/rabbitmq_federation/src/rabbit_federation_sup.erl b/deps/rabbitmq_federation/src/rabbit_federation_sup.erl
new file mode 100644
index 0000000000..d3642b52c2
--- /dev/null
+++ b/deps/rabbitmq_federation/src/rabbit_federation_sup.erl
@@ -0,0 +1,63 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_federation_sup).
+
+-behaviour(supervisor).
+
+%% Supervises everything. There is just one of these.
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+-define(SUPERVISOR, rabbit_federation_sup).
+
+-export([start_link/0, stop/0]).
+
+-export([init/1]).
+
+%% This supervisor needs to be part of the rabbit application since
+%% a) it needs to be in place when exchange recovery takes place
+%% b) it needs to go up and down with rabbit
+
+-rabbit_boot_step({rabbit_federation_supervisor,
+ [{description, "federation"},
+ {mfa, {rabbit_sup, start_child, [?MODULE]}},
+ {requires, kernel_ready},
+ {cleanup, {?MODULE, stop, []}},
+ {enables, rabbit_federation_exchange},
+ {enables, rabbit_federation_queue}]}).
+
+%%----------------------------------------------------------------------------
+
+start_link() ->
+ R = supervisor:start_link({local, ?SUPERVISOR}, ?MODULE, []),
+ rabbit_federation_event:add_handler(),
+ R.
+
+stop() ->
+ rabbit_federation_event:remove_handler(),
+ ok = supervisor:terminate_child(rabbit_sup, ?MODULE),
+ ok = supervisor:delete_child(rabbit_sup, ?MODULE).
+
+%%----------------------------------------------------------------------------
+
+init([]) ->
+ Status = {status, {rabbit_federation_status, start_link, []},
+ transient, ?WORKER_WAIT, worker,
+ [rabbit_federation_status]},
+ XLinkSupSup = {x_links,
+ {rabbit_federation_exchange_link_sup_sup, start_link, []},
+ transient, ?SUPERVISOR_WAIT, supervisor,
+ [rabbit_federation_exchange_link_sup_sup]},
+ QLinkSupSup = {q_links,
+ {rabbit_federation_queue_link_sup_sup, start_link, []},
+ transient, ?SUPERVISOR_WAIT, supervisor,
+ [rabbit_federation_queue_link_sup_sup]},
+    %% With the default reconnect-delay of 5 seconds, this supports up to
+    %% 100 links constantly failing and being restarted every 5 seconds,
+    %% i.e. 1200 restarts a minute (or 200 links if reconnect-delay is
+    %% 10 seconds, 600 with 30 seconds, etc.: N * (60/reconnect-delay) <= 1200).
+ {ok, {{one_for_one, 1200, 60}, [Status, XLinkSupSup, QLinkSupSup]}}.
diff --git a/deps/rabbitmq_federation/src/rabbit_federation_upstream.erl b/deps/rabbitmq_federation/src/rabbit_federation_upstream.erl
new file mode 100644
index 0000000000..e079b850b5
--- /dev/null
+++ b/deps/rabbitmq_federation/src/rabbit_federation_upstream.erl
@@ -0,0 +1,164 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_federation_upstream).
+
+-include("rabbit_federation.hrl").
+-include_lib("rabbit/include/amqqueue.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-export([federate/1, for/1, for/2, params_to_string/1, to_params/2]).
+%% For testing
+-export([from_set/2, from_pattern/2, remove_credentials/1]).
+
+-import(rabbit_misc, [pget/2, pget/3]).
+-import(rabbit_federation_util, [name/1, vhost/1, r/1]).
+-import(rabbit_data_coercion, [to_atom/1]).
+
+%%----------------------------------------------------------------------------
+
+federate(XorQ) ->
+ rabbit_policy:get(<<"federation-upstream">>, XorQ) =/= undefined orelse
+ rabbit_policy:get(<<"federation-upstream-set">>, XorQ) =/= undefined orelse
+ rabbit_policy:get(<<"federation-upstream-pattern">>, XorQ) =/= undefined.
+
+for(XorQ) ->
+ case federate(XorQ) of
+ false -> [];
+ true -> from_set_contents(upstreams(XorQ), XorQ)
+ end.
+
+for(XorQ, UpstreamName) ->
+ case federate(XorQ) of
+ false -> [];
+ true -> rabbit_federation_util:find_upstreams(
+ UpstreamName, from_set_contents(upstreams(XorQ), XorQ))
+ end.
+
+upstreams(XorQ) ->
+ UName = rabbit_policy:get(<<"federation-upstream">>, XorQ),
+ USetName = rabbit_policy:get(<<"federation-upstream-set">>, XorQ),
+ UPatternValue = rabbit_policy:get(<<"federation-upstream-pattern">>, XorQ),
+    %% Cannot define two at a time; see rabbit_federation_parameters:validate_policy/1
+ case {UName, USetName, UPatternValue} of
+ {undefined, undefined, undefined} -> [];
+ {undefined, undefined, _} -> find_contents(UPatternValue, vhost(XorQ));
+ {undefined, _, undefined} -> set_contents(USetName, vhost(XorQ));
+ {_, undefined, undefined} -> [[{<<"upstream">>, UName}]]
+ end.
+
+params_table(SafeURI, XorQ) ->
+ Key = case XorQ of
+ #exchange{} -> <<"exchange">>;
+ Q when ?is_amqqueue(Q) -> <<"queue">>
+ end,
+ [{<<"uri">>, longstr, SafeURI},
+ {Key, longstr, name(XorQ)}].
+
+params_to_string(#upstream_params{safe_uri = SafeURI,
+ x_or_q = XorQ}) ->
+ print("~s on ~s", [rabbit_misc:rs(r(XorQ)), SafeURI]).
+
+remove_credentials(URI) ->
+ list_to_binary(amqp_uri:remove_credentials(binary_to_list(URI))).
+
+to_params(Upstream = #upstream{uris = URIs}, XorQ) ->
+ URI = lists:nth(rand:uniform(length(URIs)), URIs),
+ {ok, Params} = amqp_uri:parse(binary_to_list(URI), vhost(XorQ)),
+ XorQ1 = with_name(Upstream, vhost(Params), XorQ),
+ SafeURI = remove_credentials(URI),
+ #upstream_params{params = Params,
+ uri = URI,
+ x_or_q = XorQ1,
+ safe_uri = SafeURI,
+ table = params_table(SafeURI, XorQ)}.
+
+print(Fmt, Args) -> iolist_to_binary(io_lib:format(Fmt, Args)).
+
+from_set(SetName, XorQ) ->
+ from_set_contents(set_contents(SetName, vhost(XorQ)), XorQ).
+
+from_pattern(SetName, XorQ) ->
+ from_set_contents(find_contents(SetName, vhost(XorQ)), XorQ).
+
+set_contents(<<"all">>, VHost) ->
+ Upstreams0 = rabbit_runtime_parameters:list(
+ VHost, <<"federation-upstream">>),
+ Upstreams = [rabbit_data_coercion:to_list(U) || U <- Upstreams0],
+ [[{<<"upstream">>, pget(name, U)}] || U <- Upstreams];
+
+set_contents(SetName, VHost) ->
+ case rabbit_runtime_parameters:value(
+ VHost, <<"federation-upstream-set">>, SetName) of
+ not_found -> [];
+ Set -> Set
+ end.
+
+find_contents(RegExp, VHost) ->
+ Upstreams0 = rabbit_runtime_parameters:list(
+ VHost, <<"federation-upstream">>),
+ Upstreams = [rabbit_data_coercion:to_list(U) || U <- Upstreams0,
+ re:run(pget(name, U), RegExp) =/= nomatch],
+ [[{<<"upstream">>, pget(name, U)}] || U <- Upstreams].
+
+from_set_contents(Set, XorQ) ->
+ Results = [from_set_element(P, XorQ) || P <- Set],
+ [R || R <- Results, R =/= not_found].
+
+from_set_element(UpstreamSetElem0, XorQ) ->
+ UpstreamSetElem = rabbit_data_coercion:to_proplist(UpstreamSetElem0),
+ Name = bget(upstream, UpstreamSetElem, []),
+ case rabbit_runtime_parameters:value(
+ vhost(XorQ), <<"federation-upstream">>, Name) of
+ not_found -> not_found;
+ Upstream -> from_upstream_or_set(
+ UpstreamSetElem, Name, Upstream, XorQ)
+ end.
+
+from_upstream_or_set(US, Name, U, XorQ) ->
+ URIParam = bget(uri, US, U),
+ URIs = case URIParam of
+ B when is_binary(B) -> [B];
+ L when is_list(L) -> L
+ end,
+ #upstream{uris = URIs,
+ exchange_name = bget(exchange, US, U, name(XorQ)),
+ queue_name = bget(queue, US, U, name(XorQ)),
+ consumer_tag = bget('consumer-tag', US, U, <<"federation-link-", Name/binary>>),
+ prefetch_count = bget('prefetch-count', US, U, ?DEF_PREFETCH),
+ reconnect_delay = bget('reconnect-delay', US, U, 5),
+ max_hops = bget('max-hops', US, U, 1),
+ expires = bget(expires, US, U, none),
+ message_ttl = bget('message-ttl', US, U, none),
+ trust_user_id = bget('trust-user-id', US, U, false),
+ ack_mode = to_atom(bget('ack-mode', US, U, <<"on-confirm">>)),
+ ha_policy = bget('ha-policy', US, U, none),
+ name = Name,
+ bind_nowait = bget('bind-nowait', US, U, false),
+ resource_cleanup_mode = to_atom(bget('resource-cleanup-mode', US, U, <<"default">>))}.
+
+%%----------------------------------------------------------------------------
+
+bget(K, L1, L2) -> bget(K, L1, L2, undefined).
+
+bget(K0, L1, L2, D) ->
+ K = a2b(K0),
+ %% coerce maps to proplists
+ PL1 = rabbit_data_coercion:to_list(L1),
+ PL2 = rabbit_data_coercion:to_list(L2),
+ case pget(K, PL1, undefined) of
+ undefined -> pget(K, PL2, D);
+ Result -> Result
+ end.
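+
+%% A sketch of the lookup order: bget('max-hops', US, U, 1) first consults
+%% the upstream-set element US, then the upstream parameter U, and only then
+%% falls back to the default 1.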
+
+a2b(A) -> list_to_binary(atom_to_list(A)).
+
+with_name(#upstream{exchange_name = XNameBin}, VHostBin, X = #exchange{}) ->
+ X#exchange{name = rabbit_misc:r(VHostBin, exchange, XNameBin)};
+
+with_name(#upstream{queue_name = QNameBin}, VHostBin, Q) when ?is_amqqueue(Q) ->
+ amqqueue:set_name(Q, rabbit_misc:r(VHostBin, queue, QNameBin)).
diff --git a/deps/rabbitmq_federation/src/rabbit_federation_upstream_exchange.erl b/deps/rabbitmq_federation/src/rabbit_federation_upstream_exchange.erl
new file mode 100644
index 0000000000..6018dd90a5
--- /dev/null
+++ b/deps/rabbitmq_federation/src/rabbit_federation_upstream_exchange.erl
@@ -0,0 +1,75 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_federation_upstream_exchange).
+
+-rabbit_boot_step({?MODULE,
+ [{description, "federation upstream exchange type"},
+ {mfa, {rabbit_registry, register,
+ [exchange, <<"x-federation-upstream">>, ?MODULE]}},
+ {requires, rabbit_registry},
+ {cleanup, {rabbit_registry, unregister,
+ [exchange, <<"x-federation-upstream">>]}},
+ {enables, recovery}]}).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+-include("rabbit_federation.hrl").
+
+-behaviour(rabbit_exchange_type).
+
+-export([description/0, serialise_events/0, route/2]).
+-export([validate/1, validate_binding/2,
+ create/2, delete/3, policy_changed/2,
+ add_binding/3, remove_bindings/3, assert_args_equivalence/2]).
+-export([info/1, info/2]).
+
+%%----------------------------------------------------------------------------
+
+info(_X) -> [].
+info(_X, _) -> [].
+
+description() ->
+ [{description, <<"Federation upstream helper exchange">>},
+ {internal_purpose, federation}].
+
+serialise_events() -> false.
+
+route(X = #exchange{arguments = Args},
+ D = #delivery{message = #basic_message{content = Content}}) ->
+ %% This arg was introduced in the same release as this exchange type;
+ %% it must be set
+ {long, MaxHops} = rabbit_misc:table_lookup(Args, ?MAX_HOPS_ARG),
+ %% Will be missing for pre-3.3.0 versions
+ DName = case rabbit_misc:table_lookup(Args, ?DOWNSTREAM_NAME_ARG) of
+ {longstr, Val0} -> Val0;
+ _ -> unknown
+ end,
+ %% Will be missing for pre-3.8.9 versions
+ DVhost = case rabbit_misc:table_lookup(Args, ?DOWNSTREAM_VHOST_ARG) of
+ {longstr, Val1} -> Val1;
+ _ -> unknown
+ end,
+ Headers = rabbit_basic:extract_headers(Content),
+ case rabbit_federation_util:should_forward(Headers, MaxHops, DName, DVhost) of
+ true -> rabbit_exchange_type_fanout:route(X, D);
+ false -> []
+ end.
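+
+%% A sketch of the decision: given a ?MAX_HOPS_ARG of 2 and a message whose
+%% ?ROUTING_HEADER already carries two entries, route/2 returns [] instead of
+%% delegating to the fanout routing.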
+
+validate(#exchange{arguments = Args}) ->
+ rabbit_federation_util:validate_arg(?MAX_HOPS_ARG, long, Args).
+
+validate_binding(_X, _B) -> ok.
+create(_Tx, _X) -> ok.
+delete(_Tx, _X, _Bs) -> ok.
+policy_changed(_X1, _X2) -> ok.
+add_binding(_Tx, _X, _B) -> ok.
+remove_bindings(_Tx, _X, _Bs) -> ok.
+
+assert_args_equivalence(X = #exchange{name = Name,
+ arguments = Args}, ReqArgs) ->
+ rabbit_misc:assert_args_equivalence(Args, ReqArgs, Name, [?MAX_HOPS_ARG]),
+ rabbit_exchange:assert_args_equivalence(X, Args).
diff --git a/deps/rabbitmq_federation/src/rabbit_federation_util.erl b/deps/rabbitmq_federation/src/rabbit_federation_util.erl
new file mode 100644
index 0000000000..160bac996e
--- /dev/null
+++ b/deps/rabbitmq_federation/src/rabbit_federation_util.erl
@@ -0,0 +1,102 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_federation_util).
+
+-include_lib("rabbit/include/amqqueue.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include("rabbit_federation.hrl").
+
+-export([should_forward/4, find_upstreams/2, already_seen/3]).
+-export([validate_arg/3, fail/2, name/1, vhost/1, r/1, pgname/1]).
+-export([obfuscate_upstream/1, deobfuscate_upstream/1, obfuscate_upstream_params/1, deobfuscate_upstream_params/1]).
+
+-import(rabbit_misc, [pget_or_die/2, pget/3]).
+
+%%----------------------------------------------------------------------------
+
+should_forward(undefined, _MaxHops, _DName, _DVhost) ->
+ true;
+should_forward(Headers, MaxHops, DName, DVhost) ->
+ case rabbit_misc:table_lookup(Headers, ?ROUTING_HEADER) of
+ {array, A} -> length(A) < MaxHops andalso not already_seen(DName, DVhost, A);
+ _ -> true
+ end.
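+
+%% In other words: a message with no headers is always forwarded; one whose
+%% ?ROUTING_HEADER already holds MaxHops or more entries, or names this
+%% downstream's cluster-name and vhost in any entry, is dropped to break the
+%% cycle.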
+
+%% Used to detect message and binding forwarding cycles.
+already_seen(UpstreamID, UpstreamVhost, Array) ->
+ lists:any(fun ({table, T}) ->
+ {longstr, UpstreamID} =:= rabbit_misc:table_lookup(T, <<"cluster-name">>) andalso
+ {longstr, UpstreamVhost} =:= rabbit_misc:table_lookup(T, <<"vhost">>);
+ (_) ->
+ false
+ end, Array).
+
+find_upstreams(Name, Upstreams) ->
+ [U || U = #upstream{name = Name2} <- Upstreams,
+ Name =:= Name2].
+
+validate_arg(Name, Type, Args) ->
+ case rabbit_misc:table_lookup(Args, Name) of
+ {Type, _} -> ok;
+ undefined -> fail("Argument ~s missing", [Name]);
+ _ -> fail("Argument ~s must be of type ~s", [Name, Type])
+ end.
+
+-spec fail(io:format(), [term()]) -> no_return().
+
+fail(Fmt, Args) -> rabbit_misc:protocol_error(precondition_failed, Fmt, Args).
+
+name( #resource{name = XorQName}) -> XorQName;
+name(#exchange{name = #resource{name = XName}}) -> XName;
+name(Q) when ?is_amqqueue(Q) -> #resource{name = QName} = amqqueue:get_name(Q), QName.
+
+vhost( #resource{virtual_host = VHost}) -> VHost;
+vhost(#exchange{name = #resource{virtual_host = VHost}}) -> VHost;
+vhost(Q) when ?is_amqqueue(Q) -> #resource{virtual_host = VHost} = amqqueue:get_name(Q), VHost;
+vhost(#amqp_params_direct{virtual_host = VHost}) -> VHost;
+vhost(#amqp_params_network{virtual_host = VHost}) -> VHost.
+
+r(#exchange{name = XName}) -> XName;
+r(Q) when ?is_amqqueue(Q) -> amqqueue:get_name(Q).
+
+pgname(Name) ->
+ case application:get_env(rabbitmq_federation, pgroup_name_cluster_id) of
+ {ok, false} -> Name;
+ {ok, true} -> {rabbit_nodes:cluster_name(), Name};
+ %% default value is 'false', so do the same thing
+ {ok, undefined} -> Name;
+ _ -> Name
+ end.
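+
+%% E.g. with {pgroup_name_cluster_id, true} and a cluster name of
+%% <<"rabbit@host">> (illustrative), pgname(foo) returns
+%% {<<"rabbit@host">>, foo}; in every other case it is just foo.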
+
+obfuscate_upstream(#upstream{uris = Uris} = Upstream) ->
+ Upstream#upstream{uris = [credentials_obfuscation:encrypt(Uri) || Uri <- Uris]}.
+
+obfuscate_upstream_params(#upstream_params{uri = Uri, params = #amqp_params_network{password = Password} = Params} = UParams) ->
+ UParams#upstream_params{
+ uri = credentials_obfuscation:encrypt(Uri),
+ params = Params#amqp_params_network{password = credentials_obfuscation:encrypt(rabbit_data_coercion:to_binary(Password))}
+ };
+obfuscate_upstream_params(#upstream_params{uri = Uri, params = #amqp_params_direct{password = Password} = Params} = UParams) ->
+ UParams#upstream_params{
+ uri = credentials_obfuscation:encrypt(Uri),
+ params = Params#amqp_params_direct{password = credentials_obfuscation:encrypt(rabbit_data_coercion:to_binary(Password))}
+ }.
+
+deobfuscate_upstream(#upstream{uris = EncryptedUris} = Upstream) ->
+ Upstream#upstream{uris = [credentials_obfuscation:decrypt(EncryptedUri) || EncryptedUri <- EncryptedUris]}.
+
+deobfuscate_upstream_params(#upstream_params{uri = EncryptedUri, params = #amqp_params_network{password = EncryptedPassword} = Params} = UParams) ->
+ UParams#upstream_params{
+ uri = credentials_obfuscation:decrypt(EncryptedUri),
+ params = Params#amqp_params_network{password = credentials_obfuscation:decrypt(EncryptedPassword)}
+ };
+deobfuscate_upstream_params(#upstream_params{uri = EncryptedUri, params = #amqp_params_direct{password = EncryptedPassword} = Params} = UParams) ->
+ UParams#upstream_params{
+ uri = credentials_obfuscation:decrypt(EncryptedUri),
+ params = Params#amqp_params_direct{password = credentials_obfuscation:decrypt(EncryptedPassword)}
+ }.
diff --git a/deps/rabbitmq_federation/test/exchange_SUITE.erl b/deps/rabbitmq_federation/test/exchange_SUITE.erl
new file mode 100644
index 0000000000..a0cd51c7c9
--- /dev/null
+++ b/deps/rabbitmq_federation/test/exchange_SUITE.erl
@@ -0,0 +1,1319 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(exchange_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-include("rabbit_federation.hrl").
+
+-compile(export_all).
+
+-import(rabbit_federation_test_util,
+ [wait_for_federation/2, expect/3, expect/4, expect_empty/2,
+ set_upstream/4, set_upstream/5, set_upstream_in_vhost/5, set_upstream_in_vhost/6,
+ clear_upstream/3, set_upstream_set/4,
+ set_policy/5, set_policy_pattern/5, clear_policy/3,
+ set_policy_upstream/5, set_policy_upstreams/4,
+ all_federation_links/2, federation_links_in_vhost/3, status_fields/2]).
+
+-import(rabbit_ct_broker_helpers,
+ [set_policy_in_vhost/7]).
+
+all() ->
+ [
+ {group, without_automatic_setup},
+ {group, without_disambiguate},
+ {group, with_disambiguate}
+ ].
+
+groups() ->
+ [
+ {without_automatic_setup, [], [
+ message_cycle_detection_case2
+ ]},
+ {without_disambiguate, [], [
+ {cluster_size_1, [], [
+ simple,
+ multiple_upstreams,
+ multiple_upstreams_pattern,
+ multiple_uris,
+ multiple_downstreams,
+ e2e,
+ unbind_on_delete,
+ unbind_on_unbind,
+ unbind_gets_transmitted,
+ no_loop,
+ dynamic_reconfiguration,
+ dynamic_reconfiguration_integrity,
+ federate_unfederate,
+ dynamic_plugin_stop_start,
+ dynamic_plugin_cleanup_stop_start,
+ dynamic_policy_cleanup,
+ delete_federated_exchange_upstream,
+ delete_federated_queue_upstream
+ ]}
+ ]},
+ {with_disambiguate, [], [
+ {cluster_size_2, [], [
+ user_id,
+ message_cycle_detection_case1,
+ restart_upstream
+ ]},
+ {cluster_size_3, [], [
+ max_hops,
+ binding_propagation
+ ]},
+
+ {without_plugins, [], [
+ {cluster_size_2, [], [
+ upstream_has_no_federation
+ ]}
+ ]}
+ ]}
+ ].
+
+suite() ->
+ [{timetrap, {minutes, 5}}].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(without_automatic_setup, Config) ->
+ Suffix = rabbit_ct_helpers:testcase_absname(Config, "", "-"),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, Suffix},
+ {rmq_nodes_clustered, false},
+ {rmq_nodes_count, 1}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps());
+init_per_group(without_disambiguate, Config) ->
+ rabbit_ct_helpers:set_config(Config,
+ {disambiguate_step, []});
+init_per_group(with_disambiguate, Config) ->
+ rabbit_ct_helpers:set_config(Config,
+ {disambiguate_step, [fun rabbit_federation_test_util:disambiguate/1]});
+init_per_group(without_plugins, Config) ->
+ rabbit_ct_helpers:set_config(Config,
+ {broker_with_plugins, [true, false]});
+init_per_group(cluster_size_1 = Group, Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodes_count, 1}
+ ]),
+ init_per_group1(Group, Config1);
+init_per_group(cluster_size_2 = Group, Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodes_count, 2}
+ ]),
+ init_per_group1(Group, Config1);
+init_per_group(cluster_size_3 = Group, Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodes_count, 3}
+ ]),
+ init_per_group1(Group, Config1).
+
+init_per_group1(Group, Config) ->
+ SetupFederation = case Group of
+ cluster_size_1 -> [fun rabbit_federation_test_util:setup_federation/1];
+ cluster_size_2 -> [];
+ cluster_size_3 -> []
+ end,
+ Disambiguate = ?config(disambiguate_step, Config),
+ Suffix = rabbit_ct_helpers:testcase_absname(Config, "", "-"),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, Suffix},
+ {rmq_nodes_clustered, false}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps() ++
+ SetupFederation ++ Disambiguate).
+
+end_per_group(without_disambiguate, Config) ->
+ Config;
+end_per_group(with_disambiguate, Config) ->
+ Config;
+end_per_group(without_plugins, Config) ->
+ Config;
+end_per_group(_, Config) ->
+ rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()
+ ).
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+simple(Config) ->
+ with_ch(Config,
+ fun (Ch) ->
+ Q = bind_queue(Ch, <<"fed.downstream">>, <<"key">>),
+ await_binding(Config, 0, <<"upstream">>, <<"key">>),
+ publish_expect(Ch, <<"upstream">>, <<"key">>, Q, <<"HELLO">>)
+ end, upstream_downstream()).
+
+multiple_upstreams(Config) ->
+ with_ch(Config,
+ fun (Ch) ->
+ Q = bind_queue(Ch, <<"fed12.downstream">>, <<"key">>),
+ await_binding(Config, 0, <<"upstream">>, <<"key">>),
+ await_binding(Config, 0, <<"upstream2">>, <<"key">>),
+ publish_expect(Ch, <<"upstream">>, <<"key">>, Q, <<"HELLO1">>),
+ publish_expect(Ch, <<"upstream2">>, <<"key">>, Q, <<"HELLO2">>)
+ end, [x(<<"upstream">>),
+ x(<<"upstream2">>),
+ x(<<"fed12.downstream">>)]).
+
+multiple_upstreams_pattern(Config) ->
+ set_upstream(Config, 0, <<"local453x">>,
+ rabbit_ct_broker_helpers:node_uri(Config, 0), [
+ {<<"exchange">>, <<"upstream">>},
+ {<<"queue">>, <<"upstream">>}]),
+
+ set_upstream(Config, 0, <<"local3214x">>,
+ rabbit_ct_broker_helpers:node_uri(Config, 0), [
+ {<<"exchange">>, <<"upstream2">>},
+ {<<"queue">>, <<"upstream2">>}]),
+
+ set_policy_pattern(Config, 0, <<"pattern">>, <<"^pattern\\.">>, <<"local\\d+x">>),
+
+ with_ch(Config,
+ fun (Ch) ->
+ Q = bind_queue(Ch, <<"pattern.downstream">>, <<"key">>),
+ await_binding(Config, 0, <<"upstream">>, <<"key">>),
+ await_binding(Config, 0, <<"upstream2">>, <<"key">>),
+ publish_expect(Ch, <<"upstream">>, <<"key">>, Q, <<"HELLO1">>),
+ publish_expect(Ch, <<"upstream2">>, <<"key">>, Q, <<"HELLO2">>)
+ end, [x(<<"upstream">>),
+ x(<<"upstream2">>),
+ x(<<"pattern.downstream">>)]),
+
+ clear_upstream(Config, 0, <<"local453x">>),
+ clear_upstream(Config, 0, <<"local3214x">>),
+ clear_policy(Config, 0, <<"pattern">>).
+
+multiple_uris(Config) ->
+ %% We can't use a direct connection here: kill_only_connection/2 needs a network connection to close.
+ URIs = [
+ rabbit_ct_broker_helpers:node_uri(Config, 0),
+ rabbit_ct_broker_helpers:node_uri(Config, 0, [use_ipaddr])
+ ],
+ set_upstream(Config, 0, <<"localhost">>, URIs),
+ WithCh = fun(F) ->
+ Ch = rabbit_ct_client_helpers:open_channel(Config, 0),
+ F(Ch),
+ rabbit_ct_client_helpers:close_channels_and_connection(
+ Config, 0)
+ end,
+ WithCh(fun (Ch) -> declare_all(Ch, upstream_downstream()) end),
+ expect_uris(Config, 0, URIs),
+ WithCh(fun (Ch) -> delete_all(Ch, upstream_downstream()) end),
+ %% Put back how it was
+ rabbit_federation_test_util:setup_federation(Config),
+ ok.
+
+expect_uris(_, _, []) ->
+ ok;
+expect_uris(Config, Node, URIs) ->
+ [Link] = rabbit_ct_broker_helpers:rpc(Config, Node,
+ rabbit_federation_status, status, []),
+ URI = rabbit_misc:pget(uri, Link),
+ kill_only_connection(Config, Node),
+ expect_uris(Config, Node, URIs -- [URI]).
+
+kill_only_connection(Config, Node) ->
+ case connection_pids(Config, Node) of
+ [Pid] -> catch rabbit_ct_broker_helpers:rpc(Config, Node,
+ rabbit_networking, close_connection, [Pid, "boom"]), %% [1]
+ wait_for_pid_to_die(Config, Node, Pid);
+ _ -> timer:sleep(100),
+ kill_only_connection(Config, Node)
+ end.
+
+%% [1] the catch is because we could still see a connection from a
+%% previous time round. If so that's fine (we'll just loop around
+%% again) but we don't want the test to fail because a connection
+%% closed as we were trying to close it.
+
+wait_for_pid_to_die(Config, Node, Pid) ->
+ case connection_pids(Config, Node) of
+ [Pid] -> timer:sleep(100),
+ wait_for_pid_to_die(Config, Node, Pid);
+ _ -> ok
+ end.
+
+
+multiple_downstreams(Config) ->
+ with_ch(Config,
+ fun (Ch) ->
+ Q1 = bind_queue(Ch, <<"fed.downstream">>, <<"key">>),
+ Q12 = bind_queue(Ch, <<"fed12.downstream2">>, <<"key">>),
+ await_binding(Config, 0, <<"upstream">>, <<"key">>, 2),
+ await_binding(Config, 0, <<"upstream2">>, <<"key">>),
+ publish(Ch, <<"upstream">>, <<"key">>, <<"HELLO1">>),
+ publish(Ch, <<"upstream2">>, <<"key">>, <<"HELLO2">>),
+ expect(Ch, Q1, [<<"HELLO1">>]),
+ expect(Ch, Q12, [<<"HELLO1">>, <<"HELLO2">>])
+ end, upstream_downstream() ++
+ [x(<<"upstream2">>),
+ x(<<"fed12.downstream2">>)]).
+
+e2e(Config) ->
+ with_ch(Config,
+ fun (Ch) ->
+ bind_exchange(Ch, <<"downstream2">>, <<"fed.downstream">>,
+ <<"key">>),
+ await_binding(Config, 0, <<"upstream">>, <<"key">>),
+ Q = bind_queue(Ch, <<"downstream2">>, <<"key">>),
+ publish_expect(Ch, <<"upstream">>, <<"key">>, Q, <<"HELLO1">>)
+ end, upstream_downstream() ++ [x(<<"downstream2">>)]).
+
+unbind_on_delete(Config) ->
+ with_ch(Config,
+ fun (Ch) ->
+ Q1 = bind_queue(Ch, <<"fed.downstream">>, <<"key">>),
+ Q2 = bind_queue(Ch, <<"fed.downstream">>, <<"key">>),
+ await_binding(Config, 0, <<"upstream">>, <<"key">>),
+ delete_queue(Ch, Q2),
+ publish_expect(Ch, <<"upstream">>, <<"key">>, Q1, <<"HELLO">>)
+ end, upstream_downstream()).
+
+unbind_on_unbind(Config) ->
+ with_ch(Config,
+ fun (Ch) ->
+ Q1 = bind_queue(Ch, <<"fed.downstream">>, <<"key">>),
+ Q2 = bind_queue(Ch, <<"fed.downstream">>, <<"key">>),
+ await_binding(Config, 0, <<"upstream">>, <<"key">>),
+ unbind_queue(Ch, Q2, <<"fed.downstream">>, <<"key">>),
+ publish_expect(Ch, <<"upstream">>, <<"key">>, Q1, <<"HELLO">>),
+ delete_queue(Ch, Q2)
+ end, upstream_downstream()).
+
+user_id(Config) ->
+ [Rabbit, Hare] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ set_policy_upstream(Config, Rabbit, <<"^test$">>,
+ rabbit_ct_broker_helpers:node_uri(Config, 1), []),
+ Perm = fun (F, A) ->
+ ok = rpc:call(Hare,
+ rabbit_auth_backend_internal, F, A)
+ end,
+ Perm(add_user, [<<"hare-user">>, <<"hare-user">>, <<"acting-user">>]),
+ Perm(set_permissions, [<<"hare-user">>,
+ <<"/">>, <<".*">>, <<".*">>, <<".*">>,
+ <<"acting-user">>]),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Rabbit),
+ {ok, Conn2} = amqp_connection:start(
+ #amqp_params_network{
+ username = <<"hare-user">>,
+ password = <<"hare-user">>,
+ port = rabbit_ct_broker_helpers:get_node_config(Config, Hare,
+ tcp_port_amqp)}),
+ {ok, Ch2} = amqp_connection:open_channel(Conn2),
+
+ declare_exchange(Ch2, x(<<"test">>)),
+ declare_exchange(Ch, x(<<"test">>)),
+ Q = bind_queue(Ch, <<"test">>, <<"key">>),
+ await_binding(Config, Hare, <<"test">>, <<"key">>),
+
+ Msg = #amqp_msg{props = #'P_basic'{user_id = <<"hare-user">>},
+ payload = <<"HELLO">>},
+
+ SafeUri = fun (H) ->
+ {array, [{table, Recv}]} =
+ rabbit_misc:table_lookup(
+ H, <<"x-received-from">>),
+ URI = rabbit_ct_broker_helpers:node_uri(Config, 1),
+ {longstr, URI} =
+ rabbit_misc:table_lookup(Recv, <<"uri">>)
+ end,
+ ExpectUser =
+ fun (ExpUser) ->
+ fun () ->
+ receive
+ {#'basic.deliver'{},
+ #amqp_msg{props = Props,
+ payload = Payload}} ->
+ #'P_basic'{user_id = ActUser,
+ headers = Headers} = Props,
+ SafeUri(Headers),
+ <<"HELLO">> = Payload,
+ ExpUser = ActUser
+ end
+ end
+ end,
+
+ wait_for_federation(
+ 90,
+ fun() ->
+ VHost = <<"/">>,
+ X1s = rabbit_ct_broker_helpers:rpc(
+ Config, Rabbit, rabbit_exchange, list, [VHost]),
+ L1 =
+ [X || X <- X1s,
+ X#exchange.name =:= #resource{virtual_host = VHost,
+ kind = exchange,
+ name = <<"test">>},
+ X#exchange.scratches =:= [{federation,
+ [{{<<"upstream-2">>,
+ <<"test">>},
+ <<"B">>}]}]],
+ X2s = rabbit_ct_broker_helpers:rpc(
+ Config, Hare, rabbit_exchange, list, [VHost]),
+ L2 =
+ [X || X <- X2s,
+ X#exchange.type =:= 'x-federation-upstream'],
+ [] =/= L1 andalso [] =/= L2 andalso
+ has_internal_federated_queue(Config, Hare, VHost)
+ end),
+ publish(Ch2, <<"test">>, <<"key">>, Msg),
+ expect(Ch, Q, ExpectUser(undefined)),
+
+ set_policy_upstream(Config, Rabbit, <<"^test$">>,
+ rabbit_ct_broker_helpers:node_uri(Config, 1),
+ [{<<"trust-user-id">>, true}]),
+ wait_for_federation(
+ 90,
+ fun() ->
+ VHost = <<"/">>,
+ X1s = rabbit_ct_broker_helpers:rpc(
+ Config, Rabbit, rabbit_exchange, list, [VHost]),
+ L1 =
+ [X || X <- X1s,
+ X#exchange.name =:= #resource{virtual_host = VHost,
+ kind = exchange,
+ name = <<"test">>},
+ X#exchange.scratches =:= [{federation,
+ [{{<<"upstream-2">>,
+ <<"test">>},
+ <<"A">>}]}]],
+ X2s = rabbit_ct_broker_helpers:rpc(
+ Config, Hare, rabbit_exchange, list, [VHost]),
+ L2 =
+ [X || X <- X2s,
+ X#exchange.type =:= 'x-federation-upstream'],
+ [] =/= L1 andalso [] =/= L2 andalso
+ has_internal_federated_queue(Config, Hare, VHost)
+ end),
+ publish(Ch2, <<"test">>, <<"key">>, Msg),
+ expect(Ch, Q, ExpectUser(<<"hare-user">>)),
+
+ amqp_channel:close(Ch2),
+ amqp_connection:close(Conn2),
+
+ ok.
+
+%% In order to test that unbinds get sent we deliberately set up a
+%% broken config - with topic upstream and fanout downstream. You
+%% shouldn't really do this, but it lets us see "extra" messages that
+%% get sent.
+unbind_gets_transmitted(Config) ->
+ with_ch(Config,
+ fun (Ch) ->
+ Q11 = bind_queue(Ch, <<"fed.downstream">>, <<"key1">>),
+ Q12 = bind_queue(Ch, <<"fed.downstream">>, <<"key1">>),
+ Q21 = bind_queue(Ch, <<"fed.downstream">>, <<"key2">>),
+ Q22 = bind_queue(Ch, <<"fed.downstream">>, <<"key2">>),
+ await_binding(Config, 0, <<"upstream">>, <<"key1">>),
+ await_binding(Config, 0, <<"upstream">>, <<"key2">>),
+ [delete_queue(Ch, Q) || Q <- [Q12, Q21, Q22]],
+ publish(Ch, <<"upstream">>, <<"key1">>, <<"YES">>),
+ publish(Ch, <<"upstream">>, <<"key2">>, <<"NO">>),
+ expect(Ch, Q11, [<<"YES">>]),
+ expect_empty(Ch, Q11)
+ end, [x(<<"upstream">>),
+ x(<<"fed.downstream">>)]).
+
+no_loop(Config) ->
+ with_ch(Config,
+ fun (Ch) ->
+ Q1 = bind_queue(Ch, <<"one">>, <<"key">>),
+ Q2 = bind_queue(Ch, <<"two">>, <<"key">>),
+ await_binding(Config, 0, <<"one">>, <<"key">>, 2),
+ await_binding(Config, 0, <<"two">>, <<"key">>, 2),
+ publish(Ch, <<"one">>, <<"key">>, <<"Hello from one">>),
+ publish(Ch, <<"two">>, <<"key">>, <<"Hello from two">>),
+ expect(Ch, Q1, [<<"Hello from one">>, <<"Hello from two">>]),
+ expect(Ch, Q2, [<<"Hello from one">>, <<"Hello from two">>]),
+ expect_empty(Ch, Q1),
+ expect_empty(Ch, Q2)
+ end, [x(<<"one">>),
+ x(<<"two">>)]).
+
+suffix(Config, Node, Name, XName) ->
+ rabbit_ct_broker_helpers:rpc(Config, Node,
+ rabbit_federation_db, get_active_suffix,
+ [xr(<<"fed.downstream">>),
+ #upstream{name = Name,
+ exchange_name = list_to_binary(XName)}, none]).
+
+restart_upstream(Config) ->
+ [Rabbit, Hare] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ Downstream = rabbit_ct_client_helpers:open_channel(Config, Rabbit),
+ Upstream = rabbit_ct_client_helpers:open_channel(Config, Hare),
+
+ rabbit_federation_test_util:set_upstream(Config,
+ Rabbit, <<"hare">>, rabbit_ct_broker_helpers:node_uri(Config, 1)),
+ rabbit_federation_test_util:set_upstream_set(Config,
+ Rabbit, <<"upstream">>,
+ [{<<"hare">>, [{<<"exchange">>, <<"upstream">>}]}]),
+ rabbit_federation_test_util:set_policy(Config,
+ Rabbit, <<"hare">>, <<"^hare\\.">>, <<"upstream">>),
+
+ declare_exchange(Upstream, x(<<"upstream">>)),
+ declare_exchange(Downstream, x(<<"hare.downstream">>)),
+
+ Qstays = bind_queue(Downstream, <<"hare.downstream">>, <<"stays">>),
+ Qgoes = bind_queue(Downstream, <<"hare.downstream">>, <<"goes">>),
+
+ rabbit_ct_client_helpers:close_channels_and_connection(Config, Hare),
+ rabbit_ct_broker_helpers:stop_node(Config, Hare),
+
+ Qcomes = bind_queue(Downstream, <<"hare.downstream">>, <<"comes">>),
+ unbind_queue(Downstream, Qgoes, <<"hare.downstream">>, <<"goes">>),
+
+ rabbit_ct_broker_helpers:start_node(Config, Hare),
+ Upstream1 = rabbit_ct_client_helpers:open_channel(Config, Hare),
+
+ %% Wait for the link to come up and for these bindings
+ %% to be transferred
+ await_binding(Config, Hare, <<"upstream">>, <<"comes">>, 1),
+ await_binding_absent(Config, Hare, <<"upstream">>, <<"goes">>),
+ await_binding(Config, Hare, <<"upstream">>, <<"stays">>, 1),
+
+ publish(Upstream1, <<"upstream">>, <<"goes">>, <<"GOES">>),
+ publish(Upstream1, <<"upstream">>, <<"stays">>, <<"STAYS">>),
+ publish(Upstream1, <<"upstream">>, <<"comes">>, <<"COMES">>),
+
+ expect(Downstream, Qstays, [<<"STAYS">>]),
+ expect(Downstream, Qcomes, [<<"COMES">>]),
+ expect_empty(Downstream, Qgoes),
+
+ delete_exchange(Downstream, <<"hare.downstream">>),
+ delete_exchange(Upstream1, <<"upstream">>),
+
+ rabbit_federation_test_util:clear_policy(Config,
+ Rabbit, <<"hare">>),
+ rabbit_federation_test_util:clear_upstream_set(Config,
+ Rabbit, <<"upstream">>),
+ rabbit_federation_test_util:clear_upstream(Config,
+ Rabbit, <<"hare">>),
+ ok.
+
+%% flopsy, mopsy and cottontail, connected in a ring with max_hops = 2
+%% for each connection. We should not see any duplicates.
+
+max_hops(Config) ->
+ [Flopsy, Mopsy, Cottontail] = rabbit_ct_broker_helpers:get_node_configs(
+ Config, nodename),
+ [set_policy_upstream(Config, Downstream,
+ <<"^ring$">>,
+ rabbit_ct_broker_helpers:node_uri(Config, Upstream),
+ [{<<"max-hops">>, 2}])
+ || {Downstream, Upstream} <- [{Flopsy, Cottontail},
+ {Mopsy, Flopsy},
+ {Cottontail, Mopsy}]],
+
+ FlopsyCh = rabbit_ct_client_helpers:open_channel(Config, Flopsy),
+ MopsyCh = rabbit_ct_client_helpers:open_channel(Config, Mopsy),
+ CottontailCh = rabbit_ct_client_helpers:open_channel(Config, Cottontail),
+
+ declare_exchange(FlopsyCh, x(<<"ring">>)),
+ declare_exchange(MopsyCh, x(<<"ring">>)),
+ declare_exchange(CottontailCh, x(<<"ring">>)),
+
+ Q1 = bind_queue(FlopsyCh, <<"ring">>, <<"key">>),
+ Q2 = bind_queue(MopsyCh, <<"ring">>, <<"key">>),
+ Q3 = bind_queue(CottontailCh, <<"ring">>, <<"key">>),
+
+ await_binding(Config, Flopsy, <<"ring">>, <<"key">>, 3),
+ await_binding(Config, Mopsy, <<"ring">>, <<"key">>, 3),
+ await_binding(Config, Cottontail, <<"ring">>, <<"key">>, 3),
+
+ publish(FlopsyCh, <<"ring">>, <<"key">>, <<"HELLO flopsy">>),
+ publish(MopsyCh, <<"ring">>, <<"key">>, <<"HELLO mopsy">>),
+ publish(CottontailCh, <<"ring">>, <<"key">>, <<"HELLO cottontail">>),
+
+ Msgs = [<<"HELLO flopsy">>, <<"HELLO mopsy">>, <<"HELLO cottontail">>],
+ expect(FlopsyCh, Q1, Msgs),
+ expect(MopsyCh, Q2, Msgs),
+ expect(CottontailCh, Q3, Msgs),
+ expect_empty(FlopsyCh, Q1),
+ expect_empty(MopsyCh, Q2),
+ expect_empty(CottontailCh, Q3),
+ ok.
+
+%% Two nodes, federated in both directions with the same virtual hosts, and max_hops set to a
+%% high value.
+message_cycle_detection_case1(Config) ->
+ [Cycle1, Cycle2] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ [set_policy_upstream(Config, Downstream,
+ <<"^cycle$">>,
+ rabbit_ct_broker_helpers:node_uri(Config, Upstream),
+ [{<<"max-hops">>, 10}])
+ || {Downstream, Upstream} <- [{Cycle1, Cycle2}, {Cycle2, Cycle1}]],
+
+ Cycle1Ch = rabbit_ct_client_helpers:open_channel(Config, Cycle1),
+ Cycle2Ch = rabbit_ct_client_helpers:open_channel(Config, Cycle2),
+
+ declare_exchange(Cycle1Ch, x(<<"cycle">>)),
+ declare_exchange(Cycle2Ch, x(<<"cycle">>)),
+
+ Q1 = bind_queue(Cycle1Ch, <<"cycle">>, <<"cycle_detection-key">>),
+ Q2 = bind_queue(Cycle2Ch, <<"cycle">>, <<"cycle_detection-key">>),
+
+ %% "key" present twice because once for the local queue and once
+ %% for federation in each case
+ await_binding(Config, Cycle1, <<"cycle">>, <<"cycle_detection-key">>, 2),
+ await_binding(Config, Cycle2, <<"cycle">>, <<"cycle_detection-key">>, 2),
+
+ publish(Cycle1Ch, <<"cycle">>, <<"cycle_detection-key">>, <<"HELLO1">>),
+ publish(Cycle2Ch, <<"cycle">>, <<"cycle_detection-key">>, <<"HELLO2">>),
+
+ Msgs = [<<"HELLO1">>, <<"HELLO2">>],
+ expect(Cycle1Ch, Q1, Msgs),
+ expect(Cycle2Ch, Q2, Msgs),
+ expect_empty(Cycle1Ch, Q1),
+ expect_empty(Cycle2Ch, Q2),
+
+ ok.
+
+node_uri_with_virtual_host(Config, Vhost) ->
+ node_uri_with_virtual_host(Config, 0, Vhost).
+
+node_uri_with_virtual_host(Config, Node, Vhost) ->
+ NodeURI = rabbit_ct_broker_helpers:node_uri(Config, Node),
+ <<NodeURI/binary, "/", Vhost/binary>>.
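+
+%% E.g. a node URI of <<"amqp://127.0.0.1:21000">> (port illustrative) and a
+%% vhost of <<"cycles.a">> yield <<"amqp://127.0.0.1:21000/cycles.a">>.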
+
+upstream_policy_defs(Upstream) ->
+ maps:to_list(#{<<"federation-upstream">> => Upstream}).
+
+%% Exchange federation between three local virtual hosts, A -> B -> C,
+%% propagates messages from A to C with a high enough max-hops value
+message_cycle_detection_case2(Config) ->
+ VH1 = <<"cycles.a">>,
+ VH2 = <<"cycles.b">>,
+ VH3 = <<"cycles.c">>,
+ [begin
+ rabbit_ct_broker_helpers:add_vhost(Config, V),
+ rabbit_ct_broker_helpers:set_full_permissions(Config, V)
+ end || V <- [VH1, VH2, VH3]],
+
+ %% make sure that cycle detection does not drop messages because of a limit on hops
+ UpstreamOpts = [{<<"max-hops">>, 5}],
+ %% VH1 is an upstream for VH2
+ %% VH2 is an upstream for VH3
+ UpstreamA = <<"upstream_a">>,
+ URI1 = node_uri_with_virtual_host(Config, VH1),
+ set_upstream_in_vhost(Config, 0, VH2, UpstreamA, URI1, UpstreamOpts),
+ UpstreamB = <<"upstream_b">>,
+ URI2 = node_uri_with_virtual_host(Config, VH2),
+ set_upstream_in_vhost(Config, 0, VH3, UpstreamB, URI2, UpstreamOpts),
+
+ %% policies
+ set_policy_in_vhost(Config, 0, VH2, <<"federate.x">>, <<"^federated">>, <<"exchanges">>, upstream_policy_defs(UpstreamA)),
+ set_policy_in_vhost(Config, 0, VH3, <<"federate.x">>, <<"^federated">>, <<"exchanges">>, upstream_policy_defs(UpstreamB)),
+
+ %% channels
+ VH1Conn = rabbit_ct_client_helpers:open_unmanaged_connection(Config, 0, VH1),
+ VH2Conn = rabbit_ct_client_helpers:open_unmanaged_connection(Config, 0, VH2),
+ VH3Conn = rabbit_ct_client_helpers:open_unmanaged_connection(Config, 0, VH3),
+
+ {ok, VH1Ch} = amqp_connection:open_channel(VH1Conn),
+ {ok, VH2Ch} = amqp_connection:open_channel(VH2Conn),
+ {ok, VH3Ch} = amqp_connection:open_channel(VH3Conn),
+
+ X = <<"federated.x">>,
+ declare_exchange(VH3Ch, x(X, <<"fanout">>)),
+ declare_exchange(VH2Ch, x(X, <<"fanout">>)),
+ declare_exchange(VH1Ch, x(X, <<"fanout">>)),
+
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ LinksInB = federation_links_in_vhost(Config, 0, VH2),
+ LinksInC = federation_links_in_vhost(Config, 0, VH3),
+ length(LinksInB) =:= 1 andalso
+ length(LinksInC) =:= 1 andalso
+ [running] =:= status_fields(status, LinksInB ++ LinksInC)
+ end),
+
+ Statuses = federation_links_in_vhost(Config, 0, VH2) ++ federation_links_in_vhost(Config, 0, VH3),
+
+ ?assertEqual(lists:usort([URI1, URI2]),
+ status_fields(uri, Statuses)),
+ ?assertEqual(lists:usort([<<"federated.x">>]),
+ status_fields(upstream_exchange, Statuses)),
+ ?assertEqual(lists:usort([VH2, VH3]),
+ status_fields(vhost, Statuses)),
+ ?assertEqual(lists:usort([exchange]),
+ status_fields(type, Statuses)),
+
+ %% give links some time to set up their topology
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ ExchangesInA = rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_exchange, list, [VH1]),
+ ExchangesInB = rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_exchange, list, [VH2]),
+ length(ExchangesInA) >= 1 andalso
+ length(ExchangesInB) >= 1
+ end),
+
+ RK = <<"doesn't matter">>,
+ Q = bind_queue(VH3Ch, X, RK),
+ ?assertEqual(ok, await_binding(Config, 0, VH2, X, RK, 1)),
+ ?assertEqual(ok, await_binding(Config, 0, VH3, X, RK, 1)),
+ timer:sleep(2000),
+
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ ExchangesInA = rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_exchange, list, [VH1]),
+ lists:any(fun(#exchange{name = XName}) ->
+ XName =:= rabbit_misc:r(VH1, exchange, X)
+ end, ExchangesInA)
+ end),
+
+ Payload1 = <<"msg1">>,
+ Payload2 = <<"msg2">>,
+ publish(VH1Ch, X, RK, Payload1),
+ publish(VH1Ch, X, RK, Payload2),
+
+ Msgs = [Payload1, Payload2],
+ %% payloads published to a federated exchange in A reach a queue in C
+ expect(VH3Ch, Q, Msgs, 10000),
+
+ [amqp_connection:close(Conn) || Conn <- [VH1Conn, VH2Conn, VH3Conn]],
+ [rabbit_ct_broker_helpers:delete_vhost(Config, Vhost) || Vhost <- [VH1, VH2, VH3]],
+ ok.
+
+%% Arrows indicate message flow. Numbers indicate max_hops.
+%%
+%% Dylan ---1--> Bugs ---2--> Jessica
+%% |^ |^
+%% |\--------------1---------------/|
+%% \---------------1----------------/
+%%
+%%
+%% We want to demonstrate that if we bind a queue locally at each
+%% broker, (exactly) the following bindings propagate:
+%%
+%% Bugs binds to Dylan
+%% Jessica binds to Bugs, which then propagates on to Dylan
+%% Jessica binds to Dylan directly
+%% Dylan binds to Jessica.
+%%
+%% i.e. Dylan has two bindings from Jessica and one from Bugs
+%% Bugs has one binding from Jessica
+%% Jessica has one binding from Dylan
+%%
+%% So we tag each binding with its original broker and see how far it gets
+%%
+%% Also we check that when we tear down the original bindings
+%% we get rid of everything again.
+
+binding_propagation(Config) ->
+ [Dylan, Bugs, Jessica] = rabbit_ct_broker_helpers:get_node_configs(Config,
+ nodename),
+ set_policy_upstream(Config, Dylan, <<"^x$">>,
+ rabbit_ct_broker_helpers:node_uri(Config, Jessica), []),
+ set_policy_upstream(Config, Bugs, <<"^x$">>,
+ rabbit_ct_broker_helpers:node_uri(Config, Dylan), []),
+ set_policy_upstreams(Config, Jessica, <<"^x$">>, [
+ {rabbit_ct_broker_helpers:node_uri(Config, Dylan), []},
+ {rabbit_ct_broker_helpers:node_uri(Config, Bugs),
+ [{<<"max-hops">>, 2}]}
+ ]),
+ DylanCh = rabbit_ct_client_helpers:open_channel(Config, Dylan),
+ BugsCh = rabbit_ct_client_helpers:open_channel(Config, Bugs),
+ JessicaCh = rabbit_ct_client_helpers:open_channel(Config, Jessica),
+
+ declare_exchange(DylanCh, x(<<"x">>)),
+ declare_exchange(BugsCh, x(<<"x">>)),
+ declare_exchange(JessicaCh, x(<<"x">>)),
+
+ Q1 = bind_queue(DylanCh, <<"x">>, <<"dylan">>),
+ Q2 = bind_queue(BugsCh, <<"x">>, <<"bugs">>),
+ Q3 = bind_queue(JessicaCh, <<"x">>, <<"jessica">>),
+
+ await_binding(Config, Dylan, <<"x">>, <<"jessica">>, 2),
+ await_bindings(Config, Dylan, <<"x">>, [<<"bugs">>, <<"dylan">>]),
+ await_bindings(Config, Bugs, <<"x">>, [<<"jessica">>, <<"bugs">>]),
+ await_bindings(Config, Jessica, <<"x">>, [<<"dylan">>, <<"jessica">>]),
+
+ delete_queue(DylanCh, Q1),
+ delete_queue(BugsCh, Q2),
+ delete_queue(JessicaCh, Q3),
+
+ await_bindings(Config, Dylan, <<"x">>, []),
+ await_bindings(Config, Bugs, <<"x">>, []),
+ await_bindings(Config, Jessica, <<"x">>, []),
+
+ ok.
+
+upstream_has_no_federation(Config) ->
+ [Rabbit, Hare] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ set_policy_upstream(Config, Rabbit, <<"^test$">>,
+ rabbit_ct_broker_helpers:node_uri(Config, Hare), []),
+ Downstream = rabbit_ct_client_helpers:open_channel(Config, Rabbit),
+ Upstream = rabbit_ct_client_helpers:open_channel(Config, Hare),
+ declare_exchange(Upstream, x(<<"test">>)),
+ declare_exchange(Downstream, x(<<"test">>)),
+ Q = bind_queue(Downstream, <<"test">>, <<"routing">>),
+ await_binding(Config, Hare, <<"test">>, <<"routing">>),
+ publish(Upstream, <<"test">>, <<"routing">>, <<"HELLO">>),
+ expect(Downstream, Q, [<<"HELLO">>]),
+ ok.
+
+dynamic_reconfiguration(Config) ->
+ with_ch(Config,
+ fun (_Ch) ->
+ Xs = [<<"all.fed1">>, <<"all.fed2">>],
+ %% Left over from the config we set up for previous tests
+ assert_connections(Config, 0, Xs, [<<"localhost">>, <<"local5673">>]),
+
+ %% Test that clearing connections works
+ clear_upstream(Config, 0, <<"localhost">>),
+ clear_upstream(Config, 0, <<"local5673">>),
+ assert_connections(Config, 0, Xs, []),
+
+ %% Test that re-adding them and changing them works
+ set_upstream(Config, 0,
+ <<"localhost">>, rabbit_ct_broker_helpers:node_uri(Config, 0)),
+ %% Do it twice so we at least hit the no-restart optimisation
+ URI = rabbit_ct_broker_helpers:node_uri(Config, 0, [use_ipaddr]),
+ set_upstream(Config, 0, <<"localhost">>, URI),
+ set_upstream(Config, 0, <<"localhost">>, URI),
+ assert_connections(Config, 0, Xs, [<<"localhost">>]),
+
+ %% And re-add the last - for the next test
+ rabbit_federation_test_util:setup_federation(Config)
+ end, [x(<<"all.fed1">>), x(<<"all.fed2">>)]).
+
+dynamic_reconfiguration_integrity(Config) ->
+ with_ch(Config,
+ fun (_Ch) ->
+ Xs = [<<"new.fed1">>, <<"new.fed2">>],
+
+ %% Declared exchanges with nonexistent set - no links
+ assert_connections(Config, 0, Xs, []),
+
+ %% Create the set - links appear
+ set_upstream_set(Config, 0, <<"new-set">>, [{<<"localhost">>, []}]),
+ assert_connections(Config, 0, Xs, [<<"localhost">>]),
+
+ %% Add nonexistent connections to set - nothing breaks
+ set_upstream_set(Config, 0,
+ <<"new-set">>, [{<<"localhost">>, []},
+ {<<"does-not-exist">>, []}]),
+ assert_connections(Config, 0, Xs, [<<"localhost">>]),
+
+ %% Change connection in set - links change
+ set_upstream_set(Config, 0, <<"new-set">>, [{<<"local5673">>, []}]),
+ assert_connections(Config, 0, Xs, [<<"local5673">>])
+ end, [x(<<"new.fed1">>), x(<<"new.fed2">>)]).
+
+delete_federated_exchange_upstream(Config) ->
+ %% If two exchanges in different virtual hosts have the same name, only one should be deleted.
+ VH1 = <<"federation-downstream1">>,
+ rabbit_ct_broker_helpers:delete_vhost(Config, VH1),
+ rabbit_ct_broker_helpers:add_vhost(Config, VH1),
+ rabbit_ct_broker_helpers:set_full_permissions(Config, <<"guest">>, VH1),
+ VH2 = <<"federation-downstream2">>,
+ rabbit_ct_broker_helpers:delete_vhost(Config, VH2),
+ rabbit_ct_broker_helpers:add_vhost(Config, VH2),
+ rabbit_ct_broker_helpers:set_full_permissions(Config, <<"guest">>, VH2),
+
+ Conn1 = rabbit_ct_client_helpers:open_unmanaged_connection(Config, 0, VH1),
+ Conn2 = rabbit_ct_client_helpers:open_unmanaged_connection(Config, 0, VH2),
+ {ok, Ch1} = amqp_connection:open_channel(Conn1),
+ {ok, Ch2} = amqp_connection:open_channel(Conn2),
+
+ #'exchange.declare_ok'{} = declare_exchange(Ch1, #'exchange.declare'{exchange = <<"federated.topic">>,
+ type = <<"topic">>,
+ durable = true}),
+ #'exchange.declare_ok'{} = declare_exchange(Ch2, #'exchange.declare'{exchange = <<"federated.topic">>,
+ type = <<"topic">>,
+ durable = true}),
+
+ rabbit_ct_broker_helpers:rpc(Config, 0,
+ rabbit_policy, set,
+ [VH1,
+ <<"federation">>, <<"^federated\.">>,
+ [{<<"federation-upstream-set">>, <<"all">>}],
+ 0, <<"exchanges">>, <<"acting-user">>]),
+ rabbit_ct_broker_helpers:rpc(Config, 0,
+ rabbit_policy, set,
+ [VH2,
+ <<"federation">>, <<"^federated\.">>,
+ [{<<"federation-upstream-set">>, <<"all">>}],
+ 0, <<"exchanges">>, <<"acting-user">>]),
+
+ rabbit_ct_broker_helpers:set_parameter(Config, 0, VH1,
+ <<"federation-upstream">>, <<"upstream">>,
+ [{<<"uri">>, node_uri_with_virtual_host(Config, VH2)}]),
+ rabbit_ct_broker_helpers:set_parameter(Config, 0, VH2,
+ <<"federation-upstream">>, <<"upstream">>,
+ [{<<"uri">>, node_uri_with_virtual_host(Config, VH1)}]),
+
+ ?assertEqual(1, length(federation_links_in_vhost(Config, 0, VH1))),
+ ?assertEqual(1, length(federation_links_in_vhost(Config, 0, VH2))),
+
+ rabbit_ct_broker_helpers:clear_parameter(Config, 0, VH2,
+ <<"federation-upstream">>, <<"upstream">>),
+
+ %% one link is still around
+ ?assertEqual(1, length(federation_links_in_vhost(Config, 0, VH1))),
+ ?assertEqual(0, length(federation_links_in_vhost(Config, 0, VH2))),
+ LinksInVH1 = federation_links_in_vhost(Config, 0, VH1),
+ ?assertEqual(VH1, proplists:get_value(vhost, hd(LinksInVH1))),
+
+ [rabbit_ct_broker_helpers:delete_vhost(Config, Val) || Val <- [VH1, VH2]].
+
+delete_federated_queue_upstream(Config) ->
+ %% If two queues in different virtual hosts have the same name, only one should be deleted.
+ VH1 = <<"federation-downstream1">>,
+ rabbit_ct_broker_helpers:delete_vhost(Config, VH1),
+ rabbit_ct_broker_helpers:add_vhost(Config, VH1),
+ rabbit_ct_broker_helpers:set_full_permissions(Config, <<"guest">>, VH1),
+ VH2 = <<"federation-downstream2">>,
+ rabbit_ct_broker_helpers:delete_vhost(Config, VH2),
+ rabbit_ct_broker_helpers:add_vhost(Config, VH2),
+ rabbit_ct_broker_helpers:set_full_permissions(Config, <<"guest">>, VH2),
+
+ rabbit_ct_broker_helpers:rpc(Config, 0,
+ rabbit_policy, set,
+ [VH1,
+ <<"federation">>, <<"^federated\.">>,
+ [{<<"federation-upstream-set">>, <<"all">>}],
+ 0, <<"queues">>, <<"acting-user">>]),
+ rabbit_ct_broker_helpers:rpc(Config, 0,
+ rabbit_policy, set,
+ [VH2,
+ <<"federation">>, <<"^federated\.">>,
+ [{<<"federation-upstream-set">>, <<"all">>}],
+ 0, <<"queues">>, <<"acting-user">>]),
+
+ rabbit_ct_broker_helpers:set_parameter(Config, 0, VH1,
+ <<"federation-upstream">>, <<"upstream">>,
+ [{<<"uri">>, node_uri_with_virtual_host(Config, VH2)}]),
+ rabbit_ct_broker_helpers:set_parameter(Config, 0, VH2,
+ <<"federation-upstream">>, <<"upstream">>,
+ [{<<"uri">>, node_uri_with_virtual_host(Config, VH1)}]),
+
+ Conn1 = rabbit_ct_client_helpers:open_unmanaged_connection(Config, 0, VH1),
+ Conn2 = rabbit_ct_client_helpers:open_unmanaged_connection(Config, 0, VH2),
+ {ok, Ch1} = amqp_connection:open_channel(Conn1),
+ {ok, Ch2} = amqp_connection:open_channel(Conn2),
+
+ #'queue.declare_ok'{} = declare_queue(Ch1,
+ #'queue.declare'{queue = <<"federated.queue">>,
+ durable = true}),
+ #'queue.declare_ok'{} = declare_queue(Ch2,
+ #'queue.declare'{queue = <<"federated.queue">>,
+ durable = true}),
+
+
+ rabbit_ct_helpers:await_condition(
+ fun () ->
+ length(federation_links_in_vhost(Config, 0, VH1)) > 0 andalso
+ length(federation_links_in_vhost(Config, 0, VH2)) > 0
+ end),
+
+ ?assertEqual(1, length(federation_links_in_vhost(Config, 0, VH1))),
+ ?assertEqual(1, length(federation_links_in_vhost(Config, 0, VH2))),
+
+ rabbit_ct_broker_helpers:clear_parameter(Config, 0, VH2,
+ <<"federation-upstream">>, <<"upstream">>),
+
+ ?assertEqual(1, length(federation_links_in_vhost(Config, 0, VH1))),
+ ?assertEqual(0, length(federation_links_in_vhost(Config, 0, VH2))),
+ LinksInVH1 = federation_links_in_vhost(Config, 0, VH1),
+ ?assertEqual(VH1, proplists:get_value(vhost, hd(LinksInVH1))),
+
+ [rabbit_ct_broker_helpers:delete_vhost(Config, Val) || Val <- [VH1, VH2]].
+
+federate_unfederate(Config) ->
+ with_ch(Config,
+ fun (_Ch) ->
+ Xs = [<<"dyn.exch1">>, <<"dyn.exch2">>],
+
+ %% Declared non-federated exchanges - no links
+ assert_connections(Config, 0, Xs, []),
+
+ %% Federate them - links appear
+ set_policy(Config, 0, <<"dyn">>, <<"^dyn\\.">>, <<"all">>),
+ assert_connections(Config, 0, Xs, [<<"localhost">>, <<"local5673">>]),
+
+ %% Change policy - links change
+ set_policy(Config, 0, <<"dyn">>, <<"^dyn\\.">>, <<"localhost">>),
+ assert_connections(Config, 0, Xs, [<<"localhost">>]),
+
+ %% Unfederate them - links disappear
+ clear_policy(Config, 0, <<"dyn">>),
+ assert_connections(Config, 0, Xs, [])
+ end, [x(<<"dyn.exch1">>), x(<<"dyn.exch2">>)]).
+
+dynamic_plugin_stop_start(Config) ->
+ X1 = <<"dyn.exch1">>,
+ X2 = <<"dyn.exch2">>,
+ with_ch(Config,
+ fun (Ch) ->
+ set_policy(Config, 0, <<"dyn">>, <<"^dyn\\.">>, <<"localhost">>),
+
+ %% Declare federated exchange - get link
+ assert_connections(Config, 0, [X1], [<<"localhost">>]),
+
+ %% Disable plugin, link goes
+ ok = rabbit_ct_broker_helpers:disable_plugin(Config, 0,
+ "rabbitmq_federation"),
+ %% We can't check via the status module - it is stopped along with the plugin
+ undefined = rabbit_ct_broker_helpers:rpc(Config, 0,
+ erlang, whereis, [rabbit_federation_sup]),
+ {error, not_found} = rabbit_ct_broker_helpers:rpc(Config, 0,
+ rabbit_registry, lookup_module,
+ [exchange, 'x-federation-upstream']),
+
+ %% Create exchange then re-enable plugin, links appear
+ declare_exchange(Ch, x(X2)),
+ ok = rabbit_ct_broker_helpers:enable_plugin(Config, 0,
+ "rabbitmq_federation"),
+ assert_connections(Config, 0, [X1, X2], [<<"localhost">>]),
+ {ok, _} = rabbit_ct_broker_helpers:rpc(Config, 0,
+ rabbit_registry, lookup_module,
+ [exchange, 'x-federation-upstream']),
+
+ wait_for_federation(
+ 90,
+ fun() ->
+ VHost = <<"/">>,
+ Xs = rabbit_ct_broker_helpers:rpc(
+ Config, 0, rabbit_exchange, list, [VHost]),
+ L1 =
+ [X || X <- Xs,
+ X#exchange.type =:= 'x-federation-upstream'],
+ L2 =
+ [X || X <- Xs,
+ X#exchange.name =:= #resource{
+ virtual_host = VHost,
+ kind = exchange,
+ name = X1},
+ X#exchange.scratches =:= [{federation,
+ [{{<<"localhost">>,
+ X1},
+ <<"A">>}]}]],
+ L3 =
+ [X || X <- Xs,
+ X#exchange.name =:= #resource{
+ virtual_host = VHost,
+ kind = exchange,
+ name = X2},
+ X#exchange.scratches =:= [{federation,
+ [{{<<"localhost">>,
+ X2},
+ <<"B">>}]}]],
+ length(L1) =:= 2 andalso [] =/= L2 andalso [] =/= L3 andalso
+ has_internal_federated_queue(Config, 0, VHost)
+ end),
+
+ %% Test both exchanges work. They are just federated to
+ %% themselves so should duplicate messages.
+ [begin
+ Q = bind_queue(Ch, X, <<"key">>),
+ await_binding(Config, 0, X, <<"key">>, 2),
+ publish(Ch, X, <<"key">>, <<"HELLO">>),
+ expect(Ch, Q, [<<"HELLO">>, <<"HELLO">>]),
+ delete_queue(Ch, Q)
+ end || X <- [X1, X2]],
+
+ clear_policy(Config, 0, <<"dyn">>),
+ assert_connections(Config, 0, [X1, X2], []),
+ delete_exchange(Ch, X2)
+ end, [x(X1)]).
+
+dynamic_plugin_cleanup_stop_start(Config) ->
+ X1 = <<"dyn.exch1">>,
+ with_ch(Config,
+ fun (_Ch) ->
+ set_policy(Config, 0, <<"dyn">>, <<"^dyn\\.">>, <<"localhost">>),
+
+ %% Declare a federated exchange, a link starts
+ assert_connections(Config, 0, [X1], [<<"localhost">>]),
+ wait_for_federation(
+ 90,
+ fun() ->
+ VHost = <<"/">>,
+ Xs = rabbit_ct_broker_helpers:rpc(
+ Config, 0, rabbit_exchange, list, [VHost]),
+ L1 =
+ [X || X <- Xs,
+ X#exchange.type =:= 'x-federation-upstream'],
+ L2 =
+ [X || X <- Xs,
+ X#exchange.name =:= #resource{
+ virtual_host = VHost,
+ kind = exchange,
+ name = X1},
+ X#exchange.scratches =:= [{federation,
+ [{{<<"localhost">>,
+ X1},
+ <<"B">>}]}]],
+ [] =/= L1 andalso [] =/= L2 andalso
+ has_internal_federated_queue(Config, 0, VHost)
+ end),
+
+ ?assert(has_internal_federated_exchange(Config, 0, <<"/">>)),
+ ?assert(has_internal_federated_queue(Config, 0, <<"/">>)),
+
+ %% Disable plugin, link goes
+ ok = rabbit_ct_broker_helpers:disable_plugin(Config, 0,
+ "rabbitmq_federation"),
+
+ %% Internal exchanges and queues need cleanup
+ ?assert(not has_internal_federated_exchange(Config, 0, <<"/">>)),
+ ?assert(not has_internal_federated_queue(Config, 0, <<"/">>)),
+
+ ok = rabbit_ct_broker_helpers:enable_plugin(Config, 0,
+ "rabbitmq_federation"),
+ clear_policy(Config, 0, <<"dyn">>),
+ assert_connections(Config, 0, [X1], [])
+ end, [x(X1)]).
+
+dynamic_policy_cleanup(Config) ->
+ X1 = <<"dyn.exch1">>,
+ with_ch(Config,
+ fun (_Ch) ->
+ set_policy(Config, 0, <<"dyn">>, <<"^dyn\\.">>, <<"localhost">>),
+
+ %% Declare federated exchange - get link
+ assert_connections(Config, 0, [X1], [<<"localhost">>]),
+ wait_for_federation(
+ 90,
+ fun() ->
+ VHost = <<"/">>,
+ Xs = rabbit_ct_broker_helpers:rpc(
+ Config, 0, rabbit_exchange, list, [VHost]),
+ L1 =
+ [X || X <- Xs,
+ X#exchange.type =:= 'x-federation-upstream'],
+ L2 =
+ [X || X <- Xs,
+ X#exchange.name =:= #resource{
+ virtual_host = VHost,
+ kind = exchange,
+ name = X1},
+ X#exchange.scratches =:= [{federation,
+ [{{<<"localhost">>,
+ X1},
+ <<"B">>}]}]],
+ [] =/= L1 andalso [] =/= L2 andalso
+ has_internal_federated_queue(Config, 0, VHost)
+ end),
+
+ ?assert(has_internal_federated_exchange(Config, 0, <<"/">>)),
+ ?assert(has_internal_federated_queue(Config, 0, <<"/">>)),
+
+ clear_policy(Config, 0, <<"dyn">>),
+ timer:sleep(5000),
+
+ %% Internal exchanges and queues need cleanup
+ ?assert(not has_internal_federated_exchange(Config, 0, <<"/">>)),
+ ?assert(not has_internal_federated_queue(Config, 0, <<"/">>)),
+
+ clear_policy(Config, 0, <<"dyn">>),
+ assert_connections(Config, 0, [X1], [])
+ end, [x(X1)]).
+
+has_internal_federated_exchange(Config, Node, VHost) ->
+ lists:any(fun(X) ->
+ X#exchange.type == 'x-federation-upstream'
+ end, rabbit_ct_broker_helpers:rpc(Config, Node,
+ rabbit_exchange, list, [VHost])).
+
+has_internal_federated_queue(Config, Node, VHost) ->
+ lists:any(
+ fun(Q) ->
+ {'longstr', <<"federation">>} ==
+ rabbit_misc:table_lookup(amqqueue:get_arguments(Q), <<"x-internal-purpose">>)
+ end, rabbit_ct_broker_helpers:rpc(Config, Node,
+ rabbit_amqqueue, list, [VHost])).
+
+%%----------------------------------------------------------------------------
+
+with_ch(Config, Fun, Xs) ->
+ Ch = rabbit_ct_client_helpers:open_channel(Config, 0),
+ declare_all(Ch, Xs),
+ rabbit_federation_test_util:assert_status(Config, 0,
+ Xs, {exchange, upstream_exchange}),
+ Fun(Ch),
+ delete_all(Ch, Xs),
+ rabbit_ct_client_helpers:close_channel(Ch),
+ cleanup(Config, 0),
+ ok.
+
+cleanup(Config, Node) ->
+ [rabbit_ct_broker_helpers:rpc(
+ Config, Node, rabbit_amqqueue, delete, [Q, false, false,
+ <<"acting-user">>]) ||
+ Q <- queues(Config, Node)].
+
+queues(Config, Node) ->
+ Ret = rabbit_ct_broker_helpers:rpc(Config, Node,
+ rabbit_amqqueue, list, [<<"/">>]),
+ case Ret of
+ {badrpc, _} -> [];
+ Qs -> Qs
+ end.
+
+stop_other_node(Config, Node) ->
+ cleanup(Config, Node),
+ rabbit_federation_test_util:stop_other_node(Config, Node).
+
+declare_all(Ch, Xs) -> [declare_exchange(Ch, X) || X <- Xs].
+delete_all(Ch, Xs) ->
+ [delete_exchange(Ch, X) || #'exchange.declare'{exchange = X} <- Xs].
+
+declare_exchange(Ch, X) ->
+ amqp_channel:call(Ch, X).
+
+x(Name) -> x(Name, <<"topic">>).
+
+x(Name, Type) ->
+ #'exchange.declare'{exchange = Name,
+ type = Type,
+ durable = true}.
+
+xr(Name) ->
+ rabbit_misc:r(<<"/">>, exchange, Name).
+
+xr(Vhost, Name) ->
+ rabbit_misc:r(Vhost, exchange, Name).
+
+declare_queue(Ch) ->
+ #'queue.declare_ok'{queue = Q} =
+ amqp_channel:call(Ch, #'queue.declare'{exclusive = true}),
+ Q.
+
+declare_queue(Ch, Q) ->
+ amqp_channel:call(Ch, Q).
+
+bind_queue(Ch, Q, X, Key) ->
+ amqp_channel:call(Ch, #'queue.bind'{queue = Q,
+ exchange = X,
+ routing_key = Key}).
+
+unbind_queue(Ch, Q, X, Key) ->
+ amqp_channel:call(Ch, #'queue.unbind'{queue = Q,
+ exchange = X,
+ routing_key = Key}).
+
+bind_exchange(Ch, D, S, Key) ->
+ amqp_channel:call(Ch, #'exchange.bind'{destination = D,
+ source = S,
+ routing_key = Key}).
+
+bind_queue(Ch, X, Key) ->
+ Q = declare_queue(Ch),
+ bind_queue(Ch, Q, X, Key),
+ Q.
+
+delete_exchange(Ch, X) ->
+ amqp_channel:call(Ch, #'exchange.delete'{exchange = X}).
+
+delete_queue(Ch, Q) ->
+ amqp_channel:call(Ch, #'queue.delete'{queue = Q}).
+
+await_binding(Config, Node, X, Key) ->
+ await_binding(Config, Node, X, Key, 1).
+
+await_binding(Config, Node, X, Key, ExpectedBindingCount) when is_integer(ExpectedBindingCount) ->
+ await_binding(Config, Node, <<"/">>, X, Key, ExpectedBindingCount).
+
+await_binding(Config, Node, Vhost, X, Key, ExpectedBindingCount) when is_integer(ExpectedBindingCount) ->
+ Attempts = 100,
+ await_binding(Config, Node, Vhost, X, Key, ExpectedBindingCount, Attempts).
+
+await_binding(_Config, _Node, _Vhost, _X, _Key, ExpectedBindingCount, 0) ->
+ {error, rabbit_misc:format("expected ~s bindings but they did not materialize in time", [ExpectedBindingCount])};
+await_binding(Config, Node, Vhost, X, Key, ExpectedBindingCount, AttemptsLeft) when is_integer(ExpectedBindingCount) ->
+ case bound_keys_from(Config, Node, Vhost, X, Key) of
+ Bs when length(Bs) < ExpectedBindingCount ->
+ timer:sleep(100),
+ await_binding(Config, Node, Vhost, X, Key, ExpectedBindingCount, AttemptsLeft - 1);
+ Bs when length(Bs) =:= ExpectedBindingCount ->
+ ok;
+ Bs ->
+ {error, rabbit_misc:format("expected ~b bindings, got ~b", [ExpectedBindingCount, length(Bs)])}
+ end.
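+
+%% With 100 attempts and a 100 ms pause between them, this polls for up to
+%% roughly ten seconds before giving up.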
+
+await_bindings(Config, Node, X, Keys) ->
+ [await_binding(Config, Node, X, Key) || Key <- Keys].
+
+await_binding_absent(Config, Node, X, Key) ->
+ case bound_keys_from(Config, Node, <<"/">>, X, Key) of
+ [] -> ok;
+ _ -> timer:sleep(100),
+ await_binding_absent(Config, Node, X, Key)
+ end.
+
+bound_keys_from(Config, Node, Vhost, X, Key) ->
+ List = rabbit_ct_broker_helpers:rpc(Config, Node, rabbit_binding,
+ list_for_source, [xr(Vhost, X)]),
+ [K || #binding{key = K} <- List, K =:= Key].
+
+publish(Ch, X, Key, Payload) when is_binary(Payload) ->
+ publish(Ch, X, Key, #amqp_msg{payload = Payload});
+
+publish(Ch, X, Key, Msg = #amqp_msg{}) ->
+ amqp_channel:call(Ch, #'basic.publish'{exchange = X,
+ routing_key = Key}, Msg).
+
+publish_expect(Ch, X, Key, Q, Payload) ->
+ publish(Ch, X, Key, Payload),
+ expect(Ch, Q, [Payload]).
+
+%%----------------------------------------------------------------------------
+
+assert_connections(Config, Node, Xs, Conns) ->
+ rabbit_ct_broker_helpers:rpc(Config, Node,
+ ?MODULE, assert_connections1, [Xs, Conns]).
+
+assert_connections1(Xs, Conns) ->
+ Links = [{X, C, X} ||
+ X <- Xs,
+ C <- Conns],
+ Remaining = lists:foldl(
+ fun (Link, Status) ->
+ rabbit_federation_test_util:assert_link_status(
+ Link, Status, {exchange, upstream_exchange})
+ end, rabbit_federation_status:status(), Links),
+ [] = Remaining,
+ ok.
+
+connection_pids(Config, Node) ->
+ [P || [{pid, P}] <-
+ rabbit_ct_broker_helpers:rpc(Config, Node,
+ rabbit_networking, connection_info_all, [[pid]])].
+
+upstream_downstream() ->
+ [x(<<"upstream">>), x(<<"fed.downstream">>)].
diff --git a/deps/rabbitmq_federation/test/federation_status_command_SUITE.erl b/deps/rabbitmq_federation/test/federation_status_command_SUITE.erl
new file mode 100644
index 0000000000..b7702bcf02
--- /dev/null
+++ b/deps/rabbitmq_federation/test/federation_status_command_SUITE.erl
@@ -0,0 +1,168 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(federation_status_command_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-compile(export_all).
+
+-define(CMD, 'Elixir.RabbitMQ.CLI.Ctl.Commands.FederationStatusCommand').
+
+all() ->
+ [
+ {group, not_federated},
+ {group, federated},
+ {group, federated_down}
+ ].
+
+groups() ->
+ [
+ {not_federated, [], [
+ run_not_federated,
+ output_not_federated
+ ]},
+ {federated, [], [
+ run_federated,
+ output_federated
+ ]},
+ {federated_down, [], [
+ run_down_federated
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, ?MODULE}
+ ]),
+ Config2 = rabbit_ct_helpers:run_setup_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()),
+ Config2.
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_group(federated, Config) ->
+ rabbit_federation_test_util:setup_federation(Config),
+ Config;
+init_per_group(federated_down, Config) ->
+ rabbit_federation_test_util:setup_down_federation(Config),
+ Config;
+init_per_group(_, Config) ->
+ Config.
+
+end_per_group(_, Config) ->
+ Config.
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+run_not_federated(Config) ->
+ [A] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ Opts = #{node => A},
+ {stream, []} = ?CMD:run([], Opts#{only_down => false}).
+
+output_not_federated(Config) ->
+ [A] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ Opts = #{node => A},
+ {stream, []} = ?CMD:output({stream, []}, Opts).
+
+run_federated(Config) ->
+ [A] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ Opts = #{node => A},
+ %% All
+ rabbit_federation_test_util:with_ch(
+ Config,
+ fun(_) ->
+ timer:sleep(3000),
+ {stream, [Props]} = ?CMD:run([], Opts#{only_down => false}),
+ <<"upstream">> = proplists:get_value(upstream_queue, Props),
+ <<"fed.downstream">> = proplists:get_value(queue, Props),
+ <<"fed.tag">> = proplists:get_value(consumer_tag, Props),
+ running = proplists:get_value(status, Props)
+ end,
+ [rabbit_federation_test_util:q(<<"upstream">>),
+ rabbit_federation_test_util:q(<<"fed.downstream">>)]),
+ %% Down
+ rabbit_federation_test_util:with_ch(
+ Config,
+ fun(_) ->
+ {stream, []} = ?CMD:run([], Opts#{only_down => true})
+ end,
+ [rabbit_federation_test_util:q(<<"upstream">>),
+ rabbit_federation_test_util:q(<<"fed.downstream">>)]).
+
+run_down_federated(Config) ->
+ [A] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ Opts = #{node => A},
+ %% All
+ rabbit_federation_test_util:with_ch(
+ Config,
+ fun(_) ->
+ timer:sleep(3000),
+ {stream, ManyProps} = ?CMD:run([], Opts#{only_down => false}),
+ Links = [{proplists:get_value(upstream, Props),
+ proplists:get_value(status, Props)}
+ || Props <- ManyProps],
+ [{<<"broken-bunny">>, error}, {<<"localhost">>, running}]
+ = lists:sort(Links)
+ end,
+ [rabbit_federation_test_util:q(<<"upstream">>),
+ rabbit_federation_test_util:q(<<"fed.downstream">>)]),
+ %% Down
+ rabbit_federation_test_util:with_ch(
+ Config,
+ fun(_) ->
+ timer:sleep(3000),
+ {stream, [Props]} = ?CMD:run([], Opts#{only_down => true}),
+ <<"broken-bunny">> = proplists:get_value(upstream, Props),
+ error = proplists:get_value(status, Props)
+ end,
+ [rabbit_federation_test_util:q(<<"upstream">>),
+ rabbit_federation_test_util:q(<<"fed.downstream">>)]).
+
+output_federated(Config) ->
+ [A] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ Opts = #{node => A},
+ Input = {stream,[[{queue, <<"fed.downstream">>},
+ {consumer_tag, <<"fed.tag">>},
+ {upstream_queue, <<"upstream">>},
+ {type, queue},
+ {vhost, <<"/">>},
+ {upstream, <<"localhost">>},
+ {status, running},
+ {local_connection, <<"<rmq-ct-federation_status_command_SUITE-1-21000@localhost.1.563.0>">>},
+ {uri, <<"amqp://localhost:21000">>},
+ {timestamp, {{2016,11,21},{8,51,19}}}]]},
+ {stream, [#{queue := <<"fed.downstream">>,
+ upstream_queue := <<"upstream">>,
+ type := queue,
+ vhost := <<"/">>,
+ upstream := <<"localhost">>,
+ status := running,
+ local_connection := <<"<rmq-ct-federation_status_command_SUITE-1-21000@localhost.1.563.0>">>,
+ uri := <<"amqp://localhost:21000">>,
+ last_changed := <<"2016-11-21 08:51:19">>,
+ exchange := <<>>,
+ upstream_exchange := <<>>,
+ error := <<>>}]}
+ = ?CMD:output(Input, Opts).
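+%% Note how output/2 reshapes the raw status proplist: the timestamp tuple
+%% becomes a formatted last_changed binary, and fields that a queue link
+%% does not have (exchange, upstream_exchange, error) are filled with <<>>.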
diff --git a/deps/rabbitmq_federation/test/queue_SUITE.erl b/deps/rabbitmq_federation/test/queue_SUITE.erl
new file mode 100644
index 0000000000..5c3660fb64
--- /dev/null
+++ b/deps/rabbitmq_federation/test/queue_SUITE.erl
@@ -0,0 +1,328 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(queue_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-compile(export_all).
+
+-import(rabbit_federation_test_util,
+ [wait_for_federation/2, expect/3, expect/4,
+ set_upstream/4, set_upstream/5, clear_upstream/3, set_policy/5, clear_policy/3,
+ set_policy_pattern/5, set_policy_upstream/5, q/1, with_ch/3,
+ declare_queue/2, delete_queue/2,
+ federation_links_in_vhost/3]).
+
+-define(INITIAL_WAIT, 6000).
+-define(EXPECT_FEDERATION_TIMEOUT, 30000).
+
+all() ->
+ [
+ {group, without_disambiguate},
+ {group, with_disambiguate}
+ ].
+
+groups() ->
+ [
+ {without_disambiguate, [], [
+ {cluster_size_1, [], [
+ simple,
+ multiple_upstreams,
+ multiple_upstreams_pattern,
+ multiple_downstreams,
+ bidirectional,
+ dynamic_reconfiguration,
+ federate_unfederate,
+ dynamic_plugin_stop_start
+ ]}
+ ]},
+ {with_disambiguate, [], [
+ {cluster_size_2, [], [
+ restart_upstream
+ ]}
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(without_disambiguate, Config) ->
+ rabbit_ct_helpers:set_config(Config,
+ {disambiguate_step, []});
+init_per_group(with_disambiguate, Config) ->
+ rabbit_ct_helpers:set_config(Config,
+ {disambiguate_step, [fun rabbit_federation_test_util:disambiguate/1]});
+init_per_group(cluster_size_1 = Group, Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodes_count, 1}
+ ]),
+ init_per_group1(Group, Config1);
+init_per_group(cluster_size_2 = Group, Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodes_count, 2}
+ ]),
+ init_per_group1(Group, Config1).
+
+init_per_group1(Group, Config) ->
+ SetupFederation = case Group of
+ cluster_size_1 -> [fun rabbit_federation_test_util:setup_federation/1];
+ cluster_size_2 -> []
+ end,
+ Disambiguate = ?config(disambiguate_step, Config),
+ Suffix = rabbit_ct_helpers:testcase_absname(Config, "", "-"),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, Suffix},
+ {rmq_nodes_clustered, false}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps() ++
+ SetupFederation ++ Disambiguate).
+
+end_per_group(without_disambiguate, Config) ->
+ Config;
+end_per_group(with_disambiguate, Config) ->
+ Config;
+end_per_group(_, Config) ->
+ rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+simple(Config) ->
+ with_ch(Config,
+ fun (Ch) ->
+ expect_federation(Ch, <<"upstream">>, <<"fed.downstream">>)
+ end, upstream_downstream()).
+
+multiple_upstreams(Config) ->
+ with_ch(Config,
+ fun (Ch) ->
+ expect_federation(Ch, <<"upstream">>, <<"fed12.downstream">>),
+ expect_federation(Ch, <<"upstream2">>, <<"fed12.downstream">>)
+ end, [q(<<"upstream">>),
+ q(<<"upstream2">>),
+ q(<<"fed12.downstream">>)]).
+
+multiple_upstreams_pattern(Config) ->
+ set_upstream(Config, 0, <<"local453x">>,
+ rabbit_ct_broker_helpers:node_uri(Config, 0), [
+ {<<"exchange">>, <<"upstream">>},
+ {<<"queue">>, <<"upstream">>}]),
+
+ set_upstream(Config, 0, <<"zzzzzZZzz">>,
+ rabbit_ct_broker_helpers:node_uri(Config, 0), [
+ {<<"exchange">>, <<"upstream-zzz">>},
+ {<<"queue">>, <<"upstream-zzz">>}]),
+
+ set_upstream(Config, 0, <<"local3214x">>,
+ rabbit_ct_broker_helpers:node_uri(Config, 0), [
+ {<<"exchange">>, <<"upstream2">>},
+ {<<"queue">>, <<"upstream2">>}]),
+
+ set_policy_pattern(Config, 0, <<"pattern">>, <<"^pattern\.">>, <<"local\\d+x">>),
+
+ with_ch(Config,
+ fun (Ch) ->
+ expect_federation(Ch, <<"upstream">>, <<"pattern.downstream">>, ?EXPECT_FEDERATION_TIMEOUT),
+ expect_federation(Ch, <<"upstream2">>, <<"pattern.downstream">>, ?EXPECT_FEDERATION_TIMEOUT)
+ end, [q(<<"upstream">>),
+ q(<<"upstream2">>),
+ q(<<"pattern.downstream">>)]),
+
+ clear_upstream(Config, 0, <<"local453x">>),
+ clear_upstream(Config, 0, <<"local3214x">>),
+ clear_policy(Config, 0, <<"pattern">>).
+
+multiple_downstreams(Config) ->
+ with_ch(Config,
+ fun (Ch) ->
+ timer:sleep(?INITIAL_WAIT),
+ expect_federation(Ch, <<"upstream">>, <<"fed.downstream">>, ?EXPECT_FEDERATION_TIMEOUT),
+ expect_federation(Ch, <<"upstream">>, <<"fed.downstream2">>, ?EXPECT_FEDERATION_TIMEOUT)
+ end, upstream_downstream() ++ [q(<<"fed.downstream2">>)]).
+
+bidirectional(Config) ->
+ with_ch(Config,
+ fun (Ch) ->
+ timer:sleep(?INITIAL_WAIT),
+ publish_expect(Ch, <<>>, <<"one">>, <<"one">>, <<"first one">>, ?EXPECT_FEDERATION_TIMEOUT),
+ publish_expect(Ch, <<>>, <<"two">>, <<"two">>, <<"first two">>, ?EXPECT_FEDERATION_TIMEOUT),
+ Seq = lists:seq(1, 100),
+ [publish(Ch, <<>>, <<"one">>, <<"bulk">>) || _ <- Seq],
+ [publish(Ch, <<>>, <<"two">>, <<"bulk">>) || _ <- Seq],
+ expect(Ch, <<"one">>, repeat(150, <<"bulk">>)),
+ expect(Ch, <<"two">>, repeat(50, <<"bulk">>)),
+ expect_empty(Ch, <<"one">>),
+ expect_empty(Ch, <<"two">>)
+ end, [q(<<"one">>),
+ q(<<"two">>)]).
+
+dynamic_reconfiguration(Config) ->
+ with_ch(Config,
+ fun (Ch) ->
+ timer:sleep(?INITIAL_WAIT),
+ expect_federation(Ch, <<"upstream">>, <<"fed.downstream">>, ?EXPECT_FEDERATION_TIMEOUT),
+
+ %% Test that clearing connections works
+ clear_upstream(Config, 0, <<"localhost">>),
+ expect_no_federation(Ch, <<"upstream">>, <<"fed.downstream">>),
+
+ %% Test that reading them and changing them works
+ set_upstream(Config, 0,
+ <<"localhost">>, rabbit_ct_broker_helpers:node_uri(Config, 0)),
+ %% Do it twice so we at least hit the no-restart optimisation
+ URI = rabbit_ct_broker_helpers:node_uri(Config, 0, [use_ipaddr]),
+ set_upstream(Config, 0, <<"localhost">>, URI),
+ set_upstream(Config, 0, <<"localhost">>, URI),
+ expect_federation(Ch, <<"upstream">>, <<"fed.downstream">>)
+ end, upstream_downstream()).
+
+federate_unfederate(Config) ->
+ with_ch(Config,
+ fun (Ch) ->
+ timer:sleep(?INITIAL_WAIT),
+ expect_federation(Ch, <<"upstream">>, <<"fed.downstream">>, ?EXPECT_FEDERATION_TIMEOUT),
+ expect_federation(Ch, <<"upstream">>, <<"fed.downstream2">>, ?EXPECT_FEDERATION_TIMEOUT),
+
+ %% clear the policy
+ rabbit_ct_broker_helpers:clear_policy(Config, 0, <<"fed">>),
+
+ expect_no_federation(Ch, <<"upstream">>, <<"fed.downstream">>),
+ expect_no_federation(Ch, <<"upstream">>, <<"fed.downstream2">>),
+
+ rabbit_ct_broker_helpers:set_policy(Config, 0,
+ <<"fed">>, <<"^fed\.">>, <<"all">>, [
+ {<<"federation-upstream-set">>, <<"upstream">>}])
+ end, upstream_downstream() ++ [q(<<"fed.downstream2">>)]).
+
+dynamic_plugin_stop_start(Config) ->
+ DownQ2 = <<"fed.downstream2">>,
+ with_ch(Config,
+ fun (Ch) ->
+ timer:sleep(?INITIAL_WAIT),
+ UpQ = <<"upstream">>,
+ DownQ1 = <<"fed.downstream">>,
+ expect_federation(Ch, UpQ, DownQ1, ?EXPECT_FEDERATION_TIMEOUT),
+ expect_federation(Ch, UpQ, DownQ2, ?EXPECT_FEDERATION_TIMEOUT),
+
+ %% Disable the plugin, the link disappears
+ ok = rabbit_ct_broker_helpers:disable_plugin(Config, 0, "rabbitmq_federation"),
+
+ expect_no_federation(Ch, UpQ, DownQ1),
+ expect_no_federation(Ch, UpQ, DownQ2),
+
+ declare_queue(Ch, q(DownQ1)),
+ declare_queue(Ch, q(DownQ2)),
+ ok = rabbit_ct_broker_helpers:enable_plugin(Config, 0, "rabbitmq_federation"),
+
+ %% With the queues re-declared and the plugin re-enabled, the links reappear
+ wait_for_federation(
+ 90,
+ fun() ->
+ Status = rabbit_ct_broker_helpers:rpc(Config, 0,
+ rabbit_federation_status, status, []),
+ L = [
+ Entry || Entry <- Status,
+ proplists:get_value(queue, Entry) =:= DownQ1 orelse
+ proplists:get_value(queue, Entry) =:= DownQ2,
+ proplists:get_value(upstream_queue, Entry) =:= UpQ,
+ proplists:get_value(status, Entry) =:= running
+ ],
+ length(L) =:= 2
+ end),
+ expect_federation(Ch, UpQ, DownQ1, 120000)
+ end, upstream_downstream() ++ [q(DownQ2)]).
+
+restart_upstream(Config) ->
+ [Rabbit, Hare] = rabbit_ct_broker_helpers:get_node_configs(Config,
+ nodename),
+ set_policy_upstream(Config, Rabbit, <<"^test$">>,
+ rabbit_ct_broker_helpers:node_uri(Config, Hare), []),
+
+ Downstream = rabbit_ct_client_helpers:open_channel(Config, Rabbit),
+ Upstream = rabbit_ct_client_helpers:open_channel(Config, Hare),
+
+ declare_queue(Upstream, q(<<"test">>)),
+ declare_queue(Downstream, q(<<"test">>)),
+ Seq = lists:seq(1, 100),
+ [publish(Upstream, <<>>, <<"test">>, <<"bulk">>) || _ <- Seq],
+ expect(Upstream, <<"test">>, repeat(25, <<"bulk">>)),
+ expect(Downstream, <<"test">>, repeat(25, <<"bulk">>)),
+
+ rabbit_ct_client_helpers:close_channels_and_connection(Config, Hare),
+ ok = rabbit_ct_broker_helpers:restart_node(Config, Hare),
+ Upstream2 = rabbit_ct_client_helpers:open_channel(Config, Hare),
+
+ expect(Upstream2, <<"test">>, repeat(25, <<"bulk">>)),
+ expect(Downstream, <<"test">>, repeat(25, <<"bulk">>)),
+ expect_empty(Upstream2, <<"test">>),
+ expect_empty(Downstream, <<"test">>),
+
+ ok.
+
+%upstream_has_no_federation(Config) ->
+% %% TODO
+% ok.
+
+%%----------------------------------------------------------------------------
+repeat(Count, Item) -> [Item || _ <- lists:seq(1, Count)].
+
+%%----------------------------------------------------------------------------
+
+publish(Ch, X, Key, Payload) when is_binary(Payload) ->
+ publish(Ch, X, Key, #amqp_msg{payload = Payload});
+
+publish(Ch, X, Key, Msg = #amqp_msg{}) ->
+ amqp_channel:call(Ch, #'basic.publish'{exchange = X,
+ routing_key = Key}, Msg).
+
+publish_expect(Ch, X, Key, Q, Payload) ->
+ publish(Ch, X, Key, Payload),
+ expect(Ch, Q, [Payload]).
+
+publish_expect(Ch, X, Key, Q, Payload, Timeout) ->
+ publish(Ch, X, Key, Payload),
+ expect(Ch, Q, [Payload], Timeout).
+
+%% Doubled due to our strange basic.get behaviour.
+expect_empty(Ch, Q) ->
+ rabbit_federation_test_util:expect_empty(Ch, Q),
+ rabbit_federation_test_util:expect_empty(Ch, Q).
+
+expect_federation(Ch, UpstreamQ, DownstreamQ) ->
+ publish_expect(Ch, <<>>, UpstreamQ, DownstreamQ, <<"HELLO">>).
+
+expect_federation(Ch, UpstreamQ, DownstreamQ, Timeout) ->
+ publish_expect(Ch, <<>>, UpstreamQ, DownstreamQ, <<"HELLO">>, Timeout).
+
+expect_no_federation(Ch, UpstreamQ, DownstreamQ) ->
+ publish(Ch, <<>>, UpstreamQ, <<"HELLO">>),
+ expect_empty(Ch, DownstreamQ),
+ expect(Ch, UpstreamQ, [<<"HELLO">>]).
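+%% Note: the message published upstream must remain consumable from the
+%% upstream queue while the downstream queue stays empty; the final
+%% expect/3 call also drains the upstream queue for later assertions.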
+
+upstream_downstream() ->
+ [q(<<"upstream">>), q(<<"fed.downstream">>)].
diff --git a/deps/rabbitmq_federation/test/rabbit_federation_status_SUITE.erl b/deps/rabbitmq_federation/test/rabbit_federation_status_SUITE.erl
new file mode 100644
index 0000000000..6b802a3f15
--- /dev/null
+++ b/deps/rabbitmq_federation/test/rabbit_federation_status_SUITE.erl
@@ -0,0 +1,129 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_federation_status_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-include("rabbit_federation.hrl").
+
+-compile(export_all).
+
+-import(rabbit_federation_test_util,
+ [expect/3, expect_empty/2,
+ set_upstream/4, clear_upstream/3, set_upstream_set/4,
+ set_policy/5, clear_policy/3,
+ set_policy_upstream/5, set_policy_upstreams/4,
+ no_plugins/1, with_ch/3]).
+
+all() ->
+ [
+ {group, non_parallel_tests}
+ ].
+
+groups() ->
+ [
+ {non_parallel_tests, [], [
+ exchange_status,
+ queue_status,
+ lookup_exchange_status,
+ lookup_queue_status,
+ lookup_bad_status
+ ]}
+ ].
+
+suite() ->
+ [{timetrap, {minutes, 5}}].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, ?MODULE}
+ ]),
+ rabbit_ct_helpers:run_setup_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps() ++
+ [fun rabbit_federation_test_util:setup_federation/1]).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_group(_, Config) ->
+ Config.
+
+end_per_group(_, Config) ->
+ Config.
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+exchange_status(Config) ->
+ exchange_SUITE:with_ch(
+ Config,
+ fun (_Ch) ->
+ [Link] = rabbit_ct_broker_helpers:rpc(Config, 0,
+ rabbit_federation_status, status, []),
+ true = is_binary(proplists:get_value(id, Link))
+ end, exchange_SUITE:upstream_downstream()).
+
+queue_status(Config) ->
+ with_ch(
+ Config,
+ fun (_Ch) ->
+ timer:sleep(3000),
+ [Link] = rabbit_ct_broker_helpers:rpc(Config, 0,
+ rabbit_federation_status, status, []),
+ true = is_binary(proplists:get_value(id, Link))
+ end, queue_SUITE:upstream_downstream()).
+
+lookup_exchange_status(Config) ->
+ exchange_SUITE:with_ch(
+ Config,
+ fun (_Ch) ->
+ [Link] = rabbit_ct_broker_helpers:rpc(Config, 0,
+ rabbit_federation_status, status, []),
+ Id = proplists:get_value(id, Link),
+ Props = rabbit_ct_broker_helpers:rpc(Config, 0,
+ rabbit_federation_status, lookup, [Id]),
+ true = lists:all(fun(K) -> lists:keymember(K, 1, Props) end,
+ [key, uri, status, timestamp, id, supervisor, upstream])
+ end, exchange_SUITE:upstream_downstream()).
+
+lookup_queue_status(Config) ->
+ with_ch(
+ Config,
+ fun (_Ch) ->
+ timer:sleep(3000),
+ [Link] = rabbit_ct_broker_helpers:rpc(Config, 0,
+ rabbit_federation_status, status, []),
+ Id = proplists:get_value(id, Link),
+ Props = rabbit_ct_broker_helpers:rpc(Config, 0,
+ rabbit_federation_status, lookup, [Id]),
+ true = lists:all(fun(K) -> lists:keymember(K, 1, Props) end,
+ [key, uri, status, timestamp, id, supervisor, upstream])
+ end, queue_SUITE:upstream_downstream()).
+
+lookup_bad_status(Config) ->
+ with_ch(
+ Config,
+ fun (_Ch) ->
+ timer:sleep(3000),
+ not_found = rabbit_ct_broker_helpers:rpc(
+ Config, 0,
+ rabbit_federation_status, lookup, [<<"justmadeitup">>])
+ end, queue_SUITE:upstream_downstream()).
diff --git a/deps/rabbitmq_federation/test/rabbit_federation_test_util.erl b/deps/rabbitmq_federation/test/rabbit_federation_test_util.erl
new file mode 100644
index 0000000000..534817a2a4
--- /dev/null
+++ b/deps/rabbitmq_federation/test/rabbit_federation_test_util.erl
@@ -0,0 +1,354 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_federation_test_util).
+
+-include("rabbit_federation.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-compile(export_all).
+
+-import(rabbit_misc, [pget/2]).
+
+setup_federation(Config) ->
+ rabbit_ct_broker_helpers:set_parameter(Config, 0,
+ <<"federation-upstream">>, <<"localhost">>, [
+ {<<"uri">>, rabbit_ct_broker_helpers:node_uri(Config, 0)},
+ {<<"consumer-tag">>, <<"fed.tag">>}]),
+
+ rabbit_ct_broker_helpers:set_parameter(Config, 0,
+ <<"federation-upstream">>, <<"local5673">>, [
+ {<<"uri">>, <<"amqp://localhost:1">>}]),
+
+ rabbit_ct_broker_helpers:set_parameter(Config, 0,
+ <<"federation-upstream-set">>, <<"upstream">>, [
+ [
+ {<<"upstream">>, <<"localhost">>},
+ {<<"exchange">>, <<"upstream">>},
+ {<<"queue">>, <<"upstream">>}
+ ]
+ ]),
+
+ rabbit_ct_broker_helpers:set_parameter(Config, 0,
+ <<"federation-upstream-set">>, <<"upstream2">>, [
+ [
+ {<<"upstream">>, <<"localhost">>},
+ {<<"exchange">>, <<"upstream2">>},
+ {<<"queue">>, <<"upstream2">>}
+ ]
+ ]),
+
+ rabbit_ct_broker_helpers:set_parameter(Config, 0,
+ <<"federation-upstream-set">>, <<"localhost">>, [
+ [{<<"upstream">>, <<"localhost">>}]
+ ]),
+
+ rabbit_ct_broker_helpers:set_parameter(Config, 0,
+ <<"federation-upstream-set">>, <<"upstream12">>, [
+ [
+ {<<"upstream">>, <<"localhost">>},
+ {<<"exchange">>, <<"upstream">>},
+ {<<"queue">>, <<"upstream">>}
+ ], [
+ {<<"upstream">>, <<"localhost">>},
+ {<<"exchange">>, <<"upstream2">>},
+ {<<"queue">>, <<"upstream2">>}
+ ]
+ ]),
+
+ rabbit_ct_broker_helpers:set_parameter(Config, 0,
+ <<"federation-upstream-set">>, <<"one">>, [
+ [
+ {<<"upstream">>, <<"localhost">>},
+ {<<"exchange">>, <<"one">>},
+ {<<"queue">>, <<"one">>}
+ ]
+ ]),
+
+ rabbit_ct_broker_helpers:set_parameter(Config, 0,
+ <<"federation-upstream-set">>, <<"two">>, [
+ [
+ {<<"upstream">>, <<"localhost">>},
+ {<<"exchange">>, <<"two">>},
+ {<<"queue">>, <<"two">>}
+ ]
+ ]),
+
+ rabbit_ct_broker_helpers:set_parameter(Config, 0,
+ <<"federation-upstream-set">>, <<"upstream5673">>, [
+ [
+ {<<"upstream">>, <<"local5673">>},
+ {<<"exchange">>, <<"upstream">>}
+ ]
+ ]),
+
+ rabbit_ct_broker_helpers:set_policy(Config, 0,
+ <<"fed">>, <<"^fed\.">>, <<"all">>, [
+ {<<"federation-upstream-set">>, <<"upstream">>}]),
+
+ rabbit_ct_broker_helpers:set_policy(Config, 0,
+ <<"fed12">>, <<"^fed12\.">>, <<"all">>, [
+ {<<"federation-upstream-set">>, <<"upstream12">>}]),
+
+ rabbit_ct_broker_helpers:set_policy(Config, 0,
+ <<"one">>, <<"^two$">>, <<"all">>, [
+ {<<"federation-upstream-set">>, <<"one">>}]),
+
+ rabbit_ct_broker_helpers:set_policy(Config, 0,
+ <<"two">>, <<"^one$">>, <<"all">>, [
+ {<<"federation-upstream-set">>, <<"two">>}]),
+
+ rabbit_ct_broker_helpers:set_policy(Config, 0,
+ <<"hare">>, <<"^hare\.">>, <<"all">>, [
+ {<<"federation-upstream-set">>, <<"upstream5673">>}]),
+
+ rabbit_ct_broker_helpers:set_policy(Config, 0,
+ <<"all">>, <<"^all\.">>, <<"all">>, [
+ {<<"federation-upstream-set">>, <<"all">>}]),
+
+ rabbit_ct_broker_helpers:set_policy(Config, 0,
+ <<"new">>, <<"^new\.">>, <<"all">>, [
+ {<<"federation-upstream-set">>, <<"new-set">>}]),
+ Config.
+
+setup_down_federation(Config) ->
+ rabbit_ct_broker_helpers:set_parameter(
+ Config, 0, <<"federation-upstream">>, <<"broken-bunny">>,
+ [{<<"uri">>, <<"amqp://broken-bunny">>},
+ {<<"reconnect-delay">>, 600000}]),
+ rabbit_ct_broker_helpers:set_parameter(
+ Config, 0, <<"federation-upstream">>, <<"localhost">>,
+ [{<<"uri">>, rabbit_ct_broker_helpers:node_uri(Config, 0)}]),
+ rabbit_ct_broker_helpers:set_parameter(
+ Config, 0,
+ <<"federation-upstream-set">>, <<"upstream">>,
+ [[{<<"upstream">>, <<"localhost">>},
+ {<<"exchange">>, <<"upstream">>},
+ {<<"queue">>, <<"upstream">>}],
+ [{<<"upstream">>, <<"broken-bunny">>},
+ {<<"exchange">>, <<"upstream">>},
+ {<<"queue">>, <<"upstream">>}]]),
+ rabbit_ct_broker_helpers:set_policy(
+ Config, 0,
+ <<"fed">>, <<"^fed\.">>, <<"all">>, [{<<"federation-upstream-set">>, <<"upstream">>}]),
+ Config.
+
+wait_for_federation(Retries, Fun) ->
+ case Fun() of
+ true ->
+ ok;
+ false when Retries > 0 ->
+ timer:sleep(1000),
+ wait_for_federation(Retries - 1, Fun);
+ false ->
+ throw({timeout_while_waiting_for_federation, Fun})
+ end.
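+%% Usage sketch: polls a boolean predicate once per second, giving up after
+%% Retries attempts, e.g.
+%%   ok = wait_for_federation(30, fun() -> links_are_up() end)
+%% where links_are_up/0 is a hypothetical predicate over the link status.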
+
+expect(Ch, Q, Fun) when is_function(Fun) ->
+ amqp_channel:subscribe(Ch, #'basic.consume'{queue = Q,
+ no_ack = true}, self()),
+ CTag = receive
+ #'basic.consume_ok'{consumer_tag = CT} -> CT
+ end,
+ Fun(),
+ amqp_channel:call(Ch, #'basic.cancel'{consumer_tag = CTag});
+
+expect(Ch, Q, Payloads) ->
+ expect(Ch, Q, fun() -> expect(Payloads) end).
+
+expect(Ch, Q, Payloads, Timeout) ->
+ expect(Ch, Q, fun() -> expect(Payloads, Timeout) end).
+
+expect([]) ->
+ ok;
+expect(Payloads) ->
+ expect(Payloads, 60000).
+
+expect([], _Timeout) ->
+ ok;
+expect(Payloads, Timeout) ->
+ receive
+ {#'basic.deliver'{}, #amqp_msg{payload = Payload}} ->
+ case lists:member(Payload, Payloads) of
+ true ->
+ ct:pal("Consumed a message: ~p", [Payload]),
+ expect(Payloads -- [Payload], Timeout);
+ false -> ?assert(false, rabbit_misc:format("received an unexpected payload ~p", [Payload]))
+ end
+ after Timeout ->
+ ?assert(false, rabbit_misc:format("Did not receive expected payloads ~p in time", [Payloads]))
+ end.
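+%% Note: deliveries may arrive in any order; each received payload is
+%% removed from the expected list, an unexpected payload fails the
+%% assertion immediately, and the recursion ends once the list is empty.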
+
+expect_empty(Ch, Q) ->
+ ?assertMatch(#'basic.get_empty'{},
+ amqp_channel:call(Ch, #'basic.get'{ queue = Q })).
+
+set_upstream(Config, Node, Name, URI) ->
+ set_upstream(Config, Node, Name, URI, []).
+
+set_upstream(Config, Node, Name, URI, Extra) ->
+ rabbit_ct_broker_helpers:set_parameter(Config, Node,
+ <<"federation-upstream">>, Name, [{<<"uri">>, URI} | Extra]).
+
+set_upstream_in_vhost(Config, Node, VirtualHost, Name, URI) ->
+ set_upstream_in_vhost(Config, Node, VirtualHost, Name, URI, []).
+
+set_upstream_in_vhost(Config, Node, VirtualHost, Name, URI, Extra) ->
+ rabbit_ct_broker_helpers:set_parameter(Config, Node, VirtualHost,
+ <<"federation-upstream">>, Name, [{<<"uri">>, URI} | Extra]).
+
+clear_upstream(Config, Node, Name) ->
+ rabbit_ct_broker_helpers:clear_parameter(Config, Node,
+ <<"federation-upstream">>, Name).
+
+set_upstream_set(Config, Node, Name, Set) ->
+ rabbit_ct_broker_helpers:set_parameter(Config, Node,
+ <<"federation-upstream-set">>, Name,
+ [[{<<"upstream">>, UStream} | Extra] || {UStream, Extra} <- Set]).
+
+clear_upstream_set(Config, Node, Name) ->
+ rabbit_ct_broker_helpers:clear_parameter(Config, Node,
+ <<"federation-upstream-set">>, Name).
+
+set_policy(Config, Node, Name, Pattern, UpstreamSet) ->
+ rabbit_ct_broker_helpers:set_policy(Config, Node,
+ Name, Pattern, <<"all">>,
+ [{<<"federation-upstream-set">>, UpstreamSet}]).
+
+set_policy_pattern(Config, Node, Name, Pattern, Regex) ->
+ rabbit_ct_broker_helpers:set_policy(Config, Node,
+ Name, Pattern, <<"all">>,
+ [{<<"federation-upstream-pattern">>, Regex}]).
+
+clear_policy(Config, Node, Name) ->
+ rabbit_ct_broker_helpers:clear_policy(Config, Node, Name).
+
+set_policy_upstream(Config, Node, Pattern, URI, Extra) ->
+ set_policy_upstreams(Config, Node, Pattern, [{URI, Extra}]).
+
+set_policy_upstreams(Config, Node, Pattern, URIExtras) ->
+ put(upstream_num, 1),
+ [set_upstream(Config, Node, gen_upstream_name(), URI, Extra)
+ || {URI, Extra} <- URIExtras],
+ set_policy(Config, Node, Pattern, Pattern, <<"all">>).
+
+gen_upstream_name() ->
+ list_to_binary("upstream-" ++ integer_to_list(next_upstream_num())).
+
+next_upstream_num() ->
+ R = get(upstream_num) + 1,
+ put(upstream_num, R),
+ R.
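+%% Note: the counter lives in the process dictionary and is seeded to 1 by
+%% set_policy_upstreams/4, so the first generated name is <<"upstream-2">>.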
+
+%% Make sure that even though multiple nodes are in a single
+%% distributed system, we still keep all our process groups separate.
+disambiguate(Config) ->
+ rabbit_ct_broker_helpers:rpc_all(Config,
+ application, set_env,
+ [rabbitmq_federation, pgroup_name_cluster_id, true]),
+ Config.
+
+no_plugins(Cfg) ->
+ [{K, case K of
+ plugins -> none;
+ _ -> V
+ end} || {K, V} <- Cfg].
+
+%%----------------------------------------------------------------------------
+
+all_federation_links(Config, Node) ->
+ rabbit_ct_broker_helpers:rpc(Config, Node, rabbit_federation_status, status, []).
+
+federation_links_in_vhost(Config, Node, VirtualHost) ->
+ Links = rabbit_ct_broker_helpers:rpc(Config, Node, rabbit_federation_status, status, []),
+ lists:filter(
+ fun(Link) ->
+ VirtualHost =:= proplists:get_value(vhost, Link)
+ end, Links).
+
+status_fields(Prop, Statuses) ->
+ lists:usort(
+ lists:map(
+ fun(Link) -> proplists:get_value(Prop, Link) end,
+ Statuses)).
+
+assert_status(Config, Node, XorQs, Names) ->
+ rabbit_ct_broker_helpers:rpc(Config, Node,
+ ?MODULE, assert_status1, [XorQs, Names]).
+
+assert_status1(XorQs, Names) ->
+ [ct:pal("links(XorQ) for ~p: ~p", [XorQ, links(XorQ)]) || XorQ <- XorQs],
+ Links = lists:append([links(XorQ) || XorQ <- XorQs]),
+ Remaining = lists:foldl(fun (Link, Status) ->
+ assert_link_status(Link, Status, Names)
+ end, rabbit_federation_status:status(), Links),
+ ?assertEqual([], Remaining),
+ ok.
+
+assert_link_status({DXorQNameBin, UpstreamName, UXorQNameBin}, Status,
+ {TypeName, UpstreamTypeName}) ->
+ {This, Rest} = lists:partition(
+ fun(St) ->
+ pget(upstream, St) =:= UpstreamName andalso
+ pget(TypeName, St) =:= DXorQNameBin andalso
+ pget(UpstreamTypeName, St) =:= UXorQNameBin
+ end, Status),
+ ?assertMatch([_], This),
+ Rest.
+
+links(#'exchange.declare'{exchange = Name}) ->
+ case rabbit_exchange:lookup(xr(Name)) of
+ {ok, X} ->
+ case rabbit_policy:get(<<"federation-upstream-set">>, X) of
+ undefined ->
+ case rabbit_policy:get(<<"federation-upstream-pattern">>, X) of
+ undefined -> [];
+ Regex ->
+ [{Name, U#upstream.name, U#upstream.exchange_name} ||
+ U <- rabbit_federation_upstream:from_pattern(Regex, X)]
+ end;
+ Set ->
+ [{Name, U#upstream.name, U#upstream.exchange_name} ||
+ U <- rabbit_federation_upstream:from_set(Set, X)]
+ end;
+ {error, not_found} ->
+ []
+ end.
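+%% Note: returns one {DownstreamName, UpstreamName, UpstreamXName} tuple per
+%% upstream that a federation policy (set or pattern) selects for the
+%% exchange; an exchange without a federation policy yields [].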
+
+xr(Name) -> rabbit_misc:r(<<"/">>, exchange, Name).
+
+with_ch(Config, Fun, Qs) ->
+ Ch = rabbit_ct_client_helpers:open_channel(Config, 0),
+ declare_all(Ch, Qs),
+ %% Clean up queues even after test failure.
+ try
+ Fun(Ch)
+ after
+ delete_all(Ch, Qs),
+ rabbit_ct_client_helpers:close_channel(Ch)
+ end,
+ ok.
+
+declare_all(Ch, Qs) -> [declare_queue(Ch, Q) || Q <- Qs].
+delete_all(Ch, Qs) ->
+ [delete_queue(Ch, Q) || #'queue.declare'{queue = Q} <- Qs].
+
+declare_queue(Ch, Q) ->
+ amqp_channel:call(Ch, Q).
+
+delete_queue(Ch, Q) ->
+ amqp_channel:call(Ch, #'queue.delete'{queue = Q}).
+
+q(Name) ->
+ #'queue.declare'{queue = Name,
+ durable = true}.
diff --git a/deps/rabbitmq_federation/test/restart_federation_link_command_SUITE.erl b/deps/rabbitmq_federation/test/restart_federation_link_command_SUITE.erl
new file mode 100644
index 0000000000..511bae6540
--- /dev/null
+++ b/deps/rabbitmq_federation/test/restart_federation_link_command_SUITE.erl
@@ -0,0 +1,101 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(restart_federation_link_command_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-compile(export_all).
+
+-define(CMD, 'Elixir.RabbitMQ.CLI.Ctl.Commands.RestartFederationLinkCommand').
+
+all() ->
+ [
+ {group, federated_down}
+ ].
+
+groups() ->
+ [
+ {federated_down, [], [
+ run,
+ run_not_found,
+ output
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, ?MODULE}
+ ]),
+ Config2 = rabbit_ct_helpers:run_setup_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()),
+ Config2.
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_group(federated_down, Config) ->
+ rabbit_federation_test_util:setup_down_federation(Config),
+ Config;
+init_per_group(_, Config) ->
+ Config.
+
+end_per_group(_, Config) ->
+ Config.
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+run_not_federated(Config) ->
+ [A] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ Opts = #{node => A},
+ {stream, []} = ?CMD:run([], Opts#{'only-down' => false}).
+
+output_not_federated(Config) ->
+ [A] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ Opts = #{node => A},
+ {stream, []} = ?CMD:output({stream, []}, Opts).
+
+run(Config) ->
+ [A] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ Opts = #{node => A},
+ rabbit_federation_test_util:with_ch(
+ Config,
+ fun(_) ->
+ timer:sleep(3000),
+ [Link | _] = rabbit_ct_broker_helpers:rpc(Config, 0,
+ rabbit_federation_status, status, []),
+ Id = proplists:get_value(id, Link),
+ ok = ?CMD:run([Id], Opts)
+ end,
+ [rabbit_federation_test_util:q(<<"upstream">>),
+ rabbit_federation_test_util:q(<<"fed.downstream">>)]).
+
+run_not_found(Config) ->
+ [A] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ Opts = #{node => A},
+ {error, _ErrorMsg} = ?CMD:run([<<"MakingItUp">>], Opts).
+
+output(Config) ->
+ [A] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ Opts = #{node => A},
+ ok = ?CMD:output(ok, Opts).
diff --git a/deps/rabbitmq_federation/test/unit_SUITE.erl b/deps/rabbitmq_federation/test/unit_SUITE.erl
new file mode 100644
index 0000000000..7d0707f96c
--- /dev/null
+++ b/deps/rabbitmq_federation/test/unit_SUITE.erl
@@ -0,0 +1,65 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2019-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(unit_SUITE).
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include("rabbit_federation.hrl").
+
+-compile(export_all).
+
+all() -> [
+ obfuscate_upstream,
+ obfuscate_upstream_params_network,
+ obfuscate_upstream_params_network_with_char_list_password_value,
+ obfuscate_upstream_params_direct
+].
+
+init_per_suite(Config) ->
+ {ok, _} = application:ensure_all_started(credentials_obfuscation),
+ Config.
+
+end_per_suite(Config) ->
+ Config.
+
+obfuscate_upstream(_Config) ->
+ Upstream = #upstream{uris = [<<"amqp://guest:password@localhost">>]},
+ ObfuscatedUpstream = rabbit_federation_util:obfuscate_upstream(Upstream),
+ ?assertEqual(Upstream, rabbit_federation_util:deobfuscate_upstream(ObfuscatedUpstream)),
+ ok.
+
+obfuscate_upstream_params_network(_Config) ->
+ UpstreamParams = #upstream_params{
+ uri = <<"amqp://guest:password@localhost">>,
+ params = #amqp_params_network{password = <<"password">>}
+ },
+ ObfuscatedUpstreamParams = rabbit_federation_util:obfuscate_upstream_params(UpstreamParams),
+ ?assertEqual(UpstreamParams, rabbit_federation_util:deobfuscate_upstream_params(ObfuscatedUpstreamParams)),
+ ok.
+
+obfuscate_upstream_params_network_with_char_list_password_value(_Config) ->
+ Input = #upstream_params{
+ uri = <<"amqp://guest:password@localhost">>,
+ params = #amqp_params_network{password = "password"}
+ },
+ Output = #upstream_params{
+ uri = <<"amqp://guest:password@localhost">>,
+ params = #amqp_params_network{password = <<"password">>}
+ },
+ ObfuscatedUpstreamParams = rabbit_federation_util:obfuscate_upstream_params(Input),
+ ?assertEqual(Output, rabbit_federation_util:deobfuscate_upstream_params(ObfuscatedUpstreamParams)),
+ ok.
+
+obfuscate_upstream_params_direct(_Config) ->
+ UpstreamParams = #upstream_params{
+ uri = <<"amqp://guest:password@localhost">>,
+ params = #amqp_params_direct{password = <<"password">>}
+ },
+ ObfuscatedUpstreamParams = rabbit_federation_util:obfuscate_upstream_params(UpstreamParams),
+ ?assertEqual(UpstreamParams, rabbit_federation_util:deobfuscate_upstream_params(ObfuscatedUpstreamParams)),
+ ok.
diff --git a/deps/rabbitmq_federation/test/unit_inbroker_SUITE.erl b/deps/rabbitmq_federation/test/unit_inbroker_SUITE.erl
new file mode 100644
index 0000000000..f65dffbe8e
--- /dev/null
+++ b/deps/rabbitmq_federation/test/unit_inbroker_SUITE.erl
@@ -0,0 +1,230 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(unit_inbroker_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-include("rabbit_federation.hrl").
+
+-compile(export_all).
+
+-define(US_NAME, <<"upstream">>).
+-define(DS_NAME, <<"fed.downstream">>).
+
+all() ->
+ [
+ {group, non_parallel_tests}
+ ].
+
+groups() ->
+ [
+ {non_parallel_tests, [], [
+ serialisation,
+ scratch_space,
+ remove_credentials,
+ get_connection_name,
+ upstream_validation,
+ upstream_set_validation
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, ?MODULE}
+ ]),
+ rabbit_ct_helpers:run_setup_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_group(_, Config) ->
+ Config.
+
+end_per_group(_, Config) ->
+ Config.
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+%% Test that we apply binding changes in the correct order even when
+%% they arrive out of order.
+serialisation(Config) ->
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, serialisation1, []).
+
+serialisation1() ->
+ with_exchanges(
+ fun(X) ->
+ [B1, B2, B3] = [b(K) || K <- [<<"1">>, <<"2">>, <<"3">>]],
+ remove_bindings(4, X, [B1, B3]),
+ add_binding(5, X, B1),
+ add_binding(1, X, B1),
+ add_binding(2, X, B2),
+ add_binding(3, X, B3),
+ %% List of lists: one list of routing keys per link
+ Keys = rabbit_federation_exchange_link:list_routing_keys(
+ X#exchange.name),
+ [[<<"1">>, <<"2">>]] = Keys
+ end).
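+%% Expected outcome: the link applies the operations in serial order
+%% (add 1, add 2, add 3, remove 1 and 3, re-add 1) regardless of the
+%% order in which they were submitted above, leaving keys 1 and 2.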
+
+scratch_space(Config) ->
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, scratch_space1, []).
+
+scratch_space1() ->
+ A = <<"A">>,
+ B = <<"B">>,
+ DB = rabbit_federation_db,
+ with_exchanges(
+ fun(#exchange{name = N}) ->
+ DB:set_active_suffix(N, upstream(x), A),
+ DB:set_active_suffix(N, upstream(y), A),
+ DB:prune_scratch(N, [upstream(y), upstream(z)]),
+ DB:set_active_suffix(N, upstream(y), B),
+ DB:set_active_suffix(N, upstream(z), A),
+ none = DB:get_active_suffix(N, upstream(x), none),
+ B = DB:get_active_suffix(N, upstream(y), none),
+ A = DB:get_active_suffix(N, upstream(z), none)
+ end).
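+%% Expected outcome: prune_scratch/2 drops upstream x (absent from the
+%% keep list), so its suffix falls back to the default none, while the
+%% later writes for y and z survive.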
+
+remove_credentials(Config) ->
+ Test0 = fun (In, Exp) ->
+ Act = rabbit_ct_broker_helpers:rpc(Config, 0,
+ rabbit_federation_upstream, remove_credentials, [In]),
+ Exp = Act
+ end,
+ Cat = fun (Bs) ->
+ list_to_binary(lists:append([binary_to_list(B) || B <- Bs]))
+ end,
+ Test = fun (Scheme, Rest) ->
+ Exp = Cat([Scheme, Rest]),
+ Test0(Exp, Exp),
+ Test0(Cat([Scheme, <<"user@">>, Rest]), Exp),
+ Test0(Cat([Scheme, <<"user:pass@">>, Rest]), Exp)
+ end,
+ Test(<<"amqp://">>, <<"">>),
+ Test(<<"amqp://">>, <<"localhost">>),
+ Test(<<"amqp://">>, <<"localhost/">>),
+ Test(<<"amqp://">>, <<"localhost/foo">>),
+ Test(<<"amqp://">>, <<"localhost:5672">>),
+ Test(<<"amqp://">>, <<"localhost:5672/foo">>),
+ Test(<<"amqps://">>, <<"localhost:5672/%2f">>),
+ ok.
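+%% Note: as exercised above, remove_credentials/1 strips any user or
+%% user:password part from the URI while leaving scheme, host, port and
+%% vhost untouched, e.g. <<"amqp://user:pass@localhost:5672/foo">>
+%% becomes <<"amqp://localhost:5672/foo">>.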
+
+get_connection_name(Config) ->
+ Amqqueue = rabbit_ct_broker_helpers:rpc(
+ Config, 0,
+ amqqueue, new, [rabbit_misc:r(<<"/">>, queue, <<"queue">>),
+ self(),
+ false,
+ false,
+ none,
+ [],
+ undefined,
+ #{},
+ classic]),
+ AmqqueueWithPolicy = amqqueue:set_policy(Amqqueue, [{name, <<"my.federation.policy">>}]),
+ AmqqueueWithEmptyPolicy = amqqueue:set_policy(Amqqueue, []),
+
+
+ <<"Federation link (upstream: my.upstream, policy: my.federation.policy)">> = rabbit_federation_link_util:get_connection_name(
+ #upstream{name = <<"my.upstream">>},
+ #upstream_params{x_or_q = AmqqueueWithPolicy}
+ ),
+ <<"Federation link (upstream: my.upstream, policy: my.federation.policy)">> = rabbit_federation_link_util:get_connection_name(
+ #upstream{name = <<"my.upstream">>},
+ #upstream_params{x_or_q = #exchange{policy = [{name, <<"my.federation.policy">>}]}}
+ ),
+ <<"Federation link">> = rabbit_federation_link_util:get_connection_name(
+ #upstream{},
+ #upstream_params{x_or_q = AmqqueueWithEmptyPolicy}
+ ),
+ <<"Federation link">> = rabbit_federation_link_util:get_connection_name(
+ #upstream{},
+ #upstream_params{x_or_q = #exchange{policy = []}}
+ ),
+ <<"Federation link">> = rabbit_federation_link_util:get_connection_name(
+ whatever,
+ whatever
+ ),
+ ok.
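+%% Note: the descriptive connection name is only built when both an
+%% upstream name and a policy name are available; any other input falls
+%% back to the generic <<"Federation link">>.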
+
+upstream_set_validation(_Config) ->
+ ?assertEqual(rabbit_federation_parameters:validate(<<"/">>, <<"federation-upstream-set">>,
+ <<"a-name">>,
+ [[{<<"upstream">>, <<"devtest1">>}],
+ [{<<"upstream">>, <<"devtest2">>}]],
+ <<"acting-user">>),
+ [[ok], [ok]]),
+ ?assertEqual(rabbit_federation_parameters:validate(<<"/">>, <<"federation-upstream-set">>,
+ <<"a-name">>,
+ [#{<<"upstream">> => <<"devtest3">>},
+ #{<<"upstream">> => <<"devtest4">>}],
+ <<"acting-user">>),
+ [[ok], [ok]]),
+ ok.
+
+upstream_validation(_Config) ->
+ ?assertEqual(rabbit_federation_parameters:validate(<<"/">>, <<"federation-upstream">>,
+ <<"a-name">>,
+ [{<<"uri">>, <<"amqp://127.0.0.1/%2f">>}],
+ <<"acting-user">>),
+ [ok]),
+ ?assertEqual(rabbit_federation_parameters:validate(<<"/">>, <<"federation-upstream">>,
+ <<"a-name">>,
+ #{<<"uri">> => <<"amqp://127.0.0.1/%2f">>},
+ <<"acting-user">>),
+ [ok]),
+ ok.
+
+with_exchanges(Fun) ->
+ rabbit_exchange:declare(r(?US_NAME), fanout, false, false, false, [],
+ <<"acting-user">>),
+ X = rabbit_exchange:declare(r(?DS_NAME), fanout, false, false, false, [],
+ <<"acting-user">>),
+ Fun(X),
+ %% Delete downstream first or it will recreate the upstream
+ rabbit_exchange:delete(r(?DS_NAME), false, <<"acting-user">>),
+ rabbit_exchange:delete(r(?US_NAME), false, <<"acting-user">>),
+ ok.
+
+add_binding(Ser, X, B) ->
+ rabbit_federation_exchange:add_binding(transaction, X, B),
+ rabbit_federation_exchange:add_binding(Ser, X, B).
+
+remove_bindings(Ser, X, Bs) ->
+ rabbit_federation_exchange:remove_bindings(transaction, X, Bs),
+ rabbit_federation_exchange:remove_bindings(Ser, X, Bs).
+
+r(Name) -> rabbit_misc:r(<<"/">>, exchange, Name).
+
+b(Key) ->
+ #binding{source = ?DS_NAME, destination = <<"whatever">>,
+ key = Key, args = []}.
+
+upstream(UpstreamName) ->
+ #upstream{name = atom_to_list(UpstreamName),
+ exchange_name = <<"upstream">>}.
diff --git a/deps/rabbitmq_federation_management/.gitignore b/deps/rabbitmq_federation_management/.gitignore
new file mode 100644
index 0000000000..04f4e80c4f
--- /dev/null
+++ b/deps/rabbitmq_federation_management/.gitignore
@@ -0,0 +1,17 @@
+.sw?
+.*.sw?
+*.beam
+/.erlang.mk/
+/cover/
+/deps/
+/doc/
+/ebin/
+/escript/
+/escript.lock
+/logs/
+/plugins/
+/plugins.lock
+/sbin/
+/sbin.lock
+
+/rabbitmq_federation_management.d
diff --git a/deps/rabbitmq_federation_management/.travis.yml b/deps/rabbitmq_federation_management/.travis.yml
new file mode 100644
index 0000000000..8d80d10052
--- /dev/null
+++ b/deps/rabbitmq_federation_management/.travis.yml
@@ -0,0 +1,61 @@
+# vim:sw=2:et:
+
+os: linux
+dist: xenial
+language: elixir
+notifications:
+ email:
+ recipients:
+ - alerts@rabbitmq.com
+ on_success: never
+ on_failure: always
+addons:
+ apt:
+ packages:
+ - awscli
+cache:
+ apt: true
+env:
+ global:
+ - secure: QjlyeWlfsQY8ejgtfLNJoKa8baKSs2+1uW+am1jE2/QVWsuJROLJbRpLnqmixtr6Xvlh1ayUTre+g3rtXHsmBQ5HEU51GvK/s3TA8JolkZs2vpx96UTKWFQaYHNyCSdovkXXon5jR+jN8D6z2HBgV5Emf0Fjx0w8/avhAt+Z8X8=
+ - secure: SPsBl7vHGNof4p3i0tRNxh8C22cmCDrcaukpp2Y+591TsSdKQsZ/hNFNylb7Qfuaavpizijb+Twk4gD3TPhTagYHZaviUFRNbYrv09Ck5UvU+8z5R/XVfusFZ1OsY4awX2DzSuL3qwNuXKCVcLGlK83Z/HswVXEi4j6VHLI29iU=
+
+ # $base_rmq_ref is used by rabbitmq-components.mk to select the
+ # appropriate branch for dependencies.
+ - base_rmq_ref=master
+
+elixir:
+ - '1.9'
+otp_release:
+ - '21.3'
+ - '22.2'
+
+install:
+ # This project being an Erlang one (we just set language to Elixir
+ # to ensure it is installed), we don't want Travis to run mix(1)
+ # automatically as it will break.
+ skip
+
+script:
+ # $current_rmq_ref is also used by rabbitmq-components.mk to select
+ # the appropriate branch for dependencies.
+ - make check-rabbitmq-components.mk
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+ - make xref
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+ - make tests
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+
+after_failure:
+ - |
+ cd "$TRAVIS_BUILD_DIR"
+ if test -d logs && test "$AWS_ACCESS_KEY_ID" && test "$AWS_SECRET_ACCESS_KEY"; then
+ archive_name="$(basename "$TRAVIS_REPO_SLUG")-$TRAVIS_JOB_NUMBER"
+
+ tar -c --transform "s/^logs/${archive_name}/" -f - logs | \
+ xz > "${archive_name}.tar.xz"
+
+ aws s3 cp "${archive_name}.tar.xz" s3://server-release-pipeline/travis-ci-logs/ \
+ --region eu-west-1 \
+ --acl public-read
+ fi
diff --git a/deps/rabbitmq_federation_management/CODE_OF_CONDUCT.md b/deps/rabbitmq_federation_management/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000000..08697906fd
--- /dev/null
+++ b/deps/rabbitmq_federation_management/CODE_OF_CONDUCT.md
@@ -0,0 +1,44 @@
+# Contributor Code of Conduct
+
+As contributors and maintainers of this project, and in the interest of fostering an open
+and welcoming community, we pledge to respect all people who contribute through reporting
+issues, posting feature requests, updating documentation, submitting pull requests or
+patches, and other activities.
+
+We are committed to making participation in this project a harassment-free experience for
+everyone, regardless of level of experience, gender, gender identity and expression,
+sexual orientation, disability, personal appearance, body size, race, ethnicity, age,
+religion, or nationality.
+
+Examples of unacceptable behavior by participants include:
+
+ * The use of sexualized language or imagery
+ * Personal attacks
+ * Trolling or insulting/derogatory comments
+ * Public or private harassment
+ * Publishing others' private information, such as physical or electronic addresses,
+ without explicit permission
+ * Other unethical or unprofessional conduct
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments,
+commits, code, wiki edits, issues, and other contributions that are not aligned to this
+Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors
+that they deem inappropriate, threatening, offensive, or harmful.
+
+By adopting this Code of Conduct, project maintainers commit themselves to fairly and
+consistently applying these principles to every aspect of managing this project. Project
+maintainers who do not follow or enforce the Code of Conduct may be permanently removed
+from the project team.
+
+This Code of Conduct applies both within project spaces and in public spaces when an
+individual is representing the project or its community.
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by
+contacting a project maintainer at [info@rabbitmq.com](mailto:info@rabbitmq.com). All complaints will
+be reviewed and investigated and will result in a response that is deemed necessary and
+appropriate to the circumstances. Maintainers are obligated to maintain confidentiality
+with regard to the reporter of an incident.
+
+This Code of Conduct is adapted from the
+[Contributor Covenant](https://contributor-covenant.org), version 1.3.0, available at
+[contributor-covenant.org/version/1/3/0/](https://contributor-covenant.org/version/1/3/0/)
diff --git a/deps/rabbitmq_federation_management/CONTRIBUTING.md b/deps/rabbitmq_federation_management/CONTRIBUTING.md
new file mode 100644
index 0000000000..592e7ced57
--- /dev/null
+++ b/deps/rabbitmq_federation_management/CONTRIBUTING.md
@@ -0,0 +1,99 @@
+Thank you for using RabbitMQ and for taking the time to contribute to the project.
+This document has two main parts:
+
+ * when and how to file GitHub issues for RabbitMQ projects
+ * how to submit pull requests
+
+They are intended to save you and RabbitMQ maintainers some time, so please
+take a moment to read through them.
+
+## Overview
+
+### GitHub issues
+
+The RabbitMQ team uses GitHub issues for _specific actionable items_ that
+engineers can work on. This assumes the following:
+
+* GitHub issues are not used for questions, investigations, root cause
+ analysis, discussions of potential issues, etc. (as defined by this team)
+* Enough information is provided by the reporter for maintainers to work with
+
+The team receives many questions through various venues every single
+day. Frequently, these questions do not include the necessary details
+the team needs to begin useful work. GitHub issues can very quickly
+turn into something impossible to navigate and make sense
+of. Because of this, questions, investigations, root cause analysis,
+and discussions of potential features are all considered to be
+[mailing list][rmq-users] material. If you are unsure where to begin,
+the [RabbitMQ users mailing list][rmq-users] is the right place.
+
+Getting all the details necessary to reproduce an issue, make a
+conclusion or even form a hypothesis about what's happening can take a
+fair amount of time. Please help others help you by providing a way to
+reproduce the behavior you're observing, or at least sharing as much
+relevant information as possible on the [RabbitMQ users mailing
+list][rmq-users].
+
+Please provide versions of the software used:
+
+ * RabbitMQ server
+ * Erlang
+ * Operating system version (and distribution, if applicable)
+ * All client libraries used
+ * RabbitMQ plugins (if applicable)
+
+The following information greatly helps in investigating and reproducing issues:
+
+ * RabbitMQ server logs
+ * A code example or terminal transcript that can be used to reproduce
+ * Full exception stack traces (a single line message is not enough!)
+ * `rabbitmqctl report` and `rabbitmqctl environment` output
+ * Other relevant details about the environment and workload, e.g. a traffic capture
+ * Feel free to edit out hostnames and other potentially sensitive information.
+
+To make collecting much of this and other environment information easier, use
+the [`rabbitmq-collect-env`][rmq-collect-env] script. It will produce an archive with
+server logs, operating system logs, output of certain diagnostics commands and so on.
+Please note that **no effort is made to scrub any information that may be sensitive**.
+
+### Pull Requests
+
+RabbitMQ projects use pull requests to discuss, collaborate on and accept code contributions.
+Pull requests are the primary place for discussing code changes.
+
+Here's the recommended workflow:
+
+ * [Fork the repository][github-fork] or repositories you plan on contributing to. If multiple
+ repositories are involved in addressing the same issue, please use the same branch name
+ in each repository
+ * Create a branch with a descriptive name in the relevant repositories
+ * Make your changes, run tests (usually with `make tests`), commit with a
+ [descriptive message][git-commit-msgs], push to your fork
+ * Submit pull requests with an explanation of what has been changed and **why**
+ * Submit a filled out and signed [Contributor Agreement][ca-agreement] if needed (see below)
+ * Be patient. We will get to your pull request eventually
+
+If what you are going to work on is a substantial change, please first
+ask the core team for their opinion on the [RabbitMQ users mailing list][rmq-users].
+
+## Code of Conduct
+
+See [CODE_OF_CONDUCT.md](./CODE_OF_CONDUCT.md).
+
+## Contributor Agreement
+
+If you want to contribute a non-trivial change, please submit a signed
+copy of our [Contributor Agreement][ca-agreement] around the time you
+submit your pull request. This will make it much easier (in some
+cases, possible) for the RabbitMQ team at Pivotal to merge your
+contribution.
+
+## Where to Ask Questions
+
+If something isn't clear, feel free to ask on our [mailing list][rmq-users].
+
+[rmq-collect-env]: https://github.com/rabbitmq/support-tools/blob/master/scripts/rabbitmq-collect-env
+[git-commit-msgs]: https://chris.beams.io/posts/git-commit/
+[rmq-users]: https://groups.google.com/forum/#!forum/rabbitmq-users
+[ca-agreement]: https://cla.pivotal.io/sign/rabbitmq
+[github-fork]: https://help.github.com/articles/fork-a-repo/
diff --git a/deps/rabbitmq_federation_management/LICENSE b/deps/rabbitmq_federation_management/LICENSE
new file mode 100644
index 0000000000..544a73ae03
--- /dev/null
+++ b/deps/rabbitmq_federation_management/LICENSE
@@ -0,0 +1,13 @@
+This package, the RabbitMQ Federation Management Plugin, is licensed under the MPL 2.0. For the
+MPL 2.0, please see LICENSE-MPL-RabbitMQ.
+
+This package makes use of the following third party libraries:
+
+jQuery - https://jquery.com/ - MIT license, see LICENSE-MIT-jQuery164
+EJS - https://embeddedjs.com/ - MIT license, see LICENSE-MIT-EJS10
+Sammy - https://code.quirkey.com/sammy/ - MIT license, see LICENSE-MIT-Sammy060
+cowboy - https://github.com/ninenines/cowboy - ISC license
+base64.js - https://code.google.com/p/stringencoders/ - BSD license, see LICENSE-BSD-base64js
+
+If you have any questions regarding licensing, please contact us at
+info@rabbitmq.com.
diff --git a/deps/rabbitmq_federation_management/LICENSE-APACHE2-ExplorerCanvas b/deps/rabbitmq_federation_management/LICENSE-APACHE2-ExplorerCanvas
new file mode 100644
index 0000000000..d645695673
--- /dev/null
+++ b/deps/rabbitmq_federation_management/LICENSE-APACHE2-ExplorerCanvas
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/deps/rabbitmq_federation_management/LICENSE-BSD-base64js b/deps/rabbitmq_federation_management/LICENSE-BSD-base64js
new file mode 100644
index 0000000000..bc116b0231
--- /dev/null
+++ b/deps/rabbitmq_federation_management/LICENSE-BSD-base64js
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2010 Nick Galbreath
+ * https://code.google.com/p/stringencoders/source/browse/#svn/trunk/javascript
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+*/
diff --git a/deps/rabbitmq_federation_management/LICENSE-MIT-EJS10 b/deps/rabbitmq_federation_management/LICENSE-MIT-EJS10
new file mode 100644
index 0000000000..f3bdcd8887
--- /dev/null
+++ b/deps/rabbitmq_federation_management/LICENSE-MIT-EJS10
@@ -0,0 +1,23 @@
+EJS - Embedded JavaScript
+
+Copyright (c) 2007 Edward Benson
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+
+
diff --git a/deps/rabbitmq_federation_management/LICENSE-MIT-Flot b/deps/rabbitmq_federation_management/LICENSE-MIT-Flot
new file mode 100644
index 0000000000..67f4625607
--- /dev/null
+++ b/deps/rabbitmq_federation_management/LICENSE-MIT-Flot
@@ -0,0 +1,22 @@
+Copyright (c) 2007-2013 IOLA and Ole Laursen
+
+Permission is hereby granted, free of charge, to any person
+obtaining a copy of this software and associated documentation
+files (the "Software"), to deal in the Software without
+restriction, including without limitation the rights to use,
+copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the
+Software is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+OTHER DEALINGS IN THE SOFTWARE.
diff --git a/deps/rabbitmq_federation_management/LICENSE-MIT-Sammy060 b/deps/rabbitmq_federation_management/LICENSE-MIT-Sammy060
new file mode 100644
index 0000000000..3debf5abe3
--- /dev/null
+++ b/deps/rabbitmq_federation_management/LICENSE-MIT-Sammy060
@@ -0,0 +1,25 @@
+Copyright (c) 2008 Aaron Quint, Quirkey NYC, LLC
+
+Permission is hereby granted, free of charge, to any person
+obtaining a copy of this software and associated documentation
+files (the "Software"), to deal in the Software without
+restriction, including without limitation the rights to use,
+copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the
+Software is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+OTHER DEALINGS IN THE SOFTWARE.
+
+
+
diff --git a/deps/rabbitmq_federation_management/LICENSE-MIT-jQuery164 b/deps/rabbitmq_federation_management/LICENSE-MIT-jQuery164
new file mode 100644
index 0000000000..c7fbb2f7e3
--- /dev/null
+++ b/deps/rabbitmq_federation_management/LICENSE-MIT-jQuery164
@@ -0,0 +1,21 @@
+Copyright (c) 2011 John Resig, https://jquery.com/
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
diff --git a/deps/rabbitmq_federation_management/LICENSE-MPL-RabbitMQ b/deps/rabbitmq_federation_management/LICENSE-MPL-RabbitMQ
new file mode 100644
index 0000000000..14e2f777f6
--- /dev/null
+++ b/deps/rabbitmq_federation_management/LICENSE-MPL-RabbitMQ
@@ -0,0 +1,373 @@
+Mozilla Public License Version 2.0
+==================================
+
+1. Definitions
+--------------
+
+1.1. "Contributor"
+ means each individual or legal entity that creates, contributes to
+ the creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+ means the combination of the Contributions of others (if any) used
+ by a Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+ means Source Code Form to which the initial Contributor has attached
+ the notice in Exhibit A, the Executable Form of such Source Code
+ Form, and Modifications of such Source Code Form, in each case
+ including portions thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ (a) that the initial Contributor has attached the notice described
+ in Exhibit B to the Covered Software; or
+
+ (b) that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the
+ terms of a Secondary License.
+
+1.6. "Executable Form"
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+ means a work that combines Covered Software with other material, in
+ a separate file or files, that is not Covered Software.
+
+1.8. "License"
+ means this document.
+
+1.9. "Licensable"
+ means having the right to grant, to the maximum extent possible,
+ whether at the time of the initial grant or subsequently, any and
+ all of the rights conveyed by this License.
+
+1.10. "Modifications"
+ means any of the following:
+
+ (a) any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered
+ Software; or
+
+ (b) any new file in Source Code Form that contains any Covered
+ Software.
+
+1.11. "Patent Claims" of a Contributor
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the
+ License, by the making, using, selling, offering for sale, having
+ made, import, or transfer of either its Contributions or its
+ Contributor Version.
+
+1.12. "Secondary License"
+ means either the GNU General Public License, Version 2.0, the GNU
+ Lesser General Public License, Version 2.1, the GNU Affero General
+ Public License, Version 3.0, or any later versions of those
+ licenses.
+
+1.13. "Source Code Form"
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that
+ controls, is controlled by, or is under common control with You. For
+ purposes of this definition, "control" means (a) the power, direct
+ or indirect, to cause the direction or management of such entity,
+ whether by contract or otherwise, or (b) ownership of more than
+ fifty percent (50%) of the outstanding shares or beneficial
+ ownership of such entity.
+
+2. License Grants and Conditions
+--------------------------------
+
+2.1. Grants
+
+Each Contributor hereby grants You a world-wide, royalty-free,
+non-exclusive license:
+
+(a) under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+(b) under Patent Claims of such Contributor to make, use, sell, offer
+ for sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+The licenses granted in Section 2.1 with respect to any Contribution
+become effective for each Contribution on the date the Contributor first
+distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+The licenses granted in this Section 2 are the only rights granted under
+this License. No additional rights or licenses will be implied from the
+distribution or licensing of Covered Software under this License.
+Notwithstanding Section 2.1(b) above, no patent license is granted by a
+Contributor:
+
+(a) for any code that a Contributor has removed from Covered Software;
+ or
+
+(b) for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+(c) under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+This License does not grant any rights in the trademarks, service marks,
+or logos of any Contributor (except as may be necessary to comply with
+the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+No Contributor makes additional grants as a result of Your choice to
+distribute the Covered Software under a subsequent version of this
+License (see Section 10.2) or under the terms of a Secondary License (if
+permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+Each Contributor represents that the Contributor believes its
+Contributions are its original creation(s) or it has sufficient rights
+to grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+This License is not intended to limit any rights You have under
+applicable copyright doctrines of fair use, fair dealing, or other
+equivalents.
+
+2.7. Conditions
+
+Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
+in Section 2.1.
+
+3. Responsibilities
+-------------------
+
+3.1. Distribution of Source Form
+
+All distribution of Covered Software in Source Code Form, including any
+Modifications that You create or to which You contribute, must be under
+the terms of this License. You must inform recipients that the Source
+Code Form of the Covered Software is governed by the terms of this
+License, and how they can obtain a copy of this License. You may not
+attempt to alter or restrict the recipients' rights in the Source Code
+Form.
+
+3.2. Distribution of Executable Form
+
+If You distribute Covered Software in Executable Form then:
+
+(a) such Covered Software must also be made available in Source Code
+ Form, as described in Section 3.1, and You must inform recipients of
+ the Executable Form how they can obtain a copy of such Source Code
+ Form by reasonable means in a timely manner, at a charge no more
+ than the cost of distribution to the recipient; and
+
+(b) You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter
+ the recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+You may create and distribute a Larger Work under terms of Your choice,
+provided that You also comply with the requirements of this License for
+the Covered Software. If the Larger Work is a combination of Covered
+Software with a work governed by one or more Secondary Licenses, and the
+Covered Software is not Incompatible With Secondary Licenses, this
+License permits You to additionally distribute such Covered Software
+under the terms of such Secondary License(s), so that the recipient of
+the Larger Work may, at their option, further distribute the Covered
+Software under the terms of either this License or such Secondary
+License(s).
+
+3.4. Notices
+
+You may not remove or alter the substance of any license notices
+(including copyright notices, patent notices, disclaimers of warranty,
+or limitations of liability) contained within the Source Code Form of
+the Covered Software, except that You may alter any license notices to
+the extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+You may choose to offer, and to charge a fee for, warranty, support,
+indemnity or liability obligations to one or more recipients of Covered
+Software. However, You may do so only on Your own behalf, and not on
+behalf of any Contributor. You must make it absolutely clear that any
+such warranty, support, indemnity, or liability obligation is offered by
+You alone, and You hereby agree to indemnify every Contributor for any
+liability incurred by such Contributor as a result of warranty, support,
+indemnity or liability terms You offer. You may include additional
+disclaimers of warranty and limitations of liability specific to any
+jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+---------------------------------------------------
+
+If it is impossible for You to comply with any of the terms of this
+License with respect to some or all of the Covered Software due to
+statute, judicial order, or regulation then You must: (a) comply with
+the terms of this License to the maximum extent possible; and (b)
+describe the limitations and the code they affect. Such description must
+be placed in a text file included with all distributions of the Covered
+Software under this License. Except to the extent prohibited by statute
+or regulation, such description must be sufficiently detailed for a
+recipient of ordinary skill to be able to understand it.
+
+5. Termination
+--------------
+
+5.1. The rights granted under this License will terminate automatically
+if You fail to comply with any of its terms. However, if You become
+compliant, then the rights granted under this License from a particular
+Contributor are reinstated (a) provisionally, unless and until such
+Contributor explicitly and finally terminates Your grants, and (b) on an
+ongoing basis, if such Contributor fails to notify You of the
+non-compliance by some reasonable means prior to 60 days after You have
+come back into compliance. Moreover, Your grants from a particular
+Contributor are reinstated on an ongoing basis if such Contributor
+notifies You of the non-compliance by some reasonable means, this is the
+first time You have received notice of non-compliance with this License
+from such Contributor, and You become compliant prior to 30 days after
+Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+infringement claim (excluding declaratory judgment actions,
+counter-claims, and cross-claims) alleging that a Contributor Version
+directly or indirectly infringes any patent, then the rights granted to
+You by any and all Contributors for the Covered Software under Section
+2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all
+end user license agreements (excluding distributors and resellers) which
+have been validly granted by You or Your distributors under this License
+prior to termination shall survive termination.
+
+************************************************************************
+* *
+* 6. Disclaimer of Warranty *
+* ------------------------- *
+* *
+* Covered Software is provided under this License on an "as is" *
+* basis, without warranty of any kind, either expressed, implied, or *
+* statutory, including, without limitation, warranties that the *
+* Covered Software is free of defects, merchantable, fit for a *
+* particular purpose or non-infringing. The entire risk as to the *
+* quality and performance of the Covered Software is with You. *
+* Should any Covered Software prove defective in any respect, You *
+* (not any Contributor) assume the cost of any necessary servicing, *
+* repair, or correction. This disclaimer of warranty constitutes an *
+* essential part of this License. No use of any Covered Software is *
+* authorized under this License except under this disclaimer. *
+* *
+************************************************************************
+
+************************************************************************
+* *
+* 7. Limitation of Liability *
+* -------------------------- *
+* *
+* Under no circumstances and under no legal theory, whether tort *
+* (including negligence), contract, or otherwise, shall any *
+* Contributor, or anyone who distributes Covered Software as *
+* permitted above, be liable to You for any direct, indirect, *
+* special, incidental, or consequential damages of any character *
+* including, without limitation, damages for lost profits, loss of *
+* goodwill, work stoppage, computer failure or malfunction, or any *
+* and all other commercial damages or losses, even if such party *
+* shall have been informed of the possibility of such damages. This *
+* limitation of liability shall not apply to liability for death or *
+* personal injury resulting from such party's negligence to the *
+* extent applicable law prohibits such limitation. Some *
+* jurisdictions do not allow the exclusion or limitation of *
+* incidental or consequential damages, so this exclusion and *
+* limitation may not apply to You. *
+* *
+************************************************************************
+
+8. Litigation
+-------------
+
+Any litigation relating to this License may be brought only in the
+courts of a jurisdiction where the defendant maintains its principal
+place of business and such litigation shall be governed by laws of that
+jurisdiction, without reference to its conflict-of-law provisions.
+Nothing in this Section shall prevent a party's ability to bring
+cross-claims or counter-claims.
+
+9. Miscellaneous
+----------------
+
+This License represents the complete agreement concerning the subject
+matter hereof. If any provision of this License is held to be
+unenforceable, such provision shall be reformed only to the extent
+necessary to make it enforceable. Any law or regulation which provides
+that the language of a contract shall be construed against the drafter
+shall not be used to construe this License against a Contributor.
+
+10. Versions of the License
+---------------------------
+
+10.1. New Versions
+
+Mozilla Foundation is the license steward. Except as provided in Section
+10.3, no one other than the license steward has the right to modify or
+publish new versions of this License. Each version will be given a
+distinguishing version number.
+
+10.2. Effect of New Versions
+
+You may distribute the Covered Software under the terms of the version
+of the License under which You originally received the Covered Software,
+or under the terms of any subsequent version published by the license
+steward.
+
+10.3. Modified Versions
+
+If you create software not governed by this License, and you want to
+create a new license for such software, you may create and use a
+modified version of this License if you rename the license and remove
+any references to the name of the license steward (except to note that
+such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+Licenses
+
+If You choose to distribute Source Code Form that is Incompatible With
+Secondary Licenses under the terms of this version of the License, the
+notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+-------------------------------------------
+
+ This Source Code Form is subject to the terms of the Mozilla Public
+ License, v. 2.0. If a copy of the MPL was not distributed with this
+ file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular
+file, then You may include the notice in a location (such as a LICENSE
+file in a relevant directory) where a recipient would be likely to look
+for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+---------------------------------------------------------
+
+ This Source Code Form is "Incompatible With Secondary Licenses", as
+ defined by the Mozilla Public License, v. 2.0.
diff --git a/deps/rabbitmq_federation_management/Makefile b/deps/rabbitmq_federation_management/Makefile
new file mode 100644
index 0000000000..7f81abbf44
--- /dev/null
+++ b/deps/rabbitmq_federation_management/Makefile
@@ -0,0 +1,21 @@
+PROJECT = rabbitmq_federation_management
+PROJECT_DESCRIPTION = RabbitMQ Federation Management
+
+define PROJECT_APP_EXTRA_KEYS
+ {broker_version_requirements, []}
+endef
+
+DEPS = rabbit_common rabbit rabbitmq_management rabbitmq_federation
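+# rabbitmq_management provides the HTTP API and UI this plugin extends;
+# rabbitmq_federation provides the link status information being reported.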
+TEST_DEPS = rabbitmq_ct_helpers rabbitmq_ct_client_helpers
+
+DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-plugin.mk
+DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk
+
+# FIXME: Use erlang.mk patched for RabbitMQ, while waiting for PRs to be
+# reviewed and merged.
+
+ERLANG_MK_REPO = https://github.com/rabbitmq/erlang.mk.git
+ERLANG_MK_COMMIT = rabbitmq-tmp
+
+include rabbitmq-components.mk
+include erlang.mk
diff --git a/deps/rabbitmq_federation_management/README.md b/deps/rabbitmq_federation_management/README.md
new file mode 100644
index 0000000000..182da17c5a
--- /dev/null
+++ b/deps/rabbitmq_federation_management/README.md
@@ -0,0 +1,44 @@
+# RabbitMQ Federation Management Plugin
+
+This plugin adds information on federation link status to the management
+plugin.
+
+
+## Installation
+
+In recent releases, this [plugin](https://www.rabbitmq.com/plugins.html) ships with RabbitMQ.
+[Enable](https://www.rabbitmq.com/plugins.html#basics) it with
+
+``` shell
+rabbitmq-plugins enable rabbitmq_management rabbitmq_federation_management
+```
+
+If you have a heterogeneous cluster (where the nodes have different
+plugins installed), this plugin should be installed on the same nodes as the
+management plugin.
+
+
+## Use over HTTP API
+
+The plugin adds HTTP API endpoints for retrieving federation links:
+
+ # lists all links
+ GET /api/federation-links
+ # lists links in a vhost
+ GET /api/federation-links/{vhost}
+
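+For example, with the management listener on its default port (15672) and the
+default `guest` user (both are assumptions; adjust for your deployment):
+
+``` shell
+# all federation links, across vhosts
+curl -s -u guest:guest http://localhost:15672/api/federation-links
+# links in the default vhost ("/" is URL-encoded as %2F)
+curl -s -u guest:guest http://localhost:15672/api/federation-links/%2F
+```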
+
+## Building From Source
+
+To [build the plugin](https://www.rabbitmq.com/plugin-development.html), use
+
+ make dist
+
+and find the built plugin under the `./plugins` directory.
+
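+For example, assuming GNU Make and Erlang/OTP are on `PATH`:
+
+    make dist
+    ls plugins/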
+
+## Copyright and License
+
+(c) 2007-2020 VMware, Inc. or its affiliates.
+
+See `LICENSE` for license information.
diff --git a/deps/rabbitmq_federation_management/erlang.mk b/deps/rabbitmq_federation_management/erlang.mk
new file mode 100644
index 0000000000..fce4be0b0a
--- /dev/null
+++ b/deps/rabbitmq_federation_management/erlang.mk
@@ -0,0 +1,7808 @@
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+.PHONY: all app apps deps search rel relup docs install-docs check tests clean distclean help erlang-mk
+
+ERLANG_MK_FILENAME := $(realpath $(lastword $(MAKEFILE_LIST)))
+export ERLANG_MK_FILENAME
+
+ERLANG_MK_VERSION = 2019.07.01-40-geb3e4b0
+ERLANG_MK_WITHOUT =
+
+# Make 3.81 and 3.82 are deprecated.
+
+ifeq ($(MAKELEVEL)$(MAKE_VERSION),03.81)
+$(warning Please upgrade to GNU Make 4 or later: https://erlang.mk/guide/installation.html)
+endif
+
+ifeq ($(MAKELEVEL)$(MAKE_VERSION),03.82)
+$(warning Please upgrade to GNU Make 4 or later: https://erlang.mk/guide/installation.html)
+endif
+
+# Core configuration.
+
+PROJECT ?= $(notdir $(CURDIR))
+PROJECT := $(strip $(PROJECT))
+
+PROJECT_VERSION ?= rolling
+PROJECT_MOD ?= $(PROJECT)_app
+PROJECT_ENV ?= []
+
+# Verbosity.
+
+V ?= 0
+
+verbose_0 = @
+verbose_2 = set -x;
+verbose = $(verbose_$(V))
+
+ifeq ($(V),3)
+SHELL := $(SHELL) -x
+endif
+
+gen_verbose_0 = @echo " GEN " $@;
+gen_verbose_2 = set -x;
+gen_verbose = $(gen_verbose_$(V))
+
+gen_verbose_esc_0 = @echo " GEN " $$@;
+gen_verbose_esc_2 = set -x;
+gen_verbose_esc = $(gen_verbose_esc_$(V))
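+# Summary: V=0 (the default) silences recipes, V=1 echoes them, V=2 prefixes
+# them with "set -x;", and V=3 additionally runs the shell itself with -x.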
+
+# Temporary files directory.
+
+ERLANG_MK_TMP ?= $(CURDIR)/.erlang.mk
+export ERLANG_MK_TMP
+
+# "erl" command.
+
+ERL = erl +A1 -noinput -boot no_dot_erlang
+
+# Platform detection.
+
+ifeq ($(PLATFORM),)
+UNAME_S := $(shell uname -s)
+
+ifeq ($(UNAME_S),Linux)
+PLATFORM = linux
+else ifeq ($(UNAME_S),Darwin)
+PLATFORM = darwin
+else ifeq ($(UNAME_S),SunOS)
+PLATFORM = solaris
+else ifeq ($(UNAME_S),GNU)
+PLATFORM = gnu
+else ifeq ($(UNAME_S),FreeBSD)
+PLATFORM = freebsd
+else ifeq ($(UNAME_S),NetBSD)
+PLATFORM = netbsd
+else ifeq ($(UNAME_S),OpenBSD)
+PLATFORM = openbsd
+else ifeq ($(UNAME_S),DragonFly)
+PLATFORM = dragonfly
+else ifeq ($(shell uname -o),Msys)
+PLATFORM = msys2
+else
+$(error Unable to detect platform. Please open a ticket with the output of uname -a.)
+endif
+
+export PLATFORM
+endif
+
+# Core targets.
+
+all:: deps app rel
+
+# Noop to avoid a Make warning when there's nothing to do.
+rel::
+ $(verbose) :
+
+relup:: deps app
+
+check:: tests
+
+clean:: clean-crashdump
+
+clean-crashdump:
+ifneq ($(wildcard erl_crash.dump),)
+ $(gen_verbose) rm -f erl_crash.dump
+endif
+
+distclean:: clean distclean-tmp
+
+$(ERLANG_MK_TMP):
+ $(verbose) mkdir -p $(ERLANG_MK_TMP)
+
+distclean-tmp:
+ $(gen_verbose) rm -rf $(ERLANG_MK_TMP)
+
+help::
+ $(verbose) printf "%s\n" \
+ "erlang.mk (version $(ERLANG_MK_VERSION)) is distributed under the terms of the ISC License." \
+ "Copyright (c) 2013-2016 Loïc Hoguin <essen@ninenines.eu>" \
+ "" \
+ "Usage: [V=1] $(MAKE) [target]..." \
+ "" \
+ "Core targets:" \
+ " all Run deps, app and rel targets in that order" \
+ " app Compile the project" \
+ " deps Fetch dependencies (if needed) and compile them" \
+ " fetch-deps Fetch dependencies recursively (if needed) without compiling them" \
+ " list-deps List dependencies recursively on stdout" \
+ " search q=... Search for a package in the built-in index" \
+ " rel Build a release for this project, if applicable" \
+ " docs Build the documentation for this project" \
+ " install-docs Install the man pages for this project" \
+ " check Compile and run all tests and analysis for this project" \
+ " tests Run the tests for this project" \
+ " clean Delete temporary and output files from most targets" \
+ " distclean Delete all temporary and output files" \
+ " help Display this help and exit" \
+ " erlang-mk Update erlang.mk to the latest version"
+
+# Core functions.
+
+empty :=
+space := $(empty) $(empty)
+tab := $(empty) $(empty)
+comma := ,
+
+define newline
+
+
+endef
+
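+# For example, $(call comma_list,a b c) expands to a,b,c.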
+define comma_list
+$(subst $(space),$(comma),$(strip $(1)))
+endef
+
+define escape_dquotes
+$(subst ",\",$1)
+endef
+
+# Adding erlang.mk to make Erlang scripts that call init:get_plain_arguments() happy.
+define erlang
+$(ERL) $2 -pz $(ERLANG_MK_TMP)/rebar/ebin -eval "$(subst $(newline),,$(call escape_dquotes,$1))" -- erlang.mk
+endef
+
+ifeq ($(PLATFORM),msys2)
+core_native_path = $(shell cygpath -m $1)
+else
+core_native_path = $1
+endif
+
+core_http_get = curl -Lf$(if $(filter-out 0,$(V)),,s)o $(call core_native_path,$1) $2
+
+core_eq = $(and $(findstring $(1),$(2)),$(findstring $(2),$(1)))
+
+# We skip files that contain spaces because they end up causing issues.
+core_find = $(if $(wildcard $1),$(shell find $(1:%/=%) \( -type l -o -type f \) -name $(subst *,\*,$2) | grep -v " "))
+
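+# Pure-make ASCII lowercasing: $(call core_lc,HELLO) expands to hello.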
+core_lc = $(subst A,a,$(subst B,b,$(subst C,c,$(subst D,d,$(subst E,e,$(subst F,f,$(subst G,g,$(subst H,h,$(subst I,i,$(subst J,j,$(subst K,k,$(subst L,l,$(subst M,m,$(subst N,n,$(subst O,o,$(subst P,p,$(subst Q,q,$(subst R,r,$(subst S,s,$(subst T,t,$(subst U,u,$(subst V,v,$(subst W,w,$(subst X,x,$(subst Y,y,$(subst Z,z,$(1)))))))))))))))))))))))))))
+
+core_ls = $(filter-out $(1),$(shell echo $(1)))
+
+# @todo Use a solution that does not require using perl.
+core_relpath = $(shell perl -e 'use File::Spec; print File::Spec->abs2rel(@ARGV) . "\n"' $1 $2)
+
+define core_render
+ printf -- '$(subst $(newline),\n,$(subst %,%%,$(subst ','\'',$(subst $(tab),$(WS),$(call $(1))))))\n' > $(2)
+endef
+
+# Automated update.
+
+ERLANG_MK_REPO ?= https://github.com/ninenines/erlang.mk
+ERLANG_MK_COMMIT ?=
+ERLANG_MK_BUILD_CONFIG ?= build.config
+ERLANG_MK_BUILD_DIR ?= .erlang.mk.build
+
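+# "make erlang-mk" regenerates ./erlang.mk from ERLANG_MK_REPO, optionally
+# pinned to ERLANG_MK_COMMIT.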
+erlang-mk: WITHOUT ?= $(ERLANG_MK_WITHOUT)
+erlang-mk:
+ifdef ERLANG_MK_COMMIT
+ $(verbose) git clone $(ERLANG_MK_REPO) $(ERLANG_MK_BUILD_DIR)
+ $(verbose) cd $(ERLANG_MK_BUILD_DIR) && git checkout $(ERLANG_MK_COMMIT)
+else
+ $(verbose) git clone --depth 1 $(ERLANG_MK_REPO) $(ERLANG_MK_BUILD_DIR)
+endif
+ $(verbose) if [ -f $(ERLANG_MK_BUILD_CONFIG) ]; then cp $(ERLANG_MK_BUILD_CONFIG) $(ERLANG_MK_BUILD_DIR)/build.config; fi
+ $(gen_verbose) $(MAKE) --no-print-directory -C $(ERLANG_MK_BUILD_DIR) WITHOUT='$(strip $(WITHOUT))' UPGRADE=1
+ $(verbose) cp $(ERLANG_MK_BUILD_DIR)/erlang.mk ./erlang.mk
+ $(verbose) rm -rf $(ERLANG_MK_BUILD_DIR)
+ $(verbose) rm -rf $(ERLANG_MK_TMP)
+
+# The erlang.mk package index is bundled in the default erlang.mk build.
+# Search for the string "copyright" to skip to the rest of the code.
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-kerl
+
+KERL_INSTALL_DIR ?= $(HOME)/erlang
+
+ifeq ($(strip $(KERL)),)
+KERL := $(ERLANG_MK_TMP)/kerl/kerl
+endif
+
+KERL_DIR = $(ERLANG_MK_TMP)/kerl
+
+export KERL
+
+KERL_GIT ?= https://github.com/kerl/kerl
+KERL_COMMIT ?= master
+
+KERL_MAKEFLAGS ?=
+
+OTP_GIT ?= https://github.com/erlang/otp
+
+define kerl_otp_target
+$(KERL_INSTALL_DIR)/$(1): $(KERL)
+ $(verbose) if [ ! -d $$@ ]; then \
+ MAKEFLAGS="$(KERL_MAKEFLAGS)" $(KERL) build git $(OTP_GIT) $(1) $(1); \
+ $(KERL) install $(1) $(KERL_INSTALL_DIR)/$(1); \
+ fi
+endef
+
+define kerl_hipe_target
+$(KERL_INSTALL_DIR)/$1-native: $(KERL)
+ $(verbose) if [ ! -d $$@ ]; then \
+ KERL_CONFIGURE_OPTIONS=--enable-native-libs \
+ MAKEFLAGS="$(KERL_MAKEFLAGS)" $(KERL) build git $(OTP_GIT) $1 $1-native; \
+ $(KERL) install $1-native $(KERL_INSTALL_DIR)/$1-native; \
+ fi
+endef
+
+$(KERL): $(KERL_DIR)
+
+$(KERL_DIR): | $(ERLANG_MK_TMP)
+ $(gen_verbose) git clone --depth 1 $(KERL_GIT) $(ERLANG_MK_TMP)/kerl
+ $(verbose) cd $(ERLANG_MK_TMP)/kerl && git checkout $(KERL_COMMIT)
+ $(verbose) chmod +x $(KERL)
+
+distclean:: distclean-kerl
+
+distclean-kerl:
+ $(gen_verbose) rm -rf $(KERL_DIR)
+
+# Allow users to select which version of Erlang/OTP to use for a project.
+
+ifneq ($(strip $(LATEST_ERLANG_OTP)),)
+# In some environments it is necessary to filter out master.
+ERLANG_OTP := $(notdir $(lastword $(sort\
+ $(filter-out $(KERL_INSTALL_DIR)/master $(KERL_INSTALL_DIR)/OTP_R%,\
+ $(filter-out %-rc1 %-rc2 %-rc3,$(wildcard $(KERL_INSTALL_DIR)/*[^-native]))))))
+endif
+
+ERLANG_OTP ?=
+ERLANG_HIPE ?=
+
+# Use kerl to enforce a specific Erlang/OTP version for a project.
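+# For example, "make ERLANG_OTP=OTP-22.3" builds that git ref from OTP_GIT via
+# kerl on first use and prepends its bin directory to PATH.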
+ifneq ($(strip $(ERLANG_OTP)),)
+export PATH := $(KERL_INSTALL_DIR)/$(ERLANG_OTP)/bin:$(PATH)
+SHELL := env PATH=$(PATH) $(SHELL)
+$(eval $(call kerl_otp_target,$(ERLANG_OTP)))
+
+# Build Erlang/OTP only if it doesn't already exist.
+ifeq ($(wildcard $(KERL_INSTALL_DIR)/$(ERLANG_OTP))$(BUILD_ERLANG_OTP),)
+$(info Building Erlang/OTP $(ERLANG_OTP)... Please wait...)
+$(shell $(MAKE) $(KERL_INSTALL_DIR)/$(ERLANG_OTP) ERLANG_OTP=$(ERLANG_OTP) BUILD_ERLANG_OTP=1 >&2)
+endif
+
+else
+# Same for a HiPE enabled VM.
+ifneq ($(strip $(ERLANG_HIPE)),)
+export PATH := $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native/bin:$(PATH)
+SHELL := env PATH=$(PATH) $(SHELL)
+$(eval $(call kerl_hipe_target,$(ERLANG_HIPE)))
+
+# Build Erlang/OTP only if it doesn't already exist.
+ifeq ($(wildcard $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native)$(BUILD_ERLANG_OTP),)
+$(info Building HiPE-enabled Erlang/OTP $(ERLANG_HIPE)... Please wait...)
+$(shell $(MAKE) $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native ERLANG_HIPE=$(ERLANG_HIPE) BUILD_ERLANG_OTP=1 >&2)
+endif
+
+endif
+endif
+
+PACKAGES += aberth
+pkg_aberth_name = aberth
+pkg_aberth_description = Generic BERT-RPC server in Erlang
+pkg_aberth_homepage = https://github.com/a13x/aberth
+pkg_aberth_fetch = git
+pkg_aberth_repo = https://github.com/a13x/aberth
+pkg_aberth_commit = master
+
+PACKAGES += active
+pkg_active_name = active
+pkg_active_description = Active development for Erlang: rebuild and reload source/binary files while the VM is running
+pkg_active_homepage = https://github.com/proger/active
+pkg_active_fetch = git
+pkg_active_repo = https://github.com/proger/active
+pkg_active_commit = master
+
+PACKAGES += actordb_core
+pkg_actordb_core_name = actordb_core
+pkg_actordb_core_description = ActorDB main source
+pkg_actordb_core_homepage = http://www.actordb.com/
+pkg_actordb_core_fetch = git
+pkg_actordb_core_repo = https://github.com/biokoda/actordb_core
+pkg_actordb_core_commit = master
+
+PACKAGES += actordb_thrift
+pkg_actordb_thrift_name = actordb_thrift
+pkg_actordb_thrift_description = Thrift API for ActorDB
+pkg_actordb_thrift_homepage = http://www.actordb.com/
+pkg_actordb_thrift_fetch = git
+pkg_actordb_thrift_repo = https://github.com/biokoda/actordb_thrift
+pkg_actordb_thrift_commit = master
+
+PACKAGES += aleppo
+pkg_aleppo_name = aleppo
+pkg_aleppo_description = Alternative Erlang Pre-Processor
+pkg_aleppo_homepage = https://github.com/ErlyORM/aleppo
+pkg_aleppo_fetch = git
+pkg_aleppo_repo = https://github.com/ErlyORM/aleppo
+pkg_aleppo_commit = master
+
+PACKAGES += alog
+pkg_alog_name = alog
+pkg_alog_description = Simply the best logging framework for Erlang
+pkg_alog_homepage = https://github.com/siberian-fast-food/alogger
+pkg_alog_fetch = git
+pkg_alog_repo = https://github.com/siberian-fast-food/alogger
+pkg_alog_commit = master
+
+PACKAGES += amqp_client
+pkg_amqp_client_name = amqp_client
+pkg_amqp_client_description = RabbitMQ Erlang AMQP client
+pkg_amqp_client_homepage = https://www.rabbitmq.com/erlang-client-user-guide.html
+pkg_amqp_client_fetch = git
+pkg_amqp_client_repo = https://github.com/rabbitmq/rabbitmq-erlang-client.git
+pkg_amqp_client_commit = master
+
+PACKAGES += annotations
+pkg_annotations_name = annotations
+pkg_annotations_description = Simple code instrumentation utilities
+pkg_annotations_homepage = https://github.com/hyperthunk/annotations
+pkg_annotations_fetch = git
+pkg_annotations_repo = https://github.com/hyperthunk/annotations
+pkg_annotations_commit = master
+
+PACKAGES += antidote
+pkg_antidote_name = antidote
+pkg_antidote_description = Large-scale computation without synchronisation
+pkg_antidote_homepage = https://syncfree.lip6.fr/
+pkg_antidote_fetch = git
+pkg_antidote_repo = https://github.com/SyncFree/antidote
+pkg_antidote_commit = master
+
+PACKAGES += apns
+pkg_apns_name = apns
+pkg_apns_description = Apple Push Notification Server for Erlang
+pkg_apns_homepage = http://inaka.github.com/apns4erl
+pkg_apns_fetch = git
+pkg_apns_repo = https://github.com/inaka/apns4erl
+pkg_apns_commit = master
+
+PACKAGES += asciideck
+pkg_asciideck_name = asciideck
+pkg_asciideck_description = Asciidoc for Erlang.
+pkg_asciideck_homepage = https://ninenines.eu
+pkg_asciideck_fetch = git
+pkg_asciideck_repo = https://github.com/ninenines/asciideck
+pkg_asciideck_commit = master
+
+PACKAGES += azdht
+pkg_azdht_name = azdht
+pkg_azdht_description = Azureus Distributed Hash Table (DHT) in Erlang
+pkg_azdht_homepage = https://github.com/arcusfelis/azdht
+pkg_azdht_fetch = git
+pkg_azdht_repo = https://github.com/arcusfelis/azdht
+pkg_azdht_commit = master
+
+PACKAGES += backoff
+pkg_backoff_name = backoff
+pkg_backoff_description = Simple exponential backoffs in Erlang
+pkg_backoff_homepage = https://github.com/ferd/backoff
+pkg_backoff_fetch = git
+pkg_backoff_repo = https://github.com/ferd/backoff
+pkg_backoff_commit = master
+
+PACKAGES += barrel_tcp
+pkg_barrel_tcp_name = barrel_tcp
+pkg_barrel_tcp_description = barrel is a generic TCP acceptor pool with low latency in Erlang.
+pkg_barrel_tcp_homepage = https://github.com/benoitc-attic/barrel_tcp
+pkg_barrel_tcp_fetch = git
+pkg_barrel_tcp_repo = https://github.com/benoitc-attic/barrel_tcp
+pkg_barrel_tcp_commit = master
+
+PACKAGES += basho_bench
+pkg_basho_bench_name = basho_bench
+pkg_basho_bench_description = A load-generation and testing tool for basically whatever you can write a returning Erlang function for.
+pkg_basho_bench_homepage = https://github.com/basho/basho_bench
+pkg_basho_bench_fetch = git
+pkg_basho_bench_repo = https://github.com/basho/basho_bench
+pkg_basho_bench_commit = master
+
+PACKAGES += bcrypt
+pkg_bcrypt_name = bcrypt
+pkg_bcrypt_description = Bcrypt Erlang / C library
+pkg_bcrypt_homepage = https://github.com/erlangpack/bcrypt
+pkg_bcrypt_fetch = git
+pkg_bcrypt_repo = https://github.com/erlangpack/bcrypt.git
+pkg_bcrypt_commit = master
+
+PACKAGES += beam
+pkg_beam_name = beam
+pkg_beam_description = BEAM emulator written in Erlang
+pkg_beam_homepage = https://github.com/tonyrog/beam
+pkg_beam_fetch = git
+pkg_beam_repo = https://github.com/tonyrog/beam
+pkg_beam_commit = master
+
+PACKAGES += beanstalk
+pkg_beanstalk_name = beanstalk
+pkg_beanstalk_description = An Erlang client for beanstalkd
+pkg_beanstalk_homepage = https://github.com/tim/erlang-beanstalk
+pkg_beanstalk_fetch = git
+pkg_beanstalk_repo = https://github.com/tim/erlang-beanstalk
+pkg_beanstalk_commit = master
+
+PACKAGES += bear
+pkg_bear_name = bear
+pkg_bear_description = a set of statistics functions for erlang
+pkg_bear_homepage = https://github.com/boundary/bear
+pkg_bear_fetch = git
+pkg_bear_repo = https://github.com/boundary/bear
+pkg_bear_commit = master
+
+PACKAGES += bertconf
+pkg_bertconf_name = bertconf
+pkg_bertconf_description = Make ETS tables out of static BERT files that are auto-reloaded
+pkg_bertconf_homepage = https://github.com/ferd/bertconf
+pkg_bertconf_fetch = git
+pkg_bertconf_repo = https://github.com/ferd/bertconf
+pkg_bertconf_commit = master
+
+PACKAGES += bifrost
+pkg_bifrost_name = bifrost
+pkg_bifrost_description = Erlang FTP Server Framework
+pkg_bifrost_homepage = https://github.com/thorstadt/bifrost
+pkg_bifrost_fetch = git
+pkg_bifrost_repo = https://github.com/thorstadt/bifrost
+pkg_bifrost_commit = master
+
+PACKAGES += binpp
+pkg_binpp_name = binpp
+pkg_binpp_description = Erlang Binary Pretty Printer
+pkg_binpp_homepage = https://github.com/jtendo/binpp
+pkg_binpp_fetch = git
+pkg_binpp_repo = https://github.com/jtendo/binpp
+pkg_binpp_commit = master
+
+PACKAGES += bisect
+pkg_bisect_name = bisect
+pkg_bisect_description = Ordered fixed-size binary dictionary in Erlang
+pkg_bisect_homepage = https://github.com/knutin/bisect
+pkg_bisect_fetch = git
+pkg_bisect_repo = https://github.com/knutin/bisect
+pkg_bisect_commit = master
+
+PACKAGES += bitcask
+pkg_bitcask_name = bitcask
+pkg_bitcask_description = because you need another key/value storage engine
+pkg_bitcask_homepage = https://github.com/basho/bitcask
+pkg_bitcask_fetch = git
+pkg_bitcask_repo = https://github.com/basho/bitcask
+pkg_bitcask_commit = develop
+
+PACKAGES += bitstore
+pkg_bitstore_name = bitstore
+pkg_bitstore_description = A document based ontology development environment
+pkg_bitstore_homepage = https://github.com/bdionne/bitstore
+pkg_bitstore_fetch = git
+pkg_bitstore_repo = https://github.com/bdionne/bitstore
+pkg_bitstore_commit = master
+
+PACKAGES += bootstrap
+pkg_bootstrap_name = bootstrap
+pkg_bootstrap_description = A simple, yet powerful Erlang cluster bootstrapping application.
+pkg_bootstrap_homepage = https://github.com/schlagert/bootstrap
+pkg_bootstrap_fetch = git
+pkg_bootstrap_repo = https://github.com/schlagert/bootstrap
+pkg_bootstrap_commit = master
+
+PACKAGES += boss
+pkg_boss_name = boss
+pkg_boss_description = Erlang web MVC, now featuring Comet
+pkg_boss_homepage = https://github.com/ChicagoBoss/ChicagoBoss
+pkg_boss_fetch = git
+pkg_boss_repo = https://github.com/ChicagoBoss/ChicagoBoss
+pkg_boss_commit = master
+
+PACKAGES += boss_db
+pkg_boss_db_name = boss_db
+pkg_boss_db_description = BossDB: a sharded, caching, pooling, evented ORM for Erlang
+pkg_boss_db_homepage = https://github.com/ErlyORM/boss_db
+pkg_boss_db_fetch = git
+pkg_boss_db_repo = https://github.com/ErlyORM/boss_db
+pkg_boss_db_commit = master
+
+PACKAGES += brod
+pkg_brod_name = brod
+pkg_brod_description = Kafka client in Erlang
+pkg_brod_homepage = https://github.com/klarna/brod
+pkg_brod_fetch = git
+pkg_brod_repo = https://github.com/klarna/brod.git
+pkg_brod_commit = master
+
+PACKAGES += bson
+pkg_bson_name = bson
+pkg_bson_description = BSON documents in Erlang, see bsonspec.org
+pkg_bson_homepage = https://github.com/comtihon/bson-erlang
+pkg_bson_fetch = git
+pkg_bson_repo = https://github.com/comtihon/bson-erlang
+pkg_bson_commit = master
+
+PACKAGES += bullet
+pkg_bullet_name = bullet
+pkg_bullet_description = Simple, reliable, efficient streaming for Cowboy.
+pkg_bullet_homepage = http://ninenines.eu
+pkg_bullet_fetch = git
+pkg_bullet_repo = https://github.com/ninenines/bullet
+pkg_bullet_commit = master
+
+PACKAGES += cache
+pkg_cache_name = cache
+pkg_cache_description = Erlang in-memory cache
+pkg_cache_homepage = https://github.com/fogfish/cache
+pkg_cache_fetch = git
+pkg_cache_repo = https://github.com/fogfish/cache
+pkg_cache_commit = master
+
+PACKAGES += cake
+pkg_cake_name = cake
+pkg_cake_description = Really simple terminal colorization
+pkg_cake_homepage = https://github.com/darach/cake-erl
+pkg_cake_fetch = git
+pkg_cake_repo = https://github.com/darach/cake-erl
+pkg_cake_commit = master
+
+PACKAGES += carotene
+pkg_carotene_name = carotene
+pkg_carotene_description = Real-time server
+pkg_carotene_homepage = https://github.com/carotene/carotene
+pkg_carotene_fetch = git
+pkg_carotene_repo = https://github.com/carotene/carotene
+pkg_carotene_commit = master
+
+PACKAGES += cberl
+pkg_cberl_name = cberl
+pkg_cberl_description = NIF based Erlang bindings for Couchbase
+pkg_cberl_homepage = https://github.com/chitika/cberl
+pkg_cberl_fetch = git
+pkg_cberl_repo = https://github.com/chitika/cberl
+pkg_cberl_commit = master
+
+PACKAGES += cecho
+pkg_cecho_name = cecho
+pkg_cecho_description = An ncurses library for Erlang
+pkg_cecho_homepage = https://github.com/mazenharake/cecho
+pkg_cecho_fetch = git
+pkg_cecho_repo = https://github.com/mazenharake/cecho
+pkg_cecho_commit = master
+
+PACKAGES += cferl
+pkg_cferl_name = cferl
+pkg_cferl_description = Rackspace / Open Stack Cloud Files Erlang Client
+pkg_cferl_homepage = https://github.com/ddossot/cferl
+pkg_cferl_fetch = git
+pkg_cferl_repo = https://github.com/ddossot/cferl
+pkg_cferl_commit = master
+
+PACKAGES += chaos_monkey
+pkg_chaos_monkey_name = chaos_monkey
+pkg_chaos_monkey_description = This is The CHAOS MONKEY. It will kill your processes.
+pkg_chaos_monkey_homepage = https://github.com/dLuna/chaos_monkey
+pkg_chaos_monkey_fetch = git
+pkg_chaos_monkey_repo = https://github.com/dLuna/chaos_monkey
+pkg_chaos_monkey_commit = master
+
+PACKAGES += check_node
+pkg_check_node_name = check_node
+pkg_check_node_description = Nagios Scripts for monitoring Riak
+pkg_check_node_homepage = https://github.com/basho-labs/riak_nagios
+pkg_check_node_fetch = git
+pkg_check_node_repo = https://github.com/basho-labs/riak_nagios
+pkg_check_node_commit = master
+
+PACKAGES += chronos
+pkg_chronos_name = chronos
+pkg_chronos_description = Timer module for Erlang that makes it easy to abstract time out of the tests.
+pkg_chronos_homepage = https://github.com/lehoff/chronos
+pkg_chronos_fetch = git
+pkg_chronos_repo = https://github.com/lehoff/chronos
+pkg_chronos_commit = master
+
+PACKAGES += chumak
+pkg_chumak_name = chumak
+pkg_chumak_description = Pure Erlang implementation of ZeroMQ Message Transport Protocol.
+pkg_chumak_homepage = http://choven.ca
+pkg_chumak_fetch = git
+pkg_chumak_repo = https://github.com/chovencorp/chumak
+pkg_chumak_commit = master
+
+PACKAGES += cl
+pkg_cl_name = cl
+pkg_cl_description = OpenCL binding for Erlang
+pkg_cl_homepage = https://github.com/tonyrog/cl
+pkg_cl_fetch = git
+pkg_cl_repo = https://github.com/tonyrog/cl
+pkg_cl_commit = master
+
+PACKAGES += clique
+pkg_clique_name = clique
+pkg_clique_description = CLI Framework for Erlang
+pkg_clique_homepage = https://github.com/basho/clique
+pkg_clique_fetch = git
+pkg_clique_repo = https://github.com/basho/clique
+pkg_clique_commit = develop
+
+PACKAGES += cloudi_core
+pkg_cloudi_core_name = cloudi_core
+pkg_cloudi_core_description = CloudI internal service runtime
+pkg_cloudi_core_homepage = http://cloudi.org/
+pkg_cloudi_core_fetch = git
+pkg_cloudi_core_repo = https://github.com/CloudI/cloudi_core
+pkg_cloudi_core_commit = master
+
+PACKAGES += cloudi_service_api_requests
+pkg_cloudi_service_api_requests_name = cloudi_service_api_requests
+pkg_cloudi_service_api_requests_description = CloudI Service API requests (JSON-RPC/Erlang-term support)
+pkg_cloudi_service_api_requests_homepage = http://cloudi.org/
+pkg_cloudi_service_api_requests_fetch = git
+pkg_cloudi_service_api_requests_repo = https://github.com/CloudI/cloudi_service_api_requests
+pkg_cloudi_service_api_requests_commit = master
+
+PACKAGES += cloudi_service_db
+pkg_cloudi_service_db_name = cloudi_service_db
+pkg_cloudi_service_db_description = CloudI Database (in-memory/testing/generic)
+pkg_cloudi_service_db_homepage = http://cloudi.org/
+pkg_cloudi_service_db_fetch = git
+pkg_cloudi_service_db_repo = https://github.com/CloudI/cloudi_service_db
+pkg_cloudi_service_db_commit = master
+
+PACKAGES += cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_name = cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_description = Cassandra CloudI Service
+pkg_cloudi_service_db_cassandra_homepage = http://cloudi.org/
+pkg_cloudi_service_db_cassandra_fetch = git
+pkg_cloudi_service_db_cassandra_repo = https://github.com/CloudI/cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_commit = master
+
+PACKAGES += cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_name = cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_description = Cassandra CQL CloudI Service
+pkg_cloudi_service_db_cassandra_cql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_cassandra_cql_fetch = git
+pkg_cloudi_service_db_cassandra_cql_repo = https://github.com/CloudI/cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_commit = master
+
+PACKAGES += cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_name = cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_description = CouchDB CloudI Service
+pkg_cloudi_service_db_couchdb_homepage = http://cloudi.org/
+pkg_cloudi_service_db_couchdb_fetch = git
+pkg_cloudi_service_db_couchdb_repo = https://github.com/CloudI/cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_commit = master
+
+PACKAGES += cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_name = cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_description = elasticsearch CloudI Service
+pkg_cloudi_service_db_elasticsearch_homepage = http://cloudi.org/
+pkg_cloudi_service_db_elasticsearch_fetch = git
+pkg_cloudi_service_db_elasticsearch_repo = https://github.com/CloudI/cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_commit = master
+
+PACKAGES += cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_name = cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_description = memcached CloudI Service
+pkg_cloudi_service_db_memcached_homepage = http://cloudi.org/
+pkg_cloudi_service_db_memcached_fetch = git
+pkg_cloudi_service_db_memcached_repo = https://github.com/CloudI/cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_commit = master
+
+PACKAGES += cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_name = cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_description = MySQL CloudI Service
+pkg_cloudi_service_db_mysql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_mysql_fetch = git
+pkg_cloudi_service_db_mysql_repo = https://github.com/CloudI/cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_commit = master
+
+PACKAGES += cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_name = cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_description = PostgreSQL CloudI Service
+pkg_cloudi_service_db_pgsql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_pgsql_fetch = git
+pkg_cloudi_service_db_pgsql_repo = https://github.com/CloudI/cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_commit = master
+
+PACKAGES += cloudi_service_db_riak
+pkg_cloudi_service_db_riak_name = cloudi_service_db_riak
+pkg_cloudi_service_db_riak_description = Riak CloudI Service
+pkg_cloudi_service_db_riak_homepage = http://cloudi.org/
+pkg_cloudi_service_db_riak_fetch = git
+pkg_cloudi_service_db_riak_repo = https://github.com/CloudI/cloudi_service_db_riak
+pkg_cloudi_service_db_riak_commit = master
+
+PACKAGES += cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_name = cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_description = Tokyo Tyrant CloudI Service
+pkg_cloudi_service_db_tokyotyrant_homepage = http://cloudi.org/
+pkg_cloudi_service_db_tokyotyrant_fetch = git
+pkg_cloudi_service_db_tokyotyrant_repo = https://github.com/CloudI/cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_commit = master
+
+PACKAGES += cloudi_service_filesystem
+pkg_cloudi_service_filesystem_name = cloudi_service_filesystem
+pkg_cloudi_service_filesystem_description = Filesystem CloudI Service
+pkg_cloudi_service_filesystem_homepage = http://cloudi.org/
+pkg_cloudi_service_filesystem_fetch = git
+pkg_cloudi_service_filesystem_repo = https://github.com/CloudI/cloudi_service_filesystem
+pkg_cloudi_service_filesystem_commit = master
+
+PACKAGES += cloudi_service_http_client
+pkg_cloudi_service_http_client_name = cloudi_service_http_client
+pkg_cloudi_service_http_client_description = HTTP client CloudI Service
+pkg_cloudi_service_http_client_homepage = http://cloudi.org/
+pkg_cloudi_service_http_client_fetch = git
+pkg_cloudi_service_http_client_repo = https://github.com/CloudI/cloudi_service_http_client
+pkg_cloudi_service_http_client_commit = master
+
+PACKAGES += cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_name = cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_description = cowboy HTTP/HTTPS CloudI Service
+pkg_cloudi_service_http_cowboy_homepage = http://cloudi.org/
+pkg_cloudi_service_http_cowboy_fetch = git
+pkg_cloudi_service_http_cowboy_repo = https://github.com/CloudI/cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_commit = master
+
+PACKAGES += cloudi_service_http_elli
+pkg_cloudi_service_http_elli_name = cloudi_service_http_elli
+pkg_cloudi_service_http_elli_description = elli HTTP CloudI Service
+pkg_cloudi_service_http_elli_homepage = http://cloudi.org/
+pkg_cloudi_service_http_elli_fetch = git
+pkg_cloudi_service_http_elli_repo = https://github.com/CloudI/cloudi_service_http_elli
+pkg_cloudi_service_http_elli_commit = master
+
+PACKAGES += cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_name = cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_description = Map/Reduce CloudI Service
+pkg_cloudi_service_map_reduce_homepage = http://cloudi.org/
+pkg_cloudi_service_map_reduce_fetch = git
+pkg_cloudi_service_map_reduce_repo = https://github.com/CloudI/cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_commit = master
+
+PACKAGES += cloudi_service_oauth1
+pkg_cloudi_service_oauth1_name = cloudi_service_oauth1
+pkg_cloudi_service_oauth1_description = OAuth v1.0 CloudI Service
+pkg_cloudi_service_oauth1_homepage = http://cloudi.org/
+pkg_cloudi_service_oauth1_fetch = git
+pkg_cloudi_service_oauth1_repo = https://github.com/CloudI/cloudi_service_oauth1
+pkg_cloudi_service_oauth1_commit = master
+
+PACKAGES += cloudi_service_queue
+pkg_cloudi_service_queue_name = cloudi_service_queue
+pkg_cloudi_service_queue_description = Persistent Queue Service
+pkg_cloudi_service_queue_homepage = http://cloudi.org/
+pkg_cloudi_service_queue_fetch = git
+pkg_cloudi_service_queue_repo = https://github.com/CloudI/cloudi_service_queue
+pkg_cloudi_service_queue_commit = master
+
+PACKAGES += cloudi_service_quorum
+pkg_cloudi_service_quorum_name = cloudi_service_quorum
+pkg_cloudi_service_quorum_description = CloudI Quorum Service
+pkg_cloudi_service_quorum_homepage = http://cloudi.org/
+pkg_cloudi_service_quorum_fetch = git
+pkg_cloudi_service_quorum_repo = https://github.com/CloudI/cloudi_service_quorum
+pkg_cloudi_service_quorum_commit = master
+
+PACKAGES += cloudi_service_router
+pkg_cloudi_service_router_name = cloudi_service_router
+pkg_cloudi_service_router_description = CloudI Router Service
+pkg_cloudi_service_router_homepage = http://cloudi.org/
+pkg_cloudi_service_router_fetch = git
+pkg_cloudi_service_router_repo = https://github.com/CloudI/cloudi_service_router
+pkg_cloudi_service_router_commit = master
+
+PACKAGES += cloudi_service_tcp
+pkg_cloudi_service_tcp_name = cloudi_service_tcp
+pkg_cloudi_service_tcp_description = TCP CloudI Service
+pkg_cloudi_service_tcp_homepage = http://cloudi.org/
+pkg_cloudi_service_tcp_fetch = git
+pkg_cloudi_service_tcp_repo = https://github.com/CloudI/cloudi_service_tcp
+pkg_cloudi_service_tcp_commit = master
+
+PACKAGES += cloudi_service_timers
+pkg_cloudi_service_timers_name = cloudi_service_timers
+pkg_cloudi_service_timers_description = Timers CloudI Service
+pkg_cloudi_service_timers_homepage = http://cloudi.org/
+pkg_cloudi_service_timers_fetch = git
+pkg_cloudi_service_timers_repo = https://github.com/CloudI/cloudi_service_timers
+pkg_cloudi_service_timers_commit = master
+
+PACKAGES += cloudi_service_udp
+pkg_cloudi_service_udp_name = cloudi_service_udp
+pkg_cloudi_service_udp_description = UDP CloudI Service
+pkg_cloudi_service_udp_homepage = http://cloudi.org/
+pkg_cloudi_service_udp_fetch = git
+pkg_cloudi_service_udp_repo = https://github.com/CloudI/cloudi_service_udp
+pkg_cloudi_service_udp_commit = master
+
+PACKAGES += cloudi_service_validate
+pkg_cloudi_service_validate_name = cloudi_service_validate
+pkg_cloudi_service_validate_description = CloudI Validate Service
+pkg_cloudi_service_validate_homepage = http://cloudi.org/
+pkg_cloudi_service_validate_fetch = git
+pkg_cloudi_service_validate_repo = https://github.com/CloudI/cloudi_service_validate
+pkg_cloudi_service_validate_commit = master
+
+PACKAGES += cloudi_service_zeromq
+pkg_cloudi_service_zeromq_name = cloudi_service_zeromq
+pkg_cloudi_service_zeromq_description = ZeroMQ CloudI Service
+pkg_cloudi_service_zeromq_homepage = http://cloudi.org/
+pkg_cloudi_service_zeromq_fetch = git
+pkg_cloudi_service_zeromq_repo = https://github.com/CloudI/cloudi_service_zeromq
+pkg_cloudi_service_zeromq_commit = master
+
+PACKAGES += cluster_info
+pkg_cluster_info_name = cluster_info
+pkg_cluster_info_description = Fork of Hibari's nifty cluster_info OTP app
+pkg_cluster_info_homepage = https://github.com/basho/cluster_info
+pkg_cluster_info_fetch = git
+pkg_cluster_info_repo = https://github.com/basho/cluster_info
+pkg_cluster_info_commit = master
+
+PACKAGES += color
+pkg_color_name = color
+pkg_color_description = ANSI colors for your Erlang
+pkg_color_homepage = https://github.com/julianduque/erlang-color
+pkg_color_fetch = git
+pkg_color_repo = https://github.com/julianduque/erlang-color
+pkg_color_commit = master
+
+PACKAGES += confetti
+pkg_confetti_name = confetti
+pkg_confetti_description = Erlang configuration provider / application:get_env/2 on steroids
+pkg_confetti_homepage = https://github.com/jtendo/confetti
+pkg_confetti_fetch = git
+pkg_confetti_repo = https://github.com/jtendo/confetti
+pkg_confetti_commit = master
+
+PACKAGES += couchbeam
+pkg_couchbeam_name = couchbeam
+pkg_couchbeam_description = Apache CouchDB client in Erlang
+pkg_couchbeam_homepage = https://github.com/benoitc/couchbeam
+pkg_couchbeam_fetch = git
+pkg_couchbeam_repo = https://github.com/benoitc/couchbeam
+pkg_couchbeam_commit = master
+
+PACKAGES += covertool
+pkg_covertool_name = covertool
+pkg_covertool_description = Tool to convert Erlang cover data files into Cobertura XML reports
+pkg_covertool_homepage = https://github.com/idubrov/covertool
+pkg_covertool_fetch = git
+pkg_covertool_repo = https://github.com/idubrov/covertool
+pkg_covertool_commit = master
+
+PACKAGES += cowboy
+pkg_cowboy_name = cowboy
+pkg_cowboy_description = Small, fast and modular HTTP server.
+pkg_cowboy_homepage = http://ninenines.eu
+pkg_cowboy_fetch = git
+pkg_cowboy_repo = https://github.com/ninenines/cowboy
+pkg_cowboy_commit = 1.0.4
+
+PACKAGES += cowdb
+pkg_cowdb_name = cowdb
+pkg_cowdb_description = Pure Key/Value database library for Erlang Applications
+pkg_cowdb_homepage = https://github.com/refuge/cowdb
+pkg_cowdb_fetch = git
+pkg_cowdb_repo = https://github.com/refuge/cowdb
+pkg_cowdb_commit = master
+
+PACKAGES += cowlib
+pkg_cowlib_name = cowlib
+pkg_cowlib_description = Support library for manipulating Web protocols.
+pkg_cowlib_homepage = http://ninenines.eu
+pkg_cowlib_fetch = git
+pkg_cowlib_repo = https://github.com/ninenines/cowlib
+pkg_cowlib_commit = 1.0.2
+
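+# Note that cowboy and cowlib above pin release tags (1.0.4 and 1.0.2) rather
+# than a branch like most entries. A hedged sketch of the usual dep_*
+# override convention, for a project that needs a different revision:
+#
+#     DEPS = cowboy
+#     dep_cowboy_commit = master    # any branch, tag or commit id
+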
+PACKAGES += cpg
+pkg_cpg_name = cpg
+pkg_cpg_description = CloudI Process Groups
+pkg_cpg_homepage = https://github.com/okeuday/cpg
+pkg_cpg_fetch = git
+pkg_cpg_repo = https://github.com/okeuday/cpg
+pkg_cpg_commit = master
+
+PACKAGES += cqerl
+pkg_cqerl_name = cqerl
+pkg_cqerl_description = Native Erlang CQL client for Cassandra
+pkg_cqerl_homepage = https://matehat.github.io/cqerl/
+pkg_cqerl_fetch = git
+pkg_cqerl_repo = https://github.com/matehat/cqerl
+pkg_cqerl_commit = master
+
+PACKAGES += cr
+pkg_cr_name = cr
+pkg_cr_description = Chain Replication
+pkg_cr_homepage = https://synrc.com/apps/cr/doc/cr.htm
+pkg_cr_fetch = git
+pkg_cr_repo = https://github.com/spawnproc/cr
+pkg_cr_commit = master
+
+PACKAGES += cuttlefish
+pkg_cuttlefish_name = cuttlefish
+pkg_cuttlefish_description = never lose your childlike sense of wonder baby cuttlefish, promise me?
+pkg_cuttlefish_homepage = https://github.com/basho/cuttlefish
+pkg_cuttlefish_fetch = git
+pkg_cuttlefish_repo = https://github.com/basho/cuttlefish
+pkg_cuttlefish_commit = master
+
+PACKAGES += damocles
+pkg_damocles_name = damocles
+pkg_damocles_description = Erlang library for generating adversarial network conditions for QAing distributed applications/systems on a single Linux box.
+pkg_damocles_homepage = https://github.com/lostcolony/damocles
+pkg_damocles_fetch = git
+pkg_damocles_repo = https://github.com/lostcolony/damocles
+pkg_damocles_commit = master
+
+PACKAGES += debbie
+pkg_debbie_name = debbie
+pkg_debbie_description = .DEB Built In Erlang
+pkg_debbie_homepage = https://github.com/crownedgrouse/debbie
+pkg_debbie_fetch = git
+pkg_debbie_repo = https://github.com/crownedgrouse/debbie
+pkg_debbie_commit = master
+
+PACKAGES += decimal
+pkg_decimal_name = decimal
+pkg_decimal_description = An Erlang decimal arithmetic library
+pkg_decimal_homepage = https://github.com/tim/erlang-decimal
+pkg_decimal_fetch = git
+pkg_decimal_repo = https://github.com/tim/erlang-decimal
+pkg_decimal_commit = master
+
+PACKAGES += detergent
+pkg_detergent_name = detergent
+pkg_detergent_description = An emulsifying Erlang SOAP library
+pkg_detergent_homepage = https://github.com/devinus/detergent
+pkg_detergent_fetch = git
+pkg_detergent_repo = https://github.com/devinus/detergent
+pkg_detergent_commit = master
+
+PACKAGES += detest
+pkg_detest_name = detest
+pkg_detest_description = Tool for running tests on a cluster of erlang nodes
+pkg_detest_homepage = https://github.com/biokoda/detest
+pkg_detest_fetch = git
+pkg_detest_repo = https://github.com/biokoda/detest
+pkg_detest_commit = master
+
+PACKAGES += dh_date
+pkg_dh_date_name = dh_date
+pkg_dh_date_description = Date formatting / parsing library for erlang
+pkg_dh_date_homepage = https://github.com/daleharvey/dh_date
+pkg_dh_date_fetch = git
+pkg_dh_date_repo = https://github.com/daleharvey/dh_date
+pkg_dh_date_commit = master
+
+PACKAGES += dirbusterl
+pkg_dirbusterl_name = dirbusterl
+pkg_dirbusterl_description = DirBuster successor in Erlang
+pkg_dirbusterl_homepage = https://github.com/silentsignal/DirBustErl
+pkg_dirbusterl_fetch = git
+pkg_dirbusterl_repo = https://github.com/silentsignal/DirBustErl
+pkg_dirbusterl_commit = master
+
+PACKAGES += dispcount
+pkg_dispcount_name = dispcount
+pkg_dispcount_description = Erlang task dispatcher based on ETS counters.
+pkg_dispcount_homepage = https://github.com/ferd/dispcount
+pkg_dispcount_fetch = git
+pkg_dispcount_repo = https://github.com/ferd/dispcount
+pkg_dispcount_commit = master
+
+PACKAGES += dlhttpc
+pkg_dlhttpc_name = dlhttpc
+pkg_dlhttpc_description = dispcount-based lhttpc fork for massive amounts of requests to limited endpoints
+pkg_dlhttpc_homepage = https://github.com/ferd/dlhttpc
+pkg_dlhttpc_fetch = git
+pkg_dlhttpc_repo = https://github.com/ferd/dlhttpc
+pkg_dlhttpc_commit = master
+
+PACKAGES += dns
+pkg_dns_name = dns
+pkg_dns_description = Erlang DNS library
+pkg_dns_homepage = https://github.com/aetrion/dns_erlang
+pkg_dns_fetch = git
+pkg_dns_repo = https://github.com/aetrion/dns_erlang
+pkg_dns_commit = master
+
+PACKAGES += dnssd
+pkg_dnssd_name = dnssd
+pkg_dnssd_description = Erlang interface to Apple's Bonjour DNS Service Discovery implementation
+pkg_dnssd_homepage = https://github.com/benoitc/dnssd_erlang
+pkg_dnssd_fetch = git
+pkg_dnssd_repo = https://github.com/benoitc/dnssd_erlang
+pkg_dnssd_commit = master
+
+PACKAGES += dynamic_compile
+pkg_dynamic_compile_name = dynamic_compile
+pkg_dynamic_compile_description = compile and load erlang modules from string input
+pkg_dynamic_compile_homepage = https://github.com/jkvor/dynamic_compile
+pkg_dynamic_compile_fetch = git
+pkg_dynamic_compile_repo = https://github.com/jkvor/dynamic_compile
+pkg_dynamic_compile_commit = master
+
+PACKAGES += e2
+pkg_e2_name = e2
+pkg_e2_description = Library to simplify writing correct OTP applications.
+pkg_e2_homepage = http://e2project.org
+pkg_e2_fetch = git
+pkg_e2_repo = https://github.com/gar1t/e2
+pkg_e2_commit = master
+
+PACKAGES += eamf
+pkg_eamf_name = eamf
+pkg_eamf_description = eAMF provides Action Message Format (AMF) support for Erlang
+pkg_eamf_homepage = https://github.com/mrinalwadhwa/eamf
+pkg_eamf_fetch = git
+pkg_eamf_repo = https://github.com/mrinalwadhwa/eamf
+pkg_eamf_commit = master
+
+PACKAGES += eavro
+pkg_eavro_name = eavro
+pkg_eavro_description = Apache Avro encoder/decoder
+pkg_eavro_homepage = https://github.com/SIfoxDevTeam/eavro
+pkg_eavro_fetch = git
+pkg_eavro_repo = https://github.com/SIfoxDevTeam/eavro
+pkg_eavro_commit = master
+
+PACKAGES += ecapnp
+pkg_ecapnp_name = ecapnp
+pkg_ecapnp_description = Cap'n Proto library for Erlang
+pkg_ecapnp_homepage = https://github.com/kaos/ecapnp
+pkg_ecapnp_fetch = git
+pkg_ecapnp_repo = https://github.com/kaos/ecapnp
+pkg_ecapnp_commit = master
+
+PACKAGES += econfig
+pkg_econfig_name = econfig
+pkg_econfig_description = simple Erlang config handler using INI files
+pkg_econfig_homepage = https://github.com/benoitc/econfig
+pkg_econfig_fetch = git
+pkg_econfig_repo = https://github.com/benoitc/econfig
+pkg_econfig_commit = master
+
+PACKAGES += edate
+pkg_edate_name = edate
+pkg_edate_description = date manipulation library for erlang
+pkg_edate_homepage = https://github.com/dweldon/edate
+pkg_edate_fetch = git
+pkg_edate_repo = https://github.com/dweldon/edate
+pkg_edate_commit = master
+
+PACKAGES += edgar
+pkg_edgar_name = edgar
+pkg_edgar_description = Erlang Does GNU AR
+pkg_edgar_homepage = https://github.com/crownedgrouse/edgar
+pkg_edgar_fetch = git
+pkg_edgar_repo = https://github.com/crownedgrouse/edgar
+pkg_edgar_commit = master
+
+PACKAGES += edis
+pkg_edis_name = edis
+pkg_edis_description = An Erlang implementation of Redis KV Store
+pkg_edis_homepage = http://inaka.github.com/edis/
+pkg_edis_fetch = git
+pkg_edis_repo = https://github.com/inaka/edis
+pkg_edis_commit = master
+
+PACKAGES += edns
+pkg_edns_name = edns
+pkg_edns_description = Erlang/OTP DNS server
+pkg_edns_homepage = https://github.com/hcvst/erlang-dns
+pkg_edns_fetch = git
+pkg_edns_repo = https://github.com/hcvst/erlang-dns
+pkg_edns_commit = master
+
+PACKAGES += edown
+pkg_edown_name = edown
+pkg_edown_description = EDoc extension for generating Github-flavored Markdown
+pkg_edown_homepage = https://github.com/uwiger/edown
+pkg_edown_fetch = git
+pkg_edown_repo = https://github.com/uwiger/edown
+pkg_edown_commit = master
+
+PACKAGES += eep
+pkg_eep_name = eep
+pkg_eep_description = Erlang Easy Profiling (eep) application provides a way to analyze application performance and call hierarchy
+pkg_eep_homepage = https://github.com/virtan/eep
+pkg_eep_fetch = git
+pkg_eep_repo = https://github.com/virtan/eep
+pkg_eep_commit = master
+
+PACKAGES += eep_app
+pkg_eep_app_name = eep_app
+pkg_eep_app_description = Embedded Event Processing
+pkg_eep_app_homepage = https://github.com/darach/eep-erl
+pkg_eep_app_fetch = git
+pkg_eep_app_repo = https://github.com/darach/eep-erl
+pkg_eep_app_commit = master
+
+PACKAGES += efene
+pkg_efene_name = efene
+pkg_efene_description = Alternative syntax for the Erlang Programming Language focusing on simplicity, ease of use and programmer UX
+pkg_efene_homepage = https://github.com/efene/efene
+pkg_efene_fetch = git
+pkg_efene_repo = https://github.com/efene/efene
+pkg_efene_commit = master
+
+PACKAGES += egeoip
+pkg_egeoip_name = egeoip
+pkg_egeoip_description = Erlang IP Geolocation module, currently supporting the MaxMind GeoLite City Database.
+pkg_egeoip_homepage = https://github.com/mochi/egeoip
+pkg_egeoip_fetch = git
+pkg_egeoip_repo = https://github.com/mochi/egeoip
+pkg_egeoip_commit = master
+
+PACKAGES += ehsa
+pkg_ehsa_name = ehsa
+pkg_ehsa_description = Erlang HTTP server basic and digest authentication modules
+pkg_ehsa_homepage = https://bitbucket.org/a12n/ehsa
+pkg_ehsa_fetch = hg
+pkg_ehsa_repo = https://bitbucket.org/a12n/ehsa
+pkg_ehsa_commit = default
+
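+# Note: ehsa above is fetched with Mercurial rather than git; pkg_*_fetch
+# names the VCS to use, and "default" is the Mercurial branch that plays the
+# role of git's master.
+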
+PACKAGES += ej
+pkg_ej_name = ej
+pkg_ej_description = Helper module for working with Erlang terms representing JSON
+pkg_ej_homepage = https://github.com/seth/ej
+pkg_ej_fetch = git
+pkg_ej_repo = https://github.com/seth/ej
+pkg_ej_commit = master
+
+PACKAGES += ejabberd
+pkg_ejabberd_name = ejabberd
+pkg_ejabberd_description = Robust, ubiquitous and massively scalable Jabber / XMPP Instant Messaging platform
+pkg_ejabberd_homepage = https://github.com/processone/ejabberd
+pkg_ejabberd_fetch = git
+pkg_ejabberd_repo = https://github.com/processone/ejabberd
+pkg_ejabberd_commit = master
+
+PACKAGES += ejwt
+pkg_ejwt_name = ejwt
+pkg_ejwt_description = erlang library for JSON Web Token
+pkg_ejwt_homepage = https://github.com/artefactop/ejwt
+pkg_ejwt_fetch = git
+pkg_ejwt_repo = https://github.com/artefactop/ejwt
+pkg_ejwt_commit = master
+
+PACKAGES += ekaf
+pkg_ekaf_name = ekaf
+pkg_ekaf_description = A minimal, high-performance Kafka client in Erlang.
+pkg_ekaf_homepage = https://github.com/helpshift/ekaf
+pkg_ekaf_fetch = git
+pkg_ekaf_repo = https://github.com/helpshift/ekaf
+pkg_ekaf_commit = master
+
+PACKAGES += elarm
+pkg_elarm_name = elarm
+pkg_elarm_description = Alarm Manager for Erlang.
+pkg_elarm_homepage = https://github.com/esl/elarm
+pkg_elarm_fetch = git
+pkg_elarm_repo = https://github.com/esl/elarm
+pkg_elarm_commit = master
+
+PACKAGES += eleveldb
+pkg_eleveldb_name = eleveldb
+pkg_eleveldb_description = Erlang LevelDB API
+pkg_eleveldb_homepage = https://github.com/basho/eleveldb
+pkg_eleveldb_fetch = git
+pkg_eleveldb_repo = https://github.com/basho/eleveldb
+pkg_eleveldb_commit = master
+
+PACKAGES += elixir
+pkg_elixir_name = elixir
+pkg_elixir_description = Elixir is a dynamic, functional language designed for building scalable and maintainable applications
+pkg_elixir_homepage = https://elixir-lang.org/
+pkg_elixir_fetch = git
+pkg_elixir_repo = https://github.com/elixir-lang/elixir
+pkg_elixir_commit = master
+
+PACKAGES += elli
+pkg_elli_name = elli
+pkg_elli_description = Simple, robust and performant Erlang web server
+pkg_elli_homepage = https://github.com/elli-lib/elli
+pkg_elli_fetch = git
+pkg_elli_repo = https://github.com/elli-lib/elli
+pkg_elli_commit = master
+
+PACKAGES += elvis
+pkg_elvis_name = elvis
+pkg_elvis_description = Erlang Style Reviewer
+pkg_elvis_homepage = https://github.com/inaka/elvis
+pkg_elvis_fetch = git
+pkg_elvis_repo = https://github.com/inaka/elvis
+pkg_elvis_commit = master
+
+PACKAGES += emagick
+pkg_emagick_name = emagick
+pkg_emagick_description = Wrapper for Graphics/ImageMagick command line tool.
+pkg_emagick_homepage = https://github.com/kivra/emagick
+pkg_emagick_fetch = git
+pkg_emagick_repo = https://github.com/kivra/emagick
+pkg_emagick_commit = master
+
+PACKAGES += emysql
+pkg_emysql_name = emysql
+pkg_emysql_description = Stable, pure Erlang MySQL driver.
+pkg_emysql_homepage = https://github.com/Eonblast/Emysql
+pkg_emysql_fetch = git
+pkg_emysql_repo = https://github.com/Eonblast/Emysql
+pkg_emysql_commit = master
+
+PACKAGES += enm
+pkg_enm_name = enm
+pkg_enm_description = Erlang driver for nanomsg
+pkg_enm_homepage = https://github.com/basho/enm
+pkg_enm_fetch = git
+pkg_enm_repo = https://github.com/basho/enm
+pkg_enm_commit = master
+
+PACKAGES += entop
+pkg_entop_name = entop
+pkg_entop_description = A top-like tool for monitoring an Erlang node
+pkg_entop_homepage = https://github.com/mazenharake/entop
+pkg_entop_fetch = git
+pkg_entop_repo = https://github.com/mazenharake/entop
+pkg_entop_commit = master
+
+PACKAGES += epcap
+pkg_epcap_name = epcap
+pkg_epcap_description = Erlang packet capture interface using pcap
+pkg_epcap_homepage = https://github.com/msantos/epcap
+pkg_epcap_fetch = git
+pkg_epcap_repo = https://github.com/msantos/epcap
+pkg_epcap_commit = master
+
+PACKAGES += eper
+pkg_eper_name = eper
+pkg_eper_description = Erlang performance and debugging tools.
+pkg_eper_homepage = https://github.com/massemanet/eper
+pkg_eper_fetch = git
+pkg_eper_repo = https://github.com/massemanet/eper
+pkg_eper_commit = master
+
+PACKAGES += epgsql
+pkg_epgsql_name = epgsql
+pkg_epgsql_description = Erlang PostgreSQL client library.
+pkg_epgsql_homepage = https://github.com/epgsql/epgsql
+pkg_epgsql_fetch = git
+pkg_epgsql_repo = https://github.com/epgsql/epgsql
+pkg_epgsql_commit = master
+
+PACKAGES += episcina
+pkg_episcina_name = episcina
+pkg_episcina_description = A simple, non-intrusive resource pool for connections
+pkg_episcina_homepage = https://github.com/erlware/episcina
+pkg_episcina_fetch = git
+pkg_episcina_repo = https://github.com/erlware/episcina
+pkg_episcina_commit = master
+
+PACKAGES += eplot
+pkg_eplot_name = eplot
+pkg_eplot_description = A plot engine written in erlang.
+pkg_eplot_homepage = https://github.com/psyeugenic/eplot
+pkg_eplot_fetch = git
+pkg_eplot_repo = https://github.com/psyeugenic/eplot
+pkg_eplot_commit = master
+
+PACKAGES += epocxy
+pkg_epocxy_name = epocxy
+pkg_epocxy_description = Erlang Patterns of Concurrency
+pkg_epocxy_homepage = https://github.com/duomark/epocxy
+pkg_epocxy_fetch = git
+pkg_epocxy_repo = https://github.com/duomark/epocxy
+pkg_epocxy_commit = master
+
+PACKAGES += epubnub
+pkg_epubnub_name = epubnub
+pkg_epubnub_description = Erlang PubNub API
+pkg_epubnub_homepage = https://github.com/tsloughter/epubnub
+pkg_epubnub_fetch = git
+pkg_epubnub_repo = https://github.com/tsloughter/epubnub
+pkg_epubnub_commit = master
+
+PACKAGES += eqm
+pkg_eqm_name = eqm
+pkg_eqm_description = Erlang pub sub with supply-demand channels
+pkg_eqm_homepage = https://github.com/loucash/eqm
+pkg_eqm_fetch = git
+pkg_eqm_repo = https://github.com/loucash/eqm
+pkg_eqm_commit = master
+
+PACKAGES += eredis
+pkg_eredis_name = eredis
+pkg_eredis_description = Erlang Redis client
+pkg_eredis_homepage = https://github.com/wooga/eredis
+pkg_eredis_fetch = git
+pkg_eredis_repo = https://github.com/wooga/eredis
+pkg_eredis_commit = master
+
+PACKAGES += eredis_pool
+pkg_eredis_pool_name = eredis_pool
+pkg_eredis_pool_description = eredis_pool is a pool of Redis clients, using eredis and poolboy.
+pkg_eredis_pool_homepage = https://github.com/hiroeorz/eredis_pool
+pkg_eredis_pool_fetch = git
+pkg_eredis_pool_repo = https://github.com/hiroeorz/eredis_pool
+pkg_eredis_pool_commit = master
+
+PACKAGES += erl_streams
+pkg_erl_streams_name = erl_streams
+pkg_erl_streams_description = Streams in Erlang
+pkg_erl_streams_homepage = https://github.com/epappas/erl_streams
+pkg_erl_streams_fetch = git
+pkg_erl_streams_repo = https://github.com/epappas/erl_streams
+pkg_erl_streams_commit = master
+
+PACKAGES += erlang_cep
+pkg_erlang_cep_name = erlang_cep
+pkg_erlang_cep_description = A basic CEP package written in erlang
+pkg_erlang_cep_homepage = https://github.com/danmacklin/erlang_cep
+pkg_erlang_cep_fetch = git
+pkg_erlang_cep_repo = https://github.com/danmacklin/erlang_cep
+pkg_erlang_cep_commit = master
+
+PACKAGES += erlang_js
+pkg_erlang_js_name = erlang_js
+pkg_erlang_js_description = A linked-in driver for Erlang to Mozilla's Spidermonkey Javascript runtime.
+pkg_erlang_js_homepage = https://github.com/basho/erlang_js
+pkg_erlang_js_fetch = git
+pkg_erlang_js_repo = https://github.com/basho/erlang_js
+pkg_erlang_js_commit = master
+
+PACKAGES += erlang_localtime
+pkg_erlang_localtime_name = erlang_localtime
+pkg_erlang_localtime_description = Erlang library for conversion from one local time to another
+pkg_erlang_localtime_homepage = https://github.com/dmitryme/erlang_localtime
+pkg_erlang_localtime_fetch = git
+pkg_erlang_localtime_repo = https://github.com/dmitryme/erlang_localtime
+pkg_erlang_localtime_commit = master
+
+PACKAGES += erlang_smtp
+pkg_erlang_smtp_name = erlang_smtp
+pkg_erlang_smtp_description = Erlang SMTP and POP3 server code.
+pkg_erlang_smtp_homepage = https://github.com/tonyg/erlang-smtp
+pkg_erlang_smtp_fetch = git
+pkg_erlang_smtp_repo = https://github.com/tonyg/erlang-smtp
+pkg_erlang_smtp_commit = master
+
+PACKAGES += erlang_term
+pkg_erlang_term_name = erlang_term
+pkg_erlang_term_description = Erlang Term Info
+pkg_erlang_term_homepage = https://github.com/okeuday/erlang_term
+pkg_erlang_term_fetch = git
+pkg_erlang_term_repo = https://github.com/okeuday/erlang_term
+pkg_erlang_term_commit = master
+
+PACKAGES += erlastic_search
+pkg_erlastic_search_name = erlastic_search
+pkg_erlastic_search_description = An Erlang app for communicating with Elasticsearch's REST interface.
+pkg_erlastic_search_homepage = https://github.com/tsloughter/erlastic_search
+pkg_erlastic_search_fetch = git
+pkg_erlastic_search_repo = https://github.com/tsloughter/erlastic_search
+pkg_erlastic_search_commit = master
+
+PACKAGES += erlasticsearch
+pkg_erlasticsearch_name = erlasticsearch
+pkg_erlasticsearch_description = Erlang Thrift interface to Elasticsearch
+pkg_erlasticsearch_homepage = https://github.com/dieswaytoofast/erlasticsearch
+pkg_erlasticsearch_fetch = git
+pkg_erlasticsearch_repo = https://github.com/dieswaytoofast/erlasticsearch
+pkg_erlasticsearch_commit = master
+
+PACKAGES += erlbrake
+pkg_erlbrake_name = erlbrake
+pkg_erlbrake_description = Erlang Airbrake notification client
+pkg_erlbrake_homepage = https://github.com/kenpratt/erlbrake
+pkg_erlbrake_fetch = git
+pkg_erlbrake_repo = https://github.com/kenpratt/erlbrake
+pkg_erlbrake_commit = master
+
+PACKAGES += erlcloud
+pkg_erlcloud_name = erlcloud
+pkg_erlcloud_description = Cloud Computing library for erlang (Amazon EC2, S3, SQS, SimpleDB, Mechanical Turk, ELB)
+pkg_erlcloud_homepage = https://github.com/gleber/erlcloud
+pkg_erlcloud_fetch = git
+pkg_erlcloud_repo = https://github.com/gleber/erlcloud
+pkg_erlcloud_commit = master
+
+PACKAGES += erlcron
+pkg_erlcron_name = erlcron
+pkg_erlcron_description = Erlang cronish system
+pkg_erlcron_homepage = https://github.com/erlware/erlcron
+pkg_erlcron_fetch = git
+pkg_erlcron_repo = https://github.com/erlware/erlcron
+pkg_erlcron_commit = master
+
+PACKAGES += erldb
+pkg_erldb_name = erldb
+pkg_erldb_description = ORM (Object-relational mapping) application implemented in Erlang
+pkg_erldb_homepage = http://erldb.org
+pkg_erldb_fetch = git
+pkg_erldb_repo = https://github.com/erldb/erldb
+pkg_erldb_commit = master
+
+PACKAGES += erldis
+pkg_erldis_name = erldis
+pkg_erldis_description = redis erlang client library
+pkg_erldis_homepage = https://github.com/cstar/erldis
+pkg_erldis_fetch = git
+pkg_erldis_repo = https://github.com/cstar/erldis
+pkg_erldis_commit = master
+
+PACKAGES += erldns
+pkg_erldns_name = erldns
+pkg_erldns_description = DNS server, in erlang.
+pkg_erldns_homepage = https://github.com/aetrion/erl-dns
+pkg_erldns_fetch = git
+pkg_erldns_repo = https://github.com/aetrion/erl-dns
+pkg_erldns_commit = master
+
+PACKAGES += erldocker
+pkg_erldocker_name = erldocker
+pkg_erldocker_description = Docker Remote API client for Erlang
+pkg_erldocker_homepage = https://github.com/proger/erldocker
+pkg_erldocker_fetch = git
+pkg_erldocker_repo = https://github.com/proger/erldocker
+pkg_erldocker_commit = master
+
+PACKAGES += erlfsmon
+pkg_erlfsmon_name = erlfsmon
+pkg_erlfsmon_description = Erlang filesystem event watcher for Linux and OSX
+pkg_erlfsmon_homepage = https://github.com/proger/erlfsmon
+pkg_erlfsmon_fetch = git
+pkg_erlfsmon_repo = https://github.com/proger/erlfsmon
+pkg_erlfsmon_commit = master
+
+PACKAGES += erlgit
+pkg_erlgit_name = erlgit
+pkg_erlgit_description = Erlang convenience wrapper around git executable
+pkg_erlgit_homepage = https://github.com/gleber/erlgit
+pkg_erlgit_fetch = git
+pkg_erlgit_repo = https://github.com/gleber/erlgit
+pkg_erlgit_commit = master
+
+PACKAGES += erlguten
+pkg_erlguten_name = erlguten
+pkg_erlguten_description = ErlGuten is a system for high-quality typesetting, written purely in Erlang.
+pkg_erlguten_homepage = https://github.com/richcarl/erlguten
+pkg_erlguten_fetch = git
+pkg_erlguten_repo = https://github.com/richcarl/erlguten
+pkg_erlguten_commit = master
+
+PACKAGES += erlmc
+pkg_erlmc_name = erlmc
+pkg_erlmc_description = Erlang memcached binary protocol client
+pkg_erlmc_homepage = https://github.com/jkvor/erlmc
+pkg_erlmc_fetch = git
+pkg_erlmc_repo = https://github.com/jkvor/erlmc
+pkg_erlmc_commit = master
+
+PACKAGES += erlmongo
+pkg_erlmongo_name = erlmongo
+pkg_erlmongo_description = Record based Erlang driver for MongoDB with gridfs support
+pkg_erlmongo_homepage = https://github.com/SergejJurecko/erlmongo
+pkg_erlmongo_fetch = git
+pkg_erlmongo_repo = https://github.com/SergejJurecko/erlmongo
+pkg_erlmongo_commit = master
+
+PACKAGES += erlog
+pkg_erlog_name = erlog
+pkg_erlog_description = Prolog interpreter in and for Erlang
+pkg_erlog_homepage = https://github.com/rvirding/erlog
+pkg_erlog_fetch = git
+pkg_erlog_repo = https://github.com/rvirding/erlog
+pkg_erlog_commit = master
+
+PACKAGES += erlpass
+pkg_erlpass_name = erlpass
+pkg_erlpass_description = A library to handle password hashing and changing in a safe manner, independent from any kind of storage whatsoever.
+pkg_erlpass_homepage = https://github.com/ferd/erlpass
+pkg_erlpass_fetch = git
+pkg_erlpass_repo = https://github.com/ferd/erlpass
+pkg_erlpass_commit = master
+
+PACKAGES += erlport
+pkg_erlport_name = erlport
+pkg_erlport_description = ErlPort - connect Erlang to other languages
+pkg_erlport_homepage = https://github.com/hdima/erlport
+pkg_erlport_fetch = git
+pkg_erlport_repo = https://github.com/hdima/erlport
+pkg_erlport_commit = master
+
+PACKAGES += erlsh
+pkg_erlsh_name = erlsh
+pkg_erlsh_description = Erlang shell tools
+pkg_erlsh_homepage = https://github.com/proger/erlsh
+pkg_erlsh_fetch = git
+pkg_erlsh_repo = https://github.com/proger/erlsh
+pkg_erlsh_commit = master
+
+PACKAGES += erlsha2
+pkg_erlsha2_name = erlsha2
+pkg_erlsha2_description = SHA-224, SHA-256, SHA-384, SHA-512 implemented in Erlang NIFs.
+pkg_erlsha2_homepage = https://github.com/vinoski/erlsha2
+pkg_erlsha2_fetch = git
+pkg_erlsha2_repo = https://github.com/vinoski/erlsha2
+pkg_erlsha2_commit = master
+
+PACKAGES += erlsom
+pkg_erlsom_name = erlsom
+pkg_erlsom_description = XML parser for Erlang
+pkg_erlsom_homepage = https://github.com/willemdj/erlsom
+pkg_erlsom_fetch = git
+pkg_erlsom_repo = https://github.com/willemdj/erlsom
+pkg_erlsom_commit = master
+
+PACKAGES += erlubi
+pkg_erlubi_name = erlubi
+pkg_erlubi_description = Ubigraph Erlang Client (and Process Visualizer)
+pkg_erlubi_homepage = https://github.com/krestenkrab/erlubi
+pkg_erlubi_fetch = git
+pkg_erlubi_repo = https://github.com/krestenkrab/erlubi
+pkg_erlubi_commit = master
+
+PACKAGES += erlvolt
+pkg_erlvolt_name = erlvolt
+pkg_erlvolt_description = VoltDB Erlang Client Driver
+pkg_erlvolt_homepage = https://github.com/VoltDB/voltdb-client-erlang
+pkg_erlvolt_fetch = git
+pkg_erlvolt_repo = https://github.com/VoltDB/voltdb-client-erlang
+pkg_erlvolt_commit = master
+
+PACKAGES += erlware_commons
+pkg_erlware_commons_name = erlware_commons
+pkg_erlware_commons_description = Erlware Commons is an Erlware project focused on all aspects of reusable Erlang components.
+pkg_erlware_commons_homepage = https://github.com/erlware/erlware_commons
+pkg_erlware_commons_fetch = git
+pkg_erlware_commons_repo = https://github.com/erlware/erlware_commons
+pkg_erlware_commons_commit = master
+
+PACKAGES += erlydtl
+pkg_erlydtl_name = erlydtl
+pkg_erlydtl_description = Django Template Language for Erlang.
+pkg_erlydtl_homepage = https://github.com/erlydtl/erlydtl
+pkg_erlydtl_fetch = git
+pkg_erlydtl_repo = https://github.com/erlydtl/erlydtl
+pkg_erlydtl_commit = master
+
+PACKAGES += errd
+pkg_errd_name = errd
+pkg_errd_description = Erlang RRDTool library
+pkg_errd_homepage = https://github.com/archaelus/errd
+pkg_errd_fetch = git
+pkg_errd_repo = https://github.com/archaelus/errd
+pkg_errd_commit = master
+
+PACKAGES += erserve
+pkg_erserve_name = erserve
+pkg_erserve_description = Erlang/Rserve communication interface
+pkg_erserve_homepage = https://github.com/del/erserve
+pkg_erserve_fetch = git
+pkg_erserve_repo = https://github.com/del/erserve
+pkg_erserve_commit = master
+
+PACKAGES += erwa
+pkg_erwa_name = erwa
+pkg_erwa_description = A WAMP router and client written in Erlang.
+pkg_erwa_homepage = https://github.com/bwegh/erwa
+pkg_erwa_fetch = git
+pkg_erwa_repo = https://github.com/bwegh/erwa
+pkg_erwa_commit = master
+
+PACKAGES += escalus
+pkg_escalus_name = escalus
+pkg_escalus_description = An XMPP client library in Erlang for conveniently testing XMPP servers
+pkg_escalus_homepage = https://github.com/esl/escalus
+pkg_escalus_fetch = git
+pkg_escalus_repo = https://github.com/esl/escalus
+pkg_escalus_commit = master
+
+PACKAGES += esh_mk
+pkg_esh_mk_name = esh_mk
+pkg_esh_mk_description = esh template engine plugin for erlang.mk
+pkg_esh_mk_homepage = https://github.com/crownedgrouse/esh.mk
+pkg_esh_mk_fetch = git
+pkg_esh_mk_repo = https://github.com/crownedgrouse/esh.mk.git
+pkg_esh_mk_commit = master
+
+PACKAGES += espec
+pkg_espec_name = espec
+pkg_espec_description = ESpec: Behaviour driven development framework for Erlang
+pkg_espec_homepage = https://github.com/lucaspiller/espec
+pkg_espec_fetch = git
+pkg_espec_repo = https://github.com/lucaspiller/espec
+pkg_espec_commit = master
+
+PACKAGES += estatsd
+pkg_estatsd_name = estatsd
+pkg_estatsd_description = Erlang stats aggregation app that periodically flushes data to graphite
+pkg_estatsd_homepage = https://github.com/RJ/estatsd
+pkg_estatsd_fetch = git
+pkg_estatsd_repo = https://github.com/RJ/estatsd
+pkg_estatsd_commit = master
+
+PACKAGES += etap
+pkg_etap_name = etap
+pkg_etap_description = etap is a simple Erlang testing library that provides TAP-compliant output.
+pkg_etap_homepage = https://github.com/ngerakines/etap
+pkg_etap_fetch = git
+pkg_etap_repo = https://github.com/ngerakines/etap
+pkg_etap_commit = master
+
+PACKAGES += etest
+pkg_etest_name = etest
+pkg_etest_description = A lightweight, convention over configuration test framework for Erlang
+pkg_etest_homepage = https://github.com/wooga/etest
+pkg_etest_fetch = git
+pkg_etest_repo = https://github.com/wooga/etest
+pkg_etest_commit = master
+
+PACKAGES += etest_http
+pkg_etest_http_name = etest_http
+pkg_etest_http_description = etest Assertions around HTTP (client-side)
+pkg_etest_http_homepage = https://github.com/wooga/etest_http
+pkg_etest_http_fetch = git
+pkg_etest_http_repo = https://github.com/wooga/etest_http
+pkg_etest_http_commit = master
+
+PACKAGES += etoml
+pkg_etoml_name = etoml
+pkg_etoml_description = TOML language erlang parser
+pkg_etoml_homepage = https://github.com/kalta/etoml
+pkg_etoml_fetch = git
+pkg_etoml_repo = https://github.com/kalta/etoml
+pkg_etoml_commit = master
+
+PACKAGES += eunit
+pkg_eunit_name = eunit
+pkg_eunit_description = The EUnit lightweight unit testing framework for Erlang - this is the canonical development repository.
+pkg_eunit_homepage = https://github.com/richcarl/eunit
+pkg_eunit_fetch = git
+pkg_eunit_repo = https://github.com/richcarl/eunit
+pkg_eunit_commit = master
+
+PACKAGES += eunit_formatters
+pkg_eunit_formatters_name = eunit_formatters
+pkg_eunit_formatters_description = Because eunit's output sucks. Let's make it better.
+pkg_eunit_formatters_homepage = https://github.com/seancribbs/eunit_formatters
+pkg_eunit_formatters_fetch = git
+pkg_eunit_formatters_repo = https://github.com/seancribbs/eunit_formatters
+pkg_eunit_formatters_commit = master
+
+PACKAGES += euthanasia
+pkg_euthanasia_name = euthanasia
+pkg_euthanasia_description = Merciful killer for your Erlang processes
+pkg_euthanasia_homepage = https://github.com/doubleyou/euthanasia
+pkg_euthanasia_fetch = git
+pkg_euthanasia_repo = https://github.com/doubleyou/euthanasia
+pkg_euthanasia_commit = master
+
+PACKAGES += evum
+pkg_evum_name = evum
+pkg_evum_description = Spawn Linux VMs as Erlang processes in the Erlang VM
+pkg_evum_homepage = https://github.com/msantos/evum
+pkg_evum_fetch = git
+pkg_evum_repo = https://github.com/msantos/evum
+pkg_evum_commit = master
+
+PACKAGES += exec
+pkg_exec_name = erlexec
+pkg_exec_description = Execute and control OS processes from Erlang/OTP.
+pkg_exec_homepage = http://saleyn.github.com/erlexec
+pkg_exec_fetch = git
+pkg_exec_repo = https://github.com/saleyn/erlexec
+pkg_exec_commit = master
+
+PACKAGES += exml
+pkg_exml_name = exml
+pkg_exml_description = XML parsing library in Erlang
+pkg_exml_homepage = https://github.com/paulgray/exml
+pkg_exml_fetch = git
+pkg_exml_repo = https://github.com/paulgray/exml
+pkg_exml_commit = master
+
+PACKAGES += exometer
+pkg_exometer_name = exometer
+pkg_exometer_description = Basic measurement objects and probe behavior
+pkg_exometer_homepage = https://github.com/Feuerlabs/exometer
+pkg_exometer_fetch = git
+pkg_exometer_repo = https://github.com/Feuerlabs/exometer
+pkg_exometer_commit = master
+
+PACKAGES += exs1024
+pkg_exs1024_name = exs1024
+pkg_exs1024_description = Xorshift1024star pseudo random number generator for Erlang.
+pkg_exs1024_homepage = https://github.com/jj1bdx/exs1024
+pkg_exs1024_fetch = git
+pkg_exs1024_repo = https://github.com/jj1bdx/exs1024
+pkg_exs1024_commit = master
+
+PACKAGES += exs64
+pkg_exs64_name = exs64
+pkg_exs64_description = Xorshift64star pseudo random number generator for Erlang.
+pkg_exs64_homepage = https://github.com/jj1bdx/exs64
+pkg_exs64_fetch = git
+pkg_exs64_repo = https://github.com/jj1bdx/exs64
+pkg_exs64_commit = master
+
+PACKAGES += exsplus116
+pkg_exsplus116_name = exsplus116
+pkg_exsplus116_description = Xorshift116plus for Erlang
+pkg_exsplus116_homepage = https://github.com/jj1bdx/exsplus116
+pkg_exsplus116_fetch = git
+pkg_exsplus116_repo = https://github.com/jj1bdx/exsplus116
+pkg_exsplus116_commit = master
+
+PACKAGES += exsplus128
+pkg_exsplus128_name = exsplus128
+pkg_exsplus128_description = Xorshift128plus pseudo random number generator for Erlang.
+pkg_exsplus128_homepage = https://github.com/jj1bdx/exsplus128
+pkg_exsplus128_fetch = git
+pkg_exsplus128_repo = https://github.com/jj1bdx/exsplus128
+pkg_exsplus128_commit = master
+
+PACKAGES += ezmq
+pkg_ezmq_name = ezmq
+pkg_ezmq_description = ZeroMQ implemented in Erlang
+pkg_ezmq_homepage = https://github.com/RoadRunnr/ezmq
+pkg_ezmq_fetch = git
+pkg_ezmq_repo = https://github.com/RoadRunnr/ezmq
+pkg_ezmq_commit = master
+
+PACKAGES += ezmtp
+pkg_ezmtp_name = ezmtp
+pkg_ezmtp_description = ZMTP protocol in pure Erlang.
+pkg_ezmtp_homepage = https://github.com/a13x/ezmtp
+pkg_ezmtp_fetch = git
+pkg_ezmtp_repo = https://github.com/a13x/ezmtp
+pkg_ezmtp_commit = master
+
+PACKAGES += fast_disk_log
+pkg_fast_disk_log_name = fast_disk_log
+pkg_fast_disk_log_description = Pool-based asynchronous Erlang disk logger
+pkg_fast_disk_log_homepage = https://github.com/lpgauth/fast_disk_log
+pkg_fast_disk_log_fetch = git
+pkg_fast_disk_log_repo = https://github.com/lpgauth/fast_disk_log
+pkg_fast_disk_log_commit = master
+
+PACKAGES += feeder
+pkg_feeder_name = feeder
+pkg_feeder_description = Stream parse RSS and Atom formatted XML feeds.
+pkg_feeder_homepage = https://github.com/michaelnisi/feeder
+pkg_feeder_fetch = git
+pkg_feeder_repo = https://github.com/michaelnisi/feeder
+pkg_feeder_commit = master
+
+PACKAGES += find_crate
+pkg_find_crate_name = find_crate
+pkg_find_crate_description = Find Rust libs and exes in Erlang application priv directory
+pkg_find_crate_homepage = https://github.com/goertzenator/find_crate
+pkg_find_crate_fetch = git
+pkg_find_crate_repo = https://github.com/goertzenator/find_crate
+pkg_find_crate_commit = master
+
+PACKAGES += fix
+pkg_fix_name = fix
+pkg_fix_description = FIX protocol (http://fixprotocol.org/) implementation.
+pkg_fix_homepage = https://github.com/maxlapshin/fix
+pkg_fix_fetch = git
+pkg_fix_repo = https://github.com/maxlapshin/fix
+pkg_fix_commit = master
+
+PACKAGES += flower
+pkg_flower_name = flower
+pkg_flower_description = FlowER - an Erlang OpenFlow development platform
+pkg_flower_homepage = https://github.com/travelping/flower
+pkg_flower_fetch = git
+pkg_flower_repo = https://github.com/travelping/flower
+pkg_flower_commit = master
+
+PACKAGES += fn
+pkg_fn_name = fn
+pkg_fn_description = Function utilities for Erlang
+pkg_fn_homepage = https://github.com/reiddraper/fn
+pkg_fn_fetch = git
+pkg_fn_repo = https://github.com/reiddraper/fn
+pkg_fn_commit = master
+
+PACKAGES += folsom
+pkg_folsom_name = folsom
+pkg_folsom_description = Expose Erlang Events and Metrics
+pkg_folsom_homepage = https://github.com/boundary/folsom
+pkg_folsom_fetch = git
+pkg_folsom_repo = https://github.com/boundary/folsom
+pkg_folsom_commit = master
+
+PACKAGES += folsom_cowboy
+pkg_folsom_cowboy_name = folsom_cowboy
+pkg_folsom_cowboy_description = A Cowboy based Folsom HTTP Wrapper.
+pkg_folsom_cowboy_homepage = https://github.com/boundary/folsom_cowboy
+pkg_folsom_cowboy_fetch = git
+pkg_folsom_cowboy_repo = https://github.com/boundary/folsom_cowboy
+pkg_folsom_cowboy_commit = master
+
+PACKAGES += folsomite
+pkg_folsomite_name = folsomite
+pkg_folsomite_description = blow up your graphite / riemann server with folsom metrics
+pkg_folsomite_homepage = https://github.com/campanja/folsomite
+pkg_folsomite_fetch = git
+pkg_folsomite_repo = https://github.com/campanja/folsomite
+pkg_folsomite_commit = master
+
+PACKAGES += fs
+pkg_fs_name = fs
+pkg_fs_description = Erlang FileSystem Listener
+pkg_fs_homepage = https://github.com/synrc/fs
+pkg_fs_fetch = git
+pkg_fs_repo = https://github.com/synrc/fs
+pkg_fs_commit = master
+
+PACKAGES += fuse
+pkg_fuse_name = fuse
+pkg_fuse_description = A Circuit Breaker for Erlang
+pkg_fuse_homepage = https://github.com/jlouis/fuse
+pkg_fuse_fetch = git
+pkg_fuse_repo = https://github.com/jlouis/fuse
+pkg_fuse_commit = master
+
+PACKAGES += gcm
+pkg_gcm_name = gcm
+pkg_gcm_description = An Erlang application for Google Cloud Messaging
+pkg_gcm_homepage = https://github.com/pdincau/gcm-erlang
+pkg_gcm_fetch = git
+pkg_gcm_repo = https://github.com/pdincau/gcm-erlang
+pkg_gcm_commit = master
+
+PACKAGES += gcprof
+pkg_gcprof_name = gcprof
+pkg_gcprof_description = Garbage Collection profiler for Erlang
+pkg_gcprof_homepage = https://github.com/knutin/gcprof
+pkg_gcprof_fetch = git
+pkg_gcprof_repo = https://github.com/knutin/gcprof
+pkg_gcprof_commit = master
+
+PACKAGES += geas
+pkg_geas_name = geas
+pkg_geas_description = Guess Erlang Application Scattering
+pkg_geas_homepage = https://github.com/crownedgrouse/geas
+pkg_geas_fetch = git
+pkg_geas_repo = https://github.com/crownedgrouse/geas
+pkg_geas_commit = master
+
+PACKAGES += geef
+pkg_geef_name = geef
+pkg_geef_description = Git NEEEEF (Erlang NIF)
+pkg_geef_homepage = https://github.com/carlosmn/geef
+pkg_geef_fetch = git
+pkg_geef_repo = https://github.com/carlosmn/geef
+pkg_geef_commit = master
+
+PACKAGES += gen_coap
+pkg_gen_coap_name = gen_coap
+pkg_gen_coap_description = Generic Erlang CoAP Client/Server
+pkg_gen_coap_homepage = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_fetch = git
+pkg_gen_coap_repo = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_commit = master
+
+PACKAGES += gen_cycle
+pkg_gen_cycle_name = gen_cycle
+pkg_gen_cycle_description = Simple, generic OTP behaviour for recurring tasks
+pkg_gen_cycle_homepage = https://github.com/aerosol/gen_cycle
+pkg_gen_cycle_fetch = git
+pkg_gen_cycle_repo = https://github.com/aerosol/gen_cycle
+pkg_gen_cycle_commit = develop
+
+PACKAGES += gen_icmp
+pkg_gen_icmp_name = gen_icmp
+pkg_gen_icmp_description = Erlang interface to ICMP sockets
+pkg_gen_icmp_homepage = https://github.com/msantos/gen_icmp
+pkg_gen_icmp_fetch = git
+pkg_gen_icmp_repo = https://github.com/msantos/gen_icmp
+pkg_gen_icmp_commit = master
+
+PACKAGES += gen_leader
+pkg_gen_leader_name = gen_leader
+pkg_gen_leader_description = leader election behavior
+pkg_gen_leader_homepage = https://github.com/garret-smith/gen_leader_revival
+pkg_gen_leader_fetch = git
+pkg_gen_leader_repo = https://github.com/garret-smith/gen_leader_revival
+pkg_gen_leader_commit = master
+
+PACKAGES += gen_nb_server
+pkg_gen_nb_server_name = gen_nb_server
+pkg_gen_nb_server_description = OTP behavior for writing non-blocking servers
+pkg_gen_nb_server_homepage = https://github.com/kevsmith/gen_nb_server
+pkg_gen_nb_server_fetch = git
+pkg_gen_nb_server_repo = https://github.com/kevsmith/gen_nb_server
+pkg_gen_nb_server_commit = master
+
+PACKAGES += gen_paxos
+pkg_gen_paxos_name = gen_paxos
+pkg_gen_paxos_description = An Erlang/OTP-style implementation of the PAXOS distributed consensus protocol
+pkg_gen_paxos_homepage = https://github.com/gburd/gen_paxos
+pkg_gen_paxos_fetch = git
+pkg_gen_paxos_repo = https://github.com/gburd/gen_paxos
+pkg_gen_paxos_commit = master
+
+PACKAGES += gen_rpc
+pkg_gen_rpc_name = gen_rpc
+pkg_gen_rpc_description = A scalable RPC library for Erlang-VM based languages
+pkg_gen_rpc_homepage = https://github.com/priestjim/gen_rpc
+pkg_gen_rpc_fetch = git
+pkg_gen_rpc_repo = https://github.com/priestjim/gen_rpc.git
+pkg_gen_rpc_commit = master
+
+PACKAGES += gen_smtp
+pkg_gen_smtp_name = gen_smtp
+pkg_gen_smtp_description = A generic Erlang SMTP server and client that can be extended via callback modules
+pkg_gen_smtp_homepage = https://github.com/Vagabond/gen_smtp
+pkg_gen_smtp_fetch = git
+pkg_gen_smtp_repo = https://github.com/Vagabond/gen_smtp
+pkg_gen_smtp_commit = master
+
+PACKAGES += gen_tracker
+pkg_gen_tracker_name = gen_tracker
+pkg_gen_tracker_description = supervisor with ets handling of children and their metadata
+pkg_gen_tracker_homepage = https://github.com/erlyvideo/gen_tracker
+pkg_gen_tracker_fetch = git
+pkg_gen_tracker_repo = https://github.com/erlyvideo/gen_tracker
+pkg_gen_tracker_commit = master
+
+PACKAGES += gen_unix
+pkg_gen_unix_name = gen_unix
+pkg_gen_unix_description = Erlang Unix socket interface
+pkg_gen_unix_homepage = https://github.com/msantos/gen_unix
+pkg_gen_unix_fetch = git
+pkg_gen_unix_repo = https://github.com/msantos/gen_unix
+pkg_gen_unix_commit = master
+
+PACKAGES += geode
+pkg_geode_name = geode
+pkg_geode_description = geohash/proximity lookup in pure, uncut erlang.
+pkg_geode_homepage = https://github.com/bradfordw/geode
+pkg_geode_fetch = git
+pkg_geode_repo = https://github.com/bradfordw/geode
+pkg_geode_commit = master
+
+PACKAGES += getopt
+pkg_getopt_name = getopt
+pkg_getopt_description = Module to parse command line arguments using the GNU getopt syntax
+pkg_getopt_homepage = https://github.com/jcomellas/getopt
+pkg_getopt_fetch = git
+pkg_getopt_repo = https://github.com/jcomellas/getopt
+pkg_getopt_commit = master
+
+PACKAGES += gettext
+pkg_gettext_name = gettext
+pkg_gettext_description = Erlang internationalization library.
+pkg_gettext_homepage = https://github.com/etnt/gettext
+pkg_gettext_fetch = git
+pkg_gettext_repo = https://github.com/etnt/gettext
+pkg_gettext_commit = master
+
+PACKAGES += giallo
+pkg_giallo_name = giallo
+pkg_giallo_description = Small and flexible web framework on top of Cowboy
+pkg_giallo_homepage = https://github.com/kivra/giallo
+pkg_giallo_fetch = git
+pkg_giallo_repo = https://github.com/kivra/giallo
+pkg_giallo_commit = master
+
+PACKAGES += gin
+pkg_gin_name = gin
+pkg_gin_description = The guards and for Erlang parse_transform
+pkg_gin_homepage = https://github.com/mad-cocktail/gin
+pkg_gin_fetch = git
+pkg_gin_repo = https://github.com/mad-cocktail/gin
+pkg_gin_commit = master
+
+PACKAGES += gitty
+pkg_gitty_name = gitty
+pkg_gitty_description = Git access in erlang
+pkg_gitty_homepage = https://github.com/maxlapshin/gitty
+pkg_gitty_fetch = git
+pkg_gitty_repo = https://github.com/maxlapshin/gitty
+pkg_gitty_commit = master
+
+PACKAGES += gold_fever
+pkg_gold_fever_name = gold_fever
+pkg_gold_fever_description = A Treasure Hunt for Erlangers
+pkg_gold_fever_homepage = https://github.com/inaka/gold_fever
+pkg_gold_fever_fetch = git
+pkg_gold_fever_repo = https://github.com/inaka/gold_fever
+pkg_gold_fever_commit = master
+
+PACKAGES += gpb
+pkg_gpb_name = gpb
+pkg_gpb_description = A Google Protobuf implementation for Erlang
+pkg_gpb_homepage = https://github.com/tomas-abrahamsson/gpb
+pkg_gpb_fetch = git
+pkg_gpb_repo = https://github.com/tomas-abrahamsson/gpb
+pkg_gpb_commit = master
+
+PACKAGES += gproc
+pkg_gproc_name = gproc
+pkg_gproc_description = Extended process registry for Erlang
+pkg_gproc_homepage = https://github.com/uwiger/gproc
+pkg_gproc_fetch = git
+pkg_gproc_repo = https://github.com/uwiger/gproc
+pkg_gproc_commit = master
+
+PACKAGES += grapherl
+pkg_grapherl_name = grapherl
+pkg_grapherl_description = Create graphs of Erlang systems and programs
+pkg_grapherl_homepage = https://github.com/eproxus/grapherl
+pkg_grapherl_fetch = git
+pkg_grapherl_repo = https://github.com/eproxus/grapherl
+pkg_grapherl_commit = master
+
+PACKAGES += grpc
+pkg_grpc_name = grpc
+pkg_grpc_description = gRPC server in Erlang
+pkg_grpc_homepage = https://github.com/Bluehouse-Technology/grpc
+pkg_grpc_fetch = git
+pkg_grpc_repo = https://github.com/Bluehouse-Technology/grpc
+pkg_grpc_commit = master
+
+PACKAGES += grpc_client
+pkg_grpc_client_name = grpc_client
+pkg_grpc_client_description = gRPC client in Erlang
+pkg_grpc_client_homepage = https://github.com/Bluehouse-Technology/grpc_client
+pkg_grpc_client_fetch = git
+pkg_grpc_client_repo = https://github.com/Bluehouse-Technology/grpc_client
+pkg_grpc_client_commit = master
+
+PACKAGES += gun
+pkg_gun_name = gun
+pkg_gun_description = Asynchronous SPDY, HTTP and Websocket client written in Erlang.
+pkg_gun_homepage = http://ninenines.eu
+pkg_gun_fetch = git
+pkg_gun_repo = https://github.com/ninenines/gun
+pkg_gun_commit = master
+
+PACKAGES += gut
+pkg_gut_name = gut
+pkg_gut_description = gut is a template-printing (aka scaffolding) tool for Erlang, like rails generate or yeoman.
+pkg_gut_homepage = https://github.com/unbalancedparentheses/gut
+pkg_gut_fetch = git
+pkg_gut_repo = https://github.com/unbalancedparentheses/gut
+pkg_gut_commit = master
+
+PACKAGES += hackney
+pkg_hackney_name = hackney
+pkg_hackney_description = simple HTTP client in Erlang
+pkg_hackney_homepage = https://github.com/benoitc/hackney
+pkg_hackney_fetch = git
+pkg_hackney_repo = https://github.com/benoitc/hackney
+pkg_hackney_commit = master
+
+PACKAGES += hamcrest
+pkg_hamcrest_name = hamcrest
+pkg_hamcrest_description = Erlang port of Hamcrest
+pkg_hamcrest_homepage = https://github.com/hyperthunk/hamcrest-erlang
+pkg_hamcrest_fetch = git
+pkg_hamcrest_repo = https://github.com/hyperthunk/hamcrest-erlang
+pkg_hamcrest_commit = master
+
+PACKAGES += hanoidb
+pkg_hanoidb_name = hanoidb
+pkg_hanoidb_description = Erlang LSM BTree Storage
+pkg_hanoidb_homepage = https://github.com/krestenkrab/hanoidb
+pkg_hanoidb_fetch = git
+pkg_hanoidb_repo = https://github.com/krestenkrab/hanoidb
+pkg_hanoidb_commit = master
+
+PACKAGES += hottub
+pkg_hottub_name = hottub
+pkg_hottub_description = Permanent Erlang Worker Pool
+pkg_hottub_homepage = https://github.com/bfrog/hottub
+pkg_hottub_fetch = git
+pkg_hottub_repo = https://github.com/bfrog/hottub
+pkg_hottub_commit = master
+
+PACKAGES += hpack
+pkg_hpack_name = hpack
+pkg_hpack_description = HPACK Implementation for Erlang
+pkg_hpack_homepage = https://github.com/joedevivo/hpack
+pkg_hpack_fetch = git
+pkg_hpack_repo = https://github.com/joedevivo/hpack
+pkg_hpack_commit = master
+
+PACKAGES += hyper
+pkg_hyper_name = hyper
+pkg_hyper_description = Erlang implementation of HyperLogLog
+pkg_hyper_homepage = https://github.com/GameAnalytics/hyper
+pkg_hyper_fetch = git
+pkg_hyper_repo = https://github.com/GameAnalytics/hyper
+pkg_hyper_commit = master
+
+PACKAGES += i18n
+pkg_i18n_name = i18n
+pkg_i18n_description = International components for unicode from Erlang (unicode, date, string, number, format, locale, localization, transliteration, icu4e)
+pkg_i18n_homepage = https://github.com/erlang-unicode/i18n
+pkg_i18n_fetch = git
+pkg_i18n_repo = https://github.com/erlang-unicode/i18n
+pkg_i18n_commit = master
+
+PACKAGES += ibrowse
+pkg_ibrowse_name = ibrowse
+pkg_ibrowse_description = Erlang HTTP client
+pkg_ibrowse_homepage = https://github.com/cmullaparthi/ibrowse
+pkg_ibrowse_fetch = git
+pkg_ibrowse_repo = https://github.com/cmullaparthi/ibrowse
+pkg_ibrowse_commit = master
+
+PACKAGES += idna
+pkg_idna_name = idna
+pkg_idna_description = Erlang IDNA lib
+pkg_idna_homepage = https://github.com/benoitc/erlang-idna
+pkg_idna_fetch = git
+pkg_idna_repo = https://github.com/benoitc/erlang-idna
+pkg_idna_commit = master
+
+PACKAGES += ierlang
+pkg_ierlang_name = ierlang
+pkg_ierlang_description = An Erlang language kernel for IPython.
+pkg_ierlang_homepage = https://github.com/robbielynch/ierlang
+pkg_ierlang_fetch = git
+pkg_ierlang_repo = https://github.com/robbielynch/ierlang
+pkg_ierlang_commit = master
+
+PACKAGES += iota
+pkg_iota_name = iota
+pkg_iota_description = iota (Inter-dependency Objective Testing Apparatus) - a tool to enforce clean separation of responsibilities in Erlang code
+pkg_iota_homepage = https://github.com/jpgneves/iota
+pkg_iota_fetch = git
+pkg_iota_repo = https://github.com/jpgneves/iota
+pkg_iota_commit = master
+
+PACKAGES += irc_lib
+pkg_irc_lib_name = irc_lib
+pkg_irc_lib_description = Erlang irc client library
+pkg_irc_lib_homepage = https://github.com/OtpChatBot/irc_lib
+pkg_irc_lib_fetch = git
+pkg_irc_lib_repo = https://github.com/OtpChatBot/irc_lib
+pkg_irc_lib_commit = master
+
+PACKAGES += ircd
+pkg_ircd_name = ircd
+pkg_ircd_description = A pluggable IRC daemon application/library for Erlang.
+pkg_ircd_homepage = https://github.com/tonyg/erlang-ircd
+pkg_ircd_fetch = git
+pkg_ircd_repo = https://github.com/tonyg/erlang-ircd
+pkg_ircd_commit = master
+
+PACKAGES += iris
+pkg_iris_name = iris
+pkg_iris_description = Iris Erlang binding
+pkg_iris_homepage = https://github.com/project-iris/iris-erl
+pkg_iris_fetch = git
+pkg_iris_repo = https://github.com/project-iris/iris-erl
+pkg_iris_commit = master
+
+PACKAGES += iso8601
+pkg_iso8601_name = iso8601
+pkg_iso8601_description = Erlang ISO 8601 date formatter/parser
+pkg_iso8601_homepage = https://github.com/seansawyer/erlang_iso8601
+pkg_iso8601_fetch = git
+pkg_iso8601_repo = https://github.com/seansawyer/erlang_iso8601
+pkg_iso8601_commit = master
+
+PACKAGES += jamdb_sybase
+pkg_jamdb_sybase_name = jamdb_sybase
+pkg_jamdb_sybase_description = Erlang driver for SAP Sybase ASE
+pkg_jamdb_sybase_homepage = https://github.com/erlangbureau/jamdb_sybase
+pkg_jamdb_sybase_fetch = git
+pkg_jamdb_sybase_repo = https://github.com/erlangbureau/jamdb_sybase
+pkg_jamdb_sybase_commit = master
+
+PACKAGES += jerg
+pkg_jerg_name = jerg
+pkg_jerg_description = JSON Schema to Erlang Records Generator
+pkg_jerg_homepage = https://github.com/ddossot/jerg
+pkg_jerg_fetch = git
+pkg_jerg_repo = https://github.com/ddossot/jerg
+pkg_jerg_commit = master
+
+PACKAGES += jesse
+pkg_jesse_name = jesse
+pkg_jesse_description = jesse (JSon Schema Erlang) is an implementation of a JSON Schema validator for Erlang.
+pkg_jesse_homepage = https://github.com/for-GET/jesse
+pkg_jesse_fetch = git
+pkg_jesse_repo = https://github.com/for-GET/jesse
+pkg_jesse_commit = master
+
+PACKAGES += jiffy
+pkg_jiffy_name = jiffy
+pkg_jiffy_description = JSON NIFs for Erlang.
+pkg_jiffy_homepage = https://github.com/davisp/jiffy
+pkg_jiffy_fetch = git
+pkg_jiffy_repo = https://github.com/davisp/jiffy
+pkg_jiffy_commit = master
+
+PACKAGES += jiffy_v
+pkg_jiffy_v_name = jiffy_v
+pkg_jiffy_v_description = JSON validation utility
+pkg_jiffy_v_homepage = https://github.com/shizzard/jiffy-v
+pkg_jiffy_v_fetch = git
+pkg_jiffy_v_repo = https://github.com/shizzard/jiffy-v
+pkg_jiffy_v_commit = master
+
+PACKAGES += jobs
+pkg_jobs_name = jobs
+pkg_jobs_description = A job scheduler for load regulation
+pkg_jobs_homepage = https://github.com/esl/jobs
+pkg_jobs_fetch = git
+pkg_jobs_repo = https://github.com/esl/jobs
+pkg_jobs_commit = master
+
+PACKAGES += joxa
+pkg_joxa_name = joxa
+pkg_joxa_description = A Modern Lisp for the Erlang VM
+pkg_joxa_homepage = https://github.com/joxa/joxa
+pkg_joxa_fetch = git
+pkg_joxa_repo = https://github.com/joxa/joxa
+pkg_joxa_commit = master
+
+PACKAGES += json
+pkg_json_name = json
+pkg_json_description = a high level json library for erlang (17.0+)
+pkg_json_homepage = https://github.com/talentdeficit/json
+pkg_json_fetch = git
+pkg_json_repo = https://github.com/talentdeficit/json
+pkg_json_commit = master
+
+PACKAGES += json_rec
+pkg_json_rec_name = json_rec
+pkg_json_rec_description = JSON to erlang record
+pkg_json_rec_homepage = https://github.com/justinkirby/json_rec
+pkg_json_rec_fetch = git
+pkg_json_rec_repo = https://github.com/justinkirby/json_rec
+pkg_json_rec_commit = master
+
+PACKAGES += jsone
+pkg_jsone_name = jsone
+pkg_jsone_description = An Erlang library for encoding and decoding JSON data.
+pkg_jsone_homepage = https://github.com/sile/jsone.git
+pkg_jsone_fetch = git
+pkg_jsone_repo = https://github.com/sile/jsone.git
+pkg_jsone_commit = master
+
+PACKAGES += jsonerl
+pkg_jsonerl_name = jsonerl
+pkg_jsonerl_description = yet another but slightly different erlang <-> json encoder/decoder
+pkg_jsonerl_homepage = https://github.com/lambder/jsonerl
+pkg_jsonerl_fetch = git
+pkg_jsonerl_repo = https://github.com/lambder/jsonerl
+pkg_jsonerl_commit = master
+
+PACKAGES += jsonpath
+pkg_jsonpath_name = jsonpath
+pkg_jsonpath_description = Fast Erlang JSON data retrieval and updates via javascript-like notation
+pkg_jsonpath_homepage = https://github.com/GeneStevens/jsonpath
+pkg_jsonpath_fetch = git
+pkg_jsonpath_repo = https://github.com/GeneStevens/jsonpath
+pkg_jsonpath_commit = master
+
+PACKAGES += jsonx
+pkg_jsonx_name = jsonx
+pkg_jsonx_description = JSONX is an Erlang library, written in C, for efficiently decoding and encoding JSON.
+pkg_jsonx_homepage = https://github.com/iskra/jsonx
+pkg_jsonx_fetch = git
+pkg_jsonx_repo = https://github.com/iskra/jsonx
+pkg_jsonx_commit = master
+
+PACKAGES += jsx
+pkg_jsx_name = jsx
+pkg_jsx_description = An Erlang application for consuming, producing and manipulating JSON.
+pkg_jsx_homepage = https://github.com/talentdeficit/jsx
+pkg_jsx_fetch = git
+pkg_jsx_repo = https://github.com/talentdeficit/jsx
+pkg_jsx_commit = master
+
+PACKAGES += kafka
+pkg_kafka_name = kafka
+pkg_kafka_description = Kafka consumer and producer in Erlang
+pkg_kafka_homepage = https://github.com/wooga/kafka-erlang
+pkg_kafka_fetch = git
+pkg_kafka_repo = https://github.com/wooga/kafka-erlang
+pkg_kafka_commit = master
+
+PACKAGES += kafka_protocol
+pkg_kafka_protocol_name = kafka_protocol
+pkg_kafka_protocol_description = Kafka protocol Erlang library
+pkg_kafka_protocol_homepage = https://github.com/klarna/kafka_protocol
+pkg_kafka_protocol_fetch = git
+pkg_kafka_protocol_repo = https://github.com/klarna/kafka_protocol.git
+pkg_kafka_protocol_commit = master
+
+PACKAGES += kai
+pkg_kai_name = kai
+pkg_kai_description = DHT storage by Takeshi Inoue
+pkg_kai_homepage = https://github.com/synrc/kai
+pkg_kai_fetch = git
+pkg_kai_repo = https://github.com/synrc/kai
+pkg_kai_commit = master
+
+PACKAGES += katja
+pkg_katja_name = katja
+pkg_katja_description = A simple Riemann client written in Erlang.
+pkg_katja_homepage = https://github.com/nifoc/katja
+pkg_katja_fetch = git
+pkg_katja_repo = https://github.com/nifoc/katja
+pkg_katja_commit = master
+
+PACKAGES += kdht
+pkg_kdht_name = kdht
+pkg_kdht_description = kdht is an erlang DHT implementation
+pkg_kdht_homepage = https://github.com/kevinlynx/kdht
+pkg_kdht_fetch = git
+pkg_kdht_repo = https://github.com/kevinlynx/kdht
+pkg_kdht_commit = master
+
+PACKAGES += key2value
+pkg_key2value_name = key2value
+pkg_key2value_description = Erlang 2-way map
+pkg_key2value_homepage = https://github.com/okeuday/key2value
+pkg_key2value_fetch = git
+pkg_key2value_repo = https://github.com/okeuday/key2value
+pkg_key2value_commit = master
+
+PACKAGES += keys1value
+pkg_keys1value_name = keys1value
+pkg_keys1value_description = Erlang set associative map for key lists
+pkg_keys1value_homepage = https://github.com/okeuday/keys1value
+pkg_keys1value_fetch = git
+pkg_keys1value_repo = https://github.com/okeuday/keys1value
+pkg_keys1value_commit = master
+
+PACKAGES += kinetic
+pkg_kinetic_name = kinetic
+pkg_kinetic_description = Erlang Kinesis Client
+pkg_kinetic_homepage = https://github.com/AdRoll/kinetic
+pkg_kinetic_fetch = git
+pkg_kinetic_repo = https://github.com/AdRoll/kinetic
+pkg_kinetic_commit = master
+
+PACKAGES += kjell
+pkg_kjell_name = kjell
+pkg_kjell_description = Erlang Shell
+pkg_kjell_homepage = https://github.com/karlll/kjell
+pkg_kjell_fetch = git
+pkg_kjell_repo = https://github.com/karlll/kjell
+pkg_kjell_commit = master
+
+PACKAGES += kraken
+pkg_kraken_name = kraken
+pkg_kraken_description = Distributed Pubsub Server for Realtime Apps
+pkg_kraken_homepage = https://github.com/Asana/kraken
+pkg_kraken_fetch = git
+pkg_kraken_repo = https://github.com/Asana/kraken
+pkg_kraken_commit = master
+
+PACKAGES += kucumberl
+pkg_kucumberl_name = kucumberl
+pkg_kucumberl_description = A pure-erlang, open-source, implementation of Cucumber
+pkg_kucumberl_homepage = https://github.com/openshine/kucumberl
+pkg_kucumberl_fetch = git
+pkg_kucumberl_repo = https://github.com/openshine/kucumberl
+pkg_kucumberl_commit = master
+
+PACKAGES += kvc
+pkg_kvc_name = kvc
+pkg_kvc_description = KVC - Key Value Coding for Erlang data structures
+pkg_kvc_homepage = https://github.com/etrepum/kvc
+pkg_kvc_fetch = git
+pkg_kvc_repo = https://github.com/etrepum/kvc
+pkg_kvc_commit = master
+
+PACKAGES += kvlists
+pkg_kvlists_name = kvlists
+pkg_kvlists_description = Lists of key-value pairs (decoded JSON) in Erlang
+pkg_kvlists_homepage = https://github.com/jcomellas/kvlists
+pkg_kvlists_fetch = git
+pkg_kvlists_repo = https://github.com/jcomellas/kvlists
+pkg_kvlists_commit = master
+
+PACKAGES += kvs
+pkg_kvs_name = kvs
+pkg_kvs_description = Container and Iterator
+pkg_kvs_homepage = https://github.com/synrc/kvs
+pkg_kvs_fetch = git
+pkg_kvs_repo = https://github.com/synrc/kvs
+pkg_kvs_commit = master
+
+PACKAGES += lager
+pkg_lager_name = lager
+pkg_lager_description = A logging framework for Erlang/OTP.
+pkg_lager_homepage = https://github.com/erlang-lager/lager
+pkg_lager_fetch = git
+pkg_lager_repo = https://github.com/erlang-lager/lager
+pkg_lager_commit = master
+
+PACKAGES += lager_amqp_backend
+pkg_lager_amqp_backend_name = lager_amqp_backend
+pkg_lager_amqp_backend_description = AMQP RabbitMQ Lager backend
+pkg_lager_amqp_backend_homepage = https://github.com/jbrisbin/lager_amqp_backend
+pkg_lager_amqp_backend_fetch = git
+pkg_lager_amqp_backend_repo = https://github.com/jbrisbin/lager_amqp_backend
+pkg_lager_amqp_backend_commit = master
+
+PACKAGES += lager_syslog
+pkg_lager_syslog_name = lager_syslog
+pkg_lager_syslog_description = Syslog backend for lager
+pkg_lager_syslog_homepage = https://github.com/erlang-lager/lager_syslog
+pkg_lager_syslog_fetch = git
+pkg_lager_syslog_repo = https://github.com/erlang-lager/lager_syslog
+pkg_lager_syslog_commit = master
+
+PACKAGES += lambdapad
+pkg_lambdapad_name = lambdapad
+pkg_lambdapad_description = Static site generator using Erlang. Yes, Erlang.
+pkg_lambdapad_homepage = https://github.com/gar1t/lambdapad
+pkg_lambdapad_fetch = git
+pkg_lambdapad_repo = https://github.com/gar1t/lambdapad
+pkg_lambdapad_commit = master
+
+PACKAGES += lasp
+pkg_lasp_name = lasp
+pkg_lasp_description = A Language for Distributed, Eventually Consistent Computations
+pkg_lasp_homepage = http://lasp-lang.org/
+pkg_lasp_fetch = git
+pkg_lasp_repo = https://github.com/lasp-lang/lasp
+pkg_lasp_commit = master
+
+PACKAGES += lasse
+pkg_lasse_name = lasse
+pkg_lasse_description = SSE handler for Cowboy
+pkg_lasse_homepage = https://github.com/inaka/lasse
+pkg_lasse_fetch = git
+pkg_lasse_repo = https://github.com/inaka/lasse
+pkg_lasse_commit = master
+
+PACKAGES += ldap
+pkg_ldap_name = ldap
+pkg_ldap_description = LDAP server written in Erlang
+pkg_ldap_homepage = https://github.com/spawnproc/ldap
+pkg_ldap_fetch = git
+pkg_ldap_repo = https://github.com/spawnproc/ldap
+pkg_ldap_commit = master
+
+PACKAGES += lethink
+pkg_lethink_name = lethink
+pkg_lethink_description = erlang driver for rethinkdb
+pkg_lethink_homepage = https://github.com/taybin/lethink
+pkg_lethink_fetch = git
+pkg_lethink_repo = https://github.com/taybin/lethink
+pkg_lethink_commit = master
+
+PACKAGES += lfe
+pkg_lfe_name = lfe
+pkg_lfe_description = Lisp Flavoured Erlang (LFE)
+pkg_lfe_homepage = https://github.com/rvirding/lfe
+pkg_lfe_fetch = git
+pkg_lfe_repo = https://github.com/rvirding/lfe
+pkg_lfe_commit = master
+
+PACKAGES += ling
+pkg_ling_name = ling
+pkg_ling_description = Erlang on Xen
+pkg_ling_homepage = https://github.com/cloudozer/ling
+pkg_ling_fetch = git
+pkg_ling_repo = https://github.com/cloudozer/ling
+pkg_ling_commit = master
+
+PACKAGES += live
+pkg_live_name = live
+pkg_live_description = Automated module and configuration reloader.
+pkg_live_homepage = http://ninenines.eu
+pkg_live_fetch = git
+pkg_live_repo = https://github.com/ninenines/live
+pkg_live_commit = master
+
+PACKAGES += lmq
+pkg_lmq_name = lmq
+pkg_lmq_description = Lightweight Message Queue
+pkg_lmq_homepage = https://github.com/iij/lmq
+pkg_lmq_fetch = git
+pkg_lmq_repo = https://github.com/iij/lmq
+pkg_lmq_commit = master
+
+PACKAGES += locker
+pkg_locker_name = locker
+pkg_locker_description = Atomic distributed 'check and set' for short-lived keys
+pkg_locker_homepage = https://github.com/wooga/locker
+pkg_locker_fetch = git
+pkg_locker_repo = https://github.com/wooga/locker
+pkg_locker_commit = master
+
+PACKAGES += locks
+pkg_locks_name = locks
+pkg_locks_description = A scalable, deadlock-resolving resource locker
+pkg_locks_homepage = https://github.com/uwiger/locks
+pkg_locks_fetch = git
+pkg_locks_repo = https://github.com/uwiger/locks
+pkg_locks_commit = master
+
+PACKAGES += log4erl
+pkg_log4erl_name = log4erl
+pkg_log4erl_description = A logger for erlang in the spirit of Log4J.
+pkg_log4erl_homepage = https://github.com/ahmednawras/log4erl
+pkg_log4erl_fetch = git
+pkg_log4erl_repo = https://github.com/ahmednawras/log4erl
+pkg_log4erl_commit = master
+
+PACKAGES += lol
+pkg_lol_name = lol
+pkg_lol_description = Lisp on erLang, and programming is fun again
+pkg_lol_homepage = https://github.com/b0oh/lol
+pkg_lol_fetch = git
+pkg_lol_repo = https://github.com/b0oh/lol
+pkg_lol_commit = master
+
+PACKAGES += lucid
+pkg_lucid_name = lucid
+pkg_lucid_description = HTTP/2 server written in Erlang
+pkg_lucid_homepage = https://github.com/tatsuhiro-t/lucid
+pkg_lucid_fetch = git
+pkg_lucid_repo = https://github.com/tatsuhiro-t/lucid
+pkg_lucid_commit = master
+
+PACKAGES += luerl
+pkg_luerl_name = luerl
+pkg_luerl_description = Lua in Erlang
+pkg_luerl_homepage = https://github.com/rvirding/luerl
+pkg_luerl_fetch = git
+pkg_luerl_repo = https://github.com/rvirding/luerl
+pkg_luerl_commit = develop
+
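+# Note: pkg_*_commit is any git ref, not only master: a branch (develop,
+# as for luerl above), a tag (e.g. 1.5.1 for mysql below), or a commit SHA.
+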
+PACKAGES += luwak
+pkg_luwak_name = luwak
+pkg_luwak_description = Large-object storage interface for Riak
+pkg_luwak_homepage = https://github.com/basho/luwak
+pkg_luwak_fetch = git
+pkg_luwak_repo = https://github.com/basho/luwak
+pkg_luwak_commit = master
+
+PACKAGES += lux
+pkg_lux_name = lux
+pkg_lux_description = Lux (LUcid eXpect scripting) simplifies test automation and provides an Expect-style execution of commands
+pkg_lux_homepage = https://github.com/hawk/lux
+pkg_lux_fetch = git
+pkg_lux_repo = https://github.com/hawk/lux
+pkg_lux_commit = master
+
+PACKAGES += machi
+pkg_machi_name = machi
+pkg_machi_description = Machi file store
+pkg_machi_homepage = https://github.com/basho/machi
+pkg_machi_fetch = git
+pkg_machi_repo = https://github.com/basho/machi
+pkg_machi_commit = master
+
+PACKAGES += mad
+pkg_mad_name = mad
+pkg_mad_description = Small and Fast Rebar Replacement
+pkg_mad_homepage = https://github.com/synrc/mad
+pkg_mad_fetch = git
+pkg_mad_repo = https://github.com/synrc/mad
+pkg_mad_commit = master
+
+PACKAGES += marina
+pkg_marina_name = marina
+pkg_marina_description = Non-blocking Erlang Cassandra CQL3 client
+pkg_marina_homepage = https://github.com/lpgauth/marina
+pkg_marina_fetch = git
+pkg_marina_repo = https://github.com/lpgauth/marina
+pkg_marina_commit = master
+
+PACKAGES += mavg
+pkg_mavg_name = mavg
+pkg_mavg_description = Erlang :: Exponential moving average library
+pkg_mavg_homepage = https://github.com/EchoTeam/mavg
+pkg_mavg_fetch = git
+pkg_mavg_repo = https://github.com/EchoTeam/mavg
+pkg_mavg_commit = master
+
+PACKAGES += mc_erl
+pkg_mc_erl_name = mc_erl
+pkg_mc_erl_description = mc-erl is a server for Minecraft 1.4.7 written in Erlang.
+pkg_mc_erl_homepage = https://github.com/clonejo/mc-erl
+pkg_mc_erl_fetch = git
+pkg_mc_erl_repo = https://github.com/clonejo/mc-erl
+pkg_mc_erl_commit = master
+
+PACKAGES += mcd
+pkg_mcd_name = mcd
+pkg_mcd_description = Fast memcached protocol client in pure Erlang
+pkg_mcd_homepage = https://github.com/EchoTeam/mcd
+pkg_mcd_fetch = git
+pkg_mcd_repo = https://github.com/EchoTeam/mcd
+pkg_mcd_commit = master
+
+PACKAGES += mcerlang
+pkg_mcerlang_name = mcerlang
+pkg_mcerlang_description = The McErlang model checker for Erlang
+pkg_mcerlang_homepage = https://github.com/fredlund/McErlang
+pkg_mcerlang_fetch = git
+pkg_mcerlang_repo = https://github.com/fredlund/McErlang
+pkg_mcerlang_commit = master
+
+PACKAGES += meck
+pkg_meck_name = meck
+pkg_meck_description = A mocking library for Erlang
+pkg_meck_homepage = https://github.com/eproxus/meck
+pkg_meck_fetch = git
+pkg_meck_repo = https://github.com/eproxus/meck
+pkg_meck_commit = master
+
+PACKAGES += mekao
+pkg_mekao_name = mekao
+pkg_mekao_description = SQL constructor
+pkg_mekao_homepage = https://github.com/ddosia/mekao
+pkg_mekao_fetch = git
+pkg_mekao_repo = https://github.com/ddosia/mekao
+pkg_mekao_commit = master
+
+PACKAGES += memo
+pkg_memo_name = memo
+pkg_memo_description = Erlang memoization server
+pkg_memo_homepage = https://github.com/tuncer/memo
+pkg_memo_fetch = git
+pkg_memo_repo = https://github.com/tuncer/memo
+pkg_memo_commit = master
+
+PACKAGES += merge_index
+pkg_merge_index_name = merge_index
+pkg_merge_index_description = MergeIndex is an Erlang library for storing ordered sets on disk. It is very similar to an SSTable (in Google's Bigtable) or an HFile (in Hadoop).
+pkg_merge_index_homepage = https://github.com/basho/merge_index
+pkg_merge_index_fetch = git
+pkg_merge_index_repo = https://github.com/basho/merge_index
+pkg_merge_index_commit = master
+
+PACKAGES += merl
+pkg_merl_name = merl
+pkg_merl_description = Metaprogramming in Erlang
+pkg_merl_homepage = https://github.com/richcarl/merl
+pkg_merl_fetch = git
+pkg_merl_repo = https://github.com/richcarl/merl
+pkg_merl_commit = master
+
+PACKAGES += mimerl
+pkg_mimerl_name = mimerl
+pkg_mimerl_description = library to handle mimetypes
+pkg_mimerl_homepage = https://github.com/benoitc/mimerl
+pkg_mimerl_fetch = git
+pkg_mimerl_repo = https://github.com/benoitc/mimerl
+pkg_mimerl_commit = master
+
+PACKAGES += mimetypes
+pkg_mimetypes_name = mimetypes
+pkg_mimetypes_description = Erlang MIME types library
+pkg_mimetypes_homepage = https://github.com/spawngrid/mimetypes
+pkg_mimetypes_fetch = git
+pkg_mimetypes_repo = https://github.com/spawngrid/mimetypes
+pkg_mimetypes_commit = master
+
+PACKAGES += mixer
+pkg_mixer_name = mixer
+pkg_mixer_description = Mix in functions from other modules
+pkg_mixer_homepage = https://github.com/chef/mixer
+pkg_mixer_fetch = git
+pkg_mixer_repo = https://github.com/chef/mixer
+pkg_mixer_commit = master
+
+PACKAGES += mochiweb
+pkg_mochiweb_name = mochiweb
+pkg_mochiweb_description = MochiWeb is an Erlang library for building lightweight HTTP servers.
+pkg_mochiweb_homepage = https://github.com/mochi/mochiweb
+pkg_mochiweb_fetch = git
+pkg_mochiweb_repo = https://github.com/mochi/mochiweb
+pkg_mochiweb_commit = master
+
+PACKAGES += mochiweb_xpath
+pkg_mochiweb_xpath_name = mochiweb_xpath
+pkg_mochiweb_xpath_description = XPath support for mochiweb's html parser
+pkg_mochiweb_xpath_homepage = https://github.com/retnuh/mochiweb_xpath
+pkg_mochiweb_xpath_fetch = git
+pkg_mochiweb_xpath_repo = https://github.com/retnuh/mochiweb_xpath
+pkg_mochiweb_xpath_commit = master
+
+PACKAGES += mockgyver
+pkg_mockgyver_name = mockgyver
+pkg_mockgyver_description = A mocking library for Erlang
+pkg_mockgyver_homepage = https://github.com/klajo/mockgyver
+pkg_mockgyver_fetch = git
+pkg_mockgyver_repo = https://github.com/klajo/mockgyver
+pkg_mockgyver_commit = master
+
+PACKAGES += modlib
+pkg_modlib_name = modlib
+pkg_modlib_description = Web framework based on Erlang's inets httpd
+pkg_modlib_homepage = https://github.com/gar1t/modlib
+pkg_modlib_fetch = git
+pkg_modlib_repo = https://github.com/gar1t/modlib
+pkg_modlib_commit = master
+
+PACKAGES += mongodb
+pkg_mongodb_name = mongodb
+pkg_mongodb_description = MongoDB driver for Erlang
+pkg_mongodb_homepage = https://github.com/comtihon/mongodb-erlang
+pkg_mongodb_fetch = git
+pkg_mongodb_repo = https://github.com/comtihon/mongodb-erlang
+pkg_mongodb_commit = master
+
+PACKAGES += mongooseim
+pkg_mongooseim_name = mongooseim
+pkg_mongooseim_description = Jabber / XMPP server with focus on performance and scalability, by Erlang Solutions
+pkg_mongooseim_homepage = https://www.erlang-solutions.com/products/mongooseim-massively-scalable-ejabberd-platform
+pkg_mongooseim_fetch = git
+pkg_mongooseim_repo = https://github.com/esl/MongooseIM
+pkg_mongooseim_commit = master
+
+PACKAGES += moyo
+pkg_moyo_name = moyo
+pkg_moyo_description = Erlang utility functions library
+pkg_moyo_homepage = https://github.com/dwango/moyo
+pkg_moyo_fetch = git
+pkg_moyo_repo = https://github.com/dwango/moyo
+pkg_moyo_commit = master
+
+PACKAGES += msgpack
+pkg_msgpack_name = msgpack
+pkg_msgpack_description = MessagePack (de)serializer implementation for Erlang
+pkg_msgpack_homepage = https://github.com/msgpack/msgpack-erlang
+pkg_msgpack_fetch = git
+pkg_msgpack_repo = https://github.com/msgpack/msgpack-erlang
+pkg_msgpack_commit = master
+
+PACKAGES += mu2
+pkg_mu2_name = mu2
+pkg_mu2_description = Erlang mutation testing tool
+pkg_mu2_homepage = https://github.com/ramsay-t/mu2
+pkg_mu2_fetch = git
+pkg_mu2_repo = https://github.com/ramsay-t/mu2
+pkg_mu2_commit = master
+
+PACKAGES += mustache
+pkg_mustache_name = mustache
+pkg_mustache_description = Mustache template engine for Erlang.
+pkg_mustache_homepage = https://github.com/mojombo/mustache.erl
+pkg_mustache_fetch = git
+pkg_mustache_repo = https://github.com/mojombo/mustache.erl
+pkg_mustache_commit = master
+
+PACKAGES += myproto
+pkg_myproto_name = myproto
+pkg_myproto_description = MySQL Server Protocol in Erlang
+pkg_myproto_homepage = https://github.com/altenwald/myproto
+pkg_myproto_fetch = git
+pkg_myproto_repo = https://github.com/altenwald/myproto
+pkg_myproto_commit = master
+
+PACKAGES += mysql
+pkg_mysql_name = mysql
+pkg_mysql_description = MySQL client library for Erlang/OTP
+pkg_mysql_homepage = https://github.com/mysql-otp/mysql-otp
+pkg_mysql_fetch = git
+pkg_mysql_repo = https://github.com/mysql-otp/mysql-otp
+pkg_mysql_commit = 1.5.1
+
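+# Override sketch: a project can pin a different ref without editing this
+# index by defining dep_<name> (fetch method, repo, ref) before including
+# erlang.mk. The 1.6.0 tag below is hypothetical, for illustration only:
+#
+#     DEPS = mysql
+#     dep_mysql = git https://github.com/mysql-otp/mysql-otp 1.6.0
+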
+PACKAGES += n2o
+pkg_n2o_name = n2o
+pkg_n2o_description = WebSocket Application Server
+pkg_n2o_homepage = https://github.com/5HT/n2o
+pkg_n2o_fetch = git
+pkg_n2o_repo = https://github.com/5HT/n2o
+pkg_n2o_commit = master
+
+PACKAGES += nat_upnp
+pkg_nat_upnp_name = nat_upnp
+pkg_nat_upnp_description = Erlang library to map your internal port to an external one using UPnP IGD
+pkg_nat_upnp_homepage = https://github.com/benoitc/nat_upnp
+pkg_nat_upnp_fetch = git
+pkg_nat_upnp_repo = https://github.com/benoitc/nat_upnp
+pkg_nat_upnp_commit = master
+
+PACKAGES += neo4j
+pkg_neo4j_name = neo4j
+pkg_neo4j_description = Erlang client library for Neo4J.
+pkg_neo4j_homepage = https://github.com/dmitriid/neo4j-erlang
+pkg_neo4j_fetch = git
+pkg_neo4j_repo = https://github.com/dmitriid/neo4j-erlang
+pkg_neo4j_commit = master
+
+PACKAGES += neotoma
+pkg_neotoma_name = neotoma
+pkg_neotoma_description = Erlang library and packrat parser-generator for parsing expression grammars.
+pkg_neotoma_homepage = https://github.com/seancribbs/neotoma
+pkg_neotoma_fetch = git
+pkg_neotoma_repo = https://github.com/seancribbs/neotoma
+pkg_neotoma_commit = master
+
+PACKAGES += newrelic
+pkg_newrelic_name = newrelic
+pkg_newrelic_description = Erlang library for sending metrics to New Relic
+pkg_newrelic_homepage = https://github.com/wooga/newrelic-erlang
+pkg_newrelic_fetch = git
+pkg_newrelic_repo = https://github.com/wooga/newrelic-erlang
+pkg_newrelic_commit = master
+
+PACKAGES += nifty
+pkg_nifty_name = nifty
+pkg_nifty_description = Erlang NIF wrapper generator
+pkg_nifty_homepage = https://github.com/parapluu/nifty
+pkg_nifty_fetch = git
+pkg_nifty_repo = https://github.com/parapluu/nifty
+pkg_nifty_commit = master
+
+PACKAGES += nitrogen_core
+pkg_nitrogen_core_name = nitrogen_core
+pkg_nitrogen_core_description = The core Nitrogen library.
+pkg_nitrogen_core_homepage = http://nitrogenproject.com/
+pkg_nitrogen_core_fetch = git
+pkg_nitrogen_core_repo = https://github.com/nitrogen/nitrogen_core
+pkg_nitrogen_core_commit = master
+
+PACKAGES += nkbase
+pkg_nkbase_name = nkbase
+pkg_nkbase_description = NkBASE distributed database
+pkg_nkbase_homepage = https://github.com/Nekso/nkbase
+pkg_nkbase_fetch = git
+pkg_nkbase_repo = https://github.com/Nekso/nkbase
+pkg_nkbase_commit = develop
+
+PACKAGES += nkdocker
+pkg_nkdocker_name = nkdocker
+pkg_nkdocker_description = Erlang Docker client
+pkg_nkdocker_homepage = https://github.com/Nekso/nkdocker
+pkg_nkdocker_fetch = git
+pkg_nkdocker_repo = https://github.com/Nekso/nkdocker
+pkg_nkdocker_commit = master
+
+PACKAGES += nkpacket
+pkg_nkpacket_name = nkpacket
+pkg_nkpacket_description = Generic Erlang transport layer
+pkg_nkpacket_homepage = https://github.com/Nekso/nkpacket
+pkg_nkpacket_fetch = git
+pkg_nkpacket_repo = https://github.com/Nekso/nkpacket
+pkg_nkpacket_commit = master
+
+PACKAGES += nksip
+pkg_nksip_name = nksip
+pkg_nksip_description = Erlang SIP application server
+pkg_nksip_homepage = https://github.com/kalta/nksip
+pkg_nksip_fetch = git
+pkg_nksip_repo = https://github.com/kalta/nksip
+pkg_nksip_commit = master
+
+PACKAGES += nodefinder
+pkg_nodefinder_name = nodefinder
+pkg_nodefinder_description = automatic node discovery via UDP multicast
+pkg_nodefinder_homepage = https://github.com/erlanger/nodefinder
+pkg_nodefinder_fetch = git
+pkg_nodefinder_repo = https://github.com/okeuday/nodefinder
+pkg_nodefinder_commit = master
+
+PACKAGES += nprocreg
+pkg_nprocreg_name = nprocreg
+pkg_nprocreg_description = Minimal Distributed Erlang Process Registry
+pkg_nprocreg_homepage = http://nitrogenproject.com/
+pkg_nprocreg_fetch = git
+pkg_nprocreg_repo = https://github.com/nitrogen/nprocreg
+pkg_nprocreg_commit = master
+
+PACKAGES += oauth
+pkg_oauth_name = oauth
+pkg_oauth_description = An Erlang OAuth 1.0 implementation
+pkg_oauth_homepage = https://github.com/tim/erlang-oauth
+pkg_oauth_fetch = git
+pkg_oauth_repo = https://github.com/tim/erlang-oauth
+pkg_oauth_commit = master
+
+PACKAGES += oauth2
+pkg_oauth2_name = oauth2
+pkg_oauth2_description = Erlang Oauth2 implementation
+pkg_oauth2_homepage = https://github.com/kivra/oauth2
+pkg_oauth2_fetch = git
+pkg_oauth2_repo = https://github.com/kivra/oauth2
+pkg_oauth2_commit = master
+
+PACKAGES += observer_cli
+pkg_observer_cli_name = observer_cli
+pkg_observer_cli_description = Visualize Erlang/Elixir Nodes On The Command Line
+pkg_observer_cli_homepage = http://zhongwencool.github.io/observer_cli
+pkg_observer_cli_fetch = git
+pkg_observer_cli_repo = https://github.com/zhongwencool/observer_cli
+pkg_observer_cli_commit = master
+
+PACKAGES += octopus
+pkg_octopus_name = octopus
+pkg_octopus_description = Small and flexible pool manager written in Erlang
+pkg_octopus_homepage = https://github.com/erlangbureau/octopus
+pkg_octopus_fetch = git
+pkg_octopus_repo = https://github.com/erlangbureau/octopus
+pkg_octopus_commit = master
+
+PACKAGES += of_protocol
+pkg_of_protocol_name = of_protocol
+pkg_of_protocol_description = OpenFlow Protocol Library for Erlang
+pkg_of_protocol_homepage = https://github.com/FlowForwarding/of_protocol
+pkg_of_protocol_fetch = git
+pkg_of_protocol_repo = https://github.com/FlowForwarding/of_protocol
+pkg_of_protocol_commit = master
+
+PACKAGES += opencouch
+pkg_opencouch_name = couch
+pkg_opencouch_description = An embeddable document-oriented database compatible with Apache CouchDB
+pkg_opencouch_homepage = https://github.com/benoitc/opencouch
+pkg_opencouch_fetch = git
+pkg_opencouch_repo = https://github.com/benoitc/opencouch
+pkg_opencouch_commit = master
+
+PACKAGES += openflow
+pkg_openflow_name = openflow
+pkg_openflow_description = An OpenFlow controller written in pure erlang
+pkg_openflow_homepage = https://github.com/renatoaguiar/erlang-openflow
+pkg_openflow_fetch = git
+pkg_openflow_repo = https://github.com/renatoaguiar/erlang-openflow
+pkg_openflow_commit = master
+
+PACKAGES += openid
+pkg_openid_name = openid
+pkg_openid_description = Erlang OpenID
+pkg_openid_homepage = https://github.com/brendonh/erl_openid
+pkg_openid_fetch = git
+pkg_openid_repo = https://github.com/brendonh/erl_openid
+pkg_openid_commit = master
+
+PACKAGES += openpoker
+pkg_openpoker_name = openpoker
+pkg_openpoker_description = Genesis Texas hold'em Game Server
+pkg_openpoker_homepage = https://github.com/hpyhacking/openpoker
+pkg_openpoker_fetch = git
+pkg_openpoker_repo = https://github.com/hpyhacking/openpoker
+pkg_openpoker_commit = master
+
+PACKAGES += otpbp
+pkg_otpbp_name = otpbp
+pkg_otpbp_description = Parse transform for using new OTP functions in old Erlang/OTP releases (R15, R16, 17, 18, 19)
+pkg_otpbp_homepage = https://github.com/Ledest/otpbp
+pkg_otpbp_fetch = git
+pkg_otpbp_repo = https://github.com/Ledest/otpbp
+pkg_otpbp_commit = master
+
+PACKAGES += pal
+pkg_pal_name = pal
+pkg_pal_description = Pragmatic Authentication Library
+pkg_pal_homepage = https://github.com/manifest/pal
+pkg_pal_fetch = git
+pkg_pal_repo = https://github.com/manifest/pal
+pkg_pal_commit = master
+
+PACKAGES += parse_trans
+pkg_parse_trans_name = parse_trans
+pkg_parse_trans_description = Parse transform utilities for Erlang
+pkg_parse_trans_homepage = https://github.com/uwiger/parse_trans
+pkg_parse_trans_fetch = git
+pkg_parse_trans_repo = https://github.com/uwiger/parse_trans
+pkg_parse_trans_commit = master
+
+PACKAGES += parsexml
+pkg_parsexml_name = parsexml
+pkg_parsexml_description = Simple DOM XML parser with convenient and very simple API
+pkg_parsexml_homepage = https://github.com/maxlapshin/parsexml
+pkg_parsexml_fetch = git
+pkg_parsexml_repo = https://github.com/maxlapshin/parsexml
+pkg_parsexml_commit = master
+
+PACKAGES += partisan
+pkg_partisan_name = partisan
+pkg_partisan_description = High-performance, high-scalability distributed computing with Erlang and Elixir.
+pkg_partisan_homepage = http://partisan.cloud
+pkg_partisan_fetch = git
+pkg_partisan_repo = https://github.com/lasp-lang/partisan
+pkg_partisan_commit = master
+
+PACKAGES += pegjs
+pkg_pegjs_name = pegjs
+pkg_pegjs_description = An implementation of PEG.js grammar for Erlang.
+pkg_pegjs_homepage = https://github.com/dmitriid/pegjs
+pkg_pegjs_fetch = git
+pkg_pegjs_repo = https://github.com/dmitriid/pegjs
+pkg_pegjs_commit = master
+
+PACKAGES += percept2
+pkg_percept2_name = percept2
+pkg_percept2_description = Concurrent profiling tool for Erlang
+pkg_percept2_homepage = https://github.com/huiqing/percept2
+pkg_percept2_fetch = git
+pkg_percept2_repo = https://github.com/huiqing/percept2
+pkg_percept2_commit = master
+
+PACKAGES += pgo
+pkg_pgo_name = pgo
+pkg_pgo_description = Erlang Postgres client and connection pool
+pkg_pgo_homepage = https://github.com/erleans/pgo.git
+pkg_pgo_fetch = git
+pkg_pgo_repo = https://github.com/erleans/pgo.git
+pkg_pgo_commit = master
+
+PACKAGES += pgsql
+pkg_pgsql_name = pgsql
+pkg_pgsql_description = Erlang PostgreSQL driver
+pkg_pgsql_homepage = https://github.com/semiocast/pgsql
+pkg_pgsql_fetch = git
+pkg_pgsql_repo = https://github.com/semiocast/pgsql
+pkg_pgsql_commit = master
+
+PACKAGES += pkgx
+pkg_pkgx_name = pkgx
+pkg_pkgx_description = Build .deb packages from Erlang releases
+pkg_pkgx_homepage = https://github.com/arjan/pkgx
+pkg_pkgx_fetch = git
+pkg_pkgx_repo = https://github.com/arjan/pkgx
+pkg_pkgx_commit = master
+
+PACKAGES += pkt
+pkg_pkt_name = pkt
+pkg_pkt_description = Erlang network protocol library
+pkg_pkt_homepage = https://github.com/msantos/pkt
+pkg_pkt_fetch = git
+pkg_pkt_repo = https://github.com/msantos/pkt
+pkg_pkt_commit = master
+
+PACKAGES += plain_fsm
+pkg_plain_fsm_name = plain_fsm
+pkg_plain_fsm_description = A behaviour/support library for writing plain Erlang FSMs.
+pkg_plain_fsm_homepage = https://github.com/uwiger/plain_fsm
+pkg_plain_fsm_fetch = git
+pkg_plain_fsm_repo = https://github.com/uwiger/plain_fsm
+pkg_plain_fsm_commit = master
+
+PACKAGES += plumtree
+pkg_plumtree_name = plumtree
+pkg_plumtree_description = Epidemic Broadcast Trees
+pkg_plumtree_homepage = https://github.com/helium/plumtree
+pkg_plumtree_fetch = git
+pkg_plumtree_repo = https://github.com/helium/plumtree
+pkg_plumtree_commit = master
+
+PACKAGES += pmod_transform
+pkg_pmod_transform_name = pmod_transform
+pkg_pmod_transform_description = Parse transform for parameterized modules
+pkg_pmod_transform_homepage = https://github.com/erlang/pmod_transform
+pkg_pmod_transform_fetch = git
+pkg_pmod_transform_repo = https://github.com/erlang/pmod_transform
+pkg_pmod_transform_commit = master
+
+PACKAGES += pobox
+pkg_pobox_name = pobox
+pkg_pobox_description = External buffer processes to protect against mailbox overflow in Erlang
+pkg_pobox_homepage = https://github.com/ferd/pobox
+pkg_pobox_fetch = git
+pkg_pobox_repo = https://github.com/ferd/pobox
+pkg_pobox_commit = master
+
+PACKAGES += ponos
+pkg_ponos_name = ponos
+pkg_ponos_description = ponos is a simple yet powerful load generator written in erlang
+pkg_ponos_homepage = https://github.com/klarna/ponos
+pkg_ponos_fetch = git
+pkg_ponos_repo = https://github.com/klarna/ponos
+pkg_ponos_commit = master
+
+PACKAGES += poolboy
+pkg_poolboy_name = poolboy
+pkg_poolboy_description = A hunky Erlang worker pool factory
+pkg_poolboy_homepage = https://github.com/devinus/poolboy
+pkg_poolboy_fetch = git
+pkg_poolboy_repo = https://github.com/devinus/poolboy
+pkg_poolboy_commit = master
+
+PACKAGES += pooler
+pkg_pooler_name = pooler
+pkg_pooler_description = An OTP Process Pool Application
+pkg_pooler_homepage = https://github.com/seth/pooler
+pkg_pooler_fetch = git
+pkg_pooler_repo = https://github.com/seth/pooler
+pkg_pooler_commit = master
+
+PACKAGES += pqueue
+pkg_pqueue_name = pqueue
+pkg_pqueue_description = Erlang Priority Queues
+pkg_pqueue_homepage = https://github.com/okeuday/pqueue
+pkg_pqueue_fetch = git
+pkg_pqueue_repo = https://github.com/okeuday/pqueue
+pkg_pqueue_commit = master
+
+PACKAGES += procket
+pkg_procket_name = procket
+pkg_procket_description = Erlang interface to low level socket operations
+pkg_procket_homepage = http://blog.listincomprehension.com/search/label/procket
+pkg_procket_fetch = git
+pkg_procket_repo = https://github.com/msantos/procket
+pkg_procket_commit = master
+
+PACKAGES += prometheus
+pkg_prometheus_name = prometheus
+pkg_prometheus_description = Prometheus.io client in Erlang
+pkg_prometheus_homepage = https://github.com/deadtrickster/prometheus.erl
+pkg_prometheus_fetch = git
+pkg_prometheus_repo = https://github.com/deadtrickster/prometheus.erl
+pkg_prometheus_commit = master
+
+PACKAGES += prop
+pkg_prop_name = prop
+pkg_prop_description = An Erlang code scaffolding and generator system.
+pkg_prop_homepage = https://github.com/nuex/prop
+pkg_prop_fetch = git
+pkg_prop_repo = https://github.com/nuex/prop
+pkg_prop_commit = master
+
+PACKAGES += proper
+pkg_proper_name = proper
+pkg_proper_description = PropEr: a QuickCheck-inspired property-based testing tool for Erlang.
+pkg_proper_homepage = http://proper.softlab.ntua.gr
+pkg_proper_fetch = git
+pkg_proper_repo = https://github.com/manopapad/proper
+pkg_proper_commit = master
+
+PACKAGES += props
+pkg_props_name = props
+pkg_props_description = Property structure library
+pkg_props_homepage = https://github.com/greyarea/props
+pkg_props_fetch = git
+pkg_props_repo = https://github.com/greyarea/props
+pkg_props_commit = master
+
+PACKAGES += protobuffs
+pkg_protobuffs_name = protobuffs
+pkg_protobuffs_description = An implementation of Google's Protocol Buffers for Erlang, based on ngerakines/erlang_protobuffs.
+pkg_protobuffs_homepage = https://github.com/basho/erlang_protobuffs
+pkg_protobuffs_fetch = git
+pkg_protobuffs_repo = https://github.com/basho/erlang_protobuffs
+pkg_protobuffs_commit = master
+
+PACKAGES += psycho
+pkg_psycho_name = psycho
+pkg_psycho_description = HTTP server that provides a WSGI-like interface for applications and middleware.
+pkg_psycho_homepage = https://github.com/gar1t/psycho
+pkg_psycho_fetch = git
+pkg_psycho_repo = https://github.com/gar1t/psycho
+pkg_psycho_commit = master
+
+PACKAGES += purity
+pkg_purity_name = purity
+pkg_purity_description = A side-effect analyzer for Erlang
+pkg_purity_homepage = https://github.com/mpitid/purity
+pkg_purity_fetch = git
+pkg_purity_repo = https://github.com/mpitid/purity
+pkg_purity_commit = master
+
+PACKAGES += push_service
+pkg_push_service_name = push_service
+pkg_push_service_description = Push service
+pkg_push_service_homepage = https://github.com/hairyhum/push_service
+pkg_push_service_fetch = git
+pkg_push_service_repo = https://github.com/hairyhum/push_service
+pkg_push_service_commit = master
+
+PACKAGES += qdate
+pkg_qdate_name = qdate
+pkg_qdate_description = Date, time, and timezone parsing, formatting, and conversion for Erlang.
+pkg_qdate_homepage = https://github.com/choptastic/qdate
+pkg_qdate_fetch = git
+pkg_qdate_repo = https://github.com/choptastic/qdate
+pkg_qdate_commit = master
+
+PACKAGES += qrcode
+pkg_qrcode_name = qrcode
+pkg_qrcode_description = QR Code encoder in Erlang
+pkg_qrcode_homepage = https://github.com/komone/qrcode
+pkg_qrcode_fetch = git
+pkg_qrcode_repo = https://github.com/komone/qrcode
+pkg_qrcode_commit = master
+
+PACKAGES += quest
+pkg_quest_name = quest
+pkg_quest_description = Learn Erlang through this set of challenges. An interactive system for getting to know Erlang.
+pkg_quest_homepage = https://github.com/eriksoe/ErlangQuest
+pkg_quest_fetch = git
+pkg_quest_repo = https://github.com/eriksoe/ErlangQuest
+pkg_quest_commit = master
+
+PACKAGES += quickrand
+pkg_quickrand_name = quickrand
+pkg_quickrand_description = Quick Erlang Random Number Generation
+pkg_quickrand_homepage = https://github.com/okeuday/quickrand
+pkg_quickrand_fetch = git
+pkg_quickrand_repo = https://github.com/okeuday/quickrand
+pkg_quickrand_commit = master
+
+PACKAGES += rabbit
+pkg_rabbit_name = rabbit
+pkg_rabbit_description = RabbitMQ Server
+pkg_rabbit_homepage = https://www.rabbitmq.com/
+pkg_rabbit_fetch = git
+pkg_rabbit_repo = https://github.com/rabbitmq/rabbitmq-server.git
+pkg_rabbit_commit = master
+
+PACKAGES += rabbit_exchange_type_riak
+pkg_rabbit_exchange_type_riak_name = rabbit_exchange_type_riak
+pkg_rabbit_exchange_type_riak_description = Custom RabbitMQ exchange type for sticking messages in Riak
+pkg_rabbit_exchange_type_riak_homepage = https://github.com/jbrisbin/riak-exchange
+pkg_rabbit_exchange_type_riak_fetch = git
+pkg_rabbit_exchange_type_riak_repo = https://github.com/jbrisbin/riak-exchange
+pkg_rabbit_exchange_type_riak_commit = master
+
+PACKAGES += rack
+pkg_rack_name = rack
+pkg_rack_description = Rack handler for erlang
+pkg_rack_homepage = https://github.com/erlyvideo/rack
+pkg_rack_fetch = git
+pkg_rack_repo = https://github.com/erlyvideo/rack
+pkg_rack_commit = master
+
+PACKAGES += radierl
+pkg_radierl_name = radierl
+pkg_radierl_description = RADIUS protocol stack implemented in Erlang.
+pkg_radierl_homepage = https://github.com/vances/radierl
+pkg_radierl_fetch = git
+pkg_radierl_repo = https://github.com/vances/radierl
+pkg_radierl_commit = master
+
+PACKAGES += rafter
+pkg_rafter_name = rafter
+pkg_rafter_description = An Erlang library application which implements the Raft consensus protocol
+pkg_rafter_homepage = https://github.com/andrewjstone/rafter
+pkg_rafter_fetch = git
+pkg_rafter_repo = https://github.com/andrewjstone/rafter
+pkg_rafter_commit = master
+
+PACKAGES += ranch
+pkg_ranch_name = ranch
+pkg_ranch_description = Socket acceptor pool for TCP protocols.
+pkg_ranch_homepage = http://ninenines.eu
+pkg_ranch_fetch = git
+pkg_ranch_repo = https://github.com/ninenines/ranch
+pkg_ranch_commit = 1.2.1
+
+PACKAGES += rbeacon
+pkg_rbeacon_name = rbeacon
+pkg_rbeacon_description = LAN discovery and presence in Erlang.
+pkg_rbeacon_homepage = https://github.com/refuge/rbeacon
+pkg_rbeacon_fetch = git
+pkg_rbeacon_repo = https://github.com/refuge/rbeacon
+pkg_rbeacon_commit = master
+
+PACKAGES += rebar
+pkg_rebar_name = rebar
+pkg_rebar_description = Erlang build tool that makes it easy to compile and test Erlang applications, port drivers and releases.
+pkg_rebar_homepage = http://www.rebar3.org
+pkg_rebar_fetch = git
+pkg_rebar_repo = https://github.com/rebar/rebar3
+pkg_rebar_commit = master
+
+PACKAGES += rebus
+pkg_rebus_name = rebus
+pkg_rebus_description = A stupid-simple, internal pub/sub event bus written in and for Erlang.
+pkg_rebus_homepage = https://github.com/olle/rebus
+pkg_rebus_fetch = git
+pkg_rebus_repo = https://github.com/olle/rebus
+pkg_rebus_commit = master
+
+PACKAGES += rec2json
+pkg_rec2json_name = rec2json
+pkg_rec2json_description = Compile erlang record definitions into modules to convert them to/from json easily.
+pkg_rec2json_homepage = https://github.com/lordnull/rec2json
+pkg_rec2json_fetch = git
+pkg_rec2json_repo = https://github.com/lordnull/rec2json
+pkg_rec2json_commit = master
+
+PACKAGES += recon
+pkg_recon_name = recon
+pkg_recon_description = Collection of functions and scripts to debug Erlang in production.
+pkg_recon_homepage = https://github.com/ferd/recon
+pkg_recon_fetch = git
+pkg_recon_repo = https://github.com/ferd/recon
+pkg_recon_commit = master
+
+PACKAGES += record_info
+pkg_record_info_name = record_info
+pkg_record_info_description = Convert between record and proplist
+pkg_record_info_homepage = https://github.com/bipthelin/erlang-record_info
+pkg_record_info_fetch = git
+pkg_record_info_repo = https://github.com/bipthelin/erlang-record_info
+pkg_record_info_commit = master
+
+PACKAGES += redgrid
+pkg_redgrid_name = redgrid
+pkg_redgrid_description = automatic Erlang node discovery via redis
+pkg_redgrid_homepage = https://github.com/jkvor/redgrid
+pkg_redgrid_fetch = git
+pkg_redgrid_repo = https://github.com/jkvor/redgrid
+pkg_redgrid_commit = master
+
+PACKAGES += redo
+pkg_redo_name = redo
+pkg_redo_description = pipelined erlang redis client
+pkg_redo_homepage = https://github.com/jkvor/redo
+pkg_redo_fetch = git
+pkg_redo_repo = https://github.com/jkvor/redo
+pkg_redo_commit = master
+
+PACKAGES += reload_mk
+pkg_reload_mk_name = reload_mk
+pkg_reload_mk_description = Live reload plugin for erlang.mk.
+pkg_reload_mk_homepage = https://github.com/bullno1/reload.mk
+pkg_reload_mk_fetch = git
+pkg_reload_mk_repo = https://github.com/bullno1/reload.mk
+pkg_reload_mk_commit = master
+
+PACKAGES += reltool_util
+pkg_reltool_util_name = reltool_util
+pkg_reltool_util_description = Erlang reltool utility functionality application
+pkg_reltool_util_homepage = https://github.com/okeuday/reltool_util
+pkg_reltool_util_fetch = git
+pkg_reltool_util_repo = https://github.com/okeuday/reltool_util
+pkg_reltool_util_commit = master
+
+PACKAGES += relx
+pkg_relx_name = relx
+pkg_relx_description = Sane, simple release creation for Erlang
+pkg_relx_homepage = https://github.com/erlware/relx
+pkg_relx_fetch = git
+pkg_relx_repo = https://github.com/erlware/relx
+pkg_relx_commit = master
+
+PACKAGES += resource_discovery
+pkg_resource_discovery_name = resource_discovery
+pkg_resource_discovery_description = An application used to dynamically discover resources present in an Erlang node cluster.
+pkg_resource_discovery_homepage = http://erlware.org/
+pkg_resource_discovery_fetch = git
+pkg_resource_discovery_repo = https://github.com/erlware/resource_discovery
+pkg_resource_discovery_commit = master
+
+PACKAGES += restc
+pkg_restc_name = restc
+pkg_restc_description = Erlang Rest Client
+pkg_restc_homepage = https://github.com/kivra/restclient
+pkg_restc_fetch = git
+pkg_restc_repo = https://github.com/kivra/restclient
+pkg_restc_commit = master
+
+PACKAGES += rfc4627_jsonrpc
+pkg_rfc4627_jsonrpc_name = rfc4627_jsonrpc
+pkg_rfc4627_jsonrpc_description = Erlang RFC4627 (JSON) codec and JSON-RPC server implementation.
+pkg_rfc4627_jsonrpc_homepage = https://github.com/tonyg/erlang-rfc4627
+pkg_rfc4627_jsonrpc_fetch = git
+pkg_rfc4627_jsonrpc_repo = https://github.com/tonyg/erlang-rfc4627
+pkg_rfc4627_jsonrpc_commit = master
+
+PACKAGES += riak_control
+pkg_riak_control_name = riak_control
+pkg_riak_control_description = Webmachine-based administration interface for Riak.
+pkg_riak_control_homepage = https://github.com/basho/riak_control
+pkg_riak_control_fetch = git
+pkg_riak_control_repo = https://github.com/basho/riak_control
+pkg_riak_control_commit = master
+
+PACKAGES += riak_core
+pkg_riak_core_name = riak_core
+pkg_riak_core_description = Distributed systems infrastructure used by Riak.
+pkg_riak_core_homepage = https://github.com/basho/riak_core
+pkg_riak_core_fetch = git
+pkg_riak_core_repo = https://github.com/basho/riak_core
+pkg_riak_core_commit = master
+
+PACKAGES += riak_dt
+pkg_riak_dt_name = riak_dt
+pkg_riak_dt_description = Convergent replicated datatypes in Erlang
+pkg_riak_dt_homepage = https://github.com/basho/riak_dt
+pkg_riak_dt_fetch = git
+pkg_riak_dt_repo = https://github.com/basho/riak_dt
+pkg_riak_dt_commit = master
+
+PACKAGES += riak_ensemble
+pkg_riak_ensemble_name = riak_ensemble
+pkg_riak_ensemble_description = Multi-Paxos framework in Erlang
+pkg_riak_ensemble_homepage = https://github.com/basho/riak_ensemble
+pkg_riak_ensemble_fetch = git
+pkg_riak_ensemble_repo = https://github.com/basho/riak_ensemble
+pkg_riak_ensemble_commit = master
+
+PACKAGES += riak_kv
+pkg_riak_kv_name = riak_kv
+pkg_riak_kv_description = Riak Key/Value Store
+pkg_riak_kv_homepage = https://github.com/basho/riak_kv
+pkg_riak_kv_fetch = git
+pkg_riak_kv_repo = https://github.com/basho/riak_kv
+pkg_riak_kv_commit = master
+
+PACKAGES += riak_pg
+pkg_riak_pg_name = riak_pg
+pkg_riak_pg_description = Distributed process groups with riak_core.
+pkg_riak_pg_homepage = https://github.com/cmeiklejohn/riak_pg
+pkg_riak_pg_fetch = git
+pkg_riak_pg_repo = https://github.com/cmeiklejohn/riak_pg
+pkg_riak_pg_commit = master
+
+PACKAGES += riak_pipe
+pkg_riak_pipe_name = riak_pipe
+pkg_riak_pipe_description = Riak Pipelines
+pkg_riak_pipe_homepage = https://github.com/basho/riak_pipe
+pkg_riak_pipe_fetch = git
+pkg_riak_pipe_repo = https://github.com/basho/riak_pipe
+pkg_riak_pipe_commit = master
+
+PACKAGES += riak_sysmon
+pkg_riak_sysmon_name = riak_sysmon
+pkg_riak_sysmon_description = Simple OTP app for managing Erlang VM system_monitor event messages
+pkg_riak_sysmon_homepage = https://github.com/basho/riak_sysmon
+pkg_riak_sysmon_fetch = git
+pkg_riak_sysmon_repo = https://github.com/basho/riak_sysmon
+pkg_riak_sysmon_commit = master
+
+PACKAGES += riak_test
+pkg_riak_test_name = riak_test
+pkg_riak_test_description = I'm in your cluster, testing your riaks
+pkg_riak_test_homepage = https://github.com/basho/riak_test
+pkg_riak_test_fetch = git
+pkg_riak_test_repo = https://github.com/basho/riak_test
+pkg_riak_test_commit = master
+
+PACKAGES += riakc
+pkg_riakc_name = riakc
+pkg_riakc_description = Erlang clients for Riak.
+pkg_riakc_homepage = https://github.com/basho/riak-erlang-client
+pkg_riakc_fetch = git
+pkg_riakc_repo = https://github.com/basho/riak-erlang-client
+pkg_riakc_commit = master
+
+PACKAGES += riakhttpc
+pkg_riakhttpc_name = riakhttpc
+pkg_riakhttpc_description = Riak Erlang client using the HTTP interface
+pkg_riakhttpc_homepage = https://github.com/basho/riak-erlang-http-client
+pkg_riakhttpc_fetch = git
+pkg_riakhttpc_repo = https://github.com/basho/riak-erlang-http-client
+pkg_riakhttpc_commit = master
+
+PACKAGES += riaknostic
+pkg_riaknostic_name = riaknostic
+pkg_riaknostic_description = A diagnostic tool for Riak installations, to find common errors asap
+pkg_riaknostic_homepage = https://github.com/basho/riaknostic
+pkg_riaknostic_fetch = git
+pkg_riaknostic_repo = https://github.com/basho/riaknostic
+pkg_riaknostic_commit = master
+
+PACKAGES += riakpool
+pkg_riakpool_name = riakpool
+pkg_riakpool_description = erlang riak client pool
+pkg_riakpool_homepage = https://github.com/dweldon/riakpool
+pkg_riakpool_fetch = git
+pkg_riakpool_repo = https://github.com/dweldon/riakpool
+pkg_riakpool_commit = master
+
+PACKAGES += rivus_cep
+pkg_rivus_cep_name = rivus_cep
+pkg_rivus_cep_description = Complex event processing in Erlang
+pkg_rivus_cep_homepage = https://github.com/vascokk/rivus_cep
+pkg_rivus_cep_fetch = git
+pkg_rivus_cep_repo = https://github.com/vascokk/rivus_cep
+pkg_rivus_cep_commit = master
+
+PACKAGES += rlimit
+pkg_rlimit_name = rlimit
+pkg_rlimit_description = Magnus Klaar's rate limiter code from etorrent
+pkg_rlimit_homepage = https://github.com/jlouis/rlimit
+pkg_rlimit_fetch = git
+pkg_rlimit_repo = https://github.com/jlouis/rlimit
+pkg_rlimit_commit = master
+
+PACKAGES += rust_mk
+pkg_rust_mk_name = rust_mk
+pkg_rust_mk_description = Build Rust crates in an Erlang application
+pkg_rust_mk_homepage = https://github.com/goertzenator/rust.mk
+pkg_rust_mk_fetch = git
+pkg_rust_mk_repo = https://github.com/goertzenator/rust.mk
+pkg_rust_mk_commit = master
+
+PACKAGES += safetyvalve
+pkg_safetyvalve_name = safetyvalve
+pkg_safetyvalve_description = A safety valve for your erlang node
+pkg_safetyvalve_homepage = https://github.com/jlouis/safetyvalve
+pkg_safetyvalve_fetch = git
+pkg_safetyvalve_repo = https://github.com/jlouis/safetyvalve
+pkg_safetyvalve_commit = master
+
+PACKAGES += seestar
+pkg_seestar_name = seestar
+pkg_seestar_description = The Erlang client for Cassandra 1.2+ binary protocol
+pkg_seestar_homepage = https://github.com/iamaleksey/seestar
+pkg_seestar_fetch = git
+pkg_seestar_repo = https://github.com/iamaleksey/seestar
+pkg_seestar_commit = master
+
+PACKAGES += service
+pkg_service_name = service
+pkg_service_description = A minimal Erlang behavior for creating CloudI internal services
+pkg_service_homepage = http://cloudi.org/
+pkg_service_fetch = git
+pkg_service_repo = https://github.com/CloudI/service
+pkg_service_commit = master
+
+PACKAGES += setup
+pkg_setup_name = setup
+pkg_setup_description = Generic setup utility for Erlang-based systems
+pkg_setup_homepage = https://github.com/uwiger/setup
+pkg_setup_fetch = git
+pkg_setup_repo = https://github.com/uwiger/setup
+pkg_setup_commit = master
+
+PACKAGES += sext
+pkg_sext_name = sext
+pkg_sext_description = Sortable Erlang Term Serialization
+pkg_sext_homepage = https://github.com/uwiger/sext
+pkg_sext_fetch = git
+pkg_sext_repo = https://github.com/uwiger/sext
+pkg_sext_commit = master
+
+PACKAGES += sfmt
+pkg_sfmt_name = sfmt
+pkg_sfmt_description = SFMT pseudo random number generator for Erlang.
+pkg_sfmt_homepage = https://github.com/jj1bdx/sfmt-erlang
+pkg_sfmt_fetch = git
+pkg_sfmt_repo = https://github.com/jj1bdx/sfmt-erlang
+pkg_sfmt_commit = master
+
+PACKAGES += sgte
+pkg_sgte_name = sgte
+pkg_sgte_description = A simple Erlang Template Engine
+pkg_sgte_homepage = https://github.com/filippo/sgte
+pkg_sgte_fetch = git
+pkg_sgte_repo = https://github.com/filippo/sgte
+pkg_sgte_commit = master
+
+PACKAGES += sheriff
+pkg_sheriff_name = sheriff
+pkg_sheriff_description = Parse transform for type based validation.
+pkg_sheriff_homepage = http://ninenines.eu
+pkg_sheriff_fetch = git
+pkg_sheriff_repo = https://github.com/extend/sheriff
+pkg_sheriff_commit = master
+
+PACKAGES += shotgun
+pkg_shotgun_name = shotgun
+pkg_shotgun_description = better than just a gun
+pkg_shotgun_homepage = https://github.com/inaka/shotgun
+pkg_shotgun_fetch = git
+pkg_shotgun_repo = https://github.com/inaka/shotgun
+pkg_shotgun_commit = master
+
+PACKAGES += sidejob
+pkg_sidejob_name = sidejob
+pkg_sidejob_description = Parallel worker and capacity limiting library for Erlang
+pkg_sidejob_homepage = https://github.com/basho/sidejob
+pkg_sidejob_fetch = git
+pkg_sidejob_repo = https://github.com/basho/sidejob
+pkg_sidejob_commit = master
+
+PACKAGES += sieve
+pkg_sieve_name = sieve
+pkg_sieve_description = sieve is a simple TCP routing proxy (layer 7) in erlang
+pkg_sieve_homepage = https://github.com/benoitc/sieve
+pkg_sieve_fetch = git
+pkg_sieve_repo = https://github.com/benoitc/sieve
+pkg_sieve_commit = master
+
+PACKAGES += sighandler
+pkg_sighandler_name = sighandler
+pkg_sighandler_description = Handle UNIX signals in Erlang
+pkg_sighandler_homepage = https://github.com/jkingsbery/sighandler
+pkg_sighandler_fetch = git
+pkg_sighandler_repo = https://github.com/jkingsbery/sighandler
+pkg_sighandler_commit = master
+
+PACKAGES += simhash
+pkg_simhash_name = simhash
+pkg_simhash_description = Simhashing for Erlang -- hashing algorithm to find near-duplicates in binary data.
+pkg_simhash_homepage = https://github.com/ferd/simhash
+pkg_simhash_fetch = git
+pkg_simhash_repo = https://github.com/ferd/simhash
+pkg_simhash_commit = master
+
+PACKAGES += simple_bridge
+pkg_simple_bridge_name = simple_bridge
+pkg_simple_bridge_description = A simple, standardized interface library to Erlang HTTP Servers.
+pkg_simple_bridge_homepage = https://github.com/nitrogen/simple_bridge
+pkg_simple_bridge_fetch = git
+pkg_simple_bridge_repo = https://github.com/nitrogen/simple_bridge
+pkg_simple_bridge_commit = master
+
+PACKAGES += simple_oauth2
+pkg_simple_oauth2_name = simple_oauth2
+pkg_simple_oauth2_description = Simple erlang OAuth2 client module for any http server framework (Google, Facebook, Yandex, Vkontakte are preconfigured)
+pkg_simple_oauth2_homepage = https://github.com/virtan/simple_oauth2
+pkg_simple_oauth2_fetch = git
+pkg_simple_oauth2_repo = https://github.com/virtan/simple_oauth2
+pkg_simple_oauth2_commit = master
+
+PACKAGES += skel
+pkg_skel_name = skel
+pkg_skel_description = A Streaming Process-based Skeleton Library for Erlang
+pkg_skel_homepage = https://github.com/ParaPhrase/skel
+pkg_skel_fetch = git
+pkg_skel_repo = https://github.com/ParaPhrase/skel
+pkg_skel_commit = master
+
+PACKAGES += slack
+pkg_slack_name = slack
+pkg_slack_description = Minimal slack notification OTP library.
+pkg_slack_homepage = https://github.com/DonBranson/slack
+pkg_slack_fetch = git
+pkg_slack_repo = https://github.com/DonBranson/slack.git
+pkg_slack_commit = master
+
+PACKAGES += smother
+pkg_smother_name = smother
+pkg_smother_description = Extended code coverage metrics for Erlang.
+pkg_smother_homepage = https://ramsay-t.github.io/Smother/
+pkg_smother_fetch = git
+pkg_smother_repo = https://github.com/ramsay-t/Smother
+pkg_smother_commit = master
+
+PACKAGES += snappyer
+pkg_snappyer_name = snappyer
+pkg_snappyer_description = Snappy as nif for Erlang
+pkg_snappyer_homepage = https://github.com/zmstone/snappyer
+pkg_snappyer_fetch = git
+pkg_snappyer_repo = https://github.com/zmstone/snappyer.git
+pkg_snappyer_commit = master
+
+PACKAGES += social
+pkg_social_name = social
+pkg_social_description = Cowboy handler for social login via OAuth2 providers
+pkg_social_homepage = https://github.com/dvv/social
+pkg_social_fetch = git
+pkg_social_repo = https://github.com/dvv/social
+pkg_social_commit = master
+
+PACKAGES += spapi_router
+pkg_spapi_router_name = spapi_router
+pkg_spapi_router_description = Partially-connected Erlang clustering
+pkg_spapi_router_homepage = https://github.com/spilgames/spapi-router
+pkg_spapi_router_fetch = git
+pkg_spapi_router_repo = https://github.com/spilgames/spapi-router
+pkg_spapi_router_commit = master
+
+PACKAGES += sqerl
+pkg_sqerl_name = sqerl
+pkg_sqerl_description = An Erlang-flavoured SQL DSL
+pkg_sqerl_homepage = https://github.com/hairyhum/sqerl
+pkg_sqerl_fetch = git
+pkg_sqerl_repo = https://github.com/hairyhum/sqerl
+pkg_sqerl_commit = master
+
+PACKAGES += srly
+pkg_srly_name = srly
+pkg_srly_description = Native Erlang Unix serial interface
+pkg_srly_homepage = https://github.com/msantos/srly
+pkg_srly_fetch = git
+pkg_srly_repo = https://github.com/msantos/srly
+pkg_srly_commit = master
+
+PACKAGES += sshrpc
+pkg_sshrpc_name = sshrpc
+pkg_sshrpc_description = Erlang SSH RPC module (experimental)
+pkg_sshrpc_homepage = https://github.com/jj1bdx/sshrpc
+pkg_sshrpc_fetch = git
+pkg_sshrpc_repo = https://github.com/jj1bdx/sshrpc
+pkg_sshrpc_commit = master
+
+PACKAGES += stable
+pkg_stable_name = stable
+pkg_stable_description = Library of assorted helpers for Cowboy web server.
+pkg_stable_homepage = https://github.com/dvv/stable
+pkg_stable_fetch = git
+pkg_stable_repo = https://github.com/dvv/stable
+pkg_stable_commit = master
+
+PACKAGES += statebox
+pkg_statebox_name = statebox
+pkg_statebox_description = Erlang state monad with merge/conflict-resolution capabilities. Useful for Riak.
+pkg_statebox_homepage = https://github.com/mochi/statebox
+pkg_statebox_fetch = git
+pkg_statebox_repo = https://github.com/mochi/statebox
+pkg_statebox_commit = master
+
+PACKAGES += statebox_riak
+pkg_statebox_riak_name = statebox_riak
+pkg_statebox_riak_description = Convenience library that makes it easier to use statebox with riak, extracted from best practices in our production code at Mochi Media.
+pkg_statebox_riak_homepage = https://github.com/mochi/statebox_riak
+pkg_statebox_riak_fetch = git
+pkg_statebox_riak_repo = https://github.com/mochi/statebox_riak
+pkg_statebox_riak_commit = master
+
+PACKAGES += statman
+pkg_statman_name = statman
+pkg_statman_description = Efficiently collect massive volumes of metrics inside the Erlang VM
+pkg_statman_homepage = https://github.com/knutin/statman
+pkg_statman_fetch = git
+pkg_statman_repo = https://github.com/knutin/statman
+pkg_statman_commit = master
+
+PACKAGES += statsderl
+pkg_statsderl_name = statsderl
+pkg_statsderl_description = StatsD client (erlang)
+pkg_statsderl_homepage = https://github.com/lpgauth/statsderl
+pkg_statsderl_fetch = git
+pkg_statsderl_repo = https://github.com/lpgauth/statsderl
+pkg_statsderl_commit = master
+
+PACKAGES += stdinout_pool
+pkg_stdinout_pool_name = stdinout_pool
+pkg_stdinout_pool_description = stdinout_pool: stuff goes in, stuff goes out. There's never any miscommunication.
+pkg_stdinout_pool_homepage = https://github.com/mattsta/erlang-stdinout-pool
+pkg_stdinout_pool_fetch = git
+pkg_stdinout_pool_repo = https://github.com/mattsta/erlang-stdinout-pool
+pkg_stdinout_pool_commit = master
+
+PACKAGES += stockdb
+pkg_stockdb_name = stockdb
+pkg_stockdb_description = Database for storing Stock Exchange quotes in Erlang
+pkg_stockdb_homepage = https://github.com/maxlapshin/stockdb
+pkg_stockdb_fetch = git
+pkg_stockdb_repo = https://github.com/maxlapshin/stockdb
+pkg_stockdb_commit = master
+
+PACKAGES += stripe
+pkg_stripe_name = stripe
+pkg_stripe_description = Erlang interface to the stripe.com API
+pkg_stripe_homepage = https://github.com/mattsta/stripe-erlang
+pkg_stripe_fetch = git
+pkg_stripe_repo = https://github.com/mattsta/stripe-erlang
+pkg_stripe_commit = v1
+
+PACKAGES += subproc
+pkg_subproc_name = subproc
+pkg_subproc_description = UNIX subprocess manager with {active,once|false} modes
+pkg_subproc_homepage = http://dozzie.jarowit.net/trac/wiki/subproc
+pkg_subproc_fetch = git
+pkg_subproc_repo = https://github.com/dozzie/subproc
+pkg_subproc_commit = v0.1.0
+
+PACKAGES += supervisor3
+pkg_supervisor3_name = supervisor3
+pkg_supervisor3_description = OTP supervisor with additional strategies
+pkg_supervisor3_homepage = https://github.com/klarna/supervisor3
+pkg_supervisor3_fetch = git
+pkg_supervisor3_repo = https://github.com/klarna/supervisor3.git
+pkg_supervisor3_commit = master
+
+PACKAGES += surrogate
+pkg_surrogate_name = surrogate
+pkg_surrogate_description = Proxy server written in Erlang. Supports reverse proxy load balancing and forward proxy with HTTP (including CONNECT), SOCKS4, SOCKS5, and transparent proxy modes.
+pkg_surrogate_homepage = https://github.com/skruger/Surrogate
+pkg_surrogate_fetch = git
+pkg_surrogate_repo = https://github.com/skruger/Surrogate
+pkg_surrogate_commit = master
+
+PACKAGES += swab
+pkg_swab_name = swab
+pkg_swab_description = General purpose buffer handling module
+pkg_swab_homepage = https://github.com/crownedgrouse/swab
+pkg_swab_fetch = git
+pkg_swab_repo = https://github.com/crownedgrouse/swab
+pkg_swab_commit = master
+
+PACKAGES += swarm
+pkg_swarm_name = swarm
+pkg_swarm_description = Fast and simple acceptor pool for Erlang
+pkg_swarm_homepage = https://github.com/jeremey/swarm
+pkg_swarm_fetch = git
+pkg_swarm_repo = https://github.com/jeremey/swarm
+pkg_swarm_commit = master
+
+PACKAGES += switchboard
+pkg_switchboard_name = switchboard
+pkg_switchboard_description = A framework for processing email using worker plugins.
+pkg_switchboard_homepage = https://github.com/thusfresh/switchboard
+pkg_switchboard_fetch = git
+pkg_switchboard_repo = https://github.com/thusfresh/switchboard
+pkg_switchboard_commit = master
+
+PACKAGES += syn
+pkg_syn_name = syn
+pkg_syn_description = A global Process Registry and Process Group manager for Erlang.
+pkg_syn_homepage = https://github.com/ostinelli/syn
+pkg_syn_fetch = git
+pkg_syn_repo = https://github.com/ostinelli/syn
+pkg_syn_commit = master
+
+PACKAGES += sync
+pkg_sync_name = sync
+pkg_sync_description = On-the-fly recompiling and reloading in Erlang.
+pkg_sync_homepage = https://github.com/rustyio/sync
+pkg_sync_fetch = git
+pkg_sync_repo = https://github.com/rustyio/sync
+pkg_sync_commit = master
+
+PACKAGES += syntaxerl
+pkg_syntaxerl_name = syntaxerl
+pkg_syntaxerl_description = Syntax checker for Erlang
+pkg_syntaxerl_homepage = https://github.com/ten0s/syntaxerl
+pkg_syntaxerl_fetch = git
+pkg_syntaxerl_repo = https://github.com/ten0s/syntaxerl
+pkg_syntaxerl_commit = master
+
+PACKAGES += syslog
+pkg_syslog_name = syslog
+pkg_syslog_description = Erlang port driver for interacting with syslog via syslog(3)
+pkg_syslog_homepage = https://github.com/Vagabond/erlang-syslog
+pkg_syslog_fetch = git
+pkg_syslog_repo = https://github.com/Vagabond/erlang-syslog
+pkg_syslog_commit = master
+
+PACKAGES += taskforce
+pkg_taskforce_name = taskforce
+pkg_taskforce_description = Erlang worker pools for controlled parallelisation of arbitrary tasks.
+pkg_taskforce_homepage = https://github.com/g-andrade/taskforce
+pkg_taskforce_fetch = git
+pkg_taskforce_repo = https://github.com/g-andrade/taskforce
+pkg_taskforce_commit = master
+
+PACKAGES += tddreloader
+pkg_tddreloader_name = tddreloader
+pkg_tddreloader_description = Shell utility for recompiling, reloading, and testing code as it changes
+pkg_tddreloader_homepage = https://github.com/version2beta/tddreloader
+pkg_tddreloader_fetch = git
+pkg_tddreloader_repo = https://github.com/version2beta/tddreloader
+pkg_tddreloader_commit = master
+
+PACKAGES += tempo
+pkg_tempo_name = tempo
+pkg_tempo_description = NIF-based date and time parsing and formatting for Erlang.
+pkg_tempo_homepage = https://github.com/selectel/tempo
+pkg_tempo_fetch = git
+pkg_tempo_repo = https://github.com/selectel/tempo
+pkg_tempo_commit = master
+
+PACKAGES += ticktick
+pkg_ticktick_name = ticktick
+pkg_ticktick_description = Ticktick is an ID generator for messaging services.
+pkg_ticktick_homepage = https://github.com/ericliang/ticktick
+pkg_ticktick_fetch = git
+pkg_ticktick_repo = https://github.com/ericliang/ticktick
+pkg_ticktick_commit = master
+
+PACKAGES += tinymq
+pkg_tinymq_name = tinymq
+pkg_tinymq_description = TinyMQ - a diminutive, in-memory message queue
+pkg_tinymq_homepage = https://github.com/ChicagoBoss/tinymq
+pkg_tinymq_fetch = git
+pkg_tinymq_repo = https://github.com/ChicagoBoss/tinymq
+pkg_tinymq_commit = master
+
+PACKAGES += tinymt
+pkg_tinymt_name = tinymt
+pkg_tinymt_description = TinyMT pseudo random number generator for Erlang.
+pkg_tinymt_homepage = https://github.com/jj1bdx/tinymt-erlang
+pkg_tinymt_fetch = git
+pkg_tinymt_repo = https://github.com/jj1bdx/tinymt-erlang
+pkg_tinymt_commit = master
+
+PACKAGES += tirerl
+pkg_tirerl_name = tirerl
+pkg_tirerl_description = Erlang interface to Elasticsearch
+pkg_tirerl_homepage = https://github.com/inaka/tirerl
+pkg_tirerl_fetch = git
+pkg_tirerl_repo = https://github.com/inaka/tirerl
+pkg_tirerl_commit = master
+
+PACKAGES += toml
+pkg_toml_name = toml
+pkg_toml_description = TOML (0.4.0) config parser
+pkg_toml_homepage = http://dozzie.jarowit.net/trac/wiki/TOML
+pkg_toml_fetch = git
+pkg_toml_repo = https://github.com/dozzie/toml
+pkg_toml_commit = v0.2.0
+
+PACKAGES += traffic_tools
+pkg_traffic_tools_name = traffic_tools
+pkg_traffic_tools_description = Simple traffic limiting library
+pkg_traffic_tools_homepage = https://github.com/systra/traffic_tools
+pkg_traffic_tools_fetch = git
+pkg_traffic_tools_repo = https://github.com/systra/traffic_tools
+pkg_traffic_tools_commit = master
+
+PACKAGES += trails
+pkg_trails_name = trails
+pkg_trails_description = A couple of improvements over Cowboy Routes
+pkg_trails_homepage = http://inaka.github.io/cowboy-trails/
+pkg_trails_fetch = git
+pkg_trails_repo = https://github.com/inaka/cowboy-trails
+pkg_trails_commit = master
+
+PACKAGES += trane
+pkg_trane_name = trane
+pkg_trane_description = SAX-style parser for broken HTML, in Erlang
+pkg_trane_homepage = https://github.com/massemanet/trane
+pkg_trane_fetch = git
+pkg_trane_repo = https://github.com/massemanet/trane
+pkg_trane_commit = master
+
+PACKAGES += transit
+pkg_transit_name = transit
+pkg_transit_description = Transit format for Erlang
+pkg_transit_homepage = https://github.com/isaiah/transit-erlang
+pkg_transit_fetch = git
+pkg_transit_repo = https://github.com/isaiah/transit-erlang
+pkg_transit_commit = master
+
+PACKAGES += trie
+pkg_trie_name = trie
+pkg_trie_description = Erlang Trie Implementation
+pkg_trie_homepage = https://github.com/okeuday/trie
+pkg_trie_fetch = git
+pkg_trie_repo = https://github.com/okeuday/trie
+pkg_trie_commit = master
+
+PACKAGES += triq
+pkg_triq_name = triq
+pkg_triq_description = Trifork QuickCheck
+pkg_triq_homepage = https://triq.gitlab.io
+pkg_triq_fetch = git
+pkg_triq_repo = https://gitlab.com/triq/triq.git
+pkg_triq_commit = master
+
+PACKAGES += tunctl
+pkg_tunctl_name = tunctl
+pkg_tunctl_description = Erlang TUN/TAP interface
+pkg_tunctl_homepage = https://github.com/msantos/tunctl
+pkg_tunctl_fetch = git
+pkg_tunctl_repo = https://github.com/msantos/tunctl
+pkg_tunctl_commit = master
+
+PACKAGES += twerl
+pkg_twerl_name = twerl
+pkg_twerl_description = Erlang client for the Twitter Streaming API
+pkg_twerl_homepage = https://github.com/lucaspiller/twerl
+pkg_twerl_fetch = git
+pkg_twerl_repo = https://github.com/lucaspiller/twerl
+pkg_twerl_commit = oauth
+
+PACKAGES += twitter_erlang
+pkg_twitter_erlang_name = twitter_erlang
+pkg_twitter_erlang_description = An Erlang Twitter client
+pkg_twitter_erlang_homepage = https://github.com/ngerakines/erlang_twitter
+pkg_twitter_erlang_fetch = git
+pkg_twitter_erlang_repo = https://github.com/ngerakines/erlang_twitter
+pkg_twitter_erlang_commit = master
+
+PACKAGES += ucol_nif
+pkg_ucol_nif_name = ucol_nif
+pkg_ucol_nif_description = ICU-based collation Erlang module
+pkg_ucol_nif_homepage = https://github.com/refuge/ucol_nif
+pkg_ucol_nif_fetch = git
+pkg_ucol_nif_repo = https://github.com/refuge/ucol_nif
+pkg_ucol_nif_commit = master
+
+PACKAGES += unicorn
+pkg_unicorn_name = unicorn
+pkg_unicorn_description = Generic configuration server
+pkg_unicorn_homepage = https://github.com/shizzard/unicorn
+pkg_unicorn_fetch = git
+pkg_unicorn_repo = https://github.com/shizzard/unicorn
+pkg_unicorn_commit = master
+
+PACKAGES += unsplit
+pkg_unsplit_name = unsplit
+pkg_unsplit_description = Resolves conflicts in Mnesia after network splits
+pkg_unsplit_homepage = https://github.com/uwiger/unsplit
+pkg_unsplit_fetch = git
+pkg_unsplit_repo = https://github.com/uwiger/unsplit
+pkg_unsplit_commit = master
+
+PACKAGES += uuid
+pkg_uuid_name = uuid
+pkg_uuid_description = Erlang UUID Implementation
+pkg_uuid_homepage = https://github.com/okeuday/uuid
+pkg_uuid_fetch = git
+pkg_uuid_repo = https://github.com/okeuday/uuid
+pkg_uuid_commit = master
+
+PACKAGES += ux
+pkg_ux_name = ux
+pkg_ux_description = Unicode eXtension for Erlang (Strings, Collation)
+pkg_ux_homepage = https://github.com/erlang-unicode/ux
+pkg_ux_fetch = git
+pkg_ux_repo = https://github.com/erlang-unicode/ux
+pkg_ux_commit = master
+
+PACKAGES += vert
+pkg_vert_name = vert
+pkg_vert_description = Erlang binding to libvirt virtualization API
+pkg_vert_homepage = https://github.com/msantos/erlang-libvirt
+pkg_vert_fetch = git
+pkg_vert_repo = https://github.com/msantos/erlang-libvirt
+pkg_vert_commit = master
+
+PACKAGES += verx
+pkg_verx_name = verx
+pkg_verx_description = Erlang implementation of the libvirtd remote protocol
+pkg_verx_homepage = https://github.com/msantos/verx
+pkg_verx_fetch = git
+pkg_verx_repo = https://github.com/msantos/verx
+pkg_verx_commit = master
+
+PACKAGES += vmq_acl
+pkg_vmq_acl_name = vmq_acl
+pkg_vmq_acl_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_acl_homepage = https://verne.mq/
+pkg_vmq_acl_fetch = git
+pkg_vmq_acl_repo = https://github.com/erlio/vmq_acl
+pkg_vmq_acl_commit = master
+
+PACKAGES += vmq_bridge
+pkg_vmq_bridge_name = vmq_bridge
+pkg_vmq_bridge_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_bridge_homepage = https://verne.mq/
+pkg_vmq_bridge_fetch = git
+pkg_vmq_bridge_repo = https://github.com/erlio/vmq_bridge
+pkg_vmq_bridge_commit = master
+
+PACKAGES += vmq_graphite
+pkg_vmq_graphite_name = vmq_graphite
+pkg_vmq_graphite_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_graphite_homepage = https://verne.mq/
+pkg_vmq_graphite_fetch = git
+pkg_vmq_graphite_repo = https://github.com/erlio/vmq_graphite
+pkg_vmq_graphite_commit = master
+
+PACKAGES += vmq_passwd
+pkg_vmq_passwd_name = vmq_passwd
+pkg_vmq_passwd_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_passwd_homepage = https://verne.mq/
+pkg_vmq_passwd_fetch = git
+pkg_vmq_passwd_repo = https://github.com/erlio/vmq_passwd
+pkg_vmq_passwd_commit = master
+
+PACKAGES += vmq_server
+pkg_vmq_server_name = vmq_server
+pkg_vmq_server_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_server_homepage = https://verne.mq/
+pkg_vmq_server_fetch = git
+pkg_vmq_server_repo = https://github.com/erlio/vmq_server
+pkg_vmq_server_commit = master
+
+PACKAGES += vmq_snmp
+pkg_vmq_snmp_name = vmq_snmp
+pkg_vmq_snmp_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_snmp_homepage = https://verne.mq/
+pkg_vmq_snmp_fetch = git
+pkg_vmq_snmp_repo = https://github.com/erlio/vmq_snmp
+pkg_vmq_snmp_commit = master
+
+PACKAGES += vmq_systree
+pkg_vmq_systree_name = vmq_systree
+pkg_vmq_systree_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_systree_homepage = https://verne.mq/
+pkg_vmq_systree_fetch = git
+pkg_vmq_systree_repo = https://github.com/erlio/vmq_systree
+pkg_vmq_systree_commit = master
+
+PACKAGES += vmstats
+pkg_vmstats_name = vmstats
+pkg_vmstats_description = Tiny Erlang app that works in conjunction with statsderl to generate Erlang VM metrics for Graphite.
+pkg_vmstats_homepage = https://github.com/ferd/vmstats
+pkg_vmstats_fetch = git
+pkg_vmstats_repo = https://github.com/ferd/vmstats
+pkg_vmstats_commit = master
+
+PACKAGES += walrus
+pkg_walrus_name = walrus
+pkg_walrus_description = Walrus - Mustache-like Templating
+pkg_walrus_homepage = https://github.com/devinus/walrus
+pkg_walrus_fetch = git
+pkg_walrus_repo = https://github.com/devinus/walrus
+pkg_walrus_commit = master
+
+PACKAGES += webmachine
+pkg_webmachine_name = webmachine
+pkg_webmachine_description = A REST-based system for building web applications.
+pkg_webmachine_homepage = https://github.com/basho/webmachine
+pkg_webmachine_fetch = git
+pkg_webmachine_repo = https://github.com/basho/webmachine
+pkg_webmachine_commit = master
+
+PACKAGES += websocket_client
+pkg_websocket_client_name = websocket_client
+pkg_websocket_client_description = Erlang websocket client (ws and wss supported)
+pkg_websocket_client_homepage = https://github.com/jeremyong/websocket_client
+pkg_websocket_client_fetch = git
+pkg_websocket_client_repo = https://github.com/jeremyong/websocket_client
+pkg_websocket_client_commit = master
+
+PACKAGES += worker_pool
+pkg_worker_pool_name = worker_pool
+pkg_worker_pool_description = A simple Erlang worker pool
+pkg_worker_pool_homepage = https://github.com/inaka/worker_pool
+pkg_worker_pool_fetch = git
+pkg_worker_pool_repo = https://github.com/inaka/worker_pool
+pkg_worker_pool_commit = master
+
+PACKAGES += wrangler
+pkg_wrangler_name = wrangler
+pkg_wrangler_description = Import of the Wrangler SVN repository.
+pkg_wrangler_homepage = http://www.cs.kent.ac.uk/projects/wrangler/Home.html
+pkg_wrangler_fetch = git
+pkg_wrangler_repo = https://github.com/RefactoringTools/wrangler
+pkg_wrangler_commit = master
+
+PACKAGES += wsock
+pkg_wsock_name = wsock
+pkg_wsock_description = Erlang library to build WebSocket clients and servers
+pkg_wsock_homepage = https://github.com/madtrick/wsock
+pkg_wsock_fetch = git
+pkg_wsock_repo = https://github.com/madtrick/wsock
+pkg_wsock_commit = master
+
+PACKAGES += xhttpc
+pkg_xhttpc_name = xhttpc
+pkg_xhttpc_description = Extensible HTTP Client for Erlang
+pkg_xhttpc_homepage = https://github.com/seriyps/xhttpc
+pkg_xhttpc_fetch = git
+pkg_xhttpc_repo = https://github.com/seriyps/xhttpc
+pkg_xhttpc_commit = master
+
+PACKAGES += xref_runner
+pkg_xref_runner_name = xref_runner
+pkg_xref_runner_description = Erlang Xref Runner (inspired by rebar xref)
+pkg_xref_runner_homepage = https://github.com/inaka/xref_runner
+pkg_xref_runner_fetch = git
+pkg_xref_runner_repo = https://github.com/inaka/xref_runner
+pkg_xref_runner_commit = master
+
+PACKAGES += yamerl
+pkg_yamerl_name = yamerl
+pkg_yamerl_description = YAML 1.2 parser in pure Erlang
+pkg_yamerl_homepage = https://github.com/yakaz/yamerl
+pkg_yamerl_fetch = git
+pkg_yamerl_repo = https://github.com/yakaz/yamerl
+pkg_yamerl_commit = master
+
+PACKAGES += yamler
+pkg_yamler_name = yamler
+pkg_yamler_description = libyaml-based YAML loader for Erlang
+pkg_yamler_homepage = https://github.com/goertzenator/yamler
+pkg_yamler_fetch = git
+pkg_yamler_repo = https://github.com/goertzenator/yamler
+pkg_yamler_commit = master
+
+PACKAGES += yaws
+pkg_yaws_name = yaws
+pkg_yaws_description = Yaws webserver
+pkg_yaws_homepage = http://yaws.hyber.org
+pkg_yaws_fetch = git
+pkg_yaws_repo = https://github.com/klacke/yaws
+pkg_yaws_commit = master
+
+PACKAGES += zab_engine
+pkg_zab_engine_name = zab_engine
+pkg_zab_engine_description = Zab protocol implemented in Erlang
+pkg_zab_engine_homepage = https://github.com/xinmingyao/zab_engine
+pkg_zab_engine_fetch = git
+pkg_zab_engine_repo = https://github.com/xinmingyao/zab_engine
+pkg_zab_engine_commit = master
+
+PACKAGES += zabbix_sender
+pkg_zabbix_sender_name = zabbix_sender
+pkg_zabbix_sender_description = Zabbix trapper for sending data to Zabbix in pure Erlang
+pkg_zabbix_sender_homepage = https://github.com/stalkermn/zabbix_sender
+pkg_zabbix_sender_fetch = git
+pkg_zabbix_sender_repo = https://github.com/stalkermn/zabbix_sender.git
+pkg_zabbix_sender_commit = master
+
+PACKAGES += zeta
+pkg_zeta_name = zeta
+pkg_zeta_description = HTTP access log parser in Erlang
+pkg_zeta_homepage = https://github.com/s1n4/zeta
+pkg_zeta_fetch = git
+pkg_zeta_repo = https://github.com/s1n4/zeta
+pkg_zeta_commit = master
+
+PACKAGES += zippers
+pkg_zippers_name = zippers
+pkg_zippers_description = A library for functional zipper data structures in Erlang.
+pkg_zippers_homepage = https://github.com/ferd/zippers
+pkg_zippers_fetch = git
+pkg_zippers_repo = https://github.com/ferd/zippers
+pkg_zippers_commit = master
+
+PACKAGES += zlists
+pkg_zlists_name = zlists
+pkg_zlists_description = Erlang lazy lists library.
+pkg_zlists_homepage = https://github.com/vjache/erlang-zlists
+pkg_zlists_fetch = git
+pkg_zlists_repo = https://github.com/vjache/erlang-zlists
+pkg_zlists_commit = master
+
+PACKAGES += zraft_lib
+pkg_zraft_lib_name = zraft_lib
+pkg_zraft_lib_description = Erlang Raft consensus protocol implementation
+pkg_zraft_lib_homepage = https://github.com/dreyk/zraft_lib
+pkg_zraft_lib_fetch = git
+pkg_zraft_lib_repo = https://github.com/dreyk/zraft_lib
+pkg_zraft_lib_commit = master
+
+PACKAGES += zucchini
+pkg_zucchini_name = zucchini
+pkg_zucchini_description = An Erlang INI parser
+pkg_zucchini_homepage = https://github.com/devinus/zucchini
+pkg_zucchini_fetch = git
+pkg_zucchini_repo = https://github.com/devinus/zucchini
+pkg_zucchini_commit = master
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: search
+
+define pkg_print
+ $(verbose) printf "%s\n" \
+ $(if $(call core_eq,$(1),$(pkg_$(1)_name)),,"Pkg name: $(1)") \
+ "App name: $(pkg_$(1)_name)" \
+ "Description: $(pkg_$(1)_description)" \
+ "Home page: $(pkg_$(1)_homepage)" \
+ "Fetch with: $(pkg_$(1)_fetch)" \
+ "Repository: $(pkg_$(1)_repo)" \
+ "Commit: $(pkg_$(1)_commit)" \
+ ""
+
+endef
+
+search:
+ifdef q
+ $(foreach p,$(PACKAGES), \
+ $(if $(findstring $(call core_lc,$(q)),$(call core_lc,$(pkg_$(p)_name) $(pkg_$(p)_description))), \
+ $(call pkg_print,$(p))))
+else
+ $(foreach p,$(PACKAGES),$(call pkg_print,$(p)))
+endif
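+
+# Example (assumed invocation): `q` is matched case-insensitively against
+# both package names and descriptions; without it, every package is printed.
+#
+#   make search q=mqtt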
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-deps clean-tmp-deps.log
+
+# Configuration.
+
+ifdef OTP_DEPS
+$(warning The variable OTP_DEPS is deprecated in favor of LOCAL_DEPS.)
+endif
+
+IGNORE_DEPS ?=
+export IGNORE_DEPS
+
+APPS_DIR ?= $(CURDIR)/apps
+export APPS_DIR
+
+DEPS_DIR ?= $(CURDIR)/deps
+export DEPS_DIR
+
+REBAR_DEPS_DIR = $(DEPS_DIR)
+export REBAR_DEPS_DIR
+
+REBAR_GIT ?= https://github.com/rebar/rebar
+REBAR_COMMIT ?= 576e12171ab8d69b048b827b92aa65d067deea01
+
+# External "early" plugins (see core/plugins.mk for regular plugins).
+# They both use the core_dep_plugin macro.
+
+define core_dep_plugin
+ifeq ($(2),$(PROJECT))
+-include $$(patsubst $(PROJECT)/%,%,$(1))
+else
+-include $(DEPS_DIR)/$(1)
+
+$(DEPS_DIR)/$(1): $(DEPS_DIR)/$(2) ;
+endif
+endef
+
+DEP_EARLY_PLUGINS ?=
+
+$(foreach p,$(DEP_EARLY_PLUGINS),\
+ $(eval $(if $(findstring /,$p),\
+ $(call core_dep_plugin,$p,$(firstword $(subst /, ,$p))),\
+ $(call core_dep_plugin,$p/early-plugins.mk,$p))))
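+
+# Example (hypothetical plugin names): a bare dependency name loads that
+# dependency's early-plugins.mk, while a name containing a slash is taken
+# as a file path relative to $(DEPS_DIR):
+#
+#   DEP_EARLY_PLUGINS = my_dep other_dep/mk/early-plugins.mk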
+
+# Query functions.
+
+query_fetch_method = $(if $(dep_$(1)),$(call _qfm_dep,$(word 1,$(dep_$(1)))),$(call _qfm_pkg,$(1)))
+_qfm_dep = $(if $(dep_fetch_$(1)),$(1),$(if $(IS_DEP),legacy,fail))
+_qfm_pkg = $(if $(pkg_$(1)_fetch),$(pkg_$(1)_fetch),fail)
+
+query_name = $(if $(dep_$(1)),$(1),$(if $(pkg_$(1)_name),$(pkg_$(1)_name),$(1)))
+
+query_repo = $(call _qr,$(1),$(call query_fetch_method,$(1)))
+_qr = $(if $(query_repo_$(2)),$(call query_repo_$(2),$(1)),$(call dep_repo,$(1)))
+
+query_repo_default = $(if $(dep_$(1)),$(word 2,$(dep_$(1))),$(pkg_$(1)_repo))
+query_repo_git = $(patsubst git://github.com/%,https://github.com/%,$(call query_repo_default,$(1)))
+query_repo_git-subfolder = $(call query_repo_git,$(1))
+query_repo_git-submodule = -
+query_repo_hg = $(call query_repo_default,$(1))
+query_repo_svn = $(call query_repo_default,$(1))
+query_repo_cp = $(call query_repo_default,$(1))
+query_repo_ln = $(call query_repo_default,$(1))
+query_repo_hex = https://hex.pm/packages/$(if $(word 3,$(dep_$(1))),$(word 3,$(dep_$(1))),$(1))
+query_repo_fail = -
+query_repo_legacy = -
+
+query_version = $(call _qv,$(1),$(call query_fetch_method,$(1)))
+_qv = $(if $(query_version_$(2)),$(call query_version_$(2),$(1)),$(call dep_commit,$(1)))
+
+query_version_default = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(word 3,$(dep_$(1))),$(pkg_$(1)_commit)))
+query_version_git = $(call query_version_default,$(1))
+query_version_git-subfolder = $(call query_version_git,$(1))
+query_version_git-submodule = -
+query_version_hg = $(call query_version_default,$(1))
+query_version_svn = -
+query_version_cp = -
+query_version_ln = -
+query_version_hex = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(word 2,$(dep_$(1))),$(pkg_$(1)_commit)))
+query_version_fail = -
+query_version_legacy = -
+
+query_extra = $(call _qe,$(1),$(call query_fetch_method,$(1)))
+_qe = $(if $(query_extra_$(2)),$(call query_extra_$(2),$(1)),-)
+
+query_extra_git = -
+query_extra_git-subfolder = $(if $(dep_$(1)),subfolder=$(word 4,$(dep_$(1))),-)
+query_extra_git-submodule = -
+query_extra_hg = -
+query_extra_svn = -
+query_extra_cp = -
+query_extra_ln = -
+query_extra_hex = $(if $(dep_$(1)),package-name=$(word 3,$(dep_$(1))),-)
+query_extra_fail = -
+query_extra_legacy = -
+
+query_absolute_path = $(addprefix $(DEPS_DIR)/,$(call query_name,$(1)))
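+
+# Example (hypothetical dependency): given
+#
+#   dep_cowboy = git https://github.com/ninenines/cowboy 2.12.0
+#
+# query_fetch_method returns git, query_repo the https:// URL,
+# query_version 2.12.0 and query_absolute_path $(DEPS_DIR)/cowboy.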
+
+# Deprecated legacy query functions.
+dep_fetch = $(call query_fetch_method,$(1))
+dep_name = $(call query_name,$(1))
+dep_repo = $(call query_repo_git,$(1))
+dep_commit = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(if $(filter hex,$(word 1,$(dep_$(1)))),$(word 2,$(dep_$(1))),$(word 3,$(dep_$(1)))),$(pkg_$(1)_commit)))
+
+LOCAL_DEPS_DIRS = $(foreach a,$(LOCAL_DEPS),$(if $(wildcard $(APPS_DIR)/$(a)),$(APPS_DIR)/$(a)))
+ALL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(foreach dep,$(filter-out $(IGNORE_DEPS),$(BUILD_DEPS) $(DEPS)),$(call dep_name,$(dep))))
+
+# When we are calling an app directly we don't want to include it here;
+# otherwise it'll be treated both as an app and as the top-level project.
+ALL_APPS_DIRS = $(if $(wildcard $(APPS_DIR)/),$(filter-out $(APPS_DIR),$(shell find $(APPS_DIR) -maxdepth 1 -type d)))
+ifdef ROOT_DIR
+ifndef IS_APP
+ALL_APPS_DIRS := $(filter-out $(APPS_DIR)/$(notdir $(CURDIR)),$(ALL_APPS_DIRS))
+endif
+endif
+
+ifeq ($(filter $(APPS_DIR) $(DEPS_DIR),$(subst :, ,$(ERL_LIBS))),)
+ifeq ($(ERL_LIBS),)
+ ERL_LIBS = $(APPS_DIR):$(DEPS_DIR)
+else
+ ERL_LIBS := $(ERL_LIBS):$(APPS_DIR):$(DEPS_DIR)
+endif
+endif
+export ERL_LIBS
+
+export NO_AUTOPATCH
+
+# Verbosity.
+
+dep_verbose_0 = @echo " DEP $1 ($(call dep_commit,$1))";
+dep_verbose_2 = set -x;
+dep_verbose = $(dep_verbose_$(V))
+
+# Optimization: don't recompile deps unless truly necessary.
+
+ifndef IS_DEP
+ifneq ($(MAKELEVEL),0)
+$(shell rm -f ebin/dep_built)
+endif
+endif
+
+# Core targets.
+
+ALL_APPS_DIRS_TO_BUILD = $(if $(LOCAL_DEPS_DIRS)$(IS_APP),$(LOCAL_DEPS_DIRS),$(ALL_APPS_DIRS))
+
+apps:: $(ALL_APPS_DIRS) clean-tmp-deps.log | $(ERLANG_MK_TMP)
+# Create an ebin directory for all apps to make sure Erlang recognizes them
+# as proper OTP applications when using -include_lib. This is a temporary
+# fix; a proper fix would be to compile apps/* in the right order.
+ifndef IS_APP
+ifneq ($(ALL_APPS_DIRS),)
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ mkdir -p $$dep/ebin; \
+ done
+endif
+endif
+# At the toplevel: if LOCAL_DEPS is defined with at least one local app, only
+# compile that list of apps. Otherwise, compile everything.
+# Within an app: compile all LOCAL_DEPS that are (uncompiled) local apps.
+ifneq ($(ALL_APPS_DIRS_TO_BUILD),)
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS_TO_BUILD); do \
+ if grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/apps.log; then \
+ :; \
+ else \
+ echo $$dep >> $(ERLANG_MK_TMP)/apps.log; \
+ $(MAKE) -C $$dep $(if $(IS_TEST),test-build-app) IS_APP=1; \
+ fi \
+ done
+endif
+
+clean-tmp-deps.log:
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_TMP)/apps.log $(ERLANG_MK_TMP)/deps.log
+endif
+
+# Erlang.mk does not rebuild dependencies after they have been compiled
+# once. A developer working on the top-level project and on some of its
+# dependencies at the same time may want to change this behavior. There
+# are two solutions:
+# 1. Set `FULL=1` so that all dependencies are visited and
+#    recursively recompiled if necessary.
+# 2. Set `FORCE_REBUILD=` to the specific list of dependencies that
+#    should be recompiled (instead of the whole set).
+
+FORCE_REBUILD ?=
+
+ifeq ($(origin FULL),undefined)
+ifneq ($(strip $(force_rebuild_dep)$(FORCE_REBUILD)),)
+define force_rebuild_dep
+echo "$(FORCE_REBUILD)" | grep -qw "$$(basename "$1")"
+endef
+endif
+endif
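+
+# Example (assumed invocations; "ranch" is a hypothetical dep name):
+#
+#   make FULL=1                  # visit all deps, recompiling as necessary
+#   make FORCE_REBUILD="ranch"   # force a rebuild of the listed deps only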
+
+ifneq ($(SKIP_DEPS),)
+deps::
+else
+deps:: $(ALL_DEPS_DIRS) apps clean-tmp-deps.log | $(ERLANG_MK_TMP)
+ifneq ($(ALL_DEPS_DIRS),)
+ $(verbose) set -e; for dep in $(ALL_DEPS_DIRS); do \
+ if grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/deps.log; then \
+ :; \
+ else \
+ echo $$dep >> $(ERLANG_MK_TMP)/deps.log; \
+ if [ -z "$(strip $(FULL))" ] $(if $(force_rebuild_dep),&& ! ($(call force_rebuild_dep,$$dep)),) && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ elif [ -f $$dep/GNUmakefile ] || [ -f $$dep/makefile ] || [ -f $$dep/Makefile ]; then \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ else \
+ echo "Error: No Makefile to build dependency $$dep." >&2; \
+ exit 2; \
+ fi \
+ fi \
+ done
+endif
+endif
+
+# Deps related targets.
+
+# @todo Rename GNUmakefile and makefile to Makefile first, if they exist.
+# While the makefile could also be named GNUmakefile or makefile,
+# in practice only Makefile has been needed so far.
+define dep_autopatch
+ if [ -f $(DEPS_DIR)/$(1)/erlang.mk ]; then \
+ rm -rf $(DEPS_DIR)/$1/ebin/; \
+ $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+ $(call dep_autopatch_erlang_mk,$(1)); \
+ elif [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+ if [ -f $(DEPS_DIR)/$1/rebar.lock ]; then \
+ $(call dep_autopatch2,$1); \
+ elif [ 0 != `grep -c "include ../\w*\.mk" $(DEPS_DIR)/$(1)/Makefile` ]; then \
+ $(call dep_autopatch2,$(1)); \
+ elif [ 0 != `grep -ci "^[^#].*rebar" $(DEPS_DIR)/$(1)/Makefile` ]; then \
+ $(call dep_autopatch2,$(1)); \
+ elif [ -n "`find $(DEPS_DIR)/$(1)/ -type f -name \*.mk -not -name erlang.mk -exec grep -i "^[^#].*rebar" '{}' \;`" ]; then \
+ $(call dep_autopatch2,$(1)); \
+ fi \
+ else \
+ if [ ! -d $(DEPS_DIR)/$(1)/src/ ]; then \
+ $(call dep_autopatch_noop,$(1)); \
+ else \
+ $(call dep_autopatch2,$(1)); \
+ fi \
+ fi
+endef
+
+define dep_autopatch2
+ ! test -f $(DEPS_DIR)/$1/ebin/$1.app || \
+ mv -n $(DEPS_DIR)/$1/ebin/$1.app $(DEPS_DIR)/$1/src/$1.app.src; \
+ rm -f $(DEPS_DIR)/$1/ebin/$1.app; \
+ if [ -f $(DEPS_DIR)/$1/src/$1.app.src.script ]; then \
+ $(call erlang,$(call dep_autopatch_appsrc_script.erl,$(1))); \
+ fi; \
+ $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+ if [ -f $(DEPS_DIR)/$(1)/rebar -o -f $(DEPS_DIR)/$(1)/rebar.config -o -f $(DEPS_DIR)/$(1)/rebar.config.script -o -f $(DEPS_DIR)/$1/rebar.lock ]; then \
+ $(call dep_autopatch_fetch_rebar); \
+ $(call dep_autopatch_rebar,$(1)); \
+ else \
+ $(call dep_autopatch_gen,$(1)); \
+ fi
+endef
+
+define dep_autopatch_noop
+ printf "noop:\n" > $(DEPS_DIR)/$(1)/Makefile
+endef
+
+# Replace "include erlang.mk" with a line that will load the parent Erlang.mk
+# if given. Do it for all 3 possible Makefile file names.
+ifeq ($(NO_AUTOPATCH_ERLANG_MK),)
+define dep_autopatch_erlang_mk
+ for f in Makefile makefile GNUmakefile; do \
+ if [ -f $(DEPS_DIR)/$1/$$f ]; then \
+ sed -i.bak s/'include *erlang.mk'/'include $$(if $$(ERLANG_MK_FILENAME),$$(ERLANG_MK_FILENAME),erlang.mk)'/ $(DEPS_DIR)/$1/$$f; \
+ fi \
+ done
+endef
+else
+define dep_autopatch_erlang_mk
+ :
+endef
+endif
+
+define dep_autopatch_gen
+ printf "%s\n" \
+ "ERLC_OPTS = +debug_info" \
+ "include ../../erlang.mk" > $(DEPS_DIR)/$(1)/Makefile
+endef
+
+# We use flock/lockf when available to avoid concurrency issues.
+define dep_autopatch_fetch_rebar
+ if command -v flock >/dev/null; then \
+ flock $(ERLANG_MK_TMP)/rebar.lock sh -c "$(call dep_autopatch_fetch_rebar2)"; \
+ elif command -v lockf >/dev/null; then \
+ lockf $(ERLANG_MK_TMP)/rebar.lock sh -c "$(call dep_autopatch_fetch_rebar2)"; \
+ else \
+ $(call dep_autopatch_fetch_rebar2); \
+ fi
+endef
+
+define dep_autopatch_fetch_rebar2
+ if [ ! -d $(ERLANG_MK_TMP)/rebar ]; then \
+ git clone -q -n -- $(REBAR_GIT) $(ERLANG_MK_TMP)/rebar; \
+ cd $(ERLANG_MK_TMP)/rebar; \
+ git checkout -q $(REBAR_COMMIT); \
+ ./bootstrap; \
+ cd -; \
+ fi
+endef
+
+define dep_autopatch_rebar
+ if [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+ mv $(DEPS_DIR)/$(1)/Makefile $(DEPS_DIR)/$(1)/Makefile.orig.mk; \
+ fi; \
+ $(call erlang,$(call dep_autopatch_rebar.erl,$(1))); \
+ rm -f $(DEPS_DIR)/$(1)/ebin/$(1).app
+endef
+
+define dep_autopatch_rebar.erl
+ application:load(rebar),
+ application:set_env(rebar, log_level, debug),
+ rmemo:start(),
+ Conf1 = case file:consult("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config)") of
+ {ok, Conf0} -> Conf0;
+ _ -> []
+ end,
+ {Conf, OsEnv} = fun() ->
+ case filelib:is_file("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)") of
+ false -> {Conf1, []};
+ true ->
+ Bindings0 = erl_eval:new_bindings(),
+ Bindings1 = erl_eval:add_binding('CONFIG', Conf1, Bindings0),
+ Bindings = erl_eval:add_binding('SCRIPT', "$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings1),
+ Before = os:getenv(),
+ {ok, Conf2} = file:script("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings),
+ {Conf2, lists:foldl(fun(E, Acc) -> lists:delete(E, Acc) end, os:getenv(), Before)}
+ end
+ end(),
+ Write = fun (Text) ->
+ file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/Makefile)", Text, [append])
+ end,
+ Escape = fun (Text) ->
+ re:replace(Text, "\\\\$$", "\$$$$", [global, {return, list}])
+ end,
+ Write("IGNORE_DEPS += edown eper eunit_formatters meck node_package "
+ "rebar_lock_deps_plugin rebar_vsn_plugin reltool_util\n"),
+ Write("C_SRC_DIR = /path/do/not/exist\n"),
+ Write("C_SRC_TYPE = rebar\n"),
+ Write("DRV_CFLAGS = -fPIC\nexport DRV_CFLAGS\n"),
+ Write(["ERLANG_ARCH = ", rebar_utils:wordsize(), "\nexport ERLANG_ARCH\n"]),
+ ToList = fun
+ (V) when is_atom(V) -> atom_to_list(V);
+ (V) when is_list(V) -> "'\\"" ++ V ++ "\\"'"
+ end,
+ fun() ->
+ Write("ERLC_OPTS = +debug_info\nexport ERLC_OPTS\n"),
+ case lists:keyfind(erl_opts, 1, Conf) of
+ false -> ok;
+ {_, ErlOpts} ->
+ lists:foreach(fun
+ ({d, D}) ->
+ Write("ERLC_OPTS += -D" ++ ToList(D) ++ "=1\n");
+ ({d, DKey, DVal}) ->
+ Write("ERLC_OPTS += -D" ++ ToList(DKey) ++ "=" ++ ToList(DVal) ++ "\n");
+ ({i, I}) ->
+ Write(["ERLC_OPTS += -I ", I, "\n"]);
+ ({platform_define, Regex, D}) ->
+ case rebar_utils:is_arch(Regex) of
+ true -> Write("ERLC_OPTS += -D" ++ ToList(D) ++ "=1\n");
+ false -> ok
+ end;
+ ({parse_transform, PT}) ->
+ Write("ERLC_OPTS += +'{parse_transform, " ++ ToList(PT) ++ "}'\n");
+ (_) -> ok
+ end, ErlOpts)
+ end,
+ Write("\n")
+ end(),
+ GetHexVsn = fun(N, NP) ->
+ case file:consult("$(call core_native_path,$(DEPS_DIR)/$1/rebar.lock)") of
+ {ok, Lock} ->
+ io:format("~p~n", [Lock]),
+ case lists:keyfind("1.1.0", 1, Lock) of
+ {_, LockPkgs} ->
+ io:format("~p~n", [LockPkgs]),
+ case lists:keyfind(atom_to_binary(N, latin1), 1, LockPkgs) of
+ {_, {pkg, _, Vsn}, _} ->
+ io:format("~p~n", [Vsn]),
+ {N, {hex, NP, binary_to_list(Vsn)}};
+ _ ->
+ false
+ end;
+ _ ->
+ false
+ end;
+ _ ->
+ false
+ end
+ end,
+ SemVsn = fun
+ ("~>" ++ S0) ->
+ S = case S0 of
+ " " ++ S1 -> S1;
+ _ -> S0
+ end,
+ case length([ok || $$. <- S]) of
+ 0 -> S ++ ".0.0";
+ 1 -> S ++ ".0";
+ _ -> S
+ end;
+ (S) -> S
+ end,
+ fun() ->
+ File = case lists:keyfind(deps, 1, Conf) of
+ false -> [];
+ {_, Deps} ->
+ [begin case case Dep of
+ N when is_atom(N) -> GetHexVsn(N, N);
+ {N, S} when is_atom(N), is_list(S) -> {N, {hex, N, SemVsn(S)}};
+ {N, {pkg, NP}} when is_atom(N) -> GetHexVsn(N, NP);
+ {N, S, {pkg, NP}} -> {N, {hex, NP, S}};
+ {N, S} when is_tuple(S) -> {N, S};
+ {N, _, S} -> {N, S};
+ {N, _, S, _} -> {N, S};
+ _ -> false
+ end of
+ false -> ok;
+ {Name, Source} ->
+ {Method, Repo, Commit} = case Source of
+ {hex, NPV, V} -> {hex, V, NPV};
+ {git, R} -> {git, R, master};
+ {M, R, {branch, C}} -> {M, R, C};
+ {M, R, {ref, C}} -> {M, R, C};
+ {M, R, {tag, C}} -> {M, R, C};
+ {M, R, C} -> {M, R, C}
+ end,
+ Write(io_lib:format("DEPS += ~s\ndep_~s = ~s ~s ~s~n", [Name, Name, Method, Repo, Commit]))
+ end end || Dep <- Deps]
+ end
+ end(),
+ fun() ->
+ case lists:keyfind(erl_first_files, 1, Conf) of
+ false -> ok;
+ {_, Files} ->
+ Names = [[" ", case lists:reverse(F) of
+ "lre." ++ Elif -> lists:reverse(Elif);
+ "lrx." ++ Elif -> lists:reverse(Elif);
+ "lry." ++ Elif -> lists:reverse(Elif);
+ Elif -> lists:reverse(Elif)
+ end] || "src/" ++ F <- Files],
+ Write(io_lib:format("COMPILE_FIRST +=~s\n", [Names]))
+ end
+ end(),
+ Write("\n\nrebar_dep: preprocess pre-deps deps pre-app app\n"),
+ Write("\npreprocess::\n"),
+ Write("\npre-deps::\n"),
+ Write("\npre-app::\n"),
+ PatchHook = fun(Cmd) ->
+ Cmd2 = re:replace(Cmd, "^([g]?make)(.*)( -C.*)", "\\\\1\\\\3\\\\2", [{return, list}]),
+ case Cmd2 of
+ "make -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1);
+ "gmake -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1);
+ "make " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1);
+ "gmake " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1);
+ _ -> Escape(Cmd)
+ end
+ end,
+ fun() ->
+ case lists:keyfind(pre_hooks, 1, Conf) of
+ false -> ok;
+ {_, Hooks} ->
+ [case H of
+ {'get-deps', Cmd} ->
+ Write("\npre-deps::\n\t" ++ PatchHook(Cmd) ++ "\n");
+ {compile, Cmd} ->
+ Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n");
+ {Regex, compile, Cmd} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n");
+ false -> ok
+ end;
+ _ -> ok
+ end || H <- Hooks]
+ end
+ end(),
+ ShellToMk = fun(V0) ->
+ V1 = re:replace(V0, "[$$][(]", "$$\(shell ", [global]),
+ V = re:replace(V1, "([$$])(?![(])(\\\\w*)", "\\\\1(\\\\2)", [global]),
+ re:replace(V, "-Werror\\\\b", "", [{return, list}, global])
+ end,
+ PortSpecs = fun() ->
+ case lists:keyfind(port_specs, 1, Conf) of
+ false ->
+ case filelib:is_dir("$(call core_native_path,$(DEPS_DIR)/$1/c_src)") of
+ false -> [];
+ true ->
+ [{"priv/" ++ proplists:get_value(so_name, Conf, "$(1)_drv.so"),
+ proplists:get_value(port_sources, Conf, ["c_src/*.c"]), []}]
+ end;
+ {_, Specs} ->
+ lists:flatten([case S of
+ {Output, Input} -> {ShellToMk(Output), Input, []};
+ {Regex, Output, Input} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {ShellToMk(Output), Input, []};
+ false -> []
+ end;
+ {Regex, Output, Input, [{env, Env}]} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {ShellToMk(Output), Input, Env};
+ false -> []
+ end
+ end || S <- Specs])
+ end
+ end(),
+ PortSpecWrite = fun (Text) ->
+ file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/c_src/Makefile.erlang.mk)", Text, [append])
+ end,
+ case PortSpecs of
+ [] -> ok;
+ _ ->
+ Write("\npre-app::\n\t@$$\(MAKE) --no-print-directory -f c_src/Makefile.erlang.mk\n"),
+ PortSpecWrite(io_lib:format("ERL_CFLAGS ?= -finline-functions -Wall -fPIC -I \\"~s/erts-~s/include\\" -I \\"~s\\"\n",
+ [code:root_dir(), erlang:system_info(version), code:lib_dir(erl_interface, include)])),
+ PortSpecWrite(io_lib:format("ERL_LDFLAGS ?= -L \\"~s\\" -lei\n",
+ [code:lib_dir(erl_interface, lib)])),
+ [PortSpecWrite(["\n", E, "\n"]) || E <- OsEnv],
+ FilterEnv = fun(Env) ->
+ lists:flatten([case E of
+ {_, _} -> E;
+ {Regex, K, V} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {K, V};
+ false -> []
+ end
+ end || E <- Env])
+ end,
+ MergeEnv = fun(Env) ->
+ lists:foldl(fun ({K, V}, Acc) ->
+ case lists:keyfind(K, 1, Acc) of
+ false -> [{K, rebar_utils:expand_env_variable(V, K, "")}|Acc];
+ {_, V0} -> [{K, rebar_utils:expand_env_variable(V, K, V0)}|Acc]
+ end
+ end, [], Env)
+ end,
+ PortEnv = case lists:keyfind(port_env, 1, Conf) of
+ false -> [];
+ {_, PortEnv0} -> FilterEnv(PortEnv0)
+ end,
+ PortSpec = fun ({Output, Input0, Env}) ->
+ filelib:ensure_dir("$(call core_native_path,$(DEPS_DIR)/$1/)" ++ Output),
+ Input = [[" ", I] || I <- Input0],
+ PortSpecWrite([
+ [["\n", K, " = ", ShellToMk(V)] || {K, V} <- lists:reverse(MergeEnv(PortEnv))],
+ case $(PLATFORM) of
+ darwin -> "\n\nLDFLAGS += -flat_namespace -undefined suppress";
+ _ -> ""
+ end,
+ "\n\nall:: ", Output, "\n\t@:\n\n",
+ "%.o: %.c\n\t$$\(CC) -c -o $$\@ $$\< $$\(CFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.C\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.cc\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.cpp\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ [[Output, ": ", K, " += ", ShellToMk(V), "\n"] || {K, V} <- lists:reverse(MergeEnv(FilterEnv(Env)))],
+ Output, ": $$\(foreach ext,.c .C .cc .cpp,",
+ "$$\(patsubst %$$\(ext),%.o,$$\(filter %$$\(ext),$$\(wildcard", Input, "))))\n",
+ "\t$$\(CC) -o $$\@ $$\? $$\(LDFLAGS) $$\(ERL_LDFLAGS) $$\(DRV_LDFLAGS) $$\(EXE_LDFLAGS)",
+ case {filename:extension(Output), $(PLATFORM)} of
+ {[], _} -> "\n";
+ {_, darwin} -> "\n";
+ _ -> " -shared\n"
+ end])
+ end,
+ [PortSpec(S) || S <- PortSpecs]
+ end,
+ fun() ->
+ case lists:keyfind(plugins, 1, Conf) of
+ false -> ok;
+ {_, Plugins0} ->
+ Plugins = [P || P <- Plugins0, is_tuple(P)],
+ case lists:keyfind('lfe-compile', 1, Plugins) of
+ false -> ok;
+ _ -> Write("\nBUILD_DEPS = lfe lfe.mk\ndep_lfe.mk = git https://github.com/ninenines/lfe.mk master\nDEP_PLUGINS = lfe.mk\n")
+ end
+ end
+ end(),
+ Write("\ninclude $$\(if $$\(ERLANG_MK_FILENAME),$$\(ERLANG_MK_FILENAME),erlang.mk)"),
+ RunPlugin = fun(Plugin, Step) ->
+ case erlang:function_exported(Plugin, Step, 2) of
+ false -> ok;
+ true ->
+ c:cd("$(call core_native_path,$(DEPS_DIR)/$1/)"),
+ Ret = Plugin:Step({config, "", Conf, dict:new(), dict:new(), dict:new(),
+ dict:store(base_dir, "", dict:new())}, undefined),
+ io:format("rebar plugin ~p step ~p ret ~p~n", [Plugin, Step, Ret])
+ end
+ end,
+ fun() ->
+ case lists:keyfind(plugins, 1, Conf) of
+ false -> ok;
+ {_, Plugins0} ->
+ Plugins = [P || P <- Plugins0, is_atom(P)],
+ [begin
+ case lists:keyfind(deps, 1, Conf) of
+ false -> ok;
+ {_, Deps} ->
+ case lists:keyfind(P, 1, Deps) of
+ false -> ok;
+ _ ->
+ Path = "$(call core_native_path,$(DEPS_DIR)/)" ++ atom_to_list(P),
+ io:format("~s", [os:cmd("$(MAKE) -C $(call core_native_path,$(DEPS_DIR)/$1) " ++ Path)]),
+ io:format("~s", [os:cmd("$(MAKE) -C " ++ Path ++ " IS_DEP=1")]),
+ code:add_patha(Path ++ "/ebin")
+ end
+ end
+ end || P <- Plugins],
+ [case code:load_file(P) of
+ {module, P} -> ok;
+ _ ->
+ case lists:keyfind(plugin_dir, 1, Conf) of
+ false -> ok;
+ {_, PluginsDir} ->
+ ErlFile = "$(call core_native_path,$(DEPS_DIR)/$1/)" ++ PluginsDir ++ "/" ++ atom_to_list(P) ++ ".erl",
+ {ok, P, Bin} = compile:file(ErlFile, [binary]),
+ {module, P} = code:load_binary(P, ErlFile, Bin)
+ end
+ end || P <- Plugins],
+ [RunPlugin(P, preprocess) || P <- Plugins],
+ [RunPlugin(P, pre_compile) || P <- Plugins],
+ [RunPlugin(P, compile) || P <- Plugins]
+ end
+ end(),
+ halt()
+endef
+
+define dep_autopatch_appsrc_script.erl
+ AppSrc = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+ AppSrcScript = AppSrc ++ ".script",
+ {ok, Conf0} = file:consult(AppSrc),
+ Bindings0 = erl_eval:new_bindings(),
+ Bindings1 = erl_eval:add_binding('CONFIG', Conf0, Bindings0),
+ Bindings = erl_eval:add_binding('SCRIPT', AppSrcScript, Bindings1),
+ Conf = case file:script(AppSrcScript, Bindings) of
+ {ok, [C]} -> C;
+ {ok, C} -> C
+ end,
+ ok = file:write_file(AppSrc, io_lib:format("~p.~n", [Conf])),
+ halt()
+endef
+
+define dep_autopatch_appsrc.erl
+ AppSrcOut = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+ AppSrcIn = case filelib:is_regular(AppSrcOut) of false -> "$(call core_native_path,$(DEPS_DIR)/$1/ebin/$1.app)"; true -> AppSrcOut end,
+ case filelib:is_regular(AppSrcIn) of
+ false -> ok;
+ true ->
+ {ok, [{application, $(1), L0}]} = file:consult(AppSrcIn),
+ L1 = lists:keystore(modules, 1, L0, {modules, []}),
+ L2 = case lists:keyfind(vsn, 1, L1) of
+ {_, git} -> lists:keyreplace(vsn, 1, L1, {vsn, lists:droplast(os:cmd("git -C $(DEPS_DIR)/$1 describe --dirty --tags --always"))});
+ {_, {cmd, _}} -> lists:keyreplace(vsn, 1, L1, {vsn, "cmd"});
+ _ -> L1
+ end,
+ L3 = case lists:keyfind(registered, 1, L2) of false -> [{registered, []}|L2]; _ -> L2 end,
+ ok = file:write_file(AppSrcOut, io_lib:format("~p.~n", [{application, $(1), L3}])),
+ case AppSrcOut of AppSrcIn -> ok; _ -> ok = file:delete(AppSrcIn) end
+ end,
+ halt()
+endef
+
+define dep_fetch_git
+ git clone -q -n -- $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && git checkout -q $(call dep_commit,$(1));
+endef
+
+define dep_fetch_git-subfolder
+ mkdir -p $(ERLANG_MK_TMP)/git-subfolder; \
+ git clone -q -n -- $(call dep_repo,$1) \
+ $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1); \
+ cd $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1) \
+ && git checkout -q $(call dep_commit,$1); \
+ ln -s $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1)/$(word 4,$(dep_$(1))) \
+ $(DEPS_DIR)/$(call dep_name,$1);
+endef
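+
+# Example (hypothetical dependency): the 4th word is the subfolder that ends
+# up symlinked into $(DEPS_DIR):
+#
+#   dep_my_app = git-subfolder https://github.com/user/monorepo master apps/my_app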
+
+define dep_fetch_git-submodule
+ git submodule update --init -- $(DEPS_DIR)/$1;
+endef
+
+define dep_fetch_hg
+ hg clone -q -U $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && hg update -q $(call dep_commit,$(1));
+endef
+
+define dep_fetch_svn
+ svn checkout -q $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+define dep_fetch_cp
+ cp -R $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+define dep_fetch_ln
+ ln -s $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+# Hex deps carry only a package version, so there is no need to look them up in the Erlang.mk package index.
+define dep_fetch_hex
+ mkdir -p $(ERLANG_MK_TMP)/hex $(DEPS_DIR)/$1; \
+ $(call core_http_get,$(ERLANG_MK_TMP)/hex/$1.tar,\
+ https://repo.hex.pm/tarballs/$(if $(word 3,$(dep_$1)),$(word 3,$(dep_$1)),$1)-$(strip $(word 2,$(dep_$1))).tar); \
+ tar -xOf $(ERLANG_MK_TMP)/hex/$1.tar contents.tar.gz | tar -C $(DEPS_DIR)/$1 -xzf -;
+endef
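+
+# Example (hypothetical dependency): word 2 is the Hex version; an optional
+# word 3 gives the Hex package name when it differs from the app name:
+#
+#   dep_jsx = hex 3.1.0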
+
+define dep_fetch_fail
+ echo "Error: Unknown or invalid dependency: $(1)." >&2; \
+ exit 78;
+endef
+
+# Kept for compatibility purposes with older Erlang.mk configuration.
+define dep_fetch_legacy
+ $(warning WARNING: '$(1)' dependency configuration uses deprecated format.) \
+ git clone -q -n -- $(word 1,$(dep_$(1))) $(DEPS_DIR)/$(1); \
+ cd $(DEPS_DIR)/$(1) && git checkout -q $(if $(word 2,$(dep_$(1))),$(word 2,$(dep_$(1))),master);
+endef
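+
+# Example of the deprecated format this macro still accepts (repository
+# followed by an optional commit, defaulting to master):
+#
+#   dep_my_dep = https://github.com/user/my_dep master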
+
+define dep_target
+$(DEPS_DIR)/$(call dep_name,$1): | $(ERLANG_MK_TMP)
+ $(eval DEP_NAME := $(call dep_name,$1))
+ $(eval DEP_STR := $(if $(filter $1,$(DEP_NAME)),$1,"$1 ($(DEP_NAME))"))
+ $(verbose) if test -d $(APPS_DIR)/$(DEP_NAME); then \
+ echo "Error: Dependency" $(DEP_STR) "conflicts with application found in $(APPS_DIR)/$(DEP_NAME)." >&2; \
+ exit 17; \
+ fi
+ $(verbose) mkdir -p $(DEPS_DIR)
+ $(dep_verbose) $(call dep_fetch_$(strip $(call dep_fetch,$(1))),$(1))
+ $(verbose) if [ -f $(DEPS_DIR)/$(1)/configure.ac -o -f $(DEPS_DIR)/$(1)/configure.in ] \
+ && [ ! -f $(DEPS_DIR)/$(1)/configure ]; then \
+ echo " AUTO " $(DEP_STR); \
+ cd $(DEPS_DIR)/$(1) && autoreconf -Wall -vif -I m4; \
+ fi
+ - $(verbose) if [ -f $(DEPS_DIR)/$(DEP_NAME)/configure ]; then \
+ echo " CONF " $(DEP_STR); \
+ cd $(DEPS_DIR)/$(DEP_NAME) && ./configure; \
+ fi
+ifeq ($(filter $(1),$(NO_AUTOPATCH)),)
+ $(verbose) $$(MAKE) --no-print-directory autopatch-$(DEP_NAME)
+endif
+
+.PHONY: autopatch-$(call dep_name,$1)
+
+autopatch-$(call dep_name,$1)::
+ $(verbose) if [ "$(1)" = "amqp_client" -a "$(RABBITMQ_CLIENT_PATCH)" ]; then \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \
+ echo " PATCH Downloading rabbitmq-codegen"; \
+ git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \
+ fi; \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-server ]; then \
+ echo " PATCH Downloading rabbitmq-server"; \
+ git clone https://github.com/rabbitmq/rabbitmq-server.git $(DEPS_DIR)/rabbitmq-server; \
+ fi; \
+ ln -s $(DEPS_DIR)/amqp_client/deps/rabbit_common-0.0.0 $(DEPS_DIR)/rabbit_common; \
+ elif [ "$(1)" = "rabbit" -a "$(RABBITMQ_SERVER_PATCH)" ]; then \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \
+ echo " PATCH Downloading rabbitmq-codegen"; \
+ git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \
+ fi \
+ elif [ "$1" = "elixir" -a "$(ELIXIR_PATCH)" ]; then \
+ ln -s lib/elixir/ebin $(DEPS_DIR)/elixir/; \
+ else \
+ $$(call dep_autopatch,$(call dep_name,$1)) \
+ fi
+endef
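+
+# Example (assumed usage): dependencies listed in NO_AUTOPATCH skip the
+# autopatch step entirely:
+#
+#   NO_AUTOPATCH = my_raw_dep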
+
+$(foreach dep,$(BUILD_DEPS) $(DEPS),$(eval $(call dep_target,$(dep))))
+
+ifndef IS_APP
+clean:: clean-apps
+
+clean-apps:
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ $(MAKE) -C $$dep clean IS_APP=1; \
+ done
+
+distclean:: distclean-apps
+
+distclean-apps:
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ $(MAKE) -C $$dep distclean IS_APP=1; \
+ done
+endif
+
+ifndef SKIP_DEPS
+distclean:: distclean-deps
+
+distclean-deps:
+ $(gen_verbose) rm -rf $(DEPS_DIR)
+endif
+
+# Forward-declare variables used in core/deps-tools.mk. This is required
+# in case plugins use them.
+
+ERLANG_MK_RECURSIVE_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-deps-list.log
+ERLANG_MK_RECURSIVE_DOC_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-doc-deps-list.log
+ERLANG_MK_RECURSIVE_REL_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-rel-deps-list.log
+ERLANG_MK_RECURSIVE_TEST_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-test-deps-list.log
+ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-shell-deps-list.log
+
+ERLANG_MK_QUERY_DEPS_FILE = $(ERLANG_MK_TMP)/query-deps.log
+ERLANG_MK_QUERY_DOC_DEPS_FILE = $(ERLANG_MK_TMP)/query-doc-deps.log
+ERLANG_MK_QUERY_REL_DEPS_FILE = $(ERLANG_MK_TMP)/query-rel-deps.log
+ERLANG_MK_QUERY_TEST_DEPS_FILE = $(ERLANG_MK_TMP)/query-test-deps.log
+ERLANG_MK_QUERY_SHELL_DEPS_FILE = $(ERLANG_MK_TMP)/query-shell-deps.log
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: clean-app
+
+# Configuration.
+
+ERLC_OPTS ?= -Werror +debug_info +warn_export_vars +warn_shadow_vars \
+ +warn_obsolete_guard # +bin_opt_info +warn_export_all +warn_missing_spec
+COMPILE_FIRST ?=
+COMPILE_FIRST_PATHS = $(addprefix src/,$(addsuffix .erl,$(COMPILE_FIRST)))
+ERLC_EXCLUDE ?=
+ERLC_EXCLUDE_PATHS = $(addprefix src/,$(addsuffix .erl,$(ERLC_EXCLUDE)))
+
+ERLC_ASN1_OPTS ?=
+
+ERLC_MIB_OPTS ?=
+COMPILE_MIB_FIRST ?=
+COMPILE_MIB_FIRST_PATHS = $(addprefix mibs/,$(addsuffix .mib,$(COMPILE_MIB_FIRST)))
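+
+# Example (assumed usage, set in the project Makefile; names are hypothetical):
+#
+#   ERLC_OPTS = +debug_info +warn_missing_spec  # replaces the defaults above
+#   COMPILE_FIRST = my_behaviour                # modules compiled before the rest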
+
+# Verbosity.
+
+app_verbose_0 = @echo " APP " $(PROJECT);
+app_verbose_2 = set -x;
+app_verbose = $(app_verbose_$(V))
+
+appsrc_verbose_0 = @echo " APP " $(PROJECT).app.src;
+appsrc_verbose_2 = set -x;
+appsrc_verbose = $(appsrc_verbose_$(V))
+
+makedep_verbose_0 = @echo " DEPEND" $(PROJECT).d;
+makedep_verbose_2 = set -x;
+makedep_verbose = $(makedep_verbose_$(V))
+
+erlc_verbose_0 = @echo " ERLC " $(filter-out $(patsubst %,%.erl,$(ERLC_EXCLUDE)),\
+ $(filter %.erl %.core,$(?F)));
+erlc_verbose_2 = set -x;
+erlc_verbose = $(erlc_verbose_$(V))
+
+xyrl_verbose_0 = @echo " XYRL " $(filter %.xrl %.yrl,$(?F));
+xyrl_verbose_2 = set -x;
+xyrl_verbose = $(xyrl_verbose_$(V))
+
+asn1_verbose_0 = @echo " ASN1 " $(filter %.asn1,$(?F));
+asn1_verbose_2 = set -x;
+asn1_verbose = $(asn1_verbose_$(V))
+
+mib_verbose_0 = @echo " MIB " $(filter %.bin %.mib,$(?F));
+mib_verbose_2 = set -x;
+mib_verbose = $(mib_verbose_$(V))
+
+ifneq ($(wildcard src/),)
+
+# Targets.
+
+app:: $(if $(wildcard ebin/test),clean) deps
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d
+ $(verbose) $(MAKE) --no-print-directory app-build
+
+ifeq ($(wildcard src/$(PROJECT_MOD).erl),)
+define app_file
+{application, '$(PROJECT)', [
+ {description, "$(PROJECT_DESCRIPTION)"},
+ {vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP),
+ {id$(comma)$(space)"$(1)"}$(comma))
+ {modules, [$(call comma_list,$(2))]},
+ {registered, []},
+ {applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(foreach dep,$(DEPS),$(call dep_name,$(dep))))]},
+ {env, $(subst \,\\,$(PROJECT_ENV))}$(if $(findstring {,$(PROJECT_APP_EXTRA_KEYS)),$(comma)$(newline)$(tab)$(subst \,\\,$(PROJECT_APP_EXTRA_KEYS)),)
+]}.
+endef
+else
+define app_file
+{application, '$(PROJECT)', [
+ {description, "$(PROJECT_DESCRIPTION)"},
+ {vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP),
+ {id$(comma)$(space)"$(1)"}$(comma))
+ {modules, [$(call comma_list,$(2))]},
+ {registered, [$(call comma_list,$(PROJECT)_sup $(PROJECT_REGISTERED))]},
+ {applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(foreach dep,$(DEPS),$(call dep_name,$(dep))))]},
+ {mod, {$(PROJECT_MOD), []}},
+ {env, $(subst \,\\,$(PROJECT_ENV))}$(if $(findstring {,$(PROJECT_APP_EXTRA_KEYS)),$(comma)$(newline)$(tab)$(subst \,\\,$(PROJECT_APP_EXTRA_KEYS)),)
+]}.
+endef
+endif
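+
+# Example (assumed usage): PROJECT_APP_EXTRA_KEYS may carry extra tuples to
+# append to the generated .app file, e.g.
+#
+#   PROJECT_APP_EXTRA_KEYS = {maxT, 10000}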
+
+app-build: ebin/$(PROJECT).app
+ $(verbose) :
+
+# Source files.
+
+ALL_SRC_FILES := $(sort $(call core_find,src/,*))
+
+ERL_FILES := $(filter %.erl,$(ALL_SRC_FILES))
+CORE_FILES := $(filter %.core,$(ALL_SRC_FILES))
+
+# ASN.1 files.
+
+ifneq ($(wildcard asn1/),)
+ASN1_FILES = $(sort $(call core_find,asn1/,*.asn1))
+ERL_FILES += $(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES))))
+
+define compile_asn1
+ $(verbose) mkdir -p include/
+ $(asn1_verbose) erlc -v -I include/ -o asn1/ +noobj $(ERLC_ASN1_OPTS) $(1)
+ $(verbose) mv asn1/*.erl src/
+ -$(verbose) mv asn1/*.hrl include/
+ $(verbose) mv asn1/*.asn1db include/
+endef
+
+$(PROJECT).d:: $(ASN1_FILES)
+ $(if $(strip $?),$(call compile_asn1,$?))
+endif
+
+# SNMP MIB files.
+
+ifneq ($(wildcard mibs/),)
+MIB_FILES = $(sort $(call core_find,mibs/,*.mib))
+
+$(PROJECT).d:: $(COMPILE_MIB_FIRST_PATHS) $(MIB_FILES)
+ $(verbose) mkdir -p include/ priv/mibs/
+ $(mib_verbose) erlc -v $(ERLC_MIB_OPTS) -o priv/mibs/ -I priv/mibs/ $?
+ $(mib_verbose) erlc -o include/ -- $(addprefix priv/mibs/,$(patsubst %.mib,%.bin,$(notdir $?)))
+endif
+
+# Leex and Yecc files.
+
+XRL_FILES := $(filter %.xrl,$(ALL_SRC_FILES))
+XRL_ERL_FILES = $(addprefix src/,$(patsubst %.xrl,%.erl,$(notdir $(XRL_FILES))))
+ERL_FILES += $(XRL_ERL_FILES)
+
+YRL_FILES := $(filter %.yrl,$(ALL_SRC_FILES))
+YRL_ERL_FILES = $(addprefix src/,$(patsubst %.yrl,%.erl,$(notdir $(YRL_FILES))))
+ERL_FILES += $(YRL_ERL_FILES)
+
+$(PROJECT).d:: $(XRL_FILES) $(YRL_FILES)
+ $(if $(strip $?),$(xyrl_verbose) erlc -v -o src/ $(YRL_ERLC_OPTS) $?)
+
+# Erlang and Core Erlang files.
+
+define makedep.erl
+ E = ets:new(makedep, [bag]),
+ G = digraph:new([acyclic]),
+ ErlFiles = lists:usort(string:tokens("$(ERL_FILES)", " ")),
+ DepsDir = "$(call core_native_path,$(DEPS_DIR))",
+ AppsDir = "$(call core_native_path,$(APPS_DIR))",
+ DepsDirsSrc = "$(if $(wildcard $(DEPS_DIR)/*/src), $(call core_native_path,$(wildcard $(DEPS_DIR)/*/src)))",
+ DepsDirsInc = "$(if $(wildcard $(DEPS_DIR)/*/include), $(call core_native_path,$(wildcard $(DEPS_DIR)/*/include)))",
+ AppsDirsSrc = "$(if $(wildcard $(APPS_DIR)/*/src), $(call core_native_path,$(wildcard $(APPS_DIR)/*/src)))",
+ AppsDirsInc = "$(if $(wildcard $(APPS_DIR)/*/include), $(call core_native_path,$(wildcard $(APPS_DIR)/*/include)))",
+ DepsDirs = lists:usort(string:tokens(DepsDirsSrc++DepsDirsInc, " ")),
+ AppsDirs = lists:usort(string:tokens(AppsDirsSrc++AppsDirsInc, " ")),
+ Modules = [{list_to_atom(filename:basename(F, ".erl")), F} || F <- ErlFiles],
+ Add = fun (Mod, Dep) ->
+ case lists:keyfind(Dep, 1, Modules) of
+ false -> ok;
+ {_, DepFile} ->
+ {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+ ets:insert(E, {ModFile, DepFile}),
+ digraph:add_vertex(G, Mod),
+ digraph:add_vertex(G, Dep),
+ digraph:add_edge(G, Mod, Dep)
+ end
+ end,
+ AddHd = fun (F, Mod, DepFile) ->
+ case file:open(DepFile, [read]) of
+ {error, enoent} ->
+ ok;
+ {ok, Fd} ->
+ {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+ case ets:match(E, {ModFile, DepFile}) of
+ [] ->
+ ets:insert(E, {ModFile, DepFile}),
+ F(F, Fd, Mod, 0);
+ _ -> ok
+ end
+ end
+ end,
+ SearchHrl = fun
+ F(_Hrl, []) -> {error,enoent};
+ F(Hrl, [Dir|Dirs]) ->
+ HrlF = filename:join([Dir,Hrl]),
+ case filelib:is_file(HrlF) of
+ true ->
+ {ok, HrlF};
+ false -> F(Hrl,Dirs)
+ end
+ end,
+ Attr = fun
+ (_F, Mod, behavior, Dep) ->
+ Add(Mod, Dep);
+ (_F, Mod, behaviour, Dep) ->
+ Add(Mod, Dep);
+ (_F, Mod, compile, {parse_transform, Dep}) ->
+ Add(Mod, Dep);
+ (_F, Mod, compile, Opts) when is_list(Opts) ->
+ case proplists:get_value(parse_transform, Opts) of
+ undefined -> ok;
+ Dep -> Add(Mod, Dep)
+ end;
+ (F, Mod, include, Hrl) ->
+ case SearchHrl(Hrl, ["src", "include",AppsDir,DepsDir]++AppsDirs++DepsDirs) of
+ {ok, FoundHrl} -> AddHd(F, Mod, FoundHrl);
+ {error, _} -> false
+ end;
+ (F, Mod, include_lib, Hrl) ->
+ case SearchHrl(Hrl, ["src", "include",AppsDir,DepsDir]++AppsDirs++DepsDirs) of
+ {ok, FoundHrl} -> AddHd(F, Mod, FoundHrl);
+ {error, _} -> false
+ end;
+ (F, Mod, import, {Imp, _}) ->
+ IsFile =
+ case lists:keyfind(Imp, 1, Modules) of
+ false -> false;
+ {_, FilePath} -> filelib:is_file(FilePath)
+ end,
+ case IsFile of
+ false -> ok;
+ true -> Add(Mod, Imp)
+ end;
+ (_, _, _, _) -> ok
+ end,
+ MakeDepend = fun
+ (F, Fd, Mod, StartLocation) ->
+ {ok, Filename} = file:pid2name(Fd),
+ case io:parse_erl_form(Fd, undefined, StartLocation) of
+ {ok, AbsData, EndLocation} ->
+ case AbsData of
+ {attribute, _, Key, Value} ->
+ Attr(F, Mod, Key, Value),
+ F(F, Fd, Mod, EndLocation);
+ _ -> F(F, Fd, Mod, EndLocation)
+ end;
+ {eof, _} -> file:close(Fd);
+ {error, _ErrorDescription} ->
+ file:close(Fd);
+ {error, ErrorInfo, ErrorLocation} ->
+ F(F, Fd, Mod, ErrorLocation)
+ end,
+ ok
+ end,
+ [begin
+ Mod = list_to_atom(filename:basename(F, ".erl")),
+ case file:open(F, [read]) of
+ {ok, Fd} -> MakeDepend(MakeDepend, Fd, Mod, 0);
+ {error, enoent} -> ok
+ end
+ end || F <- ErlFiles],
+ Depend = sofs:to_external(sofs:relation_to_family(sofs:relation(ets:tab2list(E)))),
+ CompileFirst = [X || X <- lists:reverse(digraph_utils:topsort(G)), [] =/= digraph:in_neighbours(G, X)],
+ TargetPath = fun(Target) ->
+ case lists:keyfind(Target, 1, Modules) of
+ false -> "";
+ {_, DepFile} ->
+ DirSubname = tl(string:tokens(filename:dirname(DepFile), "/")),
+ string:join(DirSubname ++ [atom_to_list(Target)], "/")
+ end
+ end,
+ Output0 = [
+ "# Generated by Erlang.mk. Edit at your own risk!\n\n",
+ [[F, "::", [[" ", D] || D <- Deps], "; @touch \$$@\n"] || {F, Deps} <- Depend],
+ "\nCOMPILE_FIRST +=", [[" ", TargetPath(CF)] || CF <- CompileFirst], "\n"
+ ],
+ Output = case "é" of
+ [233] -> unicode:characters_to_binary(Output0);
+ _ -> Output0
+ end,
+ ok = file:write_file("$(1)", Output),
+ halt()
+endef
+
+ifeq ($(if $(NO_MAKEDEP),$(wildcard $(PROJECT).d),),)
+$(PROJECT).d:: $(ERL_FILES) $(call core_find,include/,*.hrl) $(MAKEFILE_LIST)
+ $(makedep_verbose) $(call erlang,$(call makedep.erl,$@))
+endif
+
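+# Roughly, for hypothetical modules a and b where a.erl includes
+# include/a.hrl and b.erl declares -behaviour(a), the generated
+# $(PROJECT).d would look like:
+#
+#   src/a.erl:: include/a.hrl; @touch $@
+#   src/b.erl:: src/a.erl; @touch $@
+#
+#   COMPILE_FIRST += a
+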
+ifeq ($(IS_APP)$(IS_DEP),)
+ifneq ($(words $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES)),0)
+# Rebuild everything when the Makefile changes.
+$(ERLANG_MK_TMP)/last-makefile-change: $(MAKEFILE_LIST) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES); \
+ touch -c $(PROJECT).d; \
+ fi
+ $(verbose) touch $@
+
+$(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES):: $(ERLANG_MK_TMP)/last-makefile-change
+ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change
+endif
+endif
+
+$(PROJECT).d::
+ $(verbose) :
+
+include $(wildcard $(PROJECT).d)
+
+ebin/$(PROJECT).app:: ebin/
+
+ebin/:
+ $(verbose) mkdir -p ebin/
+
+define compile_erl
+ $(erlc_verbose) erlc -v $(if $(IS_DEP),$(filter-out -Werror,$(ERLC_OPTS)),$(ERLC_OPTS)) -o ebin/ \
+ -pa ebin/ -I include/ $(filter-out $(ERLC_EXCLUDE_PATHS),$(COMPILE_FIRST_PATHS) $(1))
+endef
+
+define validate_app_file
+ case file:consult("ebin/$(PROJECT).app") of
+ {ok, _} -> halt();
+ _ -> halt(1)
+ end
+endef
+
+ebin/$(PROJECT).app:: $(ERL_FILES) $(CORE_FILES) $(wildcard src/$(PROJECT).app.src)
+ $(eval FILES_TO_COMPILE := $(filter-out src/$(PROJECT).app.src,$?))
+ $(if $(strip $(FILES_TO_COMPILE)),$(call compile_erl,$(FILES_TO_COMPILE)))
+# Older git versions do not have the --first-parent flag. Do without in that case.
+ $(eval GITDESCRIBE := $(shell git describe --dirty --abbrev=7 --tags --always --first-parent 2>/dev/null \
+ || git describe --dirty --abbrev=7 --tags --always 2>/dev/null || true))
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(filter-out $(ERLC_EXCLUDE_PATHS),$(ERL_FILES) $(CORE_FILES) $(BEAM_FILES)))))))
+ifeq ($(wildcard src/$(PROJECT).app.src),)
+ $(app_verbose) printf '$(subst %,%%,$(subst $(newline),\n,$(subst ','\'',$(call app_file,$(GITDESCRIBE),$(MODULES)))))' \
+ > ebin/$(PROJECT).app
+ $(verbose) if ! $(call erlang,$(call validate_app_file)); then \
+ echo "The .app file produced is invalid. Please verify the value of PROJECT_ENV." >&2; \
+ exit 1; \
+ fi
+else
+ $(verbose) if [ -z "$$(grep -e '^[^%]*{\s*modules\s*,' src/$(PROJECT).app.src)" ]; then \
+ echo "Empty modules entry not found in $(PROJECT).app.src. Please consult the erlang.mk documentation for instructions." >&2; \
+ exit 1; \
+ fi
+ $(appsrc_verbose) cat src/$(PROJECT).app.src \
+ | sed "s/{[[:space:]]*modules[[:space:]]*,[[:space:]]*\[\]}/{modules, \[$(call comma_list,$(MODULES))\]}/" \
+ | sed "s/{id,[[:space:]]*\"git\"}/{id, \"$(subst /,\/,$(GITDESCRIBE))\"}/" \
+ > ebin/$(PROJECT).app
+endif
+ifneq ($(wildcard src/$(PROJECT).appup),)
+ $(verbose) cp src/$(PROJECT).appup ebin/
+endif
+
+clean:: clean-app
+
+clean-app:
+ $(gen_verbose) rm -rf $(PROJECT).d ebin/ priv/mibs/ $(XRL_ERL_FILES) $(YRL_ERL_FILES) \
+ $(addprefix include/,$(patsubst %.mib,%.hrl,$(notdir $(MIB_FILES)))) \
+ $(addprefix include/,$(patsubst %.asn1,%.hrl,$(notdir $(ASN1_FILES)))) \
+ $(addprefix include/,$(patsubst %.asn1,%.asn1db,$(notdir $(ASN1_FILES)))) \
+ $(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES))))
+
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Viktor Söderqvist <viktor@zuiderkwast.se>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: doc-deps
+
+# Configuration.
+
+ALL_DOC_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(DOC_DEPS))
+
+# Targets.
+
+$(foreach dep,$(DOC_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+doc-deps:
+else
+doc-deps: $(ALL_DOC_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_DOC_DEPS_DIRS) ; do $(MAKE) -C $$dep IS_DEP=1; done
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: rel-deps
+
+# Configuration.
+
+ALL_REL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(REL_DEPS))
+
+# Targets.
+
+$(foreach dep,$(REL_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+rel-deps:
+else
+rel-deps: $(ALL_REL_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_REL_DEPS_DIRS) ; do $(MAKE) -C $$dep; done
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: test-deps test-dir test-build clean-test-dir
+
+# Configuration.
+
+TEST_DIR ?= $(CURDIR)/test
+
+ALL_TEST_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(TEST_DEPS))
+
+TEST_ERLC_OPTS ?= +debug_info +warn_export_vars +warn_shadow_vars +warn_obsolete_guard
+TEST_ERLC_OPTS += -DTEST=1
+
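+# A typical test-only dependency, declared in the user Makefile (meck is an
+# illustrative choice):
+#
+#   TEST_DEPS = meck
+#   dep_meck = git https://github.com/eproxus/meck master
+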
+# Targets.
+
+$(foreach dep,$(TEST_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+test-deps:
+else
+test-deps: $(ALL_TEST_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_TEST_DEPS_DIRS) ; do \
+ if [ -z "$(strip $(FULL))" ] && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ else \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ fi \
+ done
+endif
+
+ifneq ($(wildcard $(TEST_DIR)),)
+test-dir: $(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build
+ @:
+
+test_erlc_verbose_0 = @echo " ERLC " $(filter-out $(patsubst %,%.erl,$(ERLC_EXCLUDE)),\
+ $(filter %.erl %.core,$(notdir $(FILES_TO_COMPILE))));
+test_erlc_verbose_2 = set -x;
+test_erlc_verbose = $(test_erlc_verbose_$(V))
+
+define compile_test_erl
+ $(test_erlc_verbose) erlc -v $(TEST_ERLC_OPTS) -o $(TEST_DIR) \
+ -pa ebin/ -I include/ $(1)
+endef
+
+ERL_TEST_FILES = $(call core_find,$(TEST_DIR)/,*.erl)
+$(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build: $(ERL_TEST_FILES) $(MAKEFILE_LIST)
+ $(eval FILES_TO_COMPILE := $(if $(filter $(MAKEFILE_LIST),$?),$(filter $(ERL_TEST_FILES),$^),$?))
+ $(if $(strip $(FILES_TO_COMPILE)),$(call compile_test_erl,$(FILES_TO_COMPILE)) && touch $@)
+endif
+
+test-build:: IS_TEST=1
+test-build:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build:: $(if $(wildcard src),$(if $(wildcard ebin/test),,clean)) $(if $(IS_APP),,deps test-deps)
+# We already compiled everything when IS_APP=1.
+ifndef IS_APP
+ifneq ($(wildcard src),)
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(verbose) $(MAKE) --no-print-directory app-build ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(gen_verbose) touch ebin/test
+endif
+ifneq ($(wildcard $(TEST_DIR)),)
+ $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+endif
+endif
+
+# Roughly the same as test-build, but when IS_APP=1.
+# We only care about compiling the current application.
+ifdef IS_APP
+test-build-app:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build-app:: deps test-deps
+ifneq ($(wildcard src),)
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(verbose) $(MAKE) --no-print-directory app-build ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(gen_verbose) touch ebin/test
+endif
+ifneq ($(wildcard $(TEST_DIR)),)
+ $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+endif
+endif
+
+clean:: clean-test-dir
+
+clean-test-dir:
+ifneq ($(wildcard $(TEST_DIR)/*.beam),)
+ $(gen_verbose) rm -f $(TEST_DIR)/*.beam $(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: rebar.config
+
+# We strip out -Werror because we don't want to fail due to
+# warnings when used as a dependency.
+
+compat_prepare_erlc_opts = $(shell echo "$1" | sed 's/, */,/g')
+
+define compat_convert_erlc_opts
+$(if $(filter-out -Werror,$1),\
+ $(if $(findstring +,$1),\
+ $(shell echo $1 | cut -b 2-)))
+endef
+
+define compat_erlc_opts_to_list
+[$(call comma_list,$(foreach o,$(call compat_prepare_erlc_opts,$1),$(call compat_convert_erlc_opts,$o)))]
+endef
+
+define compat_rebar_config
+{deps, [
+$(call comma_list,$(foreach d,$(DEPS),\
+ $(if $(filter hex,$(call dep_fetch,$d)),\
+ {$(call dep_name,$d)$(comma)"$(call dep_repo,$d)"},\
+ {$(call dep_name,$d)$(comma)".*"$(comma){git,"$(call dep_repo,$d)"$(comma)"$(call dep_commit,$d)"}})))
+]}.
+{erl_opts, $(call compat_erlc_opts_to_list,$(ERLC_OPTS))}.
+endef
+
+rebar.config:
+ $(gen_verbose) $(call core_render,compat_rebar_config,rebar.config)
+
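+# For illustration, a project declaring the hypothetical dependency below:
+#
+#   DEPS = cowboy
+#   dep_cowboy = git https://github.com/ninenines/cowboy 2.8.0
+#
+# would have 'make rebar.config' render something along these lines:
+#
+#   {deps, [
+#   {cowboy,".*",{git,"https://github.com/ninenines/cowboy","2.8.0"}}
+#   ]}.
+#   {erl_opts, [debug_info,warn_export_vars]}.
+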
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter asciideck,$(DEPS) $(DOC_DEPS)),asciideck)
+
+.PHONY: asciidoc asciidoc-guide asciidoc-manual install-asciidoc distclean-asciidoc-guide distclean-asciidoc-manual
+
+# Core targets.
+
+docs:: asciidoc
+
+distclean:: distclean-asciidoc-guide distclean-asciidoc-manual
+
+# Plugin-specific targets.
+
+asciidoc: asciidoc-guide asciidoc-manual
+
+# User guide.
+
+ifeq ($(wildcard doc/src/guide/book.asciidoc),)
+asciidoc-guide:
+else
+asciidoc-guide: distclean-asciidoc-guide doc-deps
+ a2x -v -f pdf doc/src/guide/book.asciidoc && mv doc/src/guide/book.pdf doc/guide.pdf
+ a2x -v -f chunked doc/src/guide/book.asciidoc && mv doc/src/guide/book.chunked/ doc/html/
+
+distclean-asciidoc-guide:
+ $(gen_verbose) rm -rf doc/html/ doc/guide.pdf
+endif
+
+# Man pages.
+
+ASCIIDOC_MANUAL_FILES := $(wildcard doc/src/manual/*.asciidoc)
+
+ifeq ($(ASCIIDOC_MANUAL_FILES),)
+asciidoc-manual:
+else
+
+# Configuration.
+
+MAN_INSTALL_PATH ?= /usr/local/share/man
+MAN_SECTIONS ?= 3 7
+MAN_PROJECT ?= $(shell echo $(PROJECT) | sed 's/^./\U&\E/')
+MAN_VERSION ?= $(PROJECT_VERSION)
+
+# Plugin-specific targets.
+
+define asciidoc2man.erl
+try
+ [begin
+ io:format(" ADOC ~s~n", [F]),
+ ok = asciideck:to_manpage(asciideck:parse_file(F), #{
+ compress => gzip,
+ outdir => filename:dirname(F),
+ extra2 => "$(MAN_PROJECT) $(MAN_VERSION)",
+ extra3 => "$(MAN_PROJECT) Function Reference"
+ })
+ end || F <- [$(shell echo $(addprefix $(comma)\",$(addsuffix \",$1)) | sed 's/^.//')]],
+ halt(0)
+catch C:E ->
+ io:format("Exception ~p:~p~nStacktrace: ~p~n", [C, E, erlang:get_stacktrace()]),
+ halt(1)
+end.
+endef
+
+asciidoc-manual:: doc-deps
+
+asciidoc-manual:: $(ASCIIDOC_MANUAL_FILES)
+ $(gen_verbose) $(call erlang,$(call asciidoc2man.erl,$?))
+ $(verbose) $(foreach s,$(MAN_SECTIONS),mkdir -p doc/man$s/ && mv doc/src/manual/*.$s.gz doc/man$s/;)
+
+install-docs:: install-asciidoc
+
+install-asciidoc: asciidoc-manual
+ $(foreach s,$(MAN_SECTIONS),\
+ mkdir -p $(MAN_INSTALL_PATH)/man$s/ && \
+ install -g `id -g` -o `id -u` -m 0644 doc/man$s/*.gz $(MAN_INSTALL_PATH)/man$s/;)
+
+distclean-asciidoc-manual:
+ $(gen_verbose) rm -rf $(addprefix doc/man,$(MAN_SECTIONS))
+endif
+endif
+
+# Copyright (c) 2014-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: bootstrap bootstrap-lib bootstrap-rel new list-templates
+
+# Core targets.
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Bootstrap targets:" \
+ " bootstrap Generate a skeleton of an OTP application" \
+ " bootstrap-lib Generate a skeleton of an OTP library" \
+ " bootstrap-rel Generate the files needed to build a release" \
+ " new-app in=NAME Create a new local OTP application NAME" \
+ " new-lib in=NAME Create a new local OTP library NAME" \
+ " new t=TPL n=NAME Generate a module NAME based on the template TPL" \
+ " new t=T n=N in=APP Generate a module NAME based on the template TPL in APP" \
+ " list-templates List available templates"
+
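+# Typical invocations, with hypothetical names:
+#
+#   $ make -f erlang.mk bootstrap        # full OTP application skeleton
+#   $ make new t=gen_server n=my_server  # module from the gen_server template
+#   $ make new-app in=webui              # new application under $(APPS_DIR)
+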
+# Bootstrap templates.
+
+define bs_appsrc
+{application, $p, [
+ {description, ""},
+ {vsn, "0.1.0"},
+ {id, "git"},
+ {modules, []},
+ {registered, []},
+ {applications, [
+ kernel,
+ stdlib
+ ]},
+ {mod, {$p_app, []}},
+ {env, []}
+]}.
+endef
+
+define bs_appsrc_lib
+{application, $p, [
+ {description, ""},
+ {vsn, "0.1.0"},
+ {id, "git"},
+ {modules, []},
+ {registered, []},
+ {applications, [
+ kernel,
+ stdlib
+ ]}
+]}.
+endef
+
+# To prevent autocompletion issues with ZSH, we add "include erlang.mk"
+# separately during the actual bootstrap.
+define bs_Makefile
+PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.1.0
+$(if $(SP),
+# Whitespace to be used when creating files from templates.
+SP = $(SP)
+)
+endef
+
+define bs_apps_Makefile
+PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.1.0
+$(if $(SP),
+# Whitespace to be used when creating files from templates.
+SP = $(SP)
+)
+# Make sure we know where the applications are located.
+ROOT_DIR ?= $(call core_relpath,$(dir $(ERLANG_MK_FILENAME)),$(APPS_DIR)/app)
+APPS_DIR ?= ..
+DEPS_DIR ?= $(call core_relpath,$(DEPS_DIR),$(APPS_DIR)/app)
+
+include $$(ROOT_DIR)/erlang.mk
+endef
+
+define bs_app
+-module($p_app).
+-behaviour(application).
+
+-export([start/2]).
+-export([stop/1]).
+
+start(_Type, _Args) ->
+ $p_sup:start_link().
+
+stop(_State) ->
+ ok.
+endef
+
+define bs_relx_config
+{release, {$p_release, "1"}, [$p, sasl, runtime_tools]}.
+{extended_start_script, true}.
+{sys_config, "config/sys.config"}.
+{vm_args, "config/vm.args"}.
+endef
+
+define bs_sys_config
+[
+].
+endef
+
+define bs_vm_args
+-name $p@127.0.0.1
+-setcookie $p
+-heart
+endef
+
+# Normal templates.
+
+define tpl_supervisor
+-module($(n)).
+-behaviour(supervisor).
+
+-export([start_link/0]).
+-export([init/1]).
+
+start_link() ->
+ supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+init([]) ->
+ Procs = [],
+ {ok, {{one_for_one, 1, 5}, Procs}}.
+endef
+
+define tpl_gen_server
+-module($(n)).
+-behaviour(gen_server).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_server.
+-export([init/1]).
+-export([handle_call/3]).
+-export([handle_cast/2]).
+-export([handle_info/2]).
+-export([terminate/2]).
+-export([code_change/3]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_server:start_link(?MODULE, [], []).
+
+%% gen_server.
+
+init([]) ->
+ {ok, #state{}}.
+
+handle_call(_Request, _From, State) ->
+ {reply, ignored, State}.
+
+handle_cast(_Msg, State) ->
+ {noreply, State}.
+
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+endef
+
+define tpl_module
+-module($(n)).
+-export([]).
+endef
+
+define tpl_cowboy_http
+-module($(n)).
+-behaviour(cowboy_http_handler).
+
+-export([init/3]).
+-export([handle/2]).
+-export([terminate/3]).
+
+-record(state, {
+}).
+
+init(_, Req, _Opts) ->
+ {ok, Req, #state{}}.
+
+handle(Req, State=#state{}) ->
+ {ok, Req2} = cowboy_req:reply(200, Req),
+ {ok, Req2, State}.
+
+terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_gen_fsm
+-module($(n)).
+-behaviour(gen_fsm).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_fsm.
+-export([init/1]).
+-export([state_name/2]).
+-export([handle_event/3]).
+-export([state_name/3]).
+-export([handle_sync_event/4]).
+-export([handle_info/3]).
+-export([terminate/3]).
+-export([code_change/4]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_fsm:start_link(?MODULE, [], []).
+
+%% gen_fsm.
+
+init([]) ->
+ {ok, state_name, #state{}}.
+
+state_name(_Event, StateData) ->
+ {next_state, state_name, StateData}.
+
+handle_event(_Event, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+state_name(_Event, _From, StateData) ->
+ {reply, ignored, state_name, StateData}.
+
+handle_sync_event(_Event, _From, StateName, StateData) ->
+ {reply, ignored, StateName, StateData}.
+
+handle_info(_Info, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+terminate(_Reason, _StateName, _StateData) ->
+ ok.
+
+code_change(_OldVsn, StateName, StateData, _Extra) ->
+ {ok, StateName, StateData}.
+endef
+
+define tpl_gen_statem
+-module($(n)).
+-behaviour(gen_statem).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_statem.
+-export([callback_mode/0]).
+-export([init/1]).
+-export([state_name/3]).
+-export([handle_event/4]).
+-export([terminate/3]).
+-export([code_change/4]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_statem:start_link(?MODULE, [], []).
+
+%% gen_statem.
+
+callback_mode() ->
+ state_functions.
+
+init([]) ->
+ {ok, state_name, #state{}}.
+
+state_name(_EventType, _EventData, StateData) ->
+ {next_state, state_name, StateData}.
+
+handle_event(_EventType, _EventData, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+terminate(_Reason, _StateName, _StateData) ->
+ ok.
+
+code_change(_OldVsn, StateName, StateData, _Extra) ->
+ {ok, StateName, StateData}.
+endef
+
+define tpl_cowboy_loop
+-module($(n)).
+-behaviour(cowboy_loop_handler).
+
+-export([init/3]).
+-export([info/3]).
+-export([terminate/3]).
+
+-record(state, {
+}).
+
+init(_, Req, _Opts) ->
+ {loop, Req, #state{}, 5000, hibernate}.
+
+info(_Info, Req, State) ->
+ {loop, Req, State, hibernate}.
+
+terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_cowboy_rest
+-module($(n)).
+
+-export([init/3]).
+-export([content_types_provided/2]).
+-export([get_html/2]).
+
+init(_, _Req, _Opts) ->
+ {upgrade, protocol, cowboy_rest}.
+
+content_types_provided(Req, State) ->
+ {[{{<<"text">>, <<"html">>, '*'}, get_html}], Req, State}.
+
+get_html(Req, State) ->
+ {<<"<html><body>This is REST!</body></html>">>, Req, State}.
+endef
+
+define tpl_cowboy_ws
+-module($(n)).
+-behaviour(cowboy_websocket_handler).
+
+-export([init/3]).
+-export([websocket_init/3]).
+-export([websocket_handle/3]).
+-export([websocket_info/3]).
+-export([websocket_terminate/3]).
+
+-record(state, {
+}).
+
+init(_, _, _) ->
+ {upgrade, protocol, cowboy_websocket}.
+
+websocket_init(_, Req, _Opts) ->
+ Req2 = cowboy_req:compact(Req),
+ {ok, Req2, #state{}}.
+
+websocket_handle({text, Data}, Req, State) ->
+ {reply, {text, Data}, Req, State};
+websocket_handle({binary, Data}, Req, State) ->
+ {reply, {binary, Data}, Req, State};
+websocket_handle(_Frame, Req, State) ->
+ {ok, Req, State}.
+
+websocket_info(_Info, Req, State) ->
+ {ok, Req, State}.
+
+websocket_terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_ranch_protocol
+-module($(n)).
+-behaviour(ranch_protocol).
+
+-export([start_link/4]).
+-export([init/4]).
+
+-type opts() :: [].
+-export_type([opts/0]).
+
+-record(state, {
+ socket :: inet:socket(),
+ transport :: module()
+}).
+
+start_link(Ref, Socket, Transport, Opts) ->
+ Pid = spawn_link(?MODULE, init, [Ref, Socket, Transport, Opts]),
+ {ok, Pid}.
+
+-spec init(ranch:ref(), inet:socket(), module(), opts()) -> ok.
+init(Ref, Socket, Transport, _Opts) ->
+ ok = ranch:accept_ack(Ref),
+ loop(#state{socket=Socket, transport=Transport}).
+
+loop(State) ->
+ loop(State).
+endef
+
+# Plugin-specific targets.
+
+ifndef WS
+ifdef SP
+WS = $(subst a,,a $(wordlist 1,$(SP),a a a a a a a a a a a a a a a a a a a a))
+else
+WS = $(tab)
+endif
+endif
+
+bootstrap:
+ifneq ($(wildcard src/),)
+ $(error Error: src/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(eval n := $(PROJECT)_sup)
+ $(verbose) $(call core_render,bs_Makefile,Makefile)
+ $(verbose) echo "include erlang.mk" >> Makefile
+ $(verbose) mkdir src/
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc,src/$(PROJECT).app.src)
+endif
+ $(verbose) $(call core_render,bs_app,src/$(PROJECT)_app.erl)
+ $(verbose) $(call core_render,tpl_supervisor,src/$(PROJECT)_sup.erl)
+
+bootstrap-lib:
+ifneq ($(wildcard src/),)
+ $(error Error: src/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(verbose) $(call core_render,bs_Makefile,Makefile)
+ $(verbose) echo "include erlang.mk" >> Makefile
+ $(verbose) mkdir src/
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc_lib,src/$(PROJECT).app.src)
+endif
+
+bootstrap-rel:
+ifneq ($(wildcard relx.config),)
+ $(error Error: relx.config already exists)
+endif
+ifneq ($(wildcard config/),)
+ $(error Error: config/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(verbose) $(call core_render,bs_relx_config,relx.config)
+ $(verbose) mkdir config/
+ $(verbose) $(call core_render,bs_sys_config,config/sys.config)
+ $(verbose) $(call core_render,bs_vm_args,config/vm.args)
+
+new-app:
+ifndef in
+ $(error Usage: $(MAKE) new-app in=APP)
+endif
+ifneq ($(wildcard $(APPS_DIR)/$in),)
+ $(error Error: Application $in already exists)
+endif
+ $(eval p := $(in))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(eval n := $(in)_sup)
+ $(verbose) mkdir -p $(APPS_DIR)/$p/src/
+ $(verbose) $(call core_render,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile)
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc,$(APPS_DIR)/$p/src/$p.app.src)
+endif
+ $(verbose) $(call core_render,bs_app,$(APPS_DIR)/$p/src/$p_app.erl)
+ $(verbose) $(call core_render,tpl_supervisor,$(APPS_DIR)/$p/src/$p_sup.erl)
+
+new-lib:
+ifndef in
+ $(error Usage: $(MAKE) new-lib in=APP)
+endif
+ifneq ($(wildcard $(APPS_DIR)/$in),)
+ $(error Error: Application $in already exists)
+endif
+ $(eval p := $(in))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(verbose) mkdir -p $(APPS_DIR)/$p/src/
+ $(verbose) $(call core_render,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile)
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc_lib,$(APPS_DIR)/$p/src/$p.app.src)
+endif
+
+new:
+ifeq ($(wildcard src/)$(in),)
+ $(error Error: src/ directory does not exist)
+endif
+ifndef t
+ $(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP])
+endif
+ifndef n
+ $(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP])
+endif
+ifdef in
+ $(verbose) $(call core_render,tpl_$(t),$(APPS_DIR)/$(in)/src/$(n).erl)
+else
+ $(verbose) $(call core_render,tpl_$(t),src/$(n).erl)
+endif
+
+list-templates:
+ $(verbose) echo Available templates:
+ $(verbose) printf " %s\n" $(sort $(patsubst tpl_%,%,$(filter tpl_%,$(.VARIABLES))))
+
+# Copyright (c) 2014-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: clean-c_src distclean-c_src-env
+
+# Configuration.
+
+C_SRC_DIR ?= $(CURDIR)/c_src
+C_SRC_ENV ?= $(C_SRC_DIR)/env.mk
+C_SRC_OUTPUT ?= $(CURDIR)/priv/$(PROJECT)
+C_SRC_TYPE ?= shared
+
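+# A minimal sketch of a user Makefile overriding these; my_nif is a
+# hypothetical name, and the .so/.dll extension is appended below:
+#
+#   C_SRC_OUTPUT = $(CURDIR)/priv/my_nif
+#   C_SRC_TYPE = shared
+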
+# System type and C compiler/flags.
+
+ifeq ($(PLATFORM),msys2)
+ C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?= .exe
+ C_SRC_OUTPUT_SHARED_EXTENSION ?= .dll
+else
+ C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?=
+ C_SRC_OUTPUT_SHARED_EXTENSION ?= .so
+endif
+
+ifeq ($(C_SRC_TYPE),shared)
+ C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_SHARED_EXTENSION)
+else
+ C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_EXECUTABLE_EXTENSION)
+endif
+
+ifeq ($(PLATFORM),msys2)
+# We hardcode the compiler used on MSYS2. The default CC=cc does
+# not produce working code. The "gcc" MSYS2 package also doesn't.
+ CC = /mingw64/bin/gcc
+ export CC
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),darwin)
+ CC ?= cc
+ CFLAGS ?= -O3 -std=c99 -arch x86_64 -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -arch x86_64 -Wall
+ LDFLAGS ?= -arch x86_64 -flat_namespace -undefined suppress
+else ifeq ($(PLATFORM),freebsd)
+ CC ?= cc
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),linux)
+ CC ?= gcc
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+endif
+
+ifneq ($(PLATFORM),msys2)
+ CFLAGS += -fPIC
+ CXXFLAGS += -fPIC
+endif
+
+CFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
+CXXFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
+
+LDLIBS += -L"$(ERL_INTERFACE_LIB_DIR)" -lei
+
+# Verbosity.
+
+c_verbose_0 = @echo " C " $(filter-out $(notdir $(MAKEFILE_LIST) $(C_SRC_ENV)),$(^F));
+c_verbose = $(c_verbose_$(V))
+
+cpp_verbose_0 = @echo " CPP " $(filter-out $(notdir $(MAKEFILE_LIST) $(C_SRC_ENV)),$(^F));
+cpp_verbose = $(cpp_verbose_$(V))
+
+link_verbose_0 = @echo " LD " $(@F);
+link_verbose = $(link_verbose_$(V))
+
+# Targets.
+
+ifeq ($(wildcard $(C_SRC_DIR)),)
+else ifneq ($(wildcard $(C_SRC_DIR)/Makefile),)
+app:: app-c_src
+
+test-build:: app-c_src
+
+app-c_src:
+ $(MAKE) -C $(C_SRC_DIR)
+
+clean::
+ $(MAKE) -C $(C_SRC_DIR) clean
+
+else
+
+ifeq ($(SOURCES),)
+SOURCES := $(sort $(foreach pat,*.c *.C *.cc *.cpp,$(call core_find,$(C_SRC_DIR)/,$(pat))))
+endif
+OBJECTS = $(addsuffix .o, $(basename $(SOURCES)))
+
+COMPILE_C = $(c_verbose) $(CC) $(CFLAGS) $(CPPFLAGS) -c
+COMPILE_CPP = $(cpp_verbose) $(CXX) $(CXXFLAGS) $(CPPFLAGS) -c
+
+app:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
+
+test-build:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
+
+$(C_SRC_OUTPUT_FILE): $(OBJECTS)
+ $(verbose) mkdir -p $(dir $@)
+ $(link_verbose) $(CC) $(OBJECTS) \
+ $(LDFLAGS) $(if $(filter $(C_SRC_TYPE),shared),-shared) $(LDLIBS) \
+ -o $(C_SRC_OUTPUT_FILE)
+
+$(OBJECTS): $(MAKEFILE_LIST) $(C_SRC_ENV)
+
+%.o: %.c
+ $(COMPILE_C) $(OUTPUT_OPTION) $<
+
+%.o: %.cc
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+%.o: %.C
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+%.o: %.cpp
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+clean:: clean-c_src
+
+clean-c_src:
+ $(gen_verbose) rm -f $(C_SRC_OUTPUT_FILE) $(OBJECTS)
+
+endif
+
+ifneq ($(wildcard $(C_SRC_DIR)),)
+ERL_ERTS_DIR = $(shell $(ERL) -eval 'io:format("~s~n", [code:lib_dir(erts)]), halt().')
+
+$(C_SRC_ENV):
+ $(verbose) $(ERL) -eval "file:write_file(\"$(call core_native_path,$(C_SRC_ENV))\", \
+ io_lib:format( \
+ \"# Generated by Erlang.mk. Edit at your own risk!~n~n\" \
+ \"ERTS_INCLUDE_DIR ?= ~s/erts-~s/include/~n\" \
+ \"ERL_INTERFACE_INCLUDE_DIR ?= ~s~n\" \
+ \"ERL_INTERFACE_LIB_DIR ?= ~s~n\" \
+ \"ERTS_DIR ?= $(ERL_ERTS_DIR)~n\", \
+ [code:root_dir(), erlang:system_info(version), \
+ code:lib_dir(erl_interface, include), \
+ code:lib_dir(erl_interface, lib)])), \
+ halt()."
+
+distclean:: distclean-c_src-env
+
+distclean-c_src-env:
+ $(gen_verbose) rm -f $(C_SRC_ENV)
+
+-include $(C_SRC_ENV)
+
+ifneq ($(ERL_ERTS_DIR),$(ERTS_DIR))
+$(shell rm -f $(C_SRC_ENV))
+endif
+endif
+
+# Templates.
+
+define bs_c_nif
+#include "erl_nif.h"
+
+static int loads = 0;
+
+static int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
+{
+ /* Initialize private data. */
+ *priv_data = NULL;
+
+ loads++;
+
+ return 0;
+}
+
+static int upgrade(ErlNifEnv* env, void** priv_data, void** old_priv_data, ERL_NIF_TERM load_info)
+{
+ /* Convert the private data to the new version. */
+ *priv_data = *old_priv_data;
+
+ loads++;
+
+ return 0;
+}
+
+static void unload(ErlNifEnv* env, void* priv_data)
+{
+ if (loads == 1) {
+ /* Destroy the private data. */
+ }
+
+ loads--;
+}
+
+static ERL_NIF_TERM hello(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ if (enif_is_atom(env, argv[0])) {
+ return enif_make_tuple2(env,
+ enif_make_atom(env, "hello"),
+ argv[0]);
+ }
+
+ return enif_make_tuple2(env,
+ enif_make_atom(env, "error"),
+ enif_make_atom(env, "badarg"));
+}
+
+static ErlNifFunc nif_funcs[] = {
+ {"hello", 1, hello}
+};
+
+ERL_NIF_INIT($n, nif_funcs, load, NULL, upgrade, unload)
+endef
+
+define bs_erl_nif
+-module($n).
+
+-export([hello/1]).
+
+-on_load(on_load/0).
+on_load() ->
+ PrivDir = case code:priv_dir(?MODULE) of
+ {error, _} ->
+ AppPath = filename:dirname(filename:dirname(code:which(?MODULE))),
+ filename:join(AppPath, "priv");
+ Path ->
+ Path
+ end,
+ erlang:load_nif(filename:join(PrivDir, atom_to_list(?MODULE)), 0).
+
+hello(_) ->
+ erlang:nif_error({not_loaded, ?MODULE}).
+endef
+
+new-nif:
+ifneq ($(wildcard $(C_SRC_DIR)/$n.c),)
+ $(error Error: $(C_SRC_DIR)/$n.c already exists)
+endif
+ifneq ($(wildcard src/$n.erl),)
+ $(error Error: src/$n.erl already exists)
+endif
+ifndef n
+ $(error Usage: $(MAKE) new-nif n=NAME [in=APP])
+endif
+ifdef in
+ $(verbose) $(MAKE) -C $(APPS_DIR)/$(in)/ new-nif n=$n in=
+else
+ $(verbose) mkdir -p $(C_SRC_DIR) src/
+ $(verbose) $(call core_render,bs_c_nif,$(C_SRC_DIR)/$n.c)
+ $(verbose) $(call core_render,bs_erl_nif,src/$n.erl)
+endif
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: ci ci-prepare ci-setup
+
+CI_OTP ?=
+CI_HIPE ?=
+CI_ERLLVM ?=
+
+ifeq ($(CI_VM),native)
+ERLC_OPTS += +native
+TEST_ERLC_OPTS += +native
+else ifeq ($(CI_VM),erllvm)
+ERLC_OPTS += +native +'{hipe, [to_llvm]}'
+TEST_ERLC_OPTS += +native +'{hipe, [to_llvm]}'
+endif
+
+ifeq ($(strip $(CI_OTP) $(CI_HIPE) $(CI_ERLLVM)),)
+ci::
+else
+
+ci:: $(addprefix ci-,$(CI_OTP) $(addsuffix -native,$(CI_HIPE)) $(addsuffix -erllvm,$(CI_ERLLVM)))
+
+ci-prepare: $(addprefix $(KERL_INSTALL_DIR)/,$(CI_OTP) $(addsuffix -native,$(CI_HIPE)))
+
+ci-setup::
+ $(verbose) :
+
+ci-extra::
+ $(verbose) :
+
+ci_verbose_0 = @echo " CI " $(1);
+ci_verbose = $(ci_verbose_$(V))
+
+define ci_target
+ci-$1: $(KERL_INSTALL_DIR)/$2
+ $(verbose) $(MAKE) --no-print-directory clean
+ $(ci_verbose) \
+ PATH="$(KERL_INSTALL_DIR)/$2/bin:$(PATH)" \
+ CI_OTP_RELEASE="$1" \
+ CT_OPTS="-label $1" \
+ CI_VM="$3" \
+ $(MAKE) ci-setup tests
+ $(verbose) $(MAKE) --no-print-directory ci-extra
+endef
+
+$(foreach otp,$(CI_OTP),$(eval $(call ci_target,$(otp),$(otp),otp)))
+$(foreach otp,$(CI_HIPE),$(eval $(call ci_target,$(otp)-native,$(otp)-native,native)))
+$(foreach otp,$(CI_ERLLVM),$(eval $(call ci_target,$(otp)-erllvm,$(otp)-native,erllvm)))
+
+$(foreach otp,$(filter-out $(ERLANG_OTP),$(CI_OTP)),$(eval $(call kerl_otp_target,$(otp))))
+$(foreach otp,$(filter-out $(ERLANG_HIPE),$(sort $(CI_HIPE) $(CI_ERLLVM))),$(eval $(call kerl_hipe_target,$(otp))))
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Continuous Integration targets:" \
+ " ci Run '$(MAKE) tests' on all configured Erlang versions." \
+ "" \
+ "The CI_OTP variable must be defined with the Erlang versions" \
+ "that must be tested. For example: CI_OTP = OTP-17.3.4 OTP-17.5.3"
+
+endif
+
+# Copyright (c) 2020, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifdef CONCUERROR_TESTS
+
+.PHONY: concuerror distclean-concuerror
+
+# Configuration
+
+CONCUERROR_LOGS_DIR ?= $(CURDIR)/logs
+CONCUERROR_OPTS ?=
+
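+# CONCUERROR_TESTS takes module:function pairs. A sketch with hypothetical
+# names, producing the targets concuerror-my_mod-interleaving_test and
+# concuerror-other_mod-lock_test:
+#
+#   CONCUERROR_TESTS = my_mod:interleaving_test other_mod:lock_test
+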
+# Core targets.
+
+check:: concuerror
+
+ifndef KEEP_LOGS
+distclean:: distclean-concuerror
+endif
+
+# Plugin-specific targets.
+
+$(ERLANG_MK_TMP)/Concuerror/bin/concuerror: | $(ERLANG_MK_TMP)
+ $(verbose) git clone https://github.com/parapluu/Concuerror $(ERLANG_MK_TMP)/Concuerror
+ $(verbose) $(MAKE) -C $(ERLANG_MK_TMP)/Concuerror
+
+$(CONCUERROR_LOGS_DIR):
+ $(verbose) mkdir -p $(CONCUERROR_LOGS_DIR)
+
+define concuerror_html_report
+<!DOCTYPE html>
+<html lang="en">
+<head>
+<meta charset="utf-8">
+<title>Concuerror HTML report</title>
+</head>
+<body>
+<h1>Concuerror HTML report</h1>
+<p>Generated on $(concuerror_date)</p>
+<ul>
+$(foreach t,$(concuerror_targets),<li><a href="$(t).txt">$(t)</a></li>)
+</ul>
+</body>
+</html>
+endef
+
+concuerror: $(addprefix concuerror-,$(subst :,-,$(CONCUERROR_TESTS)))
+ $(eval concuerror_date := $(shell date))
+ $(eval concuerror_targets := $^)
+ $(verbose) $(call core_render,concuerror_html_report,$(CONCUERROR_LOGS_DIR)/concuerror.html)
+
+define concuerror_target
+.PHONY: concuerror-$1-$2
+
+concuerror-$1-$2: test-build | $(ERLANG_MK_TMP)/Concuerror/bin/concuerror $(CONCUERROR_LOGS_DIR)
+ $(ERLANG_MK_TMP)/Concuerror/bin/concuerror \
+ --pa $(CURDIR)/ebin --pa $(TEST_DIR) \
+ -o $(CONCUERROR_LOGS_DIR)/concuerror-$1-$2.txt \
+ $$(CONCUERROR_OPTS) -m $1 -t $2
+endef
+
+$(foreach test,$(CONCUERROR_TESTS),$(eval $(call concuerror_target,$(firstword $(subst :, ,$(test))),$(lastword $(subst :, ,$(test))))))
+
+distclean-concuerror:
+ $(gen_verbose) rm -rf $(CONCUERROR_LOGS_DIR)
+
+endif
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: ct apps-ct distclean-ct
+
+# Configuration.
+
+CT_OPTS ?=
+
+ifneq ($(wildcard $(TEST_DIR)),)
+ifndef CT_SUITES
+CT_SUITES := $(sort $(subst _SUITE.erl,,$(notdir $(call core_find,$(TEST_DIR)/,*_SUITE.erl))))
+endif
+endif
+CT_SUITES ?=
+CT_LOGS_DIR ?= $(CURDIR)/logs
+
+# Core targets.
+
+tests:: ct
+
+ifndef KEEP_LOGS
+distclean:: distclean-ct
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Common_test targets:" \
+ " ct Run all the common_test suites for this project" \
+ "" \
+ "All your common_test suites have their associated targets." \
+ "A suite named http_SUITE can be ran using the ct-http target."
+
+# Plugin-specific targets.
+
+CT_RUN = ct_run \
+ -no_auto_compile \
+ -noinput \
+ -pa $(CURDIR)/ebin $(TEST_DIR) \
+ -dir $(TEST_DIR) \
+ -logdir $(CT_LOGS_DIR)
+
+ifeq ($(CT_SUITES),)
+ct: $(if $(IS_APP)$(ROOT_DIR),,apps-ct)
+else
+# We do not run tests if we are in an apps/* with no test directory.
+ifneq ($(IS_APP)$(wildcard $(TEST_DIR)),1)
+ct: test-build $(if $(IS_APP)$(ROOT_DIR),,apps-ct)
+ $(verbose) mkdir -p $(CT_LOGS_DIR)
+ $(gen_verbose) $(CT_RUN) -sname ct_$(PROJECT) -suite $(addsuffix _SUITE,$(CT_SUITES)) $(CT_OPTS)
+endif
+endif
+
+ifneq ($(ALL_APPS_DIRS),)
+define ct_app_target
+apps-ct-$1: test-build
+ $$(MAKE) -C $1 ct IS_APP=1
+endef
+
+$(foreach app,$(ALL_APPS_DIRS),$(eval $(call ct_app_target,$(app))))
+
+apps-ct: $(addprefix apps-ct-,$(ALL_APPS_DIRS))
+endif
+
+ifdef t
+ifeq (,$(findstring :,$t))
+CT_EXTRA = -group $t
+else
+t_words = $(subst :, ,$t)
+CT_EXTRA = -group $(firstword $(t_words)) -case $(lastword $(t_words))
+endif
+else
+ifdef c
+CT_EXTRA = -case $c
+else
+CT_EXTRA =
+endif
+endif
+
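+# For example, with a hypothetical http_SUITE containing a tcp group and a
+# get_case test case:
+#
+#   $ make ct-http t=tcp           # run the tcp group only
+#   $ make ct-http t=tcp:get_case  # run one case within the group
+#   $ make ct-http c=get_case      # run one case, no group
+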
+define ct_suite_target
+ct-$(1): test-build
+ $(verbose) mkdir -p $(CT_LOGS_DIR)
+ $(gen_verbose_esc) $(CT_RUN) -sname ct_$(PROJECT) -suite $(addsuffix _SUITE,$(1)) $(CT_EXTRA) $(CT_OPTS)
+endef
+
+$(foreach test,$(CT_SUITES),$(eval $(call ct_suite_target,$(test))))
+
+distclean-ct:
+ $(gen_verbose) rm -rf $(CT_LOGS_DIR)
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: plt distclean-plt dialyze
+
+# Configuration.
+
+DIALYZER_PLT ?= $(CURDIR)/.$(PROJECT).plt
+export DIALYZER_PLT
+
+PLT_APPS ?=
+DIALYZER_DIRS ?= --src -r $(wildcard src) $(ALL_APPS_DIRS)
+DIALYZER_OPTS ?= -Werror_handling -Wrace_conditions -Wunmatched_returns # -Wunderspecs
+DIALYZER_PLT_OPTS ?=
+
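+# A sketch of widening the PLT from the user Makefile; the applications
+# listed are illustrative:
+#
+#   PLT_APPS = crypto ssl inets
+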
+# Core targets.
+
+check:: dialyze
+
+distclean:: distclean-plt
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Dialyzer targets:" \
+ " plt Build a PLT file for this project" \
+ " dialyze Analyze the project using Dialyzer"
+
+# Plugin-specific targets.
+
+define filter_opts.erl
+ Opts = init:get_plain_arguments(),
+ {Filtered, _} = lists:foldl(fun
+ (O, {Os, true}) -> {[O|Os], false};
+ (O = "-D", {Os, _}) -> {[O|Os], true};
+ (O = [\\$$-, \\$$D, _ | _], {Os, _}) -> {[O|Os], false};
+ (O = "-I", {Os, _}) -> {[O|Os], true};
+ (O = [\\$$-, \\$$I, _ | _], {Os, _}) -> {[O|Os], false};
+ (O = "-pa", {Os, _}) -> {[O|Os], true};
+ (_, Acc) -> Acc
+ end, {[], false}, Opts),
+ io:format("~s~n", [string:join(lists:reverse(Filtered), " ")]),
+ halt().
+endef
+
+# DIALYZER_PLT is a variable understood directly by Dialyzer.
+#
+# We append the path to erts at the end of the PLT. This works
+# because the PLT file is in the external term format and the
+# function binary_to_term/1 ignores any trailing data.
+$(DIALYZER_PLT): deps app
+ $(eval DEPS_LOG := $(shell test -f $(ERLANG_MK_TMP)/deps.log && \
+ while read p; do test -d $$p/ebin && echo $$p/ebin; done <$(ERLANG_MK_TMP)/deps.log))
+ $(verbose) dialyzer --build_plt $(DIALYZER_PLT_OPTS) --apps \
+ erts kernel stdlib $(PLT_APPS) $(OTP_DEPS) $(LOCAL_DEPS) $(DEPS_LOG) || test $$? -eq 2
+ $(verbose) $(ERL) -eval 'io:format("~n~s~n", [code:lib_dir(erts)]), halt().' >> $@
+
+plt: $(DIALYZER_PLT)
+
+distclean-plt:
+ $(gen_verbose) rm -f $(DIALYZER_PLT)
+
+ifneq ($(wildcard $(DIALYZER_PLT)),)
+dialyze: $(if $(filter --src,$(DIALYZER_DIRS)),,deps app)
+ $(verbose) if ! tail -n1 $(DIALYZER_PLT) | \
+ grep -q "^`$(ERL) -eval 'io:format("~s", [code:lib_dir(erts)]), halt().'`$$"; then \
+ rm $(DIALYZER_PLT); \
+ $(MAKE) plt; \
+ fi
+else
+dialyze: $(DIALYZER_PLT)
+endif
+ $(verbose) dialyzer --no_native `$(ERL) \
+ -eval "$(subst $(newline),,$(call escape_dquotes,$(call filter_opts.erl)))" \
+ -extra $(ERLC_OPTS)` $(DIALYZER_DIRS) $(DIALYZER_OPTS) $(if $(wildcard ebin/),-pa ebin/)
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-edoc edoc
+
+# Configuration.
+
+EDOC_OPTS ?=
+EDOC_SRC_DIRS ?=
+EDOC_OUTPUT ?= doc
+
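+# A minimal sketch of overriding these in the user Makefile; the options
+# shown are standard EDoc options, chosen purely for illustration:
+#
+#   EDOC_OPTS = {preprocess, true}, {todo, true}
+#   EDOC_OUTPUT = doc/api
+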
+define edoc.erl
+ SrcPaths = lists:foldl(fun(P, Acc) ->
+ filelib:wildcard(atom_to_list(P) ++ "/{src,c_src}") ++ Acc
+ end, [], [$(call comma_list,$(patsubst %,'%',$(call core_native_path,$(EDOC_SRC_DIRS))))]),
+ DefaultOpts = [{dir, "$(EDOC_OUTPUT)"}, {source_path, SrcPaths}, {subpackages, false}],
+ edoc:application($(1), ".", [$(2)] ++ DefaultOpts),
+ halt(0).
+endef
+
+# Core targets.
+
+ifneq ($(strip $(EDOC_SRC_DIRS)$(wildcard doc/overview.edoc)),)
+docs:: edoc
+endif
+
+distclean:: distclean-edoc
+
+# Plugin-specific targets.
+
+edoc: distclean-edoc doc-deps
+ $(gen_verbose) $(call erlang,$(call edoc.erl,$(PROJECT),$(EDOC_OPTS)))
+
+distclean-edoc:
+ $(gen_verbose) rm -f $(EDOC_OUTPUT)/*.css $(EDOC_OUTPUT)/*.html $(EDOC_OUTPUT)/*.png $(EDOC_OUTPUT)/edoc-info
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Configuration.
+
+DTL_FULL_PATH ?=
+DTL_PATH ?= templates/
+DTL_PREFIX ?=
+DTL_SUFFIX ?= _dtl
+DTL_OPTS ?=
+
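+# Module naming sketch, assuming a hypothetical templates/users/list.dtl:
+# without DTL_FULL_PATH it compiles to the module list_dtl; with it, the
+# path is flattened into users_list_dtl.
+#
+#   DTL_FULL_PATH = 1
+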
+# Verbosity.
+
+dtl_verbose_0 = @echo " DTL " $(filter %.dtl,$(?F));
+dtl_verbose = $(dtl_verbose_$(V))
+
+# Core targets.
+
+DTL_PATH := $(abspath $(DTL_PATH))
+DTL_FILES := $(sort $(call core_find,$(DTL_PATH),*.dtl))
+
+ifneq ($(DTL_FILES),)
+
+DTL_NAMES = $(addprefix $(DTL_PREFIX),$(addsuffix $(DTL_SUFFIX),$(DTL_FILES:$(DTL_PATH)/%.dtl=%)))
+DTL_MODULES = $(if $(DTL_FULL_PATH),$(subst /,_,$(DTL_NAMES)),$(notdir $(DTL_NAMES)))
+BEAM_FILES += $(addsuffix .beam,$(addprefix ebin/,$(DTL_MODULES)))
+
+ifneq ($(words $(DTL_FILES)),0)
+# Rebuild templates when the Makefile changes.
+$(ERLANG_MK_TMP)/last-makefile-change-erlydtl: $(MAKEFILE_LIST) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(DTL_FILES); \
+ fi
+ $(verbose) touch $@
+
+ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change-erlydtl
+endif
+
+define erlydtl_compile.erl
+ [begin
+ Module0 = case "$(strip $(DTL_FULL_PATH))" of
+ "" ->
+ filename:basename(F, ".dtl");
+ _ ->
+ "$(call core_native_path,$(DTL_PATH))/" ++ F2 = filename:rootname(F, ".dtl"),
+ re:replace(F2, "/", "_", [{return, list}, global])
+ end,
+ Module = list_to_atom("$(DTL_PREFIX)" ++ string:to_lower(Module0) ++ "$(DTL_SUFFIX)"),
+ case erlydtl:compile(F, Module, [$(DTL_OPTS)] ++ [{out_dir, "ebin/"}, return_errors]) of
+ ok -> ok;
+ {ok, _} -> ok
+ end
+ end || F <- string:tokens("$(1)", " ")],
+ halt().
+endef
+
+ebin/$(PROJECT).app:: $(DTL_FILES) | ebin/
+ $(if $(strip $?),\
+ $(dtl_verbose) $(call erlang,$(call erlydtl_compile.erl,$(call core_native_path,$?)),\
+ -pa ebin/))
+
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, Dave Cottlehuber <dch@skunkwerks.at>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-escript escript escript-zip
+
+# Configuration.
+
+ESCRIPT_NAME ?= $(PROJECT)
+ESCRIPT_FILE ?= $(ESCRIPT_NAME)
+
+ESCRIPT_SHEBANG ?= /usr/bin/env escript
+ESCRIPT_COMMENT ?= This is an -*- erlang -*- file
+ESCRIPT_EMU_ARGS ?= -escript main $(ESCRIPT_NAME)
+
+ESCRIPT_ZIP ?= 7z a -tzip -mx=9 -mtc=off $(if $(filter-out 0,$(V)),,> /dev/null)
+ESCRIPT_ZIP_FILE ?= $(ERLANG_MK_TMP)/escript.zip
+
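+# A sketch of the usual overrides; my_tool is a hypothetical name. The
+# default emulator arguments call main/1 in the module named by ESCRIPT_NAME.
+#
+#   ESCRIPT_NAME = my_tool
+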
+# Core targets.
+
+distclean:: distclean-escript
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Escript targets:" \
+ " escript Build an executable escript archive" \
+
+# Plugin-specific targets.
+
+escript-zip:: FULL=1
+escript-zip:: deps app
+ $(verbose) mkdir -p $(dir $(ESCRIPT_ZIP_FILE))
+ $(verbose) rm -f $(ESCRIPT_ZIP_FILE)
+ $(gen_verbose) cd .. && $(ESCRIPT_ZIP) $(ESCRIPT_ZIP_FILE) $(PROJECT)/ebin/*
+ifneq ($(DEPS),)
+ $(verbose) cd $(DEPS_DIR) && $(ESCRIPT_ZIP) $(ESCRIPT_ZIP_FILE) \
+ $(subst $(DEPS_DIR)/,,$(addsuffix /*,$(wildcard \
+ $(addsuffix /ebin,$(shell cat $(ERLANG_MK_TMP)/deps.log)))))
+endif
+
+escript:: escript-zip
+ $(gen_verbose) printf "%s\n" \
+ "#!$(ESCRIPT_SHEBANG)" \
+ "%% $(ESCRIPT_COMMENT)" \
+ "%%! $(ESCRIPT_EMU_ARGS)" > $(ESCRIPT_FILE)
+ $(verbose) cat $(ESCRIPT_ZIP_FILE) >> $(ESCRIPT_FILE)
+ $(verbose) chmod +x $(ESCRIPT_FILE)
+
+distclean-escript:
+ $(gen_verbose) rm -f $(ESCRIPT_FILE)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, Enrique Fernandez <enrique.fernandez@erlang-solutions.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: eunit apps-eunit
+
+# Configuration
+
+EUNIT_OPTS ?=
+EUNIT_ERL_OPTS ?=
+
+# Core targets.
+
+tests:: eunit
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "EUnit targets:" \
+ " eunit Run all the EUnit tests for this project"
+
+# Plugin-specific targets.
+
+define eunit.erl
+ $(call cover.erl)
+ CoverSetup(),
+ case eunit:test($1, [$(EUNIT_OPTS)]) of
+ ok -> ok;
+ error -> halt(2)
+ end,
+ CoverExport("$(call core_native_path,$(COVER_DATA_DIR))/eunit.coverdata"),
+ halt()
+endef
+
+EUNIT_ERL_OPTS += -pa $(TEST_DIR) $(CURDIR)/ebin
+
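+# For example, with hypothetical names:
+#
+#   $ make eunit t=my_mod          # all EUnit tests in my_mod
+#   $ make eunit t=my_mod:a_test   # a single zero-arity test function
+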
+ifdef t
+ifeq (,$(findstring :,$(t)))
+eunit: test-build cover-data-dir
+ $(gen_verbose) $(call erlang,$(call eunit.erl,['$(t)']),$(EUNIT_ERL_OPTS))
+else
+eunit: test-build cover-data-dir
+ $(gen_verbose) $(call erlang,$(call eunit.erl,fun $(t)/0),$(EUNIT_ERL_OPTS))
+endif
+else
+EUNIT_EBIN_MODS = $(notdir $(basename $(ERL_FILES) $(BEAM_FILES)))
+EUNIT_TEST_MODS = $(notdir $(basename $(call core_find,$(TEST_DIR)/,*.erl)))
+
+EUNIT_MODS = $(foreach mod,$(EUNIT_EBIN_MODS) $(filter-out \
+ $(patsubst %,%_tests,$(EUNIT_EBIN_MODS)),$(EUNIT_TEST_MODS)),'$(mod)')
+
+eunit: test-build $(if $(IS_APP)$(ROOT_DIR),,apps-eunit) cover-data-dir
+ifneq ($(wildcard src/ $(TEST_DIR)),)
+ $(gen_verbose) $(call erlang,$(call eunit.erl,[$(call comma_list,$(EUNIT_MODS))]),$(EUNIT_ERL_OPTS))
+endif
+
+ifneq ($(ALL_APPS_DIRS),)
+apps-eunit: test-build
+ $(verbose) eunit_retcode=0 ; for app in $(ALL_APPS_DIRS); do $(MAKE) -C $$app eunit IS_APP=1; \
+ [ $$? -ne 0 ] && eunit_retcode=1 ; done ; \
+ exit $$eunit_retcode
+endif
+endif
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter proper,$(DEPS) $(TEST_DEPS)),proper)
+.PHONY: proper
+
+# Targets.
+
+tests:: proper
+
+define proper_check.erl
+ $(call cover.erl)
+ code:add_pathsa([
+ "$(call core_native_path,$(CURDIR)/ebin)",
+ "$(call core_native_path,$(DEPS_DIR)/*/ebin)",
+ "$(call core_native_path,$(TEST_DIR))"]),
+ Module = fun(M) ->
+ [true] =:= lists:usort([
+ case atom_to_list(F) of
+ "prop_" ++ _ ->
+ io:format("Testing ~p:~p/0~n", [M, F]),
+ proper:quickcheck(M:F(), nocolors);
+ _ ->
+ true
+ end
+ || {F, 0} <- M:module_info(exports)])
+ end,
+ try begin
+ CoverSetup(),
+ Res = case $(1) of
+ all -> [true] =:= lists:usort([Module(M) || M <- [$(call comma_list,$(3))]]);
+ module -> Module($(2));
+ function -> proper:quickcheck($(2), nocolors)
+ end,
+ CoverExport("$(COVER_DATA_DIR)/proper.coverdata"),
+ Res
+ end of
+ true -> halt(0);
+ _ -> halt(1)
+ catch error:undef ->
+ io:format("Undefined property or module?~n~p~n", [erlang:get_stacktrace()]),
+ halt(0)
+ end.
+endef
+
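+# For example, with hypothetical names:
+#
+#   $ make proper                       # check every prop_* in every module
+#   $ make proper t=my_props            # all properties of one module
+#   $ make proper t=my_props:prop_sort  # a single property
+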
+ifdef t
+ifeq (,$(findstring :,$(t)))
+proper: test-build cover-data-dir
+ $(verbose) $(call erlang,$(call proper_check.erl,module,$(t)))
+else
+proper: test-build cover-data-dir
+ $(verbose) echo Testing $(t)/0
+ $(verbose) $(call erlang,$(call proper_check.erl,function,$(t)()))
+endif
+else
+proper: test-build cover-data-dir
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(wildcard ebin/*.beam) $(call core_find,$(TEST_DIR)/,*.beam))))))
+ $(gen_verbose) $(call erlang,$(call proper_check.erl,all,undefined,$(MODULES)))
+endif
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Verbosity.
+
+proto_verbose_0 = @echo " PROTO " $(filter %.proto,$(?F));
+proto_verbose = $(proto_verbose_$(V))
+
+# Core targets.
+
+ifneq ($(wildcard src/),)
+ifneq ($(filter gpb protobuffs,$(BUILD_DEPS) $(DEPS)),)
+PROTO_FILES := $(filter %.proto,$(ALL_SRC_FILES))
+ERL_FILES += $(addprefix src/,$(patsubst %.proto,%_pb.erl,$(notdir $(PROTO_FILES))))
+
+ifeq ($(PROTO_FILES),)
+$(ERLANG_MK_TMP)/last-makefile-change-protobuffs:
+ $(verbose) :
+else
+# Rebuild proto files when the Makefile changes.
+# We exclude $(PROJECT).d to avoid a circular dependency.
+$(ERLANG_MK_TMP)/last-makefile-change-protobuffs: $(filter-out $(PROJECT).d,$(MAKEFILE_LIST)) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(PROTO_FILES); \
+ fi
+ $(verbose) touch $@
+
+$(PROJECT).d:: $(ERLANG_MK_TMP)/last-makefile-change-protobuffs
+endif
+
+ifeq ($(filter gpb,$(BUILD_DEPS) $(DEPS)),)
+define compile_proto.erl
+ [begin
+ protobuffs_compile:generate_source(F, [
+ {output_include_dir, "./include"},
+ {output_src_dir, "./src"}])
+ end || F <- string:tokens("$1", " ")],
+ halt().
+endef
+else
+define compile_proto.erl
+ [begin
+ gpb_compile:file(F, [
+ {include_as_lib, true},
+ {module_name_suffix, "_pb"},
+ {o_hrl, "./include"},
+ {o_erl, "./src"}])
+ end || F <- string:tokens("$1", " ")],
+ halt().
+endef
+endif
+
+ifneq ($(PROTO_FILES),)
+$(PROJECT).d:: $(PROTO_FILES)
+ $(verbose) mkdir -p ebin/ include/
+ $(if $(strip $?),$(proto_verbose) $(call erlang,$(call compile_proto.erl,$?)))
+endif
+endif
+endif
+
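+# To pick the generator, list one of the two in the user Makefile; gpb is
+# shown here as an illustration:
+#
+#   BUILD_DEPS = gpb
+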
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: relx-rel relx-relup distclean-relx-rel run
+
+# Configuration.
+
+RELX ?= $(ERLANG_MK_TMP)/relx
+RELX_CONFIG ?= $(CURDIR)/relx.config
+
+RELX_URL ?= https://erlang.mk/res/relx-v3.27.0
+RELX_OPTS ?=
+RELX_OUTPUT_DIR ?= _rel
+RELX_REL_EXT ?=
+RELX_TAR ?= 1
+
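+# A sketch of common overrides from the user Makefile:
+#
+#   RELX_OUTPUT_DIR = releases
+#   RELX_TAR = 0
+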
+ifdef SFX
+ RELX_TAR = 1
+endif
+
+ifeq ($(firstword $(RELX_OPTS)),-o)
+ RELX_OUTPUT_DIR = $(word 2,$(RELX_OPTS))
+else
+ RELX_OPTS += -o $(RELX_OUTPUT_DIR)
+endif
+
+# Core targets.
+
+ifeq ($(IS_DEP),)
+ifneq ($(wildcard $(RELX_CONFIG)),)
+rel:: relx-rel
+
+relup:: relx-relup
+endif
+endif
+
+distclean:: distclean-relx-rel
+
+# Plugin-specific targets.
+
+$(RELX): | $(ERLANG_MK_TMP)
+ $(gen_verbose) $(call core_http_get,$(RELX),$(RELX_URL))
+ $(verbose) chmod +x $(RELX)
+
+relx-rel: $(RELX) rel-deps app
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) release
+ $(verbose) $(MAKE) relx-post-rel
+ifeq ($(RELX_TAR),1)
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) tar
+endif
+
+relx-relup: $(RELX) rel-deps app
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) release
+ $(MAKE) relx-post-rel
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) relup $(if $(filter 1,$(RELX_TAR)),tar)
+
+distclean-relx-rel:
+ $(gen_verbose) rm -rf $(RELX_OUTPUT_DIR)
+
+# Default hooks.
+relx-post-rel::
+ $(verbose) :
+
+# Run target.
+
+ifeq ($(wildcard $(RELX_CONFIG)),)
+run::
+else
+
+define get_relx_release.erl
+ {ok, Config} = file:consult("$(call core_native_path,$(RELX_CONFIG))"),
+ {release, {Name, Vsn0}, _} = lists:keyfind(release, 1, Config),
+ Vsn = case Vsn0 of
+ {cmd, Cmd} -> os:cmd(Cmd);
+ semver -> "";
+ {semver, _} -> "";
+ _VsnStr -> Vsn0
+ end,
+ Extended = case lists:keyfind(extended_start_script, 1, Config) of
+ {_, true} -> "1";
+ _ -> ""
+ end,
+ io:format("~s ~s ~s", [Name, Vsn, Extended]),
+ halt(0).
+endef
+
+RELX_REL := $(shell $(call erlang,$(get_relx_release.erl)))
+RELX_REL_NAME := $(word 1,$(RELX_REL))
+RELX_REL_VSN := $(word 2,$(RELX_REL))
+RELX_REL_CMD := $(if $(word 3,$(RELX_REL)),console)
+
+ifeq ($(PLATFORM),msys2)
+RELX_REL_EXT := .cmd
+endif
+
+run:: all
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) $(RELX_REL_CMD)
+
+ifdef RELOAD
+rel::
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) ping
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) \
+ eval "io:format(\"~p~n\", [c:lm()])"
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Relx targets:" \
+ " run Compile the project, build the release and run it"
+
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, M Robert Martin <rob@version2beta.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: shell
+
+# Configuration.
+
+SHELL_ERL ?= erl
+SHELL_PATHS ?= $(CURDIR)/ebin $(TEST_DIR)
+SHELL_OPTS ?=
+
+ALL_SHELL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(SHELL_DEPS))
+
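+# A typical setup; tddreloader is an illustrative dependency that reloads
+# modified modules in the shell:
+#
+#   SHELL_DEPS = tddreloader
+#   SHELL_OPTS = -s tddreloader start
+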
+# Core targets
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Shell targets:" \
+ " shell Run an erlang shell with SHELL_OPTS or reasonable default"
+
+# Plugin-specific targets.
+
+$(foreach dep,$(SHELL_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+build-shell-deps:
+else
+build-shell-deps: $(ALL_SHELL_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_SHELL_DEPS_DIRS) ; do \
+ if [ -z "$(strip $(FULL))" ] && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ else \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ fi \
+ done
+endif
+
+shell:: build-shell-deps
+ $(gen_verbose) $(SHELL_ERL) -pa $(SHELL_PATHS) $(SHELL_OPTS)
+
+# Copyright 2017, Stanislaw Klekot <dozzie@jarowit.net>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-sphinx sphinx
+
+# Configuration.
+
+SPHINX_BUILD ?= sphinx-build
+SPHINX_SOURCE ?= doc
+SPHINX_CONFDIR ?=
+SPHINX_FORMATS ?= html
+SPHINX_DOCTREES ?= $(ERLANG_MK_TMP)/sphinx.doctrees
+SPHINX_OPTS ?=
+
+#sphinx_html_opts =
+#sphinx_html_output = html
+#sphinx_man_opts =
+#sphinx_man_output = man
+#sphinx_latex_opts =
+#sphinx_latex_output = latex
+
+# Helpers.
+
+sphinx_build_0 = @echo " SPHINX" $1; $(SPHINX_BUILD) -N -q
+sphinx_build_1 = $(SPHINX_BUILD) -N
+sphinx_build_2 = set -x; $(SPHINX_BUILD)
+sphinx_build = $(sphinx_build_$(V))
+
+define sphinx.build
+$(call sphinx_build,$1) -b $1 -d $(SPHINX_DOCTREES) $(if $(SPHINX_CONFDIR),-c $(SPHINX_CONFDIR)) $(SPHINX_OPTS) $(sphinx_$1_opts) -- $(SPHINX_SOURCE) $(call sphinx.output,$1)
+
+endef
+
+define sphinx.output
+$(if $(sphinx_$1_output),$(sphinx_$1_output),$1)
+endef
+
+# Targets.
+
+ifneq ($(wildcard $(if $(SPHINX_CONFDIR),$(SPHINX_CONFDIR),$(SPHINX_SOURCE))/conf.py),)
+docs:: sphinx
+distclean:: distclean-sphinx
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Sphinx targets:" \
+ " sphinx Generate Sphinx documentation." \
+ "" \
+ "ReST sources and 'conf.py' file are expected in directory pointed by" \
+ "SPHINX_SOURCE ('doc' by default). SPHINX_FORMATS lists formats to build (only" \
+ "'html' format is generated by default); target directory can be specified by" \
+ 'setting sphinx_$${format}_output, for example: sphinx_html_output = output/html' \
+ "Additional Sphinx options can be set in SPHINX_OPTS."
+
+# Plugin-specific targets.
+
+sphinx:
+ $(foreach F,$(SPHINX_FORMATS),$(call sphinx.build,$F))
+
+distclean-sphinx:
+ $(gen_verbose) rm -rf $(filter-out $(SPHINX_SOURCE),$(foreach F,$(SPHINX_FORMATS),$(call sphinx.output,$F)))
+
+# Copyright (c) 2017, Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: show-ERL_LIBS show-ERLC_OPTS show-TEST_ERLC_OPTS
+
+show-ERL_LIBS:
+ @echo $(ERL_LIBS)
+
+show-ERLC_OPTS:
+ @$(foreach opt,$(ERLC_OPTS) -pa ebin -I include,echo "$(opt)";)
+
+show-TEST_ERLC_OPTS:
+ @$(foreach opt,$(TEST_ERLC_OPTS) -pa ebin -I include,echo "$(opt)";)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter triq,$(DEPS) $(TEST_DEPS)),triq)
+.PHONY: triq
+
+# Targets.
+
+tests:: triq
+
+define triq_check.erl
+ $(call cover.erl)
+ code:add_pathsa([
+ "$(call core_native_path,$(CURDIR)/ebin)",
+ "$(call core_native_path,$(DEPS_DIR)/*/ebin)",
+ "$(call core_native_path,$(TEST_DIR))"]),
+ try begin
+ CoverSetup(),
+ Res = case $(1) of
+ all -> [true] =:= lists:usort([triq:check(M) || M <- [$(call comma_list,$(3))]]);
+ module -> triq:check($(2));
+ function -> triq:check($(2))
+ end,
+ CoverExport("$(COVER_DATA_DIR)/triq.coverdata"),
+ Res
+ end of
+ true -> halt(0);
+ _ -> halt(1)
+ catch error:undef ->
+ io:format("Undefined property or module?~n~p~n", [erlang:get_stacktrace()]),
+ halt(0)
+ end.
+endef
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+triq: test-build cover-data-dir
+ $(verbose) $(call erlang,$(call triq_check.erl,module,$(t)))
+else
+triq: test-build cover-data-dir
+ $(verbose) echo Testing $(t)/0
+ $(verbose) $(call erlang,$(call triq_check.erl,function,$(t)()))
+endif
+else
+triq: test-build cover-data-dir
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(wildcard ebin/*.beam) $(call core_find,$(TEST_DIR)/,*.beam))))))
+ $(gen_verbose) $(call erlang,$(call triq_check.erl,all,undefined,$(MODULES)))
+endif
+endif
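+
+# Examples (illustrative; module and property names are hypothetical):
+#   $ make triq t=prop_mymod           # check all properties in one module
+#   $ make triq t=prop_mymod:prop_foo  # check a single property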
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Erlang Solutions Ltd.
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: xref distclean-xref
+
+# Configuration.
+
+ifeq ($(XREF_CONFIG),)
+ XREFR_ARGS :=
+else
+ XREFR_ARGS := -c $(XREF_CONFIG)
+endif
+
+XREFR ?= $(CURDIR)/xrefr
+export XREFR
+
+XREFR_URL ?= https://github.com/inaka/xref_runner/releases/download/1.1.0/xrefr
+
+# Core targets.
+
+help::
+	$(verbose) printf '%s\n' '' \
+		'Xref targets:' \
+		'  xref        Run Xrefr using $$XREF_CONFIG as the config file, if defined'
+
+distclean:: distclean-xref
+
+# Plugin-specific targets.
+
+$(XREFR):
+ $(gen_verbose) $(call core_http_get,$(XREFR),$(XREFR_URL))
+ $(verbose) chmod +x $(XREFR)
+
+xref: deps app $(XREFR)
+ $(gen_verbose) $(XREFR) $(XREFR_ARGS)
+
+distclean-xref:
+ $(gen_verbose) rm -rf $(XREFR)
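+
+# Example (illustrative):
+#   $ make xref XREF_CONFIG=xref.config
+# fetches the xrefr escript on first use, then runs it with `-c xref.config`.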
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Viktor Söderqvist <viktor@zuiderkwast.se>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+COVER_REPORT_DIR ?= cover
+COVER_DATA_DIR ?= $(COVER_REPORT_DIR)
+
+ifdef COVER
+COVER_APPS ?= $(notdir $(ALL_APPS_DIRS))
+COVER_DEPS ?=
+endif
+
+# Code coverage for Common Test.
+
+ifdef COVER
+ifdef CT_RUN
+ifneq ($(wildcard $(TEST_DIR)),)
+test-build:: $(TEST_DIR)/ct.cover.spec
+
+$(TEST_DIR)/ct.cover.spec: cover-data-dir
+ $(gen_verbose) printf "%s\n" \
+ "{incl_app, '$(PROJECT)', details}." \
+ "{incl_dirs, '$(PROJECT)', [\"$(call core_native_path,$(CURDIR)/ebin)\" \
+ $(foreach a,$(COVER_APPS),$(comma) \"$(call core_native_path,$(APPS_DIR)/$a/ebin)\") \
+ $(foreach d,$(COVER_DEPS),$(comma) \"$(call core_native_path,$(DEPS_DIR)/$d/ebin)\")]}." \
+ '{export,"$(call core_native_path,$(abspath $(COVER_DATA_DIR))/ct.coverdata)"}.' > $@
+
+CT_RUN += -cover $(TEST_DIR)/ct.cover.spec
+endif
+endif
+endif
+
+# Code coverage for other tools.
+
+ifdef COVER
+define cover.erl
+ CoverSetup = fun() ->
+ Dirs = ["$(call core_native_path,$(CURDIR)/ebin)"
+ $(foreach a,$(COVER_APPS),$(comma) "$(call core_native_path,$(APPS_DIR)/$a/ebin)")
+ $(foreach d,$(COVER_DEPS),$(comma) "$(call core_native_path,$(DEPS_DIR)/$d/ebin)")],
+ [begin
+ case filelib:is_dir(Dir) of
+ false -> false;
+ true ->
+ case cover:compile_beam_directory(Dir) of
+ {error, _} -> halt(1);
+ _ -> true
+ end
+ end
+ end || Dir <- Dirs]
+ end,
+ CoverExport = fun(Filename) -> cover:export(Filename) end,
+endef
+else
+define cover.erl
+ CoverSetup = fun() -> ok end,
+ CoverExport = fun(_) -> ok end,
+endef
+endif
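+
+# For illustration (a sketch, mirroring triq_check.erl above): a plugin embeds
+# $(call cover.erl) at the top of its check script, then brackets the run with
+# the two funs it defines:
+#   CoverSetup(),
+#   Res = run_checks(),   % run_checks/0 is a hypothetical placeholder
+#   CoverExport("$(COVER_DATA_DIR)/tool.coverdata"),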
+
+# Core targets
+
+ifdef COVER
+ifneq ($(COVER_REPORT_DIR),)
+tests::
+ $(verbose) $(MAKE) --no-print-directory cover-report
+endif
+
+cover-data-dir: | $(COVER_DATA_DIR)
+
+$(COVER_DATA_DIR):
+ $(verbose) mkdir -p $(COVER_DATA_DIR)
+else
+cover-data-dir:
+endif
+
+clean:: coverdata-clean
+
+ifneq ($(COVER_REPORT_DIR),)
+distclean:: cover-report-clean
+endif
+
+help::
+	$(verbose) printf "%s\n" "" \
+		"Cover targets:" \
+		"  cover-report  Generate an HTML coverage report from previously collected" \
+		"                cover data." \
+		"  all.coverdata Merge all coverdata files into all.coverdata." \
+		"" \
+		"If COVER=1 is set, coverage data is generated by the eunit and ct targets." \
+		"The tests target additionally generates an HTML coverage report from the" \
+		"combined coverdata files from each of these testing tools. HTML reports" \
+		"can be disabled by setting COVER_REPORT_DIR to empty."
+
+# Plugin specific targets
+
+COVERDATA = $(filter-out $(COVER_DATA_DIR)/all.coverdata,$(wildcard $(COVER_DATA_DIR)/*.coverdata))
+
+.PHONY: coverdata-clean
+coverdata-clean:
+ $(gen_verbose) rm -f $(COVER_DATA_DIR)/*.coverdata $(TEST_DIR)/ct.cover.spec
+
+# Merge all coverdata files into one.
+define cover_export.erl
+ $(foreach f,$(COVERDATA),cover:import("$(f)") == ok orelse halt(1),)
+ cover:export("$(COVER_DATA_DIR)/$@"), halt(0).
+endef
+
+all.coverdata: $(COVERDATA) cover-data-dir
+ $(gen_verbose) $(call erlang,$(cover_export.erl))
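+
+# Example (illustrative):
+#   $ make ct COVER=1 && make all.coverdata
+# merges the coverdata files collected in $(COVER_DATA_DIR) into a single
+# all.coverdata file for consumption by external tools.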
+
+# These are only defined if COVER_REPORT_DIR is non-empty. Set COVER_REPORT_DIR to
+# empty if you want the coverdata files but not the HTML report.
+ifneq ($(COVER_REPORT_DIR),)
+
+.PHONY: cover-report-clean cover-report
+
+cover-report-clean:
+ $(gen_verbose) rm -rf $(COVER_REPORT_DIR)
+ifneq ($(COVER_REPORT_DIR),$(COVER_DATA_DIR))
+ $(if $(shell ls -A $(COVER_DATA_DIR)/),,$(verbose) rmdir $(COVER_DATA_DIR))
+endif
+
+ifeq ($(COVERDATA),)
+cover-report:
+else
+
+# Modules which include eunit.hrl always contain one line without coverage
+# because eunit defines test/0 which is never called. We compensate for this.
+EUNIT_HRL_MODS = $(subst $(space),$(comma),$(shell \
+ grep -H -e '^\s*-include.*include/eunit\.hrl"' src/*.erl \
+ | sed "s/^src\/\(.*\)\.erl:.*/'\1'/" | uniq))
+
+define cover_report.erl
+ $(foreach f,$(COVERDATA),cover:import("$(f)") == ok orelse halt(1),)
+ Ms = cover:imported_modules(),
+ [cover:analyse_to_file(M, "$(COVER_REPORT_DIR)/" ++ atom_to_list(M)
+ ++ ".COVER.html", [html]) || M <- Ms],
+ Report = [begin {ok, R} = cover:analyse(M, module), R end || M <- Ms],
+ EunitHrlMods = [$(EUNIT_HRL_MODS)],
+ Report1 = [{M, {Y, case lists:member(M, EunitHrlMods) of
+ true -> N - 1; false -> N end}} || {M, {Y, N}} <- Report],
+ TotalY = lists:sum([Y || {_, {Y, _}} <- Report1]),
+ TotalN = lists:sum([N || {_, {_, N}} <- Report1]),
+ Perc = fun(Y, N) -> case Y + N of 0 -> 100; S -> round(100 * Y / S) end end,
+ TotalPerc = Perc(TotalY, TotalN),
+ {ok, F} = file:open("$(COVER_REPORT_DIR)/index.html", [write]),
+ io:format(F, "<!DOCTYPE html><html>~n"
+ "<head><meta charset=\"UTF-8\">~n"
+ "<title>Coverage report</title></head>~n"
+ "<body>~n", []),
+ io:format(F, "<h1>Coverage</h1>~n<p>Total: ~p%</p>~n", [TotalPerc]),
+ io:format(F, "<table><tr><th>Module</th><th>Coverage</th></tr>~n", []),
+ [io:format(F, "<tr><td><a href=\"~p.COVER.html\">~p</a></td>"
+ "<td>~p%</td></tr>~n",
+ [M, M, Perc(Y, N)]) || {M, {Y, N}} <- Report1],
+ How = "$(subst $(space),$(comma)$(space),$(basename $(COVERDATA)))",
+ Date = "$(shell date -u "+%Y-%m-%dT%H:%M:%SZ")",
+ io:format(F, "</table>~n"
+ "<p>Generated using ~s and erlang.mk on ~s.</p>~n"
+ "</body></html>", [How, Date]),
+ halt().
+endef
+
+cover-report:
+ $(verbose) mkdir -p $(COVER_REPORT_DIR)
+ $(gen_verbose) $(call erlang,$(cover_report.erl))
+
+endif
+endif # ifneq ($(COVER_REPORT_DIR),)
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: sfx
+
+ifdef RELX_REL
+ifdef SFX
+
+# Configuration.
+
+SFX_ARCHIVE ?= $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/$(RELX_REL_NAME)-$(RELX_REL_VSN).tar.gz
+SFX_OUTPUT_FILE ?= $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME).run
+
+# Core targets.
+
+rel:: sfx
+
+# Plugin-specific targets.
+
+define sfx_stub
+#!/bin/sh
+
+TMPDIR=`mktemp -d`
+ARCHIVE=`awk '/^__ARCHIVE_BELOW__$$/ {print NR + 1; exit 0;}' $$0`
+FILENAME=$$(basename $$0)
+REL=$${FILENAME%.*}
+
+tail -n+$$ARCHIVE $$0 | tar -xzf - -C $$TMPDIR
+
+$$TMPDIR/bin/$$REL console
+RET=$$?
+
+rm -rf $$TMPDIR
+
+exit $$RET
+
+__ARCHIVE_BELOW__
+endef
+
+sfx:
+ $(verbose) $(call core_render,sfx_stub,$(SFX_OUTPUT_FILE))
+ $(gen_verbose) cat $(SFX_ARCHIVE) >> $(SFX_OUTPUT_FILE)
+ $(verbose) chmod +x $(SFX_OUTPUT_FILE)
+
+endif
+endif
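+
+# Example (illustrative, assuming a relx release and the default _rel output
+# directory):
+#   $ make rel SFX=1
+#   $ _rel/$(RELX_REL_NAME).run
+# builds the release, appends its tarball to the stub above and produces a
+# single self-extracting executable.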
+
+# Copyright (c) 2013-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# External plugins.
+
+DEP_PLUGINS ?=
+
+$(foreach p,$(DEP_PLUGINS),\
+ $(eval $(if $(findstring /,$p),\
+ $(call core_dep_plugin,$p,$(firstword $(subst /, ,$p))),\
+ $(call core_dep_plugin,$p/plugins.mk,$p))))
+
+help:: help-plugins
+
+help-plugins::
+ $(verbose) :
+
+# Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015-2016, Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Fetch dependencies recursively (without building them).
+
+.PHONY: fetch-deps fetch-doc-deps fetch-rel-deps fetch-test-deps \
+ fetch-shell-deps
+
+.PHONY: $(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+fetch-deps: $(ERLANG_MK_RECURSIVE_DEPS_LIST)
+fetch-doc-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST)
+fetch-rel-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST)
+fetch-test-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST)
+fetch-shell-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+ifneq ($(SKIP_DEPS),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST):
+ $(verbose) :> $@
+else
+# By default, we fetch "normal" dependencies. They are always included,
+# regardless of which type of dependencies is requested.
+#
+# $(ALL_DEPS_DIRS) includes $(BUILD_DEPS).
+
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_DOC_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_REL_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_TEST_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_SHELL_DEPS_DIRS)
+
+# Allow the use of fetch-deps together with $(DEP_TYPES) to fetch multiple
+# types of dependencies with a single target.
+ifneq ($(filter doc,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_DOC_DEPS_DIRS)
+endif
+ifneq ($(filter rel,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_REL_DEPS_DIRS)
+endif
+ifneq ($(filter test,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_TEST_DEPS_DIRS)
+endif
+ifneq ($(filter shell,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_SHELL_DEPS_DIRS)
+endif
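+
+# Example (illustrative):
+#   $ make fetch-deps DEP_TYPES='doc test'
+# fetches normal, documentation and test dependencies in a single pass.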
+
+ERLANG_MK_RECURSIVE_TMP_LIST := $(abspath $(ERLANG_MK_TMP)/recursive-tmp-deps-$(shell echo $$PPID).log)
+
+$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): | $(ERLANG_MK_TMP)
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_RECURSIVE_TMP_LIST)
+endif
+ $(verbose) touch $(ERLANG_MK_RECURSIVE_TMP_LIST)
+ $(verbose) set -e; for dep in $^ ; do \
+ if ! grep -qs ^$$dep$$ $(ERLANG_MK_RECURSIVE_TMP_LIST); then \
+ echo $$dep >> $(ERLANG_MK_RECURSIVE_TMP_LIST); \
+ if grep -qs -E "^[[:blank:]]*include[[:blank:]]+(erlang\.mk|.*/erlang\.mk|.*ERLANG_MK_FILENAME.*)$$" \
+ $$dep/GNUmakefile $$dep/makefile $$dep/Makefile; then \
+ $(MAKE) -C $$dep fetch-deps \
+ IS_DEP=1 \
+ ERLANG_MK_RECURSIVE_TMP_LIST=$(ERLANG_MK_RECURSIVE_TMP_LIST); \
+ fi \
+ fi \
+ done
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) sort < $(ERLANG_MK_RECURSIVE_TMP_LIST) | \
+ uniq > $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted
+ $(verbose) cmp -s $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted $@ \
+ || mv $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted $@
+ $(verbose) rm -f $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted
+ $(verbose) rm $(ERLANG_MK_RECURSIVE_TMP_LIST)
+endif
+endif # ifneq ($(SKIP_DEPS),)
+
+# List dependencies recursively.
+
+.PHONY: list-deps list-doc-deps list-rel-deps list-test-deps \
+ list-shell-deps
+
+list-deps: $(ERLANG_MK_RECURSIVE_DEPS_LIST)
+list-doc-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST)
+list-rel-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST)
+list-test-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST)
+list-shell-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+list-deps list-doc-deps list-rel-deps list-test-deps list-shell-deps:
+ $(verbose) cat $^
+
+# Query dependencies recursively.
+
+.PHONY: query-deps query-doc-deps query-rel-deps query-test-deps \
+ query-shell-deps
+
+QUERY ?= name fetch_method repo version
+
+define query_target
+$(1): $(2) clean-tmp-query.log
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(4)
+endif
+ $(verbose) $(foreach dep,$(3),\
+ echo $(PROJECT): $(foreach q,$(QUERY),$(call query_$(q),$(dep))) >> $(4) ;)
+ $(if $(filter-out query-deps,$(1)),,\
+ $(verbose) set -e; for dep in $(3) ; do \
+ if grep -qs ^$$$$dep$$$$ $(ERLANG_MK_TMP)/query.log; then \
+ :; \
+ else \
+ echo $$$$dep >> $(ERLANG_MK_TMP)/query.log; \
+ $(MAKE) -C $(DEPS_DIR)/$$$$dep $$@ QUERY="$(QUERY)" IS_DEP=1 || true; \
+ fi \
+ done)
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) touch $(4)
+ $(verbose) cat $(4)
+endif
+endef
+
+clean-tmp-query.log:
+ifeq ($(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_TMP)/query.log
+endif
+
+$(eval $(call query_target,query-deps,$(ERLANG_MK_RECURSIVE_DEPS_LIST),$(BUILD_DEPS) $(DEPS),$(ERLANG_MK_QUERY_DEPS_FILE)))
+$(eval $(call query_target,query-doc-deps,$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST),$(DOC_DEPS),$(ERLANG_MK_QUERY_DOC_DEPS_FILE)))
+$(eval $(call query_target,query-rel-deps,$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST),$(REL_DEPS),$(ERLANG_MK_QUERY_REL_DEPS_FILE)))
+$(eval $(call query_target,query-test-deps,$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST),$(TEST_DEPS),$(ERLANG_MK_QUERY_TEST_DEPS_FILE)))
+$(eval $(call query_target,query-shell-deps,$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST),$(SHELL_DEPS),$(ERLANG_MK_QUERY_SHELL_DEPS_FILE)))
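+
+# Example (illustrative):
+#   $ make query-deps QUERY='name repo'
+# prints one "<project>: <name> <repo>" line per dependency, recursively.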
diff --git a/deps/rabbitmq_federation_management/priv/www/js/federation.js b/deps/rabbitmq_federation_management/priv/www/js/federation.js
new file mode 100644
index 0000000000..4ea78c932d
--- /dev/null
+++ b/deps/rabbitmq_federation_management/priv/www/js/federation.js
@@ -0,0 +1,89 @@
+dispatcher_add(function(sammy) {
+ sammy.get('#/federation', function() {
+ render({'links': {path: '/federation-links',
+ options:{vhost:true}},
+ 'vhosts': '/vhosts'},
+ 'federation', '#/federation');
+ });
+ sammy.get('#/federation-upstreams', function() {
+ render({'upstreams': {path: '/parameters/federation-upstream',
+ options:{vhost:true}},
+ 'vhosts': '/vhosts',
+ 'globals': '/parameters/federation'},
+ 'federation-upstreams', '#/federation-upstreams');
+ });
+ sammy.get('#/federation-upstreams/:vhost/:id', function() {
+ render({'upstream': '/parameters/federation-upstream/' + esc(this.params['vhost']) + '/' + esc(this.params['id'])},
+ 'federation-upstream', '#/federation');
+ });
+ sammy.put('#/fed-parameters', function() {
+ var num_keys = ['expires', 'message-ttl', 'max-hops',
+ 'prefetch-count', 'reconnect-delay'];
+ var bool_keys = ['trust-user-id'];
+ var arrayable_keys = ['uri'];
+ put_parameter(this, [], num_keys, bool_keys, arrayable_keys);
+ return false;
+ });
+ sammy.del('#/fed-parameters', function() {
+ if (sync_delete(this, '/parameters/:component/:vhost/:name'))
+ go_to('#/federation-upstreams');
+ return false;
+ });
+ sammy.del("#/federation-restart-link", function(){
+ if(sync_delete(this, '/federation-links/vhost/:vhost/:id/:node/restart')){
+ update();
+ }
+ });
+});
+
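+// For illustration: the '#/fed-parameters' PUT handler above delegates to
+// put_parameter() (a management-UI helper), which coerces the listed numeric
+// and boolean form fields before storing the upstream as a runtime parameter.
+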
+NAVIGATION['Admin'][0]['Federation Status'] = ['#/federation', "monitoring"];
+NAVIGATION['Admin'][0]['Federation Upstreams'] = ['#/federation-upstreams', "policymaker"];
+
+HELP['federation-uri'] =
+  'URI to connect to. If the upstream is a cluster and has several URIs, you can enter them here separated by spaces.';
+
+HELP['federation-prefetch'] =
+ 'Maximum number of unacknowledged messages that may be in flight over a federation link at one time. Defaults to 1000 if not set.';
+
+
+HELP['federation-reconnect'] =
+ 'Time in seconds to wait after a network link goes down before attempting reconnection. Defaults to 5 if not set.';
+
+
+HELP['federation-ack-mode'] =
+ '<dl>\
+ <dt><code>on-confirm</code></dt>\
+ <dd>Messages are acknowledged to the upstream broker after they have been confirmed downstream. Handles network errors and broker failures without losing messages. The slowest option, and the default.</dd>\
+ <dt><code>on-publish</code></dt>\
+ <dd>Messages are acknowledged to the upstream broker after they have been published downstream. Handles network errors without losing messages, but may lose messages in the event of broker failures.</dd>\
+ <dt><code>no-ack</code></dt>\
+ <dd>Message acknowledgements are not used. The fastest option, but may lose messages in the event of network or broker failures.</dd>\
+</dl>';
+
+HELP['federation-trust-user-id'] =
+ 'Set "Yes" to preserve the "user-id" field across a federation link, even if the user-id does not match that used to republish the message. Set to "No" to clear the "user-id" field when messages are federated. Only set this to "Yes" if you trust the upstream broker not to forge user-ids.';
+
+HELP['exchange'] =
+ 'The name of the upstream exchange. Default is to use the same name as the federated exchange.';
+
+HELP['federation-max-hops'] =
+ 'Maximum number of federation links that messages can traverse before being dropped. Defaults to 1 if not set.';
+
+HELP['federation-expires'] =
+  'Time in milliseconds for which the upstream should remember this node. After this time all upstream state will be removed. Leave this blank to mean "forever".';
+
+HELP['federation-ttl'] =
+ 'Time in milliseconds that undelivered messages should be held upstream when there is a network outage or backlog. Leave this blank to mean "forever".';
+
+HELP['ha-policy'] =
+ 'Determines the "x-ha-policy" argument for the upstream queue for a federated exchange. Default is "none", meaning the queue is not HA.';
+
+HELP['queue'] =
+ 'The name of the upstream queue. Default is to use the same name as the federated queue.';
+
+HELP['consumer-tag'] =
+ 'The consumer tag to use when consuming from upstream. Optional.';
+
+function link_fed_conn(vhost, name) {
+ return _link_to(name, '#/federation-upstreams/' + esc(vhost) + '/' + esc(name));
+}
diff --git a/deps/rabbitmq_federation_management/priv/www/js/tmpl/federation-upstream.ejs b/deps/rabbitmq_federation_management/priv/www/js/tmpl/federation-upstream.ejs
new file mode 100644
index 0000000000..6fad08dc93
--- /dev/null
+++ b/deps/rabbitmq_federation_management/priv/www/js/tmpl/federation-upstream.ejs
@@ -0,0 +1,95 @@
+<h1>Federation Upstream: <b><%= fmt_string(upstream.name) %></b><%= fmt_maybe_vhost(upstream.vhost) %></h1>
+
+<div class="section">
+ <h2>Overview</h2>
+ <div class="hider">
+ <table class="facts">
+ <tr>
+ <th>
+ <h3>General parameters</h3>
+      </th>
+    </tr>
+ <tr>
+ <th>URI</th>
+ <td><%= fmt_string(fmt_uri_with_credentials(upstream.value.uri)) %></td>
+ </tr>
+ <tr>
+ <th>Prefetch Count</th>
+ <td><%= fmt_string(upstream.value['prefetch-count']) %></td>
+ </tr>
+ <tr>
+ <th>Reconnect Delay</th>
+ <td><%= fmt_time(upstream.value['reconnect-delay'], 's') %></td>
+ </tr>
+ <tr>
+ <th>Ack Mode</th>
+ <td><%= fmt_string(upstream.value['ack-mode']) %></td>
+ </tr>
+ <tr>
+ <th>Trust User-ID</th>
+ <td><%= fmt_boolean(upstream.value['trust-user-id']) %></td>
+ </tr>
+
+ <tr>
+ <th>
+ <h3>Federated exchange parameters</h3>
+ </th>
+ </tr>
+ <tr>
+ <th>Exchange</th>
+ <td><%= fmt_string(upstream.value['exchange']) %></td>
+ </tr>
+
+
+ <tr>
+ <th>Max Hops</th>
+ <td><%= fmt_string(upstream.value['max-hops']) %></td>
+ </tr>
+
+ <tr>
+ <th>Expires</th>
+ <td><%= fmt_time(upstream.value.expires, 'ms') %></td>
+ </tr>
+
+ <tr>
+ <th>Message TTL</th>
+ <td><%= fmt_time(upstream.value['message-ttl'], 'ms') %></td>
+ </tr>
+
+ <tr>
+ <th>HA Policy</th>
+ <td><%= fmt_string(upstream.value['ha-policy']) %></td>
+ </tr>
+
+ <tr>
+ <th>
+ <h3>Federated queue parameters</h3>
+ </th>
+ </tr>
+
+ <tr>
+ <th>Queue</th>
+ <td><%= fmt_string(upstream.value['queue']) %></td>
+ </tr>
+
+ <tr>
+ <th>Consumer tag</th>
+ <td><%= fmt_string(upstream.value['consumer-tag']) %></td>
+ </tr>
+
+
+
+ </table>
+ </div>
+</div>
+
+<div class="section-hidden">
+ <h2>Delete this upstream</h2>
+ <div class="hider">
+ <form action="#/fed-parameters" method="delete" class="confirm">
+ <input type="hidden" name="component" value="federation-upstream"/>
+ <input type="hidden" name="vhost" value="<%= fmt_string(upstream.vhost) %>"/>
+ <input type="hidden" name="name" value="<%= fmt_string(upstream.name) %>"/>
+ <input type="submit" value="Delete this upstream"/>
+ </form>
+ </div>
+</div>
diff --git a/deps/rabbitmq_federation_management/priv/www/js/tmpl/federation-upstreams.ejs b/deps/rabbitmq_federation_management/priv/www/js/tmpl/federation-upstreams.ejs
new file mode 100644
index 0000000000..5b3e14d063
--- /dev/null
+++ b/deps/rabbitmq_federation_management/priv/www/js/tmpl/federation-upstreams.ejs
@@ -0,0 +1,262 @@
+<h1>Federation Upstreams</h1>
+<div class="section">
+ <h2>Upstreams</h2>
+ <div class="hider updatable">
+<% if (upstreams.length > 0) { %>
+<table class="list">
+ <thead>
+ <tr>
+<% if (vhosts_interesting) { %>
+ <th>Virtual Host</th>
+<% } %>
+ <th>Name</th>
+ <th>URI</th>
+ <th>Prefetch Count</th>
+ <th>Reconnect Delay</th>
+ <th>Ack mode</th>
+ <th>Trust User-ID</th>
+ <th>Exchange</th>
+ <th>Max Hops</th>
+ <th>Expiry</th>
+ <th>Message TTL</th>
+ <th>HA Policy</th>
+ <th>Queue</th>
+ <th>Consumer tag</th>
+ </tr>
+ </thead>
+ <tbody>
+<%
+ for (var i = 0; i < upstreams.length; i++) {
+ var upstream = upstreams[i];
+%>
+ <tr<%= alt_rows(i)%>>
+<% if (vhosts_interesting) { %>
+ <td><%= fmt_string(upstream.vhost) %></td>
+<% } %>
+ <td><%= link_fed_conn(upstream.vhost, upstream.name) %></td>
+ <td><%= fmt_shortened_uri(fmt_uri_with_credentials(upstream.value.uri)) %></td>
+ <td class="r"><%= upstream.value['prefetch-count'] %></td>
+ <td class="r"><%= fmt_time(upstream.value['reconnect-delay'], 's') %></td>
+ <td class="c"><%= fmt_string(upstream.value['ack-mode']) %></td>
+ <td class="c"><%= fmt_boolean(upstream.value['trust-user-id']) %></td>
+ <td class="c"><%= fmt_string(upstream.value['exchange']) %></td>
+ <td class="r"><%= upstream.value['max-hops'] %></td>
+ <td class="r"><%= fmt_time(upstream.value.expires, 'ms') %></td>
+ <td class="r"><%= fmt_time(upstream.value['message-ttl'], 'ms') %></td>
+ <td class="r"><%= fmt_string(upstream.value['ha-policy']) %></td>
+ <td class="r"><%= fmt_string(upstream.value['queue']) %></td>
+ <td class="r"><%= upstream.value['consumer-tag'] %></td>
+ </tr>
+<% } %>
+ </tbody>
+</table>
+<% } else { %>
+ <p>... no upstreams ...</p>
+<% } %>
+ </div>
+</div>
+
+<div class="section-hidden">
+ <h2>Add a new upstream</h2>
+ <div class="hider">
+ <form action="#/fed-parameters" method="put">
+ <input type="hidden" name="component" value="federation-upstream"/>
+ <table class="form">
+ <tr>
+ <th>
+          <h3>General parameters</h3>
+ </th>
+ </tr>
+<% if (vhosts_interesting) { %>
+ <tr>
+ <th><label>Virtual host:</label></th>
+ <td>
+ <select name="vhost">
+ <% for (var i = 0; i < vhosts.length; i++) { %>
+ <option value="<%= fmt_string(vhosts[i].name) %>"><%= fmt_string(vhosts[i].name) %></option>
+ <% } %>
+ </select>
+ </td>
+ </tr>
+<% } else { %>
+ <tr><td><input type="hidden" name="vhost" value="<%= fmt_string(vhosts[0].name) %>"/></td></tr>
+<% } %>
+ <tr>
+ <th><label>Name:</label></th>
+ <td><input type="text" name="name"/><span class="mand">*</span></td>
+ </tr>
+
+
+ <tr>
+ <th>
+ <label>
+ URI:
+ <span class="help" id="federation-uri"></span>
+ </label>
+ </th>
+ <td><input type="text" name="uri"/><span class="mand">*</span></td>
+ </tr>
+ <tr>
+ <th>
+ <label>
+ Prefetch count:
+ <span class="help" id="federation-prefetch"></span>
+ </label>
+ </th>
+ <td><input type="text" name="prefetch-count"/></td>
+ </tr>
+ <tr>
+ <th>
+ <label>
+ Reconnect delay:
+ <span class="help" id="federation-reconnect"></span>
+ </label>
+ </th>
+ <td><input type="text" name="reconnect-delay"/> s</td>
+ </tr>
+ <tr>
+ <th>
+ <label>
+ Acknowledgement Mode:
+ <span class="help" id="federation-ack-mode"></span>
+ </label>
+ </th>
+ <td>
+ <select name="ack-mode">
+ <option value="on-confirm">On confirm</option>
+ <option value="on-publish">On publish</option>
+ <option value="no-ack">No ack</option>
+ </select>
+ </td>
+ </tr>
+ <tr>
+ <th>
+ <label>
+ Trust User-ID:
+ <span class="help" id="federation-trust-user-id"></span>
+ </label>
+ </th>
+
+ <td>
+ <select name="trust-user-id">
+ <option value="false">No</option>
+ <option value="true">Yes</option>
+ </select>
+ </td>
+      </tr>
+
+ <tr>
+ <th>
+          <h3>Federated exchange parameters</h3>
+ </th>
+ </tr>
+
+
+ <tr>
+ <th>
+ <label>
+ Exchange:
+ <span class="help" id="exchange"></span>
+ </label>
+ </th>
+ <td><input type="text" name="exchange"/></td>
+ </tr>
+
+ <tr>
+ <th>
+ <label>
+ Max hops:
+ <span class="help" id="federation-max-hops"></span>
+ </label>
+ </th>
+ <td><input type="text" name="max-hops"/></td>
+ </tr>
+
+ <tr>
+ <th>
+ <label>
+ Expires:
+ <span class="help" id="federation-expires"></span>
+ </label>
+ </th>
+ <td><input type="text" name="expires"/> ms</td>
+ </tr>
+
+ <tr>
+ <th>
+ <label>
+ Message TTL:
+ <span class="help" id="federation-ttl"></span>
+ </label>
+ </th>
+ <td><input type="text" name="message-ttl"/> ms</td>
+ </tr>
+
+
+ <tr>
+ <th>
+ <label>
+ HA Policy:
+ <span class="help" id="ha-policy"></span>
+ </label>
+ </th>
+ <td><input type="text" name="ha-policy"/></td>
+ </tr>
+
+ <tr>
+ <th>
+          <h3>Federated queue parameters</h3>
+ </th>
+ </tr>
+
+ <tr>
+ <th>
+ <label>
+ Queue:
+ <span class="help" id="queue"></span>
+ </label>
+ </th>
+ <td><input type="text" name="queue"/></td>
+ </tr>
+ <tr>
+ <th>
+ <label>
+ Consumer tag:
+ <span class="help" id="consumer-tag"></span>
+ </label>
+ </th>
+ <td><input type="text" name="consumer-tag"/></td>
+ </tr>
+
+
+
+ </table>
+ <input type="submit" value="Add upstream"/>
+ </form>
+ </div>
+</div>
+<div class="section-hidden">
+ <h2>URI examples</h2>
+ <div class="hider">
+ <ul>
+ <li>
+ <code>amqp://server-name</code><br/>
+        connect to server-name without SSL, using the default credentials
+ </li>
+ <li>
+ <code>amqp://user:password@server-name/my-vhost</code><br/>
+        connect to server-name, with explicit credentials and an overridden
+        virtual host
+ </li>
+ <li>
+ <code>amqps://user:password@server-name?cacertfile=/path/to/cacert.pem&certfile=/path/to/cert.pem&keyfile=/path/to/key.pem&verify=verify_peer</code><br/>
+ connect to server-name, with credentials and SSL
+ </li>
+ <li>
+ <code>amqps://server-name?cacertfile=/path/to/cacert.pem&certfile=/path/to/cert.pem&keyfile=/path/to/key.pem&verify=verify_peer&fail_if_no_peer_cert=true&auth_mechanism=external</code><br/>
+ connect to server-name, with SSL and EXTERNAL authentication
+ </li>
+ </ul>
+ </div>
+</div>
diff --git a/deps/rabbitmq_federation_management/priv/www/js/tmpl/federation.ejs b/deps/rabbitmq_federation_management/priv/www/js/tmpl/federation.ejs
new file mode 100644
index 0000000000..e47f697544
--- /dev/null
+++ b/deps/rabbitmq_federation_management/priv/www/js/tmpl/federation.ejs
@@ -0,0 +1,103 @@
+<h1>Federation Status</h1>
+<div class="section">
+ <h2>Running Links</h2>
+ <div class="hider updatable">
+<% if (links.length > 0) { %>
+<table class="list">
+ <thead>
+ <tr>
+ <th>Upstream</th>
+ <th>URI</th>
+<% if (vhosts_interesting) { %>
+ <th>Virtual Host</th>
+<% } %>
+ <th>Exchange / Queue</th>
+<% if (nodes_interesting) { %>
+ <th>Node</th>
+<% } %>
+ <th>State</th>
+ <th>Inbound message rate</th>
+ <th>Last changed</th>
+ <th>ID</th>
+ <th>Consumer tag</th>
+ <th>Operations</th>
+ </tr>
+ </thead>
+ <tbody>
+<%
+ for (var i = 0; i < links.length; i++) {
+ var link = links[i];
+%>
+ <tr<%= alt_rows(i)%>>
+ <td>
+ <%= fmt_string(link.upstream) %>
+ <% if (link.type == 'exchange' &&
+ link.exchange != link.upstream_exchange) { %>
+ <sub><%= fmt_string(link.upstream_exchange) %></sub>
+ <% } else if (link.type == 'queue' &&
+ link.queue != link.upstream_queue) { %>
+ <sub><%= fmt_string(link.upstream_queue) %></sub>
+ <% } %>
+ </td>
+ <td><%= fmt_string(fmt_uri_with_credentials(link.uri)) %></td>
+<% if (vhosts_interesting) { %>
+ <td><%= fmt_string(link.vhost) %></td>
+<% } %>
+ <td>
+ <% if (link.type == 'exchange') { %>
+ <%= link_exchange(link.vhost, link.exchange) %>
+ <% } else { %>
+ <%= link_queue(link.vhost, link.queue) %>
+ <% } %>
+ <sub><%= fmt_string(link.type) %></sub>
+ </td>
+<% if (nodes_interesting) { %>
+ <td><%= fmt_node(link.node) %></td>
+<% } %>
+<% if (link.error) { %>
+ <td>
+ <%= fmt_state('red', link.status) %>
+ </td>
+ <td></td>
+ <td><%= link.timestamp %></td>
+ </tr>
+ <tr>
+<% if (vhosts_interesting) { %>
+ <td colspan="7">
+<% } else { %>
+ <td colspan="6">
+<% } %>
+ Error detail:
+ <pre><%= fmt_escape_html(link.error) %></pre>
+ </td>
+ </tr>
+<% } else { %>
+ <td>
+ <%= fmt_state(link.status == 'starting' ? 'yellow' : 'green', link.status) %>
+ </td>
+ <td class="r">
+ <% if (link.local_channel) { %>
+ <%= fmt_detail_rate(link.local_channel.message_stats, 'confirm') %>
+ <% } %>
+ </td>
+ <td><%= link.timestamp %></td>
+ <td><%= link.id %></td>
+ <td><%= link.consumer_tag %></td>
+ <td>
+ <form action="#/federation-restart-link" method="delete" class="confirm">
+ <input type="hidden" name="id" value="<%= link.id %>"/>
+ <input type="hidden" name="node" value="<%= fmt_node(link.node) %>"/>
+ <input type="hidden" name="vhost" value="<%= fmt_string(link.vhost) %>"/>
+ <input type="submit" value="Restart"/>
+ </form>
+ </td>
+ </tr>
+<% } %>
+ <% } %>
+ </tbody>
+</table>
+<% } else { %>
+ <p>... no links ...</p>
+<% } %>
+</div>
+</div>
diff --git a/deps/rabbitmq_federation_management/rabbitmq-components.mk b/deps/rabbitmq_federation_management/rabbitmq-components.mk
new file mode 100644
index 0000000000..b2a3be8b35
--- /dev/null
+++ b/deps/rabbitmq_federation_management/rabbitmq-components.mk
@@ -0,0 +1,359 @@
+ifeq ($(.DEFAULT_GOAL),)
+# Define default goal to `all` because this file defines some targets
+# before the inclusion of erlang.mk leading to the wrong target becoming
+# the default.
+.DEFAULT_GOAL = all
+endif
+
+# PROJECT_VERSION defaults to:
+# 1. the version exported by rabbitmq-server-release;
+# 2. the version stored in `git-revisions.txt`, if it exists;
+# 3. a version based on git-describe(1), if it is a Git clone;
+# 4. 0.0.0
+
+PROJECT_VERSION := $(RABBITMQ_VERSION)
+
+ifeq ($(PROJECT_VERSION),)
+PROJECT_VERSION := $(shell \
+if test -f git-revisions.txt; then \
+ head -n1 git-revisions.txt | \
+ awk '{print $$$(words $(PROJECT_DESCRIPTION) version);}'; \
+else \
+ (git describe --dirty --abbrev=7 --tags --always --first-parent \
+ 2>/dev/null || echo rabbitmq_v0_0_0) | \
+ sed -e 's/^rabbitmq_v//' -e 's/^v//' -e 's/_/./g' -e 's/-/+/' \
+ -e 's/-/./g'; \
+fi)
+endif
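+
+# For example (illustrative), a git-describe output of
+# "rabbitmq_v3_8_9-12-gabcdef0" becomes "3.8.9+12.gabcdef0" after the sed
+# transformations above.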
+
+# --------------------------------------------------------------------
+# RabbitMQ components.
+# --------------------------------------------------------------------
+
+# For RabbitMQ repositories, we want to checkout branches which match
+# the parent project. For instance, if the parent project is on a
+# release tag, dependencies must be on the same release tag. If the
+# parent project is on a topic branch, dependencies must be on the same
+# topic branch or fallback to `stable` or `master` whichever was the
+# base of the topic branch.
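+#
+# For instance (illustration), on a topic branch cut from master,
+# $(current_rmq_ref) resolves to the topic branch and $(base_rmq_ref) to
+# master, so each dependency below is tried at the topic branch first and
+# falls back to master.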
+
+dep_amqp_client = git_rmq rabbitmq-erlang-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_amqp10_client = git_rmq rabbitmq-amqp1.0-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_amqp10_common = git_rmq rabbitmq-amqp1.0-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbit = git_rmq rabbitmq-server $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbit_common = git_rmq rabbitmq-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_amqp1_0 = git_rmq rabbitmq-amqp1.0 $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_amqp = git_rmq rabbitmq-auth-backend-amqp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_cache = git_rmq rabbitmq-auth-backend-cache $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_http = git_rmq rabbitmq-auth-backend-http $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_ldap = git_rmq rabbitmq-auth-backend-ldap $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_oauth2 = git_rmq rabbitmq-auth-backend-oauth2 $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_mechanism_ssl = git_rmq rabbitmq-auth-mechanism-ssl $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_aws = git_rmq rabbitmq-aws $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_boot_steps_visualiser = git_rmq rabbitmq-boot-steps-visualiser $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_cli = git_rmq rabbitmq-cli $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_codegen = git_rmq rabbitmq-codegen $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_consistent_hash_exchange = git_rmq rabbitmq-consistent-hash-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_ct_client_helpers = git_rmq rabbitmq-ct-client-helpers $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_ct_helpers = git_rmq rabbitmq-ct-helpers $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_delayed_message_exchange = git_rmq rabbitmq-delayed-message-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_dotnet_client = git_rmq rabbitmq-dotnet-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_event_exchange = git_rmq rabbitmq-event-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_federation = git_rmq rabbitmq-federation $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_federation_management = git_rmq rabbitmq-federation-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_java_client = git_rmq rabbitmq-java-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_client = git_rmq rabbitmq-jms-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_cts = git_rmq rabbitmq-jms-cts $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_topic_exchange = git_rmq rabbitmq-jms-topic-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_lvc_exchange = git_rmq rabbitmq-lvc-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management = git_rmq rabbitmq-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_agent = git_rmq rabbitmq-management-agent $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_exchange = git_rmq rabbitmq-management-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_themes = git_rmq rabbitmq-management-themes $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_message_timestamp = git_rmq rabbitmq-message-timestamp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_metronome = git_rmq rabbitmq-metronome $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_mqtt = git_rmq rabbitmq-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_objc_client = git_rmq rabbitmq-objc-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_aws = git_rmq rabbitmq-peer-discovery-aws $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_common = git_rmq rabbitmq-peer-discovery-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_consul = git_rmq rabbitmq-peer-discovery-consul $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_etcd = git_rmq rabbitmq-peer-discovery-etcd $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_k8s = git_rmq rabbitmq-peer-discovery-k8s $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_prometheus = git_rmq rabbitmq-prometheus $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_random_exchange = git_rmq rabbitmq-random-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_recent_history_exchange = git_rmq rabbitmq-recent-history-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_routing_node_stamp = git_rmq rabbitmq-routing-node-stamp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_rtopic_exchange = git_rmq rabbitmq-rtopic-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_server_release = git_rmq rabbitmq-server-release $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_sharding = git_rmq rabbitmq-sharding $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_shovel = git_rmq rabbitmq-shovel $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_shovel_management = git_rmq rabbitmq-shovel-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_stomp = git_rmq rabbitmq-stomp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_stream = git_rmq rabbitmq-stream $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_toke = git_rmq rabbitmq-toke $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_top = git_rmq rabbitmq-top $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_tracing = git_rmq rabbitmq-tracing $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_trust_store = git_rmq rabbitmq-trust-store $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_test = git_rmq rabbitmq-test $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_dispatch = git_rmq rabbitmq-web-dispatch $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_stomp = git_rmq rabbitmq-web-stomp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_stomp_examples = git_rmq rabbitmq-web-stomp-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt = git_rmq rabbitmq-web-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt_examples = git_rmq rabbitmq-web-mqtt-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_website = git_rmq rabbitmq-website $(current_rmq_ref) $(base_rmq_ref) live master
+dep_toke = git_rmq toke $(current_rmq_ref) $(base_rmq_ref) master
+
+dep_rabbitmq_public_umbrella = git_rmq rabbitmq-public-umbrella $(current_rmq_ref) $(base_rmq_ref) master
+
+# Third-party dependencies version pinning.
+#
+# We do that in this file, which is copied in all projects, to ensure
+# all projects use the same versions. It avoids conflicts and makes it
+# possible to work with rabbitmq-public-umbrella.
+
+dep_accept = hex 0.3.5
+dep_cowboy = hex 2.8.0
+dep_cowlib = hex 2.9.1
+dep_jsx = hex 2.11.0
+dep_lager = hex 3.8.0
+dep_prometheus = git https://github.com/deadtrickster/prometheus.erl.git master
+dep_ra = git https://github.com/rabbitmq/ra.git master
+dep_ranch = hex 1.7.1
+dep_recon = hex 2.5.1
+dep_observer_cli = hex 1.5.4
+dep_stdout_formatter = hex 0.2.4
+dep_sysmon_handler = hex 1.3.0
+
+RABBITMQ_COMPONENTS = amqp_client \
+ amqp10_common \
+ amqp10_client \
+ rabbit \
+ rabbit_common \
+ rabbitmq_amqp1_0 \
+ rabbitmq_auth_backend_amqp \
+ rabbitmq_auth_backend_cache \
+ rabbitmq_auth_backend_http \
+ rabbitmq_auth_backend_ldap \
+ rabbitmq_auth_backend_oauth2 \
+ rabbitmq_auth_mechanism_ssl \
+ rabbitmq_aws \
+ rabbitmq_boot_steps_visualiser \
+ rabbitmq_cli \
+ rabbitmq_codegen \
+ rabbitmq_consistent_hash_exchange \
+ rabbitmq_ct_client_helpers \
+ rabbitmq_ct_helpers \
+ rabbitmq_delayed_message_exchange \
+ rabbitmq_dotnet_client \
+ rabbitmq_event_exchange \
+ rabbitmq_federation \
+ rabbitmq_federation_management \
+ rabbitmq_java_client \
+ rabbitmq_jms_client \
+ rabbitmq_jms_cts \
+ rabbitmq_jms_topic_exchange \
+ rabbitmq_lvc_exchange \
+ rabbitmq_management \
+ rabbitmq_management_agent \
+ rabbitmq_management_exchange \
+ rabbitmq_management_themes \
+ rabbitmq_message_timestamp \
+ rabbitmq_metronome \
+ rabbitmq_mqtt \
+ rabbitmq_objc_client \
+ rabbitmq_peer_discovery_aws \
+ rabbitmq_peer_discovery_common \
+ rabbitmq_peer_discovery_consul \
+ rabbitmq_peer_discovery_etcd \
+ rabbitmq_peer_discovery_k8s \
+ rabbitmq_prometheus \
+ rabbitmq_random_exchange \
+ rabbitmq_recent_history_exchange \
+ rabbitmq_routing_node_stamp \
+ rabbitmq_rtopic_exchange \
+ rabbitmq_server_release \
+ rabbitmq_sharding \
+ rabbitmq_shovel \
+ rabbitmq_shovel_management \
+ rabbitmq_stomp \
+ rabbitmq_stream \
+ rabbitmq_toke \
+ rabbitmq_top \
+ rabbitmq_tracing \
+ rabbitmq_trust_store \
+ rabbitmq_web_dispatch \
+ rabbitmq_web_mqtt \
+ rabbitmq_web_mqtt_examples \
+ rabbitmq_web_stomp \
+ rabbitmq_web_stomp_examples \
+ rabbitmq_website
+
+# Erlang.mk does not rebuild dependencies after they have been compiled
+# once, except for those listed in the `$(FORCE_REBUILD)` variable.
+#
+# We want all RabbitMQ components to always be rebuilt: this eases
+# working on several components at the same time.
+
+FORCE_REBUILD = $(RABBITMQ_COMPONENTS)
+
+# Several components have a custom erlang.mk/build.config, mainly
+# to disable eunit. Therefore, we can't use the top-level project's
+# erlang.mk copy.
+NO_AUTOPATCH += $(RABBITMQ_COMPONENTS)
+
+ifeq ($(origin current_rmq_ref),undefined)
+ifneq ($(wildcard .git),)
+current_rmq_ref := $(shell (\
+ ref=$$(LANG=C git branch --list | awk '/^\* \(.*detached / {ref=$$0; sub(/.*detached [^ ]+ /, "", ref); sub(/\)$$/, "", ref); print ref; exit;} /^\* / {ref=$$0; sub(/^\* /, "", ref); print ref; exit}');\
+ if test "$$(git rev-parse --short HEAD)" != "$$ref"; then echo "$$ref"; fi))
+else
+current_rmq_ref := master
+endif
+endif
+export current_rmq_ref
+
+ifeq ($(origin base_rmq_ref),undefined)
+ifneq ($(wildcard .git),)
+possible_base_rmq_ref := master
+ifeq ($(possible_base_rmq_ref),$(current_rmq_ref))
+base_rmq_ref := $(current_rmq_ref)
+else
+base_rmq_ref := $(shell \
+ (git rev-parse --verify -q master >/dev/null && \
+ git rev-parse --verify -q $(possible_base_rmq_ref) >/dev/null && \
+ git merge-base --is-ancestor $$(git merge-base master HEAD) $(possible_base_rmq_ref) && \
+ echo $(possible_base_rmq_ref)) || \
+ echo master)
+endif
+else
+base_rmq_ref := master
+endif
+endif
+export base_rmq_ref
+
+# Repository URL selection.
+#
+# First, we infer other components' location from the current project
+# repository URL, if it's a Git repository:
+# - We take the "origin" remote URL as the base
+# - The current project name and repository name are replaced by the
+#   target's properties:
+#   eg. rabbitmq-common is replaced by rabbitmq-codegen
+#   eg. rabbit_common is replaced by rabbitmq_codegen
+#
+# If cloning from this computed location fails, we fall back to the RabbitMQ
+# upstream, which is GitHub.
+
+# Macro to transform eg. "rabbit_common" to "rabbitmq-common".
+rmq_cmp_repo_name = $(word 2,$(dep_$(1)))
+
+# Upstream URL for the current project.
+RABBITMQ_COMPONENT_REPO_NAME := $(call rmq_cmp_repo_name,$(PROJECT))
+RABBITMQ_UPSTREAM_FETCH_URL ?= https://github.com/rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git
+RABBITMQ_UPSTREAM_PUSH_URL ?= git@github.com:rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git
+
+# Current URL for the current project. If this is not a Git clone,
+# default to the upstream Git repository.
+ifneq ($(wildcard .git),)
+git_origin_fetch_url := $(shell git config remote.origin.url)
+git_origin_push_url := $(shell git config remote.origin.pushurl || git config remote.origin.url)
+RABBITMQ_CURRENT_FETCH_URL ?= $(git_origin_fetch_url)
+RABBITMQ_CURRENT_PUSH_URL ?= $(git_origin_push_url)
+else
+RABBITMQ_CURRENT_FETCH_URL ?= $(RABBITMQ_UPSTREAM_FETCH_URL)
+RABBITMQ_CURRENT_PUSH_URL ?= $(RABBITMQ_UPSTREAM_PUSH_URL)
+endif
+
+# Macro to replace the following pattern:
+# 1. /foo.git -> /bar.git
+# 2. /foo -> /bar
+# 3. /foo/ -> /bar/
+subst_repo_name = $(patsubst %/$(1)/%,%/$(2)/%,$(patsubst %/$(1),%/$(2),$(patsubst %/$(1).git,%/$(2).git,$(3))))
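+
+# For example (hypothetical URL):
+#   $(call subst_repo_name,rabbitmq-common,rabbitmq-codegen,\
+#     https://example.com/me/rabbitmq-common.git)
+# yields https://example.com/me/rabbitmq-codegen.git.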
+
+# Macro to replace both the project's name (eg. "rabbit_common") and
+# repository name (eg. "rabbitmq-common") by the target's equivalent.
+#
+# This macro is kept on one line because we don't want whitespace in
+# the returned value, as it's used in $(dep_fetch_git_rmq) in a shell
+# single-quoted string.
+dep_rmq_repo = $(if $(dep_$(2)),$(call subst_repo_name,$(PROJECT),$(2),$(call subst_repo_name,$(RABBITMQ_COMPONENT_REPO_NAME),$(call rmq_cmp_repo_name,$(2)),$(1))),$(pkg_$(1)_repo))
+
+dep_rmq_commits = $(if $(dep_$(1)), \
+ $(wordlist 3,$(words $(dep_$(1))),$(dep_$(1))), \
+ $(pkg_$(1)_commit))
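+
+# For example (illustrative), with
+#   dep_rabbit = git_rmq rabbitmq-server my-branch master master
+# $(call dep_rmq_commits,rabbit) yields "my-branch master master".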
+
+define dep_fetch_git_rmq
+ fetch_url1='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_FETCH_URL),$(1))'; \
+ fetch_url2='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_FETCH_URL),$(1))'; \
+ if test "$$$$fetch_url1" != '$(RABBITMQ_CURRENT_FETCH_URL)' && \
+ git clone -q -n -- "$$$$fetch_url1" $(DEPS_DIR)/$(call dep_name,$(1)); then \
+ fetch_url="$$$$fetch_url1"; \
+ push_url='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_PUSH_URL),$(1))'; \
+ elif git clone -q -n -- "$$$$fetch_url2" $(DEPS_DIR)/$(call dep_name,$(1)); then \
+ fetch_url="$$$$fetch_url2"; \
+ push_url='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_PUSH_URL),$(1))'; \
+ fi; \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && ( \
+ $(foreach ref,$(call dep_rmq_commits,$(1)), \
+ git checkout -q $(ref) >/dev/null 2>&1 || \
+ ) \
+ (echo "error: no valid pathspec among: $(call dep_rmq_commits,$(1))" \
+ 1>&2 && false) ) && \
+ (test "$$$$fetch_url" = "$$$$push_url" || \
+ git remote set-url --push origin "$$$$push_url")
+endef
+
+# --------------------------------------------------------------------
+# Component distribution.
+# --------------------------------------------------------------------
+
+list-dist-deps::
+ @:
+
+prepare-dist::
+ @:
+
+# --------------------------------------------------------------------
+# Umbrella-specific settings.
+# --------------------------------------------------------------------
+
+# If the top-level project is a RabbitMQ component, we override
+# $(DEPS_DIR) for this project to point to the top-level's one.
+#
+# We also verify that the guessed DEPS_DIR is actually named `deps`,
+# to rule out any situation where it is a coincidence that we found a
+# `rabbitmq-components.mk` in upper directories.
+
+possible_deps_dir_1 = $(abspath ..)
+possible_deps_dir_2 = $(abspath ../../..)
+
+ifeq ($(notdir $(possible_deps_dir_1)),deps)
+ifneq ($(wildcard $(possible_deps_dir_1)/../rabbitmq-components.mk),)
+deps_dir_overriden = 1
+DEPS_DIR ?= $(possible_deps_dir_1)
+DISABLE_DISTCLEAN = 1
+endif
+endif
+
+ifeq ($(deps_dir_overriden),)
+ifeq ($(notdir $(possible_deps_dir_2)),deps)
+ifneq ($(wildcard $(possible_deps_dir_2)/../rabbitmq-components.mk),)
+deps_dir_overriden = 1
+DEPS_DIR ?= $(possible_deps_dir_2)
+DISABLE_DISTCLEAN = 1
+endif
+endif
+endif
+
+ifneq ($(wildcard UMBRELLA.md),)
+DISABLE_DISTCLEAN = 1
+endif
+
+# We disable `make distclean` so $(DEPS_DIR) is not accidentally removed.
+
+ifeq ($(DISABLE_DISTCLEAN),1)
+ifneq ($(filter distclean distclean-deps,$(MAKECMDGOALS)),)
+SKIP_DEPS = 1
+endif
+endif
diff --git a/deps/rabbitmq_federation_management/src/rabbit_federation_mgmt.erl b/deps/rabbitmq_federation_management/src/rabbit_federation_mgmt.erl
new file mode 100644
index 0000000000..bf3dbbf44a
--- /dev/null
+++ b/deps/rabbitmq_federation_management/src/rabbit_federation_mgmt.erl
@@ -0,0 +1,143 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_federation_mgmt).
+
+-behaviour(rabbit_mgmt_extension).
+
+-export([dispatcher/0, web_ui/0]).
+-export([init/2, to_json/2, resource_exists/2, content_types_provided/2,
+ is_authorized/2, allowed_methods/2, delete_resource/2]).
+
+-import(rabbit_misc, [pget/2]).
+
+-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl").
+
+dispatcher() -> [{"/federation-links", ?MODULE, [all]},
+ {"/federation-links/:vhost", ?MODULE, [all]},
+ {"/federation-links/state/down/", ?MODULE, [down]},
+ {"/federation-links/:vhost/state/down", ?MODULE, [down]},
+ {"/federation-links/vhost/:vhost/:id/:node/restart", ?MODULE, []}].
+
+web_ui() -> [{javascript, <<"federation.js">>}].
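+
+%% For illustration: a DELETE against
+%%   /api/federation-links/vhost/%2f/<id>/<node>/restart
+%% matches the last dispatcher rule above and is handled by
+%% delete_resource/2 below, which restarts the corresponding link.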
+
+%%--------------------------------------------------------------------
+
+init(Req, [Filter]) ->
+ {cowboy_rest, rabbit_mgmt_cors:set_headers(Req, ?MODULE), {Filter, #context{}}};
+init(Req, []) ->
+ {cowboy_rest, rabbit_mgmt_cors:set_headers(Req, ?MODULE), #context{}}.
+
+content_types_provided(ReqData, Context) ->
+ {[{<<"application/json">>, to_json}], ReqData, Context}.
+
+allowed_methods(ReqData, Context) ->
+ {[<<"HEAD">>, <<"GET">>, <<"DELETE">>, <<"OPTIONS">>], ReqData, Context}.
+
+resource_exists(ReqData, Context) ->
+ {case rabbit_mgmt_util:vhost(ReqData) of
+ not_found -> false;
+ _ -> case rabbit_mgmt_util:id(id, ReqData) of
+ %% Listing links
+ none -> true;
+ %% Restarting a link
+ Id ->
+ lookup(Id, ReqData)
+ end
+ end, ReqData, Context}.
+
+to_json(ReqData, {Filter, Context}) ->
+ Chs = rabbit_mgmt_db:get_all_channels(
+ rabbit_mgmt_util:range(ReqData)),
+ rabbit_mgmt_util:reply_list(
+ filter_vhost(status(Chs, ReqData, Context, Filter), ReqData), ReqData, Context);
+to_json(ReqData, Context) ->
+ to_json(ReqData, {all, Context}).
+
+is_authorized(ReqData, {Filter, Context}) ->
+ {Res, RD, C} = rabbit_mgmt_util:is_authorized_monitor(ReqData, Context),
+ {Res, RD, {Filter, C}};
+is_authorized(ReqData, Context) ->
+ rabbit_mgmt_util:is_authorized_monitor(ReqData, Context).
+
+delete_resource(ReqData, Context) ->
+ Reply = case rabbit_mgmt_util:id(id, ReqData) of
+ none -> false;
+ Id -> restart(Id, ReqData)
+ end,
+ {Reply, ReqData, Context}.
+
+%%--------------------------------------------------------------------
+filter_vhost(List, ReqData) ->
+ rabbit_mgmt_util:all_or_one_vhost(
+ ReqData,
+ fun(V) -> lists:filter(fun(I) -> pget(vhost, I) =:= V end, List) end).
+
+status(Chs, ReqData, Context, Filter) ->
+ rabbit_mgmt_util:filter_vhost(
+ lists:append([status(Node, Chs, Filter) || Node <- [node() | nodes()]]),
+ ReqData, Context).
+
+status(Node, Chs, Filter) ->
+ case rpc:call(Node, rabbit_federation_status, status, [], infinity) of
+ {badrpc, {'EXIT', {undef, _}}} -> [];
+ {badrpc, {'EXIT', {noproc, _}}} -> [];
+ Status -> [format(Node, I, Chs) || I <- Status,
+ filter_status(I, Filter)]
+ end.
+
+filter_status(_, all) ->
+ true;
+filter_status(Props, down) ->
+ Status = pget(status, Props),
+ not lists:member(Status, [running, starting]).
+
+format(Node, Info, Chs) ->
+ LocalCh = case rabbit_mgmt_format:strip_pids(
+ [Ch || Ch <- Chs,
+ pget(name, pget(connection_details, Ch))
+ =:= pget(local_connection, Info)]) of
+ [Ch] -> [{local_channel, Ch}];
+ [] -> []
+ end,
+ [{node, Node} | format_info(Info)] ++ LocalCh.
+
+format_info(Items) ->
+ [format_item(I) || I <- Items].
+
+format_item({timestamp, {{Y, M, D}, {H, Min, S}}}) ->
+ {timestamp, print("~w-~2.2.0w-~2.2.0w ~w:~2.2.0w:~2.2.0w",
+ [Y, M, D, H, Min, S])};
+format_item({error, E}) ->
+ {error, rabbit_mgmt_format:print("~p", [E])};
+format_item(I) ->
+ I.
+
+print(Fmt, Val) ->
+ list_to_binary(io_lib:format(Fmt, Val)).
+
+lookup(Id, ReqData) ->
+ Node = get_node(ReqData),
+ case rpc:call(Node, rabbit_federation_status, lookup, [Id], infinity) of
+ {badrpc, _} -> false;
+ not_found -> false;
+ _ -> true
+ end.
+
+restart(Id, ReqData) ->
+ Node = get_node(ReqData),
+ case rpc:call(Node, rabbit_federation_status, lookup, [Id], infinity) of
+ not_found -> false;
+ Obj ->
+ Upstream = proplists:get_value(upstream, Obj),
+ Supervisor = proplists:get_value(supervisor, Obj),
+ rpc:call(Node, rabbit_federation_link_sup, restart, [Supervisor, Upstream], infinity),
+ true
+ end.
+
+get_node(ReqData) ->
+ list_to_atom(binary_to_list(rabbit_mgmt_util:id(node, ReqData))).
diff --git a/deps/rabbitmq_federation_management/test/federation_mgmt_SUITE.erl b/deps/rabbitmq_federation_management/test/federation_mgmt_SUITE.erl
new file mode 100644
index 0000000000..a5a8efcf77
--- /dev/null
+++ b/deps/rabbitmq_federation_management/test/federation_mgmt_SUITE.erl
@@ -0,0 +1,257 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(federation_mgmt_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include_lib("rabbitmq_ct_helpers/include/rabbit_mgmt_test.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, non_parallel_tests}
+ ].
+
+groups() ->
+ [
+ {non_parallel_tests, [], [
+ federation_links,
+ federation_down_links,
+ restart_link
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ inets:start(),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, ?MODULE}
+ ]),
+ rabbit_ct_helpers:run_setup_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps() ++
+ [fun setup_federation/1]).
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+setup_federation(Config) ->
+ set_policy(Config),
+ Port = amqp_port(Config, 0),
+ Uri = lists:flatten(io_lib:format("amqp://myuser:myuser@localhost:~p", [Port])),
+ rabbit_ct_broker_helpers:set_parameter(
+ Config, 0, <<"federation-upstream">>, <<"broken-bunny">>,
+ [{<<"uri">>, list_to_binary(Uri)},
+ {<<"reconnect-delay">>, 600000}]),
+ rabbit_ct_broker_helpers:set_parameter(
+ Config, 0, <<"federation-upstream">>, <<"bunny">>,
+ [{<<"uri">>, <<"amqp://">>},
+ {<<"reconnect-delay">>, 600000}]),
+ Config.
+
+set_policy(Config) ->
+ rabbit_ct_broker_helpers:set_policy(
+ Config, 0,
+ <<"fed">>, <<".*">>, <<"all">>, [{<<"federation-upstream-set">>, <<"all">>}]).
+
+init_per_group(_, Config) ->
+ Config.
+
+end_per_group(_, Config) ->
+ Config.
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+federation_links(Config) ->
+ DefaultExchanges = [<<"amq.direct">>, <<"amq.fanout">>, <<"amq.headers">>,
+ <<"amq.match">>, <<"amq.topic">>],
+ Running = [{X, <<"bunny">>, <<"running">>} || X <- DefaultExchanges],
+ Down = [{X, <<"broken-bunny">>, <<"error">>} || X <- DefaultExchanges],
+ All = lists:sort(Running ++ Down),
+ Verify = fun(Result) ->
+ All == lists:sort(Result)
+ end,
+ %% Verify we have 5 running links and 5 down links
+ wait_until(fun() ->
+ AllLinks = http_get(Config, "/federation-links"),
+ Result = [{maps:get(exchange, Link),
+ maps:get(upstream, Link),
+ maps:get(status, Link)} || Link <- AllLinks],
+ Verify(Result)
+ end).
+
+federation_down_links(Config) ->
+    DefaultExchanges = [<<"amq.direct">>, <<"amq.fanout">>, <<"amq.headers">>,
+                        <<"amq.match">>, <<"amq.topic">>],
+    Down = lists:sort([{X, <<"broken-bunny">>, <<"error">>} || X <- DefaultExchanges]),
+    %% We might have to wait for all links to get into 'error' status,
+    %% but no other status is allowed in the meantime.
+ Verify = fun(Result) ->
+ lists:all(fun({_, _, <<"error">>}) ->
+ true;
+ (_) ->
+ throw(down_links_returned_wrong_status)
+ end, Result) andalso (Down == lists:sort(Result))
+ end,
+ wait_until(fun() ->
+ AllLinks = http_get(Config, "/federation-links/state/down"),
+ Result = [{maps:get(exchange, Link),
+ maps:get(upstream, Link),
+ maps:get(status, Link)} || Link <- AllLinks],
+ Verify(Result)
+ end).
+
+restart_link(Config) ->
+ try
+ federation_down_links(Config),
+ http_put(Config, "/users/myuser", [{password, <<"myuser">>}, {tags, <<"">>},
+ {username, <<"myuser">>}],
+ [?CREATED, ?NO_CONTENT]),
+ http_put(Config, "/permissions/%2F/myuser",
+ [{configure, <<".*">>}, {write, <<".*">>}, {read, <<".*">>},
+ {vhost, <<"/">>}, {username, <<"myuser">>}],
+ [?CREATED, ?NO_CONTENT]),
+ Links = http_get(Config, "/federation-links/state/down"),
+ [http_delete(Config, restart_uri(Link)) || Link <- Links],
+ wait_until(fun() ->
+ [] == http_get(Config, "/federation-links/state/down")
+ end)
+ after
+ http_delete(Config, "/users/myuser"),
+ rabbit_ct_broker_helpers:clear_policy(Config, 0, <<"fed">>),
+ set_policy(Config)
+ end.
+
+%% -------------------------------------------------------------------
+%% Helpers
+%% -------------------------------------------------------------------
+wait_until(Fun) ->
+ wait_until(Fun, 600).
+
+wait_until(_Fun, 0) ->
+ throw(federation_links_timeout);
+wait_until(Fun, N) ->
+ case Fun() of
+ true ->
+ ok;
+ false ->
+ timer:sleep(1000),
+ wait_until(Fun, N-1)
+ end.
+
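+%% Management API path used to restart a link, e.g. (with illustrative
+%% id and node values) "/federation-links/vhost/%2f/<id>/<node>/restart";
+%% it is issued as an HTTP DELETE above.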
+restart_uri(Link) ->
+ "/federation-links/vhost/%2f/" ++
+ binary_to_list(maps:get(id, Link)) ++ "/" ++
+ binary_to_list(maps:get(node, Link)) ++ "/restart".
+
+%% -------------------------------------------------------------------
+%% Helpers from rabbitmq_management tests
+%% -------------------------------------------------------------------
+http_get(Config, Path) ->
+ http_get(Config, Path, ?OK).
+
+http_get(Config, Path, CodeExp) ->
+ http_get(Config, Path, "guest", "guest", CodeExp).
+
+http_get(Config, Path, User, Pass, CodeExp) ->
+ {ok, {{_HTTP, CodeAct, _}, Headers, ResBody}} =
+ req(Config, 0, get, Path, [auth_header(User, Pass)]),
+ assert_code(CodeExp, CodeAct, "GET", Path, ResBody),
+ decode(CodeExp, Headers, ResBody).
+
+http_put(Config, Path, List, CodeExp) ->
+ http_put_raw(Config, Path, format_for_upload(List), CodeExp).
+
+http_put_raw(Config, Path, Body, CodeExp) ->
+ http_upload_raw(Config, put, Path, Body, "guest", "guest", CodeExp, []).
+
+http_put_raw(Config, Path, Body, User, Pass, CodeExp) ->
+ http_upload_raw(Config, put, Path, Body, User, Pass, CodeExp, []).
+
+http_upload_raw(Config, Type, Path, Body, User, Pass, CodeExp, MoreHeaders) ->
+ {ok, {{_HTTP, CodeAct, _}, Headers, ResBody}} =
+ req(Config, 0, Type, Path, [auth_header(User, Pass)] ++ MoreHeaders, Body),
+ assert_code(CodeExp, CodeAct, Type, Path, ResBody),
+ decode(CodeExp, Headers, ResBody).
+
+http_delete(Config, Path) ->
+ http_delete(Config, Path, "guest", "guest", ?NO_CONTENT).
+
+http_delete(Config, Path, User, Pass, CodeExp) ->
+ {ok, {{_HTTP, CodeAct, _}, Headers, ResBody}} =
+ req(Config, 0, delete, Path, [auth_header(User, Pass)]),
+ assert_code(CodeExp, CodeAct, "DELETE", Path, ResBody),
+ decode(CodeExp, Headers, ResBody).
+
+format_for_upload(none) ->
+ <<"">>;
+format_for_upload(List) ->
+ iolist_to_binary(rabbit_json:encode(List)).
+
+req(Config, Node, Type, Path, Headers) ->
+ httpc:request(Type, {uri_base_from(Config, Node) ++ Path, Headers}, ?HTTPC_OPTS, []).
+
+req(Config, Node, Type, Path, Headers, Body) ->
+ httpc:request(Type, {uri_base_from(Config, Node) ++ Path, Headers, "application/json", Body},
+ ?HTTPC_OPTS, []).
+
+uri_base_from(Config, Node) ->
+ binary_to_list(
+ rabbit_mgmt_format:print(
+ "http://localhost:~w/api",
+ [mgmt_port(Config, Node)])).
+
+auth_header(Username, Password) ->
+ {"Authorization",
+ "Basic " ++ binary_to_list(base64:encode(Username ++ ":" ++ Password))}.
+
+mgmt_port(Config, Node) ->
+ rabbit_ct_broker_helpers:get_node_config(Config, Node, tcp_port_mgmt).
+
+amqp_port(Config, Node) ->
+ rabbit_ct_broker_helpers:get_node_config(Config, Node, tcp_port_amqp).
+
+assert_code(CodesExpected, CodeAct, Type, Path, Body) when is_list(CodesExpected) ->
+ case lists:member(CodeAct, CodesExpected) of
+ true ->
+ ok;
+ false ->
+ throw({expected, CodesExpected, got, CodeAct, type, Type,
+ path, Path, body, Body})
+ end;
+assert_code(CodeExp, CodeAct, Type, Path, Body) ->
+ case CodeExp of
+ CodeAct -> ok;
+ _ -> throw({expected, CodeExp, got, CodeAct, type, Type,
+ path, Path, body, Body})
+ end.
+
+decode(?OK, _Headers, ResBody) ->
+ cleanup(rabbit_json:decode(rabbit_data_coercion:to_binary(ResBody)));
+decode(_, Headers, _ResBody) -> Headers.
+
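+%% Recursively convert decoded JSON so binary map keys become atoms,
+%% letting tests call maps:get(exchange, Link) instead of using binaries.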
+cleanup(L) when is_list(L) ->
+ [cleanup(I) || I <- L];
+cleanup(M) when is_map(M) ->
+ maps:fold(fun(K, V, Acc) ->
+ Acc#{binary_to_atom(K, latin1) => cleanup(V)}
+ end, #{}, M);
+cleanup(I) ->
+ I.
diff --git a/deps/rabbitmq_jms_topic_exchange/.gitignore b/deps/rabbitmq_jms_topic_exchange/.gitignore
new file mode 100644
index 0000000000..6870ec18a4
--- /dev/null
+++ b/deps/rabbitmq_jms_topic_exchange/.gitignore
@@ -0,0 +1,17 @@
+.sw?
+.*.sw?
+*.beam
+/.erlang.mk/
+/cover/
+/deps/
+/doc/
+/ebin/
+/escript/
+/escript.lock
+/logs/
+/plugins/
+/plugins.lock
+/sbin/
+/sbin.lock
+
+rabbitmq_jms_topic_exchange.d
diff --git a/deps/rabbitmq_jms_topic_exchange/.travis.yml b/deps/rabbitmq_jms_topic_exchange/.travis.yml
new file mode 100644
index 0000000000..f19aaaa1a4
--- /dev/null
+++ b/deps/rabbitmq_jms_topic_exchange/.travis.yml
@@ -0,0 +1,61 @@
+# vim:sw=2:et:
+
+os: linux
+dist: xenial
+language: elixir
+notifications:
+ email:
+ recipients:
+ - alerts@rabbitmq.com
+ on_success: never
+ on_failure: always
+addons:
+ apt:
+ packages:
+ - awscli
+cache:
+ apt: true
+env:
+ global:
+ - secure: HPosloBroEdylR0kRia0fSV+9jH36OuO0EBMfX4ztRozQ1h1RjuSjo2eFJ75kNafURXyQgSllWD3OsRuM5kcfMCKUMJivAagNL75ZUdRfFJ+/UsIHAsvZjd/cLlb1O2QeJ+4uKVyTR8Zz9jNRG6A+qW7H8CobaMZZpszWJ8nv/zoyhANt19TrCelThANwcfBHN1n+BthSf+cMZuBbwB4fJ9srNVGfZP5Grm3QNk8xbnRXmnnu9wiezOzyn7WqpF3YIgFI7ZzLsIggZI6RLvtYu4of+qzKQ9qyjQyXxUeHZjYnEizNlNyPFgBrfPMDOmFYP5qQ/PM1B7kO5/DyGlr7htoDgUXHfzTsdjTRoROJ8sf/Ds9upeKMSUoUUY80VdfYIh+NtvQ+3M/E5IeM1Qj1QN67QC2BE1cWebYNtiS1L9YUv60kOksTSjcigQ3RlmKZkUBIVbNxW7W0uPDF2yVNRcXaAqTp7uerwsmgnrgR1jrWwB7ZqXFfN0vziVjGYBOrr6KIol3+feNWmK95LfpO074ZLBoJVCMrZeQD9s8TJv2DBrgCUSvzHc02+B1khRZOzqbF3Oj4ynNIFnERzWZgo04zzu7Mk1yTyYIEVVHZ4nyKWDBvtibw6QQdY/3f41jPBBpRUW1cKcPshBWw028AGNFuhZ4S2PNxWKcxvDS4z8=
+ - secure: JqQCbqJB5N9YoOfU5tHozpPbMnb9Iib7Q8FfacSvOOfl43QeI5ZWScUcpV+GLXJdfl1k0iAwFHfz4jDwzewItWHeQprq8amaFRvZhKckfA8VHqcW42SPqr1/kS5tONcYdeo6wUlRak4sY2j/W6dqdnW9g2JZERJ6xIquETgp1ezF1brphw3Y6/hfD9ZppPaGXAE59eeUCeAVpZDNlHD9jvlYgu9n1TG/xd1snXjVpT7MND6XG5cyWRogw6Hn+185t5aFfU5Gi/oew9+rwSjasjAHdEW/goeHDRrr9+kCGTb9i+zuEYuL8Jvjk7HJoZR5/QybtHpos2NOe7RpU9xqnYGvMuUcLGj0x/8U/SuYKh9uqdN+KBnoDwuN2ILJn/huKmzTXQHcaPzGvU2fl40pSXo/4DNcaWxNoBMYIo6+sSbJofAkASKkWPJxh88odiSJkiOQxYL/S7odsDUo2/OmnrnzXo7cRv2x7mNMRO5LQpc0e2wN7/rhEoCVnhaKA6eMBC2sNmkO6Dl/JasH1g8uOIH7fHARQbSIBUucV7JQkF1hCuxJZMstL+QiTGcwoWss+c0yvhdk8ivsRQ1ByabvJnfg2eiqmXx3wzcJo7Ajp/Rfk2FSY0oPmzh/5tQhj7Z1WcYvODxTty6+Mog7gMS77Q7I4L8mhnaaOaFxLjnzXqc=
+
+ # $base_rmq_ref is used by rabbitmq-components.mk to select the
+ # appropriate branch for dependencies.
+ - base_rmq_ref=master
+
+elixir:
+ - '1.9'
+otp_release:
+ - '21.3'
+ - '22.2'
+
+install:
+ # Since this is an Erlang project (we set the language to Elixir only
+ # so that Elixir gets installed), we don't want Travis to run mix(1)
+ # automatically, as it would break.
+ skip
+
+script:
+ # $current_rmq_ref is also used by rabbitmq-components.mk to select
+ # the appropriate branch for dependencies.
+ - make check-rabbitmq-components.mk
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+ - make xref
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+ - make tests
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+
+after_failure:
+ - |
+ cd "$TRAVIS_BUILD_DIR"
+ if test -d logs && test "$AWS_ACCESS_KEY_ID" && test "$AWS_SECRET_ACCESS_KEY"; then
+ archive_name="$(basename "$TRAVIS_REPO_SLUG")-$TRAVIS_JOB_NUMBER"
+
+ tar -c --transform "s/^logs/${archive_name}/" -f - logs | \
+ xz > "${archive_name}.tar.xz"
+
+ aws s3 cp "${archive_name}.tar.xz" s3://server-release-pipeline/travis-ci-logs/ \
+ --region eu-west-1 \
+ --acl public-read
+ fi
diff --git a/deps/rabbitmq_jms_topic_exchange/CODE_OF_CONDUCT.md b/deps/rabbitmq_jms_topic_exchange/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000000..08697906fd
--- /dev/null
+++ b/deps/rabbitmq_jms_topic_exchange/CODE_OF_CONDUCT.md
@@ -0,0 +1,44 @@
+# Contributor Code of Conduct
+
+As contributors and maintainers of this project, and in the interest of fostering an open
+and welcoming community, we pledge to respect all people who contribute through reporting
+issues, posting feature requests, updating documentation, submitting pull requests or
+patches, and other activities.
+
+We are committed to making participation in this project a harassment-free experience for
+everyone, regardless of level of experience, gender, gender identity and expression,
+sexual orientation, disability, personal appearance, body size, race, ethnicity, age,
+religion, or nationality.
+
+Examples of unacceptable behavior by participants include:
+
+ * The use of sexualized language or imagery
+ * Personal attacks
+ * Trolling or insulting/derogatory comments
+ * Public or private harassment
+ * Publishing others' private information, such as physical or electronic addresses,
+ without explicit permission
+ * Other unethical or unprofessional conduct
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments,
+commits, code, wiki edits, issues, and other contributions that are not aligned to this
+Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors
+that they deem inappropriate, threatening, offensive, or harmful.
+
+By adopting this Code of Conduct, project maintainers commit themselves to fairly and
+consistently applying these principles to every aspect of managing this project. Project
+maintainers who do not follow or enforce the Code of Conduct may be permanently removed
+from the project team.
+
+This Code of Conduct applies both within project spaces and in public spaces when an
+individual is representing the project or its community.
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by
+contacting a project maintainer at [info@rabbitmq.com](mailto:info@rabbitmq.com). All complaints will
+be reviewed and investigated and will result in a response that is deemed necessary and
+appropriate to the circumstances. Maintainers are obligated to maintain confidentiality
+with regard to the reporter of an incident.
+
+This Code of Conduct is adapted from the
+[Contributor Covenant](https://contributor-covenant.org), version 1.3.0, available at
+[contributor-covenant.org/version/1/3/0/](https://contributor-covenant.org/version/1/3/0/)
diff --git a/deps/rabbitmq_jms_topic_exchange/CONTRIBUTING.md b/deps/rabbitmq_jms_topic_exchange/CONTRIBUTING.md
new file mode 100644
index 0000000000..23a92fef9c
--- /dev/null
+++ b/deps/rabbitmq_jms_topic_exchange/CONTRIBUTING.md
@@ -0,0 +1,38 @@
+## Overview
+
+RabbitMQ projects use pull requests to discuss, collaborate on and accept code contributions.
+Pull requests are the primary place for discussing code changes.
+
+## How to Contribute
+
+The process is fairly standard:
+
+ * Fork the repository or repositories you plan on contributing to
+ * Clone [RabbitMQ umbrella repository](https://github.com/rabbitmq/rabbitmq-public-umbrella)
+ * `cd umbrella`, `make co`
+ * Create a branch with a descriptive name in the relevant repositories
+ * Make your changes, run tests, commit with a [descriptive message](https://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html), push to your fork
+ * Submit pull requests with an explanation of what has been changed and **why**
+ * Submit a filled out and signed [Contributor Agreement](https://github.com/rabbitmq/ca#how-to-submit) if needed (see below)
+ * Be patient. We will get to your pull request eventually
+
+If what you are going to work on is a substantial change, please first ask the core team
+for its opinion on the [RabbitMQ mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
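+
+For reference, the steps above boil down to something like the following
+shell session (paths, fork, and branch names are illustrative):
+
+```sh
+git clone https://github.com/rabbitmq/rabbitmq-public-umbrella.git umbrella
+cd umbrella
+make co                                # clone all component repositories
+cd deps/rabbitmq_jms_topic_exchange
+git checkout -b descriptive-branch-name
+# ...make and test your changes...
+make tests
+git push your-fork descriptive-branch-name
+```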
+
+
+## Code of Conduct
+
+See [CODE_OF_CONDUCT.md](./CODE_OF_CONDUCT.md).
+
+
+## Contributor Agreement
+
+If you want to contribute a non-trivial change, please submit a signed copy of our
+[Contributor Agreement](https://github.com/rabbitmq/ca#how-to-submit) around the time
+you submit your pull request. This will make it much easier (in some cases, possible)
+for the RabbitMQ team at Pivotal to merge your contribution.
+
+
+## Where to Ask Questions
+
+If something isn't clear, feel free to ask on our [mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
diff --git a/deps/rabbitmq_jms_topic_exchange/LICENSE b/deps/rabbitmq_jms_topic_exchange/LICENSE
new file mode 100644
index 0000000000..f2da65d175
--- /dev/null
+++ b/deps/rabbitmq_jms_topic_exchange/LICENSE
@@ -0,0 +1,4 @@
+This package is licensed under the MPL 2.0. For the MPL 2.0, please see LICENSE-MPL-RabbitMQ.
+
+If you have any questions regarding licensing, please contact us at
+info@rabbitmq.com.
diff --git a/deps/rabbitmq_jms_topic_exchange/LICENSE-MPL-RabbitMQ b/deps/rabbitmq_jms_topic_exchange/LICENSE-MPL-RabbitMQ
new file mode 100644
index 0000000000..14e2f777f6
--- /dev/null
+++ b/deps/rabbitmq_jms_topic_exchange/LICENSE-MPL-RabbitMQ
@@ -0,0 +1,373 @@
+Mozilla Public License Version 2.0
+==================================
+
+1. Definitions
+--------------
+
+1.1. "Contributor"
+ means each individual or legal entity that creates, contributes to
+ the creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+ means the combination of the Contributions of others (if any) used
+ by a Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+ means Source Code Form to which the initial Contributor has attached
+ the notice in Exhibit A, the Executable Form of such Source Code
+ Form, and Modifications of such Source Code Form, in each case
+ including portions thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ (a) that the initial Contributor has attached the notice described
+ in Exhibit B to the Covered Software; or
+
+ (b) that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the
+ terms of a Secondary License.
+
+1.6. "Executable Form"
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+ means a work that combines Covered Software with other material, in
+ a separate file or files, that is not Covered Software.
+
+1.8. "License"
+ means this document.
+
+1.9. "Licensable"
+ means having the right to grant, to the maximum extent possible,
+ whether at the time of the initial grant or subsequently, any and
+ all of the rights conveyed by this License.
+
+1.10. "Modifications"
+ means any of the following:
+
+ (a) any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered
+ Software; or
+
+ (b) any new file in Source Code Form that contains any Covered
+ Software.
+
+1.11. "Patent Claims" of a Contributor
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the
+ License, by the making, using, selling, offering for sale, having
+ made, import, or transfer of either its Contributions or its
+ Contributor Version.
+
+1.12. "Secondary License"
+ means either the GNU General Public License, Version 2.0, the GNU
+ Lesser General Public License, Version 2.1, the GNU Affero General
+ Public License, Version 3.0, or any later versions of those
+ licenses.
+
+1.13. "Source Code Form"
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that
+ controls, is controlled by, or is under common control with You. For
+ purposes of this definition, "control" means (a) the power, direct
+ or indirect, to cause the direction or management of such entity,
+ whether by contract or otherwise, or (b) ownership of more than
+ fifty percent (50%) of the outstanding shares or beneficial
+ ownership of such entity.
+
+2. License Grants and Conditions
+--------------------------------
+
+2.1. Grants
+
+Each Contributor hereby grants You a world-wide, royalty-free,
+non-exclusive license:
+
+(a) under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+(b) under Patent Claims of such Contributor to make, use, sell, offer
+ for sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+The licenses granted in Section 2.1 with respect to any Contribution
+become effective for each Contribution on the date the Contributor first
+distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+The licenses granted in this Section 2 are the only rights granted under
+this License. No additional rights or licenses will be implied from the
+distribution or licensing of Covered Software under this License.
+Notwithstanding Section 2.1(b) above, no patent license is granted by a
+Contributor:
+
+(a) for any code that a Contributor has removed from Covered Software;
+ or
+
+(b) for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+(c) under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+This License does not grant any rights in the trademarks, service marks,
+or logos of any Contributor (except as may be necessary to comply with
+the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+No Contributor makes additional grants as a result of Your choice to
+distribute the Covered Software under a subsequent version of this
+License (see Section 10.2) or under the terms of a Secondary License (if
+permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+Each Contributor represents that the Contributor believes its
+Contributions are its original creation(s) or it has sufficient rights
+to grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+This License is not intended to limit any rights You have under
+applicable copyright doctrines of fair use, fair dealing, or other
+equivalents.
+
+2.7. Conditions
+
+Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
+in Section 2.1.
+
+3. Responsibilities
+-------------------
+
+3.1. Distribution of Source Form
+
+All distribution of Covered Software in Source Code Form, including any
+Modifications that You create or to which You contribute, must be under
+the terms of this License. You must inform recipients that the Source
+Code Form of the Covered Software is governed by the terms of this
+License, and how they can obtain a copy of this License. You may not
+attempt to alter or restrict the recipients' rights in the Source Code
+Form.
+
+3.2. Distribution of Executable Form
+
+If You distribute Covered Software in Executable Form then:
+
+(a) such Covered Software must also be made available in Source Code
+ Form, as described in Section 3.1, and You must inform recipients of
+ the Executable Form how they can obtain a copy of such Source Code
+ Form by reasonable means in a timely manner, at a charge no more
+ than the cost of distribution to the recipient; and
+
+(b) You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter
+ the recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+You may create and distribute a Larger Work under terms of Your choice,
+provided that You also comply with the requirements of this License for
+the Covered Software. If the Larger Work is a combination of Covered
+Software with a work governed by one or more Secondary Licenses, and the
+Covered Software is not Incompatible With Secondary Licenses, this
+License permits You to additionally distribute such Covered Software
+under the terms of such Secondary License(s), so that the recipient of
+the Larger Work may, at their option, further distribute the Covered
+Software under the terms of either this License or such Secondary
+License(s).
+
+3.4. Notices
+
+You may not remove or alter the substance of any license notices
+(including copyright notices, patent notices, disclaimers of warranty,
+or limitations of liability) contained within the Source Code Form of
+the Covered Software, except that You may alter any license notices to
+the extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+You may choose to offer, and to charge a fee for, warranty, support,
+indemnity or liability obligations to one or more recipients of Covered
+Software. However, You may do so only on Your own behalf, and not on
+behalf of any Contributor. You must make it absolutely clear that any
+such warranty, support, indemnity, or liability obligation is offered by
+You alone, and You hereby agree to indemnify every Contributor for any
+liability incurred by such Contributor as a result of warranty, support,
+indemnity or liability terms You offer. You may include additional
+disclaimers of warranty and limitations of liability specific to any
+jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+---------------------------------------------------
+
+If it is impossible for You to comply with any of the terms of this
+License with respect to some or all of the Covered Software due to
+statute, judicial order, or regulation then You must: (a) comply with
+the terms of this License to the maximum extent possible; and (b)
+describe the limitations and the code they affect. Such description must
+be placed in a text file included with all distributions of the Covered
+Software under this License. Except to the extent prohibited by statute
+or regulation, such description must be sufficiently detailed for a
+recipient of ordinary skill to be able to understand it.
+
+5. Termination
+--------------
+
+5.1. The rights granted under this License will terminate automatically
+if You fail to comply with any of its terms. However, if You become
+compliant, then the rights granted under this License from a particular
+Contributor are reinstated (a) provisionally, unless and until such
+Contributor explicitly and finally terminates Your grants, and (b) on an
+ongoing basis, if such Contributor fails to notify You of the
+non-compliance by some reasonable means prior to 60 days after You have
+come back into compliance. Moreover, Your grants from a particular
+Contributor are reinstated on an ongoing basis if such Contributor
+notifies You of the non-compliance by some reasonable means, this is the
+first time You have received notice of non-compliance with this License
+from such Contributor, and You become compliant prior to 30 days after
+Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+infringement claim (excluding declaratory judgment actions,
+counter-claims, and cross-claims) alleging that a Contributor Version
+directly or indirectly infringes any patent, then the rights granted to
+You by any and all Contributors for the Covered Software under Section
+2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all
+end user license agreements (excluding distributors and resellers) which
+have been validly granted by You or Your distributors under this License
+prior to termination shall survive termination.
+
+************************************************************************
+* *
+* 6. Disclaimer of Warranty *
+* ------------------------- *
+* *
+* Covered Software is provided under this License on an "as is" *
+* basis, without warranty of any kind, either expressed, implied, or *
+* statutory, including, without limitation, warranties that the *
+* Covered Software is free of defects, merchantable, fit for a *
+* particular purpose or non-infringing. The entire risk as to the *
+* quality and performance of the Covered Software is with You. *
+* Should any Covered Software prove defective in any respect, You *
+* (not any Contributor) assume the cost of any necessary servicing, *
+* repair, or correction. This disclaimer of warranty constitutes an *
+* essential part of this License. No use of any Covered Software is *
+* authorized under this License except under this disclaimer. *
+* *
+************************************************************************
+
+************************************************************************
+* *
+* 7. Limitation of Liability *
+* -------------------------- *
+* *
+* Under no circumstances and under no legal theory, whether tort *
+* (including negligence), contract, or otherwise, shall any *
+* Contributor, or anyone who distributes Covered Software as *
+* permitted above, be liable to You for any direct, indirect, *
+* special, incidental, or consequential damages of any character *
+* including, without limitation, damages for lost profits, loss of *
+* goodwill, work stoppage, computer failure or malfunction, or any *
+* and all other commercial damages or losses, even if such party *
+* shall have been informed of the possibility of such damages. This *
+* limitation of liability shall not apply to liability for death or *
+* personal injury resulting from such party's negligence to the *
+* extent applicable law prohibits such limitation. Some *
+* jurisdictions do not allow the exclusion or limitation of *
+* incidental or consequential damages, so this exclusion and *
+* limitation may not apply to You. *
+* *
+************************************************************************
+
+8. Litigation
+-------------
+
+Any litigation relating to this License may be brought only in the
+courts of a jurisdiction where the defendant maintains its principal
+place of business and such litigation shall be governed by laws of that
+jurisdiction, without reference to its conflict-of-law provisions.
+Nothing in this Section shall prevent a party's ability to bring
+cross-claims or counter-claims.
+
+9. Miscellaneous
+----------------
+
+This License represents the complete agreement concerning the subject
+matter hereof. If any provision of this License is held to be
+unenforceable, such provision shall be reformed only to the extent
+necessary to make it enforceable. Any law or regulation which provides
+that the language of a contract shall be construed against the drafter
+shall not be used to construe this License against a Contributor.
+
+10. Versions of the License
+---------------------------
+
+10.1. New Versions
+
+Mozilla Foundation is the license steward. Except as provided in Section
+10.3, no one other than the license steward has the right to modify or
+publish new versions of this License. Each version will be given a
+distinguishing version number.
+
+10.2. Effect of New Versions
+
+You may distribute the Covered Software under the terms of the version
+of the License under which You originally received the Covered Software,
+or under the terms of any subsequent version published by the license
+steward.
+
+10.3. Modified Versions
+
+If you create software not governed by this License, and you want to
+create a new license for such software, you may create and use a
+modified version of this License if you rename the license and remove
+any references to the name of the license steward (except to note that
+such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+Licenses
+
+If You choose to distribute Source Code Form that is Incompatible With
+Secondary Licenses under the terms of this version of the License, the
+notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+-------------------------------------------
+
+ This Source Code Form is subject to the terms of the Mozilla Public
+ License, v. 2.0. If a copy of the MPL was not distributed with this
+ file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular
+file, then You may include the notice in a location (such as a LICENSE
+file in a relevant directory) where a recipient would be likely to look
+for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+---------------------------------------------------------
+
+ This Source Code Form is "Incompatible With Secondary Licenses", as
+ defined by the Mozilla Public License, v. 2.0.
diff --git a/deps/rabbitmq_jms_topic_exchange/LICENSES.txt b/deps/rabbitmq_jms_topic_exchange/LICENSES.txt
new file mode 100644
index 0000000000..aca032b7ad
--- /dev/null
+++ b/deps/rabbitmq_jms_topic_exchange/LICENSES.txt
@@ -0,0 +1,736 @@
+open_source_license.txt
+
+JMS Client for Pivotal RabbitMQ 1.4.6 GA
+
+===========================================================================
+
+The following copyright statements and licenses apply to various open
+source software packages (or portions thereof) that are distributed with
+this Pivotal Software, Inc. Product.
+
+The Pivotal Product may also include other Pivotal components, which may
+contain additional open source software packages. One or more such
+open_source_licenses.txt files may therefore accompany this Pivotal
+Product.
+
+The Pivotal Product that includes this file does not necessarily use all
+the open source software packages referred to below and may also only
+use portions of a given package.
+
+
+=============== TABLE OF CONTENTS =============================
+
+
+The following is a listing of the open source components detailed in
+this document. This list is provided for your convenience; please read
+further if you wish to review the copyright notice(s) and the full text
+of the license associated with each component.
+
+
+
+
+SECTION 1: BSD-STYLE, MIT-STYLE, OR SIMILAR STYLE LICENSES
+
+ >>> slf4j-api-1.7.5
+
+
+
+SECTION 2: Apache License, V2.0
+
+ >>> geronimo-jms_1.1_spec-1.1.1
+
+
+
+SECTION 3: Mozilla Public License, v2.0
+
+ >>> amqp-client
+
+
+
+APPENDIX. Standard License Files
+
+ >>> Apache License, V2.0
+
+ >>> Mozilla Public License, v2.0
+
+
+
+--------------- SECTION 1: BSD-STYLE, MIT-STYLE, OR SIMILAR STYLE LICENSES ----------
+
+BSD-STYLE, MIT-STYLE, OR SIMILAR STYLE LICENSES are applicable to the following component(s).
+
+
+>>> slf4j-api-1.7.5
+
+Copyright (c) 2004-2011 QOS.ch
+All rights reserved.
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+
+--------------- SECTION 2: Apache License, V2.0 ----------
+
+Apache License, V2.0 is applicable to the following component(s).
+
+
+>>> geronimo-jms_1.1_spec-1.1.1
+
+Apache Geronimo
+Copyright 2003-2008 The Apache Software Foundation
+
+This product includes software developed by
+The Apache Software Foundation (https://www.apache.org/).
+
+
+--------------- SECTION 3: Mozilla Public License, v2.0 ----------
+
+Mozilla Public License, v2.0 is applicable to the following component(s).
+
+
+>>> amqp-client
+
+// The contents of this file are subject to the Mozilla Public License
+// Version 1.1 (the "License"); you may not use this file except in
+// compliance with the License. You may obtain a copy of the License
+// at https://www.mozilla.org/MPL/
+//
+// Software distributed under the License is distributed on an "AS IS"
+// basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+// the License for the specific language governing rights and
+// limitations under the License.
+//
+// The Original Code is RabbitMQ.
+//
+// The Initial Developer of the Original Code is GoPivotal, Inc.
+// Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+ADDITIONAL LICENSE INFORMATION:
+
+> Apache 2.0
+
+amqp-client-3.5.6-sources.jar\com\rabbitmq\tools\json\JSONWriter.java
+
+/*
+ Copyright (c) 2006-2007 Frank Carver
+ Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All Rights Reserved
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+
+
+> Public Domain
+
+amqp-client-sources.jar\com\rabbitmq\client\impl\VariableLinkedBlockingQueue.java
+
+/*
+ * Modifications Copyright 2015 Pivotal Software, Inc and licenced as per
+ * the rest of the RabbitMQ Java client.
+ */
+* Written by Doug Lea with assistance from members of JCP JSR-166
+ * Expert Group and released to the public domain, as explained at
+ * https://creativecommons.org/licenses/publicdomain
+ */
+
+
+=============== APPENDIX. Standard License Files ==============
+
+
+
+--------------- SECTION 1: Apache License, V2.0 -----------
+
+Apache License
+
+Version 2.0, January 2004
+http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+"License" shall mean the terms and conditions for use, reproduction,
+and distribution as defined by Sections 1 through 9 of this document.
+
+"Licensor" shall mean the copyright owner or entity authorized by the
+copyright owner that is granting the License.
+
+"Legal Entity" shall mean the union of the acting entity and all other
+entities that control, are controlled by, or are under common control
+with that entity. For the purposes of this definition, "control" means
+(i) the power, direct or indirect, to cause the direction or management
+of such entity, whether by contract or otherwise, or (ii) ownership
+of fifty percent (50%) or more of the outstanding shares, or (iii)
+beneficial ownership of such entity.
+
+"You" (or "Your") shall mean an individual or Legal Entity exercising
+permissions granted by this License.
+
+"Source" form shall mean the preferred form for making modifications,
+including but not limited to software source code, documentation source,
+and configuration files.
+
+"Object" form shall mean any form resulting from mechanical transformation
+or translation of a Source form, including but not limited to compiled
+object code, generated documentation, and conversions to other media
+types.
+
+"Work" shall mean the work of authorship, whether in Source or
+Object form, made available under the License, as indicated by a copyright
+notice that is included in or attached to the work (an example is provided
+in the Appendix below).
+
+"Derivative Works" shall mean any work, whether in Source or Object form,
+that is based on (or derived from) the Work and for which the editorial
+revisions, annotations, elaborations, or other modifications represent,
+as a whole, an original work of authorship. For the purposes of this
+License, Derivative Works shall not include works that remain separable
+from, or merely link (or bind by name) to the interfaces of, the Work
+and Derivative Works thereof.
+
+"Contribution" shall mean any work of authorship, including the
+original version of the Work and any modifications or additions to
+that Work or Derivative Works thereof, that is intentionally submitted
+to Licensor for inclusion in the Work by the copyright owner or by an
+individual or Legal Entity authorized to submit on behalf of the copyright
+owner. For the purposes of this definition, "submitted" means any form of
+electronic, verbal, or written communication sent to the Licensor or its
+representatives, including but not limited to communication on electronic
+mailing lists, source code control systems, and issue tracking systems
+that are managed by, or on behalf of, the Licensor for the purpose of
+discussing and improving the Work, but excluding communication that is
+conspicuously marked or otherwise designated in writing by the copyright
+owner as "Not a Contribution."
+
+"Contributor" shall mean Licensor and any individual or Legal Entity
+on behalf of whom a Contribution has been received by Licensor and
+subsequently incorporated within the Work.
+
+2. Grant of Copyright License.
+Subject to the terms and conditions of this License, each Contributor
+hereby grants to You a perpetual, worldwide, non-exclusive, no-charge,
+royalty-free, irrevocable copyright license to reproduce, prepare
+Derivative Works of, publicly display, publicly perform, sublicense, and
+distribute the Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License.
+Subject to the terms and conditions of this License, each Contributor
+hereby grants to You a perpetual, worldwide, non-exclusive, no-charge,
+royalty- free, irrevocable (except as stated in this section) patent
+license to make, have made, use, offer to sell, sell, import, and
+otherwise transfer the Work, where such license applies only to those
+patent claims licensable by such Contributor that are necessarily
+infringed by their Contribution(s) alone or by combination of
+their Contribution(s) with the Work to which such Contribution(s)
+was submitted. If You institute patent litigation against any entity
+(including a cross-claim or counterclaim in a lawsuit) alleging that the
+Work or a Contribution incorporated within the Work constitutes direct
+or contributory patent infringement, then any patent licenses granted
+to You under this License for that Work shall terminate as of the date
+such litigation is filed.
+
+4. Redistribution.
+You may reproduce and distribute copies of the Work or Derivative Works
+thereof in any medium, with or without modifications, and in Source or
+Object form, provided that You meet the following conditions:
+
+ a. You must give any other recipients of the Work or Derivative Works
+ a copy of this License; and
+
+ b. You must cause any modified files to carry prominent notices stating
+ that You changed the files; and
+
+ c. You must retain, in the Source form of any Derivative Works that
+ You distribute, all copyright, patent, trademark, and attribution
+ notices from the Source form of the Work, excluding those notices
+ that do not pertain to any part of the Derivative Works; and
+
+ d. If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one of
+ the following places: within a NOTICE text file distributed as part
+ of the Derivative Works; within the Source form or documentation,
+ if provided along with the Derivative Works; or, within a display
+ generated by the Derivative Works, if and wherever such third-party
+ notices normally appear. The contents of the NOTICE file are for
+ informational purposes only and do not modify the License. You
+ may add Your own attribution notices within Derivative Works that
+ You distribute, alongside or as an addendum to the NOTICE text
+ from the Work, provided that such additional attribution notices
+ cannot be construed as modifying the License. You may add Your own
+ copyright statement to Your modifications and may provide additional
+ or different license terms and conditions for use, reproduction, or
+ distribution of Your modifications, or for any such Derivative Works
+ as a whole, provided Your use, reproduction, and distribution of the
+ Work otherwise complies with the conditions stated in this License.
+
+5. Submission of Contributions.
+Unless You explicitly state otherwise, any Contribution intentionally
+submitted for inclusion in the Work by You to the Licensor shall be
+under the terms and conditions of this License, without any additional
+terms or conditions. Notwithstanding the above, nothing herein shall
+supersede or modify the terms of any separate license agreement you may
+have executed with Licensor regarding such Contributions.
+
+6. Trademarks.
+This License does not grant permission to use the trade names, trademarks,
+service marks, or product names of the Licensor, except as required for
+reasonable and customary use in describing the origin of the Work and
+reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty.
+Unless required by applicable law or agreed to in writing, Licensor
+provides the Work (and each Contributor provides its Contributions) on
+an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+express or implied, including, without limitation, any warranties or
+conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR
+A PARTICULAR PURPOSE. You are solely responsible for determining the
+appropriateness of using or redistributing the Work and assume any risks
+associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability.
+In no event and under no legal theory, whether in tort (including
+negligence), contract, or otherwise, unless required by applicable law
+(such as deliberate and grossly negligent acts) or agreed to in writing,
+shall any Contributor be liable to You for damages, including any direct,
+indirect, special, incidental, or consequential damages of any character
+arising as a result of this License or out of the use or inability to
+use the Work (including but not limited to damages for loss of goodwill,
+work stoppage, computer failure or malfunction, or any and all other
+commercial damages or losses), even if such Contributor has been advised
+of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability.
+While redistributing the Work or Derivative Works thereof, You may
+choose to offer, and charge a fee for, acceptance of support, warranty,
+indemnity, or other liability obligations and/or rights consistent with
+this License. However, in accepting such obligations, You may act only
+on Your own behalf and on Your sole responsibility, not on behalf of
+any other Contributor, and only if You agree to indemnify, defend, and
+hold each Contributor harmless for any liability incurred by, or claims
+asserted against, such Contributor by reason of your accepting any such
+warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+
+
+--------------- SECTION 2: Mozilla Public License, V2.0 -----------
+
+Mozilla Public License Version 2.0
+==================================
+
+1. Definitions
+--------------
+
+1.1. "Contributor"
+ means each individual or legal entity that creates, contributes to
+ the creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+ means the combination of the Contributions of others (if any) used
+ by a Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+ means Source Code Form to which the initial Contributor has attached
+ the notice in Exhibit A, the Executable Form of such Source Code
+ Form, and Modifications of such Source Code Form, in each case
+ including portions thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ (a) that the initial Contributor has attached the notice described
+ in Exhibit B to the Covered Software; or
+
+ (b) that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the
+ terms of a Secondary License.
+
+1.6. "Executable Form"
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+ means a work that combines Covered Software with other material, in
+ a separate file or files, that is not Covered Software.
+
+1.8. "License"
+ means this document.
+
+1.9. "Licensable"
+ means having the right to grant, to the maximum extent possible,
+ whether at the time of the initial grant or subsequently, any and
+ all of the rights conveyed by this License.
+
+1.10. "Modifications"
+ means any of the following:
+
+ (a) any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered
+ Software; or
+
+ (b) any new file in Source Code Form that contains any Covered
+ Software.
+
+1.11. "Patent Claims" of a Contributor
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the
+ License, by the making, using, selling, offering for sale, having
+ made, import, or transfer of either its Contributions or its
+ Contributor Version.
+
+1.12. "Secondary License"
+ means either the GNU General Public License, Version 2.0, the GNU
+ Lesser General Public License, Version 2.1, the GNU Affero General
+ Public License, Version 3.0, or any later versions of those
+ licenses.
+
+1.13. "Source Code Form"
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that
+ controls, is controlled by, or is under common control with You. For
+ purposes of this definition, "control" means (a) the power, direct
+ or indirect, to cause the direction or management of such entity,
+ whether by contract or otherwise, or (b) ownership of more than
+ fifty percent (50%) of the outstanding shares or beneficial
+ ownership of such entity.
+
+2. License Grants and Conditions
+--------------------------------
+
+2.1. Grants
+
+Each Contributor hereby grants You a world-wide, royalty-free,
+non-exclusive license:
+
+(a) under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+(b) under Patent Claims of such Contributor to make, use, sell, offer
+ for sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+The licenses granted in Section 2.1 with respect to any Contribution
+become effective for each Contribution on the date the Contributor first
+distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+The licenses granted in this Section 2 are the only rights granted under
+this License. No additional rights or licenses will be implied from the
+distribution or licensing of Covered Software under this License.
+Notwithstanding Section 2.1(b) above, no patent license is granted by a
+Contributor:
+
+(a) for any code that a Contributor has removed from Covered Software;
+ or
+
+(b) for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+(c) under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+This License does not grant any rights in the trademarks, service marks,
+or logos of any Contributor (except as may be necessary to comply with
+the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+No Contributor makes additional grants as a result of Your choice to
+distribute the Covered Software under a subsequent version of this
+License (see Section 10.2) or under the terms of a Secondary License (if
+permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+Each Contributor represents that the Contributor believes its
+Contributions are its original creation(s) or it has sufficient rights
+to grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+This License is not intended to limit any rights You have under
+applicable copyright doctrines of fair use, fair dealing, or other
+equivalents.
+
+2.7. Conditions
+
+Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
+in Section 2.1.
+
+3. Responsibilities
+-------------------
+
+3.1. Distribution of Source Form
+
+All distribution of Covered Software in Source Code Form, including any
+Modifications that You create or to which You contribute, must be under
+the terms of this License. You must inform recipients that the Source
+Code Form of the Covered Software is governed by the terms of this
+License, and how they can obtain a copy of this License. You may not
+attempt to alter or restrict the recipients' rights in the Source Code
+Form.
+
+3.2. Distribution of Executable Form
+
+If You distribute Covered Software in Executable Form then:
+
+(a) such Covered Software must also be made available in Source Code
+ Form, as described in Section 3.1, and You must inform recipients of
+ the Executable Form how they can obtain a copy of such Source Code
+ Form by reasonable means in a timely manner, at a charge no more
+ than the cost of distribution to the recipient; and
+
+(b) You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter
+ the recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+You may create and distribute a Larger Work under terms of Your choice,
+provided that You also comply with the requirements of this License for
+the Covered Software. If the Larger Work is a combination of Covered
+Software with a work governed by one or more Secondary Licenses, and the
+Covered Software is not Incompatible With Secondary Licenses, this
+License permits You to additionally distribute such Covered Software
+under the terms of such Secondary License(s), so that the recipient of
+the Larger Work may, at their option, further distribute the Covered
+Software under the terms of either this License or such Secondary
+License(s).
+
+3.4. Notices
+
+You may not remove or alter the substance of any license notices
+(including copyright notices, patent notices, disclaimers of warranty,
+or limitations of liability) contained within the Source Code Form of
+the Covered Software, except that You may alter any license notices to
+the extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+You may choose to offer, and to charge a fee for, warranty, support,
+indemnity or liability obligations to one or more recipients of Covered
+Software. However, You may do so only on Your own behalf, and not on
+behalf of any Contributor. You must make it absolutely clear that any
+such warranty, support, indemnity, or liability obligation is offered by
+You alone, and You hereby agree to indemnify every Contributor for any
+liability incurred by such Contributor as a result of warranty, support,
+indemnity or liability terms You offer. You may include additional
+disclaimers of warranty and limitations of liability specific to any
+jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+---------------------------------------------------
+
+If it is impossible for You to comply with any of the terms of this
+License with respect to some or all of the Covered Software due to
+statute, judicial order, or regulation then You must: (a) comply with
+the terms of this License to the maximum extent possible; and (b)
+describe the limitations and the code they affect. Such description must
+be placed in a text file included with all distributions of the Covered
+Software under this License. Except to the extent prohibited by statute
+or regulation, such description must be sufficiently detailed for a
+recipient of ordinary skill to be able to understand it.
+
+5. Termination
+--------------
+
+5.1. The rights granted under this License will terminate automatically
+if You fail to comply with any of its terms. However, if You become
+compliant, then the rights granted under this License from a particular
+Contributor are reinstated (a) provisionally, unless and until such
+Contributor explicitly and finally terminates Your grants, and (b) on an
+ongoing basis, if such Contributor fails to notify You of the
+non-compliance by some reasonable means prior to 60 days after You have
+come back into compliance. Moreover, Your grants from a particular
+Contributor are reinstated on an ongoing basis if such Contributor
+notifies You of the non-compliance by some reasonable means, this is the
+first time You have received notice of non-compliance with this License
+from such Contributor, and You become compliant prior to 30 days after
+Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+infringement claim (excluding declaratory judgment actions,
+counter-claims, and cross-claims) alleging that a Contributor Version
+directly or indirectly infringes any patent, then the rights granted to
+You by any and all Contributors for the Covered Software under Section
+2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all
+end user license agreements (excluding distributors and resellers) which
+have been validly granted by You or Your distributors under this License
+prior to termination shall survive termination.
+
+************************************************************************
+* *
+* 6. Disclaimer of Warranty *
+* ------------------------- *
+* *
+* Covered Software is provided under this License on an "as is" *
+* basis, without warranty of any kind, either expressed, implied, or *
+* statutory, including, without limitation, warranties that the *
+* Covered Software is free of defects, merchantable, fit for a *
+* particular purpose or non-infringing. The entire risk as to the *
+* quality and performance of the Covered Software is with You. *
+* Should any Covered Software prove defective in any respect, You *
+* (not any Contributor) assume the cost of any necessary servicing, *
+* repair, or correction. This disclaimer of warranty constitutes an *
+* essential part of this License. No use of any Covered Software is *
+* authorized under this License except under this disclaimer. *
+* *
+************************************************************************
+
+************************************************************************
+* *
+* 7. Limitation of Liability *
+* -------------------------- *
+* *
+* Under no circumstances and under no legal theory, whether tort *
+* (including negligence), contract, or otherwise, shall any *
+* Contributor, or anyone who distributes Covered Software as *
+* permitted above, be liable to You for any direct, indirect, *
+* special, incidental, or consequential damages of any character *
+* including, without limitation, damages for lost profits, loss of *
+* goodwill, work stoppage, computer failure or malfunction, or any *
+* and all other commercial damages or losses, even if such party *
+* shall have been informed of the possibility of such damages. This *
+* limitation of liability shall not apply to liability for death or *
+* personal injury resulting from such party's negligence to the *
+* extent applicable law prohibits such limitation. Some *
+* jurisdictions do not allow the exclusion or limitation of *
+* incidental or consequential damages, so this exclusion and *
+* limitation may not apply to You. *
+* *
+************************************************************************
+
+8. Litigation
+-------------
+
+Any litigation relating to this License may be brought only in the
+courts of a jurisdiction where the defendant maintains its principal
+place of business and such litigation shall be governed by laws of that
+jurisdiction, without reference to its conflict-of-law provisions.
+Nothing in this Section shall prevent a party's ability to bring
+cross-claims or counter-claims.
+
+9. Miscellaneous
+----------------
+
+This License represents the complete agreement concerning the subject
+matter hereof. If any provision of this License is held to be
+unenforceable, such provision shall be reformed only to the extent
+necessary to make it enforceable. Any law or regulation which provides
+that the language of a contract shall be construed against the drafter
+shall not be used to construe this License against a Contributor.
+
+10. Versions of the License
+---------------------------
+
+10.1. New Versions
+
+Mozilla Foundation is the license steward. Except as provided in Section
+10.3, no one other than the license steward has the right to modify or
+publish new versions of this License. Each version will be given a
+distinguishing version number.
+
+10.2. Effect of New Versions
+
+You may distribute the Covered Software under the terms of the version
+of the License under which You originally received the Covered Software,
+or under the terms of any subsequent version published by the license
+steward.
+
+10.3. Modified Versions
+
+If you create software not governed by this License, and you want to
+create a new license for such software, you may create and use a
+modified version of this License if you rename the license and remove
+any references to the name of the license steward (except to note that
+such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+Licenses
+
+If You choose to distribute Source Code Form that is Incompatible With
+Secondary Licenses under the terms of this version of the License, the
+notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+-------------------------------------------
+
+ This Source Code Form is subject to the terms of the Mozilla Public
+ License, v. 2.0. If a copy of the MPL was not distributed with this
+ file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular
+file, then You may include the notice in a location (such as a LICENSE
+file in a relevant directory) where a recipient would be likely to look
+for such a notice.
+
+Copyright (c) 2007-2020 VMware, Inc. or its affiliates.
+
+
+
+===========================================================================
+
+To the extent any open source components are licensed under the
+GPL and/or LGPL, or other similar licenses that require the
+source code and/or modifications to source code to be made
+available (as would be noted above), you may obtain a copy of
+the source code corresponding to the binaries for such open
+source components and modifications thereto, if any, (the
+"Source Files"), by downloading the Source Files from Pivotal's website at
+https://tanzu.vmware.com/open-source, or by sending a request,
+with your name and address to: VMware, Inc., 3401 Hillview Ave,
+Palo Alto, CA 94304, Attention: General Counsel. All such requests should
+clearly specify: OPEN SOURCE FILES REQUEST, Attention General Counsel.
+VMware shall mail a copy of the Source Files to you on a CD or equivalent physical medium.
+This offer to obtain a copy of the Source Files is valid for three
+years from the date you acquired this Software product.
+Alternatively, the Source Files may accompany the VMware product.
+
+
+[RABBITJMS146GASS110315] \ No newline at end of file
diff --git a/deps/rabbitmq_jms_topic_exchange/Makefile b/deps/rabbitmq_jms_topic_exchange/Makefile
new file mode 100644
index 0000000000..c5b45c1b3c
--- /dev/null
+++ b/deps/rabbitmq_jms_topic_exchange/Makefile
@@ -0,0 +1,17 @@
+PROJECT = rabbitmq_jms_topic_exchange
+PROJECT_DESCRIPTION = RabbitMQ JMS topic selector exchange plugin
+
+DEPS = rabbit_common rabbit
+TEST_DEPS = rabbitmq_ct_helpers rabbitmq_ct_client_helpers amqp_client
+
+DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-plugin.mk
+DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk
+
+# FIXME: Use erlang.mk patched for RabbitMQ, while waiting for PRs to be
+# reviewed and merged.
+
+ERLANG_MK_REPO = https://github.com/rabbitmq/erlang.mk.git
+ERLANG_MK_COMMIT = rabbitmq-tmp
+
+include rabbitmq-components.mk
+include erlang.mk
diff --git a/deps/rabbitmq_jms_topic_exchange/README.md b/deps/rabbitmq_jms_topic_exchange/README.md
new file mode 100644
index 0000000000..a1c81823a8
--- /dev/null
+++ b/deps/rabbitmq_jms_topic_exchange/README.md
@@ -0,0 +1,64 @@
+# RabbitMQ JMS Topic Exchange Plugin
+
+## Overview
+
+This plugin adds server-side support for the [RabbitMQ JMS client](https://github.com/rabbitmq/rabbitmq-jms-client):
+it implements JMS topic routing and selection based on JMS SQL selection
+rules.
+
+This implementation is based upon the [Java Message Service
+Specification, Version 1.1](https://www.oracle.com/technetwork/java/docs-136352.html).
+
+## Project Maturity
+
+RabbitMQ JMS-related projects are several years old and can be considered
+reasonably mature. They were first open sourced in June 2016.
+Some related projects (e.g. a compliance test suite) and documentation are yet to be open sourced.
+
+## Supported RabbitMQ Versions
+
+This plugin targets RabbitMQ `3.6.0` and later versions.
+
+## Installation
+
+This plugin ships with RabbitMQ starting with `3.6.3`. Enable it with
+
+```
+[sudo] rabbitmq-plugins enable rabbitmq_jms_topic_exchange
+```
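+
+To verify that the plugin is enabled, you can list the plugins and
+filter by name, for example:
+
+```
+rabbitmq-plugins list rabbitmq_jms_topic_exchange
+```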
+
+
+## Design
+
+This plugin provides a custom exchange type for use by RabbitMQ
+clients. The exchange type name is "`x_jms_topic`", but this is _not_
+a topic exchange. Instead, it works together with a standard topic
+exchange to provide the JMS topic selection function.
+
+When JMS Selectors are used on a Topic Destination consumer, the
+destination (queue) is bound to an exchange of type `x_jms_topic`, with
+arguments that indicate what the selection criteria are. The
+`x_jms_topic` exchange is, in turn, bound to the standard Topic Exchange
+used by JMS messaging (this uses the RabbitMQ exchange-to-exchange
+binding extension to the AMQP 0-9-1 protocol).
+
+In this way, normal topic routing can occur, with the overhead of
+selection only applying when selection is used, and _after_ the routing
+and filtering implied by the topic name.
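+
+As an illustration, the sketch below (using the Erlang `amqp_client`
+library) sets up the topology described above by hand. It is a sketch
+only: in practice the RabbitMQ JMS client declares these exchanges and
+bindings itself, and the exchange names and the `x-selector`
+binding-argument key used here are hypothetical, the latter standing in
+for the plugin's internal selector argument.
+
+```erlang
+%% Sketch only; assumes:
+%% -include_lib("amqp_client/include/amqp_client.hrl").
+{ok, Conn} = amqp_connection:start(#amqp_params_network{}),
+{ok, Ch} = amqp_connection:open_channel(Conn),
+
+%% Declare a standard topic exchange and a selector exchange of the
+%% type provided by this plugin.
+#'exchange.declare_ok'{} =
+    amqp_channel:call(Ch, #'exchange.declare'{exchange = <<"jms.topic">>,
+                                              type     = <<"topic">>}),
+#'exchange.declare_ok'{} =
+    amqp_channel:call(Ch, #'exchange.declare'{exchange = <<"jms.selector">>,
+                                              type     = <<"x_jms_topic">>}),
+
+%% Exchange-to-exchange binding: the topic exchange routes matching
+%% messages on to the selector exchange, so topic routing happens first.
+#'exchange.bind_ok'{} =
+    amqp_channel:call(Ch, #'exchange.bind'{destination = <<"jms.selector">>,
+                                           source      = <<"jms.topic">>,
+                                           routing_key = <<"stock.#">>}),
+
+%% Bind the destination queue with the selection criteria expressed as
+%% a binding argument ("x-selector" is a made-up key for this sketch).
+#'queue.declare_ok'{queue = Q} =
+    amqp_channel:call(Ch, #'queue.declare'{exclusive = true}),
+#'queue.bind_ok'{} =
+    amqp_channel:call(Ch, #'queue.bind'{queue       = Q,
+                                        exchange    = <<"jms.selector">>,
+                                        routing_key = <<"stock.#">>,
+                                        arguments   = [{<<"x-selector">>, longstr,
+                                                        <<"price > 100">>}]}).
+```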
+
+## Building From Source
+
+Building is no different from [building other RabbitMQ plugins](https://www.rabbitmq.com/plugin-development.html).
+
+TL;DR:
+
+ git clone https://github.com/rabbitmq/rabbitmq-jms-topic-exchange.git
+ cd rabbitmq-jms-topic-exchange
+ make -j dist
+ ls plugins/*
+
+## Copyright and License
+
+(c) 2007-2020 VMware, Inc. or its affiliates.
+
+See [LICENSE](./LICENSE) for license information.
diff --git a/deps/rabbitmq_jms_topic_exchange/erlang.mk b/deps/rabbitmq_jms_topic_exchange/erlang.mk
new file mode 100644
index 0000000000..fce4be0b0a
--- /dev/null
+++ b/deps/rabbitmq_jms_topic_exchange/erlang.mk
@@ -0,0 +1,7808 @@
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+.PHONY: all app apps deps search rel relup docs install-docs check tests clean distclean help erlang-mk
+
+ERLANG_MK_FILENAME := $(realpath $(lastword $(MAKEFILE_LIST)))
+export ERLANG_MK_FILENAME
+
+ERLANG_MK_VERSION = 2019.07.01-40-geb3e4b0
+ERLANG_MK_WITHOUT =
+
+# Make 3.81 and 3.82 are deprecated.
+
+ifeq ($(MAKELEVEL)$(MAKE_VERSION),03.81)
+$(warning Please upgrade to GNU Make 4 or later: https://erlang.mk/guide/installation.html)
+endif
+
+ifeq ($(MAKELEVEL)$(MAKE_VERSION),03.82)
+$(warning Please upgrade to GNU Make 4 or later: https://erlang.mk/guide/installation.html)
+endif
+
+# Core configuration.
+
+PROJECT ?= $(notdir $(CURDIR))
+PROJECT := $(strip $(PROJECT))
+
+PROJECT_VERSION ?= rolling
+PROJECT_MOD ?= $(PROJECT)_app
+PROJECT_ENV ?= []
+
+# Verbosity.
+
+V ?= 0
+
+verbose_0 = @
+verbose_2 = set -x;
+verbose = $(verbose_$(V))
+
+ifeq ($(V),3)
+SHELL := $(SHELL) -x
+endif
+
+gen_verbose_0 = @echo " GEN " $@;
+gen_verbose_2 = set -x;
+gen_verbose = $(gen_verbose_$(V))
+
+gen_verbose_esc_0 = @echo " GEN " $$@;
+gen_verbose_esc_2 = set -x;
+gen_verbose_esc = $(gen_verbose_esc_$(V))
+
+# Temporary files directory.
+
+ERLANG_MK_TMP ?= $(CURDIR)/.erlang.mk
+export ERLANG_MK_TMP
+
+# "erl" command.
+
+ERL = erl +A1 -noinput -boot no_dot_erlang
+
+# Platform detection.
+
+ifeq ($(PLATFORM),)
+UNAME_S := $(shell uname -s)
+
+ifeq ($(UNAME_S),Linux)
+PLATFORM = linux
+else ifeq ($(UNAME_S),Darwin)
+PLATFORM = darwin
+else ifeq ($(UNAME_S),SunOS)
+PLATFORM = solaris
+else ifeq ($(UNAME_S),GNU)
+PLATFORM = gnu
+else ifeq ($(UNAME_S),FreeBSD)
+PLATFORM = freebsd
+else ifeq ($(UNAME_S),NetBSD)
+PLATFORM = netbsd
+else ifeq ($(UNAME_S),OpenBSD)
+PLATFORM = openbsd
+else ifeq ($(UNAME_S),DragonFly)
+PLATFORM = dragonfly
+else ifeq ($(shell uname -o),Msys)
+PLATFORM = msys2
+else
+$(error Unable to detect platform. Please open a ticket with the output of uname -a.)
+endif
+
+export PLATFORM
+endif
+
+# Core targets.
+
+all:: deps app rel
+
+# Noop to avoid a Make warning when there's nothing to do.
+rel::
+ $(verbose) :
+
+relup:: deps app
+
+check:: tests
+
+clean:: clean-crashdump
+
+clean-crashdump:
+ifneq ($(wildcard erl_crash.dump),)
+ $(gen_verbose) rm -f erl_crash.dump
+endif
+
+distclean:: clean distclean-tmp
+
+$(ERLANG_MK_TMP):
+ $(verbose) mkdir -p $(ERLANG_MK_TMP)
+
+distclean-tmp:
+ $(gen_verbose) rm -rf $(ERLANG_MK_TMP)
+
+help::
+ $(verbose) printf "%s\n" \
+ "erlang.mk (version $(ERLANG_MK_VERSION)) is distributed under the terms of the ISC License." \
+ "Copyright (c) 2013-2016 Loïc Hoguin <essen@ninenines.eu>" \
+ "" \
+ "Usage: [V=1] $(MAKE) [target]..." \
+ "" \
+ "Core targets:" \
+ " all Run deps, app and rel targets in that order" \
+ " app Compile the project" \
+ " deps Fetch dependencies (if needed) and compile them" \
+ " fetch-deps Fetch dependencies recursively (if needed) without compiling them" \
+ " list-deps List dependencies recursively on stdout" \
+ " search q=... Search for a package in the built-in index" \
+ " rel Build a release for this project, if applicable" \
+ " docs Build the documentation for this project" \
+ " install-docs Install the man pages for this project" \
+ " check Compile and run all tests and analysis for this project" \
+ " tests Run the tests for this project" \
+ " clean Delete temporary and output files from most targets" \
+ " distclean Delete all temporary and output files" \
+ " help Display this help and exit" \
+ " erlang-mk Update erlang.mk to the latest version"
+
+# Core functions.
+
+empty :=
+space := $(empty) $(empty)
+tab := $(empty) $(empty)
+comma := ,
+
+define newline
+
+
+endef
+
+define comma_list
+$(subst $(space),$(comma),$(strip $(1)))
+endef
+
+define escape_dquotes
+$(subst ",\",$1)
+endef
+
+# Adding erlang.mk to make Erlang scripts that call init:get_plain_arguments() happy.
+define erlang
+$(ERL) $2 -pz $(ERLANG_MK_TMP)/rebar/ebin -eval "$(subst $(newline),,$(call escape_dquotes,$1))" -- erlang.mk
+endef
+
+ifeq ($(PLATFORM),msys2)
+core_native_path = $(shell cygpath -m $1)
+else
+core_native_path = $1
+endif
+
+core_http_get = curl -Lf$(if $(filter-out 0,$(V)),,s)o $(call core_native_path,$1) $2
+
+core_eq = $(and $(findstring $(1),$(2)),$(findstring $(2),$(1)))
+
+# We skip files that contain spaces because they end up causing issues.
+core_find = $(if $(wildcard $1),$(shell find $(1:%/=%) \( -type l -o -type f \) -name $(subst *,\*,$2) | grep -v " "))
+
+core_lc = $(subst A,a,$(subst B,b,$(subst C,c,$(subst D,d,$(subst E,e,$(subst F,f,$(subst G,g,$(subst H,h,$(subst I,i,$(subst J,j,$(subst K,k,$(subst L,l,$(subst M,m,$(subst N,n,$(subst O,o,$(subst P,p,$(subst Q,q,$(subst R,r,$(subst S,s,$(subst T,t,$(subst U,u,$(subst V,v,$(subst W,w,$(subst X,x,$(subst Y,y,$(subst Z,z,$(1)))))))))))))))))))))))))))
+
+core_ls = $(filter-out $(1),$(shell echo $(1)))
+
+# @todo Use a solution that does not require using perl.
+core_relpath = $(shell perl -e 'use File::Spec; print File::Spec->abs2rel(@ARGV) . "\n"' $1 $2)
+
+define core_render
+ printf -- '$(subst $(newline),\n,$(subst %,%%,$(subst ','\'',$(subst $(tab),$(WS),$(call $(1))))))\n' > $(2)
+endef
+
+# Automated update.
+
+ERLANG_MK_REPO ?= https://github.com/ninenines/erlang.mk
+ERLANG_MK_COMMIT ?=
+ERLANG_MK_BUILD_CONFIG ?= build.config
+ERLANG_MK_BUILD_DIR ?= .erlang.mk.build
+
+erlang-mk: WITHOUT ?= $(ERLANG_MK_WITHOUT)
+erlang-mk:
+ifdef ERLANG_MK_COMMIT
+ $(verbose) git clone $(ERLANG_MK_REPO) $(ERLANG_MK_BUILD_DIR)
+ $(verbose) cd $(ERLANG_MK_BUILD_DIR) && git checkout $(ERLANG_MK_COMMIT)
+else
+ $(verbose) git clone --depth 1 $(ERLANG_MK_REPO) $(ERLANG_MK_BUILD_DIR)
+endif
+ $(verbose) if [ -f $(ERLANG_MK_BUILD_CONFIG) ]; then cp $(ERLANG_MK_BUILD_CONFIG) $(ERLANG_MK_BUILD_DIR)/build.config; fi
+ $(gen_verbose) $(MAKE) --no-print-directory -C $(ERLANG_MK_BUILD_DIR) WITHOUT='$(strip $(WITHOUT))' UPGRADE=1
+ $(verbose) cp $(ERLANG_MK_BUILD_DIR)/erlang.mk ./erlang.mk
+ $(verbose) rm -rf $(ERLANG_MK_BUILD_DIR)
+ $(verbose) rm -rf $(ERLANG_MK_TMP)
+
+# The erlang.mk package index is bundled in the default erlang.mk build.
+# Search for the string "copyright" to skip to the rest of the code.
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-kerl
+
+KERL_INSTALL_DIR ?= $(HOME)/erlang
+
+ifeq ($(strip $(KERL)),)
+KERL := $(ERLANG_MK_TMP)/kerl/kerl
+endif
+
+KERL_DIR = $(ERLANG_MK_TMP)/kerl
+
+export KERL
+
+KERL_GIT ?= https://github.com/kerl/kerl
+KERL_COMMIT ?= master
+
+KERL_MAKEFLAGS ?=
+
+OTP_GIT ?= https://github.com/erlang/otp
+
+define kerl_otp_target
+$(KERL_INSTALL_DIR)/$(1): $(KERL)
+ $(verbose) if [ ! -d $$@ ]; then \
+ MAKEFLAGS="$(KERL_MAKEFLAGS)" $(KERL) build git $(OTP_GIT) $(1) $(1); \
+ $(KERL) install $(1) $(KERL_INSTALL_DIR)/$(1); \
+ fi
+endef
+
+define kerl_hipe_target
+$(KERL_INSTALL_DIR)/$1-native: $(KERL)
+ $(verbose) if [ ! -d $$@ ]; then \
+ KERL_CONFIGURE_OPTIONS=--enable-native-libs \
+ MAKEFLAGS="$(KERL_MAKEFLAGS)" $(KERL) build git $(OTP_GIT) $1 $1-native; \
+ $(KERL) install $1-native $(KERL_INSTALL_DIR)/$1-native; \
+ fi
+endef
+
+$(KERL): $(KERL_DIR)
+
+$(KERL_DIR): | $(ERLANG_MK_TMP)
+ $(gen_verbose) git clone --depth 1 $(KERL_GIT) $(ERLANG_MK_TMP)/kerl
+ $(verbose) cd $(ERLANG_MK_TMP)/kerl && git checkout $(KERL_COMMIT)
+ $(verbose) chmod +x $(KERL)
+
+distclean:: distclean-kerl
+
+distclean-kerl:
+ $(gen_verbose) rm -rf $(KERL_DIR)
+
+# Allow users to select which version of Erlang/OTP to use for a project.
+
+ifneq ($(strip $(LATEST_ERLANG_OTP)),)
+# In some environments it is necessary to filter out master.
+ERLANG_OTP := $(notdir $(lastword $(sort\
+ $(filter-out $(KERL_INSTALL_DIR)/master $(KERL_INSTALL_DIR)/OTP_R%,\
+ $(filter-out %-rc1 %-rc2 %-rc3,$(wildcard $(KERL_INSTALL_DIR)/*[^-native]))))))
+endif
+
+ERLANG_OTP ?=
+ERLANG_HIPE ?=
+
+# Use kerl to enforce a specific Erlang/OTP version for a project.
+ifneq ($(strip $(ERLANG_OTP)),)
+export PATH := $(KERL_INSTALL_DIR)/$(ERLANG_OTP)/bin:$(PATH)
+SHELL := env PATH=$(PATH) $(SHELL)
+$(eval $(call kerl_otp_target,$(ERLANG_OTP)))
+
+# Build Erlang/OTP only if it doesn't already exist.
+ifeq ($(wildcard $(KERL_INSTALL_DIR)/$(ERLANG_OTP))$(BUILD_ERLANG_OTP),)
+$(info Building Erlang/OTP $(ERLANG_OTP)... Please wait...)
+$(shell $(MAKE) $(KERL_INSTALL_DIR)/$(ERLANG_OTP) ERLANG_OTP=$(ERLANG_OTP) BUILD_ERLANG_OTP=1 >&2)
+endif
+
+else
+# Same for a HiPE enabled VM.
+ifneq ($(strip $(ERLANG_HIPE)),)
+export PATH := $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native/bin:$(PATH)
+SHELL := env PATH=$(PATH) $(SHELL)
+$(eval $(call kerl_hipe_target,$(ERLANG_HIPE)))
+
+# Build Erlang/OTP only if it doesn't already exist.
+ifeq ($(wildcard $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native)$(BUILD_ERLANG_OTP),)
+$(info Building HiPE-enabled Erlang/OTP $(ERLANG_HIPE)... Please wait...)
+$(shell $(MAKE) $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native ERLANG_HIPE=$(ERLANG_HIPE) BUILD_ERLANG_OTP=1 >&2)
+endif
+
+endif
+endif
+
+PACKAGES += aberth
+pkg_aberth_name = aberth
+pkg_aberth_description = Generic BERT-RPC server in Erlang
+pkg_aberth_homepage = https://github.com/a13x/aberth
+pkg_aberth_fetch = git
+pkg_aberth_repo = https://github.com/a13x/aberth
+pkg_aberth_commit = master
+
+PACKAGES += active
+pkg_active_name = active
+pkg_active_description = Active development for Erlang: rebuild and reload source/binary files while the VM is running
+pkg_active_homepage = https://github.com/proger/active
+pkg_active_fetch = git
+pkg_active_repo = https://github.com/proger/active
+pkg_active_commit = master
+
+PACKAGES += actordb_core
+pkg_actordb_core_name = actordb_core
+pkg_actordb_core_description = ActorDB main source
+pkg_actordb_core_homepage = http://www.actordb.com/
+pkg_actordb_core_fetch = git
+pkg_actordb_core_repo = https://github.com/biokoda/actordb_core
+pkg_actordb_core_commit = master
+
+PACKAGES += actordb_thrift
+pkg_actordb_thrift_name = actordb_thrift
+pkg_actordb_thrift_description = Thrift API for ActorDB
+pkg_actordb_thrift_homepage = http://www.actordb.com/
+pkg_actordb_thrift_fetch = git
+pkg_actordb_thrift_repo = https://github.com/biokoda/actordb_thrift
+pkg_actordb_thrift_commit = master
+
+PACKAGES += aleppo
+pkg_aleppo_name = aleppo
+pkg_aleppo_description = Alternative Erlang Pre-Processor
+pkg_aleppo_homepage = https://github.com/ErlyORM/aleppo
+pkg_aleppo_fetch = git
+pkg_aleppo_repo = https://github.com/ErlyORM/aleppo
+pkg_aleppo_commit = master
+
+PACKAGES += alog
+pkg_alog_name = alog
+pkg_alog_description = Simply the best logging framework for Erlang
+pkg_alog_homepage = https://github.com/siberian-fast-food/alogger
+pkg_alog_fetch = git
+pkg_alog_repo = https://github.com/siberian-fast-food/alogger
+pkg_alog_commit = master
+
+PACKAGES += amqp_client
+pkg_amqp_client_name = amqp_client
+pkg_amqp_client_description = RabbitMQ Erlang AMQP client
+pkg_amqp_client_homepage = https://www.rabbitmq.com/erlang-client-user-guide.html
+pkg_amqp_client_fetch = git
+pkg_amqp_client_repo = https://github.com/rabbitmq/rabbitmq-erlang-client.git
+pkg_amqp_client_commit = master
+
+PACKAGES += annotations
+pkg_annotations_name = annotations
+pkg_annotations_description = Simple code instrumentation utilities
+pkg_annotations_homepage = https://github.com/hyperthunk/annotations
+pkg_annotations_fetch = git
+pkg_annotations_repo = https://github.com/hyperthunk/annotations
+pkg_annotations_commit = master
+
+PACKAGES += antidote
+pkg_antidote_name = antidote
+pkg_antidote_description = Large-scale computation without synchronisation
+pkg_antidote_homepage = https://syncfree.lip6.fr/
+pkg_antidote_fetch = git
+pkg_antidote_repo = https://github.com/SyncFree/antidote
+pkg_antidote_commit = master
+
+PACKAGES += apns
+pkg_apns_name = apns
+pkg_apns_description = Apple Push Notification Server for Erlang
+pkg_apns_homepage = http://inaka.github.com/apns4erl
+pkg_apns_fetch = git
+pkg_apns_repo = https://github.com/inaka/apns4erl
+pkg_apns_commit = master
+
+PACKAGES += asciideck
+pkg_asciideck_name = asciideck
+pkg_asciideck_description = Asciidoc for Erlang.
+pkg_asciideck_homepage = https://ninenines.eu
+pkg_asciideck_fetch = git
+pkg_asciideck_repo = https://github.com/ninenines/asciideck
+pkg_asciideck_commit = master
+
+PACKAGES += azdht
+pkg_azdht_name = azdht
+pkg_azdht_description = Azureus Distributed Hash Table (DHT) in Erlang
+pkg_azdht_homepage = https://github.com/arcusfelis/azdht
+pkg_azdht_fetch = git
+pkg_azdht_repo = https://github.com/arcusfelis/azdht
+pkg_azdht_commit = master
+
+PACKAGES += backoff
+pkg_backoff_name = backoff
+pkg_backoff_description = Simple exponential backoffs in Erlang
+pkg_backoff_homepage = https://github.com/ferd/backoff
+pkg_backoff_fetch = git
+pkg_backoff_repo = https://github.com/ferd/backoff
+pkg_backoff_commit = master
+
+PACKAGES += barrel_tcp
+pkg_barrel_tcp_name = barrel_tcp
+pkg_barrel_tcp_description = barrel is a generic TCP acceptor pool with low latency in Erlang.
+pkg_barrel_tcp_homepage = https://github.com/benoitc-attic/barrel_tcp
+pkg_barrel_tcp_fetch = git
+pkg_barrel_tcp_repo = https://github.com/benoitc-attic/barrel_tcp
+pkg_barrel_tcp_commit = master
+
+PACKAGES += basho_bench
+pkg_basho_bench_name = basho_bench
+pkg_basho_bench_description = A load-generation and testing tool for basically whatever you can write a returning Erlang function for.
+pkg_basho_bench_homepage = https://github.com/basho/basho_bench
+pkg_basho_bench_fetch = git
+pkg_basho_bench_repo = https://github.com/basho/basho_bench
+pkg_basho_bench_commit = master
+
+PACKAGES += bcrypt
+pkg_bcrypt_name = bcrypt
+pkg_bcrypt_description = Bcrypt Erlang / C library
+pkg_bcrypt_homepage = https://github.com/erlangpack/bcrypt
+pkg_bcrypt_fetch = git
+pkg_bcrypt_repo = https://github.com/erlangpack/bcrypt.git
+pkg_bcrypt_commit = master
+
+PACKAGES += beam
+pkg_beam_name = beam
+pkg_beam_description = BEAM emulator written in Erlang
+pkg_beam_homepage = https://github.com/tonyrog/beam
+pkg_beam_fetch = git
+pkg_beam_repo = https://github.com/tonyrog/beam
+pkg_beam_commit = master
+
+PACKAGES += beanstalk
+pkg_beanstalk_name = beanstalk
+pkg_beanstalk_description = An Erlang client for beanstalkd
+pkg_beanstalk_homepage = https://github.com/tim/erlang-beanstalk
+pkg_beanstalk_fetch = git
+pkg_beanstalk_repo = https://github.com/tim/erlang-beanstalk
+pkg_beanstalk_commit = master
+
+PACKAGES += bear
+pkg_bear_name = bear
+pkg_bear_description = a set of statistics functions for erlang
+pkg_bear_homepage = https://github.com/boundary/bear
+pkg_bear_fetch = git
+pkg_bear_repo = https://github.com/boundary/bear
+pkg_bear_commit = master
+
+PACKAGES += bertconf
+pkg_bertconf_name = bertconf
+pkg_bertconf_description = Make ETS tables out of static BERT files that are auto-reloaded
+pkg_bertconf_homepage = https://github.com/ferd/bertconf
+pkg_bertconf_fetch = git
+pkg_bertconf_repo = https://github.com/ferd/bertconf
+pkg_bertconf_commit = master
+
+PACKAGES += bifrost
+pkg_bifrost_name = bifrost
+pkg_bifrost_description = Erlang FTP Server Framework
+pkg_bifrost_homepage = https://github.com/thorstadt/bifrost
+pkg_bifrost_fetch = git
+pkg_bifrost_repo = https://github.com/thorstadt/bifrost
+pkg_bifrost_commit = master
+
+PACKAGES += binpp
+pkg_binpp_name = binpp
+pkg_binpp_description = Erlang Binary Pretty Printer
+pkg_binpp_homepage = https://github.com/jtendo/binpp
+pkg_binpp_fetch = git
+pkg_binpp_repo = https://github.com/jtendo/binpp
+pkg_binpp_commit = master
+
+PACKAGES += bisect
+pkg_bisect_name = bisect
+pkg_bisect_description = Ordered fixed-size binary dictionary in Erlang
+pkg_bisect_homepage = https://github.com/knutin/bisect
+pkg_bisect_fetch = git
+pkg_bisect_repo = https://github.com/knutin/bisect
+pkg_bisect_commit = master
+
+PACKAGES += bitcask
+pkg_bitcask_name = bitcask
+pkg_bitcask_description = because you need another key/value storage engine
+pkg_bitcask_homepage = https://github.com/basho/bitcask
+pkg_bitcask_fetch = git
+pkg_bitcask_repo = https://github.com/basho/bitcask
+pkg_bitcask_commit = develop
+
+PACKAGES += bitstore
+pkg_bitstore_name = bitstore
+pkg_bitstore_description = A document based ontology development environment
+pkg_bitstore_homepage = https://github.com/bdionne/bitstore
+pkg_bitstore_fetch = git
+pkg_bitstore_repo = https://github.com/bdionne/bitstore
+pkg_bitstore_commit = master
+
+PACKAGES += bootstrap
+pkg_bootstrap_name = bootstrap
+pkg_bootstrap_description = A simple, yet powerful Erlang cluster bootstrapping application.
+pkg_bootstrap_homepage = https://github.com/schlagert/bootstrap
+pkg_bootstrap_fetch = git
+pkg_bootstrap_repo = https://github.com/schlagert/bootstrap
+pkg_bootstrap_commit = master
+
+PACKAGES += boss
+pkg_boss_name = boss
+pkg_boss_description = Erlang web MVC, now featuring Comet
+pkg_boss_homepage = https://github.com/ChicagoBoss/ChicagoBoss
+pkg_boss_fetch = git
+pkg_boss_repo = https://github.com/ChicagoBoss/ChicagoBoss
+pkg_boss_commit = master
+
+PACKAGES += boss_db
+pkg_boss_db_name = boss_db
+pkg_boss_db_description = BossDB: a sharded, caching, pooling, evented ORM for Erlang
+pkg_boss_db_homepage = https://github.com/ErlyORM/boss_db
+pkg_boss_db_fetch = git
+pkg_boss_db_repo = https://github.com/ErlyORM/boss_db
+pkg_boss_db_commit = master
+
+PACKAGES += brod
+pkg_brod_name = brod
+pkg_brod_description = Kafka client in Erlang
+pkg_brod_homepage = https://github.com/klarna/brod
+pkg_brod_fetch = git
+pkg_brod_repo = https://github.com/klarna/brod.git
+pkg_brod_commit = master
+
+PACKAGES += bson
+pkg_bson_name = bson
+pkg_bson_description = BSON documents in Erlang, see bsonspec.org
+pkg_bson_homepage = https://github.com/comtihon/bson-erlang
+pkg_bson_fetch = git
+pkg_bson_repo = https://github.com/comtihon/bson-erlang
+pkg_bson_commit = master
+
+PACKAGES += bullet
+pkg_bullet_name = bullet
+pkg_bullet_description = Simple, reliable, efficient streaming for Cowboy.
+pkg_bullet_homepage = http://ninenines.eu
+pkg_bullet_fetch = git
+pkg_bullet_repo = https://github.com/ninenines/bullet
+pkg_bullet_commit = master
+
+PACKAGES += cache
+pkg_cache_name = cache
+pkg_cache_description = Erlang in-memory cache
+pkg_cache_homepage = https://github.com/fogfish/cache
+pkg_cache_fetch = git
+pkg_cache_repo = https://github.com/fogfish/cache
+pkg_cache_commit = master
+
+PACKAGES += cake
+pkg_cake_name = cake
+pkg_cake_description = Really simple terminal colorization
+pkg_cake_homepage = https://github.com/darach/cake-erl
+pkg_cake_fetch = git
+pkg_cake_repo = https://github.com/darach/cake-erl
+pkg_cake_commit = master
+
+PACKAGES += carotene
+pkg_carotene_name = carotene
+pkg_carotene_description = Real-time server
+pkg_carotene_homepage = https://github.com/carotene/carotene
+pkg_carotene_fetch = git
+pkg_carotene_repo = https://github.com/carotene/carotene
+pkg_carotene_commit = master
+
+PACKAGES += cberl
+pkg_cberl_name = cberl
+pkg_cberl_description = NIF based Erlang bindings for Couchbase
+pkg_cberl_homepage = https://github.com/chitika/cberl
+pkg_cberl_fetch = git
+pkg_cberl_repo = https://github.com/chitika/cberl
+pkg_cberl_commit = master
+
+PACKAGES += cecho
+pkg_cecho_name = cecho
+pkg_cecho_description = An ncurses library for Erlang
+pkg_cecho_homepage = https://github.com/mazenharake/cecho
+pkg_cecho_fetch = git
+pkg_cecho_repo = https://github.com/mazenharake/cecho
+pkg_cecho_commit = master
+
+PACKAGES += cferl
+pkg_cferl_name = cferl
+pkg_cferl_description = Rackspace / Open Stack Cloud Files Erlang Client
+pkg_cferl_homepage = https://github.com/ddossot/cferl
+pkg_cferl_fetch = git
+pkg_cferl_repo = https://github.com/ddossot/cferl
+pkg_cferl_commit = master
+
+PACKAGES += chaos_monkey
+pkg_chaos_monkey_name = chaos_monkey
+pkg_chaos_monkey_description = This is The CHAOS MONKEY. It will kill your processes.
+pkg_chaos_monkey_homepage = https://github.com/dLuna/chaos_monkey
+pkg_chaos_monkey_fetch = git
+pkg_chaos_monkey_repo = https://github.com/dLuna/chaos_monkey
+pkg_chaos_monkey_commit = master
+
+PACKAGES += check_node
+pkg_check_node_name = check_node
+pkg_check_node_description = Nagios Scripts for monitoring Riak
+pkg_check_node_homepage = https://github.com/basho-labs/riak_nagios
+pkg_check_node_fetch = git
+pkg_check_node_repo = https://github.com/basho-labs/riak_nagios
+pkg_check_node_commit = master
+
+PACKAGES += chronos
+pkg_chronos_name = chronos
+pkg_chronos_description = Timer module for Erlang that makes it easy to abstract time out of the tests.
+pkg_chronos_homepage = https://github.com/lehoff/chronos
+pkg_chronos_fetch = git
+pkg_chronos_repo = https://github.com/lehoff/chronos
+pkg_chronos_commit = master
+
+PACKAGES += chumak
+pkg_chumak_name = chumak
+pkg_chumak_description = Pure Erlang implementation of ZeroMQ Message Transport Protocol.
+pkg_chumak_homepage = http://choven.ca
+pkg_chumak_fetch = git
+pkg_chumak_repo = https://github.com/chovencorp/chumak
+pkg_chumak_commit = master
+
+PACKAGES += cl
+pkg_cl_name = cl
+pkg_cl_description = OpenCL binding for Erlang
+pkg_cl_homepage = https://github.com/tonyrog/cl
+pkg_cl_fetch = git
+pkg_cl_repo = https://github.com/tonyrog/cl
+pkg_cl_commit = master
+
+PACKAGES += clique
+pkg_clique_name = clique
+pkg_clique_description = CLI Framework for Erlang
+pkg_clique_homepage = https://github.com/basho/clique
+pkg_clique_fetch = git
+pkg_clique_repo = https://github.com/basho/clique
+pkg_clique_commit = develop
+
+PACKAGES += cloudi_core
+pkg_cloudi_core_name = cloudi_core
+pkg_cloudi_core_description = CloudI internal service runtime
+pkg_cloudi_core_homepage = http://cloudi.org/
+pkg_cloudi_core_fetch = git
+pkg_cloudi_core_repo = https://github.com/CloudI/cloudi_core
+pkg_cloudi_core_commit = master
+
+PACKAGES += cloudi_service_api_requests
+pkg_cloudi_service_api_requests_name = cloudi_service_api_requests
+pkg_cloudi_service_api_requests_description = CloudI Service API requests (JSON-RPC/Erlang-term support)
+pkg_cloudi_service_api_requests_homepage = http://cloudi.org/
+pkg_cloudi_service_api_requests_fetch = git
+pkg_cloudi_service_api_requests_repo = https://github.com/CloudI/cloudi_service_api_requests
+pkg_cloudi_service_api_requests_commit = master
+
+PACKAGES += cloudi_service_db
+pkg_cloudi_service_db_name = cloudi_service_db
+pkg_cloudi_service_db_description = CloudI Database (in-memory/testing/generic)
+pkg_cloudi_service_db_homepage = http://cloudi.org/
+pkg_cloudi_service_db_fetch = git
+pkg_cloudi_service_db_repo = https://github.com/CloudI/cloudi_service_db
+pkg_cloudi_service_db_commit = master
+
+PACKAGES += cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_name = cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_description = Cassandra CloudI Service
+pkg_cloudi_service_db_cassandra_homepage = http://cloudi.org/
+pkg_cloudi_service_db_cassandra_fetch = git
+pkg_cloudi_service_db_cassandra_repo = https://github.com/CloudI/cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_commit = master
+
+PACKAGES += cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_name = cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_description = Cassandra CQL CloudI Service
+pkg_cloudi_service_db_cassandra_cql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_cassandra_cql_fetch = git
+pkg_cloudi_service_db_cassandra_cql_repo = https://github.com/CloudI/cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_commit = master
+
+PACKAGES += cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_name = cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_description = CouchDB CloudI Service
+pkg_cloudi_service_db_couchdb_homepage = http://cloudi.org/
+pkg_cloudi_service_db_couchdb_fetch = git
+pkg_cloudi_service_db_couchdb_repo = https://github.com/CloudI/cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_commit = master
+
+PACKAGES += cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_name = cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_description = elasticsearch CloudI Service
+pkg_cloudi_service_db_elasticsearch_homepage = http://cloudi.org/
+pkg_cloudi_service_db_elasticsearch_fetch = git
+pkg_cloudi_service_db_elasticsearch_repo = https://github.com/CloudI/cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_commit = master
+
+PACKAGES += cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_name = cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_description = memcached CloudI Service
+pkg_cloudi_service_db_memcached_homepage = http://cloudi.org/
+pkg_cloudi_service_db_memcached_fetch = git
+pkg_cloudi_service_db_memcached_repo = https://github.com/CloudI/cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_commit = master
+
+PACKAGES += cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_name = cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_description = MySQL CloudI Service
+pkg_cloudi_service_db_mysql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_mysql_fetch = git
+pkg_cloudi_service_db_mysql_repo = https://github.com/CloudI/cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_commit = master
+
+PACKAGES += cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_name = cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_description = PostgreSQL CloudI Service
+pkg_cloudi_service_db_pgsql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_pgsql_fetch = git
+pkg_cloudi_service_db_pgsql_repo = https://github.com/CloudI/cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_commit = master
+
+PACKAGES += cloudi_service_db_riak
+pkg_cloudi_service_db_riak_name = cloudi_service_db_riak
+pkg_cloudi_service_db_riak_description = Riak CloudI Service
+pkg_cloudi_service_db_riak_homepage = http://cloudi.org/
+pkg_cloudi_service_db_riak_fetch = git
+pkg_cloudi_service_db_riak_repo = https://github.com/CloudI/cloudi_service_db_riak
+pkg_cloudi_service_db_riak_commit = master
+
+PACKAGES += cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_name = cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_description = Tokyo Tyrant CloudI Service
+pkg_cloudi_service_db_tokyotyrant_homepage = http://cloudi.org/
+pkg_cloudi_service_db_tokyotyrant_fetch = git
+pkg_cloudi_service_db_tokyotyrant_repo = https://github.com/CloudI/cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_commit = master
+
+PACKAGES += cloudi_service_filesystem
+pkg_cloudi_service_filesystem_name = cloudi_service_filesystem
+pkg_cloudi_service_filesystem_description = Filesystem CloudI Service
+pkg_cloudi_service_filesystem_homepage = http://cloudi.org/
+pkg_cloudi_service_filesystem_fetch = git
+pkg_cloudi_service_filesystem_repo = https://github.com/CloudI/cloudi_service_filesystem
+pkg_cloudi_service_filesystem_commit = master
+
+PACKAGES += cloudi_service_http_client
+pkg_cloudi_service_http_client_name = cloudi_service_http_client
+pkg_cloudi_service_http_client_description = HTTP client CloudI Service
+pkg_cloudi_service_http_client_homepage = http://cloudi.org/
+pkg_cloudi_service_http_client_fetch = git
+pkg_cloudi_service_http_client_repo = https://github.com/CloudI/cloudi_service_http_client
+pkg_cloudi_service_http_client_commit = master
+
+PACKAGES += cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_name = cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_description = cowboy HTTP/HTTPS CloudI Service
+pkg_cloudi_service_http_cowboy_homepage = http://cloudi.org/
+pkg_cloudi_service_http_cowboy_fetch = git
+pkg_cloudi_service_http_cowboy_repo = https://github.com/CloudI/cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_commit = master
+
+PACKAGES += cloudi_service_http_elli
+pkg_cloudi_service_http_elli_name = cloudi_service_http_elli
+pkg_cloudi_service_http_elli_description = elli HTTP CloudI Service
+pkg_cloudi_service_http_elli_homepage = http://cloudi.org/
+pkg_cloudi_service_http_elli_fetch = git
+pkg_cloudi_service_http_elli_repo = https://github.com/CloudI/cloudi_service_http_elli
+pkg_cloudi_service_http_elli_commit = master
+
+PACKAGES += cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_name = cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_description = Map/Reduce CloudI Service
+pkg_cloudi_service_map_reduce_homepage = http://cloudi.org/
+pkg_cloudi_service_map_reduce_fetch = git
+pkg_cloudi_service_map_reduce_repo = https://github.com/CloudI/cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_commit = master
+
+PACKAGES += cloudi_service_oauth1
+pkg_cloudi_service_oauth1_name = cloudi_service_oauth1
+pkg_cloudi_service_oauth1_description = OAuth v1.0 CloudI Service
+pkg_cloudi_service_oauth1_homepage = http://cloudi.org/
+pkg_cloudi_service_oauth1_fetch = git
+pkg_cloudi_service_oauth1_repo = https://github.com/CloudI/cloudi_service_oauth1
+pkg_cloudi_service_oauth1_commit = master
+
+PACKAGES += cloudi_service_queue
+pkg_cloudi_service_queue_name = cloudi_service_queue
+pkg_cloudi_service_queue_description = Persistent Queue Service
+pkg_cloudi_service_queue_homepage = http://cloudi.org/
+pkg_cloudi_service_queue_fetch = git
+pkg_cloudi_service_queue_repo = https://github.com/CloudI/cloudi_service_queue
+pkg_cloudi_service_queue_commit = master
+
+PACKAGES += cloudi_service_quorum
+pkg_cloudi_service_quorum_name = cloudi_service_quorum
+pkg_cloudi_service_quorum_description = CloudI Quorum Service
+pkg_cloudi_service_quorum_homepage = http://cloudi.org/
+pkg_cloudi_service_quorum_fetch = git
+pkg_cloudi_service_quorum_repo = https://github.com/CloudI/cloudi_service_quorum
+pkg_cloudi_service_quorum_commit = master
+
+PACKAGES += cloudi_service_router
+pkg_cloudi_service_router_name = cloudi_service_router
+pkg_cloudi_service_router_description = CloudI Router Service
+pkg_cloudi_service_router_homepage = http://cloudi.org/
+pkg_cloudi_service_router_fetch = git
+pkg_cloudi_service_router_repo = https://github.com/CloudI/cloudi_service_router
+pkg_cloudi_service_router_commit = master
+
+PACKAGES += cloudi_service_tcp
+pkg_cloudi_service_tcp_name = cloudi_service_tcp
+pkg_cloudi_service_tcp_description = TCP CloudI Service
+pkg_cloudi_service_tcp_homepage = http://cloudi.org/
+pkg_cloudi_service_tcp_fetch = git
+pkg_cloudi_service_tcp_repo = https://github.com/CloudI/cloudi_service_tcp
+pkg_cloudi_service_tcp_commit = master
+
+PACKAGES += cloudi_service_timers
+pkg_cloudi_service_timers_name = cloudi_service_timers
+pkg_cloudi_service_timers_description = Timers CloudI Service
+pkg_cloudi_service_timers_homepage = http://cloudi.org/
+pkg_cloudi_service_timers_fetch = git
+pkg_cloudi_service_timers_repo = https://github.com/CloudI/cloudi_service_timers
+pkg_cloudi_service_timers_commit = master
+
+PACKAGES += cloudi_service_udp
+pkg_cloudi_service_udp_name = cloudi_service_udp
+pkg_cloudi_service_udp_description = UDP CloudI Service
+pkg_cloudi_service_udp_homepage = http://cloudi.org/
+pkg_cloudi_service_udp_fetch = git
+pkg_cloudi_service_udp_repo = https://github.com/CloudI/cloudi_service_udp
+pkg_cloudi_service_udp_commit = master
+
+PACKAGES += cloudi_service_validate
+pkg_cloudi_service_validate_name = cloudi_service_validate
+pkg_cloudi_service_validate_description = CloudI Validate Service
+pkg_cloudi_service_validate_homepage = http://cloudi.org/
+pkg_cloudi_service_validate_fetch = git
+pkg_cloudi_service_validate_repo = https://github.com/CloudI/cloudi_service_validate
+pkg_cloudi_service_validate_commit = master
+
+PACKAGES += cloudi_service_zeromq
+pkg_cloudi_service_zeromq_name = cloudi_service_zeromq
+pkg_cloudi_service_zeromq_description = ZeroMQ CloudI Service
+pkg_cloudi_service_zeromq_homepage = http://cloudi.org/
+pkg_cloudi_service_zeromq_fetch = git
+pkg_cloudi_service_zeromq_repo = https://github.com/CloudI/cloudi_service_zeromq
+pkg_cloudi_service_zeromq_commit = master
+
+PACKAGES += cluster_info
+pkg_cluster_info_name = cluster_info
+pkg_cluster_info_description = Fork of Hibari's nifty cluster_info OTP app
+pkg_cluster_info_homepage = https://github.com/basho/cluster_info
+pkg_cluster_info_fetch = git
+pkg_cluster_info_repo = https://github.com/basho/cluster_info
+pkg_cluster_info_commit = master
+
+PACKAGES += color
+pkg_color_name = color
+pkg_color_description = ANSI colors for your Erlang
+pkg_color_homepage = https://github.com/julianduque/erlang-color
+pkg_color_fetch = git
+pkg_color_repo = https://github.com/julianduque/erlang-color
+pkg_color_commit = master
+
+PACKAGES += confetti
+pkg_confetti_name = confetti
+pkg_confetti_description = Erlang configuration provider / application:get_env/2 on steroids
+pkg_confetti_homepage = https://github.com/jtendo/confetti
+pkg_confetti_fetch = git
+pkg_confetti_repo = https://github.com/jtendo/confetti
+pkg_confetti_commit = master
+
+PACKAGES += couchbeam
+pkg_couchbeam_name = couchbeam
+pkg_couchbeam_description = Apache CouchDB client in Erlang
+pkg_couchbeam_homepage = https://github.com/benoitc/couchbeam
+pkg_couchbeam_fetch = git
+pkg_couchbeam_repo = https://github.com/benoitc/couchbeam
+pkg_couchbeam_commit = master
+
+PACKAGES += covertool
+pkg_covertool_name = covertool
+pkg_covertool_description = Tool to convert Erlang cover data files into Cobertura XML reports
+pkg_covertool_homepage = https://github.com/idubrov/covertool
+pkg_covertool_fetch = git
+pkg_covertool_repo = https://github.com/idubrov/covertool
+pkg_covertool_commit = master
+
+PACKAGES += cowboy
+pkg_cowboy_name = cowboy
+pkg_cowboy_description = Small, fast and modular HTTP server.
+pkg_cowboy_homepage = http://ninenines.eu
+pkg_cowboy_fetch = git
+pkg_cowboy_repo = https://github.com/ninenines/cowboy
+pkg_cowboy_commit = 1.0.4
+
+PACKAGES += cowdb
+pkg_cowdb_name = cowdb
+pkg_cowdb_description = Pure Key/Value database library for Erlang Applications
+pkg_cowdb_homepage = https://github.com/refuge/cowdb
+pkg_cowdb_fetch = git
+pkg_cowdb_repo = https://github.com/refuge/cowdb
+pkg_cowdb_commit = master
+
+PACKAGES += cowlib
+pkg_cowlib_name = cowlib
+pkg_cowlib_description = Support library for manipulating Web protocols.
+pkg_cowlib_homepage = http://ninenines.eu
+pkg_cowlib_fetch = git
+pkg_cowlib_repo = https://github.com/ninenines/cowlib
+pkg_cowlib_commit = 1.0.2
+
+PACKAGES += cpg
+pkg_cpg_name = cpg
+pkg_cpg_description = CloudI Process Groups
+pkg_cpg_homepage = https://github.com/okeuday/cpg
+pkg_cpg_fetch = git
+pkg_cpg_repo = https://github.com/okeuday/cpg
+pkg_cpg_commit = master
+
+PACKAGES += cqerl
+pkg_cqerl_name = cqerl
+pkg_cqerl_description = Native Erlang CQL client for Cassandra
+pkg_cqerl_homepage = https://matehat.github.io/cqerl/
+pkg_cqerl_fetch = git
+pkg_cqerl_repo = https://github.com/matehat/cqerl
+pkg_cqerl_commit = master
+
+PACKAGES += cr
+pkg_cr_name = cr
+pkg_cr_description = Chain Replication
+pkg_cr_homepage = https://synrc.com/apps/cr/doc/cr.htm
+pkg_cr_fetch = git
+pkg_cr_repo = https://github.com/spawnproc/cr
+pkg_cr_commit = master
+
+PACKAGES += cuttlefish
+pkg_cuttlefish_name = cuttlefish
+pkg_cuttlefish_description = never lose your childlike sense of wonder baby cuttlefish, promise me?
+pkg_cuttlefish_homepage = https://github.com/basho/cuttlefish
+pkg_cuttlefish_fetch = git
+pkg_cuttlefish_repo = https://github.com/basho/cuttlefish
+pkg_cuttlefish_commit = master
+
+PACKAGES += damocles
+pkg_damocles_name = damocles
+pkg_damocles_description = Erlang library for generating adversarial network conditions for QAing distributed applications/systems on a single Linux box.
+pkg_damocles_homepage = https://github.com/lostcolony/damocles
+pkg_damocles_fetch = git
+pkg_damocles_repo = https://github.com/lostcolony/damocles
+pkg_damocles_commit = master
+
+PACKAGES += debbie
+pkg_debbie_name = debbie
+pkg_debbie_description = .DEB Built In Erlang
+pkg_debbie_homepage = https://github.com/crownedgrouse/debbie
+pkg_debbie_fetch = git
+pkg_debbie_repo = https://github.com/crownedgrouse/debbie
+pkg_debbie_commit = master
+
+PACKAGES += decimal
+pkg_decimal_name = decimal
+pkg_decimal_description = An Erlang decimal arithmetic library
+pkg_decimal_homepage = https://github.com/tim/erlang-decimal
+pkg_decimal_fetch = git
+pkg_decimal_repo = https://github.com/tim/erlang-decimal
+pkg_decimal_commit = master
+
+PACKAGES += detergent
+pkg_detergent_name = detergent
+pkg_detergent_description = An emulsifying Erlang SOAP library
+pkg_detergent_homepage = https://github.com/devinus/detergent
+pkg_detergent_fetch = git
+pkg_detergent_repo = https://github.com/devinus/detergent
+pkg_detergent_commit = master
+
+PACKAGES += detest
+pkg_detest_name = detest
+pkg_detest_description = Tool for running tests on a cluster of erlang nodes
+pkg_detest_homepage = https://github.com/biokoda/detest
+pkg_detest_fetch = git
+pkg_detest_repo = https://github.com/biokoda/detest
+pkg_detest_commit = master
+
+PACKAGES += dh_date
+pkg_dh_date_name = dh_date
+pkg_dh_date_description = Date formatting / parsing library for erlang
+pkg_dh_date_homepage = https://github.com/daleharvey/dh_date
+pkg_dh_date_fetch = git
+pkg_dh_date_repo = https://github.com/daleharvey/dh_date
+pkg_dh_date_commit = master
+
+PACKAGES += dirbusterl
+pkg_dirbusterl_name = dirbusterl
+pkg_dirbusterl_description = DirBuster successor in Erlang
+pkg_dirbusterl_homepage = https://github.com/silentsignal/DirBustErl
+pkg_dirbusterl_fetch = git
+pkg_dirbusterl_repo = https://github.com/silentsignal/DirBustErl
+pkg_dirbusterl_commit = master
+
+PACKAGES += dispcount
+pkg_dispcount_name = dispcount
+pkg_dispcount_description = Erlang task dispatcher based on ETS counters.
+pkg_dispcount_homepage = https://github.com/ferd/dispcount
+pkg_dispcount_fetch = git
+pkg_dispcount_repo = https://github.com/ferd/dispcount
+pkg_dispcount_commit = master
+
+PACKAGES += dlhttpc
+pkg_dlhttpc_name = dlhttpc
+pkg_dlhttpc_description = dispcount-based lhttpc fork for massive amounts of requests to limited endpoints
+pkg_dlhttpc_homepage = https://github.com/ferd/dlhttpc
+pkg_dlhttpc_fetch = git
+pkg_dlhttpc_repo = https://github.com/ferd/dlhttpc
+pkg_dlhttpc_commit = master
+
+PACKAGES += dns
+pkg_dns_name = dns
+pkg_dns_description = Erlang DNS library
+pkg_dns_homepage = https://github.com/aetrion/dns_erlang
+pkg_dns_fetch = git
+pkg_dns_repo = https://github.com/aetrion/dns_erlang
+pkg_dns_commit = master
+
+PACKAGES += dnssd
+pkg_dnssd_name = dnssd
+pkg_dnssd_description = Erlang interface to Apple's Bonjour DNS Service Discovery implementation
+pkg_dnssd_homepage = https://github.com/benoitc/dnssd_erlang
+pkg_dnssd_fetch = git
+pkg_dnssd_repo = https://github.com/benoitc/dnssd_erlang
+pkg_dnssd_commit = master
+
+PACKAGES += dynamic_compile
+pkg_dynamic_compile_name = dynamic_compile
+pkg_dynamic_compile_description = compile and load erlang modules from string input
+pkg_dynamic_compile_homepage = https://github.com/jkvor/dynamic_compile
+pkg_dynamic_compile_fetch = git
+pkg_dynamic_compile_repo = https://github.com/jkvor/dynamic_compile
+pkg_dynamic_compile_commit = master
+
+PACKAGES += e2
+pkg_e2_name = e2
+pkg_e2_description = Library to simplify writing correct OTP applications.
+pkg_e2_homepage = http://e2project.org
+pkg_e2_fetch = git
+pkg_e2_repo = https://github.com/gar1t/e2
+pkg_e2_commit = master
+
+PACKAGES += eamf
+pkg_eamf_name = eamf
+pkg_eamf_description = eAMF provides Action Message Format (AMF) support for Erlang
+pkg_eamf_homepage = https://github.com/mrinalwadhwa/eamf
+pkg_eamf_fetch = git
+pkg_eamf_repo = https://github.com/mrinalwadhwa/eamf
+pkg_eamf_commit = master
+
+PACKAGES += eavro
+pkg_eavro_name = eavro
+pkg_eavro_description = Apache Avro encoder/decoder
+pkg_eavro_homepage = https://github.com/SIfoxDevTeam/eavro
+pkg_eavro_fetch = git
+pkg_eavro_repo = https://github.com/SIfoxDevTeam/eavro
+pkg_eavro_commit = master
+
+PACKAGES += ecapnp
+pkg_ecapnp_name = ecapnp
+pkg_ecapnp_description = Cap'n Proto library for Erlang
+pkg_ecapnp_homepage = https://github.com/kaos/ecapnp
+pkg_ecapnp_fetch = git
+pkg_ecapnp_repo = https://github.com/kaos/ecapnp
+pkg_ecapnp_commit = master
+
+PACKAGES += econfig
+pkg_econfig_name = econfig
+pkg_econfig_description = simple Erlang config handler using INI files
+pkg_econfig_homepage = https://github.com/benoitc/econfig
+pkg_econfig_fetch = git
+pkg_econfig_repo = https://github.com/benoitc/econfig
+pkg_econfig_commit = master
+
+PACKAGES += edate
+pkg_edate_name = edate
+pkg_edate_description = date manipulation library for erlang
+pkg_edate_homepage = https://github.com/dweldon/edate
+pkg_edate_fetch = git
+pkg_edate_repo = https://github.com/dweldon/edate
+pkg_edate_commit = master
+
+PACKAGES += edgar
+pkg_edgar_name = edgar
+pkg_edgar_description = Erlang Does GNU AR
+pkg_edgar_homepage = https://github.com/crownedgrouse/edgar
+pkg_edgar_fetch = git
+pkg_edgar_repo = https://github.com/crownedgrouse/edgar
+pkg_edgar_commit = master
+
+PACKAGES += edis
+pkg_edis_name = edis
+pkg_edis_description = An Erlang implementation of Redis KV Store
+pkg_edis_homepage = http://inaka.github.com/edis/
+pkg_edis_fetch = git
+pkg_edis_repo = https://github.com/inaka/edis
+pkg_edis_commit = master
+
+PACKAGES += edns
+pkg_edns_name = edns
+pkg_edns_description = Erlang/OTP DNS server
+pkg_edns_homepage = https://github.com/hcvst/erlang-dns
+pkg_edns_fetch = git
+pkg_edns_repo = https://github.com/hcvst/erlang-dns
+pkg_edns_commit = master
+
+PACKAGES += edown
+pkg_edown_name = edown
+pkg_edown_description = EDoc extension for generating Github-flavored Markdown
+pkg_edown_homepage = https://github.com/uwiger/edown
+pkg_edown_fetch = git
+pkg_edown_repo = https://github.com/uwiger/edown
+pkg_edown_commit = master
+
+PACKAGES += eep
+pkg_eep_name = eep
+pkg_eep_description = Erlang Easy Profiling (eep) application provides a way to analyze application performance and call hierarchy
+pkg_eep_homepage = https://github.com/virtan/eep
+pkg_eep_fetch = git
+pkg_eep_repo = https://github.com/virtan/eep
+pkg_eep_commit = master
+
+PACKAGES += eep_app
+pkg_eep_app_name = eep_app
+pkg_eep_app_description = Embedded Event Processing
+pkg_eep_app_homepage = https://github.com/darach/eep-erl
+pkg_eep_app_fetch = git
+pkg_eep_app_repo = https://github.com/darach/eep-erl
+pkg_eep_app_commit = master
+
+PACKAGES += efene
+pkg_efene_name = efene
+pkg_efene_description = Alternative syntax for the Erlang Programming Language focusing on simplicity, ease of use and programmer UX
+pkg_efene_homepage = https://github.com/efene/efene
+pkg_efene_fetch = git
+pkg_efene_repo = https://github.com/efene/efene
+pkg_efene_commit = master
+
+PACKAGES += egeoip
+pkg_egeoip_name = egeoip
+pkg_egeoip_description = Erlang IP Geolocation module, currently supporting the MaxMind GeoLite City Database.
+pkg_egeoip_homepage = https://github.com/mochi/egeoip
+pkg_egeoip_fetch = git
+pkg_egeoip_repo = https://github.com/mochi/egeoip
+pkg_egeoip_commit = master
+
+PACKAGES += ehsa
+pkg_ehsa_name = ehsa
+pkg_ehsa_description = Erlang HTTP server basic and digest authentication modules
+pkg_ehsa_homepage = https://bitbucket.org/a12n/ehsa
+pkg_ehsa_fetch = hg
+pkg_ehsa_repo = https://bitbucket.org/a12n/ehsa
+pkg_ehsa_commit = default
+
+PACKAGES += ej
+pkg_ej_name = ej
+pkg_ej_description = Helper module for working with Erlang terms representing JSON
+pkg_ej_homepage = https://github.com/seth/ej
+pkg_ej_fetch = git
+pkg_ej_repo = https://github.com/seth/ej
+pkg_ej_commit = master
+
+PACKAGES += ejabberd
+pkg_ejabberd_name = ejabberd
+pkg_ejabberd_description = Robust, ubiquitous and massively scalable Jabber / XMPP Instant Messaging platform
+pkg_ejabberd_homepage = https://github.com/processone/ejabberd
+pkg_ejabberd_fetch = git
+pkg_ejabberd_repo = https://github.com/processone/ejabberd
+pkg_ejabberd_commit = master
+
+PACKAGES += ejwt
+pkg_ejwt_name = ejwt
+pkg_ejwt_description = erlang library for JSON Web Token
+pkg_ejwt_homepage = https://github.com/artefactop/ejwt
+pkg_ejwt_fetch = git
+pkg_ejwt_repo = https://github.com/artefactop/ejwt
+pkg_ejwt_commit = master
+
+PACKAGES += ekaf
+pkg_ekaf_name = ekaf
+pkg_ekaf_description = A minimal, high-performance Kafka client in Erlang.
+pkg_ekaf_homepage = https://github.com/helpshift/ekaf
+pkg_ekaf_fetch = git
+pkg_ekaf_repo = https://github.com/helpshift/ekaf
+pkg_ekaf_commit = master
+
+PACKAGES += elarm
+pkg_elarm_name = elarm
+pkg_elarm_description = Alarm Manager for Erlang.
+pkg_elarm_homepage = https://github.com/esl/elarm
+pkg_elarm_fetch = git
+pkg_elarm_repo = https://github.com/esl/elarm
+pkg_elarm_commit = master
+
+PACKAGES += eleveldb
+pkg_eleveldb_name = eleveldb
+pkg_eleveldb_description = Erlang LevelDB API
+pkg_eleveldb_homepage = https://github.com/basho/eleveldb
+pkg_eleveldb_fetch = git
+pkg_eleveldb_repo = https://github.com/basho/eleveldb
+pkg_eleveldb_commit = master
+
+PACKAGES += elixir
+pkg_elixir_name = elixir
+pkg_elixir_description = Elixir is a dynamic, functional language designed for building scalable and maintainable applications
+pkg_elixir_homepage = https://elixir-lang.org/
+pkg_elixir_fetch = git
+pkg_elixir_repo = https://github.com/elixir-lang/elixir
+pkg_elixir_commit = master
+
+PACKAGES += elli
+pkg_elli_name = elli
+pkg_elli_description = Simple, robust and performant Erlang web server
+pkg_elli_homepage = https://github.com/elli-lib/elli
+pkg_elli_fetch = git
+pkg_elli_repo = https://github.com/elli-lib/elli
+pkg_elli_commit = master
+
+PACKAGES += elvis
+pkg_elvis_name = elvis
+pkg_elvis_description = Erlang Style Reviewer
+pkg_elvis_homepage = https://github.com/inaka/elvis
+pkg_elvis_fetch = git
+pkg_elvis_repo = https://github.com/inaka/elvis
+pkg_elvis_commit = master
+
+PACKAGES += emagick
+pkg_emagick_name = emagick
+pkg_emagick_description = Wrapper for Graphics/ImageMagick command line tool.
+pkg_emagick_homepage = https://github.com/kivra/emagick
+pkg_emagick_fetch = git
+pkg_emagick_repo = https://github.com/kivra/emagick
+pkg_emagick_commit = master
+
+PACKAGES += emysql
+pkg_emysql_name = emysql
+pkg_emysql_description = Stable, pure Erlang MySQL driver.
+pkg_emysql_homepage = https://github.com/Eonblast/Emysql
+pkg_emysql_fetch = git
+pkg_emysql_repo = https://github.com/Eonblast/Emysql
+pkg_emysql_commit = master
+
+PACKAGES += enm
+pkg_enm_name = enm
+pkg_enm_description = Erlang driver for nanomsg
+pkg_enm_homepage = https://github.com/basho/enm
+pkg_enm_fetch = git
+pkg_enm_repo = https://github.com/basho/enm
+pkg_enm_commit = master
+
+PACKAGES += entop
+pkg_entop_name = entop
+pkg_entop_description = A top-like tool for monitoring an Erlang node
+pkg_entop_homepage = https://github.com/mazenharake/entop
+pkg_entop_fetch = git
+pkg_entop_repo = https://github.com/mazenharake/entop
+pkg_entop_commit = master
+
+PACKAGES += epcap
+pkg_epcap_name = epcap
+pkg_epcap_description = Erlang packet capture interface using pcap
+pkg_epcap_homepage = https://github.com/msantos/epcap
+pkg_epcap_fetch = git
+pkg_epcap_repo = https://github.com/msantos/epcap
+pkg_epcap_commit = master
+
+PACKAGES += eper
+pkg_eper_name = eper
+pkg_eper_description = Erlang performance and debugging tools.
+pkg_eper_homepage = https://github.com/massemanet/eper
+pkg_eper_fetch = git
+pkg_eper_repo = https://github.com/massemanet/eper
+pkg_eper_commit = master
+
+PACKAGES += epgsql
+pkg_epgsql_name = epgsql
+pkg_epgsql_description = Erlang PostgreSQL client library.
+pkg_epgsql_homepage = https://github.com/epgsql/epgsql
+pkg_epgsql_fetch = git
+pkg_epgsql_repo = https://github.com/epgsql/epgsql
+pkg_epgsql_commit = master
+
+PACKAGES += episcina
+pkg_episcina_name = episcina
+pkg_episcina_description = A simple non intrusive resource pool for connections
+pkg_episcina_homepage = https://github.com/erlware/episcina
+pkg_episcina_fetch = git
+pkg_episcina_repo = https://github.com/erlware/episcina
+pkg_episcina_commit = master
+
+PACKAGES += eplot
+pkg_eplot_name = eplot
+pkg_eplot_description = A plot engine written in erlang.
+pkg_eplot_homepage = https://github.com/psyeugenic/eplot
+pkg_eplot_fetch = git
+pkg_eplot_repo = https://github.com/psyeugenic/eplot
+pkg_eplot_commit = master
+
+PACKAGES += epocxy
+pkg_epocxy_name = epocxy
+pkg_epocxy_description = Erlang Patterns of Concurrency
+pkg_epocxy_homepage = https://github.com/duomark/epocxy
+pkg_epocxy_fetch = git
+pkg_epocxy_repo = https://github.com/duomark/epocxy
+pkg_epocxy_commit = master
+
+PACKAGES += epubnub
+pkg_epubnub_name = epubnub
+pkg_epubnub_description = Erlang PubNub API
+pkg_epubnub_homepage = https://github.com/tsloughter/epubnub
+pkg_epubnub_fetch = git
+pkg_epubnub_repo = https://github.com/tsloughter/epubnub
+pkg_epubnub_commit = master
+
+PACKAGES += eqm
+pkg_eqm_name = eqm
+pkg_eqm_description = Erlang pub sub with supply-demand channels
+pkg_eqm_homepage = https://github.com/loucash/eqm
+pkg_eqm_fetch = git
+pkg_eqm_repo = https://github.com/loucash/eqm
+pkg_eqm_commit = master
+
+PACKAGES += eredis
+pkg_eredis_name = eredis
+pkg_eredis_description = Erlang Redis client
+pkg_eredis_homepage = https://github.com/wooga/eredis
+pkg_eredis_fetch = git
+pkg_eredis_repo = https://github.com/wooga/eredis
+pkg_eredis_commit = master
+
+PACKAGES += eredis_pool
+pkg_eredis_pool_name = eredis_pool
+pkg_eredis_pool_description = eredis_pool is Pool of Redis clients, using eredis and poolboy.
+pkg_eredis_pool_homepage = https://github.com/hiroeorz/eredis_pool
+pkg_eredis_pool_fetch = git
+pkg_eredis_pool_repo = https://github.com/hiroeorz/eredis_pool
+pkg_eredis_pool_commit = master
+
+PACKAGES += erl_streams
+pkg_erl_streams_name = erl_streams
+pkg_erl_streams_description = Streams in Erlang
+pkg_erl_streams_homepage = https://github.com/epappas/erl_streams
+pkg_erl_streams_fetch = git
+pkg_erl_streams_repo = https://github.com/epappas/erl_streams
+pkg_erl_streams_commit = master
+
+PACKAGES += erlang_cep
+pkg_erlang_cep_name = erlang_cep
+pkg_erlang_cep_description = A basic CEP package written in erlang
+pkg_erlang_cep_homepage = https://github.com/danmacklin/erlang_cep
+pkg_erlang_cep_fetch = git
+pkg_erlang_cep_repo = https://github.com/danmacklin/erlang_cep
+pkg_erlang_cep_commit = master
+
+PACKAGES += erlang_js
+pkg_erlang_js_name = erlang_js
+pkg_erlang_js_description = A linked-in driver for Erlang to Mozilla's Spidermonkey Javascript runtime.
+pkg_erlang_js_homepage = https://github.com/basho/erlang_js
+pkg_erlang_js_fetch = git
+pkg_erlang_js_repo = https://github.com/basho/erlang_js
+pkg_erlang_js_commit = master
+
+PACKAGES += erlang_localtime
+pkg_erlang_localtime_name = erlang_localtime
+pkg_erlang_localtime_description = Erlang library for conversion from one local time to another
+pkg_erlang_localtime_homepage = https://github.com/dmitryme/erlang_localtime
+pkg_erlang_localtime_fetch = git
+pkg_erlang_localtime_repo = https://github.com/dmitryme/erlang_localtime
+pkg_erlang_localtime_commit = master
+
+PACKAGES += erlang_smtp
+pkg_erlang_smtp_name = erlang_smtp
+pkg_erlang_smtp_description = Erlang SMTP and POP3 server code.
+pkg_erlang_smtp_homepage = https://github.com/tonyg/erlang-smtp
+pkg_erlang_smtp_fetch = git
+pkg_erlang_smtp_repo = https://github.com/tonyg/erlang-smtp
+pkg_erlang_smtp_commit = master
+
+PACKAGES += erlang_term
+pkg_erlang_term_name = erlang_term
+pkg_erlang_term_description = Erlang Term Info
+pkg_erlang_term_homepage = https://github.com/okeuday/erlang_term
+pkg_erlang_term_fetch = git
+pkg_erlang_term_repo = https://github.com/okeuday/erlang_term
+pkg_erlang_term_commit = master
+
+PACKAGES += erlastic_search
+pkg_erlastic_search_name = erlastic_search
+pkg_erlastic_search_description = An Erlang app for communicating with Elastic Search's REST interface.
+pkg_erlastic_search_homepage = https://github.com/tsloughter/erlastic_search
+pkg_erlastic_search_fetch = git
+pkg_erlastic_search_repo = https://github.com/tsloughter/erlastic_search
+pkg_erlastic_search_commit = master
+
+PACKAGES += erlasticsearch
+pkg_erlasticsearch_name = erlasticsearch
+pkg_erlasticsearch_description = Erlang thrift interface to elastic_search
+pkg_erlasticsearch_homepage = https://github.com/dieswaytoofast/erlasticsearch
+pkg_erlasticsearch_fetch = git
+pkg_erlasticsearch_repo = https://github.com/dieswaytoofast/erlasticsearch
+pkg_erlasticsearch_commit = master
+
+PACKAGES += erlbrake
+pkg_erlbrake_name = erlbrake
+pkg_erlbrake_description = Erlang Airbrake notification client
+pkg_erlbrake_homepage = https://github.com/kenpratt/erlbrake
+pkg_erlbrake_fetch = git
+pkg_erlbrake_repo = https://github.com/kenpratt/erlbrake
+pkg_erlbrake_commit = master
+
+PACKAGES += erlcloud
+pkg_erlcloud_name = erlcloud
+pkg_erlcloud_description = Cloud Computing library for erlang (Amazon EC2, S3, SQS, SimpleDB, Mechanical Turk, ELB)
+pkg_erlcloud_homepage = https://github.com/gleber/erlcloud
+pkg_erlcloud_fetch = git
+pkg_erlcloud_repo = https://github.com/gleber/erlcloud
+pkg_erlcloud_commit = master
+
+PACKAGES += erlcron
+pkg_erlcron_name = erlcron
+pkg_erlcron_description = Erlang cronish system
+pkg_erlcron_homepage = https://github.com/erlware/erlcron
+pkg_erlcron_fetch = git
+pkg_erlcron_repo = https://github.com/erlware/erlcron
+pkg_erlcron_commit = master
+
+PACKAGES += erldb
+pkg_erldb_name = erldb
+pkg_erldb_description = ORM (Object-relational mapping) application implemented in Erlang
+pkg_erldb_homepage = http://erldb.org
+pkg_erldb_fetch = git
+pkg_erldb_repo = https://github.com/erldb/erldb
+pkg_erldb_commit = master
+
+PACKAGES += erldis
+pkg_erldis_name = erldis
+pkg_erldis_description = redis erlang client library
+pkg_erldis_homepage = https://github.com/cstar/erldis
+pkg_erldis_fetch = git
+pkg_erldis_repo = https://github.com/cstar/erldis
+pkg_erldis_commit = master
+
+PACKAGES += erldns
+pkg_erldns_name = erldns
+pkg_erldns_description = DNS server, in erlang.
+pkg_erldns_homepage = https://github.com/aetrion/erl-dns
+pkg_erldns_fetch = git
+pkg_erldns_repo = https://github.com/aetrion/erl-dns
+pkg_erldns_commit = master
+
+PACKAGES += erldocker
+pkg_erldocker_name = erldocker
+pkg_erldocker_description = Docker Remote API client for Erlang
+pkg_erldocker_homepage = https://github.com/proger/erldocker
+pkg_erldocker_fetch = git
+pkg_erldocker_repo = https://github.com/proger/erldocker
+pkg_erldocker_commit = master
+
+PACKAGES += erlfsmon
+pkg_erlfsmon_name = erlfsmon
+pkg_erlfsmon_description = Erlang filesystem event watcher for Linux and OSX
+pkg_erlfsmon_homepage = https://github.com/proger/erlfsmon
+pkg_erlfsmon_fetch = git
+pkg_erlfsmon_repo = https://github.com/proger/erlfsmon
+pkg_erlfsmon_commit = master
+
+PACKAGES += erlgit
+pkg_erlgit_name = erlgit
+pkg_erlgit_description = Erlang convenience wrapper around git executable
+pkg_erlgit_homepage = https://github.com/gleber/erlgit
+pkg_erlgit_fetch = git
+pkg_erlgit_repo = https://github.com/gleber/erlgit
+pkg_erlgit_commit = master
+
+PACKAGES += erlguten
+pkg_erlguten_name = erlguten
+pkg_erlguten_description = ErlGuten is a system for high-quality typesetting, written purely in Erlang.
+pkg_erlguten_homepage = https://github.com/richcarl/erlguten
+pkg_erlguten_fetch = git
+pkg_erlguten_repo = https://github.com/richcarl/erlguten
+pkg_erlguten_commit = master
+
+PACKAGES += erlmc
+pkg_erlmc_name = erlmc
+pkg_erlmc_description = Erlang memcached binary protocol client
+pkg_erlmc_homepage = https://github.com/jkvor/erlmc
+pkg_erlmc_fetch = git
+pkg_erlmc_repo = https://github.com/jkvor/erlmc
+pkg_erlmc_commit = master
+
+PACKAGES += erlmongo
+pkg_erlmongo_name = erlmongo
+pkg_erlmongo_description = Record based Erlang driver for MongoDB with gridfs support
+pkg_erlmongo_homepage = https://github.com/SergejJurecko/erlmongo
+pkg_erlmongo_fetch = git
+pkg_erlmongo_repo = https://github.com/SergejJurecko/erlmongo
+pkg_erlmongo_commit = master
+
+PACKAGES += erlog
+pkg_erlog_name = erlog
+pkg_erlog_description = Prolog interpreter in and for Erlang
+pkg_erlog_homepage = https://github.com/rvirding/erlog
+pkg_erlog_fetch = git
+pkg_erlog_repo = https://github.com/rvirding/erlog
+pkg_erlog_commit = master
+
+PACKAGES += erlpass
+pkg_erlpass_name = erlpass
+pkg_erlpass_description = A library to handle password hashing and changing in a safe manner, independent from any kind of storage whatsoever.
+pkg_erlpass_homepage = https://github.com/ferd/erlpass
+pkg_erlpass_fetch = git
+pkg_erlpass_repo = https://github.com/ferd/erlpass
+pkg_erlpass_commit = master
+
+PACKAGES += erlport
+pkg_erlport_name = erlport
+pkg_erlport_description = ErlPort - connect Erlang to other languages
+pkg_erlport_homepage = https://github.com/hdima/erlport
+pkg_erlport_fetch = git
+pkg_erlport_repo = https://github.com/hdima/erlport
+pkg_erlport_commit = master
+
+PACKAGES += erlsh
+pkg_erlsh_name = erlsh
+pkg_erlsh_description = Erlang shell tools
+pkg_erlsh_homepage = https://github.com/proger/erlsh
+pkg_erlsh_fetch = git
+pkg_erlsh_repo = https://github.com/proger/erlsh
+pkg_erlsh_commit = master
+
+PACKAGES += erlsha2
+pkg_erlsha2_name = erlsha2
+pkg_erlsha2_description = SHA-224, SHA-256, SHA-384, SHA-512 implemented in Erlang NIFs.
+pkg_erlsha2_homepage = https://github.com/vinoski/erlsha2
+pkg_erlsha2_fetch = git
+pkg_erlsha2_repo = https://github.com/vinoski/erlsha2
+pkg_erlsha2_commit = master
+
+PACKAGES += erlsom
+pkg_erlsom_name = erlsom
+pkg_erlsom_description = XML parser for Erlang
+pkg_erlsom_homepage = https://github.com/willemdj/erlsom
+pkg_erlsom_fetch = git
+pkg_erlsom_repo = https://github.com/willemdj/erlsom
+pkg_erlsom_commit = master
+
+PACKAGES += erlubi
+pkg_erlubi_name = erlubi
+pkg_erlubi_description = Ubigraph Erlang Client (and Process Visualizer)
+pkg_erlubi_homepage = https://github.com/krestenkrab/erlubi
+pkg_erlubi_fetch = git
+pkg_erlubi_repo = https://github.com/krestenkrab/erlubi
+pkg_erlubi_commit = master
+
+PACKAGES += erlvolt
+pkg_erlvolt_name = erlvolt
+pkg_erlvolt_description = VoltDB Erlang Client Driver
+pkg_erlvolt_homepage = https://github.com/VoltDB/voltdb-client-erlang
+pkg_erlvolt_fetch = git
+pkg_erlvolt_repo = https://github.com/VoltDB/voltdb-client-erlang
+pkg_erlvolt_commit = master
+
+PACKAGES += erlware_commons
+pkg_erlware_commons_name = erlware_commons
+pkg_erlware_commons_description = Erlware Commons is an Erlware project focused on all aspects of reusable Erlang components.
+pkg_erlware_commons_homepage = https://github.com/erlware/erlware_commons
+pkg_erlware_commons_fetch = git
+pkg_erlware_commons_repo = https://github.com/erlware/erlware_commons
+pkg_erlware_commons_commit = master
+
+PACKAGES += erlydtl
+pkg_erlydtl_name = erlydtl
+pkg_erlydtl_description = Django Template Language for Erlang.
+pkg_erlydtl_homepage = https://github.com/erlydtl/erlydtl
+pkg_erlydtl_fetch = git
+pkg_erlydtl_repo = https://github.com/erlydtl/erlydtl
+pkg_erlydtl_commit = master
+
+PACKAGES += errd
+pkg_errd_name = errd
+pkg_errd_description = Erlang RRDTool library
+pkg_errd_homepage = https://github.com/archaelus/errd
+pkg_errd_fetch = git
+pkg_errd_repo = https://github.com/archaelus/errd
+pkg_errd_commit = master
+
+PACKAGES += erserve
+pkg_erserve_name = erserve
+pkg_erserve_description = Erlang/Rserve communication interface
+pkg_erserve_homepage = https://github.com/del/erserve
+pkg_erserve_fetch = git
+pkg_erserve_repo = https://github.com/del/erserve
+pkg_erserve_commit = master
+
+PACKAGES += erwa
+pkg_erwa_name = erwa
+pkg_erwa_description = A WAMP router and client written in Erlang.
+pkg_erwa_homepage = https://github.com/bwegh/erwa
+pkg_erwa_fetch = git
+pkg_erwa_repo = https://github.com/bwegh/erwa
+pkg_erwa_commit = master
+
+PACKAGES += escalus
+pkg_escalus_name = escalus
+pkg_escalus_description = An XMPP client library in Erlang for conveniently testing XMPP servers
+pkg_escalus_homepage = https://github.com/esl/escalus
+pkg_escalus_fetch = git
+pkg_escalus_repo = https://github.com/esl/escalus
+pkg_escalus_commit = master
+
+PACKAGES += esh_mk
+pkg_esh_mk_name = esh_mk
+pkg_esh_mk_description = esh template engine plugin for erlang.mk
+pkg_esh_mk_homepage = https://github.com/crownedgrouse/esh.mk
+pkg_esh_mk_fetch = git
+pkg_esh_mk_repo = https://github.com/crownedgrouse/esh.mk.git
+pkg_esh_mk_commit = master
+
+PACKAGES += espec
+pkg_espec_name = espec
+pkg_espec_description = ESpec: Behaviour driven development framework for Erlang
+pkg_espec_homepage = https://github.com/lucaspiller/espec
+pkg_espec_fetch = git
+pkg_espec_repo = https://github.com/lucaspiller/espec
+pkg_espec_commit = master
+
+PACKAGES += estatsd
+pkg_estatsd_name = estatsd
+pkg_estatsd_description = Erlang stats aggregation app that periodically flushes data to graphite
+pkg_estatsd_homepage = https://github.com/RJ/estatsd
+pkg_estatsd_fetch = git
+pkg_estatsd_repo = https://github.com/RJ/estatsd
+pkg_estatsd_commit = master
+
+PACKAGES += etap
+pkg_etap_name = etap
+pkg_etap_description = etap is a simple erlang testing library that provides TAP compliant output.
+pkg_etap_homepage = https://github.com/ngerakines/etap
+pkg_etap_fetch = git
+pkg_etap_repo = https://github.com/ngerakines/etap
+pkg_etap_commit = master
+
+PACKAGES += etest
+pkg_etest_name = etest
+pkg_etest_description = A lightweight, convention over configuration test framework for Erlang
+pkg_etest_homepage = https://github.com/wooga/etest
+pkg_etest_fetch = git
+pkg_etest_repo = https://github.com/wooga/etest
+pkg_etest_commit = master
+
+PACKAGES += etest_http
+pkg_etest_http_name = etest_http
+pkg_etest_http_description = etest Assertions around HTTP (client-side)
+pkg_etest_http_homepage = https://github.com/wooga/etest_http
+pkg_etest_http_fetch = git
+pkg_etest_http_repo = https://github.com/wooga/etest_http
+pkg_etest_http_commit = master
+
+PACKAGES += etoml
+pkg_etoml_name = etoml
+pkg_etoml_description = TOML language erlang parser
+pkg_etoml_homepage = https://github.com/kalta/etoml
+pkg_etoml_fetch = git
+pkg_etoml_repo = https://github.com/kalta/etoml
+pkg_etoml_commit = master
+
+PACKAGES += eunit
+pkg_eunit_name = eunit
+pkg_eunit_description = The EUnit lightweight unit testing framework for Erlang - this is the canonical development repository.
+pkg_eunit_homepage = https://github.com/richcarl/eunit
+pkg_eunit_fetch = git
+pkg_eunit_repo = https://github.com/richcarl/eunit
+pkg_eunit_commit = master
+
+PACKAGES += eunit_formatters
+pkg_eunit_formatters_name = eunit_formatters
+pkg_eunit_formatters_description = Because eunit's output sucks. Let's make it better.
+pkg_eunit_formatters_homepage = https://github.com/seancribbs/eunit_formatters
+pkg_eunit_formatters_fetch = git
+pkg_eunit_formatters_repo = https://github.com/seancribbs/eunit_formatters
+pkg_eunit_formatters_commit = master
+
+PACKAGES += euthanasia
+pkg_euthanasia_name = euthanasia
+pkg_euthanasia_description = Merciful killer for your Erlang processes
+pkg_euthanasia_homepage = https://github.com/doubleyou/euthanasia
+pkg_euthanasia_fetch = git
+pkg_euthanasia_repo = https://github.com/doubleyou/euthanasia
+pkg_euthanasia_commit = master
+
+PACKAGES += evum
+pkg_evum_name = evum
+pkg_evum_description = Spawn Linux VMs as Erlang processes in the Erlang VM
+pkg_evum_homepage = https://github.com/msantos/evum
+pkg_evum_fetch = git
+pkg_evum_repo = https://github.com/msantos/evum
+pkg_evum_commit = master
+
+PACKAGES += exec
+pkg_exec_name = erlexec
+pkg_exec_description = Execute and control OS processes from Erlang/OTP.
+pkg_exec_homepage = http://saleyn.github.com/erlexec
+pkg_exec_fetch = git
+pkg_exec_repo = https://github.com/saleyn/erlexec
+pkg_exec_commit = master
+
+PACKAGES += exml
+pkg_exml_name = exml
+pkg_exml_description = XML parsing library in Erlang
+pkg_exml_homepage = https://github.com/paulgray/exml
+pkg_exml_fetch = git
+pkg_exml_repo = https://github.com/paulgray/exml
+pkg_exml_commit = master
+
+PACKAGES += exometer
+pkg_exometer_name = exometer
+pkg_exometer_description = Basic measurement objects and probe behavior
+pkg_exometer_homepage = https://github.com/Feuerlabs/exometer
+pkg_exometer_fetch = git
+pkg_exometer_repo = https://github.com/Feuerlabs/exometer
+pkg_exometer_commit = master
+
+PACKAGES += exs1024
+pkg_exs1024_name = exs1024
+pkg_exs1024_description = Xorshift1024star pseudo random number generator for Erlang.
+pkg_exs1024_homepage = https://github.com/jj1bdx/exs1024
+pkg_exs1024_fetch = git
+pkg_exs1024_repo = https://github.com/jj1bdx/exs1024
+pkg_exs1024_commit = master
+
+PACKAGES += exs64
+pkg_exs64_name = exs64
+pkg_exs64_description = Xorshift64star pseudo random number generator for Erlang.
+pkg_exs64_homepage = https://github.com/jj1bdx/exs64
+pkg_exs64_fetch = git
+pkg_exs64_repo = https://github.com/jj1bdx/exs64
+pkg_exs64_commit = master
+
+PACKAGES += exsplus116
+pkg_exsplus116_name = exsplus116
+pkg_exsplus116_description = Xorshift116plus for Erlang
+pkg_exsplus116_homepage = https://github.com/jj1bdx/exsplus116
+pkg_exsplus116_fetch = git
+pkg_exsplus116_repo = https://github.com/jj1bdx/exsplus116
+pkg_exsplus116_commit = master
+
+PACKAGES += exsplus128
+pkg_exsplus128_name = exsplus128
+pkg_exsplus128_description = Xorshift128plus pseudo random number generator for Erlang.
+pkg_exsplus128_homepage = https://github.com/jj1bdx/exsplus128
+pkg_exsplus128_fetch = git
+pkg_exsplus128_repo = https://github.com/jj1bdx/exsplus128
+pkg_exsplus128_commit = master
+
+PACKAGES += ezmq
+pkg_ezmq_name = ezmq
+pkg_ezmq_description = zMQ implemented in Erlang
+pkg_ezmq_homepage = https://github.com/RoadRunnr/ezmq
+pkg_ezmq_fetch = git
+pkg_ezmq_repo = https://github.com/RoadRunnr/ezmq
+pkg_ezmq_commit = master
+
+PACKAGES += ezmtp
+pkg_ezmtp_name = ezmtp
+pkg_ezmtp_description = ZMTP protocol in pure Erlang.
+pkg_ezmtp_homepage = https://github.com/a13x/ezmtp
+pkg_ezmtp_fetch = git
+pkg_ezmtp_repo = https://github.com/a13x/ezmtp
+pkg_ezmtp_commit = master
+
+PACKAGES += fast_disk_log
+pkg_fast_disk_log_name = fast_disk_log
+pkg_fast_disk_log_description = Pool-based asynchronous Erlang disk logger
+pkg_fast_disk_log_homepage = https://github.com/lpgauth/fast_disk_log
+pkg_fast_disk_log_fetch = git
+pkg_fast_disk_log_repo = https://github.com/lpgauth/fast_disk_log
+pkg_fast_disk_log_commit = master
+
+PACKAGES += feeder
+pkg_feeder_name = feeder
+pkg_feeder_description = Stream parse RSS and Atom formatted XML feeds.
+pkg_feeder_homepage = https://github.com/michaelnisi/feeder
+pkg_feeder_fetch = git
+pkg_feeder_repo = https://github.com/michaelnisi/feeder
+pkg_feeder_commit = master
+
+PACKAGES += find_crate
+pkg_find_crate_name = find_crate
+pkg_find_crate_description = Find Rust libs and exes in Erlang application priv directory
+pkg_find_crate_homepage = https://github.com/goertzenator/find_crate
+pkg_find_crate_fetch = git
+pkg_find_crate_repo = https://github.com/goertzenator/find_crate
+pkg_find_crate_commit = master
+
+PACKAGES += fix
+pkg_fix_name = fix
+pkg_fix_description = http://fixprotocol.org/ implementation.
+pkg_fix_homepage = https://github.com/maxlapshin/fix
+pkg_fix_fetch = git
+pkg_fix_repo = https://github.com/maxlapshin/fix
+pkg_fix_commit = master
+
+PACKAGES += flower
+pkg_flower_name = flower
+pkg_flower_description = FlowER - an Erlang OpenFlow development platform
+pkg_flower_homepage = https://github.com/travelping/flower
+pkg_flower_fetch = git
+pkg_flower_repo = https://github.com/travelping/flower
+pkg_flower_commit = master
+
+PACKAGES += fn
+pkg_fn_name = fn
+pkg_fn_description = Function utilities for Erlang
+pkg_fn_homepage = https://github.com/reiddraper/fn
+pkg_fn_fetch = git
+pkg_fn_repo = https://github.com/reiddraper/fn
+pkg_fn_commit = master
+
+PACKAGES += folsom
+pkg_folsom_name = folsom
+pkg_folsom_description = Expose Erlang Events and Metrics
+pkg_folsom_homepage = https://github.com/boundary/folsom
+pkg_folsom_fetch = git
+pkg_folsom_repo = https://github.com/boundary/folsom
+pkg_folsom_commit = master
+
+PACKAGES += folsom_cowboy
+pkg_folsom_cowboy_name = folsom_cowboy
+pkg_folsom_cowboy_description = A Cowboy based Folsom HTTP Wrapper.
+pkg_folsom_cowboy_homepage = https://github.com/boundary/folsom_cowboy
+pkg_folsom_cowboy_fetch = git
+pkg_folsom_cowboy_repo = https://github.com/boundary/folsom_cowboy
+pkg_folsom_cowboy_commit = master
+
+PACKAGES += folsomite
+pkg_folsomite_name = folsomite
+pkg_folsomite_description = blow up your graphite / riemann server with folsom metrics
+pkg_folsomite_homepage = https://github.com/campanja/folsomite
+pkg_folsomite_fetch = git
+pkg_folsomite_repo = https://github.com/campanja/folsomite
+pkg_folsomite_commit = master
+
+PACKAGES += fs
+pkg_fs_name = fs
+pkg_fs_description = Erlang FileSystem Listener
+pkg_fs_homepage = https://github.com/synrc/fs
+pkg_fs_fetch = git
+pkg_fs_repo = https://github.com/synrc/fs
+pkg_fs_commit = master
+
+PACKAGES += fuse
+pkg_fuse_name = fuse
+pkg_fuse_description = A Circuit Breaker for Erlang
+pkg_fuse_homepage = https://github.com/jlouis/fuse
+pkg_fuse_fetch = git
+pkg_fuse_repo = https://github.com/jlouis/fuse
+pkg_fuse_commit = master
+
+PACKAGES += gcm
+pkg_gcm_name = gcm
+pkg_gcm_description = An Erlang application for Google Cloud Messaging
+pkg_gcm_homepage = https://github.com/pdincau/gcm-erlang
+pkg_gcm_fetch = git
+pkg_gcm_repo = https://github.com/pdincau/gcm-erlang
+pkg_gcm_commit = master
+
+PACKAGES += gcprof
+pkg_gcprof_name = gcprof
+pkg_gcprof_description = Garbage Collection profiler for Erlang
+pkg_gcprof_homepage = https://github.com/knutin/gcprof
+pkg_gcprof_fetch = git
+pkg_gcprof_repo = https://github.com/knutin/gcprof
+pkg_gcprof_commit = master
+
+PACKAGES += geas
+pkg_geas_name = geas
+pkg_geas_description = Guess Erlang Application Scattering
+pkg_geas_homepage = https://github.com/crownedgrouse/geas
+pkg_geas_fetch = git
+pkg_geas_repo = https://github.com/crownedgrouse/geas
+pkg_geas_commit = master
+
+PACKAGES += geef
+pkg_geef_name = geef
+pkg_geef_description = Git NEEEEF (Erlang NIF)
+pkg_geef_homepage = https://github.com/carlosmn/geef
+pkg_geef_fetch = git
+pkg_geef_repo = https://github.com/carlosmn/geef
+pkg_geef_commit = master
+
+PACKAGES += gen_coap
+pkg_gen_coap_name = gen_coap
+pkg_gen_coap_description = Generic Erlang CoAP Client/Server
+pkg_gen_coap_homepage = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_fetch = git
+pkg_gen_coap_repo = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_commit = master
+
+PACKAGES += gen_cycle
+pkg_gen_cycle_name = gen_cycle
+pkg_gen_cycle_description = Simple, generic OTP behaviour for recurring tasks
+pkg_gen_cycle_homepage = https://github.com/aerosol/gen_cycle
+pkg_gen_cycle_fetch = git
+pkg_gen_cycle_repo = https://github.com/aerosol/gen_cycle
+pkg_gen_cycle_commit = develop
+
+PACKAGES += gen_icmp
+pkg_gen_icmp_name = gen_icmp
+pkg_gen_icmp_description = Erlang interface to ICMP sockets
+pkg_gen_icmp_homepage = https://github.com/msantos/gen_icmp
+pkg_gen_icmp_fetch = git
+pkg_gen_icmp_repo = https://github.com/msantos/gen_icmp
+pkg_gen_icmp_commit = master
+
+PACKAGES += gen_leader
+pkg_gen_leader_name = gen_leader
+pkg_gen_leader_description = leader election behavior
+pkg_gen_leader_homepage = https://github.com/garret-smith/gen_leader_revival
+pkg_gen_leader_fetch = git
+pkg_gen_leader_repo = https://github.com/garret-smith/gen_leader_revival
+pkg_gen_leader_commit = master
+
+PACKAGES += gen_nb_server
+pkg_gen_nb_server_name = gen_nb_server
+pkg_gen_nb_server_description = OTP behavior for writing non-blocking servers
+pkg_gen_nb_server_homepage = https://github.com/kevsmith/gen_nb_server
+pkg_gen_nb_server_fetch = git
+pkg_gen_nb_server_repo = https://github.com/kevsmith/gen_nb_server
+pkg_gen_nb_server_commit = master
+
+PACKAGES += gen_paxos
+pkg_gen_paxos_name = gen_paxos
+pkg_gen_paxos_description = An Erlang/OTP-style implementation of the PAXOS distributed consensus protocol
+pkg_gen_paxos_homepage = https://github.com/gburd/gen_paxos
+pkg_gen_paxos_fetch = git
+pkg_gen_paxos_repo = https://github.com/gburd/gen_paxos
+pkg_gen_paxos_commit = master
+
+PACKAGES += gen_rpc
+pkg_gen_rpc_name = gen_rpc
+pkg_gen_rpc_description = A scalable RPC library for Erlang-VM based languages
+pkg_gen_rpc_homepage = https://github.com/priestjim/gen_rpc.git
+pkg_gen_rpc_fetch = git
+pkg_gen_rpc_repo = https://github.com/priestjim/gen_rpc.git
+pkg_gen_rpc_commit = master
+
+PACKAGES += gen_smtp
+pkg_gen_smtp_name = gen_smtp
+pkg_gen_smtp_description = A generic Erlang SMTP server and client that can be extended via callback modules
+pkg_gen_smtp_homepage = https://github.com/Vagabond/gen_smtp
+pkg_gen_smtp_fetch = git
+pkg_gen_smtp_repo = https://github.com/Vagabond/gen_smtp
+pkg_gen_smtp_commit = master
+
+PACKAGES += gen_tracker
+pkg_gen_tracker_name = gen_tracker
+pkg_gen_tracker_description = supervisor with ets handling of children and their metadata
+pkg_gen_tracker_homepage = https://github.com/erlyvideo/gen_tracker
+pkg_gen_tracker_fetch = git
+pkg_gen_tracker_repo = https://github.com/erlyvideo/gen_tracker
+pkg_gen_tracker_commit = master
+
+PACKAGES += gen_unix
+pkg_gen_unix_name = gen_unix
+pkg_gen_unix_description = Erlang Unix socket interface
+pkg_gen_unix_homepage = https://github.com/msantos/gen_unix
+pkg_gen_unix_fetch = git
+pkg_gen_unix_repo = https://github.com/msantos/gen_unix
+pkg_gen_unix_commit = master
+
+PACKAGES += geode
+pkg_geode_name = geode
+pkg_geode_description = geohash/proximity lookup in pure, uncut erlang.
+pkg_geode_homepage = https://github.com/bradfordw/geode
+pkg_geode_fetch = git
+pkg_geode_repo = https://github.com/bradfordw/geode
+pkg_geode_commit = master
+
+PACKAGES += getopt
+pkg_getopt_name = getopt
+pkg_getopt_description = Module to parse command line arguments using the GNU getopt syntax
+pkg_getopt_homepage = https://github.com/jcomellas/getopt
+pkg_getopt_fetch = git
+pkg_getopt_repo = https://github.com/jcomellas/getopt
+pkg_getopt_commit = master
+
+PACKAGES += gettext
+pkg_gettext_name = gettext
+pkg_gettext_description = Erlang internationalization library.
+pkg_gettext_homepage = https://github.com/etnt/gettext
+pkg_gettext_fetch = git
+pkg_gettext_repo = https://github.com/etnt/gettext
+pkg_gettext_commit = master
+
+PACKAGES += giallo
+pkg_giallo_name = giallo
+pkg_giallo_description = Small and flexible web framework on top of Cowboy
+pkg_giallo_homepage = https://github.com/kivra/giallo
+pkg_giallo_fetch = git
+pkg_giallo_repo = https://github.com/kivra/giallo
+pkg_giallo_commit = master
+
+PACKAGES += gin
+pkg_gin_name = gin
+pkg_gin_description = Guard helpers for Erlang, implemented as a parse_transform
+pkg_gin_homepage = https://github.com/mad-cocktail/gin
+pkg_gin_fetch = git
+pkg_gin_repo = https://github.com/mad-cocktail/gin
+pkg_gin_commit = master
+
+PACKAGES += gitty
+pkg_gitty_name = gitty
+pkg_gitty_description = Git access in erlang
+pkg_gitty_homepage = https://github.com/maxlapshin/gitty
+pkg_gitty_fetch = git
+pkg_gitty_repo = https://github.com/maxlapshin/gitty
+pkg_gitty_commit = master
+
+PACKAGES += gold_fever
+pkg_gold_fever_name = gold_fever
+pkg_gold_fever_description = A Treasure Hunt for Erlangers
+pkg_gold_fever_homepage = https://github.com/inaka/gold_fever
+pkg_gold_fever_fetch = git
+pkg_gold_fever_repo = https://github.com/inaka/gold_fever
+pkg_gold_fever_commit = master
+
+PACKAGES += gpb
+pkg_gpb_name = gpb
+pkg_gpb_description = A Google Protobuf implementation for Erlang
+pkg_gpb_homepage = https://github.com/tomas-abrahamsson/gpb
+pkg_gpb_fetch = git
+pkg_gpb_repo = https://github.com/tomas-abrahamsson/gpb
+pkg_gpb_commit = master
+
+PACKAGES += gproc
+pkg_gproc_name = gproc
+pkg_gproc_description = Extended process registry for Erlang
+pkg_gproc_homepage = https://github.com/uwiger/gproc
+pkg_gproc_fetch = git
+pkg_gproc_repo = https://github.com/uwiger/gproc
+pkg_gproc_commit = master
+
+PACKAGES += grapherl
+pkg_grapherl_name = grapherl
+pkg_grapherl_description = Create graphs of Erlang systems and programs
+pkg_grapherl_homepage = https://github.com/eproxus/grapherl
+pkg_grapherl_fetch = git
+pkg_grapherl_repo = https://github.com/eproxus/grapherl
+pkg_grapherl_commit = master
+
+PACKAGES += grpc
+pkg_grpc_name = grpc
+pkg_grpc_description = gRPC server in Erlang
+pkg_grpc_homepage = https://github.com/Bluehouse-Technology/grpc
+pkg_grpc_fetch = git
+pkg_grpc_repo = https://github.com/Bluehouse-Technology/grpc
+pkg_grpc_commit = master
+
+PACKAGES += grpc_client
+pkg_grpc_client_name = grpc_client
+pkg_grpc_client_description = gRPC client in Erlang
+pkg_grpc_client_homepage = https://github.com/Bluehouse-Technology/grpc_client
+pkg_grpc_client_fetch = git
+pkg_grpc_client_repo = https://github.com/Bluehouse-Technology/grpc_client
+pkg_grpc_client_commit = master
+
+PACKAGES += gun
+pkg_gun_name = gun
+pkg_gun_description = Asynchronous SPDY, HTTP and Websocket client written in Erlang.
+pkg_gun_homepage = http://ninenines.eu
+pkg_gun_fetch = git
+pkg_gun_repo = https://github.com/ninenines/gun
+pkg_gun_commit = master
+
+PACKAGES += gut
+pkg_gut_name = gut
+pkg_gut_description = gut is a template printing, aka scaffolding, tool for Erlang. Like rails generate or yeoman
+pkg_gut_homepage = https://github.com/unbalancedparentheses/gut
+pkg_gut_fetch = git
+pkg_gut_repo = https://github.com/unbalancedparentheses/gut
+pkg_gut_commit = master
+
+PACKAGES += hackney
+pkg_hackney_name = hackney
+pkg_hackney_description = simple HTTP client in Erlang
+pkg_hackney_homepage = https://github.com/benoitc/hackney
+pkg_hackney_fetch = git
+pkg_hackney_repo = https://github.com/benoitc/hackney
+pkg_hackney_commit = master
+
+PACKAGES += hamcrest
+pkg_hamcrest_name = hamcrest
+pkg_hamcrest_description = Erlang port of Hamcrest
+pkg_hamcrest_homepage = https://github.com/hyperthunk/hamcrest-erlang
+pkg_hamcrest_fetch = git
+pkg_hamcrest_repo = https://github.com/hyperthunk/hamcrest-erlang
+pkg_hamcrest_commit = master
+
+PACKAGES += hanoidb
+pkg_hanoidb_name = hanoidb
+pkg_hanoidb_description = Erlang LSM BTree Storage
+pkg_hanoidb_homepage = https://github.com/krestenkrab/hanoidb
+pkg_hanoidb_fetch = git
+pkg_hanoidb_repo = https://github.com/krestenkrab/hanoidb
+pkg_hanoidb_commit = master
+
+PACKAGES += hottub
+pkg_hottub_name = hottub
+pkg_hottub_description = Permanent Erlang Worker Pool
+pkg_hottub_homepage = https://github.com/bfrog/hottub
+pkg_hottub_fetch = git
+pkg_hottub_repo = https://github.com/bfrog/hottub
+pkg_hottub_commit = master
+
+PACKAGES += hpack
+pkg_hpack_name = hpack
+pkg_hpack_description = HPACK Implementation for Erlang
+pkg_hpack_homepage = https://github.com/joedevivo/hpack
+pkg_hpack_fetch = git
+pkg_hpack_repo = https://github.com/joedevivo/hpack
+pkg_hpack_commit = master
+
+PACKAGES += hyper
+pkg_hyper_name = hyper
+pkg_hyper_description = Erlang implementation of HyperLogLog
+pkg_hyper_homepage = https://github.com/GameAnalytics/hyper
+pkg_hyper_fetch = git
+pkg_hyper_repo = https://github.com/GameAnalytics/hyper
+pkg_hyper_commit = master
+
+PACKAGES += i18n
+pkg_i18n_name = i18n
+pkg_i18n_description = International components for unicode from Erlang (unicode, date, string, number, format, locale, localization, transliteration, icu4e)
+pkg_i18n_homepage = https://github.com/erlang-unicode/i18n
+pkg_i18n_fetch = git
+pkg_i18n_repo = https://github.com/erlang-unicode/i18n
+pkg_i18n_commit = master
+
+PACKAGES += ibrowse
+pkg_ibrowse_name = ibrowse
+pkg_ibrowse_description = Erlang HTTP client
+pkg_ibrowse_homepage = https://github.com/cmullaparthi/ibrowse
+pkg_ibrowse_fetch = git
+pkg_ibrowse_repo = https://github.com/cmullaparthi/ibrowse
+pkg_ibrowse_commit = master
+
+PACKAGES += idna
+pkg_idna_name = idna
+pkg_idna_description = Erlang IDNA lib
+pkg_idna_homepage = https://github.com/benoitc/erlang-idna
+pkg_idna_fetch = git
+pkg_idna_repo = https://github.com/benoitc/erlang-idna
+pkg_idna_commit = master
+
+PACKAGES += ierlang
+pkg_ierlang_name = ierlang
+pkg_ierlang_description = An Erlang language kernel for IPython.
+pkg_ierlang_homepage = https://github.com/robbielynch/ierlang
+pkg_ierlang_fetch = git
+pkg_ierlang_repo = https://github.com/robbielynch/ierlang
+pkg_ierlang_commit = master
+
+PACKAGES += iota
+pkg_iota_name = iota
+pkg_iota_description = iota (Inter-dependency Objective Testing Apparatus) - a tool to enforce clean separation of responsibilities in Erlang code
+pkg_iota_homepage = https://github.com/jpgneves/iota
+pkg_iota_fetch = git
+pkg_iota_repo = https://github.com/jpgneves/iota
+pkg_iota_commit = master
+
+PACKAGES += irc_lib
+pkg_irc_lib_name = irc_lib
+pkg_irc_lib_description = Erlang irc client library
+pkg_irc_lib_homepage = https://github.com/OtpChatBot/irc_lib
+pkg_irc_lib_fetch = git
+pkg_irc_lib_repo = https://github.com/OtpChatBot/irc_lib
+pkg_irc_lib_commit = master
+
+PACKAGES += ircd
+pkg_ircd_name = ircd
+pkg_ircd_description = A pluggable IRC daemon application/library for Erlang.
+pkg_ircd_homepage = https://github.com/tonyg/erlang-ircd
+pkg_ircd_fetch = git
+pkg_ircd_repo = https://github.com/tonyg/erlang-ircd
+pkg_ircd_commit = master
+
+PACKAGES += iris
+pkg_iris_name = iris
+pkg_iris_description = Iris Erlang binding
+pkg_iris_homepage = https://github.com/project-iris/iris-erl
+pkg_iris_fetch = git
+pkg_iris_repo = https://github.com/project-iris/iris-erl
+pkg_iris_commit = master
+
+PACKAGES += iso8601
+pkg_iso8601_name = iso8601
+pkg_iso8601_description = Erlang ISO 8601 date formatter/parser
+pkg_iso8601_homepage = https://github.com/seansawyer/erlang_iso8601
+pkg_iso8601_fetch = git
+pkg_iso8601_repo = https://github.com/seansawyer/erlang_iso8601
+pkg_iso8601_commit = master
+
+PACKAGES += jamdb_sybase
+pkg_jamdb_sybase_name = jamdb_sybase
+pkg_jamdb_sybase_description = Erlang driver for SAP Sybase ASE
+pkg_jamdb_sybase_homepage = https://github.com/erlangbureau/jamdb_sybase
+pkg_jamdb_sybase_fetch = git
+pkg_jamdb_sybase_repo = https://github.com/erlangbureau/jamdb_sybase
+pkg_jamdb_sybase_commit = master
+
+PACKAGES += jerg
+pkg_jerg_name = jerg
+pkg_jerg_description = JSON Schema to Erlang Records Generator
+pkg_jerg_homepage = https://github.com/ddossot/jerg
+pkg_jerg_fetch = git
+pkg_jerg_repo = https://github.com/ddossot/jerg
+pkg_jerg_commit = master
+
+PACKAGES += jesse
+pkg_jesse_name = jesse
+pkg_jesse_description = jesse (JSon Schema Erlang) is an implementation of a json schema validator for Erlang.
+pkg_jesse_homepage = https://github.com/for-GET/jesse
+pkg_jesse_fetch = git
+pkg_jesse_repo = https://github.com/for-GET/jesse
+pkg_jesse_commit = master
+
+PACKAGES += jiffy
+pkg_jiffy_name = jiffy
+pkg_jiffy_description = JSON NIFs for Erlang.
+pkg_jiffy_homepage = https://github.com/davisp/jiffy
+pkg_jiffy_fetch = git
+pkg_jiffy_repo = https://github.com/davisp/jiffy
+pkg_jiffy_commit = master
+
+PACKAGES += jiffy_v
+pkg_jiffy_v_name = jiffy_v
+pkg_jiffy_v_description = JSON validation utility
+pkg_jiffy_v_homepage = https://github.com/shizzard/jiffy-v
+pkg_jiffy_v_fetch = git
+pkg_jiffy_v_repo = https://github.com/shizzard/jiffy-v
+pkg_jiffy_v_commit = master
+
+PACKAGES += jobs
+pkg_jobs_name = jobs
+pkg_jobs_description = a Job scheduler for load regulation
+pkg_jobs_homepage = https://github.com/esl/jobs
+pkg_jobs_fetch = git
+pkg_jobs_repo = https://github.com/esl/jobs
+pkg_jobs_commit = master
+
+PACKAGES += joxa
+pkg_joxa_name = joxa
+pkg_joxa_description = A Modern Lisp for the Erlang VM
+pkg_joxa_homepage = https://github.com/joxa/joxa
+pkg_joxa_fetch = git
+pkg_joxa_repo = https://github.com/joxa/joxa
+pkg_joxa_commit = master
+
+PACKAGES += json
+pkg_json_name = json
+pkg_json_description = a high level json library for erlang (17.0+)
+pkg_json_homepage = https://github.com/talentdeficit/json
+pkg_json_fetch = git
+pkg_json_repo = https://github.com/talentdeficit/json
+pkg_json_commit = master
+
+PACKAGES += json_rec
+pkg_json_rec_name = json_rec
+pkg_json_rec_description = JSON to erlang record
+pkg_json_rec_homepage = https://github.com/justinkirby/json_rec
+pkg_json_rec_fetch = git
+pkg_json_rec_repo = https://github.com/justinkirby/json_rec
+pkg_json_rec_commit = master
+
+PACKAGES += jsone
+pkg_jsone_name = jsone
+pkg_jsone_description = An Erlang library for encoding, decoding JSON data.
+pkg_jsone_homepage = https://github.com/sile/jsone.git
+pkg_jsone_fetch = git
+pkg_jsone_repo = https://github.com/sile/jsone.git
+pkg_jsone_commit = master
+
+PACKAGES += jsonerl
+pkg_jsonerl_name = jsonerl
+pkg_jsonerl_description = yet another but slightly different erlang <-> json encoder/decoder
+pkg_jsonerl_homepage = https://github.com/lambder/jsonerl
+pkg_jsonerl_fetch = git
+pkg_jsonerl_repo = https://github.com/lambder/jsonerl
+pkg_jsonerl_commit = master
+
+PACKAGES += jsonpath
+pkg_jsonpath_name = jsonpath
+pkg_jsonpath_description = Fast Erlang JSON data retrieval and updates via javascript-like notation
+pkg_jsonpath_homepage = https://github.com/GeneStevens/jsonpath
+pkg_jsonpath_fetch = git
+pkg_jsonpath_repo = https://github.com/GeneStevens/jsonpath
+pkg_jsonpath_commit = master
+
+PACKAGES += jsonx
+pkg_jsonx_name = jsonx
+pkg_jsonx_description = JSONX is an Erlang library for efficiently decoding and encoding JSON, written in C.
+pkg_jsonx_homepage = https://github.com/iskra/jsonx
+pkg_jsonx_fetch = git
+pkg_jsonx_repo = https://github.com/iskra/jsonx
+pkg_jsonx_commit = master
+
+PACKAGES += jsx
+pkg_jsx_name = jsx
+pkg_jsx_description = An Erlang application for consuming, producing and manipulating JSON.
+pkg_jsx_homepage = https://github.com/talentdeficit/jsx
+pkg_jsx_fetch = git
+pkg_jsx_repo = https://github.com/talentdeficit/jsx
+pkg_jsx_commit = master
+
+PACKAGES += kafka
+pkg_kafka_name = kafka
+pkg_kafka_description = Kafka consumer and producer in Erlang
+pkg_kafka_homepage = https://github.com/wooga/kafka-erlang
+pkg_kafka_fetch = git
+pkg_kafka_repo = https://github.com/wooga/kafka-erlang
+pkg_kafka_commit = master
+
+PACKAGES += kafka_protocol
+pkg_kafka_protocol_name = kafka_protocol
+pkg_kafka_protocol_description = Kafka protocol Erlang library
+pkg_kafka_protocol_homepage = https://github.com/klarna/kafka_protocol
+pkg_kafka_protocol_fetch = git
+pkg_kafka_protocol_repo = https://github.com/klarna/kafka_protocol.git
+pkg_kafka_protocol_commit = master
+
+PACKAGES += kai
+pkg_kai_name = kai
+pkg_kai_description = DHT storage by Takeshi Inoue
+pkg_kai_homepage = https://github.com/synrc/kai
+pkg_kai_fetch = git
+pkg_kai_repo = https://github.com/synrc/kai
+pkg_kai_commit = master
+
+PACKAGES += katja
+pkg_katja_name = katja
+pkg_katja_description = A simple Riemann client written in Erlang.
+pkg_katja_homepage = https://github.com/nifoc/katja
+pkg_katja_fetch = git
+pkg_katja_repo = https://github.com/nifoc/katja
+pkg_katja_commit = master
+
+PACKAGES += kdht
+pkg_kdht_name = kdht
+pkg_kdht_description = kdht is an erlang DHT implementation
+pkg_kdht_homepage = https://github.com/kevinlynx/kdht
+pkg_kdht_fetch = git
+pkg_kdht_repo = https://github.com/kevinlynx/kdht
+pkg_kdht_commit = master
+
+PACKAGES += key2value
+pkg_key2value_name = key2value
+pkg_key2value_description = Erlang 2-way map
+pkg_key2value_homepage = https://github.com/okeuday/key2value
+pkg_key2value_fetch = git
+pkg_key2value_repo = https://github.com/okeuday/key2value
+pkg_key2value_commit = master
+
+PACKAGES += keys1value
+pkg_keys1value_name = keys1value
+pkg_keys1value_description = Erlang set associative map for key lists
+pkg_keys1value_homepage = https://github.com/okeuday/keys1value
+pkg_keys1value_fetch = git
+pkg_keys1value_repo = https://github.com/okeuday/keys1value
+pkg_keys1value_commit = master
+
+PACKAGES += kinetic
+pkg_kinetic_name = kinetic
+pkg_kinetic_description = Erlang Kinesis Client
+pkg_kinetic_homepage = https://github.com/AdRoll/kinetic
+pkg_kinetic_fetch = git
+pkg_kinetic_repo = https://github.com/AdRoll/kinetic
+pkg_kinetic_commit = master
+
+PACKAGES += kjell
+pkg_kjell_name = kjell
+pkg_kjell_description = Erlang Shell
+pkg_kjell_homepage = https://github.com/karlll/kjell
+pkg_kjell_fetch = git
+pkg_kjell_repo = https://github.com/karlll/kjell
+pkg_kjell_commit = master
+
+PACKAGES += kraken
+pkg_kraken_name = kraken
+pkg_kraken_description = Distributed Pubsub Server for Realtime Apps
+pkg_kraken_homepage = https://github.com/Asana/kraken
+pkg_kraken_fetch = git
+pkg_kraken_repo = https://github.com/Asana/kraken
+pkg_kraken_commit = master
+
+PACKAGES += kucumberl
+pkg_kucumberl_name = kucumberl
+pkg_kucumberl_description = A pure-Erlang, open-source implementation of Cucumber
+pkg_kucumberl_homepage = https://github.com/openshine/kucumberl
+pkg_kucumberl_fetch = git
+pkg_kucumberl_repo = https://github.com/openshine/kucumberl
+pkg_kucumberl_commit = master
+
+PACKAGES += kvc
+pkg_kvc_name = kvc
+pkg_kvc_description = KVC - Key Value Coding for Erlang data structures
+pkg_kvc_homepage = https://github.com/etrepum/kvc
+pkg_kvc_fetch = git
+pkg_kvc_repo = https://github.com/etrepum/kvc
+pkg_kvc_commit = master
+
+PACKAGES += kvlists
+pkg_kvlists_name = kvlists
+pkg_kvlists_description = Lists of key-value pairs (decoded JSON) in Erlang
+pkg_kvlists_homepage = https://github.com/jcomellas/kvlists
+pkg_kvlists_fetch = git
+pkg_kvlists_repo = https://github.com/jcomellas/kvlists
+pkg_kvlists_commit = master
+
+PACKAGES += kvs
+pkg_kvs_name = kvs
+pkg_kvs_description = Container and Iterator
+pkg_kvs_homepage = https://github.com/synrc/kvs
+pkg_kvs_fetch = git
+pkg_kvs_repo = https://github.com/synrc/kvs
+pkg_kvs_commit = master
+
+PACKAGES += lager
+pkg_lager_name = lager
+pkg_lager_description = A logging framework for Erlang/OTP.
+pkg_lager_homepage = https://github.com/erlang-lager/lager
+pkg_lager_fetch = git
+pkg_lager_repo = https://github.com/erlang-lager/lager
+pkg_lager_commit = master
+
+PACKAGES += lager_amqp_backend
+pkg_lager_amqp_backend_name = lager_amqp_backend
+pkg_lager_amqp_backend_description = AMQP RabbitMQ Lager backend
+pkg_lager_amqp_backend_homepage = https://github.com/jbrisbin/lager_amqp_backend
+pkg_lager_amqp_backend_fetch = git
+pkg_lager_amqp_backend_repo = https://github.com/jbrisbin/lager_amqp_backend
+pkg_lager_amqp_backend_commit = master
+
+PACKAGES += lager_syslog
+pkg_lager_syslog_name = lager_syslog
+pkg_lager_syslog_description = Syslog backend for lager
+pkg_lager_syslog_homepage = https://github.com/erlang-lager/lager_syslog
+pkg_lager_syslog_fetch = git
+pkg_lager_syslog_repo = https://github.com/erlang-lager/lager_syslog
+pkg_lager_syslog_commit = master
+
+PACKAGES += lambdapad
+pkg_lambdapad_name = lambdapad
+pkg_lambdapad_description = Static site generator using Erlang. Yes, Erlang.
+pkg_lambdapad_homepage = https://github.com/gar1t/lambdapad
+pkg_lambdapad_fetch = git
+pkg_lambdapad_repo = https://github.com/gar1t/lambdapad
+pkg_lambdapad_commit = master
+
+PACKAGES += lasp
+pkg_lasp_name = lasp
+pkg_lasp_description = A Language for Distributed, Eventually Consistent Computations
+pkg_lasp_homepage = http://lasp-lang.org/
+pkg_lasp_fetch = git
+pkg_lasp_repo = https://github.com/lasp-lang/lasp
+pkg_lasp_commit = master
+
+PACKAGES += lasse
+pkg_lasse_name = lasse
+pkg_lasse_description = SSE handler for Cowboy
+pkg_lasse_homepage = https://github.com/inaka/lasse
+pkg_lasse_fetch = git
+pkg_lasse_repo = https://github.com/inaka/lasse
+pkg_lasse_commit = master
+
+PACKAGES += ldap
+pkg_ldap_name = ldap
+pkg_ldap_description = LDAP server written in Erlang
+pkg_ldap_homepage = https://github.com/spawnproc/ldap
+pkg_ldap_fetch = git
+pkg_ldap_repo = https://github.com/spawnproc/ldap
+pkg_ldap_commit = master
+
+PACKAGES += lethink
+pkg_lethink_name = lethink
+pkg_lethink_description = erlang driver for rethinkdb
+pkg_lethink_homepage = https://github.com/taybin/lethink
+pkg_lethink_fetch = git
+pkg_lethink_repo = https://github.com/taybin/lethink
+pkg_lethink_commit = master
+
+PACKAGES += lfe
+pkg_lfe_name = lfe
+pkg_lfe_description = Lisp Flavoured Erlang (LFE)
+pkg_lfe_homepage = https://github.com/rvirding/lfe
+pkg_lfe_fetch = git
+pkg_lfe_repo = https://github.com/rvirding/lfe
+pkg_lfe_commit = master
+
+PACKAGES += ling
+pkg_ling_name = ling
+pkg_ling_description = Erlang on Xen
+pkg_ling_homepage = https://github.com/cloudozer/ling
+pkg_ling_fetch = git
+pkg_ling_repo = https://github.com/cloudozer/ling
+pkg_ling_commit = master
+
+PACKAGES += live
+pkg_live_name = live
+pkg_live_description = Automated module and configuration reloader.
+pkg_live_homepage = http://ninenines.eu
+pkg_live_fetch = git
+pkg_live_repo = https://github.com/ninenines/live
+pkg_live_commit = master
+
+PACKAGES += lmq
+pkg_lmq_name = lmq
+pkg_lmq_description = Lightweight Message Queue
+pkg_lmq_homepage = https://github.com/iij/lmq
+pkg_lmq_fetch = git
+pkg_lmq_repo = https://github.com/iij/lmq
+pkg_lmq_commit = master
+
+PACKAGES += locker
+pkg_locker_name = locker
+pkg_locker_description = Atomic distributed 'check and set' for short-lived keys
+pkg_locker_homepage = https://github.com/wooga/locker
+pkg_locker_fetch = git
+pkg_locker_repo = https://github.com/wooga/locker
+pkg_locker_commit = master
+
+PACKAGES += locks
+pkg_locks_name = locks
+pkg_locks_description = A scalable, deadlock-resolving resource locker
+pkg_locks_homepage = https://github.com/uwiger/locks
+pkg_locks_fetch = git
+pkg_locks_repo = https://github.com/uwiger/locks
+pkg_locks_commit = master
+
+PACKAGES += log4erl
+pkg_log4erl_name = log4erl
+pkg_log4erl_description = A logger for erlang in the spirit of Log4J.
+pkg_log4erl_homepage = https://github.com/ahmednawras/log4erl
+pkg_log4erl_fetch = git
+pkg_log4erl_repo = https://github.com/ahmednawras/log4erl
+pkg_log4erl_commit = master
+
+PACKAGES += lol
+pkg_lol_name = lol
+pkg_lol_description = Lisp on erLang, and programming is fun again
+pkg_lol_homepage = https://github.com/b0oh/lol
+pkg_lol_fetch = git
+pkg_lol_repo = https://github.com/b0oh/lol
+pkg_lol_commit = master
+
+PACKAGES += lucid
+pkg_lucid_name = lucid
+pkg_lucid_description = HTTP/2 server written in Erlang
+pkg_lucid_homepage = https://github.com/tatsuhiro-t/lucid
+pkg_lucid_fetch = git
+pkg_lucid_repo = https://github.com/tatsuhiro-t/lucid
+pkg_lucid_commit = master
+
+PACKAGES += luerl
+pkg_luerl_name = luerl
+pkg_luerl_description = Lua in Erlang
+pkg_luerl_homepage = https://github.com/rvirding/luerl
+pkg_luerl_fetch = git
+pkg_luerl_repo = https://github.com/rvirding/luerl
+pkg_luerl_commit = develop
+
+PACKAGES += luwak
+pkg_luwak_name = luwak
+pkg_luwak_description = Large-object storage interface for Riak
+pkg_luwak_homepage = https://github.com/basho/luwak
+pkg_luwak_fetch = git
+pkg_luwak_repo = https://github.com/basho/luwak
+pkg_luwak_commit = master
+
+PACKAGES += lux
+pkg_lux_name = lux
+pkg_lux_description = Lux (LUcid eXpect scripting) simplifies test automation and provides an Expect-style execution of commands
+pkg_lux_homepage = https://github.com/hawk/lux
+pkg_lux_fetch = git
+pkg_lux_repo = https://github.com/hawk/lux
+pkg_lux_commit = master
+
+PACKAGES += machi
+pkg_machi_name = machi
+pkg_machi_description = Machi file store
+pkg_machi_homepage = https://github.com/basho/machi
+pkg_machi_fetch = git
+pkg_machi_repo = https://github.com/basho/machi
+pkg_machi_commit = master
+
+PACKAGES += mad
+pkg_mad_name = mad
+pkg_mad_description = Small and Fast Rebar Replacement
+pkg_mad_homepage = https://github.com/synrc/mad
+pkg_mad_fetch = git
+pkg_mad_repo = https://github.com/synrc/mad
+pkg_mad_commit = master
+
+PACKAGES += marina
+pkg_marina_name = marina
+pkg_marina_description = Non-blocking Erlang Cassandra CQL3 client
+pkg_marina_homepage = https://github.com/lpgauth/marina
+pkg_marina_fetch = git
+pkg_marina_repo = https://github.com/lpgauth/marina
+pkg_marina_commit = master
+
+PACKAGES += mavg
+pkg_mavg_name = mavg
+pkg_mavg_description = Erlang :: Exponential moving average library
+pkg_mavg_homepage = https://github.com/EchoTeam/mavg
+pkg_mavg_fetch = git
+pkg_mavg_repo = https://github.com/EchoTeam/mavg
+pkg_mavg_commit = master
+
+PACKAGES += mc_erl
+pkg_mc_erl_name = mc_erl
+pkg_mc_erl_description = mc-erl is a server for Minecraft 1.4.7 written in Erlang.
+pkg_mc_erl_homepage = https://github.com/clonejo/mc-erl
+pkg_mc_erl_fetch = git
+pkg_mc_erl_repo = https://github.com/clonejo/mc-erl
+pkg_mc_erl_commit = master
+
+PACKAGES += mcd
+pkg_mcd_name = mcd
+pkg_mcd_description = Fast memcached protocol client in pure Erlang
+pkg_mcd_homepage = https://github.com/EchoTeam/mcd
+pkg_mcd_fetch = git
+pkg_mcd_repo = https://github.com/EchoTeam/mcd
+pkg_mcd_commit = master
+
+PACKAGES += mcerlang
+pkg_mcerlang_name = mcerlang
+pkg_mcerlang_description = The McErlang model checker for Erlang
+pkg_mcerlang_homepage = https://github.com/fredlund/McErlang
+pkg_mcerlang_fetch = git
+pkg_mcerlang_repo = https://github.com/fredlund/McErlang
+pkg_mcerlang_commit = master
+
+PACKAGES += meck
+pkg_meck_name = meck
+pkg_meck_description = A mocking library for Erlang
+pkg_meck_homepage = https://github.com/eproxus/meck
+pkg_meck_fetch = git
+pkg_meck_repo = https://github.com/eproxus/meck
+pkg_meck_commit = master
+
+PACKAGES += mekao
+pkg_mekao_name = mekao
+pkg_mekao_description = SQL constructor
+pkg_mekao_homepage = https://github.com/ddosia/mekao
+pkg_mekao_fetch = git
+pkg_mekao_repo = https://github.com/ddosia/mekao
+pkg_mekao_commit = master
+
+PACKAGES += memo
+pkg_memo_name = memo
+pkg_memo_description = Erlang memoization server
+pkg_memo_homepage = https://github.com/tuncer/memo
+pkg_memo_fetch = git
+pkg_memo_repo = https://github.com/tuncer/memo
+pkg_memo_commit = master
+
+PACKAGES += merge_index
+pkg_merge_index_name = merge_index
+pkg_merge_index_description = MergeIndex is an Erlang library for storing ordered sets on disk. It is very similar to an SSTable (in Google's Bigtable) or an HFile (in Hadoop).
+pkg_merge_index_homepage = https://github.com/basho/merge_index
+pkg_merge_index_fetch = git
+pkg_merge_index_repo = https://github.com/basho/merge_index
+pkg_merge_index_commit = master
+
+PACKAGES += merl
+pkg_merl_name = merl
+pkg_merl_description = Metaprogramming in Erlang
+pkg_merl_homepage = https://github.com/richcarl/merl
+pkg_merl_fetch = git
+pkg_merl_repo = https://github.com/richcarl/merl
+pkg_merl_commit = master
+
+PACKAGES += mimerl
+pkg_mimerl_name = mimerl
+pkg_mimerl_description = library to handle mimetypes
+pkg_mimerl_homepage = https://github.com/benoitc/mimerl
+pkg_mimerl_fetch = git
+pkg_mimerl_repo = https://github.com/benoitc/mimerl
+pkg_mimerl_commit = master
+
+PACKAGES += mimetypes
+pkg_mimetypes_name = mimetypes
+pkg_mimetypes_description = Erlang MIME types library
+pkg_mimetypes_homepage = https://github.com/spawngrid/mimetypes
+pkg_mimetypes_fetch = git
+pkg_mimetypes_repo = https://github.com/spawngrid/mimetypes
+pkg_mimetypes_commit = master
+
+PACKAGES += mixer
+pkg_mixer_name = mixer
+pkg_mixer_description = Mix in functions from other modules
+pkg_mixer_homepage = https://github.com/chef/mixer
+pkg_mixer_fetch = git
+pkg_mixer_repo = https://github.com/chef/mixer
+pkg_mixer_commit = master
+
+PACKAGES += mochiweb
+pkg_mochiweb_name = mochiweb
+pkg_mochiweb_description = MochiWeb is an Erlang library for building lightweight HTTP servers.
+pkg_mochiweb_homepage = https://github.com/mochi/mochiweb
+pkg_mochiweb_fetch = git
+pkg_mochiweb_repo = https://github.com/mochi/mochiweb
+pkg_mochiweb_commit = master
+
+PACKAGES += mochiweb_xpath
+pkg_mochiweb_xpath_name = mochiweb_xpath
+pkg_mochiweb_xpath_description = XPath support for mochiweb's html parser
+pkg_mochiweb_xpath_homepage = https://github.com/retnuh/mochiweb_xpath
+pkg_mochiweb_xpath_fetch = git
+pkg_mochiweb_xpath_repo = https://github.com/retnuh/mochiweb_xpath
+pkg_mochiweb_xpath_commit = master
+
+PACKAGES += mockgyver
+pkg_mockgyver_name = mockgyver
+pkg_mockgyver_description = A mocking library for Erlang
+pkg_mockgyver_homepage = https://github.com/klajo/mockgyver
+pkg_mockgyver_fetch = git
+pkg_mockgyver_repo = https://github.com/klajo/mockgyver
+pkg_mockgyver_commit = master
+
+PACKAGES += modlib
+pkg_modlib_name = modlib
+pkg_modlib_description = Web framework based on Erlang's inets httpd
+pkg_modlib_homepage = https://github.com/gar1t/modlib
+pkg_modlib_fetch = git
+pkg_modlib_repo = https://github.com/gar1t/modlib
+pkg_modlib_commit = master
+
+PACKAGES += mongodb
+pkg_mongodb_name = mongodb
+pkg_mongodb_description = MongoDB driver for Erlang
+pkg_mongodb_homepage = https://github.com/comtihon/mongodb-erlang
+pkg_mongodb_fetch = git
+pkg_mongodb_repo = https://github.com/comtihon/mongodb-erlang
+pkg_mongodb_commit = master
+
+PACKAGES += mongooseim
+pkg_mongooseim_name = mongooseim
+pkg_mongooseim_description = Jabber / XMPP server with focus on performance and scalability, by Erlang Solutions
+pkg_mongooseim_homepage = https://www.erlang-solutions.com/products/mongooseim-massively-scalable-ejabberd-platform
+pkg_mongooseim_fetch = git
+pkg_mongooseim_repo = https://github.com/esl/MongooseIM
+pkg_mongooseim_commit = master
+
+PACKAGES += moyo
+pkg_moyo_name = moyo
+pkg_moyo_description = Erlang utility functions library
+pkg_moyo_homepage = https://github.com/dwango/moyo
+pkg_moyo_fetch = git
+pkg_moyo_repo = https://github.com/dwango/moyo
+pkg_moyo_commit = master
+
+PACKAGES += msgpack
+pkg_msgpack_name = msgpack
+pkg_msgpack_description = MessagePack (de)serializer implementation for Erlang
+pkg_msgpack_homepage = https://github.com/msgpack/msgpack-erlang
+pkg_msgpack_fetch = git
+pkg_msgpack_repo = https://github.com/msgpack/msgpack-erlang
+pkg_msgpack_commit = master
+
+PACKAGES += mu2
+pkg_mu2_name = mu2
+pkg_mu2_description = Erlang mutation testing tool
+pkg_mu2_homepage = https://github.com/ramsay-t/mu2
+pkg_mu2_fetch = git
+pkg_mu2_repo = https://github.com/ramsay-t/mu2
+pkg_mu2_commit = master
+
+PACKAGES += mustache
+pkg_mustache_name = mustache
+pkg_mustache_description = Mustache template engine for Erlang.
+pkg_mustache_homepage = https://github.com/mojombo/mustache.erl
+pkg_mustache_fetch = git
+pkg_mustache_repo = https://github.com/mojombo/mustache.erl
+pkg_mustache_commit = master
+
+PACKAGES += myproto
+pkg_myproto_name = myproto
+pkg_myproto_description = MySQL Server Protocol in Erlang
+pkg_myproto_homepage = https://github.com/altenwald/myproto
+pkg_myproto_fetch = git
+pkg_myproto_repo = https://github.com/altenwald/myproto
+pkg_myproto_commit = master
+
+PACKAGES += mysql
+pkg_mysql_name = mysql
+pkg_mysql_description = MySQL client library for Erlang/OTP
+pkg_mysql_homepage = https://github.com/mysql-otp/mysql-otp
+pkg_mysql_fetch = git
+pkg_mysql_repo = https://github.com/mysql-otp/mysql-otp
+pkg_mysql_commit = 1.5.1
+
+PACKAGES += n2o
+pkg_n2o_name = n2o
+pkg_n2o_description = WebSocket Application Server
+pkg_n2o_homepage = https://github.com/5HT/n2o
+pkg_n2o_fetch = git
+pkg_n2o_repo = https://github.com/5HT/n2o
+pkg_n2o_commit = master
+
+PACKAGES += nat_upnp
+pkg_nat_upnp_name = nat_upnp
+pkg_nat_upnp_description = Erlang library to map your internal port to an external one using UPnP IGD
+pkg_nat_upnp_homepage = https://github.com/benoitc/nat_upnp
+pkg_nat_upnp_fetch = git
+pkg_nat_upnp_repo = https://github.com/benoitc/nat_upnp
+pkg_nat_upnp_commit = master
+
+PACKAGES += neo4j
+pkg_neo4j_name = neo4j
+pkg_neo4j_description = Erlang client library for Neo4J.
+pkg_neo4j_homepage = https://github.com/dmitriid/neo4j-erlang
+pkg_neo4j_fetch = git
+pkg_neo4j_repo = https://github.com/dmitriid/neo4j-erlang
+pkg_neo4j_commit = master
+
+PACKAGES += neotoma
+pkg_neotoma_name = neotoma
+pkg_neotoma_description = Erlang library and packrat parser-generator for parsing expression grammars.
+pkg_neotoma_homepage = https://github.com/seancribbs/neotoma
+pkg_neotoma_fetch = git
+pkg_neotoma_repo = https://github.com/seancribbs/neotoma
+pkg_neotoma_commit = master
+
+PACKAGES += newrelic
+pkg_newrelic_name = newrelic
+pkg_newrelic_description = Erlang library for sending metrics to New Relic
+pkg_newrelic_homepage = https://github.com/wooga/newrelic-erlang
+pkg_newrelic_fetch = git
+pkg_newrelic_repo = https://github.com/wooga/newrelic-erlang
+pkg_newrelic_commit = master
+
+PACKAGES += nifty
+pkg_nifty_name = nifty
+pkg_nifty_description = Erlang NIF wrapper generator
+pkg_nifty_homepage = https://github.com/parapluu/nifty
+pkg_nifty_fetch = git
+pkg_nifty_repo = https://github.com/parapluu/nifty
+pkg_nifty_commit = master
+
+PACKAGES += nitrogen_core
+pkg_nitrogen_core_name = nitrogen_core
+pkg_nitrogen_core_description = The core Nitrogen library.
+pkg_nitrogen_core_homepage = http://nitrogenproject.com/
+pkg_nitrogen_core_fetch = git
+pkg_nitrogen_core_repo = https://github.com/nitrogen/nitrogen_core
+pkg_nitrogen_core_commit = master
+
+PACKAGES += nkbase
+pkg_nkbase_name = nkbase
+pkg_nkbase_description = NkBASE distributed database
+pkg_nkbase_homepage = https://github.com/Nekso/nkbase
+pkg_nkbase_fetch = git
+pkg_nkbase_repo = https://github.com/Nekso/nkbase
+pkg_nkbase_commit = develop
+
+PACKAGES += nkdocker
+pkg_nkdocker_name = nkdocker
+pkg_nkdocker_description = Erlang Docker client
+pkg_nkdocker_homepage = https://github.com/Nekso/nkdocker
+pkg_nkdocker_fetch = git
+pkg_nkdocker_repo = https://github.com/Nekso/nkdocker
+pkg_nkdocker_commit = master
+
+PACKAGES += nkpacket
+pkg_nkpacket_name = nkpacket
+pkg_nkpacket_description = Generic Erlang transport layer
+pkg_nkpacket_homepage = https://github.com/Nekso/nkpacket
+pkg_nkpacket_fetch = git
+pkg_nkpacket_repo = https://github.com/Nekso/nkpacket
+pkg_nkpacket_commit = master
+
+PACKAGES += nksip
+pkg_nksip_name = nksip
+pkg_nksip_description = Erlang SIP application server
+pkg_nksip_homepage = https://github.com/kalta/nksip
+pkg_nksip_fetch = git
+pkg_nksip_repo = https://github.com/kalta/nksip
+pkg_nksip_commit = master
+
+PACKAGES += nodefinder
+pkg_nodefinder_name = nodefinder
+pkg_nodefinder_description = automatic node discovery via UDP multicast
+pkg_nodefinder_homepage = https://github.com/erlanger/nodefinder
+pkg_nodefinder_fetch = git
+pkg_nodefinder_repo = https://github.com/okeuday/nodefinder
+pkg_nodefinder_commit = master
+
+PACKAGES += nprocreg
+pkg_nprocreg_name = nprocreg
+pkg_nprocreg_description = Minimal Distributed Erlang Process Registry
+pkg_nprocreg_homepage = http://nitrogenproject.com/
+pkg_nprocreg_fetch = git
+pkg_nprocreg_repo = https://github.com/nitrogen/nprocreg
+pkg_nprocreg_commit = master
+
+PACKAGES += oauth
+pkg_oauth_name = oauth
+pkg_oauth_description = An Erlang OAuth 1.0 implementation
+pkg_oauth_homepage = https://github.com/tim/erlang-oauth
+pkg_oauth_fetch = git
+pkg_oauth_repo = https://github.com/tim/erlang-oauth
+pkg_oauth_commit = master
+
+PACKAGES += oauth2
+pkg_oauth2_name = oauth2
+pkg_oauth2_description = Erlang Oauth2 implementation
+pkg_oauth2_homepage = https://github.com/kivra/oauth2
+pkg_oauth2_fetch = git
+pkg_oauth2_repo = https://github.com/kivra/oauth2
+pkg_oauth2_commit = master
+
+PACKAGES += observer_cli
+pkg_observer_cli_name = observer_cli
+pkg_observer_cli_description = Visualize Erlang/Elixir Nodes On The Command Line
+pkg_observer_cli_homepage = http://zhongwencool.github.io/observer_cli
+pkg_observer_cli_fetch = git
+pkg_observer_cli_repo = https://github.com/zhongwencool/observer_cli
+pkg_observer_cli_commit = master
+
+PACKAGES += octopus
+pkg_octopus_name = octopus
+pkg_octopus_description = Small and flexible pool manager written in Erlang
+pkg_octopus_homepage = https://github.com/erlangbureau/octopus
+pkg_octopus_fetch = git
+pkg_octopus_repo = https://github.com/erlangbureau/octopus
+pkg_octopus_commit = master
+
+PACKAGES += of_protocol
+pkg_of_protocol_name = of_protocol
+pkg_of_protocol_description = OpenFlow Protocol Library for Erlang
+pkg_of_protocol_homepage = https://github.com/FlowForwarding/of_protocol
+pkg_of_protocol_fetch = git
+pkg_of_protocol_repo = https://github.com/FlowForwarding/of_protocol
+pkg_of_protocol_commit = master
+
+PACKAGES += opencouch
+pkg_opencouch_name = couch
+pkg_opencouch_description = An embeddable document-oriented database compatible with Apache CouchDB
+pkg_opencouch_homepage = https://github.com/benoitc/opencouch
+pkg_opencouch_fetch = git
+pkg_opencouch_repo = https://github.com/benoitc/opencouch
+pkg_opencouch_commit = master
+
+PACKAGES += openflow
+pkg_openflow_name = openflow
+pkg_openflow_description = An OpenFlow controller written in pure erlang
+pkg_openflow_homepage = https://github.com/renatoaguiar/erlang-openflow
+pkg_openflow_fetch = git
+pkg_openflow_repo = https://github.com/renatoaguiar/erlang-openflow
+pkg_openflow_commit = master
+
+PACKAGES += openid
+pkg_openid_name = openid
+pkg_openid_description = Erlang OpenID
+pkg_openid_homepage = https://github.com/brendonh/erl_openid
+pkg_openid_fetch = git
+pkg_openid_repo = https://github.com/brendonh/erl_openid
+pkg_openid_commit = master
+
+PACKAGES += openpoker
+pkg_openpoker_name = openpoker
+pkg_openpoker_description = Genesis Texas hold'em Game Server
+pkg_openpoker_homepage = https://github.com/hpyhacking/openpoker
+pkg_openpoker_fetch = git
+pkg_openpoker_repo = https://github.com/hpyhacking/openpoker
+pkg_openpoker_commit = master
+
+PACKAGES += otpbp
+pkg_otpbp_name = otpbp
+pkg_otpbp_description = Parse transform for using new OTP functions in old Erlang/OTP releases (R15, R16, 17, 18, 19)
+pkg_otpbp_homepage = https://github.com/Ledest/otpbp
+pkg_otpbp_fetch = git
+pkg_otpbp_repo = https://github.com/Ledest/otpbp
+pkg_otpbp_commit = master
+
+PACKAGES += pal
+pkg_pal_name = pal
+pkg_pal_description = Pragmatic Authentication Library
+pkg_pal_homepage = https://github.com/manifest/pal
+pkg_pal_fetch = git
+pkg_pal_repo = https://github.com/manifest/pal
+pkg_pal_commit = master
+
+PACKAGES += parse_trans
+pkg_parse_trans_name = parse_trans
+pkg_parse_trans_description = Parse transform utilities for Erlang
+pkg_parse_trans_homepage = https://github.com/uwiger/parse_trans
+pkg_parse_trans_fetch = git
+pkg_parse_trans_repo = https://github.com/uwiger/parse_trans
+pkg_parse_trans_commit = master
+
+PACKAGES += parsexml
+pkg_parsexml_name = parsexml
+pkg_parsexml_description = Simple DOM XML parser with convenient and very simple API
+pkg_parsexml_homepage = https://github.com/maxlapshin/parsexml
+pkg_parsexml_fetch = git
+pkg_parsexml_repo = https://github.com/maxlapshin/parsexml
+pkg_parsexml_commit = master
+
+PACKAGES += partisan
+pkg_partisan_name = partisan
+pkg_partisan_description = High-performance, high-scalability distributed computing with Erlang and Elixir.
+pkg_partisan_homepage = http://partisan.cloud
+pkg_partisan_fetch = git
+pkg_partisan_repo = https://github.com/lasp-lang/partisan
+pkg_partisan_commit = master
+
+PACKAGES += pegjs
+pkg_pegjs_name = pegjs
+pkg_pegjs_description = An implementation of PEG.js grammar for Erlang.
+pkg_pegjs_homepage = https://github.com/dmitriid/pegjs
+pkg_pegjs_fetch = git
+pkg_pegjs_repo = https://github.com/dmitriid/pegjs
+pkg_pegjs_commit = master
+
+PACKAGES += percept2
+pkg_percept2_name = percept2
+pkg_percept2_description = Concurrent profiling tool for Erlang
+pkg_percept2_homepage = https://github.com/huiqing/percept2
+pkg_percept2_fetch = git
+pkg_percept2_repo = https://github.com/huiqing/percept2
+pkg_percept2_commit = master
+
+PACKAGES += pgo
+pkg_pgo_name = pgo
+pkg_pgo_description = Erlang Postgres client and connection pool
+pkg_pgo_homepage = https://github.com/erleans/pgo
+pkg_pgo_fetch = git
+pkg_pgo_repo = https://github.com/erleans/pgo.git
+pkg_pgo_commit = master
+
+PACKAGES += pgsql
+pkg_pgsql_name = pgsql
+pkg_pgsql_description = Erlang PostgreSQL driver
+pkg_pgsql_homepage = https://github.com/semiocast/pgsql
+pkg_pgsql_fetch = git
+pkg_pgsql_repo = https://github.com/semiocast/pgsql
+pkg_pgsql_commit = master
+
+PACKAGES += pkgx
+pkg_pkgx_name = pkgx
+pkg_pkgx_description = Build .deb packages from Erlang releases
+pkg_pkgx_homepage = https://github.com/arjan/pkgx
+pkg_pkgx_fetch = git
+pkg_pkgx_repo = https://github.com/arjan/pkgx
+pkg_pkgx_commit = master
+
+PACKAGES += pkt
+pkg_pkt_name = pkt
+pkg_pkt_description = Erlang network protocol library
+pkg_pkt_homepage = https://github.com/msantos/pkt
+pkg_pkt_fetch = git
+pkg_pkt_repo = https://github.com/msantos/pkt
+pkg_pkt_commit = master
+
+PACKAGES += plain_fsm
+pkg_plain_fsm_name = plain_fsm
+pkg_plain_fsm_description = A behaviour/support library for writing plain Erlang FSMs.
+pkg_plain_fsm_homepage = https://github.com/uwiger/plain_fsm
+pkg_plain_fsm_fetch = git
+pkg_plain_fsm_repo = https://github.com/uwiger/plain_fsm
+pkg_plain_fsm_commit = master
+
+PACKAGES += plumtree
+pkg_plumtree_name = plumtree
+pkg_plumtree_description = Epidemic Broadcast Trees
+pkg_plumtree_homepage = https://github.com/helium/plumtree
+pkg_plumtree_fetch = git
+pkg_plumtree_repo = https://github.com/helium/plumtree
+pkg_plumtree_commit = master
+
+PACKAGES += pmod_transform
+pkg_pmod_transform_name = pmod_transform
+pkg_pmod_transform_description = Parse transform for parameterized modules
+pkg_pmod_transform_homepage = https://github.com/erlang/pmod_transform
+pkg_pmod_transform_fetch = git
+pkg_pmod_transform_repo = https://github.com/erlang/pmod_transform
+pkg_pmod_transform_commit = master
+
+PACKAGES += pobox
+pkg_pobox_name = pobox
+pkg_pobox_description = External buffer processes to protect against mailbox overflow in Erlang
+pkg_pobox_homepage = https://github.com/ferd/pobox
+pkg_pobox_fetch = git
+pkg_pobox_repo = https://github.com/ferd/pobox
+pkg_pobox_commit = master
+
+PACKAGES += ponos
+pkg_ponos_name = ponos
+pkg_ponos_description = ponos is a simple yet powerful load generator written in Erlang
+pkg_ponos_homepage = https://github.com/klarna/ponos
+pkg_ponos_fetch = git
+pkg_ponos_repo = https://github.com/klarna/ponos
+pkg_ponos_commit = master
+
+PACKAGES += poolboy
+pkg_poolboy_name = poolboy
+pkg_poolboy_description = A hunky Erlang worker pool factory
+pkg_poolboy_homepage = https://github.com/devinus/poolboy
+pkg_poolboy_fetch = git
+pkg_poolboy_repo = https://github.com/devinus/poolboy
+pkg_poolboy_commit = master
+
+PACKAGES += pooler
+pkg_pooler_name = pooler
+pkg_pooler_description = An OTP Process Pool Application
+pkg_pooler_homepage = https://github.com/seth/pooler
+pkg_pooler_fetch = git
+pkg_pooler_repo = https://github.com/seth/pooler
+pkg_pooler_commit = master
+
+PACKAGES += pqueue
+pkg_pqueue_name = pqueue
+pkg_pqueue_description = Erlang Priority Queues
+pkg_pqueue_homepage = https://github.com/okeuday/pqueue
+pkg_pqueue_fetch = git
+pkg_pqueue_repo = https://github.com/okeuday/pqueue
+pkg_pqueue_commit = master
+
+PACKAGES += procket
+pkg_procket_name = procket
+pkg_procket_description = Erlang interface to low level socket operations
+pkg_procket_homepage = http://blog.listincomprehension.com/search/label/procket
+pkg_procket_fetch = git
+pkg_procket_repo = https://github.com/msantos/procket
+pkg_procket_commit = master
+
+PACKAGES += prometheus
+pkg_prometheus_name = prometheus
+pkg_prometheus_description = Prometheus.io client in Erlang
+pkg_prometheus_homepage = https://github.com/deadtrickster/prometheus.erl
+pkg_prometheus_fetch = git
+pkg_prometheus_repo = https://github.com/deadtrickster/prometheus.erl
+pkg_prometheus_commit = master
+
+PACKAGES += prop
+pkg_prop_name = prop
+pkg_prop_description = An Erlang code scaffolding and generator system.
+pkg_prop_homepage = https://github.com/nuex/prop
+pkg_prop_fetch = git
+pkg_prop_repo = https://github.com/nuex/prop
+pkg_prop_commit = master
+
+PACKAGES += proper
+pkg_proper_name = proper
+pkg_proper_description = PropEr: a QuickCheck-inspired property-based testing tool for Erlang.
+pkg_proper_homepage = http://proper.softlab.ntua.gr
+pkg_proper_fetch = git
+pkg_proper_repo = https://github.com/manopapad/proper
+pkg_proper_commit = master
+
+PACKAGES += props
+pkg_props_name = props
+pkg_props_description = Property structure library
+pkg_props_homepage = https://github.com/greyarea/props
+pkg_props_fetch = git
+pkg_props_repo = https://github.com/greyarea/props
+pkg_props_commit = master
+
+PACKAGES += protobuffs
+pkg_protobuffs_name = protobuffs
+pkg_protobuffs_description = An implementation of Google's Protocol Buffers for Erlang, based on ngerakines/erlang_protobuffs.
+pkg_protobuffs_homepage = https://github.com/basho/erlang_protobuffs
+pkg_protobuffs_fetch = git
+pkg_protobuffs_repo = https://github.com/basho/erlang_protobuffs
+pkg_protobuffs_commit = master
+
+PACKAGES += psycho
+pkg_psycho_name = psycho
+pkg_psycho_description = HTTP server that provides a WSGI-like interface for applications and middleware.
+pkg_psycho_homepage = https://github.com/gar1t/psycho
+pkg_psycho_fetch = git
+pkg_psycho_repo = https://github.com/gar1t/psycho
+pkg_psycho_commit = master
+
+PACKAGES += purity
+pkg_purity_name = purity
+pkg_purity_description = A side-effect analyzer for Erlang
+pkg_purity_homepage = https://github.com/mpitid/purity
+pkg_purity_fetch = git
+pkg_purity_repo = https://github.com/mpitid/purity
+pkg_purity_commit = master
+
+PACKAGES += push_service
+pkg_push_service_name = push_service
+pkg_push_service_description = Push service
+pkg_push_service_homepage = https://github.com/hairyhum/push_service
+pkg_push_service_fetch = git
+pkg_push_service_repo = https://github.com/hairyhum/push_service
+pkg_push_service_commit = master
+
+PACKAGES += qdate
+pkg_qdate_name = qdate
+pkg_qdate_description = Date, time, and timezone parsing, formatting, and conversion for Erlang.
+pkg_qdate_homepage = https://github.com/choptastic/qdate
+pkg_qdate_fetch = git
+pkg_qdate_repo = https://github.com/choptastic/qdate
+pkg_qdate_commit = master
+
+PACKAGES += qrcode
+pkg_qrcode_name = qrcode
+pkg_qrcode_description = QR Code encoder in Erlang
+pkg_qrcode_homepage = https://github.com/komone/qrcode
+pkg_qrcode_fetch = git
+pkg_qrcode_repo = https://github.com/komone/qrcode
+pkg_qrcode_commit = master
+
+PACKAGES += quest
+pkg_quest_name = quest
+pkg_quest_description = Learn Erlang through this set of challenges. An interactive system for getting to know Erlang.
+pkg_quest_homepage = https://github.com/eriksoe/ErlangQuest
+pkg_quest_fetch = git
+pkg_quest_repo = https://github.com/eriksoe/ErlangQuest
+pkg_quest_commit = master
+
+PACKAGES += quickrand
+pkg_quickrand_name = quickrand
+pkg_quickrand_description = Quick Erlang Random Number Generation
+pkg_quickrand_homepage = https://github.com/okeuday/quickrand
+pkg_quickrand_fetch = git
+pkg_quickrand_repo = https://github.com/okeuday/quickrand
+pkg_quickrand_commit = master
+
+PACKAGES += rabbit
+pkg_rabbit_name = rabbit
+pkg_rabbit_description = RabbitMQ Server
+pkg_rabbit_homepage = https://www.rabbitmq.com/
+pkg_rabbit_fetch = git
+pkg_rabbit_repo = https://github.com/rabbitmq/rabbitmq-server.git
+pkg_rabbit_commit = master
+
+PACKAGES += rabbit_exchange_type_riak
+pkg_rabbit_exchange_type_riak_name = rabbit_exchange_type_riak
+pkg_rabbit_exchange_type_riak_description = Custom RabbitMQ exchange type for sticking messages in Riak
+pkg_rabbit_exchange_type_riak_homepage = https://github.com/jbrisbin/riak-exchange
+pkg_rabbit_exchange_type_riak_fetch = git
+pkg_rabbit_exchange_type_riak_repo = https://github.com/jbrisbin/riak-exchange
+pkg_rabbit_exchange_type_riak_commit = master
+
+PACKAGES += rack
+pkg_rack_name = rack
+pkg_rack_description = Rack handler for Erlang
+pkg_rack_homepage = https://github.com/erlyvideo/rack
+pkg_rack_fetch = git
+pkg_rack_repo = https://github.com/erlyvideo/rack
+pkg_rack_commit = master
+
+PACKAGES += radierl
+pkg_radierl_name = radierl
+pkg_radierl_description = RADIUS protocol stack implemented in Erlang.
+pkg_radierl_homepage = https://github.com/vances/radierl
+pkg_radierl_fetch = git
+pkg_radierl_repo = https://github.com/vances/radierl
+pkg_radierl_commit = master
+
+PACKAGES += rafter
+pkg_rafter_name = rafter
+pkg_rafter_description = An Erlang library application which implements the Raft consensus protocol
+pkg_rafter_homepage = https://github.com/andrewjstone/rafter
+pkg_rafter_fetch = git
+pkg_rafter_repo = https://github.com/andrewjstone/rafter
+pkg_rafter_commit = master
+
+PACKAGES += ranch
+pkg_ranch_name = ranch
+pkg_ranch_description = Socket acceptor pool for TCP protocols.
+pkg_ranch_homepage = http://ninenines.eu
+pkg_ranch_fetch = git
+pkg_ranch_repo = https://github.com/ninenines/ranch
+pkg_ranch_commit = 1.2.1
+
+PACKAGES += rbeacon
+pkg_rbeacon_name = rbeacon
+pkg_rbeacon_description = LAN discovery and presence in Erlang.
+pkg_rbeacon_homepage = https://github.com/refuge/rbeacon
+pkg_rbeacon_fetch = git
+pkg_rbeacon_repo = https://github.com/refuge/rbeacon
+pkg_rbeacon_commit = master
+
+PACKAGES += rebar
+pkg_rebar_name = rebar
+pkg_rebar_description = Erlang build tool that makes it easy to compile and test Erlang applications, port drivers and releases.
+pkg_rebar_homepage = http://www.rebar3.org
+pkg_rebar_fetch = git
+pkg_rebar_repo = https://github.com/rebar/rebar3
+pkg_rebar_commit = master
+
+PACKAGES += rebus
+pkg_rebus_name = rebus
+pkg_rebus_description = A stupid simple, internal, pub/sub event bus written in and for Erlang.
+pkg_rebus_homepage = https://github.com/olle/rebus
+pkg_rebus_fetch = git
+pkg_rebus_repo = https://github.com/olle/rebus
+pkg_rebus_commit = master
+
+PACKAGES += rec2json
+pkg_rec2json_name = rec2json
+pkg_rec2json_description = Compile Erlang record definitions into modules to convert them to/from JSON easily.
+pkg_rec2json_homepage = https://github.com/lordnull/rec2json
+pkg_rec2json_fetch = git
+pkg_rec2json_repo = https://github.com/lordnull/rec2json
+pkg_rec2json_commit = master
+
+PACKAGES += recon
+pkg_recon_name = recon
+pkg_recon_description = Collection of functions and scripts to debug Erlang in production.
+pkg_recon_homepage = https://github.com/ferd/recon
+pkg_recon_fetch = git
+pkg_recon_repo = https://github.com/ferd/recon
+pkg_recon_commit = master
+
+PACKAGES += record_info
+pkg_record_info_name = record_info
+pkg_record_info_description = Convert between record and proplist
+pkg_record_info_homepage = https://github.com/bipthelin/erlang-record_info
+pkg_record_info_fetch = git
+pkg_record_info_repo = https://github.com/bipthelin/erlang-record_info
+pkg_record_info_commit = master
+
+PACKAGES += redgrid
+pkg_redgrid_name = redgrid
+pkg_redgrid_description = Automatic Erlang node discovery via Redis
+pkg_redgrid_homepage = https://github.com/jkvor/redgrid
+pkg_redgrid_fetch = git
+pkg_redgrid_repo = https://github.com/jkvor/redgrid
+pkg_redgrid_commit = master
+
+PACKAGES += redo
+pkg_redo_name = redo
+pkg_redo_description = Pipelined Erlang Redis client
+pkg_redo_homepage = https://github.com/jkvor/redo
+pkg_redo_fetch = git
+pkg_redo_repo = https://github.com/jkvor/redo
+pkg_redo_commit = master
+
+PACKAGES += reload_mk
+pkg_reload_mk_name = reload_mk
+pkg_reload_mk_description = Live reload plugin for erlang.mk.
+pkg_reload_mk_homepage = https://github.com/bullno1/reload.mk
+pkg_reload_mk_fetch = git
+pkg_reload_mk_repo = https://github.com/bullno1/reload.mk
+pkg_reload_mk_commit = master
+
+PACKAGES += reltool_util
+pkg_reltool_util_name = reltool_util
+pkg_reltool_util_description = Erlang reltool utility functionality application
+pkg_reltool_util_homepage = https://github.com/okeuday/reltool_util
+pkg_reltool_util_fetch = git
+pkg_reltool_util_repo = https://github.com/okeuday/reltool_util
+pkg_reltool_util_commit = master
+
+PACKAGES += relx
+pkg_relx_name = relx
+pkg_relx_description = Sane, simple release creation for Erlang
+pkg_relx_homepage = https://github.com/erlware/relx
+pkg_relx_fetch = git
+pkg_relx_repo = https://github.com/erlware/relx
+pkg_relx_commit = master
+
+PACKAGES += resource_discovery
+pkg_resource_discovery_name = resource_discovery
+pkg_resource_discovery_description = An application used to dynamically discover resources present in an Erlang node cluster.
+pkg_resource_discovery_homepage = http://erlware.org/
+pkg_resource_discovery_fetch = git
+pkg_resource_discovery_repo = https://github.com/erlware/resource_discovery
+pkg_resource_discovery_commit = master
+
+PACKAGES += restc
+pkg_restc_name = restc
+pkg_restc_description = Erlang REST client
+pkg_restc_homepage = https://github.com/kivra/restclient
+pkg_restc_fetch = git
+pkg_restc_repo = https://github.com/kivra/restclient
+pkg_restc_commit = master
+
+PACKAGES += rfc4627_jsonrpc
+pkg_rfc4627_jsonrpc_name = rfc4627_jsonrpc
+pkg_rfc4627_jsonrpc_description = Erlang RFC4627 (JSON) codec and JSON-RPC server implementation.
+pkg_rfc4627_jsonrpc_homepage = https://github.com/tonyg/erlang-rfc4627
+pkg_rfc4627_jsonrpc_fetch = git
+pkg_rfc4627_jsonrpc_repo = https://github.com/tonyg/erlang-rfc4627
+pkg_rfc4627_jsonrpc_commit = master
+
+PACKAGES += riak_control
+pkg_riak_control_name = riak_control
+pkg_riak_control_description = Webmachine-based administration interface for Riak.
+pkg_riak_control_homepage = https://github.com/basho/riak_control
+pkg_riak_control_fetch = git
+pkg_riak_control_repo = https://github.com/basho/riak_control
+pkg_riak_control_commit = master
+
+PACKAGES += riak_core
+pkg_riak_core_name = riak_core
+pkg_riak_core_description = Distributed systems infrastructure used by Riak.
+pkg_riak_core_homepage = https://github.com/basho/riak_core
+pkg_riak_core_fetch = git
+pkg_riak_core_repo = https://github.com/basho/riak_core
+pkg_riak_core_commit = master
+
+PACKAGES += riak_dt
+pkg_riak_dt_name = riak_dt
+pkg_riak_dt_description = Convergent replicated datatypes in Erlang
+pkg_riak_dt_homepage = https://github.com/basho/riak_dt
+pkg_riak_dt_fetch = git
+pkg_riak_dt_repo = https://github.com/basho/riak_dt
+pkg_riak_dt_commit = master
+
+PACKAGES += riak_ensemble
+pkg_riak_ensemble_name = riak_ensemble
+pkg_riak_ensemble_description = Multi-Paxos framework in Erlang
+pkg_riak_ensemble_homepage = https://github.com/basho/riak_ensemble
+pkg_riak_ensemble_fetch = git
+pkg_riak_ensemble_repo = https://github.com/basho/riak_ensemble
+pkg_riak_ensemble_commit = master
+
+PACKAGES += riak_kv
+pkg_riak_kv_name = riak_kv
+pkg_riak_kv_description = Riak Key/Value Store
+pkg_riak_kv_homepage = https://github.com/basho/riak_kv
+pkg_riak_kv_fetch = git
+pkg_riak_kv_repo = https://github.com/basho/riak_kv
+pkg_riak_kv_commit = master
+
+PACKAGES += riak_pg
+pkg_riak_pg_name = riak_pg
+pkg_riak_pg_description = Distributed process groups with riak_core.
+pkg_riak_pg_homepage = https://github.com/cmeiklejohn/riak_pg
+pkg_riak_pg_fetch = git
+pkg_riak_pg_repo = https://github.com/cmeiklejohn/riak_pg
+pkg_riak_pg_commit = master
+
+PACKAGES += riak_pipe
+pkg_riak_pipe_name = riak_pipe
+pkg_riak_pipe_description = Riak Pipelines
+pkg_riak_pipe_homepage = https://github.com/basho/riak_pipe
+pkg_riak_pipe_fetch = git
+pkg_riak_pipe_repo = https://github.com/basho/riak_pipe
+pkg_riak_pipe_commit = master
+
+PACKAGES += riak_sysmon
+pkg_riak_sysmon_name = riak_sysmon
+pkg_riak_sysmon_description = Simple OTP app for managing Erlang VM system_monitor event messages
+pkg_riak_sysmon_homepage = https://github.com/basho/riak_sysmon
+pkg_riak_sysmon_fetch = git
+pkg_riak_sysmon_repo = https://github.com/basho/riak_sysmon
+pkg_riak_sysmon_commit = master
+
+PACKAGES += riak_test
+pkg_riak_test_name = riak_test
+pkg_riak_test_description = I'm in your cluster, testing your riaks
+pkg_riak_test_homepage = https://github.com/basho/riak_test
+pkg_riak_test_fetch = git
+pkg_riak_test_repo = https://github.com/basho/riak_test
+pkg_riak_test_commit = master
+
+PACKAGES += riakc
+pkg_riakc_name = riakc
+pkg_riakc_description = Erlang clients for Riak.
+pkg_riakc_homepage = https://github.com/basho/riak-erlang-client
+pkg_riakc_fetch = git
+pkg_riakc_repo = https://github.com/basho/riak-erlang-client
+pkg_riakc_commit = master
+
+PACKAGES += riakhttpc
+pkg_riakhttpc_name = riakhttpc
+pkg_riakhttpc_description = Riak Erlang client using the HTTP interface
+pkg_riakhttpc_homepage = https://github.com/basho/riak-erlang-http-client
+pkg_riakhttpc_fetch = git
+pkg_riakhttpc_repo = https://github.com/basho/riak-erlang-http-client
+pkg_riakhttpc_commit = master
+
+PACKAGES += riaknostic
+pkg_riaknostic_name = riaknostic
+pkg_riaknostic_description = A diagnostic tool for Riak installations, to find common errors asap
+pkg_riaknostic_homepage = https://github.com/basho/riaknostic
+pkg_riaknostic_fetch = git
+pkg_riaknostic_repo = https://github.com/basho/riaknostic
+pkg_riaknostic_commit = master
+
+PACKAGES += riakpool
+pkg_riakpool_name = riakpool
+pkg_riakpool_description = Erlang Riak client pool
+pkg_riakpool_homepage = https://github.com/dweldon/riakpool
+pkg_riakpool_fetch = git
+pkg_riakpool_repo = https://github.com/dweldon/riakpool
+pkg_riakpool_commit = master
+
+PACKAGES += rivus_cep
+pkg_rivus_cep_name = rivus_cep
+pkg_rivus_cep_description = Complex event processing in Erlang
+pkg_rivus_cep_homepage = https://github.com/vascokk/rivus_cep
+pkg_rivus_cep_fetch = git
+pkg_rivus_cep_repo = https://github.com/vascokk/rivus_cep
+pkg_rivus_cep_commit = master
+
+PACKAGES += rlimit
+pkg_rlimit_name = rlimit
+pkg_rlimit_description = Magnus Klaar's rate limiter code from etorrent
+pkg_rlimit_homepage = https://github.com/jlouis/rlimit
+pkg_rlimit_fetch = git
+pkg_rlimit_repo = https://github.com/jlouis/rlimit
+pkg_rlimit_commit = master
+
+PACKAGES += rust_mk
+pkg_rust_mk_name = rust_mk
+pkg_rust_mk_description = Build Rust crates in an Erlang application
+pkg_rust_mk_homepage = https://github.com/goertzenator/rust.mk
+pkg_rust_mk_fetch = git
+pkg_rust_mk_repo = https://github.com/goertzenator/rust.mk
+pkg_rust_mk_commit = master
+
+PACKAGES += safetyvalve
+pkg_safetyvalve_name = safetyvalve
+pkg_safetyvalve_description = A safety valve for your Erlang node
+pkg_safetyvalve_homepage = https://github.com/jlouis/safetyvalve
+pkg_safetyvalve_fetch = git
+pkg_safetyvalve_repo = https://github.com/jlouis/safetyvalve
+pkg_safetyvalve_commit = master
+
+PACKAGES += seestar
+pkg_seestar_name = seestar
+pkg_seestar_description = The Erlang client for Cassandra 1.2+ binary protocol
+pkg_seestar_homepage = https://github.com/iamaleksey/seestar
+pkg_seestar_fetch = git
+pkg_seestar_repo = https://github.com/iamaleksey/seestar
+pkg_seestar_commit = master
+
+PACKAGES += service
+pkg_service_name = service
+pkg_service_description = A minimal Erlang behavior for creating CloudI internal services
+pkg_service_homepage = http://cloudi.org/
+pkg_service_fetch = git
+pkg_service_repo = https://github.com/CloudI/service
+pkg_service_commit = master
+
+PACKAGES += setup
+pkg_setup_name = setup
+pkg_setup_description = Generic setup utility for Erlang-based systems
+pkg_setup_homepage = https://github.com/uwiger/setup
+pkg_setup_fetch = git
+pkg_setup_repo = https://github.com/uwiger/setup
+pkg_setup_commit = master
+
+PACKAGES += sext
+pkg_sext_name = sext
+pkg_sext_description = Sortable Erlang Term Serialization
+pkg_sext_homepage = https://github.com/uwiger/sext
+pkg_sext_fetch = git
+pkg_sext_repo = https://github.com/uwiger/sext
+pkg_sext_commit = master
+
+PACKAGES += sfmt
+pkg_sfmt_name = sfmt
+pkg_sfmt_description = SFMT pseudo random number generator for Erlang.
+pkg_sfmt_homepage = https://github.com/jj1bdx/sfmt-erlang
+pkg_sfmt_fetch = git
+pkg_sfmt_repo = https://github.com/jj1bdx/sfmt-erlang
+pkg_sfmt_commit = master
+
+PACKAGES += sgte
+pkg_sgte_name = sgte
+pkg_sgte_description = A simple Erlang Template Engine
+pkg_sgte_homepage = https://github.com/filippo/sgte
+pkg_sgte_fetch = git
+pkg_sgte_repo = https://github.com/filippo/sgte
+pkg_sgte_commit = master
+
+PACKAGES += sheriff
+pkg_sheriff_name = sheriff
+pkg_sheriff_description = Parse transform for type based validation.
+pkg_sheriff_homepage = http://ninenines.eu
+pkg_sheriff_fetch = git
+pkg_sheriff_repo = https://github.com/extend/sheriff
+pkg_sheriff_commit = master
+
+PACKAGES += shotgun
+pkg_shotgun_name = shotgun
+pkg_shotgun_description = better than just a gun
+pkg_shotgun_homepage = https://github.com/inaka/shotgun
+pkg_shotgun_fetch = git
+pkg_shotgun_repo = https://github.com/inaka/shotgun
+pkg_shotgun_commit = master
+
+PACKAGES += sidejob
+pkg_sidejob_name = sidejob
+pkg_sidejob_description = Parallel worker and capacity limiting library for Erlang
+pkg_sidejob_homepage = https://github.com/basho/sidejob
+pkg_sidejob_fetch = git
+pkg_sidejob_repo = https://github.com/basho/sidejob
+pkg_sidejob_commit = master
+
+PACKAGES += sieve
+pkg_sieve_name = sieve
+pkg_sieve_description = sieve is a simple TCP routing proxy (layer 7) in Erlang
+pkg_sieve_homepage = https://github.com/benoitc/sieve
+pkg_sieve_fetch = git
+pkg_sieve_repo = https://github.com/benoitc/sieve
+pkg_sieve_commit = master
+
+PACKAGES += sighandler
+pkg_sighandler_name = sighandler
+pkg_sighandler_description = Handle UNIX signals in Erlang
+pkg_sighandler_homepage = https://github.com/jkingsbery/sighandler
+pkg_sighandler_fetch = git
+pkg_sighandler_repo = https://github.com/jkingsbery/sighandler
+pkg_sighandler_commit = master
+
+PACKAGES += simhash
+pkg_simhash_name = simhash
+pkg_simhash_description = Simhashing for Erlang -- hashing algorithm to find near-duplicates in binary data.
+pkg_simhash_homepage = https://github.com/ferd/simhash
+pkg_simhash_fetch = git
+pkg_simhash_repo = https://github.com/ferd/simhash
+pkg_simhash_commit = master
+
+PACKAGES += simple_bridge
+pkg_simple_bridge_name = simple_bridge
+pkg_simple_bridge_description = A simple, standardized interface library to Erlang HTTP Servers.
+pkg_simple_bridge_homepage = https://github.com/nitrogen/simple_bridge
+pkg_simple_bridge_fetch = git
+pkg_simple_bridge_repo = https://github.com/nitrogen/simple_bridge
+pkg_simple_bridge_commit = master
+
+PACKAGES += simple_oauth2
+pkg_simple_oauth2_name = simple_oauth2
+pkg_simple_oauth2_description = Simple Erlang OAuth2 client module for any HTTP server framework (Google, Facebook, Yandex, Vkontakte are preconfigured)
+pkg_simple_oauth2_homepage = https://github.com/virtan/simple_oauth2
+pkg_simple_oauth2_fetch = git
+pkg_simple_oauth2_repo = https://github.com/virtan/simple_oauth2
+pkg_simple_oauth2_commit = master
+
+PACKAGES += skel
+pkg_skel_name = skel
+pkg_skel_description = A Streaming Process-based Skeleton Library for Erlang
+pkg_skel_homepage = https://github.com/ParaPhrase/skel
+pkg_skel_fetch = git
+pkg_skel_repo = https://github.com/ParaPhrase/skel
+pkg_skel_commit = master
+
+PACKAGES += slack
+pkg_slack_name = slack
+pkg_slack_description = Minimal slack notification OTP library.
+pkg_slack_homepage = https://github.com/DonBranson/slack
+pkg_slack_fetch = git
+pkg_slack_repo = https://github.com/DonBranson/slack.git
+pkg_slack_commit = master
+
+PACKAGES += smother
+pkg_smother_name = smother
+pkg_smother_description = Extended code coverage metrics for Erlang.
+pkg_smother_homepage = https://ramsay-t.github.io/Smother/
+pkg_smother_fetch = git
+pkg_smother_repo = https://github.com/ramsay-t/Smother
+pkg_smother_commit = master
+
+PACKAGES += snappyer
+pkg_snappyer_name = snappyer
+pkg_snappyer_description = Snappy as a NIF for Erlang
+pkg_snappyer_homepage = https://github.com/zmstone/snappyer
+pkg_snappyer_fetch = git
+pkg_snappyer_repo = https://github.com/zmstone/snappyer.git
+pkg_snappyer_commit = master
+
+PACKAGES += social
+pkg_social_name = social
+pkg_social_description = Cowboy handler for social login via OAuth2 providers
+pkg_social_homepage = https://github.com/dvv/social
+pkg_social_fetch = git
+pkg_social_repo = https://github.com/dvv/social
+pkg_social_commit = master
+
+PACKAGES += spapi_router
+pkg_spapi_router_name = spapi_router
+pkg_spapi_router_description = Partially-connected Erlang clustering
+pkg_spapi_router_homepage = https://github.com/spilgames/spapi-router
+pkg_spapi_router_fetch = git
+pkg_spapi_router_repo = https://github.com/spilgames/spapi-router
+pkg_spapi_router_commit = master
+
+PACKAGES += sqerl
+pkg_sqerl_name = sqerl
+pkg_sqerl_description = An Erlang-flavoured SQL DSL
+pkg_sqerl_homepage = https://github.com/hairyhum/sqerl
+pkg_sqerl_fetch = git
+pkg_sqerl_repo = https://github.com/hairyhum/sqerl
+pkg_sqerl_commit = master
+
+PACKAGES += srly
+pkg_srly_name = srly
+pkg_srly_description = Native Erlang Unix serial interface
+pkg_srly_homepage = https://github.com/msantos/srly
+pkg_srly_fetch = git
+pkg_srly_repo = https://github.com/msantos/srly
+pkg_srly_commit = master
+
+PACKAGES += sshrpc
+pkg_sshrpc_name = sshrpc
+pkg_sshrpc_description = Erlang SSH RPC module (experimental)
+pkg_sshrpc_homepage = https://github.com/jj1bdx/sshrpc
+pkg_sshrpc_fetch = git
+pkg_sshrpc_repo = https://github.com/jj1bdx/sshrpc
+pkg_sshrpc_commit = master
+
+PACKAGES += stable
+pkg_stable_name = stable
+pkg_stable_description = Library of assorted helpers for the Cowboy web server.
+pkg_stable_homepage = https://github.com/dvv/stable
+pkg_stable_fetch = git
+pkg_stable_repo = https://github.com/dvv/stable
+pkg_stable_commit = master
+
+PACKAGES += statebox
+pkg_statebox_name = statebox
+pkg_statebox_description = Erlang state monad with merge/conflict-resolution capabilities. Useful for Riak.
+pkg_statebox_homepage = https://github.com/mochi/statebox
+pkg_statebox_fetch = git
+pkg_statebox_repo = https://github.com/mochi/statebox
+pkg_statebox_commit = master
+
+PACKAGES += statebox_riak
+pkg_statebox_riak_name = statebox_riak
+pkg_statebox_riak_description = Convenience library that makes it easier to use statebox with riak, extracted from best practices in our production code at Mochi Media.
+pkg_statebox_riak_homepage = https://github.com/mochi/statebox_riak
+pkg_statebox_riak_fetch = git
+pkg_statebox_riak_repo = https://github.com/mochi/statebox_riak
+pkg_statebox_riak_commit = master
+
+PACKAGES += statman
+pkg_statman_name = statman
+pkg_statman_description = Efficiently collect massive volumes of metrics inside the Erlang VM
+pkg_statman_homepage = https://github.com/knutin/statman
+pkg_statman_fetch = git
+pkg_statman_repo = https://github.com/knutin/statman
+pkg_statman_commit = master
+
+PACKAGES += statsderl
+pkg_statsderl_name = statsderl
+pkg_statsderl_description = StatsD client (Erlang)
+pkg_statsderl_homepage = https://github.com/lpgauth/statsderl
+pkg_statsderl_fetch = git
+pkg_statsderl_repo = https://github.com/lpgauth/statsderl
+pkg_statsderl_commit = master
+
+PACKAGES += stdinout_pool
+pkg_stdinout_pool_name = stdinout_pool
+pkg_stdinout_pool_description = stdinout_pool : stuff goes in, stuff goes out. there's never any miscommunication.
+pkg_stdinout_pool_homepage = https://github.com/mattsta/erlang-stdinout-pool
+pkg_stdinout_pool_fetch = git
+pkg_stdinout_pool_repo = https://github.com/mattsta/erlang-stdinout-pool
+pkg_stdinout_pool_commit = master
+
+PACKAGES += stockdb
+pkg_stockdb_name = stockdb
+pkg_stockdb_description = Database for storing stock exchange quotes in Erlang
+pkg_stockdb_homepage = https://github.com/maxlapshin/stockdb
+pkg_stockdb_fetch = git
+pkg_stockdb_repo = https://github.com/maxlapshin/stockdb
+pkg_stockdb_commit = master
+
+PACKAGES += stripe
+pkg_stripe_name = stripe
+pkg_stripe_description = Erlang interface to the stripe.com API
+pkg_stripe_homepage = https://github.com/mattsta/stripe-erlang
+pkg_stripe_fetch = git
+pkg_stripe_repo = https://github.com/mattsta/stripe-erlang
+pkg_stripe_commit = v1
+
+PACKAGES += subproc
+pkg_subproc_name = subproc
+pkg_subproc_description = UNIX subprocess manager with {active,once|false} modes
+pkg_subproc_homepage = http://dozzie.jarowit.net/trac/wiki/subproc
+pkg_subproc_fetch = git
+pkg_subproc_repo = https://github.com/dozzie/subproc
+pkg_subproc_commit = v0.1.0
+
+PACKAGES += supervisor3
+pkg_supervisor3_name = supervisor3
+pkg_supervisor3_description = OTP supervisor with additional strategies
+pkg_supervisor3_homepage = https://github.com/klarna/supervisor3
+pkg_supervisor3_fetch = git
+pkg_supervisor3_repo = https://github.com/klarna/supervisor3.git
+pkg_supervisor3_commit = master
+
+PACKAGES += surrogate
+pkg_surrogate_name = surrogate
+pkg_surrogate_description = Proxy server written in Erlang. Supports reverse proxy load balancing and forward proxy with HTTP (including CONNECT), SOCKS4, SOCKS5, and transparent proxy modes.
+pkg_surrogate_homepage = https://github.com/skruger/Surrogate
+pkg_surrogate_fetch = git
+pkg_surrogate_repo = https://github.com/skruger/Surrogate
+pkg_surrogate_commit = master
+
+PACKAGES += swab
+pkg_swab_name = swab
+pkg_swab_description = General purpose buffer handling module
+pkg_swab_homepage = https://github.com/crownedgrouse/swab
+pkg_swab_fetch = git
+pkg_swab_repo = https://github.com/crownedgrouse/swab
+pkg_swab_commit = master
+
+PACKAGES += swarm
+pkg_swarm_name = swarm
+pkg_swarm_description = Fast and simple acceptor pool for Erlang
+pkg_swarm_homepage = https://github.com/jeremey/swarm
+pkg_swarm_fetch = git
+pkg_swarm_repo = https://github.com/jeremey/swarm
+pkg_swarm_commit = master
+
+PACKAGES += switchboard
+pkg_switchboard_name = switchboard
+pkg_switchboard_description = A framework for processing email using worker plugins.
+pkg_switchboard_homepage = https://github.com/thusfresh/switchboard
+pkg_switchboard_fetch = git
+pkg_switchboard_repo = https://github.com/thusfresh/switchboard
+pkg_switchboard_commit = master
+
+PACKAGES += syn
+pkg_syn_name = syn
+pkg_syn_description = A global Process Registry and Process Group manager for Erlang.
+pkg_syn_homepage = https://github.com/ostinelli/syn
+pkg_syn_fetch = git
+pkg_syn_repo = https://github.com/ostinelli/syn
+pkg_syn_commit = master
+
+PACKAGES += sync
+pkg_sync_name = sync
+pkg_sync_description = On-the-fly recompiling and reloading in Erlang.
+pkg_sync_homepage = https://github.com/rustyio/sync
+pkg_sync_fetch = git
+pkg_sync_repo = https://github.com/rustyio/sync
+pkg_sync_commit = master
+
+PACKAGES += syntaxerl
+pkg_syntaxerl_name = syntaxerl
+pkg_syntaxerl_description = Syntax checker for Erlang
+pkg_syntaxerl_homepage = https://github.com/ten0s/syntaxerl
+pkg_syntaxerl_fetch = git
+pkg_syntaxerl_repo = https://github.com/ten0s/syntaxerl
+pkg_syntaxerl_commit = master
+
+PACKAGES += syslog
+pkg_syslog_name = syslog
+pkg_syslog_description = Erlang port driver for interacting with syslog via syslog(3)
+pkg_syslog_homepage = https://github.com/Vagabond/erlang-syslog
+pkg_syslog_fetch = git
+pkg_syslog_repo = https://github.com/Vagabond/erlang-syslog
+pkg_syslog_commit = master
+
+PACKAGES += taskforce
+pkg_taskforce_name = taskforce
+pkg_taskforce_description = Erlang worker pools for controlled parallelisation of arbitrary tasks.
+pkg_taskforce_homepage = https://github.com/g-andrade/taskforce
+pkg_taskforce_fetch = git
+pkg_taskforce_repo = https://github.com/g-andrade/taskforce
+pkg_taskforce_commit = master
+
+PACKAGES += tddreloader
+pkg_tddreloader_name = tddreloader
+pkg_tddreloader_description = Shell utility for recompiling, reloading, and testing code as it changes
+pkg_tddreloader_homepage = https://github.com/version2beta/tddreloader
+pkg_tddreloader_fetch = git
+pkg_tddreloader_repo = https://github.com/version2beta/tddreloader
+pkg_tddreloader_commit = master
+
+PACKAGES += tempo
+pkg_tempo_name = tempo
+pkg_tempo_description = NIF-based date and time parsing and formatting for Erlang.
+pkg_tempo_homepage = https://github.com/selectel/tempo
+pkg_tempo_fetch = git
+pkg_tempo_repo = https://github.com/selectel/tempo
+pkg_tempo_commit = master
+
+PACKAGES += ticktick
+pkg_ticktick_name = ticktick
+pkg_ticktick_description = Ticktick is an ID generator for message services.
+pkg_ticktick_homepage = https://github.com/ericliang/ticktick
+pkg_ticktick_fetch = git
+pkg_ticktick_repo = https://github.com/ericliang/ticktick
+pkg_ticktick_commit = master
+
+PACKAGES += tinymq
+pkg_tinymq_name = tinymq
+pkg_tinymq_description = TinyMQ - a diminutive, in-memory message queue
+pkg_tinymq_homepage = https://github.com/ChicagoBoss/tinymq
+pkg_tinymq_fetch = git
+pkg_tinymq_repo = https://github.com/ChicagoBoss/tinymq
+pkg_tinymq_commit = master
+
+PACKAGES += tinymt
+pkg_tinymt_name = tinymt
+pkg_tinymt_description = TinyMT pseudo random number generator for Erlang.
+pkg_tinymt_homepage = https://github.com/jj1bdx/tinymt-erlang
+pkg_tinymt_fetch = git
+pkg_tinymt_repo = https://github.com/jj1bdx/tinymt-erlang
+pkg_tinymt_commit = master
+
+PACKAGES += tirerl
+pkg_tirerl_name = tirerl
+pkg_tirerl_description = Erlang interface to Elasticsearch
+pkg_tirerl_homepage = https://github.com/inaka/tirerl
+pkg_tirerl_fetch = git
+pkg_tirerl_repo = https://github.com/inaka/tirerl
+pkg_tirerl_commit = master
+
+PACKAGES += toml
+pkg_toml_name = toml
+pkg_toml_description = TOML (0.4.0) config parser
+pkg_toml_homepage = http://dozzie.jarowit.net/trac/wiki/TOML
+pkg_toml_fetch = git
+pkg_toml_repo = https://github.com/dozzie/toml
+pkg_toml_commit = v0.2.0
+
+PACKAGES += traffic_tools
+pkg_traffic_tools_name = traffic_tools
+pkg_traffic_tools_description = Simple traffic limiting library
+pkg_traffic_tools_homepage = https://github.com/systra/traffic_tools
+pkg_traffic_tools_fetch = git
+pkg_traffic_tools_repo = https://github.com/systra/traffic_tools
+pkg_traffic_tools_commit = master
+
+PACKAGES += trails
+pkg_trails_name = trails
+pkg_trails_description = A couple of improvements over Cowboy Routes
+pkg_trails_homepage = http://inaka.github.io/cowboy-trails/
+pkg_trails_fetch = git
+pkg_trails_repo = https://github.com/inaka/cowboy-trails
+pkg_trails_commit = master
+
+PACKAGES += trane
+pkg_trane_name = trane
+pkg_trane_description = SAX style broken HTML parser in Erlang
+pkg_trane_homepage = https://github.com/massemanet/trane
+pkg_trane_fetch = git
+pkg_trane_repo = https://github.com/massemanet/trane
+pkg_trane_commit = master
+
+PACKAGES += transit
+pkg_transit_name = transit
+pkg_transit_description = Transit format for Erlang
+pkg_transit_homepage = https://github.com/isaiah/transit-erlang
+pkg_transit_fetch = git
+pkg_transit_repo = https://github.com/isaiah/transit-erlang
+pkg_transit_commit = master
+
+PACKAGES += trie
+pkg_trie_name = trie
+pkg_trie_description = Erlang Trie Implementation
+pkg_trie_homepage = https://github.com/okeuday/trie
+pkg_trie_fetch = git
+pkg_trie_repo = https://github.com/okeuday/trie
+pkg_trie_commit = master
+
+PACKAGES += triq
+pkg_triq_name = triq
+pkg_triq_description = Trifork QuickCheck
+pkg_triq_homepage = https://triq.gitlab.io
+pkg_triq_fetch = git
+pkg_triq_repo = https://gitlab.com/triq/triq.git
+pkg_triq_commit = master
+
+PACKAGES += tunctl
+pkg_tunctl_name = tunctl
+pkg_tunctl_description = Erlang TUN/TAP interface
+pkg_tunctl_homepage = https://github.com/msantos/tunctl
+pkg_tunctl_fetch = git
+pkg_tunctl_repo = https://github.com/msantos/tunctl
+pkg_tunctl_commit = master
+
+PACKAGES += twerl
+pkg_twerl_name = twerl
+pkg_twerl_description = Erlang client for the Twitter Streaming API
+pkg_twerl_homepage = https://github.com/lucaspiller/twerl
+pkg_twerl_fetch = git
+pkg_twerl_repo = https://github.com/lucaspiller/twerl
+pkg_twerl_commit = oauth
+
+PACKAGES += twitter_erlang
+pkg_twitter_erlang_name = twitter_erlang
+pkg_twitter_erlang_description = An Erlang twitter client
+pkg_twitter_erlang_homepage = https://github.com/ngerakines/erlang_twitter
+pkg_twitter_erlang_fetch = git
+pkg_twitter_erlang_repo = https://github.com/ngerakines/erlang_twitter
+pkg_twitter_erlang_commit = master
+
+PACKAGES += ucol_nif
+pkg_ucol_nif_name = ucol_nif
+pkg_ucol_nif_description = ICU-based collation module for Erlang
+pkg_ucol_nif_homepage = https://github.com/refuge/ucol_nif
+pkg_ucol_nif_fetch = git
+pkg_ucol_nif_repo = https://github.com/refuge/ucol_nif
+pkg_ucol_nif_commit = master
+
+PACKAGES += unicorn
+pkg_unicorn_name = unicorn
+pkg_unicorn_description = Generic configuration server
+pkg_unicorn_homepage = https://github.com/shizzard/unicorn
+pkg_unicorn_fetch = git
+pkg_unicorn_repo = https://github.com/shizzard/unicorn
+pkg_unicorn_commit = master
+
+PACKAGES += unsplit
+pkg_unsplit_name = unsplit
+pkg_unsplit_description = Resolves conflicts in Mnesia after network splits
+pkg_unsplit_homepage = https://github.com/uwiger/unsplit
+pkg_unsplit_fetch = git
+pkg_unsplit_repo = https://github.com/uwiger/unsplit
+pkg_unsplit_commit = master
+
+PACKAGES += uuid
+pkg_uuid_name = uuid
+pkg_uuid_description = Erlang UUID Implementation
+pkg_uuid_homepage = https://github.com/okeuday/uuid
+pkg_uuid_fetch = git
+pkg_uuid_repo = https://github.com/okeuday/uuid
+pkg_uuid_commit = master
+
+PACKAGES += ux
+pkg_ux_name = ux
+pkg_ux_description = Unicode eXtension for Erlang (Strings, Collation)
+pkg_ux_homepage = https://github.com/erlang-unicode/ux
+pkg_ux_fetch = git
+pkg_ux_repo = https://github.com/erlang-unicode/ux
+pkg_ux_commit = master
+
+PACKAGES += vert
+pkg_vert_name = vert
+pkg_vert_description = Erlang binding to libvirt virtualization API
+pkg_vert_homepage = https://github.com/msantos/erlang-libvirt
+pkg_vert_fetch = git
+pkg_vert_repo = https://github.com/msantos/erlang-libvirt
+pkg_vert_commit = master
+
+PACKAGES += verx
+pkg_verx_name = verx
+pkg_verx_description = Erlang implementation of the libvirtd remote protocol
+pkg_verx_homepage = https://github.com/msantos/verx
+pkg_verx_fetch = git
+pkg_verx_repo = https://github.com/msantos/verx
+pkg_verx_commit = master
+
+PACKAGES += vmq_acl
+pkg_vmq_acl_name = vmq_acl
+pkg_vmq_acl_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_acl_homepage = https://verne.mq/
+pkg_vmq_acl_fetch = git
+pkg_vmq_acl_repo = https://github.com/erlio/vmq_acl
+pkg_vmq_acl_commit = master
+
+PACKAGES += vmq_bridge
+pkg_vmq_bridge_name = vmq_bridge
+pkg_vmq_bridge_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_bridge_homepage = https://verne.mq/
+pkg_vmq_bridge_fetch = git
+pkg_vmq_bridge_repo = https://github.com/erlio/vmq_bridge
+pkg_vmq_bridge_commit = master
+
+PACKAGES += vmq_graphite
+pkg_vmq_graphite_name = vmq_graphite
+pkg_vmq_graphite_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_graphite_homepage = https://verne.mq/
+pkg_vmq_graphite_fetch = git
+pkg_vmq_graphite_repo = https://github.com/erlio/vmq_graphite
+pkg_vmq_graphite_commit = master
+
+PACKAGES += vmq_passwd
+pkg_vmq_passwd_name = vmq_passwd
+pkg_vmq_passwd_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_passwd_homepage = https://verne.mq/
+pkg_vmq_passwd_fetch = git
+pkg_vmq_passwd_repo = https://github.com/erlio/vmq_passwd
+pkg_vmq_passwd_commit = master
+
+PACKAGES += vmq_server
+pkg_vmq_server_name = vmq_server
+pkg_vmq_server_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_server_homepage = https://verne.mq/
+pkg_vmq_server_fetch = git
+pkg_vmq_server_repo = https://github.com/erlio/vmq_server
+pkg_vmq_server_commit = master
+
+PACKAGES += vmq_snmp
+pkg_vmq_snmp_name = vmq_snmp
+pkg_vmq_snmp_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_snmp_homepage = https://verne.mq/
+pkg_vmq_snmp_fetch = git
+pkg_vmq_snmp_repo = https://github.com/erlio/vmq_snmp
+pkg_vmq_snmp_commit = master
+
+PACKAGES += vmq_systree
+pkg_vmq_systree_name = vmq_systree
+pkg_vmq_systree_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_systree_homepage = https://verne.mq/
+pkg_vmq_systree_fetch = git
+pkg_vmq_systree_repo = https://github.com/erlio/vmq_systree
+pkg_vmq_systree_commit = master
+
+PACKAGES += vmstats
+pkg_vmstats_name = vmstats
+pkg_vmstats_description = Tiny Erlang app that works in conjunction with statsderl to generate Erlang VM metrics for Graphite logs.
+pkg_vmstats_homepage = https://github.com/ferd/vmstats
+pkg_vmstats_fetch = git
+pkg_vmstats_repo = https://github.com/ferd/vmstats
+pkg_vmstats_commit = master
+
+PACKAGES += walrus
+pkg_walrus_name = walrus
+pkg_walrus_description = Walrus - Mustache-like Templating
+pkg_walrus_homepage = https://github.com/devinus/walrus
+pkg_walrus_fetch = git
+pkg_walrus_repo = https://github.com/devinus/walrus
+pkg_walrus_commit = master
+
+PACKAGES += webmachine
+pkg_webmachine_name = webmachine
+pkg_webmachine_description = A REST-based system for building web applications.
+pkg_webmachine_homepage = https://github.com/basho/webmachine
+pkg_webmachine_fetch = git
+pkg_webmachine_repo = https://github.com/basho/webmachine
+pkg_webmachine_commit = master
+
+PACKAGES += websocket_client
+pkg_websocket_client_name = websocket_client
+pkg_websocket_client_description = Erlang websocket client (ws and wss supported)
+pkg_websocket_client_homepage = https://github.com/jeremyong/websocket_client
+pkg_websocket_client_fetch = git
+pkg_websocket_client_repo = https://github.com/jeremyong/websocket_client
+pkg_websocket_client_commit = master
+
+PACKAGES += worker_pool
+pkg_worker_pool_name = worker_pool
+pkg_worker_pool_description = A simple Erlang worker pool
+pkg_worker_pool_homepage = https://github.com/inaka/worker_pool
+pkg_worker_pool_fetch = git
+pkg_worker_pool_repo = https://github.com/inaka/worker_pool
+pkg_worker_pool_commit = master
+
+PACKAGES += wrangler
+pkg_wrangler_name = wrangler
+pkg_wrangler_description = Import of the Wrangler svn repository.
+pkg_wrangler_homepage = http://www.cs.kent.ac.uk/projects/wrangler/Home.html
+pkg_wrangler_fetch = git
+pkg_wrangler_repo = https://github.com/RefactoringTools/wrangler
+pkg_wrangler_commit = master
+
+PACKAGES += wsock
+pkg_wsock_name = wsock
+pkg_wsock_description = Erlang library to build WebSocket clients and servers
+pkg_wsock_homepage = https://github.com/madtrick/wsock
+pkg_wsock_fetch = git
+pkg_wsock_repo = https://github.com/madtrick/wsock
+pkg_wsock_commit = master
+
+PACKAGES += xhttpc
+pkg_xhttpc_name = xhttpc
+pkg_xhttpc_description = Extensible HTTP Client for Erlang
+pkg_xhttpc_homepage = https://github.com/seriyps/xhttpc
+pkg_xhttpc_fetch = git
+pkg_xhttpc_repo = https://github.com/seriyps/xhttpc
+pkg_xhttpc_commit = master
+
+PACKAGES += xref_runner
+pkg_xref_runner_name = xref_runner
+pkg_xref_runner_description = Erlang Xref Runner (inspired by rebar xref)
+pkg_xref_runner_homepage = https://github.com/inaka/xref_runner
+pkg_xref_runner_fetch = git
+pkg_xref_runner_repo = https://github.com/inaka/xref_runner
+pkg_xref_runner_commit = master
+
+PACKAGES += yamerl
+pkg_yamerl_name = yamerl
+pkg_yamerl_description = YAML 1.2 parser in pure Erlang
+pkg_yamerl_homepage = https://github.com/yakaz/yamerl
+pkg_yamerl_fetch = git
+pkg_yamerl_repo = https://github.com/yakaz/yamerl
+pkg_yamerl_commit = master
+
+PACKAGES += yamler
+pkg_yamler_name = yamler
+pkg_yamler_description = libyaml-based YAML loader for Erlang
+pkg_yamler_homepage = https://github.com/goertzenator/yamler
+pkg_yamler_fetch = git
+pkg_yamler_repo = https://github.com/goertzenator/yamler
+pkg_yamler_commit = master
+
+PACKAGES += yaws
+pkg_yaws_name = yaws
+pkg_yaws_description = Yaws webserver
+pkg_yaws_homepage = http://yaws.hyber.org
+pkg_yaws_fetch = git
+pkg_yaws_repo = https://github.com/klacke/yaws
+pkg_yaws_commit = master
+
+PACKAGES += zab_engine
+pkg_zab_engine_name = zab_engine
+pkg_zab_engine_description = ZAB protocol implemented in Erlang
+pkg_zab_engine_homepage = https://github.com/xinmingyao/zab_engine
+pkg_zab_engine_fetch = git
+pkg_zab_engine_repo = https://github.com/xinmingyao/zab_engine
+pkg_zab_engine_commit = master
+
+PACKAGES += zabbix_sender
+pkg_zabbix_sender_name = zabbix_sender
+pkg_zabbix_sender_description = Zabbix trapper for sending data to Zabbix in pure Erlang
+pkg_zabbix_sender_homepage = https://github.com/stalkermn/zabbix_sender
+pkg_zabbix_sender_fetch = git
+pkg_zabbix_sender_repo = https://github.com/stalkermn/zabbix_sender.git
+pkg_zabbix_sender_commit = master
+
+PACKAGES += zeta
+pkg_zeta_name = zeta
+pkg_zeta_description = HTTP access log parser in Erlang
+pkg_zeta_homepage = https://github.com/s1n4/zeta
+pkg_zeta_fetch = git
+pkg_zeta_repo = https://github.com/s1n4/zeta
+pkg_zeta_commit = master
+
+PACKAGES += zippers
+pkg_zippers_name = zippers
+pkg_zippers_description = A library for functional zipper data structures in Erlang.
+pkg_zippers_homepage = https://github.com/ferd/zippers
+pkg_zippers_fetch = git
+pkg_zippers_repo = https://github.com/ferd/zippers
+pkg_zippers_commit = master
+
+PACKAGES += zlists
+pkg_zlists_name = zlists
+pkg_zlists_description = Erlang lazy lists library.
+pkg_zlists_homepage = https://github.com/vjache/erlang-zlists
+pkg_zlists_fetch = git
+pkg_zlists_repo = https://github.com/vjache/erlang-zlists
+pkg_zlists_commit = master
+
+PACKAGES += zraft_lib
+pkg_zraft_lib_name = zraft_lib
+pkg_zraft_lib_description = Erlang Raft consensus protocol implementation
+pkg_zraft_lib_homepage = https://github.com/dreyk/zraft_lib
+pkg_zraft_lib_fetch = git
+pkg_zraft_lib_repo = https://github.com/dreyk/zraft_lib
+pkg_zraft_lib_commit = master
+
+PACKAGES += zucchini
+pkg_zucchini_name = zucchini
+pkg_zucchini_description = An Erlang INI parser
+pkg_zucchini_homepage = https://github.com/devinus/zucchini
+pkg_zucchini_fetch = git
+pkg_zucchini_repo = https://github.com/devinus/zucchini
+pkg_zucchini_commit = master
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: search
+
+define pkg_print
+ $(verbose) printf "%s\n" \
+ $(if $(call core_eq,$(1),$(pkg_$(1)_name)),,"Pkg name: $(1)") \
+ "App name: $(pkg_$(1)_name)" \
+ "Description: $(pkg_$(1)_description)" \
+ "Home page: $(pkg_$(1)_homepage)" \
+ "Fetch with: $(pkg_$(1)_fetch)" \
+ "Repository: $(pkg_$(1)_repo)" \
+ "Commit: $(pkg_$(1)_commit)" \
+ ""
+
+endef
+
+search:
+ifdef q
+ $(foreach p,$(PACKAGES), \
+ $(if $(findstring $(call core_lc,$(q)),$(call core_lc,$(pkg_$(p)_name) $(pkg_$(p)_description))), \
+ $(call pkg_print,$(p))))
+else
+ $(foreach p,$(PACKAGES),$(call pkg_print,$(p)))
+endif
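+
+# Usage sketch (editor's illustrative note, not upstream documentation):
+# the search target above matches q case-insensitively against each
+# package's name and description via core_lc/findstring.
+#
+#   make search q=pool   # list packages whose name/description contains "pool"
+#   make search          # print every known package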
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-deps clean-tmp-deps.log
+
+# Configuration.
+
+ifdef OTP_DEPS
+$(warning The variable OTP_DEPS is deprecated in favor of LOCAL_DEPS.)
+endif
+
+IGNORE_DEPS ?=
+export IGNORE_DEPS
+
+APPS_DIR ?= $(CURDIR)/apps
+export APPS_DIR
+
+DEPS_DIR ?= $(CURDIR)/deps
+export DEPS_DIR
+
+REBAR_DEPS_DIR = $(DEPS_DIR)
+export REBAR_DEPS_DIR
+
+REBAR_GIT ?= https://github.com/rebar/rebar
+REBAR_COMMIT ?= 576e12171ab8d69b048b827b92aa65d067deea01
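+
+# Example (editor's sketch): variables defined with ?= above can be set from
+# a project Makefile before erlang.mk is included. The values below are
+# hypothetical placeholders.
+#
+#   DEPS_DIR = $(CURDIR)/vendor
+#   IGNORE_DEPS = edown
+#   include erlang.mk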
+
+# External "early" plugins (see core/plugins.mk for regular plugins).
+# They both use the core_dep_plugin macro.
+
+define core_dep_plugin
+ifeq ($(2),$(PROJECT))
+-include $$(patsubst $(PROJECT)/%,%,$(1))
+else
+-include $(DEPS_DIR)/$(1)
+
+$(DEPS_DIR)/$(1): $(DEPS_DIR)/$(2) ;
+endif
+endef
+
+DEP_EARLY_PLUGINS ?=
+
+$(foreach p,$(DEP_EARLY_PLUGINS),\
+ $(eval $(if $(findstring /,$p),\
+ $(call core_dep_plugin,$p,$(firstword $(subst /, ,$p))),\
+ $(call core_dep_plugin,$p/early-plugins.mk,$p))))
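+
+# Example (editor's sketch): per the foreach above, an entry without a "/"
+# loads <dep>/early-plugins.mk, while a path selects a specific file inside
+# the dep named by its first path component. "my_dep" is a placeholder.
+#
+#   DEP_EARLY_PLUGINS = my_dep                 # includes $(DEPS_DIR)/my_dep/early-plugins.mk
+#   DEP_EARLY_PLUGINS = my_dep/mk/plugins.mk   # includes $(DEPS_DIR)/my_dep/mk/plugins.mk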
+
+# Query functions.
+
+query_fetch_method = $(if $(dep_$(1)),$(call _qfm_dep,$(word 1,$(dep_$(1)))),$(call _qfm_pkg,$(1)))
+_qfm_dep = $(if $(dep_fetch_$(1)),$(1),$(if $(IS_DEP),legacy,fail))
+_qfm_pkg = $(if $(pkg_$(1)_fetch),$(pkg_$(1)_fetch),fail)
+
+query_name = $(if $(dep_$(1)),$(1),$(if $(pkg_$(1)_name),$(pkg_$(1)_name),$(1)))
+
+query_repo = $(call _qr,$(1),$(call query_fetch_method,$(1)))
+_qr = $(if $(query_repo_$(2)),$(call query_repo_$(2),$(1)),$(call dep_repo,$(1)))
+
+query_repo_default = $(if $(dep_$(1)),$(word 2,$(dep_$(1))),$(pkg_$(1)_repo))
+query_repo_git = $(patsubst git://github.com/%,https://github.com/%,$(call query_repo_default,$(1)))
+query_repo_git-subfolder = $(call query_repo_git,$(1))
+query_repo_git-submodule = -
+query_repo_hg = $(call query_repo_default,$(1))
+query_repo_svn = $(call query_repo_default,$(1))
+query_repo_cp = $(call query_repo_default,$(1))
+query_repo_ln = $(call query_repo_default,$(1))
+query_repo_hex = https://hex.pm/packages/$(if $(word 3,$(dep_$(1))),$(word 3,$(dep_$(1))),$(1))
+query_repo_fail = -
+query_repo_legacy = -
+
+query_version = $(call _qv,$(1),$(call query_fetch_method,$(1)))
+_qv = $(if $(query_version_$(2)),$(call query_version_$(2),$(1)),$(call dep_commit,$(1)))
+
+query_version_default = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(word 3,$(dep_$(1))),$(pkg_$(1)_commit)))
+query_version_git = $(call query_version_default,$(1))
+query_version_git-subfolder = $(call query_version_git,$(1))
+query_version_git-submodule = -
+query_version_hg = $(call query_version_default,$(1))
+query_version_svn = -
+query_version_cp = -
+query_version_ln = -
+query_version_hex = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(word 2,$(dep_$(1))),$(pkg_$(1)_commit)))
+query_version_fail = -
+query_version_legacy = -
+
+query_extra = $(call _qe,$(1),$(call query_fetch_method,$(1)))
+_qe = $(if $(query_extra_$(2)),$(call query_extra_$(2),$(1)),-)
+
+query_extra_git = -
+query_extra_git-subfolder = $(if $(dep_$(1)),subfolder=$(word 4,$(dep_$(1))),-)
+query_extra_git-submodule = -
+query_extra_hg = -
+query_extra_svn = -
+query_extra_cp = -
+query_extra_ln = -
+query_extra_hex = $(if $(dep_$(1)),package-name=$(word 3,$(dep_$(1))),-)
+query_extra_fail = -
+query_extra_legacy = -
+
+query_absolute_path = $(addprefix $(DEPS_DIR)/,$(call query_name,$(1)))
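+
+# Worked example (editor's sketch; cowboy is used as a placeholder dep):
+# given
+#   DEPS = cowboy
+#   dep_cowboy = git https://github.com/ninenines/cowboy 2.9.0
+# the functions above resolve to
+#   $(call query_fetch_method,cowboy)  --> git
+#   $(call query_repo,cowboy)          --> https://github.com/ninenines/cowboy
+#   $(call query_version,cowboy)       --> 2.9.0
+#   $(call query_absolute_path,cowboy) --> $(DEPS_DIR)/cowboy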
+
+# Deprecated legacy query functions.
+dep_fetch = $(call query_fetch_method,$(1))
+dep_name = $(call query_name,$(1))
+dep_repo = $(call query_repo_git,$(1))
+dep_commit = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(if $(filter hex,$(word 1,$(dep_$(1)))),$(word 2,$(dep_$(1))),$(word 3,$(dep_$(1)))),$(pkg_$(1)_commit)))
+
+LOCAL_DEPS_DIRS = $(foreach a,$(LOCAL_DEPS),$(if $(wildcard $(APPS_DIR)/$(a)),$(APPS_DIR)/$(a)))
+ALL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(foreach dep,$(filter-out $(IGNORE_DEPS),$(BUILD_DEPS) $(DEPS)),$(call dep_name,$(dep))))
+
+# When we are calling an app directly we don't want to include it here
+# otherwise it'll be treated both as an apps and a top-level project.
+ALL_APPS_DIRS = $(if $(wildcard $(APPS_DIR)/),$(filter-out $(APPS_DIR),$(shell find $(APPS_DIR) -maxdepth 1 -type d)))
+ifdef ROOT_DIR
+ifndef IS_APP
+ALL_APPS_DIRS := $(filter-out $(APPS_DIR)/$(notdir $(CURDIR)),$(ALL_APPS_DIRS))
+endif
+endif
+
+ifeq ($(filter $(APPS_DIR) $(DEPS_DIR),$(subst :, ,$(ERL_LIBS))),)
+ifeq ($(ERL_LIBS),)
+ ERL_LIBS = $(APPS_DIR):$(DEPS_DIR)
+else
+ ERL_LIBS := $(ERL_LIBS):$(APPS_DIR):$(DEPS_DIR)
+endif
+endif
+export ERL_LIBS
+
+export NO_AUTOPATCH
+
+# Verbosity.
+
+dep_verbose_0 = @echo " DEP $1 ($(call dep_commit,$1))";
+dep_verbose_2 = set -x;
+dep_verbose = $(dep_verbose_$(V))
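+
+# Example (editor's note): V is erlang.mk's usual verbosity knob, assumed to
+# default to 0 elsewhere in this file.
+#   make deps        # one " DEP <name> (<commit>)" line per fetched dep
+#   make deps V=2    # trace every shell command via `set -x`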
+
+# Optimization: don't recompile deps unless truly necessary.
+
+ifndef IS_DEP
+ifneq ($(MAKELEVEL),0)
+$(shell rm -f ebin/dep_built)
+endif
+endif
+
+# Core targets.
+
+ALL_APPS_DIRS_TO_BUILD = $(if $(LOCAL_DEPS_DIRS)$(IS_APP),$(LOCAL_DEPS_DIRS),$(ALL_APPS_DIRS))
+
+apps:: $(ALL_APPS_DIRS) clean-tmp-deps.log | $(ERLANG_MK_TMP)
+# Create ebin directory for all apps to make sure Erlang recognizes them
+# as proper OTP applications when using -include_lib. This is a temporary
+# fix; a proper fix would be to compile apps/* in the right order.
+ifndef IS_APP
+ifneq ($(ALL_APPS_DIRS),)
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ mkdir -p $$dep/ebin; \
+ done
+endif
+endif
+# At the top level: if LOCAL_DEPS is defined with at least one local app,
+# only compile that list of apps. Otherwise, compile everything.
+# Within an app: compile all LOCAL_DEPS that are (uncompiled) local apps.
+ifneq ($(ALL_APPS_DIRS_TO_BUILD),)
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS_TO_BUILD); do \
+ if grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/apps.log; then \
+ :; \
+ else \
+ echo $$dep >> $(ERLANG_MK_TMP)/apps.log; \
+ $(MAKE) -C $$dep $(if $(IS_TEST),test-build-app) IS_APP=1; \
+ fi \
+ done
+endif
+
+clean-tmp-deps.log:
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_TMP)/apps.log $(ERLANG_MK_TMP)/deps.log
+endif
+
+# Erlang.mk does not rebuild dependencies after they have been compiled
+# once. A developer working on the top-level project and on some of its
+# dependencies at the same time may want to change this behavior.
+# There are two solutions:
+# 1. Set `FULL=1` so that all dependencies are visited and
+#    recursively recompiled if necessary.
+# 2. Set `FORCE_REBUILD` to the specific list of dependencies that
+#    should always be recompiled (instead of the whole set).
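+#
+# For instance (dependency name illustrative):
+#   $ make FULL=1
+#   $ make FORCE_REBUILD="cowboy"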
+
+FORCE_REBUILD ?=
+
+ifeq ($(origin FULL),undefined)
+ifneq ($(strip $(force_rebuild_dep)$(FORCE_REBUILD)),)
+define force_rebuild_dep
+echo "$(FORCE_REBUILD)" | grep -qw "$$(basename "$1")"
+endef
+endif
+endif
+
+ifneq ($(SKIP_DEPS),)
+deps::
+else
+deps:: $(ALL_DEPS_DIRS) apps clean-tmp-deps.log | $(ERLANG_MK_TMP)
+ifneq ($(ALL_DEPS_DIRS),)
+ $(verbose) set -e; for dep in $(ALL_DEPS_DIRS); do \
+ if grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/deps.log; then \
+ :; \
+ else \
+ echo $$dep >> $(ERLANG_MK_TMP)/deps.log; \
+ if [ -z "$(strip $(FULL))" ] $(if $(force_rebuild_dep),&& ! ($(call force_rebuild_dep,$$dep)),) && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ elif [ -f $$dep/GNUmakefile ] || [ -f $$dep/makefile ] || [ -f $$dep/Makefile ]; then \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ else \
+ echo "Error: No Makefile to build dependency $$dep." >&2; \
+ exit 2; \
+ fi \
+ fi \
+ done
+endif
+endif
+
+# Deps related targets.
+
+# @todo rename GNUmakefile and makefile to Makefile first, if they exist
+# While the Makefile could also be named GNUmakefile or makefile,
+# in practice only Makefile is needed so far.
+define dep_autopatch
+ if [ -f $(DEPS_DIR)/$(1)/erlang.mk ]; then \
+ rm -rf $(DEPS_DIR)/$1/ebin/; \
+ $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+ $(call dep_autopatch_erlang_mk,$(1)); \
+ elif [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+ if [ -f $(DEPS_DIR)/$1/rebar.lock ]; then \
+ $(call dep_autopatch2,$1); \
+ elif [ 0 != `grep -c "include ../\w*\.mk" $(DEPS_DIR)/$(1)/Makefile` ]; then \
+ $(call dep_autopatch2,$(1)); \
+ elif [ 0 != `grep -ci "^[^#].*rebar" $(DEPS_DIR)/$(1)/Makefile` ]; then \
+ $(call dep_autopatch2,$(1)); \
+ elif [ -n "`find $(DEPS_DIR)/$(1)/ -type f -name \*.mk -not -name erlang.mk -exec grep -i "^[^#].*rebar" '{}' \;`" ]; then \
+ $(call dep_autopatch2,$(1)); \
+ fi \
+ else \
+ if [ ! -d $(DEPS_DIR)/$(1)/src/ ]; then \
+ $(call dep_autopatch_noop,$(1)); \
+ else \
+ $(call dep_autopatch2,$(1)); \
+ fi \
+ fi
+endef
+
+define dep_autopatch2
+ ! test -f $(DEPS_DIR)/$1/ebin/$1.app || \
+ mv -n $(DEPS_DIR)/$1/ebin/$1.app $(DEPS_DIR)/$1/src/$1.app.src; \
+ rm -f $(DEPS_DIR)/$1/ebin/$1.app; \
+ if [ -f $(DEPS_DIR)/$1/src/$1.app.src.script ]; then \
+ $(call erlang,$(call dep_autopatch_appsrc_script.erl,$(1))); \
+ fi; \
+ $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+ if [ -f $(DEPS_DIR)/$(1)/rebar -o -f $(DEPS_DIR)/$(1)/rebar.config -o -f $(DEPS_DIR)/$(1)/rebar.config.script -o -f $(DEPS_DIR)/$1/rebar.lock ]; then \
+ $(call dep_autopatch_fetch_rebar); \
+ $(call dep_autopatch_rebar,$(1)); \
+ else \
+ $(call dep_autopatch_gen,$(1)); \
+ fi
+endef
+
+define dep_autopatch_noop
+ printf "noop:\n" > $(DEPS_DIR)/$(1)/Makefile
+endef
+
+# Replace "include erlang.mk" with a line that will load the parent
+# Erlang.mk if one is given. Do this for all 3 possible Makefile file names.
+ifeq ($(NO_AUTOPATCH_ERLANG_MK),)
+define dep_autopatch_erlang_mk
+ for f in Makefile makefile GNUmakefile; do \
+ if [ -f $(DEPS_DIR)/$1/$$f ]; then \
+ sed -i.bak s/'include *erlang.mk'/'include $$(if $$(ERLANG_MK_FILENAME),$$(ERLANG_MK_FILENAME),erlang.mk)'/ $(DEPS_DIR)/$1/$$f; \
+ fi \
+ done
+endef
+else
+define dep_autopatch_erlang_mk
+ :
+endef
+endif
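+
+# As an illustration of the sed call above, a dependency Makefile line
+# "include erlang.mk" is rewritten to:
+#   include $(if $(ERLANG_MK_FILENAME),$(ERLANG_MK_FILENAME),erlang.mk)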
+
+define dep_autopatch_gen
+ printf "%s\n" \
+ "ERLC_OPTS = +debug_info" \
+ "include ../../erlang.mk" > $(DEPS_DIR)/$(1)/Makefile
+endef
+
+# We use flock/lockf when available to avoid concurrency issues.
+define dep_autopatch_fetch_rebar
+ if command -v flock >/dev/null; then \
+ flock $(ERLANG_MK_TMP)/rebar.lock sh -c "$(call dep_autopatch_fetch_rebar2)"; \
+ elif command -v lockf >/dev/null; then \
+ lockf $(ERLANG_MK_TMP)/rebar.lock sh -c "$(call dep_autopatch_fetch_rebar2)"; \
+ else \
+ $(call dep_autopatch_fetch_rebar2); \
+ fi
+endef
+
+define dep_autopatch_fetch_rebar2
+ if [ ! -d $(ERLANG_MK_TMP)/rebar ]; then \
+ git clone -q -n -- $(REBAR_GIT) $(ERLANG_MK_TMP)/rebar; \
+ cd $(ERLANG_MK_TMP)/rebar; \
+ git checkout -q $(REBAR_COMMIT); \
+ ./bootstrap; \
+ cd -; \
+ fi
+endef
+
+define dep_autopatch_rebar
+ if [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+ mv $(DEPS_DIR)/$(1)/Makefile $(DEPS_DIR)/$(1)/Makefile.orig.mk; \
+ fi; \
+ $(call erlang,$(call dep_autopatch_rebar.erl,$(1))); \
+ rm -f $(DEPS_DIR)/$(1)/ebin/$(1).app
+endef
+
+define dep_autopatch_rebar.erl
+ application:load(rebar),
+ application:set_env(rebar, log_level, debug),
+ rmemo:start(),
+ Conf1 = case file:consult("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config)") of
+ {ok, Conf0} -> Conf0;
+ _ -> []
+ end,
+ {Conf, OsEnv} = fun() ->
+ case filelib:is_file("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)") of
+ false -> {Conf1, []};
+ true ->
+ Bindings0 = erl_eval:new_bindings(),
+ Bindings1 = erl_eval:add_binding('CONFIG', Conf1, Bindings0),
+ Bindings = erl_eval:add_binding('SCRIPT', "$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings1),
+ Before = os:getenv(),
+ {ok, Conf2} = file:script("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings),
+ {Conf2, lists:foldl(fun(E, Acc) -> lists:delete(E, Acc) end, os:getenv(), Before)}
+ end
+ end(),
+ Write = fun (Text) ->
+ file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/Makefile)", Text, [append])
+ end,
+ Escape = fun (Text) ->
+ re:replace(Text, "\\\\$$", "\$$$$", [global, {return, list}])
+ end,
+ Write("IGNORE_DEPS += edown eper eunit_formatters meck node_package "
+ "rebar_lock_deps_plugin rebar_vsn_plugin reltool_util\n"),
+ Write("C_SRC_DIR = /path/do/not/exist\n"),
+ Write("C_SRC_TYPE = rebar\n"),
+ Write("DRV_CFLAGS = -fPIC\nexport DRV_CFLAGS\n"),
+ Write(["ERLANG_ARCH = ", rebar_utils:wordsize(), "\nexport ERLANG_ARCH\n"]),
+ ToList = fun
+ (V) when is_atom(V) -> atom_to_list(V);
+ (V) when is_list(V) -> "'\\"" ++ V ++ "\\"'"
+ end,
+ fun() ->
+ Write("ERLC_OPTS = +debug_info\nexport ERLC_OPTS\n"),
+ case lists:keyfind(erl_opts, 1, Conf) of
+ false -> ok;
+ {_, ErlOpts} ->
+ lists:foreach(fun
+ ({d, D}) ->
+ Write("ERLC_OPTS += -D" ++ ToList(D) ++ "=1\n");
+ ({d, DKey, DVal}) ->
+ Write("ERLC_OPTS += -D" ++ ToList(DKey) ++ "=" ++ ToList(DVal) ++ "\n");
+ ({i, I}) ->
+ Write(["ERLC_OPTS += -I ", I, "\n"]);
+ ({platform_define, Regex, D}) ->
+ case rebar_utils:is_arch(Regex) of
+ true -> Write("ERLC_OPTS += -D" ++ ToList(D) ++ "=1\n");
+ false -> ok
+ end;
+ ({parse_transform, PT}) ->
+ Write("ERLC_OPTS += +'{parse_transform, " ++ ToList(PT) ++ "}'\n");
+ (_) -> ok
+ end, ErlOpts)
+ end,
+ Write("\n")
+ end(),
+ GetHexVsn = fun(N, NP) ->
+ case file:consult("$(call core_native_path,$(DEPS_DIR)/$1/rebar.lock)") of
+ {ok, Lock} ->
+ io:format("~p~n", [Lock]),
+ case lists:keyfind("1.1.0", 1, Lock) of
+ {_, LockPkgs} ->
+ io:format("~p~n", [LockPkgs]),
+ case lists:keyfind(atom_to_binary(N, latin1), 1, LockPkgs) of
+ {_, {pkg, _, Vsn}, _} ->
+ io:format("~p~n", [Vsn]),
+ {N, {hex, NP, binary_to_list(Vsn)}};
+ _ ->
+ false
+ end;
+ _ ->
+ false
+ end;
+ _ ->
+ false
+ end
+ end,
+ SemVsn = fun
+ ("~>" ++ S0) ->
+ S = case S0 of
+ " " ++ S1 -> S1;
+ _ -> S0
+ end,
+ case length([ok || $$. <- S]) of
+ 0 -> S ++ ".0.0";
+ 1 -> S ++ ".0";
+ _ -> S
+ end;
+ (S) -> S
+ end,
+ fun() ->
+ File = case lists:keyfind(deps, 1, Conf) of
+ false -> [];
+ {_, Deps} ->
+ [begin case case Dep of
+ N when is_atom(N) -> GetHexVsn(N, N);
+ {N, S} when is_atom(N), is_list(S) -> {N, {hex, N, SemVsn(S)}};
+ {N, {pkg, NP}} when is_atom(N) -> GetHexVsn(N, NP);
+ {N, S, {pkg, NP}} -> {N, {hex, NP, S}};
+ {N, S} when is_tuple(S) -> {N, S};
+ {N, _, S} -> {N, S};
+ {N, _, S, _} -> {N, S};
+ _ -> false
+ end of
+ false -> ok;
+ {Name, Source} ->
+ {Method, Repo, Commit} = case Source of
+ {hex, NPV, V} -> {hex, V, NPV};
+ {git, R} -> {git, R, master};
+ {M, R, {branch, C}} -> {M, R, C};
+ {M, R, {ref, C}} -> {M, R, C};
+ {M, R, {tag, C}} -> {M, R, C};
+ {M, R, C} -> {M, R, C}
+ end,
+ Write(io_lib:format("DEPS += ~s\ndep_~s = ~s ~s ~s~n", [Name, Name, Method, Repo, Commit]))
+ end end || Dep <- Deps]
+ end
+ end(),
+ fun() ->
+ case lists:keyfind(erl_first_files, 1, Conf) of
+ false -> ok;
+ {_, Files} ->
+ Names = [[" ", case lists:reverse(F) of
+ "lre." ++ Elif -> lists:reverse(Elif);
+ "lrx." ++ Elif -> lists:reverse(Elif);
+ "lry." ++ Elif -> lists:reverse(Elif);
+ Elif -> lists:reverse(Elif)
+ end] || "src/" ++ F <- Files],
+ Write(io_lib:format("COMPILE_FIRST +=~s\n", [Names]))
+ end
+ end(),
+ Write("\n\nrebar_dep: preprocess pre-deps deps pre-app app\n"),
+ Write("\npreprocess::\n"),
+ Write("\npre-deps::\n"),
+ Write("\npre-app::\n"),
+ PatchHook = fun(Cmd) ->
+ Cmd2 = re:replace(Cmd, "^([g]?make)(.*)( -C.*)", "\\\\1\\\\3\\\\2", [{return, list}]),
+ case Cmd2 of
+ "make -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1);
+ "gmake -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1);
+ "make " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1);
+ "gmake " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1);
+ _ -> Escape(Cmd)
+ end
+ end,
+ fun() ->
+ case lists:keyfind(pre_hooks, 1, Conf) of
+ false -> ok;
+ {_, Hooks} ->
+ [case H of
+ {'get-deps', Cmd} ->
+ Write("\npre-deps::\n\t" ++ PatchHook(Cmd) ++ "\n");
+ {compile, Cmd} ->
+ Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n");
+ {Regex, compile, Cmd} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n");
+ false -> ok
+ end;
+ _ -> ok
+ end || H <- Hooks]
+ end
+ end(),
+ ShellToMk = fun(V0) ->
+ V1 = re:replace(V0, "[$$][(]", "$$\(shell ", [global]),
+ V = re:replace(V1, "([$$])(?![(])(\\\\w*)", "\\\\1(\\\\2)", [global]),
+ re:replace(V, "-Werror\\\\b", "", [{return, list}, global])
+ end,
+ PortSpecs = fun() ->
+ case lists:keyfind(port_specs, 1, Conf) of
+ false ->
+ case filelib:is_dir("$(call core_native_path,$(DEPS_DIR)/$1/c_src)") of
+ false -> [];
+ true ->
+ [{"priv/" ++ proplists:get_value(so_name, Conf, "$(1)_drv.so"),
+ proplists:get_value(port_sources, Conf, ["c_src/*.c"]), []}]
+ end;
+ {_, Specs} ->
+ lists:flatten([case S of
+ {Output, Input} -> {ShellToMk(Output), Input, []};
+ {Regex, Output, Input} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {ShellToMk(Output), Input, []};
+ false -> []
+ end;
+ {Regex, Output, Input, [{env, Env}]} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {ShellToMk(Output), Input, Env};
+ false -> []
+ end
+ end || S <- Specs])
+ end
+ end(),
+ PortSpecWrite = fun (Text) ->
+ file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/c_src/Makefile.erlang.mk)", Text, [append])
+ end,
+ case PortSpecs of
+ [] -> ok;
+ _ ->
+ Write("\npre-app::\n\t@$$\(MAKE) --no-print-directory -f c_src/Makefile.erlang.mk\n"),
+ PortSpecWrite(io_lib:format("ERL_CFLAGS ?= -finline-functions -Wall -fPIC -I \\"~s/erts-~s/include\\" -I \\"~s\\"\n",
+ [code:root_dir(), erlang:system_info(version), code:lib_dir(erl_interface, include)])),
+ PortSpecWrite(io_lib:format("ERL_LDFLAGS ?= -L \\"~s\\" -lei\n",
+ [code:lib_dir(erl_interface, lib)])),
+ [PortSpecWrite(["\n", E, "\n"]) || E <- OsEnv],
+ FilterEnv = fun(Env) ->
+ lists:flatten([case E of
+ {_, _} -> E;
+ {Regex, K, V} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {K, V};
+ false -> []
+ end
+ end || E <- Env])
+ end,
+ MergeEnv = fun(Env) ->
+ lists:foldl(fun ({K, V}, Acc) ->
+ case lists:keyfind(K, 1, Acc) of
+ false -> [{K, rebar_utils:expand_env_variable(V, K, "")}|Acc];
+ {_, V0} -> [{K, rebar_utils:expand_env_variable(V, K, V0)}|Acc]
+ end
+ end, [], Env)
+ end,
+ PortEnv = case lists:keyfind(port_env, 1, Conf) of
+ false -> [];
+ {_, PortEnv0} -> FilterEnv(PortEnv0)
+ end,
+ PortSpec = fun ({Output, Input0, Env}) ->
+ filelib:ensure_dir("$(call core_native_path,$(DEPS_DIR)/$1/)" ++ Output),
+ Input = [[" ", I] || I <- Input0],
+ PortSpecWrite([
+ [["\n", K, " = ", ShellToMk(V)] || {K, V} <- lists:reverse(MergeEnv(PortEnv))],
+ case $(PLATFORM) of
+ darwin -> "\n\nLDFLAGS += -flat_namespace -undefined suppress";
+ _ -> ""
+ end,
+ "\n\nall:: ", Output, "\n\t@:\n\n",
+ "%.o: %.c\n\t$$\(CC) -c -o $$\@ $$\< $$\(CFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.C\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.cc\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.cpp\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ [[Output, ": ", K, " += ", ShellToMk(V), "\n"] || {K, V} <- lists:reverse(MergeEnv(FilterEnv(Env)))],
+ Output, ": $$\(foreach ext,.c .C .cc .cpp,",
+ "$$\(patsubst %$$\(ext),%.o,$$\(filter %$$\(ext),$$\(wildcard", Input, "))))\n",
+ "\t$$\(CC) -o $$\@ $$\? $$\(LDFLAGS) $$\(ERL_LDFLAGS) $$\(DRV_LDFLAGS) $$\(EXE_LDFLAGS)",
+ case {filename:extension(Output), $(PLATFORM)} of
+ {[], _} -> "\n";
+ {_, darwin} -> "\n";
+ _ -> " -shared\n"
+ end])
+ end,
+ [PortSpec(S) || S <- PortSpecs]
+ end,
+ fun() ->
+ case lists:keyfind(plugins, 1, Conf) of
+ false -> ok;
+ {_, Plugins0} ->
+ Plugins = [P || P <- Plugins0, is_tuple(P)],
+ case lists:keyfind('lfe-compile', 1, Plugins) of
+ false -> ok;
+ _ -> Write("\nBUILD_DEPS = lfe lfe.mk\ndep_lfe.mk = git https://github.com/ninenines/lfe.mk master\nDEP_PLUGINS = lfe.mk\n")
+ end
+ end
+ end(),
+ Write("\ninclude $$\(if $$\(ERLANG_MK_FILENAME),$$\(ERLANG_MK_FILENAME),erlang.mk)"),
+ RunPlugin = fun(Plugin, Step) ->
+ case erlang:function_exported(Plugin, Step, 2) of
+ false -> ok;
+ true ->
+ c:cd("$(call core_native_path,$(DEPS_DIR)/$1/)"),
+ Ret = Plugin:Step({config, "", Conf, dict:new(), dict:new(), dict:new(),
+ dict:store(base_dir, "", dict:new())}, undefined),
+ io:format("rebar plugin ~p step ~p ret ~p~n", [Plugin, Step, Ret])
+ end
+ end,
+ fun() ->
+ case lists:keyfind(plugins, 1, Conf) of
+ false -> ok;
+ {_, Plugins0} ->
+ Plugins = [P || P <- Plugins0, is_atom(P)],
+ [begin
+ case lists:keyfind(deps, 1, Conf) of
+ false -> ok;
+ {_, Deps} ->
+ case lists:keyfind(P, 1, Deps) of
+ false -> ok;
+ _ ->
+ Path = "$(call core_native_path,$(DEPS_DIR)/)" ++ atom_to_list(P),
+ io:format("~s", [os:cmd("$(MAKE) -C $(call core_native_path,$(DEPS_DIR)/$1) " ++ Path)]),
+ io:format("~s", [os:cmd("$(MAKE) -C " ++ Path ++ " IS_DEP=1")]),
+ code:add_patha(Path ++ "/ebin")
+ end
+ end
+ end || P <- Plugins],
+ [case code:load_file(P) of
+ {module, P} -> ok;
+ _ ->
+ case lists:keyfind(plugin_dir, 1, Conf) of
+ false -> ok;
+ {_, PluginsDir} ->
+ ErlFile = "$(call core_native_path,$(DEPS_DIR)/$1/)" ++ PluginsDir ++ "/" ++ atom_to_list(P) ++ ".erl",
+ {ok, P, Bin} = compile:file(ErlFile, [binary]),
+ {module, P} = code:load_binary(P, ErlFile, Bin)
+ end
+ end || P <- Plugins],
+ [RunPlugin(P, preprocess) || P <- Plugins],
+ [RunPlugin(P, pre_compile) || P <- Plugins],
+ [RunPlugin(P, compile) || P <- Plugins]
+ end
+ end(),
+ halt()
+endef
+
+define dep_autopatch_appsrc_script.erl
+ AppSrc = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+ AppSrcScript = AppSrc ++ ".script",
+ {ok, Conf0} = file:consult(AppSrc),
+ Bindings0 = erl_eval:new_bindings(),
+ Bindings1 = erl_eval:add_binding('CONFIG', Conf0, Bindings0),
+ Bindings = erl_eval:add_binding('SCRIPT', AppSrcScript, Bindings1),
+ Conf = case file:script(AppSrcScript, Bindings) of
+ {ok, [C]} -> C;
+ {ok, C} -> C
+ end,
+ ok = file:write_file(AppSrc, io_lib:format("~p.~n", [Conf])),
+ halt()
+endef
+
+define dep_autopatch_appsrc.erl
+ AppSrcOut = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+ AppSrcIn = case filelib:is_regular(AppSrcOut) of false -> "$(call core_native_path,$(DEPS_DIR)/$1/ebin/$1.app)"; true -> AppSrcOut end,
+ case filelib:is_regular(AppSrcIn) of
+ false -> ok;
+ true ->
+ {ok, [{application, $(1), L0}]} = file:consult(AppSrcIn),
+ L1 = lists:keystore(modules, 1, L0, {modules, []}),
+ L2 = case lists:keyfind(vsn, 1, L1) of
+ {_, git} -> lists:keyreplace(vsn, 1, L1, {vsn, lists:droplast(os:cmd("git -C $(DEPS_DIR)/$1 describe --dirty --tags --always"))});
+ {_, {cmd, _}} -> lists:keyreplace(vsn, 1, L1, {vsn, "cmd"});
+ _ -> L1
+ end,
+ L3 = case lists:keyfind(registered, 1, L2) of false -> [{registered, []}|L2]; _ -> L2 end,
+ ok = file:write_file(AppSrcOut, io_lib:format("~p.~n", [{application, $(1), L3}])),
+ case AppSrcOut of AppSrcIn -> ok; _ -> ok = file:delete(AppSrcIn) end
+ end,
+ halt()
+endef
+
+define dep_fetch_git
+ git clone -q -n -- $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && git checkout -q $(call dep_commit,$(1));
+endef
+
+define dep_fetch_git-subfolder
+ mkdir -p $(ERLANG_MK_TMP)/git-subfolder; \
+ git clone -q -n -- $(call dep_repo,$1) \
+ $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1); \
+ cd $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1) \
+ && git checkout -q $(call dep_commit,$1); \
+ ln -s $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1)/$(word 4,$(dep_$(1))) \
+ $(DEPS_DIR)/$(call dep_name,$1);
+endef
+
+define dep_fetch_git-submodule
+ git submodule update --init -- $(DEPS_DIR)/$1;
+endef
+
+define dep_fetch_hg
+ hg clone -q -U $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && hg update -q $(call dep_commit,$(1));
+endef
+
+define dep_fetch_svn
+ svn checkout -q $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+define dep_fetch_cp
+ cp -R $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+define dep_fetch_ln
+ ln -s $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+# A Hex dependency only carries a package version; there is no need to look
+# it up in the Erlang.mk package index.
+define dep_fetch_hex
+ mkdir -p $(ERLANG_MK_TMP)/hex $(DEPS_DIR)/$1; \
+ $(call core_http_get,$(ERLANG_MK_TMP)/hex/$1.tar,\
+ https://repo.hex.pm/tarballs/$(if $(word 3,$(dep_$1)),$(word 3,$(dep_$1)),$1)-$(strip $(word 2,$(dep_$1))).tar); \
+ tar -xOf $(ERLANG_MK_TMP)/hex/$1.tar contents.tar.gz | tar -C $(DEPS_DIR)/$1 -xzf -;
+endef
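+
+# As an example (package name and version illustrative): "dep_jsx = hex 3.1.0"
+# fetches https://repo.hex.pm/tarballs/jsx-3.1.0.tar and unpacks its
+# contents.tar.gz into $(DEPS_DIR)/jsx.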
+
+define dep_fetch_fail
+ echo "Error: Unknown or invalid dependency: $(1)." >&2; \
+ exit 78;
+endef
+
+# Kept for compatibility with older Erlang.mk configurations.
+define dep_fetch_legacy
+ $(warning WARNING: '$(1)' dependency configuration uses deprecated format.) \
+ git clone -q -n -- $(word 1,$(dep_$(1))) $(DEPS_DIR)/$(1); \
+ cd $(DEPS_DIR)/$(1) && git checkout -q $(if $(word 2,$(dep_$(1))),$(word 2,$(dep_$(1))),master);
+endef
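+
+# The deprecated format handled above is "dep_NAME = REPO [COMMIT]", with
+# COMMIT defaulting to master, e.g. (illustrative):
+#   dep_cowboy = https://github.com/ninenines/cowboy 1.0.0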
+
+define dep_target
+$(DEPS_DIR)/$(call dep_name,$1): | $(ERLANG_MK_TMP)
+ $(eval DEP_NAME := $(call dep_name,$1))
+ $(eval DEP_STR := $(if $(filter $1,$(DEP_NAME)),$1,"$1 ($(DEP_NAME))"))
+ $(verbose) if test -d $(APPS_DIR)/$(DEP_NAME); then \
+ echo "Error: Dependency" $(DEP_STR) "conflicts with application found in $(APPS_DIR)/$(DEP_NAME)." >&2; \
+ exit 17; \
+ fi
+ $(verbose) mkdir -p $(DEPS_DIR)
+ $(dep_verbose) $(call dep_fetch_$(strip $(call dep_fetch,$(1))),$(1))
+ $(verbose) if [ -f $(DEPS_DIR)/$(1)/configure.ac -o -f $(DEPS_DIR)/$(1)/configure.in ] \
+ && [ ! -f $(DEPS_DIR)/$(1)/configure ]; then \
+ echo " AUTO " $(DEP_STR); \
+ cd $(DEPS_DIR)/$(1) && autoreconf -Wall -vif -I m4; \
+ fi
+ - $(verbose) if [ -f $(DEPS_DIR)/$(DEP_NAME)/configure ]; then \
+ echo " CONF " $(DEP_STR); \
+ cd $(DEPS_DIR)/$(DEP_NAME) && ./configure; \
+ fi
+ifeq ($(filter $(1),$(NO_AUTOPATCH)),)
+ $(verbose) $$(MAKE) --no-print-directory autopatch-$(DEP_NAME)
+endif
+
+.PHONY: autopatch-$(call dep_name,$1)
+
+autopatch-$(call dep_name,$1)::
+ $(verbose) if [ "$(1)" = "amqp_client" -a "$(RABBITMQ_CLIENT_PATCH)" ]; then \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \
+ echo " PATCH Downloading rabbitmq-codegen"; \
+ git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \
+ fi; \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-server ]; then \
+ echo " PATCH Downloading rabbitmq-server"; \
+ git clone https://github.com/rabbitmq/rabbitmq-server.git $(DEPS_DIR)/rabbitmq-server; \
+ fi; \
+ ln -s $(DEPS_DIR)/amqp_client/deps/rabbit_common-0.0.0 $(DEPS_DIR)/rabbit_common; \
+ elif [ "$(1)" = "rabbit" -a "$(RABBITMQ_SERVER_PATCH)" ]; then \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \
+ echo " PATCH Downloading rabbitmq-codegen"; \
+ git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \
+ fi \
+ elif [ "$1" = "elixir" -a "$(ELIXIR_PATCH)" ]; then \
+ ln -s lib/elixir/ebin $(DEPS_DIR)/elixir/; \
+ else \
+ $$(call dep_autopatch,$(call dep_name,$1)) \
+ fi
+endef
+
+$(foreach dep,$(BUILD_DEPS) $(DEPS),$(eval $(call dep_target,$(dep))))
+
+ifndef IS_APP
+clean:: clean-apps
+
+clean-apps:
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ $(MAKE) -C $$dep clean IS_APP=1; \
+ done
+
+distclean:: distclean-apps
+
+distclean-apps:
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ $(MAKE) -C $$dep distclean IS_APP=1; \
+ done
+endif
+
+ifndef SKIP_DEPS
+distclean:: distclean-deps
+
+distclean-deps:
+ $(gen_verbose) rm -rf $(DEPS_DIR)
+endif
+
+# Forward-declare variables used in core/deps-tools.mk. This is required
+# in case plugins use them.
+
+ERLANG_MK_RECURSIVE_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-deps-list.log
+ERLANG_MK_RECURSIVE_DOC_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-doc-deps-list.log
+ERLANG_MK_RECURSIVE_REL_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-rel-deps-list.log
+ERLANG_MK_RECURSIVE_TEST_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-test-deps-list.log
+ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-shell-deps-list.log
+
+ERLANG_MK_QUERY_DEPS_FILE = $(ERLANG_MK_TMP)/query-deps.log
+ERLANG_MK_QUERY_DOC_DEPS_FILE = $(ERLANG_MK_TMP)/query-doc-deps.log
+ERLANG_MK_QUERY_REL_DEPS_FILE = $(ERLANG_MK_TMP)/query-rel-deps.log
+ERLANG_MK_QUERY_TEST_DEPS_FILE = $(ERLANG_MK_TMP)/query-test-deps.log
+ERLANG_MK_QUERY_SHELL_DEPS_FILE = $(ERLANG_MK_TMP)/query-shell-deps.log
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: clean-app
+
+# Configuration.
+
+ERLC_OPTS ?= -Werror +debug_info +warn_export_vars +warn_shadow_vars \
+ +warn_obsolete_guard # +bin_opt_info +warn_export_all +warn_missing_spec
+COMPILE_FIRST ?=
+COMPILE_FIRST_PATHS = $(addprefix src/,$(addsuffix .erl,$(COMPILE_FIRST)))
+ERLC_EXCLUDE ?=
+ERLC_EXCLUDE_PATHS = $(addprefix src/,$(addsuffix .erl,$(ERLC_EXCLUDE)))
+
+ERLC_ASN1_OPTS ?=
+
+ERLC_MIB_OPTS ?=
+COMPILE_MIB_FIRST ?=
+COMPILE_MIB_FIRST_PATHS = $(addprefix mibs/,$(addsuffix .mib,$(COMPILE_MIB_FIRST)))
+
+# Verbosity.
+
+app_verbose_0 = @echo " APP " $(PROJECT);
+app_verbose_2 = set -x;
+app_verbose = $(app_verbose_$(V))
+
+appsrc_verbose_0 = @echo " APP " $(PROJECT).app.src;
+appsrc_verbose_2 = set -x;
+appsrc_verbose = $(appsrc_verbose_$(V))
+
+makedep_verbose_0 = @echo " DEPEND" $(PROJECT).d;
+makedep_verbose_2 = set -x;
+makedep_verbose = $(makedep_verbose_$(V))
+
+erlc_verbose_0 = @echo " ERLC " $(filter-out $(patsubst %,%.erl,$(ERLC_EXCLUDE)),\
+ $(filter %.erl %.core,$(?F)));
+erlc_verbose_2 = set -x;
+erlc_verbose = $(erlc_verbose_$(V))
+
+xyrl_verbose_0 = @echo " XYRL " $(filter %.xrl %.yrl,$(?F));
+xyrl_verbose_2 = set -x;
+xyrl_verbose = $(xyrl_verbose_$(V))
+
+asn1_verbose_0 = @echo " ASN1 " $(filter %.asn1,$(?F));
+asn1_verbose_2 = set -x;
+asn1_verbose = $(asn1_verbose_$(V))
+
+mib_verbose_0 = @echo " MIB " $(filter %.bin %.mib,$(?F));
+mib_verbose_2 = set -x;
+mib_verbose = $(mib_verbose_$(V))
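+
+# As a usage note: plain "make" (V=0) prints the terse labels above, while
+# "make V=2" traces every shell command via "set -x".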
+
+ifneq ($(wildcard src/),)
+
+# Targets.
+
+app:: $(if $(wildcard ebin/test),clean) deps
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d
+ $(verbose) $(MAKE) --no-print-directory app-build
+
+ifeq ($(wildcard src/$(PROJECT_MOD).erl),)
+define app_file
+{application, '$(PROJECT)', [
+ {description, "$(PROJECT_DESCRIPTION)"},
+ {vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP),
+ {id$(comma)$(space)"$(1)"}$(comma))
+ {modules, [$(call comma_list,$(2))]},
+ {registered, []},
+ {applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(foreach dep,$(DEPS),$(call dep_name,$(dep))))]},
+ {env, $(subst \,\\,$(PROJECT_ENV))}$(if $(findstring {,$(PROJECT_APP_EXTRA_KEYS)),$(comma)$(newline)$(tab)$(subst \,\\,$(PROJECT_APP_EXTRA_KEYS)),)
+]}.
+endef
+else
+define app_file
+{application, '$(PROJECT)', [
+ {description, "$(PROJECT_DESCRIPTION)"},
+ {vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP),
+ {id$(comma)$(space)"$(1)"}$(comma))
+ {modules, [$(call comma_list,$(2))]},
+ {registered, [$(call comma_list,$(PROJECT)_sup $(PROJECT_REGISTERED))]},
+ {applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(foreach dep,$(DEPS),$(call dep_name,$(dep))))]},
+ {mod, {$(PROJECT_MOD), []}},
+ {env, $(subst \,\\,$(PROJECT_ENV))}$(if $(findstring {,$(PROJECT_APP_EXTRA_KEYS)),$(comma)$(newline)$(tab)$(subst \,\\,$(PROJECT_APP_EXTRA_KEYS)),)
+]}.
+endef
+endif
+
+app-build: ebin/$(PROJECT).app
+ $(verbose) :
+
+# Source files.
+
+ALL_SRC_FILES := $(sort $(call core_find,src/,*))
+
+ERL_FILES := $(filter %.erl,$(ALL_SRC_FILES))
+CORE_FILES := $(filter %.core,$(ALL_SRC_FILES))
+
+# ASN.1 files.
+
+ifneq ($(wildcard asn1/),)
+ASN1_FILES = $(sort $(call core_find,asn1/,*.asn1))
+ERL_FILES += $(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES))))
+
+define compile_asn1
+ $(verbose) mkdir -p include/
+ $(asn1_verbose) erlc -v -I include/ -o asn1/ +noobj $(ERLC_ASN1_OPTS) $(1)
+ $(verbose) mv asn1/*.erl src/
+ -$(verbose) mv asn1/*.hrl include/
+ $(verbose) mv asn1/*.asn1db include/
+endef
+
+$(PROJECT).d:: $(ASN1_FILES)
+ $(if $(strip $?),$(call compile_asn1,$?))
+endif
+
+# SNMP MIB files.
+
+ifneq ($(wildcard mibs/),)
+MIB_FILES = $(sort $(call core_find,mibs/,*.mib))
+
+$(PROJECT).d:: $(COMPILE_MIB_FIRST_PATHS) $(MIB_FILES)
+ $(verbose) mkdir -p include/ priv/mibs/
+ $(mib_verbose) erlc -v $(ERLC_MIB_OPTS) -o priv/mibs/ -I priv/mibs/ $?
+ $(mib_verbose) erlc -o include/ -- $(addprefix priv/mibs/,$(patsubst %.mib,%.bin,$(notdir $?)))
+endif
+
+# Leex and Yecc files.
+
+XRL_FILES := $(filter %.xrl,$(ALL_SRC_FILES))
+XRL_ERL_FILES = $(addprefix src/,$(patsubst %.xrl,%.erl,$(notdir $(XRL_FILES))))
+ERL_FILES += $(XRL_ERL_FILES)
+
+YRL_FILES := $(filter %.yrl,$(ALL_SRC_FILES))
+YRL_ERL_FILES = $(addprefix src/,$(patsubst %.yrl,%.erl,$(notdir $(YRL_FILES))))
+ERL_FILES += $(YRL_ERL_FILES)
+
+$(PROJECT).d:: $(XRL_FILES) $(YRL_FILES)
+ $(if $(strip $?),$(xyrl_verbose) erlc -v -o src/ $(YRL_ERLC_OPTS) $?)
+
+# Erlang and Core Erlang files.
+
+define makedep.erl
+ E = ets:new(makedep, [bag]),
+ G = digraph:new([acyclic]),
+ ErlFiles = lists:usort(string:tokens("$(ERL_FILES)", " ")),
+ DepsDir = "$(call core_native_path,$(DEPS_DIR))",
+ AppsDir = "$(call core_native_path,$(APPS_DIR))",
+ DepsDirsSrc = "$(if $(wildcard $(DEPS_DIR)/*/src), $(call core_native_path,$(wildcard $(DEPS_DIR)/*/src)))",
+ DepsDirsInc = "$(if $(wildcard $(DEPS_DIR)/*/include), $(call core_native_path,$(wildcard $(DEPS_DIR)/*/include)))",
+ AppsDirsSrc = "$(if $(wildcard $(APPS_DIR)/*/src), $(call core_native_path,$(wildcard $(APPS_DIR)/*/src)))",
+ AppsDirsInc = "$(if $(wildcard $(APPS_DIR)/*/include), $(call core_native_path,$(wildcard $(APPS_DIR)/*/include)))",
+ DepsDirs = lists:usort(string:tokens(DepsDirsSrc++DepsDirsInc, " ")),
+ AppsDirs = lists:usort(string:tokens(AppsDirsSrc++AppsDirsInc, " ")),
+ Modules = [{list_to_atom(filename:basename(F, ".erl")), F} || F <- ErlFiles],
+ Add = fun (Mod, Dep) ->
+ case lists:keyfind(Dep, 1, Modules) of
+ false -> ok;
+ {_, DepFile} ->
+ {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+ ets:insert(E, {ModFile, DepFile}),
+ digraph:add_vertex(G, Mod),
+ digraph:add_vertex(G, Dep),
+ digraph:add_edge(G, Mod, Dep)
+ end
+ end,
+ AddHd = fun (F, Mod, DepFile) ->
+ case file:open(DepFile, [read]) of
+ {error, enoent} ->
+ ok;
+ {ok, Fd} ->
+ {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+ case ets:match(E, {ModFile, DepFile}) of
+ [] ->
+ ets:insert(E, {ModFile, DepFile}),
+ F(F, Fd, Mod,0);
+ _ -> ok
+ end
+ end
+ end,
+ SearchHrl = fun
+ F(_Hrl, []) -> {error,enoent};
+ F(Hrl, [Dir|Dirs]) ->
+ HrlF = filename:join([Dir,Hrl]),
+ case filelib:is_file(HrlF) of
+ true ->
+ {ok, HrlF};
+ false -> F(Hrl,Dirs)
+ end
+ end,
+ Attr = fun
+ (_F, Mod, behavior, Dep) ->
+ Add(Mod, Dep);
+ (_F, Mod, behaviour, Dep) ->
+ Add(Mod, Dep);
+ (_F, Mod, compile, {parse_transform, Dep}) ->
+ Add(Mod, Dep);
+ (_F, Mod, compile, Opts) when is_list(Opts) ->
+ case proplists:get_value(parse_transform, Opts) of
+ undefined -> ok;
+ Dep -> Add(Mod, Dep)
+ end;
+ (F, Mod, include, Hrl) ->
+ case SearchHrl(Hrl, ["src", "include",AppsDir,DepsDir]++AppsDirs++DepsDirs) of
+ {ok, FoundHrl} -> AddHd(F, Mod, FoundHrl);
+ {error, _} -> false
+ end;
+ (F, Mod, include_lib, Hrl) ->
+ case SearchHrl(Hrl, ["src", "include",AppsDir,DepsDir]++AppsDirs++DepsDirs) of
+ {ok, FoundHrl} -> AddHd(F, Mod, FoundHrl);
+ {error, _} -> false
+ end;
+ (F, Mod, import, {Imp, _}) ->
+ IsFile =
+ case lists:keyfind(Imp, 1, Modules) of
+ false -> false;
+ {_, FilePath} -> filelib:is_file(FilePath)
+ end,
+ case IsFile of
+ false -> ok;
+ true -> Add(Mod, Imp)
+ end;
+ (_, _, _, _) -> ok
+ end,
+ MakeDepend = fun
+ (F, Fd, Mod, StartLocation) ->
+ {ok, Filename} = file:pid2name(Fd),
+ case io:parse_erl_form(Fd, undefined, StartLocation) of
+ {ok, AbsData, EndLocation} ->
+ case AbsData of
+ {attribute, _, Key, Value} ->
+ Attr(F, Mod, Key, Value),
+ F(F, Fd, Mod, EndLocation);
+ _ -> F(F, Fd, Mod, EndLocation)
+ end;
+ {eof, _ } -> file:close(Fd);
+ {error, ErrorDescription } ->
+ file:close(Fd);
+ {error, ErrorInfo, ErrorLocation} ->
+ F(F, Fd, Mod, ErrorLocation)
+ end,
+ ok
+ end,
+ [begin
+ Mod = list_to_atom(filename:basename(F, ".erl")),
+ case file:open(F, [read]) of
+ {ok, Fd} -> MakeDepend(MakeDepend, Fd, Mod,0);
+ {error, enoent} -> ok
+ end
+ end || F <- ErlFiles],
+ Depend = sofs:to_external(sofs:relation_to_family(sofs:relation(ets:tab2list(E)))),
+ CompileFirst = [X || X <- lists:reverse(digraph_utils:topsort(G)), [] =/= digraph:in_neighbours(G, X)],
+ TargetPath = fun(Target) ->
+ case lists:keyfind(Target, 1, Modules) of
+ false -> "";
+ {_, DepFile} ->
+ DirSubname = tl(string:tokens(filename:dirname(DepFile), "/")),
+ string:join(DirSubname ++ [atom_to_list(Target)], "/")
+ end
+ end,
+ Output0 = [
+ "# Generated by Erlang.mk. Edit at your own risk!\n\n",
+ [[F, "::", [[" ", D] || D <- Deps], "; @touch \$$@\n"] || {F, Deps} <- Depend],
+ "\nCOMPILE_FIRST +=", [[" ", TargetPath(CF)] || CF <- CompileFirst], "\n"
+ ],
+ Output = case "é" of
+ [233] -> unicode:characters_to_binary(Output0);
+ _ -> Output0
+ end,
+ ok = file:write_file("$(1)", Output),
+ halt()
+endef
+
+ifeq ($(if $(NO_MAKEDEP),$(wildcard $(PROJECT).d),),)
+$(PROJECT).d:: $(ERL_FILES) $(call core_find,include/,*.hrl) $(MAKEFILE_LIST)
+ $(makedep_verbose) $(call erlang,$(call makedep.erl,$@))
+endif
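+
+# As a sketch: invoking "make NO_MAKEDEP=1" keeps an existing $(PROJECT).d
+# instead of regenerating it when source files or makefiles change.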
+
+ifeq ($(IS_APP)$(IS_DEP),)
+ifneq ($(words $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES)),0)
+# Rebuild everything when the Makefile changes.
+$(ERLANG_MK_TMP)/last-makefile-change: $(MAKEFILE_LIST) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES); \
+ touch -c $(PROJECT).d; \
+ fi
+ $(verbose) touch $@
+
+$(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES):: $(ERLANG_MK_TMP)/last-makefile-change
+ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change
+endif
+endif
+
+$(PROJECT).d::
+ $(verbose) :
+
+include $(wildcard $(PROJECT).d)
+
+ebin/$(PROJECT).app:: ebin/
+
+ebin/:
+ $(verbose) mkdir -p ebin/
+
+define compile_erl
+ $(erlc_verbose) erlc -v $(if $(IS_DEP),$(filter-out -Werror,$(ERLC_OPTS)),$(ERLC_OPTS)) -o ebin/ \
+ -pa ebin/ -I include/ $(filter-out $(ERLC_EXCLUDE_PATHS),$(COMPILE_FIRST_PATHS) $(1))
+endef
+
+define validate_app_file
+ case file:consult("ebin/$(PROJECT).app") of
+ {ok, _} -> halt();
+ _ -> halt(1)
+ end
+endef
+
+ebin/$(PROJECT).app:: $(ERL_FILES) $(CORE_FILES) $(wildcard src/$(PROJECT).app.src)
+ $(eval FILES_TO_COMPILE := $(filter-out src/$(PROJECT).app.src,$?))
+ $(if $(strip $(FILES_TO_COMPILE)),$(call compile_erl,$(FILES_TO_COMPILE)))
+# Older git versions do not have the --first-parent flag. Do without in that case.
+ $(eval GITDESCRIBE := $(shell git describe --dirty --abbrev=7 --tags --always --first-parent 2>/dev/null \
+ || git describe --dirty --abbrev=7 --tags --always 2>/dev/null || true))
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(filter-out $(ERLC_EXCLUDE_PATHS),$(ERL_FILES) $(CORE_FILES) $(BEAM_FILES)))))))
+ifeq ($(wildcard src/$(PROJECT).app.src),)
+ $(app_verbose) printf '$(subst %,%%,$(subst $(newline),\n,$(subst ','\'',$(call app_file,$(GITDESCRIBE),$(MODULES)))))' \
+ > ebin/$(PROJECT).app
+ $(verbose) if ! $(call erlang,$(call validate_app_file)); then \
+ echo "The .app file produced is invalid. Please verify the value of PROJECT_ENV." >&2; \
+ exit 1; \
+ fi
+else
+ $(verbose) if [ -z "$$(grep -e '^[^%]*{\s*modules\s*,' src/$(PROJECT).app.src)" ]; then \
+ echo "Empty modules entry not found in $(PROJECT).app.src. Please consult the erlang.mk documentation for instructions." >&2; \
+ exit 1; \
+ fi
+ $(appsrc_verbose) cat src/$(PROJECT).app.src \
+ | sed "s/{[[:space:]]*modules[[:space:]]*,[[:space:]]*\[\]}/{modules, \[$(call comma_list,$(MODULES))\]}/" \
+ | sed "s/{id,[[:space:]]*\"git\"}/{id, \"$(subst /,\/,$(GITDESCRIBE))\"}/" \
+ > ebin/$(PROJECT).app
+endif
+ifneq ($(wildcard src/$(PROJECT).appup),)
+ $(verbose) cp src/$(PROJECT).appup ebin/
+endif
+
+clean:: clean-app
+
+clean-app:
+ $(gen_verbose) rm -rf $(PROJECT).d ebin/ priv/mibs/ $(XRL_ERL_FILES) $(YRL_ERL_FILES) \
+ $(addprefix include/,$(patsubst %.mib,%.hrl,$(notdir $(MIB_FILES)))) \
+ $(addprefix include/,$(patsubst %.asn1,%.hrl,$(notdir $(ASN1_FILES)))) \
+ $(addprefix include/,$(patsubst %.asn1,%.asn1db,$(notdir $(ASN1_FILES)))) \
+ $(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES))))
+
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Viktor Söderqvist <viktor@zuiderkwast.se>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: docs-deps
+
+# Configuration.
+
+ALL_DOC_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(DOC_DEPS))
+
+# Targets.
+
+$(foreach dep,$(DOC_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+doc-deps:
+else
+doc-deps: $(ALL_DOC_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_DOC_DEPS_DIRS) ; do $(MAKE) -C $$dep IS_DEP=1; done
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: rel-deps
+
+# Configuration.
+
+ALL_REL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(REL_DEPS))
+
+# Targets.
+
+$(foreach dep,$(REL_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+rel-deps:
+else
+rel-deps: $(ALL_REL_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_REL_DEPS_DIRS) ; do $(MAKE) -C $$dep; done
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: test-deps test-dir test-build clean-test-dir
+
+# Configuration.
+
+TEST_DIR ?= $(CURDIR)/test
+
+ALL_TEST_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(TEST_DEPS))
+
+TEST_ERLC_OPTS ?= +debug_info +warn_export_vars +warn_shadow_vars +warn_obsolete_guard
+TEST_ERLC_OPTS += -DTEST=1
+
+# Targets.
+
+$(foreach dep,$(TEST_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+test-deps:
+else
+test-deps: $(ALL_TEST_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_TEST_DEPS_DIRS) ; do \
+ if [ -z "$(strip $(FULL))" ] && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ else \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ fi \
+ done
+endif
+
+ifneq ($(wildcard $(TEST_DIR)),)
+test-dir: $(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build
+ @:
+
+test_erlc_verbose_0 = @echo " ERLC " $(filter-out $(patsubst %,%.erl,$(ERLC_EXCLUDE)),\
+ $(filter %.erl %.core,$(notdir $(FILES_TO_COMPILE))));
+test_erlc_verbose_2 = set -x;
+test_erlc_verbose = $(test_erlc_verbose_$(V))
+
+define compile_test_erl
+ $(test_erlc_verbose) erlc -v $(TEST_ERLC_OPTS) -o $(TEST_DIR) \
+ -pa ebin/ -I include/ $(1)
+endef
+
+ERL_TEST_FILES = $(call core_find,$(TEST_DIR)/,*.erl)
+$(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build: $(ERL_TEST_FILES) $(MAKEFILE_LIST)
+ $(eval FILES_TO_COMPILE := $(if $(filter $(MAKEFILE_LIST),$?),$(filter $(ERL_TEST_FILES),$^),$?))
+ $(if $(strip $(FILES_TO_COMPILE)),$(call compile_test_erl,$(FILES_TO_COMPILE)) && touch $@)
+endif
+
+test-build:: IS_TEST=1
+test-build:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build:: $(if $(wildcard src),$(if $(wildcard ebin/test),,clean)) $(if $(IS_APP),,deps test-deps)
+# We already compiled everything when IS_APP=1.
+ifndef IS_APP
+ifneq ($(wildcard src),)
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(verbose) $(MAKE) --no-print-directory app-build ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(gen_verbose) touch ebin/test
+endif
+ifneq ($(wildcard $(TEST_DIR)),)
+ $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+endif
+endif
+
+# Roughly the same as test-build, but when IS_APP=1.
+# We only care about compiling the current application.
+ifdef IS_APP
+test-build-app:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build-app:: deps test-deps
+ifneq ($(wildcard src),)
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(verbose) $(MAKE) --no-print-directory app-build ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(gen_verbose) touch ebin/test
+endif
+ifneq ($(wildcard $(TEST_DIR)),)
+ $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+endif
+endif
+
+clean:: clean-test-dir
+
+clean-test-dir:
+ifneq ($(wildcard $(TEST_DIR)/*.beam),)
+ $(gen_verbose) rm -f $(TEST_DIR)/*.beam $(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: rebar.config
+
+# We strip out -Werror because we don't want to fail due to
+# warnings when used as a dependency.
+
+compat_prepare_erlc_opts = $(shell echo "$1" | sed 's/, */,/g')
+
+define compat_convert_erlc_opts
+$(if $(filter-out -Werror,$1),\
+ $(if $(findstring +,$1),\
+ $(shell echo $1 | cut -b 2-)))
+endef
+
+define compat_erlc_opts_to_list
+[$(call comma_list,$(foreach o,$(call compat_prepare_erlc_opts,$1),$(call compat_convert_erlc_opts,$o)))]
+endef
+
+define compat_rebar_config
+{deps, [
+$(call comma_list,$(foreach d,$(DEPS),\
+ $(if $(filter hex,$(call dep_fetch,$d)),\
+ {$(call dep_name,$d)$(comma)"$(call dep_repo,$d)"},\
+ {$(call dep_name,$d)$(comma)".*"$(comma){git,"$(call dep_repo,$d)"$(comma)"$(call dep_commit,$d)"}})))
+]}.
+{erl_opts, $(call compat_erlc_opts_to_list,$(ERLC_OPTS))}.
+endef
+
+rebar.config:
+ $(gen_verbose) $(call core_render,compat_rebar_config,rebar.config)
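+
+# As an illustration (name, URL and commit hypothetical), a declaration like
+# "dep_cowboy = git https://github.com/ninenines/cowboy 2.9.0" renders as:
+#   {cowboy, ".*", {git, "https://github.com/ninenines/cowboy", "2.9.0"}}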
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter asciideck,$(DEPS) $(DOC_DEPS)),asciideck)
+
+.PHONY: asciidoc asciidoc-guide asciidoc-manual install-asciidoc distclean-asciidoc-guide distclean-asciidoc-manual
+
+# Core targets.
+
+docs:: asciidoc
+
+distclean:: distclean-asciidoc-guide distclean-asciidoc-manual
+
+# Plugin-specific targets.
+
+asciidoc: asciidoc-guide asciidoc-manual
+
+# User guide.
+
+ifeq ($(wildcard doc/src/guide/book.asciidoc),)
+asciidoc-guide:
+else
+asciidoc-guide: distclean-asciidoc-guide doc-deps
+ a2x -v -f pdf doc/src/guide/book.asciidoc && mv doc/src/guide/book.pdf doc/guide.pdf
+ a2x -v -f chunked doc/src/guide/book.asciidoc && mv doc/src/guide/book.chunked/ doc/html/
+
+distclean-asciidoc-guide:
+ $(gen_verbose) rm -rf doc/html/ doc/guide.pdf
+endif
+
+# Man pages.
+
+ASCIIDOC_MANUAL_FILES := $(wildcard doc/src/manual/*.asciidoc)
+
+ifeq ($(ASCIIDOC_MANUAL_FILES),)
+asciidoc-manual:
+else
+
+# Configuration.
+
+MAN_INSTALL_PATH ?= /usr/local/share/man
+MAN_SECTIONS ?= 3 7
+MAN_PROJECT ?= $(shell echo $(PROJECT) | sed 's/^./\U&\E/')
+MAN_VERSION ?= $(PROJECT_VERSION)
+
+# Plugin-specific targets.
+
+define asciidoc2man.erl
+try
+ [begin
+ io:format(" ADOC ~s~n", [F]),
+ ok = asciideck:to_manpage(asciideck:parse_file(F), #{
+ compress => gzip,
+ outdir => filename:dirname(F),
+ extra2 => "$(MAN_PROJECT) $(MAN_VERSION)",
+ extra3 => "$(MAN_PROJECT) Function Reference"
+ })
+ end || F <- [$(shell echo $(addprefix $(comma)\",$(addsuffix \",$1)) | sed 's/^.//')]],
+ halt(0)
+catch C:E ->
+ io:format("Exception ~p:~p~nStacktrace: ~p~n", [C, E, erlang:get_stacktrace()]),
+ halt(1)
+end.
+endef
+
+asciidoc-manual:: doc-deps
+
+asciidoc-manual:: $(ASCIIDOC_MANUAL_FILES)
+ $(gen_verbose) $(call erlang,$(call asciidoc2man.erl,$?))
+ $(verbose) $(foreach s,$(MAN_SECTIONS),mkdir -p doc/man$s/ && mv doc/src/manual/*.$s.gz doc/man$s/;)
+
+install-docs:: install-asciidoc
+
+install-asciidoc: asciidoc-manual
+ $(foreach s,$(MAN_SECTIONS),\
+ mkdir -p $(MAN_INSTALL_PATH)/man$s/ && \
+ install -g `id -g` -o `id -u` -m 0644 doc/man$s/*.gz $(MAN_INSTALL_PATH)/man$s/;)
+
+distclean-asciidoc-manual:
+ $(gen_verbose) rm -rf $(addprefix doc/man,$(MAN_SECTIONS))
+endif
+endif
+
+# Copyright (c) 2014-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: bootstrap bootstrap-lib bootstrap-rel new list-templates
+
+# Core targets.
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Bootstrap targets:" \
+ " bootstrap Generate a skeleton of an OTP application" \
+ " bootstrap-lib Generate a skeleton of an OTP library" \
+ " bootstrap-rel Generate the files needed to build a release" \
+ " new-app in=NAME Create a new local OTP application NAME" \
+ " new-lib in=NAME Create a new local OTP library NAME" \
+ " new t=TPL n=NAME Generate a module NAME based on the template TPL" \
+ " new t=T n=N in=APP Generate a module NAME based on the template TPL in APP" \
+ " list-templates List available templates"
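+
+# A few illustrative invocations (template and module names hypothetical):
+#   $ make -f erlang.mk bootstrap
+#   $ make new t=gen_server n=my_server
+#   $ make new t=supervisor n=my_sup in=my_app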
+
+# Bootstrap templates.
+
+define bs_appsrc
+{application, $p, [
+ {description, ""},
+ {vsn, "0.1.0"},
+ {id, "git"},
+ {modules, []},
+ {registered, []},
+ {applications, [
+ kernel,
+ stdlib
+ ]},
+ {mod, {$p_app, []}},
+ {env, []}
+]}.
+endef
+
+define bs_appsrc_lib
+{application, $p, [
+ {description, ""},
+ {vsn, "0.1.0"},
+ {id, "git"},
+ {modules, []},
+ {registered, []},
+ {applications, [
+ kernel,
+ stdlib
+ ]}
+]}.
+endef
+
+# To prevent autocompletion issues with ZSH, we add "include erlang.mk"
+# separately during the actual bootstrap.
+define bs_Makefile
+PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.1.0
+$(if $(SP),
+# Whitespace to be used when creating files from templates.
+SP = $(SP)
+)
+endef
+
+define bs_apps_Makefile
+PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.1.0
+$(if $(SP),
+# Whitespace to be used when creating files from templates.
+SP = $(SP)
+)
+# Make sure we know where the applications are located.
+ROOT_DIR ?= $(call core_relpath,$(dir $(ERLANG_MK_FILENAME)),$(APPS_DIR)/app)
+APPS_DIR ?= ..
+DEPS_DIR ?= $(call core_relpath,$(DEPS_DIR),$(APPS_DIR)/app)
+
+include $$(ROOT_DIR)/erlang.mk
+endef
+
+define bs_app
+-module($p_app).
+-behaviour(application).
+
+-export([start/2]).
+-export([stop/1]).
+
+start(_Type, _Args) ->
+ $p_sup:start_link().
+
+stop(_State) ->
+ ok.
+endef
+
+define bs_relx_config
+{release, {$p_release, "1"}, [$p, sasl, runtime_tools]}.
+{extended_start_script, true}.
+{sys_config, "config/sys.config"}.
+{vm_args, "config/vm.args"}.
+endef
+
+define bs_sys_config
+[
+].
+endef
+
+define bs_vm_args
+-name $p@127.0.0.1
+-setcookie $p
+-heart
+endef
+
+# Normal templates.
+
+define tpl_supervisor
+-module($(n)).
+-behaviour(supervisor).
+
+-export([start_link/0]).
+-export([init/1]).
+
+start_link() ->
+ supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+init([]) ->
+ Procs = [],
+ {ok, {{one_for_one, 1, 5}, Procs}}.
+endef
+
+define tpl_gen_server
+-module($(n)).
+-behaviour(gen_server).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_server.
+-export([init/1]).
+-export([handle_call/3]).
+-export([handle_cast/2]).
+-export([handle_info/2]).
+-export([terminate/2]).
+-export([code_change/3]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_server:start_link(?MODULE, [], []).
+
+%% gen_server.
+
+init([]) ->
+ {ok, #state{}}.
+
+handle_call(_Request, _From, State) ->
+ {reply, ignored, State}.
+
+handle_cast(_Msg, State) ->
+ {noreply, State}.
+
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+endef
+
+define tpl_module
+-module($(n)).
+-export([]).
+endef
+
+define tpl_cowboy_http
+-module($(n)).
+-behaviour(cowboy_http_handler).
+
+-export([init/3]).
+-export([handle/2]).
+-export([terminate/3]).
+
+-record(state, {
+}).
+
+init(_, Req, _Opts) ->
+ {ok, Req, #state{}}.
+
+handle(Req, State=#state{}) ->
+ {ok, Req2} = cowboy_req:reply(200, Req),
+ {ok, Req2, State}.
+
+terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_gen_fsm
+-module($(n)).
+-behaviour(gen_fsm).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_fsm.
+-export([init/1]).
+-export([state_name/2]).
+-export([handle_event/3]).
+-export([state_name/3]).
+-export([handle_sync_event/4]).
+-export([handle_info/3]).
+-export([terminate/3]).
+-export([code_change/4]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_fsm:start_link(?MODULE, [], []).
+
+%% gen_fsm.
+
+init([]) ->
+ {ok, state_name, #state{}}.
+
+state_name(_Event, StateData) ->
+ {next_state, state_name, StateData}.
+
+handle_event(_Event, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+state_name(_Event, _From, StateData) ->
+ {reply, ignored, state_name, StateData}.
+
+handle_sync_event(_Event, _From, StateName, StateData) ->
+ {reply, ignored, StateName, StateData}.
+
+handle_info(_Info, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+terminate(_Reason, _StateName, _StateData) ->
+ ok.
+
+code_change(_OldVsn, StateName, StateData, _Extra) ->
+ {ok, StateName, StateData}.
+endef
+
+define tpl_gen_statem
+-module($(n)).
+-behaviour(gen_statem).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_statem.
+-export([callback_mode/0]).
+-export([init/1]).
+-export([state_name/3]).
+-export([handle_event/4]).
+-export([terminate/3]).
+-export([code_change/4]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_statem:start_link(?MODULE, [], []).
+
+%% gen_statem.
+
+callback_mode() ->
+ state_functions.
+
+init([]) ->
+ {ok, state_name, #state{}}.
+
+state_name(_EventType, _EventData, StateData) ->
+ {next_state, state_name, StateData}.
+
+handle_event(_EventType, _EventData, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+terminate(_Reason, _StateName, _StateData) ->
+ ok.
+
+code_change(_OldVsn, StateName, StateData, _Extra) ->
+ {ok, StateName, StateData}.
+endef
+
+define tpl_cowboy_loop
+-module($(n)).
+-behaviour(cowboy_loop_handler).
+
+-export([init/3]).
+-export([info/3]).
+-export([terminate/3]).
+
+-record(state, {
+}).
+
+init(_, Req, _Opts) ->
+ {loop, Req, #state{}, 5000, hibernate}.
+
+info(_Info, Req, State) ->
+ {loop, Req, State, hibernate}.
+
+terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_cowboy_rest
+-module($(n)).
+
+-export([init/3]).
+-export([content_types_provided/2]).
+-export([get_html/2]).
+
+init(_, _Req, _Opts) ->
+ {upgrade, protocol, cowboy_rest}.
+
+content_types_provided(Req, State) ->
+ {[{{<<"text">>, <<"html">>, '*'}, get_html}], Req, State}.
+
+get_html(Req, State) ->
+ {<<"<html><body>This is REST!</body></html>">>, Req, State}.
+endef
+
+define tpl_cowboy_ws
+-module($(n)).
+-behaviour(cowboy_websocket_handler).
+
+-export([init/3]).
+-export([websocket_init/3]).
+-export([websocket_handle/3]).
+-export([websocket_info/3]).
+-export([websocket_terminate/3]).
+
+-record(state, {
+}).
+
+init(_, _, _) ->
+ {upgrade, protocol, cowboy_websocket}.
+
+websocket_init(_, Req, _Opts) ->
+ Req2 = cowboy_req:compact(Req),
+ {ok, Req2, #state{}}.
+
+websocket_handle({text, Data}, Req, State) ->
+ {reply, {text, Data}, Req, State};
+websocket_handle({binary, Data}, Req, State) ->
+ {reply, {binary, Data}, Req, State};
+websocket_handle(_Frame, Req, State) ->
+ {ok, Req, State}.
+
+websocket_info(_Info, Req, State) ->
+ {ok, Req, State}.
+
+websocket_terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_ranch_protocol
+-module($(n)).
+-behaviour(ranch_protocol).
+
+-export([start_link/4]).
+-export([init/4]).
+
+-type opts() :: [].
+-export_type([opts/0]).
+
+-record(state, {
+ socket :: inet:socket(),
+ transport :: module()
+}).
+
+start_link(Ref, Socket, Transport, Opts) ->
+ Pid = spawn_link(?MODULE, init, [Ref, Socket, Transport, Opts]),
+ {ok, Pid}.
+
+-spec init(ranch:ref(), inet:socket(), module(), opts()) -> ok.
+init(Ref, Socket, Transport, _Opts) ->
+ ok = ranch:accept_ack(Ref),
+ loop(#state{socket=Socket, transport=Transport}).
+
+loop(State) ->
+ loop(State).
+endef
+
+# Plugin-specific targets.
+
+ifndef WS
+ifdef SP
+WS = $(subst a,,a $(wordlist 1,$(SP),a a a a a a a a a a a a a a a a a a a a))
+else
+WS = $(tab)
+endif
+endif
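+
+# As a sketch: "make bootstrap SP=2" makes the generated files use two-space
+# indentation instead of the default tab.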
+
+bootstrap:
+ifneq ($(wildcard src/),)
+ $(error Error: src/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(eval n := $(PROJECT)_sup)
+ $(verbose) $(call core_render,bs_Makefile,Makefile)
+ $(verbose) echo "include erlang.mk" >> Makefile
+ $(verbose) mkdir src/
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc,src/$(PROJECT).app.src)
+endif
+ $(verbose) $(call core_render,bs_app,src/$(PROJECT)_app.erl)
+ $(verbose) $(call core_render,tpl_supervisor,src/$(PROJECT)_sup.erl)
+
+bootstrap-lib:
+ifneq ($(wildcard src/),)
+ $(error Error: src/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(verbose) $(call core_render,bs_Makefile,Makefile)
+ $(verbose) echo "include erlang.mk" >> Makefile
+ $(verbose) mkdir src/
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc_lib,src/$(PROJECT).app.src)
+endif
+
+bootstrap-rel:
+ifneq ($(wildcard relx.config),)
+ $(error Error: relx.config already exists)
+endif
+ifneq ($(wildcard config/),)
+ $(error Error: config/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(verbose) $(call core_render,bs_relx_config,relx.config)
+ $(verbose) mkdir config/
+ $(verbose) $(call core_render,bs_sys_config,config/sys.config)
+ $(verbose) $(call core_render,bs_vm_args,config/vm.args)
+
+new-app:
+ifndef in
+ $(error Usage: $(MAKE) new-app in=APP)
+endif
+ifneq ($(wildcard $(APPS_DIR)/$in),)
+ $(error Error: Application $in already exists)
+endif
+ $(eval p := $(in))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(eval n := $(in)_sup)
+ $(verbose) mkdir -p $(APPS_DIR)/$p/src/
+ $(verbose) $(call core_render,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile)
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc,$(APPS_DIR)/$p/src/$p.app.src)
+endif
+ $(verbose) $(call core_render,bs_app,$(APPS_DIR)/$p/src/$p_app.erl)
+ $(verbose) $(call core_render,tpl_supervisor,$(APPS_DIR)/$p/src/$p_sup.erl)
+
+new-lib:
+ifndef in
+ $(error Usage: $(MAKE) new-lib in=APP)
+endif
+ifneq ($(wildcard $(APPS_DIR)/$in),)
+ $(error Error: Application $in already exists)
+endif
+ $(eval p := $(in))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(verbose) mkdir -p $(APPS_DIR)/$p/src/
+ $(verbose) $(call core_render,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile)
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc_lib,$(APPS_DIR)/$p/src/$p.app.src)
+endif
+
+new:
+ifeq ($(wildcard src/)$(in),)
+ $(error Error: src/ directory does not exist)
+endif
+ifndef t
+ $(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP])
+endif
+ifndef n
+ $(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP])
+endif
+ifdef in
+ $(verbose) $(call core_render,tpl_$(t),$(APPS_DIR)/$(in)/src/$(n).erl)
+else
+ $(verbose) $(call core_render,tpl_$(t),src/$(n).erl)
+endif
+
+list-templates:
+	$(verbose) echo Available templates:
+ $(verbose) printf " %s\n" $(sort $(patsubst tpl_%,%,$(filter tpl_%,$(.VARIABLES))))
+
+# Copyright (c) 2014-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: clean-c_src distclean-c_src-env
+
+# Configuration.
+
+C_SRC_DIR ?= $(CURDIR)/c_src
+C_SRC_ENV ?= $(C_SRC_DIR)/env.mk
+C_SRC_OUTPUT ?= $(CURDIR)/priv/$(PROJECT)
+C_SRC_TYPE ?= shared
+
+# System type and C compiler/flags.
+
+ifeq ($(PLATFORM),msys2)
+ C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?= .exe
+ C_SRC_OUTPUT_SHARED_EXTENSION ?= .dll
+else
+ C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?=
+ C_SRC_OUTPUT_SHARED_EXTENSION ?= .so
+endif
+
+ifeq ($(C_SRC_TYPE),shared)
+ C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_SHARED_EXTENSION)
+else
+ C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_EXECUTABLE_EXTENSION)
+endif
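+
+# As an example (project name hypothetical): with the defaults above, a
+# project named my_nif builds priv/my_nif.so on Linux (priv/my_nif.dll on
+# msys2), and an executable instead when C_SRC_TYPE is not "shared".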
+
+ifeq ($(PLATFORM),msys2)
+# We hardcode the compiler used on MSYS2. The default CC=cc does
+# not produce working code. The "gcc" MSYS2 package also doesn't.
+ CC = /mingw64/bin/gcc
+ export CC
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),darwin)
+ CC ?= cc
+ CFLAGS ?= -O3 -std=c99 -arch x86_64 -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -arch x86_64 -Wall
+ LDFLAGS ?= -arch x86_64 -flat_namespace -undefined suppress
+else ifeq ($(PLATFORM),freebsd)
+ CC ?= cc
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),linux)
+ CC ?= gcc
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+endif
+
+ifneq ($(PLATFORM),msys2)
+ CFLAGS += -fPIC
+ CXXFLAGS += -fPIC
+endif
+
+CFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
+CXXFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
+
+LDLIBS += -L"$(ERL_INTERFACE_LIB_DIR)" -lei
+
+# Verbosity.
+
+c_verbose_0 = @echo " C " $(filter-out $(notdir $(MAKEFILE_LIST) $(C_SRC_ENV)),$(^F));
+c_verbose = $(c_verbose_$(V))
+
+cpp_verbose_0 = @echo " CPP " $(filter-out $(notdir $(MAKEFILE_LIST) $(C_SRC_ENV)),$(^F));
+cpp_verbose = $(cpp_verbose_$(V))
+
+link_verbose_0 = @echo " LD " $(@F);
+link_verbose = $(link_verbose_$(V))
+
+# Targets.
+
+ifeq ($(wildcard $(C_SRC_DIR)),)
+else ifneq ($(wildcard $(C_SRC_DIR)/Makefile),)
+app:: app-c_src
+
+test-build:: app-c_src
+
+app-c_src:
+ $(MAKE) -C $(C_SRC_DIR)
+
+clean::
+ $(MAKE) -C $(C_SRC_DIR) clean
+
+else
+
+ifeq ($(SOURCES),)
+SOURCES := $(sort $(foreach pat,*.c *.C *.cc *.cpp,$(call core_find,$(C_SRC_DIR)/,$(pat))))
+endif
+OBJECTS = $(addsuffix .o, $(basename $(SOURCES)))
+
+COMPILE_C = $(c_verbose) $(CC) $(CFLAGS) $(CPPFLAGS) -c
+COMPILE_CPP = $(cpp_verbose) $(CXX) $(CXXFLAGS) $(CPPFLAGS) -c
+
+app:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
+
+test-build:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
+
+$(C_SRC_OUTPUT_FILE): $(OBJECTS)
+ $(verbose) mkdir -p $(dir $@)
+ $(link_verbose) $(CC) $(OBJECTS) \
+ $(LDFLAGS) $(if $(filter $(C_SRC_TYPE),shared),-shared) $(LDLIBS) \
+ -o $(C_SRC_OUTPUT_FILE)
+
+$(OBJECTS): $(MAKEFILE_LIST) $(C_SRC_ENV)
+
+%.o: %.c
+ $(COMPILE_C) $(OUTPUT_OPTION) $<
+
+%.o: %.cc
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+%.o: %.C
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+%.o: %.cpp
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+clean:: clean-c_src
+
+clean-c_src:
+ $(gen_verbose) rm -f $(C_SRC_OUTPUT_FILE) $(OBJECTS)
+
+endif
+
+ifneq ($(wildcard $(C_SRC_DIR)),)
+ERL_ERTS_DIR = $(shell $(ERL) -eval 'io:format("~s~n", [code:lib_dir(erts)]), halt().')
+
+$(C_SRC_ENV):
+ $(verbose) $(ERL) -eval "file:write_file(\"$(call core_native_path,$(C_SRC_ENV))\", \
+ io_lib:format( \
+ \"# Generated by Erlang.mk. Edit at your own risk!~n~n\" \
+ \"ERTS_INCLUDE_DIR ?= ~s/erts-~s/include/~n\" \
+ \"ERL_INTERFACE_INCLUDE_DIR ?= ~s~n\" \
+ \"ERL_INTERFACE_LIB_DIR ?= ~s~n\" \
+ \"ERTS_DIR ?= $(ERL_ERTS_DIR)~n\", \
+ [code:root_dir(), erlang:system_info(version), \
+ code:lib_dir(erl_interface, include), \
+ code:lib_dir(erl_interface, lib)])), \
+ halt()."
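+
+# The generated env.mk looks along these lines; the exact values depend on
+# the local Erlang/OTP installation (the paths below are only an example):
+#
+#   # Generated by Erlang.mk. Edit at your own risk!
+#
+#   ERTS_INCLUDE_DIR ?= /usr/lib/erlang/erts-11.1/include/
+#   ERL_INTERFACE_INCLUDE_DIR ?= /usr/lib/erlang/lib/erl_interface-4.0.1/include
+#   ERL_INTERFACE_LIB_DIR ?= /usr/lib/erlang/lib/erl_interface-4.0.1/lib
+#   ERTS_DIR ?= /usr/lib/erlang/erts-11.1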
+
+distclean:: distclean-c_src-env
+
+distclean-c_src-env:
+ $(gen_verbose) rm -f $(C_SRC_ENV)
+
+-include $(C_SRC_ENV)
+
+ifneq ($(ERL_ERTS_DIR),$(ERTS_DIR))
+$(shell rm -f $(C_SRC_ENV))
+endif
+endif
+
+# Templates.
+
+define bs_c_nif
+#include "erl_nif.h"
+
+static int loads = 0;
+
+static int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
+{
+ /* Initialize private data. */
+ *priv_data = NULL;
+
+ loads++;
+
+ return 0;
+}
+
+static int upgrade(ErlNifEnv* env, void** priv_data, void** old_priv_data, ERL_NIF_TERM load_info)
+{
+ /* Convert the private data to the new version. */
+ *priv_data = *old_priv_data;
+
+ loads++;
+
+ return 0;
+}
+
+static void unload(ErlNifEnv* env, void* priv_data)
+{
+ if (loads == 1) {
+ /* Destroy the private data. */
+ }
+
+ loads--;
+}
+
+static ERL_NIF_TERM hello(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ if (enif_is_atom(env, argv[0])) {
+ return enif_make_tuple2(env,
+ enif_make_atom(env, "hello"),
+ argv[0]);
+ }
+
+ return enif_make_tuple2(env,
+ enif_make_atom(env, "error"),
+ enif_make_atom(env, "badarg"));
+}
+
+static ErlNifFunc nif_funcs[] = {
+ {"hello", 1, hello}
+};
+
+ERL_NIF_INIT($n, nif_funcs, load, NULL, upgrade, unload)
+endef
+
+define bs_erl_nif
+-module($n).
+
+-export([hello/1]).
+
+-on_load(on_load/0).
+on_load() ->
+ PrivDir = case code:priv_dir(?MODULE) of
+ {error, _} ->
+ AppPath = filename:dirname(filename:dirname(code:which(?MODULE))),
+ filename:join(AppPath, "priv");
+ Path ->
+ Path
+ end,
+ erlang:load_nif(filename:join(PrivDir, atom_to_list(?MODULE)), 0).
+
+hello(_) ->
+ erlang:nif_error({not_loaded, ?MODULE}).
+endef
+
+new-nif:
+ifneq ($(wildcard $(C_SRC_DIR)/$n.c),)
+ $(error Error: $(C_SRC_DIR)/$n.c already exists)
+endif
+ifneq ($(wildcard src/$n.erl),)
+ $(error Error: src/$n.erl already exists)
+endif
+ifndef n
+ $(error Usage: $(MAKE) new-nif n=NAME [in=APP])
+endif
+ifdef in
+ $(verbose) $(MAKE) -C $(APPS_DIR)/$(in)/ new-nif n=$n in=
+else
+ $(verbose) mkdir -p $(C_SRC_DIR) src/
+ $(verbose) $(call core_render,bs_c_nif,$(C_SRC_DIR)/$n.c)
+ $(verbose) $(call core_render,bs_erl_nif,src/$n.erl)
+endif
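+
+# Typical usage, with a hypothetical NIF name; this renders both templates
+# above, and the next build compiles the C part via the c_src plugin:
+#
+#   $ make new-nif n=my_nif
+#   $ make
+#   $ erl -pa ebin -eval 'io:format("~p~n", [my_nif:hello(world)]), halt().'
+#
+# The last command should print {hello,world}.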
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: ci ci-prepare ci-setup
+
+CI_OTP ?=
+CI_HIPE ?=
+CI_ERLLVM ?=
+
+ifeq ($(CI_VM),native)
+ERLC_OPTS += +native
+TEST_ERLC_OPTS += +native
+else ifeq ($(CI_VM),erllvm)
+ERLC_OPTS += +native +'{hipe, [to_llvm]}'
+TEST_ERLC_OPTS += +native +'{hipe, [to_llvm]}'
+endif
+
+ifeq ($(strip $(CI_OTP) $(CI_HIPE) $(CI_ERLLVM)),)
+ci::
+else
+
+ci:: $(addprefix ci-,$(CI_OTP) $(addsuffix -native,$(CI_HIPE)) $(addsuffix -erllvm,$(CI_ERLLVM)))
+
+ci-prepare: $(addprefix $(KERL_INSTALL_DIR)/,$(CI_OTP) $(addsuffix -native,$(CI_HIPE)))
+
+ci-setup::
+ $(verbose) :
+
+ci-extra::
+ $(verbose) :
+
+ci_verbose_0 = @echo " CI " $(1);
+ci_verbose = $(ci_verbose_$(V))
+
+define ci_target
+ci-$1: $(KERL_INSTALL_DIR)/$2
+ $(verbose) $(MAKE) --no-print-directory clean
+ $(ci_verbose) \
+ PATH="$(KERL_INSTALL_DIR)/$2/bin:$(PATH)" \
+ CI_OTP_RELEASE="$1" \
+ CT_OPTS="-label $1" \
+ CI_VM="$3" \
+ $(MAKE) ci-setup tests
+ $(verbose) $(MAKE) --no-print-directory ci-extra
+endef
+
+$(foreach otp,$(CI_OTP),$(eval $(call ci_target,$(otp),$(otp),otp)))
+$(foreach otp,$(CI_HIPE),$(eval $(call ci_target,$(otp)-native,$(otp)-native,native)))
+$(foreach otp,$(CI_ERLLVM),$(eval $(call ci_target,$(otp)-erllvm,$(otp)-native,erllvm)))
+
+$(foreach otp,$(filter-out $(ERLANG_OTP),$(CI_OTP)),$(eval $(call kerl_otp_target,$(otp))))
+$(foreach otp,$(filter-out $(ERLANG_HIPE),$(sort $(CI_HIPE) $(CI_ERLLVM))),$(eval $(call kerl_hipe_target,$(otp))))
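+
+# A project can thus request CI runs across several OTP releases by setting,
+# for instance (the version names must match kerl-buildable releases):
+#
+#   CI_OTP = OTP-21.3.8 OTP-22.3 OTP-23.1
+#
+# `make ci` then runs `make ci-setup tests` once per version; the ci-setup::
+# and ci-extra:: hooks can be extended for per-run preparation and cleanup.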
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Continuous Integration targets:" \
+ " ci Run '$(MAKE) tests' on all configured Erlang versions." \
+ "" \
+		"The CI_OTP variable must list the Erlang versions to be tested." \
+		"For example: CI_OTP = OTP-17.3.4 OTP-17.5.3"
+
+endif
+
+# Copyright (c) 2020, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifdef CONCUERROR_TESTS
+
+.PHONY: concuerror distclean-concuerror
+
+# Configuration
+
+CONCUERROR_LOGS_DIR ?= $(CURDIR)/logs
+CONCUERROR_OPTS ?=
+
+# Core targets.
+
+check:: concuerror
+
+ifndef KEEP_LOGS
+distclean:: distclean-concuerror
+endif
+
+# Plugin-specific targets.
+
+$(ERLANG_MK_TMP)/Concuerror/bin/concuerror: | $(ERLANG_MK_TMP)
+ $(verbose) git clone https://github.com/parapluu/Concuerror $(ERLANG_MK_TMP)/Concuerror
+ $(verbose) $(MAKE) -C $(ERLANG_MK_TMP)/Concuerror
+
+$(CONCUERROR_LOGS_DIR):
+ $(verbose) mkdir -p $(CONCUERROR_LOGS_DIR)
+
+define concuerror_html_report
+<!DOCTYPE html>
+<html lang="en">
+<head>
+<meta charset="utf-8">
+<title>Concuerror HTML report</title>
+</head>
+<body>
+<h1>Concuerror HTML report</h1>
+<p>Generated on $(concuerror_date)</p>
+<ul>
+$(foreach t,$(concuerror_targets),<li><a href="$(t).txt">$(t)</a></li>)
+</ul>
+</body>
+</html>
+endef
+
+concuerror: $(addprefix concuerror-,$(subst :,-,$(CONCUERROR_TESTS)))
+ $(eval concuerror_date := $(shell date))
+ $(eval concuerror_targets := $^)
+ $(verbose) $(call core_render,concuerror_html_report,$(CONCUERROR_LOGS_DIR)/concuerror.html)
+
+define concuerror_target
+.PHONY: concuerror-$1-$2
+
+concuerror-$1-$2: test-build | $(ERLANG_MK_TMP)/Concuerror/bin/concuerror $(CONCUERROR_LOGS_DIR)
+ $(ERLANG_MK_TMP)/Concuerror/bin/concuerror \
+ --pa $(CURDIR)/ebin --pa $(TEST_DIR) \
+ -o $(CONCUERROR_LOGS_DIR)/concuerror-$1-$2.txt \
+ $$(CONCUERROR_OPTS) -m $1 -t $2
+endef
+
+$(foreach test,$(CONCUERROR_TESTS),$(eval $(call concuerror_target,$(firstword $(subst :, ,$(test))),$(lastword $(subst :, ,$(test))))))
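+
+# CONCUERROR_TESTS takes module:function pairs; a hypothetical example:
+#
+#   CONCUERROR_TESTS = my_reg_tests:race_test my_reg_tests:lock_test
+#
+# `make concuerror` then runs one concuerror-MODULE-FUNCTION target per
+# entry and writes an HTML index of the logs in $(CONCUERROR_LOGS_DIR).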
+
+distclean-concuerror:
+ $(gen_verbose) rm -rf $(CONCUERROR_LOGS_DIR)
+
+endif
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: ct apps-ct distclean-ct
+
+# Configuration.
+
+CT_OPTS ?=
+
+ifneq ($(wildcard $(TEST_DIR)),)
+ifndef CT_SUITES
+CT_SUITES := $(sort $(subst _SUITE.erl,,$(notdir $(call core_find,$(TEST_DIR)/,*_SUITE.erl))))
+endif
+endif
+CT_SUITES ?=
+CT_LOGS_DIR ?= $(CURDIR)/logs
+
+# Core targets.
+
+tests:: ct
+
+ifndef KEEP_LOGS
+distclean:: distclean-ct
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Common_test targets:" \
+ " ct Run all the common_test suites for this project" \
+ "" \
+		"Each of your common_test suites has an associated target." \
+		"A suite named http_SUITE can be run using the ct-http target."
+
+# Plugin-specific targets.
+
+CT_RUN = ct_run \
+ -no_auto_compile \
+ -noinput \
+ -pa $(CURDIR)/ebin $(TEST_DIR) \
+ -dir $(TEST_DIR) \
+ -logdir $(CT_LOGS_DIR)
+
+ifeq ($(CT_SUITES),)
+ct: $(if $(IS_APP)$(ROOT_DIR),,apps-ct)
+else
+# We do not run tests if we are in an apps/* with no test directory.
+ifneq ($(IS_APP)$(wildcard $(TEST_DIR)),1)
+ct: test-build $(if $(IS_APP)$(ROOT_DIR),,apps-ct)
+ $(verbose) mkdir -p $(CT_LOGS_DIR)
+ $(gen_verbose) $(CT_RUN) -sname ct_$(PROJECT) -suite $(addsuffix _SUITE,$(CT_SUITES)) $(CT_OPTS)
+endif
+endif
+
+ifneq ($(ALL_APPS_DIRS),)
+define ct_app_target
+apps-ct-$1: test-build
+ $$(MAKE) -C $1 ct IS_APP=1
+endef
+
+$(foreach app,$(ALL_APPS_DIRS),$(eval $(call ct_app_target,$(app))))
+
+apps-ct: $(addprefix apps-ct-,$(ALL_APPS_DIRS))
+endif
+
+ifdef t
+ifeq (,$(findstring :,$t))
+CT_EXTRA = -group $t
+else
+t_words = $(subst :, ,$t)
+CT_EXTRA = -group $(firstword $(t_words)) -case $(lastword $(t_words))
+endif
+else
+ifdef c
+CT_EXTRA = -case $c
+else
+CT_EXTRA =
+endif
+endif
+
+define ct_suite_target
+ct-$(1): test-build
+ $(verbose) mkdir -p $(CT_LOGS_DIR)
+ $(gen_verbose_esc) $(CT_RUN) -sname ct_$(PROJECT) -suite $(addsuffix _SUITE,$(1)) $(CT_EXTRA) $(CT_OPTS)
+endef
+
+$(foreach test,$(CT_SUITES),$(eval $(call ct_suite_target,$(test))))
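+
+# Putting it together for a hypothetical test/http_SUITE.erl:
+#
+#   $ make ct                       # run every suite
+#   $ make ct-http                  # run http_SUITE only
+#   $ make ct-http t=slow           # run the group "slow"
+#   $ make ct-http t=slow:get_page  # run one case within that group
+#   $ make ct-http c=get_page       # run one case without a group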
+
+distclean-ct:
+ $(gen_verbose) rm -rf $(CT_LOGS_DIR)
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: plt distclean-plt dialyze
+
+# Configuration.
+
+DIALYZER_PLT ?= $(CURDIR)/.$(PROJECT).plt
+export DIALYZER_PLT
+
+PLT_APPS ?=
+DIALYZER_DIRS ?= --src -r $(wildcard src) $(ALL_APPS_DIRS)
+DIALYZER_OPTS ?= -Werror_handling -Wrace_conditions -Wunmatched_returns # -Wunderspecs
+DIALYZER_PLT_OPTS ?=
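+
+# These can be overridden before including erlang.mk. For instance, a
+# project using ssl might add its applications to the PLT and trim the
+# warnings (an illustrative sketch):
+#
+#   PLT_APPS = crypto public_key ssl
+#   DIALYZER_OPTS = -Werror_handling -Wunmatched_returns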
+
+# Core targets.
+
+check:: dialyze
+
+distclean:: distclean-plt
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Dialyzer targets:" \
+ " plt Build a PLT file for this project" \
+ " dialyze Analyze the project using Dialyzer"
+
+# Plugin-specific targets.
+
+define filter_opts.erl
+ Opts = init:get_plain_arguments(),
+ {Filtered, _} = lists:foldl(fun
+ (O, {Os, true}) -> {[O|Os], false};
+ (O = "-D", {Os, _}) -> {[O|Os], true};
+ (O = [\\$$-, \\$$D, _ | _], {Os, _}) -> {[O|Os], false};
+ (O = "-I", {Os, _}) -> {[O|Os], true};
+ (O = [\\$$-, \\$$I, _ | _], {Os, _}) -> {[O|Os], false};
+ (O = "-pa", {Os, _}) -> {[O|Os], true};
+ (_, Acc) -> Acc
+ end, {[], false}, Opts),
+ io:format("~s~n", [string:join(lists:reverse(Filtered), " ")]),
+ halt().
+endef
+
+# DIALYZER_PLT is a variable understood directly by Dialyzer.
+#
+# We append the path to erts at the end of the PLT. This works
+# because the PLT file is in the external term format and the
+# function binary_to_term/1 ignores any trailing data.
+$(DIALYZER_PLT): deps app
+ $(eval DEPS_LOG := $(shell test -f $(ERLANG_MK_TMP)/deps.log && \
+ while read p; do test -d $$p/ebin && echo $$p/ebin; done <$(ERLANG_MK_TMP)/deps.log))
+ $(verbose) dialyzer --build_plt $(DIALYZER_PLT_OPTS) --apps \
+ erts kernel stdlib $(PLT_APPS) $(OTP_DEPS) $(LOCAL_DEPS) $(DEPS_LOG) || test $$? -eq 2
+ $(verbose) $(ERL) -eval 'io:format("~n~s~n", [code:lib_dir(erts)]), halt().' >> $@
+
+plt: $(DIALYZER_PLT)
+
+distclean-plt:
+ $(gen_verbose) rm -f $(DIALYZER_PLT)
+
+ifneq ($(wildcard $(DIALYZER_PLT)),)
+dialyze: $(if $(filter --src,$(DIALYZER_DIRS)),,deps app)
+ $(verbose) if ! tail -n1 $(DIALYZER_PLT) | \
+ grep -q "^`$(ERL) -eval 'io:format("~s", [code:lib_dir(erts)]), halt().'`$$"; then \
+ rm $(DIALYZER_PLT); \
+ $(MAKE) plt; \
+ fi
+else
+dialyze: $(DIALYZER_PLT)
+endif
+ $(verbose) dialyzer --no_native `$(ERL) \
+ -eval "$(subst $(newline),,$(call escape_dquotes,$(call filter_opts.erl)))" \
+ -extra $(ERLC_OPTS)` $(DIALYZER_DIRS) $(DIALYZER_OPTS) $(if $(wildcard ebin/),-pa ebin/)
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-edoc edoc
+
+# Configuration.
+
+EDOC_OPTS ?=
+EDOC_SRC_DIRS ?=
+EDOC_OUTPUT ?= doc
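+
+# For example, to also document the applications in apps/ and write the
+# output somewhere other than doc/ (values illustrative):
+#
+#   EDOC_SRC_DIRS = $(ALL_APPS_DIRS)
+#   EDOC_OPTS = {preprocess, true}
+#   EDOC_OUTPUT = doc/api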
+
+define edoc.erl
+ SrcPaths = lists:foldl(fun(P, Acc) ->
+ filelib:wildcard(atom_to_list(P) ++ "/{src,c_src}") ++ Acc
+ end, [], [$(call comma_list,$(patsubst %,'%',$(call core_native_path,$(EDOC_SRC_DIRS))))]),
+ DefaultOpts = [{dir, "$(EDOC_OUTPUT)"}, {source_path, SrcPaths}, {subpackages, false}],
+ edoc:application($(1), ".", [$(2)] ++ DefaultOpts),
+ halt(0).
+endef
+
+# Core targets.
+
+ifneq ($(strip $(EDOC_SRC_DIRS)$(wildcard doc/overview.edoc)),)
+docs:: edoc
+endif
+
+distclean:: distclean-edoc
+
+# Plugin-specific targets.
+
+edoc: distclean-edoc doc-deps
+ $(gen_verbose) $(call erlang,$(call edoc.erl,$(PROJECT),$(EDOC_OPTS)))
+
+distclean-edoc:
+ $(gen_verbose) rm -f $(EDOC_OUTPUT)/*.css $(EDOC_OUTPUT)/*.html $(EDOC_OUTPUT)/*.png $(EDOC_OUTPUT)/edoc-info
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Configuration.
+
+DTL_FULL_PATH ?=
+DTL_PATH ?= templates/
+DTL_PREFIX ?=
+DTL_SUFFIX ?= _dtl
+DTL_OPTS ?=
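+
+# With the defaults, a template stored in templates/home.dtl compiles to a
+# module named home_dtl. With, say (illustrative values):
+#
+#   DTL_PREFIX = my_
+#   DTL_FULL_PATH = 1
+#
+# templates/blog/post.dtl would compile to the module my_blog_post_dtl.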
+
+# Verbosity.
+
+dtl_verbose_0 = @echo " DTL " $(filter %.dtl,$(?F));
+dtl_verbose = $(dtl_verbose_$(V))
+
+# Core targets.
+
+DTL_PATH := $(abspath $(DTL_PATH))
+DTL_FILES := $(sort $(call core_find,$(DTL_PATH),*.dtl))
+
+ifneq ($(DTL_FILES),)
+
+DTL_NAMES = $(addprefix $(DTL_PREFIX),$(addsuffix $(DTL_SUFFIX),$(DTL_FILES:$(DTL_PATH)/%.dtl=%)))
+DTL_MODULES = $(if $(DTL_FULL_PATH),$(subst /,_,$(DTL_NAMES)),$(notdir $(DTL_NAMES)))
+BEAM_FILES += $(addsuffix .beam,$(addprefix ebin/,$(DTL_MODULES)))
+
+ifneq ($(words $(DTL_FILES)),0)
+# Rebuild templates when the Makefile changes.
+$(ERLANG_MK_TMP)/last-makefile-change-erlydtl: $(MAKEFILE_LIST) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(DTL_FILES); \
+ fi
+ $(verbose) touch $@
+
+ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change-erlydtl
+endif
+
+define erlydtl_compile.erl
+ [begin
+ Module0 = case "$(strip $(DTL_FULL_PATH))" of
+ "" ->
+ filename:basename(F, ".dtl");
+ _ ->
+ "$(call core_native_path,$(DTL_PATH))/" ++ F2 = filename:rootname(F, ".dtl"),
+ re:replace(F2, "/", "_", [{return, list}, global])
+ end,
+ Module = list_to_atom("$(DTL_PREFIX)" ++ string:to_lower(Module0) ++ "$(DTL_SUFFIX)"),
+ case erlydtl:compile(F, Module, [$(DTL_OPTS)] ++ [{out_dir, "ebin/"}, return_errors]) of
+ ok -> ok;
+ {ok, _} -> ok
+ end
+ end || F <- string:tokens("$(1)", " ")],
+ halt().
+endef
+
+ebin/$(PROJECT).app:: $(DTL_FILES) | ebin/
+ $(if $(strip $?),\
+ $(dtl_verbose) $(call erlang,$(call erlydtl_compile.erl,$(call core_native_path,$?)),\
+ -pa ebin/))
+
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, Dave Cottlehuber <dch@skunkwerks.at>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-escript escript escript-zip
+
+# Configuration.
+
+ESCRIPT_NAME ?= $(PROJECT)
+ESCRIPT_FILE ?= $(ESCRIPT_NAME)
+
+ESCRIPT_SHEBANG ?= /usr/bin/env escript
+ESCRIPT_COMMENT ?= This is an -*- erlang -*- file
+ESCRIPT_EMU_ARGS ?= -escript main $(ESCRIPT_NAME)
+
+ESCRIPT_ZIP ?= 7z a -tzip -mx=9 -mtc=off $(if $(filter-out 0,$(V)),,> /dev/null)
+ESCRIPT_ZIP_FILE ?= $(ERLANG_MK_TMP)/escript.zip
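+
+# Building and running the resulting escript (the file name defaults to
+# the project name; note that a 7z binary must be available in $PATH):
+#
+#   $ make escript
+#   $ ./my_project        # illustrative project name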
+
+# Core targets.
+
+distclean:: distclean-escript
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Escript targets:" \
+		"  escript     Build an executable escript archive"
+
+# Plugin-specific targets.
+
+escript-zip:: FULL=1
+escript-zip:: deps app
+ $(verbose) mkdir -p $(dir $(ESCRIPT_ZIP))
+ $(verbose) rm -f $(ESCRIPT_ZIP_FILE)
+ $(gen_verbose) cd .. && $(ESCRIPT_ZIP) $(ESCRIPT_ZIP_FILE) $(PROJECT)/ebin/*
+ifneq ($(DEPS),)
+ $(verbose) cd $(DEPS_DIR) && $(ESCRIPT_ZIP) $(ESCRIPT_ZIP_FILE) \
+ $(subst $(DEPS_DIR)/,,$(addsuffix /*,$(wildcard \
+ $(addsuffix /ebin,$(shell cat $(ERLANG_MK_TMP)/deps.log)))))
+endif
+
+escript:: escript-zip
+ $(gen_verbose) printf "%s\n" \
+ "#!$(ESCRIPT_SHEBANG)" \
+ "%% $(ESCRIPT_COMMENT)" \
+ "%%! $(ESCRIPT_EMU_ARGS)" > $(ESCRIPT_FILE)
+ $(verbose) cat $(ESCRIPT_ZIP_FILE) >> $(ESCRIPT_FILE)
+ $(verbose) chmod +x $(ESCRIPT_FILE)
+
+distclean-escript:
+ $(gen_verbose) rm -f $(ESCRIPT_FILE)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, Enrique Fernandez <enrique.fernandez@erlang-solutions.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: eunit apps-eunit
+
+# Configuration
+
+EUNIT_OPTS ?=
+EUNIT_ERL_OPTS ?=
+
+# Core targets.
+
+tests:: eunit
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "EUnit targets:" \
+ " eunit Run all the EUnit tests for this project"
+
+# Plugin-specific targets.
+
+define eunit.erl
+ $(call cover.erl)
+ CoverSetup(),
+ case eunit:test($1, [$(EUNIT_OPTS)]) of
+ ok -> ok;
+ error -> halt(2)
+ end,
+ CoverExport("$(call core_native_path,$(COVER_DATA_DIR))/eunit.coverdata"),
+ halt()
+endef
+
+EUNIT_ERL_OPTS += -pa $(TEST_DIR) $(CURDIR)/ebin
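+
+# As the targets below show, the t variable narrows what runs: a bare
+# module name runs that module's tests, while module:function runs a
+# single zero-arity test function. For a hypothetical my_mod:
+#
+#   $ make eunit                        # everything
+#   $ make eunit t=my_mod               # eunit:test([my_mod])
+#   $ make eunit t=my_mod:simple_test   # runs fun my_mod:simple_test/0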
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+eunit: test-build cover-data-dir
+ $(gen_verbose) $(call erlang,$(call eunit.erl,['$(t)']),$(EUNIT_ERL_OPTS))
+else
+eunit: test-build cover-data-dir
+ $(gen_verbose) $(call erlang,$(call eunit.erl,fun $(t)/0),$(EUNIT_ERL_OPTS))
+endif
+else
+EUNIT_EBIN_MODS = $(notdir $(basename $(ERL_FILES) $(BEAM_FILES)))
+EUNIT_TEST_MODS = $(notdir $(basename $(call core_find,$(TEST_DIR)/,*.erl)))
+
+EUNIT_MODS = $(foreach mod,$(EUNIT_EBIN_MODS) $(filter-out \
+ $(patsubst %,%_tests,$(EUNIT_EBIN_MODS)),$(EUNIT_TEST_MODS)),'$(mod)')
+
+eunit: test-build $(if $(IS_APP)$(ROOT_DIR),,apps-eunit) cover-data-dir
+ifneq ($(wildcard src/ $(TEST_DIR)),)
+ $(gen_verbose) $(call erlang,$(call eunit.erl,[$(call comma_list,$(EUNIT_MODS))]),$(EUNIT_ERL_OPTS))
+endif
+
+ifneq ($(ALL_APPS_DIRS),)
+apps-eunit: test-build
+ $(verbose) eunit_retcode=0 ; for app in $(ALL_APPS_DIRS); do $(MAKE) -C $$app eunit IS_APP=1; \
+ [ $$? -ne 0 ] && eunit_retcode=1 ; done ; \
+ exit $$eunit_retcode
+endif
+endif
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter proper,$(DEPS) $(TEST_DEPS)),proper)
+.PHONY: proper
+
+# Targets.
+
+tests:: proper
+
+define proper_check.erl
+ $(call cover.erl)
+ code:add_pathsa([
+ "$(call core_native_path,$(CURDIR)/ebin)",
+ "$(call core_native_path,$(DEPS_DIR)/*/ebin)",
+ "$(call core_native_path,$(TEST_DIR))"]),
+ Module = fun(M) ->
+ [true] =:= lists:usort([
+ case atom_to_list(F) of
+ "prop_" ++ _ ->
+ io:format("Testing ~p:~p/0~n", [M, F]),
+ proper:quickcheck(M:F(), nocolors);
+ _ ->
+ true
+ end
+ || {F, 0} <- M:module_info(exports)])
+ end,
+ try begin
+ CoverSetup(),
+ Res = case $(1) of
+ all -> [true] =:= lists:usort([Module(M) || M <- [$(call comma_list,$(3))]]);
+ module -> Module($(2));
+ function -> proper:quickcheck($(2), nocolors)
+ end,
+ CoverExport("$(COVER_DATA_DIR)/proper.coverdata"),
+ Res
+ end of
+ true -> halt(0);
+ _ -> halt(1)
+ catch error:undef ->
+ io:format("Undefined property or module?~n~p~n", [erlang:get_stacktrace()]),
+ halt(0)
+ end.
+endef
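+
+# Only zero-arity exported functions whose names begin with prop_ are run.
+# A property module might look like this sketch (PropEr must be listed in
+# DEPS or TEST_DEPS for this plugin to be enabled at all):
+#
+#   -module(prop_lists).
+#   -include_lib("proper/include/proper.hrl").
+#   -export([prop_reverse/0]).
+#
+#   prop_reverse() ->
+#       ?FORALL(L, list(integer()),
+#           lists:reverse(lists:reverse(L)) =:= L).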
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+proper: test-build cover-data-dir
+ $(verbose) $(call erlang,$(call proper_check.erl,module,$(t)))
+else
+proper: test-build cover-data-dir
+ $(verbose) echo Testing $(t)/0
+ $(verbose) $(call erlang,$(call proper_check.erl,function,$(t)()))
+endif
+else
+proper: test-build cover-data-dir
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(wildcard ebin/*.beam) $(call core_find,$(TEST_DIR)/,*.beam))))))
+ $(gen_verbose) $(call erlang,$(call proper_check.erl,all,undefined,$(MODULES)))
+endif
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Verbosity.
+
+proto_verbose_0 = @echo " PROTO " $(filter %.proto,$(?F));
+proto_verbose = $(proto_verbose_$(V))
+
+# Core targets.
+
+ifneq ($(wildcard src/),)
+ifneq ($(filter gpb protobuffs,$(BUILD_DEPS) $(DEPS)),)
+PROTO_FILES := $(filter %.proto,$(ALL_SRC_FILES))
+ERL_FILES += $(addprefix src/,$(patsubst %.proto,%_pb.erl,$(notdir $(PROTO_FILES))))
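+
+# A project enables this by depending on one of the two compilers and
+# keeping its .proto files under src/; with gpb (file name illustrative),
+# src/thing.proto yields src/thing_pb.erl plus include/thing_pb.hrl:
+#
+#   DEPS += gpb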
+
+ifeq ($(PROTO_FILES),)
+$(ERLANG_MK_TMP)/last-makefile-change-protobuffs:
+ $(verbose) :
+else
+# Rebuild proto files when the Makefile changes.
+# We exclude $(PROJECT).d to avoid a circular dependency.
+$(ERLANG_MK_TMP)/last-makefile-change-protobuffs: $(filter-out $(PROJECT).d,$(MAKEFILE_LIST)) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(PROTO_FILES); \
+ fi
+ $(verbose) touch $@
+
+$(PROJECT).d:: $(ERLANG_MK_TMP)/last-makefile-change-protobuffs
+endif
+
+ifeq ($(filter gpb,$(BUILD_DEPS) $(DEPS)),)
+define compile_proto.erl
+ [begin
+ protobuffs_compile:generate_source(F, [
+ {output_include_dir, "./include"},
+ {output_src_dir, "./src"}])
+ end || F <- string:tokens("$1", " ")],
+ halt().
+endef
+else
+define compile_proto.erl
+ [begin
+ gpb_compile:file(F, [
+ {include_as_lib, true},
+ {module_name_suffix, "_pb"},
+ {o_hrl, "./include"},
+ {o_erl, "./src"}])
+ end || F <- string:tokens("$1", " ")],
+ halt().
+endef
+endif
+
+ifneq ($(PROTO_FILES),)
+$(PROJECT).d:: $(PROTO_FILES)
+ $(verbose) mkdir -p ebin/ include/
+ $(if $(strip $?),$(proto_verbose) $(call erlang,$(call compile_proto.erl,$?)))
+endif
+endif
+endif
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: relx-rel relx-relup distclean-relx-rel run
+
+# Configuration.
+
+RELX ?= $(ERLANG_MK_TMP)/relx
+RELX_CONFIG ?= $(CURDIR)/relx.config
+
+RELX_URL ?= https://erlang.mk/res/relx-v3.27.0
+RELX_OPTS ?=
+RELX_OUTPUT_DIR ?= _rel
+RELX_REL_EXT ?=
+RELX_TAR ?= 1
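+
+# A minimal relx.config consumed by these targets could contain (release
+# and application names are illustrative):
+#
+#   {release, {my_rel, "1.0.0"}, [my_app]}.
+#   {extended_start_script, true}.
+#
+# `make rel` then builds the release under $(RELX_OUTPUT_DIR) and, when
+# RELX_TAR is 1, also produces a tarball of it.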
+
+ifdef SFX
+ RELX_TAR = 1
+endif
+
+ifeq ($(firstword $(RELX_OPTS)),-o)
+ RELX_OUTPUT_DIR = $(word 2,$(RELX_OPTS))
+else
+ RELX_OPTS += -o $(RELX_OUTPUT_DIR)
+endif
+
+# Core targets.
+
+ifeq ($(IS_DEP),)
+ifneq ($(wildcard $(RELX_CONFIG)),)
+rel:: relx-rel
+
+relup:: relx-relup
+endif
+endif
+
+distclean:: distclean-relx-rel
+
+# Plugin-specific targets.
+
+$(RELX): | $(ERLANG_MK_TMP)
+ $(gen_verbose) $(call core_http_get,$(RELX),$(RELX_URL))
+ $(verbose) chmod +x $(RELX)
+
+relx-rel: $(RELX) rel-deps app
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) release
+ $(verbose) $(MAKE) relx-post-rel
+ifeq ($(RELX_TAR),1)
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) tar
+endif
+
+relx-relup: $(RELX) rel-deps app
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) release
+ $(MAKE) relx-post-rel
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) relup $(if $(filter 1,$(RELX_TAR)),tar)
+
+distclean-relx-rel:
+ $(gen_verbose) rm -rf $(RELX_OUTPUT_DIR)
+
+# Default hooks.
+relx-post-rel::
+ $(verbose) :
+
+# Run target.
+
+ifeq ($(wildcard $(RELX_CONFIG)),)
+run::
+else
+
+define get_relx_release.erl
+ {ok, Config} = file:consult("$(call core_native_path,$(RELX_CONFIG))"),
+ {release, {Name, Vsn0}, _} = lists:keyfind(release, 1, Config),
+ Vsn = case Vsn0 of
+ {cmd, Cmd} -> os:cmd(Cmd);
+ semver -> "";
+ {semver, _} -> "";
+ VsnStr -> Vsn0
+ end,
+ Extended = case lists:keyfind(extended_start_script, 1, Config) of
+ {_, true} -> "1";
+ _ -> ""
+ end,
+ io:format("~s ~s ~s", [Name, Vsn, Extended]),
+ halt(0).
+endef
+
+RELX_REL := $(shell $(call erlang,$(get_relx_release.erl)))
+RELX_REL_NAME := $(word 1,$(RELX_REL))
+RELX_REL_VSN := $(word 2,$(RELX_REL))
+RELX_REL_CMD := $(if $(word 3,$(RELX_REL)),console)
+
+ifeq ($(PLATFORM),msys2)
+RELX_REL_EXT := .cmd
+endif
+
+run:: all
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) $(RELX_REL_CMD)
+
+ifdef RELOAD
+rel::
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) ping
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) \
+ eval "io:format(\"~p~n\", [c:lm()])"
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Relx targets:" \
+ " run Compile the project, build the release and run it"
+
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, M Robert Martin <rob@version2beta.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: shell
+
+# Configuration.
+
+SHELL_ERL ?= erl
+SHELL_PATHS ?= $(CURDIR)/ebin $(TEST_DIR)
+SHELL_OPTS ?=
+
+ALL_SHELL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(SHELL_DEPS))
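+
+# Example configuration, set before including erlang.mk (the dependency
+# and flags shown are illustrative):
+#
+#   SHELL_DEPS = tddreloader
+#   SHELL_OPTS = -s tddreloader start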
+
+# Core targets
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Shell targets:" \
+		"  shell       Run an Erlang shell with SHELL_OPTS or a reasonable default"
+
+# Plugin-specific targets.
+
+$(foreach dep,$(SHELL_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+build-shell-deps:
+else
+build-shell-deps: $(ALL_SHELL_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_SHELL_DEPS_DIRS) ; do \
+ if [ -z "$(strip $(FULL))" ] && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ else \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ fi \
+ done
+endif
+
+shell:: build-shell-deps
+ $(gen_verbose) $(SHELL_ERL) -pa $(SHELL_PATHS) $(SHELL_OPTS)
+
+# Copyright 2017, Stanislaw Klekot <dozzie@jarowit.net>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-sphinx sphinx
+
+# Configuration.
+
+SPHINX_BUILD ?= sphinx-build
+SPHINX_SOURCE ?= doc
+SPHINX_CONFDIR ?=
+SPHINX_FORMATS ?= html
+SPHINX_DOCTREES ?= $(ERLANG_MK_TMP)/sphinx.doctrees
+SPHINX_OPTS ?=
+
+#sphinx_html_opts =
+#sphinx_html_output = html
+#sphinx_man_opts =
+#sphinx_man_output = man
+#sphinx_latex_opts =
+#sphinx_latex_output = latex
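+
+# For instance, to build HTML and man pages from sources kept in docs/
+# (directory names illustrative):
+#
+#   SPHINX_SOURCE = docs
+#   SPHINX_FORMATS = html man
+#   sphinx_man_output = docs/man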
+
+# Helpers.
+
+sphinx_build_0 = @echo " SPHINX" $1; $(SPHINX_BUILD) -N -q
+sphinx_build_1 = $(SPHINX_BUILD) -N
+sphinx_build_2 = set -x; $(SPHINX_BUILD)
+sphinx_build = $(sphinx_build_$(V))
+
+define sphinx.build
+$(call sphinx_build,$1) -b $1 -d $(SPHINX_DOCTREES) $(if $(SPHINX_CONFDIR),-c $(SPHINX_CONFDIR)) $(SPHINX_OPTS) $(sphinx_$1_opts) -- $(SPHINX_SOURCE) $(call sphinx.output,$1)
+
+endef
+
+define sphinx.output
+$(if $(sphinx_$1_output),$(sphinx_$1_output),$1)
+endef
+
+# Targets.
+
+ifneq ($(wildcard $(if $(SPHINX_CONFDIR),$(SPHINX_CONFDIR),$(SPHINX_SOURCE))/conf.py),)
+docs:: sphinx
+distclean:: distclean-sphinx
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Sphinx targets:" \
+ " sphinx Generate Sphinx documentation." \
+ "" \
+		"ReST sources and the 'conf.py' file are expected in the directory pointed to" \
+		"by SPHINX_SOURCE ('doc' by default). SPHINX_FORMATS lists the formats to build" \
+		"(only the 'html' format is generated by default); the target directory can be specified by" \
+ 'setting sphinx_$${format}_output, for example: sphinx_html_output = output/html' \
+ "Additional Sphinx options can be set in SPHINX_OPTS."
+
+# Plugin-specific targets.
+
+sphinx:
+ $(foreach F,$(SPHINX_FORMATS),$(call sphinx.build,$F))
+
+distclean-sphinx:
+ $(gen_verbose) rm -rf $(filter-out $(SPHINX_SOURCE),$(foreach F,$(SPHINX_FORMATS),$(call sphinx.output,$F)))
+
+# Copyright (c) 2017, Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: show-ERL_LIBS show-ERLC_OPTS show-TEST_ERLC_OPTS
+
+show-ERL_LIBS:
+ @echo $(ERL_LIBS)
+
+show-ERLC_OPTS:
+ @$(foreach opt,$(ERLC_OPTS) -pa ebin -I include,echo "$(opt)";)
+
+show-TEST_ERLC_OPTS:
+ @$(foreach opt,$(TEST_ERLC_OPTS) -pa ebin -I include,echo "$(opt)";)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter triq,$(DEPS) $(TEST_DEPS)),triq)
+.PHONY: triq
+
+# Targets.
+
+tests:: triq
+
+define triq_check.erl
+ $(call cover.erl)
+ code:add_pathsa([
+ "$(call core_native_path,$(CURDIR)/ebin)",
+ "$(call core_native_path,$(DEPS_DIR)/*/ebin)",
+ "$(call core_native_path,$(TEST_DIR))"]),
+ try begin
+ CoverSetup(),
+ Res = case $(1) of
+ all -> [true] =:= lists:usort([triq:check(M) || M <- [$(call comma_list,$(3))]]);
+ module -> triq:check($(2));
+ function -> triq:check($(2))
+ end,
+ CoverExport("$(COVER_DATA_DIR)/triq.coverdata"),
+ Res
+ end of
+ true -> halt(0);
+ _ -> halt(1)
+ catch error:undef ->
+ io:format("Undefined property or module?~n~p~n", [erlang:get_stacktrace()]),
+ halt(0)
+ end.
+endef
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+triq: test-build cover-data-dir
+ $(verbose) $(call erlang,$(call triq_check.erl,module,$(t)))
+else
+triq: test-build cover-data-dir
+ $(verbose) echo Testing $(t)/0
+ $(verbose) $(call erlang,$(call triq_check.erl,function,$(t)()))
+endif
+else
+triq: test-build cover-data-dir
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(wildcard ebin/*.beam) $(call core_find,$(TEST_DIR)/,*.beam))))))
+ $(gen_verbose) $(call erlang,$(call triq_check.erl,all,undefined,$(MODULES)))
+endif
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Erlang Solutions Ltd.
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: xref distclean-xref
+
+# Configuration.
+
+ifeq ($(XREF_CONFIG),)
+ XREFR_ARGS :=
+else
+ XREFR_ARGS := -c $(XREF_CONFIG)
+endif
+
+XREFR ?= $(CURDIR)/xrefr
+export XREFR
+
+XREFR_URL ?= https://github.com/inaka/xref_runner/releases/download/1.1.0/xrefr
+
+# Core targets.
+
+help::
+ $(verbose) printf '%s\n' '' \
+ 'Xref targets:' \
+		'  xref        Run Xrefr using $$XREF_CONFIG as the config file, if defined'
+
+distclean:: distclean-xref
+
+# Plugin-specific targets.
+
+$(XREFR):
+ $(gen_verbose) $(call core_http_get,$(XREFR),$(XREFR_URL))
+ $(verbose) chmod +x $(XREFR)
+
+xref: deps app $(XREFR)
+ $(gen_verbose) $(XREFR) $(XREFR_ARGS)
+
+distclean-xref:
+ $(gen_verbose) rm -rf $(XREFR)
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Viktor Söderqvist <viktor@zuiderkwast.se>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+COVER_REPORT_DIR ?= cover
+COVER_DATA_DIR ?= $(COVER_REPORT_DIR)
+
+ifdef COVER
+COVER_APPS ?= $(notdir $(ALL_APPS_DIRS))
+COVER_DEPS ?=
+endif
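+
+# Typical invocation, collecting coverage for the project, its apps and
+# one chosen dependency (the dependency name is illustrative):
+#
+#   $ make tests COVER=1 COVER_DEPS=cowlib
+#
+# Each tool then writes its own .coverdata file into $(COVER_DATA_DIR).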
+
+# Code coverage for Common Test.
+
+ifdef COVER
+ifdef CT_RUN
+ifneq ($(wildcard $(TEST_DIR)),)
+test-build:: $(TEST_DIR)/ct.cover.spec
+
+$(TEST_DIR)/ct.cover.spec: cover-data-dir
+ $(gen_verbose) printf "%s\n" \
+ "{incl_app, '$(PROJECT)', details}." \
+ "{incl_dirs, '$(PROJECT)', [\"$(call core_native_path,$(CURDIR)/ebin)\" \
+ $(foreach a,$(COVER_APPS),$(comma) \"$(call core_native_path,$(APPS_DIR)/$a/ebin)\") \
+ $(foreach d,$(COVER_DEPS),$(comma) \"$(call core_native_path,$(DEPS_DIR)/$d/ebin)\")]}." \
+ '{export,"$(call core_native_path,$(abspath $(COVER_DATA_DIR))/ct.coverdata)"}.' > $@
+
+CT_RUN += -cover $(TEST_DIR)/ct.cover.spec
+endif
+endif
+endif
+
+# Code coverage for other tools.
+
+ifdef COVER
+define cover.erl
+ CoverSetup = fun() ->
+ Dirs = ["$(call core_native_path,$(CURDIR)/ebin)"
+ $(foreach a,$(COVER_APPS),$(comma) "$(call core_native_path,$(APPS_DIR)/$a/ebin)")
+ $(foreach d,$(COVER_DEPS),$(comma) "$(call core_native_path,$(DEPS_DIR)/$d/ebin)")],
+ [begin
+ case filelib:is_dir(Dir) of
+ false -> false;
+ true ->
+ case cover:compile_beam_directory(Dir) of
+ {error, _} -> halt(1);
+ _ -> true
+ end
+ end
+ end || Dir <- Dirs]
+ end,
+ CoverExport = fun(Filename) -> cover:export(Filename) end,
+endef
+else
+define cover.erl
+ CoverSetup = fun() -> ok end,
+ CoverExport = fun(_) -> ok end,
+endef
+endif
+
+# Core targets
+
+ifdef COVER
+ifneq ($(COVER_REPORT_DIR),)
+tests::
+ $(verbose) $(MAKE) --no-print-directory cover-report
+endif
+
+cover-data-dir: | $(COVER_DATA_DIR)
+
+$(COVER_DATA_DIR):
+ $(verbose) mkdir -p $(COVER_DATA_DIR)
+else
+cover-data-dir:
+endif
+
+clean:: coverdata-clean
+
+ifneq ($(COVER_REPORT_DIR),)
+distclean:: cover-report-clean
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Cover targets:" \
+		"  cover-report  Generate an HTML coverage report from previously collected" \
+ " cover data." \
+ " all.coverdata Merge all coverdata files into all.coverdata." \
+ "" \
+ "If COVER=1 is set, coverage data is generated by the targets eunit and ct. The" \
+		"target tests additionally generates an HTML coverage report from the combined" \
+ "coverdata files from each of these testing tools. HTML reports can be disabled" \
+ "by setting COVER_REPORT_DIR to empty."
+
+# Plugin specific targets
+
+COVERDATA = $(filter-out $(COVER_DATA_DIR)/all.coverdata,$(wildcard $(COVER_DATA_DIR)/*.coverdata))
+
+.PHONY: coverdata-clean
+coverdata-clean:
+ $(gen_verbose) rm -f $(COVER_DATA_DIR)/*.coverdata $(TEST_DIR)/ct.cover.spec
+
+# Merge all coverdata files into one.
+define cover_export.erl
+ $(foreach f,$(COVERDATA),cover:import("$(f)") == ok orelse halt(1),)
+ cover:export("$(COVER_DATA_DIR)/$@"), halt(0).
+endef
+
+all.coverdata: $(COVERDATA) cover-data-dir
+ $(gen_verbose) $(call erlang,$(cover_export.erl))
+
+# These are only defined if COVER_REPORT_DIR is non-empty. Set COVER_REPORT_DIR to
+# empty if you want the coverdata files but not the HTML report.
+ifneq ($(COVER_REPORT_DIR),)
+
+.PHONY: cover-report-clean cover-report
+
+cover-report-clean:
+ $(gen_verbose) rm -rf $(COVER_REPORT_DIR)
+ifneq ($(COVER_REPORT_DIR),$(COVER_DATA_DIR))
+ $(if $(shell ls -A $(COVER_DATA_DIR)/),,$(verbose) rmdir $(COVER_DATA_DIR))
+endif
+
+ifeq ($(COVERDATA),)
+cover-report:
+else
+
+# Modules which include eunit.hrl always contain one line without coverage
+# because eunit defines test/0 which is never called. We compensate for this.
+EUNIT_HRL_MODS = $(subst $(space),$(comma),$(shell \
+ grep -H -e '^\s*-include.*include/eunit\.hrl"' src/*.erl \
+ | sed "s/^src\/\(.*\)\.erl:.*/'\1'/" | uniq))
+
+define cover_report.erl
+ $(foreach f,$(COVERDATA),cover:import("$(f)") == ok orelse halt(1),)
+ Ms = cover:imported_modules(),
+ [cover:analyse_to_file(M, "$(COVER_REPORT_DIR)/" ++ atom_to_list(M)
+ ++ ".COVER.html", [html]) || M <- Ms],
+ Report = [begin {ok, R} = cover:analyse(M, module), R end || M <- Ms],
+ EunitHrlMods = [$(EUNIT_HRL_MODS)],
+ Report1 = [{M, {Y, case lists:member(M, EunitHrlMods) of
+ true -> N - 1; false -> N end}} || {M, {Y, N}} <- Report],
+ TotalY = lists:sum([Y || {_, {Y, _}} <- Report1]),
+ TotalN = lists:sum([N || {_, {_, N}} <- Report1]),
+ Perc = fun(Y, N) -> case Y + N of 0 -> 100; S -> round(100 * Y / S) end end,
+ TotalPerc = Perc(TotalY, TotalN),
+ {ok, F} = file:open("$(COVER_REPORT_DIR)/index.html", [write]),
+ io:format(F, "<!DOCTYPE html><html>~n"
+ "<head><meta charset=\"UTF-8\">~n"
+ "<title>Coverage report</title></head>~n"
+ "<body>~n", []),
+ io:format(F, "<h1>Coverage</h1>~n<p>Total: ~p%</p>~n", [TotalPerc]),
+ io:format(F, "<table><tr><th>Module</th><th>Coverage</th></tr>~n", []),
+ [io:format(F, "<tr><td><a href=\"~p.COVER.html\">~p</a></td>"
+ "<td>~p%</td></tr>~n",
+ [M, M, Perc(Y, N)]) || {M, {Y, N}} <- Report1],
+ How = "$(subst $(space),$(comma)$(space),$(basename $(COVERDATA)))",
+ Date = "$(shell date -u "+%Y-%m-%dT%H:%M:%SZ")",
+ io:format(F, "</table>~n"
+ "<p>Generated using ~s and erlang.mk on ~s.</p>~n"
+ "</body></html>", [How, Date]),
+ halt().
+endef
+
+cover-report:
+ $(verbose) mkdir -p $(COVER_REPORT_DIR)
+ $(gen_verbose) $(call erlang,$(cover_report.erl))
+
+endif
+endif # ifneq ($(COVER_REPORT_DIR),)
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: sfx
+
+ifdef RELX_REL
+ifdef SFX
+
+# Configuration.
+
+SFX_ARCHIVE ?= $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/$(RELX_REL_NAME)-$(RELX_REL_VSN).tar.gz
+SFX_OUTPUT_FILE ?= $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME).run
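+
+# With a relx release configured, `make rel SFX=1` appends the release
+# tarball to the shell stub below, yielding one self-extracting runnable
+# file, e.g. _rel/my_rel.run (the release name is illustrative).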
+
+# Core targets.
+
+rel:: sfx
+
+# Plugin-specific targets.
+
+define sfx_stub
+#!/bin/sh
+
+TMPDIR=`mktemp -d`
+ARCHIVE=`awk '/^__ARCHIVE_BELOW__$$/ {print NR + 1; exit 0;}' $$0`
+FILENAME=$$(basename $$0)
+REL=$${FILENAME%.*}
+
+tail -n+$$ARCHIVE $$0 | tar -xzf - -C $$TMPDIR
+
+$$TMPDIR/bin/$$REL console
+RET=$$?
+
+rm -rf $$TMPDIR
+
+exit $$RET
+
+__ARCHIVE_BELOW__
+endef
+
+sfx:
+ $(verbose) $(call core_render,sfx_stub,$(SFX_OUTPUT_FILE))
+ $(gen_verbose) cat $(SFX_ARCHIVE) >> $(SFX_OUTPUT_FILE)
+ $(verbose) chmod +x $(SFX_OUTPUT_FILE)
+
+endif
+endif
+
+# Copyright (c) 2013-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# External plugins.
+
+DEP_PLUGINS ?=
+
+$(foreach p,$(DEP_PLUGINS),\
+ $(eval $(if $(findstring /,$p),\
+ $(call core_dep_plugin,$p,$(firstword $(subst /, ,$p))),\
+ $(call core_dep_plugin,$p/plugins.mk,$p))))
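+
+# Both forms are accepted; for a dependency named cowboy (illustrative):
+#
+#   DEP_PLUGINS = cowboy                # loads cowboy/plugins.mk
+#   DEP_PLUGINS = cowboy/mk/dist.mk     # loads one specific file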
+
+help:: help-plugins
+
+help-plugins::
+ $(verbose) :
+
+# Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015-2016, Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Fetch dependencies recursively (without building them).
+
+.PHONY: fetch-deps fetch-doc-deps fetch-rel-deps fetch-test-deps \
+ fetch-shell-deps
+
+.PHONY: $(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+fetch-deps: $(ERLANG_MK_RECURSIVE_DEPS_LIST)
+fetch-doc-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST)
+fetch-rel-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST)
+fetch-test-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST)
+fetch-shell-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+ifneq ($(SKIP_DEPS),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST):
+ $(verbose) :> $@
+else
+# By default, we fetch "normal" dependencies. They are also included no
+# matter which type of dependencies is requested.
+#
+# $(ALL_DEPS_DIRS) includes $(BUILD_DEPS).
+
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_DOC_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_REL_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_TEST_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_SHELL_DEPS_DIRS)
+
+# Allow fetch-deps to be used with $(DEP_TYPES) to fetch multiple types
+# of dependencies with a single target.
+ifneq ($(filter doc,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_DOC_DEPS_DIRS)
+endif
+ifneq ($(filter rel,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_REL_DEPS_DIRS)
+endif
+ifneq ($(filter test,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_TEST_DEPS_DIRS)
+endif
+ifneq ($(filter shell,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_SHELL_DEPS_DIRS)
+endif
+
+ERLANG_MK_RECURSIVE_TMP_LIST := $(abspath $(ERLANG_MK_TMP)/recursive-tmp-deps-$(shell echo $$PPID).log)
+
+$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): | $(ERLANG_MK_TMP)
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_RECURSIVE_TMP_LIST)
+endif
+ $(verbose) touch $(ERLANG_MK_RECURSIVE_TMP_LIST)
+ $(verbose) set -e; for dep in $^ ; do \
+ if ! grep -qs ^$$dep$$ $(ERLANG_MK_RECURSIVE_TMP_LIST); then \
+ echo $$dep >> $(ERLANG_MK_RECURSIVE_TMP_LIST); \
+ if grep -qs -E "^[[:blank:]]*include[[:blank:]]+(erlang\.mk|.*/erlang\.mk|.*ERLANG_MK_FILENAME.*)$$" \
+ $$dep/GNUmakefile $$dep/makefile $$dep/Makefile; then \
+ $(MAKE) -C $$dep fetch-deps \
+ IS_DEP=1 \
+ ERLANG_MK_RECURSIVE_TMP_LIST=$(ERLANG_MK_RECURSIVE_TMP_LIST); \
+ fi \
+ fi \
+ done
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) sort < $(ERLANG_MK_RECURSIVE_TMP_LIST) | \
+ uniq > $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted
+ $(verbose) cmp -s $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted $@ \
+ || mv $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted $@
+ $(verbose) rm -f $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted
+ $(verbose) rm $(ERLANG_MK_RECURSIVE_TMP_LIST)
+endif
+endif # ifneq ($(SKIP_DEPS),)
+
+# List dependencies recursively.
+
+.PHONY: list-deps list-doc-deps list-rel-deps list-test-deps \
+ list-shell-deps
+
+list-deps: $(ERLANG_MK_RECURSIVE_DEPS_LIST)
+list-doc-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST)
+list-rel-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST)
+list-test-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST)
+list-shell-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+list-deps list-doc-deps list-rel-deps list-test-deps list-shell-deps:
+ $(verbose) cat $^
+
+# Query dependencies recursively.
+
+.PHONY: query-deps query-doc-deps query-rel-deps query-test-deps \
+ query-shell-deps
+
+QUERY ?= name fetch_method repo version
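+
+# For example, to list only the name and version of every dependency,
+# recursively:
+#
+#   $ make query-deps QUERY="name version"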
+
+define query_target
+$(1): $(2) clean-tmp-query.log
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(4)
+endif
+ $(verbose) $(foreach dep,$(3),\
+ echo $(PROJECT): $(foreach q,$(QUERY),$(call query_$(q),$(dep))) >> $(4) ;)
+ $(if $(filter-out query-deps,$(1)),,\
+ $(verbose) set -e; for dep in $(3) ; do \
+ if grep -qs ^$$$$dep$$$$ $(ERLANG_MK_TMP)/query.log; then \
+ :; \
+ else \
+ echo $$$$dep >> $(ERLANG_MK_TMP)/query.log; \
+ $(MAKE) -C $(DEPS_DIR)/$$$$dep $$@ QUERY="$(QUERY)" IS_DEP=1 || true; \
+ fi \
+ done)
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) touch $(4)
+ $(verbose) cat $(4)
+endif
+endef
+
+clean-tmp-query.log:
+ifeq ($(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_TMP)/query.log
+endif
+
+$(eval $(call query_target,query-deps,$(ERLANG_MK_RECURSIVE_DEPS_LIST),$(BUILD_DEPS) $(DEPS),$(ERLANG_MK_QUERY_DEPS_FILE)))
+$(eval $(call query_target,query-doc-deps,$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST),$(DOC_DEPS),$(ERLANG_MK_QUERY_DOC_DEPS_FILE)))
+$(eval $(call query_target,query-rel-deps,$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST),$(REL_DEPS),$(ERLANG_MK_QUERY_REL_DEPS_FILE)))
+$(eval $(call query_target,query-test-deps,$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST),$(TEST_DEPS),$(ERLANG_MK_QUERY_TEST_DEPS_FILE)))
+$(eval $(call query_target,query-shell-deps,$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST),$(SHELL_DEPS),$(ERLANG_MK_QUERY_SHELL_DEPS_FILE)))
diff --git a/deps/rabbitmq_jms_topic_exchange/include/rabbit_jms_topic_exchange.hrl b/deps/rabbitmq_jms_topic_exchange/include/rabbit_jms_topic_exchange.hrl
new file mode 100644
index 0000000000..2157a84389
--- /dev/null
+++ b/deps/rabbitmq_jms_topic_exchange/include/rabbit_jms_topic_exchange.hrl
@@ -0,0 +1,39 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2012-2020 VMware, Inc. or its affiliates. All rights reserved.
+%% -----------------------------------------------------------------------------
+
+%% JMS on Rabbit Topic Selector Exchange plugin definitions
+
+%% -----------------------------------------------------------------------------
+%% User-defined exchange type name
+-define(X_TYPE_NAME, <<"x-jms-topic">>).
+
+%% -----------------------------------------------------------------------------
+%% mnesia database records
+-define(JMS_TOPIC_TABLE, x_jms_topic_table).
+-define(JMS_TOPIC_RECORD, x_jms_topic_xs).
+
+%% Key is x_name -- the exchange name
+-record(?JMS_TOPIC_RECORD, {x_name, x_selection_policy = undefined, x_selector_funs}).
+%% fields:
+%% x_selector_funs
+%% a partial map (`dict`) of binding functions:
+%% dict: RoutingKey x DestName -/-> BindingSelectorFun
+%% (there is no default, but an empty map will be initially inserted)
+%% where a BindingSelectorFun has the signature:
+%% bsf : Headers -> boolean
+%% x_selection_policy
+%% not used, retained for backwards compatibility of db records.
+%% -----------------------------------------------------------------------------
+
+%% -----------------------------------------------------------------------------
+
+%% -----------------------------------------------------------------------------
+%% Name of arg on binding used to specify erlang term -- string type
+%% private static final String RJMS_COMPILED_SELECTOR_ARG = "rjms_erlang_selector";
+%% in JMS Client.
+-define(RJMS_COMPILED_SELECTOR_ARG, <<"rjms_erlang_selector">>).
+%% -----------------------------------------------------------------------------
diff --git a/deps/rabbitmq_jms_topic_exchange/rabbitmq-components.mk b/deps/rabbitmq_jms_topic_exchange/rabbitmq-components.mk
new file mode 100644
index 0000000000..b2a3be8b35
--- /dev/null
+++ b/deps/rabbitmq_jms_topic_exchange/rabbitmq-components.mk
@@ -0,0 +1,359 @@
+ifeq ($(.DEFAULT_GOAL),)
+# Set the default goal to `all` because this file defines some targets
+# before erlang.mk is included, which would otherwise make the wrong
+# target the default.
+.DEFAULT_GOAL = all
+endif
+
+# PROJECT_VERSION defaults to:
+# 1. the version exported by rabbitmq-server-release;
+# 2. the version stored in `git-revisions.txt`, if it exists;
+# 3. a version based on git-describe(1), if it is a Git clone;
+# 4. 0.0.0
+
+PROJECT_VERSION := $(RABBITMQ_VERSION)
+
+ifeq ($(PROJECT_VERSION),)
+PROJECT_VERSION := $(shell \
+if test -f git-revisions.txt; then \
+ head -n1 git-revisions.txt | \
+ awk '{print $$$(words $(PROJECT_DESCRIPTION) version);}'; \
+else \
+ (git describe --dirty --abbrev=7 --tags --always --first-parent \
+ 2>/dev/null || echo rabbitmq_v0_0_0) | \
+ sed -e 's/^rabbitmq_v//' -e 's/^v//' -e 's/_/./g' -e 's/-/+/' \
+ -e 's/-/./g'; \
+fi)
+endif
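+
+# As an example of the git-describe(1) branch above: a checkout ten commits
+# past the tag v3.8.9 describes as something like v3.8.9-10-gabc1234, which
+# the sed expressions turn into the version 3.8.9+10.gabc1234.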
+
+# --------------------------------------------------------------------
+# RabbitMQ components.
+# --------------------------------------------------------------------
+
+# For RabbitMQ repositories, we want to check out branches which match
+# the parent project. For instance, if the parent project is on a
+# release tag, dependencies must be on the same release tag. If the
+# parent project is on a topic branch, dependencies must be on the same
+# topic branch, or fall back to `stable` or `master`, whichever was the
+# base of the topic branch.
+
+dep_amqp_client = git_rmq rabbitmq-erlang-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_amqp10_client = git_rmq rabbitmq-amqp1.0-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_amqp10_common = git_rmq rabbitmq-amqp1.0-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbit = git_rmq rabbitmq-server $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbit_common = git_rmq rabbitmq-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_amqp1_0 = git_rmq rabbitmq-amqp1.0 $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_amqp = git_rmq rabbitmq-auth-backend-amqp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_cache = git_rmq rabbitmq-auth-backend-cache $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_http = git_rmq rabbitmq-auth-backend-http $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_ldap = git_rmq rabbitmq-auth-backend-ldap $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_oauth2 = git_rmq rabbitmq-auth-backend-oauth2 $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_mechanism_ssl = git_rmq rabbitmq-auth-mechanism-ssl $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_aws = git_rmq rabbitmq-aws $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_boot_steps_visualiser = git_rmq rabbitmq-boot-steps-visualiser $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_cli = git_rmq rabbitmq-cli $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_codegen = git_rmq rabbitmq-codegen $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_consistent_hash_exchange = git_rmq rabbitmq-consistent-hash-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_ct_client_helpers = git_rmq rabbitmq-ct-client-helpers $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_ct_helpers = git_rmq rabbitmq-ct-helpers $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_delayed_message_exchange = git_rmq rabbitmq-delayed-message-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_dotnet_client = git_rmq rabbitmq-dotnet-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_event_exchange = git_rmq rabbitmq-event-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_federation = git_rmq rabbitmq-federation $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_federation_management = git_rmq rabbitmq-federation-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_java_client = git_rmq rabbitmq-java-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_client = git_rmq rabbitmq-jms-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_cts = git_rmq rabbitmq-jms-cts $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_topic_exchange = git_rmq rabbitmq-jms-topic-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_lvc_exchange = git_rmq rabbitmq-lvc-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management = git_rmq rabbitmq-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_agent = git_rmq rabbitmq-management-agent $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_exchange = git_rmq rabbitmq-management-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_themes = git_rmq rabbitmq-management-themes $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_message_timestamp = git_rmq rabbitmq-message-timestamp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_metronome = git_rmq rabbitmq-metronome $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_mqtt = git_rmq rabbitmq-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_objc_client = git_rmq rabbitmq-objc-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_aws = git_rmq rabbitmq-peer-discovery-aws $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_common = git_rmq rabbitmq-peer-discovery-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_consul = git_rmq rabbitmq-peer-discovery-consul $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_etcd = git_rmq rabbitmq-peer-discovery-etcd $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_k8s = git_rmq rabbitmq-peer-discovery-k8s $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_prometheus = git_rmq rabbitmq-prometheus $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_random_exchange = git_rmq rabbitmq-random-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_recent_history_exchange = git_rmq rabbitmq-recent-history-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_routing_node_stamp = git_rmq rabbitmq-routing-node-stamp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_rtopic_exchange = git_rmq rabbitmq-rtopic-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_server_release = git_rmq rabbitmq-server-release $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_sharding = git_rmq rabbitmq-sharding $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_shovel = git_rmq rabbitmq-shovel $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_shovel_management = git_rmq rabbitmq-shovel-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_stomp = git_rmq rabbitmq-stomp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_stream = git_rmq rabbitmq-stream $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_toke = git_rmq rabbitmq-toke $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_top = git_rmq rabbitmq-top $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_tracing = git_rmq rabbitmq-tracing $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_trust_store = git_rmq rabbitmq-trust-store $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_test = git_rmq rabbitmq-test $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_dispatch = git_rmq rabbitmq-web-dispatch $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_stomp = git_rmq rabbitmq-web-stomp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_stomp_examples = git_rmq rabbitmq-web-stomp-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt = git_rmq rabbitmq-web-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt_examples = git_rmq rabbitmq-web-mqtt-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_website = git_rmq rabbitmq-website $(current_rmq_ref) $(base_rmq_ref) live master
+dep_toke = git_rmq toke $(current_rmq_ref) $(base_rmq_ref) master
+
+dep_rabbitmq_public_umbrella = git_rmq rabbitmq-public-umbrella $(current_rmq_ref) $(base_rmq_ref) master
+
+# Third-party dependencies version pinning.
+#
+# We do this in this file, which is copied into all projects, to ensure
+# they all use the same versions. It avoids conflicts and makes it
+# possible to work with rabbitmq-public-umbrella.
+
+dep_accept = hex 0.3.5
+dep_cowboy = hex 2.8.0
+dep_cowlib = hex 2.9.1
+dep_jsx = hex 2.11.0
+dep_lager = hex 3.8.0
+dep_prometheus = git https://github.com/deadtrickster/prometheus.erl.git master
+dep_ra = git https://github.com/rabbitmq/ra.git master
+dep_ranch = hex 1.7.1
+dep_recon = hex 2.5.1
+dep_observer_cli = hex 1.5.4
+dep_stdout_formatter = hex 0.2.4
+dep_sysmon_handler = hex 1.3.0
+
+RABBITMQ_COMPONENTS = amqp_client \
+ amqp10_common \
+ amqp10_client \
+ rabbit \
+ rabbit_common \
+ rabbitmq_amqp1_0 \
+ rabbitmq_auth_backend_amqp \
+ rabbitmq_auth_backend_cache \
+ rabbitmq_auth_backend_http \
+ rabbitmq_auth_backend_ldap \
+ rabbitmq_auth_backend_oauth2 \
+ rabbitmq_auth_mechanism_ssl \
+ rabbitmq_aws \
+ rabbitmq_boot_steps_visualiser \
+ rabbitmq_cli \
+ rabbitmq_codegen \
+ rabbitmq_consistent_hash_exchange \
+ rabbitmq_ct_client_helpers \
+ rabbitmq_ct_helpers \
+ rabbitmq_delayed_message_exchange \
+ rabbitmq_dotnet_client \
+ rabbitmq_event_exchange \
+ rabbitmq_federation \
+ rabbitmq_federation_management \
+ rabbitmq_java_client \
+ rabbitmq_jms_client \
+ rabbitmq_jms_cts \
+ rabbitmq_jms_topic_exchange \
+ rabbitmq_lvc_exchange \
+ rabbitmq_management \
+ rabbitmq_management_agent \
+ rabbitmq_management_exchange \
+ rabbitmq_management_themes \
+ rabbitmq_message_timestamp \
+ rabbitmq_metronome \
+ rabbitmq_mqtt \
+ rabbitmq_objc_client \
+ rabbitmq_peer_discovery_aws \
+ rabbitmq_peer_discovery_common \
+ rabbitmq_peer_discovery_consul \
+ rabbitmq_peer_discovery_etcd \
+ rabbitmq_peer_discovery_k8s \
+ rabbitmq_prometheus \
+ rabbitmq_random_exchange \
+ rabbitmq_recent_history_exchange \
+ rabbitmq_routing_node_stamp \
+ rabbitmq_rtopic_exchange \
+ rabbitmq_server_release \
+ rabbitmq_sharding \
+ rabbitmq_shovel \
+ rabbitmq_shovel_management \
+ rabbitmq_stomp \
+ rabbitmq_stream \
+ rabbitmq_toke \
+ rabbitmq_top \
+ rabbitmq_tracing \
+ rabbitmq_trust_store \
+ rabbitmq_web_dispatch \
+ rabbitmq_web_mqtt \
+ rabbitmq_web_mqtt_examples \
+ rabbitmq_web_stomp \
+ rabbitmq_web_stomp_examples \
+ rabbitmq_website
+
+# Erlang.mk does not rebuild dependencies after they have been compiled
+# once, except for those listed in the `$(FORCE_REBUILD)` variable.
+#
+# We want all RabbitMQ components to always be rebuilt: this eases
+# working on several components at the same time.
+
+FORCE_REBUILD = $(RABBITMQ_COMPONENTS)
+
+# Several components have a custom erlang.mk/build.config, mainly
+# to disable eunit. Therefore, we can't use the top-level project's
+# erlang.mk copy.
+NO_AUTOPATCH += $(RABBITMQ_COMPONENTS)
+
+ifeq ($(origin current_rmq_ref),undefined)
+ifneq ($(wildcard .git),)
+current_rmq_ref := $(shell (\
+ ref=$$(LANG=C git branch --list | awk '/^\* \(.*detached / {ref=$$0; sub(/.*detached [^ ]+ /, "", ref); sub(/\)$$/, "", ref); print ref; exit;} /^\* / {ref=$$0; sub(/^\* /, "", ref); print ref; exit}');\
+ if test "$$(git rev-parse --short HEAD)" != "$$ref"; then echo "$$ref"; fi))
+else
+current_rmq_ref := master
+endif
+endif
+export current_rmq_ref
+
+ifeq ($(origin base_rmq_ref),undefined)
+ifneq ($(wildcard .git),)
+possible_base_rmq_ref := master
+ifeq ($(possible_base_rmq_ref),$(current_rmq_ref))
+base_rmq_ref := $(current_rmq_ref)
+else
+base_rmq_ref := $(shell \
+ (git rev-parse --verify -q master >/dev/null && \
+ git rev-parse --verify -q $(possible_base_rmq_ref) >/dev/null && \
+ git merge-base --is-ancestor $$(git merge-base master HEAD) $(possible_base_rmq_ref) && \
+ echo $(possible_base_rmq_ref)) || \
+ echo master)
+endif
+else
+base_rmq_ref := master
+endif
+endif
+export base_rmq_ref
+
+# Repository URL selection.
+#
+# First, we infer other components' locations from the current project
+# repository URL, if it's a Git repository:
+# - We take the "origin" remote URL as the base
+# - The current project name and repository name are replaced by the
+#   target's properties:
+#   eg. rabbitmq-common is replaced by rabbitmq-codegen
+#   eg. rabbit_common is replaced by rabbitmq_codegen
+#
+# If cloning from this computed location fails, we fall back to the
+# RabbitMQ upstream repositories on GitHub.
+
+# Macro to transform eg. "rabbit_common" to "rabbitmq-common".
+rmq_cmp_repo_name = $(word 2,$(dep_$(1)))
+
+# Upstream URL for the current project.
+RABBITMQ_COMPONENT_REPO_NAME := $(call rmq_cmp_repo_name,$(PROJECT))
+RABBITMQ_UPSTREAM_FETCH_URL ?= https://github.com/rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git
+RABBITMQ_UPSTREAM_PUSH_URL ?= git@github.com:rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git
+
+# Current URL for the current project. If this is not a Git clone,
+# default to the upstream Git repository.
+ifneq ($(wildcard .git),)
+git_origin_fetch_url := $(shell git config remote.origin.url)
+git_origin_push_url := $(shell git config remote.origin.pushurl || git config remote.origin.url)
+RABBITMQ_CURRENT_FETCH_URL ?= $(git_origin_fetch_url)
+RABBITMQ_CURRENT_PUSH_URL ?= $(git_origin_push_url)
+else
+RABBITMQ_CURRENT_FETCH_URL ?= $(RABBITMQ_UPSTREAM_FETCH_URL)
+RABBITMQ_CURRENT_PUSH_URL ?= $(RABBITMQ_UPSTREAM_PUSH_URL)
+endif
+
+# Macro to replace the following patterns:
+# 1. /foo.git -> /bar.git
+# 2. /foo -> /bar
+# 3. /foo/ -> /bar/
+subst_repo_name = $(patsubst %/$(1)/%,%/$(2)/%,$(patsubst %/$(1),%/$(2),$(patsubst %/$(1).git,%/$(2).git,$(3))))
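+# Example (illustrative):
+#   $(call subst_repo_name,rabbit_common,rabbitmq_codegen,https://github.com/rabbitmq/rabbit_common.git)
+# expands to https://github.com/rabbitmq/rabbitmq_codegen.git.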
+
+# Macro to replace both the project's name (eg. "rabbit_common") and
+# repository name (eg. "rabbitmq-common") by the target's equivalent.
+#
+# This macro is kept on one line because we don't want whitespaces in
+# the returned value, as it's used in $(dep_fetch_git_rmq) in a shell
+# single-quoted string.
+dep_rmq_repo = $(if $(dep_$(2)),$(call subst_repo_name,$(PROJECT),$(2),$(call subst_repo_name,$(RABBITMQ_COMPONENT_REPO_NAME),$(call rmq_cmp_repo_name,$(2)),$(1))),$(pkg_$(1)_repo))
+
+dep_rmq_commits = $(if $(dep_$(1)), \
+ $(wordlist 3,$(words $(dep_$(1))),$(dep_$(1))), \
+ $(pkg_$(1)_commit))
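+# Example (hypothetical entry): given
+#   dep_rabbit_common = git_rmq rabbit-common $(current_rmq_ref) $(base_rmq_ref) master
+# $(call rmq_cmp_repo_name,rabbit_common) expands to "rabbit-common" and
+# $(call dep_rmq_commits,rabbit_common) to the list of refs to try, in order.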
+
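+# Fetch a RabbitMQ component: clone from the URL derived from this
+# clone's "origin" remote when it differs from the current project's own
+# URL, otherwise fall back to the upstream GitHub URL; then check out the
+# first ref from dep_rmq_commits that exists, and point the push URL at
+# the matching repository.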
+define dep_fetch_git_rmq
+ fetch_url1='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_FETCH_URL),$(1))'; \
+ fetch_url2='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_FETCH_URL),$(1))'; \
+ if test "$$$$fetch_url1" != '$(RABBITMQ_CURRENT_FETCH_URL)' && \
+ git clone -q -n -- "$$$$fetch_url1" $(DEPS_DIR)/$(call dep_name,$(1)); then \
+ fetch_url="$$$$fetch_url1"; \
+ push_url='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_PUSH_URL),$(1))'; \
+ elif git clone -q -n -- "$$$$fetch_url2" $(DEPS_DIR)/$(call dep_name,$(1)); then \
+ fetch_url="$$$$fetch_url2"; \
+ push_url='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_PUSH_URL),$(1))'; \
+ fi; \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && ( \
+ $(foreach ref,$(call dep_rmq_commits,$(1)), \
+ git checkout -q $(ref) >/dev/null 2>&1 || \
+ ) \
+ (echo "error: no valid pathspec among: $(call dep_rmq_commits,$(1))" \
+ 1>&2 && false) ) && \
+ (test "$$$$fetch_url" = "$$$$push_url" || \
+ git remote set-url --push origin "$$$$push_url")
+endef
+
+# --------------------------------------------------------------------
+# Component distribution.
+# --------------------------------------------------------------------
+
+list-dist-deps::
+ @:
+
+prepare-dist::
+ @:
+
+# --------------------------------------------------------------------
+# Umbrella-specific settings.
+# --------------------------------------------------------------------
+
+# If the top-level project is a RabbitMQ component, we override
+# $(DEPS_DIR) for this project to point to the top-level's one.
+#
+# We also verify that the guessed DEPS_DIR is actually named `deps`,
+# to rule out situations where it is mere coincidence that a
+# `rabbitmq-components.mk` was found in an upper directory.
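+#
+# Illustrative layout: for a clone at umbrella/deps/rabbit_common,
+# $(possible_deps_dir_1) is umbrella/deps and the check looks for
+# umbrella/rabbitmq-components.mk.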
+
+possible_deps_dir_1 = $(abspath ..)
+possible_deps_dir_2 = $(abspath ../../..)
+
+ifeq ($(notdir $(possible_deps_dir_1)),deps)
+ifneq ($(wildcard $(possible_deps_dir_1)/../rabbitmq-components.mk),)
+deps_dir_overriden = 1
+DEPS_DIR ?= $(possible_deps_dir_1)
+DISABLE_DISTCLEAN = 1
+endif
+endif
+
+ifeq ($(deps_dir_overriden),)
+ifeq ($(notdir $(possible_deps_dir_2)),deps)
+ifneq ($(wildcard $(possible_deps_dir_2)/../rabbitmq-components.mk),)
+deps_dir_overriden = 1
+DEPS_DIR ?= $(possible_deps_dir_2)
+DISABLE_DISTCLEAN = 1
+endif
+endif
+endif
+
+ifneq ($(wildcard UMBRELLA.md),)
+DISABLE_DISTCLEAN = 1
+endif
+
+# We disable `make distclean` so $(DEPS_DIR) is not accidentally removed.
+
+ifeq ($(DISABLE_DISTCLEAN),1)
+ifneq ($(filter distclean distclean-deps,$(MAKECMDGOALS)),)
+SKIP_DEPS = 1
+endif
+endif
diff --git a/deps/rabbitmq_jms_topic_exchange/src/rabbit_jms_topic_exchange.erl b/deps/rabbitmq_jms_topic_exchange/src/rabbit_jms_topic_exchange.erl
new file mode 100644
index 0000000000..f9addb8430
--- /dev/null
+++ b/deps/rabbitmq_jms_topic_exchange/src/rabbit_jms_topic_exchange.erl
@@ -0,0 +1,300 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2012-2020 VMware, Inc. or its affiliates. All rights reserved.
+%% -----------------------------------------------------------------------------
+
+%% JMS on Rabbit Selector Exchange plugin
+
+%% -----------------------------------------------------------------------------
+-module(rabbit_jms_topic_exchange).
+
+-behaviour(rabbit_exchange_type).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+-include_lib("rabbit_common/include/rabbit_framing.hrl").
+-include("rabbit_jms_topic_exchange.hrl").
+
+%% Rabbit exchange type functions:
+-export([ description/0
+ , serialise_events/0
+ , route/2
+ , validate/1
+ , create/2
+ , delete/3
+ , validate_binding/2
+ , add_binding/3
+ , remove_bindings/3
+ , assert_args_equivalence/2
+ , policy_changed/2 ]).
+
+%% Database initialisation function:
+-export([setup_db_schema/0]).
+
+-export([info/1, info/2]).
+
+%%----------------------------------------------------------------------------
+
+%% Register exchange type
+-rabbit_boot_step({ ?MODULE
+ , [ {description, "exchange type JMS topic selector"}
+ , {mfa, {rabbit_registry, register, [exchange, ?X_TYPE_NAME, ?MODULE]}}
+ , {cleanup, {rabbit_registry, unregister, [exchange, ?X_TYPE_NAME]}}
+ , {requires, rabbit_registry}
+ , {enables, kernel_ready} ] }).
+
+%% Initialise database
+-rabbit_boot_step({ rabbit_jms_topic_exchange_mnesia
+ , [ {description, "database exchange type JMS topic selector"}
+ , {mfa, {?MODULE, setup_db_schema, []}}
+ , {requires, database}
+ , {enables, external_infrastructure} ] }).
+
+%%----------------------------------------------------------------------------
+
+% Initialise database table for all exchanges of type <<"x-jms-topic">>
+setup_db_schema() ->
+ case mnesia:create_table( ?JMS_TOPIC_TABLE
+ , [ {attributes, record_info(fields, ?JMS_TOPIC_RECORD)}
+ , {record_name, ?JMS_TOPIC_RECORD}
+ , {type, set} ]
+ ) of
+ {atomic, ok} -> ok;
+ {aborted, {already_exists, ?JMS_TOPIC_TABLE}} -> ok
+ end.
+
+%%----------------------------------------------------------------------------
+%% R E F E R E N C E T Y P E I N F O R M A T I O N
+
+%% -type(binding() ::
+%% #binding{source :: rabbit_exchange:name(),
+%% destination :: binding_destination(),
+%% key :: rabbit_binding:key(),
+%% args :: rabbit_framing:amqp_table()}).
+%%
+%% -type(exchange() ::
+%% #exchange{name :: rabbit_exchange:name(),
+%% type :: rabbit_exchange:type(),
+%% durable :: boolean(),
+%% auto_delete :: boolean(),
+%% arguments :: rabbit_framing:amqp_table()}).
+%%
+%% -type(amqp_field_type() ::
+%% 'longstr' | 'signedint' | 'decimal' | 'timestamp' |
+%% 'table' | 'byte' | 'double' | 'float' | 'long' |
+%% 'short' | 'bool' | 'binary' | 'void' | 'array').
+
+%%----------------------------------------------------------------------------
+%% E X P O R T E D E X C H A N G E B E H A V I O U R
+
+% Exchange description
+description() -> [ {name, <<"jms-selector">>}
+ , {description, <<"JMS selector exchange">>} ].
+
+% Binding event serialisation
+serialise_events() -> false.
+
+% Route messages
+route( #exchange{name = XName}
+ , #delivery{message = #basic_message{content = MessageContent, routing_keys = RKs}}
+ ) ->
+ BindingFuns = get_binding_funs_x(XName),
+ match_bindings(XName, RKs, MessageContent, BindingFuns).
+
+
+% Before exchange declaration
+validate(_X) -> ok.
+
+% After exchange declaration and recovery
+create(transaction, #exchange{name = XName}) ->
+ add_initial_record(XName);
+create(_Tx, _X) ->
+ ok.
+
+% Delete an exchange
+delete(transaction, #exchange{name = XName}, _Bs) ->
+ delete_state(XName),
+ ok;
+delete(_Tx, _X, _Bs) ->
+ ok.
+
+% Before add binding
+validate_binding(_X, _B) -> ok.
+
+% A new binding has been added or recovered
+add_binding( Tx
+ , #exchange{name = XName}
+ , #binding{key = BindingKey, destination = Dest, args = Args}
+ ) ->
+ Selector = get_string_arg(Args, ?RJMS_COMPILED_SELECTOR_ARG),
+ BindGen = generate_binding_fun(Selector),
+ case {Tx, BindGen} of
+ {transaction, {ok, BindFun}} ->
+ add_binding_fun(XName, {{BindingKey, Dest}, BindFun});
+ {none, {error, _}} ->
+ parsing_error(XName, Selector, Dest);
+ _ ->
+ ok
+ end,
+ ok.
+
+% Binding removal
+remove_bindings( transaction
+ , #exchange{name = XName}
+ , Bindings
+ ) ->
+ remove_binding_funs(XName, Bindings),
+ ok;
+remove_bindings(_Tx, _X, _Bs) ->
+ ok.
+
+% Exchange argument equivalence
+assert_args_equivalence(X, Args) ->
+ rabbit_exchange:assert_args_equivalence(X, Args).
+
+% Policy change notifications ignored
+policy_changed(_X1, _X2) -> ok.
+
+% Stub for type-specific exchange information
+info(_X) -> [].
+info(_X, _) -> [].
+
+
+%%----------------------------------------------------------------------------
+%% P R I V A T E F U N C T I O N S
+
+% Get a string argument from an args/arguments AMQP table
+get_string_arg(Args, ArgName) -> get_string_arg(Args, ArgName, error).
+
+get_string_arg(Args, ArgName, Default) ->
+ case rabbit_misc:table_lookup(Args, ArgName) of
+ {longstr, BinVal} -> binary_to_list(BinVal);
+ _ -> Default
+ end.
+
+% Match bindings for the current message
+match_bindings( XName, _RoutingKeys, MessageContent, BindingFuns) ->
+ MessageHeaders = get_headers(MessageContent),
+ rabbit_router:match_bindings( XName
+ , fun(#binding{key = Key, destination = Dest}) ->
+ binding_fun_match({Key, Dest}, MessageHeaders, BindingFuns)
+ end
+ ).
+
+% Select binding function from Funs dictionary, apply it to Headers and return result (true|false)
+binding_fun_match(DictKey, Headers, FunsDict) ->
+ case dict:find(DictKey, FunsDict) of
+ {ok, Fun} when is_function(Fun, 1) -> Fun(Headers);
+ error -> false % do not match if no function found
+ end.
+
+% get Headers from message content
+get_headers(Content) ->
+ case (Content#content.properties)#'P_basic'.headers of
+ undefined -> [];
+ H -> rabbit_misc:sort_field_table(H)
+ end.
+
+% generate the function that checks the message against the selector
+generate_binding_fun(ERL) ->
+ case decode_term(ERL) of
+ {error, _} -> error;
+ {ok, ErlTerm} -> check_fun(ErlTerm)
+ end.
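+%% Example (illustrative): a compiled selector arrives as an Erlang term
+%% in string form, e.g. "{'=', {'ident', <<\"colour\">>}, <<\"blue\">>}.",
+%% which decode_term/1 parses into the expression evaluated per message.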
+
+% build checking function from compiled expression
+check_fun(CompiledExp) ->
+ { ok,
+ fun(Headers) ->
+ selector_match(CompiledExp, Headers)
+ end
+ }.
+
+% get an erlang term from a string
+decode_term(Str) ->
+ try
+ {ok, Ts, _} = erl_scan:string(Str),
+ {ok, Term} = erl_parse:parse_term(Ts),
+ {ok, Term}
+ catch
+    %% catch errors (e.g. a badmatch on scan/parse failure) as well as throws
+    _:Err -> {error, {invalid_erlang_term, Err}}
+ end.
+
+% Evaluate the selector and check against the Headers
+selector_match(Selector, Headers) ->
+ case sjx_evaluator:evaluate(Selector, Headers) of
+ true -> true;
+ _ -> false
+ end.
+
+% get binding funs from state (using dirty_reads)
+get_binding_funs_x(XName) ->
+ mnesia:async_dirty(
+ fun() ->
+ #?JMS_TOPIC_RECORD{x_selector_funs = BindingFuns} = read_state(XName),
+ BindingFuns
+ end,
+ []
+ ).
+
+add_initial_record(XName) ->
+ write_state_fun(XName, dict:new()).
+
+% add binding fun to binding fun dictionary
+add_binding_fun(XName, BindingKeyAndFun) ->
+ #?JMS_TOPIC_RECORD{x_selector_funs = BindingFuns} = read_state_for_update(XName),
+ write_state_fun(XName, put_item(BindingFuns, BindingKeyAndFun)).
+
+% remove binding funs from binding fun dictionary
+remove_binding_funs(XName, Bindings) ->
+ BindingKeys = [ {BindingKey, DestName} || #binding{key = BindingKey, destination = DestName} <- Bindings ],
+ #?JMS_TOPIC_RECORD{x_selector_funs = BindingFuns} = read_state_for_update(XName),
+ write_state_fun(XName, remove_items(BindingFuns, BindingKeys)).
+
+% add an item to the dictionary of binding functions
+put_item(Dict, {Key, Item}) -> dict:store(Key, Item, Dict).
+
+% remove a list of keyed items from the dictionary, by key
+remove_items(Dict, []) -> Dict;
+remove_items(Dict, [Key | Keys]) -> remove_items(dict:erase(Key, Dict), Keys).
+
+% delete all the state saved for this exchange
+delete_state(XName) ->
+ mnesia:delete(?JMS_TOPIC_TABLE, XName, write).
+
+% Basic read for update
+read_state_for_update(XName) -> read_state(XName, write).
+
+% Basic read
+read_state(XName) -> read_state(XName, read).
+
+% Lockable read
+read_state(XName, Lock) ->
+ case mnesia:read(?JMS_TOPIC_TABLE, XName, Lock) of
+ [Rec] -> Rec;
+ _ -> exchange_state_corrupt_error(XName)
+ end.
+
+% Basic write
+write_state_fun(XName, BFuns) ->
+ mnesia:write( ?JMS_TOPIC_TABLE
+ , #?JMS_TOPIC_RECORD{x_name = XName, x_selector_funs = BFuns}
+ , write ).
+
+%%----------------------------------------------------------------------------
+%% E R R O R S
+
+% state error
+exchange_state_corrupt_error(#resource{name = XName}) ->
+ rabbit_misc:protocol_error( internal_error
+ , "exchange named '~s' has no saved state or incorrect saved state"
+ , [XName] ).
+
+% parsing error
+parsing_error(#resource{name = XName}, S, #resource{name = DestName}) ->
+ rabbit_misc:protocol_error( precondition_failed
+                            , "cannot parse selector '~p' when binding destination '~s' to exchange '~s'"
+ , [S, DestName, XName] ).
+
+%%----------------------------------------------------------------------------
diff --git a/deps/rabbitmq_jms_topic_exchange/src/sjx_evaluator.erl b/deps/rabbitmq_jms_topic_exchange/src/sjx_evaluator.erl
new file mode 100644
index 0000000000..ec5f574291
--- /dev/null
+++ b/deps/rabbitmq_jms_topic_exchange/src/sjx_evaluator.erl
@@ -0,0 +1,169 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2012-2020 VMware, Inc. or its affiliates. All rights reserved.
+%% -----------------------------------------------------------------------------
+%% Derived from works which were:
+%% Copyright (c) 2002, 2012 Tim Watson (watson.timothy@gmail.com)
+%% Copyright (c) 2012, 2013 Steve Powell (Zteve.Powell@gmail.com)
+%% -----------------------------------------------------------------------------
+
+%% Evaluate an SQL expression for filtering purposes
+
+%% -----------------------------------------------------------------------------
+
+-module(sjx_evaluator).
+
+-export([evaluate/2]).
+%% Evaluation function
+%%
+%% Given Headers (a list of keyed, typed values) and a parsed SQL
+%% selector expression, evaluate the truth or falsity of the expression.
+%%
+%% If an identifier is absent from Headers, or the types do not match the
+%% comparisons, the expression evaluates to undefined or false; in either
+%% case the message is not selected.
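+%%
+%% Example (illustrative):
+%%   Headers = [{<<"colour">>, longstr, <<"blue">>}],
+%%   true = sjx_evaluator:evaluate({'=', {'ident', <<"colour">>}, <<"blue">>}, Headers).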
+
+-type itemname() :: binary().
+-type itemtype() ::
+ 'longstr' | 'signedint' | 'byte' | 'double' | 'float' | 'long' | 'short' | 'bool'.
+-type itemvalue() :: any().
+
+-type tableitem() :: { itemname(), itemtype(), itemvalue() }.
+-type table() :: list(tableitem()).
+
+-type expression() :: any().
+
+-spec evaluate(expression(), table()) -> true | false | undefined | error.
+
+
+evaluate( true, _Headers ) -> true;
+evaluate( false, _Headers ) -> false;
+
+evaluate( {'not', Exp }, Headers ) -> not3(evaluate(Exp, Headers));
+evaluate( {'ident', Ident }, Headers ) -> lookup_value(Headers, Ident);
+evaluate( {'is_null', Exp }, Headers ) -> val_of(Exp, Headers) =:= undefined;
+evaluate( {'not_null', Exp }, Headers ) -> val_of(Exp, Headers) =/= undefined;
+evaluate( { Op, Exp }, Headers ) -> do_una_op(Op, evaluate(Exp, Headers));
+
+evaluate( {'and', Exp1, Exp2 }, Headers ) -> and3(evaluate(Exp1, Headers), evaluate(Exp2, Headers));
+evaluate( {'or', Exp1, Exp2 }, Headers ) -> or3(evaluate(Exp1, Headers), evaluate(Exp2, Headers));
+evaluate( {'like', LHS, Patt, Esc }, Headers ) -> isLike(val_of(LHS, Headers), {Patt, Esc});
+evaluate( {'not_like', LHS, Patt, Esc }, Headers ) -> not3(isLike(val_of(LHS, Headers), {Patt, Esc}));
+evaluate( { Op, Exp, {range, From, To} }, Headers ) -> evaluate({ Op, Exp, From, To }, Headers);
+evaluate( {'between', Exp, From, To}, Hs ) -> between(evaluate(Exp, Hs), evaluate(From, Hs), evaluate(To, Hs));
+evaluate( {'not_between', Exp, From, To}, Hs ) -> not3(between(evaluate(Exp, Hs), evaluate(From, Hs), evaluate(To, Hs)));
+evaluate( { Op, LHS, RHS }, Headers ) -> do_bin_op(Op, evaluate(LHS, Headers), evaluate(RHS, Headers));
+
+evaluate( Value, _Headers ) -> Value.
+
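+%% Three-valued logic helpers: undefined behaves like SQL NULL and
+%% propagates through NOT/AND/OR whenever the result cannot be decided.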
+not3(true ) -> false;
+not3(false) -> true;
+not3(_ ) -> undefined.
+
+and3(true, true ) -> true;
+and3(false, _ ) -> false;
+and3(_, false) -> false;
+and3(_, _ ) -> undefined.
+
+or3(false, false) -> false;
+or3(true, _ ) -> true;
+or3(_, true ) -> true;
+or3(_, _ ) -> undefined.
+
+do_una_op(_, undefined) -> undefined;
+do_una_op('-', E) -> -E;
+do_una_op('+', E) -> +E;
+do_una_op(_, _) -> error.
+
+do_bin_op(_, undefined, _) -> undefined;
+do_bin_op(_, _, undefined ) -> undefined;
+do_bin_op('=' , L, R) -> L == R;
+do_bin_op('<>', L, R) -> L /= R;
+do_bin_op('>' , L, R) -> L > R;
+do_bin_op('<' , L, R) -> L < R;
+do_bin_op('>=', L, R) -> L >= R;
+do_bin_op('<=', L, R) -> L =< R;
+do_bin_op('in', L, R) -> isIn(L, R);
+do_bin_op('not_in', L, R) -> not isIn(L, R);
+do_bin_op('+' , L, R) -> L + R;
+do_bin_op('-' , L, R) -> L - R;
+do_bin_op('*' , L, R) -> L * R;
+do_bin_op('/' , L, R) when R /= 0 -> L / R;
+do_bin_op('/' , L, R) when L > 0 andalso R == 0 -> plus_infinity;
+do_bin_op('/' , L, R) when L < 0 andalso R == 0 -> minus_infinity;
+do_bin_op('/' , L, R) when L == 0 andalso R == 0 -> nan;
+do_bin_op(_,_,_) -> error.
+
+isLike(undefined, _Patt) -> undefined;
+isLike(L, {regex, MP}) -> patt_match(L, MP);
+isLike(L, {Patt, Esc}) -> patt_match(L, pattern_of(Patt, Esc)).
+
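+%% A LIKE pattern only matches when the regex match starts at offset 0
+%% and covers the whole value, hence the byte_size comparison below.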
+patt_match(L, MP) ->
+ BS = byte_size(L),
+ case re:run(L, MP, [{capture, first}]) of
+ {match, [{0, BS}]} -> true;
+ _ -> false
+ end.
+
+isIn(_L, [] ) -> false;
+isIn( L, [L|_]) -> true;
+isIn( L, [_|R]) -> isIn(L,R).
+
+val_of({'ident', Ident}, Hs) -> lookup_value(Hs, Ident);
+val_of(Value, _Hs) -> Value.
+
+between(E, F, T) when E =:= undefined orelse F =:= undefined orelse T =:= undefined -> undefined;
+between(Value, Lo, Hi) -> Lo =< Value andalso Value =< Hi.
+
+lookup_value(Table, Key) ->
+ case lists:keyfind(Key, 1, Table) of
+ {_, longstr, Value} -> Value;
+ {_, signedint, Value} -> Value;
+ {_, float, Value} -> Value;
+ {_, double, Value} -> Value;
+ {_, byte, Value} -> Value;
+ {_, short, Value} -> Value;
+ {_, long, Value} -> Value;
+ {_, bool, Value} -> Value;
+ false -> undefined
+ end.
+
+pattern_of(S, Esc) -> compile_re(gen_re(binary_to_list(S), Esc)).
+
+gen_re(S, <<Ch>> ) -> convert(S, [], Ch );
+gen_re(S, no_escape) -> convert(S, [], no_escape);
+gen_re(_,_) -> error.
+
+convert([], Acc, _Esc) -> lists:reverse(Acc);
+convert([Esc, Ch | Rest], Acc, Esc) -> convert(Rest, [escape(Ch) | Acc], Esc);
+convert([$_ | Rest], Acc, Esc) -> convert(Rest, [$. | Acc], Esc);
+convert([$% | Rest], Acc, Esc) -> convert(Rest, [".*" | Acc], Esc);
+convert([Ch | Rest], Acc, Esc) -> convert(Rest, [escape(Ch) | Acc], Esc).
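+%% Examples (illustrative): gen_re("bl%", no_escape) is equivalent to the
+%% regex "bl.*"; gen_re("b_!_ue", <<"!">>) yields "b._ue", since the
+%% escaped "_" stays literal while the bare "_" becomes ".".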
+
+escape($.) -> "\\.";
+escape($*) -> "\\*";
+escape($+) -> "\\+";
+escape($?) -> "\\?";
+escape($^) -> "\\^";
+escape($=) -> "\\=";
+escape($!) -> "\\!";
+escape($:) -> "\\:";
+escape($$) -> "\\$";
+escape(${) -> "\\{";
+escape($}) -> "\\}";
+escape($() -> "\\(";
+escape($)) -> "\\)";
+escape($|) -> "\\|";
+escape($[) -> "\\[";
+escape($]) -> "\\]";
+escape($/) -> "\\/";
+escape($\\) -> "\\\\";
+escape(Ch) -> Ch.
+
+compile_re(error) -> error;
+compile_re(MatchMany) ->
+  case re:compile(MatchMany) of
+    {ok, Rx} -> Rx;
+    _ -> error
+  end.
diff --git a/deps/rabbitmq_jms_topic_exchange/test/rjms_topic_selector_SUITE.erl b/deps/rabbitmq_jms_topic_exchange/test/rjms_topic_selector_SUITE.erl
new file mode 100644
index 0000000000..808e1b1db6
--- /dev/null
+++ b/deps/rabbitmq_jms_topic_exchange/test/rjms_topic_selector_SUITE.erl
@@ -0,0 +1,131 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2013-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rjms_topic_selector_SUITE).
+
+-compile(export_all).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include("rabbit_jms_topic_exchange.hrl").
+
+-import(rabbit_ct_client_helpers, [open_connection_and_channel/1,
+ close_connection_and_channel/2]).
+
+%% Useful test constructors
+-define(BSELECTARG(BinStr), {?RJMS_COMPILED_SELECTOR_ARG, longstr, BinStr}).
+-define(BASICMSG(Payload, Hdrs), #'amqp_msg'{props=#'P_basic'{headers=Hdrs}, payload=Payload}).
+
+all() ->
+ [
+ {group, parallel_tests}
+ ].
+
+groups() ->
+ [
+ {parallel_tests, [parallel], [
+ test_topic_selection
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Test suite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, ?MODULE}
+ ]),
+ rabbit_ct_helpers:run_setup_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_group(_, Config) ->
+ Config.
+
+end_per_group(_, Config) ->
+ Config.
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+%% -------------------------------------------------------------------
+%% Test cases.
+%% -------------------------------------------------------------------
+
+test_topic_selection(Config) ->
+ {Connection, Channel} = open_connection_and_channel(Config),
+ #'confirm.select_ok'{} = amqp_channel:call(Channel, #'confirm.select'{}),
+
+ Exchange = declare_rjms_exchange(Channel, "rjms_test_topic_selector_exchange", []),
+
+ %% Declare a queue and bind it
+ Q = declare_queue(Channel),
+ bind_queue(Channel, Q, Exchange, <<"select-key">>, [?BSELECTARG(<<"{ident, <<\"boolVal\">>}.">>)]),
+
+ publish_two_messages(Channel, Exchange, <<"select-key">>),
+ amqp_channel:wait_for_confirms(Channel, 5),
+
+ get_and_check(Channel, Q, 0, <<"true">>),
+
+ close_connection_and_channel(Connection, Channel),
+ ok.
+
+
+%% -------------------------------------------------------------------
+%% Helpers.
+%% -------------------------------------------------------------------
+
+%% Declare a rjms_topic_selector exchange, with args
+declare_rjms_exchange(Ch, XNameStr, XArgs) ->
+ Exchange = list_to_binary(XNameStr),
+ Decl = #'exchange.declare'{ exchange = Exchange
+ , type = <<"x-jms-topic">>
+ , arguments = XArgs },
+ #'exchange.declare_ok'{} = amqp_channel:call(Ch, Decl),
+ Exchange.
+
+%% Bind a selector queue to an exchange
+bind_queue(Ch, Q, Ex, RKey, Args) ->
+ Binding = #'queue.bind'{ queue = Q
+ , exchange = Ex
+ , routing_key = RKey
+ , arguments = Args
+ },
+ #'queue.bind_ok'{} = amqp_channel:call(Ch, Binding),
+ ok.
+
+%% Declare a queue, return Q name (as binary)
+declare_queue(Ch) ->
+ #'queue.declare_ok'{queue = Q} = amqp_channel:call(Ch, #'queue.declare'{}),
+ Q.
+
+%% Get message from Q and check remaining and payload.
+get_and_check(Channel, Queue, ExpectedRemaining, ExpectedPayload) ->
+ Get = #'basic.get'{queue = Queue},
+ {#'basic.get_ok'{delivery_tag = Tag, message_count = Remaining}, Content}
+ = amqp_channel:call(Channel, Get),
+ amqp_channel:cast(Channel, #'basic.ack'{delivery_tag = Tag}),
+
+ ExpectedRemaining = Remaining,
+ ExpectedPayload = Content#amqp_msg.payload,
+ ok.
+
+publish_two_messages(Chan, Exch, RoutingKey) ->
+ Publish = #'basic.publish'{exchange = Exch, routing_key = RoutingKey},
+ amqp_channel:cast(Chan, Publish, ?BASICMSG(<<"false">>, [{<<"boolVal">>, 'bool', false}])),
+ amqp_channel:cast(Chan, Publish, ?BASICMSG(<<"true">>, [{<<"boolVal">>, 'bool', true}])),
+ ok.
diff --git a/deps/rabbitmq_jms_topic_exchange/test/rjms_topic_selector_unit_SUITE.erl b/deps/rabbitmq_jms_topic_exchange/test/rjms_topic_selector_unit_SUITE.erl
new file mode 100644
index 0000000000..63d442e973
--- /dev/null
+++ b/deps/rabbitmq_jms_topic_exchange/test/rjms_topic_selector_unit_SUITE.erl
@@ -0,0 +1,105 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2012-2020 VMware, Inc. or its affiliates. All rights reserved.
+%% -----------------------------------------------------------------------------
+
+%% Unit test file for RJMS Topic Selector plugin
+
+%% -----------------------------------------------------------------------------
+
+-module(rjms_topic_selector_unit_SUITE).
+
+-compile(export_all).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include("rabbit_jms_topic_exchange.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-import(rabbit_jms_topic_exchange, [ description/0
+ , serialise_events/0
+ , route/2
+ , validate/1
+ , create/2
+ , delete/3
+ , validate_binding/2
+ , add_binding/3
+ , remove_bindings/3
+ , assert_args_equivalence/2
+                                   , policy_changed/2 ]).
+
+
+all() ->
+ [
+ {group, parallel_tests}
+ ].
+
+groups() ->
+ [
+ {parallel_tests, [parallel], [
+ description_test,
+ serialise_events_test,
+ validate_test,
+ create_test,
+ delete_test,
+ validate_binding_test,
+ add_binding_test
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Test suite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ Config.
+
+end_per_suite(Config) ->
+ Config.
+
+init_per_group(_, Config) ->
+ Config.
+
+end_per_group(_, Config) ->
+ Config.
+
+init_per_testcase(_Testcase, Config) ->
+ Config.
+
+end_per_testcase(_Testcase, Config) ->
+ Config.
+
+%% -------------------------------------------------------------------
+%% Test cases.
+%% -------------------------------------------------------------------
+
+description_test(_Config) ->
+ ?assertMatch([{name, _}, {description, _}], description()).
+
+serialise_events_test(_Config) ->
+ ?assertMatch(false, serialise_events()).
+
+validate_test(_Config) ->
+ ?assertEqual(ok, validate(any_exchange)).
+
+create_test(_Config) ->
+ ?assertEqual(ok, create(none, any_exchange)).
+
+delete_test(_Config) ->
+ ?assertEqual(ok, delete(none, any_exchange, any_bindings)).
+
+validate_binding_test(_Config) ->
+ ?assertEqual(ok, validate_binding(any_exchange, any_bindings)).
+
+add_binding_test(_Config) ->
+ ?assertEqual(ok, add_binding(none, dummy_exchange(), dummy_binding())).
+
+dummy_exchange() ->
+ #exchange{name = <<"XName">>, arguments = []}.
+
+dummy_binding() ->
+ #binding{ key = <<"BindingKey">>
+ , destination = #resource{name = <<"DName">>}
+ , args = [{?RJMS_COMPILED_SELECTOR_ARG, longstr, <<"<<\"false\">>.">>}]}.
diff --git a/deps/rabbitmq_jms_topic_exchange/test/sjx_evaluation_SUITE.erl b/deps/rabbitmq_jms_topic_exchange/test/sjx_evaluation_SUITE.erl
new file mode 100644
index 0000000000..99e15a0f3d
--- /dev/null
+++ b/deps/rabbitmq_jms_topic_exchange/test/sjx_evaluation_SUITE.erl
@@ -0,0 +1,122 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2012, 2013 Steve Powell (Zteve.Powell@gmail.com)
+%% -----------------------------------------------------------------------------
+
+%% Tests for sjx_evaluator
+
+%% -----------------------------------------------------------------------------
+-module(sjx_evaluation_SUITE).
+
+-compile(export_all).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-import(sjx_evaluator, [evaluate/2]).
+
+%% Fixed type info for identifiers
+%%
+-define(TEST_TYPE_INFO,
+[ {<<"JMSType">>, longstr, <<"string">>}
+, {<<"JMSCorrelationID">>, longstr, <<"string">>}
+, {<<"JMSMessageID">>, longstr, <<"string">>}
+, {<<"JMSDeliveryMode">>, longstr, <<"string">>}
+, {<<"JMSPriority">>, longstr, <<"number">>}
+, {<<"JMSTimestamp">>, longstr, <<"number">>}
+]).
+
+
+all() ->
+ [
+ {group, non_parallel_tests}
+ ].
+
+groups() ->
+ [
+ {non_parallel_tests, [], [
+ basic_evaluate_test
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Test suite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ Config.
+
+end_per_suite(Config) ->
+ Config.
+
+init_per_group(_, Config) ->
+ Config.
+
+end_per_group(_, Config) ->
+ Config.
+
+init_per_testcase(_Testcase, Config) ->
+ Config.
+
+end_per_testcase(_Testcase, Config) ->
+ Config.
+
+%% -------------------------------------------------------------------
+%% Test cases.
+%% -------------------------------------------------------------------
+
+basic_evaluate_test(_Config) ->
+ Hs = [{<<"JMSType">>, longstr, <<"car">>},
+ {<<"colour">>, longstr, <<"blue">>},
+ {<<"altcol">>, longstr, <<"'blue">>},
+ {<<"likevar">>, longstr, <<"bl_ue">>},
+ {<<"weight">>, signedint, 2501},
+ {<<"WeIgHt">>, signedint, 2},
+ {<<"afloat">>, float, 3.0e-2},
+ {<<"abool">>, bool, false}],
+
+    Tests =
+    [ ?_assert( eval(Hs, {'=', {'ident', <<"JMSType">>}, <<"car">>} ))
+ , ?_assert(not eval(Hs, {'ident', <<"abool">>} ))
+ , ?_assert( eval(Hs, {'not', {'ident', <<"abool">>}} ))
+ , ?_assert( eval(Hs, {'=', {'ident', <<"colour">>}, <<"blue">>} ))
+ , ?_assert( eval(Hs, {'=', {'ident', <<"weight">>}, 2501} ))
+ , ?_assert( eval(Hs, {'=', {'ident', <<"WeIgHt">>}, 2} ))
+ , ?_assert( eval(Hs, {'=', 2501, {'ident', <<"weight">>}} ))
+ , ?_assert( eval(Hs, {'=', {'ident', <<"afloat">>}, 3.0e-2} ))
+ , ?_assert( eval(Hs, {'>', {'ident', <<"weight">>}, 2500} ))
+ , ?_assert( eval(Hs, {'<', {'ident', <<"weight">>}, 2502} ))
+ , ?_assert( eval(Hs, {'>=', {'ident', <<"weight">>}, 2501} ))
+ , ?_assert( eval(Hs, {'<=', {'ident', <<"weight">>}, 2501} ))
+ , ?_assert(not eval(Hs, {'<=', {'ident', <<"weight">>}, 2500} ))
+ , ?_assert( eval(Hs, {'between', {'ident', <<"weight">>}, {'range', 0, 2501}} ))
+ , ?_assert( eval(Hs, {'between', {'ident', <<"weight">>}, {'range', 2500, 2501}} ))
+ , ?_assert( eval(Hs, {'between', 17, {'range', 17, 18}} ))
+ , ?_assert( eval(Hs, {'between', 17, {'range', 17, 17}} ))
+ , ?_assert( eval(Hs, {'not_between', 16, {'range', 17, 18}} ))
+ , ?_assert( eval(Hs, {'<', 2500, {'ident', <<"weight">>}} ))
+ , ?_assert( eval(Hs, {'>', 2502, {'ident', <<"weight">>}} ))
+ , ?_assert( eval(Hs, {'<=', 2500, {'ident', <<"weight">>}} ))
+ , ?_assert( eval(Hs, {'>=', 2502, {'ident', <<"weight">>}} ))
+ , ?_assert( eval(Hs, {'<=', 2501, {'ident', <<"weight">>}} ))
+ , ?_assert( eval(Hs, {'>=', 2501, {'ident', <<"weight">>}} ))
+ , ?_assert( eval(Hs, {'like', {'ident', <<"colour">>}, {<<"bl%">>, 'no_escape'}} ))
+ , ?_assert( eval(Hs, {'like', {'ident', <<"likevar">>}, {<<"b_!_ue">>, <<"!">>}} ))
+ , ?_assert( eval(Hs, {'like', {'ident', <<"colour">>}, {<<"bl_e">>, 'no_escape'}} ))
+ , ?_assert( eval(Hs, {'not_like', {'ident', <<"colour">>}, {<<"l%">>, 'no_escape'}} ))
+ , ?_assert(not eval(Hs, {'not_like', {'ident', <<"colour">>}, {<<"bl%">>, 'no_escape'}} ))
+ , ?_assert( eval(Hs, {'in', {'ident', <<"colour">>}, [<<"blue">>, <<"green">>]} ))
+ , ?_assert(not eval(Hs, {'not_in', {'ident', <<"colour">>}, [<<"green">>, <<"blue">>]} ))
+ , ?_assert(not eval(Hs, {'in', {'ident', <<"colour">>}, [<<"bleen">>, <<"grue">>]} ))
+ , ?_assert( eval(Hs, {'not_in', {'ident', <<"colour">>}, [<<"grue">>, <<"bleen">>]} ))
+ , ?_assert( eval(Hs, {'not_like', {'ident', <<"altcol">>}, {<<"bl%">>, 'no_escape'}} ))
+ , ?_assert( eval(Hs, {'like', {'ident', <<"altcol">>}, {<<"'bl%">>, 'no_escape'}} ))
+ , ?_assert( eval(Hs, {'or', {'and', {'like', {'ident', <<"colour">>}, {<<"bl%">>, 'no_escape'}}
+ , {'>', {'ident', <<"weight">>}, 2500}}
+ , false} ))
+ , ?_assert(undefined =:= eval(Hs, {'<=', {'ident', <<"missing">>}, 2500} ))
+ , ?_assert(undefined =:= eval(Hs, {'in', {'ident', <<"missing">>}, [<<"blue">>]} ))
+    ],
+    %% ?_assert/1 builds {Line, TestFun} tuples; run them explicitly so a
+    %% failed assertion actually fails this common_test case.
+    lists:foreach(fun({_Line, TestFun}) -> TestFun() end, Tests).
+
+eval(Hs, S) -> evaluate(S, Hs).
diff --git a/deps/rabbitmq_management/.gitignore b/deps/rabbitmq_management/.gitignore
new file mode 100644
index 0000000000..ab433eaff0
--- /dev/null
+++ b/deps/rabbitmq_management/.gitignore
@@ -0,0 +1,36 @@
+.sw?
+.*.sw?
+*.beam
+*.pem
+erl_crash.dump
+MnesiaCore.*
+/.erlang.mk/
+/cover/
+/debug/
+/deps/
+/doc/
+/ebin/
+/escript/
+/escript.lock
+/logs/
+/plugins/
+/plugins.lock
+/sbin/
+/sbin.lock
+/xrefr
+
+rabbitmq_management.d
+.rabbitmq_management.plt
+
+# Common Test
+ct_run*
+all_runs.html
+index.html
+ct_default.css
+ct_log_cache
+variables-ct*
+
+*.coverdata
+
+test/config_schema_SUITE_data/schema/
+.vscode/*
\ No newline at end of file
diff --git a/deps/rabbitmq_management/.travis.yml b/deps/rabbitmq_management/.travis.yml
new file mode 100644
index 0000000000..a9b75d084d
--- /dev/null
+++ b/deps/rabbitmq_management/.travis.yml
@@ -0,0 +1,61 @@
+# vim:sw=2:et:
+
+os: linux
+dist: xenial
+language: elixir
+notifications:
+ email:
+ recipients:
+ - alerts@rabbitmq.com
+ on_success: never
+ on_failure: always
+addons:
+ apt:
+ packages:
+ - awscli
+cache:
+ apt: true
+env:
+ global:
+ - secure: Tu26VJ9BsXxL20xxwWk4cbCkZyqyxYmNpSSqco5r3FLeU5hk5Vkk+s2BareRvqKhKHFlvyxu8GwsKtajMvsieP6y5J99gSeub6fDOIskPz61bo0aKA9nbDuBFSG1Z5wgXx1XRo0yDatLxXCXe3FbThRsylG7XNjtRaru1/lwuVxfxPtBGQ1opvQX71sST3GYSPoBYR+JlcVpU+uDHMAzsP8J0m5rEpxcl821aTMk3iz90hBQMsoLTBmSQePPcNqOA/1OH75VfjuXR8JBXHvA9njrUBrsyxgHf2uOh3jAXdIrHZwZg/17+y7gNVqByfx/UpGb8XEpVkncg/cRyVIHMk7/gFCZkeVC1QkIN5+EPiGLF7u32x9QaT7Zqz57iLh3IJzED2dj12qWaeX8QypF1K1r5qq4pRrN6iEZx76stpZbyFT4XnExHRdzPuouy7yz1gDHF0HOxbNLowzc/jk7tuTp+qmDSR5tRvegAIH3TONegxXyB7smdbvdI6MCN5/GP2bGK7HiqYWCmTGHtJwgxBKc5XoV8ZjpXfKxG98WbK5RsSP1miRnmxSbxaV0Gai1hfFlanJFFxTA9584O+NVRXNNFMfnnt20Ts6OwoXTcJ/boIPjF5Mcm0eJ4nz4R18TArXE4B5S4pTk3eQkG1ACDigkYZ3fc6ws4cWrt8BZASI=
+ - secure: fNEx9OXi2UisiYu0FiHJpV9+vWLB9DIUAIKG24GfUHVgZqFQOInBf5fEYrjlVgm5zNezSBS3hFNHXd/EXJF8KNgbf6mI0z4h4RyyQY98N+78tWvINoIawEeYpgC6NTI52MdaCfV+fTVWhiL0uP7mqWhLmll2bKXIy6HA6I9PnmiQSloNe64vUPF+UsVZHzzeabK4DR2VdI3h+BGXzOY9FG8Kt2voiXOLd2RFpVeN86FDTp+uVZY/K9e/MsktoK+XaZZ4qMAgm6lB32LVkzl3KA9ki6y6BY7le1m2c90hxAtBJGWZptkMb+VL0Fem39nEBnLjE0a0vIddp32PLJQmv6eopMfLay5BIkwtkRwv3P0uCwYd0bgYQSHF/gdTCcK1nr7fMhkQveBh6vmnbhrca7OeQRHz08+jo6EquUgNQZKmTZPWXQn9lS9mU/0EDLJJhn4KhJezGw6DcAAqB0KqmQedxtHMUT87by7LzhINwKZnm4y5WKA/W/zLI6dNqvIgc5C6UJh0EVgxa13GRmrnGmttV1dtLRQhiMJCbJykaekjPMULUmli0RbFz7bSFqFqEUsF+wwovyD+Y6D8KGOJdvvEYPdPIFpRPnhGUvH86JzsFdVKNJBicGI9LpCtlXlWNRbQIQ8uV5ze2HhxSJhtM6e6dB4d9yzpp6a81uR77bk=
+
+ # $base_rmq_ref is used by rabbitmq-components.mk to select the
+ # appropriate branch for dependencies.
+ - base_rmq_ref=master
+
+elixir:
+ - '1.10'
+otp_release:
+ - '22.3'
+ - '23.0'
+
+install:
+ # This project being an Erlang one (we just set language to Elixir
+ # to ensure it is installed), we don't want Travis to run mix(1)
+ # automatically as it will break.
+ skip
+
+script:
+ # $current_rmq_ref is also used by rabbitmq-components.mk to select
+ # the appropriate branch for dependencies.
+ - make check-rabbitmq-components.mk
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+ - make xref
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+ - make tests
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+
+after_failure:
+ - |
+ cd "$TRAVIS_BUILD_DIR"
+ if test -d logs && test "$AWS_ACCESS_KEY_ID" && test "$AWS_SECRET_ACCESS_KEY"; then
+ archive_name="$(basename "$TRAVIS_REPO_SLUG")-$TRAVIS_JOB_NUMBER"
+
+ tar -c --transform "s/^logs/${archive_name}/" -f - logs | \
+ xz > "${archive_name}.tar.xz"
+
+ aws s3 cp "${archive_name}.tar.xz" s3://server-release-pipeline/travis-ci-logs/ \
+ --region eu-west-1 \
+ --acl public-read
+ fi
diff --git a/deps/rabbitmq_management/CODE_OF_CONDUCT.md b/deps/rabbitmq_management/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000000..08697906fd
--- /dev/null
+++ b/deps/rabbitmq_management/CODE_OF_CONDUCT.md
@@ -0,0 +1,44 @@
+# Contributor Code of Conduct
+
+As contributors and maintainers of this project, and in the interest of fostering an open
+and welcoming community, we pledge to respect all people who contribute through reporting
+issues, posting feature requests, updating documentation, submitting pull requests or
+patches, and other activities.
+
+We are committed to making participation in this project a harassment-free experience for
+everyone, regardless of level of experience, gender, gender identity and expression,
+sexual orientation, disability, personal appearance, body size, race, ethnicity, age,
+religion, or nationality.
+
+Examples of unacceptable behavior by participants include:
+
+ * The use of sexualized language or imagery
+ * Personal attacks
+ * Trolling or insulting/derogatory comments
+ * Public or private harassment
+ * Publishing others' private information, such as physical or electronic addresses,
+ without explicit permission
+ * Other unethical or unprofessional conduct
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments,
+commits, code, wiki edits, issues, and other contributions that are not aligned to this
+Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors
+that they deem inappropriate, threatening, offensive, or harmful.
+
+By adopting this Code of Conduct, project maintainers commit themselves to fairly and
+consistently applying these principles to every aspect of managing this project. Project
+maintainers who do not follow or enforce the Code of Conduct may be permanently removed
+from the project team.
+
+This Code of Conduct applies both within project spaces and in public spaces when an
+individual is representing the project or its community.
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by
+contacting a project maintainer at [info@rabbitmq.com](mailto:info@rabbitmq.com). All complaints will
+be reviewed and investigated and will result in a response that is deemed necessary and
+appropriate to the circumstances. Maintainers are obligated to maintain confidentiality
+with regard to the reporter of an incident.
+
+This Code of Conduct is adapted from the
+[Contributor Covenant](https://contributor-covenant.org), version 1.3.0, available at
+[contributor-covenant.org/version/1/3/0/](https://contributor-covenant.org/version/1/3/0/)
diff --git a/deps/rabbitmq_management/CONTRIBUTING.md b/deps/rabbitmq_management/CONTRIBUTING.md
new file mode 100644
index 0000000000..592e7ced57
--- /dev/null
+++ b/deps/rabbitmq_management/CONTRIBUTING.md
@@ -0,0 +1,99 @@
+Thank you for using RabbitMQ and for taking the time to contribute to the project.
+This document has two main parts:
+
+ * when and how to file GitHub issues for RabbitMQ projects
+ * how to submit pull requests
+
+They are intended to save you and RabbitMQ maintainers some time, so please
+take a moment to read through them.
+
+## Overview
+
+### GitHub issues
+
+The RabbitMQ team uses GitHub issues for _specific actionable items_ that
+engineers can work on. This assumes the following:
+
+* GitHub issues are not used for questions, investigations, root cause
+ analysis, discussions of potential issues, etc (as defined by this team)
+* Enough information is provided by the reporter for maintainers to work with
+
+The team receives many questions through various venues every single
+day. Frequently, these questions do not include the necessary details
+the team needs to begin useful work. GitHub issues can very quickly
+turn into something impossible to navigate and make sense
+of. Because of this, questions, investigations, root cause analysis,
+and discussions of potential features are all considered to be
+[mailing list][rmq-users] material. If you are unsure where to begin,
+the [RabbitMQ users mailing list][rmq-users] is the right place.
+
+Getting all the details necessary to reproduce an issue, make a
+conclusion or even form a hypothesis about what's happening can take a
+fair amount of time. Please help others help you by providing a way to
+reproduce the behavior you're observing, or at least sharing as much
+relevant information as possible on the [RabbitMQ users mailing
+list][rmq-users].
+
+Please provide versions of the software used:
+
+ * RabbitMQ server
+ * Erlang
+ * Operating system version (and distribution, if applicable)
+ * All client libraries used
+ * RabbitMQ plugins (if applicable)
+
+The following information greatly helps in investigating and reproducing issues:
+
+ * RabbitMQ server logs
+ * A code example or terminal transcript that can be used to reproduce
+ * Full exception stack traces (a single line message is not enough!)
+ * `rabbitmqctl report` and `rabbitmqctl environment` output
+ * Other relevant details about the environment and workload, e.g. a traffic capture
+ * Feel free to edit out hostnames and other potentially sensitive information.
+
+To make collecting much of this and other environment information easier, use
+the [`rabbitmq-collect-env`][rmq-collect-env] script. It will produce an archive with
+server logs, operating system logs, output of certain diagnostics commands and so on.
+Please note that **no effort is made to scrub any information that may be sensitive**.
+
+### Pull Requests
+
+RabbitMQ projects use pull requests to discuss, collaborate on and accept code contributions.
+Pull requests are the primary place for discussing code changes.
+
+Here's the recommended workflow:
+
+ * [Fork the repository][github-fork] or repositories you plan on contributing to. If multiple
+ repositories are involved in addressing the same issue, please use the same branch name
+ in each repository
+ * Create a branch with a descriptive name in the relevant repositories
+ * Make your changes, run tests (usually with `make tests`), commit with a
+ [descriptive message][git-commit-msgs], push to your fork
+ * Submit pull requests with an explanation of what has been changed and **why**
+ * Submit a filled out and signed [Contributor Agreement][ca-agreement] if needed (see below)
+ * Be patient. We will get to your pull request eventually
+
+If what you are going to work on is a substantial change, please first
+ask the core team for their opinion on the [RabbitMQ users mailing list][rmq-users].
+
+## Code of Conduct
+
+See [CODE_OF_CONDUCT.md](./CODE_OF_CONDUCT.md).
+
+## Contributor Agreement
+
+If you want to contribute a non-trivial change, please submit a signed
+copy of our [Contributor Agreement][ca-agreement] around the time you
+submit your pull request. This will make it much easier (in some
+cases, possible) for the RabbitMQ team at Pivotal to merge your
+contribution.
+
+## Where to Ask Questions
+
+If something isn't clear, feel free to ask on our [mailing list][rmq-users].
+
+[rmq-collect-env]: https://github.com/rabbitmq/support-tools/blob/master/scripts/rabbitmq-collect-env
+[git-commit-msgs]: https://chris.beams.io/posts/git-commit/
+[rmq-users]: https://groups.google.com/forum/#!forum/rabbitmq-users
+[ca-agreement]: https://cla.pivotal.io/sign/rabbitmq
+[github-fork]: https://help.github.com/articles/fork-a-repo/
diff --git a/deps/rabbitmq_management/Caddyfile b/deps/rabbitmq_management/Caddyfile
new file mode 100644
index 0000000000..04a803d14b
--- /dev/null
+++ b/deps/rabbitmq_management/Caddyfile
@@ -0,0 +1,5 @@
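+# Development convenience (summary): serves the static management UI from
+# priv/www on :2015 and proxies /api requests to a local broker's
+# management listener on localhost:15672.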
+:2015
+log stdout
+errors stderr
+root priv/www
+proxy /api localhost:15672
diff --git a/deps/rabbitmq_management/LICENSE b/deps/rabbitmq_management/LICENSE
new file mode 100644
index 0000000000..261faf450b
--- /dev/null
+++ b/deps/rabbitmq_management/LICENSE
@@ -0,0 +1,16 @@
+This package, the RabbitMQ Management Plugin is licensed under the MPL 2.0. For the
+MPL 2.0, please see LICENSE-MPL-RabbitMQ.
+
+This package makes use of the following third party libraries:
+
+| Name | Website | License type | License file |
+| -------------- | ------------------------------------------------------ | ------------- | ------------------------ |
+| ExplorerCanvas | https://github.com/arv/explorercanvas | APACHE 2 | LICENSE-APACHE2-excanvas |
+| base64.js | https://code.google.com/p/stringencoders/ | BSD | LICENSE-BSD-base64js |
+| Cowboy | https://ninenines.eu/ | ISC | LICENSE-ISC-cowboy |
+| EJS | https://code.google.com/archive/p/embeddedjavascript/ | MIT | LICENSE-MIT-EJS |
+| Flot | https://github.com/flot/flot | MIT | LICENSE-MIT-Flot |
+| Sammy | https://code.quirkey.com/sammy/ | MIT | LICENSE-MIT-Sammy |
+| jQuery | https://jquery.com/ | MIT | LICENSE-MIT-jQuery |
+
+If you have any questions regarding licensing, please contact us at info@rabbitmq.com.
diff --git a/deps/rabbitmq_management/LICENSE-APACHE2-excanvas b/deps/rabbitmq_management/LICENSE-APACHE2-excanvas
new file mode 100644
index 0000000000..62589edd12
--- /dev/null
+++ b/deps/rabbitmq_management/LICENSE-APACHE2-excanvas
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ https://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ https://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/deps/rabbitmq_management/LICENSE-BSD-base64js b/deps/rabbitmq_management/LICENSE-BSD-base64js
new file mode 100644
index 0000000000..bc116b0231
--- /dev/null
+++ b/deps/rabbitmq_management/LICENSE-BSD-base64js
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2010 Nick Galbreath
+ * https://code.google.com/p/stringencoders/source/browse/#svn/trunk/javascript
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+*/
diff --git a/deps/rabbitmq_management/LICENSE-ISC-cowboy b/deps/rabbitmq_management/LICENSE-ISC-cowboy
new file mode 100644
index 0000000000..9d28158fb7
--- /dev/null
+++ b/deps/rabbitmq_management/LICENSE-ISC-cowboy
@@ -0,0 +1,13 @@
+Copyright (c) 2011-2017, Loïc Hoguin <essen@ninenines.eu>
+
+Permission to use, copy, modify, and/or distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
diff --git a/deps/rabbitmq_management/LICENSE-MIT-EJS b/deps/rabbitmq_management/LICENSE-MIT-EJS
new file mode 100644
index 0000000000..5990a35a7e
--- /dev/null
+++ b/deps/rabbitmq_management/LICENSE-MIT-EJS
@@ -0,0 +1,21 @@
+EJS - Embedded JavaScript
+
+Copyright (c) 2007 Edward Benson
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/deps/rabbitmq_management/LICENSE-MIT-Flot b/deps/rabbitmq_management/LICENSE-MIT-Flot
new file mode 100644
index 0000000000..719da064fe
--- /dev/null
+++ b/deps/rabbitmq_management/LICENSE-MIT-Flot
@@ -0,0 +1,22 @@
+Copyright (c) 2007-2014 IOLA and Ole Laursen
+
+Permission is hereby granted, free of charge, to any person
+obtaining a copy of this software and associated documentation
+files (the "Software"), to deal in the Software without
+restriction, including without limitation the rights to use,
+copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the
+Software is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+OTHER DEALINGS IN THE SOFTWARE.
diff --git a/deps/rabbitmq_management/LICENSE-MIT-Sammy b/deps/rabbitmq_management/LICENSE-MIT-Sammy
new file mode 100644
index 0000000000..ad77bb8e6b
--- /dev/null
+++ b/deps/rabbitmq_management/LICENSE-MIT-Sammy
@@ -0,0 +1,22 @@
+Copyright (c) 2008 Aaron Quint, Quirkey NYC, LLC
+
+Permission is hereby granted, free of charge, to any person
+obtaining a copy of this software and associated documentation
+files (the "Software"), to deal in the Software without
+restriction, including without limitation the rights to use,
+copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the
+Software is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+OTHER DEALINGS IN THE SOFTWARE.
diff --git a/deps/rabbitmq_management/LICENSE-MIT-jQuery b/deps/rabbitmq_management/LICENSE-MIT-jQuery
new file mode 100644
index 0000000000..68b4f25813
--- /dev/null
+++ b/deps/rabbitmq_management/LICENSE-MIT-jQuery
@@ -0,0 +1,20 @@
+Copyright (c) 2011 John Resig, https://jquery.com/
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/deps/rabbitmq_management/LICENSE-MPL-RabbitMQ b/deps/rabbitmq_management/LICENSE-MPL-RabbitMQ
new file mode 100644
index 0000000000..14e2f777f6
--- /dev/null
+++ b/deps/rabbitmq_management/LICENSE-MPL-RabbitMQ
@@ -0,0 +1,373 @@
+Mozilla Public License Version 2.0
+==================================
+
+1. Definitions
+--------------
+
+1.1. "Contributor"
+ means each individual or legal entity that creates, contributes to
+ the creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+ means the combination of the Contributions of others (if any) used
+ by a Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+ means Source Code Form to which the initial Contributor has attached
+ the notice in Exhibit A, the Executable Form of such Source Code
+ Form, and Modifications of such Source Code Form, in each case
+ including portions thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ (a) that the initial Contributor has attached the notice described
+ in Exhibit B to the Covered Software; or
+
+ (b) that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the
+ terms of a Secondary License.
+
+1.6. "Executable Form"
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+ means a work that combines Covered Software with other material, in
+ a separate file or files, that is not Covered Software.
+
+1.8. "License"
+ means this document.
+
+1.9. "Licensable"
+ means having the right to grant, to the maximum extent possible,
+ whether at the time of the initial grant or subsequently, any and
+ all of the rights conveyed by this License.
+
+1.10. "Modifications"
+ means any of the following:
+
+ (a) any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered
+ Software; or
+
+ (b) any new file in Source Code Form that contains any Covered
+ Software.
+
+1.11. "Patent Claims" of a Contributor
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the
+ License, by the making, using, selling, offering for sale, having
+ made, import, or transfer of either its Contributions or its
+ Contributor Version.
+
+1.12. "Secondary License"
+ means either the GNU General Public License, Version 2.0, the GNU
+ Lesser General Public License, Version 2.1, the GNU Affero General
+ Public License, Version 3.0, or any later versions of those
+ licenses.
+
+1.13. "Source Code Form"
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that
+ controls, is controlled by, or is under common control with You. For
+ purposes of this definition, "control" means (a) the power, direct
+ or indirect, to cause the direction or management of such entity,
+ whether by contract or otherwise, or (b) ownership of more than
+ fifty percent (50%) of the outstanding shares or beneficial
+ ownership of such entity.
+
+2. License Grants and Conditions
+--------------------------------
+
+2.1. Grants
+
+Each Contributor hereby grants You a world-wide, royalty-free,
+non-exclusive license:
+
+(a) under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+(b) under Patent Claims of such Contributor to make, use, sell, offer
+ for sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+The licenses granted in Section 2.1 with respect to any Contribution
+become effective for each Contribution on the date the Contributor first
+distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+The licenses granted in this Section 2 are the only rights granted under
+this License. No additional rights or licenses will be implied from the
+distribution or licensing of Covered Software under this License.
+Notwithstanding Section 2.1(b) above, no patent license is granted by a
+Contributor:
+
+(a) for any code that a Contributor has removed from Covered Software;
+ or
+
+(b) for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+(c) under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+This License does not grant any rights in the trademarks, service marks,
+or logos of any Contributor (except as may be necessary to comply with
+the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+No Contributor makes additional grants as a result of Your choice to
+distribute the Covered Software under a subsequent version of this
+License (see Section 10.2) or under the terms of a Secondary License (if
+permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+Each Contributor represents that the Contributor believes its
+Contributions are its original creation(s) or it has sufficient rights
+to grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+This License is not intended to limit any rights You have under
+applicable copyright doctrines of fair use, fair dealing, or other
+equivalents.
+
+2.7. Conditions
+
+Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
+in Section 2.1.
+
+3. Responsibilities
+-------------------
+
+3.1. Distribution of Source Form
+
+All distribution of Covered Software in Source Code Form, including any
+Modifications that You create or to which You contribute, must be under
+the terms of this License. You must inform recipients that the Source
+Code Form of the Covered Software is governed by the terms of this
+License, and how they can obtain a copy of this License. You may not
+attempt to alter or restrict the recipients' rights in the Source Code
+Form.
+
+3.2. Distribution of Executable Form
+
+If You distribute Covered Software in Executable Form then:
+
+(a) such Covered Software must also be made available in Source Code
+ Form, as described in Section 3.1, and You must inform recipients of
+ the Executable Form how they can obtain a copy of such Source Code
+ Form by reasonable means in a timely manner, at a charge no more
+ than the cost of distribution to the recipient; and
+
+(b) You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter
+ the recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+You may create and distribute a Larger Work under terms of Your choice,
+provided that You also comply with the requirements of this License for
+the Covered Software. If the Larger Work is a combination of Covered
+Software with a work governed by one or more Secondary Licenses, and the
+Covered Software is not Incompatible With Secondary Licenses, this
+License permits You to additionally distribute such Covered Software
+under the terms of such Secondary License(s), so that the recipient of
+the Larger Work may, at their option, further distribute the Covered
+Software under the terms of either this License or such Secondary
+License(s).
+
+3.4. Notices
+
+You may not remove or alter the substance of any license notices
+(including copyright notices, patent notices, disclaimers of warranty,
+or limitations of liability) contained within the Source Code Form of
+the Covered Software, except that You may alter any license notices to
+the extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+You may choose to offer, and to charge a fee for, warranty, support,
+indemnity or liability obligations to one or more recipients of Covered
+Software. However, You may do so only on Your own behalf, and not on
+behalf of any Contributor. You must make it absolutely clear that any
+such warranty, support, indemnity, or liability obligation is offered by
+You alone, and You hereby agree to indemnify every Contributor for any
+liability incurred by such Contributor as a result of warranty, support,
+indemnity or liability terms You offer. You may include additional
+disclaimers of warranty and limitations of liability specific to any
+jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+---------------------------------------------------
+
+If it is impossible for You to comply with any of the terms of this
+License with respect to some or all of the Covered Software due to
+statute, judicial order, or regulation then You must: (a) comply with
+the terms of this License to the maximum extent possible; and (b)
+describe the limitations and the code they affect. Such description must
+be placed in a text file included with all distributions of the Covered
+Software under this License. Except to the extent prohibited by statute
+or regulation, such description must be sufficiently detailed for a
+recipient of ordinary skill to be able to understand it.
+
+5. Termination
+--------------
+
+5.1. The rights granted under this License will terminate automatically
+if You fail to comply with any of its terms. However, if You become
+compliant, then the rights granted under this License from a particular
+Contributor are reinstated (a) provisionally, unless and until such
+Contributor explicitly and finally terminates Your grants, and (b) on an
+ongoing basis, if such Contributor fails to notify You of the
+non-compliance by some reasonable means prior to 60 days after You have
+come back into compliance. Moreover, Your grants from a particular
+Contributor are reinstated on an ongoing basis if such Contributor
+notifies You of the non-compliance by some reasonable means, this is the
+first time You have received notice of non-compliance with this License
+from such Contributor, and You become compliant prior to 30 days after
+Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+infringement claim (excluding declaratory judgment actions,
+counter-claims, and cross-claims) alleging that a Contributor Version
+directly or indirectly infringes any patent, then the rights granted to
+You by any and all Contributors for the Covered Software under Section
+2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all
+end user license agreements (excluding distributors and resellers) which
+have been validly granted by You or Your distributors under this License
+prior to termination shall survive termination.
+
+************************************************************************
+* *
+* 6. Disclaimer of Warranty *
+* ------------------------- *
+* *
+* Covered Software is provided under this License on an "as is" *
+* basis, without warranty of any kind, either expressed, implied, or *
+* statutory, including, without limitation, warranties that the *
+* Covered Software is free of defects, merchantable, fit for a *
+* particular purpose or non-infringing. The entire risk as to the *
+* quality and performance of the Covered Software is with You. *
+* Should any Covered Software prove defective in any respect, You *
+* (not any Contributor) assume the cost of any necessary servicing, *
+* repair, or correction. This disclaimer of warranty constitutes an *
+* essential part of this License. No use of any Covered Software is *
+* authorized under this License except under this disclaimer. *
+* *
+************************************************************************
+
+************************************************************************
+* *
+* 7. Limitation of Liability *
+* -------------------------- *
+* *
+* Under no circumstances and under no legal theory, whether tort *
+* (including negligence), contract, or otherwise, shall any *
+* Contributor, or anyone who distributes Covered Software as *
+* permitted above, be liable to You for any direct, indirect, *
+* special, incidental, or consequential damages of any character *
+* including, without limitation, damages for lost profits, loss of *
+* goodwill, work stoppage, computer failure or malfunction, or any *
+* and all other commercial damages or losses, even if such party *
+* shall have been informed of the possibility of such damages. This *
+* limitation of liability shall not apply to liability for death or *
+* personal injury resulting from such party's negligence to the *
+* extent applicable law prohibits such limitation. Some *
+* jurisdictions do not allow the exclusion or limitation of *
+* incidental or consequential damages, so this exclusion and *
+* limitation may not apply to You. *
+* *
+************************************************************************
+
+8. Litigation
+-------------
+
+Any litigation relating to this License may be brought only in the
+courts of a jurisdiction where the defendant maintains its principal
+place of business and such litigation shall be governed by laws of that
+jurisdiction, without reference to its conflict-of-law provisions.
+Nothing in this Section shall prevent a party's ability to bring
+cross-claims or counter-claims.
+
+9. Miscellaneous
+----------------
+
+This License represents the complete agreement concerning the subject
+matter hereof. If any provision of this License is held to be
+unenforceable, such provision shall be reformed only to the extent
+necessary to make it enforceable. Any law or regulation which provides
+that the language of a contract shall be construed against the drafter
+shall not be used to construe this License against a Contributor.
+
+10. Versions of the License
+---------------------------
+
+10.1. New Versions
+
+Mozilla Foundation is the license steward. Except as provided in Section
+10.3, no one other than the license steward has the right to modify or
+publish new versions of this License. Each version will be given a
+distinguishing version number.
+
+10.2. Effect of New Versions
+
+You may distribute the Covered Software under the terms of the version
+of the License under which You originally received the Covered Software,
+or under the terms of any subsequent version published by the license
+steward.
+
+10.3. Modified Versions
+
+If you create software not governed by this License, and you want to
+create a new license for such software, you may create and use a
+modified version of this License if you rename the license and remove
+any references to the name of the license steward (except to note that
+such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+Licenses
+
+If You choose to distribute Source Code Form that is Incompatible With
+Secondary Licenses under the terms of this version of the License, the
+notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+-------------------------------------------
+
+ This Source Code Form is subject to the terms of the Mozilla Public
+ License, v. 2.0. If a copy of the MPL was not distributed with this
+ file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular
+file, then You may include the notice in a location (such as a LICENSE
+file in a relevant directory) where a recipient would be likely to look
+for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+---------------------------------------------------------
+
+ This Source Code Form is "Incompatible With Secondary Licenses", as
+ defined by the Mozilla Public License, v. 2.0.
diff --git a/deps/rabbitmq_management/Makefile b/deps/rabbitmq_management/Makefile
new file mode 100644
index 0000000000..2704f1f5d9
--- /dev/null
+++ b/deps/rabbitmq_management/Makefile
@@ -0,0 +1,52 @@
+PROJECT = rabbitmq_management
+PROJECT_DESCRIPTION = RabbitMQ Management Console
+PROJECT_MOD = rabbit_mgmt_app
+
+define PROJECT_ENV
+[
+ {http_log_dir, none},
+ {load_definitions, none},
+ {management_db_cache_multiplier, 5},
+ {process_stats_gc_timeout, 300000},
+ {stats_event_max_backlog, 250},
+
+ {cors_allow_origins, []},
+ {cors_max_age, 1800},
+ {content_security_policy, "script-src 'self' 'unsafe-eval' 'unsafe-inline'; object-src 'self'"}
+ ]
+endef
+
+define PROJECT_APP_EXTRA_KEYS
+ {broker_version_requirements, []}
+endef
+
+DEPS = rabbit_common rabbit amqp_client cowboy cowlib rabbitmq_web_dispatch rabbitmq_management_agent
+TEST_DEPS = rabbitmq_ct_helpers rabbitmq_ct_client_helpers proper
+LOCAL_DEPS += mnesia ranch ssl crypto public_key
+
+# FIXME: Add Ranch as a BUILD_DEPS to be sure the correct version is picked.
+# See rabbitmq-components.mk.
+BUILD_DEPS += ranch
+
+DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-plugin.mk
+DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk
+
+# FIXME: Use erlang.mk patched for RabbitMQ, while waiting for PRs to be
+# reviewed and merged.
+
+ERLANG_MK_REPO = https://github.com/rabbitmq/erlang.mk.git
+ERLANG_MK_COMMIT = rabbitmq-tmp
+
+include rabbitmq-components.mk
+include erlang.mk
+
+# --------------------------------------------------------------------
+# Distribution.
+# --------------------------------------------------------------------
+
+list-dist-deps::
+ @echo bin/rabbitmqadmin
+
+prepare-dist::
+ $(verbose) sed 's/%%VSN%%/$(PROJECT_VERSION)/' bin/rabbitmqadmin \
+ > $(EZ_DIR)/priv/www/cli/rabbitmqadmin
diff --git a/deps/rabbitmq_management/README.md b/deps/rabbitmq_management/README.md
new file mode 100644
index 0000000000..8073b31f95
--- /dev/null
+++ b/deps/rabbitmq_management/README.md
@@ -0,0 +1,21 @@
+[![Test Erlang 22.3](https://img.shields.io/github/workflow/status/rabbitmq/rabbitmq-management/Test%20-%20Erlang%2022.3/master?label=Erlang%2022.3)](https://github.com/rabbitmq/rabbitmq-management/actions?query=workflow%3A%22Test+-+Erlang+22.3%22+branch%3A%22master%22)
+[![Test Erlang 23](https://img.shields.io/github/workflow/status/rabbitmq/rabbitmq-management/Test%20-%20Erlang%2023.0/master?label=Erlang%2023.0)](https://github.com/rabbitmq/rabbitmq-management/actions?query=workflow%3A%22Test+-+Erlang+23.0%22+branch%3A%22master%22)
+
+# RabbitMQ Management Plugin
+
+This plugin provides a management UI and HTTP API for RabbitMQ.
+
+## Installation
+
+This plugin is included in the RabbitMQ distribution. Like all [plugins](https://www.rabbitmq.com/plugins.html),
+it has to be [enabled](https://www.rabbitmq.com/plugins.html#basics) before it can be used.
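+
+For example, assuming a standard installation with `rabbitmq-plugins` on the
+`PATH`, the plugin can be enabled with:
+
+```
+rabbitmq-plugins enable rabbitmq_management
+```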
+
+
+## Documentation
+
+ * [RabbitMQ management UI documentation](https://www.rabbitmq.com/management.html).
+ * [HTTP API documentation](https://www.rabbitmq.com/management.html#http-api) and [reference](https://raw.githack.com/rabbitmq/rabbitmq-management/rabbitmq_v3_6_9/priv/www/api/index.html)
+
+## Copyright
+
+(c) 2007-2020 VMware, Inc. or its affiliates.
diff --git a/deps/rabbitmq_management/bin/rabbitmqadmin b/deps/rabbitmq_management/bin/rabbitmqadmin
new file mode 100755
index 0000000000..90509627a1
--- /dev/null
+++ b/deps/rabbitmq_management/bin/rabbitmqadmin
@@ -0,0 +1,1184 @@
+#!/usr/bin/env python3
+
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at https://mozilla.org/MPL/2.0/.
+#
+# Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+from __future__ import print_function
+
+from optparse import OptionParser, TitledHelpFormatter
+
+import base64
+import copy
+import json
+import os
+import socket
+import ssl
+import traceback
+
+try:
+ from signal import signal, SIGPIPE, SIG_DFL
+ signal(SIGPIPE, SIG_DFL)
+except ImportError:
+ pass
+
+import sys
+
+
+def eprint(*args, **kwargs):
+ print(*args, file=sys.stderr, **kwargs)
+
+
+if sys.version_info[0] < 2 or (sys.version_info[0] == 2 and sys.version_info[1] < 6):
+ eprint("Sorry, rabbitmqadmin requires at least Python 2.6 (2.7.9 when HTTPS is enabled).")
+ sys.exit(1)
+
+if sys.version_info[0] == 2:
+ from ConfigParser import ConfigParser, NoSectionError
+ import httplib
+ import urlparse
+ from urllib import quote_plus
+ from urllib import quote
+
+ def b64(s):
+ return base64.b64encode(s)
+else:
+ from configparser import ConfigParser, NoSectionError
+ import http.client as httplib
+ import urllib.parse as urlparse
+ from urllib.parse import quote_plus
+ from urllib.parse import quote
+
+ def b64(s):
+ return base64.b64encode(s.encode('utf-8')).decode('utf-8')
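+    # For example, b64("guest:guest") returns "Z3Vlc3Q6Z3Vlc3Q=" under either
+    # definition; it is used below to build the HTTP Basic "Authorization"
+    # header.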
+
+if sys.version_info[0] == 2:
+ class ConnectionError(OSError):
+ pass
+
+ class ConnectionRefusedError(ConnectionError):
+ pass
+
+VERSION = '%%VSN%%'
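+# (the %%VSN%% placeholder is substituted with the real project version by the
+# prepare-dist target in the plugin Makefile)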
+
+LISTABLE = {'connections': {'vhost': False, 'cols': ['name', 'user', 'channels']},
+ 'channels': {'vhost': False, 'cols': ['name', 'user']},
+ 'consumers': {'vhost': True},
+ 'exchanges': {'vhost': True, 'cols': ['name', 'type']},
+ 'queues': {'vhost': True, 'cols': ['name', 'messages']},
+ 'bindings': {'vhost': True, 'cols': ['source', 'destination',
+ 'routing_key']},
+ 'users': {'vhost': False},
+ 'vhosts': {'vhost': False, 'cols': ['name', 'messages']},
+ 'permissions': {'vhost': False},
+ 'nodes': {'vhost': False, 'cols': ['name', 'type', 'mem_used']},
+ 'parameters': {'vhost': False, 'json': ['value']},
+ 'policies': {'vhost': False, 'json': ['definition']},
+ 'operator_policies': {'vhost': False, 'json': ['definition']},
+ 'vhost_limits': {'vhost': False, 'json': ['value']}}
+
+SHOWABLE = {'overview': {'vhost': False, 'cols': ['rabbitmq_version',
+ 'cluster_name',
+ 'queue_totals.messages',
+ 'object_totals.queues']}}
+
+PROMOTE_COLUMNS = ['vhost', 'name', 'type',
+ 'source', 'destination', 'destination_type', 'routing_key']
+
+URIS = {
+ 'exchange': '/exchanges/{vhost}/{name}',
+ 'queue': '/queues/{vhost}/{name}',
+ 'binding': '/bindings/{vhost}/e/{source}/{destination_char}/{destination}',
+ 'binding_del': '/bindings/{vhost}/e/{source}/{destination_char}/{destination}/{properties_key}',
+ 'vhost': '/vhosts/{name}',
+ 'user': '/users/{name}',
+ 'permission': '/permissions/{vhost}/{user}',
+ 'parameter': '/parameters/{component}/{vhost}/{name}',
+ 'policy': '/policies/{vhost}/{name}',
+ 'operator_policy': '/operator-policies/{vhost}/{name}',
+ 'vhost_limit': '/vhost-limits/{vhost}/{name}'
+ }
+
+
+def queue_upload_fixup(upload):
+ # rabbitmq/rabbitmq-management#761
+ #
+ # In general, the fixup_upload argument can be used to fixup/change the
+ # upload dict after all argument parsing is complete.
+ #
+ # This simplifies setting the queue type for a new queue by allowing the
+ # user to use a queue_type=quorum argument rather than the somewhat confusing
+ # arguments='{"x-queue-type":"quorum"}' parameter
+ #
+ if 'queue_type' in upload:
+ queue_type = upload.get('queue_type')
+ arguments = upload.get('arguments', {})
+ arguments['x-queue-type'] = queue_type
+ upload['arguments'] = arguments
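+        # For example, an upload of {'name': 'qq', 'queue_type': 'quorum'}
+        # leaves here with 'arguments' set to {'x-queue-type': 'quorum'}.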
+
+
+DECLARABLE = {
+ 'exchange': {'mandatory': ['name', 'type'],
+ 'json': ['arguments'],
+ 'optional': {'auto_delete': 'false', 'durable': 'true',
+ 'internal': 'false', 'arguments': {}}},
+ 'queue': {'mandatory': ['name'],
+ 'json': ['arguments'],
+ 'optional': {'auto_delete': 'false', 'durable': 'true',
+ 'arguments': {}, 'node': None, 'queue_type': None},
+ 'fixup_upload': queue_upload_fixup},
+ 'binding': {'mandatory': ['source', 'destination'],
+ 'json': ['arguments'],
+ 'optional': {'destination_type': 'queue',
+ 'routing_key': '', 'arguments': {}}},
+ 'vhost': {'mandatory': ['name'],
+ 'optional': {'tracing': None}},
+ 'user': {'mandatory': ['name', ['password', 'password_hash'], 'tags'],
+ 'optional': {'hashing_algorithm': None}},
+ 'permission': {'mandatory': ['vhost', 'user', 'configure', 'write', 'read'],
+ 'optional': {}},
+ 'parameter': {'mandatory': ['component', 'name', 'value'],
+ 'json': ['value'],
+ 'optional': {}},
+ # priority has to be converted to an integer
+ 'policy': {'mandatory': ['name', 'pattern', 'definition'],
+ 'json': ['definition', 'priority'],
+ 'optional': {'priority': 0, 'apply-to': None}},
+ 'operator_policy': {'mandatory': ['name', 'pattern', 'definition'],
+ 'json': ['definition', 'priority'],
+ 'optional': {'priority': 0, 'apply-to': None}},
+ 'vhost_limit': {'mandatory': ['vhost', 'name', 'value'],
+ 'json': ['value'],
+ 'optional': {}},
+ }
+
+DELETABLE = {
+ 'exchange': {'mandatory': ['name']},
+ 'queue': {'mandatory': ['name']},
+ 'binding': {'mandatory': ['source', 'destination_type', 'destination'],
+ 'optional': {'properties_key': '~'}},
+ 'vhost': {'mandatory': ['name']},
+ 'user': {'mandatory': ['name']},
+ 'permission': {'mandatory': ['vhost', 'user']},
+ 'parameter': {'mandatory': ['component', 'name']},
+ 'policy': {'mandatory': ['name']},
+ 'operator_policy': {'mandatory': ['name']},
+ 'vhost_limit': {'mandatory': ['vhost', 'name']}
+ }
+
+CLOSABLE = {
+ 'connection': {'mandatory': ['name'],
+ 'optional': {},
+ 'uri': '/connections/{name}'}
+ }
+
+PURGABLE = {
+ 'queue': {'mandatory': ['name'],
+ 'optional': {},
+ 'uri': '/queues/{vhost}/{name}/contents'}
+ }
+
+EXTRA_VERBS = {
+ 'publish': {'mandatory': ['routing_key'],
+ 'optional': {'payload': None,
+ 'properties': {},
+ 'exchange': 'amq.default',
+ 'payload_encoding': 'string'},
+ 'json': ['properties'],
+ 'uri': '/exchanges/{vhost}/{exchange}/publish'},
+ 'get': {'mandatory': ['queue'],
+ 'optional': {'count': '1', 'ackmode': 'ack_requeue_true',
+ 'payload_file': None, 'encoding': 'auto'},
+ 'uri': '/queues/{vhost}/{queue}/get'}
+}
+
+for k in DECLARABLE:
+ DECLARABLE[k]['uri'] = URIS[k]
+
+for k in DELETABLE:
+ DELETABLE[k]['uri'] = URIS[k]
+ DELETABLE[k]['optional'] = DELETABLE[k].get('optional', {})
+DELETABLE['binding']['uri'] = URIS['binding_del']
+
+
+def short_usage():
+ return "rabbitmqadmin [options] subcommand"
+
+
+def title(name):
+ return "\n%s\n%s\n\n" % (name, '=' * len(name))
+
+
+def subcommands_usage():
+ usage = """Usage
+=====
+ """ + short_usage() + """
+
+ where subcommand is one of:
+""" + title("Display")
+
+ for l in LISTABLE:
+ usage += " list {0} [<column>...]\n".format(l)
+ for s in SHOWABLE:
+ usage += " show {0} [<column>...]\n".format(s)
+ usage += title("Object Manipulation")
+ usage += fmt_usage_stanza(DECLARABLE, 'declare')
+ usage += fmt_usage_stanza(DELETABLE, 'delete')
+ usage += fmt_usage_stanza(CLOSABLE, 'close')
+ usage += fmt_usage_stanza(PURGABLE, 'purge')
+ usage += title("Broker Definitions")
+ usage += """ export <file>
+ import <file>
+"""
+ usage += title("Publishing and Consuming")
+ usage += fmt_usage_stanza(EXTRA_VERBS, '')
+ usage += """
+ * If payload is not specified on publish, standard input is used
+
+ * If payload_file is not specified on get, the payload will be shown on
+ standard output along with the message metadata
+
+ * If payload_file is specified on get, count must not be set
+"""
+ return usage
+
+
+def config_usage():
+ usage = "Usage\n=====\n" + short_usage()
+ usage += "\n" + title("Configuration File")
+ usage += """ It is possible to specify a configuration file from the command line.
+ Hosts can be configured easily in a configuration file and called
+ from the command line.
+"""
+ usage += title("Example")
+ usage += """ # rabbitmqadmin.conf.example START
+
+ [host_normal]
+ hostname = localhost
+ port = 15672
+ username = guest
+ password = guest
+ declare_vhost = / # Used as default for declare / delete only
+ vhost = / # Used as default for declare / delete / list
+
+ [host_ssl]
+ hostname = otherhost
+ port = 15672
+ username = guest
+ password = guest
+ ssl = True
+ ssl_key_file = /path/to/key.pem
+ ssl_cert_file = /path/to/cert.pem
+
+ # rabbitmqadmin.conf.example END
+"""
+ usage += title("Use")
+ usage += """ rabbitmqadmin -c rabbitmqadmin.conf.example -N host_normal ..."""
+ return usage
+
+
+def more_help():
+ return """
+More Help
+=========
+
+For more help use the help subcommand:
+
+ rabbitmqadmin help subcommands # For a list of available subcommands
+ rabbitmqadmin help config # For help with the configuration file
+"""
+
+
+def fmt_required_flag(val):
+ # when one of the options is required, e.g.
+ # password vs. password_hash
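+    # For example, ['password', 'password_hash'] comes back as
+    # "password=... OR password_hash"; fmt_usage_stanza appends the final
+    # "=..." so the usage line reads "password=... OR password_hash=...".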
+ if type(val) is list:
+ # flag1=... OR flag2=... OR flag3=...
+ return "=... OR ".join(val)
+ else:
+ return val
+
+
+def fmt_optional_flag(val):
+ return val
+
+
+def fmt_usage_stanza(root, verb):
+ def fmt_args(args):
+ res = " ".join(["{0}=...".format(fmt_required_flag(a)) for a in args['mandatory']])
+ opts = " ".join("{0}=...".format(fmt_optional_flag(o)) for o in args['optional'].keys())
+ if opts != "":
+ res += " [{0}]".format(opts)
+ return res
+
+ text = ""
+ if verb != "":
+ verb = " " + verb
+ for k in root.keys():
+ text += " {0} {1} {2}\n".format(verb, k, fmt_args(root[k]))
+ return text
+
+
+default_options = {"hostname": "localhost",
+ "port": "15672",
+ # default config file section name
+ "node": "default",
+ "path_prefix": "",
+ "declare_vhost": "/",
+ "username": "guest",
+ "password": "guest",
+ "ssl": False,
+ "request_timeout": 120,
+ "verbose": True,
+ "format": "table",
+ "depth": 1,
+ "bash_completion": False}
+
+
+class MyFormatter(TitledHelpFormatter):
+ def format_epilog(self, epilog):
+ return epilog
+
+
+parser = OptionParser(usage=short_usage(),
+ formatter=MyFormatter(),
+ epilog=more_help())
+
+
+def make_parser():
+ def add(*args, **kwargs):
+ key = kwargs['dest']
+ if key in default_options:
+ default = " [default: %s]" % default_options[key]
+ kwargs['help'] = kwargs['help'] + default
+ parser.add_option(*args, **kwargs)
+
+ add("-c", "--config", dest="config",
+ help="configuration file [default: ~/.rabbitmqadmin.conf]",
+ metavar="CONFIG")
+ add("-N", "--node", dest="node",
+ help="node described in the configuration file [default: 'default' only if configuration file is specified]",
+ metavar="NODE")
+ add("-H", "--host", dest="hostname",
+ help="connect to host HOST",
+ metavar="HOST")
+ add("-P", "--port", dest="port",
+ help="connect to port PORT",
+ metavar="PORT")
+ add("--path-prefix", dest="path_prefix",
+ help="use specific URI path prefix for the RabbitMQ HTTP API. /api and operation path will be appended to it. (default: blank string)")
+ add("-V", "--vhost", dest="vhost",
+ help="connect to vhost VHOST [default: all vhosts for list, '/' for declare]",
+ metavar="VHOST")
+ add("-u", "--username", dest="username",
+ help="connect using username USERNAME",
+ metavar="USERNAME")
+ add("-p", "--password", dest="password",
+ help="connect using password PASSWORD",
+ metavar="PASSWORD")
+ add("-U", "--base-uri", dest="base_uri",
+ help="connect using a base HTTP API URI. /api and operation path will be appended to it. Path will be ignored. --vhost has to be provided separately.",
+ metavar="URI")
+ add("-q", "--quiet", action="store_false", dest="verbose",
+ help="suppress status messages")
+ add("-s", "--ssl", action="store_true", dest="ssl",
+ help="connect with ssl")
+ add("--ssl-key-file", dest="ssl_key_file",
+ help="PEM format key file for SSL")
+ add("--ssl-cert-file", dest="ssl_cert_file",
+ help="PEM format certificate file for SSL")
+ add("--ssl-ca-cert-file", dest="ssl_ca_cert_file",
+ help="PEM format CA certificate file for SSL")
+ add("--ssl-disable-hostname-verification", dest="ssl_disable_hostname_verification",
+ help="Disables peer hostname verification", default=False, action="store_true")
+ add("-k", "--ssl-insecure", dest="ssl_insecure",
+ help="Disables all SSL validations like curl's '-k' argument", default=False, action="store_true")
+ add("-t", "--request-timeout", dest="request_timeout",
+ help="HTTP request timeout in seconds", type="int")
+ add("-f", "--format", dest="format",
+ help="format for listing commands - one of [" + ", ".join(FORMATS.keys()) + "]")
+ add("-S", "--sort", dest="sort", help="sort key for listing queries")
+ add("-R", "--sort-reverse", action="store_true", dest="sort_reverse",
+ help="reverse the sort order")
+ add("-d", "--depth", dest="depth",
+ help="maximum depth to recurse for listing tables")
+ add("--bash-completion", action="store_true",
+ dest="bash_completion",
+ help="Print bash completion script")
+ add("--version", action="store_true",
+ dest="version",
+ help="Display version and exit")
+
+
+def default_config():
+ home = os.getenv('USERPROFILE') or os.getenv('HOME')
+ if home is not None:
+ config_file = home + os.sep + ".rabbitmqadmin.conf"
+ if os.path.isfile(config_file):
+ return config_file
+ return None
+
+
+def make_configuration():
+ make_parser()
+ (cli_options, args) = parser.parse_args()
+
+ if cli_options.version:
+ print_version()
+
+ setattr(cli_options, "declare_vhost", None)
+ final_options = copy.copy(cli_options)
+
+ # Resolve config file path
+ if cli_options.config is None:
+ config_file = default_config()
+ if config_file is not None:
+ setattr(final_options, "config", config_file)
+ else:
+ if not os.path.isfile(cli_options.config):
+ assert_usage(False, "Could not read config file '%s'" % cli_options.config)
+
+ final_options = merge_default_options(cli_options, final_options)
+ final_options = merge_config_file_options(cli_options, final_options)
+ final_options = expand_base_uri_options(cli_options, final_options)
+
+ return (final_options, args)
+
+def merge_default_options(cli_options, final_options):
+ for (key, default_val) in default_options.items():
+ if getattr(cli_options, key) is None:
+ setattr(final_options, key, default_val)
+ return final_options
+
+def merge_config_file_options(cli_options, final_options):
+ # Parse config file and load it, making sure that CLI flags
+ # take precedence
+ if final_options.config is not None:
+ config_parser = ConfigParser()
+ try:
+ config_parser.read(final_options.config)
+ section_settings = dict(config_parser.items(final_options.node))
+ except NoSectionError as error:
+ # Report if an explicitly provided section (node) does not exist in the file
+ if final_options.node == "default":
+ pass
+ else:
+ msg = "Could not read section '%s' in config file '%s':\n %s" % (final_options.node, final_options.config, error)
+ assert_usage(False, msg)
+ else:
+ for key, section_val in section_settings.items():
+ # special case --ssl
+ if key == 'ssl':
+ setattr(final_options, key, section_val == "True")
+ else:
+ # if CLI options do not contain this key, set it from the config file
+ if getattr(cli_options, key) is None:
+ setattr(final_options, key, section_val)
+ return final_options
+
+def expand_base_uri_options(cli_options, final_options):
+ # if --base-uri is passed, set connection parameters from it
+ if final_options.base_uri is not None:
+ u = urlparse.urlparse(final_options.base_uri)
+ for key in ["hostname", "port", "username", "password"]:
+ if getattr(u, key) is not None:
+ setattr(final_options, key, getattr(u, key))
+
+ if u.path is not None and (u.path != "") and (u.path != "/"):
+ eprint("WARNING: path in --base-uri is ignored. Please specify --vhost and/or --path-prefix separately.\n")
+ return final_options
+
+def assert_usage(expr, error):
+ if not expr:
+ eprint("\nERROR: {0}\n".format(error))
+ eprint("{0} --help for help\n".format(os.path.basename(sys.argv[0])))
+ sys.exit(1)
+
+
+def print_version():
+ print("rabbitmqadmin {0}".format(VERSION))
+ sys.exit(0)
+
+
+def column_sort_key(col):
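+    # Promoted columns sort first, in their PROMOTE_COLUMNS order; all other
+    # columns follow alphabetically. For example, 'vhost' and 'name' always
+    # precede 'messages'.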
+ if col in PROMOTE_COLUMNS:
+ return (1, PROMOTE_COLUMNS.index(col))
+ else:
+ return (2, col)
+
+
+def main():
+ (options, args) = make_configuration()
+ if options.bash_completion:
+ print_bash_completion()
+ sys.exit(0)
+ assert_usage(len(args) > 0, 'Action not specified')
+ mgmt = Management(options, args[1:])
+ mode = "invoke_" + args[0]
+ assert_usage(hasattr(mgmt, mode),
+ 'Action {0} not understood'.format(args[0]))
+    method = getattr(mgmt, mode)
+ method()
+
+
+def die(s):
+ eprint("*** {0}\n".format(s))
+ sys.exit(1)
+
+
+def maybe_utf8(s):
+ if isinstance(s, int):
+        # s can also be an int, e.g. a message count
+ return str(s)
+ if isinstance(s, float):
+        # s can also be a float, e.g. a message rate
+ return str(s)
+ if sys.version_info[0] == 3:
+ # It will have an encoding, which Python will respect
+ return s
+ else:
+ # It won't have an encoding, and Python will pick ASCII by default
+ return s.encode('utf-8')
+
+
+class Management:
+ def __init__(self, options, args):
+ self.options = options
+ self.args = args
+
+ def get(self, path):
+ return self.http("GET", "%s/api%s" % (self.options.path_prefix, path), "")
+
+ def put(self, path, body):
+ return self.http("PUT", "%s/api%s" % (self.options.path_prefix, path), body)
+
+ def post(self, path, body):
+ return self.http("POST", "%s/api%s" % (self.options.path_prefix, path), body)
+
+ def delete(self, path):
+ return self.http("DELETE", "%s/api%s" % (self.options.path_prefix, path), "")
+
+ def __initialize_connection(self, hostname, port):
+ if self.options.ssl:
+ return self.__initialize_https_connection(hostname, port)
+ else:
+ return httplib.HTTPConnection(hostname, port, timeout=self.options.request_timeout)
+
+ def __initialize_https_connection(self, hostname, port):
+ # Python 2.7.9+
+ if hasattr(ssl, 'create_default_context'):
+ return httplib.HTTPSConnection(hostname, port, context=self.__initialize_tls_context())
+        # Python < 2.7.9; note: those versions still have SSLv3 enabled
+ # and other limitations. See rabbitmq/rabbitmq-management#225
+ else:
+ eprint("WARNING: rabbitmqadmin requires Python 2.7.9+ when HTTPS is used.")
+ return httplib.HTTPSConnection(hostname, port,
+ cert_file=self.options.ssl_cert_file,
+ key_file=self.options.ssl_key_file)
+
+ def __initialize_tls_context(self):
+ # Python 2.7.9+ only
+ ssl_ctx = ssl.create_default_context(ssl.Purpose.SERVER_AUTH)
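+        # create_default_context() disables SSLv3; clearing the OP_NO_SSLv3
+        # bit re-allows it where the linked OpenSSL still supports SSLv3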
+ ssl_ctx.options &= ~ssl.OP_NO_SSLv3
+
+ ssl_insecure = self.options.ssl_insecure
+ ssl_disable_hostname_verification = ssl_insecure or self.options.ssl_disable_hostname_verification
+ # Note: you must set check_hostname prior to verify_mode
+ if ssl_disable_hostname_verification:
+ ssl_ctx.check_hostname = False
+ if ssl_insecure:
+ ssl_ctx.verify_mode = ssl.CERT_NONE
+
+ if self.options.ssl_key_file:
+ ssl_ctx.load_cert_chain(self.options.ssl_cert_file,
+ self.options.ssl_key_file)
+ if self.options.ssl_ca_cert_file:
+ ssl_ctx.load_verify_locations(self.options.ssl_ca_cert_file)
+ return ssl_ctx
+
+ def http(self, method, path, body):
+ conn = self.__initialize_connection(self.options.hostname, self.options.port)
+ auth = (self.options.username + ":" + self.options.password)
+
+ headers = {"Authorization": "Basic " + b64(auth)}
+ if body != "":
+ headers["Content-Type"] = "application/json"
+ try:
+ conn.request(method, path, body, headers)
+ except ConnectionRefusedError as e:
+ die("Could not connect: {0}".format(e))
+ except socket.error as e:
+ traceback.print_exc()
+ die("Could not connect: {0}".format(e))
+ try:
+ resp = conn.getresponse()
+ except socket.timeout:
+ die("Timed out getting HTTP response (request timeout: {0} seconds)".format(
+ self.options.request_timeout))
+ except (KeyboardInterrupt, SystemExit):
+ raise
+ except (Exception):
+ e_fmt = traceback.format_exc()
+ die("Error getting HTTP response:\n\n{0}".format(e_fmt))
+ if resp.status == 400:
+ die(json.loads(resp.read())['reason'])
+ if resp.status == 401:
+ die("Access refused: {0}".format(path))
+ if resp.status == 404:
+ die("Not found: {0}".format(path))
+ if resp.status == 301:
+ url = urlparse.urlparse(resp.getheader('location'))
+ [host, port] = url.netloc.split(':')
+ self.options.hostname = host
+ self.options.port = int(port)
+ return self.http(method, url.path + '?' + url.query, body)
+ if resp.status > 400:
+ raise Exception("Received response %d %s for path %s\n%s"
+ % (resp.status, resp.reason, path, resp.read()))
+ return resp.read().decode('utf-8')
+
+ def verbose(self, string):
+ if self.options.verbose:
+ print(string)
+
+ def get_arg(self):
+ assert_usage(len(self.args) == 1, 'Exactly one argument required')
+ return self.args[0]
+
+ def use_cols(self):
+ # Deliberately do not cast to int here; we only care about the
+ # default, not explicit setting.
+ return self.options.depth == 1 and ('json' not in self.options.format)
+
+ def invoke_help(self):
+ if len(self.args) == 0:
+ parser.print_help()
+ else:
+ help_cmd = self.get_arg()
+ if help_cmd == 'subcommands':
+ usage = subcommands_usage()
+ elif help_cmd == 'config':
+ usage = config_usage()
+ else:
+ assert_usage(False, """help topic must be one of:
+ subcommands
+ config""")
+ print(usage)
+ sys.exit(0)
+
+ def invoke_publish(self):
+ (uri, upload) = self.parse_args(self.args, EXTRA_VERBS['publish'])
+ if 'payload' not in upload:
+ data = sys.stdin.read()
+ upload['payload'] = b64(data)
+ upload['payload_encoding'] = 'base64'
+ resp = json.loads(self.post(uri, json.dumps(upload)))
+ if resp['routed']:
+ self.verbose("Message published")
+ else:
+ self.verbose("Message published but NOT routed")
+
+ def invoke_get(self):
+ (uri, upload) = self.parse_args(self.args, EXTRA_VERBS['get'])
+ payload_file = 'payload_file' in upload and upload['payload_file'] or None
+ assert_usage(not payload_file or upload['count'] == '1',
+ 'Cannot get multiple messages using payload_file')
+ result = self.post(uri, json.dumps(upload))
+ if payload_file:
+ write_payload_file(payload_file, result)
+ columns = ['routing_key', 'exchange', 'message_count',
+ 'payload_bytes', 'redelivered']
+ format_list(result, columns, {}, self.options)
+ else:
+ format_list(result, [], {}, self.options)
+
+ def invoke_export(self):
+ path = self.get_arg()
+ uri = "/definitions"
+ if self.options.vhost:
+ uri += "/%s" % quote_plus(self.options.vhost)
+ definitions = self.get(uri)
+ with open(path, 'wb') as f:
+ f.write(definitions.encode())
+ self.verbose("Exported definitions for %s to \"%s\""
+ % (self.options.hostname, path))
+
+ def invoke_import(self):
+ path = self.get_arg()
+ with open(path, 'rb') as f:
+ definitions = f.read()
+ uri = "/definitions"
+ if self.options.vhost:
+ uri += "/%s" % quote_plus(self.options.vhost)
+ self.post(uri, definitions)
+ self.verbose("Uploaded definitions from \"%s\" to %s. The import process may take some time. Consult server logs to track progress."
+ % (self.options.hostname, path))
+
+ def invoke_list(self):
+ (uri, obj_info, cols) = self.list_show_uri(LISTABLE, 'list')
+ format_list(self.get(uri), cols, obj_info, self.options)
+
+ def invoke_show(self):
+ (uri, obj_info, cols) = self.list_show_uri(SHOWABLE, 'show')
+ format_list('[{0}]'.format(self.get(uri)), cols, obj_info, self.options)
+
+ def _list_path_for_obj_type(self, obj_type):
+ # This returns a URL path for given object type, e.g.
+ # replaces underscores in command names with
+ # dashes that HTTP API endpoints use
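+        # e.g. 'operator_policies' -> 'operator-policies' and
+        # 'vhost_limits' -> 'vhost-limits'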
+ return obj_type.replace("_", "-")
+
+ def list_show_uri(self, obj_types, verb):
+ obj_type = self.args[0]
+ assert_usage(obj_type in obj_types,
+ "Don't know how to {0} {1}".format(verb, obj_type))
+ obj_info = obj_types[obj_type]
+ uri = "/%s" % self._list_path_for_obj_type(obj_type)
+ query = []
+ if obj_info['vhost'] and self.options.vhost:
+ uri += "/%s" % quote_plus(self.options.vhost)
+ cols = self.args[1:]
+ if cols == [] and 'cols' in obj_info and self.use_cols():
+ cols = obj_info['cols']
+ if cols != []:
+ query.append("columns=" + ",".join(cols))
+ sort = self.options.sort
+ if sort:
+ query.append("sort=" + sort)
+ if self.options.sort_reverse:
+ query.append("sort_reverse=true")
+ query = "&".join(query)
+ if query != "":
+ uri += "?" + query
+ return (uri, obj_info, cols)
+
+ def invoke_declare(self):
+ (obj_type, uri, upload) = self.declare_delete_parse(DECLARABLE)
+ if obj_type == 'binding':
+ self.post(uri, json.dumps(upload))
+ else:
+ self.put(uri, json.dumps(upload))
+ self.verbose("{0} declared".format(obj_type))
+
+ def invoke_delete(self):
+ (obj_type, uri, upload) = self.declare_delete_parse(DELETABLE)
+ self.delete(uri)
+ self.verbose("{0} deleted".format(obj_type))
+
+ def invoke_close(self):
+ (obj_type, uri, upload) = self.declare_delete_parse(CLOSABLE)
+ self.delete(uri)
+ self.verbose("{0} closed".format(obj_type))
+
+ def invoke_purge(self):
+ (obj_type, uri, upload) = self.declare_delete_parse(PURGABLE)
+ self.delete(uri)
+ self.verbose("{0} purged".format(obj_type))
+
+ def declare_delete_parse(self, root):
+ assert_usage(len(self.args) > 0, 'Type not specified')
+ obj_type = self.args[0]
+ assert_usage(obj_type in root,
+ 'Type {0} not recognised'.format(obj_type))
+ obj = root[obj_type]
+ (uri, upload) = self.parse_args(self.args[1:], obj)
+ return (obj_type, uri, upload)
+
+ def assert_mandatory_keys(self, mandatory, upload):
+ for m in mandatory:
+ if type(m) is list:
+ a_set = set(m)
+ b_set = set(upload.keys())
+ assert_usage((a_set & b_set),
+ 'one of mandatory arguments "{0}" is required'.format(m))
+ else:
+ assert_usage(m in upload.keys(),
+ 'mandatory argument "{0}" is required'.format(m))
+
+ def parse_args(self, args, obj):
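+        # For example, `declare queue name=qq durable=false` with the default
+        # vhost '/' yields uri '/queues/%2F/qq' and an upload of
+        # {'name': 'qq', 'durable': 'false', 'auto_delete': 'false',
+        #  'arguments': {}, 'vhost': '/'}.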
+ mandatory = obj['mandatory']
+ optional = obj['optional']
+ uri_template = obj['uri']
+ upload = {}
+ for k in optional.keys():
+ if optional[k] is not None:
+ upload[k] = optional[k]
+ for arg in args:
+ assert_usage("=" in arg,
+ 'Argument "{0}" not in the name=value format'.format(arg))
+ (name, value) = arg.split("=", 1)
+ # flatten the list of mandatory keys
+ mandatory_keys = []
+ for key in mandatory:
+ if type(key) is list:
+ for subkey in key:
+ mandatory_keys.append(subkey)
+ else:
+ mandatory_keys.append(key)
+
+ assert_usage(name in mandatory_keys or name in optional.keys(),
+ 'Argument "{0}" is not recognised'.format(name))
+
+ if 'json' in obj and name in obj['json']:
+ upload[name] = self.parse_json(value)
+ else:
+ upload[name] = value
+ self.assert_mandatory_keys(mandatory, upload)
+ if 'vhost' not in mandatory:
+ upload['vhost'] = self.options.vhost or self.options.declare_vhost
+ uri_args = {}
+ for k in upload:
+ v = upload[k]
+ if v and isinstance(v, (str, bytes)):
+ uri_args[k] = quote(v, '')
+ if k == 'destination_type':
+ uri_args['destination_char'] = v[0]
+ uri = uri_template.format(**uri_args)
+ if 'fixup_upload' in obj:
+ fixup = obj['fixup_upload']
+ fixup(upload)
+ return (uri, upload)
+
+ def parse_json(self, text):
+ try:
+ return json.loads(text)
+ except ValueError:
+ eprint("ERROR: Could not parse JSON:\n {0}".format(text))
+ sys.exit(1)
+
+
+def format_list(json_list, columns, args, options):
+ format = options.format
+ formatter = None
+ if format == "raw_json":
+ print(json_list)
+ return
+ elif format == "pretty_json":
+ json_list_parsed = json.loads(json_list)
+ print(json.dumps(json_list_parsed,
+ skipkeys=False, ensure_ascii=False, check_circular=True,
+ allow_nan=True, sort_keys=True, indent=2))
+ return
+ else:
+ formatter = FORMATS[format]
+ assert_usage(formatter is not None,
+ "Format {0} not recognised".format(format))
+ formatter_instance = formatter(columns, args, options)
+ formatter_instance.display(json_list)
+
+
+class Lister:
+ def verbose(self, string):
+ if self.options.verbose:
+ print(string)
+
+ def display(self, json_list):
+ depth = sys.maxsize
+ if len(self.columns) == 0:
+ depth = int(self.options.depth)
+ (columns, table) = self.list_to_table(json.loads(json_list), depth)
+ if len(table) > 0:
+ self.display_list(columns, table)
+ else:
+ self.verbose("No items")
+
+ def list_to_table(self, items, max_depth):
+ columns = {}
+ column_ix = {}
+ row = None
+ table = []
+
+ def add(prefix, depth, item, fun):
+ for key in item:
+ column = prefix == '' and key or (prefix + '.' + key)
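+                # e.g. a nested 'messages' key under 'queue_totals' becomes
+                # the flattened column name 'queue_totals.messages'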
+ subitem = item[key]
+ if type(subitem) == dict:
+ if 'json' in self.obj_info and key in self.obj_info['json']:
+ fun(column, json.dumps(subitem))
+ else:
+ if depth < max_depth:
+ add(column, depth + 1, subitem, fun)
+ elif type(subitem) == list:
+ # The first branch has mirrors in queues in
+ # mind (which come out looking decent); the second
+ # one has applications in nodes (which look less
+ # so, but what would look good?).
+ if [x for x in subitem if type(x) != str] == []:
+ serialised = " ".join(subitem)
+ else:
+ serialised = json.dumps(subitem)
+ fun(column, serialised)
+ else:
+ fun(column, subitem)
+
+ def add_to_columns(col, val):
+ columns[col] = True
+
+ def add_to_row(col, val):
+ if col in column_ix:
+ if val is not None:
+ row[column_ix[col]] = maybe_utf8(val)
+ else:
+ row[column_ix[col]] = None
+
+ if len(self.columns) == 0:
+ for item in items:
+ add('', 1, item, add_to_columns)
+ columns = list(columns.keys())
+ columns.sort(key=column_sort_key)
+ else:
+ columns = self.columns
+
+ for i in range(0, len(columns)):
+ column_ix[columns[i]] = i
+ for item in items:
+ row = len(columns) * ['']
+ add('', 1, item, add_to_row)
+ table.append(row)
+
+ return (columns, table)
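+
+ # Illustrative sketch: an item such as
+ # {'name': 'q1', 'messages': 3, 'arguments': {'x-expires': 60000}}
+ # flattens (up to max_depth) into dotted columns
+ # name, messages, arguments.x-expires
+ # while lists of strings are joined with spaces and anything
+ # deeper is serialised back to JSON.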
+
+
+class TSVList(Lister):
+ def __init__(self, columns, obj_info, options):
+ self.columns = columns
+ self.obj_info = obj_info
+ self.options = options
+
+ def display_list(self, columns, table):
+ head = "\t".join(columns)
+ self.verbose(head)
+
+ for row in table:
+ line = "\t".join(row)
+ print(line)
+
+
+class LongList(Lister):
+ def __init__(self, columns, obj_info, options):
+ self.columns = columns
+ self.obj_info = obj_info
+ self.options = options
+
+ def display_list(self, columns, table):
+ sep = "\n" + "-" * 80 + "\n"
+ max_width = 0
+ for col in columns:
+ max_width = max(max_width, len(col))
+ fmt = "{0:>" + str(max_width) + "}: {1}"
+ print(sep)
+ for i in range(0, len(table)):
+ for j in range(0, len(columns)):
+ print(fmt.format(columns[j], table[i][j]))
+ print(sep)
+
+
+class TableList(Lister):
+ def __init__(self, columns, obj_info, options):
+ self.columns = columns
+ self.obj_info = obj_info
+ self.options = options
+
+ def display_list(self, columns, table):
+ total = [columns]
+ total.extend(table)
+ self.ascii_table(total)
+
+ def ascii_table(self, rows):
+ col_widths = [0] * len(rows[0])
+ for i in range(0, len(rows[0])):
+ for j in range(0, len(rows)):
+ col_widths[i] = max(col_widths[i], len(rows[j][i]))
+ self.ascii_bar(col_widths)
+ self.ascii_row(col_widths, rows[0], "^")
+ self.ascii_bar(col_widths)
+ for row in rows[1:]:
+ self.ascii_row(col_widths, row, "<")
+ self.ascii_bar(col_widths)
+
+ def ascii_row(self, col_widths, row, align):
+ txt = "|"
+ for i in range(0, len(col_widths)):
+ fmt = " {0:" + align + str(col_widths[i]) + "} "
+ txt += fmt.format(row[i]) + "|"
+ print(txt)
+
+ def ascii_bar(self, col_widths):
+ txt = "+"
+ for w in col_widths:
+ txt += ("-" * (w + 2)) + "+"
+ print(txt)
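+
+ # Illustrative output for columns ['name', 'messages'] and a single
+ # row ['q1', '3']:
+ # +------+----------+
+ # | name | messages |
+ # +------+----------+
+ # | q1   | 3        |
+ # +------+----------+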
+
+
+class KeyValueList(Lister):
+ def __init__(self, columns, obj_info, options):
+ self.columns = columns
+ self.obj_info = obj_info
+ self.options = options
+
+ def display_list(self, columns, table):
+ for i in range(0, len(table)):
+ row = []
+ for j in range(0, len(columns)):
+ row.append("{0}=\"{1}\"".format(columns[j], table[i][j]))
+ print(" ".join(row))
+
+
+# TODO handle spaces etc in completable names
+class BashList(Lister):
+ def __init__(self, columns, obj_info, options):
+ self.columns = columns
+ self.obj_info = obj_info
+ self.options = options
+
+ def display_list(self, columns, table):
+ ix = None
+ for i in range(0, len(columns)):
+ if columns[i] == 'name':
+ ix = i
+ if ix is not None:
+ res = []
+ for row in table:
+ res.append(row[ix])
+ print(" ".join(res))
+
+
+FORMATS = {
+ # Special cased
+ 'raw_json': None,
+ # Ditto
+ 'pretty_json': None,
+ 'tsv': TSVList,
+ 'long': LongList,
+ 'table': TableList,
+ 'kvp': KeyValueList,
+ 'bash': BashList
+}
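+
+# For example, "-f kvp" renders each row as name="q1" messages="3",
+# while "-f bash" prints just the name column; the completion script
+# below relies on the latter to feed compgen.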
+
+
+def write_payload_file(payload_file, json_list):
+ result = json.loads(json_list)[0]
+ payload = result['payload']
+ payload_encoding = result['payload_encoding']
+ with open(payload_file, 'wb') as f:
+ if payload_encoding == 'base64':
+ # b64decode already yields bytes; only a plain-text
+ # payload still needs encoding for the binary-mode write.
+ data = base64.b64decode(payload)
+ else:
+ data = payload.encode("utf-8")
+ f.write(data)
+
+
+def print_bash_completion():
+ script = """# This is a bash completion script for rabbitmqadmin.
+# Redirect it to a file, then source it or copy it to /etc/bash_completion.d
+# to get tab completion. rabbitmqadmin must be on your PATH for this to work.
+_rabbitmqadmin()
+{
+ local cur prev opts base
+ COMPREPLY=()
+ cur="${COMP_WORDS[COMP_CWORD]}"
+ prev="${COMP_WORDS[COMP_CWORD-1]}"
+
+ opts="list show declare delete close purge import export get publish help"
+ fargs="--help --host --port --vhost --username --password --format --depth --sort --sort-reverse"
+
+ case "${prev}" in
+ list)
+ COMPREPLY=( $(compgen -W '""" + " ".join(LISTABLE) + """' -- ${cur}) )
+ return 0
+ ;;
+ show)
+ COMPREPLY=( $(compgen -W '""" + " ".join(SHOWABLE) + """' -- ${cur}) )
+ return 0
+ ;;
+ declare)
+ COMPREPLY=( $(compgen -W '""" + " ".join(DECLARABLE.keys()) + """' -- ${cur}) )
+ return 0
+ ;;
+ delete)
+ COMPREPLY=( $(compgen -W '""" + " ".join(DELETABLE.keys()) + """' -- ${cur}) )
+ return 0
+ ;;
+ close)
+ COMPREPLY=( $(compgen -W '""" + " ".join(CLOSABLE.keys()) + """' -- ${cur}) )
+ return 0
+ ;;
+ purge)
+ COMPREPLY=( $(compgen -W '""" + " ".join(PURGABLE.keys()) + """' -- ${cur}) )
+ return 0
+ ;;
+ export)
+ COMPREPLY=( $(compgen -f ${cur}) )
+ return 0
+ ;;
+ import)
+ COMPREPLY=( $(compgen -f ${cur}) )
+ return 0
+ ;;
+ help)
+ opts="subcommands config"
+ COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
+ return 0
+ ;;
+ -H)
+ COMPREPLY=( $(compgen -A hostname ${cur}) )
+ return 0
+ ;;
+ --host)
+ COMPREPLY=( $(compgen -A hostname ${cur}) )
+ return 0
+ ;;
+ -V)
+ opts="$(rabbitmqadmin -q -f bash list vhosts)"
+ COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
+ return 0
+ ;;
+ --vhost)
+ opts="$(rabbitmqadmin -q -f bash list vhosts)"
+ COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
+ return 0
+ ;;
+ -u)
+ opts="$(rabbitmqadmin -q -f bash list users)"
+ COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
+ return 0
+ ;;
+ --username)
+ opts="$(rabbitmqadmin -q -f bash list users)"
+ COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
+ return 0
+ ;;
+ -f)
+ COMPREPLY=( $(compgen -W \"""" + " ".join(FORMATS.keys()) + """\" -- ${cur}) )
+ return 0
+ ;;
+ --format)
+ COMPREPLY=( $(compgen -W \"""" + " ".join(FORMATS.keys()) + """\" -- ${cur}) )
+ return 0
+ ;;
+
+"""
+ for l in LISTABLE:
+ key = l[0:len(l) - 1]
+ script += " " + key + """)
+ opts="$(rabbitmqadmin -q -f bash list """ + l + """)"
+ COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
+ return 0
+ ;;
+"""
+ script += """ *)
+ ;;
+ esac
+
+ COMPREPLY=($(compgen -W "${opts} ${fargs}" -- ${cur}))
+ return 0
+}
+complete -F _rabbitmqadmin rabbitmqadmin
+"""
+ print(script)
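+
+# Typical use, assuming the --bash-completion flag wired up elsewhere
+# in this script:
+# rabbitmqadmin --bash-completion > /etc/bash_completion.d/rabbitmqadmin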
+
+
+if __name__ == "__main__":
+ main()
diff --git a/deps/rabbitmq_management/erlang.mk b/deps/rabbitmq_management/erlang.mk
new file mode 100644
index 0000000000..fce4be0b0a
--- /dev/null
+++ b/deps/rabbitmq_management/erlang.mk
@@ -0,0 +1,7808 @@
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+.PHONY: all app apps deps search rel relup docs install-docs check tests clean distclean help erlang-mk
+
+ERLANG_MK_FILENAME := $(realpath $(lastword $(MAKEFILE_LIST)))
+export ERLANG_MK_FILENAME
+
+ERLANG_MK_VERSION = 2019.07.01-40-geb3e4b0
+ERLANG_MK_WITHOUT =
+
+# Make 3.81 and 3.82 are deprecated.
+
+ifeq ($(MAKELEVEL)$(MAKE_VERSION),03.81)
+$(warning Please upgrade to GNU Make 4 or later: https://erlang.mk/guide/installation.html)
+endif
+
+ifeq ($(MAKELEVEL)$(MAKE_VERSION),03.82)
+$(warning Please upgrade to GNU Make 4 or later: https://erlang.mk/guide/installation.html)
+endif
+
+# Core configuration.
+
+PROJECT ?= $(notdir $(CURDIR))
+PROJECT := $(strip $(PROJECT))
+
+PROJECT_VERSION ?= rolling
+PROJECT_MOD ?= $(PROJECT)_app
+PROJECT_ENV ?= []
+
+# Verbosity.
+
+V ?= 0
+
+verbose_0 = @
+verbose_2 = set -x;
+verbose = $(verbose_$(V))
+
+ifeq ($(V),3)
+SHELL := $(SHELL) -x
+endif
+
+gen_verbose_0 = @echo " GEN " $@;
+gen_verbose_2 = set -x;
+gen_verbose = $(gen_verbose_$(V))
+
+gen_verbose_esc_0 = @echo " GEN " $$@;
+gen_verbose_esc_2 = set -x;
+gen_verbose_esc = $(gen_verbose_esc_$(V))
+
+# Temporary files directory.
+
+ERLANG_MK_TMP ?= $(CURDIR)/.erlang.mk
+export ERLANG_MK_TMP
+
+# "erl" command.
+
+ERL = erl +A1 -noinput -boot no_dot_erlang
+
+# Platform detection.
+
+ifeq ($(PLATFORM),)
+UNAME_S := $(shell uname -s)
+
+ifeq ($(UNAME_S),Linux)
+PLATFORM = linux
+else ifeq ($(UNAME_S),Darwin)
+PLATFORM = darwin
+else ifeq ($(UNAME_S),SunOS)
+PLATFORM = solaris
+else ifeq ($(UNAME_S),GNU)
+PLATFORM = gnu
+else ifeq ($(UNAME_S),FreeBSD)
+PLATFORM = freebsd
+else ifeq ($(UNAME_S),NetBSD)
+PLATFORM = netbsd
+else ifeq ($(UNAME_S),OpenBSD)
+PLATFORM = openbsd
+else ifeq ($(UNAME_S),DragonFly)
+PLATFORM = dragonfly
+else ifeq ($(shell uname -o),Msys)
+PLATFORM = msys2
+else
+$(error Unable to detect platform. Please open a ticket with the output of uname -a.)
+endif
+
+export PLATFORM
+endif
+
+# Core targets.
+
+all:: deps app rel
+
+# Noop to avoid a Make warning when there's nothing to do.
+rel::
+ $(verbose) :
+
+relup:: deps app
+
+check:: tests
+
+clean:: clean-crashdump
+
+clean-crashdump:
+ifneq ($(wildcard erl_crash.dump),)
+ $(gen_verbose) rm -f erl_crash.dump
+endif
+
+distclean:: clean distclean-tmp
+
+$(ERLANG_MK_TMP):
+ $(verbose) mkdir -p $(ERLANG_MK_TMP)
+
+distclean-tmp:
+ $(gen_verbose) rm -rf $(ERLANG_MK_TMP)
+
+help::
+ $(verbose) printf "%s\n" \
+ "erlang.mk (version $(ERLANG_MK_VERSION)) is distributed under the terms of the ISC License." \
+ "Copyright (c) 2013-2016 Loïc Hoguin <essen@ninenines.eu>" \
+ "" \
+ "Usage: [V=1] $(MAKE) [target]..." \
+ "" \
+ "Core targets:" \
+ " all Run deps, app and rel targets in that order" \
+ " app Compile the project" \
+ " deps Fetch dependencies (if needed) and compile them" \
+ " fetch-deps Fetch dependencies recursively (if needed) without compiling them" \
+ " list-deps List dependencies recursively on stdout" \
+ " search q=... Search for a package in the built-in index" \
+ " rel Build a release for this project, if applicable" \
+ " docs Build the documentation for this project" \
+ " install-docs Install the man pages for this project" \
+ " check Compile and run all tests and analysis for this project" \
+ " tests Run the tests for this project" \
+ " clean Delete temporary and output files from most targets" \
+ " distclean Delete all temporary and output files" \
+ " help Display this help and exit" \
+ " erlang-mk Update erlang.mk to the latest version"
+
+# Core functions.
+
+empty :=
+space := $(empty) $(empty)
+tab := $(empty) $(empty)
+comma := ,
+
+define newline
+
+
+endef
+
+define comma_list
+$(subst $(space),$(comma),$(strip $(1)))
+endef
+
+define escape_dquotes
+$(subst ",\",$1)
+endef
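+
+# Illustrative expansions of the helpers above (not used by the build
+# itself):
+# $(call comma_list,a b c) -> a,b,c
+# $(call escape_dquotes,say "hi") -> say \"hi\"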
+
+# Adding erlang.mk to make Erlang scripts that call init:get_plain_arguments() happy.
+define erlang
+$(ERL) $2 -pz $(ERLANG_MK_TMP)/rebar/ebin -eval "$(subst $(newline),,$(call escape_dquotes,$1))" -- erlang.mk
+endef
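+
+# Illustrative use of the macro above: a recipe line such as
+# $(call erlang,halt(0).)
+# runs the quoted expression in a one-off "$(ERL)" VM and exits.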
+
+ifeq ($(PLATFORM),msys2)
+core_native_path = $(shell cygpath -m $1)
+else
+core_native_path = $1
+endif
+
+core_http_get = curl -Lf$(if $(filter-out 0,$(V)),,s)o $(call core_native_path,$1) $2
+
+core_eq = $(and $(findstring $(1),$(2)),$(findstring $(2),$(1)))
+
+# We skip files that contain spaces because they end up causing issues.
+core_find = $(if $(wildcard $1),$(shell find $(1:%/=%) \( -type l -o -type f \) -name $(subst *,\*,$2) | grep -v " "))
+
+core_lc = $(subst A,a,$(subst B,b,$(subst C,c,$(subst D,d,$(subst E,e,$(subst F,f,$(subst G,g,$(subst H,h,$(subst I,i,$(subst J,j,$(subst K,k,$(subst L,l,$(subst M,m,$(subst N,n,$(subst O,o,$(subst P,p,$(subst Q,q,$(subst R,r,$(subst S,s,$(subst T,t,$(subst U,u,$(subst V,v,$(subst W,w,$(subst X,x,$(subst Y,y,$(subst Z,z,$(1)))))))))))))))))))))))))))
+
+core_ls = $(filter-out $(1),$(shell echo $(1)))
+
+# @todo Use a solution that does not require using perl.
+core_relpath = $(shell perl -e 'use File::Spec; print File::Spec->abs2rel(@ARGV) . "\n"' $1 $2)
+
+define core_render
+ printf -- '$(subst $(newline),\n,$(subst %,%%,$(subst ','\'',$(subst $(tab),$(WS),$(call $(1))))))\n' > $(2)
+endef
+
+# Automated update.
+
+ERLANG_MK_REPO ?= https://github.com/ninenines/erlang.mk
+ERLANG_MK_COMMIT ?=
+ERLANG_MK_BUILD_CONFIG ?= build.config
+ERLANG_MK_BUILD_DIR ?= .erlang.mk.build
+
+erlang-mk: WITHOUT ?= $(ERLANG_MK_WITHOUT)
+erlang-mk:
+ifdef ERLANG_MK_COMMIT
+ $(verbose) git clone $(ERLANG_MK_REPO) $(ERLANG_MK_BUILD_DIR)
+ $(verbose) cd $(ERLANG_MK_BUILD_DIR) && git checkout $(ERLANG_MK_COMMIT)
+else
+ $(verbose) git clone --depth 1 $(ERLANG_MK_REPO) $(ERLANG_MK_BUILD_DIR)
+endif
+ $(verbose) if [ -f $(ERLANG_MK_BUILD_CONFIG) ]; then cp $(ERLANG_MK_BUILD_CONFIG) $(ERLANG_MK_BUILD_DIR)/build.config; fi
+ $(gen_verbose) $(MAKE) --no-print-directory -C $(ERLANG_MK_BUILD_DIR) WITHOUT='$(strip $(WITHOUT))' UPGRADE=1
+ $(verbose) cp $(ERLANG_MK_BUILD_DIR)/erlang.mk ./erlang.mk
+ $(verbose) rm -rf $(ERLANG_MK_BUILD_DIR)
+ $(verbose) rm -rf $(ERLANG_MK_TMP)
+
+# The erlang.mk package index is bundled in the default erlang.mk build.
+# Search for the string "copyright" to skip to the rest of the code.
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-kerl
+
+KERL_INSTALL_DIR ?= $(HOME)/erlang
+
+ifeq ($(strip $(KERL)),)
+KERL := $(ERLANG_MK_TMP)/kerl/kerl
+endif
+
+KERL_DIR = $(ERLANG_MK_TMP)/kerl
+
+export KERL
+
+KERL_GIT ?= https://github.com/kerl/kerl
+KERL_COMMIT ?= master
+
+KERL_MAKEFLAGS ?=
+
+OTP_GIT ?= https://github.com/erlang/otp
+
+define kerl_otp_target
+$(KERL_INSTALL_DIR)/$(1): $(KERL)
+ $(verbose) if [ ! -d $$@ ]; then \
+ MAKEFLAGS="$(KERL_MAKEFLAGS)" $(KERL) build git $(OTP_GIT) $(1) $(1); \
+ $(KERL) install $(1) $(KERL_INSTALL_DIR)/$(1); \
+ fi
+endef
+
+define kerl_hipe_target
+$(KERL_INSTALL_DIR)/$1-native: $(KERL)
+ $(verbose) if [ ! -d $$@ ]; then \
+ KERL_CONFIGURE_OPTIONS=--enable-native-libs \
+ MAKEFLAGS="$(KERL_MAKEFLAGS)" $(KERL) build git $(OTP_GIT) $1 $1-native; \
+ $(KERL) install $1-native $(KERL_INSTALL_DIR)/$1-native; \
+ fi
+endef
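+
+# Illustrative: $(eval $(call kerl_otp_target,OTP-22.3)) (done further
+# down when ERLANG_OTP is set) creates a $(KERL_INSTALL_DIR)/OTP-22.3
+# target that builds and installs that release via kerl on first use.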
+
+$(KERL): $(KERL_DIR)
+
+$(KERL_DIR): | $(ERLANG_MK_TMP)
+ $(gen_verbose) git clone --depth 1 $(KERL_GIT) $(ERLANG_MK_TMP)/kerl
+ $(verbose) cd $(ERLANG_MK_TMP)/kerl && git checkout $(KERL_COMMIT)
+ $(verbose) chmod +x $(KERL)
+
+distclean:: distclean-kerl
+
+distclean-kerl:
+ $(gen_verbose) rm -rf $(KERL_DIR)
+
+# Allow users to select which version of Erlang/OTP to use for a project.
+
+ifneq ($(strip $(LATEST_ERLANG_OTP)),)
+# In some environments it is necessary to filter out master.
+ERLANG_OTP := $(notdir $(lastword $(sort\
+ $(filter-out $(KERL_INSTALL_DIR)/master $(KERL_INSTALL_DIR)/OTP_R%,\
+ $(filter-out %-rc1 %-rc2 %-rc3,$(wildcard $(KERL_INSTALL_DIR)/*[^-native]))))))
+endif
+
+ERLANG_OTP ?=
+ERLANG_HIPE ?=
+
+# Use kerl to enforce a specific Erlang/OTP version for a project.
+ifneq ($(strip $(ERLANG_OTP)),)
+export PATH := $(KERL_INSTALL_DIR)/$(ERLANG_OTP)/bin:$(PATH)
+SHELL := env PATH=$(PATH) $(SHELL)
+$(eval $(call kerl_otp_target,$(ERLANG_OTP)))
+
+# Build Erlang/OTP only if it doesn't already exist.
+ifeq ($(wildcard $(KERL_INSTALL_DIR)/$(ERLANG_OTP))$(BUILD_ERLANG_OTP),)
+$(info Building Erlang/OTP $(ERLANG_OTP)... Please wait...)
+$(shell $(MAKE) $(KERL_INSTALL_DIR)/$(ERLANG_OTP) ERLANG_OTP=$(ERLANG_OTP) BUILD_ERLANG_OTP=1 >&2)
+endif
+
+else
+# Same for a HiPE-enabled VM.
+ifneq ($(strip $(ERLANG_HIPE)),)
+export PATH := $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native/bin:$(PATH)
+SHELL := env PATH=$(PATH) $(SHELL)
+$(eval $(call kerl_hipe_target,$(ERLANG_HIPE)))
+
+# Build Erlang/OTP only if it doesn't already exist.
+ifeq ($(wildcard $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native)$(BUILD_ERLANG_OTP),)
+$(info Building HiPE-enabled Erlang/OTP $(ERLANG_HIPE)... Please wait...)
+$(shell $(MAKE) $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native ERLANG_HIPE=$(ERLANG_HIPE) BUILD_ERLANG_OTP=1 >&2)
+endif
+
+endif
+endif
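+
+# Illustrative: pin a project to a kerl-built OTP with, for example,
+# make ERLANG_OTP=OTP-22.3
+# (any name kerl can build from the OTP git repository works here).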
+
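+# The index below is plain data: a project Makefile can pull any entry
+# in with a line such as "DEPS = cowboy", and erlang.mk resolves it
+# through the corresponding pkg_*_fetch/repo/commit values.
+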
+PACKAGES += aberth
+pkg_aberth_name = aberth
+pkg_aberth_description = Generic BERT-RPC server in Erlang
+pkg_aberth_homepage = https://github.com/a13x/aberth
+pkg_aberth_fetch = git
+pkg_aberth_repo = https://github.com/a13x/aberth
+pkg_aberth_commit = master
+
+PACKAGES += active
+pkg_active_name = active
+pkg_active_description = Active development for Erlang: rebuild and reload source/binary files while the VM is running
+pkg_active_homepage = https://github.com/proger/active
+pkg_active_fetch = git
+pkg_active_repo = https://github.com/proger/active
+pkg_active_commit = master
+
+PACKAGES += actordb_core
+pkg_actordb_core_name = actordb_core
+pkg_actordb_core_description = ActorDB main source
+pkg_actordb_core_homepage = http://www.actordb.com/
+pkg_actordb_core_fetch = git
+pkg_actordb_core_repo = https://github.com/biokoda/actordb_core
+pkg_actordb_core_commit = master
+
+PACKAGES += actordb_thrift
+pkg_actordb_thrift_name = actordb_thrift
+pkg_actordb_thrift_description = Thrift API for ActorDB
+pkg_actordb_thrift_homepage = http://www.actordb.com/
+pkg_actordb_thrift_fetch = git
+pkg_actordb_thrift_repo = https://github.com/biokoda/actordb_thrift
+pkg_actordb_thrift_commit = master
+
+PACKAGES += aleppo
+pkg_aleppo_name = aleppo
+pkg_aleppo_description = Alternative Erlang Pre-Processor
+pkg_aleppo_homepage = https://github.com/ErlyORM/aleppo
+pkg_aleppo_fetch = git
+pkg_aleppo_repo = https://github.com/ErlyORM/aleppo
+pkg_aleppo_commit = master
+
+PACKAGES += alog
+pkg_alog_name = alog
+pkg_alog_description = Simply the best logging framework for Erlang
+pkg_alog_homepage = https://github.com/siberian-fast-food/alogger
+pkg_alog_fetch = git
+pkg_alog_repo = https://github.com/siberian-fast-food/alogger
+pkg_alog_commit = master
+
+PACKAGES += amqp_client
+pkg_amqp_client_name = amqp_client
+pkg_amqp_client_description = RabbitMQ Erlang AMQP client
+pkg_amqp_client_homepage = https://www.rabbitmq.com/erlang-client-user-guide.html
+pkg_amqp_client_fetch = git
+pkg_amqp_client_repo = https://github.com/rabbitmq/rabbitmq-erlang-client.git
+pkg_amqp_client_commit = master
+
+PACKAGES += annotations
+pkg_annotations_name = annotations
+pkg_annotations_description = Simple code instrumentation utilities
+pkg_annotations_homepage = https://github.com/hyperthunk/annotations
+pkg_annotations_fetch = git
+pkg_annotations_repo = https://github.com/hyperthunk/annotations
+pkg_annotations_commit = master
+
+PACKAGES += antidote
+pkg_antidote_name = antidote
+pkg_antidote_description = Large-scale computation without synchronisation
+pkg_antidote_homepage = https://syncfree.lip6.fr/
+pkg_antidote_fetch = git
+pkg_antidote_repo = https://github.com/SyncFree/antidote
+pkg_antidote_commit = master
+
+PACKAGES += apns
+pkg_apns_name = apns
+pkg_apns_description = Apple Push Notification Server for Erlang
+pkg_apns_homepage = http://inaka.github.com/apns4erl
+pkg_apns_fetch = git
+pkg_apns_repo = https://github.com/inaka/apns4erl
+pkg_apns_commit = master
+
+PACKAGES += asciideck
+pkg_asciideck_name = asciideck
+pkg_asciideck_description = Asciidoc for Erlang.
+pkg_asciideck_homepage = https://ninenines.eu
+pkg_asciideck_fetch = git
+pkg_asciideck_repo = https://github.com/ninenines/asciideck
+pkg_asciideck_commit = master
+
+PACKAGES += azdht
+pkg_azdht_name = azdht
+pkg_azdht_description = Azureus Distributed Hash Table (DHT) in Erlang
+pkg_azdht_homepage = https://github.com/arcusfelis/azdht
+pkg_azdht_fetch = git
+pkg_azdht_repo = https://github.com/arcusfelis/azdht
+pkg_azdht_commit = master
+
+PACKAGES += backoff
+pkg_backoff_name = backoff
+pkg_backoff_description = Simple exponential backoffs in Erlang
+pkg_backoff_homepage = https://github.com/ferd/backoff
+pkg_backoff_fetch = git
+pkg_backoff_repo = https://github.com/ferd/backoff
+pkg_backoff_commit = master
+
+PACKAGES += barrel_tcp
+pkg_barrel_tcp_name = barrel_tcp
+pkg_barrel_tcp_description = barrel is a generic TCP acceptor pool with low latency in Erlang.
+pkg_barrel_tcp_homepage = https://github.com/benoitc-attic/barrel_tcp
+pkg_barrel_tcp_fetch = git
+pkg_barrel_tcp_repo = https://github.com/benoitc-attic/barrel_tcp
+pkg_barrel_tcp_commit = master
+
+PACKAGES += basho_bench
+pkg_basho_bench_name = basho_bench
+pkg_basho_bench_description = A load-generation and testing tool for basically whatever you can write a returning Erlang function for.
+pkg_basho_bench_homepage = https://github.com/basho/basho_bench
+pkg_basho_bench_fetch = git
+pkg_basho_bench_repo = https://github.com/basho/basho_bench
+pkg_basho_bench_commit = master
+
+PACKAGES += bcrypt
+pkg_bcrypt_name = bcrypt
+pkg_bcrypt_description = Bcrypt Erlang / C library
+pkg_bcrypt_homepage = https://github.com/erlangpack/bcrypt
+pkg_bcrypt_fetch = git
+pkg_bcrypt_repo = https://github.com/erlangpack/bcrypt.git
+pkg_bcrypt_commit = master
+
+PACKAGES += beam
+pkg_beam_name = beam
+pkg_beam_description = BEAM emulator written in Erlang
+pkg_beam_homepage = https://github.com/tonyrog/beam
+pkg_beam_fetch = git
+pkg_beam_repo = https://github.com/tonyrog/beam
+pkg_beam_commit = master
+
+PACKAGES += beanstalk
+pkg_beanstalk_name = beanstalk
+pkg_beanstalk_description = An Erlang client for beanstalkd
+pkg_beanstalk_homepage = https://github.com/tim/erlang-beanstalk
+pkg_beanstalk_fetch = git
+pkg_beanstalk_repo = https://github.com/tim/erlang-beanstalk
+pkg_beanstalk_commit = master
+
+PACKAGES += bear
+pkg_bear_name = bear
+pkg_bear_description = a set of statistics functions for erlang
+pkg_bear_homepage = https://github.com/boundary/bear
+pkg_bear_fetch = git
+pkg_bear_repo = https://github.com/boundary/bear
+pkg_bear_commit = master
+
+PACKAGES += bertconf
+pkg_bertconf_name = bertconf
+pkg_bertconf_description = Make ETS tables out of static BERT files that are auto-reloaded
+pkg_bertconf_homepage = https://github.com/ferd/bertconf
+pkg_bertconf_fetch = git
+pkg_bertconf_repo = https://github.com/ferd/bertconf
+pkg_bertconf_commit = master
+
+PACKAGES += bifrost
+pkg_bifrost_name = bifrost
+pkg_bifrost_description = Erlang FTP Server Framework
+pkg_bifrost_homepage = https://github.com/thorstadt/bifrost
+pkg_bifrost_fetch = git
+pkg_bifrost_repo = https://github.com/thorstadt/bifrost
+pkg_bifrost_commit = master
+
+PACKAGES += binpp
+pkg_binpp_name = binpp
+pkg_binpp_description = Erlang Binary Pretty Printer
+pkg_binpp_homepage = https://github.com/jtendo/binpp
+pkg_binpp_fetch = git
+pkg_binpp_repo = https://github.com/jtendo/binpp
+pkg_binpp_commit = master
+
+PACKAGES += bisect
+pkg_bisect_name = bisect
+pkg_bisect_description = Ordered fixed-size binary dictionary in Erlang
+pkg_bisect_homepage = https://github.com/knutin/bisect
+pkg_bisect_fetch = git
+pkg_bisect_repo = https://github.com/knutin/bisect
+pkg_bisect_commit = master
+
+PACKAGES += bitcask
+pkg_bitcask_name = bitcask
+pkg_bitcask_description = because you need another key/value storage engine
+pkg_bitcask_homepage = https://github.com/basho/bitcask
+pkg_bitcask_fetch = git
+pkg_bitcask_repo = https://github.com/basho/bitcask
+pkg_bitcask_commit = develop
+
+PACKAGES += bitstore
+pkg_bitstore_name = bitstore
+pkg_bitstore_description = A document based ontology development environment
+pkg_bitstore_homepage = https://github.com/bdionne/bitstore
+pkg_bitstore_fetch = git
+pkg_bitstore_repo = https://github.com/bdionne/bitstore
+pkg_bitstore_commit = master
+
+PACKAGES += bootstrap
+pkg_bootstrap_name = bootstrap
+pkg_bootstrap_description = A simple, yet powerful Erlang cluster bootstrapping application.
+pkg_bootstrap_homepage = https://github.com/schlagert/bootstrap
+pkg_bootstrap_fetch = git
+pkg_bootstrap_repo = https://github.com/schlagert/bootstrap
+pkg_bootstrap_commit = master
+
+PACKAGES += boss
+pkg_boss_name = boss
+pkg_boss_description = Erlang web MVC, now featuring Comet
+pkg_boss_homepage = https://github.com/ChicagoBoss/ChicagoBoss
+pkg_boss_fetch = git
+pkg_boss_repo = https://github.com/ChicagoBoss/ChicagoBoss
+pkg_boss_commit = master
+
+PACKAGES += boss_db
+pkg_boss_db_name = boss_db
+pkg_boss_db_description = BossDB: a sharded, caching, pooling, evented ORM for Erlang
+pkg_boss_db_homepage = https://github.com/ErlyORM/boss_db
+pkg_boss_db_fetch = git
+pkg_boss_db_repo = https://github.com/ErlyORM/boss_db
+pkg_boss_db_commit = master
+
+PACKAGES += brod
+pkg_brod_name = brod
+pkg_brod_description = Kafka client in Erlang
+pkg_brod_homepage = https://github.com/klarna/brod
+pkg_brod_fetch = git
+pkg_brod_repo = https://github.com/klarna/brod.git
+pkg_brod_commit = master
+
+PACKAGES += bson
+pkg_bson_name = bson
+pkg_bson_description = BSON documents in Erlang, see bsonspec.org
+pkg_bson_homepage = https://github.com/comtihon/bson-erlang
+pkg_bson_fetch = git
+pkg_bson_repo = https://github.com/comtihon/bson-erlang
+pkg_bson_commit = master
+
+PACKAGES += bullet
+pkg_bullet_name = bullet
+pkg_bullet_description = Simple, reliable, efficient streaming for Cowboy.
+pkg_bullet_homepage = http://ninenines.eu
+pkg_bullet_fetch = git
+pkg_bullet_repo = https://github.com/ninenines/bullet
+pkg_bullet_commit = master
+
+PACKAGES += cache
+pkg_cache_name = cache
+pkg_cache_description = Erlang in-memory cache
+pkg_cache_homepage = https://github.com/fogfish/cache
+pkg_cache_fetch = git
+pkg_cache_repo = https://github.com/fogfish/cache
+pkg_cache_commit = master
+
+PACKAGES += cake
+pkg_cake_name = cake
+pkg_cake_description = Really simple terminal colorization
+pkg_cake_homepage = https://github.com/darach/cake-erl
+pkg_cake_fetch = git
+pkg_cake_repo = https://github.com/darach/cake-erl
+pkg_cake_commit = master
+
+PACKAGES += carotene
+pkg_carotene_name = carotene
+pkg_carotene_description = Real-time server
+pkg_carotene_homepage = https://github.com/carotene/carotene
+pkg_carotene_fetch = git
+pkg_carotene_repo = https://github.com/carotene/carotene
+pkg_carotene_commit = master
+
+PACKAGES += cberl
+pkg_cberl_name = cberl
+pkg_cberl_description = NIF based Erlang bindings for Couchbase
+pkg_cberl_homepage = https://github.com/chitika/cberl
+pkg_cberl_fetch = git
+pkg_cberl_repo = https://github.com/chitika/cberl
+pkg_cberl_commit = master
+
+PACKAGES += cecho
+pkg_cecho_name = cecho
+pkg_cecho_description = An ncurses library for Erlang
+pkg_cecho_homepage = https://github.com/mazenharake/cecho
+pkg_cecho_fetch = git
+pkg_cecho_repo = https://github.com/mazenharake/cecho
+pkg_cecho_commit = master
+
+PACKAGES += cferl
+pkg_cferl_name = cferl
+pkg_cferl_description = Rackspace / Open Stack Cloud Files Erlang Client
+pkg_cferl_homepage = https://github.com/ddossot/cferl
+pkg_cferl_fetch = git
+pkg_cferl_repo = https://github.com/ddossot/cferl
+pkg_cferl_commit = master
+
+PACKAGES += chaos_monkey
+pkg_chaos_monkey_name = chaos_monkey
+pkg_chaos_monkey_description = This is The CHAOS MONKEY. It will kill your processes.
+pkg_chaos_monkey_homepage = https://github.com/dLuna/chaos_monkey
+pkg_chaos_monkey_fetch = git
+pkg_chaos_monkey_repo = https://github.com/dLuna/chaos_monkey
+pkg_chaos_monkey_commit = master
+
+PACKAGES += check_node
+pkg_check_node_name = check_node
+pkg_check_node_description = Nagios Scripts for monitoring Riak
+pkg_check_node_homepage = https://github.com/basho-labs/riak_nagios
+pkg_check_node_fetch = git
+pkg_check_node_repo = https://github.com/basho-labs/riak_nagios
+pkg_check_node_commit = master
+
+PACKAGES += chronos
+pkg_chronos_name = chronos
+pkg_chronos_description = Timer module for Erlang that makes it easy to abstract time out of the tests.
+pkg_chronos_homepage = https://github.com/lehoff/chronos
+pkg_chronos_fetch = git
+pkg_chronos_repo = https://github.com/lehoff/chronos
+pkg_chronos_commit = master
+
+PACKAGES += chumak
+pkg_chumak_name = chumak
+pkg_chumak_description = Pure Erlang implementation of ZeroMQ Message Transport Protocol.
+pkg_chumak_homepage = http://choven.ca
+pkg_chumak_fetch = git
+pkg_chumak_repo = https://github.com/chovencorp/chumak
+pkg_chumak_commit = master
+
+PACKAGES += cl
+pkg_cl_name = cl
+pkg_cl_description = OpenCL binding for Erlang
+pkg_cl_homepage = https://github.com/tonyrog/cl
+pkg_cl_fetch = git
+pkg_cl_repo = https://github.com/tonyrog/cl
+pkg_cl_commit = master
+
+PACKAGES += clique
+pkg_clique_name = clique
+pkg_clique_description = CLI Framework for Erlang
+pkg_clique_homepage = https://github.com/basho/clique
+pkg_clique_fetch = git
+pkg_clique_repo = https://github.com/basho/clique
+pkg_clique_commit = develop
+
+PACKAGES += cloudi_core
+pkg_cloudi_core_name = cloudi_core
+pkg_cloudi_core_description = CloudI internal service runtime
+pkg_cloudi_core_homepage = http://cloudi.org/
+pkg_cloudi_core_fetch = git
+pkg_cloudi_core_repo = https://github.com/CloudI/cloudi_core
+pkg_cloudi_core_commit = master
+
+PACKAGES += cloudi_service_api_requests
+pkg_cloudi_service_api_requests_name = cloudi_service_api_requests
+pkg_cloudi_service_api_requests_description = CloudI Service API requests (JSON-RPC/Erlang-term support)
+pkg_cloudi_service_api_requests_homepage = http://cloudi.org/
+pkg_cloudi_service_api_requests_fetch = git
+pkg_cloudi_service_api_requests_repo = https://github.com/CloudI/cloudi_service_api_requests
+pkg_cloudi_service_api_requests_commit = master
+
+PACKAGES += cloudi_service_db
+pkg_cloudi_service_db_name = cloudi_service_db
+pkg_cloudi_service_db_description = CloudI Database (in-memory/testing/generic)
+pkg_cloudi_service_db_homepage = http://cloudi.org/
+pkg_cloudi_service_db_fetch = git
+pkg_cloudi_service_db_repo = https://github.com/CloudI/cloudi_service_db
+pkg_cloudi_service_db_commit = master
+
+PACKAGES += cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_name = cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_description = Cassandra CloudI Service
+pkg_cloudi_service_db_cassandra_homepage = http://cloudi.org/
+pkg_cloudi_service_db_cassandra_fetch = git
+pkg_cloudi_service_db_cassandra_repo = https://github.com/CloudI/cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_commit = master
+
+PACKAGES += cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_name = cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_description = Cassandra CQL CloudI Service
+pkg_cloudi_service_db_cassandra_cql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_cassandra_cql_fetch = git
+pkg_cloudi_service_db_cassandra_cql_repo = https://github.com/CloudI/cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_commit = master
+
+PACKAGES += cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_name = cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_description = CouchDB CloudI Service
+pkg_cloudi_service_db_couchdb_homepage = http://cloudi.org/
+pkg_cloudi_service_db_couchdb_fetch = git
+pkg_cloudi_service_db_couchdb_repo = https://github.com/CloudI/cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_commit = master
+
+PACKAGES += cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_name = cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_description = elasticsearch CloudI Service
+pkg_cloudi_service_db_elasticsearch_homepage = http://cloudi.org/
+pkg_cloudi_service_db_elasticsearch_fetch = git
+pkg_cloudi_service_db_elasticsearch_repo = https://github.com/CloudI/cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_commit = master
+
+PACKAGES += cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_name = cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_description = memcached CloudI Service
+pkg_cloudi_service_db_memcached_homepage = http://cloudi.org/
+pkg_cloudi_service_db_memcached_fetch = git
+pkg_cloudi_service_db_memcached_repo = https://github.com/CloudI/cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_commit = master
+
+PACKAGES += cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_name = cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_description = MySQL CloudI Service
+pkg_cloudi_service_db_mysql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_mysql_fetch = git
+pkg_cloudi_service_db_mysql_repo = https://github.com/CloudI/cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_commit = master
+
+PACKAGES += cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_name = cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_description = PostgreSQL CloudI Service
+pkg_cloudi_service_db_pgsql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_pgsql_fetch = git
+pkg_cloudi_service_db_pgsql_repo = https://github.com/CloudI/cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_commit = master
+
+PACKAGES += cloudi_service_db_riak
+pkg_cloudi_service_db_riak_name = cloudi_service_db_riak
+pkg_cloudi_service_db_riak_description = Riak CloudI Service
+pkg_cloudi_service_db_riak_homepage = http://cloudi.org/
+pkg_cloudi_service_db_riak_fetch = git
+pkg_cloudi_service_db_riak_repo = https://github.com/CloudI/cloudi_service_db_riak
+pkg_cloudi_service_db_riak_commit = master
+
+PACKAGES += cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_name = cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_description = Tokyo Tyrant CloudI Service
+pkg_cloudi_service_db_tokyotyrant_homepage = http://cloudi.org/
+pkg_cloudi_service_db_tokyotyrant_fetch = git
+pkg_cloudi_service_db_tokyotyrant_repo = https://github.com/CloudI/cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_commit = master
+
+PACKAGES += cloudi_service_filesystem
+pkg_cloudi_service_filesystem_name = cloudi_service_filesystem
+pkg_cloudi_service_filesystem_description = Filesystem CloudI Service
+pkg_cloudi_service_filesystem_homepage = http://cloudi.org/
+pkg_cloudi_service_filesystem_fetch = git
+pkg_cloudi_service_filesystem_repo = https://github.com/CloudI/cloudi_service_filesystem
+pkg_cloudi_service_filesystem_commit = master
+
+PACKAGES += cloudi_service_http_client
+pkg_cloudi_service_http_client_name = cloudi_service_http_client
+pkg_cloudi_service_http_client_description = HTTP client CloudI Service
+pkg_cloudi_service_http_client_homepage = http://cloudi.org/
+pkg_cloudi_service_http_client_fetch = git
+pkg_cloudi_service_http_client_repo = https://github.com/CloudI/cloudi_service_http_client
+pkg_cloudi_service_http_client_commit = master
+
+PACKAGES += cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_name = cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_description = cowboy HTTP/HTTPS CloudI Service
+pkg_cloudi_service_http_cowboy_homepage = http://cloudi.org/
+pkg_cloudi_service_http_cowboy_fetch = git
+pkg_cloudi_service_http_cowboy_repo = https://github.com/CloudI/cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_commit = master
+
+PACKAGES += cloudi_service_http_elli
+pkg_cloudi_service_http_elli_name = cloudi_service_http_elli
+pkg_cloudi_service_http_elli_description = elli HTTP CloudI Service
+pkg_cloudi_service_http_elli_homepage = http://cloudi.org/
+pkg_cloudi_service_http_elli_fetch = git
+pkg_cloudi_service_http_elli_repo = https://github.com/CloudI/cloudi_service_http_elli
+pkg_cloudi_service_http_elli_commit = master
+
+PACKAGES += cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_name = cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_description = Map/Reduce CloudI Service
+pkg_cloudi_service_map_reduce_homepage = http://cloudi.org/
+pkg_cloudi_service_map_reduce_fetch = git
+pkg_cloudi_service_map_reduce_repo = https://github.com/CloudI/cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_commit = master
+
+PACKAGES += cloudi_service_oauth1
+pkg_cloudi_service_oauth1_name = cloudi_service_oauth1
+pkg_cloudi_service_oauth1_description = OAuth v1.0 CloudI Service
+pkg_cloudi_service_oauth1_homepage = http://cloudi.org/
+pkg_cloudi_service_oauth1_fetch = git
+pkg_cloudi_service_oauth1_repo = https://github.com/CloudI/cloudi_service_oauth1
+pkg_cloudi_service_oauth1_commit = master
+
+PACKAGES += cloudi_service_queue
+pkg_cloudi_service_queue_name = cloudi_service_queue
+pkg_cloudi_service_queue_description = Persistent Queue Service
+pkg_cloudi_service_queue_homepage = http://cloudi.org/
+pkg_cloudi_service_queue_fetch = git
+pkg_cloudi_service_queue_repo = https://github.com/CloudI/cloudi_service_queue
+pkg_cloudi_service_queue_commit = master
+
+PACKAGES += cloudi_service_quorum
+pkg_cloudi_service_quorum_name = cloudi_service_quorum
+pkg_cloudi_service_quorum_description = CloudI Quorum Service
+pkg_cloudi_service_quorum_homepage = http://cloudi.org/
+pkg_cloudi_service_quorum_fetch = git
+pkg_cloudi_service_quorum_repo = https://github.com/CloudI/cloudi_service_quorum
+pkg_cloudi_service_quorum_commit = master
+
+PACKAGES += cloudi_service_router
+pkg_cloudi_service_router_name = cloudi_service_router
+pkg_cloudi_service_router_description = CloudI Router Service
+pkg_cloudi_service_router_homepage = http://cloudi.org/
+pkg_cloudi_service_router_fetch = git
+pkg_cloudi_service_router_repo = https://github.com/CloudI/cloudi_service_router
+pkg_cloudi_service_router_commit = master
+
+PACKAGES += cloudi_service_tcp
+pkg_cloudi_service_tcp_name = cloudi_service_tcp
+pkg_cloudi_service_tcp_description = TCP CloudI Service
+pkg_cloudi_service_tcp_homepage = http://cloudi.org/
+pkg_cloudi_service_tcp_fetch = git
+pkg_cloudi_service_tcp_repo = https://github.com/CloudI/cloudi_service_tcp
+pkg_cloudi_service_tcp_commit = master
+
+PACKAGES += cloudi_service_timers
+pkg_cloudi_service_timers_name = cloudi_service_timers
+pkg_cloudi_service_timers_description = Timers CloudI Service
+pkg_cloudi_service_timers_homepage = http://cloudi.org/
+pkg_cloudi_service_timers_fetch = git
+pkg_cloudi_service_timers_repo = https://github.com/CloudI/cloudi_service_timers
+pkg_cloudi_service_timers_commit = master
+
+PACKAGES += cloudi_service_udp
+pkg_cloudi_service_udp_name = cloudi_service_udp
+pkg_cloudi_service_udp_description = UDP CloudI Service
+pkg_cloudi_service_udp_homepage = http://cloudi.org/
+pkg_cloudi_service_udp_fetch = git
+pkg_cloudi_service_udp_repo = https://github.com/CloudI/cloudi_service_udp
+pkg_cloudi_service_udp_commit = master
+
+PACKAGES += cloudi_service_validate
+pkg_cloudi_service_validate_name = cloudi_service_validate
+pkg_cloudi_service_validate_description = CloudI Validate Service
+pkg_cloudi_service_validate_homepage = http://cloudi.org/
+pkg_cloudi_service_validate_fetch = git
+pkg_cloudi_service_validate_repo = https://github.com/CloudI/cloudi_service_validate
+pkg_cloudi_service_validate_commit = master
+
+PACKAGES += cloudi_service_zeromq
+pkg_cloudi_service_zeromq_name = cloudi_service_zeromq
+pkg_cloudi_service_zeromq_description = ZeroMQ CloudI Service
+pkg_cloudi_service_zeromq_homepage = http://cloudi.org/
+pkg_cloudi_service_zeromq_fetch = git
+pkg_cloudi_service_zeromq_repo = https://github.com/CloudI/cloudi_service_zeromq
+pkg_cloudi_service_zeromq_commit = master
+
+PACKAGES += cluster_info
+pkg_cluster_info_name = cluster_info
+pkg_cluster_info_description = Fork of Hibari's nifty cluster_info OTP app
+pkg_cluster_info_homepage = https://github.com/basho/cluster_info
+pkg_cluster_info_fetch = git
+pkg_cluster_info_repo = https://github.com/basho/cluster_info
+pkg_cluster_info_commit = master
+
+PACKAGES += color
+pkg_color_name = color
+pkg_color_description = ANSI colors for your Erlang
+pkg_color_homepage = https://github.com/julianduque/erlang-color
+pkg_color_fetch = git
+pkg_color_repo = https://github.com/julianduque/erlang-color
+pkg_color_commit = master
+
+PACKAGES += confetti
+pkg_confetti_name = confetti
+pkg_confetti_description = Erlang configuration provider / application:get_env/2 on steroids
+pkg_confetti_homepage = https://github.com/jtendo/confetti
+pkg_confetti_fetch = git
+pkg_confetti_repo = https://github.com/jtendo/confetti
+pkg_confetti_commit = master
+
+PACKAGES += couchbeam
+pkg_couchbeam_name = couchbeam
+pkg_couchbeam_description = Apache CouchDB client in Erlang
+pkg_couchbeam_homepage = https://github.com/benoitc/couchbeam
+pkg_couchbeam_fetch = git
+pkg_couchbeam_repo = https://github.com/benoitc/couchbeam
+pkg_couchbeam_commit = master
+
+PACKAGES += covertool
+pkg_covertool_name = covertool
+pkg_covertool_description = Tool to convert Erlang cover data files into Cobertura XML reports
+pkg_covertool_homepage = https://github.com/idubrov/covertool
+pkg_covertool_fetch = git
+pkg_covertool_repo = https://github.com/idubrov/covertool
+pkg_covertool_commit = master
+
+PACKAGES += cowboy
+pkg_cowboy_name = cowboy
+pkg_cowboy_description = Small, fast and modular HTTP server.
+pkg_cowboy_homepage = http://ninenines.eu
+pkg_cowboy_fetch = git
+pkg_cowboy_repo = https://github.com/ninenines/cowboy
+pkg_cowboy_commit = 1.0.4
+
+PACKAGES += cowdb
+pkg_cowdb_name = cowdb
+pkg_cowdb_description = Pure Key/Value database library for Erlang Applications
+pkg_cowdb_homepage = https://github.com/refuge/cowdb
+pkg_cowdb_fetch = git
+pkg_cowdb_repo = https://github.com/refuge/cowdb
+pkg_cowdb_commit = master
+
+PACKAGES += cowlib
+pkg_cowlib_name = cowlib
+pkg_cowlib_description = Support library for manipulating Web protocols.
+pkg_cowlib_homepage = http://ninenines.eu
+pkg_cowlib_fetch = git
+pkg_cowlib_repo = https://github.com/ninenines/cowlib
+pkg_cowlib_commit = 1.0.2
+
+PACKAGES += cpg
+pkg_cpg_name = cpg
+pkg_cpg_description = CloudI Process Groups
+pkg_cpg_homepage = https://github.com/okeuday/cpg
+pkg_cpg_fetch = git
+pkg_cpg_repo = https://github.com/okeuday/cpg
+pkg_cpg_commit = master
+
+PACKAGES += cqerl
+pkg_cqerl_name = cqerl
+pkg_cqerl_description = Native Erlang CQL client for Cassandra
+pkg_cqerl_homepage = https://matehat.github.io/cqerl/
+pkg_cqerl_fetch = git
+pkg_cqerl_repo = https://github.com/matehat/cqerl
+pkg_cqerl_commit = master
+
+PACKAGES += cr
+pkg_cr_name = cr
+pkg_cr_description = Chain Replication
+pkg_cr_homepage = https://synrc.com/apps/cr/doc/cr.htm
+pkg_cr_fetch = git
+pkg_cr_repo = https://github.com/spawnproc/cr
+pkg_cr_commit = master
+
+PACKAGES += cuttlefish
+pkg_cuttlefish_name = cuttlefish
+pkg_cuttlefish_description = never lose your childlike sense of wonder baby cuttlefish, promise me?
+pkg_cuttlefish_homepage = https://github.com/basho/cuttlefish
+pkg_cuttlefish_fetch = git
+pkg_cuttlefish_repo = https://github.com/basho/cuttlefish
+pkg_cuttlefish_commit = master
+
+PACKAGES += damocles
+pkg_damocles_name = damocles
+pkg_damocles_description = Erlang library for generating adversarial network conditions for QAing distributed applications/systems on a single Linux box.
+pkg_damocles_homepage = https://github.com/lostcolony/damocles
+pkg_damocles_fetch = git
+pkg_damocles_repo = https://github.com/lostcolony/damocles
+pkg_damocles_commit = master
+
+PACKAGES += debbie
+pkg_debbie_name = debbie
+pkg_debbie_description = .DEB Built In Erlang
+pkg_debbie_homepage = https://github.com/crownedgrouse/debbie
+pkg_debbie_fetch = git
+pkg_debbie_repo = https://github.com/crownedgrouse/debbie
+pkg_debbie_commit = master
+
+PACKAGES += decimal
+pkg_decimal_name = decimal
+pkg_decimal_description = An Erlang decimal arithmetic library
+pkg_decimal_homepage = https://github.com/tim/erlang-decimal
+pkg_decimal_fetch = git
+pkg_decimal_repo = https://github.com/tim/erlang-decimal
+pkg_decimal_commit = master
+
+PACKAGES += detergent
+pkg_detergent_name = detergent
+pkg_detergent_description = An emulsifying Erlang SOAP library
+pkg_detergent_homepage = https://github.com/devinus/detergent
+pkg_detergent_fetch = git
+pkg_detergent_repo = https://github.com/devinus/detergent
+pkg_detergent_commit = master
+
+PACKAGES += detest
+pkg_detest_name = detest
+pkg_detest_description = Tool for running tests on a cluster of erlang nodes
+pkg_detest_homepage = https://github.com/biokoda/detest
+pkg_detest_fetch = git
+pkg_detest_repo = https://github.com/biokoda/detest
+pkg_detest_commit = master
+
+PACKAGES += dh_date
+pkg_dh_date_name = dh_date
+pkg_dh_date_description = Date formatting / parsing library for erlang
+pkg_dh_date_homepage = https://github.com/daleharvey/dh_date
+pkg_dh_date_fetch = git
+pkg_dh_date_repo = https://github.com/daleharvey/dh_date
+pkg_dh_date_commit = master
+
+PACKAGES += dirbusterl
+pkg_dirbusterl_name = dirbusterl
+pkg_dirbusterl_description = DirBuster successor in Erlang
+pkg_dirbusterl_homepage = https://github.com/silentsignal/DirBustErl
+pkg_dirbusterl_fetch = git
+pkg_dirbusterl_repo = https://github.com/silentsignal/DirBustErl
+pkg_dirbusterl_commit = master
+
+PACKAGES += dispcount
+pkg_dispcount_name = dispcount
+pkg_dispcount_description = Erlang task dispatcher based on ETS counters.
+pkg_dispcount_homepage = https://github.com/ferd/dispcount
+pkg_dispcount_fetch = git
+pkg_dispcount_repo = https://github.com/ferd/dispcount
+pkg_dispcount_commit = master
+
+PACKAGES += dlhttpc
+pkg_dlhttpc_name = dlhttpc
+pkg_dlhttpc_description = dispcount-based lhttpc fork for massive amounts of requests to limited endpoints
+pkg_dlhttpc_homepage = https://github.com/ferd/dlhttpc
+pkg_dlhttpc_fetch = git
+pkg_dlhttpc_repo = https://github.com/ferd/dlhttpc
+pkg_dlhttpc_commit = master
+
+PACKAGES += dns
+pkg_dns_name = dns
+pkg_dns_description = Erlang DNS library
+pkg_dns_homepage = https://github.com/aetrion/dns_erlang
+pkg_dns_fetch = git
+pkg_dns_repo = https://github.com/aetrion/dns_erlang
+pkg_dns_commit = master
+
+PACKAGES += dnssd
+pkg_dnssd_name = dnssd
+pkg_dnssd_description = Erlang interface to Apple's Bonjour DNS Service Discovery implementation
+pkg_dnssd_homepage = https://github.com/benoitc/dnssd_erlang
+pkg_dnssd_fetch = git
+pkg_dnssd_repo = https://github.com/benoitc/dnssd_erlang
+pkg_dnssd_commit = master
+
+PACKAGES += dynamic_compile
+pkg_dynamic_compile_name = dynamic_compile
+pkg_dynamic_compile_description = compile and load erlang modules from string input
+pkg_dynamic_compile_homepage = https://github.com/jkvor/dynamic_compile
+pkg_dynamic_compile_fetch = git
+pkg_dynamic_compile_repo = https://github.com/jkvor/dynamic_compile
+pkg_dynamic_compile_commit = master
+
+PACKAGES += e2
+pkg_e2_name = e2
+pkg_e2_description = Library to simplify writing correct OTP applications.
+pkg_e2_homepage = http://e2project.org
+pkg_e2_fetch = git
+pkg_e2_repo = https://github.com/gar1t/e2
+pkg_e2_commit = master
+
+PACKAGES += eamf
+pkg_eamf_name = eamf
+pkg_eamf_description = eAMF provides Action Message Format (AMF) support for Erlang
+pkg_eamf_homepage = https://github.com/mrinalwadhwa/eamf
+pkg_eamf_fetch = git
+pkg_eamf_repo = https://github.com/mrinalwadhwa/eamf
+pkg_eamf_commit = master
+
+PACKAGES += eavro
+pkg_eavro_name = eavro
+pkg_eavro_description = Apache Avro encoder/decoder
+pkg_eavro_homepage = https://github.com/SIfoxDevTeam/eavro
+pkg_eavro_fetch = git
+pkg_eavro_repo = https://github.com/SIfoxDevTeam/eavro
+pkg_eavro_commit = master
+
+PACKAGES += ecapnp
+pkg_ecapnp_name = ecapnp
+pkg_ecapnp_description = Cap'n Proto library for Erlang
+pkg_ecapnp_homepage = https://github.com/kaos/ecapnp
+pkg_ecapnp_fetch = git
+pkg_ecapnp_repo = https://github.com/kaos/ecapnp
+pkg_ecapnp_commit = master
+
+PACKAGES += econfig
+pkg_econfig_name = econfig
+pkg_econfig_description = simple Erlang config handler using INI files
+pkg_econfig_homepage = https://github.com/benoitc/econfig
+pkg_econfig_fetch = git
+pkg_econfig_repo = https://github.com/benoitc/econfig
+pkg_econfig_commit = master
+
+PACKAGES += edate
+pkg_edate_name = edate
+pkg_edate_description = date manipulation library for erlang
+pkg_edate_homepage = https://github.com/dweldon/edate
+pkg_edate_fetch = git
+pkg_edate_repo = https://github.com/dweldon/edate
+pkg_edate_commit = master
+
+PACKAGES += edgar
+pkg_edgar_name = edgar
+pkg_edgar_description = Erlang Does GNU AR
+pkg_edgar_homepage = https://github.com/crownedgrouse/edgar
+pkg_edgar_fetch = git
+pkg_edgar_repo = https://github.com/crownedgrouse/edgar
+pkg_edgar_commit = master
+
+PACKAGES += edis
+pkg_edis_name = edis
+pkg_edis_description = An Erlang implementation of Redis KV Store
+pkg_edis_homepage = http://inaka.github.com/edis/
+pkg_edis_fetch = git
+pkg_edis_repo = https://github.com/inaka/edis
+pkg_edis_commit = master
+
+PACKAGES += edns
+pkg_edns_name = edns
+pkg_edns_description = Erlang/OTP DNS server
+pkg_edns_homepage = https://github.com/hcvst/erlang-dns
+pkg_edns_fetch = git
+pkg_edns_repo = https://github.com/hcvst/erlang-dns
+pkg_edns_commit = master
+
+PACKAGES += edown
+pkg_edown_name = edown
+pkg_edown_description = EDoc extension for generating Github-flavored Markdown
+pkg_edown_homepage = https://github.com/uwiger/edown
+pkg_edown_fetch = git
+pkg_edown_repo = https://github.com/uwiger/edown
+pkg_edown_commit = master
+
+PACKAGES += eep
+pkg_eep_name = eep
+pkg_eep_description = Erlang Easy Profiling (eep) application provides a way to analyze application performance and call hierarchy
+pkg_eep_homepage = https://github.com/virtan/eep
+pkg_eep_fetch = git
+pkg_eep_repo = https://github.com/virtan/eep
+pkg_eep_commit = master
+
+PACKAGES += eep_app
+pkg_eep_app_name = eep_app
+pkg_eep_app_description = Embedded Event Processing
+pkg_eep_app_homepage = https://github.com/darach/eep-erl
+pkg_eep_app_fetch = git
+pkg_eep_app_repo = https://github.com/darach/eep-erl
+pkg_eep_app_commit = master
+
+PACKAGES += efene
+pkg_efene_name = efene
+pkg_efene_description = Alternative syntax for the Erlang Programming Language focusing on simplicity, ease of use and programmer UX
+pkg_efene_homepage = https://github.com/efene/efene
+pkg_efene_fetch = git
+pkg_efene_repo = https://github.com/efene/efene
+pkg_efene_commit = master
+
+PACKAGES += egeoip
+pkg_egeoip_name = egeoip
+pkg_egeoip_description = Erlang IP Geolocation module, currently supporting the MaxMind GeoLite City Database.
+pkg_egeoip_homepage = https://github.com/mochi/egeoip
+pkg_egeoip_fetch = git
+pkg_egeoip_repo = https://github.com/mochi/egeoip
+pkg_egeoip_commit = master
+
+PACKAGES += ehsa
+pkg_ehsa_name = ehsa
+pkg_ehsa_description = Erlang HTTP server basic and digest authentication modules
+pkg_ehsa_homepage = https://bitbucket.org/a12n/ehsa
+pkg_ehsa_fetch = hg
+pkg_ehsa_repo = https://bitbucket.org/a12n/ehsa
+pkg_ehsa_commit = default
+
+PACKAGES += ej
+pkg_ej_name = ej
+pkg_ej_description = Helper module for working with Erlang terms representing JSON
+pkg_ej_homepage = https://github.com/seth/ej
+pkg_ej_fetch = git
+pkg_ej_repo = https://github.com/seth/ej
+pkg_ej_commit = master
+
+PACKAGES += ejabberd
+pkg_ejabberd_name = ejabberd
+pkg_ejabberd_description = Robust, ubiquitous and massively scalable Jabber / XMPP Instant Messaging platform
+pkg_ejabberd_homepage = https://github.com/processone/ejabberd
+pkg_ejabberd_fetch = git
+pkg_ejabberd_repo = https://github.com/processone/ejabberd
+pkg_ejabberd_commit = master
+
+PACKAGES += ejwt
+pkg_ejwt_name = ejwt
+pkg_ejwt_description = erlang library for JSON Web Token
+pkg_ejwt_homepage = https://github.com/artefactop/ejwt
+pkg_ejwt_fetch = git
+pkg_ejwt_repo = https://github.com/artefactop/ejwt
+pkg_ejwt_commit = master
+
+PACKAGES += ekaf
+pkg_ekaf_name = ekaf
+pkg_ekaf_description = A minimal, high-performance Kafka client in Erlang.
+pkg_ekaf_homepage = https://github.com/helpshift/ekaf
+pkg_ekaf_fetch = git
+pkg_ekaf_repo = https://github.com/helpshift/ekaf
+pkg_ekaf_commit = master
+
+PACKAGES += elarm
+pkg_elarm_name = elarm
+pkg_elarm_description = Alarm Manager for Erlang.
+pkg_elarm_homepage = https://github.com/esl/elarm
+pkg_elarm_fetch = git
+pkg_elarm_repo = https://github.com/esl/elarm
+pkg_elarm_commit = master
+
+PACKAGES += eleveldb
+pkg_eleveldb_name = eleveldb
+pkg_eleveldb_description = Erlang LevelDB API
+pkg_eleveldb_homepage = https://github.com/basho/eleveldb
+pkg_eleveldb_fetch = git
+pkg_eleveldb_repo = https://github.com/basho/eleveldb
+pkg_eleveldb_commit = master
+
+PACKAGES += elixir
+pkg_elixir_name = elixir
+pkg_elixir_description = Elixir is a dynamic, functional language designed for building scalable and maintainable applications
+pkg_elixir_homepage = https://elixir-lang.org/
+pkg_elixir_fetch = git
+pkg_elixir_repo = https://github.com/elixir-lang/elixir
+pkg_elixir_commit = master
+
+PACKAGES += elli
+pkg_elli_name = elli
+pkg_elli_description = Simple, robust and performant Erlang web server
+pkg_elli_homepage = https://github.com/elli-lib/elli
+pkg_elli_fetch = git
+pkg_elli_repo = https://github.com/elli-lib/elli
+pkg_elli_commit = master
+
+PACKAGES += elvis
+pkg_elvis_name = elvis
+pkg_elvis_description = Erlang Style Reviewer
+pkg_elvis_homepage = https://github.com/inaka/elvis
+pkg_elvis_fetch = git
+pkg_elvis_repo = https://github.com/inaka/elvis
+pkg_elvis_commit = master
+
+PACKAGES += emagick
+pkg_emagick_name = emagick
+pkg_emagick_description = Wrapper for Graphics/ImageMagick command line tool.
+pkg_emagick_homepage = https://github.com/kivra/emagick
+pkg_emagick_fetch = git
+pkg_emagick_repo = https://github.com/kivra/emagick
+pkg_emagick_commit = master
+
+PACKAGES += emysql
+pkg_emysql_name = emysql
+pkg_emysql_description = Stable, pure Erlang MySQL driver.
+pkg_emysql_homepage = https://github.com/Eonblast/Emysql
+pkg_emysql_fetch = git
+pkg_emysql_repo = https://github.com/Eonblast/Emysql
+pkg_emysql_commit = master
+
+PACKAGES += enm
+pkg_enm_name = enm
+pkg_enm_description = Erlang driver for nanomsg
+pkg_enm_homepage = https://github.com/basho/enm
+pkg_enm_fetch = git
+pkg_enm_repo = https://github.com/basho/enm
+pkg_enm_commit = master
+
+PACKAGES += entop
+pkg_entop_name = entop
+pkg_entop_description = A top-like tool for monitoring an Erlang node
+pkg_entop_homepage = https://github.com/mazenharake/entop
+pkg_entop_fetch = git
+pkg_entop_repo = https://github.com/mazenharake/entop
+pkg_entop_commit = master
+
+PACKAGES += epcap
+pkg_epcap_name = epcap
+pkg_epcap_description = Erlang packet capture interface using pcap
+pkg_epcap_homepage = https://github.com/msantos/epcap
+pkg_epcap_fetch = git
+pkg_epcap_repo = https://github.com/msantos/epcap
+pkg_epcap_commit = master
+
+PACKAGES += eper
+pkg_eper_name = eper
+pkg_eper_description = Erlang performance and debugging tools.
+pkg_eper_homepage = https://github.com/massemanet/eper
+pkg_eper_fetch = git
+pkg_eper_repo = https://github.com/massemanet/eper
+pkg_eper_commit = master
+
+PACKAGES += epgsql
+pkg_epgsql_name = epgsql
+pkg_epgsql_description = Erlang PostgreSQL client library.
+pkg_epgsql_homepage = https://github.com/epgsql/epgsql
+pkg_epgsql_fetch = git
+pkg_epgsql_repo = https://github.com/epgsql/epgsql
+pkg_epgsql_commit = master
+
+PACKAGES += episcina
+pkg_episcina_name = episcina
+pkg_episcina_description = A simple, non-intrusive resource pool for connections
+pkg_episcina_homepage = https://github.com/erlware/episcina
+pkg_episcina_fetch = git
+pkg_episcina_repo = https://github.com/erlware/episcina
+pkg_episcina_commit = master
+
+PACKAGES += eplot
+pkg_eplot_name = eplot
+pkg_eplot_description = A plot engine written in erlang.
+pkg_eplot_homepage = https://github.com/psyeugenic/eplot
+pkg_eplot_fetch = git
+pkg_eplot_repo = https://github.com/psyeugenic/eplot
+pkg_eplot_commit = master
+
+PACKAGES += epocxy
+pkg_epocxy_name = epocxy
+pkg_epocxy_description = Erlang Patterns of Concurrency
+pkg_epocxy_homepage = https://github.com/duomark/epocxy
+pkg_epocxy_fetch = git
+pkg_epocxy_repo = https://github.com/duomark/epocxy
+pkg_epocxy_commit = master
+
+PACKAGES += epubnub
+pkg_epubnub_name = epubnub
+pkg_epubnub_description = Erlang PubNub API
+pkg_epubnub_homepage = https://github.com/tsloughter/epubnub
+pkg_epubnub_fetch = git
+pkg_epubnub_repo = https://github.com/tsloughter/epubnub
+pkg_epubnub_commit = master
+
+PACKAGES += eqm
+pkg_eqm_name = eqm
+pkg_eqm_description = Erlang pub sub with supply-demand channels
+pkg_eqm_homepage = https://github.com/loucash/eqm
+pkg_eqm_fetch = git
+pkg_eqm_repo = https://github.com/loucash/eqm
+pkg_eqm_commit = master
+
+PACKAGES += eredis
+pkg_eredis_name = eredis
+pkg_eredis_description = Erlang Redis client
+pkg_eredis_homepage = https://github.com/wooga/eredis
+pkg_eredis_fetch = git
+pkg_eredis_repo = https://github.com/wooga/eredis
+pkg_eredis_commit = master
+
+PACKAGES += eredis_pool
+pkg_eredis_pool_name = eredis_pool
+pkg_eredis_pool_description = eredis_pool is a pool of Redis clients, using eredis and poolboy.
+pkg_eredis_pool_homepage = https://github.com/hiroeorz/eredis_pool
+pkg_eredis_pool_fetch = git
+pkg_eredis_pool_repo = https://github.com/hiroeorz/eredis_pool
+pkg_eredis_pool_commit = master
+
+PACKAGES += erl_streams
+pkg_erl_streams_name = erl_streams
+pkg_erl_streams_description = Streams in Erlang
+pkg_erl_streams_homepage = https://github.com/epappas/erl_streams
+pkg_erl_streams_fetch = git
+pkg_erl_streams_repo = https://github.com/epappas/erl_streams
+pkg_erl_streams_commit = master
+
+PACKAGES += erlang_cep
+pkg_erlang_cep_name = erlang_cep
+pkg_erlang_cep_description = A basic CEP package written in erlang
+pkg_erlang_cep_homepage = https://github.com/danmacklin/erlang_cep
+pkg_erlang_cep_fetch = git
+pkg_erlang_cep_repo = https://github.com/danmacklin/erlang_cep
+pkg_erlang_cep_commit = master
+
+PACKAGES += erlang_js
+pkg_erlang_js_name = erlang_js
+pkg_erlang_js_description = A linked-in driver for Erlang to Mozilla's SpiderMonkey JavaScript runtime.
+pkg_erlang_js_homepage = https://github.com/basho/erlang_js
+pkg_erlang_js_fetch = git
+pkg_erlang_js_repo = https://github.com/basho/erlang_js
+pkg_erlang_js_commit = master
+
+PACKAGES += erlang_localtime
+pkg_erlang_localtime_name = erlang_localtime
+pkg_erlang_localtime_description = Erlang library for conversion from one local time to another
+pkg_erlang_localtime_homepage = https://github.com/dmitryme/erlang_localtime
+pkg_erlang_localtime_fetch = git
+pkg_erlang_localtime_repo = https://github.com/dmitryme/erlang_localtime
+pkg_erlang_localtime_commit = master
+
+PACKAGES += erlang_smtp
+pkg_erlang_smtp_name = erlang_smtp
+pkg_erlang_smtp_description = Erlang SMTP and POP3 server code.
+pkg_erlang_smtp_homepage = https://github.com/tonyg/erlang-smtp
+pkg_erlang_smtp_fetch = git
+pkg_erlang_smtp_repo = https://github.com/tonyg/erlang-smtp
+pkg_erlang_smtp_commit = master
+
+PACKAGES += erlang_term
+pkg_erlang_term_name = erlang_term
+pkg_erlang_term_description = Erlang Term Info
+pkg_erlang_term_homepage = https://github.com/okeuday/erlang_term
+pkg_erlang_term_fetch = git
+pkg_erlang_term_repo = https://github.com/okeuday/erlang_term
+pkg_erlang_term_commit = master
+
+PACKAGES += erlastic_search
+pkg_erlastic_search_name = erlastic_search
+pkg_erlastic_search_description = An Erlang app for communicating with Elastic Search's rest interface.
+pkg_erlastic_search_homepage = https://github.com/tsloughter/erlastic_search
+pkg_erlastic_search_fetch = git
+pkg_erlastic_search_repo = https://github.com/tsloughter/erlastic_search
+pkg_erlastic_search_commit = master
+
+PACKAGES += erlasticsearch
+pkg_erlasticsearch_name = erlasticsearch
+pkg_erlasticsearch_description = Erlang thrift interface to elastic_search
+pkg_erlasticsearch_homepage = https://github.com/dieswaytoofast/erlasticsearch
+pkg_erlasticsearch_fetch = git
+pkg_erlasticsearch_repo = https://github.com/dieswaytoofast/erlasticsearch
+pkg_erlasticsearch_commit = master
+
+PACKAGES += erlbrake
+pkg_erlbrake_name = erlbrake
+pkg_erlbrake_description = Erlang Airbrake notification client
+pkg_erlbrake_homepage = https://github.com/kenpratt/erlbrake
+pkg_erlbrake_fetch = git
+pkg_erlbrake_repo = https://github.com/kenpratt/erlbrake
+pkg_erlbrake_commit = master
+
+PACKAGES += erlcloud
+pkg_erlcloud_name = erlcloud
+pkg_erlcloud_description = Cloud Computing library for erlang (Amazon EC2, S3, SQS, SimpleDB, Mechanical Turk, ELB)
+pkg_erlcloud_homepage = https://github.com/gleber/erlcloud
+pkg_erlcloud_fetch = git
+pkg_erlcloud_repo = https://github.com/gleber/erlcloud
+pkg_erlcloud_commit = master
+
+PACKAGES += erlcron
+pkg_erlcron_name = erlcron
+pkg_erlcron_description = Erlang cronish system
+pkg_erlcron_homepage = https://github.com/erlware/erlcron
+pkg_erlcron_fetch = git
+pkg_erlcron_repo = https://github.com/erlware/erlcron
+pkg_erlcron_commit = master
+
+PACKAGES += erldb
+pkg_erldb_name = erldb
+pkg_erldb_description = ORM (Object-relational mapping) application implemented in Erlang
+pkg_erldb_homepage = http://erldb.org
+pkg_erldb_fetch = git
+pkg_erldb_repo = https://github.com/erldb/erldb
+pkg_erldb_commit = master
+
+PACKAGES += erldis
+pkg_erldis_name = erldis
+pkg_erldis_description = redis erlang client library
+pkg_erldis_homepage = https://github.com/cstar/erldis
+pkg_erldis_fetch = git
+pkg_erldis_repo = https://github.com/cstar/erldis
+pkg_erldis_commit = master
+
+PACKAGES += erldns
+pkg_erldns_name = erldns
+pkg_erldns_description = DNS server, in erlang.
+pkg_erldns_homepage = https://github.com/aetrion/erl-dns
+pkg_erldns_fetch = git
+pkg_erldns_repo = https://github.com/aetrion/erl-dns
+pkg_erldns_commit = master
+
+PACKAGES += erldocker
+pkg_erldocker_name = erldocker
+pkg_erldocker_description = Docker Remote API client for Erlang
+pkg_erldocker_homepage = https://github.com/proger/erldocker
+pkg_erldocker_fetch = git
+pkg_erldocker_repo = https://github.com/proger/erldocker
+pkg_erldocker_commit = master
+
+PACKAGES += erlfsmon
+pkg_erlfsmon_name = erlfsmon
+pkg_erlfsmon_description = Erlang filesystem event watcher for Linux and OSX
+pkg_erlfsmon_homepage = https://github.com/proger/erlfsmon
+pkg_erlfsmon_fetch = git
+pkg_erlfsmon_repo = https://github.com/proger/erlfsmon
+pkg_erlfsmon_commit = master
+
+PACKAGES += erlgit
+pkg_erlgit_name = erlgit
+pkg_erlgit_description = Erlang convenience wrapper around git executable
+pkg_erlgit_homepage = https://github.com/gleber/erlgit
+pkg_erlgit_fetch = git
+pkg_erlgit_repo = https://github.com/gleber/erlgit
+pkg_erlgit_commit = master
+
+PACKAGES += erlguten
+pkg_erlguten_name = erlguten
+pkg_erlguten_description = ErlGuten is a system for high-quality typesetting, written purely in Erlang.
+pkg_erlguten_homepage = https://github.com/richcarl/erlguten
+pkg_erlguten_fetch = git
+pkg_erlguten_repo = https://github.com/richcarl/erlguten
+pkg_erlguten_commit = master
+
+PACKAGES += erlmc
+pkg_erlmc_name = erlmc
+pkg_erlmc_description = Erlang memcached binary protocol client
+pkg_erlmc_homepage = https://github.com/jkvor/erlmc
+pkg_erlmc_fetch = git
+pkg_erlmc_repo = https://github.com/jkvor/erlmc
+pkg_erlmc_commit = master
+
+PACKAGES += erlmongo
+pkg_erlmongo_name = erlmongo
+pkg_erlmongo_description = Record based Erlang driver for MongoDB with gridfs support
+pkg_erlmongo_homepage = https://github.com/SergejJurecko/erlmongo
+pkg_erlmongo_fetch = git
+pkg_erlmongo_repo = https://github.com/SergejJurecko/erlmongo
+pkg_erlmongo_commit = master
+
+PACKAGES += erlog
+pkg_erlog_name = erlog
+pkg_erlog_description = Prolog interpreter in and for Erlang
+pkg_erlog_homepage = https://github.com/rvirding/erlog
+pkg_erlog_fetch = git
+pkg_erlog_repo = https://github.com/rvirding/erlog
+pkg_erlog_commit = master
+
+PACKAGES += erlpass
+pkg_erlpass_name = erlpass
+pkg_erlpass_description = A library to handle password hashing and changing in a safe manner, independent from any kind of storage whatsoever.
+pkg_erlpass_homepage = https://github.com/ferd/erlpass
+pkg_erlpass_fetch = git
+pkg_erlpass_repo = https://github.com/ferd/erlpass
+pkg_erlpass_commit = master
+
+PACKAGES += erlport
+pkg_erlport_name = erlport
+pkg_erlport_description = ErlPort - connect Erlang to other languages
+pkg_erlport_homepage = https://github.com/hdima/erlport
+pkg_erlport_fetch = git
+pkg_erlport_repo = https://github.com/hdima/erlport
+pkg_erlport_commit = master
+
+PACKAGES += erlsh
+pkg_erlsh_name = erlsh
+pkg_erlsh_description = Erlang shell tools
+pkg_erlsh_homepage = https://github.com/proger/erlsh
+pkg_erlsh_fetch = git
+pkg_erlsh_repo = https://github.com/proger/erlsh
+pkg_erlsh_commit = master
+
+PACKAGES += erlsha2
+pkg_erlsha2_name = erlsha2
+pkg_erlsha2_description = SHA-224, SHA-256, SHA-384, SHA-512 implemented in Erlang NIFs.
+pkg_erlsha2_homepage = https://github.com/vinoski/erlsha2
+pkg_erlsha2_fetch = git
+pkg_erlsha2_repo = https://github.com/vinoski/erlsha2
+pkg_erlsha2_commit = master
+
+PACKAGES += erlsom
+pkg_erlsom_name = erlsom
+pkg_erlsom_description = XML parser for Erlang
+pkg_erlsom_homepage = https://github.com/willemdj/erlsom
+pkg_erlsom_fetch = git
+pkg_erlsom_repo = https://github.com/willemdj/erlsom
+pkg_erlsom_commit = master
+
+PACKAGES += erlubi
+pkg_erlubi_name = erlubi
+pkg_erlubi_description = Ubigraph Erlang Client (and Process Visualizer)
+pkg_erlubi_homepage = https://github.com/krestenkrab/erlubi
+pkg_erlubi_fetch = git
+pkg_erlubi_repo = https://github.com/krestenkrab/erlubi
+pkg_erlubi_commit = master
+
+PACKAGES += erlvolt
+pkg_erlvolt_name = erlvolt
+pkg_erlvolt_description = VoltDB Erlang Client Driver
+pkg_erlvolt_homepage = https://github.com/VoltDB/voltdb-client-erlang
+pkg_erlvolt_fetch = git
+pkg_erlvolt_repo = https://github.com/VoltDB/voltdb-client-erlang
+pkg_erlvolt_commit = master
+
+PACKAGES += erlware_commons
+pkg_erlware_commons_name = erlware_commons
+pkg_erlware_commons_description = Erlware Commons is an Erlware project focused on all aspects of reusable Erlang components.
+pkg_erlware_commons_homepage = https://github.com/erlware/erlware_commons
+pkg_erlware_commons_fetch = git
+pkg_erlware_commons_repo = https://github.com/erlware/erlware_commons
+pkg_erlware_commons_commit = master
+
+PACKAGES += erlydtl
+pkg_erlydtl_name = erlydtl
+pkg_erlydtl_description = Django Template Language for Erlang.
+pkg_erlydtl_homepage = https://github.com/erlydtl/erlydtl
+pkg_erlydtl_fetch = git
+pkg_erlydtl_repo = https://github.com/erlydtl/erlydtl
+pkg_erlydtl_commit = master
+
+PACKAGES += errd
+pkg_errd_name = errd
+pkg_errd_description = Erlang RRDTool library
+pkg_errd_homepage = https://github.com/archaelus/errd
+pkg_errd_fetch = git
+pkg_errd_repo = https://github.com/archaelus/errd
+pkg_errd_commit = master
+
+PACKAGES += erserve
+pkg_erserve_name = erserve
+pkg_erserve_description = Erlang/Rserve communication interface
+pkg_erserve_homepage = https://github.com/del/erserve
+pkg_erserve_fetch = git
+pkg_erserve_repo = https://github.com/del/erserve
+pkg_erserve_commit = master
+
+PACKAGES += erwa
+pkg_erwa_name = erwa
+pkg_erwa_description = A WAMP router and client written in Erlang.
+pkg_erwa_homepage = https://github.com/bwegh/erwa
+pkg_erwa_fetch = git
+pkg_erwa_repo = https://github.com/bwegh/erwa
+pkg_erwa_commit = master
+
+PACKAGES += escalus
+pkg_escalus_name = escalus
+pkg_escalus_description = An XMPP client library in Erlang for conveniently testing XMPP servers
+pkg_escalus_homepage = https://github.com/esl/escalus
+pkg_escalus_fetch = git
+pkg_escalus_repo = https://github.com/esl/escalus
+pkg_escalus_commit = master
+
+PACKAGES += esh_mk
+pkg_esh_mk_name = esh_mk
+pkg_esh_mk_description = esh template engine plugin for erlang.mk
+pkg_esh_mk_homepage = https://github.com/crownedgrouse/esh.mk
+pkg_esh_mk_fetch = git
+pkg_esh_mk_repo = https://github.com/crownedgrouse/esh.mk.git
+pkg_esh_mk_commit = master
+
+PACKAGES += espec
+pkg_espec_name = espec
+pkg_espec_description = ESpec: Behaviour driven development framework for Erlang
+pkg_espec_homepage = https://github.com/lucaspiller/espec
+pkg_espec_fetch = git
+pkg_espec_repo = https://github.com/lucaspiller/espec
+pkg_espec_commit = master
+
+PACKAGES += estatsd
+pkg_estatsd_name = estatsd
+pkg_estatsd_description = Erlang stats aggregation app that periodically flushes data to graphite
+pkg_estatsd_homepage = https://github.com/RJ/estatsd
+pkg_estatsd_fetch = git
+pkg_estatsd_repo = https://github.com/RJ/estatsd
+pkg_estatsd_commit = master
+
+PACKAGES += etap
+pkg_etap_name = etap
+pkg_etap_description = etap is a simple erlang testing library that provides TAP compliant output.
+pkg_etap_homepage = https://github.com/ngerakines/etap
+pkg_etap_fetch = git
+pkg_etap_repo = https://github.com/ngerakines/etap
+pkg_etap_commit = master
+
+PACKAGES += etest
+pkg_etest_name = etest
+pkg_etest_description = A lightweight, convention over configuration test framework for Erlang
+pkg_etest_homepage = https://github.com/wooga/etest
+pkg_etest_fetch = git
+pkg_etest_repo = https://github.com/wooga/etest
+pkg_etest_commit = master
+
+PACKAGES += etest_http
+pkg_etest_http_name = etest_http
+pkg_etest_http_description = etest Assertions around HTTP (client-side)
+pkg_etest_http_homepage = https://github.com/wooga/etest_http
+pkg_etest_http_fetch = git
+pkg_etest_http_repo = https://github.com/wooga/etest_http
+pkg_etest_http_commit = master
+
+PACKAGES += etoml
+pkg_etoml_name = etoml
+pkg_etoml_description = TOML language erlang parser
+pkg_etoml_homepage = https://github.com/kalta/etoml
+pkg_etoml_fetch = git
+pkg_etoml_repo = https://github.com/kalta/etoml
+pkg_etoml_commit = master
+
+PACKAGES += eunit
+pkg_eunit_name = eunit
+pkg_eunit_description = The EUnit lightweight unit testing framework for Erlang - this is the canonical development repository.
+pkg_eunit_homepage = https://github.com/richcarl/eunit
+pkg_eunit_fetch = git
+pkg_eunit_repo = https://github.com/richcarl/eunit
+pkg_eunit_commit = master
+
+PACKAGES += eunit_formatters
+pkg_eunit_formatters_name = eunit_formatters
+pkg_eunit_formatters_description = Because eunit's output sucks. Let's make it better.
+pkg_eunit_formatters_homepage = https://github.com/seancribbs/eunit_formatters
+pkg_eunit_formatters_fetch = git
+pkg_eunit_formatters_repo = https://github.com/seancribbs/eunit_formatters
+pkg_eunit_formatters_commit = master
+
+PACKAGES += euthanasia
+pkg_euthanasia_name = euthanasia
+pkg_euthanasia_description = Merciful killer for your Erlang processes
+pkg_euthanasia_homepage = https://github.com/doubleyou/euthanasia
+pkg_euthanasia_fetch = git
+pkg_euthanasia_repo = https://github.com/doubleyou/euthanasia
+pkg_euthanasia_commit = master
+
+PACKAGES += evum
+pkg_evum_name = evum
+pkg_evum_description = Spawn Linux VMs as Erlang processes in the Erlang VM
+pkg_evum_homepage = https://github.com/msantos/evum
+pkg_evum_fetch = git
+pkg_evum_repo = https://github.com/msantos/evum
+pkg_evum_commit = master
+
+PACKAGES += exec
+pkg_exec_name = erlexec
+pkg_exec_description = Execute and control OS processes from Erlang/OTP.
+pkg_exec_homepage = http://saleyn.github.com/erlexec
+pkg_exec_fetch = git
+pkg_exec_repo = https://github.com/saleyn/erlexec
+pkg_exec_commit = master
+
+PACKAGES += exml
+pkg_exml_name = exml
+pkg_exml_description = XML parsing library in Erlang
+pkg_exml_homepage = https://github.com/paulgray/exml
+pkg_exml_fetch = git
+pkg_exml_repo = https://github.com/paulgray/exml
+pkg_exml_commit = master
+
+PACKAGES += exometer
+pkg_exometer_name = exometer
+pkg_exometer_description = Basic measurement objects and probe behavior
+pkg_exometer_homepage = https://github.com/Feuerlabs/exometer
+pkg_exometer_fetch = git
+pkg_exometer_repo = https://github.com/Feuerlabs/exometer
+pkg_exometer_commit = master
+
+PACKAGES += exs1024
+pkg_exs1024_name = exs1024
+pkg_exs1024_description = Xorshift1024star pseudo random number generator for Erlang.
+pkg_exs1024_homepage = https://github.com/jj1bdx/exs1024
+pkg_exs1024_fetch = git
+pkg_exs1024_repo = https://github.com/jj1bdx/exs1024
+pkg_exs1024_commit = master
+
+PACKAGES += exs64
+pkg_exs64_name = exs64
+pkg_exs64_description = Xorshift64star pseudo random number generator for Erlang.
+pkg_exs64_homepage = https://github.com/jj1bdx/exs64
+pkg_exs64_fetch = git
+pkg_exs64_repo = https://github.com/jj1bdx/exs64
+pkg_exs64_commit = master
+
+PACKAGES += exsplus116
+pkg_exsplus116_name = exsplus116
+pkg_exsplus116_description = Xorshift116plus for Erlang
+pkg_exsplus116_homepage = https://github.com/jj1bdx/exsplus116
+pkg_exsplus116_fetch = git
+pkg_exsplus116_repo = https://github.com/jj1bdx/exsplus116
+pkg_exsplus116_commit = master
+
+PACKAGES += exsplus128
+pkg_exsplus128_name = exsplus128
+pkg_exsplus128_description = Xorshift128plus pseudo random number generator for Erlang.
+pkg_exsplus128_homepage = https://github.com/jj1bdx/exsplus128
+pkg_exsplus128_fetch = git
+pkg_exsplus128_repo = https://github.com/jj1bdx/exsplus128
+pkg_exsplus128_commit = master
+
+PACKAGES += ezmq
+pkg_ezmq_name = ezmq
+pkg_ezmq_description = zMQ implemented in Erlang
+pkg_ezmq_homepage = https://github.com/RoadRunnr/ezmq
+pkg_ezmq_fetch = git
+pkg_ezmq_repo = https://github.com/RoadRunnr/ezmq
+pkg_ezmq_commit = master
+
+PACKAGES += ezmtp
+pkg_ezmtp_name = ezmtp
+pkg_ezmtp_description = ZMTP protocol in pure Erlang.
+pkg_ezmtp_homepage = https://github.com/a13x/ezmtp
+pkg_ezmtp_fetch = git
+pkg_ezmtp_repo = https://github.com/a13x/ezmtp
+pkg_ezmtp_commit = master
+
+PACKAGES += fast_disk_log
+pkg_fast_disk_log_name = fast_disk_log
+pkg_fast_disk_log_description = Pool-based asynchronous Erlang disk logger
+pkg_fast_disk_log_homepage = https://github.com/lpgauth/fast_disk_log
+pkg_fast_disk_log_fetch = git
+pkg_fast_disk_log_repo = https://github.com/lpgauth/fast_disk_log
+pkg_fast_disk_log_commit = master
+
+PACKAGES += feeder
+pkg_feeder_name = feeder
+pkg_feeder_description = Stream parse RSS and Atom formatted XML feeds.
+pkg_feeder_homepage = https://github.com/michaelnisi/feeder
+pkg_feeder_fetch = git
+pkg_feeder_repo = https://github.com/michaelnisi/feeder
+pkg_feeder_commit = master
+
+PACKAGES += find_crate
+pkg_find_crate_name = find_crate
+pkg_find_crate_description = Find Rust libs and exes in Erlang application priv directory
+pkg_find_crate_homepage = https://github.com/goertzenator/find_crate
+pkg_find_crate_fetch = git
+pkg_find_crate_repo = https://github.com/goertzenator/find_crate
+pkg_find_crate_commit = master
+
+PACKAGES += fix
+pkg_fix_name = fix
+pkg_fix_description = FIX protocol (http://fixprotocol.org/) implementation.
+pkg_fix_homepage = https://github.com/maxlapshin/fix
+pkg_fix_fetch = git
+pkg_fix_repo = https://github.com/maxlapshin/fix
+pkg_fix_commit = master
+
+PACKAGES += flower
+pkg_flower_name = flower
+pkg_flower_description = FlowER - an Erlang OpenFlow development platform
+pkg_flower_homepage = https://github.com/travelping/flower
+pkg_flower_fetch = git
+pkg_flower_repo = https://github.com/travelping/flower
+pkg_flower_commit = master
+
+PACKAGES += fn
+pkg_fn_name = fn
+pkg_fn_description = Function utilities for Erlang
+pkg_fn_homepage = https://github.com/reiddraper/fn
+pkg_fn_fetch = git
+pkg_fn_repo = https://github.com/reiddraper/fn
+pkg_fn_commit = master
+
+PACKAGES += folsom
+pkg_folsom_name = folsom
+pkg_folsom_description = Expose Erlang Events and Metrics
+pkg_folsom_homepage = https://github.com/boundary/folsom
+pkg_folsom_fetch = git
+pkg_folsom_repo = https://github.com/boundary/folsom
+pkg_folsom_commit = master
+
+PACKAGES += folsom_cowboy
+pkg_folsom_cowboy_name = folsom_cowboy
+pkg_folsom_cowboy_description = A Cowboy based Folsom HTTP Wrapper.
+pkg_folsom_cowboy_homepage = https://github.com/boundary/folsom_cowboy
+pkg_folsom_cowboy_fetch = git
+pkg_folsom_cowboy_repo = https://github.com/boundary/folsom_cowboy
+pkg_folsom_cowboy_commit = master
+
+PACKAGES += folsomite
+pkg_folsomite_name = folsomite
+pkg_folsomite_description = blow up your graphite / riemann server with folsom metrics
+pkg_folsomite_homepage = https://github.com/campanja/folsomite
+pkg_folsomite_fetch = git
+pkg_folsomite_repo = https://github.com/campanja/folsomite
+pkg_folsomite_commit = master
+
+PACKAGES += fs
+pkg_fs_name = fs
+pkg_fs_description = Erlang FileSystem Listener
+pkg_fs_homepage = https://github.com/synrc/fs
+pkg_fs_fetch = git
+pkg_fs_repo = https://github.com/synrc/fs
+pkg_fs_commit = master
+
+PACKAGES += fuse
+pkg_fuse_name = fuse
+pkg_fuse_description = A Circuit Breaker for Erlang
+pkg_fuse_homepage = https://github.com/jlouis/fuse
+pkg_fuse_fetch = git
+pkg_fuse_repo = https://github.com/jlouis/fuse
+pkg_fuse_commit = master
+
+PACKAGES += gcm
+pkg_gcm_name = gcm
+pkg_gcm_description = An Erlang application for Google Cloud Messaging
+pkg_gcm_homepage = https://github.com/pdincau/gcm-erlang
+pkg_gcm_fetch = git
+pkg_gcm_repo = https://github.com/pdincau/gcm-erlang
+pkg_gcm_commit = master
+
+PACKAGES += gcprof
+pkg_gcprof_name = gcprof
+pkg_gcprof_description = Garbage Collection profiler for Erlang
+pkg_gcprof_homepage = https://github.com/knutin/gcprof
+pkg_gcprof_fetch = git
+pkg_gcprof_repo = https://github.com/knutin/gcprof
+pkg_gcprof_commit = master
+
+PACKAGES += geas
+pkg_geas_name = geas
+pkg_geas_description = Guess Erlang Application Scattering
+pkg_geas_homepage = https://github.com/crownedgrouse/geas
+pkg_geas_fetch = git
+pkg_geas_repo = https://github.com/crownedgrouse/geas
+pkg_geas_commit = master
+
+PACKAGES += geef
+pkg_geef_name = geef
+pkg_geef_description = Git NEEEEF (Erlang NIF)
+pkg_geef_homepage = https://github.com/carlosmn/geef
+pkg_geef_fetch = git
+pkg_geef_repo = https://github.com/carlosmn/geef
+pkg_geef_commit = master
+
+PACKAGES += gen_coap
+pkg_gen_coap_name = gen_coap
+pkg_gen_coap_description = Generic Erlang CoAP Client/Server
+pkg_gen_coap_homepage = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_fetch = git
+pkg_gen_coap_repo = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_commit = master
+
+PACKAGES += gen_cycle
+pkg_gen_cycle_name = gen_cycle
+pkg_gen_cycle_description = Simple, generic OTP behaviour for recurring tasks
+pkg_gen_cycle_homepage = https://github.com/aerosol/gen_cycle
+pkg_gen_cycle_fetch = git
+pkg_gen_cycle_repo = https://github.com/aerosol/gen_cycle
+pkg_gen_cycle_commit = develop
+
+PACKAGES += gen_icmp
+pkg_gen_icmp_name = gen_icmp
+pkg_gen_icmp_description = Erlang interface to ICMP sockets
+pkg_gen_icmp_homepage = https://github.com/msantos/gen_icmp
+pkg_gen_icmp_fetch = git
+pkg_gen_icmp_repo = https://github.com/msantos/gen_icmp
+pkg_gen_icmp_commit = master
+
+PACKAGES += gen_leader
+pkg_gen_leader_name = gen_leader
+pkg_gen_leader_description = leader election behavior
+pkg_gen_leader_homepage = https://github.com/garret-smith/gen_leader_revival
+pkg_gen_leader_fetch = git
+pkg_gen_leader_repo = https://github.com/garret-smith/gen_leader_revival
+pkg_gen_leader_commit = master
+
+PACKAGES += gen_nb_server
+pkg_gen_nb_server_name = gen_nb_server
+pkg_gen_nb_server_description = OTP behavior for writing non-blocking servers
+pkg_gen_nb_server_homepage = https://github.com/kevsmith/gen_nb_server
+pkg_gen_nb_server_fetch = git
+pkg_gen_nb_server_repo = https://github.com/kevsmith/gen_nb_server
+pkg_gen_nb_server_commit = master
+
+PACKAGES += gen_paxos
+pkg_gen_paxos_name = gen_paxos
+pkg_gen_paxos_description = An Erlang/OTP-style implementation of the PAXOS distributed consensus protocol
+pkg_gen_paxos_homepage = https://github.com/gburd/gen_paxos
+pkg_gen_paxos_fetch = git
+pkg_gen_paxos_repo = https://github.com/gburd/gen_paxos
+pkg_gen_paxos_commit = master
+
+PACKAGES += gen_rpc
+pkg_gen_rpc_name = gen_rpc
+pkg_gen_rpc_description = A scalable RPC library for Erlang-VM based languages
+pkg_gen_rpc_homepage = https://github.com/priestjim/gen_rpc.git
+pkg_gen_rpc_fetch = git
+pkg_gen_rpc_repo = https://github.com/priestjim/gen_rpc.git
+pkg_gen_rpc_commit = master
+
+PACKAGES += gen_smtp
+pkg_gen_smtp_name = gen_smtp
+pkg_gen_smtp_description = A generic Erlang SMTP server and client that can be extended via callback modules
+pkg_gen_smtp_homepage = https://github.com/Vagabond/gen_smtp
+pkg_gen_smtp_fetch = git
+pkg_gen_smtp_repo = https://github.com/Vagabond/gen_smtp
+pkg_gen_smtp_commit = master
+
+PACKAGES += gen_tracker
+pkg_gen_tracker_name = gen_tracker
+pkg_gen_tracker_description = supervisor with ets handling of children and their metadata
+pkg_gen_tracker_homepage = https://github.com/erlyvideo/gen_tracker
+pkg_gen_tracker_fetch = git
+pkg_gen_tracker_repo = https://github.com/erlyvideo/gen_tracker
+pkg_gen_tracker_commit = master
+
+PACKAGES += gen_unix
+pkg_gen_unix_name = gen_unix
+pkg_gen_unix_description = Erlang Unix socket interface
+pkg_gen_unix_homepage = https://github.com/msantos/gen_unix
+pkg_gen_unix_fetch = git
+pkg_gen_unix_repo = https://github.com/msantos/gen_unix
+pkg_gen_unix_commit = master
+
+PACKAGES += geode
+pkg_geode_name = geode
+pkg_geode_description = geohash/proximity lookup in pure, uncut erlang.
+pkg_geode_homepage = https://github.com/bradfordw/geode
+pkg_geode_fetch = git
+pkg_geode_repo = https://github.com/bradfordw/geode
+pkg_geode_commit = master
+
+PACKAGES += getopt
+pkg_getopt_name = getopt
+pkg_getopt_description = Module to parse command line arguments using the GNU getopt syntax
+pkg_getopt_homepage = https://github.com/jcomellas/getopt
+pkg_getopt_fetch = git
+pkg_getopt_repo = https://github.com/jcomellas/getopt
+pkg_getopt_commit = master
+
+PACKAGES += gettext
+pkg_gettext_name = gettext
+pkg_gettext_description = Erlang internationalization library.
+pkg_gettext_homepage = https://github.com/etnt/gettext
+pkg_gettext_fetch = git
+pkg_gettext_repo = https://github.com/etnt/gettext
+pkg_gettext_commit = master
+
+PACKAGES += giallo
+pkg_giallo_name = giallo
+pkg_giallo_description = Small and flexible web framework on top of Cowboy
+pkg_giallo_homepage = https://github.com/kivra/giallo
+pkg_giallo_fetch = git
+pkg_giallo_repo = https://github.com/kivra/giallo
+pkg_giallo_commit = master
+
+PACKAGES += gin
+pkg_gin_name = gin
+pkg_gin_description = A parse_transform providing additional guard expressions for Erlang
+pkg_gin_homepage = https://github.com/mad-cocktail/gin
+pkg_gin_fetch = git
+pkg_gin_repo = https://github.com/mad-cocktail/gin
+pkg_gin_commit = master
+
+PACKAGES += gitty
+pkg_gitty_name = gitty
+pkg_gitty_description = Git access in erlang
+pkg_gitty_homepage = https://github.com/maxlapshin/gitty
+pkg_gitty_fetch = git
+pkg_gitty_repo = https://github.com/maxlapshin/gitty
+pkg_gitty_commit = master
+
+PACKAGES += gold_fever
+pkg_gold_fever_name = gold_fever
+pkg_gold_fever_description = A Treasure Hunt for Erlangers
+pkg_gold_fever_homepage = https://github.com/inaka/gold_fever
+pkg_gold_fever_fetch = git
+pkg_gold_fever_repo = https://github.com/inaka/gold_fever
+pkg_gold_fever_commit = master
+
+PACKAGES += gpb
+pkg_gpb_name = gpb
+pkg_gpb_description = A Google Protobuf implementation for Erlang
+pkg_gpb_homepage = https://github.com/tomas-abrahamsson/gpb
+pkg_gpb_fetch = git
+pkg_gpb_repo = https://github.com/tomas-abrahamsson/gpb
+pkg_gpb_commit = master
+
+PACKAGES += gproc
+pkg_gproc_name = gproc
+pkg_gproc_description = Extended process registry for Erlang
+pkg_gproc_homepage = https://github.com/uwiger/gproc
+pkg_gproc_fetch = git
+pkg_gproc_repo = https://github.com/uwiger/gproc
+pkg_gproc_commit = master
+
+PACKAGES += grapherl
+pkg_grapherl_name = grapherl
+pkg_grapherl_description = Create graphs of Erlang systems and programs
+pkg_grapherl_homepage = https://github.com/eproxus/grapherl
+pkg_grapherl_fetch = git
+pkg_grapherl_repo = https://github.com/eproxus/grapherl
+pkg_grapherl_commit = master
+
+PACKAGES += grpc
+pkg_grpc_name = grpc
+pkg_grpc_description = gRPC server in Erlang
+pkg_grpc_homepage = https://github.com/Bluehouse-Technology/grpc
+pkg_grpc_fetch = git
+pkg_grpc_repo = https://github.com/Bluehouse-Technology/grpc
+pkg_grpc_commit = master
+
+PACKAGES += grpc_client
+pkg_grpc_client_name = grpc_client
+pkg_grpc_client_description = gRPC client in Erlang
+pkg_grpc_client_homepage = https://github.com/Bluehouse-Technology/grpc_client
+pkg_grpc_client_fetch = git
+pkg_grpc_client_repo = https://github.com/Bluehouse-Technology/grpc_client
+pkg_grpc_client_commit = master
+
+PACKAGES += gun
+pkg_gun_name = gun
+pkg_gun_description = Asynchronous SPDY, HTTP and Websocket client written in Erlang.
+pkg_gun_homepage = http://ninenines.eu
+pkg_gun_fetch = git
+pkg_gun_repo = https://github.com/ninenines/gun
+pkg_gun_commit = master
+
+PACKAGES += gut
+pkg_gut_name = gut
+pkg_gut_description = gut is a template printing, aka scaffolding, tool for Erlang. Like rails generate or yeoman
+pkg_gut_homepage = https://github.com/unbalancedparentheses/gut
+pkg_gut_fetch = git
+pkg_gut_repo = https://github.com/unbalancedparentheses/gut
+pkg_gut_commit = master
+
+PACKAGES += hackney
+pkg_hackney_name = hackney
+pkg_hackney_description = simple HTTP client in Erlang
+pkg_hackney_homepage = https://github.com/benoitc/hackney
+pkg_hackney_fetch = git
+pkg_hackney_repo = https://github.com/benoitc/hackney
+pkg_hackney_commit = master
+
+PACKAGES += hamcrest
+pkg_hamcrest_name = hamcrest
+pkg_hamcrest_description = Erlang port of Hamcrest
+pkg_hamcrest_homepage = https://github.com/hyperthunk/hamcrest-erlang
+pkg_hamcrest_fetch = git
+pkg_hamcrest_repo = https://github.com/hyperthunk/hamcrest-erlang
+pkg_hamcrest_commit = master
+
+PACKAGES += hanoidb
+pkg_hanoidb_name = hanoidb
+pkg_hanoidb_description = Erlang LSM BTree Storage
+pkg_hanoidb_homepage = https://github.com/krestenkrab/hanoidb
+pkg_hanoidb_fetch = git
+pkg_hanoidb_repo = https://github.com/krestenkrab/hanoidb
+pkg_hanoidb_commit = master
+
+PACKAGES += hottub
+pkg_hottub_name = hottub
+pkg_hottub_description = Permanent Erlang Worker Pool
+pkg_hottub_homepage = https://github.com/bfrog/hottub
+pkg_hottub_fetch = git
+pkg_hottub_repo = https://github.com/bfrog/hottub
+pkg_hottub_commit = master
+
+PACKAGES += hpack
+pkg_hpack_name = hpack
+pkg_hpack_description = HPACK Implementation for Erlang
+pkg_hpack_homepage = https://github.com/joedevivo/hpack
+pkg_hpack_fetch = git
+pkg_hpack_repo = https://github.com/joedevivo/hpack
+pkg_hpack_commit = master
+
+PACKAGES += hyper
+pkg_hyper_name = hyper
+pkg_hyper_description = Erlang implementation of HyperLogLog
+pkg_hyper_homepage = https://github.com/GameAnalytics/hyper
+pkg_hyper_fetch = git
+pkg_hyper_repo = https://github.com/GameAnalytics/hyper
+pkg_hyper_commit = master
+
+PACKAGES += i18n
+pkg_i18n_name = i18n
+pkg_i18n_description = International components for unicode from Erlang (unicode, date, string, number, format, locale, localization, transliteration, icu4e)
+pkg_i18n_homepage = https://github.com/erlang-unicode/i18n
+pkg_i18n_fetch = git
+pkg_i18n_repo = https://github.com/erlang-unicode/i18n
+pkg_i18n_commit = master
+
+PACKAGES += ibrowse
+pkg_ibrowse_name = ibrowse
+pkg_ibrowse_description = Erlang HTTP client
+pkg_ibrowse_homepage = https://github.com/cmullaparthi/ibrowse
+pkg_ibrowse_fetch = git
+pkg_ibrowse_repo = https://github.com/cmullaparthi/ibrowse
+pkg_ibrowse_commit = master
+
+PACKAGES += idna
+pkg_idna_name = idna
+pkg_idna_description = Erlang IDNA lib
+pkg_idna_homepage = https://github.com/benoitc/erlang-idna
+pkg_idna_fetch = git
+pkg_idna_repo = https://github.com/benoitc/erlang-idna
+pkg_idna_commit = master
+
+PACKAGES += ierlang
+pkg_ierlang_name = ierlang
+pkg_ierlang_description = An Erlang language kernel for IPython.
+pkg_ierlang_homepage = https://github.com/robbielynch/ierlang
+pkg_ierlang_fetch = git
+pkg_ierlang_repo = https://github.com/robbielynch/ierlang
+pkg_ierlang_commit = master
+
+PACKAGES += iota
+pkg_iota_name = iota
+pkg_iota_description = iota (Inter-dependency Objective Testing Apparatus) - a tool to enforce clean separation of responsibilities in Erlang code
+pkg_iota_homepage = https://github.com/jpgneves/iota
+pkg_iota_fetch = git
+pkg_iota_repo = https://github.com/jpgneves/iota
+pkg_iota_commit = master
+
+PACKAGES += irc_lib
+pkg_irc_lib_name = irc_lib
+pkg_irc_lib_description = Erlang irc client library
+pkg_irc_lib_homepage = https://github.com/OtpChatBot/irc_lib
+pkg_irc_lib_fetch = git
+pkg_irc_lib_repo = https://github.com/OtpChatBot/irc_lib
+pkg_irc_lib_commit = master
+
+PACKAGES += ircd
+pkg_ircd_name = ircd
+pkg_ircd_description = A pluggable IRC daemon application/library for Erlang.
+pkg_ircd_homepage = https://github.com/tonyg/erlang-ircd
+pkg_ircd_fetch = git
+pkg_ircd_repo = https://github.com/tonyg/erlang-ircd
+pkg_ircd_commit = master
+
+PACKAGES += iris
+pkg_iris_name = iris
+pkg_iris_description = Iris Erlang binding
+pkg_iris_homepage = https://github.com/project-iris/iris-erl
+pkg_iris_fetch = git
+pkg_iris_repo = https://github.com/project-iris/iris-erl
+pkg_iris_commit = master
+
+PACKAGES += iso8601
+pkg_iso8601_name = iso8601
+pkg_iso8601_description = Erlang ISO 8601 date formatter/parser
+pkg_iso8601_homepage = https://github.com/seansawyer/erlang_iso8601
+pkg_iso8601_fetch = git
+pkg_iso8601_repo = https://github.com/seansawyer/erlang_iso8601
+pkg_iso8601_commit = master
+
+PACKAGES += jamdb_sybase
+pkg_jamdb_sybase_name = jamdb_sybase
+pkg_jamdb_sybase_description = Erlang driver for SAP Sybase ASE
+pkg_jamdb_sybase_homepage = https://github.com/erlangbureau/jamdb_sybase
+pkg_jamdb_sybase_fetch = git
+pkg_jamdb_sybase_repo = https://github.com/erlangbureau/jamdb_sybase
+pkg_jamdb_sybase_commit = master
+
+PACKAGES += jerg
+pkg_jerg_name = jerg
+pkg_jerg_description = JSON Schema to Erlang Records Generator
+pkg_jerg_homepage = https://github.com/ddossot/jerg
+pkg_jerg_fetch = git
+pkg_jerg_repo = https://github.com/ddossot/jerg
+pkg_jerg_commit = master
+
+PACKAGES += jesse
+pkg_jesse_name = jesse
+pkg_jesse_description = jesse (JSon Schema Erlang) is an implementation of a json schema validator for Erlang.
+pkg_jesse_homepage = https://github.com/for-GET/jesse
+pkg_jesse_fetch = git
+pkg_jesse_repo = https://github.com/for-GET/jesse
+pkg_jesse_commit = master
+
+PACKAGES += jiffy
+pkg_jiffy_name = jiffy
+pkg_jiffy_description = JSON NIFs for Erlang.
+pkg_jiffy_homepage = https://github.com/davisp/jiffy
+pkg_jiffy_fetch = git
+pkg_jiffy_repo = https://github.com/davisp/jiffy
+pkg_jiffy_commit = master
+
+PACKAGES += jiffy_v
+pkg_jiffy_v_name = jiffy_v
+pkg_jiffy_v_description = JSON validation utility
+pkg_jiffy_v_homepage = https://github.com/shizzard/jiffy-v
+pkg_jiffy_v_fetch = git
+pkg_jiffy_v_repo = https://github.com/shizzard/jiffy-v
+pkg_jiffy_v_commit = master
+
+PACKAGES += jobs
+pkg_jobs_name = jobs
+pkg_jobs_description = A job scheduler for load regulation
+pkg_jobs_homepage = https://github.com/esl/jobs
+pkg_jobs_fetch = git
+pkg_jobs_repo = https://github.com/esl/jobs
+pkg_jobs_commit = master
+
+PACKAGES += joxa
+pkg_joxa_name = joxa
+pkg_joxa_description = A Modern Lisp for the Erlang VM
+pkg_joxa_homepage = https://github.com/joxa/joxa
+pkg_joxa_fetch = git
+pkg_joxa_repo = https://github.com/joxa/joxa
+pkg_joxa_commit = master
+
+PACKAGES += json
+pkg_json_name = json
+pkg_json_description = a high level json library for erlang (17.0+)
+pkg_json_homepage = https://github.com/talentdeficit/json
+pkg_json_fetch = git
+pkg_json_repo = https://github.com/talentdeficit/json
+pkg_json_commit = master
+
+PACKAGES += json_rec
+pkg_json_rec_name = json_rec
+pkg_json_rec_description = JSON to erlang record
+pkg_json_rec_homepage = https://github.com/justinkirby/json_rec
+pkg_json_rec_fetch = git
+pkg_json_rec_repo = https://github.com/justinkirby/json_rec
+pkg_json_rec_commit = master
+
+PACKAGES += jsone
+pkg_jsone_name = jsone
+pkg_jsone_description = An Erlang library for encoding, decoding JSON data.
+pkg_jsone_homepage = https://github.com/sile/jsone.git
+pkg_jsone_fetch = git
+pkg_jsone_repo = https://github.com/sile/jsone.git
+pkg_jsone_commit = master
+
+PACKAGES += jsonerl
+pkg_jsonerl_name = jsonerl
+pkg_jsonerl_description = yet another but slightly different erlang <-> json encoder/decoder
+pkg_jsonerl_homepage = https://github.com/lambder/jsonerl
+pkg_jsonerl_fetch = git
+pkg_jsonerl_repo = https://github.com/lambder/jsonerl
+pkg_jsonerl_commit = master
+
+PACKAGES += jsonpath
+pkg_jsonpath_name = jsonpath
+pkg_jsonpath_description = Fast Erlang JSON data retrieval and updates via javascript-like notation
+pkg_jsonpath_homepage = https://github.com/GeneStevens/jsonpath
+pkg_jsonpath_fetch = git
+pkg_jsonpath_repo = https://github.com/GeneStevens/jsonpath
+pkg_jsonpath_commit = master
+
+PACKAGES += jsonx
+pkg_jsonx_name = jsonx
+pkg_jsonx_description = JSONX is an Erlang library for efficiently decoding and encoding JSON, written in C.
+pkg_jsonx_homepage = https://github.com/iskra/jsonx
+pkg_jsonx_fetch = git
+pkg_jsonx_repo = https://github.com/iskra/jsonx
+pkg_jsonx_commit = master
+
+PACKAGES += jsx
+pkg_jsx_name = jsx
+pkg_jsx_description = An Erlang application for consuming, producing and manipulating JSON.
+pkg_jsx_homepage = https://github.com/talentdeficit/jsx
+pkg_jsx_fetch = git
+pkg_jsx_repo = https://github.com/talentdeficit/jsx
+pkg_jsx_commit = master
+
+PACKAGES += kafka
+pkg_kafka_name = kafka
+pkg_kafka_description = Kafka consumer and producer in Erlang
+pkg_kafka_homepage = https://github.com/wooga/kafka-erlang
+pkg_kafka_fetch = git
+pkg_kafka_repo = https://github.com/wooga/kafka-erlang
+pkg_kafka_commit = master
+
+PACKAGES += kafka_protocol
+pkg_kafka_protocol_name = kafka_protocol
+pkg_kafka_protocol_description = Kafka protocol Erlang library
+pkg_kafka_protocol_homepage = https://github.com/klarna/kafka_protocol
+pkg_kafka_protocol_fetch = git
+pkg_kafka_protocol_repo = https://github.com/klarna/kafka_protocol.git
+pkg_kafka_protocol_commit = master
+
+PACKAGES += kai
+pkg_kai_name = kai
+pkg_kai_description = DHT storage by Takeshi Inoue
+pkg_kai_homepage = https://github.com/synrc/kai
+pkg_kai_fetch = git
+pkg_kai_repo = https://github.com/synrc/kai
+pkg_kai_commit = master
+
+PACKAGES += katja
+pkg_katja_name = katja
+pkg_katja_description = A simple Riemann client written in Erlang.
+pkg_katja_homepage = https://github.com/nifoc/katja
+pkg_katja_fetch = git
+pkg_katja_repo = https://github.com/nifoc/katja
+pkg_katja_commit = master
+
+PACKAGES += kdht
+pkg_kdht_name = kdht
+pkg_kdht_description = kdht is an erlang DHT implementation
+pkg_kdht_homepage = https://github.com/kevinlynx/kdht
+pkg_kdht_fetch = git
+pkg_kdht_repo = https://github.com/kevinlynx/kdht
+pkg_kdht_commit = master
+
+PACKAGES += key2value
+pkg_key2value_name = key2value
+pkg_key2value_description = Erlang 2-way map
+pkg_key2value_homepage = https://github.com/okeuday/key2value
+pkg_key2value_fetch = git
+pkg_key2value_repo = https://github.com/okeuday/key2value
+pkg_key2value_commit = master
+
+PACKAGES += keys1value
+pkg_keys1value_name = keys1value
+pkg_keys1value_description = Erlang set associative map for key lists
+pkg_keys1value_homepage = https://github.com/okeuday/keys1value
+pkg_keys1value_fetch = git
+pkg_keys1value_repo = https://github.com/okeuday/keys1value
+pkg_keys1value_commit = master
+
+PACKAGES += kinetic
+pkg_kinetic_name = kinetic
+pkg_kinetic_description = Erlang Kinesis Client
+pkg_kinetic_homepage = https://github.com/AdRoll/kinetic
+pkg_kinetic_fetch = git
+pkg_kinetic_repo = https://github.com/AdRoll/kinetic
+pkg_kinetic_commit = master
+
+PACKAGES += kjell
+pkg_kjell_name = kjell
+pkg_kjell_description = Erlang Shell
+pkg_kjell_homepage = https://github.com/karlll/kjell
+pkg_kjell_fetch = git
+pkg_kjell_repo = https://github.com/karlll/kjell
+pkg_kjell_commit = master
+
+PACKAGES += kraken
+pkg_kraken_name = kraken
+pkg_kraken_description = Distributed Pubsub Server for Realtime Apps
+pkg_kraken_homepage = https://github.com/Asana/kraken
+pkg_kraken_fetch = git
+pkg_kraken_repo = https://github.com/Asana/kraken
+pkg_kraken_commit = master
+
+PACKAGES += kucumberl
+pkg_kucumberl_name = kucumberl
+pkg_kucumberl_description = A pure-Erlang, open-source implementation of Cucumber
+pkg_kucumberl_homepage = https://github.com/openshine/kucumberl
+pkg_kucumberl_fetch = git
+pkg_kucumberl_repo = https://github.com/openshine/kucumberl
+pkg_kucumberl_commit = master
+
+PACKAGES += kvc
+pkg_kvc_name = kvc
+pkg_kvc_description = KVC - Key Value Coding for Erlang data structures
+pkg_kvc_homepage = https://github.com/etrepum/kvc
+pkg_kvc_fetch = git
+pkg_kvc_repo = https://github.com/etrepum/kvc
+pkg_kvc_commit = master
+
+PACKAGES += kvlists
+pkg_kvlists_name = kvlists
+pkg_kvlists_description = Lists of key-value pairs (decoded JSON) in Erlang
+pkg_kvlists_homepage = https://github.com/jcomellas/kvlists
+pkg_kvlists_fetch = git
+pkg_kvlists_repo = https://github.com/jcomellas/kvlists
+pkg_kvlists_commit = master
+
+PACKAGES += kvs
+pkg_kvs_name = kvs
+pkg_kvs_description = Container and Iterator
+pkg_kvs_homepage = https://github.com/synrc/kvs
+pkg_kvs_fetch = git
+pkg_kvs_repo = https://github.com/synrc/kvs
+pkg_kvs_commit = master
+
+PACKAGES += lager
+pkg_lager_name = lager
+pkg_lager_description = A logging framework for Erlang/OTP.
+pkg_lager_homepage = https://github.com/erlang-lager/lager
+pkg_lager_fetch = git
+pkg_lager_repo = https://github.com/erlang-lager/lager
+pkg_lager_commit = master
+
+PACKAGES += lager_amqp_backend
+pkg_lager_amqp_backend_name = lager_amqp_backend
+pkg_lager_amqp_backend_description = AMQP RabbitMQ Lager backend
+pkg_lager_amqp_backend_homepage = https://github.com/jbrisbin/lager_amqp_backend
+pkg_lager_amqp_backend_fetch = git
+pkg_lager_amqp_backend_repo = https://github.com/jbrisbin/lager_amqp_backend
+pkg_lager_amqp_backend_commit = master
+
+PACKAGES += lager_syslog
+pkg_lager_syslog_name = lager_syslog
+pkg_lager_syslog_description = Syslog backend for lager
+pkg_lager_syslog_homepage = https://github.com/erlang-lager/lager_syslog
+pkg_lager_syslog_fetch = git
+pkg_lager_syslog_repo = https://github.com/erlang-lager/lager_syslog
+pkg_lager_syslog_commit = master
+
+PACKAGES += lambdapad
+pkg_lambdapad_name = lambdapad
+pkg_lambdapad_description = Static site generator using Erlang. Yes, Erlang.
+pkg_lambdapad_homepage = https://github.com/gar1t/lambdapad
+pkg_lambdapad_fetch = git
+pkg_lambdapad_repo = https://github.com/gar1t/lambdapad
+pkg_lambdapad_commit = master
+
+PACKAGES += lasp
+pkg_lasp_name = lasp
+pkg_lasp_description = A Language for Distributed, Eventually Consistent Computations
+pkg_lasp_homepage = http://lasp-lang.org/
+pkg_lasp_fetch = git
+pkg_lasp_repo = https://github.com/lasp-lang/lasp
+pkg_lasp_commit = master
+
+PACKAGES += lasse
+pkg_lasse_name = lasse
+pkg_lasse_description = SSE handler for Cowboy
+pkg_lasse_homepage = https://github.com/inaka/lasse
+pkg_lasse_fetch = git
+pkg_lasse_repo = https://github.com/inaka/lasse
+pkg_lasse_commit = master
+
+PACKAGES += ldap
+pkg_ldap_name = ldap
+pkg_ldap_description = LDAP server written in Erlang
+pkg_ldap_homepage = https://github.com/spawnproc/ldap
+pkg_ldap_fetch = git
+pkg_ldap_repo = https://github.com/spawnproc/ldap
+pkg_ldap_commit = master
+
+PACKAGES += lethink
+pkg_lethink_name = lethink
+pkg_lethink_description = erlang driver for rethinkdb
+pkg_lethink_homepage = https://github.com/taybin/lethink
+pkg_lethink_fetch = git
+pkg_lethink_repo = https://github.com/taybin/lethink
+pkg_lethink_commit = master
+
+PACKAGES += lfe
+pkg_lfe_name = lfe
+pkg_lfe_description = Lisp Flavoured Erlang (LFE)
+pkg_lfe_homepage = https://github.com/rvirding/lfe
+pkg_lfe_fetch = git
+pkg_lfe_repo = https://github.com/rvirding/lfe
+pkg_lfe_commit = master
+
+PACKAGES += ling
+pkg_ling_name = ling
+pkg_ling_description = Erlang on Xen
+pkg_ling_homepage = https://github.com/cloudozer/ling
+pkg_ling_fetch = git
+pkg_ling_repo = https://github.com/cloudozer/ling
+pkg_ling_commit = master
+
+PACKAGES += live
+pkg_live_name = live
+pkg_live_description = Automated module and configuration reloader.
+pkg_live_homepage = http://ninenines.eu
+pkg_live_fetch = git
+pkg_live_repo = https://github.com/ninenines/live
+pkg_live_commit = master
+
+PACKAGES += lmq
+pkg_lmq_name = lmq
+pkg_lmq_description = Lightweight Message Queue
+pkg_lmq_homepage = https://github.com/iij/lmq
+pkg_lmq_fetch = git
+pkg_lmq_repo = https://github.com/iij/lmq
+pkg_lmq_commit = master
+
+PACKAGES += locker
+pkg_locker_name = locker
+pkg_locker_description = Atomic distributed 'check and set' for short-lived keys
+pkg_locker_homepage = https://github.com/wooga/locker
+pkg_locker_fetch = git
+pkg_locker_repo = https://github.com/wooga/locker
+pkg_locker_commit = master
+
+PACKAGES += locks
+pkg_locks_name = locks
+pkg_locks_description = A scalable, deadlock-resolving resource locker
+pkg_locks_homepage = https://github.com/uwiger/locks
+pkg_locks_fetch = git
+pkg_locks_repo = https://github.com/uwiger/locks
+pkg_locks_commit = master
+
+PACKAGES += log4erl
+pkg_log4erl_name = log4erl
+pkg_log4erl_description = A logger for erlang in the spirit of Log4J.
+pkg_log4erl_homepage = https://github.com/ahmednawras/log4erl
+pkg_log4erl_fetch = git
+pkg_log4erl_repo = https://github.com/ahmednawras/log4erl
+pkg_log4erl_commit = master
+
+PACKAGES += lol
+pkg_lol_name = lol
+pkg_lol_description = Lisp on erLang, and programming is fun again
+pkg_lol_homepage = https://github.com/b0oh/lol
+pkg_lol_fetch = git
+pkg_lol_repo = https://github.com/b0oh/lol
+pkg_lol_commit = master
+
+PACKAGES += lucid
+pkg_lucid_name = lucid
+pkg_lucid_description = HTTP/2 server written in Erlang
+pkg_lucid_homepage = https://github.com/tatsuhiro-t/lucid
+pkg_lucid_fetch = git
+pkg_lucid_repo = https://github.com/tatsuhiro-t/lucid
+pkg_lucid_commit = master
+
+PACKAGES += luerl
+pkg_luerl_name = luerl
+pkg_luerl_description = Lua in Erlang
+pkg_luerl_homepage = https://github.com/rvirding/luerl
+pkg_luerl_fetch = git
+pkg_luerl_repo = https://github.com/rvirding/luerl
+pkg_luerl_commit = develop
+
+PACKAGES += luwak
+pkg_luwak_name = luwak
+pkg_luwak_description = Large-object storage interface for Riak
+pkg_luwak_homepage = https://github.com/basho/luwak
+pkg_luwak_fetch = git
+pkg_luwak_repo = https://github.com/basho/luwak
+pkg_luwak_commit = master
+
+PACKAGES += lux
+pkg_lux_name = lux
+pkg_lux_description = Lux (LUcid eXpect scripting) simplifies test automation and provides an Expect-style execution of commands
+pkg_lux_homepage = https://github.com/hawk/lux
+pkg_lux_fetch = git
+pkg_lux_repo = https://github.com/hawk/lux
+pkg_lux_commit = master
+
+PACKAGES += machi
+pkg_machi_name = machi
+pkg_machi_description = Machi file store
+pkg_machi_homepage = https://github.com/basho/machi
+pkg_machi_fetch = git
+pkg_machi_repo = https://github.com/basho/machi
+pkg_machi_commit = master
+
+PACKAGES += mad
+pkg_mad_name = mad
+pkg_mad_description = Small and Fast Rebar Replacement
+pkg_mad_homepage = https://github.com/synrc/mad
+pkg_mad_fetch = git
+pkg_mad_repo = https://github.com/synrc/mad
+pkg_mad_commit = master
+
+PACKAGES += marina
+pkg_marina_name = marina
+pkg_marina_description = Non-blocking Erlang Cassandra CQL3 client
+pkg_marina_homepage = https://github.com/lpgauth/marina
+pkg_marina_fetch = git
+pkg_marina_repo = https://github.com/lpgauth/marina
+pkg_marina_commit = master
+
+PACKAGES += mavg
+pkg_mavg_name = mavg
+pkg_mavg_description = Erlang :: Exponential moving average library
+pkg_mavg_homepage = https://github.com/EchoTeam/mavg
+pkg_mavg_fetch = git
+pkg_mavg_repo = https://github.com/EchoTeam/mavg
+pkg_mavg_commit = master
+
+PACKAGES += mc_erl
+pkg_mc_erl_name = mc_erl
+pkg_mc_erl_description = mc-erl is a server for Minecraft 1.4.7 written in Erlang.
+pkg_mc_erl_homepage = https://github.com/clonejo/mc-erl
+pkg_mc_erl_fetch = git
+pkg_mc_erl_repo = https://github.com/clonejo/mc-erl
+pkg_mc_erl_commit = master
+
+PACKAGES += mcd
+pkg_mcd_name = mcd
+pkg_mcd_description = Fast memcached protocol client in pure Erlang
+pkg_mcd_homepage = https://github.com/EchoTeam/mcd
+pkg_mcd_fetch = git
+pkg_mcd_repo = https://github.com/EchoTeam/mcd
+pkg_mcd_commit = master
+
+PACKAGES += mcerlang
+pkg_mcerlang_name = mcerlang
+pkg_mcerlang_description = The McErlang model checker for Erlang
+pkg_mcerlang_homepage = https://github.com/fredlund/McErlang
+pkg_mcerlang_fetch = git
+pkg_mcerlang_repo = https://github.com/fredlund/McErlang
+pkg_mcerlang_commit = master
+
+PACKAGES += meck
+pkg_meck_name = meck
+pkg_meck_description = A mocking library for Erlang
+pkg_meck_homepage = https://github.com/eproxus/meck
+pkg_meck_fetch = git
+pkg_meck_repo = https://github.com/eproxus/meck
+pkg_meck_commit = master
+
+PACKAGES += mekao
+pkg_mekao_name = mekao
+pkg_mekao_description = SQL constructor
+pkg_mekao_homepage = https://github.com/ddosia/mekao
+pkg_mekao_fetch = git
+pkg_mekao_repo = https://github.com/ddosia/mekao
+pkg_mekao_commit = master
+
+PACKAGES += memo
+pkg_memo_name = memo
+pkg_memo_description = Erlang memoization server
+pkg_memo_homepage = https://github.com/tuncer/memo
+pkg_memo_fetch = git
+pkg_memo_repo = https://github.com/tuncer/memo
+pkg_memo_commit = master
+
+PACKAGES += merge_index
+pkg_merge_index_name = merge_index
+pkg_merge_index_description = MergeIndex is an Erlang library for storing ordered sets on disk. It is very similar to an SSTable (in Google's Bigtable) or an HFile (in Hadoop).
+pkg_merge_index_homepage = https://github.com/basho/merge_index
+pkg_merge_index_fetch = git
+pkg_merge_index_repo = https://github.com/basho/merge_index
+pkg_merge_index_commit = master
+
+PACKAGES += merl
+pkg_merl_name = merl
+pkg_merl_description = Metaprogramming in Erlang
+pkg_merl_homepage = https://github.com/richcarl/merl
+pkg_merl_fetch = git
+pkg_merl_repo = https://github.com/richcarl/merl
+pkg_merl_commit = master
+
+PACKAGES += mimerl
+pkg_mimerl_name = mimerl
+pkg_mimerl_description = library to handle mimetypes
+pkg_mimerl_homepage = https://github.com/benoitc/mimerl
+pkg_mimerl_fetch = git
+pkg_mimerl_repo = https://github.com/benoitc/mimerl
+pkg_mimerl_commit = master
+
+PACKAGES += mimetypes
+pkg_mimetypes_name = mimetypes
+pkg_mimetypes_description = Erlang MIME types library
+pkg_mimetypes_homepage = https://github.com/spawngrid/mimetypes
+pkg_mimetypes_fetch = git
+pkg_mimetypes_repo = https://github.com/spawngrid/mimetypes
+pkg_mimetypes_commit = master
+
+PACKAGES += mixer
+pkg_mixer_name = mixer
+pkg_mixer_description = Mix in functions from other modules
+pkg_mixer_homepage = https://github.com/chef/mixer
+pkg_mixer_fetch = git
+pkg_mixer_repo = https://github.com/chef/mixer
+pkg_mixer_commit = master
+
+PACKAGES += mochiweb
+pkg_mochiweb_name = mochiweb
+pkg_mochiweb_description = MochiWeb is an Erlang library for building lightweight HTTP servers.
+pkg_mochiweb_homepage = https://github.com/mochi/mochiweb
+pkg_mochiweb_fetch = git
+pkg_mochiweb_repo = https://github.com/mochi/mochiweb
+pkg_mochiweb_commit = master
+
+PACKAGES += mochiweb_xpath
+pkg_mochiweb_xpath_name = mochiweb_xpath
+pkg_mochiweb_xpath_description = XPath support for mochiweb's html parser
+pkg_mochiweb_xpath_homepage = https://github.com/retnuh/mochiweb_xpath
+pkg_mochiweb_xpath_fetch = git
+pkg_mochiweb_xpath_repo = https://github.com/retnuh/mochiweb_xpath
+pkg_mochiweb_xpath_commit = master
+
+PACKAGES += mockgyver
+pkg_mockgyver_name = mockgyver
+pkg_mockgyver_description = A mocking library for Erlang
+pkg_mockgyver_homepage = https://github.com/klajo/mockgyver
+pkg_mockgyver_fetch = git
+pkg_mockgyver_repo = https://github.com/klajo/mockgyver
+pkg_mockgyver_commit = master
+
+PACKAGES += modlib
+pkg_modlib_name = modlib
+pkg_modlib_description = Web framework based on Erlang's inets httpd
+pkg_modlib_homepage = https://github.com/gar1t/modlib
+pkg_modlib_fetch = git
+pkg_modlib_repo = https://github.com/gar1t/modlib
+pkg_modlib_commit = master
+
+PACKAGES += mongodb
+pkg_mongodb_name = mongodb
+pkg_mongodb_description = MongoDB driver for Erlang
+pkg_mongodb_homepage = https://github.com/comtihon/mongodb-erlang
+pkg_mongodb_fetch = git
+pkg_mongodb_repo = https://github.com/comtihon/mongodb-erlang
+pkg_mongodb_commit = master
+
+PACKAGES += mongooseim
+pkg_mongooseim_name = mongooseim
+pkg_mongooseim_description = Jabber / XMPP server with focus on performance and scalability, by Erlang Solutions
+pkg_mongooseim_homepage = https://www.erlang-solutions.com/products/mongooseim-massively-scalable-ejabberd-platform
+pkg_mongooseim_fetch = git
+pkg_mongooseim_repo = https://github.com/esl/MongooseIM
+pkg_mongooseim_commit = master
+
+PACKAGES += moyo
+pkg_moyo_name = moyo
+pkg_moyo_description = Erlang utility functions library
+pkg_moyo_homepage = https://github.com/dwango/moyo
+pkg_moyo_fetch = git
+pkg_moyo_repo = https://github.com/dwango/moyo
+pkg_moyo_commit = master
+
+PACKAGES += msgpack
+pkg_msgpack_name = msgpack
+pkg_msgpack_description = MessagePack (de)serializer implementation for Erlang
+pkg_msgpack_homepage = https://github.com/msgpack/msgpack-erlang
+pkg_msgpack_fetch = git
+pkg_msgpack_repo = https://github.com/msgpack/msgpack-erlang
+pkg_msgpack_commit = master
+
+PACKAGES += mu2
+pkg_mu2_name = mu2
+pkg_mu2_description = Erlang mutation testing tool
+pkg_mu2_homepage = https://github.com/ramsay-t/mu2
+pkg_mu2_fetch = git
+pkg_mu2_repo = https://github.com/ramsay-t/mu2
+pkg_mu2_commit = master
+
+PACKAGES += mustache
+pkg_mustache_name = mustache
+pkg_mustache_description = Mustache template engine for Erlang.
+pkg_mustache_homepage = https://github.com/mojombo/mustache.erl
+pkg_mustache_fetch = git
+pkg_mustache_repo = https://github.com/mojombo/mustache.erl
+pkg_mustache_commit = master
+
+PACKAGES += myproto
+pkg_myproto_name = myproto
+pkg_myproto_description = MySQL Server Protocol in Erlang
+pkg_myproto_homepage = https://github.com/altenwald/myproto
+pkg_myproto_fetch = git
+pkg_myproto_repo = https://github.com/altenwald/myproto
+pkg_myproto_commit = master
+
+PACKAGES += mysql
+pkg_mysql_name = mysql
+pkg_mysql_description = MySQL client library for Erlang/OTP
+pkg_mysql_homepage = https://github.com/mysql-otp/mysql-otp
+pkg_mysql_fetch = git
+pkg_mysql_repo = https://github.com/mysql-otp/mysql-otp
+pkg_mysql_commit = 1.5.1
+
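+# Note that mysql above pins a release tag (1.5.1) rather than master. A
+# project can override the ref for any package before including erlang.mk,
+# e.g.:
+#
+#     DEPS = mysql
+#     dep_mysql_commit = master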
+PACKAGES += n2o
+pkg_n2o_name = n2o
+pkg_n2o_description = WebSocket Application Server
+pkg_n2o_homepage = https://github.com/5HT/n2o
+pkg_n2o_fetch = git
+pkg_n2o_repo = https://github.com/5HT/n2o
+pkg_n2o_commit = master
+
+PACKAGES += nat_upnp
+pkg_nat_upnp_name = nat_upnp
+pkg_nat_upnp_description = Erlang library to map your internal port to an external one using UPnP IGD
+pkg_nat_upnp_homepage = https://github.com/benoitc/nat_upnp
+pkg_nat_upnp_fetch = git
+pkg_nat_upnp_repo = https://github.com/benoitc/nat_upnp
+pkg_nat_upnp_commit = master
+
+PACKAGES += neo4j
+pkg_neo4j_name = neo4j
+pkg_neo4j_description = Erlang client library for Neo4J.
+pkg_neo4j_homepage = https://github.com/dmitriid/neo4j-erlang
+pkg_neo4j_fetch = git
+pkg_neo4j_repo = https://github.com/dmitriid/neo4j-erlang
+pkg_neo4j_commit = master
+
+PACKAGES += neotoma
+pkg_neotoma_name = neotoma
+pkg_neotoma_description = Erlang library and packrat parser-generator for parsing expression grammars.
+pkg_neotoma_homepage = https://github.com/seancribbs/neotoma
+pkg_neotoma_fetch = git
+pkg_neotoma_repo = https://github.com/seancribbs/neotoma
+pkg_neotoma_commit = master
+
+PACKAGES += newrelic
+pkg_newrelic_name = newrelic
+pkg_newrelic_description = Erlang library for sending metrics to New Relic
+pkg_newrelic_homepage = https://github.com/wooga/newrelic-erlang
+pkg_newrelic_fetch = git
+pkg_newrelic_repo = https://github.com/wooga/newrelic-erlang
+pkg_newrelic_commit = master
+
+PACKAGES += nifty
+pkg_nifty_name = nifty
+pkg_nifty_description = Erlang NIF wrapper generator
+pkg_nifty_homepage = https://github.com/parapluu/nifty
+pkg_nifty_fetch = git
+pkg_nifty_repo = https://github.com/parapluu/nifty
+pkg_nifty_commit = master
+
+PACKAGES += nitrogen_core
+pkg_nitrogen_core_name = nitrogen_core
+pkg_nitrogen_core_description = The core Nitrogen library.
+pkg_nitrogen_core_homepage = http://nitrogenproject.com/
+pkg_nitrogen_core_fetch = git
+pkg_nitrogen_core_repo = https://github.com/nitrogen/nitrogen_core
+pkg_nitrogen_core_commit = master
+
+PACKAGES += nkbase
+pkg_nkbase_name = nkbase
+pkg_nkbase_description = NkBASE distributed database
+pkg_nkbase_homepage = https://github.com/Nekso/nkbase
+pkg_nkbase_fetch = git
+pkg_nkbase_repo = https://github.com/Nekso/nkbase
+pkg_nkbase_commit = develop
+
+PACKAGES += nkdocker
+pkg_nkdocker_name = nkdocker
+pkg_nkdocker_description = Erlang Docker client
+pkg_nkdocker_homepage = https://github.com/Nekso/nkdocker
+pkg_nkdocker_fetch = git
+pkg_nkdocker_repo = https://github.com/Nekso/nkdocker
+pkg_nkdocker_commit = master
+
+PACKAGES += nkpacket
+pkg_nkpacket_name = nkpacket
+pkg_nkpacket_description = Generic Erlang transport layer
+pkg_nkpacket_homepage = https://github.com/Nekso/nkpacket
+pkg_nkpacket_fetch = git
+pkg_nkpacket_repo = https://github.com/Nekso/nkpacket
+pkg_nkpacket_commit = master
+
+PACKAGES += nksip
+pkg_nksip_name = nksip
+pkg_nksip_description = Erlang SIP application server
+pkg_nksip_homepage = https://github.com/kalta/nksip
+pkg_nksip_fetch = git
+pkg_nksip_repo = https://github.com/kalta/nksip
+pkg_nksip_commit = master
+
+PACKAGES += nodefinder
+pkg_nodefinder_name = nodefinder
+pkg_nodefinder_description = automatic node discovery via UDP multicast
+pkg_nodefinder_homepage = https://github.com/erlanger/nodefinder
+pkg_nodefinder_fetch = git
+pkg_nodefinder_repo = https://github.com/okeuday/nodefinder
+pkg_nodefinder_commit = master
+
+PACKAGES += nprocreg
+pkg_nprocreg_name = nprocreg
+pkg_nprocreg_description = Minimal Distributed Erlang Process Registry
+pkg_nprocreg_homepage = http://nitrogenproject.com/
+pkg_nprocreg_fetch = git
+pkg_nprocreg_repo = https://github.com/nitrogen/nprocreg
+pkg_nprocreg_commit = master
+
+PACKAGES += oauth
+pkg_oauth_name = oauth
+pkg_oauth_description = An Erlang OAuth 1.0 implementation
+pkg_oauth_homepage = https://github.com/tim/erlang-oauth
+pkg_oauth_fetch = git
+pkg_oauth_repo = https://github.com/tim/erlang-oauth
+pkg_oauth_commit = master
+
+PACKAGES += oauth2
+pkg_oauth2_name = oauth2
+pkg_oauth2_description = Erlang Oauth2 implementation
+pkg_oauth2_homepage = https://github.com/kivra/oauth2
+pkg_oauth2_fetch = git
+pkg_oauth2_repo = https://github.com/kivra/oauth2
+pkg_oauth2_commit = master
+
+PACKAGES += observer_cli
+pkg_observer_cli_name = observer_cli
+pkg_observer_cli_description = Visualize Erlang/Elixir Nodes On The Command Line
+pkg_observer_cli_homepage = http://zhongwencool.github.io/observer_cli
+pkg_observer_cli_fetch = git
+pkg_observer_cli_repo = https://github.com/zhongwencool/observer_cli
+pkg_observer_cli_commit = master
+
+PACKAGES += octopus
+pkg_octopus_name = octopus
+pkg_octopus_description = Small and flexible pool manager written in Erlang
+pkg_octopus_homepage = https://github.com/erlangbureau/octopus
+pkg_octopus_fetch = git
+pkg_octopus_repo = https://github.com/erlangbureau/octopus
+pkg_octopus_commit = master
+
+PACKAGES += of_protocol
+pkg_of_protocol_name = of_protocol
+pkg_of_protocol_description = OpenFlow Protocol Library for Erlang
+pkg_of_protocol_homepage = https://github.com/FlowForwarding/of_protocol
+pkg_of_protocol_fetch = git
+pkg_of_protocol_repo = https://github.com/FlowForwarding/of_protocol
+pkg_of_protocol_commit = master
+
+PACKAGES += opencouch
+pkg_opencouch_name = couch
+pkg_opencouch_description = An embeddable document-oriented database compatible with Apache CouchDB
+pkg_opencouch_homepage = https://github.com/benoitc/opencouch
+pkg_opencouch_fetch = git
+pkg_opencouch_repo = https://github.com/benoitc/opencouch
+pkg_opencouch_commit = master
+
+PACKAGES += openflow
+pkg_openflow_name = openflow
+pkg_openflow_description = An OpenFlow controller written in pure erlang
+pkg_openflow_homepage = https://github.com/renatoaguiar/erlang-openflow
+pkg_openflow_fetch = git
+pkg_openflow_repo = https://github.com/renatoaguiar/erlang-openflow
+pkg_openflow_commit = master
+
+PACKAGES += openid
+pkg_openid_name = openid
+pkg_openid_description = Erlang OpenID
+pkg_openid_homepage = https://github.com/brendonh/erl_openid
+pkg_openid_fetch = git
+pkg_openid_repo = https://github.com/brendonh/erl_openid
+pkg_openid_commit = master
+
+PACKAGES += openpoker
+pkg_openpoker_name = openpoker
+pkg_openpoker_description = Genesis Texas hold'em Game Server
+pkg_openpoker_homepage = https://github.com/hpyhacking/openpoker
+pkg_openpoker_fetch = git
+pkg_openpoker_repo = https://github.com/hpyhacking/openpoker
+pkg_openpoker_commit = master
+
+PACKAGES += otpbp
+pkg_otpbp_name = otpbp
+pkg_otpbp_description = Parse transform for using new OTP functions in old Erlang/OTP releases (R15, R16, 17, 18, 19)
+pkg_otpbp_homepage = https://github.com/Ledest/otpbp
+pkg_otpbp_fetch = git
+pkg_otpbp_repo = https://github.com/Ledest/otpbp
+pkg_otpbp_commit = master
+
+PACKAGES += pal
+pkg_pal_name = pal
+pkg_pal_description = Pragmatic Authentication Library
+pkg_pal_homepage = https://github.com/manifest/pal
+pkg_pal_fetch = git
+pkg_pal_repo = https://github.com/manifest/pal
+pkg_pal_commit = master
+
+PACKAGES += parse_trans
+pkg_parse_trans_name = parse_trans
+pkg_parse_trans_description = Parse transform utilities for Erlang
+pkg_parse_trans_homepage = https://github.com/uwiger/parse_trans
+pkg_parse_trans_fetch = git
+pkg_parse_trans_repo = https://github.com/uwiger/parse_trans
+pkg_parse_trans_commit = master
+
+PACKAGES += parsexml
+pkg_parsexml_name = parsexml
+pkg_parsexml_description = Simple DOM XML parser with convenient and very simple API
+pkg_parsexml_homepage = https://github.com/maxlapshin/parsexml
+pkg_parsexml_fetch = git
+pkg_parsexml_repo = https://github.com/maxlapshin/parsexml
+pkg_parsexml_commit = master
+
+PACKAGES += partisan
+pkg_partisan_name = partisan
+pkg_partisan_description = High-performance, high-scalability distributed computing with Erlang and Elixir.
+pkg_partisan_homepage = http://partisan.cloud
+pkg_partisan_fetch = git
+pkg_partisan_repo = https://github.com/lasp-lang/partisan
+pkg_partisan_commit = master
+
+PACKAGES += pegjs
+pkg_pegjs_name = pegjs
+pkg_pegjs_description = An implementation of PEG.js grammar for Erlang.
+pkg_pegjs_homepage = https://github.com/dmitriid/pegjs
+pkg_pegjs_fetch = git
+pkg_pegjs_repo = https://github.com/dmitriid/pegjs
+pkg_pegjs_commit = master
+
+PACKAGES += percept2
+pkg_percept2_name = percept2
+pkg_percept2_description = Concurrent profiling tool for Erlang
+pkg_percept2_homepage = https://github.com/huiqing/percept2
+pkg_percept2_fetch = git
+pkg_percept2_repo = https://github.com/huiqing/percept2
+pkg_percept2_commit = master
+
+PACKAGES += pgo
+pkg_pgo_name = pgo
+pkg_pgo_description = Erlang Postgres client and connection pool
+pkg_pgo_homepage = https://github.com/erleans/pgo
+pkg_pgo_fetch = git
+pkg_pgo_repo = https://github.com/erleans/pgo.git
+pkg_pgo_commit = master
+
+PACKAGES += pgsql
+pkg_pgsql_name = pgsql
+pkg_pgsql_description = Erlang PostgreSQL driver
+pkg_pgsql_homepage = https://github.com/semiocast/pgsql
+pkg_pgsql_fetch = git
+pkg_pgsql_repo = https://github.com/semiocast/pgsql
+pkg_pgsql_commit = master
+
+PACKAGES += pkgx
+pkg_pkgx_name = pkgx
+pkg_pkgx_description = Build .deb packages from Erlang releases
+pkg_pkgx_homepage = https://github.com/arjan/pkgx
+pkg_pkgx_fetch = git
+pkg_pkgx_repo = https://github.com/arjan/pkgx
+pkg_pkgx_commit = master
+
+PACKAGES += pkt
+pkg_pkt_name = pkt
+pkg_pkt_description = Erlang network protocol library
+pkg_pkt_homepage = https://github.com/msantos/pkt
+pkg_pkt_fetch = git
+pkg_pkt_repo = https://github.com/msantos/pkt
+pkg_pkt_commit = master
+
+PACKAGES += plain_fsm
+pkg_plain_fsm_name = plain_fsm
+pkg_plain_fsm_description = A behaviour/support library for writing plain Erlang FSMs.
+pkg_plain_fsm_homepage = https://github.com/uwiger/plain_fsm
+pkg_plain_fsm_fetch = git
+pkg_plain_fsm_repo = https://github.com/uwiger/plain_fsm
+pkg_plain_fsm_commit = master
+
+PACKAGES += plumtree
+pkg_plumtree_name = plumtree
+pkg_plumtree_description = Epidemic Broadcast Trees
+pkg_plumtree_homepage = https://github.com/helium/plumtree
+pkg_plumtree_fetch = git
+pkg_plumtree_repo = https://github.com/helium/plumtree
+pkg_plumtree_commit = master
+
+PACKAGES += pmod_transform
+pkg_pmod_transform_name = pmod_transform
+pkg_pmod_transform_description = Parse transform for parameterized modules
+pkg_pmod_transform_homepage = https://github.com/erlang/pmod_transform
+pkg_pmod_transform_fetch = git
+pkg_pmod_transform_repo = https://github.com/erlang/pmod_transform
+pkg_pmod_transform_commit = master
+
+PACKAGES += pobox
+pkg_pobox_name = pobox
+pkg_pobox_description = External buffer processes to protect against mailbox overflow in Erlang
+pkg_pobox_homepage = https://github.com/ferd/pobox
+pkg_pobox_fetch = git
+pkg_pobox_repo = https://github.com/ferd/pobox
+pkg_pobox_commit = master
+
+PACKAGES += ponos
+pkg_ponos_name = ponos
+pkg_ponos_description = ponos is a simple yet powerful load generator written in erlang
+pkg_ponos_homepage = https://github.com/klarna/ponos
+pkg_ponos_fetch = git
+pkg_ponos_repo = https://github.com/klarna/ponos
+pkg_ponos_commit = master
+
+PACKAGES += poolboy
+pkg_poolboy_name = poolboy
+pkg_poolboy_description = A hunky Erlang worker pool factory
+pkg_poolboy_homepage = https://github.com/devinus/poolboy
+pkg_poolboy_fetch = git
+pkg_poolboy_repo = https://github.com/devinus/poolboy
+pkg_poolboy_commit = master
+
+PACKAGES += pooler
+pkg_pooler_name = pooler
+pkg_pooler_description = An OTP Process Pool Application
+pkg_pooler_homepage = https://github.com/seth/pooler
+pkg_pooler_fetch = git
+pkg_pooler_repo = https://github.com/seth/pooler
+pkg_pooler_commit = master
+
+PACKAGES += pqueue
+pkg_pqueue_name = pqueue
+pkg_pqueue_description = Erlang Priority Queues
+pkg_pqueue_homepage = https://github.com/okeuday/pqueue
+pkg_pqueue_fetch = git
+pkg_pqueue_repo = https://github.com/okeuday/pqueue
+pkg_pqueue_commit = master
+
+PACKAGES += procket
+pkg_procket_name = procket
+pkg_procket_description = Erlang interface to low level socket operations
+pkg_procket_homepage = http://blog.listincomprehension.com/search/label/procket
+pkg_procket_fetch = git
+pkg_procket_repo = https://github.com/msantos/procket
+pkg_procket_commit = master
+
+PACKAGES += prometheus
+pkg_prometheus_name = prometheus
+pkg_prometheus_description = Prometheus.io client in Erlang
+pkg_prometheus_homepage = https://github.com/deadtrickster/prometheus.erl
+pkg_prometheus_fetch = git
+pkg_prometheus_repo = https://github.com/deadtrickster/prometheus.erl
+pkg_prometheus_commit = master
+
+PACKAGES += prop
+pkg_prop_name = prop
+pkg_prop_description = An Erlang code scaffolding and generator system.
+pkg_prop_homepage = https://github.com/nuex/prop
+pkg_prop_fetch = git
+pkg_prop_repo = https://github.com/nuex/prop
+pkg_prop_commit = master
+
+PACKAGES += proper
+pkg_proper_name = proper
+pkg_proper_description = PropEr: a QuickCheck-inspired property-based testing tool for Erlang.
+pkg_proper_homepage = http://proper.softlab.ntua.gr
+pkg_proper_fetch = git
+pkg_proper_repo = https://github.com/manopapad/proper
+pkg_proper_commit = master
+
+PACKAGES += props
+pkg_props_name = props
+pkg_props_description = Property structure library
+pkg_props_homepage = https://github.com/greyarea/props
+pkg_props_fetch = git
+pkg_props_repo = https://github.com/greyarea/props
+pkg_props_commit = master
+
+PACKAGES += protobuffs
+pkg_protobuffs_name = protobuffs
+pkg_protobuffs_description = An implementation of Google's Protocol Buffers for Erlang, based on ngerakines/erlang_protobuffs.
+pkg_protobuffs_homepage = https://github.com/basho/erlang_protobuffs
+pkg_protobuffs_fetch = git
+pkg_protobuffs_repo = https://github.com/basho/erlang_protobuffs
+pkg_protobuffs_commit = master
+
+PACKAGES += psycho
+pkg_psycho_name = psycho
+pkg_psycho_description = HTTP server that provides a WSGI-like interface for applications and middleware.
+pkg_psycho_homepage = https://github.com/gar1t/psycho
+pkg_psycho_fetch = git
+pkg_psycho_repo = https://github.com/gar1t/psycho
+pkg_psycho_commit = master
+
+PACKAGES += purity
+pkg_purity_name = purity
+pkg_purity_description = A side-effect analyzer for Erlang
+pkg_purity_homepage = https://github.com/mpitid/purity
+pkg_purity_fetch = git
+pkg_purity_repo = https://github.com/mpitid/purity
+pkg_purity_commit = master
+
+PACKAGES += push_service
+pkg_push_service_name = push_service
+pkg_push_service_description = Push service
+pkg_push_service_homepage = https://github.com/hairyhum/push_service
+pkg_push_service_fetch = git
+pkg_push_service_repo = https://github.com/hairyhum/push_service
+pkg_push_service_commit = master
+
+PACKAGES += qdate
+pkg_qdate_name = qdate
+pkg_qdate_description = Date, time, and timezone parsing, formatting, and conversion for Erlang.
+pkg_qdate_homepage = https://github.com/choptastic/qdate
+pkg_qdate_fetch = git
+pkg_qdate_repo = https://github.com/choptastic/qdate
+pkg_qdate_commit = master
+
+PACKAGES += qrcode
+pkg_qrcode_name = qrcode
+pkg_qrcode_description = QR Code encoder in Erlang
+pkg_qrcode_homepage = https://github.com/komone/qrcode
+pkg_qrcode_fetch = git
+pkg_qrcode_repo = https://github.com/komone/qrcode
+pkg_qrcode_commit = master
+
+PACKAGES += quest
+pkg_quest_name = quest
+pkg_quest_description = Learn Erlang through this set of challenges. An interactive system for getting to know Erlang.
+pkg_quest_homepage = https://github.com/eriksoe/ErlangQuest
+pkg_quest_fetch = git
+pkg_quest_repo = https://github.com/eriksoe/ErlangQuest
+pkg_quest_commit = master
+
+PACKAGES += quickrand
+pkg_quickrand_name = quickrand
+pkg_quickrand_description = Quick Erlang Random Number Generation
+pkg_quickrand_homepage = https://github.com/okeuday/quickrand
+pkg_quickrand_fetch = git
+pkg_quickrand_repo = https://github.com/okeuday/quickrand
+pkg_quickrand_commit = master
+
+PACKAGES += rabbit
+pkg_rabbit_name = rabbit
+pkg_rabbit_description = RabbitMQ Server
+pkg_rabbit_homepage = https://www.rabbitmq.com/
+pkg_rabbit_fetch = git
+pkg_rabbit_repo = https://github.com/rabbitmq/rabbitmq-server.git
+pkg_rabbit_commit = master
+
+PACKAGES += rabbit_exchange_type_riak
+pkg_rabbit_exchange_type_riak_name = rabbit_exchange_type_riak
+pkg_rabbit_exchange_type_riak_description = Custom RabbitMQ exchange type for sticking messages in Riak
+pkg_rabbit_exchange_type_riak_homepage = https://github.com/jbrisbin/riak-exchange
+pkg_rabbit_exchange_type_riak_fetch = git
+pkg_rabbit_exchange_type_riak_repo = https://github.com/jbrisbin/riak-exchange
+pkg_rabbit_exchange_type_riak_commit = master
+
+PACKAGES += rack
+pkg_rack_name = rack
+pkg_rack_description = Rack handler for erlang
+pkg_rack_homepage = https://github.com/erlyvideo/rack
+pkg_rack_fetch = git
+pkg_rack_repo = https://github.com/erlyvideo/rack
+pkg_rack_commit = master
+
+PACKAGES += radierl
+pkg_radierl_name = radierl
+pkg_radierl_description = RADIUS protocol stack implemented in Erlang.
+pkg_radierl_homepage = https://github.com/vances/radierl
+pkg_radierl_fetch = git
+pkg_radierl_repo = https://github.com/vances/radierl
+pkg_radierl_commit = master
+
+PACKAGES += rafter
+pkg_rafter_name = rafter
+pkg_rafter_description = An Erlang library application which implements the Raft consensus protocol
+pkg_rafter_homepage = https://github.com/andrewjstone/rafter
+pkg_rafter_fetch = git
+pkg_rafter_repo = https://github.com/andrewjstone/rafter
+pkg_rafter_commit = master
+
+PACKAGES += ranch
+pkg_ranch_name = ranch
+pkg_ranch_description = Socket acceptor pool for TCP protocols.
+pkg_ranch_homepage = http://ninenines.eu
+pkg_ranch_fetch = git
+pkg_ranch_repo = https://github.com/ninenines/ranch
+pkg_ranch_commit = 1.2.1
+
+PACKAGES += rbeacon
+pkg_rbeacon_name = rbeacon
+pkg_rbeacon_description = LAN discovery and presence in Erlang.
+pkg_rbeacon_homepage = https://github.com/refuge/rbeacon
+pkg_rbeacon_fetch = git
+pkg_rbeacon_repo = https://github.com/refuge/rbeacon
+pkg_rbeacon_commit = master
+
+PACKAGES += rebar
+pkg_rebar_name = rebar
+pkg_rebar_description = Erlang build tool that makes it easy to compile and test Erlang applications, port drivers and releases.
+pkg_rebar_homepage = http://www.rebar3.org
+pkg_rebar_fetch = git
+pkg_rebar_repo = https://github.com/rebar/rebar3
+pkg_rebar_commit = master
+
+PACKAGES += rebus
+pkg_rebus_name = rebus
+pkg_rebus_description = A stupid simple, internal, pub/sub event bus written in- and for Erlang.
+pkg_rebus_homepage = https://github.com/olle/rebus
+pkg_rebus_fetch = git
+pkg_rebus_repo = https://github.com/olle/rebus
+pkg_rebus_commit = master
+
+PACKAGES += rec2json
+pkg_rec2json_name = rec2json
+pkg_rec2json_description = Compile erlang record definitions into modules to convert them to/from json easily.
+pkg_rec2json_homepage = https://github.com/lordnull/rec2json
+pkg_rec2json_fetch = git
+pkg_rec2json_repo = https://github.com/lordnull/rec2json
+pkg_rec2json_commit = master
+
+PACKAGES += recon
+pkg_recon_name = recon
+pkg_recon_description = Collection of functions and scripts to debug Erlang in production.
+pkg_recon_homepage = https://github.com/ferd/recon
+pkg_recon_fetch = git
+pkg_recon_repo = https://github.com/ferd/recon
+pkg_recon_commit = master
+
+PACKAGES += record_info
+pkg_record_info_name = record_info
+pkg_record_info_description = Convert between record and proplist
+pkg_record_info_homepage = https://github.com/bipthelin/erlang-record_info
+pkg_record_info_fetch = git
+pkg_record_info_repo = https://github.com/bipthelin/erlang-record_info
+pkg_record_info_commit = master
+
+PACKAGES += redgrid
+pkg_redgrid_name = redgrid
+pkg_redgrid_description = automatic Erlang node discovery via redis
+pkg_redgrid_homepage = https://github.com/jkvor/redgrid
+pkg_redgrid_fetch = git
+pkg_redgrid_repo = https://github.com/jkvor/redgrid
+pkg_redgrid_commit = master
+
+PACKAGES += redo
+pkg_redo_name = redo
+pkg_redo_description = pipelined erlang redis client
+pkg_redo_homepage = https://github.com/jkvor/redo
+pkg_redo_fetch = git
+pkg_redo_repo = https://github.com/jkvor/redo
+pkg_redo_commit = master
+
+PACKAGES += reload_mk
+pkg_reload_mk_name = reload_mk
+pkg_reload_mk_description = Live reload plugin for erlang.mk.
+pkg_reload_mk_homepage = https://github.com/bullno1/reload.mk
+pkg_reload_mk_fetch = git
+pkg_reload_mk_repo = https://github.com/bullno1/reload.mk
+pkg_reload_mk_commit = master
+
+PACKAGES += reltool_util
+pkg_reltool_util_name = reltool_util
+pkg_reltool_util_description = Erlang reltool utility functionality application
+pkg_reltool_util_homepage = https://github.com/okeuday/reltool_util
+pkg_reltool_util_fetch = git
+pkg_reltool_util_repo = https://github.com/okeuday/reltool_util
+pkg_reltool_util_commit = master
+
+PACKAGES += relx
+pkg_relx_name = relx
+pkg_relx_description = Sane, simple release creation for Erlang
+pkg_relx_homepage = https://github.com/erlware/relx
+pkg_relx_fetch = git
+pkg_relx_repo = https://github.com/erlware/relx
+pkg_relx_commit = master
+
+PACKAGES += resource_discovery
+pkg_resource_discovery_name = resource_discovery
+pkg_resource_discovery_description = An application used to dynamically discover resources present in an Erlang node cluster.
+pkg_resource_discovery_homepage = http://erlware.org/
+pkg_resource_discovery_fetch = git
+pkg_resource_discovery_repo = https://github.com/erlware/resource_discovery
+pkg_resource_discovery_commit = master
+
+PACKAGES += restc
+pkg_restc_name = restc
+pkg_restc_description = Erlang Rest Client
+pkg_restc_homepage = https://github.com/kivra/restclient
+pkg_restc_fetch = git
+pkg_restc_repo = https://github.com/kivra/restclient
+pkg_restc_commit = master
+
+PACKAGES += rfc4627_jsonrpc
+pkg_rfc4627_jsonrpc_name = rfc4627_jsonrpc
+pkg_rfc4627_jsonrpc_description = Erlang RFC4627 (JSON) codec and JSON-RPC server implementation.
+pkg_rfc4627_jsonrpc_homepage = https://github.com/tonyg/erlang-rfc4627
+pkg_rfc4627_jsonrpc_fetch = git
+pkg_rfc4627_jsonrpc_repo = https://github.com/tonyg/erlang-rfc4627
+pkg_rfc4627_jsonrpc_commit = master
+
+PACKAGES += riak_control
+pkg_riak_control_name = riak_control
+pkg_riak_control_description = Webmachine-based administration interface for Riak.
+pkg_riak_control_homepage = https://github.com/basho/riak_control
+pkg_riak_control_fetch = git
+pkg_riak_control_repo = https://github.com/basho/riak_control
+pkg_riak_control_commit = master
+
+PACKAGES += riak_core
+pkg_riak_core_name = riak_core
+pkg_riak_core_description = Distributed systems infrastructure used by Riak.
+pkg_riak_core_homepage = https://github.com/basho/riak_core
+pkg_riak_core_fetch = git
+pkg_riak_core_repo = https://github.com/basho/riak_core
+pkg_riak_core_commit = master
+
+PACKAGES += riak_dt
+pkg_riak_dt_name = riak_dt
+pkg_riak_dt_description = Convergent replicated datatypes in Erlang
+pkg_riak_dt_homepage = https://github.com/basho/riak_dt
+pkg_riak_dt_fetch = git
+pkg_riak_dt_repo = https://github.com/basho/riak_dt
+pkg_riak_dt_commit = master
+
+PACKAGES += riak_ensemble
+pkg_riak_ensemble_name = riak_ensemble
+pkg_riak_ensemble_description = Multi-Paxos framework in Erlang
+pkg_riak_ensemble_homepage = https://github.com/basho/riak_ensemble
+pkg_riak_ensemble_fetch = git
+pkg_riak_ensemble_repo = https://github.com/basho/riak_ensemble
+pkg_riak_ensemble_commit = master
+
+PACKAGES += riak_kv
+pkg_riak_kv_name = riak_kv
+pkg_riak_kv_description = Riak Key/Value Store
+pkg_riak_kv_homepage = https://github.com/basho/riak_kv
+pkg_riak_kv_fetch = git
+pkg_riak_kv_repo = https://github.com/basho/riak_kv
+pkg_riak_kv_commit = master
+
+PACKAGES += riak_pg
+pkg_riak_pg_name = riak_pg
+pkg_riak_pg_description = Distributed process groups with riak_core.
+pkg_riak_pg_homepage = https://github.com/cmeiklejohn/riak_pg
+pkg_riak_pg_fetch = git
+pkg_riak_pg_repo = https://github.com/cmeiklejohn/riak_pg
+pkg_riak_pg_commit = master
+
+PACKAGES += riak_pipe
+pkg_riak_pipe_name = riak_pipe
+pkg_riak_pipe_description = Riak Pipelines
+pkg_riak_pipe_homepage = https://github.com/basho/riak_pipe
+pkg_riak_pipe_fetch = git
+pkg_riak_pipe_repo = https://github.com/basho/riak_pipe
+pkg_riak_pipe_commit = master
+
+PACKAGES += riak_sysmon
+pkg_riak_sysmon_name = riak_sysmon
+pkg_riak_sysmon_description = Simple OTP app for managing Erlang VM system_monitor event messages
+pkg_riak_sysmon_homepage = https://github.com/basho/riak_sysmon
+pkg_riak_sysmon_fetch = git
+pkg_riak_sysmon_repo = https://github.com/basho/riak_sysmon
+pkg_riak_sysmon_commit = master
+
+PACKAGES += riak_test
+pkg_riak_test_name = riak_test
+pkg_riak_test_description = I'm in your cluster, testing your riaks
+pkg_riak_test_homepage = https://github.com/basho/riak_test
+pkg_riak_test_fetch = git
+pkg_riak_test_repo = https://github.com/basho/riak_test
+pkg_riak_test_commit = master
+
+PACKAGES += riakc
+pkg_riakc_name = riakc
+pkg_riakc_description = Erlang clients for Riak.
+pkg_riakc_homepage = https://github.com/basho/riak-erlang-client
+pkg_riakc_fetch = git
+pkg_riakc_repo = https://github.com/basho/riak-erlang-client
+pkg_riakc_commit = master
+
+PACKAGES += riakhttpc
+pkg_riakhttpc_name = riakhttpc
+pkg_riakhttpc_description = Riak Erlang client using the HTTP interface
+pkg_riakhttpc_homepage = https://github.com/basho/riak-erlang-http-client
+pkg_riakhttpc_fetch = git
+pkg_riakhttpc_repo = https://github.com/basho/riak-erlang-http-client
+pkg_riakhttpc_commit = master
+
+PACKAGES += riaknostic
+pkg_riaknostic_name = riaknostic
+pkg_riaknostic_description = A diagnostic tool for Riak installations, to find common errors asap
+pkg_riaknostic_homepage = https://github.com/basho/riaknostic
+pkg_riaknostic_fetch = git
+pkg_riaknostic_repo = https://github.com/basho/riaknostic
+pkg_riaknostic_commit = master
+
+PACKAGES += riakpool
+pkg_riakpool_name = riakpool
+pkg_riakpool_description = erlang riak client pool
+pkg_riakpool_homepage = https://github.com/dweldon/riakpool
+pkg_riakpool_fetch = git
+pkg_riakpool_repo = https://github.com/dweldon/riakpool
+pkg_riakpool_commit = master
+
+PACKAGES += rivus_cep
+pkg_rivus_cep_name = rivus_cep
+pkg_rivus_cep_description = Complex event processing in Erlang
+pkg_rivus_cep_homepage = https://github.com/vascokk/rivus_cep
+pkg_rivus_cep_fetch = git
+pkg_rivus_cep_repo = https://github.com/vascokk/rivus_cep
+pkg_rivus_cep_commit = master
+
+PACKAGES += rlimit
+pkg_rlimit_name = rlimit
+pkg_rlimit_description = Magnus Klaar's rate limiter code from etorrent
+pkg_rlimit_homepage = https://github.com/jlouis/rlimit
+pkg_rlimit_fetch = git
+pkg_rlimit_repo = https://github.com/jlouis/rlimit
+pkg_rlimit_commit = master
+
+PACKAGES += rust_mk
+pkg_rust_mk_name = rust_mk
+pkg_rust_mk_description = Build Rust crates in an Erlang application
+pkg_rust_mk_homepage = https://github.com/goertzenator/rust.mk
+pkg_rust_mk_fetch = git
+pkg_rust_mk_repo = https://github.com/goertzenator/rust.mk
+pkg_rust_mk_commit = master
+
+PACKAGES += safetyvalve
+pkg_safetyvalve_name = safetyvalve
+pkg_safetyvalve_description = A safety valve for your erlang node
+pkg_safetyvalve_homepage = https://github.com/jlouis/safetyvalve
+pkg_safetyvalve_fetch = git
+pkg_safetyvalve_repo = https://github.com/jlouis/safetyvalve
+pkg_safetyvalve_commit = master
+
+PACKAGES += seestar
+pkg_seestar_name = seestar
+pkg_seestar_description = The Erlang client for Cassandra 1.2+ binary protocol
+pkg_seestar_homepage = https://github.com/iamaleksey/seestar
+pkg_seestar_fetch = git
+pkg_seestar_repo = https://github.com/iamaleksey/seestar
+pkg_seestar_commit = master
+
+PACKAGES += service
+pkg_service_name = service
+pkg_service_description = A minimal Erlang behavior for creating CloudI internal services
+pkg_service_homepage = http://cloudi.org/
+pkg_service_fetch = git
+pkg_service_repo = https://github.com/CloudI/service
+pkg_service_commit = master
+
+PACKAGES += setup
+pkg_setup_name = setup
+pkg_setup_description = Generic setup utility for Erlang-based systems
+pkg_setup_homepage = https://github.com/uwiger/setup
+pkg_setup_fetch = git
+pkg_setup_repo = https://github.com/uwiger/setup
+pkg_setup_commit = master
+
+PACKAGES += sext
+pkg_sext_name = sext
+pkg_sext_description = Sortable Erlang Term Serialization
+pkg_sext_homepage = https://github.com/uwiger/sext
+pkg_sext_fetch = git
+pkg_sext_repo = https://github.com/uwiger/sext
+pkg_sext_commit = master
+
+PACKAGES += sfmt
+pkg_sfmt_name = sfmt
+pkg_sfmt_description = SFMT pseudo random number generator for Erlang.
+pkg_sfmt_homepage = https://github.com/jj1bdx/sfmt-erlang
+pkg_sfmt_fetch = git
+pkg_sfmt_repo = https://github.com/jj1bdx/sfmt-erlang
+pkg_sfmt_commit = master
+
+PACKAGES += sgte
+pkg_sgte_name = sgte
+pkg_sgte_description = A simple Erlang Template Engine
+pkg_sgte_homepage = https://github.com/filippo/sgte
+pkg_sgte_fetch = git
+pkg_sgte_repo = https://github.com/filippo/sgte
+pkg_sgte_commit = master
+
+PACKAGES += sheriff
+pkg_sheriff_name = sheriff
+pkg_sheriff_description = Parse transform for type based validation.
+pkg_sheriff_homepage = http://ninenines.eu
+pkg_sheriff_fetch = git
+pkg_sheriff_repo = https://github.com/extend/sheriff
+pkg_sheriff_commit = master
+
+PACKAGES += shotgun
+pkg_shotgun_name = shotgun
+pkg_shotgun_description = better than just a gun
+pkg_shotgun_homepage = https://github.com/inaka/shotgun
+pkg_shotgun_fetch = git
+pkg_shotgun_repo = https://github.com/inaka/shotgun
+pkg_shotgun_commit = master
+
+PACKAGES += sidejob
+pkg_sidejob_name = sidejob
+pkg_sidejob_description = Parallel worker and capacity limiting library for Erlang
+pkg_sidejob_homepage = https://github.com/basho/sidejob
+pkg_sidejob_fetch = git
+pkg_sidejob_repo = https://github.com/basho/sidejob
+pkg_sidejob_commit = master
+
+PACKAGES += sieve
+pkg_sieve_name = sieve
+pkg_sieve_description = sieve is a simple TCP routing proxy (layer 7) in erlang
+pkg_sieve_homepage = https://github.com/benoitc/sieve
+pkg_sieve_fetch = git
+pkg_sieve_repo = https://github.com/benoitc/sieve
+pkg_sieve_commit = master
+
+PACKAGES += sighandler
+pkg_sighandler_name = sighandler
+pkg_sighandler_description = Handle UNIX signals in Erlang
+pkg_sighandler_homepage = https://github.com/jkingsbery/sighandler
+pkg_sighandler_fetch = git
+pkg_sighandler_repo = https://github.com/jkingsbery/sighandler
+pkg_sighandler_commit = master
+
+PACKAGES += simhash
+pkg_simhash_name = simhash
+pkg_simhash_description = Simhashing for Erlang -- hashing algorithm to find near-duplicates in binary data.
+pkg_simhash_homepage = https://github.com/ferd/simhash
+pkg_simhash_fetch = git
+pkg_simhash_repo = https://github.com/ferd/simhash
+pkg_simhash_commit = master
+
+PACKAGES += simple_bridge
+pkg_simple_bridge_name = simple_bridge
+pkg_simple_bridge_description = A simple, standardized interface library to Erlang HTTP Servers.
+pkg_simple_bridge_homepage = https://github.com/nitrogen/simple_bridge
+pkg_simple_bridge_fetch = git
+pkg_simple_bridge_repo = https://github.com/nitrogen/simple_bridge
+pkg_simple_bridge_commit = master
+
+PACKAGES += simple_oauth2
+pkg_simple_oauth2_name = simple_oauth2
+pkg_simple_oauth2_description = Simple erlang OAuth2 client module for any http server framework (Google, Facebook, Yandex, Vkontakte are preconfigured)
+pkg_simple_oauth2_homepage = https://github.com/virtan/simple_oauth2
+pkg_simple_oauth2_fetch = git
+pkg_simple_oauth2_repo = https://github.com/virtan/simple_oauth2
+pkg_simple_oauth2_commit = master
+
+PACKAGES += skel
+pkg_skel_name = skel
+pkg_skel_description = A Streaming Process-based Skeleton Library for Erlang
+pkg_skel_homepage = https://github.com/ParaPhrase/skel
+pkg_skel_fetch = git
+pkg_skel_repo = https://github.com/ParaPhrase/skel
+pkg_skel_commit = master
+
+PACKAGES += slack
+pkg_slack_name = slack
+pkg_slack_description = Minimal slack notification OTP library.
+pkg_slack_homepage = https://github.com/DonBranson/slack
+pkg_slack_fetch = git
+pkg_slack_repo = https://github.com/DonBranson/slack.git
+pkg_slack_commit = master
+
+PACKAGES += smother
+pkg_smother_name = smother
+pkg_smother_description = Extended code coverage metrics for Erlang.
+pkg_smother_homepage = https://ramsay-t.github.io/Smother/
+pkg_smother_fetch = git
+pkg_smother_repo = https://github.com/ramsay-t/Smother
+pkg_smother_commit = master
+
+PACKAGES += snappyer
+pkg_snappyer_name = snappyer
+pkg_snappyer_description = Snappy as nif for Erlang
+pkg_snappyer_homepage = https://github.com/zmstone/snappyer
+pkg_snappyer_fetch = git
+pkg_snappyer_repo = https://github.com/zmstone/snappyer.git
+pkg_snappyer_commit = master
+
+PACKAGES += social
+pkg_social_name = social
+pkg_social_description = Cowboy handler for social login via OAuth2 providers
+pkg_social_homepage = https://github.com/dvv/social
+pkg_social_fetch = git
+pkg_social_repo = https://github.com/dvv/social
+pkg_social_commit = master
+
+PACKAGES += spapi_router
+pkg_spapi_router_name = spapi_router
+pkg_spapi_router_description = Partially-connected Erlang clustering
+pkg_spapi_router_homepage = https://github.com/spilgames/spapi-router
+pkg_spapi_router_fetch = git
+pkg_spapi_router_repo = https://github.com/spilgames/spapi-router
+pkg_spapi_router_commit = master
+
+PACKAGES += sqerl
+pkg_sqerl_name = sqerl
+pkg_sqerl_description = An Erlang-flavoured SQL DSL
+pkg_sqerl_homepage = https://github.com/hairyhum/sqerl
+pkg_sqerl_fetch = git
+pkg_sqerl_repo = https://github.com/hairyhum/sqerl
+pkg_sqerl_commit = master
+
+PACKAGES += srly
+pkg_srly_name = srly
+pkg_srly_description = Native Erlang Unix serial interface
+pkg_srly_homepage = https://github.com/msantos/srly
+pkg_srly_fetch = git
+pkg_srly_repo = https://github.com/msantos/srly
+pkg_srly_commit = master
+
+PACKAGES += sshrpc
+pkg_sshrpc_name = sshrpc
+pkg_sshrpc_description = Erlang SSH RPC module (experimental)
+pkg_sshrpc_homepage = https://github.com/jj1bdx/sshrpc
+pkg_sshrpc_fetch = git
+pkg_sshrpc_repo = https://github.com/jj1bdx/sshrpc
+pkg_sshrpc_commit = master
+
+PACKAGES += stable
+pkg_stable_name = stable
+pkg_stable_description = Library of assorted helpers for Cowboy web server.
+pkg_stable_homepage = https://github.com/dvv/stable
+pkg_stable_fetch = git
+pkg_stable_repo = https://github.com/dvv/stable
+pkg_stable_commit = master
+
+PACKAGES += statebox
+pkg_statebox_name = statebox
+pkg_statebox_description = Erlang state monad with merge/conflict-resolution capabilities. Useful for Riak.
+pkg_statebox_homepage = https://github.com/mochi/statebox
+pkg_statebox_fetch = git
+pkg_statebox_repo = https://github.com/mochi/statebox
+pkg_statebox_commit = master
+
+PACKAGES += statebox_riak
+pkg_statebox_riak_name = statebox_riak
+pkg_statebox_riak_description = Convenience library that makes it easier to use statebox with riak, extracted from best practices in our production code at Mochi Media.
+pkg_statebox_riak_homepage = https://github.com/mochi/statebox_riak
+pkg_statebox_riak_fetch = git
+pkg_statebox_riak_repo = https://github.com/mochi/statebox_riak
+pkg_statebox_riak_commit = master
+
+PACKAGES += statman
+pkg_statman_name = statman
+pkg_statman_description = Efficiently collect massive volumes of metrics inside the Erlang VM
+pkg_statman_homepage = https://github.com/knutin/statman
+pkg_statman_fetch = git
+pkg_statman_repo = https://github.com/knutin/statman
+pkg_statman_commit = master
+
+PACKAGES += statsderl
+pkg_statsderl_name = statsderl
+pkg_statsderl_description = StatsD client (erlang)
+pkg_statsderl_homepage = https://github.com/lpgauth/statsderl
+pkg_statsderl_fetch = git
+pkg_statsderl_repo = https://github.com/lpgauth/statsderl
+pkg_statsderl_commit = master
+
+PACKAGES += stdinout_pool
+pkg_stdinout_pool_name = stdinout_pool
+pkg_stdinout_pool_description = stdinout_pool : stuff goes in, stuff goes out. there's never any miscommunication.
+pkg_stdinout_pool_homepage = https://github.com/mattsta/erlang-stdinout-pool
+pkg_stdinout_pool_fetch = git
+pkg_stdinout_pool_repo = https://github.com/mattsta/erlang-stdinout-pool
+pkg_stdinout_pool_commit = master
+
+PACKAGES += stockdb
+pkg_stockdb_name = stockdb
+pkg_stockdb_description = Database for storing Stock Exchange quotes in erlang
+pkg_stockdb_homepage = https://github.com/maxlapshin/stockdb
+pkg_stockdb_fetch = git
+pkg_stockdb_repo = https://github.com/maxlapshin/stockdb
+pkg_stockdb_commit = master
+
+PACKAGES += stripe
+pkg_stripe_name = stripe
+pkg_stripe_description = Erlang interface to the stripe.com API
+pkg_stripe_homepage = https://github.com/mattsta/stripe-erlang
+pkg_stripe_fetch = git
+pkg_stripe_repo = https://github.com/mattsta/stripe-erlang
+pkg_stripe_commit = v1
+
+PACKAGES += subproc
+pkg_subproc_name = subproc
+pkg_subproc_description = unix subprocess manager with {active,once|false} modes
+pkg_subproc_homepage = http://dozzie.jarowit.net/trac/wiki/subproc
+pkg_subproc_fetch = git
+pkg_subproc_repo = https://github.com/dozzie/subproc
+pkg_subproc_commit = v0.1.0
+
+PACKAGES += supervisor3
+pkg_supervisor3_name = supervisor3
+pkg_supervisor3_description = OTP supervisor with additional strategies
+pkg_supervisor3_homepage = https://github.com/klarna/supervisor3
+pkg_supervisor3_fetch = git
+pkg_supervisor3_repo = https://github.com/klarna/supervisor3.git
+pkg_supervisor3_commit = master
+
+PACKAGES += surrogate
+pkg_surrogate_name = surrogate
+pkg_surrogate_description = Proxy server written in erlang. Supports reverse proxy load balancing and forward proxy with http (including CONNECT), socks4, socks5, and transparent proxy modes.
+pkg_surrogate_homepage = https://github.com/skruger/Surrogate
+pkg_surrogate_fetch = git
+pkg_surrogate_repo = https://github.com/skruger/Surrogate
+pkg_surrogate_commit = master
+
+PACKAGES += swab
+pkg_swab_name = swab
+pkg_swab_description = General purpose buffer handling module
+pkg_swab_homepage = https://github.com/crownedgrouse/swab
+pkg_swab_fetch = git
+pkg_swab_repo = https://github.com/crownedgrouse/swab
+pkg_swab_commit = master
+
+PACKAGES += swarm
+pkg_swarm_name = swarm
+pkg_swarm_description = Fast and simple acceptor pool for Erlang
+pkg_swarm_homepage = https://github.com/jeremey/swarm
+pkg_swarm_fetch = git
+pkg_swarm_repo = https://github.com/jeremey/swarm
+pkg_swarm_commit = master
+
+PACKAGES += switchboard
+pkg_switchboard_name = switchboard
+pkg_switchboard_description = A framework for processing email using worker plugins.
+pkg_switchboard_homepage = https://github.com/thusfresh/switchboard
+pkg_switchboard_fetch = git
+pkg_switchboard_repo = https://github.com/thusfresh/switchboard
+pkg_switchboard_commit = master
+
+PACKAGES += syn
+pkg_syn_name = syn
+pkg_syn_description = A global Process Registry and Process Group manager for Erlang.
+pkg_syn_homepage = https://github.com/ostinelli/syn
+pkg_syn_fetch = git
+pkg_syn_repo = https://github.com/ostinelli/syn
+pkg_syn_commit = master
+
+PACKAGES += sync
+pkg_sync_name = sync
+pkg_sync_description = On-the-fly recompiling and reloading in Erlang.
+pkg_sync_homepage = https://github.com/rustyio/sync
+pkg_sync_fetch = git
+pkg_sync_repo = https://github.com/rustyio/sync
+pkg_sync_commit = master
+
+PACKAGES += syntaxerl
+pkg_syntaxerl_name = syntaxerl
+pkg_syntaxerl_description = Syntax checker for Erlang
+pkg_syntaxerl_homepage = https://github.com/ten0s/syntaxerl
+pkg_syntaxerl_fetch = git
+pkg_syntaxerl_repo = https://github.com/ten0s/syntaxerl
+pkg_syntaxerl_commit = master
+
+PACKAGES += syslog
+pkg_syslog_name = syslog
+pkg_syslog_description = Erlang port driver for interacting with syslog via syslog(3)
+pkg_syslog_homepage = https://github.com/Vagabond/erlang-syslog
+pkg_syslog_fetch = git
+pkg_syslog_repo = https://github.com/Vagabond/erlang-syslog
+pkg_syslog_commit = master
+
+PACKAGES += taskforce
+pkg_taskforce_name = taskforce
+pkg_taskforce_description = Erlang worker pools for controlled parallelisation of arbitrary tasks.
+pkg_taskforce_homepage = https://github.com/g-andrade/taskforce
+pkg_taskforce_fetch = git
+pkg_taskforce_repo = https://github.com/g-andrade/taskforce
+pkg_taskforce_commit = master
+
+PACKAGES += tddreloader
+pkg_tddreloader_name = tddreloader
+pkg_tddreloader_description = Shell utility for recompiling, reloading, and testing code as it changes
+pkg_tddreloader_homepage = https://github.com/version2beta/tddreloader
+pkg_tddreloader_fetch = git
+pkg_tddreloader_repo = https://github.com/version2beta/tddreloader
+pkg_tddreloader_commit = master
+
+PACKAGES += tempo
+pkg_tempo_name = tempo
+pkg_tempo_description = NIF-based date and time parsing and formatting for Erlang.
+pkg_tempo_homepage = https://github.com/selectel/tempo
+pkg_tempo_fetch = git
+pkg_tempo_repo = https://github.com/selectel/tempo
+pkg_tempo_commit = master
+
+PACKAGES += ticktick
+pkg_ticktick_name = ticktick
+pkg_ticktick_description = Ticktick is an ID generator for message services.
+pkg_ticktick_homepage = https://github.com/ericliang/ticktick
+pkg_ticktick_fetch = git
+pkg_ticktick_repo = https://github.com/ericliang/ticktick
+pkg_ticktick_commit = master
+
+PACKAGES += tinymq
+pkg_tinymq_name = tinymq
+pkg_tinymq_description = TinyMQ - a diminutive, in-memory message queue
+pkg_tinymq_homepage = https://github.com/ChicagoBoss/tinymq
+pkg_tinymq_fetch = git
+pkg_tinymq_repo = https://github.com/ChicagoBoss/tinymq
+pkg_tinymq_commit = master
+
+PACKAGES += tinymt
+pkg_tinymt_name = tinymt
+pkg_tinymt_description = TinyMT pseudo random number generator for Erlang.
+pkg_tinymt_homepage = https://github.com/jj1bdx/tinymt-erlang
+pkg_tinymt_fetch = git
+pkg_tinymt_repo = https://github.com/jj1bdx/tinymt-erlang
+pkg_tinymt_commit = master
+
+PACKAGES += tirerl
+pkg_tirerl_name = tirerl
+pkg_tirerl_description = Erlang interface to Elastic Search
+pkg_tirerl_homepage = https://github.com/inaka/tirerl
+pkg_tirerl_fetch = git
+pkg_tirerl_repo = https://github.com/inaka/tirerl
+pkg_tirerl_commit = master
+
+PACKAGES += toml
+pkg_toml_name = toml
+pkg_toml_description = TOML (0.4.0) config parser
+pkg_toml_homepage = http://dozzie.jarowit.net/trac/wiki/TOML
+pkg_toml_fetch = git
+pkg_toml_repo = https://github.com/dozzie/toml
+pkg_toml_commit = v0.2.0
+
+PACKAGES += traffic_tools
+pkg_traffic_tools_name = traffic_tools
+pkg_traffic_tools_description = Simple traffic limiting library
+pkg_traffic_tools_homepage = https://github.com/systra/traffic_tools
+pkg_traffic_tools_fetch = git
+pkg_traffic_tools_repo = https://github.com/systra/traffic_tools
+pkg_traffic_tools_commit = master
+
+PACKAGES += trails
+pkg_trails_name = trails
+pkg_trails_description = A couple of improvements over Cowboy Routes
+pkg_trails_homepage = http://inaka.github.io/cowboy-trails/
+pkg_trails_fetch = git
+pkg_trails_repo = https://github.com/inaka/cowboy-trails
+pkg_trails_commit = master
+
+PACKAGES += trane
+pkg_trane_name = trane
+pkg_trane_description = SAX style broken HTML parser in Erlang
+pkg_trane_homepage = https://github.com/massemanet/trane
+pkg_trane_fetch = git
+pkg_trane_repo = https://github.com/massemanet/trane
+pkg_trane_commit = master
+
+PACKAGES += transit
+pkg_transit_name = transit
+pkg_transit_description = transit format for erlang
+pkg_transit_homepage = https://github.com/isaiah/transit-erlang
+pkg_transit_fetch = git
+pkg_transit_repo = https://github.com/isaiah/transit-erlang
+pkg_transit_commit = master
+
+PACKAGES += trie
+pkg_trie_name = trie
+pkg_trie_description = Erlang Trie Implementation
+pkg_trie_homepage = https://github.com/okeuday/trie
+pkg_trie_fetch = git
+pkg_trie_repo = https://github.com/okeuday/trie
+pkg_trie_commit = master
+
+PACKAGES += triq
+pkg_triq_name = triq
+pkg_triq_description = Trifork QuickCheck
+pkg_triq_homepage = https://triq.gitlab.io
+pkg_triq_fetch = git
+pkg_triq_repo = https://gitlab.com/triq/triq.git
+pkg_triq_commit = master
+
+PACKAGES += tunctl
+pkg_tunctl_name = tunctl
+pkg_tunctl_description = Erlang TUN/TAP interface
+pkg_tunctl_homepage = https://github.com/msantos/tunctl
+pkg_tunctl_fetch = git
+pkg_tunctl_repo = https://github.com/msantos/tunctl
+pkg_tunctl_commit = master
+
+PACKAGES += twerl
+pkg_twerl_name = twerl
+pkg_twerl_description = Erlang client for the Twitter Streaming API
+pkg_twerl_homepage = https://github.com/lucaspiller/twerl
+pkg_twerl_fetch = git
+pkg_twerl_repo = https://github.com/lucaspiller/twerl
+pkg_twerl_commit = oauth
+
+PACKAGES += twitter_erlang
+pkg_twitter_erlang_name = twitter_erlang
+pkg_twitter_erlang_description = An Erlang twitter client
+pkg_twitter_erlang_homepage = https://github.com/ngerakines/erlang_twitter
+pkg_twitter_erlang_fetch = git
+pkg_twitter_erlang_repo = https://github.com/ngerakines/erlang_twitter
+pkg_twitter_erlang_commit = master
+
+PACKAGES += ucol_nif
+pkg_ucol_nif_name = ucol_nif
+pkg_ucol_nif_description = ICU based collation Erlang module
+pkg_ucol_nif_homepage = https://github.com/refuge/ucol_nif
+pkg_ucol_nif_fetch = git
+pkg_ucol_nif_repo = https://github.com/refuge/ucol_nif
+pkg_ucol_nif_commit = master
+
+PACKAGES += unicorn
+pkg_unicorn_name = unicorn
+pkg_unicorn_description = Generic configuration server
+pkg_unicorn_homepage = https://github.com/shizzard/unicorn
+pkg_unicorn_fetch = git
+pkg_unicorn_repo = https://github.com/shizzard/unicorn
+pkg_unicorn_commit = master
+
+PACKAGES += unsplit
+pkg_unsplit_name = unsplit
+pkg_unsplit_description = Resolves conflicts in Mnesia after network splits
+pkg_unsplit_homepage = https://github.com/uwiger/unsplit
+pkg_unsplit_fetch = git
+pkg_unsplit_repo = https://github.com/uwiger/unsplit
+pkg_unsplit_commit = master
+
+PACKAGES += uuid
+pkg_uuid_name = uuid
+pkg_uuid_description = Erlang UUID Implementation
+pkg_uuid_homepage = https://github.com/okeuday/uuid
+pkg_uuid_fetch = git
+pkg_uuid_repo = https://github.com/okeuday/uuid
+pkg_uuid_commit = master
+
+PACKAGES += ux
+pkg_ux_name = ux
+pkg_ux_description = Unicode eXtention for Erlang (Strings, Collation)
+pkg_ux_homepage = https://github.com/erlang-unicode/ux
+pkg_ux_fetch = git
+pkg_ux_repo = https://github.com/erlang-unicode/ux
+pkg_ux_commit = master
+
+PACKAGES += vert
+pkg_vert_name = vert
+pkg_vert_description = Erlang binding to libvirt virtualization API
+pkg_vert_homepage = https://github.com/msantos/erlang-libvirt
+pkg_vert_fetch = git
+pkg_vert_repo = https://github.com/msantos/erlang-libvirt
+pkg_vert_commit = master
+
+PACKAGES += verx
+pkg_verx_name = verx
+pkg_verx_description = Erlang implementation of the libvirtd remote protocol
+pkg_verx_homepage = https://github.com/msantos/verx
+pkg_verx_fetch = git
+pkg_verx_repo = https://github.com/msantos/verx
+pkg_verx_commit = master
+
+PACKAGES += vmq_acl
+pkg_vmq_acl_name = vmq_acl
+pkg_vmq_acl_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_acl_homepage = https://verne.mq/
+pkg_vmq_acl_fetch = git
+pkg_vmq_acl_repo = https://github.com/erlio/vmq_acl
+pkg_vmq_acl_commit = master
+
+PACKAGES += vmq_bridge
+pkg_vmq_bridge_name = vmq_bridge
+pkg_vmq_bridge_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_bridge_homepage = https://verne.mq/
+pkg_vmq_bridge_fetch = git
+pkg_vmq_bridge_repo = https://github.com/erlio/vmq_bridge
+pkg_vmq_bridge_commit = master
+
+PACKAGES += vmq_graphite
+pkg_vmq_graphite_name = vmq_graphite
+pkg_vmq_graphite_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_graphite_homepage = https://verne.mq/
+pkg_vmq_graphite_fetch = git
+pkg_vmq_graphite_repo = https://github.com/erlio/vmq_graphite
+pkg_vmq_graphite_commit = master
+
+PACKAGES += vmq_passwd
+pkg_vmq_passwd_name = vmq_passwd
+pkg_vmq_passwd_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_passwd_homepage = https://verne.mq/
+pkg_vmq_passwd_fetch = git
+pkg_vmq_passwd_repo = https://github.com/erlio/vmq_passwd
+pkg_vmq_passwd_commit = master
+
+PACKAGES += vmq_server
+pkg_vmq_server_name = vmq_server
+pkg_vmq_server_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_server_homepage = https://verne.mq/
+pkg_vmq_server_fetch = git
+pkg_vmq_server_repo = https://github.com/erlio/vmq_server
+pkg_vmq_server_commit = master
+
+PACKAGES += vmq_snmp
+pkg_vmq_snmp_name = vmq_snmp
+pkg_vmq_snmp_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_snmp_homepage = https://verne.mq/
+pkg_vmq_snmp_fetch = git
+pkg_vmq_snmp_repo = https://github.com/erlio/vmq_snmp
+pkg_vmq_snmp_commit = master
+
+PACKAGES += vmq_systree
+pkg_vmq_systree_name = vmq_systree
+pkg_vmq_systree_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_systree_homepage = https://verne.mq/
+pkg_vmq_systree_fetch = git
+pkg_vmq_systree_repo = https://github.com/erlio/vmq_systree
+pkg_vmq_systree_commit = master
+
+PACKAGES += vmstats
+pkg_vmstats_name = vmstats
+pkg_vmstats_description = tiny Erlang app that works in conjunction with statsderl in order to generate information on the Erlang VM for graphite logs.
+pkg_vmstats_homepage = https://github.com/ferd/vmstats
+pkg_vmstats_fetch = git
+pkg_vmstats_repo = https://github.com/ferd/vmstats
+pkg_vmstats_commit = master
+
+PACKAGES += walrus
+pkg_walrus_name = walrus
+pkg_walrus_description = Walrus - Mustache-like Templating
+pkg_walrus_homepage = https://github.com/devinus/walrus
+pkg_walrus_fetch = git
+pkg_walrus_repo = https://github.com/devinus/walrus
+pkg_walrus_commit = master
+
+PACKAGES += webmachine
+pkg_webmachine_name = webmachine
+pkg_webmachine_description = A REST-based system for building web applications.
+pkg_webmachine_homepage = https://github.com/basho/webmachine
+pkg_webmachine_fetch = git
+pkg_webmachine_repo = https://github.com/basho/webmachine
+pkg_webmachine_commit = master
+
+PACKAGES += websocket_client
+pkg_websocket_client_name = websocket_client
+pkg_websocket_client_description = Erlang websocket client (ws and wss supported)
+pkg_websocket_client_homepage = https://github.com/jeremyong/websocket_client
+pkg_websocket_client_fetch = git
+pkg_websocket_client_repo = https://github.com/jeremyong/websocket_client
+pkg_websocket_client_commit = master
+
+PACKAGES += worker_pool
+pkg_worker_pool_name = worker_pool
+pkg_worker_pool_description = a simple erlang worker pool
+pkg_worker_pool_homepage = https://github.com/inaka/worker_pool
+pkg_worker_pool_fetch = git
+pkg_worker_pool_repo = https://github.com/inaka/worker_pool
+pkg_worker_pool_commit = master
+
+PACKAGES += wrangler
+pkg_wrangler_name = wrangler
+pkg_wrangler_description = Import of the Wrangler svn repository.
+pkg_wrangler_homepage = http://www.cs.kent.ac.uk/projects/wrangler/Home.html
+pkg_wrangler_fetch = git
+pkg_wrangler_repo = https://github.com/RefactoringTools/wrangler
+pkg_wrangler_commit = master
+
+PACKAGES += wsock
+pkg_wsock_name = wsock
+pkg_wsock_description = Erlang library to build WebSocket clients and servers
+pkg_wsock_homepage = https://github.com/madtrick/wsock
+pkg_wsock_fetch = git
+pkg_wsock_repo = https://github.com/madtrick/wsock
+pkg_wsock_commit = master
+
+PACKAGES += xhttpc
+pkg_xhttpc_name = xhttpc
+pkg_xhttpc_description = Extensible HTTP Client for Erlang
+pkg_xhttpc_homepage = https://github.com/seriyps/xhttpc
+pkg_xhttpc_fetch = git
+pkg_xhttpc_repo = https://github.com/seriyps/xhttpc
+pkg_xhttpc_commit = master
+
+PACKAGES += xref_runner
+pkg_xref_runner_name = xref_runner
+pkg_xref_runner_description = Erlang Xref Runner (inspired by rebar xref)
+pkg_xref_runner_homepage = https://github.com/inaka/xref_runner
+pkg_xref_runner_fetch = git
+pkg_xref_runner_repo = https://github.com/inaka/xref_runner
+pkg_xref_runner_commit = master
+
+PACKAGES += yamerl
+pkg_yamerl_name = yamerl
+pkg_yamerl_description = YAML 1.2 parser in pure Erlang
+pkg_yamerl_homepage = https://github.com/yakaz/yamerl
+pkg_yamerl_fetch = git
+pkg_yamerl_repo = https://github.com/yakaz/yamerl
+pkg_yamerl_commit = master
+
+PACKAGES += yamler
+pkg_yamler_name = yamler
+pkg_yamler_description = libyaml-based yaml loader for Erlang
+pkg_yamler_homepage = https://github.com/goertzenator/yamler
+pkg_yamler_fetch = git
+pkg_yamler_repo = https://github.com/goertzenator/yamler
+pkg_yamler_commit = master
+
+PACKAGES += yaws
+pkg_yaws_name = yaws
+pkg_yaws_description = Yaws webserver
+pkg_yaws_homepage = http://yaws.hyber.org
+pkg_yaws_fetch = git
+pkg_yaws_repo = https://github.com/klacke/yaws
+pkg_yaws_commit = master
+
+PACKAGES += zab_engine
+pkg_zab_engine_name = zab_engine
+pkg_zab_engine_description = Zab protocol implemented in Erlang
+pkg_zab_engine_homepage = https://github.com/xinmingyao/zab_engine
+pkg_zab_engine_fetch = git
+pkg_zab_engine_repo = https://github.com/xinmingyao/zab_engine
+pkg_zab_engine_commit = master
+
+PACKAGES += zabbix_sender
+pkg_zabbix_sender_name = zabbix_sender
+pkg_zabbix_sender_description = Zabbix trapper for sending data to Zabbix in pure Erlang
+pkg_zabbix_sender_homepage = https://github.com/stalkermn/zabbix_sender
+pkg_zabbix_sender_fetch = git
+pkg_zabbix_sender_repo = https://github.com/stalkermn/zabbix_sender.git
+pkg_zabbix_sender_commit = master
+
+PACKAGES += zeta
+pkg_zeta_name = zeta
+pkg_zeta_description = HTTP access log parser in Erlang
+pkg_zeta_homepage = https://github.com/s1n4/zeta
+pkg_zeta_fetch = git
+pkg_zeta_repo = https://github.com/s1n4/zeta
+pkg_zeta_commit = master
+
+PACKAGES += zippers
+pkg_zippers_name = zippers
+pkg_zippers_description = A library for functional zipper data structures in Erlang.
+pkg_zippers_homepage = https://github.com/ferd/zippers
+pkg_zippers_fetch = git
+pkg_zippers_repo = https://github.com/ferd/zippers
+pkg_zippers_commit = master
+
+PACKAGES += zlists
+pkg_zlists_name = zlists
+pkg_zlists_description = Erlang lazy lists library.
+pkg_zlists_homepage = https://github.com/vjache/erlang-zlists
+pkg_zlists_fetch = git
+pkg_zlists_repo = https://github.com/vjache/erlang-zlists
+pkg_zlists_commit = master
+
+PACKAGES += zraft_lib
+pkg_zraft_lib_name = zraft_lib
+pkg_zraft_lib_description = Erlang raft consensus protocol implementation
+pkg_zraft_lib_homepage = https://github.com/dreyk/zraft_lib
+pkg_zraft_lib_fetch = git
+pkg_zraft_lib_repo = https://github.com/dreyk/zraft_lib
+pkg_zraft_lib_commit = master
+
+PACKAGES += zucchini
+pkg_zucchini_name = zucchini
+pkg_zucchini_description = An Erlang INI parser
+pkg_zucchini_homepage = https://github.com/devinus/zucchini
+pkg_zucchini_fetch = git
+pkg_zucchini_repo = https://github.com/devinus/zucchini
+pkg_zucchini_commit = master
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: search
+
+define pkg_print
+ $(verbose) printf "%s\n" \
+ $(if $(call core_eq,$(1),$(pkg_$(1)_name)),,"Pkg name: $(1)") \
+ "App name: $(pkg_$(1)_name)" \
+ "Description: $(pkg_$(1)_description)" \
+ "Home page: $(pkg_$(1)_homepage)" \
+ "Fetch with: $(pkg_$(1)_fetch)" \
+ "Repository: $(pkg_$(1)_repo)" \
+ "Commit: $(pkg_$(1)_commit)" \
+ ""
+
+endef
+
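+# Usage sketch (illustrative invocations; the value of q is matched
+# case-insensitively against each package's name and description):
+#   make search          # print every package in the index
+#   make search q=pool   # print only packages matching "pool"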
+search:
+ifdef q
+ $(foreach p,$(PACKAGES), \
+ $(if $(findstring $(call core_lc,$(q)),$(call core_lc,$(pkg_$(p)_name) $(pkg_$(p)_description))), \
+ $(call pkg_print,$(p))))
+else
+ $(foreach p,$(PACKAGES),$(call pkg_print,$(p)))
+endif
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-deps clean-tmp-deps.log
+
+# Configuration.
+
+ifdef OTP_DEPS
+$(warning The variable OTP_DEPS is deprecated in favor of LOCAL_DEPS.)
+endif
+
+IGNORE_DEPS ?=
+export IGNORE_DEPS
+
+APPS_DIR ?= $(CURDIR)/apps
+export APPS_DIR
+
+DEPS_DIR ?= $(CURDIR)/deps
+export DEPS_DIR
+
+REBAR_DEPS_DIR = $(DEPS_DIR)
+export REBAR_DEPS_DIR
+
+REBAR_GIT ?= https://github.com/rebar/rebar
+REBAR_COMMIT ?= 576e12171ab8d69b048b827b92aa65d067deea01
+
+# External "early" plugins (see core/plugins.mk for regular plugins).
+# They both use the core_dep_plugin macro.
+
+define core_dep_plugin
+ifeq ($(2),$(PROJECT))
+-include $$(patsubst $(PROJECT)/%,%,$(1))
+else
+-include $(DEPS_DIR)/$(1)
+
+$(DEPS_DIR)/$(1): $(DEPS_DIR)/$(2) ;
+endif
+endef
+
+DEP_EARLY_PLUGINS ?=
+
+$(foreach p,$(DEP_EARLY_PLUGINS),\
+ $(eval $(if $(findstring /,$p),\
+ $(call core_dep_plugin,$p,$(firstword $(subst /, ,$p))),\
+ $(call core_dep_plugin,$p/early-plugins.mk,$p))))
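+
+# Illustrative configuration ("mydep" is a hypothetical dependency name):
+#   DEP_EARLY_PLUGINS = mydep               # includes $(DEPS_DIR)/mydep/early-plugins.mk
+#   DEP_EARLY_PLUGINS = mydep/mk/early.mk   # includes $(DEPS_DIR)/mydep/mk/early.mk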
+
+# Query functions.
+
+query_fetch_method = $(if $(dep_$(1)),$(call _qfm_dep,$(word 1,$(dep_$(1)))),$(call _qfm_pkg,$(1)))
+_qfm_dep = $(if $(dep_fetch_$(1)),$(1),$(if $(IS_DEP),legacy,fail))
+_qfm_pkg = $(if $(pkg_$(1)_fetch),$(pkg_$(1)_fetch),fail)
+
+query_name = $(if $(dep_$(1)),$(1),$(if $(pkg_$(1)_name),$(pkg_$(1)_name),$(1)))
+
+query_repo = $(call _qr,$(1),$(call query_fetch_method,$(1)))
+_qr = $(if $(query_repo_$(2)),$(call query_repo_$(2),$(1)),$(call dep_repo,$(1)))
+
+query_repo_default = $(if $(dep_$(1)),$(word 2,$(dep_$(1))),$(pkg_$(1)_repo))
+query_repo_git = $(patsubst git://github.com/%,https://github.com/%,$(call query_repo_default,$(1)))
+query_repo_git-subfolder = $(call query_repo_git,$(1))
+query_repo_git-submodule = -
+query_repo_hg = $(call query_repo_default,$(1))
+query_repo_svn = $(call query_repo_default,$(1))
+query_repo_cp = $(call query_repo_default,$(1))
+query_repo_ln = $(call query_repo_default,$(1))
+query_repo_hex = https://hex.pm/packages/$(if $(word 3,$(dep_$(1))),$(word 3,$(dep_$(1))),$(1))
+query_repo_fail = -
+query_repo_legacy = -
+
+query_version = $(call _qv,$(1),$(call query_fetch_method,$(1)))
+_qv = $(if $(query_version_$(2)),$(call query_version_$(2),$(1)),$(call dep_commit,$(1)))
+
+query_version_default = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(word 3,$(dep_$(1))),$(pkg_$(1)_commit)))
+query_version_git = $(call query_version_default,$(1))
+query_version_git-subfolder = $(call query_version_git,$(1))
+query_version_git-submodule = -
+query_version_hg = $(call query_version_default,$(1))
+query_version_svn = -
+query_version_cp = -
+query_version_ln = -
+query_version_hex = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(word 2,$(dep_$(1))),$(pkg_$(1)_commit)))
+query_version_fail = -
+query_version_legacy = -
+
+query_extra = $(call _qe,$(1),$(call query_fetch_method,$(1)))
+_qe = $(if $(query_extra_$(2)),$(call query_extra_$(2),$(1)),-)
+
+query_extra_git = -
+query_extra_git-subfolder = $(if $(dep_$(1)),subfolder=$(word 4,$(dep_$(1))),-)
+query_extra_git-submodule = -
+query_extra_hg = -
+query_extra_svn = -
+query_extra_cp = -
+query_extra_ln = -
+query_extra_hex = $(if $(dep_$(1)),package-name=$(word 3,$(dep_$(1))),-)
+query_extra_fail = -
+query_extra_legacy = -
+
+query_absolute_path = $(addprefix $(DEPS_DIR)/,$(call query_name,$(1)))
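+
+# Query sketch, assuming a project declared for example:
+#   dep_cowboy = git https://github.com/ninenines/cowboy 2.9.0
+# then:
+#   $(call query_fetch_method,cowboy) # -> git
+#   $(call query_repo,cowboy)         # -> https://github.com/ninenines/cowboy
+#   $(call query_version,cowboy)      # -> 2.9.0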
+
+# Deprecated legacy query functions.
+dep_fetch = $(call query_fetch_method,$(1))
+dep_name = $(call query_name,$(1))
+dep_repo = $(call query_repo_git,$(1))
+dep_commit = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(if $(filter hex,$(word 1,$(dep_$(1)))),$(word 2,$(dep_$(1))),$(word 3,$(dep_$(1)))),$(pkg_$(1)_commit)))
+
+LOCAL_DEPS_DIRS = $(foreach a,$(LOCAL_DEPS),$(if $(wildcard $(APPS_DIR)/$(a)),$(APPS_DIR)/$(a)))
+ALL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(foreach dep,$(filter-out $(IGNORE_DEPS),$(BUILD_DEPS) $(DEPS)),$(call dep_name,$(dep))))
+
+# When we are calling an app directly we don't want to include it here
+# otherwise it'll be treated both as an app and as the top-level project.
+ALL_APPS_DIRS = $(if $(wildcard $(APPS_DIR)/),$(filter-out $(APPS_DIR),$(shell find $(APPS_DIR) -maxdepth 1 -type d)))
+ifdef ROOT_DIR
+ifndef IS_APP
+ALL_APPS_DIRS := $(filter-out $(APPS_DIR)/$(notdir $(CURDIR)),$(ALL_APPS_DIRS))
+endif
+endif
+
+ifeq ($(filter $(APPS_DIR) $(DEPS_DIR),$(subst :, ,$(ERL_LIBS))),)
+ifeq ($(ERL_LIBS),)
+ ERL_LIBS = $(APPS_DIR):$(DEPS_DIR)
+else
+ ERL_LIBS := $(ERL_LIBS):$(APPS_DIR):$(DEPS_DIR)
+endif
+endif
+export ERL_LIBS
+
+export NO_AUTOPATCH
+
+# Verbosity.
+
+dep_verbose_0 = @echo " DEP $1 ($(call dep_commit,$1))";
+dep_verbose_2 = set -x;
+dep_verbose = $(dep_verbose_$(V))
+
+# Optimization: don't recompile deps unless truly necessary.
+
+ifndef IS_DEP
+ifneq ($(MAKELEVEL),0)
+$(shell rm -f ebin/dep_built)
+endif
+endif
+
+# Core targets.
+
+ALL_APPS_DIRS_TO_BUILD = $(if $(LOCAL_DEPS_DIRS)$(IS_APP),$(LOCAL_DEPS_DIRS),$(ALL_APPS_DIRS))
+
+apps:: $(ALL_APPS_DIRS) clean-tmp-deps.log | $(ERLANG_MK_TMP)
+# Create an ebin directory for all apps to make sure Erlang recognizes them
+# as proper OTP applications when using -include_lib. This is a temporary
+# fix; a proper fix would be to compile apps/* in the right order.
+ifndef IS_APP
+ifneq ($(ALL_APPS_DIRS),)
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ mkdir -p $$dep/ebin; \
+ done
+endif
+endif
+# At the toplevel: if LOCAL_DEPS is defined with at least one local app, only
+# compile that list of apps. Otherwise, compile everything.
+# Within an app: compile all LOCAL_DEPS that are (uncompiled) local apps.
+ifneq ($(ALL_APPS_DIRS_TO_BUILD),)
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS_TO_BUILD); do \
+ if grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/apps.log; then \
+ :; \
+ else \
+ echo $$dep >> $(ERLANG_MK_TMP)/apps.log; \
+ $(MAKE) -C $$dep $(if $(IS_TEST),test-build-app) IS_APP=1; \
+ fi \
+ done
+endif
+
+clean-tmp-deps.log:
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_TMP)/apps.log $(ERLANG_MK_TMP)/deps.log
+endif
+
+# Erlang.mk does not rebuild dependencies after they were compiled
+# once. If a developer is working on the top-level project and some
+# dependencies at the same time, they may want to change this behavior.
+# There are two solutions:
+# 1. Set `FULL=1` so that all dependencies are visited and
+#    recursively recompiled if necessary.
+# 2. Set `FORCE_REBUILD` to the specific list of dependencies that
+#    should be recompiled (instead of the whole set).
+
+FORCE_REBUILD ?=
+
+ifeq ($(origin FULL),undefined)
+ifneq ($(strip $(force_rebuild_dep)$(FORCE_REBUILD)),)
+define force_rebuild_dep
+echo "$(FORCE_REBUILD)" | grep -qw "$$(basename "$1")"
+endef
+endif
+endif
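+
+# Example (illustrative):
+#   make FULL=1                        # revisit every dependency
+#   make FORCE_REBUILD="cowlib ranch"  # revisit only these two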
+
+ifneq ($(SKIP_DEPS),)
+deps::
+else
+deps:: $(ALL_DEPS_DIRS) apps clean-tmp-deps.log | $(ERLANG_MK_TMP)
+ifneq ($(ALL_DEPS_DIRS),)
+ $(verbose) set -e; for dep in $(ALL_DEPS_DIRS); do \
+ if grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/deps.log; then \
+ :; \
+ else \
+ echo $$dep >> $(ERLANG_MK_TMP)/deps.log; \
+ if [ -z "$(strip $(FULL))" ] $(if $(force_rebuild_dep),&& ! ($(call force_rebuild_dep,$$dep)),) && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ elif [ -f $$dep/GNUmakefile ] || [ -f $$dep/makefile ] || [ -f $$dep/Makefile ]; then \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ else \
+ echo "Error: No Makefile to build dependency $$dep." >&2; \
+ exit 2; \
+ fi \
+ fi \
+ done
+endif
+endif
+
+# Deps related targets.
+
+# @todo rename GNUmakefile and makefile into Makefile first, if they exist
+# While the Makefile could also be named GNUmakefile or makefile,
+# in practice only Makefile has been needed so far.
+define dep_autopatch
+ if [ -f $(DEPS_DIR)/$(1)/erlang.mk ]; then \
+ rm -rf $(DEPS_DIR)/$1/ebin/; \
+ $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+ $(call dep_autopatch_erlang_mk,$(1)); \
+ elif [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+ if [ -f $(DEPS_DIR)/$1/rebar.lock ]; then \
+ $(call dep_autopatch2,$1); \
+ elif [ 0 != `grep -c "include ../\w*\.mk" $(DEPS_DIR)/$(1)/Makefile` ]; then \
+ $(call dep_autopatch2,$(1)); \
+ elif [ 0 != `grep -ci "^[^#].*rebar" $(DEPS_DIR)/$(1)/Makefile` ]; then \
+ $(call dep_autopatch2,$(1)); \
+ elif [ -n "`find $(DEPS_DIR)/$(1)/ -type f -name \*.mk -not -name erlang.mk -exec grep -i "^[^#].*rebar" '{}' \;`" ]; then \
+ $(call dep_autopatch2,$(1)); \
+ fi \
+ else \
+ if [ ! -d $(DEPS_DIR)/$(1)/src/ ]; then \
+ $(call dep_autopatch_noop,$(1)); \
+ else \
+ $(call dep_autopatch2,$(1)); \
+ fi \
+ fi
+endef
+
+define dep_autopatch2
+ ! test -f $(DEPS_DIR)/$1/ebin/$1.app || \
+ mv -n $(DEPS_DIR)/$1/ebin/$1.app $(DEPS_DIR)/$1/src/$1.app.src; \
+ rm -f $(DEPS_DIR)/$1/ebin/$1.app; \
+ if [ -f $(DEPS_DIR)/$1/src/$1.app.src.script ]; then \
+ $(call erlang,$(call dep_autopatch_appsrc_script.erl,$(1))); \
+ fi; \
+ $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+ if [ -f $(DEPS_DIR)/$(1)/rebar -o -f $(DEPS_DIR)/$(1)/rebar.config -o -f $(DEPS_DIR)/$(1)/rebar.config.script -o -f $(DEPS_DIR)/$1/rebar.lock ]; then \
+ $(call dep_autopatch_fetch_rebar); \
+ $(call dep_autopatch_rebar,$(1)); \
+ else \
+ $(call dep_autopatch_gen,$(1)); \
+ fi
+endef
+
+define dep_autopatch_noop
+ printf "noop:\n" > $(DEPS_DIR)/$(1)/Makefile
+endef
+
+# Replace "include erlang.mk" with a line that will load the parent Erlang.mk
+# if given. Do it for all 3 possible Makefile file names.
+ifeq ($(NO_AUTOPATCH_ERLANG_MK),)
+define dep_autopatch_erlang_mk
+ for f in Makefile makefile GNUmakefile; do \
+ if [ -f $(DEPS_DIR)/$1/$$f ]; then \
+ sed -i.bak s/'include *erlang.mk'/'include $$(if $$(ERLANG_MK_FILENAME),$$(ERLANG_MK_FILENAME),erlang.mk)'/ $(DEPS_DIR)/$1/$$f; \
+ fi \
+ done
+endef
+else
+define dep_autopatch_erlang_mk
+ :
+endef
+endif
+
+define dep_autopatch_gen
+ printf "%s\n" \
+ "ERLC_OPTS = +debug_info" \
+ "include ../../erlang.mk" > $(DEPS_DIR)/$(1)/Makefile
+endef
+
+# We use flock/lockf when available to avoid concurrency issues.
+define dep_autopatch_fetch_rebar
+ if command -v flock >/dev/null; then \
+ flock $(ERLANG_MK_TMP)/rebar.lock sh -c "$(call dep_autopatch_fetch_rebar2)"; \
+ elif command -v lockf >/dev/null; then \
+ lockf $(ERLANG_MK_TMP)/rebar.lock sh -c "$(call dep_autopatch_fetch_rebar2)"; \
+ else \
+ $(call dep_autopatch_fetch_rebar2); \
+ fi
+endef
+
+define dep_autopatch_fetch_rebar2
+ if [ ! -d $(ERLANG_MK_TMP)/rebar ]; then \
+ git clone -q -n -- $(REBAR_GIT) $(ERLANG_MK_TMP)/rebar; \
+ cd $(ERLANG_MK_TMP)/rebar; \
+ git checkout -q $(REBAR_COMMIT); \
+ ./bootstrap; \
+ cd -; \
+ fi
+endef
+
+define dep_autopatch_rebar
+ if [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+ mv $(DEPS_DIR)/$(1)/Makefile $(DEPS_DIR)/$(1)/Makefile.orig.mk; \
+ fi; \
+ $(call erlang,$(call dep_autopatch_rebar.erl,$(1))); \
+ rm -f $(DEPS_DIR)/$(1)/ebin/$(1).app
+endef
+
+define dep_autopatch_rebar.erl
+ application:load(rebar),
+ application:set_env(rebar, log_level, debug),
+ rmemo:start(),
+ Conf1 = case file:consult("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config)") of
+ {ok, Conf0} -> Conf0;
+ _ -> []
+ end,
+ {Conf, OsEnv} = fun() ->
+ case filelib:is_file("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)") of
+ false -> {Conf1, []};
+ true ->
+ Bindings0 = erl_eval:new_bindings(),
+ Bindings1 = erl_eval:add_binding('CONFIG', Conf1, Bindings0),
+ Bindings = erl_eval:add_binding('SCRIPT', "$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings1),
+ Before = os:getenv(),
+ {ok, Conf2} = file:script("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings),
+ {Conf2, lists:foldl(fun(E, Acc) -> lists:delete(E, Acc) end, os:getenv(), Before)}
+ end
+ end(),
+ Write = fun (Text) ->
+ file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/Makefile)", Text, [append])
+ end,
+ Escape = fun (Text) ->
+ re:replace(Text, "\\\\$$", "\$$$$", [global, {return, list}])
+ end,
+ Write("IGNORE_DEPS += edown eper eunit_formatters meck node_package "
+ "rebar_lock_deps_plugin rebar_vsn_plugin reltool_util\n"),
+ Write("C_SRC_DIR = /path/do/not/exist\n"),
+ Write("C_SRC_TYPE = rebar\n"),
+ Write("DRV_CFLAGS = -fPIC\nexport DRV_CFLAGS\n"),
+ Write(["ERLANG_ARCH = ", rebar_utils:wordsize(), "\nexport ERLANG_ARCH\n"]),
+ ToList = fun
+ (V) when is_atom(V) -> atom_to_list(V);
+ (V) when is_list(V) -> "'\\"" ++ V ++ "\\"'"
+ end,
+ fun() ->
+ Write("ERLC_OPTS = +debug_info\nexport ERLC_OPTS\n"),
+ case lists:keyfind(erl_opts, 1, Conf) of
+ false -> ok;
+ {_, ErlOpts} ->
+ lists:foreach(fun
+ ({d, D}) ->
+ Write("ERLC_OPTS += -D" ++ ToList(D) ++ "=1\n");
+ ({d, DKey, DVal}) ->
+ Write("ERLC_OPTS += -D" ++ ToList(DKey) ++ "=" ++ ToList(DVal) ++ "\n");
+ ({i, I}) ->
+ Write(["ERLC_OPTS += -I ", I, "\n"]);
+ ({platform_define, Regex, D}) ->
+ case rebar_utils:is_arch(Regex) of
+ true -> Write("ERLC_OPTS += -D" ++ ToList(D) ++ "=1\n");
+ false -> ok
+ end;
+ ({parse_transform, PT}) ->
+ Write("ERLC_OPTS += +'{parse_transform, " ++ ToList(PT) ++ "}'\n");
+ (_) -> ok
+ end, ErlOpts)
+ end,
+ Write("\n")
+ end(),
+ GetHexVsn = fun(N, NP) ->
+ case file:consult("$(call core_native_path,$(DEPS_DIR)/$1/rebar.lock)") of
+ {ok, Lock} ->
+ io:format("~p~n", [Lock]),
+ case lists:keyfind("1.1.0", 1, Lock) of
+ {_, LockPkgs} ->
+ io:format("~p~n", [LockPkgs]),
+ case lists:keyfind(atom_to_binary(N, latin1), 1, LockPkgs) of
+ {_, {pkg, _, Vsn}, _} ->
+ io:format("~p~n", [Vsn]),
+ {N, {hex, NP, binary_to_list(Vsn)}};
+ _ ->
+ false
+ end;
+ _ ->
+ false
+ end;
+ _ ->
+ false
+ end
+ end,
+ SemVsn = fun
+ ("~>" ++ S0) ->
+ S = case S0 of
+ " " ++ S1 -> S1;
+ _ -> S0
+ end,
+ case length([ok || $$. <- S]) of
+ 0 -> S ++ ".0.0";
+ 1 -> S ++ ".0";
+ _ -> S
+ end;
+ (S) -> S
+ end,
+ fun() ->
+ File = case lists:keyfind(deps, 1, Conf) of
+ false -> [];
+ {_, Deps} ->
+ [begin case case Dep of
+ N when is_atom(N) -> GetHexVsn(N, N);
+ {N, S} when is_atom(N), is_list(S) -> {N, {hex, N, SemVsn(S)}};
+ {N, {pkg, NP}} when is_atom(N) -> GetHexVsn(N, NP);
+ {N, S, {pkg, NP}} -> {N, {hex, NP, S}};
+ {N, S} when is_tuple(S) -> {N, S};
+ {N, _, S} -> {N, S};
+ {N, _, S, _} -> {N, S};
+ _ -> false
+ end of
+ false -> ok;
+ {Name, Source} ->
+ {Method, Repo, Commit} = case Source of
+ {hex, NPV, V} -> {hex, V, NPV};
+ {git, R} -> {git, R, master};
+ {M, R, {branch, C}} -> {M, R, C};
+ {M, R, {ref, C}} -> {M, R, C};
+ {M, R, {tag, C}} -> {M, R, C};
+ {M, R, C} -> {M, R, C}
+ end,
+ Write(io_lib:format("DEPS += ~s\ndep_~s = ~s ~s ~s~n", [Name, Name, Method, Repo, Commit]))
+ end end || Dep <- Deps]
+ end
+ end(),
+ fun() ->
+ case lists:keyfind(erl_first_files, 1, Conf) of
+ false -> ok;
+ {_, Files} ->
+ Names = [[" ", case lists:reverse(F) of
+ "lre." ++ Elif -> lists:reverse(Elif);
+ "lrx." ++ Elif -> lists:reverse(Elif);
+ "lry." ++ Elif -> lists:reverse(Elif);
+ Elif -> lists:reverse(Elif)
+ end] || "src/" ++ F <- Files],
+ Write(io_lib:format("COMPILE_FIRST +=~s\n", [Names]))
+ end
+ end(),
+ Write("\n\nrebar_dep: preprocess pre-deps deps pre-app app\n"),
+ Write("\npreprocess::\n"),
+ Write("\npre-deps::\n"),
+ Write("\npre-app::\n"),
+ PatchHook = fun(Cmd) ->
+ Cmd2 = re:replace(Cmd, "^([g]?make)(.*)( -C.*)", "\\\\1\\\\3\\\\2", [{return, list}]),
+ case Cmd2 of
+ "make -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1);
+ "gmake -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1);
+ "make " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1);
+ "gmake " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1);
+ _ -> Escape(Cmd)
+ end
+ end,
+ fun() ->
+ case lists:keyfind(pre_hooks, 1, Conf) of
+ false -> ok;
+ {_, Hooks} ->
+ [case H of
+ {'get-deps', Cmd} ->
+ Write("\npre-deps::\n\t" ++ PatchHook(Cmd) ++ "\n");
+ {compile, Cmd} ->
+ Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n");
+ {Regex, compile, Cmd} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n");
+ false -> ok
+ end;
+ _ -> ok
+ end || H <- Hooks]
+ end
+ end(),
+ ShellToMk = fun(V0) ->
+ V1 = re:replace(V0, "[$$][(]", "$$\(shell ", [global]),
+ V = re:replace(V1, "([$$])(?![(])(\\\\w*)", "\\\\1(\\\\2)", [global]),
+ re:replace(V, "-Werror\\\\b", "", [{return, list}, global])
+ end,
+ PortSpecs = fun() ->
+ case lists:keyfind(port_specs, 1, Conf) of
+ false ->
+ case filelib:is_dir("$(call core_native_path,$(DEPS_DIR)/$1/c_src)") of
+ false -> [];
+ true ->
+ [{"priv/" ++ proplists:get_value(so_name, Conf, "$(1)_drv.so"),
+ proplists:get_value(port_sources, Conf, ["c_src/*.c"]), []}]
+ end;
+ {_, Specs} ->
+ lists:flatten([case S of
+ {Output, Input} -> {ShellToMk(Output), Input, []};
+ {Regex, Output, Input} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {ShellToMk(Output), Input, []};
+ false -> []
+ end;
+ {Regex, Output, Input, [{env, Env}]} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {ShellToMk(Output), Input, Env};
+ false -> []
+ end
+ end || S <- Specs])
+ end
+ end(),
+ PortSpecWrite = fun (Text) ->
+ file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/c_src/Makefile.erlang.mk)", Text, [append])
+ end,
+ case PortSpecs of
+ [] -> ok;
+ _ ->
+ Write("\npre-app::\n\t@$$\(MAKE) --no-print-directory -f c_src/Makefile.erlang.mk\n"),
+ PortSpecWrite(io_lib:format("ERL_CFLAGS ?= -finline-functions -Wall -fPIC -I \\"~s/erts-~s/include\\" -I \\"~s\\"\n",
+ [code:root_dir(), erlang:system_info(version), code:lib_dir(erl_interface, include)])),
+ PortSpecWrite(io_lib:format("ERL_LDFLAGS ?= -L \\"~s\\" -lei\n",
+ [code:lib_dir(erl_interface, lib)])),
+ [PortSpecWrite(["\n", E, "\n"]) || E <- OsEnv],
+ FilterEnv = fun(Env) ->
+ lists:flatten([case E of
+ {_, _} -> E;
+ {Regex, K, V} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {K, V};
+ false -> []
+ end
+ end || E <- Env])
+ end,
+ MergeEnv = fun(Env) ->
+ lists:foldl(fun ({K, V}, Acc) ->
+ case lists:keyfind(K, 1, Acc) of
+ false -> [{K, rebar_utils:expand_env_variable(V, K, "")}|Acc];
+ {_, V0} -> [{K, rebar_utils:expand_env_variable(V, K, V0)}|Acc]
+ end
+ end, [], Env)
+ end,
+ PortEnv = case lists:keyfind(port_env, 1, Conf) of
+ false -> [];
+ {_, PortEnv0} -> FilterEnv(PortEnv0)
+ end,
+ PortSpec = fun ({Output, Input0, Env}) ->
+ filelib:ensure_dir("$(call core_native_path,$(DEPS_DIR)/$1/)" ++ Output),
+ Input = [[" ", I] || I <- Input0],
+ PortSpecWrite([
+ [["\n", K, " = ", ShellToMk(V)] || {K, V} <- lists:reverse(MergeEnv(PortEnv))],
+ case $(PLATFORM) of
+ darwin -> "\n\nLDFLAGS += -flat_namespace -undefined suppress";
+ _ -> ""
+ end,
+ "\n\nall:: ", Output, "\n\t@:\n\n",
+ "%.o: %.c\n\t$$\(CC) -c -o $$\@ $$\< $$\(CFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.C\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.cc\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.cpp\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ [[Output, ": ", K, " += ", ShellToMk(V), "\n"] || {K, V} <- lists:reverse(MergeEnv(FilterEnv(Env)))],
+ Output, ": $$\(foreach ext,.c .C .cc .cpp,",
+ "$$\(patsubst %$$\(ext),%.o,$$\(filter %$$\(ext),$$\(wildcard", Input, "))))\n",
+ "\t$$\(CC) -o $$\@ $$\? $$\(LDFLAGS) $$\(ERL_LDFLAGS) $$\(DRV_LDFLAGS) $$\(EXE_LDFLAGS)",
+ case {filename:extension(Output), $(PLATFORM)} of
+ {[], _} -> "\n";
+ {_, darwin} -> "\n";
+ _ -> " -shared\n"
+ end])
+ end,
+ [PortSpec(S) || S <- PortSpecs]
+ end,
+ fun() ->
+ case lists:keyfind(plugins, 1, Conf) of
+ false -> ok;
+ {_, Plugins0} ->
+ Plugins = [P || P <- Plugins0, is_tuple(P)],
+ case lists:keyfind('lfe-compile', 1, Plugins) of
+ false -> ok;
+ _ -> Write("\nBUILD_DEPS = lfe lfe.mk\ndep_lfe.mk = git https://github.com/ninenines/lfe.mk master\nDEP_PLUGINS = lfe.mk\n")
+ end
+ end
+ end(),
+ Write("\ninclude $$\(if $$\(ERLANG_MK_FILENAME),$$\(ERLANG_MK_FILENAME),erlang.mk)"),
+ RunPlugin = fun(Plugin, Step) ->
+ case erlang:function_exported(Plugin, Step, 2) of
+ false -> ok;
+ true ->
+ c:cd("$(call core_native_path,$(DEPS_DIR)/$1/)"),
+ Ret = Plugin:Step({config, "", Conf, dict:new(), dict:new(), dict:new(),
+ dict:store(base_dir, "", dict:new())}, undefined),
+ io:format("rebar plugin ~p step ~p ret ~p~n", [Plugin, Step, Ret])
+ end
+ end,
+ fun() ->
+ case lists:keyfind(plugins, 1, Conf) of
+ false -> ok;
+ {_, Plugins0} ->
+ Plugins = [P || P <- Plugins0, is_atom(P)],
+ [begin
+ case lists:keyfind(deps, 1, Conf) of
+ false -> ok;
+ {_, Deps} ->
+ case lists:keyfind(P, 1, Deps) of
+ false -> ok;
+ _ ->
+ Path = "$(call core_native_path,$(DEPS_DIR)/)" ++ atom_to_list(P),
+ io:format("~s", [os:cmd("$(MAKE) -C $(call core_native_path,$(DEPS_DIR)/$1) " ++ Path)]),
+ io:format("~s", [os:cmd("$(MAKE) -C " ++ Path ++ " IS_DEP=1")]),
+ code:add_patha(Path ++ "/ebin")
+ end
+ end
+ end || P <- Plugins],
+ [case code:load_file(P) of
+ {module, P} -> ok;
+ _ ->
+ case lists:keyfind(plugin_dir, 1, Conf) of
+ false -> ok;
+ {_, PluginsDir} ->
+ ErlFile = "$(call core_native_path,$(DEPS_DIR)/$1/)" ++ PluginsDir ++ "/" ++ atom_to_list(P) ++ ".erl",
+ {ok, P, Bin} = compile:file(ErlFile, [binary]),
+ {module, P} = code:load_binary(P, ErlFile, Bin)
+ end
+ end || P <- Plugins],
+ [RunPlugin(P, preprocess) || P <- Plugins],
+ [RunPlugin(P, pre_compile) || P <- Plugins],
+ [RunPlugin(P, compile) || P <- Plugins]
+ end
+ end(),
+ halt()
+endef
+
+define dep_autopatch_appsrc_script.erl
+ AppSrc = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+ AppSrcScript = AppSrc ++ ".script",
+ {ok, Conf0} = file:consult(AppSrc),
+ Bindings0 = erl_eval:new_bindings(),
+ Bindings1 = erl_eval:add_binding('CONFIG', Conf0, Bindings0),
+ Bindings = erl_eval:add_binding('SCRIPT', AppSrcScript, Bindings1),
+ Conf = case file:script(AppSrcScript, Bindings) of
+ {ok, [C]} -> C;
+ {ok, C} -> C
+ end,
+ ok = file:write_file(AppSrc, io_lib:format("~p.~n", [Conf])),
+ halt()
+endef
+
+define dep_autopatch_appsrc.erl
+ AppSrcOut = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+ AppSrcIn = case filelib:is_regular(AppSrcOut) of false -> "$(call core_native_path,$(DEPS_DIR)/$1/ebin/$1.app)"; true -> AppSrcOut end,
+ case filelib:is_regular(AppSrcIn) of
+ false -> ok;
+ true ->
+ {ok, [{application, $(1), L0}]} = file:consult(AppSrcIn),
+ L1 = lists:keystore(modules, 1, L0, {modules, []}),
+ L2 = case lists:keyfind(vsn, 1, L1) of
+ {_, git} -> lists:keyreplace(vsn, 1, L1, {vsn, lists:droplast(os:cmd("git -C $(DEPS_DIR)/$1 describe --dirty --tags --always"))});
+ {_, {cmd, _}} -> lists:keyreplace(vsn, 1, L1, {vsn, "cmd"});
+ _ -> L1
+ end,
+ L3 = case lists:keyfind(registered, 1, L2) of false -> [{registered, []}|L2]; _ -> L2 end,
+ ok = file:write_file(AppSrcOut, io_lib:format("~p.~n", [{application, $(1), L3}])),
+ case AppSrcOut of AppSrcIn -> ok; _ -> ok = file:delete(AppSrcIn) end
+ end,
+ halt()
+endef
+
+define dep_fetch_git
+ git clone -q -n -- $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && git checkout -q $(call dep_commit,$(1));
+endef
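+
+# Example (illustrative) of the "method repository commit" specification
+# consumed above:
+#   DEPS += cowboy
+#   dep_cowboy = git https://github.com/ninenines/cowboy master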
+
+define dep_fetch_git-subfolder
+ mkdir -p $(ERLANG_MK_TMP)/git-subfolder; \
+ git clone -q -n -- $(call dep_repo,$1) \
+ $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1); \
+ cd $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1) \
+ && git checkout -q $(call dep_commit,$1); \
+ ln -s $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1)/$(word 4,$(dep_$(1))) \
+ $(DEPS_DIR)/$(call dep_name,$1);
+endef
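+
+# Example (illustrative): the 4th word selects the subfolder that gets
+# symlinked into DEPS_DIR (repository URL is hypothetical):
+#   dep_my_dep = git-subfolder https://example.com/monorepo.git master apps/my_dep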
+
+define dep_fetch_git-submodule
+ git submodule update --init -- $(DEPS_DIR)/$1;
+endef
+
+define dep_fetch_hg
+ hg clone -q -U $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && hg update -q $(call dep_commit,$(1));
+endef
+
+define dep_fetch_svn
+ svn checkout -q $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+define dep_fetch_cp
+ cp -R $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+define dep_fetch_ln
+ ln -s $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+# Hex dependencies always carry an explicit package version, so there is
+# no need to look in the Erlang.mk package index.
+define dep_fetch_hex
+ mkdir -p $(ERLANG_MK_TMP)/hex $(DEPS_DIR)/$1; \
+ $(call core_http_get,$(ERLANG_MK_TMP)/hex/$1.tar,\
+ https://repo.hex.pm/tarballs/$(if $(word 3,$(dep_$1)),$(word 3,$(dep_$1)),$1)-$(strip $(word 2,$(dep_$1))).tar); \
+ tar -xOf $(ERLANG_MK_TMP)/hex/$1.tar contents.tar.gz | tar -C $(DEPS_DIR)/$1 -xzf -;
+endef
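+
+# Example (illustrative): word 2 is the Hex version; the optional word 3
+# overrides the package name when it differs from the application name:
+#   dep_jsx = hex 2.9.0
+#   dep_my_app = hex 1.0.0 my_hex_package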
+
+define dep_fetch_fail
+ echo "Error: Unknown or invalid dependency: $(1)." >&2; \
+ exit 78;
+endef
+
+# Kept for compatibility with older Erlang.mk configurations.
+define dep_fetch_legacy
+ $(warning WARNING: '$(1)' dependency configuration uses deprecated format.) \
+ git clone -q -n -- $(word 1,$(dep_$(1))) $(DEPS_DIR)/$(1); \
+ cd $(DEPS_DIR)/$(1) && git checkout -q $(if $(word 2,$(dep_$(1))),$(word 2,$(dep_$(1))),master);
+endef
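+
+# Example (illustrative) of that deprecated format, where word 1 is the
+# repository and the optional word 2 the commit (defaulting to master):
+#   dep_cowboy = https://github.com/ninenines/cowboy 1.0.0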
+
+define dep_target
+$(DEPS_DIR)/$(call dep_name,$1): | $(ERLANG_MK_TMP)
+ $(eval DEP_NAME := $(call dep_name,$1))
+ $(eval DEP_STR := $(if $(filter $1,$(DEP_NAME)),$1,"$1 ($(DEP_NAME))"))
+ $(verbose) if test -d $(APPS_DIR)/$(DEP_NAME); then \
+ echo "Error: Dependency" $(DEP_STR) "conflicts with application found in $(APPS_DIR)/$(DEP_NAME)." >&2; \
+ exit 17; \
+ fi
+ $(verbose) mkdir -p $(DEPS_DIR)
+ $(dep_verbose) $(call dep_fetch_$(strip $(call dep_fetch,$(1))),$(1))
+ $(verbose) if [ -f $(DEPS_DIR)/$(1)/configure.ac -o -f $(DEPS_DIR)/$(1)/configure.in ] \
+ && [ ! -f $(DEPS_DIR)/$(1)/configure ]; then \
+ echo " AUTO " $(DEP_STR); \
+ cd $(DEPS_DIR)/$(1) && autoreconf -Wall -vif -I m4; \
+ fi
+ - $(verbose) if [ -f $(DEPS_DIR)/$(DEP_NAME)/configure ]; then \
+ echo " CONF " $(DEP_STR); \
+ cd $(DEPS_DIR)/$(DEP_NAME) && ./configure; \
+ fi
+ifeq ($(filter $(1),$(NO_AUTOPATCH)),)
+ $(verbose) $$(MAKE) --no-print-directory autopatch-$(DEP_NAME)
+endif
+
+.PHONY: autopatch-$(call dep_name,$1)
+
+autopatch-$(call dep_name,$1)::
+ $(verbose) if [ "$(1)" = "amqp_client" -a "$(RABBITMQ_CLIENT_PATCH)" ]; then \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \
+ echo " PATCH Downloading rabbitmq-codegen"; \
+ git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \
+ fi; \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-server ]; then \
+ echo " PATCH Downloading rabbitmq-server"; \
+ git clone https://github.com/rabbitmq/rabbitmq-server.git $(DEPS_DIR)/rabbitmq-server; \
+ fi; \
+ ln -s $(DEPS_DIR)/amqp_client/deps/rabbit_common-0.0.0 $(DEPS_DIR)/rabbit_common; \
+ elif [ "$(1)" = "rabbit" -a "$(RABBITMQ_SERVER_PATCH)" ]; then \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \
+ echo " PATCH Downloading rabbitmq-codegen"; \
+ git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \
+ fi \
+ elif [ "$1" = "elixir" -a "$(ELIXIR_PATCH)" ]; then \
+ ln -s lib/elixir/ebin $(DEPS_DIR)/elixir/; \
+ else \
+ $$(call dep_autopatch,$(call dep_name,$1)) \
+ fi
+endef
+
+$(foreach dep,$(BUILD_DEPS) $(DEPS),$(eval $(call dep_target,$(dep))))
+
+ifndef IS_APP
+clean:: clean-apps
+
+clean-apps:
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ $(MAKE) -C $$dep clean IS_APP=1; \
+ done
+
+distclean:: distclean-apps
+
+distclean-apps:
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ $(MAKE) -C $$dep distclean IS_APP=1; \
+ done
+endif
+
+ifndef SKIP_DEPS
+distclean:: distclean-deps
+
+distclean-deps:
+ $(gen_verbose) rm -rf $(DEPS_DIR)
+endif
+
+# Forward-declare variables used in core/deps-tools.mk. This is required
+# in case plugins use them.
+
+ERLANG_MK_RECURSIVE_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-deps-list.log
+ERLANG_MK_RECURSIVE_DOC_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-doc-deps-list.log
+ERLANG_MK_RECURSIVE_REL_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-rel-deps-list.log
+ERLANG_MK_RECURSIVE_TEST_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-test-deps-list.log
+ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-shell-deps-list.log
+
+ERLANG_MK_QUERY_DEPS_FILE = $(ERLANG_MK_TMP)/query-deps.log
+ERLANG_MK_QUERY_DOC_DEPS_FILE = $(ERLANG_MK_TMP)/query-doc-deps.log
+ERLANG_MK_QUERY_REL_DEPS_FILE = $(ERLANG_MK_TMP)/query-rel-deps.log
+ERLANG_MK_QUERY_TEST_DEPS_FILE = $(ERLANG_MK_TMP)/query-test-deps.log
+ERLANG_MK_QUERY_SHELL_DEPS_FILE = $(ERLANG_MK_TMP)/query-shell-deps.log
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: clean-app
+
+# Configuration.
+
+ERLC_OPTS ?= -Werror +debug_info +warn_export_vars +warn_shadow_vars \
+ +warn_obsolete_guard # +bin_opt_info +warn_export_all +warn_missing_spec
+COMPILE_FIRST ?=
+COMPILE_FIRST_PATHS = $(addprefix src/,$(addsuffix .erl,$(COMPILE_FIRST)))
+ERLC_EXCLUDE ?=
+ERLC_EXCLUDE_PATHS = $(addprefix src/,$(addsuffix .erl,$(ERLC_EXCLUDE)))
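+
+# Example (illustrative): both variables take module names, expanded to
+# src/NAME.erl by the *_PATHS variables above:
+#   COMPILE_FIRST = my_behaviour
+#   ERLC_EXCLUDE = my_generated_module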
+
+ERLC_ASN1_OPTS ?=
+
+ERLC_MIB_OPTS ?=
+COMPILE_MIB_FIRST ?=
+COMPILE_MIB_FIRST_PATHS = $(addprefix mibs/,$(addsuffix .mib,$(COMPILE_MIB_FIRST)))
+
+# Verbosity.
+
+app_verbose_0 = @echo " APP " $(PROJECT);
+app_verbose_2 = set -x;
+app_verbose = $(app_verbose_$(V))
+
+appsrc_verbose_0 = @echo " APP " $(PROJECT).app.src;
+appsrc_verbose_2 = set -x;
+appsrc_verbose = $(appsrc_verbose_$(V))
+
+makedep_verbose_0 = @echo " DEPEND" $(PROJECT).d;
+makedep_verbose_2 = set -x;
+makedep_verbose = $(makedep_verbose_$(V))
+
+erlc_verbose_0 = @echo " ERLC " $(filter-out $(patsubst %,%.erl,$(ERLC_EXCLUDE)),\
+ $(filter %.erl %.core,$(?F)));
+erlc_verbose_2 = set -x;
+erlc_verbose = $(erlc_verbose_$(V))
+
+xyrl_verbose_0 = @echo " XYRL " $(filter %.xrl %.yrl,$(?F));
+xyrl_verbose_2 = set -x;
+xyrl_verbose = $(xyrl_verbose_$(V))
+
+asn1_verbose_0 = @echo " ASN1 " $(filter %.asn1,$(?F));
+asn1_verbose_2 = set -x;
+asn1_verbose = $(asn1_verbose_$(V))
+
+mib_verbose_0 = @echo " MIB " $(filter %.bin %.mib,$(?F));
+mib_verbose_2 = set -x;
+mib_verbose = $(mib_verbose_$(V))
+
+ifneq ($(wildcard src/),)
+
+# Targets.
+
+app:: $(if $(wildcard ebin/test),clean) deps
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d
+ $(verbose) $(MAKE) --no-print-directory app-build
+
+ifeq ($(wildcard src/$(PROJECT_MOD).erl),)
+define app_file
+{application, '$(PROJECT)', [
+ {description, "$(PROJECT_DESCRIPTION)"},
+ {vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP),
+ {id$(comma)$(space)"$(1)"}$(comma))
+ {modules, [$(call comma_list,$(2))]},
+ {registered, []},
+ {applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(foreach dep,$(DEPS),$(call dep_name,$(dep))))]},
+ {env, $(subst \,\\,$(PROJECT_ENV))}$(if $(findstring {,$(PROJECT_APP_EXTRA_KEYS)),$(comma)$(newline)$(tab)$(subst \,\\,$(PROJECT_APP_EXTRA_KEYS)),)
+]}.
+endef
+else
+define app_file
+{application, '$(PROJECT)', [
+ {description, "$(PROJECT_DESCRIPTION)"},
+ {vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP),
+ {id$(comma)$(space)"$(1)"}$(comma))
+ {modules, [$(call comma_list,$(2))]},
+ {registered, [$(call comma_list,$(PROJECT)_sup $(PROJECT_REGISTERED))]},
+ {applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(foreach dep,$(DEPS),$(call dep_name,$(dep))))]},
+ {mod, {$(PROJECT_MOD), []}},
+ {env, $(subst \,\\,$(PROJECT_ENV))}$(if $(findstring {,$(PROJECT_APP_EXTRA_KEYS)),$(comma)$(newline)$(tab)$(subst \,\\,$(PROJECT_APP_EXTRA_KEYS)),)
+]}.
+endef
+endif
+
+app-build: ebin/$(PROJECT).app
+ $(verbose) :
+
+# Source files.
+
+ALL_SRC_FILES := $(sort $(call core_find,src/,*))
+
+ERL_FILES := $(filter %.erl,$(ALL_SRC_FILES))
+CORE_FILES := $(filter %.core,$(ALL_SRC_FILES))
+
+# ASN.1 files.
+
+ifneq ($(wildcard asn1/),)
+ASN1_FILES = $(sort $(call core_find,asn1/,*.asn1))
+ERL_FILES += $(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES))))
+
+define compile_asn1
+ $(verbose) mkdir -p include/
+ $(asn1_verbose) erlc -v -I include/ -o asn1/ +noobj $(ERLC_ASN1_OPTS) $(1)
+ $(verbose) mv asn1/*.erl src/
+ -$(verbose) mv asn1/*.hrl include/
+ $(verbose) mv asn1/*.asn1db include/
+endef
+
+$(PROJECT).d:: $(ASN1_FILES)
+ $(if $(strip $?),$(call compile_asn1,$?))
+endif
+
+# SNMP MIB files.
+
+ifneq ($(wildcard mibs/),)
+MIB_FILES = $(sort $(call core_find,mibs/,*.mib))
+
+$(PROJECT).d:: $(COMPILE_MIB_FIRST_PATHS) $(MIB_FILES)
+ $(verbose) mkdir -p include/ priv/mibs/
+ $(mib_verbose) erlc -v $(ERLC_MIB_OPTS) -o priv/mibs/ -I priv/mibs/ $?
+ $(mib_verbose) erlc -o include/ -- $(addprefix priv/mibs/,$(patsubst %.mib,%.bin,$(notdir $?)))
+endif
+
+# Leex and Yecc files.
+
+XRL_FILES := $(filter %.xrl,$(ALL_SRC_FILES))
+XRL_ERL_FILES = $(addprefix src/,$(patsubst %.xrl,%.erl,$(notdir $(XRL_FILES))))
+ERL_FILES += $(XRL_ERL_FILES)
+
+YRL_FILES := $(filter %.yrl,$(ALL_SRC_FILES))
+YRL_ERL_FILES = $(addprefix src/,$(patsubst %.yrl,%.erl,$(notdir $(YRL_FILES))))
+ERL_FILES += $(YRL_ERL_FILES)
+
+$(PROJECT).d:: $(XRL_FILES) $(YRL_FILES)
+ $(if $(strip $?),$(xyrl_verbose) erlc -v -o src/ $(YRL_ERLC_OPTS) $?)
+
+# Erlang and Core Erlang files.
+
+define makedep.erl
+ E = ets:new(makedep, [bag]),
+ G = digraph:new([acyclic]),
+ ErlFiles = lists:usort(string:tokens("$(ERL_FILES)", " ")),
+ DepsDir = "$(call core_native_path,$(DEPS_DIR))",
+ AppsDir = "$(call core_native_path,$(APPS_DIR))",
+ DepsDirsSrc = "$(if $(wildcard $(DEPS_DIR)/*/src), $(call core_native_path,$(wildcard $(DEPS_DIR)/*/src)))",
+ DepsDirsInc = "$(if $(wildcard $(DEPS_DIR)/*/include), $(call core_native_path,$(wildcard $(DEPS_DIR)/*/include)))",
+ AppsDirsSrc = "$(if $(wildcard $(APPS_DIR)/*/src), $(call core_native_path,$(wildcard $(APPS_DIR)/*/src)))",
+ AppsDirsInc = "$(if $(wildcard $(APPS_DIR)/*/include), $(call core_native_path,$(wildcard $(APPS_DIR)/*/include)))",
+ DepsDirs = lists:usort(string:tokens(DepsDirsSrc++DepsDirsInc, " ")),
+ AppsDirs = lists:usort(string:tokens(AppsDirsSrc++AppsDirsInc, " ")),
+ Modules = [{list_to_atom(filename:basename(F, ".erl")), F} || F <- ErlFiles],
+ Add = fun (Mod, Dep) ->
+ case lists:keyfind(Dep, 1, Modules) of
+ false -> ok;
+ {_, DepFile} ->
+ {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+ ets:insert(E, {ModFile, DepFile}),
+ digraph:add_vertex(G, Mod),
+ digraph:add_vertex(G, Dep),
+ digraph:add_edge(G, Mod, Dep)
+ end
+ end,
+ AddHd = fun (F, Mod, DepFile) ->
+ case file:open(DepFile, [read]) of
+ {error, enoent} ->
+ ok;
+ {ok, Fd} ->
+ {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+ case ets:match(E, {ModFile, DepFile}) of
+ [] ->
+ ets:insert(E, {ModFile, DepFile}),
+ F(F, Fd, Mod,0);
+ _ -> ok
+ end
+ end
+ end,
+ SearchHrl = fun
+ F(_Hrl, []) -> {error,enoent};
+ F(Hrl, [Dir|Dirs]) ->
+ HrlF = filename:join([Dir,Hrl]),
+ case filelib:is_file(HrlF) of
+ true ->
+ {ok, HrlF};
+ false -> F(Hrl,Dirs)
+ end
+ end,
+ Attr = fun
+ (_F, Mod, behavior, Dep) ->
+ Add(Mod, Dep);
+ (_F, Mod, behaviour, Dep) ->
+ Add(Mod, Dep);
+ (_F, Mod, compile, {parse_transform, Dep}) ->
+ Add(Mod, Dep);
+ (_F, Mod, compile, Opts) when is_list(Opts) ->
+ case proplists:get_value(parse_transform, Opts) of
+ undefined -> ok;
+ Dep -> Add(Mod, Dep)
+ end;
+ (F, Mod, include, Hrl) ->
+ case SearchHrl(Hrl, ["src", "include",AppsDir,DepsDir]++AppsDirs++DepsDirs) of
+ {ok, FoundHrl} -> AddHd(F, Mod, FoundHrl);
+ {error, _} -> false
+ end;
+ (F, Mod, include_lib, Hrl) ->
+ case SearchHrl(Hrl, ["src", "include",AppsDir,DepsDir]++AppsDirs++DepsDirs) of
+ {ok, FoundHrl} -> AddHd(F, Mod, FoundHrl);
+ {error, _} -> false
+ end;
+ (F, Mod, import, {Imp, _}) ->
+ IsFile =
+ case lists:keyfind(Imp, 1, Modules) of
+ false -> false;
+ {_, FilePath} -> filelib:is_file(FilePath)
+ end,
+ case IsFile of
+ false -> ok;
+ true -> Add(Mod, Imp)
+ end;
+ (_, _, _, _) -> ok
+ end,
+ MakeDepend = fun
+ (F, Fd, Mod, StartLocation) ->
+ {ok, Filename} = file:pid2name(Fd),
+ case io:parse_erl_form(Fd, undefined, StartLocation) of
+ {ok, AbsData, EndLocation} ->
+ case AbsData of
+ {attribute, _, Key, Value} ->
+ Attr(F, Mod, Key, Value),
+ F(F, Fd, Mod, EndLocation);
+ _ -> F(F, Fd, Mod, EndLocation)
+ end;
+ {eof, _ } -> file:close(Fd);
+ {error, ErrorDescription } ->
+ file:close(Fd);
+ {error, ErrorInfo, ErrorLocation} ->
+ F(F, Fd, Mod, ErrorLocation)
+ end,
+ ok
+ end,
+ [begin
+ Mod = list_to_atom(filename:basename(F, ".erl")),
+ case file:open(F, [read]) of
+ {ok, Fd} -> MakeDepend(MakeDepend, Fd, Mod,0);
+ {error, enoent} -> ok
+ end
+ end || F <- ErlFiles],
+ Depend = sofs:to_external(sofs:relation_to_family(sofs:relation(ets:tab2list(E)))),
+ CompileFirst = [X || X <- lists:reverse(digraph_utils:topsort(G)), [] =/= digraph:in_neighbours(G, X)],
+ TargetPath = fun(Target) ->
+ case lists:keyfind(Target, 1, Modules) of
+ false -> "";
+ {_, DepFile} ->
+ DirSubname = tl(string:tokens(filename:dirname(DepFile), "/")),
+ string:join(DirSubname ++ [atom_to_list(Target)], "/")
+ end
+ end,
+ Output0 = [
+ "# Generated by Erlang.mk. Edit at your own risk!\n\n",
+ [[F, "::", [[" ", D] || D <- Deps], "; @touch \$$@\n"] || {F, Deps} <- Depend],
+ "\nCOMPILE_FIRST +=", [[" ", TargetPath(CF)] || CF <- CompileFirst], "\n"
+ ],
+ Output = case "é" of
+ [233] -> unicode:characters_to_binary(Output0);
+ _ -> Output0
+ end,
+ ok = file:write_file("$(1)", Output),
+ halt()
+endef
+
+ifeq ($(if $(NO_MAKEDEP),$(wildcard $(PROJECT).d),),)
+$(PROJECT).d:: $(ERL_FILES) $(call core_find,include/,*.hrl) $(MAKEFILE_LIST)
+ $(makedep_verbose) $(call erlang,$(call makedep.erl,$@))
+endif
+
+ifeq ($(IS_APP)$(IS_DEP),)
+ifneq ($(words $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES)),0)
+# Rebuild everything when the Makefile changes.
+$(ERLANG_MK_TMP)/last-makefile-change: $(MAKEFILE_LIST) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES); \
+ touch -c $(PROJECT).d; \
+ fi
+ $(verbose) touch $@
+
+$(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES):: $(ERLANG_MK_TMP)/last-makefile-change
+ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change
+endif
+endif
+
+$(PROJECT).d::
+ $(verbose) :
+
+include $(wildcard $(PROJECT).d)
+
+ebin/$(PROJECT).app:: ebin/
+
+ebin/:
+ $(verbose) mkdir -p ebin/
+
+define compile_erl
+ $(erlc_verbose) erlc -v $(if $(IS_DEP),$(filter-out -Werror,$(ERLC_OPTS)),$(ERLC_OPTS)) -o ebin/ \
+ -pa ebin/ -I include/ $(filter-out $(ERLC_EXCLUDE_PATHS),$(COMPILE_FIRST_PATHS) $(1))
+endef
+
+define validate_app_file
+ case file:consult("ebin/$(PROJECT).app") of
+ {ok, _} -> halt();
+ _ -> halt(1)
+ end
+endef
+
+ebin/$(PROJECT).app:: $(ERL_FILES) $(CORE_FILES) $(wildcard src/$(PROJECT).app.src)
+ $(eval FILES_TO_COMPILE := $(filter-out src/$(PROJECT).app.src,$?))
+ $(if $(strip $(FILES_TO_COMPILE)),$(call compile_erl,$(FILES_TO_COMPILE)))
+# Older git versions do not have the --first-parent flag. Do without in that case.
+ $(eval GITDESCRIBE := $(shell git describe --dirty --abbrev=7 --tags --always --first-parent 2>/dev/null \
+ || git describe --dirty --abbrev=7 --tags --always 2>/dev/null || true))
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(filter-out $(ERLC_EXCLUDE_PATHS),$(ERL_FILES) $(CORE_FILES) $(BEAM_FILES)))))))
+ifeq ($(wildcard src/$(PROJECT).app.src),)
+ $(app_verbose) printf '$(subst %,%%,$(subst $(newline),\n,$(subst ','\'',$(call app_file,$(GITDESCRIBE),$(MODULES)))))' \
+ > ebin/$(PROJECT).app
+ $(verbose) if ! $(call erlang,$(call validate_app_file)); then \
+ echo "The .app file produced is invalid. Please verify the value of PROJECT_ENV." >&2; \
+ exit 1; \
+ fi
+else
+ $(verbose) if [ -z "$$(grep -e '^[^%]*{\s*modules\s*,' src/$(PROJECT).app.src)" ]; then \
+ echo "Empty modules entry not found in $(PROJECT).app.src. Please consult the erlang.mk documentation for instructions." >&2; \
+ exit 1; \
+ fi
+ $(appsrc_verbose) cat src/$(PROJECT).app.src \
+ | sed "s/{[[:space:]]*modules[[:space:]]*,[[:space:]]*\[\]}/{modules, \[$(call comma_list,$(MODULES))\]}/" \
+ | sed "s/{id,[[:space:]]*\"git\"}/{id, \"$(subst /,\/,$(GITDESCRIBE))\"}/" \
+ > ebin/$(PROJECT).app
+endif
+ifneq ($(wildcard src/$(PROJECT).appup),)
+ $(verbose) cp src/$(PROJECT).appup ebin/
+endif
+
+clean:: clean-app
+
+clean-app:
+ $(gen_verbose) rm -rf $(PROJECT).d ebin/ priv/mibs/ $(XRL_ERL_FILES) $(YRL_ERL_FILES) \
+ $(addprefix include/,$(patsubst %.mib,%.hrl,$(notdir $(MIB_FILES)))) \
+ $(addprefix include/,$(patsubst %.asn1,%.hrl,$(notdir $(ASN1_FILES)))) \
+ $(addprefix include/,$(patsubst %.asn1,%.asn1db,$(notdir $(ASN1_FILES)))) \
+ $(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES))))
+
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Viktor Söderqvist <viktor@zuiderkwast.se>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: docs-deps
+
+# Configuration.
+
+ALL_DOC_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(DOC_DEPS))
+
+# Targets.
+
+$(foreach dep,$(DOC_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+doc-deps:
+else
+doc-deps: $(ALL_DOC_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_DOC_DEPS_DIRS) ; do $(MAKE) -C $$dep IS_DEP=1; done
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: rel-deps
+
+# Configuration.
+
+ALL_REL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(REL_DEPS))
+
+# Targets.
+
+$(foreach dep,$(REL_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+rel-deps:
+else
+rel-deps: $(ALL_REL_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_REL_DEPS_DIRS) ; do $(MAKE) -C $$dep; done
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: test-deps test-dir test-build clean-test-dir
+
+# Configuration.
+
+TEST_DIR ?= $(CURDIR)/test
+
+ALL_TEST_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(TEST_DEPS))
+
+TEST_ERLC_OPTS ?= +debug_info +warn_export_vars +warn_shadow_vars +warn_obsolete_guard
+TEST_ERLC_OPTS += -DTEST=1
+
+# Targets.
+
+$(foreach dep,$(TEST_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+test-deps:
+else
+test-deps: $(ALL_TEST_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_TEST_DEPS_DIRS) ; do \
+ if [ -z "$(strip $(FULL))" ] && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ else \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ fi \
+ done
+endif
+
+ifneq ($(wildcard $(TEST_DIR)),)
+test-dir: $(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build
+ @:
+
+test_erlc_verbose_0 = @echo " ERLC " $(filter-out $(patsubst %,%.erl,$(ERLC_EXCLUDE)),\
+ $(filter %.erl %.core,$(notdir $(FILES_TO_COMPILE))));
+test_erlc_verbose_2 = set -x;
+test_erlc_verbose = $(test_erlc_verbose_$(V))
+
+define compile_test_erl
+ $(test_erlc_verbose) erlc -v $(TEST_ERLC_OPTS) -o $(TEST_DIR) \
+ -pa ebin/ -I include/ $(1)
+endef
+
+ERL_TEST_FILES = $(call core_find,$(TEST_DIR)/,*.erl)
+$(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build: $(ERL_TEST_FILES) $(MAKEFILE_LIST)
+ $(eval FILES_TO_COMPILE := $(if $(filter $(MAKEFILE_LIST),$?),$(filter $(ERL_TEST_FILES),$^),$?))
+ $(if $(strip $(FILES_TO_COMPILE)),$(call compile_test_erl,$(FILES_TO_COMPILE)) && touch $@)
+endif
+
+test-build:: IS_TEST=1
+test-build:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build:: $(if $(wildcard src),$(if $(wildcard ebin/test),,clean)) $(if $(IS_APP),,deps test-deps)
+# We already compiled everything when IS_APP=1.
+ifndef IS_APP
+ifneq ($(wildcard src),)
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(verbose) $(MAKE) --no-print-directory app-build ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(gen_verbose) touch ebin/test
+endif
+ifneq ($(wildcard $(TEST_DIR)),)
+ $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+endif
+endif
+
+# Roughly the same as test-build, but when IS_APP=1.
+# We only care about compiling the current application.
+ifdef IS_APP
+test-build-app:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build-app:: deps test-deps
+ifneq ($(wildcard src),)
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(verbose) $(MAKE) --no-print-directory app-build ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(gen_verbose) touch ebin/test
+endif
+ifneq ($(wildcard $(TEST_DIR)),)
+ $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+endif
+endif
+
+clean:: clean-test-dir
+
+clean-test-dir:
+ifneq ($(wildcard $(TEST_DIR)/*.beam),)
+ $(gen_verbose) rm -f $(TEST_DIR)/*.beam $(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: rebar.config
+
+# We strip out -Werror because we don't want to fail due to
+# warnings when used as a dependency.
+
+compat_prepare_erlc_opts = $(shell echo "$1" | sed 's/, */,/g')
+
+define compat_convert_erlc_opts
+$(if $(filter-out -Werror,$1),\
+ $(if $(findstring +,$1),\
+ $(shell echo $1 | cut -b 2-)))
+endef
+
+define compat_erlc_opts_to_list
+[$(call comma_list,$(foreach o,$(call compat_prepare_erlc_opts,$1),$(call compat_convert_erlc_opts,$o)))]
+endef
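+
+# Example (illustrative): with ERLC_OPTS = -Werror +debug_info +warn_export_vars
+# the template below would render {erl_opts, [debug_info,warn_export_vars]}.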
+
+define compat_rebar_config
+{deps, [
+$(call comma_list,$(foreach d,$(DEPS),\
+ $(if $(filter hex,$(call dep_fetch,$d)),\
+ {$(call dep_name,$d)$(comma)"$(call dep_repo,$d)"},\
+ {$(call dep_name,$d)$(comma)".*"$(comma){git,"$(call dep_repo,$d)"$(comma)"$(call dep_commit,$d)"}})))
+]}.
+{erl_opts, $(call compat_erlc_opts_to_list,$(ERLC_OPTS))}.
+endef
+
+rebar.config:
+ $(gen_verbose) $(call core_render,compat_rebar_config,rebar.config)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter asciideck,$(DEPS) $(DOC_DEPS)),asciideck)
+
+.PHONY: asciidoc asciidoc-guide asciidoc-manual install-asciidoc distclean-asciidoc-guide distclean-asciidoc-manual
+
+# Core targets.
+
+docs:: asciidoc
+
+distclean:: distclean-asciidoc-guide distclean-asciidoc-manual
+
+# Plugin-specific targets.
+
+asciidoc: asciidoc-guide asciidoc-manual
+
+# User guide.
+
+ifeq ($(wildcard doc/src/guide/book.asciidoc),)
+asciidoc-guide:
+else
+asciidoc-guide: distclean-asciidoc-guide doc-deps
+ a2x -v -f pdf doc/src/guide/book.asciidoc && mv doc/src/guide/book.pdf doc/guide.pdf
+ a2x -v -f chunked doc/src/guide/book.asciidoc && mv doc/src/guide/book.chunked/ doc/html/
+
+distclean-asciidoc-guide:
+ $(gen_verbose) rm -rf doc/html/ doc/guide.pdf
+endif
+
+# Man pages.
+
+ASCIIDOC_MANUAL_FILES := $(wildcard doc/src/manual/*.asciidoc)
+
+ifeq ($(ASCIIDOC_MANUAL_FILES),)
+asciidoc-manual:
+else
+
+# Configuration.
+
+MAN_INSTALL_PATH ?= /usr/local/share/man
+MAN_SECTIONS ?= 3 7
+MAN_PROJECT ?= $(shell echo $(PROJECT) | sed 's/^./\U&\E/')
+MAN_VERSION ?= $(PROJECT_VERSION)
+
+# Plugin-specific targets.
+
+define asciidoc2man.erl
+try
+ [begin
+ io:format(" ADOC ~s~n", [F]),
+ ok = asciideck:to_manpage(asciideck:parse_file(F), #{
+ compress => gzip,
+ outdir => filename:dirname(F),
+ extra2 => "$(MAN_PROJECT) $(MAN_VERSION)",
+ extra3 => "$(MAN_PROJECT) Function Reference"
+ })
+ end || F <- [$(shell echo $(addprefix $(comma)\",$(addsuffix \",$1)) | sed 's/^.//')]],
+ halt(0)
+catch C:E ->
+ io:format("Exception ~p:~p~nStacktrace: ~p~n", [C, E, erlang:get_stacktrace()]),
+ halt(1)
+end.
+endef
+
+asciidoc-manual:: doc-deps
+
+asciidoc-manual:: $(ASCIIDOC_MANUAL_FILES)
+ $(gen_verbose) $(call erlang,$(call asciidoc2man.erl,$?))
+ $(verbose) $(foreach s,$(MAN_SECTIONS),mkdir -p doc/man$s/ && mv doc/src/manual/*.$s.gz doc/man$s/;)
+
+install-docs:: install-asciidoc
+
+install-asciidoc: asciidoc-manual
+ $(foreach s,$(MAN_SECTIONS),\
+ mkdir -p $(MAN_INSTALL_PATH)/man$s/ && \
+ install -g `id -g` -o `id -u` -m 0644 doc/man$s/*.gz $(MAN_INSTALL_PATH)/man$s/;)
+
+distclean-asciidoc-manual:
+ $(gen_verbose) rm -rf $(addprefix doc/man,$(MAN_SECTIONS))
+endif
+endif
+
+# Copyright (c) 2014-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: bootstrap bootstrap-lib bootstrap-rel new list-templates
+
+# Core targets.
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Bootstrap targets:" \
+ " bootstrap Generate a skeleton of an OTP application" \
+ " bootstrap-lib Generate a skeleton of an OTP library" \
+ " bootstrap-rel Generate the files needed to build a release" \
+ " new-app in=NAME Create a new local OTP application NAME" \
+ " new-lib in=NAME Create a new local OTP library NAME" \
+ " new t=TPL n=NAME Generate a module NAME based on the template TPL" \
+ " new t=T n=N in=APP Generate a module NAME based on the template TPL in APP" \
+ " list-templates List available templates"
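+
+# Example (illustrative) invocations of the targets listed above:
+#   make new-app in=my_app
+#   make new t=gen_server n=my_server in=my_app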
+
+# Bootstrap templates.
+
+define bs_appsrc
+{application, $p, [
+ {description, ""},
+ {vsn, "0.1.0"},
+ {id, "git"},
+ {modules, []},
+ {registered, []},
+ {applications, [
+ kernel,
+ stdlib
+ ]},
+ {mod, {$p_app, []}},
+ {env, []}
+]}.
+endef
+
+define bs_appsrc_lib
+{application, $p, [
+ {description, ""},
+ {vsn, "0.1.0"},
+ {id, "git"},
+ {modules, []},
+ {registered, []},
+ {applications, [
+ kernel,
+ stdlib
+ ]}
+]}.
+endef
+
+# To prevent autocompletion issues with ZSH, we add "include erlang.mk"
+# separately during the actual bootstrap.
+define bs_Makefile
+PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.1.0
+$(if $(SP),
+# Whitespace to be used when creating files from templates.
+SP = $(SP)
+)
+endef
+
+define bs_apps_Makefile
+PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.1.0
+$(if $(SP),
+# Whitespace to be used when creating files from templates.
+SP = $(SP)
+)
+# Make sure we know where the applications are located.
+ROOT_DIR ?= $(call core_relpath,$(dir $(ERLANG_MK_FILENAME)),$(APPS_DIR)/app)
+APPS_DIR ?= ..
+DEPS_DIR ?= $(call core_relpath,$(DEPS_DIR),$(APPS_DIR)/app)
+
+include $$(ROOT_DIR)/erlang.mk
+endef
+
+define bs_app
+-module($p_app).
+-behaviour(application).
+
+-export([start/2]).
+-export([stop/1]).
+
+start(_Type, _Args) ->
+ $p_sup:start_link().
+
+stop(_State) ->
+ ok.
+endef
+
+define bs_relx_config
+{release, {$p_release, "1"}, [$p, sasl, runtime_tools]}.
+{extended_start_script, true}.
+{sys_config, "config/sys.config"}.
+{vm_args, "config/vm.args"}.
+endef
+
+define bs_sys_config
+[
+].
+endef
+
+define bs_vm_args
+-name $p@127.0.0.1
+-setcookie $p
+-heart
+endef
+
+# Normal templates.
+
+define tpl_supervisor
+-module($(n)).
+-behaviour(supervisor).
+
+-export([start_link/0]).
+-export([init/1]).
+
+start_link() ->
+ supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+init([]) ->
+ Procs = [],
+ {ok, {{one_for_one, 1, 5}, Procs}}.
+endef
+
+define tpl_gen_server
+-module($(n)).
+-behaviour(gen_server).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_server.
+-export([init/1]).
+-export([handle_call/3]).
+-export([handle_cast/2]).
+-export([handle_info/2]).
+-export([terminate/2]).
+-export([code_change/3]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_server:start_link(?MODULE, [], []).
+
+%% gen_server.
+
+init([]) ->
+ {ok, #state{}}.
+
+handle_call(_Request, _From, State) ->
+ {reply, ignored, State}.
+
+handle_cast(_Msg, State) ->
+ {noreply, State}.
+
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+endef
+
+define tpl_module
+-module($(n)).
+-export([]).
+endef
+
+define tpl_cowboy_http
+-module($(n)).
+-behaviour(cowboy_http_handler).
+
+-export([init/3]).
+-export([handle/2]).
+-export([terminate/3]).
+
+-record(state, {
+}).
+
+init(_, Req, _Opts) ->
+ {ok, Req, #state{}}.
+
+handle(Req, State=#state{}) ->
+ {ok, Req2} = cowboy_req:reply(200, Req),
+ {ok, Req2, State}.
+
+terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_gen_fsm
+-module($(n)).
+-behaviour(gen_fsm).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_fsm.
+-export([init/1]).
+-export([state_name/2]).
+-export([handle_event/3]).
+-export([state_name/3]).
+-export([handle_sync_event/4]).
+-export([handle_info/3]).
+-export([terminate/3]).
+-export([code_change/4]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_fsm:start_link(?MODULE, [], []).
+
+%% gen_fsm.
+
+init([]) ->
+ {ok, state_name, #state{}}.
+
+state_name(_Event, StateData) ->
+ {next_state, state_name, StateData}.
+
+handle_event(_Event, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+state_name(_Event, _From, StateData) ->
+ {reply, ignored, state_name, StateData}.
+
+handle_sync_event(_Event, _From, StateName, StateData) ->
+ {reply, ignored, StateName, StateData}.
+
+handle_info(_Info, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+terminate(_Reason, _StateName, _StateData) ->
+ ok.
+
+code_change(_OldVsn, StateName, StateData, _Extra) ->
+ {ok, StateName, StateData}.
+endef
+
+define tpl_gen_statem
+-module($(n)).
+-behaviour(gen_statem).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_statem.
+-export([callback_mode/0]).
+-export([init/1]).
+-export([state_name/3]).
+-export([handle_event/4]).
+-export([terminate/3]).
+-export([code_change/4]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_statem:start_link(?MODULE, [], []).
+
+%% gen_statem.
+
+callback_mode() ->
+ state_functions.
+
+init([]) ->
+ {ok, state_name, #state{}}.
+
+state_name(_EventType, _EventData, StateData) ->
+ {next_state, state_name, StateData}.
+
+handle_event(_EventType, _EventData, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+terminate(_Reason, _StateName, _StateData) ->
+ ok.
+
+code_change(_OldVsn, StateName, StateData, _Extra) ->
+ {ok, StateName, StateData}.
+endef
+
+define tpl_cowboy_loop
+-module($(n)).
+-behaviour(cowboy_loop_handler).
+
+-export([init/3]).
+-export([info/3]).
+-export([terminate/3]).
+
+-record(state, {
+}).
+
+init(_, Req, _Opts) ->
+ {loop, Req, #state{}, 5000, hibernate}.
+
+info(_Info, Req, State) ->
+ {loop, Req, State, hibernate}.
+
+terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_cowboy_rest
+-module($(n)).
+
+-export([init/3]).
+-export([content_types_provided/2]).
+-export([get_html/2]).
+
+init(_, _Req, _Opts) ->
+ {upgrade, protocol, cowboy_rest}.
+
+content_types_provided(Req, State) ->
+ {[{{<<"text">>, <<"html">>, '*'}, get_html}], Req, State}.
+
+get_html(Req, State) ->
+ {<<"<html><body>This is REST!</body></html>">>, Req, State}.
+endef
+
+define tpl_cowboy_ws
+-module($(n)).
+-behaviour(cowboy_websocket_handler).
+
+-export([init/3]).
+-export([websocket_init/3]).
+-export([websocket_handle/3]).
+-export([websocket_info/3]).
+-export([websocket_terminate/3]).
+
+-record(state, {
+}).
+
+init(_, _, _) ->
+ {upgrade, protocol, cowboy_websocket}.
+
+websocket_init(_, Req, _Opts) ->
+ Req2 = cowboy_req:compact(Req),
+ {ok, Req2, #state{}}.
+
+websocket_handle({text, Data}, Req, State) ->
+ {reply, {text, Data}, Req, State};
+websocket_handle({binary, Data}, Req, State) ->
+ {reply, {binary, Data}, Req, State};
+websocket_handle(_Frame, Req, State) ->
+ {ok, Req, State}.
+
+websocket_info(_Info, Req, State) ->
+ {ok, Req, State}.
+
+websocket_terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_ranch_protocol
+-module($(n)).
+-behaviour(ranch_protocol).
+
+-export([start_link/4]).
+-export([init/4]).
+
+-type opts() :: [].
+-export_type([opts/0]).
+
+-record(state, {
+ socket :: inet:socket(),
+ transport :: module()
+}).
+
+start_link(Ref, Socket, Transport, Opts) ->
+ Pid = spawn_link(?MODULE, init, [Ref, Socket, Transport, Opts]),
+ {ok, Pid}.
+
+-spec init(ranch:ref(), inet:socket(), module(), opts()) -> ok.
+init(Ref, Socket, Transport, _Opts) ->
+ ok = ranch:accept_ack(Ref),
+ loop(#state{socket=Socket, transport=Transport}).
+
+loop(State) ->
+ loop(State).
+endef
+
+# Plugin-specific targets.
+
+ifndef WS
+ifdef SP
+WS = $(subst a,,a $(wordlist 1,$(SP),a a a a a a a a a a a a a a a a a a a a))
+else
+WS = $(tab)
+endif
+endif
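+
+# Example (illustrative): generate the bootstrap files indented with
+# 2 spaces instead of tabs:
+#   make bootstrap SP=2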
+
+bootstrap:
+ifneq ($(wildcard src/),)
+ $(error Error: src/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(eval n := $(PROJECT)_sup)
+ $(verbose) $(call core_render,bs_Makefile,Makefile)
+ $(verbose) echo "include erlang.mk" >> Makefile
+ $(verbose) mkdir src/
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc,src/$(PROJECT).app.src)
+endif
+ $(verbose) $(call core_render,bs_app,src/$(PROJECT)_app.erl)
+ $(verbose) $(call core_render,tpl_supervisor,src/$(PROJECT)_sup.erl)
+
+bootstrap-lib:
+ifneq ($(wildcard src/),)
+ $(error Error: src/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(verbose) $(call core_render,bs_Makefile,Makefile)
+ $(verbose) echo "include erlang.mk" >> Makefile
+ $(verbose) mkdir src/
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc_lib,src/$(PROJECT).app.src)
+endif
+
+bootstrap-rel:
+ifneq ($(wildcard relx.config),)
+ $(error Error: relx.config already exists)
+endif
+ifneq ($(wildcard config/),)
+ $(error Error: config/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(verbose) $(call core_render,bs_relx_config,relx.config)
+ $(verbose) mkdir config/
+ $(verbose) $(call core_render,bs_sys_config,config/sys.config)
+ $(verbose) $(call core_render,bs_vm_args,config/vm.args)
+
+new-app:
+ifndef in
+ $(error Usage: $(MAKE) new-app in=APP)
+endif
+ifneq ($(wildcard $(APPS_DIR)/$in),)
+ $(error Error: Application $in already exists)
+endif
+ $(eval p := $(in))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(eval n := $(in)_sup)
+ $(verbose) mkdir -p $(APPS_DIR)/$p/src/
+ $(verbose) $(call core_render,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile)
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc,$(APPS_DIR)/$p/src/$p.app.src)
+endif
+ $(verbose) $(call core_render,bs_app,$(APPS_DIR)/$p/src/$p_app.erl)
+ $(verbose) $(call core_render,tpl_supervisor,$(APPS_DIR)/$p/src/$p_sup.erl)
+
+new-lib:
+ifndef in
+ $(error Usage: $(MAKE) new-lib in=APP)
+endif
+ifneq ($(wildcard $(APPS_DIR)/$in),)
+ $(error Error: Application $in already exists)
+endif
+ $(eval p := $(in))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(verbose) mkdir -p $(APPS_DIR)/$p/src/
+ $(verbose) $(call core_render,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile)
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc_lib,$(APPS_DIR)/$p/src/$p.app.src)
+endif
+
+new:
+ifeq ($(wildcard src/)$(in),)
+ $(error Error: src/ directory does not exist)
+endif
+ifndef t
+ $(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP])
+endif
+ifndef n
+ $(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP])
+endif
+ifdef in
+ $(verbose) $(call core_render,tpl_$(t),$(APPS_DIR)/$(in)/src/$(n).erl)
+else
+ $(verbose) $(call core_render,tpl_$(t),src/$(n).erl)
+endif
+
+list-templates:
+	$(verbose) echo Available templates:
+ $(verbose) printf " %s\n" $(sort $(patsubst tpl_%,%,$(filter tpl_%,$(.VARIABLES))))
+
+# Copyright (c) 2014-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: clean-c_src distclean-c_src-env
+
+# Configuration.
+
+C_SRC_DIR ?= $(CURDIR)/c_src
+C_SRC_ENV ?= $(C_SRC_DIR)/env.mk
+C_SRC_OUTPUT ?= $(CURDIR)/priv/$(PROJECT)
+C_SRC_TYPE ?= shared
+
+# System type and C compiler/flags.
+
+ifeq ($(PLATFORM),msys2)
+ C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?= .exe
+ C_SRC_OUTPUT_SHARED_EXTENSION ?= .dll
+else
+ C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?=
+ C_SRC_OUTPUT_SHARED_EXTENSION ?= .so
+endif
+
+ifeq ($(C_SRC_TYPE),shared)
+ C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_SHARED_EXTENSION)
+else
+ C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_EXECUTABLE_EXTENSION)
+endif
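+
+# Example (illustrative): any value other than "shared" (conventionally
+# "executable") produces a standalone executable instead of a shared library:
+#   C_SRC_TYPE = executable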
+
+ifeq ($(PLATFORM),msys2)
+# We hardcode the compiler used on MSYS2. The default CC=cc does
+# not produce working code. The "gcc" MSYS2 package also doesn't.
+ CC = /mingw64/bin/gcc
+ export CC
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),darwin)
+ CC ?= cc
+ CFLAGS ?= -O3 -std=c99 -arch x86_64 -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -arch x86_64 -Wall
+ LDFLAGS ?= -arch x86_64 -flat_namespace -undefined suppress
+else ifeq ($(PLATFORM),freebsd)
+ CC ?= cc
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),linux)
+ CC ?= gcc
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+endif
+
+ifneq ($(PLATFORM),msys2)
+ CFLAGS += -fPIC
+ CXXFLAGS += -fPIC
+endif
+
+CFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
+CXXFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
+
+LDLIBS += -L"$(ERL_INTERFACE_LIB_DIR)" -lei
+
+# Verbosity.
+
+c_verbose_0 = @echo " C " $(filter-out $(notdir $(MAKEFILE_LIST) $(C_SRC_ENV)),$(^F));
+c_verbose = $(c_verbose_$(V))
+
+cpp_verbose_0 = @echo " CPP " $(filter-out $(notdir $(MAKEFILE_LIST) $(C_SRC_ENV)),$(^F));
+cpp_verbose = $(cpp_verbose_$(V))
+
+link_verbose_0 = @echo " LD " $(@F);
+link_verbose = $(link_verbose_$(V))
+
+# Targets.
+
+ifeq ($(wildcard $(C_SRC_DIR)),)
+else ifneq ($(wildcard $(C_SRC_DIR)/Makefile),)
+app:: app-c_src
+
+test-build:: app-c_src
+
+app-c_src:
+ $(MAKE) -C $(C_SRC_DIR)
+
+clean::
+ $(MAKE) -C $(C_SRC_DIR) clean
+
+else
+
+ifeq ($(SOURCES),)
+SOURCES := $(sort $(foreach pat,*.c *.C *.cc *.cpp,$(call core_find,$(C_SRC_DIR)/,$(pat))))
+endif
+OBJECTS = $(addsuffix .o, $(basename $(SOURCES)))
+
+COMPILE_C = $(c_verbose) $(CC) $(CFLAGS) $(CPPFLAGS) -c
+COMPILE_CPP = $(cpp_verbose) $(CXX) $(CXXFLAGS) $(CPPFLAGS) -c
+
+app:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
+
+test-build:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
+
+$(C_SRC_OUTPUT_FILE): $(OBJECTS)
+ $(verbose) mkdir -p $(dir $@)
+ $(link_verbose) $(CC) $(OBJECTS) \
+ $(LDFLAGS) $(if $(filter $(C_SRC_TYPE),shared),-shared) $(LDLIBS) \
+ -o $(C_SRC_OUTPUT_FILE)
+
+$(OBJECTS): $(MAKEFILE_LIST) $(C_SRC_ENV)
+
+%.o: %.c
+ $(COMPILE_C) $(OUTPUT_OPTION) $<
+
+%.o: %.cc
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+%.o: %.C
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+%.o: %.cpp
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+clean:: clean-c_src
+
+clean-c_src:
+ $(gen_verbose) rm -f $(C_SRC_OUTPUT_FILE) $(OBJECTS)
+
+endif
+
+ifneq ($(wildcard $(C_SRC_DIR)),)
+ERL_ERTS_DIR = $(shell $(ERL) -eval 'io:format("~s~n", [code:lib_dir(erts)]), halt().')
+
+$(C_SRC_ENV):
+ $(verbose) $(ERL) -eval "file:write_file(\"$(call core_native_path,$(C_SRC_ENV))\", \
+ io_lib:format( \
+ \"# Generated by Erlang.mk. Edit at your own risk!~n~n\" \
+ \"ERTS_INCLUDE_DIR ?= ~s/erts-~s/include/~n\" \
+ \"ERL_INTERFACE_INCLUDE_DIR ?= ~s~n\" \
+ \"ERL_INTERFACE_LIB_DIR ?= ~s~n\" \
+ \"ERTS_DIR ?= $(ERL_ERTS_DIR)~n\", \
+ [code:root_dir(), erlang:system_info(version), \
+ code:lib_dir(erl_interface, include), \
+ code:lib_dir(erl_interface, lib)])), \
+ halt()."
+
+distclean:: distclean-c_src-env
+
+distclean-c_src-env:
+ $(gen_verbose) rm -f $(C_SRC_ENV)
+
+-include $(C_SRC_ENV)
+
+ifneq ($(ERL_ERTS_DIR),$(ERTS_DIR))
+$(shell rm -f $(C_SRC_ENV))
+endif
+endif
+
+# Templates.
+
+define bs_c_nif
+#include "erl_nif.h"
+
+static int loads = 0;
+
+static int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
+{
+ /* Initialize private data. */
+ *priv_data = NULL;
+
+ loads++;
+
+ return 0;
+}
+
+static int upgrade(ErlNifEnv* env, void** priv_data, void** old_priv_data, ERL_NIF_TERM load_info)
+{
+ /* Convert the private data to the new version. */
+ *priv_data = *old_priv_data;
+
+ loads++;
+
+ return 0;
+}
+
+static void unload(ErlNifEnv* env, void* priv_data)
+{
+ if (loads == 1) {
+ /* Destroy the private data. */
+ }
+
+ loads--;
+}
+
+static ERL_NIF_TERM hello(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ if (enif_is_atom(env, argv[0])) {
+ return enif_make_tuple2(env,
+ enif_make_atom(env, "hello"),
+ argv[0]);
+ }
+
+ return enif_make_tuple2(env,
+ enif_make_atom(env, "error"),
+ enif_make_atom(env, "badarg"));
+}
+
+static ErlNifFunc nif_funcs[] = {
+ {"hello", 1, hello}
+};
+
+ERL_NIF_INIT($n, nif_funcs, load, NULL, upgrade, unload)
+endef
+
+define bs_erl_nif
+-module($n).
+
+-export([hello/1]).
+
+-on_load(on_load/0).
+on_load() ->
+ PrivDir = case code:priv_dir(?MODULE) of
+ {error, _} ->
+ AppPath = filename:dirname(filename:dirname(code:which(?MODULE))),
+ filename:join(AppPath, "priv");
+ Path ->
+ Path
+ end,
+ erlang:load_nif(filename:join(PrivDir, atom_to_list(?MODULE)), 0).
+
+hello(_) ->
+ erlang:nif_error({not_loaded, ?MODULE}).
+endef
+
+new-nif:
+ifneq ($(wildcard $(C_SRC_DIR)/$n.c),)
+ $(error Error: $(C_SRC_DIR)/$n.c already exists)
+endif
+ifneq ($(wildcard src/$n.erl),)
+ $(error Error: src/$n.erl already exists)
+endif
+ifndef n
+ $(error Usage: $(MAKE) new-nif n=NAME [in=APP])
+endif
+ifdef in
+ $(verbose) $(MAKE) -C $(APPS_DIR)/$(in)/ new-nif n=$n in=
+else
+ $(verbose) mkdir -p $(C_SRC_DIR) src/
+ $(verbose) $(call core_render,bs_c_nif,$(C_SRC_DIR)/$n.c)
+ $(verbose) $(call core_render,bs_erl_nif,src/$n.erl)
+endif
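+
+# For example (name hypothetical), "make new-nif n=hello" renders the
+# templates above into c_src/hello.c and src/hello.erl.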
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: ci ci-prepare ci-setup
+
+CI_OTP ?=
+CI_HIPE ?=
+CI_ERLLVM ?=
+
+ifeq ($(CI_VM),native)
+ERLC_OPTS += +native
+TEST_ERLC_OPTS += +native
+else ifeq ($(CI_VM),erllvm)
+ERLC_OPTS += +native +'{hipe, [to_llvm]}'
+TEST_ERLC_OPTS += +native +'{hipe, [to_llvm]}'
+endif
+
+ifeq ($(strip $(CI_OTP) $(CI_HIPE) $(CI_ERLLVM)),)
+ci::
+else
+
+ci:: $(addprefix ci-,$(CI_OTP) $(addsuffix -native,$(CI_HIPE)) $(addsuffix -erllvm,$(CI_ERLLVM)))
+
+ci-prepare: $(addprefix $(KERL_INSTALL_DIR)/,$(CI_OTP) $(addsuffix -native,$(CI_HIPE)))
+
+ci-setup::
+ $(verbose) :
+
+ci-extra::
+ $(verbose) :
+
+ci_verbose_0 = @echo " CI " $(1);
+ci_verbose = $(ci_verbose_$(V))
+
+define ci_target
+ci-$1: $(KERL_INSTALL_DIR)/$2
+ $(verbose) $(MAKE) --no-print-directory clean
+ $(ci_verbose) \
+ PATH="$(KERL_INSTALL_DIR)/$2/bin:$(PATH)" \
+ CI_OTP_RELEASE="$1" \
+ CT_OPTS="-label $1" \
+ CI_VM="$3" \
+ $(MAKE) ci-setup tests
+ $(verbose) $(MAKE) --no-print-directory ci-extra
+endef
+
+$(foreach otp,$(CI_OTP),$(eval $(call ci_target,$(otp),$(otp),otp)))
+$(foreach otp,$(CI_HIPE),$(eval $(call ci_target,$(otp)-native,$(otp)-native,native)))
+$(foreach otp,$(CI_ERLLVM),$(eval $(call ci_target,$(otp)-erllvm,$(otp)-native,erllvm)))
+
+$(foreach otp,$(filter-out $(ERLANG_OTP),$(CI_OTP)),$(eval $(call kerl_otp_target,$(otp))))
+$(foreach otp,$(filter-out $(ERLANG_HIPE),$(sort $(CI_HIPE) $(CI_ERLLVM))),$(eval $(call kerl_hipe_target,$(otp))))
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Continuous Integration targets:" \
+ " ci Run '$(MAKE) tests' on all configured Erlang versions." \
+ "" \
+ "The CI_OTP variable must be defined with the Erlang versions" \
+ "that must be tested. For example: CI_OTP = OTP-17.3.4 OTP-17.5.3"
+
+endif
+
+# Copyright (c) 2020, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifdef CONCUERROR_TESTS
+
+.PHONY: concuerror distclean-concuerror
+
+# Configuration
+
+CONCUERROR_LOGS_DIR ?= $(CURDIR)/logs
+CONCUERROR_OPTS ?=
+
+# Core targets.
+
+check:: concuerror
+
+ifndef KEEP_LOGS
+distclean:: distclean-concuerror
+endif
+
+# Plugin-specific targets.
+
+$(ERLANG_MK_TMP)/Concuerror/bin/concuerror: | $(ERLANG_MK_TMP)
+ $(verbose) git clone https://github.com/parapluu/Concuerror $(ERLANG_MK_TMP)/Concuerror
+ $(verbose) $(MAKE) -C $(ERLANG_MK_TMP)/Concuerror
+
+$(CONCUERROR_LOGS_DIR):
+ $(verbose) mkdir -p $(CONCUERROR_LOGS_DIR)
+
+define concuerror_html_report
+<!DOCTYPE html>
+<html lang="en">
+<head>
+<meta charset="utf-8">
+<title>Concuerror HTML report</title>
+</head>
+<body>
+<h1>Concuerror HTML report</h1>
+<p>Generated on $(concuerror_date)</p>
+<ul>
+$(foreach t,$(concuerror_targets),<li><a href="$(t).txt">$(t)</a></li>)
+</ul>
+</body>
+</html>
+endef
+
+concuerror: $(addprefix concuerror-,$(subst :,-,$(CONCUERROR_TESTS)))
+ $(eval concuerror_date := $(shell date))
+ $(eval concuerror_targets := $^)
+ $(verbose) $(call core_render,concuerror_html_report,$(CONCUERROR_LOGS_DIR)/concuerror.html)
+
+define concuerror_target
+.PHONY: concuerror-$1-$2
+
+concuerror-$1-$2: test-build | $(ERLANG_MK_TMP)/Concuerror/bin/concuerror $(CONCUERROR_LOGS_DIR)
+ $(ERLANG_MK_TMP)/Concuerror/bin/concuerror \
+ --pa $(CURDIR)/ebin --pa $(TEST_DIR) \
+ -o $(CONCUERROR_LOGS_DIR)/concuerror-$1-$2.txt \
+ $$(CONCUERROR_OPTS) -m $1 -t $2
+endef
+
+$(foreach test,$(CONCUERROR_TESTS),$(eval $(call concuerror_target,$(firstword $(subst :, ,$(test))),$(lastword $(subst :, ,$(test))))))
+
+distclean-concuerror:
+ $(gen_verbose) rm -rf $(CONCUERROR_LOGS_DIR)
+
+endif
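+
+# CONCUERROR_TESTS takes module:function pairs, for example (names
+# hypothetical):
+#
+# CONCUERROR_TESTS = my_mod:check_locks other_mod:check_race
+#
+# "make concuerror" then runs concuerror-my_mod-check_locks and
+# concuerror-other_mod-check_race, writing one log per test and an HTML
+# index under $(CONCUERROR_LOGS_DIR).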
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: ct apps-ct distclean-ct
+
+# Configuration.
+
+CT_OPTS ?=
+
+ifneq ($(wildcard $(TEST_DIR)),)
+ifndef CT_SUITES
+CT_SUITES := $(sort $(subst _SUITE.erl,,$(notdir $(call core_find,$(TEST_DIR)/,*_SUITE.erl))))
+endif
+endif
+CT_SUITES ?=
+CT_LOGS_DIR ?= $(CURDIR)/logs
+
+# Core targets.
+
+tests:: ct
+
+ifndef KEEP_LOGS
+distclean:: distclean-ct
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Common_test targets:" \
+ " ct Run all the common_test suites for this project" \
+ "" \
+		"Each of your common_test suites has an associated target." \
+		"A suite named http_SUITE can be run using the ct-http target."
+
+# Plugin-specific targets.
+
+CT_RUN = ct_run \
+ -no_auto_compile \
+ -noinput \
+ -pa $(CURDIR)/ebin $(TEST_DIR) \
+ -dir $(TEST_DIR) \
+ -logdir $(CT_LOGS_DIR)
+
+ifeq ($(CT_SUITES),)
+ct: $(if $(IS_APP)$(ROOT_DIR),,apps-ct)
+else
+# We do not run tests if we are in an apps/* with no test directory.
+ifneq ($(IS_APP)$(wildcard $(TEST_DIR)),1)
+ct: test-build $(if $(IS_APP)$(ROOT_DIR),,apps-ct)
+ $(verbose) mkdir -p $(CT_LOGS_DIR)
+ $(gen_verbose) $(CT_RUN) -sname ct_$(PROJECT) -suite $(addsuffix _SUITE,$(CT_SUITES)) $(CT_OPTS)
+endif
+endif
+
+ifneq ($(ALL_APPS_DIRS),)
+define ct_app_target
+apps-ct-$1: test-build
+ $$(MAKE) -C $1 ct IS_APP=1
+endef
+
+$(foreach app,$(ALL_APPS_DIRS),$(eval $(call ct_app_target,$(app))))
+
+apps-ct: $(addprefix apps-ct-,$(ALL_APPS_DIRS))
+endif
+
+ifdef t
+ifeq (,$(findstring :,$t))
+CT_EXTRA = -group $t
+else
+t_words = $(subst :, ,$t)
+CT_EXTRA = -group $(firstword $(t_words)) -case $(lastword $(t_words))
+endif
+else
+ifdef c
+CT_EXTRA = -case $c
+else
+CT_EXTRA =
+endif
+endif
+
+define ct_suite_target
+ct-$(1): test-build
+ $(verbose) mkdir -p $(CT_LOGS_DIR)
+ $(gen_verbose_esc) $(CT_RUN) -sname ct_$(PROJECT) -suite $(addsuffix _SUITE,$(1)) $(CT_EXTRA) $(CT_OPTS)
+endef
+
+$(foreach test,$(CT_SUITES),$(eval $(call ct_suite_target,$(test))))
+
+distclean-ct:
+ $(gen_verbose) rm -rf $(CT_LOGS_DIR)
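+
+# Usage examples (suite, group and case names hypothetical):
+#
+#   make ct                   # run all suites found in $(TEST_DIR)
+#   make ct-http              # run http_SUITE only
+#   make ct-http t=admin      # run only the admin group
+#   make ct-http t=admin:add  # run the add case within the admin group
+#   make ct-http c=add        # run only the add case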
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: plt distclean-plt dialyze
+
+# Configuration.
+
+DIALYZER_PLT ?= $(CURDIR)/.$(PROJECT).plt
+export DIALYZER_PLT
+
+PLT_APPS ?=
+DIALYZER_DIRS ?= --src -r $(wildcard src) $(ALL_APPS_DIRS)
+DIALYZER_OPTS ?= -Werror_handling -Wrace_conditions -Wunmatched_returns # -Wunderspecs
+DIALYZER_PLT_OPTS ?=
+
+# Core targets.
+
+check:: dialyze
+
+distclean:: distclean-plt
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Dialyzer targets:" \
+ " plt Build a PLT file for this project" \
+ " dialyze Analyze the project using Dialyzer"
+
+# Plugin-specific targets.
+
+define filter_opts.erl
+ Opts = init:get_plain_arguments(),
+ {Filtered, _} = lists:foldl(fun
+ (O, {Os, true}) -> {[O|Os], false};
+ (O = "-D", {Os, _}) -> {[O|Os], true};
+ (O = [\\$$-, \\$$D, _ | _], {Os, _}) -> {[O|Os], false};
+ (O = "-I", {Os, _}) -> {[O|Os], true};
+ (O = [\\$$-, \\$$I, _ | _], {Os, _}) -> {[O|Os], false};
+ (O = "-pa", {Os, _}) -> {[O|Os], true};
+ (_, Acc) -> Acc
+ end, {[], false}, Opts),
+ io:format("~s~n", [string:join(lists:reverse(Filtered), " ")]),
+ halt().
+endef
+
+# DIALYZER_PLT is a variable understood directly by Dialyzer.
+#
+# We append the path to erts at the end of the PLT. This works
+# because the PLT file is in the external term format and the
+# function binary_to_term/1 ignores any trailing data.
+$(DIALYZER_PLT): deps app
+ $(eval DEPS_LOG := $(shell test -f $(ERLANG_MK_TMP)/deps.log && \
+ while read p; do test -d $$p/ebin && echo $$p/ebin; done <$(ERLANG_MK_TMP)/deps.log))
+ $(verbose) dialyzer --build_plt $(DIALYZER_PLT_OPTS) --apps \
+ erts kernel stdlib $(PLT_APPS) $(OTP_DEPS) $(LOCAL_DEPS) $(DEPS_LOG) || test $$? -eq 2
+ $(verbose) $(ERL) -eval 'io:format("~n~s~n", [code:lib_dir(erts)]), halt().' >> $@
+
+plt: $(DIALYZER_PLT)
+
+distclean-plt:
+ $(gen_verbose) rm -f $(DIALYZER_PLT)
+
+ifneq ($(wildcard $(DIALYZER_PLT)),)
+dialyze: $(if $(filter --src,$(DIALYZER_DIRS)),,deps app)
+ $(verbose) if ! tail -n1 $(DIALYZER_PLT) | \
+ grep -q "^`$(ERL) -eval 'io:format("~s", [code:lib_dir(erts)]), halt().'`$$"; then \
+ rm $(DIALYZER_PLT); \
+ $(MAKE) plt; \
+ fi
+else
+dialyze: $(DIALYZER_PLT)
+endif
+ $(verbose) dialyzer --no_native `$(ERL) \
+ -eval "$(subst $(newline),,$(call escape_dquotes,$(call filter_opts.erl)))" \
+ -extra $(ERLC_OPTS)` $(DIALYZER_DIRS) $(DIALYZER_OPTS) $(if $(wildcard ebin/),-pa ebin/)
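+
+# Typical project-side configuration (values illustrative):
+#
+# PLT_APPS = crypto ssl inets
+# DIALYZER_OPTS = -Werror_handling -Wunmatched_returns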
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-edoc edoc
+
+# Configuration.
+
+EDOC_OPTS ?=
+EDOC_SRC_DIRS ?=
+EDOC_OUTPUT ?= doc
+
+define edoc.erl
+ SrcPaths = lists:foldl(fun(P, Acc) ->
+ filelib:wildcard(atom_to_list(P) ++ "/{src,c_src}") ++ Acc
+ end, [], [$(call comma_list,$(patsubst %,'%',$(call core_native_path,$(EDOC_SRC_DIRS))))]),
+ DefaultOpts = [{dir, "$(EDOC_OUTPUT)"}, {source_path, SrcPaths}, {subpackages, false}],
+ edoc:application($(1), ".", [$(2)] ++ DefaultOpts),
+ halt(0).
+endef
+
+# Core targets.
+
+ifneq ($(strip $(EDOC_SRC_DIRS)$(wildcard doc/overview.edoc)),)
+docs:: edoc
+endif
+
+distclean:: distclean-edoc
+
+# Plugin-specific targets.
+
+edoc: distclean-edoc doc-deps
+ $(gen_verbose) $(call erlang,$(call edoc.erl,$(PROJECT),$(EDOC_OPTS)))
+
+distclean-edoc:
+ $(gen_verbose) rm -f $(EDOC_OUTPUT)/*.css $(EDOC_OUTPUT)/*.html $(EDOC_OUTPUT)/*.png $(EDOC_OUTPUT)/edoc-info
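+
+# EDOC_OPTS is spliced into the options list passed to edoc:application/3,
+# so it accepts comma-separated EDoc options, for example (illustrative):
+#
+# EDOC_OPTS = {preprocess, true}, {todo, true}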
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Configuration.
+
+DTL_FULL_PATH ?=
+DTL_PATH ?= templates/
+DTL_PREFIX ?=
+DTL_SUFFIX ?= _dtl
+DTL_OPTS ?=
+
+# Verbosity.
+
+dtl_verbose_0 = @echo " DTL " $(filter %.dtl,$(?F));
+dtl_verbose = $(dtl_verbose_$(V))
+
+# Core targets.
+
+DTL_PATH := $(abspath $(DTL_PATH))
+DTL_FILES := $(sort $(call core_find,$(DTL_PATH),*.dtl))
+
+ifneq ($(DTL_FILES),)
+
+DTL_NAMES = $(addprefix $(DTL_PREFIX),$(addsuffix $(DTL_SUFFIX),$(DTL_FILES:$(DTL_PATH)/%.dtl=%)))
+DTL_MODULES = $(if $(DTL_FULL_PATH),$(subst /,_,$(DTL_NAMES)),$(notdir $(DTL_NAMES)))
+BEAM_FILES += $(addsuffix .beam,$(addprefix ebin/,$(DTL_MODULES)))
+
+ifneq ($(words $(DTL_FILES)),0)
+# Rebuild templates when the Makefile changes.
+$(ERLANG_MK_TMP)/last-makefile-change-erlydtl: $(MAKEFILE_LIST) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(DTL_FILES); \
+ fi
+ $(verbose) touch $@
+
+ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change-erlydtl
+endif
+
+define erlydtl_compile.erl
+ [begin
+ Module0 = case "$(strip $(DTL_FULL_PATH))" of
+ "" ->
+ filename:basename(F, ".dtl");
+ _ ->
+ "$(call core_native_path,$(DTL_PATH))/" ++ F2 = filename:rootname(F, ".dtl"),
+ re:replace(F2, "/", "_", [{return, list}, global])
+ end,
+ Module = list_to_atom("$(DTL_PREFIX)" ++ string:to_lower(Module0) ++ "$(DTL_SUFFIX)"),
+ case erlydtl:compile(F, Module, [$(DTL_OPTS)] ++ [{out_dir, "ebin/"}, return_errors]) of
+ ok -> ok;
+ {ok, _} -> ok
+ end
+ end || F <- string:tokens("$(1)", " ")],
+ halt().
+endef
+
+ebin/$(PROJECT).app:: $(DTL_FILES) | ebin/
+ $(if $(strip $?),\
+ $(dtl_verbose) $(call erlang,$(call erlydtl_compile.erl,$(call core_native_path,$?)),\
+ -pa ebin/))
+
+endif
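+
+# With the defaults, templates/foo.dtl (name hypothetical) compiles to
+# ebin/foo_dtl.beam. With DTL_FULL_PATH set, a nested template such as
+# templates/users/show.dtl becomes users_show_dtl instead of show_dtl.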
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, Dave Cottlehuber <dch@skunkwerks.at>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-escript escript escript-zip
+
+# Configuration.
+
+ESCRIPT_NAME ?= $(PROJECT)
+ESCRIPT_FILE ?= $(ESCRIPT_NAME)
+
+ESCRIPT_SHEBANG ?= /usr/bin/env escript
+ESCRIPT_COMMENT ?= This is an -*- erlang -*- file
+ESCRIPT_EMU_ARGS ?= -escript main $(ESCRIPT_NAME)
+
+ESCRIPT_ZIP ?= 7z a -tzip -mx=9 -mtc=off $(if $(filter-out 0,$(V)),,> /dev/null)
+ESCRIPT_ZIP_FILE ?= $(ERLANG_MK_TMP)/escript.zip
+
+# Core targets.
+
+distclean:: distclean-escript
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Escript targets:" \
+		"  escript     Build an executable escript archive"
+
+# Plugin-specific targets.
+
+escript-zip:: FULL=1
+escript-zip:: deps app
+	$(verbose) mkdir -p $(dir $(ESCRIPT_ZIP_FILE))
+ $(verbose) rm -f $(ESCRIPT_ZIP_FILE)
+ $(gen_verbose) cd .. && $(ESCRIPT_ZIP) $(ESCRIPT_ZIP_FILE) $(PROJECT)/ebin/*
+ifneq ($(DEPS),)
+ $(verbose) cd $(DEPS_DIR) && $(ESCRIPT_ZIP) $(ESCRIPT_ZIP_FILE) \
+ $(subst $(DEPS_DIR)/,,$(addsuffix /*,$(wildcard \
+ $(addsuffix /ebin,$(shell cat $(ERLANG_MK_TMP)/deps.log)))))
+endif
+
+escript:: escript-zip
+ $(gen_verbose) printf "%s\n" \
+ "#!$(ESCRIPT_SHEBANG)" \
+ "%% $(ESCRIPT_COMMENT)" \
+ "%%! $(ESCRIPT_EMU_ARGS)" > $(ESCRIPT_FILE)
+ $(verbose) cat $(ESCRIPT_ZIP_FILE) >> $(ESCRIPT_FILE)
+ $(verbose) chmod +x $(ESCRIPT_FILE)
+
+distclean-escript:
+ $(gen_verbose) rm -f $(ESCRIPT_FILE)
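+
+# "make escript" produces $(ESCRIPT_FILE) ($(PROJECT) by default), a
+# self-contained executable whose entry point is $(ESCRIPT_NAME):main/1,
+# as set by ESCRIPT_EMU_ARGS above.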
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, Enrique Fernandez <enrique.fernandez@erlang-solutions.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: eunit apps-eunit
+
+# Configuration
+
+EUNIT_OPTS ?=
+EUNIT_ERL_OPTS ?=
+
+# Core targets.
+
+tests:: eunit
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "EUnit targets:" \
+ " eunit Run all the EUnit tests for this project"
+
+# Plugin-specific targets.
+
+define eunit.erl
+ $(call cover.erl)
+ CoverSetup(),
+ case eunit:test($1, [$(EUNIT_OPTS)]) of
+ ok -> ok;
+ error -> halt(2)
+ end,
+ CoverExport("$(call core_native_path,$(COVER_DATA_DIR))/eunit.coverdata"),
+ halt()
+endef
+
+EUNIT_ERL_OPTS += -pa $(TEST_DIR) $(CURDIR)/ebin
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+eunit: test-build cover-data-dir
+ $(gen_verbose) $(call erlang,$(call eunit.erl,['$(t)']),$(EUNIT_ERL_OPTS))
+else
+eunit: test-build cover-data-dir
+ $(gen_verbose) $(call erlang,$(call eunit.erl,fun $(t)/0),$(EUNIT_ERL_OPTS))
+endif
+else
+EUNIT_EBIN_MODS = $(notdir $(basename $(ERL_FILES) $(BEAM_FILES)))
+EUNIT_TEST_MODS = $(notdir $(basename $(call core_find,$(TEST_DIR)/,*.erl)))
+
+EUNIT_MODS = $(foreach mod,$(EUNIT_EBIN_MODS) $(filter-out \
+ $(patsubst %,%_tests,$(EUNIT_EBIN_MODS)),$(EUNIT_TEST_MODS)),'$(mod)')
+
+eunit: test-build $(if $(IS_APP)$(ROOT_DIR),,apps-eunit) cover-data-dir
+ifneq ($(wildcard src/ $(TEST_DIR)),)
+ $(gen_verbose) $(call erlang,$(call eunit.erl,[$(call comma_list,$(EUNIT_MODS))]),$(EUNIT_ERL_OPTS))
+endif
+
+ifneq ($(ALL_APPS_DIRS),)
+apps-eunit: test-build
+ $(verbose) eunit_retcode=0 ; for app in $(ALL_APPS_DIRS); do $(MAKE) -C $$app eunit IS_APP=1; \
+ [ $$? -ne 0 ] && eunit_retcode=1 ; done ; \
+ exit $$eunit_retcode
+endif
+endif
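+
+# Usage examples (module and function names hypothetical):
+#
+#   make eunit                  # run all EUnit tests
+#   make eunit t=my_mod         # run the tests of a single module
+#   make eunit t=my_mod:a_test  # run a single zero-arity test function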
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter proper,$(DEPS) $(TEST_DEPS)),proper)
+.PHONY: proper
+
+# Targets.
+
+tests:: proper
+
+define proper_check.erl
+ $(call cover.erl)
+ code:add_pathsa([
+ "$(call core_native_path,$(CURDIR)/ebin)",
+ "$(call core_native_path,$(DEPS_DIR)/*/ebin)",
+ "$(call core_native_path,$(TEST_DIR))"]),
+ Module = fun(M) ->
+ [true] =:= lists:usort([
+ case atom_to_list(F) of
+ "prop_" ++ _ ->
+ io:format("Testing ~p:~p/0~n", [M, F]),
+ proper:quickcheck(M:F(), nocolors);
+ _ ->
+ true
+ end
+ || {F, 0} <- M:module_info(exports)])
+ end,
+ try begin
+ CoverSetup(),
+ Res = case $(1) of
+ all -> [true] =:= lists:usort([Module(M) || M <- [$(call comma_list,$(3))]]);
+ module -> Module($(2));
+ function -> proper:quickcheck($(2), nocolors)
+ end,
+ CoverExport("$(COVER_DATA_DIR)/proper.coverdata"),
+ Res
+ end of
+ true -> halt(0);
+ _ -> halt(1)
+ catch error:undef ->
+ io:format("Undefined property or module?~n~p~n", [erlang:get_stacktrace()]),
+ halt(0)
+ end.
+endef
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+proper: test-build cover-data-dir
+ $(verbose) $(call erlang,$(call proper_check.erl,module,$(t)))
+else
+proper: test-build cover-data-dir
+ $(verbose) echo Testing $(t)/0
+ $(verbose) $(call erlang,$(call proper_check.erl,function,$(t)()))
+endif
+else
+proper: test-build cover-data-dir
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(wildcard ebin/*.beam) $(call core_find,$(TEST_DIR)/,*.beam))))))
+ $(gen_verbose) $(call erlang,$(call proper_check.erl,all,undefined,$(MODULES)))
+endif
+endif
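+
+# Properties are zero-arity prop_* exports. Usage examples (names
+# hypothetical):
+#
+#   make proper                              # check every property
+#   make proper t=prop_codec                 # one module's properties
+#   make proper t=prop_codec:prop_roundtrip  # a single property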
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Verbosity.
+
+proto_verbose_0 = @echo " PROTO " $(filter %.proto,$(?F));
+proto_verbose = $(proto_verbose_$(V))
+
+# Core targets.
+
+ifneq ($(wildcard src/),)
+ifneq ($(filter gpb protobuffs,$(BUILD_DEPS) $(DEPS)),)
+PROTO_FILES := $(filter %.proto,$(ALL_SRC_FILES))
+ERL_FILES += $(addprefix src/,$(patsubst %.proto,%_pb.erl,$(notdir $(PROTO_FILES))))
+
+ifeq ($(PROTO_FILES),)
+$(ERLANG_MK_TMP)/last-makefile-change-protobuffs:
+ $(verbose) :
+else
+# Rebuild proto files when the Makefile changes.
+# We exclude $(PROJECT).d to avoid a circular dependency.
+$(ERLANG_MK_TMP)/last-makefile-change-protobuffs: $(filter-out $(PROJECT).d,$(MAKEFILE_LIST)) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(PROTO_FILES); \
+ fi
+ $(verbose) touch $@
+
+$(PROJECT).d:: $(ERLANG_MK_TMP)/last-makefile-change-protobuffs
+endif
+
+ifeq ($(filter gpb,$(BUILD_DEPS) $(DEPS)),)
+define compile_proto.erl
+ [begin
+ protobuffs_compile:generate_source(F, [
+ {output_include_dir, "./include"},
+ {output_src_dir, "./src"}])
+ end || F <- string:tokens("$1", " ")],
+ halt().
+endef
+else
+define compile_proto.erl
+ [begin
+ gpb_compile:file(F, [
+ {include_as_lib, true},
+ {module_name_suffix, "_pb"},
+ {o_hrl, "./include"},
+ {o_erl, "./src"}])
+ end || F <- string:tokens("$1", " ")],
+ halt().
+endef
+endif
+
+ifneq ($(PROTO_FILES),)
+$(PROJECT).d:: $(PROTO_FILES)
+ $(verbose) mkdir -p ebin/ include/
+ $(if $(strip $?),$(proto_verbose) $(call erlang,$(call compile_proto.erl,$?)))
+endif
+endif
+endif
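+
+# Adding gpb (or the legacy protobuffs) to DEPS or BUILD_DEPS enables this
+# plugin; src/thing.proto (name hypothetical) is then compiled to
+# src/thing_pb.erl and include/thing_pb.hrl.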
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: relx-rel relx-relup distclean-relx-rel run
+
+# Configuration.
+
+RELX ?= $(ERLANG_MK_TMP)/relx
+RELX_CONFIG ?= $(CURDIR)/relx.config
+
+RELX_URL ?= https://erlang.mk/res/relx-v3.27.0
+RELX_OPTS ?=
+RELX_OUTPUT_DIR ?= _rel
+RELX_REL_EXT ?=
+RELX_TAR ?= 1
+
+ifdef SFX
+ RELX_TAR = 1
+endif
+
+ifeq ($(firstword $(RELX_OPTS)),-o)
+ RELX_OUTPUT_DIR = $(word 2,$(RELX_OPTS))
+else
+ RELX_OPTS += -o $(RELX_OUTPUT_DIR)
+endif
+
+# Core targets.
+
+ifeq ($(IS_DEP),)
+ifneq ($(wildcard $(RELX_CONFIG)),)
+rel:: relx-rel
+
+relup:: relx-relup
+endif
+endif
+
+distclean:: distclean-relx-rel
+
+# Plugin-specific targets.
+
+$(RELX): | $(ERLANG_MK_TMP)
+ $(gen_verbose) $(call core_http_get,$(RELX),$(RELX_URL))
+ $(verbose) chmod +x $(RELX)
+
+relx-rel: $(RELX) rel-deps app
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) release
+ $(verbose) $(MAKE) relx-post-rel
+ifeq ($(RELX_TAR),1)
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) tar
+endif
+
+relx-relup: $(RELX) rel-deps app
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) release
+ $(MAKE) relx-post-rel
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) relup $(if $(filter 1,$(RELX_TAR)),tar)
+
+distclean-relx-rel:
+ $(gen_verbose) rm -rf $(RELX_OUTPUT_DIR)
+
+# Default hooks.
+relx-post-rel::
+ $(verbose) :
+
+# Run target.
+
+ifeq ($(wildcard $(RELX_CONFIG)),)
+run::
+else
+
+define get_relx_release.erl
+ {ok, Config} = file:consult("$(call core_native_path,$(RELX_CONFIG))"),
+ {release, {Name, Vsn0}, _} = lists:keyfind(release, 1, Config),
+ Vsn = case Vsn0 of
+ {cmd, Cmd} -> os:cmd(Cmd);
+ semver -> "";
+ {semver, _} -> "";
+		_ -> Vsn0
+ end,
+ Extended = case lists:keyfind(extended_start_script, 1, Config) of
+ {_, true} -> "1";
+ _ -> ""
+ end,
+ io:format("~s ~s ~s", [Name, Vsn, Extended]),
+ halt(0).
+endef
+
+RELX_REL := $(shell $(call erlang,$(get_relx_release.erl)))
+RELX_REL_NAME := $(word 1,$(RELX_REL))
+RELX_REL_VSN := $(word 2,$(RELX_REL))
+RELX_REL_CMD := $(if $(word 3,$(RELX_REL)),console)
+
+ifeq ($(PLATFORM),msys2)
+RELX_REL_EXT := .cmd
+endif
+
+run:: all
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) $(RELX_REL_CMD)
+
+ifdef RELOAD
+rel::
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) ping
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) \
+ eval "io:format(\"~p~n\", [c:lm()])"
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Relx targets:" \
+ " run Compile the project, build the release and run it"
+
+endif
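+
+# Typical flow:
+#
+#   make rel  # build the release described in relx.config, plus a
+#             # tarball unless RELX_TAR is set to 0
+#   make run  # build everything and boot the release with a console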
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, M Robert Martin <rob@version2beta.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: shell
+
+# Configuration.
+
+SHELL_ERL ?= erl
+SHELL_PATHS ?= $(CURDIR)/ebin $(TEST_DIR)
+SHELL_OPTS ?=
+
+ALL_SHELL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(SHELL_DEPS))
+
+# Core targets
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Shell targets:" \
+		"  shell              Run an Erlang shell with SHELL_OPTS or reasonable defaults"
+
+# Plugin-specific targets.
+
+$(foreach dep,$(SHELL_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+build-shell-deps:
+else
+build-shell-deps: $(ALL_SHELL_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_SHELL_DEPS_DIRS) ; do \
+ if [ -z "$(strip $(FULL))" ] && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ else \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ fi \
+ done
+endif
+
+shell:: build-shell-deps
+ $(gen_verbose) $(SHELL_ERL) -pa $(SHELL_PATHS) $(SHELL_OPTS)
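+
+# Example configuration (dep name and options illustrative):
+#
+# SHELL_DEPS = tddreloader
+# SHELL_OPTS = -eval 'application:ensure_all_started($(PROJECT))'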
+
+# Copyright 2017, Stanislaw Klekot <dozzie@jarowit.net>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-sphinx sphinx
+
+# Configuration.
+
+SPHINX_BUILD ?= sphinx-build
+SPHINX_SOURCE ?= doc
+SPHINX_CONFDIR ?=
+SPHINX_FORMATS ?= html
+SPHINX_DOCTREES ?= $(ERLANG_MK_TMP)/sphinx.doctrees
+SPHINX_OPTS ?=
+
+#sphinx_html_opts =
+#sphinx_html_output = html
+#sphinx_man_opts =
+#sphinx_man_output = man
+#sphinx_latex_opts =
+#sphinx_latex_output = latex
+
+# Helpers.
+
+sphinx_build_0 = @echo " SPHINX" $1; $(SPHINX_BUILD) -N -q
+sphinx_build_1 = $(SPHINX_BUILD) -N
+sphinx_build_2 = set -x; $(SPHINX_BUILD)
+sphinx_build = $(sphinx_build_$(V))
+
+define sphinx.build
+$(call sphinx_build,$1) -b $1 -d $(SPHINX_DOCTREES) $(if $(SPHINX_CONFDIR),-c $(SPHINX_CONFDIR)) $(SPHINX_OPTS) $(sphinx_$1_opts) -- $(SPHINX_SOURCE) $(call sphinx.output,$1)
+
+endef
+
+define sphinx.output
+$(if $(sphinx_$1_output),$(sphinx_$1_output),$1)
+endef
+
+# Targets.
+
+ifneq ($(wildcard $(if $(SPHINX_CONFDIR),$(SPHINX_CONFDIR),$(SPHINX_SOURCE))/conf.py),)
+docs:: sphinx
+distclean:: distclean-sphinx
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Sphinx targets:" \
+ " sphinx Generate Sphinx documentation." \
+ "" \
+		"ReST sources and the 'conf.py' file are expected in the directory" \
+		"pointed to by SPHINX_SOURCE ('doc' by default). SPHINX_FORMATS lists the" \
+		"formats to build (only 'html' is generated by default); the target" \
+		'directory can be set with sphinx_$${format}_output, for example:' \
+		'sphinx_html_output = output/html' \
+		"Additional Sphinx options can be set in SPHINX_OPTS."
+
+# Plugin-specific targets.
+
+sphinx:
+ $(foreach F,$(SPHINX_FORMATS),$(call sphinx.build,$F))
+
+distclean-sphinx:
+ $(gen_verbose) rm -rf $(filter-out $(SPHINX_SOURCE),$(foreach F,$(SPHINX_FORMATS),$(call sphinx.output,$F)))
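+
+# Example: build HTML and man pages, sending the man output to a custom
+# directory (values illustrative):
+#
+# SPHINX_FORMATS = html man
+# sphinx_man_output = output/man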
+
+# Copyright (c) 2017, Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: show-ERL_LIBS show-ERLC_OPTS show-TEST_ERLC_OPTS
+
+show-ERL_LIBS:
+ @echo $(ERL_LIBS)
+
+show-ERLC_OPTS:
+ @$(foreach opt,$(ERLC_OPTS) -pa ebin -I include,echo "$(opt)";)
+
+show-TEST_ERLC_OPTS:
+ @$(foreach opt,$(TEST_ERLC_OPTS) -pa ebin -I include,echo "$(opt)";)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter triq,$(DEPS) $(TEST_DEPS)),triq)
+.PHONY: triq
+
+# Targets.
+
+tests:: triq
+
+define triq_check.erl
+ $(call cover.erl)
+ code:add_pathsa([
+ "$(call core_native_path,$(CURDIR)/ebin)",
+ "$(call core_native_path,$(DEPS_DIR)/*/ebin)",
+ "$(call core_native_path,$(TEST_DIR))"]),
+ try begin
+ CoverSetup(),
+ Res = case $(1) of
+ all -> [true] =:= lists:usort([triq:check(M) || M <- [$(call comma_list,$(3))]]);
+ module -> triq:check($(2));
+ function -> triq:check($(2))
+ end,
+ CoverExport("$(COVER_DATA_DIR)/triq.coverdata"),
+ Res
+ end of
+ true -> halt(0);
+ _ -> halt(1)
+ catch error:undef ->
+ io:format("Undefined property or module?~n~p~n", [erlang:get_stacktrace()]),
+ halt(0)
+ end.
+endef
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+triq: test-build cover-data-dir
+ $(verbose) $(call erlang,$(call triq_check.erl,module,$(t)))
+else
+triq: test-build cover-data-dir
+ $(verbose) echo Testing $(t)/0
+ $(verbose) $(call erlang,$(call triq_check.erl,function,$(t)()))
+endif
+else
+triq: test-build cover-data-dir
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(wildcard ebin/*.beam) $(call core_find,$(TEST_DIR)/,*.beam))))))
+ $(gen_verbose) $(call erlang,$(call triq_check.erl,all,undefined,$(MODULES)))
+endif
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Erlang Solutions Ltd.
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: xref distclean-xref
+
+# Configuration.
+
+ifeq ($(XREF_CONFIG),)
+ XREFR_ARGS :=
+else
+ XREFR_ARGS := -c $(XREF_CONFIG)
+endif
+
+XREFR ?= $(CURDIR)/xrefr
+export XREFR
+
+XREFR_URL ?= https://github.com/inaka/xref_runner/releases/download/1.1.0/xrefr
+
+# Core targets.
+
+help::
+ $(verbose) printf '%s\n' '' \
+ 'Xref targets:' \
+ ' xref Run Xrefr using $$XREF_CONFIG as config file if defined'
+
+distclean:: distclean-xref
+
+# Plugin-specific targets.
+
+$(XREFR):
+ $(gen_verbose) $(call core_http_get,$(XREFR),$(XREFR_URL))
+ $(verbose) chmod +x $(XREFR)
+
+xref: deps app $(XREFR)
+ $(gen_verbose) $(XREFR) $(XREFR_ARGS)
+
+distclean-xref:
+ $(gen_verbose) rm -rf $(XREFR)
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Viktor Söderqvist <viktor@zuiderkwast.se>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+COVER_REPORT_DIR ?= cover
+COVER_DATA_DIR ?= $(COVER_REPORT_DIR)
+
+ifdef COVER
+COVER_APPS ?= $(notdir $(ALL_APPS_DIRS))
+COVER_DEPS ?=
+endif
+
+# Code coverage for Common Test.
+
+ifdef COVER
+ifdef CT_RUN
+ifneq ($(wildcard $(TEST_DIR)),)
+test-build:: $(TEST_DIR)/ct.cover.spec
+
+$(TEST_DIR)/ct.cover.spec: cover-data-dir
+ $(gen_verbose) printf "%s\n" \
+ "{incl_app, '$(PROJECT)', details}." \
+ "{incl_dirs, '$(PROJECT)', [\"$(call core_native_path,$(CURDIR)/ebin)\" \
+ $(foreach a,$(COVER_APPS),$(comma) \"$(call core_native_path,$(APPS_DIR)/$a/ebin)\") \
+ $(foreach d,$(COVER_DEPS),$(comma) \"$(call core_native_path,$(DEPS_DIR)/$d/ebin)\")]}." \
+ '{export,"$(call core_native_path,$(abspath $(COVER_DATA_DIR))/ct.coverdata)"}.' > $@
+
+CT_RUN += -cover $(TEST_DIR)/ct.cover.spec
+endif
+endif
+endif
+
+# Code coverage for other tools.
+
+ifdef COVER
+define cover.erl
+ CoverSetup = fun() ->
+ Dirs = ["$(call core_native_path,$(CURDIR)/ebin)"
+ $(foreach a,$(COVER_APPS),$(comma) "$(call core_native_path,$(APPS_DIR)/$a/ebin)")
+ $(foreach d,$(COVER_DEPS),$(comma) "$(call core_native_path,$(DEPS_DIR)/$d/ebin)")],
+ [begin
+ case filelib:is_dir(Dir) of
+ false -> false;
+ true ->
+ case cover:compile_beam_directory(Dir) of
+ {error, _} -> halt(1);
+ _ -> true
+ end
+ end
+ end || Dir <- Dirs]
+ end,
+ CoverExport = fun(Filename) -> cover:export(Filename) end,
+endef
+else
+define cover.erl
+ CoverSetup = fun() -> ok end,
+ CoverExport = fun(_) -> ok end,
+endef
+endif
+
+# Core targets
+
+ifdef COVER
+ifneq ($(COVER_REPORT_DIR),)
+tests::
+ $(verbose) $(MAKE) --no-print-directory cover-report
+endif
+
+cover-data-dir: | $(COVER_DATA_DIR)
+
+$(COVER_DATA_DIR):
+ $(verbose) mkdir -p $(COVER_DATA_DIR)
+else
+cover-data-dir:
+endif
+
+clean:: coverdata-clean
+
+ifneq ($(COVER_REPORT_DIR),)
+distclean:: cover-report-clean
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Cover targets:" \
+		"  cover-report  Generate an HTML coverage report from previously collected" \
+ " cover data." \
+ " all.coverdata Merge all coverdata files into all.coverdata." \
+ "" \
+		"If COVER=1 is set, coverage data is generated by the eunit and ct targets." \
+		"The tests target additionally generates an HTML coverage report from the" \
+		"combined coverdata files of these testing tools. HTML reports can be disabled" \
+ "by setting COVER_REPORT_DIR to empty."
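+
+# For example, "make tests COVER=1" runs the test suites under cover,
+# leaving *.coverdata files in $(COVER_DATA_DIR) and, unless
+# COVER_REPORT_DIR is set to empty, an HTML report in $(COVER_REPORT_DIR).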
+
+# Plugin specific targets
+
+COVERDATA = $(filter-out $(COVER_DATA_DIR)/all.coverdata,$(wildcard $(COVER_DATA_DIR)/*.coverdata))
+
+.PHONY: coverdata-clean
+coverdata-clean:
+ $(gen_verbose) rm -f $(COVER_DATA_DIR)/*.coverdata $(TEST_DIR)/ct.cover.spec
+
+# Merge all coverdata files into one.
+define cover_export.erl
+ $(foreach f,$(COVERDATA),cover:import("$(f)") == ok orelse halt(1),)
+ cover:export("$(COVER_DATA_DIR)/$@"), halt(0).
+endef
+
+all.coverdata: $(COVERDATA) cover-data-dir
+ $(gen_verbose) $(call erlang,$(cover_export.erl))
+
+# These are only defined if COVER_REPORT_DIR is non-empty. Set COVER_REPORT_DIR to
+# empty if you want the coverdata files but not the HTML report.
+ifneq ($(COVER_REPORT_DIR),)
+
+.PHONY: cover-report-clean cover-report
+
+cover-report-clean:
+ $(gen_verbose) rm -rf $(COVER_REPORT_DIR)
+ifneq ($(COVER_REPORT_DIR),$(COVER_DATA_DIR))
+ $(if $(shell ls -A $(COVER_DATA_DIR)/),,$(verbose) rmdir $(COVER_DATA_DIR))
+endif
+
+ifeq ($(COVERDATA),)
+cover-report:
+else
+
+# Modules which include eunit.hrl always contain one line without coverage
+# because eunit defines test/0 which is never called. We compensate for this.
+EUNIT_HRL_MODS = $(subst $(space),$(comma),$(shell \
+ grep -H -e '^\s*-include.*include/eunit\.hrl"' src/*.erl \
+ | sed "s/^src\/\(.*\)\.erl:.*/'\1'/" | uniq))
+
+define cover_report.erl
+ $(foreach f,$(COVERDATA),cover:import("$(f)") == ok orelse halt(1),)
+ Ms = cover:imported_modules(),
+ [cover:analyse_to_file(M, "$(COVER_REPORT_DIR)/" ++ atom_to_list(M)
+ ++ ".COVER.html", [html]) || M <- Ms],
+ Report = [begin {ok, R} = cover:analyse(M, module), R end || M <- Ms],
+ EunitHrlMods = [$(EUNIT_HRL_MODS)],
+ Report1 = [{M, {Y, case lists:member(M, EunitHrlMods) of
+ true -> N - 1; false -> N end}} || {M, {Y, N}} <- Report],
+ TotalY = lists:sum([Y || {_, {Y, _}} <- Report1]),
+ TotalN = lists:sum([N || {_, {_, N}} <- Report1]),
+ Perc = fun(Y, N) -> case Y + N of 0 -> 100; S -> round(100 * Y / S) end end,
+ TotalPerc = Perc(TotalY, TotalN),
+ {ok, F} = file:open("$(COVER_REPORT_DIR)/index.html", [write]),
+ io:format(F, "<!DOCTYPE html><html>~n"
+ "<head><meta charset=\"UTF-8\">~n"
+ "<title>Coverage report</title></head>~n"
+ "<body>~n", []),
+ io:format(F, "<h1>Coverage</h1>~n<p>Total: ~p%</p>~n", [TotalPerc]),
+ io:format(F, "<table><tr><th>Module</th><th>Coverage</th></tr>~n", []),
+ [io:format(F, "<tr><td><a href=\"~p.COVER.html\">~p</a></td>"
+ "<td>~p%</td></tr>~n",
+ [M, M, Perc(Y, N)]) || {M, {Y, N}} <- Report1],
+ How = "$(subst $(space),$(comma)$(space),$(basename $(COVERDATA)))",
+ Date = "$(shell date -u "+%Y-%m-%dT%H:%M:%SZ")",
+ io:format(F, "</table>~n"
+ "<p>Generated using ~s and erlang.mk on ~s.</p>~n"
+ "</body></html>", [How, Date]),
+ halt().
+endef
+
+cover-report:
+ $(verbose) mkdir -p $(COVER_REPORT_DIR)
+ $(gen_verbose) $(call erlang,$(cover_report.erl))
+
+endif
+endif # ifneq ($(COVER_REPORT_DIR),)
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: sfx
+
+ifdef RELX_REL
+ifdef SFX
+
+# Configuration.
+
+SFX_ARCHIVE ?= $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/$(RELX_REL_NAME)-$(RELX_REL_VSN).tar.gz
+SFX_OUTPUT_FILE ?= $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME).run
+
+# Core targets.
+
+rel:: sfx
+
+# Plugin-specific targets.
+
+define sfx_stub
+#!/bin/sh
+
+TMPDIR=`mktemp -d`
+ARCHIVE=`awk '/^__ARCHIVE_BELOW__$$/ {print NR + 1; exit 0;}' $$0`
+FILENAME=$$(basename $$0)
+REL=$${FILENAME%.*}
+
+tail -n+$$ARCHIVE $$0 | tar -xzf - -C $$TMPDIR
+
+$$TMPDIR/bin/$$REL console
+RET=$$?
+
+rm -rf $$TMPDIR
+
+exit $$RET
+
+__ARCHIVE_BELOW__
+endef
+
+sfx:
+ $(verbose) $(call core_render,sfx_stub,$(SFX_OUTPUT_FILE))
+ $(gen_verbose) cat $(SFX_ARCHIVE) >> $(SFX_OUTPUT_FILE)
+ $(verbose) chmod +x $(SFX_OUTPUT_FILE)
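+
+# For example, "make rel SFX=1" appends the release tarball to the stub
+# above, producing a self-extracting $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME).run.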
+
+endif
+endif
+
+# Copyright (c) 2013-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# External plugins.
+
+DEP_PLUGINS ?=
+
+$(foreach p,$(DEP_PLUGINS),\
+ $(eval $(if $(findstring /,$p),\
+ $(call core_dep_plugin,$p,$(firstword $(subst /, ,$p))),\
+ $(call core_dep_plugin,$p/plugins.mk,$p))))
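+
+# For example (names hypothetical): "DEP_PLUGINS = somedep" loads
+# somedep/plugins.mk, while "DEP_PLUGINS = somedep/mk/extra.mk" loads
+# that specific file from the somedep dependency.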
+
+help:: help-plugins
+
+help-plugins::
+ $(verbose) :
+
+# Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015-2016, Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Fetch dependencies recursively (without building them).
+
+.PHONY: fetch-deps fetch-doc-deps fetch-rel-deps fetch-test-deps \
+ fetch-shell-deps
+
+.PHONY: $(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+fetch-deps: $(ERLANG_MK_RECURSIVE_DEPS_LIST)
+fetch-doc-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST)
+fetch-rel-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST)
+fetch-test-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST)
+fetch-shell-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+ifneq ($(SKIP_DEPS),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST):
+ $(verbose) :> $@
+else
+# By default, we fetch "normal" dependencies. They are also included
+# regardless of the type of dependencies requested.
+#
+# $(ALL_DEPS_DIRS) includes $(BUILD_DEPS).
+
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_DOC_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_REL_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_TEST_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_SHELL_DEPS_DIRS)
+
+# Allow fetch-deps to be used with $(DEP_TYPES) to fetch multiple types
+# of dependencies with a single target.
+ifneq ($(filter doc,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_DOC_DEPS_DIRS)
+endif
+ifneq ($(filter rel,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_REL_DEPS_DIRS)
+endif
+ifneq ($(filter test,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_TEST_DEPS_DIRS)
+endif
+ifneq ($(filter shell,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_SHELL_DEPS_DIRS)
+endif
+
+ERLANG_MK_RECURSIVE_TMP_LIST := $(abspath $(ERLANG_MK_TMP)/recursive-tmp-deps-$(shell echo $$PPID).log)
+
+$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): | $(ERLANG_MK_TMP)
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_RECURSIVE_TMP_LIST)
+endif
+ $(verbose) touch $(ERLANG_MK_RECURSIVE_TMP_LIST)
+ $(verbose) set -e; for dep in $^ ; do \
+ if ! grep -qs ^$$dep$$ $(ERLANG_MK_RECURSIVE_TMP_LIST); then \
+ echo $$dep >> $(ERLANG_MK_RECURSIVE_TMP_LIST); \
+ if grep -qs -E "^[[:blank:]]*include[[:blank:]]+(erlang\.mk|.*/erlang\.mk|.*ERLANG_MK_FILENAME.*)$$" \
+ $$dep/GNUmakefile $$dep/makefile $$dep/Makefile; then \
+ $(MAKE) -C $$dep fetch-deps \
+ IS_DEP=1 \
+ ERLANG_MK_RECURSIVE_TMP_LIST=$(ERLANG_MK_RECURSIVE_TMP_LIST); \
+ fi \
+ fi \
+ done
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) sort < $(ERLANG_MK_RECURSIVE_TMP_LIST) | \
+ uniq > $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted
+ $(verbose) cmp -s $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted $@ \
+ || mv $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted $@
+ $(verbose) rm -f $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted
+ $(verbose) rm $(ERLANG_MK_RECURSIVE_TMP_LIST)
+endif
+endif # ifneq ($(SKIP_DEPS),)
+
+# List dependencies recursively.
+
+.PHONY: list-deps list-doc-deps list-rel-deps list-test-deps \
+ list-shell-deps
+
+list-deps: $(ERLANG_MK_RECURSIVE_DEPS_LIST)
+list-doc-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST)
+list-rel-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST)
+list-test-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST)
+list-shell-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+list-deps list-doc-deps list-rel-deps list-test-deps list-shell-deps:
+ $(verbose) cat $^
+
+# Query dependencies recursively.
+
+.PHONY: query-deps query-doc-deps query-rel-deps query-test-deps \
+ query-shell-deps
+
+QUERY ?= name fetch_method repo version
+
+define query_target
+$(1): $(2) clean-tmp-query.log
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(4)
+endif
+ $(verbose) $(foreach dep,$(3),\
+ echo $(PROJECT): $(foreach q,$(QUERY),$(call query_$(q),$(dep))) >> $(4) ;)
+ $(if $(filter-out query-deps,$(1)),,\
+ $(verbose) set -e; for dep in $(3) ; do \
+ if grep -qs ^$$$$dep$$$$ $(ERLANG_MK_TMP)/query.log; then \
+ :; \
+ else \
+ echo $$$$dep >> $(ERLANG_MK_TMP)/query.log; \
+ $(MAKE) -C $(DEPS_DIR)/$$$$dep $$@ QUERY="$(QUERY)" IS_DEP=1 || true; \
+ fi \
+ done)
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) touch $(4)
+ $(verbose) cat $(4)
+endif
+endef
+
+clean-tmp-query.log:
+ifeq ($(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_TMP)/query.log
+endif
+
+$(eval $(call query_target,query-deps,$(ERLANG_MK_RECURSIVE_DEPS_LIST),$(BUILD_DEPS) $(DEPS),$(ERLANG_MK_QUERY_DEPS_FILE)))
+$(eval $(call query_target,query-doc-deps,$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST),$(DOC_DEPS),$(ERLANG_MK_QUERY_DOC_DEPS_FILE)))
+$(eval $(call query_target,query-rel-deps,$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST),$(REL_DEPS),$(ERLANG_MK_QUERY_REL_DEPS_FILE)))
+$(eval $(call query_target,query-test-deps,$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST),$(TEST_DEPS),$(ERLANG_MK_QUERY_TEST_DEPS_FILE)))
+$(eval $(call query_target,query-shell-deps,$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST),$(SHELL_DEPS),$(ERLANG_MK_QUERY_SHELL_DEPS_FILE)))
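+
+# For example, "make query-deps QUERY='name repo'" prints, for each
+# dependency found recursively, a line of the form
+# "$(PROJECT): <dep name> <dep repo>".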
diff --git a/deps/rabbitmq_management/include/rabbit_mgmt.hrl b/deps/rabbitmq_management/include/rabbit_mgmt.hrl
new file mode 100644
index 0000000000..fc07271dbe
--- /dev/null
+++ b/deps/rabbitmq_management/include/rabbit_mgmt.hrl
@@ -0,0 +1,19 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% https://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ Management Console.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-define(AUTH_REALM, "Basic realm=\"RabbitMQ Management\"").
+
+-define(HEALTH_CHECK_FAILURE_STATUS, 503).
diff --git a/deps/rabbitmq_management/priv/schema/rabbitmq_management.schema b/deps/rabbitmq_management/priv/schema/rabbitmq_management.schema
new file mode 100644
index 0000000000..61ec859162
--- /dev/null
+++ b/deps/rabbitmq_management/priv/schema/rabbitmq_management.schema
@@ -0,0 +1,447 @@
+%% ----------------------------------------------------------------------------
+%% RabbitMQ Management Plugin
+%%
+%% See https://www.rabbitmq.com/management.html for details
+%% ----------------------------------------------------------------------------
+
+%% Load definitions from a JSON file or directory of files. See
+%% https://www.rabbitmq.com/management.html#load-definitions
+%%
+%% {load_definitions, "/path/to/schema.json"},
+%% {load_definitions, "/path/to/schemas"},
+{mapping, "management.load_definitions", "rabbitmq_management.load_definitions",
+ [{datatype, string},
+ {validators, ["file_accessible"]}]}.
+
+%% Log all requests to the management HTTP API to a file.
+%%
+%% {http_log_dir, "/path/to/access.log"},
+
+{mapping, "management.http_log_dir", "rabbitmq_management.http_log_dir",
+ [{datatype, string}]}.
+
+%% HTTP (TCP) listener options ========================================================
+
+%% HTTP listener consistent with Web STOMP and Web MQTT.
+%%
+%% {tcp_config, [{port, 15672},
+%% {ip, "127.0.0.1"}]}
+
+{mapping, "management.tcp.port", "rabbitmq_management.tcp_config.port",
+ [{datatype, integer}]}.
+{mapping, "management.tcp.ip", "rabbitmq_management.tcp_config.ip",
+ [{datatype, string},
+ {validators, ["is_ip"]}]}.
+
+{mapping, "management.tcp.compress", "rabbitmq_management.tcp_config.cowboy_opts.compress",
+ [{datatype, {enum, [true, false]}}]}.
+{mapping, "management.tcp.idle_timeout", "rabbitmq_management.tcp_config.cowboy_opts.idle_timeout",
+ [{datatype, integer}, {validators, ["non_negative_integer"]}]}.
+{mapping, "management.tcp.inactivity_timeout", "rabbitmq_management.tcp_config.cowboy_opts.inactivity_timeout",
+ [{datatype, integer}, {validators, ["non_negative_integer"]}]}.
+{mapping, "management.tcp.request_timeout", "rabbitmq_management.tcp_config.cowboy_opts.request_timeout",
+ [{datatype, integer}, {validators, ["non_negative_integer"]}]}.
+{mapping, "management.tcp.shutdown_timeout", "rabbitmq_management.tcp_config.cowboy_opts.shutdown_timeout",
+ [{datatype, integer}, {validators, ["non_negative_integer"]}]}.
+{mapping, "management.tcp.max_keepalive", "rabbitmq_management.tcp_config.cowboy_opts.max_keepalive",
+ [{datatype, integer}, {validators, ["non_negative_integer"]}]}.
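+
+%% For example, in rabbitmq.conf (values illustrative):
+%%
+%% management.tcp.port = 15672
+%% management.tcp.ip = 127.0.0.1
+%% management.tcp.idle_timeout = 120000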
+
+
+%% HTTPS (TLS) listener options ========================================================
+
+%% HTTPS listener consistent with Web STOMP and Web MQTT.
+%%
+%% {ssl_config, [{port, 15671},
+%% {ip, "127.0.0.1"},
+%% {cacertfile, "/path/to/cacert.pem"},
+%% {certfile, "/path/to/cert.pem"},
+%% {keyfile, "/path/to/key.pem"}]}
+
+{mapping, "management.ssl.port", "rabbitmq_management.ssl_config.port",
+ [{datatype, integer}]}.
+{mapping, "management.ssl.backlog", "rabbitmq_management.ssl_config.backlog",
+ [{datatype, integer}]}.
+{mapping, "management.ssl.ip", "rabbitmq_management.ssl_config.ip",
+ [{datatype, string}, {validators, ["is_ip"]}]}.
+{mapping, "management.ssl.certfile", "rabbitmq_management.ssl_config.certfile",
+ [{datatype, string}, {validators, ["file_accessible"]}]}.
+{mapping, "management.ssl.keyfile", "rabbitmq_management.ssl_config.keyfile",
+ [{datatype, string}, {validators, ["file_accessible"]}]}.
+{mapping, "management.ssl.cacertfile", "rabbitmq_management.ssl_config.cacertfile",
+ [{datatype, string}, {validators, ["file_accessible"]}]}.
+{mapping, "management.ssl.password", "rabbitmq_management.ssl_config.password",
+ [{datatype, string}]}.
+
+{mapping, "management.ssl.verify", "rabbitmq_management.ssl_config.verify", [
+ {datatype, {enum, [verify_peer, verify_none]}}]}.
+
+{mapping, "management.ssl.fail_if_no_peer_cert", "rabbitmq_management.ssl_config.fail_if_no_peer_cert", [
+ {datatype, {enum, [true, false]}}]}.
+
+{mapping, "management.ssl.honor_cipher_order", "rabbitmq_management.ssl_config.honor_cipher_order",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "management.ssl.honor_ecc_order", "rabbitmq_management.ssl_config.honor_ecc_order",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "management.ssl.reuse_sessions", "rabbitmq_management.ssl_config.reuse_sessions",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "management.ssl.secure_renegotiate", "rabbitmq_management.ssl_config.secure_renegotiate",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "management.ssl.client_renegotiation", "rabbitmq_management.ssl_config.client_renegotiation",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "management.ssl.depth", "rabbitmq_management.ssl_config.depth",
+ [{datatype, integer}, {validators, ["byte"]}]}.
+
+{mapping, "management.ssl.versions.$version", "rabbitmq_management.ssl_config.versions",
+ [{datatype, atom}]}.
+
+{translation, "rabbitmq_management.ssl_config.versions",
+fun(Conf) ->
+ Settings = cuttlefish_variable:filter_by_prefix("management.ssl.versions", Conf),
+ [V || {_, V} <- Settings]
+end}.
+
+{mapping, "management.ssl.ciphers.$cipher", "rabbitmq_management.ssl_config.ciphers",
+ [{datatype, string}]}.
+
+{translation, "rabbitmq_management.ssl_config.ciphers",
+fun(Conf) ->
+ Settings = cuttlefish_variable:filter_by_prefix("management.ssl.ciphers", Conf),
+ lists:reverse([V || {_, V} <- Settings])
+end}.
+
+{mapping, "management.ssl.compress", "rabbitmq_management.ssl_config.cowboy_opts.compress",
+ [{datatype, {enum, [true, false]}}]}.
+{mapping, "management.ssl.idle_timeout", "rabbitmq_management.ssl_config.cowboy_opts.idle_timeout",
+ [{datatype, integer}, {validators, ["non_negative_integer"]}]}.
+{mapping, "management.ssl.inactivity_timeout", "rabbitmq_management.ssl_config.cowboy_opts.inactivity_timeout",
+ [{datatype, integer}, {validators, ["non_negative_integer"]}]}.
+{mapping, "management.ssl.request_timeout", "rabbitmq_management.ssl_config.cowboy_opts.request_timeout",
+ [{datatype, integer}, {validators, ["non_negative_integer"]}]}.
+{mapping, "management.ssl.shutdown_timeout", "rabbitmq_management.ssl_config.cowboy_opts.shutdown_timeout",
+ [{datatype, integer}, {validators, ["non_negative_integer"]}]}.
+{mapping, "management.ssl.max_keepalive", "rabbitmq_management.ssl_config.cowboy_opts.max_keepalive",
+ [{datatype, integer}, {validators, ["non_negative_integer"]}]}.
+
+
+
+%% Legacy listener options ========================================================
+
+%% Legacy (pre-3.7.9) TCP listener format.
+%%
+%% {listener, [{port, 12345},
+%% {ip, "127.0.0.1"},
+%% {ssl, true},
+%% {ssl_opts, [{cacertfile, "/path/to/cacert.pem"},
+%% {certfile, "/path/to/cert.pem"},
+%% {keyfile, "/path/to/key.pem"}]}]},
+
+{mapping, "management.listener.port", "rabbitmq_management.listener.port",
+ [{datatype, integer}]}.
+
+{mapping, "management.listener.ip", "rabbitmq_management.listener.ip",
+ [{datatype, string},
+ {validators, ["is_ip"]}]}.
+
+{mapping, "management.listener.ssl", "rabbitmq_management.listener.ssl",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "management.listener.server.compress", "rabbitmq_management.listener.cowboy_opts.compress",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "management.listener.server.idle_timeout", "rabbitmq_management.listener.cowboy_opts.idle_timeout",
+ [{datatype, integer}, {validators, ["non_negative_integer"]}]}.
+
+{mapping, "management.listener.server.inactivity_timeout", "rabbitmq_management.listener.cowboy_opts.inactivity_timeout",
+ [{datatype, integer}, {validators, ["non_negative_integer"]}]}.
+
+{mapping, "management.listener.server.request_timeout", "rabbitmq_management.listener.cowboy_opts.request_timeout",
+ [{datatype, integer}, {validators, ["non_negative_integer"]}]}.
+
+{mapping, "management.listener.server.shutdown_timeout", "rabbitmq_management.listener.cowboy_opts.shutdown_timeout",
+ [{datatype, integer}, {validators, ["non_negative_integer"]}]}.
+
+{mapping, "management.listener.server.max_keepalive", "rabbitmq_management.listener.cowboy_opts.max_keepalive",
+ [{datatype, integer}, {validators, ["non_negative_integer"]}]}.
+
+%% Legacy HTTPS listener options ========================================================
+
+{mapping, "management.listener.ssl_opts", "rabbitmq_management.listener.ssl_opts", [
+ {datatype, {enum, [none]}}
+]}.
+
+{translation, "rabbitmq_management.listener.ssl_opts",
+fun(Conf) ->
+ case cuttlefish:conf_get("management.listener.ssl_opts", Conf, undefined) of
+ none -> [];
+ _ -> cuttlefish:invalid("Invalid management.listener.ssl_opts")
+ end
+end}.
+
+{mapping, "management.listener.ssl_opts.verify", "rabbitmq_management.listener.ssl_opts.verify", [
+ {datatype, {enum, [verify_peer, verify_none]}}]}.
+
+{mapping, "management.listener.ssl_opts.fail_if_no_peer_cert", "rabbitmq_management.listener.ssl_opts.fail_if_no_peer_cert", [
+ {datatype, {enum, [true, false]}}]}.
+
+{mapping, "management.listener.ssl_opts.cacertfile", "rabbitmq_management.listener.ssl_opts.cacertfile",
+ [{datatype, string}, {validators, ["file_accessible"]}]}.
+
+{mapping, "management.listener.ssl_opts.certfile", "rabbitmq_management.listener.ssl_opts.certfile",
+ [{datatype, string}, {validators, ["file_accessible"]}]}.
+
+{mapping, "management.listener.ssl_opts.cacerts.$name", "rabbitmq_management.listener.ssl_opts.cacerts",
+ [{datatype, string}]}.
+
+{translation, "rabbitmq_management.listener.ssl_opts.cacerts",
+fun(Conf) ->
+ Settings = cuttlefish_variable:filter_by_prefix("management.listener.ssl_opts.cacerts", Conf),
+ [ list_to_binary(V) || {_, V} <- Settings ]
+end}.
+
+{mapping, "management.listener.ssl_opts.honor_cipher_order", "rabbitmq_management.listener.ssl_opts.honor_cipher_order",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "management.listener.ssl_opts.honor_ecc_order", "rabbitmq_management.listener.ssl_opts.honor_ecc_order",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "management.listener.ssl_opts.reuse_sessions", "rabbitmq_management.listener.ssl_opts.reuse_sessions",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "management.listener.ssl_opts.secure_renegotiate", "rabbitmq_management.listener.ssl_opts.secure_renegotiate",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "management.listener.ssl_opts.client_renegotiation", "rabbitmq_management.listener.ssl_opts.client_renegotiation",
+ [{datatype, {enum, [true, false]}}]}.
+
+
+{mapping, "management.listener.ssl_opts.versions.$version", "rabbitmq_management.listener.ssl_opts.versions",
+ [{datatype, atom}]}.
+
+{translation, "rabbitmq_management.listener.ssl_opts.versions",
+fun(Conf) ->
+ Settings = cuttlefish_variable:filter_by_prefix("management.listener.ssl_opts.versions", Conf),
+ [ V || {_, V} <- Settings ]
+end}.
+
+
+{mapping, "management.listener.ssl_opts.cert", "rabbitmq_management.listener.ssl_opts.cert",
+ [{datatype, string}]}.
+
+{translation, "rabbitmq_management.listener.ssl_opts.cert",
+fun(Conf) ->
+ list_to_binary(cuttlefish:conf_get("management.listener.ssl_opts.cert", Conf))
+end}.
+
+{mapping, "management.listener.ssl_opts.crl_check", "rabbitmq_management.listener.ssl_opts.crl_check",
+ [{datatype, [{enum, [true, false, peer, best_effort]}]}]}.
+
+{mapping, "management.listener.ssl_opts.depth", "rabbitmq_management.listener.ssl_opts.depth",
+ [{datatype, integer}, {validators, ["byte"]}]}.
+
+{mapping, "management.listener.ssl_opts.dh", "rabbitmq_management.listener.ssl_opts.dh",
+ [{datatype, string}]}.
+
+{translation, "rabbitmq_management.listener.ssl_opts.dh",
+fun(Conf) ->
+ list_to_binary(cuttlefish:conf_get("management.listener.ssl_opts.dh", Conf))
+end}.
+
+{mapping, "management.listener.ssl_opts.dhfile", "rabbitmq_management.listener.ssl_opts.dhfile",
+ [{datatype, string}, {validators, ["file_accessible"]}]}.
+
+{mapping, "management.listener.ssl_opts.key.RSAPrivateKey", "rabbitmq_management.listener.ssl_opts.key",
+ [{datatype, string}]}.
+
+{mapping, "management.listener.ssl_opts.key.DSAPrivateKey", "rabbitmq_management.listener.ssl_opts.key",
+ [{datatype, string}]}.
+
+{mapping, "management.listener.ssl_opts.key.PrivateKeyInfo", "rabbitmq_management.listener.ssl_opts.key",
+ [{datatype, string}]}.
+
+{translation, "rabbitmq_management.listener.ssl_opts.key",
+fun(Conf) ->
+ case cuttlefish_variable:filter_by_prefix("management.listener.ssl_opts.key", Conf) of
+        [{[_,_,_,_,Key], Val}|_] -> {list_to_atom(Key), list_to_binary(Val)};
+ _ -> undefined
+ end
+end}.
+
+{mapping, "management.listener.ssl_opts.keyfile", "rabbitmq_management.listener.ssl_opts.keyfile",
+ [{datatype, string}, {validators, ["file_accessible"]}]}.
+
+{mapping, "management.listener.ssl_opts.log_alert", "rabbitmq_management.listener.ssl_opts.log_alert",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "management.listener.ssl_opts.password", "rabbitmq_management.listener.ssl_opts.password",
+ [{datatype, string}]}.
+
+{mapping, "management.listener.ssl_opts.psk_identity", "rabbitmq_management.listener.ssl_opts.psk_identity",
+ [{datatype, string}]}.
+
+
+%% A custom path prefix for all HTTP request handlers.
+%%
+%% {path_prefix, "/a/prefix"},
+
+{mapping, "management.path_prefix", "rabbitmq_management.path_prefix",
+ [{datatype, string}]}.
+
+%% Login session timeout in minutes
+
+{mapping, "management.login_session_timeout", "rabbitmq_management.login_session_timeout", [
+ {datatype, integer}, {validators, ["non_negative_integer"]}
+]}.
+
+%%
+%% Inter-node query result caching
+%%
+
+{mapping, "management.db_cache_multiplier", "rabbitmq_management.management_db_cache_multiplier", [
+ {datatype, integer}, {validators, ["non_negative_integer"]}
+]}.
+
+
+%%
+%% CORS
+%%
+
+{mapping, "management.cors.allow_origins", "rabbitmq_management.cors_allow_origins", [
+ {datatype, {enum, [none]}}
+]}.
+
+{mapping, "management.cors.allow_origins.$name", "rabbitmq_management.cors_allow_origins", [
+ {datatype, string}
+]}.
+
+{translation, "rabbitmq_management.cors_allow_origins",
+fun(Conf) ->
+ case cuttlefish:conf_get("management.cors.allow_origins", Conf, undefined) of
+ none -> [];
+ _ ->
+ Settings = cuttlefish_variable:filter_by_prefix("management.cors.allow_origins", Conf),
+ [V || {_, V} <- Settings]
+ end
+end}.
+
+
+{mapping, "management.cors.max_age", "rabbitmq_management.cors_max_age", [
+ {datatype, integer}, {validators, ["non_negative_integer"]}
+]}.
+
+{translation, "rabbitmq_management.cors_max_age",
+fun(Conf) ->
+ case cuttlefish:conf_get("management.cors.max_age", Conf, undefined) of
+ undefined -> cuttlefish:unset();
+ Value -> Value
+ end
+end}.
+
+
+%% CSP (https://developer.mozilla.org/en-US/docs/Web/HTTP/CSP)
+
+{mapping, "management.csp.policy", "rabbitmq_management.content_security_policy", [
+ {datatype, string}
+]}.
+
+{translation, "rabbitmq_management.content_security_policy",
+fun(Conf) ->
+ case cuttlefish:conf_get("management.csp.policy", Conf, undefined) of
+ undefined -> cuttlefish:unset();
+ Value -> Value
+ end
+end}.
+
+
+%% HSTS (https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Strict-Transport-Security)
+
+{mapping, "management.hsts.policy", "rabbitmq_management.strict_transport_security", [
+ {datatype, string}
+]}.
+
+{translation, "rabbitmq_management.strict_transport_security",
+fun(Conf) ->
+ case cuttlefish:conf_get("management.hsts.policy", Conf, undefined) of
+ undefined -> cuttlefish:unset();
+ Value -> Value
+ end
+end}.
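+
+%% Hypothetical example values for the two header policies above:
+%%
+%% management.csp.policy = default-src 'self'
+%% management.hsts.policy = max-age=31536000; includeSubDomains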
+
+%% OAuth 2/SSO access only
+
+{mapping, "management.disable_basic_auth", "rabbitmq_management.disable_basic_auth",
+ [{datatype, {enum, [true, false]}}]}.
+
+%% Management only
+
+{mapping, "management.disable_stats", "rabbitmq_management.disable_management_stats", [
+ {datatype, {enum, [true, false]}}
+]}.
+
+{mapping, "management.enable_queue_totals", "rabbitmq_management.enable_queue_totals", [
+ {datatype, {enum, [true, false]}}]}.
+
+%% ===========================================================================
+%% Authorization
+
+{mapping, "management.enable_uaa", "rabbitmq_management.enable_uaa",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "management.uaa_client_id", "rabbitmq_management.uaa_client_id",
+ [{datatype, string}]}.
+
+{mapping, "management.uaa_location", "rabbitmq_management.uaa_location",
+ [{datatype, string}]}.
+
+%% ===========================================================================
+
+
+%% One of 'basic', 'detailed' or 'none'. See
+%% https://www.rabbitmq.com/management.html#fine-stats for more details.
+%% {rates_mode, basic},
+{mapping, "management.rates_mode", "rabbitmq_management.rates_mode",
+ [{datatype, {enum, [basic, detailed, none]}}]}.
+
+%% Configure how long aggregated data (such as message rates and queue
+%% lengths) is retained. Please read the plugin's documentation in
+%% https://www.rabbitmq.com/management.html#configuration for more
+%% details.
+%%
+%% {sample_retention_policies,
+%% [{global, [{60, 5}, {3600, 60}, {86400, 1200}]},
+%% {basic, [{60, 5}, {3600, 60}]},
+%% {detailed, [{10, 5}]}]}
+
+{mapping, "management.sample_retention_policies.$section.$interval",
+ "rabbitmq_management.sample_retention_policies",
+ [{datatype, integer}]}.
+
+{translation, "rabbitmq_management.sample_retention_policies",
+fun(Conf) ->
+ Global = cuttlefish_variable:filter_by_prefix("management.sample_retention_policies.global", Conf),
+ Basic = cuttlefish_variable:filter_by_prefix("management.sample_retention_policies.basic", Conf),
+ Detailed = cuttlefish_variable:filter_by_prefix("management.sample_retention_policies.detailed", Conf),
+ TranslateKey = fun("minute") -> 60;
+ ("hour") -> 3600;
+ ("day") -> 86400;
+ (Other) -> list_to_integer(Other)
+ end,
+ TranslatePolicy = fun(Section) ->
+ [ {TranslateKey(Key), Val} || {[_,_,_,Key], Val} <- Section ]
+ end,
+ [{global, TranslatePolicy(Global)},
+ {basic, TranslatePolicy(Basic)},
+ {detailed, TranslatePolicy(Detailed)}]
+end}.
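+
+%% In sysctl-style rabbitmq.conf syntax the classic-format example above
+%% translates roughly to the following (interval keys may be "minute",
+%% "hour", "day" or a number of seconds, per the translation above):
+%%
+%% management.sample_retention_policies.global.minute = 5
+%% management.sample_retention_policies.global.hour = 60
+%% management.sample_retention_policies.global.day = 1200
+%% management.sample_retention_policies.basic.minute = 5
+%% management.sample_retention_policies.basic.hour = 60
+%% management.sample_retention_policies.detailed.10 = 5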
+
+
+{validator, "is_dir", "is not directory",
+fun(File) ->
+ ReadFile = file:list_dir(File),
+ element(1, ReadFile) == ok
+end}.
diff --git a/deps/rabbitmq_management/priv/www/api/index.html b/deps/rabbitmq_management/priv/www/api/index.html
new file mode 100644
index 0000000000..a6e4921544
--- /dev/null
+++ b/deps/rabbitmq_management/priv/www/api/index.html
@@ -0,0 +1,2093 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <meta http-equiv="X-UA-Compatible" content="IE=edge" />
+ <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
+ <title>RabbitMQ Management HTTP API</title>
+ <style>
+ body { font: 12px Verdana,sans-serif; color: #444; padding: 8px 35px; }
+ td, th { font: 12px Verdana,sans-serif; color: #444; }
+ h1 { font-size: 2em; }
+ h2 { font-size: 1.5em; }
+ td.path { font-family: monospace; }
+ th { font-size: 1em; font-weight: bold; }
+ table { border-collapse: collapse; }
+ table th, table td { vertical-align: top; border: 1px solid #bbb; padding: 5px; }
+ code { background: #ffa; }
+ pre { background: black; color: #0f0; padding: 10px; word-wrap: break-word;}
+ table pre { background: #ffa; color: black; }
+ </style>
+ </head>
+ <body>
+ <h1>RabbitMQ Management HTTP API</h1>
+
+ <h2>Introduction</h2>
+
+ <p>Apart from this help page, all URIs will serve only resources
+ of type <code>application/json</code>, and will require HTTP basic
+ authentication (using the standard RabbitMQ user database). The
+ default user is guest/guest.</p>
+
+ <p>Many URIs require the name of a virtual host as part of the
+ path, since names only uniquely identify objects within a virtual
+ host. As the default virtual host is called "<code>/</code>", this
+ will need to be encoded as "<code>%2F</code>".</p>
+
+ <p>PUTing a resource creates it. The JSON object you upload must
+ have certain mandatory keys (documented below) and may have
+ optional keys. Other keys are ignored. Missing mandatory keys
+ constitute an error.</p>
+
+ <p>Since bindings do not have names or IDs in AMQP, we synthesise
+ one based on all of a binding's properties. Since predicting this name is
+ hard in the general case, you can also create bindings by POSTing
+ to a factory URI. See the example below.</p>
+
+ <p>Many URIs return lists. Such URIs can have the query string
+ parameters <code>sort</code> and <code>sort_reverse</code>
+ added. <code>sort</code> allows you to select a primary field to
+ sort by, and <code>sort_reverse</code> will reverse the sort order
+ if set to <code>true</code>. The <code>sort</code> parameter can
+ contain subfields separated by dots. This allows you to sort by a
+ nested component of the listed items; it does not allow you to
+ sort by more than one field. See the example below.</p>
+
+ <p>You can also restrict what information is returned per item
+ with the <code>columns</code> parameter. This is a comma-separated
+ list of subfields separated by dots. See the example below.</p>
+
+ <p>Most of the GET queries return many fields per
+ object. The second part of this guide covers those.</p>
+
+ <h2>Examples</h2>
+
+ <p>A few quick examples for Windows and Unix, using the command line
+ tool <code>curl</code>:</p>
+
+ <ul>
+ <li>
+ Get a list of vhosts:
+<pre>:: Windows
+C:\&gt; curl -i -u guest:guest http://localhost:15672/api/vhosts
+
+# Unix
+$ curl -i -u guest:guest http://localhost:15672/api/vhosts
+
+HTTP/1.1 200 OK
+cache-control: no-cache
+content-length: 196
+content-security-policy: default-src 'self'
+content-type: application/json
+date: Mon, 02 Sep 2019 07:51:49 GMT
+server: Cowboy
+vary: accept, accept-encoding, origin
+
+[{"cluster_state":{"rabbit@localhost":"running"},"description":"Default virtual host" <i>... (remainder elided)</i></pre>
+ </li>
+ <li>
+ Get a list of channels, fast publishers first, restricting the info
+ items we get back:
+<pre>:: Windows
+C:\&gt; curl -i -u guest:guest "http://localhost:15672/api/channels?sort=message_stats.publish_details.rate&amp;sort_reverse=true&amp;columns=name,message_stats.publish_details.rate,message_stats.deliver_get_details.rate"
+
+# Unix
+$ curl -i -u guest:guest 'http://localhost:15672/api/channels?sort=message_stats.publish_details.rate&amp;sort_reverse=true&amp;columns=name,message_stats.publish_details.rate,message_stats.deliver_get_details.rate'
+
+HTTP/1.1 200 OK
+cache-control: no-cache
+content-length: 2
+content-security-policy: default-src 'self'
+content-type: application/json
+date: Mon, 02 Sep 2019 07:54:35 GMT
+server: Cowboy
+vary: accept, accept-encoding, origin
+
+[{"message_stats":{"publish_details":{"rate" <i>... (remainder elided)</i></pre>
+ </li>
+ <li>
+ Create a new vhost:
+<pre>:: Windows
+C:\&gt; curl -i -u guest:guest -H "content-type:application/json" ^
+ -XPUT http://localhost:15672/api/vhosts/foo
+
+# Unix
+$ curl -i -u guest:guest -H "content-type:application/json" \
+ -XPUT http://localhost:15672/api/vhosts/foo
+
+HTTP/1.1 201 Created
+content-length: 0
+content-security-policy: default-src 'self'
+date: Mon, 02 Sep 2019 07:55:24 GMT
+server: Cowboy
+vary: accept, accept-encoding, origin</pre>
+ <p>Note: you must specify <code>application/json</code> as the
+ mime type.</p>
+ <p>Note: the name of the object is not needed in the JSON
+ object uploaded, since it is in the URI. As a virtual host
+ has no properties apart from its name, this means you do not
+ need to specify a body at all!</p>
+ </li>
+ <li>
+ Create a new exchange in the default virtual host:
+<pre>:: Windows
+C:\&gt; curl -i -u guest:guest -H "content-type:application/json" ^
+ -XPUT -d"{""type"":""direct"",""durable"":true}" ^
+ http://localhost:15672/api/exchanges/%2F/my-new-exchange
+
+# Unix
+$ curl -i -u guest:guest -H "content-type:application/json" \
+ -XPUT -d'{"type":"direct","durable":true}' \
+ http://localhost:15672/api/exchanges/%2F/my-new-exchange
+
+HTTP/1.1 201 Created
+content-length: 0
+content-security-policy: default-src 'self'
+date: Mon, 02 Sep 2019 07:56:06 GMT
+server: Cowboy
+vary: accept, accept-encoding, origin</pre>
+ <p>Note: we never return a body in response to a PUT or
+ DELETE, unless it fails.</p>
+ </li>
+ <li>
+ And delete it again:
+<pre>:: Windows
+C:\&gt; curl -i -u guest:guest -H "content-type:application/json" ^
+ -XDELETE http://localhost:15672/api/exchanges/%2F/my-new-exchange
+
+# Unix
+$ curl -i -u guest:guest -H "content-type:application/json" \
+ -XDELETE http://localhost:15672/api/exchanges/%2F/my-new-exchange
+
+HTTP/1.1 204 No Content
+content-security-policy: default-src 'self'
+date: Mon, 02 Sep 2019 07:56:59 GMT
+server: Cowboy
+vary: accept, accept-encoding, origin</pre>
+ </li>
+ </ul>
+
+ <h2>Reference</h2>
+
+ <table>
+ <tr>
+ <th>GET</th>
+ <th>PUT</th>
+ <th>DELETE</th>
+ <th>POST</th>
+ <th>Path</th>
+ <th>Description</th>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td class="path">/api/overview</td>
+ <td>Various random bits of information that describe the whole
+ system.</td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td>X</td>
+ <td></td>
+ <td></td>
+ <td class="path">/api/cluster-name</td>
+ <td>Name identifying this RabbitMQ cluster.</td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td class="path">/api/nodes</td>
+ <td>A list of nodes in the RabbitMQ cluster.</td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td class="path">/api/nodes/<i>name</i></td>
+ <td>
+ An individual node in the RabbitMQ cluster. Add
+ "?memory=true" to get memory statistics, and "?binary=true"
+ to get a breakdown of binary memory use (may be expensive if
+ there are many small binaries in the system).
+ </td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td class="path">/api/extensions</td>
+ <td>A list of extensions to the management plugin.</td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td></td>
+ <td></td>
+ <td>X</td>
+ <td class="path">/api/definitions<br/>
+ /api/all-configuration <em>(deprecated)</em>
+ </td>
+ <td>
+ The server definitions - exchanges, queues, bindings, users,
+ virtual hosts, permissions, topic permissions, and parameters. Everything apart from
+ messages. POST to upload an existing set of definitions. Note
+ that:
+ <ul>
+ <li>
+ The definitions are merged. Anything already existing on
+ the server but not in the uploaded definitions is
+ untouched.
+ </li>
+ <li>
+ Conflicting definitions on immutable objects (exchanges,
+ queues and bindings) will be ignored. The existing definition
+ will be preserved.
+ </li>
+ <li>
+ Conflicting definitions on mutable objects will cause
+ the object in the server to be overwritten with the
+ object from the definitions.
+ </li>
+ <li>
+ In the event of an error you will be left with a
+ part-applied set of definitions.
+ </li>
+ </ul>
+ For convenience you may upload a file from a browser to this
+ URI (i.e. you can use <code>multipart/form-data</code> as
+ well as <code>application/json</code>) in which case the
+ definitions should be uploaded as a form field named
+ "file".
+ </td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td></td>
+ <td></td>
+ <td>X</td>
+ <td class="path">/api/definitions/<i>vhost</i><br/>
+ </td>
+ <td>
+ The server definitions for a given virtual host -
+ exchanges, queues, bindings and policies.
+ POST to upload an existing set of definitions. Note that:
+ <ul>
+ <li>
+ The definitions are merged. Anything already existing on
+ the server but not in the uploaded definitions is
+ untouched.
+ </li>
+ <li>
+ Conflicting definitions on immutable objects (exchanges,
+ queues and bindings) will be ignored. The existing definition
+ will be preserved.
+ </li>
+ <li>
+ Conflicting definitions on mutable objects will cause
+ the object in the server to be overwritten with the
+ object from the definitions.
+ </li>
+ <li>
+ In the event of an error you will be left with a
+ part-applied set of definitions.
+ </li>
+ </ul>
+ For convenience you may upload a file from a browser to this
+ URI (i.e. you can use <code>multipart/form-data</code> as
+ well as <code>application/json</code>) in which case the
+ definitions should be uploaded as a form field named
+ "file".
+ </td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td class="path">/api/connections</td>
+ <td>A list of all open connections.</td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td class="path">/api/vhosts/<i>vhost</i>/connections</td>
+ <td>A list of all open connections in a specific vhost.</td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td></td>
+ <td>X</td>
+ <td></td>
+ <td class="path">/api/connections/<i>name</i></td>
+ <td>
+ An individual connection. DELETEing it will close the
+ connection. Optionally set the "X-Reason" header when
+ DELETEing to provide a reason.
+ </td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td class="path">/api/connections/<i>name</i>/channels</td>
+ <td>
+ List of all channels for a given connection.
+ </td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td class="path">/api/channels</td>
+ <td>A list of all open channels.</td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td class="path">/api/vhosts/<i>vhost</i>/channels</td>
+ <td>A list of all open channels in a specific vhost.</td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td class="path">/api/channels/<i>channel</i></td>
+ <td>Details about an individual channel.</td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td class="path">/api/consumers</td>
+ <td>A list of all consumers.</td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td class="path">/api/consumers/<i>vhost</i></td>
+ <td>A list of all consumers in a given virtual host.</td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td class="path">/api/exchanges</td>
+ <td>A list of all exchanges.</td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td class="path">/api/exchanges/<i>vhost</i></td>
+ <td>A list of all exchanges in a given virtual host.</td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td>X</td>
+ <td>X</td>
+ <td></td>
+ <td class="path">/api/exchanges/<i>vhost</i>/<i>name</i></td>
+ <td>
+ An individual exchange. To PUT an exchange, you will need a body looking something like this:
+ <pre>{"type":"direct","auto_delete":false,"durable":true,"internal":false,"arguments":{}}</pre>
+ The <code>type</code> key is mandatory; other keys are optional.
+ <p>
+ When DELETEing an exchange you can add the query string
+ parameter <code>if-unused=true</code>. This prevents the
+ delete from succeeding if the exchange is bound to a queue
+ or as a source to another exchange.
+ </p>
+ </td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td class="path">/api/exchanges/<i>vhost</i>/<i>name</i>/bindings/source</td>
+ <td>A list of all bindings in which a given exchange is the source.</td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td class="path">/api/exchanges/<i>vhost</i>/<i>name</i>/bindings/destination</td>
+ <td>A list of all bindings in which a given exchange is the destination.</td>
+ </tr>
+ <tr>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td>X</td>
+ <td class="path">/api/exchanges/<i>vhost</i>/<i>name</i>/publish</td>
+ <td>
+ Publish a message to a given exchange. You will need a body
+ looking something like:
+ <pre>{"properties":{},"routing_key":"my key","payload":"my body","payload_encoding":"string"}</pre>
+ All keys are mandatory. The <code>payload_encoding</code>
+ key should be either "string" (in which case the payload
+ will be taken to be the UTF-8 encoding of the payload field)
+ or "base64" (in which case the payload field is taken to be
+ base64 encoded).<br/>
+ If the message is published successfully, the response will
+ look like:
+ <pre>{"routed": true}</pre>
+ <code>routed</code> will be true if the message was sent to
+ at least one queue.
+ <p>
+ Please note that the HTTP API is not ideal for high
+ performance publishing; the need to create a new TCP
+ connection for each message published can limit message
+ throughput compared to AMQP or other protocols using
+ long-lived connections.
+ </p>
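+ <p>
+ For example, a sketch that publishes to the (hypothetical)
+ <code>my-new-exchange</code> created in the examples above:
+ </p>
+ <pre>$ curl -i -u guest:guest -H "content-type:application/json" \
+ -XPOST -d'{"properties":{},"routing_key":"my key","payload":"my body","payload_encoding":"string"}' \
+ http://localhost:15672/api/exchanges/%2F/my-new-exchange/publish</pre>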
+ </td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td class="path">/api/queues</td>
+ <td>A list of all queues.</td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td class="path">/api/queues/<i>vhost</i></td>
+ <td>A list of all queues in a given virtual host.</td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td>X</td>
+ <td>X</td>
+ <td></td>
+ <td class="path">/api/queues/<i>vhost</i>/<i>name</i></td>
+ <td>
+ An individual queue. To PUT a queue, you will need a body looking something like this:
+ <pre>{"auto_delete":false,"durable":true,"arguments":{},"node":"rabbit@smacmullen"}</pre>
+ All keys are optional.
+ <p>
+ When DELETEing a queue you can add the query string
+ parameters <code>if-empty=true</code> and /
+ or <code>if-unused=true</code>. These prevent the delete
+ from succeeding if the queue contains messages, or has
+ consumers, respectively.
+ </p>
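+ <p>
+ For example, a minimal sketch using a hypothetical queue
+ named <code>my-queue</code>:
+ </p>
+ <pre>$ curl -i -u guest:guest -H "content-type:application/json" \
+ -XPUT -d'{"durable":true}' \
+ http://localhost:15672/api/queues/%2F/my-queue</pre>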
+ </td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td class="path">/api/queues/<i>vhost</i>/<i>name</i>/bindings</td>
+ <td>A list of all bindings on a given queue.</td>
+ </tr>
+ <tr>
+ <td></td>
+ <td></td>
+ <td>X</td>
+ <td></td>
+ <td class="path">/api/queues/<i>vhost</i>/<i>name</i>/contents</td>
+ <td>Contents of a queue. DELETE to purge. Note you can't GET this.</td>
+ </tr>
+
+ <tr>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td>X</td>
+ <td class="path">/api/queues/<i>vhost</i>/<i>name</i>/actions</td>
+ <td>
+ Actions that can be taken on a queue. POST a body like:
+ <pre>{"action":"sync"}</pre> Currently the actions which are
+ supported are <code>sync</code> and <code>cancel_sync</code>.
+ </td>
+ </tr>
+
+ <tr>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td>X</td>
+ <td class="path">/api/queues/<i>vhost</i>/<i>name</i>/get</td>
+ <td>
+ Get messages from a queue. (This is not an HTTP GET as it
+ will alter the state of the queue.) You should post a body looking like:
+ <pre>{"count":5,"ackmode":"ack_requeue_true","encoding":"auto","truncate":50000}</pre>
+ <ul>
+ <li><code>count</code> controls the maximum number of
+ messages to get. You may get fewer messages than this if
+ the queue cannot immediately provide them.</li>
+ <li><code>ackmode</code> determines whether the messages will be
+ removed from the queue. If ackmode is ack_requeue_true or reject_requeue_true they will be requeued -
+ if ackmode is ack_requeue_false or reject_requeue_false they will be removed.</li>
+ <li><code>encoding</code> must be either "auto" (in which case the
+ payload will be returned as a string if it is valid UTF-8, and
+ base64 encoded otherwise), or "base64" (in which case the payload
+ will always be base64 encoded).</li>
+ <li>If <code>truncate</code> is present it will truncate the
+ message payload if it is larger than the size given (in bytes).</li>
+ </ul>
+ <p><code>truncate</code> is optional; all other keys are mandatory.</p>
+ <p>
+ Please note that the get path in the HTTP API is intended
+ for diagnostics etc - it does not implement reliable
+ delivery and so should be treated as a sysadmin's tool
+ rather than a general API for messaging.
+ </p>
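+ <p>
+ For example, a sketch that fetches (and requeues) up to five
+ messages from a hypothetical queue named <code>my-queue</code>:
+ </p>
+ <pre>$ curl -i -u guest:guest -H "content-type:application/json" \
+ -XPOST -d'{"count":5,"ackmode":"ack_requeue_true","encoding":"auto"}' \
+ http://localhost:15672/api/queues/%2F/my-queue/get</pre>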
+ </td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td class="path">/api/bindings</td>
+ <td>A list of all bindings.</td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td class="path">/api/bindings/<i>vhost</i></td>
+ <td>A list of all bindings in a given virtual host.</td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td></td>
+ <td></td>
+ <td>X</td>
+ <td class="path">/api/bindings/<i>vhost</i>/e/<i>exchange</i>/q/<i>queue</i></td>
+ <td>
+ <p>
+ A list of all bindings between an exchange and a
+ queue. Remember, an exchange and a queue can be bound
+ together many times!
+ </p>
+ <p>
+ To create a new binding, POST to this
+ URI. Request body should be a JSON object optionally containing
+ two fields, <code>routing_key</code> (a string) and <code>arguments</code> (a map of optional arguments):
+ <pre>{"routing_key":"my_routing_key", "arguments":{"x-arg": "value"}}</pre>
+ All keys are optional.
+ The response will contain a <code>Location</code> header
+ telling you the URI of your new binding.
+ </p>
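+ <p>
+ For example, a sketch using hypothetical exchange and queue
+ names:
+ </p>
+ <pre>$ curl -i -u guest:guest -H "content-type:application/json" \
+ -XPOST -d'{"routing_key":"my_routing_key"}' \
+ http://localhost:15672/api/bindings/%2F/e/my-exchange/q/my-queue</pre>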
+ </td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td></td>
+ <td>X</td>
+ <td></td>
+ <td class="path">/api/bindings/<i>vhost</i>/e/<i>exchange</i>/q/<i>queue</i>/<i>props</i></td>
+ <td>An individual binding between an exchange and a queue.
+ The <i>props</i> part of the URI is a "name" for the binding
+ composed of its routing key and a hash of its
+ arguments. <i>props</i> is the field named "properties_key"
+ from a bindings listing response.</td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td></td>
+ <td></td>
+ <td>X</td>
+ <td class="path">/api/bindings/<i>vhost</i>/e/<i>source</i>/e/<i>destination</i></td>
+ <td>
+ <p>
+ A list of all bindings between two exchanges, similar to
+ the list of all bindings between an exchange and a queue,
+ above.
+ </p>
+ <p>
+ To create a new binding, POST to this
+ URI. Request body should be a JSON object optionally containing
+ two fields, <code>routing_key</code> (a string) and <code>arguments</code> (a map of optional arguments):
+ <pre>{"routing_key":"my_routing_key", "arguments":{"x-arg": "value"}}</pre>
+ All keys are optional.
+ The response will contain a <code>Location</code> header
+ telling you the URI of your new binding.
+ </p>
+ </td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td></td>
+ <td>X</td>
+ <td></td>
+ <td class="path">/api/bindings/<i>vhost</i>/e/<i>source</i>/e/<i>destination</i>/<i>props</i></td>
+ <td>
+ An individual binding between two exchanges. Similar to
+ the individual binding between an exchange and a queue,
+ above.
+ </td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td class="path">/api/vhosts</td>
+ <td>A list of all vhosts.</td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td>X</td>
+ <td>X</td>
+ <td></td>
+ <td class="path">/api/vhosts/<i>name</i></td>
+ <td>An individual virtual host. As a virtual host usually only
+ has a name, you do not need an HTTP body when PUTing one of
+ these. To set metadata on creation, provide a body like the following:
+ <pre>{"description":"virtual host description", "tags":"accounts,production"}</pre>
+ <code>tags</code> is a comma-separated list of tags.
+ These metadata fields are optional.
+ To enable / disable tracing, provide a body looking like:
+ <pre>{"tracing":true}</pre></td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td class="path">/api/vhosts/<i>name</i>/permissions</td>
+ <td>A list of all permissions for a given virtual host.</td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td class="path">/api/vhosts/<i>name</i>/topic-permissions</td>
+ <td>A list of all topic permissions for a given virtual host.</td>
+ </tr>
+ <tr>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td>X</td>
+ <td class="path">/api/vhosts/<i>name</i>/start/<i>node</i></td>
+ <td>Starts virtual host <i>name</i> on node <i>node</i>.</td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td class="path">/api/users/</td>
+ <td>A list of all users.</td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td class="path">/api/users/without-permissions</td>
+ <td>A list of users that do not have access to any virtual host.</td>
+ </tr>
+ <tr>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td>X</td>
+ <td class="path">/api/users/bulk-delete</td>
+ <td>Bulk deletes a list of users. Request body must contain the list:
+ <pre>{"users" : ["user1", "user2", "user3"]}</pre></td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td>X</td>
+ <td>X</td>
+ <td></td>
+ <td class="path">/api/users/<i>name</i></td>
+ <td>An individual user. To PUT a user, you will need a body looking something like this:
+<pre>{"password":"secret","tags":"administrator"}</pre>
+or:
+<pre>{"password_hash":"2lmoth8l4H0DViLaK9Fxi6l9ds8=", "tags":"administrator"}</pre>
+ The <code>tags</code> key is mandatory. Either
+ <code>password</code> or <code>password_hash</code>
+ can be set. If neither are set the user will not be able to log in with a password,
+ but other mechanisms like client certificates may be used.
+ Setting <code>password_hash</code> to <code>""</code> will ensure the
+ user cannot use a password to log in. <code>tags</code> is a
+ comma-separated list of tags for the user. Currently recognised tags
+ are <code>administrator</code>, <code>monitoring</code> and <code>management</code>.
+ <code>password_hash</code> must be generated using the algorithm described
+ <a href="https://rabbitmq.com/passwords.html#computing-password-hash">here</a>.
+ You may also specify the hash function being used by adding the <code>hashing_algorithm</code>
+ key to the body. Currently recognised algorithms are <code>rabbit_password_hashing_sha256</code>,
+ <code>rabbit_password_hashing_sha512</code>, and <code>rabbit_password_hashing_md5</code>.
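+ <p>
+ For example, a sketch creating a hypothetical user
+ named <code>my-user</code>:
+ </p>
+<pre>$ curl -i -u guest:guest -H "content-type:application/json" \
+ -XPUT -d'{"password":"secret","tags":"management"}' \
+ http://localhost:15672/api/users/my-user</pre>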
+ </td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td class="path">/api/users/<i>user</i>/permissions</td>
+ <td>A list of all permissions for a given user.</td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td class="path">/api/users/<i>user</i>/topic-permissions</td>
+ <td>A list of all topic permissions for a given user.</td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td class="path">/api/user-limits</td>
+ <td>
+ Lists per-user limits for all users.
+ </td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td class="path">/api/user-limits/<i>user</i></td>
+ <td>
+ Lists per-user limits for a specific user.
+ </td>
+ </tr>
+ <tr>
+ <td></td>
+ <td>X</td>
+ <td>X</td>
+ <td></td>
+ <td class="path">/api/user-limits/<i>user</i>/<i>name</i></td>
+ <td>
+ Set or delete per-user limit for <code>user</code>. The <code>name</code> URL path element
+ refers to the name of the limit (<code>max-connections</code>, <code>max-channels</code>).
+ Limits are set using a JSON document in the body: <pre>{"value": 100}</pre>. Example
+ request:<br/>
+ <pre>curl -4u 'guest:guest' -H 'content-type:application/json' -X PUT localhost:15672/api/user-limits/guest/max-connections -d '{"value": 50}'</pre>
+ </td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td class="path">/api/whoami</td>
+ <td>Details of the currently authenticated user.</td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td class="path">/api/permissions</td>
+ <td>A list of all permissions for all users.</td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td>X</td>
+ <td>X</td>
+ <td></td>
+ <td class="path">/api/permissions/<i>vhost</i>/<i>user</i></td>
+ <td>An individual permission of a user and virtual host. To PUT a permission, you will need a body looking something like this:
+<pre>{"configure":".*","write":".*","read":".*"}</pre>
+ All keys are mandatory.</td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td class="path">/api/topic-permissions</td>
+ <td>A list of all topic permissions for all users.</td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td>X</td>
+ <td>X</td>
+ <td></td>
+ <td class="path">/api/topic-permissions/<i>vhost</i>/<i>user</i></td>
+ <td>Topic permissions for a user and virtual host. To PUT a topic permission, you will need a body looking something like this:
+ <pre>{"exchange":"amq.topic","write":"^a","read":".*"}</pre>
+ All keys are mandatory.</td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td class="path">/api/parameters</td>
+ <td>A list of all vhost-scoped parameters.</td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td class="path">/api/parameters/<i>component</i></td>
+ <td>A list of all vhost-scoped parameters for a given component.</td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td class="path">/api/parameters/<i>component</i>/<i>vhost</i></td>
+ <td>A list of all vhost-scoped parameters for a given component and virtual host.</td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td>X</td>
+ <td>X</td>
+ <td></td>
+ <td class="path">/api/parameters/<i>component</i>/<i>vhost</i>/<i>name</i></td>
+ <td>An individual vhost-scoped parameter. To PUT a parameter, you will need a body looking something like this:
+<pre>{"vhost": "/","component":"federation","name":"local_username","value":"guest"}</pre>
+</td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td class="path">/api/global-parameters</td>
+ <td>A list of all global parameters.</td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td>X</td>
+ <td>X</td>
+ <td></td>
+ <td class="path">/api/global-parameters/<i>name</i></td>
+ <td>An individual global parameter. To PUT a parameter, you will need a body looking something like this:
+<pre>{"name":"user_vhost_mapping","value":{"guest":"/","rabbit":"warren"}}</pre>
+ </td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td class="path">/api/policies</td>
+ <td>A list of all policies.</td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td class="path">/api/policies/<i>vhost</i></td>
+ <td>A list of all policies in a given virtual host.</td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td>X</td>
+ <td>X</td>
+ <td></td>
+ <td class="path">/api/policies/<i>vhost</i>/<i>name</i></td>
+ <td>
+ An individual policy. To PUT a policy, you will need a body looking something like this:
+<pre>{"pattern":"^amq.", "definition": {"federation-upstream-set":"all"}, "priority":0, "apply-to": "all"}</pre>
+ <code>pattern</code> and <code>definition</code> are mandatory, <code>priority</code> and <code>apply-to</code> are optional.
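+ <p>
+ For example, a sketch creating a hypothetical policy
+ named <code>my-policy</code>:
+ </p>
+<pre>$ curl -i -u guest:guest -H "content-type:application/json" \
+ -XPUT -d'{"pattern":"^amq.","definition":{"federation-upstream-set":"all"}}' \
+ http://localhost:15672/api/policies/%2F/my-policy</pre>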
+ </td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td class="path">/api/operator-policies</td>
+ <td>A list of all operator policy overrides.</td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td class="path">/api/operator-policies/<i>vhost</i></td>
+ <td>A list of all operator policy overrides in a given virtual host.</td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td>X</td>
+ <td>X</td>
+ <td></td>
+ <td class="path">/api/operator-policies/<i>vhost</i>/<i>name</i></td>
+ <td>
+ An individual operator policy. To PUT a policy, you will need a body looking something like this:
+<pre>{"pattern":"^amq.", "definition": {"expires":100}, "priority":0, "apply-to": "queues"}</pre>
+ <code>pattern</code> and <code>definition</code> are mandatory, <code>priority</code> and <code>apply-to</code> are optional.
+ </td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td class="path">/api/aliveness-test/<i>vhost</i></td>
+ <td>
+ Declares a test queue on the target node, then publishes and consumes a
+ message. Intended to be used as a very basic health check.
+ Responds with a 200 OK if the check succeeded,
+ otherwise responds with a 503 Service Unavailable.
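+ For example, checking the default virtual host:
+ <pre>$ curl -i -u guest:guest http://localhost:15672/api/aliveness-test/%2F</pre>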
+ </td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td class="path">/api/health/checks/alarms</td>
+ <td>
+ Responds with a 200 OK if there are no alarms in effect in the cluster,
+ otherwise responds with a 503 Service Unavailable.
+ </td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td class="path">/api/health/checks/local-alarms</td>
+ <td>
+ Responds with a 200 OK if there are no local alarms in effect on the target node,
+ otherwise responds with a 503 Service Unavailable.
+ </td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td class="path">/api/health/checks/certificate-expiration/<i>within</i>/<i>unit</i></td>
+ <td>
+ <p>
+ Checks the expiration date on the certificates for every listener configured to use TLS.
+ Responds with a 200 OK if all certificates are valid (have not expired),
+ otherwise responds with a 503 Service Unavailable.
+ </p>
+ <p>
+ Valid units: days, weeks, months, years. The value of the <i>within</i> argument is the number of
+ units. So, when <i>within</i> is 2 and <i>unit</i> is "months", the expiration period used by the check
+ will be the next two months.
+ </p>
+ </td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td class="path">/api/health/checks/port-listener/<i>port</i></td>
+ <td>
+ Responds with a 200 OK if there is an active listener on the given port,
+ otherwise responds with a 503 Service Unavailable.
+ </td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td class="path">/api/health/checks/protocol-listener/<i>protocol</i></td>
+ <td>
+ Responds with a 200 OK if there is an active listener for the given protocol,
+ otherwise responds with a 503 Service Unavailable. Valid protocol names are: amqp091, amqp10, mqtt, stomp, web-mqtt, web-stomp.
+ </td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td class="path">/api/health/checks/virtual-hosts</td>
+ <td>
+ Responds with a 200 OK if all virtual hosts are running on the target node,
+ otherwise responds with a 503 Service Unavailable.
+ </td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td class="path">/api/health/checks/node-is-mirror-sync-critical</td>
+ <td>
+ Checks if there are classic mirrored queues without synchronised mirrors online
+ (queues that would potentially lose data if the target node is shut down).
+ Responds with a 200 OK if there are no such classic mirrored queues,
+ otherwise responds with a 503 Service Unavailable.
+ </td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td class="path">/api/health/checks/node-is-quorum-critical</td>
+ <td>
+ Checks if there are quorum queues with minimum online quorum (queues that
+ would lose their quorum and availability if the target node is shut down).
+ Responds with a 200 OK if there are no such quorum queues,
+ otherwise responds with a 503 Service Unavailable.
+ </td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td class="path">/api/vhost-limits</td>
+ <td>
+ Lists per-vhost limits for all vhosts.
+ </td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td class="path">/api/vhost-limits/<i>vhost</i></td>
+ <td>
+ Lists per-vhost limits for specific vhost.
+ </td>
+ </tr>
+ <tr>
+ <td></td>
+ <td>X</td>
+ <td>X</td>
+ <td></td>
+ <td class="path">/api/vhost-limits/<i>vhost</i>/<i>name</i></td>
+ <td>
+ Set or delete per-vhost limit for <code>vhost</code>. The <code>name</code> URL path element
+ refers to the name of the limit (<code>max-connections</code>, <code>max-queues</code>).
+ Limits are set using a JSON document in the body: <pre>{"value": 100}</pre>. Example
+ request:<br/>
+ <pre>curl -4u 'guest:guest' -H 'content-type:application/json' -X PUT localhost:15672/api/vhost-limits/my-vhost/max-connections -d '{"value": 50}'</pre>
+ </td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td class="path">/api/auth</td>
+ <td>
+ Details about the OAuth2 configuration. It will return HTTP
+ status 200 with body: <pre>{"enable_uaa":"boolean", "uaa_client_id":"string", "uaa_location":"string"}</pre>
+ </td>
+ </tr>
+ <tr>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td>X</td>
+ <td class="path">/api/rebalance/queues</td>
+ <td>
+ Rebalances all queues in all vhosts. This operation is asynchronous; check
+ the RabbitMQ log file for messages regarding the success or failure of the operation.
+ <pre>curl -4u 'guest:guest' -XPOST localhost:15672/api/rebalance/queues/</pre>
+ </td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td class="path">/api/federation-links<br/>
+ /api/federation-links/<i>vhost</i></td>
+ <td>
+ Provides status for all federation links. Requires the <code>rabbitmq_federation_management</code> plugin to be enabled.
+ </td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td></td>
+ <td>X</td>
+ <td></td>
+ <td class="path">/api/auth/attempts/<i>node</i></td>
+ <td>
+ A list of authentication attempts.
+ </td>
+ </tr>
+ <tr>
+ <td>X</td>
+ <td></td>
+ <td>X</td>
+ <td></td>
+ <td class="path">/api/auth/attempts/<i>node</i>/source</td>
+ <td>
+ A list of authentication attempts by remote address and username.
+ </td>
+ </tr>
+ </table>
+
+
+ <h2>HTTP API Stats</h2>
+ <p>
+ Most of the GET requests you can issue to the HTTP API return
+ JSON objects with a large number of keys. While a few of these
+ keys represent things you set yourself in a PUT request or AMQP
+ command (e.g. queue durability or arguments), most of them
+ represent statistics to do with the object in question. This
+ page attempts to document them.
+ </p>
+
+ <p>
+ It should be read in conjunction with the manual page
+ for <code>rabbitmqctl</code> (see your installation if on Unix / Linux,
+ or <a href="https://www.rabbitmq.com/rabbitmqctl.8.html">the
+ RabbitMQ website</a> for the latest version). Any field which can
+ be returned by a command of the form <code>rabbitmqctl
+ list_<i>something</i></code> will also be returned in the
+ equivalent part of the HTTP API, so all those keys are not
+ documented here. However, the HTTP API also adds a lot of extra
+ fields which are not available in <code>rabbitmqctl</code>.
+ </p>
+
+ <h2>_details objects</h2>
+ <p>
+ Many fields represent a count of some kind: queue length,
+ messages acknowledged, bytes received and so on. Such absolute
+ counts returned by the HTTP API will often have a
+ corresponding <code>_details</code> object which offers
+ information on how this count has changed. So for example, from
+ a queue:
+ </p>
+<pre> "messages": 123619,
+ "messages_details": {
+ "avg": 41206.333333333336,
+ "avg_rate": 1030.1583333333333,
+ "rate": 24723.8,
+ "samples": [
+ {
+ "sample": 123619,
+ "timestamp": 1400680560000
+ },
+ {
+ "sample": 0,
+ "timestamp": 1400680500000
+ },
+ {
+ "sample": 0,
+ "timestamp": 1400680440000
+ }
+ ]
+ }</pre>
+
+ <p>
+ Here we have a <code>messages</code> count (the total messages
+ in the queue), with some additional data:
+ </p>
+
+ <table>
+ <tr>
+ <td><code>avg</code></td>
+ <td>
+ The average value for the requested time period (see below).
+ </td>
+ </tr>
+ <tr>
+ <td><code>avg_rate</code></td>
+ <td>
+ The average rate for the requested time period.
+ </td>
+ </tr>
+ <tr>
+ <td><code>rate</code></td>
+ <td>
+ How much the count has changed per second in the most recent
+ sampling interval.
+ </td>
+ </tr>
+ <tr>
+ <td><code>samples</code></td>
+ <td>
+ Snapshots showing how the value has changed over the
+ requested time period.
+ </td>
+ </tr>
+ </table>
+
+ <p>
+ <code>avg</code>, <code>avg_rate</code> and <code>samples</code>
+ will only appear if you request a specific time period by
+ appending query parameters to the URL. To do this you need to
+ set an age and an increment for the samples you want. The end of
+ the range returned will always correspond to the present.
+ </p>
+
+ <p>
+ Different types of data take different query parameters to
+ return samples, as in the following table. You can specify more
+ than one set of parameters if the resource you are requesting
+ can generate more than one type of sample (for example, queues
+ can return message rates and queue lengths).
+ </p>
+
+ <table>
+ <tr>
+ <td>Messages sent and received</td>
+ <td><code>msg_rates_age</code> / <code>msg_rates_incr</code></td>
+ </tr>
+ <tr>
+ <td>Bytes sent and received</td>
+ <td><code>data_rates_age</code> / <code>data_rates_incr</code>
+ </td>
+ </tr>
+ <tr>
+ <td>Queue lengths</td>
+ <td><code>lengths_age</code> / <code>lengths_incr</code></td>
+ </tr>
+ <tr>
+ <td>Node statistics (e.g. file descriptors, disk space free)</td>
+ <td><code>node_stats_age</code> / <code>node_stats_incr</code></td>
+ </tr>
+ </table>
+
+ <p>
+ For example,
+ appending <code>?lengths_age=3600&amp;lengths_incr=60</code> will
+ return the last hour's data on queue lengths, with a sample for
+ every minute.
+ </p>
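+
+ <p>
+ For example, a sketch that requests those samples for a
+ hypothetical queue named <code>my-queue</code>:
+ </p>
+<pre>$ curl -i -u guest:guest \
+ 'http://localhost:15672/api/queues/%2F/my-queue?lengths_age=3600&amp;lengths_incr=60'</pre>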
+
+ <h2>message_stats objects</h2>
+ <p>
+ Many objects (including queues, exchanges and channels) will
+ return counts of messages passing through them. These are
+ included in a <code>message_stats</code> object (which in turn
+ will contain <code>_details</code> objects for each count, as
+ described above).
+ </p>
+ <p>
+ These can contain:
+ </p>
+
+ <table>
+ <tr>
+ <td><code>publish</code></td>
+ <td>
+ Count of messages published.
+ </td>
+ </tr>
+ <tr>
+ <td><code>publish_in</code></td>
+ <td>
+ Count of messages published "in" to an exchange, i.e. not
+ taking account of routing.
+ </td>
+ </tr>
+ <tr>
+ <td><code>publish_out</code></td>
+ <td>
+ Count of messages published "out" of an exchange,
+ i.e. taking account of routing.
+ </td>
+ </tr>
+ <tr>
+ <td><code>confirm</code></td>
+ <td>
+ Count of messages confirmed.
+ </td>
+ </tr>
+ <tr>
+ <td><code>deliver</code></td>
+ <td>
+ Count of messages delivered in acknowledgement mode to consumers.
+ </td>
+ </tr>
+ <tr>
+ <td><code>deliver_no_ack</code></td>
+ <td>
+ Count of messages delivered in no-acknowledgement mode to consumers.
+ </td>
+ </tr>
+ <tr>
+ <td><code>get</code></td>
+ <td>
+ Count of messages delivered in acknowledgement mode in
+ response to basic.get.
+ </td>
+ </tr>
+ <tr>
+ <td><code>get_no_ack</code></td>
+ <td>
+ Count of messages delivered in no-acknowledgement mode in
+ response to basic.get.
+ </td>
+ </tr>
+ <tr>
+ <td><code>deliver_get</code></td>
+ <td>
+ Sum of all four of the above.
+ </td>
+ </tr>
+ <tr>
+ <td><code>redeliver</code></td>
+ <td>
+ Count of subset of messages in <code>deliver_get</code>
+ which had the redelivered flag set.
+ </td>
+ </tr>
+ <tr>
+ <td><code>drop_unroutable</code></td>
+ <td>
+ Count of messages dropped as unroutable.
+ </td>
+ </tr>
+ <tr>
+ <td><code>return_unroutable</code></td>
+ <td>
+ Count of messages returned to the publisher as unroutable.
+ </td>
+ </tr>
+ </table>
+
+ <p>
+ Only fields for which some activity has taken place will appear.
+ </p>
+
+ <h2>Detailed message stats objects</h2>
+ <p>
+ In addition, queues, exchanges and channels can return a
+ breakdown of message stats for each of their neighbours
+ (i.e. adjacent objects in the chain: channel -> exchange ->
+ queue -> channel). This will only happen if
+ the <code>rates_mode</code> configuration item has been switched
+ to <code>detailed</code> from its default of <code>basic</code>.
+ </p>
+ <p>
+ As this possibly constitutes a large quantity of data, it is also
+ only returned when querying a single channel, queue or exchange
+ rather than a list. Note also that the default sample retention
+ policy means that these detailed message stats do not retain
+ historical data for more than a few seconds.
+ </p>
+ <p>
+ The detailed message stats objects have different names
+ depending on where they are (documented below). Each set of
+ detailed stats consists of a list of objects with two fields,
+ one identifying the partner object and one <code>stats</code>
+ which is a message_stats object as described above.
+ </p>
+ <p>
+ For example, from a queue:
+ </p>
+ <pre> "incoming": [
+ {
+ "stats": {
+ "publish": 352593,
+ "publish_details": {
+ "rate": 100.2
+ }
+ },
+ "exchange": {
+ "name": "my-exchange",
+ "vhost": "/"
+ }
+ }
+ {
+ "stats": {
+ "publish": 543784,
+ "publish_details": {
+ "rate": 54.6
+ }
+ },
+ "exchange": {
+ "name": "amq.topic",
+ "vhost": "/"
+ }
+ }
+ ],</pre>
+
+ <p>
+ This queue is currently receiving messages from two exchanges:
+ 100.2 msg/s from "my-exchange" and 54.6 msg/s from "amq.topic".
+ </p>
+
+ <h2>/api/overview</h2>
+
+ <p>
+ This has the following fields:
+ </p>
+
+ <table>
+ <tr>
+ <td><code>cluster_name</code></td>
+ <td>
+ The name of the entire cluster, as set with <code>rabbitmqctl
+ set_cluster_name</code>.
+ </td>
+ </tr>
+ <tr>
+ <td><code>contexts</code></td>
+ <td>
+ A list of web application contexts in the cluster.
+ </td>
+ </tr>
+ <tr>
+ <td><code>erlang_full_version</code></td>
+ <td>
+ A string with extended detail about the Erlang VM and how it
+ was compiled, for the node connected to.
+ </td>
+ </tr>
+ <tr>
+ <td><code>erlang_version</code></td>
+ <td>
+ A string with the Erlang version of the node connected
+ to. As clusters should all run the same version this can be
+ taken as representing the cluster.
+ </td>
+ </tr>
+ <tr>
+ <td><code>exchange_types</code></td>
+ <td>
+ A list of all exchange types available.
+ </td>
+ </tr>
+ <tr>
+ <td><code>listeners</code></td>
+ <td>
+ All (non-HTTP) network listeners for all nodes in the
+ cluster. (See <code>contexts</code>
+ in <code>/api/nodes</code> for HTTP).
+ </td>
+ </tr>
+ <tr>
+ <td><code>management_version</code></td>
+ <td>
+ Version of the management plugin in use.
+ </td>
+ </tr>
+ <tr>
+ <td><code>message_stats</code></td>
+ <td>
+ A message_stats object for everything the user can see - for
+ all vhosts regardless of permissions in the case
+ of <code>monitoring</code> and <code>administrator</code>
+ users, and for all vhosts the user has access to for other
+ users.
+ </td>
+ </tr>
+ <tr>
+ <td><code>node</code></td>
+ <td>
+ The name of the cluster node this management plugin instance
+ is running on.
+ </td>
+ </tr>
+ <tr>
+ <td><code>object_totals</code></td>
+ <td>
+ An object containing global counts of all connections,
+ channels, exchanges, queues and consumers, subject to the
+ same visibility rules as for <code>message_stats</code>.
+ </td>
+ </tr>
+ <tr>
+ <td><code>queue_totals</code></td>
+ <td>
+ An object containing sums of
+ the <code>messages</code>, <code>messages_ready</code>
+ and <code>messages_unacknowledged</code> fields for all
+ queues, again subject to the same visibility rules as
+ for <code>message_stats</code>.
+ </td>
+ </tr>
+ <tr>
+ <td><code>rabbitmq_version</code></td>
+ <td>
+ Version of RabbitMQ on the node which processed this request.
+ </td>
+ </tr>
+ <tr>
+ <td><code>rates_mode</code></td>
+ <td>
+ 'none', 'basic' or 'detailed'.
+ </td>
+ </tr>
+ <tr>
+ <td><code>statistics_db_event_queue</code></td>
+ <td>
+ Number of outstanding statistics events yet to be processed
+ by the database.
+ </td>
+ </tr>
+ <tr>
+ <td><code>statistics_db_node</code></td>
+ <td>
+ Name of the cluster node hosting the management statistics database.
+ </td>
+ </tr>
+ </table>
+
+ <h2>/api/nodes</h2>
+
+ <p>
+ This has the following fields:
+ </p>
+
+ <table>
+ <tr>
+ <td><code>applications</code></td>
+ <td>
+ List of all Erlang applications running on the node.
+ </td>
+ </tr>
+ <tr>
+ <td><code>auth_mechanisms</code></td>
+ <td>
+ List of all SASL authentication mechanisms installed on the node.
+ </td>
+ </tr>
+ <tr>
+ <td><code>cluster_links</code></td>
+ <td>
+ A list of the other nodes in the cluster. For each node,
+ there are details of the TCP connection used to connect to
+ it and statistics on data that has been transferred.
+ </td>
+ </tr>
+ <tr>
+ <td><code>config_files</code></td>
+ <td>
+ List of config files read by the node.
+ </td>
+ </tr>
+ <tr>
+ <td><code>contexts</code></td>
+ <td>
+ List of all HTTP listeners on the node.
+ </td>
+ </tr>
+ <tr>
+ <td><code>db_dir</code></td>
+ <td>
+ Location of the persistent storage used by the node.
+ </td>
+ </tr>
+ <tr>
+ <td><code>disk_free</code></td>
+ <td>
+ Disk free space in bytes.
+ </td>
+ </tr>
+ <tr>
+ <td><code>disk_free_alarm</code></td>
+ <td>
+ Whether the disk alarm has gone off.
+ </td>
+ </tr>
+ <tr>
+ <td><code>disk_free_limit</code></td>
+ <td>
+ Point at which the disk alarm will go off.
+ </td>
+ </tr>
+ <tr>
+ <td><code>enabled_plugins</code></td>
+ <td>
+ List of plugins which are both explicitly enabled and running.
+ </td>
+ </tr>
+ <tr>
+ <td><code>exchange_types</code></td>
+ <td>
+ Exchange types available on the node.
+ </td>
+ </tr>
+ <tr>
+ <td><code>fd_total</code></td>
+ <td>
+ File descriptors available.
+ </td>
+ </tr>
+ <tr>
+ <td><code>fd_used</code></td>
+ <td>
+ Used file descriptors.
+ </td>
+ </tr>
+ <tr>
+ <td><code>io_read_avg_time</code></td>
+ <td>
+ Average wall time (milliseconds) for each disk read operation in
+ the last statistics interval.
+ </td>
+ </tr>
+ <tr>
+ <td><code>io_read_bytes</code></td>
+ <td>
+ Total number of bytes read from disk by the persister.
+ </td>
+ </tr>
+ <tr>
+ <td><code>io_read_count</code></td>
+ <td>
+ Total number of read operations by the persister.
+ </td>
+ </tr>
+ <tr>
+ <td><code>io_reopen_count</code></td>
+ <td>
+ Total number of times the persister has needed to recycle
+ file handles between queues. In an ideal world this number
+ will be zero; if the number is large, performance might be
+ improved by increasing the number of file handles available
+ to RabbitMQ.
+ </td>
+ </tr>
+ <tr>
+ <td><code>io_seek_avg_time</code></td>
+ <td>
+ Average wall time (milliseconds) for each seek operation in
+ the last statistics interval.
+ </td>
+ </tr>
+ <tr>
+ <td><code>io_seek_count</code></td>
+ <td>
+ Total number of seek operations by the persister.
+ </td>
+ </tr>
+ <tr>
+ <td><code>io_sync_avg_time</code></td>
+ <td>
+ Average wall time (milliseconds) for each fsync() operation in
+ the last statistics interval.
+ </td>
+ </tr>
+ <tr>
+ <td><code>io_sync_count</code></td>
+ <td>
+ Total number of fsync() operations by the persister.
+ </td>
+ </tr>
+ <tr>
+ <td><code>io_write_avg_time</code></td>
+ <td>
+ Average wall time (milliseconds) for each disk write operation in
+ the last statistics interval.
+ </td>
+ </tr>
+ <tr>
+ <td><code>io_write_bytes</code></td>
+ <td>
+ Total number of bytes written to disk by the persister.
+ </td>
+ </tr>
+ <tr>
+ <td><code>io_write_count</code></td>
+ <td>
+ Total number of write operations by the persister.
+ </td>
+ </tr>
+ <tr>
+ <td><code>log_files</code></td>
+ <td>
+ List of log files used by the node. If the node also sends
+ messages to stdout, "<code>&lt;stdout&gt;</code>" is also
+ reported in the list.
+ </td>
+ </tr>
+ <tr>
+ <td><code>mem_used</code></td>
+ <td>
+ Memory used in bytes.
+ </td>
+ </tr>
+ <tr>
+ <td><code>mem_alarm</code></td>
+ <td>
+ Whether the memory alarm has gone off.
+ </td>
+ </tr>
+ <tr>
+ <td><code>mem_limit</code></td>
+ <td>
+ Point at which the memory alarm will go off.
+ </td>
+ </tr>
+ <tr>
+ <td><code>mnesia_disk_tx_count</code></td>
+ <td>
+ Number of Mnesia transactions which have been performed that
+ required writes to disk. (e.g. creating a durable
+ queue). Only transactions which originated on this node are
+ included.
+ </td>
+ </tr>
+ <tr>
+ <td><code>mnesia_ram_tx_count</code></td>
+ <td>
+ Number of Mnesia transactions which have been performed that
+ did not require writes to disk. (e.g. creating a transient
+ queue). Only transactions which originated on this node are
+ included.
+ </td>
+ </tr>
+ <tr>
+ <td><code>msg_store_read_count</code></td>
+ <td>
+ Number of messages which have been read from the message store.
+ </td>
+ </tr>
+ <tr>
+ <td><code>msg_store_write_count</code></td>
+ <td>
+ Number of messages which have been written to the message store.
+ </td>
+ </tr>
+ <tr>
+ <td><code>name</code></td>
+ <td>
+ Node name.
+ </td>
+ </tr>
+ <tr>
+ <td><code>net_ticktime</code></td>
+ <td>
+ Current kernel net_ticktime setting for the node.
+ </td>
+ </tr>
+ <tr>
+ <td><code>os_pid</code></td>
+ <td>
+ Process identifier for the Operating System under which this
+ node is running.
+ </td>
+ </tr>
+ <tr>
+ <td><code>partitions</code></td>
+ <td>
+ List of network partitions this node is seeing.
+ </td>
+ </tr>
+ <tr>
+ <td><code>proc_total</code></td>
+ <td>
+ Maximum number of Erlang processes.
+ </td>
+ </tr>
+ <tr>
+ <td><code>proc_used</code></td>
+ <td>
+ Number of Erlang processes in use.
+ </td>
+ </tr>
+ <tr>
+ <td><code>processors</code></td>
+ <td>
+ Number of cores detected and usable by Erlang.
+ </td>
+ </tr>
+ <tr>
+ <td><code>queue_index_journal_write_count</code></td>
+ <td>
+ Number of records written to the queue index journal. Each
+ record represents a message being published to a queue,
+ being delivered from a queue, and being acknowledged in a
+ queue.
+ </td>
+ </tr>
+ <tr>
+ <td><code>queue_index_read_count</code></td>
+ <td>
+ Number of records read from the queue index.
+ </td>
+ </tr>
+ <tr>
+ <td><code>queue_index_write_count</code></td>
+ <td>
+ Number of records written to the queue index.
+ </td>
+ </tr>
+ <tr>
+ <td><code>rates_mode</code></td>
+ <td>
+ 'none', 'basic' or 'detailed'.
+ </td>
+ </tr>
+ <tr>
+ <td><code>run_queue</code></td>
+ <td>
+ Average number of Erlang processes waiting to run.
+ </td>
+ </tr>
+ <tr>
+ <td><code>running</code></td>
+ <td>
+ Boolean for whether this node is up. Obviously if this is
+ false, most other stats will be missing.
+ </td>
+ </tr>
+ <tr>
+ <td><code>sasl_log_file</code></td>
+ <td>
+ Location of <a href="https://www.erlang.org/doc/man/sasl_app.html">sasl</a> log file.
+ </td>
+ </tr>
+ <tr>
+ <td><code>sockets_total</code></td>
+ <td>
+ File descriptors available for use as sockets.
+ </td>
+ </tr>
+ <tr>
+ <td><code>sockets_used</code></td>
+ <td>
+ File descriptors used as sockets.
+ </td>
+ </tr>
+ <tr>
+ <td><code>type</code></td>
+ <td>
+ 'disc' or 'ram'.
+ </td>
+ </tr>
+ <tr>
+ <td><code>uptime</code></td>
+ <td>
+ Time since the Erlang VM started, in milliseconds.
+ </td>
+ </tr>
+ </table>
+
+ <h2>/api/nodes/(name)</h2>
+
+ <p>
+ All of the above, plus:
+ </p>
+
+ <table>
+ <tr>
+ <td><code>memory</code></td>
+ <td>
+ Detailed memory use statistics. Only appears
+ if <code>?memory=true</code> is appended to the URL.
+ </td>
+ </tr>
+ <tr>
+ <td><code>binary</code></td>
+ <td>
+ Detailed breakdown of the owners of binary memory. Only
+ appears if <code>?binary=true</code> is appended to the
+ URL. Note that this can be an expensive query if there are
+ many small binaries in the system.
+ </td>
+ </tr>
+ </table>
+
+ <h2>/api/connections</h2>
+ <h2>/api/connections/(name)</h2>
+
+ <p>
+ See documentation for <code>rabbitmqctl
+ list_connections</code>. No additional fields,
+ although <code>pid</code> is replaced by <code>node</code>.
+ </p>
+
+ <p>
+ Note also that while non-AMQP connections will appear in this
+ list (unlike <code>rabbitmqctl list_connections</code>), they
+ will omit many of the connection-level statistics.
+ </p>
+
+ <h2>/api/connections/(name)/channels</h2>
+ <h2>/api/channels</h2>
+
+ <p>
+ See documentation for <code>rabbitmqctl list_channels</code>,
+ with <code>pid</code> replaced by <code>node</code>, plus:
+ </p>
+
+ <table>
+ <tr>
+ <td><code>connection_details</code></td>
+ <td>
+ Some basic details about the owning connection.
+ </td>
+ </tr>
+ <tr>
+ <td><code>message_stats</code></td>
+ <td>
+ See the section on message_stats above.
+ </td>
+ </tr>
+ </table>
+
+ <h2>/api/channels/(name)</h2>
+
+ <p>
+ All the above, plus
+ </p>
+
+ <table>
+ <tr>
+ <td><code>publishes</code></td>
+ <td>
+ Detailed message stats (see section above) for publishes to
+ exchanges.
+ </td>
+ </tr>
+ <tr>
+ <td><code>deliveries</code></td>
+ <td>
+ Detailed message stats for deliveries from queues.
+ </td>
+ </tr>
+ <tr>
+ <td><code>consumer_details</code></td>
+ <td>
+ List of consumers on this channel, with some details on each.
+ </td>
+ </tr>
+ </table>
+
+ <h2>/api/exchanges</h2>
+ <h2>/api/exchanges/(vhost)</h2>
+
+ <p>
+ See documentation for <code>rabbitmqctl list_exchanges</code>, plus:
+ </p>
+
+ <table>
+ <tr>
+ <td><code>message_stats</code></td>
+ <td>
+ See the section on message_stats above.
+ </td>
+ </tr>
+ </table>
+
+ <h2>/api/exchanges/(vhost)/(name)</h2>
+
+ <p>
+ All the above, plus:
+ </p>
+
+ <table>
+ <tr>
+ <td><code>incoming</code></td>
+ <td>
+ Detailed message stats (see section above) for publishes
+ from channels into this exchange.
+ </td>
+ </tr>
+ <tr>
+ <td><code>outgoing</code></td>
+ <td>
+ Detailed message stats for publishes from this exchange into
+ queues.
+ </td>
+ </tr>
+ </table>
+
+ <h2>/api/queues</h2>
+ <h2>/api/queues/(vhost)</h2>
+
+ <p>
+ See documentation for <code>rabbitmqctl list_queues</code>, with
+ all references to <code>pid</code>s replaced by <code>node</code>s,
+ plus:
+ </p>
+
+ <table>
+ <tr>
+ <td><code>message_stats</code></td>
+ <td>
+ See the section on message_stats above.
+ </td>
+ </tr>
+ </table>
+
+ <h2>/api/queues/(vhost)/(name)</h2>
+
+ <p>
+ All the above, plus:
+ </p>
+
+ <table>
+ <tr>
+ <td><code>incoming</code></td>
+ <td>
+ Detailed message stats (see section above) for
+ publishes from exchanges into this queue.
+ </td>
+ </tr>
+ <tr>
+ <td><code>deliveries</code></td>
+ <td>
+ Detailed message stats for deliveries from this queue into
+ channels.
+ </td>
+ </tr>
+ <tr>
+ <td><code>consumer_details</code></td>
+ <td>
+ List of consumers on this queue, with some details on each.
+ </td>
+ </tr>
+ </table>
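+
+ <p>
+ As an illustrative sketch (not part of the API itself), the consumer
+ details of a single queue could be fetched with Python's standard
+ library as below. The queue name <code>my-queue</code> and the default
+ <code>guest:guest</code> credentials are placeholders; note that the
+ default vhost <code>/</code> must be percent-encoded as <code>%2F</code>
+ in the URL.
+ </p>
+
+<pre>
+import base64, json, urllib.parse, urllib.request
+
+vhost = urllib.parse.quote("/", safe="")  # "/" -> "%2F"
+url = "http://localhost:15672/api/queues/" + vhost + "/my-queue"
+req = urllib.request.Request(url)
+req.add_header("Authorization",
+               "Basic " + base64.b64encode(b"guest:guest").decode())
+with urllib.request.urlopen(req) as resp:
+    queue = json.load(resp)
+for consumer in queue.get("consumer_details", []):
+    print(consumer["consumer_tag"])
+</pre>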
+
+ <h2>/api/vhosts</h2>
+ <h2>/api/vhosts/(name)</h2>
+
+ <p>
+ All the fields from <code>rabbitmqctl list_vhosts</code>
+ (i.e. <code>name</code> and <code>tracing</code>), plus:
+ </p>
+
+ <table>
+ <tr>
+ <td><code>message_stats</code></td>
+ <td>
+ Global message_stats for this vhost. Note that activity for
+ other users in this vhost <b>is</b> shown, even for users
+ without the <code>monitoring</code> tag.
+ </td>
+ </tr>
+ <tr>
+ <td><code>messages</code> <code>messages_ready</code> <code>messages_unacknowledged</code></td>
+ <td>
+ Sum of these fields for all queues in the vhost.
+ </td>
+ </tr>
+ <tr>
+ <td><code>recv_oct</code> <code>send_oct</code></td>
+ <td>
+ Sum of these fields for all connections to the vhost.
+ </td>
+ </tr>
+ </table>
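+
+ <p>
+ A short Python sketch, again assuming the default placeholder
+ credentials, that lists these per-vhost message totals:
+ </p>
+
+<pre>
+import base64, json, urllib.request
+
+req = urllib.request.Request("http://localhost:15672/api/vhosts")
+req.add_header("Authorization",
+               "Basic " + base64.b64encode(b"guest:guest").decode())
+with urllib.request.urlopen(req) as resp:
+    vhosts = json.load(resp)
+for v in vhosts:
+    # the totals may be absent for a vhost with no queues yet
+    print(v["name"], v.get("messages", 0), v.get("messages_ready", 0))
+</pre>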
+ </body>
+</html>
diff --git a/deps/rabbitmq_management/priv/www/cli/index.html b/deps/rabbitmq_management/priv/www/cli/index.html
new file mode 100644
index 0000000000..73f79da693
--- /dev/null
+++ b/deps/rabbitmq_management/priv/www/cli/index.html
@@ -0,0 +1,45 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <meta http-equiv="X-UA-Compatible" content="IE=edge" />
+ <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
+ <title>rabbitmqadmin</title>
+ <style>
+ body { font: 12px Verdana,sans-serif; color: #444; padding: 8px 35px; }
+ h1 { font-size: 2em; }
+ code { background: #ffa; }
+ </style>
+ </head>
+ <body>
+ <h1>rabbitmqadmin</h1>
+
+ <p>
+ <code>rabbitmqadmin</code> is an HTTP API-based management tool. It uses the same API as the RabbitMQ management UI
+ and provides access to a subset of the UI's functionality from the command line.
+ It is not meant to be a replacement for <a href="https://www.rabbitmq.com/man/rabbitmqctl.1.man.html">rabbitmqctl</a>,
+ as there are operations that the HTTP API intentionally does not expose.
+ </p>
+
+ <p>
+ To use it, <a href="rabbitmqadmin">download it</a> from this node (right-click,
+ Save As), make sure it is executable, and drop it in your <code>PATH</code>. Note that
+ many browsers will rename the
+ file to <code>rabbitmqadmin.txt</code>. <code>rabbitmqadmin</code> requires Python
+ 2.6 or later (both the 2.x and 3.x series are supported).
+ To use <code>rabbitmqadmin</code> with HTTPS, Python 2.7.9 is the minimum supported version.
+ </p>
+
+ <p>
+ See <a href="https://www.rabbitmq.com/management-cli.html">the
+ rabbitmqadmin page on the website</a> for more information on
+ its use, or invoke <code>rabbitmqadmin --help</code> for usage
+ instructions.
+ </p>
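+
+ <p>
+ A few typical invocations (the queue name and payload below are just
+ placeholders):
+ </p>
+
+<pre>
+rabbitmqadmin list queues
+rabbitmqadmin declare queue name=my-queue durable=true
+rabbitmqadmin publish exchange=amq.default routing_key=my-queue payload="hello"
+rabbitmqadmin get queue=my-queue
+</pre>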
+
+ <p>
+ Windows users will need to ensure Python is on
+ their <code>PATH</code>, and invoke <code>rabbitmqadmin</code> as <code>python.exe
+ rabbitmqadmin</code>.
+ </p>
+ </body>
+</html>
diff --git a/deps/rabbitmq_management/priv/www/css/evil.css b/deps/rabbitmq_management/priv/www/css/evil.css
new file mode 100644
index 0000000000..016ceb2eea
--- /dev/null
+++ b/deps/rabbitmq_management/priv/www/css/evil.css
@@ -0,0 +1 @@
+#login { text-align: center; }
diff --git a/deps/rabbitmq_management/priv/www/css/main.css b/deps/rabbitmq_management/priv/www/css/main.css
new file mode 100644
index 0000000000..05b8500e3f
--- /dev/null
+++ b/deps/rabbitmq_management/priv/www/css/main.css
@@ -0,0 +1,361 @@
+body { font: 12px Verdana, sans-serif; color: #484848; padding: 0; margin: 0; }
+input, button, a.button { font: 12px Verdana, sans-serif; }
+
+a { font-weight: bold; color: #444; text-decoration: none; }
+a:hover { color: #F60; }
+
+#outer { padding: 0 0 1em 0; width: 95%; margin: auto; }
+
+#login table { margin: auto; }
+#login p { text-align: center; }
+
+#logo { padding: 0 0 2em 0; }
+#logo img { margin: 1em 0 -0.3em 1em; border: none; }
+
+#versions abbr { background: #f0f0f0; margin: 0 0 0 1em; }
+
+.status-ok { }
+.status-error { color: #F00; }
+.status-timeout { color: #99EBFF; }
+
+#debug { position: fixed; bottom: 0; z-index: 9; width: 100%; text-align: center; padding: 0; margin: 0; }
+#debug p { background: #F60; color: white; margin: 0; padding: 1em; font-size: 2em; }
+
+#header { background: white; position: fixed; z-index: 1; width: 95%; margin: auto; padding: 1em 0 0 0; border-bottom: 1px solid #666; }
+
+#topnav { float: right; padding: 0; margin: 0; list-style-type: none; }
+#topnav form { display: inline; }
+#topnav input[type=submit] { padding: 3px 7px; display: inline; }
+#topnav li { text-align: right; padding: 2px 0; }
+
+#menu ul { padding: 0; margin: 0; overflow: auto; }
+#menu li { float: left; list-style-type: none; padding: 0 0.1em 0 0; }
+#menu li a { display: block; padding: 0.7em 1.3em; margin-right: 5px; }
+#menu a:hover { background-color: #F60; color: white; -moz-border-radius: 8px 8px 0 0; border-radius: 8px 8px 0 0; }
+#menu a.selected { background-color: #666; color: white; -moz-border-radius: 8px 8px 0 0; border-radius: 8px 8px 0 0; }
+#vhost-form { float: right; padding: 0; margin: 0; }
+
+#main { padding-top: 10em; }
+#main.with-rhs { margin-right: 210px; }
+#rhs { float: right; width: 200px; background-color: white; position: relative; padding-top: 10em; }
+#rhs ul { padding: 0; margin: 10px 0 0 0; }
+#rhs li { list-style-type: none; padding: 0; margin-bottom: 5px; }
+#rhs a { display: block; padding: 0.7em; font-weight: bold; text-decoration: none; }
+#rhs a:hover { background-color: #F60; color: white; -moz-border-radius: 8px 0 0 8px; border-radius: 8px 0 0 8px; }
+#rhs a.selected { background-color: #666; color: white; -moz-border-radius: 8px 0 0 8px; border-radius: 8px 0 0 8px; }
+
+h1 { font-size: 2em; font-weight: normal; padding: 0; margin-bottom: 0; }
+b, dt { color: black; font-weight: normal; }
+dd { margin-bottom: 5px; }
+div.box, div.section, div.section-hidden { overflow: auto; width: 100%; }
+
+.left { float: left; }
+.right { float: right; }
+.clear { clear: both; }
+
+.shortinput { width: 50px; text-align: right; }
+
+.help:after { content: '?'; }
+.help,
+.popup-options-link { background-color: #E4E4E4; padding: 2px 4px; cursor: pointer; }
+table th .help,
+table th .popup-options-link { border: none; }
+.help:hover,
+.popup-options-link:hover,
+.popup-owner { background-color: #F60; color: white; }
+
+.rate-visibility-option { cursor: pointer; padding: 4px; background: #fafafa; border: 1px solid #f0f0f0; border-radius: 3px; display:block; }
+.rate-visibility-option:hover { background: #ddf;
+ background: -webkit-gradient(linear, left top, left bottom, color-stop(0, #ddf),color-stop(1, #bbf));
+ border: 1px solid #88d;
+ border-radius: 3px; }
+
+.rate-visibility-option-hidden { text-decoration: line-through; color: #888; }
+
+
+table.legend { float: left; }
+table.legend th { padding: 4px 10px 4px 0; width: 80px; }
+table.legend td { padding: 4px 0 4px 10px; width: 130px; }
+
+.tag-link, .argument-link { color: #444; cursor: pointer; font-weight: bold; }
+.tag-link:hover, .argument-link:hover { color: #F60; }
+
+.filter { overflow: auto; width: 100%; margin-bottom: 10px; }
+.filter table { float: left; }
+.filter label { margin-top: 4px;}
+.filter input#filter-regex-mode { vertical-align: middle; }
+.filter p#filter-truncate { float: right; padding: 4px; margin: 0; }
+.filter p.filter-warning { border-radius: 5px; background: #ff8; }
+.filter-active { background: #99EBFF; border-radius: 5px; }
+.filter-highlight { background: #99EBFF; }
+
+input#truncate { width: 50px; text-align: right; }
+
+table { border-collapse: collapse; }
+table th { font-weight: normal; color: black; padding: 6px 5px 5px 5px; line-height: 1em; }
+table td { padding: 2px 5px; }
+table.list th, table.list td { vertical-align: top; min-width: 5em; width: auto; }
+
+table.list { border-width: 1px; margin-bottom: 1em; }
+table.list th, table.list td { border: 1px solid #ccc; }
+table.list th { text-align: left; }
+table.list th.plus-minus { border: none; min-width: 2em; }
+table.list td a { display: block; color: black; text-decoration: none; font-weight: bold; }
+table.list td a:hover { color: #F60; }
+table.list th a.sort { display: block; width: 100%; cursor: pointer; color: black; font-weight: bold; }
+table.list th a.sort .arrow { color: #F60; }
+table.list td p { margin: 0; padding: 1px 0 0 0; }
+table.list td p.warning { margin: 0; padding: 5px; }
+
+table.list td.plain, table.list td.plain td, table.list td.plain th { border: none; background: none; }
+table.list th.plain { border-left: none; border-top: none; border-right: none; background: none; }
+table.list th.plain h3 { margin: 0; border: 0; }
+
+#main .internal-purpose, #main .internal-purpose * { color: #aaa; }
+
+div.section table.list, div.section-hidden table.list { margin-bottom: 0; }
+
+div.memory-bar { margin: 10px 0 5px 0; border-radius: 5px; border: 1px solid #ddd; float: left; }
+div.memory-section { float: left; height: 30px; }
+div.colour-key { float: left; width: 10px; height: 10px; margin: 3px 5px 0 0;}
+div.memory-info { float: left; padding: 10px 10px 0 0; }
+button.memory-button { margin-top: 10px; }
+
+div.memory_queue { background: #bd4688; }
+div.memory_binary { background: url(../img/bg-binary.png); }
+div.memory_conn { background: #dada66; }
+div.memory_proc { background: #6abf59; }
+div.memory_table { background: #6679da; }
+div.memory_system { background: #999; }
+div.memory_unused { background: #955; }
+
+div.memory-bar div.memory_queue { border-right: solid 1px #eb50a6; }
+div.memory-bar div.memory_binary { border-right: solid 1px #eb50a6; }
+div.memory-bar div.memory_conn { border-right: solid 1px #ebeb8d; }
+div.memory-bar div.memory_proc { border-right: solid 1px #79da66; }
+div.memory-bar div.memory_table { border-right: solid 1px #8d9ceb; }
+div.memory-bar div.memory_system { border-right: solid 1px #bbb; }
+div.memory-bar div.memory_unused { border-right: solid 1px #bbb; }
+
+sub { display: block; font-size: 0.8em; color: #888; }
+small { font-size: 0.8em; color: #888; }
+#main sub a { color: #888; }
+#main sub a:hover { color: #444; }
+table.argument-links { color: #888; }
+table.argument-links td { vertical-align: top; }
+.unknown { color: #888; }
+
+table.facts { float: left; }
+table.facts th, table.legend th { color: black; text-align: right; border-right: 1px solid #ccc; }
+table.facts th, table.facts td { vertical-align: top; padding: 0 10px 10px 10px; }
+table.facts th.horizontal { border-right: none; padding: 0 10px 5px 10px; }
+
+table.facts-long th { text-align: right; font-weight: bold; }
+table.facts-long th, table.facts-long td { vertical-align: top; }
+
+table.facts-l { margin-right: 50px; }
+
+table.mini th { border: none; padding: 0 2px 2px 2px; text-align: right; }
+table.mini td { border: none; padding: 0 2px 2px 2px; }
+
+tr.alt1>td {
+ background: #eee;
+ background: -moz-linear-gradient(center top, #f0f0f0 0%,#e0e0e0 100%);
+ background: -webkit-gradient(linear, left top, left bottom, color-stop(0, #f0f0f0),color-stop(1, #e0e0e0));
+}
+tr.alt2>td {
+ background: #fff;
+ background: -moz-linear-gradient(center top, #F8F8F8 0%,#ffffff 100%);
+ background: -webkit-gradient(linear, left top, left bottom, color-stop(0, #F8F8F8),color-stop(1, #ffffff));
+}
+
+td span,
+td abbr {
+ display: inline-block;
+ padding: 2px 4px;
+ margin: 0 0 3px 0;
+}
+
+div.status-bar, div.status-red, div.status-yellow, div.status-green, div.status-grey { text-align: center; }
+div.status-bar-main, div.status-red, div.status-yellow, div.status-green, div.status-grey { border-radius: 3px; -moz-border-radius: 3px; padding: 3px; }
+div.status-bar sub { white-space: nowrap; }
+
+div.status-bar .grey, div.status-grey { background: #ddd; }
+div.status-bar .red, div.status-red { background: #ff7a7a; color: white; }
+div.status-bar .yellow, div.status-yellow { background: #ffff7b; }
+div.status-bar .green, div.status-green { background: #98f898; }
+div.status-bar .red-dark { background: #e24545; color: white; }
+/* yellow-dark and green-dark can never happen */
+div.status-bar .red *, div.status-bar .red-dark *, div.status-red * { color: white; }
+
+div.status-key-grey { background: #ddd; }
+div.status-key-red { background: #ff7a7a; color: white; }
+div.status-key-yellow { background: #ffff7b; }
+div.status-key-green { background: #98f898; }
+
+.l { text-align: left !important; }
+.c { text-align: center !important; }
+.r { text-align: right !important; }
+.t { vertical-align: top !important; }
+
+div.form-popup-warn,
+div.form-popup-info,
+div.form-popup-help,
+div.form-popup-options {
+ -moz-border-radius: 5px 0 0 5px;
+ background: #EEE;
+ border-radius: 5px 0 0 5px;
+ border: 1px solid #ccc;
+ right: 0;
+ margin: 10px 0 10px 0;
+ padding: 15px;
+ position: fixed;
+ top: 0;
+}
+div.form-popup-warn,
+div.form-popup-info,
+div.form-popup-help {
+ -moz-border-radius: 5px;
+ border-radius: 5px;
+ left: 50%;
+ margin-left: -250px;
+ text-align: left;
+ top: 25%;
+ width: 500px;
+ z-index: 2;
+}
+p.warning, div.form-popup-warn { background: #FF9; }
+
+div.form-popup-options { z-index: 3; }
+
+div.form-popup-warn span,
+div.form-popup-info span,
+div.form-popup-help span,
+div.form-popup-options span {
+ color: white;
+ background-color: #666;
+ cursor: pointer;
+ padding: 4px 8px;
+ border-radius: 5px;
+ -moz-border-radius: 5px;
+}
+div.form-popup-warn span:hover,
+div.form-popup-info span:hover,
+div.form-popup-help span:hover,
+div.form-popup-options span:hover {
+ background-color: #F60;
+ cursor: pointer;
+}
+
+p.warning { padding: 15px; border-radius: 5px; -moz-border-radius: 5px; text-align: center; }
+
+.highlight { min-width: 120px; font-size: 120%; text-align:center; padding:10px; background-color: #ddd; margin: 0 20px 0 0; color: #888; border-radius: 5px; -moz-border-radius: 5px; }
+.highlight strong { font-size: 2em; display: block; color: #444; font-weight: normal; }
+.highlight { float: left; }
+
+.chart { margin: 0 20px 20px 0; float: left; }
+.chart-small { width: 400px; height: 100px; }
+.chart-medium { width: 600px; height: 200px; }
+.chart-large { width: 800px; height: 300px; }
+
+ul#global-counts { overflow: hidden; list-style-type: none; margin: 0; padding: 0; }
+ul#global-counts li { float: left; margin: 0 1em 0 0; }
+
+div.section, div.section-hidden { margin: 0 0 1em 0; }
+div.section-invisible div.hider { display: none; }
+div.section div.hider, div.section-hidden div.hider { padding: 0.5em 0; }
+div.section h2, div.section-hidden h2 { font-size: 1em; padding: 5px 5px 5px 25px; cursor: pointer; margin: 0; }
+div.section h2:hover, div.section-hidden h2:hover { color: black; }
+div.section-invisible h2 { background: white; background-image: url(../img/collapse.png); background-repeat:no-repeat; background-position:4px 4px; }
+div.section-visible h2 { background: #F8F8F8; background-image: url(../img/expand.png); background-repeat:no-repeat; background-position:4px 4px; }
+
+form { margin: 0; }
+form.inline-form { float: left; }
+form.inline-form-right { float: right; padding-left: 5px; }
+input, select { padding: 0.2em; }
+input[type=text], input[type=password] { font: 1.1em Andale Mono, Lucidatypewriter, Courier New, Courier, monospace; border: 1px solid #ccc; }
+textarea { width: 600px; height: 200px; border: 1px solid #ccc; }
+.mand { color: #f88; padding: 0 5px;}
+input[type=submit].wait { cursor: wait; }
+
+table.form { margin-bottom: 0.5em; }
+table.form th { text-align: right; vertical-align: top; }
+table.form input[type=text], table.form input[type=password] { width: 200px; }
+table.form input[type=text].wide, table.form input[type=password].wide { width: 300px; }
+table.form select { width: auto; }
+table.form select.narrow { width: 110px; }
+table.form .multifield { margin: 0; padding: 0; }
+table.form .multifield td { margin: 0; padding: 0; vertical-align: top; }
+table.form .multifield td.equals { padding: 3px; }
+table.form .multifield td input { float: left; }
+table.form .multifield td select { width: auto; display: block; float: left; margin-left: 5px; }
+table.form label { margin-top: 5px; display: block; }
+
+table.form table.subform { margin-bottom: 5px; }
+table.form table.subform th { text-align: left; }
+table.form table.subform th, table.form table.subform td { padding: 0; }
+
+.multifield-sub { border: 1px solid #ddd; background: #F8F8F8; padding: 10px; border-radius: 5px; -moz-border-radius: 5px; float: left; margin-bottom: 10px; }
+
+label.radio, label.checkbox { padding: 5px; cursor: pointer; border-radius: 5px; -moz-border-radius: 5px; border: 1px solid #ccc; }
+
+table.two-col-layout { width: 100%; }
+table.two-col-layout > tbody > tr > td { width: 50%; vertical-align: top; }
+
+input[type=submit], button, a.button { padding: 8px; border-radius: 5px; -moz-border-radius: 5px; text-decoration: none !important; cursor: pointer; display: block; font-weight: normal !important; }
+table.list input[type=submit], table.list button { padding: 3px 7px; margin: 0 0 3px 0; }
+table.list input[type=submit], table.list button, table.list a.button { padding: 3px 7px; margin: 0 0 3px 0; }
+
+input[type=submit], button, a.button {
+ background: #666;
+ color: #FFF !important;
+ border: 0;
+}
+input[type=submit]:hover, button:hover, a.button:hover {
+ background: #F60;
+ text-decoration: none !important;
+}
+
+input[type=submit][disabled], button[disabled], a.button.disabled { pointer-events: none; background: #aaa; }
+input[type=submit][disabled]:hover, button[disabled]:hover, a.button.disabled { background: #aaa; }
+
+h3 { padding: 0 0 2px 0; margin: 1em 0 1em 0; font-size: 1em; border-bottom: 1px solid #E4E4E4; font-weight: normal; }
+
+abbr { background: #99EBFF; padding: 2px 4px; border-radius: 5px; -moz-border-radius: 5px; border: none; cursor: default; text-decoration: none; }
+
+table.list td abbr a { display: inline; width: auto; }
+
+abbr.warning { background: red; }
+
+.status-red abbr, .status-yellow abbr, .status-green abbr, .status-grey abbr, small abbr, abbr.normal { background: none; color: inherit; padding: 0; border-bottom: 1px dotted; cursor: default; }
+
+abbr.status-grey { background: #ddd; }
+abbr.status-green { background: #98f898; }
+abbr.status-yellow { background: #ffff7b; }
+abbr.status-red { background: #ff7a7a; color: white; }
+
+abbr.type { background: none; color: inherit; padding: 0; border-bottom: 1px dotted #ddd; cursor: default; }
+
+div.bindings-wrapper { display: inline-block; }
+div.bindings-wrapper table { margin: auto; }
+div.bindings-wrapper p { margin: 10px; text-align: center; }
+div.bindings-wrapper span.exchange { border: 1px solid #bbb; padding: 10px; border-radius: 5px; -moz-border-radius: 5px; }
+div.bindings-wrapper span.queue { border: 1px solid #666; padding: 10px; }
+div.bindings-wrapper td span.exchange, div.bindings-wrapper td span.queue { background: white; display: block; }
+div.bindings-wrapper span.exchange a, div.bindings-wrapper span.queue a { font-weight: normal !important; }
+div.bindings-wrapper p.arrow { font-size: 200%; }
+
+#footer { overflow: auto; width: 100%; border-top: 1px solid #666; }
+#footer ul { list-style-type: none; padding: 0; margin: 0; }
+#footer ul li { float: left; }
+#footer ul li a { display: block; padding: 0.7em 1em; }
+
+#scratch { display: none; }
+
+.highlight, .mini-highlight, .micro-highlight {
+ background: -moz-linear-gradient(center top, #f0f0f0 0%,#e0e0e0 100%);
+ background: -webkit-gradient(linear, left top, left bottom, color-stop(0, #f0f0f0),color-stop(1, #e0e0e0));
+ border: 1px solid #e0e0e0;
+}
+
+table.dynamic-shovels td label { width: 200px; margin-right: 10px; padding: 4px 0 5px 0; }
diff --git a/deps/rabbitmq_management/priv/www/favicon.ico b/deps/rabbitmq_management/priv/www/favicon.ico
new file mode 100644
index 0000000000..cc5b8b9217
--- /dev/null
+++ b/deps/rabbitmq_management/priv/www/favicon.ico
Binary files differ
diff --git a/deps/rabbitmq_management/priv/www/img/bg-binary.png b/deps/rabbitmq_management/priv/www/img/bg-binary.png
new file mode 100644
index 0000000000..dc136bfe8d
--- /dev/null
+++ b/deps/rabbitmq_management/priv/www/img/bg-binary.png
Binary files differ
diff --git a/deps/rabbitmq_management/priv/www/img/bg-green-dark.png b/deps/rabbitmq_management/priv/www/img/bg-green-dark.png
new file mode 100644
index 0000000000..d121c00e6a
--- /dev/null
+++ b/deps/rabbitmq_management/priv/www/img/bg-green-dark.png
Binary files differ
diff --git a/deps/rabbitmq_management/priv/www/img/bg-red-dark.png b/deps/rabbitmq_management/priv/www/img/bg-red-dark.png
new file mode 100644
index 0000000000..af571cde93
--- /dev/null
+++ b/deps/rabbitmq_management/priv/www/img/bg-red-dark.png
Binary files differ
diff --git a/deps/rabbitmq_management/priv/www/img/bg-red.png b/deps/rabbitmq_management/priv/www/img/bg-red.png
new file mode 100644
index 0000000000..90fe4a9dea
--- /dev/null
+++ b/deps/rabbitmq_management/priv/www/img/bg-red.png
Binary files differ
diff --git a/deps/rabbitmq_management/priv/www/img/bg-yellow-dark.png b/deps/rabbitmq_management/priv/www/img/bg-yellow-dark.png
new file mode 100644
index 0000000000..7b2018afda
--- /dev/null
+++ b/deps/rabbitmq_management/priv/www/img/bg-yellow-dark.png
Binary files differ
diff --git a/deps/rabbitmq_management/priv/www/img/collapse.png b/deps/rabbitmq_management/priv/www/img/collapse.png
new file mode 100644
index 0000000000..20b09a038b
--- /dev/null
+++ b/deps/rabbitmq_management/priv/www/img/collapse.png
Binary files differ
diff --git a/deps/rabbitmq_management/priv/www/img/expand.png b/deps/rabbitmq_management/priv/www/img/expand.png
new file mode 100644
index 0000000000..4b5a8d871e
--- /dev/null
+++ b/deps/rabbitmq_management/priv/www/img/expand.png
Binary files differ
diff --git a/deps/rabbitmq_management/priv/www/img/rabbitmqlogo-master-copy.svg b/deps/rabbitmq_management/priv/www/img/rabbitmqlogo-master-copy.svg
new file mode 100644
index 0000000000..4f3bad62e2
--- /dev/null
+++ b/deps/rabbitmq_management/priv/www/img/rabbitmqlogo-master-copy.svg
@@ -0,0 +1,122 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<svg
+ xmlns:dc="http://purl.org/dc/elements/1.1/"
+ xmlns:cc="http://creativecommons.org/ns#"
+ xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+ xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+ width="188.29454"
+ height="26.193336"
+ viewBox="0 0 188.29454 26.193336"
+ version="1.1"
+ id="svg42"
+ sodipodi:docname="rabbitmqlogo.svg"
+ inkscape:version="0.92.4 (5da689c313, 2019-01-14)">
+ <metadata
+ id="metadata46">
+ <rdf:RDF>
+ <cc:Work
+ rdf:about="">
+ <dc:format>image/svg+xml</dc:format>
+ <dc:type
+ rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+ <dc:title>Artboard Copy</dc:title>
+ </cc:Work>
+ </rdf:RDF>
+ </metadata>
+ <sodipodi:namedview
+ pagecolor="#ffffff"
+ bordercolor="#666666"
+ borderopacity="1"
+ objecttolerance="10"
+ gridtolerance="10"
+ guidetolerance="10"
+ inkscape:pageopacity="0"
+ inkscape:pageshadow="2"
+ inkscape:window-width="3838"
+ inkscape:window-height="2128"
+ id="namedview44"
+ showgrid="false"
+ fit-margin-top="0"
+ fit-margin-left="0"
+ fit-margin-right="0"
+ fit-margin-bottom="0"
+ inkscape:zoom="5.6806225"
+ inkscape:cx="56.451823"
+ inkscape:cy="49.532666"
+ inkscape:window-x="0"
+ inkscape:window-y="30"
+ inkscape:window-maximized="0"
+ inkscape:current-layer="svg42" />
+ <!-- Generator: Sketch 42 (36781) - http://www.bohemiancoding.com/sketch -->
+ <title
+ id="title2">Artboard Copy</title>
+ <desc
+ id="desc4">Created with Sketch.</desc>
+ <defs
+ id="defs6" />
+ <g
+ id="g996">
+ <path
+ style="fill:#ff6600;fill-rule:nonzero;stroke:none;stroke-width:1"
+ inkscape:connector-curvature="0"
+ id="Shape"
+ d="M 22.870867,10.622731 H 15.346611 C 14.744892,10.550951 14.286702,10.020888 14.286702,9.3969602 V 1.5012317 c 0,-0.67362166 -0.546517,-1.21472753 -1.214481,-1.21472753 h -2.633213 c -0.6734844,0 -1.2144801,0.54662736 -1.2144801,1.21472753 V 9.4797825 C 9.1914058,10.081625 8.694573,10.567516 8.0983737,10.622731 H 6.1276038 C 5.5479656,10.550951 5.1008162,10.06506 5.0621736,9.4797825 V 1.5012317 c 0,-0.67362166 -0.546516,-1.21472753 -1.21448,-1.21472753 H 1.21448 C 0.54099565,0.28650417 0,0.83313153 0,1.5012317 V 11.837458 24.542403 c 0,0.673622 0.54651602,1.214727 1.21448,1.214727 h 2.6332136 6.5913144 2.633213 9.804166 c 0.673485,0 1.214481,-0.546627 1.214481,-1.214727 V 11.837458 c -0.0055,-0.673621 -0.546516,-1.214727 -1.220001,-1.214727 z m -3.466788,8.712908 c 0,0.673622 -0.546516,1.214728 -1.21448,1.214728 h -2.633214 c -0.673484,0 -1.21448,-0.546628 -1.21448,-1.214728 V 16.85097 c 0,-0.673622 0.546516,-1.214728 1.21448,-1.214728 h 2.633214 c 0.673484,0 1.21448,0.546627 1.21448,1.214728 z" />
+ <g
+ id="g984">
+ <path
+ style="fill:#ff6600;fill-rule:nonzero;stroke:none;stroke-width:1"
+ inkscape:connector-curvature="0"
+ id="path21"
+ d="M 41.623543,25.779216 40.3649,19.904353 c -0.182172,-0.877917 -0.375385,-1.656447 -0.596199,-2.330068 -0.215294,-0.679144 -0.518915,-1.247857 -0.89982,-1.711662 -0.386425,-0.463805 -0.888778,-0.811659 -1.512579,-1.054604 -0.623801,-0.237424 -1.440815,-0.358897 -2.451042,-0.358897 h -2.644254 v 11.330094 h -2.88163 V 0.33067608 h 6.304256 c 1.043348,0 2.053575,0.11042977 3.0362,0.32576782 0.977104,0.21533805 1.843801,0.5742348 2.594571,1.0711688 0.750769,0.4969339 1.352489,1.1595126 1.810679,1.9822143 0.45819,0.8227018 0.684525,1.8331342 0.684525,3.0368187 0,0.9220886 -0.110407,1.7392689 -0.325701,2.4404979 -0.215295,0.701229 -0.540996,1.3085924 -0.971584,1.8165694 -0.430589,0.507977 -0.971584,0.938653 -1.611947,1.286507 -0.640362,0.347854 -1.396652,0.62945 -2.257828,0.844788 v 0.07178 c 0.839095,0.215338 1.512579,0.491413 2.014932,0.81718 0.502353,0.33129 0.90534,0.723316 1.20896,1.181599 0.2981,0.458284 0.529955,0.982825 0.695566,1.573624 0.165611,0.590799 0.320181,1.269943 0.474751,2.026386 l 1.468417,6.97364 z M 40.922457,7.0503276 c 0,-1.0546043 -0.187693,-1.8773061 -0.557557,-2.4570624 C 39.995036,4.0135089 39.498203,3.5828328 38.890963,3.3067584 38.278202,3.030684 37.582637,2.8650393 36.804265,2.8098244 36.025894,2.7546095 35.225441,2.7270021 34.408427,2.7270021 h -2.141901 v 9.3313159 h 1.584345 c 0.866697,0 1.716833,-0.06074 2.566969,-0.187731 0.850136,-0.126994 1.600905,-0.36994 2.268869,-0.728836 0.667964,-0.358897 1.20896,-0.866874 1.622987,-1.5239313 0.402987,-0.6570571 0.612761,-1.5128878 0.612761,-2.5674921 z" />
+ <path
+ style="fill:#ff6600;fill-rule:nonzero;stroke:none;stroke-width:1"
+ inkscape:connector-curvature="0"
+ id="path23"
+ d="M 57.776127,25.779216 V 23.45467 h -0.07176 c -0.242896,0.458283 -0.563077,0.855831 -0.971584,1.198163 -0.408507,0.342332 -0.855657,0.629449 -1.341449,0.85583 -0.485792,0.226381 -0.999186,0.397548 -1.529141,0.513499 -0.535475,0.115951 -1.048869,0.171166 -1.540181,0.171166 -0.75629,0 -1.429774,-0.115951 -2.014933,-0.353375 -0.590679,-0.231903 -1.081991,-0.563192 -1.473937,-0.988347 -0.397466,-0.425154 -0.701086,-0.944174 -0.91086,-1.551538 -0.209774,-0.607364 -0.314661,-1.280985 -0.314661,-2.026386 0,-0.673622 0.132489,-1.269943 0.397466,-1.800005 0.264978,-0.530063 0.612761,-0.993868 1.05439,-1.396937 0.436109,-0.403069 0.943982,-0.745401 1.52362,-1.03804 0.579639,-0.292639 1.175838,-0.524541 1.794119,-0.701229 0.61828,-0.182209 1.242081,-0.314725 1.865883,-0.397547 0.623801,-0.08282 1.208959,-0.126994 1.744435,-0.126994 0.298099,0 0.590679,-0.0055 0.866697,-0.01104 0.276018,-0.0055 0.563077,-0.03865 0.866697,-0.09939 v -0.375461 c 0,-0.646014 -0.01104,-1.292028 -0.0276,-1.938043 -0.01656,-0.646014 -0.126968,-1.22577 -0.325702,-1.739268 -0.198733,-0.513499 -0.524434,-0.933132 -0.971584,-1.247857 -0.447149,-0.314725 -1.115113,-0.474848 -1.987331,-0.474848 -0.518914,0 -0.982624,0.04969 -1.396652,0.143559 -0.414027,0.09387 -0.772851,0.248467 -1.07095,0.463805 -0.2981,0.215338 -0.540996,0.502455 -0.728688,0.861352 -0.187693,0.358897 -0.314661,0.800616 -0.386426,1.325157 h -2.793304 c 0.01104,-0.888959 0.198733,-1.639882 0.568598,-2.258289 0.364344,-0.6184063 0.844616,-1.1208617 1.429774,-1.5128874 0.590679,-0.3920257 1.258643,-0.6736216 2.009412,-0.8558307 0.75077,-0.1822091 1.51258,-0.270553 2.296472,-0.270553 1.992851,0 3.472309,0.4527621 4.438372,1.3527647 0.966064,0.9000024 1.451856,2.3300684 1.451856,4.2846754 v 12.31844 z M 56.224905,17.81723 c -0.563077,0 -1.181358,0.06074 -1.843801,0.187731 -0.662444,0.126994 -1.286245,0.331289 -1.865883,0.623928 -0.579639,0.292639 -1.05991,0.6681 -1.451856,1.142948 -0.391946,0.474848 -0.585159,1.060126 -0.585159,1.755833 0,0.745401 0.226335,1.358286 0.673485,1.838656 0.45267,0.480369 1.048869,0.717794 1.794118,0.717794 0.441629,0 0.872218,-0.06626 1.280725,-0.209817 0.408506,-0.138037 0.789411,-0.325768 1.142715,-0.568713 0.353303,-0.237424 0.673484,-0.530063 0.966063,-0.866874 0.287059,-0.336811 0.524435,-0.701229 0.701087,-1.098776 0.143529,-0.325768 0.259457,-0.623929 0.353303,-0.88896 0.09385,-0.265031 0.160091,-0.541106 0.209774,-0.811659 0.04968,-0.270553 0.07728,-0.546627 0.08833,-0.839266 0.01104,-0.287117 0.01656,-0.618407 0.01656,-0.988346 h -1.479458 z" />
+ <path
+ style="fill:#ff6600;fill-rule:nonzero;stroke:none;stroke-width:1"
+ inkscape:connector-curvature="0"
+ id="path25"
+ d="m 79.200659,17.18778 c 0,1.153992 -0.115927,2.269332 -0.353303,3.351544 -0.237376,1.082212 -0.629321,2.042951 -1.181358,2.882217 -0.552036,0.839266 -1.275204,1.512888 -2.169503,2.015343 -0.894299,0.502456 -2.009412,0.756444 -3.33982,0.756444 -1.236561,0 -2.27439,-0.193252 -3.113485,-0.574235 -0.839095,-0.386504 -1.5181,-0.916567 -2.025973,-1.601231 -0.507874,-0.684665 -0.877738,-1.490802 -1.098553,-2.423934 -0.220814,-0.92761 -0.331222,-1.943564 -0.331222,-3.036819 V 0.33067608 h 2.500725 V 11.136229 h 0.08833 c 0.15457,-0.530063 0.391946,-0.993868 0.701086,-1.402458 0.314661,-0.4085902 0.679005,-0.7509225 1.098552,-1.0380399 0.419548,-0.2815959 0.883259,-0.4969339 1.385612,-0.6460141 0.502353,-0.1490802 1.026787,-0.2263811 1.567783,-0.2263811 0.839095,0 1.578824,0.1325158 2.213666,0.4030687 0.634842,0.270553 1.186878,0.6404927 1.645068,1.1098192 0.463711,0.4693265 0.850136,1.0104322 1.159277,1.6288392 0.314661,0.618407 0.557557,1.275464 0.739729,1.971171 0.182172,0.695708 0.30914,1.40798 0.386425,2.136816 0.08833,0.723315 0.126968,1.430066 0.126968,2.11473 z m -2.88163,-0.06626 c 0,-0.40859 -0.02208,-0.866873 -0.06072,-1.37485 -0.04416,-0.507977 -0.115927,-1.032518 -0.215294,-1.562581 -0.09937,-0.535585 -0.242896,-1.054605 -0.425068,-1.562582 -0.182172,-0.507977 -0.414027,-0.960739 -0.701086,-1.358286 -0.287059,-0.397547 -0.634842,-0.717793 -1.043349,-0.960739 -0.408507,-0.248467 -0.883258,-0.369939 -1.424253,-0.369939 -0.574118,0 -1.087512,0.132515 -1.529141,0.397547 -0.441629,0.265031 -0.828055,0.612885 -1.153756,1.043561 -0.325702,0.430676 -0.5962,0.922089 -0.811494,1.474238 -0.215294,0.552148 -0.386425,1.11534 -0.513394,1.689575 -0.126968,0.574235 -0.215294,1.137427 -0.270498,1.689575 -0.0552,0.552149 -0.08281,1.043562 -0.08281,1.474238 0,0.673621 0.04968,1.380372 0.143529,2.120251 0.09385,0.745401 0.287059,1.435587 0.568598,2.070559 0.281538,0.634971 0.684525,1.159512 1.20896,1.573624 0.524434,0.414111 1.21448,0.618407 2.081177,0.618407 0.574118,0 1.081991,-0.11043 1.52362,-0.325768 0.436109,-0.215338 0.811494,-0.507977 1.126154,-0.872396 0.314661,-0.364418 0.568598,-0.789572 0.772851,-1.269942 0.204254,-0.480369 0.364344,-0.977303 0.485792,-1.490802 0.121448,-0.513498 0.204254,-1.032518 0.253937,-1.546017 0.04416,-0.513498 0.06624,-0.999389 0.06624,-1.457673 z" />
+ <path
+ style="fill:#ff6600;fill-rule:nonzero;stroke:none;stroke-width:1"
+ inkscape:connector-curvature="0"
+ id="path27"
+ d="m 96.937588,17.18778 c 0,1.153992 -0.115927,2.269332 -0.353303,3.351544 -0.237376,1.082212 -0.629321,2.042951 -1.181358,2.882217 -0.552036,0.839266 -1.275204,1.512888 -2.169503,2.015343 -0.894299,0.502456 -2.009412,0.756444 -3.33982,0.756444 -1.236561,0 -2.27439,-0.193252 -3.113485,-0.574235 -0.839095,-0.386504 -1.5181,-0.916567 -2.025974,-1.601231 -0.507873,-0.684665 -0.877737,-1.490802 -1.098552,-2.423934 -0.220815,-0.92761 -0.331222,-1.943564 -0.331222,-3.036819 V 0.33067608 h 2.500725 V 11.136229 h 0.08833 c 0.15457,-0.530063 0.391946,-0.993868 0.701086,-1.402458 0.314661,-0.4085902 0.679005,-0.7509225 1.098552,-1.0380399 0.419548,-0.2815959 0.883259,-0.4969339 1.385612,-0.6460141 0.502353,-0.1490802 1.026787,-0.2263811 1.567783,-0.2263811 0.839095,0 1.578824,0.1325158 2.213666,0.4030687 0.634842,0.270553 1.186878,0.6404927 1.645068,1.1098192 0.463711,0.4693265 0.850136,1.0104322 1.159277,1.6288392 0.31466,0.618407 0.557556,1.275464 0.739729,1.971171 0.182171,0.695708 0.30914,1.40798 0.386425,2.136816 0.08833,0.723315 0.126968,1.430066 0.126968,2.11473 z m -2.88163,-0.06626 c 0,-0.40859 -0.02208,-0.866873 -0.06072,-1.37485 -0.04416,-0.507977 -0.115927,-1.032518 -0.215294,-1.562581 -0.09937,-0.535585 -0.242896,-1.054605 -0.425068,-1.562582 -0.182172,-0.507977 -0.414027,-0.960739 -0.701086,-1.358286 -0.287059,-0.397547 -0.634842,-0.717793 -1.043349,-0.960739 -0.408507,-0.248467 -0.883258,-0.369939 -1.424254,-0.369939 -0.574117,0 -1.087511,0.132515 -1.52914,0.397547 -0.441629,0.265031 -0.828055,0.612885 -1.153756,1.043561 -0.325702,0.430676 -0.5962,0.922089 -0.811494,1.474238 -0.215294,0.552148 -0.386425,1.11534 -0.513394,1.689575 -0.126968,0.574235 -0.215294,1.137427 -0.270498,1.689575 -0.0552,0.552149 -0.08281,1.043562 -0.08281,1.474238 0,0.673621 0.04968,1.380372 0.143529,2.120251 0.09385,0.745401 0.287059,1.435587 0.568598,2.070559 0.281538,0.634971 0.684525,1.159512 1.20896,1.573624 0.524434,0.414111 1.21448,0.618407 2.081177,0.618407 0.574118,0 1.081991,-0.11043 1.52362,-0.325768 0.436109,-0.215338 0.811494,-0.507977 1.126154,-0.872396 0.314661,-0.364418 0.568598,-0.789572 0.772851,-1.269942 0.204254,-0.480369 0.364344,-0.977303 0.485792,-1.490802 0.121448,-0.513498 0.204254,-1.032518 0.253937,-1.546017 0.04416,-0.513498 0.06624,-0.999389 0.06624,-1.457673 z" />
+ <path
+ style="fill:#ff6600;fill-rule:nonzero;stroke:none;stroke-width:1"
+ inkscape:connector-curvature="0"
+ id="path29"
+ d="M 101.36492,3.6435692 V 0.33067608 h 3.31222 V 3.6435692 Z m 0.39747,22.1356468 V 8.2208831 h 2.50072 V 25.779216 Z" />
+ <path
+ style="fill:#ff6600;fill-rule:nonzero;stroke:none;stroke-width:1"
+ inkscape:connector-curvature="0"
+ id="path31"
+ d="m 114.15008,10.32457 v 10.805553 c 0,0.938653 0.22634,1.661968 0.68453,2.180988 0.45819,0.51902 1.18688,0.773009 2.18606,0.773009 0.34779,0 0.64588,-0.02761 0.88326,-0.08282 0.2429,-0.05522 0.51891,-0.143558 0.83358,-0.25951 v 2.252767 c -0.36987,0.06074 -0.73973,0.11043 -1.1096,0.143559 -0.36434,0.03865 -0.73421,0.05522 -1.10959,0.05522 -0.82806,0 -1.5457,-0.0773 -2.15294,-0.231902 -0.60724,-0.154602 -1.11512,-0.408591 -1.52362,-0.767487 -0.40851,-0.353376 -0.71213,-0.817181 -0.91086,-1.396937 -0.19874,-0.579756 -0.2981,-1.292028 -0.2981,-2.142337 V 10.32457 h -2.86507 V 8.2153616 h 2.86507 V 4.1294602 l 2.50072,-1.5846672 v 5.6705686 h 4.73647 V 10.32457 Z" />
+ <polygon
+ style="fill:#b8b8b8;fill-rule:nonzero;stroke:none;stroke-width:1"
+ points="113.99551,3.5589046 106.50438,25.943019 103.66139,25.943019 96.186819,3.5589046 96.098493,3.5589046 96.098493,25.943019 93.526004,25.943019 93.526004,0.5 97.848449,0.5 105.06908,22.149756 105.14085,22.149756 112.37805,0.5 116.64529,0.5 116.64529,25.94854 114.06728,25.94854 114.06728,3.5589046 "
+ id="polygon33"
+ transform="translate(29.379376,-0.10000002)" />
+ <path
+ style="fill:#b8b8b8;fill-rule:nonzero;stroke:none;stroke-width:1"
+ inkscape:connector-curvature="0"
+ id="path35"
+ d="m 160.212,25.857131 c -1.18688,0 -2.24127,-0.176688 -3.15213,-0.530063 -0.91086,-0.353376 -1.71131,-0.839267 -2.39583,-1.457673 -0.68453,-0.618407 -1.25865,-1.341722 -1.72788,-2.169945 -0.46923,-0.828224 -0.84461,-1.711662 -1.13167,-2.655836 -0.28706,-0.944175 -0.49683,-1.921478 -0.6238,-2.937432 -0.12697,-1.015954 -0.1877,-2.020865 -0.1877,-3.014733 0,-1.054604 0.0552,-2.109208 0.17114,-3.158291 0.11592,-1.049083 0.30362,-2.0595154 0.57411,-3.0368189 0.2705,-0.9773034 0.63485,-1.8773061 1.08752,-2.7220938 0.45819,-0.8392662 1.0323,-1.5681027 1.72787,-2.1865094 0.69557,-0.6184067 1.52362,-1.10429774 2.47312,-1.457673 C 157.98177,0.17668763 159.09689,0 160.36657,0 c 1.23656,0 2.31855,0.18220912 3.2515,0.54662736 0.92742,0.36441822 1.72787,0.86687374 2.39583,1.49632344 0.66797,0.6294496 1.22,1.3693291 1.65611,2.2251598 0.43611,0.8503092 0.7839,1.7613548 1.03783,2.7276153 0.25394,0.9662605 0.43059,1.9711714 0.52996,3.0147331 0.0994,1.043561 0.15457,2.070558 0.15457,3.08099 0,0.491413 -0.0166,1.054605 -0.0552,1.684054 -0.0386,0.62945 -0.0994,1.286507 -0.18769,1.971172 -0.0883,0.684664 -0.21529,1.37485 -0.37538,2.070558 -0.16009,0.695707 -0.36987,1.352764 -0.6238,1.96565 -0.25394,0.612885 -0.55756,1.170555 -0.91086,1.673011 -0.35331,0.502455 -0.76734,0.900002 -1.23105,1.18712 l 0.0718,0.07178 5.23882,-0.325768 v 2.468106 z m 6.05032,-12.804332 c 0,-0.662579 -0.0221,-1.385894 -0.0607,-2.169945 -0.0442,-0.784052 -0.13249,-1.5736244 -0.2705,-2.3576758 C 165.79309,7.7411269 165.58884,6.97364 165.32938,6.239282 165.06992,5.504924 164.7111,4.8478669 164.27499,4.2736321 163.83336,3.7049188 163.29789,3.2466352 162.66304,2.904303 162.0282,2.5619707 161.26087,2.3908045 160.37209,2.3908045 c -0.93846,0 -1.73891,0.1656447 -2.40688,0.496934 -0.66796,0.3312893 -1.23104,0.7730084 -1.69475,1.3306787 -0.46371,0.5576703 -0.83357,1.198163 -1.12063,1.9269995 -0.28154,0.7288365 -0.49683,1.4852804 -0.6514,2.2803747 -0.14905,0.7950944 -0.24842,1.5901886 -0.2981,2.3852826 -0.0497,0.800616 -0.0718,1.546017 -0.0718,2.241725 0,0.662578 0.0276,1.37485 0.0883,2.142337 0.0607,0.767487 0.16561,1.534974 0.3257,2.307982 0.15457,0.767487 0.37538,1.512888 0.65692,2.230682 0.28154,0.717793 0.64589,1.358286 1.09303,1.910435 0.44715,0.552149 0.98815,0.993868 1.62299,1.325157 0.63484,0.331289 1.39113,0.496934 2.25231,0.496934 0.97158,0 1.79964,-0.154602 2.47864,-0.469327 0.67901,-0.314724 1.24208,-0.734358 1.69475,-1.258899 0.45267,-0.530063 0.80598,-1.137427 1.06543,-1.827613 0.25946,-0.690186 0.45267,-1.424544 0.57964,-2.197552 0.12697,-0.773008 0.20426,-1.562581 0.23186,-2.357675 0.0276,-0.806138 0.0442,-1.573625 0.0442,-2.302461 z" />
+ </g>
+ </g>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:40px;line-height:1.25;font-family:Raleway-v4020;-inkscape-font-specification:'Raleway-v4020, Normal';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-feature-settings:normal;text-align:start;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:start;fill:#b8b8b8;fill-opacity:1;stroke:none"
+ x="175.17902"
+ y="25.857132"
+ id="text956"><tspan
+ sodipodi:role="line"
+ id="tspan954"
+ x="175.17902"
+ y="25.857132"
+ style="font-style:normal;font-variant:normal;font-weight:600;font-stretch:normal;font-size:9.33333302px;font-family:Raleway-v4020;-inkscape-font-specification:'Raleway-v4020 Semi-Bold';fill:#b8b8b8;fill-opacity:1">TM</tspan></text>
+</svg>
diff --git a/deps/rabbitmq_management/priv/www/img/rabbitmqlogo.svg b/deps/rabbitmq_management/priv/www/img/rabbitmqlogo.svg
new file mode 100644
index 0000000000..b0b1af0a3a
--- /dev/null
+++ b/deps/rabbitmq_management/priv/www/img/rabbitmqlogo.svg
@@ -0,0 +1,124 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<svg
+ xmlns:dc="http://purl.org/dc/elements/1.1/"
+ xmlns:cc="http://creativecommons.org/ns#"
+ xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+ xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+ width="188.29454"
+ height="26.193336"
+ viewBox="0 0 188.29454 26.193336"
+ version="1.1"
+ id="svg42"
+ sodipodi:docname="rabbitmqlogo.svg"
+ inkscape:version="0.92.4 (5da689c313, 2019-01-14)">
+ <metadata
+ id="metadata46">
+ <rdf:RDF>
+ <cc:Work
+ rdf:about="">
+ <dc:format>image/svg+xml</dc:format>
+ <dc:type
+ rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+ <dc:title>Artboard Copy</dc:title>
+ </cc:Work>
+ </rdf:RDF>
+ </metadata>
+ <sodipodi:namedview
+ pagecolor="#ffffff"
+ bordercolor="#666666"
+ borderopacity="1"
+ objecttolerance="10"
+ gridtolerance="10"
+ guidetolerance="10"
+ inkscape:pageopacity="0"
+ inkscape:pageshadow="2"
+ inkscape:window-width="3838"
+ inkscape:window-height="2128"
+ id="namedview44"
+ showgrid="false"
+ fit-margin-top="0"
+ fit-margin-left="0"
+ fit-margin-right="0"
+ fit-margin-bottom="0"
+ inkscape:zoom="5.6806225"
+ inkscape:cx="56.451823"
+ inkscape:cy="49.532666"
+ inkscape:window-x="0"
+ inkscape:window-y="30"
+ inkscape:window-maximized="0"
+ inkscape:current-layer="svg42" />
+ <!-- Generator: Sketch 42 (36781) - http://www.bohemiancoding.com/sketch -->
+ <title
+ id="title2">Artboard Copy</title>
+ <desc
+ id="desc4">Created with Sketch.</desc>
+ <defs
+ id="defs6" />
+ <g
+ id="g996">
+ <path
+ style="fill:#ff6600;fill-rule:nonzero;stroke:none;stroke-width:1"
+ inkscape:connector-curvature="0"
+ id="Shape"
+ d="M 22.870867,10.622731 H 15.346611 C 14.744892,10.550951 14.286702,10.020888 14.286702,9.3969602 V 1.5012317 c 0,-0.67362166 -0.546517,-1.21472753 -1.214481,-1.21472753 h -2.633213 c -0.6734844,0 -1.2144801,0.54662736 -1.2144801,1.21472753 V 9.4797825 C 9.1914058,10.081625 8.694573,10.567516 8.0983737,10.622731 H 6.1276038 C 5.5479656,10.550951 5.1008162,10.06506 5.0621736,9.4797825 V 1.5012317 c 0,-0.67362166 -0.546516,-1.21472753 -1.21448,-1.21472753 H 1.21448 C 0.54099565,0.28650417 0,0.83313153 0,1.5012317 V 11.837458 24.542403 c 0,0.673622 0.54651602,1.214727 1.21448,1.214727 h 2.6332136 6.5913144 2.633213 9.804166 c 0.673485,0 1.214481,-0.546627 1.214481,-1.214727 V 11.837458 c -0.0055,-0.673621 -0.546516,-1.214727 -1.220001,-1.214727 z m -3.466788,8.712908 c 0,0.673622 -0.546516,1.214728 -1.21448,1.214728 h -2.633214 c -0.673484,0 -1.21448,-0.546628 -1.21448,-1.214728 V 16.85097 c 0,-0.673622 0.546516,-1.214728 1.21448,-1.214728 h 2.633214 c 0.673484,0 1.21448,0.546627 1.21448,1.214728 z" />
+ <g
+ id="g984">
+ <path
+ style="fill:#ff6600;fill-rule:nonzero;stroke:none;stroke-width:1"
+ inkscape:connector-curvature="0"
+ id="path21"
+ d="M 41.623543,25.779216 40.3649,19.904353 c -0.182172,-0.877917 -0.375385,-1.656447 -0.596199,-2.330068 -0.215294,-0.679144 -0.518915,-1.247857 -0.89982,-1.711662 -0.386425,-0.463805 -0.888778,-0.811659 -1.512579,-1.054604 -0.623801,-0.237424 -1.440815,-0.358897 -2.451042,-0.358897 h -2.644254 v 11.330094 h -2.88163 V 0.33067608 h 6.304256 c 1.043348,0 2.053575,0.11042977 3.0362,0.32576782 0.977104,0.21533805 1.843801,0.5742348 2.594571,1.0711688 0.750769,0.4969339 1.352489,1.1595126 1.810679,1.9822143 0.45819,0.8227018 0.684525,1.8331342 0.684525,3.0368187 0,0.9220886 -0.110407,1.7392689 -0.325701,2.4404979 -0.215295,0.701229 -0.540996,1.3085924 -0.971584,1.8165694 -0.430589,0.507977 -0.971584,0.938653 -1.611947,1.286507 -0.640362,0.347854 -1.396652,0.62945 -2.257828,0.844788 v 0.07178 c 0.839095,0.215338 1.512579,0.491413 2.014932,0.81718 0.502353,0.33129 0.90534,0.723316 1.20896,1.181599 0.2981,0.458284 0.529955,0.982825 0.695566,1.573624 0.165611,0.590799 0.320181,1.269943 0.474751,2.026386 l 1.468417,6.97364 z M 40.922457,7.0503276 c 0,-1.0546043 -0.187693,-1.8773061 -0.557557,-2.4570624 C 39.995036,4.0135089 39.498203,3.5828328 38.890963,3.3067584 38.278202,3.030684 37.582637,2.8650393 36.804265,2.8098244 36.025894,2.7546095 35.225441,2.7270021 34.408427,2.7270021 h -2.141901 v 9.3313159 h 1.584345 c 0.866697,0 1.716833,-0.06074 2.566969,-0.187731 0.850136,-0.126994 1.600905,-0.36994 2.268869,-0.728836 0.667964,-0.358897 1.20896,-0.866874 1.622987,-1.5239313 0.402987,-0.6570571 0.612761,-1.5128878 0.612761,-2.5674921 z" />
+ <path
+ style="fill:#ff6600;fill-rule:nonzero;stroke:none;stroke-width:1"
+ inkscape:connector-curvature="0"
+ id="path23"
+ d="M 57.776127,25.779216 V 23.45467 h -0.07176 c -0.242896,0.458283 -0.563077,0.855831 -0.971584,1.198163 -0.408507,0.342332 -0.855657,0.629449 -1.341449,0.85583 -0.485792,0.226381 -0.999186,0.397548 -1.529141,0.513499 -0.535475,0.115951 -1.048869,0.171166 -1.540181,0.171166 -0.75629,0 -1.429774,-0.115951 -2.014933,-0.353375 -0.590679,-0.231903 -1.081991,-0.563192 -1.473937,-0.988347 -0.397466,-0.425154 -0.701086,-0.944174 -0.91086,-1.551538 -0.209774,-0.607364 -0.314661,-1.280985 -0.314661,-2.026386 0,-0.673622 0.132489,-1.269943 0.397466,-1.800005 0.264978,-0.530063 0.612761,-0.993868 1.05439,-1.396937 0.436109,-0.403069 0.943982,-0.745401 1.52362,-1.03804 0.579639,-0.292639 1.175838,-0.524541 1.794119,-0.701229 0.61828,-0.182209 1.242081,-0.314725 1.865883,-0.397547 0.623801,-0.08282 1.208959,-0.126994 1.744435,-0.126994 0.298099,0 0.590679,-0.0055 0.866697,-0.01104 0.276018,-0.0055 0.563077,-0.03865 0.866697,-0.09939 v -0.375461 c 0,-0.646014 -0.01104,-1.292028 -0.0276,-1.938043 -0.01656,-0.646014 -0.126968,-1.22577 -0.325702,-1.739268 -0.198733,-0.513499 -0.524434,-0.933132 -0.971584,-1.247857 -0.447149,-0.314725 -1.115113,-0.474848 -1.987331,-0.474848 -0.518914,0 -0.982624,0.04969 -1.396652,0.143559 -0.414027,0.09387 -0.772851,0.248467 -1.07095,0.463805 -0.2981,0.215338 -0.540996,0.502455 -0.728688,0.861352 -0.187693,0.358897 -0.314661,0.800616 -0.386426,1.325157 h -2.793304 c 0.01104,-0.888959 0.198733,-1.639882 0.568598,-2.258289 0.364344,-0.6184063 0.844616,-1.1208617 1.429774,-1.5128874 0.590679,-0.3920257 1.258643,-0.6736216 2.009412,-0.8558307 0.75077,-0.1822091 1.51258,-0.270553 2.296472,-0.270553 1.992851,0 3.472309,0.4527621 4.438372,1.3527647 0.966064,0.9000024 1.451856,2.3300684 1.451856,4.2846754 v 12.31844 z M 56.224905,17.81723 c -0.563077,0 -1.181358,0.06074 -1.843801,0.187731 -0.662444,0.126994 -1.286245,0.331289 -1.865883,0.623928 -0.579639,0.292639 -1.05991,0.6681 -1.451856,1.142948 -0.391946,0.474848 -0.585159,1.060126 -0.585159,1.755833 0,0.745401 0.226335,1.358286 0.673485,1.838656 0.45267,0.480369 1.048869,0.717794 1.794118,0.717794 0.441629,0 0.872218,-0.06626 1.280725,-0.209817 0.408506,-0.138037 0.789411,-0.325768 1.142715,-0.568713 0.353303,-0.237424 0.673484,-0.530063 0.966063,-0.866874 0.287059,-0.336811 0.524435,-0.701229 0.701087,-1.098776 0.143529,-0.325768 0.259457,-0.623929 0.353303,-0.88896 0.09385,-0.265031 0.160091,-0.541106 0.209774,-0.811659 0.04968,-0.270553 0.07728,-0.546627 0.08833,-0.839266 0.01104,-0.287117 0.01656,-0.618407 0.01656,-0.988346 h -1.479458 z" />
+ <path
+ style="fill:#ff6600;fill-rule:nonzero;stroke:none;stroke-width:1"
+ inkscape:connector-curvature="0"
+ id="path25"
+ d="m 79.200659,17.18778 c 0,1.153992 -0.115927,2.269332 -0.353303,3.351544 -0.237376,1.082212 -0.629321,2.042951 -1.181358,2.882217 -0.552036,0.839266 -1.275204,1.512888 -2.169503,2.015343 -0.894299,0.502456 -2.009412,0.756444 -3.33982,0.756444 -1.236561,0 -2.27439,-0.193252 -3.113485,-0.574235 -0.839095,-0.386504 -1.5181,-0.916567 -2.025973,-1.601231 -0.507874,-0.684665 -0.877738,-1.490802 -1.098553,-2.423934 -0.220814,-0.92761 -0.331222,-1.943564 -0.331222,-3.036819 V 0.33067608 h 2.500725 V 11.136229 h 0.08833 c 0.15457,-0.530063 0.391946,-0.993868 0.701086,-1.402458 0.314661,-0.4085902 0.679005,-0.7509225 1.098552,-1.0380399 0.419548,-0.2815959 0.883259,-0.4969339 1.385612,-0.6460141 0.502353,-0.1490802 1.026787,-0.2263811 1.567783,-0.2263811 0.839095,0 1.578824,0.1325158 2.213666,0.4030687 0.634842,0.270553 1.186878,0.6404927 1.645068,1.1098192 0.463711,0.4693265 0.850136,1.0104322 1.159277,1.6288392 0.314661,0.618407 0.557557,1.275464 0.739729,1.971171 0.182172,0.695708 0.30914,1.40798 0.386425,2.136816 0.08833,0.723315 0.126968,1.430066 0.126968,2.11473 z m -2.88163,-0.06626 c 0,-0.40859 -0.02208,-0.866873 -0.06072,-1.37485 -0.04416,-0.507977 -0.115927,-1.032518 -0.215294,-1.562581 -0.09937,-0.535585 -0.242896,-1.054605 -0.425068,-1.562582 -0.182172,-0.507977 -0.414027,-0.960739 -0.701086,-1.358286 -0.287059,-0.397547 -0.634842,-0.717793 -1.043349,-0.960739 -0.408507,-0.248467 -0.883258,-0.369939 -1.424253,-0.369939 -0.574118,0 -1.087512,0.132515 -1.529141,0.397547 -0.441629,0.265031 -0.828055,0.612885 -1.153756,1.043561 -0.325702,0.430676 -0.5962,0.922089 -0.811494,1.474238 -0.215294,0.552148 -0.386425,1.11534 -0.513394,1.689575 -0.126968,0.574235 -0.215294,1.137427 -0.270498,1.689575 -0.0552,0.552149 -0.08281,1.043562 -0.08281,1.474238 0,0.673621 0.04968,1.380372 0.143529,2.120251 0.09385,0.745401 0.287059,1.435587 0.568598,2.070559 0.281538,0.634971 0.684525,1.159512 1.20896,1.573624 0.524434,0.414111 1.21448,0.618407 2.081177,0.618407 0.574118,0 1.081991,-0.11043 1.52362,-0.325768 0.436109,-0.215338 0.811494,-0.507977 1.126154,-0.872396 0.314661,-0.364418 0.568598,-0.789572 0.772851,-1.269942 0.204254,-0.480369 0.364344,-0.977303 0.485792,-1.490802 0.121448,-0.513498 0.204254,-1.032518 0.253937,-1.546017 0.04416,-0.513498 0.06624,-0.999389 0.06624,-1.457673 z" />
+ <path
+ style="fill:#ff6600;fill-rule:nonzero;stroke:none;stroke-width:1"
+ inkscape:connector-curvature="0"
+ id="path27"
+ d="m 96.937588,17.18778 c 0,1.153992 -0.115927,2.269332 -0.353303,3.351544 -0.237376,1.082212 -0.629321,2.042951 -1.181358,2.882217 -0.552036,0.839266 -1.275204,1.512888 -2.169503,2.015343 -0.894299,0.502456 -2.009412,0.756444 -3.33982,0.756444 -1.236561,0 -2.27439,-0.193252 -3.113485,-0.574235 -0.839095,-0.386504 -1.5181,-0.916567 -2.025974,-1.601231 -0.507873,-0.684665 -0.877737,-1.490802 -1.098552,-2.423934 -0.220815,-0.92761 -0.331222,-1.943564 -0.331222,-3.036819 V 0.33067608 h 2.500725 V 11.136229 h 0.08833 c 0.15457,-0.530063 0.391946,-0.993868 0.701086,-1.402458 0.314661,-0.4085902 0.679005,-0.7509225 1.098552,-1.0380399 0.419548,-0.2815959 0.883259,-0.4969339 1.385612,-0.6460141 0.502353,-0.1490802 1.026787,-0.2263811 1.567783,-0.2263811 0.839095,0 1.578824,0.1325158 2.213666,0.4030687 0.634842,0.270553 1.186878,0.6404927 1.645068,1.1098192 0.463711,0.4693265 0.850136,1.0104322 1.159277,1.6288392 0.31466,0.618407 0.557556,1.275464 0.739729,1.971171 0.182171,0.695708 0.30914,1.40798 0.386425,2.136816 0.08833,0.723315 0.126968,1.430066 0.126968,2.11473 z m -2.88163,-0.06626 c 0,-0.40859 -0.02208,-0.866873 -0.06072,-1.37485 -0.04416,-0.507977 -0.115927,-1.032518 -0.215294,-1.562581 -0.09937,-0.535585 -0.242896,-1.054605 -0.425068,-1.562582 -0.182172,-0.507977 -0.414027,-0.960739 -0.701086,-1.358286 -0.287059,-0.397547 -0.634842,-0.717793 -1.043349,-0.960739 -0.408507,-0.248467 -0.883258,-0.369939 -1.424254,-0.369939 -0.574117,0 -1.087511,0.132515 -1.52914,0.397547 -0.441629,0.265031 -0.828055,0.612885 -1.153756,1.043561 -0.325702,0.430676 -0.5962,0.922089 -0.811494,1.474238 -0.215294,0.552148 -0.386425,1.11534 -0.513394,1.689575 -0.126968,0.574235 -0.215294,1.137427 -0.270498,1.689575 -0.0552,0.552149 -0.08281,1.043562 -0.08281,1.474238 0,0.673621 0.04968,1.380372 0.143529,2.120251 0.09385,0.745401 0.287059,1.435587 0.568598,2.070559 0.281538,0.634971 0.684525,1.159512 1.20896,1.573624 0.524434,0.414111 1.21448,0.618407 2.081177,0.618407 0.574118,0 1.081991,-0.11043 1.52362,-0.325768 0.436109,-0.215338 0.811494,-0.507977 1.126154,-0.872396 0.314661,-0.364418 0.568598,-0.789572 0.772851,-1.269942 0.204254,-0.480369 0.364344,-0.977303 0.485792,-1.490802 0.121448,-0.513498 0.204254,-1.032518 0.253937,-1.546017 0.04416,-0.513498 0.06624,-0.999389 0.06624,-1.457673 z" />
+ <path
+ style="fill:#ff6600;fill-rule:nonzero;stroke:none;stroke-width:1"
+ inkscape:connector-curvature="0"
+ id="path29"
+ d="M 101.36492,3.6435692 V 0.33067608 h 3.31222 V 3.6435692 Z m 0.39747,22.1356468 V 8.2208831 h 2.50072 V 25.779216 Z" />
+ <path
+ style="fill:#ff6600;fill-rule:nonzero;stroke:none;stroke-width:1"
+ inkscape:connector-curvature="0"
+ id="path31"
+ d="m 114.15008,10.32457 v 10.805553 c 0,0.938653 0.22634,1.661968 0.68453,2.180988 0.45819,0.51902 1.18688,0.773009 2.18606,0.773009 0.34779,0 0.64588,-0.02761 0.88326,-0.08282 0.2429,-0.05522 0.51891,-0.143558 0.83358,-0.25951 v 2.252767 c -0.36987,0.06074 -0.73973,0.11043 -1.1096,0.143559 -0.36434,0.03865 -0.73421,0.05522 -1.10959,0.05522 -0.82806,0 -1.5457,-0.0773 -2.15294,-0.231902 -0.60724,-0.154602 -1.11512,-0.408591 -1.52362,-0.767487 -0.40851,-0.353376 -0.71213,-0.817181 -0.91086,-1.396937 -0.19874,-0.579756 -0.2981,-1.292028 -0.2981,-2.142337 V 10.32457 h -2.86507 V 8.2153616 h 2.86507 V 4.1294602 l 2.50072,-1.5846672 v 5.6705686 h 4.73647 V 10.32457 Z" />
+ <polygon
+ style="fill:#b8b8b8;fill-rule:nonzero;stroke:none;stroke-width:1"
+ points="113.99551,3.5589046 106.50438,25.943019 103.66139,25.943019 96.186819,3.5589046 96.098493,3.5589046 96.098493,25.943019 93.526004,25.943019 93.526004,0.5 97.848449,0.5 105.06908,22.149756 105.14085,22.149756 112.37805,0.5 116.64529,0.5 116.64529,25.94854 114.06728,25.94854 114.06728,3.5589046 "
+ id="polygon33"
+ transform="translate(29.379376,-0.10000002)" />
+ <path
+ style="fill:#b8b8b8;fill-rule:nonzero;stroke:none;stroke-width:1"
+ inkscape:connector-curvature="0"
+ id="path35"
+ d="m 160.212,25.857131 c -1.18688,0 -2.24127,-0.176688 -3.15213,-0.530063 -0.91086,-0.353376 -1.71131,-0.839267 -2.39583,-1.457673 -0.68453,-0.618407 -1.25865,-1.341722 -1.72788,-2.169945 -0.46923,-0.828224 -0.84461,-1.711662 -1.13167,-2.655836 -0.28706,-0.944175 -0.49683,-1.921478 -0.6238,-2.937432 -0.12697,-1.015954 -0.1877,-2.020865 -0.1877,-3.014733 0,-1.054604 0.0552,-2.109208 0.17114,-3.158291 0.11592,-1.049083 0.30362,-2.0595154 0.57411,-3.0368189 0.2705,-0.9773034 0.63485,-1.8773061 1.08752,-2.7220938 0.45819,-0.8392662 1.0323,-1.5681027 1.72787,-2.1865094 0.69557,-0.6184067 1.52362,-1.10429774 2.47312,-1.457673 C 157.98177,0.17668763 159.09689,0 160.36657,0 c 1.23656,0 2.31855,0.18220912 3.2515,0.54662736 0.92742,0.36441822 1.72787,0.86687374 2.39583,1.49632344 0.66797,0.6294496 1.22,1.3693291 1.65611,2.2251598 0.43611,0.8503092 0.7839,1.7613548 1.03783,2.7276153 0.25394,0.9662605 0.43059,1.9711714 0.52996,3.0147331 0.0994,1.043561 0.15457,2.070558 0.15457,3.08099 0,0.491413 -0.0166,1.054605 -0.0552,1.684054 -0.0386,0.62945 -0.0994,1.286507 -0.18769,1.971172 -0.0883,0.684664 -0.21529,1.37485 -0.37538,2.070558 -0.16009,0.695707 -0.36987,1.352764 -0.6238,1.96565 -0.25394,0.612885 -0.55756,1.170555 -0.91086,1.673011 -0.35331,0.502455 -0.76734,0.900002 -1.23105,1.18712 l 0.0718,0.07178 5.23882,-0.325768 v 2.468106 z m 6.05032,-12.804332 c 0,-0.662579 -0.0221,-1.385894 -0.0607,-2.169945 -0.0442,-0.784052 -0.13249,-1.5736244 -0.2705,-2.3576758 C 165.79309,7.7411269 165.58884,6.97364 165.32938,6.239282 165.06992,5.504924 164.7111,4.8478669 164.27499,4.2736321 163.83336,3.7049188 163.29789,3.2466352 162.66304,2.904303 162.0282,2.5619707 161.26087,2.3908045 160.37209,2.3908045 c -0.93846,0 -1.73891,0.1656447 -2.40688,0.496934 -0.66796,0.3312893 -1.23104,0.7730084 -1.69475,1.3306787 -0.46371,0.5576703 -0.83357,1.198163 -1.12063,1.9269995 -0.28154,0.7288365 -0.49683,1.4852804 -0.6514,2.2803747 -0.14905,0.7950944 -0.24842,1.5901886 -0.2981,2.3852826 -0.0497,0.800616 -0.0718,1.546017 -0.0718,2.241725 0,0.662578 0.0276,1.37485 0.0883,2.142337 0.0607,0.767487 0.16561,1.534974 0.3257,2.307982 0.15457,0.767487 0.37538,1.512888 0.65692,2.230682 0.28154,0.717793 0.64589,1.358286 1.09303,1.910435 0.44715,0.552149 0.98815,0.993868 1.62299,1.325157 0.63484,0.331289 1.39113,0.496934 2.25231,0.496934 0.97158,0 1.79964,-0.154602 2.47864,-0.469327 0.67901,-0.314724 1.24208,-0.734358 1.69475,-1.258899 0.45267,-0.530063 0.80598,-1.137427 1.06543,-1.827613 0.25946,-0.690186 0.45267,-1.424544 0.57964,-2.197552 0.12697,-0.773008 0.20426,-1.562581 0.23186,-2.357675 0.0276,-0.806138 0.0442,-1.573625 0.0442,-2.302461 z" />
+ </g>
+ </g>
+ <g
+ aria-label="TM"
+ style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:40px;line-height:1.25;font-family:Raleway-v4020;-inkscape-font-specification:'Raleway-v4020, Normal';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-feature-settings:normal;text-align:start;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:start;fill:#b8b8b8;fill-opacity:1;stroke:none"
+ id="text956">
+ <path
+ d="m 180.78835,20.154465 v -0.923999 h -5.46933 v 0.923999 h 2.212 v 5.702667 h 1.05466 v -5.702667 z"
+ style="font-style:normal;font-variant:normal;font-weight:600;font-stretch:normal;font-size:9.33333302px;font-family:Raleway-v4020;-inkscape-font-specification:'Raleway-v4020 Semi-Bold';fill:#b8b8b8;fill-opacity:1"
+ id="path878" />
+ <path
+ d="m 187.23987,25.857132 h 1.05467 v -6.626666 h -1.12 l -2.19334,4.050666 -2.19333,-4.050666 h -1.12 v 6.626666 h 1.04533 v -4.722667 l 1.96,3.584 h 0.616 l 1.95067,-3.584 z"
+ style="font-style:normal;font-variant:normal;font-weight:600;font-stretch:normal;font-size:9.33333302px;font-family:Raleway-v4020;-inkscape-font-specification:'Raleway-v4020 Semi-Bold';fill:#b8b8b8;fill-opacity:1"
+ id="path880" />
+ </g>
+</svg>
diff --git a/deps/rabbitmq_management/priv/www/index.html b/deps/rabbitmq_management/priv/www/index.html
new file mode 100644
index 0000000000..3e8ce4d433
--- /dev/null
+++ b/deps/rabbitmq_management/priv/www/index.html
@@ -0,0 +1,70 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <meta http-equiv="X-UA-Compatible" content="IE=edge" />
+ <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
+ <title>RabbitMQ Management</title>
+ <script src="js/ejs-1.0.min.js" type="text/javascript"></script>
+ <script src="js/jquery-3.5.1.min.js"></script>
+ <script src="js/jquery.flot-0.8.1.min.js" type="text/javascript"></script>
+ <script src="js/jquery.flot-0.8.1.time.min.js" type="text/javascript"></script>
+ <script src="js/sammy-0.7.6.min.js" type="text/javascript"></script>
+ <script src="js/json2-2016.10.28.js" type="text/javascript"></script>
+ <script src="js/base64.js" type="text/javascript"></script>
+ <script src="js/global.js" type="text/javascript"></script>
+ <script src="js/main.js" type="text/javascript"></script>
+ <script src="js/prefs.js" type="text/javascript"></script>
+ <script src="js/formatters.js" type="text/javascript"></script>
+ <script src="js/charts.js" type="text/javascript"></script>
+ <script src="js/singular/singular.js" type="application/javascript"></script>
+
+ <link href="css/main.css" rel="stylesheet" type="text/css"/>
+ <link href="favicon.ico" rel="shortcut icon" type="image/x-icon"/>
+
+ <script type="application/javascript">
+ var uaa_logged_in = false;
+ var uaa_invalid = false;
+ var auth = JSON.parse(sync_get('/auth'));
+ enable_uaa = auth.enable_uaa;
+ uaa_client_id = auth.uaa_client_id;
+ uaa_location = auth.uaa_location;
+ if (enable_uaa) {
+ Singular.init({
+ singularLocation: './js/singular/',
+ uaaLocation: uaa_location,
+ clientId: uaa_client_id,
+ onIdentityChange: function (identity) {
+ uaa_logged_in = true;
+ start_app_login();
+ },
+ onLogout: function () {
+ uaa_logged_in = false;
+ var hash = window.location.hash.substring(1);
+ var params = {};
+ hash.split('&').map(hk => {
+ let temp = hk.split('=');
+ params[temp[0]] = temp[1];
+ });
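+ // For illustration (hypothetical values): a hash of
+ // "error=access_denied&error_description=expired" yields
+ // params = {error: 'access_denied', error_description: 'expired'}.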
+ if (params.error) {
+ uaa_invalid = true;
+ replace_content('login-status', '<p class="warning">' + decodeURIComponent(params.error) + ':' + decodeURIComponent(params.error_description) + '</p> <button id="loginWindow" onclick="uaa_login_window()">Click here to log in</button>');
+ } else {
+ replace_content('login-status', '<button id="loginWindow" onclick="uaa_login_window()">Click here to log in</button>');
+ }
+ }
+ });
+ }
+ </script>
+
+<!--[if lte IE 8]>
+ <script src="js/excanvas.min.js" type="text/javascript"></script>
+ <link href="css/evil.css" rel="stylesheet" type="text/css"/>
+<![endif]-->
+ </head>
+
+ <body>
+ <div id="outer"></div>
+ <div id="debug"></div>
+ <div id="scratch"></div>
+ </body>
+</html>
diff --git a/deps/rabbitmq_management/priv/www/js/base64.js b/deps/rabbitmq_management/priv/www/js/base64.js
new file mode 100644
index 0000000000..7955b3a028
--- /dev/null
+++ b/deps/rabbitmq_management/priv/www/js/base64.js
@@ -0,0 +1,154 @@
+/*
+ * Copyright (c) 2010 Nick Galbreath
+ * https://code.google.com/p/stringencoders/source/browse/#svn/trunk/javascript
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+/* base64 encode/decode compatible with window.btoa/atob
+ *
+ * window.atob/btoa is a Firefox extension to convert binary data (the "b")
+ * to base64 (ascii, the "a").
+ *
+ * It is also found in Safari and Chrome. It is not available in IE.
+ *
+ * if (!window.btoa) window.btoa = base64.encode
+ * if (!window.atob) window.atob = base64.decode
+ *
+ * The original specs for atob/btoa are a bit lacking
+ * https://developer.mozilla.org/en/DOM/window.atob
+ * https://developer.mozilla.org/en/DOM/window.btoa
+ *
+ * window.btoa and base64.encode take a string whose charCodeAt values are in [0,255].
+ * If any character is not [0,255], then an exception is thrown.
+ *
+ * window.atob and base64.decode take a base64-encoded string
+ * If the input length is not a multiple of 4, or contains invalid characters
+ * then an exception is thrown.
+ */
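+/* Usage sketch (an illustration, not part of the original file): polyfill
+ * the missing natives as suggested above, then round-trip a short string.
+ *
+ * if (!window.btoa) window.btoa = base64.encode;
+ * if (!window.atob) window.atob = base64.decode;
+ * var encoded = base64.encode('Man'); // "TWFu"
+ * base64.decode(encoded) === 'Man'; // true
+ */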
+base64 = {};
+base64.PADCHAR = '=';
+base64.ALPHA = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/';
+base64.getbyte64 = function(s,i) {
+ // This is oddly fast, except on Chrome/V8.
+ // There is minimal or no performance improvement from using an
+ // object with properties mapping chars to values (e.g. 'A': 0).
+ var idx = base64.ALPHA.indexOf(s.charAt(i));
+ if (idx == -1) {
+ throw "Cannot decode base64";
+ }
+ return idx;
+}
+
+base64.decode = function(s) {
+ // convert to string
+ s = "" + s;
+ var getbyte64 = base64.getbyte64;
+ var pads, i, b10;
+ var imax = s.length;
+ if (imax == 0) {
+ return s;
+ }
+
+ if (imax % 4 != 0) {
+ throw "Cannot decode base64";
+ }
+
+ pads = 0;
+ if (s.charAt(imax - 1) == base64.PADCHAR) {
+ pads = 1;
+ if (s.charAt(imax - 2) == base64.PADCHAR) {
+ pads = 2;
+ }
+ // either way, we want to ignore this last block
+ imax -= 4;
+ }
+
+ var x = [];
+ for (i = 0; i < imax; i += 4) {
+ b10 = (getbyte64(s,i) << 18) | (getbyte64(s,i+1) << 12) |
+ (getbyte64(s,i+2) << 6) | getbyte64(s,i+3);
+ x.push(String.fromCharCode(b10 >> 16, (b10 >> 8) & 0xff, b10 & 0xff));
+ }
+
+ switch (pads) {
+ case 1:
+ b10 = (getbyte64(s,i) << 18) | (getbyte64(s,i+1) << 12) | (getbyte64(s,i+2) << 6);
+ x.push(String.fromCharCode(b10 >> 16, (b10 >> 8) & 0xff));
+ break;
+ case 2:
+ b10 = (getbyte64(s,i) << 18) | (getbyte64(s,i+1) << 12);
+ x.push(String.fromCharCode(b10 >> 16));
+ break;
+ }
+ return x.join('');
+}
+
+base64.getbyte = function(s,i) {
+ var x = s.charCodeAt(i);
+ if (x > 255) {
+ throw "INVALID_CHARACTER_ERR: DOM Exception 5";
+ }
+ return x;
+}
+
+
+base64.encode = function(s) {
+ if (arguments.length != 1) {
+ throw "SyntaxError: Not enough arguments";
+ }
+ var padchar = base64.PADCHAR;
+ var alpha = base64.ALPHA;
+ var getbyte = base64.getbyte;
+
+ var i, b10;
+ var x = [];
+
+ // convert to string
+ s = "" + s;
+
+ var imax = s.length - s.length % 3;
+
+ if (s.length == 0) {
+ return s;
+ }
+ for (i = 0; i < imax; i += 3) {
+ b10 = (getbyte(s,i) << 16) | (getbyte(s,i+1) << 8) | getbyte(s,i+2);
+ x.push(alpha.charAt(b10 >> 18));
+ x.push(alpha.charAt((b10 >> 12) & 0x3F));
+ x.push(alpha.charAt((b10 >> 6) & 0x3f));
+ x.push(alpha.charAt(b10 & 0x3f));
+ }
+ switch (s.length - imax) {
+ case 1:
+ b10 = getbyte(s,i) << 16;
+ x.push(alpha.charAt(b10 >> 18) + alpha.charAt((b10 >> 12) & 0x3F) +
+ padchar + padchar);
+ break;
+ case 2:
+ b10 = (getbyte(s,i) << 16) | (getbyte(s,i+1) << 8);
+ x.push(alpha.charAt(b10 >> 18) + alpha.charAt((b10 >> 12) & 0x3F) +
+ alpha.charAt((b10 >> 6) & 0x3f) + padchar);
+ break;
+ }
+ return x.join('');
+}
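+
+// Worked example of the padding cases above (values verifiable by hand):
+// encode('Ma') -> imax = 0, s.length - imax = 2, so case 2 runs:
+// b10 = (77 << 16) | (97 << 8) = 5071104, emitting "TWE" plus one '=' pad.
+// decode("TWE=") reverses this: pads = 1, so the final block emits "Ma".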
diff --git a/deps/rabbitmq_management/priv/www/js/charts.js b/deps/rabbitmq_management/priv/www/js/charts.js
new file mode 100644
index 0000000000..97c66c40a7
--- /dev/null
+++ b/deps/rabbitmq_management/priv/www/js/charts.js
@@ -0,0 +1,329 @@
+//
+// Formatting side
+//
+
+function message_rates(id, stats) {
+ var items = [['Publish', 'publish'],
+ ['Publisher confirm', 'confirm'],
+ ['Publish (In)', 'publish_in'],
+ ['Publish (Out)', 'publish_out'],
+ ['Deliver (manual ack)', 'deliver'],
+ ['Deliver (auto ack)', 'deliver_no_ack'],
+ ['Consumer ack', 'ack'],
+ ['Redelivered', 'redeliver'],
+ ['Get (manual ack)', 'get'],
+ ['Get (auto ack)', 'get_no_ack'],
+ ['Get (empty)', 'get_empty'],
+ ['Unroutable (return)', 'return_unroutable'],
+ ['Unroutable (drop)', 'drop_unroutable'],
+ ['Disk read', 'disk_reads'],
+ ['Disk write', 'disk_writes']];
+ return rates_chart_or_text(id, stats, items, fmt_rate, fmt_rate_axis, true, 'Message rates', 'message-rates');
+}
+
+function queue_lengths(id, stats) {
+ var items = [['Ready', 'messages_ready'],
+ ['Unacked', 'messages_unacknowledged'],
+ ['Total', 'messages']];
+ return rates_chart_or_text(id, stats, items, fmt_num_thousands, fmt_plain_axis, false, 'Queued messages', 'queued-messages');
+}
+
+function data_rates(id, stats) {
+ var items = [['From client', 'recv_oct'], ['To client', 'send_oct']];
+ return rates_chart_or_text(id, stats, items, fmt_rate_bytes, fmt_rate_bytes_axis, true, 'Data rates');
+}
+
+function data_reductions(id, stats) {
+ var items = [['Reductions', 'reductions']];
+ return rates_chart_or_text(id, stats, items, fmt_rate, fmt_rate_axis, true, 'Reductions (per second)', 'process-reductions');
+}
+
+function rates_chart_or_text(id, stats, items, fmt, axis_fmt, chart_rates,
+ heading, heading_help) {
+ var prefix = chart_h3(id, heading, heading_help);
+
+ return prefix + rates_chart_or_text_no_heading(
+ id, id, stats, items, fmt, axis_fmt, chart_rates);
+}
+
+function rates_chart_or_text_no_heading(type_id, id, stats, items,
+ fmt, axis_fmt, chart_rates) {
+ var mode = get_pref('rate-mode-' + type_id);
+ var range = get_pref('chart-range');
+ var res;
+ if (keys(stats).length > 0) {
+ if (mode == 'chart') {
+ res = rates_chart(
+ type_id, id, items, stats, fmt, axis_fmt, 'full', chart_rates);
+ }
+ else {
+ res = rates_text(items, stats, mode, fmt, chart_rates);
+ }
+ if (res == "") res = '<p>Waiting for data...</p>';
+ }
+ else {
+ res = '<p>Currently idle</p>';
+ }
+ return res;
+}
+
+function chart_h3(id, heading, heading_help) {
+ var mode = get_pref('rate-mode-' + id);
+ var range = get_pref('chart-range');
+ return '<h3>' + heading +
+ ' <span class="popup-options-link" title="Click to change" ' +
+ 'type="rate" for="' + id + '">' + prefix_title(mode, range) +
+ '</span>' + (heading_help == undefined ? '' :
+ ' <span class="help" id="' + heading_help + '"></span>') +
+ '</h3>';
+}
+
+function prefix_title(mode, range) {
+ var desc = ALL_CHART_RANGES[range];
+ if (mode == 'chart') {
+ return desc.toLowerCase();
+ }
+ else if (mode == 'curr') {
+ return 'current value';
+ }
+ else {
+ return 'moving average: ' + desc.toLowerCase();
+ }
+}
+
+function node_stat_count(used_key, limit_key, stats, thresholds) {
+ var used = stats[used_key];
+ var limit = stats[limit_key];
+ if (typeof used == 'number') {
+ return node_stat(used_key, 'Used', limit_key, 'available', stats,
+ fmt_plain, fmt_plain_axis,
+ fmt_color(used / limit, thresholds));
+ } else {
+ return used;
+ }
+}
+
+function node_stat_count_bar(used_key, limit_key, stats, thresholds) {
+ var used = stats[used_key];
+ var limit = stats[limit_key];
+ if (typeof used == 'number') {
+ return node_stat_bar(used_key, limit_key, 'available', stats,
+ fmt_plain_axis,
+ fmt_color(used / limit, thresholds));
+ } else {
+ return used;
+ }
+}
+
+function node_stat(used_key, used_name, limit_key, suffix, stats, fmt,
+ axis_fmt, colour, help, invert) {
+ if (get_pref('rate-mode-node-stats') == 'chart') {
+ var items = [[used_name, used_key], ['Limit', limit_key]];
+ add_fake_limit_details(used_key, limit_key, stats);
+ return rates_chart('node-stats', 'node-stats-' + used_key, items, stats,
+ fmt, axis_fmt, 'node', false);
+ } else {
+ return node_stat_bar(used_key, limit_key, suffix, stats, axis_fmt,
+ colour, help, invert);
+ }
+}
+
+function add_fake_limit_details(used_key, limit_key, stats) {
+ var source = stats[used_key + '_details'].samples;
+ var limit = stats[limit_key];
+ var dest = [];
+ for (var i in source) {
+ dest[i] = {sample: limit, timestamp: source[i].timestamp};
+ }
+ stats[limit_key + '_details'] = {samples: dest};
+}
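+
+// Illustration (hypothetical keys and values): with
+// stats = {mem_used_details: {samples: [{sample: 512, timestamp: 1000},
+// {sample: 640, timestamp: 2000}]}, mem_limit: 1024},
+// add_fake_limit_details('mem_used', 'mem_limit', stats) sets
+// stats.mem_limit_details to a flat series of 1024 at the same timestamps,
+// so the limit can be charted alongside the usage line.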
+
+function node_stat_bar(used_key, limit_key, suffix, stats, fmt, colour,
+ help, invert) {
+ var used = stats[used_key];
+ var limit = stats[limit_key];
+ var width = 120;
+
+ var res = '';
+ var other_colour = colour;
+ var ratio = invert ? (limit / used) : (used / limit);
+ if (ratio > 1) {
+ ratio = 1 / ratio;
+ inverted = true;
+ colour += '-dark';
+ }
+ else {
+ other_colour += '-dark';
+ }
+ var offset = Math.round(width * (1 - ratio));
+
+ res += '<div class="status-bar" style="width: ' + width + 'px;">';
+ res += '<div class="status-bar-main ' + colour + '" style="background-image: url(img/bg-' + other_colour + '.png); background-position: -' + offset + 'px 0px; background-repeat: no-repeat;">';
+ res += fmt(used);
+ if (help != null) {
+ res += ' <span class="help" id="' + help + '"></span>';
+ }
+ res += '</div>'; // status-bar-main
+ res += '<sub>' + fmt(limit) + ' ' + suffix + '</sub>';
+ res += '</div>'; // status-bar
+
+ return res;
+}
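+
+// Illustration of the bar geometry above (hypothetical numbers): with
+// used = 30 and limit = 120, ratio = 0.25 and
+// offset = Math.round(120 * (1 - 0.25)) = 90, so the '-dark' background
+// image is shifted 90px left, leaving its final 30px (the used fraction)
+// visible at the start of the 120px bar.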
+
+function node_stats_prefs() {
+ return chart_h3('node-stats', 'Node statistics');
+}
+
+function rates_chart(type_id, id, items, stats, fmt, axis_fmt, type,
+ chart_rates) {
+ function show(key) {
+ return get_pref('chart-line-' + id + key) === 'true';
+ }
+
+ var size = get_pref('chart-size-' + type_id);
+ var legend = [];
+ chart_data[id] = {};
+ chart_data[id]['data'] = {};
+ chart_data[id]['fmt'] = axis_fmt;
+ var ix = 0;
+ for (var i in items) {
+ var name = items[i][0];
+ var key = items[i][1];
+ var key_details = key + '_details';
+ if (key_details in stats) {
+ if (show(key)) {
+ chart_data[id]['data'][name] = stats[key_details];
+ chart_data[id]['data'][name].ix = ix;
+ }
+ var value = chart_rates ? pick_rate(fmt, stats, key) :
+ pick_abs(fmt, stats, key);
+ legend.push({name: name,
+ key: key,
+ value: value,
+ show: show(key)});
+ ix++;
+ }
+ }
+ var html = '<div class="box"><div id="chart-' + id +
+ '" class="chart chart-' + type + ' chart-' + size +
+ (chart_rates ? ' chart-rates' : '') + '"></div>';
+ html += '<table class="legend">';
+ for (var i = 0; i < legend.length; i++) {
+ if (i % 3 == 0 && i < legend.length - 1) {
+ html += '</table><table class="legend">';
+ }
+
+ html += '<tr><th><span title="Click to toggle line" ';
+ html += 'class="rate-visibility-option';
+ html += legend[i].show ? '' : ' rate-visibility-option-hidden';
+ html += '" data-pref="chart-line-' + id + legend[i].key + '">';
+ html += legend[i].name + '</span></th><td>';
+ html += '<div class="colour-key" style="background: ' + chart_colors[type][i];
+ html += ';"></div>' + legend[i].value + '</td></tr>'
+ }
+ html += '</table></div>';
+ return legend.length > 0 ? html : '';
+}
+
+function rates_text(items, stats, mode, fmt, chart_rates) {
+ var res = '';
+ for (var i in items) {
+ var name = items[i][0];
+ var key = items[i][1];
+ var key_details = key + '_details';
+ if (key_details in stats) {
+ var details = stats[key_details];
+ res += '<div class="highlight">' + name + '<strong>';
+ res += chart_rates ? pick_rate(fmt, stats, key, mode) :
+ pick_abs(fmt, stats, key, mode);
+ res += '</strong></div>';
+ }
+ }
+ return res == '' ? '' : '<div class="box">' + res + '</div>';
+}
+
+//
+// Rendering side
+//
+
+function render_charts() {
+ $('.chart').map(function() {
+ render_chart($(this));
+ });
+}
+
+var chart_colors = {full: ['#edc240', '#afd8f8', '#cb4b4b', '#4da74d', '#9440ed', '#666666', '#aaaaaa',
+ '#7c79c3', '#8e6767', '#67808e', '#e5e4ae', '#4b4a55', '#bba0c1'],
+ node: ['#6ae26a', '#e24545']};
+
+var chart_chrome = {
+ series: { lines: { show: true } },
+ grid: { borderWidth: 2, borderColor: "#aaa" },
+ xaxis: { tickColor: "#fff", mode: "time", timezone: "browser" },
+ yaxis: { tickColor: "#eee", min: 0 },
+ legend: { show: false }
+};
+
+function chart_fill(mode, i) {
+ return mode == 'node' && i == 0;
+}
+
+function render_chart(div) {
+ var id = div.attr('id').substring('chart-'.length);
+ var rate_mode = div.hasClass('chart-rates');
+ var out_data = [];
+ var data = chart_data[id]['data'];
+ var fmt = chart_data[id]['fmt'];
+
+ var mode = div.hasClass('chart-full') ? 'full': 'node';
+ var colors = chart_colors[mode];
+
+ for (var name in data) {
+ var series = data[name];
+ var samples = series.samples;
+ var i = series.ix;
+ var d = [];
+ for (var j = 1; j < samples.length; j++) {
+ var x = samples[j].timestamp;
+ var y;
+ if (rate_mode) {
+ // TODO This doesn't work well if you are looking at
+ // stuff in the browser that is finer granularity than
+ // the data we have in the DB (and thus we get
+ // duplicated entries). Do we care? We should just
+ // never allow that...
+ y = (samples[j - 1].sample - samples[j].sample) * 1000 /
+ (samples[j - 1].timestamp - samples[j].timestamp);
+ }
+ else {
+ y = samples[j].sample;
+ }
+ d.push([x, y]);
+ }
+ out_data.push({data: d, color: colors[i], shadowSize: 0,
+ lines: {show: true, fill: chart_fill(mode, i)}});
+ }
+ chart_data[id] = {};
+
+ chart_chrome.yaxis.tickFormatter = fmt_y_axis(fmt);
+ $.plot(div, out_data, chart_chrome);
+}
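+
+// Worked example of the rate calculation above (hypothetical samples):
+// with samples[j-1] = {sample: 1500, timestamp: 10000} and
+// samples[j] = {sample: 1000, timestamp: 5000}, the plotted value is
+// (1500 - 1000) * 1000 / (10000 - 5000) = 100 units per second.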
+
+function fmt_y_axis(fmt) {
+ return function (val, axis) {
+ // axis.ticks seems to include the bottom value but not the top
+ if (axis.max == 1 && axis.ticks.length > 1) {
+ var newTicks = [axis.ticks[0]];
+ axis.ticks = newTicks;
+ }
+ return fmt(val, axis.max);
+ }
+}
+
+function update_rate_options(sammy) {
+ var id = sammy.params['id'];
+ store_pref('rate-mode-' + id, sammy.params['mode']);
+ store_pref('chart-size-' + id, sammy.params['size']);
+ store_pref('chart-range', sammy.params['range']);
+ partial_update();
+}
diff --git a/deps/rabbitmq_management/priv/www/js/dispatcher.js b/deps/rabbitmq_management/priv/www/js/dispatcher.js
new file mode 100644
index 0000000000..d2842c2da8
--- /dev/null
+++ b/deps/rabbitmq_management/priv/www/js/dispatcher.js
@@ -0,0 +1,345 @@
+dispatcher_add(function(sammy) {
+ function path(p, r, t) {
+ sammy.get(p, function() {
+ render(r, t, p);
+ });
+ }
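+ // e.g. path('#/policies', {'policies': '/policies'}, 'policies') registers
+ // a GET handler for '#/policies' that renders the 'policies' template with
+ // the data fetched from '/policies' (see the registrations below).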
+ sammy.get('#/', function() {
+ var reqs = {'overview': {path: '/overview',
+ options: {ranges: ['lengths-over',
+ 'msg-rates-over']}},
+ 'vhosts': '/vhosts'};
+ if (user_monitor) {
+ reqs['nodes'] = '/nodes';
+ }
+ render(reqs, 'overview', '#/');
+ });
+
+ path('#/cluster-name', {'cluster_name': '/cluster-name'}, 'cluster-name');
+ sammy.put('#/cluster-name', function() {
+ if (sync_put(this, '/cluster-name')) {
+ setup_global_vars();
+ update();
+ }
+ return false;
+ });
+
+ sammy.get('#/nodes/:name', function() {
+ var name = esc(this.params['name']);
+ render({'node': {path: '/nodes/' + name,
+ options: {ranges: ['node-stats']}}},
+ 'node', '');
+ });
+
+ sammy.get('#/connections', function() {
+ renderConnections();
+ });
+
+
+ sammy.get('#/connections/:name', function() {
+ var name = esc(this.params['name']);
+ render({'connection': {path: '/connections/' + name,
+ options: {ranges: ['data-rates-conn']}},
+ 'channels': '/connections/' + name + '/channels'},
+ 'connection', '#/connections');
+ });
+ sammy.del('#/connections', function() {
+ var options = {headers: {
+ 'X-Reason': this.params['reason']
+ }};
+ if (sync_delete(this, '/connections/:name', options)) {
+ go_to('#/connections');
+ }
+
+ return false;
+ });
+
+ sammy.get('#/channels', function() {
+ renderChannels();
+ });
+
+ sammy.get('#/channels/:name', function() {
+ render({'channel': {path: '/channels/' + esc(this.params['name']),
+ options:{ranges:['data-rates-ch','msg-rates-ch']}}},
+ 'channel', '#/channels');
+ });
+
+
+ sammy.get('#/exchanges', function() {
+ renderExchanges();
+ });
+
+
+ sammy.get('#/exchanges/:vhost/:name', function() {
+ var path = '/exchanges/' + esc(this.params['vhost']) + '/' + esc(this.params['name']);
+ render({'exchange': {path: path,
+ options: {ranges:['msg-rates-x']}},
+ 'bindings_source': path + '/bindings/source',
+ 'bindings_destination': path + '/bindings/destination'},
+ 'exchange', '#/exchanges');
+ });
+ sammy.put('#/exchanges', function() {
+ if (sync_put(this, '/exchanges/:vhost/:name'))
+ update();
+ return false;
+ });
+ sammy.del('#/exchanges', function() {
+ if (sync_delete(this, '/exchanges/:vhost/:name'))
+ go_to('#/exchanges');
+ return false;
+ });
+ sammy.post('#/exchanges/publish', function() {
+ publish_msg(this.params);
+ return false;
+ });
+
+ sammy.get('#/queues', function() {
+ renderQueues();
+ });
+
+ sammy.get('#/queues/:vhost/:name', function() {
+ var path = '/queues/' + esc(this.params['vhost']) + '/' + esc(this.params['name']);
+ render({'queue': {path: path,
+ options: {ranges:['lengths-q', 'msg-rates-q', 'data-rates-q']}},
+ 'bindings': path + '/bindings'}, 'queue', '#/queues');
+ });
+ sammy.put('#/queues', function() {
+ if (sync_put(this, '/queues/:vhost/:name'))
+ update();
+ return false;
+ });
+ sammy.del('#/queues', function() {
+ if (this.params['mode'] == 'delete') {
+ if (sync_delete(this, '/queues/:vhost/:name'))
+ go_to('#/queues');
+ }
+ else if (this.params['mode'] == 'purge') {
+ if (sync_delete(this, '/queues/:vhost/:name/contents')) {
+ show_popup('info', "Queue purged");
+ partial_update();
+ }
+ }
+ return false;
+ });
+ sammy.post('#/queues/get', function() {
+ get_msgs(this.params);
+ return false;
+ });
+ sammy.post('#/queues/actions', function() {
+ if (sync_post(this, '/queues/:vhost/:name/actions')) {
+ // We can't refresh fast enough, it's racy. So grey
+ // the button and wait for a normal refresh.
+ $('#action-button').addClass('wait').prop('disabled', true);
+ }
+ return false;
+ });
+ sammy.post('#/bindings', function() {
+ if (sync_post(this, '/bindings/:vhost/e/:source/:destination_type/:destination'))
+ update();
+ return false;
+ });
+ sammy.del('#/bindings', function() {
+ if (sync_delete(this, '/bindings/:vhost/e/:source/:destination_type/:destination/:properties_key'))
+ update();
+ return false;
+ });
+
+ path('#/vhosts', {'vhosts': {path: '/vhosts',
+ options: {sort:true}},
+ 'permissions': '/permissions'}, 'vhosts');
+ sammy.get('#/vhosts/:id', function() {
+ render({'vhost': {path: '/vhosts/' + esc(this.params['id']),
+ options: {ranges: ['lengths-vhost',
+ 'msg-rates-vhost',
+ 'data-rates-vhost']}},
+ 'permissions': '/vhosts/' + esc(this.params['id']) + '/permissions',
+ 'topic_permissions': '/vhosts/' + esc(this.params['id']) + '/topic-permissions',
+ 'users': '/users/',
+ 'exchanges' : '/exchanges/' + esc(this.params['id'])},
+ 'vhost', '#/vhosts');
+ });
+
+ sammy.put('#/vhosts', function() {
+ if (sync_put(this, '/vhosts/:name')) {
+ update_vhosts();
+ update();
+ }
+ return false;
+ });
+ sammy.del('#/vhosts', function() {
+ if (sync_delete(this, '/vhosts/:name')) {
+ update_vhosts();
+ go_to('#/vhosts');
+ }
+ return false;
+ });
+
+ path('#/users', {'users': {path: '/users',
+ options: {sort:true}},
+ 'permissions': '/permissions'}, 'users');
+ sammy.get('#/users/:id', function() {
+ var vhosts = JSON.parse(sync_get('/vhosts'));
+ render({'user': '/users/' + esc(this.params['id']),
+ 'permissions': '/users/' + esc(this.params['id']) + '/permissions',
+ 'topic_permissions': '/users/' + esc(this.params['id']) + '/topic-permissions',
+ 'vhosts': '/vhosts/',
+ 'exchanges': '/exchanges/' + esc(vhosts[0].name)}, 'user',
+ '#/users');
+ });
+ sammy.put('#/users-add', function() {
+ var res = sync_put(this, '/users/:username');
+ if (res) {
+ if (res.http_status === 204) {
+ var username = res.req_params.username;
+ show_popup('warn', "Updated an existing user: '" + username + "'");
+ }
+ update();
+ }
+ return false;
+ });
+ sammy.put('#/users-modify', function() {
+ if (sync_put(this, '/users/:username'))
+ go_to('#/users');
+ return false;
+ });
+ sammy.del('#/users', function() {
+ if (sync_delete(this, '/users/:username'))
+ go_to('#/users');
+ return false;
+ });
+
+ path('#/feature-flags', {'feature_flags': {path: '/feature-flags',
+ options: {sort:true}},
+ 'permissions': '/permissions'}, 'feature-flags');
+ sammy.put('#/feature-flags-enable', function() {
+ if (sync_put(this, '/feature-flags/:name/enable'))
+ update();
+ return false;
+ });
+
+ sammy.put('#/permissions', function() {
+ if (sync_put(this, '/permissions/:vhost/:username'))
+ update();
+ return false;
+ });
+ sammy.del('#/permissions', function() {
+ if (sync_delete(this, '/permissions/:vhost/:username'))
+ update();
+ return false;
+ });
+ sammy.put('#/topic-permissions', function() {
+ if (sync_put(this, '/topic-permissions/:vhost/:username'))
+ update();
+ return false;
+ });
+ sammy.del('#/topic-permissions', function() {
+ if (sync_delete(this, '/topic-permissions/:vhost/:username/:exchange'))
+ update();
+ return false;
+ });
+ path('#/policies', {'policies': '/policies',
+ 'operator_policies': '/operator-policies',
+ 'vhosts': '/vhosts'}, 'policies');
+ sammy.get('#/policies/:vhost/:id', function() {
+ render({'policy': '/policies/' + esc(this.params['vhost'])
+ + '/' + esc(this.params['id'])},
+ 'policy', '#/policies');
+ });
+ sammy.put('#/policies', function() {
+ put_cast_params(this, '/policies/:vhost/:name',
+ ['name', 'pattern', 'policy'], ['priority'], []);
+ return false;
+ });
+ sammy.del('#/policies', function() {
+ if (sync_delete(this, '/policies/:vhost/:name'))
+ go_to('#/policies');
+ return false;
+ });
+ sammy.put('#/operator_policies', function() {
+ this.params = rename_multifield(this.params, "definitionop", "definition");
+ put_cast_params(this, '/operator-policies/:vhost/:name',
+ ['name', 'pattern', 'policy'], ['priority'], []);
+ return false;
+ });
+ sammy.del('#/operator_policies', function() {
+ if (sync_delete(this, '/operator-policies/:vhost/:name'))
+ update();
+ });
+
+ sammy.put('#/logout', function() {
+ // clear a local storage value used by earlier versions
+ clear_pref('auth');
+ clear_cookie_value('auth');
+ if (uaa_logged_in) {
+ clear_pref('uaa_token');
+ var redirect;
+ if (window.location.hash != "") {
+ redirect = window.location.href.split(window.location.hash)[0];
+ } else {
+ redirect = window.location.href;
+ }
+ uaa_logged_in = false;
+ var logoutRedirectUrl = Singular.properties.uaaLocation + '/logout.do?client_id=' + Singular.properties.clientId + '&redirect=' + redirect;
+ get(logoutRedirectUrl, "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8", function(req) { });
+ }
+ location.reload();
+ });
+
+ sammy.put('#/rate-options', function() {
+ update_rate_options(this);
+ });
+ sammy.put('#/column-options', function() {
+ update_column_options(this);
+ });
+ path('#/limits', {'limits': '/vhost-limits',
+ 'user_limits': '/user-limits',
+ 'users': '/users',
+ 'vhosts': '/vhosts'}, 'limits');
+
+ sammy.put('#/limits', function() {
+ var valAsInt = parseInt(this.params.value);
+ if (isNaN(valAsInt)) {
+ var e = 'Validation failed\n\n' +
+ this.params.name + ' should be a number, actually was "' +
+ this.params.value + '"';
+ show_popup('warn', fmt_escape_html(e));
+ } else {
+ this.params.value = valAsInt;
+ if (sync_put(this, '/vhost-limits/:vhost/:name')) {
+ update();
+ }
+ }
+ });
+ sammy.put('#/user-limits', function() {
+ var valAsInt = parseInt(this.params.value);
+ if (isNaN(valAsInt)) {
+ var e = 'Validation failed\n\n' +
+ this.params.name + ' should be a number, actually was "' +
+ this.params.value + '"';
+ show_popup('warn', fmt_escape_html(e));
+ } else {
+ this.params.value = valAsInt;
+ if (sync_put(this, '/user-limits/:user/:name')) {
+ update();
+ }
+ }
+ });
+ sammy.post('#/restart_vhost', function() {
+ if (sync_post(this, '/vhosts/:vhost/start/:node')) update();
+ });
+ sammy.del('#/limits', function() {
+ if (sync_delete(this, '/vhost-limits/:vhost/:name')) update();
+ });
+ sammy.del('#/user-limits', function() {
+ if (sync_delete(this, '/user-limits/:user/:name')) update();
+ });
+ sammy.del("#/reset", function(){
+ if(sync_delete(this, '/reset')){
+ update();
+ }
+ });
+ sammy.del("#/reset_node", function(){
+ if(sync_delete(this, '/reset/:node')){
+ update();
+ }
+ });
+});
diff --git a/deps/rabbitmq_management/priv/www/js/ejs-1.0.js b/deps/rabbitmq_management/priv/www/js/ejs-1.0.js
new file mode 100644
index 0000000000..259d87f748
--- /dev/null
+++ b/deps/rabbitmq_management/priv/www/js/ejs-1.0.js
@@ -0,0 +1,505 @@
+(function(){
+
+
+var rsplit = function(string, regex) {
+ var result = regex.exec(string),retArr = new Array(), first_idx, last_idx, first_bit;
+ while (result != null)
+ {
+ first_idx = result.index; last_idx = regex.lastIndex;
+ if ((first_idx) != 0)
+ {
+ first_bit = string.substring(0,first_idx);
+ retArr.push(string.substring(0,first_idx));
+ string = string.slice(first_idx);
+ }
+ retArr.push(result[0]);
+ string = string.slice(result[0].length);
+ result = regex.exec(string);
+ }
+ if (! string == '')
+ {
+ retArr.push(string);
+ }
+ return retArr;
+},
+chop = function(string){
+ return string.substr(0, string.length - 1);
+},
+extend = function(d, s){
+ for(var n in s){
+ if(s.hasOwnProperty(n)) d[n] = s[n]
+ }
+}
+
+
+EJS = function( options ){
+ options = typeof options == "string" ? {view: options} : options
+ this.set_options(options);
+ if(options.precompiled){
+ this.template = {};
+ this.template.process = options.precompiled;
+ EJS.update(this.name, this);
+ return;
+ }
+ if(options.element)
+ {
+ if(typeof options.element == 'string'){
+ var name = options.element
+ options.element = document.getElementById( options.element )
+ if(options.element == null) throw name+' does not exist!'
+ }
+ if(options.element.value){
+ this.text = options.element.value
+ }else{
+ this.text = options.element.innerHTML
+ }
+ this.name = options.element.id
+ this.type = '['
+ }else if(options.url){
+ options.url = EJS.endExt(options.url, this.extMatch);
+ this.name = this.name ? this.name : options.url;
+ var url = options.url
+ //options.view = options.absolute_url || options.view || options.;
+ var template = EJS.get(this.name /*url*/, this.cache);
+ if (template) return template;
+ if (template == EJS.INVALID_PATH) return null;
+ try{
+ this.text = EJS.request( url+(this.cache ? '' : '?'+Math.random() ));
+ }catch(e){}
+
+ if(this.text == null){
+ throw( {type: 'EJS', message: 'There is no template at '+url} );
+ }
+ //this.name = url;
+ }
+ var template = new EJS.Compiler(this.text, this.type);
+
+ template.compile(options, this.name);
+
+
+ EJS.update(this.name, this);
+ this.template = template;
+};
+/* @Prototype*/
+EJS.prototype = {
+ /**
+ * Renders an object with extra view helpers attached to the view.
+ * @param {Object} object data to be rendered
+ * @param {Object} extra_helpers an object with additional view helpers
+ * @return {String} the rendered result as a string
+ */
+ render : function(object, extra_helpers){
+ object = object || {};
+ this._extra_helpers = extra_helpers;
+ var v = new EJS.Helpers(object, extra_helpers || {});
+ return this.template.process.call(object, object,v);
+ },
+ update : function(element, options){
+ if(typeof element == 'string'){
+ element = document.getElementById(element)
+ }
+ if(options == null){
+ _template = this;
+ return function(object){
+ EJS.prototype.update.call(_template, element, object)
+ }
+ }
+ if(typeof options == 'string'){
+ params = {}
+ params.url = options
+ _template = this;
+ params.onComplete = function(request){
+ var object = eval( request.responseText )
+ EJS.prototype.update.call(_template, element, object)
+ }
+ EJS.ajax_request(params)
+ }else
+ {
+ element.innerHTML = this.render(options)
+ }
+ },
+ out : function(){
+ return this.template.out;
+ },
+ /**
+ * Sets options on this view to be rendered with.
+ * @param {Object} options
+ */
+ set_options : function(options){
+ this.type = options.type || EJS.type;
+ this.cache = options.cache != null ? options.cache : EJS.cache;
+ this.text = options.text || null;
+ this.name = options.name || null;
+ this.ext = options.ext || EJS.ext;
+ this.extMatch = new RegExp(this.ext.replace(/\./, '\.'));
+ }
+};
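+
+/* Usage sketch (an illustration, not part of the original file; the
+ * template URL and data are hypothetical):
+ *
+ * var html = new EJS({url: '/templates/queue.ejs'}).render({name: 'q1'});
+ * // or compile from text and write straight into an element:
+ * new EJS({text: 'Hello <%= name %>'}).update('some-div', {name: 'q1'});
+ */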
+EJS.endExt = function(path, match){
+ if(!path) return null;
+ match.lastIndex = 0
+ return path+ (match.test(path) ? '' : this.ext )
+}
+
+
+
+
+/* @Static*/
+EJS.Scanner = function(source, left, right) {
+
+ extend(this,
+ {left_delimiter: left +'%',
+ right_delimiter: '%'+right,
+ double_left: left+'%%',
+ double_right: '%%'+right,
+ left_equal: left+'%=',
+ left_comment: left+'%#'})
+
+ this.SplitRegexp = left=='[' ? /(\[%%)|(%%\])|(\[%=)|(\[%#)|(\[%)|(%\]\n)|(%\])|(\n)/ : new RegExp('('+this.double_left+')|(%%'+this.double_right+')|('+this.left_equal+')|('+this.left_comment+')|('+this.left_delimiter+')|('+this.right_delimiter+'\n)|('+this.right_delimiter+')|(\n)') ;
+
+ this.source = source;
+ this.stag = null;
+ this.lines = 0;
+};
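+
+// With left = '<' the scanner recognizes <% code %>, <%= expression %> and
+// <%# comment %>; doubled delimiters such as <%% emit a literal left tag.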
+
+EJS.Scanner.to_text = function(input){
+ if(input == null || input === undefined)
+ return '';
+ if(input instanceof Date)
+ return input.toDateString();
+ if(input.toString)
+ return input.toString();
+ return '';
+};
+
+EJS.Scanner.prototype = {
+ scan: function(block) {
+ scanline = this.scanline;
+ regex = this.SplitRegexp;
+ if (! this.source == '')
+ {
+ var source_split = rsplit(this.source, /\n/);
+ for(var i=0; i<source_split.length; i++) {
+ var item = source_split[i];
+ this.scanline(item, regex, block);
+ }
+ }
+ },
+ scanline: function(line, regex, block) {
+ this.lines++;
+ var line_split = rsplit(line, regex);
+ for(var i=0; i<line_split.length; i++) {
+ var token = line_split[i];
+ if (token != null) {
+ try{
+ block(token, this);
+ }catch(e){
+ throw {type: 'EJS.Scanner', line: this.lines};
+ }
+ }
+ }
+ }
+};
+
+
+EJS.Buffer = function(pre_cmd, post_cmd) {
+ this.line = new Array();
+ this.script = "";
+ this.pre_cmd = pre_cmd;
+ this.post_cmd = post_cmd;
+ for (var i=0; i<this.pre_cmd.length; i++)
+ {
+ this.push(pre_cmd[i]);
+ }
+};
+EJS.Buffer.prototype = {
+
+ push: function(cmd) {
+ this.line.push(cmd);
+ },
+
+ cr: function() {
+ this.script = this.script + this.line.join('; ');
+ this.line = new Array();
+ this.script = this.script + "\n";
+ },
+
+ close: function() {
+ if (this.line.length > 0)
+ {
+ for (var i=0; i<this.post_cmd.length; i++){
+ this.push(this.post_cmd[i]);
+ }
+ this.script = this.script + this.line.join('; ');
+ this.line = new Array();
+ }
+ }
+
+};
+
+
+EJS.Compiler = function(source, left) {
+ this.pre_cmd = ['var ___ViewO = [];'];
+ this.post_cmd = new Array();
+ this.source = ' ';
+ if (source != null)
+ {
+ if (typeof source == 'string')
+ {
+ source = source.replace(/\r\n/g, "\n");
+ source = source.replace(/\r/g, "\n");
+ this.source = source;
+ }else if (source.innerHTML){
+ this.source = source.innerHTML;
+ }
+ if (typeof this.source != 'string'){
+ this.source = "";
+ }
+ }
+ left = left || '<';
+ var right = '>';
+ switch(left) {
+ case '[':
+ right = ']';
+ break;
+ case '<':
+ break;
+ default:
+ throw left+' is not a supported delimiter';
+ break;
+ }
+ this.scanner = new EJS.Scanner(this.source, left, right);
+ this.out = '';
+};
+EJS.Compiler.prototype = {
+ compile: function(options, name) {
+ options = options || {};
+ this.out = '';
+ var put_cmd = "___ViewO.push(";
+ var insert_cmd = put_cmd;
+ var buff = new EJS.Buffer(this.pre_cmd, this.post_cmd);
+ var content = '';
+ var clean = function(content)
+ {
+ content = content.replace(/\\/g, '\\\\');
+ content = content.replace(/\n/g, '\\n');
+ content = content.replace(/"/g, '\\"');
+ return content;
+ };
+ this.scanner.scan(function(token, scanner) {
+ if (scanner.stag == null)
+ {
+ switch(token) {
+ case '\n':
+ content = content + "\n";
+ buff.push(put_cmd + '"' + clean(content) + '");');
+ buff.cr();
+ content = '';
+ break;
+ case scanner.left_delimiter:
+ case scanner.left_equal:
+ case scanner.left_comment:
+ scanner.stag = token;
+ if (content.length > 0)
+ {
+ buff.push(put_cmd + '"' + clean(content) + '")');
+ }
+ content = '';
+ break;
+ case scanner.double_left:
+ content = content + scanner.left_delimiter;
+ break;
+ default:
+ content = content + token;
+ break;
+ }
+ }
+ else {
+ switch(token) {
+ case scanner.right_delimiter:
+ switch(scanner.stag) {
+ case scanner.left_delimiter:
+ if (content[content.length - 1] == '\n')
+ {
+ content = chop(content);
+ buff.push(content);
+ buff.cr();
+ }
+ else {
+ buff.push(content);
+ }
+ break;
+ case scanner.left_equal:
+ buff.push(insert_cmd + "(EJS.Scanner.to_text(" + content + ")))");
+ break;
+ }
+ scanner.stag = null;
+ content = '';
+ break;
+ case scanner.double_right:
+ content = content + scanner.right_delimiter;
+ break;
+ default:
+ content = content + token;
+ break;
+ }
+ }
+ });
+ if (content.length > 0)
+ {
+ // Could be content.dump in Ruby
+ buff.push(put_cmd + '"' + clean(content) + '")');
+ }
+ buff.close();
+ this.out = buff.script + ";";
+ var to_be_evaled = '/*'+name+'*/this.process = function(_CONTEXT,_VIEW) { try { with(_VIEW) { with (_CONTEXT) {'+this.out+" return ___ViewO.join('');}}}catch(e){e.lineNumber=null;throw e;}};";
+
+ try{
+ eval(to_be_evaled);
+ }catch(e){
+ if(typeof JSLINT != 'undefined'){
+ JSLINT(this.out);
+ for(var i = 0; i < JSLINT.errors.length; i++){
+ var error = JSLINT.errors[i];
+ if(error.reason != "Unnecessary semicolon."){
+ error.line++;
+ var e = new Error();
+ e.lineNumber = error.line;
+ e.message = error.reason;
+ if(options.view)
+ e.fileName = options.view;
+ throw e;
+ }
+ }
+ }else{
+ throw e;
+ }
+ }
+ }
+};
+
+
+//type, cache, folder
+/**
+ * Sets default options for all views
+ * @param {Object} options Set view with the following options
+ * <table class="options">
+ <tbody><tr><th>Option</th><th>Default</th><th>Description</th></tr>
+ <tr>
+ <td>type</td>
+ <td>'<'</td>
+ <td>type of magic tags. Options are '&lt;' or '['
+ </td>
+ </tr>
+ <tr>
+ <td>cache</td>
+ <td>true in production mode, false in other modes</td>
+ <td>true to cache template.
+ </td>
+ </tr>
+ </tbody></table>
+ *
+ */
+EJS.config = function(options){
+ EJS.cache = options.cache != null ? options.cache : EJS.cache;
+ EJS.type = options.type != null ? options.type : EJS.type;
+ EJS.ext = options.ext != null ? options.ext : EJS.ext;
+
+ var templates_directory = EJS.templates_directory || {}; //nice and private container
+ EJS.templates_directory = templates_directory;
+ EJS.get = function(path, cache){
+ if(cache == false) return null;
+ if(templates_directory[path]) return templates_directory[path];
+ return null;
+ };
+
+ EJS.update = function(path, template) {
+ if(path == null) return;
+ templates_directory[path] = template ;
+ };
+
+ EJS.INVALID_PATH = -1;
+};
+EJS.config( {cache: true, type: '<', ext: '.ejs' } );
+
+
+
+/**
+ * @constructor
+ * By adding functions to EJS.Helpers.prototype, those functions will be available in the
+ * views.
+ * @init Creates a view helper. This function is called internally. You should never call it.
+ * @param {Object} data The data passed to the view. Helpers have access to it through this._data
+ */
+EJS.Helpers = function(data, extras){
+ this._data = data;
+ this._extras = extras;
+ extend(this, extras );
+};
+/* @prototype*/
+EJS.Helpers.prototype = {
+ /**
+ * Renders a new view. If data is passed in, uses that to render the view.
+ * @param {Object} options standard options passed to a new view.
+ * @param {optional:Object} data
+ * @return {String}
+ */
+ view: function(options, data, helpers){
+ if(!helpers) helpers = this._extras
+ if(!data) data = this._data;
+ return new EJS(options).render(data, helpers);
+ },
+ /**
+ * For a given value, tries to create a human representation.
+ * @param {Object} input the value being converted.
+ * @param {Object} null_text what text should be present if input == null or undefined, defaults to ''
+ * @return {String}
+ */
+ to_text: function(input, null_text) {
+ if(input == null || input === undefined) return null_text || '';
+ if(input instanceof Date) return input.toDateString();
+ if(input.toString) return input.toString().replace(/\n/g, '<br />').replace(/''/g, "'");
+ return '';
+ }
+};
+ EJS.newRequest = function(){
+ var factories = [function() { return new ActiveXObject("Msxml2.XMLHTTP"); },function() { return new XMLHttpRequest(); },function() { return new ActiveXObject("Microsoft.XMLHTTP"); }];
+ for(var i = 0; i < factories.length; i++) {
+ try {
+ var request = factories[i]();
+ if (request != null) return request;
+ }
+ catch(e) { continue;}
+ }
+ }
+
+ EJS.request = function(path){
+ var request = new EJS.newRequest()
+ request.open("GET", path, false);
+
+ try{request.send(null);}
+ catch(e){return null;}
+
+ if ( request.status == 404 || request.status == 2 ||(request.status == 0 && request.responseText == '') ) return null;
+
+ return request.responseText
+ }
+ EJS.ajax_request = function(params){
+ params.method = ( params.method ? params.method : 'GET')
+
+ var request = new EJS.newRequest();
+ request.onreadystatechange = function(){
+ if(request.readyState == 4){
+ if(request.status == 200){
+ params.onComplete(request)
+ }else
+ {
+ params.onComplete(request)
+ }
+ }
+ }
+ request.open(params.method, params.url)
+ request.send(null)
+ }
+
+
+})();
\ No newline at end of file
diff --git a/deps/rabbitmq_management/priv/www/js/ejs-1.0.min.js b/deps/rabbitmq_management/priv/www/js/ejs-1.0.min.js
new file mode 100644
index 0000000000..ccce9220ee
--- /dev/null
+++ b/deps/rabbitmq_management/priv/www/js/ejs-1.0.min.js
@@ -0,0 +1 @@
+(function(){var rsplit=function(string,regex){var result=regex.exec(string),retArr=new Array(),first_idx,last_idx,first_bit;while(result!=null){first_idx=result.index;last_idx=regex.lastIndex;if((first_idx)!=0){first_bit=string.substring(0,first_idx);retArr.push(string.substring(0,first_idx));string=string.slice(first_idx)}retArr.push(result[0]);string=string.slice(result[0].length);result=regex.exec(string)}if(!string==""){retArr.push(string)}return retArr},chop=function(string){return string.substr(0,string.length-1)},extend=function(d,s){for(var n in s){if(s.hasOwnProperty(n)){d[n]=s[n]}}};EJS=function(options){options=typeof options=="string"?{view:options}:options;this.set_options(options);if(options.precompiled){this.template={};this.template.process=options.precompiled;EJS.update(this.name,this);return }if(options.element){if(typeof options.element=="string"){var name=options.element;options.element=document.getElementById(options.element);if(options.element==null){throw name+"does not exist!"}}if(options.element.value){this.text=options.element.value}else{this.text=options.element.innerHTML}this.name=options.element.id;this.type="["}else{if(options.url){options.url=EJS.endExt(options.url,this.extMatch);this.name=this.name?this.name:options.url;var url=options.url;var template=EJS.get(this.name,this.cache);if(template){return template}if(template==EJS.INVALID_PATH){return null}try{this.text=EJS.request(url+(this.cache?"":"?"+Math.random()))}catch(e){}if(this.text==null){throw ({type:"EJS",message:"There is no template at "+url})}}}var template=new EJS.Compiler(this.text,this.type);template.compile(options,this.name);EJS.update(this.name,this);this.template=template};EJS.prototype={render:function(object,extra_helpers){object=object||{};this._extra_helpers=extra_helpers;var v=new EJS.Helpers(object,extra_helpers||{});return this.template.process.call(object,object,v)},update:function(element,options){if(typeof element=="string"){element=document.getElementById(element)}if(options==null){_template=this;return function(object){EJS.prototype.update.call(_template,element,object)}}if(typeof options=="string"){params={};params.url=options;_template=this;params.onComplete=function(request){var object=eval(request.responseText);EJS.prototype.update.call(_template,element,object)};EJS.ajax_request(params)}else{element.innerHTML=this.render(options)}},out:function(){return this.template.out},set_options:function(options){this.type=options.type||EJS.type;this.cache=options.cache!=null?options.cache:EJS.cache;this.text=options.text||null;this.name=options.name||null;this.ext=options.ext||EJS.ext;this.extMatch=new RegExp(this.ext.replace(/\./,"."))}};EJS.endExt=function(path,match){if(!path){return null}match.lastIndex=0;return path+(match.test(path)?"":this.ext)};EJS.Scanner=function(source,left,right){extend(this,{left_delimiter:left+"%",right_delimiter:"%"+right,double_left:left+"%%",double_right:"%%"+right,left_equal:left+"%=",left_comment:left+"%#"});this.SplitRegexp=left=="["?/(\[%%)|(%%\])|(\[%=)|(\[%#)|(\[%)|(%\]\n)|(%\])|(\n)/:new RegExp("("+this.double_left+")|(%%"+this.double_right+")|("+this.left_equal+")|("+this.left_comment+")|("+this.left_delimiter+")|("+this.right_delimiter+"\n)|("+this.right_delimiter+")|(\n)");this.source=source;this.stag=null;this.lines=0};EJS.Scanner.to_text=function(input){if(input==null||input===undefined){return""}if(input instanceof Date){return input.toDateString()}if(input.toString){return 
input.toString()}return""};EJS.Scanner.prototype={scan:function(block){scanline=this.scanline;regex=this.SplitRegexp;if(!this.source==""){var source_split=rsplit(this.source,/\n/);for(var i=0;i<source_split.length;i++){var item=source_split[i];this.scanline(item,regex,block)}}},scanline:function(line,regex,block){this.lines++;var line_split=rsplit(line,regex);for(var i=0;i<line_split.length;i++){var token=line_split[i];if(token!=null){try{block(token,this)}catch(e){throw {type:"EJS.Scanner",line:this.lines}}}}}};EJS.Buffer=function(pre_cmd,post_cmd){this.line=new Array();this.script="";this.pre_cmd=pre_cmd;this.post_cmd=post_cmd;for(var i=0;i<this.pre_cmd.length;i++){this.push(pre_cmd[i])}};EJS.Buffer.prototype={push:function(cmd){this.line.push(cmd)},cr:function(){this.script=this.script+this.line.join("; ");this.line=new Array();this.script=this.script+"\n"},close:function(){if(this.line.length>0){for(var i=0;i<this.post_cmd.length;i++){this.push(pre_cmd[i])}this.script=this.script+this.line.join("; ");line=null}}};EJS.Compiler=function(source,left){this.pre_cmd=["var ___ViewO = [];"];this.post_cmd=new Array();this.source=" ";if(source!=null){if(typeof source=="string"){source=source.replace(/\r\n/g,"\n");source=source.replace(/\r/g,"\n");this.source=source}else{if(source.innerHTML){this.source=source.innerHTML}}if(typeof this.source!="string"){this.source=""}}left=left||"<";var right=">";switch(left){case"[":right="]";break;case"<":break;default:throw left+" is not a supported deliminator";break}this.scanner=new EJS.Scanner(this.source,left,right);this.out=""};EJS.Compiler.prototype={compile:function(options,name){options=options||{};this.out="";var put_cmd="___ViewO.push(";var insert_cmd=put_cmd;var buff=new EJS.Buffer(this.pre_cmd,this.post_cmd);var content="";var clean=function(content){content=content.replace(/\\/g,"\\\\");content=content.replace(/\n/g,"\\n");content=content.replace(/"/g,'\\"');return content};this.scanner.scan(function(token,scanner){if(scanner.stag==null){switch(token){case"\n":content=content+"\n";buff.push(put_cmd+'"'+clean(content)+'");');buff.cr();content="";break;case scanner.left_delimiter:case scanner.left_equal:case scanner.left_comment:scanner.stag=token;if(content.length>0){buff.push(put_cmd+'"'+clean(content)+'")')}content="";break;case scanner.double_left:content=content+scanner.left_delimiter;break;default:content=content+token;break}}else{switch(token){case scanner.right_delimiter:switch(scanner.stag){case scanner.left_delimiter:if(content[content.length-1]=="\n"){content=chop(content);buff.push(content);buff.cr()}else{buff.push(content)}break;case scanner.left_equal:buff.push(insert_cmd+"(EJS.Scanner.to_text("+content+")))");break}scanner.stag=null;content="";break;case scanner.double_right:content=content+scanner.right_delimiter;break;default:content=content+token;break}}});if(content.length>0){buff.push(put_cmd+'"'+clean(content)+'")')}buff.close();this.out=buff.script+";";var to_be_evaled="/*"+name+"*/this.process = function(_CONTEXT,_VIEW) { try { with(_VIEW) { with (_CONTEXT) {"+this.out+" return ___ViewO.join('');}}}catch(e){e.lineNumber=null;throw e;}};";try{eval(to_be_evaled)}catch(e){if(typeof JSLINT!="undefined"){JSLINT(this.out);for(var i=0;i<JSLINT.errors.length;i++){var error=JSLINT.errors[i];if(error.reason!="Unnecessary semicolon."){error.line++;var e=new Error();e.lineNumber=error.line;e.message=error.reason;if(options.view){e.fileName=options.view}throw e}}}else{throw 
e}}}};EJS.config=function(options){EJS.cache=options.cache!=null?options.cache:EJS.cache;EJS.type=options.type!=null?options.type:EJS.type;EJS.ext=options.ext!=null?options.ext:EJS.ext;var templates_directory=EJS.templates_directory||{};EJS.templates_directory=templates_directory;EJS.get=function(path,cache){if(cache==false){return null}if(templates_directory[path]){return templates_directory[path]}return null};EJS.update=function(path,template){if(path==null){return }templates_directory[path]=template};EJS.INVALID_PATH=-1};EJS.config({cache:true,type:"<",ext:".ejs"});EJS.Helpers=function(data,extras){this._data=data;this._extras=extras;extend(this,extras)};EJS.Helpers.prototype={view:function(options,data,helpers){if(!helpers){helpers=this._extras}if(!data){data=this._data}return new EJS(options).render(data,helpers)},to_text:function(input,null_text){if(input==null||input===undefined){return null_text||""}if(input instanceof Date){return input.toDateString()}if(input.toString){return input.toString().replace(/\n/g,"<br />").replace(/''/g,"'")}return""}};EJS.newRequest=function(){var factories=[function(){return new ActiveXObject("Msxml2.XMLHTTP")},function(){return new XMLHttpRequest()},function(){return new ActiveXObject("Microsoft.XMLHTTP")}];for(var i=0;i<factories.length;i++){try{var request=factories[i]();if(request!=null){return request}}catch(e){continue}}};EJS.request=function(path){var request=new EJS.newRequest();request.open("GET",path,false);try{request.send(null)}catch(e){return null}if(request.status==404||request.status==2||(request.status==0&&request.responseText=="")){return null}return request.responseText};EJS.ajax_request=function(params){params.method=(params.method?params.method:"GET");var request=new EJS.newRequest();request.onreadystatechange=function(){if(request.readyState==4){if(request.status==200){params.onComplete(request)}else{params.onComplete(request)}}};request.open(params.method,params.url);request.send(null)}})();EJS.Helpers.prototype.date_tag=function(C,O,A){if(!(O instanceof Date)){O=new Date()}var B=["January","February","March","April","May","June","July","August","September","October","November","December"];var G=[],D=[],P=[];var J=O.getFullYear();var H=O.getMonth();var N=O.getDate();for(var M=J-15;M<J+15;M++){G.push({value:M,text:M})}for(var E=0;E<12;E++){D.push({value:(E),text:B[E]})}for(var I=0;I<31;I++){P.push({value:(I+1),text:(I+1)})}var L=this.select_tag(C+"[year]",J,G,{id:C+"[year]"});var F=this.select_tag(C+"[month]",H,D,{id:C+"[month]"});var K=this.select_tag(C+"[day]",N,P,{id:C+"[day]"});return L+F+K};EJS.Helpers.prototype.form_tag=function(B,A){A=A||{};A.action=B;if(A.multipart==true){A.method="post";A.enctype="multipart/form-data"}return this.start_tag_for("form",A)};EJS.Helpers.prototype.form_tag_end=function(){return this.tag_end("form")};EJS.Helpers.prototype.hidden_field_tag=function(A,C,B){return this.input_field_tag(A,C,"hidden",B)};EJS.Helpers.prototype.input_field_tag=function(A,D,C,B){B=B||{};B.id=B.id||A;B.value=D||"";B.type=C||"text";B.name=A;return this.single_tag_for("input",B)};EJS.Helpers.prototype.is_current_page=function(A){return(window.location.href==A||window.location.pathname==A?true:false)};EJS.Helpers.prototype.link_to=function(B,A,C){if(!B){var B="null"}if(!C){var C={}}if(C.confirm){C.onclick=' var ret_confirm = confirm("'+C.confirm+'"); if(!ret_confirm){ return false;} ';C.confirm=null}C.href=A;return this.start_tag_for("a",C)+B+this.tag_end("a")};EJS.Helpers.prototype.submit_link_to=function(B,A,C){if(!B){var 
B="null"}if(!C){var C={}}C.onclick=C.onclick||"";if(C.confirm){C.onclick=' var ret_confirm = confirm("'+C.confirm+'"); if(!ret_confirm){ return false;} ';C.confirm=null}C.value=B;C.type="submit";C.onclick=C.onclick+(A?this.url_for(A):"")+"return false;";return this.start_tag_for("input",C)};EJS.Helpers.prototype.link_to_if=function(F,B,A,D,C,E){return this.link_to_unless((F==false),B,A,D,C,E)};EJS.Helpers.prototype.link_to_unless=function(E,B,A,C,D){C=C||{};if(E){if(D&&typeof D=="function"){return D(B,A,C,D)}else{return B}}else{return this.link_to(B,A,C)}};EJS.Helpers.prototype.link_to_unless_current=function(B,A,C,D){C=C||{};return this.link_to_unless(this.is_current_page(A),B,A,C,D)};EJS.Helpers.prototype.password_field_tag=function(A,C,B){return this.input_field_tag(A,C,"password",B)};EJS.Helpers.prototype.select_tag=function(D,G,H,F){F=F||{};F.id=F.id||D;F.value=G;F.name=D;var B="";B+=this.start_tag_for("select",F);for(var E=0;E<H.length;E++){var C=H[E];var A={value:C.value};if(C.value==G){A.selected="selected"}B+=this.start_tag_for("option",A)+C.text+this.tag_end("option")}B+=this.tag_end("select");return B};EJS.Helpers.prototype.single_tag_for=function(A,B){return this.tag(A,B,"/>")};EJS.Helpers.prototype.start_tag_for=function(A,B){return this.tag(A,B)};EJS.Helpers.prototype.submit_tag=function(A,B){B=B||{};B.type=B.type||"submit";B.value=A||"Submit";return this.single_tag_for("input",B)};EJS.Helpers.prototype.tag=function(C,E,D){if(!D){var D=">"}var B=" ";for(var A in E){if(E[A]!=null){var F=E[A].toString()}else{var F=""}if(A=="Class"){A="class"}if(F.indexOf("'")!=-1){B+=A+'="'+F+'" '}else{B+=A+"='"+F+"' "}}return"<"+C+B+D};EJS.Helpers.prototype.tag_end=function(A){return"</"+A+">"};EJS.Helpers.prototype.text_area_tag=function(A,C,B){B=B||{};B.id=B.id||A;B.name=B.name||A;C=C||"";if(B.size){B.cols=B.size.split("x")[0];B.rows=B.size.split("x")[1];delete B.size}B.cols=B.cols||50;B.rows=B.rows||4;return this.start_tag_for("textarea",B)+C+this.tag_end("textarea")};EJS.Helpers.prototype.text_tag=EJS.Helpers.prototype.text_area_tag;EJS.Helpers.prototype.text_field_tag=function(A,C,B){return this.input_field_tag(A,C,"text",B)};EJS.Helpers.prototype.url_for=function(A){return'window.location="'+A+'";'};EJS.Helpers.prototype.img_tag=function(B,C,A){A=A||{};A.src=B;A.alt=C;return this.single_tag_for("img",A)} \ No newline at end of file
diff --git a/deps/rabbitmq_management/priv/www/js/excanvas.js b/deps/rabbitmq_management/priv/www/js/excanvas.js
new file mode 100644
index 0000000000..5e1915f68a
--- /dev/null
+++ b/deps/rabbitmq_management/priv/www/js/excanvas.js
@@ -0,0 +1,1428 @@
+// Copyright 2006 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+
+// Known Issues:
+//
+// * Patterns only support repeat.
+// * Radial gradients are not implemented. The VML version of these looks very
+// different from the canvas one.
+// * Clipping paths are not implemented.
+// * Coordsize. The width and height attributes have higher priority than the
+// width and height style values, which isn't correct.
+// * Painting mode isn't implemented.
+// * Canvas width/height should use content-box by default. IE in
+// Quirks mode will draw the canvas using border-box. Either change your
+// doctype to HTML5
+// (https://www.whatwg.org/specs/web-apps/current-work/#the-doctype)
+// or use Box Sizing Behavior from WebFX
+// (http://webfx.eae.net/dhtml/boxsizing/boxsizing.html)
+// * Non-uniform scaling does not correctly scale strokes.
+// * Filling very large shapes (above 5000 points) is buggy.
+// * Optimize. There is always room for speed improvements.
+
+// Only add this code if we do not already have a canvas implementation
+if (!document.createElement('canvas').getContext) {
+
+(function() {
+
+ // alias some functions to make (compiled) code shorter
+ var m = Math;
+ var mr = m.round;
+ var ms = m.sin;
+ var mc = m.cos;
+ var abs = m.abs;
+ var sqrt = m.sqrt;
+
+  // This is used for sub-pixel precision.
+ var Z = 10;
+ var Z2 = Z / 2;
+
+ var IE_VERSION = +navigator.userAgent.match(/MSIE ([\d.]+)?/)[1];
+
+ /**
+   * This function is assigned to the <canvas> elements as element.getContext().
+ * @this {HTMLElement}
+ * @return {CanvasRenderingContext2D_}
+ */
+ function getContext() {
+ return this.context_ ||
+ (this.context_ = new CanvasRenderingContext2D_(this));
+ }
+
+ var slice = Array.prototype.slice;
+
+ /**
+ * Binds a function to an object. The returned function will always use the
+ * passed in {@code obj} as {@code this}.
+ *
+ * Example:
+ *
+ * g = bind(f, obj, a, b)
+ * g(c, d) // will do f.call(obj, a, b, c, d)
+ *
+ * @param {Function} f The function to bind the object to
+ * @param {Object} obj The object that should act as this when the function
+ * is called
+ * @param {*} var_args Rest arguments that will be used as the initial
+ * arguments when the function is called
+ * @return {Function} A new function that has bound this
+ */
+ function bind(f, obj, var_args) {
+ var a = slice.call(arguments, 2);
+ return function() {
+ return f.apply(obj, a.concat(slice.call(arguments)));
+ };
+ }
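+  // Usage sketch (illustrative, not part of the upstream source): bound
+  // arguments are prepended to the call-time arguments and `this` is fixed.
+  //
+  //   var greeter = {greeting: 'hi '};
+  //   function greet(name) { return this.greeting + name; }
+  //   var hi = bind(greet, greeter);
+  //   hi('canvas'); // => 'hi canvas'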
+
+ function encodeHtmlAttribute(s) {
+ return String(s).replace(/&/g, '&amp;').replace(/"/g, '&quot;');
+ }
+
+ function addNamespace(doc, prefix, urn) {
+ if (!doc.namespaces[prefix]) {
+ doc.namespaces.add(prefix, urn, '#default#VML');
+ }
+ }
+
+ function addNamespacesAndStylesheet(doc) {
+ addNamespace(doc, 'g_vml_', 'urn:schemas-microsoft-com:vml');
+ addNamespace(doc, 'g_o_', 'urn:schemas-microsoft-com:office:office');
+
+    // Set up default CSS. Only add one style sheet per document.
+ if (!doc.styleSheets['ex_canvas_']) {
+ var ss = doc.createStyleSheet();
+ ss.owningElement.id = 'ex_canvas_';
+ ss.cssText = 'canvas{display:inline-block;overflow:hidden;' +
+ // default size is 300x150 in Gecko and Opera
+ 'text-align:left;width:300px;height:150px}';
+ }
+ }
+
+ // Add namespaces and stylesheet at startup.
+ addNamespacesAndStylesheet(document);
+
+ var G_vmlCanvasManager_ = {
+ init: function(opt_doc) {
+ var doc = opt_doc || document;
+ // Create a dummy element so that IE will allow canvas elements to be
+ // recognized.
+ doc.createElement('canvas');
+ doc.attachEvent('onreadystatechange', bind(this.init_, this, doc));
+ },
+
+ init_: function(doc) {
+ // find all canvas elements
+ var els = doc.getElementsByTagName('canvas');
+ for (var i = 0; i < els.length; i++) {
+ this.initElement(els[i]);
+ }
+ },
+
+ /**
+     * Publicly initializes a canvas element so that it can be used as a
+     * canvas element from now on. This is called automatically before the
+     * page is loaded, but if you are creating elements using createElement
+     * you need to make sure this is called on the element.
+ * @param {HTMLElement} el The canvas element to initialize.
+     * @return {HTMLElement} the element that was initialized.
+ */
+ initElement: function(el) {
+ if (!el.getContext) {
+ el.getContext = getContext;
+
+ // Add namespaces and stylesheet to document of the element.
+ addNamespacesAndStylesheet(el.ownerDocument);
+
+ // Remove fallback content. There is no way to hide text nodes so we
+ // just remove all childNodes. We could hide all elements and remove
+ // text nodes but who really cares about the fallback content.
+ el.innerHTML = '';
+
+ // do not use inline function because that will leak memory
+ el.attachEvent('onpropertychange', onPropertyChange);
+ el.attachEvent('onresize', onResize);
+
+ var attrs = el.attributes;
+ if (attrs.width && attrs.width.specified) {
+ // TODO: use runtimeStyle and coordsize
+ // el.getContext().setWidth_(attrs.width.nodeValue);
+ el.style.width = attrs.width.nodeValue + 'px';
+ } else {
+ el.width = el.clientWidth;
+ }
+ if (attrs.height && attrs.height.specified) {
+ // TODO: use runtimeStyle and coordsize
+ // el.getContext().setHeight_(attrs.height.nodeValue);
+ el.style.height = attrs.height.nodeValue + 'px';
+ } else {
+ el.height = el.clientHeight;
+ }
+ //el.getContext().setCoordsize_()
+ }
+ return el;
+ }
+ };
+
+ function onPropertyChange(e) {
+ var el = e.srcElement;
+
+ switch (e.propertyName) {
+ case 'width':
+ el.getContext().clearRect();
+ el.style.width = el.attributes.width.nodeValue + 'px';
+ // In IE8 this does not trigger onresize.
+ el.firstChild.style.width = el.clientWidth + 'px';
+ break;
+ case 'height':
+ el.getContext().clearRect();
+ el.style.height = el.attributes.height.nodeValue + 'px';
+ el.firstChild.style.height = el.clientHeight + 'px';
+ break;
+ }
+ }
+
+ function onResize(e) {
+ var el = e.srcElement;
+ if (el.firstChild) {
+ el.firstChild.style.width = el.clientWidth + 'px';
+ el.firstChild.style.height = el.clientHeight + 'px';
+ }
+ }
+
+ G_vmlCanvasManager_.init();
+
+ // precompute "00" to "FF"
+ var decToHex = [];
+ for (var i = 0; i < 16; i++) {
+ for (var j = 0; j < 16; j++) {
+ decToHex[i * 16 + j] = i.toString(16) + j.toString(16);
+ }
+ }
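+  // Example (illustrative): the table maps a byte straight to its
+  // two-digit hex string, so no zero-padding is needed when building
+  // "#rrggbb" colors:
+  //   decToHex[0]   // => '00'
+  //   decToHex[255] // => 'ff'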
+
+ function createMatrixIdentity() {
+ return [
+ [1, 0, 0],
+ [0, 1, 0],
+ [0, 0, 1]
+ ];
+ }
+
+ function matrixMultiply(m1, m2) {
+ var result = createMatrixIdentity();
+
+ for (var x = 0; x < 3; x++) {
+ for (var y = 0; y < 3; y++) {
+ var sum = 0;
+
+ for (var z = 0; z < 3; z++) {
+ sum += m1[x][z] * m2[z][y];
+ }
+
+ result[x][y] = sum;
+ }
+ }
+ return result;
+ }
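+  // Worked example (illustrative): matrices are row-major with the
+  // translation in the bottom row, so composing translate(5, 7) with
+  // scale(2) gives
+  //   matrixMultiply([[1,0,0],[0,1,0],[5,7,1]],
+  //                  [[2,0,0],[0,2,0],[0,0,1]])
+  //   // => [[2,0,0],[0,2,0],[10,14,1]]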
+
+ function copyState(o1, o2) {
+ o2.fillStyle = o1.fillStyle;
+ o2.lineCap = o1.lineCap;
+ o2.lineJoin = o1.lineJoin;
+ o2.lineWidth = o1.lineWidth;
+ o2.miterLimit = o1.miterLimit;
+ o2.shadowBlur = o1.shadowBlur;
+ o2.shadowColor = o1.shadowColor;
+ o2.shadowOffsetX = o1.shadowOffsetX;
+ o2.shadowOffsetY = o1.shadowOffsetY;
+ o2.strokeStyle = o1.strokeStyle;
+ o2.globalAlpha = o1.globalAlpha;
+ o2.font = o1.font;
+ o2.textAlign = o1.textAlign;
+ o2.textBaseline = o1.textBaseline;
+ o2.arcScaleX_ = o1.arcScaleX_;
+ o2.arcScaleY_ = o1.arcScaleY_;
+ o2.lineScale_ = o1.lineScale_;
+ }
+
+ var colorData = {
+ aliceblue: '#F0F8FF',
+ antiquewhite: '#FAEBD7',
+ aquamarine: '#7FFFD4',
+ azure: '#F0FFFF',
+ beige: '#F5F5DC',
+ bisque: '#FFE4C4',
+ black: '#000000',
+ blanchedalmond: '#FFEBCD',
+ blueviolet: '#8A2BE2',
+ brown: '#A52A2A',
+ burlywood: '#DEB887',
+ cadetblue: '#5F9EA0',
+ chartreuse: '#7FFF00',
+ chocolate: '#D2691E',
+ coral: '#FF7F50',
+ cornflowerblue: '#6495ED',
+ cornsilk: '#FFF8DC',
+ crimson: '#DC143C',
+ cyan: '#00FFFF',
+ darkblue: '#00008B',
+ darkcyan: '#008B8B',
+ darkgoldenrod: '#B8860B',
+ darkgray: '#A9A9A9',
+ darkgreen: '#006400',
+ darkgrey: '#A9A9A9',
+ darkkhaki: '#BDB76B',
+ darkmagenta: '#8B008B',
+ darkolivegreen: '#556B2F',
+ darkorange: '#FF8C00',
+ darkorchid: '#9932CC',
+ darkred: '#8B0000',
+ darksalmon: '#E9967A',
+ darkseagreen: '#8FBC8F',
+ darkslateblue: '#483D8B',
+ darkslategray: '#2F4F4F',
+ darkslategrey: '#2F4F4F',
+ darkturquoise: '#00CED1',
+ darkviolet: '#9400D3',
+ deeppink: '#FF1493',
+ deepskyblue: '#00BFFF',
+ dimgray: '#696969',
+ dimgrey: '#696969',
+ dodgerblue: '#1E90FF',
+ firebrick: '#B22222',
+ floralwhite: '#FFFAF0',
+ forestgreen: '#228B22',
+ gainsboro: '#DCDCDC',
+ ghostwhite: '#F8F8FF',
+ gold: '#FFD700',
+ goldenrod: '#DAA520',
+ grey: '#808080',
+ greenyellow: '#ADFF2F',
+ honeydew: '#F0FFF0',
+ hotpink: '#FF69B4',
+ indianred: '#CD5C5C',
+ indigo: '#4B0082',
+ ivory: '#FFFFF0',
+ khaki: '#F0E68C',
+ lavender: '#E6E6FA',
+ lavenderblush: '#FFF0F5',
+ lawngreen: '#7CFC00',
+ lemonchiffon: '#FFFACD',
+ lightblue: '#ADD8E6',
+ lightcoral: '#F08080',
+ lightcyan: '#E0FFFF',
+ lightgoldenrodyellow: '#FAFAD2',
+ lightgreen: '#90EE90',
+ lightgrey: '#D3D3D3',
+ lightpink: '#FFB6C1',
+ lightsalmon: '#FFA07A',
+ lightseagreen: '#20B2AA',
+ lightskyblue: '#87CEFA',
+ lightslategray: '#778899',
+ lightslategrey: '#778899',
+ lightsteelblue: '#B0C4DE',
+ lightyellow: '#FFFFE0',
+ limegreen: '#32CD32',
+ linen: '#FAF0E6',
+ magenta: '#FF00FF',
+ mediumaquamarine: '#66CDAA',
+ mediumblue: '#0000CD',
+ mediumorchid: '#BA55D3',
+ mediumpurple: '#9370DB',
+ mediumseagreen: '#3CB371',
+ mediumslateblue: '#7B68EE',
+ mediumspringgreen: '#00FA9A',
+ mediumturquoise: '#48D1CC',
+ mediumvioletred: '#C71585',
+ midnightblue: '#191970',
+ mintcream: '#F5FFFA',
+ mistyrose: '#FFE4E1',
+ moccasin: '#FFE4B5',
+ navajowhite: '#FFDEAD',
+ oldlace: '#FDF5E6',
+ olivedrab: '#6B8E23',
+ orange: '#FFA500',
+ orangered: '#FF4500',
+ orchid: '#DA70D6',
+ palegoldenrod: '#EEE8AA',
+ palegreen: '#98FB98',
+ paleturquoise: '#AFEEEE',
+ palevioletred: '#DB7093',
+ papayawhip: '#FFEFD5',
+ peachpuff: '#FFDAB9',
+ peru: '#CD853F',
+ pink: '#FFC0CB',
+ plum: '#DDA0DD',
+ powderblue: '#B0E0E6',
+ rosybrown: '#BC8F8F',
+ royalblue: '#4169E1',
+ saddlebrown: '#8B4513',
+ salmon: '#FA8072',
+ sandybrown: '#F4A460',
+ seagreen: '#2E8B57',
+ seashell: '#FFF5EE',
+ sienna: '#A0522D',
+ skyblue: '#87CEEB',
+ slateblue: '#6A5ACD',
+ slategray: '#708090',
+ slategrey: '#708090',
+ snow: '#FFFAFA',
+ springgreen: '#00FF7F',
+ steelblue: '#4682B4',
+ tan: '#D2B48C',
+ thistle: '#D8BFD8',
+ tomato: '#FF6347',
+ turquoise: '#40E0D0',
+ violet: '#EE82EE',
+ wheat: '#F5DEB3',
+ whitesmoke: '#F5F5F5',
+ yellowgreen: '#9ACD32'
+ };
+
+
+ function getRgbHslContent(styleString) {
+ var start = styleString.indexOf('(', 3);
+ var end = styleString.indexOf(')', start + 1);
+ var parts = styleString.substring(start + 1, end).split(',');
+ // add alpha if needed
+ if (parts.length != 4 || styleString.charAt(3) != 'a') {
+ parts[3] = 1;
+ }
+ return parts;
+ }
+
+ function percent(s) {
+ return parseFloat(s) / 100;
+ }
+
+ function clamp(v, min, max) {
+ return Math.min(max, Math.max(min, v));
+ }
+
+ function hslToRgb(parts){
+ var r, g, b, h, s, l;
+ h = parseFloat(parts[0]) / 360 % 360;
+ if (h < 0)
+ h++;
+ s = clamp(percent(parts[1]), 0, 1);
+ l = clamp(percent(parts[2]), 0, 1);
+ if (s == 0) {
+ r = g = b = l; // achromatic
+ } else {
+ var q = l < 0.5 ? l * (1 + s) : l + s - l * s;
+ var p = 2 * l - q;
+ r = hueToRgb(p, q, h + 1 / 3);
+ g = hueToRgb(p, q, h);
+ b = hueToRgb(p, q, h - 1 / 3);
+ }
+
+ return '#' + decToHex[Math.floor(r * 255)] +
+ decToHex[Math.floor(g * 255)] +
+ decToHex[Math.floor(b * 255)];
+ }
+
+ function hueToRgb(m1, m2, h) {
+ if (h < 0)
+ h++;
+ if (h > 1)
+ h--;
+
+ if (6 * h < 1)
+ return m1 + (m2 - m1) * 6 * h;
+ else if (2 * h < 1)
+ return m2;
+ else if (3 * h < 2)
+ return m1 + (m2 - m1) * (2 / 3 - h) * 6;
+ else
+ return m1;
+ }
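+  // Worked example (illustrative): pure green, hsl(120, 100%, 50%).
+  // With s = 1 and l = 0.5, q = 1 and p = 0, and hueToRgb yields
+  // r = 0, g = 1, b = 0:
+  //   hslToRgb(['120', '100%', '50%']) // => '#00ff00'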
+
+ var processStyleCache = {};
+
+ function processStyle(styleString) {
+ if (styleString in processStyleCache) {
+ return processStyleCache[styleString];
+ }
+
+ var str, alpha = 1;
+
+ styleString = String(styleString);
+ if (styleString.charAt(0) == '#') {
+ str = styleString;
+ } else if (/^rgb/.test(styleString)) {
+ var parts = getRgbHslContent(styleString);
+ var str = '#', n;
+ for (var i = 0; i < 3; i++) {
+ if (parts[i].indexOf('%') != -1) {
+ n = Math.floor(percent(parts[i]) * 255);
+ } else {
+ n = +parts[i];
+ }
+ str += decToHex[clamp(n, 0, 255)];
+ }
+ alpha = +parts[3];
+ } else if (/^hsl/.test(styleString)) {
+ var parts = getRgbHslContent(styleString);
+ str = hslToRgb(parts);
+ alpha = parts[3];
+ } else {
+ str = colorData[styleString] || styleString;
+ }
+ return processStyleCache[styleString] = {color: str, alpha: alpha};
+ }
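+  // Example (illustrative): results are memoized in processStyleCache,
+  // so a repeated style string is only parsed once.
+  //   processStyle('rgba(255, 0, 0, 0.5)') // => {color: '#ff0000', alpha: 0.5}
+  //   processStyle('cyan')                 // => {color: '#00FFFF', alpha: 1}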
+
+ var DEFAULT_STYLE = {
+ style: 'normal',
+ variant: 'normal',
+ weight: 'normal',
+ size: 10,
+ family: 'sans-serif'
+ };
+
+ // Internal text style cache
+ var fontStyleCache = {};
+
+ function processFontStyle(styleString) {
+ if (fontStyleCache[styleString]) {
+ return fontStyleCache[styleString];
+ }
+
+ var el = document.createElement('div');
+ var style = el.style;
+ try {
+ style.font = styleString;
+ } catch (ex) {
+ // Ignore failures to set to invalid font.
+ }
+
+ return fontStyleCache[styleString] = {
+ style: style.fontStyle || DEFAULT_STYLE.style,
+ variant: style.fontVariant || DEFAULT_STYLE.variant,
+ weight: style.fontWeight || DEFAULT_STYLE.weight,
+ size: style.fontSize || DEFAULT_STYLE.size,
+ family: style.fontFamily || DEFAULT_STYLE.family
+ };
+ }
+
+ function getComputedStyle(style, element) {
+ var computedStyle = {};
+
+ for (var p in style) {
+ computedStyle[p] = style[p];
+ }
+
+ // Compute the size
+ var canvasFontSize = parseFloat(element.currentStyle.fontSize),
+ fontSize = parseFloat(style.size);
+
+ if (typeof style.size == 'number') {
+ computedStyle.size = style.size;
+ } else if (style.size.indexOf('px') != -1) {
+ computedStyle.size = fontSize;
+ } else if (style.size.indexOf('em') != -1) {
+ computedStyle.size = canvasFontSize * fontSize;
+ } else if(style.size.indexOf('%') != -1) {
+ computedStyle.size = (canvasFontSize / 100) * fontSize;
+ } else if (style.size.indexOf('pt') != -1) {
+ computedStyle.size = fontSize / .75;
+ } else {
+ computedStyle.size = canvasFontSize;
+ }
+
+ // Different scaling between normal text and VML text. This was found using
+    // trial and error to get the same size as non-VML text.
+ computedStyle.size *= 0.981;
+
+ return computedStyle;
+ }
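+  // Worked example (illustrative): with a 16px element font, a style
+  // size of '12pt' resolves to 12 / 0.75 = 16px and '150%' to
+  // (16 / 100) * 150 = 24px, before the 0.981 VML fudge factor.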
+
+ function buildStyle(style) {
+ return style.style + ' ' + style.variant + ' ' + style.weight + ' ' +
+ style.size + 'px ' + style.family;
+ }
+
+ var lineCapMap = {
+ 'butt': 'flat',
+ 'round': 'round'
+ };
+
+ function processLineCap(lineCap) {
+ return lineCapMap[lineCap] || 'square';
+ }
+
+ /**
+   * This class implements the CanvasRenderingContext2D interface as described
+ * the WHATWG.
+ * @param {HTMLElement} canvasElement The element that the 2D context should
+ * be associated with
+ */
+ function CanvasRenderingContext2D_(canvasElement) {
+ this.m_ = createMatrixIdentity();
+
+ this.mStack_ = [];
+ this.aStack_ = [];
+ this.currentPath_ = [];
+
+ // Canvas context properties
+ this.strokeStyle = '#000';
+ this.fillStyle = '#000';
+
+ this.lineWidth = 1;
+ this.lineJoin = 'miter';
+ this.lineCap = 'butt';
+ this.miterLimit = Z * 1;
+ this.globalAlpha = 1;
+ this.font = '10px sans-serif';
+ this.textAlign = 'left';
+ this.textBaseline = 'alphabetic';
+ this.canvas = canvasElement;
+
+ var cssText = 'width:' + canvasElement.clientWidth + 'px;height:' +
+ canvasElement.clientHeight + 'px;overflow:hidden;position:absolute';
+ var el = canvasElement.ownerDocument.createElement('div');
+ el.style.cssText = cssText;
+ canvasElement.appendChild(el);
+
+ var overlayEl = el.cloneNode(false);
+    // Use a non-transparent background.
+ overlayEl.style.backgroundColor = 'red';
+ overlayEl.style.filter = 'alpha(opacity=0)';
+ canvasElement.appendChild(overlayEl);
+
+ this.element_ = el;
+ this.arcScaleX_ = 1;
+ this.arcScaleY_ = 1;
+ this.lineScale_ = 1;
+ }
+
+ var contextPrototype = CanvasRenderingContext2D_.prototype;
+ contextPrototype.clearRect = function() {
+ if (this.textMeasureEl_) {
+ this.textMeasureEl_.removeNode(true);
+ this.textMeasureEl_ = null;
+ }
+ this.element_.innerHTML = '';
+ };
+
+ contextPrototype.beginPath = function() {
+ // TODO: Branch current matrix so that save/restore has no effect
+ // as per safari docs.
+ this.currentPath_ = [];
+ };
+
+ contextPrototype.moveTo = function(aX, aY) {
+ var p = getCoords(this, aX, aY);
+ this.currentPath_.push({type: 'moveTo', x: p.x, y: p.y});
+ this.currentX_ = p.x;
+ this.currentY_ = p.y;
+ };
+
+ contextPrototype.lineTo = function(aX, aY) {
+ var p = getCoords(this, aX, aY);
+ this.currentPath_.push({type: 'lineTo', x: p.x, y: p.y});
+
+ this.currentX_ = p.x;
+ this.currentY_ = p.y;
+ };
+
+ contextPrototype.bezierCurveTo = function(aCP1x, aCP1y,
+ aCP2x, aCP2y,
+ aX, aY) {
+ var p = getCoords(this, aX, aY);
+ var cp1 = getCoords(this, aCP1x, aCP1y);
+ var cp2 = getCoords(this, aCP2x, aCP2y);
+ bezierCurveTo(this, cp1, cp2, p);
+ };
+
+  // Helper function that takes the already transformed coordinates.
+ function bezierCurveTo(self, cp1, cp2, p) {
+ self.currentPath_.push({
+ type: 'bezierCurveTo',
+ cp1x: cp1.x,
+ cp1y: cp1.y,
+ cp2x: cp2.x,
+ cp2y: cp2.y,
+ x: p.x,
+ y: p.y
+ });
+ self.currentX_ = p.x;
+ self.currentY_ = p.y;
+ }
+
+ contextPrototype.quadraticCurveTo = function(aCPx, aCPy, aX, aY) {
+ // the following is lifted almost directly from
+ // https://developer.mozilla.org/en/docs/Canvas_tutorial:Drawing_shapes
+
+ var cp = getCoords(this, aCPx, aCPy);
+ var p = getCoords(this, aX, aY);
+
+ var cp1 = {
+ x: this.currentX_ + 2.0 / 3.0 * (cp.x - this.currentX_),
+ y: this.currentY_ + 2.0 / 3.0 * (cp.y - this.currentY_)
+ };
+ var cp2 = {
+ x: cp1.x + (p.x - this.currentX_) / 3.0,
+ y: cp1.y + (p.y - this.currentY_) / 3.0
+ };
+
+ bezierCurveTo(this, cp1, cp2, p);
+ };
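+  // The control points above are the standard degree elevation of a
+  // quadratic Bezier (P0, CP, P) to a cubic:
+  //   cp1 = P0 + (2/3)(CP - P0)
+  //   cp2 = P + (2/3)(CP - P) = cp1 + (P - P0)/3
+  // so both curves trace exactly the same shape.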
+
+ contextPrototype.arc = function(aX, aY, aRadius,
+ aStartAngle, aEndAngle, aClockwise) {
+ aRadius *= Z;
+ var arcType = aClockwise ? 'at' : 'wa';
+
+ var xStart = aX + mc(aStartAngle) * aRadius - Z2;
+ var yStart = aY + ms(aStartAngle) * aRadius - Z2;
+
+ var xEnd = aX + mc(aEndAngle) * aRadius - Z2;
+ var yEnd = aY + ms(aEndAngle) * aRadius - Z2;
+
+    // IE won't render arcs drawn counter-clockwise if xStart == xEnd.
+ if (xStart == xEnd && !aClockwise) {
+ xStart += 0.125; // Offset xStart by 1/80 of a pixel. Use something
+ // that can be represented in binary
+ }
+
+ var p = getCoords(this, aX, aY);
+ var pStart = getCoords(this, xStart, yStart);
+ var pEnd = getCoords(this, xEnd, yEnd);
+
+ this.currentPath_.push({type: arcType,
+ x: p.x,
+ y: p.y,
+ radius: aRadius,
+ xStart: pStart.x,
+ yStart: pStart.y,
+ xEnd: pEnd.x,
+ yEnd: pEnd.y});
+
+ };
+
+ contextPrototype.rect = function(aX, aY, aWidth, aHeight) {
+ this.moveTo(aX, aY);
+ this.lineTo(aX + aWidth, aY);
+ this.lineTo(aX + aWidth, aY + aHeight);
+ this.lineTo(aX, aY + aHeight);
+ this.closePath();
+ };
+
+ contextPrototype.strokeRect = function(aX, aY, aWidth, aHeight) {
+ var oldPath = this.currentPath_;
+ this.beginPath();
+
+ this.moveTo(aX, aY);
+ this.lineTo(aX + aWidth, aY);
+ this.lineTo(aX + aWidth, aY + aHeight);
+ this.lineTo(aX, aY + aHeight);
+ this.closePath();
+ this.stroke();
+
+ this.currentPath_ = oldPath;
+ };
+
+ contextPrototype.fillRect = function(aX, aY, aWidth, aHeight) {
+ var oldPath = this.currentPath_;
+ this.beginPath();
+
+ this.moveTo(aX, aY);
+ this.lineTo(aX + aWidth, aY);
+ this.lineTo(aX + aWidth, aY + aHeight);
+ this.lineTo(aX, aY + aHeight);
+ this.closePath();
+ this.fill();
+
+ this.currentPath_ = oldPath;
+ };
+
+ contextPrototype.createLinearGradient = function(aX0, aY0, aX1, aY1) {
+ var gradient = new CanvasGradient_('gradient');
+ gradient.x0_ = aX0;
+ gradient.y0_ = aY0;
+ gradient.x1_ = aX1;
+ gradient.y1_ = aY1;
+ return gradient;
+ };
+
+ contextPrototype.createRadialGradient = function(aX0, aY0, aR0,
+ aX1, aY1, aR1) {
+ var gradient = new CanvasGradient_('gradientradial');
+ gradient.x0_ = aX0;
+ gradient.y0_ = aY0;
+ gradient.r0_ = aR0;
+ gradient.x1_ = aX1;
+ gradient.y1_ = aY1;
+ gradient.r1_ = aR1;
+ return gradient;
+ };
+
+ contextPrototype.drawImage = function(image, var_args) {
+ var dx, dy, dw, dh, sx, sy, sw, sh;
+
+    // to find the original size we override the width and height
+ var oldRuntimeWidth = image.runtimeStyle.width;
+ var oldRuntimeHeight = image.runtimeStyle.height;
+ image.runtimeStyle.width = 'auto';
+ image.runtimeStyle.height = 'auto';
+
+ // get the original size
+ var w = image.width;
+ var h = image.height;
+
+    // and remove the overrides
+ image.runtimeStyle.width = oldRuntimeWidth;
+ image.runtimeStyle.height = oldRuntimeHeight;
+
+ if (arguments.length == 3) {
+ dx = arguments[1];
+ dy = arguments[2];
+ sx = sy = 0;
+ sw = dw = w;
+ sh = dh = h;
+ } else if (arguments.length == 5) {
+ dx = arguments[1];
+ dy = arguments[2];
+ dw = arguments[3];
+ dh = arguments[4];
+ sx = sy = 0;
+ sw = w;
+ sh = h;
+ } else if (arguments.length == 9) {
+ sx = arguments[1];
+ sy = arguments[2];
+ sw = arguments[3];
+ sh = arguments[4];
+ dx = arguments[5];
+ dy = arguments[6];
+ dw = arguments[7];
+ dh = arguments[8];
+ } else {
+ throw Error('Invalid number of arguments');
+ }
+
+ var d = getCoords(this, dx, dy);
+
+ var w2 = sw / 2;
+ var h2 = sh / 2;
+
+ var vmlStr = [];
+
+ var W = 10;
+ var H = 10;
+
+ // For some reason that I've now forgotten, using divs didn't work
+ vmlStr.push(' <g_vml_:group',
+ ' coordsize="', Z * W, ',', Z * H, '"',
+ ' coordorigin="0,0"' ,
+ ' style="width:', W, 'px;height:', H, 'px;position:absolute;');
+
+    // If filters are necessary (rotation exists), create them. Filters
+    // are bog-slow, so only create them if absolutely necessary. The
+    // following check doesn't account for skews, which don't exist in
+    // the canvas spec (yet) anyway.
+
+ if (this.m_[0][0] != 1 || this.m_[0][1] ||
+ this.m_[1][1] != 1 || this.m_[1][0]) {
+ var filter = [];
+
+ // Note the 12/21 reversal
+ filter.push('M11=', this.m_[0][0], ',',
+ 'M12=', this.m_[1][0], ',',
+ 'M21=', this.m_[0][1], ',',
+ 'M22=', this.m_[1][1], ',',
+ 'Dx=', mr(d.x / Z), ',',
+ 'Dy=', mr(d.y / Z), '');
+
+ // Bounding box calculation (need to minimize displayed area so that
+      // filters don't waste time on unused pixels).
+ var max = d;
+ var c2 = getCoords(this, dx + dw, dy);
+ var c3 = getCoords(this, dx, dy + dh);
+ var c4 = getCoords(this, dx + dw, dy + dh);
+
+ max.x = m.max(max.x, c2.x, c3.x, c4.x);
+ max.y = m.max(max.y, c2.y, c3.y, c4.y);
+
+ vmlStr.push('padding:0 ', mr(max.x / Z), 'px ', mr(max.y / Z),
+ 'px 0;filter:progid:DXImageTransform.Microsoft.Matrix(',
+ filter.join(''), ", sizingmethod='clip');");
+
+ } else {
+ vmlStr.push('top:', mr(d.y / Z), 'px;left:', mr(d.x / Z), 'px;');
+ }
+
+ vmlStr.push(' ">' ,
+ '<g_vml_:image src="', image.src, '"',
+ ' style="width:', Z * dw, 'px;',
+ ' height:', Z * dh, 'px"',
+ ' cropleft="', sx / w, '"',
+ ' croptop="', sy / h, '"',
+ ' cropright="', (w - sx - sw) / w, '"',
+ ' cropbottom="', (h - sy - sh) / h, '"',
+ ' />',
+ '</g_vml_:group>');
+
+ this.element_.insertAdjacentHTML('BeforeEnd', vmlStr.join(''));
+ };
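+  // The three standard overloads handled above (illustrative): the
+  // source rectangle defaults to the whole image and the destination
+  // size to the natural size.
+  //   ctx.drawImage(img, dx, dy);
+  //   ctx.drawImage(img, dx, dy, dw, dh);
+  //   ctx.drawImage(img, sx, sy, sw, sh, dx, dy, dw, dh);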
+
+ contextPrototype.stroke = function(aFill) {
+ var W = 10;
+ var H = 10;
+ // Divide the shape into chunks if it's too long because IE has a limit
+ // somewhere for how long a VML shape can be. This simple division does
+ // not work with fills, only strokes, unfortunately.
+ var chunkSize = 5000;
+
+ var min = {x: null, y: null};
+ var max = {x: null, y: null};
+
+ for (var j = 0; j < this.currentPath_.length; j += chunkSize) {
+ var lineStr = [];
+ var lineOpen = false;
+
+ lineStr.push('<g_vml_:shape',
+ ' filled="', !!aFill, '"',
+ ' style="position:absolute;width:', W, 'px;height:', H, 'px;"',
+ ' coordorigin="0,0"',
+ ' coordsize="', Z * W, ',', Z * H, '"',
+ ' stroked="', !aFill, '"',
+ ' path="');
+
+ var newSeq = false;
+
+ for (var i = j; i < Math.min(j + chunkSize, this.currentPath_.length); i++) {
+ if (i % chunkSize == 0 && i > 0) { // move into position for next chunk
+ lineStr.push(' m ', mr(this.currentPath_[i-1].x), ',', mr(this.currentPath_[i-1].y));
+ }
+
+ var p = this.currentPath_[i];
+ var c;
+
+ switch (p.type) {
+ case 'moveTo':
+ c = p;
+ lineStr.push(' m ', mr(p.x), ',', mr(p.y));
+ break;
+ case 'lineTo':
+ lineStr.push(' l ', mr(p.x), ',', mr(p.y));
+ break;
+ case 'close':
+ lineStr.push(' x ');
+ p = null;
+ break;
+ case 'bezierCurveTo':
+ lineStr.push(' c ',
+ mr(p.cp1x), ',', mr(p.cp1y), ',',
+ mr(p.cp2x), ',', mr(p.cp2y), ',',
+ mr(p.x), ',', mr(p.y));
+ break;
+ case 'at':
+ case 'wa':
+ lineStr.push(' ', p.type, ' ',
+ mr(p.x - this.arcScaleX_ * p.radius), ',',
+ mr(p.y - this.arcScaleY_ * p.radius), ' ',
+ mr(p.x + this.arcScaleX_ * p.radius), ',',
+ mr(p.y + this.arcScaleY_ * p.radius), ' ',
+ mr(p.xStart), ',', mr(p.yStart), ' ',
+ mr(p.xEnd), ',', mr(p.yEnd));
+ break;
+ }
+
+
+        // TODO: The following is broken for curves due to the move to
+        // proper paths: the min/max bounds only track endpoints, not
+        // curve control points.
+
+ // Figure out dimensions so we can do gradient fills
+ // properly
+ if (p) {
+ if (min.x == null || p.x < min.x) {
+ min.x = p.x;
+ }
+ if (max.x == null || p.x > max.x) {
+ max.x = p.x;
+ }
+ if (min.y == null || p.y < min.y) {
+ min.y = p.y;
+ }
+ if (max.y == null || p.y > max.y) {
+ max.y = p.y;
+ }
+ }
+ }
+ lineStr.push(' ">');
+
+ if (!aFill) {
+ appendStroke(this, lineStr);
+ } else {
+ appendFill(this, lineStr, min, max);
+ }
+
+ lineStr.push('</g_vml_:shape>');
+
+ this.element_.insertAdjacentHTML('beforeEnd', lineStr.join(''));
+ }
+ };
+
+ function appendStroke(ctx, lineStr) {
+ var a = processStyle(ctx.strokeStyle);
+ var color = a.color;
+ var opacity = a.alpha * ctx.globalAlpha;
+ var lineWidth = ctx.lineScale_ * ctx.lineWidth;
+
+ // VML cannot correctly render a line if the width is less than 1px.
+ // In that case, we dilute the color to make the line look thinner.
+ if (lineWidth < 1) {
+ opacity *= lineWidth;
+ }
+
+ lineStr.push(
+ '<g_vml_:stroke',
+ ' opacity="', opacity, '"',
+ ' joinstyle="', ctx.lineJoin, '"',
+ ' miterlimit="', ctx.miterLimit, '"',
+ ' endcap="', processLineCap(ctx.lineCap), '"',
+ ' weight="', lineWidth, 'px"',
+ ' color="', color, '" />'
+ );
+ }
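+  // Worked example (illustrative): a 0.5px stroke is emitted at weight
+  // 0.5 with its opacity halved, e.g. globalAlpha 1 and color alpha 1
+  // yield opacity 0.5, which reads as a thinner line than VML can draw.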
+
+ function appendFill(ctx, lineStr, min, max) {
+ var fillStyle = ctx.fillStyle;
+ var arcScaleX = ctx.arcScaleX_;
+ var arcScaleY = ctx.arcScaleY_;
+ var width = max.x - min.x;
+ var height = max.y - min.y;
+ if (fillStyle instanceof CanvasGradient_) {
+ // TODO: Gradients transformed with the transformation matrix.
+ var angle = 0;
+ var focus = {x: 0, y: 0};
+
+ // additional offset
+ var shift = 0;
+ // scale factor for offset
+ var expansion = 1;
+
+ if (fillStyle.type_ == 'gradient') {
+ var x0 = fillStyle.x0_ / arcScaleX;
+ var y0 = fillStyle.y0_ / arcScaleY;
+ var x1 = fillStyle.x1_ / arcScaleX;
+ var y1 = fillStyle.y1_ / arcScaleY;
+ var p0 = getCoords(ctx, x0, y0);
+ var p1 = getCoords(ctx, x1, y1);
+ var dx = p1.x - p0.x;
+ var dy = p1.y - p0.y;
+ angle = Math.atan2(dx, dy) * 180 / Math.PI;
+
+ // The angle should be a non-negative number.
+ if (angle < 0) {
+ angle += 360;
+ }
+
+ // Very small angles produce an unexpected result because they are
+ // converted to a scientific notation string.
+ if (angle < 1e-6) {
+ angle = 0;
+ }
+ } else {
+ var p0 = getCoords(ctx, fillStyle.x0_, fillStyle.y0_);
+ focus = {
+ x: (p0.x - min.x) / width,
+ y: (p0.y - min.y) / height
+ };
+
+ width /= arcScaleX * Z;
+ height /= arcScaleY * Z;
+ var dimension = m.max(width, height);
+ shift = 2 * fillStyle.r0_ / dimension;
+ expansion = 2 * fillStyle.r1_ / dimension - shift;
+ }
+
+ // We need to sort the color stops in ascending order by offset,
+ // otherwise IE won't interpret it correctly.
+ var stops = fillStyle.colors_;
+ stops.sort(function(cs1, cs2) {
+ return cs1.offset - cs2.offset;
+ });
+
+ var length = stops.length;
+ var color1 = stops[0].color;
+ var color2 = stops[length - 1].color;
+ var opacity1 = stops[0].alpha * ctx.globalAlpha;
+ var opacity2 = stops[length - 1].alpha * ctx.globalAlpha;
+
+ var colors = [];
+ for (var i = 0; i < length; i++) {
+ var stop = stops[i];
+ colors.push(stop.offset * expansion + shift + ' ' + stop.color);
+ }
+
+      // When the colors attribute is used, the meanings of opacity and o:opacity2
+ // are reversed.
+ lineStr.push('<g_vml_:fill type="', fillStyle.type_, '"',
+ ' method="none" focus="100%"',
+ ' color="', color1, '"',
+ ' color2="', color2, '"',
+ ' colors="', colors.join(','), '"',
+ ' opacity="', opacity2, '"',
+ ' g_o_:opacity2="', opacity1, '"',
+ ' angle="', angle, '"',
+ ' focusposition="', focus.x, ',', focus.y, '" />');
+ } else if (fillStyle instanceof CanvasPattern_) {
+ if (width && height) {
+ var deltaLeft = -min.x;
+ var deltaTop = -min.y;
+ lineStr.push('<g_vml_:fill',
+ ' position="',
+ deltaLeft / width * arcScaleX * arcScaleX, ',',
+ deltaTop / height * arcScaleY * arcScaleY, '"',
+ ' type="tile"',
+ // TODO: Figure out the correct size to fit the scale.
+ //' size="', w, 'px ', h, 'px"',
+ ' src="', fillStyle.src_, '" />');
+ }
+ } else {
+ var a = processStyle(ctx.fillStyle);
+ var color = a.color;
+ var opacity = a.alpha * ctx.globalAlpha;
+ lineStr.push('<g_vml_:fill color="', color, '" opacity="', opacity,
+ '" />');
+ }
+ }
+
+ contextPrototype.fill = function() {
+ this.stroke(true);
+ };
+
+ contextPrototype.closePath = function() {
+ this.currentPath_.push({type: 'close'});
+ };
+
+ function getCoords(ctx, aX, aY) {
+ var m = ctx.m_;
+ return {
+ x: Z * (aX * m[0][0] + aY * m[1][0] + m[2][0]) - Z2,
+ y: Z * (aX * m[0][1] + aY * m[1][1] + m[2][1]) - Z2
+ };
+  }
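+  // Worked example (illustrative): with the identity matrix a point is
+  // only scaled into the Z-times coordinate space and shifted by Z2:
+  //   getCoords(ctx, 3, 4) // => {x: 3 * 10 - 5, y: 4 * 10 - 5} = {x: 25, y: 35}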
+
+ contextPrototype.save = function() {
+ var o = {};
+ copyState(this, o);
+ this.aStack_.push(o);
+ this.mStack_.push(this.m_);
+ this.m_ = matrixMultiply(createMatrixIdentity(), this.m_);
+ };
+
+ contextPrototype.restore = function() {
+ if (this.aStack_.length) {
+ copyState(this.aStack_.pop(), this);
+ this.m_ = this.mStack_.pop();
+ }
+ };
+
+ function matrixIsFinite(m) {
+ return isFinite(m[0][0]) && isFinite(m[0][1]) &&
+ isFinite(m[1][0]) && isFinite(m[1][1]) &&
+ isFinite(m[2][0]) && isFinite(m[2][1]);
+ }
+
+ function setM(ctx, m, updateLineScale) {
+ if (!matrixIsFinite(m)) {
+ return;
+ }
+ ctx.m_ = m;
+
+ if (updateLineScale) {
+ // Get the line scale.
+ // Determinant of this.m_ means how much the area is enlarged by the
+ // transformation. So its square root can be used as a scale factor
+ // for width.
+ var det = m[0][0] * m[1][1] - m[0][1] * m[1][0];
+ ctx.lineScale_ = sqrt(abs(det));
+ }
+ }
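+  // Worked example (illustrative): after scale(2, 3) the determinant is
+  // 2 * 3 = 6, so strokes widen by sqrt(6) ~= 2.45, the geometric mean
+  // of the two axis scales (VML strokes cannot scale per axis).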
+
+ contextPrototype.translate = function(aX, aY) {
+ var m1 = [
+ [1, 0, 0],
+ [0, 1, 0],
+ [aX, aY, 1]
+ ];
+
+ setM(this, matrixMultiply(m1, this.m_), false);
+ };
+
+ contextPrototype.rotate = function(aRot) {
+ var c = mc(aRot);
+ var s = ms(aRot);
+
+ var m1 = [
+ [c, s, 0],
+ [-s, c, 0],
+ [0, 0, 1]
+ ];
+
+ setM(this, matrixMultiply(m1, this.m_), false);
+ };
+
+ contextPrototype.scale = function(aX, aY) {
+ this.arcScaleX_ *= aX;
+ this.arcScaleY_ *= aY;
+ var m1 = [
+ [aX, 0, 0],
+ [0, aY, 0],
+ [0, 0, 1]
+ ];
+
+ setM(this, matrixMultiply(m1, this.m_), true);
+ };
+
+ contextPrototype.transform = function(m11, m12, m21, m22, dx, dy) {
+ var m1 = [
+ [m11, m12, 0],
+ [m21, m22, 0],
+ [dx, dy, 1]
+ ];
+
+ setM(this, matrixMultiply(m1, this.m_), true);
+ };
+
+ contextPrototype.setTransform = function(m11, m12, m21, m22, dx, dy) {
+ var m = [
+ [m11, m12, 0],
+ [m21, m22, 0],
+ [dx, dy, 1]
+ ];
+
+ setM(this, m, true);
+ };
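+  // Points are row vectors here (see getCoords), so each call above sets
+  // m_ = m1 * m_ and the newest operation applies to coordinates first.
+  // Worked example (illustrative): after translate(5, 0) then scale(2, 2),
+  // the point (1, 1) maps to (1 * 2 + 5, 1 * 2) = (7, 2), matching native
+  // canvas semantics.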
+
+ /**
+ * The text drawing function.
+   * The maxWidth argument isn't taken into account, since no browser supports
+ * it yet.
+ */
+ contextPrototype.drawText_ = function(text, x, y, maxWidth, stroke) {
+ var m = this.m_,
+ delta = 1000,
+ left = 0,
+ right = delta,
+ offset = {x: 0, y: 0},
+ lineStr = [];
+
+ var fontStyle = getComputedStyle(processFontStyle(this.font),
+ this.element_);
+
+ var fontStyleString = buildStyle(fontStyle);
+
+ var elementStyle = this.element_.currentStyle;
+ var textAlign = this.textAlign.toLowerCase();
+ switch (textAlign) {
+ case 'left':
+ case 'center':
+ case 'right':
+ break;
+ case 'end':
+ textAlign = elementStyle.direction == 'ltr' ? 'right' : 'left';
+ break;
+ case 'start':
+ textAlign = elementStyle.direction == 'rtl' ? 'right' : 'left';
+ break;
+ default:
+ textAlign = 'left';
+ }
+
+ // 1.75 is an arbitrary number, as there is no info about the text baseline
+ switch (this.textBaseline) {
+ case 'hanging':
+ case 'top':
+ offset.y = fontStyle.size / 1.75;
+ break;
+ case 'middle':
+ break;
+ default:
+ case null:
+ case 'alphabetic':
+ case 'ideographic':
+ case 'bottom':
+ offset.y = -fontStyle.size / 2.25;
+ break;
+ }
+
+ switch(textAlign) {
+ case 'right':
+ left = delta;
+ right = 0.05;
+ break;
+ case 'center':
+ left = right = delta / 2;
+ break;
+ }
+
+ var d = getCoords(this, x + offset.x, y + offset.y);
+
+ lineStr.push('<g_vml_:line from="', -left ,' 0" to="', right ,' 0.05" ',
+ ' coordsize="100 100" coordorigin="0 0"',
+ ' filled="', !stroke, '" stroked="', !!stroke,
+ '" style="position:absolute;width:1px;height:1px;">');
+
+ if (stroke) {
+ appendStroke(this, lineStr);
+ } else {
+ // TODO: Fix the min and max params.
+ appendFill(this, lineStr, {x: -left, y: 0},
+ {x: right, y: fontStyle.size});
+ }
+
+ var skewM = m[0][0].toFixed(3) + ',' + m[1][0].toFixed(3) + ',' +
+ m[0][1].toFixed(3) + ',' + m[1][1].toFixed(3) + ',0,0';
+
+ var skewOffset = mr(d.x / Z) + ',' + mr(d.y / Z);
+
+ lineStr.push('<g_vml_:skew on="t" matrix="', skewM ,'" ',
+ ' offset="', skewOffset, '" origin="', left ,' 0" />',
+ '<g_vml_:path textpathok="true" />',
+ '<g_vml_:textpath on="true" string="',
+ encodeHtmlAttribute(text),
+ '" style="v-text-align:', textAlign,
+ ';font:', encodeHtmlAttribute(fontStyleString),
+ '" /></g_vml_:line>');
+
+ this.element_.insertAdjacentHTML('beforeEnd', lineStr.join(''));
+ };
+
+ contextPrototype.fillText = function(text, x, y, maxWidth) {
+ this.drawText_(text, x, y, maxWidth, false);
+ };
+
+ contextPrototype.strokeText = function(text, x, y, maxWidth) {
+ this.drawText_(text, x, y, maxWidth, true);
+ };
+
+ contextPrototype.measureText = function(text) {
+ if (!this.textMeasureEl_) {
+ var s = '<span style="position:absolute;' +
+ 'top:-20000px;left:0;padding:0;margin:0;border:none;' +
+ 'white-space:pre;"></span>';
+ this.element_.insertAdjacentHTML('beforeEnd', s);
+ this.textMeasureEl_ = this.element_.lastChild;
+ }
+ var doc = this.element_.ownerDocument;
+ this.textMeasureEl_.innerHTML = '';
+ this.textMeasureEl_.style.font = this.font;
+ // Don't use innerHTML or innerText because they allow markup/whitespace.
+ this.textMeasureEl_.appendChild(doc.createTextNode(text));
+ return {width: this.textMeasureEl_.offsetWidth};
+ };
+
+ /******** STUBS ********/
+ contextPrototype.clip = function() {
+ // TODO: Implement
+ };
+
+ contextPrototype.arcTo = function() {
+ // TODO: Implement
+ };
+
+ contextPrototype.createPattern = function(image, repetition) {
+ return new CanvasPattern_(image, repetition);
+ };
+
+ // Gradient / Pattern Stubs
+ function CanvasGradient_(aType) {
+ this.type_ = aType;
+ this.x0_ = 0;
+ this.y0_ = 0;
+ this.r0_ = 0;
+ this.x1_ = 0;
+ this.y1_ = 0;
+ this.r1_ = 0;
+ this.colors_ = [];
+ }
+
+ CanvasGradient_.prototype.addColorStop = function(aOffset, aColor) {
+ aColor = processStyle(aColor);
+ this.colors_.push({offset: aOffset,
+ color: aColor.color,
+ alpha: aColor.alpha});
+ };
+
+ function CanvasPattern_(image, repetition) {
+ assertImageIsValid(image);
+ switch (repetition) {
+ case 'repeat':
+ case null:
+ case '':
+ this.repetition_ = 'repeat';
+      break;
+ case 'repeat-x':
+ case 'repeat-y':
+ case 'no-repeat':
+ this.repetition_ = repetition;
+ break;
+ default:
+ throwException('SYNTAX_ERR');
+ }
+
+ this.src_ = image.src;
+ this.width_ = image.width;
+ this.height_ = image.height;
+ }
+
+ function throwException(s) {
+ throw new DOMException_(s);
+ }
+
+ function assertImageIsValid(img) {
+ if (!img || img.nodeType != 1 || img.tagName != 'IMG') {
+ throwException('TYPE_MISMATCH_ERR');
+ }
+ if (img.readyState != 'complete') {
+ throwException('INVALID_STATE_ERR');
+ }
+ }
+
+ function DOMException_(s) {
+ this.code = this[s];
+ this.message = s +': DOM Exception ' + this.code;
+ }
+ var p = DOMException_.prototype = new Error;
+ p.INDEX_SIZE_ERR = 1;
+ p.DOMSTRING_SIZE_ERR = 2;
+ p.HIERARCHY_REQUEST_ERR = 3;
+ p.WRONG_DOCUMENT_ERR = 4;
+ p.INVALID_CHARACTER_ERR = 5;
+ p.NO_DATA_ALLOWED_ERR = 6;
+ p.NO_MODIFICATION_ALLOWED_ERR = 7;
+ p.NOT_FOUND_ERR = 8;
+ p.NOT_SUPPORTED_ERR = 9;
+ p.INUSE_ATTRIBUTE_ERR = 10;
+ p.INVALID_STATE_ERR = 11;
+ p.SYNTAX_ERR = 12;
+ p.INVALID_MODIFICATION_ERR = 13;
+ p.NAMESPACE_ERR = 14;
+ p.INVALID_ACCESS_ERR = 15;
+ p.VALIDATION_ERR = 16;
+ p.TYPE_MISMATCH_ERR = 17;
+
+ // set up externs
+ G_vmlCanvasManager = G_vmlCanvasManager_;
+ CanvasRenderingContext2D = CanvasRenderingContext2D_;
+ CanvasGradient = CanvasGradient_;
+ CanvasPattern = CanvasPattern_;
+ DOMException = DOMException_;
+})();
+
+} // if
diff --git a/deps/rabbitmq_management/priv/www/js/excanvas.min.js b/deps/rabbitmq_management/priv/www/js/excanvas.min.js
new file mode 100644
index 0000000000..988f934a18
--- /dev/null
+++ b/deps/rabbitmq_management/priv/www/js/excanvas.min.js
@@ -0,0 +1 @@
+if(!document.createElement("canvas").getContext){(function(){var z=Math;var K=z.round;var J=z.sin;var U=z.cos;var b=z.abs;var k=z.sqrt;var D=10;var F=D/2;function T(){return this.context_||(this.context_=new W(this))}var O=Array.prototype.slice;function G(i,j,m){var Z=O.call(arguments,2);return function(){return i.apply(j,Z.concat(O.call(arguments)))}}function AD(Z){return String(Z).replace(/&/g,"&amp;").replace(/"/g,"&quot;")}function r(i){if(!i.namespaces.g_vml_){i.namespaces.add("g_vml_","urn:schemas-microsoft-com:vml","#default#VML")}if(!i.namespaces.g_o_){i.namespaces.add("g_o_","urn:schemas-microsoft-com:office:office","#default#VML")}if(!i.styleSheets.ex_canvas_){var Z=i.createStyleSheet();Z.owningElement.id="ex_canvas_";Z.cssText="canvas{display:inline-block;overflow:hidden;text-align:left;width:300px;height:150px}"}}r(document);var E={init:function(Z){if(/MSIE/.test(navigator.userAgent)&&!window.opera){var i=Z||document;i.createElement("canvas");i.attachEvent("onreadystatechange",G(this.init_,this,i))}},init_:function(m){var j=m.getElementsByTagName("canvas");for(var Z=0;Z<j.length;Z++){this.initElement(j[Z])}},initElement:function(i){if(!i.getContext){i.getContext=T;r(i.ownerDocument);i.innerHTML="";i.attachEvent("onpropertychange",S);i.attachEvent("onresize",w);var Z=i.attributes;if(Z.width&&Z.width.specified){i.style.width=Z.width.nodeValue+"px"}else{i.width=i.clientWidth}if(Z.height&&Z.height.specified){i.style.height=Z.height.nodeValue+"px"}else{i.height=i.clientHeight}}return i}};function S(i){var Z=i.srcElement;switch(i.propertyName){case"width":Z.getContext().clearRect();Z.style.width=Z.attributes.width.nodeValue+"px";Z.firstChild.style.width=Z.clientWidth+"px";break;case"height":Z.getContext().clearRect();Z.style.height=Z.attributes.height.nodeValue+"px";Z.firstChild.style.height=Z.clientHeight+"px";break}}function w(i){var Z=i.srcElement;if(Z.firstChild){Z.firstChild.style.width=Z.clientWidth+"px";Z.firstChild.style.height=Z.clientHeight+"px"}}E.init();var I=[];for(var AC=0;AC<16;AC++){for(var AB=0;AB<16;AB++){I[AC*16+AB]=AC.toString(16)+AB.toString(16)}}function V(){return[[1,0,0],[0,1,0],[0,0,1]]}function d(m,j){var i=V();for(var Z=0;Z<3;Z++){for(var AF=0;AF<3;AF++){var p=0;for(var AE=0;AE<3;AE++){p+=m[Z][AE]*j[AE][AF]}i[Z][AF]=p}}return i}function Q(i,Z){Z.fillStyle=i.fillStyle;Z.lineCap=i.lineCap;Z.lineJoin=i.lineJoin;Z.lineWidth=i.lineWidth;Z.miterLimit=i.miterLimit;Z.shadowBlur=i.shadowBlur;Z.shadowColor=i.shadowColor;Z.shadowOffsetX=i.shadowOffsetX;Z.shadowOffsetY=i.shadowOffsetY;Z.strokeStyle=i.strokeStyle;Z.globalAlpha=i.globalAlpha;Z.font=i.font;Z.textAlign=i.textAlign;Z.textBaseline=i.textBaseline;Z.arcScaleX_=i.arcScaleX_;Z.arcScaleY_=i.arcScaleY_;Z.lineScale_=i.lineScale_}var 
B={aliceblue:"#F0F8FF",antiquewhite:"#FAEBD7",aquamarine:"#7FFFD4",azure:"#F0FFFF",beige:"#F5F5DC",bisque:"#FFE4C4",black:"#000000",blanchedalmond:"#FFEBCD",blueviolet:"#8A2BE2",brown:"#A52A2A",burlywood:"#DEB887",cadetblue:"#5F9EA0",chartreuse:"#7FFF00",chocolate:"#D2691E",coral:"#FF7F50",cornflowerblue:"#6495ED",cornsilk:"#FFF8DC",crimson:"#DC143C",cyan:"#00FFFF",darkblue:"#00008B",darkcyan:"#008B8B",darkgoldenrod:"#B8860B",darkgray:"#A9A9A9",darkgreen:"#006400",darkgrey:"#A9A9A9",darkkhaki:"#BDB76B",darkmagenta:"#8B008B",darkolivegreen:"#556B2F",darkorange:"#FF8C00",darkorchid:"#9932CC",darkred:"#8B0000",darksalmon:"#E9967A",darkseagreen:"#8FBC8F",darkslateblue:"#483D8B",darkslategray:"#2F4F4F",darkslategrey:"#2F4F4F",darkturquoise:"#00CED1",darkviolet:"#9400D3",deeppink:"#FF1493",deepskyblue:"#00BFFF",dimgray:"#696969",dimgrey:"#696969",dodgerblue:"#1E90FF",firebrick:"#B22222",floralwhite:"#FFFAF0",forestgreen:"#228B22",gainsboro:"#DCDCDC",ghostwhite:"#F8F8FF",gold:"#FFD700",goldenrod:"#DAA520",grey:"#808080",greenyellow:"#ADFF2F",honeydew:"#F0FFF0",hotpink:"#FF69B4",indianred:"#CD5C5C",indigo:"#4B0082",ivory:"#FFFFF0",khaki:"#F0E68C",lavender:"#E6E6FA",lavenderblush:"#FFF0F5",lawngreen:"#7CFC00",lemonchiffon:"#FFFACD",lightblue:"#ADD8E6",lightcoral:"#F08080",lightcyan:"#E0FFFF",lightgoldenrodyellow:"#FAFAD2",lightgreen:"#90EE90",lightgrey:"#D3D3D3",lightpink:"#FFB6C1",lightsalmon:"#FFA07A",lightseagreen:"#20B2AA",lightskyblue:"#87CEFA",lightslategray:"#778899",lightslategrey:"#778899",lightsteelblue:"#B0C4DE",lightyellow:"#FFFFE0",limegreen:"#32CD32",linen:"#FAF0E6",magenta:"#FF00FF",mediumaquamarine:"#66CDAA",mediumblue:"#0000CD",mediumorchid:"#BA55D3",mediumpurple:"#9370DB",mediumseagreen:"#3CB371",mediumslateblue:"#7B68EE",mediumspringgreen:"#00FA9A",mediumturquoise:"#48D1CC",mediumvioletred:"#C71585",midnightblue:"#191970",mintcream:"#F5FFFA",mistyrose:"#FFE4E1",moccasin:"#FFE4B5",navajowhite:"#FFDEAD",oldlace:"#FDF5E6",olivedrab:"#6B8E23",orange:"#FFA500",orangered:"#FF4500",orchid:"#DA70D6",palegoldenrod:"#EEE8AA",palegreen:"#98FB98",paleturquoise:"#AFEEEE",palevioletred:"#DB7093",papayawhip:"#FFEFD5",peachpuff:"#FFDAB9",peru:"#CD853F",pink:"#FFC0CB",plum:"#DDA0DD",powderblue:"#B0E0E6",rosybrown:"#BC8F8F",royalblue:"#4169E1",saddlebrown:"#8B4513",salmon:"#FA8072",sandybrown:"#F4A460",seagreen:"#2E8B57",seashell:"#FFF5EE",sienna:"#A0522D",skyblue:"#87CEEB",slateblue:"#6A5ACD",slategray:"#708090",slategrey:"#708090",snow:"#FFFAFA",springgreen:"#00FF7F",steelblue:"#4682B4",tan:"#D2B48C",thistle:"#D8BFD8",tomato:"#FF6347",turquoise:"#40E0D0",violet:"#EE82EE",wheat:"#F5DEB3",whitesmoke:"#F5F5F5",yellowgreen:"#9ACD32"};function g(i){var m=i.indexOf("(",3);var Z=i.indexOf(")",m+1);var j=i.substring(m+1,Z).split(",");if(j.length==4&&i.substr(3,1)=="a"){alpha=Number(j[3])}else{j[3]=1}return j}function C(Z){return parseFloat(Z)/100}function N(i,j,Z){return Math.min(Z,Math.max(j,i))}function c(AF){var j,i,Z;h=parseFloat(AF[0])/360%360;if(h<0){h++}s=N(C(AF[1]),0,1);l=N(C(AF[2]),0,1);if(s==0){j=i=Z=l}else{var m=l<0.5?l*(1+s):l+s-l*s;var AE=2*l-m;j=A(AE,m,h+1/3);i=A(AE,m,h);Z=A(AE,m,h-1/3)}return"#"+I[Math.floor(j*255)]+I[Math.floor(i*255)]+I[Math.floor(Z*255)]}function A(i,Z,j){if(j<0){j++}if(j>1){j--}if(6*j<1){return i+(Z-i)*6*j}else{if(2*j<1){return Z}else{if(3*j<2){return i+(Z-i)*(2/3-j)*6}else{return i}}}}function Y(Z){var AE,p=1;Z=String(Z);if(Z.charAt(0)=="#"){AE=Z}else{if(/^rgb/.test(Z)){var m=g(Z);var AE="#",AF;for(var 
j=0;j<3;j++){if(m[j].indexOf("%")!=-1){AF=Math.floor(C(m[j])*255)}else{AF=Number(m[j])}AE+=I[N(AF,0,255)]}p=m[3]}else{if(/^hsl/.test(Z)){var m=g(Z);AE=c(m);p=m[3]}else{AE=B[Z]||Z}}}return{color:AE,alpha:p}}var L={style:"normal",variant:"normal",weight:"normal",size:10,family:"sans-serif"};var f={};function X(Z){if(f[Z]){return f[Z]}var m=document.createElement("div");var j=m.style;try{j.font=Z}catch(i){}return f[Z]={style:j.fontStyle||L.style,variant:j.fontVariant||L.variant,weight:j.fontWeight||L.weight,size:j.fontSize||L.size,family:j.fontFamily||L.family}}function P(j,i){var Z={};for(var AF in j){Z[AF]=j[AF]}var AE=parseFloat(i.currentStyle.fontSize),m=parseFloat(j.size);if(typeof j.size=="number"){Z.size=j.size}else{if(j.size.indexOf("px")!=-1){Z.size=m}else{if(j.size.indexOf("em")!=-1){Z.size=AE*m}else{if(j.size.indexOf("%")!=-1){Z.size=(AE/100)*m}else{if(j.size.indexOf("pt")!=-1){Z.size=m/0.75}else{Z.size=AE}}}}}Z.size*=0.981;return Z}function AA(Z){return Z.style+" "+Z.variant+" "+Z.weight+" "+Z.size+"px "+Z.family}function t(Z){switch(Z){case"butt":return"flat";case"round":return"round";case"square":default:return"square"}}function W(i){this.m_=V();this.mStack_=[];this.aStack_=[];this.currentPath_=[];this.strokeStyle="#000";this.fillStyle="#000";this.lineWidth=1;this.lineJoin="miter";this.lineCap="butt";this.miterLimit=D*1;this.globalAlpha=1;this.font="10px sans-serif";this.textAlign="left";this.textBaseline="alphabetic";this.canvas=i;var Z=i.ownerDocument.createElement("div");Z.style.width=i.clientWidth+"px";Z.style.height=i.clientHeight+"px";Z.style.overflow="hidden";Z.style.position="absolute";i.appendChild(Z);this.element_=Z;this.arcScaleX_=1;this.arcScaleY_=1;this.lineScale_=1}var M=W.prototype;M.clearRect=function(){if(this.textMeasureEl_){this.textMeasureEl_.removeNode(true);this.textMeasureEl_=null}this.element_.innerHTML=""};M.beginPath=function(){this.currentPath_=[]};M.moveTo=function(i,Z){var j=this.getCoords_(i,Z);this.currentPath_.push({type:"moveTo",x:j.x,y:j.y});this.currentX_=j.x;this.currentY_=j.y};M.lineTo=function(i,Z){var j=this.getCoords_(i,Z);this.currentPath_.push({type:"lineTo",x:j.x,y:j.y});this.currentX_=j.x;this.currentY_=j.y};M.bezierCurveTo=function(j,i,AI,AH,AG,AE){var Z=this.getCoords_(AG,AE);var AF=this.getCoords_(j,i);var m=this.getCoords_(AI,AH);e(this,AF,m,Z)};function e(Z,m,j,i){Z.currentPath_.push({type:"bezierCurveTo",cp1x:m.x,cp1y:m.y,cp2x:j.x,cp2y:j.y,x:i.x,y:i.y});Z.currentX_=i.x;Z.currentY_=i.y}M.quadraticCurveTo=function(AG,j,i,Z){var AF=this.getCoords_(AG,j);var AE=this.getCoords_(i,Z);var AH={x:this.currentX_+2/3*(AF.x-this.currentX_),y:this.currentY_+2/3*(AF.y-this.currentY_)};var m={x:AH.x+(AE.x-this.currentX_)/3,y:AH.y+(AE.y-this.currentY_)/3};e(this,AH,m,AE)};M.arc=function(AJ,AH,AI,AE,i,j){AI*=D;var AN=j?"at":"wa";var AK=AJ+U(AE)*AI-F;var AM=AH+J(AE)*AI-F;var Z=AJ+U(i)*AI-F;var AL=AH+J(i)*AI-F;if(AK==Z&&!j){AK+=0.125}var m=this.getCoords_(AJ,AH);var AG=this.getCoords_(AK,AM);var AF=this.getCoords_(Z,AL);this.currentPath_.push({type:AN,x:m.x,y:m.y,radius:AI,xStart:AG.x,yStart:AG.y,xEnd:AF.x,yEnd:AF.y})};M.rect=function(j,i,Z,m){this.moveTo(j,i);this.lineTo(j+Z,i);this.lineTo(j+Z,i+m);this.lineTo(j,i+m);this.closePath()};M.strokeRect=function(j,i,Z,m){var p=this.currentPath_;this.beginPath();this.moveTo(j,i);this.lineTo(j+Z,i);this.lineTo(j+Z,i+m);this.lineTo(j,i+m);this.closePath();this.stroke();this.currentPath_=p};M.fillRect=function(j,i,Z,m){var 
p=this.currentPath_;this.beginPath();this.moveTo(j,i);this.lineTo(j+Z,i);this.lineTo(j+Z,i+m);this.lineTo(j,i+m);this.closePath();this.fill();this.currentPath_=p};M.createLinearGradient=function(i,m,Z,j){var p=new v("gradient");p.x0_=i;p.y0_=m;p.x1_=Z;p.y1_=j;return p};M.createRadialGradient=function(m,AE,j,i,p,Z){var AF=new v("gradientradial");AF.x0_=m;AF.y0_=AE;AF.r0_=j;AF.x1_=i;AF.y1_=p;AF.r1_=Z;return AF};M.drawImage=function(AO,j){var AH,AF,AJ,AV,AM,AK,AQ,AX;var AI=AO.runtimeStyle.width;var AN=AO.runtimeStyle.height;AO.runtimeStyle.width="auto";AO.runtimeStyle.height="auto";var AG=AO.width;var AT=AO.height;AO.runtimeStyle.width=AI;AO.runtimeStyle.height=AN;if(arguments.length==3){AH=arguments[1];AF=arguments[2];AM=AK=0;AQ=AJ=AG;AX=AV=AT}else{if(arguments.length==5){AH=arguments[1];AF=arguments[2];AJ=arguments[3];AV=arguments[4];AM=AK=0;AQ=AG;AX=AT}else{if(arguments.length==9){AM=arguments[1];AK=arguments[2];AQ=arguments[3];AX=arguments[4];AH=arguments[5];AF=arguments[6];AJ=arguments[7];AV=arguments[8]}else{throw Error("Invalid number of arguments")}}}var AW=this.getCoords_(AH,AF);var m=AQ/2;var i=AX/2;var AU=[];var Z=10;var AE=10;AU.push(" <g_vml_:group",' coordsize="',D*Z,",",D*AE,'"',' coordorigin="0,0"',' style="width:',Z,"px;height:",AE,"px;position:absolute;");if(this.m_[0][0]!=1||this.m_[0][1]||this.m_[1][1]!=1||this.m_[1][0]){var p=[];p.push("M11=",this.m_[0][0],",","M12=",this.m_[1][0],",","M21=",this.m_[0][1],",","M22=",this.m_[1][1],",","Dx=",K(AW.x/D),",","Dy=",K(AW.y/D),"");var AS=AW;var AR=this.getCoords_(AH+AJ,AF);var AP=this.getCoords_(AH,AF+AV);var AL=this.getCoords_(AH+AJ,AF+AV);AS.x=z.max(AS.x,AR.x,AP.x,AL.x);AS.y=z.max(AS.y,AR.y,AP.y,AL.y);AU.push("padding:0 ",K(AS.x/D),"px ",K(AS.y/D),"px 0;filter:progid:DXImageTransform.Microsoft.Matrix(",p.join(""),", sizingmethod='clip');")}else{AU.push("top:",K(AW.y/D),"px;left:",K(AW.x/D),"px;")}AU.push(' ">','<g_vml_:image src="',AO.src,'"',' style="width:',D*AJ,"px;"," height:",D*AV,'px"',' cropleft="',AM/AG,'"',' croptop="',AK/AT,'"',' cropright="',(AG-AM-AQ)/AG,'"',' cropbottom="',(AT-AK-AX)/AT,'"'," />","</g_vml_:group>");this.element_.insertAdjacentHTML("BeforeEnd",AU.join(""))};M.stroke=function(AM){var m=10;var AN=10;var AE=5000;var AG={x:null,y:null};var AL={x:null,y:null};for(var AH=0;AH<this.currentPath_.length;AH+=AE){var AK=[];var AF=false;AK.push("<g_vml_:shape",' filled="',!!AM,'"',' style="position:absolute;width:',m,"px;height:",AN,'px;"',' coordorigin="0,0"',' coordsize="',D*m,",",D*AN,'"',' stroked="',!AM,'"',' path="');var AO=false;for(var AI=AH;AI<Math.min(AH+AE,this.currentPath_.length);AI++){if(AI%AE==0&&AI>0){AK.push(" m ",K(this.currentPath_[AI-1].x),",",K(this.currentPath_[AI-1].y))}var Z=this.currentPath_[AI];var AJ;switch(Z.type){case"moveTo":AJ=Z;AK.push(" m ",K(Z.x),",",K(Z.y));break;case"lineTo":AK.push(" l ",K(Z.x),",",K(Z.y));break;case"close":AK.push(" x ");Z=null;break;case"bezierCurveTo":AK.push(" c ",K(Z.cp1x),",",K(Z.cp1y),",",K(Z.cp2x),",",K(Z.cp2y),",",K(Z.x),",",K(Z.y));break;case"at":case"wa":AK.push(" ",Z.type," ",K(Z.x-this.arcScaleX_*Z.radius),",",K(Z.y-this.arcScaleY_*Z.radius)," ",K(Z.x+this.arcScaleX_*Z.radius),",",K(Z.y+this.arcScaleY_*Z.radius)," ",K(Z.xStart),",",K(Z.yStart)," ",K(Z.xEnd),",",K(Z.yEnd));break}if(Z){if(AG.x==null||Z.x<AG.x){AG.x=Z.x}if(AL.x==null||Z.x>AL.x){AL.x=Z.x}if(AG.y==null||Z.y<AG.y){AG.y=Z.y}if(AL.y==null||Z.y>AL.y){AL.y=Z.y}}}AK.push(' 
">');if(!AM){R(this,AK)}else{a(this,AK,AG,AL)}AK.push("</g_vml_:shape>");this.element_.insertAdjacentHTML("beforeEnd",AK.join(""))}};function R(j,AE){var i=Y(j.strokeStyle);var m=i.color;var p=i.alpha*j.globalAlpha;var Z=j.lineScale_*j.lineWidth;if(Z<1){p*=Z}AE.push("<g_vml_:stroke",' opacity="',p,'"',' joinstyle="',j.lineJoin,'"',' miterlimit="',j.miterLimit,'"',' endcap="',t(j.lineCap),'"',' weight="',Z,'px"',' color="',m,'" />')}function a(AO,AG,Ah,AP){var AH=AO.fillStyle;var AY=AO.arcScaleX_;var AX=AO.arcScaleY_;var Z=AP.x-Ah.x;var m=AP.y-Ah.y;if(AH instanceof v){var AL=0;var Ac={x:0,y:0};var AU=0;var AK=1;if(AH.type_=="gradient"){var AJ=AH.x0_/AY;var j=AH.y0_/AX;var AI=AH.x1_/AY;var Aj=AH.y1_/AX;var Ag=AO.getCoords_(AJ,j);var Af=AO.getCoords_(AI,Aj);var AE=Af.x-Ag.x;var p=Af.y-Ag.y;AL=Math.atan2(AE,p)*180/Math.PI;if(AL<0){AL+=360}if(AL<0.000001){AL=0}}else{var Ag=AO.getCoords_(AH.x0_,AH.y0_);Ac={x:(Ag.x-Ah.x)/Z,y:(Ag.y-Ah.y)/m};Z/=AY*D;m/=AX*D;var Aa=z.max(Z,m);AU=2*AH.r0_/Aa;AK=2*AH.r1_/Aa-AU}var AS=AH.colors_;AS.sort(function(Ak,i){return Ak.offset-i.offset});var AN=AS.length;var AR=AS[0].color;var AQ=AS[AN-1].color;var AW=AS[0].alpha*AO.globalAlpha;var AV=AS[AN-1].alpha*AO.globalAlpha;var Ab=[];for(var Ae=0;Ae<AN;Ae++){var AM=AS[Ae];Ab.push(AM.offset*AK+AU+" "+AM.color)}AG.push('<g_vml_:fill type="',AH.type_,'"',' method="none" focus="100%"',' color="',AR,'"',' color2="',AQ,'"',' colors="',Ab.join(","),'"',' opacity="',AV,'"',' g_o_:opacity2="',AW,'"',' angle="',AL,'"',' focusposition="',Ac.x,",",Ac.y,'" />')}else{if(AH instanceof u){if(Z&&m){var AF=-Ah.x;var AZ=-Ah.y;AG.push("<g_vml_:fill",' position="',AF/Z*AY*AY,",",AZ/m*AX*AX,'"',' type="tile"',' src="',AH.src_,'" />')}}else{var Ai=Y(AO.fillStyle);var AT=Ai.color;var Ad=Ai.alpha*AO.globalAlpha;AG.push('<g_vml_:fill color="',AT,'" opacity="',Ad,'" />')}}}M.fill=function(){this.stroke(true)};M.closePath=function(){this.currentPath_.push({type:"close"})};M.getCoords_=function(j,i){var Z=this.m_;return{x:D*(j*Z[0][0]+i*Z[1][0]+Z[2][0])-F,y:D*(j*Z[0][1]+i*Z[1][1]+Z[2][1])-F}};M.save=function(){var Z={};Q(this,Z);this.aStack_.push(Z);this.mStack_.push(this.m_);this.m_=d(V(),this.m_)};M.restore=function(){if(this.aStack_.length){Q(this.aStack_.pop(),this);this.m_=this.mStack_.pop()}};function H(Z){return isFinite(Z[0][0])&&isFinite(Z[0][1])&&isFinite(Z[1][0])&&isFinite(Z[1][1])&&isFinite(Z[2][0])&&isFinite(Z[2][1])}function y(i,Z,j){if(!H(Z)){return }i.m_=Z;if(j){var p=Z[0][0]*Z[1][1]-Z[0][1]*Z[1][0];i.lineScale_=k(b(p))}}M.translate=function(j,i){var Z=[[1,0,0],[0,1,0],[j,i,1]];y(this,d(Z,this.m_),false)};M.rotate=function(i){var m=U(i);var j=J(i);var Z=[[m,j,0],[-j,m,0],[0,0,1]];y(this,d(Z,this.m_),false)};M.scale=function(j,i){this.arcScaleX_*=j;this.arcScaleY_*=i;var Z=[[j,0,0],[0,i,0],[0,0,1]];y(this,d(Z,this.m_),true)};M.transform=function(p,m,AF,AE,i,Z){var j=[[p,m,0],[AF,AE,0],[i,Z,1]];y(this,d(j,this.m_),true)};M.setTransform=function(AE,p,AG,AF,j,i){var Z=[[AE,p,0],[AG,AF,0],[j,i,1]];y(this,Z,true)};M.drawText_=function(AK,AI,AH,AN,AG){var AM=this.m_,AQ=1000,i=0,AP=AQ,AF={x:0,y:0},AE=[];var Z=P(X(this.font),this.element_);var j=AA(Z);var AR=this.element_.currentStyle;var p=this.textAlign.toLowerCase();switch(p){case"left":case"center":case"right":break;case"end":p=AR.direction=="ltr"?"right":"left";break;case"start":p=AR.direction=="rtl"?"right":"left";break;default:p="left"}switch(this.textBaseline){case"hanging":case"top":AF.y=Z.size/1.75;break;case"middle":break;default:case 
null:case"alphabetic":case"ideographic":case"bottom":AF.y=-Z.size/2.25;break}switch(p){case"right":i=AQ;AP=0.05;break;case"center":i=AP=AQ/2;break}var AO=this.getCoords_(AI+AF.x,AH+AF.y);AE.push('<g_vml_:line from="',-i,' 0" to="',AP,' 0.05" ',' coordsize="100 100" coordorigin="0 0"',' filled="',!AG,'" stroked="',!!AG,'" style="position:absolute;width:1px;height:1px;">');if(AG){R(this,AE)}else{a(this,AE,{x:-i,y:0},{x:AP,y:Z.size})}var AL=AM[0][0].toFixed(3)+","+AM[1][0].toFixed(3)+","+AM[0][1].toFixed(3)+","+AM[1][1].toFixed(3)+",0,0";var AJ=K(AO.x/D)+","+K(AO.y/D);AE.push('<g_vml_:skew on="t" matrix="',AL,'" ',' offset="',AJ,'" origin="',i,' 0" />','<g_vml_:path textpathok="true" />','<g_vml_:textpath on="true" string="',AD(AK),'" style="v-text-align:',p,";font:",AD(j),'" /></g_vml_:line>');this.element_.insertAdjacentHTML("beforeEnd",AE.join(""))};M.fillText=function(j,Z,m,i){this.drawText_(j,Z,m,i,false)};M.strokeText=function(j,Z,m,i){this.drawText_(j,Z,m,i,true)};M.measureText=function(j){if(!this.textMeasureEl_){var Z='<span style="position:absolute;top:-20000px;left:0;padding:0;margin:0;border:none;white-space:pre;"></span>';this.element_.insertAdjacentHTML("beforeEnd",Z);this.textMeasureEl_=this.element_.lastChild}var i=this.element_.ownerDocument;this.textMeasureEl_.innerHTML="";this.textMeasureEl_.style.font=this.font;this.textMeasureEl_.appendChild(i.createTextNode(j));return{width:this.textMeasureEl_.offsetWidth}};M.clip=function(){};M.arcTo=function(){};M.createPattern=function(i,Z){return new u(i,Z)};function v(Z){this.type_=Z;this.x0_=0;this.y0_=0;this.r0_=0;this.x1_=0;this.y1_=0;this.r1_=0;this.colors_=[]}v.prototype.addColorStop=function(i,Z){Z=Y(Z);this.colors_.push({offset:i,color:Z.color,alpha:Z.alpha})};function u(i,Z){q(i);switch(Z){case"repeat":case null:case"":this.repetition_="repeat";break;case"repeat-x":case"repeat-y":case"no-repeat":this.repetition_=Z;break;default:n("SYNTAX_ERR")}this.src_=i.src;this.width_=i.width;this.height_=i.height}function n(Z){throw new o(Z)}function q(Z){if(!Z||Z.nodeType!=1||Z.tagName!="IMG"){n("TYPE_MISMATCH_ERR")}if(Z.readyState!="complete"){n("INVALID_STATE_ERR")}}function o(Z){this.code=this[Z];this.message=Z+": DOM Exception "+this.code}var x=o.prototype=new Error;x.INDEX_SIZE_ERR=1;x.DOMSTRING_SIZE_ERR=2;x.HIERARCHY_REQUEST_ERR=3;x.WRONG_DOCUMENT_ERR=4;x.INVALID_CHARACTER_ERR=5;x.NO_DATA_ALLOWED_ERR=6;x.NO_MODIFICATION_ALLOWED_ERR=7;x.NOT_FOUND_ERR=8;x.NOT_SUPPORTED_ERR=9;x.INUSE_ATTRIBUTE_ERR=10;x.INVALID_STATE_ERR=11;x.SYNTAX_ERR=12;x.INVALID_MODIFICATION_ERR=13;x.NAMESPACE_ERR=14;x.INVALID_ACCESS_ERR=15;x.VALIDATION_ERR=16;x.TYPE_MISMATCH_ERR=17;G_vmlCanvasManager=E;CanvasRenderingContext2D=W;CanvasGradient=v;CanvasPattern=u;DOMException=o})()};
diff --git a/deps/rabbitmq_management/priv/www/js/formatters.js b/deps/rabbitmq_management/priv/www/js/formatters.js
new file mode 100644
index 0000000000..8c0f55ed93
--- /dev/null
+++ b/deps/rabbitmq_management/priv/www/js/formatters.js
@@ -0,0 +1,1067 @@
+const UNKNOWN_REPR = '<span class="unknown">?</span>';
+const FD_THRESHOLDS = [[0.95, 'red'],
+ [0.8, 'yellow']];
+const SOCKETS_THRESHOLDS = [[1.0, 'red'],
+ [0.8, 'yellow']];
+const PROCESS_THRESHOLDS = [[0.75, 'red'],
+ [0.5, 'yellow']];
+
+const TAB_HIGHLIGHTER = "\u2192";
+const WHITESPACE_HIGHLIGHTER = "\u23B5";
+
+function fmt_string(str, unknown) {
+ if (unknown == undefined) {
+ unknown = UNKNOWN_REPR;
+ }
+ if (str == undefined) {
+ return unknown;
+ }
+ return fmt_escape_html("" + str);
+}
+
+function fmt_si_prefix(num0, max0, thousand, allow_fractions) {
+ if (num0 == 0) return 0; // short-circuit zero before scaling
+
+ function f(n, m, p) {
+ if (m > thousand) return f(n / thousand, m / thousand, p + 1);
+ else return [n, m, p];
+ }
+
+ var num_power = f(num0, max0, 0);
+ var num = num_power[0];
+ var max = num_power[1];
+ var power = num_power[2];
+ var powers = ['', 'k', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y'];
+ return (((power != 0 || allow_fractions) && max <= 10) ? num.toFixed(1) :
+ num.toFixed(0)) + " " + powers[power];
+}
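+
+// Illustrative behaviour (made-up inputs); the scaling power is derived from
+// max0 rather than num0, so a set of axis labels shares one prefix:
+//   fmt_si_prefix(1234, 9999, 1000, false)  // => "1.2 k"
+//   fmt_si_prefix(500, 500, 1000, false)    // => "500 " (empty suffix)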
+
+function fmt_boolean(b, unknown) {
+ if (unknown == undefined) unknown = UNKNOWN_REPR;
+ if (b == undefined) return unknown;
+
+ return b ? "&#9679;" : "&#9675;";
+}
+
+function fmt_date(d) {
+ var res = fmt_date0(d);
+ return res[0] + ' ' + res[1];
+}
+
+function fmt_date_mini(d) {
+ var res = fmt_date0(d);
+ return res[1] + '<sub>' + res[0] + '</sub>';
+}
+
+function fmt_date0(d) {
+ function f(i) {
+ return i < 10 ? "0" + i : i;
+ }
+
+ return [d.getFullYear() + "-" + f(d.getMonth() + 1) + "-" +
+ f(d.getDate()), f(d.getHours()) + ":" + f(d.getMinutes()) +
+ ":" + f(d.getSeconds())];
+}
+
+function fmt_timestamp(ts) {
+ return fmt_date(new Date(ts));
+}
+
+function fmt_timestamp_mini(ts) {
+ return fmt_date_mini(new Date(ts));
+}
+
+function fmt_time(t, suffix) {
+ if (typeof t === "undefined") return '';
+ return t + suffix;
+}
+
+function fmt_millis(millis) {
+ return Math.round(millis / 1000) + "s";
+}
+
+function fmt_features(obj) {
+ return fmt_table_short(args_to_features(obj));
+}
+
+function fmt_policy_short(obj) {
+ if (obj.policy != undefined && obj.policy != '') {
+ return '<abbr class="policy" title="Policy: ' +
+ fmt_escape_html(obj.policy) + '">' +
+ link_policy(obj.vhost, obj.policy) + '</abbr> ';
+ } else {
+ return '';
+ }
+}
+
+function fmt_op_policy_short(obj) {
+ if (obj.operator_policy != undefined && obj.operator_policy != '') {
+ return '<abbr class="policy" title="Operator policy: ' +
+ fmt_escape_html(obj.operator_policy) + '">' +
+ fmt_escape_html(obj.operator_policy) + '</abbr> ';
+ } else {
+ return '';
+ }
+}
+
+function fmt_features_short(obj) {
+ var res = '';
+ var features = args_to_features(obj);
+
+ if (obj.owner_pid_details != undefined) {
+ res += '<acronym title="Exclusive queue: click for owning connection">'
+ + link_conn(obj.owner_pid_details.name, "Excl") + '</acronym> ';
+ }
+
+ for (var k in ALL_ARGS) {
+ if (features[k] != undefined) {
+ res += '<abbr title="' + k + ': ' + fmt_string(features[k]) +
+ '">' + ALL_ARGS[k].short + '</abbr> ';
+ }
+ }
+
+ if (features.arguments) {
+ res += '<abbr title="' + fmt_table_flat(features.arguments) +
+ '">Args</abbr> ';
+ }
+ return res;
+}
+
+function fmt_activity_status(obj) {
+ return obj.replace(/_/g, ' ');
+}
+
+function short_conn(name) {
+ var pat = /^(.*)->/;
+ var match = pat.exec(name);
+ return (match != null && match.length == 2) ? match[1] : name;
+}
+
+function short_chan(name) {
+ var pat = /^(.*)->.*( \(.*\))/;
+ var match = pat.exec(name);
+ return (match != null && match.length == 3) ? match[1] + match[2] : name;
+}
+
+function args_to_features(obj) {
+ var res = {};
+ for (var k in obj.arguments) {
+ if (k in KNOWN_ARGS) {
+ res[k] = fmt_escape_feature(obj.arguments[k]);
+ }
+ else {
+ if (res.arguments == undefined) res.arguments = {};
+ res.arguments[fmt_escape_html(k)] = fmt_escape_html(obj.arguments[k]);
+ }
+ }
+ if (obj.exclusive) {
+ res['exclusive'] = true;
+ }
+ if (obj.durable) {
+ res['durable'] = true;
+ }
+ if (obj.auto_delete) {
+ res['auto-delete'] = true;
+ }
+ if (obj.internal != undefined && obj.internal) {
+ res['internal'] = true;
+ }
+ if (obj.messages_delayed != undefined){
+ res['messages delayed'] = obj.messages_delayed;
+ }
+ return res;
+}
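+
+// A sketch of the resulting shape, with made-up queue data: known argument
+// keys are lifted to the top level, the rest are nested under 'arguments':
+//   args_to_features({durable: true, arguments: {'x-expires': 60000, 'foo': 'bar'}})
+//   // => {'x-expires': '60000', arguments: {'foo': 'bar'}, durable: true}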
+
+function fmt_mirrors(queue) {
+ var synced = queue.synchronised_slave_nodes || [];
+ var unsynced = queue.slave_nodes || [];
+ unsynced = jQuery.grep(unsynced,
+ function (node, i) {
+ return jQuery.inArray(node, synced) == -1;
+ });
+ var res = '';
+ if (synced.length > 0) {
+ res += ' <abbr title="Synchronised mirrors: ' + synced + '">+' +
+ synced.length + '</abbr>';
+ }
+ if (synced.length == 0 && unsynced.length > 0) {
+ res += ' <abbr title="There are no synchronised mirrors">+0</abbr>';
+ }
+ if (unsynced.length > 0) {
+ res += ' <abbr class="warning" title="Unsynchronised mirrors: ' +
+ unsynced + '">+' + unsynced.length + '</abbr>';
+ }
+ return res;
+}
+
+function fmt_sync_state(queue) {
+ var res = '<p><b>Syncing: ';
+ res += (queue.messages == 0) ? 100 : Math.round(100 * queue.sync_messages /
+ queue.messages);
+ res += '%</b></p>';
+ return res;
+}
+
+function fmt_members(queue) {
+ var res = '';
+ var isMajority = (queue.online.length >= (Math.floor(queue.members.length / 2) + 1));
+ // copy before splicing so queue.online itself is not mutated
+ var followers = queue.online.slice();
+ var leader_idx = followers.indexOf(queue.node);
+ if (leader_idx != -1) followers.splice(leader_idx, 1);
+ if (isMajority) {
+ res += ' <abbr title="Followers: ' + followers + '">+' +
+ followers.length + '</abbr>';
+ } else {
+ res += ' <abbr class="warning" title="Cluster is in minority">+' + followers.length + '</abbr>';
+ }
+ return res;
+}
+
+function fmt_channel_mode(ch) {
+ if (ch.transactional) {
+ return '<abbr title="Transactional">T</abbr>';
+ }
+ else if (ch.confirm) {
+ return '<abbr title="Confirm">C</abbr>';
+ }
+ else {
+ return '';
+ }
+}
+
+function fmt_color(r, thresholds) {
+ if (r == undefined) return '';
+
+ for (var i in thresholds) {
+ var threshold = thresholds[i][0];
+ var color = thresholds[i][1];
+
+ if (r >= threshold) {
+ return color;
+ }
+ }
+ return 'green';
+}
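+
+// For instance, with the FD_THRESHOLDS above and a hypothetical usage ratio:
+//   fmt_color(0.85, FD_THRESHOLDS)  // => 'yellow' (>= 0.8 but < 0.95)
+//   fmt_color(0.50, FD_THRESHOLDS)  // => 'green' (below every threshold)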
+
+function fmt_rate_num(num) {
+ if (num == undefined) return UNKNOWN_REPR;
+ else if (num < 1) return num.toFixed(2);
+ else if (num < 10) return num.toFixed(1);
+ else return fmt_num_thousands(num);
+}
+
+function fmt_num_thousands(num) {
+ var conv_num = parseFloat(num); // to avoid errors, if someone calls fmt_num_thousands(someNumber.toFixed(0))
+ return fmt_num_thousands_unfixed(conv_num.toFixed(0));
+}
+
+function fmt_num_thousands_unfixed(num) {
+ if (num == undefined) return UNKNOWN_REPR;
+ num = '' + num;
+ if (num.length < 4) return num;
+ var res = fmt_num_thousands_unfixed(num.slice(0, -3)) + ',' + num.slice(-3);
+ return res;
+}
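+
+// The recursion peels three digits off the right per step, e.g.
+//   fmt_num_thousands(1234567)  // => "1,234,567"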
+
+function fmt_percent(num) {
+ if (num === '') {
+ return 'N/A';
+ } else {
+ return Math.round(num * 100) + '%';
+ }
+}
+
+function pick_rate(fmt, obj, name, mode) {
+ if (obj == undefined || obj[name] == undefined ||
+ obj[name + '_details'] == undefined) return '';
+ var details = obj[name + '_details'];
+ return fmt(mode == 'avg' ? details.avg_rate : details.rate);
+}
+
+function pick_abs(fmt, obj, name, mode) {
+ if (obj == undefined || obj[name] == undefined ||
+ obj[name + '_details'] == undefined) return '';
+ var details = obj[name + '_details'];
+ return fmt(mode == 'avg' ? details.avg : obj[name]);
+}
+
+function fmt_detail_rate(obj, name, mode) {
+ return pick_rate(fmt_rate, obj, name, mode);
+}
+
+function fmt_detail_rate_bytes(obj, name, mode) {
+ return pick_rate(fmt_rate_bytes, obj, name, mode);
+}
+
+// ---------------------------------------------------------------------
+
+// These are pluggable for charts etc
+
+function fmt_plain(num) {
+ return num;
+}
+
+function fmt_plain_axis(num, max) {
+ return fmt_si_prefix(num, max, 1000, true);
+}
+
+function fmt_rate(num) {
+ return fmt_rate_num(num) + '/s';
+}
+
+function fmt_rate_axis(num, max) {
+ return fmt_plain_axis(num, max) + '/s';
+}
+
+function fmt_bytes(bytes) {
+ if (bytes == undefined) return UNKNOWN_REPR;
+ var prefix = fmt_si_prefix(bytes, bytes, 1024, false);
+ return prefix + (prefix != bytes ? 'iB' : 'B');
+}
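+
+// Scales by 1024 and appends "iB" only when a prefix was applied, e.g.
+//   fmt_bytes(3 * 1024 * 1024)  // => "3.0 MiB"
+//   fmt_bytes(512)              // => "512 B"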
+
+function fmt_bytes_axis(num, max) {
+ num = parseInt(num);
+ return fmt_bytes(isNaN(num) ? 0 : num);
+}
+
+function fmt_rate_bytes(num) {
+ return fmt_bytes(num) + '/s';
+}
+
+function fmt_rate_bytes_axis(num, max) {
+ return fmt_bytes_axis(num, max) + '/s';
+}
+
+function fmt_ms(num) {
+ return fmt_rate_num(num) + 'ms';
+}
+
+// ---------------------------------------------------------------------
+
+function fmt_maybe_vhost(name) {
+ return vhosts_interesting ?
+ ' in virtual host <b>' + fmt_escape_html(name) + '</b>'
+ : '';
+}
+
+function fmt_exchange(name) {
+ return fmt_escape_html(fmt_exchange0(name));
+}
+
+function fmt_exchange0(name) {
+ return name == '' ? '(AMQP default)' : name;
+}
+
+function fmt_exchange_type(type) {
+ for (var i in exchange_types) {
+ if (exchange_types[i].name == type) {
+ return fmt_escape_html(type);
+ }
+ }
+ return '<div class="status-red"><abbr title="Exchange type not found. ' +
+ 'Publishing to this exchange will fail.">' + fmt_escape_html(type) +
+ '</abbr></div>';
+}
+
+function fmt_exchange_url(name) {
+ return name == '' ? 'amq.default' : fmt_escape_html(name);
+}
+
+function fmt_download_filename(host) {
+ var now = new Date();
+ return host.replace('@', '_') + "_" + now.getFullYear() + "-" +
+ (now.getMonth() + 1) + "-" + now.getDate() + ".json";
+}
+
+function fmt_table_short(table) {
+ return '<table class="mini">' + fmt_table_body(table, ':') + '</table>';
+}
+
+function fmt_table_long(table) {
+ return '<table class="facts">' + fmt_table_body(table, '') +
+ '</table>';
+}
+
+function fmt_table_body(table, x) {
+ var res = '';
+ for (var k in table) {
+ res += '<tr><th>' + fmt_escape_html(k) + x + '</th>' +
+ '<td>' + fmt_amqp_value(table[k]) + '</td>';
+ }
+ return res;
+}
+
+function fmt_amqp_value(val) {
+ if (val instanceof Array) {
+ var val2 = new Array();
+ for (var i = 0; i < val.length; i++) {
+ val2[i] = fmt_amqp_value(val[i]);
+ }
+ return val2.join("<br/>");
+ } else if (val instanceof Object) {
+ return fmt_table_short(val);
+ } else {
+ var t = typeof(val);
+ if (t == 'string') {
+ return '<abbr class="type" title="string">' +
+ fmt_escape_html(val) + '</abbr>';
+ } else {
+ return '<abbr class="type" title="' + t + '">' + val + '</abbr>';
+ }
+ }
+}
+
+function fmt_table_flat(table) {
+ var res = [];
+ for (var k in table) {
+ res.push(fmt_escape_html(k) + ': ' + fmt_amqp_value_flat(table[k]));
+ }
+ return res.join(', ');
+}
+
+function fmt_amqp_value_flat(val) {
+ if (val instanceof Array) {
+ var val2 = new Array();
+ for (var i = 0; i < val.length; i++) {
+ val2[i] = fmt_amqp_value_flat(val[i]);
+ }
+ return '[' + val2.join(",") + ']';
+ } else if (val instanceof Object) {
+ return '(' + fmt_table_flat(val) + ')';
+ } else if (typeof(val) == 'string') {
+ return fmt_escape_html(val);
+ } else {
+ return val;
+ }
+}
+
+function fmt_uptime(u) {
+ var uptime = Math.floor(u / 1000);
+ var sec = uptime % 60;
+ var min = Math.floor(uptime / 60) % 60;
+ var hour = Math.floor(uptime / 3600) % 24;
+ var day = Math.floor(uptime / 86400);
+
+ if (day > 0)
+ return day + 'd ' + hour + 'h';
+ else if (hour > 0)
+ return hour + 'h ' + min + 'm';
+ else
+ return min + 'm ' + sec + 's';
+}
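+
+// Input is in milliseconds; only the two most significant units are kept:
+//   fmt_uptime(90061000)  // => "1d 1h"
+//   fmt_uptime(754000)    // => "12m 34s"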
+
+function fmt_plugins_small(node) {
+ if (node.applications === undefined) return '';
+ var plugins = [];
+ for (var i = 0; i < node.applications.length; i++) {
+ var application = node.applications[i];
+ if (jQuery.inArray(application.name, node.enabled_plugins) != -1 ) {
+ plugins.push(application.name);
+ }
+ }
+ return '<abbr title="Enabled plugins: ' + plugins.join(", ") + '">' +
+ plugins.length + '</abbr>';
+}
+
+function get_plugins_list(node) {
+ var result = [];
+ for (var i = 0; i < node.applications.length; i++) {
+ var application = node.applications[i];
+ if (jQuery.inArray(application.name, node.enabled_plugins) != -1 ) {
+ result.push(application);
+ }
+ }
+ return result;
+}
+
+function fmt_rabbit_version(applications) {
+ for (var i in applications) {
+ if (applications[i].name == 'rabbit') {
+ return applications[i].version;
+ }
+ }
+ return 'unknown';
+}
+
+function fmt_strip_tags(txt) {
+ if(txt === null) {
+ return "";
+ }
+ if(txt === undefined) {
+ return "";
+ }
+ return ("" + txt).replace(/<(?:.|\n)*?>/gm, '');
+}
+
+function fmt_escape_feature(txt) {
+ // Exclude SAC which is the only boolean feature
+ if(txt === false) {
+ return undefined;
+ } else {
+ return fmt_escape_html0(txt).replace(/\n/g, '<br/>');
+ }
+}
+
+function fmt_escape_html(txt) {
+ return fmt_escape_html0(txt).replace(/\n/g, '<br/>');
+}
+
+function fmt_escape_html_one_line(txt) {
+ return fmt_escape_html0(txt).replace(/\n/g, '');
+}
+
+function fmt_escape_html0(txt) {
+ if(txt === null) {
+ return "";
+ }
+ if(txt === undefined) {
+ return "";
+ }
+
+ return ("" + txt).replace(/&/g, '&amp;')
+ .replace(/</g, '&lt;')
+ .replace(/>/g, '&gt;')
+ .replace(/\"/g, '&quot;');
+}
+
+function fmt_maybe_wrap(txt, encoding) {
+ if (encoding == 'string') return fmt_escape_html(txt);
+
+ var WRAP = 120;
+ var res = '';
+ while (txt != '') {
+ var i = txt.indexOf('\n');
+ if (i == -1 || i > WRAP) {
+ i = Math.min(WRAP, txt.length);
+ res += txt.substring(0, i) + '\n';
+ txt = txt.substring(i);
+ }
+ else {
+ res += txt.substring(0, i + 1);
+ txt = txt.substring(i + 1);
+ }
+ }
+ return fmt_escape_html(res);
+}
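+
+// Lines without a newline are hard-wrapped at 120 characters, so a
+// 300-character payload line becomes chunks of 120, 120 and 60.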
+
+function fmt_node(node_host) {
+ return fmt_string(node_host);
+}
+
+function fmt_object_state(obj) {
+ if (obj.state == undefined) return '';
+
+ var colour = 'green';
+ var text = obj.state;
+ var explanation;
+
+ if (obj.idle_since !== undefined) {
+ colour = 'grey';
+ explanation = 'Idle since ' + obj.idle_since;
+ text = 'idle';
+ }
+ // Only connections can be 'blocked' or 'blocking'
+ else if (obj.state == 'blocked') {
+ colour = 'red';
+ explanation = 'Resource alarm: connection blocked.';
+ }
+ else if (obj.state == 'blocking') {
+ colour = 'yellow';
+ explanation = 'Resource alarm: connection will block on publish.';
+ }
+ else if (obj.state == 'flow') {
+ colour = 'yellow';
+ explanation = 'Publishing rate recently throttled by server.';
+ }
+ else if (obj.state == 'terminated') {
+ colour = 'yellow';
+ var terminated_by = "";
+ if (obj.terminated_by) {
+ terminated_by = " by \"" + String(obj.terminated_by) + "\"";
+ }
+ explanation = 'The queue is being deleted' + terminated_by + ".";
+ }
+ else if (obj.state == 'down') {
+ colour = 'red';
+ explanation = 'The queue is located on a cluster node or nodes that ' +
+ 'are down.';
+ }
+ else if (obj.state == 'crashed') {
+ colour = 'red';
+ explanation = 'The queue has crashed repeatedly and been unable to ' +
+ 'restart.';
+ }
+ else if (obj.state == 'stopped') {
+ colour = 'red';
+ explanation = 'The queue process was stopped by the vhost supervisor.';
+ }
+
+ return fmt_state(colour, text, explanation);
+}
+
+function fmt_state(colour, text, explanation) {
+ var key;
+ if (explanation) {
+ key = '<abbr class="normal" title="' + explanation + '">' +
+ text + '</abbr>';
+ }
+ else {
+ key = text;
+ }
+
+ return '<div class="colour-key status-key-' + colour + '"></div>' + key;
+}
+
+function fmt_shortened_uri(uri) {
+ if (typeof uri == 'object') {
+ var res = '';
+ for (var i in uri) {
+ res += fmt_shortened_uri(uri[i]) + '<br/>';
+ }
+ return res;
+ }
+ uri = fmt_escape_html(uri);
+ if (uri.indexOf('?') == -1) {
+ return uri;
+ }
+ else {
+ return '<abbr title="' + uri + '">' +
+ uri.substr(0, uri.indexOf('?')) + '?...</abbr>';
+ }
+}
+
+function fmt_uri_with_credentials(uri) {
+ if (typeof uri == 'object') {
+ var res = [];
+ for (var i in uri) {
+ res.push(fmt_uri_with_credentials(uri[i]));
+ }
+ return res;
+ }
+ else if (typeof uri == 'string') {
+ // mask password
+ var mask = /^([a-zA-Z0-9+-.]+):\/\/(.*):(.*)@/;
+ return uri.replace(mask, "$1://$2:[redacted]@");
+ } else {
+ return UNKNOWN_REPR;
+ }
+}
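+
+// A sketch with a made-up URI:
+//   fmt_uri_with_credentials('amqp://guest:secret@host:5672/vhost')
+//   // => 'amqp://guest:[redacted]@host:5672/vhost'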
+
+function fmt_client_name(properties) {
+ var res = [];
+ if (properties.product != undefined) {
+ res.push(fmt_trunc(properties.product, 120));
+ }
+ if (properties.platform != undefined) {
+ res.push(fmt_trunc(properties.platform, 120));
+ }
+ res = res.join(" / ");
+
+ if (properties.version != undefined) {
+ res += '<sub>' + fmt_trunc(properties.version) + '</sub>';
+ }
+
+ if (properties.client_id != undefined) {
+ res += '<sub>' + fmt_trunc(properties.client_id, 120) + '</sub>';
+ }
+
+ return res;
+}
+
+function fmt_trunc(str, max_length) {
+ return str.length > max_length ?
+ ('<abbr class="normal" title="' + fmt_escape_html(str) + '">' +
+ fmt_escape_html(str.substring(0, max_length)) + '...</abbr>') :
+ fmt_escape_html(str);
+}
+
+function alt_rows(i, args) {
+ var css = [(i % 2 == 0) ? 'alt1' : 'alt2'];
+ if (args != undefined && args['x-internal-purpose'] != undefined) {
+ css.push('internal-purpose');
+ }
+ return ' class="' + css.join(' ') + '"';
+}
+
+function esc(str) {
+ return encodeURIComponent(str);
+}
+
+// Replaces a sequence of characters matched by a regular expression
+// group with the given character. Replaced group is combined with the stripped
+// original using the provided combine function.
+function replace_char_sequence_individually(input, regex, replacement, combine) {
+ let ms = input.match(regex);
+ if (ms == null) {
+ return input;
+ } else {
+ let n = ms[0].length;
+ return combine(replacement.repeat(n), input.replace(regex, ""));
+ }
+}
+
+// Replaces a leading sequence of characters matched by a regular expression
+// group with the given character.
+function replace_leading_chars(input, regex, replacement) {
+ return replace_char_sequence_individually(input, regex, replacement, function(patch, stripped_input) {
+ return patch + stripped_input;
+ });
+}
+
+// Replaces a trailing sequence of characters matched by a regular expression
+// group with the given character.
+function replace_trailing_chars(input, regex, replacement) {
+ return replace_char_sequence_individually(input, regex, replacement, function(patch, stripped_input) {
+ return stripped_input + patch;
+ });
+}
+
+// Highlights extra (leading and trailing) whitespace and tab characters
+// with suitable Unicode characters for improved visibility.
+// Note that mid-word whitespace should not be highlighted, which makes
+// the implementation trickier than a simple chain of replace/2 calls.
+function highlight_extra_whitespace(str) {
+ // Highlight leading and trailing whitespace individually but all tabs.
+ // This assumes that spaces are reasonable to use in the middle of a name
+ // but tabs are not used intentionally and must be highlighted even in the middle.
+ return [[replace_trailing_chars, /(\s+)$/g, WHITESPACE_HIGHLIGHTER],
+ [replace_leading_chars, /^(\s+)/g, WHITESPACE_HIGHLIGHTER]].reduce(function(acc, triplet) {
+ return triplet[0](acc, triplet[1], triplet[2]);
+ }, str.replace(/\t/g, TAB_HIGHLIGHTER));
+}
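+
+// With an illustrative name (⎵ marks spaces, → marks tabs):
+//   highlight_extra_whitespace(' a b\tc ')  // => '⎵a b→c⎵'
+// The mid-word space in 'a b' is left untouched.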
+
+function link_conn(name, desc) {
+ if (desc == undefined) {
+ return _link_to(short_conn(name), '#/connections/' + esc(name));
+ }
+ else {
+ return _link_to(desc, '#/connections/' + esc(name), false);
+ }
+}
+
+function link_channel(name) {
+ return _link_to(short_chan(name), '#/channels/' + esc(name));
+}
+
+function link_exchange(vhost, name, args) {
+ var url = esc(vhost) + '/' + (name == '' ? 'amq.default' : esc(name));
+ return _link_to(fmt_exchange0(highlight_extra_whitespace(name)), '#/exchanges/' + url, true, args);
+}
+
+function link_queue(vhost, name, args) {
+ return _link_to(highlight_extra_whitespace(name), '#/queues/' + esc(vhost) + '/' + esc(name), true, args);
+}
+
+function link_vhost(name) {
+ return _link_to(name, '#/vhosts/' + esc(name));
+}
+
+function link_user(name) {
+ return _link_to(name, '#/users/' + esc(name));
+}
+
+function link_node(name) {
+ return _link_to(fmt_node(name), '#/nodes/' + esc(name));
+}
+
+function link_policy(vhost, name) {
+ return _link_to(name, '#/policies/' + esc(vhost) + '/' + esc(name));
+}
+
+function _link_to(name, url, highlight, args) {
+ if (highlight == undefined) highlight = true;
+ var title = null;
+ if (args != undefined && args['x-internal-purpose'] != undefined) {
+ var purpose = args['x-internal-purpose'];
+ title = 'This is used internally by the ' + purpose + ' mechanism.';
+ }
+ return '<a href="' + url + '"' +
+ (title ? ' title="' + title + '"' : '') + '>' +
+ (highlight ? fmt_highlight_filter(name) : fmt_escape_html(name)) +
+ '</a>';
+}
+
+function fmt_highlight_filter(text) {
+ if (current_filter == '') return fmt_escape_html(text);
+
+ var text_to_match = current_filter.toLowerCase();
+ if (current_filter_regex) {
+ var potential_match = current_filter_regex.exec(text.toLowerCase());
+ if (potential_match) {
+ text_to_match = potential_match[0];
+ }
+ }
+ var ix = text.toLowerCase().indexOf(text_to_match);
+ var l = text_to_match.length;
+ if (ix == -1) {
+ return fmt_escape_html(text);
+ }
+ else {
+ return fmt_escape_html(text.substring(0, ix)) +
+ '<span class="filter-highlight">' +
+ fmt_escape_html(text.substring(ix, ix + l)) + '</span>' +
+ fmt_escape_html(text.substring(ix + l));
+ }
+}
+
+function filter_ui_pg(items, truncate, appendselect) {
+ var total = items.length;
+ if (current_filter != '') {
+ var items2 = [];
+ for (var i in items) {
+ var item = items[i];
+ var item_name = item.name.toLowerCase();
+ if ((current_filter_regex_on &&
+ current_filter_regex &&
+ current_filter_regex.test(item_name)) ||
+ item_name.indexOf(current_filter.toLowerCase()) != -1) {
+ items2.push(item);
+ }
+ }
+ items.length = items2.length;
+ for (var i in items2) items[i] = items2[i];
+ }
+
+ var res = '<div class="filter"><table' +
+ (current_filter == '' ? '' : ' class="filter-active"') +
+ '><tr><th>Filter:</th>' +
+ '<td><input id="filter" type="text" value="' +
+ fmt_escape_html(current_filter) + '"/>' +
+ '<input type="checkbox" name="filter-regex-mode" id="filter-regex-mode"' +
+ (current_filter_regex_on ? ' checked' : '') +
+ '/><label for="filter-regex-mode">Regex</label> <span class="help" id="filter-regex"></span>' +
+ '</td></tr></table>';
+
+ function items_desc(l) {
+ return l == 1 ? (l + ' item') : (l + ' items');
+ }
+
+ var selected = current_filter == '' ? (items_desc(items.length)) :
+ (items.length + ' of ' + items_desc(total) + ' selected');
+
+ selected += appendselect;
+
+ res += '<p id="filter-truncate"><span class="updatable">' + selected +
+ '</span>' + truncate + '</p>';
+ res += '</div>';
+
+ return res;
+}
+
+
+function filter_ui(items) {
+ var current_truncate = parseInt(get_pref('truncate'));
+ var truncate_input = '<input type="text" id="truncate" value="' +
+ current_truncate + '">';
+ var selected = '';
+ if (items.length > current_truncate) {
+ selected += '<span id="filter-warning-show"> ' +
+ '(only showing first</span> ';
+ items.length = current_truncate;
+ }
+ else {
+ selected += ', page size up to ';
+ }
+ return filter_ui_pg(items, truncate_input, selected);
+}
+
+function paginate_header_ui(pages, context){
+ var res = '<h2 class="updatable">';
+ res += ' All ' + context +' (' + pages.total_count + ((pages.filtered_count != pages.total_count) ? ', filtered down to ' + pages.filtered_count : '') + ')';
+ res += '</h2>';
+ return res;
+}
+
+function paginate_ui(pages, context){
+ var res = paginate_header_ui(pages, context);
+ res += '<div class="hider">';
+ res += '<h3>Pagination</h3>';
+ res += '<div class="filter">';
+ res += '<table class="updatable">';
+ res += '<tr>';
+ res += '<th><label for="'+ context +'-page">Page </label> <select id="'+ context +'-page" class="pagination_class pagination_class_select" >';
+ var page = fmt_page_number_request(context, pages.page);
+ if (pages.page_count > 0 && page > pages.page_count){
+ page = pages.page_count;
+ update_pages(context, page);
+ return;
+ }
+ for (var i = 1; i <= pages.page_count; i++) {
+ if (i == page) {
+ res += ' <option selected="selected" value="'+ i + '">' + i + '</option>';
+ } else {
+ res += '<option value="' + i + '"> ' + i + '</option>';
+ } }
+ res += '</select> </th>';
+ res += '<th><label for="' + context +'-pageof">of </label> ' + pages.page_count +'</th>';
+ res += '<th><span><label for="'+ context +'-name"> - Filter: </label> <input id="'+ context +'-name" data-page-start="1" class="pagination_class pagination_class_input" type="text"';
+ res += ' value="' + fmt_filter_name_request(context, "") + '">';
+ res += '</span></th>';
+
+ res += '<th> <input type="checkbox" data-page-start="1" class="pagination_class pagination_class_checkbox" id="'+ context +'-filter-regex-mode"' ;
+
+ res += fmt_regex_request(context, "") + '></input> <label for="filter-regex-mode">Regex</label> <span class="help" id="filter-regex"></span></th>' ;
+
+ res +=' </table>' ;
+ res += '<p id="filter-truncate"><span class="updatable">';
+ res += '<span><label for="'+ context +'-pagesize"> Displaying ' + pages.item_count + ' item' + ((pages.item_count > 1) ? 's' : '') + ', page size up to: </label> ';
+ res += ' <input id="'+ context +'-pagesize" data-page-start="1" class="pagination_class shortinput pagination_class_input" type="text"';
+ res += ' value="' + fmt_page_size_request(context, pages.page_size) + '"';
+ res += ' onkeypress="return isNumberKey(event)"></span></p>';
+ res += '</div>';
+ res += '</div>';
+ return res;
+}
+
+
+function maybe_truncate(items) {
+ var maximum = 500;
+ var str = '';
+
+ if (items.length > maximum) {
+ str = '<p class="warning">Only ' + maximum + ' of ' +
+ items.length + ' items are shown.</p>';
+ items.length = maximum;
+ }
+
+ return str;
+}
+
+function fmt_sort(display, sort) {
+ var prefix = '';
+ if (current_sort == sort) {
+ prefix = '<span class="arrow">' +
+ (current_sort_reverse ? '&#9661; ' : '&#9651; ') +
+ '</span>';
+ }
+ return '<a class="sort" sort="' + sort + '">' + prefix + display + '</a>';
+}
+
+function group_count(mode, group, bools) {
+ var count = 0;
+ for (var i = 0; i < bools.length; i++) {
+ if (bools[i]) count++;
+ }
+
+ var options = COLUMNS[mode][group];
+ for (var i = 0; i < options.length; i++) {
+ var column = options[i][0];
+ if (show_column(mode, column)) count++;
+ }
+ return count;
+}
+
+function group_heading(mode, group, bools) {
+ var count = group_count(mode, group, bools);
+ if (count == 0) {
+ return '';
+ }
+ else {
+ return '<th colspan="' + count + '">' + group + '</th>';
+ }
+}
+
+function fmt_permissions(obj, permissions, lookup, show, warning) {
+ var res = [];
+ for (var i in permissions) {
+ var permission = permissions[i];
+ if (permission[lookup] == obj.name) {
+ res.push(fmt_escape_html(permission[show]));
+ }
+ }
+ return res.length == 0 ? warning : res.join(', ');
+}
+
+var radio_id = 0;
+
+function fmt_radio(name, text, value, current) {
+ radio_id++;
+ return '<label class="radio" for="radio-' + radio_id + '">' +
+ '<input type="radio" id="radio-' + radio_id + '" name="' + name +
+ '" value="' + value + '"' +
+ ((value == current) ? ' checked="checked"' : '') +
+ '>' + text + '</label>';
+}
+
+function fmt_checkbox(name, text, current) {
+ return '<label class="checkbox" for="checkbox-' + name + '">' +
+ '<input type="checkbox" id="checkbox-' + name + '" name="' + name +
+ '"' + (current ? ' checked="checked"' : '') + '>' + text + '</label>';
+}
+
+function properties_size(obj) {
+ var count = 0;
+ for (var k in obj) {
+ if (obj.hasOwnProperty(k)) count++;
+ }
+ return count;
+}
+
+function stored_value_or_default(template, defaultValue){
+ var stored_value = get_pref(template);
+ // == null also catches undefined; '' means "no stored preference"
+ var result = (stored_value == null || stored_value == '') ? defaultValue : stored_value;
+ return (result == undefined) ? defaultValue : result;
+}
+
+function fmt_page_number_request(template, defaultPage){
+ if ((defaultPage == undefined) || (defaultPage <= 0)) {
+ defaultPage = 1;
+ }
+ return stored_value_or_default(template + '_current_page_number', defaultPage);
+}
+function fmt_page_size_request(template, defaultPageSize){
+ if ((defaultPageSize == undefined) || (defaultPageSize < 0)) {
+ defaultPageSize = 100;
+ }
+
+ var result = stored_value_or_default(template + '_current_page_size', defaultPageSize);
+ if (result > 500) {
+ // hard limit
+ result = 500;
+ }
+ return result;
+}
+
+function fmt_filter_name_request(template, defaultName){
+ return fmt_escape_html(stored_value_or_default(template + '_current_filter_name', defaultName));
+}
+
+function fmt_regex_request(template, defaultName){
+ return fmt_escape_html(stored_value_or_default(template + '_current_regex', defaultName));
+}
+
+function fmt_vhost_state(vhost){
+ var cluster_state = vhost.cluster_state;
+ var down_nodes = [];
+ var ok_count = 0;
+ var non_ok_count = 0;
+ for(var node in cluster_state){
+ var node_state = cluster_state[node];
+ if(cluster_state[node] == "stopped" || cluster_state[node] == "nodedown"){
+ non_ok_count++;
+ down_nodes.push(node);
+ } else if(cluster_state[node] == "running"){
+ ok_count++;
+ }
+ }
+ if(non_ok_count == 0 ){
+ return fmt_state('green', 'running', '');
+ } else if(non_ok_count > 0 && ok_count == 0){
+ return fmt_state('red', 'stopped', 'Vhost supervisor is not running.');
+ } else if(non_ok_count > 0 && ok_count > 0){
+ return fmt_state('yellow', 'partial', 'Vhost supervisor is stopped on some cluster nodes: ' + down_nodes.join(', '));
+ }
+}
+
+function isNumberKey(evt){
+ var charCode = (evt.which) ? evt.which : evt.keyCode;
+ if (charCode > 31 && (charCode < 48 || charCode > 57))
+ return false;
+ return true;
+}
diff --git a/deps/rabbitmq_management/priv/www/js/global.js b/deps/rabbitmq_management/priv/www/js/global.js
new file mode 100644
index 0000000000..730b003d65
--- /dev/null
+++ b/deps/rabbitmq_management/priv/www/js/global.js
@@ -0,0 +1,798 @@
+///////////////////////
+// //
+// Genuine constants //
+// //
+///////////////////////
+
+// Just used below
+function map(list) {
+ var res = {};
+ for (i in list) {
+ res[list[i]] = '';
+ }
+ return res;
+}
+
+// Extension arguments that we know about and present specially in the UI.
+var KNOWN_ARGS = {'alternate-exchange': {'short': 'AE', 'type': 'string'},
+ 'x-message-ttl': {'short': 'TTL', 'type': 'int'},
+ 'x-expires': {'short': 'Exp', 'type': 'int'},
+ 'x-max-length': {'short': 'Lim', 'type': 'int'},
+ 'x-max-length-bytes': {'short': 'Lim B', 'type': 'int'},
+ 'x-delivery-limit': {'short': 'DlL', 'type': 'int'},
+ 'x-overflow': {'short': 'Ovfl', 'type': 'string'},
+ 'x-dead-letter-exchange': {'short': 'DLX', 'type': 'string'},
+ 'x-dead-letter-routing-key': {'short': 'DLK', 'type': 'string'},
+ 'x-queue-master-locator': {'short': 'ML', 'type': 'string'},
+ 'x-max-priority': {'short': 'Pri', 'type': 'int'},
+ 'x-single-active-consumer': {'short': 'SAC', 'type': 'boolean'}};
+
+// Things that are like arguments that we format the same way in listings.
+var IMPLICIT_ARGS = {'durable': {'short': 'D', 'type': 'boolean'},
+ 'auto-delete': {'short': 'AD', 'type': 'boolean'},
+ 'internal': {'short': 'I', 'type': 'boolean'},
+ 'exclusive': {'short': 'Excl', 'type': 'boolean'},
+ 'messages delayed':{'short': 'DM', 'type': 'int'}};
+
+// Both the above
+var ALL_ARGS = {};
+for (var k in IMPLICIT_ARGS) ALL_ARGS[k] = IMPLICIT_ARGS[k];
+for (var k in KNOWN_ARGS) ALL_ARGS[k] = KNOWN_ARGS[k];
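+
+// So, for example, ALL_ARGS['x-message-ttl'].short is 'TTL' and
+// ALL_ARGS['durable'].short is 'D'; fmt_features_short() walks this map.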
+
+var NAVIGATION = {'Overview': ['#/', "management"],
+ 'Connections': ['#/connections', "management"],
+ 'Channels': ['#/channels', "management"],
+ 'Exchanges': ['#/exchanges', "management"],
+ 'Queues': ['#/queues', "management"],
+ 'Admin':
+ [{'Users': ['#/users', "administrator"],
+ 'Virtual Hosts': ['#/vhosts', "administrator"],
+ 'Feature Flags': ['#/feature-flags', "administrator"],
+ 'Policies': ['#/policies', "management"],
+ 'Limits': ['#/limits', "management"],
+ 'Cluster': ['#/cluster-name', "administrator"]},
+ "management"]
+ };
+
+var CHART_RANGES = {'global': [], 'basic': []};
+var ALL_CHART_RANGES = {};
+
+var ALL_COLUMNS =
+ {'exchanges' :
+ {'Overview': [['type', 'Type', true],
+ ['features', 'Features (with policy)', true],
+ ['features_no_policy', 'Features (no policy)', false],
+ ['policy', 'Policy', false]],
+ 'Message rates': [['rate-in', 'rate in', true],
+ ['rate-out', 'rate out', true]]},
+ 'queues' :
+ {'Overview': [['type', 'Type', true],
+ ['features', 'Features (with policy)', true],
+ ['features_no_policy', 'Features (no policy)', false],
+ ['policy', 'Policy', false],
+ ['consumers', 'Consumer count', false],
+ ['consumer_utilisation', 'Consumer utilisation', false],
+ ['state', 'State', true]],
+ 'Messages': [['msgs-ready', 'Ready', true],
+ ['msgs-unacked', 'Unacknowledged', true],
+ ['msgs-ram', 'In memory', false],
+ ['msgs-persistent', 'Persistent', false],
+ ['msgs-total', 'Total', true]],
+ 'Message bytes': [['msg-bytes-ready', 'Ready', false],
+ ['msg-bytes-unacked', 'Unacknowledged', false],
+ ['msg-bytes-ram', 'In memory', false],
+ ['msg-bytes-persistent', 'Persistent', false],
+ ['msg-bytes-total', 'Total', false]],
+ 'Message rates': [['rate-incoming', 'incoming', true],
+ ['rate-deliver', 'deliver / get', true],
+ ['rate-redeliver', 'redelivered', false],
+ ['rate-ack', 'ack', true]]},
+ 'channels' :
+ {'Overview': [['user', 'User name', true],
+ ['mode', 'Mode', true],
+ ['state', 'State', true]],
+ 'Details': [['msgs-unconfirmed', 'Unconfirmed', true],
+ ['prefetch', 'Prefetch', true],
+ ['msgs-unacked', 'Unacked', true]],
+ 'Transactions': [['msgs-uncommitted', 'Msgs uncommitted', false],
+ ['acks-uncommitted', 'Acks uncommitted', false]],
+ 'Message rates': [['rate-publish', 'publish', true],
+ ['rate-confirm', 'confirm', true],
+ ['rate-unroutable-drop', 'unroutable (drop)', true],
+ ['rate-unroutable-return', 'unroutable (return)', false],
+ ['rate-deliver', 'deliver / get', true],
+ ['rate-redeliver', 'redelivered', false],
+ ['rate-ack', 'ack', true]]},
+ 'connections':
+ {'Overview': [['user', 'User name', true],
+ ['state', 'State', true]],
+ 'Details': [['ssl', 'TLS', true],
+ ['ssl_info', 'TLS details', false],
+ ['protocol', 'Protocol', true],
+ ['channels', 'Channels', true],
+ ['channel_max', 'Channel max', false],
+ ['frame_max', 'Frame max', false],
+ ['auth_mechanism', 'Auth mechanism', false],
+ ['client', 'Client', false]],
+ 'Network': [['from_client', 'From client', true],
+ ['to_client', 'To client', true],
+ ['heartbeat', 'Heartbeat', false],
+ ['connected_at', 'Connected at', false]]},
+
+ 'vhosts':
+ {'Overview': [['cluster-state', 'Cluster state', false],
+ ['description', 'Description', false],
+ ['tags', 'Tags', false]],
+ 'Messages': [['msgs-ready', 'Ready', true],
+ ['msgs-unacked', 'Unacknowledged', true],
+ ['msgs-total', 'Total', true]],
+ 'Network': [['from_client', 'From client', true],
+ ['to_client', 'To client', true]],
+ 'Message rates': [['rate-publish', 'publish', true],
+ ['rate-deliver', 'deliver / get', true]]},
+ 'overview':
+ {'Statistics': [['file_descriptors', 'File descriptors', true],
+ ['socket_descriptors', 'Socket descriptors', true],
+ ['erlang_processes', 'Erlang processes', true],
+ ['memory', 'Memory', true],
+ ['disk_space', 'Disk space', true]],
+ 'General': [['uptime', 'Uptime', true],
+ ['info', 'Info', true],
+ ['reset_stats', 'Reset stats', true]]}};
+
+var DISABLED_STATS_COLUMNS =
+ {'exchanges' :
+ {'Overview': [['type', 'Type', true],
+ ['features', 'Features (with policy)', true],
+ ['features_no_policy', 'Features (no policy)', false],
+ ['policy', 'Policy', false]]},
+ 'queues' :
+ {'Overview': [['type', 'Type', true],
+ ['features', 'Features (with policy)', true],
+ ['features_no_policy', 'Features (no policy)', false],
+ ['policy', 'Policy', false],
+ ['state', 'State', true]],
+ 'Messages': [['msgs-ready', 'Ready', true],
+ ['msgs-unacked', 'Unacknowledged', true],
+ ['msgs-ram', 'In memory', false],
+ ['msgs-persistent', 'Persistent', false],
+ ['msgs-total', 'Total', true]]},
+ 'connections':
+ {'Overview': [['user', 'User name', true],
+ ['state', 'State', true]]},
+
+ 'vhosts':
+ {'Overview': [['cluster-state', 'Cluster state', false]]}};
+
+var COLUMNS;
+
+// All help ? popups
+var HELP = {
+ 'delivery-limit':
+ 'The number of allowed unsuccessful delivery attempts. Once a message has been delivered unsuccessfully this many times it will be dropped or dead-lettered, depending on the queue configuration.',
+
+ 'exchange-auto-delete':
+ 'If yes, the exchange will delete itself after at least one queue or exchange has been bound to this one, and then all queues or exchanges have been unbound.',
+
+ 'exchange-internal':
+ 'If yes, clients cannot publish to this exchange directly. It can only be used with exchange to exchange bindings.',
+
+ 'exchange-alternate':
+ 'If messages to this exchange cannot otherwise be routed, send them to the alternate exchange named here.<br/>(Sets the "<a target="_blank" href="https://rabbitmq.com/ae.html">alternate-exchange</a>" argument.)',
+
+ 'queue-message-ttl':
+ 'How long a message published to a queue can live before it is discarded (milliseconds).<br/>(Sets the "<a target="_blank" href="https://rabbitmq.com/ttl.html#per-queue-message-ttl">x-message-ttl</a>" argument.)',
+
+ 'queue-expires':
+ 'How long a queue can be unused for before it is automatically deleted (milliseconds).<br/>(Sets the "<a target="_blank" href="https://rabbitmq.com/ttl.html#queue-ttl">x-expires</a>" argument.)',
+
+ 'queue-max-length':
+ 'How many (ready) messages a queue can contain before it starts to drop them from its head.<br/>(Sets the "<a target="_blank" href="https://rabbitmq.com/maxlength.html">x-max-length</a>" argument.)',
+
+ 'queue-max-length-bytes':
+ 'Total body size for ready messages a queue can contain before it starts to drop them from its head.<br/>(Sets the "<a target="_blank" href="https://rabbitmq.com/maxlength.html">x-max-length-bytes</a>" argument.)',
+
+ 'queue-max-in-memory-length':
+ 'How many (ready) messages a quorum queue can contain in memory before it starts storing them on disk only.<br/>(Sets the x-max-in-memory-length argument.)',
+
+ 'queue-max-in-memory-bytes':
+ 'Total body size for ready messages a quorum queue can contain in memory before it starts storing them on disk only.<br/>(Sets the x-max-in-memory-bytes argument.)',
+
+ 'queue-max-age':
+ 'How long a message published to a stream queue can live before it is discarded (the stream retention policy).<br/>(Sets the x-max-age argument.)',
+
+ 'queue-max-segment-size':
+ 'Total segment size for stream segments on disk.<br/>(Sets the x-max-segment-size argument.)',
+
+ 'queue-auto-delete':
+ 'If yes, the queue will delete itself after at least one consumer has connected, and then all consumers have disconnected.',
+
+ 'queue-dead-letter-exchange':
+ 'Optional name of an exchange to which messages will be republished if they are rejected or expire.<br/>(Sets the "<a target="_blank" href="https://rabbitmq.com/dlx.html">x-dead-letter-exchange</a>" argument.)',
+
+ 'queue-dead-letter-routing-key':
+ 'Optional replacement routing key to use when a message is dead-lettered. If this is not set, the message\'s original routing key will be used.<br/>(Sets the "<a target="_blank" href="https://rabbitmq.com/dlx.html">x-dead-letter-routing-key</a>" argument.)',
+
+ 'queue-single-active-consumer':
+ 'If set, makes sure only one consumer at a time consumes from the queue and fails over to another registered consumer in case the active one is cancelled or dies.<br/>(Sets the "<a target="_blank" href="https://rabbitmq.com/consumers.html#single-active-consumer">x-single-active-consumer</a>" argument.)',
+
+ 'queue-max-priority':
+ 'Maximum number of priority levels for the queue to support; if not set, the queue will not support message priorities.<br/>(Sets the "<a target="_blank" href="https://rabbitmq.com/priority.html">x-max-priority</a>" argument.)',
+
+ 'queue-lazy':
+ 'Set the queue into lazy mode, keeping as many messages as possible on disk to reduce RAM usage; if not set, the queue will keep an in-memory cache to deliver messages as fast as possible.<br/>(Sets the "<a target="_blank" href="https://www.rabbitmq.com/lazy-queues.html">x-queue-mode</a>" argument.)',
+
+ 'queue-overflow':
+ 'Sets the <a target="_blank" href="https://www.rabbitmq.com/maxlength.html#overflow-behaviour">queue overflow behaviour</a>. This determines what happens to messages when the maximum length of a queue is reached. Valid values are <code>drop-head</code>, <code>reject-publish</code> or <code>reject-publish-dlx</code>. The quorum queue type only supports <code>drop-head</code> and <code>reject-publish</code>.',
+
+ 'queue-master-locator':
+ 'Set the queue into master location mode, determining the rule by which the queue master is located when declared on a cluster of nodes.<br/>(Sets the "<a target="_blank" href="https://www.rabbitmq.com/ha.html">x-queue-master-locator</a>" argument.)',
+
+ 'queue-leader-locator':
+ 'Set the queue into leader location mode, determining the rule by which the queue leader is located when declared on a cluster of nodes. Valid values are <code>client-local</code>, <code>random</code> and <code>least-leaders</code>.',
+
+ 'queue-initial-cluster-size':
+ 'Set the queue initial cluster size.',
+
+ 'queue-type':
+ 'Set the queue type, determining the type of queue to use: a Raft-based, highly available quorum queue or a classic queue. Valid values are <code>quorum</code> or <code>classic</code>. It defaults to <code>classic</code>. <br/>',
+
+ 'queue-messages':
+ '<p>Message counts.</p><p>Note that "in memory" and "persistent" are not mutually exclusive; persistent messages can be in memory as well as on disc, and transient messages can be paged out if memory is tight. Non-durable queues will consider all messages to be transient.</p>',
+
+ 'queue-message-body-bytes':
+ '<p>The sum total of the sizes of the message bodies in this queue. This only counts message bodies; it does not include message properties (including headers) or metadata used by the queue.</p><p>Note that "in memory" and "persistent" are not mutually exclusive; persistent messages can be in memory as well as on disc, and transient messages can be paged out if memory is tight. Non-durable queues will consider all messages to be transient.</p><p>If a message is routed to multiple queues on publication, its body will be stored only once (in memory and on disk) and shared between queues. The value shown here does not take account of this effect.</p>',
+
+ 'queue-process-memory':
+ 'Total memory used by this queue process. This does not include in-memory message bodies (which may be shared between queues and will appear in the global "binaries" memory) but does include everything else.',
+
+ 'queue-consumer-utilisation':
+ 'Fraction of the time that the queue is able to immediately deliver messages to consumers. If this number is less than 100% you may be able to deliver messages faster if: \
+ <ul> \
+ <li>There were more consumers or</li> \
+ <li>The consumers were faster or</li> \
+ <li>The consumers had a higher prefetch count</li> \
+ </ul>',
+
+ 'internal-users-only':
+ 'Only users within the internal RabbitMQ database are shown here. Other users (e.g. those authenticated over LDAP) will not appear.',
+
+ 'export-definitions':
+ 'The definitions consist of users, virtual hosts, permissions, parameters, exchanges, queues, policies and bindings. They do not include the contents of queues. Exclusive queues will not be exported.',
+
+ 'export-definitions-vhost':
+ 'The definitions exported for a single virtual host consist of exchanges, queues, bindings and policies.',
+
+ 'import-definitions':
+ 'The definitions that are imported will be merged with the current definitions. If an error occurs during import, any changes made will not be rolled back.',
+
+ 'import-definitions-vhost':
+ 'For a single virtual host, only exchanges, queues, bindings and policies are imported.',
+
+ 'exchange-rates-incoming':
+ 'The incoming rate is the rate at which messages are published directly to this exchange.',
+
+ 'exchange-rates-outgoing':
+ 'The outgoing rate is the rate at which messages enter queues, having been published directly to this exchange.',
+
+ 'channel-mode':
+ 'Channel guarantee mode. Can be one of the following, or neither:<br/> \
+ <dl> \
+ <dt><abbr title="Confirm">C</abbr> &ndash; <a target="_blank" href="https://www.rabbitmq.com/confirms.html">confirm</a></dt> \
+ <dd>Channel will send streaming publish confirmations.</dd> \
+ <dt><abbr title="Transactional">T</abbr> &ndash; <a target="_blank" href="https://www.rabbitmq.com/amqp-0-9-1-reference.html#class.tx">transactional</a></dt> \
+ <dd>Channel is transactional.</dd> \
+ </dl>',
+
+ 'channel-prefetch':
+ 'Channel prefetch counts. \
+ <p> \
+ Each channel can have two prefetch counts: A per-consumer count, which \
+ will limit each new consumer created on the channel, and a global \
+ count, which is shared between all consumers on the channel.\
+ </p> \
+ <p> \
+ This column shows one, the other, or both limits if they are set. \
+ </p>',
+
+ 'file-descriptors':
+ '<p>File descriptor count and limit, as reported by the operating \
+ system. The count includes network sockets and file handles.</p> \
+ <p>To optimize disk access RabbitMQ uses as many free descriptors as are \
+ available, so the count may safely approach the limit. \
+ However, if most of the file descriptors are used by sockets then \
+ persister performance will be negatively impacted.</p> \
+ <p>To change the limit on Unix / Linux, use "ulimit -n". To change \
+ the limit on Windows, set the ERL_MAX_PORTS environment variable</p> \
+ <p>To report used file handles on Windows, handle.exe from \
+ sysinternals must be installed in your path. You can download it \
+ <a target="_blank" href="https://technet.microsoft.com/en-us/sysinternals/bb896655">here</a>.</p>',
+
+ 'socket-descriptors':
+ 'The network sockets count and limit managed by RabbitMQ.<br/> \
+ When the limit is exhausted RabbitMQ will stop accepting new \
+ network connections.',
+
+ 'memory-alarm':
+ '<p>The <a target="_blank" href="https://www.rabbitmq.com/memory.html#memsup">memory \
+ alarm</a> for this node has gone off. It will block \
+ incoming network traffic until the memory usage drops below \
+ the watermark.</p>\
+ <p>Note that the pale line in this case indicates the high watermark \
+ in relation to how much memory is used in total. </p>',
+
+ 'disk-free-alarm':
+ 'The <a target="_blank" href="https://www.rabbitmq.com/memory.html#diskfreesup">disk \
+ free space alarm</a> for this node has gone off. It will block \
+ incoming network traffic until the amount of free space exceeds \
+ the limit.',
+
+ 'message-get-requeue':
+ '<p>Clicking "Get Message(s)" will consume messages from the queue. \
+ If requeue is set the message will be put back into the queue in place, \
+ but "redelivered" will be set on the message.</p> \
+ <p>If requeue is not set messages will be removed from the queue.</p> \
+ <p>Furthermore, message payloads will be truncated to 50000 bytes.</p>',
+
+ 'message-publish-headers':
+ 'Headers can have any name. Only long string headers can be set here.',
+
+ 'message-publish-properties':
+ '<p>You can set other message properties here (delivery mode and headers \
+ are pulled out as the most common cases).</p>\
+ <p>Invalid properties will be ignored. Valid properties are:</p>\
+ <ul>\
+ <li>content_type</li>\
+ <li>content_encoding</li>\
+ <li>priority</li>\
+ <li>correlation_id</li>\
+ <li>reply_to</li>\
+ <li>expiration</li>\
+ <li>message_id</li>\
+ <li>timestamp</li>\
+ <li>type</li>\
+ <li>user_id</li>\
+ <li>app_id</li>\
+ <li>cluster_id</li>\
+ </ul>',
+
+ 'string-base64':
+ '<p>AMQP message payloads can contain any binary content. They can \
+ therefore be difficult to display in a browser. The options here \
+ have the following meanings:</p> \
+ <dl> \
+ <dt>Auto string / base64</dt> \
+ <dd>If the message payload can be interpreted as a string in UTF-8 \
+ encoding, do so. Otherwise return the payload encoded as \
+ base64.</dd> \
+ <dt>base64</dt> \
+ <dd>Return the payload encoded as base64 unconditionally.</dd> \
+ </dl>',
+
+ 'user-tags':
+ 'Comma-separated list of tags to apply to the user. Currently \
+ <a target="_blank" href="https://www.rabbitmq.com/management.html#permissions">supported \
+ by the management plugin</a>: \
+ <dl> \
+ <dt>management</dt> \
+ <dd> \
+ User can access the management plugin \
+ </dd> \
+ <dt>policymaker</dt> \
+ <dd> \
+ User can access the management plugin and manage policies and \
+ parameters for the vhosts they have access to. \
+ </dd> \
+ <dt>monitoring</dt> \
+ <dd> \
+ User can access the management plugin and see all connections and \
+ channels as well as node-related information. \
+ </dd> \
+ <dt>administrator</dt> \
+ <dd> \
+ User can do everything monitoring can do, manage users, \
+ vhosts and permissions, close other user\'s connections, and manage \
+ policies and parameters for all vhosts. \
+ </dd> \
+ </dl> \
+ <p> \
+ Note that you can set any tag here; the links for the above four \
+ tags are just for convenience. \
+ </p>',
+
+ 'queued-messages':
+ '<dl> \
+ <dt>Ready</dt>\
+ <dd>Number of messages that are available to be delivered now.</dd>\
+ <dt>Unacknowledged</dt>\
+ <dd>Number of messages for which the server is waiting for acknowledgement.</dd>\
+ <dt>Total</dt>\
+ <dd>The total of these two numbers.</dd>\
+ </dl>',
+
+ 'message-rates':
+ 'Only rates for which some activity is taking place will be shown.\
+ <dl>\
+ <dt>Publish</dt>\
+ <dd>Rate at which messages are entering the server.</dd>\
+ <dt>Publisher confirm</dt>\
+ <dd>Rate at which the server is confirming publishes.</dd>\
+ <dt>Deliver (manual ack)</dt>\
+ <dd>Rate at which messages are delivered to consumers that use manual acknowledgements.</dd>\
+ <dt>Deliver (auto ack)</dt>\
+ <dd>Rate at which messages are delivered to consumers that use automatic acknowledgements.</dd>\
+ <dt>Consumer ack</dt>\
+ <dd>Rate at which messages are being acknowledged by consumers.</dd>\
+ <dt>Redelivered</dt>\
+ <dd>Rate at which messages with the \'redelivered\' flag set are being delivered. Note that these messages will <b>also</b> be counted in one of the delivery rates above.</dd>\
+ <dt>Get (manual ack)</dt>\
+ <dd>Rate at which messages requiring acknowledgement are being delivered in response to basic.get.</dd>\
+ <dt>Get (auto ack)</dt>\
+ <dd>Rate at which messages not requiring acknowledgement are being delivered in response to basic.get.</dd>\
+ <dt>Get (empty)</dt>\
+ <dd>Rate at which empty queues are hit in response to basic.get.</dd>\
+ <dt>Return</dt>\
+ <dd>Rate at which basic.return is sent to publishers for unroutable messages published with the \'mandatory\' flag set.</dd>\
+ <dt>Disk read</dt>\
+ <dd>Rate at which queues read messages from disk.</dd>\
+ <dt>Disk write</dt>\
+ <dd>Rate at which queues write messages to disk.</dd>\
+ </dl>\
+ <p>\
+ Note that the last two items originate in queues rather than \
+ channels; they may therefore be slightly out of sync with other \
+ statistics.\
+ </p>',
+
+ 'disk-monitoring-no-watermark' : 'There is no <a target="_blank" href="https://www.rabbitmq.com/memory.html#diskfreesup">disk space low watermark</a> set. RabbitMQ will not take any action to avoid running out of disk space.',
+
+ 'resource-counts' : 'Shows total number of objects for all virtual hosts the current user has access to.',
+
+ 'memory-use' : '<p>Note that the memory details shown here are only updated on request - they could be too expensive to calculate every few seconds on a busy server.</p><p><a target="_blank" href="https://www.rabbitmq.com/memory-use.html">Read more</a> on memory use.</p>',
+
+ 'memory-calculation-strategy-breakdown' : '<p>The setting <code>vm_memory_calculation_strategy</code> defines which of the below memory values is used to check if the memory usage reaches the watermark or paging to disk is required.</p><p><a target="_blank" href="https://www.rabbitmq.com/memory-use.html">Read more</a> on memory use.</p>',
+
+ 'memory-calculation-strategy' : '<p>This value can be calculated using different strategies the <code>vm_memory_calculation_strategy</code> config setting.</p><p><a target="_blank" href="https://www.rabbitmq.com/memory-use.html">Read more</a> on memory use.</p>',
+
+ 'binary-use' : '<p>Binary accounting is not exact; binaries are shared between processes (and thus the same binary might be counted in more than one section), and the VM does not allow us to track binaries that are not associated with processes (so some binary use might not appear at all).</p>',
+
+ 'policy-ha-mode' : 'One of <code>all</code> (mirror to all nodes in the cluster), <code>exactly</code> (mirror to a set number of nodes) or <code>nodes</code> (mirror to an explicit list of nodes). If you choose one of the latter two, you must also set <code>ha-params</code>.',
+
+ 'policy-ha-params' : 'Absent if <code>ha-mode</code> is <code>all</code>, a number\
+ if <code>ha-mode</code> is <code>exactly</code>, or a list\
+ of strings if <code>ha-mode</code> is <code>nodes</code>.',
+
+ 'policy-ha-sync-mode' : 'One of <code>manual</code> or <code>automatic</code>. <a target="_blank" href="https://www.rabbitmq.com/ha.html#unsynchronised-mirrors">Learn more</a>',
+
+ 'policy-ha-promote-on-shutdown' : 'One of <code>when-synced</code> or <code>always</code>. <a target="_blank" href="https://www.rabbitmq.com/ha.html#unsynchronised-mirrors">Learn more</a>',
+
+ 'policy-ha-promote-on-failure' : 'One of <code>when-synced</code> or <code>always</code>. <a target="_blank" href="https://www.rabbitmq.com/ha.html#unsynchronised-mirrors">Learn more</a>',
+
+ 'policy-federation-upstream-set' :
+ 'A string; only if the federation plugin is enabled. Chooses the name of a set of upstreams to use with federation, or "all" to use all upstreams. Incompatible with <code>federation-upstream</code>.',
+
+ 'policy-federation-upstream' :
+ 'A string; only if the federation plugin is enabled. Chooses a specific upstream set to use for federation. Incompatible with <code>federation-upstream-set</code>.',
+
+ 'handle-exe' : 'In order to monitor the number of file descriptors in use on Windows, RabbitMQ needs the <a href="https://technet.microsoft.com/en-us/sysinternals/bb896655" target="_blank">handle.exe command line tool from Microsoft</a>. Download it and place it in the path (e.g. in C:\\Windows).',
+
+ 'filter-regex' :
+ 'Whether to enable regular expression matching. Both string literals \
+ and regular expressions are matched in a case-insensitive manner.<br/><br/> \
+ (<a href="https://developer.mozilla.org/en/docs/Web/JavaScript/Guide/Regular_Expressions" target="_blank">Regular expression reference</a>)',
+
+ 'consumer-active' :
+ 'Whether the consumer is active or not, i.e. whether the consumer can get messages from the queue. \
+ When single active consumer is enabled for the queue, only one consumer at a time is active. \
+ When single active consumer is disabled for the queue, consumers are active by default. \
+ For a quorum queue, a consumer can be inactive because its owning node is suspected down. <br/><br/> \
+ (<a href="https://www.rabbitmq.com/consumers.html#active-consumer" target="_blank">Documentation</a>)',
+
+ 'plugins' :
+ 'Note that only plugins which are both explicitly enabled and running are shown here.',
+
+ 'io-operations':
+ 'Rate of I/O operations. Only operations performed by the message \
+ persister are shown here (e.g. changes in the schema data store or writes \
+ to the log files are not shown).\
+ <dl>\
+ <dt>Read</dt>\
+ <dd>Rate at which data is read from the disk.</dd>\
+ <dt>Write</dt>\
+ <dd>Rate at which data is written to the disk.</dd>\
+ <dt>Seek</dt>\
+ <dd>Rate at which the broker switches position while reading or \
+ writing to disk.</dd>\
+ <dt>Sync</dt>\
+ <dd>Rate at which the node invokes <code>fsync()</code> to ensure \
+ data is flushed to disk.</dd>\
+ <dt>Reopen</dt>\
+ <dd>Rate at which the node recycles file handles in order to support \
+ more queues than it has file handles. If this operation is occurring \
+ frequently you may get a performance boost from increasing the number \
+ of file handles available.</dd>\
+ </dl>',
+
+ 'mnesia-transactions':
+ 'Rate at which schema data store transactions are initiated on this node (this node \
+ will also take part in the transactions initiated on other nodes).\
+ <dl>\
+ <dt>RAM only</dt>\
+ <dd>Rate at which RAM-only schema data store transactions take place (e.g. creation or \
+ deletion of transient queues).</dd>\
+ <dt>Disk</dt>\
+ <dd>Rate at which all schema data store transactions take place (e.g. \
+ creation or deletion of durable queues).</dd>\
+ </dl>',
+
+ 'persister-operations-msg':
+ 'Rate at which per-message persister operations take place on this node. See \
+ <a href="https://www.rabbitmq.com/persistence-conf.html" target="_blank">here</a> \
+ for more information on the persister. \
+ <dl>\
+ <dt>QI Journal</dt>\
+ <dd>Rate at which message information (publishes, deliveries and \
+ acknowledgements) is written to queue index journals.</dd>\
+ <dt>Store Read</dt>\
+ <dd>Rate at which messages are read from the message store.</dd>\
+ <dt>Store Write</dt>\
+ <dd>Rate at which messages are written to the message store.</dd>\
+ </dl>',
+
+ 'persister-operations-bulk':
+ 'Rate at which whole-file persister operations take place on this node. See \
+ <a href="https://www.rabbitmq.com/persistence-conf.html" target="_blank">here</a> \
+ for more information on the persister. \
+ <dl>\
+ <dt>QI Read</dt>\
+ <dd>Rate at which queue index segment files are read.</dd>\
+ <dt>QI Write</dt>\
+ <dd>Rate at which queue index segment files are written.</dd>\
+ </dl>',
+
+ 'gc-operations':
+ 'Rate at which garbage collection operations take place on this node.',
+
+ 'gc-bytes':
+ 'Rate at which memory is reclaimed by the garbage collector on this node.',
+
+ 'context-switches-operations':
+ 'Rate at which runtime context switching takes place on this node.',
+
+ 'process-reductions':
+ 'Rate at which reductions take place on this process.',
+
+ 'connection-operations':
+ ' <dl>\
+ <dt>Created</dt>\
+ <dd>Rate at which connections are created.</dd>\
+ <dt>Closed</dt>\
+ <dd>Rate at which connections are closed.</dd>\
+ </dl> ',
+
+ 'channel-operations':
+ ' <dl>\
+ <dt>Created</dt>\
+ <dd>Rate at which channels are created.</dd>\
+ <dt>Closed</dt>\
+ <dd>Rate at which channels are closed.</dd>\
+ </dl> ',
+
+ 'queue-operations':
+ ' <dl>\
+ <dt>Declared</dt>\
+ <dd>Rate at which queues are declared by clients.</dd>\
+ <dt>Created</dt>\
+ <dd>Rate at which queues are created. Declaring a queue that already exists counts as a "Declared" event, but not as a "Created" event.</dd>\
+ <dt>Deleted</dt>\
+ <dd>Rate at which queues are deleted.</dd>\
+ </dl> '
+
+};
+
+///////////////////////////////////////////////////////////////////////////
+// //
+// Mostly constant, typically get set once at startup (or rarely anyway) //
+// //
+///////////////////////////////////////////////////////////////////////////
+
+// All these are to do with hiding UI elements if
+var rates_mode; // ...there are no fine stats
+var user_administrator; // ...user is not an admin
+var is_user_policymaker; // ...user is not a policymaker
+var user_monitor; // ...user cannot monitor
+var nodes_interesting; // ...we are not in a cluster
+var vhosts_interesting; // ...there is only one vhost
+var queue_type;
+var rabbit_versions_interesting; // ...all cluster nodes run the same version
+var disable_stats; // ...disable all stats, management only mode
+
+// Extensions write to this, the dispatcher maker reads it
+var dispatcher_modules = [];
+
+// We need to know when all extension script files have loaded
+var extension_count;
+
+// The dispatcher needs access to the Sammy app
+var app;
+
+// Used for the new exchange form, and to display broken exchange types
+var exchange_types;
+
+// Used for access control
+var user_tags;
+var user;
+
+// Set up the above vars
+function setup_global_vars() {
+ var overview = JSON.parse(sync_get('/overview'));
+ rates_mode = overview.rates_mode;
+ user_tags = expand_user_tags(user.tags.split(","));
+ user_administrator = jQuery.inArray("administrator", user_tags) != -1;
+ is_user_policymaker = jQuery.inArray("policymaker", user_tags) != -1;
+ user_monitor = jQuery.inArray("monitoring", user_tags) != -1;
+ exchange_types = overview.exchange_types.map(function(xt) { return xt.name; });
+
+ cluster_name = fmt_escape_html(overview.cluster_name);
+ $('#logout').before(
+ '<li>Cluster ' + (user_administrator ? '<a href="#/cluster-name">' + cluster_name + '</a>' : cluster_name) + '</li>'
+ );
+
+ user_name = fmt_escape_html(user.name);
+ $('#header #logout').prepend(
+ 'User ' + (user_administrator ? '<a href="#/users/' + user_name + '">' + user_name + '</a>' : user_name)
+ );
+
+ var product = overview.rabbitmq_version;
+ if (overview.product_name && overview.product_version) {
+ product = overview.product_name + ' ' + overview.product_version;
+ }
+
+ $('#versions').html(
+ '<abbr title="Available exchange types: ' + exchange_types.join(", ") + '">' + fmt_escape_html(product) + '</abbr>' +
+ '<abbr title="' + fmt_escape_html(overview.erlang_full_version) + '">Erlang ' + fmt_escape_html(overview.erlang_version) + '</abbr>'
+ );
+ nodes_interesting = false;
+ rabbit_versions_interesting = false;
+ if (user_monitor) {
+ var nodes = JSON.parse(sync_get('/nodes'));
+ if (nodes.length > 1) {
+ nodes_interesting = true;
+ var v = '';
+ for (var i = 0; i < nodes.length; i++) {
+ var v1 = fmt_rabbit_version(nodes[i].applications);
+ if (v1 != 'unknown') {
+ if (v != '' && v != v1) rabbit_versions_interesting = true;
+ v = v1;
+ }
+ }
+ }
+ }
+ vhosts_interesting = JSON.parse(sync_get('/vhosts')).length > 1;
+
+ queue_type = "classic";
+ current_vhost = get_pref('vhost');
+ exchange_types = overview.exchange_types;
+
+ disable_stats = overview.disable_stats;
+ enable_queue_totals = overview.enable_queue_totals;
+ COLUMNS = disable_stats ? DISABLED_STATS_COLUMNS : ALL_COLUMNS;
+
+ setup_chart_ranges(overview.sample_retention_policies);
+}
+
+function setup_chart_ranges(srp) {
+ var range_types = ['global', 'basic'];
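+ // Each entry maps a retention period in seconds to an ['age|increment', label]
+ // pair; the 'age|increment' string is presumably the sample age and increment
+ // the UI passes along when requesting chart data for that range.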
+ var default_ranges = {
+ 60: ['60|5', 'Last minute'],
+ 600: ['600|5', 'Last ten minutes'],
+ 3600: ['3600|60', 'Last hour'],
+ 28800: ['28800|600', 'Last eight hours'],
+ 86400: ['86400|1800', 'Last day']
+ };
+
+ for (var key in default_ranges) {
+ var data = default_ranges[key];
+ var range = data[0];
+ var desc = data[1];
+ ALL_CHART_RANGES[range] = desc;
+ }
+
+ for (var i = 0; i < range_types.length; ++i) {
+ var range_type = range_types[i];
+ if (srp.hasOwnProperty(range_type)) {
+ var srp_range_types = srp[range_type];
+ var last_minute_added = false;
+ for (var j = 0; j < srp_range_types.length; ++j) {
+ var srp_range = srp_range_types[j];
+ if (default_ranges.hasOwnProperty(srp_range)) {
+ if (srp_range === 60) {
+ last_minute_added = true;
+ }
+ var v = default_ranges[srp_range];
+ CHART_RANGES[range_type].push(v);
+ }
+ }
+ if (!last_minute_added) {
+ var last_minute = default_ranges[60];
+ CHART_RANGES[range_type].unshift(last_minute);
+ }
+ }
+ }
+}
+
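+// For example, expand_user_tags(["administrator"]) returns
+// ["administrator", "monitoring", "policymaker", "management"]: the
+// "administrator" case deliberately falls through to "monitoring", which in
+// turn implies "management".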
+function expand_user_tags(tags) {
+ var new_tags = [];
+ for (var i = 0; i < tags.length; i++) {
+ var tag = tags[i];
+ new_tags.push(tag);
+ switch (tag) { // Note deliberate fall-through
+ case "administrator": new_tags.push("monitoring");
+ new_tags.push("policymaker");
+ case "monitoring": new_tags.push("management");
+ break;
+ case "policymaker": new_tags.push("management");
+ default: break;
+ }
+ }
+ return new_tags;
+}
+
+////////////////////////////////////////////////////
+// //
+// Change frequently (typically every "new page") //
+// //
+////////////////////////////////////////////////////
+
+// Which top level template we're showing
+var current_template;
+
+// Which JSON requests do we need to populate it
+var current_reqs;
+
+// And which of those have yet to return (so we can cancel them when
+// changing current_template).
+var outstanding_reqs = [];
+
+// Which tab is highlighted
+var current_highlight;
+
+// Which vhost are we looking at
+var current_vhost = '';
+
+// What is our current sort order
+var current_sort;
+var current_sort_reverse = false;
+
+var current_filter = '';
+var current_filter_regex_on = false;
+
+var current_filter_regex;
+var current_truncate;
+
+// The timer object for auto-updates, and how often it goes off
+var timer;
+var timer_interval;
+
+// When did we last connect successfully (for the "could not connect" error)
+var last_successful_connect;
+
+// Every 200 updates without user interaction we do a full refresh, to
+// work around memory leaks in browser DOM implementations.
+// TODO: maybe we don't need this any more?
+var update_counter = 0;
+
+// Holds chart data in between writing the div in an ejs and rendering
+// the chart.
+var chart_data = {};
+
+// Set whenever the UI requests a page that no longer exists
+// because things were deleted between refreshes
+var last_page_out_of_range_error = 0;
+
+var enable_uaa;
+var uaa_client_id;
+var uaa_location;
diff --git a/deps/rabbitmq_management/priv/www/js/jquery-3.5.1.js b/deps/rabbitmq_management/priv/www/js/jquery-3.5.1.js
new file mode 100644
index 0000000000..50937333b9
--- /dev/null
+++ b/deps/rabbitmq_management/priv/www/js/jquery-3.5.1.js
@@ -0,0 +1,10872 @@
+/*!
+ * jQuery JavaScript Library v3.5.1
+ * https://jquery.com/
+ *
+ * Includes Sizzle.js
+ * https://sizzlejs.com/
+ *
+ * Copyright JS Foundation and other contributors
+ * Released under the MIT license
+ * https://jquery.org/license
+ *
+ * Date: 2020-05-04T22:49Z
+ */
+( function( global, factory ) {
+
+ "use strict";
+
+ if ( typeof module === "object" && typeof module.exports === "object" ) {
+
+ // For CommonJS and CommonJS-like environments where a proper `window`
+ // is present, execute the factory and get jQuery.
+ // For environments that do not have a `window` with a `document`
+ // (such as Node.js), expose a factory as module.exports.
+ // This accentuates the need for the creation of a real `window`.
+ // e.g. var jQuery = require("jquery")(window);
+ // See ticket #14549 for more info.
+ module.exports = global.document ?
+ factory( global, true ) :
+ function( w ) {
+ if ( !w.document ) {
+ throw new Error( "jQuery requires a window with a document" );
+ }
+ return factory( w );
+ };
+ } else {
+ factory( global );
+ }
+
+// Pass this if window is not defined yet
+} )( typeof window !== "undefined" ? window : this, function( window, noGlobal ) {
+
+// Edge <= 12 - 13+, Firefox <=18 - 45+, IE 10 - 11, Safari 5.1 - 9+, iOS 6 - 9.1
+// throw exceptions when non-strict code (e.g., ASP.NET 4.5) accesses strict mode
+// arguments.callee.caller (trac-13335). But as of jQuery 3.0 (2016), strict mode should be common
+// enough that all such attempts are guarded in a try block.
+"use strict";
+
+var arr = [];
+
+var getProto = Object.getPrototypeOf;
+
+var slice = arr.slice;
+
+var flat = arr.flat ? function( array ) {
+ return arr.flat.call( array );
+} : function( array ) {
+ return arr.concat.apply( [], array );
+};
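+// (e.g. flat( [ 1, [ 2, 3 ] ] ) returns [ 1, 2, 3 ]; both branches flatten a
+// single level, matching the default depth of Array.prototype.flat.)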
+
+
+var push = arr.push;
+
+var indexOf = arr.indexOf;
+
+var class2type = {};
+
+var toString = class2type.toString;
+
+var hasOwn = class2type.hasOwnProperty;
+
+var fnToString = hasOwn.toString;
+
+var ObjectFunctionString = fnToString.call( Object );
+
+var support = {};
+
+var isFunction = function isFunction( obj ) {
+
+ // Support: Chrome <=57, Firefox <=52
+ // In some browsers, typeof returns "function" for HTML <object> elements
+ // (i.e., `typeof document.createElement( "object" ) === "function"`).
+ // We don't want to classify *any* DOM node as a function.
+ return typeof obj === "function" && typeof obj.nodeType !== "number";
+ };
+
+
+var isWindow = function isWindow( obj ) {
+ return obj != null && obj === obj.window;
+ };
+
+
+var document = window.document;
+
+
+
+ var preservedScriptAttributes = {
+ type: true,
+ src: true,
+ nonce: true,
+ noModule: true
+ };
+
+ function DOMEval( code, node, doc ) {
+ doc = doc || document;
+
+ var i, val,
+ script = doc.createElement( "script" );
+
+ script.text = code;
+ if ( node ) {
+ for ( i in preservedScriptAttributes ) {
+
+ // Support: Firefox 64+, Edge 18+
+ // Some browsers don't support the "nonce" property on scripts.
+ // On the other hand, just using `getAttribute` is not enough as
+ // the `nonce` attribute is reset to an empty string whenever it
+ // becomes browsing-context connected.
+ // See https://github.com/whatwg/html/issues/2369
+ // See https://html.spec.whatwg.org/#nonce-attributes
+ // The `node.getAttribute` check was added for the sake of
+ // `jQuery.globalEval` so that it can fake a nonce-containing node
+ // via an object.
+ val = node[ i ] || node.getAttribute && node.getAttribute( i );
+ if ( val ) {
+ script.setAttribute( i, val );
+ }
+ }
+ }
+ doc.head.appendChild( script ).parentNode.removeChild( script );
+ }
+
+
+function toType( obj ) {
+ if ( obj == null ) {
+ return obj + "";
+ }
+
+ // Support: Android <=2.3 only (functionish RegExp)
+ return typeof obj === "object" || typeof obj === "function" ?
+ class2type[ toString.call( obj ) ] || "object" :
+ typeof obj;
+}
+/* global Symbol */
+// Defining this global in .eslintrc.json would create a danger of using the global
+// unguarded in another place, it seems safer to define global only for this module
+
+
+
+var
+ version = "3.5.1",
+
+ // Define a local copy of jQuery
+ jQuery = function( selector, context ) {
+
+ // The jQuery object is actually just the init constructor 'enhanced'
+ // Need init if jQuery is called (just allow error to be thrown if not included)
+ return new jQuery.fn.init( selector, context );
+ };
+
+jQuery.fn = jQuery.prototype = {
+
+ // The current version of jQuery being used
+ jquery: version,
+
+ constructor: jQuery,
+
+ // The default length of a jQuery object is 0
+ length: 0,
+
+ toArray: function() {
+ return slice.call( this );
+ },
+
+ // Get the Nth element in the matched element set OR
+ // Get the whole matched element set as a clean array
+ get: function( num ) {
+
+ // Return all the elements in a clean array
+ if ( num == null ) {
+ return slice.call( this );
+ }
+
+ // Return just the one element from the set
+ return num < 0 ? this[ num + this.length ] : this[ num ];
+ },
+
+ // Take an array of elements and push it onto the stack
+ // (returning the new matched element set)
+ pushStack: function( elems ) {
+
+ // Build a new jQuery matched element set
+ var ret = jQuery.merge( this.constructor(), elems );
+
+ // Add the old object onto the stack (as a reference)
+ ret.prevObject = this;
+
+ // Return the newly-formed element set
+ return ret;
+ },
+
+ // Execute a callback for every element in the matched set.
+ each: function( callback ) {
+ return jQuery.each( this, callback );
+ },
+
+ map: function( callback ) {
+ return this.pushStack( jQuery.map( this, function( elem, i ) {
+ return callback.call( elem, i, elem );
+ } ) );
+ },
+
+ slice: function() {
+ return this.pushStack( slice.apply( this, arguments ) );
+ },
+
+ first: function() {
+ return this.eq( 0 );
+ },
+
+ last: function() {
+ return this.eq( -1 );
+ },
+
+ even: function() {
+ return this.pushStack( jQuery.grep( this, function( _elem, i ) {
+ return ( i + 1 ) % 2;
+ } ) );
+ },
+
+ odd: function() {
+ return this.pushStack( jQuery.grep( this, function( _elem, i ) {
+ return i % 2;
+ } ) );
+ },
+
+ eq: function( i ) {
+ var len = this.length,
+ j = +i + ( i < 0 ? len : 0 );
+ return this.pushStack( j >= 0 && j < len ? [ this[ j ] ] : [] );
+ },
+
+ end: function() {
+ return this.prevObject || this.constructor();
+ },
+
+ // For internal use only.
+ // Behaves like an Array's method, not like a jQuery method.
+ push: push,
+ sort: arr.sort,
+ splice: arr.splice
+};
+
+jQuery.extend = jQuery.fn.extend = function() {
+ var options, name, src, copy, copyIsArray, clone,
+ target = arguments[ 0 ] || {},
+ i = 1,
+ length = arguments.length,
+ deep = false;
+
+ // Handle a deep copy situation
+ if ( typeof target === "boolean" ) {
+ deep = target;
+
+ // Skip the boolean and the target
+ target = arguments[ i ] || {};
+ i++;
+ }
+
+ // Handle case when target is a string or something (possible in deep copy)
+ if ( typeof target !== "object" && !isFunction( target ) ) {
+ target = {};
+ }
+
+ // Extend jQuery itself if only one argument is passed
+ if ( i === length ) {
+ target = this;
+ i--;
+ }
+
+ for ( ; i < length; i++ ) {
+
+ // Only deal with non-null/undefined values
+ if ( ( options = arguments[ i ] ) != null ) {
+
+ // Extend the base object
+ for ( name in options ) {
+ copy = options[ name ];
+
+ // Prevent Object.prototype pollution
+ // Prevent never-ending loop
+ if ( name === "__proto__" || target === copy ) {
+ continue;
+ }
+
+ // Recurse if we're merging plain objects or arrays
+ if ( deep && copy && ( jQuery.isPlainObject( copy ) ||
+ ( copyIsArray = Array.isArray( copy ) ) ) ) {
+ src = target[ name ];
+
+ // Ensure proper type for the source value
+ if ( copyIsArray && !Array.isArray( src ) ) {
+ clone = [];
+ } else if ( !copyIsArray && !jQuery.isPlainObject( src ) ) {
+ clone = {};
+ } else {
+ clone = src;
+ }
+ copyIsArray = false;
+
+ // Never move original objects, clone them
+ target[ name ] = jQuery.extend( deep, clone, copy );
+
+ // Don't bring in undefined values
+ } else if ( copy !== undefined ) {
+ target[ name ] = copy;
+ }
+ }
+ }
+ }
+
+ // Return the modified object
+ return target;
+};
+
+jQuery.extend( {
+
+ // Unique for each copy of jQuery on the page
+ expando: "jQuery" + ( version + Math.random() ).replace( /\D/g, "" ),
+
+ // Assume jQuery is ready without the ready module
+ isReady: true,
+
+ error: function( msg ) {
+ throw new Error( msg );
+ },
+
+ noop: function() {},
+
+ isPlainObject: function( obj ) {
+ var proto, Ctor;
+
+ // Detect obvious negatives
+ // Use toString instead of jQuery.type to catch host objects
+ if ( !obj || toString.call( obj ) !== "[object Object]" ) {
+ return false;
+ }
+
+ proto = getProto( obj );
+
+ // Objects with no prototype (e.g., `Object.create( null )`) are plain
+ if ( !proto ) {
+ return true;
+ }
+
+ // Objects with prototype are plain iff they were constructed by a global Object function
+ Ctor = hasOwn.call( proto, "constructor" ) && proto.constructor;
+ return typeof Ctor === "function" && fnToString.call( Ctor ) === ObjectFunctionString;
+ },
+
+ isEmptyObject: function( obj ) {
+ var name;
+
+ for ( name in obj ) {
+ return false;
+ }
+ return true;
+ },
+
+ // Evaluates a script in a provided context; falls back to the global one
+ // if not specified.
+ globalEval: function( code, options, doc ) {
+ DOMEval( code, { nonce: options && options.nonce }, doc );
+ },
+
+ each: function( obj, callback ) {
+ var length, i = 0;
+
+ if ( isArrayLike( obj ) ) {
+ length = obj.length;
+ for ( ; i < length; i++ ) {
+ if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) {
+ break;
+ }
+ }
+ } else {
+ for ( i in obj ) {
+ if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) {
+ break;
+ }
+ }
+ }
+
+ return obj;
+ },
+
+ // results is for internal usage only
+ makeArray: function( arr, results ) {
+ var ret = results || [];
+
+ if ( arr != null ) {
+ if ( isArrayLike( Object( arr ) ) ) {
+ jQuery.merge( ret,
+ typeof arr === "string" ?
+ [ arr ] : arr
+ );
+ } else {
+ push.call( ret, arr );
+ }
+ }
+
+ return ret;
+ },
+
+ inArray: function( elem, arr, i ) {
+ return arr == null ? -1 : indexOf.call( arr, elem, i );
+ },
+
+ // Support: Android <=4.0 only, PhantomJS 1 only
+ // push.apply(_, arraylike) throws on ancient WebKit
+ merge: function( first, second ) {
+ var len = +second.length,
+ j = 0,
+ i = first.length;
+
+ for ( ; j < len; j++ ) {
+ first[ i++ ] = second[ j ];
+ }
+
+ first.length = i;
+
+ return first;
+ },
+
+ grep: function( elems, callback, invert ) {
+ var callbackInverse,
+ matches = [],
+ i = 0,
+ length = elems.length,
+ callbackExpect = !invert;
+
+ // Go through the array, only saving the items
+ // that pass the validator function
+ for ( ; i < length; i++ ) {
+ callbackInverse = !callback( elems[ i ], i );
+ if ( callbackInverse !== callbackExpect ) {
+ matches.push( elems[ i ] );
+ }
+ }
+
+ return matches;
+ },
+
+ // arg is for internal usage only
+ map: function( elems, callback, arg ) {
+ var length, value,
+ i = 0,
+ ret = [];
+
+ // Go through the array, translating each of the items to their new values
+ if ( isArrayLike( elems ) ) {
+ length = elems.length;
+ for ( ; i < length; i++ ) {
+ value = callback( elems[ i ], i, arg );
+
+ if ( value != null ) {
+ ret.push( value );
+ }
+ }
+
+ // Go through every key on the object,
+ } else {
+ for ( i in elems ) {
+ value = callback( elems[ i ], i, arg );
+
+ if ( value != null ) {
+ ret.push( value );
+ }
+ }
+ }
+
+ // Flatten any nested arrays
+ return flat( ret );
+ },
+
+ // A global GUID counter for objects
+ guid: 1,
+
+ // jQuery.support is not used in Core but other projects attach their
+ // properties to it so it needs to exist.
+ support: support
+} );
+
+if ( typeof Symbol === "function" ) {
+ jQuery.fn[ Symbol.iterator ] = arr[ Symbol.iterator ];
+}
+
+// Populate the class2type map
+jQuery.each( "Boolean Number String Function Array Date RegExp Object Error Symbol".split( " " ),
+function( _i, name ) {
+ class2type[ "[object " + name + "]" ] = name.toLowerCase();
+} );
+
+function isArrayLike( obj ) {
+
+ // Support: real iOS 8.2 only (not reproducible in simulator)
+ // `in` check used to prevent JIT error (gh-2145)
+ // hasOwn isn't used here due to false negatives
+ // regarding Nodelist length in IE
+ var length = !!obj && "length" in obj && obj.length,
+ type = toType( obj );
+
+ if ( isFunction( obj ) || isWindow( obj ) ) {
+ return false;
+ }
+
+ return type === "array" || length === 0 ||
+ typeof length === "number" && length > 0 && ( length - 1 ) in obj;
+}
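+// For example, a NodeList from querySelectorAll is array-like, while a window
+// or a function is not.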
+var Sizzle =
+/*!
+ * Sizzle CSS Selector Engine v2.3.5
+ * https://sizzlejs.com/
+ *
+ * Copyright JS Foundation and other contributors
+ * Released under the MIT license
+ * https://js.foundation/
+ *
+ * Date: 2020-03-14
+ */
+( function( window ) {
+var i,
+ support,
+ Expr,
+ getText,
+ isXML,
+ tokenize,
+ compile,
+ select,
+ outermostContext,
+ sortInput,
+ hasDuplicate,
+
+ // Local document vars
+ setDocument,
+ document,
+ docElem,
+ documentIsHTML,
+ rbuggyQSA,
+ rbuggyMatches,
+ matches,
+ contains,
+
+ // Instance-specific data
+ expando = "sizzle" + 1 * new Date(),
+ preferredDoc = window.document,
+ dirruns = 0,
+ done = 0,
+ classCache = createCache(),
+ tokenCache = createCache(),
+ compilerCache = createCache(),
+ nonnativeSelectorCache = createCache(),
+ sortOrder = function( a, b ) {
+ if ( a === b ) {
+ hasDuplicate = true;
+ }
+ return 0;
+ },
+
+ // Instance methods
+ hasOwn = ( {} ).hasOwnProperty,
+ arr = [],
+ pop = arr.pop,
+ pushNative = arr.push,
+ push = arr.push,
+ slice = arr.slice,
+
+ // Use a stripped-down indexOf as it's faster than native
+ // https://jsperf.com/thor-indexof-vs-for/5
+ indexOf = function( list, elem ) {
+ var i = 0,
+ len = list.length;
+ for ( ; i < len; i++ ) {
+ if ( list[ i ] === elem ) {
+ return i;
+ }
+ }
+ return -1;
+ },
+
+ booleans = "checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|" +
+ "ismap|loop|multiple|open|readonly|required|scoped",
+
+ // Regular expressions
+
+ // http://www.w3.org/TR/css3-selectors/#whitespace
+ whitespace = "[\\x20\\t\\r\\n\\f]",
+
+ // https://www.w3.org/TR/css-syntax-3/#ident-token-diagram
+ identifier = "(?:\\\\[\\da-fA-F]{1,6}" + whitespace +
+ "?|\\\\[^\\r\\n\\f]|[\\w-]|[^\0-\\x7f])+",
+
+ // Attribute selectors: http://www.w3.org/TR/selectors/#attribute-selectors
+ attributes = "\\[" + whitespace + "*(" + identifier + ")(?:" + whitespace +
+
+ // Operator (capture 2)
+ "*([*^$|!~]?=)" + whitespace +
+
+ // "Attribute values must be CSS identifiers [capture 5]
+ // or strings [capture 3 or capture 4]"
+ "*(?:'((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\"|(" + identifier + "))|)" +
+ whitespace + "*\\]",
+
+ pseudos = ":(" + identifier + ")(?:\\((" +
+
+ // To reduce the number of selectors needing tokenize in the preFilter, prefer arguments:
+ // 1. quoted (capture 3; capture 4 or capture 5)
+ "('((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\")|" +
+
+ // 2. simple (capture 6)
+ "((?:\\\\.|[^\\\\()[\\]]|" + attributes + ")*)|" +
+
+ // 3. anything else (capture 2)
+ ".*" +
+ ")\\)|)",
+
+ // Leading and non-escaped trailing whitespace, capturing some non-whitespace characters preceding the latter
+ rwhitespace = new RegExp( whitespace + "+", "g" ),
+ rtrim = new RegExp( "^" + whitespace + "+|((?:^|[^\\\\])(?:\\\\.)*)" +
+ whitespace + "+$", "g" ),
+
+ rcomma = new RegExp( "^" + whitespace + "*," + whitespace + "*" ),
+ rcombinators = new RegExp( "^" + whitespace + "*([>+~]|" + whitespace + ")" + whitespace +
+ "*" ),
+ rdescend = new RegExp( whitespace + "|>" ),
+
+ rpseudo = new RegExp( pseudos ),
+ ridentifier = new RegExp( "^" + identifier + "$" ),
+
+ matchExpr = {
+ "ID": new RegExp( "^#(" + identifier + ")" ),
+ "CLASS": new RegExp( "^\\.(" + identifier + ")" ),
+ "TAG": new RegExp( "^(" + identifier + "|[*])" ),
+ "ATTR": new RegExp( "^" + attributes ),
+ "PSEUDO": new RegExp( "^" + pseudos ),
+ "CHILD": new RegExp( "^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\(" +
+ whitespace + "*(even|odd|(([+-]|)(\\d*)n|)" + whitespace + "*(?:([+-]|)" +
+ whitespace + "*(\\d+)|))" + whitespace + "*\\)|)", "i" ),
+ "bool": new RegExp( "^(?:" + booleans + ")$", "i" ),
+
+ // For use in libraries implementing .is()
+ // We use this for POS matching in `select`
+ "needsContext": new RegExp( "^" + whitespace +
+ "*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\(" + whitespace +
+ "*((?:-\\d)?\\d*)" + whitespace + "*\\)|)(?=[^-]|$)", "i" )
+ },
+
+ rhtml = /HTML$/i,
+ rinputs = /^(?:input|select|textarea|button)$/i,
+ rheader = /^h\d$/i,
+
+ rnative = /^[^{]+\{\s*\[native \w/,
+
+ // Easily-parseable/retrievable ID or TAG or CLASS selectors
+ rquickExpr = /^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/,
+
+ rsibling = /[+~]/,
+
+ // CSS escapes
+ // http://www.w3.org/TR/CSS21/syndata.html#escaped-characters
+ runescape = new RegExp( "\\\\[\\da-fA-F]{1,6}" + whitespace + "?|\\\\([^\\r\\n\\f])", "g" ),
+ funescape = function( escape, nonHex ) {
+ var high = "0x" + escape.slice( 1 ) - 0x10000;
+
+ return nonHex ?
+
+ // Strip the backslash prefix from a non-hex escape sequence
+ nonHex :
+
+ // Replace a hexadecimal escape sequence with the encoded Unicode code point
+ // Support: IE <=11+
+ // For values outside the Basic Multilingual Plane (BMP), manually construct a
+ // surrogate pair
+ high < 0 ?
+ String.fromCharCode( high + 0x10000 ) :
+ String.fromCharCode( high >> 10 | 0xD800, high & 0x3FF | 0xDC00 );
+ },
+
+ // CSS string/identifier serialization
+ // https://drafts.csswg.org/cssom/#common-serializing-idioms
+ rcssescape = /([\0-\x1f\x7f]|^-?\d)|^-$|[^\0-\x1f\x7f-\uFFFF\w-]/g,
+ fcssescape = function( ch, asCodePoint ) {
+ if ( asCodePoint ) {
+
+ // U+0000 NULL becomes U+FFFD REPLACEMENT CHARACTER
+ if ( ch === "\0" ) {
+ return "\uFFFD";
+ }
+
+ // Control characters and (dependent upon position) numbers get escaped as code points
+ return ch.slice( 0, -1 ) + "\\" +
+ ch.charCodeAt( ch.length - 1 ).toString( 16 ) + " ";
+ }
+
+ // Other potentially-special ASCII characters get backslash-escaped
+ return "\\" + ch;
+ },
+
+ // Used for iframes
+ // See setDocument()
+ // Removing the function wrapper causes a "Permission Denied"
+ // error in IE
+ unloadHandler = function() {
+ setDocument();
+ },
+
+ inDisabledFieldset = addCombinator(
+ function( elem ) {
+ return elem.disabled === true && elem.nodeName.toLowerCase() === "fieldset";
+ },
+ { dir: "parentNode", next: "legend" }
+ );
+
+// Optimize for push.apply( _, NodeList )
+try {
+ push.apply(
+ ( arr = slice.call( preferredDoc.childNodes ) ),
+ preferredDoc.childNodes
+ );
+
+ // Support: Android<4.0
+ // Detect silently failing push.apply
+ // eslint-disable-next-line no-unused-expressions
+ arr[ preferredDoc.childNodes.length ].nodeType;
+} catch ( e ) {
+ push = { apply: arr.length ?
+
+ // Leverage slice if possible
+ function( target, els ) {
+ pushNative.apply( target, slice.call( els ) );
+ } :
+
+ // Support: IE<9
+ // Otherwise append directly
+ function( target, els ) {
+ var j = target.length,
+ i = 0;
+
+ // Can't trust NodeList.length
+ while ( ( target[ j++ ] = els[ i++ ] ) ) {}
+ target.length = j - 1;
+ }
+ };
+}
+
+function Sizzle( selector, context, results, seed ) {
+ var m, i, elem, nid, match, groups, newSelector,
+ newContext = context && context.ownerDocument,
+
+ // nodeType defaults to 9, since context defaults to document
+ nodeType = context ? context.nodeType : 9;
+
+ results = results || [];
+
+ // Return early from calls with invalid selector or context
+ if ( typeof selector !== "string" || !selector ||
+ nodeType !== 1 && nodeType !== 9 && nodeType !== 11 ) {
+
+ return results;
+ }
+
+ // Try to shortcut find operations (as opposed to filters) in HTML documents
+ if ( !seed ) {
+ setDocument( context );
+ context = context || document;
+
+ if ( documentIsHTML ) {
+
+ // If the selector is sufficiently simple, try using a "get*By*" DOM method
+ // (excepting DocumentFragment context, where the methods don't exist)
+ if ( nodeType !== 11 && ( match = rquickExpr.exec( selector ) ) ) {
+
+ // ID selector
+ if ( ( m = match[ 1 ] ) ) {
+
+ // Document context
+ if ( nodeType === 9 ) {
+ if ( ( elem = context.getElementById( m ) ) ) {
+
+ // Support: IE, Opera, Webkit
+ // TODO: identify versions
+ // getElementById can match elements by name instead of ID
+ if ( elem.id === m ) {
+ results.push( elem );
+ return results;
+ }
+ } else {
+ return results;
+ }
+
+ // Element context
+ } else {
+
+ // Support: IE, Opera, Webkit
+ // TODO: identify versions
+ // getElementById can match elements by name instead of ID
+ if ( newContext && ( elem = newContext.getElementById( m ) ) &&
+ contains( context, elem ) &&
+ elem.id === m ) {
+
+ results.push( elem );
+ return results;
+ }
+ }
+
+ // Type selector
+ } else if ( match[ 2 ] ) {
+ push.apply( results, context.getElementsByTagName( selector ) );
+ return results;
+
+ // Class selector
+ } else if ( ( m = match[ 3 ] ) && support.getElementsByClassName &&
+ context.getElementsByClassName ) {
+
+ push.apply( results, context.getElementsByClassName( m ) );
+ return results;
+ }
+ }
+
+ // Take advantage of querySelectorAll
+ if ( support.qsa &&
+ !nonnativeSelectorCache[ selector + " " ] &&
+ ( !rbuggyQSA || !rbuggyQSA.test( selector ) ) &&
+
+ // Support: IE 8 only
+ // Exclude object elements
+ ( nodeType !== 1 || context.nodeName.toLowerCase() !== "object" ) ) {
+
+ newSelector = selector;
+ newContext = context;
+
+ // qSA considers elements outside a scoping root when evaluating child or
+ // descendant combinators, which is not what we want.
+ // In such cases, we work around the behavior by prefixing every selector in the
+ // list with an ID selector referencing the scope context.
+ // The technique has to be used as well when a leading combinator is used
+ // as such selectors are not recognized by querySelectorAll.
+ // Thanks to Andrew Dupont for this technique.
+ if ( nodeType === 1 &&
+ ( rdescend.test( selector ) || rcombinators.test( selector ) ) ) {
+
+ // Expand context for sibling selectors
+ newContext = rsibling.test( selector ) && testContext( context.parentNode ) ||
+ context;
+
+ // We can use :scope instead of the ID hack if the browser
+ // supports it & if we're not changing the context.
+ if ( newContext !== context || !support.scope ) {
+
+ // Capture the context ID, setting it first if necessary
+ if ( ( nid = context.getAttribute( "id" ) ) ) {
+ nid = nid.replace( rcssescape, fcssescape );
+ } else {
+ context.setAttribute( "id", ( nid = expando ) );
+ }
+ }
+
+ // Prefix every selector in the list
+ groups = tokenize( selector );
+ i = groups.length;
+ while ( i-- ) {
+ groups[ i ] = ( nid ? "#" + nid : ":scope" ) + " " +
+ toSelector( groups[ i ] );
+ }
+ newSelector = groups.join( "," );
+ }
+
+ try {
+ push.apply( results,
+ newContext.querySelectorAll( newSelector )
+ );
+ return results;
+ } catch ( qsaError ) {
+ nonnativeSelectorCache( selector, true );
+ } finally {
+ if ( nid === expando ) {
+ context.removeAttribute( "id" );
+ }
+ }
+ }
+ }
+ }
+
+ // All others
+ return select( selector.replace( rtrim, "$1" ), context, results, seed );
+}
+
+/**
+ * Create key-value caches of limited size
+ * @returns {function(string, object)} Returns the Object data after storing it on itself with
+ * property name the (space-suffixed) string and (if the cache is larger than Expr.cacheLength)
+ * deleting the oldest entry
+ */
+function createCache() {
+ var keys = [];
+
+ function cache( key, value ) {
+
+ // Use (key + " ") to avoid collision with native prototype properties (see Issue #157)
+ if ( keys.push( key + " " ) > Expr.cacheLength ) {
+
+ // Only keep the most recent entries
+ delete cache[ keys.shift() ];
+ }
+ return ( cache[ key + " " ] = value );
+ }
+ return cache;
+}
+
+/**
+ * Mark a function for special use by Sizzle
+ * @param {Function} fn The function to mark
+ */
+function markFunction( fn ) {
+ fn[ expando ] = true;
+ return fn;
+}
+
+/**
+ * Support testing using an element
+ * @param {Function} fn Passed the created element and returns a boolean result
+ */
+function assert( fn ) {
+ var el = document.createElement( "fieldset" );
+
+ try {
+ return !!fn( el );
+ } catch ( e ) {
+ return false;
+ } finally {
+
+ // Remove from its parent by default
+ if ( el.parentNode ) {
+ el.parentNode.removeChild( el );
+ }
+
+ // release memory in IE
+ el = null;
+ }
+}
+
+/**
+ * Adds the same handler for all of the specified attrs
+ * @param {String} attrs Pipe-separated list of attributes
+ * @param {Function} handler The method that will be applied
+ */
+function addHandle( attrs, handler ) {
+ var arr = attrs.split( "|" ),
+ i = arr.length;
+
+ while ( i-- ) {
+ Expr.attrHandle[ arr[ i ] ] = handler;
+ }
+}
+
+/**
+ * Checks document order of two siblings
+ * @param {Element} a
+ * @param {Element} b
+ * @returns {Number} Returns less than 0 if a precedes b, greater than 0 if a follows b
+ */
+function siblingCheck( a, b ) {
+ var cur = b && a,
+ diff = cur && a.nodeType === 1 && b.nodeType === 1 &&
+ a.sourceIndex - b.sourceIndex;
+
+ // Use IE sourceIndex if available on both nodes
+ if ( diff ) {
+ return diff;
+ }
+
+ // Check if b follows a
+ if ( cur ) {
+ while ( ( cur = cur.nextSibling ) ) {
+ if ( cur === b ) {
+ return -1;
+ }
+ }
+ }
+
+ return a ? 1 : -1;
+}
+
+/**
+ * Returns a function to use in pseudos for input types
+ * @param {String} type
+ */
+function createInputPseudo( type ) {
+ return function( elem ) {
+ var name = elem.nodeName.toLowerCase();
+ return name === "input" && elem.type === type;
+ };
+}
+
+/**
+ * Returns a function to use in pseudos for buttons
+ * @param {String} type
+ */
+function createButtonPseudo( type ) {
+ return function( elem ) {
+ var name = elem.nodeName.toLowerCase();
+ return ( name === "input" || name === "button" ) && elem.type === type;
+ };
+}
+
+/**
+ * Returns a function to use in pseudos for :enabled/:disabled
+ * @param {Boolean} disabled true for :disabled; false for :enabled
+ */
+function createDisabledPseudo( disabled ) {
+
+ // Known :disabled false positives: fieldset[disabled] > legend:nth-of-type(n+2) :can-disable
+ return function( elem ) {
+
+ // Only certain elements can match :enabled or :disabled
+ // https://html.spec.whatwg.org/multipage/scripting.html#selector-enabled
+ // https://html.spec.whatwg.org/multipage/scripting.html#selector-disabled
+ if ( "form" in elem ) {
+
+ // Check for inherited disabledness on relevant non-disabled elements:
+ // * listed form-associated elements in a disabled fieldset
+ // https://html.spec.whatwg.org/multipage/forms.html#category-listed
+ // https://html.spec.whatwg.org/multipage/forms.html#concept-fe-disabled
+ // * option elements in a disabled optgroup
+ // https://html.spec.whatwg.org/multipage/forms.html#concept-option-disabled
+ // All such elements have a "form" property.
+ if ( elem.parentNode && elem.disabled === false ) {
+
+ // Option elements defer to a parent optgroup if present
+ if ( "label" in elem ) {
+ if ( "label" in elem.parentNode ) {
+ return elem.parentNode.disabled === disabled;
+ } else {
+ return elem.disabled === disabled;
+ }
+ }
+
+ // Support: IE 6 - 11
+ // Use the isDisabled shortcut property to check for disabled fieldset ancestors
+ return elem.isDisabled === disabled ||
+
+ // Where there is no isDisabled, check manually
+ /* jshint -W018 */
+ elem.isDisabled !== !disabled &&
+ inDisabledFieldset( elem ) === disabled;
+ }
+
+ return elem.disabled === disabled;
+
+ // Try to winnow out elements that can't be disabled before trusting the disabled property.
+ // Some victims get caught in our net (label, legend, menu, track), but it shouldn't
+ // even exist on them, let alone have a boolean value.
+ } else if ( "label" in elem ) {
+ return elem.disabled === disabled;
+ }
+
+ // Remaining elements are neither :enabled nor :disabled
+ return false;
+ };
+}
+
+/**
+ * Returns a function to use in pseudos for positionals
+ * @param {Function} fn
+ */
+function createPositionalPseudo( fn ) {
+ return markFunction( function( argument ) {
+ argument = +argument;
+ return markFunction( function( seed, matches ) {
+ var j,
+ matchIndexes = fn( [], seed.length, argument ),
+ i = matchIndexes.length;
+
+ // Match elements found at the specified indexes
+ while ( i-- ) {
+ if ( seed[ ( j = matchIndexes[ i ] ) ] ) {
+ seed[ j ] = !( matches[ j ] = seed[ j ] );
+ }
+ }
+ } );
+ } );
+}
+
+/**
+ * Checks a node for validity as a Sizzle context
+ * @param {Element|Object=} context
+ * @returns {Element|Object|Boolean} The input node if acceptable, otherwise a falsy value
+ */
+function testContext( context ) {
+ return context && typeof context.getElementsByTagName !== "undefined" && context;
+}
+
+// Expose support vars for convenience
+support = Sizzle.support = {};
+
+/**
+ * Detects XML nodes
+ * @param {Element|Object} elem An element or a document
+ * @returns {Boolean} True iff elem is a non-HTML XML node
+ */
+isXML = Sizzle.isXML = function( elem ) {
+ var namespace = elem.namespaceURI,
+ docElem = ( elem.ownerDocument || elem ).documentElement;
+
+ // Support: IE <=8
+ // Assume HTML when documentElement doesn't yet exist, such as inside loading iframes
+ // https://bugs.jquery.com/ticket/4833
+ return !rhtml.test( namespace || docElem && docElem.nodeName || "HTML" );
+};
+
+/**
+ * Sets document-related variables once based on the current document
+ * @param {Element|Object} [doc] An element or document object to use to set the document
+ * @returns {Object} Returns the current document
+ */
+setDocument = Sizzle.setDocument = function( node ) {
+ var hasCompare, subWindow,
+ doc = node ? node.ownerDocument || node : preferredDoc;
+
+ // Return early if doc is invalid or already selected
+ // Support: IE 11+, Edge 17 - 18+
+ // IE/Edge sometimes throw a "Permission denied" error when strict-comparing
+ // two documents; shallow comparisons work.
+ // eslint-disable-next-line eqeqeq
+ if ( doc == document || doc.nodeType !== 9 || !doc.documentElement ) {
+ return document;
+ }
+
+ // Update global variables
+ document = doc;
+ docElem = document.documentElement;
+ documentIsHTML = !isXML( document );
+
+ // Support: IE 9 - 11+, Edge 12 - 18+
+ // Accessing iframe documents after unload throws "permission denied" errors (jQuery #13936)
+ // Support: IE 11+, Edge 17 - 18+
+ // IE/Edge sometimes throw a "Permission denied" error when strict-comparing
+ // two documents; shallow comparisons work.
+ // eslint-disable-next-line eqeqeq
+ if ( preferredDoc != document &&
+ ( subWindow = document.defaultView ) && subWindow.top !== subWindow ) {
+
+ // Support: IE 11, Edge
+ if ( subWindow.addEventListener ) {
+ subWindow.addEventListener( "unload", unloadHandler, false );
+
+ // Support: IE 9 - 10 only
+ } else if ( subWindow.attachEvent ) {
+ subWindow.attachEvent( "onunload", unloadHandler );
+ }
+ }
+
+ // Support: IE 8 - 11+, Edge 12 - 18+, Chrome <=16 - 25 only, Firefox <=3.6 - 31 only,
+ // Safari 4 - 5 only, Opera <=11.6 - 12.x only
+ // IE/Edge & older browsers don't support the :scope pseudo-class.
+ // Support: Safari 6.0 only
+ // Safari 6.0 supports :scope but it's an alias of :root there.
+ support.scope = assert( function( el ) {
+ docElem.appendChild( el ).appendChild( document.createElement( "div" ) );
+ return typeof el.querySelectorAll !== "undefined" &&
+ !el.querySelectorAll( ":scope fieldset div" ).length;
+ } );
+
+ /* Attributes
+ ---------------------------------------------------------------------- */
+
+ // Support: IE<8
+ // Verify that getAttribute really returns attributes and not properties
+ // (excepting IE8 booleans)
+ support.attributes = assert( function( el ) {
+ el.className = "i";
+ return !el.getAttribute( "className" );
+ } );
+
+ /* getElement(s)By*
+ ---------------------------------------------------------------------- */
+
+ // Check if getElementsByTagName("*") returns only elements
+ support.getElementsByTagName = assert( function( el ) {
+ el.appendChild( document.createComment( "" ) );
+ return !el.getElementsByTagName( "*" ).length;
+ } );
+
+ // Support: IE<9
+ support.getElementsByClassName = rnative.test( document.getElementsByClassName );
+
+ // Support: IE<10
+ // Check if getElementById returns elements by name
+ // The broken getElementById methods don't pick up programmatically-set names,
+ // so use a roundabout getElementsByName test
+ support.getById = assert( function( el ) {
+ docElem.appendChild( el ).id = expando;
+ return !document.getElementsByName || !document.getElementsByName( expando ).length;
+ } );
+
+ // ID filter and find
+ if ( support.getById ) {
+ Expr.filter[ "ID" ] = function( id ) {
+ var attrId = id.replace( runescape, funescape );
+ return function( elem ) {
+ return elem.getAttribute( "id" ) === attrId;
+ };
+ };
+ Expr.find[ "ID" ] = function( id, context ) {
+ if ( typeof context.getElementById !== "undefined" && documentIsHTML ) {
+ var elem = context.getElementById( id );
+ return elem ? [ elem ] : [];
+ }
+ };
+ } else {
+ Expr.filter[ "ID" ] = function( id ) {
+ var attrId = id.replace( runescape, funescape );
+ return function( elem ) {
+ var node = typeof elem.getAttributeNode !== "undefined" &&
+ elem.getAttributeNode( "id" );
+ return node && node.value === attrId;
+ };
+ };
+
+ // Support: IE 6 - 7 only
+ // getElementById is not reliable as a find shortcut
+ Expr.find[ "ID" ] = function( id, context ) {
+ if ( typeof context.getElementById !== "undefined" && documentIsHTML ) {
+ var node, i, elems,
+ elem = context.getElementById( id );
+
+ if ( elem ) {
+
+ // Verify the id attribute
+ node = elem.getAttributeNode( "id" );
+ if ( node && node.value === id ) {
+ return [ elem ];
+ }
+
+ // Fall back on getElementsByName
+ elems = context.getElementsByName( id );
+ i = 0;
+ while ( ( elem = elems[ i++ ] ) ) {
+ node = elem.getAttributeNode( "id" );
+ if ( node && node.value === id ) {
+ return [ elem ];
+ }
+ }
+ }
+
+ return [];
+ }
+ };
+ }
+
+ // Tag
+ Expr.find[ "TAG" ] = support.getElementsByTagName ?
+ function( tag, context ) {
+ if ( typeof context.getElementsByTagName !== "undefined" ) {
+ return context.getElementsByTagName( tag );
+
+ // DocumentFragment nodes don't have gEBTN
+ } else if ( support.qsa ) {
+ return context.querySelectorAll( tag );
+ }
+ } :
+
+ function( tag, context ) {
+ var elem,
+ tmp = [],
+ i = 0,
+
+ // By happy coincidence, a (broken) gEBTN appears on DocumentFragment nodes too
+ results = context.getElementsByTagName( tag );
+
+ // Filter out possible comments
+ if ( tag === "*" ) {
+ while ( ( elem = results[ i++ ] ) ) {
+ if ( elem.nodeType === 1 ) {
+ tmp.push( elem );
+ }
+ }
+
+ return tmp;
+ }
+ return results;
+ };
+
+ // Class
+ Expr.find[ "CLASS" ] = support.getElementsByClassName && function( className, context ) {
+ if ( typeof context.getElementsByClassName !== "undefined" && documentIsHTML ) {
+ return context.getElementsByClassName( className );
+ }
+ };
+
+ /* QSA/matchesSelector
+ ---------------------------------------------------------------------- */
+
+ // QSA and matchesSelector support
+
+ // matchesSelector(:active) reports false when true (IE9/Opera 11.5)
+ rbuggyMatches = [];
+
+ // qSa(:focus) reports false when true (Chrome 21)
+ // We allow this because of a bug in IE8/9 that throws an error
+ // whenever `document.activeElement` is accessed on an iframe
+ // So, we allow :focus to pass through QSA all the time to avoid the IE error
+ // See https://bugs.jquery.com/ticket/13378
+ rbuggyQSA = [];
+
+ if ( ( support.qsa = rnative.test( document.querySelectorAll ) ) ) {
+
+ // Build QSA regex
+ // Regex strategy adopted from Diego Perini
+ assert( function( el ) {
+
+ var input;
+
+ // Select is set to empty string on purpose
+ // This is to test IE's treatment of not explicitly
+ // setting a boolean content attribute,
+ // since its presence should be enough
+ // https://bugs.jquery.com/ticket/12359
+ docElem.appendChild( el ).innerHTML = "<a id='" + expando + "'></a>" +
+ "<select id='" + expando + "-\r\\' msallowcapture=''>" +
+ "<option selected=''></option></select>";
+
+ // Support: IE8, Opera 11-12.16
+ // Nothing should be selected when empty strings follow ^= or $= or *=
+ // The test attribute must be unknown in Opera but "safe" for WinRT
+ // https://msdn.microsoft.com/en-us/library/ie/hh465388.aspx#attribute_section
+ if ( el.querySelectorAll( "[msallowcapture^='']" ).length ) {
+ rbuggyQSA.push( "[*^$]=" + whitespace + "*(?:''|\"\")" );
+ }
+
+ // Support: IE8
+ // Boolean attributes and "value" are not treated correctly
+ if ( !el.querySelectorAll( "[selected]" ).length ) {
+ rbuggyQSA.push( "\\[" + whitespace + "*(?:value|" + booleans + ")" );
+ }
+
+ // Support: Chrome<29, Android<4.4, Safari<7.0+, iOS<7.0+, PhantomJS<1.9.8+
+ if ( !el.querySelectorAll( "[id~=" + expando + "-]" ).length ) {
+ rbuggyQSA.push( "~=" );
+ }
+
+ // Support: IE 11+, Edge 15 - 18+
+ // IE 11/Edge don't find elements on a `[name='']` query in some cases.
+ // Adding a temporary attribute to the document before the selection works
+ // around the issue.
+ // Interestingly, IE 10 & older don't seem to have the issue.
+ input = document.createElement( "input" );
+ input.setAttribute( "name", "" );
+ el.appendChild( input );
+ if ( !el.querySelectorAll( "[name='']" ).length ) {
+ rbuggyQSA.push( "\\[" + whitespace + "*name" + whitespace + "*=" +
+ whitespace + "*(?:''|\"\")" );
+ }
+
+ // Webkit/Opera - :checked should return selected option elements
+ // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked
+ // IE8 throws error here and will not see later tests
+ if ( !el.querySelectorAll( ":checked" ).length ) {
+ rbuggyQSA.push( ":checked" );
+ }
+
+ // Support: Safari 8+, iOS 8+
+ // https://bugs.webkit.org/show_bug.cgi?id=136851
+ // In-page `selector#id sibling-combinator selector` fails
+ if ( !el.querySelectorAll( "a#" + expando + "+*" ).length ) {
+ rbuggyQSA.push( ".#.+[+~]" );
+ }
+
+ // Support: Firefox <=3.6 - 5 only
+ // Old Firefox doesn't throw on a badly-escaped identifier.
+ el.querySelectorAll( "\\\f" );
+ rbuggyQSA.push( "[\\r\\n\\f]" );
+ } );
+
+ assert( function( el ) {
+ el.innerHTML = "<a href='' disabled='disabled'></a>" +
+ "<select disabled='disabled'><option/></select>";
+
+ // Support: Windows 8 Native Apps
+ // The type and name attributes are restricted during .innerHTML assignment
+ var input = document.createElement( "input" );
+ input.setAttribute( "type", "hidden" );
+ el.appendChild( input ).setAttribute( "name", "D" );
+
+ // Support: IE8
+ // Enforce case-sensitivity of name attribute
+ if ( el.querySelectorAll( "[name=d]" ).length ) {
+ rbuggyQSA.push( "name" + whitespace + "*[*^$|!~]?=" );
+ }
+
+ // FF 3.5 - :enabled/:disabled and hidden elements (hidden elements are still enabled)
+ // IE8 throws error here and will not see later tests
+ if ( el.querySelectorAll( ":enabled" ).length !== 2 ) {
+ rbuggyQSA.push( ":enabled", ":disabled" );
+ }
+
+ // Support: IE9-11+
+ // IE's :disabled selector does not pick up the children of disabled fieldsets
+ docElem.appendChild( el ).disabled = true;
+ if ( el.querySelectorAll( ":disabled" ).length !== 2 ) {
+ rbuggyQSA.push( ":enabled", ":disabled" );
+ }
+
+ // Support: Opera 10 - 11 only
+ // Opera 10-11 does not throw on post-comma invalid pseudos
+ el.querySelectorAll( "*,:x" );
+ rbuggyQSA.push( ",.*:" );
+ } );
+ }
+
+ if ( ( support.matchesSelector = rnative.test( ( matches = docElem.matches ||
+ docElem.webkitMatchesSelector ||
+ docElem.mozMatchesSelector ||
+ docElem.oMatchesSelector ||
+ docElem.msMatchesSelector ) ) ) ) {
+
+ assert( function( el ) {
+
+ // Check to see if it's possible to do matchesSelector
+ // on a disconnected node (IE 9)
+ support.disconnectedMatch = matches.call( el, "*" );
+
+ // This should fail with an exception
+ // Gecko does not error, returns false instead
+ matches.call( el, "[s!='']:x" );
+ rbuggyMatches.push( "!=", pseudos );
+ } );
+ }
+
+ rbuggyQSA = rbuggyQSA.length && new RegExp( rbuggyQSA.join( "|" ) );
+ rbuggyMatches = rbuggyMatches.length && new RegExp( rbuggyMatches.join( "|" ) );
+
+ /* Contains
+ ---------------------------------------------------------------------- */
+ hasCompare = rnative.test( docElem.compareDocumentPosition );
+
+ // Element contains another
+ // Purposefully self-exclusive
+ // As in, an element does not contain itself
+ contains = hasCompare || rnative.test( docElem.contains ) ?
+ function( a, b ) {
+ var adown = a.nodeType === 9 ? a.documentElement : a,
+ bup = b && b.parentNode;
+ return a === bup || !!( bup && bup.nodeType === 1 && (
+ adown.contains ?
+ adown.contains( bup ) :
+ a.compareDocumentPosition && a.compareDocumentPosition( bup ) & 16
+ ) );
+ } :
+ function( a, b ) {
+ if ( b ) {
+ while ( ( b = b.parentNode ) ) {
+ if ( b === a ) {
+ return true;
+ }
+ }
+ }
+ return false;
+ };
+
+ /* Sorting
+ ---------------------------------------------------------------------- */
+
+ // Document order sorting
+ sortOrder = hasCompare ?
+ function( a, b ) {
+
+ // Flag for duplicate removal
+ if ( a === b ) {
+ hasDuplicate = true;
+ return 0;
+ }
+
+ // Sort on method existence if only one input has compareDocumentPosition
+ var compare = !a.compareDocumentPosition - !b.compareDocumentPosition;
+ if ( compare ) {
+ return compare;
+ }
+
+ // Calculate position if both inputs belong to the same document
+ // Support: IE 11+, Edge 17 - 18+
+ // IE/Edge sometimes throw a "Permission denied" error when strict-comparing
+ // two documents; shallow comparisons work.
+ // eslint-disable-next-line eqeqeq
+ compare = ( a.ownerDocument || a ) == ( b.ownerDocument || b ) ?
+ a.compareDocumentPosition( b ) :
+
+ // Otherwise we know they are disconnected
+ 1;
+
+ // Disconnected nodes
+ if ( compare & 1 ||
+ ( !support.sortDetached && b.compareDocumentPosition( a ) === compare ) ) {
+
+ // Choose the first element that is related to our preferred document
+ // Support: IE 11+, Edge 17 - 18+
+ // IE/Edge sometimes throw a "Permission denied" error when strict-comparing
+ // two documents; shallow comparisons work.
+ // eslint-disable-next-line eqeqeq
+ if ( a == document || a.ownerDocument == preferredDoc &&
+ contains( preferredDoc, a ) ) {
+ return -1;
+ }
+
+ // Support: IE 11+, Edge 17 - 18+
+ // IE/Edge sometimes throw a "Permission denied" error when strict-comparing
+ // two documents; shallow comparisons work.
+ // eslint-disable-next-line eqeqeq
+ if ( b == document || b.ownerDocument == preferredDoc &&
+ contains( preferredDoc, b ) ) {
+ return 1;
+ }
+
+ // Maintain original order
+ return sortInput ?
+ ( indexOf( sortInput, a ) - indexOf( sortInput, b ) ) :
+ 0;
+ }
+
+ return compare & 4 ? -1 : 1;
+ } :
+ function( a, b ) {
+
+ // Exit early if the nodes are identical
+ if ( a === b ) {
+ hasDuplicate = true;
+ return 0;
+ }
+
+ var cur,
+ i = 0,
+ aup = a.parentNode,
+ bup = b.parentNode,
+ ap = [ a ],
+ bp = [ b ];
+
+ // Parentless nodes are either documents or disconnected
+ if ( !aup || !bup ) {
+
+ // Support: IE 11+, Edge 17 - 18+
+ // IE/Edge sometimes throw a "Permission denied" error when strict-comparing
+ // two documents; shallow comparisons work.
+ /* eslint-disable eqeqeq */
+ return a == document ? -1 :
+ b == document ? 1 :
+ /* eslint-enable eqeqeq */
+ aup ? -1 :
+ bup ? 1 :
+ sortInput ?
+ ( indexOf( sortInput, a ) - indexOf( sortInput, b ) ) :
+ 0;
+
+ // If the nodes are siblings, we can do a quick check
+ } else if ( aup === bup ) {
+ return siblingCheck( a, b );
+ }
+
+ // Otherwise we need full lists of their ancestors for comparison
+ cur = a;
+ while ( ( cur = cur.parentNode ) ) {
+ ap.unshift( cur );
+ }
+ cur = b;
+ while ( ( cur = cur.parentNode ) ) {
+ bp.unshift( cur );
+ }
+
+ // Walk down the tree looking for a discrepancy
+ while ( ap[ i ] === bp[ i ] ) {
+ i++;
+ }
+
+ return i ?
+
+ // Do a sibling check if the nodes have a common ancestor
+ siblingCheck( ap[ i ], bp[ i ] ) :
+
+ // Otherwise nodes in our document sort first
+ // Support: IE 11+, Edge 17 - 18+
+ // IE/Edge sometimes throw a "Permission denied" error when strict-comparing
+ // two documents; shallow comparisons work.
+ /* eslint-disable eqeqeq */
+ ap[ i ] == preferredDoc ? -1 :
+ bp[ i ] == preferredDoc ? 1 :
+ /* eslint-enable eqeqeq */
+ 0;
+ };
+
+ return document;
+};
+
+Sizzle.matches = function( expr, elements ) {
+ return Sizzle( expr, null, null, elements );
+};
+
+Sizzle.matchesSelector = function( elem, expr ) {
+ setDocument( elem );
+
+ if ( support.matchesSelector && documentIsHTML &&
+ !nonnativeSelectorCache[ expr + " " ] &&
+ ( !rbuggyMatches || !rbuggyMatches.test( expr ) ) &&
+ ( !rbuggyQSA || !rbuggyQSA.test( expr ) ) ) {
+
+ try {
+ var ret = matches.call( elem, expr );
+
+ // IE 9's matchesSelector returns false on disconnected nodes
+ if ( ret || support.disconnectedMatch ||
+
+ // As well, disconnected nodes are said to be in a document
+ // fragment in IE 9
+ elem.document && elem.document.nodeType !== 11 ) {
+ return ret;
+ }
+ } catch ( e ) {
+ nonnativeSelectorCache( expr, true );
+ }
+ }
+
+ return Sizzle( expr, document, null, [ elem ] ).length > 0;
+};
+
+Sizzle.contains = function( context, elem ) {
+
+ // Set document vars if needed
+ // Support: IE 11+, Edge 17 - 18+
+ // IE/Edge sometimes throw a "Permission denied" error when strict-comparing
+ // two documents; shallow comparisons work.
+ // eslint-disable-next-line eqeqeq
+ if ( ( context.ownerDocument || context ) != document ) {
+ setDocument( context );
+ }
+ return contains( context, elem );
+};
+
+Sizzle.attr = function( elem, name ) {
+
+ // Set document vars if needed
+ // Support: IE 11+, Edge 17 - 18+
+ // IE/Edge sometimes throw a "Permission denied" error when strict-comparing
+ // two documents; shallow comparisons work.
+ // eslint-disable-next-line eqeqeq
+ if ( ( elem.ownerDocument || elem ) != document ) {
+ setDocument( elem );
+ }
+
+ var fn = Expr.attrHandle[ name.toLowerCase() ],
+
+ // Don't get fooled by Object.prototype properties (jQuery #13807)
+ val = fn && hasOwn.call( Expr.attrHandle, name.toLowerCase() ) ?
+ fn( elem, name, !documentIsHTML ) :
+ undefined;
+
+ return val !== undefined ?
+ val :
+ support.attributes || !documentIsHTML ?
+ elem.getAttribute( name ) :
+ ( val = elem.getAttributeNode( name ) ) && val.specified ?
+ val.value :
+ null;
+};
+
+Sizzle.escape = function( sel ) {
+ return ( sel + "" ).replace( rcssescape, fcssescape );
+};
+
+Sizzle.error = function( msg ) {
+ throw new Error( "Syntax error, unrecognized expression: " + msg );
+};
+
+/**
+ * Document sorting and removing duplicates
+ * @param {ArrayLike} results
+ */
+Sizzle.uniqueSort = function( results ) {
+ var elem,
+ duplicates = [],
+ j = 0,
+ i = 0;
+
+ // Unless we *know* we can detect duplicates, assume their presence
+ hasDuplicate = !support.detectDuplicates;
+ sortInput = !support.sortStable && results.slice( 0 );
+ results.sort( sortOrder );
+
+ if ( hasDuplicate ) {
+ while ( ( elem = results[ i++ ] ) ) {
+ if ( elem === results[ i ] ) {
+ j = duplicates.push( i );
+ }
+ }
+ while ( j-- ) {
+ results.splice( duplicates[ j ], 1 );
+ }
+ }
+
+ // Clear input after sorting to release objects
+ // See https://github.com/jquery/sizzle/pull/225
+ sortInput = null;
+
+ return results;
+};
+
+/**
+ * Utility function for retrieving the text value of an array of DOM nodes
+ * @param {Array|Element} elem
+ */
+getText = Sizzle.getText = function( elem ) {
+ var node,
+ ret = "",
+ i = 0,
+ nodeType = elem.nodeType;
+
+ if ( !nodeType ) {
+
+ // If no nodeType, this is expected to be an array
+ while ( ( node = elem[ i++ ] ) ) {
+
+ // Do not traverse comment nodes
+ ret += getText( node );
+ }
+ } else if ( nodeType === 1 || nodeType === 9 || nodeType === 11 ) {
+
+ // Use textContent for elements
+ // innerText usage removed for consistency of new lines (jQuery #11153)
+ if ( typeof elem.textContent === "string" ) {
+ return elem.textContent;
+ } else {
+
+ // Traverse its children
+ for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) {
+ ret += getText( elem );
+ }
+ }
+ } else if ( nodeType === 3 || nodeType === 4 ) {
+ return elem.nodeValue;
+ }
+
+ // Do not include comment or processing instruction nodes
+
+ return ret;
+};
+
+Expr = Sizzle.selectors = {
+
+ // Can be adjusted by the user
+ cacheLength: 50,
+
+ createPseudo: markFunction,
+
+ match: matchExpr,
+
+ attrHandle: {},
+
+ find: {},
+
+ relative: {
+ ">": { dir: "parentNode", first: true },
+ " ": { dir: "parentNode" },
+ "+": { dir: "previousSibling", first: true },
+ "~": { dir: "previousSibling" }
+ },
+
+ preFilter: {
+ "ATTR": function( match ) {
+ match[ 1 ] = match[ 1 ].replace( runescape, funescape );
+
+ // Move the given value to match[3] whether quoted or unquoted
+ match[ 3 ] = ( match[ 3 ] || match[ 4 ] ||
+ match[ 5 ] || "" ).replace( runescape, funescape );
+
+ if ( match[ 2 ] === "~=" ) {
+ match[ 3 ] = " " + match[ 3 ] + " ";
+ }
+
+ return match.slice( 0, 4 );
+ },
+
+ "CHILD": function( match ) {
+
+ /* matches from matchExpr["CHILD"]
+ 1 type (only|nth|...)
+ 2 what (child|of-type)
+ 3 argument (even|odd|\d*|\d*n([+-]\d+)?|...)
+ 4 xn-component of xn+y argument ([+-]?\d*n|)
+ 5 sign of xn-component
+ 6 x of xn-component
+ 7 sign of y-component
+ 8 y of y-component
+ */
+ match[ 1 ] = match[ 1 ].toLowerCase();
+
+ if ( match[ 1 ].slice( 0, 3 ) === "nth" ) {
+
+ // nth-* requires argument
+ if ( !match[ 3 ] ) {
+ Sizzle.error( match[ 0 ] );
+ }
+
+ // numeric x and y parameters for Expr.filter.CHILD
+ // remember that false/true cast respectively to 0/1
+ match[ 4 ] = +( match[ 4 ] ?
+ match[ 5 ] + ( match[ 6 ] || 1 ) :
+ 2 * ( match[ 3 ] === "even" || match[ 3 ] === "odd" ) );
+ match[ 5 ] = +( ( match[ 7 ] + match[ 8 ] ) || match[ 3 ] === "odd" );
+
+ // other types prohibit arguments
+ } else if ( match[ 3 ] ) {
+ Sizzle.error( match[ 0 ] );
+ }
+
+ return match;
+ },
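+
+		// Worked sketch of the normalization above: for ":nth-child(2n+1)" the
+		// preFilter sets match[ 4 ] = 2 and match[ 5 ] = 1; ":nth-child(odd)"
+		// takes the even/odd branch and produces the same 2 and 1.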
+
+ "PSEUDO": function( match ) {
+ var excess,
+ unquoted = !match[ 6 ] && match[ 2 ];
+
+ if ( matchExpr[ "CHILD" ].test( match[ 0 ] ) ) {
+ return null;
+ }
+
+ // Accept quoted arguments as-is
+ if ( match[ 3 ] ) {
+ match[ 2 ] = match[ 4 ] || match[ 5 ] || "";
+
+ // Strip excess characters from unquoted arguments
+ } else if ( unquoted && rpseudo.test( unquoted ) &&
+
+ // Get excess from tokenize (recursively)
+ ( excess = tokenize( unquoted, true ) ) &&
+
+ // advance to the next closing parenthesis
+ ( excess = unquoted.indexOf( ")", unquoted.length - excess ) - unquoted.length ) ) {
+
+ // excess is a negative index
+ match[ 0 ] = match[ 0 ].slice( 0, excess );
+ match[ 2 ] = unquoted.slice( 0, excess );
+ }
+
+ // Return only captures needed by the pseudo filter method (type and argument)
+ return match.slice( 0, 3 );
+ }
+ },
+
+ filter: {
+
+ "TAG": function( nodeNameSelector ) {
+ var nodeName = nodeNameSelector.replace( runescape, funescape ).toLowerCase();
+ return nodeNameSelector === "*" ?
+ function() {
+ return true;
+ } :
+ function( elem ) {
+ return elem.nodeName && elem.nodeName.toLowerCase() === nodeName;
+ };
+ },
+
+ "CLASS": function( className ) {
+ var pattern = classCache[ className + " " ];
+
+ return pattern ||
+ ( pattern = new RegExp( "(^|" + whitespace +
+ ")" + className + "(" + whitespace + "|$)" ) ) && classCache(
+ className, function( elem ) {
+ return pattern.test(
+ typeof elem.className === "string" && elem.className ||
+ typeof elem.getAttribute !== "undefined" &&
+ elem.getAttribute( "class" ) ||
+ ""
+ );
+ } );
+ },
+
+ "ATTR": function( name, operator, check ) {
+ return function( elem ) {
+ var result = Sizzle.attr( elem, name );
+
+ if ( result == null ) {
+ return operator === "!=";
+ }
+ if ( !operator ) {
+ return true;
+ }
+
+ result += "";
+
+ /* eslint-disable max-len */
+
+ return operator === "=" ? result === check :
+ operator === "!=" ? result !== check :
+ operator === "^=" ? check && result.indexOf( check ) === 0 :
+ operator === "*=" ? check && result.indexOf( check ) > -1 :
+ operator === "$=" ? check && result.slice( -check.length ) === check :
+ operator === "~=" ? ( " " + result.replace( rwhitespace, " " ) + " " ).indexOf( check ) > -1 :
+ operator === "|=" ? result === check || result.slice( 0, check.length + 1 ) === check + "-" :
+ false;
+ /* eslint-enable max-len */
+
+ };
+ },
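+
+		// Sketch of the operator table above, assuming <a class="btn primary" lang="en-US">:
+		//   [class^="btn"]  matches (prefix)
+		//   [class~="btn"]  matches (whitespace-separated word)
+		//   [lang|="en"]    matches (exact value or "en-" prefix)
+		//   [class$="ary"]  matches (suffix)
+		//   [class*="zzz"]  does not match (substring)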
+
+ "CHILD": function( type, what, _argument, first, last ) {
+ var simple = type.slice( 0, 3 ) !== "nth",
+ forward = type.slice( -4 ) !== "last",
+ ofType = what === "of-type";
+
+ return first === 1 && last === 0 ?
+
+ // Shortcut for :nth-*(n)
+ function( elem ) {
+ return !!elem.parentNode;
+ } :
+
+ function( elem, _context, xml ) {
+ var cache, uniqueCache, outerCache, node, nodeIndex, start,
+ dir = simple !== forward ? "nextSibling" : "previousSibling",
+ parent = elem.parentNode,
+ name = ofType && elem.nodeName.toLowerCase(),
+ useCache = !xml && !ofType,
+ diff = false;
+
+ if ( parent ) {
+
+ // :(first|last|only)-(child|of-type)
+ if ( simple ) {
+ while ( dir ) {
+ node = elem;
+ while ( ( node = node[ dir ] ) ) {
+ if ( ofType ?
+ node.nodeName.toLowerCase() === name :
+ node.nodeType === 1 ) {
+
+ return false;
+ }
+ }
+
+ // Reverse direction for :only-* (if we haven't yet done so)
+ start = dir = type === "only" && !start && "nextSibling";
+ }
+ return true;
+ }
+
+ start = [ forward ? parent.firstChild : parent.lastChild ];
+
+ // non-xml :nth-child(...) stores cache data on `parent`
+ if ( forward && useCache ) {
+
+ // Seek `elem` from a previously-cached index
+
+ // ...in a gzip-friendly way
+ node = parent;
+ outerCache = node[ expando ] || ( node[ expando ] = {} );
+
+ // Support: IE <9 only
+ // Defend against cloned attroperties (jQuery gh-1709)
+ uniqueCache = outerCache[ node.uniqueID ] ||
+ ( outerCache[ node.uniqueID ] = {} );
+
+ cache = uniqueCache[ type ] || [];
+ nodeIndex = cache[ 0 ] === dirruns && cache[ 1 ];
+ diff = nodeIndex && cache[ 2 ];
+ node = nodeIndex && parent.childNodes[ nodeIndex ];
+
+ while ( ( node = ++nodeIndex && node && node[ dir ] ||
+
+ // Fallback to seeking `elem` from the start
+ ( diff = nodeIndex = 0 ) || start.pop() ) ) {
+
+ // When found, cache indexes on `parent` and break
+ if ( node.nodeType === 1 && ++diff && node === elem ) {
+ uniqueCache[ type ] = [ dirruns, nodeIndex, diff ];
+ break;
+ }
+ }
+
+ } else {
+
+ // Use previously-cached element index if available
+ if ( useCache ) {
+
+ // ...in a gzip-friendly way
+ node = elem;
+ outerCache = node[ expando ] || ( node[ expando ] = {} );
+
+ // Support: IE <9 only
+ // Defend against cloned attroperties (jQuery gh-1709)
+ uniqueCache = outerCache[ node.uniqueID ] ||
+ ( outerCache[ node.uniqueID ] = {} );
+
+ cache = uniqueCache[ type ] || [];
+ nodeIndex = cache[ 0 ] === dirruns && cache[ 1 ];
+ diff = nodeIndex;
+ }
+
+ // xml :nth-child(...)
+ // or :nth-last-child(...) or :nth(-last)?-of-type(...)
+ if ( diff === false ) {
+
+ // Use the same loop as above to seek `elem` from the start
+ while ( ( node = ++nodeIndex && node && node[ dir ] ||
+ ( diff = nodeIndex = 0 ) || start.pop() ) ) {
+
+ if ( ( ofType ?
+ node.nodeName.toLowerCase() === name :
+ node.nodeType === 1 ) &&
+ ++diff ) {
+
+ // Cache the index of each encountered element
+ if ( useCache ) {
+ outerCache = node[ expando ] ||
+ ( node[ expando ] = {} );
+
+ // Support: IE <9 only
+ // Defend against cloned attroperties (jQuery gh-1709)
+ uniqueCache = outerCache[ node.uniqueID ] ||
+ ( outerCache[ node.uniqueID ] = {} );
+
+ uniqueCache[ type ] = [ dirruns, diff ];
+ }
+
+ if ( node === elem ) {
+ break;
+ }
+ }
+ }
+ }
+ }
+
+ // Incorporate the offset, then check against cycle size
+ diff -= last;
+ return diff === first || ( diff % first === 0 && diff / first >= 0 );
+ }
+ };
+ },
+
+ "PSEUDO": function( pseudo, argument ) {
+
+ // pseudo-class names are case-insensitive
+ // http://www.w3.org/TR/selectors/#pseudo-classes
+ // Prioritize by case sensitivity in case custom pseudos are added with uppercase letters
+ // Remember that setFilters inherits from pseudos
+ var args,
+ fn = Expr.pseudos[ pseudo ] || Expr.setFilters[ pseudo.toLowerCase() ] ||
+ Sizzle.error( "unsupported pseudo: " + pseudo );
+
+ // The user may use createPseudo to indicate that
+ // arguments are needed to create the filter function
+ // just as Sizzle does
+ if ( fn[ expando ] ) {
+ return fn( argument );
+ }
+
+ // But maintain support for old signatures
+ if ( fn.length > 1 ) {
+ args = [ pseudo, pseudo, "", argument ];
+ return Expr.setFilters.hasOwnProperty( pseudo.toLowerCase() ) ?
+ markFunction( function( seed, matches ) {
+ var idx,
+ matched = fn( seed, argument ),
+ i = matched.length;
+ while ( i-- ) {
+ idx = indexOf( seed, matched[ i ] );
+ seed[ idx ] = !( matches[ idx ] = matched[ i ] );
+ }
+ } ) :
+ function( elem ) {
+ return fn( elem, 0, args );
+ };
+ }
+
+ return fn;
+ }
+ },
+
+ pseudos: {
+
+ // Potentially complex pseudos
+ "not": markFunction( function( selector ) {
+
+ // Trim the selector passed to compile
+ // to avoid treating leading and trailing
+ // spaces as combinators
+ var input = [],
+ results = [],
+ matcher = compile( selector.replace( rtrim, "$1" ) );
+
+ return matcher[ expando ] ?
+ markFunction( function( seed, matches, _context, xml ) {
+ var elem,
+ unmatched = matcher( seed, null, xml, [] ),
+ i = seed.length;
+
+ // Match elements unmatched by `matcher`
+ while ( i-- ) {
+ if ( ( elem = unmatched[ i ] ) ) {
+ seed[ i ] = !( matches[ i ] = elem );
+ }
+ }
+ } ) :
+ function( elem, _context, xml ) {
+ input[ 0 ] = elem;
+ matcher( input, null, xml, results );
+
+ // Don't keep the element (issue #299)
+ input[ 0 ] = null;
+ return !results.pop();
+ };
+ } ),
+
+ "has": markFunction( function( selector ) {
+ return function( elem ) {
+ return Sizzle( selector, elem ).length > 0;
+ };
+ } ),
+
+ "contains": markFunction( function( text ) {
+ text = text.replace( runescape, funescape );
+ return function( elem ) {
+ return ( elem.textContent || getText( elem ) ).indexOf( text ) > -1;
+ };
+ } ),
+
+ // "Whether an element is represented by a :lang() selector
+ // is based solely on the element's language value
+ // being equal to the identifier C,
+ // or beginning with the identifier C immediately followed by "-".
+ // The matching of C against the element's language value is performed case-insensitively.
+ // The identifier C does not have to be a valid language name."
+ // http://www.w3.org/TR/selectors/#lang-pseudo
+ "lang": markFunction( function( lang ) {
+
+ // lang value must be a valid identifier
+ if ( !ridentifier.test( lang || "" ) ) {
+ Sizzle.error( "unsupported lang: " + lang );
+ }
+ lang = lang.replace( runescape, funescape ).toLowerCase();
+ return function( elem ) {
+ var elemLang;
+ do {
+ if ( ( elemLang = documentIsHTML ?
+ elem.lang :
+ elem.getAttribute( "xml:lang" ) || elem.getAttribute( "lang" ) ) ) {
+
+ elemLang = elemLang.toLowerCase();
+ return elemLang === lang || elemLang.indexOf( lang + "-" ) === 0;
+ }
+ } while ( ( elem = elem.parentNode ) && elem.nodeType === 1 );
+ return false;
+ };
+ } ),
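+
+		// Sketch: given <div lang="en-US"><p></p></div>, the inner <p> matches
+		// both :lang(en) and :lang(en-US); the value is found by walking up
+		// parentNode until a lang attribute is seen.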
+
+ // Miscellaneous
+ "target": function( elem ) {
+ var hash = window.location && window.location.hash;
+ return hash && hash.slice( 1 ) === elem.id;
+ },
+
+ "root": function( elem ) {
+ return elem === docElem;
+ },
+
+ "focus": function( elem ) {
+ return elem === document.activeElement &&
+ ( !document.hasFocus || document.hasFocus() ) &&
+ !!( elem.type || elem.href || ~elem.tabIndex );
+ },
+
+ // Boolean properties
+ "enabled": createDisabledPseudo( false ),
+ "disabled": createDisabledPseudo( true ),
+
+ "checked": function( elem ) {
+
+ // In CSS3, :checked should return both checked and selected elements
+ // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked
+ var nodeName = elem.nodeName.toLowerCase();
+ return ( nodeName === "input" && !!elem.checked ) ||
+ ( nodeName === "option" && !!elem.selected );
+ },
+
+ "selected": function( elem ) {
+
+ // Accessing this property makes selected-by-default
+ // options in Safari work properly
+ if ( elem.parentNode ) {
+ // eslint-disable-next-line no-unused-expressions
+ elem.parentNode.selectedIndex;
+ }
+
+ return elem.selected === true;
+ },
+
+ // Contents
+ "empty": function( elem ) {
+
+ // http://www.w3.org/TR/selectors/#empty-pseudo
+ // :empty is negated by element (1) or content nodes (text: 3; cdata: 4; entity ref: 5),
+ // but not by others (comment: 8; processing instruction: 7; etc.)
+ // nodeType < 6 works because attributes (2) do not appear as children
+ for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) {
+ if ( elem.nodeType < 6 ) {
+ return false;
+ }
+ }
+ return true;
+ },
+
+ "parent": function( elem ) {
+ return !Expr.pseudos[ "empty" ]( elem );
+ },
+
+ // Element/input types
+ "header": function( elem ) {
+ return rheader.test( elem.nodeName );
+ },
+
+ "input": function( elem ) {
+ return rinputs.test( elem.nodeName );
+ },
+
+ "button": function( elem ) {
+ var name = elem.nodeName.toLowerCase();
+ return name === "input" && elem.type === "button" || name === "button";
+ },
+
+ "text": function( elem ) {
+ var attr;
+ return elem.nodeName.toLowerCase() === "input" &&
+ elem.type === "text" &&
+
+ // Support: IE<8
+ // New HTML5 attribute values (e.g., "search") appear with elem.type === "text"
+ ( ( attr = elem.getAttribute( "type" ) ) == null ||
+ attr.toLowerCase() === "text" );
+ },
+
+ // Position-in-collection
+ "first": createPositionalPseudo( function() {
+ return [ 0 ];
+ } ),
+
+ "last": createPositionalPseudo( function( _matchIndexes, length ) {
+ return [ length - 1 ];
+ } ),
+
+ "eq": createPositionalPseudo( function( _matchIndexes, length, argument ) {
+ return [ argument < 0 ? argument + length : argument ];
+ } ),
+
+ "even": createPositionalPseudo( function( matchIndexes, length ) {
+ var i = 0;
+ for ( ; i < length; i += 2 ) {
+ matchIndexes.push( i );
+ }
+ return matchIndexes;
+ } ),
+
+ "odd": createPositionalPseudo( function( matchIndexes, length ) {
+ var i = 1;
+ for ( ; i < length; i += 2 ) {
+ matchIndexes.push( i );
+ }
+ return matchIndexes;
+ } ),
+
+ "lt": createPositionalPseudo( function( matchIndexes, length, argument ) {
+ var i = argument < 0 ?
+ argument + length :
+ argument > length ?
+ length :
+ argument;
+ for ( ; --i >= 0; ) {
+ matchIndexes.push( i );
+ }
+ return matchIndexes;
+ } ),
+
+ "gt": createPositionalPseudo( function( matchIndexes, length, argument ) {
+ var i = argument < 0 ? argument + length : argument;
+ for ( ; ++i < length; ) {
+ matchIndexes.push( i );
+ }
+ return matchIndexes;
+ } )
+ }
+};
+
+Expr.pseudos[ "nth" ] = Expr.pseudos[ "eq" ];
+
+// Add button/input type pseudos
+for ( i in { radio: true, checkbox: true, file: true, password: true, image: true } ) {
+ Expr.pseudos[ i ] = createInputPseudo( i );
+}
+for ( i in { submit: true, reset: true } ) {
+ Expr.pseudos[ i ] = createButtonPseudo( i );
+}
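+
+// Sketch: with the loops above in place, Sizzle( "input:checkbox" ) matches
+// <input type="checkbox">, and ":submit" matches <input type="submit"> as
+// well as <button type="submit">.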
+
+// Easy API for creating new setFilters
+function setFilters() {}
+setFilters.prototype = Expr.filters = Expr.pseudos;
+Expr.setFilters = new setFilters();
+
+tokenize = Sizzle.tokenize = function( selector, parseOnly ) {
+ var matched, match, tokens, type,
+ soFar, groups, preFilters,
+ cached = tokenCache[ selector + " " ];
+
+ if ( cached ) {
+ return parseOnly ? 0 : cached.slice( 0 );
+ }
+
+ soFar = selector;
+ groups = [];
+ preFilters = Expr.preFilter;
+
+ while ( soFar ) {
+
+ // Comma and first run
+ if ( !matched || ( match = rcomma.exec( soFar ) ) ) {
+ if ( match ) {
+
+ // Don't consume trailing commas as valid
+ soFar = soFar.slice( match[ 0 ].length ) || soFar;
+ }
+ groups.push( ( tokens = [] ) );
+ }
+
+ matched = false;
+
+ // Combinators
+ if ( ( match = rcombinators.exec( soFar ) ) ) {
+ matched = match.shift();
+ tokens.push( {
+ value: matched,
+
+ // Cast descendant combinators to space
+ type: match[ 0 ].replace( rtrim, " " )
+ } );
+ soFar = soFar.slice( matched.length );
+ }
+
+ // Filters
+ for ( type in Expr.filter ) {
+ if ( ( match = matchExpr[ type ].exec( soFar ) ) && ( !preFilters[ type ] ||
+ ( match = preFilters[ type ]( match ) ) ) ) {
+ matched = match.shift();
+ tokens.push( {
+ value: matched,
+ type: type,
+ matches: match
+ } );
+ soFar = soFar.slice( matched.length );
+ }
+ }
+
+ if ( !matched ) {
+ break;
+ }
+ }
+
+ // Return the length of the invalid excess
+ // if we're just parsing
+ // Otherwise, throw an error or return tokens
+ return parseOnly ?
+ soFar.length :
+ soFar ?
+ Sizzle.error( selector ) :
+
+ // Cache the tokens
+ tokenCache( selector, groups ).slice( 0 );
+};
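+
+// Illustrative sketch: tokenize( "div > p.intro" ) returns one group shaped
+// roughly like
+//
+//   [ { value: "div", type: "TAG", matches: [ ... ] },
+//     { value: " > ", type: ">" },
+//     { value: "p", type: "TAG", matches: [ ... ] },
+//     { value: ".intro", type: "CLASS", matches: [ ... ] } ]
+//
+// while tokenize( selector, true ) returns the length of any trailing invalid
+// excess instead of throwing.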
+
+function toSelector( tokens ) {
+ var i = 0,
+ len = tokens.length,
+ selector = "";
+ for ( ; i < len; i++ ) {
+ selector += tokens[ i ].value;
+ }
+ return selector;
+}
+
+function addCombinator( matcher, combinator, base ) {
+ var dir = combinator.dir,
+ skip = combinator.next,
+ key = skip || dir,
+ checkNonElements = base && key === "parentNode",
+ doneName = done++;
+
+ return combinator.first ?
+
+ // Check against closest ancestor/preceding element
+ function( elem, context, xml ) {
+ while ( ( elem = elem[ dir ] ) ) {
+ if ( elem.nodeType === 1 || checkNonElements ) {
+ return matcher( elem, context, xml );
+ }
+ }
+ return false;
+ } :
+
+ // Check against all ancestor/preceding elements
+ function( elem, context, xml ) {
+ var oldCache, uniqueCache, outerCache,
+ newCache = [ dirruns, doneName ];
+
+ // We can't set arbitrary data on XML nodes, so they don't benefit from combinator caching
+ if ( xml ) {
+ while ( ( elem = elem[ dir ] ) ) {
+ if ( elem.nodeType === 1 || checkNonElements ) {
+ if ( matcher( elem, context, xml ) ) {
+ return true;
+ }
+ }
+ }
+ } else {
+ while ( ( elem = elem[ dir ] ) ) {
+ if ( elem.nodeType === 1 || checkNonElements ) {
+ outerCache = elem[ expando ] || ( elem[ expando ] = {} );
+
+ // Support: IE <9 only
+ // Defend against cloned attroperties (jQuery gh-1709)
+ uniqueCache = outerCache[ elem.uniqueID ] ||
+ ( outerCache[ elem.uniqueID ] = {} );
+
+ if ( skip && skip === elem.nodeName.toLowerCase() ) {
+ elem = elem[ dir ] || elem;
+ } else if ( ( oldCache = uniqueCache[ key ] ) &&
+ oldCache[ 0 ] === dirruns && oldCache[ 1 ] === doneName ) {
+
+ // Assign to newCache so results back-propagate to previous elements
+ return ( newCache[ 2 ] = oldCache[ 2 ] );
+ } else {
+
+							// Reuse newCache so results back-propagate to previous elements
+ uniqueCache[ key ] = newCache;
+
+ // A match means we're done; a fail means we have to keep checking
+ if ( ( newCache[ 2 ] = matcher( elem, context, xml ) ) ) {
+ return true;
+ }
+ }
+ }
+ }
+ }
+ return false;
+ };
+}
+
+function elementMatcher( matchers ) {
+ return matchers.length > 1 ?
+ function( elem, context, xml ) {
+ var i = matchers.length;
+ while ( i-- ) {
+ if ( !matchers[ i ]( elem, context, xml ) ) {
+ return false;
+ }
+ }
+ return true;
+ } :
+ matchers[ 0 ];
+}
+
+function multipleContexts( selector, contexts, results ) {
+ var i = 0,
+ len = contexts.length;
+ for ( ; i < len; i++ ) {
+ Sizzle( selector, contexts[ i ], results );
+ }
+ return results;
+}
+
+function condense( unmatched, map, filter, context, xml ) {
+ var elem,
+ newUnmatched = [],
+ i = 0,
+ len = unmatched.length,
+ mapped = map != null;
+
+ for ( ; i < len; i++ ) {
+ if ( ( elem = unmatched[ i ] ) ) {
+ if ( !filter || filter( elem, context, xml ) ) {
+ newUnmatched.push( elem );
+ if ( mapped ) {
+ map.push( i );
+ }
+ }
+ }
+ }
+
+ return newUnmatched;
+}
+
+function setMatcher( preFilter, selector, matcher, postFilter, postFinder, postSelector ) {
+ if ( postFilter && !postFilter[ expando ] ) {
+ postFilter = setMatcher( postFilter );
+ }
+ if ( postFinder && !postFinder[ expando ] ) {
+ postFinder = setMatcher( postFinder, postSelector );
+ }
+ return markFunction( function( seed, results, context, xml ) {
+ var temp, i, elem,
+ preMap = [],
+ postMap = [],
+ preexisting = results.length,
+
+ // Get initial elements from seed or context
+ elems = seed || multipleContexts(
+ selector || "*",
+ context.nodeType ? [ context ] : context,
+ []
+ ),
+
+ // Prefilter to get matcher input, preserving a map for seed-results synchronization
+ matcherIn = preFilter && ( seed || !selector ) ?
+ condense( elems, preMap, preFilter, context, xml ) :
+ elems,
+
+ matcherOut = matcher ?
+
+ // If we have a postFinder, or filtered seed, or non-seed postFilter or preexisting results,
+ postFinder || ( seed ? preFilter : preexisting || postFilter ) ?
+
+ // ...intermediate processing is necessary
+ [] :
+
+ // ...otherwise use results directly
+ results :
+ matcherIn;
+
+ // Find primary matches
+ if ( matcher ) {
+ matcher( matcherIn, matcherOut, context, xml );
+ }
+
+ // Apply postFilter
+ if ( postFilter ) {
+ temp = condense( matcherOut, postMap );
+ postFilter( temp, [], context, xml );
+
+ // Un-match failing elements by moving them back to matcherIn
+ i = temp.length;
+ while ( i-- ) {
+ if ( ( elem = temp[ i ] ) ) {
+ matcherOut[ postMap[ i ] ] = !( matcherIn[ postMap[ i ] ] = elem );
+ }
+ }
+ }
+
+ if ( seed ) {
+ if ( postFinder || preFilter ) {
+ if ( postFinder ) {
+
+ // Get the final matcherOut by condensing this intermediate into postFinder contexts
+ temp = [];
+ i = matcherOut.length;
+ while ( i-- ) {
+ if ( ( elem = matcherOut[ i ] ) ) {
+
+ // Restore matcherIn since elem is not yet a final match
+ temp.push( ( matcherIn[ i ] = elem ) );
+ }
+ }
+ postFinder( null, ( matcherOut = [] ), temp, xml );
+ }
+
+ // Move matched elements from seed to results to keep them synchronized
+ i = matcherOut.length;
+ while ( i-- ) {
+ if ( ( elem = matcherOut[ i ] ) &&
+ ( temp = postFinder ? indexOf( seed, elem ) : preMap[ i ] ) > -1 ) {
+
+ seed[ temp ] = !( results[ temp ] = elem );
+ }
+ }
+ }
+
+ // Add elements to results, through postFinder if defined
+ } else {
+ matcherOut = condense(
+ matcherOut === results ?
+ matcherOut.splice( preexisting, matcherOut.length ) :
+ matcherOut
+ );
+ if ( postFinder ) {
+ postFinder( null, results, matcherOut, xml );
+ } else {
+ push.apply( results, matcherOut );
+ }
+ }
+ } );
+}
+
+function matcherFromTokens( tokens ) {
+ var checkContext, matcher, j,
+ len = tokens.length,
+ leadingRelative = Expr.relative[ tokens[ 0 ].type ],
+ implicitRelative = leadingRelative || Expr.relative[ " " ],
+ i = leadingRelative ? 1 : 0,
+
+ // The foundational matcher ensures that elements are reachable from top-level context(s)
+ matchContext = addCombinator( function( elem ) {
+ return elem === checkContext;
+ }, implicitRelative, true ),
+ matchAnyContext = addCombinator( function( elem ) {
+ return indexOf( checkContext, elem ) > -1;
+ }, implicitRelative, true ),
+ matchers = [ function( elem, context, xml ) {
+ var ret = ( !leadingRelative && ( xml || context !== outermostContext ) ) || (
+ ( checkContext = context ).nodeType ?
+ matchContext( elem, context, xml ) :
+ matchAnyContext( elem, context, xml ) );
+
+ // Avoid hanging onto element (issue #299)
+ checkContext = null;
+ return ret;
+ } ];
+
+ for ( ; i < len; i++ ) {
+ if ( ( matcher = Expr.relative[ tokens[ i ].type ] ) ) {
+ matchers = [ addCombinator( elementMatcher( matchers ), matcher ) ];
+ } else {
+ matcher = Expr.filter[ tokens[ i ].type ].apply( null, tokens[ i ].matches );
+
+ // Return special upon seeing a positional matcher
+ if ( matcher[ expando ] ) {
+
+ // Find the next relative operator (if any) for proper handling
+ j = ++i;
+ for ( ; j < len; j++ ) {
+ if ( Expr.relative[ tokens[ j ].type ] ) {
+ break;
+ }
+ }
+ return setMatcher(
+ i > 1 && elementMatcher( matchers ),
+ i > 1 && toSelector(
+
+ // If the preceding token was a descendant combinator, insert an implicit any-element `*`
+ tokens
+ .slice( 0, i - 1 )
+ .concat( { value: tokens[ i - 2 ].type === " " ? "*" : "" } )
+ ).replace( rtrim, "$1" ),
+ matcher,
+ i < j && matcherFromTokens( tokens.slice( i, j ) ),
+ j < len && matcherFromTokens( ( tokens = tokens.slice( j ) ) ),
+ j < len && toSelector( tokens )
+ );
+ }
+ matchers.push( matcher );
+ }
+ }
+
+ return elementMatcher( matchers );
+}
+
+function matcherFromGroupMatchers( elementMatchers, setMatchers ) {
+ var bySet = setMatchers.length > 0,
+ byElement = elementMatchers.length > 0,
+ superMatcher = function( seed, context, xml, results, outermost ) {
+ var elem, j, matcher,
+ matchedCount = 0,
+ i = "0",
+ unmatched = seed && [],
+ setMatched = [],
+ contextBackup = outermostContext,
+
+ // We must always have either seed elements or outermost context
+ elems = seed || byElement && Expr.find[ "TAG" ]( "*", outermost ),
+
+ // Use integer dirruns iff this is the outermost matcher
+ dirrunsUnique = ( dirruns += contextBackup == null ? 1 : Math.random() || 0.1 ),
+ len = elems.length;
+
+ if ( outermost ) {
+
+ // Support: IE 11+, Edge 17 - 18+
+ // IE/Edge sometimes throw a "Permission denied" error when strict-comparing
+ // two documents; shallow comparisons work.
+ // eslint-disable-next-line eqeqeq
+ outermostContext = context == document || context || outermost;
+ }
+
+ // Add elements passing elementMatchers directly to results
+ // Support: IE<9, Safari
+ // Tolerate NodeList properties (IE: "length"; Safari: <number>) matching elements by id
+ for ( ; i !== len && ( elem = elems[ i ] ) != null; i++ ) {
+ if ( byElement && elem ) {
+ j = 0;
+
+ // Support: IE 11+, Edge 17 - 18+
+ // IE/Edge sometimes throw a "Permission denied" error when strict-comparing
+ // two documents; shallow comparisons work.
+ // eslint-disable-next-line eqeqeq
+ if ( !context && elem.ownerDocument != document ) {
+ setDocument( elem );
+ xml = !documentIsHTML;
+ }
+ while ( ( matcher = elementMatchers[ j++ ] ) ) {
+ if ( matcher( elem, context || document, xml ) ) {
+ results.push( elem );
+ break;
+ }
+ }
+ if ( outermost ) {
+ dirruns = dirrunsUnique;
+ }
+ }
+
+ // Track unmatched elements for set filters
+ if ( bySet ) {
+
+ // They will have gone through all possible matchers
+ if ( ( elem = !matcher && elem ) ) {
+ matchedCount--;
+ }
+
+ // Lengthen the array for every element, matched or not
+ if ( seed ) {
+ unmatched.push( elem );
+ }
+ }
+ }
+
+ // `i` is now the count of elements visited above, and adding it to `matchedCount`
+ // makes the latter nonnegative.
+ matchedCount += i;
+
+ // Apply set filters to unmatched elements
+ // NOTE: This can be skipped if there are no unmatched elements (i.e., `matchedCount`
+ // equals `i`), unless we didn't visit _any_ elements in the above loop because we have
+ // no element matchers and no seed.
+ // Incrementing an initially-string "0" `i` allows `i` to remain a string only in that
+ // case, which will result in a "00" `matchedCount` that differs from `i` but is also
+ // numerically zero.
+ if ( bySet && i !== matchedCount ) {
+ j = 0;
+ while ( ( matcher = setMatchers[ j++ ] ) ) {
+ matcher( unmatched, setMatched, context, xml );
+ }
+
+ if ( seed ) {
+
+ // Reintegrate element matches to eliminate the need for sorting
+ if ( matchedCount > 0 ) {
+ while ( i-- ) {
+ if ( !( unmatched[ i ] || setMatched[ i ] ) ) {
+ setMatched[ i ] = pop.call( results );
+ }
+ }
+ }
+
+ // Discard index placeholder values to get only actual matches
+ setMatched = condense( setMatched );
+ }
+
+ // Add matches to results
+ push.apply( results, setMatched );
+
+			// With no seed, set matches produced after multiple successful matchers may be out of document order, so sort
+ if ( outermost && !seed && setMatched.length > 0 &&
+ ( matchedCount + setMatchers.length ) > 1 ) {
+
+ Sizzle.uniqueSort( results );
+ }
+ }
+
+ // Override manipulation of globals by nested matchers
+ if ( outermost ) {
+ dirruns = dirrunsUnique;
+ outermostContext = contextBackup;
+ }
+
+ return unmatched;
+ };
+
+ return bySet ?
+ markFunction( superMatcher ) :
+ superMatcher;
+}
+
+compile = Sizzle.compile = function( selector, match /* Internal Use Only */ ) {
+ var i,
+ setMatchers = [],
+ elementMatchers = [],
+ cached = compilerCache[ selector + " " ];
+
+ if ( !cached ) {
+
+ // Generate a function of recursive functions that can be used to check each element
+ if ( !match ) {
+ match = tokenize( selector );
+ }
+ i = match.length;
+ while ( i-- ) {
+ cached = matcherFromTokens( match[ i ] );
+ if ( cached[ expando ] ) {
+ setMatchers.push( cached );
+ } else {
+ elementMatchers.push( cached );
+ }
+ }
+
+ // Cache the compiled function
+ cached = compilerCache(
+ selector,
+ matcherFromGroupMatchers( elementMatchers, setMatchers )
+ );
+
+ // Save selector and tokenization
+ cached.selector = selector;
+ }
+ return cached;
+};
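+
+// Usage sketch: compilation is cached, so repeated calls are cheap:
+//
+//   var fn = Sizzle.compile( "div > p" );
+//   fn === Sizzle.compile( "div > p" ); // true (served from compilerCache)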
+
+/**
+ * A low-level selection function that works with Sizzle's compiled
+ * selector functions
+ * @param {String|Function} selector A selector or a pre-compiled
+ * selector function built with Sizzle.compile
+ * @param {Element} context
+ * @param {Array} [results]
+ * @param {Array} [seed] A set of elements to match against
+ */
+select = Sizzle.select = function( selector, context, results, seed ) {
+ var i, tokens, token, type, find,
+ compiled = typeof selector === "function" && selector,
+ match = !seed && tokenize( ( selector = compiled.selector || selector ) );
+
+ results = results || [];
+
+ // Try to minimize operations if there is only one selector in the list and no seed
+ // (the latter of which guarantees us context)
+ if ( match.length === 1 ) {
+
+ // Reduce context if the leading compound selector is an ID
+ tokens = match[ 0 ] = match[ 0 ].slice( 0 );
+ if ( tokens.length > 2 && ( token = tokens[ 0 ] ).type === "ID" &&
+ context.nodeType === 9 && documentIsHTML && Expr.relative[ tokens[ 1 ].type ] ) {
+
+ context = ( Expr.find[ "ID" ]( token.matches[ 0 ]
+ .replace( runescape, funescape ), context ) || [] )[ 0 ];
+ if ( !context ) {
+ return results;
+
+ // Precompiled matchers will still verify ancestry, so step up a level
+ } else if ( compiled ) {
+ context = context.parentNode;
+ }
+
+ selector = selector.slice( tokens.shift().value.length );
+ }
+
+ // Fetch a seed set for right-to-left matching
+ i = matchExpr[ "needsContext" ].test( selector ) ? 0 : tokens.length;
+ while ( i-- ) {
+ token = tokens[ i ];
+
+ // Abort if we hit a combinator
+ if ( Expr.relative[ ( type = token.type ) ] ) {
+ break;
+ }
+ if ( ( find = Expr.find[ type ] ) ) {
+
+ // Search, expanding context for leading sibling combinators
+ if ( ( seed = find(
+ token.matches[ 0 ].replace( runescape, funescape ),
+ rsibling.test( tokens[ 0 ].type ) && testContext( context.parentNode ) ||
+ context
+ ) ) ) {
+
+ // If seed is empty or no tokens remain, we can return early
+ tokens.splice( i, 1 );
+ selector = seed.length && toSelector( tokens );
+ if ( !selector ) {
+ push.apply( results, seed );
+ return results;
+ }
+
+ break;
+ }
+ }
+ }
+ }
+
+ // Compile and execute a filtering function if one is not provided
+ // Provide `match` to avoid retokenization if we modified the selector above
+ ( compiled || compile( selector, match ) )(
+ seed,
+ context,
+ !documentIsHTML,
+ results,
+ !context || rsibling.test( selector ) && testContext( context.parentNode ) || context
+ );
+ return results;
+};
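+
+// Usage sketch of the single-group fast path above (assumes an HTML document
+// containing <div id="main">):
+//
+//   Sizzle.select( "#main .item", document, [] );
+//   // the leading ID token narrows `context` to #main before the rest runs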
+
+// One-time assignments
+
+// Sort stability
+support.sortStable = expando.split( "" ).sort( sortOrder ).join( "" ) === expando;
+
+// Support: Chrome 14-35+
+// Always assume duplicates if they aren't passed to the comparison function
+support.detectDuplicates = !!hasDuplicate;
+
+// Initialize against the default document
+setDocument();
+
+// Support: Webkit<537.32 - Safari 6.0.3/Chrome 25 (fixed in Chrome 27)
+// Detached nodes confoundingly follow *each other*
+support.sortDetached = assert( function( el ) {
+
+ // Should return 1, but returns 4 (following)
+ return el.compareDocumentPosition( document.createElement( "fieldset" ) ) & 1;
+} );
+
+// Support: IE<8
+// Prevent attribute/property "interpolation"
+// https://msdn.microsoft.com/en-us/library/ms536429%28VS.85%29.aspx
+if ( !assert( function( el ) {
+ el.innerHTML = "<a href='#'></a>";
+ return el.firstChild.getAttribute( "href" ) === "#";
+} ) ) {
+ addHandle( "type|href|height|width", function( elem, name, isXML ) {
+ if ( !isXML ) {
+ return elem.getAttribute( name, name.toLowerCase() === "type" ? 1 : 2 );
+ }
+ } );
+}
+
+// Support: IE<9
+// Use defaultValue in place of getAttribute("value")
+if ( !support.attributes || !assert( function( el ) {
+ el.innerHTML = "<input/>";
+ el.firstChild.setAttribute( "value", "" );
+ return el.firstChild.getAttribute( "value" ) === "";
+} ) ) {
+ addHandle( "value", function( elem, _name, isXML ) {
+ if ( !isXML && elem.nodeName.toLowerCase() === "input" ) {
+ return elem.defaultValue;
+ }
+ } );
+}
+
+// Support: IE<9
+// Use getAttributeNode to fetch booleans when getAttribute lies
+if ( !assert( function( el ) {
+ return el.getAttribute( "disabled" ) == null;
+} ) ) {
+ addHandle( booleans, function( elem, name, isXML ) {
+ var val;
+ if ( !isXML ) {
+ return elem[ name ] === true ? name.toLowerCase() :
+ ( val = elem.getAttributeNode( name ) ) && val.specified ?
+ val.value :
+ null;
+ }
+ } );
+}
+
+return Sizzle;
+
+} )( window );
+
+
+
+jQuery.find = Sizzle;
+jQuery.expr = Sizzle.selectors;
+
+// Deprecated
+jQuery.expr[ ":" ] = jQuery.expr.pseudos;
+jQuery.uniqueSort = jQuery.unique = Sizzle.uniqueSort;
+jQuery.text = Sizzle.getText;
+jQuery.isXMLDoc = Sizzle.isXML;
+jQuery.contains = Sizzle.contains;
+jQuery.escapeSelector = Sizzle.escape;
+
+
+
+
+var dir = function( elem, dir, until ) {
+ var matched = [],
+ truncate = until !== undefined;
+
+ while ( ( elem = elem[ dir ] ) && elem.nodeType !== 9 ) {
+ if ( elem.nodeType === 1 ) {
+ if ( truncate && jQuery( elem ).is( until ) ) {
+ break;
+ }
+ matched.push( elem );
+ }
+ }
+ return matched;
+};
+
+
+var siblings = function( n, elem ) {
+ var matched = [];
+
+ for ( ; n; n = n.nextSibling ) {
+ if ( n.nodeType === 1 && n !== elem ) {
+ matched.push( n );
+ }
+ }
+
+ return matched;
+};
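+
+// Sketch: dir( elem, "parentNode" ) collects ancestor elements up to (but not
+// including) the document; siblings( elem.parentNode.firstChild, elem )
+// collects elem's element siblings, excluding elem itself.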
+
+
+var rneedsContext = jQuery.expr.match.needsContext;
+
+
+
+function nodeName( elem, name ) {
+
+ return elem.nodeName && elem.nodeName.toLowerCase() === name.toLowerCase();
+
+}
+var rsingleTag = ( /^<([a-z][^\/\0>:\x20\t\r\n\f]*)[\x20\t\r\n\f]*\/?>(?:<\/\1>|)$/i );
+
+
+
+// Implement the identical functionality for filter and not
+function winnow( elements, qualifier, not ) {
+ if ( isFunction( qualifier ) ) {
+ return jQuery.grep( elements, function( elem, i ) {
+ return !!qualifier.call( elem, i, elem ) !== not;
+ } );
+ }
+
+ // Single element
+ if ( qualifier.nodeType ) {
+ return jQuery.grep( elements, function( elem ) {
+ return ( elem === qualifier ) !== not;
+ } );
+ }
+
+ // Arraylike of elements (jQuery, arguments, Array)
+ if ( typeof qualifier !== "string" ) {
+ return jQuery.grep( elements, function( elem ) {
+ return ( indexOf.call( qualifier, elem ) > -1 ) !== not;
+ } );
+ }
+
+ // Filtered directly for both simple and complex selectors
+ return jQuery.filter( qualifier, elements, not );
+}
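+
+// Sketch of the four qualifier shapes accepted above:
+//
+//   winnow( elems, function( i, el ) { return i % 2 === 0; }, false ); // function
+//   winnow( elems, firstElem, false );                                 // single node
+//   winnow( elems, jQuery( "p" ), false );                             // array-like
+//   winnow( elems, "p.intro", false );                                 // selector string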
+
+jQuery.filter = function( expr, elems, not ) {
+ var elem = elems[ 0 ];
+
+ if ( not ) {
+ expr = ":not(" + expr + ")";
+ }
+
+ if ( elems.length === 1 && elem.nodeType === 1 ) {
+ return jQuery.find.matchesSelector( elem, expr ) ? [ elem ] : [];
+ }
+
+ return jQuery.find.matches( expr, jQuery.grep( elems, function( elem ) {
+ return elem.nodeType === 1;
+ } ) );
+};
+
+jQuery.fn.extend( {
+ find: function( selector ) {
+ var i, ret,
+ len = this.length,
+ self = this;
+
+ if ( typeof selector !== "string" ) {
+ return this.pushStack( jQuery( selector ).filter( function() {
+ for ( i = 0; i < len; i++ ) {
+ if ( jQuery.contains( self[ i ], this ) ) {
+ return true;
+ }
+ }
+ } ) );
+ }
+
+ ret = this.pushStack( [] );
+
+ for ( i = 0; i < len; i++ ) {
+ jQuery.find( selector, self[ i ], ret );
+ }
+
+ return len > 1 ? jQuery.uniqueSort( ret ) : ret;
+ },
+ filter: function( selector ) {
+ return this.pushStack( winnow( this, selector || [], false ) );
+ },
+ not: function( selector ) {
+ return this.pushStack( winnow( this, selector || [], true ) );
+ },
+ is: function( selector ) {
+ return !!winnow(
+ this,
+
+ // If this is a positional/relative selector, check membership in the returned set
+ // so $("p:first").is("p:last") won't return true for a doc with two "p".
+ typeof selector === "string" && rneedsContext.test( selector ) ?
+ jQuery( selector ) :
+ selector || [],
+ false
+ ).length;
+ }
+} );
+
+
+// Initialize a jQuery object
+
+
+// A central reference to the root jQuery(document)
+var rootjQuery,
+
+ // A simple way to check for HTML strings
+ // Prioritize #id over <tag> to avoid XSS via location.hash (#9521)
+ // Strict HTML recognition (#11290: must start with <)
+ // Shortcut simple #id case for speed
+ rquickExpr = /^(?:\s*(<[\w\W]+>)[^>]*|#([\w-]+))$/,
+
+ init = jQuery.fn.init = function( selector, context, root ) {
+ var match, elem;
+
+ // HANDLE: $(""), $(null), $(undefined), $(false)
+ if ( !selector ) {
+ return this;
+ }
+
+ // Method init() accepts an alternate rootjQuery
+ // so migrate can support jQuery.sub (gh-2101)
+ root = root || rootjQuery;
+
+ // Handle HTML strings
+ if ( typeof selector === "string" ) {
+ if ( selector[ 0 ] === "<" &&
+ selector[ selector.length - 1 ] === ">" &&
+ selector.length >= 3 ) {
+
+ // Assume that strings that start and end with <> are HTML and skip the regex check
+ match = [ null, selector, null ];
+
+ } else {
+ match = rquickExpr.exec( selector );
+ }
+
+ // Match html or make sure no context is specified for #id
+ if ( match && ( match[ 1 ] || !context ) ) {
+
+ // HANDLE: $(html) -> $(array)
+ if ( match[ 1 ] ) {
+ context = context instanceof jQuery ? context[ 0 ] : context;
+
+ // Option to run scripts is true for back-compat
+ // Intentionally let the error be thrown if parseHTML is not present
+ jQuery.merge( this, jQuery.parseHTML(
+ match[ 1 ],
+ context && context.nodeType ? context.ownerDocument || context : document,
+ true
+ ) );
+
+ // HANDLE: $(html, props)
+ if ( rsingleTag.test( match[ 1 ] ) && jQuery.isPlainObject( context ) ) {
+ for ( match in context ) {
+
+ // Properties of context are called as methods if possible
+ if ( isFunction( this[ match ] ) ) {
+ this[ match ]( context[ match ] );
+
+ // ...and otherwise set as attributes
+ } else {
+ this.attr( match, context[ match ] );
+ }
+ }
+ }
+
+ return this;
+
+ // HANDLE: $(#id)
+ } else {
+ elem = document.getElementById( match[ 2 ] );
+
+ if ( elem ) {
+
+ // Inject the element directly into the jQuery object
+ this[ 0 ] = elem;
+ this.length = 1;
+ }
+ return this;
+ }
+
+ // HANDLE: $(expr, $(...))
+ } else if ( !context || context.jquery ) {
+ return ( context || root ).find( selector );
+
+ // HANDLE: $(expr, context)
+			// (which is just equivalent to: $(context).find(expr))
+ } else {
+ return this.constructor( context ).find( selector );
+ }
+
+ // HANDLE: $(DOMElement)
+ } else if ( selector.nodeType ) {
+ this[ 0 ] = selector;
+ this.length = 1;
+ return this;
+
+ // HANDLE: $(function)
+ // Shortcut for document ready
+ } else if ( isFunction( selector ) ) {
+ return root.ready !== undefined ?
+ root.ready( selector ) :
+
+ // Execute immediately if ready is not present
+ selector( jQuery );
+ }
+
+ return jQuery.makeArray( selector, this );
+ };
+
+// Give the init function the jQuery prototype for later instantiation
+init.prototype = jQuery.fn;
+
+// Initialize central reference
+rootjQuery = jQuery( document );
+
+
+var rparentsprev = /^(?:parents|prev(?:Until|All))/,
+
+ // Methods guaranteed to produce a unique set when starting from a unique set
+ guaranteedUnique = {
+ children: true,
+ contents: true,
+ next: true,
+ prev: true
+ };
+
+jQuery.fn.extend( {
+ has: function( target ) {
+ var targets = jQuery( target, this ),
+ l = targets.length;
+
+ return this.filter( function() {
+ var i = 0;
+ for ( ; i < l; i++ ) {
+ if ( jQuery.contains( this, targets[ i ] ) ) {
+ return true;
+ }
+ }
+ } );
+ },
+
+ closest: function( selectors, context ) {
+ var cur,
+ i = 0,
+ l = this.length,
+ matched = [],
+ targets = typeof selectors !== "string" && jQuery( selectors );
+
+ // Positional selectors never match, since there's no _selection_ context
+ if ( !rneedsContext.test( selectors ) ) {
+ for ( ; i < l; i++ ) {
+ for ( cur = this[ i ]; cur && cur !== context; cur = cur.parentNode ) {
+
+ // Always skip document fragments
+ if ( cur.nodeType < 11 && ( targets ?
+ targets.index( cur ) > -1 :
+
+ // Don't pass non-elements to Sizzle
+ cur.nodeType === 1 &&
+ jQuery.find.matchesSelector( cur, selectors ) ) ) {
+
+ matched.push( cur );
+ break;
+ }
+ }
+ }
+ }
+
+ return this.pushStack( matched.length > 1 ? jQuery.uniqueSort( matched ) : matched );
+ },
+
+ // Determine the position of an element within the set
+ index: function( elem ) {
+
+ // No argument, return index in parent
+ if ( !elem ) {
+ return ( this[ 0 ] && this[ 0 ].parentNode ) ? this.first().prevAll().length : -1;
+ }
+
+ // Index in selector
+ if ( typeof elem === "string" ) {
+ return indexOf.call( jQuery( elem ), this[ 0 ] );
+ }
+
+ // Locate the position of the desired element
+ return indexOf.call( this,
+
+ // If it receives a jQuery object, the first element is used
+ elem.jquery ? elem[ 0 ] : elem
+ );
+ },
+
+ add: function( selector, context ) {
+ return this.pushStack(
+ jQuery.uniqueSort(
+ jQuery.merge( this.get(), jQuery( selector, context ) )
+ )
+ );
+ },
+
+ addBack: function( selector ) {
+ return this.add( selector == null ?
+ this.prevObject : this.prevObject.filter( selector )
+ );
+ }
+} );
+
+function sibling( cur, dir ) {
+ while ( ( cur = cur[ dir ] ) && cur.nodeType !== 1 ) {}
+ return cur;
+}
+
+jQuery.each( {
+ parent: function( elem ) {
+ var parent = elem.parentNode;
+ return parent && parent.nodeType !== 11 ? parent : null;
+ },
+ parents: function( elem ) {
+ return dir( elem, "parentNode" );
+ },
+ parentsUntil: function( elem, _i, until ) {
+ return dir( elem, "parentNode", until );
+ },
+ next: function( elem ) {
+ return sibling( elem, "nextSibling" );
+ },
+ prev: function( elem ) {
+ return sibling( elem, "previousSibling" );
+ },
+ nextAll: function( elem ) {
+ return dir( elem, "nextSibling" );
+ },
+ prevAll: function( elem ) {
+ return dir( elem, "previousSibling" );
+ },
+ nextUntil: function( elem, _i, until ) {
+ return dir( elem, "nextSibling", until );
+ },
+ prevUntil: function( elem, _i, until ) {
+ return dir( elem, "previousSibling", until );
+ },
+ siblings: function( elem ) {
+ return siblings( ( elem.parentNode || {} ).firstChild, elem );
+ },
+ children: function( elem ) {
+ return siblings( elem.firstChild );
+ },
+ contents: function( elem ) {
+ if ( elem.contentDocument != null &&
+
+ // Support: IE 11+
+			// <object> elements with no `data` attribute have an object
+ // `contentDocument` with a `null` prototype.
+ getProto( elem.contentDocument ) ) {
+
+ return elem.contentDocument;
+ }
+
+ // Support: IE 9 - 11 only, iOS 7 only, Android Browser <=4.3 only
+ // Treat the template element as a regular one in browsers that
+ // don't support it.
+ if ( nodeName( elem, "template" ) ) {
+ elem = elem.content || elem;
+ }
+
+ return jQuery.merge( [], elem.childNodes );
+ }
+}, function( name, fn ) {
+ jQuery.fn[ name ] = function( until, selector ) {
+ var matched = jQuery.map( this, fn, until );
+
+ if ( name.slice( -5 ) !== "Until" ) {
+ selector = until;
+ }
+
+ if ( selector && typeof selector === "string" ) {
+ matched = jQuery.filter( selector, matched );
+ }
+
+ if ( this.length > 1 ) {
+
+ // Remove duplicates
+ if ( !guaranteedUnique[ name ] ) {
+ jQuery.uniqueSort( matched );
+ }
+
+ // Reverse order for parents* and prev-derivatives
+ if ( rparentsprev.test( name ) ) {
+ matched.reverse();
+ }
+ }
+
+ return this.pushStack( matched );
+ };
+} );
+var rnothtmlwhite = ( /[^\x20\t\r\n\f]+/g );
+
+
+
+// Convert String-formatted options into Object-formatted ones
+function createOptions( options ) {
+ var object = {};
+ jQuery.each( options.match( rnothtmlwhite ) || [], function( _, flag ) {
+ object[ flag ] = true;
+ } );
+ return object;
+}
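+
+// Sketch: createOptions( "once memory" ) yields { once: true, memory: true }.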
+
+/*
+ * Create a callback list using the following parameters:
+ *
+ * options: an optional list of space-separated options that will change how
+ * the callback list behaves or a more traditional option object
+ *
+ * By default a callback list will act like an event callback list and can be
+ * "fired" multiple times.
+ *
+ * Possible options:
+ *
+ * once: will ensure the callback list can only be fired once (like a Deferred)
+ *
+ *	memory:			will keep track of previous values and, once the list has been
+ *					fired, will immediately call any callback added later with the
+ *					latest "memorized" values (like a Deferred)
+ *
+ * unique: will ensure a callback can only be added once (no duplicate in the list)
+ *
+ *	stopOnFalse:	stop calling subsequent callbacks when a callback returns false
+ *
+ */
+jQuery.Callbacks = function( options ) {
+
+ // Convert options from String-formatted to Object-formatted if needed
+ // (we check in cache first)
+ options = typeof options === "string" ?
+ createOptions( options ) :
+ jQuery.extend( {}, options );
+
+ var // Flag to know if list is currently firing
+ firing,
+
+ // Last fire value for non-forgettable lists
+ memory,
+
+ // Flag to know if list was already fired
+ fired,
+
+ // Flag to prevent firing
+ locked,
+
+ // Actual callback list
+ list = [],
+
+ // Queue of execution data for repeatable lists
+ queue = [],
+
+ // Index of currently firing callback (modified by add/remove as needed)
+ firingIndex = -1,
+
+ // Fire callbacks
+ fire = function() {
+
+ // Enforce single-firing
+ locked = locked || options.once;
+
+ // Execute callbacks for all pending executions,
+ // respecting firingIndex overrides and runtime changes
+ fired = firing = true;
+ for ( ; queue.length; firingIndex = -1 ) {
+ memory = queue.shift();
+ while ( ++firingIndex < list.length ) {
+
+ // Run callback and check for early termination
+ if ( list[ firingIndex ].apply( memory[ 0 ], memory[ 1 ] ) === false &&
+ options.stopOnFalse ) {
+
+ // Jump to end and forget the data so .add doesn't re-fire
+ firingIndex = list.length;
+ memory = false;
+ }
+ }
+ }
+
+ // Forget the data if we're done with it
+ if ( !options.memory ) {
+ memory = false;
+ }
+
+ firing = false;
+
+ // Clean up if we're done firing for good
+ if ( locked ) {
+
+ // Keep an empty list if we have data for future add calls
+ if ( memory ) {
+ list = [];
+
+ // Otherwise, this object is spent
+ } else {
+ list = "";
+ }
+ }
+ },
+
+ // Actual Callbacks object
+ self = {
+
+ // Add a callback or a collection of callbacks to the list
+ add: function() {
+ if ( list ) {
+
+ // If we have memory from a past run, we should fire after adding
+ if ( memory && !firing ) {
+ firingIndex = list.length - 1;
+ queue.push( memory );
+ }
+
+ ( function add( args ) {
+ jQuery.each( args, function( _, arg ) {
+ if ( isFunction( arg ) ) {
+ if ( !options.unique || !self.has( arg ) ) {
+ list.push( arg );
+ }
+ } else if ( arg && arg.length && toType( arg ) !== "string" ) {
+
+ // Inspect recursively
+ add( arg );
+ }
+ } );
+ } )( arguments );
+
+ if ( memory && !firing ) {
+ fire();
+ }
+ }
+ return this;
+ },
+
+ // Remove a callback from the list
+ remove: function() {
+ jQuery.each( arguments, function( _, arg ) {
+ var index;
+ while ( ( index = jQuery.inArray( arg, list, index ) ) > -1 ) {
+ list.splice( index, 1 );
+
+ // Handle firing indexes
+ if ( index <= firingIndex ) {
+ firingIndex--;
+ }
+ }
+ } );
+ return this;
+ },
+
+ // Check if a given callback is in the list.
+ // If no argument is given, return whether or not list has callbacks attached.
+ has: function( fn ) {
+ return fn ?
+ jQuery.inArray( fn, list ) > -1 :
+ list.length > 0;
+ },
+
+ // Remove all callbacks from the list
+ empty: function() {
+ if ( list ) {
+ list = [];
+ }
+ return this;
+ },
+
+ // Disable .fire and .add
+ // Abort any current/pending executions
+ // Clear all callbacks and values
+ disable: function() {
+ locked = queue = [];
+ list = memory = "";
+ return this;
+ },
+ disabled: function() {
+ return !list;
+ },
+
+ // Disable .fire
+ // Also disable .add unless we have memory (since it would have no effect)
+ // Abort any pending executions
+ lock: function() {
+ locked = queue = [];
+ if ( !memory && !firing ) {
+ list = memory = "";
+ }
+ return this;
+ },
+ locked: function() {
+ return !!locked;
+ },
+
+ // Call all callbacks with the given context and arguments
+ fireWith: function( context, args ) {
+ if ( !locked ) {
+ args = args || [];
+ args = [ context, args.slice ? args.slice() : args ];
+ queue.push( args );
+ if ( !firing ) {
+ fire();
+ }
+ }
+ return this;
+ },
+
+ // Call all the callbacks with the given arguments
+ fire: function() {
+ self.fireWith( this, arguments );
+ return this;
+ },
+
+ // To know if the callbacks have already been called at least once
+ fired: function() {
+ return !!fired;
+ }
+ };
+
+ return self;
+};
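+
+// Usage sketch for the option flags documented above:
+//
+//   var callbacks = jQuery.Callbacks( "once memory" );
+//   callbacks.add( function( v ) { console.log( "first", v ); } );
+//   callbacks.fire( 42 );      // logs "first 42", then locks (once)
+//   callbacks.add( function( v ) { console.log( "late", v ); } );
+//                              // logs "late 42" immediately (memory)
+//   callbacks.fire( 99 );      // ignored (once)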
+
+
+function Identity( v ) {
+ return v;
+}
+function Thrower( ex ) {
+ throw ex;
+}
+
+function adoptValue( value, resolve, reject, noValue ) {
+ var method;
+
+ try {
+
+ // Check for promise aspect first to privilege synchronous behavior
+ if ( value && isFunction( ( method = value.promise ) ) ) {
+ method.call( value ).done( resolve ).fail( reject );
+
+ // Other thenables
+ } else if ( value && isFunction( ( method = value.then ) ) ) {
+ method.call( value, resolve, reject );
+
+ // Other non-thenables
+ } else {
+
+ // Control `resolve` arguments by letting Array#slice cast boolean `noValue` to integer:
+ // * false: [ value ].slice( 0 ) => resolve( value )
+ // * true: [ value ].slice( 1 ) => resolve()
+ resolve.apply( undefined, [ value ].slice( noValue ) );
+ }
+
+ // For Promises/A+, convert exceptions into rejections
+ // Since jQuery.when doesn't unwrap thenables, we can skip the extra checks appearing in
+ // Deferred#then to conditionally suppress rejection.
+ } catch ( value ) {
+
+ // Support: Android 4.0 only
+ // Strict mode functions invoked without .call/.apply get global-object context
+ reject.apply( undefined, [ value ] );
+ }
+}
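+
+// Sketch of the three branches above; onOk and onFail stand in for any
+// resolve/reject callback pair:
+//
+//   adoptValue( jQuery.Deferred().resolve( 1 ), onOk, onFail );       // promise aspect
+//   adoptValue( { then: function( ok ) { ok( 2 ); } }, onOk, onFail ); // bare thenable
+//   adoptValue( 3, onOk, onFail, false );                             // plain value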
+
+jQuery.extend( {
+
+ Deferred: function( func ) {
+ var tuples = [
+
+ // action, add listener, callbacks,
+ // ... .then handlers, argument index, [final state]
+ [ "notify", "progress", jQuery.Callbacks( "memory" ),
+ jQuery.Callbacks( "memory" ), 2 ],
+ [ "resolve", "done", jQuery.Callbacks( "once memory" ),
+ jQuery.Callbacks( "once memory" ), 0, "resolved" ],
+ [ "reject", "fail", jQuery.Callbacks( "once memory" ),
+ jQuery.Callbacks( "once memory" ), 1, "rejected" ]
+ ],
+ state = "pending",
+ promise = {
+ state: function() {
+ return state;
+ },
+ always: function() {
+ deferred.done( arguments ).fail( arguments );
+ return this;
+ },
+ "catch": function( fn ) {
+ return promise.then( null, fn );
+ },
+
+ // Keep pipe for back-compat
+ pipe: function( /* fnDone, fnFail, fnProgress */ ) {
+ var fns = arguments;
+
+ return jQuery.Deferred( function( newDefer ) {
+ jQuery.each( tuples, function( _i, tuple ) {
+
+ // Map tuples (progress, done, fail) to arguments (done, fail, progress)
+ var fn = isFunction( fns[ tuple[ 4 ] ] ) && fns[ tuple[ 4 ] ];
+
+ // deferred.progress(function() { bind to newDefer or newDefer.notify })
+ // deferred.done(function() { bind to newDefer or newDefer.resolve })
+ // deferred.fail(function() { bind to newDefer or newDefer.reject })
+ deferred[ tuple[ 1 ] ]( function() {
+ var returned = fn && fn.apply( this, arguments );
+ if ( returned && isFunction( returned.promise ) ) {
+ returned.promise()
+ .progress( newDefer.notify )
+ .done( newDefer.resolve )
+ .fail( newDefer.reject );
+ } else {
+ newDefer[ tuple[ 0 ] + "With" ](
+ this,
+ fn ? [ returned ] : arguments
+ );
+ }
+ } );
+ } );
+ fns = null;
+ } ).promise();
+ },
+ then: function( onFulfilled, onRejected, onProgress ) {
+ var maxDepth = 0;
+ function resolve( depth, deferred, handler, special ) {
+ return function() {
+ var that = this,
+ args = arguments,
+ mightThrow = function() {
+ var returned, then;
+
+ // Support: Promises/A+ section 2.3.3.3.3
+ // https://promisesaplus.com/#point-59
+ // Ignore double-resolution attempts
+ if ( depth < maxDepth ) {
+ return;
+ }
+
+ returned = handler.apply( that, args );
+
+ // Support: Promises/A+ section 2.3.1
+ // https://promisesaplus.com/#point-48
+ if ( returned === deferred.promise() ) {
+ throw new TypeError( "Thenable self-resolution" );
+ }
+
+ // Support: Promises/A+ sections 2.3.3.1, 3.5
+ // https://promisesaplus.com/#point-54
+ // https://promisesaplus.com/#point-75
+ // Retrieve `then` only once
+ then = returned &&
+
+ // Support: Promises/A+ section 2.3.4
+ // https://promisesaplus.com/#point-64
+ // Only check objects and functions for thenability
+ ( typeof returned === "object" ||
+ typeof returned === "function" ) &&
+ returned.then;
+
+ // Handle a returned thenable
+ if ( isFunction( then ) ) {
+
+ // Special processors (notify) just wait for resolution
+ if ( special ) {
+ then.call(
+ returned,
+ resolve( maxDepth, deferred, Identity, special ),
+ resolve( maxDepth, deferred, Thrower, special )
+ );
+
+ // Normal processors (resolve) also hook into progress
+ } else {
+
+ // ...and disregard older resolution values
+ maxDepth++;
+
+ then.call(
+ returned,
+ resolve( maxDepth, deferred, Identity, special ),
+ resolve( maxDepth, deferred, Thrower, special ),
+ resolve( maxDepth, deferred, Identity,
+ deferred.notifyWith )
+ );
+ }
+
+ // Handle all other returned values
+ } else {
+
+ // Only substitute handlers pass on context
+ // and multiple values (non-spec behavior)
+ if ( handler !== Identity ) {
+ that = undefined;
+ args = [ returned ];
+ }
+
+ // Process the value(s)
+ // Default process is resolve
+ ( special || deferred.resolveWith )( that, args );
+ }
+ },
+
+ // Only normal processors (resolve) catch and reject exceptions
+ process = special ?
+ mightThrow :
+ function() {
+ try {
+ mightThrow();
+ } catch ( e ) {
+
+ if ( jQuery.Deferred.exceptionHook ) {
+ jQuery.Deferred.exceptionHook( e,
+ process.stackTrace );
+ }
+
+ // Support: Promises/A+ section 2.3.3.3.4.1
+ // https://promisesaplus.com/#point-61
+ // Ignore post-resolution exceptions
+ if ( depth + 1 >= maxDepth ) {
+
+ // Only substitute handlers pass on context
+ // and multiple values (non-spec behavior)
+ if ( handler !== Thrower ) {
+ that = undefined;
+ args = [ e ];
+ }
+
+ deferred.rejectWith( that, args );
+ }
+ }
+ };
+
+ // Support: Promises/A+ section 2.3.3.3.1
+ // https://promisesaplus.com/#point-57
+ // Re-resolve promises immediately to dodge false rejection from
+ // subsequent errors
+ if ( depth ) {
+ process();
+ } else {
+
+ // Call an optional hook to record the stack, in case of exception
+ // since it's otherwise lost when execution goes async
+ if ( jQuery.Deferred.getStackHook ) {
+ process.stackTrace = jQuery.Deferred.getStackHook();
+ }
+ window.setTimeout( process );
+ }
+ };
+ }
+
+ return jQuery.Deferred( function( newDefer ) {
+
+ // progress_handlers.add( ... )
+ tuples[ 0 ][ 3 ].add(
+ resolve(
+ 0,
+ newDefer,
+ isFunction( onProgress ) ?
+ onProgress :
+ Identity,
+ newDefer.notifyWith
+ )
+ );
+
+ // fulfilled_handlers.add( ... )
+ tuples[ 1 ][ 3 ].add(
+ resolve(
+ 0,
+ newDefer,
+ isFunction( onFulfilled ) ?
+ onFulfilled :
+ Identity
+ )
+ );
+
+ // rejected_handlers.add( ... )
+ tuples[ 2 ][ 3 ].add(
+ resolve(
+ 0,
+ newDefer,
+ isFunction( onRejected ) ?
+ onRejected :
+ Thrower
+ )
+ );
+ } ).promise();
+ },
+
+ // Get a promise for this deferred
+ // If obj is provided, the promise aspect is added to the object
+ promise: function( obj ) {
+ return obj != null ? jQuery.extend( obj, promise ) : promise;
+ }
+ },
+ deferred = {};
+
+ // Add list-specific methods
+ jQuery.each( tuples, function( i, tuple ) {
+ var list = tuple[ 2 ],
+ stateString = tuple[ 5 ];
+
+ // promise.progress = list.add
+ // promise.done = list.add
+ // promise.fail = list.add
+ promise[ tuple[ 1 ] ] = list.add;
+
+ // Handle state
+ if ( stateString ) {
+ list.add(
+ function() {
+
+ // state = "resolved" (i.e., fulfilled)
+ // state = "rejected"
+ state = stateString;
+ },
+
+ // rejected_callbacks.disable
+ // fulfilled_callbacks.disable
+ tuples[ 3 - i ][ 2 ].disable,
+
+ // rejected_handlers.disable
+ // fulfilled_handlers.disable
+ tuples[ 3 - i ][ 3 ].disable,
+
+ // progress_callbacks.lock
+ tuples[ 0 ][ 2 ].lock,
+
+ // progress_handlers.lock
+ tuples[ 0 ][ 3 ].lock
+ );
+ }
+
+ // progress_handlers.fire
+ // fulfilled_handlers.fire
+ // rejected_handlers.fire
+ list.add( tuple[ 3 ].fire );
+
+ // deferred.notify = function() { deferred.notifyWith(...) }
+ // deferred.resolve = function() { deferred.resolveWith(...) }
+ // deferred.reject = function() { deferred.rejectWith(...) }
+ deferred[ tuple[ 0 ] ] = function() {
+ deferred[ tuple[ 0 ] + "With" ]( this === deferred ? undefined : this, arguments );
+ return this;
+ };
+
+ // deferred.notifyWith = list.fireWith
+ // deferred.resolveWith = list.fireWith
+ // deferred.rejectWith = list.fireWith
+ deferred[ tuple[ 0 ] + "With" ] = list.fireWith;
+ } );
+
+ // Make the deferred a promise
+ promise.promise( deferred );
+
+ // Call given func if any
+ if ( func ) {
+ func.call( deferred, deferred );
+ }
+
+ // All done!
+ return deferred;
+ },
+
+ // Deferred helper
+ when: function( singleValue ) {
+ var
+
+ // count of uncompleted subordinates
+ remaining = arguments.length,
+
+ // count of unprocessed arguments
+ i = remaining,
+
+ // subordinate fulfillment data
+ resolveContexts = Array( i ),
+ resolveValues = slice.call( arguments ),
+
+ // the master Deferred
+ master = jQuery.Deferred(),
+
+ // subordinate callback factory
+ updateFunc = function( i ) {
+ return function( value ) {
+ resolveContexts[ i ] = this;
+ resolveValues[ i ] = arguments.length > 1 ? slice.call( arguments ) : value;
+ if ( !( --remaining ) ) {
+ master.resolveWith( resolveContexts, resolveValues );
+ }
+ };
+ };
+
+ // Single- and empty arguments are adopted like Promise.resolve
+ if ( remaining <= 1 ) {
+ adoptValue( singleValue, master.done( updateFunc( i ) ).resolve, master.reject,
+ !remaining );
+
+ // Use .then() to unwrap secondary thenables (cf. gh-3000)
+ if ( master.state() === "pending" ||
+ isFunction( resolveValues[ i ] && resolveValues[ i ].then ) ) {
+
+ return master.then();
+ }
+ }
+
+ // Multiple arguments are aggregated like Promise.all array elements
+ while ( i-- ) {
+ adoptValue( resolveValues[ i ], updateFunc( i ), master.reject );
+ }
+
+ return master.promise();
+ }
+} );
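+
+// Usage sketch (illustrative only): with several arguments jQuery.when
+// aggregates subordinates like Promise.all; resolved values arrive
+// positionally in the .done() callback:
+//
+//     var d1 = jQuery.Deferred(),
+//         d2 = jQuery.Deferred();
+//
+//     jQuery.when( d1, d2 ).done( function( v1, v2 ) {
+//         // runs once both resolve; v1 === "a", v2 === "b"
+//     } );
+//
+//     d1.resolve( "a" );
+//     d2.resolve( "b" );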
+
+
+// These usually indicate a programmer mistake during development,
+// warn about them ASAP rather than swallowing them by default.
+var rerrorNames = /^(Eval|Internal|Range|Reference|Syntax|Type|URI)Error$/;
+
+jQuery.Deferred.exceptionHook = function( error, stack ) {
+
+ // Support: IE 8 - 9 only
+ // Console exists when dev tools are open, which can happen at any time
+ if ( window.console && window.console.warn && error && rerrorNames.test( error.name ) ) {
+ window.console.warn( "jQuery.Deferred exception: " + error.message, error.stack, stack );
+ }
+};
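+
+// Hook sketch (illustrative only, not wired up by default): a caller may
+// assign jQuery.Deferred.getStackHook to capture a stack when a handler is
+// scheduled; it reaches exceptionHook above as the second argument:
+//
+//     jQuery.Deferred.getStackHook = function() {
+//         return new Error( "jQuery.Deferred" ).stack;
+//     };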
+
+
+
+
+jQuery.readyException = function( error ) {
+ window.setTimeout( function() {
+ throw error;
+ } );
+};
+
+
+
+
+// The deferred used on DOM ready
+var readyList = jQuery.Deferred();
+
+jQuery.fn.ready = function( fn ) {
+
+ readyList
+ .then( fn )
+
+ // Wrap jQuery.readyException in a function so that the lookup
+ // happens at the time of error handling instead of callback
+ // registration.
+ .catch( function( error ) {
+ jQuery.readyException( error );
+ } );
+
+ return this;
+};
+
+jQuery.extend( {
+
+ // Is the DOM ready to be used? Set to true once it occurs.
+ isReady: false,
+
+ // A counter to track how many items to wait for before
+ // the ready event fires. See #6781
+ readyWait: 1,
+
+ // Handle when the DOM is ready
+ ready: function( wait ) {
+
+ // Abort if there are pending holds or we're already ready
+ if ( wait === true ? --jQuery.readyWait : jQuery.isReady ) {
+ return;
+ }
+
+ // Remember that the DOM is ready
+ jQuery.isReady = true;
+
+ // If a normal DOM Ready event fired, decrement, and wait if need be
+ if ( wait !== true && --jQuery.readyWait > 0 ) {
+ return;
+ }
+
+ // If there are functions bound, execute them
+ readyList.resolveWith( document, [ jQuery ] );
+ }
+} );
+
+jQuery.ready.then = readyList.then;
+
+// The ready event handler and self cleanup method
+function completed() {
+ document.removeEventListener( "DOMContentLoaded", completed );
+ window.removeEventListener( "load", completed );
+ jQuery.ready();
+}
+
+// Catch cases where $(document).ready() is called
+// after the browser event has already occurred.
+// Support: IE <=9 - 10 only
+// Older IE sometimes signals "interactive" too soon
+if ( document.readyState === "complete" ||
+ ( document.readyState !== "loading" && !document.documentElement.doScroll ) ) {
+
+ // Handle it asynchronously to allow scripts the opportunity to delay ready
+ window.setTimeout( jQuery.ready );
+
+} else {
+
+ // Use the handy event callback
+ document.addEventListener( "DOMContentLoaded", completed );
+
+ // A fallback to window.onload, that will always work
+ window.addEventListener( "load", completed );
+}
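+
+// Usage sketch (illustrative only): handlers registered via jQuery.fn.ready
+// run once readyList resolves; anything they throw is routed through
+// jQuery.readyException rather than being swallowed:
+//
+//     jQuery( document ).ready( function() {
+//         // safe to touch the DOM here
+//     } );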
+
+
+
+
+// Multifunctional method to get and set values of a collection
+// The value(s) can optionally be executed if it's a function
+var access = function( elems, fn, key, value, chainable, emptyGet, raw ) {
+ var i = 0,
+ len = elems.length,
+ bulk = key == null;
+
+ // Sets many values
+ if ( toType( key ) === "object" ) {
+ chainable = true;
+ for ( i in key ) {
+ access( elems, fn, i, key[ i ], true, emptyGet, raw );
+ }
+
+ // Sets one value
+ } else if ( value !== undefined ) {
+ chainable = true;
+
+ if ( !isFunction( value ) ) {
+ raw = true;
+ }
+
+ if ( bulk ) {
+
+ // Bulk operations run against the entire set
+ if ( raw ) {
+ fn.call( elems, value );
+ fn = null;
+
+ // ...except when executing function values
+ } else {
+ bulk = fn;
+ fn = function( elem, _key, value ) {
+ return bulk.call( jQuery( elem ), value );
+ };
+ }
+ }
+
+ if ( fn ) {
+ for ( ; i < len; i++ ) {
+ fn(
+ elems[ i ], key, raw ?
+ value :
+ value.call( elems[ i ], i, fn( elems[ i ], key ) )
+ );
+ }
+ }
+ }
+
+ if ( chainable ) {
+ return elems;
+ }
+
+ // Gets
+ if ( bulk ) {
+ return fn.call( elems );
+ }
+
+ return len ? fn( elems[ 0 ], key ) : emptyGet;
+};
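+
+// Usage sketch (illustrative only): access is the shared engine behind
+// overloaded getter/setters such as .data(), .attr() and .css(). A
+// hypothetical wrapper ("demoAttr" is not a real jQuery method) shows the
+// calling convention:
+//
+//     jQuery.fn.demoAttr = function( key, value ) {
+//         return access( this, function( elem, key, value ) {
+//             if ( value === undefined ) {
+//                 return elem.getAttribute( key );   // get path
+//             }
+//             elem.setAttribute( key, value );       // set path, chainable
+//         }, key, value, arguments.length > 1 );
+//     };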
+
+
+// Matches dashed string for camelizing
+var rmsPrefix = /^-ms-/,
+ rdashAlpha = /-([a-z])/g;
+
+// Used by camelCase as callback to replace()
+function fcamelCase( _all, letter ) {
+ return letter.toUpperCase();
+}
+
+// Convert dashed to camelCase; used by the css and data modules
+// Support: IE <=9 - 11, Edge 12 - 15
+// Microsoft forgot to hump their vendor prefix (#9572)
+function camelCase( string ) {
+ return string.replace( rmsPrefix, "ms-" ).replace( rdashAlpha, fcamelCase );
+}
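+
+// Worked examples (illustrative only):
+//
+//     camelCase( "background-color" );   // "backgroundColor"
+//     camelCase( "-ms-transform" );      // "msTransform" (lowercase "ms",
+//                                        // matching the quirk noted above)
+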
+var acceptData = function( owner ) {
+
+ // Accepts only:
+ // - Node
+ // - Node.ELEMENT_NODE
+ // - Node.DOCUMENT_NODE
+ // - Object
+ // - Any
+ return owner.nodeType === 1 || owner.nodeType === 9 || !( +owner.nodeType );
+};
+
+
+
+
+function Data() {
+ this.expando = jQuery.expando + Data.uid++;
+}
+
+Data.uid = 1;
+
+Data.prototype = {
+
+ cache: function( owner ) {
+
+ // Check if the owner object already has a cache
+ var value = owner[ this.expando ];
+
+ // If not, create one
+ if ( !value ) {
+ value = {};
+
+ // We can accept data for non-element nodes in modern browsers,
+ // but we should not, see #8335.
+ // Always return an empty object.
+ if ( acceptData( owner ) ) {
+
+ // If it is a node unlikely to be stringify-ed or looped over
+ // use plain assignment
+ if ( owner.nodeType ) {
+ owner[ this.expando ] = value;
+
+ // Otherwise secure it in a non-enumerable property
+ // configurable must be true to allow the property to be
+ // deleted when data is removed
+ } else {
+ Object.defineProperty( owner, this.expando, {
+ value: value,
+ configurable: true
+ } );
+ }
+ }
+ }
+
+ return value;
+ },
+ set: function( owner, data, value ) {
+ var prop,
+ cache = this.cache( owner );
+
+ // Handle: [ owner, key, value ] args
+ // Always use camelCase key (gh-2257)
+ if ( typeof data === "string" ) {
+ cache[ camelCase( data ) ] = value;
+
+ // Handle: [ owner, { properties } ] args
+ } else {
+
+ // Copy the properties one-by-one to the cache object
+ for ( prop in data ) {
+ cache[ camelCase( prop ) ] = data[ prop ];
+ }
+ }
+ return cache;
+ },
+ get: function( owner, key ) {
+ return key === undefined ?
+ this.cache( owner ) :
+
+ // Always use camelCase key (gh-2257)
+ owner[ this.expando ] && owner[ this.expando ][ camelCase( key ) ];
+ },
+ access: function( owner, key, value ) {
+
+ // In cases where either:
+ //
+ // 1. No key was specified
+ // 2. A string key was specified, but no value provided
+ //
+ // Take the "read" path and allow the get method to determine
+ // which value to return, respectively either:
+ //
+ // 1. The entire cache object
+ // 2. The data stored at the key
+ //
+ if ( key === undefined ||
+ ( ( key && typeof key === "string" ) && value === undefined ) ) {
+
+ return this.get( owner, key );
+ }
+
+ // When the key is not a string, or both a key and value
+ // are specified, set or extend (existing objects) with either:
+ //
+ // 1. An object of properties
+ // 2. A key and value
+ //
+ this.set( owner, key, value );
+
+ // Since the "set" path can have two possible entry points,
+ // return the expected data based on which path was taken
+ return value !== undefined ? value : key;
+ },
+ remove: function( owner, key ) {
+ var i,
+ cache = owner[ this.expando ];
+
+ if ( cache === undefined ) {
+ return;
+ }
+
+ if ( key !== undefined ) {
+
+ // Support array or space separated string of keys
+ if ( Array.isArray( key ) ) {
+
+ // If key is an array of keys...
+ // We always set camelCase keys, so remove that.
+ key = key.map( camelCase );
+ } else {
+ key = camelCase( key );
+
+ // If a key with the spaces exists, use it.
+ // Otherwise, create an array by matching non-whitespace
+ key = key in cache ?
+ [ key ] :
+ ( key.match( rnothtmlwhite ) || [] );
+ }
+
+ i = key.length;
+
+ while ( i-- ) {
+ delete cache[ key[ i ] ];
+ }
+ }
+
+ // Remove the expando if there's no more data
+ if ( key === undefined || jQuery.isEmptyObject( cache ) ) {
+
+ // Support: Chrome <=35 - 45
+ // Webkit & Blink performance suffers when deleting properties
+ // from DOM nodes, so set to undefined instead
+ // https://bugs.chromium.org/p/chromium/issues/detail?id=378607 (bug restricted)
+ if ( owner.nodeType ) {
+ owner[ this.expando ] = undefined;
+ } else {
+ delete owner[ this.expando ];
+ }
+ }
+ },
+ hasData: function( owner ) {
+ var cache = owner[ this.expando ];
+ return cache !== undefined && !jQuery.isEmptyObject( cache );
+ }
+};
+var dataPriv = new Data();
+
+var dataUser = new Data();
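+
+// Usage sketch (illustrative only): dataUser backs the public .data() API
+// while dataPriv keeps internal state (events, queues, display) off user
+// objects; both share the Data mechanics above:
+//
+//     var el = document.createElement( "div" );
+//
+//     dataUser.set( el, "color", "blue" );
+//     dataUser.get( el, "color" );    // "blue"
+//     dataUser.hasData( el );         // true
+//     dataUser.remove( el, "color" );
+//     dataUser.hasData( el );         // false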
+
+
+
+// Implementation Summary
+//
+// 1. Enforce API surface and semantic compatibility with 1.9.x branch
+// 2. Improve the module's maintainability by reducing the storage
+// paths to a single mechanism.
+// 3. Use the same single mechanism to support "private" and "user" data.
+// 4. _Never_ expose "private" data to user code (TODO: Drop _data, _removeData)
+// 5. Avoid exposing implementation details on user objects (eg. expando properties)
+// 6. Provide a clear path for implementation upgrade to WeakMap in 2014
+
+var rbrace = /^(?:\{[\w\W]*\}|\[[\w\W]*\])$/,
+ rmultiDash = /[A-Z]/g;
+
+function getData( data ) {
+ if ( data === "true" ) {
+ return true;
+ }
+
+ if ( data === "false" ) {
+ return false;
+ }
+
+ if ( data === "null" ) {
+ return null;
+ }
+
+ // Only convert to a number if it doesn't change the string
+ if ( data === +data + "" ) {
+ return +data;
+ }
+
+ if ( rbrace.test( data ) ) {
+ return JSON.parse( data );
+ }
+
+ return data;
+}
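+
+// Worked examples (illustrative only):
+//
+//     getData( "true" );      // true (boolean)
+//     getData( "42" );        // 42 ("42" === +"42" + "", so it converts)
+//     getData( "1e3" );       // "1e3" (stays a string: +"1e3" + "" is "1000")
+//     getData( '{"a":1}' );   // { a: 1 } (rbrace matches, JSON.parse runs)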
+
+function dataAttr( elem, key, data ) {
+ var name;
+
+ // If nothing was found internally, try to fetch any
+ // data from the HTML5 data-* attribute
+ if ( data === undefined && elem.nodeType === 1 ) {
+ name = "data-" + key.replace( rmultiDash, "-$&" ).toLowerCase();
+ data = elem.getAttribute( name );
+
+ if ( typeof data === "string" ) {
+ try {
+ data = getData( data );
+ } catch ( e ) {}
+
+ // Make sure we set the data so it isn't changed later
+ dataUser.set( elem, key, data );
+ } else {
+ data = undefined;
+ }
+ }
+ return data;
+}
+
+jQuery.extend( {
+ hasData: function( elem ) {
+ return dataUser.hasData( elem ) || dataPriv.hasData( elem );
+ },
+
+ data: function( elem, name, data ) {
+ return dataUser.access( elem, name, data );
+ },
+
+ removeData: function( elem, name ) {
+ dataUser.remove( elem, name );
+ },
+
+ // TODO: Now that all calls to _data and _removeData have been replaced
+ // with direct calls to dataPriv methods, these can be deprecated.
+ _data: function( elem, name, data ) {
+ return dataPriv.access( elem, name, data );
+ },
+
+ _removeData: function( elem, name ) {
+ dataPriv.remove( elem, name );
+ }
+} );
+
+jQuery.fn.extend( {
+ data: function( key, value ) {
+ var i, name, data,
+ elem = this[ 0 ],
+ attrs = elem && elem.attributes;
+
+ // Gets all values
+ if ( key === undefined ) {
+ if ( this.length ) {
+ data = dataUser.get( elem );
+
+ if ( elem.nodeType === 1 && !dataPriv.get( elem, "hasDataAttrs" ) ) {
+ i = attrs.length;
+ while ( i-- ) {
+
+ // Support: IE 11 only
+ // The attrs elements can be null (#14894)
+ if ( attrs[ i ] ) {
+ name = attrs[ i ].name;
+ if ( name.indexOf( "data-" ) === 0 ) {
+ name = camelCase( name.slice( 5 ) );
+ dataAttr( elem, name, data[ name ] );
+ }
+ }
+ }
+ dataPriv.set( elem, "hasDataAttrs", true );
+ }
+ }
+
+ return data;
+ }
+
+ // Sets multiple values
+ if ( typeof key === "object" ) {
+ return this.each( function() {
+ dataUser.set( this, key );
+ } );
+ }
+
+ return access( this, function( value ) {
+ var data;
+
+ // The calling jQuery object (element matches) is not empty
+ // (and therefore has an element at this[ 0 ]) and the
+ // `value` parameter was not undefined. An empty jQuery object
+ // will result in `undefined` for elem = this[ 0 ] which will
+ // throw an exception if an attempt to read a data cache is made.
+ if ( elem && value === undefined ) {
+
+ // Attempt to get data from the cache
+ // The key will always be camelCased in Data
+ data = dataUser.get( elem, key );
+ if ( data !== undefined ) {
+ return data;
+ }
+
+ // Attempt to "discover" the data in
+ // HTML5 custom data-* attrs
+ data = dataAttr( elem, key );
+ if ( data !== undefined ) {
+ return data;
+ }
+
+ // We tried really hard, but the data doesn't exist.
+ return;
+ }
+
+ // Set the data...
+ this.each( function() {
+
+ // We always store the camelCased key
+ dataUser.set( this, key, value );
+ } );
+ }, null, value, arguments.length > 1, null, true );
+ },
+
+ removeData: function( key ) {
+ return this.each( function() {
+ dataUser.remove( this, key );
+ } );
+ }
+} );
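+
+// Usage sketch (illustrative only): given markup such as
+// <div id="user" data-user-name="mo">, the data-* attribute is discovered,
+// camelCased and cached on first read (see dataAttr above):
+//
+//     jQuery( "#user" ).data( "userName" );   // "mo"
+//     jQuery( "#user" ).data( "age", 30 );    // setter, chainable
+//     jQuery( "#user" ).data( "age" );        // 30 (from the cache, not the DOM)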
+
+
+jQuery.extend( {
+ queue: function( elem, type, data ) {
+ var queue;
+
+ if ( elem ) {
+ type = ( type || "fx" ) + "queue";
+ queue = dataPriv.get( elem, type );
+
+ // Speed up dequeue by getting out quickly if this is just a lookup
+ if ( data ) {
+ if ( !queue || Array.isArray( data ) ) {
+ queue = dataPriv.access( elem, type, jQuery.makeArray( data ) );
+ } else {
+ queue.push( data );
+ }
+ }
+ return queue || [];
+ }
+ },
+
+ dequeue: function( elem, type ) {
+ type = type || "fx";
+
+ var queue = jQuery.queue( elem, type ),
+ startLength = queue.length,
+ fn = queue.shift(),
+ hooks = jQuery._queueHooks( elem, type ),
+ next = function() {
+ jQuery.dequeue( elem, type );
+ };
+
+ // If the fx queue is dequeued, always remove the progress sentinel
+ if ( fn === "inprogress" ) {
+ fn = queue.shift();
+ startLength--;
+ }
+
+ if ( fn ) {
+
+ // Add a progress sentinel to prevent the fx queue from being
+ // automatically dequeued
+ if ( type === "fx" ) {
+ queue.unshift( "inprogress" );
+ }
+
+ // Clear up the last queue stop function
+ delete hooks.stop;
+ fn.call( elem, next, hooks );
+ }
+
+ if ( !startLength && hooks ) {
+ hooks.empty.fire();
+ }
+ },
+
+ // Not public - generate a queueHooks object, or return the current one
+ _queueHooks: function( elem, type ) {
+ var key = type + "queueHooks";
+ return dataPriv.get( elem, key ) || dataPriv.access( elem, key, {
+ empty: jQuery.Callbacks( "once memory" ).add( function() {
+ dataPriv.remove( elem, [ type + "queue", key ] );
+ } )
+ } );
+ }
+} );
+
+jQuery.fn.extend( {
+ queue: function( type, data ) {
+ var setter = 2;
+
+ if ( typeof type !== "string" ) {
+ data = type;
+ type = "fx";
+ setter--;
+ }
+
+ if ( arguments.length < setter ) {
+ return jQuery.queue( this[ 0 ], type );
+ }
+
+ return data === undefined ?
+ this :
+ this.each( function() {
+ var queue = jQuery.queue( this, type, data );
+
+ // Ensure a hooks object exists for this queue
+ jQuery._queueHooks( this, type );
+
+ if ( type === "fx" && queue[ 0 ] !== "inprogress" ) {
+ jQuery.dequeue( this, type );
+ }
+ } );
+ },
+ dequeue: function( type ) {
+ return this.each( function() {
+ jQuery.dequeue( this, type );
+ } );
+ },
+ clearQueue: function( type ) {
+ return this.queue( type || "fx", [] );
+ },
+
+ // Get a promise resolved when queues of a certain type
+ // are emptied (fx is the type by default)
+ promise: function( type, obj ) {
+ var tmp,
+ count = 1,
+ defer = jQuery.Deferred(),
+ elements = this,
+ i = this.length,
+ resolve = function() {
+ if ( !( --count ) ) {
+ defer.resolveWith( elements, [ elements ] );
+ }
+ };
+
+ if ( typeof type !== "string" ) {
+ obj = type;
+ type = undefined;
+ }
+ type = type || "fx";
+
+ while ( i-- ) {
+ tmp = dataPriv.get( elements[ i ], type + "queueHooks" );
+ if ( tmp && tmp.empty ) {
+ count++;
+ tmp.empty.add( resolve );
+ }
+ }
+ resolve();
+ return defer.promise( obj );
+ }
+} );
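+
+// Usage sketch (illustrative only): a custom (non-"fx") queue is dequeued
+// manually, and .promise( type ) resolves once that queue empties:
+//
+//     jQuery( "#box" )
+//         .queue( "steps", function( next ) {
+//             // ... do work, then advance the queue
+//             next();
+//         } )
+//         .promise( "steps" )
+//         .done( function() {
+//             // every "steps" callback has run
+//         } );
+//
+//     jQuery( "#box" ).dequeue( "steps" );   // kick the queue off
+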
+var pnum = ( /[+-]?(?:\d*\.|)\d+(?:[eE][+-]?\d+|)/ ).source;
+
+var rcssNum = new RegExp( "^(?:([+-])=|)(" + pnum + ")([a-z%]*)$", "i" );
+
+
+var cssExpand = [ "Top", "Right", "Bottom", "Left" ];
+
+var documentElement = document.documentElement;
+
+
+
+ var isAttached = function( elem ) {
+ return jQuery.contains( elem.ownerDocument, elem );
+ },
+ composed = { composed: true };
+
+ // Support: IE 9 - 11+, Edge 12 - 18+, iOS 10.0 - 10.2 only
+ // Check attachment across shadow DOM boundaries when possible (gh-3504)
+ // Support: iOS 10.0-10.2 only
+ // Early iOS 10 versions support `attachShadow` but not `getRootNode`,
+ // leading to errors. We need to check for `getRootNode`.
+ if ( documentElement.getRootNode ) {
+ isAttached = function( elem ) {
+ return jQuery.contains( elem.ownerDocument, elem ) ||
+ elem.getRootNode( composed ) === elem.ownerDocument;
+ };
+ }
+var isHiddenWithinTree = function( elem, el ) {
+
+ // isHiddenWithinTree might be called from jQuery#filter function;
+ // in that case, element will be second argument
+ elem = el || elem;
+
+ // Inline style trumps all
+ return elem.style.display === "none" ||
+ elem.style.display === "" &&
+
+ // Otherwise, check computed style
+ // Support: Firefox <=43 - 45
+ // Disconnected elements can have computed display: none, so first confirm that elem is
+ // in the document.
+ isAttached( elem ) &&
+
+ jQuery.css( elem, "display" ) === "none";
+ };
+
+
+
+function adjustCSS( elem, prop, valueParts, tween ) {
+ var adjusted, scale,
+ maxIterations = 20,
+ currentValue = tween ?
+ function() {
+ return tween.cur();
+ } :
+ function() {
+ return jQuery.css( elem, prop, "" );
+ },
+ initial = currentValue(),
+ unit = valueParts && valueParts[ 3 ] || ( jQuery.cssNumber[ prop ] ? "" : "px" ),
+
+ // Starting value computation is required for potential unit mismatches
+ initialInUnit = elem.nodeType &&
+ ( jQuery.cssNumber[ prop ] || unit !== "px" && +initial ) &&
+ rcssNum.exec( jQuery.css( elem, prop ) );
+
+ if ( initialInUnit && initialInUnit[ 3 ] !== unit ) {
+
+ // Support: Firefox <=54
+ // Halve the iteration target value to prevent interference from CSS upper bounds (gh-2144)
+ initial = initial / 2;
+
+ // Trust units reported by jQuery.css
+ unit = unit || initialInUnit[ 3 ];
+
+ // Iteratively approximate from a nonzero starting point
+ initialInUnit = +initial || 1;
+
+ while ( maxIterations-- ) {
+
+ // Evaluate and update our best guess (doubling guesses that zero out).
+ // Finish if the scale equals or crosses 1 (making the old*new product non-positive).
+ jQuery.style( elem, prop, initialInUnit + unit );
+ if ( ( 1 - scale ) * ( 1 - ( scale = currentValue() / initial || 0.5 ) ) <= 0 ) {
+ maxIterations = 0;
+ }
+ initialInUnit = initialInUnit / scale;
+
+ }
+
+ initialInUnit = initialInUnit * 2;
+ jQuery.style( elem, prop, initialInUnit + unit );
+
+ // Make sure we update the tween properties later on
+ valueParts = valueParts || [];
+ }
+
+ if ( valueParts ) {
+ initialInUnit = +initialInUnit || +initial || 0;
+
+ // Apply relative offset (+=/-=) if specified
+ adjusted = valueParts[ 1 ] ?
+ initialInUnit + ( valueParts[ 1 ] + 1 ) * valueParts[ 2 ] :
+ +valueParts[ 2 ];
+ if ( tween ) {
+ tween.unit = unit;
+ tween.start = initialInUnit;
+ tween.end = adjusted;
+ }
+ }
+ return adjusted;
+}
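+
+// Worked example (illustrative only): relative values arrive through rcssNum,
+// where valueParts[ 1 ] is the sign and ( "+" + 1 ) coerces to +1:
+//
+//     rcssNum.exec( "+=10px" );   // [ "+=10px", "+", "10", "px" ]
+//
+//     // starting at 50px: adjusted = 50 + ( "+" + 1 ) * 10 = 60
+//     // starting at 50px, "-=10px": adjusted = 50 + ( "-" + 1 ) * 10 = 40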
+
+
+var defaultDisplayMap = {};
+
+function getDefaultDisplay( elem ) {
+ var temp,
+ doc = elem.ownerDocument,
+ nodeName = elem.nodeName,
+ display = defaultDisplayMap[ nodeName ];
+
+ if ( display ) {
+ return display;
+ }
+
+ temp = doc.body.appendChild( doc.createElement( nodeName ) );
+ display = jQuery.css( temp, "display" );
+
+ temp.parentNode.removeChild( temp );
+
+ if ( display === "none" ) {
+ display = "block";
+ }
+ defaultDisplayMap[ nodeName ] = display;
+
+ return display;
+}
+
+function showHide( elements, show ) {
+ var display, elem,
+ values = [],
+ index = 0,
+ length = elements.length;
+
+ // Determine new display value for elements that need to change
+ for ( ; index < length; index++ ) {
+ elem = elements[ index ];
+ if ( !elem.style ) {
+ continue;
+ }
+
+ display = elem.style.display;
+ if ( show ) {
+
+ // Since we force visibility upon cascade-hidden elements, an immediate (and slow)
+ // check is required in this first loop unless we have a nonempty display value (either
+ // inline or about-to-be-restored)
+ if ( display === "none" ) {
+ values[ index ] = dataPriv.get( elem, "display" ) || null;
+ if ( !values[ index ] ) {
+ elem.style.display = "";
+ }
+ }
+ if ( elem.style.display === "" && isHiddenWithinTree( elem ) ) {
+ values[ index ] = getDefaultDisplay( elem );
+ }
+ } else {
+ if ( display !== "none" ) {
+ values[ index ] = "none";
+
+ // Remember what we're overwriting
+ dataPriv.set( elem, "display", display );
+ }
+ }
+ }
+
+ // Set the display of the elements in a second loop to avoid constant reflow
+ for ( index = 0; index < length; index++ ) {
+ if ( values[ index ] != null ) {
+ elements[ index ].style.display = values[ index ];
+ }
+ }
+
+ return elements;
+}
+
+jQuery.fn.extend( {
+ show: function() {
+ return showHide( this, true );
+ },
+ hide: function() {
+ return showHide( this );
+ },
+ toggle: function( state ) {
+ if ( typeof state === "boolean" ) {
+ return state ? this.show() : this.hide();
+ }
+
+ return this.each( function() {
+ if ( isHiddenWithinTree( this ) ) {
+ jQuery( this ).show();
+ } else {
+ jQuery( this ).hide();
+ }
+ } );
+ }
+} );
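+
+// Usage sketch (illustrative only): .toggle() consults the cascade through
+// isHiddenWithinTree, so hiding remembers the inline display value and
+// showing restores it (or a default from getDefaultDisplay):
+//
+//     jQuery( "#panel" ).hide();     // stores the old display, sets "none"
+//     jQuery( "#panel" ).show();     // restores it
+//     jQuery( "#panel" ).toggle();   // visibility-aware flip
+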
+var rcheckableType = ( /^(?:checkbox|radio)$/i );
+
+var rtagName = ( /<([a-z][^\/\0>\x20\t\r\n\f]*)/i );
+
+var rscriptType = ( /^$|^module$|\/(?:java|ecma)script/i );
+
+
+
+( function() {
+ var fragment = document.createDocumentFragment(),
+ div = fragment.appendChild( document.createElement( "div" ) ),
+ input = document.createElement( "input" );
+
+ // Support: Android 4.0 - 4.3 only
+ // Check state lost if the name is set (#11217)
+ // Support: Windows Web Apps (WWA)
+ // `name` and `type` must use .setAttribute for WWA (#14901)
+ input.setAttribute( "type", "radio" );
+ input.setAttribute( "checked", "checked" );
+ input.setAttribute( "name", "t" );
+
+ div.appendChild( input );
+
+ // Support: Android <=4.1 only
+ // Older WebKit doesn't clone checked state correctly in fragments
+ support.checkClone = div.cloneNode( true ).cloneNode( true ).lastChild.checked;
+
+ // Support: IE <=11 only
+ // Make sure textarea (and checkbox) defaultValue is properly cloned
+ div.innerHTML = "<textarea>x</textarea>";
+ support.noCloneChecked = !!div.cloneNode( true ).lastChild.defaultValue;
+
+ // Support: IE <=9 only
+ // IE <=9 replaces <option> tags with their contents when inserted outside of
+ // the select element.
+ div.innerHTML = "<option></option>";
+ support.option = !!div.lastChild;
+} )();
+
+
+// We have to close these tags to support XHTML (#13200)
+var wrapMap = {
+
+ // XHTML parsers do not magically insert elements in the
+ // same way that tag soup parsers do. So we cannot shorten
+ // this by omitting <tbody> or other required elements.
+ thead: [ 1, "<table>", "</table>" ],
+ col: [ 2, "<table><colgroup>", "</colgroup></table>" ],
+ tr: [ 2, "<table><tbody>", "</tbody></table>" ],
+ td: [ 3, "<table><tbody><tr>", "</tr></tbody></table>" ],
+
+ _default: [ 0, "", "" ]
+};
+
+wrapMap.tbody = wrapMap.tfoot = wrapMap.colgroup = wrapMap.caption = wrapMap.thead;
+wrapMap.th = wrapMap.td;
+
+// Support: IE <=9 only
+if ( !support.option ) {
+ wrapMap.optgroup = wrapMap.option = [ 1, "<select multiple='multiple'>", "</select>" ];
+}
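+
+// Worked example (illustrative only): buildFragment (below) consults wrapMap
+// so table fragments survive innerHTML parsing; a bare <tr> would otherwise
+// be dropped:
+//
+//     jQuery( "<tr><td>ok</td></tr>" );   // parsed inside "<table><tbody>" wrappers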
+
+
+function getAll( context, tag ) {
+
+ // Support: IE <=9 - 11 only
+ // Use typeof to avoid zero-argument method invocation on host objects (#15151)
+ var ret;
+
+ if ( typeof context.getElementsByTagName !== "undefined" ) {
+ ret = context.getElementsByTagName( tag || "*" );
+
+ } else if ( typeof context.querySelectorAll !== "undefined" ) {
+ ret = context.querySelectorAll( tag || "*" );
+
+ } else {
+ ret = [];
+ }
+
+ if ( tag === undefined || tag && nodeName( context, tag ) ) {
+ return jQuery.merge( [ context ], ret );
+ }
+
+ return ret;
+}
+
+
+// Mark scripts as having already been evaluated
+function setGlobalEval( elems, refElements ) {
+ var i = 0,
+ l = elems.length;
+
+ for ( ; i < l; i++ ) {
+ dataPriv.set(
+ elems[ i ],
+ "globalEval",
+ !refElements || dataPriv.get( refElements[ i ], "globalEval" )
+ );
+ }
+}
+
+
+var rhtml = /<|&#?\w+;/;
+
+function buildFragment( elems, context, scripts, selection, ignored ) {
+ var elem, tmp, tag, wrap, attached, j,
+ fragment = context.createDocumentFragment(),
+ nodes = [],
+ i = 0,
+ l = elems.length;
+
+ for ( ; i < l; i++ ) {
+ elem = elems[ i ];
+
+ if ( elem || elem === 0 ) {
+
+ // Add nodes directly
+ if ( toType( elem ) === "object" ) {
+
+ // Support: Android <=4.0 only, PhantomJS 1 only
+ // push.apply(_, arraylike) throws on ancient WebKit
+ jQuery.merge( nodes, elem.nodeType ? [ elem ] : elem );
+
+ // Convert non-html into a text node
+ } else if ( !rhtml.test( elem ) ) {
+ nodes.push( context.createTextNode( elem ) );
+
+ // Convert html into DOM nodes
+ } else {
+ tmp = tmp || fragment.appendChild( context.createElement( "div" ) );
+
+ // Deserialize a standard representation
+ tag = ( rtagName.exec( elem ) || [ "", "" ] )[ 1 ].toLowerCase();
+ wrap = wrapMap[ tag ] || wrapMap._default;
+ tmp.innerHTML = wrap[ 1 ] + jQuery.htmlPrefilter( elem ) + wrap[ 2 ];
+
+ // Descend through wrappers to the right content
+ j = wrap[ 0 ];
+ while ( j-- ) {
+ tmp = tmp.lastChild;
+ }
+
+ // Support: Android <=4.0 only, PhantomJS 1 only
+ // push.apply(_, arraylike) throws on ancient WebKit
+ jQuery.merge( nodes, tmp.childNodes );
+
+ // Remember the top-level container
+ tmp = fragment.firstChild;
+
+ // Ensure the created nodes are orphaned (#12392)
+ tmp.textContent = "";
+ }
+ }
+ }
+
+ // Remove wrapper from fragment
+ fragment.textContent = "";
+
+ i = 0;
+ while ( ( elem = nodes[ i++ ] ) ) {
+
+ // Skip elements already in the context collection (trac-4087)
+ if ( selection && jQuery.inArray( elem, selection ) > -1 ) {
+ if ( ignored ) {
+ ignored.push( elem );
+ }
+ continue;
+ }
+
+ attached = isAttached( elem );
+
+ // Append to fragment
+ tmp = getAll( fragment.appendChild( elem ), "script" );
+
+ // Preserve script evaluation history
+ if ( attached ) {
+ setGlobalEval( tmp );
+ }
+
+ // Capture executables
+ if ( scripts ) {
+ j = 0;
+ while ( ( elem = tmp[ j++ ] ) ) {
+ if ( rscriptType.test( elem.type || "" ) ) {
+ scripts.push( elem );
+ }
+ }
+ }
+ }
+
+ return fragment;
+}
+
+
+var
+ rkeyEvent = /^key/,
+ rmouseEvent = /^(?:mouse|pointer|contextmenu|drag|drop)|click/,
+ rtypenamespace = /^([^.]*)(?:\.(.+)|)/;
+
+function returnTrue() {
+ return true;
+}
+
+function returnFalse() {
+ return false;
+}
+
+// Support: IE <=9 - 11+
+// focus() and blur() are asynchronous, except when they are no-op.
+// So expect focus to be synchronous when the element is already active,
+// and blur to be synchronous when the element is not already active.
+// (focus and blur are always synchronous in other supported browsers,
+// this just defines when we can count on it).
+function expectSync( elem, type ) {
+ return ( elem === safeActiveElement() ) === ( type === "focus" );
+}
+
+// Support: IE <=9 only
+// Accessing document.activeElement can throw unexpectedly
+// https://bugs.jquery.com/ticket/13393
+function safeActiveElement() {
+ try {
+ return document.activeElement;
+ } catch ( err ) { }
+}
+
+function on( elem, types, selector, data, fn, one ) {
+ var origFn, type;
+
+ // Types can be a map of types/handlers
+ if ( typeof types === "object" ) {
+
+ // ( types-Object, selector, data )
+ if ( typeof selector !== "string" ) {
+
+ // ( types-Object, data )
+ data = data || selector;
+ selector = undefined;
+ }
+ for ( type in types ) {
+ on( elem, type, selector, data, types[ type ], one );
+ }
+ return elem;
+ }
+
+ if ( data == null && fn == null ) {
+
+ // ( types, fn )
+ fn = selector;
+ data = selector = undefined;
+ } else if ( fn == null ) {
+ if ( typeof selector === "string" ) {
+
+ // ( types, selector, fn )
+ fn = data;
+ data = undefined;
+ } else {
+
+ // ( types, data, fn )
+ fn = data;
+ data = selector;
+ selector = undefined;
+ }
+ }
+ if ( fn === false ) {
+ fn = returnFalse;
+ } else if ( !fn ) {
+ return elem;
+ }
+
+ if ( one === 1 ) {
+ origFn = fn;
+ fn = function( event ) {
+
+ // Can use an empty set, since event contains the info
+ jQuery().off( event );
+ return origFn.apply( this, arguments );
+ };
+
+ // Use same guid so caller can remove using origFn
+ fn.guid = origFn.guid || ( origFn.guid = jQuery.guid++ );
+ }
+ return elem.each( function() {
+ jQuery.event.add( this, types, fn, data, selector );
+ } );
+}
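+
+// Usage sketch (illustrative only, el being any jQuery collection): the
+// argument shuffling above normalizes all public forms onto jQuery.event.add:
+//
+//     el.on( "click", fn );                            // ( types, fn )
+//     el.on( "click", ".btn", fn );                    // delegated
+//     el.on( "click", { n: 1 }, fn );                  // with event.data
+//     el.on( { click: f1, mouseenter: f2 }, ".btn" );  // types map
+//     el.on( "click", false );                         // shorthand for returnFalse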
+
+/*
+ * Helper functions for managing events -- not part of the public interface.
+ * Props to Dean Edwards' addEvent library for many of the ideas.
+ */
+jQuery.event = {
+
+ global: {},
+
+ add: function( elem, types, handler, data, selector ) {
+
+ var handleObjIn, eventHandle, tmp,
+ events, t, handleObj,
+ special, handlers, type, namespaces, origType,
+ elemData = dataPriv.get( elem );
+
+ // Only attach events to objects that accept data
+ if ( !acceptData( elem ) ) {
+ return;
+ }
+
+ // Caller can pass in an object of custom data in lieu of the handler
+ if ( handler.handler ) {
+ handleObjIn = handler;
+ handler = handleObjIn.handler;
+ selector = handleObjIn.selector;
+ }
+
+ // Ensure that invalid selectors throw exceptions at attach time
+ // Evaluate against documentElement in case elem is a non-element node (e.g., document)
+ if ( selector ) {
+ jQuery.find.matchesSelector( documentElement, selector );
+ }
+
+ // Make sure that the handler has a unique ID, used to find/remove it later
+ if ( !handler.guid ) {
+ handler.guid = jQuery.guid++;
+ }
+
+ // Init the element's event structure and main handler, if this is the first
+ if ( !( events = elemData.events ) ) {
+ events = elemData.events = Object.create( null );
+ }
+ if ( !( eventHandle = elemData.handle ) ) {
+ eventHandle = elemData.handle = function( e ) {
+
+ // Discard the second event of a jQuery.event.trigger() and
+ // when an event is called after a page has unloaded
+ return typeof jQuery !== "undefined" && jQuery.event.triggered !== e.type ?
+ jQuery.event.dispatch.apply( elem, arguments ) : undefined;
+ };
+ }
+
+ // Handle multiple events separated by a space
+ types = ( types || "" ).match( rnothtmlwhite ) || [ "" ];
+ t = types.length;
+ while ( t-- ) {
+ tmp = rtypenamespace.exec( types[ t ] ) || [];
+ type = origType = tmp[ 1 ];
+ namespaces = ( tmp[ 2 ] || "" ).split( "." ).sort();
+
+ // There *must* be a type, no attaching namespace-only handlers
+ if ( !type ) {
+ continue;
+ }
+
+ // If event changes its type, use the special event handlers for the changed type
+ special = jQuery.event.special[ type ] || {};
+
+ // If selector defined, determine special event api type, otherwise given type
+ type = ( selector ? special.delegateType : special.bindType ) || type;
+
+ // Update special based on newly reset type
+ special = jQuery.event.special[ type ] || {};
+
+ // handleObj is passed to all event handlers
+ handleObj = jQuery.extend( {
+ type: type,
+ origType: origType,
+ data: data,
+ handler: handler,
+ guid: handler.guid,
+ selector: selector,
+ needsContext: selector && jQuery.expr.match.needsContext.test( selector ),
+ namespace: namespaces.join( "." )
+ }, handleObjIn );
+
+ // Init the event handler queue if we're the first
+ if ( !( handlers = events[ type ] ) ) {
+ handlers = events[ type ] = [];
+ handlers.delegateCount = 0;
+
+ // Only use addEventListener if the special events handler returns false
+ if ( !special.setup ||
+ special.setup.call( elem, data, namespaces, eventHandle ) === false ) {
+
+ if ( elem.addEventListener ) {
+ elem.addEventListener( type, eventHandle );
+ }
+ }
+ }
+
+ if ( special.add ) {
+ special.add.call( elem, handleObj );
+
+ if ( !handleObj.handler.guid ) {
+ handleObj.handler.guid = handler.guid;
+ }
+ }
+
+ // Add to the element's handler list, delegates in front
+ if ( selector ) {
+ handlers.splice( handlers.delegateCount++, 0, handleObj );
+ } else {
+ handlers.push( handleObj );
+ }
+
+ // Keep track of which events have ever been used, for event optimization
+ jQuery.event.global[ type ] = true;
+ }
+
+ },
+
+ // Detach an event or set of events from an element
+ remove: function( elem, types, handler, selector, mappedTypes ) {
+
+ var j, origCount, tmp,
+ events, t, handleObj,
+ special, handlers, type, namespaces, origType,
+ elemData = dataPriv.hasData( elem ) && dataPriv.get( elem );
+
+ if ( !elemData || !( events = elemData.events ) ) {
+ return;
+ }
+
+ // Once for each type.namespace in types; type may be omitted
+ types = ( types || "" ).match( rnothtmlwhite ) || [ "" ];
+ t = types.length;
+ while ( t-- ) {
+ tmp = rtypenamespace.exec( types[ t ] ) || [];
+ type = origType = tmp[ 1 ];
+ namespaces = ( tmp[ 2 ] || "" ).split( "." ).sort();
+
+ // Unbind all events (on this namespace, if provided) for the element
+ if ( !type ) {
+ for ( type in events ) {
+ jQuery.event.remove( elem, type + types[ t ], handler, selector, true );
+ }
+ continue;
+ }
+
+ special = jQuery.event.special[ type ] || {};
+ type = ( selector ? special.delegateType : special.bindType ) || type;
+ handlers = events[ type ] || [];
+ tmp = tmp[ 2 ] &&
+ new RegExp( "(^|\\.)" + namespaces.join( "\\.(?:.*\\.|)" ) + "(\\.|$)" );
+
+ // Remove matching events
+ origCount = j = handlers.length;
+ while ( j-- ) {
+ handleObj = handlers[ j ];
+
+ if ( ( mappedTypes || origType === handleObj.origType ) &&
+ ( !handler || handler.guid === handleObj.guid ) &&
+ ( !tmp || tmp.test( handleObj.namespace ) ) &&
+ ( !selector || selector === handleObj.selector ||
+ selector === "**" && handleObj.selector ) ) {
+ handlers.splice( j, 1 );
+
+ if ( handleObj.selector ) {
+ handlers.delegateCount--;
+ }
+ if ( special.remove ) {
+ special.remove.call( elem, handleObj );
+ }
+ }
+ }
+
+ // Remove generic event handler if we removed something and no more handlers exist
+ // (avoids potential for endless recursion during removal of special event handlers)
+ if ( origCount && !handlers.length ) {
+ if ( !special.teardown ||
+ special.teardown.call( elem, namespaces, elemData.handle ) === false ) {
+
+ jQuery.removeEvent( elem, type, elemData.handle );
+ }
+
+ delete events[ type ];
+ }
+ }
+
+ // Remove data and the expando if it's no longer used
+ if ( jQuery.isEmptyObject( events ) ) {
+ dataPriv.remove( elem, "handle events" );
+ }
+ },
+
+ dispatch: function( nativeEvent ) {
+
+ var i, j, ret, matched, handleObj, handlerQueue,
+ args = new Array( arguments.length ),
+
+ // Make a writable jQuery.Event from the native event object
+ event = jQuery.event.fix( nativeEvent ),
+
+ handlers = (
+ dataPriv.get( this, "events" ) || Object.create( null )
+ )[ event.type ] || [],
+ special = jQuery.event.special[ event.type ] || {};
+
+ // Use the fix-ed jQuery.Event rather than the (read-only) native event
+ args[ 0 ] = event;
+
+ for ( i = 1; i < arguments.length; i++ ) {
+ args[ i ] = arguments[ i ];
+ }
+
+ event.delegateTarget = this;
+
+ // Call the preDispatch hook for the mapped type, and let it bail if desired
+ if ( special.preDispatch && special.preDispatch.call( this, event ) === false ) {
+ return;
+ }
+
+ // Determine handlers
+ handlerQueue = jQuery.event.handlers.call( this, event, handlers );
+
+ // Run delegates first; they may want to stop propagation beneath us
+ i = 0;
+ while ( ( matched = handlerQueue[ i++ ] ) && !event.isPropagationStopped() ) {
+ event.currentTarget = matched.elem;
+
+ j = 0;
+ while ( ( handleObj = matched.handlers[ j++ ] ) &&
+ !event.isImmediatePropagationStopped() ) {
+
+ // If the event is namespaced, then each handler is only invoked if it is
+ // specially universal or its namespaces are a superset of the event's.
+ if ( !event.rnamespace || handleObj.namespace === false ||
+ event.rnamespace.test( handleObj.namespace ) ) {
+
+ event.handleObj = handleObj;
+ event.data = handleObj.data;
+
+ ret = ( ( jQuery.event.special[ handleObj.origType ] || {} ).handle ||
+ handleObj.handler ).apply( matched.elem, args );
+
+ if ( ret !== undefined ) {
+ if ( ( event.result = ret ) === false ) {
+ event.preventDefault();
+ event.stopPropagation();
+ }
+ }
+ }
+ }
+ }
+
+ // Call the postDispatch hook for the mapped type
+ if ( special.postDispatch ) {
+ special.postDispatch.call( this, event );
+ }
+
+ return event.result;
+ },
+
+ handlers: function( event, handlers ) {
+ var i, handleObj, sel, matchedHandlers, matchedSelectors,
+ handlerQueue = [],
+ delegateCount = handlers.delegateCount,
+ cur = event.target;
+
+ // Find delegate handlers
+ if ( delegateCount &&
+
+ // Support: IE <=9
+ // Black-hole SVG <use> instance trees (trac-13180)
+ cur.nodeType &&
+
+ // Support: Firefox <=42
+ // Suppress spec-violating clicks indicating a non-primary pointer button (trac-3861)
+ // https://www.w3.org/TR/DOM-Level-3-Events/#event-type-click
+ // Support: IE 11 only
+ // ...but not arrow key "clicks" of radio inputs, which can have `button` -1 (gh-2343)
+ !( event.type === "click" && event.button >= 1 ) ) {
+
+ for ( ; cur !== this; cur = cur.parentNode || this ) {
+
+ // Don't check non-elements (#13208)
+ // Don't process clicks on disabled elements (#6911, #8165, #11382, #11764)
+ if ( cur.nodeType === 1 && !( event.type === "click" && cur.disabled === true ) ) {
+ matchedHandlers = [];
+ matchedSelectors = {};
+ for ( i = 0; i < delegateCount; i++ ) {
+ handleObj = handlers[ i ];
+
+ // Don't conflict with Object.prototype properties (#13203)
+ sel = handleObj.selector + " ";
+
+ if ( matchedSelectors[ sel ] === undefined ) {
+ matchedSelectors[ sel ] = handleObj.needsContext ?
+ jQuery( sel, this ).index( cur ) > -1 :
+ jQuery.find( sel, this, null, [ cur ] ).length;
+ }
+ if ( matchedSelectors[ sel ] ) {
+ matchedHandlers.push( handleObj );
+ }
+ }
+ if ( matchedHandlers.length ) {
+ handlerQueue.push( { elem: cur, handlers: matchedHandlers } );
+ }
+ }
+ }
+ }
+
+ // Add the remaining (directly-bound) handlers
+ cur = this;
+ if ( delegateCount < handlers.length ) {
+ handlerQueue.push( { elem: cur, handlers: handlers.slice( delegateCount ) } );
+ }
+
+ return handlerQueue;
+ },
+
+ addProp: function( name, hook ) {
+ Object.defineProperty( jQuery.Event.prototype, name, {
+ enumerable: true,
+ configurable: true,
+
+ get: isFunction( hook ) ?
+ function() {
+ if ( this.originalEvent ) {
+ return hook( this.originalEvent );
+ }
+ } :
+ function() {
+ if ( this.originalEvent ) {
+ return this.originalEvent[ name ];
+ }
+ },
+
+ set: function( value ) {
+ Object.defineProperty( this, name, {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: value
+ } );
+ }
+ } );
+ },
+
+ fix: function( originalEvent ) {
+ return originalEvent[ jQuery.expando ] ?
+ originalEvent :
+ new jQuery.Event( originalEvent );
+ },
+
+ special: {
+ load: {
+
+ // Prevent triggered image.load events from bubbling to window.load
+ noBubble: true
+ },
+ click: {
+
+ // Utilize native event to ensure correct state for checkable inputs
+ setup: function( data ) {
+
+ // For mutual compressibility with _default, replace `this` access with a local var.
+ // `|| data` is dead code meant only to preserve the variable through minification.
+ var el = this || data;
+
+ // Claim the first handler
+ if ( rcheckableType.test( el.type ) &&
+ el.click && nodeName( el, "input" ) ) {
+
+ // dataPriv.set( el, "click", ... )
+ leverageNative( el, "click", returnTrue );
+ }
+
+ // Return false to allow normal processing in the caller
+ return false;
+ },
+ trigger: function( data ) {
+
+ // For mutual compressibility with _default, replace `this` access with a local var.
+ // `|| data` is dead code meant only to preserve the variable through minification.
+ var el = this || data;
+
+ // Force setup before triggering a click
+ if ( rcheckableType.test( el.type ) &&
+ el.click && nodeName( el, "input" ) ) {
+
+ leverageNative( el, "click" );
+ }
+
+ // Return non-false to allow normal event-path propagation
+ return true;
+ },
+
+ // For cross-browser consistency, suppress native .click() on links
+ // Also prevent it if we're currently inside a leveraged native-event stack
+ _default: function( event ) {
+ var target = event.target;
+ return rcheckableType.test( target.type ) &&
+ target.click && nodeName( target, "input" ) &&
+ dataPriv.get( target, "click" ) ||
+ nodeName( target, "a" );
+ }
+ },
+
+ beforeunload: {
+ postDispatch: function( event ) {
+
+ // Support: Firefox 20+
+ // Firefox doesn't alert if the returnValue field is not set.
+ if ( event.result !== undefined && event.originalEvent ) {
+ event.originalEvent.returnValue = event.result;
+ }
+ }
+ }
+ }
+};
+
+// Ensure the presence of an event listener that handles manually-triggered
+// synthetic events by interrupting progress until reinvoked in response to
+// *native* events that it fires directly, ensuring that state changes have
+// already occurred before other listeners are invoked.
+function leverageNative( el, type, expectSync ) {
+
+ // Missing expectSync indicates a trigger call, which must force setup through jQuery.event.add
+ if ( !expectSync ) {
+ if ( dataPriv.get( el, type ) === undefined ) {
+ jQuery.event.add( el, type, returnTrue );
+ }
+ return;
+ }
+
+ // Register the controller as a special universal handler for all event namespaces
+ dataPriv.set( el, type, false );
+ jQuery.event.add( el, type, {
+ namespace: false,
+ handler: function( event ) {
+ var notAsync, result,
+ saved = dataPriv.get( this, type );
+
+ if ( ( event.isTrigger & 1 ) && this[ type ] ) {
+
+ // Interrupt processing of the outer synthetic .trigger()ed event
+ // Saved data should be false in such cases, but might be a leftover capture object
+ // from an async native handler (gh-4350)
+ if ( !saved.length ) {
+
+ // Store arguments for use when handling the inner native event
+ // There will always be at least one argument (an event object), so this array
+ // will not be confused with a leftover capture object.
+ saved = slice.call( arguments );
+ dataPriv.set( this, type, saved );
+
+ // Trigger the native event and capture its result
+ // Support: IE <=9 - 11+
+ // focus() and blur() are asynchronous
+ notAsync = expectSync( this, type );
+ this[ type ]();
+ result = dataPriv.get( this, type );
+ if ( saved !== result || notAsync ) {
+ dataPriv.set( this, type, false );
+ } else {
+ result = {};
+ }
+ if ( saved !== result ) {
+
+ // Cancel the outer synthetic event
+ event.stopImmediatePropagation();
+ event.preventDefault();
+ return result.value;
+ }
+
+ // If this is an inner synthetic event for an event with a bubbling surrogate
+ // (focus or blur), assume that the surrogate already propagated from triggering the
+ // native event and prevent that from happening again here.
+ // This technically gets the ordering wrong w.r.t. `.trigger()` (in which the
+ // bubbling surrogate propagates *after* the non-bubbling base), but that seems
+ // less bad than duplication.
+ } else if ( ( jQuery.event.special[ type ] || {} ).delegateType ) {
+ event.stopPropagation();
+ }
+
+ // If this is a native event triggered above, everything is now in order
+ // Fire an inner synthetic event with the original arguments
+ } else if ( saved.length ) {
+
+ // ...and capture the result
+ dataPriv.set( this, type, {
+ value: jQuery.event.trigger(
+
+ // Support: IE <=9 - 11+
+ // Extend with the prototype to reset the above stopImmediatePropagation()
+ jQuery.extend( saved[ 0 ], jQuery.Event.prototype ),
+ saved.slice( 1 ),
+ this
+ )
+ } );
+
+ // Abort handling of the native event
+ event.stopImmediatePropagation();
+ }
+ }
+ } );
+}
+
+jQuery.removeEvent = function( elem, type, handle ) {
+
+ // This "if" is needed for plain objects
+ if ( elem.removeEventListener ) {
+ elem.removeEventListener( type, handle );
+ }
+};
+
+jQuery.Event = function( src, props ) {
+
+ // Allow instantiation without the 'new' keyword
+ if ( !( this instanceof jQuery.Event ) ) {
+ return new jQuery.Event( src, props );
+ }
+
+ // Event object
+ if ( src && src.type ) {
+ this.originalEvent = src;
+ this.type = src.type;
+
+ // Events bubbling up the document may have been marked as prevented
+ // by a handler lower down the tree; reflect the correct value.
+ this.isDefaultPrevented = src.defaultPrevented ||
+ src.defaultPrevented === undefined &&
+
+ // Support: Android <=2.3 only
+ src.returnValue === false ?
+ returnTrue :
+ returnFalse;
+
+ // Create target properties
+ // Support: Safari <=6 - 7 only
+ // Target should not be a text node (#504, #13143)
+ this.target = ( src.target && src.target.nodeType === 3 ) ?
+ src.target.parentNode :
+ src.target;
+
+ this.currentTarget = src.currentTarget;
+ this.relatedTarget = src.relatedTarget;
+
+ // Event type
+ } else {
+ this.type = src;
+ }
+
+ // Put explicitly provided properties onto the event object
+ if ( props ) {
+ jQuery.extend( this, props );
+ }
+
+ // Create a timestamp if incoming event doesn't have one
+ this.timeStamp = src && src.timeStamp || Date.now();
+
+ // Mark it as fixed
+ this[ jQuery.expando ] = true;
+};
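+
+// Usage sketch (illustrative only): props passed to the constructor are
+// copied onto the event, so synthetic events can carry extra state:
+//
+//     var e = jQuery.Event( "keydown", { keyCode: 27 } );
+//     e.keyCode;     // 27
+//     e.timeStamp;   // auto-filled via Date.now()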
+
+// jQuery.Event is based on DOM3 Events as specified by the ECMAScript Language Binding
+// https://www.w3.org/TR/2003/WD-DOM-Level-3-Events-20030331/ecma-script-binding.html
+jQuery.Event.prototype = {
+ constructor: jQuery.Event,
+ isDefaultPrevented: returnFalse,
+ isPropagationStopped: returnFalse,
+ isImmediatePropagationStopped: returnFalse,
+ isSimulated: false,
+
+ preventDefault: function() {
+ var e = this.originalEvent;
+
+ this.isDefaultPrevented = returnTrue;
+
+ if ( e && !this.isSimulated ) {
+ e.preventDefault();
+ }
+ },
+ stopPropagation: function() {
+ var e = this.originalEvent;
+
+ this.isPropagationStopped = returnTrue;
+
+ if ( e && !this.isSimulated ) {
+ e.stopPropagation();
+ }
+ },
+ stopImmediatePropagation: function() {
+ var e = this.originalEvent;
+
+ this.isImmediatePropagationStopped = returnTrue;
+
+ if ( e && !this.isSimulated ) {
+ e.stopImmediatePropagation();
+ }
+
+ this.stopPropagation();
+ }
+};
+
+// Includes all common event props including KeyEvent and MouseEvent specific props
+jQuery.each( {
+ altKey: true,
+ bubbles: true,
+ cancelable: true,
+ changedTouches: true,
+ ctrlKey: true,
+ detail: true,
+ eventPhase: true,
+ metaKey: true,
+ pageX: true,
+ pageY: true,
+ shiftKey: true,
+ view: true,
+ "char": true,
+ code: true,
+ charCode: true,
+ key: true,
+ keyCode: true,
+ button: true,
+ buttons: true,
+ clientX: true,
+ clientY: true,
+ offsetX: true,
+ offsetY: true,
+ pointerId: true,
+ pointerType: true,
+ screenX: true,
+ screenY: true,
+ targetTouches: true,
+ toElement: true,
+ touches: true,
+
+ which: function( event ) {
+ var button = event.button;
+
+ // Add which for key events
+ if ( event.which == null && rkeyEvent.test( event.type ) ) {
+ return event.charCode != null ? event.charCode : event.keyCode;
+ }
+
+ // Add which for click: 1 === left; 2 === middle; 3 === right
+ if ( !event.which && button !== undefined && rmouseEvent.test( event.type ) ) {
+ if ( button & 1 ) {
+ return 1;
+ }
+
+ if ( button & 2 ) {
+ return 3;
+ }
+
+ if ( button & 4 ) {
+ return 2;
+ }
+
+ return 0;
+ }
+
+ return event.which;
+ }
+}, jQuery.event.addProp );
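+
+// Worked example (illustrative only): the `which` hook above maps the W3C
+// `button` bitmask onto the legacy codes:
+//
+//     // button & 1 (left)   -> which === 1
+//     // button & 4 (middle) -> which === 2
+//     // button & 2 (right)  -> which === 3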
+
+jQuery.each( { focus: "focusin", blur: "focusout" }, function( type, delegateType ) {
+ jQuery.event.special[ type ] = {
+
+ // Utilize native event if possible so blur/focus sequence is correct
+ setup: function() {
+
+ // Claim the first handler
+ // dataPriv.set( this, "focus", ... )
+ // dataPriv.set( this, "blur", ... )
+ leverageNative( this, type, expectSync );
+
+ // Return false to allow normal processing in the caller
+ return false;
+ },
+ trigger: function() {
+
+ // Force setup before trigger
+ leverageNative( this, type );
+
+ // Return non-false to allow normal event-path propagation
+ return true;
+ },
+
+ delegateType: delegateType
+ };
+} );
+
+// Create mouseenter/leave events using mouseover/out and event-time checks
+// so that event delegation works in jQuery.
+// Do the same for pointerenter/pointerleave and pointerover/pointerout
+//
+// Support: Safari 7 only
+// Safari sends mouseenter too often; see:
+// https://bugs.chromium.org/p/chromium/issues/detail?id=470258
+// for the description of the bug (it existed in older Chrome versions as well).
+jQuery.each( {
+ mouseenter: "mouseover",
+ mouseleave: "mouseout",
+ pointerenter: "pointerover",
+ pointerleave: "pointerout"
+}, function( orig, fix ) {
+ jQuery.event.special[ orig ] = {
+ delegateType: fix,
+ bindType: fix,
+
+ handle: function( event ) {
+ var ret,
+ target = this,
+ related = event.relatedTarget,
+ handleObj = event.handleObj;
+
+ // For mouseenter/leave call the handler if related is outside the target.
+ // NB: No relatedTarget if the mouse left/entered the browser window
+ if ( !related || ( related !== target && !jQuery.contains( target, related ) ) ) {
+ event.type = handleObj.origType;
+ ret = handleObj.handler.apply( this, arguments );
+ event.type = fix;
+ }
+ return ret;
+ }
+ };
+} );
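+
+// Usage sketch (illustrative only): because mouseenter is remapped to
+// mouseover with an event-time relatedTarget check, delegation works:
+//
+//     jQuery( "#list" ).on( "mouseenter", "li", function() {
+//         // fires once per <li> entry, not for every inner mouseover
+//     } );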
+
+jQuery.fn.extend( {
+
+ on: function( types, selector, data, fn ) {
+ return on( this, types, selector, data, fn );
+ },
+ one: function( types, selector, data, fn ) {
+ return on( this, types, selector, data, fn, 1 );
+ },
+ off: function( types, selector, fn ) {
+ var handleObj, type;
+ if ( types && types.preventDefault && types.handleObj ) {
+
+ // ( event ) dispatched jQuery.Event
+ handleObj = types.handleObj;
+ jQuery( types.delegateTarget ).off(
+ handleObj.namespace ?
+ handleObj.origType + "." + handleObj.namespace :
+ handleObj.origType,
+ handleObj.selector,
+ handleObj.handler
+ );
+ return this;
+ }
+ if ( typeof types === "object" ) {
+
+ // ( types-object [, selector] )
+ for ( type in types ) {
+ this.off( type, selector, types[ type ] );
+ }
+ return this;
+ }
+ if ( selector === false || typeof selector === "function" ) {
+
+ // ( types [, fn] )
+ fn = selector;
+ selector = undefined;
+ }
+ if ( fn === false ) {
+ fn = returnFalse;
+ }
+ return this.each( function() {
+ jQuery.event.remove( this, types, fn, selector );
+ } );
+ }
+} );
+
+
+var
+
+ // Support: IE <=10 - 11, Edge 12 - 13 only
+ // In IE/Edge using regex groups here causes severe slowdowns.
+ // See https://connect.microsoft.com/IE/feedback/details/1736512/
+ rnoInnerhtml = /<script|<style|<link/i,
+
+ // checked="checked" or checked
+ rchecked = /checked\s*(?:[^=]|=\s*.checked.)/i,
+ rcleanScript = /^\s*<!(?:\[CDATA\[|--)|(?:\]\]|--)>\s*$/g;
+
+// Prefer a tbody over its parent table for containing new rows
+function manipulationTarget( elem, content ) {
+ if ( nodeName( elem, "table" ) &&
+ nodeName( content.nodeType !== 11 ? content : content.firstChild, "tr" ) ) {
+
+ return jQuery( elem ).children( "tbody" )[ 0 ] || elem;
+ }
+
+ return elem;
+}
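+
+// Worked example (illustrative only): appending a row to a table is
+// redirected into its existing <tbody>, if any:
+//
+//     jQuery( "table" ).append( "<tr><td>x</td></tr>" );   // lands in the tbody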
+
+// Replace/restore the type attribute of script elements for safe DOM manipulation
+function disableScript( elem ) {
+ elem.type = ( elem.getAttribute( "type" ) !== null ) + "/" + elem.type;
+ return elem;
+}
+function restoreScript( elem ) {
+ if ( ( elem.type || "" ).slice( 0, 5 ) === "true/" ) {
+ elem.type = elem.type.slice( 5 );
+ } else {
+ elem.removeAttribute( "type" );
+ }
+
+ return elem;
+}
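+
+// Worked example (illustrative only, s being a <script> element with
+// type="text/javascript"): the round-trip neuters scripts while their host
+// nodes move through the DOM:
+//
+//     disableScript( s ).type;   // "true/text/javascript"
+//     restoreScript( s ).type;   // "text/javascript" again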
+
+function cloneCopyEvent( src, dest ) {
+ var i, l, type, pdataOld, udataOld, udataCur, events;
+
+ if ( dest.nodeType !== 1 ) {
+ return;
+ }
+
+ // 1. Copy private data: events, handlers, etc.
+ if ( dataPriv.hasData( src ) ) {
+ pdataOld = dataPriv.get( src );
+ events = pdataOld.events;
+
+ if ( events ) {
+ dataPriv.remove( dest, "handle events" );
+
+ for ( type in events ) {
+ for ( i = 0, l = events[ type ].length; i < l; i++ ) {
+ jQuery.event.add( dest, type, events[ type ][ i ] );
+ }
+ }
+ }
+ }
+
+ // 2. Copy user data
+ if ( dataUser.hasData( src ) ) {
+ udataOld = dataUser.access( src );
+ udataCur = jQuery.extend( {}, udataOld );
+
+ dataUser.set( dest, udataCur );
+ }
+}
+
+// Fix IE bugs, see support tests
+function fixInput( src, dest ) {
+ var nodeName = dest.nodeName.toLowerCase();
+
+ // Fails to persist the checked state of a cloned checkbox or radio button.
+ if ( nodeName === "input" && rcheckableType.test( src.type ) ) {
+ dest.checked = src.checked;
+
+ // Fails to return the selected option to the default selected state when cloning options
+ } else if ( nodeName === "input" || nodeName === "textarea" ) {
+ dest.defaultValue = src.defaultValue;
+ }
+}
+
+function domManip( collection, args, callback, ignored ) {
+
+ // Flatten any nested arrays
+ args = flat( args );
+
+ var fragment, first, scripts, hasScripts, node, doc,
+ i = 0,
+ l = collection.length,
+ iNoClone = l - 1,
+ value = args[ 0 ],
+ valueIsFunction = isFunction( value );
+
+ // We can't cloneNode fragments that contain checked, in WebKit
+ if ( valueIsFunction ||
+ ( l > 1 && typeof value === "string" &&
+ !support.checkClone && rchecked.test( value ) ) ) {
+ return collection.each( function( index ) {
+ var self = collection.eq( index );
+ if ( valueIsFunction ) {
+ args[ 0 ] = value.call( this, index, self.html() );
+ }
+ domManip( self, args, callback, ignored );
+ } );
+ }
+
+ if ( l ) {
+ fragment = buildFragment( args, collection[ 0 ].ownerDocument, false, collection, ignored );
+ first = fragment.firstChild;
+
+ if ( fragment.childNodes.length === 1 ) {
+ fragment = first;
+ }
+
+ // Require either new content or an interest in ignored elements to invoke the callback
+ if ( first || ignored ) {
+ scripts = jQuery.map( getAll( fragment, "script" ), disableScript );
+ hasScripts = scripts.length;
+
+ // Use the original fragment for the last item
+ // instead of the first because it can end up
+ // being emptied incorrectly in certain situations (#8070).
+ for ( ; i < l; i++ ) {
+ node = fragment;
+
+ if ( i !== iNoClone ) {
+ node = jQuery.clone( node, true, true );
+
+ // Keep references to cloned scripts for later restoration
+ if ( hasScripts ) {
+
+ // Support: Android <=4.0 only, PhantomJS 1 only
+ // push.apply(_, arraylike) throws on ancient WebKit
+ jQuery.merge( scripts, getAll( node, "script" ) );
+ }
+ }
+
+ callback.call( collection[ i ], node, i );
+ }
+
+ if ( hasScripts ) {
+ doc = scripts[ scripts.length - 1 ].ownerDocument;
+
+ // Reenable scripts
+ jQuery.map( scripts, restoreScript );
+
+ // Evaluate executable scripts on first document insertion
+ for ( i = 0; i < hasScripts; i++ ) {
+ node = scripts[ i ];
+ if ( rscriptType.test( node.type || "" ) &&
+ !dataPriv.access( node, "globalEval" ) &&
+ jQuery.contains( doc, node ) ) {
+
+ if ( node.src && ( node.type || "" ).toLowerCase() !== "module" ) {
+
+ // Optional AJAX dependency, but won't run scripts if not present
+ if ( jQuery._evalUrl && !node.noModule ) {
+ jQuery._evalUrl( node.src, {
+ nonce: node.nonce || node.getAttribute( "nonce" )
+ }, doc );
+ }
+ } else {
+ DOMEval( node.textContent.replace( rcleanScript, "" ), node, doc );
+ }
+ }
+ }
+ }
+ }
+ }
+
+ return collection;
+}
+
+function remove( elem, selector, keepData ) {
+ var node,
+ nodes = selector ? jQuery.filter( selector, elem ) : elem,
+ i = 0;
+
+ for ( ; ( node = nodes[ i ] ) != null; i++ ) {
+ if ( !keepData && node.nodeType === 1 ) {
+ jQuery.cleanData( getAll( node ) );
+ }
+
+ if ( node.parentNode ) {
+ if ( keepData && isAttached( node ) ) {
+ setGlobalEval( getAll( node, "script" ) );
+ }
+ node.parentNode.removeChild( node );
+ }
+ }
+
+ return elem;
+}
+
+jQuery.extend( {
+ htmlPrefilter: function( html ) {
+ return html;
+ },
+
+ clone: function( elem, dataAndEvents, deepDataAndEvents ) {
+ var i, l, srcElements, destElements,
+ clone = elem.cloneNode( true ),
+ inPage = isAttached( elem );
+
+ // Fix IE cloning issues
+ if ( !support.noCloneChecked && ( elem.nodeType === 1 || elem.nodeType === 11 ) &&
+ !jQuery.isXMLDoc( elem ) ) {
+
+ // We eschew Sizzle here for performance reasons: https://jsperf.com/getall-vs-sizzle/2
+ destElements = getAll( clone );
+ srcElements = getAll( elem );
+
+ for ( i = 0, l = srcElements.length; i < l; i++ ) {
+ fixInput( srcElements[ i ], destElements[ i ] );
+ }
+ }
+
+ // Copy the events from the original to the clone
+ if ( dataAndEvents ) {
+ if ( deepDataAndEvents ) {
+ srcElements = srcElements || getAll( elem );
+ destElements = destElements || getAll( clone );
+
+ for ( i = 0, l = srcElements.length; i < l; i++ ) {
+ cloneCopyEvent( srcElements[ i ], destElements[ i ] );
+ }
+ } else {
+ cloneCopyEvent( elem, clone );
+ }
+ }
+
+ // Preserve script evaluation history
+ destElements = getAll( clone, "script" );
+ if ( destElements.length > 0 ) {
+ setGlobalEval( destElements, !inPage && getAll( elem, "script" ) );
+ }
+
+ // Return the cloned set
+ return clone;
+ },
+
+ cleanData: function( elems ) {
+ var data, elem, type,
+ special = jQuery.event.special,
+ i = 0;
+
+ for ( ; ( elem = elems[ i ] ) !== undefined; i++ ) {
+ if ( acceptData( elem ) ) {
+ if ( ( data = elem[ dataPriv.expando ] ) ) {
+ if ( data.events ) {
+ for ( type in data.events ) {
+ if ( special[ type ] ) {
+ jQuery.event.remove( elem, type );
+
+ // This is a shortcut to avoid jQuery.event.remove's overhead
+ } else {
+ jQuery.removeEvent( elem, type, data.handle );
+ }
+ }
+ }
+
+ // Support: Chrome <=35 - 45+
+ // Assign undefined instead of using delete, see Data#remove
+ elem[ dataPriv.expando ] = undefined;
+ }
+ if ( elem[ dataUser.expando ] ) {
+
+ // Support: Chrome <=35 - 45+
+ // Assign undefined instead of using delete, see Data#remove
+ elem[ dataUser.expando ] = undefined;
+ }
+ }
+ }
+ }
+} );
+
+jQuery.fn.extend( {
+ detach: function( selector ) {
+ return remove( this, selector, true );
+ },
+
+ remove: function( selector ) {
+ return remove( this, selector );
+ },
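+
+ // Illustrative sketch (hypothetical IDs, assuming `$` is jQuery):
+ // .detach() passes keepData === true, so jQuery data and bound handlers
+ // survive re-insertion, while .remove() cleans them up:
+ //
+ //   var $panel = $( "#panel" ).detach(); // handlers and data survive
+ //   $( "body" ).append( $panel );        // handlers still fire
+ //   $( "#gone" ).remove();               // handlers and data are cleaned up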
+
+ text: function( value ) {
+ return access( this, function( value ) {
+ return value === undefined ?
+ jQuery.text( this ) :
+ this.empty().each( function() {
+ if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) {
+ this.textContent = value;
+ }
+ } );
+ }, null, value, arguments.length );
+ },
+
+ append: function() {
+ return domManip( this, arguments, function( elem ) {
+ if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) {
+ var target = manipulationTarget( this, elem );
+ target.appendChild( elem );
+ }
+ } );
+ },
+
+ prepend: function() {
+ return domManip( this, arguments, function( elem ) {
+ if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) {
+ var target = manipulationTarget( this, elem );
+ target.insertBefore( elem, target.firstChild );
+ }
+ } );
+ },
+
+ before: function() {
+ return domManip( this, arguments, function( elem ) {
+ if ( this.parentNode ) {
+ this.parentNode.insertBefore( elem, this );
+ }
+ } );
+ },
+
+ after: function() {
+ return domManip( this, arguments, function( elem ) {
+ if ( this.parentNode ) {
+ this.parentNode.insertBefore( elem, this.nextSibling );
+ }
+ } );
+ },
+
+ empty: function() {
+ var elem,
+ i = 0;
+
+ for ( ; ( elem = this[ i ] ) != null; i++ ) {
+ if ( elem.nodeType === 1 ) {
+
+ // Prevent memory leaks
+ jQuery.cleanData( getAll( elem, false ) );
+
+ // Remove any remaining nodes
+ elem.textContent = "";
+ }
+ }
+
+ return this;
+ },
+
+ clone: function( dataAndEvents, deepDataAndEvents ) {
+ dataAndEvents = dataAndEvents == null ? false : dataAndEvents;
+ deepDataAndEvents = deepDataAndEvents == null ? dataAndEvents : deepDataAndEvents;
+
+ return this.map( function() {
+ return jQuery.clone( this, dataAndEvents, deepDataAndEvents );
+ } );
+ },
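+
+ // Illustrative sketch: both flags default to false; the first copies events
+ // and data for the element itself, the second for all of its descendants:
+ //
+ //   var $copy = $( "#widget" ).clone( true, true ); // hypothetical ID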
+
+ html: function( value ) {
+ return access( this, function( value ) {
+ var elem = this[ 0 ] || {},
+ i = 0,
+ l = this.length;
+
+ if ( value === undefined && elem.nodeType === 1 ) {
+ return elem.innerHTML;
+ }
+
+ // See if we can take a shortcut and just use innerHTML
+ if ( typeof value === "string" && !rnoInnerhtml.test( value ) &&
+ !wrapMap[ ( rtagName.exec( value ) || [ "", "" ] )[ 1 ].toLowerCase() ] ) {
+
+ value = jQuery.htmlPrefilter( value );
+
+ try {
+ for ( ; i < l; i++ ) {
+ elem = this[ i ] || {};
+
+ // Remove element nodes and prevent memory leaks
+ if ( elem.nodeType === 1 ) {
+ jQuery.cleanData( getAll( elem, false ) );
+ elem.innerHTML = value;
+ }
+ }
+
+ elem = 0;
+
+ // If using innerHTML throws an exception, use the fallback method
+ } catch ( e ) {}
+ }
+
+ if ( elem ) {
+ this.empty().append( value );
+ }
+ }, null, value, arguments.length );
+ },
+
+ replaceWith: function() {
+ var ignored = [];
+
+ // Make the changes, replacing each non-ignored context element with the new content
+ return domManip( this, arguments, function( elem ) {
+ var parent = this.parentNode;
+
+ if ( jQuery.inArray( this, ignored ) < 0 ) {
+ jQuery.cleanData( getAll( this ) );
+ if ( parent ) {
+ parent.replaceChild( elem, this );
+ }
+ }
+
+ // Force callback invocation
+ }, ignored );
+ }
+} );
+
+jQuery.each( {
+ appendTo: "append",
+ prependTo: "prepend",
+ insertBefore: "before",
+ insertAfter: "after",
+ replaceAll: "replaceWith"
+}, function( name, original ) {
+ jQuery.fn[ name ] = function( selector ) {
+ var elems,
+ ret = [],
+ insert = jQuery( selector ),
+ last = insert.length - 1,
+ i = 0;
+
+ for ( ; i <= last; i++ ) {
+ elems = i === last ? this : this.clone( true );
+ jQuery( insert[ i ] )[ original ]( elems );
+
+ // Support: Android <=4.0 only, PhantomJS 1 only
+ // .get() because push.apply(_, arraylike) throws on ancient WebKit
+ push.apply( ret, elems.get() );
+ }
+
+ return this.pushStack( ret );
+ };
+} );
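+
+// Illustrative sketch: the reversed-name methods flip subject and target.
+// When the destination matches several elements, the source is cloned (with
+// events) for every insertion point except the last:
+//
+//   $( "<li>item</li>" ).appendTo( "ul.menu" ); // same effect as
+//   $( "ul.menu" ).append( "<li>item</li>" );
+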
+var rnumnonpx = new RegExp( "^(" + pnum + ")(?!px)[a-z%]+$", "i" );
+
+var getStyles = function( elem ) {
+
+ // Support: IE <=11 only, Firefox <=30 (#15098, #14150)
+ // IE throws on elements created in popups
+ // FF meanwhile throws on frame elements through "defaultView.getComputedStyle"
+ var view = elem.ownerDocument.defaultView;
+
+ if ( !view || !view.opener ) {
+ view = window;
+ }
+
+ return view.getComputedStyle( elem );
+ };
+
+var swap = function( elem, options, callback ) {
+ var ret, name,
+ old = {};
+
+ // Remember the old values, and insert the new ones
+ for ( name in options ) {
+ old[ name ] = elem.style[ name ];
+ elem.style[ name ] = options[ name ];
+ }
+
+ ret = callback.call( elem );
+
+ // Revert the old values
+ for ( name in options ) {
+ elem.style[ name ] = old[ name ];
+ }
+
+ return ret;
+};
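+
+// Illustrative sketch: swap() runs a measurement against temporary styles and
+// restores the originals afterwards; the width/height cssHooks below use it
+// with cssShow to measure hidden elements:
+//
+//   var w = swap( elem, { position: "absolute", visibility: "hidden",
+//       display: "block" }, function() {
+//       return elem.getBoundingClientRect().width;
+//   } );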
+
+
+var rboxStyle = new RegExp( cssExpand.join( "|" ), "i" );
+
+
+
+( function() {
+
+ // Executing both pixelPosition & boxSizingReliable tests requires only one
+ // layout, so they're executed at the same time to save a second computation.
+ function computeStyleTests() {
+
+ // This is a singleton; we need to execute it only once
+ if ( !div ) {
+ return;
+ }
+
+ container.style.cssText = "position:absolute;left:-11111px;width:60px;" +
+ "margin-top:1px;padding:0;border:0";
+ div.style.cssText =
+ "position:relative;display:block;box-sizing:border-box;overflow:scroll;" +
+ "margin:auto;border:1px;padding:1px;" +
+ "width:60%;top:1%";
+ documentElement.appendChild( container ).appendChild( div );
+
+ var divStyle = window.getComputedStyle( div );
+ pixelPositionVal = divStyle.top !== "1%";
+
+ // Support: Android 4.0 - 4.3 only, Firefox <=3 - 44
+ reliableMarginLeftVal = roundPixelMeasures( divStyle.marginLeft ) === 12;
+
+ // Support: Android 4.0 - 4.3 only, Safari <=9.1 - 10.1, iOS <=7.0 - 9.3
+ // Some styles come back with percentage values, even though they shouldn't
+ div.style.right = "60%";
+ pixelBoxStylesVal = roundPixelMeasures( divStyle.right ) === 36;
+
+ // Support: IE 9 - 11 only
+ // Detect misreporting of content dimensions for box-sizing:border-box elements
+ boxSizingReliableVal = roundPixelMeasures( divStyle.width ) === 36;
+
+ // Support: IE 9 only
+ // Detect overflow:scroll screwiness (gh-3699)
+ // Support: Chrome <=64
+ // Don't get tricked when zoom affects offsetWidth (gh-4029)
+ div.style.position = "absolute";
+ scrollboxSizeVal = roundPixelMeasures( div.offsetWidth / 3 ) === 12;
+
+ documentElement.removeChild( container );
+
+ // Nullify the div so it isn't retained in memory; a null div also
+ // signals that the checks have already been performed
+ div = null;
+ }
+
+ function roundPixelMeasures( measure ) {
+ return Math.round( parseFloat( measure ) );
+ }
+
+ var pixelPositionVal, boxSizingReliableVal, scrollboxSizeVal, pixelBoxStylesVal,
+ reliableTrDimensionsVal, reliableMarginLeftVal,
+ container = document.createElement( "div" ),
+ div = document.createElement( "div" );
+
+ // Finish early in limited (non-browser) environments
+ if ( !div.style ) {
+ return;
+ }
+
+ // Support: IE <=9 - 11 only
+ // Style of cloned element affects source element cloned (#8908)
+ div.style.backgroundClip = "content-box";
+ div.cloneNode( true ).style.backgroundClip = "";
+ support.clearCloneStyle = div.style.backgroundClip === "content-box";
+
+ jQuery.extend( support, {
+ boxSizingReliable: function() {
+ computeStyleTests();
+ return boxSizingReliableVal;
+ },
+ pixelBoxStyles: function() {
+ computeStyleTests();
+ return pixelBoxStylesVal;
+ },
+ pixelPosition: function() {
+ computeStyleTests();
+ return pixelPositionVal;
+ },
+ reliableMarginLeft: function() {
+ computeStyleTests();
+ return reliableMarginLeftVal;
+ },
+ scrollboxSize: function() {
+ computeStyleTests();
+ return scrollboxSizeVal;
+ },
+
+ // Support: IE 9 - 11+, Edge 15 - 18+
+ // IE/Edge misreport `getComputedStyle` of table rows with width/height
+ // set in CSS while `offset*` properties report correct values.
+ // Behavior in IE 9 is more subtle than in newer versions & it passes
+ // some versions of this test; make sure not to make it pass there!
+ reliableTrDimensions: function() {
+ var table, tr, trChild, trStyle;
+ if ( reliableTrDimensionsVal == null ) {
+ table = document.createElement( "table" );
+ tr = document.createElement( "tr" );
+ trChild = document.createElement( "div" );
+
+ table.style.cssText = "position:absolute;left:-11111px";
+ tr.style.height = "1px";
+ trChild.style.height = "9px";
+
+ documentElement
+ .appendChild( table )
+ .appendChild( tr )
+ .appendChild( trChild );
+
+ trStyle = window.getComputedStyle( tr );
+ reliableTrDimensionsVal = parseInt( trStyle.height ) > 3;
+
+ documentElement.removeChild( table );
+ }
+ return reliableTrDimensionsVal;
+ }
+ } );
+} )();
+
+
+function curCSS( elem, name, computed ) {
+ var width, minWidth, maxWidth, ret,
+
+ // Support: Firefox 51+
+ // Retrieving style before computed somehow
+ // fixes an issue with getting wrong values
+ // on detached elements
+ style = elem.style;
+
+ computed = computed || getStyles( elem );
+
+ // getPropertyValue is needed for:
+ // .css('filter') (IE 9 only, #12537)
+ // .css('--customProperty') (#3144)
+ if ( computed ) {
+ ret = computed.getPropertyValue( name ) || computed[ name ];
+
+ if ( ret === "" && !isAttached( elem ) ) {
+ ret = jQuery.style( elem, name );
+ }
+
+ // A tribute to the "awesome hack by Dean Edwards"
+ // Android Browser returns percentage for some values,
+ // but width seems to be reliably pixels.
+ // This is against the CSSOM draft spec:
+ // https://drafts.csswg.org/cssom/#resolved-values
+ if ( !support.pixelBoxStyles() && rnumnonpx.test( ret ) && rboxStyle.test( name ) ) {
+
+ // Remember the original values
+ width = style.width;
+ minWidth = style.minWidth;
+ maxWidth = style.maxWidth;
+
+ // Put in the new values to get a computed value out
+ style.minWidth = style.maxWidth = style.width = ret;
+ ret = computed.width;
+
+ // Revert the changed values
+ style.width = width;
+ style.minWidth = minWidth;
+ style.maxWidth = maxWidth;
+ }
+ }
+
+ return ret !== undefined ?
+
+ // Support: IE <=9 - 11 only
+ // IE returns zIndex value as an integer.
+ ret + "" :
+ ret;
+}
+
+
+function addGetHookIf( conditionFn, hookFn ) {
+
+ // Define the hook; we'll check on the first run whether it's really needed.
+ return {
+ get: function() {
+ if ( conditionFn() ) {
+
+ // Hook not needed (or it's not possible to use it due
+ // to missing dependency), remove it.
+ delete this.get;
+ return;
+ }
+
+ // Hook needed; redefine it so that the support test is not executed again.
+ return ( this.get = hookFn ).apply( this, arguments );
+ }
+ };
+}
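+
+// Illustrative sketch (hypothetical support test and fixer): if conditionFn()
+// reports the browser is reliable, the hook deletes itself on first use;
+// otherwise hookFn permanently takes over, so the test never runs again:
+//
+//   jQuery.cssHooks.example = addGetHookIf( support.someReliableTest,
+//       function( elem, computed ) {
+//           return fixedUpValueFor( elem ); // hypothetical helper
+//       } );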
+
+
+var cssPrefixes = [ "Webkit", "Moz", "ms" ],
+ emptyStyle = document.createElement( "div" ).style,
+ vendorProps = {};
+
+// Return a vendor-prefixed property or undefined
+function vendorPropName( name ) {
+
+ // Check for vendor prefixed names
+ var capName = name[ 0 ].toUpperCase() + name.slice( 1 ),
+ i = cssPrefixes.length;
+
+ while ( i-- ) {
+ name = cssPrefixes[ i ] + capName;
+ if ( name in emptyStyle ) {
+ return name;
+ }
+ }
+}
+
+// Return a potentially-mapped jQuery.cssProps or vendor prefixed property
+function finalPropName( name ) {
+ var final = jQuery.cssProps[ name ] || vendorProps[ name ];
+
+ if ( final ) {
+ return final;
+ }
+ if ( name in emptyStyle ) {
+ return name;
+ }
+ return vendorProps[ name ] = vendorPropName( name ) || name;
+}
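+
+// Illustrative sketch: finalPropName resolves a camelCased name to whatever
+// this engine implements and caches the answer in vendorProps. On a
+// hypothetical engine that only ships the prefixed form:
+//
+//   finalPropName( "userSelect" ); // -> "WebkitUserSelect"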
+
+
+var
+
+ // Swappable if display is none or starts with table,
+ // except "table", "table-cell", or "table-caption"
+ // See here for display values: https://developer.mozilla.org/en-US/docs/CSS/display
+ rdisplayswap = /^(none|table(?!-c[ea]).+)/,
+ rcustomProp = /^--/,
+ cssShow = { position: "absolute", visibility: "hidden", display: "block" },
+ cssNormalTransform = {
+ letterSpacing: "0",
+ fontWeight: "400"
+ };
+
+function setPositiveNumber( _elem, value, subtract ) {
+
+ // Any relative (+/-) values have already been
+ // normalized at this point
+ var matches = rcssNum.exec( value );
+ return matches ?
+
+ // Guard against undefined "subtract", e.g., when used as in cssHooks
+ Math.max( 0, matches[ 2 ] - ( subtract || 0 ) ) + ( matches[ 3 ] || "px" ) :
+ value;
+}
+
+function boxModelAdjustment( elem, dimension, box, isBorderBox, styles, computedVal ) {
+ var i = dimension === "width" ? 1 : 0,
+ extra = 0,
+ delta = 0;
+
+ // Adjustment may not be necessary
+ if ( box === ( isBorderBox ? "border" : "content" ) ) {
+ return 0;
+ }
+
+ for ( ; i < 4; i += 2 ) {
+
+ // Both box models exclude margin
+ if ( box === "margin" ) {
+ delta += jQuery.css( elem, box + cssExpand[ i ], true, styles );
+ }
+
+ // If we get here with a content-box, we're seeking "padding" or "border" or "margin"
+ if ( !isBorderBox ) {
+
+ // Add padding
+ delta += jQuery.css( elem, "padding" + cssExpand[ i ], true, styles );
+
+ // For "border" or "margin", add border
+ if ( box !== "padding" ) {
+ delta += jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles );
+
+ // But still keep track of it otherwise
+ } else {
+ extra += jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles );
+ }
+
+ // If we get here with a border-box (content + padding + border), we're seeking "content" or
+ // "padding" or "margin"
+ } else {
+
+ // For "content", subtract padding
+ if ( box === "content" ) {
+ delta -= jQuery.css( elem, "padding" + cssExpand[ i ], true, styles );
+ }
+
+ // For "content" or "padding", subtract border
+ if ( box !== "margin" ) {
+ delta -= jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles );
+ }
+ }
+ }
+
+ // Account for positive content-box scroll gutter when requested by providing computedVal
+ if ( !isBorderBox && computedVal >= 0 ) {
+
+ // offsetWidth/offsetHeight is a rounded sum of content, padding, scroll gutter, and border
+ // Assuming integer scroll gutter, subtract the rest and round down
+ delta += Math.max( 0, Math.ceil(
+ elem[ "offset" + dimension[ 0 ].toUpperCase() + dimension.slice( 1 ) ] -
+ computedVal -
+ delta -
+ extra -
+ 0.5
+
+ // If offsetWidth/offsetHeight is unknown, then we can't determine content-box scroll gutter
+ // Use an explicit zero to avoid NaN (gh-3964)
+ ) ) || 0;
+ }
+
+ return delta;
+}
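+
+// Worked example (illustrative): for a content-box element with 10px padding
+// and a 2px border on each side, requesting the "border" box for "width" adds
+// ( 10 + 2 ) * 2 = 24 to the content width; outerWidth()/outerHeight() rely
+// on exactly this adjustment.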
+
+function getWidthOrHeight( elem, dimension, extra ) {
+
+ // Start with computed style
+ var styles = getStyles( elem ),
+
+ // To avoid forcing a reflow, only fetch boxSizing if we need it (gh-4322).
+ // Fake content-box until we know it's needed to know the true value.
+ boxSizingNeeded = !support.boxSizingReliable() || extra,
+ isBorderBox = boxSizingNeeded &&
+ jQuery.css( elem, "boxSizing", false, styles ) === "border-box",
+ valueIsBorderBox = isBorderBox,
+
+ val = curCSS( elem, dimension, styles ),
+ offsetProp = "offset" + dimension[ 0 ].toUpperCase() + dimension.slice( 1 );
+
+ // Support: Firefox <=54
+ // Return a confounding non-pixel value or feign ignorance, as appropriate.
+ if ( rnumnonpx.test( val ) ) {
+ if ( !extra ) {
+ return val;
+ }
+ val = "auto";
+ }
+
+
+ // Support: IE 9 - 11 only
+ // Use offsetWidth/offsetHeight for when box sizing is unreliable.
+ // In those cases, the computed value can be trusted to be border-box.
+ if ( ( !support.boxSizingReliable() && isBorderBox ||
+
+ // Support: IE 10 - 11+, Edge 15 - 18+
+ // IE/Edge misreport `getComputedStyle` of table rows with width/height
+ // set in CSS while `offset*` properties report correct values.
+ // Interestingly, in some cases IE 9 doesn't suffer from this issue.
+ !support.reliableTrDimensions() && nodeName( elem, "tr" ) ||
+
+ // Fall back to offsetWidth/offsetHeight when value is "auto"
+ // This happens for inline elements with no explicit setting (gh-3571)
+ val === "auto" ||
+
+ // Support: Android <=4.1 - 4.3 only
+ // Also use offsetWidth/offsetHeight for misreported inline dimensions (gh-3602)
+ !parseFloat( val ) && jQuery.css( elem, "display", false, styles ) === "inline" ) &&
+
+ // Make sure the element is visible & connected
+ elem.getClientRects().length ) {
+
+ isBorderBox = jQuery.css( elem, "boxSizing", false, styles ) === "border-box";
+
+ // Where available, offsetWidth/offsetHeight approximate border box dimensions.
+ // Where not available (e.g., SVG), assume unreliable box-sizing and interpret the
+ // retrieved value as a content box dimension.
+ valueIsBorderBox = offsetProp in elem;
+ if ( valueIsBorderBox ) {
+ val = elem[ offsetProp ];
+ }
+ }
+
+ // Normalize "" and auto
+ val = parseFloat( val ) || 0;
+
+ // Adjust for the element's box model
+ return ( val +
+ boxModelAdjustment(
+ elem,
+ dimension,
+ extra || ( isBorderBox ? "border" : "content" ),
+ valueIsBorderBox,
+ styles,
+
+ // Provide the current computed size to request scroll gutter calculation (gh-3589)
+ val
+ )
+ ) + "px";
+}
+
+jQuery.extend( {
+
+ // Add in style property hooks for overriding the default
+ // behavior of getting and setting a style property
+ cssHooks: {
+ opacity: {
+ get: function( elem, computed ) {
+ if ( computed ) {
+
+ // We should always get a number back from opacity
+ var ret = curCSS( elem, "opacity" );
+ return ret === "" ? "1" : ret;
+ }
+ }
+ }
+ },
+
+ // Don't automatically add "px" to these possibly-unitless properties
+ cssNumber: {
+ "animationIterationCount": true,
+ "columnCount": true,
+ "fillOpacity": true,
+ "flexGrow": true,
+ "flexShrink": true,
+ "fontWeight": true,
+ "gridArea": true,
+ "gridColumn": true,
+ "gridColumnEnd": true,
+ "gridColumnStart": true,
+ "gridRow": true,
+ "gridRowEnd": true,
+ "gridRowStart": true,
+ "lineHeight": true,
+ "opacity": true,
+ "order": true,
+ "orphans": true,
+ "widows": true,
+ "zIndex": true,
+ "zoom": true
+ },
+
+ // Add in properties whose names you wish to fix before
+ // setting or getting the value
+ cssProps: {},
+
+ // Get and set the style property on a DOM Node
+ style: function( elem, name, value, extra ) {
+
+ // Don't set styles on text and comment nodes
+ if ( !elem || elem.nodeType === 3 || elem.nodeType === 8 || !elem.style ) {
+ return;
+ }
+
+ // Make sure that we're working with the right name
+ var ret, type, hooks,
+ origName = camelCase( name ),
+ isCustomProp = rcustomProp.test( name ),
+ style = elem.style;
+
+ // Make sure that we're working with the right name. We don't
+ // want to query the value if it is a CSS custom property
+ // since they are user-defined.
+ if ( !isCustomProp ) {
+ name = finalPropName( origName );
+ }
+
+ // Gets hook for the prefixed version, then unprefixed version
+ hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ];
+
+ // Check if we're setting a value
+ if ( value !== undefined ) {
+ type = typeof value;
+
+ // Convert "+=" or "-=" to relative numbers (#7345)
+ if ( type === "string" && ( ret = rcssNum.exec( value ) ) && ret[ 1 ] ) {
+ value = adjustCSS( elem, name, ret );
+
+ // Fixes bug #9237
+ type = "number";
+ }
+
+ // Make sure that null and NaN values aren't set (#7116)
+ if ( value == null || value !== value ) {
+ return;
+ }
+
+ // If a number was passed in, add the unit (except for certain CSS properties)
+ // The isCustomProp check can be removed in jQuery 4.0 when we only auto-append
+ // "px" to a few hardcoded values.
+ if ( type === "number" && !isCustomProp ) {
+ value += ret && ret[ 3 ] || ( jQuery.cssNumber[ origName ] ? "" : "px" );
+ }
+
+ // background-* props affect original clone's values
+ if ( !support.clearCloneStyle && value === "" && name.indexOf( "background" ) === 0 ) {
+ style[ name ] = "inherit";
+ }
+
+ // If a hook was provided, use that value, otherwise just set the specified value
+ if ( !hooks || !( "set" in hooks ) ||
+ ( value = hooks.set( elem, value, extra ) ) !== undefined ) {
+
+ if ( isCustomProp ) {
+ style.setProperty( name, value );
+ } else {
+ style[ name ] = value;
+ }
+ }
+
+ } else {
+
+ // If a hook was provided get the non-computed value from there
+ if ( hooks && "get" in hooks &&
+ ( ret = hooks.get( elem, false, extra ) ) !== undefined ) {
+
+ return ret;
+ }
+
+ // Otherwise just get the value from the style object
+ return style[ name ];
+ }
+ },
+
+ css: function( elem, name, extra, styles ) {
+ var val, num, hooks,
+ origName = camelCase( name ),
+ isCustomProp = rcustomProp.test( name );
+
+ // Make sure that we're working with the right name. We don't
+ // want to modify the value if it is a CSS custom property
+ // since they are user-defined.
+ if ( !isCustomProp ) {
+ name = finalPropName( origName );
+ }
+
+ // Try prefixed name followed by the unprefixed name
+ hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ];
+
+ // If a hook was provided get the computed value from there
+ if ( hooks && "get" in hooks ) {
+ val = hooks.get( elem, true, extra );
+ }
+
+ // Otherwise, if a way to get the computed value exists, use that
+ if ( val === undefined ) {
+ val = curCSS( elem, name, styles );
+ }
+
+ // Convert "normal" to computed value
+ if ( val === "normal" && name in cssNormalTransform ) {
+ val = cssNormalTransform[ name ];
+ }
+
+ // Make numeric if forced or a qualifier was provided and val looks numeric
+ if ( extra === "" || extra ) {
+ num = parseFloat( val );
+ return extra === true || isFinite( num ) ? num || 0 : val;
+ }
+
+ return val;
+ }
+} );
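+
+// Illustrative sketch: jQuery.style writes and jQuery.css reads, with values
+// normalized on the way in and out:
+//
+//   jQuery.style( elem, "width", "+=10" ); // relative adjustment (#7345)
+//   jQuery.style( elem, "opacity", 0.5 );  // no "px" appended (cssNumber)
+//   jQuery.css( elem, "width", "" );       // numeric return when parseable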
+
+jQuery.each( [ "height", "width" ], function( _i, dimension ) {
+ jQuery.cssHooks[ dimension ] = {
+ get: function( elem, computed, extra ) {
+ if ( computed ) {
+
+ // Certain elements can have dimension info if we invisibly show them
+ // but it must have a current display style that would benefit
+ return rdisplayswap.test( jQuery.css( elem, "display" ) ) &&
+
+ // Support: Safari 8+
+ // Table columns in Safari have non-zero offsetWidth & zero
+ // getBoundingClientRect().width unless display is changed.
+ // Support: IE <=11 only
+ // Running getBoundingClientRect on a disconnected node
+ // in IE throws an error.
+ ( !elem.getClientRects().length || !elem.getBoundingClientRect().width ) ?
+ swap( elem, cssShow, function() {
+ return getWidthOrHeight( elem, dimension, extra );
+ } ) :
+ getWidthOrHeight( elem, dimension, extra );
+ }
+ },
+
+ set: function( elem, value, extra ) {
+ var matches,
+ styles = getStyles( elem ),
+
+ // Only read styles.position if the test has a chance to fail
+ // to avoid forcing a reflow.
+ scrollboxSizeBuggy = !support.scrollboxSize() &&
+ styles.position === "absolute",
+
+ // To avoid forcing a reflow, only fetch boxSizing if we need it (gh-3991)
+ boxSizingNeeded = scrollboxSizeBuggy || extra,
+ isBorderBox = boxSizingNeeded &&
+ jQuery.css( elem, "boxSizing", false, styles ) === "border-box",
+ subtract = extra ?
+ boxModelAdjustment(
+ elem,
+ dimension,
+ extra,
+ isBorderBox,
+ styles
+ ) :
+ 0;
+
+ // Account for unreliable border-box dimensions by comparing offset* to computed and
+ // faking a content-box to get border and padding (gh-3699)
+ if ( isBorderBox && scrollboxSizeBuggy ) {
+ subtract -= Math.ceil(
+ elem[ "offset" + dimension[ 0 ].toUpperCase() + dimension.slice( 1 ) ] -
+ parseFloat( styles[ dimension ] ) -
+ boxModelAdjustment( elem, dimension, "border", false, styles ) -
+ 0.5
+ );
+ }
+
+ // Convert to pixels if value adjustment is needed
+ if ( subtract && ( matches = rcssNum.exec( value ) ) &&
+ ( matches[ 3 ] || "px" ) !== "px" ) {
+
+ elem.style[ dimension ] = value;
+ value = jQuery.css( elem, dimension );
+ }
+
+ return setPositiveNumber( elem, value, subtract );
+ }
+ };
+} );
+
+jQuery.cssHooks.marginLeft = addGetHookIf( support.reliableMarginLeft,
+ function( elem, computed ) {
+ if ( computed ) {
+ return ( parseFloat( curCSS( elem, "marginLeft" ) ) ||
+ elem.getBoundingClientRect().left -
+ swap( elem, { marginLeft: 0 }, function() {
+ return elem.getBoundingClientRect().left;
+ } )
+ ) + "px";
+ }
+ }
+);
+
+// These hooks are used by animate to expand properties
+jQuery.each( {
+ margin: "",
+ padding: "",
+ border: "Width"
+}, function( prefix, suffix ) {
+ jQuery.cssHooks[ prefix + suffix ] = {
+ expand: function( value ) {
+ var i = 0,
+ expanded = {},
+
+ // Assumes a single number if not a string
+ parts = typeof value === "string" ? value.split( " " ) : [ value ];
+
+ for ( ; i < 4; i++ ) {
+ expanded[ prefix + cssExpand[ i ] + suffix ] =
+ parts[ i ] || parts[ i - 2 ] || parts[ 0 ];
+ }
+
+ return expanded;
+ }
+ };
+
+ if ( prefix !== "margin" ) {
+ jQuery.cssHooks[ prefix + suffix ].set = setPositiveNumber;
+ }
+} );
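+
+// Worked example (illustrative): the expand hook unpacks shorthand the same
+// way the CSS cascade does, so animate() can tween each side independently:
+//
+//   jQuery.cssHooks.margin.expand( "10px 20px" );
+//   // -> { marginTop: "10px", marginRight: "20px",
+//   //      marginBottom: "10px", marginLeft: "20px" }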
+
+jQuery.fn.extend( {
+ css: function( name, value ) {
+ return access( this, function( elem, name, value ) {
+ var styles, len,
+ map = {},
+ i = 0;
+
+ if ( Array.isArray( name ) ) {
+ styles = getStyles( elem );
+ len = name.length;
+
+ for ( ; i < len; i++ ) {
+ map[ name[ i ] ] = jQuery.css( elem, name[ i ], false, styles );
+ }
+
+ return map;
+ }
+
+ return value !== undefined ?
+ jQuery.style( elem, name, value ) :
+ jQuery.css( elem, name );
+ }, name, value, arguments.length > 1 );
+ }
+} );
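+
+// Illustrative sketch: .css() also accepts an array of names and returns a
+// map, reading every value from a single computed-style object:
+//
+//   $( "#box" ).css( [ "width", "height" ] );
+//   // -> e.g. { width: "100px", height: "50px" }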
+
+
+function Tween( elem, options, prop, end, easing ) {
+ return new Tween.prototype.init( elem, options, prop, end, easing );
+}
+jQuery.Tween = Tween;
+
+Tween.prototype = {
+ constructor: Tween,
+ init: function( elem, options, prop, end, easing, unit ) {
+ this.elem = elem;
+ this.prop = prop;
+ this.easing = easing || jQuery.easing._default;
+ this.options = options;
+ this.start = this.now = this.cur();
+ this.end = end;
+ this.unit = unit || ( jQuery.cssNumber[ prop ] ? "" : "px" );
+ },
+ cur: function() {
+ var hooks = Tween.propHooks[ this.prop ];
+
+ return hooks && hooks.get ?
+ hooks.get( this ) :
+ Tween.propHooks._default.get( this );
+ },
+ run: function( percent ) {
+ var eased,
+ hooks = Tween.propHooks[ this.prop ];
+
+ if ( this.options.duration ) {
+ this.pos = eased = jQuery.easing[ this.easing ](
+ percent, this.options.duration * percent, 0, 1, this.options.duration
+ );
+ } else {
+ this.pos = eased = percent;
+ }
+ this.now = ( this.end - this.start ) * eased + this.start;
+
+ if ( this.options.step ) {
+ this.options.step.call( this.elem, this.now, this );
+ }
+
+ if ( hooks && hooks.set ) {
+ hooks.set( this );
+ } else {
+ Tween.propHooks._default.set( this );
+ }
+ return this;
+ }
+};
+
+Tween.prototype.init.prototype = Tween.prototype;
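+
+// Illustrative sketch: a Tween can be driven by hand, which is all animate()
+// does once per tick (elem is assumed to be an attached DOM element):
+//
+//   var tween = jQuery.Tween( elem, { duration: 400 }, "opacity", 0 );
+//   tween.run( 0.5 ); // eases halfway from the starting value toward 0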
+
+Tween.propHooks = {
+ _default: {
+ get: function( tween ) {
+ var result;
+
+ // Use a property on the element directly when it is not a DOM element,
+ // or when there is no matching style property that exists.
+ if ( tween.elem.nodeType !== 1 ||
+ tween.elem[ tween.prop ] != null && tween.elem.style[ tween.prop ] == null ) {
+ return tween.elem[ tween.prop ];
+ }
+
+ // Passing an empty string as a 3rd parameter to .css will automatically
+ // attempt a parseFloat and fallback to a string if the parse fails.
+ // Simple values such as "10px" are parsed to Float;
+ // complex values such as "rotate(1rad)" are returned as-is.
+ result = jQuery.css( tween.elem, tween.prop, "" );
+
+ // Empty strings, null, undefined and "auto" are converted to 0.
+ return !result || result === "auto" ? 0 : result;
+ },
+ set: function( tween ) {
+
+ // Use step hook for back compat.
+ // Use cssHook if it's there.
+ // Use .style if available and use plain properties where available.
+ if ( jQuery.fx.step[ tween.prop ] ) {
+ jQuery.fx.step[ tween.prop ]( tween );
+ } else if ( tween.elem.nodeType === 1 && (
+ jQuery.cssHooks[ tween.prop ] ||
+ tween.elem.style[ finalPropName( tween.prop ) ] != null ) ) {
+ jQuery.style( tween.elem, tween.prop, tween.now + tween.unit );
+ } else {
+ tween.elem[ tween.prop ] = tween.now;
+ }
+ }
+ }
+};
+
+// Support: IE <=9 only
+// Panic-based approach to setting things on disconnected nodes
+Tween.propHooks.scrollTop = Tween.propHooks.scrollLeft = {
+ set: function( tween ) {
+ if ( tween.elem.nodeType && tween.elem.parentNode ) {
+ tween.elem[ tween.prop ] = tween.now;
+ }
+ }
+};
+
+jQuery.easing = {
+ linear: function( p ) {
+ return p;
+ },
+ swing: function( p ) {
+ return 0.5 - Math.cos( p * Math.PI ) / 2;
+ },
+ _default: "swing"
+};
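+
+// Illustrative sketch: extra easings can be added to jQuery.easing and
+// referenced by name in animate():
+//
+//   jQuery.easing.easeInQuad = function( p ) {
+//       return p * p;
+//   };
+//   $( "#box" ).animate( { width: 200 }, 400, "easeInQuad" );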
+
+jQuery.fx = Tween.prototype.init;
+
+// Back compat <1.8 extension point
+jQuery.fx.step = {};
+
+
+
+
+var
+ fxNow, inProgress,
+ rfxtypes = /^(?:toggle|show|hide)$/,
+ rrun = /queueHooks$/;
+
+function schedule() {
+ if ( inProgress ) {
+ if ( document.hidden === false && window.requestAnimationFrame ) {
+ window.requestAnimationFrame( schedule );
+ } else {
+ window.setTimeout( schedule, jQuery.fx.interval );
+ }
+
+ jQuery.fx.tick();
+ }
+}
+
+// Animations created synchronously will run synchronously
+function createFxNow() {
+ window.setTimeout( function() {
+ fxNow = undefined;
+ } );
+ return ( fxNow = Date.now() );
+}
+
+// Generate parameters to create a standard animation
+function genFx( type, includeWidth ) {
+ var which,
+ i = 0,
+ attrs = { height: type };
+
+ // If we include width, step value is 1 to do all cssExpand values,
+ // otherwise step value is 2 to skip over Left and Right
+ includeWidth = includeWidth ? 1 : 0;
+ for ( ; i < 4; i += 2 - includeWidth ) {
+ which = cssExpand[ i ];
+ attrs[ "margin" + which ] = attrs[ "padding" + which ] = type;
+ }
+
+ if ( includeWidth ) {
+ attrs.opacity = attrs.width = type;
+ }
+
+ return attrs;
+}
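+
+// Worked example (illustrative): genFx( "show", true ) expands to every
+// box-affecting property, which is what show()/hide()/toggle() animate:
+//
+//   { height: "show", marginTop: "show", paddingTop: "show",
+//     marginRight: "show", paddingRight: "show", marginBottom: "show",
+//     paddingBottom: "show", marginLeft: "show", paddingLeft: "show",
+//     opacity: "show", width: "show" }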
+
+function createTween( value, prop, animation ) {
+ var tween,
+ collection = ( Animation.tweeners[ prop ] || [] ).concat( Animation.tweeners[ "*" ] ),
+ index = 0,
+ length = collection.length;
+ for ( ; index < length; index++ ) {
+ if ( ( tween = collection[ index ].call( animation, prop, value ) ) ) {
+
+ // We're done with this property
+ return tween;
+ }
+ }
+}
+
+function defaultPrefilter( elem, props, opts ) {
+ var prop, value, toggle, hooks, oldfire, propTween, restoreDisplay, display,
+ isBox = "width" in props || "height" in props,
+ anim = this,
+ orig = {},
+ style = elem.style,
+ hidden = elem.nodeType && isHiddenWithinTree( elem ),
+ dataShow = dataPriv.get( elem, "fxshow" );
+
+ // Queue-skipping animations hijack the fx hooks
+ if ( !opts.queue ) {
+ hooks = jQuery._queueHooks( elem, "fx" );
+ if ( hooks.unqueued == null ) {
+ hooks.unqueued = 0;
+ oldfire = hooks.empty.fire;
+ hooks.empty.fire = function() {
+ if ( !hooks.unqueued ) {
+ oldfire();
+ }
+ };
+ }
+ hooks.unqueued++;
+
+ anim.always( function() {
+
+ // Ensure the complete handler is called before this completes
+ anim.always( function() {
+ hooks.unqueued--;
+ if ( !jQuery.queue( elem, "fx" ).length ) {
+ hooks.empty.fire();
+ }
+ } );
+ } );
+ }
+
+ // Detect show/hide animations
+ for ( prop in props ) {
+ value = props[ prop ];
+ if ( rfxtypes.test( value ) ) {
+ delete props[ prop ];
+ toggle = toggle || value === "toggle";
+ if ( value === ( hidden ? "hide" : "show" ) ) {
+
+ // Pretend to be hidden if this is a "show" and
+ // there is still data from a stopped show/hide
+ if ( value === "show" && dataShow && dataShow[ prop ] !== undefined ) {
+ hidden = true;
+
+ // Ignore all other no-op show/hide data
+ } else {
+ continue;
+ }
+ }
+ orig[ prop ] = dataShow && dataShow[ prop ] || jQuery.style( elem, prop );
+ }
+ }
+
+ // Bail out if this is a no-op like .hide().hide()
+ propTween = !jQuery.isEmptyObject( props );
+ if ( !propTween && jQuery.isEmptyObject( orig ) ) {
+ return;
+ }
+
+ // Restrict "overflow" and "display" styles during box animations
+ if ( isBox && elem.nodeType === 1 ) {
+
+ // Support: IE <=9 - 11, Edge 12 - 15
+ // Record all 3 overflow attributes because IE does not infer the shorthand
+ // from identically-valued overflowX and overflowY and Edge just mirrors
+ // the overflowX value there.
+ opts.overflow = [ style.overflow, style.overflowX, style.overflowY ];
+
+ // Identify a display type, preferring old show/hide data over the CSS cascade
+ restoreDisplay = dataShow && dataShow.display;
+ if ( restoreDisplay == null ) {
+ restoreDisplay = dataPriv.get( elem, "display" );
+ }
+ display = jQuery.css( elem, "display" );
+ if ( display === "none" ) {
+ if ( restoreDisplay ) {
+ display = restoreDisplay;
+ } else {
+
+ // Get nonempty value(s) by temporarily forcing visibility
+ showHide( [ elem ], true );
+ restoreDisplay = elem.style.display || restoreDisplay;
+ display = jQuery.css( elem, "display" );
+ showHide( [ elem ] );
+ }
+ }
+
+ // Animate inline elements as inline-block
+ if ( display === "inline" || display === "inline-block" && restoreDisplay != null ) {
+ if ( jQuery.css( elem, "float" ) === "none" ) {
+
+ // Restore the original display value at the end of pure show/hide animations
+ if ( !propTween ) {
+ anim.done( function() {
+ style.display = restoreDisplay;
+ } );
+ if ( restoreDisplay == null ) {
+ display = style.display;
+ restoreDisplay = display === "none" ? "" : display;
+ }
+ }
+ style.display = "inline-block";
+ }
+ }
+ }
+
+ if ( opts.overflow ) {
+ style.overflow = "hidden";
+ anim.always( function() {
+ style.overflow = opts.overflow[ 0 ];
+ style.overflowX = opts.overflow[ 1 ];
+ style.overflowY = opts.overflow[ 2 ];
+ } );
+ }
+
+ // Implement show/hide animations
+ propTween = false;
+ for ( prop in orig ) {
+
+ // General show/hide setup for this element animation
+ if ( !propTween ) {
+ if ( dataShow ) {
+ if ( "hidden" in dataShow ) {
+ hidden = dataShow.hidden;
+ }
+ } else {
+ dataShow = dataPriv.access( elem, "fxshow", { display: restoreDisplay } );
+ }
+
+ // Store hidden/visible for toggle so `.stop().toggle()` "reverses"
+ if ( toggle ) {
+ dataShow.hidden = !hidden;
+ }
+
+ // Show elements before animating them
+ if ( hidden ) {
+ showHide( [ elem ], true );
+ }
+
+ /* eslint-disable no-loop-func */
+
+ anim.done( function() {
+
+ /* eslint-enable no-loop-func */
+
+ // The final step of a "hide" animation is actually hiding the element
+ if ( !hidden ) {
+ showHide( [ elem ] );
+ }
+ dataPriv.remove( elem, "fxshow" );
+ for ( prop in orig ) {
+ jQuery.style( elem, prop, orig[ prop ] );
+ }
+ } );
+ }
+
+ // Per-property setup
+ propTween = createTween( hidden ? dataShow[ prop ] : 0, prop, anim );
+ if ( !( prop in dataShow ) ) {
+ dataShow[ prop ] = propTween.start;
+ if ( hidden ) {
+ propTween.end = propTween.start;
+ propTween.start = 0;
+ }
+ }
+ }
+}
+
+function propFilter( props, specialEasing ) {
+ var index, name, easing, value, hooks;
+
+ // camelCase, specialEasing and expand cssHook pass
+ for ( index in props ) {
+ name = camelCase( index );
+ easing = specialEasing[ name ];
+ value = props[ index ];
+ if ( Array.isArray( value ) ) {
+ easing = value[ 1 ];
+ value = props[ index ] = value[ 0 ];
+ }
+
+ if ( index !== name ) {
+ props[ name ] = value;
+ delete props[ index ];
+ }
+
+ hooks = jQuery.cssHooks[ name ];
+ if ( hooks && "expand" in hooks ) {
+ value = hooks.expand( value );
+ delete props[ name ];
+
+ // Not quite $.extend, this won't overwrite existing keys.
+ // Reusing 'index' because we have the correct "name"
+ for ( index in value ) {
+ if ( !( index in props ) ) {
+ props[ index ] = value[ index ];
+ specialEasing[ index ] = easing;
+ }
+ }
+ } else {
+ specialEasing[ name ] = easing;
+ }
+ }
+}
+
+function Animation( elem, properties, options ) {
+ var result,
+ stopped,
+ index = 0,
+ length = Animation.prefilters.length,
+ deferred = jQuery.Deferred().always( function() {
+
+ // Don't match elem in the :animated selector
+ delete tick.elem;
+ } ),
+ tick = function() {
+ if ( stopped ) {
+ return false;
+ }
+ var currentTime = fxNow || createFxNow(),
+ remaining = Math.max( 0, animation.startTime + animation.duration - currentTime ),
+
+ // Support: Android 2.3 only
+ // Archaic crash bug won't allow us to use `1 - ( 0.5 || 0 )` (#12497)
+ temp = remaining / animation.duration || 0,
+ percent = 1 - temp,
+ index = 0,
+ length = animation.tweens.length;
+
+ for ( ; index < length; index++ ) {
+ animation.tweens[ index ].run( percent );
+ }
+
+ deferred.notifyWith( elem, [ animation, percent, remaining ] );
+
+ // If there's more to do, yield
+ if ( percent < 1 && length ) {
+ return remaining;
+ }
+
+ // If this was an empty animation, synthesize a final progress notification
+ if ( !length ) {
+ deferred.notifyWith( elem, [ animation, 1, 0 ] );
+ }
+
+ // Resolve the animation and report its conclusion
+ deferred.resolveWith( elem, [ animation ] );
+ return false;
+ },
+ animation = deferred.promise( {
+ elem: elem,
+ props: jQuery.extend( {}, properties ),
+ opts: jQuery.extend( true, {
+ specialEasing: {},
+ easing: jQuery.easing._default
+ }, options ),
+ originalProperties: properties,
+ originalOptions: options,
+ startTime: fxNow || createFxNow(),
+ duration: options.duration,
+ tweens: [],
+ createTween: function( prop, end ) {
+ var tween = jQuery.Tween( elem, animation.opts, prop, end,
+ animation.opts.specialEasing[ prop ] || animation.opts.easing );
+ animation.tweens.push( tween );
+ return tween;
+ },
+ stop: function( gotoEnd ) {
+ var index = 0,
+
+ // If we are going to the end, we want to run all the tweens
+ // otherwise we skip this part
+ length = gotoEnd ? animation.tweens.length : 0;
+ if ( stopped ) {
+ return this;
+ }
+ stopped = true;
+ for ( ; index < length; index++ ) {
+ animation.tweens[ index ].run( 1 );
+ }
+
+ // Resolve when we played the last frame; otherwise, reject
+ if ( gotoEnd ) {
+ deferred.notifyWith( elem, [ animation, 1, 0 ] );
+ deferred.resolveWith( elem, [ animation, gotoEnd ] );
+ } else {
+ deferred.rejectWith( elem, [ animation, gotoEnd ] );
+ }
+ return this;
+ }
+ } ),
+ props = animation.props;
+
+ propFilter( props, animation.opts.specialEasing );
+
+ for ( ; index < length; index++ ) {
+ result = Animation.prefilters[ index ].call( animation, elem, props, animation.opts );
+ if ( result ) {
+ if ( isFunction( result.stop ) ) {
+ jQuery._queueHooks( animation.elem, animation.opts.queue ).stop =
+ result.stop.bind( result );
+ }
+ return result;
+ }
+ }
+
+ jQuery.map( props, createTween, animation );
+
+ if ( isFunction( animation.opts.start ) ) {
+ animation.opts.start.call( elem, animation );
+ }
+
+ // Attach callbacks from options
+ animation
+ .progress( animation.opts.progress )
+ .done( animation.opts.done, animation.opts.complete )
+ .fail( animation.opts.fail )
+ .always( animation.opts.always );
+
+ jQuery.fx.timer(
+ jQuery.extend( tick, {
+ elem: elem,
+ anim: animation,
+ queue: animation.opts.queue
+ } )
+ );
+
+ return animation;
+}
+
+jQuery.Animation = jQuery.extend( Animation, {
+
+ tweeners: {
+ "*": [ function( prop, value ) {
+ var tween = this.createTween( prop, value );
+ adjustCSS( tween.elem, prop, rcssNum.exec( value ), tween );
+ return tween;
+ } ]
+ },
+
+ tweener: function( props, callback ) {
+ if ( isFunction( props ) ) {
+ callback = props;
+ props = [ "*" ];
+ } else {
+ props = props.match( rnothtmlwhite );
+ }
+
+ var prop,
+ index = 0,
+ length = props.length;
+
+ for ( ; index < length; index++ ) {
+ prop = props[ index ];
+ Animation.tweeners[ prop ] = Animation.tweeners[ prop ] || [];
+ Animation.tweeners[ prop ].unshift( callback );
+ }
+ },
+
+ prefilters: [ defaultPrefilter ],
+
+ prefilter: function( callback, prepend ) {
+ if ( prepend ) {
+ Animation.prefilters.unshift( callback );
+ } else {
+ Animation.prefilters.push( callback );
+ }
+ }
+} );
+
+jQuery.speed = function( speed, easing, fn ) {
+ var opt = speed && typeof speed === "object" ? jQuery.extend( {}, speed ) : {
+ complete: fn || !fn && easing ||
+ isFunction( speed ) && speed,
+ duration: speed,
+ easing: fn && easing || easing && !isFunction( easing ) && easing
+ };
+
+ // Go to the end state if fx are off
+ if ( jQuery.fx.off ) {
+ opt.duration = 0;
+
+ } else {
+ if ( typeof opt.duration !== "number" ) {
+ if ( opt.duration in jQuery.fx.speeds ) {
+ opt.duration = jQuery.fx.speeds[ opt.duration ];
+
+ } else {
+ opt.duration = jQuery.fx.speeds._default;
+ }
+ }
+ }
+
+ // Normalize opt.queue - true/undefined/null -> "fx"
+ if ( opt.queue == null || opt.queue === true ) {
+ opt.queue = "fx";
+ }
+
+ // Queueing
+ opt.old = opt.complete;
+
+ opt.complete = function() {
+ if ( isFunction( opt.old ) ) {
+ opt.old.call( this );
+ }
+
+ if ( opt.queue ) {
+ jQuery.dequeue( this, opt.queue );
+ }
+ };
+
+ return opt;
+};
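+
+// Illustrative sketch: jQuery.speed normalizes the loose
+// ( duration, easing, complete ) calling styles into one options object:
+//
+//   jQuery.speed( "slow", done ); // hypothetical `done` callback
+//   // -> { duration: 600, queue: "fx",
+//   //      complete: wrapper that calls done, then dequeues "fx" }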
+
+jQuery.fn.extend( {
+ fadeTo: function( speed, to, easing, callback ) {
+
+ // Show any hidden elements after setting opacity to 0
+ return this.filter( isHiddenWithinTree ).css( "opacity", 0 ).show()
+
+ // Animate to the value specified
+ .end().animate( { opacity: to }, speed, easing, callback );
+ },
+ animate: function( prop, speed, easing, callback ) {
+ var empty = jQuery.isEmptyObject( prop ),
+ optall = jQuery.speed( speed, easing, callback ),
+ doAnimation = function() {
+
+ // Operate on a copy of prop so per-property easing won't be lost
+ var anim = Animation( this, jQuery.extend( {}, prop ), optall );
+
+ // Empty animations, or finishing resolves immediately
+ if ( empty || dataPriv.get( this, "finish" ) ) {
+ anim.stop( true );
+ }
+ };
+ doAnimation.finish = doAnimation;
+
+ return empty || optall.queue === false ?
+ this.each( doAnimation ) :
+ this.queue( optall.queue, doAnimation );
+ },
+ stop: function( type, clearQueue, gotoEnd ) {
+ var stopQueue = function( hooks ) {
+ var stop = hooks.stop;
+ delete hooks.stop;
+ stop( gotoEnd );
+ };
+
+ if ( typeof type !== "string" ) {
+ gotoEnd = clearQueue;
+ clearQueue = type;
+ type = undefined;
+ }
+ if ( clearQueue ) {
+ this.queue( type || "fx", [] );
+ }
+
+ return this.each( function() {
+ var dequeue = true,
+ index = type != null && type + "queueHooks",
+ timers = jQuery.timers,
+ data = dataPriv.get( this );
+
+ if ( index ) {
+ if ( data[ index ] && data[ index ].stop ) {
+ stopQueue( data[ index ] );
+ }
+ } else {
+ for ( index in data ) {
+ if ( data[ index ] && data[ index ].stop && rrun.test( index ) ) {
+ stopQueue( data[ index ] );
+ }
+ }
+ }
+
+ for ( index = timers.length; index--; ) {
+ if ( timers[ index ].elem === this &&
+ ( type == null || timers[ index ].queue === type ) ) {
+
+ timers[ index ].anim.stop( gotoEnd );
+ dequeue = false;
+ timers.splice( index, 1 );
+ }
+ }
+
+ // Start the next in the queue if the last step wasn't forced.
+ // Timers currently will call their complete callbacks, which
+ // will dequeue but only if they were gotoEnd.
+ if ( dequeue || !gotoEnd ) {
+ jQuery.dequeue( this, type );
+ }
+ } );
+ },
+ finish: function( type ) {
+ if ( type !== false ) {
+ type = type || "fx";
+ }
+ return this.each( function() {
+ var index,
+ data = dataPriv.get( this ),
+ queue = data[ type + "queue" ],
+ hooks = data[ type + "queueHooks" ],
+ timers = jQuery.timers,
+ length = queue ? queue.length : 0;
+
+ // Enable finishing flag on private data
+ data.finish = true;
+
+ // Empty the queue first
+ jQuery.queue( this, type, [] );
+
+ if ( hooks && hooks.stop ) {
+ hooks.stop.call( this, true );
+ }
+
+ // Look for any active animations, and finish them
+ for ( index = timers.length; index--; ) {
+ if ( timers[ index ].elem === this && timers[ index ].queue === type ) {
+ timers[ index ].anim.stop( true );
+ timers.splice( index, 1 );
+ }
+ }
+
+ // Look for any animations in the old queue and finish them
+ for ( index = 0; index < length; index++ ) {
+ if ( queue[ index ] && queue[ index ].finish ) {
+ queue[ index ].finish.call( this );
+ }
+ }
+
+ // Turn off finishing flag
+ delete data.finish;
+ } );
+ }
+} );
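+
+// Illustrative sketch: animate() copies `prop`, so per-property easing given
+// as [ endValue, easingName ] is preserved across repeated calls:
+//
+//   $( "#box" ).animate( { height: [ 200, "linear" ], opacity: 0.5 }, 400 );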
+
+jQuery.each( [ "toggle", "show", "hide" ], function( _i, name ) {
+ var cssFn = jQuery.fn[ name ];
+ jQuery.fn[ name ] = function( speed, easing, callback ) {
+ return speed == null || typeof speed === "boolean" ?
+ cssFn.apply( this, arguments ) :
+ this.animate( genFx( name, true ), speed, easing, callback );
+ };
+} );
+
+// Generate shortcuts for custom animations
+jQuery.each( {
+ slideDown: genFx( "show" ),
+ slideUp: genFx( "hide" ),
+ slideToggle: genFx( "toggle" ),
+ fadeIn: { opacity: "show" },
+ fadeOut: { opacity: "hide" },
+ fadeToggle: { opacity: "toggle" }
+}, function( name, props ) {
+ jQuery.fn[ name ] = function( speed, easing, callback ) {
+ return this.animate( props, speed, easing, callback );
+ };
+} );
+
+jQuery.timers = [];
+jQuery.fx.tick = function() {
+ var timer,
+ i = 0,
+ timers = jQuery.timers;
+
+ fxNow = Date.now();
+
+ for ( ; i < timers.length; i++ ) {
+ timer = timers[ i ];
+
+ // Run the timer and safely remove it when done (allowing for external removal)
+ if ( !timer() && timers[ i ] === timer ) {
+ timers.splice( i--, 1 );
+ }
+ }
+
+ if ( !timers.length ) {
+ jQuery.fx.stop();
+ }
+ fxNow = undefined;
+};
+
+jQuery.fx.timer = function( timer ) {
+ jQuery.timers.push( timer );
+ jQuery.fx.start();
+};
+
+jQuery.fx.interval = 13;
+jQuery.fx.start = function() {
+ if ( inProgress ) {
+ return;
+ }
+
+ inProgress = true;
+ schedule();
+};
+
+jQuery.fx.stop = function() {
+ inProgress = null;
+};
+
+jQuery.fx.speeds = {
+ slow: 600,
+ fast: 200,
+
+ // Default speed
+ _default: 400
+};
+
+
+// Based off of the plugin by Clint Helfers, with permission.
+// https://web.archive.org/web/20100324014747/http://blindsignals.com/index.php/2009/07/jquery-delay/
+jQuery.fn.delay = function( time, type ) {
+ time = jQuery.fx ? jQuery.fx.speeds[ time ] || time : time;
+ type = type || "fx";
+
+ return this.queue( type, function( next, hooks ) {
+ var timeout = window.setTimeout( next, time );
+ hooks.stop = function() {
+ window.clearTimeout( timeout );
+ };
+ } );
+};
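+
+// Illustrative sketch: delay() adds a timed queue entry; because it registers
+// a stop hook, it can be cancelled like any other queued step:
+//
+//   $( "#msg" ).delay( 800 ).fadeIn( 400 );
+//   $( "#msg" ).stop( true ); // clears the pending delay as well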
+
+
+( function() {
+ var input = document.createElement( "input" ),
+ select = document.createElement( "select" ),
+ opt = select.appendChild( document.createElement( "option" ) );
+
+ input.type = "checkbox";
+
+ // Support: Android <=4.3 only
+ // Default value for a checkbox should be "on"
+ support.checkOn = input.value !== "";
+
+ // Support: IE <=11 only
+ // Must access selectedIndex to make default options select
+ support.optSelected = opt.selected;
+
+ // Support: IE <=11 only
+ // An input loses its value after becoming a radio
+ input = document.createElement( "input" );
+ input.value = "t";
+ input.type = "radio";
+ support.radioValue = input.value === "t";
+} )();
+
+
+var boolHook,
+ attrHandle = jQuery.expr.attrHandle;
+
+jQuery.fn.extend( {
+ attr: function( name, value ) {
+ return access( this, jQuery.attr, name, value, arguments.length > 1 );
+ },
+
+ removeAttr: function( name ) {
+ return this.each( function() {
+ jQuery.removeAttr( this, name );
+ } );
+ }
+} );
+
+jQuery.extend( {
+ attr: function( elem, name, value ) {
+ var ret, hooks,
+ nType = elem.nodeType;
+
+ // Don't get/set attributes on text, comment and attribute nodes
+ if ( nType === 3 || nType === 8 || nType === 2 ) {
+ return;
+ }
+
+ // Fallback to prop when attributes are not supported
+ if ( typeof elem.getAttribute === "undefined" ) {
+ return jQuery.prop( elem, name, value );
+ }
+
+ // Attribute hooks are determined by the lowercase version
+ // Grab necessary hook if one is defined
+ if ( nType !== 1 || !jQuery.isXMLDoc( elem ) ) {
+ hooks = jQuery.attrHooks[ name.toLowerCase() ] ||
+ ( jQuery.expr.match.bool.test( name ) ? boolHook : undefined );
+ }
+
+ if ( value !== undefined ) {
+ if ( value === null ) {
+ jQuery.removeAttr( elem, name );
+ return;
+ }
+
+ if ( hooks && "set" in hooks &&
+ ( ret = hooks.set( elem, value, name ) ) !== undefined ) {
+ return ret;
+ }
+
+ elem.setAttribute( name, value + "" );
+ return value;
+ }
+
+ if ( hooks && "get" in hooks && ( ret = hooks.get( elem, name ) ) !== null ) {
+ return ret;
+ }
+
+ ret = jQuery.find.attr( elem, name );
+
+ // Non-existent attributes return null; we normalize to undefined
+ return ret == null ? undefined : ret;
+ },
+
+ attrHooks: {
+ type: {
+ set: function( elem, value ) {
+ if ( !support.radioValue && value === "radio" &&
+ nodeName( elem, "input" ) ) {
+ var val = elem.value;
+ elem.setAttribute( "type", value );
+ if ( val ) {
+ elem.value = val;
+ }
+ return value;
+ }
+ }
+ }
+ },
+
+ removeAttr: function( elem, value ) {
+ var name,
+ i = 0,
+
+ // Attribute names can contain non-HTML whitespace characters
+ // https://html.spec.whatwg.org/multipage/syntax.html#attributes-2
+ attrNames = value && value.match( rnothtmlwhite );
+
+ if ( attrNames && elem.nodeType === 1 ) {
+ while ( ( name = attrNames[ i++ ] ) ) {
+ elem.removeAttribute( name );
+ }
+ }
+ }
+} );
+
+// Hooks for boolean attributes
+boolHook = {
+ set: function( elem, value, name ) {
+ if ( value === false ) {
+
+ // Remove boolean attributes when set to false
+ jQuery.removeAttr( elem, name );
+ } else {
+ elem.setAttribute( name, name );
+ }
+ return name;
+ }
+};
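+
+// Illustrative sketch: boolean attributes are all-or-nothing, so setting
+// false removes them entirely:
+//
+//   $( "#opt" ).attr( "checked", false ); // removes the attribute
+//   $( "#opt" ).attr( "checked", true );  // sets checked="checked"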
+
+jQuery.each( jQuery.expr.match.bool.source.match( /\w+/g ), function( _i, name ) {
+ var getter = attrHandle[ name ] || jQuery.find.attr;
+
+ attrHandle[ name ] = function( elem, name, isXML ) {
+ var ret, handle,
+ lowercaseName = name.toLowerCase();
+
+ if ( !isXML ) {
+
+ // Avoid an infinite loop by temporarily removing this function from the getter
+ handle = attrHandle[ lowercaseName ];
+ attrHandle[ lowercaseName ] = ret;
+ ret = getter( elem, name, isXML ) != null ?
+ lowercaseName :
+ null;
+ attrHandle[ lowercaseName ] = handle;
+ }
+ return ret;
+ };
+} );
+
+
+
+
+var rfocusable = /^(?:input|select|textarea|button)$/i,
+ rclickable = /^(?:a|area)$/i;
+
+jQuery.fn.extend( {
+ prop: function( name, value ) {
+ return access( this, jQuery.prop, name, value, arguments.length > 1 );
+ },
+
+ removeProp: function( name ) {
+ return this.each( function() {
+ delete this[ jQuery.propFix[ name ] || name ];
+ } );
+ }
+} );
+
+jQuery.extend( {
+ prop: function( elem, name, value ) {
+ var ret, hooks,
+ nType = elem.nodeType;
+
+ // Don't get/set properties on text, comment and attribute nodes
+ if ( nType === 3 || nType === 8 || nType === 2 ) {
+ return;
+ }
+
+ if ( nType !== 1 || !jQuery.isXMLDoc( elem ) ) {
+
+ // Fix name and attach hooks
+ name = jQuery.propFix[ name ] || name;
+ hooks = jQuery.propHooks[ name ];
+ }
+
+ if ( value !== undefined ) {
+ if ( hooks && "set" in hooks &&
+ ( ret = hooks.set( elem, value, name ) ) !== undefined ) {
+ return ret;
+ }
+
+ return ( elem[ name ] = value );
+ }
+
+ if ( hooks && "get" in hooks && ( ret = hooks.get( elem, name ) ) !== null ) {
+ return ret;
+ }
+
+ return elem[ name ];
+ },
+
+ propHooks: {
+ tabIndex: {
+ get: function( elem ) {
+
+ // Support: IE <=9 - 11 only
+ // elem.tabIndex doesn't always return the
+ // correct value when it hasn't been explicitly set
+ // https://web.archive.org/web/20141116233347/http://fluidproject.org/blog/2008/01/09/getting-setting-and-removing-tabindex-values-with-javascript/
+ // Use proper attribute retrieval (#12072)
+ var tabindex = jQuery.find.attr( elem, "tabindex" );
+
+ if ( tabindex ) {
+ return parseInt( tabindex, 10 );
+ }
+
+ if (
+ rfocusable.test( elem.nodeName ) ||
+ rclickable.test( elem.nodeName ) &&
+ elem.href
+ ) {
+ return 0;
+ }
+
+ return -1;
+ }
+ }
+ },
+
+ propFix: {
+ "for": "htmlFor",
+ "class": "className"
+ }
+} );
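+
+// Illustrative sketch: .prop() reads live DOM state while .attr() reflects
+// the markup-level attribute; for form controls the two diverge after user
+// interaction:
+//
+//   $( "#opt" ).prop( "checked" ); // current state, true/false
+//   $( "#opt" ).attr( "checked" ); // "checked" or undefined, as authored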
+
+// Support: IE <=11 only
+// Accessing the selectedIndex property
+// forces the browser to respect setting selected
+// on the option
+// The getter ensures a default option is selected
+// when in an optgroup
+// eslint rule "no-unused-expressions" is disabled for this code
+// since it considers such property accesses no-ops
+if ( !support.optSelected ) {
+ jQuery.propHooks.selected = {
+ get: function( elem ) {
+
+ /* eslint no-unused-expressions: "off" */
+
+ var parent = elem.parentNode;
+ if ( parent && parent.parentNode ) {
+ parent.parentNode.selectedIndex;
+ }
+ return null;
+ },
+ set: function( elem ) {
+
+ /* eslint no-unused-expressions: "off" */
+
+ var parent = elem.parentNode;
+ if ( parent ) {
+ parent.selectedIndex;
+
+ if ( parent.parentNode ) {
+ parent.parentNode.selectedIndex;
+ }
+ }
+ }
+ };
+}
+
+jQuery.each( [
+ "tabIndex",
+ "readOnly",
+ "maxLength",
+ "cellSpacing",
+ "cellPadding",
+ "rowSpan",
+ "colSpan",
+ "useMap",
+ "frameBorder",
+ "contentEditable"
+], function() {
+ jQuery.propFix[ this.toLowerCase() ] = this;
+} );
+
+
+
+
+// Strip and collapse whitespace according to HTML spec
+// https://infra.spec.whatwg.org/#strip-and-collapse-ascii-whitespace
+function stripAndCollapse( value ) {
+ var tokens = value.match( rnothtmlwhite ) || [];
+ return tokens.join( " " );
+}
+
+
+function getClass( elem ) {
+ return elem.getAttribute && elem.getAttribute( "class" ) || "";
+}
+
+function classesToArray( value ) {
+ if ( Array.isArray( value ) ) {
+ return value;
+ }
+ if ( typeof value === "string" ) {
+ return value.match( rnothtmlwhite ) || [];
+ }
+ return [];
+}
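+
+// Illustrative sketch: the class methods accept a space-separated string, an
+// array, or a function of ( index, currentClasses ):
+//
+//   $( "#box" ).addClass( [ "card", "card--wide" ] );
+//   $( "#box" ).toggleClass( "active", isActive ); // hypothetical boolean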
+
+jQuery.fn.extend( {
+ addClass: function( value ) {
+ var classes, elem, cur, curValue, clazz, j, finalValue,
+ i = 0;
+
+ if ( isFunction( value ) ) {
+ return this.each( function( j ) {
+ jQuery( this ).addClass( value.call( this, j, getClass( this ) ) );
+ } );
+ }
+
+ classes = classesToArray( value );
+
+ if ( classes.length ) {
+ while ( ( elem = this[ i++ ] ) ) {
+ curValue = getClass( elem );
+ cur = elem.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " );
+
+ if ( cur ) {
+ j = 0;
+ while ( ( clazz = classes[ j++ ] ) ) {
+ if ( cur.indexOf( " " + clazz + " " ) < 0 ) {
+ cur += clazz + " ";
+ }
+ }
+
+ // Only assign if different to avoid unneeded rendering.
+ finalValue = stripAndCollapse( cur );
+ if ( curValue !== finalValue ) {
+ elem.setAttribute( "class", finalValue );
+ }
+ }
+ }
+ }
+
+ return this;
+ },
+
+ removeClass: function( value ) {
+ var classes, elem, cur, curValue, clazz, j, finalValue,
+ i = 0;
+
+ if ( isFunction( value ) ) {
+ return this.each( function( j ) {
+ jQuery( this ).removeClass( value.call( this, j, getClass( this ) ) );
+ } );
+ }
+
+ if ( !arguments.length ) {
+ return this.attr( "class", "" );
+ }
+
+ classes = classesToArray( value );
+
+ if ( classes.length ) {
+ while ( ( elem = this[ i++ ] ) ) {
+ curValue = getClass( elem );
+
+ // This expression is here for better compressibility (see addClass)
+ cur = elem.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " );
+
+ if ( cur ) {
+ j = 0;
+ while ( ( clazz = classes[ j++ ] ) ) {
+
+ // Remove *all* instances
+ while ( cur.indexOf( " " + clazz + " " ) > -1 ) {
+ cur = cur.replace( " " + clazz + " ", " " );
+ }
+ }
+
+ // Only assign if different to avoid unneeded rendering.
+ finalValue = stripAndCollapse( cur );
+ if ( curValue !== finalValue ) {
+ elem.setAttribute( "class", finalValue );
+ }
+ }
+ }
+ }
+
+ return this;
+ },
+
+ toggleClass: function( value, stateVal ) {
+ var type = typeof value,
+ isValidValue = type === "string" || Array.isArray( value );
+
+ if ( typeof stateVal === "boolean" && isValidValue ) {
+ return stateVal ? this.addClass( value ) : this.removeClass( value );
+ }
+
+ if ( isFunction( value ) ) {
+ return this.each( function( i ) {
+ jQuery( this ).toggleClass(
+ value.call( this, i, getClass( this ), stateVal ),
+ stateVal
+ );
+ } );
+ }
+
+ return this.each( function() {
+ var className, i, self, classNames;
+
+ if ( isValidValue ) {
+
+ // Toggle individual class names
+ i = 0;
+ self = jQuery( this );
+ classNames = classesToArray( value );
+
+ while ( ( className = classNames[ i++ ] ) ) {
+
+ // Check each className given, space separated list
+ if ( self.hasClass( className ) ) {
+ self.removeClass( className );
+ } else {
+ self.addClass( className );
+ }
+ }
+
+ // Toggle whole class name
+ } else if ( value === undefined || type === "boolean" ) {
+ className = getClass( this );
+ if ( className ) {
+
+ // Store className if set
+ dataPriv.set( this, "__className__", className );
+ }
+
+ // If the element has a class name or if we're passed `false`,
+ // then remove the whole classname (if there was one, the above saved it).
+ // Otherwise bring back whatever was previously saved (if anything),
+ // falling back to the empty string if nothing was stored.
+ if ( this.setAttribute ) {
+ this.setAttribute( "class",
+ className || value === false ?
+ "" :
+ dataPriv.get( this, "__className__" ) || ""
+ );
+ }
+ }
+ } );
+ },
+
+ hasClass: function( selector ) {
+ var className, elem,
+ i = 0;
+
+ className = " " + selector + " ";
+ while ( ( elem = this[ i++ ] ) ) {
+ if ( elem.nodeType === 1 &&
+ ( " " + stripAndCollapse( getClass( elem ) ) + " " ).indexOf( className ) > -1 ) {
+ return true;
+ }
+ }
+
+ return false;
+ }
+} );
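+
+// Usage sketch for the class API above (illustrative only; ".item", the class
+// names and the isOpen flag are hypothetical):
+//
+//   jQuery( ".item" ).addClass( "active highlighted" ); // adds only missing classes
+//   jQuery( ".item" ).removeClass( "active" );          // removes every instance
+//   jQuery( ".item" ).toggleClass( "open", isOpen );    // state forced by a boolean
+//   jQuery( ".item" ).hasClass( "active" );             // true if any element has it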
+
+
+
+
+var rreturn = /\r/g;
+
+jQuery.fn.extend( {
+ val: function( value ) {
+ var hooks, ret, valueIsFunction,
+ elem = this[ 0 ];
+
+ if ( !arguments.length ) {
+ if ( elem ) {
+ hooks = jQuery.valHooks[ elem.type ] ||
+ jQuery.valHooks[ elem.nodeName.toLowerCase() ];
+
+ if ( hooks &&
+ "get" in hooks &&
+ ( ret = hooks.get( elem, "value" ) ) !== undefined
+ ) {
+ return ret;
+ }
+
+ ret = elem.value;
+
+ // Handle most common string cases
+ if ( typeof ret === "string" ) {
+ return ret.replace( rreturn, "" );
+ }
+
+ // Handle cases where value is null/undef or number
+ return ret == null ? "" : ret;
+ }
+
+ return;
+ }
+
+ valueIsFunction = isFunction( value );
+
+ return this.each( function( i ) {
+ var val;
+
+ if ( this.nodeType !== 1 ) {
+ return;
+ }
+
+ if ( valueIsFunction ) {
+ val = value.call( this, i, jQuery( this ).val() );
+ } else {
+ val = value;
+ }
+
+ // Treat null/undefined as ""; convert numbers to string
+ if ( val == null ) {
+ val = "";
+
+ } else if ( typeof val === "number" ) {
+ val += "";
+
+ } else if ( Array.isArray( val ) ) {
+ val = jQuery.map( val, function( value ) {
+ return value == null ? "" : value + "";
+ } );
+ }
+
+ hooks = jQuery.valHooks[ this.type ] || jQuery.valHooks[ this.nodeName.toLowerCase() ];
+
+ // If set returns undefined, fall back to normal setting
+ if ( !hooks || !( "set" in hooks ) || hooks.set( this, val, "value" ) === undefined ) {
+ this.value = val;
+ }
+ } );
+ }
+} );
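+
+// Usage sketch for .val() (illustrative; "#name" is a hypothetical input):
+//
+//   jQuery( "#name" ).val();        // get: "\r" stripped, "" for null/undefined
+//   jQuery( "#name" ).val( 42 );    // set: numbers are converted to strings
+//   jQuery( "select[multiple]" ).val( [ "a", "b" ] ); // selects matching options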
+
+jQuery.extend( {
+ valHooks: {
+ option: {
+ get: function( elem ) {
+
+ var val = jQuery.find.attr( elem, "value" );
+ return val != null ?
+ val :
+
+ // Support: IE <=10 - 11 only
+ // option.text throws exceptions (#14686, #14858)
+ // Strip and collapse whitespace
+ // https://html.spec.whatwg.org/#strip-and-collapse-whitespace
+ stripAndCollapse( jQuery.text( elem ) );
+ }
+ },
+ select: {
+ get: function( elem ) {
+ var value, option, i,
+ options = elem.options,
+ index = elem.selectedIndex,
+ one = elem.type === "select-one",
+ values = one ? null : [],
+ max = one ? index + 1 : options.length;
+
+ if ( index < 0 ) {
+ i = max;
+
+ } else {
+ i = one ? index : 0;
+ }
+
+ // Loop through all the selected options
+ for ( ; i < max; i++ ) {
+ option = options[ i ];
+
+ // Support: IE <=9 only
+ // IE8-9 doesn't update selected after form reset (#2551)
+ if ( ( option.selected || i === index ) &&
+
+ // Don't return options that are disabled or in a disabled optgroup
+ !option.disabled &&
+ ( !option.parentNode.disabled ||
+ !nodeName( option.parentNode, "optgroup" ) ) ) {
+
+ // Get the specific value for the option
+ value = jQuery( option ).val();
+
+ // A single select doesn't need an array
+ if ( one ) {
+ return value;
+ }
+
+ // Multi-selects return an array
+ values.push( value );
+ }
+ }
+
+ return values;
+ },
+
+ set: function( elem, value ) {
+ var optionSet, option,
+ options = elem.options,
+ values = jQuery.makeArray( value ),
+ i = options.length;
+
+ while ( i-- ) {
+ option = options[ i ];
+
+ /* eslint-disable no-cond-assign */
+
+ if ( option.selected =
+ jQuery.inArray( jQuery.valHooks.option.get( option ), values ) > -1
+ ) {
+ optionSet = true;
+ }
+
+ /* eslint-enable no-cond-assign */
+ }
+
+ // Force browsers to behave consistently when non-matching value is set
+ if ( !optionSet ) {
+ elem.selectedIndex = -1;
+ }
+ return values;
+ }
+ }
+ }
+} );
+
+// Radios and checkboxes getter/setter
+jQuery.each( [ "radio", "checkbox" ], function() {
+ jQuery.valHooks[ this ] = {
+ set: function( elem, value ) {
+ if ( Array.isArray( value ) ) {
+ return ( elem.checked = jQuery.inArray( jQuery( elem ).val(), value ) > -1 );
+ }
+ }
+ };
+ if ( !support.checkOn ) {
+ jQuery.valHooks[ this ].get = function( elem ) {
+ return elem.getAttribute( "value" ) === null ? "on" : elem.value;
+ };
+ }
+} );
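+
+// Sketch of the hooks above (illustrative markup assumed): a multiple select
+// returns an array of its enabled selected options, and setting radios or
+// checkboxes from an array checks each input whose value appears in it:
+//
+//   jQuery( "select[multiple]" ).val();        // e.g. [ "a", "c" ]
+//   jQuery( "input:checkbox" ).val( [ "a" ] ); // checked iff its value is "a"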
+
+
+
+
+// Return jQuery for attributes-only inclusion
+
+
+support.focusin = "onfocusin" in window;
+
+
+var rfocusMorph = /^(?:focusinfocus|focusoutblur)$/,
+ stopPropagationCallback = function( e ) {
+ e.stopPropagation();
+ };
+
+jQuery.extend( jQuery.event, {
+
+ trigger: function( event, data, elem, onlyHandlers ) {
+
+ var i, cur, tmp, bubbleType, ontype, handle, special, lastElement,
+ eventPath = [ elem || document ],
+ type = hasOwn.call( event, "type" ) ? event.type : event,
+ namespaces = hasOwn.call( event, "namespace" ) ? event.namespace.split( "." ) : [];
+
+ cur = lastElement = tmp = elem = elem || document;
+
+ // Don't do events on text and comment nodes
+ if ( elem.nodeType === 3 || elem.nodeType === 8 ) {
+ return;
+ }
+
+ // focus/blur morphs to focusin/out; ensure we're not firing them right now
+ if ( rfocusMorph.test( type + jQuery.event.triggered ) ) {
+ return;
+ }
+
+ if ( type.indexOf( "." ) > -1 ) {
+
+ // Namespaced trigger; create a regexp to match event type in handle()
+ namespaces = type.split( "." );
+ type = namespaces.shift();
+ namespaces.sort();
+ }
+ ontype = type.indexOf( ":" ) < 0 && "on" + type;
+
+ // Caller can pass in a jQuery.Event object, Object, or just an event type string
+ event = event[ jQuery.expando ] ?
+ event :
+ new jQuery.Event( type, typeof event === "object" && event );
+
+ // Trigger bitmask: & 1 for native handlers; & 2 for jQuery (always true)
+ event.isTrigger = onlyHandlers ? 2 : 3;
+ event.namespace = namespaces.join( "." );
+ event.rnamespace = event.namespace ?
+ new RegExp( "(^|\\.)" + namespaces.join( "\\.(?:.*\\.|)" ) + "(\\.|$)" ) :
+ null;
+
+ // Clean up the event in case it is being reused
+ event.result = undefined;
+ if ( !event.target ) {
+ event.target = elem;
+ }
+
+ // Clone any incoming data and prepend the event, creating the handler arg list
+ data = data == null ?
+ [ event ] :
+ jQuery.makeArray( data, [ event ] );
+
+ // Allow special events to draw outside the lines
+ special = jQuery.event.special[ type ] || {};
+ if ( !onlyHandlers && special.trigger && special.trigger.apply( elem, data ) === false ) {
+ return;
+ }
+
+ // Determine event propagation path in advance, per W3C events spec (#9951)
+ // Bubble up to document, then to window; watch for a global ownerDocument var (#9724)
+ if ( !onlyHandlers && !special.noBubble && !isWindow( elem ) ) {
+
+ bubbleType = special.delegateType || type;
+ if ( !rfocusMorph.test( bubbleType + type ) ) {
+ cur = cur.parentNode;
+ }
+ for ( ; cur; cur = cur.parentNode ) {
+ eventPath.push( cur );
+ tmp = cur;
+ }
+
+ // Only add window if we got to document (e.g., not plain obj or detached DOM)
+ if ( tmp === ( elem.ownerDocument || document ) ) {
+ eventPath.push( tmp.defaultView || tmp.parentWindow || window );
+ }
+ }
+
+ // Fire handlers on the event path
+ i = 0;
+ while ( ( cur = eventPath[ i++ ] ) && !event.isPropagationStopped() ) {
+ lastElement = cur;
+ event.type = i > 1 ?
+ bubbleType :
+ special.bindType || type;
+
+ // jQuery handler
+ handle = (
+ dataPriv.get( cur, "events" ) || Object.create( null )
+ )[ event.type ] &&
+ dataPriv.get( cur, "handle" );
+ if ( handle ) {
+ handle.apply( cur, data );
+ }
+
+ // Native handler
+ handle = ontype && cur[ ontype ];
+ if ( handle && handle.apply && acceptData( cur ) ) {
+ event.result = handle.apply( cur, data );
+ if ( event.result === false ) {
+ event.preventDefault();
+ }
+ }
+ }
+ event.type = type;
+
+ // If nobody prevented the default action, do it now
+ if ( !onlyHandlers && !event.isDefaultPrevented() ) {
+
+ if ( ( !special._default ||
+ special._default.apply( eventPath.pop(), data ) === false ) &&
+ acceptData( elem ) ) {
+
+ // Call a native DOM method on the target with the same name as the event.
+ // Don't do default actions on window; that's where global variables live (#6170)
+ if ( ontype && isFunction( elem[ type ] ) && !isWindow( elem ) ) {
+
+ // Don't re-trigger an onFOO event when we call its FOO() method
+ tmp = elem[ ontype ];
+
+ if ( tmp ) {
+ elem[ ontype ] = null;
+ }
+
+ // Prevent re-triggering of the same event, since we already bubbled it above
+ jQuery.event.triggered = type;
+
+ if ( event.isPropagationStopped() ) {
+ lastElement.addEventListener( type, stopPropagationCallback );
+ }
+
+ elem[ type ]();
+
+ if ( event.isPropagationStopped() ) {
+ lastElement.removeEventListener( type, stopPropagationCallback );
+ }
+
+ jQuery.event.triggered = undefined;
+
+ if ( tmp ) {
+ elem[ ontype ] = tmp;
+ }
+ }
+ }
+ }
+
+ return event.result;
+ },
+
+ // Piggyback on a donor event to simulate a different one
+ // Used only for `focus(in | out)` events
+ simulate: function( type, elem, event ) {
+ var e = jQuery.extend(
+ new jQuery.Event(),
+ event,
+ {
+ type: type,
+ isSimulated: true
+ }
+ );
+
+ jQuery.event.trigger( e, null, elem );
+ }
+
+} );
+
+jQuery.fn.extend( {
+
+ trigger: function( type, data ) {
+ return this.each( function() {
+ jQuery.event.trigger( type, data, this );
+ } );
+ },
+ triggerHandler: function( type, data ) {
+ var elem = this[ 0 ];
+ if ( elem ) {
+ return jQuery.event.trigger( type, data, elem, true );
+ }
+ }
+} );
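+
+// Usage sketch (illustrative; "#save" is hypothetical): .trigger() bubbles and
+// runs the default action; .triggerHandler() fires jQuery handlers on the first
+// element only, without bubbling, and returns the last handler's result:
+//
+//   jQuery( "#save" ).trigger( "click" );
+//   var result = jQuery( "#save" ).triggerHandler( "click" );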
+
+
+// Support: Firefox <=44
+// Firefox doesn't have focus(in | out) events
+// Related ticket - https://bugzilla.mozilla.org/show_bug.cgi?id=687787
+//
+// Support: Chrome <=48 - 49, Safari <=9.0 - 9.1
+// focus(in | out) events fire after focus & blur events,
+// which is spec violation - http://www.w3.org/TR/DOM-Level-3-Events/#events-focusevent-event-order
+// Related ticket - https://bugs.chromium.org/p/chromium/issues/detail?id=449857
+if ( !support.focusin ) {
+ jQuery.each( { focus: "focusin", blur: "focusout" }, function( orig, fix ) {
+
+ // Attach a single capturing handler on the document while someone wants focusin/focusout
+ var handler = function( event ) {
+ jQuery.event.simulate( fix, event.target, jQuery.event.fix( event ) );
+ };
+
+ jQuery.event.special[ fix ] = {
+ setup: function() {
+
+ // Handle: regular nodes (via `this.ownerDocument`), window
+ // (via `this.document`) & document (via `this`).
+ var doc = this.ownerDocument || this.document || this,
+ attaches = dataPriv.access( doc, fix );
+
+ if ( !attaches ) {
+ doc.addEventListener( orig, handler, true );
+ }
+ dataPriv.access( doc, fix, ( attaches || 0 ) + 1 );
+ },
+ teardown: function() {
+ var doc = this.ownerDocument || this.document || this,
+ attaches = dataPriv.access( doc, fix ) - 1;
+
+ if ( !attaches ) {
+ doc.removeEventListener( orig, handler, true );
+ dataPriv.remove( doc, fix );
+
+ } else {
+ dataPriv.access( doc, fix, attaches );
+ }
+ }
+ };
+ } );
+}
+var location = window.location;
+
+var nonce = { guid: Date.now() };
+
+var rquery = ( /\?/ );
+
+
+
+// Cross-browser xml parsing
+jQuery.parseXML = function( data ) {
+ var xml;
+ if ( !data || typeof data !== "string" ) {
+ return null;
+ }
+
+ // Support: IE 9 - 11 only
+ // IE throws on parseFromString with invalid input.
+ try {
+ xml = ( new window.DOMParser() ).parseFromString( data, "text/xml" );
+ } catch ( e ) {
+ xml = undefined;
+ }
+
+ if ( !xml || xml.getElementsByTagName( "parsererror" ).length ) {
+ jQuery.error( "Invalid XML: " + data );
+ }
+ return xml;
+};
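+
+// Usage sketch (illustrative document):
+//
+//   var xml = jQuery.parseXML( "<feed><title>demo</title></feed>" );
+//   jQuery( xml ).find( "title" ).text(); // "demo"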
+
+
+var
+ rbracket = /\[\]$/,
+ rCRLF = /\r?\n/g,
+ rsubmitterTypes = /^(?:submit|button|image|reset|file)$/i,
+ rsubmittable = /^(?:input|select|textarea|keygen)/i;
+
+function buildParams( prefix, obj, traditional, add ) {
+ var name;
+
+ if ( Array.isArray( obj ) ) {
+
+ // Serialize array item.
+ jQuery.each( obj, function( i, v ) {
+ if ( traditional || rbracket.test( prefix ) ) {
+
+ // Treat each array item as a scalar.
+ add( prefix, v );
+
+ } else {
+
+ // Item is non-scalar (array or object), encode its numeric index.
+ buildParams(
+ prefix + "[" + ( typeof v === "object" && v != null ? i : "" ) + "]",
+ v,
+ traditional,
+ add
+ );
+ }
+ } );
+
+ } else if ( !traditional && toType( obj ) === "object" ) {
+
+ // Serialize object item.
+ for ( name in obj ) {
+ buildParams( prefix + "[" + name + "]", obj[ name ], traditional, add );
+ }
+
+ } else {
+
+ // Serialize scalar item.
+ add( prefix, obj );
+ }
+}
+
+// Serialize an array of form elements or a set of
+// key/values into a query string
+jQuery.param = function( a, traditional ) {
+ var prefix,
+ s = [],
+ add = function( key, valueOrFunction ) {
+
+ // If value is a function, invoke it and use its return value
+ var value = isFunction( valueOrFunction ) ?
+ valueOrFunction() :
+ valueOrFunction;
+
+ s[ s.length ] = encodeURIComponent( key ) + "=" +
+ encodeURIComponent( value == null ? "" : value );
+ };
+
+ if ( a == null ) {
+ return "";
+ }
+
+ // If an array was passed in, assume that it is an array of form elements.
+ if ( Array.isArray( a ) || ( a.jquery && !jQuery.isPlainObject( a ) ) ) {
+
+ // Serialize the form elements
+ jQuery.each( a, function() {
+ add( this.name, this.value );
+ } );
+
+ } else {
+
+ // If traditional, encode the "old" way (the way 1.3.2 or older
+ // did it), otherwise encode params recursively.
+ for ( prefix in a ) {
+ buildParams( prefix, a[ prefix ], traditional, add );
+ }
+ }
+
+ // Return the resulting serialization
+ return s.join( "&" );
+};
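+
+// Sketch of the two encodings (illustrative data):
+//
+//   jQuery.param( { a: [ 1, 2 ], b: { c: 3 } } );
+//   // => "a%5B%5D=1&a%5B%5D=2&b%5Bc%5D=3"  (recursive: a[]=1&a[]=2&b[c]=3)
+//
+//   jQuery.param( { a: [ 1, 2 ] }, true );
+//   // => "a=1&a=2"  (traditional: every array item treated as a scalar)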
+
+jQuery.fn.extend( {
+ serialize: function() {
+ return jQuery.param( this.serializeArray() );
+ },
+ serializeArray: function() {
+ return this.map( function() {
+
+ // Can add propHook for "elements" to filter or add form elements
+ var elements = jQuery.prop( this, "elements" );
+ return elements ? jQuery.makeArray( elements ) : this;
+ } )
+ .filter( function() {
+ var type = this.type;
+
+ // Use .is( ":disabled" ) so that fieldset[disabled] works
+ return this.name && !jQuery( this ).is( ":disabled" ) &&
+ rsubmittable.test( this.nodeName ) && !rsubmitterTypes.test( type ) &&
+ ( this.checked || !rcheckableType.test( type ) );
+ } )
+ .map( function( _i, elem ) {
+ var val = jQuery( this ).val();
+
+ if ( val == null ) {
+ return null;
+ }
+
+ if ( Array.isArray( val ) ) {
+ return jQuery.map( val, function( val ) {
+ return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) };
+ } );
+ }
+
+ return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) };
+ } ).get();
+ }
+} );
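+
+// Usage sketch (illustrative; "#signup" is a hypothetical form). Disabled and
+// unchecked controls are skipped, per the filter above:
+//
+//   jQuery( "#signup" ).serializeArray(); // [ { name: "user", value: "ada" }, ... ]
+//   jQuery( "#signup" ).serialize();      // "user=ada&..." via jQuery.param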
+
+
+var
+ r20 = /%20/g,
+ rhash = /#.*$/,
+ rantiCache = /([?&])_=[^&]*/,
+ rheaders = /^(.*?):[ \t]*([^\r\n]*)$/mg,
+
+ // #7653, #8125, #8152: local protocol detection
+ rlocalProtocol = /^(?:about|app|app-storage|.+-extension|file|res|widget):$/,
+ rnoContent = /^(?:GET|HEAD)$/,
+ rprotocol = /^\/\//,
+
+ /* Prefilters
+ * 1) They are useful to introduce custom dataTypes (see ajax/jsonp.js for an example)
+ * 2) These are called:
+ * - BEFORE asking for a transport
+ * - AFTER param serialization (s.data is a string if s.processData is true)
+ * 3) key is the dataType
+ * 4) the catchall symbol "*" can be used
+ * 5) execution will start with transport dataType and THEN continue down to "*" if needed
+ */
+ prefilters = {},
+
+ /* Transports bindings
+ * 1) key is the dataType
+ * 2) the catchall symbol "*" can be used
+ * 3) selection will start with transport dataType and THEN go to "*" if needed
+ */
+ transports = {},
+
+ // Avoid comment-prolog char sequence (#10098); must appease lint and evade compression
+ allTypes = "*/".concat( "*" ),
+
+ // Anchor tag for parsing the document origin
+ originAnchor = document.createElement( "a" );
+ originAnchor.href = location.href;
+
+// Base "constructor" for jQuery.ajaxPrefilter and jQuery.ajaxTransport
+function addToPrefiltersOrTransports( structure ) {
+
+ // dataTypeExpression is optional and defaults to "*"
+ return function( dataTypeExpression, func ) {
+
+ if ( typeof dataTypeExpression !== "string" ) {
+ func = dataTypeExpression;
+ dataTypeExpression = "*";
+ }
+
+ var dataType,
+ i = 0,
+ dataTypes = dataTypeExpression.toLowerCase().match( rnothtmlwhite ) || [];
+
+ if ( isFunction( func ) ) {
+
+ // For each dataType in the dataTypeExpression
+ while ( ( dataType = dataTypes[ i++ ] ) ) {
+
+ // Prepend if requested
+ if ( dataType[ 0 ] === "+" ) {
+ dataType = dataType.slice( 1 ) || "*";
+ ( structure[ dataType ] = structure[ dataType ] || [] ).unshift( func );
+
+ // Otherwise append
+ } else {
+ ( structure[ dataType ] = structure[ dataType ] || [] ).push( func );
+ }
+ }
+ }
+ };
+}
+
+// Base inspection function for prefilters and transports
+function inspectPrefiltersOrTransports( structure, options, originalOptions, jqXHR ) {
+
+ var inspected = {},
+ seekingTransport = ( structure === transports );
+
+ function inspect( dataType ) {
+ var selected;
+ inspected[ dataType ] = true;
+ jQuery.each( structure[ dataType ] || [], function( _, prefilterOrFactory ) {
+ var dataTypeOrTransport = prefilterOrFactory( options, originalOptions, jqXHR );
+ if ( typeof dataTypeOrTransport === "string" &&
+ !seekingTransport && !inspected[ dataTypeOrTransport ] ) {
+
+ options.dataTypes.unshift( dataTypeOrTransport );
+ inspect( dataTypeOrTransport );
+ return false;
+ } else if ( seekingTransport ) {
+ return !( selected = dataTypeOrTransport );
+ }
+ } );
+ return selected;
+ }
+
+ return inspect( options.dataTypes[ 0 ] ) || !inspected[ "*" ] && inspect( "*" );
+}
+
+// A special extend for ajax options
+// that takes "flat" options (not to be deep extended)
+// Fixes #9887
+function ajaxExtend( target, src ) {
+ var key, deep,
+ flatOptions = jQuery.ajaxSettings.flatOptions || {};
+
+ for ( key in src ) {
+ if ( src[ key ] !== undefined ) {
+ ( flatOptions[ key ] ? target : ( deep || ( deep = {} ) ) )[ key ] = src[ key ];
+ }
+ }
+ if ( deep ) {
+ jQuery.extend( true, target, deep );
+ }
+
+ return target;
+}
+
+/* Handles responses to an ajax request:
+ * - finds the right dataType (mediates between content-type and expected dataType)
+ * - returns the corresponding response
+ */
+function ajaxHandleResponses( s, jqXHR, responses ) {
+
+ var ct, type, finalDataType, firstDataType,
+ contents = s.contents,
+ dataTypes = s.dataTypes;
+
+ // Remove auto dataType and get content-type in the process
+ while ( dataTypes[ 0 ] === "*" ) {
+ dataTypes.shift();
+ if ( ct === undefined ) {
+ ct = s.mimeType || jqXHR.getResponseHeader( "Content-Type" );
+ }
+ }
+
+ // Check if we're dealing with a known content-type
+ if ( ct ) {
+ for ( type in contents ) {
+ if ( contents[ type ] && contents[ type ].test( ct ) ) {
+ dataTypes.unshift( type );
+ break;
+ }
+ }
+ }
+
+ // Check to see if we have a response for the expected dataType
+ if ( dataTypes[ 0 ] in responses ) {
+ finalDataType = dataTypes[ 0 ];
+ } else {
+
+ // Try convertible dataTypes
+ for ( type in responses ) {
+ if ( !dataTypes[ 0 ] || s.converters[ type + " " + dataTypes[ 0 ] ] ) {
+ finalDataType = type;
+ break;
+ }
+ if ( !firstDataType ) {
+ firstDataType = type;
+ }
+ }
+
+ // Or just use first one
+ finalDataType = finalDataType || firstDataType;
+ }
+
+ // If we found a dataType
+ // We add the dataType to the list if needed
+ // and return the corresponding response
+ if ( finalDataType ) {
+ if ( finalDataType !== dataTypes[ 0 ] ) {
+ dataTypes.unshift( finalDataType );
+ }
+ return responses[ finalDataType ];
+ }
+}
+
+/* Chain conversions given the request and the original response
+ * Also sets the responseXXX fields on the jqXHR instance
+ */
+function ajaxConvert( s, response, jqXHR, isSuccess ) {
+ var conv2, current, conv, tmp, prev,
+ converters = {},
+
+ // Work with a copy of dataTypes in case we need to modify it for conversion
+ dataTypes = s.dataTypes.slice();
+
+ // Create converters map with lowercased keys
+ if ( dataTypes[ 1 ] ) {
+ for ( conv in s.converters ) {
+ converters[ conv.toLowerCase() ] = s.converters[ conv ];
+ }
+ }
+
+ current = dataTypes.shift();
+
+ // Convert to each sequential dataType
+ while ( current ) {
+
+ if ( s.responseFields[ current ] ) {
+ jqXHR[ s.responseFields[ current ] ] = response;
+ }
+
+ // Apply the dataFilter if provided
+ if ( !prev && isSuccess && s.dataFilter ) {
+ response = s.dataFilter( response, s.dataType );
+ }
+
+ prev = current;
+ current = dataTypes.shift();
+
+ if ( current ) {
+
+ // There's only work to do if current dataType is non-auto
+ if ( current === "*" ) {
+
+ current = prev;
+
+ // Convert response if prev dataType is non-auto and differs from current
+ } else if ( prev !== "*" && prev !== current ) {
+
+ // Seek a direct converter
+ conv = converters[ prev + " " + current ] || converters[ "* " + current ];
+
+ // If none found, seek a pair
+ if ( !conv ) {
+ for ( conv2 in converters ) {
+
+ // If conv2 outputs current
+ tmp = conv2.split( " " );
+ if ( tmp[ 1 ] === current ) {
+
+ // If prev can be converted to accepted input
+ conv = converters[ prev + " " + tmp[ 0 ] ] ||
+ converters[ "* " + tmp[ 0 ] ];
+ if ( conv ) {
+
+ // Condense equivalence converters
+ if ( conv === true ) {
+ conv = converters[ conv2 ];
+
+ // Otherwise, insert the intermediate dataType
+ } else if ( converters[ conv2 ] !== true ) {
+ current = tmp[ 0 ];
+ dataTypes.unshift( tmp[ 1 ] );
+ }
+ break;
+ }
+ }
+ }
+ }
+
+ // Apply converter (if not an equivalence)
+ if ( conv !== true ) {
+
+ // Unless errors are allowed to bubble, catch and return them
+ if ( conv && s.throws ) {
+ response = conv( response );
+ } else {
+ try {
+ response = conv( response );
+ } catch ( e ) {
+ return {
+ state: "parsererror",
+ error: conv ? e : "No conversion from " + prev + " to " + current
+ };
+ }
+ }
+ }
+ }
+ }
+ }
+
+ return { state: "success", data: response };
+}
+
+jQuery.extend( {
+
+ // Counter for holding the number of active queries
+ active: 0,
+
+ // Last-Modified header cache for next request
+ lastModified: {},
+ etag: {},
+
+ ajaxSettings: {
+ url: location.href,
+ type: "GET",
+ isLocal: rlocalProtocol.test( location.protocol ),
+ global: true,
+ processData: true,
+ async: true,
+ contentType: "application/x-www-form-urlencoded; charset=UTF-8",
+
+ /*
+ timeout: 0,
+ data: null,
+ dataType: null,
+ username: null,
+ password: null,
+ cache: null,
+ throws: false,
+ traditional: false,
+ headers: {},
+ */
+
+ accepts: {
+ "*": allTypes,
+ text: "text/plain",
+ html: "text/html",
+ xml: "application/xml, text/xml",
+ json: "application/json, text/javascript"
+ },
+
+ contents: {
+ xml: /\bxml\b/,
+ html: /\bhtml/,
+ json: /\bjson\b/
+ },
+
+ responseFields: {
+ xml: "responseXML",
+ text: "responseText",
+ json: "responseJSON"
+ },
+
+ // Data converters
+ // Keys separate source (or catchall "*") and destination types with a single space
+ converters: {
+
+ // Convert anything to text
+ "* text": String,
+
+ // Text to html (true = no transformation)
+ "text html": true,
+
+ // Evaluate text as a json expression
+ "text json": JSON.parse,
+
+ // Parse text as xml
+ "text xml": jQuery.parseXML
+ },
+
+ // For options that shouldn't be deep extended:
+ // add your own custom options here if and when
+ // you create one (see ajaxExtend)
+ flatOptions: {
+ url: true,
+ context: true
+ }
+ },
+
+ // Creates a full fledged settings object into target
+ // with both ajaxSettings and settings fields.
+ // If target is omitted, writes into ajaxSettings.
+ ajaxSetup: function( target, settings ) {
+ return settings ?
+
+ // Building a settings object
+ ajaxExtend( ajaxExtend( target, jQuery.ajaxSettings ), settings ) :
+
+ // Extending ajaxSettings
+ ajaxExtend( jQuery.ajaxSettings, target );
+ },
+
+ ajaxPrefilter: addToPrefiltersOrTransports( prefilters ),
+ ajaxTransport: addToPrefiltersOrTransports( transports ),
+
+ // Main method
+ ajax: function( url, options ) {
+
+ // If url is an object, simulate pre-1.5 signature
+ if ( typeof url === "object" ) {
+ options = url;
+ url = undefined;
+ }
+
+ // Force options to be an object
+ options = options || {};
+
+ var transport,
+
+ // URL without anti-cache param
+ cacheURL,
+
+ // Response headers
+ responseHeadersString,
+ responseHeaders,
+
+ // timeout handle
+ timeoutTimer,
+
+ // Url cleanup var
+ urlAnchor,
+
+ // Request state (becomes false upon send and true upon completion)
+ completed,
+
+ // To know if global events are to be dispatched
+ fireGlobals,
+
+ // Loop variable
+ i,
+
+ // uncached part of the url
+ uncached,
+
+ // Create the final options object
+ s = jQuery.ajaxSetup( {}, options ),
+
+ // Callbacks context
+ callbackContext = s.context || s,
+
+ // Context for global events is callbackContext if it is a DOM node or jQuery collection
+ globalEventContext = s.context &&
+ ( callbackContext.nodeType || callbackContext.jquery ) ?
+ jQuery( callbackContext ) :
+ jQuery.event,
+
+ // Deferreds
+ deferred = jQuery.Deferred(),
+ completeDeferred = jQuery.Callbacks( "once memory" ),
+
+ // Status-dependent callbacks
+ statusCode = s.statusCode || {},
+
+ // Headers (they are sent all at once)
+ requestHeaders = {},
+ requestHeadersNames = {},
+
+ // Default abort message
+ strAbort = "canceled",
+
+ // Fake xhr
+ jqXHR = {
+ readyState: 0,
+
+ // Builds headers hashtable if needed
+ getResponseHeader: function( key ) {
+ var match;
+ if ( completed ) {
+ if ( !responseHeaders ) {
+ responseHeaders = {};
+ while ( ( match = rheaders.exec( responseHeadersString ) ) ) {
+ responseHeaders[ match[ 1 ].toLowerCase() + " " ] =
+ ( responseHeaders[ match[ 1 ].toLowerCase() + " " ] || [] )
+ .concat( match[ 2 ] );
+ }
+ }
+ match = responseHeaders[ key.toLowerCase() + " " ];
+ }
+ return match == null ? null : match.join( ", " );
+ },
+
+ // Raw string
+ getAllResponseHeaders: function() {
+ return completed ? responseHeadersString : null;
+ },
+
+ // Caches the header
+ setRequestHeader: function( name, value ) {
+ if ( completed == null ) {
+ name = requestHeadersNames[ name.toLowerCase() ] =
+ requestHeadersNames[ name.toLowerCase() ] || name;
+ requestHeaders[ name ] = value;
+ }
+ return this;
+ },
+
+ // Overrides response content-type header
+ overrideMimeType: function( type ) {
+ if ( completed == null ) {
+ s.mimeType = type;
+ }
+ return this;
+ },
+
+ // Status-dependent callbacks
+ statusCode: function( map ) {
+ var code;
+ if ( map ) {
+ if ( completed ) {
+
+ // Execute the appropriate callbacks
+ jqXHR.always( map[ jqXHR.status ] );
+ } else {
+
+ // Lazy-add the new callbacks in a way that preserves old ones
+ for ( code in map ) {
+ statusCode[ code ] = [ statusCode[ code ], map[ code ] ];
+ }
+ }
+ }
+ return this;
+ },
+
+ // Cancel the request
+ abort: function( statusText ) {
+ var finalText = statusText || strAbort;
+ if ( transport ) {
+ transport.abort( finalText );
+ }
+ done( 0, finalText );
+ return this;
+ }
+ };
+
+ // Attach deferreds
+ deferred.promise( jqXHR );
+
+ // Add protocol if not provided (prefilters might expect it)
+ // Handle falsy url in the settings object (#10093: consistency with old signature)
+ // We also use the url parameter if available
+ s.url = ( ( url || s.url || location.href ) + "" )
+ .replace( rprotocol, location.protocol + "//" );
+
+ // Alias method option to type as per ticket #12004
+ s.type = options.method || options.type || s.method || s.type;
+
+ // Extract dataTypes list
+ s.dataTypes = ( s.dataType || "*" ).toLowerCase().match( rnothtmlwhite ) || [ "" ];
+
+ // A cross-domain request is in order when the origin doesn't match the current origin.
+ if ( s.crossDomain == null ) {
+ urlAnchor = document.createElement( "a" );
+
+ // Support: IE <=8 - 11, Edge 12 - 15
+ // IE throws exception on accessing the href property if url is malformed,
+ // e.g. http://example.com:80x/
+ try {
+ urlAnchor.href = s.url;
+
+ // Support: IE <=8 - 11 only
+ // Anchor's host property isn't correctly set when s.url is relative
+ urlAnchor.href = urlAnchor.href;
+ s.crossDomain = originAnchor.protocol + "//" + originAnchor.host !==
+ urlAnchor.protocol + "//" + urlAnchor.host;
+ } catch ( e ) {
+
+ // If there is an error parsing the URL, assume it is crossDomain,
+ // it can be rejected by the transport if it is invalid
+ s.crossDomain = true;
+ }
+ }
+
+ // Convert data if not already a string
+ if ( s.data && s.processData && typeof s.data !== "string" ) {
+ s.data = jQuery.param( s.data, s.traditional );
+ }
+
+ // Apply prefilters
+ inspectPrefiltersOrTransports( prefilters, s, options, jqXHR );
+
+ // If request was aborted inside a prefilter, stop there
+ if ( completed ) {
+ return jqXHR;
+ }
+
+ // We can fire global events as of now if asked to
+ // Don't fire events if jQuery.event is undefined in an AMD-usage scenario (#15118)
+ fireGlobals = jQuery.event && s.global;
+
+ // Watch for a new set of requests
+ if ( fireGlobals && jQuery.active++ === 0 ) {
+ jQuery.event.trigger( "ajaxStart" );
+ }
+
+ // Uppercase the type
+ s.type = s.type.toUpperCase();
+
+ // Determine if request has content
+ s.hasContent = !rnoContent.test( s.type );
+
+ // Save the URL in case we're toying with the If-Modified-Since
+ // and/or If-None-Match header later on
+ // Remove hash to simplify url manipulation
+ cacheURL = s.url.replace( rhash, "" );
+
+ // More options handling for requests with no content
+ if ( !s.hasContent ) {
+
+ // Remember the hash so we can put it back
+ uncached = s.url.slice( cacheURL.length );
+
+ // If data is available and should be processed, append data to url
+ if ( s.data && ( s.processData || typeof s.data === "string" ) ) {
+ cacheURL += ( rquery.test( cacheURL ) ? "&" : "?" ) + s.data;
+
+ // #9682: remove data so that it's not used in an eventual retry
+ delete s.data;
+ }
+
+ // Add or update anti-cache param if needed
+ if ( s.cache === false ) {
+ cacheURL = cacheURL.replace( rantiCache, "$1" );
+ uncached = ( rquery.test( cacheURL ) ? "&" : "?" ) + "_=" + ( nonce.guid++ ) +
+ uncached;
+ }
+
+ // Put hash and anti-cache on the URL that will be requested (gh-1732)
+ s.url = cacheURL + uncached;
+
+ // Change '%20' to '+' if this is encoded form body content (gh-2658)
+ } else if ( s.data && s.processData &&
+ ( s.contentType || "" ).indexOf( "application/x-www-form-urlencoded" ) === 0 ) {
+ s.data = s.data.replace( r20, "+" );
+ }
+
+ // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode.
+ if ( s.ifModified ) {
+ if ( jQuery.lastModified[ cacheURL ] ) {
+ jqXHR.setRequestHeader( "If-Modified-Since", jQuery.lastModified[ cacheURL ] );
+ }
+ if ( jQuery.etag[ cacheURL ] ) {
+ jqXHR.setRequestHeader( "If-None-Match", jQuery.etag[ cacheURL ] );
+ }
+ }
+
+ // Set the correct header, if data is being sent
+ if ( s.data && s.hasContent && s.contentType !== false || options.contentType ) {
+ jqXHR.setRequestHeader( "Content-Type", s.contentType );
+ }
+
+ // Set the Accepts header for the server, depending on the dataType
+ jqXHR.setRequestHeader(
+ "Accept",
+ s.dataTypes[ 0 ] && s.accepts[ s.dataTypes[ 0 ] ] ?
+ s.accepts[ s.dataTypes[ 0 ] ] +
+ ( s.dataTypes[ 0 ] !== "*" ? ", " + allTypes + "; q=0.01" : "" ) :
+ s.accepts[ "*" ]
+ );
+
+ // Check for headers option
+ for ( i in s.headers ) {
+ jqXHR.setRequestHeader( i, s.headers[ i ] );
+ }
+
+ // Allow custom headers/mimetypes and early abort
+ if ( s.beforeSend &&
+ ( s.beforeSend.call( callbackContext, jqXHR, s ) === false || completed ) ) {
+
+ // Abort if not done already and return
+ return jqXHR.abort();
+ }
+
+ // Aborting is no longer a cancellation
+ strAbort = "abort";
+
+ // Install callbacks on deferreds
+ completeDeferred.add( s.complete );
+ jqXHR.done( s.success );
+ jqXHR.fail( s.error );
+
+ // Get transport
+ transport = inspectPrefiltersOrTransports( transports, s, options, jqXHR );
+
+ // If no transport, we auto-abort
+ if ( !transport ) {
+ done( -1, "No Transport" );
+ } else {
+ jqXHR.readyState = 1;
+
+ // Send global event
+ if ( fireGlobals ) {
+ globalEventContext.trigger( "ajaxSend", [ jqXHR, s ] );
+ }
+
+ // If request was aborted inside ajaxSend, stop there
+ if ( completed ) {
+ return jqXHR;
+ }
+
+ // Timeout
+ if ( s.async && s.timeout > 0 ) {
+ timeoutTimer = window.setTimeout( function() {
+ jqXHR.abort( "timeout" );
+ }, s.timeout );
+ }
+
+ try {
+ completed = false;
+ transport.send( requestHeaders, done );
+ } catch ( e ) {
+
+ // Rethrow post-completion exceptions
+ if ( completed ) {
+ throw e;
+ }
+
+ // Propagate others as results
+ done( -1, e );
+ }
+ }
+
+ // Callback for when everything is done
+ function done( status, nativeStatusText, responses, headers ) {
+ var isSuccess, success, error, response, modified,
+ statusText = nativeStatusText;
+
+ // Ignore repeat invocations
+ if ( completed ) {
+ return;
+ }
+
+ completed = true;
+
+ // Clear timeout if it exists
+ if ( timeoutTimer ) {
+ window.clearTimeout( timeoutTimer );
+ }
+
+ // Dereference transport for early garbage collection
+ // (no matter how long the jqXHR object will be used)
+ transport = undefined;
+
+ // Cache response headers
+ responseHeadersString = headers || "";
+
+ // Set readyState
+ jqXHR.readyState = status > 0 ? 4 : 0;
+
+ // Determine if successful
+ isSuccess = status >= 200 && status < 300 || status === 304;
+
+ // Get response data
+ if ( responses ) {
+ response = ajaxHandleResponses( s, jqXHR, responses );
+ }
+
+ // Use a noop converter for missing script
+ if ( !isSuccess && jQuery.inArray( "script", s.dataTypes ) > -1 ) {
+ s.converters[ "text script" ] = function() {};
+ }
+
+ // Convert no matter what (that way responseXXX fields are always set)
+ response = ajaxConvert( s, response, jqXHR, isSuccess );
+
+ // If successful, handle type chaining
+ if ( isSuccess ) {
+
+ // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode.
+ if ( s.ifModified ) {
+ modified = jqXHR.getResponseHeader( "Last-Modified" );
+ if ( modified ) {
+ jQuery.lastModified[ cacheURL ] = modified;
+ }
+ modified = jqXHR.getResponseHeader( "etag" );
+ if ( modified ) {
+ jQuery.etag[ cacheURL ] = modified;
+ }
+ }
+
+ // if no content
+ if ( status === 204 || s.type === "HEAD" ) {
+ statusText = "nocontent";
+
+ // if not modified
+ } else if ( status === 304 ) {
+ statusText = "notmodified";
+
+ // If we have data, let's convert it
+ } else {
+ statusText = response.state;
+ success = response.data;
+ error = response.error;
+ isSuccess = !error;
+ }
+ } else {
+
+ // Extract error from statusText and normalize for non-aborts
+ error = statusText;
+ if ( status || !statusText ) {
+ statusText = "error";
+ if ( status < 0 ) {
+ status = 0;
+ }
+ }
+ }
+
+ // Set data for the fake xhr object
+ jqXHR.status = status;
+ jqXHR.statusText = ( nativeStatusText || statusText ) + "";
+
+ // Success/Error
+ if ( isSuccess ) {
+ deferred.resolveWith( callbackContext, [ success, statusText, jqXHR ] );
+ } else {
+ deferred.rejectWith( callbackContext, [ jqXHR, statusText, error ] );
+ }
+
+ // Status-dependent callbacks
+ jqXHR.statusCode( statusCode );
+ statusCode = undefined;
+
+ if ( fireGlobals ) {
+ globalEventContext.trigger( isSuccess ? "ajaxSuccess" : "ajaxError",
+ [ jqXHR, s, isSuccess ? success : error ] );
+ }
+
+ // Complete
+ completeDeferred.fireWith( callbackContext, [ jqXHR, statusText ] );
+
+ if ( fireGlobals ) {
+ globalEventContext.trigger( "ajaxComplete", [ jqXHR, s ] );
+
+ // Handle the global AJAX counter
+ if ( !( --jQuery.active ) ) {
+ jQuery.event.trigger( "ajaxStop" );
+ }
+ }
+ }
+
+ return jqXHR;
+ },
+
+ getJSON: function( url, data, callback ) {
+ return jQuery.get( url, data, callback, "json" );
+ },
+
+ getScript: function( url, callback ) {
+ return jQuery.get( url, undefined, callback, "script" );
+ }
+} );
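+
+// Usage sketch for the jqXHR promise interface above (illustrative URL and options):
+//
+//   jQuery.ajax( { url: "/api/items", dataType: "json", timeout: 5000 } )
+//       .done( function( data, statusText, jqXHR ) { /* success */ } )
+//       .fail( function( jqXHR, statusText, error ) { /* error, timeout, abort */ } )
+//       .always( function() { /* runs after done/fail */ } );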
+
+jQuery.each( [ "get", "post" ], function( _i, method ) {
+ jQuery[ method ] = function( url, data, callback, type ) {
+
+ // Shift arguments if data argument was omitted
+ if ( isFunction( data ) ) {
+ type = type || callback;
+ callback = data;
+ data = undefined;
+ }
+
+ // The url can be an options object (which then must have .url)
+ return jQuery.ajax( jQuery.extend( {
+ url: url,
+ type: method,
+ dataType: type,
+ data: data,
+ success: callback
+ }, jQuery.isPlainObject( url ) && url ) );
+ };
+} );
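+
+// Sketch of the shorthands (illustrative): omitting data shifts the callback
+// into its place, as handled above:
+//
+//   jQuery.get( "/api/items", function( data ) { /* ... */ } );
+//   jQuery.post( "/api/items", { name: "ada" }, function( data ) { /* ... */ }, "json" );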
+
+jQuery.ajaxPrefilter( function( s ) {
+ var i;
+ for ( i in s.headers ) {
+ if ( i.toLowerCase() === "content-type" ) {
+ s.contentType = s.headers[ i ] || "";
+ }
+ }
+} );
+
+
+jQuery._evalUrl = function( url, options, doc ) {
+ return jQuery.ajax( {
+ url: url,
+
+ // Make this explicit, since the user can override it through ajaxSetup (#11264)
+ type: "GET",
+ dataType: "script",
+ cache: true,
+ async: false,
+ global: false,
+
+ // Only evaluate the response if it is successful (gh-4126)
+ // dataFilter is not invoked for failure responses, so using it instead
+ // of the default converter is kludgy but it works.
+ converters: {
+ "text script": function() {}
+ },
+ dataFilter: function( response ) {
+ jQuery.globalEval( response, options, doc );
+ }
+ } );
+};
+
+
+jQuery.fn.extend( {
+ wrapAll: function( html ) {
+ var wrap;
+
+ if ( this[ 0 ] ) {
+ if ( isFunction( html ) ) {
+ html = html.call( this[ 0 ] );
+ }
+
+ // The elements to wrap the target around
+ wrap = jQuery( html, this[ 0 ].ownerDocument ).eq( 0 ).clone( true );
+
+ if ( this[ 0 ].parentNode ) {
+ wrap.insertBefore( this[ 0 ] );
+ }
+
+ wrap.map( function() {
+ var elem = this;
+
+ while ( elem.firstElementChild ) {
+ elem = elem.firstElementChild;
+ }
+
+ return elem;
+ } ).append( this );
+ }
+
+ return this;
+ },
+
+ wrapInner: function( html ) {
+ if ( isFunction( html ) ) {
+ return this.each( function( i ) {
+ jQuery( this ).wrapInner( html.call( this, i ) );
+ } );
+ }
+
+ return this.each( function() {
+ var self = jQuery( this ),
+ contents = self.contents();
+
+ if ( contents.length ) {
+ contents.wrapAll( html );
+
+ } else {
+ self.append( html );
+ }
+ } );
+ },
+
+ wrap: function( html ) {
+ var htmlIsFunction = isFunction( html );
+
+ return this.each( function( i ) {
+ jQuery( this ).wrapAll( htmlIsFunction ? html.call( this, i ) : html );
+ } );
+ },
+
+ unwrap: function( selector ) {
+ this.parent( selector ).not( "body" ).each( function() {
+ jQuery( this ).replaceWith( this.childNodes );
+ } );
+ return this;
+ }
+} );
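+
+// Usage sketch (illustrative markup):
+//
+//   jQuery( ".inner" ).wrap( "<div class='outer'></div>" );    // one wrapper each
+//   jQuery( ".inner" ).wrapAll( "<div class='outer'></div>" ); // one shared wrapper
+//   jQuery( ".inner" ).wrapInner( "<b></b>" );                 // wrap the contents
+//   jQuery( ".inner" ).unwrap();                               // remove the parent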
+
+
+jQuery.expr.pseudos.hidden = function( elem ) {
+ return !jQuery.expr.pseudos.visible( elem );
+};
+jQuery.expr.pseudos.visible = function( elem ) {
+ return !!( elem.offsetWidth || elem.offsetHeight || elem.getClientRects().length );
+};
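+
+// Sketch (illustrative): ":visible" here means the element has layout boxes,
+// not that its CSS visibility is "visible":
+//
+//   jQuery( "div:visible" ); // offsetWidth/offsetHeight or client rects present
+//   jQuery( "div:hidden" );  // the complement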
+
+
+
+
+jQuery.ajaxSettings.xhr = function() {
+ try {
+ return new window.XMLHttpRequest();
+ } catch ( e ) {}
+};
+
+var xhrSuccessStatus = {
+
+ // File protocol always yields status code 0, assume 200
+ 0: 200,
+
+ // Support: IE <=9 only
+ // #1450: sometimes IE returns 1223 when it should be 204
+ 1223: 204
+ },
+ xhrSupported = jQuery.ajaxSettings.xhr();
+
+support.cors = !!xhrSupported && ( "withCredentials" in xhrSupported );
+support.ajax = xhrSupported = !!xhrSupported;
+
+jQuery.ajaxTransport( function( options ) {
+ var callback, errorCallback;
+
+ // Cross domain only allowed if supported through XMLHttpRequest
+ if ( support.cors || xhrSupported && !options.crossDomain ) {
+ return {
+ send: function( headers, complete ) {
+ var i,
+ xhr = options.xhr();
+
+ xhr.open(
+ options.type,
+ options.url,
+ options.async,
+ options.username,
+ options.password
+ );
+
+ // Apply custom fields if provided
+ if ( options.xhrFields ) {
+ for ( i in options.xhrFields ) {
+ xhr[ i ] = options.xhrFields[ i ];
+ }
+ }
+
+ // Override mime type if needed
+ if ( options.mimeType && xhr.overrideMimeType ) {
+ xhr.overrideMimeType( options.mimeType );
+ }
+
+ // X-Requested-With header
+ // For cross-domain requests, seeing as conditions for a preflight are
+ // akin to a jigsaw puzzle, we simply never set it to be sure.
+ // (it can always be set on a per-request basis or even using ajaxSetup)
+ // For same-domain requests, won't change header if already provided.
+ if ( !options.crossDomain && !headers[ "X-Requested-With" ] ) {
+ headers[ "X-Requested-With" ] = "XMLHttpRequest";
+ }
+
+ // Set headers
+ for ( i in headers ) {
+ xhr.setRequestHeader( i, headers[ i ] );
+ }
+
+ // Callback
+ callback = function( type ) {
+ return function() {
+ if ( callback ) {
+ callback = errorCallback = xhr.onload =
+ xhr.onerror = xhr.onabort = xhr.ontimeout =
+ xhr.onreadystatechange = null;
+
+ if ( type === "abort" ) {
+ xhr.abort();
+ } else if ( type === "error" ) {
+
+ // Support: IE <=9 only
+ // On a manual native abort, IE9 throws
+ // errors on any property access that is not readyState
+ if ( typeof xhr.status !== "number" ) {
+ complete( 0, "error" );
+ } else {
+ complete(
+
+ // File: protocol always yields status 0; see #8605, #14207
+ xhr.status,
+ xhr.statusText
+ );
+ }
+ } else {
+ complete(
+ xhrSuccessStatus[ xhr.status ] || xhr.status,
+ xhr.statusText,
+
+ // Support: IE <=9 only
+ // IE9 has no XHR2 but throws on binary (trac-11426)
+ // For XHR2 non-text, let the caller handle it (gh-2498)
+ ( xhr.responseType || "text" ) !== "text" ||
+ typeof xhr.responseText !== "string" ?
+ { binary: xhr.response } :
+ { text: xhr.responseText },
+ xhr.getAllResponseHeaders()
+ );
+ }
+ }
+ };
+ };
+
+ // Listen to events
+ xhr.onload = callback();
+ errorCallback = xhr.onerror = xhr.ontimeout = callback( "error" );
+
+ // Support: IE 9 only
+ // Use onreadystatechange to replace onabort
+ // to handle uncaught aborts
+ if ( xhr.onabort !== undefined ) {
+ xhr.onabort = errorCallback;
+ } else {
+ xhr.onreadystatechange = function() {
+
+ // Check readyState before timeout as it changes
+ if ( xhr.readyState === 4 ) {
+
+ // Allow onerror to be called first,
+ // but that will not handle a native abort
+ // Also, save errorCallback to a variable
+ // as xhr.onerror cannot be accessed
+ window.setTimeout( function() {
+ if ( callback ) {
+ errorCallback();
+ }
+ } );
+ }
+ };
+ }
+
+ // Create the abort callback
+ callback = callback( "abort" );
+
+ try {
+
+ // Do send the request (this may raise an exception)
+ xhr.send( options.hasContent && options.data || null );
+ } catch ( e ) {
+
+ // #14683: Only rethrow if this hasn't been notified as an error yet
+ if ( callback ) {
+ throw e;
+ }
+ }
+ },
+
+ abort: function() {
+ if ( callback ) {
+ callback();
+ }
+ }
+ };
+ }
+} );
+
+
+
+
+// Prevent auto-execution of scripts when no explicit dataType was provided (See gh-2432)
+jQuery.ajaxPrefilter( function( s ) {
+ if ( s.crossDomain ) {
+ s.contents.script = false;
+ }
+} );
+
+// Install script dataType
+jQuery.ajaxSetup( {
+ accepts: {
+ script: "text/javascript, application/javascript, " +
+ "application/ecmascript, application/x-ecmascript"
+ },
+ contents: {
+ script: /\b(?:java|ecma)script\b/
+ },
+ converters: {
+ "text script": function( text ) {
+ jQuery.globalEval( text );
+ return text;
+ }
+ }
+} );
+
+// Handle cache's special case and crossDomain
+jQuery.ajaxPrefilter( "script", function( s ) {
+ if ( s.cache === undefined ) {
+ s.cache = false;
+ }
+ if ( s.crossDomain ) {
+ s.type = "GET";
+ }
+} );
+
+// Bind script tag hack transport
+jQuery.ajaxTransport( "script", function( s ) {
+
+ // This transport only deals with cross domain or forced-by-attrs requests
+ if ( s.crossDomain || s.scriptAttrs ) {
+ var script, callback;
+ return {
+ send: function( _, complete ) {
+ script = jQuery( "<script>" )
+ .attr( s.scriptAttrs || {} )
+ .prop( { charset: s.scriptCharset, src: s.url } )
+ .on( "load error", callback = function( evt ) {
+ script.remove();
+ callback = null;
+ if ( evt ) {
+ complete( evt.type === "error" ? 404 : 200, evt.type );
+ }
+ } );
+
+ // Use native DOM manipulation to avoid our domManip AJAX trickery
+ document.head.appendChild( script[ 0 ] );
+ },
+ abort: function() {
+ if ( callback ) {
+ callback();
+ }
+ }
+ };
+ }
+} );
+
+
+
+
+var oldCallbacks = [],
+ rjsonp = /(=)\?(?=&|$)|\?\?/;
+
+// Default jsonp settings
+jQuery.ajaxSetup( {
+ jsonp: "callback",
+ jsonpCallback: function() {
+ var callback = oldCallbacks.pop() || ( jQuery.expando + "_" + ( nonce.guid++ ) );
+ this[ callback ] = true;
+ return callback;
+ }
+} );
+
+// Detect, normalize options and install callbacks for jsonp requests
+jQuery.ajaxPrefilter( "json jsonp", function( s, originalSettings, jqXHR ) {
+
+ var callbackName, overwritten, responseContainer,
+ jsonProp = s.jsonp !== false && ( rjsonp.test( s.url ) ?
+ "url" :
+ typeof s.data === "string" &&
+ ( s.contentType || "" )
+ .indexOf( "application/x-www-form-urlencoded" ) === 0 &&
+ rjsonp.test( s.data ) && "data"
+ );
+
+ // Handle iff the expected data type is "jsonp" or we have a parameter to set
+ if ( jsonProp || s.dataTypes[ 0 ] === "jsonp" ) {
+
+ // Get callback name, remembering preexisting value associated with it
+ callbackName = s.jsonpCallback = isFunction( s.jsonpCallback ) ?
+ s.jsonpCallback() :
+ s.jsonpCallback;
+
+ // Insert callback into url or form data
+ if ( jsonProp ) {
+ s[ jsonProp ] = s[ jsonProp ].replace( rjsonp, "$1" + callbackName );
+ } else if ( s.jsonp !== false ) {
+ s.url += ( rquery.test( s.url ) ? "&" : "?" ) + s.jsonp + "=" + callbackName;
+ }
+
+ // Use data converter to retrieve json after script execution
+ s.converters[ "script json" ] = function() {
+ if ( !responseContainer ) {
+ jQuery.error( callbackName + " was not called" );
+ }
+ return responseContainer[ 0 ];
+ };
+
+ // Force json dataType
+ s.dataTypes[ 0 ] = "json";
+
+ // Install callback
+ overwritten = window[ callbackName ];
+ window[ callbackName ] = function() {
+ responseContainer = arguments;
+ };
+
+ // Clean-up function (fires after converters)
+ jqXHR.always( function() {
+
+ // If previous value didn't exist - remove it
+ if ( overwritten === undefined ) {
+ jQuery( window ).removeProp( callbackName );
+
+ // Otherwise restore preexisting value
+ } else {
+ window[ callbackName ] = overwritten;
+ }
+
+ // Save back as free
+ if ( s[ callbackName ] ) {
+
+ // Make sure that re-using the options doesn't screw things up
+ s.jsonpCallback = originalSettings.jsonpCallback;
+
+ // Save the callback name for future use
+ oldCallbacks.push( callbackName );
+ }
+
+ // Call if it was a function and we have a response
+ if ( responseContainer && isFunction( overwritten ) ) {
+ overwritten( responseContainer[ 0 ] );
+ }
+
+ responseContainer = overwritten = undefined;
+ } );
+
+ // Delegate to script
+ return "script";
+ }
+} );
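+
+// Usage sketch (illustrative endpoint): either "=?" in the URL/data or
+// dataType "jsonp" routes the request through the script transport with a
+// generated callback name:
+//
+//   jQuery.ajax( { url: "https://example.org/feed?callback=?", dataType: "jsonp" } )
+//       .done( function( json ) { /* parsed response */ } );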
+
+
+
+
+// Support: Safari 8 only
+// In Safari 8 documents created via document.implementation.createHTMLDocument
+// collapse sibling forms: the second one becomes a child of the first one.
+// Because of that, this security measure has to be disabled in Safari 8.
+// https://bugs.webkit.org/show_bug.cgi?id=137337
+support.createHTMLDocument = ( function() {
+ var body = document.implementation.createHTMLDocument( "" ).body;
+ body.innerHTML = "<form></form><form></form>";
+ return body.childNodes.length === 2;
+} )();
+
+
+// Argument "data" should be string of html
+// context (optional): If specified, the fragment will be created in this context,
+// defaults to document
+// keepScripts (optional): If true, will include scripts passed in the html string
+jQuery.parseHTML = function( data, context, keepScripts ) {
+ if ( typeof data !== "string" ) {
+ return [];
+ }
+ if ( typeof context === "boolean" ) {
+ keepScripts = context;
+ context = false;
+ }
+
+ var base, parsed, scripts;
+
+ if ( !context ) {
+
+ // Stop scripts or inline event handlers from being executed immediately
+ // by using document.implementation
+ if ( support.createHTMLDocument ) {
+ context = document.implementation.createHTMLDocument( "" );
+
+ // Set the base href for the created document
+ // so any parsed elements with URLs
+ // are based on the document's URL (gh-2965)
+ base = context.createElement( "base" );
+ base.href = document.location.href;
+ context.head.appendChild( base );
+ } else {
+ context = document;
+ }
+ }
+
+ parsed = rsingleTag.exec( data );
+ scripts = !keepScripts && [];
+
+ // Single tag
+ if ( parsed ) {
+ return [ context.createElement( parsed[ 1 ] ) ];
+ }
+
+ parsed = buildFragment( [ data ], context, scripts );
+
+ if ( scripts && scripts.length ) {
+ jQuery( scripts ).remove();
+ }
+
+ return jQuery.merge( [], parsed.childNodes );
+};
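+
+// Usage sketch (illustrative): scripts are stripped unless keepScripts is true:
+//
+//   jQuery.parseHTML( "<p>hi</p><script>boom()<\/script>" ); // => [ <p> ]
+//   jQuery.parseHTML( "<p>hi</p>", document, true );         // keep any scripts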
+
+
+/**
+ * Load a url into a page
+ */
+jQuery.fn.load = function( url, params, callback ) {
+ var selector, type, response,
+ self = this,
+ off = url.indexOf( " " );
+
+ if ( off > -1 ) {
+ selector = stripAndCollapse( url.slice( off ) );
+ url = url.slice( 0, off );
+ }
+
+ // If it's a function
+ if ( isFunction( params ) ) {
+
+ // We assume that it's the callback
+ callback = params;
+ params = undefined;
+
+ // Otherwise, build a param string
+ } else if ( params && typeof params === "object" ) {
+ type = "POST";
+ }
+
+ // If we have elements to modify, make the request
+ if ( self.length > 0 ) {
+ jQuery.ajax( {
+ url: url,
+
+ // If "type" variable is undefined, then "GET" method will be used.
+ // Make value of this field explicit since
+ // user can override it through ajaxSetup method
+ type: type || "GET",
+ dataType: "html",
+ data: params
+ } ).done( function( responseText ) {
+
+ // Save response for use in complete callback
+ response = arguments;
+
+ self.html( selector ?
+
+ // If a selector was specified, locate the right elements in a dummy div
+ // Exclude scripts to avoid IE 'Permission Denied' errors
+ jQuery( "<div>" ).append( jQuery.parseHTML( responseText ) ).find( selector ) :
+
+ // Otherwise use the full result
+ responseText );
+
+ // If the request succeeds, this function gets "data", "status", "jqXHR"
+ // but they are ignored because response was set above.
+ // If it fails, this function gets "jqXHR", "status", "error"
+ } ).always( callback && function( jqXHR, status ) {
+ self.each( function() {
+ callback.apply( this, response || [ jqXHR.responseText, status, jqXHR ] );
+ } );
+ } );
+ }
+
+ return this;
+};
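+
+// Usage sketch (illustrative): a selector after the URL inserts just that fragment:
+//
+//   jQuery( "#result" ).load( "/list.html #items", function( text, status, jqXHR ) {
+//       // runs once per element after the content is inserted
+//   } );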
+
+
+
+
+jQuery.expr.pseudos.animated = function( elem ) {
+ return jQuery.grep( jQuery.timers, function( fn ) {
+ return elem === fn.elem;
+ } ).length;
+};
+
+
+
+
+jQuery.offset = {
+ setOffset: function( elem, options, i ) {
+ var curPosition, curLeft, curCSSTop, curTop, curOffset, curCSSLeft, calculatePosition,
+ position = jQuery.css( elem, "position" ),
+ curElem = jQuery( elem ),
+ props = {};
+
+ // Set position first, in case top/left are set even on static elem
+ if ( position === "static" ) {
+ elem.style.position = "relative";
+ }
+
+ curOffset = curElem.offset();
+ curCSSTop = jQuery.css( elem, "top" );
+ curCSSLeft = jQuery.css( elem, "left" );
+ calculatePosition = ( position === "absolute" || position === "fixed" ) &&
+ ( curCSSTop + curCSSLeft ).indexOf( "auto" ) > -1;
+
+ // Need to be able to calculate position if either
+ // top or left is auto and position is either absolute or fixed
+ if ( calculatePosition ) {
+ curPosition = curElem.position();
+ curTop = curPosition.top;
+ curLeft = curPosition.left;
+
+ } else {
+ curTop = parseFloat( curCSSTop ) || 0;
+ curLeft = parseFloat( curCSSLeft ) || 0;
+ }
+
+ if ( isFunction( options ) ) {
+
+ // Use jQuery.extend here to allow modification of coordinates argument (gh-1848)
+ options = options.call( elem, i, jQuery.extend( {}, curOffset ) );
+ }
+
+ if ( options.top != null ) {
+ props.top = ( options.top - curOffset.top ) + curTop;
+ }
+ if ( options.left != null ) {
+ props.left = ( options.left - curOffset.left ) + curLeft;
+ }
+
+ if ( "using" in options ) {
+ options.using.call( elem, props );
+
+ } else {
+ if ( typeof props.top === "number" ) {
+ props.top += "px";
+ }
+ if ( typeof props.left === "number" ) {
+ props.left += "px";
+ }
+ curElem.css( props );
+ }
+ }
+};
+
+jQuery.fn.extend( {
+
+ // offset() relates an element's border box to the document origin
+ offset: function( options ) {
+
+ // Preserve chaining for setter
+ if ( arguments.length ) {
+ return options === undefined ?
+ this :
+ this.each( function( i ) {
+ jQuery.offset.setOffset( this, options, i );
+ } );
+ }
+
+ var rect, win,
+ elem = this[ 0 ];
+
+ if ( !elem ) {
+ return;
+ }
+
+ // Return zeros for disconnected and hidden (display: none) elements (gh-2310)
+ // Support: IE <=11 only
+ // Running getBoundingClientRect on a
+ // disconnected node in IE throws an error
+ if ( !elem.getClientRects().length ) {
+ return { top: 0, left: 0 };
+ }
+
+ // Get document-relative position by adding viewport scroll to viewport-relative gBCR
+ rect = elem.getBoundingClientRect();
+ win = elem.ownerDocument.defaultView;
+ return {
+ top: rect.top + win.pageYOffset,
+ left: rect.left + win.pageXOffset
+ };
+ },
+
+ // position() relates an element's margin box to its offset parent's padding box
+ // This corresponds to the behavior of CSS absolute positioning
+ position: function() {
+ if ( !this[ 0 ] ) {
+ return;
+ }
+
+ var offsetParent, offset, doc,
+ elem = this[ 0 ],
+ parentOffset = { top: 0, left: 0 };
+
+ // position:fixed elements are offset from the viewport, which itself always has zero offset
+ if ( jQuery.css( elem, "position" ) === "fixed" ) {
+
+ // Assume position:fixed implies availability of getBoundingClientRect
+ offset = elem.getBoundingClientRect();
+
+ } else {
+ offset = this.offset();
+
+ // Account for the *real* offset parent, which can be the document or its root element
+ // when a statically positioned element is identified
+ doc = elem.ownerDocument;
+ offsetParent = elem.offsetParent || doc.documentElement;
+ while ( offsetParent &&
+ ( offsetParent === doc.body || offsetParent === doc.documentElement ) &&
+ jQuery.css( offsetParent, "position" ) === "static" ) {
+
+ offsetParent = offsetParent.parentNode;
+ }
+ if ( offsetParent && offsetParent !== elem && offsetParent.nodeType === 1 ) {
+
+ // Incorporate borders into its offset, since they are outside its content origin
+ parentOffset = jQuery( offsetParent ).offset();
+ parentOffset.top += jQuery.css( offsetParent, "borderTopWidth", true );
+ parentOffset.left += jQuery.css( offsetParent, "borderLeftWidth", true );
+ }
+ }
+
+ // Subtract parent offsets and element margins
+ return {
+ top: offset.top - parentOffset.top - jQuery.css( elem, "marginTop", true ),
+ left: offset.left - parentOffset.left - jQuery.css( elem, "marginLeft", true )
+ };
+ },
+
+ // This method will return documentElement in the following cases:
+ // 1) For an element inside an iframe without an offsetParent, this method will
+ //    return the documentElement of the parent window
+ // 2) For a hidden or detached element
+ // 3) For the body or html element, i.e. in case of the html node it will return itself
+ //
+ // but those exceptions were never presented as real-life use cases,
+ // and the results they produce might be considered preferable.
+ //
+ // This logic, however, is not guaranteed and can change at any point in the future
+ offsetParent: function() {
+ return this.map( function() {
+ var offsetParent = this.offsetParent;
+
+ while ( offsetParent && jQuery.css( offsetParent, "position" ) === "static" ) {
+ offsetParent = offsetParent.offsetParent;
+ }
+
+ return offsetParent || documentElement;
+ } );
+ }
+} );
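+
+// Usage sketch (illustrative; "#box" is hypothetical): offset() is
+// document-relative, position() is relative to the offset parent:
+//
+//   jQuery( "#box" ).offset();                        // { top: ..., left: ... }
+//   jQuery( "#box" ).position();                      // margins excluded
+//   jQuery( "#box" ).offset( { top: 10, left: 20 } ); // setter via css top/left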
+
+// Create scrollLeft and scrollTop methods
+jQuery.each( { scrollLeft: "pageXOffset", scrollTop: "pageYOffset" }, function( method, prop ) {
+ var top = "pageYOffset" === prop;
+
+ jQuery.fn[ method ] = function( val ) {
+ return access( this, function( elem, method, val ) {
+
+ // Coalesce documents and windows
+ var win;
+ if ( isWindow( elem ) ) {
+ win = elem;
+ } else if ( elem.nodeType === 9 ) {
+ win = elem.defaultView;
+ }
+
+ if ( val === undefined ) {
+ return win ? win[ prop ] : elem[ method ];
+ }
+
+ if ( win ) {
+ win.scrollTo(
+ !top ? val : win.pageXOffset,
+ top ? val : win.pageYOffset
+ );
+
+ } else {
+ elem[ method ] = val;
+ }
+ }, method, val, arguments.length );
+ };
+} );
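+
+// Usage sketch (illustrative): on windows/documents these read and write the page
+// scroll offsets; on other elements, the scrollLeft/scrollTop properties:
+//
+//   jQuery( window ).scrollTop();    // current vertical scroll
+//   jQuery( window ).scrollTop( 0 ); // jump back to the top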
+
+// Support: Safari <=7 - 9.1, Chrome <=37 - 49
+// Add the top/left cssHooks using jQuery.fn.position
+// Webkit bug: https://bugs.webkit.org/show_bug.cgi?id=29084
+// Blink bug: https://bugs.chromium.org/p/chromium/issues/detail?id=589347
+// getComputedStyle returns percent when specified for top/left/bottom/right;
+// rather than make the css module depend on the offset module, just check for it here
+jQuery.each( [ "top", "left" ], function( _i, prop ) {
+ jQuery.cssHooks[ prop ] = addGetHookIf( support.pixelPosition,
+ function( elem, computed ) {
+ if ( computed ) {
+ computed = curCSS( elem, prop );
+
+ // If curCSS returns percentage, fallback to offset
+ return rnumnonpx.test( computed ) ?
+ jQuery( elem ).position()[ prop ] + "px" :
+ computed;
+ }
+ }
+ );
+} );
+
+
+// Create innerHeight, innerWidth, height, width, outerHeight and outerWidth methods
+jQuery.each( { Height: "height", Width: "width" }, function( name, type ) {
+ jQuery.each( { padding: "inner" + name, content: type, "": "outer" + name },
+ function( defaultExtra, funcName ) {
+
+ // Margin is only for outerHeight, outerWidth
+ jQuery.fn[ funcName ] = function( margin, value ) {
+ var chainable = arguments.length && ( defaultExtra || typeof margin !== "boolean" ),
+ extra = defaultExtra || ( margin === true || value === true ? "margin" : "border" );
+
+ return access( this, function( elem, type, value ) {
+ var doc;
+
+ if ( isWindow( elem ) ) {
+
+ // $( window ).outerWidth/Height return w/h including scrollbars (gh-1729)
+ return funcName.indexOf( "outer" ) === 0 ?
+ elem[ "inner" + name ] :
+ elem.document.documentElement[ "client" + name ];
+ }
+
+ // Get document width or height
+ if ( elem.nodeType === 9 ) {
+ doc = elem.documentElement;
+
+ // Either scroll[Width/Height] or offset[Width/Height] or client[Width/Height],
+ // whichever is greatest
+ return Math.max(
+ elem.body[ "scroll" + name ], doc[ "scroll" + name ],
+ elem.body[ "offset" + name ], doc[ "offset" + name ],
+ doc[ "client" + name ]
+ );
+ }
+
+ return value === undefined ?
+
+ // Get width or height on the element, requesting but not forcing parseFloat
+ jQuery.css( elem, type, extra ) :
+
+ // Set width or height on the element
+ jQuery.style( elem, type, value, extra );
+ }, type, chainable ? margin : undefined, chainable );
+ };
+ } );
+} );
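+
+// A minimal usage sketch (hypothetical #box element) of how the six methods nest:
+//     jQuery( "#box" ).width();             // content only
+//     jQuery( "#box" ).innerWidth();        // content + padding
+//     jQuery( "#box" ).outerWidth();        // content + padding + border
+//     jQuery( "#box" ).outerWidth( true );  // content + padding + border + margin
+//     jQuery( window ).outerWidth();        // viewport width including scrollbars (gh-1729)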
+
+
+jQuery.each( [
+ "ajaxStart",
+ "ajaxStop",
+ "ajaxComplete",
+ "ajaxError",
+ "ajaxSuccess",
+ "ajaxSend"
+], function( _i, type ) {
+ jQuery.fn[ type ] = function( fn ) {
+ return this.on( type, fn );
+ };
+} );
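+
+// A minimal usage sketch (hypothetical handler): each shorthand just delegates
+// to .on(), so these two calls are equivalent:
+//     jQuery( document ).ajaxError( handler );
+//     jQuery( document ).on( "ajaxError", handler );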
+
+
+
+
+jQuery.fn.extend( {
+
+ bind: function( types, data, fn ) {
+ return this.on( types, null, data, fn );
+ },
+ unbind: function( types, fn ) {
+ return this.off( types, null, fn );
+ },
+
+ delegate: function( selector, types, data, fn ) {
+ return this.on( types, selector, data, fn );
+ },
+ undelegate: function( selector, types, fn ) {
+
+ // ( namespace ) or ( selector, types [, fn] )
+ return arguments.length === 1 ?
+ this.off( selector, "**" ) :
+ this.off( types, selector || "**", fn );
+ },
+
+ hover: function( fnOver, fnOut ) {
+ return this.mouseenter( fnOver ).mouseleave( fnOut || fnOver );
+ }
+} );
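+
+// A minimal usage sketch (hypothetical #list element and handler): these
+// deprecated aliases are thin wrappers over .on()/.off(), so these two calls
+// are equivalent:
+//     jQuery( "#list" ).delegate( "li", "click", handler );
+//     jQuery( "#list" ).on( "click", "li", handler );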
+
+jQuery.each( ( "blur focus focusin focusout resize scroll click dblclick " +
+ "mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave " +
+ "change select submit keydown keypress keyup contextmenu" ).split( " " ),
+ function( _i, name ) {
+
+ // Handle event binding
+ jQuery.fn[ name ] = function( data, fn ) {
+ return arguments.length > 0 ?
+ this.on( name, null, data, fn ) :
+ this.trigger( name );
+ };
+ } );
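+
+// A minimal usage sketch (hypothetical #btn element): with arguments the
+// shorthand binds via .on(), without arguments it triggers the event:
+//     jQuery( "#btn" ).click( handler );   // bind
+//     jQuery( "#btn" ).click();            // trigger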
+
+
+
+
+// Support: Android <=4.0 only
+// Make sure we trim BOM and NBSP
+var rtrim = /^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g;
+
+// Bind a function to a context, optionally partially applying any
+// arguments.
+// jQuery.proxy is deprecated to promote standards (specifically Function#bind);
+// however, it is not slated for removal any time soon.
+jQuery.proxy = function( fn, context ) {
+ var tmp, args, proxy;
+
+ if ( typeof context === "string" ) {
+ tmp = fn[ context ];
+ context = fn;
+ fn = tmp;
+ }
+
+ // Quick check to determine if the target is callable; per the spec
+ // this throws a TypeError, but we will just return undefined.
+ if ( !isFunction( fn ) ) {
+ return undefined;
+ }
+
+ // Simulated bind
+ args = slice.call( arguments, 2 );
+ proxy = function() {
+ return fn.apply( context || this, args.concat( slice.call( arguments ) ) );
+ };
+
+ // Set the guid of the unique handler to match the original handler's, so it can be removed
+ proxy.guid = fn.guid = fn.guid || jQuery.guid++;
+
+ return proxy;
+};
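+
+// A minimal usage sketch (hypothetical obj with a greet method); both forms
+// produce a function whose "this" is fixed to obj:
+//     var bound = jQuery.proxy( obj.greet, obj );   // or jQuery.proxy( obj, "greet" )
+//     setTimeout( bound, 100 );
+// The standards-based equivalent is obj.greet.bind( obj ).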
+
+jQuery.holdReady = function( hold ) {
+ if ( hold ) {
+ jQuery.readyWait++;
+ } else {
+ jQuery.ready( true );
+ }
+};
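+
+// A minimal usage sketch (hypothetical async loadPlugin helper): each
+// holdReady( true ) increments readyWait, and releasing the hold lets the
+// ready event fire once all holds are gone:
+//     jQuery.holdReady( true );
+//     loadPlugin( function() {
+//         jQuery.holdReady( false );
+//     } );
+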
+jQuery.isArray = Array.isArray;
+jQuery.parseJSON = JSON.parse;
+jQuery.nodeName = nodeName;
+jQuery.isFunction = isFunction;
+jQuery.isWindow = isWindow;
+jQuery.camelCase = camelCase;
+jQuery.type = toType;
+
+jQuery.now = Date.now;
+
+jQuery.isNumeric = function( obj ) {
+
+ // As of jQuery 3.0, isNumeric is limited to
+ // strings and numbers (primitives or objects)
+ // that can be coerced to finite numbers (gh-2662)
+ var type = jQuery.type( obj );
+ return ( type === "number" || type === "string" ) &&
+
+ // parseFloat NaNs numeric-cast false positives ("")
+ // ...but misinterprets leading-number strings, particularly hex literals ("0x...")
+ // subtraction forces infinities to NaN
+ !isNaN( obj - parseFloat( obj ) );
+};
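+
+// A minimal usage sketch of the cases described above:
+//     jQuery.isNumeric( 42 );         // true
+//     jQuery.isNumeric( "42.5" );     // true  (numeric string)
+//     jQuery.isNumeric( "" );         // false (parseFloat yields NaN)
+//     jQuery.isNumeric( Infinity );   // false (Infinity - Infinity is NaN)
+//     jQuery.isNumeric( [ 1 ] );      // false (neither number nor string)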
+
+jQuery.trim = function( text ) {
+ return text == null ?
+ "" :
+ ( text + "" ).replace( rtrim, "" );
+};
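+
+// A minimal usage sketch: BOM and NBSP are trimmed along with regular
+// whitespace, and null/undefined collapse to the empty string:
+//     jQuery.trim( " \uFEFFabc\u00A0 " );   // => "abc"
+//     jQuery.trim( null );                  // => ""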
+
+
+
+// Register as a named AMD module, since jQuery can be concatenated with other
+// files that may use define, but not via a proper concatenation script that
+// understands anonymous AMD modules. A named AMD module is the safest and most
+// robust way to register. Lowercase jquery is used because AMD module names are
+// derived from file names, and jQuery is normally delivered in a lowercase
+// file name. Do this after creating the global so that if an AMD module wants
+// to call noConflict to hide this version of jQuery, it will work.
+
+// Note that for maximum portability, libraries that are not jQuery should
+// declare themselves as anonymous modules, and avoid setting a global if an
+// AMD loader is present. jQuery is a special case. For more information, see
+// https://github.com/jrburke/requirejs/wiki/Updating-existing-libraries#wiki-anon
+
+if ( typeof define === "function" && define.amd ) {
+ define( "jquery", [], function() {
+ return jQuery;
+ } );
+}
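+
+// A minimal usage sketch (assuming an AMD loader such as RequireJS is present):
+//     require( [ "jquery" ], function( $ ) {
+//         // the named "jquery" module resolves to this copy
+//     } );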
+
+
+
+
+var
+
+ // Map over jQuery in case of overwrite
+ _jQuery = window.jQuery,
+
+ // Map over the $ in case of overwrite
+ _$ = window.$;
+
+jQuery.noConflict = function( deep ) {
+ if ( window.$ === jQuery ) {
+ window.$ = _$;
+ }
+
+ if ( deep && window.jQuery === jQuery ) {
+ window.jQuery = _jQuery;
+ }
+
+ return jQuery;
+};
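+
+// A minimal usage sketch (hypothetical jq alias): hand the globals back to a
+// previously loaded library while keeping a reference to this copy:
+//     var jq = jQuery.noConflict( true );   // restores window.$ and window.jQuery
+//     jq( "#app" ).show();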
+
+// Expose jQuery and $ identifiers, even in AMD
+// (#7102#comment:10, https://github.com/jquery/jquery/pull/557)
+// and CommonJS for browser emulators (#13566)
+if ( typeof noGlobal === "undefined" ) {
+ window.jQuery = window.$ = jQuery;
+}
+
+
+
+
+return jQuery;
+} );
diff --git a/deps/rabbitmq_management/priv/www/js/jquery-3.5.1.min.js b/deps/rabbitmq_management/priv/www/js/jquery-3.5.1.min.js
new file mode 100644
index 0000000000..b0614034ad
--- /dev/null
+++ b/deps/rabbitmq_management/priv/www/js/jquery-3.5.1.min.js
@@ -0,0 +1,2 @@
+/*! jQuery v3.5.1 | (c) JS Foundation and other contributors | jquery.org/license */
+!function(e,t){"use strict";"object"==typeof module&&"object"==typeof module.exports?module.exports=e.document?t(e,!0):function(e){if(!e.document)throw new Error("jQuery requires a window with a document");return t(e)}:t(e)}("undefined"!=typeof window?window:this,function(C,e){"use strict";var t=[],r=Object.getPrototypeOf,s=t.slice,g=t.flat?function(e){return t.flat.call(e)}:function(e){return t.concat.apply([],e)},u=t.push,i=t.indexOf,n={},o=n.toString,v=n.hasOwnProperty,a=v.toString,l=a.call(Object),y={},m=function(e){return"function"==typeof e&&"number"!=typeof e.nodeType},x=function(e){return null!=e&&e===e.window},E=C.document,c={type:!0,src:!0,nonce:!0,noModule:!0};function b(e,t,n){var r,i,o=(n=n||E).createElement("script");if(o.text=e,t)for(r in c)(i=t[r]||t.getAttribute&&t.getAttribute(r))&&o.setAttribute(r,i);n.head.appendChild(o).parentNode.removeChild(o)}function w(e){return null==e?e+"":"object"==typeof e||"function"==typeof e?n[o.call(e)]||"object":typeof e}var f="3.5.1",S=function(e,t){return new S.fn.init(e,t)};function p(e){var t=!!e&&"length"in e&&e.length,n=w(e);return!m(e)&&!x(e)&&("array"===n||0===t||"number"==typeof t&&0<t&&t-1 in e)}S.fn=S.prototype={jquery:f,constructor:S,length:0,toArray:function(){return s.call(this)},get:function(e){return null==e?s.call(this):e<0?this[e+this.length]:this[e]},pushStack:function(e){var t=S.merge(this.constructor(),e);return t.prevObject=this,t},each:function(e){return S.each(this,e)},map:function(n){return this.pushStack(S.map(this,function(e,t){return n.call(e,t,e)}))},slice:function(){return this.pushStack(s.apply(this,arguments))},first:function(){return this.eq(0)},last:function(){return this.eq(-1)},even:function(){return this.pushStack(S.grep(this,function(e,t){return(t+1)%2}))},odd:function(){return this.pushStack(S.grep(this,function(e,t){return t%2}))},eq:function(e){var t=this.length,n=+e+(e<0?t:0);return this.pushStack(0<=n&&n<t?[this[n]]:[])},end:function(){return this.prevObject||this.constructor()},push:u,sort:t.sort,splice:t.splice},S.extend=S.fn.extend=function(){var e,t,n,r,i,o,a=arguments[0]||{},s=1,u=arguments.length,l=!1;for("boolean"==typeof a&&(l=a,a=arguments[s]||{},s++),"object"==typeof a||m(a)||(a={}),s===u&&(a=this,s--);s<u;s++)if(null!=(e=arguments[s]))for(t in e)r=e[t],"__proto__"!==t&&a!==r&&(l&&r&&(S.isPlainObject(r)||(i=Array.isArray(r)))?(n=a[t],o=i&&!Array.isArray(n)?[]:i||S.isPlainObject(n)?n:{},i=!1,a[t]=S.extend(l,o,r)):void 0!==r&&(a[t]=r));return a},S.extend({expando:"jQuery"+(f+Math.random()).replace(/\D/g,""),isReady:!0,error:function(e){throw new Error(e)},noop:function(){},isPlainObject:function(e){var t,n;return!(!e||"[object Object]"!==o.call(e))&&(!(t=r(e))||"function"==typeof(n=v.call(t,"constructor")&&t.constructor)&&a.call(n)===l)},isEmptyObject:function(e){var t;for(t in e)return!1;return!0},globalEval:function(e,t,n){b(e,{nonce:t&&t.nonce},n)},each:function(e,t){var n,r=0;if(p(e)){for(n=e.length;r<n;r++)if(!1===t.call(e[r],r,e[r]))break}else for(r in e)if(!1===t.call(e[r],r,e[r]))break;return e},makeArray:function(e,t){var n=t||[];return null!=e&&(p(Object(e))?S.merge(n,"string"==typeof e?[e]:e):u.call(n,e)),n},inArray:function(e,t,n){return null==t?-1:i.call(t,e,n)},merge:function(e,t){for(var n=+t.length,r=0,i=e.length;r<n;r++)e[i++]=t[r];return e.length=i,e},grep:function(e,t,n){for(var r=[],i=0,o=e.length,a=!n;i<o;i++)!t(e[i],i)!==a&&r.push(e[i]);return r},map:function(e,t,n){var r,i,o=0,a=[];if(p(e))for(r=e.length;o<r;o++)null!=(i=t(e[o],o,n))&&a.push(i);else for(o in 
e)null!=(i=t(e[o],o,n))&&a.push(i);return g(a)},guid:1,support:y}),"function"==typeof Symbol&&(S.fn[Symbol.iterator]=t[Symbol.iterator]),S.each("Boolean Number String Function Array Date RegExp Object Error Symbol".split(" "),function(e,t){n["[object "+t+"]"]=t.toLowerCase()});var d=function(n){var e,d,b,o,i,h,f,g,w,u,l,T,C,a,E,v,s,c,y,S="sizzle"+1*new Date,p=n.document,k=0,r=0,m=ue(),x=ue(),A=ue(),N=ue(),D=function(e,t){return e===t&&(l=!0),0},j={}.hasOwnProperty,t=[],q=t.pop,L=t.push,H=t.push,O=t.slice,P=function(e,t){for(var n=0,r=e.length;n<r;n++)if(e[n]===t)return n;return-1},R="checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|ismap|loop|multiple|open|readonly|required|scoped",M="[\\x20\\t\\r\\n\\f]",I="(?:\\\\[\\da-fA-F]{1,6}"+M+"?|\\\\[^\\r\\n\\f]|[\\w-]|[^\0-\\x7f])+",W="\\["+M+"*("+I+")(?:"+M+"*([*^$|!~]?=)"+M+"*(?:'((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\"|("+I+"))|)"+M+"*\\]",F=":("+I+")(?:\\((('((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\")|((?:\\\\.|[^\\\\()[\\]]|"+W+")*)|.*)\\)|)",B=new RegExp(M+"+","g"),$=new RegExp("^"+M+"+|((?:^|[^\\\\])(?:\\\\.)*)"+M+"+$","g"),_=new RegExp("^"+M+"*,"+M+"*"),z=new RegExp("^"+M+"*([>+~]|"+M+")"+M+"*"),U=new RegExp(M+"|>"),X=new RegExp(F),V=new RegExp("^"+I+"$"),G={ID:new RegExp("^#("+I+")"),CLASS:new RegExp("^\\.("+I+")"),TAG:new RegExp("^("+I+"|[*])"),ATTR:new RegExp("^"+W),PSEUDO:new RegExp("^"+F),CHILD:new RegExp("^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\("+M+"*(even|odd|(([+-]|)(\\d*)n|)"+M+"*(?:([+-]|)"+M+"*(\\d+)|))"+M+"*\\)|)","i"),bool:new RegExp("^(?:"+R+")$","i"),needsContext:new RegExp("^"+M+"*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\("+M+"*((?:-\\d)?\\d*)"+M+"*\\)|)(?=[^-]|$)","i")},Y=/HTML$/i,Q=/^(?:input|select|textarea|button)$/i,J=/^h\d$/i,K=/^[^{]+\{\s*\[native \w/,Z=/^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/,ee=/[+~]/,te=new RegExp("\\\\[\\da-fA-F]{1,6}"+M+"?|\\\\([^\\r\\n\\f])","g"),ne=function(e,t){var n="0x"+e.slice(1)-65536;return t||(n<0?String.fromCharCode(n+65536):String.fromCharCode(n>>10|55296,1023&n|56320))},re=/([\0-\x1f\x7f]|^-?\d)|^-$|[^\0-\x1f\x7f-\uFFFF\w-]/g,ie=function(e,t){return t?"\0"===e?"\ufffd":e.slice(0,-1)+"\\"+e.charCodeAt(e.length-1).toString(16)+" ":"\\"+e},oe=function(){T()},ae=be(function(e){return!0===e.disabled&&"fieldset"===e.nodeName.toLowerCase()},{dir:"parentNode",next:"legend"});try{H.apply(t=O.call(p.childNodes),p.childNodes),t[p.childNodes.length].nodeType}catch(e){H={apply:t.length?function(e,t){L.apply(e,O.call(t))}:function(e,t){var n=e.length,r=0;while(e[n++]=t[r++]);e.length=n-1}}}function se(t,e,n,r){var i,o,a,s,u,l,c,f=e&&e.ownerDocument,p=e?e.nodeType:9;if(n=n||[],"string"!=typeof t||!t||1!==p&&9!==p&&11!==p)return n;if(!r&&(T(e),e=e||C,E)){if(11!==p&&(u=Z.exec(t)))if(i=u[1]){if(9===p){if(!(a=e.getElementById(i)))return n;if(a.id===i)return n.push(a),n}else if(f&&(a=f.getElementById(i))&&y(e,a)&&a.id===i)return n.push(a),n}else{if(u[2])return H.apply(n,e.getElementsByTagName(t)),n;if((i=u[3])&&d.getElementsByClassName&&e.getElementsByClassName)return H.apply(n,e.getElementsByClassName(i)),n}if(d.qsa&&!N[t+" "]&&(!v||!v.test(t))&&(1!==p||"object"!==e.nodeName.toLowerCase())){if(c=t,f=e,1===p&&(U.test(t)||z.test(t))){(f=ee.test(t)&&ye(e.parentNode)||e)===e&&d.scope||((s=e.getAttribute("id"))?s=s.replace(re,ie):e.setAttribute("id",s=S)),o=(l=h(t)).length;while(o--)l[o]=(s?"#"+s:":scope")+" "+xe(l[o]);c=l.join(",")}try{return H.apply(n,f.querySelectorAll(c)),n}catch(e){N(t,!0)}finally{s===S&&e.removeAttribute("id")}}}return 
g(t.replace($,"$1"),e,n,r)}function ue(){var r=[];return function e(t,n){return r.push(t+" ")>b.cacheLength&&delete e[r.shift()],e[t+" "]=n}}function le(e){return e[S]=!0,e}function ce(e){var t=C.createElement("fieldset");try{return!!e(t)}catch(e){return!1}finally{t.parentNode&&t.parentNode.removeChild(t),t=null}}function fe(e,t){var n=e.split("|"),r=n.length;while(r--)b.attrHandle[n[r]]=t}function pe(e,t){var n=t&&e,r=n&&1===e.nodeType&&1===t.nodeType&&e.sourceIndex-t.sourceIndex;if(r)return r;if(n)while(n=n.nextSibling)if(n===t)return-1;return e?1:-1}function de(t){return function(e){return"input"===e.nodeName.toLowerCase()&&e.type===t}}function he(n){return function(e){var t=e.nodeName.toLowerCase();return("input"===t||"button"===t)&&e.type===n}}function ge(t){return function(e){return"form"in e?e.parentNode&&!1===e.disabled?"label"in e?"label"in e.parentNode?e.parentNode.disabled===t:e.disabled===t:e.isDisabled===t||e.isDisabled!==!t&&ae(e)===t:e.disabled===t:"label"in e&&e.disabled===t}}function ve(a){return le(function(o){return o=+o,le(function(e,t){var n,r=a([],e.length,o),i=r.length;while(i--)e[n=r[i]]&&(e[n]=!(t[n]=e[n]))})})}function ye(e){return e&&"undefined"!=typeof e.getElementsByTagName&&e}for(e in d=se.support={},i=se.isXML=function(e){var t=e.namespaceURI,n=(e.ownerDocument||e).documentElement;return!Y.test(t||n&&n.nodeName||"HTML")},T=se.setDocument=function(e){var t,n,r=e?e.ownerDocument||e:p;return r!=C&&9===r.nodeType&&r.documentElement&&(a=(C=r).documentElement,E=!i(C),p!=C&&(n=C.defaultView)&&n.top!==n&&(n.addEventListener?n.addEventListener("unload",oe,!1):n.attachEvent&&n.attachEvent("onunload",oe)),d.scope=ce(function(e){return a.appendChild(e).appendChild(C.createElement("div")),"undefined"!=typeof e.querySelectorAll&&!e.querySelectorAll(":scope fieldset div").length}),d.attributes=ce(function(e){return e.className="i",!e.getAttribute("className")}),d.getElementsByTagName=ce(function(e){return e.appendChild(C.createComment("")),!e.getElementsByTagName("*").length}),d.getElementsByClassName=K.test(C.getElementsByClassName),d.getById=ce(function(e){return a.appendChild(e).id=S,!C.getElementsByName||!C.getElementsByName(S).length}),d.getById?(b.filter.ID=function(e){var t=e.replace(te,ne);return function(e){return e.getAttribute("id")===t}},b.find.ID=function(e,t){if("undefined"!=typeof t.getElementById&&E){var n=t.getElementById(e);return n?[n]:[]}}):(b.filter.ID=function(e){var n=e.replace(te,ne);return function(e){var t="undefined"!=typeof e.getAttributeNode&&e.getAttributeNode("id");return t&&t.value===n}},b.find.ID=function(e,t){if("undefined"!=typeof t.getElementById&&E){var n,r,i,o=t.getElementById(e);if(o){if((n=o.getAttributeNode("id"))&&n.value===e)return[o];i=t.getElementsByName(e),r=0;while(o=i[r++])if((n=o.getAttributeNode("id"))&&n.value===e)return[o]}return[]}}),b.find.TAG=d.getElementsByTagName?function(e,t){return"undefined"!=typeof t.getElementsByTagName?t.getElementsByTagName(e):d.qsa?t.querySelectorAll(e):void 0}:function(e,t){var n,r=[],i=0,o=t.getElementsByTagName(e);if("*"===e){while(n=o[i++])1===n.nodeType&&r.push(n);return r}return o},b.find.CLASS=d.getElementsByClassName&&function(e,t){if("undefined"!=typeof t.getElementsByClassName&&E)return t.getElementsByClassName(e)},s=[],v=[],(d.qsa=K.test(C.querySelectorAll))&&(ce(function(e){var t;a.appendChild(e).innerHTML="<a id='"+S+"'></a><select id='"+S+"-\r\\' msallowcapture=''><option 
selected=''></option></select>",e.querySelectorAll("[msallowcapture^='']").length&&v.push("[*^$]="+M+"*(?:''|\"\")"),e.querySelectorAll("[selected]").length||v.push("\\["+M+"*(?:value|"+R+")"),e.querySelectorAll("[id~="+S+"-]").length||v.push("~="),(t=C.createElement("input")).setAttribute("name",""),e.appendChild(t),e.querySelectorAll("[name='']").length||v.push("\\["+M+"*name"+M+"*="+M+"*(?:''|\"\")"),e.querySelectorAll(":checked").length||v.push(":checked"),e.querySelectorAll("a#"+S+"+*").length||v.push(".#.+[+~]"),e.querySelectorAll("\\\f"),v.push("[\\r\\n\\f]")}),ce(function(e){e.innerHTML="<a href='' disabled='disabled'></a><select disabled='disabled'><option/></select>";var t=C.createElement("input");t.setAttribute("type","hidden"),e.appendChild(t).setAttribute("name","D"),e.querySelectorAll("[name=d]").length&&v.push("name"+M+"*[*^$|!~]?="),2!==e.querySelectorAll(":enabled").length&&v.push(":enabled",":disabled"),a.appendChild(e).disabled=!0,2!==e.querySelectorAll(":disabled").length&&v.push(":enabled",":disabled"),e.querySelectorAll("*,:x"),v.push(",.*:")})),(d.matchesSelector=K.test(c=a.matches||a.webkitMatchesSelector||a.mozMatchesSelector||a.oMatchesSelector||a.msMatchesSelector))&&ce(function(e){d.disconnectedMatch=c.call(e,"*"),c.call(e,"[s!='']:x"),s.push("!=",F)}),v=v.length&&new RegExp(v.join("|")),s=s.length&&new RegExp(s.join("|")),t=K.test(a.compareDocumentPosition),y=t||K.test(a.contains)?function(e,t){var n=9===e.nodeType?e.documentElement:e,r=t&&t.parentNode;return e===r||!(!r||1!==r.nodeType||!(n.contains?n.contains(r):e.compareDocumentPosition&&16&e.compareDocumentPosition(r)))}:function(e,t){if(t)while(t=t.parentNode)if(t===e)return!0;return!1},D=t?function(e,t){if(e===t)return l=!0,0;var n=!e.compareDocumentPosition-!t.compareDocumentPosition;return n||(1&(n=(e.ownerDocument||e)==(t.ownerDocument||t)?e.compareDocumentPosition(t):1)||!d.sortDetached&&t.compareDocumentPosition(e)===n?e==C||e.ownerDocument==p&&y(p,e)?-1:t==C||t.ownerDocument==p&&y(p,t)?1:u?P(u,e)-P(u,t):0:4&n?-1:1)}:function(e,t){if(e===t)return l=!0,0;var n,r=0,i=e.parentNode,o=t.parentNode,a=[e],s=[t];if(!i||!o)return e==C?-1:t==C?1:i?-1:o?1:u?P(u,e)-P(u,t):0;if(i===o)return pe(e,t);n=e;while(n=n.parentNode)a.unshift(n);n=t;while(n=n.parentNode)s.unshift(n);while(a[r]===s[r])r++;return r?pe(a[r],s[r]):a[r]==p?-1:s[r]==p?1:0}),C},se.matches=function(e,t){return se(e,null,null,t)},se.matchesSelector=function(e,t){if(T(e),d.matchesSelector&&E&&!N[t+" "]&&(!s||!s.test(t))&&(!v||!v.test(t)))try{var n=c.call(e,t);if(n||d.disconnectedMatch||e.document&&11!==e.document.nodeType)return n}catch(e){N(t,!0)}return 0<se(t,C,null,[e]).length},se.contains=function(e,t){return(e.ownerDocument||e)!=C&&T(e),y(e,t)},se.attr=function(e,t){(e.ownerDocument||e)!=C&&T(e);var n=b.attrHandle[t.toLowerCase()],r=n&&j.call(b.attrHandle,t.toLowerCase())?n(e,t,!E):void 0;return void 0!==r?r:d.attributes||!E?e.getAttribute(t):(r=e.getAttributeNode(t))&&r.specified?r.value:null},se.escape=function(e){return(e+"").replace(re,ie)},se.error=function(e){throw new Error("Syntax error, unrecognized expression: "+e)},se.uniqueSort=function(e){var t,n=[],r=0,i=0;if(l=!d.detectDuplicates,u=!d.sortStable&&e.slice(0),e.sort(D),l){while(t=e[i++])t===e[i]&&(r=n.push(i));while(r--)e.splice(n[r],1)}return u=null,e},o=se.getText=function(e){var t,n="",r=0,i=e.nodeType;if(i){if(1===i||9===i||11===i){if("string"==typeof e.textContent)return e.textContent;for(e=e.firstChild;e;e=e.nextSibling)n+=o(e)}else if(3===i||4===i)return e.nodeValue}else 
while(t=e[r++])n+=o(t);return n},(b=se.selectors={cacheLength:50,createPseudo:le,match:G,attrHandle:{},find:{},relative:{">":{dir:"parentNode",first:!0}," ":{dir:"parentNode"},"+":{dir:"previousSibling",first:!0},"~":{dir:"previousSibling"}},preFilter:{ATTR:function(e){return e[1]=e[1].replace(te,ne),e[3]=(e[3]||e[4]||e[5]||"").replace(te,ne),"~="===e[2]&&(e[3]=" "+e[3]+" "),e.slice(0,4)},CHILD:function(e){return e[1]=e[1].toLowerCase(),"nth"===e[1].slice(0,3)?(e[3]||se.error(e[0]),e[4]=+(e[4]?e[5]+(e[6]||1):2*("even"===e[3]||"odd"===e[3])),e[5]=+(e[7]+e[8]||"odd"===e[3])):e[3]&&se.error(e[0]),e},PSEUDO:function(e){var t,n=!e[6]&&e[2];return G.CHILD.test(e[0])?null:(e[3]?e[2]=e[4]||e[5]||"":n&&X.test(n)&&(t=h(n,!0))&&(t=n.indexOf(")",n.length-t)-n.length)&&(e[0]=e[0].slice(0,t),e[2]=n.slice(0,t)),e.slice(0,3))}},filter:{TAG:function(e){var t=e.replace(te,ne).toLowerCase();return"*"===e?function(){return!0}:function(e){return e.nodeName&&e.nodeName.toLowerCase()===t}},CLASS:function(e){var t=m[e+" "];return t||(t=new RegExp("(^|"+M+")"+e+"("+M+"|$)"))&&m(e,function(e){return t.test("string"==typeof e.className&&e.className||"undefined"!=typeof e.getAttribute&&e.getAttribute("class")||"")})},ATTR:function(n,r,i){return function(e){var t=se.attr(e,n);return null==t?"!="===r:!r||(t+="","="===r?t===i:"!="===r?t!==i:"^="===r?i&&0===t.indexOf(i):"*="===r?i&&-1<t.indexOf(i):"$="===r?i&&t.slice(-i.length)===i:"~="===r?-1<(" "+t.replace(B," ")+" ").indexOf(i):"|="===r&&(t===i||t.slice(0,i.length+1)===i+"-"))}},CHILD:function(h,e,t,g,v){var y="nth"!==h.slice(0,3),m="last"!==h.slice(-4),x="of-type"===e;return 1===g&&0===v?function(e){return!!e.parentNode}:function(e,t,n){var r,i,o,a,s,u,l=y!==m?"nextSibling":"previousSibling",c=e.parentNode,f=x&&e.nodeName.toLowerCase(),p=!n&&!x,d=!1;if(c){if(y){while(l){a=e;while(a=a[l])if(x?a.nodeName.toLowerCase()===f:1===a.nodeType)return!1;u=l="only"===h&&!u&&"nextSibling"}return!0}if(u=[m?c.firstChild:c.lastChild],m&&p){d=(s=(r=(i=(o=(a=c)[S]||(a[S]={}))[a.uniqueID]||(o[a.uniqueID]={}))[h]||[])[0]===k&&r[1])&&r[2],a=s&&c.childNodes[s];while(a=++s&&a&&a[l]||(d=s=0)||u.pop())if(1===a.nodeType&&++d&&a===e){i[h]=[k,s,d];break}}else if(p&&(d=s=(r=(i=(o=(a=e)[S]||(a[S]={}))[a.uniqueID]||(o[a.uniqueID]={}))[h]||[])[0]===k&&r[1]),!1===d)while(a=++s&&a&&a[l]||(d=s=0)||u.pop())if((x?a.nodeName.toLowerCase()===f:1===a.nodeType)&&++d&&(p&&((i=(o=a[S]||(a[S]={}))[a.uniqueID]||(o[a.uniqueID]={}))[h]=[k,d]),a===e))break;return(d-=v)===g||d%g==0&&0<=d/g}}},PSEUDO:function(e,o){var t,a=b.pseudos[e]||b.setFilters[e.toLowerCase()]||se.error("unsupported pseudo: "+e);return a[S]?a(o):1<a.length?(t=[e,e,"",o],b.setFilters.hasOwnProperty(e.toLowerCase())?le(function(e,t){var n,r=a(e,o),i=r.length;while(i--)e[n=P(e,r[i])]=!(t[n]=r[i])}):function(e){return a(e,0,t)}):a}},pseudos:{not:le(function(e){var r=[],i=[],s=f(e.replace($,"$1"));return s[S]?le(function(e,t,n,r){var i,o=s(e,null,r,[]),a=e.length;while(a--)(i=o[a])&&(e[a]=!(t[a]=i))}):function(e,t,n){return r[0]=e,s(r,null,n,i),r[0]=null,!i.pop()}}),has:le(function(t){return function(e){return 0<se(t,e).length}}),contains:le(function(t){return t=t.replace(te,ne),function(e){return-1<(e.textContent||o(e)).indexOf(t)}}),lang:le(function(n){return V.test(n||"")||se.error("unsupported lang: "+n),n=n.replace(te,ne).toLowerCase(),function(e){var 
t;do{if(t=E?e.lang:e.getAttribute("xml:lang")||e.getAttribute("lang"))return(t=t.toLowerCase())===n||0===t.indexOf(n+"-")}while((e=e.parentNode)&&1===e.nodeType);return!1}}),target:function(e){var t=n.location&&n.location.hash;return t&&t.slice(1)===e.id},root:function(e){return e===a},focus:function(e){return e===C.activeElement&&(!C.hasFocus||C.hasFocus())&&!!(e.type||e.href||~e.tabIndex)},enabled:ge(!1),disabled:ge(!0),checked:function(e){var t=e.nodeName.toLowerCase();return"input"===t&&!!e.checked||"option"===t&&!!e.selected},selected:function(e){return e.parentNode&&e.parentNode.selectedIndex,!0===e.selected},empty:function(e){for(e=e.firstChild;e;e=e.nextSibling)if(e.nodeType<6)return!1;return!0},parent:function(e){return!b.pseudos.empty(e)},header:function(e){return J.test(e.nodeName)},input:function(e){return Q.test(e.nodeName)},button:function(e){var t=e.nodeName.toLowerCase();return"input"===t&&"button"===e.type||"button"===t},text:function(e){var t;return"input"===e.nodeName.toLowerCase()&&"text"===e.type&&(null==(t=e.getAttribute("type"))||"text"===t.toLowerCase())},first:ve(function(){return[0]}),last:ve(function(e,t){return[t-1]}),eq:ve(function(e,t,n){return[n<0?n+t:n]}),even:ve(function(e,t){for(var n=0;n<t;n+=2)e.push(n);return e}),odd:ve(function(e,t){for(var n=1;n<t;n+=2)e.push(n);return e}),lt:ve(function(e,t,n){for(var r=n<0?n+t:t<n?t:n;0<=--r;)e.push(r);return e}),gt:ve(function(e,t,n){for(var r=n<0?n+t:n;++r<t;)e.push(r);return e})}}).pseudos.nth=b.pseudos.eq,{radio:!0,checkbox:!0,file:!0,password:!0,image:!0})b.pseudos[e]=de(e);for(e in{submit:!0,reset:!0})b.pseudos[e]=he(e);function me(){}function xe(e){for(var t=0,n=e.length,r="";t<n;t++)r+=e[t].value;return r}function be(s,e,t){var u=e.dir,l=e.next,c=l||u,f=t&&"parentNode"===c,p=r++;return e.first?function(e,t,n){while(e=e[u])if(1===e.nodeType||f)return s(e,t,n);return!1}:function(e,t,n){var r,i,o,a=[k,p];if(n){while(e=e[u])if((1===e.nodeType||f)&&s(e,t,n))return!0}else while(e=e[u])if(1===e.nodeType||f)if(i=(o=e[S]||(e[S]={}))[e.uniqueID]||(o[e.uniqueID]={}),l&&l===e.nodeName.toLowerCase())e=e[u]||e;else{if((r=i[c])&&r[0]===k&&r[1]===p)return a[2]=r[2];if((i[c]=a)[2]=s(e,t,n))return!0}return!1}}function we(i){return 1<i.length?function(e,t,n){var r=i.length;while(r--)if(!i[r](e,t,n))return!1;return!0}:i[0]}function Te(e,t,n,r,i){for(var o,a=[],s=0,u=e.length,l=null!=t;s<u;s++)(o=e[s])&&(n&&!n(o,r,i)||(a.push(o),l&&t.push(s)));return a}function Ce(d,h,g,v,y,e){return v&&!v[S]&&(v=Ce(v)),y&&!y[S]&&(y=Ce(y,e)),le(function(e,t,n,r){var i,o,a,s=[],u=[],l=t.length,c=e||function(e,t,n){for(var r=0,i=t.length;r<i;r++)se(e,t[r],n);return n}(h||"*",n.nodeType?[n]:n,[]),f=!d||!e&&h?c:Te(c,s,d,n,r),p=g?y||(e?d:l||v)?[]:t:f;if(g&&g(f,p,n,r),v){i=Te(p,u),v(i,[],n,r),o=i.length;while(o--)(a=i[o])&&(p[u[o]]=!(f[u[o]]=a))}if(e){if(y||d){if(y){i=[],o=p.length;while(o--)(a=p[o])&&i.push(f[o]=a);y(null,p=[],i,r)}o=p.length;while(o--)(a=p[o])&&-1<(i=y?P(e,a):s[o])&&(e[i]=!(t[i]=a))}}else p=Te(p===t?p.splice(l,p.length):p),y?y(null,t,p,r):H.apply(t,p)})}function Ee(e){for(var i,t,n,r=e.length,o=b.relative[e[0].type],a=o||b.relative[" "],s=o?1:0,u=be(function(e){return e===i},a,!0),l=be(function(e){return-1<P(i,e)},a,!0),c=[function(e,t,n){var r=!o&&(n||t!==w)||((i=t).nodeType?u(e,t,n):l(e,t,n));return i=null,r}];s<r;s++)if(t=b.relative[e[s].type])c=[be(we(c),t)];else{if((t=b.filter[e[s].type].apply(null,e[s].matches))[S]){for(n=++s;n<r;n++)if(b.relative[e[n].type])break;return Ce(1<s&&we(c),1<s&&xe(e.slice(0,s-1).concat({value:" 
"===e[s-2].type?"*":""})).replace($,"$1"),t,s<n&&Ee(e.slice(s,n)),n<r&&Ee(e=e.slice(n)),n<r&&xe(e))}c.push(t)}return we(c)}return me.prototype=b.filters=b.pseudos,b.setFilters=new me,h=se.tokenize=function(e,t){var n,r,i,o,a,s,u,l=x[e+" "];if(l)return t?0:l.slice(0);a=e,s=[],u=b.preFilter;while(a){for(o in n&&!(r=_.exec(a))||(r&&(a=a.slice(r[0].length)||a),s.push(i=[])),n=!1,(r=z.exec(a))&&(n=r.shift(),i.push({value:n,type:r[0].replace($," ")}),a=a.slice(n.length)),b.filter)!(r=G[o].exec(a))||u[o]&&!(r=u[o](r))||(n=r.shift(),i.push({value:n,type:o,matches:r}),a=a.slice(n.length));if(!n)break}return t?a.length:a?se.error(e):x(e,s).slice(0)},f=se.compile=function(e,t){var n,v,y,m,x,r,i=[],o=[],a=A[e+" "];if(!a){t||(t=h(e)),n=t.length;while(n--)(a=Ee(t[n]))[S]?i.push(a):o.push(a);(a=A(e,(v=o,m=0<(y=i).length,x=0<v.length,r=function(e,t,n,r,i){var o,a,s,u=0,l="0",c=e&&[],f=[],p=w,d=e||x&&b.find.TAG("*",i),h=k+=null==p?1:Math.random()||.1,g=d.length;for(i&&(w=t==C||t||i);l!==g&&null!=(o=d[l]);l++){if(x&&o){a=0,t||o.ownerDocument==C||(T(o),n=!E);while(s=v[a++])if(s(o,t||C,n)){r.push(o);break}i&&(k=h)}m&&((o=!s&&o)&&u--,e&&c.push(o))}if(u+=l,m&&l!==u){a=0;while(s=y[a++])s(c,f,t,n);if(e){if(0<u)while(l--)c[l]||f[l]||(f[l]=q.call(r));f=Te(f)}H.apply(r,f),i&&!e&&0<f.length&&1<u+y.length&&se.uniqueSort(r)}return i&&(k=h,w=p),c},m?le(r):r))).selector=e}return a},g=se.select=function(e,t,n,r){var i,o,a,s,u,l="function"==typeof e&&e,c=!r&&h(e=l.selector||e);if(n=n||[],1===c.length){if(2<(o=c[0]=c[0].slice(0)).length&&"ID"===(a=o[0]).type&&9===t.nodeType&&E&&b.relative[o[1].type]){if(!(t=(b.find.ID(a.matches[0].replace(te,ne),t)||[])[0]))return n;l&&(t=t.parentNode),e=e.slice(o.shift().value.length)}i=G.needsContext.test(e)?0:o.length;while(i--){if(a=o[i],b.relative[s=a.type])break;if((u=b.find[s])&&(r=u(a.matches[0].replace(te,ne),ee.test(o[0].type)&&ye(t.parentNode)||t))){if(o.splice(i,1),!(e=r.length&&xe(o)))return H.apply(n,r),n;break}}}return(l||f(e,c))(r,t,!E,n,!t||ee.test(e)&&ye(t.parentNode)||t),n},d.sortStable=S.split("").sort(D).join("")===S,d.detectDuplicates=!!l,T(),d.sortDetached=ce(function(e){return 1&e.compareDocumentPosition(C.createElement("fieldset"))}),ce(function(e){return e.innerHTML="<a href='#'></a>","#"===e.firstChild.getAttribute("href")})||fe("type|href|height|width",function(e,t,n){if(!n)return e.getAttribute(t,"type"===t.toLowerCase()?1:2)}),d.attributes&&ce(function(e){return e.innerHTML="<input/>",e.firstChild.setAttribute("value",""),""===e.firstChild.getAttribute("value")})||fe("value",function(e,t,n){if(!n&&"input"===e.nodeName.toLowerCase())return e.defaultValue}),ce(function(e){return null==e.getAttribute("disabled")})||fe(R,function(e,t,n){var r;if(!n)return!0===e[t]?t.toLowerCase():(r=e.getAttributeNode(t))&&r.specified?r.value:null}),se}(C);S.find=d,S.expr=d.selectors,S.expr[":"]=S.expr.pseudos,S.uniqueSort=S.unique=d.uniqueSort,S.text=d.getText,S.isXMLDoc=d.isXML,S.contains=d.contains,S.escapeSelector=d.escape;var h=function(e,t,n){var r=[],i=void 0!==n;while((e=e[t])&&9!==e.nodeType)if(1===e.nodeType){if(i&&S(e).is(n))break;r.push(e)}return r},T=function(e,t){for(var n=[];e;e=e.nextSibling)1===e.nodeType&&e!==t&&n.push(e);return n},k=S.expr.match.needsContext;function A(e,t){return e.nodeName&&e.nodeName.toLowerCase()===t.toLowerCase()}var N=/^<([a-z][^\/\0>:\x20\t\r\n\f]*)[\x20\t\r\n\f]*\/?>(?:<\/\1>|)$/i;function D(e,n,r){return m(n)?S.grep(e,function(e,t){return!!n.call(e,t,e)!==r}):n.nodeType?S.grep(e,function(e){return e===n!==r}):"string"!=typeof 
n?S.grep(e,function(e){return-1<i.call(n,e)!==r}):S.filter(n,e,r)}S.filter=function(e,t,n){var r=t[0];return n&&(e=":not("+e+")"),1===t.length&&1===r.nodeType?S.find.matchesSelector(r,e)?[r]:[]:S.find.matches(e,S.grep(t,function(e){return 1===e.nodeType}))},S.fn.extend({find:function(e){var t,n,r=this.length,i=this;if("string"!=typeof e)return this.pushStack(S(e).filter(function(){for(t=0;t<r;t++)if(S.contains(i[t],this))return!0}));for(n=this.pushStack([]),t=0;t<r;t++)S.find(e,i[t],n);return 1<r?S.uniqueSort(n):n},filter:function(e){return this.pushStack(D(this,e||[],!1))},not:function(e){return this.pushStack(D(this,e||[],!0))},is:function(e){return!!D(this,"string"==typeof e&&k.test(e)?S(e):e||[],!1).length}});var j,q=/^(?:\s*(<[\w\W]+>)[^>]*|#([\w-]+))$/;(S.fn.init=function(e,t,n){var r,i;if(!e)return this;if(n=n||j,"string"==typeof e){if(!(r="<"===e[0]&&">"===e[e.length-1]&&3<=e.length?[null,e,null]:q.exec(e))||!r[1]&&t)return!t||t.jquery?(t||n).find(e):this.constructor(t).find(e);if(r[1]){if(t=t instanceof S?t[0]:t,S.merge(this,S.parseHTML(r[1],t&&t.nodeType?t.ownerDocument||t:E,!0)),N.test(r[1])&&S.isPlainObject(t))for(r in t)m(this[r])?this[r](t[r]):this.attr(r,t[r]);return this}return(i=E.getElementById(r[2]))&&(this[0]=i,this.length=1),this}return e.nodeType?(this[0]=e,this.length=1,this):m(e)?void 0!==n.ready?n.ready(e):e(S):S.makeArray(e,this)}).prototype=S.fn,j=S(E);var L=/^(?:parents|prev(?:Until|All))/,H={children:!0,contents:!0,next:!0,prev:!0};function O(e,t){while((e=e[t])&&1!==e.nodeType);return e}S.fn.extend({has:function(e){var t=S(e,this),n=t.length;return this.filter(function(){for(var e=0;e<n;e++)if(S.contains(this,t[e]))return!0})},closest:function(e,t){var n,r=0,i=this.length,o=[],a="string"!=typeof e&&S(e);if(!k.test(e))for(;r<i;r++)for(n=this[r];n&&n!==t;n=n.parentNode)if(n.nodeType<11&&(a?-1<a.index(n):1===n.nodeType&&S.find.matchesSelector(n,e))){o.push(n);break}return this.pushStack(1<o.length?S.uniqueSort(o):o)},index:function(e){return e?"string"==typeof e?i.call(S(e),this[0]):i.call(this,e.jquery?e[0]:e):this[0]&&this[0].parentNode?this.first().prevAll().length:-1},add:function(e,t){return this.pushStack(S.uniqueSort(S.merge(this.get(),S(e,t))))},addBack:function(e){return this.add(null==e?this.prevObject:this.prevObject.filter(e))}}),S.each({parent:function(e){var t=e.parentNode;return t&&11!==t.nodeType?t:null},parents:function(e){return h(e,"parentNode")},parentsUntil:function(e,t,n){return h(e,"parentNode",n)},next:function(e){return O(e,"nextSibling")},prev:function(e){return O(e,"previousSibling")},nextAll:function(e){return h(e,"nextSibling")},prevAll:function(e){return h(e,"previousSibling")},nextUntil:function(e,t,n){return h(e,"nextSibling",n)},prevUntil:function(e,t,n){return h(e,"previousSibling",n)},siblings:function(e){return T((e.parentNode||{}).firstChild,e)},children:function(e){return T(e.firstChild)},contents:function(e){return null!=e.contentDocument&&r(e.contentDocument)?e.contentDocument:(A(e,"template")&&(e=e.content||e),S.merge([],e.childNodes))}},function(r,i){S.fn[r]=function(e,t){var n=S.map(this,i,e);return"Until"!==r.slice(-5)&&(t=e),t&&"string"==typeof t&&(n=S.filter(t,n)),1<this.length&&(H[r]||S.uniqueSort(n),L.test(r)&&n.reverse()),this.pushStack(n)}});var P=/[^\x20\t\r\n\f]+/g;function R(e){return e}function M(e){throw e}function I(e,t,n,r){var i;try{e&&m(i=e.promise)?i.call(e).done(t).fail(n):e&&m(i=e.then)?i.call(e,t,n):t.apply(void 0,[e].slice(r))}catch(e){n.apply(void 0,[e])}}S.Callbacks=function(r){var 
e,n;r="string"==typeof r?(e=r,n={},S.each(e.match(P)||[],function(e,t){n[t]=!0}),n):S.extend({},r);var i,t,o,a,s=[],u=[],l=-1,c=function(){for(a=a||r.once,o=i=!0;u.length;l=-1){t=u.shift();while(++l<s.length)!1===s[l].apply(t[0],t[1])&&r.stopOnFalse&&(l=s.length,t=!1)}r.memory||(t=!1),i=!1,a&&(s=t?[]:"")},f={add:function(){return s&&(t&&!i&&(l=s.length-1,u.push(t)),function n(e){S.each(e,function(e,t){m(t)?r.unique&&f.has(t)||s.push(t):t&&t.length&&"string"!==w(t)&&n(t)})}(arguments),t&&!i&&c()),this},remove:function(){return S.each(arguments,function(e,t){var n;while(-1<(n=S.inArray(t,s,n)))s.splice(n,1),n<=l&&l--}),this},has:function(e){return e?-1<S.inArray(e,s):0<s.length},empty:function(){return s&&(s=[]),this},disable:function(){return a=u=[],s=t="",this},disabled:function(){return!s},lock:function(){return a=u=[],t||i||(s=t=""),this},locked:function(){return!!a},fireWith:function(e,t){return a||(t=[e,(t=t||[]).slice?t.slice():t],u.push(t),i||c()),this},fire:function(){return f.fireWith(this,arguments),this},fired:function(){return!!o}};return f},S.extend({Deferred:function(e){var o=[["notify","progress",S.Callbacks("memory"),S.Callbacks("memory"),2],["resolve","done",S.Callbacks("once memory"),S.Callbacks("once memory"),0,"resolved"],["reject","fail",S.Callbacks("once memory"),S.Callbacks("once memory"),1,"rejected"]],i="pending",a={state:function(){return i},always:function(){return s.done(arguments).fail(arguments),this},"catch":function(e){return a.then(null,e)},pipe:function(){var i=arguments;return S.Deferred(function(r){S.each(o,function(e,t){var n=m(i[t[4]])&&i[t[4]];s[t[1]](function(){var e=n&&n.apply(this,arguments);e&&m(e.promise)?e.promise().progress(r.notify).done(r.resolve).fail(r.reject):r[t[0]+"With"](this,n?[e]:arguments)})}),i=null}).promise()},then:function(t,n,r){var u=0;function l(i,o,a,s){return function(){var n=this,r=arguments,e=function(){var e,t;if(!(i<u)){if((e=a.apply(n,r))===o.promise())throw new TypeError("Thenable self-resolution");t=e&&("object"==typeof e||"function"==typeof e)&&e.then,m(t)?s?t.call(e,l(u,o,R,s),l(u,o,M,s)):(u++,t.call(e,l(u,o,R,s),l(u,o,M,s),l(u,o,R,o.notifyWith))):(a!==R&&(n=void 0,r=[e]),(s||o.resolveWith)(n,r))}},t=s?e:function(){try{e()}catch(e){S.Deferred.exceptionHook&&S.Deferred.exceptionHook(e,t.stackTrace),u<=i+1&&(a!==M&&(n=void 0,r=[e]),o.rejectWith(n,r))}};i?t():(S.Deferred.getStackHook&&(t.stackTrace=S.Deferred.getStackHook()),C.setTimeout(t))}}return S.Deferred(function(e){o[0][3].add(l(0,e,m(r)?r:R,e.notifyWith)),o[1][3].add(l(0,e,m(t)?t:R)),o[2][3].add(l(0,e,m(n)?n:M))}).promise()},promise:function(e){return null!=e?S.extend(e,a):a}},s={};return S.each(o,function(e,t){var n=t[2],r=t[5];a[t[1]]=n.add,r&&n.add(function(){i=r},o[3-e][2].disable,o[3-e][3].disable,o[0][2].lock,o[0][3].lock),n.add(t[3].fire),s[t[0]]=function(){return s[t[0]+"With"](this===s?void 0:this,arguments),this},s[t[0]+"With"]=n.fireWith}),a.promise(s),e&&e.call(s,s),s},when:function(e){var n=arguments.length,t=n,r=Array(t),i=s.call(arguments),o=S.Deferred(),a=function(t){return function(e){r[t]=this,i[t]=1<arguments.length?s.call(arguments):e,--n||o.resolveWith(r,i)}};if(n<=1&&(I(e,o.done(a(t)).resolve,o.reject,!n),"pending"===o.state()||m(i[t]&&i[t].then)))return o.then();while(t--)I(i[t],a(t),o.reject);return o.promise()}});var W=/^(Eval|Internal|Range|Reference|Syntax|Type|URI)Error$/;S.Deferred.exceptionHook=function(e,t){C.console&&C.console.warn&&e&&W.test(e.name)&&C.console.warn("jQuery.Deferred exception: 
"+e.message,e.stack,t)},S.readyException=function(e){C.setTimeout(function(){throw e})};var F=S.Deferred();function B(){E.removeEventListener("DOMContentLoaded",B),C.removeEventListener("load",B),S.ready()}S.fn.ready=function(e){return F.then(e)["catch"](function(e){S.readyException(e)}),this},S.extend({isReady:!1,readyWait:1,ready:function(e){(!0===e?--S.readyWait:S.isReady)||(S.isReady=!0)!==e&&0<--S.readyWait||F.resolveWith(E,[S])}}),S.ready.then=F.then,"complete"===E.readyState||"loading"!==E.readyState&&!E.documentElement.doScroll?C.setTimeout(S.ready):(E.addEventListener("DOMContentLoaded",B),C.addEventListener("load",B));var $=function(e,t,n,r,i,o,a){var s=0,u=e.length,l=null==n;if("object"===w(n))for(s in i=!0,n)$(e,t,s,n[s],!0,o,a);else if(void 0!==r&&(i=!0,m(r)||(a=!0),l&&(a?(t.call(e,r),t=null):(l=t,t=function(e,t,n){return l.call(S(e),n)})),t))for(;s<u;s++)t(e[s],n,a?r:r.call(e[s],s,t(e[s],n)));return i?e:l?t.call(e):u?t(e[0],n):o},_=/^-ms-/,z=/-([a-z])/g;function U(e,t){return t.toUpperCase()}function X(e){return e.replace(_,"ms-").replace(z,U)}var V=function(e){return 1===e.nodeType||9===e.nodeType||!+e.nodeType};function G(){this.expando=S.expando+G.uid++}G.uid=1,G.prototype={cache:function(e){var t=e[this.expando];return t||(t={},V(e)&&(e.nodeType?e[this.expando]=t:Object.defineProperty(e,this.expando,{value:t,configurable:!0}))),t},set:function(e,t,n){var r,i=this.cache(e);if("string"==typeof t)i[X(t)]=n;else for(r in t)i[X(r)]=t[r];return i},get:function(e,t){return void 0===t?this.cache(e):e[this.expando]&&e[this.expando][X(t)]},access:function(e,t,n){return void 0===t||t&&"string"==typeof t&&void 0===n?this.get(e,t):(this.set(e,t,n),void 0!==n?n:t)},remove:function(e,t){var n,r=e[this.expando];if(void 0!==r){if(void 0!==t){n=(t=Array.isArray(t)?t.map(X):(t=X(t))in r?[t]:t.match(P)||[]).length;while(n--)delete r[t[n]]}(void 0===t||S.isEmptyObject(r))&&(e.nodeType?e[this.expando]=void 0:delete e[this.expando])}},hasData:function(e){var t=e[this.expando];return void 0!==t&&!S.isEmptyObject(t)}};var Y=new G,Q=new G,J=/^(?:\{[\w\W]*\}|\[[\w\W]*\])$/,K=/[A-Z]/g;function Z(e,t,n){var r,i;if(void 0===n&&1===e.nodeType)if(r="data-"+t.replace(K,"-$&").toLowerCase(),"string"==typeof(n=e.getAttribute(r))){try{n="true"===(i=n)||"false"!==i&&("null"===i?null:i===+i+""?+i:J.test(i)?JSON.parse(i):i)}catch(e){}Q.set(e,t,n)}else n=void 0;return n}S.extend({hasData:function(e){return Q.hasData(e)||Y.hasData(e)},data:function(e,t,n){return Q.access(e,t,n)},removeData:function(e,t){Q.remove(e,t)},_data:function(e,t,n){return Y.access(e,t,n)},_removeData:function(e,t){Y.remove(e,t)}}),S.fn.extend({data:function(n,e){var t,r,i,o=this[0],a=o&&o.attributes;if(void 0===n){if(this.length&&(i=Q.get(o),1===o.nodeType&&!Y.get(o,"hasDataAttrs"))){t=a.length;while(t--)a[t]&&0===(r=a[t].name).indexOf("data-")&&(r=X(r.slice(5)),Z(o,r,i[r]));Y.set(o,"hasDataAttrs",!0)}return i}return"object"==typeof n?this.each(function(){Q.set(this,n)}):$(this,function(e){var t;if(o&&void 0===e)return void 0!==(t=Q.get(o,n))?t:void 0!==(t=Z(o,n))?t:void 0;this.each(function(){Q.set(this,n,e)})},null,e,1<arguments.length,null,!0)},removeData:function(e){return this.each(function(){Q.remove(this,e)})}}),S.extend({queue:function(e,t,n){var r;if(e)return t=(t||"fx")+"queue",r=Y.get(e,t),n&&(!r||Array.isArray(n)?r=Y.access(e,t,S.makeArray(n)):r.push(n)),r||[]},dequeue:function(e,t){t=t||"fx";var 
n=S.queue(e,t),r=n.length,i=n.shift(),o=S._queueHooks(e,t);"inprogress"===i&&(i=n.shift(),r--),i&&("fx"===t&&n.unshift("inprogress"),delete o.stop,i.call(e,function(){S.dequeue(e,t)},o)),!r&&o&&o.empty.fire()},_queueHooks:function(e,t){var n=t+"queueHooks";return Y.get(e,n)||Y.access(e,n,{empty:S.Callbacks("once memory").add(function(){Y.remove(e,[t+"queue",n])})})}}),S.fn.extend({queue:function(t,n){var e=2;return"string"!=typeof t&&(n=t,t="fx",e--),arguments.length<e?S.queue(this[0],t):void 0===n?this:this.each(function(){var e=S.queue(this,t,n);S._queueHooks(this,t),"fx"===t&&"inprogress"!==e[0]&&S.dequeue(this,t)})},dequeue:function(e){return this.each(function(){S.dequeue(this,e)})},clearQueue:function(e){return this.queue(e||"fx",[])},promise:function(e,t){var n,r=1,i=S.Deferred(),o=this,a=this.length,s=function(){--r||i.resolveWith(o,[o])};"string"!=typeof e&&(t=e,e=void 0),e=e||"fx";while(a--)(n=Y.get(o[a],e+"queueHooks"))&&n.empty&&(r++,n.empty.add(s));return s(),i.promise(t)}});var ee=/[+-]?(?:\d*\.|)\d+(?:[eE][+-]?\d+|)/.source,te=new RegExp("^(?:([+-])=|)("+ee+")([a-z%]*)$","i"),ne=["Top","Right","Bottom","Left"],re=E.documentElement,ie=function(e){return S.contains(e.ownerDocument,e)},oe={composed:!0};re.getRootNode&&(ie=function(e){return S.contains(e.ownerDocument,e)||e.getRootNode(oe)===e.ownerDocument});var ae=function(e,t){return"none"===(e=t||e).style.display||""===e.style.display&&ie(e)&&"none"===S.css(e,"display")};function se(e,t,n,r){var i,o,a=20,s=r?function(){return r.cur()}:function(){return S.css(e,t,"")},u=s(),l=n&&n[3]||(S.cssNumber[t]?"":"px"),c=e.nodeType&&(S.cssNumber[t]||"px"!==l&&+u)&&te.exec(S.css(e,t));if(c&&c[3]!==l){u/=2,l=l||c[3],c=+u||1;while(a--)S.style(e,t,c+l),(1-o)*(1-(o=s()/u||.5))<=0&&(a=0),c/=o;c*=2,S.style(e,t,c+l),n=n||[]}return n&&(c=+c||+u||0,i=n[1]?c+(n[1]+1)*n[2]:+n[2],r&&(r.unit=l,r.start=c,r.end=i)),i}var ue={};function le(e,t){for(var n,r,i,o,a,s,u,l=[],c=0,f=e.length;c<f;c++)(r=e[c]).style&&(n=r.style.display,t?("none"===n&&(l[c]=Y.get(r,"display")||null,l[c]||(r.style.display="")),""===r.style.display&&ae(r)&&(l[c]=(u=a=o=void 0,a=(i=r).ownerDocument,s=i.nodeName,(u=ue[s])||(o=a.body.appendChild(a.createElement(s)),u=S.css(o,"display"),o.parentNode.removeChild(o),"none"===u&&(u="block"),ue[s]=u)))):"none"!==n&&(l[c]="none",Y.set(r,"display",n)));for(c=0;c<f;c++)null!=l[c]&&(e[c].style.display=l[c]);return e}S.fn.extend({show:function(){return le(this,!0)},hide:function(){return le(this)},toggle:function(e){return"boolean"==typeof e?e?this.show():this.hide():this.each(function(){ae(this)?S(this).show():S(this).hide()})}});var ce,fe,pe=/^(?:checkbox|radio)$/i,de=/<([a-z][^\/\0>\x20\t\r\n\f]*)/i,he=/^$|^module$|\/(?:java|ecma)script/i;ce=E.createDocumentFragment().appendChild(E.createElement("div")),(fe=E.createElement("input")).setAttribute("type","radio"),fe.setAttribute("checked","checked"),fe.setAttribute("name","t"),ce.appendChild(fe),y.checkClone=ce.cloneNode(!0).cloneNode(!0).lastChild.checked,ce.innerHTML="<textarea>x</textarea>",y.noCloneChecked=!!ce.cloneNode(!0).lastChild.defaultValue,ce.innerHTML="<option></option>",y.option=!!ce.lastChild;var ge={thead:[1,"<table>","</table>"],col:[2,"<table><colgroup>","</colgroup></table>"],tr:[2,"<table><tbody>","</tbody></table>"],td:[3,"<table><tbody><tr>","</tr></tbody></table>"],_default:[0,"",""]};function ve(e,t){var n;return n="undefined"!=typeof e.getElementsByTagName?e.getElementsByTagName(t||"*"):"undefined"!=typeof e.querySelectorAll?e.querySelectorAll(t||"*"):[],void 
0===t||t&&A(e,t)?S.merge([e],n):n}function ye(e,t){for(var n=0,r=e.length;n<r;n++)Y.set(e[n],"globalEval",!t||Y.get(t[n],"globalEval"))}ge.tbody=ge.tfoot=ge.colgroup=ge.caption=ge.thead,ge.th=ge.td,y.option||(ge.optgroup=ge.option=[1,"<select multiple='multiple'>","</select>"]);var me=/<|&#?\w+;/;function xe(e,t,n,r,i){for(var o,a,s,u,l,c,f=t.createDocumentFragment(),p=[],d=0,h=e.length;d<h;d++)if((o=e[d])||0===o)if("object"===w(o))S.merge(p,o.nodeType?[o]:o);else if(me.test(o)){a=a||f.appendChild(t.createElement("div")),s=(de.exec(o)||["",""])[1].toLowerCase(),u=ge[s]||ge._default,a.innerHTML=u[1]+S.htmlPrefilter(o)+u[2],c=u[0];while(c--)a=a.lastChild;S.merge(p,a.childNodes),(a=f.firstChild).textContent=""}else p.push(t.createTextNode(o));f.textContent="",d=0;while(o=p[d++])if(r&&-1<S.inArray(o,r))i&&i.push(o);else if(l=ie(o),a=ve(f.appendChild(o),"script"),l&&ye(a),n){c=0;while(o=a[c++])he.test(o.type||"")&&n.push(o)}return f}var be=/^key/,we=/^(?:mouse|pointer|contextmenu|drag|drop)|click/,Te=/^([^.]*)(?:\.(.+)|)/;function Ce(){return!0}function Ee(){return!1}function Se(e,t){return e===function(){try{return E.activeElement}catch(e){}}()==("focus"===t)}function ke(e,t,n,r,i,o){var a,s;if("object"==typeof t){for(s in"string"!=typeof n&&(r=r||n,n=void 0),t)ke(e,s,n,r,t[s],o);return e}if(null==r&&null==i?(i=n,r=n=void 0):null==i&&("string"==typeof n?(i=r,r=void 0):(i=r,r=n,n=void 0)),!1===i)i=Ee;else if(!i)return e;return 1===o&&(a=i,(i=function(e){return S().off(e),a.apply(this,arguments)}).guid=a.guid||(a.guid=S.guid++)),e.each(function(){S.event.add(this,t,i,r,n)})}function Ae(e,i,o){o?(Y.set(e,i,!1),S.event.add(e,i,{namespace:!1,handler:function(e){var t,n,r=Y.get(this,i);if(1&e.isTrigger&&this[i]){if(r.length)(S.event.special[i]||{}).delegateType&&e.stopPropagation();else if(r=s.call(arguments),Y.set(this,i,r),t=o(this,i),this[i](),r!==(n=Y.get(this,i))||t?Y.set(this,i,!1):n={},r!==n)return e.stopImmediatePropagation(),e.preventDefault(),n.value}else r.length&&(Y.set(this,i,{value:S.event.trigger(S.extend(r[0],S.Event.prototype),r.slice(1),this)}),e.stopImmediatePropagation())}})):void 0===Y.get(e,i)&&S.event.add(e,i,Ce)}S.event={global:{},add:function(t,e,n,r,i){var o,a,s,u,l,c,f,p,d,h,g,v=Y.get(t);if(V(t)){n.handler&&(n=(o=n).handler,i=o.selector),i&&S.find.matchesSelector(re,i),n.guid||(n.guid=S.guid++),(u=v.events)||(u=v.events=Object.create(null)),(a=v.handle)||(a=v.handle=function(e){return"undefined"!=typeof S&&S.event.triggered!==e.type?S.event.dispatch.apply(t,arguments):void 0}),l=(e=(e||"").match(P)||[""]).length;while(l--)d=g=(s=Te.exec(e[l])||[])[1],h=(s[2]||"").split(".").sort(),d&&(f=S.event.special[d]||{},d=(i?f.delegateType:f.bindType)||d,f=S.event.special[d]||{},c=S.extend({type:d,origType:g,data:r,handler:n,guid:n.guid,selector:i,needsContext:i&&S.expr.match.needsContext.test(i),namespace:h.join(".")},o),(p=u[d])||((p=u[d]=[]).delegateCount=0,f.setup&&!1!==f.setup.call(t,r,h,a)||t.addEventListener&&t.addEventListener(d,a)),f.add&&(f.add.call(t,c),c.handler.guid||(c.handler.guid=n.guid)),i?p.splice(p.delegateCount++,0,c):p.push(c),S.event.global[d]=!0)}},remove:function(e,t,n,r,i){var o,a,s,u,l,c,f,p,d,h,g,v=Y.hasData(e)&&Y.get(e);if(v&&(u=v.events)){l=(t=(t||"").match(P)||[""]).length;while(l--)if(d=g=(s=Te.exec(t[l])||[])[1],h=(s[2]||"").split(".").sort(),d){f=S.event.special[d]||{},p=u[d=(r?f.delegateType:f.bindType)||d]||[],s=s[2]&&new 
RegExp("(^|\\.)"+h.join("\\.(?:.*\\.|)")+"(\\.|$)"),a=o=p.length;while(o--)c=p[o],!i&&g!==c.origType||n&&n.guid!==c.guid||s&&!s.test(c.namespace)||r&&r!==c.selector&&("**"!==r||!c.selector)||(p.splice(o,1),c.selector&&p.delegateCount--,f.remove&&f.remove.call(e,c));a&&!p.length&&(f.teardown&&!1!==f.teardown.call(e,h,v.handle)||S.removeEvent(e,d,v.handle),delete u[d])}else for(d in u)S.event.remove(e,d+t[l],n,r,!0);S.isEmptyObject(u)&&Y.remove(e,"handle events")}},dispatch:function(e){var t,n,r,i,o,a,s=new Array(arguments.length),u=S.event.fix(e),l=(Y.get(this,"events")||Object.create(null))[u.type]||[],c=S.event.special[u.type]||{};for(s[0]=u,t=1;t<arguments.length;t++)s[t]=arguments[t];if(u.delegateTarget=this,!c.preDispatch||!1!==c.preDispatch.call(this,u)){a=S.event.handlers.call(this,u,l),t=0;while((i=a[t++])&&!u.isPropagationStopped()){u.currentTarget=i.elem,n=0;while((o=i.handlers[n++])&&!u.isImmediatePropagationStopped())u.rnamespace&&!1!==o.namespace&&!u.rnamespace.test(o.namespace)||(u.handleObj=o,u.data=o.data,void 0!==(r=((S.event.special[o.origType]||{}).handle||o.handler).apply(i.elem,s))&&!1===(u.result=r)&&(u.preventDefault(),u.stopPropagation()))}return c.postDispatch&&c.postDispatch.call(this,u),u.result}},handlers:function(e,t){var n,r,i,o,a,s=[],u=t.delegateCount,l=e.target;if(u&&l.nodeType&&!("click"===e.type&&1<=e.button))for(;l!==this;l=l.parentNode||this)if(1===l.nodeType&&("click"!==e.type||!0!==l.disabled)){for(o=[],a={},n=0;n<u;n++)void 0===a[i=(r=t[n]).selector+" "]&&(a[i]=r.needsContext?-1<S(i,this).index(l):S.find(i,this,null,[l]).length),a[i]&&o.push(r);o.length&&s.push({elem:l,handlers:o})}return l=this,u<t.length&&s.push({elem:l,handlers:t.slice(u)}),s},addProp:function(t,e){Object.defineProperty(S.Event.prototype,t,{enumerable:!0,configurable:!0,get:m(e)?function(){if(this.originalEvent)return e(this.originalEvent)}:function(){if(this.originalEvent)return this.originalEvent[t]},set:function(e){Object.defineProperty(this,t,{enumerable:!0,configurable:!0,writable:!0,value:e})}})},fix:function(e){return e[S.expando]?e:new S.Event(e)},special:{load:{noBubble:!0},click:{setup:function(e){var t=this||e;return pe.test(t.type)&&t.click&&A(t,"input")&&Ae(t,"click",Ce),!1},trigger:function(e){var t=this||e;return pe.test(t.type)&&t.click&&A(t,"input")&&Ae(t,"click"),!0},_default:function(e){var t=e.target;return pe.test(t.type)&&t.click&&A(t,"input")&&Y.get(t,"click")||A(t,"a")}},beforeunload:{postDispatch:function(e){void 0!==e.result&&e.originalEvent&&(e.originalEvent.returnValue=e.result)}}}},S.removeEvent=function(e,t,n){e.removeEventListener&&e.removeEventListener(t,n)},S.Event=function(e,t){if(!(this instanceof S.Event))return new S.Event(e,t);e&&e.type?(this.originalEvent=e,this.type=e.type,this.isDefaultPrevented=e.defaultPrevented||void 0===e.defaultPrevented&&!1===e.returnValue?Ce:Ee,this.target=e.target&&3===e.target.nodeType?e.target.parentNode:e.target,this.currentTarget=e.currentTarget,this.relatedTarget=e.relatedTarget):this.type=e,t&&S.extend(this,t),this.timeStamp=e&&e.timeStamp||Date.now(),this[S.expando]=!0},S.Event.prototype={constructor:S.Event,isDefaultPrevented:Ee,isPropagationStopped:Ee,isImmediatePropagationStopped:Ee,isSimulated:!1,preventDefault:function(){var e=this.originalEvent;this.isDefaultPrevented=Ce,e&&!this.isSimulated&&e.preventDefault()},stopPropagation:function(){var e=this.originalEvent;this.isPropagationStopped=Ce,e&&!this.isSimulated&&e.stopPropagation()},stopImmediatePropagation:function(){var 
e=this.originalEvent;this.isImmediatePropagationStopped=Ce,e&&!this.isSimulated&&e.stopImmediatePropagation(),this.stopPropagation()}},S.each({altKey:!0,bubbles:!0,cancelable:!0,changedTouches:!0,ctrlKey:!0,detail:!0,eventPhase:!0,metaKey:!0,pageX:!0,pageY:!0,shiftKey:!0,view:!0,"char":!0,code:!0,charCode:!0,key:!0,keyCode:!0,button:!0,buttons:!0,clientX:!0,clientY:!0,offsetX:!0,offsetY:!0,pointerId:!0,pointerType:!0,screenX:!0,screenY:!0,targetTouches:!0,toElement:!0,touches:!0,which:function(e){var t=e.button;return null==e.which&&be.test(e.type)?null!=e.charCode?e.charCode:e.keyCode:!e.which&&void 0!==t&&we.test(e.type)?1&t?1:2&t?3:4&t?2:0:e.which}},S.event.addProp),S.each({focus:"focusin",blur:"focusout"},function(e,t){S.event.special[e]={setup:function(){return Ae(this,e,Se),!1},trigger:function(){return Ae(this,e),!0},delegateType:t}}),S.each({mouseenter:"mouseover",mouseleave:"mouseout",pointerenter:"pointerover",pointerleave:"pointerout"},function(e,i){S.event.special[e]={delegateType:i,bindType:i,handle:function(e){var t,n=e.relatedTarget,r=e.handleObj;return n&&(n===this||S.contains(this,n))||(e.type=r.origType,t=r.handler.apply(this,arguments),e.type=i),t}}}),S.fn.extend({on:function(e,t,n,r){return ke(this,e,t,n,r)},one:function(e,t,n,r){return ke(this,e,t,n,r,1)},off:function(e,t,n){var r,i;if(e&&e.preventDefault&&e.handleObj)return r=e.handleObj,S(e.delegateTarget).off(r.namespace?r.origType+"."+r.namespace:r.origType,r.selector,r.handler),this;if("object"==typeof e){for(i in e)this.off(i,t,e[i]);return this}return!1!==t&&"function"!=typeof t||(n=t,t=void 0),!1===n&&(n=Ee),this.each(function(){S.event.remove(this,e,n,t)})}});var Ne=/<script|<style|<link/i,De=/checked\s*(?:[^=]|=\s*.checked.)/i,je=/^\s*<!(?:\[CDATA\[|--)|(?:\]\]|--)>\s*$/g;function qe(e,t){return A(e,"table")&&A(11!==t.nodeType?t:t.firstChild,"tr")&&S(e).children("tbody")[0]||e}function Le(e){return e.type=(null!==e.getAttribute("type"))+"/"+e.type,e}function He(e){return"true/"===(e.type||"").slice(0,5)?e.type=e.type.slice(5):e.removeAttribute("type"),e}function Oe(e,t){var n,r,i,o,a,s;if(1===t.nodeType){if(Y.hasData(e)&&(s=Y.get(e).events))for(i in Y.remove(t,"handle events"),s)for(n=0,r=s[i].length;n<r;n++)S.event.add(t,i,s[i][n]);Q.hasData(e)&&(o=Q.access(e),a=S.extend({},o),Q.set(t,a))}}function Pe(n,r,i,o){r=g(r);var e,t,a,s,u,l,c=0,f=n.length,p=f-1,d=r[0],h=m(d);if(h||1<f&&"string"==typeof d&&!y.checkClone&&De.test(d))return n.each(function(e){var t=n.eq(e);h&&(r[0]=d.call(this,e,t.html())),Pe(t,r,i,o)});if(f&&(t=(e=xe(r,n[0].ownerDocument,!1,n,o)).firstChild,1===e.childNodes.length&&(e=t),t||o)){for(s=(a=S.map(ve(e,"script"),Le)).length;c<f;c++)u=e,c!==p&&(u=S.clone(u,!0,!0),s&&S.merge(a,ve(u,"script"))),i.call(n[c],u,c);if(s)for(l=a[a.length-1].ownerDocument,S.map(a,He),c=0;c<s;c++)u=a[c],he.test(u.type||"")&&!Y.access(u,"globalEval")&&S.contains(l,u)&&(u.src&&"module"!==(u.type||"").toLowerCase()?S._evalUrl&&!u.noModule&&S._evalUrl(u.src,{nonce:u.nonce||u.getAttribute("nonce")},l):b(u.textContent.replace(je,""),u,l))}return n}function Re(e,t,n){for(var r,i=t?S.filter(t,e):e,o=0;null!=(r=i[o]);o++)n||1!==r.nodeType||S.cleanData(ve(r)),r.parentNode&&(n&&ie(r)&&ye(ve(r,"script")),r.parentNode.removeChild(r));return e}S.extend({htmlPrefilter:function(e){return e},clone:function(e,t,n){var r,i,o,a,s,u,l,c=e.cloneNode(!0),f=ie(e);if(!(y.noCloneChecked||1!==e.nodeType&&11!==e.nodeType||S.isXMLDoc(e)))for(a=ve(c),r=0,i=(o=ve(e)).length;r<i;r++)s=o[r],u=a[r],void 
0,"input"===(l=u.nodeName.toLowerCase())&&pe.test(s.type)?u.checked=s.checked:"input"!==l&&"textarea"!==l||(u.defaultValue=s.defaultValue);if(t)if(n)for(o=o||ve(e),a=a||ve(c),r=0,i=o.length;r<i;r++)Oe(o[r],a[r]);else Oe(e,c);return 0<(a=ve(c,"script")).length&&ye(a,!f&&ve(e,"script")),c},cleanData:function(e){for(var t,n,r,i=S.event.special,o=0;void 0!==(n=e[o]);o++)if(V(n)){if(t=n[Y.expando]){if(t.events)for(r in t.events)i[r]?S.event.remove(n,r):S.removeEvent(n,r,t.handle);n[Y.expando]=void 0}n[Q.expando]&&(n[Q.expando]=void 0)}}}),S.fn.extend({detach:function(e){return Re(this,e,!0)},remove:function(e){return Re(this,e)},text:function(e){return $(this,function(e){return void 0===e?S.text(this):this.empty().each(function(){1!==this.nodeType&&11!==this.nodeType&&9!==this.nodeType||(this.textContent=e)})},null,e,arguments.length)},append:function(){return Pe(this,arguments,function(e){1!==this.nodeType&&11!==this.nodeType&&9!==this.nodeType||qe(this,e).appendChild(e)})},prepend:function(){return Pe(this,arguments,function(e){if(1===this.nodeType||11===this.nodeType||9===this.nodeType){var t=qe(this,e);t.insertBefore(e,t.firstChild)}})},before:function(){return Pe(this,arguments,function(e){this.parentNode&&this.parentNode.insertBefore(e,this)})},after:function(){return Pe(this,arguments,function(e){this.parentNode&&this.parentNode.insertBefore(e,this.nextSibling)})},empty:function(){for(var e,t=0;null!=(e=this[t]);t++)1===e.nodeType&&(S.cleanData(ve(e,!1)),e.textContent="");return this},clone:function(e,t){return e=null!=e&&e,t=null==t?e:t,this.map(function(){return S.clone(this,e,t)})},html:function(e){return $(this,function(e){var t=this[0]||{},n=0,r=this.length;if(void 0===e&&1===t.nodeType)return t.innerHTML;if("string"==typeof e&&!Ne.test(e)&&!ge[(de.exec(e)||["",""])[1].toLowerCase()]){e=S.htmlPrefilter(e);try{for(;n<r;n++)1===(t=this[n]||{}).nodeType&&(S.cleanData(ve(t,!1)),t.innerHTML=e);t=0}catch(e){}}t&&this.empty().append(e)},null,e,arguments.length)},replaceWith:function(){var n=[];return Pe(this,arguments,function(e){var t=this.parentNode;S.inArray(this,n)<0&&(S.cleanData(ve(this)),t&&t.replaceChild(e,this))},n)}}),S.each({appendTo:"append",prependTo:"prepend",insertBefore:"before",insertAfter:"after",replaceAll:"replaceWith"},function(e,a){S.fn[e]=function(e){for(var t,n=[],r=S(e),i=r.length-1,o=0;o<=i;o++)t=o===i?this:this.clone(!0),S(r[o])[a](t),u.apply(n,t.get());return this.pushStack(n)}});var Me=new RegExp("^("+ee+")(?!px)[a-z%]+$","i"),Ie=function(e){var t=e.ownerDocument.defaultView;return t&&t.opener||(t=C),t.getComputedStyle(e)},We=function(e,t,n){var r,i,o={};for(i in t)o[i]=e.style[i],e.style[i]=t[i];for(i in r=n.call(e),t)e.style[i]=o[i];return r},Fe=new RegExp(ne.join("|"),"i");function Be(e,t,n){var r,i,o,a,s=e.style;return(n=n||Ie(e))&&(""!==(a=n.getPropertyValue(t)||n[t])||ie(e)||(a=S.style(e,t)),!y.pixelBoxStyles()&&Me.test(a)&&Fe.test(t)&&(r=s.width,i=s.minWidth,o=s.maxWidth,s.minWidth=s.maxWidth=s.width=a,a=n.width,s.width=r,s.minWidth=i,s.maxWidth=o)),void 0!==a?a+"":a}function $e(e,t){return{get:function(){if(!e())return(this.get=t).apply(this,arguments);delete this.get}}}!function(){function e(){if(l){u.style.cssText="position:absolute;left:-11111px;width:60px;margin-top:1px;padding:0;border:0",l.style.cssText="position:relative;display:block;box-sizing:border-box;overflow:scroll;margin:auto;border:1px;padding:1px;width:60%;top:1%",re.appendChild(u).appendChild(l);var 
e=C.getComputedStyle(l);n="1%"!==e.top,s=12===t(e.marginLeft),l.style.right="60%",o=36===t(e.right),r=36===t(e.width),l.style.position="absolute",i=12===t(l.offsetWidth/3),re.removeChild(u),l=null}}function t(e){return Math.round(parseFloat(e))}var n,r,i,o,a,s,u=E.createElement("div"),l=E.createElement("div");l.style&&(l.style.backgroundClip="content-box",l.cloneNode(!0).style.backgroundClip="",y.clearCloneStyle="content-box"===l.style.backgroundClip,S.extend(y,{boxSizingReliable:function(){return e(),r},pixelBoxStyles:function(){return e(),o},pixelPosition:function(){return e(),n},reliableMarginLeft:function(){return e(),s},scrollboxSize:function(){return e(),i},reliableTrDimensions:function(){var e,t,n,r;return null==a&&(e=E.createElement("table"),t=E.createElement("tr"),n=E.createElement("div"),e.style.cssText="position:absolute;left:-11111px",t.style.height="1px",n.style.height="9px",re.appendChild(e).appendChild(t).appendChild(n),r=C.getComputedStyle(t),a=3<parseInt(r.height),re.removeChild(e)),a}}))}();var _e=["Webkit","Moz","ms"],ze=E.createElement("div").style,Ue={};function Xe(e){var t=S.cssProps[e]||Ue[e];return t||(e in ze?e:Ue[e]=function(e){var t=e[0].toUpperCase()+e.slice(1),n=_e.length;while(n--)if((e=_e[n]+t)in ze)return e}(e)||e)}var Ve=/^(none|table(?!-c[ea]).+)/,Ge=/^--/,Ye={position:"absolute",visibility:"hidden",display:"block"},Qe={letterSpacing:"0",fontWeight:"400"};function Je(e,t,n){var r=te.exec(t);return r?Math.max(0,r[2]-(n||0))+(r[3]||"px"):t}function Ke(e,t,n,r,i,o){var a="width"===t?1:0,s=0,u=0;if(n===(r?"border":"content"))return 0;for(;a<4;a+=2)"margin"===n&&(u+=S.css(e,n+ne[a],!0,i)),r?("content"===n&&(u-=S.css(e,"padding"+ne[a],!0,i)),"margin"!==n&&(u-=S.css(e,"border"+ne[a]+"Width",!0,i))):(u+=S.css(e,"padding"+ne[a],!0,i),"padding"!==n?u+=S.css(e,"border"+ne[a]+"Width",!0,i):s+=S.css(e,"border"+ne[a]+"Width",!0,i));return!r&&0<=o&&(u+=Math.max(0,Math.ceil(e["offset"+t[0].toUpperCase()+t.slice(1)]-o-u-s-.5))||0),u}function Ze(e,t,n){var r=Ie(e),i=(!y.boxSizingReliable()||n)&&"border-box"===S.css(e,"boxSizing",!1,r),o=i,a=Be(e,t,r),s="offset"+t[0].toUpperCase()+t.slice(1);if(Me.test(a)){if(!n)return a;a="auto"}return(!y.boxSizingReliable()&&i||!y.reliableTrDimensions()&&A(e,"tr")||"auto"===a||!parseFloat(a)&&"inline"===S.css(e,"display",!1,r))&&e.getClientRects().length&&(i="border-box"===S.css(e,"boxSizing",!1,r),(o=s in e)&&(a=e[s])),(a=parseFloat(a)||0)+Ke(e,t,n||(i?"border":"content"),o,r,a)+"px"}function et(e,t,n,r,i){return new et.prototype.init(e,t,n,r,i)}S.extend({cssHooks:{opacity:{get:function(e,t){if(t){var n=Be(e,"opacity");return""===n?"1":n}}}},cssNumber:{animationIterationCount:!0,columnCount:!0,fillOpacity:!0,flexGrow:!0,flexShrink:!0,fontWeight:!0,gridArea:!0,gridColumn:!0,gridColumnEnd:!0,gridColumnStart:!0,gridRow:!0,gridRowEnd:!0,gridRowStart:!0,lineHeight:!0,opacity:!0,order:!0,orphans:!0,widows:!0,zIndex:!0,zoom:!0},cssProps:{},style:function(e,t,n,r){if(e&&3!==e.nodeType&&8!==e.nodeType&&e.style){var i,o,a,s=X(t),u=Ge.test(t),l=e.style;if(u||(t=Xe(s)),a=S.cssHooks[t]||S.cssHooks[s],void 0===n)return a&&"get"in a&&void 0!==(i=a.get(e,!1,r))?i:l[t];"string"===(o=typeof n)&&(i=te.exec(n))&&i[1]&&(n=se(e,t,i),o="number"),null!=n&&n==n&&("number"!==o||u||(n+=i&&i[3]||(S.cssNumber[s]?"":"px")),y.clearCloneStyle||""!==n||0!==t.indexOf("background")||(l[t]="inherit"),a&&"set"in a&&void 0===(n=a.set(e,n,r))||(u?l.setProperty(t,n):l[t]=n))}},css:function(e,t,n,r){var i,o,a,s=X(t);return 
Ge.test(t)||(t=Xe(s)),(a=S.cssHooks[t]||S.cssHooks[s])&&"get"in a&&(i=a.get(e,!0,n)),void 0===i&&(i=Be(e,t,r)),"normal"===i&&t in Qe&&(i=Qe[t]),""===n||n?(o=parseFloat(i),!0===n||isFinite(o)?o||0:i):i}}),S.each(["height","width"],function(e,u){S.cssHooks[u]={get:function(e,t,n){if(t)return!Ve.test(S.css(e,"display"))||e.getClientRects().length&&e.getBoundingClientRect().width?Ze(e,u,n):We(e,Ye,function(){return Ze(e,u,n)})},set:function(e,t,n){var r,i=Ie(e),o=!y.scrollboxSize()&&"absolute"===i.position,a=(o||n)&&"border-box"===S.css(e,"boxSizing",!1,i),s=n?Ke(e,u,n,a,i):0;return a&&o&&(s-=Math.ceil(e["offset"+u[0].toUpperCase()+u.slice(1)]-parseFloat(i[u])-Ke(e,u,"border",!1,i)-.5)),s&&(r=te.exec(t))&&"px"!==(r[3]||"px")&&(e.style[u]=t,t=S.css(e,u)),Je(0,t,s)}}}),S.cssHooks.marginLeft=$e(y.reliableMarginLeft,function(e,t){if(t)return(parseFloat(Be(e,"marginLeft"))||e.getBoundingClientRect().left-We(e,{marginLeft:0},function(){return e.getBoundingClientRect().left}))+"px"}),S.each({margin:"",padding:"",border:"Width"},function(i,o){S.cssHooks[i+o]={expand:function(e){for(var t=0,n={},r="string"==typeof e?e.split(" "):[e];t<4;t++)n[i+ne[t]+o]=r[t]||r[t-2]||r[0];return n}},"margin"!==i&&(S.cssHooks[i+o].set=Je)}),S.fn.extend({css:function(e,t){return $(this,function(e,t,n){var r,i,o={},a=0;if(Array.isArray(t)){for(r=Ie(e),i=t.length;a<i;a++)o[t[a]]=S.css(e,t[a],!1,r);return o}return void 0!==n?S.style(e,t,n):S.css(e,t)},e,t,1<arguments.length)}}),((S.Tween=et).prototype={constructor:et,init:function(e,t,n,r,i,o){this.elem=e,this.prop=n,this.easing=i||S.easing._default,this.options=t,this.start=this.now=this.cur(),this.end=r,this.unit=o||(S.cssNumber[n]?"":"px")},cur:function(){var e=et.propHooks[this.prop];return e&&e.get?e.get(this):et.propHooks._default.get(this)},run:function(e){var t,n=et.propHooks[this.prop];return this.options.duration?this.pos=t=S.easing[this.easing](e,this.options.duration*e,0,1,this.options.duration):this.pos=t=e,this.now=(this.end-this.start)*t+this.start,this.options.step&&this.options.step.call(this.elem,this.now,this),n&&n.set?n.set(this):et.propHooks._default.set(this),this}}).init.prototype=et.prototype,(et.propHooks={_default:{get:function(e){var t;return 1!==e.elem.nodeType||null!=e.elem[e.prop]&&null==e.elem.style[e.prop]?e.elem[e.prop]:(t=S.css(e.elem,e.prop,""))&&"auto"!==t?t:0},set:function(e){S.fx.step[e.prop]?S.fx.step[e.prop](e):1!==e.elem.nodeType||!S.cssHooks[e.prop]&&null==e.elem.style[Xe(e.prop)]?e.elem[e.prop]=e.now:S.style(e.elem,e.prop,e.now+e.unit)}}}).scrollTop=et.propHooks.scrollLeft={set:function(e){e.elem.nodeType&&e.elem.parentNode&&(e.elem[e.prop]=e.now)}},S.easing={linear:function(e){return e},swing:function(e){return.5-Math.cos(e*Math.PI)/2},_default:"swing"},S.fx=et.prototype.init,S.fx.step={};var tt,nt,rt,it,ot=/^(?:toggle|show|hide)$/,at=/queueHooks$/;function st(){nt&&(!1===E.hidden&&C.requestAnimationFrame?C.requestAnimationFrame(st):C.setTimeout(st,S.fx.interval),S.fx.tick())}function ut(){return C.setTimeout(function(){tt=void 0}),tt=Date.now()}function lt(e,t){var n,r=0,i={height:e};for(t=t?1:0;r<4;r+=2-t)i["margin"+(n=ne[r])]=i["padding"+n]=e;return t&&(i.opacity=i.width=e),i}function ct(e,t,n){for(var r,i=(ft.tweeners[t]||[]).concat(ft.tweeners["*"]),o=0,a=i.length;o<a;o++)if(r=i[o].call(n,t,e))return r}function ft(o,e,t){var n,a,r=0,i=ft.prefilters.length,s=S.Deferred().always(function(){delete u.elem}),u=function(){if(a)return!1;for(var 
e=tt||ut(),t=Math.max(0,l.startTime+l.duration-e),n=1-(t/l.duration||0),r=0,i=l.tweens.length;r<i;r++)l.tweens[r].run(n);return s.notifyWith(o,[l,n,t]),n<1&&i?t:(i||s.notifyWith(o,[l,1,0]),s.resolveWith(o,[l]),!1)},l=s.promise({elem:o,props:S.extend({},e),opts:S.extend(!0,{specialEasing:{},easing:S.easing._default},t),originalProperties:e,originalOptions:t,startTime:tt||ut(),duration:t.duration,tweens:[],createTween:function(e,t){var n=S.Tween(o,l.opts,e,t,l.opts.specialEasing[e]||l.opts.easing);return l.tweens.push(n),n},stop:function(e){var t=0,n=e?l.tweens.length:0;if(a)return this;for(a=!0;t<n;t++)l.tweens[t].run(1);return e?(s.notifyWith(o,[l,1,0]),s.resolveWith(o,[l,e])):s.rejectWith(o,[l,e]),this}}),c=l.props;for(!function(e,t){var n,r,i,o,a;for(n in e)if(i=t[r=X(n)],o=e[n],Array.isArray(o)&&(i=o[1],o=e[n]=o[0]),n!==r&&(e[r]=o,delete e[n]),(a=S.cssHooks[r])&&"expand"in a)for(n in o=a.expand(o),delete e[r],o)n in e||(e[n]=o[n],t[n]=i);else t[r]=i}(c,l.opts.specialEasing);r<i;r++)if(n=ft.prefilters[r].call(l,o,c,l.opts))return m(n.stop)&&(S._queueHooks(l.elem,l.opts.queue).stop=n.stop.bind(n)),n;return S.map(c,ct,l),m(l.opts.start)&&l.opts.start.call(o,l),l.progress(l.opts.progress).done(l.opts.done,l.opts.complete).fail(l.opts.fail).always(l.opts.always),S.fx.timer(S.extend(u,{elem:o,anim:l,queue:l.opts.queue})),l}S.Animation=S.extend(ft,{tweeners:{"*":[function(e,t){var n=this.createTween(e,t);return se(n.elem,e,te.exec(t),n),n}]},tweener:function(e,t){m(e)?(t=e,e=["*"]):e=e.match(P);for(var n,r=0,i=e.length;r<i;r++)n=e[r],ft.tweeners[n]=ft.tweeners[n]||[],ft.tweeners[n].unshift(t)},prefilters:[function(e,t,n){var r,i,o,a,s,u,l,c,f="width"in t||"height"in t,p=this,d={},h=e.style,g=e.nodeType&&ae(e),v=Y.get(e,"fxshow");for(r in n.queue||(null==(a=S._queueHooks(e,"fx")).unqueued&&(a.unqueued=0,s=a.empty.fire,a.empty.fire=function(){a.unqueued||s()}),a.unqueued++,p.always(function(){p.always(function(){a.unqueued--,S.queue(e,"fx").length||a.empty.fire()})})),t)if(i=t[r],ot.test(i)){if(delete t[r],o=o||"toggle"===i,i===(g?"hide":"show")){if("show"!==i||!v||void 0===v[r])continue;g=!0}d[r]=v&&v[r]||S.style(e,r)}if((u=!S.isEmptyObject(t))||!S.isEmptyObject(d))for(r in f&&1===e.nodeType&&(n.overflow=[h.overflow,h.overflowX,h.overflowY],null==(l=v&&v.display)&&(l=Y.get(e,"display")),"none"===(c=S.css(e,"display"))&&(l?c=l:(le([e],!0),l=e.style.display||l,c=S.css(e,"display"),le([e]))),("inline"===c||"inline-block"===c&&null!=l)&&"none"===S.css(e,"float")&&(u||(p.done(function(){h.display=l}),null==l&&(c=h.display,l="none"===c?"":c)),h.display="inline-block")),n.overflow&&(h.overflow="hidden",p.always(function(){h.overflow=n.overflow[0],h.overflowX=n.overflow[1],h.overflowY=n.overflow[2]})),u=!1,d)u||(v?"hidden"in v&&(g=v.hidden):v=Y.access(e,"fxshow",{display:l}),o&&(v.hidden=!g),g&&le([e],!0),p.done(function(){for(r in g||le([e]),Y.remove(e,"fxshow"),d)S.style(e,r,d[r])})),u=ct(g?v[r]:0,r,p),r in v||(v[r]=u.start,g&&(u.end=u.start,u.start=0))}],prefilter:function(e,t){t?ft.prefilters.unshift(e):ft.prefilters.push(e)}}),S.speed=function(e,t,n){var r=e&&"object"==typeof e?S.extend({},e):{complete:n||!n&&t||m(e)&&e,duration:e,easing:n&&t||t&&!m(t)&&t};return S.fx.off?r.duration=0:"number"!=typeof r.duration&&(r.duration in S.fx.speeds?r.duration=S.fx.speeds[r.duration]:r.duration=S.fx.speeds._default),null!=r.queue&&!0!==r.queue||(r.queue="fx"),r.old=r.complete,r.complete=function(){m(r.old)&&r.old.call(this),r.queue&&S.dequeue(this,r.queue)},r},S.fn.extend({fadeTo:function(e,t,n,r){return 
this.filter(ae).css("opacity",0).show().end().animate({opacity:t},e,n,r)},animate:function(t,e,n,r){var i=S.isEmptyObject(t),o=S.speed(e,n,r),a=function(){var e=ft(this,S.extend({},t),o);(i||Y.get(this,"finish"))&&e.stop(!0)};return a.finish=a,i||!1===o.queue?this.each(a):this.queue(o.queue,a)},stop:function(i,e,o){var a=function(e){var t=e.stop;delete e.stop,t(o)};return"string"!=typeof i&&(o=e,e=i,i=void 0),e&&this.queue(i||"fx",[]),this.each(function(){var e=!0,t=null!=i&&i+"queueHooks",n=S.timers,r=Y.get(this);if(t)r[t]&&r[t].stop&&a(r[t]);else for(t in r)r[t]&&r[t].stop&&at.test(t)&&a(r[t]);for(t=n.length;t--;)n[t].elem!==this||null!=i&&n[t].queue!==i||(n[t].anim.stop(o),e=!1,n.splice(t,1));!e&&o||S.dequeue(this,i)})},finish:function(a){return!1!==a&&(a=a||"fx"),this.each(function(){var e,t=Y.get(this),n=t[a+"queue"],r=t[a+"queueHooks"],i=S.timers,o=n?n.length:0;for(t.finish=!0,S.queue(this,a,[]),r&&r.stop&&r.stop.call(this,!0),e=i.length;e--;)i[e].elem===this&&i[e].queue===a&&(i[e].anim.stop(!0),i.splice(e,1));for(e=0;e<o;e++)n[e]&&n[e].finish&&n[e].finish.call(this);delete t.finish})}}),S.each(["toggle","show","hide"],function(e,r){var i=S.fn[r];S.fn[r]=function(e,t,n){return null==e||"boolean"==typeof e?i.apply(this,arguments):this.animate(lt(r,!0),e,t,n)}}),S.each({slideDown:lt("show"),slideUp:lt("hide"),slideToggle:lt("toggle"),fadeIn:{opacity:"show"},fadeOut:{opacity:"hide"},fadeToggle:{opacity:"toggle"}},function(e,r){S.fn[e]=function(e,t,n){return this.animate(r,e,t,n)}}),S.timers=[],S.fx.tick=function(){var e,t=0,n=S.timers;for(tt=Date.now();t<n.length;t++)(e=n[t])()||n[t]!==e||n.splice(t--,1);n.length||S.fx.stop(),tt=void 0},S.fx.timer=function(e){S.timers.push(e),S.fx.start()},S.fx.interval=13,S.fx.start=function(){nt||(nt=!0,st())},S.fx.stop=function(){nt=null},S.fx.speeds={slow:600,fast:200,_default:400},S.fn.delay=function(r,e){return r=S.fx&&S.fx.speeds[r]||r,e=e||"fx",this.queue(e,function(e,t){var n=C.setTimeout(e,r);t.stop=function(){C.clearTimeout(n)}})},rt=E.createElement("input"),it=E.createElement("select").appendChild(E.createElement("option")),rt.type="checkbox",y.checkOn=""!==rt.value,y.optSelected=it.selected,(rt=E.createElement("input")).value="t",rt.type="radio",y.radioValue="t"===rt.value;var pt,dt=S.expr.attrHandle;S.fn.extend({attr:function(e,t){return $(this,S.attr,e,t,1<arguments.length)},removeAttr:function(e){return this.each(function(){S.removeAttr(this,e)})}}),S.extend({attr:function(e,t,n){var r,i,o=e.nodeType;if(3!==o&&8!==o&&2!==o)return"undefined"==typeof e.getAttribute?S.prop(e,t,n):(1===o&&S.isXMLDoc(e)||(i=S.attrHooks[t.toLowerCase()]||(S.expr.match.bool.test(t)?pt:void 0)),void 0!==n?null===n?void S.removeAttr(e,t):i&&"set"in i&&void 0!==(r=i.set(e,n,t))?r:(e.setAttribute(t,n+""),n):i&&"get"in i&&null!==(r=i.get(e,t))?r:null==(r=S.find.attr(e,t))?void 0:r)},attrHooks:{type:{set:function(e,t){if(!y.radioValue&&"radio"===t&&A(e,"input")){var n=e.value;return e.setAttribute("type",t),n&&(e.value=n),t}}}},removeAttr:function(e,t){var n,r=0,i=t&&t.match(P);if(i&&1===e.nodeType)while(n=i[r++])e.removeAttribute(n)}}),pt={set:function(e,t,n){return!1===t?S.removeAttr(e,n):e.setAttribute(n,n),n}},S.each(S.expr.match.bool.source.match(/\w+/g),function(e,t){var a=dt[t]||S.find.attr;dt[t]=function(e,t,n){var r,i,o=t.toLowerCase();return n||(i=dt[o],dt[o]=r,r=null!=a(e,t,n)?o:null,dt[o]=i),r}});var ht=/^(?:input|select|textarea|button)$/i,gt=/^(?:a|area)$/i;function vt(e){return(e.match(P)||[]).join(" ")}function yt(e){return 
e.getAttribute&&e.getAttribute("class")||""}function mt(e){return Array.isArray(e)?e:"string"==typeof e&&e.match(P)||[]}S.fn.extend({prop:function(e,t){return $(this,S.prop,e,t,1<arguments.length)},removeProp:function(e){return this.each(function(){delete this[S.propFix[e]||e]})}}),S.extend({prop:function(e,t,n){var r,i,o=e.nodeType;if(3!==o&&8!==o&&2!==o)return 1===o&&S.isXMLDoc(e)||(t=S.propFix[t]||t,i=S.propHooks[t]),void 0!==n?i&&"set"in i&&void 0!==(r=i.set(e,n,t))?r:e[t]=n:i&&"get"in i&&null!==(r=i.get(e,t))?r:e[t]},propHooks:{tabIndex:{get:function(e){var t=S.find.attr(e,"tabindex");return t?parseInt(t,10):ht.test(e.nodeName)||gt.test(e.nodeName)&&e.href?0:-1}}},propFix:{"for":"htmlFor","class":"className"}}),y.optSelected||(S.propHooks.selected={get:function(e){var t=e.parentNode;return t&&t.parentNode&&t.parentNode.selectedIndex,null},set:function(e){var t=e.parentNode;t&&(t.selectedIndex,t.parentNode&&t.parentNode.selectedIndex)}}),S.each(["tabIndex","readOnly","maxLength","cellSpacing","cellPadding","rowSpan","colSpan","useMap","frameBorder","contentEditable"],function(){S.propFix[this.toLowerCase()]=this}),S.fn.extend({addClass:function(t){var e,n,r,i,o,a,s,u=0;if(m(t))return this.each(function(e){S(this).addClass(t.call(this,e,yt(this)))});if((e=mt(t)).length)while(n=this[u++])if(i=yt(n),r=1===n.nodeType&&" "+vt(i)+" "){a=0;while(o=e[a++])r.indexOf(" "+o+" ")<0&&(r+=o+" ");i!==(s=vt(r))&&n.setAttribute("class",s)}return this},removeClass:function(t){var e,n,r,i,o,a,s,u=0;if(m(t))return this.each(function(e){S(this).removeClass(t.call(this,e,yt(this)))});if(!arguments.length)return this.attr("class","");if((e=mt(t)).length)while(n=this[u++])if(i=yt(n),r=1===n.nodeType&&" "+vt(i)+" "){a=0;while(o=e[a++])while(-1<r.indexOf(" "+o+" "))r=r.replace(" "+o+" "," ");i!==(s=vt(r))&&n.setAttribute("class",s)}return this},toggleClass:function(i,t){var o=typeof i,a="string"===o||Array.isArray(i);return"boolean"==typeof t&&a?t?this.addClass(i):this.removeClass(i):m(i)?this.each(function(e){S(this).toggleClass(i.call(this,e,yt(this),t),t)}):this.each(function(){var e,t,n,r;if(a){t=0,n=S(this),r=mt(i);while(e=r[t++])n.hasClass(e)?n.removeClass(e):n.addClass(e)}else void 0!==i&&"boolean"!==o||((e=yt(this))&&Y.set(this,"__className__",e),this.setAttribute&&this.setAttribute("class",e||!1===i?"":Y.get(this,"__className__")||""))})},hasClass:function(e){var t,n,r=0;t=" "+e+" ";while(n=this[r++])if(1===n.nodeType&&-1<(" "+vt(yt(n))+" ").indexOf(t))return!0;return!1}});var xt=/\r/g;S.fn.extend({val:function(n){var r,e,i,t=this[0];return arguments.length?(i=m(n),this.each(function(e){var t;1===this.nodeType&&(null==(t=i?n.call(this,e,S(this).val()):n)?t="":"number"==typeof t?t+="":Array.isArray(t)&&(t=S.map(t,function(e){return null==e?"":e+""})),(r=S.valHooks[this.type]||S.valHooks[this.nodeName.toLowerCase()])&&"set"in r&&void 0!==r.set(this,t,"value")||(this.value=t))})):t?(r=S.valHooks[t.type]||S.valHooks[t.nodeName.toLowerCase()])&&"get"in r&&void 0!==(e=r.get(t,"value"))?e:"string"==typeof(e=t.value)?e.replace(xt,""):null==e?"":e:void 0}}),S.extend({valHooks:{option:{get:function(e){var t=S.find.attr(e,"value");return null!=t?t:vt(S.text(e))}},select:{get:function(e){var t,n,r,i=e.options,o=e.selectedIndex,a="select-one"===e.type,s=a?null:[],u=a?o+1:i.length;for(r=o<0?u:a?o:0;r<u;r++)if(((n=i[r]).selected||r===o)&&!n.disabled&&(!n.parentNode.disabled||!A(n.parentNode,"optgroup"))){if(t=S(n).val(),a)return t;s.push(t)}return s},set:function(e,t){var 
n,r,i=e.options,o=S.makeArray(t),a=i.length;while(a--)((r=i[a]).selected=-1<S.inArray(S.valHooks.option.get(r),o))&&(n=!0);return n||(e.selectedIndex=-1),o}}}}),S.each(["radio","checkbox"],function(){S.valHooks[this]={set:function(e,t){if(Array.isArray(t))return e.checked=-1<S.inArray(S(e).val(),t)}},y.checkOn||(S.valHooks[this].get=function(e){return null===e.getAttribute("value")?"on":e.value})}),y.focusin="onfocusin"in C;var bt=/^(?:focusinfocus|focusoutblur)$/,wt=function(e){e.stopPropagation()};S.extend(S.event,{trigger:function(e,t,n,r){var i,o,a,s,u,l,c,f,p=[n||E],d=v.call(e,"type")?e.type:e,h=v.call(e,"namespace")?e.namespace.split("."):[];if(o=f=a=n=n||E,3!==n.nodeType&&8!==n.nodeType&&!bt.test(d+S.event.triggered)&&(-1<d.indexOf(".")&&(d=(h=d.split(".")).shift(),h.sort()),u=d.indexOf(":")<0&&"on"+d,(e=e[S.expando]?e:new S.Event(d,"object"==typeof e&&e)).isTrigger=r?2:3,e.namespace=h.join("."),e.rnamespace=e.namespace?new RegExp("(^|\\.)"+h.join("\\.(?:.*\\.|)")+"(\\.|$)"):null,e.result=void 0,e.target||(e.target=n),t=null==t?[e]:S.makeArray(t,[e]),c=S.event.special[d]||{},r||!c.trigger||!1!==c.trigger.apply(n,t))){if(!r&&!c.noBubble&&!x(n)){for(s=c.delegateType||d,bt.test(s+d)||(o=o.parentNode);o;o=o.parentNode)p.push(o),a=o;a===(n.ownerDocument||E)&&p.push(a.defaultView||a.parentWindow||C)}i=0;while((o=p[i++])&&!e.isPropagationStopped())f=o,e.type=1<i?s:c.bindType||d,(l=(Y.get(o,"events")||Object.create(null))[e.type]&&Y.get(o,"handle"))&&l.apply(o,t),(l=u&&o[u])&&l.apply&&V(o)&&(e.result=l.apply(o,t),!1===e.result&&e.preventDefault());return e.type=d,r||e.isDefaultPrevented()||c._default&&!1!==c._default.apply(p.pop(),t)||!V(n)||u&&m(n[d])&&!x(n)&&((a=n[u])&&(n[u]=null),S.event.triggered=d,e.isPropagationStopped()&&f.addEventListener(d,wt),n[d](),e.isPropagationStopped()&&f.removeEventListener(d,wt),S.event.triggered=void 0,a&&(n[u]=a)),e.result}},simulate:function(e,t,n){var r=S.extend(new S.Event,n,{type:e,isSimulated:!0});S.event.trigger(r,null,t)}}),S.fn.extend({trigger:function(e,t){return this.each(function(){S.event.trigger(e,t,this)})},triggerHandler:function(e,t){var n=this[0];if(n)return S.event.trigger(e,t,n,!0)}}),y.focusin||S.each({focus:"focusin",blur:"focusout"},function(n,r){var i=function(e){S.event.simulate(r,e.target,S.event.fix(e))};S.event.special[r]={setup:function(){var e=this.ownerDocument||this.document||this,t=Y.access(e,r);t||e.addEventListener(n,i,!0),Y.access(e,r,(t||0)+1)},teardown:function(){var e=this.ownerDocument||this.document||this,t=Y.access(e,r)-1;t?Y.access(e,r,t):(e.removeEventListener(n,i,!0),Y.remove(e,r))}}});var Tt=C.location,Ct={guid:Date.now()},Et=/\?/;S.parseXML=function(e){var t;if(!e||"string"!=typeof e)return null;try{t=(new C.DOMParser).parseFromString(e,"text/xml")}catch(e){t=void 0}return t&&!t.getElementsByTagName("parsererror").length||S.error("Invalid XML: "+e),t};var St=/\[\]$/,kt=/\r?\n/g,At=/^(?:submit|button|image|reset|file)$/i,Nt=/^(?:input|select|textarea|keygen)/i;function Dt(n,e,r,i){var t;if(Array.isArray(e))S.each(e,function(e,t){r||St.test(n)?i(n,t):Dt(n+"["+("object"==typeof t&&null!=t?e:"")+"]",t,r,i)});else if(r||"object"!==w(e))i(n,e);else for(t in e)Dt(n+"["+t+"]",e[t],r,i)}S.param=function(e,t){var n,r=[],i=function(e,t){var n=m(t)?t():t;r[r.length]=encodeURIComponent(e)+"="+encodeURIComponent(null==n?"":n)};if(null==e)return"";if(Array.isArray(e)||e.jquery&&!S.isPlainObject(e))S.each(e,function(){i(this.name,this.value)});else for(n in e)Dt(n,e[n],t,i);return 
r.join("&")},S.fn.extend({serialize:function(){return S.param(this.serializeArray())},serializeArray:function(){return this.map(function(){var e=S.prop(this,"elements");return e?S.makeArray(e):this}).filter(function(){var e=this.type;return this.name&&!S(this).is(":disabled")&&Nt.test(this.nodeName)&&!At.test(e)&&(this.checked||!pe.test(e))}).map(function(e,t){var n=S(this).val();return null==n?null:Array.isArray(n)?S.map(n,function(e){return{name:t.name,value:e.replace(kt,"\r\n")}}):{name:t.name,value:n.replace(kt,"\r\n")}}).get()}});var jt=/%20/g,qt=/#.*$/,Lt=/([?&])_=[^&]*/,Ht=/^(.*?):[ \t]*([^\r\n]*)$/gm,Ot=/^(?:GET|HEAD)$/,Pt=/^\/\//,Rt={},Mt={},It="*/".concat("*"),Wt=E.createElement("a");function Ft(o){return function(e,t){"string"!=typeof e&&(t=e,e="*");var n,r=0,i=e.toLowerCase().match(P)||[];if(m(t))while(n=i[r++])"+"===n[0]?(n=n.slice(1)||"*",(o[n]=o[n]||[]).unshift(t)):(o[n]=o[n]||[]).push(t)}}function Bt(t,i,o,a){var s={},u=t===Mt;function l(e){var r;return s[e]=!0,S.each(t[e]||[],function(e,t){var n=t(i,o,a);return"string"!=typeof n||u||s[n]?u?!(r=n):void 0:(i.dataTypes.unshift(n),l(n),!1)}),r}return l(i.dataTypes[0])||!s["*"]&&l("*")}function $t(e,t){var n,r,i=S.ajaxSettings.flatOptions||{};for(n in t)void 0!==t[n]&&((i[n]?e:r||(r={}))[n]=t[n]);return r&&S.extend(!0,e,r),e}Wt.href=Tt.href,S.extend({active:0,lastModified:{},etag:{},ajaxSettings:{url:Tt.href,type:"GET",isLocal:/^(?:about|app|app-storage|.+-extension|file|res|widget):$/.test(Tt.protocol),global:!0,processData:!0,async:!0,contentType:"application/x-www-form-urlencoded; charset=UTF-8",accepts:{"*":It,text:"text/plain",html:"text/html",xml:"application/xml, text/xml",json:"application/json, text/javascript"},contents:{xml:/\bxml\b/,html:/\bhtml/,json:/\bjson\b/},responseFields:{xml:"responseXML",text:"responseText",json:"responseJSON"},converters:{"* text":String,"text html":!0,"text json":JSON.parse,"text xml":S.parseXML},flatOptions:{url:!0,context:!0}},ajaxSetup:function(e,t){return t?$t($t(e,S.ajaxSettings),t):$t(S.ajaxSettings,e)},ajaxPrefilter:Ft(Rt),ajaxTransport:Ft(Mt),ajax:function(e,t){"object"==typeof e&&(t=e,e=void 0),t=t||{};var c,f,p,n,d,r,h,g,i,o,v=S.ajaxSetup({},t),y=v.context||v,m=v.context&&(y.nodeType||y.jquery)?S(y):S.event,x=S.Deferred(),b=S.Callbacks("once memory"),w=v.statusCode||{},a={},s={},u="canceled",T={readyState:0,getResponseHeader:function(e){var t;if(h){if(!n){n={};while(t=Ht.exec(p))n[t[1].toLowerCase()+" "]=(n[t[1].toLowerCase()+" "]||[]).concat(t[2])}t=n[e.toLowerCase()+" "]}return null==t?null:t.join(", ")},getAllResponseHeaders:function(){return h?p:null},setRequestHeader:function(e,t){return null==h&&(e=s[e.toLowerCase()]=s[e.toLowerCase()]||e,a[e]=t),this},overrideMimeType:function(e){return null==h&&(v.mimeType=e),this},statusCode:function(e){var t;if(e)if(h)T.always(e[T.status]);else for(t in e)w[t]=[w[t],e[t]];return this},abort:function(e){var t=e||u;return c&&c.abort(t),l(0,t),this}};if(x.promise(T),v.url=((e||v.url||Tt.href)+"").replace(Pt,Tt.protocol+"//"),v.type=t.method||t.type||v.method||v.type,v.dataTypes=(v.dataType||"*").toLowerCase().match(P)||[""],null==v.crossDomain){r=E.createElement("a");try{r.href=v.url,r.href=r.href,v.crossDomain=Wt.protocol+"//"+Wt.host!=r.protocol+"//"+r.host}catch(e){v.crossDomain=!0}}if(v.data&&v.processData&&"string"!=typeof v.data&&(v.data=S.param(v.data,v.traditional)),Bt(Rt,v,t,T),h)return T;for(i 
in(g=S.event&&v.global)&&0==S.active++&&S.event.trigger("ajaxStart"),v.type=v.type.toUpperCase(),v.hasContent=!Ot.test(v.type),f=v.url.replace(qt,""),v.hasContent?v.data&&v.processData&&0===(v.contentType||"").indexOf("application/x-www-form-urlencoded")&&(v.data=v.data.replace(jt,"+")):(o=v.url.slice(f.length),v.data&&(v.processData||"string"==typeof v.data)&&(f+=(Et.test(f)?"&":"?")+v.data,delete v.data),!1===v.cache&&(f=f.replace(Lt,"$1"),o=(Et.test(f)?"&":"?")+"_="+Ct.guid+++o),v.url=f+o),v.ifModified&&(S.lastModified[f]&&T.setRequestHeader("If-Modified-Since",S.lastModified[f]),S.etag[f]&&T.setRequestHeader("If-None-Match",S.etag[f])),(v.data&&v.hasContent&&!1!==v.contentType||t.contentType)&&T.setRequestHeader("Content-Type",v.contentType),T.setRequestHeader("Accept",v.dataTypes[0]&&v.accepts[v.dataTypes[0]]?v.accepts[v.dataTypes[0]]+("*"!==v.dataTypes[0]?", "+It+"; q=0.01":""):v.accepts["*"]),v.headers)T.setRequestHeader(i,v.headers[i]);if(v.beforeSend&&(!1===v.beforeSend.call(y,T,v)||h))return T.abort();if(u="abort",b.add(v.complete),T.done(v.success),T.fail(v.error),c=Bt(Mt,v,t,T)){if(T.readyState=1,g&&m.trigger("ajaxSend",[T,v]),h)return T;v.async&&0<v.timeout&&(d=C.setTimeout(function(){T.abort("timeout")},v.timeout));try{h=!1,c.send(a,l)}catch(e){if(h)throw e;l(-1,e)}}else l(-1,"No Transport");function l(e,t,n,r){var i,o,a,s,u,l=t;h||(h=!0,d&&C.clearTimeout(d),c=void 0,p=r||"",T.readyState=0<e?4:0,i=200<=e&&e<300||304===e,n&&(s=function(e,t,n){var r,i,o,a,s=e.contents,u=e.dataTypes;while("*"===u[0])u.shift(),void 0===r&&(r=e.mimeType||t.getResponseHeader("Content-Type"));if(r)for(i in s)if(s[i]&&s[i].test(r)){u.unshift(i);break}if(u[0]in n)o=u[0];else{for(i in n){if(!u[0]||e.converters[i+" "+u[0]]){o=i;break}a||(a=i)}o=o||a}if(o)return o!==u[0]&&u.unshift(o),n[o]}(v,T,n)),!i&&-1<S.inArray("script",v.dataTypes)&&(v.converters["text script"]=function(){}),s=function(e,t,n,r){var i,o,a,s,u,l={},c=e.dataTypes.slice();if(c[1])for(a in e.converters)l[a.toLowerCase()]=e.converters[a];o=c.shift();while(o)if(e.responseFields[o]&&(n[e.responseFields[o]]=t),!u&&r&&e.dataFilter&&(t=e.dataFilter(t,e.dataType)),u=o,o=c.shift())if("*"===o)o=u;else if("*"!==u&&u!==o){if(!(a=l[u+" "+o]||l["* "+o]))for(i in l)if((s=i.split(" "))[1]===o&&(a=l[u+" "+s[0]]||l["* "+s[0]])){!0===a?a=l[i]:!0!==l[i]&&(o=s[0],c.unshift(s[1]));break}if(!0!==a)if(a&&e["throws"])t=a(t);else try{t=a(t)}catch(e){return{state:"parsererror",error:a?e:"No conversion from "+u+" to "+o}}}return{state:"success",data:t}}(v,s,T,i),i?(v.ifModified&&((u=T.getResponseHeader("Last-Modified"))&&(S.lastModified[f]=u),(u=T.getResponseHeader("etag"))&&(S.etag[f]=u)),204===e||"HEAD"===v.type?l="nocontent":304===e?l="notmodified":(l=s.state,o=s.data,i=!(a=s.error))):(a=l,!e&&l||(l="error",e<0&&(e=0))),T.status=e,T.statusText=(t||l)+"",i?x.resolveWith(y,[o,l,T]):x.rejectWith(y,[T,l,a]),T.statusCode(w),w=void 0,g&&m.trigger(i?"ajaxSuccess":"ajaxError",[T,v,i?o:a]),b.fireWith(y,[T,l]),g&&(m.trigger("ajaxComplete",[T,v]),--S.active||S.event.trigger("ajaxStop")))}return T},getJSON:function(e,t,n){return S.get(e,t,n,"json")},getScript:function(e,t){return S.get(e,void 0,t,"script")}}),S.each(["get","post"],function(e,i){S[i]=function(e,t,n,r){return m(t)&&(r=r||n,n=t,t=void 0),S.ajax(S.extend({url:e,type:i,dataType:r,data:t,success:n},S.isPlainObject(e)&&e))}}),S.ajaxPrefilter(function(e){var t;for(t in e.headers)"content-type"===t.toLowerCase()&&(e.contentType=e.headers[t]||"")}),S._evalUrl=function(e,t,n){return 
S.ajax({url:e,type:"GET",dataType:"script",cache:!0,async:!1,global:!1,converters:{"text script":function(){}},dataFilter:function(e){S.globalEval(e,t,n)}})},S.fn.extend({wrapAll:function(e){var t;return this[0]&&(m(e)&&(e=e.call(this[0])),t=S(e,this[0].ownerDocument).eq(0).clone(!0),this[0].parentNode&&t.insertBefore(this[0]),t.map(function(){var e=this;while(e.firstElementChild)e=e.firstElementChild;return e}).append(this)),this},wrapInner:function(n){return m(n)?this.each(function(e){S(this).wrapInner(n.call(this,e))}):this.each(function(){var e=S(this),t=e.contents();t.length?t.wrapAll(n):e.append(n)})},wrap:function(t){var n=m(t);return this.each(function(e){S(this).wrapAll(n?t.call(this,e):t)})},unwrap:function(e){return this.parent(e).not("body").each(function(){S(this).replaceWith(this.childNodes)}),this}}),S.expr.pseudos.hidden=function(e){return!S.expr.pseudos.visible(e)},S.expr.pseudos.visible=function(e){return!!(e.offsetWidth||e.offsetHeight||e.getClientRects().length)},S.ajaxSettings.xhr=function(){try{return new C.XMLHttpRequest}catch(e){}};var _t={0:200,1223:204},zt=S.ajaxSettings.xhr();y.cors=!!zt&&"withCredentials"in zt,y.ajax=zt=!!zt,S.ajaxTransport(function(i){var o,a;if(y.cors||zt&&!i.crossDomain)return{send:function(e,t){var n,r=i.xhr();if(r.open(i.type,i.url,i.async,i.username,i.password),i.xhrFields)for(n in i.xhrFields)r[n]=i.xhrFields[n];for(n in i.mimeType&&r.overrideMimeType&&r.overrideMimeType(i.mimeType),i.crossDomain||e["X-Requested-With"]||(e["X-Requested-With"]="XMLHttpRequest"),e)r.setRequestHeader(n,e[n]);o=function(e){return function(){o&&(o=a=r.onload=r.onerror=r.onabort=r.ontimeout=r.onreadystatechange=null,"abort"===e?r.abort():"error"===e?"number"!=typeof r.status?t(0,"error"):t(r.status,r.statusText):t(_t[r.status]||r.status,r.statusText,"text"!==(r.responseType||"text")||"string"!=typeof r.responseText?{binary:r.response}:{text:r.responseText},r.getAllResponseHeaders()))}},r.onload=o(),a=r.onerror=r.ontimeout=o("error"),void 0!==r.onabort?r.onabort=a:r.onreadystatechange=function(){4===r.readyState&&C.setTimeout(function(){o&&a()})},o=o("abort");try{r.send(i.hasContent&&i.data||null)}catch(e){if(o)throw e}},abort:function(){o&&o()}}}),S.ajaxPrefilter(function(e){e.crossDomain&&(e.contents.script=!1)}),S.ajaxSetup({accepts:{script:"text/javascript, application/javascript, application/ecmascript, application/x-ecmascript"},contents:{script:/\b(?:java|ecma)script\b/},converters:{"text script":function(e){return S.globalEval(e),e}}}),S.ajaxPrefilter("script",function(e){void 0===e.cache&&(e.cache=!1),e.crossDomain&&(e.type="GET")}),S.ajaxTransport("script",function(n){var r,i;if(n.crossDomain||n.scriptAttrs)return{send:function(e,t){r=S("<script>").attr(n.scriptAttrs||{}).prop({charset:n.scriptCharset,src:n.url}).on("load error",i=function(e){r.remove(),i=null,e&&t("error"===e.type?404:200,e.type)}),E.head.appendChild(r[0])},abort:function(){i&&i()}}});var Ut,Xt=[],Vt=/(=)\?(?=&|$)|\?\?/;S.ajaxSetup({jsonp:"callback",jsonpCallback:function(){var e=Xt.pop()||S.expando+"_"+Ct.guid++;return this[e]=!0,e}}),S.ajaxPrefilter("json jsonp",function(e,t,n){var r,i,o,a=!1!==e.jsonp&&(Vt.test(e.url)?"url":"string"==typeof e.data&&0===(e.contentType||"").indexOf("application/x-www-form-urlencoded")&&Vt.test(e.data)&&"data");if(a||"jsonp"===e.dataTypes[0])return r=e.jsonpCallback=m(e.jsonpCallback)?e.jsonpCallback():e.jsonpCallback,a?e[a]=e[a].replace(Vt,"$1"+r):!1!==e.jsonp&&(e.url+=(Et.test(e.url)?"&":"?")+e.jsonp+"="+r),e.converters["script 
json"]=function(){return o||S.error(r+" was not called"),o[0]},e.dataTypes[0]="json",i=C[r],C[r]=function(){o=arguments},n.always(function(){void 0===i?S(C).removeProp(r):C[r]=i,e[r]&&(e.jsonpCallback=t.jsonpCallback,Xt.push(r)),o&&m(i)&&i(o[0]),o=i=void 0}),"script"}),y.createHTMLDocument=((Ut=E.implementation.createHTMLDocument("").body).innerHTML="<form></form><form></form>",2===Ut.childNodes.length),S.parseHTML=function(e,t,n){return"string"!=typeof e?[]:("boolean"==typeof t&&(n=t,t=!1),t||(y.createHTMLDocument?((r=(t=E.implementation.createHTMLDocument("")).createElement("base")).href=E.location.href,t.head.appendChild(r)):t=E),o=!n&&[],(i=N.exec(e))?[t.createElement(i[1])]:(i=xe([e],t,o),o&&o.length&&S(o).remove(),S.merge([],i.childNodes)));var r,i,o},S.fn.load=function(e,t,n){var r,i,o,a=this,s=e.indexOf(" ");return-1<s&&(r=vt(e.slice(s)),e=e.slice(0,s)),m(t)?(n=t,t=void 0):t&&"object"==typeof t&&(i="POST"),0<a.length&&S.ajax({url:e,type:i||"GET",dataType:"html",data:t}).done(function(e){o=arguments,a.html(r?S("<div>").append(S.parseHTML(e)).find(r):e)}).always(n&&function(e,t){a.each(function(){n.apply(this,o||[e.responseText,t,e])})}),this},S.expr.pseudos.animated=function(t){return S.grep(S.timers,function(e){return t===e.elem}).length},S.offset={setOffset:function(e,t,n){var r,i,o,a,s,u,l=S.css(e,"position"),c=S(e),f={};"static"===l&&(e.style.position="relative"),s=c.offset(),o=S.css(e,"top"),u=S.css(e,"left"),("absolute"===l||"fixed"===l)&&-1<(o+u).indexOf("auto")?(a=(r=c.position()).top,i=r.left):(a=parseFloat(o)||0,i=parseFloat(u)||0),m(t)&&(t=t.call(e,n,S.extend({},s))),null!=t.top&&(f.top=t.top-s.top+a),null!=t.left&&(f.left=t.left-s.left+i),"using"in t?t.using.call(e,f):("number"==typeof f.top&&(f.top+="px"),"number"==typeof f.left&&(f.left+="px"),c.css(f))}},S.fn.extend({offset:function(t){if(arguments.length)return void 0===t?this:this.each(function(e){S.offset.setOffset(this,t,e)});var e,n,r=this[0];return r?r.getClientRects().length?(e=r.getBoundingClientRect(),n=r.ownerDocument.defaultView,{top:e.top+n.pageYOffset,left:e.left+n.pageXOffset}):{top:0,left:0}:void 0},position:function(){if(this[0]){var e,t,n,r=this[0],i={top:0,left:0};if("fixed"===S.css(r,"position"))t=r.getBoundingClientRect();else{t=this.offset(),n=r.ownerDocument,e=r.offsetParent||n.documentElement;while(e&&(e===n.body||e===n.documentElement)&&"static"===S.css(e,"position"))e=e.parentNode;e&&e!==r&&1===e.nodeType&&((i=S(e).offset()).top+=S.css(e,"borderTopWidth",!0),i.left+=S.css(e,"borderLeftWidth",!0))}return{top:t.top-i.top-S.css(r,"marginTop",!0),left:t.left-i.left-S.css(r,"marginLeft",!0)}}},offsetParent:function(){return this.map(function(){var e=this.offsetParent;while(e&&"static"===S.css(e,"position"))e=e.offsetParent;return e||re})}}),S.each({scrollLeft:"pageXOffset",scrollTop:"pageYOffset"},function(t,i){var o="pageYOffset"===i;S.fn[t]=function(e){return $(this,function(e,t,n){var r;if(x(e)?r=e:9===e.nodeType&&(r=e.defaultView),void 0===n)return r?r[i]:e[t];r?r.scrollTo(o?r.pageXOffset:n,o?n:r.pageYOffset):e[t]=n},t,e,arguments.length)}}),S.each(["top","left"],function(e,n){S.cssHooks[n]=$e(y.pixelPosition,function(e,t){if(t)return t=Be(e,n),Me.test(t)?S(e).position()[n]+"px":t})}),S.each({Height:"height",Width:"width"},function(a,s){S.each({padding:"inner"+a,content:s,"":"outer"+a},function(r,o){S.fn[o]=function(e,t){var n=arguments.length&&(r||"boolean"!=typeof e),i=r||(!0===e||!0===t?"margin":"border");return $(this,function(e,t,n){var r;return 
x(e)?0===o.indexOf("outer")?e["inner"+a]:e.document.documentElement["client"+a]:9===e.nodeType?(r=e.documentElement,Math.max(e.body["scroll"+a],r["scroll"+a],e.body["offset"+a],r["offset"+a],r["client"+a])):void 0===n?S.css(e,t,i):S.style(e,t,n,i)},s,n?e:void 0,n)}})}),S.each(["ajaxStart","ajaxStop","ajaxComplete","ajaxError","ajaxSuccess","ajaxSend"],function(e,t){S.fn[t]=function(e){return this.on(t,e)}}),S.fn.extend({bind:function(e,t,n){return this.on(e,null,t,n)},unbind:function(e,t){return this.off(e,null,t)},delegate:function(e,t,n,r){return this.on(t,e,n,r)},undelegate:function(e,t,n){return 1===arguments.length?this.off(e,"**"):this.off(t,e||"**",n)},hover:function(e,t){return this.mouseenter(e).mouseleave(t||e)}}),S.each("blur focus focusin focusout resize scroll click dblclick mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave change select submit keydown keypress keyup contextmenu".split(" "),function(e,n){S.fn[n]=function(e,t){return 0<arguments.length?this.on(n,null,e,t):this.trigger(n)}});var Gt=/^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g;S.proxy=function(e,t){var n,r,i;if("string"==typeof t&&(n=e[t],t=e,e=n),m(e))return r=s.call(arguments,2),(i=function(){return e.apply(t||this,r.concat(s.call(arguments)))}).guid=e.guid=e.guid||S.guid++,i},S.holdReady=function(e){e?S.readyWait++:S.ready(!0)},S.isArray=Array.isArray,S.parseJSON=JSON.parse,S.nodeName=A,S.isFunction=m,S.isWindow=x,S.camelCase=X,S.type=w,S.now=Date.now,S.isNumeric=function(e){var t=S.type(e);return("number"===t||"string"===t)&&!isNaN(e-parseFloat(e))},S.trim=function(e){return null==e?"":(e+"").replace(Gt,"")},"function"==typeof define&&define.amd&&define("jquery",[],function(){return S});var Yt=C.jQuery,Qt=C.$;return S.noConflict=function(e){return C.$===S&&(C.$=Qt),e&&C.jQuery===S&&(C.jQuery=Yt),S},"undefined"==typeof e&&(C.jQuery=C.$=S),S});
diff --git a/deps/rabbitmq_management/priv/www/js/jquery.flot-0.8.1.js b/deps/rabbitmq_management/priv/www/js/jquery.flot-0.8.1.js
new file mode 100644
index 0000000000..4b34fd68eb
--- /dev/null
+++ b/deps/rabbitmq_management/priv/www/js/jquery.flot-0.8.1.js
@@ -0,0 +1,3061 @@
+/* Javascript plotting library for jQuery, version 0.8.1.
+
+Copyright (c) 2007-2013 IOLA and Ole Laursen.
+Licensed under the MIT license.
+
+*/
+
+// first an inline dependency, jquery.colorhelpers.js, we inline it here
+// for convenience
+
+/* Plugin for jQuery for working with colors.
+ *
+ * Version 1.1.
+ *
+ * Inspiration from jQuery color animation plugin by John Resig.
+ *
+ * Released under the MIT license by Ole Laursen, October 2009.
+ *
+ * Examples:
+ *
+ * $.color.parse("#fff").scale('rgb', 0.25).add('a', -0.5).toString()
+ * var c = $.color.extract($("#mydiv"), 'background-color');
+ * console.log(c.r, c.g, c.b, c.a);
+ * $.color.make(100, 50, 25, 0.4).toString() // returns "rgba(100,50,25,0.4)"
+ *
+ * Note that .scale() and .add() return the same modified object
+ * instead of making a new one.
+ *
+ * V. 1.1: Fix error handling so that e.g. parsing an empty string
+ * produces a color rather than just crashing.
+ */
+(function(B){B.color={};B.color.make=function(F,E,C,D){var G={};G.r=F||0;G.g=E||0;G.b=C||0;G.a=D!=null?D:1;G.add=function(J,I){for(var H=0;H<J.length;++H){G[J.charAt(H)]+=I}return G.normalize()};G.scale=function(J,I){for(var H=0;H<J.length;++H){G[J.charAt(H)]*=I}return G.normalize()};G.toString=function(){if(G.a>=1){return"rgb("+[G.r,G.g,G.b].join(",")+")"}else{return"rgba("+[G.r,G.g,G.b,G.a].join(",")+")"}};G.normalize=function(){function H(J,K,I){return K<J?J:(K>I?I:K)}G.r=H(0,parseInt(G.r),255);G.g=H(0,parseInt(G.g),255);G.b=H(0,parseInt(G.b),255);G.a=H(0,G.a,1);return G};G.clone=function(){return B.color.make(G.r,G.b,G.g,G.a)};return G.normalize()};B.color.extract=function(D,C){var E;do{E=D.css(C).toLowerCase();if(E!=""&&E!="transparent"){break}D=D.parent()}while(!B.nodeName(D.get(0),"body"));if(E=="rgba(0, 0, 0, 0)"){E="transparent"}return B.color.parse(E)};B.color.parse=function(F){var E,C=B.color.make;if(E=/rgb\(\s*([0-9]{1,3})\s*,\s*([0-9]{1,3})\s*,\s*([0-9]{1,3})\s*\)/.exec(F)){return C(parseInt(E[1],10),parseInt(E[2],10),parseInt(E[3],10))}if(E=/rgba\(\s*([0-9]{1,3})\s*,\s*([0-9]{1,3})\s*,\s*([0-9]{1,3})\s*,\s*([0-9]+(?:\.[0-9]+)?)\s*\)/.exec(F)){return C(parseInt(E[1],10),parseInt(E[2],10),parseInt(E[3],10),parseFloat(E[4]))}if(E=/rgb\(\s*([0-9]+(?:\.[0-9]+)?)\%\s*,\s*([0-9]+(?:\.[0-9]+)?)\%\s*,\s*([0-9]+(?:\.[0-9]+)?)\%\s*\)/.exec(F)){return C(parseFloat(E[1])*2.55,parseFloat(E[2])*2.55,parseFloat(E[3])*2.55)}if(E=/rgba\(\s*([0-9]+(?:\.[0-9]+)?)\%\s*,\s*([0-9]+(?:\.[0-9]+)?)\%\s*,\s*([0-9]+(?:\.[0-9]+)?)\%\s*,\s*([0-9]+(?:\.[0-9]+)?)\s*\)/.exec(F)){return C(parseFloat(E[1])*2.55,parseFloat(E[2])*2.55,parseFloat(E[3])*2.55,parseFloat(E[4]))}if(E=/#([a-fA-F0-9]{2})([a-fA-F0-9]{2})([a-fA-F0-9]{2})/.exec(F)){return C(parseInt(E[1],16),parseInt(E[2],16),parseInt(E[3],16))}if(E=/#([a-fA-F0-9])([a-fA-F0-9])([a-fA-F0-9])/.exec(F)){return C(parseInt(E[1]+E[1],16),parseInt(E[2]+E[2],16),parseInt(E[3]+E[3],16))}var D=B.trim(F).toLowerCase();if(D=="transparent"){return C(255,255,255,0)}else{E=A[D]||[0,0,0];return C(E[0],E[1],E[2])}};var A={aqua:[0,255,255],azure:[240,255,255],beige:[245,245,220],black:[0,0,0],blue:[0,0,255],brown:[165,42,42],cyan:[0,255,255],darkblue:[0,0,139],darkcyan:[0,139,139],darkgrey:[169,169,169],darkgreen:[0,100,0],darkkhaki:[189,183,107],darkmagenta:[139,0,139],darkolivegreen:[85,107,47],darkorange:[255,140,0],darkorchid:[153,50,204],darkred:[139,0,0],darksalmon:[233,150,122],darkviolet:[148,0,211],fuchsia:[255,0,255],gold:[255,215,0],green:[0,128,0],indigo:[75,0,130],khaki:[240,230,140],lightblue:[173,216,230],lightcyan:[224,255,255],lightgreen:[144,238,144],lightgrey:[211,211,211],lightpink:[255,182,193],lightyellow:[255,255,224],lime:[0,255,0],magenta:[255,0,255],maroon:[128,0,0],navy:[0,0,128],olive:[128,128,0],orange:[255,165,0],pink:[255,192,203],purple:[128,0,128],violet:[128,0,128],red:[255,0,0],silver:[192,192,192],white:[255,255,255],yellow:[255,255,0]}})(jQuery);
+
+// the actual Flot code
+(function($) {
+
+ // Cache the prototype hasOwnProperty for faster access
+
+ var hasOwnProperty = Object.prototype.hasOwnProperty;
+
+ ///////////////////////////////////////////////////////////////////////////
+ // The Canvas object is a wrapper around an HTML5 <canvas> tag.
+ //
+ // @constructor
+ // @param {string} cls List of classes to apply to the canvas.
+ // @param {element} container Element onto which to append the canvas.
+ //
+ // Requiring a container is a little iffy, but unfortunately canvas
+ // operations don't work unless the canvas is attached to the DOM.
+
+ function Canvas(cls, container) {
+
+ var element = container.children("." + cls)[0];
+
+ if (element == null) {
+
+ element = document.createElement("canvas");
+ element.className = cls;
+
+ $(element).css({ direction: "ltr", position: "absolute", left: 0, top: 0 })
+ .appendTo(container);
+
+ // If HTML5 Canvas isn't available, fall back to [Ex|Flash]canvas
+
+ if (!element.getContext) {
+ if (window.G_vmlCanvasManager) {
+ element = window.G_vmlCanvasManager.initElement(element);
+ } else {
+ throw new Error("Canvas is not available. If you're using IE with a fall-back such as Excanvas, then there's either a mistake in your conditional include, or the page has no DOCTYPE and is rendering in Quirks Mode.");
+ }
+ }
+ }
+
+ this.element = element;
+
+ var context = this.context = element.getContext("2d");
+
+ // Determine the screen's ratio of physical to device-independent
+ // pixels. This is the ratio between the canvas width that the browser
+ // advertises and the number of pixels actually present in that space.
+
+ // The iPhone 4, for example, has a device-independent width of 320px,
+ // but its screen is actually 640px wide. It therefore has a pixel
+ // ratio of 2, while most normal devices have a ratio of 1.
+
+ var devicePixelRatio = window.devicePixelRatio || 1,
+ backingStoreRatio =
+ context.webkitBackingStorePixelRatio ||
+ context.mozBackingStorePixelRatio ||
+ context.msBackingStorePixelRatio ||
+ context.oBackingStorePixelRatio ||
+ context.backingStorePixelRatio || 1;
+
+ this.pixelRatio = devicePixelRatio / backingStoreRatio;
+
+ // Size the canvas to match the internal dimensions of its container
+
+ this.resize(container.width(), container.height());
+
+ // Collection of HTML div layers for text overlaid onto the canvas
+
+ this.textContainer = null;
+ this.text = {};
+
+ // Cache of text fragments and metrics, so we can avoid expensively
+ // re-calculating them when the plot is re-rendered in a loop.
+
+ this._textCache = {};
+ }
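+
+    // A minimal usage sketch (Canvas is an internal helper, not part of
+    // Flot's public API; the class name and selector below are illustrative):
+    //
+    //   var surface = new Canvas("flot-base", $("#placeholder"));
+    //   surface.context.strokeRect(10, 10, 50, 50);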
+
+ // Resizes the canvas to the given dimensions.
+ //
+ // @param {number} width New width of the canvas, in pixels.
+    // @param {number} height New height of the canvas, in pixels.
+
+ Canvas.prototype.resize = function(width, height) {
+
+ if (width <= 0 || height <= 0) {
+ throw new Error("Invalid dimensions for plot, width = " + width + ", height = " + height);
+ }
+
+ var element = this.element,
+ context = this.context,
+ pixelRatio = this.pixelRatio;
+
+ // Resize the canvas, increasing its density based on the display's
+ // pixel ratio; basically giving it more pixels without increasing the
+ // size of its element, to take advantage of the fact that retina
+ // displays have that many more pixels in the same advertised space.
+
+ // Resizing should reset the state (excanvas seems to be buggy though)
+
+ if (this.width != width) {
+ element.width = width * pixelRatio;
+ element.style.width = width + "px";
+ this.width = width;
+ }
+
+ if (this.height != height) {
+ element.height = height * pixelRatio;
+ element.style.height = height + "px";
+ this.height = height;
+ }
+
+ // Save the context, so we can reset in case we get replotted. The
+        // restore ensures that we're really back at the initial state, and
+ // should be safe even if we haven't saved the initial state yet.
+
+ context.restore();
+ context.save();
+
+ // Scale the coordinate space to match the display density; so even though we
+ // may have twice as many pixels, we still want lines and other drawing to
+ // appear at the same size; the extra pixels will just make them crisper.
+
+ context.scale(pixelRatio, pixelRatio);
+ };
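+
+    // Worked example: with width = 300 and pixelRatio = 2, the element gets
+    // 600 backing pixels but keeps a CSS width of "300px"; the
+    // context.scale(2, 2) call above then maps drawing coordinates back
+    // onto the 300px space, so shapes keep their size and simply get crisper.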
+
+ // Clears the entire canvas area, not including any overlaid HTML text
+
+ Canvas.prototype.clear = function() {
+ this.context.clearRect(0, 0, this.width, this.height);
+ };
+
+ // Finishes rendering the canvas, including managing the text overlay.
+
+ Canvas.prototype.render = function() {
+
+ var cache = this._textCache;
+
+ // For each text layer, add elements marked as active that haven't
+ // already been rendered, and remove those that are no longer active.
+
+ for (var layerKey in cache) {
+ if (hasOwnProperty.call(cache, layerKey)) {
+
+ var layer = this.getTextLayer(layerKey),
+ layerCache = cache[layerKey];
+
+ layer.hide();
+
+ for (var styleKey in layerCache) {
+ if (hasOwnProperty.call(layerCache, styleKey)) {
+ var styleCache = layerCache[styleKey];
+ for (var key in styleCache) {
+ if (hasOwnProperty.call(styleCache, key)) {
+
+ var positions = styleCache[key].positions;
+
+ for (var i = 0, position; position = positions[i]; i++) {
+ if (position.active) {
+ if (!position.rendered) {
+ layer.append(position.element);
+ position.rendered = true;
+ }
+ } else {
+ positions.splice(i--, 1);
+ if (position.rendered) {
+ position.element.detach();
+ }
+ }
+ }
+
+ if (positions.length == 0) {
+ delete styleCache[key];
+ }
+ }
+ }
+ }
+ }
+
+ layer.show();
+ }
+ }
+ };
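+
+    // (Typical lifecycle: removeText() marks positions inactive, addText()
+    //  re-activates or appends them, and render() reconciles the overlay
+    //  DOM with those flags in a single pass.)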
+
+ // Creates (if necessary) and returns the text overlay container.
+ //
+ // @param {string} classes String of space-separated CSS classes used to
+ // uniquely identify the text layer.
+ // @return {object} The jQuery-wrapped text-layer div.
+
+ Canvas.prototype.getTextLayer = function(classes) {
+
+ var layer = this.text[classes];
+
+ // Create the text layer if it doesn't exist
+
+ if (layer == null) {
+
+ // Create the text layer container, if it doesn't exist
+
+ if (this.textContainer == null) {
+ this.textContainer = $("<div class='flot-text'></div>")
+ .css({
+ position: "absolute",
+ top: 0,
+ left: 0,
+ bottom: 0,
+ right: 0,
+ 'font-size': "smaller",
+ color: "#545454"
+ })
+ .insertAfter(this.element);
+ }
+
+ layer = this.text[classes] = $("<div></div>")
+ .addClass(classes)
+ .css({
+ position: "absolute",
+ top: 0,
+ left: 0,
+ bottom: 0,
+ right: 0
+ })
+ .appendTo(this.textContainer);
+ }
+
+ return layer;
+ };
+
+ // Creates (if necessary) and returns a text info object.
+ //
+ // The object looks like this:
+ //
+ // {
+ // width: Width of the text's wrapper div.
+ // height: Height of the text's wrapper div.
+ // element: The jQuery-wrapped HTML div containing the text.
+ // positions: Array of positions at which this text is drawn.
+ // }
+ //
+ // The positions array contains objects that look like this:
+ //
+ // {
+ // active: Flag indicating whether the text should be visible.
+ // rendered: Flag indicating whether the text is currently visible.
+ // element: The jQuery-wrapped HTML div containing the text.
+ // x: X coordinate at which to draw the text.
+ // y: Y coordinate at which to draw the text.
+ // }
+ //
+ // Each position after the first receives a clone of the original element.
+ //
+    // The idea is that the width, height, and general 'identity' of the
+ // text is constant no matter where it is placed; the placements are a
+ // secondary property.
+ //
+ // Canvas maintains a cache of recently-used text info objects; getTextInfo
+ // either returns the cached element or creates a new entry.
+ //
+ // @param {string} layer A string of space-separated CSS classes uniquely
+ // identifying the layer containing this text.
+ // @param {string} text Text string to retrieve info for.
+ // @param {(string|object)=} font Either a string of space-separated CSS
+ // classes or a font-spec object, defining the text's font and style.
+ // @param {number=} angle Angle at which to rotate the text, in degrees.
+    //     Angle is currently unused; it will be implemented in the future.
+ // @param {number=} width Maximum width of the text before it wraps.
+ // @return {object} a text info object.
+
+ Canvas.prototype.getTextInfo = function(layer, text, font, angle, width) {
+
+ var textStyle, layerCache, styleCache, info;
+
+ // Cast the value to a string, in case we were given a number or such
+
+ text = "" + text;
+
+ // If the font is a font-spec object, generate a CSS font definition
+
+ if (typeof font === "object") {
+ textStyle = font.style + " " + font.variant + " " + font.weight + " " + font.size + "px/" + font.lineHeight + "px " + font.family;
+ } else {
+ textStyle = font;
+ }
+
+ // Retrieve (or create) the cache for the text's layer and styles
+
+ layerCache = this._textCache[layer];
+
+ if (layerCache == null) {
+ layerCache = this._textCache[layer] = {};
+ }
+
+ styleCache = layerCache[textStyle];
+
+ if (styleCache == null) {
+ styleCache = layerCache[textStyle] = {};
+ }
+
+ info = styleCache[text];
+
+ // If we can't find a matching element in our cache, create a new one
+
+ if (info == null) {
+
+ var element = $("<div></div>").html(text)
+ .css({
+ position: "absolute",
+ 'max-width': width,
+ top: -9999
+ })
+ .appendTo(this.getTextLayer(layer));
+
+ if (typeof font === "object") {
+ element.css({
+ font: textStyle,
+ color: font.color
+ });
+ } else if (typeof font === "string") {
+ element.addClass(font);
+ }
+
+ info = styleCache[text] = {
+ width: element.outerWidth(true),
+ height: element.outerHeight(true),
+ element: element,
+ positions: []
+ };
+
+ element.detach();
+ }
+
+ return info;
+ };
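+
+    // Illustrative call (the layer and font class names are arbitrary):
+    //
+    //   var info = surface.getTextInfo("flot-x-axis", "1.5", "tickLabel");
+    //   // info.width and info.height give the measured size of the label div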
+
+ // Adds a text string to the canvas text overlay.
+ //
+    // The text isn't drawn immediately; it is marked for rendering, which will
+ // result in its addition to the canvas on the next render pass.
+ //
+ // @param {string} layer A string of space-separated CSS classes uniquely
+ // identifying the layer containing this text.
+ // @param {number} x X coordinate at which to draw the text.
+ // @param {number} y Y coordinate at which to draw the text.
+ // @param {string} text Text string to draw.
+ // @param {(string|object)=} font Either a string of space-separated CSS
+ // classes or a font-spec object, defining the text's font and style.
+ // @param {number=} angle Angle at which to rotate the text, in degrees.
+    //     Angle is currently unused; it will be implemented in the future.
+ // @param {number=} width Maximum width of the text before it wraps.
+ // @param {string=} halign Horizontal alignment of the text; either "left",
+ // "center" or "right".
+ // @param {string=} valign Vertical alignment of the text; either "top",
+ // "middle" or "bottom".
+
+ Canvas.prototype.addText = function(layer, x, y, text, font, angle, width, halign, valign) {
+
+ var info = this.getTextInfo(layer, text, font, angle, width),
+ positions = info.positions;
+
+ // Tweak the div's position to match the text's alignment
+
+ if (halign == "center") {
+ x -= info.width / 2;
+ } else if (halign == "right") {
+ x -= info.width;
+ }
+
+ if (valign == "middle") {
+ y -= info.height / 2;
+ } else if (valign == "bottom") {
+ y -= info.height;
+ }
+
+ // Determine whether this text already exists at this position.
+ // If so, mark it for inclusion in the next render pass.
+
+ for (var i = 0, position; position = positions[i]; i++) {
+ if (position.x == x && position.y == y) {
+ position.active = true;
+ return;
+ }
+ }
+
+ // If the text doesn't exist at this position, create a new entry
+
+ // For the very first position we'll re-use the original element,
+ // while for subsequent ones we'll clone it.
+
+ position = {
+ active: true,
+ rendered: false,
+ element: positions.length ? info.element.clone() : info.element,
+ x: x,
+ y: y
+        };
+
+ positions.push(position);
+
+ // Move the element to its final position within the container
+
+ position.element.css({
+ top: Math.round(y),
+ left: Math.round(x),
+ 'text-align': halign // In case the text wraps
+ });
+ };
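+
+    // For instance, a centered tick label (the values are illustrative):
+    //
+    //   surface.addText("flot-x-axis", 100, 200, "1.5", "tickLabel",
+    //                   null, null, "center", "top");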
+
+ // Removes one or more text strings from the canvas text overlay.
+ //
+ // If no parameters are given, all text within the layer is removed.
+ //
+ // Note that the text is not immediately removed; it is simply marked as
+ // inactive, which will result in its removal on the next render pass.
+ // This avoids the performance penalty for 'clear and redraw' behavior,
+ // where we potentially get rid of all text on a layer, but will likely
+ // add back most or all of it later, as when redrawing axes, for example.
+ //
+ // @param {string} layer A string of space-separated CSS classes uniquely
+ // identifying the layer containing this text.
+ // @param {number=} x X coordinate of the text.
+ // @param {number=} y Y coordinate of the text.
+ // @param {string=} text Text string to remove.
+ // @param {(string|object)=} font Either a string of space-separated CSS
+ // classes or a font-spec object, defining the text's font and style.
+ // @param {number=} angle Angle at which the text is rotated, in degrees.
+    //     Angle is currently unused; it will be implemented in the future.
+
+ Canvas.prototype.removeText = function(layer, x, y, text, font, angle) {
+ if (text == null) {
+ var layerCache = this._textCache[layer];
+ if (layerCache != null) {
+ for (var styleKey in layerCache) {
+ if (hasOwnProperty.call(layerCache, styleKey)) {
+ var styleCache = layerCache[styleKey];
+ for (var key in styleCache) {
+ if (hasOwnProperty.call(styleCache, key)) {
+ var positions = styleCache[key].positions;
+ for (var i = 0, position; position = positions[i]; i++) {
+ position.active = false;
+ }
+ }
+ }
+ }
+ }
+ }
+ } else {
+ var positions = this.getTextInfo(layer, text, font, angle).positions;
+ for (var i = 0, position; position = positions[i]; i++) {
+ if (position.x == x && position.y == y) {
+ position.active = false;
+ }
+ }
+ }
+ };
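+
+    // e.g. surface.removeText("flot-x-axis") deactivates every label in
+    // that layer; the next render() pass then detaches their elements.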
+
+ ///////////////////////////////////////////////////////////////////////////
+ // The top-level container for the entire plot.
+
+ function Plot(placeholder, data_, options_, plugins) {
+        // data is in the form:
+ // [ series1, series2 ... ]
+ // where series is either just the data as [ [x1, y1], [x2, y2], ... ]
+ // or { data: [ [x1, y1], [x2, y2], ... ], label: "some label", ... }
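+        //
+        // For instance, both of these are accepted (the selector is
+        // illustrative):
+        //
+        //   $.plot($("#placeholder"), [ [[0, 3], [1, 8], [2, 5]] ]);
+        //   $.plot($("#placeholder"), [ { data: [[0, 3]], label: "up" } ]);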
+
+ var series = [],
+ options = {
+ // the color theme used for graphs
+ colors: ["#edc240", "#afd8f8", "#cb4b4b", "#4da74d", "#9440ed"],
+ legend: {
+ show: true,
+                noColumns: 1, // number of columns in legend table
+ labelFormatter: null, // fn: string -> string
+ labelBoxBorderColor: "#ccc", // border color for the little label boxes
+ container: null, // container (as jQuery object) to put legend in, null means default on top of graph
+ position: "ne", // position of default legend container within plot
+ margin: 5, // distance from grid edge to default legend container within plot
+ backgroundColor: null, // null means auto-detect
+ backgroundOpacity: 0.85, // set to 0 to avoid background
+ sorted: null // default to no legend sorting
+ },
+ xaxis: {
+ show: null, // null = auto-detect, true = always, false = never
+ position: "bottom", // or "top"
+ mode: null, // null or "time"
+ font: null, // null (derived from CSS in placeholder) or object like { size: 11, lineHeight: 13, style: "italic", weight: "bold", family: "sans-serif", variant: "small-caps" }
+ color: null, // base color, labels, ticks
+ tickColor: null, // possibly different color of ticks, e.g. "rgba(0,0,0,0.15)"
+ transform: null, // null or f: number -> number to transform axis
+ inverseTransform: null, // if transform is set, this should be the inverse function
+ min: null, // min. value to show, null means set automatically
+ max: null, // max. value to show, null means set automatically
+ autoscaleMargin: null, // margin in % to add if auto-setting min/max
+ ticks: null, // either [1, 3] or [[1, "a"], 3] or (fn: axis info -> ticks) or approx. number of ticks for auto-ticks
+ tickFormatter: null, // fn: number -> string
+ labelWidth: null, // size of tick labels in pixels
+ labelHeight: null,
+ reserveSpace: null, // whether to reserve space even if axis isn't shown
+ tickLength: null, // size in pixels of ticks, or "full" for whole line
+ alignTicksWithAxis: null, // axis number or null for no sync
+ tickDecimals: null, // no. of decimals, null means auto
+ tickSize: null, // number or [number, "unit"]
+ minTickSize: null // number or [number, "unit"]
+ },
+ yaxis: {
+ autoscaleMargin: 0.02,
+ position: "left" // or "right"
+ },
+ xaxes: [],
+ yaxes: [],
+ series: {
+ points: {
+ show: false,
+ radius: 3,
+ lineWidth: 2, // in pixels
+ fill: true,
+ fillColor: "#ffffff",
+ symbol: "circle" // or callback
+ },
+ lines: {
+ // we don't put in show: false so we can see
+ // whether lines were actively disabled
+ lineWidth: 2, // in pixels
+ fill: false,
+ fillColor: null,
+ steps: false
+ // Omit 'zero', so we can later default its value to
+ // match that of the 'fill' option.
+ },
+ bars: {
+ show: false,
+ lineWidth: 2, // in pixels
+ barWidth: 1, // in units of the x axis
+ fill: true,
+ fillColor: null,
+ align: "left", // "left", "right", or "center"
+ horizontal: false,
+ zero: true
+ },
+ shadowSize: 3,
+ highlightColor: null
+ },
+ grid: {
+ show: true,
+ aboveData: false,
+ color: "#545454", // primary color used for outline and labels
+ backgroundColor: null, // null for transparent, else color
+ borderColor: null, // set if different from the grid color
+ tickColor: null, // color for the ticks, e.g. "rgba(0,0,0,0.15)"
+ margin: 0, // distance from the canvas edge to the grid
+ labelMargin: 5, // in pixels
+ axisMargin: 8, // in pixels
+ borderWidth: 2, // in pixels
+ minBorderMargin: null, // in pixels, null means taken from points radius
+ markings: null, // array of ranges or fn: axes -> array of ranges
+ markingsColor: "#f4f4f4",
+ markingsLineWidth: 2,
+ // interactive stuff
+ clickable: false,
+ hoverable: false,
+ autoHighlight: true, // highlight in case mouse is near
+ mouseActiveRadius: 10 // how far the mouse can be away to activate an item
+ },
+ interaction: {
+ redrawOverlayInterval: 1000/60 // time between updates, -1 means in same flow
+ },
+ hooks: {}
+ },
+ surface = null, // the canvas for the plot itself
+ overlay = null, // canvas for interactive stuff on top of plot
+ eventHolder = null, // jQuery object that events should be bound to
+ ctx = null, octx = null,
+ xaxes = [], yaxes = [],
+ plotOffset = { left: 0, right: 0, top: 0, bottom: 0},
+ plotWidth = 0, plotHeight = 0,
+ hooks = {
+ processOptions: [],
+ processRawData: [],
+ processDatapoints: [],
+ processOffset: [],
+ drawBackground: [],
+ drawSeries: [],
+ draw: [],
+ bindEvents: [],
+ drawOverlay: [],
+ shutdown: []
+ },
+ plot = this;
+
+ // public functions
+ plot.setData = setData;
+ plot.setupGrid = setupGrid;
+ plot.draw = draw;
+ plot.getPlaceholder = function() { return placeholder; };
+ plot.getCanvas = function() { return surface.element; };
+ plot.getPlotOffset = function() { return plotOffset; };
+ plot.width = function () { return plotWidth; };
+ plot.height = function () { return plotHeight; };
+ plot.offset = function () {
+ var o = eventHolder.offset();
+ o.left += plotOffset.left;
+ o.top += plotOffset.top;
+ return o;
+ };
+ plot.getData = function () { return series; };
+ plot.getAxes = function () {
+ var res = {}, i;
+ $.each(xaxes.concat(yaxes), function (_, axis) {
+ if (axis)
+ res[axis.direction + (axis.n != 1 ? axis.n : "") + "axis"] = axis;
+ });
+ return res;
+ };
+ plot.getXAxes = function () { return xaxes; };
+ plot.getYAxes = function () { return yaxes; };
+ plot.c2p = canvasToAxisCoords;
+ plot.p2c = axisToCanvasCoords;
+ plot.getOptions = function () { return options; };
+ plot.highlight = highlight;
+ plot.unhighlight = unhighlight;
+ plot.triggerRedrawOverlay = triggerRedrawOverlay;
+ plot.pointOffset = function(point) {
+ return {
+ left: parseInt(xaxes[axisNumber(point, "x") - 1].p2c(+point.x) + plotOffset.left, 10),
+ top: parseInt(yaxes[axisNumber(point, "y") - 1].p2c(+point.y) + plotOffset.top, 10)
+ };
+ };
+ plot.shutdown = shutdown;
+ plot.resize = function () {
+ var width = placeholder.width(),
+ height = placeholder.height();
+ surface.resize(width, height);
+ overlay.resize(width, height);
+ };
+
+ // public attributes
+ plot.hooks = hooks;
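+
+ // Illustrative sketch of the public API above (not a normative example):
+ // once a plot is constructed, coordinates can be converted both ways, e.g.
+ // to position a custom DOM label over a data point:
+ //
+ //     var c = plot.p2c({ x: 2, y: 5 });         // axis coords -> { left, top }
+ //     var d = plot.c2p({ left: 150, top: 40 }); // canvas coords -> { x, y }
+ //     var o = plot.pointOffset({ x: 2, y: 5 }); // offset within the placeholder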
+
+ // initialize
+ initPlugins(plot);
+ parseOptions(options_);
+ setupCanvases();
+ setData(data_);
+ setupGrid();
+ draw();
+ bindEvents();
+
+
+ function executeHooks(hook, args) {
+ args = [plot].concat(args);
+ for (var i = 0; i < hook.length; ++i)
+ hook[i].apply(this, args);
+ }
+
+ function initPlugins() {
+
+ // References to key classes, allowing plugins to modify them
+
+ var classes = {
+ Canvas: Canvas
+ };
+
+ for (var i = 0; i < plugins.length; ++i) {
+ var p = plugins[i];
+ p.init(plot, classes);
+ if (p.options)
+ $.extend(true, options, p.options);
+ }
+ }
+
+ function parseOptions(opts) {
+
+ $.extend(true, options, opts);
+
+ // $.extend merges arrays, rather than replacing them. When fewer
+ // colors are provided than the size of the default palette, we
+ // end up with those colors plus the remaining defaults, which is
+ // not the expected behavior; avoid it by replacing them here.
+
+ if (opts && opts.colors) {
+ options.colors = opts.colors;
+ }
+
+ if (options.xaxis.color == null)
+ options.xaxis.color = $.color.parse(options.grid.color).scale('a', 0.22).toString();
+ if (options.yaxis.color == null)
+ options.yaxis.color = $.color.parse(options.grid.color).scale('a', 0.22).toString();
+
+ if (options.xaxis.tickColor == null) // grid.tickColor for back-compatibility
+ options.xaxis.tickColor = options.grid.tickColor || options.xaxis.color;
+ if (options.yaxis.tickColor == null) // grid.tickColor for back-compatibility
+ options.yaxis.tickColor = options.grid.tickColor || options.yaxis.color;
+
+ if (options.grid.borderColor == null)
+ options.grid.borderColor = options.grid.color;
+ if (options.grid.tickColor == null)
+ options.grid.tickColor = $.color.parse(options.grid.color).scale('a', 0.22).toString();
+
+ // Fill in defaults for axis options, including any unspecified
+ // font-spec fields, if a font-spec was provided.
+
+ // If no x/y axis options were provided, create one of each anyway,
+ // since the rest of the code assumes that they exist.
+
+ var i, axisOptions, axisCount,
+ fontDefaults = {
+ style: placeholder.css("font-style"),
+ size: Math.round(0.8 * (+placeholder.css("font-size").replace("px", "") || 13)),
+ variant: placeholder.css("font-variant"),
+ weight: placeholder.css("font-weight"),
+ family: placeholder.css("font-family")
+ };
+
+ fontDefaults.lineHeight = fontDefaults.size * 1.15;
+
+ axisCount = options.xaxes.length || 1;
+ for (i = 0; i < axisCount; ++i) {
+
+ axisOptions = options.xaxes[i];
+ if (axisOptions && !axisOptions.tickColor) {
+ axisOptions.tickColor = axisOptions.color;
+ }
+
+ axisOptions = $.extend(true, {}, options.xaxis, axisOptions);
+ options.xaxes[i] = axisOptions;
+
+ if (axisOptions.font) {
+ axisOptions.font = $.extend({}, fontDefaults, axisOptions.font);
+ if (!axisOptions.font.color) {
+ axisOptions.font.color = axisOptions.color;
+ }
+ }
+ }
+
+ axisCount = options.yaxes.length || 1;
+ for (i = 0; i < axisCount; ++i) {
+
+ axisOptions = options.yaxes[i];
+ if (axisOptions && !axisOptions.tickColor) {
+ axisOptions.tickColor = axisOptions.color;
+ }
+
+ axisOptions = $.extend(true, {}, options.yaxis, axisOptions);
+ options.yaxes[i] = axisOptions;
+
+ if (axisOptions.font) {
+ axisOptions.font = $.extend({}, fontDefaults, axisOptions.font);
+ if (!axisOptions.font.color) {
+ axisOptions.font.color = axisOptions.color;
+ }
+ }
+ }
+
+ // backwards compatibility, to be removed in future
+ if (options.xaxis.noTicks && options.xaxis.ticks == null)
+ options.xaxis.ticks = options.xaxis.noTicks;
+ if (options.yaxis.noTicks && options.yaxis.ticks == null)
+ options.yaxis.ticks = options.yaxis.noTicks;
+ if (options.x2axis) {
+ options.xaxes[1] = $.extend(true, {}, options.xaxis, options.x2axis);
+ options.xaxes[1].position = "top";
+ }
+ if (options.y2axis) {
+ options.yaxes[1] = $.extend(true, {}, options.yaxis, options.y2axis);
+ options.yaxes[1].position = "right";
+ }
+ if (options.grid.coloredAreas)
+ options.grid.markings = options.grid.coloredAreas;
+ if (options.grid.coloredAreasColor)
+ options.grid.markingsColor = options.grid.coloredAreasColor;
+ if (options.lines)
+ $.extend(true, options.series.lines, options.lines);
+ if (options.points)
+ $.extend(true, options.series.points, options.points);
+ if (options.bars)
+ $.extend(true, options.series.bars, options.bars);
+ if (options.shadowSize != null)
+ options.series.shadowSize = options.shadowSize;
+ if (options.highlightColor != null)
+ options.series.highlightColor = options.highlightColor;
+
+ // save options on axes for future reference
+ for (i = 0; i < options.xaxes.length; ++i)
+ getOrCreateAxis(xaxes, i + 1).options = options.xaxes[i];
+ for (i = 0; i < options.yaxes.length; ++i)
+ getOrCreateAxis(yaxes, i + 1).options = options.yaxes[i];
+
+ // add hooks from options
+ for (var n in hooks)
+ if (options.hooks[n] && options.hooks[n].length)
+ hooks[n] = hooks[n].concat(options.hooks[n]);
+
+ executeHooks(hooks.processOptions, [options]);
+ }
+
+ function setData(d) {
+ series = parseData(d);
+ fillInSeriesOptions();
+ processData();
+ }
+
+ function parseData(d) {
+ var res = [];
+ for (var i = 0; i < d.length; ++i) {
+ var s = $.extend(true, {}, options.series);
+
+ if (d[i].data != null) {
+ s.data = d[i].data; // move the data instead of deep-copy
+ delete d[i].data;
+
+ $.extend(true, s, d[i]);
+
+ d[i].data = s.data;
+ }
+ else
+ s.data = d[i];
+ res.push(s);
+ }
+
+ return res;
+ }
+
+ function axisNumber(obj, coord) {
+ var a = obj[coord + "axis"];
+ if (typeof a == "object") // if we got a real axis, extract number
+ a = a.n;
+ if (typeof a != "number")
+ a = 1; // default to first axis
+ return a;
+ }
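+
+ // For example (illustrative only): a series may reference its axis either
+ // by number or by the axis object itself, so both calls resolve to 2:
+ //
+ //     axisNumber({ yaxis: 2 }, "y");
+ //     axisNumber({ yaxis: { n: 2 } }, "y");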
+
+ function allAxes() {
+ // return flat array without annoying null entries
+ return $.grep(xaxes.concat(yaxes), function (a) { return a; });
+ }
+
+ function canvasToAxisCoords(pos) {
+ // return an object with x/y corresponding to all used axes
+ var res = {}, i, axis;
+ for (i = 0; i < xaxes.length; ++i) {
+ axis = xaxes[i];
+ if (axis && axis.used)
+ res["x" + axis.n] = axis.c2p(pos.left);
+ }
+
+ for (i = 0; i < yaxes.length; ++i) {
+ axis = yaxes[i];
+ if (axis && axis.used)
+ res["y" + axis.n] = axis.c2p(pos.top);
+ }
+
+ if (res.x1 !== undefined)
+ res.x = res.x1;
+ if (res.y1 !== undefined)
+ res.y = res.y1;
+
+ return res;
+ }
+
+ function axisToCanvasCoords(pos) {
+ // get canvas coords from the first pair of x/y found in pos
+ var res = {}, i, axis, key;
+
+ for (i = 0; i < xaxes.length; ++i) {
+ axis = xaxes[i];
+ if (axis && axis.used) {
+ key = "x" + axis.n;
+ if (pos[key] == null && axis.n == 1)
+ key = "x";
+
+ if (pos[key] != null) {
+ res.left = axis.p2c(pos[key]);
+ break;
+ }
+ }
+ }
+
+ for (i = 0; i < yaxes.length; ++i) {
+ axis = yaxes[i];
+ if (axis && axis.used) {
+ key = "y" + axis.n;
+ if (pos[key] == null && axis.n == 1)
+ key = "y";
+
+ if (pos[key] != null) {
+ res.top = axis.p2c(pos[key]);
+ break;
+ }
+ }
+ }
+
+ return res;
+ }
+
+ function getOrCreateAxis(axes, number) {
+ if (!axes[number - 1])
+ axes[number - 1] = {
+ n: number, // save the number for future reference
+ direction: axes == xaxes ? "x" : "y",
+ options: $.extend(true, {}, axes == xaxes ? options.xaxis : options.yaxis)
+ };
+
+ return axes[number - 1];
+ }
+
+ function fillInSeriesOptions() {
+
+ var neededColors = series.length, maxIndex = -1, i;
+
+ // Subtract the number of series that already have fixed colors or
+ // color indexes from the number that we still need to generate.
+
+ for (i = 0; i < series.length; ++i) {
+ var sc = series[i].color;
+ if (sc != null) {
+ neededColors--;
+ if (typeof sc == "number" && sc > maxIndex) {
+ maxIndex = sc;
+ }
+ }
+ }
+
+ // If any of the series have fixed color indexes, then we need to
+ // generate at least as many colors as the highest index.
+
+ if (neededColors <= maxIndex) {
+ neededColors = maxIndex + 1;
+ }
+
+ // Generate all the colors, using first the option colors and then
+ // variations on those colors once they're exhausted.
+
+ var c, colors = [], colorPool = options.colors,
+ colorPoolSize = colorPool.length, variation = 0;
+
+ for (i = 0; i < neededColors; i++) {
+
+ c = $.color.parse(colorPool[i % colorPoolSize] || "#666");
+
+ // Each time we exhaust the colors in the pool we adjust
+ // a scaling factor used to produce more variations on
+ // those colors. The factor alternates negative/positive
+ // to produce lighter/darker colors.
+
+ // Reset the variation after every few cycles, or else
+ // it will end up producing only white or black colors.
+
+ if (i % colorPoolSize == 0 && i) {
+ if (variation >= 0) {
+ if (variation < 0.5) {
+ variation = -variation - 0.2;
+ } else variation = 0;
+ } else variation = -variation;
+ }
+
+ colors[i] = c.scale('rgb', 1 + variation);
+ }
+
+ // Finalize the series options, filling in their colors
+
+ var colori = 0, s;
+ for (i = 0; i < series.length; ++i) {
+ s = series[i];
+
+ // assign colors
+ if (s.color == null) {
+ s.color = colors[colori].toString();
+ ++colori;
+ }
+ else if (typeof s.color == "number")
+ s.color = colors[s.color].toString();
+
+ // turn on lines automatically in case nothing is set
+ if (s.lines.show == null) {
+ var v, show = true;
+ for (v in s)
+ if (s[v] && s[v].show) {
+ show = false;
+ break;
+ }
+ if (show)
+ s.lines.show = true;
+ }
+
+ // If nothing was provided for lines.zero, default it to match
+ // lines.fill, since areas by default should extend to zero.
+
+ if (s.lines.zero == null) {
+ s.lines.zero = !!s.lines.fill;
+ }
+
+ // setup axes
+ s.xaxis = getOrCreateAxis(xaxes, axisNumber(s, "x"));
+ s.yaxis = getOrCreateAxis(yaxes, axisNumber(s, "y"));
+ }
+ }
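+
+ // A sketch of the assignment above (illustrative, not part of the library):
+ // the first series keeps its fixed CSS color, the second takes generated
+ // color index 3, and the third receives colors[0], the first pool color:
+ //
+ //     $.plot(placeholder, [
+ //         { data: d1, color: "#ff0000" }, // fixed color, skipped by the generator
+ //         { data: d2, color: 3 },         // index into the generated colors
+ //         { data: d3 }                    // auto-assigned colors[0]
+ //     ]);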
+
+ function processData() {
+ var topSentry = Number.POSITIVE_INFINITY,
+ bottomSentry = Number.NEGATIVE_INFINITY,
+ fakeInfinity = Number.MAX_VALUE,
+ i, j, k, m, length,
+ s, points, ps, x, y, axis, val, f, p,
+ data, format;
+
+ function updateAxis(axis, min, max) {
+ if (min < axis.datamin && min != -fakeInfinity)
+ axis.datamin = min;
+ if (max > axis.datamax && max != fakeInfinity)
+ axis.datamax = max;
+ }
+
+ $.each(allAxes(), function (_, axis) {
+ // init axis
+ axis.datamin = topSentry;
+ axis.datamax = bottomSentry;
+ axis.used = false;
+ });
+
+ for (i = 0; i < series.length; ++i) {
+ s = series[i];
+ s.datapoints = { points: [] };
+
+ executeHooks(hooks.processRawData, [ s, s.data, s.datapoints ]);
+ }
+
+ // first pass: clean and copy data
+ for (i = 0; i < series.length; ++i) {
+ s = series[i];
+
+ data = s.data;
+ format = s.datapoints.format;
+
+ if (!format) {
+ format = [];
+ // find out how to copy
+ format.push({ x: true, number: true, required: true });
+ format.push({ y: true, number: true, required: true });
+
+ if (s.bars.show || (s.lines.show && s.lines.fill)) {
+ var autoscale = !!((s.bars.show && s.bars.zero) || (s.lines.show && s.lines.zero));
+ format.push({ y: true, number: true, required: false, defaultValue: 0, autoscale: autoscale });
+ if (s.bars.horizontal) {
+ delete format[format.length - 1].y;
+ format[format.length - 1].x = true;
+ }
+ }
+
+ s.datapoints.format = format;
+ }
+
+ if (s.datapoints.pointsize != null)
+ continue; // already filled in
+
+ s.datapoints.pointsize = format.length;
+
+ ps = s.datapoints.pointsize;
+ points = s.datapoints.points;
+
+ var insertSteps = s.lines.show && s.lines.steps;
+ s.xaxis.used = s.yaxis.used = true;
+
+ for (j = k = 0; j < data.length; ++j, k += ps) {
+ p = data[j];
+
+ var nullify = p == null;
+ if (!nullify) {
+ for (m = 0; m < ps; ++m) {
+ val = p[m];
+ f = format[m];
+
+ if (f) {
+ if (f.number && val != null) {
+ val = +val; // convert to number
+ if (isNaN(val))
+ val = null;
+ else if (val == Infinity)
+ val = fakeInfinity;
+ else if (val == -Infinity)
+ val = -fakeInfinity;
+ }
+
+ if (val == null) {
+ if (f.required)
+ nullify = true;
+
+ if (f.defaultValue != null)
+ val = f.defaultValue;
+ }
+ }
+
+ points[k + m] = val;
+ }
+ }
+
+ if (nullify) {
+ for (m = 0; m < ps; ++m) {
+ val = points[k + m];
+ if (val != null) {
+ f = format[m];
+ // extract min/max info
+ if (f.autoscale) {
+ if (f.x) {
+ updateAxis(s.xaxis, val, val);
+ }
+ if (f.y) {
+ updateAxis(s.yaxis, val, val);
+ }
+ }
+ }
+ points[k + m] = null;
+ }
+ }
+ else {
+ // a little bit of line-specific stuff that
+ // perhaps shouldn't be here, but lacking
+ // better means...
+ if (insertSteps && k > 0
+ && points[k - ps] != null
+ && points[k - ps] != points[k]
+ && points[k - ps + 1] != points[k + 1]) {
+ // copy the point to make room for a middle point
+ for (m = 0; m < ps; ++m)
+ points[k + ps + m] = points[k + m];
+
+ // middle point has same y
+ points[k + 1] = points[k - ps + 1];
+
+ // we've added a point, better reflect that
+ k += ps;
+ }
+ }
+ }
+ }
+
+ // give the hooks a chance to run
+ for (i = 0; i < series.length; ++i) {
+ s = series[i];
+
+ executeHooks(hooks.processDatapoints, [ s, s.datapoints]);
+ }
+
+ // second pass: find datamax/datamin for auto-scaling
+ for (i = 0; i < series.length; ++i) {
+ s = series[i];
+ points = s.datapoints.points;
+ ps = s.datapoints.pointsize;
+ format = s.datapoints.format;
+
+ var xmin = topSentry, ymin = topSentry,
+ xmax = bottomSentry, ymax = bottomSentry;
+
+ for (j = 0; j < points.length; j += ps) {
+ if (points[j] == null)
+ continue;
+
+ for (m = 0; m < ps; ++m) {
+ val = points[j + m];
+ f = format[m];
+ if (!f || f.autoscale === false || val == fakeInfinity || val == -fakeInfinity)
+ continue;
+
+ if (f.x) {
+ if (val < xmin)
+ xmin = val;
+ if (val > xmax)
+ xmax = val;
+ }
+ if (f.y) {
+ if (val < ymin)
+ ymin = val;
+ if (val > ymax)
+ ymax = val;
+ }
+ }
+ }
+
+ if (s.bars.show) {
+ // make sure we got room for the bar on the dancing floor
+ var delta;
+
+ switch (s.bars.align) {
+ case "left":
+ delta = 0;
+ break;
+ case "right":
+ delta = -s.bars.barWidth;
+ break;
+ case "center":
+ delta = -s.bars.barWidth / 2;
+ break;
+ default:
+ throw new Error("Invalid bar alignment: " + s.bars.align);
+ }
+
+ if (s.bars.horizontal) {
+ ymin += delta;
+ ymax += delta + s.bars.barWidth;
+ }
+ else {
+ xmin += delta;
+ xmax += delta + s.bars.barWidth;
+ }
+ }
+
+ updateAxis(s.xaxis, xmin, xmax);
+ updateAxis(s.yaxis, ymin, ymax);
+ }
+
+ $.each(allAxes(), function (_, axis) {
+ if (axis.datamin == topSentry)
+ axis.datamin = null;
+ if (axis.datamax == bottomSentry)
+ axis.datamax = null;
+ });
+ }
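+
+ // Illustrative sketch of the normalized result (not part of Flot): after
+ // processData, a plain line series built from [ [1, 2], null, [3, 4] ] has
+ //
+ //     s.datapoints.pointsize == 2
+ //     s.datapoints.points == [ 1, 2, null, null, 3, 4 ]
+ //
+ // i.e. a flat array with 'pointsize' slots per point, nulls marking gaps.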
+
+ function setupCanvases() {
+
+ // Make sure the placeholder is clear of everything except canvases
+ // from a previous plot in this container that we'll try to re-use.
+
+ placeholder.css("padding", 0) // padding messes up the positioning
+ .children(":not(.flot-base,.flot-overlay)").remove();
+
+ if (placeholder.css("position") == 'static')
+ placeholder.css("position", "relative"); // for positioning labels and overlay
+
+ surface = new Canvas("flot-base", placeholder);
+ overlay = new Canvas("flot-overlay", placeholder); // overlay canvas for interactive features
+
+ ctx = surface.context;
+ octx = overlay.context;
+
+ // define which element we're listening for events on
+ eventHolder = $(overlay.element).unbind();
+
+ // If we're re-using a plot object, shut down the old one
+
+ var existing = placeholder.data("plot");
+
+ if (existing) {
+ existing.shutdown();
+ overlay.clear();
+ }
+
+ // save in case we get replotted
+ placeholder.data("plot", plot);
+ }
+
+ function bindEvents() {
+ // bind events
+ if (options.grid.hoverable) {
+ eventHolder.mousemove(onMouseMove);
+
+ // Use bind, rather than .mouseleave, because we officially
+ // still support jQuery 1.2.6, which doesn't define a shortcut
+ // for mouseenter or mouseleave. This was a bug/oversight that
+ // was fixed somewhere around 1.3.x. We can return to using
+ // .mouseleave when we drop support for 1.2.6.
+
+ eventHolder.bind("mouseleave", onMouseLeave);
+ }
+
+ if (options.grid.clickable)
+ eventHolder.click(onClick);
+
+ executeHooks(hooks.bindEvents, [eventHolder]);
+ }
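+
+ // Illustrative consumer of these events (a sketch, not part of the
+ // library): with grid: { hoverable: true, clickable: true }, the
+ // placeholder receives "plothover"/"plotclick" events carrying the
+ // position and the nearby data item, if any:
+ //
+ //     $("#placeholder").bind("plothover", function (event, pos, item) {
+ //         if (item) {
+ //             console.log(item.series.label, item.datapoint);
+ //         }
+ //     });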
+
+ function shutdown() {
+ if (redrawTimeout)
+ clearTimeout(redrawTimeout);
+
+ eventHolder.unbind("mousemove", onMouseMove);
+ eventHolder.unbind("mouseleave", onMouseLeave);
+ eventHolder.unbind("click", onClick);
+
+ executeHooks(hooks.shutdown, [eventHolder]);
+ }
+
+ function setTransformationHelpers(axis) {
+ // set helper functions on the axis, assumes plot area
+ // has been computed already
+
+ function identity(x) { return x; }
+
+ var s, m, t = axis.options.transform || identity,
+ it = axis.options.inverseTransform;
+
+ // precompute how much the axis is scaling a point
+ // in canvas space
+ if (axis.direction == "x") {
+ s = axis.scale = plotWidth / Math.abs(t(axis.max) - t(axis.min));
+ m = Math.min(t(axis.max), t(axis.min));
+ }
+ else {
+ s = axis.scale = plotHeight / Math.abs(t(axis.max) - t(axis.min));
+ s = -s;
+ m = Math.max(t(axis.max), t(axis.min));
+ }
+
+ // data point to canvas coordinate
+ if (t == identity) // slight optimization
+ axis.p2c = function (p) { return (p - m) * s; };
+ else
+ axis.p2c = function (p) { return (t(p) - m) * s; };
+ // canvas coordinate to data point
+ if (!it)
+ axis.c2p = function (c) { return m + c / s; };
+ else
+ axis.c2p = function (c) { return it(m + c / s); };
+ }
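+
+ // A sketch of how these helpers interact with the transform options
+ // (illustrative only): a log-scale y axis needs just the pair of
+ // functions below, and p2c/c2p then map through them automatically:
+ //
+ //     yaxis: {
+ //         transform: function (v) { return Math.log(v); },
+ //         inverseTransform: function (v) { return Math.exp(v); },
+ //         min: 0.1 // keep the transform away from log(0)
+ //     }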
+
+ function measureTickLabels(axis) {
+
+ var opts = axis.options,
+ ticks = axis.ticks || [],
+ labelWidth = opts.labelWidth || 0,
+ labelHeight = opts.labelHeight || 0,
+ maxWidth = labelWidth || (axis.direction == "x" ? Math.floor(surface.width / (ticks.length || 1)) : null),
+ legacyStyles = axis.direction + "Axis " + axis.direction + axis.n + "Axis",
+ layer = "flot-" + axis.direction + "-axis flot-" + axis.direction + axis.n + "-axis " + legacyStyles,
+ font = opts.font || "flot-tick-label tickLabel";
+
+ for (var i = 0; i < ticks.length; ++i) {
+
+ var t = ticks[i];
+
+ if (!t.label)
+ continue;
+
+ var info = surface.getTextInfo(layer, t.label, font, null, maxWidth);
+
+ labelWidth = Math.max(labelWidth, info.width);
+ labelHeight = Math.max(labelHeight, info.height);
+ }
+
+ axis.labelWidth = opts.labelWidth || labelWidth;
+ axis.labelHeight = opts.labelHeight || labelHeight;
+ }
+
+ function allocateAxisBoxFirstPhase(axis) {
+ // find the bounding box of the axis by looking at label
+ // widths/heights and ticks, make room by diminishing the
+ // plotOffset; this first phase only looks at one
+ // dimension per axis, the other dimension depends on the
+ // other axes so will have to wait
+
+ var lw = axis.labelWidth,
+ lh = axis.labelHeight,
+ pos = axis.options.position,
+ tickLength = axis.options.tickLength,
+ axisMargin = options.grid.axisMargin,
+ padding = options.grid.labelMargin,
+ all = axis.direction == "x" ? xaxes : yaxes,
+ index, innermost;
+
+ // determine axis margin
+ var samePosition = $.grep(all, function (a) {
+ return a && a.options.position == pos && a.reserveSpace;
+ });
+ if ($.inArray(axis, samePosition) == samePosition.length - 1)
+ axisMargin = 0; // outermost
+
+ // determine tick length - if we're innermost, we can use "full"
+ if (tickLength == null) {
+ var sameDirection = $.grep(all, function (a) {
+ return a && a.reserveSpace;
+ });
+
+ innermost = $.inArray(axis, sameDirection) == 0;
+ if (innermost)
+ tickLength = "full";
+ else
+ tickLength = 5;
+ }
+
+ if (!isNaN(+tickLength))
+ padding += +tickLength;
+
+ // compute box
+ if (axis.direction == "x") {
+ lh += padding;
+
+ if (pos == "bottom") {
+ plotOffset.bottom += lh + axisMargin;
+ axis.box = { top: surface.height - plotOffset.bottom, height: lh };
+ }
+ else {
+ axis.box = { top: plotOffset.top + axisMargin, height: lh };
+ plotOffset.top += lh + axisMargin;
+ }
+ }
+ else {
+ lw += padding;
+
+ if (pos == "left") {
+ axis.box = { left: plotOffset.left + axisMargin, width: lw };
+ plotOffset.left += lw + axisMargin;
+ }
+ else {
+ plotOffset.right += lw + axisMargin;
+ axis.box = { left: surface.width - plotOffset.right, width: lw };
+ }
+ }
+
+ // save for future reference
+ axis.position = pos;
+ axis.tickLength = tickLength;
+ axis.box.padding = padding;
+ axis.innermost = innermost;
+ }
+
+ function allocateAxisBoxSecondPhase(axis) {
+ // now that all axis boxes have been placed in one
+ // dimension, we can set the remaining dimension coordinates
+ if (axis.direction == "x") {
+ axis.box.left = plotOffset.left - axis.labelWidth / 2;
+ axis.box.width = surface.width - plotOffset.left - plotOffset.right + axis.labelWidth;
+ }
+ else {
+ axis.box.top = plotOffset.top - axis.labelHeight / 2;
+ axis.box.height = surface.height - plotOffset.bottom - plotOffset.top + axis.labelHeight;
+ }
+ }
+
+ function adjustLayoutForThingsStickingOut() {
+ // possibly adjust plot offset to ensure everything stays
+ // inside the canvas and isn't clipped off
+
+ var minMargin = options.grid.minBorderMargin,
+ margins = { x: 0, y: 0 }, i, axis;
+
+ // check stuff from the plot (FIXME: this should just read
+ // a value from the series, otherwise it's impossible to
+ // customize)
+ if (minMargin == null) {
+ minMargin = 0;
+ for (i = 0; i < series.length; ++i)
+ minMargin = Math.max(minMargin, 2 * (series[i].points.radius + series[i].points.lineWidth/2));
+ }
+
+ margins.x = margins.y = Math.ceil(minMargin);
+
+ // check axis labels, note we don't check the actual
+ // labels but instead use the overall width/height to not
+ // jump as much around with replots
+ $.each(allAxes(), function (_, axis) {
+ var dir = axis.direction;
+ if (axis.reserveSpace)
+ margins[dir] = Math.ceil(Math.max(margins[dir], (dir == "x" ? axis.labelWidth : axis.labelHeight) / 2));
+ });
+
+ plotOffset.left = Math.max(margins.x, plotOffset.left);
+ plotOffset.right = Math.max(margins.x, plotOffset.right);
+ plotOffset.top = Math.max(margins.y, plotOffset.top);
+ plotOffset.bottom = Math.max(margins.y, plotOffset.bottom);
+ }
+
+ function setupGrid() {
+ var i, axes = allAxes(), showGrid = options.grid.show;
+
+ // Initialize the plot's offset from the edge of the canvas
+
+ for (var a in plotOffset) {
+ var margin = options.grid.margin || 0;
+ plotOffset[a] = typeof margin == "number" ? margin : margin[a] || 0;
+ }
+
+ executeHooks(hooks.processOffset, [plotOffset]);
+
+ // If the grid is visible, add its border width to the offset
+
+ for (var a in plotOffset) {
+ if(typeof(options.grid.borderWidth) == "object") {
+ plotOffset[a] += showGrid ? options.grid.borderWidth[a] : 0;
+ }
+ else {
+ plotOffset[a] += showGrid ? options.grid.borderWidth : 0;
+ }
+ }
+
+ // init axes
+ $.each(axes, function (_, axis) {
+ axis.show = axis.options.show;
+ if (axis.show == null)
+ axis.show = axis.used; // by default an axis is visible if it's got data
+
+ axis.reserveSpace = axis.show || axis.options.reserveSpace;
+
+ setRange(axis);
+ });
+
+ if (showGrid) {
+
+ var allocatedAxes = $.grep(axes, function (axis) { return axis.reserveSpace; });
+
+ $.each(allocatedAxes, function (_, axis) {
+ // make the ticks
+ setupTickGeneration(axis);
+ setTicks(axis);
+ snapRangeToTicks(axis, axis.ticks);
+ // find labelWidth/Height for axis
+ measureTickLabels(axis);
+ });
+
+ // with all dimensions calculated, we can compute the
+ // axis bounding boxes, start from the outside
+ // (reverse order)
+ for (i = allocatedAxes.length - 1; i >= 0; --i)
+ allocateAxisBoxFirstPhase(allocatedAxes[i]);
+
+ // make sure we've got enough space for things that
+ // might stick out
+ adjustLayoutForThingsStickingOut();
+
+ $.each(allocatedAxes, function (_, axis) {
+ allocateAxisBoxSecondPhase(axis);
+ });
+ }
+
+ plotWidth = surface.width - plotOffset.left - plotOffset.right;
+ plotHeight = surface.height - plotOffset.bottom - plotOffset.top;
+
+ // now that we have the proper plot dimensions, we can compute the scaling
+ $.each(axes, function (_, axis) {
+ setTransformationHelpers(axis);
+ });
+
+ if (showGrid) {
+ drawAxisLabels();
+ }
+
+ insertLegend();
+ }
+
+ function setRange(axis) {
+ var opts = axis.options,
+ min = +(opts.min != null ? opts.min : axis.datamin),
+ max = +(opts.max != null ? opts.max : axis.datamax),
+ delta = max - min;
+
+ if (delta == 0.0) {
+ // degenerate case
+ var widen = max == 0 ? 1 : 0.01;
+
+ if (opts.min == null)
+ min -= widen;
+ // always widen max if we couldn't widen min to ensure we
+ // don't fall into min == max which doesn't work
+ if (opts.max == null || opts.min != null)
+ max += widen;
+ }
+ else {
+ // consider autoscaling
+ var margin = opts.autoscaleMargin;
+ if (margin != null) {
+ if (opts.min == null) {
+ min -= delta * margin;
+ // make sure we don't go below zero if all values
+ // are positive
+ if (min < 0 && axis.datamin != null && axis.datamin >= 0)
+ min = 0;
+ }
+ if (opts.max == null) {
+ max += delta * margin;
+ if (max > 0 && axis.datamax != null && axis.datamax <= 0)
+ max = 0;
+ }
+ }
+ }
+ axis.min = min;
+ axis.max = max;
+ }
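+
+ // For example (an illustrative sketch): with the default yaxis setting
+ // autoscaleMargin: 0.02 and data spanning 0..100, the range becomes
+ // [0, 102] -- max is widened by delta * margin = 2, while min is clamped
+ // back to 0 because no data point is negative.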
+
+ function setupTickGeneration(axis) {
+ var opts = axis.options;
+
+ // estimate number of ticks
+ var noTicks;
+ if (typeof opts.ticks == "number" && opts.ticks > 0)
+ noTicks = opts.ticks;
+ else
+ // heuristic based on the model a*sqrt(x) fitted to
+ // some data points that seemed reasonable
+ noTicks = 0.3 * Math.sqrt(axis.direction == "x" ? surface.width : surface.height);
+
+ var delta = (axis.max - axis.min) / noTicks,
+ dec = -Math.floor(Math.log(delta) / Math.LN10),
+ maxDec = opts.tickDecimals;
+
+ if (maxDec != null && dec > maxDec) {
+ dec = maxDec;
+ }
+
+ var magn = Math.pow(10, -dec),
+ norm = delta / magn, // norm is between 1.0 and 10.0
+ size;
+
+ if (norm < 1.5) {
+ size = 1;
+ } else if (norm < 3) {
+ size = 2;
+ // special case for 2.5, requires an extra decimal
+ if (norm > 2.25 && (maxDec == null || dec + 1 <= maxDec)) {
+ size = 2.5;
+ ++dec;
+ }
+ } else if (norm < 7.5) {
+ size = 5;
+ } else {
+ size = 10;
+ }
+
+ size *= magn;
+
+ if (opts.minTickSize != null && size < opts.minTickSize) {
+ size = opts.minTickSize;
+ }
+
+ axis.delta = delta;
+ axis.tickDecimals = Math.max(0, maxDec != null ? maxDec : dec);
+ axis.tickSize = opts.tickSize || size;
+
+ // Time mode was moved to a plug-in in 0.8, but since so many people use it
+ // we'll add an especially friendly reminder to make sure they remembered to
+ // include it.
+
+ if (opts.mode == "time" && !axis.tickGenerator) {
+ throw new Error("Time mode requires the flot.time plugin.");
+ }
+
+ // Flot supports base-10 axes; any other mode is handled by a plug-in,
+ // like flot.time.js.
+
+ if (!axis.tickGenerator) {
+
+ axis.tickGenerator = function (axis) {
+
+ var ticks = [],
+ start = floorInBase(axis.min, axis.tickSize),
+ i = 0,
+ v = Number.NaN,
+ prev;
+
+ do {
+ prev = v;
+ v = start + i * axis.tickSize;
+ ticks.push(v);
+ ++i;
+ } while (v < axis.max && v != prev);
+ return ticks;
+ };
+
+ axis.tickFormatter = function (value, axis) {
+
+ var factor = axis.tickDecimals ? Math.pow(10, axis.tickDecimals) : 1;
+ var formatted = "" + Math.round(value * factor) / factor;
+
+ // If tickDecimals was specified, ensure that we have exactly that
+ // much precision; otherwise default to the value's own precision.
+
+ if (axis.tickDecimals != null) {
+ var decimal = formatted.indexOf(".");
+ var precision = decimal == -1 ? 0 : formatted.length - decimal - 1;
+ if (precision < axis.tickDecimals) {
+ return (precision ? formatted : formatted + ".") + ("" + factor).substr(1, axis.tickDecimals - precision);
+ }
+ }
+
+ return formatted;
+ };
+ }
+
+ if ($.isFunction(opts.tickFormatter))
+ axis.tickFormatter = function (v, axis) { return "" + opts.tickFormatter(v, axis); };
+
+ if (opts.alignTicksWithAxis != null) {
+ var otherAxis = (axis.direction == "x" ? xaxes : yaxes)[opts.alignTicksWithAxis - 1];
+ if (otherAxis && otherAxis.used && otherAxis != axis) {
+ // consider snapping min/max to outermost nice ticks
+ var niceTicks = axis.tickGenerator(axis);
+ if (niceTicks.length > 0) {
+ if (opts.min == null)
+ axis.min = Math.min(axis.min, niceTicks[0]);
+ if (opts.max == null && niceTicks.length > 1)
+ axis.max = Math.max(axis.max, niceTicks[niceTicks.length - 1]);
+ }
+
+ axis.tickGenerator = function (axis) {
+ // copy ticks, scaled to this axis
+ var ticks = [], v, i;
+ for (i = 0; i < otherAxis.ticks.length; ++i) {
+ v = (otherAxis.ticks[i].v - otherAxis.min) / (otherAxis.max - otherAxis.min);
+ v = axis.min + v * (axis.max - axis.min);
+ ticks.push(v);
+ }
+ return ticks;
+ };
+
+ // we might need an extra decimal since forced
+ // ticks don't necessarily fit naturally
+ if (!axis.mode && opts.tickDecimals == null) {
+ var extraDec = Math.max(0, -Math.floor(Math.log(axis.delta) / Math.LN10) + 1),
+ ts = axis.tickGenerator(axis);
+
+ // only proceed if the tick interval rounded
+ // with an extra decimal doesn't give us a
+ // zero at end
+ if (!(ts.length > 1 && /\..*0$/.test((ts[1] - ts[0]).toFixed(extraDec))))
+ axis.tickDecimals = extraDec;
+ }
+ }
+ }
+ }
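+
+ // Worked example of the sizing above (illustrative only): for an x axis
+ // spanning 0..10 on an 800px-wide canvas,
+ //
+ //     noTicks = 0.3 * sqrt(800) ~= 8.49
+ //     delta   = 10 / 8.49      ~= 1.18
+ //     dec = 0, magn = 1, norm ~= 1.18  =>  size = 1
+ //
+ // so the default generator emits ticks at 0, 1, 2, ..., 10.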
+
+ function setTicks(axis) {
+ var oticks = axis.options.ticks, ticks = [];
+ if (oticks == null || (typeof oticks == "number" && oticks > 0))
+ ticks = axis.tickGenerator(axis);
+ else if (oticks) {
+ if ($.isFunction(oticks))
+ // generate the ticks
+ ticks = oticks(axis);
+ else
+ ticks = oticks;
+ }
+
+ // clean up/labelify the supplied ticks, copy them over
+ var i, v;
+ axis.ticks = [];
+ for (i = 0; i < ticks.length; ++i) {
+ var label = null;
+ var t = ticks[i];
+ if (typeof t == "object") {
+ v = +t[0];
+ if (t.length > 1)
+ label = t[1];
+ }
+ else
+ v = +t;
+ if (label == null)
+ label = axis.tickFormatter(v, axis);
+ if (!isNaN(v))
+ axis.ticks.push({ v: v, label: label });
+ }
+ }
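+
+ // The accepted shapes of the 'ticks' option, as an illustrative sketch:
+ //
+ //     xaxis: { ticks: 5 }                     // approximate tick count
+ //     xaxis: { ticks: [ 0, 5, [10, "ten"] ] } // values, or [value, label]
+ //     xaxis: { ticks: function (axis) {       // custom generator
+ //         return [ axis.min, (axis.min + axis.max) / 2, axis.max ];
+ //     } }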
+
+ function snapRangeToTicks(axis, ticks) {
+ if (axis.options.autoscaleMargin && ticks.length > 0) {
+ // snap to ticks
+ if (axis.options.min == null)
+ axis.min = Math.min(axis.min, ticks[0].v);
+ if (axis.options.max == null && ticks.length > 1)
+ axis.max = Math.max(axis.max, ticks[ticks.length - 1].v);
+ }
+ }
+
+ function draw() {
+
+ surface.clear();
+
+ executeHooks(hooks.drawBackground, [ctx]);
+
+ var grid = options.grid;
+
+ // draw background, if any
+ if (grid.show && grid.backgroundColor)
+ drawBackground();
+
+ if (grid.show && !grid.aboveData) {
+ drawGrid();
+ }
+
+ for (var i = 0; i < series.length; ++i) {
+ executeHooks(hooks.drawSeries, [ctx, series[i]]);
+ drawSeries(series[i]);
+ }
+
+ executeHooks(hooks.draw, [ctx]);
+
+ if (grid.show && grid.aboveData) {
+ drawGrid();
+ }
+
+ surface.render();
+
+ // A draw implies that either the axes or data have changed, so we
+ // should probably update the overlay highlights as well.
+
+ triggerRedrawOverlay();
+ }
+
+ function extractRange(ranges, coord) {
+ var axis, from, to, key, axes = allAxes();
+
+ for (var i = 0; i < axes.length; ++i) {
+ axis = axes[i];
+ if (axis.direction == coord) {
+ key = coord + axis.n + "axis";
+ if (!ranges[key] && axis.n == 1)
+ key = coord + "axis"; // support x1axis as xaxis
+ if (ranges[key]) {
+ from = ranges[key].from;
+ to = ranges[key].to;
+ break;
+ }
+ }
+ }
+
+ // backwards-compat stuff - to be removed in future
+ if (!ranges[key]) {
+ axis = coord == "x" ? xaxes[0] : yaxes[0];
+ from = ranges[coord + "1"];
+ to = ranges[coord + "2"];
+ }
+
+ // auto-reverse as an added bonus
+ if (from != null && to != null && from > to) {
+ var tmp = from;
+ from = to;
+ to = tmp;
+ }
+
+ return { from: from, to: to, axis: axis };
+ }
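+
+ // Illustrative example of the range objects this consumes (not part of
+ // the library): grid markings use the same shape, e.g.
+ //
+ //     grid: {
+ //         markings: [
+ //             { xaxis: { from: 2, to: 4 }, color: "#fcc" }, // vertical band
+ //             { yaxis: { from: 1, to: 1 } }                 // line at y = 1
+ //         ]
+ //     }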
+
+ function drawBackground() {
+ ctx.save();
+ ctx.translate(plotOffset.left, plotOffset.top);
+
+ ctx.fillStyle = getColorOrGradient(options.grid.backgroundColor, plotHeight, 0, "rgba(255, 255, 255, 0)");
+ ctx.fillRect(0, 0, plotWidth, plotHeight);
+ ctx.restore();
+ }
+
+ function drawGrid() {
+ var i, axes, bw, bc;
+
+ ctx.save();
+ ctx.translate(plotOffset.left, plotOffset.top);
+
+ // draw markings
+ var markings = options.grid.markings;
+ if (markings) {
+ if ($.isFunction(markings)) {
+ axes = plot.getAxes();
+ // xmin etc. is backwards compatibility, to be
+ // removed in the future
+ axes.xmin = axes.xaxis.min;
+ axes.xmax = axes.xaxis.max;
+ axes.ymin = axes.yaxis.min;
+ axes.ymax = axes.yaxis.max;
+
+ markings = markings(axes);
+ }
+
+ for (i = 0; i < markings.length; ++i) {
+ var m = markings[i],
+ xrange = extractRange(m, "x"),
+ yrange = extractRange(m, "y");
+
+ // fill in missing
+ if (xrange.from == null)
+ xrange.from = xrange.axis.min;
+ if (xrange.to == null)
+ xrange.to = xrange.axis.max;
+ if (yrange.from == null)
+ yrange.from = yrange.axis.min;
+ if (yrange.to == null)
+ yrange.to = yrange.axis.max;
+
+ // clip
+ if (xrange.to < xrange.axis.min || xrange.from > xrange.axis.max ||
+ yrange.to < yrange.axis.min || yrange.from > yrange.axis.max)
+ continue;
+
+ xrange.from = Math.max(xrange.from, xrange.axis.min);
+ xrange.to = Math.min(xrange.to, xrange.axis.max);
+ yrange.from = Math.max(yrange.from, yrange.axis.min);
+ yrange.to = Math.min(yrange.to, yrange.axis.max);
+
+ if (xrange.from == xrange.to && yrange.from == yrange.to)
+ continue;
+
+ // then draw
+ xrange.from = xrange.axis.p2c(xrange.from);
+ xrange.to = xrange.axis.p2c(xrange.to);
+ yrange.from = yrange.axis.p2c(yrange.from);
+ yrange.to = yrange.axis.p2c(yrange.to);
+
+ if (xrange.from == xrange.to || yrange.from == yrange.to) {
+ // draw line
+ ctx.beginPath();
+ ctx.strokeStyle = m.color || options.grid.markingsColor;
+ ctx.lineWidth = m.lineWidth || options.grid.markingsLineWidth;
+ ctx.moveTo(xrange.from, yrange.from);
+ ctx.lineTo(xrange.to, yrange.to);
+ ctx.stroke();
+ }
+ else {
+ // fill area
+ ctx.fillStyle = m.color || options.grid.markingsColor;
+ ctx.fillRect(xrange.from, yrange.to,
+ xrange.to - xrange.from,
+ yrange.from - yrange.to);
+ }
+ }
+ }
+
+ // draw the ticks
+ axes = allAxes();
+ bw = options.grid.borderWidth;
+
+ for (var j = 0; j < axes.length; ++j) {
+ var axis = axes[j], box = axis.box,
+ t = axis.tickLength, x, y, xoff, yoff;
+ if (!axis.show || axis.ticks.length == 0)
+ continue;
+
+ ctx.lineWidth = 1;
+
+ // find the edges
+ if (axis.direction == "x") {
+ x = 0;
+ if (t == "full")
+ y = (axis.position == "top" ? 0 : plotHeight);
+ else
+ y = box.top - plotOffset.top + (axis.position == "top" ? box.height : 0);
+ }
+ else {
+ y = 0;
+ if (t == "full")
+ x = (axis.position == "left" ? 0 : plotWidth);
+ else
+ x = box.left - plotOffset.left + (axis.position == "left" ? box.width : 0);
+ }
+
+ // draw tick bar
+ if (!axis.innermost) {
+ ctx.strokeStyle = axis.options.color;
+ ctx.beginPath();
+ xoff = yoff = 0;
+ if (axis.direction == "x")
+ xoff = plotWidth + 1;
+ else
+ yoff = plotHeight + 1;
+
+ if (ctx.lineWidth == 1) {
+ if (axis.direction == "x") {
+ y = Math.floor(y) + 0.5;
+ } else {
+ x = Math.floor(x) + 0.5;
+ }
+ }
+
+ ctx.moveTo(x, y);
+ ctx.lineTo(x + xoff, y + yoff);
+ ctx.stroke();
+ }
+
+ // draw ticks
+
+ ctx.strokeStyle = axis.options.tickColor;
+
+ ctx.beginPath();
+ for (i = 0; i < axis.ticks.length; ++i) {
+ var v = axis.ticks[i].v;
+
+ xoff = yoff = 0;
+
+ if (isNaN(v) || v < axis.min || v > axis.max
+ // skip those lying on the axes if we got a border
+ || (t == "full"
+ && ((typeof bw == "object" && bw[axis.position] > 0) || bw > 0)
+ && (v == axis.min || v == axis.max)))
+ continue;
+
+ if (axis.direction == "x") {
+ x = axis.p2c(v);
+ yoff = t == "full" ? -plotHeight : t;
+
+ if (axis.position == "top")
+ yoff = -yoff;
+ }
+ else {
+ y = axis.p2c(v);
+ xoff = t == "full" ? -plotWidth : t;
+
+ if (axis.position == "left")
+ xoff = -xoff;
+ }
+
+ if (ctx.lineWidth == 1) {
+ if (axis.direction == "x")
+ x = Math.floor(x) + 0.5;
+ else
+ y = Math.floor(y) + 0.5;
+ }
+
+ ctx.moveTo(x, y);
+ ctx.lineTo(x + xoff, y + yoff);
+ }
+
+ ctx.stroke();
+ }
+
+
+ // draw border
+ if (bw) {
+ // If either borderWidth or borderColor is an object, then draw the border
+ // line by line instead of as one rectangle
+ bc = options.grid.borderColor;
+ if(typeof bw == "object" || typeof bc == "object") {
+ if (typeof bw !== "object") {
+ bw = {top: bw, right: bw, bottom: bw, left: bw};
+ }
+ if (typeof bc !== "object") {
+ bc = {top: bc, right: bc, bottom: bc, left: bc};
+ }
+
+ if (bw.top > 0) {
+ ctx.strokeStyle = bc.top;
+ ctx.lineWidth = bw.top;
+ ctx.beginPath();
+ ctx.moveTo(0 - bw.left, 0 - bw.top/2);
+ ctx.lineTo(plotWidth, 0 - bw.top/2);
+ ctx.stroke();
+ }
+
+ if (bw.right > 0) {
+ ctx.strokeStyle = bc.right;
+ ctx.lineWidth = bw.right;
+ ctx.beginPath();
+ ctx.moveTo(plotWidth + bw.right / 2, 0 - bw.top);
+ ctx.lineTo(plotWidth + bw.right / 2, plotHeight);
+ ctx.stroke();
+ }
+
+ if (bw.bottom > 0) {
+ ctx.strokeStyle = bc.bottom;
+ ctx.lineWidth = bw.bottom;
+ ctx.beginPath();
+ ctx.moveTo(plotWidth + bw.right, plotHeight + bw.bottom / 2);
+ ctx.lineTo(0, plotHeight + bw.bottom / 2);
+ ctx.stroke();
+ }
+
+ if (bw.left > 0) {
+ ctx.strokeStyle = bc.left;
+ ctx.lineWidth = bw.left;
+ ctx.beginPath();
+ ctx.moveTo(0 - bw.left/2, plotHeight + bw.bottom);
+ ctx.lineTo(0 - bw.left/2, 0);
+ ctx.stroke();
+ }
+ }
+ else {
+ ctx.lineWidth = bw;
+ ctx.strokeStyle = options.grid.borderColor;
+ ctx.strokeRect(-bw/2, -bw/2, plotWidth + bw, plotHeight + bw);
+ }
+ }
+
+ ctx.restore();
+ }
+
+ function drawAxisLabels() {
+
+ $.each(allAxes(), function (_, axis) {
+ if (!axis.show || axis.ticks.length == 0)
+ return;
+
+ var box = axis.box,
+ legacyStyles = axis.direction + "Axis " + axis.direction + axis.n + "Axis",
+ layer = "flot-" + axis.direction + "-axis flot-" + axis.direction + axis.n + "-axis " + legacyStyles,
+ font = axis.options.font || "flot-tick-label tickLabel",
+ tick, x, y, halign, valign;
+
+ surface.removeText(layer);
+
+ for (var i = 0; i < axis.ticks.length; ++i) {
+
+ tick = axis.ticks[i];
+ if (!tick.label || tick.v < axis.min || tick.v > axis.max)
+ continue;
+
+ if (axis.direction == "x") {
+ halign = "center";
+ x = plotOffset.left + axis.p2c(tick.v);
+ if (axis.position == "bottom") {
+ y = box.top + box.padding;
+ } else {
+ y = box.top + box.height - box.padding;
+ valign = "bottom";
+ }
+ } else {
+ valign = "middle";
+ y = plotOffset.top + axis.p2c(tick.v);
+ if (axis.position == "left") {
+ x = box.left + box.width - box.padding;
+ halign = "right";
+ } else {
+ x = box.left + box.padding;
+ }
+ }
+
+ surface.addText(layer, x, y, tick.label, font, null, null, halign, valign);
+ }
+ });
+ }
+
+ function drawSeries(series) {
+ if (series.lines.show)
+ drawSeriesLines(series);
+ if (series.bars.show)
+ drawSeriesBars(series);
+ if (series.points.show)
+ drawSeriesPoints(series);
+ }
+
+ function drawSeriesLines(series) {
+ function plotLine(datapoints, xoffset, yoffset, axisx, axisy) {
+ var points = datapoints.points,
+ ps = datapoints.pointsize,
+ prevx = null, prevy = null;
+
+ ctx.beginPath();
+ for (var i = ps; i < points.length; i += ps) {
+ var x1 = points[i - ps], y1 = points[i - ps + 1],
+ x2 = points[i], y2 = points[i + 1];
+
+ if (x1 == null || x2 == null)
+ continue;
+
+ // clip with ymin
+ if (y1 <= y2 && y1 < axisy.min) {
+ if (y2 < axisy.min)
+ continue; // line segment is outside
+ // compute new intersection point
+ x1 = (axisy.min - y1) / (y2 - y1) * (x2 - x1) + x1;
+ y1 = axisy.min;
+ }
+ else if (y2 <= y1 && y2 < axisy.min) {
+ if (y1 < axisy.min)
+ continue;
+ x2 = (axisy.min - y1) / (y2 - y1) * (x2 - x1) + x1;
+ y2 = axisy.min;
+ }
+
+ // clip with ymax
+ if (y1 >= y2 && y1 > axisy.max) {
+ if (y2 > axisy.max)
+ continue;
+ x1 = (axisy.max - y1) / (y2 - y1) * (x2 - x1) + x1;
+ y1 = axisy.max;
+ }
+ else if (y2 >= y1 && y2 > axisy.max) {
+ if (y1 > axisy.max)
+ continue;
+ x2 = (axisy.max - y1) / (y2 - y1) * (x2 - x1) + x1;
+ y2 = axisy.max;
+ }
+
+ // clip with xmin
+ if (x1 <= x2 && x1 < axisx.min) {
+ if (x2 < axisx.min)
+ continue;
+ y1 = (axisx.min - x1) / (x2 - x1) * (y2 - y1) + y1;
+ x1 = axisx.min;
+ }
+ else if (x2 <= x1 && x2 < axisx.min) {
+ if (x1 < axisx.min)
+ continue;
+ y2 = (axisx.min - x1) / (x2 - x1) * (y2 - y1) + y1;
+ x2 = axisx.min;
+ }
+
+ // clip with xmax
+ if (x1 >= x2 && x1 > axisx.max) {
+ if (x2 > axisx.max)
+ continue;
+ y1 = (axisx.max - x1) / (x2 - x1) * (y2 - y1) + y1;
+ x1 = axisx.max;
+ }
+ else if (x2 >= x1 && x2 > axisx.max) {
+ if (x1 > axisx.max)
+ continue;
+ y2 = (axisx.max - x1) / (x2 - x1) * (y2 - y1) + y1;
+ x2 = axisx.max;
+ }
+
+ if (x1 != prevx || y1 != prevy)
+ ctx.moveTo(axisx.p2c(x1) + xoffset, axisy.p2c(y1) + yoffset);
+
+ prevx = x2;
+ prevy = y2;
+ ctx.lineTo(axisx.p2c(x2) + xoffset, axisy.p2c(y2) + yoffset);
+ }
+ ctx.stroke();
+ }
+
+ function plotLineArea(datapoints, axisx, axisy) {
+ var points = datapoints.points,
+ ps = datapoints.pointsize,
+ bottom = Math.min(Math.max(0, axisy.min), axisy.max),
+ i = 0, top, areaOpen = false,
+ ypos = 1, segmentStart = 0, segmentEnd = 0;
+
+ // we process each segment in two turns, first forward
+ // direction to sketch out top, then once we hit the
+ // end we go backwards to sketch the bottom
+ while (true) {
+ if (ps > 0 && i > points.length + ps)
+ break;
+
+ i += ps; // ps is negative if going backwards
+
+ var x1 = points[i - ps],
+ y1 = points[i - ps + ypos],
+ x2 = points[i], y2 = points[i + ypos];
+
+ if (areaOpen) {
+ if (ps > 0 && x1 != null && x2 == null) {
+ // at turning point
+ segmentEnd = i;
+ ps = -ps;
+ ypos = 2;
+ continue;
+ }
+
+ if (ps < 0 && i == segmentStart + ps) {
+ // done with the reverse sweep
+ ctx.fill();
+ areaOpen = false;
+ ps = -ps;
+ ypos = 1;
+ i = segmentStart = segmentEnd + ps;
+ continue;
+ }
+ }
+
+ if (x1 == null || x2 == null)
+ continue;
+
+ // clip x values
+
+ // clip with xmin
+ if (x1 <= x2 && x1 < axisx.min) {
+ if (x2 < axisx.min)
+ continue;
+ y1 = (axisx.min - x1) / (x2 - x1) * (y2 - y1) + y1;
+ x1 = axisx.min;
+ }
+ else if (x2 <= x1 && x2 < axisx.min) {
+ if (x1 < axisx.min)
+ continue;
+ y2 = (axisx.min - x1) / (x2 - x1) * (y2 - y1) + y1;
+ x2 = axisx.min;
+ }
+
+ // clip with xmax
+ if (x1 >= x2 && x1 > axisx.max) {
+ if (x2 > axisx.max)
+ continue;
+ y1 = (axisx.max - x1) / (x2 - x1) * (y2 - y1) + y1;
+ x1 = axisx.max;
+ }
+ else if (x2 >= x1 && x2 > axisx.max) {
+ if (x1 > axisx.max)
+ continue;
+ y2 = (axisx.max - x1) / (x2 - x1) * (y2 - y1) + y1;
+ x2 = axisx.max;
+ }
+
+ if (!areaOpen) {
+ // open area
+ ctx.beginPath();
+ ctx.moveTo(axisx.p2c(x1), axisy.p2c(bottom));
+ areaOpen = true;
+ }
+
+ // now first check the case where both are outside
+ if (y1 >= axisy.max && y2 >= axisy.max) {
+ ctx.lineTo(axisx.p2c(x1), axisy.p2c(axisy.max));
+ ctx.lineTo(axisx.p2c(x2), axisy.p2c(axisy.max));
+ continue;
+ }
+ else if (y1 <= axisy.min && y2 <= axisy.min) {
+ ctx.lineTo(axisx.p2c(x1), axisy.p2c(axisy.min));
+ ctx.lineTo(axisx.p2c(x2), axisy.p2c(axisy.min));
+ continue;
+ }
+
+ // else it's a bit more complicated, there might
+ // be a flat maxed out rectangle first, then a
+ // triangular cutout or reverse; to find these
+ // keep track of the current x values
+ var x1old = x1, x2old = x2;
+
+ // clip the y values, without shortcutting, we
+ // go through all cases in turn
+
+ // clip with ymin
+ if (y1 <= y2 && y1 < axisy.min && y2 >= axisy.min) {
+ x1 = (axisy.min - y1) / (y2 - y1) * (x2 - x1) + x1;
+ y1 = axisy.min;
+ }
+ else if (y2 <= y1 && y2 < axisy.min && y1 >= axisy.min) {
+ x2 = (axisy.min - y1) / (y2 - y1) * (x2 - x1) + x1;
+ y2 = axisy.min;
+ }
+
+ // clip with ymax
+ if (y1 >= y2 && y1 > axisy.max && y2 <= axisy.max) {
+ x1 = (axisy.max - y1) / (y2 - y1) * (x2 - x1) + x1;
+ y1 = axisy.max;
+ }
+ else if (y2 >= y1 && y2 > axisy.max && y1 <= axisy.max) {
+ x2 = (axisy.max - y1) / (y2 - y1) * (x2 - x1) + x1;
+ y2 = axisy.max;
+ }
+
+ // if the x value was changed we got a rectangle
+ // to fill
+ if (x1 != x1old) {
+ ctx.lineTo(axisx.p2c(x1old), axisy.p2c(y1));
+ // it goes to (x1, y1), but we fill that below
+ }
+
+ // fill triangular section; this sometimes results
+ // in redundant points if (x1, y1) hasn't changed
+ // since the previous lineTo, but we just ignore that
+ ctx.lineTo(axisx.p2c(x1), axisy.p2c(y1));
+ ctx.lineTo(axisx.p2c(x2), axisy.p2c(y2));
+
+ // fill the other rectangle if it's there
+ if (x2 != x2old) {
+ ctx.lineTo(axisx.p2c(x2), axisy.p2c(y2));
+ ctx.lineTo(axisx.p2c(x2old), axisy.p2c(y2));
+ }
+ }
+ }
+
+ ctx.save();
+ ctx.translate(plotOffset.left, plotOffset.top);
+ ctx.lineJoin = "round";
+
+ var lw = series.lines.lineWidth,
+ sw = series.shadowSize;
+ // FIXME: consider another form of shadow when filling is turned on
+ if (lw > 0 && sw > 0) {
+ // draw shadow as a thick and thin line with transparency
+ ctx.lineWidth = sw;
+ ctx.strokeStyle = "rgba(0,0,0,0.1)";
+ // position shadow at angle from the mid of line
+ var angle = Math.PI/18;
+ plotLine(series.datapoints, Math.sin(angle) * (lw/2 + sw/2), Math.cos(angle) * (lw/2 + sw/2), series.xaxis, series.yaxis);
+ ctx.lineWidth = sw/2;
+ plotLine(series.datapoints, Math.sin(angle) * (lw/2 + sw/4), Math.cos(angle) * (lw/2 + sw/4), series.xaxis, series.yaxis);
+ }
+
+ ctx.lineWidth = lw;
+ ctx.strokeStyle = series.color;
+ var fillStyle = getFillStyle(series.lines, series.color, 0, plotHeight);
+ if (fillStyle) {
+ ctx.fillStyle = fillStyle;
+ plotLineArea(series.datapoints, series.xaxis, series.yaxis);
+ }
+
+ if (lw > 0)
+ plotLine(series.datapoints, 0, 0, series.xaxis, series.yaxis);
+ ctx.restore();
+ }
+
+ function drawSeriesPoints(series) {
+ function plotPoints(datapoints, radius, fillStyle, offset, shadow, axisx, axisy, symbol) {
+ var points = datapoints.points, ps = datapoints.pointsize;
+
+ for (var i = 0; i < points.length; i += ps) {
+ var x = points[i], y = points[i + 1];
+ if (x == null || x < axisx.min || x > axisx.max || y < axisy.min || y > axisy.max)
+ continue;
+
+ ctx.beginPath();
+ x = axisx.p2c(x);
+ y = axisy.p2c(y) + offset;
+ if (symbol == "circle")
+ ctx.arc(x, y, radius, 0, shadow ? Math.PI : Math.PI * 2, false);
+ else
+ symbol(ctx, x, y, radius, shadow);
+ ctx.closePath();
+
+ if (fillStyle) {
+ ctx.fillStyle = fillStyle;
+ ctx.fill();
+ }
+ ctx.stroke();
+ }
+ }
+
+ ctx.save();
+ ctx.translate(plotOffset.left, plotOffset.top);
+
+ var lw = series.points.lineWidth,
+ sw = series.shadowSize,
+ radius = series.points.radius,
+ symbol = series.points.symbol;
+
+ // If the user sets the line width to 0, we change it to a very
+ // small value. A line width of 0 seems to force the default of 1.
+ // Doing the conditional here allows the shadow setting to still be
+ // optional even with a lineWidth of 0.
+
+ if (lw == 0)
+ lw = 0.0001;
+
+ if (lw > 0 && sw > 0) {
+ // draw shadow in two steps
+ var w = sw / 2;
+ ctx.lineWidth = w;
+ ctx.strokeStyle = "rgba(0,0,0,0.1)";
+ plotPoints(series.datapoints, radius, null, w + w/2, true,
+ series.xaxis, series.yaxis, symbol);
+
+ ctx.strokeStyle = "rgba(0,0,0,0.2)";
+ plotPoints(series.datapoints, radius, null, w/2, true,
+ series.xaxis, series.yaxis, symbol);
+ }
+
+ ctx.lineWidth = lw;
+ ctx.strokeStyle = series.color;
+ plotPoints(series.datapoints, radius,
+ getFillStyle(series.points, series.color), 0, false,
+ series.xaxis, series.yaxis, symbol);
+ ctx.restore();
+ }
+
+ function drawBar(x, y, b, barLeft, barRight, offset, fillStyleCallback, axisx, axisy, c, horizontal, lineWidth) {
+ var left, right, bottom, top,
+ drawLeft, drawRight, drawTop, drawBottom,
+ tmp;
+
+ // in horizontal mode, we start the bar from the left
+ // instead of from the bottom so it appears to be
+ // horizontal rather than vertical
+ if (horizontal) {
+ drawBottom = drawRight = drawTop = true;
+ drawLeft = false;
+ left = b;
+ right = x;
+ top = y + barLeft;
+ bottom = y + barRight;
+
+ // account for negative bars
+ if (right < left) {
+ tmp = right;
+ right = left;
+ left = tmp;
+ drawLeft = true;
+ drawRight = false;
+ }
+ }
+ else {
+ drawLeft = drawRight = drawTop = true;
+ drawBottom = false;
+ left = x + barLeft;
+ right = x + barRight;
+ bottom = b;
+ top = y;
+
+ // account for negative bars
+ if (top < bottom) {
+ tmp = top;
+ top = bottom;
+ bottom = tmp;
+ drawBottom = true;
+ drawTop = false;
+ }
+ }
+
+ // clip
+ if (right < axisx.min || left > axisx.max ||
+ top < axisy.min || bottom > axisy.max)
+ return;
+
+ if (left < axisx.min) {
+ left = axisx.min;
+ drawLeft = false;
+ }
+
+ if (right > axisx.max) {
+ right = axisx.max;
+ drawRight = false;
+ }
+
+ if (bottom < axisy.min) {
+ bottom = axisy.min;
+ drawBottom = false;
+ }
+
+ if (top > axisy.max) {
+ top = axisy.max;
+ drawTop = false;
+ }
+
+ left = axisx.p2c(left);
+ bottom = axisy.p2c(bottom);
+ right = axisx.p2c(right);
+ top = axisy.p2c(top);
+
+ // fill the bar
+ if (fillStyleCallback) {
+ c.beginPath();
+ c.moveTo(left, bottom);
+ c.lineTo(left, top);
+ c.lineTo(right, top);
+ c.lineTo(right, bottom);
+ c.fillStyle = fillStyleCallback(bottom, top);
+ c.fill();
+ }
+
+ // draw outline
+ if (lineWidth > 0 && (drawLeft || drawRight || drawTop || drawBottom)) {
+ c.beginPath();
+
+ // FIXME: inline moveTo is buggy with excanvas
+ c.moveTo(left, bottom + offset);
+ if (drawLeft)
+ c.lineTo(left, top + offset);
+ else
+ c.moveTo(left, top + offset);
+ if (drawTop)
+ c.lineTo(right, top + offset);
+ else
+ c.moveTo(right, top + offset);
+ if (drawRight)
+ c.lineTo(right, bottom + offset);
+ else
+ c.moveTo(right, bottom + offset);
+ if (drawBottom)
+ c.lineTo(left, bottom + offset);
+ else
+ c.moveTo(left, bottom + offset);
+ c.stroke();
+ }
+ }
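+
+ // Illustrative bar options feeding the geometry above (a sketch): with
+ // align: "center" and barWidth: 0.8, the bar for a point (x, y) spans
+ // x - 0.4 .. x + 0.4 horizontally and runs from the base b (0 by default)
+ // up to y:
+ //
+ //     bars: { show: true, barWidth: 0.8, align: "center" }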
+
+ function drawSeriesBars(series) {
+ function plotBars(datapoints, barLeft, barRight, offset, fillStyleCallback, axisx, axisy) {
+ var points = datapoints.points, ps = datapoints.pointsize;
+
+ for (var i = 0; i < points.length; i += ps) {
+ if (points[i] == null)
+ continue;
+ drawBar(points[i], points[i + 1], points[i + 2], barLeft, barRight, offset, fillStyleCallback, axisx, axisy, ctx, series.bars.horizontal, series.bars.lineWidth);
+ }
+ }
+
+ ctx.save();
+ ctx.translate(plotOffset.left, plotOffset.top);
+
+ // FIXME: figure out a way to add shadows (for instance along the right edge)
+ ctx.lineWidth = series.bars.lineWidth;
+ ctx.strokeStyle = series.color;
+
+ var barLeft;
+
+ switch (series.bars.align) {
+ case "left":
+ barLeft = 0;
+ break;
+ case "right":
+ barLeft = -series.bars.barWidth;
+ break;
+ case "center":
+ barLeft = -series.bars.barWidth / 2;
+ break;
+ default:
+ throw new Error("Invalid bar alignment: " + series.bars.align);
+ }
+
+ var fillStyleCallback = series.bars.fill ? function (bottom, top) { return getFillStyle(series.bars, series.color, bottom, top); } : null;
+ plotBars(series.datapoints, barLeft, barLeft + series.bars.barWidth, 0, fillStyleCallback, series.xaxis, series.yaxis);
+ ctx.restore();
+ }
+
+ function getFillStyle(filloptions, seriesColor, bottom, top) {
+ var fill = filloptions.fill;
+ if (!fill)
+ return null;
+
+ if (filloptions.fillColor)
+ return getColorOrGradient(filloptions.fillColor, bottom, top, seriesColor);
+
+ var c = $.color.parse(seriesColor);
+ c.a = typeof fill == "number" ? fill : 0.4;
+ c.normalize();
+ return c.toString();
+ }
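+
+ // Illustrative fill options resolved by this helper (a sketch, not part
+ // of Flot's required configuration): a numeric fill derives a translucent
+ // version of the series color, while fillColor may name a gradient:
+ //
+ //     lines: { show: true, fill: 0.6 }
+ //     lines: { show: true, fill: true,
+ //              fillColor: { colors: [ { opacity: 0.8 }, { opacity: 0.1 } ] } }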
+
+ function insertLegend() {
+
+ placeholder.find(".legend").remove();
+
+ if (!options.legend.show)
+ return;
+
+ var fragments = [], entries = [], rowStarted = false,
+ lf = options.legend.labelFormatter, s, label;
+
+ // Build a list of legend entries, with each having a label and a color
+
+ for (var i = 0; i < series.length; ++i) {
+ s = series[i];
+ if (s.label) {
+ label = lf ? lf(s.label, s) : s.label;
+ if (label) {
+ entries.push({
+ label: label,
+ color: s.color
+ });
+ }
+ }
+ }
+
+ // Sort the legend using either the default or a custom comparator
+
+ if (options.legend.sorted) {
+ if ($.isFunction(options.legend.sorted)) {
+ entries.sort(options.legend.sorted);
+ } else if (options.legend.sorted == "reverse") {
+ entries.reverse();
+ } else {
+ var ascending = options.legend.sorted != "descending";
+ entries.sort(function(a, b) {
+ return a.label == b.label ? 0 : (
+ (a.label < b.label) != ascending ? 1 : -1 // Logical XOR
+ );
+ });
+ }
+ }
+
+ // Generate markup for the list of entries, in their final order
+
+ for (var i = 0; i < entries.length; ++i) {
+
+ var entry = entries[i];
+
+ if (i % options.legend.noColumns == 0) {
+ if (rowStarted)
+ fragments.push('</tr>');
+ fragments.push('<tr>');
+ rowStarted = true;
+ }
+
+ fragments.push(
+ '<td class="legendColorBox"><div style="border:1px solid ' + options.legend.labelBoxBorderColor + ';padding:1px"><div style="width:4px;height:0;border:5px solid ' + entry.color + ';overflow:hidden"></div></div></td>' +
+ '<td class="legendLabel">' + entry.label + '</td>'
+ );
+ }
+
+ if (rowStarted)
+ fragments.push('</tr>');
+
+ if (fragments.length == 0)
+ return;
+
+ var table = '<table style="font-size:smaller;color:' + options.grid.color + '">' + fragments.join("") + '</table>';
+ if (options.legend.container != null)
+ $(options.legend.container).html(table);
+ else {
+ var pos = "",
+ p = options.legend.position,
+ m = options.legend.margin;
+ if (m[0] == null)
+ m = [m, m];
+ if (p.charAt(0) == "n")
+ pos += 'top:' + (m[1] + plotOffset.top) + 'px;';
+ else if (p.charAt(0) == "s")
+ pos += 'bottom:' + (m[1] + plotOffset.bottom) + 'px;';
+ if (p.charAt(1) == "e")
+ pos += 'right:' + (m[0] + plotOffset.right) + 'px;';
+ else if (p.charAt(1) == "w")
+ pos += 'left:' + (m[0] + plotOffset.left) + 'px;';
+ var legend = $('<div class="legend">' + table.replace('style="', 'style="position:absolute;' + pos +';') + '</div>').appendTo(placeholder);
+ if (options.legend.backgroundOpacity != 0.0) {
+ // put in the transparent background
+ // separately to avoid blended labels and
+ // label boxes
+ var c = options.legend.backgroundColor;
+ if (c == null) {
+ c = options.grid.backgroundColor;
+ if (c && typeof c == "string")
+ c = $.color.parse(c);
+ else
+ c = $.color.extract(legend, 'background-color');
+ c.a = 1;
+ c = c.toString();
+ }
+ var div = legend.children();
+ $('<div style="position:absolute;width:' + div.width() + 'px;height:' + div.height() + 'px;' + pos +'background-color:' + c + ';"> </div>').prependTo(legend).css('opacity', options.legend.backgroundOpacity);
+ }
+ }
+ }
+
+
+ // interactive features
+
+ var highlights = [],
+ redrawTimeout = null;
+
+ // returns the data item the mouse is over, or null if none is found
+ function findNearbyItem(mouseX, mouseY, seriesFilter) {
+ var maxDistance = options.grid.mouseActiveRadius,
+ smallestDistance = maxDistance * maxDistance + 1,
+ item = null, foundPoint = false, i, j, ps;
+
+ for (i = series.length - 1; i >= 0; --i) {
+ if (!seriesFilter(series[i]))
+ continue;
+
+ var s = series[i],
+ axisx = s.xaxis,
+ axisy = s.yaxis,
+ points = s.datapoints.points,
+ mx = axisx.c2p(mouseX), // precompute some stuff to make the loop faster
+ my = axisy.c2p(mouseY),
+ maxx = maxDistance / axisx.scale,
+ maxy = maxDistance / axisy.scale;
+
+ ps = s.datapoints.pointsize;
+ // with inverse transforms, we can't use the maxx/maxy
+ // optimization, sadly
+ if (axisx.options.inverseTransform)
+ maxx = Number.MAX_VALUE;
+ if (axisy.options.inverseTransform)
+ maxy = Number.MAX_VALUE;
+
+ if (s.lines.show || s.points.show) {
+ for (j = 0; j < points.length; j += ps) {
+ var x = points[j], y = points[j + 1];
+ if (x == null)
+ continue;
+
+ // For points and lines, the cursor must be within a
+ // certain distance to the data point
+ if (x - mx > maxx || x - mx < -maxx ||
+ y - my > maxy || y - my < -maxy)
+ continue;
+
+ // We have to calculate distances in pixels, not in
+ // data units, because the scales of the axes may be different
+ var dx = Math.abs(axisx.p2c(x) - mouseX),
+ dy = Math.abs(axisy.p2c(y) - mouseY),
+ dist = dx * dx + dy * dy; // we save the sqrt
+
+ // use strict < so the first match wins; series are
+ // scanned from last (topmost) to first, so the point
+ // drawn on top takes precedence
+ if (dist < smallestDistance) {
+ smallestDistance = dist;
+ item = [i, j / ps];
+ }
+ }
+ }
+
+ if (s.bars.show && !item) { // no other point can be nearby
+ var barLeft = s.bars.align == "left" ? 0 : -s.bars.barWidth/2,
+ barRight = barLeft + s.bars.barWidth;
+
+ for (j = 0; j < points.length; j += ps) {
+ var x = points[j], y = points[j + 1], b = points[j + 2];
+ if (x == null)
+ continue;
+
+ // for a bar graph, the cursor must be inside the bar
+ if (series[i].bars.horizontal ?
+ (mx <= Math.max(b, x) && mx >= Math.min(b, x) &&
+ my >= y + barLeft && my <= y + barRight) :
+ (mx >= x + barLeft && mx <= x + barRight &&
+ my >= Math.min(b, y) && my <= Math.max(b, y)))
+ item = [i, j / ps];
+ }
+ }
+ }
+
+ if (item) {
+ i = item[0];
+ j = item[1];
+ ps = series[i].datapoints.pointsize;
+
+ return { datapoint: series[i].datapoints.points.slice(j * ps, (j + 1) * ps),
+ dataIndex: j,
+ series: series[i],
+ seriesIndex: i };
+ }
+
+ return null;
+ }
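+
+ // Note (illustrative): distances are compared squared to avoid Math.sqrt
+ // in the loop. With the default mouseActiveRadius of 10, a point 6px away
+ // horizontally and 8px vertically has dist = 36 + 64 = 100, which beats
+ // the initial threshold of 10 * 10 + 1, so it still counts as nearby.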
+
+ function onMouseMove(e) {
+ if (options.grid.hoverable)
+ triggerClickHoverEvent("plothover", e,
+ function (s) { return s["hoverable"] != false; });
+ }
+
+ function onMouseLeave(e) {
+ if (options.grid.hoverable)
+ triggerClickHoverEvent("plothover", e,
+ function (s) { return false; });
+ }
+
+ function onClick(e) {
+ triggerClickHoverEvent("plotclick", e,
+ function (s) { return s["clickable"] != false; });
+ }
+
+ // trigger click or hover event (they send the same parameters
+ // so we share their code)
+ function triggerClickHoverEvent(eventname, event, seriesFilter) {
+ var offset = eventHolder.offset(),
+ canvasX = event.pageX - offset.left - plotOffset.left,
+ canvasY = event.pageY - offset.top - plotOffset.top,
+ pos = canvasToAxisCoords({ left: canvasX, top: canvasY });
+
+ pos.pageX = event.pageX;
+ pos.pageY = event.pageY;
+
+ var item = findNearbyItem(canvasX, canvasY, seriesFilter);
+
+ if (item) {
+ // fill in mouse pos for any listeners out there
+ item.pageX = parseInt(item.series.xaxis.p2c(item.datapoint[0]) + offset.left + plotOffset.left, 10);
+ item.pageY = parseInt(item.series.yaxis.p2c(item.datapoint[1]) + offset.top + plotOffset.top, 10);
+ }
+
+ if (options.grid.autoHighlight) {
+ // clear auto-highlights
+ for (var i = 0; i < highlights.length; ++i) {
+ var h = highlights[i];
+ if (h.auto == eventname &&
+ !(item && h.series == item.series &&
+ h.point[0] == item.datapoint[0] &&
+ h.point[1] == item.datapoint[1]))
+ unhighlight(h.series, h.point);
+ }
+
+ if (item)
+ highlight(item.series, item.datapoint, eventname);
+ }
+
+ placeholder.trigger(eventname, [ pos, item ]);
+ }
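+
+ // Usage sketch (illustrative; assumes the plot was created with
+ // grid: { hoverable: true, clickable: true }):
+ //
+ //   placeholder.bind("plothover", function (event, pos, item) {
+ //       if (item)
+ //           console.log("hover on series " + item.seriesIndex +
+ //                       ", point " + item.dataIndex);
+ //   });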
+
+ function triggerRedrawOverlay() {
+ var t = options.interaction.redrawOverlayInterval;
+ if (t == -1) { // skip event queue
+ drawOverlay();
+ return;
+ }
+
+ if (!redrawTimeout)
+ redrawTimeout = setTimeout(drawOverlay, t);
+ }
+
+ function drawOverlay() {
+ redrawTimeout = null;
+
+ // draw highlights
+ octx.save();
+ overlay.clear();
+ octx.translate(plotOffset.left, plotOffset.top);
+
+ var i, hi;
+ for (i = 0; i < highlights.length; ++i) {
+ hi = highlights[i];
+
+ if (hi.series.bars.show)
+ drawBarHighlight(hi.series, hi.point);
+ else
+ drawPointHighlight(hi.series, hi.point);
+ }
+ octx.restore();
+
+ executeHooks(hooks.drawOverlay, [octx]);
+ }
+
+ function highlight(s, point, auto) {
+ if (typeof s == "number")
+ s = series[s];
+
+ if (typeof point == "number") {
+ var ps = s.datapoints.pointsize;
+ point = s.datapoints.points.slice(ps * point, ps * (point + 1));
+ }
+
+ var i = indexOfHighlight(s, point);
+ if (i == -1) {
+ highlights.push({ series: s, point: point, auto: auto });
+
+ triggerRedrawOverlay();
+ }
+ else if (!auto)
+ highlights[i].auto = false;
+ }
+
+ function unhighlight(s, point) {
+ if (s == null && point == null) {
+ highlights = [];
+ triggerRedrawOverlay();
+ return;
+ }
+
+ if (typeof s == "number")
+ s = series[s];
+
+ if (typeof point == "number") {
+ var ps = s.datapoints.pointsize;
+ point = s.datapoints.points.slice(ps * point, ps * (point + 1));
+ }
+
+ var i = indexOfHighlight(s, point);
+ if (i != -1) {
+ highlights.splice(i, 1);
+
+ triggerRedrawOverlay();
+ }
+ }
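+
+ // Usage sketch (illustrative): both functions are exposed on the plot
+ // object and accept series/point objects or plain indices:
+ //
+ //   var plot = $.plot(placeholder, data);
+ //   plot.highlight(0, 2);    // highlight the 3rd point of the 1st series
+ //   plot.unhighlight(0, 2);  // remove that highlight again
+ //   plot.unhighlight();      // remove all highlights at once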
+
+ function indexOfHighlight(s, p) {
+ for (var i = 0; i < highlights.length; ++i) {
+ var h = highlights[i];
+ if (h.series == s && h.point[0] == p[0]
+ && h.point[1] == p[1])
+ return i;
+ }
+ return -1;
+ }
+
+ function drawPointHighlight(series, point) {
+ var x = point[0], y = point[1],
+ axisx = series.xaxis, axisy = series.yaxis,
+ highlightColor = (typeof series.highlightColor === "string") ? series.highlightColor : $.color.parse(series.color).scale('a', 0.5).toString();
+
+ if (x < axisx.min || x > axisx.max || y < axisy.min || y > axisy.max)
+ return;
+
+ var pointRadius = series.points.radius + series.points.lineWidth / 2;
+ octx.lineWidth = pointRadius;
+ octx.strokeStyle = highlightColor;
+ var radius = 1.5 * pointRadius;
+ x = axisx.p2c(x);
+ y = axisy.p2c(y);
+
+ octx.beginPath();
+ if (series.points.symbol == "circle")
+ octx.arc(x, y, radius, 0, 2 * Math.PI, false);
+ else
+ series.points.symbol(octx, x, y, radius, false);
+ octx.closePath();
+ octx.stroke();
+ }
+
+ function drawBarHighlight(series, point) {
+ var highlightColor = (typeof series.highlightColor === "string") ? series.highlightColor : $.color.parse(series.color).scale('a', 0.5).toString(),
+ fillStyle = highlightColor,
+ barLeft = series.bars.align == "left" ? 0 : -series.bars.barWidth/2;
+
+ octx.lineWidth = series.bars.lineWidth;
+ octx.strokeStyle = highlightColor;
+
+ drawBar(point[0], point[1], point[2] || 0, barLeft, barLeft + series.bars.barWidth,
+ 0, function () { return fillStyle; }, series.xaxis, series.yaxis, octx, series.bars.horizontal, series.bars.lineWidth);
+ }
+
+ function getColorOrGradient(spec, bottom, top, defaultColor) {
+ if (typeof spec == "string")
+ return spec;
+ else {
+ // assume this is a gradient spec; IE currently only
+ // supports a simple vertical gradient properly, so that's
+ // what we support too
+ var gradient = ctx.createLinearGradient(0, top, 0, bottom);
+
+ for (var i = 0, l = spec.colors.length; i < l; ++i) {
+ var c = spec.colors[i];
+ if (typeof c != "string") {
+ var co = $.color.parse(defaultColor);
+ if (c.brightness != null)
+ co = co.scale('rgb', c.brightness);
+ if (c.opacity != null)
+ co.a *= c.opacity;
+ c = co.toString();
+ }
+ gradient.addColorStop(i / (l - 1), c);
+ }
+
+ return gradient;
+ }
+ }
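+
+ // Example gradient spec (illustrative): a fillColor of
+ //   { colors: [ { opacity: 0.8 }, { brightness: 0.6, opacity: 0.8 } ] }
+ // derives both stops from the series color, so the fill runs from the
+ // series color at 80% opacity down to a darker variant of it.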
+ }
+
+ // Add the plot function to the top level of the jQuery object
+
+ $.plot = function(placeholder, data, options) {
+ //var t0 = new Date();
+ var plot = new Plot($(placeholder), data, options, $.plot.plugins);
+ //(window.console ? console.log : alert)("time used (msecs): " + ((new Date()).getTime() - t0.getTime()));
+ return plot;
+ };
+
+ $.plot.version = "0.8.1";
+
+ $.plot.plugins = [];
+
+ // Also add the plot function as a chainable property
+
+ $.fn.plot = function(data, options) {
+ return this.each(function() {
+ $.plot(this, data, options);
+ });
+ };
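+
+ // Usage sketch (illustrative): both entry points draw the same plot; the
+ // first returns the plot object, the second the jQuery selection:
+ //
+ //   var plot = $.plot("#placeholder", [ [[0, 0], [1, 1]] ]);
+ //   $("#placeholder").plot([ [[0, 0], [1, 1]] ], { grid: { hoverable: true } });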
+
+ // round n down to the nearest multiple of base
+ function floorInBase(n, base) {
+ return base * Math.floor(n / base);
+ }
+
+})(jQuery);
diff --git a/deps/rabbitmq_management/priv/www/js/jquery.flot-0.8.1.min.js b/deps/rabbitmq_management/priv/www/js/jquery.flot-0.8.1.min.js
new file mode 100644
index 0000000000..3706512c48
--- /dev/null
+++ b/deps/rabbitmq_management/priv/www/js/jquery.flot-0.8.1.min.js
@@ -0,0 +1,29 @@
+/* Javascript plotting library for jQuery, version 0.8.1.
+
+Copyright (c) 2007-2013 IOLA and Ole Laursen.
+Licensed under the MIT license.
+
+*/// first an inline dependency, jquery.colorhelpers.js, we inline it here
+// for convenience
+/* Plugin for jQuery for working with colors.
+ *
+ * Version 1.1.
+ *
+ * Inspiration from jQuery color animation plugin by John Resig.
+ *
+ * Released under the MIT license by Ole Laursen, October 2009.
+ *
+ * Examples:
+ *
+ * $.color.parse("#fff").scale('rgb', 0.25).add('a', -0.5).toString()
+ * var c = $.color.extract($("#mydiv"), 'background-color');
+ * console.log(c.r, c.g, c.b, c.a);
+ * $.color.make(100, 50, 25, 0.4).toString() // returns "rgba(100,50,25,0.4)"
+ *
+ * Note that .scale() and .add() return the same modified object
+ * instead of making a new one.
+ *
+ * V. 1.1: Fix error handling so e.g. parsing an empty string does
+ * produce a color rather than just crashing.
+ */(function(e){e.color={},e.color.make=function(t,n,r,i){var s={};return s.r=t||0,s.g=n||0,s.b=r||0,s.a=i!=null?i:1,s.add=function(e,t){for(var n=0;n<e.length;++n)s[e.charAt(n)]+=t;return s.normalize()},s.scale=function(e,t){for(var n=0;n<e.length;++n)s[e.charAt(n)]*=t;return s.normalize()},s.toString=function(){return s.a>=1?"rgb("+[s.r,s.g,s.b].join(",")+")":"rgba("+[s.r,s.g,s.b,s.a].join(",")+")"},s.normalize=function(){function e(e,t,n){return t<e?e:t>n?n:t}return s.r=e(0,parseInt(s.r),255),s.g=e(0,parseInt(s.g),255),s.b=e(0,parseInt(s.b),255),s.a=e(0,s.a,1),s},s.clone=function(){return e.color.make(s.r,s.b,s.g,s.a)},s.normalize()},e.color.extract=function(t,n){var r;do{r=t.css(n).toLowerCase();if(r!=""&&r!="transparent")break;t=t.parent()}while(!e.nodeName(t.get(0),"body"));return r=="rgba(0, 0, 0, 0)"&&(r="transparent"),e.color.parse(r)},e.color.parse=function(n){var r,i=e.color.make;if(r=/rgb\(\s*([0-9]{1,3})\s*,\s*([0-9]{1,3})\s*,\s*([0-9]{1,3})\s*\)/.exec(n))return i(parseInt(r[1],10),parseInt(r[2],10),parseInt(r[3],10));if(r=/rgba\(\s*([0-9]{1,3})\s*,\s*([0-9]{1,3})\s*,\s*([0-9]{1,3})\s*,\s*([0-9]+(?:\.[0-9]+)?)\s*\)/.exec(n))return i(parseInt(r[1],10),parseInt(r[2],10),parseInt(r[3],10),parseFloat(r[4]));if(r=/rgb\(\s*([0-9]+(?:\.[0-9]+)?)\%\s*,\s*([0-9]+(?:\.[0-9]+)?)\%\s*,\s*([0-9]+(?:\.[0-9]+)?)\%\s*\)/.exec(n))return i(parseFloat(r[1])*2.55,parseFloat(r[2])*2.55,parseFloat(r[3])*2.55);if(r=/rgba\(\s*([0-9]+(?:\.[0-9]+)?)\%\s*,\s*([0-9]+(?:\.[0-9]+)?)\%\s*,\s*([0-9]+(?:\.[0-9]+)?)\%\s*,\s*([0-9]+(?:\.[0-9]+)?)\s*\)/.exec(n))return i(parseFloat(r[1])*2.55,parseFloat(r[2])*2.55,parseFloat(r[3])*2.55,parseFloat(r[4]));if(r=/#([a-fA-F0-9]{2})([a-fA-F0-9]{2})([a-fA-F0-9]{2})/.exec(n))return i(parseInt(r[1],16),parseInt(r[2],16),parseInt(r[3],16));if(r=/#([a-fA-F0-9])([a-fA-F0-9])([a-fA-F0-9])/.exec(n))return i(parseInt(r[1]+r[1],16),parseInt(r[2]+r[2],16),parseInt(r[3]+r[3],16));var s=e.trim(n).toLowerCase();return s=="transparent"?i(255,255,255,0):(r=t[s]||[0,0,0],i(r[0],r[1],r[2]))};var t={aqua:[0,255,255],azure:[240,255,255],beige:[245,245,220],black:[0,0,0],blue:[0,0,255],brown:[165,42,42],cyan:[0,255,255],darkblue:[0,0,139],darkcyan:[0,139,139],darkgrey:[169,169,169],darkgreen:[0,100,0],darkkhaki:[189,183,107],darkmagenta:[139,0,139],darkolivegreen:[85,107,47],darkorange:[255,140,0],darkorchid:[153,50,204],darkred:[139,0,0],darksalmon:[233,150,122],darkviolet:[148,0,211],fuchsia:[255,0,255],gold:[255,215,0],green:[0,128,0],indigo:[75,0,130],khaki:[240,230,140],lightblue:[173,216,230],lightcyan:[224,255,255],lightgreen:[144,238,144],lightgrey:[211,211,211],lightpink:[255,182,193],lightyellow:[255,255,224],lime:[0,255,0],magenta:[255,0,255],maroon:[128,0,0],navy:[0,0,128],olive:[128,128,0],orange:[255,165,0],pink:[255,192,203],purple:[128,0,128],violet:[128,0,128],red:[255,0,0],silver:[192,192,192],white:[255,255,255],yellow:[255,255,0]}})(jQuery),function(e){function n(t,n){var r=n.children("."+t)[0];if(r==null){r=document.createElement("canvas"),r.className=t,e(r).css({direction:"ltr",position:"absolute",left:0,top:0}).appendTo(n);if(!r.getContext){if(!window.G_vmlCanvasManager)throw new Error("Canvas is not available. 
If you're using IE with a fall-back such as Excanvas, then there's either a mistake in your conditional include, or the page has no DOCTYPE and is rendering in Quirks Mode.");r=window.G_vmlCanvasManager.initElement(r)}}this.element=r;var i=this.context=r.getContext("2d"),s=window.devicePixelRatio||1,o=i.webkitBackingStorePixelRatio||i.mozBackingStorePixelRatio||i.msBackingStorePixelRatio||i.oBackingStorePixelRatio||i.backingStorePixelRatio||1;this.pixelRatio=s/o,this.resize(n.width(),n.height()),this.textContainer=null,this.text={},this._textCache={}}function r(t,r,s,o){function E(e,t){t=[w].concat(t);for(var n=0;n<e.length;++n)e[n].apply(this,t)}function S(){var t={Canvas:n};for(var r=0;r<o.length;++r){var i=o[r];i.init(w,t),i.options&&e.extend(!0,a,i.options)}}function x(n){e.extend(!0,a,n),n&&n.colors&&(a.colors=n.colors),a.xaxis.color==null&&(a.xaxis.color=e.color.parse(a.grid.color).scale("a",.22).toString()),a.yaxis.color==null&&(a.yaxis.color=e.color.parse(a.grid.color).scale("a",.22).toString()),a.xaxis.tickColor==null&&(a.xaxis.tickColor=a.grid.tickColor||a.xaxis.color),a.yaxis.tickColor==null&&(a.yaxis.tickColor=a.grid.tickColor||a.yaxis.color),a.grid.borderColor==null&&(a.grid.borderColor=a.grid.color),a.grid.tickColor==null&&(a.grid.tickColor=e.color.parse(a.grid.color).scale("a",.22).toString());var r,i,s,o={style:t.css("font-style"),size:Math.round(.8*(+t.css("font-size").replace("px","")||13)),variant:t.css("font-variant"),weight:t.css("font-weight"),family:t.css("font-family")};o.lineHeight=o.size*1.15,s=a.xaxes.length||1;for(r=0;r<s;++r)i=a.xaxes[r],i&&!i.tickColor&&(i.tickColor=i.color),i=e.extend(!0,{},a.xaxis,i),a.xaxes[r]=i,i.font&&(i.font=e.extend({},o,i.font),i.font.color||(i.font.color=i.color));s=a.yaxes.length||1;for(r=0;r<s;++r)i=a.yaxes[r],i&&!i.tickColor&&(i.tickColor=i.color),i=e.extend(!0,{},a.yaxis,i),a.yaxes[r]=i,i.font&&(i.font=e.extend({},o,i.font),i.font.color||(i.font.color=i.color));a.xaxis.noTicks&&a.xaxis.ticks==null&&(a.xaxis.ticks=a.xaxis.noTicks),a.yaxis.noTicks&&a.yaxis.ticks==null&&(a.yaxis.ticks=a.yaxis.noTicks),a.x2axis&&(a.xaxes[1]=e.extend(!0,{},a.xaxis,a.x2axis),a.xaxes[1].position="top"),a.y2axis&&(a.yaxes[1]=e.extend(!0,{},a.yaxis,a.y2axis),a.yaxes[1].position="right"),a.grid.coloredAreas&&(a.grid.markings=a.grid.coloredAreas),a.grid.coloredAreasColor&&(a.grid.markingsColor=a.grid.coloredAreasColor),a.lines&&e.extend(!0,a.series.lines,a.lines),a.points&&e.extend(!0,a.series.points,a.points),a.bars&&e.extend(!0,a.series.bars,a.bars),a.shadowSize!=null&&(a.series.shadowSize=a.shadowSize),a.highlightColor!=null&&(a.series.highlightColor=a.highlightColor);for(r=0;r<a.xaxes.length;++r)O(d,r+1).options=a.xaxes[r];for(r=0;r<a.yaxes.length;++r)O(v,r+1).options=a.yaxes[r];for(var u in b)a.hooks[u]&&a.hooks[u].length&&(b[u]=b[u].concat(a.hooks[u]));E(b.processOptions,[a])}function T(e){u=N(e),M(),_()}function N(t){var n=[];for(var r=0;r<t.length;++r){var i=e.extend(!0,{},a.series);t[r].data!=null?(i.data=t[r].data,delete t[r].data,e.extend(!0,i,t[r]),t[r].data=i.data):i.data=t[r],n.push(i)}return n}function C(e,t){var n=e[t+"axis"];return typeof n=="object"&&(n=n.n),typeof n!="number"&&(n=1),n}function k(){return e.grep(d.concat(v),function(e){return e})}function L(e){var t={},n,r;for(n=0;n<d.length;++n)r=d[n],r&&r.used&&(t["x"+r.n]=r.c2p(e.left));for(n=0;n<v.length;++n)r=v[n],r&&r.used&&(t["y"+r.n]=r.c2p(e.top));return t.x1!==undefined&&(t.x=t.x1),t.y1!==undefined&&(t.y=t.y1),t}function A(e){var 
t={},n,r,i;for(n=0;n<d.length;++n){r=d[n];if(r&&r.used){i="x"+r.n,e[i]==null&&r.n==1&&(i="x");if(e[i]!=null){t.left=r.p2c(e[i]);break}}}for(n=0;n<v.length;++n){r=v[n];if(r&&r.used){i="y"+r.n,e[i]==null&&r.n==1&&(i="y");if(e[i]!=null){t.top=r.p2c(e[i]);break}}}return t}function O(t,n){return t[n-1]||(t[n-1]={n:n,direction:t==d?"x":"y",options:e.extend(!0,{},t==d?a.xaxis:a.yaxis)}),t[n-1]}function M(){var t=u.length,n=-1,r;for(r=0;r<u.length;++r){var i=u[r].color;i!=null&&(t--,typeof i=="number"&&i>n&&(n=i))}t<=n&&(t=n+1);var s,o=[],f=a.colors,l=f.length,c=0;for(r=0;r<t;r++)s=e.color.parse(f[r%l]||"#666"),r%l==0&&r&&(c>=0?c<.5?c=-c-.2:c=0:c=-c),o[r]=s.scale("rgb",1+c);var h=0,p;for(r=0;r<u.length;++r){p=u[r],p.color==null?(p.color=o[h].toString(),++h):typeof p.color=="number"&&(p.color=o[p.color].toString());if(p.lines.show==null){var m,g=!0;for(m in p)if(p[m]&&p[m].show){g=!1;break}g&&(p.lines.show=!0)}p.lines.zero==null&&(p.lines.zero=!!p.lines.fill),p.xaxis=O(d,C(p,"x")),p.yaxis=O(v,C(p,"y"))}}function _(){function x(e,t,n){t<e.datamin&&t!=-r&&(e.datamin=t),n>e.datamax&&n!=r&&(e.datamax=n)}var t=Number.POSITIVE_INFINITY,n=Number.NEGATIVE_INFINITY,r=Number.MAX_VALUE,i,s,o,a,f,l,c,h,p,d,v,m,g,y,w,S;e.each(k(),function(e,r){r.datamin=t,r.datamax=n,r.used=!1});for(i=0;i<u.length;++i)l=u[i],l.datapoints={points:[]},E(b.processRawData,[l,l.data,l.datapoints]);for(i=0;i<u.length;++i){l=u[i],w=l.data,S=l.datapoints.format;if(!S){S=[],S.push({x:!0,number:!0,required:!0}),S.push({y:!0,number:!0,required:!0});if(l.bars.show||l.lines.show&&l.lines.fill){var T=!!(l.bars.show&&l.bars.zero||l.lines.show&&l.lines.zero);S.push({y:!0,number:!0,required:!1,defaultValue:0,autoscale:T}),l.bars.horizontal&&(delete S[S.length-1].y,S[S.length-1].x=!0)}l.datapoints.format=S}if(l.datapoints.pointsize!=null)continue;l.datapoints.pointsize=S.length,h=l.datapoints.pointsize,c=l.datapoints.points;var N=l.lines.show&&l.lines.steps;l.xaxis.used=l.yaxis.used=!0;for(s=o=0;s<w.length;++s,o+=h){y=w[s];var C=y==null;if(!C)for(a=0;a<h;++a)m=y[a],g=S[a],g&&(g.number&&m!=null&&(m=+m,isNaN(m)?m=null:m==Infinity?m=r:m==-Infinity&&(m=-r)),m==null&&(g.required&&(C=!0),g.defaultValue!=null&&(m=g.defaultValue))),c[o+a]=m;if(C)for(a=0;a<h;++a)m=c[o+a],m!=null&&(g=S[a],g.autoscale&&(g.x&&x(l.xaxis,m,m),g.y&&x(l.yaxis,m,m))),c[o+a]=null;else if(N&&o>0&&c[o-h]!=null&&c[o-h]!=c[o]&&c[o-h+1]!=c[o+1]){for(a=0;a<h;++a)c[o+h+a]=c[o+a];c[o+1]=c[o-h+1],o+=h}}}for(i=0;i<u.length;++i)l=u[i],E(b.processDatapoints,[l,l.datapoints]);for(i=0;i<u.length;++i){l=u[i],c=l.datapoints.points,h=l.datapoints.pointsize,S=l.datapoints.format;var L=t,A=t,O=n,M=n;for(s=0;s<c.length;s+=h){if(c[s]==null)continue;for(a=0;a<h;++a){m=c[s+a],g=S[a];if(!g||g.autoscale===!1||m==r||m==-r)continue;g.x&&(m<L&&(L=m),m>O&&(O=m)),g.y&&(m<A&&(A=m),m>M&&(M=m))}}if(l.bars.show){var _;switch(l.bars.align){case"left":_=0;break;case"right":_=-l.bars.barWidth;break;case"center":_=-l.bars.barWidth/2;break;default:throw new Error("Invalid bar alignment: "+l.bars.align)}l.bars.horizontal?(A+=_,M+=_+l.bars.barWidth):(L+=_,O+=_+l.bars.barWidth)}x(l.xaxis,L,O),x(l.yaxis,A,M)}e.each(k(),function(e,r){r.datamin==t&&(r.datamin=null),r.datamax==n&&(r.datamax=null)})}function D(){t.css("padding",0).children(":not(.flot-base,.flot-overlay)").remove(),t.css("position")=="static"&&t.css("position","relative"),f=new n("flot-base",t),l=new n("flot-overlay",t),h=f.context,p=l.context,c=e(l.element).unbind();var r=t.data("plot");r&&(r.shutdown(),l.clear()),t.data("plot",w)}function 
P(){a.grid.hoverable&&(c.mousemove(at),c.bind("mouseleave",ft)),a.grid.clickable&&c.click(lt),E(b.bindEvents,[c])}function H(){ot&&clearTimeout(ot),c.unbind("mousemove",at),c.unbind("mouseleave",ft),c.unbind("click",lt),E(b.shutdown,[c])}function B(e){function t(e){return e}var n,r,i=e.options.transform||t,s=e.options.inverseTransform;e.direction=="x"?(n=e.scale=g/Math.abs(i(e.max)-i(e.min)),r=Math.min(i(e.max),i(e.min))):(n=e.scale=y/Math.abs(i(e.max)-i(e.min)),n=-n,r=Math.max(i(e.max),i(e.min))),i==t?e.p2c=function(e){return(e-r)*n}:e.p2c=function(e){return(i(e)-r)*n},s?e.c2p=function(e){return s(r+e/n)}:e.c2p=function(e){return r+e/n}}function j(e){var t=e.options,n=e.ticks||[],r=t.labelWidth||0,i=t.labelHeight||0,s=r||e.direction=="x"?Math.floor(f.width/(n.length||1)):null;legacyStyles=e.direction+"Axis "+e.direction+e.n+"Axis",layer="flot-"+e.direction+"-axis flot-"+e.direction+e.n+"-axis "+legacyStyles,font=t.font||"flot-tick-label tickLabel";for(var o=0;o<n.length;++o){var u=n[o];if(!u.label)continue;var a=f.getTextInfo(layer,u.label,font,null,s);r=Math.max(r,a.width),i=Math.max(i,a.height)}e.labelWidth=t.labelWidth||r,e.labelHeight=t.labelHeight||i}function F(t){var n=t.labelWidth,r=t.labelHeight,i=t.options.position,s=t.options.tickLength,o=a.grid.axisMargin,u=a.grid.labelMargin,l=t.direction=="x"?d:v,c,h,p=e.grep(l,function(e){return e&&e.options.position==i&&e.reserveSpace});e.inArray(t,p)==p.length-1&&(o=0);if(s==null){var g=e.grep(l,function(e){return e&&e.reserveSpace});h=e.inArray(t,g)==0,h?s="full":s=5}isNaN(+s)||(u+=+s),t.direction=="x"?(r+=u,i=="bottom"?(m.bottom+=r+o,t.box={top:f.height-m.bottom,height:r}):(t.box={top:m.top+o,height:r},m.top+=r+o)):(n+=u,i=="left"?(t.box={left:m.left+o,width:n},m.left+=n+o):(m.right+=n+o,t.box={left:f.width-m.right,width:n})),t.position=i,t.tickLength=s,t.box.padding=u,t.innermost=h}function I(e){e.direction=="x"?(e.box.left=m.left-e.labelWidth/2,e.box.width=f.width-m.left-m.right+e.labelWidth):(e.box.top=m.top-e.labelHeight/2,e.box.height=f.height-m.bottom-m.top+e.labelHeight)}function q(){var t=a.grid.minBorderMargin,n={x:0,y:0},r,i;if(t==null){t=0;for(r=0;r<u.length;++r)t=Math.max(t,2*(u[r].points.radius+u[r].points.lineWidth/2))}n.x=n.y=Math.ceil(t),e.each(k(),function(e,t){var r=t.direction;t.reserveSpace&&(n[r]=Math.ceil(Math.max(n[r],(r=="x"?t.labelWidth:t.labelHeight)/2)))}),m.left=Math.max(n.x,m.left),m.right=Math.max(n.x,m.right),m.top=Math.max(n.y,m.top),m.bottom=Math.max(n.y,m.bottom)}function R(){var t,n=k(),r=a.grid.show;for(var i in m){var s=a.grid.margin||0;m[i]=typeof s=="number"?s:s[i]||0}E(b.processOffset,[m]);for(var i in m)typeof a.grid.borderWidth=="object"?m[i]+=r?a.grid.borderWidth[i]:0:m[i]+=r?a.grid.borderWidth:0;e.each(n,function(e,t){t.show=t.options.show,t.show==null&&(t.show=t.used),t.reserveSpace=t.show||t.options.reserveSpace,U(t)});if(r){var o=e.grep(n,function(e){return e.reserveSpace});e.each(o,function(e,t){z(t),W(t),X(t,t.ticks),j(t)});for(t=o.length-1;t>=0;--t)F(o[t]);q(),e.each(o,function(e,t){I(t)})}g=f.width-m.left-m.right,y=f.height-m.bottom-m.top,e.each(n,function(e,t){B(t)}),r&&G(),it()}function U(e){var t=e.options,n=+(t.min!=null?t.min:e.datamin),r=+(t.max!=null?t.max:e.datamax),i=r-n;if(i==0){var s=r==0?1:.01;t.min==null&&(n-=s);if(t.max==null||t.min!=null)r+=s}else{var o=t.autoscaleMargin;o!=null&&(t.min==null&&(n-=i*o,n<0&&e.datamin!=null&&e.datamin>=0&&(n=0)),t.max==null&&(r+=i*o,r>0&&e.datamax!=null&&e.datamax<=0&&(r=0)))}e.min=n,e.max=r}function z(t){var n=t.options,r;typeof 
n.ticks=="number"&&n.ticks>0?r=n.ticks:r=.3*Math.sqrt(t.direction=="x"?f.width:f.height);var s=(t.max-t.min)/r,o=-Math.floor(Math.log(s)/Math.LN10),u=n.tickDecimals;u!=null&&o>u&&(o=u);var a=Math.pow(10,-o),l=s/a,c;l<1.5?c=1:l<3?(c=2,l>2.25&&(u==null||o+1<=u)&&(c=2.5,++o)):l<7.5?c=5:c=10,c*=a,n.minTickSize!=null&&c<n.minTickSize&&(c=n.minTickSize),t.delta=s,t.tickDecimals=Math.max(0,u!=null?u:o),t.tickSize=n.tickSize||c;if(n.mode=="time"&&!t.tickGenerator)throw new Error("Time mode requires the flot.time plugin.");t.tickGenerator||(t.tickGenerator=function(e){var t=[],n=i(e.min,e.tickSize),r=0,s=Number.NaN,o;do o=s,s=n+r*e.tickSize,t.push(s),++r;while(s<e.max&&s!=o);return t},t.tickFormatter=function(e,t){var n=t.tickDecimals?Math.pow(10,t.tickDecimals):1,r=""+Math.round(e*n)/n;if(t.tickDecimals!=null){var i=r.indexOf("."),s=i==-1?0:r.length-i-1;if(s<t.tickDecimals)return(s?r:r+".")+(""+n).substr(1,t.tickDecimals-s)}return r}),e.isFunction(n.tickFormatter)&&(t.tickFormatter=function(e,t){return""+n.tickFormatter(e,t)});if(n.alignTicksWithAxis!=null){var h=(t.direction=="x"?d:v)[n.alignTicksWithAxis-1];if(h&&h.used&&h!=t){var p=t.tickGenerator(t);p.length>0&&(n.min==null&&(t.min=Math.min(t.min,p[0])),n.max==null&&p.length>1&&(t.max=Math.max(t.max,p[p.length-1]))),t.tickGenerator=function(e){var t=[],n,r;for(r=0;r<h.ticks.length;++r)n=(h.ticks[r].v-h.min)/(h.max-h.min),n=e.min+n*(e.max-e.min),t.push(n);return t};if(!t.mode&&n.tickDecimals==null){var m=Math.max(0,-Math.floor(Math.log(t.delta)/Math.LN10)+1),g=t.tickGenerator(t);g.length>1&&/\..*0$/.test((g[1]-g[0]).toFixed(m))||(t.tickDecimals=m)}}}}function W(t){var n=t.options.ticks,r=[];n==null||typeof n=="number"&&n>0?r=t.tickGenerator(t):n&&(e.isFunction(n)?r=n(t):r=n);var i,s;t.ticks=[];for(i=0;i<r.length;++i){var o=null,u=r[i];typeof u=="object"?(s=+u[0],u.length>1&&(o=u[1])):s=+u,o==null&&(o=t.tickFormatter(s,t)),isNaN(s)||t.ticks.push({v:s,label:o})}}function X(e,t){e.options.autoscaleMargin&&t.length>0&&(e.options.min==null&&(e.min=Math.min(e.min,t[0].v)),e.options.max==null&&t.length>1&&(e.max=Math.max(e.max,t[t.length-1].v)))}function V(){f.clear(),E(b.drawBackground,[h]);var e=a.grid;e.show&&e.backgroundColor&&K(),e.show&&!e.aboveData&&Q();for(var t=0;t<u.length;++t)E(b.drawSeries,[h,u[t]]),Y(u[t]);E(b.draw,[h]),e.show&&e.aboveData&&Q(),f.render(),ht()}function J(e,t){var n,r,i,s,o=k();for(var u=0;u<o.length;++u){n=o[u];if(n.direction==t){s=t+n.n+"axis",!e[s]&&n.n==1&&(s=t+"axis");if(e[s]){r=e[s].from,i=e[s].to;break}}}e[s]||(n=t=="x"?d[0]:v[0],r=e[t+"1"],i=e[t+"2"]);if(r!=null&&i!=null&&r>i){var a=r;r=i,i=a}return{from:r,to:i,axis:n}}function K(){h.save(),h.translate(m.left,m.top),h.fillStyle=bt(a.grid.backgroundColor,y,0,"rgba(255, 255, 255, 0)"),h.fillRect(0,0,g,y),h.restore()}function Q(){var t,n,r,i;h.save(),h.translate(m.left,m.top);var s=a.grid.markings;if(s){e.isFunction(s)&&(n=w.getAxes(),n.xmin=n.xaxis.min,n.xmax=n.xaxis.max,n.ymin=n.yaxis.min,n.ymax=n.yaxis.max,s=s(n));for(t=0;t<s.length;++t){var 
o=s[t],u=J(o,"x"),f=J(o,"y");u.from==null&&(u.from=u.axis.min),u.to==null&&(u.to=u.axis.max),f.from==null&&(f.from=f.axis.min),f.to==null&&(f.to=f.axis.max);if(u.to<u.axis.min||u.from>u.axis.max||f.to<f.axis.min||f.from>f.axis.max)continue;u.from=Math.max(u.from,u.axis.min),u.to=Math.min(u.to,u.axis.max),f.from=Math.max(f.from,f.axis.min),f.to=Math.min(f.to,f.axis.max);if(u.from==u.to&&f.from==f.to)continue;u.from=u.axis.p2c(u.from),u.to=u.axis.p2c(u.to),f.from=f.axis.p2c(f.from),f.to=f.axis.p2c(f.to),u.from==u.to||f.from==f.to?(h.beginPath(),h.strokeStyle=o.color||a.grid.markingsColor,h.lineWidth=o.lineWidth||a.grid.markingsLineWidth,h.moveTo(u.from,f.from),h.lineTo(u.to,f.to),h.stroke()):(h.fillStyle=o.color||a.grid.markingsColor,h.fillRect(u.from,f.to,u.to-u.from,f.from-f.to))}}n=k(),r=a.grid.borderWidth;for(var l=0;l<n.length;++l){var c=n[l],p=c.box,d=c.tickLength,v,b,E,S;if(!c.show||c.ticks.length==0)continue;h.lineWidth=1,c.direction=="x"?(v=0,d=="full"?b=c.position=="top"?0:y:b=p.top-m.top+(c.position=="top"?p.height:0)):(b=0,d=="full"?v=c.position=="left"?0:g:v=p.left-m.left+(c.position=="left"?p.width:0)),c.innermost||(h.strokeStyle=c.options.color,h.beginPath(),E=S=0,c.direction=="x"?E=g+1:S=y+1,h.lineWidth==1&&(c.direction=="x"?b=Math.floor(b)+.5:v=Math.floor(v)+.5),h.moveTo(v,b),h.lineTo(v+E,b+S),h.stroke()),h.strokeStyle=c.options.tickColor,h.beginPath();for(t=0;t<c.ticks.length;++t){var x=c.ticks[t].v;E=S=0;if(isNaN(x)||x<c.min||x>c.max||d=="full"&&(typeof r=="object"&&r[c.position]>0||r>0)&&(x==c.min||x==c.max))continue;c.direction=="x"?(v=c.p2c(x),S=d=="full"?-y:d,c.position=="top"&&(S=-S)):(b=c.p2c(x),E=d=="full"?-g:d,c.position=="left"&&(E=-E)),h.lineWidth==1&&(c.direction=="x"?v=Math.floor(v)+.5:b=Math.floor(b)+.5),h.moveTo(v,b),h.lineTo(v+E,b+S)}h.stroke()}r&&(i=a.grid.borderColor,typeof r=="object"||typeof i=="object"?(typeof r!="object"&&(r={top:r,right:r,bottom:r,left:r}),typeof i!="object"&&(i={top:i,right:i,bottom:i,left:i}),r.top>0&&(h.strokeStyle=i.top,h.lineWidth=r.top,h.beginPath(),h.moveTo(0-r.left,0-r.top/2),h.lineTo(g,0-r.top/2),h.stroke()),r.right>0&&(h.strokeStyle=i.right,h.lineWidth=r.right,h.beginPath(),h.moveTo(g+r.right/2,0-r.top),h.lineTo(g+r.right/2,y),h.stroke()),r.bottom>0&&(h.strokeStyle=i.bottom,h.lineWidth=r.bottom,h.beginPath(),h.moveTo(g+r.right,y+r.bottom/2),h.lineTo(0,y+r.bottom/2),h.stroke()),r.left>0&&(h.strokeStyle=i.left,h.lineWidth=r.left,h.beginPath(),h.moveTo(0-r.left/2,y+r.bottom),h.lineTo(0-r.left/2,0),h.stroke())):(h.lineWidth=r,h.strokeStyle=a.grid.borderColor,h.strokeRect(-r/2,-r/2,g+r,y+r))),h.restore()}function G(){e.each(k(),function(e,t){if(!t.show||t.ticks.length==0)return;var n=t.box,r=t.direction+"Axis "+t.direction+t.n+"Axis",i="flot-"+t.direction+"-axis flot-"+t.direction+t.n+"-axis "+r,s=t.options.font||"flot-tick-label tickLabel",o,u,a,l,c;f.removeText(i);for(var h=0;h<t.ticks.length;++h){o=t.ticks[h];if(!o.label||o.v<t.min||o.v>t.max)continue;t.direction=="x"?(l="center",u=m.left+t.p2c(o.v),t.position=="bottom"?a=n.top+n.padding:(a=n.top+n.height-n.padding,c="bottom")):(c="middle",a=m.top+t.p2c(o.v),t.position=="left"?(u=n.left+n.width-n.padding,l="right"):u=n.left+n.padding),f.addText(i,u,a,o.label,s,null,null,l,c)}})}function Y(e){e.lines.show&&Z(e),e.bars.show&&nt(e),e.points.show&&et(e)}function Z(e){function t(e,t,n,r,i){var s=e.points,o=e.pointsize,u=null,a=null;h.beginPath();for(var f=o;f<s.length;f+=o){var 
l=s[f-o],c=s[f-o+1],p=s[f],d=s[f+1];if(l==null||p==null)continue;if(c<=d&&c<i.min){if(d<i.min)continue;l=(i.min-c)/(d-c)*(p-l)+l,c=i.min}else if(d<=c&&d<i.min){if(c<i.min)continue;p=(i.min-c)/(d-c)*(p-l)+l,d=i.min}if(c>=d&&c>i.max){if(d>i.max)continue;l=(i.max-c)/(d-c)*(p-l)+l,c=i.max}else if(d>=c&&d>i.max){if(c>i.max)continue;p=(i.max-c)/(d-c)*(p-l)+l,d=i.max}if(l<=p&&l<r.min){if(p<r.min)continue;c=(r.min-l)/(p-l)*(d-c)+c,l=r.min}else if(p<=l&&p<r.min){if(l<r.min)continue;d=(r.min-l)/(p-l)*(d-c)+c,p=r.min}if(l>=p&&l>r.max){if(p>r.max)continue;c=(r.max-l)/(p-l)*(d-c)+c,l=r.max}else if(p>=l&&p>r.max){if(l>r.max)continue;d=(r.max-l)/(p-l)*(d-c)+c,p=r.max}(l!=u||c!=a)&&h.moveTo(r.p2c(l)+t,i.p2c(c)+n),u=p,a=d,h.lineTo(r.p2c(p)+t,i.p2c(d)+n)}h.stroke()}function n(e,t,n){var r=e.points,i=e.pointsize,s=Math.min(Math.max(0,n.min),n.max),o=0,u,a=!1,f=1,l=0,c=0;for(;;){if(i>0&&o>r.length+i)break;o+=i;var p=r[o-i],d=r[o-i+f],v=r[o],m=r[o+f];if(a){if(i>0&&p!=null&&v==null){c=o,i=-i,f=2;continue}if(i<0&&o==l+i){h.fill(),a=!1,i=-i,f=1,o=l=c+i;continue}}if(p==null||v==null)continue;if(p<=v&&p<t.min){if(v<t.min)continue;d=(t.min-p)/(v-p)*(m-d)+d,p=t.min}else if(v<=p&&v<t.min){if(p<t.min)continue;m=(t.min-p)/(v-p)*(m-d)+d,v=t.min}if(p>=v&&p>t.max){if(v>t.max)continue;d=(t.max-p)/(v-p)*(m-d)+d,p=t.max}else if(v>=p&&v>t.max){if(p>t.max)continue;m=(t.max-p)/(v-p)*(m-d)+d,v=t.max}a||(h.beginPath(),h.moveTo(t.p2c(p),n.p2c(s)),a=!0);if(d>=n.max&&m>=n.max){h.lineTo(t.p2c(p),n.p2c(n.max)),h.lineTo(t.p2c(v),n.p2c(n.max));continue}if(d<=n.min&&m<=n.min){h.lineTo(t.p2c(p),n.p2c(n.min)),h.lineTo(t.p2c(v),n.p2c(n.min));continue}var g=p,y=v;d<=m&&d<n.min&&m>=n.min?(p=(n.min-d)/(m-d)*(v-p)+p,d=n.min):m<=d&&m<n.min&&d>=n.min&&(v=(n.min-d)/(m-d)*(v-p)+p,m=n.min),d>=m&&d>n.max&&m<=n.max?(p=(n.max-d)/(m-d)*(v-p)+p,d=n.max):m>=d&&m>n.max&&d<=n.max&&(v=(n.max-d)/(m-d)*(v-p)+p,m=n.max),p!=g&&h.lineTo(t.p2c(g),n.p2c(d)),h.lineTo(t.p2c(p),n.p2c(d)),h.lineTo(t.p2c(v),n.p2c(m)),v!=y&&(h.lineTo(t.p2c(v),n.p2c(m)),h.lineTo(t.p2c(y),n.p2c(m)))}}h.save(),h.translate(m.left,m.top),h.lineJoin="round";var r=e.lines.lineWidth,i=e.shadowSize;if(r>0&&i>0){h.lineWidth=i,h.strokeStyle="rgba(0,0,0,0.1)";var s=Math.PI/18;t(e.datapoints,Math.sin(s)*(r/2+i/2),Math.cos(s)*(r/2+i/2),e.xaxis,e.yaxis),h.lineWidth=i/2,t(e.datapoints,Math.sin(s)*(r/2+i/4),Math.cos(s)*(r/2+i/4),e.xaxis,e.yaxis)}h.lineWidth=r,h.strokeStyle=e.color;var o=rt(e.lines,e.color,0,y);o&&(h.fillStyle=o,n(e.datapoints,e.xaxis,e.yaxis)),r>0&&t(e.datapoints,0,0,e.xaxis,e.yaxis),h.restore()}function et(e){function t(e,t,n,r,i,s,o,u){var a=e.points,f=e.pointsize;for(var l=0;l<a.length;l+=f){var c=a[l],p=a[l+1];if(c==null||c<s.min||c>s.max||p<o.min||p>o.max)continue;h.beginPath(),c=s.p2c(c),p=o.p2c(p)+r,u=="circle"?h.arc(c,p,t,0,i?Math.PI:Math.PI*2,!1):u(h,c,p,t,i),h.closePath(),n&&(h.fillStyle=n,h.fill()),h.stroke()}}h.save(),h.translate(m.left,m.top);var n=e.points.lineWidth,r=e.shadowSize,i=e.points.radius,s=e.points.symbol;n==0&&(n=1e-4);if(n>0&&r>0){var o=r/2;h.lineWidth=o,h.strokeStyle="rgba(0,0,0,0.1)",t(e.datapoints,i,null,o+o/2,!0,e.xaxis,e.yaxis,s),h.strokeStyle="rgba(0,0,0,0.2)",t(e.datapoints,i,null,o/2,!0,e.xaxis,e.yaxis,s)}h.lineWidth=n,h.strokeStyle=e.color,t(e.datapoints,i,rt(e.points,e.color),0,!1,e.xaxis,e.yaxis,s),h.restore()}function tt(e,t,n,r,i,s,o,u,a,f,l,c){var 
h,p,d,v,m,g,y,b,w;l?(b=g=y=!0,m=!1,h=n,p=e,v=t+r,d=t+i,p<h&&(w=p,p=h,h=w,m=!0,g=!1)):(m=g=y=!0,b=!1,h=e+r,p=e+i,d=n,v=t,v<d&&(w=v,v=d,d=w,b=!0,y=!1));if(p<u.min||h>u.max||v<a.min||d>a.max)return;h<u.min&&(h=u.min,m=!1),p>u.max&&(p=u.max,g=!1),d<a.min&&(d=a.min,b=!1),v>a.max&&(v=a.max,y=!1),h=u.p2c(h),d=a.p2c(d),p=u.p2c(p),v=a.p2c(v),o&&(f.beginPath(),f.moveTo(h,d),f.lineTo(h,v),f.lineTo(p,v),f.lineTo(p,d),f.fillStyle=o(d,v),f.fill()),c>0&&(m||g||y||b)&&(f.beginPath(),f.moveTo(h,d+s),m?f.lineTo(h,v+s):f.moveTo(h,v+s),y?f.lineTo(p,v+s):f.moveTo(p,v+s),g?f.lineTo(p,d+s):f.moveTo(p,d+s),b?f.lineTo(h,d+s):f.moveTo(h,d+s),f.stroke())}function nt(e){function t(t,n,r,i,s,o,u){var a=t.points,f=t.pointsize;for(var l=0;l<a.length;l+=f){if(a[l]==null)continue;tt(a[l],a[l+1],a[l+2],n,r,i,s,o,u,h,e.bars.horizontal,e.bars.lineWidth)}}h.save(),h.translate(m.left,m.top),h.lineWidth=e.bars.lineWidth,h.strokeStyle=e.color;var n;switch(e.bars.align){case"left":n=0;break;case"right":n=-e.bars.barWidth;break;case"center":n=-e.bars.barWidth/2;break;default:throw new Error("Invalid bar alignment: "+e.bars.align)}var r=e.bars.fill?function(t,n){return rt(e.bars,e.color,t,n)}:null;t(e.datapoints,n,n+e.bars.barWidth,0,r,e.xaxis,e.yaxis),h.restore()}function rt(t,n,r,i){var s=t.fill;if(!s)return null;if(t.fillColor)return bt(t.fillColor,r,i,n);var o=e.color.parse(n);return o.a=typeof s=="number"?s:.4,o.normalize(),o.toString()}function it(){t.find(".legend").remove();if(!a.legend.show)return;var n=[],r=[],i=!1,s=a.legend.labelFormatter,o,f;for(var l=0;l<u.length;++l)o=u[l],o.label&&(f=s?s(o.label,o):o.label,f&&r.push({label:f,color:o.color}));if(a.legend.sorted)if(e.isFunction(a.legend.sorted))r.sort(a.legend.sorted);else if(a.legend.sorted=="reverse")r.reverse();else{var c=a.legend.sorted!="descending";r.sort(function(e,t){return e.label==t.label?0:e.label<t.label!=c?1:-1})}for(var l=0;l<r.length;++l){var h=r[l];l%a.legend.noColumns==0&&(i&&n.push("</tr>"),n.push("<tr>"),i=!0),n.push('<td class="legendColorBox"><div style="border:1px solid '+a.legend.labelBoxBorderColor+';padding:1px"><div style="width:4px;height:0;border:5px solid '+h.color+';overflow:hidden"></div></div></td>'+'<td class="legendLabel">'+h.label+"</td>")}i&&n.push("</tr>");if(n.length==0)return;var p='<table style="font-size:smaller;color:'+a.grid.color+'">'+n.join("")+"</table>";if(a.legend.container!=null)e(a.legend.container).html(p);else{var d="",v=a.legend.position,g=a.legend.margin;g[0]==null&&(g=[g,g]),v.charAt(0)=="n"?d+="top:"+(g[1]+m.top)+"px;":v.charAt(0)=="s"&&(d+="bottom:"+(g[1]+m.bottom)+"px;"),v.charAt(1)=="e"?d+="right:"+(g[0]+m.right)+"px;":v.charAt(1)=="w"&&(d+="left:"+(g[0]+m.left)+"px;");var y=e('<div class="legend">'+p.replace('style="','style="position:absolute;'+d+";")+"</div>").appendTo(t);if(a.legend.backgroundOpacity!=0){var b=a.legend.backgroundColor;b==null&&(b=a.grid.backgroundColor,b&&typeof b=="string"?b=e.color.parse(b):b=e.color.extract(y,"background-color"),b.a=1,b=b.toString());var w=y.children();e('<div style="position:absolute;width:'+w.width()+"px;height:"+w.height()+"px;"+d+"background-color:"+b+';"> </div>').prependTo(y).css("opacity",a.legend.backgroundOpacity)}}}function ut(e,t,n){var r=a.grid.mouseActiveRadius,i=r*r+1,s=null,o=!1,f,l,c;for(f=u.length-1;f>=0;--f){if(!n(u[f]))continue;var 
h=u[f],p=h.xaxis,d=h.yaxis,v=h.datapoints.points,m=p.c2p(e),g=d.c2p(t),y=r/p.scale,b=r/d.scale;c=h.datapoints.pointsize,p.options.inverseTransform&&(y=Number.MAX_VALUE),d.options.inverseTransform&&(b=Number.MAX_VALUE);if(h.lines.show||h.points.show)for(l=0;l<v.length;l+=c){var w=v[l],E=v[l+1];if(w==null)continue;if(w-m>y||w-m<-y||E-g>b||E-g<-b)continue;var S=Math.abs(p.p2c(w)-e),x=Math.abs(d.p2c(E)-t),T=S*S+x*x;T<i&&(i=T,s=[f,l/c])}if(h.bars.show&&!s){var N=h.bars.align=="left"?0:-h.bars.barWidth/2,C=N+h.bars.barWidth;for(l=0;l<v.length;l+=c){var w=v[l],E=v[l+1],k=v[l+2];if(w==null)continue;if(u[f].bars.horizontal?m<=Math.max(k,w)&&m>=Math.min(k,w)&&g>=E+N&&g<=E+C:m>=w+N&&m<=w+C&&g>=Math.min(k,E)&&g<=Math.max(k,E))s=[f,l/c]}}}return s?(f=s[0],l=s[1],c=u[f].datapoints.pointsize,{datapoint:u[f].datapoints.points.slice(l*c,(l+1)*c),dataIndex:l,series:u[f],seriesIndex:f}):null}function at(e){a.grid.hoverable&&ct("plothover",e,function(e){return e["hoverable"]!=0})}function ft(e){a.grid.hoverable&&ct("plothover",e,function(e){return!1})}function lt(e){ct("plotclick",e,function(e){return e["clickable"]!=0})}function ct(e,n,r){var i=c.offset(),s=n.pageX-i.left-m.left,o=n.pageY-i.top-m.top,u=L({left:s,top:o});u.pageX=n.pageX,u.pageY=n.pageY;var f=ut(s,o,r);f&&(f.pageX=parseInt(f.series.xaxis.p2c(f.datapoint[0])+i.left+m.left,10),f.pageY=parseInt(f.series.yaxis.p2c(f.datapoint[1])+i.top+m.top,10));if(a.grid.autoHighlight){for(var l=0;l<st.length;++l){var h=st[l];h.auto==e&&(!f||h.series!=f.series||h.point[0]!=f.datapoint[0]||h.point[1]!=f.datapoint[1])&&vt(h.series,h.point)}f&&dt(f.series,f.datapoint,e)}t.trigger(e,[u,f])}function ht(){var e=a.interaction.redrawOverlayInterval;if(e==-1){pt();return}ot||(ot=setTimeout(pt,e))}function pt(){ot=null,p.save(),l.clear(),p.translate(m.left,m.top);var e,t;for(e=0;e<st.length;++e)t=st[e],t.series.bars.show?yt(t.series,t.point):gt(t.series,t.point);p.restore(),E(b.drawOverlay,[p])}function dt(e,t,n){typeof e=="number"&&(e=u[e]);if(typeof t=="number"){var r=e.datapoints.pointsize;t=e.datapoints.points.slice(r*t,r*(t+1))}var i=mt(e,t);i==-1?(st.push({series:e,point:t,auto:n}),ht()):n||(st[i].auto=!1)}function vt(e,t){if(e==null&&t==null){st=[],ht();return}typeof e=="number"&&(e=u[e]);if(typeof t=="number"){var n=e.datapoints.pointsize;t=e.datapoints.points.slice(n*t,n*(t+1))}var r=mt(e,t);r!=-1&&(st.splice(r,1),ht())}function mt(e,t){for(var n=0;n<st.length;++n){var r=st[n];if(r.series==e&&r.point[0]==t[0]&&r.point[1]==t[1])return n}return-1}function gt(t,n){var r=n[0],i=n[1],s=t.xaxis,o=t.yaxis,u=typeof t.highlightColor=="string"?t.highlightColor:e.color.parse(t.color).scale("a",.5).toString();if(r<s.min||r>s.max||i<o.min||i>o.max)return;var a=t.points.radius+t.points.lineWidth/2;p.lineWidth=a,p.strokeStyle=u;var f=1.5*a;r=s.p2c(r),i=o.p2c(i),p.beginPath(),t.points.symbol=="circle"?p.arc(r,i,f,0,2*Math.PI,!1):t.points.symbol(p,r,i,f,!1),p.closePath(),p.stroke()}function yt(t,n){var r=typeof t.highlightColor=="string"?t.highlightColor:e.color.parse(t.color).scale("a",.5).toString(),i=r,s=t.bars.align=="left"?0:-t.bars.barWidth/2;p.lineWidth=t.bars.lineWidth,p.strokeStyle=r,tt(n[0],n[1],n[2]||0,s,s+t.bars.barWidth,0,function(){return i},t.xaxis,t.yaxis,p,t.bars.horizontal,t.bars.lineWidth)}function bt(t,n,r,i){if(typeof t=="string")return t;var s=h.createLinearGradient(0,r,0,n);for(var o=0,u=t.colors.length;o<u;++o){var a=t.colors[o];if(typeof a!="string"){var 
f=e.color.parse(i);a.brightness!=null&&(f=f.scale("rgb",a.brightness)),a.opacity!=null&&(f.a*=a.opacity),a=f.toString()}s.addColorStop(o/(u-1),a)}return s}var u=[],a={colors:["#edc240","#afd8f8","#cb4b4b","#4da74d","#9440ed"],legend:{show:!0,noColumns:1,labelFormatter:null,labelBoxBorderColor:"#ccc",container:null,position:"ne",margin:5,backgroundColor:null,backgroundOpacity:.85,sorted:null},xaxis:{show:null,position:"bottom",mode:null,font:null,color:null,tickColor:null,transform:null,inverseTransform:null,min:null,max:null,autoscaleMargin:null,ticks:null,tickFormatter:null,labelWidth:null,labelHeight:null,reserveSpace:null,tickLength:null,alignTicksWithAxis:null,tickDecimals:null,tickSize:null,minTickSize:null},yaxis:{autoscaleMargin:.02,position:"left"},xaxes:[],yaxes:[],series:{points:{show:!1,radius:3,lineWidth:2,fill:!0,fillColor:"#ffffff",symbol:"circle"},lines:{lineWidth:2,fill:!1,fillColor:null,steps:!1},bars:{show:!1,lineWidth:2,barWidth:1,fill:!0,fillColor:null,align:"left",horizontal:!1,zero:!0},shadowSize:3,highlightColor:null},grid:{show:!0,aboveData:!1,color:"#545454",backgroundColor:null,borderColor:null,tickColor:null,margin:0,labelMargin:5,axisMargin:8,borderWidth:2,minBorderMargin:null,markings:null,markingsColor:"#f4f4f4",markingsLineWidth:2,clickable:!1,hoverable:!1,autoHighlight:!0,mouseActiveRadius:10},interaction:{redrawOverlayInterval:1e3/60},hooks:{}},f=null,l=null,c=null,h=null,p=null,d=[],v=[],m={left:0,right:0,top:0,bottom
+:0},g=0,y=0,b={processOptions:[],processRawData:[],processDatapoints:[],processOffset:[],drawBackground:[],drawSeries:[],draw:[],bindEvents:[],drawOverlay:[],shutdown:[]},w=this;w.setData=T,w.setupGrid=R,w.draw=V,w.getPlaceholder=function(){return t},w.getCanvas=function(){return f.element},w.getPlotOffset=function(){return m},w.width=function(){return g},w.height=function(){return y},w.offset=function(){var e=c.offset();return e.left+=m.left,e.top+=m.top,e},w.getData=function(){return u},w.getAxes=function(){var t={},n;return e.each(d.concat(v),function(e,n){n&&(t[n.direction+(n.n!=1?n.n:"")+"axis"]=n)}),t},w.getXAxes=function(){return d},w.getYAxes=function(){return v},w.c2p=L,w.p2c=A,w.getOptions=function(){return a},w.highlight=dt,w.unhighlight=vt,w.triggerRedrawOverlay=ht,w.pointOffset=function(e){return{left:parseInt(d[C(e,"x")-1].p2c(+e.x)+m.left,10),top:parseInt(v[C(e,"y")-1].p2c(+e.y)+m.top,10)}},w.shutdown=H,w.resize=function(){var e=t.width(),n=t.height();f.resize(e,n),l.resize(e,n)},w.hooks=b,S(w),x(s),D(),T(r),R(),V(),P();var st=[],ot=null}function i(e,t){return t*Math.floor(e/t)}var t=Object.prototype.hasOwnProperty;n.prototype.resize=function(e,t){if(e<=0||t<=0)throw new Error("Invalid dimensions for plot, width = "+e+", height = "+t);var n=this.element,r=this.context,i=this.pixelRatio;this.width!=e&&(n.width=e*i,n.style.width=e+"px",this.width=e),this.height!=t&&(n.height=t*i,n.style.height=t+"px",this.height=t),r.restore(),r.save(),r.scale(i,i)},n.prototype.clear=function(){this.context.clearRect(0,0,this.width,this.height)},n.prototype.render=function(){var e=this._textCache;for(var n in e)if(t.call(e,n)){var r=this.getTextLayer(n),i=e[n];r.hide();for(var s in i)if(t.call(i,s)){var o=i[s];for(var u in o)if(t.call(o,u)){var a=o[u].positions;for(var f=0,l;l=a[f];f++)l.active?l.rendered||(r.append(l.element),l.rendered=!0):(a.splice(f--,1),l.rendered&&l.element.detach());a.length==0&&delete o[u]}}r.show()}},n.prototype.getTextLayer=function(t){var n=this.text[t];return n==null&&(this.textContainer==null&&(this.textContainer=e("<div class='flot-text'></div>").css({position:"absolute",top:0,left:0,bottom:0,right:0,"font-size":"smaller",color:"#545454"}).insertAfter(this.element)),n=this.text[t]=e("<div></div>").addClass(t).css({position:"absolute",top:0,left:0,bottom:0,right:0}).appendTo(this.textContainer)),n},n.prototype.getTextInfo=function(t,n,r,i,s){var o,u,a,f;n=""+n,typeof r=="object"?o=r.style+" "+r.variant+" "+r.weight+" "+r.size+"px/"+r.lineHeight+"px "+r.family:o=r,u=this._textCache[t],u==null&&(u=this._textCache[t]={}),a=u[o],a==null&&(a=u[o]={}),f=a[n];if(f==null){var l=e("<div></div>").html(n).css({position:"absolute","max-width":s,top:-9999}).appendTo(this.getTextLayer(t));typeof r=="object"?l.css({font:o,color:r.color}):typeof r=="string"&&l.addClass(r),f=a[n]={width:l.outerWidth(!0),height:l.outerHeight(!0),element:l,positions:[]},l.detach()}return f},n.prototype.addText=function(e,t,n,r,i,s,o,u,a){var f=this.getTextInfo(e,r,i,s,o),l=f.positions;u=="center"?t-=f.width/2:u=="right"&&(t-=f.width),a=="middle"?n-=f.height/2:a=="bottom"&&(n-=f.height);for(var c=0,h;h=l[c];c++)if(h.x==t&&h.y==n){h.active=!0;return}h={active:!0,rendered:!1,element:l.length?f.element.clone():f.element,x:t,y:n},l.push(h),h.element.css({top:Math.round(n),left:Math.round(t),"text-align":u})},n.prototype.removeText=function(e,n,r,i,s,o){if(i==null){var u=this._textCache[e];if(u!=null)for(var a in u)if(t.call(u,a)){var f=u[a];for(var l in f)if(t.call(f,l)){var c=f[l].positions;for(var 
h=0,p;p=c[h];h++)p.active=!1}}}else{var c=this.getTextInfo(e,i,s,o).positions;for(var h=0,p;p=c[h];h++)p.x==n&&p.y==r&&(p.active=!1)}},e.plot=function(t,n,i){var s=new r(e(t),n,i,e.plot.plugins);return s},e.plot.version="0.8.1",e.plot.plugins=[],e.fn.plot=function(t,n){return this.each(function(){e.plot(this,t,n)})}}(jQuery); \ No newline at end of file
diff --git a/deps/rabbitmq_management/priv/www/js/jquery.flot-0.8.1.time.js b/deps/rabbitmq_management/priv/www/js/jquery.flot-0.8.1.time.js
new file mode 100644
index 0000000000..7ec12753c1
--- /dev/null
+++ b/deps/rabbitmq_management/priv/www/js/jquery.flot-0.8.1.time.js
@@ -0,0 +1,431 @@
+/* Pretty handling of time axes.
+
+Copyright (c) 2007-2013 IOLA and Ole Laursen.
+Licensed under the MIT license.
+
+Set axis.mode to "time" to enable. See the section "Time series data" in
+API.txt for details.
+
+*/
+
+(function($) {
+
+ var options = {
+ xaxis: {
+ timezone: null, // "browser" for the client's local time, or a timezone name for timezone-js
+ timeformat: null, // format string to use
+ twelveHourClock: false, // use a 12-hour instead of a 24-hour clock in time mode
+ monthNames: null // list of names of months
+ }
+ };
+
+ // round n down to the nearest multiple of base
+
+ function floorInBase(n, base) {
+ return base * Math.floor(n / base);
+ }
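+
+ // Worked example (illustrative): floorInBase(37, 15) = 15 * Math.floor(37 / 15)
+ // = 30, i.e. 37 snapped down to a multiple of 15; the tick generator below
+ // uses this to start ticks on a round time boundary.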
+
+ // Returns a string with the date d formatted according to fmt.
+ // A subset of the Open Group's strftime format is supported.
+
+ function formatDate(d, fmt, monthNames, dayNames) {
+
+ if (typeof d.strftime == "function") {
+ return d.strftime(fmt);
+ }
+
+ var leftPad = function(n, pad) {
+ n = "" + n;
+ pad = "" + (pad == null ? "0" : pad);
+ return n.length == 1 ? pad + n : n;
+ };
+
+ var r = [];
+ var escape = false;
+ var hours = d.getHours();
+ var isAM = hours < 12;
+
+ if (monthNames == null) {
+ monthNames = ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"];
+ }
+
+ if (dayNames == null) {
+ dayNames = ["Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"];
+ }
+
+ var hours12;
+
+ if (hours > 12) {
+ hours12 = hours - 12;
+ } else if (hours == 0) {
+ hours12 = 12;
+ } else {
+ hours12 = hours;
+ }
+
+ for (var i = 0; i < fmt.length; ++i) {
+
+ var c = fmt.charAt(i);
+
+ if (escape) {
+ switch (c) {
+ case 'a': c = "" + dayNames[d.getDay()]; break;
+ case 'b': c = "" + monthNames[d.getMonth()]; break;
+ case 'd': c = leftPad(d.getDate()); break;
+ case 'e': c = leftPad(d.getDate(), " "); break;
+ case 'h': // For back-compat with 0.7; remove in 1.0
+ case 'H': c = leftPad(hours); break;
+ case 'I': c = leftPad(hours12); break;
+ case 'l': c = leftPad(hours12, " "); break;
+ case 'm': c = leftPad(d.getMonth() + 1); break;
+ case 'M': c = leftPad(d.getMinutes()); break;
+ // quarters not in Open Group's strftime specification
+ case 'q':
+ c = "" + (Math.floor(d.getMonth() / 3) + 1); break;
+ case 'S': c = leftPad(d.getSeconds()); break;
+ case 'y': c = leftPad(d.getFullYear() % 100); break;
+ case 'Y': c = "" + d.getFullYear(); break;
+ case 'p': c = (isAM) ? ("" + "am") : ("" + "pm"); break;
+ case 'P': c = (isAM) ? ("" + "AM") : ("" + "PM"); break;
+ case 'w': c = "" + d.getDay(); break;
+ }
+ r.push(c);
+ escape = false;
+ } else {
+ if (c == "%") {
+ escape = true;
+ } else {
+ r.push(c);
+ }
+ }
+ }
+
+ return r.join("");
+ }
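+
+ // Example (illustrative):
+ //
+ //   formatDate(new Date(Date.UTC(2013, 0, 15, 9, 5)), "%b %d %H:%M")
+ //
+ // returns "Jan 15 09:05" when the client runs in UTC; wrap the date with
+ // makeUtcWrapper (below) to get the same result in any time zone.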
+
+ // To get a consistent view of time-based data, independent of which time
+ // zone the client happens to be in, we need a date-like object that is
+ // independent of time zones. This is done through a wrapper that only
+ // calls the UTC versions of the accessor methods.
+
+ function makeUtcWrapper(d) {
+
+ function addProxyMethod(sourceObj, sourceMethod, targetObj, targetMethod) {
+ sourceObj[sourceMethod] = function() {
+ return targetObj[targetMethod].apply(targetObj, arguments);
+ };
+ }
+
+ var utc = {
+ date: d
+ };
+
+ // support strftime, if found
+
+ if (d.strftime != undefined) {
+ addProxyMethod(utc, "strftime", d, "strftime");
+ }
+
+ addProxyMethod(utc, "getTime", d, "getTime");
+ addProxyMethod(utc, "setTime", d, "setTime");
+
+ var props = ["Date", "Day", "FullYear", "Hours", "Milliseconds", "Minutes", "Month", "Seconds"];
+
+ for (var p = 0; p < props.length; p++) {
+ addProxyMethod(utc, "get" + props[p], d, "getUTC" + props[p]);
+ addProxyMethod(utc, "set" + props[p], d, "setUTC" + props[p]);
+ }
+
+ return utc;
+ }
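+
+ // Example (illustrative): the wrapper reads UTC fields regardless of the
+ // client's zone, so
+ //
+ //   makeUtcWrapper(new Date(Date.UTC(2013, 0, 15, 9, 5))).getHours()
+ //
+ // is 9 everywhere, while the plain Date's getHours() depends on the locale.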
+
+ // Select the time zone strategy. This returns a date-like object tied to
+ // the desired timezone.
+
+ function dateGenerator(ts, opts) {
+ if (opts.timezone == "browser") {
+ return new Date(ts);
+ } else if (!opts.timezone || opts.timezone == "utc") {
+ return makeUtcWrapper(new Date(ts));
+ } else if (typeof timezoneJS != "undefined" && typeof timezoneJS.Date != "undefined") {
+ var d = new timezoneJS.Date();
+ // timezone-js is fickle, so be sure to set the time zone before
+ // setting the time.
+ d.setTimezone(opts.timezone);
+ d.setTime(ts);
+ return d;
+ } else {
+ return makeUtcWrapper(new Date(ts));
+ }
+ }
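+
+ // Example (illustrative): dateGenerator(ts, { timezone: "browser" }) gives
+ // a plain Date in the client's local time; no timezone (or "utc") gives a
+ // UTC wrapper; any other name is handed to timezone-js when that library
+ // is present, with a UTC fallback otherwise.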
+
+ // map of approximate time unit sizes in milliseconds
+
+ var timeUnitSize = {
+ "second": 1000,
+ "minute": 60 * 1000,
+ "hour": 60 * 60 * 1000,
+ "day": 24 * 60 * 60 * 1000,
+ "month": 30 * 24 * 60 * 60 * 1000,
+ "quarter": 3 * 30 * 24 * 60 * 60 * 1000,
+ "year": 365.2425 * 24 * 60 * 60 * 1000
+ };
+
+ // the allowed tick sizes; above 1 year we switch to
+ // an integer algorithm
+
+ var baseSpec = [
+ [1, "second"], [2, "second"], [5, "second"], [10, "second"],
+ [30, "second"],
+ [1, "minute"], [2, "minute"], [5, "minute"], [10, "minute"],
+ [30, "minute"],
+ [1, "hour"], [2, "hour"], [4, "hour"],
+ [8, "hour"], [12, "hour"],
+ [1, "day"], [2, "day"], [3, "day"],
+ [0.25, "month"], [0.5, "month"], [1, "month"],
+ [2, "month"]
+ ];
+
+ // we don't know which variant(s) we'll need yet, but generating both is
+ // cheap
+
+ var specMonths = baseSpec.concat([[3, "month"], [6, "month"],
+ [1, "year"]]);
+ var specQuarters = baseSpec.concat([[1, "quarter"], [2, "quarter"],
+ [1, "year"]]);
+
+ function init(plot) {
+ plot.hooks.processOptions.push(function (plot, options) {
+ $.each(plot.getAxes(), function(axisName, axis) {
+
+ var opts = axis.options;
+
+ if (opts.mode == "time") {
+ axis.tickGenerator = function(axis) {
+
+ var ticks = [];
+ var d = dateGenerator(axis.min, opts);
+ var minSize = 0;
+
+ // allow quarters as tick units if they are
+ // mentioned in either of these options
+
+ var spec = (opts.tickSize && opts.tickSize[1] ===
+ "quarter") ||
+ (opts.minTickSize && opts.minTickSize[1] ===
+ "quarter") ? specQuarters : specMonths;
+
+ if (opts.minTickSize != null) {
+ if (typeof opts.tickSize == "number") {
+ minSize = opts.tickSize;
+ } else {
+ minSize = opts.minTickSize[0] * timeUnitSize[opts.minTickSize[1]];
+ }
+ }
+
+ for (var i = 0; i < spec.length - 1; ++i) {
+ if (axis.delta < (spec[i][0] * timeUnitSize[spec[i][1]]
+ + spec[i + 1][0] * timeUnitSize[spec[i + 1][1]]) / 2
+ && spec[i][0] * timeUnitSize[spec[i][1]] >= minSize) {
+ break;
+ }
+ }
+
+ var size = spec[i][0];
+ var unit = spec[i][1];
+
+ // special-case the possibility of several years
+
+ if (unit == "year") {
+
+ // if given a minTickSize in years, just use it,
+ // ensuring that it's an integer
+
+ if (opts.minTickSize != null && opts.minTickSize[1] == "year") {
+ size = Math.floor(opts.minTickSize[0]);
+ } else {
+
+ var magn = Math.pow(10, Math.floor(Math.log(axis.delta / timeUnitSize.year) / Math.LN10));
+ var norm = (axis.delta / timeUnitSize.year) / magn;
+
+ if (norm < 1.5) {
+ size = 1;
+ } else if (norm < 3) {
+ size = 2;
+ } else if (norm < 7.5) {
+ size = 5;
+ } else {
+ size = 10;
+ }
+
+ size *= magn;
+ }
+
+ // minimum size for years is 1
+
+ if (size < 1) {
+ size = 1;
+ }
+ }
+
+ axis.tickSize = opts.tickSize || [size, unit];
+ var tickSize = axis.tickSize[0];
+ unit = axis.tickSize[1];
+
+ var step = tickSize * timeUnitSize[unit];
+
+ if (unit == "second") {
+ d.setSeconds(floorInBase(d.getSeconds(), tickSize));
+ } else if (unit == "minute") {
+ d.setMinutes(floorInBase(d.getMinutes(), tickSize));
+ } else if (unit == "hour") {
+ d.setHours(floorInBase(d.getHours(), tickSize));
+ } else if (unit == "month") {
+ d.setMonth(floorInBase(d.getMonth(), tickSize));
+ } else if (unit == "quarter") {
+ d.setMonth(3 * floorInBase(d.getMonth() / 3,
+ tickSize));
+ } else if (unit == "year") {
+ d.setFullYear(floorInBase(d.getFullYear(), tickSize));
+ }
+
+ // reset smaller components
+
+ d.setMilliseconds(0);
+
+ if (step >= timeUnitSize.minute) {
+ d.setSeconds(0);
+ }
+ if (step >= timeUnitSize.hour) {
+ d.setMinutes(0);
+ }
+ if (step >= timeUnitSize.day) {
+ d.setHours(0);
+ }
+ if (step >= timeUnitSize.day * 4) {
+ d.setDate(1);
+ }
+ if (step >= timeUnitSize.month * 2) {
+ d.setMonth(floorInBase(d.getMonth(), 3));
+ }
+ if (step >= timeUnitSize.quarter * 2) {
+ d.setMonth(floorInBase(d.getMonth(), 6));
+ }
+ if (step >= timeUnitSize.year) {
+ d.setMonth(0);
+ }
+
+ var carry = 0;
+ var v = Number.NaN;
+ var prev;
+
+ do {
+
+ prev = v;
+ v = d.getTime();
+ ticks.push(v);
+
+ if (unit == "month" || unit == "quarter") {
+ if (tickSize < 1) {
+
+ // a bit complicated - we'll divide the
+ // month/quarter up but we need to take
+ // care of fractions so we don't end up in
+ // the middle of a day
+
+ d.setDate(1);
+ var start = d.getTime();
+ d.setMonth(d.getMonth() +
+ (unit == "quarter" ? 3 : 1));
+ var end = d.getTime();
+ d.setTime(v + carry * timeUnitSize.hour + (end - start) * tickSize);
+ carry = d.getHours();
+ d.setHours(0);
+ } else {
+ d.setMonth(d.getMonth() +
+ tickSize * (unit == "quarter" ? 3 : 1));
+ }
+ } else if (unit == "year") {
+ d.setFullYear(d.getFullYear() + tickSize);
+ } else {
+ d.setTime(v + step);
+ }
+ } while (v < axis.max && v != prev);
+
+ return ticks;
+ };
+
+ axis.tickFormatter = function (v, axis) {
+
+ var d = dateGenerator(v, axis.options);
+
+ // first check global format
+
+ if (opts.timeformat != null) {
+ return formatDate(d, opts.timeformat, opts.monthNames, opts.dayNames);
+ }
+
+ // possibly use quarters if quarters are mentioned in
+ // any of these places
+
+ var useQuarters = (axis.options.tickSize &&
+ axis.options.tickSize[1] == "quarter") ||
+ (axis.options.minTickSize &&
+ axis.options.minTickSize[1] == "quarter");
+
+ var t = axis.tickSize[0] * timeUnitSize[axis.tickSize[1]];
+ var span = axis.max - axis.min;
+ var suffix = (opts.twelveHourClock) ? " %p" : "";
+ var hourCode = (opts.twelveHourClock) ? "%I" : "%H";
+ var fmt;
+
+ if (t < timeUnitSize.minute) {
+ fmt = hourCode + ":%M:%S" + suffix;
+ } else if (t < timeUnitSize.day) {
+ if (span < 2 * timeUnitSize.day) {
+ fmt = hourCode + ":%M" + suffix;
+ } else {
+ fmt = "%b %d " + hourCode + ":%M" + suffix;
+ }
+ } else if (t < timeUnitSize.month) {
+ fmt = "%b %d";
+ } else if ((useQuarters && t < timeUnitSize.quarter) ||
+ (!useQuarters && t < timeUnitSize.year)) {
+ if (span < timeUnitSize.year) {
+ fmt = "%b";
+ } else {
+ fmt = "%b %Y";
+ }
+ } else if (useQuarters && t < timeUnitSize.year) {
+ if (span < timeUnitSize.year) {
+ fmt = "Q%q";
+ } else {
+ fmt = "Q%q %Y";
+ }
+ } else {
+ fmt = "%Y";
+ }
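+
+                    // Example (added commentary): hourly ticks (t equal to
+                    // timeUnitSize.hour) over a three-day span take the
+                    // t < timeUnitSize.day branch with span >= 2 days, yielding
+                    // "%b %d %H:%M" on a 24-hour clock.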
+
+ var rt = formatDate(d, fmt, opts.monthNames, opts.dayNames);
+
+ return rt;
+ };
+ }
+ });
+ });
+ }
+
+ $.plot.plugins.push({
+ init: init,
+ options: options,
+ name: 'time',
+ version: '1.0'
+ });
+
+ // Time-axis support used to be in Flot core, which exposed the
+ // formatDate function on the plot object. Various plugins depend
+ // on the function, so we need to re-expose it here.
+
+ $.plot.formatDate = formatDate;
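+
+    // Usage sketch (illustrative, not part of the original source): a
+    // plugin can call
+    //   $.plot.formatDate(someDate, "%Y-%m-%d %H:%M:%S")
+    // to render a date with the same strftime-style codes implemented by
+    // formatDate above; monthNames and dayNames may be passed as the
+    // optional third and fourth arguments.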
+
+})(jQuery);
diff --git a/deps/rabbitmq_management/priv/www/js/jquery.flot-0.8.1.time.min.js b/deps/rabbitmq_management/priv/www/js/jquery.flot-0.8.1.time.min.js
new file mode 100644
index 0000000000..21d8477238
--- /dev/null
+++ b/deps/rabbitmq_management/priv/www/js/jquery.flot-0.8.1.time.min.js
@@ -0,0 +1,9 @@
+/* Pretty handling of time axes.
+
+Copyright (c) 2007-2013 IOLA and Ole Laursen.
+Licensed under the MIT license.
+
+Set axis.mode to "time" to enable. See the section "Time series data" in
+API.txt for details.
+
+*/(function(e){function n(e,t){return t*Math.floor(e/t)}function r(e,t,n,r){if(typeof e.strftime=="function")return e.strftime(t);var i=function(e,t){return e=""+e,t=""+(t==null?"0":t),e.length==1?t+e:e},s=[],o=!1,u=e.getHours(),a=u<12;n==null&&(n=["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"]),r==null&&(r=["Sun","Mon","Tue","Wed","Thu","Fri","Sat"]);var f;u>12?f=u-12:u==0?f=12:f=u;for(var l=0;l<t.length;++l){var c=t.charAt(l);if(o){switch(c){case"a":c=""+r[e.getDay()];break;case"b":c=""+n[e.getMonth()];break;case"d":c=i(e.getDate());break;case"e":c=i(e.getDate()," ");break;case"h":case"H":c=i(u);break;case"I":c=i(f);break;case"l":c=i(f," ");break;case"m":c=i(e.getMonth()+1);break;case"M":c=i(e.getMinutes());break;case"q":c=""+(Math.floor(e.getMonth()/3)+1);break;case"S":c=i(e.getSeconds());break;case"y":c=i(e.getFullYear()%100);break;case"Y":c=""+e.getFullYear();break;case"p":c=a?"am":"pm";break;case"P":c=a?"AM":"PM";break;case"w":c=""+e.getDay()}s.push(c),o=!1}else c=="%"?o=!0:s.push(c)}return s.join("")}function i(e){function t(e,t,n,r){e[t]=function(){return n[r].apply(n,arguments)}}var n={date:e};e.strftime!=undefined&&t(n,"strftime",e,"strftime"),t(n,"getTime",e,"getTime"),t(n,"setTime",e,"setTime");var r=["Date","Day","FullYear","Hours","Milliseconds","Minutes","Month","Seconds"];for(var i=0;i<r.length;i++)t(n,"get"+r[i],e,"getUTC"+r[i]),t(n,"set"+r[i],e,"setUTC"+r[i]);return n}function s(e,t){if(t.timezone=="browser")return new Date(e);if(!t.timezone||t.timezone=="utc")return i(new Date(e));if(typeof timezoneJS!="undefined"&&typeof timezoneJS.Date!="undefined"){var n=new timezoneJS.Date;return n.setTimezone(t.timezone),n.setTime(e),n}return i(new Date(e))}function l(t){t.hooks.processOptions.push(function(t,i){e.each(t.getAxes(),function(e,t){var i=t.options;i.mode=="time"&&(t.tickGenerator=function(e){var t=[],r=s(e.min,i),u=0,l=i.tickSize&&i.tickSize[1]==="quarter"||i.minTickSize&&i.minTickSize[1]==="quarter"?f:a;i.minTickSize!=null&&(typeof i.tickSize=="number"?u=i.tickSize:u=i.minTickSize[0]*o[i.minTickSize[1]]);for(var c=0;c<l.length-1;++c)if(e.delta<(l[c][0]*o[l[c][1]]+l[c+1][0]*o[l[c+1][1]])/2&&l[c][0]*o[l[c][1]]>=u)break;var h=l[c][0],p=l[c][1];if(p=="year"){if(i.minTickSize!=null&&i.minTickSize[1]=="year")h=Math.floor(i.minTickSize[0]);else{var d=Math.pow(10,Math.floor(Math.log(e.delta/o.year)/Math.LN10)),v=e.delta/o.year/d;v<1.5?h=1:v<3?h=2:v<7.5?h=5:h=10,h*=d}h<1&&(h=1)}e.tickSize=i.tickSize||[h,p];var m=e.tickSize[0];p=e.tickSize[1];var g=m*o[p];p=="second"?r.setSeconds(n(r.getSeconds(),m)):p=="minute"?r.setMinutes(n(r.getMinutes(),m)):p=="hour"?r.setHours(n(r.getHours(),m)):p=="month"?r.setMonth(n(r.getMonth(),m)):p=="quarter"?r.setMonth(3*n(r.getMonth()/3,m)):p=="year"&&r.setFullYear(n(r.getFullYear(),m)),r.setMilliseconds(0),g>=o.minute&&r.setSeconds(0),g>=o.hour&&r.setMinutes(0),g>=o.day&&r.setHours(0),g>=o.day*4&&r.setDate(1),g>=o.month*2&&r.setMonth(n(r.getMonth(),3)),g>=o.quarter*2&&r.setMonth(n(r.getMonth(),6)),g>=o.year&&r.setMonth(0);var y=0,b=Number.NaN,w;do{w=b,b=r.getTime(),t.push(b);if(p=="month"||p=="quarter")if(m<1){r.setDate(1);var E=r.getTime();r.setMonth(r.getMonth()+(p=="quarter"?3:1));var S=r.getTime();r.setTime(b+y*o.hour+(S-E)*m),y=r.getHours(),r.setHours(0)}else r.setMonth(r.getMonth()+m*(p=="quarter"?3:1));else p=="year"?r.setFullYear(r.getFullYear()+m):r.setTime(b+g)}while(b<e.max&&b!=w);return t},t.tickFormatter=function(e,t){var n=s(e,t.options);if(i.timeformat!=null)return 
r(n,i.timeformat,i.monthNames,i.dayNames);var u=t.options.tickSize&&t.options.tickSize[1]=="quarter"||t.options.minTickSize&&t.options.minTickSize[1]=="quarter",a=t.tickSize[0]*o[t.tickSize[1]],f=t.max-t.min,l=i.twelveHourClock?" %p":"",c=i.twelveHourClock?"%I":"%H",h;a<o.minute?h=c+":%M:%S"+l:a<o.day?f<2*o.day?h=c+":%M"+l:h="%b %d "+c+":%M"+l:a<o.month?h="%b %d":u&&a<o.quarter||!u&&a<o.year?f<o.year?h="%b":h="%b %Y":u&&a<o.year?f<o.year?h="Q%q":h="Q%q %Y":h="%Y";var p=r(n,h,i.monthNames,i.dayNames);return p})})})}var t={xaxis:{timezone:null,timeformat:null,twelveHourClock:!1,monthNames:null}},o={second:1e3,minute:6e4,hour:36e5,day:864e5,month:2592e6,quarter:7776e6,year:525949.2*60*1e3},u=[[1,"second"],[2,"second"],[5,"second"],[10,"second"],[30,"second"],[1,"minute"],[2,"minute"],[5,"minute"],[10,"minute"],[30,"minute"],[1,"hour"],[2,"hour"],[4,"hour"],[8,"hour"],[12,"hour"],[1,"day"],[2,"day"],[3,"day"],[.25,"month"],[.5,"month"],[1,"month"],[2,"month"]],a=u.concat([[3,"month"],[6,"month"],[1,"year"]]),f=u.concat([[1,"quarter"],[2,"quarter"],[1,"year"]]);e.plot.plugins.push({init:l,options:t,name:"time",version:"1.0"}),e.plot.formatDate=r})(jQuery); \ No newline at end of file
diff --git a/deps/rabbitmq_management/priv/www/js/json2-2016.10.28.js b/deps/rabbitmq_management/priv/www/js/json2-2016.10.28.js
new file mode 100644
index 0000000000..c69dd69ff1
--- /dev/null
+++ b/deps/rabbitmq_management/priv/www/js/json2-2016.10.28.js
@@ -0,0 +1,506 @@
+// json2.js
+// 2016-10-28
+// Public Domain.
+// NO WARRANTY EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
+// See https://www.JSON.org/js.html
+// This code should be minified before deployment.
+// See https://javascript.crockford.com/jsmin.html
+
+// USE YOUR OWN COPY. IT IS EXTREMELY UNWISE TO LOAD CODE FROM SERVERS YOU DO
+// NOT CONTROL.
+
+// This file creates a global JSON object containing two methods: stringify
+// and parse. This file provides the ES5 JSON capability to ES3 systems.
+// If a project might run on IE8 or earlier, then this file should be included.
+// This file does nothing on ES5 systems.
+
+// JSON.stringify(value, replacer, space)
+// value any JavaScript value, usually an object or array.
+// replacer an optional parameter that determines how object
+// values are stringified for objects. It can be a
+// function or an array of strings.
+// space an optional parameter that specifies the indentation
+// of nested structures. If it is omitted, the text will
+// be packed without extra whitespace. If it is a number,
+// it will specify the number of spaces to indent at each
+// level. If it is a string (such as "\t" or "&nbsp;"),
+// it contains the characters used to indent at each level.
+// This method produces a JSON text from a JavaScript value.
+// When an object value is found, if the object contains a toJSON
+// method, its toJSON method will be called and the result will be
+// stringified. A toJSON method does not serialize: it returns the
+// value represented by the name/value pair that should be serialized,
+// or undefined if nothing should be serialized. The toJSON method
+// will be passed the key associated with the value, and this will be
+// bound to the value.
+
+// For example, this would serialize Dates as ISO strings.
+
+// Date.prototype.toJSON = function (key) {
+// function f(n) {
+// // Format integers to have at least two digits.
+// return (n < 10)
+// ? "0" + n
+// : n;
+// }
+// return this.getUTCFullYear() + "-" +
+// f(this.getUTCMonth() + 1) + "-" +
+// f(this.getUTCDate()) + "T" +
+// f(this.getUTCHours()) + ":" +
+// f(this.getUTCMinutes()) + ":" +
+// f(this.getUTCSeconds()) + "Z";
+// };
+
+// You can provide an optional replacer method. It will be passed the
+// key and value of each member, with this bound to the containing
+// object. The value that is returned from your method will be
+// serialized. If your method returns undefined, then the member will
+// be excluded from the serialization.
+
+// If the replacer parameter is an array of strings, then it will be
+// used to select the members to be serialized. It filters the results
+// such that only members with keys listed in the replacer array are
+// stringified.
+
+// Values that do not have JSON representations, such as undefined or
+// functions, will not be serialized. Such values in objects will be
+// dropped; in arrays they will be replaced with null. You can use
+// a replacer function to replace those with JSON values.
+
+// JSON.stringify(undefined) returns undefined.
+
+// The optional space parameter produces a stringification of the
+// value that is filled with line breaks and indentation to make it
+// easier to read.
+
+// If the space parameter is a non-empty string, then that string will
+// be used for indentation. If the space parameter is a number, then
+// the indentation will be that many spaces.
+
+// Example:
+
+// text = JSON.stringify(["e", {pluribus: "unum"}]);
+// // text is '["e",{"pluribus":"unum"}]'
+
+// text = JSON.stringify(["e", {pluribus: "unum"}], null, "\t");
+// // text is '[\n\t"e",\n\t{\n\t\t"pluribus": "unum"\n\t}\n]'
+
+// text = JSON.stringify([new Date()], function (key, value) {
+// return this[key] instanceof Date
+// ? "Date(" + this[key] + ")"
+// : value;
+// });
+// // text is '["Date(---current time---)"]'
+
+// JSON.parse(text, reviver)
+// This method parses a JSON text to produce an object or array.
+// It can throw a SyntaxError exception.
+
+// The optional reviver parameter is a function that can filter and
+// transform the results. It receives each of the keys and values,
+// and its return value is used instead of the original value.
+// If it returns what it received, then the structure is not modified.
+// If it returns undefined then the member is deleted.
+
+// Example:
+
+// // Parse the text. Values that look like ISO date strings will
+// // be converted to Date objects.
+
+// myData = JSON.parse(text, function (key, value) {
+// var a;
+// if (typeof value === "string") {
+// a =
+// /^(\d{4})-(\d{2})-(\d{2})T(\d{2}):(\d{2}):(\d{2}(?:\.\d*)?)Z$/.exec(value);
+// if (a) {
+// return new Date(Date.UTC(+a[1], +a[2] - 1, +a[3], +a[4],
+// +a[5], +a[6]));
+// }
+// }
+// return value;
+// });
+
+// myData = JSON.parse('["Date(09/09/2001)"]', function (key, value) {
+// var d;
+// if (typeof value === "string" &&
+// value.slice(0, 5) === "Date(" &&
+// value.slice(-1) === ")") {
+// d = new Date(value.slice(5, -1));
+// if (d) {
+// return d;
+// }
+// }
+// return value;
+// });
+
+// This is a reference implementation. You are free to copy, modify, or
+// redistribute.
+
+/*jslint
+ eval, for, this
+*/
+
+/*property
+ JSON, apply, call, charCodeAt, getUTCDate, getUTCFullYear, getUTCHours,
+ getUTCMinutes, getUTCMonth, getUTCSeconds, hasOwnProperty, join,
+ lastIndex, length, parse, prototype, push, replace, slice, stringify,
+ test, toJSON, toString, valueOf
+*/
+
+
+// Create a JSON object only if one does not already exist. We create the
+// methods in a closure to avoid creating global variables.
+
+if (typeof JSON !== "object") {
+ JSON = {};
+}
+
+(function () {
+ "use strict";
+
+ var rx_one = /^[\],:{}\s]*$/;
+ var rx_two = /\\(?:["\\\/bfnrt]|u[0-9a-fA-F]{4})/g;
+ var rx_three = /"[^"\\\n\r]*"|true|false|null|-?\d+(?:\.\d*)?(?:[eE][+\-]?\d+)?/g;
+ var rx_four = /(?:^|:|,)(?:\s*\[)+/g;
+ var rx_escapable = /[\\"\u0000-\u001f\u007f-\u009f\u00ad\u0600-\u0604\u070f\u17b4\u17b5\u200c-\u200f\u2028-\u202f\u2060-\u206f\ufeff\ufff0-\uffff]/g;
+ var rx_dangerous = /[\u0000\u00ad\u0600-\u0604\u070f\u17b4\u17b5\u200c-\u200f\u2028-\u202f\u2060-\u206f\ufeff\ufff0-\uffff]/g;
+
+ function f(n) {
+ // Format integers to have at least two digits.
+ return n < 10
+ ? "0" + n
+ : n;
+ }
+
+ function this_value() {
+ return this.valueOf();
+ }
+
+ if (typeof Date.prototype.toJSON !== "function") {
+
+ Date.prototype.toJSON = function () {
+
+ return isFinite(this.valueOf())
+ ? this.getUTCFullYear() + "-" +
+ f(this.getUTCMonth() + 1) + "-" +
+ f(this.getUTCDate()) + "T" +
+ f(this.getUTCHours()) + ":" +
+ f(this.getUTCMinutes()) + ":" +
+ f(this.getUTCSeconds()) + "Z"
+ : null;
+ };
+
+ Boolean.prototype.toJSON = this_value;
+ Number.prototype.toJSON = this_value;
+ String.prototype.toJSON = this_value;
+ }
+
+ var gap;
+ var indent;
+ var meta;
+ var rep;
+
+
+ function quote(string) {
+
+// If the string contains no control characters, no quote characters, and no
+// backslash characters, then we can safely slap some quotes around it.
+// Otherwise we must also replace the offending characters with safe escape
+// sequences.
+
+ rx_escapable.lastIndex = 0;
+ return rx_escapable.test(string)
+ ? "\"" + string.replace(rx_escapable, function (a) {
+ var c = meta[a];
+ return typeof c === "string"
+ ? c
+ : "\\u" + ("0000" + a.charCodeAt(0).toString(16)).slice(-4);
+ }) + "\""
+ : "\"" + string + "\"";
+ }
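+
+// Example (added commentary): quote('a"b') returns the six-character JSON
+// text "a\"b" because '"' matches rx_escapable and is replaced via the
+// meta table; a string with no special characters is simply wrapped in
+// quotes.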
+
+
+ function str(key, holder) {
+
+// Produce a string from holder[key].
+
+ var i; // The loop counter.
+ var k; // The member key.
+ var v; // The member value.
+ var length;
+ var mind = gap;
+ var partial;
+ var value = holder[key];
+
+// If the value has a toJSON method, call it to obtain a replacement value.
+
+ if (value && typeof value === "object" &&
+ typeof value.toJSON === "function") {
+ value = value.toJSON(key);
+ }
+
+// If we were called with a replacer function, then call the replacer to
+// obtain a replacement value.
+
+ if (typeof rep === "function") {
+ value = rep.call(holder, key, value);
+ }
+
+// What happens next depends on the value's type.
+
+ switch (typeof value) {
+ case "string":
+ return quote(value);
+
+ case "number":
+
+// JSON numbers must be finite. Encode non-finite numbers as null.
+
+ return isFinite(value)
+ ? String(value)
+ : "null";
+
+ case "boolean":
+ case "null":
+
+// If the value is a boolean or null, convert it to a string. Note:
+// typeof null does not produce "null". The case is included here in
+// the remote chance that this gets fixed someday.
+
+ return String(value);
+
+// If the type is "object", we might be dealing with an object or an array or
+// null.
+
+ case "object":
+
+// Due to a specification blunder in ECMAScript, typeof null is "object",
+// so watch out for that case.
+
+ if (!value) {
+ return "null";
+ }
+
+// Make an array to hold the partial results of stringifying this object value.
+
+ gap += indent;
+ partial = [];
+
+// Is the value an array?
+
+ if (Object.prototype.toString.apply(value) === "[object Array]") {
+
+// The value is an array. Stringify every element. Use null as a placeholder
+// for non-JSON values.
+
+ length = value.length;
+ for (i = 0; i < length; i += 1) {
+ partial[i] = str(i, value) || "null";
+ }
+
+// Join all of the elements together, separated with commas, and wrap them in
+// brackets.
+
+ v = partial.length === 0
+ ? "[]"
+ : gap
+ ? "[\n" + gap + partial.join(",\n" + gap) + "\n" + mind + "]"
+ : "[" + partial.join(",") + "]";
+ gap = mind;
+ return v;
+ }
+
+// If the replacer is an array, use it to select the members to be stringified.
+
+ if (rep && typeof rep === "object") {
+ length = rep.length;
+ for (i = 0; i < length; i += 1) {
+ if (typeof rep[i] === "string") {
+ k = rep[i];
+ v = str(k, value);
+ if (v) {
+ partial.push(quote(k) + (
+ gap
+ ? ": "
+ : ":"
+ ) + v);
+ }
+ }
+ }
+ } else {
+
+// Otherwise, iterate through all of the keys in the object.
+
+ for (k in value) {
+ if (Object.prototype.hasOwnProperty.call(value, k)) {
+ v = str(k, value);
+ if (v) {
+ partial.push(quote(k) + (
+ gap
+ ? ": "
+ : ":"
+ ) + v);
+ }
+ }
+ }
+ }
+
+// Join all of the member texts together, separated with commas,
+// and wrap them in braces.
+
+ v = partial.length === 0
+ ? "{}"
+ : gap
+ ? "{\n" + gap + partial.join(",\n" + gap) + "\n" + mind + "}"
+ : "{" + partial.join(",") + "}";
+ gap = mind;
+ return v;
+ }
+ }
+
+// If the JSON object does not yet have a stringify method, give it one.
+
+ if (typeof JSON.stringify !== "function") {
+ meta = { // table of character substitutions
+ "\b": "\\b",
+ "\t": "\\t",
+ "\n": "\\n",
+ "\f": "\\f",
+ "\r": "\\r",
+ "\"": "\\\"",
+ "\\": "\\\\"
+ };
+ JSON.stringify = function (value, replacer, space) {
+
+// The stringify method takes a value and an optional replacer, and an optional
+// space parameter, and returns a JSON text. The replacer can be a function
+// that can replace values, or an array of strings that will select the keys.
+// A default replacer method can be provided. Use of the space parameter can
+// produce text that is more easily readable.
+
+ var i;
+ gap = "";
+ indent = "";
+
+// If the space parameter is a number, make an indent string containing that
+// many spaces.
+
+ if (typeof space === "number") {
+ for (i = 0; i < space; i += 1) {
+ indent += " ";
+ }
+
+// If the space parameter is a string, it will be used as the indent string.
+
+ } else if (typeof space === "string") {
+ indent = space;
+ }
+
+// If there is a replacer, it must be a function or an array.
+// Otherwise, throw an error.
+
+ rep = replacer;
+ if (replacer && typeof replacer !== "function" &&
+ (typeof replacer !== "object" ||
+ typeof replacer.length !== "number")) {
+ throw new Error("JSON.stringify");
+ }
+
+// Make a fake root object containing our value under the key of "".
+// Return the result of stringifying the value.
+
+ return str("", {"": value});
+ };
+ }
+
+
+// If the JSON object does not yet have a parse method, give it one.
+
+ if (typeof JSON.parse !== "function") {
+ JSON.parse = function (text, reviver) {
+
+// The parse method takes a text and an optional reviver function, and returns
+// a JavaScript value if the text is a valid JSON text.
+
+ var j;
+
+ function walk(holder, key) {
+
+// The walk method is used to recursively walk the resulting structure so
+// that modifications can be made.
+
+ var k;
+ var v;
+ var value = holder[key];
+ if (value && typeof value === "object") {
+ for (k in value) {
+ if (Object.prototype.hasOwnProperty.call(value, k)) {
+ v = walk(value, k);
+ if (v !== undefined) {
+ value[k] = v;
+ } else {
+ delete value[k];
+ }
+ }
+ }
+ }
+ return reviver.call(holder, key, value);
+ }
+
+
+// Parsing happens in four stages. In the first stage, we replace certain
+// Unicode characters with escape sequences. JavaScript handles many characters
+// incorrectly, either silently deleting them, or treating them as line endings.
+
+ text = String(text);
+ rx_dangerous.lastIndex = 0;
+ if (rx_dangerous.test(text)) {
+ text = text.replace(rx_dangerous, function (a) {
+ return "\\u" +
+ ("0000" + a.charCodeAt(0).toString(16)).slice(-4);
+ });
+ }
+
+// In the second stage, we run the text against regular expressions that look
+// for non-JSON patterns. We are especially concerned with "()" and "new"
+// because they can cause invocation, and "=" because it can cause mutation.
+// But just to be safe, we want to reject all unexpected forms.
+
+// We split the second stage into 4 regexp operations in order to work around
+// crippling inefficiencies in IE's and Safari's regexp engines. First we
+// replace the JSON backslash pairs with "@" (a non-JSON character). Second, we
+// replace all simple value tokens with "]" characters. Third, we delete all
+// open brackets that follow a colon or comma or that begin the text. Finally,
+// we look to see that the remaining characters are only whitespace or "]" or
+// "," or ":" or "{" or "}". If that is so, then the text is safe for eval.
+
+ if (
+ rx_one.test(
+ text
+ .replace(rx_two, "@")
+ .replace(rx_three, "]")
+ .replace(rx_four, "")
+ )
+ ) {
+
+// In the third stage we use the eval function to compile the text into a
+// JavaScript structure. The "{" operator is subject to a syntactic ambiguity
+// in JavaScript: it can begin a block or an object literal. We wrap the text
+// in parens to eliminate the ambiguity.
+
+ j = eval("(" + text + ")");
+
+// In the optional fourth stage, we recursively walk the new structure, passing
+// each name/value pair to a reviver function for possible transformation.
+
+ return (typeof reviver === "function")
+ ? walk({"": j}, "")
+ : j;
+ }
+
+// If the text is not JSON parseable, then a SyntaxError is thrown.
+
+ throw new SyntaxError("JSON.parse");
+ };
+ }
+}());
diff --git a/deps/rabbitmq_management/priv/www/js/main.js b/deps/rabbitmq_management/priv/www/js/main.js
new file mode 100644
index 0000000000..46da9698d9
--- /dev/null
+++ b/deps/rabbitmq_management/priv/www/js/main.js
@@ -0,0 +1,1717 @@
+$(document).ready(function() {
+ if (enable_uaa) {
+ get(uaa_location + "/info", "application/json", function(req) {
+ if (req.status !== 200) {
+ replace_content('outer', format('login_uaa', {}));
+ replace_content('login-status', '<p class="warning">' + uaa_location + " does not appear to be a running UAA instance or may not have a trusted SSL certificate" + '</p> <button id="loginWindow" onclick="uaa_login_window()">Single Sign On</button>');
+ } else {
+ replace_content('outer', format('login_uaa', {}));
+ }
+ });
+ } else {
+ replace_content('outer', format('login', {}));
+ start_app_login();
+ }
+});
+
+function dispatcher_add(fun) {
+ dispatcher_modules.push(fun);
+ if (dispatcher_modules.length == extension_count) {
+ start_app();
+ }
+}
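+
+// Usage sketch (an assumption based on setup_extensions() below, which
+// loads each extension's JavaScript file): an extension script registers
+// itself with something like
+//   dispatcher_add(function(sammy) {
+//       sammy.get('#/my-extension', function() { /* render page */ });
+//   });
+// and start_app() runs once every loaded extension has called in.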
+
+function dispatcher() {
+ for (var i in dispatcher_modules) {
+ dispatcher_modules[i](this);
+ }
+}
+
+function set_auth_pref(userinfo) {
+ // clear a local storage value used by earlier versions
+ clear_local_pref('auth');
+
+ var b64 = b64_encode_utf8(userinfo);
+ var date = new Date();
+ var login_session_timeout = get_login_session_timeout();
+
+ if (login_session_timeout) {
+ date.setMinutes(date.getMinutes() + login_session_timeout);
+ } else {
+ // 8 hours from now
+ date.setHours(date.getHours() + 8);
+ }
+ store_cookie_value_with_expiration('auth', encodeURIComponent(b64), date);
+}
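+
+// Example (illustrative; "guest:guest" is a hypothetical credential):
+// set_auth_pref("guest:guest") base64-encodes and URI-escapes the pair
+// into the "auth" cookie that auth_header() later turns into an
+// authorization header; with no login_session_timeout the cookie is set
+// to expire 8 hours from now.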
+
+function getParameterByName(name) {
+ var match = RegExp('[#&]' + name + '=([^&]*)').exec(window.location.hash);
+ return match && decodeURIComponent(match[1].replace(/\+/g, ' '));
+}
+
+function getAccessToken() {
+ return getParameterByName('access_token');
+}
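+
+// Example (added commentary; hypothetical token value): with
+// window.location.hash equal to "#access_token=abc123&token_type=bearer",
+// getParameterByName('access_token') returns "abc123", which is exactly
+// the lookup getAccessToken() performs after a UAA redirect.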
+
+function start_app_login() {
+ app = new Sammy.Application(function () {
+ this.get('#/', function() {});
+ this.put('#/login', function() {
+ username = this.params['username'];
+ password = this.params['password'];
+ set_auth_pref(username + ':' + password);
+ check_login();
+ });
+ });
+ if (enable_uaa) {
+ var token = getAccessToken();
+ if (token != null) {
+ set_auth_pref(uaa_client_id + ':' + token);
+ store_pref('uaa_token', token);
+ check_login();
+        } else if (has_auth_cookie_value()) {
+            check_login();
+        }
+ } else {
+ app.run();
+ if (get_cookie_value('auth') != null) {
+ check_login();
+ }
+ }
+}
+
+
+function uaa_logout_window() {
+ uaa_invalid = true;
+ uaa_login_window();
+}
+
+function uaa_login_window() {
+ var redirect;
+    if (window.location.hash != "") {
+        redirect = window.location.href.split(window.location.hash)[0];
+    } else {
+        redirect = window.location.href;
+    }
+ var loginRedirectUrl;
+ if (uaa_invalid) {
+ loginRedirectUrl = Singular.properties.uaaLocation + '/logout.do?client_id=' + Singular.properties.clientId + '&redirect=' + redirect;
+ } else {
+ loginRedirectUrl = Singular.properties.uaaLocation + '/oauth/authorize?response_type=token&client_id=' + Singular.properties.clientId + '&redirect_uri=' + redirect;
+    }
+ window.open(loginRedirectUrl, "LOGIN_WINDOW");
+}
+
+function check_login() {
+ user = JSON.parse(sync_get('/whoami'));
+ if (user == false) {
+ // clear a local storage value used by earlier versions
+ clear_pref('auth');
+ clear_pref('uaa_token');
+ clear_cookie_value('auth');
+ if (enable_uaa) {
+ uaa_invalid = true;
+ replace_content('login-status', '<button id="loginWindow" onclick="uaa_login_window()">Log out</button>');
+ } else {
+ replace_content('login-status', '<p>Login failed</p>');
+ }
+ }
+ else {
+ hide_popup_warn();
+ replace_content('outer', format('layout', {}));
+ var user_login_session_timeout = parseInt(user.login_session_timeout);
+ // Update auth login_session_timeout if changed
+ if (has_auth_cookie_value() && !isNaN(user_login_session_timeout) &&
+ user_login_session_timeout !== get_login_session_timeout()) {
+
+ update_login_session_timeout(user_login_session_timeout);
+ }
+ setup_global_vars();
+ setup_constant_events();
+ update_vhosts();
+ update_interval();
+ setup_extensions();
+ }
+}
+
+function get_login_session_timeout() {
+    return parseInt(get_cookie_value('login_session_timeout'));
+}
+
+function update_login_session_timeout(login_session_timeout) {
+ var auth_info = get_cookie_value('auth');
+ var date = new Date();
+ // `login_session_timeout` minutes from now
+ date.setMinutes(date.getMinutes() + login_session_timeout);
+ store_cookie_value('login_session_timeout', login_session_timeout);
+ store_cookie_value_with_expiration('auth', auth_info, date);
+}
+
+function start_app() {
+ if (app !== undefined) {
+ app.unload();
+ }
+ // Oh boy. Sammy uses various different methods to determine if
+ // the URL hash has changed. Unsurprisingly this is a native event
+ // in modern browsers, and falls back to an icky polling function
+ // in MSIE. But it looks like there's a bug. The polling function
+ // should get installed when the app is started. But it's guarded
+ // behind if (Sammy.HashLocationProxy._interval != null). And of
+ // course that's not specific to the application; it's pretty
+ // global. So we need to manually clear that in order for links to
+ // work in MSIE.
+ // Filed as https://github.com/quirkey/sammy/issues/171
+ //
+ // Note for when we upgrade: HashLocationProxy has become
+ // DefaultLocationProxy in later versions, but otherwise the issue
+ // remains.
+
+    // As of Sammy 0.7.6 the _interval issue is fixed, so the workaround
+    // below is no longer needed; it is kept commented out for reference.
+    //Sammy.HashLocationProxy._interval = null;
+
+ app = new Sammy.Application(dispatcher);
+ app.run();
+
+ var url = this.location.toString();
+ var hash = this.location.hash;
+ var pathname = this.location.pathname;
+ if (url.indexOf('#') == -1) {
+ this.location = url + '#/';
+ } else if (hash.indexOf('#token_type') != - 1 && pathname == '/') {
+        // This is equivalent to the previous `if` clause when UAA
+        // authorisation is used. Tokens are passed in the URL hash, so the
+        // URL always contains a '#'. We check that the current path is `/`
+        // and a token is present, so that we can redirect to `/#/`.
+ this.location = url.replace(/#token_type.+/gi, "#/");
+ }
+}
+
+function setup_constant_events() {
+ $('#update-every').on('change', function() {
+ var interval = $(this).val();
+ store_pref('interval', interval);
+ if (interval == '')
+ interval = null;
+ else
+ interval = parseInt(interval);
+ set_timer_interval(interval);
+ });
+ $('#show-vhost').on('change', function() {
+ current_vhost = $(this).val();
+ store_pref('vhost', current_vhost);
+ update();
+ });
+ if (!vhosts_interesting) {
+ $('#vhost-form').hide();
+ }
+}
+
+function update_vhosts() {
+ var vhosts = JSON.parse(sync_get('/vhosts'));
+ vhosts_interesting = vhosts.length > 1;
+ if (vhosts_interesting)
+ $('#vhost-form').show();
+ else
+ $('#vhost-form').hide();
+ var select = $('#show-vhost').get(0);
+ select.options.length = vhosts.length + 1;
+ var index = 0;
+ for (var i = 0; i < vhosts.length; i++) {
+ var vhost = vhosts[i].name;
+ select.options[i + 1] = new Option(vhost, vhost);
+ if (vhost == current_vhost) index = i + 1;
+ }
+ select.selectedIndex = index;
+ current_vhost = select.options[index].value;
+ store_pref('vhost', current_vhost);
+}
+
+function setup_extensions() {
+ var extensions = JSON.parse(sync_get('/extensions'));
+ extension_count = 0;
+ for (var i in extensions) {
+ var extension = extensions[i];
+ if ($.isPlainObject(extension) && extension.hasOwnProperty("javascript")) {
+ dynamic_load(extension.javascript);
+ extension_count++;
+ }
+ }
+}
+
+function dynamic_load(filename) {
+ var element = document.createElement('script');
+ element.setAttribute('type', 'text/javascript');
+ element.setAttribute('src', 'js/' + filename);
+ document.getElementsByTagName("head")[0].appendChild(element);
+}
+
+function update_interval() {
+ var intervalStr = get_pref('interval');
+ var interval;
+
+ if (intervalStr == null) interval = 5000;
+ else if (intervalStr == '') interval = null;
+ else interval = parseInt(intervalStr);
+
+ if (isNaN(interval)) interval = null; // Prevent DoS if cookie malformed
+
+ set_timer_interval(interval);
+
+ var select = $('#update-every').get(0);
+ var opts = select.options;
+ for (var i = 0; i < opts.length; i++) {
+ if (opts[i].value == intervalStr) {
+ select.selectedIndex = i;
+ break;
+ }
+ }
+}
+
+function go_to(url) {
+ this.location = url;
+}
+
+function set_timer_interval(interval) {
+ timer_interval = interval;
+ reset_timer();
+}
+
+function reset_timer() {
+ clearInterval(timer);
+ if (timer_interval != null) {
+ timer = setInterval(partial_update, timer_interval);
+ }
+}
+
+function update_manual(div, query) {
+ var path;
+ var template;
+ if (query == 'memory' || query == 'binary') {
+ path = current_reqs['node']['path'] + '?' + query + '=true';
+ template = query;
+ }
+
+ var data = JSON.parse(sync_get(path));
+
+ replace_content(div, format(template, data));
+ postprocess_partial();
+}
+
+function render(reqs, template, highlight) {
+ var old_template = current_template;
+ current_template = template;
+ current_reqs = reqs;
+ for (var i in outstanding_reqs) {
+ outstanding_reqs[i].abort();
+ }
+ outstanding_reqs = [];
+ current_highlight = highlight;
+ if (old_template !== current_template) {
+ window.scrollTo(0, 0);
+ }
+ update();
+}
+
+function update() {
+ replace_content('debug', '');
+ clearInterval(timer);
+ with_update(function(html) {
+ update_navigation();
+ replace_content('main', html);
+ postprocess();
+ postprocess_partial();
+ render_charts();
+ maybe_scroll();
+ reset_timer();
+ });
+}
+
+function partial_update() {
+ if (!$(".pagination_class").is(":focus")) {
+ if ($('.updatable').length > 0) {
+ if (update_counter >= 200) {
+ update_counter = 0;
+ full_refresh();
+ return;
+ }
+ with_update(function(html) {
+ update_counter++;
+ replace_content('scratch', html);
+ var befores = $('#main .updatable');
+ var afters = $('#scratch .updatable');
+ if (befores.length != afters.length) {
+ console.log("before/after mismatch! Doing a full reload...");
+ full_refresh();
+ }
+ for (var i = 0; i < befores.length; i++) {
+ $(befores[i]).empty().append($(afters[i]).contents());
+ }
+ replace_content('scratch', '');
+ postprocess_partial();
+ render_charts();
+ });
+ }
+ }
+}
+
+function update_navigation() {
+ var l1 = '';
+ var l2 = '';
+ var descend = null;
+
+ for (var k in NAVIGATION) {
+ var val = NAVIGATION[k];
+ var path = val;
+ while (!leaf(path)) {
+ path = first_showable_child(path);
+ }
+ var selected = false;
+ if (contains_current_highlight(val)) {
+ selected = true;
+ if (!leaf(val)) {
+ descend = nav(val);
+ }
+ }
+ if (show(path)) {
+ l1 += '<li><a href="' + nav(path) + '"' +
+ (selected ? ' class="selected"' : '') + '>' + k + '</a></li>';
+ }
+ }
+
+ if (descend) {
+ l2 = obj_to_ul(descend);
+ $('#main').addClass('with-rhs');
+ }
+ else {
+ $('#main').removeClass('with-rhs');
+ }
+
+ replace_content('tabs', l1);
+ replace_content('rhs', l2);
+}
+
+function nav(pair) {
+ return pair[0];
+}
+
+function show(pair) {
+ return jQuery.inArray(pair[1], user_tags) != -1;
+}
+
+function leaf(pair) {
+ return typeof(nav(pair)) == 'string';
+}
+
+function first_showable_child(pair) {
+ var items = pair[0];
+ var ks = keys(items);
+ for (var i = 0; i < ks.length; i++) {
+ var child = items[ks[i]];
+ if (show(child)) return child;
+ }
+ return items[ks[0]]; // We'll end up not showing it anyway
+}
+
+function contains_current_highlight(val) {
+ if (leaf(val)) {
+ return current_highlight == nav(val);
+ }
+ else {
+ var b = false;
+ for (var k in val) {
+ b |= contains_current_highlight(val[k]);
+ }
+ return b;
+ }
+}
+
+function obj_to_ul(val) {
+ var res = '<ul>';
+ for (var k in val) {
+ var obj = val[k];
+ if (show(obj)) {
+ res += '<li>';
+ if (leaf(obj)) {
+ res += '<a href="' + nav(obj) + '"' +
+ (current_highlight == nav(obj) ? ' class="selected"' : '') +
+ '>' + k + '</a>';
+ }
+ else {
+ res += obj_to_ul(nav(obj));
+ }
+ res += '</li>';
+ }
+ }
+ return res + '</ul>';
+}
+
+function full_refresh() {
+ store_pref('position', x_position() + ',' + y_position());
+ location.reload();
+}
+
+function maybe_scroll() {
+ var pos = get_pref('position');
+ if (pos) {
+ clear_pref('position');
+ var xy = pos.split(",");
+ window.scrollTo(parseInt(xy[0]), parseInt(xy[1]));
+ }
+}
+
+function x_position() {
+ return window.pageXOffset ?
+ window.pageXOffset :
+ document.documentElement.scrollLeft ?
+ document.documentElement.scrollLeft :
+ document.body.scrollLeft;
+}
+
+function y_position() {
+ return window.pageYOffset ?
+ window.pageYOffset :
+ document.documentElement.scrollTop ?
+ document.documentElement.scrollTop :
+ document.body.scrollTop;
+}
+
+function with_update(fun) {
+ if(outstanding_reqs.length > 0){
+ return false;
+ }
+ with_reqs(apply_state(current_reqs), [], function(json) {
+ var html = format(current_template, json);
+ fun(html);
+ update_status('ok');
+ });
+ return true;
+}
+
+function apply_state(reqs) {
+ var reqs2 = {};
+ for (k in reqs) {
+ var req = reqs[k];
+ var options = {};
+ if (typeof(req) == "object") {
+ options = req.options;
+ req = req.path;
+ }
+ var req2;
+ if (options['vhost'] != undefined && current_vhost != '') {
+ var indexPage = req.indexOf("?page=");
+            if (indexPage > -1) {
+                var pageUrl = req.substr(indexPage);
+                req2 = req.substr(0, indexPage) + '/' + esc(current_vhost) + pageUrl;
+            } else {
+                req2 = req + '/' + esc(current_vhost);
+            }
+ }
+ else {
+ req2 = req;
+ }
+ var qs = [];
+ if (options['sort'] != undefined && current_sort != null) {
+ qs.push('sort=' + current_sort);
+ qs.push('sort_reverse=' + current_sort_reverse);
+ }
+ if (options['ranges'] != undefined) {
+ for (i in options['ranges']) {
+ var type = options['ranges'][i];
+ var range = get_pref('chart-range').split('|');
+ var prefix;
+ if (type.substring(0, 8) == 'lengths-') {
+ prefix = 'lengths';
+ }
+ else if (type.substring(0, 10) == 'msg-rates-') {
+ prefix = 'msg_rates';
+ }
+ else if (type.substring(0, 11) == 'data-rates-') {
+ prefix = 'data_rates';
+ }
+ else if (type == 'node-stats') {
+ prefix = 'node_stats';
+ }
+ qs.push(prefix + '_age=' + parseInt(range[0]));
+ qs.push(prefix + '_incr=' + parseInt(range[1]));
+ }
+ }
+ /* Unknown options are used as query parameters as is. */
+ Object.keys(options).forEach(function (key) {
+ /* Skip known keys we already handled and undefined parameters. */
+ if (key == 'vhost' || key == 'sort' || key == 'ranges')
+ return;
+ if (!key || options[key] == undefined)
+ return;
+
+ qs.push(esc(key) + '=' + esc(options[key]));
+ });
+ qs = qs.join('&');
+        if (qs != '') {
+            if (req2.indexOf("?page=") > -1)
+                qs = '&' + qs;
+            else
+                qs = '?' + qs;
+        }
+
+ reqs2[k] = req2 + qs;
+ }
+ return reqs2;
+}
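+
+// Worked example (hypothetical values): with current_vhost = "dev",
+// current_sort = "name" and current_sort_reverse = false, a request of
+//   {path: '/queues?page=1', options: {vhost: true, sort: true}}
+// is rewritten by apply_state() to
+//   '/queues/dev?page=1&sort=name&sort_reverse=false'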
+
+function show_popup(type, text, _mode) {
+ var cssClass = '.form-popup-' + type;
+ function hide() {
+ $(cssClass).fadeOut(100, function() {
+ $(this).remove();
+ });
+ }
+ hide();
+ $('#outer').after(format('popup', {'type': type, 'text': text}));
+ $(cssClass).fadeIn(100);
+ $(cssClass + ' span').on('click', function () {
+ $('.popup-owner').removeClass('popup-owner');
+ hide();
+ });
+}
+
+function hide_popup_warn() {
+ var cssClass = '.form-popup-warn';
+ $('.popup-owner').removeClass('popup-owner');
+ $(cssClass).fadeOut(100, function() {
+ $(this).remove();
+ });
+}
+
+function submit_import(form) {
+ if (form.file.value) {
+ var confirm_upload = confirm('Are you sure you want to import a definitions file? Some entities (vhosts, users, queues, etc) may be overwritten!');
+ if (confirm_upload === true) {
+ var file = form.file.files[0]; // FUTURE: limit upload file size (?)
+ var vhost_upload = $("select[name='vhost-upload'] option:selected");
+ var vhost_selected = vhost_upload.index() > 0;
+
+ var vhost_name = null;
+ if (vhost_selected) {
+ vhost_name = vhost_upload.val();
+ }
+
+ var vhost_part = '';
+ if (vhost_name) {
+ vhost_part = '/' + esc(vhost_name);
+ }
+
+            var form_action;
+            if (enable_uaa) {
+                form_action = "/definitions" + vhost_part + '?token=' + get_pref('uaa_token');
+            } else {
+                form_action = "/definitions" + vhost_part + '?auth=' + get_cookie_value('auth');
+            }
+ var fd = new FormData();
+ fd.append('file', file);
+ with_req('POST', form_action, fd, function(resp) {
+ show_popup('info', 'Your definitions were imported successfully.');
+ });
+ }
+ }
+ return false;
+}
+
+function postprocess() {
+ $('form.confirm-queue').on('submit', function() {
+ return confirm("Are you sure? The queue is going to be deleted. " +
+ "Messages cannot be recovered after deletion.");
+ });
+
+ $('form.confirm-purge-queue').on('submit', function() {
+ return confirm("Are you sure? Messages cannot be recovered after purging.");
+ });
+
+ $('form.confirm').on('submit', function() {
+ return confirm("Are you sure? This object cannot be recovered " +
+ "after deletion.");
+ });
+
+ $('label').map(function() {
+ if ($(this).attr('for') == '') {
+ var id = 'auto-label-' + Math.floor(Math.random()*1000000000);
+ var input = $(this).parents('tr').first().find('input, select');
+ if (input.attr('id') == '') {
+ $(this).attr('for', id);
+ input.attr('id', id);
+ }
+ }
+ });
+
+ $('#download-definitions').on('click', function() {
+ var idx = $("select[name='vhost-download'] option:selected").index();
+        var vhost = ((idx <= 0) ? "" : "/" + esc($("select[name='vhost-download'] option:selected").val()));
+        var path;
+        if (enable_uaa) {
+            path = 'api/definitions' + vhost + '?download=' +
+                esc($('#download-filename').val()) +
+                '&token=' + get_pref('uaa_token');
+        } else {
+            path = 'api/definitions' + vhost + '?download=' +
+                esc($('#download-filename').val()) +
+                '&auth=' + get_cookie_value('auth');
+        }
+ window.location = path;
+ setTimeout('app.run()');
+ return false;
+ });
+
+ $('.update-manual').on('click', function() {
+ update_manual($(this).attr('for'), $(this).attr('query'));
+ });
+
+ $(document).on('keyup', '.multifield input', function() {
+ update_multifields();
+ });
+
+ $(document).on('change', '.multifield select', function() {
+ update_multifields();
+ });
+
+ $('.controls-appearance').on('change', function() {
+ var params = $(this).get(0).options;
+ var selected = $(this).val();
+
+ for (i = 0; i < params.length; i++) {
+ var param = params[i].value;
+ if (param == selected) {
+ $('#' + param + '-div').slideDown(100);
+ } else {
+ $('#' + param + '-div').slideUp(100);
+ }
+ }
+ });
+
+ $(document).on('click', '.help', function() {
+ show_popup('help', HELP[$(this).attr('id')]);
+ });
+
+ $(document).on('click', '.popup-options-link', function() {
+ $('.popup-owner').removeClass('popup-owner');
+ $(this).addClass('popup-owner');
+ var template = $(this).attr('type') + '-options';
+ show_popup('options', format(template, {span: $(this)}), 'fade');
+ });
+
+ $(document).on('click', '.rate-visibility-option', function() {
+ var k = $(this).attr('data-pref');
+ var show = get_pref(k) !== 'true';
+ store_pref(k, '' + show);
+ partial_update();
+ });
+
+ $(document).on('focus', 'input, select', function() {
+ update_counter = 0; // If there's interaction, reset the counter.
+ });
+
+ $('.tag-link').on('click', function() {
+ $('#tags').val($(this).attr('tag'));
+ });
+
+ $('.argument-link').on('click', function() {
+ var field = $(this).attr('field');
+ var row = $('#' + field).find('.mf').last();
+ var key = row.find('input').first();
+ var value = row.find('input').last();
+ var type = row.find('select').last();
+ key.val($(this).attr('key'));
+ value.val($(this).attr('value'));
+ type.val($(this).attr('type'));
+ update_multifields();
+ });
+
+ $(document).on('click', 'form.auto-submit select, form.auto-submit input', function(){
+ $(this).parents('form').submit();
+ });
+
+ $('#filter').on('keyup', debounce(update_filter, 500));
+
+ $('#filter-regex-mode').on('change', update_filter_regex_mode);
+
+ $('#truncate').on('keyup', debounce(update_truncate, 500));
+
+ if (! user_administrator) {
+ $('.administrator-only').remove();
+ }
+
+ update_multifields();
+}
+
+function url_pagination_template(template, defaultPage, defaultPageSize){
+ var page_number_request = fmt_page_number_request(template, defaultPage);
+ var page_size = fmt_page_size_request(template, defaultPageSize);
+ var name_request = fmt_filter_name_request(template, "");
+ var use_regex = fmt_regex_request(template, "") == "checked";
+ if (use_regex) {
+ name_request = esc(name_request);
+ }
+ return '/' + template +
+ '?page=' + page_number_request +
+ '&page_size=' + page_size +
+ '&name=' + name_request +
+ '&use_regex=' + use_regex;
+}
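+
+// Example (added commentary; assumes the fmt_* helpers fall back to their
+// defaults when no filter preferences are stored):
+// url_pagination_template('queues', 1, 100) produces
+//   '/queues?page=1&page_size=100&name=&use_regex=false'
+// which renderQueues() below passes to render() as the request path.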
+
+function stored_page_info(template, page_start){
+ var pageSize = fmt_strip_tags($('#' + template+'-pagesize').val());
+ var filterName = fmt_strip_tags($('#' + template+'-name').val());
+
+ store_pref(template + '_current_page_number', page_start);
+ if (filterName != null && filterName != undefined) {
+ store_pref(template + '_current_filter_name', filterName);
+ }
+ var regex_on = $("#" + template + "-filter-regex-mode").is(':checked');
+
+ if (regex_on != null && regex_on != undefined) {
+ store_pref(template + '_current_regex', regex_on ? "checked" : " " );
+ }
+
+ if (pageSize != null && pageSize != undefined) {
+ store_pref(template + '_current_page_size', pageSize);
+ }
+}
+
+function update_pages(template, page_start){
+ stored_page_info(template, page_start);
+ switch (template) {
+ case 'queues' : renderQueues(); break;
+ case 'exchanges' : renderExchanges(); break;
+ case 'connections' : renderConnections(); break;
+ case 'channels' : renderChannels(); break;
+ }
+}
+
+function renderQueues() {
+ ensure_queues_chart_range();
+ render({'queues': {
+ path: url_pagination_template('queues', 1, 100),
+ options: {
+ sort: true,
+ vhost: true,
+ pagination: true
+ }
+ }, 'vhosts': '/vhosts'}, 'queues', '#/queues');
+}
+
+function renderExchanges() {
+ render({'exchanges': {path: url_pagination_template('exchanges', 1, 100),
+ options: {sort:true, vhost:true, pagination:true}},
+ 'vhosts': '/vhosts'}, 'exchanges', '#/exchanges');
+}
+
+function renderConnections() {
+ render({'connections': {path: url_pagination_template('connections', 1, 100),
+ options: {sort:true}}},
+ 'connections', '#/connections');
+}
+
+function renderChannels() {
+ render({'channels': {path: url_pagination_template('channels', 1, 100),
+ options: {sort:true}}},
+ 'channels', '#/channels');
+}
+
+function update_pages_from_ui(sender) {
+ var val = $(sender).val();
+ var raw = !!$(sender).attr('data-page-start') ? $(sender).attr('data-page-start') : val;
+ var s = fmt_escape_html(fmt_strip_tags(raw));
+ update_pages(current_template, s);
+}
+
+function postprocess_partial() {
+ $('.pagination_class_input').on('keypress', function(e) {
+ if (e.keyCode == 13) {
+ update_pages_from_ui(this);
+ }
+ });
+
+ $('.pagination_class_checkbox').on('click', function(e) {
+ update_pages_from_ui(this);
+ });
+
+ $('.pagination_class_select').on('change', function(e) {
+ update_pages_from_ui(this);
+ });
+
+ setup_visibility();
+
+ $('#main').off('click', 'div.section h2, div.section-hidden h2');
+ $('#main').on('click', 'div.section h2, div.section-hidden h2', function() {
+ toggle_visibility($(this));
+ });
+
+ $('.sort').on('click', function() {
+ var sort = $(this).attr('sort');
+ if (current_sort == sort) {
+ current_sort_reverse = ! current_sort_reverse;
+ }
+ else {
+ current_sort = sort;
+ current_sort_reverse = false;
+ }
+ update();
+ });
+
+ // TODO remove this hack when we get rid of "updatable"
+ if ($('#filter-warning-show').length > 0) {
+ $('#filter-truncate').addClass('filter-warning');
+ }
+ else {
+ $('#filter-truncate').removeClass('filter-warning');
+ }
+}
+
+function update_multifields() {
+ $('div.multifield').each(function(index) {
+ update_multifield($(this), true);
+ });
+}
+
+function update_multifield(multifield, dict) {
+ var largest_id = 0;
+ var empty_found = false;
+ var name = multifield.attr('id');
+ var type_inputs = $('#' + name + ' *[name$="_mftype"]');
+ type_inputs.each(function(index) {
+ var re = new RegExp(name + '_([0-9]*)_mftype');
+ var match = $(this).attr('name').match(re);
+ if (!match) return;
+ var id = parseInt(match[1]);
+ largest_id = Math.max(id, largest_id);
+ var prefix = name + '_' + id;
+ var type = $(this).val();
+ var input = $('#' + prefix + '_mfvalue');
+ if (type == 'list') {
+ if (input.length == 1) {
+ input.replaceWith('<div class="multifield-sub" id="' + prefix +
+ '"></div>');
+ }
+ update_multifield($('#' + prefix), false);
+ }
+ else {
+ if (input.length == 1) {
+ var key = dict ? $('#' + prefix + '_mfkey').val() : '';
+ var value = input.val();
+ if (key == '' && value == '') {
+ if (index == type_inputs.length - 1) {
+ empty_found = true;
+ }
+ else {
+ $(this).parents('.mf').first().remove();
+ }
+ }
+ }
+ else {
+ $('#' + prefix).replaceWith(multifield_input(prefix, 'value',
+ 'text'));
+ }
+ }
+ });
+ if (!empty_found) {
+ var prefix = name + '_' + (largest_id + 1);
+ var t = multifield.hasClass('string-only') ? 'hidden' : 'select';
+ var val_type = multifield_input(prefix, 'value', 'text') + ' ' +
+ multifield_input(prefix, 'type', t);
+
+ if (dict) {
+ multifield.append('<table class="mf"><tr><td>' +
+ multifield_input(prefix, 'key', 'text') +
+ '</td><td class="equals"> = </td><td>' +
+ val_type + '</td></tr></table>');
+ }
+ else {
+ multifield.append('<div class="mf">' + val_type + '</div>');
+ }
+ }
+}
+
+function multifield_input(prefix, suffix, type) {
+ if (type == 'hidden' ) {
+ return '<input type="hidden" id="' + prefix + '_mf' + suffix +
+ '" name="' + prefix + '_mf' + suffix + '" value="string"/>';
+ }
+ else if (type == 'text' ) {
+ return '<input type="text" id="' + prefix + '_mf' + suffix +
+ '" name="' + prefix + '_mf' + suffix + '" value=""/>';
+ }
+ else if (type == 'select' ) {
+ return '<select id="' + prefix + '_mf' + suffix + '" name="' + prefix +
+ '_mf' + suffix + '">' +
+ '<option value="string">String</option>' +
+ '<option value="number">Number</option>' +
+ '<option value="boolean">Boolean</option>' +
+ '<option value="list">List</option>' +
+ '</select>';
+ }
+}
+
+function update_filter_regex(jElem) {
+ current_filter_regex = null;
+ jElem.parents('.filter').children('.status-error').remove();
+ if (current_filter_regex_on && $.trim(current_filter).length > 0) {
+ try {
+ current_filter_regex = new RegExp(current_filter,'i');
+ } catch (e) {
+ jElem.parents('.filter').append('<p class="status-error">' +
+ fmt_escape_html(e.message) + '</p>');
+ }
+ }
+}
+
+function update_filter_regex_mode() {
+ current_filter_regex_on = $(this).is(':checked');
+ update_filter_regex($(this));
+ partial_update();
+}
+
+function update_filter() {
+ current_filter = $(this).val();
+ var table = $(this).parents('table').first();
+ table.removeClass('filter-active');
+ if ($(this).val() != '') {
+ table.addClass('filter-active');
+ }
+ update_filter_regex($(this));
+ partial_update();
+}
+
+function update_truncate() {
+ var current_truncate_str =
+ $(this).val().replace(new RegExp('\\D', 'g'), '');
+ if (current_truncate_str == '') {
+ current_truncate_str = '0';
+ }
+ if ($(this).val() != current_truncate_str) {
+ $(this).val(current_truncate_str);
+ }
+ var current_truncate = parseInt(current_truncate_str, 10);
+ store_pref('truncate', current_truncate);
+ partial_update();
+}
+
+function setup_visibility() {
+ $('div.section,div.section-hidden').each(function(_index) {
+ var pref = section_pref(current_template,
+ $(this).children('h2').text());
+ var show = get_pref(pref);
+ if (show == null) {
+ show = $(this).hasClass('section');
+ }
+ else {
+ show = show == 't';
+ }
+ if (show) {
+ $(this).addClass('section-visible');
+ // Workaround for... something. Although div.hider is
+ // display:block anyway, not explicitly setting this
+ // prevents the first slideToggle() from animating
+ // successfully; instead the element just vanishes.
+ $(this).find('.hider').attr('style', 'display:block;');
+ }
+ else {
+ $(this).addClass('section-invisible');
+ }
+ });
+}
+
+function toggle_visibility(item) {
+ var hider = item.next();
+ var all = item.parent();
+ var pref = section_pref(current_template, item.text());
+ item.next().slideToggle(100);
+ if (all.hasClass('section-visible')) {
+ if (all.hasClass('section'))
+ store_pref(pref, 'f');
+ else
+ clear_pref(pref);
+ all.removeClass('section-visible');
+ all.addClass('section-invisible');
+ }
+ else {
+ if (all.hasClass('section-hidden')) {
+ store_pref(pref, 't');
+ } else {
+ clear_pref(pref);
+ }
+ all.removeClass('section-invisible');
+ all.addClass('section-visible');
+ }
+}
+
+function publish_msg(params0) {
+ try {
+ var params = params_magic(params0);
+ publish_msg0(params);
+ } catch (e) {
+ show_popup('warn', fmt_escape_html(e));
+ return false;
+ }
+}
+
+function publish_msg0(params) {
+ var path = fill_path_template('/exchanges/:vhost/:name/publish', params);
+ params['payload_encoding'] = 'string';
+ params['properties'] = {};
+ params['properties']['delivery_mode'] = parseInt(params['delivery_mode']);
+ if (params['headers'] != '')
+ params['properties']['headers'] = params['headers'];
+ var props = [['content_type', 'str'],
+ ['content_encoding', 'str'],
+ ['correlation_id', 'str'],
+ ['reply_to', 'str'],
+ ['expiration', 'str'],
+ ['message_id', 'str'],
+ ['type', 'str'],
+ ['user_id', 'str'],
+ ['app_id', 'str'],
+ ['cluster_id', 'str'],
+ ['priority', 'int'],
+ ['timestamp', 'int']];
+ for (var i in props) {
+ var name = props[i][0];
+ var type = props[i][1];
+ if (params['props'][name] != undefined && params['props'][name] != '') {
+ var value = params['props'][name];
+ if (type == 'int') value = parseInt(value);
+ params['properties'][name] = value;
+ }
+ }
+ with_req('POST', path, JSON.stringify(params), function(resp) {
+ var result = JSON.parse(resp.responseText);
+ if (result.routed) {
+ show_popup('info', 'Message published.');
+ } else {
+ show_popup('warn', 'Message published, but not routed.');
+ }
+ });
+}
+
+function get_msgs(params) {
+ var path = fill_path_template('/queues/:vhost/:name/get', params);
+ with_req('POST', path, JSON.stringify(params), function(resp) {
+ var msgs = JSON.parse(resp.responseText);
+ if (msgs.length == 0) {
+ show_popup('info', 'Queue is empty');
+ } else {
+ $('#msg-wrapper').slideUp(200);
+ replace_content('msg-wrapper', format('messages', {'msgs': msgs}));
+ $('#msg-wrapper').slideDown(200);
+ }
+ });
+}
+
+function with_reqs(reqs, acc, fun) {
+ if (keys(reqs).length > 0) {
+ var key = keys(reqs)[0];
+ with_req('GET', reqs[key], null, function(resp) {
+ acc[key] = JSON.parse(resp.responseText);
+ var remainder = {};
+ for (var k in reqs) {
+ if (k != key) remainder[k] = reqs[k];
+ }
+ with_reqs(remainder, acc, fun);
+ });
+ }
+ else {
+ fun(acc);
+ }
+}
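+
+// Sketch of the recursion above (added commentary): given
+//   {queues: '/queues', vhosts: '/vhosts'}
+// with_reqs() fetches one path at a time, stores each parsed response
+// under its key in acc, recurses on the remaining map, and calls
+// fun(acc) only once every request has completed.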
+
+function replace_content(id, html) {
+ $("#" + id).html(html);
+}
+
+var ejs_cached = {};
+
+function format(template, json) {
+ try {
+ var cache = true;
+ if (!(template in ejs_cached)) {
+ ejs_cached[template] = true;
+ cache = false;
+ }
+ var tmpl = new EJS({url: 'js/tmpl/' + template + '.ejs', cache: cache});
+ return tmpl.render(json);
+ } catch (err) {
+ clearInterval(timer);
+ console.log("Uncaught error: " + err);
+ console.log("Stack: " + err['stack']);
+ debug(err['name'] + ": " + err['message'] + "\n" + err['stack'] + "\n");
+ }
+}
+
+function update_status(status) {
+ var text;
+ if (status == 'ok')
+ text = "Refreshed " + fmt_date(new Date());
+ else if (status == 'error') {
+ var next_try = new Date(new Date().getTime() + timer_interval);
+ text = "Error: could not connect to server since " +
+ fmt_date(last_successful_connect) + ". Will retry at " +
+ fmt_date(next_try) + ".";
+ }
+ else
+ throw("Unknown status " + status);
+
+ var html = format('status', {status: status, text: text});
+ replace_content('status', html);
+}
+
+function has_auth_cookie_value() {
+ return get_cookie_value('auth') != null;
+}
+
+function auth_header() {
+ if(has_auth_cookie_value() && enable_uaa) {
+ return "Bearer " + decodeURIComponent(get_pref('uaa_token'));
+ } else {
+ if(has_auth_cookie_value()) {
+ return "Basic " + decodeURIComponent(get_cookie_value('auth'));
+ } else {
+ return null;
+ }
+ }
+}
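+
+// Example (illustrative; "guest:guest" is a placeholder credential):
+// with UAA disabled and an auth cookie holding base64("guest:guest"),
+// auth_header() returns "Basic Z3Vlc3Q6Z3Vlc3Q=", which with_req()
+// then sets as the authorization request header.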
+
+function with_req(method, path, body, fun) {
+ if(!has_auth_cookie_value()) {
+ // navigate to the login form
+ location.reload();
+ return;
+ }
+
+ var json;
+ var req = xmlHttpRequest();
+ req.open(method, 'api' + path, true );
+ var header = auth_header();
+ if (header !== null) {
+ req.setRequestHeader('authorization', header);
+ }
+ req.setRequestHeader('x-vhost', current_vhost);
+ req.onreadystatechange = function () {
+ if (req.readyState == 4) {
+ var ix = jQuery.inArray(req, outstanding_reqs);
+ if (ix != -1) {
+ outstanding_reqs.splice(ix, 1);
+ }
+ if (check_bad_response(req, true)) {
+ last_successful_connect = new Date();
+ fun(req);
+ }
+ }
+ };
+ outstanding_reqs.push(req);
+ req.send(body);
+}
+
+function get(url, accept, callback) {
+ var req = new XMLHttpRequest();
+ req.open("GET", url);
+ req.setRequestHeader("Accept", accept);
+ req.send();
+
+ req.onreadystatechange = function() {
+ if (req.readyState == XMLHttpRequest.DONE) {
+ callback(req);
+ }
+ };
+}
+
+function sync_get(path) {
+ return sync_req('GET', [], path);
+}
+
+function sync_put(sammy, path_template) {
+ return sync_req('PUT', sammy.params, path_template);
+}
+
+function sync_delete(sammy, path_template, options) {
+ return sync_req('DELETE', sammy.params, path_template, options);
+}
+
+function sync_post(sammy, path_template) {
+ return sync_req('POST', sammy.params, path_template);
+}
+
+function sync_req(type, params0, path_template, options) {
+ var params;
+ var path;
+ try {
+ params = params_magic(params0);
+ path = fill_path_template(path_template, params);
+ } catch (e) {
+ show_popup('warn', fmt_escape_html(e));
+ return false;
+ }
+ var req = xmlHttpRequest();
+ req.open(type, 'api' + path, false);
+ req.setRequestHeader('content-type', 'application/json');
+ req.setRequestHeader('authorization', auth_header());
+
+    // Loose != null deliberately covers both null and undefined.
+    if (options != null && options.headers != null) {
+        jQuery.each(options.headers, function (k, v) {
+            req.setRequestHeader(k, v);
+        });
+    }
+
+ try {
+ if (type == 'GET')
+ req.send(null);
+ else
+ req.send(JSON.stringify(params));
+ }
+ catch (e) {
+ if (e.number == 0x80004004) {
+ // 0x80004004 means "Operation aborted."
+ // https://support.microsoft.com/kb/186063
+ // MSIE6 appears to do this in response to HTTP 204.
+ }
+ }
+
+ if (check_bad_response(req, false)) {
+ if (type == 'GET')
+ return req.responseText;
+ else
+ // rabbitmq/rabbitmq-management#732
+ // https://developer.mozilla.org/en-US/docs/Glossary/Truthy
+ return {result: true, http_status: req.status, req_params: params};
+ }
+ else {
+ return false;
+ }
+}
+
+function check_bad_response(req, full_page_404) {
+ // 1223 == 204 - see https://www.enhanceie.com/ie/bugs.asp
+ // MSIE7 and 8 appear to do this in response to HTTP 204.
+ if ((req.status >= 200 && req.status < 300) || req.status == 1223) {
+ return true;
+ }
+ else if (req.status == 404 && full_page_404) {
+ var html = format('404', {});
+ replace_content('main', html);
+ }
+ else if (req.status >= 400 && req.status <= 404) {
+        var body = JSON.parse(req.responseText);
+        var reason = body.reason;
+        if (typeof(reason) != 'string') reason = JSON.stringify(reason);
+
+        var error = body.error;
+        if (typeof(error) != 'string') error = JSON.stringify(error);
+
+ if (error == 'bad_request' || error == 'not_found' || error == 'not_authorised') {
+ show_popup('warn', fmt_escape_html(reason));
+ } else if (error == 'page_out_of_range') {
+ var seconds = 60;
+ if (last_page_out_of_range_error > 0)
+ seconds = (new Date().getTime() - last_page_out_of_range_error.getTime())/1000;
+ if (seconds > 3) {
+ Sammy.log('server reports page is out of range, redirecting to page 1');
+ var contexts = ["queues", "exchanges", "connections", "channels"];
+ var matches = /api\/(.*)\?/.exec(req.responseURL);
+ if (matches != null && matches.length > 1) {
+ contexts.forEach(function(item) {
+ if (matches[1].indexOf(item) == 0) {update_pages(item, 1)};
+ });
+ } else update_pages(current_template, 1);
+
+ last_page_out_of_range_error = new Date();
+ }
+ }
+ }
+ else if (req.status == 408) {
+ update_status('timeout');
+ }
+ else if (req.status == 0) { // Non-MSIE: could not connect
+ update_status('error');
+ }
+ else if (req.status > 12000) { // MSIE: could not connect
+ update_status('error');
+ }
+ else if (req.status == 503) { // Proxy: could not connect
+ update_status('error');
+ }
+ else {
+ debug("Management API returned status code " + req.status + " - <strong>" + fmt_escape_html_one_line(req.responseText) + "</strong>");
+ clearInterval(timer);
+ }
+
+ return false;
+}
+
+function fill_path_template(template, params) {
+ var re = /:[a-zA-Z_]*/g;
+ return template.replace(re, function(m) {
+ var str = esc(params[m.substring(1)]);
+ if (str == '') {
+ throw(m.substring(1) + " is required");
+ }
+ return str;
+ });
+}
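+
+// Illustrative sketch only (assuming esc() URI-encodes its argument):
+//   fill_path_template('/queues/:vhost/:name', {vhost: '/', name: 'q1'})
+// would yield '/queues/%2F/q1', and a missing parameter throws.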
+
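+// params_magic() pipelines three transforms over the form params: collapse
+// the _mfkey/_mfvalue/_mftype multifield triples, drop fields hidden by
+// .controls-appearance selectors, and validate/strip password confirmation.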
+function params_magic(params) {
+ return check_password(maybe_remove_fields(collapse_multifields(params)));
+}
+
+function collapse_multifields(params0) {
+ function set(x) { return x != '' && x != undefined }
+
+ var params = {};
+ var ks = keys(params0);
+ var ids = [];
+ for (i in ks) {
+ var key = ks[i];
+ var match = key.match(/([a-z]*)_([0-9_]*)_mftype/);
+ var match2 = key.match(/[a-z]*_[0-9_]*_mfkey/);
+ var match3 = key.match(/[a-z]*_[0-9_]*_mfvalue/);
+ if (match == null && match2 == null && match3 == null) {
+ params[key] = params0[key];
+ }
+ else if (match == null) {
+ // Do nothing, value is handled below
+ }
+ else {
+ var name = match[1];
+ var id = match[2];
+ ids.push([name, id]);
+ }
+ }
+ ids.sort();
+ var id_map = {};
+ for (i in ids) {
+ var name = ids[i][0];
+ var id = ids[i][1];
+ if (params[name] == undefined) {
+ params[name] = {};
+ id_map[name] = {};
+ }
+ var id_parts = id.split('_');
+ var k = params0[name + '_' + id_parts[0] + '_mfkey'];
+ var v = params0[name + '_' + id + '_mfvalue'];
+ var t = params0[name + '_' + id + '_mftype'];
+ var val = null;
+ var top_level = id_parts.length == 1;
+ if (t == 'list') {
+ val = [];
+ id_map[name][id] = val;
+ }
+ else if ((set(k) && top_level) || set(v)) {
+ if (t == 'boolean') {
+ if (v != 'true' && v != 'false')
+ throw(k + ' must be "true" or "false"; got ' + v);
+ val = (v == 'true');
+ }
+ else if (t == 'number') {
+ var n = parseFloat(v);
+ if (isNaN(n))
+ throw(k + ' must be a number; got ' + v);
+ val = n;
+ }
+ else {
+ val = v;
+ }
+ }
+ if (val != null) {
+ if (top_level) {
+ params[name][k] = val;
+ }
+ else {
+ var prefix = id_parts.slice(0, id_parts.length - 1).join('_');
+ id_map[name][prefix].push(val);
+ }
+ }
+ }
+    if (params.hasOwnProperty('queuetype')) {
+        delete params['queuetype'];
+        // guard against a TypeError when no arguments multifield was submitted
+        if (params['arguments'] == undefined) params['arguments'] = {};
+        params['arguments']['x-queue-type'] = queue_type;
+ if (queue_type == 'quorum' ||
+ queue_type == 'stream') {
+ params['durable'] = true;
+ params['auto_delete'] = false;
+ }
+ }
+ return params;
+}
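+
+// Worked example (field names assumed, matching the regexes above):
+//   {arguments_1_mfkey: 'x-message-ttl', arguments_1_mfvalue: '60000',
+//    arguments_1_mftype: 'number'}
+// collapses to {arguments: {'x-message-ttl': 60000}}.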
+
+function check_password(params) {
+ if (params['password'] != undefined) {
+ if (params['password'] == '') {
+ throw("Please specify a password.");
+ }
+ if (params['password'] != params['password_confirm']) {
+ throw("Passwords do not match.");
+ }
+ delete params['password_confirm'];
+ }
+
+ return params;
+}
+
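+// For each <select class="controls-appearance">, drop the params belonging
+// to the options the user did not select, then drop the select's own name
+// from the params.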
+function maybe_remove_fields(params) {
+ $('.controls-appearance').each(function(index) {
+ var options = $(this).get(0).options;
+ var selected = $(this).val();
+
+ for (i = 0; i < options.length; i++) {
+ var option = options[i].value;
+ if (option != selected) {
+ delete params[option];
+ }
+ }
+ delete params[$(this).attr('name')];
+ });
+ return params;
+}
+
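+// Coerces the remaining form params (numbers, booleans, space-separated
+// arrays), then PUTs them as the 'value' of a runtime parameter at
+// /parameters/:component/:vhost/:name.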
+function put_parameter(sammy, mandatory_keys, num_keys, bool_keys,
+ arrayable_keys) {
+ for (var i in sammy.params) {
+ if (i === 'length' || !sammy.params.hasOwnProperty(i)) continue;
+ if (sammy.params[i] == '' && jQuery.inArray(i, mandatory_keys) == -1) {
+ delete sammy.params[i];
+ }
+ else if (jQuery.inArray(i, num_keys) != -1) {
+ sammy.params[i] = parseInt(sammy.params[i]);
+ }
+ else if (jQuery.inArray(i, bool_keys) != -1) {
+ sammy.params[i] = sammy.params[i] == 'true';
+ }
+ else if (jQuery.inArray(i, arrayable_keys) != -1) {
+ sammy.params[i] = sammy.params[i].split(' ');
+ if (sammy.params[i].length == 1) {
+ sammy.params[i] = sammy.params[i][0];
+ }
+ }
+ }
+ var params = {"component": sammy.params.component,
+ "vhost": sammy.params.vhost,
+ "name": sammy.params.name,
+ "value": params_magic(sammy.params)};
+ delete params.value.vhost;
+ delete params.value.component;
+ delete params.value.name;
+ sammy.params = params;
+ if (sync_put(sammy, '/parameters/:component/:vhost/:name')) update();
+}
+
+function put_cast_params(sammy, path, mandatory_keys, num_keys, bool_keys) {
+ for (var i in sammy.params) {
+ if (i === 'length' || !sammy.params.hasOwnProperty(i)) continue;
+ if (sammy.params[i] == '' && jQuery.inArray(i, mandatory_keys) == -1) {
+ delete sammy.params[i];
+ }
+ else if (jQuery.inArray(i, num_keys) != -1) {
+ sammy.params[i] = parseInt(sammy.params[i]);
+ }
+ else if (jQuery.inArray(i, bool_keys) != -1) {
+ sammy.params[i] = sammy.params[i] == 'true';
+ }
+ }
+ if (sync_put(sammy, path)) update();
+}
+
+function update_column_options(sammy) {
+ var mode = sammy.params['mode'];
+ for (var group in COLUMNS[mode]) {
+ var options = COLUMNS[mode][group];
+ for (var i = 0; i < options.length; i++) {
+ var key = options[i][0];
+ var value = sammy.params[mode + '-' + key] != undefined;
+ store_pref('column-' + mode + '-' + key, value);
+ }
+ }
+
+ partial_update();
+}
+
+function debug(str) {
+ $('<p>' + str + '</p>').appendTo('#debug');
+}
+
+function keys(obj) {
+ var ks = [];
+ for (var k in obj) {
+ ks.push(k);
+ }
+ return ks;
+}
+
+// Don't use the jQuery AJAX support, it seems to have trouble reporting
+// server-down type errors.
+function xmlHttpRequest() {
+ var res;
+ try {
+ res = new XMLHttpRequest();
+ }
+ catch(e) {
+ res = new ActiveXObject("Microsoft.XMLHttp");
+ }
+ return res;
+}
+
+// Our base64 library takes a string that is really a byte sequence,
+// and will throw if given a string with chars > 255 (and hence not
+// DTRT for chars > 127). So encode a unicode string as a UTF-8
+// sequence of "bytes".
+function b64_encode_utf8(str) {
+ return base64.encode(encode_utf8(str));
+}
+
+// encodeURIComponent handles utf-8, unescape does not. Neat!
+function encode_utf8(str) {
+ return unescape(encodeURIComponent(str));
+}
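+
+// Illustrative only: b64_encode_utf8('é') first becomes the two UTF-8
+// "bytes" '\xC3\xA9' via encode_utf8, then base64, i.e. 'w6k='.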
+
+(function($){
+ $.fn.extend({
+ center: function () {
+ return this.each(function() {
+ var top = ($(window).height() - $(this).outerHeight()) / 2;
+ var left = ($(window).width() - $(this).outerWidth()) / 2;
+ $(this).css({margin:0, top: (top > 0 ? top : 0)+'px', left: (left > 0 ? left : 0)+'px'});
+ });
+ }
+ });
+})(jQuery);
+
+function debounce(f, delay) {
+ var timeout = null;
+
+ return function() {
+ var obj = this;
+ var args = arguments;
+
+ function delayed () {
+ f.apply(obj, args);
+ timeout = null;
+ }
+ if (timeout) clearTimeout(timeout);
+ timeout = setTimeout(delayed, delay);
+ }
+}
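+
+// Example usage (illustrative): coalesce rapid keystrokes into a single
+// update 300ms after typing stops.
+//   var debounced_update = debounce(update, 300);
+//   $('#filter').on('keyup', debounced_update);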
+
+function rename_multifield(params, from, to) {
+ var new_params = {};
+ for(var key in params){
+ var match = key.match("^" + from + "_[0-9_]*_mftype$");
+ var match2 = key.match("^" + from + "_[0-9_]*_mfkey$");
+ var match3 = key.match("^" + from + "_[0-9_]*_mfvalue$");
+ if (match != null) {
+ new_params[match[0].replace(from, to)] = params[match];
+ }
+ else if (match2 != null) {
+ new_params[match2[0].replace(from, to)] = params[match2];
+ }
+ else if (match3 != null) {
+ new_params[match3[0].replace(from, to)] = params[match3];
+ }
+ else {
+ new_params[key] = params[key]
+ }
+ }
+ return new_params;
+}
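+
+// e.g. rename_multifield(params, 'arguments', 'policy') rewrites keys such
+// as arguments_1_mfkey to policy_1_mfkey, leaving other keys untouched.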
+
+function select_queue_type(queuetype) {
+ queue_type = queuetype.value;
+ update();
+}
+
+function is_quorum(queue) {
+ if (queue["arguments"]) {
+ if (queue["arguments"]["x-queue-type"]) {
+ return queue["arguments"]["x-queue-type"] === "quorum";
+ } else {
+ return false;
+ }
+ } else {
+ return false;
+ }
+}
+
+function is_stream(queue) {
+ if (queue["arguments"]) {
+ if (queue["arguments"]["x-queue-type"]) {
+ return queue["arguments"]["x-queue-type"] === "stream";
+ } else {
+ return false;
+ }
+ } else {
+ return false;
+ }
+}
+
+function is_classic(queue) {
+ if (queue["arguments"]) {
+ if (queue["arguments"]["x-queue-type"]) {
+ return queue["arguments"]["x-queue-type"] === "classic";
+ } else {
+ return true;
+ }
+ } else {
+ return true;
+ }
+}
+
+function ensure_queues_chart_range() {
+ var range = get_pref('chart-range');
+ // Note: the queues page uses the 'basic' range type
+ var fixup_range;
+ var valid_range = false;
+ var range_type = get_chart_range_type('queues');
+ var chart_periods = CHART_RANGES[range_type];
+ for (var i = 0; i < chart_periods.length; ++i) {
+ var data = chart_periods[i];
+ var val = data[0];
+ if (range === val) {
+ valid_range = true;
+ break;
+ }
+ // If the range needs to be adjusted, use the last
+ // valid one
+ fixup_range = val;
+ }
+ if (!valid_range) {
+ store_pref('chart-range', fixup_range);
+ }
+}
+
+function get_chart_range_type(arg) {
+ /*
+ * 'arg' can be:
+ * lengths-over for the Overview page
+ * lengths-q for the per-queue page
+ * queues for setting up the queues range
+ */
+ if (arg === 'lengths-over') {
+ return 'global';
+ }
+ if (arg === 'msg-rates-over') {
+ return 'global';
+ }
+ if (arg === 'lengths-q') {
+ return 'basic';
+ }
+ if (arg === 'msg-rates-q') {
+ return 'basic';
+ }
+ if (arg === 'queues') {
+ return 'basic';
+ }
+ if (arg === 'queue-churn') {
+ return 'basic';
+ }
+ if (arg === 'channel-churn') {
+ return 'basic';
+ }
+ if (arg === 'connection-churn') {
+ return 'basic';
+ }
+
+ console.log('[WARNING]: range type not found for arg: ' + arg);
+ return 'basic';
+}
diff --git a/deps/rabbitmq_management/priv/www/js/prefs.js b/deps/rabbitmq_management/priv/www/js/prefs.js
new file mode 100644
index 0000000000..b2ad9fba46
--- /dev/null
+++ b/deps/rabbitmq_management/priv/www/js/prefs.js
@@ -0,0 +1,164 @@
+function local_storage_available() {
+ try {
+ return 'localStorage' in window && window['localStorage'] !== null;
+ } catch (e) {
+ return false;
+ }
+}
+
+function store_cookie_value(k, v) {
+ var d = parse_cookie();
+ d[short_key(k)] = v;
+ store_cookie(d);
+}
+
+function store_cookie_value_with_expiration(k, v, expiration_date) {
+ var d = parse_cookie();
+ d[short_key(k)] = v;
+ store_cookie_with_expiration(d, expiration_date);
+}
+
+function clear_cookie_value(k) {
+ var d = parse_cookie();
+ delete d[short_key(k)];
+ store_cookie(d);
+}
+
+function get_cookie_value(k) {
+    var r = parse_cookie()[short_key(k)];
+ return r == undefined ? default_pref(k) : r;
+}
+
+function store_pref(k, v) {
+ if (local_storage_available()) {
+ window.localStorage['rabbitmq.' + k] = v;
+ }
+ else {
+ var d = parse_cookie();
+ d[short_key(k)] = v;
+ store_cookie(d);
+ }
+}
+
+function clear_pref(k) {
+ if (local_storage_available()) {
+ window.localStorage.removeItem('rabbitmq.' + k);
+ }
+ else {
+ var d = parse_cookie();
+ delete d[short_key(k)];
+ store_cookie(d);
+ }
+}
+
+function clear_local_pref(k) {
+ if (local_storage_available()) {
+ window.localStorage.removeItem('rabbitmq.' + k);
+ }
+}
+
+function get_pref(k) {
+ var val;
+ if (local_storage_available()) {
+ val = window.localStorage['rabbitmq.' + k];
+ }
+ else {
+        val = parse_cookie()[short_key(k)];
+    }
+ var res = (val == undefined) ? default_pref(k) : val;
+ return res;
+}
+
+function section_pref(template, name) {
+ return 'visible|' + template + '|' + name;
+}
+
+function show_column(mode, column) {
+ return get_pref('column-' + mode + '-' + column) == 'true';
+}
+
+// ---------------------------------------------------------------------------
+
+function default_pref(k) {
+ if (k.substring(0, 11) == 'chart-size-') return 'small';
+ if (k.substring(0, 10) == 'rate-mode-') return 'chart';
+ if (k.substring(0, 11) == 'chart-line-') return 'true';
+ if (k == 'truncate') return '100';
+ if (k == 'chart-range') return '60|5';
+ if (k.substring(0, 7) == 'column-')
+ return default_column_pref(k.substring(7));
+ return null;
+}
+
+function default_column_pref(key0) {
+ var ix = key0.indexOf('-');
+ var mode = key0.substring(0, ix);
+ var key = key0.substring(ix + 1);
+ for (var group in COLUMNS[mode]) {
+ var options = COLUMNS[mode][group];
+ for (var i = 0; i < options.length; i++) {
+ if (options[i][0] == key) {
+ return '' + options[i][2];
+ }
+ }
+ }
+ return 'false';
+}
+
+// ---------------------------------------------------------------------------
+
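+// Preferences that fall back to cookies are packed into a single cookie
+// named 'm' as 'k1:v1|k2:v2', with each value escape()d (see
+// store_cookie_with_expiration below).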
+function parse_cookie() {
+ var c = get_cookie('m');
+ var items = c.length == 0 ? [] : c.split('|');
+
+ var dict = {};
+ for (var i in items) {
+ var kv = items[i].split(':');
+ dict[kv[0]] = unescape(kv[1]);
+ }
+ return dict;
+}
+
+function store_cookie(dict) {
+ var date = new Date();
+ date.setFullYear(date.getFullYear() + 1);
+ store_cookie_with_expiration(dict, date);
+}
+
+function store_cookie_with_expiration(dict, expiration_date) {
+ var enc = [];
+ for (var k in dict) {
+ enc.push(k + ':' + escape(dict[k]));
+ }
+ document.cookie = 'm=' + enc.join('|') + '; expires=' + expiration_date.toUTCString();
+}
+
+function get_cookie(key) {
+ var cookies = document.cookie.split(';');
+ for (var i in cookies) {
+ var kv = jQuery.trim(cookies[i]).split('=');
+ if (kv[0] == key) return kv[1];
+ }
+ return '';
+}
+
+// Try to economise on space since cookies have limited length.
+function short_key(k) {
+ var res = Math.abs(k.hashCode() << 16 >> 16);
+ res = res.toString(16);
+ return res;
+}
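+
+// Illustrative only: short_key('chart-range') hashes the full key and keeps
+// the low 16 bits as hex, so every stored key costs at most 4 characters.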
+
+String.prototype.hashCode = function() {
+ var hash = 0;
+    if (this.length == 0) return hash;
+    for (var i = 0; i < this.length; i++) {
+        var char = this.charCodeAt(i);
+ hash = 31*hash+char;
+ hash = hash & hash; // Convert to 32bit integer
+ }
+ return hash;
+}
diff --git a/deps/rabbitmq_management/priv/www/js/sammy-0.7.6.js b/deps/rabbitmq_management/priv/www/js/sammy-0.7.6.js
new file mode 100644
index 0000000000..b0ff981c06
--- /dev/null
+++ b/deps/rabbitmq_management/priv/www/js/sammy-0.7.6.js
@@ -0,0 +1,2156 @@
+// name: sammy
+// version: 0.7.6
+
+// Sammy.js / http://sammyjs.org
+
+(function(factory){
+ // Support module loading scenarios
+ if (typeof define === 'function' && define.amd){
+ // AMD Anonymous Module
+ define(['jquery'], factory);
+ } else {
+ // No module loader (plain <script> tag) - put directly in global namespace
+ jQuery.sammy = window.Sammy = factory(jQuery);
+ }
+})(function($){
+
+ var Sammy,
+ PATH_REPLACER = "([^\/]+)",
+ PATH_NAME_MATCHER = /:([\w\d]+)/g,
+ QUERY_STRING_MATCHER = /\?([^#]*)?$/,
+ // mainly for making `arguments` an Array
+ _makeArray = function(nonarray) { return Array.prototype.slice.call(nonarray); },
+ // borrowed from jQuery
+ _isFunction = function( obj ) { return Object.prototype.toString.call(obj) === "[object Function]"; },
+ _isArray = function( obj ) { return Object.prototype.toString.call(obj) === "[object Array]"; },
+ _isRegExp = function( obj ) { return Object.prototype.toString.call(obj) === "[object RegExp]"; },
+ _decode = function( str ) { return decodeURIComponent((str || '').replace(/\+/g, ' ')); },
+ _encode = encodeURIComponent,
+ _escapeHTML = function(s) {
+ return String(s).replace(/&(?!\w+;)/g, '&amp;').replace(/</g, '&lt;').replace(/>/g, '&gt;').replace(/"/g, '&quot;');
+ },
+ _routeWrapper = function(verb) {
+ return function() {
+ return this.route.apply(this, [verb].concat(Array.prototype.slice.call(arguments)));
+ };
+ },
+ _template_cache = {},
+ _has_history = !!(window.history && history.pushState),
+ loggers = [];
+
+
+ // `Sammy` (also aliased as $.sammy) is not only the namespace for a
+  // number of prototypes, it's also a top-level method that allows for easy
+ // creation/management of `Sammy.Application` instances. There are a
+ // number of different forms for `Sammy()` but each returns an instance
+ // of `Sammy.Application`. When a new instance is created using
+ // `Sammy` it is added to an Object called `Sammy.apps`. This
+ // provides for an easy way to get at existing Sammy applications. Only one
+ // instance is allowed per `element_selector` so when calling
+ // `Sammy('selector')` multiple times, the first time will create
+ // the application and the following times will extend the application
+ // already added to that selector.
+ //
+ // ### Example
+ //
+ // // returns the app at #main or a new app
+ // Sammy('#main')
+ //
+ // // equivalent to "new Sammy.Application", except appends to apps
+ // Sammy();
+ // Sammy(function() { ... });
+ //
+ // // extends the app at '#main' with function.
+ // Sammy('#main', function() { ... });
+ //
+ Sammy = function() {
+ var args = _makeArray(arguments),
+ app, selector;
+ Sammy.apps = Sammy.apps || {};
+ if (args.length === 0 || args[0] && _isFunction(args[0])) { // Sammy()
+ return Sammy.apply(Sammy, ['body'].concat(args));
+ } else if (typeof (selector = args.shift()) == 'string') { // Sammy('#main')
+ app = Sammy.apps[selector] || new Sammy.Application();
+ app.element_selector = selector;
+ if (args.length > 0) {
+ $.each(args, function(i, plugin) {
+ app.use(plugin);
+ });
+ }
+ // if the selector changes make sure the reference in Sammy.apps changes
+ if (app.element_selector != selector) {
+ delete Sammy.apps[selector];
+ }
+ Sammy.apps[app.element_selector] = app;
+ return app;
+ }
+ };
+
+ Sammy.VERSION = '0.7.6';
+
+ // Add to the global logger pool. Takes a function that accepts an
+ // unknown number of arguments and should print them or send them somewhere
+ // The first argument is always a timestamp.
+ Sammy.addLogger = function(logger) {
+ loggers.push(logger);
+ };
+
+ // Sends a log message to each logger listed in the global
+ // loggers pool. Can take any number of arguments.
+ // Also prefixes the arguments with a timestamp.
+ Sammy.log = function() {
+ var args = _makeArray(arguments);
+ args.unshift("[" + Date() + "]");
+ $.each(loggers, function(i, logger) {
+ logger.apply(Sammy, args);
+ });
+ };
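+
+  // Illustrative only: a custom logger receives the timestamp Sammy
+  // prepends, followed by the caller's own arguments.
+  //
+  //     Sammy.addLogger(function(timestamp, message) {
+  //       window.console && window.console.info(timestamp, message);
+  //     });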
+
+ if (typeof window.console != 'undefined') {
+ if (typeof window.console.log === 'function' && _isFunction(window.console.log.apply)) {
+ Sammy.addLogger(function() {
+ window.console.log.apply(window.console, arguments);
+ });
+ } else {
+ Sammy.addLogger(function() {
+ window.console.log(arguments);
+ });
+ }
+ } else if (typeof console != 'undefined') {
+ Sammy.addLogger(function() {
+ console.log.apply(console, arguments);
+ });
+ }
+
+ $.extend(Sammy, {
+ makeArray: _makeArray,
+ isFunction: _isFunction,
+ isArray: _isArray
+ });
+
+ // Sammy.Object is the base for all other Sammy classes. It provides some useful
+ // functionality, including cloning, iterating, etc.
+ Sammy.Object = function(obj) { // constructor
+ return $.extend(this, obj || {});
+ };
+
+ $.extend(Sammy.Object.prototype, {
+
+ // Escape HTML in string, use in templates to prevent script injection.
+ // Also aliased as `h()`
+ escapeHTML: _escapeHTML,
+ h: _escapeHTML,
+
+ // Returns a copy of the object with Functions removed.
+ toHash: function() {
+ var json = {};
+ $.each(this, function(k,v) {
+ if (!_isFunction(v)) {
+ json[k] = v;
+ }
+ });
+ return json;
+ },
+
+ // Renders a simple HTML version of this Objects attributes.
+ // Does not render functions.
+ // For example. Given this Sammy.Object:
+ //
+ // var s = new Sammy.Object({first_name: 'Sammy', last_name: 'Davis Jr.'});
+ // s.toHTML()
+ // //=> '<strong>first_name</strong> Sammy<br /><strong>last_name</strong> Davis Jr.<br />'
+ //
+ toHTML: function() {
+ var display = "";
+ $.each(this, function(k, v) {
+ if (!_isFunction(v)) {
+ display += "<strong>" + k + "</strong> " + v + "<br />";
+ }
+ });
+ return display;
+ },
+
+ // Returns an array of keys for this object. If `attributes_only`
+ // is true will not return keys that map to a `function()`
+ keys: function(attributes_only) {
+ var keys = [];
+ for (var property in this) {
+ if (!_isFunction(this[property]) || !attributes_only) {
+ keys.push(property);
+ }
+ }
+ return keys;
+ },
+
+ // Checks if the object has a value at `key` and that the value is not empty
+ has: function(key) {
+ return this[key] && $.trim(this[key].toString()) !== '';
+ },
+
+ // convenience method to join as many arguments as you want
+ // by the first argument - useful for making paths
+ join: function() {
+ var args = _makeArray(arguments);
+ var delimiter = args.shift();
+ return args.join(delimiter);
+ },
+
+ // Shortcut to Sammy.log
+ log: function() {
+ Sammy.log.apply(Sammy, arguments);
+ },
+
+ // Returns a string representation of this object.
+ // if `include_functions` is true, it will also toString() the
+ // methods of this object. By default only prints the attributes.
+ toString: function(include_functions) {
+ var s = [];
+ $.each(this, function(k, v) {
+ if (!_isFunction(v) || include_functions) {
+ s.push('"' + k + '": ' + v.toString());
+ }
+ });
+ return "Sammy.Object: {" + s.join(',') + "}";
+ }
+ });
+
+
+ // Return whether the event targets this window.
+ Sammy.targetIsThisWindow = function targetIsThisWindow(event, tagName) {
+ var targetElement = $(event.target).closest(tagName);
+ if (targetElement.length === 0) { return true; }
+
+ var targetWindow = targetElement.attr('target');
+ if (!targetWindow || targetWindow === window.name || targetWindow === '_self') { return true; }
+ if (targetWindow === '_blank') { return false; }
+ if (targetWindow === 'top' && window === window.top) { return true; }
+ return false;
+ };
+
+
+ // The DefaultLocationProxy is the default location proxy for all Sammy applications.
+ // A location proxy is a prototype that conforms to a simple interface. The purpose
+  // of a location proxy is to notify the Sammy.Application it's bound to when the location
+ // or 'external state' changes.
+ //
+ // The `DefaultLocationProxy` watches for changes to the path of the current window and
+ // is also able to set the path based on changes in the application. It does this by
+ // using different methods depending on what is available in the current browser. In
+  // the latest and greatest browsers it uses the HTML5 History API and the `pushState`
+ // `popState` events/methods. This allows you to use Sammy to serve a site behind normal
+ // URI paths as opposed to the older default of hash (#) based routing. Because the server
+ // can interpret the changed path on a refresh or re-entry, though, it requires additional
+ // support on the server side. If you'd like to force disable HTML5 history support, please
+ // use the `disable_push_state` setting on `Sammy.Application`. If pushState support
+ // is enabled, `DefaultLocationProxy` also binds to all links on the page. If a link is clicked
+ // that matches the current set of routes, the URL is changed using pushState instead of
+ // fully setting the location and the app is notified of the change.
+ //
+ // If the browser does not have support for HTML5 History, `DefaultLocationProxy` automatically
+ // falls back to the older hash based routing. The newest browsers (IE, Safari > 4, FF >= 3.6)
+  // support an 'onhashchange' DOM event that's fired whenever the location.hash changes.
+ // In this situation the DefaultLocationProxy just binds to this event and delegates it to
+ // the application. In the case of older browsers a poller is set up to track changes to the
+ // hash.
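+  //
+  // ### Example (illustrative)
+  //
+  //     // force hash-based routing even where HTML5 history is available:
+  //     var app = $.sammy(function() {
+  //       this.disable_push_state = true;
+  //     });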
+ Sammy.DefaultLocationProxy = function(app, run_interval_every) {
+ this.app = app;
+    // set is_native to false and start the poller immediately
+ this.is_native = false;
+ this.has_history = _has_history;
+ this._startPolling(run_interval_every);
+ };
+
+ Sammy.DefaultLocationProxy.fullPath = function(location_obj) {
+ // Bypass the `window.location.hash` attribute. If a question mark
+ // appears in the hash IE6 will strip it and all of the following
+ // characters from `window.location.hash`.
+ var matches = location_obj.toString().match(/^[^#]*(#.+)$/);
+ var hash = matches ? matches[1] : '';
+ return [location_obj.pathname, location_obj.search, hash].join('');
+ };
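+  // e.g. for http://host/app?page=2#/queues this returns '/app?page=2#/queues';
+  // the hash is re-extracted from the full string to dodge the IE6 bug above.
+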
+$.extend(Sammy.DefaultLocationProxy.prototype, {
+ // bind the proxy events to the current app.
+ bind: function() {
+ var proxy = this, app = this.app, lp = Sammy.DefaultLocationProxy;
+ $(window).bind('hashchange.' + this.app.eventNamespace(), function(e, non_native) {
+ // if we receive a native hash change event, set the proxy accordingly
+ // and stop polling
+ if (proxy.is_native === false && !non_native) {
+ proxy.is_native = true;
+ window.clearInterval(lp._interval);
+ lp._interval = null;
+ }
+ app.trigger('location-changed');
+ });
+ if (_has_history && !app.disable_push_state) {
+ // bind to popstate
+ $(window).bind('popstate.' + this.app.eventNamespace(), function(e) {
+ app.trigger('location-changed');
+ });
+ // bind to link clicks that have routes
+ $(document).delegate('a', 'click.history-' + this.app.eventNamespace(), function (e) {
+ if (e.isDefaultPrevented() || e.metaKey || e.ctrlKey) {
+ return;
+ }
+ var full_path = lp.fullPath(this),
+ // Get anchor's host name in a cross browser compatible way.
+            // IE loses the hostname property when setting href in JS
+ // with a relative URL, e.g. a.setAttribute('href',"/whatever").
+ // Circumvent this problem by creating a new link with given URL and
+ // querying that for a hostname.
+ hostname = this.hostname ? this.hostname : function (a) {
+ var l = document.createElement("a");
+ l.href = a.href;
+ return l.hostname;
+ }(this);
+
+ if (hostname == window.location.hostname &&
+ app.lookupRoute('get', full_path) &&
+ Sammy.targetIsThisWindow(e, 'a')) {
+ e.preventDefault();
+ proxy.setLocation(full_path);
+ return false;
+ }
+ });
+ }
+ if (!lp._bindings) {
+ lp._bindings = 0;
+ }
+ lp._bindings++;
+ },
+
+ // unbind the proxy events from the current app
+ unbind: function() {
+ $(window).unbind('hashchange.' + this.app.eventNamespace());
+ $(window).unbind('popstate.' + this.app.eventNamespace());
+ $(document).undelegate('a', 'click.history-' + this.app.eventNamespace());
+ Sammy.DefaultLocationProxy._bindings--;
+ if (Sammy.DefaultLocationProxy._bindings <= 0) {
+ window.clearInterval(Sammy.DefaultLocationProxy._interval);
+ Sammy.DefaultLocationProxy._interval = null;
+ }
+ },
+
+ // get the current location from the hash.
+ getLocation: function() {
+ return Sammy.DefaultLocationProxy.fullPath(window.location);
+ },
+
+ // set the current location to `new_location`
+ setLocation: function(new_location) {
+ if (/^([^#\/]|$)/.test(new_location)) { // non-prefixed url
+ if (_has_history && !this.app.disable_push_state) {
+ new_location = '/' + new_location;
+ } else {
+ new_location = '#!/' + new_location;
+ }
+ }
+ if (new_location != this.getLocation()) {
+ // HTML5 History exists and new_location is a full path
+ if (_has_history && !this.app.disable_push_state && /^\//.test(new_location)) {
+          history.pushState({ path: new_location }, document.title, new_location);
+ this.app.trigger('location-changed');
+ } else {
+ return (window.location = new_location);
+ }
+ }
+ },
+
+ _startPolling: function(every) {
+ // set up interval
+ var proxy = this;
+ if (!Sammy.DefaultLocationProxy._interval) {
+ if (!every) { every = 10; }
+ var hashCheck = function() {
+ var current_location = proxy.getLocation();
+ if (typeof Sammy.DefaultLocationProxy._last_location == 'undefined' ||
+ current_location != Sammy.DefaultLocationProxy._last_location) {
+ window.setTimeout(function() {
+ $(window).trigger('hashchange', [true]);
+ }, 0);
+ }
+ Sammy.DefaultLocationProxy._last_location = current_location;
+ };
+ hashCheck();
+ Sammy.DefaultLocationProxy._interval = window.setInterval(hashCheck, every);
+ }
+ }
+ });
+
+
+ // Sammy.Application is the Base prototype for defining 'applications'.
+ // An 'application' is a collection of 'routes' and bound events that is
+ // attached to an element when `run()` is called.
+  // The only argument, an 'app_function', is evaluated within the context of the application.
+ Sammy.Application = function(app_function) {
+ var app = this;
+ this.routes = {};
+ this.listeners = new Sammy.Object({});
+ this.arounds = [];
+ this.befores = [];
+ // generate a unique namespace
+ this.namespace = (new Date()).getTime() + '-' + parseInt(Math.random() * 1000, 10);
+ this.context_prototype = function() { Sammy.EventContext.apply(this, arguments); };
+ this.context_prototype.prototype = new Sammy.EventContext();
+
+ if (_isFunction(app_function)) {
+ app_function.apply(this, [this]);
+ }
+ // set the location proxy if not defined to the default (DefaultLocationProxy)
+ if (!this._location_proxy) {
+ this.setLocationProxy(new Sammy.DefaultLocationProxy(this, this.run_interval_every));
+ }
+ if (this.debug) {
+ this.bindToAllEvents(function(e, data) {
+ app.log(app.toString(), e.cleaned_type, data || {});
+ });
+ }
+ };
+
+ Sammy.Application.prototype = $.extend({}, Sammy.Object.prototype, {
+
+ // the four route verbs
+ ROUTE_VERBS: ['get','post','put','delete'],
+
+ // An array of the default events triggered by the
+ // application during its lifecycle
+ APP_EVENTS: ['run', 'unload', 'lookup-route', 'run-route', 'route-found', 'event-context-before', 'event-context-after', 'changed', 'error', 'check-form-submission', 'redirect', 'location-changed'],
+
+ _last_route: null,
+ _location_proxy: null,
+ _running: false,
+
+ // Defines what element the application is bound to. Provide a selector
+ // (parseable by `jQuery()`) and this will be used by `$element()`
+ element_selector: 'body',
+
+ // When set to true, logs all of the default events using `log()`
+ debug: false,
+
+ // When set to true, and the error() handler is not overridden, will actually
+ // raise JS errors in routes (500) and when routes can't be found (404)
+ raise_errors: false,
+
+ // The time in milliseconds that the URL is queried for changes
+ run_interval_every: 50,
+
+ // if using the `DefaultLocationProxy` setting this to true will force the app to use
+ // traditional hash based routing as opposed to the new HTML5 PushState support
+ disable_push_state: false,
+
+ // The default template engine to use when using `partial()` in an
+ // `EventContext`. `template_engine` can either be a string that
+ // corresponds to the name of a method/helper on EventContext or it can be a function
+ // that takes two arguments, the content of the unrendered partial and an optional
+ // JS object that contains interpolation data. Template engine is only called/referred
+ // to if the extension of the partial is null or unknown. See `partial()`
+ // for more information
+ template_engine: null,
+
+ // //=> Sammy.Application: body
+ toString: function() {
+ return 'Sammy.Application:' + this.element_selector;
+ },
+
+    // returns a jQuery object of the Application's bound element.
+ $element: function(selector) {
+ return selector ? $(this.element_selector).find(selector) : $(this.element_selector);
+ },
+
+ // `use()` is the entry point for including Sammy plugins.
+ // The first argument to use should be a function() that is evaluated
+ // in the context of the current application, just like the `app_function`
+ // argument to the `Sammy.Application` constructor.
+ //
+ // Any additional arguments are passed to the app function sequentially.
+ //
+ // For much more detail about plugins, check out:
+ // [http://sammyjs.org/docs/plugins](http://sammyjs.org/docs/plugins)
+ //
+ // ### Example
+ //
+ // var MyPlugin = function(app, prepend) {
+ //
+ // this.helpers({
+ // myhelper: function(text) {
+ // alert(prepend + " " + text);
+ // }
+ // });
+ //
+ // };
+ //
+ // var app = $.sammy(function() {
+ //
+ // this.use(MyPlugin, 'This is my plugin');
+ //
+ // this.get('#/', function() {
+ // this.myhelper('and dont you forget it!');
+ // //=> Alerts: This is my plugin and dont you forget it!
+ // });
+ //
+ // });
+ //
+    // If plugin is passed as a string it assumes you are trying to load
+ // Sammy."Plugin". This is the preferred way of loading core Sammy plugins
+ // as it allows for better error-messaging.
+ //
+ // ### Example
+ //
+ // $.sammy(function() {
+ // this.use('Mustache'); //=> Sammy.Mustache
+ // this.use('Storage'); //=> Sammy.Storage
+ // });
+ //
+ use: function() {
+ // flatten the arguments
+ var args = _makeArray(arguments),
+ plugin = args.shift(),
+ plugin_name = plugin || '';
+ try {
+ args.unshift(this);
+ if (typeof plugin == 'string') {
+ plugin_name = 'Sammy.' + plugin;
+ plugin = Sammy[plugin];
+ }
+ plugin.apply(this, args);
+ } catch(e) {
+ if (typeof plugin === 'undefined') {
+ this.error("Plugin Error: called use() but plugin (" + plugin_name.toString() + ") is not defined", e);
+ } else if (!_isFunction(plugin)) {
+ this.error("Plugin Error: called use() but '" + plugin_name.toString() + "' is not a function", e);
+ } else {
+ this.error("Plugin Error", e);
+ }
+ }
+ return this;
+ },
+
+ // Sets the location proxy for the current app. By default this is set to
+ // a new `Sammy.DefaultLocationProxy` on initialization. However, you can set
+    // the location_proxy inside your app function to give your app a custom
+ // location mechanism. See `Sammy.DefaultLocationProxy` and `Sammy.DataLocationProxy`
+ // for examples.
+ //
+ // `setLocationProxy()` takes an initialized location proxy.
+ //
+ // ### Example
+ //
+ // // to bind to data instead of the default hash;
+ // var app = $.sammy(function() {
+ // this.setLocationProxy(new Sammy.DataLocationProxy(this));
+ // });
+ //
+ setLocationProxy: function(new_proxy) {
+ var original_proxy = this._location_proxy;
+ this._location_proxy = new_proxy;
+ if (this.isRunning()) {
+ if (original_proxy) {
+ // if there is already a location proxy, unbind it.
+ original_proxy.unbind();
+ }
+ this._location_proxy.bind();
+ }
+ },
+
+ // provide log() override for inside an app that includes the relevant application element_selector
+ log: function() {
+ Sammy.log.apply(Sammy, Array.prototype.concat.apply([this.element_selector],arguments));
+ },
+
+
+ // `route()` is the main method for defining routes within an application.
+ // For great detail on routes, check out:
+ // [http://sammyjs.org/docs/routes](http://sammyjs.org/docs/routes)
+ //
+ // This method also has aliases for each of the different verbs (eg. `get()`, `post()`, etc.)
+ //
+ // ### Arguments
+ //
+ // * `verb` A String in the set of ROUTE_VERBS or 'any'. 'any' will add routes for each
+ // of the ROUTE_VERBS. If only two arguments are passed,
+ // the first argument is the path, the second is the callback and the verb
+ // is assumed to be 'any'.
+ // * `path` A Regexp or a String representing the path to match to invoke this verb.
+ // * `callback` A Function that is called/evaluated when the route is run see: `runRoute()`.
+ // It is also possible to pass a string as the callback, which is looked up as the name
+ // of a method on the application.
+ //
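+    // ### Example (illustrative, per the argument description above)
+    //
+    //     app.route('get', '#/users/:id', function() {
+    //       this.log('user id', this.params['id']);
+    //     });
+    //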
+ route: function(verb, path) {
+ var app = this, param_names = [], add_route, path_match, callback = Array.prototype.slice.call(arguments,2);
+
+ // if the method signature is just (path, callback)
+ // assume the verb is 'any'
+ if (callback.length === 0 && _isFunction(path)) {
+ callback = [path];
+ path = verb;
+ verb = 'any';
+ }
+
+ verb = verb.toLowerCase(); // ensure verb is lower case
+
+ // if path is a string turn it into a regex
+ if (path.constructor == String) {
+
+ // Needs to be explicitly set because IE will maintain the index unless NULL is returned,
+ // which means that with two consecutive routes that contain params, the second set of params will not be found and end up in splat instead of params
+ // https://developer.mozilla.org/en/Core_JavaScript_1.5_Reference/Global_Objects/RegExp/lastIndex
+ PATH_NAME_MATCHER.lastIndex = 0;
+
+ // find the names
+ while ((path_match = PATH_NAME_MATCHER.exec(path)) !== null) {
+ param_names.push(path_match[1]);
+ }
+ // replace with the path replacement
+ path = new RegExp(path.replace(PATH_NAME_MATCHER, PATH_REPLACER) + "$");
+ }
+ // lookup callbacks
+ $.each(callback,function(i,cb){
+ if (typeof(cb) === 'string') {
+ callback[i] = app[cb];
+ }
+ });
+
+ add_route = function(with_verb) {
+ var r = {verb: with_verb, path: path, callback: callback, param_names: param_names};
+ // add route to routes array
+ app.routes[with_verb] = app.routes[with_verb] || [];
+ // place routes in order of definition
+ app.routes[with_verb].push(r);
+ };
+
+ if (verb === 'any') {
+ $.each(this.ROUTE_VERBS, function(i, v) { add_route(v); });
+ } else {
+ add_route(verb);
+ }
+
+ // return the app
+ return this;
+ },
+
+ // Alias for route('get', ...)
+ get: _routeWrapper('get'),
+
+ // Alias for route('post', ...)
+ post: _routeWrapper('post'),
+
+ // Alias for route('put', ...)
+ put: _routeWrapper('put'),
+
+ // Alias for route('delete', ...)
+ del: _routeWrapper('delete'),
+
+ // Alias for route('any', ...)
+ any: _routeWrapper('any'),
+
+ // `mapRoutes` takes an array of arrays, each array being passed to route()
+ // as arguments, this allows for mass definition of routes. Another benefit is
+ // this makes it possible/easier to load routes via remote JSON.
+ //
+ // ### Example
+ //
+ // var app = $.sammy(function() {
+ //
+ // this.mapRoutes([
+ // ['get', '#/', function() { this.log('index'); }],
+ // // strings in callbacks are looked up as methods on the app
+ // ['post', '#/create', 'addUser'],
+ // // No verb assumes 'any' as the verb
+ // [/dowhatever/, function() { this.log(this.verb, this.path)}];
+ // ]);
+ // });
+ //
+ mapRoutes: function(route_array) {
+ var app = this;
+ $.each(route_array, function(i, route_args) {
+ app.route.apply(app, route_args);
+ });
+ return this;
+ },
+
+ // A unique event namespace defined per application.
+ // All events bound with `bind()` are automatically bound within this space.
+ eventNamespace: function() {
+ return ['sammy-app', this.namespace].join('-');
+ },
+
+ // Works just like `jQuery.fn.bind()` with a couple notable differences.
+ //
+ // * It binds all events to the application element
+ // * All events are bound within the `eventNamespace()`
+ // * Events are not actually bound until the application is started with `run()`
+ // * callbacks are evaluated within the context of a Sammy.EventContext
+ //
+ bind: function(name, data, callback) {
+ var app = this;
+ // build the callback
+ // if the arity is 2, callback is the second argument
+ if (typeof callback == 'undefined') { callback = data; }
+ var listener_callback = function() {
+ // pull off the context from the arguments to the callback
+ var e, context, data;
+ e = arguments[0];
+ data = arguments[1];
+ if (data && data.context) {
+ context = data.context;
+ delete data.context;
+ } else {
+ context = new app.context_prototype(app, 'bind', e.type, data, e.target);
+ }
+ e.cleaned_type = e.type.replace(app.eventNamespace(), '');
+ callback.apply(context, [e, data]);
+ };
+
+      // it could be that the app element doesn't exist yet
+ // so attach to the listeners array and then run()
+ // will actually bind the event.
+ if (!this.listeners[name]) { this.listeners[name] = []; }
+ this.listeners[name].push(listener_callback);
+ if (this.isRunning()) {
+ // if the app is running
+ // *actually* bind the event to the app element
+ this._listen(name, listener_callback);
+ }
+ return this;
+ },
+
+ // Triggers custom events defined with `bind()`
+ //
+ // ### Arguments
+ //
+ // * `name` The name of the event. Automatically prefixed with the `eventNamespace()`
+ // * `data` An optional Object that can be passed to the bound callback.
+ // * `context` An optional context/Object in which to execute the bound callback.
+    // If no context is supplied, the context is a new `Sammy.EventContext`
+ //
+ trigger: function(name, data) {
+ this.$element().trigger([name, this.eventNamespace()].join('.'), [data]);
+ return this;
+ },
+
+ // Reruns the current route
+ refresh: function() {
+ this.last_location = null;
+ this.trigger('location-changed');
+ return this;
+ },
+
+ // Takes a single callback that is pushed on to a stack.
+ // Before any route is run, the callbacks are evaluated in order within
+ // the current `Sammy.EventContext`
+ //
+ // If any of the callbacks explicitly return false, execution of any
+ // further callbacks and the route itself is halted.
+ //
+ // You can also provide a set of options that will define when to run this
+    // before, based on the route it precedes.
+ //
+ // ### Example
+ //
+ // var app = $.sammy(function() {
+ //
+ // // will run at #/route but not at #/
+ // this.before('#/route', function() {
+ // //...
+ // });
+ //
+ // // will run at #/ but not at #/route
+ // this.before({except: {path: '#/route'}}, function() {
+ // this.log('not before #/route');
+ // });
+ //
+ // this.get('#/', function() {});
+ //
+ // this.get('#/route', function() {});
+ //
+ // });
+ //
+ // See `contextMatchesOptions()` for a full list of supported options
+ //
+ before: function(options, callback) {
+ if (_isFunction(options)) {
+ callback = options;
+ options = {};
+ }
+ this.befores.push([options, callback]);
+ return this;
+ },
+
+ // A shortcut for binding a callback to be run after a route is executed.
+    // After callbacks have no guaranteed order.
+ after: function(callback) {
+ return this.bind('event-context-after', callback);
+ },
+
+
+ // Adds an around filter to the application. around filters are functions
+ // that take a single argument `callback` which is the entire route
+ // execution path wrapped up in a closure. This means you can decide whether
+ // or not to proceed with execution by not invoking `callback` or,
+    // more usefully, wrapping the callback inside the result of an asynchronous execution.
+ //
+ // ### Example
+ //
+ // The most common use case for around() is calling a _possibly_ async function
+ // and executing the route within the functions callback:
+ //
+ // var app = $.sammy(function() {
+ //
+ // var current_user = false;
+ //
+ // function checkLoggedIn(callback) {
+ // // /session returns a JSON representation of the logged in user
+ // // or an empty object
+ // if (!current_user) {
+ // $.getJSON('/session', function(json) {
+ // if (json.login) {
+ // // show the user as logged in
+ // current_user = json;
+ // // execute the route path
+ // callback();
+ // } else {
+ // // show the user as not logged in
+ // current_user = false;
+ // // the context of aroundFilters is an EventContext
+ // this.redirect('#/login');
+ // }
+ // });
+ // } else {
+ // // execute the route path
+ // callback();
+ // }
+ // };
+ //
+ // this.around(checkLoggedIn);
+ //
+ // });
+ //
+ around: function(callback) {
+ this.arounds.push(callback);
+ return this;
+ },
+
+ // Adds a onComplete function to the application. onComplete functions are executed
+ // at the end of a chain of route callbacks, if they call next(). Unlike after,
+ // which is called as soon as the route is complete, onComplete is like a final next()
+ // for all routes, and is thus run asynchronously
+ //
+ // ### Example
+ //
+ // app.get('/chain',function(context,next) {
+ // console.log('chain1');
+ // next();
+ // },function(context,next) {
+ // console.log('chain2');
+ // next();
+ // });
+ //
+ // app.get('/link',function(context,next) {
+ // console.log('link1');
+ // next();
+ // },function(context,next) {
+ // console.log('link2');
+ // next();
+ // });
+ //
+ // app.onComplete(function() {
+ // console.log("Running finally");
+ // });
+ //
+ // If you go to '/chain', you will get the following messages:
+ //
+ // chain1
+ // chain2
+ // Running onComplete
+ //
+ //
+ // If you go to /link, you will get the following messages:
+ //
+ // link1
+ // link2
+ // Running onComplete
+ //
+ //
+    // It really comes into play when doing asynchronous work:
+ //
+ // app.get('/chain',function(context,next) {
+ // $.get('/my/url',function() {
+ // console.log('chain1');
+ // next();
+ // });
+ // },function(context,next) {
+ // console.log('chain2');
+ // next();
+ // });
+ //
+ onComplete: function(callback) {
+ this._onComplete = callback;
+ return this;
+ },
+
+ // Returns `true` if the current application is running.
+ isRunning: function() {
+ return this._running;
+ },
+
+ // Helpers extends the EventContext prototype specific to this app.
+ // This allows you to define app specific helper functions that can be used
+ // whenever you're inside of an event context (templates, routes, bind).
+ //
+ // ### Example
+ //
+ // var app = $.sammy(function() {
+ //
+ // helpers({
+ // upcase: function(text) {
+ // return text.toString().toUpperCase();
+ // }
+ // });
+ //
+ // get('#/', function() { with(this) {
+ // // inside of this context I can use the helpers
+ // $('#main').html(upcase($('#main').text());
+ // }});
+ //
+ // });
+ //
+ //
+ // ### Arguments
+ //
+ // * `extensions` An object collection of functions to extend the context.
+ //
+ helpers: function(extensions) {
+ $.extend(this.context_prototype.prototype, extensions);
+ return this;
+ },
+
+ // Helper extends the event context just like `helpers()` but does it
+ // a single method at a time. This is especially useful for dynamically named
+ // helpers
+ //
+ // ### Example
+ //
+ // // Trivial example that adds 3 helper methods to the context dynamically
+ // var app = $.sammy(function(app) {
+ //
+ // $.each([1,2,3], function(i, num) {
+ // app.helper('helper' + num, function() {
+ // this.log("I'm helper number " + num);
+ // });
+ // });
+ //
+ // this.get('#/', function() {
+ // this.helper2(); //=> I'm helper number 2
+ // });
+ // });
+ //
+ // ### Arguments
+ //
+ // * `name` The name of the method
+ // * `method` The function to be added to the prototype at `name`
+ //
+ helper: function(name, method) {
+ this.context_prototype.prototype[name] = method;
+ return this;
+ },
+
+ // Actually starts the application's lifecycle. `run()` should be invoked
+ // within a document.ready block to ensure the DOM exists before binding events, etc.
+ //
+ // ### Example
+ //
+ // var app = $.sammy(function() { ... }); // your application
+ // $(function() { // document.ready
+ // app.run();
+ // });
+ //
+ // ### Arguments
+ //
+ // * `start_url` Optionally, a String can be passed which the App will redirect to
+ // after the events/routes have been bound.
+ run: function(start_url) {
+ if (this.isRunning()) { return false; }
+ var app = this;
+
+ // actually bind all the listeners
+ $.each(this.listeners.toHash(), function(name, callbacks) {
+ $.each(callbacks, function(i, listener_callback) {
+ app._listen(name, listener_callback);
+ });
+ });
+
+ this.trigger('run', {start_url: start_url});
+ this._running = true;
+ // set last location
+ this.last_location = null;
+ if (!(/\#(.+)/.test(this.getLocation())) && typeof start_url != 'undefined') {
+ this.setLocation(start_url);
+ }
+ // check url
+ this._checkLocation();
+ this._location_proxy.bind();
+ this.bind('location-changed', function() {
+ app._checkLocation();
+ });
+
+ // bind to submit to capture post/put/delete routes
+ this.bind('submit', function(e) {
+ if ( !Sammy.targetIsThisWindow(e, 'form') ) { return true; }
+ var returned = app._checkFormSubmission($(e.target).closest('form'));
+ return (returned === false) ? e.preventDefault() : false;
+ });
+
+ // bind unload to body unload
+ $(window).bind('unload', function() {
+ app.unload();
+ });
+
+ // trigger html changed
+ return this.trigger('changed');
+ },
+
+ // The opposite of `run()`, un-binds all event listeners and intervals
+    // `run()` automatically binds an `onunload` event to run this when
+ // the document is closed.
+ unload: function() {
+ if (!this.isRunning()) { return false; }
+ var app = this;
+ this.trigger('unload');
+ // clear interval
+ this._location_proxy.unbind();
+ // unbind form submits
+ this.$element().unbind('submit').removeClass(app.eventNamespace());
+ // unbind all events
+ $.each(this.listeners.toHash() , function(name, listeners) {
+ $.each(listeners, function(i, listener_callback) {
+ app._unlisten(name, listener_callback);
+ });
+ });
+ this._running = false;
+ return this;
+ },
+
+ // Not only runs `unbind` but also destroys the app reference.
+ destroy: function() {
+ this.unload();
+ delete Sammy.apps[this.element_selector];
+ return this;
+ },
+
+ // Will bind a single callback function to every event that is already
+ // being listened to in the app. This includes all the `APP_EVENTS`
+ // as well as any custom events defined with `bind()`.
+ //
+ // Used internally for debug logging.
+ bindToAllEvents: function(callback) {
+ var app = this;
+ // bind to the APP_EVENTS first
+ $.each(this.APP_EVENTS, function(i, e) {
+ app.bind(e, callback);
+ });
+      // next, bind to listener names (only if they don't exist in APP_EVENTS)
+ $.each(this.listeners.keys(true), function(i, name) {
+ if ($.inArray(name, app.APP_EVENTS) == -1) {
+ app.bind(name, callback);
+ }
+ });
+ return this;
+ },
+
+ // Returns a copy of the given path with any query string after the hash
+ // removed.
+ routablePath: function(path) {
+ return path.replace(QUERY_STRING_MATCHER, '');
+ },
+
+ // Given a verb and a String path, will return either a route object or false
+ // if a matching route can be found within the current defined set.
+ lookupRoute: function(verb, path) {
+ var app = this, routed = false, i = 0, l, route;
+ if (typeof this.routes[verb] != 'undefined') {
+ l = this.routes[verb].length;
+ for (; i < l; i++) {
+ route = this.routes[verb][i];
+ if (app.routablePath(path).match(route.path)) {
+ routed = route;
+ break;
+ }
+ }
+ }
+ return routed;
+ },
+
+ // First, invokes `lookupRoute()` and if a route is found, parses the
+ // possible URL params and then invokes the route's callback within a new
+ // `Sammy.EventContext`. If the route can not be found, it calls
+ // `notFound()`. If `raise_errors` is set to `true` and
+ // the `error()` has not been overridden, it will throw an actual JS
+ // error.
+ //
+ // You probably will never have to call this directly.
+ //
+ // ### Arguments
+ //
+ // * `verb` A String for the verb.
+ // * `path` A String path to lookup.
+ // * `params` An Object of Params pulled from the URI or passed directly.
+ //
+ // ### Returns
+ //
+ // Either returns the value returned by the route callback or raises a 404 Not Found error.
+ //
+ runRoute: function(verb, path, params, target) {
+ var app = this,
+ route = this.lookupRoute(verb, path),
+ context,
+ wrapped_route,
+ arounds,
+ around,
+ befores,
+ before,
+ callback_args,
+ path_params,
+ final_returned;
+
+ if (this.debug) {
+ this.log('runRoute', [verb, path].join(' '));
+ }
+
+ this.trigger('run-route', {verb: verb, path: path, params: params});
+ if (typeof params == 'undefined') { params = {}; }
+
+ $.extend(params, this._parseQueryString(path));
+
+ if (route) {
+ this.trigger('route-found', {route: route});
+ // pull out the params from the path
+ if ((path_params = route.path.exec(this.routablePath(path))) !== null) {
+ // first match is the full path
+ path_params.shift();
+ // for each of the matches
+ $.each(path_params, function(i, param) {
+            // if there's a matching param name
+ if (route.param_names[i]) {
+ // set the name to the match
+ params[route.param_names[i]] = _decode(param);
+ } else {
+ // initialize 'splat'
+ if (!params.splat) { params.splat = []; }
+ params.splat.push(_decode(param));
+ }
+ });
+ }
+
+ // set event context
+ context = new this.context_prototype(this, verb, path, params, target);
+ // ensure arrays
+ arounds = this.arounds.slice(0);
+ befores = this.befores.slice(0);
+ // set the callback args to the context + contents of the splat
+ callback_args = [context];
+ if (params.splat) {
+ callback_args = callback_args.concat(params.splat);
+ }
+ // wrap the route up with the before filters
+ wrapped_route = function() {
+ var returned, i, nextRoute;
+ while (befores.length > 0) {
+ before = befores.shift();
+ // check the options
+ if (app.contextMatchesOptions(context, before[0])) {
+ returned = before[1].apply(context, [context]);
+ if (returned === false) { return false; }
+ }
+ }
+ app.last_route = route;
+ context.trigger('event-context-before', {context: context});
+ // run multiple callbacks
+ if (typeof(route.callback) === "function") {
+ route.callback = [route.callback];
+ }
+ if (route.callback && route.callback.length) {
+ i = -1;
+ nextRoute = function() {
+ i++;
+ if (route.callback[i]) {
+ returned = route.callback[i].apply(context,callback_args);
+ } else if (app._onComplete && typeof(app._onComplete === "function")) {
+ app._onComplete(context);
+ }
+ };
+ callback_args.push(nextRoute);
+ nextRoute();
+ }
+ context.trigger('event-context-after', {context: context});
+ return returned;
+ };
+ $.each(arounds.reverse(), function(i, around) {
+ var last_wrapped_route = wrapped_route;
+ wrapped_route = function() { return around.apply(context, [last_wrapped_route]); };
+ });
+ try {
+ final_returned = wrapped_route();
+ } catch(e) {
+ this.error(['500 Error', verb, path].join(' '), e);
+ }
+ return final_returned;
+ } else {
+ return this.notFound(verb, path);
+ }
+ },
+
+ // Matches an object of options against an `EventContext` like object that
+ // contains `path` and `verb` attributes. Internally Sammy uses this
+ // for matching `before()` filters against specific options. You can set the
+ // object to _only_ match certain paths or verbs, or match all paths or verbs _except_
+ // those that match the options.
+ //
+ // ### Example
+ //
+ // var app = $.sammy(),
+ // context = {verb: 'get', path: '#/mypath'};
+ //
+ // // match against a path string
+ // app.contextMatchesOptions(context, '#/mypath'); //=> true
+ // app.contextMatchesOptions(context, '#/otherpath'); //=> false
+ // // equivalent to
+ // app.contextMatchesOptions(context, {only: {path:'#/mypath'}}); //=> true
+ // app.contextMatchesOptions(context, {only: {path:'#/otherpath'}}); //=> false
+ // // match against a path regexp
+ // app.contextMatchesOptions(context, /path/); //=> true
+ // app.contextMatchesOptions(context, /^path/); //=> false
+ // // match only a verb
+ // app.contextMatchesOptions(context, {only: {verb:'get'}}); //=> true
+ // app.contextMatchesOptions(context, {only: {verb:'post'}}); //=> false
+ // // match all except a verb
+ // app.contextMatchesOptions(context, {except: {verb:'post'}}); //=> true
+ // app.contextMatchesOptions(context, {except: {verb:'get'}}); //=> false
+ // // match all except a path
+ // app.contextMatchesOptions(context, {except: {path:'#/otherpath'}}); //=> true
+ // app.contextMatchesOptions(context, {except: {path:'#/mypath'}}); //=> false
+ // // match all except a verb and a path
+ // app.contextMatchesOptions(context, {except: {path:'#/otherpath', verb:'post'}}); //=> true
+ // app.contextMatchesOptions(context, {except: {path:'#/mypath', verb:'post'}}); //=> true
+ // app.contextMatchesOptions(context, {except: {path:'#/mypath', verb:'get'}}); //=> false
+ // // match multiple paths
+ // app.contextMatchesOptions(context, {path: ['#/mypath', '#/otherpath']}); //=> true
+ // app.contextMatchesOptions(context, {path: ['#/otherpath', '#/thirdpath']}); //=> false
+ // // equivalent to
+ // app.contextMatchesOptions(context, {only: {path: ['#/mypath', '#/otherpath']}}); //=> true
+ // app.contextMatchesOptions(context, {only: {path: ['#/otherpath', '#/thirdpath']}}); //=> false
+ // // match all except multiple paths
+ // app.contextMatchesOptions(context, {except: {path: ['#/mypath', '#/otherpath']}}); //=> false
+ // app.contextMatchesOptions(context, {except: {path: ['#/otherpath', '#/thirdpath']}}); //=> true
+ // // match all except multiple paths and verbs
+ // app.contextMatchesOptions(context, {except: {path: ['#/mypath', '#/otherpath'], verb: ['get', 'post']}}); //=> false
+ // app.contextMatchesOptions(context, {except: {path: ['#/otherpath', '#/thirdpath'], verb: ['get', 'post']}}); //=> true
+ //
+ contextMatchesOptions: function(context, match_options, positive) {
+ var options = match_options;
+ // normalize options
+ if (typeof options === 'string' || _isRegExp(options)) {
+ options = {path: options};
+ }
+ if (typeof positive === 'undefined') {
+ positive = true;
+ }
+ // empty options always match
+ if ($.isEmptyObject(options)) {
+ return true;
+ }
+ // Do we have to match against multiple paths?
+ if (_isArray(options.path)){
+ var results, numopt, opts, len;
+ results = [];
+ for (numopt = 0, len = options.path.length; numopt < len; numopt += 1) {
+ opts = $.extend({}, options, {path: options.path[numopt]});
+ results.push(this.contextMatchesOptions(context, opts));
+ }
+ var matched = $.inArray(true, results) > -1;
+ return positive ? matched : !matched;
+ }
+ if (options.only) {
+ return this.contextMatchesOptions(context, options.only, true);
+ } else if (options.except) {
+ return this.contextMatchesOptions(context, options.except, false);
+ }
+ var path_matched = true, verb_matched = true;
+ if (options.path) {
+ if (!_isRegExp(options.path)) {
+ options.path = new RegExp(options.path.toString() + '$');
+ }
+ path_matched = options.path.test(context.path);
+ }
+ if (options.verb) {
+ if(typeof options.verb === 'string') {
+ verb_matched = options.verb === context.verb;
+ } else {
+ verb_matched = options.verb.indexOf(context.verb) > -1;
+ }
+ }
+ return positive ? (verb_matched && path_matched) : !(verb_matched && path_matched);
+ },
+
+
+ // Delegates to the `location_proxy` to get the current location.
+ // See `Sammy.DefaultLocationProxy` for more info on location proxies.
+ getLocation: function() {
+ return this._location_proxy.getLocation();
+ },
+
+ // Delegates to the `location_proxy` to set the current location.
+ // See `Sammy.DefaultLocationProxy` for more info on location proxies.
+ //
+ // ### Arguments
+ //
+ // * `new_location` A new location string (e.g. '#/')
+ //
+ setLocation: function(new_location) {
+ return this._location_proxy.setLocation(new_location);
+ },
+
+ // Swaps the content of `$element()` with `content`
+ // You can override this method to provide an alternate swap behavior
+ // for `EventContext.partial()`.
+ //
+ // ### Example
+ //
+ // var app = $.sammy(function() {
+ //
+ // // implements a 'fade out'/'fade in'
+ // this.swap = function(content, callback) {
+ // var context = this;
+ // context.$element().fadeOut('slow', function() {
+ // context.$element().html(content);
+ // context.$element().fadeIn('slow', function() {
+ // if (callback) {
+ // callback.apply();
+ // }
+ // });
+ // });
+ // };
+ //
+ // });
+ //
+ swap: function(content, callback) {
+ var $el = this.$element().html(content);
+ if (_isFunction(callback)) { callback(content); }
+ return $el;
+ },
+
+ // a simple global cache for templates. Uses the same semantics as
+ // `Sammy.Cache` and `Sammy.Storage`, so it can easily be replaced with
+ // a persistent storage that lasts beyond the current request.
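+ //
+ // ### Example
+ //
+ // A hypothetical usage sketch (the cache key and template string are illustrative):
+ //
+ //     app.templateCache('greeting.template', '<h1>Hello <%= name %></h1>'); // store
+ //     app.templateCache('greeting.template'); //=> '<h1>Hello <%= name %></h1>'
+ //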
+ templateCache: function(key, value) {
+ if (typeof value != 'undefined') {
+ return _template_cache[key] = value;
+ } else {
+ return _template_cache[key];
+ }
+ },
+
+ // clear the templateCache
+ clearTemplateCache: function() {
+ return (_template_cache = {});
+ },
+
+ // This throws a '404 Not Found' error by invoking `error()`.
+ // Override this method or `error()` to provide custom
+ // 404 behavior (e.g. redirecting to / or showing a warning).
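+ //
+ // ### Example
+ //
+ // A minimal sketch of an override (the log message and fallback location are illustrative):
+ //
+ //     var app = $.sammy(function() {
+ //       this.notFound = function(verb, path) {
+ //         this.log('no route for', verb, path);
+ //         this.setLocation('#/');
+ //       };
+ //     });
+ //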
+ notFound: function(verb, path) {
+ var ret = this.error(['404 Not Found', verb, path].join(' '));
+ return (verb === 'get') ? ret : true;
+ },
+
+ // The base error handler takes a string `message` and an `Error`
+ // object. If `raise_errors` is set to `true` on the app level,
+ // this will re-throw the error to the browser. Otherwise it will send the error
+ // to `log()`. Override this method to provide custom error handling
+ // e.g. logging to a server-side component or displaying some feedback to the
+ // user.
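+ //
+ // ### Example
+ //
+ // One possible override, sketched with an illustrative reporting endpoint:
+ //
+ //     var app = $.sammy(function() {
+ //       this.error = function(message, original_error) {
+ //         // report to a (hypothetical) server-side error collector
+ //         $.post('/errors', {message: message});
+ //       };
+ //     });
+ //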
+ error: function(message, original_error) {
+ if (!original_error) { original_error = new Error(); }
+ original_error.message = [message, original_error.message].join(' ');
+ this.trigger('error', {message: original_error.message, error: original_error});
+ if (this.raise_errors) {
+ throw(original_error);
+ } else {
+ this.log(original_error.message, original_error);
+ }
+ },
+
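+ // Runs the matching 'get' route when the current location differs from the
+ // last location handled.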
+ _checkLocation: function() {
+ var location, returned;
+ // get current location
+ location = this.getLocation();
+ // compare to see if hash has changed
+ if (!this.last_location || this.last_location[0] != 'get' || this.last_location[1] != location) {
+ // reset last location
+ this.last_location = ['get', location];
+ // lookup route for current hash
+ returned = this.runRoute('get', location);
+ }
+ return returned;
+ },
+
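+ // Determines the verb for a form: a hidden `_method` input takes precedence,
+ // then the form's `method` attribute, defaulting to 'get'.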
+ _getFormVerb: function(form) {
+ var $form = $(form), verb, $_method;
+ $_method = $form.find('input[name="_method"]');
+ if ($_method.length > 0) { verb = $_method.val(); }
+ if (!verb) { verb = $form[0].getAttribute('method'); }
+ if (!verb || verb === '') { verb = 'get'; }
+ return $.trim(verb.toString().toLowerCase());
+ },
+
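+ // Handles an intercepted form submission: 'get' forms update the location
+ // with the serialized params, other verbs run the matching route directly.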
+ _checkFormSubmission: function(form) {
+ var $form, path, verb, params, returned;
+ this.trigger('check-form-submission', {form: form});
+ $form = $(form);
+ path = $form.attr('action') || '';
+ verb = this._getFormVerb($form);
+
+ if (this.debug) {
+ this.log('_checkFormSubmission', $form, path, verb);
+ }
+
+ if (verb === 'get') {
+ params = this._serializeFormParams($form);
+ if (params !== '') { path += '?' + params; }
+ this.setLocation(path);
+ returned = false;
+ } else {
+ params = $.extend({}, this._parseFormParams($form));
+ returned = this.runRoute(verb, path, params, form.get(0));
+ }
+ return (typeof returned == 'undefined') ? false : returned;
+ },
+
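+ // Serializes a form's fields into a URL-encoded query string.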
+ _serializeFormParams: function($form) {
+ var queryString = "",
+ fields = $form.serializeArray(),
+ i;
+ if (fields.length > 0) {
+ queryString = this._encodeFormPair(fields[0].name, fields[0].value);
+ for (i = 1; i < fields.length; i++) {
+ queryString = queryString + "&" + this._encodeFormPair(fields[i].name, fields[i].value);
+ }
+ }
+ return queryString;
+ },
+
+ _encodeFormPair: function(name, value){
+ return _encode(name) + "=" + _encode(value);
+ },
+
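+ // Builds a params object from a form's fields, merging repeated names
+ // via `_parseParamPair()`.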
+ _parseFormParams: function($form) {
+ var params = {},
+ form_fields = $form.serializeArray(),
+ i;
+ for (i = 0; i < form_fields.length; i++) {
+ params = this._parseParamPair(params, form_fields[i].name, form_fields[i].value);
+ }
+ return params;
+ },
+
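+ // Extracts and decodes the query string portion of a path into a params object.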
+ _parseQueryString: function(path) {
+ var params = {}, parts, pairs, pair, i;
+
+ parts = path.match(QUERY_STRING_MATCHER);
+ if (parts && parts[1]) {
+ pairs = parts[1].split('&');
+ for (i = 0; i < pairs.length; i++) {
+ pair = pairs[i].split('=');
+ params = this._parseParamPair(params, _decode(pair[0]), _decode(pair[1] || ""));
+ }
+ }
+ return params;
+ },
+
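+ // Sets `key` to `value` on `params`, promoting the value to an array when
+ // the key repeats.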
+ _parseParamPair: function(params, key, value) {
+ if (typeof params[key] !== 'undefined') {
+ if (_isArray(params[key])) {
+ params[key].push(value);
+ } else {
+ params[key] = [params[key], value];
+ }
+ } else {
+ params[key] = value;
+ }
+ return params;
+ },
+
+ _listen: function(name, callback) {
+ return this.$element().bind([name, this.eventNamespace()].join('.'), callback);
+ },
+
+ _unlisten: function(name, callback) {
+ return this.$element().unbind([name, this.eventNamespace()].join('.'), callback);
+ }
+
+ });
+
+ // `Sammy.RenderContext` is an object that makes sequential template loading,
+ // rendering and interpolation seamless even when dealing with asynchronous
+ // operations.
+ //
+ // `RenderContext` objects are not usually created directly, rather they are
+ // instantiated from an `Sammy.EventContext` by using `render()`, `load()` or
+ // `partial()` which all return `RenderContext` objects.
+ //
+ // `RenderContext` methods always return a modified `RenderContext`
+ // for chaining (like jQuery itself).
+ //
+ // The core magic is in the `then()` method which puts the callback passed as
+ // an argument into a queue to be executed once the previous callback is complete.
+ // All the methods of `RenderContext` are wrapped in `then()`, which allows you
+ // to queue up methods by chaining while maintaining a guaranteed execution order,
+ // even with remote calls to fetch templates.
+ //
+ Sammy.RenderContext = function(event_context) {
+ this.event_context = event_context;
+ this.callbacks = [];
+ this.previous_content = null;
+ this.content = null;
+ this.next_engine = false;
+ this.waiting = false;
+ };
+
+ Sammy.RenderContext.prototype = $.extend({}, Sammy.Object.prototype, {
+
+ // The "core" of the `RenderContext` object, adds the `callback` to the
+ // queue. If the context is `waiting` (meaning an async operation is happening)
+ // then the callback will be executed in order, once the other operations are
+ // complete. If there is no currently executing operation, the `callback`
+ // is executed immediately.
+ //
+ // The value returned from the callback is stored in `content` for the
+ // subsequent operation. If you return `false`, the queue will pause, and
+ // the next callback in the queue will not be executed until `next()` is
+ // called. This allows for the guaranteed order of execution while working
+ // with async operations.
+ //
+ // If then() is passed a string instead of a function, the string is looked
+ // up as a helper method on the event context.
+ //
+ // ### Example
+ //
+ // this.get('#/', function() {
+ // // initialize the RenderContext
+ // // Even though `load()` executes async, the next `then()`
+ //       // won't execute until the load finishes
+ // this.load('myfile.txt')
+ // .then(function(content) {
+ // // the first argument to then is the content of the
+ // // prev operation
+ // $('#main').html(content);
+ // });
+ // });
+ //
+ then: function(callback) {
+ if (!_isFunction(callback)) {
+ // if a string is passed to then, assume we want to call
+ // a helper on the event context in its context
+ if (typeof callback === 'string' && callback in this.event_context) {
+ var helper = this.event_context[callback];
+ callback = function(content) {
+ return helper.apply(this.event_context, [content]);
+ };
+ } else {
+ return this;
+ }
+ }
+ var context = this;
+ if (this.waiting) {
+ this.callbacks.push(callback);
+ } else {
+ this.wait();
+ window.setTimeout(function() {
+ var returned = callback.apply(context, [context.content, context.previous_content]);
+ if (returned !== false) {
+ context.next(returned);
+ }
+ }, 0);
+ }
+ return this;
+ },
+
+ // Pause the `RenderContext` queue. Combined with `next()` allows for async
+ // operations.
+ //
+ // ### Example
+ //
+ // this.get('#/', function() {
+ // this.load('mytext.json')
+ // .then(function(content) {
+ // var context = this,
+ // data = JSON.parse(content);
+ // // pause execution
+ // context.wait();
+ // // post to a url
+ // $.post(data.url, {}, function(response) {
+ // context.next(JSON.parse(response));
+ // });
+ // })
+ // .then(function(data) {
+ // // data is json from the previous post
+ // $('#message').text(data.status);
+ // });
+ // });
+ wait: function() {
+ this.waiting = true;
+ },
+
+ // Resume the queue, setting `content` to be used in the next operation.
+ // See `wait()` for an example.
+ next: function(content) {
+ this.waiting = false;
+ if (typeof content !== 'undefined') {
+ this.previous_content = this.content;
+ this.content = content;
+ }
+ if (this.callbacks.length > 0) {
+ this.then(this.callbacks.shift());
+ }
+ },
+
+ // Load a template into the context.
+ // The `location` can either be a string specifying the remote path to the
+ // file, a jQuery object, or a DOM element.
+ //
+ // No interpolation happens by default, the content is stored in
+ // `content`.
+ //
+ // In the case of a path, unless the option `{cache: false}` is passed the
+ // data is stored in the app's `templateCache()`.
+ //
+ // If a jQuery or DOM object is passed, the `innerHTML` of the node is pulled in.
+ // This is useful for nesting templates as part of the initial page load, wrapped
+ // in invisible elements or `<script>` tags. With template paths, the template
+ // engine is looked up by the extension. For DOM/jQuery embedded templates,
+ // this isn't possible, so there are a couple of options:
+ //
+ // * pass an `{engine:}` option.
+ // * define the engine in the `data-engine` attribute of the passed node.
+ // * just store the raw template data and use `interpolate()` manually
+ //
+ // If a `callback` is passed it is executed after the template load.
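+ //
+ // ### Example
+ //
+ // Two illustrative calls (the path, selector and engine name are assumptions):
+ //
+ //     // fetch a remote template and place it manually
+ //     this.load('mytemplate.template')
+ //         .then(function(content) {
+ //           $('#main').html(content);
+ //         });
+ //     // pull a template embedded in the page, naming the engine explicitly
+ //     this.load($('#inline-template'), {engine: 'mustache'});
+ //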
+ load: function(location, options, callback) {
+ var context = this;
+ return this.then(function() {
+ var should_cache, cached, is_json, location_array;
+ if (_isFunction(options)) {
+ callback = options;
+ options = {};
+ } else {
+ options = $.extend({}, options);
+ }
+ if (callback) { this.then(callback); }
+ if (typeof location === 'string') {
+ // it's a path
+ is_json = (location.match(/\.json(\?|$)/) || options.json);
+ should_cache = is_json ? options.cache === true : options.cache !== false;
+ context.next_engine = context.event_context.engineFor(location);
+ delete options.cache;
+ delete options.json;
+ if (options.engine) {
+ context.next_engine = options.engine;
+ delete options.engine;
+ }
+ if (should_cache && (cached = this.event_context.app.templateCache(location))) {
+ return cached;
+ }
+ this.wait();
+ $.ajax($.extend({
+ url: location,
+ data: {},
+ dataType: is_json ? 'json' : 'text',
+ type: 'get',
+ success: function(data) {
+ if (should_cache) {
+ context.event_context.app.templateCache(location, data);
+ }
+ context.next(data);
+ }
+ }, options));
+ return false;
+ } else {
+ // it's a dom/jQuery
+ if (location.nodeType) {
+ return location.innerHTML;
+ }
+ if (location.selector) {
+ // it's a jQuery
+ context.next_engine = location.attr('data-engine');
+ if (options.clone === false) {
+ return location.remove()[0].innerHTML.toString();
+ } else {
+ return location[0].innerHTML.toString();
+ }
+ }
+ }
+ });
+ },
+
+ // Load partials
+ //
+ // ### Example
+ //
+ // this.loadPartials({mypartial: '/path/to/partial'});
+ //
+ loadPartials: function(partials) {
+ var name;
+ if(partials) {
+ this.partials = this.partials || {};
+ for(name in partials) {
+ (function(context, name) {
+ context.load(partials[name])
+ .then(function(template) {
+ this.partials[name] = template;
+ });
+ })(this, name);
+ }
+ }
+ return this;
+ },
+
+ // `load()` a template and then `interpolate()` it with data.
+ //
+ // can be called with multiple different signatures:
+ //
+ // this.render(callback);
+ // this.render('/location');
+ // this.render('/location', {some: data});
+ // this.render('/location', callback);
+ // this.render('/location', {some: data}, callback);
+ // this.render('/location', {some: data}, {my: partials});
+ // this.render('/location', callback, {my: partials});
+ // this.render('/location', {some: data}, callback, {my: partials});
+ //
+ // ### Example
+ //
+ // this.get('#/', function() {
+ // this.render('mytemplate.template', {name: 'test'});
+ // });
+ //
+ render: function(location, data, callback, partials) {
+ if (_isFunction(location) && !data) {
+ // invoked as render(callback)
+ return this.then(location);
+ } else {
+ if(_isFunction(data)) {
+ // invoked as render(location, callback, [partials])
+ partials = callback;
+ callback = data;
+ data = null;
+ } else if(callback && !_isFunction(callback)) {
+ // invoked as render(location, data, partials)
+ partials = callback;
+ callback = null;
+ }
+
+ return this.loadPartials(partials)
+ .load(location)
+ .interpolate(data, location)
+ .then(callback);
+ }
+ },
+
+ // `render()` the `location` with `data` and then `swap()` the
+ // app's `$element` with the rendered content.
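+ //
+ // ### Example
+ //
+ // An illustrative route (template path and data are assumptions):
+ //
+ //     this.get('#/', function() {
+ //       this.partial('mytemplate.template', {name: 'test'});
+ //     });
+ //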
+ partial: function(location, data, callback, partials) {
+ if (_isFunction(callback)) {
+ // invoked as partial(location, data, callback, [partials])
+ return this.render(location, data, partials).swap(callback);
+ } else if (_isFunction(data)) {
+ // invoked as partial(location, callback, [partials])
+ return this.render(location, {}, callback).swap(data);
+ } else {
+ // invoked as partial(location, data, [partials])
+ return this.render(location, data, callback).swap();
+ }
+ },
+
+ // defers the call of a function to occur in order within the render queue.
+ // The function can accept any number of arguments as long as the last
+ // argument is a callback function. This is useful for putting arbitrary
+ // asynchronous functions into the queue. The content passed to the
+ // callback is passed as `content` to the next item in the queue.
+ //
+ // ### Example
+ //
+ // this.send($.getJSON, '/app.json')
+ // .then(function(json) {
+ //         $('#message').text(json['message']);
+ // });
+ //
+ //
+ send: function() {
+ var context = this,
+ args = _makeArray(arguments),
+ fun = args.shift();
+
+ if (_isArray(args[0])) { args = args[0]; }
+
+ return this.then(function(content) {
+ args.push(function(response) { context.next(response); });
+ context.wait();
+ fun.apply(fun, args);
+ return false;
+ });
+ },
+
+ // iterates over an array, applying the callback for each item. The
+ // callback takes the same style of arguments as `jQuery.each()` (index, item).
+ // The return value of each callback is collected as a single string and stored
+ // as `content` to be used in the next iteration of the `RenderContext`.
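+ //
+ // ### Example
+ //
+ // A sketch that renders one <li> per item (the array is illustrative); the
+ // joined string becomes `content` for the next step in the chain:
+ //
+ //     this.collect(['a', 'b', 'c'], function(i, item) {
+ //       return '<li>' + item + '</li>';
+ //     });
+ //     //=> content: '<li>a</li><li>b</li><li>c</li>'
+ //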
+ collect: function(array, callback, now) {
+ var context = this;
+ var coll = function() {
+ if (_isFunction(array)) {
+ callback = array;
+ array = this.content;
+ }
+ var contents = [], doms = false;
+ $.each(array, function(i, item) {
+ var returned = callback.apply(context, [i, item]);
+ if (returned.jquery && returned.length == 1) {
+ returned = returned[0];
+ doms = true;
+ }
+ contents.push(returned);
+ return returned;
+ });
+ return doms ? contents : contents.join('');
+ };
+ return now ? coll() : this.then(coll);
+ },
+
+ // loads a template, and then interpolates it for each item in the `data`
+ // array. If a callback is passed, it will call the callback with each
+ // item in the array _after_ interpolation.
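+ //
+ // ### Example
+ //
+ // An illustrative call (template path and data are assumptions; see
+ // `EventContext#renderEach` below for more variations):
+ //
+ //     this.renderEach('mytemplate.template', 'name', ['quirkey', 'endor'])
+ //         .appendTo('ul');
+ //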
+ renderEach: function(location, name, data, callback) {
+ if (_isArray(name)) {
+ callback = data;
+ data = name;
+ name = null;
+ }
+ return this.load(location).then(function(content) {
+ var rctx = this;
+ if (!data) {
+ data = _isArray(this.previous_content) ? this.previous_content : [];
+ }
+ if (callback) {
+ $.each(data, function(i, value) {
+ var idata = {}, engine = this.next_engine || location;
+ if (name) {
+ idata[name] = value;
+ } else {
+ idata = value;
+ }
+ callback(value, rctx.event_context.interpolate(content, idata, engine));
+ });
+ } else {
+ return this.collect(data, function(i, value) {
+ var idata = {}, engine = this.next_engine || location;
+ if (name) {
+ idata[name] = value;
+ } else {
+ idata = value;
+ }
+ return this.event_context.interpolate(content, idata, engine);
+ }, true);
+ }
+ });
+ },
+
+ // uses the previously loaded `content` and the `data` object to interpolate
+ // a template. `engine` defines the templating/interpolation method/engine
+ // that should be used. If `engine` is not passed, the `next_engine` is
+ // used. If `retain` is `true`, the final interpolated data is appended to
+ // the `previous_content` instead of just replacing it.
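+ //
+ // ### Example
+ //
+ // A sketch chaining `load()` and `interpolate()` (path and data are assumptions;
+ // the engine is inferred from the file extension by `load()`):
+ //
+ //     this.load('mytemplate.template')
+ //         .interpolate({name: 'test'})
+ //         .swap();
+ //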
+ interpolate: function(data, engine, retain) {
+ var context = this;
+ return this.then(function(content, prev) {
+ if (!data && prev) { data = prev; }
+ if (this.next_engine) {
+ engine = this.next_engine;
+ this.next_engine = false;
+ }
+ var rendered = context.event_context.interpolate(content, data, engine, this.partials);
+ return retain ? prev + rendered : rendered;
+ });
+ },
+
+ // Swaps the returned contents, ensuring order. See `Application#swap`.
+ swap: function(callback) {
+ return this.then(function(content) {
+ this.event_context.swap(content, callback);
+ return content;
+ }).trigger('changed', {});
+ },
+
+ // Same usage as `jQuery.fn.appendTo()` but uses `then()` to ensure order
+ appendTo: function(selector) {
+ return this.then(function(content) {
+ $(selector).append(content);
+ }).trigger('changed', {});
+ },
+
+ // Same usage as `jQuery.fn.prependTo()` but uses `then()` to ensure order
+ prependTo: function(selector) {
+ return this.then(function(content) {
+ $(selector).prepend(content);
+ }).trigger('changed', {});
+ },
+
+ // Replaces the `$(selector)` using `html()` with the previously loaded
+ // `content`
+ replace: function(selector) {
+ return this.then(function(content) {
+ $(selector).html(content);
+ }).trigger('changed', {});
+ },
+
+ // trigger the event in the order of the event context. Same semantics
+ // as `Sammy.EventContext#trigger()`. If data is omitted, `content`
+ // is sent as `{content: content}`
+ trigger: function(name, data) {
+ return this.then(function(content) {
+ if (typeof data == 'undefined') { data = {content: content}; }
+ this.event_context.trigger(name, data);
+ return content;
+ });
+ }
+
+ });
+
+ // `Sammy.EventContext` objects are created every time a route is run or a
+ // bound event is triggered. The callbacks for these events are evaluated within a `Sammy.EventContext`.
+ // Within these callbacks, the special methods of `EventContext` are available.
+ //
+ // ### Example
+ //
+ // $.sammy(function() {
+ // // The context here is this Sammy.Application
+ // this.get('#/:name', function() {
+ // // The context here is a new Sammy.EventContext
+ // if (this.params['name'] == 'sammy') {
+ // this.partial('name.html.erb', {name: 'Sammy'});
+ // } else {
+ // this.redirect('#/somewhere-else')
+ // }
+ // });
+ // });
+ //
+ // Initialize a new EventContext
+ //
+ // ### Arguments
+ //
+ // * `app` The `Sammy.Application` this event is called within.
+ // * `verb` The verb invoked to run this context/route.
+ // * `path` The string path invoked to run this context/route.
+ // * `params` An Object of optional params to pass to the context. Is converted
+ // to a `Sammy.Object`.
+ // * `target` The DOM element from which the event that holds this context
+ // originates. For post, put and del routes, this is the form element that triggered
+ // the route.
+ //
+ Sammy.EventContext = function(app, verb, path, params, target) {
+ this.app = app;
+ this.verb = verb;
+ this.path = path;
+ this.params = new Sammy.Object(params);
+ this.target = target;
+ };
+
+ Sammy.EventContext.prototype = $.extend({}, Sammy.Object.prototype, {
+
+ // A shortcut to the app's `$element()`
+ $element: function() {
+ return this.app.$element(_makeArray(arguments).shift());
+ },
+
+ // Look up a templating engine within the current app and context.
+ // `engine` can be one of the following:
+ //
+ // * a function: should conform to `function(content, data) { return interpolated; }`
+ // * a template path: 'template.ejs', looks up the extension to match to
+ // the `ejs()` helper
+ // * a string referring to the helper: "mustache" => `mustache()`
+ //
+ // If no engine is found, use the app's default `template_engine`
+ //
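+ // ### Example
+ //
+ // Illustrative lookups (helper availability depends on the loaded plugins):
+ //
+ //     this.engineFor('template.ejs'); //=> the ejs() helper, if defined
+ //     this.engineFor('mustache');     //=> the mustache() helper, if defined
+ //     this.engineFor(function(content, data) { return content; }); //=> the function itself
+ //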
+ engineFor: function(engine) {
+ var context = this, engine_match;
+ // if path is actually an engine function just return it
+ if (_isFunction(engine)) { return engine; }
+ // lookup engine name by path extension
+ engine = (engine || context.app.template_engine).toString();
+ if ((engine_match = engine.match(/\.([^\.\?\#]+)(\?|$)/))) {
+ engine = engine_match[1];
+ }
+ // set the engine to the default template engine if no match is found
+ if (engine && _isFunction(context[engine])) {
+ return context[engine];
+ }
+
+ if (context.app.template_engine) {
+ return this.engineFor(context.app.template_engine);
+ }
+ return function(content, data) { return content; };
+ },
+
+ // using the template `engine` found with `engineFor()`, interpolate the
+ // `data` into `content`
+ interpolate: function(content, data, engine, partials) {
+ return this.engineFor(engine).apply(this, [content, data, partials]);
+ },
+
+ // Create and return a `Sammy.RenderContext` calling `render()` on it.
+ // Loads the template and interpolates the data; however, it does not actually
+ // place it in the DOM.
+ //
+ // ### Example
+ //
+ // // mytemplate.mustache <div class="name">{{name}}</div>
+ // render('mytemplate.mustache', {name: 'quirkey'});
+ // // sets the `content` to <div class="name">quirkey</div>
+ // render('mytemplate.mustache', {name: 'quirkey'})
+ // .appendTo('ul');
+ // // appends the rendered content to $('ul')
+ //
+ render: function(location, data, callback, partials) {
+ return new Sammy.RenderContext(this).render(location, data, callback, partials);
+ },
+
+ // Create and return a `Sammy.RenderContext` calling `renderEach()` on it.
+ // Loads the template and interpolates the data for each item,
+ // however does not actually place it in the DOM.
+ //
+ // `name` is an optional parameter (if it is an array, it is used as `data`,
+ // and the third parameter used as `callback`, if set).
+ //
+ // If `data` is not provided, content from the previous step in the chain
+ // (if it is an array) is used, and `name` is used as the key for each
+ // element of the array (useful for referencing in template).
+ //
+ // ### Example
+ //
+ // // mytemplate.mustache <div class="name">{{name}}</div>
+ // renderEach('mytemplate.mustache', [{name: 'quirkey'}, {name: 'endor'}])
+ // // sets the `content` to <div class="name">quirkey</div><div class="name">endor</div>
+ // renderEach('mytemplate.mustache', [{name: 'quirkey'}, {name: 'endor'}]).appendTo('ul');
+ // // appends the rendered content to $('ul')
+ //
+ // // names.json: ["quirkey", "endor"]
+ // this.load('names.json').renderEach('mytemplate.mustache', 'name').appendTo('ul');
+ // // uses the template to render each item in the JSON array
+ //
+ renderEach: function(location, name, data, callback) {
+ return new Sammy.RenderContext(this).renderEach(location, name, data, callback);
+ },
+
+ // create a new `Sammy.RenderContext` calling `load()` with `location` and
+ // `options`. Called without interpolation or placement, this allows for
+ // preloading/caching the templates.
+ load: function(location, options, callback) {
+ return new Sammy.RenderContext(this).load(location, options, callback);
+ },
+
+ // create a new `Sammy.RenderContext` calling `loadPartials()` with `partials`.
+ loadPartials: function(partials) {
+ return new Sammy.RenderContext(this).loadPartials(partials);
+ },
+
+ // `render()` the `location` with `data` and then `swap()` the
+ // app's `$element` with the rendered content.
+ partial: function(location, data, callback, partials) {
+ return new Sammy.RenderContext(this).partial(location, data, callback, partials);
+ },
+
+ // create a new `Sammy.RenderContext` calling `send()` with an arbitrary
+ // function
+ send: function() {
+ var rctx = new Sammy.RenderContext(this);
+ return rctx.send.apply(rctx, arguments);
+ },
+
+ // Changes the location of the current window. If `to` begins with
+ // '#' it only changes the document's hash. If passed more than one argument,
+ // redirect will join them together with forward slashes.
+ //
+ // ### Example
+ //
+ // redirect('#/other/route');
+ // // equivalent to
+ // redirect('#', 'other', 'route');
+ //
+ redirect: function() {
+ var to, args = _makeArray(arguments),
+ current_location = this.app.getLocation(),
+ l = args.length;
+ if (l > 1) {
+ var i = 0, paths = [], pairs = [], params = {}, has_params = false;
+ for (; i < l; i++) {
+ if (typeof args[i] == 'string') {
+ paths.push(args[i]);
+ } else {
+ $.extend(params, args[i]);
+ has_params = true;
+ }
+ }
+ to = paths.join('/');
+ if (has_params) {
+ for (var k in params) {
+ pairs.push(this.app._encodeFormPair(k, params[k]));
+ }
+ to += '?' + pairs.join('&');
+ }
+ } else {
+ to = args[0];
+ }
+ this.trigger('redirect', {to: to});
+ this.app.last_location = [this.verb, this.path];
+ this.app.setLocation(to);
+ if (new RegExp(to).test(current_location)) {
+ this.app.trigger('location-changed');
+ }
+ },
+
+ // Triggers events on `app` within the current context.
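+ //
+ // ### Example
+ //
+ // An illustrative custom event (the event name and payload are assumptions):
+ //
+ //     this.trigger('my-event', {message: 'hello'});
+ //     // elsewhere: app.bind('my-event', function(e, data) { this.log(data.message); });
+ //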
+ trigger: function(name, data) {
+ if (typeof data == 'undefined') { data = {}; }
+ if (!data.context) { data.context = this; }
+ return this.app.trigger(name, data);
+ },
+
+ // A shortcut to app's `eventNamespace()`
+ eventNamespace: function() {
+ return this.app.eventNamespace();
+ },
+
+ // A shortcut to app's `swap()`
+ swap: function(contents, callback) {
+ return this.app.swap(contents, callback);
+ },
+
+ // Raises a possible `notFound()` error for the current path.
+ notFound: function() {
+ return this.app.notFound(this.verb, this.path);
+ },
+
+ // Default JSON parsing uses jQuery's `parseJSON()`. Include the `Sammy.JSON`
+ // plugin for the more conformant "crockford special".
+ json: function(string) {
+ return $.parseJSON(string);
+ },
+
+ // //=> Sammy.EventContext: get #/ {}
+ toString: function() {
+ return "Sammy.EventContext: " + [this.verb, this.path, this.params].join(' ');
+ }
+
+ });
+
+ return Sammy;
+});
diff --git a/deps/rabbitmq_management/priv/www/js/sammy-0.7.6.min.js b/deps/rabbitmq_management/priv/www/js/sammy-0.7.6.min.js
new file mode 100644
index 0000000000..a08d509b59
--- /dev/null
+++ b/deps/rabbitmq_management/priv/www/js/sammy-0.7.6.min.js
@@ -0,0 +1,5 @@
+// -- Sammy.js -- /sammy.js
+// http://sammyjs.org
+// Version: 0.7.6
+// Built: 2014-08-26 10:45:34 +0300
+(function(factory){if(typeof define==="function"&&define.amd){define(["jquery"],factory)}else{jQuery.sammy=window.Sammy=factory(jQuery)}})(function($){var Sammy,PATH_REPLACER="([^/]+)",PATH_NAME_MATCHER=/:([\w\d]+)/g,QUERY_STRING_MATCHER=/\?([^#]*)?$/,_makeArray=function(nonarray){return Array.prototype.slice.call(nonarray)},_isFunction=function(obj){return Object.prototype.toString.call(obj)==="[object Function]"},_isArray=function(obj){return Object.prototype.toString.call(obj)==="[object Array]"},_isRegExp=function(obj){return Object.prototype.toString.call(obj)==="[object RegExp]"},_decode=function(str){return decodeURIComponent((str||"").replace(/\+/g," "))},_encode=encodeURIComponent,_escapeHTML=function(s){return String(s).replace(/&(?!\w+;)/g,"&amp;").replace(/</g,"&lt;").replace(/>/g,"&gt;").replace(/"/g,"&quot;")},_routeWrapper=function(verb){return function(){return this.route.apply(this,[verb].concat(Array.prototype.slice.call(arguments)))}},_template_cache={},_has_history=!!(window.history&&history.pushState),loggers=[];Sammy=function(){var args=_makeArray(arguments),app,selector;Sammy.apps=Sammy.apps||{};if(args.length===0||args[0]&&_isFunction(args[0])){return Sammy.apply(Sammy,["body"].concat(args))}else if(typeof(selector=args.shift())=="string"){app=Sammy.apps[selector]||new Sammy.Application;app.element_selector=selector;if(args.length>0){$.each(args,function(i,plugin){app.use(plugin)})}if(app.element_selector!=selector){delete Sammy.apps[selector]}Sammy.apps[app.element_selector]=app;return app}};Sammy.VERSION="0.7.6";Sammy.addLogger=function(logger){loggers.push(logger)};Sammy.log=function(){var args=_makeArray(arguments);args.unshift("["+Date()+"]");$.each(loggers,function(i,logger){logger.apply(Sammy,args)})};if(typeof window.console!="undefined"){if(typeof window.console.log==="function"&&_isFunction(window.console.log.apply)){Sammy.addLogger(function(){window.console.log.apply(window.console,arguments)})}else{Sammy.addLogger(function(){window.console.log(arguments)})}}else if(typeof console!="undefined"){Sammy.addLogger(function(){console.log.apply(console,arguments)})}$.extend(Sammy,{makeArray:_makeArray,isFunction:_isFunction,isArray:_isArray});Sammy.Object=function(obj){return $.extend(this,obj||{})};$.extend(Sammy.Object.prototype,{escapeHTML:_escapeHTML,h:_escapeHTML,toHash:function(){var json={};$.each(this,function(k,v){if(!_isFunction(v)){json[k]=v}});return json},toHTML:function(){var display="";$.each(this,function(k,v){if(!_isFunction(v)){display+="<strong>"+k+"</strong> "+v+"<br />"}});return display},keys:function(attributes_only){var keys=[];for(var property in this){if(!_isFunction(this[property])||!attributes_only){keys.push(property)}}return keys},has:function(key){return this[key]&&$.trim(this[key].toString())!==""},join:function(){var args=_makeArray(arguments);var delimiter=args.shift();return args.join(delimiter)},log:function(){Sammy.log.apply(Sammy,arguments)},toString:function(include_functions){var s=[];$.each(this,function(k,v){if(!_isFunction(v)||include_functions){s.push('"'+k+'": '+v.toString())}});return"Sammy.Object: {"+s.join(",")+"}"}});Sammy.targetIsThisWindow=function targetIsThisWindow(event,tagName){var targetElement=$(event.target).closest(tagName);if(targetElement.length===0){return true}var targetWindow=targetElement.attr("target");if(!targetWindow||targetWindow===window.name||targetWindow==="_self"){return true}if(targetWindow==="_blank"){return false}if(targetWindow==="top"&&window===window.top){return true}return 
false};Sammy.DefaultLocationProxy=function(app,run_interval_every){this.app=app;this.is_native=false;this.has_history=_has_history;this._startPolling(run_interval_every)};Sammy.DefaultLocationProxy.fullPath=function(location_obj){var matches=location_obj.toString().match(/^[^#]*(#.+)$/);var hash=matches?matches[1]:"";return[location_obj.pathname,location_obj.search,hash].join("")};$.extend(Sammy.DefaultLocationProxy.prototype,{bind:function(){var proxy=this,app=this.app,lp=Sammy.DefaultLocationProxy;$(window).bind("hashchange."+this.app.eventNamespace(),function(e,non_native){if(proxy.is_native===false&&!non_native){proxy.is_native=true;window.clearInterval(lp._interval);lp._interval=null}app.trigger("location-changed")});if(_has_history&&!app.disable_push_state){$(window).bind("popstate."+this.app.eventNamespace(),function(e){app.trigger("location-changed")});$(document).delegate("a","click.history-"+this.app.eventNamespace(),function(e){if(e.isDefaultPrevented()||e.metaKey||e.ctrlKey){return}var full_path=lp.fullPath(this),hostname=this.hostname?this.hostname:function(a){var l=document.createElement("a");l.href=a.href;return l.hostname}(this);if(hostname==window.location.hostname&&app.lookupRoute("get",full_path)&&Sammy.targetIsThisWindow(e,"a")){e.preventDefault();proxy.setLocation(full_path);return false}})}if(!lp._bindings){lp._bindings=0}lp._bindings++},unbind:function(){$(window).unbind("hashchange."+this.app.eventNamespace());$(window).unbind("popstate."+this.app.eventNamespace());$(document).undelegate("a","click.history-"+this.app.eventNamespace());Sammy.DefaultLocationProxy._bindings--;if(Sammy.DefaultLocationProxy._bindings<=0){window.clearInterval(Sammy.DefaultLocationProxy._interval);Sammy.DefaultLocationProxy._interval=null}},getLocation:function(){return Sammy.DefaultLocationProxy.fullPath(window.location)},setLocation:function(new_location){if(/^([^#\/]|$)/.test(new_location)){if(_has_history&&!this.app.disable_push_state){new_location="/"+new_location}else{new_location="#!/"+new_location}}if(new_location!=this.getLocation()){if(_has_history&&!this.app.disable_push_state&&/^\//.test(new_location)){history.pushState({path:new_location},window.title,new_location);this.app.trigger("location-changed")}else{return window.location=new_location}}},_startPolling:function(every){var proxy=this;if(!Sammy.DefaultLocationProxy._interval){if(!every){every=10}var hashCheck=function(){var current_location=proxy.getLocation();if(typeof Sammy.DefaultLocationProxy._last_location=="undefined"||current_location!=Sammy.DefaultLocationProxy._last_location){window.setTimeout(function(){$(window).trigger("hashchange",[true])},0)}Sammy.DefaultLocationProxy._last_location=current_location};hashCheck();Sammy.DefaultLocationProxy._interval=window.setInterval(hashCheck,every)}}});Sammy.Application=function(app_function){var app=this;this.routes={};this.listeners=new Sammy.Object({});this.arounds=[];this.befores=[];this.namespace=(new Date).getTime()+"-"+parseInt(Math.random()*1e3,10);this.context_prototype=function(){Sammy.EventContext.apply(this,arguments)};this.context_prototype.prototype=new Sammy.EventContext;if(_isFunction(app_function)){app_function.apply(this,[this])}if(!this._location_proxy){this.setLocationProxy(new 
Sammy.DefaultLocationProxy(this,this.run_interval_every))}if(this.debug){this.bindToAllEvents(function(e,data){app.log(app.toString(),e.cleaned_type,data||{})})}};Sammy.Application.prototype=$.extend({},Sammy.Object.prototype,{ROUTE_VERBS:["get","post","put","delete"],APP_EVENTS:["run","unload","lookup-route","run-route","route-found","event-context-before","event-context-after","changed","error","check-form-submission","redirect","location-changed"],_last_route:null,_location_proxy:null,_running:false,element_selector:"body",debug:false,raise_errors:false,run_interval_every:50,disable_push_state:false,template_engine:null,toString:function(){return"Sammy.Application:"+this.element_selector},$element:function(selector){return selector?$(this.element_selector).find(selector):$(this.element_selector)},use:function(){var args=_makeArray(arguments),plugin=args.shift(),plugin_name=plugin||"";try{args.unshift(this);if(typeof plugin=="string"){plugin_name="Sammy."+plugin;plugin=Sammy[plugin]}plugin.apply(this,args)}catch(e){if(typeof plugin==="undefined"){this.error("Plugin Error: called use() but plugin ("+plugin_name.toString()+") is not defined",e)}else if(!_isFunction(plugin)){this.error("Plugin Error: called use() but '"+plugin_name.toString()+"' is not a function",e)}else{this.error("Plugin Error",e)}}return this},setLocationProxy:function(new_proxy){var original_proxy=this._location_proxy;this._location_proxy=new_proxy;if(this.isRunning()){if(original_proxy){original_proxy.unbind()}this._location_proxy.bind()}},log:function(){Sammy.log.apply(Sammy,Array.prototype.concat.apply([this.element_selector],arguments))},route:function(verb,path){var app=this,param_names=[],add_route,path_match,callback=Array.prototype.slice.call(arguments,2);if(callback.length===0&&_isFunction(path)){callback=[path];path=verb;verb="any"}verb=verb.toLowerCase();if(path.constructor==String){PATH_NAME_MATCHER.lastIndex=0;while((path_match=PATH_NAME_MATCHER.exec(path))!==null){param_names.push(path_match[1])}path=new RegExp(path.replace(PATH_NAME_MATCHER,PATH_REPLACER)+"$")}$.each(callback,function(i,cb){if(typeof cb==="string"){callback[i]=app[cb]}});add_route=function(with_verb){var r={verb:with_verb,path:path,callback:callback,param_names:param_names};app.routes[with_verb]=app.routes[with_verb]||[];app.routes[with_verb].push(r)};if(verb==="any"){$.each(this.ROUTE_VERBS,function(i,v){add_route(v)})}else{add_route(verb)}return this},get:_routeWrapper("get"),post:_routeWrapper("post"),put:_routeWrapper("put"),del:_routeWrapper("delete"),any:_routeWrapper("any"),mapRoutes:function(route_array){var app=this;$.each(route_array,function(i,route_args){app.route.apply(app,route_args)});return this},eventNamespace:function(){return["sammy-app",this.namespace].join("-")},bind:function(name,data,callback){var app=this;if(typeof callback=="undefined"){callback=data}var listener_callback=function(){var e,context,data;e=arguments[0];data=arguments[1];if(data&&data.context){context=data.context;delete data.context}else{context=new app.context_prototype(app,"bind",e.type,data,e.target)}e.cleaned_type=e.type.replace(app.eventNamespace(),"");callback.apply(context,[e,data])};if(!this.listeners[name]){this.listeners[name]=[]}this.listeners[name].push(listener_callback);if(this.isRunning()){this._listen(name,listener_callback)}return this},trigger:function(name,data){this.$element().trigger([name,this.eventNamespace()].join("."),[data]);return this},refresh:function(){this.last_location=null;this.trigger("location-changed");return 
this},before:function(options,callback){if(_isFunction(options)){callback=options;options={}}this.befores.push([options,callback]);return this},after:function(callback){return this.bind("event-context-after",callback)},around:function(callback){this.arounds.push(callback);return this},onComplete:function(callback){this._onComplete=callback;return this},isRunning:function(){return this._running},helpers:function(extensions){$.extend(this.context_prototype.prototype,extensions);return this},helper:function(name,method){this.context_prototype.prototype[name]=method;return this},run:function(start_url){if(this.isRunning()){return false}var app=this;$.each(this.listeners.toHash(),function(name,callbacks){$.each(callbacks,function(i,listener_callback){app._listen(name,listener_callback)})});this.trigger("run",{start_url:start_url});this._running=true;this.last_location=null;if(!/\#(.+)/.test(this.getLocation())&&typeof start_url!="undefined"){this.setLocation(start_url)}this._checkLocation();this._location_proxy.bind();this.bind("location-changed",function(){app._checkLocation()});this.bind("submit",function(e){if(!Sammy.targetIsThisWindow(e,"form")){return true}var returned=app._checkFormSubmission($(e.target).closest("form"));return returned===false?e.preventDefault():false});$(window).bind("unload",function(){app.unload()});return this.trigger("changed")},unload:function(){if(!this.isRunning()){return false}var app=this;this.trigger("unload");this._location_proxy.unbind();this.$element().unbind("submit").removeClass(app.eventNamespace());$.each(this.listeners.toHash(),function(name,listeners){$.each(listeners,function(i,listener_callback){app._unlisten(name,listener_callback)})});this._running=false;return this},destroy:function(){this.unload();delete Sammy.apps[this.element_selector];return this},bindToAllEvents:function(callback){var app=this;$.each(this.APP_EVENTS,function(i,e){app.bind(e,callback)});$.each(this.listeners.keys(true),function(i,name){if($.inArray(name,app.APP_EVENTS)==-1){app.bind(name,callback)}});return this},routablePath:function(path){return path.replace(QUERY_STRING_MATCHER,"")},lookupRoute:function(verb,path){var app=this,routed=false,i=0,l,route;if(typeof this.routes[verb]!="undefined"){l=this.routes[verb].length;for(;i<l;i++){route=this.routes[verb][i];if(app.routablePath(path).match(route.path)){routed=route;break}}}return routed},runRoute:function(verb,path,params,target){var app=this,route=this.lookupRoute(verb,path),context,wrapped_route,arounds,around,befores,before,callback_args,path_params,final_returned;if(this.debug){this.log("runRoute",[verb,path].join(" "))}this.trigger("run-route",{verb:verb,path:path,params:params});if(typeof params=="undefined"){params={}}$.extend(params,this._parseQueryString(path));if(route){this.trigger("route-found",{route:route});if((path_params=route.path.exec(this.routablePath(path)))!==null){path_params.shift();$.each(path_params,function(i,param){if(route.param_names[i]){params[route.param_names[i]]=_decode(param)}else{if(!params.splat){params.splat=[]}params.splat.push(_decode(param))}})}context=new this.context_prototype(this,verb,path,params,target);arounds=this.arounds.slice(0);befores=this.befores.slice(0);callback_args=[context];if(params.splat){callback_args=callback_args.concat(params.splat)}wrapped_route=function(){var returned,i,nextRoute;while(befores.length>0){before=befores.shift();if(app.contextMatchesOptions(context,before[0])){returned=before[1].apply(context,[context]);if(returned===false){return 
false}}}app.last_route=route;context.trigger("event-context-before",{context:context});if(typeof route.callback==="function"){route.callback=[route.callback]}if(route.callback&&route.callback.length){i=-1;nextRoute=function(){i++;if(route.callback[i]){returned=route.callback[i].apply(context,callback_args)}else if(app._onComplete&&typeof(app._onComplete==="function")){app._onComplete(context)}};callback_args.push(nextRoute);nextRoute()}context.trigger("event-context-after",{context:context});return returned};$.each(arounds.reverse(),function(i,around){var last_wrapped_route=wrapped_route;wrapped_route=function(){return around.apply(context,[last_wrapped_route])}});try{final_returned=wrapped_route()}catch(e){this.error(["500 Error",verb,path].join(" "),e)}return final_returned}else{return this.notFound(verb,path)}},contextMatchesOptions:function(context,match_options,positive){var options=match_options;if(typeof options==="string"||_isRegExp(options)){options={path:options}}if(typeof positive==="undefined"){positive=true}if($.isEmptyObject(options)){return true}if(_isArray(options.path)){var results,numopt,opts,len;results=[];for(numopt=0,len=options.path.length;numopt<len;numopt+=1){opts=$.extend({},options,{path:options.path[numopt]});results.push(this.contextMatchesOptions(context,opts))}var matched=$.inArray(true,results)>-1?true:false;return positive?matched:!matched}if(options.only){return this.contextMatchesOptions(context,options.only,true)}else if(options.except){return this.contextMatchesOptions(context,options.except,false)}var path_matched=true,verb_matched=true;if(options.path){if(!_isRegExp(options.path)){options.path=new RegExp(options.path.toString()+"$")}path_matched=options.path.test(context.path)}if(options.verb){if(typeof options.verb==="string"){verb_matched=options.verb===context.verb}else{verb_matched=options.verb.indexOf(context.verb)>-1}}return positive?verb_matched&&path_matched:!(verb_matched&&path_matched)},getLocation:function(){return this._location_proxy.getLocation()},setLocation:function(new_location){return this._location_proxy.setLocation(new_location)},swap:function(content,callback){var $el=this.$element().html(content);if(_isFunction(callback)){callback(content)}return $el},templateCache:function(key,value){if(typeof value!="undefined"){return _template_cache[key]=value}else{return _template_cache[key]}},clearTemplateCache:function(){return _template_cache={}},notFound:function(verb,path){var ret=this.error(["404 Not Found",verb,path].join(" "));return verb==="get"?ret:true},error:function(message,original_error){if(!original_error){original_error=new Error}original_error.message=[message,original_error.message].join(" ");this.trigger("error",{message:original_error.message,error:original_error});if(this.raise_errors){throw original_error}else{this.log(original_error.message,original_error)}},_checkLocation:function(){var location,returned;location=this.getLocation();if(!this.last_location||this.last_location[0]!="get"||this.last_location[1]!=location){this.last_location=["get",location];returned=this.runRoute("get",location)}return returned},_getFormVerb:function(form){var $form=$(form),verb,$_method;$_method=$form.find('input[name="_method"]');if($_method.length>0){verb=$_method.val()}if(!verb){verb=$form[0].getAttribute("method")}if(!verb||verb===""){verb="get"}return $.trim(verb.toString().toLowerCase())},_checkFormSubmission:function(form){var 
$form,path,verb,params,returned;this.trigger("check-form-submission",{form:form});$form=$(form);path=$form.attr("action")||"";verb=this._getFormVerb($form);if(this.debug){this.log("_checkFormSubmission",$form,path,verb)}if(verb==="get"){params=this._serializeFormParams($form);if(params!==""){path+="?"+params}this.setLocation(path);returned=false}else{params=$.extend({},this._parseFormParams($form));returned=this.runRoute(verb,path,params,form.get(0))}return typeof returned=="undefined"?false:returned},_serializeFormParams:function($form){var queryString="",fields=$form.serializeArray(),i;if(fields.length>0){queryString=this._encodeFormPair(fields[0].name,fields[0].value);for(i=1;i<fields.length;i++){queryString=queryString+"&"+this._encodeFormPair(fields[i].name,fields[i].value)}}return queryString},_encodeFormPair:function(name,value){return _encode(name)+"="+_encode(value)},_parseFormParams:function($form){var params={},form_fields=$form.serializeArray(),i;for(i=0;i<form_fields.length;i++){params=this._parseParamPair(params,form_fields[i].name,form_fields[i].value)}return params},_parseQueryString:function(path){var params={},parts,pairs,pair,i;parts=path.match(QUERY_STRING_MATCHER);if(parts&&parts[1]){pairs=parts[1].split("&");for(i=0;i<pairs.length;i++){pair=pairs[i].split("=");params=this._parseParamPair(params,_decode(pair[0]),_decode(pair[1]||""))}}return params},_parseParamPair:function(params,key,value){if(typeof params[key]!=="undefined"){if(_isArray(params[key])){params[key].push(value)}else{params[key]=[params[key],value]}}else{params[key]=value}return params},_listen:function(name,callback){return this.$element().bind([name,this.eventNamespace()].join("."),callback)},_unlisten:function(name,callback){return this.$element().unbind([name,this.eventNamespace()].join("."),callback)}});Sammy.RenderContext=function(event_context){this.event_context=event_context;this.callbacks=[];this.previous_content=null;this.content=null;this.next_engine=false;this.waiting=false};Sammy.RenderContext.prototype=$.extend({},Sammy.Object.prototype,{then:function(callback){if(!_isFunction(callback)){if(typeof callback==="string"&&callback in this.event_context){var helper=this.event_context[callback];callback=function(content){return helper.apply(this.event_context,[content])}}else{return this}}var context=this;if(this.waiting){this.callbacks.push(callback)}else{this.wait();window.setTimeout(function(){var returned=callback.apply(context,[context.content,context.previous_content]);if(returned!==false){context.next(returned)}},0)}return this},wait:function(){this.waiting=true},next:function(content){this.waiting=false;if(typeof content!=="undefined"){this.previous_content=this.content;this.content=content}if(this.callbacks.length>0){this.then(this.callbacks.shift())}},load:function(location,options,callback){var context=this;return this.then(function(){var should_cache,cached,is_json,location_array;if(_isFunction(options)){callback=options;options={}}else{options=$.extend({},options)}if(callback){this.then(callback)}if(typeof location==="string"){is_json=location.match(/\.json(\?|$)/)||options.json;should_cache=is_json?options.cache===true:options.cache!==false;context.next_engine=context.event_context.engineFor(location);delete options.cache;delete options.json;if(options.engine){context.next_engine=options.engine;delete options.engine}if(should_cache&&(cached=this.event_context.app.templateCache(location))){return 
cached}this.wait();$.ajax($.extend({url:location,data:{},dataType:is_json?"json":"text",type:"get",success:function(data){if(should_cache){context.event_context.app.templateCache(location,data)}context.next(data)}},options));return false}else{if(location.nodeType){return location.innerHTML}if(location.selector){context.next_engine=location.attr("data-engine");if(options.clone===false){return location.remove()[0].innerHTML.toString()}else{return location[0].innerHTML.toString()}}}})},loadPartials:function(partials){var name;if(partials){this.partials=this.partials||{};for(name in partials){(function(context,name){context.load(partials[name]).then(function(template){this.partials[name]=template})})(this,name)}}return this},render:function(location,data,callback,partials){if(_isFunction(location)&&!data){return this.then(location)}else{if(_isFunction(data)){partials=callback;callback=data;data=null}else if(callback&&!_isFunction(callback)){partials=callback;callback=null}return this.loadPartials(partials).load(location).interpolate(data,location).then(callback)}},partial:function(location,data,callback,partials){if(_isFunction(callback)){return this.render(location,data,partials).swap(callback)}else if(_isFunction(data)){return this.render(location,{},callback).swap(data)}else{return this.render(location,data,callback).swap()}},send:function(){var context=this,args=_makeArray(arguments),fun=args.shift();if(_isArray(args[0])){args=args[0]}return this.then(function(content){args.push(function(response){context.next(response)});context.wait();fun.apply(fun,args);return false})},collect:function(array,callback,now){var context=this;var coll=function(){if(_isFunction(array)){callback=array;array=this.content}var contents=[],doms=false;$.each(array,function(i,item){var returned=callback.apply(context,[i,item]);if(returned.jquery&&returned.length==1){returned=returned[0];doms=true}contents.push(returned);return returned});return doms?contents:contents.join("")};return now?coll():this.then(coll)},renderEach:function(location,name,data,callback){if(_isArray(name)){callback=data;data=name;name=null}return this.load(location).then(function(content){var rctx=this;if(!data){data=_isArray(this.previous_content)?this.previous_content:[]}if(callback){$.each(data,function(i,value){var idata={},engine=this.next_engine||location;if(name){idata[name]=value}else{idata=value}callback(value,rctx.event_context.interpolate(content,idata,engine))})}else{return this.collect(data,function(i,value){var idata={},engine=this.next_engine||location;if(name){idata[name]=value}else{idata=value}return this.event_context.interpolate(content,idata,engine)},true)}})},interpolate:function(data,engine,retain){var context=this;return this.then(function(content,prev){if(!data&&prev){data=prev}if(this.next_engine){engine=this.next_engine;this.next_engine=false}var rendered=context.event_context.interpolate(content,data,engine,this.partials);return retain?prev+rendered:rendered})},swap:function(callback){return this.then(function(content){this.event_context.swap(content,callback);return content}).trigger("changed",{})},appendTo:function(selector){return this.then(function(content){$(selector).append(content)}).trigger("changed",{})},prependTo:function(selector){return this.then(function(content){$(selector).prepend(content)}).trigger("changed",{})},replace:function(selector){return this.then(function(content){$(selector).html(content)}).trigger("changed",{})},trigger:function(name,data){return this.then(function(content){if(typeof 
data=="undefined"){data={content:content}}this.event_context.trigger(name,data);return content})}});Sammy.EventContext=function(app,verb,path,params,target){this.app=app;this.verb=verb;this.path=path;this.params=new Sammy.Object(params);this.target=target};Sammy.EventContext.prototype=$.extend({},Sammy.Object.prototype,{$element:function(){return this.app.$element(_makeArray(arguments).shift())},engineFor:function(engine){var context=this,engine_match;if(_isFunction(engine)){return engine}engine=(engine||context.app.template_engine).toString();if(engine_match=engine.match(/\.([^\.\?\#]+)(\?|$)/)){engine=engine_match[1]}if(engine&&_isFunction(context[engine])){return context[engine]}if(context.app.template_engine){return this.engineFor(context.app.template_engine)}return function(content,data){return content}},interpolate:function(content,data,engine,partials){return this.engineFor(engine).apply(this,[content,data,partials])},render:function(location,data,callback,partials){return new Sammy.RenderContext(this).render(location,data,callback,partials)},renderEach:function(location,name,data,callback){return new Sammy.RenderContext(this).renderEach(location,name,data,callback)},load:function(location,options,callback){return new Sammy.RenderContext(this).load(location,options,callback)},loadPartials:function(partials){return new Sammy.RenderContext(this).loadPartials(partials)},partial:function(location,data,callback,partials){return new Sammy.RenderContext(this).partial(location,data,callback,partials)},send:function(){var rctx=new Sammy.RenderContext(this);return rctx.send.apply(rctx,arguments)},redirect:function(){var to,args=_makeArray(arguments),current_location=this.app.getLocation(),l=args.length;if(l>1){var i=0,paths=[],pairs=[],params={},has_params=false;for(;i<l;i++){if(typeof args[i]=="string"){paths.push(args[i])}else{$.extend(params,args[i]);has_params=true}}to=paths.join("/");if(has_params){for(var k in params){pairs.push(this.app._encodeFormPair(k,params[k]))}to+="?"+pairs.join("&")}}else{to=args[0]}this.trigger("redirect",{to:to});this.app.last_location=[this.verb,this.path];this.app.setLocation(to);if(new RegExp(to).test(current_location)){this.app.trigger("location-changed")}},trigger:function(name,data){if(typeof data=="undefined"){data={}}if(!data.context){data.context=this}return this.app.trigger(name,data)},eventNamespace:function(){return this.app.eventNamespace()},swap:function(contents,callback){return this.app.swap(contents,callback)},notFound:function(){return this.app.notFound(this.verb,this.path)},json:function(string){return $.parseJSON(string)},toString:function(){return"Sammy.EventContext: "+[this.verb,this.path,this.params].join(" ")}});return Sammy}); \ No newline at end of file
diff --git a/deps/rabbitmq_management/priv/www/js/singular/client_frame.html b/deps/rabbitmq_management/priv/www/js/singular/client_frame.html
new file mode 100644
index 0000000000..7c0f98a2e2
--- /dev/null
+++ b/deps/rabbitmq_management/priv/www/js/singular/client_frame.html
@@ -0,0 +1,21 @@
+<!DOCTYPE html>
+<html>
+<head>
+</head>
+<body>
+<iframe style="display: none;" id="authorizeFrame" src="about:blank"></iframe>
+</body>
+<script type="application/javascript" src="./rpFrame.js"></script>
+<script type="application/javascript">
+ var authorizeWindow = document.getElementById('authorizeFrame').contentWindow;
+ var rpFrame = RPFrame(window.parent.Singular, authorizeWindow, window);
+ window.addEventListener('message', rpFrame.receiveMessage, false);
+
+ rpFrame.checkClientConfig();
+ rpFrame.startCheckingSession();
+
+ window.fetchAccessToken = rpFrame.fetchAccessToken;
+ window.afterAccess = rpFrame.afterAccess;
+ window.afterAuthorize = rpFrame.afterAuthorize;
+</script>
+</html>
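client_frame.html is loaded as a hidden relying-party frame: it reads its configuration from window.parent.Singular, drives the nested authorizeFrame iframe through the RPFrame object from rpFrame.js, and exports fetchAccessToken, afterAccess, and afterAuthorize on its own window for the pages below to call back into. A sketch of the parent page's side of that contract -- the Singular config shape and the fetchAccessToken callback signature are assumptions for illustration, not defined by this commit:

    // Hypothetical parent-page wiring; only the requirement that the parent
    // define window.Singular before the frame loads comes from the file above.
    window.Singular = { clientId: 'rabbit_mgmt' };   // config shape assumed

    var frame = document.createElement('iframe');
    frame.style.display = 'none';
    frame.src = 'js/singular/client_frame.html';
    document.body.appendChild(frame);

    frame.onload = function () {
        // fetchAccessToken is exported onto the frame's window by the
        // script above; its exact signature is an assumption here.
        frame.contentWindow.fetchAccessToken(function (token) {
            console.log('access token obtained', token);
        });
    };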
diff --git a/deps/rabbitmq_management/priv/www/js/singular/postaccess.html b/deps/rabbitmq_management/priv/www/js/singular/postaccess.html
new file mode 100644
index 0000000000..7e6799550e
--- /dev/null
+++ b/deps/rabbitmq_management/priv/www/js/singular/postaccess.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html>
+<html>
+<head>
+ <script type="application/javascript">
+ window.parent.afterAccess(parseInt(window.location.search.substring(1)));
+ </script>
+</head>
+<body>
+</body>
+</html>
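postaccess.html is the redirect target for the token leg of the flow: the hidden iframe lands here with a bare integer as its query string, and the page forwards parseInt() of that value (window.location.search.substring(1) strips the leading '?') to the RP frame's afterAccess callback. Note that parseInt is called without an explicit radix, so the integration evidently relies on the server appending a plain decimal. For example (the value 3600, and reading it as a lifetime in seconds, are assumptions):

    // A redirect to .../postaccess.html?3600 makes the page above run,
    // in effect:
    window.parent.afterAccess(parseInt('3600'));   // -> afterAccess(3600)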
diff --git a/deps/rabbitmq_management/priv/www/js/singular/postauth.html b/deps/rabbitmq_management/priv/www/js/singular/postauth.html
new file mode 100644
index 0000000000..9c6dd1c021
--- /dev/null
+++ b/deps/rabbitmq_management/priv/www/js/singular/postauth.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html>
+<html>
+<head>
+ <script type="application/javascript">
+ window.parent.afterAuthorize();
+ </script>
+</head>
+<body>
+</body>
+</html>
\ No newline at end of file
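postauth.html is the matching target for the authorize leg: it carries no payload and simply signals the RP frame that the authorization redirect completed. Pieced together, the three pages suggest the following round trip -- an interpretation of the commit, not something it documents:

    // 1. client_frame.html points the hidden authorizeFrame at the
    //    authorization endpoint (via RPFrame's checkClientConfig() and
    //    startCheckingSession()).
    // 2. That endpoint redirects the iframe to postauth.html, which calls:
    window.parent.afterAuthorize();
    // 3. afterAuthorize() then obtains a token; the token response
    //    redirects to postaccess.html?<seconds>, whose script calls
    //    window.parent.afterAccess(<seconds>) with the parsed integer.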
diff --git a/deps/rabbitmq_management/priv/www/js/singular/rpFrame.js b/deps/rabbitmq_management/priv/www/js/singular/rpFrame.js
new file mode 100644
index 0000000000..ba5279b36d
--- /dev/null
+++ b/deps/rabbitmq_management/priv/www/js/singular/rpFrame.js
@@ -0,0 +1 @@
+var RPFrame=function(t){var n={};function r(e){if(n[e])return n[e].exports;var i=n[e]={i:e,l:!1,exports:{}};return t[e].call(i.exports,i,i.exports,r),i.l=!0,i.exports}return r.m=t,r.c=n,r.d=function(t,n,e){r.o(t,n)||Object.defineProperty(t,n,{enumerable:!0,get:e})},r.r=function(t){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(t,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(t,"__esModule",{value:!0})},r.t=function(t,n){if(1&n&&(t=r(t)),8&n)return t;if(4&n&&"object"==typeof t&&t&&t.__esModule)return t;var e=Object.create(null);if(r.r(e),Object.defineProperty(e,"default",{enumerable:!0,value:t}),2&n&&"string"!=typeof t)for(var i in t)r.d(e,i,function(n){return t[n]}.bind(null,i));return e},r.n=function(t){var n=t&&t.__esModule?function(){return t.default}:function(){return t};return r.d(n,"a",n),n},r.o=function(t,n){return Object.prototype.hasOwnProperty.call(t,n)},r.p="",r(r.s=127)}([function(t,n,r){var e=r(2),i=r(18),o=r(11),u=r(12),c=r(19),a=function(t,n,r){var f,s,l,h,v=t&a.F,p=t&a.G,g=t&a.S,d=t&a.P,y=t&a.B,m=p?e:g?e[n]||(e[n]={}):(e[n]||{}).prototype,b=p?i:i[n]||(i[n]={}),S=b.prototype||(b.prototype={});for(f in p&&(r=n),r)l=((s=!v&&m&&void 0!==m[f])?m:r)[f],h=y&&s?c(l,e):d&&"function"==typeof l?c(Function.call,l):l,m&&u(m,f,l,t&a.U),b[f]!=l&&o(b,f,h),d&&S[f]!=l&&(S[f]=l)};e.core=i,a.F=1,a.G=2,a.S=4,a.P=8,a.B=16,a.W=32,a.U=64,a.R=128,t.exports=a},function(t,n,r){var e=r(4);t.exports=function(t){if(!e(t))throw TypeError(t+" is not an object!");return t}},function(t,n){var r=t.exports="undefined"!=typeof window&&window.Math==Math?window:"undefined"!=typeof self&&self.Math==Math?self:Function("return this")();"number"==typeof __g&&(__g=r)},function(t,n){t.exports=function(t){try{return!!t()}catch(t){return!0}}},function(t,n){t.exports=function(t){return"object"==typeof t?null!==t:"function"==typeof t}},function(t,n,r){var e=r(49)("wks"),i=r(33),o=r(2).Symbol,u="function"==typeof o;(t.exports=function(t){return e[t]||(e[t]=u&&o[t]||(u?o:i)("Symbol."+t))}).store=e},function(t,n,r){t.exports=!r(3)(function(){return 7!=Object.defineProperty({},"a",{get:function(){return 7}}).a})},function(t,n,r){var e=r(1),i=r(91),o=r(22),u=Object.defineProperty;n.f=r(6)?Object.defineProperty:function(t,n,r){if(e(t),n=o(n,!0),e(r),i)try{return u(t,n,r)}catch(t){}if("get"in r||"set"in r)throw TypeError("Accessors not supported!");return"value"in r&&(t[n]=r.value),t}},function(t,n,r){var e=r(24),i=Math.min;t.exports=function(t){return t>0?i(e(t),9007199254740991):0}},function(t,n,r){var e=r(23);t.exports=function(t){return Object(e(t))}},function(t,n){t.exports=function(t){if("function"!=typeof t)throw TypeError(t+" is not a function!");return t}},function(t,n,r){var e=r(7),i=r(32);t.exports=r(6)?function(t,n,r){return e.f(t,n,i(1,r))}:function(t,n,r){return t[n]=r,t}},function(t,n,r){var e=r(2),i=r(11),o=r(14),u=r(33)("src"),c=Function.toString,a=(""+c).split("toString");r(18).inspectSource=function(t){return c.call(t)},(t.exports=function(t,n,r,c){var f="function"==typeof r;f&&(o(r,"name")||i(r,"name",n)),t[n]!==r&&(f&&(o(r,u)||i(r,u,t[n]?""+t[n]:a.join(String(n)))),t===e?t[n]=r:c?t[n]?t[n]=r:i(t,n,r):(delete t[n],i(t,n,r)))})(Function.prototype,"toString",function(){return"function"==typeof this&&this[u]||c.call(this)})},function(t,n,r){var e=r(0),i=r(3),o=r(23),u=/"/g,c=function(t,n,r,e){var i=String(o(t)),c="<"+n;return""!==r&&(c+=" "+r+'="'+String(e).replace(u,"&quot;")+'"'),c+">"+i+"</"+n+">"};t.exports=function(t,n){var r={};r[t]=n(c),e(e.P+e.F*i(function(){var 
n=""[t]('"');return n!==n.toLowerCase()||n.split('"').length>3}),"String",r)}},function(t,n){var r={}.hasOwnProperty;t.exports=function(t,n){return r.call(t,n)}},function(t,n,r){var e=r(46),i=r(23);t.exports=function(t){return e(i(t))}},function(t,n,r){var e=r(47),i=r(32),o=r(15),u=r(22),c=r(14),a=r(91),f=Object.getOwnPropertyDescriptor;n.f=r(6)?f:function(t,n){if(t=o(t),n=u(n,!0),a)try{return f(t,n)}catch(t){}if(c(t,n))return i(!e.f.call(t,n),t[n])}},function(t,n,r){var e=r(14),i=r(9),o=r(67)("IE_PROTO"),u=Object.prototype;t.exports=Object.getPrototypeOf||function(t){return t=i(t),e(t,o)?t[o]:"function"==typeof t.constructor&&t instanceof t.constructor?t.constructor.prototype:t instanceof Object?u:null}},function(t,n){var r=t.exports={version:"2.5.6"};"number"==typeof __e&&(__e=r)},function(t,n,r){var e=r(10);t.exports=function(t,n,r){if(e(t),void 0===n)return t;switch(r){case 1:return function(r){return t.call(n,r)};case 2:return function(r,e){return t.call(n,r,e)};case 3:return function(r,e,i){return t.call(n,r,e,i)}}return function(){return t.apply(n,arguments)}}},function(t,n){var r={}.toString;t.exports=function(t){return r.call(t).slice(8,-1)}},function(t,n,r){"use strict";var e=r(3);t.exports=function(t,n){return!!t&&e(function(){n?t.call(null,function(){},1):t.call(null)})}},function(t,n,r){var e=r(4);t.exports=function(t,n){if(!e(t))return t;var r,i;if(n&&"function"==typeof(r=t.toString)&&!e(i=r.call(t)))return i;if("function"==typeof(r=t.valueOf)&&!e(i=r.call(t)))return i;if(!n&&"function"==typeof(r=t.toString)&&!e(i=r.call(t)))return i;throw TypeError("Can't convert object to primitive value")}},function(t,n){t.exports=function(t){if(null==t)throw TypeError("Can't call method on "+t);return t}},function(t,n){var r=Math.ceil,e=Math.floor;t.exports=function(t){return isNaN(t=+t)?0:(t>0?e:r)(t)}},function(t,n,r){var e=r(0),i=r(18),o=r(3);t.exports=function(t,n){var r=(i.Object||{})[t]||Object[t],u={};u[t]=n(r),e(e.S+e.F*o(function(){r(1)}),"Object",u)}},function(t,n,r){var e=r(19),i=r(46),o=r(9),u=r(8),c=r(84);t.exports=function(t,n){var r=1==t,a=2==t,f=3==t,s=4==t,l=6==t,h=5==t||l,v=n||c;return function(n,c,p){for(var g,d,y=o(n),m=i(y),b=e(c,p,3),S=u(m.length),w=0,x=r?v(n,S):a?v(n,0):void 0;S>w;w++)if((h||w in m)&&(d=b(g=m[w],w,y),t))if(r)x[w]=d;else if(d)switch(t){case 3:return!0;case 5:return g;case 6:return w;case 2:x.push(g)}else if(s)return!1;return l?-1:f||s?s:x}}},function(t,n,r){"use strict";if(r(6)){var e=r(30),i=r(2),o=r(3),u=r(0),c=r(60),a=r(90),f=r(19),s=r(39),l=r(32),h=r(11),v=r(41),p=r(24),g=r(8),d=r(117),y=r(35),m=r(22),b=r(14),S=r(48),w=r(4),x=r(9),_=r(81),E=r(36),O=r(17),M=r(37).f,P=r(83),F=r(33),A=r(5),I=r(26),j=r(50),N=r(57),k=r(86),T=r(44),L=r(54),R=r(38),C=r(85),W=r(107),D=r(7),U=r(16),G=D.f,V=U.f,B=i.RangeError,z=i.TypeError,J=i.Uint8Array,q=Array.prototype,Y=a.ArrayBuffer,K=a.DataView,H=I(0),X=I(2),$=I(3),Z=I(4),Q=I(5),tt=I(6),nt=j(!0),rt=j(!1),et=k.values,it=k.keys,ot=k.entries,ut=q.lastIndexOf,ct=q.reduce,at=q.reduceRight,ft=q.join,st=q.sort,lt=q.slice,ht=q.toString,vt=q.toLocaleString,pt=A("iterator"),gt=A("toStringTag"),dt=F("typed_constructor"),yt=F("def_constructor"),mt=c.CONSTR,bt=c.TYPED,St=c.VIEW,wt=I(1,function(t,n){return Mt(N(t,t[yt]),n)}),xt=o(function(){return 1===new J(new Uint16Array([1]).buffer)[0]}),_t=!!J&&!!J.prototype.set&&o(function(){new J(1).set({})}),Et=function(t,n){var r=p(t);if(r<0||r%n)throw B("Wrong offset!");return r},Ot=function(t){if(w(t)&&bt in t)return t;throw z(t+" is not a typed array!")},Mt=function(t,n){if(!(w(t)&&dt 
in t))throw z("It is not a typed array constructor!");return new t(n)},Pt=function(t,n){return Ft(N(t,t[yt]),n)},Ft=function(t,n){for(var r=0,e=n.length,i=Mt(t,e);e>r;)i[r]=n[r++];return i},At=function(t,n,r){G(t,n,{get:function(){return this._d[r]}})},It=function(t){var n,r,e,i,o,u,c=x(t),a=arguments.length,s=a>1?arguments[1]:void 0,l=void 0!==s,h=P(c);if(null!=h&&!_(h)){for(u=h.call(c),e=[],n=0;!(o=u.next()).done;n++)e.push(o.value);c=e}for(l&&a>2&&(s=f(s,arguments[2],2)),n=0,r=g(c.length),i=Mt(this,r);r>n;n++)i[n]=l?s(c[n],n):c[n];return i},jt=function(){for(var t=0,n=arguments.length,r=Mt(this,n);n>t;)r[t]=arguments[t++];return r},Nt=!!J&&o(function(){vt.call(new J(1))}),kt=function(){return vt.apply(Nt?lt.call(Ot(this)):Ot(this),arguments)},Tt={copyWithin:function(t,n){return W.call(Ot(this),t,n,arguments.length>2?arguments[2]:void 0)},every:function(t){return Z(Ot(this),t,arguments.length>1?arguments[1]:void 0)},fill:function(t){return C.apply(Ot(this),arguments)},filter:function(t){return Pt(this,X(Ot(this),t,arguments.length>1?arguments[1]:void 0))},find:function(t){return Q(Ot(this),t,arguments.length>1?arguments[1]:void 0)},findIndex:function(t){return tt(Ot(this),t,arguments.length>1?arguments[1]:void 0)},forEach:function(t){H(Ot(this),t,arguments.length>1?arguments[1]:void 0)},indexOf:function(t){return rt(Ot(this),t,arguments.length>1?arguments[1]:void 0)},includes:function(t){return nt(Ot(this),t,arguments.length>1?arguments[1]:void 0)},join:function(t){return ft.apply(Ot(this),arguments)},lastIndexOf:function(t){return ut.apply(Ot(this),arguments)},map:function(t){return wt(Ot(this),t,arguments.length>1?arguments[1]:void 0)},reduce:function(t){return ct.apply(Ot(this),arguments)},reduceRight:function(t){return at.apply(Ot(this),arguments)},reverse:function(){for(var t,n=Ot(this).length,r=Math.floor(n/2),e=0;e<r;)t=this[e],this[e++]=this[--n],this[n]=t;return this},some:function(t){return $(Ot(this),t,arguments.length>1?arguments[1]:void 0)},sort:function(t){return st.call(Ot(this),t)},subarray:function(t,n){var r=Ot(this),e=r.length,i=y(t,e);return new(N(r,r[yt]))(r.buffer,r.byteOffset+i*r.BYTES_PER_ELEMENT,g((void 0===n?e:y(n,e))-i))}},Lt=function(t,n){return Pt(this,lt.call(Ot(this),t,n))},Rt=function(t){Ot(this);var n=Et(arguments[1],1),r=this.length,e=x(t),i=g(e.length),o=0;if(i+n>r)throw B("Wrong length!");for(;o<i;)this[n+o]=e[o++]},Ct={entries:function(){return ot.call(Ot(this))},keys:function(){return it.call(Ot(this))},values:function(){return et.call(Ot(this))}},Wt=function(t,n){return w(t)&&t[bt]&&"symbol"!=typeof n&&n in t&&String(+n)==String(n)},Dt=function(t,n){return Wt(t,n=m(n,!0))?l(2,t[n]):V(t,n)},Ut=function(t,n,r){return!(Wt(t,n=m(n,!0))&&w(r)&&b(r,"value"))||b(r,"get")||b(r,"set")||r.configurable||b(r,"writable")&&!r.writable||b(r,"enumerable")&&!r.enumerable?G(t,n,r):(t[n]=r.value,t)};mt||(U.f=Dt,D.f=Ut),u(u.S+u.F*!mt,"Object",{getOwnPropertyDescriptor:Dt,defineProperty:Ut}),o(function(){ht.call({})})&&(ht=vt=function(){return ft.call(this)});var Gt=v({},Tt);v(Gt,Ct),h(Gt,pt,Ct.values),v(Gt,{slice:Lt,set:Rt,constructor:function(){},toString:ht,toLocaleString:kt}),At(Gt,"buffer","b"),At(Gt,"byteOffset","o"),At(Gt,"byteLength","l"),At(Gt,"length","e"),G(Gt,gt,{get:function(){return this[bt]}}),t.exports=function(t,n,r,a){var f=t+((a=!!a)?"Clamped":"")+"Array",l="get"+t,v="set"+t,p=i[f],y=p||{},m=p&&O(p),b=!p||!c.ABV,x={},_=p&&p.prototype,P=function(t,r){G(t,r,{get:function(){return function(t,r){var e=t._d;return 
e.v[l](r*n+e.o,xt)}(this,r)},set:function(t){return function(t,r,e){var i=t._d;a&&(e=(e=Math.round(e))<0?0:e>255?255:255&e),i.v[v](r*n+i.o,e,xt)}(this,r,t)},enumerable:!0})};b?(p=r(function(t,r,e,i){s(t,p,f,"_d");var o,u,c,a,l=0,v=0;if(w(r)){if(!(r instanceof Y||"ArrayBuffer"==(a=S(r))||"SharedArrayBuffer"==a))return bt in r?Ft(p,r):It.call(p,r);o=r,v=Et(e,n);var y=r.byteLength;if(void 0===i){if(y%n)throw B("Wrong length!");if((u=y-v)<0)throw B("Wrong length!")}else if((u=g(i)*n)+v>y)throw B("Wrong length!");c=u/n}else c=d(r),o=new Y(u=c*n);for(h(t,"_d",{b:o,o:v,l:u,e:c,v:new K(o)});l<c;)P(t,l++)}),_=p.prototype=E(Gt),h(_,"constructor",p)):o(function(){p(1)})&&o(function(){new p(-1)})&&L(function(t){new p,new p(null),new p(1.5),new p(t)},!0)||(p=r(function(t,r,e,i){var o;return s(t,p,f),w(r)?r instanceof Y||"ArrayBuffer"==(o=S(r))||"SharedArrayBuffer"==o?void 0!==i?new y(r,Et(e,n),i):void 0!==e?new y(r,Et(e,n)):new y(r):bt in r?Ft(p,r):It.call(p,r):new y(d(r))}),H(m!==Function.prototype?M(y).concat(M(m)):M(y),function(t){t in p||h(p,t,y[t])}),p.prototype=_,e||(_.constructor=p));var F=_[pt],A=!!F&&("values"==F.name||null==F.name),I=Ct.values;h(p,dt,!0),h(_,bt,f),h(_,St,!0),h(_,yt,p),(a?new p(1)[gt]==f:gt in _)||G(_,gt,{get:function(){return f}}),x[f]=p,u(u.G+u.W+u.F*(p!=y),x),u(u.S,f,{BYTES_PER_ELEMENT:n}),u(u.S+u.F*o(function(){y.of.call(p,1)}),f,{from:It,of:jt}),"BYTES_PER_ELEMENT"in _||h(_,"BYTES_PER_ELEMENT",n),u(u.P,f,Tt),R(f),u(u.P+u.F*_t,f,{set:Rt}),u(u.P+u.F*!A,f,Ct),e||_.toString==ht||(_.toString=ht),u(u.P+u.F*o(function(){new p(1).slice()}),f,{slice:Lt}),u(u.P+u.F*(o(function(){return[1,2].toLocaleString()!=new p([1,2]).toLocaleString()})||!o(function(){_.toLocaleString.call([1,2])})),f,{toLocaleString:kt}),T[f]=A?F:I,e||A||h(_,pt,I)}}else t.exports=function(){}},function(t,n,r){var e=r(112),i=r(0),o=r(49)("metadata"),u=o.store||(o.store=new(r(115))),c=function(t,n,r){var i=u.get(t);if(!i){if(!r)return;u.set(t,i=new e)}var o=i.get(n);if(!o){if(!r)return;i.set(n,o=new e)}return o};t.exports={store:u,map:c,has:function(t,n,r){var e=c(n,r,!1);return void 0!==e&&e.has(t)},get:function(t,n,r){var e=c(n,r,!1);return void 0===e?void 0:e.get(t)},set:function(t,n,r,e){c(r,e,!0).set(t,n)},keys:function(t,n){var r=c(t,n,!1),e=[];return r&&r.forEach(function(t,n){e.push(n)}),e},key:function(t){return void 0===t||"symbol"==typeof t?t:String(t)},exp:function(t){i(i.S,"Reflect",t)}}},function(t,n,r){var e=r(33)("meta"),i=r(4),o=r(14),u=r(7).f,c=0,a=Object.isExtensible||function(){return!0},f=!r(3)(function(){return a(Object.preventExtensions({}))}),s=function(t){u(t,e,{value:{i:"O"+ ++c,w:{}}})},l=t.exports={KEY:e,NEED:!1,fastKey:function(t,n){if(!i(t))return"symbol"==typeof t?t:("string"==typeof t?"S":"P")+t;if(!o(t,e)){if(!a(t))return"F";if(!n)return"E";s(t)}return t[e].i},getWeak:function(t,n){if(!o(t,e)){if(!a(t))return!0;if(!n)return!1;s(t)}return t[e].w},onFreeze:function(t){return f&&l.NEED&&a(t)&&!o(t,e)&&s(t),t}}},function(t,n){t.exports=!1},function(t,n,r){var e=r(5)("unscopables"),i=Array.prototype;null==i[e]&&r(11)(i,e,{}),t.exports=function(t){i[e][t]=!0}},function(t,n){t.exports=function(t,n){return{enumerable:!(1&t),configurable:!(2&t),writable:!(4&t),value:n}}},function(t,n){var r=0,e=Math.random();t.exports=function(t){return"Symbol(".concat(void 0===t?"":t,")_",(++r+e).toString(36))}},function(t,n,r){var e=r(93),i=r(68);t.exports=Object.keys||function(t){return e(t,i)}},function(t,n,r){var 
e=r(24),i=Math.max,o=Math.min;t.exports=function(t,n){return(t=e(t))<0?i(t+n,0):o(t,n)}},function(t,n,r){var e=r(1),i=r(94),o=r(68),u=r(67)("IE_PROTO"),c=function(){},a=function(){var t,n=r(65)("iframe"),e=o.length;for(n.style.display="none",r(69).appendChild(n),n.src="javascript:",(t=n.contentWindow.document).open(),t.write("<script>document.F=Object<\/script>"),t.close(),a=t.F;e--;)delete a.prototype[o[e]];return a()};t.exports=Object.create||function(t,n){var r;return null!==t?(c.prototype=e(t),r=new c,c.prototype=null,r[u]=t):r=a(),void 0===n?r:i(r,n)}},function(t,n,r){var e=r(93),i=r(68).concat("length","prototype");n.f=Object.getOwnPropertyNames||function(t){return e(t,i)}},function(t,n,r){"use strict";var e=r(2),i=r(7),o=r(6),u=r(5)("species");t.exports=function(t){var n=e[t];o&&n&&!n[u]&&i.f(n,u,{configurable:!0,get:function(){return this}})}},function(t,n){t.exports=function(t,n,r,e){if(!(t instanceof n)||void 0!==e&&e in t)throw TypeError(r+": incorrect invocation!");return t}},function(t,n,r){var e=r(19),i=r(105),o=r(81),u=r(1),c=r(8),a=r(83),f={},s={};(n=t.exports=function(t,n,r,l,h){var v,p,g,d,y=h?function(){return t}:a(t),m=e(r,l,n?2:1),b=0;if("function"!=typeof y)throw TypeError(t+" is not iterable!");if(o(y)){for(v=c(t.length);v>b;b++)if((d=n?m(u(p=t[b])[0],p[1]):m(t[b]))===f||d===s)return d}else for(g=y.call(t);!(p=g.next()).done;)if((d=i(g,m,p.value,n))===f||d===s)return d}).BREAK=f,n.RETURN=s},function(t,n,r){var e=r(12);t.exports=function(t,n,r){for(var i in n)e(t,i,n[i],r);return t}},function(t,n,r){var e=r(7).f,i=r(14),o=r(5)("toStringTag");t.exports=function(t,n,r){t&&!i(t=r?t:t.prototype,o)&&e(t,o,{configurable:!0,value:n})}},function(t,n,r){var e=r(0),i=r(23),o=r(3),u=r(71),c="["+u+"]",a=RegExp("^"+c+c+"*"),f=RegExp(c+c+"*$"),s=function(t,n,r){var i={},c=o(function(){return!!u[t]()||"​…"!="​…"[t]()}),a=i[t]=c?n(l):u[t];r&&(i[r]=a),e(e.P+e.F*c,"String",i)},l=s.trim=function(t,n){return t=String(i(t)),1&n&&(t=t.replace(a,"")),2&n&&(t=t.replace(f,"")),t};t.exports=s},function(t,n){t.exports={}},function(t,n,r){var e=r(4);t.exports=function(t,n){if(!e(t)||t._t!==n)throw TypeError("Incompatible receiver, "+n+" required!");return t}},function(t,n,r){var e=r(20);t.exports=Object("z").propertyIsEnumerable(0)?Object:function(t){return"String"==e(t)?t.split(""):Object(t)}},function(t,n){n.f={}.propertyIsEnumerable},function(t,n,r){var e=r(20),i=r(5)("toStringTag"),o="Arguments"==e(function(){return arguments}());t.exports=function(t){var n,r,u;return void 0===t?"Undefined":null===t?"Null":"string"==typeof(r=function(t,n){try{return t[n]}catch(t){}}(n=Object(t),i))?r:o?e(n):"Object"==(u=e(n))&&"function"==typeof n.callee?"Arguments":u}},function(t,n,r){var e=r(18),i=r(2),o=i["__core-js_shared__"]||(i["__core-js_shared__"]={});(t.exports=function(t,n){return o[t]||(o[t]=void 0!==n?n:{})})("versions",[]).push({version:e.version,mode:r(30)?"pure":"global",copyright:"© 2018 Denis Pushkarev (zloirock.ru)"})},function(t,n,r){var e=r(15),i=r(8),o=r(35);t.exports=function(t){return function(n,r,u){var c,a=e(n),f=i(a.length),s=o(u,f);if(t&&r!=r){for(;f>s;)if((c=a[s++])!=c)return!0}else for(;f>s;s++)if((t||s in a)&&a[s]===r)return t||s||0;return!t&&-1}}},function(t,n){n.f=Object.getOwnPropertySymbols},function(t,n,r){var e=r(20);t.exports=Array.isArray||function(t){return"Array"==e(t)}},function(t,n,r){var e=r(4),i=r(20),o=r(5)("match");t.exports=function(t){var n;return e(t)&&(void 0!==(n=t[o])?!!n:"RegExp"==i(t))}},function(t,n,r){var e=r(5)("iterator"),i=!1;try{var 
o=[7][e]();o.return=function(){i=!0},Array.from(o,function(){throw 2})}catch(t){}t.exports=function(t,n){if(!n&&!i)return!1;var r=!1;try{var o=[7],u=o[e]();u.next=function(){return{done:r=!0}},o[e]=function(){return u},t(o)}catch(t){}return r}},function(t,n,r){"use strict";var e=r(1);t.exports=function(){var t=e(this),n="";return t.global&&(n+="g"),t.ignoreCase&&(n+="i"),t.multiline&&(n+="m"),t.unicode&&(n+="u"),t.sticky&&(n+="y"),n}},function(t,n,r){"use strict";var e=r(11),i=r(12),o=r(3),u=r(23),c=r(5);t.exports=function(t,n,r){var a=c(t),f=r(u,a,""[t]),s=f[0],l=f[1];o(function(){var n={};return n[a]=function(){return 7},7!=""[t](n)})&&(i(String.prototype,t,s),e(RegExp.prototype,a,2==n?function(t,n){return l.call(t,this,n)}:function(t){return l.call(t,this)}))}},function(t,n,r){var e=r(1),i=r(10),o=r(5)("species");t.exports=function(t,n){var r,u=e(t).constructor;return void 0===u||null==(r=e(u)[o])?n:i(r)}},function(t,n,r){var e=r(2).navigator;t.exports=e&&e.userAgent||""},function(t,n,r){"use strict";var e=r(2),i=r(0),o=r(12),u=r(41),c=r(29),a=r(40),f=r(39),s=r(4),l=r(3),h=r(54),v=r(42),p=r(72);t.exports=function(t,n,r,g,d,y){var m=e[t],b=m,S=d?"set":"add",w=b&&b.prototype,x={},_=function(t){var n=w[t];o(w,t,"delete"==t?function(t){return!(y&&!s(t))&&n.call(this,0===t?0:t)}:"has"==t?function(t){return!(y&&!s(t))&&n.call(this,0===t?0:t)}:"get"==t?function(t){return y&&!s(t)?void 0:n.call(this,0===t?0:t)}:"add"==t?function(t){return n.call(this,0===t?0:t),this}:function(t,r){return n.call(this,0===t?0:t,r),this})};if("function"==typeof b&&(y||w.forEach&&!l(function(){(new b).entries().next()}))){var E=new b,O=E[S](y?{}:-0,1)!=E,M=l(function(){E.has(1)}),P=h(function(t){new b(t)}),F=!y&&l(function(){for(var t=new b,n=5;n--;)t[S](n,n);return!t.has(-0)});P||((b=n(function(n,r){f(n,b,t);var e=p(new m,n,b);return null!=r&&a(r,d,e[S],e),e})).prototype=w,w.constructor=b),(M||F)&&(_("delete"),_("has"),d&&_("get")),(F||O)&&_(S),y&&w.clear&&delete w.clear}else b=g.getConstructor(n,t,d,S),u(b.prototype,r),c.NEED=!0;return v(b,t),x[t]=b,i(i.G+i.W+i.F*(b!=m),x),y||g.setStrong(b,t,d),b}},function(t,n,r){for(var e,i=r(2),o=r(11),u=r(33),c=u("typed_array"),a=u("view"),f=!(!i.ArrayBuffer||!i.DataView),s=f,l=0,h="Int8Array,Uint8Array,Uint8ClampedArray,Int16Array,Uint16Array,Int32Array,Uint32Array,Float32Array,Float64Array".split(",");l<9;)(e=i[h[l++]])?(o(e.prototype,c,!0),o(e.prototype,a,!0)):s=!1;t.exports={ABV:f,CONSTR:s,TYPED:c,VIEW:a}},function(t,n,r){"use strict";t.exports=r(30)||!r(3)(function(){var t=Math.random();__defineSetter__.call(null,t,function(){}),delete r(2)[t]})},function(t,n,r){"use strict";var e=r(0);t.exports=function(t){e(e.S,t,{of:function(){for(var t=arguments.length,n=new Array(t);t--;)n[t]=arguments[t];return new this(n)}})}},function(t,n,r){"use strict";var e=r(0),i=r(10),o=r(19),u=r(40);t.exports=function(t){e(e.S,t,{from:function(t){var n,r,e,c,a=arguments[1];return i(this),(n=void 0!==a)&&i(a),null==t?new this:(r=[],n?(e=0,c=o(a,arguments[2],2),u(t,!1,function(t){r.push(c(t,e++))})):u(t,!1,r.push,r),new this(r))}})}},function(t,n){var r;r=function(){return this}();try{r=r||new Function("return this")()}catch(t){"object"==typeof window&&(r=window)}t.exports=r},function(t,n,r){var e=r(4),i=r(2).document,o=e(i)&&e(i.createElement);t.exports=function(t){return o?i.createElement(t):{}}},function(t,n,r){var e=r(2),i=r(18),o=r(30),u=r(92),c=r(7).f;t.exports=function(t){var n=i.Symbol||(i.Symbol=o?{}:e.Symbol||{});"_"==t.charAt(0)||t in n||c(n,t,{value:u.f(t)})}},function(t,n,r){var 
e=r(49)("keys"),i=r(33);t.exports=function(t){return e[t]||(e[t]=i(t))}},function(t,n){t.exports="constructor,hasOwnProperty,isPrototypeOf,propertyIsEnumerable,toLocaleString,toString,valueOf".split(",")},function(t,n,r){var e=r(2).document;t.exports=e&&e.documentElement},function(t,n,r){var e=r(4),i=r(1),o=function(t,n){if(i(t),!e(n)&&null!==n)throw TypeError(n+": can't set as prototype!")};t.exports={set:Object.setPrototypeOf||("__proto__"in{}?function(t,n,e){try{(e=r(19)(Function.call,r(16).f(Object.prototype,"__proto__").set,2))(t,[]),n=!(t instanceof Array)}catch(t){n=!0}return function(t,r){return o(t,r),n?t.__proto__=r:e(t,r),t}}({},!1):void 0),check:o}},function(t,n){t.exports="\t\n\v\f\r   ᠎              \u2028\u2029\ufeff"},function(t,n,r){var e=r(4),i=r(70).set;t.exports=function(t,n,r){var o,u=n.constructor;return u!==r&&"function"==typeof u&&(o=u.prototype)!==r.prototype&&e(o)&&i&&i(t,o),t}},function(t,n,r){"use strict";var e=r(24),i=r(23);t.exports=function(t){var n=String(i(this)),r="",o=e(t);if(o<0||o==1/0)throw RangeError("Count can't be negative");for(;o>0;(o>>>=1)&&(n+=n))1&o&&(r+=n);return r}},function(t,n){t.exports=Math.sign||function(t){return 0==(t=+t)||t!=t?t:t<0?-1:1}},function(t,n){var r=Math.expm1;t.exports=!r||r(10)>22025.465794806718||r(10)<22025.465794806718||-2e-17!=r(-2e-17)?function(t){return 0==(t=+t)?t:t>-1e-6&&t<1e-6?t+t*t/2:Math.exp(t)-1}:r},function(t,n,r){var e=r(24),i=r(23);t.exports=function(t){return function(n,r){var o,u,c=String(i(n)),a=e(r),f=c.length;return a<0||a>=f?t?"":void 0:(o=c.charCodeAt(a))<55296||o>56319||a+1===f||(u=c.charCodeAt(a+1))<56320||u>57343?t?c.charAt(a):o:t?c.slice(a,a+2):u-56320+(o-55296<<10)+65536}}},function(t,n,r){"use strict";var e=r(30),i=r(0),o=r(12),u=r(11),c=r(44),a=r(78),f=r(42),s=r(17),l=r(5)("iterator"),h=!([].keys&&"next"in[].keys()),v=function(){return this};t.exports=function(t,n,r,p,g,d,y){a(r,n,p);var m,b,S,w=function(t){if(!h&&t in O)return O[t];switch(t){case"keys":case"values":return function(){return new r(this,t)}}return function(){return new r(this,t)}},x=n+" Iterator",_="values"==g,E=!1,O=t.prototype,M=O[l]||O["@@iterator"]||g&&O[g],P=M||w(g),F=g?_?w("entries"):P:void 0,A="Array"==n&&O.entries||M;if(A&&(S=s(A.call(new t)))!==Object.prototype&&S.next&&(f(S,x,!0),e||"function"==typeof S[l]||u(S,l,v)),_&&M&&"values"!==M.name&&(E=!0,P=function(){return M.call(this)}),e&&!y||!h&&!E&&O[l]||u(O,l,P),c[n]=P,c[x]=v,g)if(m={values:_?P:w("values"),keys:d?P:w("keys"),entries:F},y)for(b in m)b in O||o(O,b,m[b]);else i(i.P+i.F*(h||E),n,m);return m}},function(t,n,r){"use strict";var e=r(36),i=r(32),o=r(42),u={};r(11)(u,r(5)("iterator"),function(){return this}),t.exports=function(t,n,r){t.prototype=e(u,{next:i(1,r)}),o(t,n+" Iterator")}},function(t,n,r){var e=r(53),i=r(23);t.exports=function(t,n,r){if(e(n))throw TypeError("String#"+r+" doesn't accept regex!");return String(i(t))}},function(t,n,r){var e=r(5)("match");t.exports=function(t){var n=/./;try{"/./"[t](n)}catch(r){try{return n[e]=!1,!"/./"[t](n)}catch(t){}}return!0}},function(t,n,r){var e=r(44),i=r(5)("iterator"),o=Array.prototype;t.exports=function(t){return void 0!==t&&(e.Array===t||o[i]===t)}},function(t,n,r){"use strict";var e=r(7),i=r(32);t.exports=function(t,n,r){n in t?e.f(t,n,i(0,r)):t[n]=r}},function(t,n,r){var e=r(48),i=r(5)("iterator"),o=r(44);t.exports=r(18).getIteratorMethod=function(t){if(null!=t)return t[i]||t["@@iterator"]||o[e(t)]}},function(t,n,r){var e=r(221);t.exports=function(t,n){return new(e(t))(n)}},function(t,n,r){"use strict";var 
e=r(9),i=r(35),o=r(8);t.exports=function(t){for(var n=e(this),r=o(n.length),u=arguments.length,c=i(u>1?arguments[1]:void 0,r),a=u>2?arguments[2]:void 0,f=void 0===a?r:i(a,r);f>c;)n[c++]=t;return n}},function(t,n,r){"use strict";var e=r(31),i=r(108),o=r(44),u=r(15);t.exports=r(77)(Array,"Array",function(t,n){this._t=u(t),this._i=0,this._k=n},function(){var t=this._t,n=this._k,r=this._i++;return!t||r>=t.length?(this._t=void 0,i(1)):i(0,"keys"==n?r:"values"==n?t[r]:[r,t[r]])},"values"),o.Arguments=o.Array,e("keys"),e("values"),e("entries")},function(t,n,r){var e,i,o,u=r(19),c=r(98),a=r(69),f=r(65),s=r(2),l=s.process,h=s.setImmediate,v=s.clearImmediate,p=s.MessageChannel,g=s.Dispatch,d=0,y={},m=function(){var t=+this;if(y.hasOwnProperty(t)){var n=y[t];delete y[t],n()}},b=function(t){m.call(t.data)};h&&v||(h=function(t){for(var n=[],r=1;arguments.length>r;)n.push(arguments[r++]);return y[++d]=function(){c("function"==typeof t?t:Function(t),n)},e(d),d},v=function(t){delete y[t]},"process"==r(20)(l)?e=function(t){l.nextTick(u(m,t,1))}:g&&g.now?e=function(t){g.now(u(m,t,1))}:p?(o=(i=new p).port2,i.port1.onmessage=b,e=u(o.postMessage,o,1)):s.addEventListener&&"function"==typeof postMessage&&!s.importScripts?(e=function(t){s.postMessage(t+"","*")},s.addEventListener("message",b,!1)):e="onreadystatechange"in f("script")?function(t){a.appendChild(f("script")).onreadystatechange=function(){a.removeChild(this),m.call(t)}}:function(t){setTimeout(u(m,t,1),0)}),t.exports={set:h,clear:v}},function(t,n,r){var e=r(2),i=r(87).set,o=e.MutationObserver||e.WebKitMutationObserver,u=e.process,c=e.Promise,a="process"==r(20)(u);t.exports=function(){var t,n,r,f=function(){var e,i;for(a&&(e=u.domain)&&e.exit();t;){i=t.fn,t=t.next;try{i()}catch(e){throw t?r():n=void 0,e}}n=void 0,e&&e.enter()};if(a)r=function(){u.nextTick(f)};else if(!o||e.navigator&&e.navigator.standalone)if(c&&c.resolve){var s=c.resolve(void 0);r=function(){s.then(f)}}else r=function(){i.call(e,f)};else{var l=!0,h=document.createTextNode("");new o(f).observe(h,{characterData:!0}),r=function(){h.data=l=!l}}return function(e){var i={fn:e,next:void 0};n&&(n.next=i),t||(t=i,r()),n=i}}},function(t,n,r){"use strict";var e=r(10);function i(t){var n,r;this.promise=new t(function(t,e){if(void 0!==n||void 0!==r)throw TypeError("Bad Promise constructor");n=t,r=e}),this.resolve=e(n),this.reject=e(r)}t.exports.f=function(t){return new i(t)}},function(t,n,r){"use strict";var e=r(2),i=r(6),o=r(30),u=r(60),c=r(11),a=r(41),f=r(3),s=r(39),l=r(24),h=r(8),v=r(117),p=r(37).f,g=r(7).f,d=r(85),y=r(42),m="prototype",b="Wrong index!",S=e.ArrayBuffer,w=e.DataView,x=e.Math,_=e.RangeError,E=e.Infinity,O=S,M=x.abs,P=x.pow,F=x.floor,A=x.log,I=x.LN2,j=i?"_b":"buffer",N=i?"_l":"byteLength",k=i?"_o":"byteOffset";function T(t,n,r){var e,i,o,u=new Array(r),c=8*r-n-1,a=(1<<c)-1,f=a>>1,s=23===n?P(2,-24)-P(2,-77):0,l=0,h=t<0||0===t&&1/t<0?1:0;for((t=M(t))!=t||t===E?(i=t!=t?1:0,e=a):(e=F(A(t)/I),t*(o=P(2,-e))<1&&(e--,o*=2),(t+=e+f>=1?s/o:s*P(2,1-f))*o>=2&&(e++,o/=2),e+f>=a?(i=0,e=a):e+f>=1?(i=(t*o-1)*P(2,n),e+=f):(i=t*P(2,f-1)*P(2,n),e=0));n>=8;u[l++]=255&i,i/=256,n-=8);for(e=e<<n|i,c+=n;c>0;u[l++]=255&e,e/=256,c-=8);return u[--l]|=128*h,u}function L(t,n,r){var e,i=8*r-n-1,o=(1<<i)-1,u=o>>1,c=i-7,a=r-1,f=t[a--],s=127&f;for(f>>=7;c>0;s=256*s+t[a],a--,c-=8);for(e=s&(1<<-c)-1,s>>=-c,c+=n;c>0;e=256*e+t[a],a--,c-=8);if(0===s)s=1-u;else{if(s===o)return e?NaN:f?-E:E;e+=P(2,n),s-=u}return(f?-1:1)*e*P(2,s-n)}function R(t){return t[3]<<24|t[2]<<16|t[1]<<8|t[0]}function C(t){return[255&t]}function 
W(t){return[255&t,t>>8&255]}function D(t){return[255&t,t>>8&255,t>>16&255,t>>24&255]}function U(t){return T(t,52,8)}function G(t){return T(t,23,4)}function V(t,n,r){g(t[m],n,{get:function(){return this[r]}})}function B(t,n,r,e){var i=v(+r);if(i+n>t[N])throw _(b);var o=t[j]._b,u=i+t[k],c=o.slice(u,u+n);return e?c:c.reverse()}function z(t,n,r,e,i,o){var u=v(+r);if(u+n>t[N])throw _(b);for(var c=t[j]._b,a=u+t[k],f=e(+i),s=0;s<n;s++)c[a+s]=f[o?s:n-s-1]}if(u.ABV){if(!f(function(){S(1)})||!f(function(){new S(-1)})||f(function(){return new S,new S(1.5),new S(NaN),"ArrayBuffer"!=S.name})){for(var J,q=(S=function(t){return s(this,S),new O(v(t))})[m]=O[m],Y=p(O),K=0;Y.length>K;)(J=Y[K++])in S||c(S,J,O[J]);o||(q.constructor=S)}var H=new w(new S(2)),X=w[m].setInt8;H.setInt8(0,2147483648),H.setInt8(1,2147483649),!H.getInt8(0)&&H.getInt8(1)||a(w[m],{setInt8:function(t,n){X.call(this,t,n<<24>>24)},setUint8:function(t,n){X.call(this,t,n<<24>>24)}},!0)}else S=function(t){s(this,S,"ArrayBuffer");var n=v(t);this._b=d.call(new Array(n),0),this[N]=n},w=function(t,n,r){s(this,w,"DataView"),s(t,S,"DataView");var e=t[N],i=l(n);if(i<0||i>e)throw _("Wrong offset!");if(i+(r=void 0===r?e-i:h(r))>e)throw _("Wrong length!");this[j]=t,this[k]=i,this[N]=r},i&&(V(S,"byteLength","_l"),V(w,"buffer","_b"),V(w,"byteLength","_l"),V(w,"byteOffset","_o")),a(w[m],{getInt8:function(t){return B(this,1,t)[0]<<24>>24},getUint8:function(t){return B(this,1,t)[0]},getInt16:function(t){var n=B(this,2,t,arguments[1]);return(n[1]<<8|n[0])<<16>>16},getUint16:function(t){var n=B(this,2,t,arguments[1]);return n[1]<<8|n[0]},getInt32:function(t){return R(B(this,4,t,arguments[1]))},getUint32:function(t){return R(B(this,4,t,arguments[1]))>>>0},getFloat32:function(t){return L(B(this,4,t,arguments[1]),23,4)},getFloat64:function(t){return L(B(this,8,t,arguments[1]),52,8)},setInt8:function(t,n){z(this,1,t,C,n)},setUint8:function(t,n){z(this,1,t,C,n)},setInt16:function(t,n){z(this,2,t,W,n,arguments[2])},setUint16:function(t,n){z(this,2,t,W,n,arguments[2])},setInt32:function(t,n){z(this,4,t,D,n,arguments[2])},setUint32:function(t,n){z(this,4,t,D,n,arguments[2])},setFloat32:function(t,n){z(this,4,t,G,n,arguments[2])},setFloat64:function(t,n){z(this,8,t,U,n,arguments[2])}});y(S,"ArrayBuffer"),y(w,"DataView"),c(w[m],u.VIEW,!0),n.ArrayBuffer=S,n.DataView=w},function(t,n,r){t.exports=!r(6)&&!r(3)(function(){return 7!=Object.defineProperty(r(65)("div"),"a",{get:function(){return 7}}).a})},function(t,n,r){n.f=r(5)},function(t,n,r){var e=r(14),i=r(15),o=r(50)(!1),u=r(67)("IE_PROTO");t.exports=function(t,n){var r,c=i(t),a=0,f=[];for(r in c)r!=u&&e(c,r)&&f.push(r);for(;n.length>a;)e(c,r=n[a++])&&(~o(f,r)||f.push(r));return f}},function(t,n,r){var e=r(7),i=r(1),o=r(34);t.exports=r(6)?Object.defineProperties:function(t,n){i(t);for(var r,u=o(n),c=u.length,a=0;c>a;)e.f(t,r=u[a++],n[r]);return t}},function(t,n,r){var e=r(15),i=r(37).f,o={}.toString,u="object"==typeof window&&window&&Object.getOwnPropertyNames?Object.getOwnPropertyNames(window):[];t.exports.f=function(t){return u&&"[object Window]"==o.call(t)?function(t){try{return i(t)}catch(t){return u.slice()}}(t):i(e(t))}},function(t,n,r){"use strict";var e=r(34),i=r(51),o=r(47),u=r(9),c=r(46),a=Object.assign;t.exports=!a||r(3)(function(){var t={},n={},r=Symbol(),e="abcdefghijklmnopqrst";return t[r]=7,e.split("").forEach(function(t){n[t]=t}),7!=a({},t)[r]||Object.keys(a({},n)).join("")!=e})?function(t,n){for(var r=u(t),a=arguments.length,f=1,s=i.f,l=o.f;a>f;)for(var 
h,v=c(arguments[f++]),p=s?e(v).concat(s(v)):e(v),g=p.length,d=0;g>d;)l.call(v,h=p[d++])&&(r[h]=v[h]);return r}:a},function(t,n,r){"use strict";var e=r(10),i=r(4),o=r(98),u=[].slice,c={};t.exports=Function.bind||function(t){var n=e(this),r=u.call(arguments,1),a=function(){var e=r.concat(u.call(arguments));return this instanceof a?function(t,n,r){if(!(n in c)){for(var e=[],i=0;i<n;i++)e[i]="a["+i+"]";c[n]=Function("F,a","return new F("+e.join(",")+")")}return c[n](t,r)}(n,e.length,e):o(n,e,t)};return i(n.prototype)&&(a.prototype=n.prototype),a}},function(t,n){t.exports=function(t,n,r){var e=void 0===r;switch(n.length){case 0:return e?t():t.call(r);case 1:return e?t(n[0]):t.call(r,n[0]);case 2:return e?t(n[0],n[1]):t.call(r,n[0],n[1]);case 3:return e?t(n[0],n[1],n[2]):t.call(r,n[0],n[1],n[2]);case 4:return e?t(n[0],n[1],n[2],n[3]):t.call(r,n[0],n[1],n[2],n[3])}return t.apply(r,n)}},function(t,n,r){var e=r(2).parseInt,i=r(43).trim,o=r(71),u=/^[-+]?0[xX]/;t.exports=8!==e(o+"08")||22!==e(o+"0x16")?function(t,n){var r=i(String(t),3);return e(r,n>>>0||(u.test(r)?16:10))}:e},function(t,n,r){var e=r(2).parseFloat,i=r(43).trim;t.exports=1/e(r(71)+"-0")!=-1/0?function(t){var n=i(String(t),3),r=e(n);return 0===r&&"-"==n.charAt(0)?-0:r}:e},function(t,n,r){var e=r(20);t.exports=function(t,n){if("number"!=typeof t&&"Number"!=e(t))throw TypeError(n);return+t}},function(t,n,r){var e=r(4),i=Math.floor;t.exports=function(t){return!e(t)&&isFinite(t)&&i(t)===t}},function(t,n){t.exports=Math.log1p||function(t){return(t=+t)>-1e-8&&t<1e-8?t-t*t/2:Math.log(1+t)}},function(t,n,r){var e=r(74),i=Math.pow,o=i(2,-52),u=i(2,-23),c=i(2,127)*(2-u),a=i(2,-126);t.exports=Math.fround||function(t){var n,r,i=Math.abs(t),f=e(t);return i<a?f*(i/a/u+1/o-1/o)*a*u:(r=(n=(1+u/o)*i)-(n-i))>c||r!=r?f*(1/0):f*r}},function(t,n,r){var e=r(1);t.exports=function(t,n,r,i){try{return i?n(e(r)[0],r[1]):n(r)}catch(n){var o=t.return;throw void 0!==o&&e(o.call(t)),n}}},function(t,n,r){var e=r(10),i=r(9),o=r(46),u=r(8);t.exports=function(t,n,r,c,a){e(n);var f=i(t),s=o(f),l=u(f.length),h=a?l-1:0,v=a?-1:1;if(r<2)for(;;){if(h in s){c=s[h],h+=v;break}if(h+=v,a?h<0:l<=h)throw TypeError("Reduce of empty array with no initial value")}for(;a?h>=0:l>h;h+=v)h in s&&(c=n(c,s[h],h,f));return c}},function(t,n,r){"use strict";var e=r(9),i=r(35),o=r(8);t.exports=[].copyWithin||function(t,n){var r=e(this),u=o(r.length),c=i(t,u),a=i(n,u),f=arguments.length>2?arguments[2]:void 0,s=Math.min((void 0===f?u:i(f,u))-a,u-c),l=1;for(a<c&&c<a+s&&(l=-1,a+=s-1,c+=s-1);s-- >0;)a in r?r[c]=r[a]:delete r[c],c+=l,a+=l;return r}},function(t,n){t.exports=function(t,n){return{value:n,done:!!t}}},function(t,n,r){r(6)&&"g"!=/./g.flags&&r(7).f(RegExp.prototype,"flags",{configurable:!0,get:r(55)})},function(t,n){t.exports=function(t){try{return{e:!1,v:t()}}catch(t){return{e:!0,v:t}}}},function(t,n,r){var e=r(1),i=r(4),o=r(89);t.exports=function(t,n){if(e(t),i(n)&&n.constructor===t)return n;var r=o.f(t);return(0,r.resolve)(n),r.promise}},function(t,n,r){"use strict";var e=r(113),i=r(45);t.exports=r(59)("Map",function(t){return function(){return t(this,arguments.length>0?arguments[0]:void 0)}},{get:function(t){var n=e.getEntry(i(this,"Map"),t);return n&&n.v},set:function(t,n){return e.def(i(this,"Map"),0===t?0:t,n)}},e,!0)},function(t,n,r){"use strict";var e=r(7).f,i=r(36),o=r(41),u=r(19),c=r(39),a=r(40),f=r(77),s=r(108),l=r(38),h=r(6),v=r(29).fastKey,p=r(45),g=h?"_s":"size",d=function(t,n){var r,e=v(n);if("F"!==e)return t._i[e];for(r=t._f;r;r=r.n)if(r.k==n)return 
r};t.exports={getConstructor:function(t,n,r,f){var s=t(function(t,e){c(t,s,n,"_i"),t._t=n,t._i=i(null),t._f=void 0,t._l=void 0,t[g]=0,null!=e&&a(e,r,t[f],t)});return o(s.prototype,{clear:function(){for(var t=p(this,n),r=t._i,e=t._f;e;e=e.n)e.r=!0,e.p&&(e.p=e.p.n=void 0),delete r[e.i];t._f=t._l=void 0,t[g]=0},delete:function(t){var r=p(this,n),e=d(r,t);if(e){var i=e.n,o=e.p;delete r._i[e.i],e.r=!0,o&&(o.n=i),i&&(i.p=o),r._f==e&&(r._f=i),r._l==e&&(r._l=o),r[g]--}return!!e},forEach:function(t){p(this,n);for(var r,e=u(t,arguments.length>1?arguments[1]:void 0,3);r=r?r.n:this._f;)for(e(r.v,r.k,this);r&&r.r;)r=r.p},has:function(t){return!!d(p(this,n),t)}}),h&&e(s.prototype,"size",{get:function(){return p(this,n)[g]}}),s},def:function(t,n,r){var e,i,o=d(t,n);return o?o.v=r:(t._l=o={i:i=v(n,!0),k:n,v:r,p:e=t._l,n:void 0,r:!1},t._f||(t._f=o),e&&(e.n=o),t[g]++,"F"!==i&&(t._i[i]=o)),t},getEntry:d,setStrong:function(t,n,r){f(t,n,function(t,r){this._t=p(t,n),this._k=r,this._l=void 0},function(){for(var t=this._k,n=this._l;n&&n.r;)n=n.p;return this._t&&(this._l=n=n?n.n:this._t._f)?s(0,"keys"==t?n.k:"values"==t?n.v:[n.k,n.v]):(this._t=void 0,s(1))},r?"entries":"values",!r,!0),l(n)}}},function(t,n,r){"use strict";var e=r(113),i=r(45);t.exports=r(59)("Set",function(t){return function(){return t(this,arguments.length>0?arguments[0]:void 0)}},{add:function(t){return e.def(i(this,"Set"),t=0===t?0:t,t)}},e)},function(t,n,r){"use strict";var e,i=r(26)(0),o=r(12),u=r(29),c=r(96),a=r(116),f=r(4),s=r(3),l=r(45),h=u.getWeak,v=Object.isExtensible,p=a.ufstore,g={},d=function(t){return function(){return t(this,arguments.length>0?arguments[0]:void 0)}},y={get:function(t){if(f(t)){var n=h(t);return!0===n?p(l(this,"WeakMap")).get(t):n?n[this._i]:void 0}},set:function(t,n){return a.def(l(this,"WeakMap"),t,n)}},m=t.exports=r(59)("WeakMap",d,y,a,!0,!0);s(function(){return 7!=(new m).set((Object.freeze||Object)(g),7).get(g)})&&(c((e=a.getConstructor(d,"WeakMap")).prototype,y),u.NEED=!0,i(["delete","has","get","set"],function(t){var n=m.prototype,r=n[t];o(n,t,function(n,i){if(f(n)&&!v(n)){this._f||(this._f=new e);var o=this._f[t](n,i);return"set"==t?this:o}return r.call(this,n,i)})}))},function(t,n,r){"use strict";var e=r(41),i=r(29).getWeak,o=r(1),u=r(4),c=r(39),a=r(40),f=r(26),s=r(14),l=r(45),h=f(5),v=f(6),p=0,g=function(t){return t._l||(t._l=new d)},d=function(){this.a=[]},y=function(t,n){return h(t.a,function(t){return t[0]===n})};d.prototype={get:function(t){var n=y(this,t);if(n)return n[1]},has:function(t){return!!y(this,t)},set:function(t,n){var r=y(this,t);r?r[1]=n:this.a.push([t,n])},delete:function(t){var n=v(this.a,function(n){return n[0]===t});return~n&&this.a.splice(n,1),!!~n}},t.exports={getConstructor:function(t,n,r,o){var f=t(function(t,e){c(t,f,n,"_i"),t._t=n,t._i=p++,t._l=void 0,null!=e&&a(e,r,t[o],t)});return e(f.prototype,{delete:function(t){if(!u(t))return!1;var r=i(t);return!0===r?g(l(this,n)).delete(t):r&&s(r,this._i)&&delete r[this._i]},has:function(t){if(!u(t))return!1;var r=i(t);return!0===r?g(l(this,n)).has(t):r&&s(r,this._i)}}),f},def:function(t,n,r){var e=i(o(n),!0);return!0===e?g(t).set(n,r):e[t._i]=r,t},ufstore:g}},function(t,n,r){var e=r(24),i=r(8);t.exports=function(t){if(void 0===t)return 0;var n=e(t),r=i(n);if(n!==r)throw RangeError("Wrong length!");return r}},function(t,n,r){var e=r(37),i=r(51),o=r(1),u=r(2).Reflect;t.exports=u&&u.ownKeys||function(t){var n=e.f(o(t)),r=i.f;return r?n.concat(r(t)):n}},function(t,n,r){"use strict";var 
e=r(52),i=r(4),o=r(8),u=r(19),c=r(5)("isConcatSpreadable");t.exports=function t(n,r,a,f,s,l,h,v){for(var p,g,d=s,y=0,m=!!h&&u(h,v,3);y<f;){if(y in a){if(p=m?m(a[y],y,r):a[y],g=!1,i(p)&&(g=void 0!==(g=p[c])?!!g:e(p)),g&&l>0)d=t(n,r,p,o(p.length),d,l-1)-1;else{if(d>=9007199254740991)throw TypeError();n[d]=p}d++}y++}return d}},function(t,n,r){var e=r(8),i=r(73),o=r(23);t.exports=function(t,n,r,u){var c=String(o(t)),a=c.length,f=void 0===r?" ":String(r),s=e(n);if(s<=a||""==f)return c;var l=s-a,h=i.call(f,Math.ceil(l/f.length));return h.length>l&&(h=h.slice(0,l)),u?h+c:c+h}},function(t,n,r){var e=r(34),i=r(15),o=r(47).f;t.exports=function(t){return function(n){for(var r,u=i(n),c=e(u),a=c.length,f=0,s=[];a>f;)o.call(u,r=c[f++])&&s.push(t?[r,u[r]]:u[r]);return s}}},function(t,n,r){var e=r(48),i=r(123);t.exports=function(t){return function(){if(e(this)!=t)throw TypeError(t+"#toJSON isn't generic");return i(this)}}},function(t,n,r){var e=r(40);t.exports=function(t,n){var r=[];return e(t,!1,r.push,r,n),r}},function(t,n){t.exports=Math.scale||function(t,n,r,e,i){return 0===arguments.length||t!=t||n!=n||r!=r||e!=e||i!=i?NaN:t===1/0||t===-1/0?t:(t-n)*(i-e)/(r-n)+e}},function(t,n,r){t.exports=r.p+"postaccess.html"},function(t,n,r){t.exports=r.p+"postauth.html"},function(t,n,r){r(128),t.exports=r(331)},function(t,n,r){"use strict";(function(t){function e(){return t._babelPolyfill||"undefined"!=typeof window&&window._babelPolyfill?null:r(129)}Object.defineProperty(n,"__esModule",{value:!0}),n.idempotentBabelPolyfill=e,n.default=e()}).call(this,r(64))},function(t,n,r){"use strict";(function(t){if(r(130),r(327),r(328),t._babelPolyfill)throw new Error("only one instance of babel-polyfill is allowed");t._babelPolyfill=!0;var n="defineProperty";function e(t,r,e){t[r]||Object[n](t,r,{writable:!0,configurable:!0,value:e})}e(String.prototype,"padLeft","".padStart),e(String.prototype,"padRight","".padEnd),"pop,reverse,shift,keys,values,entries,indexOf,every,some,forEach,map,filter,find,findIndex,includes,join,slice,concat,push,splice,unshift,sort,lastIndexOf,reduce,reduceRight,copyWithin,fill".split(",").forEach(function(t){[][t]&&e(Array,t,Function.call.bind([][t]))})}).call(this,r(64))},function(t,n,r){r(131),r(133),r(134),r(135),r(136),r(137),r(138),r(139),r(140),r(141),r(142),r(143),r(144),r(145),r(146),r(147),r(149),r(150),r(151),r(152),r(153),r(154),r(155),r(156),r(157),r(158),r(159),r(160),r(161),r(162),r(163),r(164),r(165),r(166),r(167),r(168),r(169),r(170),r(171),r(172),r(173),r(174),r(175),r(176),r(177),r(178),r(179),r(180),r(181),r(182),r(183),r(184),r(185),r(186),r(187),r(188),r(189),r(190),r(191),r(192),r(193),r(194),r(195),r(196),r(197),r(198),r(199),r(200),r(201),r(202),r(203),r(204),r(205),r(206),r(207),r(208),r(209),r(211),r(212),r(214),r(215),r(216),r(217),r(218),r(219),r(220),r(222),r(223),r(224),r(225),r(226),r(227),r(228),r(229),r(230),r(231),r(232),r(233),r(234),r(86),r(235),r(236),r(109),r(237),r(238),r(239),r(240),r(241),r(112),r(114),r(115),r(242),r(243),r(244),r(245),r(246),r(247),r(248),r(249),r(250),r(251),r(252),r(253),r(254),r(255),r(256),r(257),r(258),r(259),r(260),r(261),r(262),r(263),r(264),r(265),r(266),r(267),r(268),r(269),r(270),r(271),r(272),r(273),r(274),r(275),r(276),r(277),r(278),r(279),r(280),r(281),r(282),r(283),r(284),r(285),r(286),r(287),r(288),r(289),r(290),r(291),r(292),r(293),r(294),r(295),r(296),r(297),r(298),r(299),r(300),r(301),r(302),r(303),r(304),r(305),r(306),r(307),r(308),r(309),r(310),r(311),r(312),r(313),r(314),r(315),r(316),r(317),r(318),r(319),r(320),r(321),
r(322),r(323),r(324),r(325),r(326),t.exports=r(18)},function(t,n,r){"use strict";var e=r(2),i=r(14),o=r(6),u=r(0),c=r(12),a=r(29).KEY,f=r(3),s=r(49),l=r(42),h=r(33),v=r(5),p=r(92),g=r(66),d=r(132),y=r(52),m=r(1),b=r(4),S=r(15),w=r(22),x=r(32),_=r(36),E=r(95),O=r(16),M=r(7),P=r(34),F=O.f,A=M.f,I=E.f,j=e.Symbol,N=e.JSON,k=N&&N.stringify,T=v("_hidden"),L=v("toPrimitive"),R={}.propertyIsEnumerable,C=s("symbol-registry"),W=s("symbols"),D=s("op-symbols"),U=Object.prototype,G="function"==typeof j,V=e.QObject,B=!V||!V.prototype||!V.prototype.findChild,z=o&&f(function(){return 7!=_(A({},"a",{get:function(){return A(this,"a",{value:7}).a}})).a})?function(t,n,r){var e=F(U,n);e&&delete U[n],A(t,n,r),e&&t!==U&&A(U,n,e)}:A,J=function(t){var n=W[t]=_(j.prototype);return n._k=t,n},q=G&&"symbol"==typeof j.iterator?function(t){return"symbol"==typeof t}:function(t){return t instanceof j},Y=function(t,n,r){return t===U&&Y(D,n,r),m(t),n=w(n,!0),m(r),i(W,n)?(r.enumerable?(i(t,T)&&t[T][n]&&(t[T][n]=!1),r=_(r,{enumerable:x(0,!1)})):(i(t,T)||A(t,T,x(1,{})),t[T][n]=!0),z(t,n,r)):A(t,n,r)},K=function(t,n){m(t);for(var r,e=d(n=S(n)),i=0,o=e.length;o>i;)Y(t,r=e[i++],n[r]);return t},H=function(t){var n=R.call(this,t=w(t,!0));return!(this===U&&i(W,t)&&!i(D,t))&&(!(n||!i(this,t)||!i(W,t)||i(this,T)&&this[T][t])||n)},X=function(t,n){if(t=S(t),n=w(n,!0),t!==U||!i(W,n)||i(D,n)){var r=F(t,n);return!r||!i(W,n)||i(t,T)&&t[T][n]||(r.enumerable=!0),r}},$=function(t){for(var n,r=I(S(t)),e=[],o=0;r.length>o;)i(W,n=r[o++])||n==T||n==a||e.push(n);return e},Z=function(t){for(var n,r=t===U,e=I(r?D:S(t)),o=[],u=0;e.length>u;)!i(W,n=e[u++])||r&&!i(U,n)||o.push(W[n]);return o};G||(c((j=function(){if(this instanceof j)throw TypeError("Symbol is not a constructor!");var t=h(arguments.length>0?arguments[0]:void 0),n=function(r){this===U&&n.call(D,r),i(this,T)&&i(this[T],t)&&(this[T][t]=!1),z(this,t,x(1,r))};return o&&B&&z(U,t,{configurable:!0,set:n}),J(t)}).prototype,"toString",function(){return this._k}),O.f=X,M.f=Y,r(37).f=E.f=$,r(47).f=H,r(51).f=Z,o&&!r(30)&&c(U,"propertyIsEnumerable",H,!0),p.f=function(t){return J(v(t))}),u(u.G+u.W+u.F*!G,{Symbol:j});for(var Q="hasInstance,isConcatSpreadable,iterator,match,replace,search,species,split,toPrimitive,toStringTag,unscopables".split(","),tt=0;Q.length>tt;)v(Q[tt++]);for(var nt=P(v.store),rt=0;nt.length>rt;)g(nt[rt++]);u(u.S+u.F*!G,"Symbol",{for:function(t){return i(C,t+="")?C[t]:C[t]=j(t)},keyFor:function(t){if(!q(t))throw TypeError(t+" is not a symbol!");for(var n in C)if(C[n]===t)return n},useSetter:function(){B=!0},useSimple:function(){B=!1}}),u(u.S+u.F*!G,"Object",{create:function(t,n){return void 0===n?_(t):K(_(t),n)},defineProperty:Y,defineProperties:K,getOwnPropertyDescriptor:X,getOwnPropertyNames:$,getOwnPropertySymbols:Z}),N&&u(u.S+u.F*(!G||f(function(){var t=j();return"[null]"!=k([t])||"{}"!=k({a:t})||"{}"!=k(Object(t))})),"JSON",{stringify:function(t){for(var n,r,e=[t],i=1;arguments.length>i;)e.push(arguments[i++]);if(r=n=e[1],(b(n)||void 0!==t)&&!q(t))return y(n)||(n=function(t,n){if("function"==typeof r&&(n=r.call(this,t,n)),!q(n))return n}),e[1]=n,k.apply(N,e)}}),j.prototype[L]||r(11)(j.prototype,L,j.prototype.valueOf),l(j,"Symbol"),l(Math,"Math",!0),l(e.JSON,"JSON",!0)},function(t,n,r){var e=r(34),i=r(51),o=r(47);t.exports=function(t){var n=e(t),r=i.f;if(r)for(var u,c=r(t),a=o.f,f=0;c.length>f;)a.call(t,u=c[f++])&&n.push(u);return n}},function(t,n,r){var e=r(0);e(e.S,"Object",{create:r(36)})},function(t,n,r){var 
e=r(0);e(e.S+e.F*!r(6),"Object",{defineProperty:r(7).f})},function(t,n,r){var e=r(0);e(e.S+e.F*!r(6),"Object",{defineProperties:r(94)})},function(t,n,r){var e=r(15),i=r(16).f;r(25)("getOwnPropertyDescriptor",function(){return function(t,n){return i(e(t),n)}})},function(t,n,r){var e=r(9),i=r(17);r(25)("getPrototypeOf",function(){return function(t){return i(e(t))}})},function(t,n,r){var e=r(9),i=r(34);r(25)("keys",function(){return function(t){return i(e(t))}})},function(t,n,r){r(25)("getOwnPropertyNames",function(){return r(95).f})},function(t,n,r){var e=r(4),i=r(29).onFreeze;r(25)("freeze",function(t){return function(n){return t&&e(n)?t(i(n)):n}})},function(t,n,r){var e=r(4),i=r(29).onFreeze;r(25)("seal",function(t){return function(n){return t&&e(n)?t(i(n)):n}})},function(t,n,r){var e=r(4),i=r(29).onFreeze;r(25)("preventExtensions",function(t){return function(n){return t&&e(n)?t(i(n)):n}})},function(t,n,r){var e=r(4);r(25)("isFrozen",function(t){return function(n){return!e(n)||!!t&&t(n)}})},function(t,n,r){var e=r(4);r(25)("isSealed",function(t){return function(n){return!e(n)||!!t&&t(n)}})},function(t,n,r){var e=r(4);r(25)("isExtensible",function(t){return function(n){return!!e(n)&&(!t||t(n))}})},function(t,n,r){var e=r(0);e(e.S+e.F,"Object",{assign:r(96)})},function(t,n,r){var e=r(0);e(e.S,"Object",{is:r(148)})},function(t,n){t.exports=Object.is||function(t,n){return t===n?0!==t||1/t==1/n:t!=t&&n!=n}},function(t,n,r){var e=r(0);e(e.S,"Object",{setPrototypeOf:r(70).set})},function(t,n,r){"use strict";var e=r(48),i={};i[r(5)("toStringTag")]="z",i+""!="[object z]"&&r(12)(Object.prototype,"toString",function(){return"[object "+e(this)+"]"},!0)},function(t,n,r){var e=r(0);e(e.P,"Function",{bind:r(97)})},function(t,n,r){var e=r(7).f,i=Function.prototype,o=/^\s*function ([^ (]*)/;"name"in i||r(6)&&e(i,"name",{configurable:!0,get:function(){try{return(""+this).match(o)[1]}catch(t){return""}}})},function(t,n,r){"use strict";var e=r(4),i=r(17),o=r(5)("hasInstance"),u=Function.prototype;o in u||r(7).f(u,o,{value:function(t){if("function"!=typeof this||!e(t))return!1;if(!e(this.prototype))return t instanceof this;for(;t=i(t);)if(this.prototype===t)return!0;return!1}})},function(t,n,r){var e=r(0),i=r(99);e(e.G+e.F*(parseInt!=i),{parseInt:i})},function(t,n,r){var e=r(0),i=r(100);e(e.G+e.F*(parseFloat!=i),{parseFloat:i})},function(t,n,r){"use strict";var e=r(2),i=r(14),o=r(20),u=r(72),c=r(22),a=r(3),f=r(37).f,s=r(16).f,l=r(7).f,h=r(43).trim,v=e.Number,p=v,g=v.prototype,d="Number"==o(r(36)(g)),y="trim"in String.prototype,m=function(t){var n=c(t,!1);if("string"==typeof n&&n.length>2){var r,e,i,o=(n=y?n.trim():h(n,3)).charCodeAt(0);if(43===o||45===o){if(88===(r=n.charCodeAt(2))||120===r)return NaN}else if(48===o){switch(n.charCodeAt(1)){case 66:case 98:e=2,i=49;break;case 79:case 111:e=8,i=55;break;default:return+n}for(var u,a=n.slice(2),f=0,s=a.length;f<s;f++)if((u=a.charCodeAt(f))<48||u>i)return NaN;return parseInt(a,e)}}return+n};if(!v(" 0o1")||!v("0b1")||v("+0x1")){v=function(t){var n=arguments.length<1?0:t,r=this;return r instanceof v&&(d?a(function(){g.valueOf.call(r)}):"Number"!=o(r))?u(new p(m(n)),r,v):m(n)};for(var b,S=r(6)?f(p):"MAX_VALUE,MIN_VALUE,NaN,NEGATIVE_INFINITY,POSITIVE_INFINITY,EPSILON,isFinite,isInteger,isNaN,isSafeInteger,MAX_SAFE_INTEGER,MIN_SAFE_INTEGER,parseFloat,parseInt,isInteger".split(","),w=0;S.length>w;w++)i(p,b=S[w])&&!i(v,b)&&l(v,b,s(p,b));v.prototype=g,g.constructor=v,r(12)(e,"Number",v)}},function(t,n,r){"use strict";var 
e=r(0),i=r(24),o=r(101),u=r(73),c=1..toFixed,a=Math.floor,f=[0,0,0,0,0,0],s="Number.toFixed: incorrect invocation!",l=function(t,n){for(var r=-1,e=n;++r<6;)e+=t*f[r],f[r]=e%1e7,e=a(e/1e7)},h=function(t){for(var n=6,r=0;--n>=0;)r+=f[n],f[n]=a(r/t),r=r%t*1e7},v=function(){for(var t=6,n="";--t>=0;)if(""!==n||0===t||0!==f[t]){var r=String(f[t]);n=""===n?r:n+u.call("0",7-r.length)+r}return n},p=function(t,n,r){return 0===n?r:n%2==1?p(t,n-1,r*t):p(t*t,n/2,r)};e(e.P+e.F*(!!c&&("0.000"!==8e-5.toFixed(3)||"1"!==.9.toFixed(0)||"1.25"!==1.255.toFixed(2)||"1000000000000000128"!==(0xde0b6b3a7640080).toFixed(0))||!r(3)(function(){c.call({})})),"Number",{toFixed:function(t){var n,r,e,c,a=o(this,s),f=i(t),g="",d="0";if(f<0||f>20)throw RangeError(s);if(a!=a)return"NaN";if(a<=-1e21||a>=1e21)return String(a);if(a<0&&(g="-",a=-a),a>1e-21)if(r=(n=function(t){for(var n=0,r=t;r>=4096;)n+=12,r/=4096;for(;r>=2;)n+=1,r/=2;return n}(a*p(2,69,1))-69)<0?a*p(2,-n,1):a/p(2,n,1),r*=4503599627370496,(n=52-n)>0){for(l(0,r),e=f;e>=7;)l(1e7,0),e-=7;for(l(p(10,e,1),0),e=n-1;e>=23;)h(1<<23),e-=23;h(1<<e),l(1,1),h(2),d=v()}else l(0,r),l(1<<-n,0),d=v()+u.call("0",f);return d=f>0?g+((c=d.length)<=f?"0."+u.call("0",f-c)+d:d.slice(0,c-f)+"."+d.slice(c-f)):g+d}})},function(t,n,r){"use strict";var e=r(0),i=r(3),o=r(101),u=1..toPrecision;e(e.P+e.F*(i(function(){return"1"!==u.call(1,void 0)})||!i(function(){u.call({})})),"Number",{toPrecision:function(t){var n=o(this,"Number#toPrecision: incorrect invocation!");return void 0===t?u.call(n):u.call(n,t)}})},function(t,n,r){var e=r(0);e(e.S,"Number",{EPSILON:Math.pow(2,-52)})},function(t,n,r){var e=r(0),i=r(2).isFinite;e(e.S,"Number",{isFinite:function(t){return"number"==typeof t&&i(t)}})},function(t,n,r){var e=r(0);e(e.S,"Number",{isInteger:r(102)})},function(t,n,r){var e=r(0);e(e.S,"Number",{isNaN:function(t){return t!=t}})},function(t,n,r){var e=r(0),i=r(102),o=Math.abs;e(e.S,"Number",{isSafeInteger:function(t){return i(t)&&o(t)<=9007199254740991}})},function(t,n,r){var e=r(0);e(e.S,"Number",{MAX_SAFE_INTEGER:9007199254740991})},function(t,n,r){var e=r(0);e(e.S,"Number",{MIN_SAFE_INTEGER:-9007199254740991})},function(t,n,r){var e=r(0),i=r(100);e(e.S+e.F*(Number.parseFloat!=i),"Number",{parseFloat:i})},function(t,n,r){var e=r(0),i=r(99);e(e.S+e.F*(Number.parseInt!=i),"Number",{parseInt:i})},function(t,n,r){var e=r(0),i=r(103),o=Math.sqrt,u=Math.acosh;e(e.S+e.F*!(u&&710==Math.floor(u(Number.MAX_VALUE))&&u(1/0)==1/0),"Math",{acosh:function(t){return(t=+t)<1?NaN:t>94906265.62425156?Math.log(t)+Math.LN2:i(t-1+o(t-1)*o(t+1))}})},function(t,n,r){var e=r(0),i=Math.asinh;e(e.S+e.F*!(i&&1/i(0)>0),"Math",{asinh:function t(n){return isFinite(n=+n)&&0!=n?n<0?-t(-n):Math.log(n+Math.sqrt(n*n+1)):n}})},function(t,n,r){var e=r(0),i=Math.atanh;e(e.S+e.F*!(i&&1/i(-0)<0),"Math",{atanh:function(t){return 0==(t=+t)?t:Math.log((1+t)/(1-t))/2}})},function(t,n,r){var e=r(0),i=r(74);e(e.S,"Math",{cbrt:function(t){return i(t=+t)*Math.pow(Math.abs(t),1/3)}})},function(t,n,r){var e=r(0);e(e.S,"Math",{clz32:function(t){return(t>>>=0)?31-Math.floor(Math.log(t+.5)*Math.LOG2E):32}})},function(t,n,r){var e=r(0),i=Math.exp;e(e.S,"Math",{cosh:function(t){return(i(t=+t)+i(-t))/2}})},function(t,n,r){var e=r(0),i=r(75);e(e.S+e.F*(i!=Math.expm1),"Math",{expm1:i})},function(t,n,r){var e=r(0);e(e.S,"Math",{fround:r(104)})},function(t,n,r){var e=r(0),i=Math.abs;e(e.S,"Math",{hypot:function(t,n){for(var r,e,o=0,u=0,c=arguments.length,a=0;u<c;)a<(r=i(arguments[u++]))?(o=o*(e=a/r)*e+1,a=r):o+=r>0?(e=r/a)*e:r;return 
a===1/0?1/0:a*Math.sqrt(o)}})},function(t,n,r){var e=r(0),i=Math.imul;e(e.S+e.F*r(3)(function(){return-5!=i(4294967295,5)||2!=i.length}),"Math",{imul:function(t,n){var r=+t,e=+n,i=65535&r,o=65535&e;return 0|i*o+((65535&r>>>16)*o+i*(65535&e>>>16)<<16>>>0)}})},function(t,n,r){var e=r(0);e(e.S,"Math",{log10:function(t){return Math.log(t)*Math.LOG10E}})},function(t,n,r){var e=r(0);e(e.S,"Math",{log1p:r(103)})},function(t,n,r){var e=r(0);e(e.S,"Math",{log2:function(t){return Math.log(t)/Math.LN2}})},function(t,n,r){var e=r(0);e(e.S,"Math",{sign:r(74)})},function(t,n,r){var e=r(0),i=r(75),o=Math.exp;e(e.S+e.F*r(3)(function(){return-2e-17!=!Math.sinh(-2e-17)}),"Math",{sinh:function(t){return Math.abs(t=+t)<1?(i(t)-i(-t))/2:(o(t-1)-o(-t-1))*(Math.E/2)}})},function(t,n,r){var e=r(0),i=r(75),o=Math.exp;e(e.S,"Math",{tanh:function(t){var n=i(t=+t),r=i(-t);return n==1/0?1:r==1/0?-1:(n-r)/(o(t)+o(-t))}})},function(t,n,r){var e=r(0);e(e.S,"Math",{trunc:function(t){return(t>0?Math.floor:Math.ceil)(t)}})},function(t,n,r){var e=r(0),i=r(35),o=String.fromCharCode,u=String.fromCodePoint;e(e.S+e.F*(!!u&&1!=u.length),"String",{fromCodePoint:function(t){for(var n,r=[],e=arguments.length,u=0;e>u;){if(n=+arguments[u++],i(n,1114111)!==n)throw RangeError(n+" is not a valid code point");r.push(n<65536?o(n):o(55296+((n-=65536)>>10),n%1024+56320))}return r.join("")}})},function(t,n,r){var e=r(0),i=r(15),o=r(8);e(e.S,"String",{raw:function(t){for(var n=i(t.raw),r=o(n.length),e=arguments.length,u=[],c=0;r>c;)u.push(String(n[c++])),c<e&&u.push(String(arguments[c]));return u.join("")}})},function(t,n,r){"use strict";r(43)("trim",function(t){return function(){return t(this,3)}})},function(t,n,r){"use strict";var e=r(76)(!0);r(77)(String,"String",function(t){this._t=String(t),this._i=0},function(){var t,n=this._t,r=this._i;return r>=n.length?{value:void 0,done:!0}:(t=e(n,r),this._i+=t.length,{value:t,done:!1})})},function(t,n,r){"use strict";var e=r(0),i=r(76)(!1);e(e.P,"String",{codePointAt:function(t){return i(this,t)}})},function(t,n,r){"use strict";var e=r(0),i=r(8),o=r(79),u="".endsWith;e(e.P+e.F*r(80)("endsWith"),"String",{endsWith:function(t){var n=o(this,t,"endsWith"),r=arguments.length>1?arguments[1]:void 0,e=i(n.length),c=void 0===r?e:Math.min(i(r),e),a=String(t);return u?u.call(n,a,c):n.slice(c-a.length,c)===a}})},function(t,n,r){"use strict";var e=r(0),i=r(79);e(e.P+e.F*r(80)("includes"),"String",{includes:function(t){return!!~i(this,t,"includes").indexOf(t,arguments.length>1?arguments[1]:void 0)}})},function(t,n,r){var e=r(0);e(e.P,"String",{repeat:r(73)})},function(t,n,r){"use strict";var e=r(0),i=r(8),o=r(79),u="".startsWith;e(e.P+e.F*r(80)("startsWith"),"String",{startsWith:function(t){var n=o(this,t,"startsWith"),r=i(Math.min(arguments.length>1?arguments[1]:void 0,n.length)),e=String(t);return u?u.call(n,e,r):n.slice(r,r+e.length)===e}})},function(t,n,r){"use strict";r(13)("anchor",function(t){return function(n){return t(this,"a","name",n)}})},function(t,n,r){"use strict";r(13)("big",function(t){return function(){return t(this,"big","","")}})},function(t,n,r){"use strict";r(13)("blink",function(t){return function(){return t(this,"blink","","")}})},function(t,n,r){"use strict";r(13)("bold",function(t){return function(){return t(this,"b","","")}})},function(t,n,r){"use strict";r(13)("fixed",function(t){return function(){return t(this,"tt","","")}})},function(t,n,r){"use strict";r(13)("fontcolor",function(t){return function(n){return t(this,"font","color",n)}})},function(t,n,r){"use 
strict";r(13)("fontsize",function(t){return function(n){return t(this,"font","size",n)}})},function(t,n,r){"use strict";r(13)("italics",function(t){return function(){return t(this,"i","","")}})},function(t,n,r){"use strict";r(13)("link",function(t){return function(n){return t(this,"a","href",n)}})},function(t,n,r){"use strict";r(13)("small",function(t){return function(){return t(this,"small","","")}})},function(t,n,r){"use strict";r(13)("strike",function(t){return function(){return t(this,"strike","","")}})},function(t,n,r){"use strict";r(13)("sub",function(t){return function(){return t(this,"sub","","")}})},function(t,n,r){"use strict";r(13)("sup",function(t){return function(){return t(this,"sup","","")}})},function(t,n,r){var e=r(0);e(e.S,"Date",{now:function(){return(new Date).getTime()}})},function(t,n,r){"use strict";var e=r(0),i=r(9),o=r(22);e(e.P+e.F*r(3)(function(){return null!==new Date(NaN).toJSON()||1!==Date.prototype.toJSON.call({toISOString:function(){return 1}})}),"Date",{toJSON:function(t){var n=i(this),r=o(n);return"number"!=typeof r||isFinite(r)?n.toISOString():null}})},function(t,n,r){var e=r(0),i=r(210);e(e.P+e.F*(Date.prototype.toISOString!==i),"Date",{toISOString:i})},function(t,n,r){"use strict";var e=r(3),i=Date.prototype.getTime,o=Date.prototype.toISOString,u=function(t){return t>9?t:"0"+t};t.exports=e(function(){return"0385-07-25T07:06:39.999Z"!=o.call(new Date(-5e13-1))})||!e(function(){o.call(new Date(NaN))})?function(){if(!isFinite(i.call(this)))throw RangeError("Invalid time value");var t=this,n=t.getUTCFullYear(),r=t.getUTCMilliseconds(),e=n<0?"-":n>9999?"+":"";return e+("00000"+Math.abs(n)).slice(e?-6:-4)+"-"+u(t.getUTCMonth()+1)+"-"+u(t.getUTCDate())+"T"+u(t.getUTCHours())+":"+u(t.getUTCMinutes())+":"+u(t.getUTCSeconds())+"."+(r>99?r:"0"+u(r))+"Z"}:o},function(t,n,r){var e=Date.prototype,i=e.toString,o=e.getTime;new Date(NaN)+""!="Invalid Date"&&r(12)(e,"toString",function(){var t=o.call(this);return t==t?i.call(this):"Invalid Date"})},function(t,n,r){var e=r(5)("toPrimitive"),i=Date.prototype;e in i||r(11)(i,e,r(213))},function(t,n,r){"use strict";var e=r(1),i=r(22);t.exports=function(t){if("string"!==t&&"number"!==t&&"default"!==t)throw TypeError("Incorrect hint");return i(e(this),"number"!=t)}},function(t,n,r){var e=r(0);e(e.S,"Array",{isArray:r(52)})},function(t,n,r){"use strict";var e=r(19),i=r(0),o=r(9),u=r(105),c=r(81),a=r(8),f=r(82),s=r(83);i(i.S+i.F*!r(54)(function(t){Array.from(t)}),"Array",{from:function(t){var n,r,i,l,h=o(t),v="function"==typeof this?this:Array,p=arguments.length,g=p>1?arguments[1]:void 0,d=void 0!==g,y=0,m=s(h);if(d&&(g=e(g,p>2?arguments[2]:void 0,2)),null==m||v==Array&&c(m))for(r=new v(n=a(h.length));n>y;y++)f(r,y,d?g(h[y],y):h[y]);else for(l=m.call(h),r=new v;!(i=l.next()).done;y++)f(r,y,d?u(l,g,[i.value,y],!0):i.value);return r.length=y,r}})},function(t,n,r){"use strict";var e=r(0),i=r(82);e(e.S+e.F*r(3)(function(){function t(){}return!(Array.of.call(t)instanceof t)}),"Array",{of:function(){for(var t=0,n=arguments.length,r=new("function"==typeof this?this:Array)(n);n>t;)i(r,t,arguments[t++]);return r.length=n,r}})},function(t,n,r){"use strict";var e=r(0),i=r(15),o=[].join;e(e.P+e.F*(r(46)!=Object||!r(21)(o)),"Array",{join:function(t){return o.call(i(this),void 0===t?",":t)}})},function(t,n,r){"use strict";var e=r(0),i=r(69),o=r(20),u=r(35),c=r(8),a=[].slice;e(e.P+e.F*r(3)(function(){i&&a.call(i)}),"Array",{slice:function(t,n){var r=c(this.length),e=o(this);if(n=void 0===n?r:n,"Array"==e)return a.call(this,t,n);for(var 
i=u(t,r),f=u(n,r),s=c(f-i),l=new Array(s),h=0;h<s;h++)l[h]="String"==e?this.charAt(i+h):this[i+h];return l}})},function(t,n,r){"use strict";var e=r(0),i=r(10),o=r(9),u=r(3),c=[].sort,a=[1,2,3];e(e.P+e.F*(u(function(){a.sort(void 0)})||!u(function(){a.sort(null)})||!r(21)(c)),"Array",{sort:function(t){return void 0===t?c.call(o(this)):c.call(o(this),i(t))}})},function(t,n,r){"use strict";var e=r(0),i=r(26)(0),o=r(21)([].forEach,!0);e(e.P+e.F*!o,"Array",{forEach:function(t){return i(this,t,arguments[1])}})},function(t,n,r){var e=r(4),i=r(52),o=r(5)("species");t.exports=function(t){var n;return i(t)&&("function"!=typeof(n=t.constructor)||n!==Array&&!i(n.prototype)||(n=void 0),e(n)&&null===(n=n[o])&&(n=void 0)),void 0===n?Array:n}},function(t,n,r){"use strict";var e=r(0),i=r(26)(1);e(e.P+e.F*!r(21)([].map,!0),"Array",{map:function(t){return i(this,t,arguments[1])}})},function(t,n,r){"use strict";var e=r(0),i=r(26)(2);e(e.P+e.F*!r(21)([].filter,!0),"Array",{filter:function(t){return i(this,t,arguments[1])}})},function(t,n,r){"use strict";var e=r(0),i=r(26)(3);e(e.P+e.F*!r(21)([].some,!0),"Array",{some:function(t){return i(this,t,arguments[1])}})},function(t,n,r){"use strict";var e=r(0),i=r(26)(4);e(e.P+e.F*!r(21)([].every,!0),"Array",{every:function(t){return i(this,t,arguments[1])}})},function(t,n,r){"use strict";var e=r(0),i=r(106);e(e.P+e.F*!r(21)([].reduce,!0),"Array",{reduce:function(t){return i(this,t,arguments.length,arguments[1],!1)}})},function(t,n,r){"use strict";var e=r(0),i=r(106);e(e.P+e.F*!r(21)([].reduceRight,!0),"Array",{reduceRight:function(t){return i(this,t,arguments.length,arguments[1],!0)}})},function(t,n,r){"use strict";var e=r(0),i=r(50)(!1),o=[].indexOf,u=!!o&&1/[1].indexOf(1,-0)<0;e(e.P+e.F*(u||!r(21)(o)),"Array",{indexOf:function(t){return u?o.apply(this,arguments)||0:i(this,t,arguments[1])}})},function(t,n,r){"use strict";var e=r(0),i=r(15),o=r(24),u=r(8),c=[].lastIndexOf,a=!!c&&1/[1].lastIndexOf(1,-0)<0;e(e.P+e.F*(a||!r(21)(c)),"Array",{lastIndexOf:function(t){if(a)return c.apply(this,arguments)||0;var n=i(this),r=u(n.length),e=r-1;for(arguments.length>1&&(e=Math.min(e,o(arguments[1]))),e<0&&(e=r+e);e>=0;e--)if(e in n&&n[e]===t)return e||0;return-1}})},function(t,n,r){var e=r(0);e(e.P,"Array",{copyWithin:r(107)}),r(31)("copyWithin")},function(t,n,r){var e=r(0);e(e.P,"Array",{fill:r(85)}),r(31)("fill")},function(t,n,r){"use strict";var e=r(0),i=r(26)(5),o=!0;"find"in[]&&Array(1).find(function(){o=!1}),e(e.P+e.F*o,"Array",{find:function(t){return i(this,t,arguments.length>1?arguments[1]:void 0)}}),r(31)("find")},function(t,n,r){"use strict";var e=r(0),i=r(26)(6),o="findIndex",u=!0;o in[]&&Array(1)[o](function(){u=!1}),e(e.P+e.F*u,"Array",{findIndex:function(t){return i(this,t,arguments.length>1?arguments[1]:void 0)}}),r(31)(o)},function(t,n,r){r(38)("Array")},function(t,n,r){var e=r(2),i=r(72),o=r(7).f,u=r(37).f,c=r(53),a=r(55),f=e.RegExp,s=f,l=f.prototype,h=/a/g,v=/a/g,p=new f(h)!==h;if(r(6)&&(!p||r(3)(function(){return v[r(5)("match")]=!1,f(h)!=h||f(v)==v||"/a/i"!=f(h,"i")}))){f=function(t,n){var r=this instanceof f,e=c(t),o=void 0===n;return!r&&e&&t.constructor===f&&o?t:i(p?new s(e&&!o?t.source:t,n):s((e=t instanceof f)?t.source:t,e&&o?a.call(t):n),r?this:l,f)};for(var g=function(t){t in f||o(f,t,{configurable:!0,get:function(){return s[t]},set:function(n){s[t]=n}})},d=u(s),y=0;d.length>y;)g(d[y++]);l.constructor=f,f.prototype=l,r(12)(e,"RegExp",f)}r(38)("RegExp")},function(t,n,r){"use strict";r(109);var 
e=r(1),i=r(55),o=r(6),u=/./.toString,c=function(t){r(12)(RegExp.prototype,"toString",t,!0)};r(3)(function(){return"/a/b"!=u.call({source:"a",flags:"b"})})?c(function(){var t=e(this);return"/".concat(t.source,"/","flags"in t?t.flags:!o&&t instanceof RegExp?i.call(t):void 0)}):"toString"!=u.name&&c(function(){return u.call(this)})},function(t,n,r){r(56)("match",1,function(t,n,r){return[function(r){"use strict";var e=t(this),i=null==r?void 0:r[n];return void 0!==i?i.call(r,e):new RegExp(r)[n](String(e))},r]})},function(t,n,r){r(56)("replace",2,function(t,n,r){return[function(e,i){"use strict";var o=t(this),u=null==e?void 0:e[n];return void 0!==u?u.call(e,o,i):r.call(String(o),e,i)},r]})},function(t,n,r){r(56)("search",1,function(t,n,r){return[function(r){"use strict";var e=t(this),i=null==r?void 0:r[n];return void 0!==i?i.call(r,e):new RegExp(r)[n](String(e))},r]})},function(t,n,r){r(56)("split",2,function(t,n,e){"use strict";var i=r(53),o=e,u=[].push;if("c"=="abbc".split(/(b)*/)[1]||4!="test".split(/(?:)/,-1).length||2!="ab".split(/(?:ab)*/).length||4!=".".split(/(.?)(.?)/).length||".".split(/()()/).length>1||"".split(/.?/).length){var c=void 0===/()??/.exec("")[1];e=function(t,n){var r=String(this);if(void 0===t&&0===n)return[];if(!i(t))return o.call(r,t,n);var e,a,f,s,l,h=[],v=(t.ignoreCase?"i":"")+(t.multiline?"m":"")+(t.unicode?"u":"")+(t.sticky?"y":""),p=0,g=void 0===n?4294967295:n>>>0,d=new RegExp(t.source,v+"g");for(c||(e=new RegExp("^"+d.source+"$(?!\\s)",v));(a=d.exec(r))&&!((f=a.index+a[0].length)>p&&(h.push(r.slice(p,a.index)),!c&&a.length>1&&a[0].replace(e,function(){for(l=1;l<arguments.length-2;l++)void 0===arguments[l]&&(a[l]=void 0)}),a.length>1&&a.index<r.length&&u.apply(h,a.slice(1)),s=a[0].length,p=f,h.length>=g));)d.lastIndex===a.index&&d.lastIndex++;return p===r.length?!s&&d.test("")||h.push(""):h.push(r.slice(p)),h.length>g?h.slice(0,g):h}}else"0".split(void 0,0).length&&(e=function(t,n){return void 0===t&&0===n?[]:o.call(this,t,n)});return[function(r,i){var o=t(this),u=null==r?void 0:r[n];return void 0!==u?u.call(r,o,i):e.call(String(o),r,i)},e]})},function(t,n,r){"use strict";var e,i,o,u,c=r(30),a=r(2),f=r(19),s=r(48),l=r(0),h=r(4),v=r(10),p=r(39),g=r(40),d=r(57),y=r(87).set,m=r(88)(),b=r(89),S=r(110),w=r(58),x=r(111),_=a.TypeError,E=a.process,O=E&&E.versions,M=O&&O.v8||"",P=a.Promise,F="process"==s(E),A=function(){},I=i=b.f,j=!!function(){try{var t=P.resolve(1),n=(t.constructor={})[r(5)("species")]=function(t){t(A,A)};return(F||"function"==typeof PromiseRejectionEvent)&&t.then(A)instanceof n&&0!==M.indexOf("6.6")&&-1===w.indexOf("Chrome/66")}catch(t){}}(),N=function(t){var n;return!(!h(t)||"function"!=typeof(n=t.then))&&n},k=function(t,n){if(!t._n){t._n=!0;var r=t._c;m(function(){for(var e=t._v,i=1==t._s,o=0,u=function(n){var r,o,u,c=i?n.ok:n.fail,a=n.resolve,f=n.reject,s=n.domain;try{c?(i||(2==t._h&&R(t),t._h=1),!0===c?r=e:(s&&s.enter(),r=c(e),s&&(s.exit(),u=!0)),r===n.promise?f(_("Promise-chain cycle")):(o=N(r))?o.call(r,a,f):a(r)):f(e)}catch(t){s&&!u&&s.exit(),f(t)}};r.length>o;)u(r[o++]);t._c=[],t._n=!1,n&&!t._h&&T(t)})}},T=function(t){y.call(a,function(){var n,r,e,i=t._v,o=L(t);if(o&&(n=S(function(){F?E.emit("unhandledRejection",i,t):(r=a.onunhandledrejection)?r({promise:t,reason:i}):(e=a.console)&&e.error&&e.error("Unhandled promise rejection",i)}),t._h=F||L(t)?2:1),t._a=void 0,o&&n.e)throw n.v})},L=function(t){return 1!==t._h&&0===(t._a||t._c).length},R=function(t){y.call(a,function(){var 
n;F?E.emit("rejectionHandled",t):(n=a.onrejectionhandled)&&n({promise:t,reason:t._v})})},C=function(t){var n=this;n._d||(n._d=!0,(n=n._w||n)._v=t,n._s=2,n._a||(n._a=n._c.slice()),k(n,!0))},W=function(t){var n,r=this;if(!r._d){r._d=!0,r=r._w||r;try{if(r===t)throw _("Promise can't be resolved itself");(n=N(t))?m(function(){var e={_w:r,_d:!1};try{n.call(t,f(W,e,1),f(C,e,1))}catch(t){C.call(e,t)}}):(r._v=t,r._s=1,k(r,!1))}catch(t){C.call({_w:r,_d:!1},t)}}};j||(P=function(t){p(this,P,"Promise","_h"),v(t),e.call(this);try{t(f(W,this,1),f(C,this,1))}catch(t){C.call(this,t)}},(e=function(t){this._c=[],this._a=void 0,this._s=0,this._d=!1,this._v=void 0,this._h=0,this._n=!1}).prototype=r(41)(P.prototype,{then:function(t,n){var r=I(d(this,P));return r.ok="function"!=typeof t||t,r.fail="function"==typeof n&&n,r.domain=F?E.domain:void 0,this._c.push(r),this._a&&this._a.push(r),this._s&&k(this,!1),r.promise},catch:function(t){return this.then(void 0,t)}}),o=function(){var t=new e;this.promise=t,this.resolve=f(W,t,1),this.reject=f(C,t,1)},b.f=I=function(t){return t===P||t===u?new o(t):i(t)}),l(l.G+l.W+l.F*!j,{Promise:P}),r(42)(P,"Promise"),r(38)("Promise"),u=r(18).Promise,l(l.S+l.F*!j,"Promise",{reject:function(t){var n=I(this);return(0,n.reject)(t),n.promise}}),l(l.S+l.F*(c||!j),"Promise",{resolve:function(t){return x(c&&this===u?P:this,t)}}),l(l.S+l.F*!(j&&r(54)(function(t){P.all(t).catch(A)})),"Promise",{all:function(t){var n=this,r=I(n),e=r.resolve,i=r.reject,o=S(function(){var r=[],o=0,u=1;g(t,!1,function(t){var c=o++,a=!1;r.push(void 0),u++,n.resolve(t).then(function(t){a||(a=!0,r[c]=t,--u||e(r))},i)}),--u||e(r)});return o.e&&i(o.v),r.promise},race:function(t){var n=this,r=I(n),e=r.reject,i=S(function(){g(t,!1,function(t){n.resolve(t).then(r.resolve,e)})});return i.e&&e(i.v),r.promise}})},function(t,n,r){"use strict";var e=r(116),i=r(45);r(59)("WeakSet",function(t){return function(){return t(this,arguments.length>0?arguments[0]:void 0)}},{add:function(t){return e.def(i(this,"WeakSet"),t,!0)}},e,!1,!0)},function(t,n,r){"use strict";var e=r(0),i=r(60),o=r(90),u=r(1),c=r(35),a=r(8),f=r(4),s=r(2).ArrayBuffer,l=r(57),h=o.ArrayBuffer,v=o.DataView,p=i.ABV&&s.isView,g=h.prototype.slice,d=i.VIEW;e(e.G+e.W+e.F*(s!==h),{ArrayBuffer:h}),e(e.S+e.F*!i.CONSTR,"ArrayBuffer",{isView:function(t){return p&&p(t)||f(t)&&d in t}}),e(e.P+e.U+e.F*r(3)(function(){return!new h(2).slice(1,void 0).byteLength}),"ArrayBuffer",{slice:function(t,n){if(void 0!==g&&void 0===n)return g.call(u(this),t);for(var r=u(this).byteLength,e=c(t,r),i=c(void 0===n?r:n,r),o=new(l(this,h))(a(i-e)),f=new v(this),s=new v(o),p=0;e<i;)s.setUint8(p++,f.getUint8(e++));return o}}),r(38)("ArrayBuffer")},function(t,n,r){var e=r(0);e(e.G+e.W+e.F*!r(60).ABV,{DataView:r(90).DataView})},function(t,n,r){r(27)("Int8",1,function(t){return function(n,r,e){return t(this,n,r,e)}})},function(t,n,r){r(27)("Uint8",1,function(t){return function(n,r,e){return t(this,n,r,e)}})},function(t,n,r){r(27)("Uint8",1,function(t){return function(n,r,e){return t(this,n,r,e)}},!0)},function(t,n,r){r(27)("Int16",2,function(t){return function(n,r,e){return t(this,n,r,e)}})},function(t,n,r){r(27)("Uint16",2,function(t){return function(n,r,e){return t(this,n,r,e)}})},function(t,n,r){r(27)("Int32",4,function(t){return function(n,r,e){return t(this,n,r,e)}})},function(t,n,r){r(27)("Uint32",4,function(t){return function(n,r,e){return t(this,n,r,e)}})},function(t,n,r){r(27)("Float32",4,function(t){return function(n,r,e){return 
t(this,n,r,e)}})},function(t,n,r){r(27)("Float64",8,function(t){return function(n,r,e){return t(this,n,r,e)}})},function(t,n,r){var e=r(0),i=r(10),o=r(1),u=(r(2).Reflect||{}).apply,c=Function.apply;e(e.S+e.F*!r(3)(function(){u(function(){})}),"Reflect",{apply:function(t,n,r){var e=i(t),a=o(r);return u?u(e,n,a):c.call(e,n,a)}})},function(t,n,r){var e=r(0),i=r(36),o=r(10),u=r(1),c=r(4),a=r(3),f=r(97),s=(r(2).Reflect||{}).construct,l=a(function(){function t(){}return!(s(function(){},[],t)instanceof t)}),h=!a(function(){s(function(){})});e(e.S+e.F*(l||h),"Reflect",{construct:function(t,n){o(t),u(n);var r=arguments.length<3?t:o(arguments[2]);if(h&&!l)return s(t,n,r);if(t==r){switch(n.length){case 0:return new t;case 1:return new t(n[0]);case 2:return new t(n[0],n[1]);case 3:return new t(n[0],n[1],n[2]);case 4:return new t(n[0],n[1],n[2],n[3])}var e=[null];return e.push.apply(e,n),new(f.apply(t,e))}var a=r.prototype,v=i(c(a)?a:Object.prototype),p=Function.apply.call(t,v,n);return c(p)?p:v}})},function(t,n,r){var e=r(7),i=r(0),o=r(1),u=r(22);i(i.S+i.F*r(3)(function(){Reflect.defineProperty(e.f({},1,{value:1}),1,{value:2})}),"Reflect",{defineProperty:function(t,n,r){o(t),n=u(n,!0),o(r);try{return e.f(t,n,r),!0}catch(t){return!1}}})},function(t,n,r){var e=r(0),i=r(16).f,o=r(1);e(e.S,"Reflect",{deleteProperty:function(t,n){var r=i(o(t),n);return!(r&&!r.configurable)&&delete t[n]}})},function(t,n,r){"use strict";var e=r(0),i=r(1),o=function(t){this._t=i(t),this._i=0;var n,r=this._k=[];for(n in t)r.push(n)};r(78)(o,"Object",function(){var t,n=this._k;do{if(this._i>=n.length)return{value:void 0,done:!0}}while(!((t=n[this._i++])in this._t));return{value:t,done:!1}}),e(e.S,"Reflect",{enumerate:function(t){return new o(t)}})},function(t,n,r){var e=r(16),i=r(17),o=r(14),u=r(0),c=r(4),a=r(1);u(u.S,"Reflect",{get:function t(n,r){var u,f,s=arguments.length<3?n:arguments[2];return a(n)===s?n[r]:(u=e.f(n,r))?o(u,"value")?u.value:void 0!==u.get?u.get.call(s):void 0:c(f=i(n))?t(f,r,s):void 0}})},function(t,n,r){var e=r(16),i=r(0),o=r(1);i(i.S,"Reflect",{getOwnPropertyDescriptor:function(t,n){return e.f(o(t),n)}})},function(t,n,r){var e=r(0),i=r(17),o=r(1);e(e.S,"Reflect",{getPrototypeOf:function(t){return i(o(t))}})},function(t,n,r){var e=r(0);e(e.S,"Reflect",{has:function(t,n){return n in t}})},function(t,n,r){var e=r(0),i=r(1),o=Object.isExtensible;e(e.S,"Reflect",{isExtensible:function(t){return i(t),!o||o(t)}})},function(t,n,r){var e=r(0);e(e.S,"Reflect",{ownKeys:r(118)})},function(t,n,r){var e=r(0),i=r(1),o=Object.preventExtensions;e(e.S,"Reflect",{preventExtensions:function(t){i(t);try{return o&&o(t),!0}catch(t){return!1}}})},function(t,n,r){var e=r(7),i=r(16),o=r(17),u=r(14),c=r(0),a=r(32),f=r(1),s=r(4);c(c.S,"Reflect",{set:function t(n,r,c){var l,h,v=arguments.length<4?n:arguments[3],p=i.f(f(n),r);if(!p){if(s(h=o(n)))return t(h,r,c,v);p=a(0)}if(u(p,"value")){if(!1===p.writable||!s(v))return!1;if(l=i.f(v,r)){if(l.get||l.set||!1===l.writable)return!1;l.value=c,e.f(v,r,l)}else e.f(v,r,a(0,c));return!0}return void 0!==p.set&&(p.set.call(v,c),!0)}})},function(t,n,r){var e=r(0),i=r(70);i&&e(e.S,"Reflect",{setPrototypeOf:function(t,n){i.check(t,n);try{return i.set(t,n),!0}catch(t){return!1}}})},function(t,n,r){"use strict";var e=r(0),i=r(50)(!0);e(e.P,"Array",{includes:function(t){return i(this,t,arguments.length>1?arguments[1]:void 0)}}),r(31)("includes")},function(t,n,r){"use strict";var e=r(0),i=r(119),o=r(9),u=r(8),c=r(10),a=r(84);e(e.P,"Array",{flatMap:function(t){var n,r,e=o(this);return 
c(t),n=u(e.length),r=a(e,0),i(r,e,e,n,0,1,t,arguments[1]),r}}),r(31)("flatMap")},function(t,n,r){"use strict";var e=r(0),i=r(119),o=r(9),u=r(8),c=r(24),a=r(84);e(e.P,"Array",{flatten:function(){var t=arguments[0],n=o(this),r=u(n.length),e=a(n,0);return i(e,n,n,r,0,void 0===t?1:c(t)),e}}),r(31)("flatten")},function(t,n,r){"use strict";var e=r(0),i=r(76)(!0);e(e.P,"String",{at:function(t){return i(this,t)}})},function(t,n,r){"use strict";var e=r(0),i=r(120),o=r(58);e(e.P+e.F*/Version\/10\.\d+(\.\d+)? Safari\//.test(o),"String",{padStart:function(t){return i(this,t,arguments.length>1?arguments[1]:void 0,!0)}})},function(t,n,r){"use strict";var e=r(0),i=r(120),o=r(58);e(e.P+e.F*/Version\/10\.\d+(\.\d+)? Safari\//.test(o),"String",{padEnd:function(t){return i(this,t,arguments.length>1?arguments[1]:void 0,!1)}})},function(t,n,r){"use strict";r(43)("trimLeft",function(t){return function(){return t(this,1)}},"trimStart")},function(t,n,r){"use strict";r(43)("trimRight",function(t){return function(){return t(this,2)}},"trimEnd")},function(t,n,r){"use strict";var e=r(0),i=r(23),o=r(8),u=r(53),c=r(55),a=RegExp.prototype,f=function(t,n){this._r=t,this._s=n};r(78)(f,"RegExp String",function(){var t=this._r.exec(this._s);return{value:t,done:null===t}}),e(e.P,"String",{matchAll:function(t){if(i(this),!u(t))throw TypeError(t+" is not a regexp!");var n=String(this),r="flags"in a?String(t.flags):c.call(t),e=new RegExp(t.source,~r.indexOf("g")?r:"g"+r);return e.lastIndex=o(t.lastIndex),new f(e,n)}})},function(t,n,r){r(66)("asyncIterator")},function(t,n,r){r(66)("observable")},function(t,n,r){var e=r(0),i=r(118),o=r(15),u=r(16),c=r(82);e(e.S,"Object",{getOwnPropertyDescriptors:function(t){for(var n,r,e=o(t),a=u.f,f=i(e),s={},l=0;f.length>l;)void 0!==(r=a(e,n=f[l++]))&&c(s,n,r);return s}})},function(t,n,r){var e=r(0),i=r(121)(!1);e(e.S,"Object",{values:function(t){return i(t)}})},function(t,n,r){var e=r(0),i=r(121)(!0);e(e.S,"Object",{entries:function(t){return i(t)}})},function(t,n,r){"use strict";var e=r(0),i=r(9),o=r(10),u=r(7);r(6)&&e(e.P+r(61),"Object",{__defineGetter__:function(t,n){u.f(i(this),t,{get:o(n),enumerable:!0,configurable:!0})}})},function(t,n,r){"use strict";var e=r(0),i=r(9),o=r(10),u=r(7);r(6)&&e(e.P+r(61),"Object",{__defineSetter__:function(t,n){u.f(i(this),t,{set:o(n),enumerable:!0,configurable:!0})}})},function(t,n,r){"use strict";var e=r(0),i=r(9),o=r(22),u=r(17),c=r(16).f;r(6)&&e(e.P+r(61),"Object",{__lookupGetter__:function(t){var n,r=i(this),e=o(t,!0);do{if(n=c(r,e))return n.get}while(r=u(r))}})},function(t,n,r){"use strict";var e=r(0),i=r(9),o=r(22),u=r(17),c=r(16).f;r(6)&&e(e.P+r(61),"Object",{__lookupSetter__:function(t){var n,r=i(this),e=o(t,!0);do{if(n=c(r,e))return n.set}while(r=u(r))}})},function(t,n,r){var e=r(0);e(e.P+e.R,"Map",{toJSON:r(122)("Map")})},function(t,n,r){var e=r(0);e(e.P+e.R,"Set",{toJSON:r(122)("Set")})},function(t,n,r){r(62)("Map")},function(t,n,r){r(62)("Set")},function(t,n,r){r(62)("WeakMap")},function(t,n,r){r(62)("WeakSet")},function(t,n,r){r(63)("Map")},function(t,n,r){r(63)("Set")},function(t,n,r){r(63)("WeakMap")},function(t,n,r){r(63)("WeakSet")},function(t,n,r){var e=r(0);e(e.G,{global:r(2)})},function(t,n,r){var e=r(0);e(e.S,"System",{global:r(2)})},function(t,n,r){var e=r(0),i=r(20);e(e.S,"Error",{isError:function(t){return"Error"===i(t)}})},function(t,n,r){var e=r(0);e(e.S,"Math",{clamp:function(t,n,r){return Math.min(r,Math.max(n,t))}})},function(t,n,r){var e=r(0);e(e.S,"Math",{DEG_PER_RAD:Math.PI/180})},function(t,n,r){var 
e=r(0),i=180/Math.PI;e(e.S,"Math",{degrees:function(t){return t*i}})},function(t,n,r){var e=r(0),i=r(124),o=r(104);e(e.S,"Math",{fscale:function(t,n,r,e,u){return o(i(t,n,r,e,u))}})},function(t,n,r){var e=r(0);e(e.S,"Math",{iaddh:function(t,n,r,e){var i=t>>>0,o=r>>>0;return(n>>>0)+(e>>>0)+((i&o|(i|o)&~(i+o>>>0))>>>31)|0}})},function(t,n,r){var e=r(0);e(e.S,"Math",{isubh:function(t,n,r,e){var i=t>>>0,o=r>>>0;return(n>>>0)-(e>>>0)-((~i&o|~(i^o)&i-o>>>0)>>>31)|0}})},function(t,n,r){var e=r(0);e(e.S,"Math",{imulh:function(t,n){var r=+t,e=+n,i=65535&r,o=65535&e,u=r>>16,c=e>>16,a=(u*o>>>0)+(i*o>>>16);return u*c+(a>>16)+((i*c>>>0)+(65535&a)>>16)}})},function(t,n,r){var e=r(0);e(e.S,"Math",{RAD_PER_DEG:180/Math.PI})},function(t,n,r){var e=r(0),i=Math.PI/180;e(e.S,"Math",{radians:function(t){return t*i}})},function(t,n,r){var e=r(0);e(e.S,"Math",{scale:r(124)})},function(t,n,r){var e=r(0);e(e.S,"Math",{umulh:function(t,n){var r=+t,e=+n,i=65535&r,o=65535&e,u=r>>>16,c=e>>>16,a=(u*o>>>0)+(i*o>>>16);return u*c+(a>>>16)+((i*c>>>0)+(65535&a)>>>16)}})},function(t,n,r){var e=r(0);e(e.S,"Math",{signbit:function(t){return(t=+t)!=t?t:0==t?1/t==1/0:t>0}})},function(t,n,r){"use strict";var e=r(0),i=r(18),o=r(2),u=r(57),c=r(111);e(e.P+e.R,"Promise",{finally:function(t){var n=u(this,i.Promise||o.Promise),r="function"==typeof t;return this.then(r?function(r){return c(n,t()).then(function(){return r})}:t,r?function(r){return c(n,t()).then(function(){throw r})}:t)}})},function(t,n,r){"use strict";var e=r(0),i=r(89),o=r(110);e(e.S,"Promise",{try:function(t){var n=i.f(this),r=o(t);return(r.e?n.reject:n.resolve)(r.v),n.promise}})},function(t,n,r){var e=r(28),i=r(1),o=e.key,u=e.set;e.exp({defineMetadata:function(t,n,r,e){u(t,n,i(r),o(e))}})},function(t,n,r){var e=r(28),i=r(1),o=e.key,u=e.map,c=e.store;e.exp({deleteMetadata:function(t,n){var r=arguments.length<3?void 0:o(arguments[2]),e=u(i(n),r,!1);if(void 0===e||!e.delete(t))return!1;if(e.size)return!0;var a=c.get(n);return a.delete(r),!!a.size||c.delete(n)}})},function(t,n,r){var e=r(28),i=r(1),o=r(17),u=e.has,c=e.get,a=e.key,f=function(t,n,r){if(u(t,n,r))return c(t,n,r);var e=o(n);return null!==e?f(t,e,r):void 0};e.exp({getMetadata:function(t,n){return f(t,i(n),arguments.length<3?void 0:a(arguments[2]))}})},function(t,n,r){var e=r(114),i=r(123),o=r(28),u=r(1),c=r(17),a=o.keys,f=o.key,s=function(t,n){var r=a(t,n),o=c(t);if(null===o)return r;var u=s(o,n);return u.length?r.length?i(new e(r.concat(u))):u:r};o.exp({getMetadataKeys:function(t){return s(u(t),arguments.length<2?void 0:f(arguments[1]))}})},function(t,n,r){var e=r(28),i=r(1),o=e.get,u=e.key;e.exp({getOwnMetadata:function(t,n){return o(t,i(n),arguments.length<3?void 0:u(arguments[2]))}})},function(t,n,r){var e=r(28),i=r(1),o=e.keys,u=e.key;e.exp({getOwnMetadataKeys:function(t){return o(i(t),arguments.length<2?void 0:u(arguments[1]))}})},function(t,n,r){var e=r(28),i=r(1),o=r(17),u=e.has,c=e.key,a=function(t,n,r){if(u(t,n,r))return!0;var e=o(n);return null!==e&&a(t,e,r)};e.exp({hasMetadata:function(t,n){return a(t,i(n),arguments.length<3?void 0:c(arguments[2]))}})},function(t,n,r){var e=r(28),i=r(1),o=e.has,u=e.key;e.exp({hasOwnMetadata:function(t,n){return o(t,i(n),arguments.length<3?void 0:u(arguments[2]))}})},function(t,n,r){var e=r(28),i=r(1),o=r(10),u=e.key,c=e.set;e.exp({metadata:function(t,n){return function(r,e){c(t,n,(void 0!==e?i:o)(r),u(e))}}})},function(t,n,r){var e=r(0),i=r(88)(),o=r(2).process,u="process"==r(20)(o);e(e.G,{asap:function(t){var n=u&&o.domain;i(n?n.bind(t):t)}})},function(t,n,r){"use 
strict";var e=r(0),i=r(2),o=r(18),u=r(88)(),c=r(5)("observable"),a=r(10),f=r(1),s=r(39),l=r(41),h=r(11),v=r(40),p=v.RETURN,g=function(t){return null==t?void 0:a(t)},d=function(t){var n=t._c;n&&(t._c=void 0,n())},y=function(t){return void 0===t._o},m=function(t){y(t)||(t._o=void 0,d(t))},b=function(t,n){f(t),this._c=void 0,this._o=t,t=new S(this);try{var r=n(t),e=r;null!=r&&("function"==typeof r.unsubscribe?r=function(){e.unsubscribe()}:a(r),this._c=r)}catch(n){return void t.error(n)}y(this)&&d(this)};b.prototype=l({},{unsubscribe:function(){m(this)}});var S=function(t){this._s=t};S.prototype=l({},{next:function(t){var n=this._s;if(!y(n)){var r=n._o;try{var e=g(r.next);if(e)return e.call(r,t)}catch(t){try{m(n)}finally{throw t}}}},error:function(t){var n=this._s;if(y(n))throw t;var r=n._o;n._o=void 0;try{var e=g(r.error);if(!e)throw t;t=e.call(r,t)}catch(t){try{d(n)}finally{throw t}}return d(n),t},complete:function(t){var n=this._s;if(!y(n)){var r=n._o;n._o=void 0;try{var e=g(r.complete);t=e?e.call(r,t):void 0}catch(t){try{d(n)}finally{throw t}}return d(n),t}}});var w=function(t){s(this,w,"Observable","_f")._f=a(t)};l(w.prototype,{subscribe:function(t){return new b(t,this._f)},forEach:function(t){var n=this;return new(o.Promise||i.Promise)(function(r,e){a(t);var i=n.subscribe({next:function(n){try{return t(n)}catch(t){e(t),i.unsubscribe()}},error:e,complete:r})})}}),l(w,{from:function(t){var n="function"==typeof this?this:w,r=g(f(t)[c]);if(r){var e=f(r.call(t));return e.constructor===n?e:new n(function(t){return e.subscribe(t)})}return new n(function(n){var r=!1;return u(function(){if(!r){try{if(v(t,!1,function(t){if(n.next(t),r)return p})===p)return}catch(t){if(r)throw t;return void n.error(t)}n.complete()}}),function(){r=!0}})},of:function(){for(var t=0,n=arguments.length,r=new Array(n);t<n;)r[t]=arguments[t++];return new("function"==typeof this?this:w)(function(t){var n=!1;return u(function(){if(!n){for(var e=0;e<r.length;++e)if(t.next(r[e]),n)return;t.complete()}}),function(){n=!0}})}}),h(w.prototype,c,function(){return this}),e(e.G,{Observable:w}),r(38)("Observable")},function(t,n,r){var e=r(2),i=r(0),o=r(58),u=[].slice,c=/MSIE .\./.test(o),a=function(t){return function(n,r){var e=arguments.length>2,i=!!e&&u.call(arguments,2);return t(e?function(){("function"==typeof n?n:Function(n)).apply(this,i)}:n,r)}};i(i.G+i.B+i.F*c,{setTimeout:a(e.setTimeout),setInterval:a(e.setInterval)})},function(t,n,r){var e=r(0),i=r(87);e(e.G+e.B,{setImmediate:i.set,clearImmediate:i.clear})},function(t,n,r){for(var e=r(86),i=r(34),o=r(12),u=r(2),c=r(11),a=r(44),f=r(5),s=f("iterator"),l=f("toStringTag"),h=a.Array,v={CSSRuleList:!0,CSSStyleDeclaration:!1,CSSValueList:!1,ClientRectList:!1,DOMRectList:!1,DOMStringList:!1,DOMTokenList:!0,DataTransferItemList:!1,FileList:!1,HTMLAllCollection:!1,HTMLCollection:!1,HTMLFormElement:!1,HTMLSelectElement:!1,MediaList:!0,MimeTypeArray:!1,NamedNodeMap:!1,NodeList:!0,PaintRequestList:!1,Plugin:!1,PluginArray:!1,SVGLengthList:!1,SVGNumberList:!1,SVGPathSegList:!1,SVGPointList:!1,SVGStringList:!1,SVGTransformList:!1,SourceBufferList:!1,StyleSheetList:!0,TextTrackCueList:!1,TextTrackList:!1,TouchList:!1},p=i(v),g=0;g<p.length;g++){var d,y=p[g],m=v[y],b=u[y],S=b&&b.prototype;if(S&&(S[s]||c(S,s,h),S[l]||c(S,l,y),a[y]=h,m))for(d in e)S[d]||o(S,d,e[d],!0)}},function(t,n,r){(function(n){!function(n){"use strict";var r,e=Object.prototype,i=e.hasOwnProperty,o="function"==typeof 
Symbol?Symbol:{},u=o.iterator||"@@iterator",c=o.asyncIterator||"@@asyncIterator",a=o.toStringTag||"@@toStringTag",f="object"==typeof t,s=n.regeneratorRuntime;if(s)f&&(t.exports=s);else{(s=n.regeneratorRuntime=f?t.exports:{}).wrap=S;var l="suspendedStart",h="suspendedYield",v="executing",p="completed",g={},d={};d[u]=function(){return this};var y=Object.getPrototypeOf,m=y&&y(y(j([])));m&&m!==e&&i.call(m,u)&&(d=m);var b=E.prototype=x.prototype=Object.create(d);_.prototype=b.constructor=E,E.constructor=_,E[a]=_.displayName="GeneratorFunction",s.isGeneratorFunction=function(t){var n="function"==typeof t&&t.constructor;return!!n&&(n===_||"GeneratorFunction"===(n.displayName||n.name))},s.mark=function(t){return Object.setPrototypeOf?Object.setPrototypeOf(t,E):(t.__proto__=E,a in t||(t[a]="GeneratorFunction")),t.prototype=Object.create(b),t},s.awrap=function(t){return{__await:t}},O(M.prototype),M.prototype[c]=function(){return this},s.AsyncIterator=M,s.async=function(t,n,r,e){var i=new M(S(t,n,r,e));return s.isGeneratorFunction(n)?i:i.next().then(function(t){return t.done?t.value:i.next()})},O(b),b[a]="Generator",b[u]=function(){return this},b.toString=function(){return"[object Generator]"},s.keys=function(t){var n=[];for(var r in t)n.push(r);return n.reverse(),function r(){for(;n.length;){var e=n.pop();if(e in t)return r.value=e,r.done=!1,r}return r.done=!0,r}},s.values=j,I.prototype={constructor:I,reset:function(t){if(this.prev=0,this.next=0,this.sent=this._sent=r,this.done=!1,this.delegate=null,this.method="next",this.arg=r,this.tryEntries.forEach(A),!t)for(var n in this)"t"===n.charAt(0)&&i.call(this,n)&&!isNaN(+n.slice(1))&&(this[n]=r)},stop:function(){this.done=!0;var t=this.tryEntries[0].completion;if("throw"===t.type)throw t.arg;return this.rval},dispatchException:function(t){if(this.done)throw t;var n=this;function e(e,i){return c.type="throw",c.arg=t,n.next=e,i&&(n.method="next",n.arg=r),!!i}for(var o=this.tryEntries.length-1;o>=0;--o){var u=this.tryEntries[o],c=u.completion;if("root"===u.tryLoc)return e("end");if(u.tryLoc<=this.prev){var a=i.call(u,"catchLoc"),f=i.call(u,"finallyLoc");if(a&&f){if(this.prev<u.catchLoc)return e(u.catchLoc,!0);if(this.prev<u.finallyLoc)return e(u.finallyLoc)}else if(a){if(this.prev<u.catchLoc)return e(u.catchLoc,!0)}else{if(!f)throw new Error("try statement without catch or finally");if(this.prev<u.finallyLoc)return e(u.finallyLoc)}}}},abrupt:function(t,n){for(var r=this.tryEntries.length-1;r>=0;--r){var e=this.tryEntries[r];if(e.tryLoc<=this.prev&&i.call(e,"finallyLoc")&&this.prev<e.finallyLoc){var o=e;break}}o&&("break"===t||"continue"===t)&&o.tryLoc<=n&&n<=o.finallyLoc&&(o=null);var u=o?o.completion:{};return u.type=t,u.arg=n,o?(this.method="next",this.next=o.finallyLoc,g):this.complete(u)},complete:function(t,n){if("throw"===t.type)throw t.arg;return"break"===t.type||"continue"===t.type?this.next=t.arg:"return"===t.type?(this.rval=this.arg=t.arg,this.method="return",this.next="end"):"normal"===t.type&&n&&(this.next=n),g},finish:function(t){for(var n=this.tryEntries.length-1;n>=0;--n){var r=this.tryEntries[n];if(r.finallyLoc===t)return this.complete(r.completion,r.afterLoc),A(r),g}},catch:function(t){for(var n=this.tryEntries.length-1;n>=0;--n){var r=this.tryEntries[n];if(r.tryLoc===t){var e=r.completion;if("throw"===e.type){var i=e.arg;A(r)}return i}}throw new Error("illegal catch attempt")},delegateYield:function(t,n,e){return this.delegate={iterator:j(t),resultName:n,nextLoc:e},"next"===this.method&&(this.arg=r),g}}}function S(t,n,r,e){var 
i=n&&n.prototype instanceof x?n:x,o=Object.create(i.prototype),u=new I(e||[]);return o._invoke=function(t,n,r){var e=l;return function(i,o){if(e===v)throw new Error("Generator is already running");if(e===p){if("throw"===i)throw o;return N()}for(r.method=i,r.arg=o;;){var u=r.delegate;if(u){var c=P(u,r);if(c){if(c===g)continue;return c}}if("next"===r.method)r.sent=r._sent=r.arg;else if("throw"===r.method){if(e===l)throw e=p,r.arg;r.dispatchException(r.arg)}else"return"===r.method&&r.abrupt("return",r.arg);e=v;var a=w(t,n,r);if("normal"===a.type){if(e=r.done?p:h,a.arg===g)continue;return{value:a.arg,done:r.done}}"throw"===a.type&&(e=p,r.method="throw",r.arg=a.arg)}}}(t,r,u),o}function w(t,n,r){try{return{type:"normal",arg:t.call(n,r)}}catch(t){return{type:"throw",arg:t}}}function x(){}function _(){}function E(){}function O(t){["next","throw","return"].forEach(function(n){t[n]=function(t){return this._invoke(n,t)}})}function M(t){function r(n,e,o,u){var c=w(t[n],t,e);if("throw"!==c.type){var a=c.arg,f=a.value;return f&&"object"==typeof f&&i.call(f,"__await")?Promise.resolve(f.__await).then(function(t){r("next",t,o,u)},function(t){r("throw",t,o,u)}):Promise.resolve(f).then(function(t){a.value=t,o(a)},u)}u(c.arg)}var e;"object"==typeof n.process&&n.process.domain&&(r=n.process.domain.bind(r)),this._invoke=function(t,n){function i(){return new Promise(function(e,i){r(t,n,e,i)})}return e=e?e.then(i,i):i()}}function P(t,n){var e=t.iterator[n.method];if(e===r){if(n.delegate=null,"throw"===n.method){if(t.iterator.return&&(n.method="return",n.arg=r,P(t,n),"throw"===n.method))return g;n.method="throw",n.arg=new TypeError("The iterator does not provide a 'throw' method")}return g}var i=w(e,t.iterator,n.arg);if("throw"===i.type)return n.method="throw",n.arg=i.arg,n.delegate=null,g;var o=i.arg;return o?o.done?(n[t.resultName]=o.value,n.next=t.nextLoc,"return"!==n.method&&(n.method="next",n.arg=r),n.delegate=null,g):o:(n.method="throw",n.arg=new TypeError("iterator result is not an object"),n.delegate=null,g)}function F(t){var n={tryLoc:t[0]};1 in t&&(n.catchLoc=t[1]),2 in t&&(n.finallyLoc=t[2],n.afterLoc=t[3]),this.tryEntries.push(n)}function A(t){var n=t.completion||{};n.type="normal",delete n.arg,t.completion=n}function I(t){this.tryEntries=[{tryLoc:"root"}],t.forEach(F,this),this.reset(!0)}function j(t){if(t){var n=t[u];if(n)return n.call(t);if("function"==typeof t.next)return t;if(!isNaN(t.length)){var e=-1,o=function n(){for(;++e<t.length;)if(i.call(t,e))return n.value=t[e],n.done=!1,n;return n.value=r,n.done=!0,n};return o.next=o}}return{next:N}}function N(){return{value:r,done:!0}}}("object"==typeof n?n:"object"==typeof window?window:"object"==typeof self?self:this)}).call(this,r(64))},function(t,n,r){r(329),t.exports=r(18).RegExp.escape},function(t,n,r){var e=r(0),i=r(330)(/[\\^$*+?.()|[\]{}]/g,"\\$&");e(e.S,"RegExp",{escape:function(t){return i(t)}})},function(t,n){t.exports=function(t,n){var r=n===Object(n)?function(t){return n[t]}:n;return function(n){return String(n).replace(t,r)}}},function(t,n,r){"use strict";r.r(n);var e=r(125),i=r.n(e),o=r(126),u=r.n(o);const c=r(332);n.default=function(t,n,r){var e,o=null,a=t.properties,f=c,s="nostate.nosalt",l=d()+"/"+u.a,h=d()+"/"+i.a,v=function(t){try{return/^(\S+?:\/\/[a-z0-9-.]+(:[0-9]+)?)/gi.exec(t)[0]+"/"}catch(t){return null}}(a.uaaLocation),p={},g=0;function d(){return r.location.href.substring(0,r.location.href.lastIndexOf("/"))}function y(t){var n=t.substring(1).split("&"),r="";return n.forEach(function(t){var 
n=t.split("=");"session_state"!==n[0]||(r=n[1])}),r}function m(t,n){if(t)e=!0,function(t,n){localStorage&&(localStorage.setItem(_(),JSON.stringify(t)),localStorage.setItem(E(),n))}(t,n),a.onIdentityChange(t);else{if(!1===e)return void localStorage.setItem(E(),n);e=!1,O(),localStorage.setItem(E(),n),a.onLogout()}}function b(){if(!o){var n=t.opFrame.contentWindow,r=a.clientId+" "+x();n.postMessage(r,v)}}function S(t,n,r){return a.uaaLocation+"/oauth/authorize?response_type=token&scope="+encodeURIComponent(t)+"&client_id="+a.clientId+"&prompt=none&redirect_uri="+encodeURIComponent(n+"?"+r)}function w(t,n){var r=t.split("&");for(var e in r)if(r[e].startsWith(n+"="))return r[e].substring(n.length+1);return null}function x(){var t=localStorage.getItem(E());return t||s}function _(){return a.storageKey+"-claims"}function E(){return a.storageKey+"-session_state"}function O(){localStorage&&(localStorage.removeItem(_()),localStorage.removeItem(E()))}function M(){return f}return{fetchAccessToken:function(t,n){var r=this.opFrame=document.createElement("iframe");p[g]={callback:n,frame:r},r.setAttribute("src",S(t,h,g)),document.body.appendChild(r),g++},afterAccess:function(t){var n,r=p[t],e=w(r.frame.contentWindow.location.hash.substring(1),"error");null==e?n=w(r.frame.contentWindow.location.hash.substring(1),"access_token"):e=w(r.frame.contentWindow.location.hash.substring(1),"error"),r.callback(n,e),document.body.removeChild(r.frame),delete p[t]},afterAuthorize:function(){clearTimeout(o);var r,e,i=n.location.hash;i&&i.length>0&&(r=function(n,r){var e=w(n,r);return e?t.decodeJwt(e):null}(i.substring(1),"id_token"),e=y(i)),m(r,e),o=null},receiveMessage:function(t){var r=t.data;"changed"===r?(O(),o=setTimeout(function(){n.location="about:blank",m(null,s),o=null},a.authTimeout),n.location=a.uaaLocation+"/oauth/authorize?response_type=id_token&client_id="+a.clientId+"&prompt=none&redirect_uri="+encodeURIComponent(l)):"unchanged"===r?void 0===e&&m(function(){if(!localStorage)return null;try{return JSON.parse(localStorage.getItem(_()))}catch(t){return O(),null}}(),x()):m(null,s)},startCheckingSession:function(){b(),setInterval(b,a.checkInterval)},extractSessionState:y,announceIdentity:m,getAccessTokenCallbackCount:function(){return Object.keys(p).length},getUaaValidator:M,setUaaValidator:function(t){f=t},checkClientConfig:function(){var t=S("openid",h,g);M().checkClientConfiguration(t,a.clientId)}}}},function(t,n,r){var e=r(333);function i(t,n){var r=new XMLHttpRequest;r.open("GET",t),r.setRequestHeader("Accept","application/json"),r.send(),r.onreadystatechange=function(){r.readyState==XMLHttpRequest.DONE&&n(r)}}t.exports={isValidUAA:function(t){i(t+"/info",function(n){if(200!==n.status)throw t+" does not appear to be a running UAA instance or may not have a trusted SSL certificate";var r=JSON.parse(n.response).app.version;if(!e.isGreaterThanOrEqualTo(r,"4.10.0"))throw"The UAA instance running at "+t+" has version "+r+". This uaa-singular library requires UAA version 4.10.0 or higher."})},checkClientConfiguration:function(t,n){i(t,function(r){if(400===r.status){var e=window.location.href.substring(0,window.location.href.lastIndexOf("/"))+"/**";throw"Error while calling /oauth/authorize. Is the UAA client "+n+" configured to allow redirects to "+e+" ? 
Visit "+t+" in the browser to see the UAA's error messages."}})}}},function(t,n){t.exports={getMajor:function(t){return parseInt(t.split(".")[0])},getMinor:function(t){return parseInt(t.split(".")[1])},getPatch:function(t){return parseInt(t.split("-")[0].split(".")[2])},isGreaterThanOrEqualTo:function(t,n){var r=this.getMajor(t),e=this.getMinor(t),i=this.getPatch(t),o=this.getMajor(n),u=this.getMinor(n),c=this.getPatch(n);return r>o||(r===o&&e>u||(r===o&&e===u&&i>c||r===o&&e===u&&i===c))}}}]).default;
\ No newline at end of file
diff --git a/deps/rabbitmq_management/priv/www/js/singular/singular.js b/deps/rabbitmq_management/priv/www/js/singular/singular.js
new file mode 100644
index 0000000000..0f9ef22059
--- /dev/null
+++ b/deps/rabbitmq_management/priv/www/js/singular/singular.js
@@ -0,0 +1 @@
+var Singular=function(t){var n={};function r(e){if(n[e])return n[e].exports;var i=n[e]={i:e,l:!1,exports:{}};return t[e].call(i.exports,i,i.exports,r),i.l=!0,i.exports}return r.m=t,r.c=n,r.d=function(t,n,e){r.o(t,n)||Object.defineProperty(t,n,{enumerable:!0,get:e})},r.r=function(t){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(t,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(t,"__esModule",{value:!0})},r.t=function(t,n){if(1&n&&(t=r(t)),8&n)return t;if(4&n&&"object"==typeof t&&t&&t.__esModule)return t;var e=Object.create(null);if(r.r(e),Object.defineProperty(e,"default",{enumerable:!0,value:t}),2&n&&"string"!=typeof t)for(var i in t)r.d(e,i,function(n){return t[n]}.bind(null,i));return e},r.n=function(t){var n=t&&t.__esModule?function(){return t.default}:function(){return t};return r.d(n,"a",n),n},r.o=function(t,n){return Object.prototype.hasOwnProperty.call(t,n)},r.p="",r(r.s=127)}([function(t,n,r){var e=r(2),i=r(18),o=r(11),u=r(12),c=r(19),a=function(t,n,r){var f,s,l,h,v=t&a.F,p=t&a.G,d=t&a.S,g=t&a.P,y=t&a.B,m=p?e:d?e[n]||(e[n]={}):(e[n]||{}).prototype,b=p?i:i[n]||(i[n]={}),w=b.prototype||(b.prototype={});for(f in p&&(r=n),r)l=((s=!v&&m&&void 0!==m[f])?m:r)[f],h=y&&s?c(l,e):g&&"function"==typeof l?c(Function.call,l):l,m&&u(m,f,l,t&a.U),b[f]!=l&&o(b,f,h),g&&w[f]!=l&&(w[f]=l)};e.core=i,a.F=1,a.G=2,a.S=4,a.P=8,a.B=16,a.W=32,a.U=64,a.R=128,t.exports=a},function(t,n,r){var e=r(4);t.exports=function(t){if(!e(t))throw TypeError(t+" is not an object!");return t}},function(t,n){var r=t.exports="undefined"!=typeof window&&window.Math==Math?window:"undefined"!=typeof self&&self.Math==Math?self:Function("return this")();"number"==typeof __g&&(__g=r)},function(t,n){t.exports=function(t){try{return!!t()}catch(t){return!0}}},function(t,n){t.exports=function(t){return"object"==typeof t?null!==t:"function"==typeof t}},function(t,n,r){var e=r(49)("wks"),i=r(33),o=r(2).Symbol,u="function"==typeof o;(t.exports=function(t){return e[t]||(e[t]=u&&o[t]||(u?o:i)("Symbol."+t))}).store=e},function(t,n,r){t.exports=!r(3)(function(){return 7!=Object.defineProperty({},"a",{get:function(){return 7}}).a})},function(t,n,r){var e=r(1),i=r(91),o=r(22),u=Object.defineProperty;n.f=r(6)?Object.defineProperty:function(t,n,r){if(e(t),n=o(n,!0),e(r),i)try{return u(t,n,r)}catch(t){}if("get"in r||"set"in r)throw TypeError("Accessors not supported!");return"value"in r&&(t[n]=r.value),t}},function(t,n,r){var e=r(24),i=Math.min;t.exports=function(t){return t>0?i(e(t),9007199254740991):0}},function(t,n,r){var e=r(23);t.exports=function(t){return Object(e(t))}},function(t,n){t.exports=function(t){if("function"!=typeof t)throw TypeError(t+" is not a function!");return t}},function(t,n,r){var e=r(7),i=r(32);t.exports=r(6)?function(t,n,r){return e.f(t,n,i(1,r))}:function(t,n,r){return t[n]=r,t}},function(t,n,r){var e=r(2),i=r(11),o=r(14),u=r(33)("src"),c=Function.toString,a=(""+c).split("toString");r(18).inspectSource=function(t){return c.call(t)},(t.exports=function(t,n,r,c){var f="function"==typeof r;f&&(o(r,"name")||i(r,"name",n)),t[n]!==r&&(f&&(o(r,u)||i(r,u,t[n]?""+t[n]:a.join(String(n)))),t===e?t[n]=r:c?t[n]?t[n]=r:i(t,n,r):(delete t[n],i(t,n,r)))})(Function.prototype,"toString",function(){return"function"==typeof this&&this[u]||c.call(this)})},function(t,n,r){var e=r(0),i=r(3),o=r(23),u=/"/g,c=function(t,n,r,e){var i=String(o(t)),c="<"+n;return""!==r&&(c+=" "+r+'="'+String(e).replace(u,"&quot;")+'"'),c+">"+i+"</"+n+">"};t.exports=function(t,n){var r={};r[t]=n(c),e(e.P+e.F*i(function(){var 
n=""[t]('"');return n!==n.toLowerCase()||n.split('"').length>3}),"String",r)}},function(t,n){var r={}.hasOwnProperty;t.exports=function(t,n){return r.call(t,n)}},function(t,n,r){var e=r(46),i=r(23);t.exports=function(t){return e(i(t))}},function(t,n,r){var e=r(47),i=r(32),o=r(15),u=r(22),c=r(14),a=r(91),f=Object.getOwnPropertyDescriptor;n.f=r(6)?f:function(t,n){if(t=o(t),n=u(n,!0),a)try{return f(t,n)}catch(t){}if(c(t,n))return i(!e.f.call(t,n),t[n])}},function(t,n,r){var e=r(14),i=r(9),o=r(67)("IE_PROTO"),u=Object.prototype;t.exports=Object.getPrototypeOf||function(t){return t=i(t),e(t,o)?t[o]:"function"==typeof t.constructor&&t instanceof t.constructor?t.constructor.prototype:t instanceof Object?u:null}},function(t,n){var r=t.exports={version:"2.5.6"};"number"==typeof __e&&(__e=r)},function(t,n,r){var e=r(10);t.exports=function(t,n,r){if(e(t),void 0===n)return t;switch(r){case 1:return function(r){return t.call(n,r)};case 2:return function(r,e){return t.call(n,r,e)};case 3:return function(r,e,i){return t.call(n,r,e,i)}}return function(){return t.apply(n,arguments)}}},function(t,n){var r={}.toString;t.exports=function(t){return r.call(t).slice(8,-1)}},function(t,n,r){"use strict";var e=r(3);t.exports=function(t,n){return!!t&&e(function(){n?t.call(null,function(){},1):t.call(null)})}},function(t,n,r){var e=r(4);t.exports=function(t,n){if(!e(t))return t;var r,i;if(n&&"function"==typeof(r=t.toString)&&!e(i=r.call(t)))return i;if("function"==typeof(r=t.valueOf)&&!e(i=r.call(t)))return i;if(!n&&"function"==typeof(r=t.toString)&&!e(i=r.call(t)))return i;throw TypeError("Can't convert object to primitive value")}},function(t,n){t.exports=function(t){if(null==t)throw TypeError("Can't call method on "+t);return t}},function(t,n){var r=Math.ceil,e=Math.floor;t.exports=function(t){return isNaN(t=+t)?0:(t>0?e:r)(t)}},function(t,n,r){var e=r(0),i=r(18),o=r(3);t.exports=function(t,n){var r=(i.Object||{})[t]||Object[t],u={};u[t]=n(r),e(e.S+e.F*o(function(){r(1)}),"Object",u)}},function(t,n,r){var e=r(19),i=r(46),o=r(9),u=r(8),c=r(84);t.exports=function(t,n){var r=1==t,a=2==t,f=3==t,s=4==t,l=6==t,h=5==t||l,v=n||c;return function(n,c,p){for(var d,g,y=o(n),m=i(y),b=e(c,p,3),w=u(m.length),S=0,x=r?v(n,w):a?v(n,0):void 0;w>S;S++)if((h||S in m)&&(g=b(d=m[S],S,y),t))if(r)x[S]=g;else if(g)switch(t){case 3:return!0;case 5:return d;case 6:return S;case 2:x.push(d)}else if(s)return!1;return l?-1:f||s?s:x}}},function(t,n,r){"use strict";if(r(6)){var e=r(30),i=r(2),o=r(3),u=r(0),c=r(60),a=r(90),f=r(19),s=r(39),l=r(32),h=r(11),v=r(41),p=r(24),d=r(8),g=r(117),y=r(35),m=r(22),b=r(14),w=r(48),S=r(4),x=r(9),_=r(81),E=r(36),O=r(17),M=r(37).f,P=r(83),F=r(33),A=r(5),j=r(26),I=r(50),N=r(57),L=r(86),T=r(44),k=r(54),R=r(38),C=r(85),D=r(107),W=r(7),U=r(16),G=W.f,V=U.f,B=i.RangeError,z=i.TypeError,q=i.Uint8Array,J=Array.prototype,Y=a.ArrayBuffer,K=a.DataView,H=j(0),X=j(2),$=j(3),Z=j(4),Q=j(5),tt=j(6),nt=I(!0),rt=I(!1),et=L.values,it=L.keys,ot=L.entries,ut=J.lastIndexOf,ct=J.reduce,at=J.reduceRight,ft=J.join,st=J.sort,lt=J.slice,ht=J.toString,vt=J.toLocaleString,pt=A("iterator"),dt=A("toStringTag"),gt=F("typed_constructor"),yt=F("def_constructor"),mt=c.CONSTR,bt=c.TYPED,wt=c.VIEW,St=j(1,function(t,n){return Mt(N(t,t[yt]),n)}),xt=o(function(){return 1===new q(new Uint16Array([1]).buffer)[0]}),_t=!!q&&!!q.prototype.set&&o(function(){new q(1).set({})}),Et=function(t,n){var r=p(t);if(r<0||r%n)throw B("Wrong offset!");return r},Ot=function(t){if(S(t)&&bt in t)return t;throw z(t+" is not a typed array!")},Mt=function(t,n){if(!(S(t)&&gt 
in t))throw z("It is not a typed array constructor!");return new t(n)},Pt=function(t,n){return Ft(N(t,t[yt]),n)},Ft=function(t,n){for(var r=0,e=n.length,i=Mt(t,e);e>r;)i[r]=n[r++];return i},At=function(t,n,r){G(t,n,{get:function(){return this._d[r]}})},jt=function(t){var n,r,e,i,o,u,c=x(t),a=arguments.length,s=a>1?arguments[1]:void 0,l=void 0!==s,h=P(c);if(null!=h&&!_(h)){for(u=h.call(c),e=[],n=0;!(o=u.next()).done;n++)e.push(o.value);c=e}for(l&&a>2&&(s=f(s,arguments[2],2)),n=0,r=d(c.length),i=Mt(this,r);r>n;n++)i[n]=l?s(c[n],n):c[n];return i},It=function(){for(var t=0,n=arguments.length,r=Mt(this,n);n>t;)r[t]=arguments[t++];return r},Nt=!!q&&o(function(){vt.call(new q(1))}),Lt=function(){return vt.apply(Nt?lt.call(Ot(this)):Ot(this),arguments)},Tt={copyWithin:function(t,n){return D.call(Ot(this),t,n,arguments.length>2?arguments[2]:void 0)},every:function(t){return Z(Ot(this),t,arguments.length>1?arguments[1]:void 0)},fill:function(t){return C.apply(Ot(this),arguments)},filter:function(t){return Pt(this,X(Ot(this),t,arguments.length>1?arguments[1]:void 0))},find:function(t){return Q(Ot(this),t,arguments.length>1?arguments[1]:void 0)},findIndex:function(t){return tt(Ot(this),t,arguments.length>1?arguments[1]:void 0)},forEach:function(t){H(Ot(this),t,arguments.length>1?arguments[1]:void 0)},indexOf:function(t){return rt(Ot(this),t,arguments.length>1?arguments[1]:void 0)},includes:function(t){return nt(Ot(this),t,arguments.length>1?arguments[1]:void 0)},join:function(t){return ft.apply(Ot(this),arguments)},lastIndexOf:function(t){return ut.apply(Ot(this),arguments)},map:function(t){return St(Ot(this),t,arguments.length>1?arguments[1]:void 0)},reduce:function(t){return ct.apply(Ot(this),arguments)},reduceRight:function(t){return at.apply(Ot(this),arguments)},reverse:function(){for(var t,n=Ot(this).length,r=Math.floor(n/2),e=0;e<r;)t=this[e],this[e++]=this[--n],this[n]=t;return this},some:function(t){return $(Ot(this),t,arguments.length>1?arguments[1]:void 0)},sort:function(t){return st.call(Ot(this),t)},subarray:function(t,n){var r=Ot(this),e=r.length,i=y(t,e);return new(N(r,r[yt]))(r.buffer,r.byteOffset+i*r.BYTES_PER_ELEMENT,d((void 0===n?e:y(n,e))-i))}},kt=function(t,n){return Pt(this,lt.call(Ot(this),t,n))},Rt=function(t){Ot(this);var n=Et(arguments[1],1),r=this.length,e=x(t),i=d(e.length),o=0;if(i+n>r)throw B("Wrong length!");for(;o<i;)this[n+o]=e[o++]},Ct={entries:function(){return ot.call(Ot(this))},keys:function(){return it.call(Ot(this))},values:function(){return et.call(Ot(this))}},Dt=function(t,n){return S(t)&&t[bt]&&"symbol"!=typeof n&&n in t&&String(+n)==String(n)},Wt=function(t,n){return Dt(t,n=m(n,!0))?l(2,t[n]):V(t,n)},Ut=function(t,n,r){return!(Dt(t,n=m(n,!0))&&S(r)&&b(r,"value"))||b(r,"get")||b(r,"set")||r.configurable||b(r,"writable")&&!r.writable||b(r,"enumerable")&&!r.enumerable?G(t,n,r):(t[n]=r.value,t)};mt||(U.f=Wt,W.f=Ut),u(u.S+u.F*!mt,"Object",{getOwnPropertyDescriptor:Wt,defineProperty:Ut}),o(function(){ht.call({})})&&(ht=vt=function(){return ft.call(this)});var Gt=v({},Tt);v(Gt,Ct),h(Gt,pt,Ct.values),v(Gt,{slice:kt,set:Rt,constructor:function(){},toString:ht,toLocaleString:Lt}),At(Gt,"buffer","b"),At(Gt,"byteOffset","o"),At(Gt,"byteLength","l"),At(Gt,"length","e"),G(Gt,dt,{get:function(){return this[bt]}}),t.exports=function(t,n,r,a){var f=t+((a=!!a)?"Clamped":"")+"Array",l="get"+t,v="set"+t,p=i[f],y=p||{},m=p&&O(p),b=!p||!c.ABV,x={},_=p&&p.prototype,P=function(t,r){G(t,r,{get:function(){return function(t,r){var e=t._d;return 
e.v[l](r*n+e.o,xt)}(this,r)},set:function(t){return function(t,r,e){var i=t._d;a&&(e=(e=Math.round(e))<0?0:e>255?255:255&e),i.v[v](r*n+i.o,e,xt)}(this,r,t)},enumerable:!0})};b?(p=r(function(t,r,e,i){s(t,p,f,"_d");var o,u,c,a,l=0,v=0;if(S(r)){if(!(r instanceof Y||"ArrayBuffer"==(a=w(r))||"SharedArrayBuffer"==a))return bt in r?Ft(p,r):jt.call(p,r);o=r,v=Et(e,n);var y=r.byteLength;if(void 0===i){if(y%n)throw B("Wrong length!");if((u=y-v)<0)throw B("Wrong length!")}else if((u=d(i)*n)+v>y)throw B("Wrong length!");c=u/n}else c=g(r),o=new Y(u=c*n);for(h(t,"_d",{b:o,o:v,l:u,e:c,v:new K(o)});l<c;)P(t,l++)}),_=p.prototype=E(Gt),h(_,"constructor",p)):o(function(){p(1)})&&o(function(){new p(-1)})&&k(function(t){new p,new p(null),new p(1.5),new p(t)},!0)||(p=r(function(t,r,e,i){var o;return s(t,p,f),S(r)?r instanceof Y||"ArrayBuffer"==(o=w(r))||"SharedArrayBuffer"==o?void 0!==i?new y(r,Et(e,n),i):void 0!==e?new y(r,Et(e,n)):new y(r):bt in r?Ft(p,r):jt.call(p,r):new y(g(r))}),H(m!==Function.prototype?M(y).concat(M(m)):M(y),function(t){t in p||h(p,t,y[t])}),p.prototype=_,e||(_.constructor=p));var F=_[pt],A=!!F&&("values"==F.name||null==F.name),j=Ct.values;h(p,gt,!0),h(_,bt,f),h(_,wt,!0),h(_,yt,p),(a?new p(1)[dt]==f:dt in _)||G(_,dt,{get:function(){return f}}),x[f]=p,u(u.G+u.W+u.F*(p!=y),x),u(u.S,f,{BYTES_PER_ELEMENT:n}),u(u.S+u.F*o(function(){y.of.call(p,1)}),f,{from:jt,of:It}),"BYTES_PER_ELEMENT"in _||h(_,"BYTES_PER_ELEMENT",n),u(u.P,f,Tt),R(f),u(u.P+u.F*_t,f,{set:Rt}),u(u.P+u.F*!A,f,Ct),e||_.toString==ht||(_.toString=ht),u(u.P+u.F*o(function(){new p(1).slice()}),f,{slice:kt}),u(u.P+u.F*(o(function(){return[1,2].toLocaleString()!=new p([1,2]).toLocaleString()})||!o(function(){_.toLocaleString.call([1,2])})),f,{toLocaleString:Lt}),T[f]=A?F:j,e||A||h(_,pt,j)}}else t.exports=function(){}},function(t,n,r){var e=r(112),i=r(0),o=r(49)("metadata"),u=o.store||(o.store=new(r(115))),c=function(t,n,r){var i=u.get(t);if(!i){if(!r)return;u.set(t,i=new e)}var o=i.get(n);if(!o){if(!r)return;i.set(n,o=new e)}return o};t.exports={store:u,map:c,has:function(t,n,r){var e=c(n,r,!1);return void 0!==e&&e.has(t)},get:function(t,n,r){var e=c(n,r,!1);return void 0===e?void 0:e.get(t)},set:function(t,n,r,e){c(r,e,!0).set(t,n)},keys:function(t,n){var r=c(t,n,!1),e=[];return r&&r.forEach(function(t,n){e.push(n)}),e},key:function(t){return void 0===t||"symbol"==typeof t?t:String(t)},exp:function(t){i(i.S,"Reflect",t)}}},function(t,n,r){var e=r(33)("meta"),i=r(4),o=r(14),u=r(7).f,c=0,a=Object.isExtensible||function(){return!0},f=!r(3)(function(){return a(Object.preventExtensions({}))}),s=function(t){u(t,e,{value:{i:"O"+ ++c,w:{}}})},l=t.exports={KEY:e,NEED:!1,fastKey:function(t,n){if(!i(t))return"symbol"==typeof t?t:("string"==typeof t?"S":"P")+t;if(!o(t,e)){if(!a(t))return"F";if(!n)return"E";s(t)}return t[e].i},getWeak:function(t,n){if(!o(t,e)){if(!a(t))return!0;if(!n)return!1;s(t)}return t[e].w},onFreeze:function(t){return f&&l.NEED&&a(t)&&!o(t,e)&&s(t),t}}},function(t,n){t.exports=!1},function(t,n,r){var e=r(5)("unscopables"),i=Array.prototype;null==i[e]&&r(11)(i,e,{}),t.exports=function(t){i[e][t]=!0}},function(t,n){t.exports=function(t,n){return{enumerable:!(1&t),configurable:!(2&t),writable:!(4&t),value:n}}},function(t,n){var r=0,e=Math.random();t.exports=function(t){return"Symbol(".concat(void 0===t?"":t,")_",(++r+e).toString(36))}},function(t,n,r){var e=r(93),i=r(68);t.exports=Object.keys||function(t){return e(t,i)}},function(t,n,r){var 
e=r(24),i=Math.max,o=Math.min;t.exports=function(t,n){return(t=e(t))<0?i(t+n,0):o(t,n)}},function(t,n,r){var e=r(1),i=r(94),o=r(68),u=r(67)("IE_PROTO"),c=function(){},a=function(){var t,n=r(65)("iframe"),e=o.length;for(n.style.display="none",r(69).appendChild(n),n.src="javascript:",(t=n.contentWindow.document).open(),t.write("<script>document.F=Object<\/script>"),t.close(),a=t.F;e--;)delete a.prototype[o[e]];return a()};t.exports=Object.create||function(t,n){var r;return null!==t?(c.prototype=e(t),r=new c,c.prototype=null,r[u]=t):r=a(),void 0===n?r:i(r,n)}},function(t,n,r){var e=r(93),i=r(68).concat("length","prototype");n.f=Object.getOwnPropertyNames||function(t){return e(t,i)}},function(t,n,r){"use strict";var e=r(2),i=r(7),o=r(6),u=r(5)("species");t.exports=function(t){var n=e[t];o&&n&&!n[u]&&i.f(n,u,{configurable:!0,get:function(){return this}})}},function(t,n){t.exports=function(t,n,r,e){if(!(t instanceof n)||void 0!==e&&e in t)throw TypeError(r+": incorrect invocation!");return t}},function(t,n,r){var e=r(19),i=r(105),o=r(81),u=r(1),c=r(8),a=r(83),f={},s={};(n=t.exports=function(t,n,r,l,h){var v,p,d,g,y=h?function(){return t}:a(t),m=e(r,l,n?2:1),b=0;if("function"!=typeof y)throw TypeError(t+" is not iterable!");if(o(y)){for(v=c(t.length);v>b;b++)if((g=n?m(u(p=t[b])[0],p[1]):m(t[b]))===f||g===s)return g}else for(d=y.call(t);!(p=d.next()).done;)if((g=i(d,m,p.value,n))===f||g===s)return g}).BREAK=f,n.RETURN=s},function(t,n,r){var e=r(12);t.exports=function(t,n,r){for(var i in n)e(t,i,n[i],r);return t}},function(t,n,r){var e=r(7).f,i=r(14),o=r(5)("toStringTag");t.exports=function(t,n,r){t&&!i(t=r?t:t.prototype,o)&&e(t,o,{configurable:!0,value:n})}},function(t,n,r){var e=r(0),i=r(23),o=r(3),u=r(71),c="["+u+"]",a=RegExp("^"+c+c+"*"),f=RegExp(c+c+"*$"),s=function(t,n,r){var i={},c=o(function(){return!!u[t]()||"​…"!="​…"[t]()}),a=i[t]=c?n(l):u[t];r&&(i[r]=a),e(e.P+e.F*c,"String",i)},l=s.trim=function(t,n){return t=String(i(t)),1&n&&(t=t.replace(a,"")),2&n&&(t=t.replace(f,"")),t};t.exports=s},function(t,n){t.exports={}},function(t,n,r){var e=r(4);t.exports=function(t,n){if(!e(t)||t._t!==n)throw TypeError("Incompatible receiver, "+n+" required!");return t}},function(t,n,r){var e=r(20);t.exports=Object("z").propertyIsEnumerable(0)?Object:function(t){return"String"==e(t)?t.split(""):Object(t)}},function(t,n){n.f={}.propertyIsEnumerable},function(t,n,r){var e=r(20),i=r(5)("toStringTag"),o="Arguments"==e(function(){return arguments}());t.exports=function(t){var n,r,u;return void 0===t?"Undefined":null===t?"Null":"string"==typeof(r=function(t,n){try{return t[n]}catch(t){}}(n=Object(t),i))?r:o?e(n):"Object"==(u=e(n))&&"function"==typeof n.callee?"Arguments":u}},function(t,n,r){var e=r(18),i=r(2),o=i["__core-js_shared__"]||(i["__core-js_shared__"]={});(t.exports=function(t,n){return o[t]||(o[t]=void 0!==n?n:{})})("versions",[]).push({version:e.version,mode:r(30)?"pure":"global",copyright:"© 2018 Denis Pushkarev (zloirock.ru)"})},function(t,n,r){var e=r(15),i=r(8),o=r(35);t.exports=function(t){return function(n,r,u){var c,a=e(n),f=i(a.length),s=o(u,f);if(t&&r!=r){for(;f>s;)if((c=a[s++])!=c)return!0}else for(;f>s;s++)if((t||s in a)&&a[s]===r)return t||s||0;return!t&&-1}}},function(t,n){n.f=Object.getOwnPropertySymbols},function(t,n,r){var e=r(20);t.exports=Array.isArray||function(t){return"Array"==e(t)}},function(t,n,r){var e=r(4),i=r(20),o=r(5)("match");t.exports=function(t){var n;return e(t)&&(void 0!==(n=t[o])?!!n:"RegExp"==i(t))}},function(t,n,r){var e=r(5)("iterator"),i=!1;try{var 
o=[7][e]();o.return=function(){i=!0},Array.from(o,function(){throw 2})}catch(t){}t.exports=function(t,n){if(!n&&!i)return!1;var r=!1;try{var o=[7],u=o[e]();u.next=function(){return{done:r=!0}},o[e]=function(){return u},t(o)}catch(t){}return r}},function(t,n,r){"use strict";var e=r(1);t.exports=function(){var t=e(this),n="";return t.global&&(n+="g"),t.ignoreCase&&(n+="i"),t.multiline&&(n+="m"),t.unicode&&(n+="u"),t.sticky&&(n+="y"),n}},function(t,n,r){"use strict";var e=r(11),i=r(12),o=r(3),u=r(23),c=r(5);t.exports=function(t,n,r){var a=c(t),f=r(u,a,""[t]),s=f[0],l=f[1];o(function(){var n={};return n[a]=function(){return 7},7!=""[t](n)})&&(i(String.prototype,t,s),e(RegExp.prototype,a,2==n?function(t,n){return l.call(t,this,n)}:function(t){return l.call(t,this)}))}},function(t,n,r){var e=r(1),i=r(10),o=r(5)("species");t.exports=function(t,n){var r,u=e(t).constructor;return void 0===u||null==(r=e(u)[o])?n:i(r)}},function(t,n,r){var e=r(2).navigator;t.exports=e&&e.userAgent||""},function(t,n,r){"use strict";var e=r(2),i=r(0),o=r(12),u=r(41),c=r(29),a=r(40),f=r(39),s=r(4),l=r(3),h=r(54),v=r(42),p=r(72);t.exports=function(t,n,r,d,g,y){var m=e[t],b=m,w=g?"set":"add",S=b&&b.prototype,x={},_=function(t){var n=S[t];o(S,t,"delete"==t?function(t){return!(y&&!s(t))&&n.call(this,0===t?0:t)}:"has"==t?function(t){return!(y&&!s(t))&&n.call(this,0===t?0:t)}:"get"==t?function(t){return y&&!s(t)?void 0:n.call(this,0===t?0:t)}:"add"==t?function(t){return n.call(this,0===t?0:t),this}:function(t,r){return n.call(this,0===t?0:t,r),this})};if("function"==typeof b&&(y||S.forEach&&!l(function(){(new b).entries().next()}))){var E=new b,O=E[w](y?{}:-0,1)!=E,M=l(function(){E.has(1)}),P=h(function(t){new b(t)}),F=!y&&l(function(){for(var t=new b,n=5;n--;)t[w](n,n);return!t.has(-0)});P||((b=n(function(n,r){f(n,b,t);var e=p(new m,n,b);return null!=r&&a(r,g,e[w],e),e})).prototype=S,S.constructor=b),(M||F)&&(_("delete"),_("has"),g&&_("get")),(F||O)&&_(w),y&&S.clear&&delete S.clear}else b=d.getConstructor(n,t,g,w),u(b.prototype,r),c.NEED=!0;return v(b,t),x[t]=b,i(i.G+i.W+i.F*(b!=m),x),y||d.setStrong(b,t,g),b}},function(t,n,r){for(var e,i=r(2),o=r(11),u=r(33),c=u("typed_array"),a=u("view"),f=!(!i.ArrayBuffer||!i.DataView),s=f,l=0,h="Int8Array,Uint8Array,Uint8ClampedArray,Int16Array,Uint16Array,Int32Array,Uint32Array,Float32Array,Float64Array".split(",");l<9;)(e=i[h[l++]])?(o(e.prototype,c,!0),o(e.prototype,a,!0)):s=!1;t.exports={ABV:f,CONSTR:s,TYPED:c,VIEW:a}},function(t,n,r){"use strict";t.exports=r(30)||!r(3)(function(){var t=Math.random();__defineSetter__.call(null,t,function(){}),delete r(2)[t]})},function(t,n,r){"use strict";var e=r(0);t.exports=function(t){e(e.S,t,{of:function(){for(var t=arguments.length,n=new Array(t);t--;)n[t]=arguments[t];return new this(n)}})}},function(t,n,r){"use strict";var e=r(0),i=r(10),o=r(19),u=r(40);t.exports=function(t){e(e.S,t,{from:function(t){var n,r,e,c,a=arguments[1];return i(this),(n=void 0!==a)&&i(a),null==t?new this:(r=[],n?(e=0,c=o(a,arguments[2],2),u(t,!1,function(t){r.push(c(t,e++))})):u(t,!1,r.push,r),new this(r))}})}},function(t,n){var r;r=function(){return this}();try{r=r||new Function("return this")()}catch(t){"object"==typeof window&&(r=window)}t.exports=r},function(t,n,r){var e=r(4),i=r(2).document,o=e(i)&&e(i.createElement);t.exports=function(t){return o?i.createElement(t):{}}},function(t,n,r){var e=r(2),i=r(18),o=r(30),u=r(92),c=r(7).f;t.exports=function(t){var n=i.Symbol||(i.Symbol=o?{}:e.Symbol||{});"_"==t.charAt(0)||t in n||c(n,t,{value:u.f(t)})}},function(t,n,r){var 
e=r(49)("keys"),i=r(33);t.exports=function(t){return e[t]||(e[t]=i(t))}},function(t,n){t.exports="constructor,hasOwnProperty,isPrototypeOf,propertyIsEnumerable,toLocaleString,toString,valueOf".split(",")},function(t,n,r){var e=r(2).document;t.exports=e&&e.documentElement},function(t,n,r){var e=r(4),i=r(1),o=function(t,n){if(i(t),!e(n)&&null!==n)throw TypeError(n+": can't set as prototype!")};t.exports={set:Object.setPrototypeOf||("__proto__"in{}?function(t,n,e){try{(e=r(19)(Function.call,r(16).f(Object.prototype,"__proto__").set,2))(t,[]),n=!(t instanceof Array)}catch(t){n=!0}return function(t,r){return o(t,r),n?t.__proto__=r:e(t,r),t}}({},!1):void 0),check:o}},function(t,n){t.exports="\t\n\v\f\r   ᠎              \u2028\u2029\ufeff"},function(t,n,r){var e=r(4),i=r(70).set;t.exports=function(t,n,r){var o,u=n.constructor;return u!==r&&"function"==typeof u&&(o=u.prototype)!==r.prototype&&e(o)&&i&&i(t,o),t}},function(t,n,r){"use strict";var e=r(24),i=r(23);t.exports=function(t){var n=String(i(this)),r="",o=e(t);if(o<0||o==1/0)throw RangeError("Count can't be negative");for(;o>0;(o>>>=1)&&(n+=n))1&o&&(r+=n);return r}},function(t,n){t.exports=Math.sign||function(t){return 0==(t=+t)||t!=t?t:t<0?-1:1}},function(t,n){var r=Math.expm1;t.exports=!r||r(10)>22025.465794806718||r(10)<22025.465794806718||-2e-17!=r(-2e-17)?function(t){return 0==(t=+t)?t:t>-1e-6&&t<1e-6?t+t*t/2:Math.exp(t)-1}:r},function(t,n,r){var e=r(24),i=r(23);t.exports=function(t){return function(n,r){var o,u,c=String(i(n)),a=e(r),f=c.length;return a<0||a>=f?t?"":void 0:(o=c.charCodeAt(a))<55296||o>56319||a+1===f||(u=c.charCodeAt(a+1))<56320||u>57343?t?c.charAt(a):o:t?c.slice(a,a+2):u-56320+(o-55296<<10)+65536}}},function(t,n,r){"use strict";var e=r(30),i=r(0),o=r(12),u=r(11),c=r(44),a=r(78),f=r(42),s=r(17),l=r(5)("iterator"),h=!([].keys&&"next"in[].keys()),v=function(){return this};t.exports=function(t,n,r,p,d,g,y){a(r,n,p);var m,b,w,S=function(t){if(!h&&t in O)return O[t];switch(t){case"keys":case"values":return function(){return new r(this,t)}}return function(){return new r(this,t)}},x=n+" Iterator",_="values"==d,E=!1,O=t.prototype,M=O[l]||O["@@iterator"]||d&&O[d],P=M||S(d),F=d?_?S("entries"):P:void 0,A="Array"==n&&O.entries||M;if(A&&(w=s(A.call(new t)))!==Object.prototype&&w.next&&(f(w,x,!0),e||"function"==typeof w[l]||u(w,l,v)),_&&M&&"values"!==M.name&&(E=!0,P=function(){return M.call(this)}),e&&!y||!h&&!E&&O[l]||u(O,l,P),c[n]=P,c[x]=v,d)if(m={values:_?P:S("values"),keys:g?P:S("keys"),entries:F},y)for(b in m)b in O||o(O,b,m[b]);else i(i.P+i.F*(h||E),n,m);return m}},function(t,n,r){"use strict";var e=r(36),i=r(32),o=r(42),u={};r(11)(u,r(5)("iterator"),function(){return this}),t.exports=function(t,n,r){t.prototype=e(u,{next:i(1,r)}),o(t,n+" Iterator")}},function(t,n,r){var e=r(53),i=r(23);t.exports=function(t,n,r){if(e(n))throw TypeError("String#"+r+" doesn't accept regex!");return String(i(t))}},function(t,n,r){var e=r(5)("match");t.exports=function(t){var n=/./;try{"/./"[t](n)}catch(r){try{return n[e]=!1,!"/./"[t](n)}catch(t){}}return!0}},function(t,n,r){var e=r(44),i=r(5)("iterator"),o=Array.prototype;t.exports=function(t){return void 0!==t&&(e.Array===t||o[i]===t)}},function(t,n,r){"use strict";var e=r(7),i=r(32);t.exports=function(t,n,r){n in t?e.f(t,n,i(0,r)):t[n]=r}},function(t,n,r){var e=r(48),i=r(5)("iterator"),o=r(44);t.exports=r(18).getIteratorMethod=function(t){if(null!=t)return t[i]||t["@@iterator"]||o[e(t)]}},function(t,n,r){var e=r(221);t.exports=function(t,n){return new(e(t))(n)}},function(t,n,r){"use strict";var 
e=r(9),i=r(35),o=r(8);t.exports=function(t){for(var n=e(this),r=o(n.length),u=arguments.length,c=i(u>1?arguments[1]:void 0,r),a=u>2?arguments[2]:void 0,f=void 0===a?r:i(a,r);f>c;)n[c++]=t;return n}},function(t,n,r){"use strict";var e=r(31),i=r(108),o=r(44),u=r(15);t.exports=r(77)(Array,"Array",function(t,n){this._t=u(t),this._i=0,this._k=n},function(){var t=this._t,n=this._k,r=this._i++;return!t||r>=t.length?(this._t=void 0,i(1)):i(0,"keys"==n?r:"values"==n?t[r]:[r,t[r]])},"values"),o.Arguments=o.Array,e("keys"),e("values"),e("entries")},function(t,n,r){var e,i,o,u=r(19),c=r(98),a=r(69),f=r(65),s=r(2),l=s.process,h=s.setImmediate,v=s.clearImmediate,p=s.MessageChannel,d=s.Dispatch,g=0,y={},m=function(){var t=+this;if(y.hasOwnProperty(t)){var n=y[t];delete y[t],n()}},b=function(t){m.call(t.data)};h&&v||(h=function(t){for(var n=[],r=1;arguments.length>r;)n.push(arguments[r++]);return y[++g]=function(){c("function"==typeof t?t:Function(t),n)},e(g),g},v=function(t){delete y[t]},"process"==r(20)(l)?e=function(t){l.nextTick(u(m,t,1))}:d&&d.now?e=function(t){d.now(u(m,t,1))}:p?(o=(i=new p).port2,i.port1.onmessage=b,e=u(o.postMessage,o,1)):s.addEventListener&&"function"==typeof postMessage&&!s.importScripts?(e=function(t){s.postMessage(t+"","*")},s.addEventListener("message",b,!1)):e="onreadystatechange"in f("script")?function(t){a.appendChild(f("script")).onreadystatechange=function(){a.removeChild(this),m.call(t)}}:function(t){setTimeout(u(m,t,1),0)}),t.exports={set:h,clear:v}},function(t,n,r){var e=r(2),i=r(87).set,o=e.MutationObserver||e.WebKitMutationObserver,u=e.process,c=e.Promise,a="process"==r(20)(u);t.exports=function(){var t,n,r,f=function(){var e,i;for(a&&(e=u.domain)&&e.exit();t;){i=t.fn,t=t.next;try{i()}catch(e){throw t?r():n=void 0,e}}n=void 0,e&&e.enter()};if(a)r=function(){u.nextTick(f)};else if(!o||e.navigator&&e.navigator.standalone)if(c&&c.resolve){var s=c.resolve(void 0);r=function(){s.then(f)}}else r=function(){i.call(e,f)};else{var l=!0,h=document.createTextNode("");new o(f).observe(h,{characterData:!0}),r=function(){h.data=l=!l}}return function(e){var i={fn:e,next:void 0};n&&(n.next=i),t||(t=i,r()),n=i}}},function(t,n,r){"use strict";var e=r(10);function i(t){var n,r;this.promise=new t(function(t,e){if(void 0!==n||void 0!==r)throw TypeError("Bad Promise constructor");n=t,r=e}),this.resolve=e(n),this.reject=e(r)}t.exports.f=function(t){return new i(t)}},function(t,n,r){"use strict";var e=r(2),i=r(6),o=r(30),u=r(60),c=r(11),a=r(41),f=r(3),s=r(39),l=r(24),h=r(8),v=r(117),p=r(37).f,d=r(7).f,g=r(85),y=r(42),m="prototype",b="Wrong index!",w=e.ArrayBuffer,S=e.DataView,x=e.Math,_=e.RangeError,E=e.Infinity,O=w,M=x.abs,P=x.pow,F=x.floor,A=x.log,j=x.LN2,I=i?"_b":"buffer",N=i?"_l":"byteLength",L=i?"_o":"byteOffset";function T(t,n,r){var e,i,o,u=new Array(r),c=8*r-n-1,a=(1<<c)-1,f=a>>1,s=23===n?P(2,-24)-P(2,-77):0,l=0,h=t<0||0===t&&1/t<0?1:0;for((t=M(t))!=t||t===E?(i=t!=t?1:0,e=a):(e=F(A(t)/j),t*(o=P(2,-e))<1&&(e--,o*=2),(t+=e+f>=1?s/o:s*P(2,1-f))*o>=2&&(e++,o/=2),e+f>=a?(i=0,e=a):e+f>=1?(i=(t*o-1)*P(2,n),e+=f):(i=t*P(2,f-1)*P(2,n),e=0));n>=8;u[l++]=255&i,i/=256,n-=8);for(e=e<<n|i,c+=n;c>0;u[l++]=255&e,e/=256,c-=8);return u[--l]|=128*h,u}function k(t,n,r){var e,i=8*r-n-1,o=(1<<i)-1,u=o>>1,c=i-7,a=r-1,f=t[a--],s=127&f;for(f>>=7;c>0;s=256*s+t[a],a--,c-=8);for(e=s&(1<<-c)-1,s>>=-c,c+=n;c>0;e=256*e+t[a],a--,c-=8);if(0===s)s=1-u;else{if(s===o)return e?NaN:f?-E:E;e+=P(2,n),s-=u}return(f?-1:1)*e*P(2,s-n)}function R(t){return t[3]<<24|t[2]<<16|t[1]<<8|t[0]}function C(t){return[255&t]}function 
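/* ArrayBuffer/DataView polyfill helpers: little-endian byte packing for 8/16/32-bit integers plus IEEE-754 float encode/decode */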
D(t){return[255&t,t>>8&255]}function W(t){return[255&t,t>>8&255,t>>16&255,t>>24&255]}function U(t){return T(t,52,8)}function G(t){return T(t,23,4)}function V(t,n,r){d(t[m],n,{get:function(){return this[r]}})}function B(t,n,r,e){var i=v(+r);if(i+n>t[N])throw _(b);var o=t[I]._b,u=i+t[L],c=o.slice(u,u+n);return e?c:c.reverse()}function z(t,n,r,e,i,o){var u=v(+r);if(u+n>t[N])throw _(b);for(var c=t[I]._b,a=u+t[L],f=e(+i),s=0;s<n;s++)c[a+s]=f[o?s:n-s-1]}if(u.ABV){if(!f(function(){w(1)})||!f(function(){new w(-1)})||f(function(){return new w,new w(1.5),new w(NaN),"ArrayBuffer"!=w.name})){for(var q,J=(w=function(t){return s(this,w),new O(v(t))})[m]=O[m],Y=p(O),K=0;Y.length>K;)(q=Y[K++])in w||c(w,q,O[q]);o||(J.constructor=w)}var H=new S(new w(2)),X=S[m].setInt8;H.setInt8(0,2147483648),H.setInt8(1,2147483649),!H.getInt8(0)&&H.getInt8(1)||a(S[m],{setInt8:function(t,n){X.call(this,t,n<<24>>24)},setUint8:function(t,n){X.call(this,t,n<<24>>24)}},!0)}else w=function(t){s(this,w,"ArrayBuffer");var n=v(t);this._b=g.call(new Array(n),0),this[N]=n},S=function(t,n,r){s(this,S,"DataView"),s(t,w,"DataView");var e=t[N],i=l(n);if(i<0||i>e)throw _("Wrong offset!");if(i+(r=void 0===r?e-i:h(r))>e)throw _("Wrong length!");this[I]=t,this[L]=i,this[N]=r},i&&(V(w,"byteLength","_l"),V(S,"buffer","_b"),V(S,"byteLength","_l"),V(S,"byteOffset","_o")),a(S[m],{getInt8:function(t){return B(this,1,t)[0]<<24>>24},getUint8:function(t){return B(this,1,t)[0]},getInt16:function(t){var n=B(this,2,t,arguments[1]);return(n[1]<<8|n[0])<<16>>16},getUint16:function(t){var n=B(this,2,t,arguments[1]);return n[1]<<8|n[0]},getInt32:function(t){return R(B(this,4,t,arguments[1]))},getUint32:function(t){return R(B(this,4,t,arguments[1]))>>>0},getFloat32:function(t){return k(B(this,4,t,arguments[1]),23,4)},getFloat64:function(t){return k(B(this,8,t,arguments[1]),52,8)},setInt8:function(t,n){z(this,1,t,C,n)},setUint8:function(t,n){z(this,1,t,C,n)},setInt16:function(t,n){z(this,2,t,D,n,arguments[2])},setUint16:function(t,n){z(this,2,t,D,n,arguments[2])},setInt32:function(t,n){z(this,4,t,W,n,arguments[2])},setUint32:function(t,n){z(this,4,t,W,n,arguments[2])},setFloat32:function(t,n){z(this,4,t,G,n,arguments[2])},setFloat64:function(t,n){z(this,8,t,U,n,arguments[2])}});y(w,"ArrayBuffer"),y(S,"DataView"),c(S[m],u.VIEW,!0),n.ArrayBuffer=w,n.DataView=S},function(t,n,r){t.exports=!r(6)&&!r(3)(function(){return 7!=Object.defineProperty(r(65)("div"),"a",{get:function(){return 7}}).a})},function(t,n,r){n.f=r(5)},function(t,n,r){var e=r(14),i=r(15),o=r(50)(!1),u=r(67)("IE_PROTO");t.exports=function(t,n){var r,c=i(t),a=0,f=[];for(r in c)r!=u&&e(c,r)&&f.push(r);for(;n.length>a;)e(c,r=n[a++])&&(~o(f,r)||f.push(r));return f}},function(t,n,r){var e=r(7),i=r(1),o=r(34);t.exports=r(6)?Object.defineProperties:function(t,n){i(t);for(var r,u=o(n),c=u.length,a=0;c>a;)e.f(t,r=u[a++],n[r]);return t}},function(t,n,r){var e=r(15),i=r(37).f,o={}.toString,u="object"==typeof window&&window&&Object.getOwnPropertyNames?Object.getOwnPropertyNames(window):[];t.exports.f=function(t){return u&&"[object Window]"==o.call(t)?function(t){try{return i(t)}catch(t){return u.slice()}}(t):i(e(t))}},function(t,n,r){"use strict";var e=r(34),i=r(51),o=r(47),u=r(9),c=r(46),a=Object.assign;t.exports=!a||r(3)(function(){var t={},n={},r=Symbol(),e="abcdefghijklmnopqrst";return t[r]=7,e.split("").forEach(function(t){n[t]=t}),7!=a({},t)[r]||Object.keys(a({},n)).join("")!=e})?function(t,n){for(var r=u(t),a=arguments.length,f=1,s=i.f,l=o.f;a>f;)for(var 
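/* Object.assign fallback: copies own enumerable string keys and symbols, filtering each through propertyIsEnumerable */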
h,v=c(arguments[f++]),p=s?e(v).concat(s(v)):e(v),d=p.length,g=0;d>g;)l.call(v,h=p[g++])&&(r[h]=v[h]);return r}:a},function(t,n,r){"use strict";var e=r(10),i=r(4),o=r(98),u=[].slice,c={};t.exports=Function.bind||function(t){var n=e(this),r=u.call(arguments,1),a=function(){var e=r.concat(u.call(arguments));return this instanceof a?function(t,n,r){if(!(n in c)){for(var e=[],i=0;i<n;i++)e[i]="a["+i+"]";c[n]=Function("F,a","return new F("+e.join(",")+")")}return c[n](t,r)}(n,e.length,e):o(n,e,t)};return i(n.prototype)&&(a.prototype=n.prototype),a}},function(t,n){t.exports=function(t,n,r){var e=void 0===r;switch(n.length){case 0:return e?t():t.call(r);case 1:return e?t(n[0]):t.call(r,n[0]);case 2:return e?t(n[0],n[1]):t.call(r,n[0],n[1]);case 3:return e?t(n[0],n[1],n[2]):t.call(r,n[0],n[1],n[2]);case 4:return e?t(n[0],n[1],n[2],n[3]):t.call(r,n[0],n[1],n[2],n[3])}return t.apply(r,n)}},function(t,n,r){var e=r(2).parseInt,i=r(43).trim,o=r(71),u=/^[-+]?0[xX]/;t.exports=8!==e(o+"08")||22!==e(o+"0x16")?function(t,n){var r=i(String(t),3);return e(r,n>>>0||(u.test(r)?16:10))}:e},function(t,n,r){var e=r(2).parseFloat,i=r(43).trim;t.exports=1/e(r(71)+"-0")!=-1/0?function(t){var n=i(String(t),3),r=e(n);return 0===r&&"-"==n.charAt(0)?-0:r}:e},function(t,n,r){var e=r(20);t.exports=function(t,n){if("number"!=typeof t&&"Number"!=e(t))throw TypeError(n);return+t}},function(t,n,r){var e=r(4),i=Math.floor;t.exports=function(t){return!e(t)&&isFinite(t)&&i(t)===t}},function(t,n){t.exports=Math.log1p||function(t){return(t=+t)>-1e-8&&t<1e-8?t-t*t/2:Math.log(1+t)}},function(t,n,r){var e=r(74),i=Math.pow,o=i(2,-52),u=i(2,-23),c=i(2,127)*(2-u),a=i(2,-126);t.exports=Math.fround||function(t){var n,r,i=Math.abs(t),f=e(t);return i<a?f*(i/a/u+1/o-1/o)*a*u:(r=(n=(1+u/o)*i)-(n-i))>c||r!=r?f*(1/0):f*r}},function(t,n,r){var e=r(1);t.exports=function(t,n,r,i){try{return i?n(e(r)[0],r[1]):n(r)}catch(n){var o=t.return;throw void 0!==o&&e(o.call(t)),n}}},function(t,n,r){var e=r(10),i=r(9),o=r(46),u=r(8);t.exports=function(t,n,r,c,a){e(n);var f=i(t),s=o(f),l=u(f.length),h=a?l-1:0,v=a?-1:1;if(r<2)for(;;){if(h in s){c=s[h],h+=v;break}if(h+=v,a?h<0:l<=h)throw TypeError("Reduce of empty array with no initial value")}for(;a?h>=0:l>h;h+=v)h in s&&(c=n(c,s[h],h,f));return c}},function(t,n,r){"use strict";var e=r(9),i=r(35),o=r(8);t.exports=[].copyWithin||function(t,n){var r=e(this),u=o(r.length),c=i(t,u),a=i(n,u),f=arguments.length>2?arguments[2]:void 0,s=Math.min((void 0===f?u:i(f,u))-a,u-c),l=1;for(a<c&&c<a+s&&(l=-1,a+=s-1,c+=s-1);s-- >0;)a in r?r[c]=r[a]:delete r[c],c+=l,a+=l;return r}},function(t,n){t.exports=function(t,n){return{value:n,done:!!t}}},function(t,n,r){r(6)&&"g"!=/./g.flags&&r(7).f(RegExp.prototype,"flags",{configurable:!0,get:r(55)})},function(t,n){t.exports=function(t){try{return{e:!1,v:t()}}catch(t){return{e:!0,v:t}}}},function(t,n,r){var e=r(1),i=r(4),o=r(89);t.exports=function(t,n){if(e(t),i(n)&&n.constructor===t)return n;var r=o.f(t);return(0,r.resolve)(n),r.promise}},function(t,n,r){"use strict";var e=r(113),i=r(45);t.exports=r(59)("Map",function(t){return function(){return t(this,arguments.length>0?arguments[0]:void 0)}},{get:function(t){var n=e.getEntry(i(this,"Map"),t);return n&&n.v},set:function(t,n){return e.def(i(this,"Map"),0===t?0:t,n)}},e,!0)},function(t,n,r){"use strict";var e=r(7).f,i=r(36),o=r(41),u=r(19),c=r(39),a=r(40),f=r(77),s=r(108),l=r(38),h=r(6),v=r(29).fastKey,p=r(45),d=h?"_s":"size",g=function(t,n){var r,e=v(n);if("F"!==e)return t._i[e];for(r=t._f;r;r=r.n)if(r.k==n)return 
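/* collection-strong (Map/Set backbone): entries form a doubly-linked list and are indexed by a per-object fastKey */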
r};t.exports={getConstructor:function(t,n,r,f){var s=t(function(t,e){c(t,s,n,"_i"),t._t=n,t._i=i(null),t._f=void 0,t._l=void 0,t[d]=0,null!=e&&a(e,r,t[f],t)});return o(s.prototype,{clear:function(){for(var t=p(this,n),r=t._i,e=t._f;e;e=e.n)e.r=!0,e.p&&(e.p=e.p.n=void 0),delete r[e.i];t._f=t._l=void 0,t[d]=0},delete:function(t){var r=p(this,n),e=g(r,t);if(e){var i=e.n,o=e.p;delete r._i[e.i],e.r=!0,o&&(o.n=i),i&&(i.p=o),r._f==e&&(r._f=i),r._l==e&&(r._l=o),r[d]--}return!!e},forEach:function(t){p(this,n);for(var r,e=u(t,arguments.length>1?arguments[1]:void 0,3);r=r?r.n:this._f;)for(e(r.v,r.k,this);r&&r.r;)r=r.p},has:function(t){return!!g(p(this,n),t)}}),h&&e(s.prototype,"size",{get:function(){return p(this,n)[d]}}),s},def:function(t,n,r){var e,i,o=g(t,n);return o?o.v=r:(t._l=o={i:i=v(n,!0),k:n,v:r,p:e=t._l,n:void 0,r:!1},t._f||(t._f=o),e&&(e.n=o),t[d]++,"F"!==i&&(t._i[i]=o)),t},getEntry:g,setStrong:function(t,n,r){f(t,n,function(t,r){this._t=p(t,n),this._k=r,this._l=void 0},function(){for(var t=this._k,n=this._l;n&&n.r;)n=n.p;return this._t&&(this._l=n=n?n.n:this._t._f)?s(0,"keys"==t?n.k:"values"==t?n.v:[n.k,n.v]):(this._t=void 0,s(1))},r?"entries":"values",!r,!0),l(n)}}},function(t,n,r){"use strict";var e=r(113),i=r(45);t.exports=r(59)("Set",function(t){return function(){return t(this,arguments.length>0?arguments[0]:void 0)}},{add:function(t){return e.def(i(this,"Set"),t=0===t?0:t,t)}},e)},function(t,n,r){"use strict";var e,i=r(26)(0),o=r(12),u=r(29),c=r(96),a=r(116),f=r(4),s=r(3),l=r(45),h=u.getWeak,v=Object.isExtensible,p=a.ufstore,d={},g=function(t){return function(){return t(this,arguments.length>0?arguments[0]:void 0)}},y={get:function(t){if(f(t)){var n=h(t);return!0===n?p(l(this,"WeakMap")).get(t):n?n[this._i]:void 0}},set:function(t,n){return a.def(l(this,"WeakMap"),t,n)}},m=t.exports=r(59)("WeakMap",g,y,a,!0,!0);s(function(){return 7!=(new m).set((Object.freeze||Object)(d),7).get(d)})&&(c((e=a.getConstructor(g,"WeakMap")).prototype,y),u.NEED=!0,i(["delete","has","get","set"],function(t){var n=m.prototype,r=n[t];o(n,t,function(n,i){if(f(n)&&!v(n)){this._f||(this._f=new e);var o=this._f[t](n,i);return"set"==t?this:o}return r.call(this,n,i)})}))},function(t,n,r){"use strict";var e=r(41),i=r(29).getWeak,o=r(1),u=r(4),c=r(39),a=r(40),f=r(26),s=r(14),l=r(45),h=f(5),v=f(6),p=0,d=function(t){return t._l||(t._l=new g)},g=function(){this.a=[]},y=function(t,n){return h(t.a,function(t){return t[0]===n})};g.prototype={get:function(t){var n=y(this,t);if(n)return n[1]},has:function(t){return!!y(this,t)},set:function(t,n){var r=y(this,t);r?r[1]=n:this.a.push([t,n])},delete:function(t){var n=v(this.a,function(n){return n[0]===t});return~n&&this.a.splice(n,1),!!~n}},t.exports={getConstructor:function(t,n,r,o){var f=t(function(t,e){c(t,f,n,"_i"),t._t=n,t._i=p++,t._l=void 0,null!=e&&a(e,r,t[o],t)});return e(f.prototype,{delete:function(t){if(!u(t))return!1;var r=i(t);return!0===r?d(l(this,n)).delete(t):r&&s(r,this._i)&&delete r[this._i]},has:function(t){if(!u(t))return!1;var r=i(t);return!0===r?d(l(this,n)).has(t):r&&s(r,this._i)}}),f},def:function(t,n,r){var e=i(o(n),!0);return!0===e?d(t).set(n,r):e[t._i]=r,t},ufstore:d}},function(t,n,r){var e=r(24),i=r(8);t.exports=function(t){if(void 0===t)return 0;var n=e(t),r=i(n);if(n!==r)throw RangeError("Wrong length!");return r}},function(t,n,r){var e=r(37),i=r(51),o=r(1),u=r(2).Reflect;t.exports=u&&u.ownKeys||function(t){var n=e.f(o(t)),r=i.f;return r?n.concat(r(t)):n}},function(t,n,r){"use strict";var 
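/* FlattenIntoArray: recursive spread helper shared by the Array#flatten and Array#flatMap proposals */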
e=r(52),i=r(4),o=r(8),u=r(19),c=r(5)("isConcatSpreadable");t.exports=function t(n,r,a,f,s,l,h,v){for(var p,d,g=s,y=0,m=!!h&&u(h,v,3);y<f;){if(y in a){if(p=m?m(a[y],y,r):a[y],d=!1,i(p)&&(d=void 0!==(d=p[c])?!!d:e(p)),d&&l>0)g=t(n,r,p,o(p.length),g,l-1)-1;else{if(g>=9007199254740991)throw TypeError();n[g]=p}g++}y++}return g}},function(t,n,r){var e=r(8),i=r(73),o=r(23);t.exports=function(t,n,r,u){var c=String(o(t)),a=c.length,f=void 0===r?" ":String(r),s=e(n);if(s<=a||""==f)return c;var l=s-a,h=i.call(f,Math.ceil(l/f.length));return h.length>l&&(h=h.slice(0,l)),u?h+c:c+h}},function(t,n,r){var e=r(34),i=r(15),o=r(47).f;t.exports=function(t){return function(n){for(var r,u=i(n),c=e(u),a=c.length,f=0,s=[];a>f;)o.call(u,r=c[f++])&&s.push(t?[r,u[r]]:u[r]);return s}}},function(t,n,r){var e=r(48),i=r(123);t.exports=function(t){return function(){if(e(this)!=t)throw TypeError(t+"#toJSON isn't generic");return i(this)}}},function(t,n,r){var e=r(40);t.exports=function(t,n){var r=[];return e(t,!1,r.push,r,n),r}},function(t,n){t.exports=Math.scale||function(t,n,r,e,i){return 0===arguments.length||t!=t||n!=n||r!=r||e!=e||i!=i?NaN:t===1/0||t===-1/0?t:(t-n)*(i-e)/(r-n)+e}},function(t,n){t.exports={getMajor:function(t){return parseInt(t.split(".")[0])},getMinor:function(t){return parseInt(t.split(".")[1])},getPatch:function(t){return parseInt(t.split("-")[0].split(".")[2])},isGreaterThanOrEqualTo:function(t,n){var r=this.getMajor(t),e=this.getMinor(t),i=this.getPatch(t),o=this.getMajor(n),u=this.getMinor(n),c=this.getPatch(n);return r>o||(r===o&&e>u||(r===o&&e===u&&i>c||r===o&&e===u&&i===c))}}},function(t,n,r){t.exports=r.p+"client_frame.html"},function(t,n,r){r(128),t.exports=r(331)},function(t,n,r){"use strict";(function(t){function e(){return t._babelPolyfill||"undefined"!=typeof window&&window._babelPolyfill?null:r(129)}Object.defineProperty(n,"__esModule",{value:!0}),n.idempotentBabelPolyfill=e,n.default=e()}).call(this,r(64))},function(t,n,r){"use strict";(function(t){if(r(130),r(327),r(328),t._babelPolyfill)throw new Error("only one instance of babel-polyfill is allowed");t._babelPolyfill=!0;var n="defineProperty";function 
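/* babel-polyfill bootstrap: throws if loaded twice, aliases String#padStart/padEnd as padLeft/padRight, and exposes array methods as Array statics */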
e(t,r,e){t[r]||Object[n](t,r,{writable:!0,configurable:!0,value:e})}e(String.prototype,"padLeft","".padStart),e(String.prototype,"padRight","".padEnd),"pop,reverse,shift,keys,values,entries,indexOf,every,some,forEach,map,filter,find,findIndex,includes,join,slice,concat,push,splice,unshift,sort,lastIndexOf,reduce,reduceRight,copyWithin,fill".split(",").forEach(function(t){[][t]&&e(Array,t,Function.call.bind([][t]))})}).call(this,r(64))},function(t,n,r){r(131),r(133),r(134),r(135),r(136),r(137),r(138),r(139),r(140),r(141),r(142),r(143),r(144),r(145),r(146),r(147),r(149),r(150),r(151),r(152),r(153),r(154),r(155),r(156),r(157),r(158),r(159),r(160),r(161),r(162),r(163),r(164),r(165),r(166),r(167),r(168),r(169),r(170),r(171),r(172),r(173),r(174),r(175),r(176),r(177),r(178),r(179),r(180),r(181),r(182),r(183),r(184),r(185),r(186),r(187),r(188),r(189),r(190),r(191),r(192),r(193),r(194),r(195),r(196),r(197),r(198),r(199),r(200),r(201),r(202),r(203),r(204),r(205),r(206),r(207),r(208),r(209),r(211),r(212),r(214),r(215),r(216),r(217),r(218),r(219),r(220),r(222),r(223),r(224),r(225),r(226),r(227),r(228),r(229),r(230),r(231),r(232),r(233),r(234),r(86),r(235),r(236),r(109),r(237),r(238),r(239),r(240),r(241),r(112),r(114),r(115),r(242),r(243),r(244),r(245),r(246),r(247),r(248),r(249),r(250),r(251),r(252),r(253),r(254),r(255),r(256),r(257),r(258),r(259),r(260),r(261),r(262),r(263),r(264),r(265),r(266),r(267),r(268),r(269),r(270),r(271),r(272),r(273),r(274),r(275),r(276),r(277),r(278),r(279),r(280),r(281),r(282),r(283),r(284),r(285),r(286),r(287),r(288),r(289),r(290),r(291),r(292),r(293),r(294),r(295),r(296),r(297),r(298),r(299),r(300),r(301),r(302),r(303),r(304),r(305),r(306),r(307),r(308),r(309),r(310),r(311),r(312),r(313),r(314),r(315),r(316),r(317),r(318),r(319),r(320),r(321),r(322),r(323),r(324),r(325),r(326),t.exports=r(18)},function(t,n,r){"use strict";var e=r(2),i=r(14),o=r(6),u=r(0),c=r(12),a=r(29).KEY,f=r(3),s=r(49),l=r(42),h=r(33),v=r(5),p=r(92),d=r(66),g=r(132),y=r(52),m=r(1),b=r(4),w=r(15),S=r(22),x=r(32),_=r(36),E=r(95),O=r(16),M=r(7),P=r(34),F=O.f,A=M.f,j=E.f,I=e.Symbol,N=e.JSON,L=N&&N.stringify,T=v("_hidden"),k=v("toPrimitive"),R={}.propertyIsEnumerable,C=s("symbol-registry"),D=s("symbols"),W=s("op-symbols"),U=Object.prototype,G="function"==typeof I,V=e.QObject,B=!V||!V.prototype||!V.prototype.findChild,z=o&&f(function(){return 7!=_(A({},"a",{get:function(){return A(this,"a",{value:7}).a}})).a})?function(t,n,r){var e=F(U,n);e&&delete U[n],A(t,n,r),e&&t!==U&&A(U,n,e)}:A,q=function(t){var n=D[t]=_(I.prototype);return n._k=t,n},J=G&&"symbol"==typeof I.iterator?function(t){return"symbol"==typeof t}:function(t){return t instanceof I},Y=function(t,n,r){return t===U&&Y(W,n,r),m(t),n=S(n,!0),m(r),i(D,n)?(r.enumerable?(i(t,T)&&t[T][n]&&(t[T][n]=!1),r=_(r,{enumerable:x(0,!1)})):(i(t,T)||A(t,T,x(1,{})),t[T][n]=!0),z(t,n,r)):A(t,n,r)},K=function(t,n){m(t);for(var r,e=g(n=w(n)),i=0,o=e.length;o>i;)Y(t,r=e[i++],n[r]);return t},H=function(t){var n=R.call(this,t=S(t,!0));return!(this===U&&i(D,t)&&!i(W,t))&&(!(n||!i(this,t)||!i(D,t)||i(this,T)&&this[T][t])||n)},X=function(t,n){if(t=w(t),n=S(n,!0),t!==U||!i(D,n)||i(W,n)){var r=F(t,n);return!r||!i(D,n)||i(t,T)&&t[T][n]||(r.enumerable=!0),r}},$=function(t){for(var n,r=j(w(t)),e=[],o=0;r.length>o;)i(D,n=r[o++])||n==T||n==a||e.push(n);return e},Z=function(t){for(var n,r=t===U,e=j(r?W:w(t)),o=[],u=0;e.length>u;)!i(D,n=e[u++])||r&&!i(U,n)||o.push(D[n]);return o};G||(c((I=function(){if(this instanceof I)throw TypeError("Symbol is not a constructor!");var 
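/* Symbol shim for engines without native symbols: Symbol() is non-constructible and hands back a uniquely keyed tagged object */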
t=h(arguments.length>0?arguments[0]:void 0),n=function(r){this===U&&n.call(W,r),i(this,T)&&i(this[T],t)&&(this[T][t]=!1),z(this,t,x(1,r))};return o&&B&&z(U,t,{configurable:!0,set:n}),q(t)}).prototype,"toString",function(){return this._k}),O.f=X,M.f=Y,r(37).f=E.f=$,r(47).f=H,r(51).f=Z,o&&!r(30)&&c(U,"propertyIsEnumerable",H,!0),p.f=function(t){return q(v(t))}),u(u.G+u.W+u.F*!G,{Symbol:I});for(var Q="hasInstance,isConcatSpreadable,iterator,match,replace,search,species,split,toPrimitive,toStringTag,unscopables".split(","),tt=0;Q.length>tt;)v(Q[tt++]);for(var nt=P(v.store),rt=0;nt.length>rt;)d(nt[rt++]);u(u.S+u.F*!G,"Symbol",{for:function(t){return i(C,t+="")?C[t]:C[t]=I(t)},keyFor:function(t){if(!J(t))throw TypeError(t+" is not a symbol!");for(var n in C)if(C[n]===t)return n},useSetter:function(){B=!0},useSimple:function(){B=!1}}),u(u.S+u.F*!G,"Object",{create:function(t,n){return void 0===n?_(t):K(_(t),n)},defineProperty:Y,defineProperties:K,getOwnPropertyDescriptor:X,getOwnPropertyNames:$,getOwnPropertySymbols:Z}),N&&u(u.S+u.F*(!G||f(function(){var t=I();return"[null]"!=L([t])||"{}"!=L({a:t})||"{}"!=L(Object(t))})),"JSON",{stringify:function(t){for(var n,r,e=[t],i=1;arguments.length>i;)e.push(arguments[i++]);if(r=n=e[1],(b(n)||void 0!==t)&&!J(t))return y(n)||(n=function(t,n){if("function"==typeof r&&(n=r.call(this,t,n)),!J(n))return n}),e[1]=n,L.apply(N,e)}}),I.prototype[k]||r(11)(I.prototype,k,I.prototype.valueOf),l(I,"Symbol"),l(Math,"Math",!0),l(e.JSON,"JSON",!0)},function(t,n,r){var e=r(34),i=r(51),o=r(47);t.exports=function(t){var n=e(t),r=i.f;if(r)for(var u,c=r(t),a=o.f,f=0;c.length>f;)a.call(t,u=c[f++])&&n.push(u);return n}},function(t,n,r){var e=r(0);e(e.S,"Object",{create:r(36)})},function(t,n,r){var e=r(0);e(e.S+e.F*!r(6),"Object",{defineProperty:r(7).f})},function(t,n,r){var e=r(0);e(e.S+e.F*!r(6),"Object",{defineProperties:r(94)})},function(t,n,r){var e=r(15),i=r(16).f;r(25)("getOwnPropertyDescriptor",function(){return function(t,n){return i(e(t),n)}})},function(t,n,r){var e=r(9),i=r(17);r(25)("getPrototypeOf",function(){return function(t){return i(e(t))}})},function(t,n,r){var e=r(9),i=r(34);r(25)("keys",function(){return function(t){return i(e(t))}})},function(t,n,r){r(25)("getOwnPropertyNames",function(){return r(95).f})},function(t,n,r){var e=r(4),i=r(29).onFreeze;r(25)("freeze",function(t){return function(n){return t&&e(n)?t(i(n)):n}})},function(t,n,r){var e=r(4),i=r(29).onFreeze;r(25)("seal",function(t){return function(n){return t&&e(n)?t(i(n)):n}})},function(t,n,r){var e=r(4),i=r(29).onFreeze;r(25)("preventExtensions",function(t){return function(n){return t&&e(n)?t(i(n)):n}})},function(t,n,r){var e=r(4);r(25)("isFrozen",function(t){return function(n){return!e(n)||!!t&&t(n)}})},function(t,n,r){var e=r(4);r(25)("isSealed",function(t){return function(n){return!e(n)||!!t&&t(n)}})},function(t,n,r){var e=r(4);r(25)("isExtensible",function(t){return function(n){return!!e(n)&&(!t||t(n))}})},function(t,n,r){var e=r(0);e(e.S+e.F,"Object",{assign:r(96)})},function(t,n,r){var e=r(0);e(e.S,"Object",{is:r(148)})},function(t,n){t.exports=Object.is||function(t,n){return t===n?0!==t||1/t==1/n:t!=t&&n!=n}},function(t,n,r){var e=r(0);e(e.S,"Object",{setPrototypeOf:r(70).set})},function(t,n,r){"use strict";var e=r(48),i={};i[r(5)("toStringTag")]="z",i+""!="[object z]"&&r(12)(Object.prototype,"toString",function(){return"[object "+e(this)+"]"},!0)},function(t,n,r){var e=r(0);e(e.P,"Function",{bind:r(97)})},function(t,n,r){var e=r(7).f,i=Function.prototype,o=/^\s*function ([^ (]*)/;"name"in 
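/* Function#name fallback: a lazy getter that recovers the name by parsing the function's source text */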
i||r(6)&&e(i,"name",{configurable:!0,get:function(){try{return(""+this).match(o)[1]}catch(t){return""}}})},function(t,n,r){"use strict";var e=r(4),i=r(17),o=r(5)("hasInstance"),u=Function.prototype;o in u||r(7).f(u,o,{value:function(t){if("function"!=typeof this||!e(t))return!1;if(!e(this.prototype))return t instanceof this;for(;t=i(t);)if(this.prototype===t)return!0;return!1}})},function(t,n,r){var e=r(0),i=r(99);e(e.G+e.F*(parseInt!=i),{parseInt:i})},function(t,n,r){var e=r(0),i=r(100);e(e.G+e.F*(parseFloat!=i),{parseFloat:i})},function(t,n,r){"use strict";var e=r(2),i=r(14),o=r(20),u=r(72),c=r(22),a=r(3),f=r(37).f,s=r(16).f,l=r(7).f,h=r(43).trim,v=e.Number,p=v,d=v.prototype,g="Number"==o(r(36)(d)),y="trim"in String.prototype,m=function(t){var n=c(t,!1);if("string"==typeof n&&n.length>2){var r,e,i,o=(n=y?n.trim():h(n,3)).charCodeAt(0);if(43===o||45===o){if(88===(r=n.charCodeAt(2))||120===r)return NaN}else if(48===o){switch(n.charCodeAt(1)){case 66:case 98:e=2,i=49;break;case 79:case 111:e=8,i=55;break;default:return+n}for(var u,a=n.slice(2),f=0,s=a.length;f<s;f++)if((u=a.charCodeAt(f))<48||u>i)return NaN;return parseInt(a,e)}}return+n};if(!v(" 0o1")||!v("0b1")||v("+0x1")){v=function(t){var n=arguments.length<1?0:t,r=this;return r instanceof v&&(g?a(function(){d.valueOf.call(r)}):"Number"!=o(r))?u(new p(m(n)),r,v):m(n)};for(var b,w=r(6)?f(p):"MAX_VALUE,MIN_VALUE,NaN,NEGATIVE_INFINITY,POSITIVE_INFINITY,EPSILON,isFinite,isInteger,isNaN,isSafeInteger,MAX_SAFE_INTEGER,MIN_SAFE_INTEGER,parseFloat,parseInt,isInteger".split(","),S=0;w.length>S;S++)i(p,b=w[S])&&!i(v,b)&&l(v,b,s(p,b));v.prototype=d,d.constructor=v,r(12)(e,"Number",v)}},function(t,n,r){"use strict";var e=r(0),i=r(24),o=r(101),u=r(73),c=1..toFixed,a=Math.floor,f=[0,0,0,0,0,0],s="Number.toFixed: incorrect invocation!",l=function(t,n){for(var r=-1,e=n;++r<6;)e+=t*f[r],f[r]=e%1e7,e=a(e/1e7)},h=function(t){for(var n=6,r=0;--n>=0;)r+=f[n],f[n]=a(r/t),r=r%t*1e7},v=function(){for(var t=6,n="";--t>=0;)if(""!==n||0===t||0!==f[t]){var r=String(f[t]);n=""===n?r:n+u.call("0",7-r.length)+r}return n},p=function(t,n,r){return 0===n?r:n%2==1?p(t,n-1,r*t):p(t*t,n/2,r)};e(e.P+e.F*(!!c&&("0.000"!==8e-5.toFixed(3)||"1"!==.9.toFixed(0)||"1.25"!==1.255.toFixed(2)||"1000000000000000128"!==(0xde0b6b3a7640080).toFixed(0))||!r(3)(function(){c.call({})})),"Number",{toFixed:function(t){var n,r,e,c,a=o(this,s),f=i(t),d="",g="0";if(f<0||f>20)throw RangeError(s);if(a!=a)return"NaN";if(a<=-1e21||a>=1e21)return String(a);if(a<0&&(d="-",a=-a),a>1e-21)if(r=(n=function(t){for(var n=0,r=t;r>=4096;)n+=12,r/=4096;for(;r>=2;)n+=1,r/=2;return n}(a*p(2,69,1))-69)<0?a*p(2,-n,1):a/p(2,n,1),r*=4503599627370496,(n=52-n)>0){for(l(0,r),e=f;e>=7;)l(1e7,0),e-=7;for(l(p(10,e,1),0),e=n-1;e>=23;)h(1<<23),e-=23;h(1<<e),l(1,1),h(2),g=v()}else l(0,r),l(1<<-n,0),g=v()+u.call("0",f);return g=f>0?d+((c=g.length)<=f?"0."+u.call("0",f-c)+g:g.slice(0,c-f)+"."+g.slice(c-f)):d+g}})},function(t,n,r){"use strict";var e=r(0),i=r(3),o=r(101),u=1..toPrecision;e(e.P+e.F*(i(function(){return"1"!==u.call(1,void 0)})||!i(function(){u.call({})})),"Number",{toPrecision:function(t){var n=o(this,"Number#toPrecision: incorrect invocation!");return void 0===t?u.call(n):u.call(n,t)}})},function(t,n,r){var e=r(0);e(e.S,"Number",{EPSILON:Math.pow(2,-52)})},function(t,n,r){var e=r(0),i=r(2).isFinite;e(e.S,"Number",{isFinite:function(t){return"number"==typeof t&&i(t)}})},function(t,n,r){var e=r(0);e(e.S,"Number",{isInteger:r(102)})},function(t,n,r){var e=r(0);e(e.S,"Number",{isNaN:function(t){return 
t!=t}})},function(t,n,r){var e=r(0),i=r(102),o=Math.abs;e(e.S,"Number",{isSafeInteger:function(t){return i(t)&&o(t)<=9007199254740991}})},function(t,n,r){var e=r(0);e(e.S,"Number",{MAX_SAFE_INTEGER:9007199254740991})},function(t,n,r){var e=r(0);e(e.S,"Number",{MIN_SAFE_INTEGER:-9007199254740991})},function(t,n,r){var e=r(0),i=r(100);e(e.S+e.F*(Number.parseFloat!=i),"Number",{parseFloat:i})},function(t,n,r){var e=r(0),i=r(99);e(e.S+e.F*(Number.parseInt!=i),"Number",{parseInt:i})},function(t,n,r){var e=r(0),i=r(103),o=Math.sqrt,u=Math.acosh;e(e.S+e.F*!(u&&710==Math.floor(u(Number.MAX_VALUE))&&u(1/0)==1/0),"Math",{acosh:function(t){return(t=+t)<1?NaN:t>94906265.62425156?Math.log(t)+Math.LN2:i(t-1+o(t-1)*o(t+1))}})},function(t,n,r){var e=r(0),i=Math.asinh;e(e.S+e.F*!(i&&1/i(0)>0),"Math",{asinh:function t(n){return isFinite(n=+n)&&0!=n?n<0?-t(-n):Math.log(n+Math.sqrt(n*n+1)):n}})},function(t,n,r){var e=r(0),i=Math.atanh;e(e.S+e.F*!(i&&1/i(-0)<0),"Math",{atanh:function(t){return 0==(t=+t)?t:Math.log((1+t)/(1-t))/2}})},function(t,n,r){var e=r(0),i=r(74);e(e.S,"Math",{cbrt:function(t){return i(t=+t)*Math.pow(Math.abs(t),1/3)}})},function(t,n,r){var e=r(0);e(e.S,"Math",{clz32:function(t){return(t>>>=0)?31-Math.floor(Math.log(t+.5)*Math.LOG2E):32}})},function(t,n,r){var e=r(0),i=Math.exp;e(e.S,"Math",{cosh:function(t){return(i(t=+t)+i(-t))/2}})},function(t,n,r){var e=r(0),i=r(75);e(e.S+e.F*(i!=Math.expm1),"Math",{expm1:i})},function(t,n,r){var e=r(0);e(e.S,"Math",{fround:r(104)})},function(t,n,r){var e=r(0),i=Math.abs;e(e.S,"Math",{hypot:function(t,n){for(var r,e,o=0,u=0,c=arguments.length,a=0;u<c;)a<(r=i(arguments[u++]))?(o=o*(e=a/r)*e+1,a=r):o+=r>0?(e=r/a)*e:r;return a===1/0?1/0:a*Math.sqrt(o)}})},function(t,n,r){var e=r(0),i=Math.imul;e(e.S+e.F*r(3)(function(){return-5!=i(4294967295,5)||2!=i.length}),"Math",{imul:function(t,n){var r=+t,e=+n,i=65535&r,o=65535&e;return 0|i*o+((65535&r>>>16)*o+i*(65535&e>>>16)<<16>>>0)}})},function(t,n,r){var e=r(0);e(e.S,"Math",{log10:function(t){return Math.log(t)*Math.LOG10E}})},function(t,n,r){var e=r(0);e(e.S,"Math",{log1p:r(103)})},function(t,n,r){var e=r(0);e(e.S,"Math",{log2:function(t){return Math.log(t)/Math.LN2}})},function(t,n,r){var e=r(0);e(e.S,"Math",{sign:r(74)})},function(t,n,r){var e=r(0),i=r(75),o=Math.exp;e(e.S+e.F*r(3)(function(){return-2e-17!=!Math.sinh(-2e-17)}),"Math",{sinh:function(t){return Math.abs(t=+t)<1?(i(t)-i(-t))/2:(o(t-1)-o(-t-1))*(Math.E/2)}})},function(t,n,r){var e=r(0),i=r(75),o=Math.exp;e(e.S,"Math",{tanh:function(t){var n=i(t=+t),r=i(-t);return n==1/0?1:r==1/0?-1:(n-r)/(o(t)+o(-t))}})},function(t,n,r){var e=r(0);e(e.S,"Math",{trunc:function(t){return(t>0?Math.floor:Math.ceil)(t)}})},function(t,n,r){var e=r(0),i=r(35),o=String.fromCharCode,u=String.fromCodePoint;e(e.S+e.F*(!!u&&1!=u.length),"String",{fromCodePoint:function(t){for(var n,r=[],e=arguments.length,u=0;e>u;){if(n=+arguments[u++],i(n,1114111)!==n)throw RangeError(n+" is not a valid code point");r.push(n<65536?o(n):o(55296+((n-=65536)>>10),n%1024+56320))}return r.join("")}})},function(t,n,r){var e=r(0),i=r(15),o=r(8);e(e.S,"String",{raw:function(t){for(var n=i(t.raw),r=o(n.length),e=arguments.length,u=[],c=0;r>c;)u.push(String(n[c++])),c<e&&u.push(String(arguments[c]));return u.join("")}})},function(t,n,r){"use strict";r(43)("trim",function(t){return function(){return t(this,3)}})},function(t,n,r){"use strict";var e=r(76)(!0);r(77)(String,"String",function(t){this._t=String(t),this._i=0},function(){var t,n=this._t,r=this._i;return r>=n.length?{value:void 
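/* String iterator: advances by full code points so surrogate pairs come back as single values */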
0,done:!0}:(t=e(n,r),this._i+=t.length,{value:t,done:!1})})},function(t,n,r){"use strict";var e=r(0),i=r(76)(!1);e(e.P,"String",{codePointAt:function(t){return i(this,t)}})},function(t,n,r){"use strict";var e=r(0),i=r(8),o=r(79),u="".endsWith;e(e.P+e.F*r(80)("endsWith"),"String",{endsWith:function(t){var n=o(this,t,"endsWith"),r=arguments.length>1?arguments[1]:void 0,e=i(n.length),c=void 0===r?e:Math.min(i(r),e),a=String(t);return u?u.call(n,a,c):n.slice(c-a.length,c)===a}})},function(t,n,r){"use strict";var e=r(0),i=r(79);e(e.P+e.F*r(80)("includes"),"String",{includes:function(t){return!!~i(this,t,"includes").indexOf(t,arguments.length>1?arguments[1]:void 0)}})},function(t,n,r){var e=r(0);e(e.P,"String",{repeat:r(73)})},function(t,n,r){"use strict";var e=r(0),i=r(8),o=r(79),u="".startsWith;e(e.P+e.F*r(80)("startsWith"),"String",{startsWith:function(t){var n=o(this,t,"startsWith"),r=i(Math.min(arguments.length>1?arguments[1]:void 0,n.length)),e=String(t);return u?u.call(n,e,r):n.slice(r,r+e.length)===e}})},function(t,n,r){"use strict";r(13)("anchor",function(t){return function(n){return t(this,"a","name",n)}})},function(t,n,r){"use strict";r(13)("big",function(t){return function(){return t(this,"big","","")}})},function(t,n,r){"use strict";r(13)("blink",function(t){return function(){return t(this,"blink","","")}})},function(t,n,r){"use strict";r(13)("bold",function(t){return function(){return t(this,"b","","")}})},function(t,n,r){"use strict";r(13)("fixed",function(t){return function(){return t(this,"tt","","")}})},function(t,n,r){"use strict";r(13)("fontcolor",function(t){return function(n){return t(this,"font","color",n)}})},function(t,n,r){"use strict";r(13)("fontsize",function(t){return function(n){return t(this,"font","size",n)}})},function(t,n,r){"use strict";r(13)("italics",function(t){return function(){return t(this,"i","","")}})},function(t,n,r){"use strict";r(13)("link",function(t){return function(n){return t(this,"a","href",n)}})},function(t,n,r){"use strict";r(13)("small",function(t){return function(){return t(this,"small","","")}})},function(t,n,r){"use strict";r(13)("strike",function(t){return function(){return t(this,"strike","","")}})},function(t,n,r){"use strict";r(13)("sub",function(t){return function(){return t(this,"sub","","")}})},function(t,n,r){"use strict";r(13)("sup",function(t){return function(){return t(this,"sup","","")}})},function(t,n,r){var e=r(0);e(e.S,"Date",{now:function(){return(new Date).getTime()}})},function(t,n,r){"use strict";var e=r(0),i=r(9),o=r(22);e(e.P+e.F*r(3)(function(){return null!==new Date(NaN).toJSON()||1!==Date.prototype.toJSON.call({toISOString:function(){return 1}})}),"Date",{toJSON:function(t){var n=i(this),r=o(n);return"number"!=typeof r||isFinite(r)?n.toISOString():null}})},function(t,n,r){var e=r(0),i=r(210);e(e.P+e.F*(Date.prototype.toISOString!==i),"Date",{toISOString:i})},function(t,n,r){"use strict";var e=r(3),i=Date.prototype.getTime,o=Date.prototype.toISOString,u=function(t){return t>9?t:"0"+t};t.exports=e(function(){return"0385-07-25T07:06:39.999Z"!=o.call(new Date(-5e13-1))})||!e(function(){o.call(new Date(NaN))})?function(){if(!isFinite(i.call(this)))throw RangeError("Invalid time value");var t=this,n=t.getUTCFullYear(),r=t.getUTCMilliseconds(),e=n<0?"-":n>9999?"+":"";return e+("00000"+Math.abs(n)).slice(e?-6:-4)+"-"+u(t.getUTCMonth()+1)+"-"+u(t.getUTCDate())+"T"+u(t.getUTCHours())+":"+u(t.getUTCMinutes())+":"+u(t.getUTCSeconds())+"."+(r>99?r:"0"+u(r))+"Z"}:o},function(t,n,r){var 
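/* Date#toString patch: yields "Invalid Date" whenever the internal time value is NaN */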
e=Date.prototype,i=e.toString,o=e.getTime;new Date(NaN)+""!="Invalid Date"&&r(12)(e,"toString",function(){var t=o.call(this);return t==t?i.call(this):"Invalid Date"})},function(t,n,r){var e=r(5)("toPrimitive"),i=Date.prototype;e in i||r(11)(i,e,r(213))},function(t,n,r){"use strict";var e=r(1),i=r(22);t.exports=function(t){if("string"!==t&&"number"!==t&&"default"!==t)throw TypeError("Incorrect hint");return i(e(this),"number"!=t)}},function(t,n,r){var e=r(0);e(e.S,"Array",{isArray:r(52)})},function(t,n,r){"use strict";var e=r(19),i=r(0),o=r(9),u=r(105),c=r(81),a=r(8),f=r(82),s=r(83);i(i.S+i.F*!r(54)(function(t){Array.from(t)}),"Array",{from:function(t){var n,r,i,l,h=o(t),v="function"==typeof this?this:Array,p=arguments.length,d=p>1?arguments[1]:void 0,g=void 0!==d,y=0,m=s(h);if(g&&(d=e(d,p>2?arguments[2]:void 0,2)),null==m||v==Array&&c(m))for(r=new v(n=a(h.length));n>y;y++)f(r,y,g?d(h[y],y):h[y]);else for(l=m.call(h),r=new v;!(i=l.next()).done;y++)f(r,y,g?u(l,d,[i.value,y],!0):i.value);return r.length=y,r}})},function(t,n,r){"use strict";var e=r(0),i=r(82);e(e.S+e.F*r(3)(function(){function t(){}return!(Array.of.call(t)instanceof t)}),"Array",{of:function(){for(var t=0,n=arguments.length,r=new("function"==typeof this?this:Array)(n);n>t;)i(r,t,arguments[t++]);return r.length=n,r}})},function(t,n,r){"use strict";var e=r(0),i=r(15),o=[].join;e(e.P+e.F*(r(46)!=Object||!r(21)(o)),"Array",{join:function(t){return o.call(i(this),void 0===t?",":t)}})},function(t,n,r){"use strict";var e=r(0),i=r(69),o=r(20),u=r(35),c=r(8),a=[].slice;e(e.P+e.F*r(3)(function(){i&&a.call(i)}),"Array",{slice:function(t,n){var r=c(this.length),e=o(this);if(n=void 0===n?r:n,"Array"==e)return a.call(this,t,n);for(var i=u(t,r),f=u(n,r),s=c(f-i),l=new Array(s),h=0;h<s;h++)l[h]="String"==e?this.charAt(i+h):this[i+h];return l}})},function(t,n,r){"use strict";var e=r(0),i=r(10),o=r(9),u=r(3),c=[].sort,a=[1,2,3];e(e.P+e.F*(u(function(){a.sort(void 0)})||!u(function(){a.sort(null)})||!r(21)(c)),"Array",{sort:function(t){return void 0===t?c.call(o(this)):c.call(o(this),i(t))}})},function(t,n,r){"use strict";var e=r(0),i=r(26)(0),o=r(21)([].forEach,!0);e(e.P+e.F*!o,"Array",{forEach:function(t){return i(this,t,arguments[1])}})},function(t,n,r){var e=r(4),i=r(52),o=r(5)("species");t.exports=function(t){var n;return i(t)&&("function"!=typeof(n=t.constructor)||n!==Array&&!i(n.prototype)||(n=void 0),e(n)&&null===(n=n[o])&&(n=void 0)),void 0===n?Array:n}},function(t,n,r){"use strict";var e=r(0),i=r(26)(1);e(e.P+e.F*!r(21)([].map,!0),"Array",{map:function(t){return i(this,t,arguments[1])}})},function(t,n,r){"use strict";var e=r(0),i=r(26)(2);e(e.P+e.F*!r(21)([].filter,!0),"Array",{filter:function(t){return i(this,t,arguments[1])}})},function(t,n,r){"use strict";var e=r(0),i=r(26)(3);e(e.P+e.F*!r(21)([].some,!0),"Array",{some:function(t){return i(this,t,arguments[1])}})},function(t,n,r){"use strict";var e=r(0),i=r(26)(4);e(e.P+e.F*!r(21)([].every,!0),"Array",{every:function(t){return i(this,t,arguments[1])}})},function(t,n,r){"use strict";var e=r(0),i=r(106);e(e.P+e.F*!r(21)([].reduce,!0),"Array",{reduce:function(t){return i(this,t,arguments.length,arguments[1],!1)}})},function(t,n,r){"use strict";var e=r(0),i=r(106);e(e.P+e.F*!r(21)([].reduceRight,!0),"Array",{reduceRight:function(t){return i(this,t,arguments.length,arguments[1],!0)}})},function(t,n,r){"use strict";var e=r(0),i=r(50)(!1),o=[].indexOf,u=!!o&&1/[1].indexOf(1,-0)<0;e(e.P+e.F*(u||!r(21)(o)),"Array",{indexOf:function(t){return 
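/* Array#indexOf patch: fixes the negative-zero fromIndex case, e.g. [1].indexOf(1, -0) */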
u?o.apply(this,arguments)||0:i(this,t,arguments[1])}})},function(t,n,r){"use strict";var e=r(0),i=r(15),o=r(24),u=r(8),c=[].lastIndexOf,a=!!c&&1/[1].lastIndexOf(1,-0)<0;e(e.P+e.F*(a||!r(21)(c)),"Array",{lastIndexOf:function(t){if(a)return c.apply(this,arguments)||0;var n=i(this),r=u(n.length),e=r-1;for(arguments.length>1&&(e=Math.min(e,o(arguments[1]))),e<0&&(e=r+e);e>=0;e--)if(e in n&&n[e]===t)return e||0;return-1}})},function(t,n,r){var e=r(0);e(e.P,"Array",{copyWithin:r(107)}),r(31)("copyWithin")},function(t,n,r){var e=r(0);e(e.P,"Array",{fill:r(85)}),r(31)("fill")},function(t,n,r){"use strict";var e=r(0),i=r(26)(5),o=!0;"find"in[]&&Array(1).find(function(){o=!1}),e(e.P+e.F*o,"Array",{find:function(t){return i(this,t,arguments.length>1?arguments[1]:void 0)}}),r(31)("find")},function(t,n,r){"use strict";var e=r(0),i=r(26)(6),o="findIndex",u=!0;o in[]&&Array(1)[o](function(){u=!1}),e(e.P+e.F*u,"Array",{findIndex:function(t){return i(this,t,arguments.length>1?arguments[1]:void 0)}}),r(31)(o)},function(t,n,r){r(38)("Array")},function(t,n,r){var e=r(2),i=r(72),o=r(7).f,u=r(37).f,c=r(53),a=r(55),f=e.RegExp,s=f,l=f.prototype,h=/a/g,v=/a/g,p=new f(h)!==h;if(r(6)&&(!p||r(3)(function(){return v[r(5)("match")]=!1,f(h)!=h||f(v)==v||"/a/i"!=f(h,"i")}))){f=function(t,n){var r=this instanceof f,e=c(t),o=void 0===n;return!r&&e&&t.constructor===f&&o?t:i(p?new s(e&&!o?t.source:t,n):s((e=t instanceof f)?t.source:t,e&&o?a.call(t):n),r?this:l,f)};for(var d=function(t){t in f||o(f,t,{configurable:!0,get:function(){return s[t]},set:function(n){s[t]=n}})},g=u(s),y=0;g.length>y;)d(g[y++]);l.constructor=f,f.prototype=l,r(12)(e,"RegExp",f)}r(38)("RegExp")},function(t,n,r){"use strict";r(109);var e=r(1),i=r(55),o=r(6),u=/./.toString,c=function(t){r(12)(RegExp.prototype,"toString",t,!0)};r(3)(function(){return"/a/b"!=u.call({source:"a",flags:"b"})})?c(function(){var t=e(this);return"/".concat(t.source,"/","flags"in t?t.flags:!o&&t instanceof RegExp?i.call(t):void 0)}):"toString"!=u.name&&c(function(){return u.call(this)})},function(t,n,r){r(56)("match",1,function(t,n,r){return[function(r){"use strict";var e=t(this),i=null==r?void 0:r[n];return void 0!==i?i.call(r,e):new RegExp(r)[n](String(e))},r]})},function(t,n,r){r(56)("replace",2,function(t,n,r){return[function(e,i){"use strict";var o=t(this),u=null==e?void 0:e[n];return void 0!==u?u.call(e,o,i):r.call(String(o),e,i)},r]})},function(t,n,r){r(56)("search",1,function(t,n,r){return[function(r){"use strict";var e=t(this),i=null==r?void 0:r[n];return void 0!==i?i.call(r,e):new RegExp(r)[n](String(e))},r]})},function(t,n,r){r(56)("split",2,function(t,n,e){"use strict";var i=r(53),o=e,u=[].push;if("c"=="abbc".split(/(b)*/)[1]||4!="test".split(/(?:)/,-1).length||2!="ab".split(/(?:ab)*/).length||4!=".".split(/(.?)(.?)/).length||".".split(/()()/).length>1||"".split(/.?/).length){var c=void 0===/()??/.exec("")[1];e=function(t,n){var r=String(this);if(void 0===t&&0===n)return[];if(!i(t))return o.call(r,t,n);var e,a,f,s,l,h=[],v=(t.ignoreCase?"i":"")+(t.multiline?"m":"")+(t.unicode?"u":"")+(t.sticky?"y":""),p=0,d=void 0===n?4294967295:n>>>0,g=new RegExp(t.source,v+"g");for(c||(e=new RegExp("^"+g.source+"$(?!\\s)",v));(a=g.exec(r))&&!((f=a.index+a[0].length)>p&&(h.push(r.slice(p,a.index)),!c&&a.length>1&&a[0].replace(e,function(){for(l=1;l<arguments.length-2;l++)void 0===arguments[l]&&(a[l]=void 0)}),a.length>1&&a.index<r.length&&u.apply(h,a.slice(1)),s=a[0].length,p=f,h.length>=d));)g.lastIndex===a.index&&g.lastIndex++;return 
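/* String#split with a regex separator: re-implements capture-group and lastIndex edge cases that native engines historically got wrong */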
p===r.length?!s&&g.test("")||h.push(""):h.push(r.slice(p)),h.length>d?h.slice(0,d):h}}else"0".split(void 0,0).length&&(e=function(t,n){return void 0===t&&0===n?[]:o.call(this,t,n)});return[function(r,i){var o=t(this),u=null==r?void 0:r[n];return void 0!==u?u.call(r,o,i):e.call(String(o),r,i)},e]})},function(t,n,r){"use strict";var e,i,o,u,c=r(30),a=r(2),f=r(19),s=r(48),l=r(0),h=r(4),v=r(10),p=r(39),d=r(40),g=r(57),y=r(87).set,m=r(88)(),b=r(89),w=r(110),S=r(58),x=r(111),_=a.TypeError,E=a.process,O=E&&E.versions,M=O&&O.v8||"",P=a.Promise,F="process"==s(E),A=function(){},j=i=b.f,I=!!function(){try{var t=P.resolve(1),n=(t.constructor={})[r(5)("species")]=function(t){t(A,A)};return(F||"function"==typeof PromiseRejectionEvent)&&t.then(A)instanceof n&&0!==M.indexOf("6.6")&&-1===S.indexOf("Chrome/66")}catch(t){}}(),N=function(t){var n;return!(!h(t)||"function"!=typeof(n=t.then))&&n},L=function(t,n){if(!t._n){t._n=!0;var r=t._c;m(function(){for(var e=t._v,i=1==t._s,o=0,u=function(n){var r,o,u,c=i?n.ok:n.fail,a=n.resolve,f=n.reject,s=n.domain;try{c?(i||(2==t._h&&R(t),t._h=1),!0===c?r=e:(s&&s.enter(),r=c(e),s&&(s.exit(),u=!0)),r===n.promise?f(_("Promise-chain cycle")):(o=N(r))?o.call(r,a,f):a(r)):f(e)}catch(t){s&&!u&&s.exit(),f(t)}};r.length>o;)u(r[o++]);t._c=[],t._n=!1,n&&!t._h&&T(t)})}},T=function(t){y.call(a,function(){var n,r,e,i=t._v,o=k(t);if(o&&(n=w(function(){F?E.emit("unhandledRejection",i,t):(r=a.onunhandledrejection)?r({promise:t,reason:i}):(e=a.console)&&e.error&&e.error("Unhandled promise rejection",i)}),t._h=F||k(t)?2:1),t._a=void 0,o&&n.e)throw n.v})},k=function(t){return 1!==t._h&&0===(t._a||t._c).length},R=function(t){y.call(a,function(){var n;F?E.emit("rejectionHandled",t):(n=a.onrejectionhandled)&&n({promise:t,reason:t._v})})},C=function(t){var n=this;n._d||(n._d=!0,(n=n._w||n)._v=t,n._s=2,n._a||(n._a=n._c.slice()),L(n,!0))},D=function(t){var n,r=this;if(!r._d){r._d=!0,r=r._w||r;try{if(r===t)throw _("Promise can't be resolved itself");(n=N(t))?m(function(){var e={_w:r,_d:!1};try{n.call(t,f(D,e,1),f(C,e,1))}catch(t){C.call(e,t)}}):(r._v=t,r._s=1,L(r,!1))}catch(t){C.call({_w:r,_d:!1},t)}}};I||(P=function(t){p(this,P,"Promise","_h"),v(t),e.call(this);try{t(f(D,this,1),f(C,this,1))}catch(t){C.call(this,t)}},(e=function(t){this._c=[],this._a=void 0,this._s=0,this._d=!1,this._v=void 0,this._h=0,this._n=!1}).prototype=r(41)(P.prototype,{then:function(t,n){var r=j(g(this,P));return r.ok="function"!=typeof t||t,r.fail="function"==typeof n&&n,r.domain=F?E.domain:void 0,this._c.push(r),this._a&&this._a.push(r),this._s&&L(this,!1),r.promise},catch:function(t){return this.then(void 0,t)}}),o=function(){var t=new e;this.promise=t,this.resolve=f(D,t,1),this.reject=f(C,t,1)},b.f=j=function(t){return t===P||t===u?new o(t):i(t)}),l(l.G+l.W+l.F*!I,{Promise:P}),r(42)(P,"Promise"),r(38)("Promise"),u=r(18).Promise,l(l.S+l.F*!I,"Promise",{reject:function(t){var n=j(this);return(0,n.reject)(t),n.promise}}),l(l.S+l.F*(c||!I),"Promise",{resolve:function(t){return x(c&&this===u?P:this,t)}}),l(l.S+l.F*!(I&&r(54)(function(t){P.all(t).catch(A)})),"Promise",{all:function(t){var n=this,r=j(n),e=r.resolve,i=r.reject,o=w(function(){var r=[],o=0,u=1;d(t,!1,function(t){var c=o++,a=!1;r.push(void 0),u++,n.resolve(t).then(function(t){a||(a=!0,r[c]=t,--u||e(r))},i)}),--u||e(r)});return o.e&&i(o.v),r.promise},race:function(t){var n=this,r=j(n),e=r.reject,i=w(function(){d(t,!1,function(t){n.resolve(t).then(r.resolve,e)})});return i.e&&e(i.v),r.promise}})},function(t,n,r){"use strict";var 
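/* WeakSet built on the weak-collection factory: add() stores true, with a hidden array store as the fallback for frozen objects */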
e=r(116),i=r(45);r(59)("WeakSet",function(t){return function(){return t(this,arguments.length>0?arguments[0]:void 0)}},{add:function(t){return e.def(i(this,"WeakSet"),t,!0)}},e,!1,!0)},function(t,n,r){"use strict";var e=r(0),i=r(60),o=r(90),u=r(1),c=r(35),a=r(8),f=r(4),s=r(2).ArrayBuffer,l=r(57),h=o.ArrayBuffer,v=o.DataView,p=i.ABV&&s.isView,d=h.prototype.slice,g=i.VIEW;e(e.G+e.W+e.F*(s!==h),{ArrayBuffer:h}),e(e.S+e.F*!i.CONSTR,"ArrayBuffer",{isView:function(t){return p&&p(t)||f(t)&&g in t}}),e(e.P+e.U+e.F*r(3)(function(){return!new h(2).slice(1,void 0).byteLength}),"ArrayBuffer",{slice:function(t,n){if(void 0!==d&&void 0===n)return d.call(u(this),t);for(var r=u(this).byteLength,e=c(t,r),i=c(void 0===n?r:n,r),o=new(l(this,h))(a(i-e)),f=new v(this),s=new v(o),p=0;e<i;)s.setUint8(p++,f.getUint8(e++));return o}}),r(38)("ArrayBuffer")},function(t,n,r){var e=r(0);e(e.G+e.W+e.F*!r(60).ABV,{DataView:r(90).DataView})},function(t,n,r){r(27)("Int8",1,function(t){return function(n,r,e){return t(this,n,r,e)}})},function(t,n,r){r(27)("Uint8",1,function(t){return function(n,r,e){return t(this,n,r,e)}})},function(t,n,r){r(27)("Uint8",1,function(t){return function(n,r,e){return t(this,n,r,e)}},!0)},function(t,n,r){r(27)("Int16",2,function(t){return function(n,r,e){return t(this,n,r,e)}})},function(t,n,r){r(27)("Uint16",2,function(t){return function(n,r,e){return t(this,n,r,e)}})},function(t,n,r){r(27)("Int32",4,function(t){return function(n,r,e){return t(this,n,r,e)}})},function(t,n,r){r(27)("Uint32",4,function(t){return function(n,r,e){return t(this,n,r,e)}})},function(t,n,r){r(27)("Float32",4,function(t){return function(n,r,e){return t(this,n,r,e)}})},function(t,n,r){r(27)("Float64",8,function(t){return function(n,r,e){return t(this,n,r,e)}})},function(t,n,r){var e=r(0),i=r(10),o=r(1),u=(r(2).Reflect||{}).apply,c=Function.apply;e(e.S+e.F*!r(3)(function(){u(function(){})}),"Reflect",{apply:function(t,n,r){var e=i(t),a=o(r);return u?u(e,n,a):c.call(e,n,a)}})},function(t,n,r){var e=r(0),i=r(36),o=r(10),u=r(1),c=r(4),a=r(3),f=r(97),s=(r(2).Reflect||{}).construct,l=a(function(){function t(){}return!(s(function(){},[],t)instanceof t)}),h=!a(function(){s(function(){})});e(e.S+e.F*(l||h),"Reflect",{construct:function(t,n){o(t),u(n);var r=arguments.length<3?t:o(arguments[2]);if(h&&!l)return s(t,n,r);if(t==r){switch(n.length){case 0:return new t;case 1:return new t(n[0]);case 2:return new t(n[0],n[1]);case 3:return new t(n[0],n[1],n[2]);case 4:return new t(n[0],n[1],n[2],n[3])}var e=[null];return e.push.apply(e,n),new(f.apply(t,e))}var a=r.prototype,v=i(c(a)?a:Object.prototype),p=Function.apply.call(t,v,n);return c(p)?p:v}})},function(t,n,r){var e=r(7),i=r(0),o=r(1),u=r(22);i(i.S+i.F*r(3)(function(){Reflect.defineProperty(e.f({},1,{value:1}),1,{value:2})}),"Reflect",{defineProperty:function(t,n,r){o(t),n=u(n,!0),o(r);try{return e.f(t,n,r),!0}catch(t){return!1}}})},function(t,n,r){var e=r(0),i=r(16).f,o=r(1);e(e.S,"Reflect",{deleteProperty:function(t,n){var r=i(o(t),n);return!(r&&!r.configurable)&&delete t[n]}})},function(t,n,r){"use strict";var e=r(0),i=r(1),o=function(t){this._t=i(t),this._i=0;var n,r=this._k=[];for(n in t)r.push(n)};r(78)(o,"Object",function(){var t,n=this._k;do{if(this._i>=n.length)return{value:void 0,done:!0}}while(!((t=n[this._i++])in this._t));return{value:t,done:!1}}),e(e.S,"Reflect",{enumerate:function(t){return new o(t)}})},function(t,n,r){var e=r(16),i=r(17),o=r(14),u=r(0),c=r(4),a=r(1);u(u.S,"Reflect",{get:function t(n,r){var u,f,s=arguments.length<3?n:arguments[2];return 
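/* Reflect.get: returns own data/accessor properties directly, otherwise recurses up the prototype chain keeping the original receiver */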
a(n)===s?n[r]:(u=e.f(n,r))?o(u,"value")?u.value:void 0!==u.get?u.get.call(s):void 0:c(f=i(n))?t(f,r,s):void 0}})},function(t,n,r){var e=r(16),i=r(0),o=r(1);i(i.S,"Reflect",{getOwnPropertyDescriptor:function(t,n){return e.f(o(t),n)}})},function(t,n,r){var e=r(0),i=r(17),o=r(1);e(e.S,"Reflect",{getPrototypeOf:function(t){return i(o(t))}})},function(t,n,r){var e=r(0);e(e.S,"Reflect",{has:function(t,n){return n in t}})},function(t,n,r){var e=r(0),i=r(1),o=Object.isExtensible;e(e.S,"Reflect",{isExtensible:function(t){return i(t),!o||o(t)}})},function(t,n,r){var e=r(0);e(e.S,"Reflect",{ownKeys:r(118)})},function(t,n,r){var e=r(0),i=r(1),o=Object.preventExtensions;e(e.S,"Reflect",{preventExtensions:function(t){i(t);try{return o&&o(t),!0}catch(t){return!1}}})},function(t,n,r){var e=r(7),i=r(16),o=r(17),u=r(14),c=r(0),a=r(32),f=r(1),s=r(4);c(c.S,"Reflect",{set:function t(n,r,c){var l,h,v=arguments.length<4?n:arguments[3],p=i.f(f(n),r);if(!p){if(s(h=o(n)))return t(h,r,c,v);p=a(0)}if(u(p,"value")){if(!1===p.writable||!s(v))return!1;if(l=i.f(v,r)){if(l.get||l.set||!1===l.writable)return!1;l.value=c,e.f(v,r,l)}else e.f(v,r,a(0,c));return!0}return void 0!==p.set&&(p.set.call(v,c),!0)}})},function(t,n,r){var e=r(0),i=r(70);i&&e(e.S,"Reflect",{setPrototypeOf:function(t,n){i.check(t,n);try{return i.set(t,n),!0}catch(t){return!1}}})},function(t,n,r){"use strict";var e=r(0),i=r(50)(!0);e(e.P,"Array",{includes:function(t){return i(this,t,arguments.length>1?arguments[1]:void 0)}}),r(31)("includes")},function(t,n,r){"use strict";var e=r(0),i=r(119),o=r(9),u=r(8),c=r(10),a=r(84);e(e.P,"Array",{flatMap:function(t){var n,r,e=o(this);return c(t),n=u(e.length),r=a(e,0),i(r,e,e,n,0,1,t,arguments[1]),r}}),r(31)("flatMap")},function(t,n,r){"use strict";var e=r(0),i=r(119),o=r(9),u=r(8),c=r(24),a=r(84);e(e.P,"Array",{flatten:function(){var t=arguments[0],n=o(this),r=u(n.length),e=a(n,0);return i(e,n,n,r,0,void 0===t?1:c(t)),e}}),r(31)("flatten")},function(t,n,r){"use strict";var e=r(0),i=r(76)(!0);e(e.P,"String",{at:function(t){return i(this,t)}})},function(t,n,r){"use strict";var e=r(0),i=r(120),o=r(58);e(e.P+e.F*/Version\/10\.\d+(\.\d+)? Safari\//.test(o),"String",{padStart:function(t){return i(this,t,arguments.length>1?arguments[1]:void 0,!0)}})},function(t,n,r){"use strict";var e=r(0),i=r(120),o=r(58);e(e.P+e.F*/Version\/10\.\d+(\.\d+)? 
Safari\//.test(o),"String",{padEnd:function(t){return i(this,t,arguments.length>1?arguments[1]:void 0,!1)}})},function(t,n,r){"use strict";r(43)("trimLeft",function(t){return function(){return t(this,1)}},"trimStart")},function(t,n,r){"use strict";r(43)("trimRight",function(t){return function(){return t(this,2)}},"trimEnd")},function(t,n,r){"use strict";var e=r(0),i=r(23),o=r(8),u=r(53),c=r(55),a=RegExp.prototype,f=function(t,n){this._r=t,this._s=n};r(78)(f,"RegExp String",function(){var t=this._r.exec(this._s);return{value:t,done:null===t}}),e(e.P,"String",{matchAll:function(t){if(i(this),!u(t))throw TypeError(t+" is not a regexp!");var n=String(this),r="flags"in a?String(t.flags):c.call(t),e=new RegExp(t.source,~r.indexOf("g")?r:"g"+r);return e.lastIndex=o(t.lastIndex),new f(e,n)}})},function(t,n,r){r(66)("asyncIterator")},function(t,n,r){r(66)("observable")},function(t,n,r){var e=r(0),i=r(118),o=r(15),u=r(16),c=r(82);e(e.S,"Object",{getOwnPropertyDescriptors:function(t){for(var n,r,e=o(t),a=u.f,f=i(e),s={},l=0;f.length>l;)void 0!==(r=a(e,n=f[l++]))&&c(s,n,r);return s}})},function(t,n,r){var e=r(0),i=r(121)(!1);e(e.S,"Object",{values:function(t){return i(t)}})},function(t,n,r){var e=r(0),i=r(121)(!0);e(e.S,"Object",{entries:function(t){return i(t)}})},function(t,n,r){"use strict";var e=r(0),i=r(9),o=r(10),u=r(7);r(6)&&e(e.P+r(61),"Object",{__defineGetter__:function(t,n){u.f(i(this),t,{get:o(n),enumerable:!0,configurable:!0})}})},function(t,n,r){"use strict";var e=r(0),i=r(9),o=r(10),u=r(7);r(6)&&e(e.P+r(61),"Object",{__defineSetter__:function(t,n){u.f(i(this),t,{set:o(n),enumerable:!0,configurable:!0})}})},function(t,n,r){"use strict";var e=r(0),i=r(9),o=r(22),u=r(17),c=r(16).f;r(6)&&e(e.P+r(61),"Object",{__lookupGetter__:function(t){var n,r=i(this),e=o(t,!0);do{if(n=c(r,e))return n.get}while(r=u(r))}})},function(t,n,r){"use strict";var e=r(0),i=r(9),o=r(22),u=r(17),c=r(16).f;r(6)&&e(e.P+r(61),"Object",{__lookupSetter__:function(t){var n,r=i(this),e=o(t,!0);do{if(n=c(r,e))return n.set}while(r=u(r))}})},function(t,n,r){var e=r(0);e(e.P+e.R,"Map",{toJSON:r(122)("Map")})},function(t,n,r){var e=r(0);e(e.P+e.R,"Set",{toJSON:r(122)("Set")})},function(t,n,r){r(62)("Map")},function(t,n,r){r(62)("Set")},function(t,n,r){r(62)("WeakMap")},function(t,n,r){r(62)("WeakSet")},function(t,n,r){r(63)("Map")},function(t,n,r){r(63)("Set")},function(t,n,r){r(63)("WeakMap")},function(t,n,r){r(63)("WeakSet")},function(t,n,r){var e=r(0);e(e.G,{global:r(2)})},function(t,n,r){var e=r(0);e(e.S,"System",{global:r(2)})},function(t,n,r){var e=r(0),i=r(20);e(e.S,"Error",{isError:function(t){return"Error"===i(t)}})},function(t,n,r){var e=r(0);e(e.S,"Math",{clamp:function(t,n,r){return Math.min(r,Math.max(n,t))}})},function(t,n,r){var e=r(0);e(e.S,"Math",{DEG_PER_RAD:Math.PI/180})},function(t,n,r){var e=r(0),i=180/Math.PI;e(e.S,"Math",{degrees:function(t){return t*i}})},function(t,n,r){var e=r(0),i=r(124),o=r(104);e(e.S,"Math",{fscale:function(t,n,r,e,u){return o(i(t,n,r,e,u))}})},function(t,n,r){var e=r(0);e(e.S,"Math",{iaddh:function(t,n,r,e){var i=t>>>0,o=r>>>0;return(n>>>0)+(e>>>0)+((i&o|(i|o)&~(i+o>>>0))>>>31)|0}})},function(t,n,r){var e=r(0);e(e.S,"Math",{isubh:function(t,n,r,e){var i=t>>>0,o=r>>>0;return(n>>>0)-(e>>>0)-((~i&o|~(i^o)&i-o>>>0)>>>31)|0}})},function(t,n,r){var e=r(0);e(e.S,"Math",{imulh:function(t,n){var r=+t,e=+n,i=65535&r,o=65535&e,u=r>>16,c=e>>16,a=(u*o>>>0)+(i*o>>>16);return u*c+(a>>16)+((i*c>>>0)+(65535&a)>>16)}})},function(t,n,r){var 
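/* proposal-stage additions: Math.RAD_PER_DEG/radians/scale/umulh/signbit, then Promise#finally, Promise.try and the Reflect metadata API */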
e=r(0);e(e.S,"Math",{RAD_PER_DEG:180/Math.PI})},function(t,n,r){var e=r(0),i=Math.PI/180;e(e.S,"Math",{radians:function(t){return t*i}})},function(t,n,r){var e=r(0);e(e.S,"Math",{scale:r(124)})},function(t,n,r){var e=r(0);e(e.S,"Math",{umulh:function(t,n){var r=+t,e=+n,i=65535&r,o=65535&e,u=r>>>16,c=e>>>16,a=(u*o>>>0)+(i*o>>>16);return u*c+(a>>>16)+((i*c>>>0)+(65535&a)>>>16)}})},function(t,n,r){var e=r(0);e(e.S,"Math",{signbit:function(t){return(t=+t)!=t?t:0==t?1/t==1/0:t>0}})},function(t,n,r){"use strict";var e=r(0),i=r(18),o=r(2),u=r(57),c=r(111);e(e.P+e.R,"Promise",{finally:function(t){var n=u(this,i.Promise||o.Promise),r="function"==typeof t;return this.then(r?function(r){return c(n,t()).then(function(){return r})}:t,r?function(r){return c(n,t()).then(function(){throw r})}:t)}})},function(t,n,r){"use strict";var e=r(0),i=r(89),o=r(110);e(e.S,"Promise",{try:function(t){var n=i.f(this),r=o(t);return(r.e?n.reject:n.resolve)(r.v),n.promise}})},function(t,n,r){var e=r(28),i=r(1),o=e.key,u=e.set;e.exp({defineMetadata:function(t,n,r,e){u(t,n,i(r),o(e))}})},function(t,n,r){var e=r(28),i=r(1),o=e.key,u=e.map,c=e.store;e.exp({deleteMetadata:function(t,n){var r=arguments.length<3?void 0:o(arguments[2]),e=u(i(n),r,!1);if(void 0===e||!e.delete(t))return!1;if(e.size)return!0;var a=c.get(n);return a.delete(r),!!a.size||c.delete(n)}})},function(t,n,r){var e=r(28),i=r(1),o=r(17),u=e.has,c=e.get,a=e.key,f=function(t,n,r){if(u(t,n,r))return c(t,n,r);var e=o(n);return null!==e?f(t,e,r):void 0};e.exp({getMetadata:function(t,n){return f(t,i(n),arguments.length<3?void 0:a(arguments[2]))}})},function(t,n,r){var e=r(114),i=r(123),o=r(28),u=r(1),c=r(17),a=o.keys,f=o.key,s=function(t,n){var r=a(t,n),o=c(t);if(null===o)return r;var u=s(o,n);return u.length?r.length?i(new e(r.concat(u))):u:r};o.exp({getMetadataKeys:function(t){return s(u(t),arguments.length<2?void 0:f(arguments[1]))}})},function(t,n,r){var e=r(28),i=r(1),o=e.get,u=e.key;e.exp({getOwnMetadata:function(t,n){return o(t,i(n),arguments.length<3?void 0:u(arguments[2]))}})},function(t,n,r){var e=r(28),i=r(1),o=e.keys,u=e.key;e.exp({getOwnMetadataKeys:function(t){return o(i(t),arguments.length<2?void 0:u(arguments[1]))}})},function(t,n,r){var e=r(28),i=r(1),o=r(17),u=e.has,c=e.key,a=function(t,n,r){if(u(t,n,r))return!0;var e=o(n);return null!==e&&a(t,e,r)};e.exp({hasMetadata:function(t,n){return a(t,i(n),arguments.length<3?void 0:c(arguments[2]))}})},function(t,n,r){var e=r(28),i=r(1),o=e.has,u=e.key;e.exp({hasOwnMetadata:function(t,n){return o(t,i(n),arguments.length<3?void 0:u(arguments[2]))}})},function(t,n,r){var e=r(28),i=r(1),o=r(10),u=e.key,c=e.set;e.exp({metadata:function(t,n){return function(r,e){c(t,n,(void 0!==e?i:o)(r),u(e))}}})},function(t,n,r){var e=r(0),i=r(88)(),o=r(2).process,u="process"==r(20)(o);e(e.G,{asap:function(t){var n=u&&o.domain;i(n?n.bind(t):t)}})},function(t,n,r){"use strict";var e=r(0),i=r(2),o=r(18),u=r(88)(),c=r(5)("observable"),a=r(10),f=r(1),s=r(39),l=r(41),h=r(11),v=r(40),p=v.RETURN,d=function(t){return null==t?void 0:a(t)},g=function(t){var n=t._c;n&&(t._c=void 0,n())},y=function(t){return void 0===t._o},m=function(t){y(t)||(t._o=void 0,g(t))},b=function(t,n){f(t),this._c=void 0,this._o=t,t=new w(this);try{var r=n(t),e=r;null!=r&&("function"==typeof r.unsubscribe?r=function(){e.unsubscribe()}:a(r),this._c=r)}catch(n){return void t.error(n)}y(this)&&g(this)};b.prototype=l({},{unsubscribe:function(){m(this)}});var w=function(t){this._s=t};w.prototype=l({},{next:function(t){var n=this._s;if(!y(n)){var r=n._o;try{var 
e=d(r.next);if(e)return e.call(r,t)}catch(t){try{m(n)}finally{throw t}}}},error:function(t){var n=this._s;if(y(n))throw t;var r=n._o;n._o=void 0;try{var e=d(r.error);if(!e)throw t;t=e.call(r,t)}catch(t){try{g(n)}finally{throw t}}return g(n),t},complete:function(t){var n=this._s;if(!y(n)){var r=n._o;n._o=void 0;try{var e=d(r.complete);t=e?e.call(r,t):void 0}catch(t){try{g(n)}finally{throw t}}return g(n),t}}});var S=function(t){s(this,S,"Observable","_f")._f=a(t)};l(S.prototype,{subscribe:function(t){return new b(t,this._f)},forEach:function(t){var n=this;return new(o.Promise||i.Promise)(function(r,e){a(t);var i=n.subscribe({next:function(n){try{return t(n)}catch(t){e(t),i.unsubscribe()}},error:e,complete:r})})}}),l(S,{from:function(t){var n="function"==typeof this?this:S,r=d(f(t)[c]);if(r){var e=f(r.call(t));return e.constructor===n?e:new n(function(t){return e.subscribe(t)})}return new n(function(n){var r=!1;return u(function(){if(!r){try{if(v(t,!1,function(t){if(n.next(t),r)return p})===p)return}catch(t){if(r)throw t;return void n.error(t)}n.complete()}}),function(){r=!0}})},of:function(){for(var t=0,n=arguments.length,r=new Array(n);t<n;)r[t]=arguments[t++];return new("function"==typeof this?this:S)(function(t){var n=!1;return u(function(){if(!n){for(var e=0;e<r.length;++e)if(t.next(r[e]),n)return;t.complete()}}),function(){n=!0}})}}),h(S.prototype,c,function(){return this}),e(e.G,{Observable:S}),r(38)("Observable")},function(t,n,r){var e=r(2),i=r(0),o=r(58),u=[].slice,c=/MSIE .\./.test(o),a=function(t){return function(n,r){var e=arguments.length>2,i=!!e&&u.call(arguments,2);return t(e?function(){("function"==typeof n?n:Function(n)).apply(this,i)}:n,r)}};i(i.G+i.B+i.F*c,{setTimeout:a(e.setTimeout),setInterval:a(e.setInterval)})},function(t,n,r){var e=r(0),i=r(87);e(e.G+e.B,{setImmediate:i.set,clearImmediate:i.clear})},function(t,n,r){for(var e=r(86),i=r(34),o=r(12),u=r(2),c=r(11),a=r(44),f=r(5),s=f("iterator"),l=f("toStringTag"),h=a.Array,v={CSSRuleList:!0,CSSStyleDeclaration:!1,CSSValueList:!1,ClientRectList:!1,DOMRectList:!1,DOMStringList:!1,DOMTokenList:!0,DataTransferItemList:!1,FileList:!1,HTMLAllCollection:!1,HTMLCollection:!1,HTMLFormElement:!1,HTMLSelectElement:!1,MediaList:!0,MimeTypeArray:!1,NamedNodeMap:!1,NodeList:!0,PaintRequestList:!1,Plugin:!1,PluginArray:!1,SVGLengthList:!1,SVGNumberList:!1,SVGPathSegList:!1,SVGPointList:!1,SVGStringList:!1,SVGTransformList:!1,SourceBufferList:!1,StyleSheetList:!0,TextTrackCueList:!1,TextTrackList:!1,TouchList:!1},p=i(v),d=0;d<p.length;d++){var g,y=p[d],m=v[y],b=u[y],w=b&&b.prototype;if(w&&(w[s]||c(w,s,h),w[l]||c(w,l,y),a[y]=h,m))for(g in e)w[g]||o(w,g,e[g],!0)}},function(t,n,r){(function(n){!function(n){"use strict";var r,e=Object.prototype,i=e.hasOwnProperty,o="function"==typeof Symbol?Symbol:{},u=o.iterator||"@@iterator",c=o.asyncIterator||"@@asyncIterator",a=o.toStringTag||"@@toStringTag",f="object"==typeof t,s=n.regeneratorRuntime;if(s)f&&(t.exports=s);else{(s=n.regeneratorRuntime=f?t.exports:{}).wrap=w;var l="suspendedStart",h="suspendedYield",v="executing",p="completed",d={},g={};g[u]=function(){return this};var y=Object.getPrototypeOf,m=y&&y(y(I([])));m&&m!==e&&i.call(m,u)&&(g=m);var b=E.prototype=x.prototype=Object.create(g);_.prototype=b.constructor=E,E.constructor=_,E[a]=_.displayName="GeneratorFunction",s.isGeneratorFunction=function(t){var n="function"==typeof t&&t.constructor;return!!n&&(n===_||"GeneratorFunction"===(n.displayName||n.name))},s.mark=function(t){return 
Object.setPrototypeOf?Object.setPrototypeOf(t,E):(t.__proto__=E,a in t||(t[a]="GeneratorFunction")),t.prototype=Object.create(b),t},s.awrap=function(t){return{__await:t}},O(M.prototype),M.prototype[c]=function(){return this},s.AsyncIterator=M,s.async=function(t,n,r,e){var i=new M(w(t,n,r,e));return s.isGeneratorFunction(n)?i:i.next().then(function(t){return t.done?t.value:i.next()})},O(b),b[a]="Generator",b[u]=function(){return this},b.toString=function(){return"[object Generator]"},s.keys=function(t){var n=[];for(var r in t)n.push(r);return n.reverse(),function r(){for(;n.length;){var e=n.pop();if(e in t)return r.value=e,r.done=!1,r}return r.done=!0,r}},s.values=I,j.prototype={constructor:j,reset:function(t){if(this.prev=0,this.next=0,this.sent=this._sent=r,this.done=!1,this.delegate=null,this.method="next",this.arg=r,this.tryEntries.forEach(A),!t)for(var n in this)"t"===n.charAt(0)&&i.call(this,n)&&!isNaN(+n.slice(1))&&(this[n]=r)},stop:function(){this.done=!0;var t=this.tryEntries[0].completion;if("throw"===t.type)throw t.arg;return this.rval},dispatchException:function(t){if(this.done)throw t;var n=this;function e(e,i){return c.type="throw",c.arg=t,n.next=e,i&&(n.method="next",n.arg=r),!!i}for(var o=this.tryEntries.length-1;o>=0;--o){var u=this.tryEntries[o],c=u.completion;if("root"===u.tryLoc)return e("end");if(u.tryLoc<=this.prev){var a=i.call(u,"catchLoc"),f=i.call(u,"finallyLoc");if(a&&f){if(this.prev<u.catchLoc)return e(u.catchLoc,!0);if(this.prev<u.finallyLoc)return e(u.finallyLoc)}else if(a){if(this.prev<u.catchLoc)return e(u.catchLoc,!0)}else{if(!f)throw new Error("try statement without catch or finally");if(this.prev<u.finallyLoc)return e(u.finallyLoc)}}}},abrupt:function(t,n){for(var r=this.tryEntries.length-1;r>=0;--r){var e=this.tryEntries[r];if(e.tryLoc<=this.prev&&i.call(e,"finallyLoc")&&this.prev<e.finallyLoc){var o=e;break}}o&&("break"===t||"continue"===t)&&o.tryLoc<=n&&n<=o.finallyLoc&&(o=null);var u=o?o.completion:{};return u.type=t,u.arg=n,o?(this.method="next",this.next=o.finallyLoc,d):this.complete(u)},complete:function(t,n){if("throw"===t.type)throw t.arg;return"break"===t.type||"continue"===t.type?this.next=t.arg:"return"===t.type?(this.rval=this.arg=t.arg,this.method="return",this.next="end"):"normal"===t.type&&n&&(this.next=n),d},finish:function(t){for(var n=this.tryEntries.length-1;n>=0;--n){var r=this.tryEntries[n];if(r.finallyLoc===t)return this.complete(r.completion,r.afterLoc),A(r),d}},catch:function(t){for(var n=this.tryEntries.length-1;n>=0;--n){var r=this.tryEntries[n];if(r.tryLoc===t){var e=r.completion;if("throw"===e.type){var i=e.arg;A(r)}return i}}throw new Error("illegal catch attempt")},delegateYield:function(t,n,e){return this.delegate={iterator:I(t),resultName:n,nextLoc:e},"next"===this.method&&(this.arg=r),d}}}function w(t,n,r,e){var i=n&&n.prototype instanceof x?n:x,o=Object.create(i.prototype),u=new j(e||[]);return o._invoke=function(t,n,r){var e=l;return function(i,o){if(e===v)throw new Error("Generator is already running");if(e===p){if("throw"===i)throw o;return N()}for(r.method=i,r.arg=o;;){var u=r.delegate;if(u){var c=P(u,r);if(c){if(c===d)continue;return c}}if("next"===r.method)r.sent=r._sent=r.arg;else if("throw"===r.method){if(e===l)throw e=p,r.arg;r.dispatchException(r.arg)}else"return"===r.method&&r.abrupt("return",r.arg);e=v;var a=S(t,n,r);if("normal"===a.type){if(e=r.done?p:h,a.arg===d)continue;return{value:a.arg,done:r.done}}"throw"===a.type&&(e=p,r.method="throw",r.arg=a.arg)}}}(t,r,u),o}function 
S(t,n,r){try{return{type:"normal",arg:t.call(n,r)}}catch(t){return{type:"throw",arg:t}}}function x(){}function _(){}function E(){}function O(t){["next","throw","return"].forEach(function(n){t[n]=function(t){return this._invoke(n,t)}})}function M(t){function r(n,e,o,u){var c=S(t[n],t,e);if("throw"!==c.type){var a=c.arg,f=a.value;return f&&"object"==typeof f&&i.call(f,"__await")?Promise.resolve(f.__await).then(function(t){r("next",t,o,u)},function(t){r("throw",t,o,u)}):Promise.resolve(f).then(function(t){a.value=t,o(a)},u)}u(c.arg)}var e;"object"==typeof n.process&&n.process.domain&&(r=n.process.domain.bind(r)),this._invoke=function(t,n){function i(){return new Promise(function(e,i){r(t,n,e,i)})}return e=e?e.then(i,i):i()}}function P(t,n){var e=t.iterator[n.method];if(e===r){if(n.delegate=null,"throw"===n.method){if(t.iterator.return&&(n.method="return",n.arg=r,P(t,n),"throw"===n.method))return d;n.method="throw",n.arg=new TypeError("The iterator does not provide a 'throw' method")}return d}var i=S(e,t.iterator,n.arg);if("throw"===i.type)return n.method="throw",n.arg=i.arg,n.delegate=null,d;var o=i.arg;return o?o.done?(n[t.resultName]=o.value,n.next=t.nextLoc,"return"!==n.method&&(n.method="next",n.arg=r),n.delegate=null,d):o:(n.method="throw",n.arg=new TypeError("iterator result is not an object"),n.delegate=null,d)}function F(t){var n={tryLoc:t[0]};1 in t&&(n.catchLoc=t[1]),2 in t&&(n.finallyLoc=t[2],n.afterLoc=t[3]),this.tryEntries.push(n)}function A(t){var n=t.completion||{};n.type="normal",delete n.arg,t.completion=n}function j(t){this.tryEntries=[{tryLoc:"root"}],t.forEach(F,this),this.reset(!0)}function I(t){if(t){var n=t[u];if(n)return n.call(t);if("function"==typeof t.next)return t;if(!isNaN(t.length)){var e=-1,o=function n(){for(;++e<t.length;)if(i.call(t,e))return n.value=t[e],n.done=!1,n;return n.value=r,n.done=!0,n};return o.next=o}}return{next:N}}function N(){return{value:r,done:!0}}}("object"==typeof n?n:"object"==typeof window?window:"object"==typeof self?self:this)}).call(this,r(64))},function(t,n,r){r(329),t.exports=r(18).RegExp.escape},function(t,n,r){var e=r(0),i=r(330)(/[\\^$*+?.()|[\]{}]/g,"\\$&");e(e.S,"RegExp",{escape:function(t){return i(t)}})},function(t,n){t.exports=function(t,n){var r=n===Object(n)?function(t){return n[t]}:n;return function(n){return String(n).replace(t,r)}}},function(t,n,r){"use strict";r.r(n);var e=r(126),i=r.n(e);r(125);const o=r(332);function u(t,n){var r=document.createElement("iframe");return a[t]=r,r.setAttribute("style","display: none;"),r.setAttribute("src",n),r}function c(t,n){window.parent.Singular=a,document.body.appendChild(t),document.body.appendChild(n)}var a={properties:{singularLocation:"",onIdentityChange:function(){},onLogout:function(){},clientId:"",checkInterval:1e3,uaaLocation:"",storageKey:"singularUserIdentityClaims",authTimeout:2e4,appendFramesOnInit:!1},rpFrameLoaded:!1,singularLocation:function(){if(""!==a.properties.singularLocation)return a.properties.singularLocation;if(document.getElementById("singular_script").src){var t=document.getElementById("singular_script").src;return t.substring(0,t.lastIndexOf("/"))}throw"singularLocation must not be blank"},init:function(t){if(t)for(var n in t)a.properties[n]=t[n];this.validateProperties(t);var 
r=u("opFrame",a.properties.uaaLocation+"/session_management?clientId="+a.properties.clientId+"&messageOrigin="+encodeURIComponent(window.location.origin)),e=u("rpFrame",this.singularLocation()+"/"+i.a);e.onload=function(){a.rpFrameLoaded=!0},!0===a.properties.appendFramesOnInit?c(r,e):document.addEventListener("DOMContentLoaded",function(){c(r,e)})},validateProperties:function(t){var n=["uaaLocation","clientId"];for(var r in n){var e=n[r];if(!t[e])throw'The "'+e+'" field must be set and not empty'}this.getUaaValidator().isValidUAA(t.uaaLocation)},decodeJwt:function(t){var n=t.split(".")[1].replace("-","+").replace("_","/");return JSON.parse(window.atob(n))},access:function(t){var n=a.rpFrame;return new Promise(function(r,e){var i=function(){n.contentWindow.fetchAccessToken(t,function(t,n){n||null==t?e(n):r(t)})};a.rpFrameLoaded?i():a.rpFrame.addEventListener("load",i)})},getUaaValidator:function(){return this.validator||o},setUaaValidator:function(t){this.validator=t}};n.default=a},function(t,n,r){var e=r(125);function i(t,n){var r=new XMLHttpRequest;r.open("GET",t),r.setRequestHeader("Accept","application/json"),r.send(),r.onreadystatechange=function(){r.readyState==XMLHttpRequest.DONE&&n(r)}}t.exports={isValidUAA:function(t){i(t+"/info",function(n){if(200!==n.status)throw t+" does not appear to be a running UAA instance or may not have a trusted SSL certificate";var r=JSON.parse(n.response).app.version;if(!e.isGreaterThanOrEqualTo(r,"4.10.0"))throw"The UAA instance running at "+t+" has version "+r+". This uaa-singular library requires UAA version 4.10.0 or higher."})},checkClientConfiguration:function(t,n){i(t,function(r){if(400===r.status){var e=window.location.href.substring(0,window.location.href.lastIndexOf("/"))+"/**";throw"Error while calling /oauth/authorize. Is the UAA client "+n+" configured to allow redirects to "+e+" ? Visit "+t+" in the browser to see the UAA's error messages."}})}}}]).default; \ No newline at end of file
diff --git a/deps/rabbitmq_management/priv/www/js/singular/singular.umd.js b/deps/rabbitmq_management/priv/www/js/singular/singular.umd.js
new file mode 100644
index 0000000000..01fc1536d9
--- /dev/null
+++ b/deps/rabbitmq_management/priv/www/js/singular/singular.umd.js
@@ -0,0 +1 @@
+!function(t,n){"object"==typeof exports&&"object"==typeof module?module.exports=n():"function"==typeof define&&define.amd?define([],n):"object"==typeof exports?exports.Singular=n():t.Singular=n()}(window,function(){return function(t){var n={};function r(e){if(n[e])return n[e].exports;var i=n[e]={i:e,l:!1,exports:{}};return t[e].call(i.exports,i,i.exports,r),i.l=!0,i.exports}return r.m=t,r.c=n,r.d=function(t,n,e){r.o(t,n)||Object.defineProperty(t,n,{enumerable:!0,get:e})},r.r=function(t){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(t,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(t,"__esModule",{value:!0})},r.t=function(t,n){if(1&n&&(t=r(t)),8&n)return t;if(4&n&&"object"==typeof t&&t&&t.__esModule)return t;var e=Object.create(null);if(r.r(e),Object.defineProperty(e,"default",{enumerable:!0,value:t}),2&n&&"string"!=typeof t)for(var i in t)r.d(e,i,function(n){return t[n]}.bind(null,i));return e},r.n=function(t){var n=t&&t.__esModule?function(){return t.default}:function(){return t};return r.d(n,"a",n),n},r.o=function(t,n){return Object.prototype.hasOwnProperty.call(t,n)},r.p="",r(r.s=127)}([function(t,n,r){var e=r(2),i=r(18),o=r(11),u=r(12),c=r(19),a=function(t,n,r){var f,s,l,h,v=t&a.F,p=t&a.G,d=t&a.S,g=t&a.P,y=t&a.B,m=p?e:d?e[n]||(e[n]={}):(e[n]||{}).prototype,b=p?i:i[n]||(i[n]={}),w=b.prototype||(b.prototype={});for(f in p&&(r=n),r)l=((s=!v&&m&&void 0!==m[f])?m:r)[f],h=y&&s?c(l,e):g&&"function"==typeof l?c(Function.call,l):l,m&&u(m,f,l,t&a.U),b[f]!=l&&o(b,f,h),g&&w[f]!=l&&(w[f]=l)};e.core=i,a.F=1,a.G=2,a.S=4,a.P=8,a.B=16,a.W=32,a.U=64,a.R=128,t.exports=a},function(t,n,r){var e=r(4);t.exports=function(t){if(!e(t))throw TypeError(t+" is not an object!");return t}},function(t,n){var r=t.exports="undefined"!=typeof window&&window.Math==Math?window:"undefined"!=typeof self&&self.Math==Math?self:Function("return this")();"number"==typeof __g&&(__g=r)},function(t,n){t.exports=function(t){try{return!!t()}catch(t){return!0}}},function(t,n){t.exports=function(t){return"object"==typeof t?null!==t:"function"==typeof t}},function(t,n,r){var e=r(49)("wks"),i=r(33),o=r(2).Symbol,u="function"==typeof o;(t.exports=function(t){return e[t]||(e[t]=u&&o[t]||(u?o:i)("Symbol."+t))}).store=e},function(t,n,r){t.exports=!r(3)(function(){return 7!=Object.defineProperty({},"a",{get:function(){return 7}}).a})},function(t,n,r){var e=r(1),i=r(91),o=r(22),u=Object.defineProperty;n.f=r(6)?Object.defineProperty:function(t,n,r){if(e(t),n=o(n,!0),e(r),i)try{return u(t,n,r)}catch(t){}if("get"in r||"set"in r)throw TypeError("Accessors not supported!");return"value"in r&&(t[n]=r.value),t}},function(t,n,r){var e=r(24),i=Math.min;t.exports=function(t){return t>0?i(e(t),9007199254740991):0}},function(t,n,r){var e=r(23);t.exports=function(t){return Object(e(t))}},function(t,n){t.exports=function(t){if("function"!=typeof t)throw TypeError(t+" is not a function!");return t}},function(t,n,r){var e=r(7),i=r(32);t.exports=r(6)?function(t,n,r){return e.f(t,n,i(1,r))}:function(t,n,r){return t[n]=r,t}},function(t,n,r){var e=r(2),i=r(11),o=r(14),u=r(33)("src"),c=Function.toString,a=(""+c).split("toString");r(18).inspectSource=function(t){return c.call(t)},(t.exports=function(t,n,r,c){var f="function"==typeof r;f&&(o(r,"name")||i(r,"name",n)),t[n]!==r&&(f&&(o(r,u)||i(r,u,t[n]?""+t[n]:a.join(String(n)))),t===e?t[n]=r:c?t[n]?t[n]=r:i(t,n,r):(delete t[n],i(t,n,r)))})(Function.prototype,"toString",function(){return"function"==typeof this&&this[u]||c.call(this)})},function(t,n,r){var 
e=r(0),i=r(3),o=r(23),u=/"/g,c=function(t,n,r,e){var i=String(o(t)),c="<"+n;return""!==r&&(c+=" "+r+'="'+String(e).replace(u,"&quot;")+'"'),c+">"+i+"</"+n+">"};t.exports=function(t,n){var r={};r[t]=n(c),e(e.P+e.F*i(function(){var n=""[t]('"');return n!==n.toLowerCase()||n.split('"').length>3}),"String",r)}},function(t,n){var r={}.hasOwnProperty;t.exports=function(t,n){return r.call(t,n)}},function(t,n,r){var e=r(46),i=r(23);t.exports=function(t){return e(i(t))}},function(t,n,r){var e=r(47),i=r(32),o=r(15),u=r(22),c=r(14),a=r(91),f=Object.getOwnPropertyDescriptor;n.f=r(6)?f:function(t,n){if(t=o(t),n=u(n,!0),a)try{return f(t,n)}catch(t){}if(c(t,n))return i(!e.f.call(t,n),t[n])}},function(t,n,r){var e=r(14),i=r(9),o=r(67)("IE_PROTO"),u=Object.prototype;t.exports=Object.getPrototypeOf||function(t){return t=i(t),e(t,o)?t[o]:"function"==typeof t.constructor&&t instanceof t.constructor?t.constructor.prototype:t instanceof Object?u:null}},function(t,n){var r=t.exports={version:"2.5.6"};"number"==typeof __e&&(__e=r)},function(t,n,r){var e=r(10);t.exports=function(t,n,r){if(e(t),void 0===n)return t;switch(r){case 1:return function(r){return t.call(n,r)};case 2:return function(r,e){return t.call(n,r,e)};case 3:return function(r,e,i){return t.call(n,r,e,i)}}return function(){return t.apply(n,arguments)}}},function(t,n){var r={}.toString;t.exports=function(t){return r.call(t).slice(8,-1)}},function(t,n,r){"use strict";var e=r(3);t.exports=function(t,n){return!!t&&e(function(){n?t.call(null,function(){},1):t.call(null)})}},function(t,n,r){var e=r(4);t.exports=function(t,n){if(!e(t))return t;var r,i;if(n&&"function"==typeof(r=t.toString)&&!e(i=r.call(t)))return i;if("function"==typeof(r=t.valueOf)&&!e(i=r.call(t)))return i;if(!n&&"function"==typeof(r=t.toString)&&!e(i=r.call(t)))return i;throw TypeError("Can't convert object to primitive value")}},function(t,n){t.exports=function(t){if(null==t)throw TypeError("Can't call method on "+t);return t}},function(t,n){var r=Math.ceil,e=Math.floor;t.exports=function(t){return isNaN(t=+t)?0:(t>0?e:r)(t)}},function(t,n,r){var e=r(0),i=r(18),o=r(3);t.exports=function(t,n){var r=(i.Object||{})[t]||Object[t],u={};u[t]=n(r),e(e.S+e.F*o(function(){r(1)}),"Object",u)}},function(t,n,r){var e=r(19),i=r(46),o=r(9),u=r(8),c=r(84);t.exports=function(t,n){var r=1==t,a=2==t,f=3==t,s=4==t,l=6==t,h=5==t||l,v=n||c;return function(n,c,p){for(var d,g,y=o(n),m=i(y),b=e(c,p,3),w=u(m.length),S=0,x=r?v(n,w):a?v(n,0):void 0;w>S;S++)if((h||S in m)&&(g=b(d=m[S],S,y),t))if(r)x[S]=g;else if(g)switch(t){case 3:return!0;case 5:return d;case 6:return S;case 2:x.push(d)}else if(s)return!1;return l?-1:f||s?s:x}}},function(t,n,r){"use strict";if(r(6)){var e=r(30),i=r(2),o=r(3),u=r(0),c=r(60),a=r(90),f=r(19),s=r(39),l=r(32),h=r(11),v=r(41),p=r(24),d=r(8),g=r(117),y=r(35),m=r(22),b=r(14),w=r(48),S=r(4),x=r(9),_=r(81),E=r(36),O=r(17),M=r(37).f,P=r(83),F=r(33),A=r(5),j=r(26),I=r(50),N=r(57),L=r(86),T=r(44),k=r(54),R=r(38),C=r(85),D=r(107),W=r(7),U=r(16),G=W.f,V=U.f,B=i.RangeError,z=i.TypeError,q=i.Uint8Array,J=Array.prototype,Y=a.ArrayBuffer,K=a.DataView,H=j(0),X=j(2),$=j(3),Z=j(4),Q=j(5),tt=j(6),nt=I(!0),rt=I(!1),et=L.values,it=L.keys,ot=L.entries,ut=J.lastIndexOf,ct=J.reduce,at=J.reduceRight,ft=J.join,st=J.sort,lt=J.slice,ht=J.toString,vt=J.toLocaleString,pt=A("iterator"),dt=A("toStringTag"),gt=F("typed_constructor"),yt=F("def_constructor"),mt=c.CONSTR,bt=c.TYPED,wt=c.VIEW,St=j(1,function(t,n){return Mt(N(t,t[yt]),n)}),xt=o(function(){return 1===new q(new 
Uint16Array([1]).buffer)[0]}),_t=!!q&&!!q.prototype.set&&o(function(){new q(1).set({})}),Et=function(t,n){var r=p(t);if(r<0||r%n)throw B("Wrong offset!");return r},Ot=function(t){if(S(t)&&bt in t)return t;throw z(t+" is not a typed array!")},Mt=function(t,n){if(!(S(t)&&gt in t))throw z("It is not a typed array constructor!");return new t(n)},Pt=function(t,n){return Ft(N(t,t[yt]),n)},Ft=function(t,n){for(var r=0,e=n.length,i=Mt(t,e);e>r;)i[r]=n[r++];return i},At=function(t,n,r){G(t,n,{get:function(){return this._d[r]}})},jt=function(t){var n,r,e,i,o,u,c=x(t),a=arguments.length,s=a>1?arguments[1]:void 0,l=void 0!==s,h=P(c);if(null!=h&&!_(h)){for(u=h.call(c),e=[],n=0;!(o=u.next()).done;n++)e.push(o.value);c=e}for(l&&a>2&&(s=f(s,arguments[2],2)),n=0,r=d(c.length),i=Mt(this,r);r>n;n++)i[n]=l?s(c[n],n):c[n];return i},It=function(){for(var t=0,n=arguments.length,r=Mt(this,n);n>t;)r[t]=arguments[t++];return r},Nt=!!q&&o(function(){vt.call(new q(1))}),Lt=function(){return vt.apply(Nt?lt.call(Ot(this)):Ot(this),arguments)},Tt={copyWithin:function(t,n){return D.call(Ot(this),t,n,arguments.length>2?arguments[2]:void 0)},every:function(t){return Z(Ot(this),t,arguments.length>1?arguments[1]:void 0)},fill:function(t){return C.apply(Ot(this),arguments)},filter:function(t){return Pt(this,X(Ot(this),t,arguments.length>1?arguments[1]:void 0))},find:function(t){return Q(Ot(this),t,arguments.length>1?arguments[1]:void 0)},findIndex:function(t){return tt(Ot(this),t,arguments.length>1?arguments[1]:void 0)},forEach:function(t){H(Ot(this),t,arguments.length>1?arguments[1]:void 0)},indexOf:function(t){return rt(Ot(this),t,arguments.length>1?arguments[1]:void 0)},includes:function(t){return nt(Ot(this),t,arguments.length>1?arguments[1]:void 0)},join:function(t){return ft.apply(Ot(this),arguments)},lastIndexOf:function(t){return ut.apply(Ot(this),arguments)},map:function(t){return St(Ot(this),t,arguments.length>1?arguments[1]:void 0)},reduce:function(t){return ct.apply(Ot(this),arguments)},reduceRight:function(t){return at.apply(Ot(this),arguments)},reverse:function(){for(var t,n=Ot(this).length,r=Math.floor(n/2),e=0;e<r;)t=this[e],this[e++]=this[--n],this[n]=t;return this},some:function(t){return $(Ot(this),t,arguments.length>1?arguments[1]:void 0)},sort:function(t){return st.call(Ot(this),t)},subarray:function(t,n){var r=Ot(this),e=r.length,i=y(t,e);return new(N(r,r[yt]))(r.buffer,r.byteOffset+i*r.BYTES_PER_ELEMENT,d((void 0===n?e:y(n,e))-i))}},kt=function(t,n){return Pt(this,lt.call(Ot(this),t,n))},Rt=function(t){Ot(this);var n=Et(arguments[1],1),r=this.length,e=x(t),i=d(e.length),o=0;if(i+n>r)throw B("Wrong length!");for(;o<i;)this[n+o]=e[o++]},Ct={entries:function(){return ot.call(Ot(this))},keys:function(){return it.call(Ot(this))},values:function(){return et.call(Ot(this))}},Dt=function(t,n){return S(t)&&t[bt]&&"symbol"!=typeof n&&n in t&&String(+n)==String(n)},Wt=function(t,n){return Dt(t,n=m(n,!0))?l(2,t[n]):V(t,n)},Ut=function(t,n,r){return!(Dt(t,n=m(n,!0))&&S(r)&&b(r,"value"))||b(r,"get")||b(r,"set")||r.configurable||b(r,"writable")&&!r.writable||b(r,"enumerable")&&!r.enumerable?G(t,n,r):(t[n]=r.value,t)};mt||(U.f=Wt,W.f=Ut),u(u.S+u.F*!mt,"Object",{getOwnPropertyDescriptor:Wt,defineProperty:Ut}),o(function(){ht.call({})})&&(ht=vt=function(){return ft.call(this)});var Gt=v({},Tt);v(Gt,Ct),h(Gt,pt,Ct.values),v(Gt,{slice:kt,set:Rt,constructor:function(){},toString:ht,toLocaleString:Lt}),At(Gt,"buffer","b"),At(Gt,"byteOffset","o"),At(Gt,"byteLength","l"),At(Gt,"length","e"),G(Gt,dt,{get:function(){return 
this[bt]}}),t.exports=function(t,n,r,a){var f=t+((a=!!a)?"Clamped":"")+"Array",l="get"+t,v="set"+t,p=i[f],y=p||{},m=p&&O(p),b=!p||!c.ABV,x={},_=p&&p.prototype,P=function(t,r){G(t,r,{get:function(){return function(t,r){var e=t._d;return e.v[l](r*n+e.o,xt)}(this,r)},set:function(t){return function(t,r,e){var i=t._d;a&&(e=(e=Math.round(e))<0?0:e>255?255:255&e),i.v[v](r*n+i.o,e,xt)}(this,r,t)},enumerable:!0})};b?(p=r(function(t,r,e,i){s(t,p,f,"_d");var o,u,c,a,l=0,v=0;if(S(r)){if(!(r instanceof Y||"ArrayBuffer"==(a=w(r))||"SharedArrayBuffer"==a))return bt in r?Ft(p,r):jt.call(p,r);o=r,v=Et(e,n);var y=r.byteLength;if(void 0===i){if(y%n)throw B("Wrong length!");if((u=y-v)<0)throw B("Wrong length!")}else if((u=d(i)*n)+v>y)throw B("Wrong length!");c=u/n}else c=g(r),o=new Y(u=c*n);for(h(t,"_d",{b:o,o:v,l:u,e:c,v:new K(o)});l<c;)P(t,l++)}),_=p.prototype=E(Gt),h(_,"constructor",p)):o(function(){p(1)})&&o(function(){new p(-1)})&&k(function(t){new p,new p(null),new p(1.5),new p(t)},!0)||(p=r(function(t,r,e,i){var o;return s(t,p,f),S(r)?r instanceof Y||"ArrayBuffer"==(o=w(r))||"SharedArrayBuffer"==o?void 0!==i?new y(r,Et(e,n),i):void 0!==e?new y(r,Et(e,n)):new y(r):bt in r?Ft(p,r):jt.call(p,r):new y(g(r))}),H(m!==Function.prototype?M(y).concat(M(m)):M(y),function(t){t in p||h(p,t,y[t])}),p.prototype=_,e||(_.constructor=p));var F=_[pt],A=!!F&&("values"==F.name||null==F.name),j=Ct.values;h(p,gt,!0),h(_,bt,f),h(_,wt,!0),h(_,yt,p),(a?new p(1)[dt]==f:dt in _)||G(_,dt,{get:function(){return f}}),x[f]=p,u(u.G+u.W+u.F*(p!=y),x),u(u.S,f,{BYTES_PER_ELEMENT:n}),u(u.S+u.F*o(function(){y.of.call(p,1)}),f,{from:jt,of:It}),"BYTES_PER_ELEMENT"in _||h(_,"BYTES_PER_ELEMENT",n),u(u.P,f,Tt),R(f),u(u.P+u.F*_t,f,{set:Rt}),u(u.P+u.F*!A,f,Ct),e||_.toString==ht||(_.toString=ht),u(u.P+u.F*o(function(){new p(1).slice()}),f,{slice:kt}),u(u.P+u.F*(o(function(){return[1,2].toLocaleString()!=new p([1,2]).toLocaleString()})||!o(function(){_.toLocaleString.call([1,2])})),f,{toLocaleString:Lt}),T[f]=A?F:j,e||A||h(_,pt,j)}}else t.exports=function(){}},function(t,n,r){var e=r(112),i=r(0),o=r(49)("metadata"),u=o.store||(o.store=new(r(115))),c=function(t,n,r){var i=u.get(t);if(!i){if(!r)return;u.set(t,i=new e)}var o=i.get(n);if(!o){if(!r)return;i.set(n,o=new e)}return o};t.exports={store:u,map:c,has:function(t,n,r){var e=c(n,r,!1);return void 0!==e&&e.has(t)},get:function(t,n,r){var e=c(n,r,!1);return void 0===e?void 0:e.get(t)},set:function(t,n,r,e){c(r,e,!0).set(t,n)},keys:function(t,n){var r=c(t,n,!1),e=[];return r&&r.forEach(function(t,n){e.push(n)}),e},key:function(t){return void 0===t||"symbol"==typeof t?t:String(t)},exp:function(t){i(i.S,"Reflect",t)}}},function(t,n,r){var e=r(33)("meta"),i=r(4),o=r(14),u=r(7).f,c=0,a=Object.isExtensible||function(){return!0},f=!r(3)(function(){return a(Object.preventExtensions({}))}),s=function(t){u(t,e,{value:{i:"O"+ ++c,w:{}}})},l=t.exports={KEY:e,NEED:!1,fastKey:function(t,n){if(!i(t))return"symbol"==typeof t?t:("string"==typeof t?"S":"P")+t;if(!o(t,e)){if(!a(t))return"F";if(!n)return"E";s(t)}return t[e].i},getWeak:function(t,n){if(!o(t,e)){if(!a(t))return!0;if(!n)return!1;s(t)}return t[e].w},onFreeze:function(t){return f&&l.NEED&&a(t)&&!o(t,e)&&s(t),t}}},function(t,n){t.exports=!1},function(t,n,r){var e=r(5)("unscopables"),i=Array.prototype;null==i[e]&&r(11)(i,e,{}),t.exports=function(t){i[e][t]=!0}},function(t,n){t.exports=function(t,n){return{enumerable:!(1&t),configurable:!(2&t),writable:!(4&t),value:n}}},function(t,n){var r=0,e=Math.random();t.exports=function(t){return"Symbol(".concat(void 
0===t?"":t,")_",(++r+e).toString(36))}},function(t,n,r){var e=r(93),i=r(68);t.exports=Object.keys||function(t){return e(t,i)}},function(t,n,r){var e=r(24),i=Math.max,o=Math.min;t.exports=function(t,n){return(t=e(t))<0?i(t+n,0):o(t,n)}},function(t,n,r){var e=r(1),i=r(94),o=r(68),u=r(67)("IE_PROTO"),c=function(){},a=function(){var t,n=r(65)("iframe"),e=o.length;for(n.style.display="none",r(69).appendChild(n),n.src="javascript:",(t=n.contentWindow.document).open(),t.write("<script>document.F=Object<\/script>"),t.close(),a=t.F;e--;)delete a.prototype[o[e]];return a()};t.exports=Object.create||function(t,n){var r;return null!==t?(c.prototype=e(t),r=new c,c.prototype=null,r[u]=t):r=a(),void 0===n?r:i(r,n)}},function(t,n,r){var e=r(93),i=r(68).concat("length","prototype");n.f=Object.getOwnPropertyNames||function(t){return e(t,i)}},function(t,n,r){"use strict";var e=r(2),i=r(7),o=r(6),u=r(5)("species");t.exports=function(t){var n=e[t];o&&n&&!n[u]&&i.f(n,u,{configurable:!0,get:function(){return this}})}},function(t,n){t.exports=function(t,n,r,e){if(!(t instanceof n)||void 0!==e&&e in t)throw TypeError(r+": incorrect invocation!");return t}},function(t,n,r){var e=r(19),i=r(105),o=r(81),u=r(1),c=r(8),a=r(83),f={},s={};(n=t.exports=function(t,n,r,l,h){var v,p,d,g,y=h?function(){return t}:a(t),m=e(r,l,n?2:1),b=0;if("function"!=typeof y)throw TypeError(t+" is not iterable!");if(o(y)){for(v=c(t.length);v>b;b++)if((g=n?m(u(p=t[b])[0],p[1]):m(t[b]))===f||g===s)return g}else for(d=y.call(t);!(p=d.next()).done;)if((g=i(d,m,p.value,n))===f||g===s)return g}).BREAK=f,n.RETURN=s},function(t,n,r){var e=r(12);t.exports=function(t,n,r){for(var i in n)e(t,i,n[i],r);return t}},function(t,n,r){var e=r(7).f,i=r(14),o=r(5)("toStringTag");t.exports=function(t,n,r){t&&!i(t=r?t:t.prototype,o)&&e(t,o,{configurable:!0,value:n})}},function(t,n,r){var e=r(0),i=r(23),o=r(3),u=r(71),c="["+u+"]",a=RegExp("^"+c+c+"*"),f=RegExp(c+c+"*$"),s=function(t,n,r){var i={},c=o(function(){return!!u[t]()||"​…"!="​…"[t]()}),a=i[t]=c?n(l):u[t];r&&(i[r]=a),e(e.P+e.F*c,"String",i)},l=s.trim=function(t,n){return t=String(i(t)),1&n&&(t=t.replace(a,"")),2&n&&(t=t.replace(f,"")),t};t.exports=s},function(t,n){t.exports={}},function(t,n,r){var e=r(4);t.exports=function(t,n){if(!e(t)||t._t!==n)throw TypeError("Incompatible receiver, "+n+" required!");return t}},function(t,n,r){var e=r(20);t.exports=Object("z").propertyIsEnumerable(0)?Object:function(t){return"String"==e(t)?t.split(""):Object(t)}},function(t,n){n.f={}.propertyIsEnumerable},function(t,n,r){var e=r(20),i=r(5)("toStringTag"),o="Arguments"==e(function(){return arguments}());t.exports=function(t){var n,r,u;return void 0===t?"Undefined":null===t?"Null":"string"==typeof(r=function(t,n){try{return t[n]}catch(t){}}(n=Object(t),i))?r:o?e(n):"Object"==(u=e(n))&&"function"==typeof n.callee?"Arguments":u}},function(t,n,r){var e=r(18),i=r(2),o=i["__core-js_shared__"]||(i["__core-js_shared__"]={});(t.exports=function(t,n){return o[t]||(o[t]=void 0!==n?n:{})})("versions",[]).push({version:e.version,mode:r(30)?"pure":"global",copyright:"© 2018 Denis Pushkarev (zloirock.ru)"})},function(t,n,r){var e=r(15),i=r(8),o=r(35);t.exports=function(t){return function(n,r,u){var c,a=e(n),f=i(a.length),s=o(u,f);if(t&&r!=r){for(;f>s;)if((c=a[s++])!=c)return!0}else for(;f>s;s++)if((t||s in a)&&a[s]===r)return t||s||0;return!t&&-1}}},function(t,n){n.f=Object.getOwnPropertySymbols},function(t,n,r){var e=r(20);t.exports=Array.isArray||function(t){return"Array"==e(t)}},function(t,n,r){var 
e=r(4),i=r(20),o=r(5)("match");t.exports=function(t){var n;return e(t)&&(void 0!==(n=t[o])?!!n:"RegExp"==i(t))}},function(t,n,r){var e=r(5)("iterator"),i=!1;try{var o=[7][e]();o.return=function(){i=!0},Array.from(o,function(){throw 2})}catch(t){}t.exports=function(t,n){if(!n&&!i)return!1;var r=!1;try{var o=[7],u=o[e]();u.next=function(){return{done:r=!0}},o[e]=function(){return u},t(o)}catch(t){}return r}},function(t,n,r){"use strict";var e=r(1);t.exports=function(){var t=e(this),n="";return t.global&&(n+="g"),t.ignoreCase&&(n+="i"),t.multiline&&(n+="m"),t.unicode&&(n+="u"),t.sticky&&(n+="y"),n}},function(t,n,r){"use strict";var e=r(11),i=r(12),o=r(3),u=r(23),c=r(5);t.exports=function(t,n,r){var a=c(t),f=r(u,a,""[t]),s=f[0],l=f[1];o(function(){var n={};return n[a]=function(){return 7},7!=""[t](n)})&&(i(String.prototype,t,s),e(RegExp.prototype,a,2==n?function(t,n){return l.call(t,this,n)}:function(t){return l.call(t,this)}))}},function(t,n,r){var e=r(1),i=r(10),o=r(5)("species");t.exports=function(t,n){var r,u=e(t).constructor;return void 0===u||null==(r=e(u)[o])?n:i(r)}},function(t,n,r){var e=r(2).navigator;t.exports=e&&e.userAgent||""},function(t,n,r){"use strict";var e=r(2),i=r(0),o=r(12),u=r(41),c=r(29),a=r(40),f=r(39),s=r(4),l=r(3),h=r(54),v=r(42),p=r(72);t.exports=function(t,n,r,d,g,y){var m=e[t],b=m,w=g?"set":"add",S=b&&b.prototype,x={},_=function(t){var n=S[t];o(S,t,"delete"==t?function(t){return!(y&&!s(t))&&n.call(this,0===t?0:t)}:"has"==t?function(t){return!(y&&!s(t))&&n.call(this,0===t?0:t)}:"get"==t?function(t){return y&&!s(t)?void 0:n.call(this,0===t?0:t)}:"add"==t?function(t){return n.call(this,0===t?0:t),this}:function(t,r){return n.call(this,0===t?0:t,r),this})};if("function"==typeof b&&(y||S.forEach&&!l(function(){(new b).entries().next()}))){var E=new b,O=E[w](y?{}:-0,1)!=E,M=l(function(){E.has(1)}),P=h(function(t){new b(t)}),F=!y&&l(function(){for(var t=new b,n=5;n--;)t[w](n,n);return!t.has(-0)});P||((b=n(function(n,r){f(n,b,t);var e=p(new m,n,b);return null!=r&&a(r,g,e[w],e),e})).prototype=S,S.constructor=b),(M||F)&&(_("delete"),_("has"),g&&_("get")),(F||O)&&_(w),y&&S.clear&&delete S.clear}else b=d.getConstructor(n,t,g,w),u(b.prototype,r),c.NEED=!0;return v(b,t),x[t]=b,i(i.G+i.W+i.F*(b!=m),x),y||d.setStrong(b,t,g),b}},function(t,n,r){for(var e,i=r(2),o=r(11),u=r(33),c=u("typed_array"),a=u("view"),f=!(!i.ArrayBuffer||!i.DataView),s=f,l=0,h="Int8Array,Uint8Array,Uint8ClampedArray,Int16Array,Uint16Array,Int32Array,Uint32Array,Float32Array,Float64Array".split(",");l<9;)(e=i[h[l++]])?(o(e.prototype,c,!0),o(e.prototype,a,!0)):s=!1;t.exports={ABV:f,CONSTR:s,TYPED:c,VIEW:a}},function(t,n,r){"use strict";t.exports=r(30)||!r(3)(function(){var t=Math.random();__defineSetter__.call(null,t,function(){}),delete r(2)[t]})},function(t,n,r){"use strict";var e=r(0);t.exports=function(t){e(e.S,t,{of:function(){for(var t=arguments.length,n=new Array(t);t--;)n[t]=arguments[t];return new this(n)}})}},function(t,n,r){"use strict";var e=r(0),i=r(10),o=r(19),u=r(40);t.exports=function(t){e(e.S,t,{from:function(t){var n,r,e,c,a=arguments[1];return i(this),(n=void 0!==a)&&i(a),null==t?new this:(r=[],n?(e=0,c=o(a,arguments[2],2),u(t,!1,function(t){r.push(c(t,e++))})):u(t,!1,r.push,r),new this(r))}})}},function(t,n){var r;r=function(){return this}();try{r=r||new Function("return this")()}catch(t){"object"==typeof window&&(r=window)}t.exports=r},function(t,n,r){var e=r(4),i=r(2).document,o=e(i)&&e(i.createElement);t.exports=function(t){return o?i.createElement(t):{}}},function(t,n,r){var 
e=r(2),i=r(18),o=r(30),u=r(92),c=r(7).f;t.exports=function(t){var n=i.Symbol||(i.Symbol=o?{}:e.Symbol||{});"_"==t.charAt(0)||t in n||c(n,t,{value:u.f(t)})}},function(t,n,r){var e=r(49)("keys"),i=r(33);t.exports=function(t){return e[t]||(e[t]=i(t))}},function(t,n){t.exports="constructor,hasOwnProperty,isPrototypeOf,propertyIsEnumerable,toLocaleString,toString,valueOf".split(",")},function(t,n,r){var e=r(2).document;t.exports=e&&e.documentElement},function(t,n,r){var e=r(4),i=r(1),o=function(t,n){if(i(t),!e(n)&&null!==n)throw TypeError(n+": can't set as prototype!")};t.exports={set:Object.setPrototypeOf||("__proto__"in{}?function(t,n,e){try{(e=r(19)(Function.call,r(16).f(Object.prototype,"__proto__").set,2))(t,[]),n=!(t instanceof Array)}catch(t){n=!0}return function(t,r){return o(t,r),n?t.__proto__=r:e(t,r),t}}({},!1):void 0),check:o}},function(t,n){t.exports="\t\n\v\f\r   ᠎              \u2028\u2029\ufeff"},function(t,n,r){var e=r(4),i=r(70).set;t.exports=function(t,n,r){var o,u=n.constructor;return u!==r&&"function"==typeof u&&(o=u.prototype)!==r.prototype&&e(o)&&i&&i(t,o),t}},function(t,n,r){"use strict";var e=r(24),i=r(23);t.exports=function(t){var n=String(i(this)),r="",o=e(t);if(o<0||o==1/0)throw RangeError("Count can't be negative");for(;o>0;(o>>>=1)&&(n+=n))1&o&&(r+=n);return r}},function(t,n){t.exports=Math.sign||function(t){return 0==(t=+t)||t!=t?t:t<0?-1:1}},function(t,n){var r=Math.expm1;t.exports=!r||r(10)>22025.465794806718||r(10)<22025.465794806718||-2e-17!=r(-2e-17)?function(t){return 0==(t=+t)?t:t>-1e-6&&t<1e-6?t+t*t/2:Math.exp(t)-1}:r},function(t,n,r){var e=r(24),i=r(23);t.exports=function(t){return function(n,r){var o,u,c=String(i(n)),a=e(r),f=c.length;return a<0||a>=f?t?"":void 0:(o=c.charCodeAt(a))<55296||o>56319||a+1===f||(u=c.charCodeAt(a+1))<56320||u>57343?t?c.charAt(a):o:t?c.slice(a,a+2):u-56320+(o-55296<<10)+65536}}},function(t,n,r){"use strict";var e=r(30),i=r(0),o=r(12),u=r(11),c=r(44),a=r(78),f=r(42),s=r(17),l=r(5)("iterator"),h=!([].keys&&"next"in[].keys()),v=function(){return this};t.exports=function(t,n,r,p,d,g,y){a(r,n,p);var m,b,w,S=function(t){if(!h&&t in O)return O[t];switch(t){case"keys":case"values":return function(){return new r(this,t)}}return function(){return new r(this,t)}},x=n+" Iterator",_="values"==d,E=!1,O=t.prototype,M=O[l]||O["@@iterator"]||d&&O[d],P=M||S(d),F=d?_?S("entries"):P:void 0,A="Array"==n&&O.entries||M;if(A&&(w=s(A.call(new t)))!==Object.prototype&&w.next&&(f(w,x,!0),e||"function"==typeof w[l]||u(w,l,v)),_&&M&&"values"!==M.name&&(E=!0,P=function(){return M.call(this)}),e&&!y||!h&&!E&&O[l]||u(O,l,P),c[n]=P,c[x]=v,d)if(m={values:_?P:S("values"),keys:g?P:S("keys"),entries:F},y)for(b in m)b in O||o(O,b,m[b]);else i(i.P+i.F*(h||E),n,m);return m}},function(t,n,r){"use strict";var e=r(36),i=r(32),o=r(42),u={};r(11)(u,r(5)("iterator"),function(){return this}),t.exports=function(t,n,r){t.prototype=e(u,{next:i(1,r)}),o(t,n+" Iterator")}},function(t,n,r){var e=r(53),i=r(23);t.exports=function(t,n,r){if(e(n))throw TypeError("String#"+r+" doesn't accept regex!");return String(i(t))}},function(t,n,r){var e=r(5)("match");t.exports=function(t){var n=/./;try{"/./"[t](n)}catch(r){try{return n[e]=!1,!"/./"[t](n)}catch(t){}}return!0}},function(t,n,r){var e=r(44),i=r(5)("iterator"),o=Array.prototype;t.exports=function(t){return void 0!==t&&(e.Array===t||o[i]===t)}},function(t,n,r){"use strict";var e=r(7),i=r(32);t.exports=function(t,n,r){n in t?e.f(t,n,i(0,r)):t[n]=r}},function(t,n,r){var 
e=r(48),i=r(5)("iterator"),o=r(44);t.exports=r(18).getIteratorMethod=function(t){if(null!=t)return t[i]||t["@@iterator"]||o[e(t)]}},function(t,n,r){var e=r(221);t.exports=function(t,n){return new(e(t))(n)}},function(t,n,r){"use strict";var e=r(9),i=r(35),o=r(8);t.exports=function(t){for(var n=e(this),r=o(n.length),u=arguments.length,c=i(u>1?arguments[1]:void 0,r),a=u>2?arguments[2]:void 0,f=void 0===a?r:i(a,r);f>c;)n[c++]=t;return n}},function(t,n,r){"use strict";var e=r(31),i=r(108),o=r(44),u=r(15);t.exports=r(77)(Array,"Array",function(t,n){this._t=u(t),this._i=0,this._k=n},function(){var t=this._t,n=this._k,r=this._i++;return!t||r>=t.length?(this._t=void 0,i(1)):i(0,"keys"==n?r:"values"==n?t[r]:[r,t[r]])},"values"),o.Arguments=o.Array,e("keys"),e("values"),e("entries")},function(t,n,r){var e,i,o,u=r(19),c=r(98),a=r(69),f=r(65),s=r(2),l=s.process,h=s.setImmediate,v=s.clearImmediate,p=s.MessageChannel,d=s.Dispatch,g=0,y={},m=function(){var t=+this;if(y.hasOwnProperty(t)){var n=y[t];delete y[t],n()}},b=function(t){m.call(t.data)};h&&v||(h=function(t){for(var n=[],r=1;arguments.length>r;)n.push(arguments[r++]);return y[++g]=function(){c("function"==typeof t?t:Function(t),n)},e(g),g},v=function(t){delete y[t]},"process"==r(20)(l)?e=function(t){l.nextTick(u(m,t,1))}:d&&d.now?e=function(t){d.now(u(m,t,1))}:p?(o=(i=new p).port2,i.port1.onmessage=b,e=u(o.postMessage,o,1)):s.addEventListener&&"function"==typeof postMessage&&!s.importScripts?(e=function(t){s.postMessage(t+"","*")},s.addEventListener("message",b,!1)):e="onreadystatechange"in f("script")?function(t){a.appendChild(f("script")).onreadystatechange=function(){a.removeChild(this),m.call(t)}}:function(t){setTimeout(u(m,t,1),0)}),t.exports={set:h,clear:v}},function(t,n,r){var e=r(2),i=r(87).set,o=e.MutationObserver||e.WebKitMutationObserver,u=e.process,c=e.Promise,a="process"==r(20)(u);t.exports=function(){var t,n,r,f=function(){var e,i;for(a&&(e=u.domain)&&e.exit();t;){i=t.fn,t=t.next;try{i()}catch(e){throw t?r():n=void 0,e}}n=void 0,e&&e.enter()};if(a)r=function(){u.nextTick(f)};else if(!o||e.navigator&&e.navigator.standalone)if(c&&c.resolve){var s=c.resolve(void 0);r=function(){s.then(f)}}else r=function(){i.call(e,f)};else{var l=!0,h=document.createTextNode("");new o(f).observe(h,{characterData:!0}),r=function(){h.data=l=!l}}return function(e){var i={fn:e,next:void 0};n&&(n.next=i),t||(t=i,r()),n=i}}},function(t,n,r){"use strict";var e=r(10);function i(t){var n,r;this.promise=new t(function(t,e){if(void 0!==n||void 0!==r)throw TypeError("Bad Promise constructor");n=t,r=e}),this.resolve=e(n),this.reject=e(r)}t.exports.f=function(t){return new i(t)}},function(t,n,r){"use strict";var e=r(2),i=r(6),o=r(30),u=r(60),c=r(11),a=r(41),f=r(3),s=r(39),l=r(24),h=r(8),v=r(117),p=r(37).f,d=r(7).f,g=r(85),y=r(42),m="prototype",b="Wrong index!",w=e.ArrayBuffer,S=e.DataView,x=e.Math,_=e.RangeError,E=e.Infinity,O=w,M=x.abs,P=x.pow,F=x.floor,A=x.log,j=x.LN2,I=i?"_b":"buffer",N=i?"_l":"byteLength",L=i?"_o":"byteOffset";function T(t,n,r){var e,i,o,u=new Array(r),c=8*r-n-1,a=(1<<c)-1,f=a>>1,s=23===n?P(2,-24)-P(2,-77):0,l=0,h=t<0||0===t&&1/t<0?1:0;for((t=M(t))!=t||t===E?(i=t!=t?1:0,e=a):(e=F(A(t)/j),t*(o=P(2,-e))<1&&(e--,o*=2),(t+=e+f>=1?s/o:s*P(2,1-f))*o>=2&&(e++,o/=2),e+f>=a?(i=0,e=a):e+f>=1?(i=(t*o-1)*P(2,n),e+=f):(i=t*P(2,f-1)*P(2,n),e=0));n>=8;u[l++]=255&i,i/=256,n-=8);for(e=e<<n|i,c+=n;c>0;u[l++]=255&e,e/=256,c-=8);return u[--l]|=128*h,u}function k(t,n,r){var 
e,i=8*r-n-1,o=(1<<i)-1,u=o>>1,c=i-7,a=r-1,f=t[a--],s=127&f;for(f>>=7;c>0;s=256*s+t[a],a--,c-=8);for(e=s&(1<<-c)-1,s>>=-c,c+=n;c>0;e=256*e+t[a],a--,c-=8);if(0===s)s=1-u;else{if(s===o)return e?NaN:f?-E:E;e+=P(2,n),s-=u}return(f?-1:1)*e*P(2,s-n)}function R(t){return t[3]<<24|t[2]<<16|t[1]<<8|t[0]}function C(t){return[255&t]}function D(t){return[255&t,t>>8&255]}function W(t){return[255&t,t>>8&255,t>>16&255,t>>24&255]}function U(t){return T(t,52,8)}function G(t){return T(t,23,4)}function V(t,n,r){d(t[m],n,{get:function(){return this[r]}})}function B(t,n,r,e){var i=v(+r);if(i+n>t[N])throw _(b);var o=t[I]._b,u=i+t[L],c=o.slice(u,u+n);return e?c:c.reverse()}function z(t,n,r,e,i,o){var u=v(+r);if(u+n>t[N])throw _(b);for(var c=t[I]._b,a=u+t[L],f=e(+i),s=0;s<n;s++)c[a+s]=f[o?s:n-s-1]}if(u.ABV){if(!f(function(){w(1)})||!f(function(){new w(-1)})||f(function(){return new w,new w(1.5),new w(NaN),"ArrayBuffer"!=w.name})){for(var q,J=(w=function(t){return s(this,w),new O(v(t))})[m]=O[m],Y=p(O),K=0;Y.length>K;)(q=Y[K++])in w||c(w,q,O[q]);o||(J.constructor=w)}var H=new S(new w(2)),X=S[m].setInt8;H.setInt8(0,2147483648),H.setInt8(1,2147483649),!H.getInt8(0)&&H.getInt8(1)||a(S[m],{setInt8:function(t,n){X.call(this,t,n<<24>>24)},setUint8:function(t,n){X.call(this,t,n<<24>>24)}},!0)}else w=function(t){s(this,w,"ArrayBuffer");var n=v(t);this._b=g.call(new Array(n),0),this[N]=n},S=function(t,n,r){s(this,S,"DataView"),s(t,w,"DataView");var e=t[N],i=l(n);if(i<0||i>e)throw _("Wrong offset!");if(i+(r=void 0===r?e-i:h(r))>e)throw _("Wrong length!");this[I]=t,this[L]=i,this[N]=r},i&&(V(w,"byteLength","_l"),V(S,"buffer","_b"),V(S,"byteLength","_l"),V(S,"byteOffset","_o")),a(S[m],{getInt8:function(t){return B(this,1,t)[0]<<24>>24},getUint8:function(t){return B(this,1,t)[0]},getInt16:function(t){var n=B(this,2,t,arguments[1]);return(n[1]<<8|n[0])<<16>>16},getUint16:function(t){var n=B(this,2,t,arguments[1]);return n[1]<<8|n[0]},getInt32:function(t){return R(B(this,4,t,arguments[1]))},getUint32:function(t){return R(B(this,4,t,arguments[1]))>>>0},getFloat32:function(t){return k(B(this,4,t,arguments[1]),23,4)},getFloat64:function(t){return k(B(this,8,t,arguments[1]),52,8)},setInt8:function(t,n){z(this,1,t,C,n)},setUint8:function(t,n){z(this,1,t,C,n)},setInt16:function(t,n){z(this,2,t,D,n,arguments[2])},setUint16:function(t,n){z(this,2,t,D,n,arguments[2])},setInt32:function(t,n){z(this,4,t,W,n,arguments[2])},setUint32:function(t,n){z(this,4,t,W,n,arguments[2])},setFloat32:function(t,n){z(this,4,t,G,n,arguments[2])},setFloat64:function(t,n){z(this,8,t,U,n,arguments[2])}});y(w,"ArrayBuffer"),y(S,"DataView"),c(S[m],u.VIEW,!0),n.ArrayBuffer=w,n.DataView=S},function(t,n,r){t.exports=!r(6)&&!r(3)(function(){return 7!=Object.defineProperty(r(65)("div"),"a",{get:function(){return 7}}).a})},function(t,n,r){n.f=r(5)},function(t,n,r){var e=r(14),i=r(15),o=r(50)(!1),u=r(67)("IE_PROTO");t.exports=function(t,n){var r,c=i(t),a=0,f=[];for(r in c)r!=u&&e(c,r)&&f.push(r);for(;n.length>a;)e(c,r=n[a++])&&(~o(f,r)||f.push(r));return f}},function(t,n,r){var e=r(7),i=r(1),o=r(34);t.exports=r(6)?Object.defineProperties:function(t,n){i(t);for(var r,u=o(n),c=u.length,a=0;c>a;)e.f(t,r=u[a++],n[r]);return t}},function(t,n,r){var e=r(15),i=r(37).f,o={}.toString,u="object"==typeof window&&window&&Object.getOwnPropertyNames?Object.getOwnPropertyNames(window):[];t.exports.f=function(t){return u&&"[object Window]"==o.call(t)?function(t){try{return i(t)}catch(t){return u.slice()}}(t):i(e(t))}},function(t,n,r){"use strict";var 
e=r(34),i=r(51),o=r(47),u=r(9),c=r(46),a=Object.assign;t.exports=!a||r(3)(function(){var t={},n={},r=Symbol(),e="abcdefghijklmnopqrst";return t[r]=7,e.split("").forEach(function(t){n[t]=t}),7!=a({},t)[r]||Object.keys(a({},n)).join("")!=e})?function(t,n){for(var r=u(t),a=arguments.length,f=1,s=i.f,l=o.f;a>f;)for(var h,v=c(arguments[f++]),p=s?e(v).concat(s(v)):e(v),d=p.length,g=0;d>g;)l.call(v,h=p[g++])&&(r[h]=v[h]);return r}:a},function(t,n,r){"use strict";var e=r(10),i=r(4),o=r(98),u=[].slice,c={};t.exports=Function.bind||function(t){var n=e(this),r=u.call(arguments,1),a=function(){var e=r.concat(u.call(arguments));return this instanceof a?function(t,n,r){if(!(n in c)){for(var e=[],i=0;i<n;i++)e[i]="a["+i+"]";c[n]=Function("F,a","return new F("+e.join(",")+")")}return c[n](t,r)}(n,e.length,e):o(n,e,t)};return i(n.prototype)&&(a.prototype=n.prototype),a}},function(t,n){t.exports=function(t,n,r){var e=void 0===r;switch(n.length){case 0:return e?t():t.call(r);case 1:return e?t(n[0]):t.call(r,n[0]);case 2:return e?t(n[0],n[1]):t.call(r,n[0],n[1]);case 3:return e?t(n[0],n[1],n[2]):t.call(r,n[0],n[1],n[2]);case 4:return e?t(n[0],n[1],n[2],n[3]):t.call(r,n[0],n[1],n[2],n[3])}return t.apply(r,n)}},function(t,n,r){var e=r(2).parseInt,i=r(43).trim,o=r(71),u=/^[-+]?0[xX]/;t.exports=8!==e(o+"08")||22!==e(o+"0x16")?function(t,n){var r=i(String(t),3);return e(r,n>>>0||(u.test(r)?16:10))}:e},function(t,n,r){var e=r(2).parseFloat,i=r(43).trim;t.exports=1/e(r(71)+"-0")!=-1/0?function(t){var n=i(String(t),3),r=e(n);return 0===r&&"-"==n.charAt(0)?-0:r}:e},function(t,n,r){var e=r(20);t.exports=function(t,n){if("number"!=typeof t&&"Number"!=e(t))throw TypeError(n);return+t}},function(t,n,r){var e=r(4),i=Math.floor;t.exports=function(t){return!e(t)&&isFinite(t)&&i(t)===t}},function(t,n){t.exports=Math.log1p||function(t){return(t=+t)>-1e-8&&t<1e-8?t-t*t/2:Math.log(1+t)}},function(t,n,r){var e=r(74),i=Math.pow,o=i(2,-52),u=i(2,-23),c=i(2,127)*(2-u),a=i(2,-126);t.exports=Math.fround||function(t){var n,r,i=Math.abs(t),f=e(t);return i<a?f*(i/a/u+1/o-1/o)*a*u:(r=(n=(1+u/o)*i)-(n-i))>c||r!=r?f*(1/0):f*r}},function(t,n,r){var e=r(1);t.exports=function(t,n,r,i){try{return i?n(e(r)[0],r[1]):n(r)}catch(n){var o=t.return;throw void 0!==o&&e(o.call(t)),n}}},function(t,n,r){var e=r(10),i=r(9),o=r(46),u=r(8);t.exports=function(t,n,r,c,a){e(n);var f=i(t),s=o(f),l=u(f.length),h=a?l-1:0,v=a?-1:1;if(r<2)for(;;){if(h in s){c=s[h],h+=v;break}if(h+=v,a?h<0:l<=h)throw TypeError("Reduce of empty array with no initial value")}for(;a?h>=0:l>h;h+=v)h in s&&(c=n(c,s[h],h,f));return c}},function(t,n,r){"use strict";var e=r(9),i=r(35),o=r(8);t.exports=[].copyWithin||function(t,n){var r=e(this),u=o(r.length),c=i(t,u),a=i(n,u),f=arguments.length>2?arguments[2]:void 0,s=Math.min((void 0===f?u:i(f,u))-a,u-c),l=1;for(a<c&&c<a+s&&(l=-1,a+=s-1,c+=s-1);s-- >0;)a in r?r[c]=r[a]:delete r[c],c+=l,a+=l;return r}},function(t,n){t.exports=function(t,n){return{value:n,done:!!t}}},function(t,n,r){r(6)&&"g"!=/./g.flags&&r(7).f(RegExp.prototype,"flags",{configurable:!0,get:r(55)})},function(t,n){t.exports=function(t){try{return{e:!1,v:t()}}catch(t){return{e:!0,v:t}}}},function(t,n,r){var e=r(1),i=r(4),o=r(89);t.exports=function(t,n){if(e(t),i(n)&&n.constructor===t)return n;var r=o.f(t);return(0,r.resolve)(n),r.promise}},function(t,n,r){"use strict";var e=r(113),i=r(45);t.exports=r(59)("Map",function(t){return function(){return t(this,arguments.length>0?arguments[0]:void 0)}},{get:function(t){var n=e.getEntry(i(this,"Map"),t);return 
[Remainder of a minified webpack JavaScript bundle added by this commit (a single long line in the original): core-js/babel-polyfill polyfills, regenerator-runtime, and the Singular UAA session-management client, which injects hidden "opFrame"/"rpFrame" iframes pointed at the UAA, decodes JWT identity claims, fetches access tokens through the rpFrame, and rejects UAA servers older than version 4.10.0. No newline at end of file.]
diff --git a/deps/rabbitmq_management/priv/www/js/tmpl/404.ejs b/deps/rabbitmq_management/priv/www/js/tmpl/404.ejs
new file mode 100644
index 0000000000..d6279481c8
--- /dev/null
+++ b/deps/rabbitmq_management/priv/www/js/tmpl/404.ejs
@@ -0,0 +1,3 @@
+<h1>Not found</h1>
+
+<p>The object you clicked on was not found; it may have been deleted on the server.</p>
diff --git a/deps/rabbitmq_management/priv/www/js/tmpl/add-binding.ejs b/deps/rabbitmq_management/priv/www/js/tmpl/add-binding.ejs
new file mode 100644
index 0000000000..3f5b83205b
--- /dev/null
+++ b/deps/rabbitmq_management/priv/www/js/tmpl/add-binding.ejs
@@ -0,0 +1,49 @@
+<% if (mode == 'queue') { %>
+ <h3 style="padding-top: 20px;">Add binding to this queue</h3>
+<% } else { %>
+ <h3 style="padding-top: 20px;">Add binding from this exchange</h3>
+<% } %>
+ <form action="#/bindings" method="post">
+ <input type="hidden" name="vhost" value="<%= fmt_string(parent.vhost) %>"/>
+<% if (mode == 'queue') { %>
+ <input type="hidden" name="destination" value="<%= fmt_string(parent.name) %>"/>
+<% } else { %>
+ <input type="hidden" name="source" value="<%= fmt_string(parent.name) %>"/>
+<% } %>
+ <table class="form">
+<% if (mode == 'queue') { %>
+ <tr>
+ <th>
+ <label>From exchange:</label>
+ </th>
+ <td>
+ <input type="hidden" name="destination_type" value="q"/>
+ <input type="text" name="source" value=""/>
+ <span class="mand">*</span>
+ </td>
+ </tr>
+<% } else { %>
+ <tr>
+ <th>
+ <select name="destination_type" class="narrow">
+ <option value="e">To exchange</option>
+ <option value="q" selected="selected">To queue</option>
+ </select>:
+ </th>
+ <td>
+ <input type="text" name="destination" value=""/>
+ <span class="mand">*</span>
+ </td>
+ </tr>
+<% } %>
+ <tr>
+ <th><label>Routing key:</label></th>
+ <td><input type="text" name="routing_key" value=""/></td>
+ </tr>
+ <tr>
+ <th><label>Arguments:</label></th>
+ <td><div class="multifield" id="arguments"></div></td>
+ </tr>
+ </table>
+ <input type="submit" value="Bind"/>
+ </form>
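The Bind form above posts to the UI route #/bindings, which the management app's dispatcher turns into an HTTP API call. As a hedged sketch, the equivalent direct request against the documented binding-creation endpoint; host, credentials, vhost, and names here are all hypothetical:

    // POST /api/bindings/{vhost}/e/{source}/q/{destination} creates a binding;
    // "%2F" is the URL-encoded default vhost. All concrete values are examples.
    fetch("http://localhost:15672/api/bindings/%2F/e/my-exchange/q/my-queue", {
      method: "POST",
      headers: {
        "Authorization": "Basic " + btoa("guest:guest"),
        "Content-Type": "application/json"
      },
      body: JSON.stringify({ routing_key: "my.key", arguments: {} })
    });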
diff --git a/deps/rabbitmq_management/priv/www/js/tmpl/binary.ejs b/deps/rabbitmq_management/priv/www/js/tmpl/binary.ejs
new file mode 100644
index 0000000000..e19921e12d
--- /dev/null
+++ b/deps/rabbitmq_management/priv/www/js/tmpl/binary.ejs
@@ -0,0 +1,61 @@
+<%
+ if (binary == "not_available") {
+%>
+<p class="warning">
+ Binary statistics not available.
+</p>
+<% } else { %>
+<%
+ var sections = {'queue_procs' : ['queue', 'Classic queues (masters)'],
+ 'queue_slave_procs' : ['queue', 'Classic queues (mirrors)'],
+ 'quorum_queue_procs' : ['queue', 'Quorum queues'],
+ 'stream_queue_procs' : ['queue', 'Stream queues'],
+ 'stream_queue_replica_reader_procs' : ['queue', 'Stream queues (replica reader)'],
+ 'stream_queue_coordinator_procs' : ['queue', 'Stream queues (coordinator)'],
+ 'connection_readers' : ['conn', 'Connection readers'],
+ 'connection_writers' : ['conn', 'Connection writers'],
+ 'connection_channels' : ['conn', 'Connection channels'],
+ 'connection_other' : ['conn', 'Connections (other)'],
+ 'msg_index' : ['table', 'Message store index'],
+ 'mgmt_db' : ['table', 'Management database'],
+ 'plugins' : ['proc', 'Plugins'],
+ 'other' : ['system', 'Other binary references']};
+ var total_out = [];
+%>
+<%= format('memory-bar', {sections: sections, memory: binary, total_out: total_out}) %>
+<span class="clear">&nbsp;</span>
+<div class="box">
+<%
+var key = [[{name: 'Queues', colour: 'queue',
+ keys: [['queue_procs', 'queues'],
+ ['queue_slave_procs', 'mirrors'],
+ ['quorum_queue_procs', 'quorum'],
+ ['stream_queue_procs', 'stream'],
+ ['stream_queue_replica_reader_procs', 'stream replica reader'],
+ ['stream_queue_coordinator_procs', 'stream coordinator']]}],
+
+ [{name: 'Connections', colour: 'conn',
+ keys: [['connection_readers', 'readers'],
+ ['connection_writers', 'writers'],
+ ['connection_channels', 'channels'],
+ ['connection_other', 'other']]}],
+
+ [{name: 'Tables', colour: 'table',
+ keys: [['msg_index', 'message store index'],
+ ['mgmt_db', 'management database']]}],
+
+ [{name: 'Processes', colour: 'proc',
+ keys: [['plugins', 'plugins']]},
+ {name: 'System', colour: 'system',
+ keys: [['other', 'other']]}]];
+%>
+<%= format('memory-table', {key: key, memory: binary}) %>
+</div>
+
+<div class="memory-info">
+ Last updated: <b><%= fmt_date(new Date()) %></b>.<br/>
+  Total size of referenced binaries at last update: <b><%= fmt_bytes(total_out[0]) %></b>
+ <span class="help" id="binary-use"></span>
+</div>
+
+<% } %>
diff --git a/deps/rabbitmq_management/priv/www/js/tmpl/bindings.ejs b/deps/rabbitmq_management/priv/www/js/tmpl/bindings.ejs
new file mode 100644
index 0000000000..a4e77e4446
--- /dev/null
+++ b/deps/rabbitmq_management/priv/www/js/tmpl/bindings.ejs
@@ -0,0 +1,66 @@
+<%= maybe_truncate(bindings) %>
+<% if (bindings.length > 0) { %>
+ <table class="list updatable">
+ <thead>
+ <tr>
+<% if (mode == 'exchange_source') { %>
+ <th>To</th>
+<% } else { %>
+ <th>From</th>
+<% } %>
+ <th>Routing key</th>
+ <th>Arguments</th>
+ <th></th>
+ </tr>
+ </thead>
+ <tbody>
+ <%
+ for (var i = 0; i < bindings.length; i++) {
+ var binding = bindings[i];
+ %>
+ <tr<%= alt_rows(i)%>>
+<% if (binding.source == '') { %>
+ <td colspan="4">
+ (Default exchange binding)
+ </td>
+<% } else { %>
+<% if (mode == 'queue' || mode == 'exchange_destination') { %>
+ <td>
+ <span class="exchange">
+ <%= link_exchange(binding.vhost, binding.source) %>
+ </span>
+ </td>
+<% } else if (binding.destination_type == 'exchange') { %>
+ <td>
+ <span class="exchange" title="Exchange">
+ <%= link_exchange(binding.vhost, binding.destination) %>
+ </span>
+ </td>
+<% } else { %>
+ <td>
+ <span class="queue" title="Queue">
+ <%= link_queue(binding.vhost, binding.destination) %>
+ </span>
+ </td>
+<% } %>
+ <td><%= fmt_string(binding.routing_key) %></td>
+ <td><%= fmt_table_short(binding.arguments) %></td>
+ <td class="c">
+ <form action="#/bindings" method="delete" class="confirm">
+ <input type="hidden" name="vhost" value="<%= fmt_string(binding.vhost) %>"/>
+ <input type="hidden" name="source" value="<%= fmt_exchange_url(binding.source) %>"/>
+ <input type="hidden" name="destination" value="<%= fmt_string(binding.destination) %>"/>
+ <input type="hidden" name="destination_type" value="<%= binding.destination_type.substring(0, 1) %>"/>
+ <input type="hidden" name="properties_key" value="<%= fmt_string(binding.properties_key) %>"/>
+ <input type="submit" value="Unbind"/>
+ </form>
+ </td>
+ <% } %>
+ </tr>
+ <% } %>
+ </tbody>
+ </table>
+
+<% } else { %>
+ <p>... no bindings ...</p>
+<% } %>
diff --git a/deps/rabbitmq_management/priv/www/js/tmpl/channel.ejs b/deps/rabbitmq_management/priv/www/js/tmpl/channel.ejs
new file mode 100644
index 0000000000..5ecd7f801e
--- /dev/null
+++ b/deps/rabbitmq_management/priv/www/js/tmpl/channel.ejs
@@ -0,0 +1,144 @@
+<h1>Channel: <b><%= fmt_escape_html(channel.name) %></b><%= fmt_maybe_vhost(channel.vhost) %></h1>
+
+<div class="section">
+<h2>Overview</h2>
+<div class="hider updatable">
+<% if (rates_mode != 'none') { %>
+ <%= message_rates('msg-rates-ch', channel.message_stats) %>
+<% } %>
+
+<h3>Details</h3>
+<table class="facts facts-l">
+ <tr>
+ <th>Connection</th>
+ <td><%= link_conn(channel.connection_details.name) %></td>
+ </tr>
+<% if (nodes_interesting) { %>
+ <tr>
+ <th>Node</th>
+ <td><%= fmt_node(channel.node) %></td>
+ </tr>
+<% } %>
+ <tr>
+ <th>Username</th>
+ <td><%= fmt_string(channel.user) %></td>
+ </tr>
+ <tr>
+ <th>Mode <span class="help" id="channel-mode"></span></th>
+ <td><%= fmt_channel_mode(channel) %></td>
+ </tr>
+</table>
+
+<table class="facts facts-l">
+ <tr>
+ <th>State</th>
+ <td><%= fmt_object_state(channel) %></td>
+ </tr>
+ <tr>
+ <th>Prefetch count</th>
+ <td><%= channel.prefetch_count %></td>
+ </tr>
+ <tr>
+ <th>Global prefetch count</th>
+ <td><%= channel.global_prefetch_count %></td>
+ </tr>
+</table>
+
+<table class="facts">
+ <tr>
+ <th>Messages unacknowledged</th>
+ <td><%= channel.messages_unacknowledged %></td>
+ </tr>
+ <tr>
+ <th>Messages unconfirmed</th>
+ <td><%= channel.messages_unconfirmed %></td>
+ </tr>
+ <tr>
+ <th>Messages uncommitted</th>
+ <td><%= channel.messages_uncommitted %></td>
+ </tr>
+ <tr>
+ <th>Acks uncommitted</th>
+ <td><%= channel.acks_uncommitted %></td>
+ </tr>
+</table>
+
+<table class="facts">
+ <tr>
+ <th>Pending Raft commands</th>
+ <td><%= channel.pending_raft_commands %></td>
+ </tr>
+</table>
+
+</div>
+</div>
+
+<div class="section">
+ <h2>Consumers</h2>
+ <div class="hider updatable">
+<%= format('consumers', {'mode': 'channel', 'consumers': channel.consumer_details}) %>
+ </div>
+</div>
+
+<% if (rates_mode == 'detailed') { %>
+<div class="section">
+<h2>Message rates breakdown</h2>
+<div class="hider updatable">
+<table class="two-col-layout">
+ <tr>
+ <td>
+ <%= format('msg-detail-publishes',
+ {'mode': 'channel',
+ 'object': channel.publishes,
+ 'label': 'Publishes'}) %>
+ </td>
+ <td>
+ <%= format('msg-detail-deliveries',
+ {'mode': 'channel',
+ 'object': channel.deliveries}) %>
+ </td>
+ </tr>
+</table>
+</div>
+</div>
+<% } %>
+
+<% if(channel.reductions || channel.garbage_collection) { %>
+<div class="section-hidden">
+<h2>Runtime Metrics (Advanced)</h2>
+ <div class="hider updatable">
+ <%= data_reductions('reductions-rates-conn', channel) %>
+ <table class="facts">
+ <% if (channel.garbage_collection.min_bin_vheap_size) { %>
+ <tr>
+ <th>Minimum binary virtual heap size in words (min_bin_vheap_size)</th>
+ <td><%= channel.garbage_collection.min_bin_vheap_size %></td>
+ </tr>
+ <% } %>
+
+ <% if (channel.garbage_collection.min_heap_size) { %>
+ <tr>
+ <th>Minimum heap size in words (min_heap_size)</th>
+ <td><%= channel.garbage_collection.min_heap_size %></td>
+ </tr>
+ <% } %>
+
+ <% if (channel.garbage_collection.fullsweep_after) { %>
+ <tr>
+ <th>Maximum generational collections before fullsweep (fullsweep_after)</th>
+ <td><%= channel.garbage_collection.fullsweep_after %></td>
+ </tr>
+ <% } %>
+
+ <% if (channel.garbage_collection.minor_gcs) { %>
+ <tr>
+ <th>Number of minor GCs (minor_gcs)</th>
+ <td><%= channel.garbage_collection.minor_gcs %></td>
+ </tr>
+ <% } %>
+ </table>
+ </div>
+</div>
+
+<% } %>
+
diff --git a/deps/rabbitmq_management/priv/www/js/tmpl/channels-list.ejs b/deps/rabbitmq_management/priv/www/js/tmpl/channels-list.ejs
new file mode 100644
index 0000000000..8acf57ab4d
--- /dev/null
+++ b/deps/rabbitmq_management/priv/www/js/tmpl/channels-list.ejs
@@ -0,0 +1,203 @@
+<% if (channels.length > 0) { %>
+<table class="list">
+ <thead>
+ <tr>
+<% if (mode == 'standalone') { %>
+ <%= group_heading('channels', 'Overview', [true, vhosts_interesting, nodes_interesting]) %>
+<% } else { %>
+ <%= group_heading('channels', 'Overview', [true]) %>
+<% } %>
+ <%= group_heading('channels', 'Details', []) %>
+ <%= group_heading('channels', 'Transactions', []) %>
+<% if (rates_mode != 'none') { %>
+ <%= group_heading('channels', 'Message rates', []) %>
+<% } %>
+ <th class="plus-minus"><span class="popup-options-link" title="Click to change columns" type="columns" for="channels">+/-</span></th>
+ </tr>
+ <tr>
+<% if (mode == 'standalone') { %>
+ <th><%= fmt_sort('Channel', 'name') %></th>
+<% if (nodes_interesting) { %>
+ <th><%= fmt_sort('Node', 'node') %></th>
+<% } %>
+<% if (vhosts_interesting) { %>
+ <th><%= fmt_sort('Virtual host', 'vhost') %></th>
+<% } %>
+<% if (show_column('channels', 'user')) { %>
+ <th><%= fmt_sort('User name', 'user') %></th>
+<% } %>
+<% if (show_column('channels', 'mode')) { %>
+ <th>Mode <span class="help" id="channel-mode"></span></th>
+<% } %>
+<% if (show_column('channels', 'state')) { %>
+ <th><%= fmt_sort('State', 'state') %></th>
+<% } %>
+<% if (show_column('channels', 'msgs-unconfirmed')) { %>
+ <th><%= fmt_sort('Unconfirmed', 'messages_unconfirmed') %></th>
+<% } %>
+<% if (show_column('channels', 'prefetch')) { %>
+ <th>Prefetch <span class="help" id="channel-prefetch"></span></th>
+<% } %>
+<% if (show_column('channels', 'msgs-unacked')) { %>
+ <th><%= fmt_sort('Unacked', 'messages_unacknowledged') %></th>
+<% } %>
+<% if (show_column('channels', 'msgs-uncommitted')) { %>
+ <th><%= fmt_sort('Uncommitted msgs', 'messages_uncommitted') %></th>
+<% } %>
+<% if (show_column('channels', 'acks-uncommitted')) { %>
+ <th><%= fmt_sort('Uncommitted acks', 'acks_uncommitted') %></th>
+<% } %>
+<% if (rates_mode != 'none') { %>
+<% if (show_column('channels', 'rate-publish')) { %>
+ <th><%= fmt_sort('publish', 'message_stats.publish_details.rate') %></th>
+<% } %>
+<% if (show_column('channels', 'rate-confirm')) { %>
+ <th><%= fmt_sort('confirm', 'message_stats.confirm_details.rate') %></th>
+<% } %>
+<% if (show_column('channels', 'rate-unroutable-drop')) { %>
+ <th><%= fmt_sort('unroutable (drop)', 'message_stats.drop_unroutable_details.rate') %></th>
+<% } %>
+<% if (show_column('channels', 'rate-unroutable-return')) { %>
+ <th><%= fmt_sort('unroutable (return)', 'message_stats.return_unroutable_details.rate') %></th>
+<% } %>
+<% if (show_column('channels', 'rate-deliver')) { %>
+ <th><%= fmt_sort('deliver / get', 'message_stats.deliver_get_details.rate') %></th>
+<% } %>
+<% if (show_column('channels', 'rate-redeliver')) { %>
+ <th><%= fmt_sort('redelivered', 'message_stats.redeliver_details.rate') %></th>
+<% } %>
+<% if (show_column('channels', 'rate-ack')) { %>
+ <th><%= fmt_sort('ack', 'message_stats.ack_details.rate') %></th>
+<% } %>
+<% } %>
+<% } else { %>
+<!-- TODO make sortable after bug 23401 -->
+ <th>Channel</th>
+<% if (show_column('channels', 'user')) { %>
+ <th>User name</th>
+<% } %>
+<% if (show_column('channels', 'mode')) { %>
+ <th>Mode <span class="help" id="channel-mode"></span></th>
+<% } %>
+<% if (show_column('channels', 'state')) { %>
+ <th>State</th>
+<% } %>
+<% if (show_column('channels', 'msgs-unconfirmed')) { %>
+ <th>Unconfirmed</th>
+<% } %>
+<% if (show_column('channels', 'prefetch')) { %>
+ <th>Prefetch <span class="help" id="channel-prefetch"></span></th>
+<% } %>
+<% if (show_column('channels', 'msgs-unacked')) { %>
+ <th>Unacked</th>
+<% } %>
+<% if (show_column('channels', 'msgs-uncommitted')) { %>
+ <th>Uncommitted msgs</th>
+<% } %>
+<% if (show_column('channels', 'acks-uncommitted')) { %>
+ <th>Uncommitted acks</th>
+<% } %>
+<% if (rates_mode != 'none') { %>
+<% if (show_column('channels', 'rate-publish')) { %>
+ <th>publish</th>
+<% } %>
+<% if (show_column('channels', 'rate-confirm')) { %>
+ <th>confirm</th>
+<% } %>
+<% if (show_column('channels', 'rate-unroutable-drop')) { %>
+ <th>unroutable (drop)</th>
+<% } %>
+<% if (show_column('channels', 'rate-unroutable-return')) { %>
+ <th>unroutable (return)</th>
+<% } %>
+<% if (show_column('channels', 'rate-deliver')) { %>
+ <th>deliver / get</th>
+<% } %>
+<% if (show_column('channels', 'rate-redeliver')) { %>
+ <th>redelivered</th>
+<% } %>
+<% if (show_column('channels', 'rate-ack')) { %>
+ <th>ack</th>
+<% } %>
+<% } %>
+<% } %>
+ </tr>
+ </thead>
+ <tbody>
+<%
+ for (var i = 0; i < channels.length; i++) {
+ var channel = channels[i];
+%>
+ <tr<%= alt_rows(i)%>>
+ <td>
+ <%= link_channel(channel.name) %>
+ </td>
+<% if (mode == 'standalone' && nodes_interesting) { %>
+ <td><%= fmt_node(channel.node) %></td>
+<% } %>
+<% if (mode == 'standalone' && vhosts_interesting) { %>
+ <td class="c"><%= fmt_string(channel.vhost) %></td>
+<% } %>
+<% if (show_column('channels', 'user')) { %>
+ <td class="c"><%= fmt_string(channel.user) %></td>
+<% } %>
+<% if (show_column('channels', 'mode')) { %>
+ <td class="c">
+ <%= fmt_channel_mode(channel) %>
+ </td>
+<% } %>
+<% if (show_column('channels', 'state')) { %>
+ <td class="c"><%= fmt_object_state(channel) %></td>
+<% } %>
+<% if (show_column('channels', 'msgs-unconfirmed')) { %>
+ <td class="c"><%= channel.messages_unconfirmed %></td>
+<% } %>
+<% if (show_column('channels', 'prefetch')) { %>
+ <td class="c">
+ <% if (channel.prefetch_count != 0) { %>
+ <%= channel.prefetch_count %><br/>
+ <% } %>
+ <% if (channel.global_prefetch_count != 0) { %>
+ <%= channel.global_prefetch_count %> (global)
+ <% } %>
+ </td>
+<% } %>
+<% if (show_column('channels', 'msgs-unacked')) { %>
+ <td class="c"><%= channel.messages_unacknowledged %></td>
+<% } %>
+<% if (show_column('channels', 'msgs-uncommitted')) { %>
+ <td class="c"><%= channel.messages_uncommitted %></td>
+<% } %>
+<% if (show_column('channels', 'acks-uncommitted')) { %>
+ <td class="c"><%= channel.acks_uncommitted %></td>
+<% } %>
+<% if (rates_mode != 'none') { %>
+<% if (show_column('channels', 'rate-publish')) { %>
+ <td class="r"><%= fmt_detail_rate(channel.message_stats, 'publish') %></td>
+<% } %>
+<% if (show_column('channels', 'rate-confirm')) { %>
+ <td class="r"><%= fmt_detail_rate(channel.message_stats, 'confirm') %></td>
+<% } %>
+<% if (show_column('channels', 'rate-unroutable-drop')) { %>
+ <td class="r"><%= fmt_detail_rate(channel.message_stats, 'drop_unroutable') %></td>
+<% } %>
+<% if (show_column('channels', 'rate-unroutable-return')) { %>
+ <td class="r"><%= fmt_detail_rate(channel.message_stats, 'return_unroutable') %></td>
+<% } %>
+<% if (show_column('channels', 'rate-deliver')) { %>
+ <td class="r"><%= fmt_detail_rate(channel.message_stats, 'deliver_get') %></td>
+<% } %>
+<% if (show_column('channels', 'rate-redeliver')) { %>
+ <td class="r"><%= fmt_detail_rate(channel.message_stats, 'redeliver') %></td>
+<% } %>
+<% if (show_column('channels', 'rate-ack')) { %>
+ <td class="r"><%= fmt_detail_rate(channel.message_stats, 'ack') %></td>
+<% } %>
+<% } %>
+ </tr>
+ <% } %>
+ </tbody>
+</table>
+<% } else { %>
+ <p>... no channels ...</p>
+<% } %>
diff --git a/deps/rabbitmq_management/priv/www/js/tmpl/channels.ejs b/deps/rabbitmq_management/priv/www/js/tmpl/channels.ejs
new file mode 100644
index 0000000000..a29553f96c
--- /dev/null
+++ b/deps/rabbitmq_management/priv/www/js/tmpl/channels.ejs
@@ -0,0 +1,7 @@
+<h1>Channels</h1>
+<div class="section">
+ <%= paginate_ui(channels, 'channels') %>
+</div>
+<div class="updatable">
+ <%= format('channels-list', {'channels': channels.items, 'mode': 'standalone'}) %>
+</div>
diff --git a/deps/rabbitmq_management/priv/www/js/tmpl/cluster-name.ejs b/deps/rabbitmq_management/priv/www/js/tmpl/cluster-name.ejs
new file mode 100644
index 0000000000..fbaa3cd24e
--- /dev/null
+++ b/deps/rabbitmq_management/priv/www/js/tmpl/cluster-name.ejs
@@ -0,0 +1,31 @@
+<h1>Cluster name: <b><%= fmt_string(cluster_name.name) %></b></h1>
+
+<p>
+ The cluster name can be used by clients to identify clusters over
+ AMQP connections, and is used by the shovel and federation plugins
+ to identify which clusters a message has been routed through.
+</p>
+<p>
+  Note that the cluster name is announced to clients in the AMQP
+  server properties, i.e. before authentication has taken place.
+  It should therefore not be considered secret.
+</p>
+<p>
+ The cluster name is generated by default from the name of the first
+ node in the cluster, but can be changed.
+</p>
+
+<div class="section-hidden">
+ <h2>Change name</h2>
+ <div class="hider">
+ <form action="#/cluster-name" method="put">
+ <table class="form">
+ <tr>
+ <th><label>Name:</label></th>
+ <td><input type="text" name="name" value="<%= fmt_string(cluster_name.name) %>"/><span class="mand">*</span></td>
+ </tr>
+ </table>
+ <input type="submit" value="Change name"/>
+ </form>
+ </div>
+</div>
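The form above drives the UI route #/cluster-name; the same value is readable and writable through the management HTTP API. A hedged sketch (the /api/cluster-name endpoint follows the documented API; host and guest:guest credentials are placeholders):

    // Reading and updating the cluster name via the management HTTP API.
    var base = 'http://localhost:15672';
    var auth = 'Basic ' + btoa('guest:guest');

    function getClusterName() {
      return fetch(base + '/api/cluster-name', {headers: {'Authorization': auth}})
        .then(function (res) { return res.json(); })
        .then(function (body) { return body.name; });
    }

    function setClusterName(name) {
      return fetch(base + '/api/cluster-name', {
        method: 'PUT',
        headers: {'Authorization': auth, 'Content-Type': 'application/json'},
        body: JSON.stringify({name: name})
      });
    }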
diff --git a/deps/rabbitmq_management/priv/www/js/tmpl/columns-options.ejs b/deps/rabbitmq_management/priv/www/js/tmpl/columns-options.ejs
new file mode 100644
index 0000000000..20594603c0
--- /dev/null
+++ b/deps/rabbitmq_management/priv/www/js/tmpl/columns-options.ejs
@@ -0,0 +1,25 @@
+<%
+ var mode = span.attr('for');
+%>
+
+<form action="#/column-options" method="put" class="auto-submit">
+ <input type="hidden" name="mode" value="<%= mode %>"/>
+ <table class="form" width="100%">
+ <tr>
+ <td colspan="2">
+ <h3>Columns for this table</h3>
+ </td>
+ </tr>
+<% for (var group in COLUMNS[mode]) {
+ var options = COLUMNS[mode][group]; %>
+ <tr>
+ <th><label><%= group %>:</label></th>
+ <td>
+ <% for (var i = 0; i < options.length; i++) { %>
+ <%= fmt_checkbox(mode + '-' + options[i][0], options[i][1], get_pref('column-' + mode + '-' + options[i][0]) == 'true') %>
+ <% } %>
+ </td>
+    </tr>
+<% } %>
+ </table>
+</form>
diff --git a/deps/rabbitmq_management/priv/www/js/tmpl/connection.ejs b/deps/rabbitmq_management/priv/www/js/tmpl/connection.ejs
new file mode 100644
index 0000000000..9a09f89b72
--- /dev/null
+++ b/deps/rabbitmq_management/priv/www/js/tmpl/connection.ejs
@@ -0,0 +1,194 @@
+<h1>Connection <%= fmt_string(connection.name) %> <%= fmt_maybe_vhost(connection.vhost) %></h1>
+
+<% if (!disable_stats) { %>
+<div class="section">
+<h2>Overview</h2>
+<div class="hider updatable">
+ <%= data_rates('data-rates-conn', connection, 'Data rates') %>
+
+<h3>Details</h3>
+<table class="facts facts-l">
+<% if (nodes_interesting) { %>
+<tr>
+ <th>Node</th>
+ <td><%= fmt_node(connection.node) %></td>
+</tr>
+<% } %>
+
+<% if (connection.client_properties.connection_name) { %>
+<tr>
+ <th>Client-provided name</th>
+ <td><%= fmt_string(connection.client_properties.connection_name) %></td>
+</tr>
+<% } %>
+
+<tr>
+ <th>Username</th>
+ <td><%= fmt_string(connection.user) %></td>
+</tr>
+<tr>
+ <th>Protocol</th>
+ <td><%= connection.protocol %></td>
+</tr>
+<tr>
+ <th>Connected at</th>
+ <td><%= fmt_timestamp(connection.connected_at) %></td>
+</tr>
+
+<% if (connection.ssl) { %>
+<tr>
+ <th>SSL</th>
+ <td><%= fmt_boolean(connection.ssl) %></td>
+</tr>
+<% } %>
+
+<% if (connection.auth_mechanism) { %>
+<tr>
+ <th>Authentication</th>
+ <td><%= connection.auth_mechanism %></td>
+</tr>
+<% } %>
+</table>
+
+<% if (connection.state) { %>
+<table class="facts">
+<tr>
+ <th>State</th>
+ <td><%= fmt_object_state(connection) %></td>
+</tr>
+<tr>
+ <th>Heartbeat</th>
+ <td><%= fmt_time(connection.timeout, 's') %></td>
+</tr>
+<tr>
+ <th>Frame max</th>
+ <td><%= connection.frame_max %> bytes</td>
+</tr>
+<tr>
+ <th>Channel limit</th>
+ <td><%= connection.channel_max %> channels</td>
+</tr>
+</table>
+
+<% } %>
+
+</div>
+</div>
+
+<div class="section">
+ <h2>Channels</h2>
+ <div class="hider updatable">
+ <%= format('channels-list', {'channels': channels, 'mode': 'connection'}) %>
+ </div>
+</div>
+
+<% if (connection.ssl) { %>
+<div class="section">
+<h2>SSL</h2>
+<div class="hider">
+
+<table class="facts">
+ <tr>
+ <th>Protocol Version</th>
+ <td><%= connection.ssl_protocol %></td>
+ </tr>
+ <tr>
+ <th>Key Exchange Algorithm</th>
+ <td><%= connection.ssl_key_exchange %></td>
+ </tr>
+ <tr>
+ <th>Cipher Algorithm</th>
+ <td><%= connection.ssl_cipher %></td>
+ </tr>
+ <tr>
+ <th>Hash Algorithm</th>
+ <td><%= connection.ssl_hash %></td>
+ </tr>
+</table>
+
+<% if (connection.peer_cert_issuer != '') { %>
+<table class="facts">
+ <tr>
+ <th>Peer Certificate Issuer</th>
+ <td><%= connection.peer_cert_issuer %></td>
+ </tr>
+ <tr>
+ <th>Peer Certificate Subject</th>
+ <td><%= connection.peer_cert_subject %></td>
+ </tr>
+ <tr>
+ <th>Peer Certificate Validity</th>
+ <td><%= connection.peer_cert_validity %></td>
+ </tr>
+</table>
+<% } %>
+</div>
+</div>
+<% } %>
+
+<% if (properties_size(connection.client_properties) > 0) { %>
+<div class="section-hidden">
+<h2>Client properties</h2>
+<div class="hider">
+<%= fmt_table_long(connection.client_properties) %>
+</div>
+</div>
+<% } %>
+
+<% if(connection.reductions || connection.garbage_collection) { %>
+<div class="section-hidden">
+<h2>Runtime Metrics (Advanced)</h2>
+ <div class="hider updatable">
+ <%= data_reductions('reductions-rates-conn', connection) %>
+ <table class="facts">
+ <% if (connection.garbage_collection.min_bin_vheap_size) { %>
+ <tr>
+ <th>Minimum binary virtual heap size in words (min_bin_vheap_size)</th>
+ <td><%= connection.garbage_collection.min_bin_vheap_size %></td>
+ </tr>
+ <% } %>
+
+ <% if (connection.garbage_collection.min_heap_size) { %>
+ <tr>
+ <th>Minimum heap size in words (min_heap_size)</th>
+ <td><%= connection.garbage_collection.min_heap_size %></td>
+ </tr>
+ <% } %>
+
+ <% if (connection.garbage_collection.fullsweep_after) { %>
+ <tr>
+ <th>Maximum generational collections before fullsweep (fullsweep_after)</th>
+ <td><%= connection.garbage_collection.fullsweep_after %></td>
+ </tr>
+ <% } %>
+
+ <% if (connection.garbage_collection.minor_gcs) { %>
+ <tr>
+ <th>Number of minor GCs (minor_gcs)</th>
+ <td><%= connection.garbage_collection.minor_gcs %></td>
+ </tr>
+ <% } %>
+ </table>
+ </div>
+</div>
+
+<% } %>
+<% } %>
+
+<div class="section-hidden">
+ <h2>Close this connection</h2>
+ <div class="hider">
+ <form action="#/connections" method="delete" class="confirm">
+ <input type="hidden" name="name" value="<%= fmt_string(connection.name) %>"/>
+ <table class="form">
+ <tr>
+ <th><label>Reason:</label></th>
+ <td>
+ <input type="text" name="reason" value="Closed via management plugin" class="wide"/>
+ </td>
+ </tr>
+ </table>
+ <input type="submit" value="Force Close"/>
+ </form>
+ </div>
+</div>
diff --git a/deps/rabbitmq_management/priv/www/js/tmpl/connections.ejs b/deps/rabbitmq_management/priv/www/js/tmpl/connections.ejs
new file mode 100644
index 0000000000..a75fa7f560
--- /dev/null
+++ b/deps/rabbitmq_management/priv/www/js/tmpl/connections.ejs
@@ -0,0 +1,155 @@
+<h1>Connections</h1>
+<div class="section">
+ <%= paginate_ui(connections, 'connections') %>
+</div>
+<div class="updatable">
+<% if (connections.items.length > 0) { %>
+<table class="list">
+ <thead>
+ <tr>
+ <%= group_heading('connections', 'Overview', [vhosts_interesting, nodes_interesting, true]) %>
+ <% if (!disable_stats) { %>
+ <%= group_heading('connections', 'Details', []) %>
+ <%= group_heading('connections', 'Network', []) %>
+ <% } %>
+ <th class="plus-minus"><span class="popup-options-link" title="Click to change columns" type="columns" for="connections">+/-</span></th>
+ </tr>
+ <tr>
+<% if (vhosts_interesting) { %>
+ <th><%= fmt_sort('Virtual host', 'vhost') %></th>
+<% } %>
+<% if(disable_stats) { %>
+ <th><%= fmt_sort('Name', 'name') %></th>
+<% } else { %>
+ <th><%= fmt_sort('Name', 'client_properties.connection_name') %></th>
+<% } %>
+<% if (nodes_interesting) { %>
+ <th><%= fmt_sort('Node', 'node') %></th>
+<% } %>
+<% if (show_column('connections', 'user')) { %>
+ <th><%= fmt_sort('User name', 'user') %></th>
+<% } %>
+<% if (!disable_stats) { %>
+<% if (show_column('connections', 'state')) { %>
+ <th><%= fmt_sort('State', 'state') %></th>
+<% } %>
+<% if (show_column('connections', 'ssl')) { %>
+ <th><%= fmt_sort('SSL / TLS', 'ssl') %></th>
+<% } %>
+<% if (show_column('connections', 'ssl_info')) { %>
+ <th>SSL Details</th>
+<% } %>
+<% if (show_column('connections', 'protocol')) { %>
+ <th><%= fmt_sort('Protocol', 'protocol') %></th>
+<% } %>
+<% if (show_column('connections', 'channels')) { %>
+ <th><%= fmt_sort('Channels', 'channels') %></th>
+<% } %>
+<% if (show_column('connections', 'channel_max')) { %>
+ <th><%= fmt_sort('Channel max', 'channel_max') %></th>
+<% } %>
+<% if (show_column('connections', 'frame_max')) { %>
+ <th><%= fmt_sort('Frame max', 'frame_max') %></th>
+<% } %>
+<% if (show_column('connections', 'auth_mechanism')) { %>
+ <th><%= fmt_sort('Auth mechanism', 'auth_mechanism') %></th>
+<% } %>
+<% if (show_column('connections', 'client')) { %>
+ <th><%= fmt_sort('Client', 'properties') %></th>
+<% } %>
+<% if (show_column('connections', 'from_client')) { %>
+ <th><%= fmt_sort('From client', 'recv_oct_details.rate') %></th>
+<% } %>
+<% if (show_column('connections', 'to_client')) { %>
+ <th><%= fmt_sort('To client', 'send_oct_details.rate') %></th>
+<% } %>
+<% if (show_column('connections', 'heartbeat')) { %>
+ <th><%= fmt_sort('Heartbeat', 'timeout') %></th>
+<% } %>
+<% if (show_column('connections', 'connected_at')) { %>
+ <th><%= fmt_sort('Connected at', 'connected_at') %></th>
+<% } %>
+<% } %>
+ </tr>
+ </thead>
+ <tbody>
+<%
+ for (var i = 0; i < connections.items.length; i++) {
+ var connection = connections.items[i];
+%>
+ <tr<%= alt_rows(i)%>>
+<% if (vhosts_interesting) { %>
+ <td><%= fmt_string(connection.vhost) %></td>
+<% } %>
+<% if(connection.client_properties) { %>
+ <td>
+ <%= link_conn(connection.name) %>
+ <sub><%= fmt_string(short_conn(connection.client_properties.connection_name)) %></sub>
+ </td>
+<% } else { %>
+ <td><%= link_conn(connection.name) %></td>
+<% } %>
+<% if (nodes_interesting) { %>
+ <td><%= fmt_node(connection.node) %></td>
+<% } %>
+<% if (show_column('connections', 'user')) { %>
+ <td class="c"><%= fmt_string(connection.user) %></td>
+<% } %>
+<% if (!disable_stats) { %>
+<% if (show_column('connections', 'state')) { %>
+ <td><%= fmt_object_state(connection) %></td>
+<% } %>
+<% if (show_column('connections', 'ssl')) { %>
+ <td class="c"><%= fmt_boolean(connection.ssl, '') %></td>
+<% } %>
+<% if (show_column('connections', 'ssl_info')) { %>
+ <td>
+ <% if (connection.ssl) { %>
+ <%= connection.ssl_protocol %>
+ <sub>
+ <%= connection.ssl_key_exchange %>
+ <%= connection.ssl_cipher %>
+ <%= connection.ssl_hash %>
+ </sub>
+ <% } %>
+ </td>
+<% } %>
+<% if (show_column('connections', 'protocol')) { %>
+ <td class="c"><%= connection.protocol %></td>
+<% } %>
+<% if (show_column('connections', 'channels')) { %>
+ <td class="r"><%= fmt_string(connection.channels, '') %></td>
+<% } %>
+<% if (show_column('connections', 'channel_max')) { %>
+ <td class="r"><%= fmt_string(connection.channel_max, '') %></td>
+<% } %>
+<% if (show_column('connections', 'frame_max')) { %>
+ <td class="r"><%= fmt_string(connection.frame_max, '') %></td>
+<% } %>
+<% if (show_column('connections', 'auth_mechanism')) { %>
+ <td class="c"><%= fmt_string(connection.auth_mechanism, '') %></td>
+<% } %>
+<% if (show_column('connections', 'client')) { %>
+ <td><%= fmt_client_name(connection.client_properties) %></td>
+<% } %>
+<% if (show_column('connections', 'from_client')) { %>
+ <td><%= fmt_detail_rate_bytes(connection, 'recv_oct') %></td>
+<% } %>
+<% if (show_column('connections', 'to_client')) { %>
+ <td><%= fmt_detail_rate_bytes(connection, 'send_oct') %></td>
+<% } %>
+<% if (show_column('connections', 'heartbeat')) { %>
+ <td class="r"><%= fmt_time(connection.timeout, 's') %></td>
+<% } %>
+<% if (show_column('connections', 'connected_at')) { %>
+ <td><%= fmt_timestamp_mini(connection.connected_at) %></td>
+<% } %>
+ <% } %>
+ </tr>
+ <% } %>
+ </tbody>
+</table>
+<% } else { %>
+ <p>... no connections ...</p>
+<% } %>
+</div>
diff --git a/deps/rabbitmq_management/priv/www/js/tmpl/consumers.ejs b/deps/rabbitmq_management/priv/www/js/tmpl/consumers.ejs
new file mode 100644
index 0000000000..ffd7b7b603
--- /dev/null
+++ b/deps/rabbitmq_management/priv/www/js/tmpl/consumers.ejs
@@ -0,0 +1,43 @@
+<% if (consumers.length > 0) { %>
+ <table class="list">
+ <thead>
+ <tr>
+<% if (mode == 'queue') { %>
+ <th>Channel</th>
+ <th>Consumer tag</th>
+<% } else { %>
+ <th>Consumer tag</th>
+ <th>Queue</th>
+<% } %>
+ <th>Ack required</th>
+ <th>Exclusive</th>
+ <th>Prefetch count</th>
+ <th>Active <span class="help" id="consumer-active"></span></th>
+ <th>Activity status</th>
+ <th>Arguments</th>
+ </tr>
+ </thead>
+<%
+ for (var i = 0; i < consumers.length; i++) {
+ var consumer = consumers[i];
+%>
+ <tr<%= alt_rows(i) %>>
+<% if (mode == 'queue') { %>
+ <td><%= link_channel(consumer.channel_details.name) %></td>
+ <td><%= fmt_string(consumer.consumer_tag) %></td>
+<% } else { %>
+ <td><%= fmt_string(consumer.consumer_tag) %></td>
+ <td><%= link_queue(consumer.queue.vhost, consumer.queue.name) %></td>
+<% } %>
+ <td class="c"><%= fmt_boolean(consumer.ack_required) %></td>
+ <td class="c"><%= fmt_boolean(consumer.exclusive) %></td>
+ <td class="c"><%= consumer.prefetch_count %></td>
+ <td class="c"><%= fmt_boolean(consumer.active) %></td>
+ <td class="c"><%= fmt_activity_status(consumer.activity_status) %></td>
+ <td class="c"><%= fmt_table_short(consumer.arguments) %></td>
+ </tr>
+<% } %>
+ </table>
+<% } else { %>
+ <p>... no consumers ...</p>
+<% } %>
diff --git a/deps/rabbitmq_management/priv/www/js/tmpl/exchange.ejs b/deps/rabbitmq_management/priv/www/js/tmpl/exchange.ejs
new file mode 100644
index 0000000000..bff57441ec
--- /dev/null
+++ b/deps/rabbitmq_management/priv/www/js/tmpl/exchange.ejs
@@ -0,0 +1,97 @@
+<h1>Exchange: <b><%= fmt_exchange(highlight_extra_whitespace(exchange.name)) %></b><%= fmt_maybe_vhost(exchange.vhost) %></h1>
+
+<div class="section">
+ <h2>Overview</h2>
+ <div class="hider updatable">
+<% if (!disable_stats) { %>
+<% if (rates_mode != 'none') { %>
+ <%= message_rates('msg-rates-x', exchange.message_stats) %>
+<% } %>
+<% } %>
+ <h3>Details</h3>
+ <table class="facts">
+ <tr>
+ <th>Type</th>
+ <td class="l"><%= fmt_exchange_type(exchange.type) %></td>
+ </tr>
+ <tr>
+ <th>Features</th>
+ <td><%= fmt_features(exchange) %></td>
+ </tr>
+ <tr>
+ <th>Policy</th>
+ <td><%= link_policy(exchange.vhost, exchange.policy) %></td>
+ </tr>
+ </table>
+ </div>
+</div>
+
+<% if (!disable_stats) { %>
+<% if (rates_mode == 'detailed') { %>
+<div class="section-hidden">
+<h2>Message rates breakdown</h2>
+<div class="hider updatable">
+<table class="two-col-layout">
+ <tr>
+ <td>
+ <%= format('msg-detail-publishes',
+ {'mode': 'exchange-incoming',
+ 'object': exchange.incoming,
+ 'label': 'Incoming <span class="help" id="exchange-rates-incoming"></span>'}) %>
+ </td>
+ <td>
+ <%= format('msg-detail-publishes',
+ {'mode': 'exchange-outgoing',
+ 'object': exchange.outgoing,
+ 'label': 'Outgoing <span class="help" id="exchange-rates-outgoing"></span>'}) %>
+ </td>
+ </tr>
+</table>
+</div>
+</div>
+<% } %>
+<% } %>
+
+
+<div class="section-hidden">
+ <h2>Bindings</h2>
+ <div class="hider">
+<% if (exchange.name == "") { %>
+ <h3>Default exchange</h3>
+ <p>
+ The default exchange is implicitly bound to every queue, with a
+ routing key equal to the queue name. It is not possible to
+    explicitly bind to, or unbind from, the default exchange. It also
+ cannot be deleted.
+ </p>
+<% } else { %>
+<div class="bindings-wrapper">
+<% if (bindings_destination.length > 0) { %>
+ <%= format('bindings', {'mode': 'exchange_destination', 'bindings': bindings_destination}) %>
+ <p class="arrow">&dArr;</p>
+<% } %>
+ <p><span class="exchange">This exchange</span></p>
+ <p class="arrow">&dArr;</p>
+ <%= format('bindings', {'mode': 'exchange_source', 'bindings': bindings_source}) %>
+</div>
+ <%= format('add-binding', {'mode': 'exchange_source', 'parent': exchange}) %>
+<% } %>
+</div>
+</div>
+
+<% if (!exchange.internal) { %>
+<%= format('publish', {'mode': 'exchange', 'exchange': exchange}) %>
+<% } %>
+
+<% if (exchange.name != "") { %>
+<div class="section-hidden">
+ <h2>Delete this exchange</h2>
+ <div class="hider">
+ <form action="#/exchanges" method="delete" class="confirm">
+ <input type="hidden" name="vhost" value="<%= fmt_string(exchange.vhost) %>"/>
+ <input type="hidden" name="name" value="<%= fmt_exchange_url(exchange.name) %>"/>
+ <input type="submit" value="Delete"/>
+ </form>
+ </div>
+</div>
+<% } %>
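The default-exchange note in this template can be seen in action from a client. A sketch using Node.js with the amqplib client (an assumed dependency, not part of this tree): publishing to the nameless exchange with a routing key equal to a queue name delivers straight to that queue.

    // Node.js sketch with amqplib: the implicit default-exchange
    // binding routes by queue name.
    var amqp = require('amqplib');

    amqp.connect('amqp://localhost').then(function (conn) {
      return conn.createChannel().then(function (ch) {
        return ch.assertQueue('demo').then(function () {
          // '' is the default exchange; routing key 'demo' targets the queue
          ch.publish('', 'demo', Buffer.from('hello'));
          // equivalent shorthand: ch.sendToQueue('demo', Buffer.from('hello'));
          return ch.close();
        });
      }).then(function () { return conn.close(); });
    });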
diff --git a/deps/rabbitmq_management/priv/www/js/tmpl/exchanges.ejs b/deps/rabbitmq_management/priv/www/js/tmpl/exchanges.ejs
new file mode 100644
index 0000000000..f4fd5c4c8d
--- /dev/null
+++ b/deps/rabbitmq_management/priv/www/js/tmpl/exchanges.ejs
@@ -0,0 +1,169 @@
+<h1>Exchanges</h1>
+<div class="section">
+ <%= paginate_ui(exchanges, 'exchanges') %>
+</div>
+<div class="updatable">
+<% if (exchanges.items.length > 0) { %>
+<table class="list">
+ <thead>
+ <tr>
+<% if (vhosts_interesting) { %>
+ <th><%= fmt_sort('Virtual host', 'vhost') %></th>
+<% } %>
+ <th><%= fmt_sort('Name', 'name') %></th>
+<% if (show_column('exchanges', 'type')) { %>
+ <th><%= fmt_sort('Type', 'type') %></th>
+<% } %>
+<% if (show_column('exchanges', 'features')) { %>
+ <th>Features</th>
+<% } %>
+<% if (show_column('exchanges', 'features_no_policy')) { %>
+ <th>Features</th>
+<% } %>
+<% if (show_column('exchanges', 'policy')) { %>
+ <th><%= fmt_sort('Policy','policy') %></th>
+<% } %>
+<% if (!disable_stats) { %>
+<% if (rates_mode != 'none') { %>
+<% if (show_column('exchanges', 'rate-in')) { %>
+ <th><%= fmt_sort('Message rate in', 'message_stats.publish_in_details.rate') %></th>
+<% } %>
+<% if (show_column('exchanges', 'rate-out')) { %>
+ <th><%= fmt_sort('Message rate out', 'message_stats.publish_out_details.rate') %></th>
+<% } %>
+<% } %>
+<% } %>
+ <th class="plus-minus"><span class="popup-options-link" title="Click to change columns" type="columns" for="exchanges">+/-</span></th>
+ </tr>
+ </thead>
+ <tbody>
+<%
+ for (var i = 0; i < exchanges.items.length; i++) {
+ var exchange = exchanges.items[i];
+%>
+ <tr<%= alt_rows(i, exchange.arguments)%>>
+<% if (vhosts_interesting) { %>
+ <td><%= fmt_string(exchange.vhost) %></td>
+<% } %>
+ <td><%= link_exchange(exchange.vhost, exchange.name, exchange.arguments) %></td>
+ <td class="c"><%= fmt_exchange_type(exchange.type) %></td>
+<% if (show_column('exchanges', 'features')) { %>
+ <td class="c">
+ <%= fmt_features_short(exchange) %>
+ <%= fmt_policy_short(exchange) %>
+ </td>
+<% } %>
+<% if (show_column('exchanges', 'features_no_policy')) { %>
+ <td class="c">
+ <%= fmt_features_short(exchange) %>
+ </td>
+<% } %>
+<% if (show_column('exchanges', 'policy')) { %>
+ <td class="c">
+ <%= link_policy(exchange.vhost, exchange.policy) %>
+ </td>
+<% } %>
+<% if (!disable_stats) { %>
+<% if (rates_mode != 'none') { %>
+<% if (show_column('exchanges', 'rate-in')) { %>
+ <td class="r"><%= fmt_detail_rate(exchange.message_stats, 'publish_in') %></td>
+<% } %>
+<% if (show_column('exchanges', 'rate-out')) { %>
+ <td class="r"><%= fmt_detail_rate(exchange.message_stats, 'publish_out') %></td>
+<% } %>
+<% } %>
+<% } %>
+ </tr>
+ <% } %>
+ </tbody>
+</table>
+<% } else { %>
+ <p>... no exchanges ...</p>
+<% } %>
+</div>
+
+<div class="section-hidden">
+ <h2>Add a new exchange</h2>
+ <div class="hider">
+ <form action="#/exchanges" method="put">
+ <table class="form">
+<% if (vhosts_interesting) { %>
+ <tr>
+ <th><label>Virtual host:</label></th>
+ <td>
+ <select name="vhost">
+ <% for (var i = 0; i < vhosts.length; i++) { %>
+ <option value="<%= fmt_string(vhosts[i].name) %>" <%= (vhosts[i].name === current_vhost) ? 'selected="selected"' : '' %>><%= fmt_string(vhosts[i].name) %></option>
+ <% } %>
+ </select>
+ </td>
+ </tr>
+<% } else { %>
+ <tr><td><input type="hidden" name="vhost" value="<%= fmt_string(vhosts[0].name) %>"/></td></tr>
+<% } %>
+ <tr>
+ <th><label>Name:</label></th>
+ <td><input type="text" name="name"/><span class="mand">*</span></td>
+ </tr>
+ <tr>
+ <th><label>Type:</label></th>
+ <td>
+ <select name="type">
+ <% for (var i = 0; i < exchange_types.length; i++) {
+ var type = exchange_types[i];
+ if (type.internal_purpose == undefined) { %>
+ <option value="<%= fmt_string(type.name) %>"><%= fmt_string(type.name) %></option>
+ <% }
+ } %>
+ </select>
+ </td>
+ </tr>
+ <tr>
+ <th><label>Durability:</label></th>
+ <td>
+ <select name="durable">
+ <option value="true">Durable</option>
+ <option value="false">Transient</option>
+ </select>
+ </td>
+ </tr>
+ <tr>
+ <th><label>Auto delete: <span class="help" id="exchange-auto-delete"></span></label></th>
+ <td>
+ <select name="auto_delete">
+ <option value="false">No</option>
+ <option value="true">Yes</option>
+ </select>
+ </td>
+ </tr>
+ <tr>
+ <th><label>Internal: <span class="help" id="exchange-internal"></span></label></th>
+ <td>
+ <select name="internal">
+ <option value="false">No</option>
+ <option value="true">Yes</option>
+ </select>
+ </td>
+ </tr>
+ <tr>
+ <th><label>Arguments:</label></th>
+ <td>
+ <div class="multifield" id="arguments"></div>
+ <table class="argument-links">
+ <tr>
+ <td>Add</td>
+ <td>
+ <span class="argument-link" field="arguments" key="alternate-exchange" type="string">Alternate exchange</span>
+ <span class="help" id="exchange-alternate"></span>
+ </td>
+ </tr>
+ </table>
+ </td>
+ </tr>
+ </table>
+ <input type="submit" value="Add exchange"/>
+ </form>
+ </div>
+</div>
diff --git a/deps/rabbitmq_management/priv/www/js/tmpl/feature-flags.ejs b/deps/rabbitmq_management/priv/www/js/tmpl/feature-flags.ejs
new file mode 100644
index 0000000000..23b0b73071
--- /dev/null
+++ b/deps/rabbitmq_management/priv/www/js/tmpl/feature-flags.ejs
@@ -0,0 +1,60 @@
+<h1>Feature Flags</h1>
+<div class="section">
+ <h2>All Feature Flags</h2>
+ <div class="hider">
+<%= filter_ui(feature_flags) %>
+ <div class="updatable">
+<% if (feature_flags.length > 0) { %>
+<table class="list">
+ <thead>
+ <tr>
+ <th><%= fmt_sort('Name', 'name') %></th>
+ <th class="c"><%= fmt_sort('State', 'state') %></th>
+ <th>Description</th>
+ </tr>
+ </thead>
+ <tbody>
+ <%
+ for (var i = 0; i < feature_flags.length; i++) {
+ var feature_flag = feature_flags[i];
+ var state_color = "grey";
+ if (feature_flag.state == "enabled") {
+ state_color = "green";
+ } else if (feature_flag.state == "disabled") {
+ state_color = "yellow";
+ } else if (feature_flag.state == "unsupported") {
+ state_color = "red";
+ }
+ %>
+ <tr<%= alt_rows(i)%>>
+ <td><%= fmt_string(feature_flag.name) %></td>
+ <td class="c">
+ <% if (feature_flag.state == "disabled") { %>
+ <form action="#/feature-flags-enable" method="put" style="display: inline-block">
+ <input type="hidden" name="name" value="<%= fmt_string(feature_flag.name) %>"/>
+ <input type="submit" value="Enable" class="c"/>
+ </form>
+ <% } else { %>
+ <abbr class="status-<%= fmt_string(state_color) %>"
+ style="text-transform: capitalize"
+ title="Feature flag state: <%= fmt_string(feature_flag.state) %>">
+ <%= fmt_string(feature_flag.state) %>
+ </abbr>
+ <% } %>
+ </td>
+ <td>
+ <p><%= fmt_string(feature_flag.desc) %></p>
+ <% if (feature_flag.doc_url) { %>
+ <p><a href="<%= fmt_string(feature_flag.doc_url) %>">[Learn more]</a></p>
+ <% } %>
+ </td>
+ </tr>
+ <% } %>
+ </tbody>
+</table>
+<% } else { %>
+  <p>... no feature flags ...</p>
+<% } %>
+ </div>
+ </div>
+</div>
diff --git a/deps/rabbitmq_management/priv/www/js/tmpl/layout.ejs b/deps/rabbitmq_management/priv/www/js/tmpl/layout.ejs
new file mode 100644
index 0000000000..e10a71553b
--- /dev/null
+++ b/deps/rabbitmq_management/priv/www/js/tmpl/layout.ejs
@@ -0,0 +1,58 @@
+<div id="header">
+ <ul id="topnav">
+ <li id="interval">
+ <label for="update-every" id="status"></label>
+ <select id="update-every">
+ <% if(disable_stats) { %>
+ <option value="">Do not refresh</option>
+ <option value="5000">Refresh every 5 seconds</option>
+ <% } else { %>
+ <option value="5000">Refresh every 5 seconds</option>
+ <% } %>
+ <option value="10000">Refresh every 10 seconds</option>
+ <option value="30000">Refresh every 30 seconds</option>
+ <% if(!disable_stats) { %>
+ <option value="">Do not refresh</option>
+ <% } %>
+ </select>
+ </li>
+ <li id="vhost">
+ <label for="show-vhost">Virtual host </label>
+ <select id="show-vhost">
+ <option value="">All</option>
+ </select>
+ </li>
+ <li id="logout">
+ <% if (enable_uaa) { %>
+ <input type="submit" id="loginWindow" onclick="uaa_logout_window()" value="Log out"/>
+ <% } else { %>
+ <form action="#/logout" method="put">
+ <input type="submit" value="Log out"/>
+ </form>
+ <% } %>
+ </li>
+ </ul>
+ <div id="logo">
+ <a href="#/"><img src="img/rabbitmqlogo.svg" alt="RabbitMQ logo" width="204" height="37"/></a>
+ <span id="versions"></span>
+ </div>
+ <div id="menu">
+ <ul id="tabs">
+ </ul>
+ </div>
+</div>
+<div id="rhs"></div>
+<div id="main"></div>
+<div id="footer">
+ <ul>
+ <li><a href="api/" target="_blank">HTTP API</a></li>
+ <li><a href="https://www.rabbitmq.com/admin-guide.html" target="_blank">Server Docs</a></li>
+ <li><a href="https://www.rabbitmq.com/getstarted.html" target="_blank">Tutorials</a></li>
+ <li><a href="https://groups.google.com/forum/#!forum/rabbitmq-users" target="_blank">Community Support</a></li>
+ <li><a href="https://rabbitmq-slack.herokuapp.com/" target="_blank">Community Slack</a></li>
+ <li><a href="https://www.rabbitmq.com/services.html" target="_blank">Commercial Support</a></li>
+ <li><a href="https://www.rabbitmq.com/plugins.html" target="_blank">Plugins</a></li>
+ <li><a href="https://www.rabbitmq.com/github.html" target="_blank">GitHub</a></li>
+ <li><a href="https://www.rabbitmq.com/changelog.html" target="_blank">Changelog</a></li>
+ </ul>
+</div>
diff --git a/deps/rabbitmq_management/priv/www/js/tmpl/limits.ejs b/deps/rabbitmq_management/priv/www/js/tmpl/limits.ejs
new file mode 100644
index 0000000000..7b0534e5b1
--- /dev/null
+++ b/deps/rabbitmq_management/priv/www/js/tmpl/limits.ejs
@@ -0,0 +1,177 @@
+<h1>Limits</h1>
+
+<div class="section">
+  <h2>Virtual Host Limits</h2>
+ <div class="hider">
+ <div class="updatable">
+
+ <% if (limits.length > 0) { %>
+ <table class="list">
+ <thead>
+ <tr>
+ <th>Virtual Host</th>
+ <th>Limit</th>
+ <th>Value</th>
+ <th class="administrator-only"></th>
+ </tr>
+ </thead>
+ <tbody>
+ <% for (var i = 0; i < limits.length; i++) {
+ var limit = limits[i];
+ var limit_values = Object.keys(limit.value).sort().map(
+ function(k) { return {name: k, value: limit.value[k]};});
+ %>
+
+ <% for (var j = 0; j < limit_values.length; j++) {
+ var limit_value = limit_values[j];
+ %>
+
+ <tr<%= alt_rows(j+1)%>>
+ <% if(j == 0) { %>
+ <td rowspan="<%= limit_values.length %>"> <%= fmt_string(limit.vhost) %> </td>
+ <% } %>
+ <td><%= limit_value.name %></td>
+ <td><%= limit_value.value %></td>
+ <td class="administrator-only">
+ <form action="#/limits" method="delete" class="confirm">
+ <input type="hidden" name="name" value="<%= fmt_string(limit_value.name) %>"/>
+ <input type="hidden" name="vhost" value="<%= fmt_string(limit.vhost) %>"/>
+ <input type="submit" value="Clear"/>
+ </form>
+ </td>
+ </tr>
+ <% } %>
+ <% } %>
+ </tbody>
+ </table>
+ <% } %>
+ </div>
+ </div>
+</div>
+
+<div class="section administrator-only">
+ <h2>Set / update a virtual host limit</h2>
+ <div class="hider">
+ <form action="#/limits" method="put">
+ <table class="form">
+ <tr>
+ <th><label>Virtual host:</label></th>
+ <td>
+ <select name="vhost">
+ <% for (var i = 0; i < vhosts.length; i++) { %>
+ <option value="<%= fmt_string(vhosts[i].name) %>">
+ <%= fmt_string(vhosts[i].name) %>
+ </option>
+ <% } %>
+ </select>
+ </td>
+ </tr>
+ <tr>
+ <th><label>Limit:</label></th>
+ <td>
+ <select name="name">
+ <option value="max-connections">max-connections</option>
+ <option value="max-queues">max-queues</option>
+ </select>
+ </td>
+ </tr>
+ <tr>
+ <th><label>Value:</label></th>
+ <td>
+ <input type="text" name="value"/>
+ <span class="mand">*</span>
+ </td>
+ </tr>
+ </table>
+ <input type="submit" value="Set / update limit"/>
+ </form>
+ </div>
+</div>
+
+<div class="section">
+ <h2>User Limits</h2>
+ <div class="hider">
+ <div class="updatable">
+
+ <% if (user_limits.length > 0) { %>
+ <table class="list">
+ <thead>
+ <tr>
+ <th>User</th>
+ <th>Limit</th>
+ <th>Value</th>
+ <th class="administrator-only"></th>
+ </tr>
+ </thead>
+ <tbody>
+ <% for (var i = 0; i < user_limits.length; i++) {
+ var user_limit = user_limits[i];
+ var user_limit_values = Object.keys(user_limit.value).sort().map(
+ function(k) { return {name: k, value: user_limit.value[k]};});
+ %>
+
+ <% for (var j = 0; j < user_limit_values.length; j++) {
+ var user_limit_value = user_limit_values[j];
+ %>
+
+ <tr<%= alt_rows(j+1)%>>
+ <% if(j == 0) { %>
+ <td rowspan="<%= user_limit_values.length %>"> <%= fmt_string(user_limit.user) %> </td>
+ <% } %>
+ <td><%= user_limit_value.name %></td>
+ <td><%= user_limit_value.value %></td>
+ <td class="administrator-only">
+ <form action="#/user-limits" method="delete" class="confirm">
+ <input type="hidden" name="name" value="<%= fmt_string(user_limit_value.name) %>"/>
+ <input type="hidden" name="user" value="<%= fmt_string(user_limit.user) %>"/>
+ <input type="submit" value="Clear"/>
+ </form>
+ </td>
+ </tr>
+ <% } %>
+ <% } %>
+ </tbody>
+ </table>
+ <% } %>
+ </div>
+ </div>
+</div>
+
+<div class="section administrator-only">
+ <h2>Set / update a user limit</h2>
+ <div class="hider">
+ <form action="#/user-limits" method="put">
+ <table class="form">
+ <tr>
+ <th><label>User:</label></th>
+ <td>
+ <select name="user">
+ <% for (var i = 0; i < users.length; i++) { %>
+ <option value="<%= fmt_string(users[i].name) %>">
+ <%= fmt_string(users[i].name) %>
+ </option>
+ <% } %>
+ </select>
+ </td>
+ </tr>
+ <tr>
+ <th><label>Limit:</label></th>
+ <td>
+ <select name="name">
+ <option value="max-connections">max-connections</option>
+ <option value="max-channels">max-channels</option>
+ </select>
+ </td>
+ </tr>
+ <tr>
+ <th><label>Value:</label></th>
+ <td>
+ <input type="text" name="value"/>
+ <span class="mand">*</span>
+ </td>
+ </tr>
+ </table>
+ <input type="submit" value="Set / update limit"/>
+ </form>
+ </div>
+</div>
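Both forms above submit to UI routes (#/limits and #/user-limits) that map onto the HTTP API. A hedged sketch of setting a per-vhost limit; the /api/vhost-limits/{vhost}/{name} path follows the documented management API, while host and credentials are placeholders:

    // Setting a vhost limit via the management HTTP API.
    var base = 'http://localhost:15672';
    var auth = 'Basic ' + btoa('guest:guest');

    function setVhostLimit(vhost, name, value) {
      return fetch(base + '/api/vhost-limits/' + encodeURIComponent(vhost) + '/' + name, {
        method: 'PUT',
        headers: {'Authorization': auth, 'Content-Type': 'application/json'},
        body: JSON.stringify({value: value})
      });
    }

    // e.g. setVhostLimit('/', 'max-connections', 100);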
diff --git a/deps/rabbitmq_management/priv/www/js/tmpl/list-exchanges.ejs b/deps/rabbitmq_management/priv/www/js/tmpl/list-exchanges.ejs
new file mode 100644
index 0000000000..f449dfe001
--- /dev/null
+++ b/deps/rabbitmq_management/priv/www/js/tmpl/list-exchanges.ejs
@@ -0,0 +1,5 @@
+<select name="exchange">
+ <% for (var i = 0; i < exchanges.length; i++) { %>
+ <option value="<%= fmt_string(exchanges[i].name) %>"><%= fmt_exchange(exchanges[i].name) %></option>
+ <% } %>
+</select>
diff --git a/deps/rabbitmq_management/priv/www/js/tmpl/login.ejs b/deps/rabbitmq_management/priv/www/js/tmpl/login.ejs
new file mode 100644
index 0000000000..d22262fc3e
--- /dev/null
+++ b/deps/rabbitmq_management/priv/www/js/tmpl/login.ejs
@@ -0,0 +1,21 @@
+<div id="login">
+ <p><img src="img/rabbitmqlogo.svg" alt="RabbitMQ logo" width="204" height="37"/></p>
+
+ <form action="#/login" method="put">
+ <div id="login-status"></div>
+ <table class="form">
+ <tr>
+ <th><label>Username:</label></th>
+ <td><input type="text" name="username"/><span class="mand">*</span></td>
+ </tr>
+ <tr>
+ <th><label>Password:</label></th>
+ <td><input type="password" name="password"/><span class="mand">*</span></td>
+ </tr>
+ <tr>
+ <th>&nbsp;</th>
+ <td><input type="submit" value="Login"/></td>
+ </tr>
+ </table>
+ </form>
+</div>
diff --git a/deps/rabbitmq_management/priv/www/js/tmpl/login_uaa.ejs b/deps/rabbitmq_management/priv/www/js/tmpl/login_uaa.ejs
new file mode 100644
index 0000000000..c4bba22ce7
--- /dev/null
+++ b/deps/rabbitmq_management/priv/www/js/tmpl/login_uaa.ejs
@@ -0,0 +1,5 @@
+<div id="login">
+ <p><img src="img/rabbitmqlogo.svg" alt="RabbitMQ logo" width="204" height="37"/></p>
+
+ <div id="login-status"></div>
+</div>
diff --git a/deps/rabbitmq_management/priv/www/js/tmpl/memory-bar.ejs b/deps/rabbitmq_management/priv/www/js/tmpl/memory-bar.ejs
new file mode 100644
index 0000000000..a6d6ab631a
--- /dev/null
+++ b/deps/rabbitmq_management/priv/www/js/tmpl/memory-bar.ejs
@@ -0,0 +1,24 @@
+<div class="memory-bar">
+<%
+ var width = 800;
+
+    var pseudo_total = 0;
+ for (var section in sections) {
+ pseudo_total += memory[section];
+ }
+
+ total_out[0] = pseudo_total;
+
+ for (var section in sections) {
+ if (memory[section] > 0) {
+ var section_width = Math.round(width * memory[section] / pseudo_total);
+%>
+ <div class="memory-section memory_<%= sections[section][0] %>"
+ style="width: <%= section_width %>px;"
+ title="<%= sections[section][1] %> <%= fmt_bytes(memory[section]) %>">
+ </div>
+<%
+ }
+ }
+%>
+</div>
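The template above sums the visible sections into pseudo_total, hands that back through the total_out out-parameter array (read back by binary.ejs), and sizes each section proportionally within an 800px bar. The same arithmetic as a standalone sketch:

    // Width arithmetic from memory-bar.ejs: each section's width is its
    // share of the pseudo-total, scaled to the bar width.
    function sectionWidths(memory, sections, total_out, width) {
      width = width || 800;
      var pseudo_total = 0;
      for (var section in sections) {
        pseudo_total += memory[section];
      }
      total_out[0] = pseudo_total; // out-parameter, as in the template
      var widths = {};
      for (var section in sections) {
        if (memory[section] > 0) {
          widths[section] = Math.round(width * memory[section] / pseudo_total);
        }
      }
      return widths;
    }

    // e.g. sectionWidths({queue: 30, conn: 10}, {queue: 1, conn: 1}, [])
    //      -> {queue: 600, conn: 200}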
diff --git a/deps/rabbitmq_management/priv/www/js/tmpl/memory-table.ejs b/deps/rabbitmq_management/priv/www/js/tmpl/memory-table.ejs
new file mode 100644
index 0000000000..7f90ab82b5
--- /dev/null
+++ b/deps/rabbitmq_management/priv/www/js/tmpl/memory-table.ejs
@@ -0,0 +1,28 @@
+<%
+ for (var i in key) {
+%>
+<table class="facts">
+<%
+ for (var j in key[i]) {
+ var group = key[i][j];
+%>
+ <tr>
+ <th><div class="colour-key memory_<%= group.colour %>"></div><%= group.name %></th>
+ <td>
+ <table class="mini">
+<%
+ for (var k in group.keys) {
+ var name = group.keys[k][0];
+ var label = group.keys[k][1];
+%>
+ <tr>
+ <td class="r"><%= fmt_bytes(memory[name]) %></td>
+ <td><%= label %></td>
+ </tr>
+<% } %>
+ </table>
+ </td>
+ </tr>
+<% } %>
+</table>
+<% } %>
diff --git a/deps/rabbitmq_management/priv/www/js/tmpl/memory.ejs b/deps/rabbitmq_management/priv/www/js/tmpl/memory.ejs
new file mode 100644
index 0000000000..9bb129f446
--- /dev/null
+++ b/deps/rabbitmq_management/priv/www/js/tmpl/memory.ejs
@@ -0,0 +1,97 @@
+<%
+ if (memory == "not_available") {
+%>
+<p class="warning">
+ Memory statistics not available.
+</p>
+<% } else { %>
+<%
+ var sections = {'queue_procs' : ['queue', 'Classic queues (masters)'],
+ 'queue_slave_procs' : ['queue', 'Classic queues (mirrors)'],
+ 'quorum_queue_procs' : ['quorum', 'Quorum queues'],
+ 'stream_queue_procs' : ['stream', 'Stream queues'],
+ 'stream_queue_replica_reader_procs' : ['stream', 'Stream queues (replica reader)'],
+ 'stream_queue_coordinator_procs' : ['stream', 'Stream queues (coordinator)'],
+ 'binary' : ['binary', 'Binaries'],
+ 'connection_readers' : ['conn', 'Connection readers'],
+ 'connection_writers' : ['conn', 'Connection writers'],
+ 'connection_channels' : ['conn', 'Connection channels'],
+ 'connection_other' : ['conn', 'Connections (other)'],
+ 'mnesia' : ['table', 'Mnesia'],
+ 'msg_index' : ['table', 'Message store index'],
+ 'mgmt_db' : ['table', 'Management database'],
+ 'quorum_ets' : ['table', 'Quorum queue ETS tables'],
+ 'other_ets' : ['table', 'Other ETS tables'],
+ 'plugins' : ['proc', 'Plugins'],
+ 'other_proc' : ['proc', 'Other process memory'],
+ 'code' : ['system', 'Code'],
+ 'atom' : ['system', 'Atoms'],
+ 'other_system' : ['system', 'Other system'],
+ 'allocated_unused' : ['unused', 'Allocated unused'],
+ 'reserved_unallocated': ['unused', 'Unallocated reserved by the OS']};
+%>
+<%= format('memory-bar', {sections: sections, memory: memory, total_out: []}) %>
+<span class="clear">&nbsp;</span>
+<div class="box">
+<%
+var key = [[{name: 'Queues', colour: 'queue',
+ keys: [['queue_procs', 'queues'],
+ ['queue_slave_procs', 'mirrors'],
+ ['quorum_queue_procs', 'quorum'],
+ ['stream_queue_procs', 'stream'],
+ ['stream_queue_replica_reader_procs', 'stream replica reader'],
+ ['stream_queue_coordinator_procs', 'stream coordinator']]},
+
+ {name: 'Binaries', colour: 'binary',
+ keys: [['binary', '']]}],
+
+ [{name: 'Connections', colour: 'conn',
+ keys: [['connection_readers', 'readers'],
+ ['connection_writers', 'writers'],
+ ['connection_channels', 'channels'],
+ ['connection_other', 'other']]}],
+
+ [{name: 'Tables', colour: 'table',
+ keys: [['mnesia', 'internal database tables'],
+ ['msg_index', 'message store index'],
+ ['mgmt_db', 'management database'],
+ ['quorum_ets', 'quorum queue tables'],
+ ['other_ets', 'other']]}],
+
+ [{name: 'Processes', colour: 'proc',
+ keys: [['plugins', 'plugins'],
+ ['other_proc', 'other']]},
+ {name: 'System', colour: 'system',
+ keys: [['code', 'code'],
+ ['atom', 'atoms'],
+ ['other_system', 'other']
+ ]}],
+
+ [{name: 'Preallocated memory', colour: 'unused',
+ keys: [['allocated_unused', 'preallocated by runtime, unused'],
+ ['reserved_unallocated', 'unallocated, reserved by the OS']]}]];
+%>
+<%= format('memory-table', {key: key, memory: memory}) %>
+</div>
+
+<div class="memory-info">
+ Last updated: <b><%= fmt_date(new Date()) %></b>.<br/>
+ Memory calculation strategy: <b><%= memory.strategy %></b>. <span class="help" id="memory-calculation-strategy-breakdown"></span><br/><br/>
+ Amount of memory used vs. allocated during last update: <span class="help" id="memory-use"></span><br/>
+ <table class="facts">
+ <tr>
+ <th>Runtime Used</th>
+ <td><%= fmt_bytes(memory.total.erlang) %></td>
+ </tr>
+ <tr>
+ <th>Runtime Allocated</th>
+ <td><%= fmt_bytes(memory.total.allocated) %></td>
+ </tr>
+ <tr>
+ <th>Resident Set Size (RSS) reported by the OS</th>
+ <td><%= fmt_bytes(memory.total.rss) %></td>
+ </tr>
+ </table>
+</div>
+
+<% } %>
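The memory-info table above distinguishes three totals: what the runtime uses, what it has allocated, and the resident set size the OS reports. A hedged sketch of how the two "unused" sections in the breakdown plausibly relate to those totals (assumed relationship based on the documented memory reporting, not taken from this template):

    // total.erlang = used by the runtime, total.allocated = allocated by
    // the runtime, total.rss = what the OS reports.
    function memoryGaps(total) {
      return {
        allocated_unused: Math.max(total.allocated - total.erlang, 0),
        reserved_unallocated: Math.max(total.rss - total.allocated, 0)
      };
    }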
diff --git a/deps/rabbitmq_management/priv/www/js/tmpl/messages.ejs b/deps/rabbitmq_management/priv/www/js/tmpl/messages.ejs
new file mode 100644
index 0000000000..06836a1e23
--- /dev/null
+++ b/deps/rabbitmq_management/priv/www/js/tmpl/messages.ejs
@@ -0,0 +1,39 @@
+<%
+ for (var i = 0; i < msgs.length; i++) {
+ var msg = msgs[i];
+%>
+<div class="box">
+<h3>Message <%= i+1 %></h3>
+<p>The server reported <b><%= msg.message_count %></b> messages remaining.</p>
+<table class="facts">
+ <tr>
+ <th>Exchange</th>
+ <td><%= fmt_exchange(msg.exchange) %></td>
+ </tr>
+ <tr>
+ <th>Routing Key</th>
+ <td><%= fmt_string(msg.routing_key) %></td>
+ </tr>
+ <tr>
+ <th>Redelivered</th>
+ <td><%= fmt_boolean(msg.redelivered) %></td>
+ </tr>
+ <tr>
+ <th>Properties</th>
+ <td><%= fmt_table_short(msg.properties) %></td>
+ </tr>
+ <tr>
+ <th>
+ Payload
+ <sub><%= msg.payload_bytes %> bytes</sub>
+ <sub>Encoding: <%= msg.payload_encoding %></sub>
+ </th>
+ <td>
+ <pre class="msg-payload"><%= fmt_maybe_wrap(msg.payload, msg.payload_encoding) %></pre>
+ </td>
+ </tr>
+</table>
+</div>
+<%
+ }
+%>
diff --git a/deps/rabbitmq_management/priv/www/js/tmpl/msg-detail-deliveries.ejs b/deps/rabbitmq_management/priv/www/js/tmpl/msg-detail-deliveries.ejs
new file mode 100644
index 0000000000..ee9e6f6500
--- /dev/null
+++ b/deps/rabbitmq_management/priv/www/js/tmpl/msg-detail-deliveries.ejs
@@ -0,0 +1,30 @@
+<h3>Deliveries</h3>
+<% if (object && object.length > 0) { %>
+<table class="list">
+ <tr>
+<% if (mode == 'queue') { %>
+ <th>Channel</th>
+<% } else { %>
+ <th>Queue</th>
+<% } %>
+ <th>deliver / get</th>
+ <th>ack</th>
+ </tr>
+<%
+ for (var i = 0; i < object.length; i++) {
+ var del = object[i];
+%>
+ <tr<%= alt_rows(i)%>>
+<% if (mode == 'queue') { %>
+ <td><%= link_channel(del.channel_details.name) %></td>
+<% } else { %>
+ <td><%= link_queue(del.queue.vhost, del.queue.name) %></td>
+<% } %>
+ <td class="r"><%= fmt_detail_rate(del.stats, 'deliver_get') %></td>
+ <td class="r"><%= fmt_detail_rate(del.stats, 'ack') %></td>
+ </tr>
+<% } %>
+</table>
+<% } else { %>
+<p>... no deliveries ...</p>
+<% } %>
diff --git a/deps/rabbitmq_management/priv/www/js/tmpl/msg-detail-publishes.ejs b/deps/rabbitmq_management/priv/www/js/tmpl/msg-detail-publishes.ejs
new file mode 100644
index 0000000000..4ea959ac52
--- /dev/null
+++ b/deps/rabbitmq_management/priv/www/js/tmpl/msg-detail-publishes.ejs
@@ -0,0 +1,46 @@
+<h3><%= label %></h3>
+<% if (object && object.length > 0) { %>
+<%
+ var col_confirm = mode != 'exchange-outgoing';
+%>
+<table class="list">
+ <tr>
+<% if (mode == 'channel') { %>
+ <th>Exchange</th>
+<% } else if (mode == 'exchange-incoming') { %>
+ <th>Channel</th>
+<% } else if (mode == 'exchange-outgoing') { %>
+ <th>Queue</th>
+<% } else { %>
+ <th>Exchange</th>
+<% } %>
+ <th>publish</th>
+<% if (col_confirm) { %>
+ <th>confirm</th>
+<% } %>
+ </tr>
+<%
+ for (var i = 0; i < object.length; i++) {
+ var pub = object[i];
+%>
+ <tr<%= alt_rows(i)%>>
+
+<% if (mode == 'channel') { %>
+ <td><%= link_exchange(pub.exchange.vhost, pub.exchange.name) %></td>
+<% } else if (mode == 'exchange-incoming') { %>
+ <td><%= link_channel(pub.channel_details.name) %></td>
+<% } else if (mode == 'exchange-outgoing') { %>
+ <td><%= link_queue(pub.queue.vhost, pub.queue.name) %></td>
+<% } else { %>
+ <td><%= link_exchange(pub.exchange.vhost, pub.exchange.name) %></td>
+<% } %>
+ <td class="r"><%= fmt_detail_rate(pub.stats, 'publish') %></td>
+<% if (col_confirm) { %>
+ <td class="r"><%= fmt_detail_rate(pub.stats, 'confirm') %></td>
+<% } %>
+ </tr>
+<% } %>
+</table>
+<% } else { %>
+<p>... no publishes ...</p>
+<% } %>
diff --git a/deps/rabbitmq_management/priv/www/js/tmpl/node.ejs b/deps/rabbitmq_management/priv/www/js/tmpl/node.ejs
new file mode 100644
index 0000000000..884ccd6e88
--- /dev/null
+++ b/deps/rabbitmq_management/priv/www/js/tmpl/node.ejs
@@ -0,0 +1,416 @@
+<h1>Node <b><%= node.name %></b></h1>
+<div class="updatable">
+
+<% if (!node.running) { %>
+<p class="warning">Node not running</p>
+<% } else if ((node.os_pid == undefined) && (!disable_stats)) { %>
+<p class="warning">Node statistics not available</p>
+<% } else { %>
+
+<% if(!disable_stats) { %>
+<div class="section">
+<h2>Overview</h2>
+<div class="hider">
+ <div class="box">
+ <table class="facts facts-l">
+ <tr>
+ <th>Uptime</th>
+ <td><%= fmt_uptime(node.uptime) %></td>
+ </tr>
+<% if (rabbit_versions_interesting) { %>
+ <tr>
+ <th>RabbitMQ Version</th>
+ <td><%= fmt_rabbit_version(node.applications) %></td>
+ </tr>
+<% } %>
+ <tr>
+ <th>Type</th>
+ <td>
+ <% if (node.type == 'disc') { %>
+ <abbr title="Broker definitions are held on disc.">Disc</abbr>
+ <% } else { %>
+ <abbr title="Broker definitions are held in RAM. Messages will still be written to disc if necessary.">RAM</abbr>
+ <% } %>
+ </td>
+ </tr>
+ <tr>
+ <th>
+ <a href="https://www.rabbitmq.com/configure.html" target="_blank">Config file</a>
+ </th>
+ <td>
+ <%
+ for (var i = 0; i < node.config_files.length; i++) {
+ var config = fmt_escape_html(node.config_files[i]);
+ %>
+ <code><%= config %></code>
+ <% } %>
+ </td>
+ </tr>
+ <tr>
+ <th>Database directory</th>
+ <td>
+ <code><%= node.db_dir %></code>
+ </td>
+ </tr>
+ <tr>
+ <th>
+<% if (node.log_files.length == 1) { %>
+ Log file
+<% } else { %>
+ Log files
+<% } %>
+ </th>
+ <td>
+ <pre style="margin-top: 0px; margin-bottom: 0px;"><%
+      for (var i = 0; i < node.log_files.length; i++) {
+        var log_file = fmt_escape_html(node.log_files[i]);
+%><%= log_file %>
+<% } %></pre>
+ </td>
+ </tr>
+ </table>
+ </div>
+</div>
+</div>
+
+<% if(!disable_stats) { %>
+<div class="section">
+<h2>Process statistics</h2>
+<div class="hider">
+ <%= node_stats_prefs() %>
+ <table class="facts">
+ <tr>
+ <th>
+ File descriptors <span class="help" id="file-descriptors"></span>
+ </th>
+ <td>
+<% if (node.fd_used != 'install_handle_from_sysinternals') { %>
+ <%= node_stat_count('fd_used', 'fd_total', node, FD_THRESHOLDS) %>
+<% } else { %>
+ <p class="c">handle.exe missing <span class="help" id="handle-exe"></span><sub><%= node.fd_total %> available</sub></p>
+
+<% } %>
+ </td>
+ </tr>
+ <tr>
+ <th>
+ Socket descriptors <span class="help" id="socket-descriptors"></span>
+ </th>
+ <td>
+ <%= node_stat_count('sockets_used', 'sockets_total', node, FD_THRESHOLDS) %>
+ </td>
+ </tr>
+ <tr>
+ <th>
+ Erlang processes
+ </th>
+ <td>
+ <%= node_stat_count('proc_used', 'proc_total', node, PROCESS_THRESHOLDS) %>
+ </td>
+ </tr>
+ <tr>
+ <th>
+ Memory <span class="help" id="memory-calculation-strategy"></span>
+ </th>
+ <td>
+<% if (node.mem_limit != 'memory_monitoring_disabled') { %>
+ <%= node_stat('mem_used', 'Used', 'mem_limit', 'high watermark', node,
+ fmt_bytes, fmt_bytes_axis,
+ node.mem_alarm ? 'red' : 'green',
+ node.mem_alarm ? 'memory-alarm' : null) %>
+<% } else { %>
+ <%= fmt_bytes(node.mem_used) %>
+<% } %>
+ </td>
+ </tr>
+ <tr>
+ <th>
+ Disk space
+ </th>
+ <td>
+<% if (node.disk_free_limit != 'disk_free_monitoring_disabled') { %>
+ <%= node_stat('disk_free', 'Free', 'disk_free_limit', 'low watermark', node,
+ fmt_bytes, fmt_bytes_axis,
+ node.disk_free_alarm ? 'red' : 'green',
+ node.disk_free_alarm ? 'disk_free-alarm' : null,
+ true) %>
+<% } else { %>
+ (not available)
+<% } %>
+ </td>
+ </tr>
+ </table>
+</div>
+</div>
+
+<div class="section-hidden">
+<h2>Persistence statistics</h2>
+<div class="hider">
+ <%= rates_chart_or_text('mnesia-stats-count', node,
+ [['RAM only', 'mnesia_ram_tx_count'],
+ ['Disk', 'mnesia_disk_tx_count']],
+ fmt_rate, fmt_rate_axis, true, 'Schema data store transactions', 'mnesia-transactions') %>
+
+ <%= rates_chart_or_text('persister-msg-stats-count', node,
+ [['QI Journal', 'queue_index_journal_write_count'],
+ ['Store Read', 'msg_store_read_count'],
+ ['Store Write', 'msg_store_write_count']],
+ fmt_rate, fmt_rate_axis, true, 'Persistence operations (messages)', 'persister-operations-msg') %>
+
+ <%= rates_chart_or_text('persister-bulk-stats-count', node,
+ [['QI Read', 'queue_index_read_count'],
+ ['QI Write', 'queue_index_write_count']],
+ fmt_rate, fmt_rate_axis, true, 'Persistence operations (bulk)', 'persister-operations-bulk') %>
+</div>
+</div>
+
+<div class="section-hidden">
+<h2>I/O statistics</h2>
+<div class="hider">
+ <%= rates_chart_or_text('persister-io-stats-count', node,
+ [['Read', 'io_read_count'],
+ ['Write', 'io_write_count'],
+ ['Seek', 'io_seek_count'],
+ ['Sync', 'io_sync_count'],
+ ['File handle reopen', 'io_reopen_count'],
+ ['File handle open attempt', 'io_file_handle_open_attempt_count']],
+ fmt_rate, fmt_rate_axis, true, 'I/O operations', 'io-operations') %>
+
+ <%= rates_chart_or_text('persister-io-stats-bytes', node,
+ [['Read', 'io_read_bytes'],
+ ['Write', 'io_write_bytes']],
+ fmt_rate_bytes, fmt_rate_bytes_axis, true, 'I/O data rates') %>
+
+ <%= rates_chart_or_text('persister-io-stats-time', node,
+ [['Read', 'io_read_avg_time'],
+ ['Write', 'io_write_avg_time'],
+ ['Seek', 'io_seek_avg_time'],
+ ['Sync', 'io_sync_avg_time'],
+ ['File handle open attempt', 'io_file_handle_open_attempt_avg_time']],
+ fmt_ms, fmt_ms, false, 'I/O average time per operation') %>
+</div>
+</div>
+
+<div class="section-hidden">
+<h2>Churn statistics</h2>
+<div class="hider">
+ <%= rates_chart_or_text('connection-churn', node,
+ [['Created', 'connection_created'],
+ ['Closed', 'connection_closed']],
+ fmt_rate, fmt_rate_axis, true, 'Connection operations', 'connection-operations') %>
+
+ <%= rates_chart_or_text('channel-churn', node,
+ [['Created', 'channel_created'],
+ ['Closed', 'channel_closed']],
+ fmt_rate, fmt_rate_axis, true, 'Channel operations', 'channel-operations') %>
+
+ <%= rates_chart_or_text('queue-churn', node,
+ [['Declared', 'queue_declared'],
+ ['Created', 'queue_created'],
+ ['Deleted', 'queue_deleted']],
+ fmt_rate, fmt_rate_axis, true, 'Queue operations', 'queue-operations') %>
+
+</div>
+</div>
+<% } %>
+
+<div class="section-hidden">
+<h2>Cluster links</h2>
+<div class="hider">
+<% if (node.cluster_links.length > 0) { %>
+<table class="list">
+ <tr>
+ <th>Remote node</th>
+ <th>Local address</th>
+ <th>Local port</th>
+ <th>Remote address</th>
+ <th>Remote port</th>
+ <th class="plain">
+ <%= chart_h3('cluster-link-data-rates', 'Data rates') %>
+ </th>
+ </tr>
+ <%
+ for (var i = 0; i < node.cluster_links.length; i++) {
+ var link = node.cluster_links[i];
+ %>
+ <tr<%= alt_rows(i)%>>
+ <td><%= link_node(link.name) %></td>
+ <td><%= fmt_string(link.sock_addr) %></td>
+ <td><%= fmt_string(link.sock_port) %></td>
+ <td><%= fmt_string(link.peer_addr) %></td>
+ <td><%= fmt_string(link.peer_port) %></td>
+ <td class="plain">
+ <%= rates_chart_or_text_no_heading(
+ 'cluster-link-data-rates', 'cluster-link-data-rates' + link.name,
+ link.stats,
+ [['Recv', 'recv_bytes'],
+ ['Send', 'send_bytes']],
+ fmt_rate_bytes, fmt_rate_bytes_axis, true) %>
+ </td>
+ </tr>
+<% } %>
+</table>
+<% } else { %>
+ <p>... no cluster links ...</p>
+<% } %>
+</div>
+</div>
+
+<% } %>
+
+</div>
+<% } %>
+
+<!--
+ The next two need to be non-updatable or we will wipe the memory details
+  as soon as we have drawn them.
+ -->
+
+<% if (node.running && (node.os_pid != undefined || disable_stats)) { %>
+
+<div class="section">
+<h2>Memory details</h2>
+<div class="hider">
+ <div id="memory-details"></div>
+ <button class="update-manual memory-button" for="memory-details" query="memory">Update</button>
+</div>
+</div>
+
+<div class="section-hidden">
+<h2>Binary references</h2>
+<div class="hider">
+ <p>
+ <b>Warning:</b> Calculating binary memory use can be expensive if
+ there are many small binaries in the system.
+ </p>
+ <div id="binary-details"></div>
+ <button class="update-manual memory-button" for="binary-details" query="binary">Update</button>
+</div>
+</div>
+
+<% } %>
+
+<% if(!disable_stats) { %>
+<div class="updatable">
+<% if (node.running && node.os_pid != undefined) { %>
+
+<div class="section-hidden">
+<h2>Advanced</h2>
+<div class="hider">
+ <div class="box">
+ <h3>VM</h3>
+ <table class="facts">
+ <tr>
+ <th>OS pid</th>
+ <td><%= node.os_pid %></td>
+ </tr>
+ <tr>
+ <th>Rates mode</th>
+ <td><%= node.rates_mode %></td>
+ </tr>
+ <tr>
+ <th>Net ticktime</th>
+ <td><%= node.net_ticktime %>s</td>
+ </tr>
+ </table>
+
+ <table class="facts">
+ <tr>
+ <th>Run queue</th>
+ <td><%= node.run_queue %></td>
+ </tr>
+ <tr>
+ <th>Processors</th>
+ <td><%= node.processors %></td>
+ </tr>
+ </table>
+ </div>
+
+ <%= rates_chart_or_text('advanced-gc-stats-count', node,
+ [['GC', 'gc_num']],
+ fmt_rate, fmt_rate_axis, true, 'GC operations', 'gc-operations') %>
+
+ <%= rates_chart_or_text('advanced-gc-bytes-stats-count', node,
+ [['GC bytes reclaimed', 'gc_bytes_reclaimed']],
+ fmt_rate, fmt_rate_axis, true, 'GC bytes reclaimed', 'gc-bytes') %>
+
+ <%= rates_chart_or_text('advanced-context-switches-stats-count', node,
+ [['Context switches', 'context_switches']],
+ fmt_rate, fmt_rate_axis, true, 'Context switch operations', 'context-switches-operations') %>
+
+<div class="box">
+ <h3>Management GC queue length</h3>
+ <table class="facts">
+ <% for(var k in node.metrics_gc_queue_length) {
+ if(node.metrics_gc_queue_length.hasOwnProperty(k)) { %>
+ <tr>
+ <th><%= k %></th>
+ <td><%= node.metrics_gc_queue_length[k] %></td>
+ </tr>
+ <% } } %>
+ </table>
+</div>
+
+<div class="box">
+ <h3>Quorum queue open file metrics</h3>
+ <table class="facts">
+ <% for(var k in node.ra_open_file_metrics) { %>
+ <tr>
+ <th><%= k %></th>
+ <td><%= node.ra_open_file_metrics[k] %></td>
+ </tr>
+ <% } %>
+ </table>
+</div>
+
+<h3>Plugins <span class="help" id="plugins"></span></h3>
+<table class="list">
+ <tr>
+ <th>Name</th>
+ <th>Version</th>
+ <th>Description</th>
+ </tr>
+ <%
+ var plugins = get_plugins_list(node);
+ for (var j = 0; j < plugins.length; j++) {
+ var application = plugins[j];
+ %>
+ <tr<%= alt_rows(j)%>>
+ <td><%= application.name %></td>
+ <td><%= application.version %></td>
+ <td><%= application.description %></td>
+ </tr>
+ <% } %>
+</table>
+
+<h3>All applications</h3>
+<table class="list">
+ <tr>
+ <th>Name</th>
+ <th>Version</th>
+ <th>Description</th>
+ </tr>
+ <%
+ for (var j = 0; j < node.applications.length; j++) {
+ var application = node.applications[j];
+ %>
+ <tr<%= alt_rows(j)%>>
+ <td><%= application.name %></td>
+ <td><%= application.version %></td>
+ <td><%= application.description %></td>
+ </tr>
+ <% } %>
+</table>
+
+<h3>Exchange types</h3>
+<%= format('registry', {'list': node.exchange_types, 'node': node, 'show_enabled': false} ) %>
+<h3>Authentication mechanisms</h3>
+<%= format('registry', {'list': node.auth_mechanisms, 'node': node, 'show_enabled': true} ) %>
+
+</div>
+</div>
+
+<% } %>
+<% } %>
+
+</div>
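`node_stat_count` and its relatives take a thresholds table (`FD_THRESHOLDS`, `PROCESS_THRESHOLDS`) that maps a used/total ratio to a traffic-light colour. The table shape is assumed from the call sites above and the cut-off values are illustrative:

    var FD_THRESHOLDS = [[0.95, 'red'], [0.80, 'yellow']];

    function ratio_colour(used, total, thresholds) {
        var ratio = used / total;
        for (var i = 0; i < thresholds.length; i++) {
            if (ratio >= thresholds[i][0]) return thresholds[i][1];
        }
        return 'green'; // comfortably below every threshold
    }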
diff --git a/deps/rabbitmq_management/priv/www/js/tmpl/overview.ejs b/deps/rabbitmq_management/priv/www/js/tmpl/overview.ejs
new file mode 100644
index 0000000000..4b6dce1a99
--- /dev/null
+++ b/deps/rabbitmq_management/priv/www/js/tmpl/overview.ejs
@@ -0,0 +1,399 @@
+<% if(disable_stats) { %>
+ <h1>Overview: Management only mode</h1>
+<% } else { %>
+ <h1>Overview</h1>
+<% } %>
+<% if (user_monitor) { %>
+<%= format('partition', {'nodes': nodes}) %>
+<% } %>
+
+<div class="updatable">
+<% if (overview.statistics_db_event_queue > 1000) { %>
+<p class="warning">
+ The management statistics database currently has a queue
+ of <b><%= overview.statistics_db_event_queue %></b> events to
+ process. If this number keeps increasing, so will the memory used by
+ the management plugin.
+
+ <% if (overview.rates_mode != 'none') { %>
+ You may find it useful to set the <code>rates_mode</code> config item
+ to <code>none</code>.
+ <% } %>
+</p>
+<% } %>
+<% for (i = 0; i < vhosts.length; i++)
+{
+ for (var vhost_status_node in vhosts[i].cluster_state) {
+ if (vhosts[i].cluster_state[vhost_status_node] != 'running') {
+%>
+<p class="warning">
+  Virtual host <b><%= vhosts[i].name %></b> experienced an error on node <b><%= vhost_status_node %></b> and may be inaccessible.
+</p>
+<% }}} %>
+</div>
+
+<div class="section">
+<h2>Totals</h2>
+<div class="hider updatable">
+<% if(!disable_stats) { %>
+<%= queue_lengths('lengths-over', overview.queue_totals) %>
+<% if (rates_mode != 'none') { %>
+ <%= message_rates('msg-rates-over', overview.message_stats) %>
+<% } %>
+<% } %>
+
+<% if (overview.object_totals) { %>
+ <h3>Global counts <span class="help" id="resource-counts"></span></h3>
+
+ <ul id="global-counts">
+ <li>
+ <a href="#/connections" class="button">Connections: <strong><%= overview.object_totals.connections %></strong></a>
+ </li>
+<% if(!disable_stats) { %>
+ <li>
+ <a href="#/channels" class="button">Channels: <strong><%= overview.object_totals.channels %></strong></a>
+ </li>
+<% } %>
+ <li>
+ <a href="#/exchanges" class="button">Exchanges: <strong><%= overview.object_totals.exchanges %></strong></a>
+ </li>
+ <li>
+ <a href="#/queues" class="button">Queues: <strong><%= overview.object_totals.queues %></strong></a>
+ </li>
+<% if (overview.object_totals['consumers'] != undefined) { %>
+ <li>
+ <a href="#" class="button disabled">Consumers: <strong><%= overview.object_totals.consumers %></strong></a>
+ </li>
+<% } %>
+ </ul>
+<% } %>
+
+</div>
+</div>
+
+<% if (user_monitor) { %>
+<div class="section">
+<h2>Nodes</h2>
+
+<div class="hider updatable">
+
+<table class="list">
+ <tr>
+ <th>Name</th>
+ <% if(!disable_stats) { %>
+ <% if (show_column('overview', 'file_descriptors')) { %>
+ <th>File descriptors <span class="help" id="file-descriptors"></span></th>
+ <% } %>
+ <% if (show_column('overview', 'socket_descriptors')) { %>
+ <th>Socket descriptors <span class="help" id="socket-descriptors"></span></th>
+ <% } %>
+ <% if (show_column('overview', 'erlang_processes')) { %>
+ <th>Erlang processes</th>
+ <% } %>
+ <% if (show_column('overview', 'memory')) { %>
+ <th>Memory <span class="help" id="memory-calculation-strategy"></span></th>
+ <% } %>
+ <% if (show_column('overview', 'disk_space')) { %>
+ <th>Disk space</th>
+ <% } %>
+ <% if (show_column('overview', 'uptime')) { %>
+ <th>Uptime</th>
+ <% } %>
+ <% if (show_column('overview', 'info')) { %>
+ <th>Info</th>
+ <% } %>
+ <% if (user_administrator && show_column('overview', 'reset_stats')) { %>
+ <th>Reset stats</th>
+ <% } %>
+ <th class="plus-minus"><span class="popup-options-link" title="Click to change columns" type="columns" for="overview">+/-</span></th>
+ <% } %>
+ </tr>
+<%
+ for (var i = 0; i < nodes.length; i++) {
+ var node = nodes[i];
+ if(!disable_stats) {
+ var colspan = group_count('overview', 'Statistics', []) +
+ group_count('overview', 'General', []);
+ } else {
+ var colspan = [];
+ }
+%>
+ <tr<%= alt_rows(i)%>>
+ <td>
+ <a href="#/nodes/<%= esc(node.name) %>" class="button"><%= fmt_node(node.name) %></a>
+ <% if (rabbit_versions_interesting) { %>
+ <sub>RabbitMQ <%= fmt_rabbit_version(node.applications) %></sub>
+ <% } %>
+ </td>
+<% if(!disable_stats) { %>
+<% if (!node.running) { %>
+ <td colspan="<%= colspan %>">
+ <div class="status-red">
+ Node not running
+ </div>
+ </td>
+<% } else if (node.os_pid == undefined) { %>
+ <td colspan="<%= colspan %>">
+ <div class="status-yellow">
+ <abbr title="The rabbitmq_management_agent plugin should be enabled on this node. If it is not, various statistics will be inaccurate.">
+ Node statistics not available</abbr>
+ </div>
+ </td>
+<% } else { %>
+ <% if (show_column('overview', 'file_descriptors')) { %>
+ <td>
+ <% if (node.fd_used != 'install_handle_from_sysinternals') { %>
+ <%= node_stat_count_bar('fd_used', 'fd_total', node, FD_THRESHOLDS) %>
+ <% } else { %>
+ <p class="c">handle.exe missing <span class="help" id="handle-exe"></span><sub><%= node.fd_total %> available</sub></p>
+
+ <% } %>
+ </td>
+ <% } %>
+ <% if (show_column('overview', 'socket_descriptors')) { %>
+ <td>
+ <%= node_stat_count_bar('sockets_used', 'sockets_total', node, FD_THRESHOLDS) %>
+ </td>
+ <% } %>
+ <% if (show_column('overview', 'erlang_processes')) { %>
+ <td>
+
+ <%= node_stat_count_bar('proc_used', 'proc_total', node, PROCESS_THRESHOLDS) %>
+ </td>
+ <% } %>
+ <% if (show_column('overview', 'memory')) { %>
+ <td>
+
+ <% if (node.mem_limit != 'memory_monitoring_disabled') { %>
+ <%= node_stat_bar('mem_used', 'mem_limit', 'high watermark', node, fmt_bytes_axis,
+ node.mem_alarm ? 'red' : 'green',
+ node.mem_alarm ? 'memory-alarm' : null) %>
+ <% } else { %>
+ <%= fmt_bytes(node.mem_used) %>
+ <% } %>
+ </td>
+ <% } %>
+ <% if (show_column('overview', 'disk_space')) { %>
+ <td>
+
+ <% if (node.disk_free_limit != 'disk_free_monitoring_disabled') { %>
+ <%= node_stat_bar('disk_free', 'disk_free_limit', 'low watermark', node, fmt_bytes_axis,
+ node.disk_free_alarm ? 'red' : 'green',
+ node.disk_free_alarm ? 'disk_free-alarm' : null, true) %>
+ <% } else { %>
+ (not available)
+ <% } %>
+ </td>
+ <% } %>
+ <% if (show_column('overview', 'uptime')) { %>
+ <td><span><%= fmt_uptime(node.uptime) %></span></td>
+ <% } %>
+ <% if (show_column('overview', 'info')) { %>
+ <td>
+ <abbr title="Message rates"><%= fmt_string(node.rates_mode) %></abbr>
+ <% if (node.type == 'disc') { %>
+ <abbr title="Broker definitions are held on disc.">disc</abbr>
+ <% } else { %>
+ <abbr title="Broker definitions are held in RAM. Messages will still be written to disc if necessary.">RAM</abbr>
+ <% } %>
+ <%= fmt_plugins_small(node) %>
+ <abbr title="Memory calculation strategy"><%= fmt_string(node.mem_calculation_strategy) %></abbr>
+ </td>
+ <% } %>
+ <% if(user_administrator && show_column('overview', 'reset_stats')) { %>
+ <td>
+ <form action="#/reset_node" method="delete" class="confirm inline-form">
+ <input type="hidden" name="node" value="<%= node.name %>"/>
+ <input type="submit" value="This node"/>
+ </form>
+ <form action="#/reset" method="delete" class="confirm inline-form-right">
+ <input type="submit" value="All nodes"/>
+          </form>
+        </td>
+        <% } %>
+<% } %>
+<% } %>
+ </tr>
+<% } %>
+</table>
+
+</div>
+</div>
+
+<% if(!disable_stats) { %>
+<div class="section-hidden">
+<h2>Churn statistics</h2>
+<div class="hider updatable">
+ <%= rates_chart_or_text('connection-churn', overview.churn_rates,
+ [['Created', 'connection_created'],
+ ['Closed', 'connection_closed']],
+ fmt_rate, fmt_rate_axis, true, 'Connection operations', 'connection-operations') %>
+
+ <%= rates_chart_or_text('channel-churn', overview.churn_rates,
+ [['Created', 'channel_created'],
+ ['Closed', 'channel_closed']],
+ fmt_rate, fmt_rate_axis, true, 'Channel operations', 'channel-operations') %>
+
+ <%= rates_chart_or_text('queue-churn', overview.churn_rates,
+ [['Declared', 'queue_declared'],
+ ['Created', 'queue_created'],
+ ['Deleted', 'queue_deleted']],
+ fmt_rate, fmt_rate_axis, true, 'Queue operations', 'queue-operations') %>
+
+</div>
+</div>
+<% } %>
+
+<% if(!disable_stats) { %>
+<div class="section-hidden">
+<h2>Ports and contexts</h2>
+<div class="hider updatable">
+<h3>Listening ports</h3>
+<table class="list">
+ <tr>
+ <th>Protocol</th>
+<% if (nodes_interesting) { %>
+ <th>Node</th>
+<% } %>
+ <th>Bound to</th>
+ <th>Port</th>
+ </tr>
+ <%
+ for (var i = 0; i < overview.listeners.length; i++) {
+ var listener = overview.listeners[i];
+ %>
+ <tr<%= alt_rows(i)%>>
+ <td><%= listener.protocol %></td>
+<% if (nodes_interesting) { %>
+ <td><%= fmt_node(listener.node) %></td>
+<% } %>
+ <td><%= listener.ip_address %></td>
+ <td><%= listener.port %></td>
+ </tr>
+ <% } %>
+</table>
+<h3>Web contexts</h3>
+<table class="list">
+ <tr>
+ <th>Context</th>
+<% if (nodes_interesting) { %>
+ <th>Node</th>
+<% } %>
+ <th>Bound to</th>
+ <th>Port</th>
+ <th>SSL</th>
+ <th>Path</th>
+ </tr>
+ <%
+ for (var i = 0; i < overview.contexts.length; i++) {
+ var context = overview.contexts[i];
+ %>
+ <tr<%= alt_rows(i)%>>
+ <td><%= context.description %></td>
+<% if (nodes_interesting) { %>
+ <td><%= fmt_node(context.node) %></td>
+<% } %>
+ <td><%= (context.ip != undefined) ? context.ip : "0.0.0.0" %></td>
+ <td><%= context.port %></td>
+ <td class="c"><%= fmt_boolean(context.ssl || false) %></td>
+ <td><%= context.path %></td>
+ </tr>
+ <% } %>
+</table>
+</div>
+</div>
+<% } %>
+
+<div class="section-hidden administrator-only">
+<h2>Export definitions</h2>
+<div class="hider">
+ <table class="two-col-layout">
+ <tr>
+ <td>
+ <p>
+ <label for="download-filename">Filename for download:</label><br/>
+ <input type="text" id="download-filename" value="<%= fmt_download_filename(overview.node) %>" class="wide" />
+ </p>
+ </td>
+ <td>
+ <p>
+ <button id="download-definitions">Download broker definitions</button>
+ <span class="help" id="export-definitions"></span>
+ </p>
+ </td>
+ </tr>
+ <tr>
+ <td>
+ <% if (vhosts_interesting) { %>
+ <label>Virtual host:</label>
+ <select name="vhost-download">
+ <option value="all">All</option>
+ <% for (var i = 0; i < vhosts.length; i++) { %>
+ <option value="<%= fmt_string(vhosts[i].name) %>"><%= fmt_string(vhosts[i].name) %></option>
+ <% } %>
+ </select> <span class="help" id="export-definitions-vhost"></span>
+<% } else { %>
+ <input type="hidden" name="vhost" value="all"/>
+<% } %>
+ </td>
+ </tr>
+ </table>
+</div>
+</div>
+
+<div class="section-hidden administrator-only">
+<h2>Import definitions</h2>
+<div class="hider">
+ <form method="post" enctype="multipart/form-data">
+ <table class="two-col-layout">
+ <tr>
+ <td>
+ <p>
+ <label>Definitions file:</label><br/>
+ <input type="file" name="file"/>
+ </p>
+ </td>
+ <td>
+ <p>
+ <input type="submit" value="Upload broker definitions" onclick="submit_import($(this).closest('form')[0]); return false"/>
+ <span class="help" id="import-definitions"></span>
+ </p>
+ </td>
+ </tr>
+ <tr>
+ <td>
+ <% if (vhosts_interesting) { %>
+ <label>Virtual host:</label>
+ <select name="vhost-upload">
+ <option value="all">All</option>
+ <% for (var i = 0; i < vhosts.length; i++) { %>
+ <option value="<%= fmt_string(vhosts[i].name) %>"><%= fmt_string(vhosts[i].name) %></option>
+ <% } %>
+ </select> <span class="help" id="import-definitions-vhost"></span>
+
+<% } else { %>
+ <input type="hidden" name="vhost" value="all"/>
+<% } %>
+ </td>
+ </tr>
+ </table>
+ </form>
+</div>
+</div>
+
+<% if (overview.rates_mode == 'none') { %>
+<div class="section-hidden">
+<h2>Message rates disabled</h2>
+<div class="hider">
+<p>
+ Message rates are currently disabled.
+</p>
+<p>
+ To re-enable message rates, edit your configuration file and
+ set <code>rates_mode</code> to <code>basic</code>
+ or <code>detailed</code> in the <code>rabbitmq_management</code>
+  application.
+</p>
+</div>
+</div>
+<% } %>
+<% } %>
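The fields this template reads (`object_totals`, `listeners`, `contexts`, `statistics_db_event_queue`, `churn_rates`) all come from a single `GET /api/overview` call, and the `rates_mode` item referred to in the event-queue warning corresponds to `management.rates_mode = none` in the ini-style rabbitmq.conf. A sketch of inspecting the endpoint directly (the credentials are the usual defaults and purely illustrative):

    fetch('/api/overview', {
        headers: { 'authorization': 'Basic ' + btoa('guest:guest') }
    }).then(function (res) { return res.json(); })
      .then(function (overview) {
          console.log('connections:', overview.object_totals.connections);
          console.log('event queue:', overview.statistics_db_event_queue);
      });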
diff --git a/deps/rabbitmq_management/priv/www/js/tmpl/partition.ejs b/deps/rabbitmq_management/priv/www/js/tmpl/partition.ejs
new file mode 100644
index 0000000000..7c40fa88f4
--- /dev/null
+++ b/deps/rabbitmq_management/priv/www/js/tmpl/partition.ejs
@@ -0,0 +1,105 @@
+<div class="updatable">
+<%
+ var partitions = [];
+ for (var i = 0; i < nodes.length; i++) {
+ var node = nodes[i];
+ if (node.partitions != undefined && node.partitions.length != 0) {
+ partitions.push({'node': node.name,
+ 'others': node.partitions});
+ }
+ }
+ if (partitions.length > 0) {
+%>
+<p class="status-error">
+ Network partition detected<br/><br/>
+ Mnesia reports that this RabbitMQ cluster has experienced a
+ network partition. There is a risk of losing data. Please read
+ <a href="https://www.rabbitmq.com/partitions.html">RabbitMQ
+ documentation about network partitions and the possible solutions</a>.
+</p>
+<p>
+ The nature of the partition is as follows:
+</p>
+ <table class="list">
+ <tr>
+ <th>Node</th><th>Was partitioned from</th>
+ </tr>
+
+<%
+ for (var i = 0; i < partitions.length; i++) {
+ var partition = partitions[i];
+%>
+ <tr<%= alt_rows(i)%>>
+ <td><%= fmt_node(partition.node) %></td>
+ <td>
+<%
+ for (var j = 0; j < partition.others.length; j++) {
+ var other = partition.others[j];
+%>
+ <%= other %><br/>
+<% } %>
+ </td>
+ </tr>
+<% } %>
+ </table>
+<p>
+  While running in this partitioned state, changes (such as queue or
+  exchange declarations and bindings) that take place in one partition
+  will not be visible to other partition(s). Other behaviour is not
+  guaranteed.
+</p>
+<p>
+ <a target="_blank"
+ href="https://www.rabbitmq.com/partitions.html">More information on
+ network partitions.</a>
+</p>
+<% } %>
+<%
+ var ticktime = null;
+ var ticktimes_unequal = false;
+ for (var i = 0; i < nodes.length; i++) {
+ var node_ticktime = nodes[i].net_ticktime;
+ if (node_ticktime != undefined) {
+
+ if (ticktime != null && node_ticktime != ticktime) {
+ ticktimes_unequal = true;
+ }
+ ticktime = nodes[i].net_ticktime;
+ }
+ }
+ if (ticktimes_unequal) {
+%>
+<p class="status-error">
+ The <code>kernel</code> <code>net_ticktime</code> values are set
+ differently for different nodes in this cluster.
+</p>
+<p>
+ The values are:
+</p>
+ <table class="list">
+ <tr><th>Node</th><th>net_ticktime</th></tr>
+<%
+ for (var i = 0; i < nodes.length; i++) {
+%>
+ <tr<%= alt_rows(i)%>>
+ <td><%= nodes[i].name %></td>
+ <td><%= nodes[i].net_ticktime %></td>
+ </tr>
+<%
+ }
+%>
+ </table>
+<p>
+ This is a dangerous configuration; use of substantially
+ unequal <code>net_ticktime</code> values can lead to partitions
+ being falsely detected.
+</p>
+<p>
+ <a target="_blank"
+ href="https://www.rabbitmq.com/nettick.html">More information on
+ <code>net_ticktime</code>.</a>
+</p>
+<%
+ }
+%>
+</div>
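The loop above tracks a single `ticktime` plus a flag; the consistency check amounts to "more than one distinct `net_ticktime` value across nodes". A compact equivalent, as a sketch:

    function distinct_ticktimes(nodes) {
        var seen = {};
        for (var i = 0; i < nodes.length; i++) {
            var t = nodes[i].net_ticktime;
            if (t != undefined) seen[t] = true;
        }
        return Object.keys(seen); // length > 1 means a dangerous mixed setup
    }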
diff --git a/deps/rabbitmq_management/priv/www/js/tmpl/permissions.ejs b/deps/rabbitmq_management/priv/www/js/tmpl/permissions.ejs
new file mode 100644
index 0000000000..ccb066bc44
--- /dev/null
+++ b/deps/rabbitmq_management/priv/www/js/tmpl/permissions.ejs
@@ -0,0 +1,91 @@
+<div class="section">
+ <h2>Permissions</h2>
+ <div class="hider">
+ <h3>Current permissions</h3>
+ <% if (permissions.length > 0) { %>
+ <table class="list">
+ <thead>
+ <tr>
+<% if (mode == 'vhost') { %>
+ <th>User</th>
+<% } else { %>
+ <th>Virtual host</th>
+<% } %>
+ <th>Configure regexp</th>
+ <th>Write regexp</th>
+ <th>Read regexp</th>
+ <th></th>
+ </tr>
+ </thead>
+ <tbody>
+<%
+for (var i = 0; i < permissions.length; i++) {
+ var permission = permissions[i];
+%>
+ <tr<%= alt_rows(i)%>>
+<% if (mode == 'vhost') { %>
+ <td><%= link_user(permission.user) %></td>
+<% } else { %>
+ <td><%= link_vhost(permission.vhost) %></td>
+<% } %>
+ <td><%= fmt_string(permission.configure) %></td>
+ <td><%= fmt_string(permission.write) %></td>
+ <td><%= fmt_string(permission.read) %></td>
+ <td class="c">
+ <form action="#/permissions" method="delete" class="confirm">
+ <input type="hidden" name="username" value="<%= fmt_string(permission.user) %>"/>
+ <input type="hidden" name="vhost" value="<%= fmt_string(permission.vhost) %>"/>
+ <input type="submit" value="Clear"/>
+ </form>
+ </td>
+ </tr>
+ <% } %>
+ </tbody>
+ </table>
+ <% } else { %>
+ <p>... no permissions ...</p>
+ <% } %>
+
+ <h3>Set permission</h3>
+ <form action="#/permissions" method="put">
+ <table class="form">
+ <tr>
+<% if (mode == 'vhost') { %>
+        <th><label>User:</label></th>
+ <td>
+ <input type="hidden" name="vhost" value="<%= fmt_string(parent.name) %>"/>
+ <select name="username">
+ <% for (var i = 0; i < users.length; i++) { %>
+ <option value="<%= fmt_string(users[i].name) %>"><%= fmt_string(users[i].name) %></option>
+ <% } %>
+ </select>
+ </td>
+<% } else { %>
+ <th><label>Virtual Host:</label></th>
+ <td>
+ <input type="hidden" name="username" value="<%= fmt_string(parent.name) %>"/>
+ <select name="vhost">
+ <% for (var i = 0; i < vhosts.length; i++) { %>
+ <option value="<%= fmt_string(vhosts[i].name) %>"><%= fmt_string(vhosts[i].name) %></option>
+ <% } %>
+ </select>
+ </td>
+<% } %>
+ </tr>
+ <tr>
+ <th><label>Configure regexp:</label></th>
+ <td><input type="text" name="configure" value=".*"/></td>
+ </tr>
+ <tr>
+ <th><label>Write regexp:</label></th>
+ <td><input type="text" name="write" value=".*"/></td>
+ </tr>
+ <tr>
+ <th><label>Read regexp:</label></th>
+ <td><input type="text" name="read" value=".*"/></td>
+ </tr>
+ </table>
+ <input type="submit" value="Set permission"/>
+ </form>
+ </div>
+</div>
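The "Set permission" form above maps onto the documented HTTP API: a PUT to /api/permissions/&lt;vhost&gt;/&lt;user&gt; with the three regexps as a JSON body ('%2F' is the URL-encoded default vhost; the user and credentials below are illustrative):

    fetch('/api/permissions/%2F/guest', {
        method: 'PUT',
        headers: {
            'content-type': 'application/json',
            'authorization': 'Basic ' + btoa('guest:guest')
        },
        body: JSON.stringify({ configure: '.*', write: '.*', read: '.*' })
    });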
diff --git a/deps/rabbitmq_management/priv/www/js/tmpl/policies.ejs b/deps/rabbitmq_management/priv/www/js/tmpl/policies.ejs
new file mode 100644
index 0000000000..59c922ad46
--- /dev/null
+++ b/deps/rabbitmq_management/priv/www/js/tmpl/policies.ejs
@@ -0,0 +1,305 @@
+<h1>Policies</h1>
+<div class="section">
+ <h2>User policies</h2>
+ <div class="hider">
+<%= filter_ui(policies) %>
+ <div class="updatable">
+<% if (policies.length > 0) { %>
+<table class="list">
+ <thead>
+ <tr>
+<% if (vhosts_interesting) { %>
+ <th>Virtual Host</th>
+<% } %>
+ <th>Name</th>
+ <th>Pattern</th>
+ <th>Apply to</th>
+ <th>Definition</th>
+ <th>Priority</th>
+ </tr>
+ </thead>
+ <tbody>
+<%
+ for (var i = 0; i < policies.length; i++) {
+ var policy = policies[i];
+%>
+ <tr<%= alt_rows(i)%>>
+<% if (vhosts_interesting) { %>
+ <td><%= fmt_string(policy.vhost) %></td>
+<% } %>
+<% if (is_user_policymaker) { %>
+ <td><%= link_policy(policy.vhost, policy.name) %></td>
+<% } else { %>
+ <td><%= fmt_string(policy.name) %></td>
+<% } %>
+ <td><%= fmt_string(policy.pattern) %></td>
+ <td><%= fmt_string(policy['apply-to']) %></td>
+ <td><%= fmt_table_short(policy.definition) %></td>
+ <td><%= fmt_string(policy.priority) %></td>
+ </tr>
+<% } %>
+ </tbody>
+</table>
+<% } else { %>
+ <p>... no policies ...</p>
+<% } %>
+ </div>
+ </div>
+</div>
+
+<% if (is_user_policymaker && vhosts.length > 0) { %>
+
+<div class="section-hidden">
+ <h2>Add / update a policy</h2>
+ <div class="hider">
+ <form action="#/policies" method="put">
+ <table class="form">
+<% if (vhosts_interesting) { %>
+ <tr>
+ <th><label>Virtual host:</label></th>
+ <td>
+ <select name="vhost">
+ <% for (var i = 0; i < vhosts.length; i++) { %>
+ <option value="<%= fmt_string(vhosts[i].name) %>" <%= (vhosts[i].name === current_vhost) ? 'selected="selected"' : '' %>><%= fmt_string(vhosts[i].name) %></option>
+ <% } %>
+ </select>
+ </td>
+ </tr>
+<% } else { %>
+ <tr><td><input type="hidden" name="vhost" value="<%= fmt_string(vhosts[0].name) %>"/></td></tr>
+<% } %>
+ <tr>
+ <th><label>Name:</label></th>
+ <td><input type="text" name="name"/><span class="mand">*</span></td>
+ </tr>
+ <tr>
+ <th><label>Pattern:</label></th>
+ <td><input type="text" name="pattern"/><span class="mand">*</span></td>
+ </tr>
+ <tr>
+ <th><label>Apply to:</label></th>
+ <td>
+ <select name="apply-to">
+ <option value="all">Exchanges and queues</option>
+ <option value="exchanges">Exchanges</option>
+ <option value="queues">Queues</option>
+ </select>
+ </td>
+ </tr>
+ <tr>
+ <th><label>Priority:</label></th>
+ <td><input type="text" name="priority"/></td>
+ </tr>
+ <tr>
+ <th><label>Definition:</label></th>
+ <td>
+ <div class="multifield" id="definition"></div>
+ <table class="argument-links">
+ <tr>
+ <td>Queues [All types]</td>
+ <td>
+ <span class="argument-link" field="definition" key="max-length" type="number">Max length</span> |
+ <span class="argument-link" field="definition" key="max-length-bytes" type="number">Max length bytes</span> |
+ <span class="argument-link" field="definition" key="overflow" type="string">Overflow behaviour</span>
+ <span class="argument-link" field="definition" key="expires" type="number">Auto expire</span> |
+ <span class="help" id="queue-overflow"></span> </br>
+ <span class="argument-link" field="definition" key="dead-letter-exchange" type="string">Dead letter exchange</span> |
+ <span class="argument-link" field="definition" key="dead-letter-routing-key" type="string">Dead letter routing key</span><br />
+              </td>
+              </tr>
+              <tr>
+ <td>Queues [Classic]</td>
+ <td>
+ <span class="argument-link" field="definition" key="ha-mode" type="string">HA mode</span> <span class="help" id="policy-ha-mode"></span> |
+ <span class="argument-link" field="definition" key="ha-params" type="number">HA params</span> <span class="help" id="policy-ha-params"></span> |
+ <span class="argument-link" field="definition" key="ha-sync-mode" type="string">HA sync mode</span> <span class="help" id="policy-ha-sync-mode"></span> </br>
+ <span class="argument-link" field="definition" key="ha-promote-on-shutdown" type="string" value="">HA mirror promotion on shutdown</span> <span class="help" id="policy-ha-promote-on-shutdown"></span> |
+ <span class="argument-link" field="definition" key="ha-promote-on-failure" type="string" value="">HA mirror promotion on failure</span> <span class="help" id="policy-ha-promote-on-failure"></span>
+ </br>
+ <span class="argument-link" field="definition" key="message-ttl" type="number">Message TTL</span> |
+ <span class="argument-link" field="definition" key="queue-mode" type="string" value="lazy">Lazy mode</span> |
+ <span class="argument-link" field="definition" key="queue-master-locator" type="string">Master Locator</span></br>
+ </td>
+ </tr>
+ <tr>
+ <td>Queues [Quorum]</td>
+ <td>
+ <span class="argument-link" field="definition" key="max-in-memory-length" type="number">Max in memory length</span>
+ <span class="help" id="queue-max-in-memory-length"></span> |
+ <span class="argument-link" field="definition" key="max-in-memory-bytes" type="number">Max in memory bytes</span>
+ <span class="help" id="queue-max-in-memory-bytes"></span> |
+ <span class="argument-link" field="definition" key="delivery-limit" type="number">Delivery limit</span>
+ <span class="help" id="delivery-limit"></span>
+ </td>
+ </tr>
+ <tr>
+ <td>Queues [Stream]</td>
+ <td>
+ <span class="argument-link" field="definition" key="max-age" type="string">Max age</span>
+ <span class="help" id="queue-max-age"></span> |
+ <span class="argument-link" field="definition" key="max-segment-size" type="number">Max segment size</span>
+ <span class="help" id="queue-max-segment-size"></span>
+ </td>
+ </tr>
+ <tr>
+ <td>Exchanges</td>
+ <td>
+ <span class="argument-link" field="definition" key="alternate-exchange" type="string">Alternate exchange</span>
+ <span class="help" id="exchange-alternate"></span>
+ </td>
+ </tr>
+ <tr>
+ <td>Federation</td>
+ <td>
+ <span class="argument-link" field="definition" key="federation-upstream-set" type="string">Federation upstream set</span> <span class="help" id="policy-federation-upstream-set"></span> |
+ <span class="argument-link" field="definition" key="federation-upstream" type="string">Federation upstream</span>
+ <span class="help" id="policy-federation-upstream"></span>
+ </td>
+ </tr>
+ </table>
+ </td>
+ <td class="t"><span class="mand">*</span></td>
+ </tr>
+ </table>
+ <input type="submit" value="Add / update policy"/>
+ </form>
+ </div>
+</div>
+<% } %>
+
+
+<div class="section">
+ <h2>Operator policies</h2>
+ <div class="hider">
+<%= filter_ui(operator_policies) %>
+ <div class="updatable">
+<% if (operator_policies.length > 0) { %>
+<table class="list">
+ <thead>
+ <tr>
+<% if (vhosts_interesting) { %>
+ <th>Virtual Host</th>
+<% } %>
+ <th>Name</th>
+ <th>Pattern</th>
+ <th>Apply to</th>
+ <th>Definition</th>
+ <th>Priority</th>
+ <th class="administrator-only">Clear</th>
+ </tr>
+ </thead>
+ <tbody>
+<%
+ for (var i = 0; i < operator_policies.length; i++) {
+ var policy = operator_policies[i];
+%>
+ <tr<%= alt_rows(i)%>>
+<% if (vhosts_interesting) { %>
+ <td><%= fmt_string(policy.vhost) %></td>
+<% } %>
+ <td><%= fmt_string(policy.name) %></td>
+ <td><%= fmt_string(policy.pattern) %></td>
+ <td><%= fmt_string(policy['apply-to']) %></td>
+ <td><%= fmt_table_short(policy.definition) %></td>
+ <td><%= fmt_string(policy.priority) %></td>
+ <td class="administrator-only">
+ <form action="#/operator_policies" method="delete" class="confirm">
+ <input type="hidden" name="name" value="<%= fmt_string(policy.name) %>"/>
+ <input type="hidden" name="vhost" value="<%= fmt_string(policy.vhost) %>"/>
+ <input type="submit" value="Clear"/>
+ </form>
+ </td>
+ </tr>
+<% } %>
+ </tbody>
+</table>
+<% } else { %>
+ <p>... no policies ...</p>
+<% } %>
+ </div>
+ </div>
+</div>
+
+<% if (user_administrator && vhosts.length > 0) { %>
+
+<div class="section-hidden">
+ <h2>Add / update an operator policy</h2>
+ <div class="hider">
+ <form action="#/operator_policies" method="put">
+ <table class="form">
+<% if (vhosts_interesting) { %>
+ <tr>
+ <th><label>Virtual host:</label></th>
+ <td>
+ <select name="vhost">
+ <% for (var i = 0; i < vhosts.length; i++) { %>
+ <option value="<%= fmt_string(vhosts[i].name) %>"><%= fmt_string(vhosts[i].name) %></option>
+ <% } %>
+ </select>
+ </td>
+ </tr>
+<% } else { %>
+ <tr><td><input type="hidden" name="vhost" value="<%= fmt_string(vhosts[0].name) %>"/></td></tr>
+<% } %>
+ <tr>
+ <th><label>Name:</label></th>
+ <td><input type="text" name="name"/><span class="mand">*</span></td>
+ </tr>
+ <tr>
+ <th><label>Pattern:</label></th>
+ <td><input type="text" name="pattern"/><span class="mand">*</span></td>
+ </tr>
+ <tr>
+ <th><label>Apply to:</label></th>
+ <td>
+ <select name="apply-to">
+ <option value="queues">Queues</option>
+ </select>
+ </td>
+ </tr>
+ <tr>
+ <th><label>Priority:</label></th>
+ <td><input type="text" name="priority"/></td>
+ </tr>
+ <tr>
+ <th><label>Definition:</label></th>
+ <td>
+ <div class="multifield" id="definitionop"></div>
+ <table class="argument-links">
+ <tr>
+ <td>Queues [All types]</td>
+ <td>
+ <span class="argument-link" field="definitionop" key="max-length" type="number">Max length</span> |
+ <span class="argument-link" field="definitionop" key="max-length-bytes" type="number">Max length bytes</span> |
+ <span class="argument-link" field="definitionop" key="overflow" type="string">Overflow behaviour</span>
+ <span class="help" id="queue-overflow"></span>
+ </td>
+ </tr>
+ <tr>
+ <td>Queues [Classic]</td>
+ <td>
+ <span class="argument-link" field="definitionop" key="message-ttl" type="number">Message TTL</span> |
+ <span class="argument-link" field="definitionop" key="expires" type="number">Auto expire</span>
+ </td>
+ </tr>
+ <tr>
+ <td>Queues [Quorum]</td>
+ <td>
+ <span class="argument-link" field="definitionop" key="max-in-memory-length" type="number">Max in memory length</span>
+ <span class="help" id="queue-max-in-memory-length"></span> |
+ <span class="argument-link" field="definitionop" key="max-in-memory-bytes" type="number">Max in memory bytes</span>
+ <span class="help" id="queue-max-in-memory-bytes"></span> |
+ <span class="argument-link" field="definitionop" key="delivery-limit" type="string">Delivery limit</span>
+ <span class="help" id="delivery-limit"></span>
+ </td>
+ </tr>
+ </table>
+ </td>
+ <td class="t"><span class="mand">*</span></td>
+ </tr>
+ </table>
+ <input type="submit" value="Add / update operator policy"/>
+ </form>
+ </div>
+</div>
+<% } %>
+
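Both forms on this page target the documented policy endpoints: PUT /api/policies/&lt;vhost&gt;/&lt;name&gt; (or /api/operator_policies/... for the operator variant) with pattern, definition, priority and apply-to in the body. A sketch with illustrative values:

    fetch('/api/policies/%2F/dlx-orders', {
        method: 'PUT',
        headers: { 'content-type': 'application/json' },
        body: JSON.stringify({
            pattern: '^orders\\.',
            definition: { 'dead-letter-exchange': 'dlx' },
            priority: 0,
            'apply-to': 'queues'
        })
    });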
diff --git a/deps/rabbitmq_management/priv/www/js/tmpl/policy.ejs b/deps/rabbitmq_management/priv/www/js/tmpl/policy.ejs
new file mode 100644
index 0000000000..c4a58c44b6
--- /dev/null
+++ b/deps/rabbitmq_management/priv/www/js/tmpl/policy.ejs
@@ -0,0 +1,37 @@
+<h1>Policy: <b><%= fmt_string(policy.name) %></b><%= fmt_maybe_vhost(policy.vhost) %></h1>
+
+<div class="section">
+ <h2>Overview</h2>
+ <div class="hider">
+ <table class="facts">
+ <tr>
+ <th>Pattern</th>
+ <td><%= fmt_string(policy.pattern) %></td>
+ </tr>
+ <tr>
+ <th>Apply to</th>
+ <td><%= fmt_string(policy['apply-to']) %></td>
+ </tr>
+ <tr>
+ <th>Definition</th>
+ <td><%= fmt_table_short(policy.definition) %></td>
+ </tr>
+ <tr>
+ <th>Priority</th>
+ <td><%= fmt_string(policy.priority) %></td>
+ </tr>
+ </table>
+ </div>
+</div>
+
+<div class="section-hidden">
+ <h2>Delete this policy</h2>
+ <div class="hider">
+ <form action="#/policies" method="delete" class="confirm">
+ <input type="hidden" name="component" value="policy"/>
+ <input type="hidden" name="vhost" value="<%= fmt_string(policy.vhost) %>"/>
+ <input type="hidden" name="name" value="<%= fmt_string(policy.name) %>"/>
+ <input type="submit" value="Delete this policy"/>
+ </form>
+ </div>
+</div>
diff --git a/deps/rabbitmq_management/priv/www/js/tmpl/popup.ejs b/deps/rabbitmq_management/priv/www/js/tmpl/popup.ejs
new file mode 100644
index 0000000000..bf9081fab6
--- /dev/null
+++ b/deps/rabbitmq_management/priv/www/js/tmpl/popup.ejs
@@ -0,0 +1,6 @@
+<div class="form-popup-<%= type %>">
+ <%= text %>
+ <br/>
+ <br/>
+ <span>Close</span>
+</div>
diff --git a/deps/rabbitmq_management/priv/www/js/tmpl/publish.ejs b/deps/rabbitmq_management/priv/www/js/tmpl/publish.ejs
new file mode 100644
index 0000000000..d57724e12a
--- /dev/null
+++ b/deps/rabbitmq_management/priv/www/js/tmpl/publish.ejs
@@ -0,0 +1,67 @@
+<div class="section-hidden">
+ <h2>Publish message</h2>
+ <div class="hider">
+ <form action="#/exchanges/publish" method="post">
+<% if (mode == 'queue') { %>
+ <input type="hidden" name="vhost" value="<%= fmt_string(queue.vhost) %>"/>
+ <input type="hidden" name="name" value="amq.default"/>
+<% } else { %>
+ <input type="hidden" name="vhost" value="<%= fmt_string(exchange.vhost) %>"/>
+ <input type="hidden" name="name" value="<%= fmt_exchange_url(exchange.name) %>"/>
+<% } %>
+ <input type="hidden" name="properties" value=""/>
+ <table class="form">
+<% if (mode == 'queue') { %>
+ <tr>
+ <td colspan="2"><input type="hidden" name="routing_key" value="<%= fmt_string(queue.name) %>"/> Message will be published to the default exchange with routing key <strong><%= fmt_string(queue.name) %></strong>, routing it to this queue.</td>
+ </tr>
+<% } else { %>
+ <tr>
+ <th><label>Routing key:</label></th>
+ <td><input type="text" name="routing_key" value=""/></td>
+ </tr>
+<% } %>
+ <% if (mode == 'queue' && is_classic(queue)) { %>
+ <tr>
+ <th><label>Delivery mode:</label></th>
+ <td>
+ <select name="delivery_mode">
+ <option value="1">1 - Non-persistent</option>
+ <option value="2">2 - Persistent</option>
+ </select>
+ </td>
+ </tr>
+ <% } else { %>
+ <input type="hidden" name="delivery_mode" value="2">
+ <% } %>
+ <tr>
+ <th>
+ <label>
+ Headers:
+ <span class="help" id="message-publish-headers"></span>
+ </label>
+ </th>
+ <td>
+ <div class="multifield" id="headers"></div>
+ </td>
+ </tr>
+ <tr>
+ <th>
+ <label>
+ Properties:
+ <span class="help" id="message-publish-properties"></span>
+ </label>
+ </th>
+ <td>
+ <div class="multifield string-only" id="props"></div>
+ </td>
+ </tr>
+ <tr>
+ <th><label>Payload:</label></th>
+ <td><textarea name="payload"></textarea></td>
+ </tr>
+ </table>
+ <input type="submit" value="Publish message" />
+ </form>
+ </div>
+</div>
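The publish form posts to the documented POST /api/exchanges/&lt;vhost&gt;/&lt;name&gt;/publish endpoint; the `routed` flag in the response tells the UI whether any queue received the message. A sketch with illustrative values (`amq.default` is the API name for the default exchange, as the hidden field above shows):

    fetch('/api/exchanges/%2F/amq.default/publish', {
        method: 'POST',
        headers: { 'content-type': 'application/json' },
        body: JSON.stringify({
            routing_key: 'my-queue',
            payload: 'hello',
            payload_encoding: 'string',
            properties: { delivery_mode: 2 }
        })
    }).then(function (res) { return res.json(); })
      .then(function (reply) { console.log('routed:', reply.routed); });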
diff --git a/deps/rabbitmq_management/priv/www/js/tmpl/queue.ejs b/deps/rabbitmq_management/priv/www/js/tmpl/queue.ejs
new file mode 100644
index 0000000000..27297b3757
--- /dev/null
+++ b/deps/rabbitmq_management/priv/www/js/tmpl/queue.ejs
@@ -0,0 +1,431 @@
+<h1>Queue <b><%= fmt_string(highlight_extra_whitespace(queue.name)) %></b><%= fmt_maybe_vhost(queue.vhost) %></h1>
+
+<div class="section">
+ <h2>Overview</h2>
+<% if(!disable_stats) { %>
+ <div class="hider updatable">
+ <%= queue_lengths('lengths-q', queue) %>
+<% if (rates_mode != 'none') { %>
+ <%= message_rates('msg-rates-q', queue.message_stats) %>
+<% } %>
+<% } %>
+
+ <h3>Details</h3>
+ <table class="facts facts-l">
+ <tr>
+ <th>Features</th>
+ <td><%= fmt_features(queue) %></td>
+ </tr>
+<% if(!disable_stats) { %>
+ <tr>
+ <th>Policy</th>
+ <td><%= link_policy(queue.vhost, queue.policy) %></td>
+ </tr>
+ <tr>
+ <th>Operator policy</th>
+ <td><%= fmt_string(queue.operator_policy, '') %></td>
+ </tr>
+ <% if (queue.owner_pid_details != undefined) { %>
+ <tr>
+ <th>Exclusive owner</th>
+ <td><%= link_conn(queue.owner_pid_details.name) %></td>
+ </tr>
+ <% } %>
+ <tr>
+ <th>Effective policy definition</th>
+ <td><%= fmt_table_short(queue.effective_policy_definition) %></td>
+ </tr>
+<% } %>
+<% if (nodes_interesting) { %>
+ <tr>
+ <% if (is_quorum(queue) || is_stream(queue)) { %>
+ <th>Leader</th>
+ <% } else { %>
+ <th>Node</th>
+ <% } %>
+ <% if (queue.leader) { %>
+ <td><%= fmt_node(queue.leader) %></td>
+ <% } else { %>
+ <td><%= fmt_node(queue.node) %></td>
+ <% } %>
+ </tr>
+ <% if (is_quorum(queue) || is_stream(queue)) { %>
+ <tr>
+ <th>Online</th>
+ <td>
+ <%
+ for (var i in queue.online) {
+ %>
+ <%= fmt_node(queue.online[i]) %>
+ <br/>
+ <% } %>
+ </td>
+ </tr>
+      <tr>
+        <th>Members</th>
+ <td>
+ <%
+ for (var i in queue.members) {
+ %>
+ <%= fmt_node(queue.members[i]) %>
+ <br/>
+ <% } %>
+ </td>
+ </tr>
+ <% } else { %>
+ <% if (!queue.exclusive) { %>
+ <tr>
+ <th>Mirrors</th>
+ <td>
+ <%
+ var has_unsynced_node = false;
+ for (var i in queue.slave_nodes) {
+ var node = queue.slave_nodes[i];
+ %>
+ <%
+ if (jQuery.inArray(node, queue.synchronised_slave_nodes) == -1) {
+ has_unsynced_node = true;
+ %>
+ <%= fmt_node(node) %> <b>(unsynchronised)</b>
+ <% } else { %>
+ <%= fmt_node(node) %>
+ <% } %>
+ <br/>
+ <% } %>
+ <% if (queue.state == 'syncing') { %>
+ <table>
+ <tr>
+ <td>
+ <%= fmt_sync_state(queue) %>
+ </td>
+ <td>
+ <form action="#/queues/actions" method="post">
+ <input type="hidden" name="vhost" value="<%= fmt_string(queue.vhost) %>"/>
+ <input type="hidden" name="name" value="<%= fmt_string(queue.name) %>"/>
+ <input type="hidden" name="action" value="cancel_sync"/>
+ <input type="submit" value="Cancel" id="action-button" />
+ </form>
+ </td>
+ </tr>
+ </table>
+ <% } else if (has_unsynced_node) { %>
+ <form action="#/queues/actions" method="post">
+ <input type="hidden" name="vhost" value="<%= fmt_string(queue.vhost) %>"/>
+ <input type="hidden" name="name" value="<%= fmt_string(queue.name) %>"/>
+ <input type="hidden" name="action" value="sync"/>
+ <input type="submit" value="Synchronise" id="action-button" />
+ </form>
+ <% } %>
+ </td>
+ </tr>
+ <% } %>
+ <% } %>
+<% } %>
+ </table>
+
+<% if(!disable_stats) { %>
+ <table class="facts facts-l">
+ <tr>
+ <th>State</th>
+ <td><%= fmt_object_state(queue) %></td>
+ </tr>
+ <tr>
+ <th>Consumers</th>
+ <td><%= fmt_string(queue.consumers) %></td>
+ </tr>
+ <tr>
+      <th>Consumer utilisation <span class="help" id="queue-consumer-utilisation"></span></th>
+ <td><%= fmt_percent(queue.consumer_utilisation) %></td>
+ </tr>
+ <% if (is_quorum(queue) || is_stream(queue)) { %>
+ <tr>
+ <th>Open files</th>
+ <td><%= fmt_table_short(queue.open_files) %></td>
+ </tr>
+ <% } %>
+ </table>
+
+ <table class="facts">
+ <tr>
+ <td></td>
+ <th class="horizontal">Total</th>
+ <th class="horizontal">Ready</th>
+ <th class="horizontal">Unacked</th>
+ <% if (is_quorum(queue)) { %>
+ <th class="horizontal">In memory ready</th>
+ <% } %>
+ <% if (is_classic(queue)) { %>
+ <th class="horizontal">In memory</th>
+ <th class="horizontal">Persistent</th>
+ <th class="horizontal">Transient, Paged Out</th>
+ <% } %>
+ </tr>
+ <tr>
+ <th>
+ Messages
+ <span class="help" id="queue-messages"></span>
+ </th>
+ <td class="r">
+ <%= fmt_num_thousands(queue.messages) %>
+ </td>
+ <td class="r">
+ <%= fmt_num_thousands(queue.messages_ready) %>
+ </td>
+ <td class="r">
+ <%= fmt_num_thousands(queue.messages_unacknowledged) %>
+ </td>
+ <td class="r">
+ <%= fmt_num_thousands(queue.messages_ram) %>
+ </td>
+ <% if (is_classic(queue)) { %>
+ <td class="r">
+ <%= fmt_num_thousands(queue.messages_persistent) %>
+ </td>
+ <td class="r">
+ <%= fmt_num_thousands(queue.messages_paged_out) %>
+ </td>
+ <% } %>
+ </tr>
+ <tr>
+ <th>
+ Message body bytes
+ <span class="help" id="queue-message-body-bytes"></span>
+ </th>
+ <td class="r">
+ <%= fmt_bytes(queue.message_bytes) %>
+ </td>
+ <td class="r">
+ <%= fmt_bytes(queue.message_bytes_ready) %>
+ </td>
+ <td class="r">
+ <%= fmt_bytes(queue.message_bytes_unacknowledged) %>
+ </td>
+ <td class="r">
+ <%= fmt_bytes(queue.message_bytes_ram) %>
+ </td>
+ <% if (is_classic(queue)) { %>
+ <td class="r">
+ <%= fmt_bytes(queue.message_bytes_persistent) %>
+ </td>
+ <td class="r">
+ <%= fmt_bytes(queue.message_bytes_paged_out) %>
+ </td>
+ <% } %>
+ </tr>
+ <tr>
+ <th>
+ Process memory
+ <span class="help" id="queue-process-memory"></span>
+ </th>
+ <td class="r"><%= fmt_bytes(queue.memory) %></td>
+ </tr>
+ </table>
+ <% } %>
+ </div>
+</div>
+
+<% if (rates_mode == 'detailed') { %>
+<div class="section-hidden">
+<h2>Message rates breakdown</h2>
+<div class="hider updatable">
+<table class="two-col-layout">
+ <tr>
+ <td>
+ <%= format('msg-detail-publishes',
+ {'mode': 'queue',
+ 'object': queue.incoming,
+ 'label': 'Incoming'}) %>
+
+ </td>
+ <td>
+ <%= format('msg-detail-deliveries',
+ {'mode': 'queue',
+ 'object': queue.deliveries}) %>
+ </td>
+ </tr>
+</table>
+</div>
+</div>
+
+<% } %>
+
+
+<% if(!disable_stats) { %>
+<div class="section-hidden">
+ <h2>Consumers</h2>
+ <div class="hider updatable">
+<%= format('consumers', {'mode': 'queue', 'consumers': queue.consumer_details}) %>
+ </div>
+</div>
+<% } %>
+
+<div class="section-hidden">
+ <h2>Bindings</h2>
+ <div class="hider">
+ <div class="bindings-wrapper">
+ <%= format('bindings', {'mode': 'queue', 'bindings': bindings}) %>
+ <p class="arrow">&dArr;</p>
+ <p><span class="queue">This queue</span></p>
+
+ <%= format('add-binding', {'mode': 'queue', 'parent': queue}) %>
+ </div>
+ </div>
+</div>
+
+<%= format('publish', {'mode': 'queue', 'queue': queue}) %>
+
+<% if (!is_stream(queue)) { %>
+<div class="section-hidden">
+ <h2>Get messages</h2>
+ <div class="hider">
+ <p>
+ Warning: getting messages from a queue is a destructive action.
+ <span class="help" id="message-get-requeue"></span>
+ </p>
+ <form action="#/queues/get" method="post">
+ <input type="hidden" name="vhost" value="<%= fmt_string(queue.vhost) %>"/>
+ <input type="hidden" name="name" value="<%= fmt_string(queue.name) %>"/>
+ <input type="hidden" name="truncate" value="50000"/>
+ <table class="form">
+ <tr>
+ <th><label>Ack Mode:</label></th>
+ <td>
+ <select name="ackmode">
+ <option value="ack_requeue_true" selected>Nack message requeue true</option>
+ <option value="ack_requeue_false">Ack message requeue false</option>
+ <option value="reject_requeue_true">Reject requeue true</option>
+ <option value="reject_requeue_false">Reject requeue false</option>
+ </select>
+ </td>
+ </tr>
+ <tr>
+ <th><label>Encoding:</label></th>
+ <td>
+ <select name="encoding">
+ <option value="auto">Auto string / base64</option>
+ <option value="base64">base64</option>
+ </select>
+ <span class="help" id="string-base64"></span>
+ </td>
+ </tr>
+ <tr>
+ <th><label>Messages:</label></th>
+ <td><input type="text" name="count" value="1"/></td>
+ </tr>
+ </table>
+ <input type="submit" value="Get Message(s)" />
+ </form>
+ <div id="msg-wrapper"></div>
+ </div>
+</div>
+<% } %>
+
+<% if (is_user_policymaker) { %>
+<div class="section-hidden">
+ <h2>Move messages</h2>
+ <div class="hider">
+ <% if (NAVIGATION['Admin'][0]['Shovel Management'] == undefined) { %>
+    <p>To move messages, the shovel plugin must be enabled; try:</p>
+ <pre>$ rabbitmq-plugins enable rabbitmq_shovel rabbitmq_shovel_management</pre>
+ <% } else { %>
+ <p>
+ The shovel plugin can be used to move messages from this queue
+ to another one. The form below will create a temporary shovel to
+ move messages to another queue on the same virtual host, with
+ default settings.
+ </p>
+ <p>
+ For more options <a href="#/dynamic-shovels">see the shovel
+ interface</a>.
+ </p>
+ <form action="#/shovel-parameters-move-messages" method="put">
+ <input type="hidden" name="component" value="shovel"/>
+ <input type="hidden" name="vhost" value="<%= fmt_string(queue.vhost) %>"/>
+ <input type="hidden" name="name" value="Move from <%= fmt_string(queue.name) %>"/>
+ <input type="hidden" name="src-uri" value="amqp:///<%= esc(queue.vhost) %>"/>
+ <input type="hidden" name="src-queue" value="<%= fmt_string(queue.name) %>"/>
+ <input type="hidden" name="src-protocol" value="amqp091"/>
+ <input type="hidden" name="src-prefetch-count" value="1000"/>
+ <input type="hidden" name="src-delete-after" value="queue-length"/>
+ <input type="hidden" name="dest-protocol" value="amqp091"/>
+ <input type="hidden" name="dest-uri" value="amqp:///<%= esc(queue.vhost) %>"/>
+ <input type="hidden" name="dest-add-forward-headers" value="false"/>
+ <input type="hidden" name="ack-mode" value="on-confirm"/>
+ <input type="hidden" name="redirect" value="#/queues"/>
+
+ <table class="form">
+ <tr>
+ <th>Destination queue:</th>
+ <td><input type="text" name="dest-queue"/></td>
+ </tr>
+ </table>
+ <input type="submit" value="Move messages"/>
+ </form>
+ <% } %>
+ </div>
+</div>
+<% } %>
+
+<div class="section-hidden">
+ <h2>Delete</h2>
+ <div class="hider">
+ <form action="#/queues" method="delete" class="confirm-queue inline-form">
+ <input type="hidden" name="vhost" value="<%= fmt_string(queue.vhost) %>"/>
+ <input type="hidden" name="name" value="<%= fmt_string(queue.name) %>"/>
+ <input type="hidden" name="mode" value="delete"/>
+ <input type="submit" value="Delete Queue" />
+ </form>
+ </div>
+</div>
+
+<% if (!is_stream(queue)) { %>
+<div class="section-hidden">
+ <h2>Purge</h2>
+ <div class="hider">
+ <form action="#/queues" method="delete" class="confirm-purge-queue inline-form">
+ <input type="hidden" name="vhost" value="<%= fmt_string(queue.vhost) %>"/>
+ <input type="hidden" name="name" value="<%= fmt_string(queue.name) %>"/>
+ <input type="hidden" name="mode" value="purge"/>
+ <input type="submit" value="Purge Messages" />
+ </form>
+ </div>
+</div>
+<% } %>
+
+<% if(queue.reductions || queue.garbage_collection) { %>
+<div class="section-hidden">
+<h2>Runtime Metrics (Advanced)</h2>
+ <div class="hider updatable">
+ <%= data_reductions('reductions-rates-queue', queue) %>
+ <table class="facts">
+ <% if (queue.garbage_collection.min_bin_vheap_size) { %>
+ <tr>
+ <th>Minimum binary virtual heap size in words (min_bin_vheap_size)</th>
+ <td><%= queue.garbage_collection.min_bin_vheap_size %></td>
+ </tr>
+ <% } %>
+
+ <% if (queue.garbage_collection.min_heap_size) { %>
+ <tr>
+ <th>Minimum heap size in words (min_heap_size)</th>
+ <td><%= queue.garbage_collection.min_heap_size %></td>
+ </tr>
+ <% } %>
+
+ <% if (queue.garbage_collection.fullsweep_after) { %>
+ <tr>
+ <th>Maximum generational collections before fullsweep (fullsweep_after)</th>
+ <td><%= queue.garbage_collection.fullsweep_after %></td>
+ </tr>
+ <% } %>
+
+ <% if (queue.garbage_collection.minor_gcs) { %>
+ <tr>
+ <th>Number of minor GCs (minor_gcs)</th>
+ <td><%= queue.garbage_collection.minor_gcs %></td>
+ </tr>
+ <% } %>
+ </table>
+ </div>
+</div>
+
+<% } %>
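The "Get messages" form drives the documented POST /api/queues/&lt;vhost&gt;/&lt;name&gt;/get endpoint, with the same ackmode/encoding/count knobs as the form above. A sketch (the queue name is illustrative):

    fetch('/api/queues/%2F/my-queue/get', {
        method: 'POST',
        headers: { 'content-type': 'application/json' },
        body: JSON.stringify({
            count: 1,
            ackmode: 'ack_requeue_true', // peek: requeue after reading
            encoding: 'auto',
            truncate: 50000
        })
    }).then(function (res) { return res.json(); })
      .then(function (msgs) { console.log(msgs); });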
diff --git a/deps/rabbitmq_management/priv/www/js/tmpl/queues.ejs b/deps/rabbitmq_management/priv/www/js/tmpl/queues.ejs
new file mode 100644
index 0000000000..6e13f84361
--- /dev/null
+++ b/deps/rabbitmq_management/priv/www/js/tmpl/queues.ejs
@@ -0,0 +1,353 @@
+
+<h1>Queues</h1>
+<div class="section">
+ <%= paginate_ui(queues, 'queues') %>
+</div>
+<div class="updatable">
+<% if (queues.items.length > 0) { %>
+<table class="list">
+ <thead>
+ <tr>
+ <%= group_heading('queues', 'Overview', [vhosts_interesting, nodes_interesting, true]) %>
+<% if(disable_stats && enable_queue_totals) { %>
+ <%= group_heading('queues', 'Messages', []) %>
+<% } else { %>
+<% if(!disable_stats) { %>
+ <%= group_heading('queues', 'Messages', []) %>
+ <%= group_heading('queues', 'Message bytes', []) %>
+<% if (rates_mode != 'none') { %>
+ <%= group_heading('queues', 'Message rates', []) %>
+<% } %>
+<% } %>
+<% } %>
+ <th class="plus-minus"><span class="popup-options-link" title="Click to change columns" type="columns" for="queues">+/-</span></th>
+ </tr>
+ <tr>
+<% if (vhosts_interesting) { %>
+ <th><%= fmt_sort('Virtual host', 'vhost') %></th>
+<% } %>
+ <th><%= fmt_sort('Name', 'name') %></th>
+<% if (nodes_interesting) { %>
+ <th><%= fmt_sort('Node', 'node') %></th>
+<% } %>
+<% if (show_column('queues', 'type')) { %>
+ <th><%= fmt_sort('Type', 'type') %></th>
+<% } %>
+<% if (show_column('queues', 'features')) { %>
+ <th>Features</th>
+<% } %>
+<% if (show_column('queues', 'features_no_policy')) { %>
+ <th>Features</th>
+<% } %>
+<% if (show_column('queues', 'policy')) { %>
+ <th><%= fmt_sort('Policy','policy') %></th>
+<% } %>
+<% if (show_column('queues', 'consumers')) { %>
+ <th><%= fmt_sort('Consumers', 'consumers') %></th>
+<% } %>
+<% if (show_column('queues', 'consumer_utilisation')) { %>
+ <th><%= fmt_sort('Consumer utilisation', 'consumer_utilisation') %></th>
+<% } %>
+<% if (show_column('queues', 'state')) { %>
+ <th><%= fmt_sort('State', 'state') %></th>
+<% } %>
+<% if(disable_stats && enable_queue_totals) { %>
+<% if (show_column('queues', 'msgs-ready')) { %>
+ <th><%= fmt_sort('Ready', 'messages_ready') %></th>
+<% } %>
+<% if (show_column('queues', 'msgs-unacked')) { %>
+ <th><%= fmt_sort('Unacked', 'messages_unacknowledged') %></th>
+<% } %>
+<% if (show_column('queues', 'msgs-total')) { %>
+ <th><%= fmt_sort('Total', 'messages') %></th>
+<% } %>
+<% } %>
+<% if(!disable_stats) { %>
+<% if (show_column('queues', 'msgs-ready')) { %>
+ <th><%= fmt_sort('Ready', 'messages_ready') %></th>
+<% } %>
+<% if (show_column('queues', 'msgs-unacked')) { %>
+ <th><%= fmt_sort('Unacked', 'messages_unacknowledged') %></th>
+<% } %>
+<% if (show_column('queues', 'msgs-ram')) { %>
+ <th><%= fmt_sort('In Memory', 'messages_ram') %></th>
+<% } %>
+<% if (show_column('queues', 'msgs-persistent')) { %>
+ <th><%= fmt_sort('Persistent', 'messages_persistent') %></th>
+<% } %>
+<% if (show_column('queues', 'msgs-total')) { %>
+ <th><%= fmt_sort('Total', 'messages') %></th>
+<% } %>
+<% if (show_column('queues', 'msg-bytes-ready')) { %>
+ <th><%= fmt_sort('Ready', 'message_bytes_ready') %></th>
+<% } %>
+<% if (show_column('queues', 'msg-bytes-unacked')) { %>
+ <th><%= fmt_sort('Unacked', 'message_bytes_unacknowledged') %></th>
+<% } %>
+<% if (show_column('queues', 'msg-bytes-ram')) { %>
+ <th><%= fmt_sort('In Memory', 'message_bytes_ram') %></th>
+<% } %>
+<% if (show_column('queues', 'msg-bytes-persistent')) { %>
+ <th><%= fmt_sort('Persistent', 'message_bytes_persistent') %></th>
+<% } %>
+<% if (show_column('queues', 'msg-bytes-total')) { %>
+ <th><%= fmt_sort('Total', 'message_bytes') %></th>
+<% } %>
+<% if (rates_mode != 'none') { %>
+ <% if (show_column('queues', 'rate-incoming')) { %>
+ <th><%= fmt_sort('incoming', 'message_stats.publish_details.rate') %></th>
+ <% } %>
+ <% if (show_column('queues', 'rate-deliver')) { %>
+ <th><%= fmt_sort('deliver / get', 'message_stats.deliver_get_details.rate') %></th>
+ <% } %>
+ <% if (show_column('queues', 'rate-redeliver')) { %>
+ <th><%= fmt_sort('redelivered', 'message_stats.redeliver_details.rate') %></th>
+ <% } %>
+ <% if (show_column('queues', 'rate-ack')) { %>
+ <th><%= fmt_sort('ack', 'message_stats.ack_details.rate') %></th>
+ <% } %>
+<% } %>
+<% } %>
+ </tr>
+ </thead>
+ <tbody>
+<%
+ for (var i = 0; i < queues.items.length; i++) {
+ var queue = queues.items[i];
+%>
+ <tr<%= alt_rows(i, queue.arguments) %>>
+<% if (vhosts_interesting) { %>
+ <td><%= fmt_string(queue.vhost) %></td>
+<% } %>
+ <td><%= link_queue(queue.vhost, queue.name, queue.arguments) %></td>
+<% if (nodes_interesting) { %>
+ <td>
+ <% if (queue.node) { %>
+ <%= fmt_node(queue.node) %>
+ <% } else { %>
+ <%= fmt_node(queue.leader) %>
+ <% } %>
+ <% if (queue.hasOwnProperty('members')) { %>
+ <%= fmt_members(queue) %>
+ <% } else { %>
+ <%= fmt_mirrors(queue) %>
+ <% if (queue.state == 'syncing') { %>
+ <%= fmt_sync_state(queue) %>
+ <% } %>
+ <% } %>
+ </td>
+<% } %>
+<% if (show_column('queues', 'type')) { %>
+ <td><%= fmt_string(queue.type, '') %></td>
+<% } %>
+<% if (show_column('queues', 'features')) { %>
+ <td class="c">
+ <%= fmt_features_short(queue) %>
+ <%= fmt_policy_short(queue) %>
+ <%= fmt_op_policy_short(queue) %>
+ </td>
+<% } %>
+<% if (show_column('queues', 'features_no_policy')) { %>
+ <td class="c"><%= fmt_features_short(queue) %></td>
+<% } %>
+<% if (show_column('queues', 'policy')) { %>
+ <td class="c"><%= link_policy(queue.vhost, queue.policy) %>
+ <%= fmt_string(queue.operator_policy) %></td>
+<% } %>
+<% if (show_column('queues', 'consumers')) { %>
+ <td class="c"><%= fmt_string(queue.consumers) %></td>
+<% } %>
+<% if (show_column('queues', 'consumer_utilisation')) { %>
+ <td class="c"><%= fmt_percent(queue.consumer_utilisation) %></td>
+<% } %>
+<% if (show_column('queues', 'state')) { %>
+ <td class="c"><%= fmt_object_state(queue) %></td>
+<% } %>
+<% if(!disable_stats || enable_queue_totals) { %>
+<% if (show_column('queues', 'msgs-ready')) { %>
+ <td class="r"><%= fmt_num_thousands(queue.messages_ready) %></td>
+<% } %>
+<% if (show_column('queues', 'msgs-unacked')) { %>
+ <td class="r"><%= fmt_num_thousands(queue.messages_unacknowledged) %></td>
+<% } %>
+<% } %>
+<% if(!disable_stats) { %>
+<% if (show_column('queues', 'msgs-ram')) { %>
+ <td class="r"><%= fmt_num_thousands(queue.messages_ram) %></td>
+<% } %>
+<% if (show_column('queues', 'msgs-persistent')) { %>
+ <td class="r"><%= fmt_num_thousands(queue.messages_persistent) %></td>
+<% } %>
+<% } %>
+<% if(!disable_stats || enable_queue_totals) { %>
+<% if (show_column('queues', 'msgs-total')) { %>
+ <td class="r"><%= fmt_num_thousands(queue.messages) %></td>
+<% } %>
+<% } %>
+<% if(!disable_stats) { %>
+<% if (show_column('queues', 'msg-bytes-ready')) { %>
+ <td class="r"><%= fmt_bytes(queue.message_bytes_ready) %></td>
+<% } %>
+<% if (show_column('queues', 'msg-bytes-unacked')) { %>
+ <td class="r"><%= fmt_bytes(queue.message_bytes_unacknowledged) %></td>
+<% } %>
+<% if (show_column('queues', 'msg-bytes-ram')) { %>
+ <td class="r"><%= fmt_bytes(queue.message_bytes_ram) %></td>
+<% } %>
+<% if (show_column('queues', 'msg-bytes-persistent')) { %>
+ <td class="r"><%= fmt_bytes(queue.message_bytes_persistent) %></td>
+<% } %>
+<% if (show_column('queues', 'msg-bytes-total')) { %>
+ <td class="r"><%= fmt_bytes(queue.message_bytes) %></td>
+<% } %>
+<% if (rates_mode != 'none') { %>
+ <% if (show_column('queues', 'rate-incoming')) { %>
+ <td class="r"><%= fmt_detail_rate(queue.message_stats, 'publish') %></td>
+ <% } %>
+ <% if (show_column('queues', 'rate-deliver')) { %>
+ <td class="r"><%= fmt_detail_rate(queue.message_stats, 'deliver_get') %></td>
+ <% } %>
+ <% if (show_column('queues', 'rate-redeliver')) { %>
+ <td class="r"><%= fmt_detail_rate(queue.message_stats, 'redeliver') %></td>
+ <% } %>
+ <% if (show_column('queues', 'rate-ack')) { %>
+ <td class="r"><%= fmt_detail_rate(queue.message_stats, 'ack') %></td>
+ <% } %>
+<% } %>
+<% } %>
+ </tr>
+ <% } %>
+ </tbody>
+</table>
+<% } else { %>
+ <p>... no queues ...</p>
+<% } %>
+ </div>
+ </div>
+</div>
+
+<div class="section-hidden">
+ <h2>Add a new queue</h2>
+ <div class="hider">
+ <form action="#/queues" method="put">
+ <table class="form">
+<% if (vhosts_interesting) { %>
+ <tr>
+ <th><label>Virtual host:</label></th>
+ <td>
+ <select name="vhost">
+ <% for (var i = 0; i < vhosts.length; i++) { %>
+ <option value="<%= fmt_string(vhosts[i].name) %>" <%= (vhosts[i].name === current_vhost) ? 'selected="selected"' : '' %>><%= fmt_string(vhosts[i].name) %></option>
+ <% } %>
+ </select>
+ </td>
+ </tr>
+<% } else { %>
+ <tr><td><input type="hidden" name="vhost" value="<%= fmt_string(vhosts[0].name) %>"/></td></tr>
+<% } %>
+ <tr>
+ <th><label>Type:</label></th>
+ <td>
+ <select name="queuetype" onchange="select_queue_type(queuetype)">
+ <option value="classic">Classic</option>
+ <% if (queue_type == "quorum") { %>
+ <option value="quorum" selected>Quorum</option>
+ <% } else { %>
+ <option value="quorum">Quorum</option>
+ <% } %>
+ <% if (queue_type == "stream") { %>
+ <option value="stream" selected>Stream</option>
+ <% } else { %>
+ <option value="stream">Stream</option>
+ <% } %>
+ </select>
+ </td>
+ </tr>
+ <tr>
+ <th><label>Name:</label></th>
+ <td><input type="text" name="name"/><span class="mand">*</span></td>
+ </tr>
+<% if (queue_type == "classic") { %>
+ <tr>
+ <th><label>Durability:</label></th>
+ <td>
+ <select name="durable">
+ <option value="true">Durable</option>
+ <option value="false">Transient</option>
+ </select>
+ </td>
+ </tr>
+<% } %>
+<%
+ if (nodes_interesting) {
+ var nodes = JSON.parse(sync_get('/nodes'));
+%>
+ <tr>
+ <th><label>Node:</label></th>
+ <td>
+ <select name="node">
+ <% for (var i = 0; i < nodes.length; i++) { %>
+ <option value="<%= fmt_string(nodes[i].name) %>"><%= fmt_node(nodes[i].name) %></option>
+ <% } %>
+ </select>
+ </td>
+ </tr>
+<% } %>
+<% if (queue_type == "classic") { %>
+ <tr>
+ <th><label>Auto delete: <span class="help" id="queue-auto-delete"></span></label></th>
+ <td>
+ <select name="auto_delete">
+ <option value="false">No</option>
+ <option value="true">Yes</option>
+ </select>
+ </td>
+ </tr>
+ <% } %>
+ <tr>
+ <th><label>Arguments:</label></th>
+ <td>
+ <div class="multifield" id="arguments"></div>
+ <table class="argument-links">
+ <tr>
+ <td>Add</td>
+ <td>
+ <% if (queue_type == "classic") { %>
+ <span class="argument-link" field="arguments" key="x-message-ttl" type="number">Message TTL</span> <span class="help" id="queue-message-ttl"></span> |
+ <% } %>
+ <% if (queue_type != "stream") { %>
+ <span class="argument-link" field="arguments" key="x-expires" type="number">Auto expire</span> <span class="help" id="queue-expires"></span> |
+ <span class="argument-link" field="arguments" key="x-overflow" type="string">Overflow behaviour</span> <span class="help" id="queue-overflow"></span> |
+ <span class="argument-link" field="arguments" key="x-single-active-consumer" type="boolean">Single active consumer</span> <span class="help" id="queue-single-active-consumer"></span><br/>
+ <span class="argument-link" field="arguments" key="x-dead-letter-exchange" type="string">Dead letter exchange</span> <span class="help" id="queue-dead-letter-exchange"></span> |
+ <span class="argument-link" field="arguments" key="x-dead-letter-routing-key" type="string">Dead letter routing key</span> <span class="help" id="queue-dead-letter-routing-key"></span><br/>
+ <span class="argument-link" field="arguments" key="x-max-length" type="number">Max length</span> <span class="help" id="queue-max-length"></span> |
+ <% } %>
+ <span class="argument-link" field="arguments" key="x-max-length-bytes" type="number">Max length bytes</span> <span class="help" id="queue-max-length-bytes"></span><br/>
+ <% if (queue_type == "classic") { %>
+ <span class="argument-link" field="arguments" key="x-max-priority" type="number">Maximum priority</span> <span class="help" id="queue-max-priority"></span>
+ | <span class="argument-link" field="arguments" key="x-queue-mode" type="string" value="lazy">Lazy mode</span> <span class="help" id="queue-lazy"></span>
+ | <span class="argument-link" field="arguments" key="x-queue-master-locator" type="string" value="">Master locator</span> <span class="help" id="queue-master-locator"></span>
+ <% } %>
+ <% if (queue_type == "quorum") { %>
+ <span class="argument-link" field="arguments" key="x-delivery-limit" type="number">Delivery limit</span><span class="help" id="delivery-limit"></span>
+ | <span class="argument-link" field="arguments" key="x-max-in-memory-length" type="number">Max in memory length</span><span class="help" id="queue-max-in-memory-length"></span>
+ | <span class="argument-link" field="arguments" key="x-max-in-memory-bytes" type="number">Max in memory bytes</span><span class="help" id="queue-max-in-memory-bytes"></span>
+ | <span class="argument-link" field="arguments" key="x-quorum-initial-group-size" type="number">Initial cluster size</span><span class="help" id="queue-quorum-initial-group-size"></span><br/>
+ <% } %>
+ <% if (queue_type == "stream") { %>
+ <span class="argument-link" field="arguments" key="x-max-age" type="string">Max time retention</span><span class="help" id="queue-max-age"></span>
+ | <span class="argument-link" field="arguments" key="x-max-segment-size" type="number">Max segment size</span><span class="help" id="queue-max-segment-size"></span>
+ | <span class="argument-link" field="arguments" key="x-initial-cluster-size" type="number">Initial cluster size</span><span class="help" id="queue-initial-cluster-size"></span>
+ | <span class="argument-link" field="arguments" key="x-queue-leader-locator" type="string">Leader locator</span><span class="help" id="queue-leader-locator"></span>
+ <% } %>
+ </td>
+ </tr>
+ </table>
+ </td>
+ </tr>
+ </table>
+ <input type="submit" value="Add queue"/>
+ </form>
+ </div>
+</div>
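The optional arguments offered by this form are the same x-arguments a client can pass at declaration time. A hedged sketch using the amqp_client library from this tree (connection parameters, queue name and argument values are illustrative):

    -include_lib("amqp_client/include/amqp_client.hrl").

    declare_quorum_queue() ->
        %% Connect with default parameters (localhost:5672, guest/guest).
        {ok, Conn} = amqp_connection:start(#amqp_params_network{}),
        {ok, Ch}   = amqp_connection:open_channel(Conn),
        Declare = #'queue.declare'{
                     queue     = <<"orders">>,
                     durable   = true,
                     arguments = [{<<"x-queue-type">>,     longstr, <<"quorum">>},
                                  {<<"x-delivery-limit">>, long,    5}]},
        #'queue.declare_ok'{} = amqp_channel:call(Ch, Declare),
        amqp_connection:close(Conn).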
diff --git a/deps/rabbitmq_management/priv/www/js/tmpl/rate-options.ejs b/deps/rabbitmq_management/priv/www/js/tmpl/rate-options.ejs
new file mode 100644
index 0000000000..6ec5c75c94
--- /dev/null
+++ b/deps/rabbitmq_management/priv/www/js/tmpl/rate-options.ejs
@@ -0,0 +1,56 @@
+<%
+ var id = span.attr('for');
+ var mode = get_pref('rate-mode-' + id);
+ var size = get_pref('chart-size-' + id);
+ var range_pref = get_pref('chart-range');
+%>
+
+<form action="#/rate-options" method="put" class="auto-submit">
+ <input type="hidden" name="id" value="<%= id %>"/>
+ <table class="form" width="100%">
+ <tr>
+ <td colspan="2">
+ <h3>This time series</h3>
+ </td>
+ </tr>
+ <tr>
+ <th><label>Display:</label></th>
+ <td>
+ <%= fmt_radio('mode', 'Chart', 'chart', mode) %>
+ <%= fmt_radio('mode', 'Current value', 'curr', mode) %>
+ <% if (id != 'node-stats') { %>
+ <%= fmt_radio('mode', 'Moving average', 'avg', mode) %>
+ <% } %>
+ </td>
+ </tr>
+ <tr>
+ <th><label>Chart size:</label></th>
+ <td>
+ <%= fmt_radio('size', 'Small', 'small', size) %>
+ <%= fmt_radio('size', 'Medium', 'medium', size) %>
+ <%= fmt_radio('size', 'Large', 'large', size) %>
+ </td>
+ </tr>
+ <tr>
+ <td colspan="2">
+ <h3>All time series</h3>
+ </td>
+ </tr>
+ <tr>
+ <th><label>Chart range:</label></th>
+ <td>
+<%
+ var range_type = get_chart_range_type(id);
+ for (var i = 0; i < CHART_RANGES[range_type].length; ++i) {
+ var data = CHART_RANGES[range_type][i];
+ var range = data[0];
+ var desc = data[1];
+%>
+ <%= fmt_radio('range', desc, range, range_pref) %>
+<%
+ }
+%>
+ </td>
+ </tr>
+ </table>
+</form>
diff --git a/deps/rabbitmq_management/priv/www/js/tmpl/registry.ejs b/deps/rabbitmq_management/priv/www/js/tmpl/registry.ejs
new file mode 100644
index 0000000000..38d98e1ebb
--- /dev/null
+++ b/deps/rabbitmq_management/priv/www/js/tmpl/registry.ejs
@@ -0,0 +1,25 @@
+<% if (node.running) { %>
+<table class="list">
+ <tr>
+ <th>Name</th>
+ <th>Description</th>
+<% if (show_enabled) { %>
+ <th>Enabled</th>
+<% } %>
+ </tr>
+ <%
+ for (var i = 0; i < list.length; i++) {
+ var item = list[i];
+ %>
+ <tr<%= alt_rows(i)%>>
+ <td><%= fmt_string(item.name) %></td>
+ <td><%= fmt_string(item.description) %></td>
+<% if (show_enabled) { %>
+ <td class="c"><%= fmt_boolean(item.enabled) %></td>
+<% } %>
+ </tr>
+ <% } %>
+</table>
+<% } else {%>
+<p>...node not running...</p>
+<% } %>
diff --git a/deps/rabbitmq_management/priv/www/js/tmpl/status.ejs b/deps/rabbitmq_management/priv/www/js/tmpl/status.ejs
new file mode 100644
index 0000000000..d5da971f8b
--- /dev/null
+++ b/deps/rabbitmq_management/priv/www/js/tmpl/status.ejs
@@ -0,0 +1 @@
+<span class="status-<%= fmt_string(status) %>"><%= text %></span>
diff --git a/deps/rabbitmq_management/priv/www/js/tmpl/topic-permissions.ejs b/deps/rabbitmq_management/priv/www/js/tmpl/topic-permissions.ejs
new file mode 100644
index 0000000000..02f5459516
--- /dev/null
+++ b/deps/rabbitmq_management/priv/www/js/tmpl/topic-permissions.ejs
@@ -0,0 +1,96 @@
+<div class="section">
+ <h2>Topic permissions</h2>
+ <div class="hider">
+ <h3>Current topic permissions</h3>
+ <% if (topic_permissions.length > 0) { %>
+ <table class="list">
+ <thead>
+ <tr>
+<% if (mode == 'vhost') { %>
+ <th>User</th>
+<% } else { %>
+ <th>Virtual host</th>
+<% } %>
+ <th>Exchange</th>
+ <th>Write regexp</th>
+ <th>Read regexp</th>
+ <th></th>
+ </tr>
+ </thead>
+ <tbody>
+<%
+for (var i = 0; i < topic_permissions.length; i++) {
+ var permission = topic_permissions[i];
+%>
+ <tr<%= alt_rows(i)%>>
+<% if (mode == 'vhost') { %>
+ <td><%= link_user(permission.user) %></td>
+<% } else { %>
+ <td><%= link_vhost(permission.vhost) %></td>
+<% } %>
+ <td><%= fmt_exchange(permission.exchange) %></td>
+ <td><%= fmt_string(permission.write) %></td>
+ <td><%= fmt_string(permission.read) %></td>
+ <td class="c">
+ <form action="#/topic-permissions" method="delete" class="confirm">
+ <input type="hidden" name="username" value="<%= fmt_string(permission.user) %>"/>
+ <input type="hidden" name="vhost" value="<%= fmt_string(permission.vhost) %>"/>
+ <input type="hidden" name="exchange" value="<%= fmt_exchange_url(permission.exchange) %>"/>
+ <input type="submit" value="Clear"/>
+ </form>
+ </td>
+ </tr>
+ <% } %>
+ </tbody>
+ </table>
+ <% } else { %>
+ <p>... no topic permissions ...</p>
+ <% } %>
+
+<h3>Set topic permission</h3>
+ <form action="#/topic-permissions" method="put">
+ <table class="form">
+ <tr>
+<% if (mode == 'vhost') { %>
+ <th>User</th>
+ <td>
+ <input type="hidden" name="vhost" value="<%= fmt_string(parent.name) %>"/>
+ <select name="username">
+ <% for (var i = 0; i < users.length; i++) { %>
+ <option value="<%= fmt_string(users[i].name) %>"><%= fmt_string(users[i].name) %></option>
+ <% } %>
+ </select>
+ </td>
+<% } else { %>
+ <th><label>Virtual Host:</label></th>
+ <td>
+ <input type="hidden" name="username" value="<%= fmt_string(parent.name) %>"/>
+ <select name="vhost" class="list-exchanges">
+ <% for (var i = 0; i < vhosts.length; i++) { %>
+ <option value="<%= fmt_string(vhosts[i].name) %>"><%= fmt_string(vhosts[i].name) %></option>
+ <% } %>
+ </select>
+ </td>
+<% } %>
+ </tr>
+ <tr>
+ <th><label>Exchange:</label></th>
+ <td>
+ <div id='list-exchanges'>
+ <%= format('list-exchanges', {'exchanges': exchanges}) %>
+ </div>
+ </td>
+ </tr>
+ <tr>
+ <th><label>Write regexp:</label></th>
+ <td><input type="text" name="write" value=".*"/></td>
+ </tr>
+ <tr>
+ <th><label>Read regexp:</label></th>
+ <td><input type="text" name="read" value=".*"/></td>
+ </tr>
+ </table>
+ <input type="submit" value="Set topic permission"/>
+ </form>
+ </div>
+</div>
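The same operation this form performs is also exposed through the CLI; an illustrative invocation (user, vhost and patterns are assumptions):

    rabbitmqctl set_topic_permissions -p / alice amq.topic "^alice\..*" "^alice\..*"

The last two arguments are the write and read regular expressions matched against routing keys, in the same order as the form fields above.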
diff --git a/deps/rabbitmq_management/priv/www/js/tmpl/user.ejs b/deps/rabbitmq_management/priv/www/js/tmpl/user.ejs
new file mode 100644
index 0000000000..e4f49b104e
--- /dev/null
+++ b/deps/rabbitmq_management/priv/www/js/tmpl/user.ejs
@@ -0,0 +1,100 @@
+<h1>User: <b><%= fmt_string(user.name) %></b></h1>
+
+<% if (permissions.length == 0) { %>
+<p class="warning">
+ This user does not have permission to access any virtual hosts.<br/>
+ Use "Set Permission" below to grant permission to access virtual hosts.
+</p>
+<% } %>
+
+<div class="section">
+ <h2>Overview</h2>
+ <div class="hider">
+<table class="facts">
+ <tr>
+ <th>Tags</th>
+ <td><%= fmt_string(user.tags) %></td>
+ </tr>
+ <tr>
+ <th>Can log in with password</th>
+ <td><%= fmt_boolean(user.password_hash.length > 0) %></td>
+ </tr>
+</table>
+ </div>
+</div>
+
+<%= format('permissions', {'mode': 'user', 'permissions': permissions, 'vhosts': vhosts, 'parent': user}) %>
+
+<%= format('topic-permissions', {'mode': 'user', 'topic_permissions': topic_permissions, 'vhosts': vhosts, 'parent': user, 'exchanges': exchanges}) %>
+
+<div class="section-hidden">
+ <h2>Update this user</h2>
+ <div class="hider">
+ <form action="#/users-modify" method="put">
+ <input type="hidden" name="username" value="<%= fmt_string(user.name) %>"/>
+ <table class="form">
+ <tr>
+ <th>
+ <label>
+ <select name="has-password" class="narrow controls-appearance">
+ <% if (user.password_hash.length > 0) { %>
+ <option value="password" selected="selected">Password:</option>
+ <option value="no-password">No password</option>
+ <% } else { %>
+ <option value="password">Password:</option>
+ <option value="no-password" selected="selected">No password</option>
+ <% } %>
+ </select>
+ </label>
+ </th>
+ <td>
+ <% if (user.password_hash.length > 0) { %>
+ <div id="password-div">
+ <% } else { %>
+ <div id="password-div" style="display: none;">
+ <% } %>
+ <input type="password" name="password" />
+ <span class="mand">*</span><br/>
+ <input type="password" name="password_confirm" />
+ <span class="mand">*</span>
+ (confirm)
+ </div>
+ <% if (user.password_hash.length > 0) { %>
+ <div id="no-password-div" style="display: none;">
+ <% } else { %>
+ <div id="no-password-div">
+ <% } %>
+ User cannot log in using password.
+ </div>
+ </td>
+ </tr>
+ <tr>
+ <th><label>Tags:</label></th>
+ <td>
+ <input type="text" name="tags" id="tags" value="<%= fmt_string(user.tags) %>" />
+ <span class="help" id="user-tags"/>
+ <sub>
+ [<span class="tag-link" tag="administrator">Admin</span>]
+ [<span class="tag-link" tag="monitoring">Monitoring</span>]
+ [<span class="tag-link" tag="policymaker">Policymaker</span>]
+ [<span class="tag-link" tag="management">Management</span>]
+ [<span class="tag-link" tag="impersonator">Impersonator</span>]
+ [<span class="tag-link" tag="">None</span>]
+ </sub>
+ </td>
+ </tr>
+ </table>
+ <input type="submit" value="Update user"/>
+ </form>
+ </div>
+</div>
+
+<div class="section-hidden">
+ <h2>Delete this user</h2>
+ <div class="hider">
+ <form action="#/users" method="delete" class="confirm">
+ <input type="hidden" name="username" value="<%= fmt_string(user.name) %>"/>
+ <input type="submit" value="Delete"/>
+ </form>
+ </div>
+</div>
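Tags can equally be managed from the CLI; a hedged example (user name and tags assumed):

    rabbitmqctl set_user_tags alice monitoring policymaker

Note that set_user_tags replaces the whole tag list rather than appending to it, just as the form above submits the complete value of the Tags field.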
diff --git a/deps/rabbitmq_management/priv/www/js/tmpl/users.ejs b/deps/rabbitmq_management/priv/www/js/tmpl/users.ejs
new file mode 100644
index 0000000000..fb6ddc5f92
--- /dev/null
+++ b/deps/rabbitmq_management/priv/www/js/tmpl/users.ejs
@@ -0,0 +1,98 @@
+<h1>Users</h1>
+<div class="section">
+ <h2>All users</h2>
+ <div class="hider">
+<%= filter_ui(users) %>
+ <div class="updatable">
+<% if (users.length > 0) { %>
+<table class="list">
+ <thead>
+ <tr>
+ <th><%= fmt_sort('Name', 'name') %></th>
+ <th><%= fmt_sort('Tags', 'tags') %></th>
+ <th>Can access virtual hosts</th>
+ <th>Has password</th>
+ </tr>
+ </thead>
+ <tbody>
+ <%
+ for (var i = 0; i < users.length; i++) {
+ var user = users[i];
+ %>
+ <tr<%= alt_rows(i)%>>
+ <td><%= link_user(user.name) %></td>
+ <td class="c"><%= fmt_string(user.tags) %></td>
+ <td class="c"><%= fmt_permissions(user, permissions, 'user', 'vhost',
+ '<p class="warning">No access</p>') %></td>
+ <td class="c"><%= fmt_boolean(user.password_hash.length > 0) %></td>
+ </tr>
+ <% } %>
+ </tbody>
+</table>
+<% } else { %>
+ <p>... no users ...</p>
+<% } %>
+ <p><span class="help" id="internal-users-only"></span></p>
+ </div>
+ </div>
+</div>
+
+<div class="section-hidden">
+ <h2>Add a user</h2>
+ <div class="hider">
+ <form action="#/users-add" method="put">
+ <table class="form">
+ <tr>
+ <th><label>Username:</label></th>
+ <td>
+ <input type="text" name="username"/>
+ <span class="mand">*</span>
+ </td>
+ </tr>
+ <tr>
+ <th>
+ <label>
+ <select name="has-password" class="narrow controls-appearance">
+ <option value="password">Password:</option>
+ <option value="no-password">No password</option>
+ </select>
+ </label>
+ </th>
+ <td>
+ <div id="password-div">
+ <input type="password" name="password" />
+ <span class="mand">*</span><br/>
+ <input type="password" name="password_confirm" />
+ <span class="mand">*</span>
+ (confirm)
+ </div>
+ <div id="no-password-div" style="display: none;">
+ User cannot log in using password.
+ </div>
+ </td>
+ </tr>
+ <tr>
+ <th><label>Tags:</label></th>
+ <td>
+ <input type="text" name="tags" id="tags" />
+ <span class="help" id="user-tags"/>
+ <table class="argument-links">
+ <tr>
+ <td>Set</td>
+ <td>
+ <span class="tag-link" tag="administrator">Admin</span> |
+ <span class="tag-link" tag="monitoring">Monitoring</span> |
+ <span class="tag-link" tag="policymaker">Policymaker</span><br />
+ <span class="tag-link" tag="management">Management</span> |
+ <span class="tag-link" tag="impersonator">Impersonator</span> |
+ <span class="tag-link" tag="">None</span>
+ </td>
+ </tr>
+ </table>
+ </td>
+ </tr>
+ </table>
+ <input type="submit" value="Add user"/>
+ </form>
+ </div>
+</div>
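For reference, the CLI equivalent of this form (credentials illustrative):

    rabbitmqctl add_user alice 's3cr3t-pa55'
    rabbitmqctl set_user_tags alice management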
diff --git a/deps/rabbitmq_management/priv/www/js/tmpl/vhost.ejs b/deps/rabbitmq_management/priv/www/js/tmpl/vhost.ejs
new file mode 100644
index 0000000000..2639224954
--- /dev/null
+++ b/deps/rabbitmq_management/priv/www/js/tmpl/vhost.ejs
@@ -0,0 +1,62 @@
+<h1>Virtual Host: <b><%= fmt_string(vhost.name) %></b></h1>
+
+<% if (permissions.length == 0) { %>
+<p class="warning">
+ No users have permission to access this virtual host.<br/>
+ Use "Set Permission" below to grant users permission to access this virtual host.
+</p>
+<% } %>
+
+<% if (!disable_stats) { %>
+<div class="section">
+ <h2>Overview</h2>
+ <div class="hider updatable">
+ <%= queue_lengths('lengths-vhost', vhost) %>
+<% if (rates_mode != 'none') { %>
+ <%= message_rates('msg-rates-vhost', vhost.message_stats) %>
+<% } %>
+ <%= data_rates('data-rates-vhost', vhost, 'Data rates') %>
+ <h3>Details</h3>
+ <table class="facts">
+ <tr>
+ <th>Tracing enabled:</th>
+      <td><%= fmt_boolean(vhost.tracing) %></td>
+    </tr>
+ <tr>
+ <th>State:</th>
+ <td>
+ <table class="mini">
+ <% for (var node in vhost.cluster_state) { %>
+ <tr>
+ <th><%= fmt_escape_html(node) %> :</th>
+ <td><%= vhost.cluster_state[node] %>
+ <% if (vhost.cluster_state[node] == "stopped"){ %>
+ <form action="#/restart_vhost" method="post">
+ <input type="hidden" name="node" value="<%= node %>"/>
+ <input type="hidden" name="vhost" value="<%= vhost.name %>"/>
+ <input type="submit" value="Restart"/>
+ </form>
+ <% } %>
+ </td>
+ </tr>
+ <% } %>
+ </table>
+ </td>
+ </tr>
+ </table>
+</div>
+</div>
+<% } %>
+
+<%= format('permissions', {'mode': 'vhost', 'permissions': permissions, 'users': users, 'parent': vhost}) %>
+
+<%= format('topic-permissions', {'mode': 'vhost', 'topic_permissions': topic_permissions, 'users':users, 'parent': vhost, 'exchanges': exchanges}) %>
+
+<div class="section-hidden">
+<h2>Delete this vhost</h2>
+<div class="hider">
+<form action="#/vhosts" method="delete" class="confirm">
+<input type="hidden" name="name" value="<%= fmt_string(vhost.name) %>"/>
+<input type="submit" value="Delete this virtual host"/>
+</form>
+</div>
+</div>
diff --git a/deps/rabbitmq_management/priv/www/js/tmpl/vhosts.ejs b/deps/rabbitmq_management/priv/www/js/tmpl/vhosts.ejs
new file mode 100644
index 0000000000..ef1b7206f2
--- /dev/null
+++ b/deps/rabbitmq_management/priv/www/js/tmpl/vhosts.ejs
@@ -0,0 +1,163 @@
+<h1>Virtual Hosts</h1>
+
+<div class="section">
+ <h2>All virtual hosts</h2>
+ <div class="hider">
+<%= filter_ui(vhosts) %>
+ <div class="updatable">
+<% if (vhosts.length > 0) { %>
+<table class="list">
+ <thead>
+ <tr>
+ <%= group_heading('vhosts', 'Overview', [true, true, true]) %>
+ <% if (!disable_stats) { %>
+ <%= group_heading('vhosts', 'Messages', []) %>
+ <%= group_heading('vhosts', 'Network', []) %>
+<% if (rates_mode != 'none') { %>
+ <%= group_heading('vhosts', 'Message rates', []) %>
+<% } %>
+ <% } %>
+ <th class="plus-minus"><span class="popup-options-link" title="Click to change columns" type="columns" for="vhosts">+/-</span></th>
+ </tr>
+ <tr>
+ <th><%= fmt_sort('Name', 'name') %></th>
+ <th>Users <span class="help" id="internal-users-only"></span></th>
+ <th>State</th>
+<% if (show_column('vhosts', 'cluster-state')) { %>
+ <th>Cluster state</th>
+<% } %>
+<% if (show_column('vhosts', 'description')) { %>
+ <th>Description</th>
+<% } %>
+<% if (show_column('vhosts', 'tags')) { %>
+ <th>Tags</th>
+<% } %>
+<% if (!disable_stats) { %>
+<% if (show_column('vhosts', 'msgs-ready')) { %>
+ <th><%= fmt_sort('Ready', 'messages_ready') %></th>
+<% } %>
+<% if (show_column('vhosts', 'msgs-unacked')) { %>
+ <th><%= fmt_sort('Unacked', 'messages_unacknowledged') %></th>
+<% } %>
+<% if (show_column('vhosts', 'msgs-total')) { %>
+ <th><%= fmt_sort('Total', 'messages') %></th>
+<% } %>
+<% if (show_column('vhosts', 'from_client')) { %>
+ <th><%= fmt_sort('From client', 'recv_oct_details.rate') %></th>
+<% } %>
+<% if (show_column('vhosts', 'to_client')) { %>
+ <th><%= fmt_sort('To client', 'send_oct_details.rate') %></th>
+<% } %>
+<% if (rates_mode != 'none') { %>
+ <% if (show_column('vhosts', 'rate-publish')) { %>
+ <th><%= fmt_sort('publish', 'message_stats.publish_details.rate') %></th>
+ <% } %>
+ <% if (show_column('vhosts', 'rate-deliver')) { %>
+ <th><%= fmt_sort('deliver / get','message_stats.deliver_get_details.rate') %></th>
+ <% } %>
+<% } %>
+<% } %>
+ </tr>
+ </thead>
+ <tbody>
+ <%
+ for (var i = 0; i < vhosts.length; i++) {
+ var vhost = vhosts[i];
+ %>
+ <tr<%= alt_rows(i)%>>
+ <td><%= link_vhost(vhost.name) %></td>
+ <td class="c"><%= fmt_permissions(vhost, permissions, 'vhost', 'user',
+ '<p class="warning">No users</p>') %></td>
+ <td><%= fmt_vhost_state(vhost) %></td>
+<% if (show_column('vhosts', 'cluster-state')) { %>
+ <td>
+ <table>
+ <tbody>
+ <%
+ for (var node in vhost.cluster_state) {
+ var state = vhost.cluster_state[node];
+ %>
+ <tr>
+ <td><%= node %></td>
+ <td>
+ <%= state %>
+ <% if (state == "stopped"){ %>
+ <form action="#/restart_vhost" method="post" class="confirm">
+ <input type="hidden" name="node" value="<%= node %>"/>
+ <input type="hidden" name="vhost" value="<%= vhost.name %>"/>
+ <input type="submit" value="Restart"/>
+ </form>
+ <% } %>
+ </td>
+ </tr>
+ <%
+ }
+ %>
+ </tbody>
+ </table>
+ </td>
+<% } %>
+<% if (show_column('vhosts', 'description')) { %>
+ <td class="r"><%= fmt_string(vhost.description) %></td>
+<% } %>
+<% if (show_column('vhosts', 'tags')) { %>
+ <td class="r"><%= fmt_string(vhost.tags) %></td>
+<% } %>
+<% if (!disable_stats) { %>
+<% if (show_column('vhosts', 'msgs-ready')) { %>
+ <td class="r"><%= fmt_num_thousands(vhost.messages_ready) %></td>
+<% } %>
+<% if (show_column('vhosts', 'msgs-unacked')) { %>
+ <td class="r"><%= fmt_num_thousands(vhost.messages_unacknowledged) %></td>
+<% } %>
+<% if (show_column('vhosts', 'msgs-total')) { %>
+ <td class="r"><%= fmt_num_thousands(vhost.messages) %></td>
+<% } %>
+<% if (show_column('vhosts', 'from_client')) { %>
+ <td><%= fmt_detail_rate_bytes(vhost, 'recv_oct') %></td>
+<% } %>
+<% if (show_column('vhosts', 'to_client')) { %>
+ <td><%= fmt_detail_rate_bytes(vhost, 'send_oct') %></td>
+<% } %>
+<% if (rates_mode != 'none') { %>
+ <% if (show_column('vhosts', 'rate-publish')) { %>
+ <td class="r"><%= fmt_detail_rate(vhost.message_stats, 'publish') %></td>
+ <% } %>
+ <% if (show_column('vhosts', 'rate-deliver')) { %>
+ <td class="r"><%= fmt_detail_rate(vhost.message_stats, 'deliver_get') %></td>
+ <% } %>
+<% } %>
+<% } %>
+ </tr>
+ <% } %>
+ </tbody>
+</table>
+<% } else { %>
+ <p>... no vhosts ...</p>
+<% } %>
+ </div>
+ </div>
+</div>
+
+<div class="section-hidden">
+ <h2>Add a new virtual host</h2>
+ <div class="hider">
+ <form action="#/vhosts" method="put">
+ <table class="form">
+ <tr>
+ <th><label>Name:</label></th>
+ <td><input type="text" name="name"/><span class="mand">*</span></td>
+ </tr>
+ <tr>
+ <th><label>Description:</label></th>
+ <td><input type="text" name="description"/></td>
+ </tr>
+ <tr>
+ <th><label>Tags:</label></th>
+ <td><input type="text" name="tags"/></td>
+ </tr>
+ </table>
+ <input type="submit" value="Add virtual host"/>
+ </form>
+ </div>
+</div>
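This form issues a PUT against the HTTP API; a hedged curl equivalent (default credentials, port and field values are assumptions):

    curl -u guest:guest -X PUT http://localhost:15672/api/vhosts/staging \
         -H 'content-type: application/json' \
         -d '{"description": "staging environment", "tags": "qa"}'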
diff --git a/deps/rabbitmq_management/rabbitmq-components.mk b/deps/rabbitmq_management/rabbitmq-components.mk
new file mode 100644
index 0000000000..b2a3be8b35
--- /dev/null
+++ b/deps/rabbitmq_management/rabbitmq-components.mk
@@ -0,0 +1,359 @@
+ifeq ($(.DEFAULT_GOAL),)
+# Define default goal to `all` because this file defines some targets
+# before the inclusion of erlang.mk leading to the wrong target becoming
+# the default.
+.DEFAULT_GOAL = all
+endif
+
+# PROJECT_VERSION defaults to:
+# 1. the version exported by rabbitmq-server-release;
+# 2. the version stored in `git-revisions.txt`, if it exists;
+# 3. a version based on git-describe(1), if it is a Git clone;
+# 4. 0.0.0
+
+PROJECT_VERSION := $(RABBITMQ_VERSION)
+
+ifeq ($(PROJECT_VERSION),)
+PROJECT_VERSION := $(shell \
+if test -f git-revisions.txt; then \
+ head -n1 git-revisions.txt | \
+ awk '{print $$$(words $(PROJECT_DESCRIPTION) version);}'; \
+else \
+ (git describe --dirty --abbrev=7 --tags --always --first-parent \
+ 2>/dev/null || echo rabbitmq_v0_0_0) | \
+ sed -e 's/^rabbitmq_v//' -e 's/^v//' -e 's/_/./g' -e 's/-/+/' \
+ -e 's/-/./g'; \
+fi)
+endif
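As a worked example of the git-describe fallback above, a clone whose HEAD is described as v3.8.9-10-gabcdef1 (an assumed tag) is transformed by the sed pipeline step by step:

    # v3.8.9-10-gabcdef1 -> 3.8.9-10-gabcdef1  (strip the leading "v")
    #                    -> 3.8.9+10-gabcdef1  (first "-" becomes "+")
    #                    -> 3.8.9+10.gabcdef1  (remaining "-" become ".")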
+
+# --------------------------------------------------------------------
+# RabbitMQ components.
+# --------------------------------------------------------------------
+
+# For RabbitMQ repositories, we want to check out branches which match
+# the parent project. For instance, if the parent project is on a
+# release tag, dependencies must be on the same release tag. If the
+# parent project is on a topic branch, dependencies must be on the same
+# topic branch, or fall back to `stable` or `master`, whichever was the
+# base of the topic branch.
+
+dep_amqp_client = git_rmq rabbitmq-erlang-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_amqp10_client = git_rmq rabbitmq-amqp1.0-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_amqp10_common = git_rmq rabbitmq-amqp1.0-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbit = git_rmq rabbitmq-server $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbit_common = git_rmq rabbitmq-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_amqp1_0 = git_rmq rabbitmq-amqp1.0 $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_amqp = git_rmq rabbitmq-auth-backend-amqp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_cache = git_rmq rabbitmq-auth-backend-cache $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_http = git_rmq rabbitmq-auth-backend-http $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_ldap = git_rmq rabbitmq-auth-backend-ldap $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_oauth2 = git_rmq rabbitmq-auth-backend-oauth2 $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_mechanism_ssl = git_rmq rabbitmq-auth-mechanism-ssl $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_aws = git_rmq rabbitmq-aws $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_boot_steps_visualiser = git_rmq rabbitmq-boot-steps-visualiser $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_cli = git_rmq rabbitmq-cli $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_codegen = git_rmq rabbitmq-codegen $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_consistent_hash_exchange = git_rmq rabbitmq-consistent-hash-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_ct_client_helpers = git_rmq rabbitmq-ct-client-helpers $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_ct_helpers = git_rmq rabbitmq-ct-helpers $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_delayed_message_exchange = git_rmq rabbitmq-delayed-message-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_dotnet_client = git_rmq rabbitmq-dotnet-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_event_exchange = git_rmq rabbitmq-event-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_federation = git_rmq rabbitmq-federation $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_federation_management = git_rmq rabbitmq-federation-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_java_client = git_rmq rabbitmq-java-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_client = git_rmq rabbitmq-jms-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_cts = git_rmq rabbitmq-jms-cts $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_topic_exchange = git_rmq rabbitmq-jms-topic-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_lvc_exchange = git_rmq rabbitmq-lvc-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management = git_rmq rabbitmq-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_agent = git_rmq rabbitmq-management-agent $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_exchange = git_rmq rabbitmq-management-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_themes = git_rmq rabbitmq-management-themes $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_message_timestamp = git_rmq rabbitmq-message-timestamp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_metronome = git_rmq rabbitmq-metronome $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_mqtt = git_rmq rabbitmq-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_objc_client = git_rmq rabbitmq-objc-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_aws = git_rmq rabbitmq-peer-discovery-aws $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_common = git_rmq rabbitmq-peer-discovery-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_consul = git_rmq rabbitmq-peer-discovery-consul $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_etcd = git_rmq rabbitmq-peer-discovery-etcd $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_k8s = git_rmq rabbitmq-peer-discovery-k8s $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_prometheus = git_rmq rabbitmq-prometheus $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_random_exchange = git_rmq rabbitmq-random-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_recent_history_exchange = git_rmq rabbitmq-recent-history-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_routing_node_stamp = git_rmq rabbitmq-routing-node-stamp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_rtopic_exchange = git_rmq rabbitmq-rtopic-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_server_release = git_rmq rabbitmq-server-release $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_sharding = git_rmq rabbitmq-sharding $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_shovel = git_rmq rabbitmq-shovel $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_shovel_management = git_rmq rabbitmq-shovel-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_stomp = git_rmq rabbitmq-stomp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_stream = git_rmq rabbitmq-stream $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_toke = git_rmq rabbitmq-toke $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_top = git_rmq rabbitmq-top $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_tracing = git_rmq rabbitmq-tracing $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_trust_store = git_rmq rabbitmq-trust-store $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_test = git_rmq rabbitmq-test $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_dispatch = git_rmq rabbitmq-web-dispatch $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_stomp = git_rmq rabbitmq-web-stomp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_stomp_examples = git_rmq rabbitmq-web-stomp-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt = git_rmq rabbitmq-web-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt_examples = git_rmq rabbitmq-web-mqtt-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_website = git_rmq rabbitmq-website $(current_rmq_ref) $(base_rmq_ref) live master
+dep_toke = git_rmq toke $(current_rmq_ref) $(base_rmq_ref) master
+
+dep_rabbitmq_public_umbrella = git_rmq rabbitmq-public-umbrella $(current_rmq_ref) $(base_rmq_ref) master
+
+# Third-party dependencies version pinning.
+#
+# We do that in this file, which is copied in all projects, to ensure
+# all projects use the same versions. It avoids conflicts and makes it
+# possible to work with rabbitmq-public-umbrella.
+
+dep_accept = hex 0.3.5
+dep_cowboy = hex 2.8.0
+dep_cowlib = hex 2.9.1
+dep_jsx = hex 2.11.0
+dep_lager = hex 3.8.0
+dep_prometheus = git https://github.com/deadtrickster/prometheus.erl.git master
+dep_ra = git https://github.com/rabbitmq/ra.git master
+dep_ranch = hex 1.7.1
+dep_recon = hex 2.5.1
+dep_observer_cli = hex 1.5.4
+dep_stdout_formatter = hex 0.2.4
+dep_sysmon_handler = hex 1.3.0
+
+RABBITMQ_COMPONENTS = amqp_client \
+ amqp10_common \
+ amqp10_client \
+ rabbit \
+ rabbit_common \
+ rabbitmq_amqp1_0 \
+ rabbitmq_auth_backend_amqp \
+ rabbitmq_auth_backend_cache \
+ rabbitmq_auth_backend_http \
+ rabbitmq_auth_backend_ldap \
+ rabbitmq_auth_backend_oauth2 \
+ rabbitmq_auth_mechanism_ssl \
+ rabbitmq_aws \
+ rabbitmq_boot_steps_visualiser \
+ rabbitmq_cli \
+ rabbitmq_codegen \
+ rabbitmq_consistent_hash_exchange \
+ rabbitmq_ct_client_helpers \
+ rabbitmq_ct_helpers \
+ rabbitmq_delayed_message_exchange \
+ rabbitmq_dotnet_client \
+ rabbitmq_event_exchange \
+ rabbitmq_federation \
+ rabbitmq_federation_management \
+ rabbitmq_java_client \
+ rabbitmq_jms_client \
+ rabbitmq_jms_cts \
+ rabbitmq_jms_topic_exchange \
+ rabbitmq_lvc_exchange \
+ rabbitmq_management \
+ rabbitmq_management_agent \
+ rabbitmq_management_exchange \
+ rabbitmq_management_themes \
+ rabbitmq_message_timestamp \
+ rabbitmq_metronome \
+ rabbitmq_mqtt \
+ rabbitmq_objc_client \
+ rabbitmq_peer_discovery_aws \
+ rabbitmq_peer_discovery_common \
+ rabbitmq_peer_discovery_consul \
+ rabbitmq_peer_discovery_etcd \
+ rabbitmq_peer_discovery_k8s \
+ rabbitmq_prometheus \
+ rabbitmq_random_exchange \
+ rabbitmq_recent_history_exchange \
+ rabbitmq_routing_node_stamp \
+ rabbitmq_rtopic_exchange \
+ rabbitmq_server_release \
+ rabbitmq_sharding \
+ rabbitmq_shovel \
+ rabbitmq_shovel_management \
+ rabbitmq_stomp \
+ rabbitmq_stream \
+ rabbitmq_toke \
+ rabbitmq_top \
+ rabbitmq_tracing \
+ rabbitmq_trust_store \
+ rabbitmq_web_dispatch \
+ rabbitmq_web_mqtt \
+ rabbitmq_web_mqtt_examples \
+ rabbitmq_web_stomp \
+ rabbitmq_web_stomp_examples \
+ rabbitmq_website
+
+# Erlang.mk does not rebuild dependencies once they have been compiled,
+# except for those listed in the `$(FORCE_REBUILD)` variable.
+#
+# We want all RabbitMQ components to always be rebuilt: this eases
+# the work on several components at the same time.
+
+FORCE_REBUILD = $(RABBITMQ_COMPONENTS)
+
+# Several components have a custom erlang.mk/build.config, mainly
+# to disable eunit. Therefore, we can't use the top-level project's
+# erlang.mk copy.
+NO_AUTOPATCH += $(RABBITMQ_COMPONENTS)
+
+ifeq ($(origin current_rmq_ref),undefined)
+ifneq ($(wildcard .git),)
+current_rmq_ref := $(shell (\
+ ref=$$(LANG=C git branch --list | awk '/^\* \(.*detached / {ref=$$0; sub(/.*detached [^ ]+ /, "", ref); sub(/\)$$/, "", ref); print ref; exit;} /^\* / {ref=$$0; sub(/^\* /, "", ref); print ref; exit}');\
+ if test "$$(git rev-parse --short HEAD)" != "$$ref"; then echo "$$ref"; fi))
+else
+current_rmq_ref := master
+endif
+endif
+export current_rmq_ref
+
+ifeq ($(origin base_rmq_ref),undefined)
+ifneq ($(wildcard .git),)
+possible_base_rmq_ref := master
+ifeq ($(possible_base_rmq_ref),$(current_rmq_ref))
+base_rmq_ref := $(current_rmq_ref)
+else
+base_rmq_ref := $(shell \
+ (git rev-parse --verify -q master >/dev/null && \
+ git rev-parse --verify -q $(possible_base_rmq_ref) >/dev/null && \
+ git merge-base --is-ancestor $$(git merge-base master HEAD) $(possible_base_rmq_ref) && \
+ echo $(possible_base_rmq_ref)) || \
+ echo master)
+endif
+else
+base_rmq_ref := master
+endif
+endif
+export base_rmq_ref
+
+# Repository URL selection.
+#
+# First, we infer other components' location from the current project
+# repository URL, if it's a Git repository:
+# - We take the "origin" remote URL as the base
+# - The current project name and repository name are replaced by the
+# target's properties:
+# eg. rabbitmq-common is replaced by rabbitmq-codegen
+# eg. rabbit_common is replaced by rabbitmq_codegen
+#
+# If cloning from this computed location fails, we fall back to the
+# RabbitMQ upstream on GitHub.
+
+# Macro to transform eg. "rabbit_common" to "rabbitmq-common".
+rmq_cmp_repo_name = $(word 2,$(dep_$(1)))
+
+# Upstream URL for the current project.
+RABBITMQ_COMPONENT_REPO_NAME := $(call rmq_cmp_repo_name,$(PROJECT))
+RABBITMQ_UPSTREAM_FETCH_URL ?= https://github.com/rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git
+RABBITMQ_UPSTREAM_PUSH_URL ?= git@github.com:rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git
+
+# Current URL for the current project. If this is not a Git clone,
+# default to the upstream Git repository.
+ifneq ($(wildcard .git),)
+git_origin_fetch_url := $(shell git config remote.origin.url)
+git_origin_push_url := $(shell git config remote.origin.pushurl || git config remote.origin.url)
+RABBITMQ_CURRENT_FETCH_URL ?= $(git_origin_fetch_url)
+RABBITMQ_CURRENT_PUSH_URL ?= $(git_origin_push_url)
+else
+RABBITMQ_CURRENT_FETCH_URL ?= $(RABBITMQ_UPSTREAM_FETCH_URL)
+RABBITMQ_CURRENT_PUSH_URL ?= $(RABBITMQ_UPSTREAM_PUSH_URL)
+endif
+
+# Macro to replace the following pattern:
+# 1. /foo.git -> /bar.git
+# 2. /foo -> /bar
+# 3. /foo/ -> /bar/
+subst_repo_name = $(patsubst %/$(1)/%,%/$(2)/%,$(patsubst %/$(1),%/$(2),$(patsubst %/$(1).git,%/$(2).git,$(3))))
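Only the first "%" in a patsubst pattern acts as a wildcard, so in practice the "/foo.git" and trailing "/foo" forms do the work. An illustrative expansion (URL assumed):

    # $(call subst_repo_name,rabbitmq-server,rabbitmq-common,\
    #        https://github.com/rabbitmq/rabbitmq-server.git)
    # => https://github.com/rabbitmq/rabbitmq-common.git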
+
+# Macro to replace both the project's name (eg. "rabbit_common") and
+# repository name (eg. "rabbitmq-common") by the target's equivalent.
+#
+# This macro is kept on one line because we don't want whitespaces in
+# the returned value, as it's used in $(dep_fetch_git_rmq) in a shell
+# single-quoted string.
+dep_rmq_repo = $(if $(dep_$(2)),$(call subst_repo_name,$(PROJECT),$(2),$(call subst_repo_name,$(RABBITMQ_COMPONENT_REPO_NAME),$(call rmq_cmp_repo_name,$(2)),$(1))),$(pkg_$(1)_repo))
+
+dep_rmq_commits = $(if $(dep_$(1)), \
+ $(wordlist 3,$(words $(dep_$(1))),$(dep_$(1))), \
+ $(pkg_$(1)_commit))
+
+define dep_fetch_git_rmq
+ fetch_url1='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_FETCH_URL),$(1))'; \
+ fetch_url2='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_FETCH_URL),$(1))'; \
+ if test "$$$$fetch_url1" != '$(RABBITMQ_CURRENT_FETCH_URL)' && \
+ git clone -q -n -- "$$$$fetch_url1" $(DEPS_DIR)/$(call dep_name,$(1)); then \
+ fetch_url="$$$$fetch_url1"; \
+ push_url='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_PUSH_URL),$(1))'; \
+ elif git clone -q -n -- "$$$$fetch_url2" $(DEPS_DIR)/$(call dep_name,$(1)); then \
+ fetch_url="$$$$fetch_url2"; \
+ push_url='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_PUSH_URL),$(1))'; \
+ fi; \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && ( \
+ $(foreach ref,$(call dep_rmq_commits,$(1)), \
+ git checkout -q $(ref) >/dev/null 2>&1 || \
+ ) \
+ (echo "error: no valid pathspec among: $(call dep_rmq_commits,$(1))" \
+ 1>&2 && false) ) && \
+ (test "$$$$fetch_url" = "$$$$push_url" || \
+ git remote set-url --push origin "$$$$push_url")
+endef
+
+# --------------------------------------------------------------------
+# Component distribution.
+# --------------------------------------------------------------------
+
+list-dist-deps::
+ @:
+
+prepare-dist::
+ @:
+
+# --------------------------------------------------------------------
+# Umbrella-specific settings.
+# --------------------------------------------------------------------
+
+# If the top-level project is a RabbitMQ component, we override
+# $(DEPS_DIR) for this project to point to the top-level's one.
+#
+# We also verify that the guessed DEPS_DIR is actually named `deps`,
+# to rule out any situation where it is a coincidence that we found a
+# `rabbitmq-components.mk` in upper directories.
+
+possible_deps_dir_1 = $(abspath ..)
+possible_deps_dir_2 = $(abspath ../../..)
+
+ifeq ($(notdir $(possible_deps_dir_1)),deps)
+ifneq ($(wildcard $(possible_deps_dir_1)/../rabbitmq-components.mk),)
+deps_dir_overriden = 1
+DEPS_DIR ?= $(possible_deps_dir_1)
+DISABLE_DISTCLEAN = 1
+endif
+endif
+
+ifeq ($(deps_dir_overriden),)
+ifeq ($(notdir $(possible_deps_dir_2)),deps)
+ifneq ($(wildcard $(possible_deps_dir_2)/../rabbitmq-components.mk),)
+deps_dir_overriden = 1
+DEPS_DIR ?= $(possible_deps_dir_2)
+DISABLE_DISTCLEAN = 1
+endif
+endif
+endif
+
+ifneq ($(wildcard UMBRELLA.md),)
+DISABLE_DISTCLEAN = 1
+endif
+
+# We disable `make distclean` so $(DEPS_DIR) is not accidentally removed.
+
+ifeq ($(DISABLE_DISTCLEAN),1)
+ifneq ($(filter distclean distclean-deps,$(MAKECMDGOALS)),)
+SKIP_DEPS = 1
+endif
+endif
diff --git a/deps/rabbitmq_management/scripts/seed.sh b/deps/rabbitmq_management/scripts/seed.sh
new file mode 100755
index 0000000000..0b62d3d983
--- /dev/null
+++ b/deps/rabbitmq_management/scripts/seed.sh
@@ -0,0 +1,29 @@
+#!/usr/bin/env sh
+
+# use admin context to add user and client
+uaac token client get admin -s adminsecret
+
+uaac user add rabbit_user -p rabbit_password --email rabbit_user@example.com
+
+# these groups will end up in the users' token scopes
+uaac group add "rabbitmq.read:*/*"
+uaac group add "rabbitmq.write:*/*"
+uaac group add "rabbitmq.configure:*/*"
+uaac group add "rabbitmq.tag:management"
+uaac group add "rabbitmq.tag:administrator"
+
+uaac member add "rabbitmq.read:*/*" rabbit_user
+uaac member add "rabbitmq.write:*/*" rabbit_user
+uaac member add "rabbitmq.configure:*/*" rabbit_user
+uaac member add "rabbitmq.tag:management" rabbit_user
+uaac member add "rabbitmq.tag:administrator" rabbit_user
+
+# add the client for the management plugin. It has the implicit grant type.
+# add e.g. --access_token_validity 60 --refresh_token_validity 3600 to experiment with token validity
+uaac client add rabbit_user_client \
+ --name rabbit_user_client \
+ --secret '' \
+ --scope 'rabbitmq.* openid' \
+ --authorized_grant_types implicit \
+ --autoapprove true \
+ --redirect_uri 'http://localhost:15672/**'
diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_app.erl b/deps/rabbitmq_management/src/rabbit_mgmt_app.erl
new file mode 100644
index 0000000000..256ae56cc6
--- /dev/null
+++ b/deps/rabbitmq_management/src/rabbit_mgmt_app.erl
@@ -0,0 +1,206 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mgmt_app).
+
+-behaviour(application).
+-export([start/2, stop/1, reset_dispatcher/1]).
+
+-ifdef(TEST).
+-export([get_listeners_config/0]).
+-endif.
+
+-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-define(TCP_CONTEXT, rabbitmq_management_tcp).
+-define(TLS_CONTEXT, rabbitmq_management_tls).
+-define(DEFAULT_PORT, 15672).
+-define(DEFAULT_TLS_PORT, 15671).
+
+-rabbit_boot_step({rabbit_management_load_definitions,
+ [{description, "Imports definition file at management.load_definitions"},
+ {mfa, {rabbit_mgmt_load_definitions, boot, []}},
+ {enables, empty_db_check}]}).
+
+start(_Type, _StartArgs) ->
+ case application:get_env(rabbitmq_management_agent, disable_metrics_collector, false) of
+ false ->
+ start();
+ true ->
+ rabbit_log:warning("Metrics collection disabled in management agent, "
+ "management only interface started", []),
+ start()
+ end.
+
+stop(_State) ->
+ unregister_all_contexts(),
+ ok.
+
+%% At the point at which this is invoked we have both newly enabled
+%% apps and about-to-disable apps running (so that
+%% rabbit_mgmt_reset_handler can look at all of them to find
+%% extensions). Therefore we have to explicitly exclude
+%% about-to-disable apps from our new dispatcher.
+reset_dispatcher(IgnoreApps) ->
+ unregister_all_contexts(),
+ start_configured_listeners(IgnoreApps, false).
+
+-spec start_configured_listeners([atom()], boolean()) -> ok.
+start_configured_listeners(IgnoreApps, NeedLogStartup) ->
+ [start_listener(Listener, IgnoreApps, NeedLogStartup)
+ || Listener <- get_listeners_config()],
+ ok.
+
+get_listeners_config() ->
+ Listeners = case {has_configured_legacy_listener(),
+ has_configured_tcp_listener(),
+ has_configured_tls_listener()} of
+ {false, false, false} ->
+ %% nothing is configured
+ [get_tcp_listener()];
+ {false, false, true} ->
+ [get_tls_listener()];
+ {false, true, false} ->
+ [get_tcp_listener()];
+ {false, true, true} ->
+ [get_tcp_listener(),
+ get_tls_listener()];
+ {true, false, false} ->
+ [get_legacy_listener()];
+ {true, false, true} ->
+ [get_legacy_listener(),
+ get_tls_listener()];
+ {true, true, false} ->
+ %% This combination makes some sense:
+ %% legacy listener can be used to set up TLS :/
+ [get_legacy_listener(),
+ get_tcp_listener()];
+ {true, true, true} ->
+ %% what is happening?
+ rabbit_log:warning("Management plugin: TCP, TLS and a legacy (management.listener.*) listener are all configured. "
+ "Only two listeners at a time are supported. "
+ "Ignoring the legacy listener"),
+ [get_tcp_listener(),
+ get_tls_listener()]
+ end,
+ maybe_disable_sendfile(Listeners).
+
+maybe_disable_sendfile(Listeners) ->
+ DisableSendfile = #{sendfile => false},
+ F = fun(L0) ->
+ CowboyOptsL0 = proplists:get_value(cowboy_opts, L0, []),
+ CowboyOptsM0 = maps:from_list(CowboyOptsL0),
+ CowboyOptsM1 = maps:merge(DisableSendfile, CowboyOptsM0),
+ CowboyOptsL1 = maps:to_list(CowboyOptsM1),
+ L1 = lists:keydelete(cowboy_opts, 1, L0),
+ [{cowboy_opts, CowboyOptsL1}|L1]
+ end,
+ lists:map(F, Listeners).
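A sketch of the resulting transformation (the function is not exported; values illustrative). Because maps:merge/2 favours its second argument, a listener that explicitly configures sendfile keeps its own setting:

    %% maybe_disable_sendfile([[{port, 15672}]]) yields
    %%   [[{cowboy_opts, [{sendfile, false}]}, {port, 15672}]]
    %% whereas cowboy_opts already containing {sendfile, true} keeps true.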
+
+has_configured_legacy_listener() ->
+ has_configured_listener(listener).
+
+has_configured_tcp_listener() ->
+ has_configured_listener(tcp_config).
+
+has_configured_tls_listener() ->
+ has_configured_listener(ssl_config).
+
+has_configured_listener(Key) ->
+ case application:get_env(rabbitmq_management, Key, undefined) of
+ undefined -> false;
+ _ -> true
+ end.
+
+get_legacy_listener() ->
+ {ok, Listener0} = application:get_env(rabbitmq_management, listener),
+ {ok, Listener1} = ensure_port(tcp, Listener0),
+ Listener1.
+
+get_tls_listener() ->
+ {ok, Listener0} = application:get_env(rabbitmq_management, ssl_config),
+ {ok, Listener1} = ensure_port(tls, Listener0),
+ Port = proplists:get_value(port, Listener1),
+ case proplists:get_value(cowboy_opts, Listener0) of
+ undefined ->
+ [
+ {port, Port},
+ {ssl, true},
+ {ssl_opts, Listener0}
+ ];
+ CowboyOpts ->
+            %% bind a fresh variable: re-matching Listener1 here would
+            %% badmatch against the port-ensured list bound above
+            Listener2 = lists:keydelete(cowboy_opts, 1, Listener0),
+ [
+ {port, Port},
+ {ssl, true},
+             {ssl_opts, Listener2},
+ {cowboy_opts, CowboyOpts}
+ ]
+ end.
+
+get_tcp_listener() ->
+ Listener0 = application:get_env(rabbitmq_management, tcp_config, []),
+ {ok, Listener1} = ensure_port(tcp, Listener0),
+ Listener1.
+
+start_listener(Listener, IgnoreApps, NeedLogStartup) ->
+ {Type, ContextName} = case is_tls(Listener) of
+ true -> {tls, ?TLS_CONTEXT};
+ false -> {tcp, ?TCP_CONTEXT}
+ end,
+ {ok, _} = register_context(ContextName, Listener, IgnoreApps),
+ case NeedLogStartup of
+ true -> log_startup(Type, Listener);
+ false -> ok
+ end,
+ ok.
+
+register_context(ContextName, Listener, IgnoreApps) ->
+ Dispatcher = rabbit_mgmt_dispatcher:build_dispatcher(IgnoreApps),
+ rabbit_web_dispatch:register_context_handler(
+ ContextName, Listener, "",
+ Dispatcher, "RabbitMQ Management").
+
+unregister_all_contexts() ->
+ rabbit_web_dispatch:unregister_context(?TCP_CONTEXT),
+ rabbit_web_dispatch:unregister_context(?TLS_CONTEXT).
+
+ensure_port(tls, Listener) ->
+ do_ensure_port(?DEFAULT_TLS_PORT, Listener);
+ensure_port(tcp, Listener) ->
+ do_ensure_port(?DEFAULT_PORT, Listener).
+
+do_ensure_port(Port, Listener) ->
+ %% include default port if it's not provided in the config
+ %% as Cowboy won't start if the port is missing
+ M0 = maps:from_list(Listener),
+ M1 = maps:merge(#{port => Port}, M0),
+ {ok, maps:to_list(M1)}.
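A hedged sketch of the defaulting behaviour (these functions are not exported; shown for illustration only):

    {ok, L}  = ensure_port(tcp, []),             %% no port configured
    15672    = proplists:get_value(port, L),     %% ?DEFAULT_PORT filled in
    {ok, L2} = ensure_port(tcp, [{port, 8080}]),
    8080     = proplists:get_value(port, L2).    %% explicit port wins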
+
+log_startup(tcp, Listener) ->
+ rabbit_log:info("Management plugin: HTTP (non-TLS) listener started on port ~w", [port(Listener)]);
+log_startup(tls, Listener) ->
+ rabbit_log:info("Management plugin: HTTPS listener started on port ~w", [port(Listener)]).
+
+
+port(Listener) ->
+ proplists:get_value(port, Listener, ?DEFAULT_PORT).
+
+is_tls(Listener) ->
+ case proplists:get_value(ssl, Listener) of
+ undefined -> false;
+ false -> false;
+ _ -> true
+ end.
+
+start() ->
+ %% Modern TCP listener uses management.tcp.*.
+ %% Legacy TCP (or TLS) listener uses management.listener.*.
+ %% Modern TLS listener uses management.ssl.*
+ start_configured_listeners([], true),
+ rabbit_mgmt_sup_sup:start_link().
diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_cors.erl b/deps/rabbitmq_management/src/rabbit_mgmt_cors.erl
new file mode 100644
index 0000000000..3871c1f8e2
--- /dev/null
+++ b/deps/rabbitmq_management/src/rabbit_mgmt_cors.erl
@@ -0,0 +1,84 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+%% Useful documentation about CORS:
+%% * https://tools.ietf.org/html/rfc6454
+%% * https://www.w3.org/TR/cors/
+%% * https://staticapps.org/articles/cross-domain-requests-with-cors/
+-module(rabbit_mgmt_cors).
+
+-export([set_headers/2]).
+
+set_headers(ReqData, Module) ->
+ %% Send vary: origin by default if nothing else was set.
+ ReqData1 = cowboy_req:set_resp_header(<<"vary">>, <<"origin">>, ReqData),
+ case match_origin(ReqData1) of
+ false ->
+ ReqData1;
+ Origin ->
+ ReqData2 = case cowboy_req:method(ReqData1) of
+ <<"OPTIONS">> -> handle_options(ReqData1, Module);
+ _ -> ReqData1
+ end,
+ ReqData3 = cowboy_req:set_resp_header(<<"access-control-allow-origin">>,
+ Origin,
+ ReqData2),
+ cowboy_req:set_resp_header(<<"access-control-allow-credentials">>,
+ "true",
+ ReqData3)
+ end.
+
+%% Set max-age from configuration (default: 30 minutes).
+%% Set allow-methods from what is defined in Module:allowed_methods/2.
+%% Set allow-headers to the same as the request (accept all headers).
+handle_options(ReqData0, Module) ->
+ MaxAge = application:get_env(rabbitmq_management, cors_max_age, 1800),
+ Methods = case erlang:function_exported(Module, allowed_methods, 2) of
+ false -> [<<"HEAD">>, <<"GET">>, <<"OPTIONS">>];
+ true -> element(1, Module:allowed_methods(undefined, undefined))
+ end,
+ AllowMethods = string:join([binary_to_list(M) || M <- Methods], ", "),
+ ReqHeaders = cowboy_req:header(<<"access-control-request-headers">>, ReqData0),
+
+ ReqData1 = case MaxAge of
+ undefined -> ReqData0;
+ _ -> cowboy_req:set_resp_header(<<"access-control-max-age">>,
+ integer_to_list(MaxAge),
+ ReqData0)
+ end,
+ ReqData2 = case ReqHeaders of
+ undefined -> ReqData1;
+ _ -> cowboy_req:set_resp_header(<<"access-control-allow-headers">>,
+ ReqHeaders,
+                                                   ReqData1)
+ end,
+ cowboy_req:set_resp_header(<<"access-control-allow-methods">>,
+ AllowMethods,
+ ReqData2).
+
+%% If the origin header is missing or "null", we disable CORS.
+%% Otherwise, we only enable it if the origin is found in the
+%% cors_allow_origins configuration variable, or if "*" is present
+%% (which allows all origins).
+match_origin(ReqData) ->
+ case cowboy_req:header(<<"origin">>, ReqData) of
+ undefined -> false;
+ <<"null">> -> false;
+ Origin ->
+ AllowedOrigins = application:get_env(rabbitmq_management,
+ cors_allow_origins, []),
+ case lists:member(binary_to_list(Origin), AllowedOrigins) of
+ true ->
+ Origin;
+ false ->
+ %% Maybe the configuration explicitly allows "*".
+ case lists:member("*", AllowedOrigins) of
+ true -> Origin;
+ false -> false
+ end
+ end
+ end.
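+
+%% A configuration sketch (the origin is hypothetical) using the
+%% application environment keys consulted above; with this, match_origin/1
+%% accepts requests from the listed origin and handle_options/2 advertises
+%% a 30-minute max-age:
+%%
+%%   {rabbitmq_management,
+%%    [{cors_allow_origins, ["https://ui.example.com"]},
+%%     {cors_max_age, 1800}]}.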
diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_csp.erl b/deps/rabbitmq_management/src/rabbit_mgmt_csp.erl
new file mode 100644
index 0000000000..80bba64fc2
--- /dev/null
+++ b/deps/rabbitmq_management/src/rabbit_mgmt_csp.erl
@@ -0,0 +1,28 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+%% Sets CSP header(s) on the response if configured,
+%% see https://developer.mozilla.org/en-US/docs/Web/HTTP/CSP.
+
+-module(rabbit_mgmt_csp).
+
+-export([set_headers/1]).
+
+-define(CSP_HEADER, <<"content-security-policy">>).
+
+%%
+%% API
+%%
+
+set_headers(ReqData) ->
+ case application:get_env(rabbitmq_management, content_security_policy) of
+ undefined -> ReqData;
+ {ok, Value} ->
+ cowboy_req:set_resp_header(?CSP_HEADER,
+ rabbit_data_coercion:to_binary(Value),
+ ReqData)
+ end.
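+
+%% A configuration sketch (the policy value is a hypothetical example):
+%%
+%%   {rabbitmq_management,
+%%    [{content_security_policy, "script-src 'self' 'unsafe-eval'"}]}.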
diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_db.erl b/deps/rabbitmq_management/src/rabbit_mgmt_db.erl
new file mode 100644
index 0000000000..c45e7b86f6
--- /dev/null
+++ b/deps/rabbitmq_management/src/rabbit_mgmt_db.erl
@@ -0,0 +1,799 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mgmt_db).
+
+%% pg2 is deprecated in OTP 23.
+-compile(nowarn_deprecated_function).
+
+-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl").
+-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_metrics.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+-include_lib("rabbit_common/include/rabbit_core_metrics.hrl").
+
+-behaviour(gen_server2).
+
+-export([start_link/0]).
+
+-export([augment_exchanges/3, augment_queues/3,
+ augment_nodes/2, augment_vhosts/2,
+ get_channel/2, get_connection/2,
+ get_all_channels/1, get_all_connections/1,
+ get_all_consumers/0, get_all_consumers/1,
+ get_overview/2, get_overview/1]).
+
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
+ code_change/3, handle_pre_hibernate/1,
+ format_message_queue/2]).
+
+-import(rabbit_misc, [pget/3]).
+
+-type maybe_slide() :: exometer_slide:slide() | not_found.
+-type slide_data() :: #{atom() => {maybe_slide(), maybe_slide()}}.
+-type maybe_range() :: rabbit_mgmt_stats:maybe_range().
+-type ranges() :: {maybe_range(), maybe_range(), maybe_range(), maybe_range()}.
+-type mfargs() :: {module(), atom(), [any()]}.
+-type lookup_key() :: atom() | {atom(), any()}.
+
+-define(NO_RANGES, {no_range, no_range, no_range, no_range}).
+
+-define(DEFAULT_TIMEOUT, 30000).
+
+%% The management database responds to queries from the various
+%% rabbit_mgmt_wm_* modules. It calls out to all rabbit nodes to fetch
+%% node-local data and aggregates it before returning it. It uses a
+%% worker pool to provide a degree of parallelism.
+%%
+%% The management database reads metrics and stats written by the
+%% rabbit_mgmt_metrics_collector(s).
+%%
+%% The metrics collectors (there is one for each stats table - see ?TABLES
+%% in rabbit_mgmt_metrics.hrl) periodically read their corresponding core
+%% metrics ETS tables and aggregate the data into the management specific ETS
+%% tables.
+%%
+%% The metric collectors consume two types of core metrics: created (when an
+%% object is created, containing immutable facts about it) and stats (emitted on
+%% a timer, with mutable facts about the object). Deleted events are handled
+%% by the rabbit_mgmt_metrics_gc process.
+%% In this context "objects" means connections, channels, exchanges, queues,
+%% consumers, vhosts and nodes. Note that we do not care about users,
+%% permissions, bindings, parameters or policies.
+%%
+%% Connections and channels are identified by pids. Queues and
+%% exchanges are identified by names (which are #resource{}s). VHosts
+%% and nodes are identified by names which are binaries. And consumers
+%% are identified by {ChPid, QName, CTag}.
+%%
+%% The management collectors record the "created" metrics for
+%% connections, channels and consumers, and can thus be authoritative
+%% about those objects. For queues, exchanges and nodes we go to
+%% Mnesia to find out the immutable details of the objects.
+%%
+%% For everything other than consumers, the collectors can then augment
+%% these immutable details with stats, as the object changes. (We
+%% never emit anything very interesting about consumers).
+%%
+%% Stats on the inbound side are referred to as coarse and
+%% fine-grained. Fine grained statistics are the message rates
+%% maintained by channels and associated with tuples: {publishing
+%% channel, exchange}, {publishing channel, exchange, queue} and
+%% {queue, consuming channel}. Coarse grained stats are everything
+%% else and are associated with only one object, not a tuple.
+%%
+%% Within the management database though we rearrange things a bit: we
+%% refer to basic stats, simple stats and detail stats.
+%%
+%% Basic stats are those coarse grained stats for which we do not
+%% retain a history and do not perform any calculations -
+%% e.g. connection.state or channel.prefetch_count.
+%%
+%% Simple stats are those for which we do history / calculations which
+%% are associated with one object *after aggregation* - so these might
+%% originate with coarse grained stats - e.g. connection.send_oct or
+%% queue.messages_ready. But they might also originate from fine
+%% grained stats which have been aggregated - e.g. the message rates
+%% for a vhost or queue.
+%%
+%% Finally, detailed stats are those for which we do history /
+%% calculations which are associated with two objects. These
+%% have to have originated as fine grained stats, but can still have
+%% been aggregated.
+%%
+%% Created metrics and basic stats are stored in ETS tables by object.
+%% Simple and detailed stats (which only differ depending on how
+%% they're keyed) are stored in aggregated stats tables
+%% (see rabbit_mgmt_stats.erl and include/rabbit_mgmt_metrics.hrl)
+%%
+%% Keys from simple and detailed stats are aggregated in several
+%% records, stored in different ETS tables. We store a base counter
+%% for everything that happened before the samples we have kept,
+%% and a series of records which add the timestamp as part of the key.
+%%
+%% There is also a GC process to handle the deleted/closed
+%% rabbit events to remove the corresponding objects from the aggregated
+%% stats ETS tables.
+%%
+%% We also have an old_aggr_stats table to let us calculate instantaneous
+%% rates, in order to apportion simple / detailed stats into time
+%% slices as they come in. These instantaneous rates are not returned
+%% in response to any query, the rates shown in the API are calculated
+%% at query time. old_aggr_stats contains both coarse and fine
+%% entries. Coarse entries are pruned when the corresponding object is
+%% deleted, and fine entries are pruned when the emitting channel is
+%% closed, and whenever we receive new fine stats from a channel. So
+%% it's quite close to being a cache of "the previous stats we
+%% received".
+%%
+%% Overall the objective is to do some aggregation when metrics are read,
+%% and to only aggregate metrics between nodes at query time.
+
+%%----------------------------------------------------------------------------
+%% API
+%%----------------------------------------------------------------------------
+
+start_link() ->
+ gen_server2:start_link({local, ?MODULE}, ?MODULE, [], []).
+
+augment_exchanges(Xs, Ranges, basic) ->
+ submit(fun(Interval) -> list_exchange_stats(Ranges, Xs, Interval) end);
+augment_exchanges(Xs, Ranges, _) ->
+ submit(fun(Interval) -> detail_exchange_stats(Ranges, Xs, Interval) end).
+
+%% We can only cache if no ranges are requested.
+%% The management UI doesn't use ranges for queue listings.
+-spec augment_queues([proplists:proplist()], ranges(), basic | full) -> any().
+augment_queues(Qs, ?NO_RANGES = Ranges, basic) ->
+ submit_cached(queues,
+ fun(Interval, Queues) ->
+ list_queue_stats(Ranges, Queues, Interval)
+ end, Qs, max(60000, length(Qs) * 2));
+augment_queues(Qs, Ranges, basic) ->
+ submit(fun(Interval) -> list_queue_stats(Ranges, Qs, Interval) end);
+augment_queues(Qs, Ranges, _) ->
+ submit(fun(Interval) -> detail_queue_stats(Ranges, Qs, Interval) end).
+
+augment_vhosts(VHosts, Ranges) ->
+ submit(fun(Interval) -> vhost_stats(Ranges, VHosts, Interval) end).
+
+augment_nodes(Nodes, Ranges) ->
+ submit(fun(Interval) -> node_stats(Ranges, Nodes, Interval) end).
+
+get_channel(Name, Ranges) ->
+ submit(fun(Interval) ->
+ case created_stats_delegated(Name, channel_created_stats) of
+ not_found -> not_found;
+ Ch -> [Result] =
+ detail_channel_stats(Ranges, [Ch], Interval),
+ Result
+ end
+ end).
+
+get_connection(Name, Ranges) ->
+ submit(fun(Interval) ->
+ case created_stats_delegated(Name, connection_created_stats) of
+ not_found -> not_found;
+ C ->
+ [Result] = connection_stats(Ranges, [C], Interval),
+ Result
+ end
+ end).
+
+get_all_channels(?NO_RANGES = Ranges) ->
+ submit_cached(channels,
+ fun(Interval) ->
+ Chans = created_stats_delegated(channel_created_stats),
+ list_channel_stats(Ranges, Chans, Interval)
+ end);
+get_all_channels(Ranges) ->
+ submit(fun(Interval) ->
+ Chans = created_stats_delegated(channel_created_stats),
+ list_channel_stats(Ranges, Chans, Interval)
+ end).
+
+get_all_connections(?NO_RANGES = Ranges) ->
+ submit_cached(connections,
+ fun(Interval) ->
+                          Conns = created_stats_delegated(connection_created_stats),
+                          connection_stats(Ranges, Conns, Interval)
+ end);
+get_all_connections(Ranges) ->
+ submit(fun(Interval) ->
+                   Conns = created_stats_delegated(connection_created_stats),
+                   connection_stats(Ranges, Conns, Interval)
+ end).
+
+get_all_consumers() -> get_all_consumers(all).
+get_all_consumers(VHosts) ->
+ submit(fun(_Interval) -> consumers_stats(VHosts) end).
+
+get_overview(Ranges) -> get_overview(all, Ranges).
+get_overview(User, Ranges) ->
+ submit(fun(Interval) -> overview(User, Ranges, Interval) end).
+
+%%----------------------------------------------------------------------------
+%% Internal, gen_server2 callbacks
+%%----------------------------------------------------------------------------
+
+-record(state, {interval}).
+
+init([]) ->
+ {ok, Interval} = application:get_env(rabbit, collect_statistics_interval),
+ rabbit_log:info("Statistics database started."),
+ {ok, #state{interval = Interval}, hibernate,
+ {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, ?DESIRED_HIBERNATE}}.
+
+handle_call(_Request, _From, State) ->
+ reply(not_understood, State).
+
+handle_cast(_Request, State) ->
+ noreply(State).
+
+handle_info(_Info, State) ->
+ noreply(State).
+
+terminate(_Arg, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+reply(Reply, NewState) -> {reply, Reply, NewState, hibernate}.
+noreply(NewState) -> {noreply, NewState, hibernate}.
+
+handle_pre_hibernate(State) ->
+ %% rabbit_event can end up holding on to some memory after a busy
+ %% workout, but it's not a gen_server so we can't make it
+ %% hibernate. The best we can do is forcibly GC it here (if
+ %% rabbit_mgmt_db is hibernating the odds are rabbit_event is
+ %% quiescing in some way too).
+ _ = rpc:multicall(
+ rabbit_nodes:all_running(), rabbit_mgmt_db_handler, gc, []),
+ {hibernate, State}.
+
+format_message_queue(Opt, MQ) -> rabbit_misc:format_message_queue(Opt, MQ).
+
+%%----------------------------------------------------------------------------
+%% Internal, utilities
+%%----------------------------------------------------------------------------
+
+pget(Key, List) -> pget(Key, List, unknown).
+
+-type id_name() :: 'name' | 'route' | 'pid'.
+-spec id_name(atom()) -> id_name().
+
+%% id_name() and id() are for use when handling events, id_lookup()
+%% for when augmenting. The difference is that when handling events a
+%% queue name will be a resource, but when augmenting we will be
+%% passed a queue proplist that will already have been formatted -
+%% i.e. it will have name and vhost keys.
+id_name(node_stats) -> name;
+id_name(node_node_stats) -> route;
+id_name(vhost_stats) -> name;
+id_name(queue_stats) -> name;
+id_name(exchange_stats) -> name;
+id_name(channel_stats) -> pid;
+id_name(connection_stats) -> pid.
+
+id(Type, List) -> pget(id_name(Type), List).
+
+id_lookup(queue_stats, List) ->
+ rabbit_misc:r(pget(vhost, List), queue, pget(name, List));
+id_lookup(exchange_stats, List) ->
+ rabbit_misc:r(pget(vhost, List), exchange, pget(name, List));
+id_lookup(Type, List) ->
+ id(Type, List).
+
+
+%%----------------------------------------------------------------------------
+%% Internal, querying side api
+%%----------------------------------------------------------------------------
+
+overview(User, Ranges, Interval) ->
+ VHosts = case User of
+ all -> rabbit_vhost:list_names();
+ _ -> rabbit_mgmt_util:list_visible_vhosts_names(User)
+ end,
+ DataLookup = get_data_from_nodes({rabbit_mgmt_data, overview_data,
+ [User, Ranges, VHosts]}),
+ MessageStats = lists:append(
+ [format_range(DataLookup, vhost_stats_fine_stats,
+ pick_range(fine_stats, Ranges), Interval),
+ format_range(DataLookup, vhost_msg_rates,
+ pick_range(queue_msg_rates, Ranges), Interval),
+ format_range(DataLookup, vhost_stats_deliver_stats,
+ pick_range(deliver_get, Ranges), Interval)]),
+
+ ChurnRates = format_range(DataLookup, connection_churn_rates,
+ pick_range(queue_msg_counts, Ranges), Interval),
+ QueueStats = format_range(DataLookup, vhost_msg_stats,
+ pick_range(queue_msg_counts, Ranges), Interval),
+ %% Filtering out the user's consumers would be rather expensive so let's
+ %% just not show it
+ Consumers = case User of
+ all -> [{consumers, maps:get(consumers_count, DataLookup)}];
+ _ -> []
+ end,
+ ObjectTotals = Consumers ++
+ [{queues, length([Q || V <- VHosts, Q <- rabbit_amqqueue:list(V)])},
+ {exchanges, length([X || V <- VHosts, X <- rabbit_exchange:list(V)])},
+ {connections, maps:get(connections_count, DataLookup)},
+ {channels, maps:get(channels_count, DataLookup)}],
+
+ [{message_stats, MessageStats},
+ {churn_rates, ChurnRates},
+ {queue_totals, QueueStats},
+ {object_totals, ObjectTotals},
+ {statistics_db_event_queue, event_queue()}]. % TODO: event queue?
+
+event_queue() ->
+ lists:foldl(fun ({T, _}, Sum) ->
+ case whereis(rabbit_mgmt_metrics_collector:name(T)) of
+ P when is_pid(P) ->
+ {message_queue_len, Len} =
+ erlang:process_info(P, message_queue_len),
+ Sum + Len;
+ _ -> Sum
+ end
+ end, 0, ?CORE_TABLES).
+
+consumers_stats(VHost) ->
+ Data = get_data_from_nodes({rabbit_mgmt_data, consumer_data, [VHost]}),
+ Consumers = rabbit_mgmt_data_compat:fill_consumer_active_fields(
+ [V || {_,V} <- maps:to_list(Data)]),
+ ChPids = [ pget(channel_pid, Con)
+ || Con <- Consumers, [] =:= pget(channel_details, Con)],
+ ChDets = get_channel_detail_lookup(ChPids),
+ [merge_channel_into_obj(Con, ChDets) || Con <- Consumers].
+
+-spec list_queue_stats(ranges(), [proplists:proplist()], integer()) ->
+ [proplists:proplist()].
+list_queue_stats(Ranges, Objs, Interval) ->
+ Ids = [id_lookup(queue_stats, Obj) || Obj <- Objs],
+ DataLookup = get_data_from_nodes({rabbit_mgmt_data, all_list_queue_data, [Ids, Ranges]}),
+ adjust_hibernated_memory_use(
+ [begin
+ Id = id_lookup(queue_stats, Obj),
+ Pid = pget(pid, Obj),
+ QueueData = maps:get(Id, DataLookup),
+ Props = maps:get(queue_stats, QueueData),
+ Stats = queue_stats(QueueData, Ranges, Interval),
+ {Pid, combine(Props, Obj) ++ Stats}
+ end || Obj <- Objs]).
+
+detail_queue_stats(Ranges, Objs, Interval) ->
+ Ids = [id_lookup(queue_stats, Obj) || Obj <- Objs],
+ DataLookup = get_data_from_nodes({rabbit_mgmt_data, all_detail_queue_data,
+ [Ids, Ranges]}),
+
+ QueueStats = adjust_hibernated_memory_use(
+ [begin
+ Id = id_lookup(queue_stats, Obj),
+ Pid = pget(pid, Obj),
+ QueueData = maps:get(Id, DataLookup),
+ Props = maps:get(queue_stats, QueueData),
+ Stats = queue_stats(QueueData, Ranges, Interval),
+ ConsumerStats = rabbit_mgmt_data_compat:fill_consumer_active_fields(
+ maps:get(consumer_stats, QueueData)),
+ Consumers = [{consumer_details, ConsumerStats}],
+ StatsD = [{deliveries,
+ detail_stats(QueueData, channel_queue_stats_deliver_stats,
+ deliver_get, second(Id), Ranges, Interval)},
+ {incoming,
+ detail_stats(QueueData, queue_exchange_stats_publish,
+ fine_stats, first(Id), Ranges, Interval)}],
+ Details = augment_details(Obj, []),
+ {Pid, combine(Props, Obj) ++ Stats ++ StatsD ++ Consumers ++ Details}
+ end || Obj <- Objs]),
+
+ % patch up missing channel details
+ ChPids = lists:usort(get_pids_for_missing_channel_details(QueueStats)),
+ ChDets = get_channel_detail_lookup(ChPids),
+ Merged = merge_channel_details(QueueStats, ChDets),
+ Merged.
+
+node_node_stats(Lookup, Node, Ranges, Interval) ->
+ LocalNodeNodeMetrics = maps:from_list(ets:tab2list(node_node_metrics)),
+ RemoteNodeNodeMetrics = maps:get(node_node_metrics, Lookup),
+ NodeNodeMetrics = maps:merge(LocalNodeNodeMetrics, RemoteNodeNodeMetrics),
+ node_node_stats(Lookup, Node, Ranges, Interval, NodeNodeMetrics).
+
+node_node_stats(Lookup, Node, Ranges, Interval, NodeNodeMetrics) ->
+ Table = node_node_coarse_stats,
+ Type = coarse_node_node_stats,
+ [begin
+ {Stats, DetailId} = get_detail_stats(Key, Lookup, Table, Type,
+ first(Node), Ranges, Interval),
+ NodeMetrics = maybe_fetch_value(Key, NodeNodeMetrics),
+ lists:flatten([{stats, Stats}, DetailId, NodeMetrics])
+ end || {{T, Key}, _} <- maps:to_list(Lookup), T =:= Table].
+
+detail_stats(Lookup, Table, Type, Id, Ranges, Interval) ->
+ [begin
+ {Stats, DetailId} = get_detail_stats(Key, Lookup, Table, Type,
+ Id, Ranges, Interval),
+ [{stats, Stats}|DetailId] %TODO: not actually delegated
+ end || {{T, Key}, _} <- maps:to_list(Lookup), T =:= Table].
+
+get_detail_stats(Key, Lookup, Table, Type, Id, Ranges, Interval) ->
+ Range = pick_range(Type, Ranges),
+ Stats = format_range(Lookup, {Table, Key}, Range, Interval),
+ DetailId = format_detail_id(revert(Id, Key)),
+ {Stats, DetailId}.
+
+queue_stats(QueueData, Ranges, Interval) ->
+ message_stats(format_range(QueueData, queue_stats_publish,
+ pick_range(fine_stats, Ranges), Interval) ++
+ format_range(QueueData, queue_stats_deliver_stats,
+ pick_range(deliver_get, Ranges), Interval)) ++
+ format_range(QueueData, queue_process_stats,
+ pick_range(process_stats, Ranges), Interval) ++
+ format_range(QueueData, queue_msg_stats,
+ pick_range(queue_msg_counts, Ranges), Interval).
+
+channel_stats(ChannelData, Ranges, Interval) ->
+ message_stats(format_range(ChannelData, channel_stats_fine_stats,
+ pick_range(fine_stats, Ranges), Interval) ++
+ format_range(ChannelData, channel_stats_deliver_stats,
+ pick_range(deliver_get, Ranges), Interval)) ++
+ format_range(ChannelData, channel_process_stats,
+ pick_range(process_stats, Ranges), Interval).
+
+-spec format_range(slide_data(), lookup_key(), maybe_range(), non_neg_integer()) ->
+ proplists:proplist().
+format_range(Data, Key, Range0, Interval) ->
+ Table = case Key of
+ {T, _} -> T;
+ T -> T
+ end,
+ InstantRateFun = fun() -> fetch_slides(1, Key, Data) end,
+ SamplesFun = fun() -> fetch_slides(2, Key, Data) end,
+ Now = exometer_slide:timestamp(),
+ rabbit_mgmt_stats:format_range(Range0, Now, Table, Interval, InstantRateFun,
+ SamplesFun).
+
+%% basic.get-empty metric: filled in for slides from nodes that predate it
+fetch_slides(Ele, Key, Data)
+ when Key =:= channel_queue_stats_deliver_stats orelse
+ Key =:= channel_stats_deliver_stats orelse
+ Key =:= queue_stats_deliver_stats orelse
+ Key =:= vhost_stats_deliver_stats orelse
+ (is_tuple(Key) andalso
+ (element(1, Key) =:= channel_queue_stats_deliver_stats orelse
+ element(1, Key) =:= channel_stats_deliver_stats orelse
+ element(1, Key) =:= queue_stats_deliver_stats orelse
+ element(1, Key) =:= vhost_stats_deliver_stats)) ->
+ case element(Ele, maps:get(Key, Data)) of
+ not_found -> [];
+ Slides when is_list(Slides) ->
+ [rabbit_mgmt_data_compat:fill_get_empty_queue_metric(S)
+ || S <- Slides, not_found =/= S];
+ Slide ->
+ [rabbit_mgmt_data_compat:fill_get_empty_queue_metric(Slide)]
+ end;
+%% drop_unroutable metric: filled in for slides from nodes that predate it
+fetch_slides(Ele, Key, Data)
+ when Key =:= channel_stats_fine_stats orelse
+ Key =:= channel_exchange_stats_fine_stats orelse
+ Key =:= vhost_stats_fine_stats orelse
+ (is_tuple(Key) andalso
+ (element(1, Key) =:= channel_stats_fine_stats orelse
+ element(1, Key) =:= channel_exchange_stats_fine_stats orelse
+ element(1, Key) =:= vhost_stats_fine_stats)) ->
+ case element(Ele, maps:get(Key, Data)) of
+ not_found -> [];
+ Slides when is_list(Slides) ->
+ [rabbit_mgmt_data_compat:fill_drop_unroutable_metric(S)
+ || S <- Slides, not_found =/= S];
+ Slide ->
+ [rabbit_mgmt_data_compat:fill_drop_unroutable_metric(Slide)]
+ end;
+fetch_slides(Ele, Key, Data) ->
+ case element(Ele, maps:get(Key, Data)) of
+ not_found -> [];
+ Slides when is_list(Slides) ->
+ [S || S <- Slides, not_found =/= S];
+ Slide ->
+ [Slide]
+ end.
+
+get_channel_detail_lookup(ChPids) ->
+ ChDets = delegate_invoke({rabbit_mgmt_data, augment_channel_pids, [ChPids]}),
+ maps:from_list([{pget(pid, C), C} || [_|_] = C <- lists:append(ChDets)]).
+
+merge_channel_details(QueueStats, Lookup) ->
+ [begin
+ Cons = pget(consumer_details, QueueStat),
+ Cons1 = [merge_channel_into_obj(Con, Lookup) || Con <- Cons],
+ rabbit_misc:pset(consumer_details, Cons1, QueueStat)
+ end || QueueStat <- QueueStats].
+
+merge_channel_into_obj(Obj, ChDet) ->
+ case pget(channel_details, Obj) of
+ [] -> case maps:find(pget(channel_pid, Obj), ChDet) of
+ {ok, CHd} ->
+ rabbit_misc:pset(channel_details, CHd, Obj);
+ error ->
+ Obj
+ end;
+ _ -> Obj
+ end.
+
+get_pids_for_missing_channel_details(QueueStats) ->
+ CDs = lists:append([pget(consumer_details, QueueStat) || QueueStat <- QueueStats]),
+ [ pget(channel_pid, CD) || CD <- CDs, [] =:= pget(channel_details, CD)].
+
+
+list_exchange_stats(Ranges, Objs, Interval) ->
+ Ids = [id_lookup(exchange_stats, Obj) || Obj <- Objs],
+ DataLookup = get_data_from_nodes({rabbit_mgmt_data, all_exchange_data, [Ids, Ranges]}),
+ [begin
+ Id = id_lookup(exchange_stats, Obj),
+ ExData = maps:get(Id, DataLookup),
+ Stats = message_stats(format_range(ExData, exchange_stats_publish_out,
+ pick_range(fine_stats, Ranges), Interval) ++
+ format_range(ExData, exchange_stats_publish_in,
+ pick_range(deliver_get, Ranges), Interval)),
+ Obj ++ Stats
+ end || Obj <- Objs].
+
+detail_exchange_stats(Ranges, Objs, Interval) ->
+ Ids = [id_lookup(exchange_stats, Obj) || Obj <- Objs],
+ DataLookup = get_data_from_nodes({rabbit_mgmt_data, all_exchange_data, [Ids, Ranges]}),
+ [begin
+ Id = id_lookup(exchange_stats, Obj),
+ ExData = maps:get(Id, DataLookup),
+ Stats = message_stats(format_range(ExData, exchange_stats_publish_out,
+ pick_range(fine_stats, Ranges), Interval) ++
+ format_range(ExData, exchange_stats_publish_in,
+ pick_range(deliver_get, Ranges), Interval)),
+ StatsD = [{incoming,
+ detail_stats(ExData, channel_exchange_stats_fine_stats,
+ fine_stats, second(Id), Ranges, Interval)},
+ {outgoing,
+ detail_stats(ExData, queue_exchange_stats_publish,
+ fine_stats, second(Id), Ranges, Interval)}],
+        %% remove live state? not sure it has any!
+ Obj ++ StatsD ++ Stats
+ end || Obj <- Objs].
+
+connection_stats(Ranges, Objs, Interval) ->
+ Ids = [id_lookup(connection_stats, Obj) || Obj <- Objs],
+ DataLookup = get_data_from_nodes({rabbit_mgmt_data, all_connection_data, [Ids, Ranges]}),
+ [begin
+ Id = id_lookup(connection_stats, Obj),
+ ConnData = maps:get(Id, DataLookup),
+ Props = maps:get(connection_stats, ConnData),
+ Stats = format_range(ConnData, connection_stats_coarse_conn_stats,
+ pick_range(coarse_conn_stats, Ranges), Interval),
+ Details = augment_details(Obj, []), % TODO: not delegated
+ combine(Props, Obj) ++ Details ++ Stats
+ end || Obj <- Objs].
+
+list_channel_stats(Ranges, Objs, Interval) ->
+ Ids = [id_lookup(channel_stats, Obj) || Obj <- Objs],
+ DataLookup = get_data_from_nodes({rabbit_mgmt_data, all_list_channel_data, [Ids, Ranges]}),
+ ChannelStats =
+ [begin
+ Id = id_lookup(channel_stats, Obj),
+ ChannelData = maps:get(Id, DataLookup),
+ Props = maps:get(channel_stats, ChannelData),
+ Stats = channel_stats(ChannelData, Ranges, Interval),
+ combine(Props, Obj) ++ Stats
+ end || Obj <- Objs],
+ ChannelStats.
+
+detail_channel_stats(Ranges, Objs, Interval) ->
+ Ids = [id_lookup(channel_stats, Obj) || Obj <- Objs],
+ DataLookup = get_data_from_nodes({rabbit_mgmt_data, all_detail_channel_data,
+ [Ids, Ranges]}),
+ ChannelStats =
+ [begin
+ Id = id_lookup(channel_stats, Obj),
+ ChannelData = maps:get(Id, DataLookup),
+ Props = maps:get(channel_stats, ChannelData),
+ Stats = channel_stats(ChannelData, Ranges, Interval),
+ ConsumerStats = rabbit_mgmt_data_compat:fill_consumer_active_fields(
+ maps:get(consumer_stats, ChannelData)),
+ Consumers = [{consumer_details, ConsumerStats}],
+ StatsD = [{publishes,
+ detail_stats(ChannelData, channel_exchange_stats_fine_stats,
+ fine_stats, first(Id), Ranges, Interval)},
+ {deliveries,
+ detail_stats(ChannelData, channel_queue_stats_deliver_stats,
+ fine_stats, first(Id), Ranges, Interval)}],
+ combine(Props, Obj) ++ Consumers ++ Stats ++ StatsD
+ end || Obj <- Objs],
+ rabbit_mgmt_format:strip_pids(ChannelStats).
+
+vhost_stats(Ranges, Objs, Interval) ->
+ Ids = [id_lookup(vhost_stats, Obj) || Obj <- Objs],
+ DataLookup = get_data_from_nodes({rabbit_mgmt_data, all_vhost_data, [Ids, Ranges]}),
+ [begin
+ Id = id_lookup(vhost_stats, Obj),
+ VData = maps:get(Id, DataLookup),
+ Stats = format_range(VData, vhost_stats_coarse_conn_stats,
+ pick_range(coarse_conn_stats, Ranges), Interval) ++
+ format_range(VData, vhost_msg_stats,
+ pick_range(queue_msg_rates, Ranges), Interval),
+ StatsD = message_stats(format_range(VData, vhost_stats_fine_stats,
+ pick_range(fine_stats, Ranges), Interval) ++
+ format_range(VData, vhost_stats_deliver_stats,
+ pick_range(deliver_get, Ranges), Interval)),
+ Details = augment_details(Obj, []),
+ Obj ++ Details ++ Stats ++ StatsD
+ end || Obj <- Objs].
+
+node_stats(Ranges, Objs, Interval) ->
+ Ids = [id_lookup(node_stats, Obj) || Obj <- Objs],
+ DataLookup = get_data_from_nodes({rabbit_mgmt_data, all_node_data, [Ids, Ranges]}),
+ [begin
+ Id = id_lookup(node_stats, Obj),
+ NData = maps:get(Id, DataLookup),
+ Props = maps:get(node_stats, NData),
+ Stats = format_range(NData, node_coarse_stats,
+ pick_range(coarse_node_stats, Ranges), Interval) ++
+ format_range(NData, node_persister_stats,
+ pick_range(coarse_node_stats, Ranges), Interval) ++
+ format_range(NData, connection_churn_rates,
+ pick_range(churn_rates, Ranges), Interval),
+ NodeNodeStats = node_node_stats(NData, Id, Ranges, Interval),
+ StatsD = [{cluster_links, NodeNodeStats}],
+ MgmtStats = maps:get(mgmt_stats, NData),
+ Details = augment_details(Obj, []), % augmentation needs to be node local
+ combine(Props, Obj) ++ Details ++ Stats ++ StatsD ++ MgmtStats
+ end || Obj <- Objs].
+
+combine(New, Old) ->
+ case pget(state, Old) of
+ unknown -> New ++ Old;
+ live -> New ++ lists:keydelete(state, 1, Old);
+ _ -> lists:keydelete(state, 1, New) ++ Old
+ end.
+
+revert({'_', _}, {Id, _}) ->
+ Id;
+revert({_, '_'}, {_, Id}) ->
+ Id.
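+
+%% Worked example (hypothetical values): detail stats are keyed by
+%% two-tuples, and revert/2 recovers the "other" member of the key
+%% given a first/1 or second/1 pattern:
+%%   revert(first(ChPid),  {ChPid, QName}) -> QName
+%%   revert(second(QName), {ChPid, QName}) -> ChPid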
+
+%%----------------------------------------------------------------------------
+%% Internal, delegated operations
+%%----------------------------------------------------------------------------
+
+-spec get_data_from_nodes(mfargs()) -> #{atom() => any()}.
+get_data_from_nodes(MFA) ->
+ Data = delegate_invoke(MFA),
+ lists:foldl(fun(D, Agg) ->
+ maps_merge(fun merge_data/3, D, Agg)
+ end, #{}, Data).
+
+maps_merge(Fun, M1, M2) ->
+ JustM2 = maps:without(maps:keys(M1), M2),
+ maps:merge(JustM2,
+ maps:map(fun(K, V1) ->
+ case maps:find(K, M2) of
+ {ok, V2} -> Fun(K, V1, V2);
+ error -> V1
+ end
+ end,
+ M1)).
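+
+%% For example:
+%%   maps_merge(fun(_K, A, B) -> A + B end, #{a => 1}, #{a => 2, b => 3})
+%%   returns #{a => 3, b => 3}.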
+
+-spec merge_data(atom(), any(), any()) -> any().
+merge_data(_, A, B) when is_integer(A), is_integer(B) -> A + B;
+merge_data(_, A, B) when is_list(A), is_list(B) ->
+ A ++ B;
+merge_data(_, {A1, B1}, {[_|_] = A2, [_|_] = B2}) ->
+ {[A1 | A2], [B1 | B2]};
+merge_data(_, {A1, B1}, {A2, B2}) -> % first slide
+ {[A1, A2], [B1, B2]};
+merge_data(_, D1, D2) -> % we assume that if we get here, both values are maps
+ try
+ maps_merge(fun merge_data/3, D1, D2)
+ catch
+ error:Err ->
+ rabbit_log:debug("merge_data err ~p got: ~p ~p ~n", [Err, D1, D2]),
+ case is_map(D1) of
+ true -> D1;
+ false -> D2
+ end
+ end.
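+
+%% For example (S1/T1/S2/T2 standing in for opaque slide values):
+%%   merge_data(key, 1, 2)               -> 3
+%%   merge_data(key, [a], [b])           -> [a, b]
+%%   merge_data(key, {S1, T1}, {S2, T2}) -> {[S1, S2], [T1, T2]}   % first slide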
+
+%% We do this when retrieving the queue record rather than when
+%% storing it since the memory use will drop *after* we find out about
+%% hibernation, so to do it when we receive a queue stats event would
+%% be fiddly and racy. This should be quite cheap though.
+adjust_hibernated_memory_use(Qs) ->
+ Pids = [Pid || {Pid, Q} <- Qs, pget(idle_since, Q, not_idle) =/= not_idle],
+ %% We use delegate here not for ordering reasons but because we
+ %% want to get the right amount of parallelism and minimise
+ %% cross-cluster communication.
+ {Mem, _BadNodes} = delegate:invoke(Pids, {erlang, process_info, [memory]}),
+ MemDict = maps:from_list([{P, M} || {P, M = {memory, _}} <- Mem]),
+ [case maps:find(Pid, MemDict) of
+ error -> Q;
+ {ok, Memory} -> [Memory | proplists:delete(memory, Q)]
+ end || {Pid, Q} <- Qs].
+
+-spec created_stats_delegated(any(), fun((any()) -> any()) | atom()) -> not_found | any().
+created_stats_delegated(Key, Type) ->
+ Data = delegate_invoke({rabbit_mgmt_data, augmented_created_stats, [Key, Type]}),
+ case [X || X <- Data, X =/= not_found] of
+ [] -> not_found;
+ [X] -> X
+ end.
+
+created_stats_delegated(Type) ->
+ lists:append(
+ delegate_invoke({rabbit_mgmt_data, augmented_created_stats, [Type]})).
+
+-spec delegate_invoke(mfargs()) -> [any()].
+delegate_invoke(FunOrMFA) ->
+ MemberPids = [P || P <- pg2:get_members(management_db)],
+ {Results, Errors} = delegate:invoke(MemberPids, ?DELEGATE_PREFIX, FunOrMFA),
+ case Errors of
+ [] -> ok;
+ _ -> rabbit_log:warning("Management delegate query returned errors:~n~p", [Errors])
+ end,
+ [R || {_, R} <- Results].
+
+submit(Fun) ->
+ {ok, Interval} = application:get_env(rabbit, collect_statistics_interval),
+ worker_pool:submit(management_worker_pool, fun() -> Fun(Interval) end, reuse).
+
+submit_cached(Key, Fun) ->
+ {ok, Interval} = application:get_env(rabbit, collect_statistics_interval),
+ {ok, Res} = rabbit_mgmt_db_cache:fetch(Key, fun() -> Fun(Interval) end),
+ Res.
+
+submit_cached(Key, Fun, Arg, Timeout) ->
+ {ok, Interval} = application:get_env(rabbit, collect_statistics_interval),
+ {ok, Res} = rabbit_mgmt_db_cache:fetch(Key,
+ fun(A) -> Fun(Interval, A) end,
+ [Arg], Timeout),
+ Res.
+
+%% Note: Assumes Key is a two-tuple.
+%% If not found at first, Key is reversed and tried again.
+%% This seems to be necessary because keys in the node_node_metrics
+%% table and the Key values in Lookup may appear in either order.
+maybe_fetch_value(Key, Dict) ->
+ maybe_fetch_value(maps:is_key(Key, Dict), go, Key, Dict).
+
+maybe_fetch_value(true, _Cont, Key, Dict) ->
+ maps:get(Key, Dict);
+maybe_fetch_value(false, stop, _Key, _Dict) ->
+ [];
+maybe_fetch_value(false, go, Key, Dict) ->
+ Key2 = reverse_key(Key),
+ maybe_fetch_value(maps:is_key(Key2, Dict), stop, Key2, Dict).
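+
+%% For example, with Dict = #{{'a@host', 'b@host'} => M} (hypothetical):
+%%   maybe_fetch_value({'a@host', 'b@host'}, Dict) -> M
+%%   maybe_fetch_value({'b@host', 'a@host'}, Dict) -> M   (reversed, then found)
+%%   maybe_fetch_value({'a@host', 'c@host'}, Dict) -> []  (not found either way)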
+
+message_stats([]) ->
+ [];
+message_stats(Stats) ->
+ [{message_stats, Stats}].
+
+pick_range(Table, Ranges) ->
+ rabbit_mgmt_data:pick_range(Table, Ranges).
+
+first(Id) ->
+ {Id, '_'}.
+
+second(Id) ->
+ {'_', Id}.
+
+reverse_key({K1, K2}) ->
+ {K2, K1}.
+
+augment_details(Obj, Acc) ->
+ rabbit_mgmt_data:augment_details(Obj, Acc).
+
+format_detail_id(ChPid) when is_pid(ChPid) ->
+ augment_details([{channel, ChPid}], []);
+format_detail_id(#resource{name = Name, virtual_host = Vhost, kind = Kind}) ->
+ [{Kind, [{name, Name}, {vhost, Vhost}]}];
+format_detail_id(Node) when is_atom(Node) ->
+ [{name, Node}].
diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_db_cache.erl b/deps/rabbitmq_management/src/rabbit_mgmt_db_cache.erl
new file mode 100644
index 0000000000..b4d92dd543
--- /dev/null
+++ b/deps/rabbitmq_management/src/rabbit_mgmt_db_cache.erl
@@ -0,0 +1,143 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2016-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+-module(rabbit_mgmt_db_cache).
+
+-behaviour(gen_server).
+
+%% API functions
+-export([start_link/1]).
+-export([process_name/1,
+ fetch/2,
+ fetch/3,
+ fetch/4]).
+
+%% gen_server callbacks
+-export([init/1,
+ handle_call/3,
+ handle_cast/2,
+ handle_info/2,
+ terminate/2,
+ code_change/3]).
+
+-record(state, {data :: any() | none,
+ args :: [any()],
+ timer_ref :: undefined | reference(),
+ multiplier :: integer()}).
+
+-type error_desc() :: key_not_found | timeout | {throw, atom()}.
+-type fetch_fun() :: fun((_) -> any()) | fun(() -> any()).
+-type fetch_ret() :: {ok, any()} | {error, error_desc()}.
+
+-define(DEFAULT_MULT, 5).
+-define(DEFAULT_TIMEOUT, 60000).
+-define(CHILD(Key), {rabbit_mgmt_db_cache:process_name(Key),
+ {rabbit_mgmt_db_cache, start_link, [Key]},
+ permanent, 5000, worker,
+ [rabbit_mgmt_db_cache]}).
+-define(RESET_STATE(State), State#state{data = none, args = []}).
+
+%% Implements an adaptive cache that times the value-generating fun
+%% and caches the return value for a period equal to the time it took
+%% to produce, multiplied by some factor (default: 5).
+%% There is one cache process per key. New processes are started as
+%% required. The cache is invalidated if the arguments to the fetch
+%% fun have changed.
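+%%
+%% A rough usage sketch (the key and fun are hypothetical):
+%%
+%%   {ok, Qs} = rabbit_mgmt_db_cache:fetch(queues,
+%%                                         fun() -> expensive_queue_listing() end),
+%%
+%% If the fun takes 100ms to run, its result is served from the cache
+%% for roughly 500ms (100ms * ?DEFAULT_MULT) before being purged.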
+
+
+%%%===================================================================
+%%% API functions
+%%%===================================================================
+
+-spec fetch(atom(), fetch_fun()) -> fetch_ret().
+fetch(Key, FetchFun) ->
+ fetch(Key, FetchFun, []).
+
+-spec fetch(atom(), fetch_fun(), [any()]) -> fetch_ret().
+fetch(Key, FetchFun, Args) when is_list(Args) ->
+ fetch(Key, FetchFun, Args, ?DEFAULT_TIMEOUT).
+
+-spec fetch(atom(), fetch_fun(), [any()], integer()) -> fetch_ret().
+fetch(Key, FetchFun, FunArgs, Timeout) ->
+ ProcName = process_name(Key),
+ Pid = case whereis(ProcName) of
+ undefined ->
+ {ok, P} = supervisor:start_child(rabbit_mgmt_db_cache_sup,
+ ?CHILD(Key)),
+ P;
+ P -> P
+ end,
+ gen_server:call(Pid, {fetch, FetchFun, FunArgs}, Timeout).
+
+
+-spec process_name(atom()) -> atom().
+process_name(Key) ->
+ list_to_atom(atom_to_list(?MODULE) ++ "_" ++ atom_to_list(Key)).
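+
+%% e.g. process_name(queues) -> rabbit_mgmt_db_cache_queues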
+
+-spec start_link(atom()) -> {ok, pid()} | ignore | {error, any()}.
+start_link(Key) ->
+ gen_server:start_link({local, process_name(Key)}, ?MODULE, [], []).
+
+%%%===================================================================
+%%% gen_server callbacks
+%%%===================================================================
+
+init([]) ->
+ Mult = application:get_env(rabbitmq_management, management_db_cache_multiplier,
+ ?DEFAULT_MULT),
+ {ok, #state{data = none,
+ args = [],
+ multiplier = Mult}}.
+
+handle_call({fetch, _FetchFun, FunArgs} = Msg, From,
+ #state{data = CachedData, args = Args} = State) when
+ CachedData =/= none andalso Args =/= FunArgs ->
+ %% there is cached data that needs to be invalidated
+ handle_call(Msg, From, ?RESET_STATE(State));
+handle_call({fetch, FetchFun, FunArgs}, _From,
+ #state{data = none,
+ multiplier = Mult, timer_ref = Ref} = State) ->
+ %% force a gc here to clean up previously cleared data
+ garbage_collect(),
+ case Ref of
+ R when is_reference(R) ->
+ _ = erlang:cancel_timer(R);
+ _ -> ok
+ end,
+
+ try timer:tc(FetchFun, FunArgs) of
+ {Time, Data} ->
+ case trunc(Time / 1000 * Mult) of
+ 0 -> {reply, {ok, Data}, ?RESET_STATE(State)}; % no need to cache that
+ T ->
+ TimerRef = erlang:send_after(T, self(), purge_cache),
+ {reply, {ok, Data}, State#state{data = Data,
+ timer_ref = TimerRef,
+ args = FunArgs}}
+ end
+ catch
+ Throw -> {reply, {error, {throw, Throw}}, State}
+ end;
+handle_call({fetch, _FetchFun, _}, _From, #state{data = Data} = State) ->
+ Reply = {ok, Data},
+ {reply, Reply, State};
+handle_call(purge_cache, _From, State) ->
+ {reply, ok, ?RESET_STATE(State), hibernate}.
+
+
+handle_cast(_Msg, State) ->
+ {noreply, State}.
+
+handle_info(purge_cache, State) ->
+ {noreply, ?RESET_STATE(State), hibernate};
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_db_cache_sup.erl b/deps/rabbitmq_management/src/rabbit_mgmt_db_cache_sup.erl
new file mode 100644
index 0000000000..1805f25872
--- /dev/null
+++ b/deps/rabbitmq_management/src/rabbit_mgmt_db_cache_sup.erl
@@ -0,0 +1,29 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2016-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+-module(rabbit_mgmt_db_cache_sup).
+
+-behaviour(supervisor).
+
+%% API functions
+-export([start_link/0]).
+
+%% Supervisor callbacks
+-export([init/1]).
+
+%%%===================================================================
+%%% API functions
+%%%===================================================================
+
+start_link() ->
+ supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+%%%===================================================================
+%%% Supervisor callbacks
+%%%===================================================================
+
+init([]) ->
+ {ok, {{one_for_one, 5, 10}, []}}.
diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_dispatcher.erl b/deps/rabbitmq_management/src/rabbit_mgmt_dispatcher.erl
new file mode 100644
index 0000000000..e0de5c8bce
--- /dev/null
+++ b/deps/rabbitmq_management/src/rabbit_mgmt_dispatcher.erl
@@ -0,0 +1,185 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mgmt_dispatcher).
+
+-export([modules/1, build_dispatcher/1]).
+
+-behaviour(rabbit_mgmt_extension).
+-export([dispatcher/0, web_ui/0]).
+
+build_dispatcher(Ignore) ->
+ Routes = build_routes(Ignore),
+ cowboy_router:compile(Routes).
+
+build_routes(Ignore) ->
+ ManagementApp = module_app(?MODULE),
+ Prefix = rabbit_mgmt_util:get_path_prefix(),
+ RootIdxRtes = build_root_index_routes(Prefix, ManagementApp),
+ ApiRdrRte = build_redirect_route("/api", Prefix ++ "/api/index.html"),
+ CliRdrRte = build_redirect_route("/cli", Prefix ++ "/cli/index.html"),
+ StatsRdrRte1 = build_redirect_route("/stats", Prefix ++ "/api/index.html"),
+ StatsRdrRte2 = build_redirect_route("/doc/stats.html", Prefix ++ "/api/index.html"),
+ MgmtRdrRte = {"/mgmt", rabbit_mgmt_wm_redirect, "/"},
+ LocalPaths = [{module_app(M), "www"} || M <- modules(Ignore)],
+ LocalStaticRte = {"/[...]", rabbit_mgmt_wm_static, LocalPaths},
+ % NB: order is significant in the routing list
+ Routes0 = build_module_routes(Ignore) ++
+ [ApiRdrRte, CliRdrRte, MgmtRdrRte, StatsRdrRte1, StatsRdrRte2, LocalStaticRte],
+ Routes1 = maybe_add_path_prefix(Routes0, Prefix),
+ % NB: ensure the root routes are first
+ Routes2 = RootIdxRtes ++ Routes1,
+ [{'_', Routes2}].
+
+build_root_index_routes("", ManagementApp) ->
+ [{"/", rabbit_mgmt_wm_static, root_idx_file(ManagementApp)}];
+build_root_index_routes(Prefix, ManagementApp) ->
+ [{"/", rabbit_mgmt_wm_redirect, Prefix ++ "/"},
+ {Prefix, rabbit_mgmt_wm_static, root_idx_file(ManagementApp)}].
+
+build_redirect_route(Path, Location) ->
+ {Path, rabbit_mgmt_wm_redirect, Location}.
+
+root_idx_file(ManagementApp) ->
+ {priv_file, ManagementApp, "www/index.html"}.
+
+maybe_add_path_prefix(Routes, "") ->
+ Routes;
+maybe_add_path_prefix(Routes, Prefix) ->
+ [{Prefix ++ Path, Mod, Args} || {Path, Mod, Args} <- Routes].
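+
+%% e.g. with a (hypothetical) path prefix "/rmq", {"/api", Mod, Args}
+%% becomes {"/rmq/api", Mod, Args}.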
+
+build_module_routes(Ignore) ->
+ Routes = [Module:dispatcher() || Module <- modules(Ignore)],
+ [{"/api" ++ Path, Mod, Args} || {Path, Mod, Args} <- lists:append(Routes)].
+
+modules(IgnoreApps) ->
+ [Module || {App, Module, Behaviours} <-
+ %% Sort rabbitmq_management modules first. This is
+ %% a microoptimization because most files belong to
+ %% this application. Making it first avoids several
+                   %% stat(2) calls which have a great chance of failing in other
+ %% applications.
+ lists:sort(
+ fun
+ ({rabbitmq_management, _, _}, _) -> true;
+ (_, {rabbitmq_management, _, _}) -> false;
+ ({A, _, _}, {B, _, _}) -> A =< B
+ end,
+ rabbit_misc:all_module_attributes(behaviour)),
+ not lists:member(App, IgnoreApps),
+ lists:member(rabbit_mgmt_extension, Behaviours)].
+
+module_app(Module) ->
+ {ok, App} = application:get_application(Module),
+ App.
+
+%%----------------------------------------------------------------------------
+
+web_ui() -> [{javascript, <<"dispatcher.js">>}].
+
+dispatcher() ->
+ [{"/overview", rabbit_mgmt_wm_overview, []},
+ {"/cluster-name", rabbit_mgmt_wm_cluster_name, []},
+ {"/nodes", rabbit_mgmt_wm_nodes, []},
+ {"/nodes/:node", rabbit_mgmt_wm_node, []},
+ {"/nodes/:node/memory", rabbit_mgmt_wm_node_memory, [absolute]},
+ {"/nodes/:node/memory/relative", rabbit_mgmt_wm_node_memory, [relative]},
+ {"/nodes/:node/memory/ets", rabbit_mgmt_wm_node_memory_ets, [absolute]},
+ {"/nodes/:node/memory/ets/relative", rabbit_mgmt_wm_node_memory_ets, [relative]},
+ {"/nodes/:node/memory/ets/:filter", rabbit_mgmt_wm_node_memory_ets, [absolute]},
+ {"/nodes/:node/memory/ets/:filter/relative", rabbit_mgmt_wm_node_memory_ets, [relative]},
+ {"/extensions", rabbit_mgmt_wm_extensions, []},
+ {"/all-configuration", rabbit_mgmt_wm_definitions, []}, %% This was the old name, let's not break things gratuitously.
+ {"/definitions", rabbit_mgmt_wm_definitions, []},
+ {"/definitions/:vhost", rabbit_mgmt_wm_definitions, []},
+ {"/parameters", rabbit_mgmt_wm_parameters, []},
+ {"/parameters/:component", rabbit_mgmt_wm_parameters, []},
+ {"/parameters/:component/:vhost", rabbit_mgmt_wm_parameters, []},
+ {"/parameters/:component/:vhost/:name", rabbit_mgmt_wm_parameter, []},
+ {"/global-parameters", rabbit_mgmt_wm_global_parameters, []},
+ {"/global-parameters/:name", rabbit_mgmt_wm_global_parameter, []},
+ {"/policies", rabbit_mgmt_wm_policies, []},
+ {"/policies/:vhost", rabbit_mgmt_wm_policies, []},
+ {"/policies/:vhost/:name", rabbit_mgmt_wm_policy, []},
+ {"/operator-policies", rabbit_mgmt_wm_operator_policies, []},
+ {"/operator-policies/:vhost", rabbit_mgmt_wm_operator_policies, []},
+ {"/operator-policies/:vhost/:name", rabbit_mgmt_wm_operator_policy, []},
+ {"/vhost-limits/:vhost/:name", rabbit_mgmt_wm_limit, []},
+ {"/vhost-limits", rabbit_mgmt_wm_limits, []},
+ {"/vhost-limits/:vhost", rabbit_mgmt_wm_limits, []},
+ {"/connections", rabbit_mgmt_wm_connections, []},
+ {"/connections/:connection", rabbit_mgmt_wm_connection, []},
+ {"/connections/:connection/channels", rabbit_mgmt_wm_connection_channels, []},
+ {"/channels", rabbit_mgmt_wm_channels, []},
+ {"/channels/:channel", rabbit_mgmt_wm_channel, []},
+ {"/consumers", rabbit_mgmt_wm_consumers, []},
+ {"/consumers/:vhost", rabbit_mgmt_wm_consumers, []},
+ {"/exchanges", rabbit_mgmt_wm_exchanges, []},
+ {"/exchanges/:vhost", rabbit_mgmt_wm_exchanges, []},
+ {"/exchanges/:vhost/:exchange", rabbit_mgmt_wm_exchange, []},
+ {"/exchanges/:vhost/:exchange/publish", rabbit_mgmt_wm_exchange_publish, []},
+ {"/exchanges/:vhost/:exchange/bindings/source", rabbit_mgmt_wm_bindings, [exchange_source]},
+ {"/exchanges/:vhost/:exchange/bindings/destination", rabbit_mgmt_wm_bindings, [exchange_destination]},
+ {"/queues", rabbit_mgmt_wm_queues, []},
+ {"/queues/:vhost", rabbit_mgmt_wm_queues, []},
+ {"/queues/:vhost/:queue", rabbit_mgmt_wm_queue, []},
+ {"/queues/:vhost/:destination/bindings", rabbit_mgmt_wm_bindings, [queue]},
+ {"/queues/:vhost/:queue/contents", rabbit_mgmt_wm_queue_purge, []},
+ {"/queues/:vhost/:queue/get", rabbit_mgmt_wm_queue_get, []},
+ {"/queues/:vhost/:queue/actions", rabbit_mgmt_wm_queue_actions, []},
+ {"/bindings", rabbit_mgmt_wm_bindings, [all]},
+ {"/bindings/:vhost", rabbit_mgmt_wm_bindings, [all]},
+ {"/bindings/:vhost/e/:source/:dtype/:destination", rabbit_mgmt_wm_bindings, [source_destination]},
+ {"/bindings/:vhost/e/:source/:dtype/:destination/:props", rabbit_mgmt_wm_binding, []},
+ {"/vhosts", rabbit_mgmt_wm_vhosts, []},
+ {"/vhosts/:vhost", rabbit_mgmt_wm_vhost, []},
+ {"/vhosts/:vhost/start/:node", rabbit_mgmt_wm_vhost_restart, []},
+ {"/vhosts/:vhost/permissions", rabbit_mgmt_wm_permissions_vhost, []},
+ {"/vhosts/:vhost/topic-permissions", rabbit_mgmt_wm_topic_permissions_vhost, []},
+ %% /connections/:connection is already taken, we cannot use our standard scheme here
+ {"/vhosts/:vhost/connections", rabbit_mgmt_wm_connections_vhost, []},
+ %% /channels/:channel is already taken, we cannot use our standard scheme here
+ {"/vhosts/:vhost/channels", rabbit_mgmt_wm_channels_vhost, []},
+ {"/users/bulk-delete", rabbit_mgmt_wm_users_bulk_delete, []},
+ {"/users/without-permissions", rabbit_mgmt_wm_users, [without_permissions]},
+ {"/users", rabbit_mgmt_wm_users, [all]},
+ {"/users/:user", rabbit_mgmt_wm_user, []},
+ {"/users/:user/permissions", rabbit_mgmt_wm_permissions_user, []},
+ {"/users/:user/topic-permissions", rabbit_mgmt_wm_topic_permissions_user, []},
+ {"/user-limits/:user/:name", rabbit_mgmt_wm_user_limit, []},
+ {"/user-limits", rabbit_mgmt_wm_user_limits, []},
+ {"/user-limits/:user", rabbit_mgmt_wm_user_limits, []},
+ {"/feature-flags", rabbit_mgmt_wm_feature_flags, []},
+ {"/feature-flags/:name/enable", rabbit_mgmt_wm_feature_flag_enable, []},
+ {"/whoami", rabbit_mgmt_wm_whoami, []},
+ {"/permissions", rabbit_mgmt_wm_permissions, []},
+ {"/permissions/:vhost/:user", rabbit_mgmt_wm_permission, []},
+ {"/topic-permissions", rabbit_mgmt_wm_topic_permissions, []},
+ {"/topic-permissions/:vhost/:user", rabbit_mgmt_wm_topic_permission, []},
+ {"/topic-permissions/:vhost/:user/:exchange", rabbit_mgmt_wm_topic_permission, []},
+ {"/aliveness-test/:vhost", rabbit_mgmt_wm_aliveness_test, []},
+ %% now deprecated
+ {"/healthchecks/node", rabbit_mgmt_wm_healthchecks, []},
+ {"/healthchecks/node/:node", rabbit_mgmt_wm_healthchecks, []},
+ %% modern generation of fine-grained health checks
+ {"/health/checks/alarms", rabbit_mgmt_wm_health_check_alarms, []},
+ {"/health/checks/local-alarms", rabbit_mgmt_wm_health_check_local_alarms, []},
+ {"/health/checks/certificate-expiration/:within/:unit", rabbit_mgmt_wm_health_check_certificate_expiration, []},
+ {"/health/checks/port-listener/:port", rabbit_mgmt_wm_health_check_port_listener, []},
+ {"/health/checks/protocol-listener/:protocol", rabbit_mgmt_wm_health_check_protocol_listener, []},
+ {"/health/checks/virtual-hosts", rabbit_mgmt_wm_health_check_virtual_hosts, []},
+ {"/health/checks/node-is-mirror-sync-critical", rabbit_mgmt_wm_health_check_node_is_mirror_sync_critical, []},
+ {"/health/checks/node-is-quorum-critical", rabbit_mgmt_wm_health_check_node_is_quorum_critical, []},
+ {"/reset", rabbit_mgmt_wm_reset, []},
+ {"/reset/:node", rabbit_mgmt_wm_reset, []},
+ {"/rebalance/queues", rabbit_mgmt_wm_rebalance_queues, [{queues, all}]},
+ {"/auth", rabbit_mgmt_wm_auth, []},
+ {"/auth/attempts/:node", rabbit_mgmt_wm_auth_attempts, [all]},
+ {"/auth/attempts/:node/source", rabbit_mgmt_wm_auth_attempts, [by_source]},
+ {"/login", rabbit_mgmt_wm_login, []}
+ ].
diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_extension.erl b/deps/rabbitmq_management/src/rabbit_mgmt_extension.erl
new file mode 100644
index 0000000000..1ee72aaf36
--- /dev/null
+++ b/deps/rabbitmq_management/src/rabbit_mgmt_extension.erl
@@ -0,0 +1,16 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mgmt_extension).
+
+%% Return a Cowboy dispatcher table to integrate
+-callback dispatcher() -> [{string(), atom(), [atom()]}].
+
+%% Return a proplist of information for the web UI to integrate
+%% this extension. Currently the proplist should have one key,
+%% 'javascript', the name of a javascript file to load and run.
+-callback web_ui() -> proplists:proplist().
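+
+%% A minimal sketch of an extension module (all names below are
+%% hypothetical, not part of this plugin):
+%%
+%%   -module(my_mgmt_extension).
+%%   -behaviour(rabbit_mgmt_extension).
+%%   -export([dispatcher/0, web_ui/0]).
+%%
+%%   dispatcher() -> [{"/my-things", my_mgmt_wm_things, []}].
+%%   web_ui()     -> [{javascript, <<"my_things.js">>}].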
diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_headers.erl b/deps/rabbitmq_management/src/rabbit_mgmt_headers.erl
new file mode 100644
index 0000000000..89bb464c01
--- /dev/null
+++ b/deps/rabbitmq_management/src/rabbit_mgmt_headers.erl
@@ -0,0 +1,34 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+%% This module contains helper functions that control
+%% response headers related to CORS, CSP, HSTS and so on.
+-module(rabbit_mgmt_headers).
+
+-export([set_common_permission_headers/2]).
+-export([set_cors_headers/2, set_hsts_headers/2, set_csp_headers/2]).
+
+%%
+%% API
+%%
+
+set_cors_headers(ReqData, Module) ->
+ rabbit_mgmt_cors:set_headers(ReqData, Module).
+
+set_hsts_headers(ReqData, _Module) ->
+ rabbit_mgmt_hsts:set_headers(ReqData).
+
+set_csp_headers(ReqData, _Module) ->
+ rabbit_mgmt_csp:set_headers(ReqData).
+
+set_common_permission_headers(ReqData0, EndpointModule) ->
+ lists:foldl(fun(Fun, ReqData) ->
+ Fun(ReqData, EndpointModule)
+ end, ReqData0,
+ [fun set_csp_headers/2,
+ fun set_hsts_headers/2,
+ fun set_cors_headers/2]).
diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_hsts.erl b/deps/rabbitmq_management/src/rabbit_mgmt_hsts.erl
new file mode 100644
index 0000000000..004f3d4e7e
--- /dev/null
+++ b/deps/rabbitmq_management/src/rabbit_mgmt_hsts.erl
@@ -0,0 +1,28 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+%% Sets HSTS header(s) on the response if configured,
+%% see https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Strict-Transport-Security.
+
+-module(rabbit_mgmt_hsts).
+
+-export([set_headers/1]).
+
+-define(HSTS_HEADER, <<"strict-transport-security">>).
+
+%%
+%% API
+%%
+
+set_headers(ReqData) ->
+ case application:get_env(rabbitmq_management, strict_transport_security) of
+ undefined -> ReqData;
+ {ok, Value} ->
+ cowboy_req:set_resp_header(?HSTS_HEADER,
+ rabbit_data_coercion:to_binary(Value),
+ ReqData)
+ end.
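+
+%% A configuration sketch (the header value is a hypothetical example):
+%%
+%%   {rabbitmq_management,
+%%    [{strict_transport_security, "max-age=31536000; includeSubDomains"}]}.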
diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_load_definitions.erl b/deps/rabbitmq_management/src/rabbit_mgmt_load_definitions.erl
new file mode 100644
index 0000000000..ba9962d6cf
--- /dev/null
+++ b/deps/rabbitmq_management/src/rabbit_mgmt_load_definitions.erl
@@ -0,0 +1,27 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mgmt_load_definitions).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-export([boot/0, maybe_load_definitions/0, maybe_load_definitions_from/2]).
+
+%% This module exists for backwards compatibility only.
+%% Definition import functionality is now a core server feature.
+
+boot() ->
+ rabbit_log:debug("Will import definitions file from management.load_definitions"),
+ rabbit_definitions:maybe_load_definitions(rabbitmq_management, load_definitions).
+
+maybe_load_definitions() ->
+ rabbit_definitions:maybe_load_definitions().
+
+maybe_load_definitions_from(true, Dir) ->
+ rabbit_definitions:maybe_load_definitions_from(true, Dir);
+maybe_load_definitions_from(false, File) ->
+ rabbit_definitions:maybe_load_definitions_from(false, File).
diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_reset_handler.erl b/deps/rabbitmq_management/src/rabbit_mgmt_reset_handler.erl
new file mode 100644
index 0000000000..4eb0219e46
--- /dev/null
+++ b/deps/rabbitmq_management/src/rabbit_mgmt_reset_handler.erl
@@ -0,0 +1,79 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+%% When management extensions are enabled and/or disabled at runtime, the
+%% management web dispatch mechanism needs to be reset. This event handler
+%% deals with responding to 'plugins_changed' events for management
+%% extensions, forcing a reset when necessary.
+
+-module(rabbit_mgmt_reset_handler).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-behaviour(gen_event).
+
+-export([init/1, handle_call/2, handle_event/2, handle_info/2,
+ terminate/2, code_change/3]).
+
+-rabbit_boot_step({?MODULE,
+ [{description, "management extension handling"},
+ {mfa, {gen_event, add_handler,
+ [rabbit_event, ?MODULE, []]}},
+ {cleanup, {gen_event, delete_handler,
+ [rabbit_event, ?MODULE, []]}},
+ {requires, rabbit_event},
+ {enables, recovery}]}).
+
+-import(rabbit_misc, [pget/2, pget/3]).
+
+%%----------------------------------------------------------------------------
+
+init([]) ->
+ {ok, []}.
+
+handle_call(_Request, State) ->
+ {ok, not_understood, State}.
+
+handle_event(#event{type = plugins_changed, props = Details}, State) ->
+ Enabled = pget(enabled, Details),
+ Disabled = pget(disabled, Details),
+ case extensions_changed(Enabled ++ Disabled) of
+ true ->
+ _ = rabbit_mgmt_app:reset_dispatcher(Disabled),
+ ok;
+ false -> ok
+ end,
+ {ok, State};
+
+handle_event(_Event, State) ->
+ {ok, State}.
+
+handle_info(_Info, State) ->
+ {ok, State}.
+
+terminate(_Arg, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+%%----------------------------------------------------------------------------
+
+%% We explicitly ignore the case where management has been
+%% started/stopped since the dispatcher is either freshly created or
+%% about to vanish.
+extensions_changed(Apps) ->
+ not lists:member(rabbitmq_management, Apps) andalso
+ lists:any(fun is_extension/1, [Mod || App <- Apps, Mod <- mods(App)]).
+
+is_extension(Mod) ->
+ lists:member(rabbit_mgmt_extension,
+ pget(behaviour, Mod:module_info(attributes), [])).
+
+mods(App) ->
+ {ok, Modules} = application:get_key(App, modules),
+ Modules.
diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_stats.erl b/deps/rabbitmq_management/src/rabbit_mgmt_stats.erl
new file mode 100644
index 0000000000..14b3432790
--- /dev/null
+++ b/deps/rabbitmq_management/src/rabbit_mgmt_stats.erl
@@ -0,0 +1,739 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2010-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mgmt_stats).
+
+-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl").
+-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_metrics.hrl").
+
+-export([format_range/6]).
+
+-define(MICRO_TO_MILLI, 1000).
+
+-type maybe_range() :: no_range | #range{}.
+-type maybe_slide() :: exometer_slide:slide() | not_found.
+-type maybe_slide_fun() :: fun(() -> [maybe_slide()]).
+
+-export_type([maybe_range/0, maybe_slide/0, maybe_slide_fun/0]).
+
+
+%%----------------------------------------------------------------------------
+%% Query-time
+%%----------------------------------------------------------------------------
+
+-spec format_range(maybe_range(), exometer_slide:timestamp(), atom(),
+ non_neg_integer(), maybe_slide_fun(), maybe_slide_fun()) ->
+ proplists:proplist().
+format_range(no_range, Now, Table, Interval, InstantRateFun, _SamplesFun) ->
+ format_no_range(Table, Now, Interval, InstantRateFun);
+format_range(#range{last = Last, first = First, incr = Incr}, _Now, Table,
+ Interval, _InstantRateFun, SamplesFun) ->
+ case SamplesFun() of
+ [] ->
+ [];
+ Slides ->
+ Empty = empty(Table, 0),
+ Slide = exometer_slide:sum(Last, First, Incr, Slides, Empty),
+ List = exometer_slide:to_list(Last, First, Slide),
+ Length = length(List),
+ RangePoint = Last - Interval,
+ LastTwo = get_last_two(List, Length),
+ {Total, Rate} = calculate_rate(LastTwo, Table, RangePoint),
+ {Samples, SampleTotals} = format_samples(List, empty(Table, []),
+ Empty),
+ format_rate(Table, Total, Rate, Samples, SampleTotals, Length)
+ end.
+
+-spec format_no_range(atom(), exometer_slide:timestamp(), non_neg_integer(),
+ maybe_slide_fun()) -> proplists:proplist().
+format_no_range(Table, Now, Interval, InstantRateFun) ->
+ RangePoint = ((Now div Interval) * Interval) - Interval,
+ case calculate_instant_rate(InstantRateFun, Table, RangePoint) of
+ {Total, Rate} -> format_rate(Table, Total, Rate);
+ not_found -> []
+ end.
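+%% A worked example of the range point arithmetic above (values are made
+%% up): with Now = 12345 and Interval = 5000,
+%%   ((12345 div 5000) * 5000) - 5000 =:= 5000,
+%% i.e. the start of the previous complete interval.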
+
+-spec calculate_instant_rate(maybe_slide_fun(), atom(), integer()) ->
+ 'not_found' | {integer(), number()} | {tuple(), tuple()}.
+calculate_instant_rate(Fun, Table, RangePoint) ->
+ case Fun() of
+ [] ->
+ not_found;
+ Slides ->
+ Slide = exometer_slide:sum(Slides),
+ case exometer_slide:last_two(Slide) of
+ [] -> {empty(Table, 0), empty(Table, 0.0)};
+ [Last | T] ->
+ Total = get_total(Slide, Table),
+ Rate = rate_from_last_increment(Table, Last, T, RangePoint),
+ {Total, Rate}
+ end
+ end.
+
+calculate_rate([], Table, _RangePoint) ->
+ {empty(Table, 0), empty(Table, 0.0)};
+calculate_rate([{_, Total} = Last | T], Table, RangePoint) ->
+ Rate = rate_from_last_increment(Table, Last, T, RangePoint),
+ {Total, Rate}.
+
+get_last_two(List, Length) when Length =< 2 ->
+ lists:reverse(List);
+get_last_two(List, Length) ->
+ lists:reverse(lists:nthtail(Length - 2, List)).
+
+get_total(Slide, Table) ->
+ case exometer_slide:last(Slide) of
+ undefined -> empty(Table, 0);
+ Other -> Other
+ end.
+
+format_samples(Samples, ESamples, ETotal) ->
+ lists:foldl(fun({TS, Sample}, {SamplesAcc, TotalsAcc}) ->
+ append_full_sample(TS, Sample, SamplesAcc, TotalsAcc)
+ end, {ESamples, ETotal}, Samples).
+
+%% Ts for "totals", Ss for "samples", Vs for "values"
+%%
+%% connection_stats_coarse_conn_stats, channel_stats_fine_stats,
+%% vhost_stats_fine_stats, queue_msg_stats, vhost_msg_stats
+append_full_sample(TS, {V1, V2, V3}, {S1, S2, S3}, {T1, T2, T3}) ->
+ {{append_sample(V1, TS, S1), append_sample(V2, TS, S2), append_sample(V3, TS, S3)},
+ {V1 + T1, V2 + T2, V3 + T3}};
+%% channel_exchange_stats_fine_stats
+append_full_sample(TS, {V1, V2, V3, V4}, {S1, S2, S3, S4}, {T1, T2, T3, T4}) ->
+ {{append_sample(V1, TS, S1), append_sample(V2, TS, S2), append_sample(V3, TS, S3),
+ append_sample(V4, TS, S4)},
+ {V1 + T1, V2 + T2, V3 + T3, V4 + T4}};
+%% connection_churn_rates
+append_full_sample(TS, {V1, V2, V3, V4, V5, V6, V7},
+ {S1, S2, S3, S4, S5, S6, S7},
+ {T1, T2, T3, T4, T5, T6, T7}) ->
+ {{append_sample(V1, TS, S1), append_sample(V2, TS, S2),
+ append_sample(V3, TS, S3), append_sample(V4, TS, S4),
+ append_sample(V5, TS, S5), append_sample(V6, TS, S6),
+ append_sample(V7, TS, S7)},
+ {V1 + T1, V2 + T2, V3 + T3, V4 + T4, V5 + T5, V6 + T6, V7 + T7}};
+%% channel_queue_stats_deliver_stats, queue_stats_deliver_stats,
+%% vhost_stats_deliver_stats, channel_stats_deliver_stats
+%% node_coarse_stats
+append_full_sample(TS, {V1, V2, V3, V4, V5, V6, V7, V8},
+ {S1, S2, S3, S4, S5, S6, S7, S8},
+ {T1, T2, T3, T4, T5, T6, T7, T8}) ->
+ {{append_sample(V1, TS, S1), append_sample(V2, TS, S2),
+ append_sample(V3, TS, S3), append_sample(V4, TS, S4),
+ append_sample(V5, TS, S5), append_sample(V6, TS, S6),
+ append_sample(V7, TS, S7), append_sample(V8, TS, S8)},
+ {V1 + T1, V2 + T2, V3 + T3, V4 + T4, V5 + T5, V6 + T6, V7 + T7, V8 + T8}};
+%% channel_process_stats, queue_stats_publish, queue_exchange_stats_publish,
+%% exchange_stats_publish_out, exchange_stats_publish_in, queue_process_stats
+append_full_sample(TS, {V1}, {S1}, {T1}) ->
+ {{append_sample(V1, TS, S1)}, {V1 + T1}};
+%% node_persister_stats
+append_full_sample(TS,
+ {V1, V2, V3, V4, V5, V6, V7, V8, V9, V10, V11, V12, V13, V14,
+ V15, V16, V17, V18, V19, V20},
+ {S1, S2, S3, S4, S5, S6, S7, S8, S9, S10, S11, S12, S13, S14,
+ S15, S16, S17, S18, S19, S20},
+ {T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20}
+ ) ->
+ {{append_sample(V1, TS, S1), append_sample(V2, TS, S2),
+ append_sample(V3, TS, S3), append_sample(V4, TS, S4),
+ append_sample(V5, TS, S5), append_sample(V6, TS, S6),
+ append_sample(V7, TS, S7), append_sample(V8, TS, S8),
+ append_sample(V9, TS, S9), append_sample(V10, TS, S10),
+ append_sample(V11, TS, S11), append_sample(V12, TS, S12),
+ append_sample(V13, TS, S13), append_sample(V14, TS, S14),
+ append_sample(V15, TS, S15), append_sample(V16, TS, S16),
+ append_sample(V17, TS, S17), append_sample(V18, TS, S18),
+ append_sample(V19, TS, S19), append_sample(V20, TS, S20)},
+ {V1 + T1, V2 + T2, V3 + T3, V4 + T4, V5 + T5, V6 + T6, V7 + T7, V8 + T8,
+ V9 + T9, V10 + T10, V11 + T11, V12 + T12, V13 + T13, V14 + T14, V15 + T15,
+ V16 + T16, V17 + T17, V18 + T18, V19 + T19, V20 + T20}};
+%% node_node_coarse_stats, vhost_stats_coarse_connection_stats, queue_msg_rates,
+%% vhost_msg_rates
+append_full_sample(TS, {V1, V2}, {S1, S2}, {T1, T2}) ->
+ {{append_sample(V1, TS, S1), append_sample(V2, TS, S2)}, {V1 + T1, V2 + T2}}.
+
+
+format_rate(connection_stats_coarse_conn_stats, {TR, TS, TRe}, {RR, RS, RRe}) ->
+ [
+ {send_oct, TS},
+ {send_oct_details, [{rate, RS}]},
+ {recv_oct, TR},
+ {recv_oct_details, [{rate, RR}]},
+ {reductions, TRe},
+ {reductions_details, [{rate, RRe}]}
+ ];
+format_rate(vhost_stats_coarse_conn_stats, {TR, TS}, {RR, RS}) ->
+ [
+ {send_oct, TS},
+ {send_oct_details, [{rate, RS}]},
+ {recv_oct, TR},
+ {recv_oct_details, [{rate, RR}]}
+ ];
+format_rate(Type, {TR, TW}, {RR, RW}) when Type =:= vhost_msg_rates;
+ Type =:= queue_msg_rates ->
+ [
+ {disk_reads, TR},
+ {disk_reads_details, [{rate, RR}]},
+ {disk_writes, TW},
+ {disk_writes_details, [{rate, RW}]}
+ ];
+format_rate(Type, {TP, TC, TRe, TDrop}, {RP, RC, RRe, RDrop})
+ when Type =:= channel_stats_fine_stats;
+ Type =:= vhost_stats_fine_stats;
+ Type =:= channel_exchange_stats_fine_stats ->
+ [
+ {publish, TP},
+ {publish_details, [{rate, RP}]},
+ {confirm, TC},
+ {confirm_details, [{rate, RC}]},
+ {return_unroutable, TRe},
+ {return_unroutable_details, [{rate, RRe}]},
+ {drop_unroutable, TDrop},
+ {drop_unroutable_details, [{rate, RDrop}]}
+ ];
+format_rate(Type, {TG, TGN, TD, TDN, TR, TA, TDG, TGE},
+ {RG, RGN, RD, RDN, RR, RA, RDG, RGE})
+ when Type =:= channel_queue_stats_deliver_stats;
+ Type =:= channel_stats_deliver_stats;
+ Type =:= vhost_stats_deliver_stats;
+ Type =:= queue_stats_deliver_stats ->
+ [
+ {get, TG},
+ {get_details, [{rate, RG}]},
+ {get_no_ack, TGN},
+ {get_no_ack_details, [{rate, RGN}]},
+ {deliver, TD},
+ {deliver_details, [{rate, RD}]},
+ {deliver_no_ack, TDN},
+ {deliver_no_ack_details, [{rate, RDN}]},
+ {redeliver, TR},
+ {redeliver_details, [{rate, RR}]},
+ {ack, TA},
+ {ack_details, [{rate, RA}]},
+ {deliver_get, TDG},
+ {deliver_get_details, [{rate, RDG}]},
+ {get_empty, TGE},
+ {get_empty_details, [{rate, RGE}]}
+ ];
+format_rate(Type, {TR}, {RR}) when Type =:= channel_process_stats;
+ Type =:= queue_process_stats ->
+ [
+ {reductions, TR},
+ {reductions_details, [{rate, RR}]}
+ ];
+format_rate(exchange_stats_publish_out, {TP}, {RP}) ->
+ [
+ {publish_out, TP},
+ {publish_out_details, [{rate, RP}]}
+ ];
+format_rate(exchange_stats_publish_in, {TP}, {RP}) ->
+ [
+ {publish_in, TP},
+ {publish_in_details, [{rate, RP}]}
+ ];
+format_rate(Type, {TP}, {RP}) when Type =:= queue_stats_publish;
+ Type =:= queue_exchange_stats_publish ->
+ [
+ {publish, TP},
+ {publish_details, [{rate, RP}]}
+ ];
+format_rate(Type, {TR, TU, TM}, {RR, RU, RM}) when Type =:= queue_msg_stats;
+ Type =:= vhost_msg_stats ->
+ [
+ {messages_ready, TR},
+ {messages_ready_details, [{rate, RR}]},
+ {messages_unacknowledged, TU},
+ {messages_unacknowledged_details, [{rate, RU}]},
+ {messages, TM},
+ {messages_details, [{rate, RM}]}
+ ];
+format_rate(node_coarse_stats, {TF, TS, TM, TD, TP, TGC, TGCW, TCS},
+ {RF, RS, RM, RD, RP, RGC, RGCW, RCS}) ->
+ [
+ {mem_used, TM},
+ {mem_used_details, [{rate, RM}]},
+ {fd_used, TF},
+ {fd_used_details, [{rate, RF}]},
+ {sockets_used, TS},
+ {sockets_used_details, [{rate, RS}]},
+ {proc_used, TP},
+ {proc_used_details, [{rate, RP}]},
+ {disk_free, TD},
+ {disk_free_details, [{rate, RD}]},
+ {gc_num, TGC},
+ {gc_num_details, [{rate, RGC}]},
+ {gc_bytes_reclaimed, TGCW},
+ {gc_bytes_reclaimed_details, [{rate, RGCW}]},
+ {context_switches, TCS},
+ {context_switches_details, [{rate, RCS}]}
+ ];
+format_rate(node_persister_stats,
+ {TIR, TIB, TIA, TIWC, TIWB, TIWAT, TIS, TISAT, TISC,
+ TISEAT, TIRC, TMRTC, TMDTC, TMSRC, TMSWC, TQIJWC, TQIWC, TQIRC,
+ TIO, TIOAT},
+ {RIR, RIB, RIA, RIWC, RIWB, RIWAT, RIS, RISAT, RISC,
+ RISEAT, RIRC, RMRTC, RMDTC, RMSRC, RMSWC, RQIJWC, RQIWC, RQIRC,
+ RIO, RIOAT}) ->
+ %% Calculates average times for read/write/sync/seek from the
+ %% accumulated time and count
+ %% io_<op>_avg_time is the average operation time for the life of the node
+ %% io_<op>_avg_time_details/rate is the average operation time during the
+ %% last time unit calculated (thus similar to an instant rate)
+ [
+ {io_read_count, TIR},
+ {io_read_count_details, [{rate, RIR}]},
+ {io_read_bytes, TIB},
+ {io_read_bytes_details, [{rate, RIB}]},
+ {io_read_avg_time, avg_time(TIA, TIR)},
+ {io_read_avg_time_details, [{rate, avg_time(RIA, RIR)}]},
+ {io_write_count, TIWC},
+ {io_write_count_details, [{rate, RIWC}]},
+ {io_write_bytes, TIWB},
+ {io_write_bytes_details, [{rate, RIWB}]},
+ {io_write_avg_time, avg_time(TIWAT, TIWC)},
+ {io_write_avg_time_details, [{rate, avg_time(RIWAT, RIWC)}]},
+ {io_sync_count, TIS},
+ {io_sync_count_details, [{rate, RIS}]},
+ {io_sync_avg_time, avg_time(TISAT, TIS)},
+ {io_sync_avg_time_details, [{rate, avg_time(RISAT, RIS)}]},
+ {io_seek_count, TISC},
+ {io_seek_count_details, [{rate, RISC}]},
+ {io_seek_avg_time, avg_time(TISEAT, TISC)},
+ {io_seek_avg_time_details, [{rate, avg_time(RISEAT, RISC)}]},
+ {io_reopen_count, TIRC},
+ {io_reopen_count_details, [{rate, RIRC}]},
+ {mnesia_ram_tx_count, TMRTC},
+ {mnesia_ram_tx_count_details, [{rate, RMRTC}]},
+ {mnesia_disk_tx_count, TMDTC},
+ {mnesia_disk_tx_count_details, [{rate, RMDTC}]},
+ {msg_store_read_count, TMSRC},
+ {msg_store_read_count_details, [{rate, RMSRC}]},
+ {msg_store_write_count, TMSWC},
+ {msg_store_write_count_details, [{rate, RMSWC}]},
+ {queue_index_journal_write_count, TQIJWC},
+ {queue_index_journal_write_count_details, [{rate, RQIJWC}]},
+ {queue_index_write_count, TQIWC},
+ {queue_index_write_count_details, [{rate, RQIWC}]},
+ {queue_index_read_count, TQIRC},
+ {queue_index_read_count_details, [{rate, RQIRC}]},
+ {io_file_handle_open_attempt_count, TIO},
+ {io_file_handle_open_attempt_count_details, [{rate, RIO}]},
+ {io_file_handle_open_attempt_avg_time, avg_time(TIOAT, TIO)},
+ {io_file_handle_open_attempt_avg_time_details, [{rate, avg_time(RIOAT, RIO)}]}
+ ];
+format_rate(node_node_coarse_stats, {TS, TR}, {RS, RR}) ->
+ [
+ {send_bytes, TS},
+ {send_bytes_details, [{rate, RS}]},
+ {recv_bytes, TR},
+ {recv_bytes_details, [{rate, RR}]}
+ ];
+format_rate(connection_churn_rates, {TCCr, TCCo, TChCr, TChCo, TQD, TQCr, TQCo},
+ {RCCr, RCCo, RChCr, RChCo, RQD, RQCr, RQCo}) ->
+ [
+ {connection_created, TCCr},
+ {connection_created_details, [{rate, RCCr}]},
+ {connection_closed, TCCo},
+ {connection_closed_details, [{rate, RCCo}]},
+ {channel_created, TChCr},
+ {channel_created_details, [{rate, RChCr}]},
+ {channel_closed, TChCo},
+ {channel_closed_details, [{rate, RChCo}]},
+ {queue_declared, TQD},
+ {queue_declared_details, [{rate, RQD}]},
+ {queue_created, TQCr},
+ {queue_created_details, [{rate, RQCr}]},
+ {queue_deleted, TQCo},
+ {queue_deleted_details, [{rate, RQCo}]}
+ ].
+
+format_rate(connection_stats_coarse_conn_stats, {TR, TS, TRe}, {RR, RS, RRe},
+ {SR, SS, SRe}, {STR, STS, STRe}, Length) ->
+ [
+ {send_oct, TS},
+ {send_oct_details, [{rate, RS},
+ {samples, SS}] ++ average(SS, STS, Length)},
+ {recv_oct, TR},
+ {recv_oct_details, [{rate, RR},
+ {samples, SR}] ++ average(SR, STR, Length)},
+ {reductions, TRe},
+ {reductions_details, [{rate, RRe},
+ {samples, SRe}] ++ average(SRe, STRe, Length)}
+ ];
+format_rate(vhost_stats_coarse_conn_stats, {TR, TS}, {RR, RS}, {SR, SS},
+ {STR, STS}, Length) ->
+ [
+ {send_oct, TS},
+ {send_oct_details, [{rate, RS},
+ {samples, SS}] ++ average(SS, STS, Length)},
+ {recv_oct, TR},
+ {recv_oct_details, [{rate, RR},
+ {samples, SR}] ++ average(SR, STR, Length)}
+ ];
+format_rate(Type, {TR, TW}, {RR, RW}, {SR, SW}, {STR, STW}, Length)
+ when Type =:= vhost_msg_rates;
+ Type =:= queue_msg_rates ->
+ [
+ {disk_reads, TR},
+ {disk_reads_details, [{rate, RR},
+ {samples, SR}] ++ average(SR, STR, Length)},
+ {disk_writes, TW},
+ {disk_writes_details, [{rate, RW},
+ {samples, SW}] ++ average(SW, STW, Length)}
+ ];
+format_rate(Type, {TP, TC, TRe, TDrop}, {RP, RC, RRe, RDrop},
+ {SP, SC, SRe, SDrop}, {STP, STC, STRe, STDrop}, Length)
+ when Type =:= channel_stats_fine_stats;
+ Type =:= vhost_stats_fine_stats;
+ Type =:= channel_exchange_stats_fine_stats ->
+ [
+ {publish, TP},
+ {publish_details, [{rate, RP},
+ {samples, SP}] ++ average(SP, STP, Length)},
+ {confirm, TC},
+ {confirm_details, [{rate, RC},
+ {samples, SC}] ++ average(SC, STC, Length)},
+ {return_unroutable, TRe},
+ {return_unroutable_details, [{rate, RRe},
+ {samples, SRe}] ++ average(SRe, STRe, Length)},
+ {drop_unroutable, TDrop},
+ {drop_unroutable_details, [{rate, RDrop},
+ {samples, SDrop}] ++ average(SDrop, STDrop, Length)}
+ ];
+format_rate(Type, {TG, TGN, TD, TDN, TR, TA, TDG, TGE},
+ {RG, RGN, RD, RDN, RR, RA, RDG, RGE},
+ {SG, SGN, SD, SDN, SR, SA, SDG, SGE},
+ {STG, STGN, STD, STDN, STR, STA, STDG, STGE},
+ Length)
+ when Type =:= channel_queue_stats_deliver_stats;
+ Type =:= channel_stats_deliver_stats;
+ Type =:= vhost_stats_deliver_stats;
+ Type =:= queue_stats_deliver_stats ->
+ [
+ {get, TG},
+ {get_details, [{rate, RG},
+ {samples, SG}] ++ average(SG, STG, Length)},
+ {get_no_ack, TGN},
+ {get_no_ack_details, [{rate, RGN},
+ {samples, SGN}] ++ average(SGN, STGN, Length)},
+ {deliver, TD},
+ {deliver_details, [{rate, RD},
+ {samples, SD}] ++ average(SD, STD, Length)},
+ {deliver_no_ack, TDN},
+ {deliver_no_ack_details, [{rate, RDN},
+ {samples, SDN}] ++ average(SDN, STDN, Length)},
+ {redeliver, TR},
+ {redeliver_details, [{rate, RR},
+ {samples, SR}] ++ average(SR, STR, Length)},
+ {ack, TA},
+ {ack_details, [{rate, RA},
+ {samples, SA}] ++ average(SA, STA, Length)},
+ {deliver_get, TDG},
+ {deliver_get_details, [{rate, RDG},
+ {samples, SDG}] ++ average(SDG, STDG, Length)},
+ {get_empty, TGE},
+ {get_empty_details, [{rate, RGE},
+ {samples, SGE}] ++ average(SGE, STGE, Length)}
+ ];
+format_rate(Type, {TR}, {RR}, {SR}, {STR}, Length)
+ when Type =:= channel_process_stats;
+ Type =:= queue_process_stats ->
+ [
+ {reductions, TR},
+ {reductions_details, [{rate, RR},
+ {samples, SR}] ++ average(SR, STR, Length)}
+ ];
+format_rate(exchange_stats_publish_out, {TP}, {RP}, {SP}, {STP}, Length) ->
+ [
+ {publish_out, TP},
+ {publish_out_details, [{rate, RP},
+ {samples, SP}] ++ average(SP, STP, Length)}
+ ];
+format_rate(exchange_stats_publish_in, {TP}, {RP}, {SP}, {STP}, Length) ->
+ [
+ {publish_in, TP},
+ {publish_in_details, [{rate, RP},
+ {samples, SP}] ++ average(SP, STP, Length)}
+ ];
+format_rate(Type, {TP}, {RP}, {SP}, {STP}, Length)
+ when Type =:= queue_stats_publish;
+ Type =:= queue_exchange_stats_publish ->
+ [
+ {publish, TP},
+ {publish_details, [{rate, RP},
+ {samples, SP}] ++ average(SP, STP, Length)}
+ ];
+format_rate(Type, {TR, TU, TM}, {RR, RU, RM}, {SR, SU, SM}, {STR, STU, STM},
+ Length) when Type =:= queue_msg_stats;
+ Type =:= vhost_msg_stats ->
+ [
+ {messages_ready, TR},
+ {messages_ready_details, [{rate, RR},
+ {samples, SR}] ++ average(SR, STR, Length)},
+ {messages_unacknowledged, TU},
+ {messages_unacknowledged_details, [{rate, RU},
+ {samples, SU}] ++ average(SU, STU, Length)},
+ {messages, TM},
+ {messages_details, [{rate, RM},
+ {samples, SM}] ++ average(SM, STM, Length)}
+ ];
+format_rate(node_coarse_stats, {TF, TS, TM, TD, TP, TGC, TGCW, TCS},
+ {RF, RS, RM, RD, RP, RGC, RGCW, RCS},
+ {SF, SS, SM, SD, SP, SGC, SGCW, SCS},
+ {STF, STS, STM, STD, STP, STGC, STGCW, STCS}, Length) ->
+ [
+ {mem_used, TM},
+ {mem_used_details, [{rate, RM},
+ {samples, SM}] ++ average(SM, STM, Length)},
+ {fd_used, TF},
+ {fd_used_details, [{rate, RF},
+ {samples, SF}] ++ average(SF, STF, Length)},
+ {sockets_used, TS},
+ {sockets_used_details, [{rate, RS},
+ {samples, SS}] ++ average(SS, STS, Length)},
+ {proc_used, TP},
+ {proc_used_details, [{rate, RP},
+ {samples, SP}] ++ average(SP, STP, Length)},
+ {disk_free, TD},
+ {disk_free_details, [{rate, RD},
+ {samples, SD}] ++ average(SD, STD, Length)},
+ {gc_num, TGC},
+ {gc_num_details, [{rate, RGC},
+ {samples, SGC}] ++ average(SGC, STGC, Length)},
+ {gc_bytes_reclaimed, TGCW},
+ {gc_bytes_reclaimed_details, [{rate, RGCW},
+ {samples, SGCW}] ++ average(SGCW, STGCW, Length)},
+ {context_switches, TCS},
+ {context_switches_details, [{rate, RCS},
+ {samples, SCS}] ++ average(SCS, STCS, Length)}
+ ];
+format_rate(node_persister_stats,
+ {TIR, TIB, TIA, TIWC, TIWB, TIWAT, TIS, TISAT, TISC,
+ TISEAT, TIRC, TMRTC, TMDTC, TMSRC, TMSWC, TQIJWC, TQIWC, TQIRC,
+ TIO, TIOAT},
+ {RIR, RIB, _RIA, RIWC, RIWB, _RIWAT, RIS, _RISAT, RISC,
+ _RISEAT, RIRC, RMRTC, RMDTC, RMSRC, RMSWC, RQIJWC, RQIWC, RQIRC,
+ RIO, _RIOAT},
+ {SIR, SIB, SIA, SIWC, SIWB, SIWAT, SIS, SISAT, SISC,
+ SISEAT, SIRC, SMRTC, SMDTC, SMSRC, SMSWC, SQIJWC, SQIWC, SQIRC,
+ SIO, SIOAT},
+ {STIR, STIB, _STIA, STIWC, STIWB, _STIWAT, STIS, _STISAT, STISC,
+ _STISEAT, STIRC, STMRTC, STMDTC, STMSRC, STMSWC, STQIJWC, STQIWC, STQIRC,
+ STIO, _STIOAT}, Length) ->
+ %% Calculates average times for read/write/sync/seek from the
+ %% accumulated time and count
+ %% io_<op>_avg_time is the average operation time for the life of the node
+ %% io_<op>_avg_time_details/rate is the average operation time during the
+ %% last time unit calculated (thus similar to an instant rate)
+
+ [
+ {io_read_count, TIR},
+ {io_read_count_details, [{rate, RIR},
+ {samples, SIR}] ++ average(SIR, STIR, Length)},
+ {io_read_bytes, TIB},
+ {io_read_bytes_details, [{rate, RIB},
+ {samples, SIB}] ++ average(SIB, STIB, Length)},
+ {io_read_avg_time, avg_time(TIA, TIR)},
+ {io_read_avg_time_details, [{samples, unit_samples(SIA, SIR)}] ++
+ avg_time_details(avg_time(TIA, TIR))},
+ {io_write_count, TIWC},
+ {io_write_count_details, [{rate, RIWC},
+ {samples, SIWC}] ++ average(SIWC, STIWC, Length)},
+ {io_write_bytes, TIWB},
+ {io_write_bytes_details, [{rate, RIWB},
+ {samples, SIWB}] ++ average(SIWB, STIWB, Length)},
+ {io_write_avg_time, avg_time(TIWAT, TIWC)},
+ {io_write_avg_time_details, [{samples, unit_samples(SIWAT, SIWC)}] ++
+ avg_time_details(avg_time(TIWAT, TIWC))},
+ {io_sync_count, TIS},
+ {io_sync_count_details, [{rate, RIS},
+ {samples, SIS}] ++ average(SIS, STIS, Length)},
+ {io_sync_avg_time, avg_time(TISAT, TIS)},
+ {io_sync_avg_time_details, [{samples, unit_samples(SISAT, SIS)}] ++
+ avg_time_details(avg_time(TISAT, TIS))},
+ {io_seek_count, TISC},
+ {io_seek_count_details, [{rate, RISC},
+ {samples, SISC}] ++ average(SISC, STISC, Length)},
+ {io_seek_avg_time, avg_time(TISEAT, TISC)},
+ {io_seek_avg_time_details, [{samples, unit_samples(SISEAT, SISC)}] ++
+ avg_time_details(avg_time(TISEAT, TISC))},
+ {io_reopen_count, TIRC},
+ {io_reopen_count_details, [{rate, RIRC},
+ {samples, SIRC}] ++ average(SIRC, STIRC, Length)},
+ {mnesia_ram_tx_count, TMRTC},
+ {mnesia_ram_tx_count_details, [{rate, RMRTC},
+ {samples, SMRTC}] ++ average(SMRTC, STMRTC, Length)},
+ {mnesia_disk_tx_count, TMDTC},
+ {mnesia_disk_tx_count_details, [{rate, RMDTC},
+ {samples, SMDTC}] ++ average(SMDTC, STMDTC, Length)},
+ {msg_store_read_count, TMSRC},
+ {msg_store_read_count_details, [{rate, RMSRC},
+ {samples, SMSRC}] ++ average(SMSRC, STMSRC, Length)},
+ {msg_store_write_count, TMSWC},
+ {msg_store_write_count_details, [{rate, RMSWC},
+ {samples, SMSWC}] ++ average(SMSWC, STMSWC, Length)},
+ {queue_index_journal_write_count, TQIJWC},
+ {queue_index_journal_write_count_details, [{rate, RQIJWC},
+ {samples, SQIJWC}] ++ average(SQIJWC, STQIJWC, Length)},
+ {queue_index_write_count, TQIWC},
+ {queue_index_write_count_details, [{rate, RQIWC},
+ {samples, SQIWC}] ++ average(SQIWC, STQIWC, Length)},
+ {queue_index_read_count, TQIRC},
+ {queue_index_read_count_details, [{rate, RQIRC},
+ {samples, SQIRC}] ++ average(SQIRC, STQIRC, Length)},
+ {io_file_handle_open_attempt_count, TIO},
+ {io_file_handle_open_attempt_count_details, [{rate, RIO},
+ {samples, SIO}] ++ average(SIO, STIO, Length)},
+ {io_file_handle_open_attempt_avg_time, avg_time(TIOAT, TIO)},
+ {io_file_handle_open_attempt_avg_time_details,
+ [{samples, unit_samples(SIOAT, SIO)}] ++ avg_time_details(avg_time(TIOAT, TIO))}
+ ];
+format_rate(node_node_coarse_stats, {TS, TR}, {RS, RR}, {SS, SR}, {STS, STR}, Length) ->
+ [
+ {send_bytes, TS},
+ {send_bytes_details, [{rate, RS},
+ {samples, SS}] ++ average(SS, STS, Length)},
+ {recv_bytes, TR},
+ {recv_bytes_details, [{rate, RR},
+ {samples, SR}] ++ average(SR, STR, Length)}
+ ];
+format_rate(connection_churn_rates, {TCCr, TCCo, TChCr, TChCo, TQD, TQCr, TQCo},
+ {RCCr, RCCo, RChCr, RChCo, RQD, RQCr, RQCo},
+ {SCCr, SCCo, SChCr, SChCo, SQD, SQCr, SQCo},
+ {STCCr, STCCo, STChCr, STChCo, STQD, STQCr, STQCo}, Length) ->
+ [
+ {connection_created, TCCr},
+ {connection_created_details, [{rate, RCCr},
+ {samples, SCCr}] ++ average(SCCr, STCCr, Length)},
+ {connection_closed, TCCo},
+ {connection_closed_details, [{rate, RCCo},
+ {samples, SCCo}] ++ average(SCCo, STCCo, Length)},
+ {channel_created, TChCr},
+ {channel_created_details, [{rate, RChCr},
+ {samples, SChCr}] ++ average(SChCr, STChCr, Length)},
+ {channel_closed, TChCo},
+ {channel_closed_details, [{rate, RChCo},
+ {samples, SChCo}] ++ average(SChCo, STChCo, Length)},
+ {queue_declared, TQD},
+ {queue_declared_details, [{rate, RQD},
+ {samples, SQD}] ++ average(SQD, STQD, Length)},
+ {queue_created, TQCr},
+ {queue_created_details, [{rate, RQCr},
+ {samples, SQCr}] ++ average(SQCr, STQCr, Length)},
+ {queue_deleted, TQCo},
+ {queue_deleted_details, [{rate, RQCo},
+ {samples, SQCo}] ++ average(SQCo, STQCo, Length)}
+ ].
+
+avg_time_details(Avg) ->
+ %% Rates don't make sense here; populate the rate fields with the average.
+ [{rate, Avg}, {avg_rate, Avg}, {avg, Avg}].
+
+average(_Samples, _Total, Length) when Length =< 1 ->
+ [];
+average(Samples, Total, Length) ->
+ [{sample, S2}, {timestamp, T2}] = hd(Samples),
+ [{sample, S1}, {timestamp, T1}] = lists:last(Samples),
+ [{avg_rate, (S2 - S1) * 1000 / (T2 - T1)},
+ {avg, Total / Length}].
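+%% A worked example with made-up samples (the head is the newest sample,
+%% which is what the subtraction above relies on): if
+%%   hd(Samples)         = [{sample, 50}, {timestamp, 5000}],
+%%   lists:last(Samples) = [{sample, 0},  {timestamp, 0}],
+%% then avg_rate = (50 - 0) * 1000 / (5000 - 0) = 10.0 per second.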
+
+rate_from_last_increment(Table, {TS, _} = Last, T, RangePoint) ->
+ case TS - RangePoint of % [0]
+ D when D >= 0 ->
+ case rate_from_last_increment(Last, T) of
+ unknown ->
+ empty(Table, 0.0);
+ Rate ->
+ Rate
+ end;
+ _ ->
+ empty(Table, 0.0)
+ end.
+
+%% [0] Only display the rate if it's live - i.e. ((the end of the
+%% range) - interval) corresponds to the last data point we have
+rate_from_last_increment(_Total, []) ->
+ unknown;
+rate_from_last_increment(Total, [H | _T]) ->
+ rate_from_difference(Total, H).
+
+rate_from_difference({TS0, {A0, A1}}, {TS1, {B0, B1}}) ->
+ Interval = TS0 - TS1,
+ {rate(A0, B0, Interval), rate(A1, B1, Interval)};
+rate_from_difference({TS0, {A0, A1, A2}}, {TS1, {B0, B1, B2}}) ->
+ Interval = TS0 - TS1,
+ {rate(A0, B0, Interval), rate(A1, B1, Interval), rate(A2, B2, Interval)};
+rate_from_difference({TS0, {A0, A1, A2, A3}}, {TS1, {B0, B1, B2, B3}}) ->
+ Interval = TS0 - TS1,
+ {rate(A0, B0, Interval), rate(A1, B1, Interval), rate(A2, B2, Interval), rate(A3, B3, Interval)};
+rate_from_difference({TS0, {A0, A1, A2, A3, A4}}, {TS1, {B0, B1, B2, B3, B4}}) ->
+ Interval = TS0 - TS1,
+ {rate(A0, B0, Interval), rate(A1, B1, Interval), rate(A2, B2, Interval), rate(A3, B3, Interval),
+ rate(A4, B4, Interval)};
+rate_from_difference({TS0, {A0, A1, A2, A3, A4, A5}}, {TS1, {B0, B1, B2, B3, B4, B5}}) ->
+ Interval = TS0 - TS1,
+ {rate(A0, B0, Interval), rate(A1, B1, Interval), rate(A2, B2, Interval), rate(A3, B3, Interval),
+ rate(A4, B4, Interval), rate(A5, B5, Interval)};
+rate_from_difference({TS0, {A0, A1, A2, A3, A4, A5, A6}},
+ {TS1, {B0, B1, B2, B3, B4, B5, B6}}) ->
+ Interval = TS0 - TS1,
+ {rate(A0, B0, Interval), rate(A1, B1, Interval), rate(A2, B2, Interval),
+ rate(A3, B3, Interval), rate(A4, B4, Interval), rate(A5, B5, Interval),
+ rate(A6, B6, Interval)};
+rate_from_difference({TS0, {A0, A1, A2, A3, A4, A5, A6, A7}},
+ {TS1, {B0, B1, B2, B3, B4, B5, B6, B7}}) ->
+ Interval = TS0 - TS1,
+ {rate(A0, B0, Interval), rate(A1, B1, Interval), rate(A2, B2, Interval),
+ rate(A3, B3, Interval), rate(A4, B4, Interval), rate(A5, B5, Interval),
+ rate(A6, B6, Interval), rate(A7, B7, Interval)};
+rate_from_difference({TS0, {A0, A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13,
+ A14, A15, A16, A17, A18, A19}},
+ {TS1, {B0, B1, B2, B3, B4, B5, B6, B7, B8, B9, B10, B11, B12, B13,
+ B14, B15, B16, B17, B18, B19}}) ->
+ Interval = TS0 - TS1,
+ {rate(A0, B0, Interval), rate(A1, B1, Interval), rate(A2, B2, Interval),
+ rate(A3, B3, Interval), rate(A4, B4, Interval), rate(A5, B5, Interval),
+ rate(A6, B6, Interval), rate(A7, B7, Interval), rate(A8, B8, Interval),
+ rate(A9, B9, Interval), rate(A10, B10, Interval), rate(A11, B11, Interval),
+ rate(A12, B12, Interval), rate(A13, B13, Interval), rate(A14, B14, Interval),
+ rate(A15, B15, Interval), rate(A16, B16, Interval), rate(A17, B17, Interval),
+ rate(A18, B18, Interval), rate(A19, B19, Interval)};
+rate_from_difference({TS0, {A0}}, {TS1, {B0}}) ->
+ Interval = TS0 - TS1,
+ {rate(A0, B0, Interval)}.
+
+rate(V1, V2, Interval) when is_number(V1), is_number(V2) ->
+ (V1 - V2) * 1000 / Interval;
+rate(_, _, _) ->
+ 0.
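+%% A worked example (made-up counter values): a counter read as 500 now
+%% and as 450 one 5000 ms interval earlier yields
+%%   rate(500, 450, 5000) =:= (500 - 450) * 1000 / 5000 =:= 10.0
+%% units per second.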
+
+append_sample(S, TS, List) ->
+ [[{sample, S}, {timestamp, TS}] | List].
+
+%%----------------------------------------------------------------------------
+%% Internal helpers
+%%----------------------------------------------------------------------------
+
+avg_time(_Total, Count) when Count == 0;
+ Count == 0.0 ->
+ 0.0;
+avg_time(Total, Count) ->
+ (Total / Count) / ?MICRO_TO_MILLI.
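+%% A worked example (hypothetical figures): 12000 microseconds accumulated
+%% over 4 operations gives
+%%   avg_time(12000, 4) =:= (12000 / 4) / 1000 =:= 3.0
+%% milliseconds per operation.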
+
+unit_samples(Total, Count) ->
+ lists:zipwith(fun(T, C) ->
+ TS = proplists:get_value(timestamp, T),
+ Sample = avg_time(proplists:get_value(sample, T),
+ proplists:get_value(sample, C)),
+ [{sample, Sample}, {timestamp, TS}]
+ end, Total, Count).
+
+empty(Table, Def) ->
+ rabbit_mgmt_data:empty(Table, Def).
diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_sup.erl b/deps/rabbitmq_management/src/rabbit_mgmt_sup.erl
new file mode 100644
index 0000000000..5f34f0d160
--- /dev/null
+++ b/deps/rabbitmq_management/src/rabbit_mgmt_sup.erl
@@ -0,0 +1,42 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mgmt_sup).
+
+-behaviour(supervisor).
+
+-export([init/1]).
+-export([start_link/0]).
+-export([setup_wm_logging/0]).
+
+-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_metrics.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+-include_lib("rabbit_common/include/rabbit_core_metrics.hrl").
+
+init([]) ->
+ DB = {rabbit_mgmt_db, {rabbit_mgmt_db, start_link, []},
+ permanent, ?WORKER_WAIT, worker, [rabbit_mgmt_db]},
+ WP = {management_worker_pool_sup, {worker_pool_sup, start_link, [3, management_worker_pool]},
+ permanent, ?SUPERVISOR_WAIT, supervisor, [management_worker_pool_sup]},
+ DBC = {rabbit_mgmt_db_cache_sup, {rabbit_mgmt_db_cache_sup, start_link, []},
+ permanent, ?SUPERVISOR_WAIT, supervisor, [rabbit_mgmt_db_cache_sup]},
+ {ok, {{one_for_one, 100, 1}, [DB, WP, DBC]}}.
+
+start_link() ->
+ Res = supervisor:start_link({local, ?MODULE}, ?MODULE, []),
+ setup_wm_logging(),
+ Res.
+
+%% While the project has switched to Cowboy for HTTP handling, we still use
+%% the logger from Webmachine; at least until RabbitMQ switches to Lager or
+%% similar.
+setup_wm_logging() ->
+ {ok, LogDir} = application:get_env(rabbitmq_management, http_log_dir),
+ case LogDir of
+ none -> ok;
+ _ -> webmachine_log:add_handler(webmachine_log_handler, [LogDir])
+ end.
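+
+%% A configuration sketch (the directory is an example, not a default):
+%% setting
+%%   {rabbitmq_management, [{http_log_dir, "/var/log/rabbitmq/mgmt"}]}
+%% in the advanced config enables the access log handler above; with
+%% http_log_dir left at 'none', no handler is installed.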
diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_sup_sup.erl b/deps/rabbitmq_management/src/rabbit_mgmt_sup_sup.erl
new file mode 100644
index 0000000000..e8dd164869
--- /dev/null
+++ b/deps/rabbitmq_management/src/rabbit_mgmt_sup_sup.erl
@@ -0,0 +1,28 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mgmt_sup_sup).
+
+-behaviour(supervisor2).
+
+-export([init/1]).
+-export([start_link/0, start_child/0]).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+start_child() ->
+ supervisor2:start_child(?MODULE, sup()).
+
+sup() ->
+ {rabbit_mgmt_sup, {rabbit_mgmt_sup, start_link, []},
+ temporary, ?SUPERVISOR_WAIT, supervisor, [rabbit_mgmt_sup]}.
+
+init([]) ->
+ {ok, {{one_for_one, 0, 1}, [sup()]}}.
+
+start_link() ->
+ supervisor2:start_link({local, ?MODULE}, ?MODULE, []).
diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_util.erl b/deps/rabbitmq_management/src/rabbit_mgmt_util.erl
new file mode 100644
index 0000000000..93f167e480
--- /dev/null
+++ b/deps/rabbitmq_management/src/rabbit_mgmt_util.erl
@@ -0,0 +1,1220 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mgmt_util).
+
+%% TODO sort all this out; maybe there's scope for rabbit_mgmt_request?
+
+-export([is_authorized/2, is_authorized_admin/2, is_authorized_admin/4,
+ is_authorized_admin/3, vhost/1, vhost_from_headers/1]).
+-export([is_authorized_vhost/2, is_authorized_user/3,
+ is_authorized_user/4,
+ is_authorized_monitor/2, is_authorized_policies/2,
+ is_authorized_vhost_visible/2,
+ is_authorized_global_parameters/2]).
+
+-export([bad_request/3, bad_request_exception/4, internal_server_error/4,
+ id/2, parse_bool/1, parse_int/1]).
+-export([with_decode/4, not_found/3]).
+-export([with_channel/4, with_channel/5]).
+-export([props_to_method/2, props_to_method/4]).
+-export([all_or_one_vhost/2, reply/3, responder_map/1,
+ filter_vhost/3]).
+-export([filter_conn_ch_list/3, filter_user/2, list_login_vhosts/2,
+ list_login_vhosts_names/2]).
+-export([filter_tracked_conn_list/3]).
+-export([with_decode/5, decode/1, decode/2, set_resp_header/3,
+ args/1, read_complete_body/1]).
+-export([reply_list/3, reply_list/5, reply_list/4,
+ sort_list/2, destination_type/1, reply_list_or_paginate/3
+ ]).
+-export([post_respond/1, columns/1, is_monitor/1]).
+-export([list_visible_vhosts/1, list_visible_vhosts_names/1,
+ b64decode_or_throw/1, no_range/0, range/1,
+ range_ceil/1, floor/2, ceil/1, ceil/2]).
+-export([pagination_params/1,
+ maybe_filter_by_keyword/4,
+ get_value_param/2,
+ augment_resources/6
+ ]).
+-export([direct_request/6]).
+-export([qs_val/2]).
+-export([get_path_prefix/0]).
+-export([catch_no_such_user_or_vhost/2]).
+
+-export([disable_stats/1, enable_queue_totals/1]).
+
+-import(rabbit_misc, [pget/2]).
+
+-include("rabbit_mgmt.hrl").
+-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-define(FRAMING, rabbit_framing_amqp_0_9_1).
+-define(DEFAULT_PAGE_SIZE, 100).
+-define(MAX_PAGE_SIZE, 500).
+
+-define(MAX_RANGE, 500).
+
+-record(pagination, {page = undefined, page_size = undefined,
+ name = undefined, use_regex = undefined}).
+
+-record(aug_ctx, {req_data :: cowboy_req:req(),
+ pagination :: #pagination{},
+ sort = [] :: [atom()],
+ columns = [] :: [atom()],
+ data :: term()}).
+
+%%--------------------------------------------------------------------
+
+is_authorized(ReqData, Context) ->
+ is_authorized(ReqData, Context, '', fun(_) -> true end).
+
+is_authorized_admin(ReqData, Context) ->
+ is_authorized(ReqData, Context,
+ <<"Not administrator user">>,
+ fun(#user{tags = Tags}) -> is_admin(Tags) end).
+
+is_authorized_admin(ReqData, Context, Token) ->
+ is_authorized(ReqData, Context,
+ rabbit_data_coercion:to_binary(
+ application:get_env(rabbitmq_management, uaa_client_id, "")),
+ Token, <<"Not administrator user">>,
+ fun(#user{tags = Tags}) -> is_admin(Tags) end).
+
+is_authorized_admin(ReqData, Context, Username, Password) ->
+ case is_basic_auth_disabled() of
+ true ->
+ Msg = "HTTP access denied: basic auth disabled",
+ rabbit_log:warning(Msg),
+ not_authorised(Msg, ReqData, Context);
+ false ->
+ is_authorized(ReqData, Context, Username, Password,
+ <<"Not administrator user">>,
+ fun(#user{tags = Tags}) -> is_admin(Tags) end)
+ end.
+
+is_authorized_monitor(ReqData, Context) ->
+ is_authorized(ReqData, Context,
+ <<"Not monitor user">>,
+ fun(#user{tags = Tags}) -> is_monitor(Tags) end).
+
+is_authorized_vhost(ReqData, Context) ->
+ is_authorized(ReqData, Context,
+ <<"User not authorised to access virtual host">>,
+ fun(#user{tags = Tags} = User) ->
+ is_admin(Tags) orelse user_matches_vhost(ReqData, User)
+ end).
+
+is_authorized_vhost_visible(ReqData, Context) ->
+ is_authorized(ReqData, Context,
+ <<"User not authorised to access virtual host">>,
+ fun(#user{tags = Tags} = User) ->
+ is_admin(Tags) orelse user_matches_vhost_visible(ReqData, User)
+ end).
+
+disable_stats(ReqData) ->
+ MgmtOnly = case qs_val(<<"disable_stats">>, ReqData) of
+ <<"true">> -> true;
+ _ -> false
+ end,
+ MgmtOnly orelse get_bool_env(rabbitmq_management, disable_management_stats, false)
+ orelse get_bool_env(rabbitmq_management_agent, disable_metrics_collector, false).
+
+enable_queue_totals(ReqData) ->
+ EnableTotals = case qs_val(<<"enable_queue_totals">>, ReqData) of
+ <<"true">> -> true;
+ _ -> false
+ end,
+ EnableTotals orelse get_bool_env(rabbitmq_management, enable_queue_totals, false).
+
+get_bool_env(Application, Par, Default) ->
+ case application:get_env(Application, Par, Default) of
+ true -> true;
+ false -> false;
+ Other ->
+ rabbit_log:warning("Invalid configuration for application ~p: ~p set to ~p",
+ [Application, Par, Other]),
+ Default
+ end.
+
+user_matches_vhost(ReqData, User) ->
+ case vhost(ReqData) of
+ not_found -> true;
+ none -> true;
+ V ->
+ AuthzData = get_authz_data(ReqData),
+ lists:member(V, list_login_vhosts_names(User, AuthzData))
+ end.
+
+user_matches_vhost_visible(ReqData, User) ->
+ case vhost(ReqData) of
+ not_found -> true;
+ none -> true;
+ V ->
+ AuthzData = get_authz_data(ReqData),
+ lists:member(V, list_visible_vhosts_names(User, AuthzData))
+ end.
+
+get_authz_data(ReqData) ->
+ {PeerAddress, _PeerPort} = cowboy_req:peer(ReqData),
+ {ip, PeerAddress}.
+
+%% Used for connections / channels. A normal user can only see / delete
+%% their own connections and channels. Monitors can see other users'
+%% but delete only their own. Admins can see and delete everything.
+is_authorized_user(ReqData, Context, Item) ->
+ is_authorized(ReqData, Context,
+ <<"User not authorised to access object">>,
+ fun(#user{username = Username, tags = Tags}) ->
+ case cowboy_req:method(ReqData) of
+ <<"DELETE">> -> is_admin(Tags);
+ _ -> is_monitor(Tags)
+ end orelse Username == pget(user, Item)
+ end).
+
+%% For policies / parameters. Like is_authorized_vhost but you have to
+%% be a policymaker.
+is_authorized_policies(ReqData, Context) ->
+ is_authorized(ReqData, Context,
+ <<"User not authorised to access object">>,
+ fun(User = #user{tags = Tags}) ->
+ is_admin(Tags) orelse
+ (is_policymaker(Tags) andalso
+ user_matches_vhost(ReqData, User))
+ end).
+
+%% For global parameters. Must be a policymaker.
+is_authorized_global_parameters(ReqData, Context) ->
+ is_authorized(ReqData, Context,
+ <<"User not authorised to access object">>,
+ fun(#user{tags = Tags}) ->
+ is_policymaker(Tags)
+ end).
+
+is_basic_auth_disabled() ->
+ get_bool_env(rabbitmq_management, disable_basic_auth, false).
+
+is_authorized(ReqData, Context, ErrorMsg, Fun) ->
+ case cowboy_req:method(ReqData) of
+ <<"OPTIONS">> -> {true, ReqData, Context};
+ _ -> is_authorized1(ReqData, Context, ErrorMsg, Fun)
+ end.
+
+is_authorized1(ReqData, Context, ErrorMsg, Fun) ->
+ case cowboy_req:parse_header(<<"authorization">>, ReqData) of
+ {basic, Username, Password} ->
+ case is_basic_auth_disabled() of
+ true ->
+ Msg = "HTTP access denied: basic auth disabled",
+ rabbit_log:warning(Msg),
+ not_authorised(Msg, ReqData, Context);
+ false ->
+ is_authorized(ReqData, Context,
+ Username, Password,
+ ErrorMsg, Fun)
+ end;
+ {bearer, Token} ->
+ Username = rabbit_data_coercion:to_binary(
+ application:get_env(rabbitmq_management, uaa_client_id, "")),
+ is_authorized(ReqData, Context, Username, Token, ErrorMsg, Fun);
+ _ ->
+ case is_basic_auth_disabled() of
+ true ->
+ Msg = "HTTP access denied: basic auth disabled",
+ rabbit_log:warning(Msg),
+ not_authorised(Msg, ReqData, Context);
+ false ->
+ {{false, ?AUTH_REALM}, ReqData, Context}
+ end
+ end.
+
+is_authorized_user(ReqData, Context, Username, Password) ->
+ Msg = <<"User not authorized">>,
+ Fun = fun(_) -> true end,
+ is_authorized(ReqData, Context, Username, Password, Msg, Fun).
+
+is_authorized(ReqData, Context, Username, Password, ErrorMsg, Fun) ->
+ ErrFun = fun (Msg) ->
+ rabbit_log:warning("HTTP access denied: user '~s' - ~s",
+ [Username, Msg]),
+ not_authorised(Msg, ReqData, Context)
+ end,
+ AuthProps = [{password, Password}] ++ case vhost(ReqData) of
+ VHost when is_binary(VHost) -> [{vhost, VHost}];
+ _ -> []
+ end,
+ {IP, _} = cowboy_req:peer(ReqData),
+ RemoteAddress = list_to_binary(inet:ntoa(IP)),
+ case rabbit_access_control:check_user_login(Username, AuthProps) of
+ {ok, User = #user{tags = Tags}} ->
+ case rabbit_access_control:check_user_loopback(Username, IP) of
+ ok ->
+ case is_mgmt_user(Tags) of
+ true ->
+ case Fun(User) of
+ true ->
+ rabbit_core_metrics:auth_attempt_succeeded(RemoteAddress,
+ Username, http),
+ {true, ReqData,
+ Context#context{user = User,
+ password = Password}};
+ false ->
+ rabbit_core_metrics:auth_attempt_failed(RemoteAddress,
+ Username, http),
+ ErrFun(ErrorMsg)
+ end;
+ false ->
+ rabbit_core_metrics:auth_attempt_failed(RemoteAddress, Username, http),
+ ErrFun(<<"Not management user">>)
+ end;
+ not_allowed ->
+ rabbit_core_metrics:auth_attempt_failed(RemoteAddress, Username, http),
+ ErrFun(<<"User can only log in via localhost">>)
+ end;
+ {refused, _Username, Msg, Args} ->
+ rabbit_core_metrics:auth_attempt_failed(RemoteAddress, Username, http),
+ rabbit_log:warning("HTTP access denied: ~s",
+ [rabbit_misc:format(Msg, Args)]),
+ not_authorised(<<"Login failed">>, ReqData, Context)
+ end.
+
+vhost_from_headers(ReqData) ->
+ case cowboy_req:header(<<"x-vhost">>, ReqData) of
+ undefined -> none;
+ %% a blank x-vhost header means all virtual hosts are selected in the UI
+ <<>> -> none;
+ VHost -> VHost
+ end.
+
+vhost(ReqData) ->
+ case id(vhost, ReqData) of
+ none -> vhost_from_headers(ReqData);
+ VHost -> case rabbit_vhost:exists(VHost) of
+ true -> VHost;
+ false -> not_found
+ end
+ end.
+
+destination_type(ReqData) ->
+ case id(dtype, ReqData) of
+ <<"e">> -> exchange;
+ <<"q">> -> queue
+ end.
+
+%% Provides a map of content types to responder functions supported by
+%% reply/3. The map can be used in the content_types_provided/2 callback
+%% used by cowboy_rest. Responder functions must be exported from the
+%% caller module and must use reply/3 under the hood.
+responder_map(FunctionName) ->
+ [
+ {<<"application/json">>, FunctionName},
+ {<<"application/bert">>, FunctionName}
+ ].
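+%% A usage sketch (to_json is a hypothetical responder name; any function
+%% exported by the resource module and built on reply/3 works):
+%%   content_types_provided(ReqData, Context) ->
+%%       {responder_map(to_json), ReqData, Context}.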
+
+reply({stop, _, _} = Reply, _ReqData, _Context) ->
+ Reply;
+reply(Facts, ReqData, Context) ->
+ reply0(extract_columns(Facts, ReqData), ReqData, Context).
+
+reply0(Facts, ReqData, Context) ->
+ ReqData1 = set_resp_header(<<"cache-control">>, "no-cache", ReqData),
+ try
+ case maps:get(media_type, ReqData1, undefined) of
+ {<<"application">>, <<"bert">>, _} ->
+ {term_to_binary(Facts), ReqData1, Context};
+ _ ->
+ {rabbit_json:encode(rabbit_mgmt_format:format_nulls(Facts)),
+ ReqData1, Context}
+ end
+ catch exit:{json_encode, E} ->
+ Error = iolist_to_binary(
+ io_lib:format("JSON encode error: ~p", [E])),
+ Reason = iolist_to_binary(
+ io_lib:format("While encoding: ~n~p", [Facts])),
+ internal_server_error(Error, Reason, ReqData1, Context)
+ end.
+
+reply_list(Facts, ReqData, Context) ->
+ reply_list(Facts, ["vhost", "name"], ReqData, Context, undefined).
+
+reply_list(Facts, DefaultSorts, ReqData, Context) ->
+ reply_list(Facts, DefaultSorts, ReqData, Context, undefined).
+
+get_value_param(Name, ReqData) ->
+ case qs_val(Name, ReqData) of
+ undefined -> undefined;
+ Bin -> binary_to_list(Bin)
+ end.
+
+reply_list(Facts, DefaultSorts, ReqData, Context, Pagination) ->
+ SortList =
+ sort_list_and_paginate(
+ extract_columns_list(Facts, ReqData),
+ DefaultSorts,
+ get_value_param(<<"sort">>, ReqData),
+ get_sort_reverse(ReqData), Pagination),
+
+ reply(SortList, ReqData, Context).
+
+-spec get_sort_reverse(cowboy_req:req()) -> atom().
+get_sort_reverse(ReqData) ->
+ case get_value_param(<<"sort_reverse">>, ReqData) of
+ undefined -> false;
+ V -> list_to_atom(V)
+ end.
+
+
+-spec is_pagination_requested(#pagination{} | undefined) -> boolean().
+is_pagination_requested(undefined) ->
+ false;
+is_pagination_requested(#pagination{}) ->
+ true.
+
+
+with_valid_pagination(ReqData, Context, Fun) ->
+ try
+ Pagination = pagination_params(ReqData),
+ Fun(Pagination)
+ catch error:badarg ->
+ Reason = <<"Pagination parameters are invalid">>,
+ invalid_pagination(bad_request, Reason, ReqData, Context);
+ {error, ErrorType, S} ->
+ Reason = iolist_to_binary(S),
+ invalid_pagination(ErrorType, Reason, ReqData, Context)
+ end.
+
+reply_list_or_paginate(Facts, ReqData, Context) ->
+ with_valid_pagination(
+ ReqData, Context,
+ fun(Pagination) ->
+ reply_list(Facts, ["vhost", "name"], ReqData, Context, Pagination)
+ end).
+
+merge_sorts(DefaultSorts, Extra) ->
+ case Extra of
+ undefined -> DefaultSorts;
+ Extra -> [Extra | DefaultSorts]
+ end.
+
+%% Resource augmentation. Works out the cheapest ordering of the sort,
+%% page and augment operations, then executes it and returns the result.
+
+column_strategy(all, _) -> extended;
+column_strategy(Cols, BasicColumns) ->
+ case Cols -- BasicColumns of
+ [] -> basic;
+ _ -> extended
+ end.
+
+% Columns are [[binary()]]; this takes the first segment of each.
+columns_as_strings(all) -> all;
+columns_as_strings(Columns0) ->
+ [rabbit_data_coercion:to_list(C) || [C | _] <- Columns0].
+
+augment_resources(Resources, DefaultSort, BasicColumns, ReqData, Context,
+ AugmentFun) ->
+ with_valid_pagination(ReqData, Context,
+ fun (Pagination) ->
+ augment_resources0(Resources, DefaultSort,
+ BasicColumns, Pagination,
+ ReqData, AugmentFun)
+ end).
+
+augment_resources0(Resources, DefaultSort, BasicColumns, Pagination, ReqData,
+ AugmentFun) ->
+ SortFun = fun (AugCtx) -> sort(DefaultSort, AugCtx) end,
+ AugFun = fun (AugCtx) -> augment(AugmentFun, AugCtx) end,
+ PageFun = fun page/1,
+ Pagination = pagination_params(ReqData),
+ Sort = def(get_value_param(<<"sort">>, ReqData), DefaultSort),
+ Columns = def(columns(ReqData), all),
+ ColumnsAsStrings = columns_as_strings(Columns),
+ Pipeline =
+ case {Pagination =/= undefined,
+ column_strategy(Sort, BasicColumns),
+ column_strategy(ColumnsAsStrings, BasicColumns)} of
+ {false, basic, basic} -> % no pagination, no extended fields
+ [SortFun];
+ {false, _, _} ->
+ % no pagination, extended columns means we need to augment all - SLOW
+ [AugFun, SortFun];
+ {true, basic, basic} ->
+ [SortFun, PageFun];
+ {true, extended, _} ->
+ % pagination with extended sort columns - SLOW
+ [AugFun, SortFun, PageFun];
+ {true, basic, extended} ->
+ % pagination with extended columns and sorting on basic
+ % here we can reduce the augmentation set before
+ % augmenting
+ [SortFun, PageFun, AugFun]
+ end,
+ #aug_ctx{data = {_, Reply}} = run_augmentation(
+ #aug_ctx{req_data = ReqData,
+ pagination = Pagination,
+ sort = Sort,
+ columns = Columns,
+ data = {loaded, Resources}},
+ Pipeline),
+ rabbit_mgmt_format:strip_pids(Reply).
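+%% An illustrative request (query values and column names are made up,
+%% and assume the default sort columns are basic): listing queues with
+%% page=1&page_size=50&columns=name,message_stats sorts on the basic
+%% defaults, pages down to 50 rows, and only then augments (the
+%% {true, basic, extended} branch above) instead of augmenting every row.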
+
+run_augmentation(C, []) -> C;
+run_augmentation(C, [Next | Rem]) ->
+ C2 = Next(C),
+ run_augmentation(C2, Rem).
+
+sort(DefaultSort, Ctx = #aug_ctx{data = Data0,
+ req_data = ReqData,
+ sort = Sort}) ->
+ Data1 = get_data(Data0),
+ Data = sort_list(Data1, DefaultSort, Sort,
+ get_sort_reverse(ReqData)),
+ Ctx#aug_ctx{data = update_data(Data0, {sorted, Data})}.
+
+page(Ctx = #aug_ctx{data = Data0,
+ pagination = Pagination}) ->
+ Data = filter_and_paginate(get_data(Data0), Pagination),
+ Ctx#aug_ctx{data = update_data(Data0, {paged, Data})}.
+
+update_data({paged, Old}, New) ->
+ {paged, rabbit_misc:pset(items, get_data(New), Old)};
+update_data(_, New) ->
+ New.
+
+augment(AugmentFun, Ctx = #aug_ctx{data = Data0, req_data = ReqData}) ->
+ Data1 = get_data(Data0),
+ Data = AugmentFun(Data1, ReqData),
+ Ctx#aug_ctx{data = update_data(Data0, {augmented, Data})}.
+
+get_data({paged, Data}) ->
+ rabbit_misc:pget(items, Data);
+get_data({_, Data}) ->
+ Data.
+
+get_path_prefix() ->
+ EnvPrefix = rabbit_misc:get_env(rabbitmq_management, path_prefix, ""),
+ fixup_prefix(EnvPrefix).
+
+fixup_prefix("") ->
+ "";
+fixup_prefix([Char|_Rest]=EnvPrefix) when is_list(EnvPrefix), Char =:= $/ ->
+ EnvPrefix;
+fixup_prefix(EnvPrefix) when is_list(EnvPrefix) ->
+ "/" ++ EnvPrefix;
+fixup_prefix(EnvPrefix) when is_binary(EnvPrefix) ->
+ fixup_prefix(rabbit_data_coercion:to_list(EnvPrefix)).
+
+%% XXX sort_list_and_paginate/2 would be a more accurate name for this
+%% function; the current name is kept for backwards compatibility.
+-spec sort_list([Fact], [string()]) -> [Fact] when
+ Fact :: [{atom(), term()}].
+sort_list(Facts, Sorts) -> sort_list_and_paginate(Facts, Sorts, undefined, false,
+ undefined).
+
+-spec sort_list([Fact], [SortColumn], [SortColumn] | undefined, boolean()) -> [Fact] when
+ Fact :: [{atom(), term()}],
+ SortColumn :: string().
+sort_list(Facts, _, [], _) ->
+ %% Do not sort when we are explicitly requested to sort with an
+ %% empty sort columns list. Note that this clause won't match when
+ %% the 'sort' parameter is not provided in an HTTP request at all.
+ Facts;
+sort_list(Facts, DefaultSorts, Sort, Reverse) ->
+ SortList = merge_sorts(DefaultSorts, Sort),
+ %% lists:sort/2 is much more expensive than lists:sort/1
+ Sorted = [V || {_K, V} <- lists:sort(
+ [{sort_key(F, SortList), F} || F <- Facts])],
+ maybe_reverse(Sorted, Reverse).
+
+sort_list_and_paginate(Facts, DefaultSorts, Sort, Reverse, Pagination) ->
+ filter_and_paginate(sort_list(Facts, DefaultSorts, Sort, Reverse), Pagination).
+
+filter_and_paginate(Sorted, Pagination) ->
+ ContextList = maybe_filter_context(Sorted, Pagination),
+ range_filter(ContextList, Pagination, Sorted).
+
+%%
+%% Filtering functions
+%%
+maybe_filter_context(List, #pagination{name = Name, use_regex = UseRegex}) when
+ is_list(Name) ->
+ lists:filter(fun(ListF) ->
+ maybe_filter_by_keyword(name, Name, ListF, UseRegex)
+ end,
+ List);
+%% Kept backwards-compatible with the other API(s), which do not filter the data
+maybe_filter_context(List, _) ->
+ List.
+
+
+match_value({_, Value}, ValueTag, UseRegex) when UseRegex =:= "true" ->
+ case re:run(Value, ValueTag, [caseless]) of
+ {match, _} -> true;
+ nomatch -> false
+ end;
+match_value({_, Value}, ValueTag, _) ->
+ Pos = string:str(string:to_lower(binary_to_list(Value)),
+ string:to_lower(ValueTag)),
+ case Pos of
+ Pos when Pos > 0 -> true;
+ _ -> false
+ end.
+
+maybe_filter_by_keyword(KeyTag, ValueTag, List, UseRegex) when
+ is_list(ValueTag), length(ValueTag) > 0 ->
+ match_value(lists:keyfind(KeyTag, 1, List), ValueTag, UseRegex);
+maybe_filter_by_keyword(_, _, _, _) ->
+ true.
+
+check_request_param(V, ReqData) ->
+ case qs_val(V, ReqData) of
+ undefined -> undefined;
+ Str -> list_to_integer(binary_to_list(Str))
+ end.
+
+%% Validates and returns pagination parameters:
+%% Page must be > 0, PageSize must be > 0 and =< ?MAX_PAGE_SIZE
+pagination_params(ReqData) ->
+ PageNum = check_request_param(<<"page">>, ReqData),
+ PageSize = check_request_param(<<"page_size">>, ReqData),
+ Name = get_value_param(<<"name">>, ReqData),
+ UseRegex = get_value_param(<<"use_regex">>, ReqData),
+ case {PageNum, PageSize} of
+ {undefined, _} ->
+ undefined;
+ {PageNum, undefined} when is_integer(PageNum) andalso PageNum > 0 ->
+ #pagination{page = PageNum, page_size = ?DEFAULT_PAGE_SIZE,
+ name = Name, use_regex = UseRegex};
+ {PageNum, PageSize} when is_integer(PageNum)
+ andalso is_integer(PageSize)
+ andalso (PageNum > 0)
+ andalso (PageSize > 0)
+ andalso (PageSize =< ?MAX_PAGE_SIZE) ->
+ #pagination{page = PageNum, page_size = PageSize,
+ name = Name, use_regex = UseRegex};
+ _ -> throw({error, invalid_pagination_parameters,
+ io_lib:format("Invalid pagination parameters: page number ~p, page size ~p",
+ [PageNum, PageSize])})
+ end.
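+%% For example (a hypothetical query string):
+%%   GET /api/queues?page=2&page_size=100&name=audit&use_regex=false
+%% yields #pagination{page = 2, page_size = 100, name = "audit",
+%% use_regex = "false"}; omitting page_size falls back to
+%% ?DEFAULT_PAGE_SIZE.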
+
+-spec maybe_reverse([any()], string() | true | false) -> [any()].
+maybe_reverse([], _) ->
+ [];
+maybe_reverse(RangeList, true) when is_list(RangeList) ->
+ lists:reverse(RangeList);
+maybe_reverse(RangeList, false) ->
+ RangeList.
+
+%% for backwards compatibility, does not filter the list
+range_filter(List, undefined, _) -> List;
+range_filter(List, RP = #pagination{page = PageNum, page_size = PageSize},
+ TotalElements) ->
+ Offset = (PageNum - 1) * PageSize + 1,
+ try
+ range_response(sublist(List, Offset, PageSize), RP, List,
+ TotalElements)
+ catch
+ error:function_clause ->
+ Reason = io_lib:format(
+ "Page out of range, page: ~p page size: ~p, len: ~p",
+ [PageNum, PageSize, length(List)]),
+ throw({error, page_out_of_range, Reason})
+ end.
+
+%% Injects pagination information into the response
+range_response([], #pagination{page = PageNum, page_size = PageSize},
+ TotalFiltered, TotalElements) ->
+ TotalPages = trunc((length(TotalFiltered) + PageSize - 1) / PageSize),
+ [{total_count, length(TotalElements)},
+ {item_count, 0},
+ {filtered_count, length(TotalFiltered)},
+ {page, PageNum},
+ {page_size, PageSize},
+ {page_count, TotalPages},
+ {items, []}
+ ];
+range_response(List, #pagination{page = PageNum, page_size = PageSize},
+ TotalFiltered, TotalElements) ->
+ TotalPages = trunc((length(TotalFiltered) + PageSize - 1) / PageSize),
+ [{total_count, length(TotalElements)},
+ {item_count, length(List)},
+ {filtered_count, length(TotalFiltered)},
+ {page, PageNum},
+ {page_size, PageSize},
+ {page_count, TotalPages},
+ {items, List}
+ ].
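+%% For example (made-up numbers): 120 filtered items paged with
+%% page_size = 50 produce page_count = trunc((120 + 50 - 1) / 50) = 3,
+%% so page 3 carries the remaining 20 items.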
+
+sort_key(_Item, []) ->
+ [];
+sort_key(Item, [Sort | Sorts]) ->
+ [get_dotted_value(Sort, Item) | sort_key(Item, Sorts)].
+
+get_dotted_value(Key, Item) ->
+ Keys = string:tokens(Key, "."),
+ get_dotted_value0(Keys, Item).
+
+get_dotted_value0([Key], Item) ->
+ %% Put "nothing" before everything else, in number terms it usually
+ %% means 0.
+ pget_bin(list_to_binary(Key), Item, 0);
+get_dotted_value0([Key | Keys], Item) ->
+ get_dotted_value0(Keys, pget_bin(list_to_binary(Key), Item, [])).
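+%% For example (a plausible sort key, not a fixed API): sorting on
+%% "message_stats.publish_details.rate" tokenizes into three keys and
+%% descends through the nested proplists/maps, defaulting to 0 when the
+%% leaf is absent so such rows sort first.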
+
+pget_bin(Key, Map, Default) when is_map(Map) ->
+ maps:get(Key, Map, Default);
+pget_bin(Key, List, Default) when is_list(List) ->
+ case lists:partition(fun ({K, _V}) -> a2b(K) =:= Key end, List) of
+ {[{_K, V}], _} -> V;
+ {[], _} -> Default
+ end.
+
+maybe_pagination(Item, false, ReqData) ->
+ extract_column_items(Item, columns(ReqData));
+maybe_pagination([{items, Item} | T], true, ReqData) ->
+ [{items, extract_column_items(Item, columns(ReqData))} |
+ maybe_pagination(T, true, ReqData)];
+maybe_pagination([H | T], true, ReqData) ->
+ [H | maybe_pagination(T, true, ReqData)];
+maybe_pagination(Item, true, ReqData) ->
+ [maybe_pagination(X, true, ReqData) || X <- Item].
+
+extract_columns(Item, ReqData) ->
+ maybe_pagination(Item, is_pagination_requested(pagination_params(ReqData)),
+ ReqData).
+
+extract_columns_list(Items, ReqData) ->
+ Cols = columns(ReqData),
+ [extract_column_items(Item, Cols) || Item <- Items].
+
+columns(ReqData) ->
+ case qs_val(<<"columns">>, ReqData) of
+ undefined -> all;
+ Bin -> [[list_to_binary(T) || T <- string:tokens(C, ".")]
+ || C <- string:tokens(binary_to_list(Bin), ",")]
+ end.
+
+extract_column_items(Item, all) ->
+ Item;
+extract_column_items(Item = [T | _], Cols) when is_tuple(T) ->
+ [{K, extract_column_items(V, descend_columns(a2b(K), Cols))} ||
+ {K, V} <- Item, want_column(a2b(K), Cols)];
+extract_column_items(L, Cols) when is_list(L) ->
+ [extract_column_items(I, Cols) || I <- L];
+extract_column_items(O, _Cols) ->
+ O.
+
+% want_column(_Col, all) -> true;
+want_column(Col, Cols) -> lists:any(fun([C|_]) -> C == Col end, Cols).
+
+descend_columns(_K, []) -> [];
+descend_columns( K, [[K] | _Rest]) -> all;
+descend_columns( K, [[K | K2] | Rest]) -> [K2 | descend_columns(K, Rest)];
+descend_columns( K, [[_K2 | _ ] | Rest]) -> descend_columns(K, Rest).
+
+a2b(A) when is_atom(A) -> list_to_binary(atom_to_list(A));
+a2b(B) -> B.
+
+bad_request(Reason, ReqData, Context) ->
+ halt_response(400, bad_request, Reason, ReqData, Context).
+
+not_authorised(Reason, ReqData, Context) ->
+ halt_response(401, not_authorised, Reason, ReqData, Context).
+
+not_found(Reason, ReqData, Context) ->
+ halt_response(404, not_found, Reason, ReqData, Context).
+
+internal_server_error(Error, Reason, ReqData, Context) ->
+ rabbit_log:error("~s~n~s", [Error, Reason]),
+ halt_response(500, Error, Reason, ReqData, Context).
+
+invalid_pagination(Type, Reason, ReqData, Context) ->
+ halt_response(400, Type, Reason, ReqData, Context).
+
+halt_response(Code, Type, Reason, ReqData, Context) ->
+ ReasonFormatted = format_reason(Reason),
+ Json = #{<<"error">> => Type,
+ <<"reason">> => ReasonFormatted},
+ ReqData1 = cowboy_req:reply(Code,
+ #{<<"content-type">> => <<"application/json">>},
+ rabbit_json:encode(Json), ReqData),
+ {stop, ReqData1, Context}.
+
+format_reason(Tuple) when is_tuple(Tuple) ->
+ rabbit_mgmt_format:tuple(Tuple);
+format_reason(Binary) when is_binary(Binary) ->
+ Binary;
+format_reason(Other) ->
+ case is_string(Other) of
+ true -> rabbit_mgmt_format:print("~ts", [Other]);
+ false -> rabbit_mgmt_format:print("~p", [Other])
+ end.
+
+is_string(List) when is_list(List) ->
+ lists:all(
+ fun(El) -> is_integer(El) andalso El > 0 andalso El < 16#10ffff end,
+ List);
+is_string(_) -> false.
+
+id(Key, ReqData) when Key =:= exchange;
+ Key =:= source;
+ Key =:= destination ->
+ case id0(Key, ReqData) of
+ <<"amq.default">> -> <<"">>;
+ Name -> Name
+ end;
+id(Key, ReqData) ->
+ id0(Key, ReqData).
+
+id0(Key, ReqData) ->
+ case cowboy_req:binding(Key, ReqData) of
+ undefined -> none;
+ Id -> Id
+ end.
+
+read_complete_body(Req) ->
+ read_complete_body(Req, <<"">>).
+read_complete_body(Req0, Acc) ->
+ case cowboy_req:read_body(Req0) of
+ {ok, Data, Req} -> {ok, <<Acc/binary, Data/binary>>, Req};
+ {more, Data, Req} -> read_complete_body(Req, <<Acc/binary, Data/binary>>)
+ end.
+
+with_decode(Keys, ReqData, Context, Fun) ->
+ {ok, Body, ReqData1} = read_complete_body(ReqData),
+ with_decode(Keys, Body, ReqData1, Context, Fun).
+
+with_decode(Keys, Body, ReqData, Context, Fun) ->
+ case decode(Keys, Body) of
+ {error, Reason} -> bad_request(Reason, ReqData, Context);
+ {ok, Values, JSON} -> try
+ Fun(Values, JSON, ReqData)
+ catch {error, Error} ->
+ bad_request(Error, ReqData, Context)
+ end
+ end.
+
+decode(Keys, Body) ->
+ case decode(Body) of
+ {ok, J0} ->
+ J = maps:fold(fun(K, V, Acc) ->
+ Acc#{binary_to_atom(K, utf8) => V}
+ end, J0, J0),
+ Results = [get_or_missing(K, J) || K <- Keys],
+ case [E || E = {key_missing, _} <- Results] of
+ [] -> {ok, Results, J};
+ Errors -> {error, Errors}
+ end;
+ Else -> Else
+ end.
+
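+%% Illustrative: decode([name], <<"{\"name\":\"q1\"}">>) returns
+%% {ok, [<<"q1">>], Map} where Map holds both <<"name">> and the atom name, so
+%% callers can use either key form; missing keys surface as {key_missing, K}.
+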
+-type parsed_json() :: map() | atom() | binary().
+-spec decode(binary()) -> {ok, parsed_json()} | {error, term()}.
+
+decode(<<"">>) ->
+ {ok, #{}};
+%% some HTTP API clients include "null" for payload in certain scenarios
+decode(<<"null">>) ->
+ {ok, #{}};
+decode(<<"undefined">>) ->
+ {ok, #{}};
+decode(Body) ->
+ try
+ case rabbit_json:decode(Body) of
+ Val when is_map(Val) ->
+ {ok, Val};
+ Val when is_atom(Val) ->
+ {ok, #{}};
+ %% handle double encoded JSON, see rabbitmq/rabbitmq-management#839
+ Bin when is_binary(Bin) ->
+ {error, "invalid payload: the request body JSON-decoded to a string. "
+ "Is the input doubly-JSON-encoded?"};
+ _ ->
+ {error, not_json}
+ end
+ catch error:_ -> {error, not_json}
+ end.
+
+get_or_missing(K, L) ->
+ case maps:get(K, L, undefined) of
+ undefined -> {key_missing, K};
+ V -> V
+ end.
+
+get_node(Props) ->
+ case maps:get(<<"node">>, Props, undefined) of
+ undefined -> node();
+ N -> rabbit_nodes:make(
+ binary_to_list(N))
+ end.
+
+direct_request(MethodName, Transformers, Extra, ErrorMsg, ReqData,
+ Context = #context{user = User}) ->
+ with_vhost_and_props(
+ fun(VHost, Props, ReqData1) ->
+ Method = props_to_method(MethodName, Props, Transformers, Extra),
+ Node = get_node(Props),
+ case rabbit_misc:rpc_call(Node, rabbit_channel, handle_method,
+ [Method, none, #{}, none,
+ VHost, User]) of
+ {badrpc, nodedown} ->
+ Msg = io_lib:format("Node ~p could not be contacted", [Node]),
+ rabbit_log:warning(ErrorMsg, [Msg]),
+ bad_request(list_to_binary(Msg), ReqData1, Context);
+ {badrpc, {'EXIT', #amqp_error{name = not_found, explanation = Explanation}}} ->
+ rabbit_log:warning(ErrorMsg, [Explanation]),
+ not_found(Explanation, ReqData1, Context);
+ {badrpc, {'EXIT', #amqp_error{name = access_refused, explanation = Explanation}}} ->
+ rabbit_log:warning(ErrorMsg, [Explanation]),
+ not_authorised(<<"Access refused.">>, ReqData1, Context);
+ {badrpc, {'EXIT', #amqp_error{name = not_allowed, explanation = Explanation}}} ->
+ rabbit_log:warning(ErrorMsg, [Explanation]),
+ not_authorised(<<"Access refused.">>, ReqData1, Context);
+ {badrpc, {'EXIT', #amqp_error{explanation = Explanation}}} ->
+ rabbit_log:warning(ErrorMsg, [Explanation]),
+ bad_request(list_to_binary(Explanation), ReqData1, Context);
+ {badrpc, Reason} ->
+ rabbit_log:warning(ErrorMsg, [Reason]),
+ bad_request(
+ list_to_binary(
+ io_lib:format("Request to node ~s failed with ~p",
+ [Node, Reason])),
+ ReqData1, Context);
+ _ -> {true, ReqData1, Context}
+ end
+ end, ReqData, Context).
+
+with_vhost_and_props(Fun, ReqData, Context) ->
+ case vhost(ReqData) of
+ not_found ->
+ not_found(rabbit_data_coercion:to_binary("vhost_not_found"),
+ ReqData, Context);
+ VHost ->
+ {ok, Body, ReqData1} = read_complete_body(ReqData),
+ case decode(Body) of
+ {ok, Props} ->
+ try
+ Fun(VHost, Props, ReqData1)
+ catch {error, Error} ->
+ bad_request(Error, ReqData1, Context)
+ end;
+ {error, Reason} ->
+ bad_request(rabbit_mgmt_format:escape_html_tags(Reason),
+ ReqData1, Context)
+ end
+ end.
+
+props_to_method(MethodName, Props, Transformers, Extra) when Props =:= null orelse
+ Props =:= undefined ->
+ props_to_method(MethodName, #{}, Transformers, Extra);
+props_to_method(MethodName, Props, Transformers, Extra) ->
+ Props1 = [{list_to_atom(binary_to_list(K)), V} || {K, V} <- maps:to_list(Props)],
+ props_to_method(
+ MethodName, rabbit_mgmt_format:format(Props1 ++ Extra, {Transformers, true})).
+
+props_to_method(MethodName, Props) ->
+ Props1 = rabbit_mgmt_format:format(
+ Props,
+ {fun rabbit_mgmt_format:format_args/1, true}),
+ FieldNames = ?FRAMING:method_fieldnames(MethodName),
+ {Res, _Idx} = lists:foldl(
+ fun (K, {R, Idx}) ->
+ NewR = case pget(K, Props1) of
+ undefined -> R;
+ V -> setelement(Idx, R, V)
+ end,
+ {NewR, Idx + 1}
+ end, {?FRAMING:method_record(MethodName), 2},
+ FieldNames),
+ Res.
+
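+%% The fold in props_to_method/2 above copies each matching prop into the
+%% method record; Idx starts at 2 because element 1 of a record tuple is the
+%% record tag itself.
+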
+parse_bool(V) -> rabbit_misc:parse_bool(V).
+
+parse_int(V) -> rabbit_misc:parse_int(V).
+
+with_channel(VHost, ReqData, Context, Fun) ->
+ with_channel(VHost, ReqData, Context, node(), Fun).
+
+with_channel(VHost, ReqData,
+ Context = #context{user = #user {username = Username},
+ password = Password},
+ Node, Fun) ->
+ Params = #amqp_params_direct{username = Username,
+ password = Password,
+ node = Node,
+ virtual_host = VHost},
+ case amqp_connection:start(Params) of
+ {ok, Conn} ->
+ {ok, Ch} = amqp_connection:open_channel(Conn),
+ try
+ Fun(Ch)
+ catch
+ exit:{{shutdown,
+ {server_initiated_close, ?NOT_FOUND, Reason}}, _} ->
+ not_found(Reason, ReqData, Context);
+ exit:{{shutdown,
+ {server_initiated_close, ?ACCESS_REFUSED, Reason}}, _} ->
+ not_authorised(Reason, ReqData, Context);
+ exit:{{shutdown, {ServerClose, Code, Reason}}, _}
+ when ServerClose =:= server_initiated_close;
+ ServerClose =:= server_initiated_hard_close ->
+ bad_request_exception(Code, Reason, ReqData, Context);
+ exit:{{shutdown, {connection_closing,
+ {ServerClose, Code, Reason}}}, _}
+ when ServerClose =:= server_initiated_close;
+ ServerClose =:= server_initiated_hard_close ->
+ bad_request_exception(Code, Reason, ReqData, Context)
+ after
+ catch amqp_channel:close(Ch),
+ catch amqp_connection:close(Conn)
+ end;
+ {error, {auth_failure, Msg}} ->
+ not_authorised(Msg, ReqData, Context);
+ {error, not_allowed} ->
+ not_authorised(<<"Access refused.">>, ReqData, Context);
+ {error, access_refused} ->
+ not_authorised(<<"Access refused.">>, ReqData, Context);
+ {error, {nodedown, N}} ->
+ bad_request(
+ list_to_binary(
+ io_lib:format("Node ~s could not be contacted", [N])),
+ ReqData, Context)
+ end.
+
+bad_request_exception(Code, Reason, ReqData, Context) ->
+ bad_request(list_to_binary(io_lib:format("~p ~s", [Code, Reason])),
+ ReqData, Context).
+
+all_or_one_vhost(ReqData, Fun) ->
+ case vhost(ReqData) of
+ none -> lists:append([Fun(V) || V <- rabbit_vhost:list_names()]);
+ not_found -> vhost_not_found;
+ VHost -> Fun(VHost)
+ end.
+
+filter_vhost(List, ReqData, Context) ->
+ User = #user{tags = Tags} = Context#context.user,
+ Fn = case is_admin(Tags) of
+ true -> fun list_visible_vhosts_names/2;
+ false -> fun list_login_vhosts_names/2
+ end,
+ AuthzData = get_authz_data(ReqData),
+ VHosts = Fn(User, AuthzData),
+ [I || I <- List, lists:member(pget(vhost, I), VHosts)].
+
+filter_user(List, _ReqData, #context{user = User}) ->
+ filter_user(List, User).
+
+filter_user(List, #user{username = Username, tags = Tags}) ->
+ case is_monitor(Tags) of
+ true -> List;
+ false -> [I || I <- List, pget(user, I) == Username]
+ end.
+
+filter_conn_ch_list(List, ReqData, Context) ->
+ rabbit_mgmt_format:strip_pids(
+ filter_user(
+ case vhost(ReqData) of
+ none -> List;
+ VHost -> [I || I <- List, pget(vhost, I) =:= VHost]
+ end, ReqData, Context)).
+
+filter_tracked_conn_list(List, ReqData, #context{user = #user{username = FilterUsername, tags = Tags}}) ->
+ %% A bit of duplicated code, but we only go through the list once
+ case {vhost(ReqData), is_monitor(Tags)} of
+ {none, true} ->
+ [[{name, Name},
+ {vhost, VHost},
+ {user, Username},
+ {node, Node}] || #tracked_connection{name = Name, vhost = VHost, username = Username, node = Node} <- List];
+ {FilterVHost, true} ->
+ [[{name, Name},
+ {vhost, VHost},
+ {user, Username},
+ {node, Node}] || #tracked_connection{name = Name, vhost = VHost, username = Username, node = Node} <- List, VHost == FilterVHost];
+ {none, false} ->
+ [[{name, Name},
+ {vhost, VHost},
+ {user, Username},
+ {node, Node}] || #tracked_connection{name = Name, vhost = VHost, username = Username, node = Node} <- List, Username == FilterUsername];
+ {FilterVHost, false} ->
+ [[{name, Name},
+ {vhost, VHost},
+ {user, Username},
+ {node, Node}] || #tracked_connection{name = Name, vhost = VHost, username = Username, node = Node} <- List, VHost == FilterVHost, Username == FilterUsername]
+ end.
+
+set_resp_header(K, V, ReqData) ->
+ cowboy_req:set_resp_header(K, strip_crlf(V), ReqData).
+
+strip_crlf(Str) -> lists:append(string:tokens(Str, "\r\n")).
+
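+%% e.g. strip_crlf("evil\r\nheader") returns "evilheader", which guards
+%% against response header splitting via user-supplied values.
+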
+args([]) -> args(#{});
+args(L) -> rabbit_mgmt_format:to_amqp_table(L).
+
+%% Make replying to a post look like anything else...
+post_respond({true, ReqData, Context}) ->
+ {true, ReqData, Context};
+post_respond({stop, ReqData, Context}) ->
+ {stop, ReqData, Context};
+post_respond({JSON, ReqData, Context}) ->
+ {true, cowboy_req:set_resp_body(JSON, ReqData), Context}.
+
+is_admin(T) -> intersects(T, [administrator]).
+is_policymaker(T) -> intersects(T, [administrator, policymaker]).
+is_monitor(T) -> intersects(T, [administrator, monitoring]).
+is_mgmt_user(T) -> intersects(T, [administrator, monitoring, policymaker,
+ management]).
+
+intersects(A, B) -> lists:any(fun(I) -> lists:member(I, B) end, A).
+
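+%% e.g. intersects([monitoring, management], [administrator, monitoring]) is
+%% true, so is_monitor/1 accepts administrators and monitoring users alike.
+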
+%% The distinction between list_visible_vhosts and list_login_vhosts
+%% is there to ensure that monitors can always learn of the
+%% existence of all vhosts, and can always see their contribution to
+%% global stats.
+
+list_visible_vhosts_names(User) ->
+ list_visible_vhosts(User, undefined).
+
+list_visible_vhosts_names(User, AuthzData) ->
+ list_visible_vhosts(User, AuthzData).
+
+list_visible_vhosts(User) ->
+ list_visible_vhosts(User, undefined).
+
+list_visible_vhosts(User = #user{tags = Tags}, AuthzData) ->
+ case is_monitor(Tags) of
+ true -> rabbit_vhost:list_names();
+ false -> list_login_vhosts_names(User, AuthzData)
+ end.
+
+list_login_vhosts_names(User, AuthzData) ->
+ [V || V <- rabbit_vhost:list_names(),
+ case catch rabbit_access_control:check_vhost_access(User, V, AuthzData, #{}) of
+ ok -> true;
+ NotOK ->
+ log_access_control_result(NotOK),
+ false
+ end].
+
+list_login_vhosts(User, AuthzData) ->
+ [V || V <- rabbit_vhost:all(),
+ case catch rabbit_access_control:check_vhost_access(User, vhost:get_name(V), AuthzData, #{}) of
+ ok -> true;
+ NotOK ->
+ log_access_control_result(NotOK),
+ false
+ end].
+
+% rabbitmq/rabbitmq-auth-backend-http#100
+log_access_control_result(NotOK) ->
+ rabbit_log:debug("rabbit_access_control:check_vhost_access result: ~p", [NotOK]).
+
+%% base64:decode/1 throws a variety of errors. Catch them and convert to a
+%% single error that will cause a bad_request response.
+b64decode_or_throw(B64) ->
+ rabbit_misc:b64decode_or_throw(B64).
+
+no_range() -> {no_range, no_range, no_range, no_range}.
+
+%% Take floor on queries so we make sure we only return samples
+%% for which we've finished receiving events. Fixes the "drop at
+%% the end" problem.
+range(ReqData) -> {range("lengths", fun floor/2, ReqData),
+ range("msg_rates", fun floor/2, ReqData),
+ range("data_rates", fun floor/2, ReqData),
+ range("node_stats", fun floor/2, ReqData)}.
+
+%% ...but if we know only one event could have contributed towards
+%% what we are interested in, then let's take the ceiling instead and
+%% get slightly fresher data that will match up with any
+%% non-historical data we have (e.g. queue length vs queue messages in
+%% RAM, they should both come from the same snapshot or we might
+%% report more messages in RAM than total).
+%%
+%% However, we only do this for queue lengths since a) it's the only
+%% thing where this ends up being really glaring and b) for other
+%% numbers we care more about the rate than the absolute value, and if
+%% we use ceil() we stand a 50:50 chance of looking up the last sample
+%% in the range before we get it, and thus deriving an instantaneous
+%% rate of 0.0.
+%%
+%% Age is assumed to be > 0, Incr > 0 and (Age div Incr) <= ?MAX_RANGE.
+%% The latter condition allows us to limit the number of samples that
+%% will be sent to the client.
+range_ceil(ReqData) -> {range("lengths", fun ceil/2, ReqData),
+ range("msg_rates", fun floor/2, ReqData),
+ range("data_rates", fun floor/2, ReqData),
+ range("node_stats", fun floor/2, ReqData)}.
+
+range(Prefix, Round, ReqData) ->
+ Age0 = int(Prefix ++ "_age", ReqData),
+ Incr0 = int(Prefix ++ "_incr", ReqData),
+ if
+ is_atom(Age0) orelse is_atom(Incr0) -> no_range;
+ (Age0 > 0) andalso (Incr0 > 0) andalso ((Age0 div Incr0) =< ?MAX_RANGE) ->
+ Age = Age0 * 1000,
+ Incr = Incr0 * 1000,
+ Now = os:system_time(milli_seconds),
+ Last = Round(Now, Incr),
+ #range{first = (Last - Age),
+ last = Last,
+ incr = Incr};
+ true -> throw({error, invalid_range_parameters,
+ io_lib:format("Invalid range parameters: age ~p, incr ~p",
+ [Age0, Incr0])})
+ end.
+
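+%% Illustrative: lengths_age=600&lengths_incr=5 makes range/3 return a #range{}
+%% covering the last 600 seconds in 5-second steps (converted to milliseconds),
+%% provided 600 div 5 does not exceed ?MAX_RANGE.
+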
+floor(TS, Interval) -> (TS div Interval) * Interval.
+
+ceil(TS, Interval) -> case floor(TS, Interval) of
+ TS -> TS;
+ Floor -> Floor + Interval
+ end.
+
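+%% e.g. floor(1500, 400) = 1200 and ceil(1500, 400) = 1600, so timestamps are
+%% aligned to whole sample intervals.
+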
+ceil(X) when X < 0 ->
+ trunc(X);
+ceil(X) ->
+ T = trunc(X),
+ case X - T == 0 of
+ true -> T;
+ false -> T + 1
+ end.
+
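+%% For ceil/1 above: ceil(2.1) = 3, ceil(2.0) = 2 and ceil(-2.1) = -2, since
+%% negative values truncate toward zero.
+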
+int(Name, ReqData) ->
+ case qs_val(list_to_binary(Name), ReqData) of
+ undefined -> undefined;
+ Bin -> case catch list_to_integer(binary_to_list(Bin)) of
+ {'EXIT', _} -> undefined;
+ Integer -> Integer
+ end
+ end.
+
+def(undefined, Def) -> Def;
+def(V, _) -> V.
+
+-spec qs_val(binary(), cowboy:req()) -> any() | undefined.
+qs_val(Name, ReqData) ->
+ Qs = cowboy_req:parse_qs(ReqData),
+ proplists:get_value(Name, Qs, undefined).
+
+-spec catch_no_such_user_or_vhost(fun(() -> Result), Replacement) -> Result | Replacement.
+catch_no_such_user_or_vhost(Fun, Replacement) ->
+ try
+ Fun()
+ catch throw:{error, {E, _}} when E =:= no_such_user; E =:= no_such_vhost ->
+ Replacement()
+ end.
+
+%% This retains the old, buggy, pre-23.1 behaviour of lists:sublist/3, where
+%% an error is thrown when the requested range is out of bounds.
+sublist(List, S, L) when is_integer(L), L >= 0 ->
+ lists:sublist(lists:nthtail(S-1, List), L).
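+
+%% e.g. sublist([a,b,c], 2, 5) returns [b,c], whereas sublist([a,b,c], 5, 1)
+%% raises an error because the start index is past the end of the list.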
diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_aliveness_test.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_aliveness_test.erl
new file mode 100644
index 0000000000..992ec954e2
--- /dev/null
+++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_aliveness_test.erl
@@ -0,0 +1,71 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mgmt_wm_aliveness_test).
+
+-export([init/2, to_json/2, content_types_provided/2, is_authorized/2]).
+-export([resource_exists/2]).
+-export([variances/2]).
+
+-include("rabbit_mgmt.hrl").
+-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-define(QUEUE, <<"aliveness-test">>).
+
+%%--------------------------------------------------------------------
+
+init(Req, _State) ->
+ {cowboy_rest, rabbit_mgmt_headers:set_common_permission_headers(Req, ?MODULE), #context{}}.
+
+variances(Req, Context) ->
+ {[<<"accept-encoding">>, <<"origin">>], Req, Context}.
+
+content_types_provided(ReqData, Context) ->
+ {rabbit_mgmt_util:responder_map(to_json), ReqData, Context}.
+
+resource_exists(ReqData, Context) ->
+ {case rabbit_mgmt_util:vhost(ReqData) of
+ not_found -> false;
+ _ -> true
+ end, ReqData, Context}.
+
+to_json(ReqData, Context) ->
+ rabbit_mgmt_util:with_channel(
+ rabbit_mgmt_util:vhost(ReqData),
+ ReqData,
+ Context,
+ fun(Ch) ->
+ #'queue.declare_ok'{queue = ?QUEUE} = amqp_channel:call(Ch, #'queue.declare'{
+ queue = ?QUEUE
+ }),
+ ok = amqp_channel:call(Ch, #'basic.publish'{routing_key = ?QUEUE}, #amqp_msg{
+ payload = <<"test_message">>
+ }),
+ case amqp_channel:call(Ch, #'basic.get'{queue = ?QUEUE, no_ack = true}) of
+ {#'basic.get_ok'{}, _} ->
+ %% Don't delete the queue. If this is pinged every few
+ %% seconds we don't want to create a mnesia transaction
+ %% each time.
+ rabbit_mgmt_util:reply([{status, ok}], ReqData, Context);
+ #'basic.get_empty'{} ->
+ Reason = <<"aliveness-test queue is empty">>,
+ failure(Reason, ReqData, Context);
+ Error ->
+ Reason = rabbit_data_coercion:to_binary(Error),
+ failure(Reason, ReqData, Context)
+ end
+ end
+ ).
+
+failure(Reason, ReqData0, Context0) ->
+ Body = #{status => failed, reason => Reason},
+ {Response, ReqData1, Context1} = rabbit_mgmt_util:reply(Body, ReqData0, Context0),
+ {stop, cowboy_req:reply(?HEALTH_CHECK_FAILURE_STATUS, #{}, Response, ReqData1), Context1}.
+
+is_authorized(ReqData, Context) ->
+ rabbit_mgmt_util:is_authorized_vhost(ReqData, Context).
diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_auth.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_auth.erl
new file mode 100644
index 0000000000..6899fc54ee
--- /dev/null
+++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_auth.erl
@@ -0,0 +1,51 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mgmt_wm_auth).
+
+-export([init/2, to_json/2, content_types_provided/2, is_authorized/2]).
+-export([variances/2]).
+
+-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+%%--------------------------------------------------------------------
+
+init(Req, _State) ->
+ {cowboy_rest, rabbit_mgmt_headers:set_common_permission_headers(Req, ?MODULE), #context{}}.
+
+variances(Req, Context) ->
+ {[<<"accept-encoding">>, <<"origin">>], Req, Context}.
+
+content_types_provided(ReqData, Context) ->
+ {rabbit_mgmt_util:responder_map(to_json), ReqData, Context}.
+
+to_json(ReqData, Context) ->
+ EnableUAA = application:get_env(rabbitmq_management, enable_uaa, false),
+ Data = case EnableUAA of
+ true ->
+ UAAClientId = application:get_env(rabbitmq_management, uaa_client_id, ""),
+ UAALocation = application:get_env(rabbitmq_management, uaa_location, ""),
+ case is_invalid([UAAClientId, UAALocation]) of
+ true ->
+ rabbit_log:warning("Disabling OAuth 2 authorization, relevant configuration settings are missing", []),
+ [{enable_uaa, false}, {uaa_client_id, <<>>}, {uaa_location, <<>>}];
+ false ->
+ [{enable_uaa, true},
+ {uaa_client_id, rabbit_data_coercion:to_binary(UAAClientId)},
+ {uaa_location, rabbit_data_coercion:to_binary(UAALocation)}]
+ end;
+ false ->
+ [{enable_uaa, false}, {uaa_client_id, <<>>}, {uaa_location, <<>>}]
+ end,
+ rabbit_mgmt_util:reply(Data, ReqData, Context).
+
+is_authorized(ReqData, Context) ->
+ {true, ReqData, Context}.
+
+is_invalid(List) ->
+ lists:any(fun(V) -> V == "" end, List).
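+
+%% e.g. is_invalid(["", "https://uaa.example"]) is true, so OAuth support is
+%% advertised only when both uaa_client_id and uaa_location are set.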
diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_auth_attempts.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_auth_attempts.erl
new file mode 100644
index 0000000000..cdd11826ea
--- /dev/null
+++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_auth_attempts.erl
@@ -0,0 +1,79 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mgmt_wm_auth_attempts).
+
+-export([init/2, to_json/2, content_types_provided/2, allowed_methods/2, is_authorized/2,
+ delete_resource/2, resource_exists/2]).
+-export([variances/2]).
+
+-import(rabbit_misc, [pget/2]).
+
+-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+%%--------------------------------------------------------------------
+init(Req, [Mode]) ->
+ {cowboy_rest, rabbit_mgmt_headers:set_common_permission_headers(Req, ?MODULE),
+ {Mode, #context{}}}.
+
+variances(Req, Context) ->
+ {[<<"accept-encoding">>, <<"origin">>], Req, Context}.
+
+content_types_provided(ReqData, Context) ->
+ {rabbit_mgmt_util:responder_map(to_json), ReqData, Context}.
+
+allowed_methods(ReqData, Context) ->
+ {[<<"HEAD">>, <<"GET">>, <<"DELETE">>, <<"OPTIONS">>], ReqData, Context}.
+
+resource_exists(ReqData, Context) ->
+ {node_exists(ReqData, get_node(ReqData)), ReqData, Context}.
+
+to_json(ReqData, {Mode, Context}) ->
+ rabbit_mgmt_util:reply(augment(Mode, ReqData), ReqData, Context).
+
+is_authorized(ReqData, {Mode, Context}) ->
+ {Res, Req2, Context2} = rabbit_mgmt_util:is_authorized_monitor(ReqData, Context),
+ {Res, Req2, {Mode, Context2}}.
+
+delete_resource(ReqData, Context) ->
+ Node = get_node(ReqData),
+ case node_exists(ReqData, Node) of
+ false ->
+ {false, ReqData, Context};
+ true ->
+ case rpc:call(Node, rabbit_core_metrics, reset_auth_attempt_metrics, [], infinity) of
+ {badrpc, _} -> {false, ReqData, Context};
+ ok -> {true, ReqData, Context}
+ end
+ end.
+%%--------------------------------------------------------------------
+get_node(ReqData) ->
+ list_to_atom(binary_to_list(rabbit_mgmt_util:id(node, ReqData))).
+
+node_exists(ReqData, Node) ->
+ case [N || N <- rabbit_mgmt_wm_nodes:all_nodes(ReqData),
+ proplists:get_value(name, N) == Node] of
+ [] -> false;
+ [_] -> true
+ end.
+
+augment(Mode, ReqData) ->
+ Node = get_node(ReqData),
+ case node_exists(ReqData, Node) of
+ false ->
+ not_found;
+ true ->
+ Fun = case Mode of
+ all -> get_auth_attempts;
+ by_source -> get_auth_attempts_by_source
+ end,
+ case rpc:call(Node, rabbit_core_metrics, Fun, [], infinity) of
+ {badrpc, _} -> not_available;
+ Result -> Result
+ end
+ end.
diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_binding.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_binding.erl
new file mode 100644
index 0000000000..4396edc53b
--- /dev/null
+++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_binding.erl
@@ -0,0 +1,141 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mgmt_wm_binding).
+
+-export([init/2, resource_exists/2, to_json/2,
+ content_types_provided/2, content_types_accepted/2,
+ is_authorized/2, allowed_methods/2, delete_resource/2]).
+-export([variances/2]).
+
+-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+%%--------------------------------------------------------------------
+init(Req, _State) ->
+ {cowboy_rest, rabbit_mgmt_headers:set_common_permission_headers(Req, ?MODULE), #context{}}.
+
+variances(Req, Context) ->
+ {[<<"accept-encoding">>, <<"origin">>], Req, Context}.
+
+content_types_provided(ReqData, Context) ->
+ {rabbit_mgmt_util:responder_map(to_json), ReqData, Context}.
+
+content_types_accepted(ReqData, Context) ->
+ {[{'*', accept_content}], ReqData, Context}.
+
+allowed_methods(ReqData, Context) ->
+ {[<<"HEAD">>, <<"GET">>, <<"DELETE">>, <<"OPTIONS">>], ReqData, Context}.
+
+resource_exists(ReqData, Context) ->
+ Binding = binding(ReqData),
+ {case Binding of
+ not_found -> false;
+ {bad_request, _} -> false;
+ _ -> case rabbit_binding:exists(Binding) of
+ true -> true;
+ _ -> false
+ end
+ end, ReqData, Context}.
+
+to_json(ReqData, Context) ->
+ with_binding(ReqData, Context,
+ fun(Binding) ->
+ rabbit_mgmt_util:reply(
+ rabbit_mgmt_format:binding(Binding),
+ ReqData, Context)
+ end).
+
+delete_resource(ReqData, Context) ->
+ MethodName = case rabbit_mgmt_util:destination_type(ReqData) of
+ exchange -> 'exchange.unbind';
+ queue -> 'queue.unbind'
+ end,
+ with_binding(
+ ReqData, Context,
+ fun(#binding{ source = #resource{name = S},
+ destination = #resource{name = D},
+ key = Key,
+ args = Args }) ->
+ rabbit_mgmt_util:direct_request(
+ MethodName,
+ fun rabbit_mgmt_format:format_accept_content/1,
+ [{queue, D},
+ {exchange, S},
+ {destination, D},
+ {source, S},
+ {routing_key, Key},
+ {arguments, Args}],
+ "Unbinding error: ~s", ReqData, Context)
+ end).
+
+is_authorized(ReqData, Context) ->
+ rabbit_mgmt_util:is_authorized_vhost(ReqData, Context).
+
+%%--------------------------------------------------------------------
+
+binding(ReqData) ->
+ case rabbit_mgmt_util:vhost(ReqData) of
+ not_found -> not_found;
+ VHost -> Source = rabbit_mgmt_util:id(source, ReqData),
+ Dest = rabbit_mgmt_util:id(destination, ReqData),
+ DestType = rabbit_mgmt_util:destination_type(ReqData),
+ Props = rabbit_mgmt_util:id(props, ReqData),
+ SName = rabbit_misc:r(VHost, exchange, Source),
+ DName = rabbit_misc:r(VHost, DestType, Dest),
+ case unpack(SName, DName, Props) of
+ {bad_request, Str} ->
+ {bad_request, Str};
+ {Key, Args} ->
+ #binding{ source = SName,
+ destination = DName,
+ key = Key,
+ args = Args }
+ end
+ end.
+
+unpack(Src, Dst, Props) ->
+ case rabbit_mgmt_format:tokenise(binary_to_list(Props)) of
+ ["\~"] -> {<<>>, []};
+    %% when routing_key is explicitly set to `null` in the JSON payload,
+    %% the value is stored as the atom null. See rabbitmq/rabbitmq-management#723 for details.
+ ["null"] -> {null, []};
+ ["undefined"] -> {undefined, []};
+ [Key] -> {unquote(Key), []};
+ ["\~", ArgsEnc] -> lookup(<<>>, ArgsEnc, Src, Dst);
+ %% see above
+ ["null", ArgsEnc] -> lookup(null, ArgsEnc, Src, Dst);
+ ["undefined", ArgsEnc] -> lookup(undefined, ArgsEnc, Src, Dst);
+ [Key, ArgsEnc] -> lookup(unquote(Key), ArgsEnc, Src, Dst);
+ _ -> {bad_request, {too_many_tokens, Props}}
+ end.
+
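+%% unpack/3 above splits the props path segment into a routing-key token and
+%% an optional arguments-hash token: a "~" token stands for the empty routing
+%% key, and the hash token is matched against args_hash/1 of each candidate
+%% binding.
+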
+lookup(RoutingKey, ArgsEnc, Src, Dst) ->
+ lookup(RoutingKey, unquote(ArgsEnc),
+ rabbit_binding:list_for_source_and_destination(Src, Dst)).
+
+lookup(_RoutingKey, _Hash, []) ->
+ {bad_request, "binding not found"};
+lookup(RoutingKey, Hash, [#binding{args = Args} | Rest]) ->
+ case args_hash(Args) =:= Hash of
+ true -> {RoutingKey, Args};
+ false -> lookup(RoutingKey, Hash, Rest)
+ end.
+
+args_hash(Args) ->
+ rabbit_mgmt_format:args_hash(Args).
+
+unquote(Name) ->
+ list_to_binary(rabbit_http_util:unquote(Name)).
+
+with_binding(ReqData, Context, Fun) ->
+ case binding(ReqData) of
+ {bad_request, Reason} ->
+ rabbit_mgmt_util:bad_request(Reason, ReqData, Context);
+ Binding ->
+ Fun(Binding)
+ end.
diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_bindings.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_bindings.erl
new file mode 100644
index 0000000000..5ed0a82f8d
--- /dev/null
+++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_bindings.erl
@@ -0,0 +1,149 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mgmt_wm_bindings).
+
+-export([init/2, to_json/2, content_types_provided/2, is_authorized/2]).
+-export([allowed_methods/2]).
+-export([content_types_accepted/2, accept_content/2, resource_exists/2]).
+-export([basic/1, augmented/2]).
+-export([variances/2]).
+
+-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+%%--------------------------------------------------------------------
+
+init(Req, [Mode]) ->
+ {cowboy_rest, rabbit_mgmt_headers:set_common_permission_headers(Req, ?MODULE), {Mode, #context{}}}.
+
+variances(Req, Context) ->
+ {[<<"accept-encoding">>, <<"origin">>], Req, Context}.
+
+content_types_provided(ReqData, Context) ->
+ {rabbit_mgmt_util:responder_map(to_json), ReqData, Context}.
+
+%% The current version of Cowboy forces us to report that the resource doesn't
+%% exist here in order to get a 201 response. It seems Cowboy confuses the
+%% resource from the request with the resource that will be created by POST.
+%% https://github.com/ninenines/cowboy/issues/723#issuecomment-161319576
+resource_exists(ReqData, {Mode, Context}) ->
+ case cowboy_req:method(ReqData) of
+ <<"POST">> ->
+ {false, ReqData, {Mode, Context}};
+ _ ->
+ {case list_bindings(Mode, ReqData) of
+ vhost_not_found -> false;
+ _ -> true
+ end, ReqData, {Mode, Context}}
+ end.
+
+content_types_accepted(ReqData, Context) ->
+ {[{'*', accept_content}], ReqData, Context}.
+
+%% Methods to add to the CORS header.
+%% This clause is called by rabbit_mgmt_cors:handle_options/2
+allowed_methods(undefined, undefined) ->
+ {[<<"HEAD">>, <<"GET">>, <<"POST">>, <<"OPTIONS">>], undefined, undefined};
+allowed_methods(ReqData, {Mode, Context}) ->
+ {case Mode of
+ source_destination -> [<<"HEAD">>, <<"GET">>, <<"POST">>, <<"OPTIONS">>];
+ _ -> [<<"HEAD">>, <<"GET">>, <<"OPTIONS">>]
+ end, ReqData, {Mode, Context}}.
+
+to_json(ReqData, {Mode, Context}) ->
+ Bs = [rabbit_mgmt_format:binding(B) || B <- list_bindings(Mode, ReqData)],
+ rabbit_mgmt_util:reply_list(
+ rabbit_mgmt_util:filter_vhost(Bs, ReqData, Context),
+ ["vhost", "source", "type", "destination",
+ "routing_key", "properties_key"],
+ ReqData, {Mode, Context}).
+
+accept_content(ReqData0, {_Mode, Context}) ->
+ {ok, Body, ReqData} = rabbit_mgmt_util:read_complete_body(ReqData0),
+ Source = rabbit_mgmt_util:id(source, ReqData),
+ Dest = rabbit_mgmt_util:id(destination, ReqData),
+ DestType = rabbit_mgmt_util:id(dtype, ReqData),
+ VHost = rabbit_mgmt_util:vhost(ReqData),
+ {ok, Props} = rabbit_mgmt_util:decode(Body),
+ MethodName = case rabbit_mgmt_util:destination_type(ReqData) of
+ exchange -> 'exchange.bind';
+ queue -> 'queue.bind'
+ end,
+ {Key, Args} = key_args(DestType, Props),
+ case rabbit_mgmt_util:direct_request(
+ MethodName,
+ fun rabbit_mgmt_format:format_accept_content/1,
+ [{queue, Dest},
+ {exchange, Source},
+ {destination, Dest},
+ {source, Source},
+ {routing_key, Key},
+ {arguments, Args}],
+ "Binding error: ~s", ReqData, Context) of
+ {stop, _, _} = Res ->
+ Res;
+ {true, ReqData, Context2} ->
+ From = binary_to_list(cowboy_req:path(ReqData)),
+ Prefix = rabbit_mgmt_util:get_path_prefix(),
+ BindingProps = rabbit_mgmt_format:pack_binding_props(Key, Args),
+ UrlWithBindings = rabbit_mgmt_format:url("/api/bindings/~s/e/~s/~s/~s/~s",
+ [VHost, Source, DestType,
+ Dest, BindingProps]),
+ To = Prefix ++ binary_to_list(UrlWithBindings),
+ Loc = rabbit_web_dispatch_util:relativise(From, To),
+ {{true, Loc}, ReqData, Context2}
+ end.
+
+is_authorized(ReqData, {Mode, Context}) ->
+ {Res, RD2, C2} = rabbit_mgmt_util:is_authorized_vhost(ReqData, Context),
+ {Res, RD2, {Mode, C2}}.
+
+%%--------------------------------------------------------------------
+
+basic(ReqData) ->
+ [rabbit_mgmt_format:binding(B) ||
+ B <- list_bindings(all, ReqData)].
+
+augmented(ReqData, Context) ->
+ rabbit_mgmt_util:filter_vhost(basic(ReqData), ReqData, Context).
+
+key_args(<<"q">>, Props) ->
+ #'queue.bind'{routing_key = K, arguments = A} =
+ rabbit_mgmt_util:props_to_method(
+ 'queue.bind', Props, [], []),
+ {K, A};
+
+key_args(<<"e">>, Props) ->
+ #'exchange.bind'{routing_key = K, arguments = A} =
+ rabbit_mgmt_util:props_to_method(
+ 'exchange.bind', Props,
+ [], []),
+ {K, A}.
+
+%%--------------------------------------------------------------------
+
+list_bindings(all, ReqData) ->
+ rabbit_mgmt_util:all_or_one_vhost(ReqData,
+ fun (VHost) ->
+ rabbit_binding:list(VHost)
+ end);
+list_bindings(exchange_source, ReqData) ->
+ rabbit_binding:list_for_source(r(exchange, exchange, ReqData));
+list_bindings(exchange_destination, ReqData) ->
+ rabbit_binding:list_for_destination(r(exchange, exchange, ReqData));
+list_bindings(queue, ReqData) ->
+ rabbit_binding:list_for_destination(r(queue, destination, ReqData));
+list_bindings(source_destination, ReqData) ->
+ DestType = rabbit_mgmt_util:destination_type(ReqData),
+ rabbit_binding:list_for_source_and_destination(
+ r(exchange, source, ReqData),
+ r(DestType, destination, ReqData)).
+
+r(Type, Name, ReqData) ->
+ rabbit_misc:r(rabbit_mgmt_util:vhost(ReqData), Type,
+ rabbit_mgmt_util:id(Name, ReqData)).
diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_channel.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_channel.erl
new file mode 100644
index 0000000000..0b4aae6c13
--- /dev/null
+++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_channel.erl
@@ -0,0 +1,68 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mgmt_wm_channel).
+
+-export([init/2, to_json/2, content_types_provided/2, is_authorized/2]).
+-export([resource_exists/2]).
+-export([variances/2]).
+
+-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+%%--------------------------------------------------------------------
+
+init(Req, _State) ->
+ {cowboy_rest, rabbit_mgmt_headers:set_common_permission_headers(Req, ?MODULE), #context{}}.
+
+variances(Req, Context) ->
+ {[<<"accept-encoding">>, <<"origin">>], Req, Context}.
+
+content_types_provided(ReqData, Context) ->
+ {rabbit_mgmt_util:responder_map(to_json), ReqData, Context}.
+
+resource_exists(ReqData, Context) ->
+ case rabbit_mgmt_util:disable_stats(ReqData) of
+ false ->
+ case channel(ReqData) of
+ not_found -> {false, ReqData, Context};
+ _Conn -> {true, ReqData, Context}
+ end;
+ true ->
+ {false, ReqData, Context}
+ end.
+
+to_json(ReqData, Context) ->
+ case rabbit_mgmt_util:disable_stats(ReqData) of
+ false ->
+ Payload = rabbit_mgmt_format:clean_consumer_details(
+ rabbit_mgmt_format:strip_pids(channel(ReqData))),
+ rabbit_mgmt_util:reply(
+ maps:from_list(Payload),
+ ReqData, Context);
+ true ->
+ rabbit_mgmt_util:bad_request(<<"Stats in management UI are disabled on this node">>, ReqData, Context)
+ end.
+
+is_authorized(ReqData, Context) ->
+ case rabbit_mgmt_util:disable_stats(ReqData) of
+ false ->
+ try
+ rabbit_mgmt_util:is_authorized_user(ReqData, Context, channel(ReqData))
+ catch
+ {error, invalid_range_parameters, Reason} ->
+ rabbit_mgmt_util:bad_request(iolist_to_binary(Reason), ReqData, Context)
+ end;
+ true ->
+ rabbit_mgmt_util:bad_request(<<"Stats in management UI are disabled on this node">>, ReqData, Context)
+ end.
+
+%%--------------------------------------------------------------------
+
+channel(ReqData) ->
+ rabbit_mgmt_db:get_channel(rabbit_mgmt_util:id(channel, ReqData),
+ rabbit_mgmt_util:range(ReqData)).
diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_channels.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_channels.erl
new file mode 100644
index 0000000000..a064b2f9f9
--- /dev/null
+++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_channels.erl
@@ -0,0 +1,50 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mgmt_wm_channels).
+
+-export([init/2, to_json/2, content_types_provided/2, is_authorized/2,
+ augmented/2]).
+-export([variances/2]).
+
+-import(rabbit_misc, [pget/2]).
+
+-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+%%--------------------------------------------------------------------
+
+init(Req, _State) ->
+ {cowboy_rest, rabbit_mgmt_headers:set_common_permission_headers(Req, ?MODULE), #context{}}.
+
+variances(Req, Context) ->
+ {[<<"accept-encoding">>, <<"origin">>], Req, Context}.
+
+content_types_provided(ReqData, Context) ->
+ {rabbit_mgmt_util:responder_map(to_json), ReqData, Context}.
+
+to_json(ReqData, Context) ->
+ case rabbit_mgmt_util:disable_stats(ReqData) of
+ false ->
+ try
+ rabbit_mgmt_util:reply_list_or_paginate(augmented(ReqData, Context),
+ ReqData, Context)
+ catch
+ {error, invalid_range_parameters, Reason} ->
+ rabbit_mgmt_util:bad_request(iolist_to_binary(Reason), ReqData, Context)
+ end;
+ true ->
+ rabbit_mgmt_util:bad_request(<<"Stats in management UI are disabled on this node">>, ReqData, Context)
+ end.
+
+is_authorized(ReqData, Context) ->
+ rabbit_mgmt_util:is_authorized(ReqData, Context).
+
+augmented(ReqData, Context) ->
+ rabbit_mgmt_util:filter_conn_ch_list(
+ rabbit_mgmt_db:get_all_channels(
+ rabbit_mgmt_util:range(ReqData)), ReqData, Context).
diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_channels_vhost.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_channels_vhost.erl
new file mode 100644
index 0000000000..454a29e0bb
--- /dev/null
+++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_channels_vhost.erl
@@ -0,0 +1,54 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2010-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mgmt_wm_channels_vhost).
+
+%% Lists channels in a vhost
+
+-export([init/2, to_json/2, content_types_provided/2, is_authorized/2,
+ augmented/2, resource_exists/2]).
+-export([variances/2]).
+
+-import(rabbit_misc, [pget/2]).
+
+-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+%%--------------------------------------------------------------------
+
+init(Req, _State) ->
+ {cowboy_rest, rabbit_mgmt_headers:set_common_permission_headers(Req, ?MODULE), #context{}}.
+
+variances(Req, Context) ->
+ {[<<"accept-encoding">>, <<"origin">>], Req, Context}.
+
+content_types_provided(ReqData, Context) ->
+ {rabbit_mgmt_util:responder_map(to_json), ReqData, Context}.
+
+resource_exists(ReqData, Context) ->
+ {rabbit_vhost:exists(rabbit_mgmt_util:id(vhost, ReqData)), ReqData, Context}.
+
+to_json(ReqData, Context) ->
+ case rabbit_mgmt_util:disable_stats(ReqData) of
+ false ->
+ try
+ rabbit_mgmt_util:reply_list(augmented(ReqData, Context), ReqData, Context)
+ catch
+ {error, invalid_range_parameters, Reason} ->
+ rabbit_mgmt_util:bad_request(iolist_to_binary(Reason), ReqData, Context)
+ end;
+ true ->
+ rabbit_mgmt_util:bad_request(<<"Stats in management UI are disabled on this node">>, ReqData, Context)
+ end.
+
+is_authorized(ReqData, Context) ->
+ rabbit_mgmt_util:is_authorized_vhost(ReqData, Context).
+
+augmented(ReqData, Context) ->
+ rabbit_mgmt_util:filter_conn_ch_list(
+ rabbit_mgmt_db:get_all_channels(
+ rabbit_mgmt_util:range(ReqData)), ReqData, Context).
diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_cluster_name.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_cluster_name.erl
new file mode 100644
index 0000000000..132da93ad2
--- /dev/null
+++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_cluster_name.erl
@@ -0,0 +1,59 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mgmt_wm_cluster_name).
+
+-export([init/2, resource_exists/2, to_json/2,
+ content_types_provided/2, content_types_accepted/2,
+ is_authorized/2, allowed_methods/2, accept_content/2]).
+-export([variances/2]).
+
+-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+%%--------------------------------------------------------------------
+
+init(Req, _State) ->
+ {cowboy_rest, rabbit_mgmt_headers:set_common_permission_headers(Req, ?MODULE), #context{}}.
+
+variances(Req, Context) ->
+ {[<<"accept-encoding">>, <<"origin">>], Req, Context}.
+
+content_types_provided(ReqData, Context) ->
+ {rabbit_mgmt_util:responder_map(to_json), ReqData, Context}.
+
+content_types_accepted(ReqData, Context) ->
+ {[{'*', accept_content}], ReqData, Context}.
+
+allowed_methods(ReqData, Context) ->
+ {[<<"HEAD">>, <<"GET">>, <<"PUT">>, <<"OPTIONS">>], ReqData, Context}.
+
+resource_exists(ReqData, Context) ->
+ {true, ReqData, Context}.
+
+to_json(ReqData, Context) ->
+ rabbit_mgmt_util:reply(
+ [{name, rabbit_nodes:cluster_name()}], ReqData, Context).
+
+accept_content(ReqData0, Context = #context{user = #user{username = Username}}) ->
+ rabbit_mgmt_util:with_decode(
+ [name], ReqData0, Context, fun([Name], _, ReqData) ->
+ rabbit_nodes:set_cluster_name(
+ as_binary(Name), Username),
+ {true, ReqData, Context}
+ end).
+
+is_authorized(ReqData, Context) ->
+ case cowboy_req:method(ReqData) of
+ <<"PUT">> -> rabbit_mgmt_util:is_authorized_admin(ReqData, Context);
+ _ -> rabbit_mgmt_util:is_authorized(ReqData, Context)
+ end.
+
+as_binary(Val) when is_binary(Val) ->
+ Val;
+as_binary(Val) when is_list(Val) ->
+ list_to_binary(Val).
diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_connection.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_connection.erl
new file mode 100644
index 0000000000..7de605e6a5
--- /dev/null
+++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_connection.erl
@@ -0,0 +1,98 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mgmt_wm_connection).
+
+-export([init/2, resource_exists/2, to_json/2, content_types_provided/2,
+ is_authorized/2, allowed_methods/2, delete_resource/2, conn/1]).
+-export([variances/2]).
+
+-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+%%--------------------------------------------------------------------
+
+init(Req, _State) ->
+ {cowboy_rest, rabbit_mgmt_headers:set_common_permission_headers(Req, ?MODULE), #context{}}.
+
+variances(Req, Context) ->
+ {[<<"accept-encoding">>, <<"origin">>], Req, Context}.
+
+content_types_provided(ReqData, Context) ->
+ {rabbit_mgmt_util:responder_map(to_json), ReqData, Context}.
+
+allowed_methods(ReqData, Context) ->
+ {[<<"HEAD">>, <<"GET">>, <<"DELETE">>, <<"OPTIONS">>], ReqData, Context}.
+
+resource_exists(ReqData, Context) ->
+ case conn(ReqData) of
+ not_found -> {false, ReqData, Context};
+ _Conn -> {true, ReqData, Context}
+ end.
+
+to_json(ReqData, Context) ->
+ case rabbit_mgmt_util:disable_stats(ReqData) of
+ false ->
+ rabbit_mgmt_util:reply(
+ maps:from_list(rabbit_mgmt_format:strip_pids(conn_stats(ReqData))), ReqData, Context);
+ true ->
+ rabbit_mgmt_util:reply([{name, rabbit_mgmt_util:id(connection, ReqData)}],
+ ReqData, Context)
+ end.
+
+delete_resource(ReqData, Context) ->
+ case conn(ReqData) of
+ not_found -> ok;
+ Conn ->
+ case proplists:get_value(pid, Conn) of
+ undefined -> ok;
+ Pid when is_pid(Pid) ->
+ force_close_connection(ReqData, Conn, Pid)
+ end
+ end,
+ {true, ReqData, Context}.
+
+is_authorized(ReqData, Context) ->
+ try
+ rabbit_mgmt_util:is_authorized_user(ReqData, Context, conn(ReqData))
+ catch
+ {error, invalid_range_parameters, Reason} ->
+ rabbit_mgmt_util:bad_request(iolist_to_binary(Reason), ReqData, Context)
+ end.
+
+%%--------------------------------------------------------------------
+
+conn(ReqData) ->
+ case rabbit_mgmt_util:disable_stats(ReqData) of
+ false ->
+ conn_stats(ReqData);
+ true ->
+ case rabbit_connection_tracking:lookup(rabbit_mgmt_util:id(connection, ReqData)) of
+ #tracked_connection{name = Name, pid = Pid, username = Username, type = Type} ->
+ [{name, Name}, {pid, Pid}, {user, Username}, {type, Type}];
+ not_found ->
+ not_found
+ end
+ end.
+
+conn_stats(ReqData) ->
+ rabbit_mgmt_db:get_connection(rabbit_mgmt_util:id(connection, ReqData),
+ rabbit_mgmt_util:range_ceil(ReqData)).
+
+force_close_connection(ReqData, Conn, Pid) ->
+ Reason = case cowboy_req:header(<<"x-reason">>, ReqData) of
+ undefined -> "Closed via management plugin";
+ V -> binary_to_list(V)
+ end,
+ case proplists:get_value(type, Conn) of
+ direct -> amqp_direct_connection:server_close(Pid, 320, Reason);
+ network -> rabbit_networking:close_connection(Pid, Reason);
+ _ ->
+ % best effort, this will work for connections to the stream plugin
+ gen_server:call(Pid, {shutdown, Reason}, infinity)
+ end,
+ ok.
diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_connection_channels.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_connection_channels.erl
new file mode 100644
index 0000000000..dfbcfb5ba1
--- /dev/null
+++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_connection_channels.erl
@@ -0,0 +1,54 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mgmt_wm_connection_channels).
+
+-export([init/2, to_json/2, content_types_provided/2, is_authorized/2]).
+-export([resource_exists/2]).
+-export([variances/2]).
+
+-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+%%--------------------------------------------------------------------
+
+init(Req, _State) ->
+ {cowboy_rest, rabbit_mgmt_headers:set_common_permission_headers(Req, ?MODULE), #context{}}.
+
+variances(Req, Context) ->
+ {[<<"accept-encoding">>, <<"origin">>], Req, Context}.
+
+content_types_provided(ReqData, Context) ->
+ {rabbit_mgmt_util:responder_map(to_json), ReqData, Context}.
+
+resource_exists(ReqData, Context) ->
+ case rabbit_mgmt_wm_connection:conn(ReqData) of
+        not_found -> {false, ReqData, Context};
+ _Conn -> {true, ReqData, Context}
+ end.
+
+to_json(ReqData, Context) ->
+ Name = proplists:get_value(name, rabbit_mgmt_wm_connection:conn(ReqData)),
+ Chs = rabbit_mgmt_db:get_all_channels(rabbit_mgmt_util:range(ReqData)),
+ rabbit_mgmt_util:reply_list(
+ [Ch || Ch <- rabbit_mgmt_util:filter_conn_ch_list(Chs, ReqData, Context),
+ conn_name(Ch) =:= Name],
+ ReqData, Context).
+
+is_authorized(ReqData, Context) ->
+ try
+ rabbit_mgmt_util:is_authorized_user(
+ ReqData, Context, rabbit_mgmt_wm_connection:conn(ReqData))
+ catch
+ {error, invalid_range_parameters, Reason} ->
+ rabbit_mgmt_util:bad_request(iolist_to_binary(Reason), ReqData, Context)
+ end.
+
+%%--------------------------------------------------------------------
+
+conn_name(Ch) ->
+ proplists:get_value(name, proplists:get_value(connection_details, Ch)).
diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_connections.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_connections.erl
new file mode 100644
index 0000000000..0e1345abca
--- /dev/null
+++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_connections.erl
@@ -0,0 +1,54 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mgmt_wm_connections).
+
+-export([init/2, to_json/2, content_types_provided/2, is_authorized/2,
+ augmented/2]).
+-export([variances/2]).
+
+-import(rabbit_misc, [pget/2]).
+
+-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+%%--------------------------------------------------------------------
+
+init(Req, _State) ->
+ {cowboy_rest, rabbit_mgmt_headers:set_common_permission_headers(Req, ?MODULE), #context{}}.
+
+variances(Req, Context) ->
+ {[<<"accept-encoding">>, <<"origin">>], Req, Context}.
+
+content_types_provided(ReqData, Context) ->
+ {rabbit_mgmt_util:responder_map(to_json), ReqData, Context}.
+
+to_json(ReqData, Context) ->
+ try
+ Connections = do_connections_query(ReqData, Context),
+ rabbit_mgmt_util:reply_list_or_paginate(Connections, ReqData, Context)
+ catch
+ {error, invalid_range_parameters, Reason} ->
+ rabbit_mgmt_util:bad_request(iolist_to_binary(Reason), ReqData, Context)
+ end.
+
+is_authorized(ReqData, Context) ->
+ rabbit_mgmt_util:is_authorized(ReqData, Context).
+
+augmented(ReqData, Context) ->
+ rabbit_mgmt_util:filter_conn_ch_list(
+ rabbit_mgmt_db:get_all_connections(
+ rabbit_mgmt_util:range_ceil(ReqData)), ReqData, Context).
+
+do_connections_query(ReqData, Context) ->
+ case rabbit_mgmt_util:disable_stats(ReqData) of
+ false ->
+ augmented(ReqData, Context);
+ true ->
+ rabbit_mgmt_util:filter_tracked_conn_list(rabbit_connection_tracking:list(),
+ ReqData, Context)
+ end.
diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_connections_vhost.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_connections_vhost.erl
new file mode 100644
index 0000000000..1840a7ae45
--- /dev/null
+++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_connections_vhost.erl
@@ -0,0 +1,49 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2010-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mgmt_wm_connections_vhost).
+
+%% Lists connections in a vhost
+
+-export([init/2, to_json/2, content_types_provided/2, is_authorized/2,
+ augmented/2, resource_exists/2]).
+-export([variances/2]).
+
+-import(rabbit_misc, [pget/2]).
+
+-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+%%--------------------------------------------------------------------
+
+init(Req, _State) ->
+ {cowboy_rest, rabbit_mgmt_headers:set_common_permission_headers(Req, ?MODULE), #context{}}.
+
+variances(Req, Context) ->
+ {[<<"accept-encoding">>, <<"origin">>], Req, Context}.
+
+content_types_provided(ReqData, Context) ->
+ {rabbit_mgmt_util:responder_map(to_json), ReqData, Context}.
+
+resource_exists(ReqData, Context) ->
+ {rabbit_vhost:exists(rabbit_mgmt_util:id(vhost, ReqData)), ReqData, Context}.
+
+to_json(ReqData, Context) ->
+ try
+ rabbit_mgmt_util:reply_list(augmented(ReqData, Context), ReqData, Context)
+ catch
+ {error, invalid_range_parameters, Reason} ->
+ rabbit_mgmt_util:bad_request(iolist_to_binary(Reason), ReqData, Context)
+ end.
+
+is_authorized(ReqData, Context) ->
+ rabbit_mgmt_util:is_authorized_vhost(ReqData, Context).
+
+augmented(ReqData, Context) ->
+ rabbit_mgmt_util:filter_conn_ch_list(
+ rabbit_mgmt_db:get_all_connections(
+ rabbit_mgmt_util:range_ceil(ReqData)), ReqData, Context).
diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_consumers.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_consumers.erl
new file mode 100644
index 0000000000..56945d3a5d
--- /dev/null
+++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_consumers.erl
@@ -0,0 +1,59 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+-module(rabbit_mgmt_wm_consumers).
+
+-export([init/2, to_json/2, content_types_provided/2, resource_exists/2,
+ is_authorized/2]).
+-export([variances/2]).
+
+-import(rabbit_misc, [pget/2]).
+
+-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+%%--------------------------------------------------------------------
+
+init(Req, _State) ->
+ {cowboy_rest, rabbit_mgmt_headers:set_common_permission_headers(Req, ?MODULE), #context{}}.
+
+variances(Req, Context) ->
+ {[<<"accept-encoding">>, <<"origin">>], Req, Context}.
+
+content_types_provided(ReqData, Context) ->
+ {rabbit_mgmt_util:responder_map(to_json), ReqData, Context}.
+
+resource_exists(ReqData, Context) ->
+ {case rabbit_mgmt_util:vhost(ReqData) of
+ not_found -> false;
+ none -> true; % none means `all`
+ _ -> true
+ end, ReqData, Context}.
+
+to_json(ReqData, Context = #context{user = User}) ->
+ case rabbit_mgmt_util:disable_stats(ReqData) of
+ false ->
+ Arg = case rabbit_mgmt_util:vhost(ReqData) of
+ none -> all;
+ VHost -> VHost
+ end,
+ Consumers = rabbit_mgmt_format:strip_pids(rabbit_mgmt_db:get_all_consumers(Arg)),
+ Formatted = [rabbit_mgmt_format:format_consumer_arguments(C) || C <- Consumers],
+ rabbit_mgmt_util:reply_list(
+ filter_user(Formatted, User), ReqData, Context);
+ true ->
+ rabbit_mgmt_util:bad_request(<<"Stats in management UI are disabled on this node">>, ReqData, Context)
+ end.
+
+is_authorized(ReqData, Context) ->
+ rabbit_mgmt_util:is_authorized(ReqData, Context).
+
+filter_user(List, #user{username = Username, tags = Tags}) ->
+ case rabbit_mgmt_util:is_monitor(Tags) of
+ true -> List;
+ false -> [I || I <- List,
+ pget(user, pget(channel_details, I)) == Username]
+ end.
diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_definitions.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_definitions.erl
new file mode 100644
index 0000000000..c0687993a9
--- /dev/null
+++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_definitions.erl
@@ -0,0 +1,298 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mgmt_wm_definitions).
+
+-export([init/2, to_json/2, content_types_provided/2, is_authorized/2]).
+-export([content_types_accepted/2, allowed_methods/2, accept_json/2]).
+-export([accept_multipart/2]).
+-export([variances/2]).
+
+-export([apply_defs/3, apply_defs/5]).
+
+-import(rabbit_misc, [pget/2]).
+
+-include("rabbit_mgmt.hrl").
+-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+%%--------------------------------------------------------------------
+
+init(Req, _State) ->
+ {cowboy_rest, rabbit_mgmt_headers:set_common_permission_headers(Req, ?MODULE), #context{}}.
+
+variances(Req, Context) ->
+ {[<<"accept-encoding">>, <<"origin">>], Req, Context}.
+
+content_types_provided(ReqData, Context) ->
+ {rabbit_mgmt_util:responder_map(to_json), ReqData, Context}.
+
+content_types_accepted(ReqData, Context) ->
+ {[{{<<"application">>, <<"json">>, '*'}, accept_json},
+ {{<<"multipart">>, <<"form-data">>, '*'}, accept_multipart}], ReqData, Context}.
+
+allowed_methods(ReqData, Context) ->
+ {[<<"HEAD">>, <<"GET">>, <<"POST">>, <<"OPTIONS">>], ReqData, Context}.
+
+to_json(ReqData, Context) ->
+ case rabbit_mgmt_util:vhost(ReqData) of
+ none ->
+ all_definitions(ReqData, Context);
+ not_found ->
+ rabbit_mgmt_util:bad_request(rabbit_data_coercion:to_binary("vhost_not_found"),
+ ReqData, Context);
+ VHost ->
+ vhost_definitions(ReqData, VHost, Context)
+ end.
+
+all_definitions(ReqData, Context) ->
+ Xs = [X || X <- rabbit_mgmt_wm_exchanges:basic(ReqData),
+ export_exchange(X)],
+ Qs = [Q || Q <- rabbit_mgmt_wm_queues:basic(ReqData),
+ export_queue(Q)],
+ QNames = [{pget(name, Q), pget(vhost, Q)} || Q <- Qs],
+ Bs = [B || B <- rabbit_mgmt_wm_bindings:basic(ReqData),
+ export_binding(B, QNames)],
+ Vsn = rabbit:base_product_version(),
+ ProductName = rabbit:product_name(),
+ ProductVersion = rabbit:product_version(),
+ rabbit_mgmt_util:reply(
+ [{rabbit_version, rabbit_data_coercion:to_binary(Vsn)},
+ {rabbitmq_version, rabbit_data_coercion:to_binary(Vsn)},
+ {product_name, rabbit_data_coercion:to_binary(ProductName)},
+ {product_version, rabbit_data_coercion:to_binary(ProductVersion)}] ++
+ filter(
+ [{users, rabbit_mgmt_wm_users:users(all)},
+ {vhosts, rabbit_mgmt_wm_vhosts:basic()},
+ {permissions, rabbit_mgmt_wm_permissions:permissions()},
+ {topic_permissions, rabbit_mgmt_wm_topic_permissions:topic_permissions()},
+ {parameters, rabbit_mgmt_wm_parameters:basic(ReqData)},
+ {global_parameters, rabbit_mgmt_wm_global_parameters:basic()},
+ {policies, rabbit_mgmt_wm_policies:basic(ReqData)},
+ {queues, Qs},
+ {exchanges, Xs},
+ {bindings, Bs}]),
+ case rabbit_mgmt_util:qs_val(<<"download">>, ReqData) of
+ undefined -> ReqData;
+ Filename -> rabbit_mgmt_util:set_resp_header(
+ <<"Content-Disposition">>,
+ "attachment; filename=" ++
+ binary_to_list(Filename), ReqData)
+ end,
+ Context).
+
+accept_json(ReqData0, Context) ->
+ {ok, Body, ReqData} = rabbit_mgmt_util:read_complete_body(ReqData0),
+ accept(Body, ReqData, Context).
+
+vhost_definitions(ReqData, VHost, Context) ->
+ %% rabbit_mgmt_wm_<>:basic/1 filters by VHost if it is available
+ Xs = [strip_vhost(X) || X <- rabbit_mgmt_wm_exchanges:basic(ReqData),
+ export_exchange(X)],
+ VQs = [Q || Q <- rabbit_mgmt_wm_queues:basic(ReqData), export_queue(Q)],
+ Qs = [strip_vhost(Q) || Q <- VQs],
+ QNames = [{pget(name, Q), pget(vhost, Q)} || Q <- VQs],
+ Bs = [strip_vhost(B) || B <- rabbit_mgmt_wm_bindings:basic(ReqData),
+ export_binding(B, QNames)],
+ {ok, Vsn} = application:get_key(rabbit, vsn),
+ Parameters = [rabbit_mgmt_format:parameter(
+ rabbit_mgmt_wm_parameters:fix_shovel_publish_properties(P))
+ || P <- rabbit_runtime_parameters:list(VHost)],
+ rabbit_mgmt_util:reply(
+ [{rabbit_version, rabbit_data_coercion:to_binary(Vsn)}] ++
+ filter(
+ [{parameters, Parameters},
+ {policies, rabbit_mgmt_wm_policies:basic(ReqData)},
+ {queues, Qs},
+ {exchanges, Xs},
+ {bindings, Bs}]),
+ case rabbit_mgmt_util:qs_val(<<"download">>, ReqData) of
+ undefined -> ReqData;
+ Filename ->
+ HeaderVal = "attachment; filename=" ++ binary_to_list(Filename),
+ rabbit_mgmt_util:set_resp_header(<<"Content-Disposition">>, HeaderVal, ReqData)
+ end,
+ Context).
+
+accept_multipart(ReqData0, Context) ->
+ {Parts, ReqData} = get_all_parts(ReqData0),
+ Redirect = get_part(<<"redirect">>, Parts),
+ Payload = get_part(<<"file">>, Parts),
+ Resp = {Res, _, _} = accept(Payload, ReqData, Context),
+ case {Res, Redirect} of
+ {true, unknown} -> {true, ReqData, Context};
+ {true, _} -> {{true, Redirect}, ReqData, Context};
+ _ -> Resp
+ end.
+
+is_authorized(ReqData, Context) ->
+ case rabbit_mgmt_util:qs_val(<<"auth">>, ReqData) of
+ undefined ->
+ case rabbit_mgmt_util:qs_val(<<"token">>, ReqData) of
+ undefined ->
+ rabbit_mgmt_util:is_authorized_admin(ReqData, Context);
+ Token ->
+ rabbit_mgmt_util:is_authorized_admin(ReqData, Context, Token)
+ end;
+ Auth ->
+ is_authorized_qs(ReqData, Context, Auth)
+ end.
+
+%% Support for the web UI - it can't add a normal "authorization"
+%% header for a file download.
+is_authorized_qs(ReqData, Context, Auth) ->
+ case rabbit_web_dispatch_util:parse_auth_header("Basic " ++ Auth) of
+ [Username, Password] -> rabbit_mgmt_util:is_authorized_admin(
+ ReqData, Context, Username, Password);
+ _ -> {?AUTH_REALM, ReqData, Context}
+ end.
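+
+%% For example (credentials are illustrative), the UI appends
+%%   ?auth=Z3Vlc3Q6Z3Vlc3Q=
+%% to the download URL; the value is base64("guest:guest") and is parsed
+%% above exactly like a Basic authorization header.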
+
+%%--------------------------------------------------------------------
+
+decode(<<"">>) ->
+ {ok, #{}};
+decode(Body) ->
+ try
+ Decoded = rabbit_json:decode(Body),
+ Normalised = maps:fold(fun(K, V, Acc) ->
+ Acc#{binary_to_atom(K, utf8) => V}
+ end, Decoded, Decoded),
+ {ok, Normalised}
+ catch error:_ -> {error, not_json}
+ end.
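+
+%% Illustrative behaviour (not part of the original module):
+%%   decode(<<"{\"users\": []}">>) -> {ok, #{users => [], <<"users">> => []}}
+%% The fold seeds the accumulator with the decoded map itself, so atomised
+%% keys are added alongside the original binary keys rather than replacing them.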
+
+accept(Body, ReqData, Context = #context{user = #user{username = Username}}) ->
+    %% At this point the request has been fully received,
+    %% so the idle_timeout no longer serves any purpose.
+ disable_idle_timeout(ReqData),
+ case decode(Body) of
+ {error, E} ->
+ rabbit_log:error("Encountered an error when parsing definitions: ~p", [E]),
+ rabbit_mgmt_util:bad_request(rabbit_data_coercion:to_binary("failed_to_parse_json"),
+ ReqData, Context);
+ {ok, Map} ->
+ case rabbit_mgmt_util:vhost(ReqData) of
+ none ->
+ case apply_defs(Map, Username) of
+ {error, E} ->
+ rabbit_log:error("Encountered an error when importing definitions: ~p", [E]),
+ rabbit_mgmt_util:bad_request(E, ReqData, Context);
+ ok -> {true, ReqData, Context}
+ end;
+ not_found ->
+ rabbit_mgmt_util:not_found(rabbit_data_coercion:to_binary("vhost_not_found"),
+ ReqData, Context);
+ VHost when is_binary(VHost) ->
+ case apply_defs(Map, Username, VHost) of
+ {error, E} ->
+ rabbit_log:error("Encountered an error when importing definitions: ~p", [E]),
+ rabbit_mgmt_util:bad_request(E, ReqData, Context);
+ ok -> {true, ReqData, Context}
+ end
+ end
+ end.
+
+disable_idle_timeout(#{pid := Pid, streamid := StreamID}) ->
+ Pid ! {{Pid, StreamID}, {set_options, #{idle_timeout => infinity}}}.
+
+-spec apply_defs(Map :: #{atom() => any()}, ActingUser :: rabbit_types:username()) -> 'ok' | {error, term()}.
+
+apply_defs(Body, ActingUser) ->
+ rabbit_definitions:apply_defs(Body, ActingUser).
+
+-spec apply_defs(Map :: #{atom() => any()}, ActingUser :: rabbit_types:username(),
+ VHost :: vhost:name()) -> 'ok' | {error, term()}.
+
+apply_defs(Body, ActingUser, VHost) ->
+ rabbit_definitions:apply_defs(Body, ActingUser, VHost).
+
+-spec apply_defs(Map :: #{atom() => any()},
+ ActingUser :: rabbit_types:username(),
+ SuccessFun :: fun(() -> 'ok'),
+ ErrorFun :: fun((any()) -> 'ok'),
+ VHost :: vhost:name()) -> 'ok' | {error, term()}.
+
+apply_defs(Body, ActingUser, SuccessFun, ErrorFun, VHost) ->
+ rabbit_definitions:apply_defs(Body, ActingUser, SuccessFun, ErrorFun, VHost).
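+
+%% A minimal usage sketch (the definitions map and acting user are
+%% hypothetical); all three arities delegate to rabbit_definitions:
+%%   rabbit_mgmt_wm_definitions:apply_defs(#{queues => []}, <<"guest">>, <<"/">>).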
+
+get_all_parts(Req) ->
+ get_all_parts(Req, []).
+
+get_all_parts(Req0, Acc) ->
+ case cowboy_req:read_part(Req0) of
+ {done, Req1} ->
+ {Acc, Req1};
+ {ok, Headers, Req1} ->
+ Name = case cow_multipart:form_data(Headers) of
+ {data, N} -> N;
+ {file, N, _, _} -> N
+ end,
+ {ok, Body, Req2} = stream_part_body(Req1, <<>>),
+ get_all_parts(Req2, [{Name, Body}|Acc])
+ end.
+
+stream_part_body(Req0, Acc) ->
+ case cowboy_req:read_part_body(Req0) of
+ {more, Data, Req1} ->
+ stream_part_body(Req1, <<Acc/binary, Data/binary>>);
+ {ok, Data, Req1} ->
+ {ok, <<Acc/binary, Data/binary>>, Req1}
+ end.
+
+get_part(Name, Parts) ->
+ case lists:keyfind(Name, 1, Parts) of
+ false -> unknown;
+ {_, Value} -> Value
+ end.
+
+export_queue(Queue) ->
+ pget(owner_pid, Queue) == none.
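+
+%% A queue with an owner pid is exclusive to a connection and therefore
+%% transient, so it is excluded from the export.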
+
+export_binding(Binding, Qs) ->
+ Src = pget(source, Binding),
+ Dest = pget(destination, Binding),
+ DestType = pget(destination_type, Binding),
+ VHost = pget(vhost, Binding),
+ Src =/= <<"">>
+ andalso
+ ( (DestType =:= queue andalso lists:member({Dest, VHost}, Qs))
+ orelse (DestType =:= exchange andalso Dest =/= <<"">>) ).
+
+export_exchange(Exchange) ->
+ export_name(pget(name, Exchange)).
+
+export_name(<<>>) -> false;
+export_name(<<"amq.", _/binary>>) -> false;
+export_name(_Name) -> true.
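+
+%% For example, export_name(<<>>) and export_name(<<"amq.direct">>) are false:
+%% the default and amq.* exchanges always exist, so they are not exported.
+%% export_name(<<"my.exchange">>) is true.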
+
+%%--------------------------------------------------------------------
+
+rw_state() ->
+ [{users, [name, password_hash, hashing_algorithm, tags, limits]},
+ {vhosts, [name]},
+ {permissions, [user, vhost, configure, write, read]},
+ {topic_permissions, [user, vhost, exchange, write, read]},
+ {parameters, [vhost, component, name, value]},
+ {global_parameters, [name, value]},
+ {policies, [vhost, name, pattern, definition, priority, 'apply-to']},
+ {queues, [name, vhost, durable, auto_delete, arguments]},
+ {exchanges, [name, vhost, type, durable, auto_delete, internal,
+ arguments]},
+ {bindings, [source, vhost, destination, destination_type, routing_key,
+ arguments]}].
+
+filter(Items) ->
+ [filter_items(N, V, proplists:get_value(N, rw_state())) || {N, V} <- Items].
+
+filter_items(Name, List, Allowed) ->
+ {Name, [filter_item(I, Allowed) || I <- List]}.
+
+filter_item(Item, Allowed) ->
+ [{K, Fact} || {K, Fact} <- Item, lists:member(K, Allowed)].
+
+strip_vhost(Item) ->
+ lists:keydelete(vhost, 1, Item).
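+
+%% For example:
+%%   strip_vhost([{name, <<"q1">>}, {vhost, <<"/">>}]) -> [{name, <<"q1">>}]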
diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_exchange.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_exchange.erl
new file mode 100644
index 0000000000..2d5930d1d0
--- /dev/null
+++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_exchange.erl
@@ -0,0 +1,90 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mgmt_wm_exchange).
+
+-export([init/2, resource_exists/2, to_json/2,
+ content_types_provided/2, content_types_accepted/2,
+ is_authorized/2, allowed_methods/2, accept_content/2,
+ delete_resource/2, exchange/1, exchange/2]).
+-export([variances/2]).
+
+-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+%%--------------------------------------------------------------------
+
+init(Req, _State) ->
+ {cowboy_rest, rabbit_mgmt_headers:set_common_permission_headers(Req, ?MODULE), #context{}}.
+
+variances(Req, Context) ->
+ {[<<"accept-encoding">>, <<"origin">>], Req, Context}.
+
+content_types_provided(ReqData, Context) ->
+ {rabbit_mgmt_util:responder_map(to_json), ReqData, Context}.
+
+content_types_accepted(ReqData, Context) ->
+ {[{'*', accept_content}], ReqData, Context}.
+
+allowed_methods(ReqData, Context) ->
+ {[<<"HEAD">>, <<"GET">>, <<"PUT">>, <<"DELETE">>, <<"OPTIONS">>], ReqData, Context}.
+
+resource_exists(ReqData, Context) ->
+ {case exchange(ReqData) of
+ not_found -> false;
+ _ -> true
+ end, ReqData, Context}.
+
+to_json(ReqData, Context) ->
+ try
+ [X] = rabbit_mgmt_db:augment_exchanges(
+ [exchange(ReqData)], rabbit_mgmt_util:range(ReqData), full),
+ rabbit_mgmt_util:reply(rabbit_mgmt_format:strip_pids(X), ReqData, Context)
+ catch
+ {error, invalid_range_parameters, Reason} ->
+ rabbit_mgmt_util:bad_request(iolist_to_binary(Reason), ReqData, Context)
+ end.
+
+accept_content(ReqData, Context) ->
+ Name = rabbit_mgmt_util:id(exchange, ReqData),
+ rabbit_mgmt_util:direct_request(
+ 'exchange.declare',
+ fun rabbit_mgmt_format:format_accept_content/1,
+ [{exchange, Name}], "Declare exchange error: ~s", ReqData, Context).
+
+delete_resource(ReqData, Context) ->
+    %% We need to retrieve 'if-unused' manually: the HTTP API spells it
+    %% with '-' while the record field uses '_'
+ Name = id(ReqData),
+ IfUnused = <<"true">> =:= rabbit_mgmt_util:qs_val(<<"if-unused">>, ReqData),
+ rabbit_mgmt_util:direct_request(
+ 'exchange.delete',
+ fun rabbit_mgmt_format:format_accept_content/1,
+ [{exchange, Name},
+ {if_unused, IfUnused}], "Delete exchange error: ~s", ReqData, Context).
+
+is_authorized(ReqData, Context) ->
+ rabbit_mgmt_util:is_authorized_vhost(ReqData, Context).
+
+%%--------------------------------------------------------------------
+
+exchange(ReqData) ->
+ case rabbit_mgmt_util:vhost(ReqData) of
+ not_found -> not_found;
+ VHost -> exchange(VHost, id(ReqData))
+ end.
+
+exchange(VHost, XName) ->
+ Name = rabbit_misc:r(VHost, exchange, XName),
+ case rabbit_exchange:lookup(Name) of
+ {ok, X} -> rabbit_mgmt_format:exchange(
+ rabbit_exchange:info(X));
+ {error, not_found} -> not_found
+ end.
+
+id(ReqData) ->
+ rabbit_mgmt_util:id(exchange, ReqData).
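+
+%% Illustrative lookup (default API prefix and vhost assumed):
+%%   GET /api/exchanges/%2F/amq.direct
+%% resolves the "amq.direct" exchange in the "/" virtual host via exchange/2.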
diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_exchange_publish.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_exchange_publish.erl
new file mode 100644
index 0000000000..7e90b54ba1
--- /dev/null
+++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_exchange_publish.erl
@@ -0,0 +1,105 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mgmt_wm_exchange_publish).
+
+-export([init/2, resource_exists/2, is_authorized/2,
+ allowed_methods/2, content_types_provided/2, accept_content/2,
+ content_types_accepted/2]).
+-export([variances/2]).
+
+-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+%%--------------------------------------------------------------------
+
+init(Req, _State) ->
+ {cowboy_rest, rabbit_mgmt_headers:set_common_permission_headers(Req, ?MODULE), #context{}}.
+
+variances(Req, Context) ->
+ {[<<"accept-encoding">>, <<"origin">>], Req, Context}.
+
+allowed_methods(ReqData, Context) ->
+ {[<<"POST">>, <<"OPTIONS">>], ReqData, Context}.
+
+content_types_provided(ReqData, Context) ->
+ {rabbit_mgmt_util:responder_map(to_json), ReqData, Context}.
+
+resource_exists(ReqData, Context) ->
+ {case rabbit_mgmt_wm_exchange:exchange(ReqData) of
+ not_found -> false;
+ _ -> true
+ end, ReqData, Context}.
+
+content_types_accepted(ReqData, Context) ->
+ {[{'*', accept_content}], ReqData, Context}.
+
+accept_content(ReqData, Context) ->
+ rabbit_mgmt_util:post_respond(do_it(ReqData, Context)).
+
+do_it(ReqData0, Context) ->
+ VHost = rabbit_mgmt_util:vhost(ReqData0),
+ X = rabbit_mgmt_util:id(exchange, ReqData0),
+ rabbit_mgmt_util:with_decode(
+ [routing_key, properties, payload, payload_encoding], ReqData0, Context,
+ fun ([RoutingKey, Props0, Payload0, Enc], _, ReqData) when is_binary(Payload0) ->
+ rabbit_mgmt_util:with_channel(
+ VHost, ReqData, Context,
+ fun (Ch) ->
+ MRef = erlang:monitor(process, Ch),
+ amqp_channel:register_confirm_handler(Ch, self()),
+ amqp_channel:register_return_handler(Ch, self()),
+ amqp_channel:call(Ch, #'confirm.select'{}),
+ Props = rabbit_mgmt_format:to_basic_properties(Props0),
+ Payload = decode(Payload0, Enc),
+ amqp_channel:cast(Ch, #'basic.publish'{
+ exchange = X,
+ routing_key = RoutingKey,
+ mandatory = true},
+ #amqp_msg{props = Props,
+ payload = Payload}),
+ receive
+ {#'basic.return'{}, _} ->
+ receive
+ #'basic.ack'{} -> ok
+ end,
+ good(MRef, false, ReqData, Context);
+ #'basic.ack'{} ->
+ good(MRef, true, ReqData, Context);
+ #'basic.nack'{} ->
+ erlang:demonitor(MRef),
+ bad(rejected, ReqData, Context);
+ {'DOWN', _, _, _, Err} ->
+ bad(Err, ReqData, Context)
+ end
+ end);
+ ([_RoutingKey, _Props, _Payload, _Enc], _, _ReqData) ->
+ throw({error, payload_not_string})
+ end).
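+
+%% Illustrative request handled by this resource (names are examples):
+%%   POST /api/exchanges/%2F/amq.default/publish
+%%   {"routing_key":"my-queue","properties":{},
+%%    "payload":"hello","payload_encoding":"string"}
+%% The reply is {"routed": true} or {"routed": false}, driven by the
+%% 'basic.return' / 'basic.ack' handling above.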
+
+good(MRef, Routed, ReqData, Context) ->
+ erlang:demonitor(MRef),
+ rabbit_mgmt_util:reply([{routed, Routed}], ReqData, Context).
+
+bad({shutdown, {connection_closing,
+ {server_initiated_close, Code, Reason}}}, ReqData, Context) ->
+ rabbit_mgmt_util:bad_request_exception(Code, Reason, ReqData, Context);
+
+bad({shutdown, {server_initiated_close, Code, Reason}}, ReqData, Context) ->
+ rabbit_mgmt_util:bad_request_exception(Code, Reason, ReqData, Context);
+bad(rejected, ReqData, Context) ->
+ Msg = "Unable to publish message. Check queue limits.",
+ rabbit_mgmt_util:bad_request_exception(rejected, Msg, ReqData, Context).
+
+is_authorized(ReqData, Context) ->
+ rabbit_mgmt_util:is_authorized_vhost(ReqData, Context).
+
+%%--------------------------------------------------------------------
+
+decode(Payload, <<"string">>) -> Payload;
+decode(Payload, <<"base64">>) -> rabbit_mgmt_util:b64decode_or_throw(Payload);
+decode(_Payload, Enc) -> throw({error, {unsupported_encoding, Enc}}).
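+
+%% For example: decode(<<"hello">>, <<"string">>) -> <<"hello">> and
+%% decode(<<"aGVsbG8=">>, <<"base64">>) -> <<"hello">>.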
diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_exchanges.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_exchanges.erl
new file mode 100644
index 0000000000..9f7abab20f
--- /dev/null
+++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_exchanges.erl
@@ -0,0 +1,75 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mgmt_wm_exchanges).
+
+-export([init/2, to_json/2, content_types_provided/2, is_authorized/2,
+ resource_exists/2, basic/1, augmented/2]).
+-export([variances/2]).
+
+-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-define(DEFAULT_SORT, ["vhost", "name"]).
+
+-define(BASIC_COLUMNS, ["vhost", "name", "type", "durable", "auto_delete",
+ "internal", "arguments", "pid"]).
+
+%%--------------------------------------------------------------------
+
+init(Req, _State) ->
+ {cowboy_rest, rabbit_mgmt_headers:set_common_permission_headers(Req, ?MODULE), #context{}}.
+
+variances(Req, Context) ->
+ {[<<"accept-encoding">>, <<"origin">>], Req, Context}.
+
+content_types_provided(ReqData, Context) ->
+ {rabbit_mgmt_util:responder_map(to_json), ReqData, Context}.
+
+resource_exists(ReqData, Context) ->
+ {case exchanges0(ReqData) of
+ vhost_not_found -> false;
+ _ -> true
+ end, ReqData, Context}.
+
+to_json(ReqData, Context) ->
+ try
+ Basic = rabbit_mgmt_util:filter_vhost(basic(ReqData), ReqData,
+ Context),
+ Data = rabbit_mgmt_util:augment_resources(Basic, ?DEFAULT_SORT,
+ ?BASIC_COLUMNS, ReqData,
+ Context, fun augment/2),
+ rabbit_mgmt_util:reply(Data, ReqData, Context)
+ catch
+ {error, invalid_range_parameters, Reason} ->
+ rabbit_mgmt_util:bad_request(iolist_to_binary(Reason), ReqData, Context)
+ end.
+
+is_authorized(ReqData, Context) ->
+ rabbit_mgmt_util:is_authorized_vhost(ReqData, Context).
+
+%%--------------------------------------------------------------------
+
+augment(Basic, ReqData) ->
+ case rabbit_mgmt_util:disable_stats(ReqData) of
+ false ->
+ rabbit_mgmt_db:augment_exchanges(Basic, rabbit_mgmt_util:range(ReqData),
+ basic);
+ true ->
+ Basic
+ end.
+
+augmented(ReqData, Context) ->
+ rabbit_mgmt_db:augment_exchanges(
+ rabbit_mgmt_util:filter_vhost(basic(ReqData), ReqData, Context),
+ rabbit_mgmt_util:range(ReqData), basic).
+
+basic(ReqData) ->
+ [rabbit_mgmt_format:exchange(X) || X <- exchanges0(ReqData)].
+
+exchanges0(ReqData) ->
+ rabbit_mgmt_util:all_or_one_vhost(ReqData, fun rabbit_exchange:info_all/1).
diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_extensions.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_extensions.erl
new file mode 100644
index 0000000000..26c3ccbee6
--- /dev/null
+++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_extensions.erl
@@ -0,0 +1,33 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mgmt_wm_extensions).
+
+-export([init/2, to_json/2, content_types_provided/2, is_authorized/2]).
+-export([variances/2]).
+
+-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+%%--------------------------------------------------------------------
+
+init(Req, _State) ->
+ {cowboy_rest, rabbit_mgmt_headers:set_common_permission_headers(Req, ?MODULE), #context{}}.
+
+variances(Req, Context) ->
+ {[<<"accept-encoding">>, <<"origin">>], Req, Context}.
+
+content_types_provided(ReqData, Context) ->
+ {rabbit_mgmt_util:responder_map(to_json), ReqData, Context}.
+
+to_json(ReqData, Context) ->
+ Modules = rabbit_mgmt_dispatcher:modules([]),
+ rabbit_mgmt_util:reply(
+ [Module:web_ui() || Module <- Modules], ReqData, Context).
+
+is_authorized(ReqData, Context) ->
+ rabbit_mgmt_util:is_authorized(ReqData, Context).
diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_feature_flag_enable.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_feature_flag_enable.erl
new file mode 100644
index 0000000000..fafb993951
--- /dev/null
+++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_feature_flag_enable.erl
@@ -0,0 +1,55 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2019-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mgmt_wm_feature_flag_enable).
+
+-export([init/2,
+ content_types_accepted/2, is_authorized/2,
+ allowed_methods/2, accept_content/2]).
+-export([variances/2]).
+
+-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+%%--------------------------------------------------------------------
+
+init(Req, _Args) ->
+ {cowboy_rest,
+ rabbit_mgmt_headers:set_common_permission_headers(Req, ?MODULE),
+ #context{}}.
+
+variances(Req, Context) ->
+ {[<<"accept-encoding">>, <<"origin">>], Req, Context}.
+
+content_types_accepted(ReqData, Context) ->
+ {[{{<<"application">>, <<"json">>, '*'}, accept_content}], ReqData, Context}.
+
+allowed_methods(ReqData, Context) ->
+ {[<<"PUT">>, <<"OPTIONS">>], ReqData, Context}.
+
+is_authorized(ReqData, Context) ->
+ rabbit_mgmt_util:is_authorized_admin(ReqData, Context).
+
+accept_content(ReqData, #context{} = Context) ->
+ NameS = rabbit_mgmt_util:id(name, ReqData),
+ try
+ Name = list_to_existing_atom(binary_to_list(NameS)),
+ case rabbit_feature_flags:enable(Name) of
+ ok ->
+ {true, ReqData, Context};
+ {error, Reason1} ->
+ FormattedReason1 = rabbit_ff_extra:format_error(Reason1),
+ rabbit_mgmt_util:bad_request(
+ list_to_binary(FormattedReason1), ReqData, Context)
+ end
+ catch
+ _:badarg ->
+ Reason2 = unsupported,
+ FormattedReason2 = rabbit_ff_extra:format_error(Reason2),
+ rabbit_mgmt_util:bad_request(
+ list_to_binary(FormattedReason2), ReqData, Context)
+ end.
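+
+%% Illustrative request ('quorum_queue' is an example of a real flag name):
+%%   PUT /api/feature-flags/quorum_queue/enable with body {}
+%% enables the flag cluster-wide, or returns a 400 with a formatted reason.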
diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_feature_flags.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_feature_flags.erl
new file mode 100644
index 0000000000..3f25045a36
--- /dev/null
+++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_feature_flags.erl
@@ -0,0 +1,45 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2019-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mgmt_wm_feature_flags).
+
+-export([init/2, to_json/2,
+ content_types_provided/2,
+ is_authorized/2, allowed_methods/2]).
+-export([variances/2]).
+-export([feature_flags/0]).
+
+-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+%%--------------------------------------------------------------------
+
+init(Req, _Args) ->
+ {cowboy_rest,
+ rabbit_mgmt_headers:set_common_permission_headers(Req, ?MODULE),
+ #context{}}.
+
+variances(Req, Context) ->
+ {[<<"accept-encoding">>, <<"origin">>], Req, Context}.
+
+content_types_provided(ReqData, Context) ->
+ {rabbit_mgmt_util:responder_map(to_json), ReqData, Context}.
+
+allowed_methods(ReqData, Context) ->
+ {[<<"HEAD">>, <<"GET">>, <<"OPTIONS">>], ReqData, Context}.
+
+to_json(ReqData, Context) ->
+ rabbit_mgmt_util:reply_list(feature_flags(), ReqData, Context).
+
+is_authorized(ReqData, Context) ->
+    rabbit_mgmt_util:is_authorized_admin(ReqData, Context).
+
+%%--------------------------------------------------------------------
+
+feature_flags() ->
+ rabbit_ff_extra:cli_info().
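+
+%% The listing mirrors 'rabbitmq-diagnostics list_feature_flags': one entry
+%% per flag, with fields such as name, state and provided_by.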
diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_global_parameter.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_global_parameter.erl
new file mode 100644
index 0000000000..a81c5be8fa
--- /dev/null
+++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_global_parameter.erl
@@ -0,0 +1,71 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mgmt_wm_global_parameter).
+
+-export([init/2, resource_exists/2, to_json/2,
+ content_types_provided/2, content_types_accepted/2,
+ is_authorized/2, allowed_methods/2, accept_content/2,
+ delete_resource/2]).
+-export([variances/2]).
+
+-import(rabbit_misc, [pget/2]).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl").
+
+%%--------------------------------------------------------------------
+
+init(Req, _State) ->
+ {cowboy_rest, rabbit_mgmt_headers:set_common_permission_headers(Req, ?MODULE), #context{}}.
+
+variances(Req, Context) ->
+ {[<<"accept-encoding">>, <<"origin">>], Req, Context}.
+
+content_types_provided(ReqData, Context) ->
+ {rabbit_mgmt_util:responder_map(to_json), ReqData, Context}.
+
+content_types_accepted(ReqData, Context) ->
+ {[{'*', accept_content}], ReqData, Context}.
+
+allowed_methods(ReqData, Context) ->
+ {[<<"HEAD">>, <<"GET">>, <<"PUT">>, <<"DELETE">>, <<"OPTIONS">>], ReqData, Context}.
+
+resource_exists(ReqData, Context) ->
+ {case parameter(ReqData) of
+ not_found -> false;
+ _ -> true
+ end, ReqData, Context}.
+
+to_json(ReqData, Context) ->
+ rabbit_mgmt_util:reply(rabbit_mgmt_format:parameter(parameter(ReqData)),
+ ReqData, Context).
+
+accept_content(ReqData0, Context = #context{user = #user{username = Username}}) ->
+ rabbit_mgmt_util:with_decode(
+ [value], ReqData0, Context,
+ fun([Value], _, ReqData) ->
+ Val = if is_map(Value) -> maps:to_list(Value);
+ true -> Value
+ end,
+ rabbit_runtime_parameters:set_global(name(ReqData), Val, Username),
+ {true, ReqData, Context}
+ end).
+
+delete_resource(ReqData, Context = #context{user = #user{username = Username}}) ->
+ ok = rabbit_runtime_parameters:clear_global(name(ReqData), Username),
+ {true, ReqData, Context}.
+
+is_authorized(ReqData, Context) ->
+ rabbit_mgmt_util:is_authorized_global_parameters(ReqData, Context).
+
+%%--------------------------------------------------------------------
+
+parameter(ReqData) ->
+ rabbit_runtime_parameters:lookup_global(name(ReqData)).
+
+name(ReqData) -> rabbit_data_coercion:to_atom(rabbit_mgmt_util:id(name, ReqData)).
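+
+%% Illustrative round trip ('cluster_name' is a real global parameter):
+%%   PUT /api/global-parameters/cluster_name with {"value": "my-cluster"}
+%% stores the value under the atom 'cluster_name'; a GET returns it.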
diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_global_parameters.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_global_parameters.erl
new file mode 100644
index 0000000000..fa23283ad5
--- /dev/null
+++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_global_parameters.erl
@@ -0,0 +1,36 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mgmt_wm_global_parameters).
+
+-export([init/2, to_json/2, content_types_provided/2, is_authorized/2]).
+-export([variances/2]).
+-export([basic/0]).
+
+-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl").
+
+%%--------------------------------------------------------------------
+
+init(Req, _State) ->
+ {cowboy_rest, rabbit_mgmt_headers:set_common_permission_headers(Req, ?MODULE), #context{}}.
+
+variances(Req, Context) ->
+ {[<<"accept-encoding">>, <<"origin">>], Req, Context}.
+
+content_types_provided(ReqData, Context) ->
+ {rabbit_mgmt_util:responder_map(to_json), ReqData, Context}.
+
+to_json(ReqData, Context) ->
+ rabbit_mgmt_util:reply_list(basic(), ReqData, Context).
+
+is_authorized(ReqData, Context) ->
+ rabbit_mgmt_util:is_authorized_global_parameters(ReqData, Context).
+
+%%--------------------------------------------------------------------
+
+basic() ->
+ rabbit_runtime_parameters:list_global().
diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_health_check_alarms.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_health_check_alarms.erl
new file mode 100644
index 0000000000..06db3bd79e
--- /dev/null
+++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_health_check_alarms.erl
@@ -0,0 +1,56 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+%% An HTTP API counterpart of 'rabbitmq-diagnostics check_alarms'
+-module(rabbit_mgmt_wm_health_check_alarms).
+
+-export([init/2, to_json/2, content_types_provided/2, is_authorized/2]).
+-export([resource_exists/2]).
+-export([variances/2]).
+
+-include("rabbit_mgmt.hrl").
+-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl").
+
+%%--------------------------------------------------------------------
+
+init(Req, _State) ->
+ {cowboy_rest, rabbit_mgmt_headers:set_common_permission_headers(Req, ?MODULE), #context{}}.
+
+variances(Req, Context) ->
+ {[<<"accept-encoding">>, <<"origin">>], Req, Context}.
+
+content_types_provided(ReqData, Context) ->
+ {rabbit_mgmt_util:responder_map(to_json), ReqData, Context}.
+
+resource_exists(ReqData, Context) ->
+ {true, ReqData, Context}.
+
+to_json(ReqData, Context) ->
+ Timeout = case cowboy_req:header(<<"timeout">>, ReqData) of
+ undefined -> 70000;
+ Val -> list_to_integer(binary_to_list(Val))
+ end,
+ case rabbit_alarm:get_alarms(Timeout) of
+ [] ->
+ rabbit_mgmt_util:reply([{status, ok}], ReqData, Context);
+ Xs when length(Xs) > 0 ->
+ Msg = "There are alarms in effect in the cluster",
+ failure(Msg, Xs, ReqData, Context)
+ end.
+
+failure(Message, Alarms0, ReqData, Context) ->
+ Alarms = rabbit_alarm:format_as_maps(Alarms0),
+ Body = #{
+ status => failed,
+ reason => rabbit_data_coercion:to_binary(Message),
+ alarms => Alarms
+ },
+ {Response, ReqData1, Context1} = rabbit_mgmt_util:reply(Body, ReqData, Context),
+ {stop, cowboy_req:reply(?HEALTH_CHECK_FAILURE_STATUS, #{}, Response, ReqData1), Context1}.
+
+is_authorized(ReqData, Context) ->
+ rabbit_mgmt_util:is_authorized(ReqData, Context).
diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_health_check_certificate_expiration.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_health_check_certificate_expiration.erl
new file mode 100644
index 0000000000..f0fd466b2a
--- /dev/null
+++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_health_check_certificate_expiration.erl
@@ -0,0 +1,178 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+%% An HTTP API counterpart of 'rabbitmq-diagnostics check_certificate_expiration'
+-module(rabbit_mgmt_wm_health_check_certificate_expiration).
+
+-export([init/2, to_json/2, content_types_provided/2, is_authorized/2]).
+-export([resource_exists/2]).
+-export([variances/2]).
+
+-include_lib("public_key/include/public_key.hrl").
+-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-define(DAYS_SECONDS, 86400).
+-define(WEEKS_SECONDS, ?DAYS_SECONDS * 7).
+-define(MONTHS_SECONDS, ?DAYS_SECONDS * (365 / 12)).
+-define(YEARS_SECONDS, ?DAYS_SECONDS * 365).
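+
+%% Note: ?MONTHS_SECONDS uses the average month (365/12 days) and is a
+%% float; that is acceptable here since it only feeds threshold comparisons.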
+
+%%--------------------------------------------------------------------
+
+init(Req, _State) ->
+ {cowboy_rest, rabbit_mgmt_headers:set_common_permission_headers(Req, ?MODULE), #context{}}.
+
+variances(Req, Context) ->
+ {[<<"accept-encoding">>, <<"origin">>], Req, Context}.
+
+content_types_provided(ReqData, Context) ->
+ {rabbit_mgmt_util:responder_map(to_json), ReqData, Context}.
+
+resource_exists(ReqData, Context) ->
+ {true, ReqData, Context}.
+
+to_json(ReqData, Context) ->
+ Listeners = rabbit_networking:active_listeners(),
+ Local = [L || #listener{node = N} = L <- Listeners, N == node()],
+ case convert(within(ReqData), unit(ReqData)) of
+ {error, Reason} ->
+ rabbit_mgmt_util:bad_request(iolist_to_binary(Reason), ReqData, Context);
+ Seconds ->
+ ExpiringListeners = lists:foldl(fun(L, Acc) ->
+ case listener_expiring_within(L, Seconds) of
+ false -> Acc;
+ Map -> [Map | Acc]
+ end
+ end, [], Local),
+ case ExpiringListeners of
+ [] ->
+ rabbit_mgmt_util:reply([{status, ok}], ReqData, Context);
+ _ ->
+ Msg = <<"Certificates expiring">>,
+ failure(Msg, ExpiringListeners, ReqData, Context)
+ end
+ end.
+
+failure(Message, Listeners, ReqData, Context) ->
+ {Response, ReqData1, Context1} = rabbit_mgmt_util:reply([{status, failed},
+ {reason, Message},
+ {expired, Listeners}],
+ ReqData, Context),
+ {stop, cowboy_req:reply(503, #{}, Response, ReqData1), Context1}.
+
+is_authorized(ReqData, Context) ->
+ rabbit_mgmt_util:is_authorized(ReqData, Context).
+
+within(ReqData) ->
+ rabbit_mgmt_util:id(within, ReqData).
+
+unit(ReqData) ->
+ rabbit_mgmt_util:id(unit, ReqData).
+
+convert(Time, Unit) ->
+ try
+ do_convert(binary_to_integer(Time), string:lowercase(binary_to_list(Unit)))
+ catch
+ error:badarg ->
+ {error, "Invalid expiration value."};
+ invalid_unit ->
+ {error, "Time unit not recognised. Use: days, seconds, months, years."}
+ end.
+
+do_convert(Time, "days") ->
+ Time * ?DAYS_SECONDS;
+do_convert(Time, "weeks") ->
+ Time * ?WEEKS_SECONDS;
+do_convert(Time, "months") ->
+ Time * ?MONTHS_SECONDS;
+do_convert(Time, "years") ->
+ Time * ?YEARS_SECONDS;
+do_convert(_, _) ->
+ throw(invalid_unit).
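+
+%% For example, convert(<<"4">>, <<"weeks">>) returns 2419200 (4 * 604800).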
+
+listener_expiring_within(#listener{node = Node, protocol = Protocol, ip_address = Interface,
+ port = Port, opts = Opts}, Seconds) ->
+ Certfile = proplists:get_value(certfile, Opts),
+ Cacertfile = proplists:get_value(cacertfile, Opts),
+ Now = calendar:datetime_to_gregorian_seconds(calendar:universal_time()),
+ ExpiryDate = Now + Seconds,
+ CertfileExpiresOn = expired(cert_validity(read_cert(Certfile)), ExpiryDate),
+ CacertfileExpiresOn = expired(cert_validity(read_cert(Cacertfile)), ExpiryDate),
+ case {CertfileExpiresOn, CacertfileExpiresOn} of
+ {[], []} ->
+ false;
+ _ ->
+ #{node => Node,
+ protocol => Protocol,
+ interface => list_to_binary(inet:ntoa(Interface)),
+ port => Port,
+ certfile => list_to_binary(Certfile),
+ cacertfile => list_to_binary(Cacertfile),
+ certfile_expires_on => expires_on_list(CertfileExpiresOn),
+ cacertfile_expires_on => expires_on_list(CacertfileExpiresOn)
+ }
+ end.
+
+expires_on_list({error, Reason}) ->
+ {error, list_to_binary(Reason)};
+expires_on_list(ExpiresOn) ->
+ [seconds_to_bin(S) || S <- ExpiresOn].
+
+read_cert(undefined) ->
+ undefined;
+read_cert({pem, Pem}) ->
+ Pem;
+read_cert(Path) ->
+ case file:read_file(Path) of
+ {ok, Bin} ->
+ Bin;
+ Err ->
+ Err
+ end.
+
+cert_validity(undefined) ->
+ undefined;
+%% Pass file:read_file/1 errors through as a readable string so that
+%% expires_on_list/1 can render them
+cert_validity({error, Reason}) ->
+    {error, rabbit_misc:format("failed to read certificate file: ~p", [Reason])};
+cert_validity(Cert) ->
+    PemEntries = public_key:pem_decode(Cert),
+    case PemEntries of
+ [] ->
+ {error, "The certificate file provided does not contain any PEM entry."};
+ _ ->
+ Now = calendar:datetime_to_gregorian_seconds(calendar:universal_time()),
+ lists:map(
+              fun({'Certificate', _, _} = PemEntry) ->
+                      #'Certificate'{tbsCertificate = TBSCertificate} = public_key:pem_entry_decode(PemEntry),
+ #'TBSCertificate'{validity = Validity} = TBSCertificate,
+ #'Validity'{notAfter = NotAfter, notBefore = NotBefore} = Validity,
+ Start = pubkey_cert:time_str_2_gregorian_sec(NotBefore),
+ case Start > Now of
+ true ->
+ {error, "Certificate is not yet valid"};
+ false ->
+ pubkey_cert:time_str_2_gregorian_sec(NotAfter)
+ end;
+ ({Type, _, _}) ->
+ {error, io_lib:format("The certificate file provided contains a ~p entry",
+ [Type])}
+              end, PemEntries)
+ end.
+
+expired(undefined, _ExpiryDate) ->
+ [];
+expired({error, _} = Error, _ExpiryDate) ->
+ Error;
+expired(Expires, ExpiryDate) ->
+ lists:filter(fun({error, _}) ->
+ true;
+ (Seconds) ->
+ Seconds < ExpiryDate
+ end, Expires).
+
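+%% Renders a gregorian-seconds timestamp, e.g. as <<"2023-07-04 9:05:02">>;
+%% note the hour field (~w) is not zero-padded.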
+seconds_to_bin(Seconds) ->
+ {{Y, M, D}, {H, Min, S}} = calendar:gregorian_seconds_to_datetime(Seconds),
+ list_to_binary(lists:flatten(io_lib:format("~w-~2.2.0w-~2.2.0w ~w:~2.2.0w:~2.2.0w",
+ [Y, M, D, H, Min, S]))).
diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_health_check_local_alarms.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_health_check_local_alarms.erl
new file mode 100644
index 0000000000..4553efa3e2
--- /dev/null
+++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_health_check_local_alarms.erl
@@ -0,0 +1,56 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+%% An HTTP API counterpart of 'rabbitmq-diagnostics check_local_alarms'
+-module(rabbit_mgmt_wm_health_check_local_alarms).
+
+-export([init/2, to_json/2, content_types_provided/2, is_authorized/2]).
+-export([resource_exists/2]).
+-export([variances/2]).
+
+-include("rabbit_mgmt.hrl").
+-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl").
+
+%%--------------------------------------------------------------------
+
+init(Req, _State) ->
+ {cowboy_rest, rabbit_mgmt_headers:set_common_permission_headers(Req, ?MODULE), #context{}}.
+
+variances(Req, Context) ->
+ {[<<"accept-encoding">>, <<"origin">>], Req, Context}.
+
+content_types_provided(ReqData, Context) ->
+ {rabbit_mgmt_util:responder_map(to_json), ReqData, Context}.
+
+resource_exists(ReqData, Context) ->
+ {true, ReqData, Context}.
+
+to_json(ReqData, Context) ->
+ Timeout = case cowboy_req:header(<<"timeout">>, ReqData) of
+ undefined -> 70000;
+ Val -> list_to_integer(binary_to_list(Val))
+ end,
+ case rabbit_alarm:get_local_alarms(Timeout) of
+ [] ->
+ rabbit_mgmt_util:reply([{status, ok}], ReqData, Context);
+ Xs when length(Xs) > 0 ->
+ Msg = "There are alarms in effect on the node",
+ failure(Msg, Xs, ReqData, Context)
+ end.
+
+failure(Message, Alarms0, ReqData, Context) ->
+ Alarms = rabbit_alarm:format_as_maps(Alarms0),
+ Body = #{
+ status => failed,
+ reason => rabbit_data_coercion:to_binary(Message),
+ alarms => Alarms
+ },
+ {Response, ReqData1, Context1} = rabbit_mgmt_util:reply(Body, ReqData, Context),
+ {stop, cowboy_req:reply(?HEALTH_CHECK_FAILURE_STATUS, #{}, Response, ReqData1), Context1}.
+
+is_authorized(ReqData, Context) ->
+ rabbit_mgmt_util:is_authorized(ReqData, Context).
diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_health_check_node_is_mirror_sync_critical.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_health_check_node_is_mirror_sync_critical.erl
new file mode 100644
index 0000000000..cef5512551
--- /dev/null
+++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_health_check_node_is_mirror_sync_critical.erl
@@ -0,0 +1,54 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+%% An HTTP API counterpart of 'rabbitmq-diagnostics check_if_node_is_mirror_sync_critical'
+-module(rabbit_mgmt_wm_health_check_node_is_mirror_sync_critical).
+
+-export([init/2, to_json/2, content_types_provided/2, is_authorized/2]).
+-export([resource_exists/2]).
+-export([variances/2]).
+
+-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl").
+
+%%--------------------------------------------------------------------
+
+init(Req, _State) ->
+ {cowboy_rest, rabbit_mgmt_headers:set_common_permission_headers(Req, ?MODULE), #context{}}.
+
+variances(Req, Context) ->
+ {[<<"accept-encoding">>, <<"origin">>], Req, Context}.
+
+content_types_provided(ReqData, Context) ->
+ {rabbit_mgmt_util:responder_map(to_json), ReqData, Context}.
+
+resource_exists(ReqData, Context) ->
+ {true, ReqData, Context}.
+
+to_json(ReqData, Context) ->
+ case rabbit_nodes:is_single_node_cluster() of
+ true ->
+ rabbit_mgmt_util:reply([{status, ok},
+ {reason, <<"single node cluster">>}], ReqData, Context);
+ false ->
+ case rabbit_amqqueue:list_local_mirrored_classic_without_synchronised_mirrors_for_cli() of
+ [] ->
+ rabbit_mgmt_util:reply([{status, ok}], ReqData, Context);
+ Qs when length(Qs) > 0 ->
+ Msg = <<"There are classic mirrored queues without online synchronised mirrors">>,
+ failure(Msg, Qs, ReqData, Context)
+ end
+ end.
+
+failure(Message, Qs, ReqData, Context) ->
+ {Response, ReqData1, Context1} = rabbit_mgmt_util:reply([{status, failed},
+ {reason, Message},
+ {queues, Qs}],
+ ReqData, Context),
+ {stop, cowboy_req:reply(503, #{}, Response, ReqData1), Context1}.
+
+is_authorized(ReqData, Context) ->
+ rabbit_mgmt_util:is_authorized(ReqData, Context).
diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_health_check_node_is_quorum_critical.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_health_check_node_is_quorum_critical.erl
new file mode 100644
index 0000000000..857c72b30b
--- /dev/null
+++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_health_check_node_is_quorum_critical.erl
@@ -0,0 +1,54 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+%% An HTTP API counterpart of 'rabbitmq-diagnostics check_if_node_is_quorum_critical'
+-module(rabbit_mgmt_wm_health_check_node_is_quorum_critical).
+
+-export([init/2, to_json/2, content_types_provided/2, is_authorized/2]).
+-export([resource_exists/2]).
+-export([variances/2]).
+
+-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl").
+
+%%--------------------------------------------------------------------
+
+init(Req, _State) ->
+ {cowboy_rest, rabbit_mgmt_headers:set_common_permission_headers(Req, ?MODULE), #context{}}.
+
+variances(Req, Context) ->
+ {[<<"accept-encoding">>, <<"origin">>], Req, Context}.
+
+content_types_provided(ReqData, Context) ->
+ {rabbit_mgmt_util:responder_map(to_json), ReqData, Context}.
+
+resource_exists(ReqData, Context) ->
+ {true, ReqData, Context}.
+
+to_json(ReqData, Context) ->
+ case rabbit_nodes:is_single_node_cluster() of
+ true ->
+ rabbit_mgmt_util:reply([{status, ok},
+ {reason, <<"single node cluster">>}], ReqData, Context);
+ false ->
+ case rabbit_quorum_queue:list_with_minimum_quorum_for_cli() of
+ [] ->
+ rabbit_mgmt_util:reply([{status, ok}], ReqData, Context);
+ Qs when length(Qs) > 0 ->
+ Msg = <<"There are quorum queues that would lose their quorum if the target node is shut down">>,
+ failure(Msg, Qs, ReqData, Context)
+ end
+ end.
+
+failure(Message, Qs, ReqData, Context) ->
+ {Response, ReqData1, Context1} = rabbit_mgmt_util:reply([{status, failed},
+ {reason, Message},
+ {queues, Qs}],
+ ReqData, Context),
+ {stop, cowboy_req:reply(503, #{}, Response, ReqData1), Context1}.
+
+is_authorized(ReqData, Context) ->
+ rabbit_mgmt_util:is_authorized(ReqData, Context).
diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_health_check_port_listener.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_health_check_port_listener.erl
new file mode 100644
index 0000000000..1fcc00f613
--- /dev/null
+++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_health_check_port_listener.erl
@@ -0,0 +1,66 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+%% An HTTP API counterpart of 'rabbitmq-diagnostics check_port_listener'
+-module(rabbit_mgmt_wm_health_check_port_listener).
+
+-export([init/2, to_json/2, content_types_provided/2, is_authorized/2]).
+-export([resource_exists/2]).
+-export([variances/2]).
+
+-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+%%--------------------------------------------------------------------
+
+init(Req, _State) ->
+ {cowboy_rest, rabbit_mgmt_headers:set_common_permission_headers(Req, ?MODULE), #context{}}.
+
+variances(Req, Context) ->
+ {[<<"accept-encoding">>, <<"origin">>], Req, Context}.
+
+content_types_provided(ReqData, Context) ->
+ {rabbit_mgmt_util:responder_map(to_json), ReqData, Context}.
+
+resource_exists(ReqData, Context) ->
+ {case port(ReqData) of
+ none -> false;
+ _ -> true
+ end, ReqData, Context}.
+
+to_json(ReqData, Context) ->
+ try
+ Port = binary_to_integer(port(ReqData)),
+ Listeners = rabbit_networking:active_listeners(),
+ Local = [L || #listener{node = N} = L <- Listeners, N == node()],
+ PortListeners = [L || #listener{port = P} = L <- Local, P == Port],
+ case PortListeners of
+ [] ->
+ Msg = <<"No active listener">>,
+ failure(Msg, Port, [P || #listener{port = P} <- Local], ReqData, Context);
+ _ ->
+ rabbit_mgmt_util:reply([{status, ok},
+ {port, Port}], ReqData, Context)
+ end
+ catch
+ error:badarg ->
+ rabbit_mgmt_util:bad_request(<<"Invalid port">>, ReqData, Context)
+ end.
+
+failure(Message, Missing, Ports, ReqData, Context) ->
+ {Response, ReqData1, Context1} = rabbit_mgmt_util:reply([{status, failed},
+ {reason, Message},
+ {missing, Missing},
+ {ports, Ports}],
+ ReqData, Context),
+ {stop, cowboy_req:reply(503, #{}, Response, ReqData1), Context1}.
+
+is_authorized(ReqData, Context) ->
+ rabbit_mgmt_util:is_authorized(ReqData, Context).
+
+port(ReqData) ->
+ rabbit_mgmt_util:id(port, ReqData).
diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_health_check_protocol_listener.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_health_check_protocol_listener.erl
new file mode 100644
index 0000000000..a0b7e4e6dd
--- /dev/null
+++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_health_check_protocol_listener.erl
@@ -0,0 +1,127 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+%% An HTTP API counterpart of 'rabbitmq-diagnostics check_protocol_listener'
+-module(rabbit_mgmt_wm_health_check_protocol_listener).
+
+-export([init/2, to_json/2, content_types_provided/2, is_authorized/2]).
+-export([resource_exists/2]).
+-export([variances/2]).
+
+-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+%%--------------------------------------------------------------------
+
+init(Req, _State) ->
+ {cowboy_rest, rabbit_mgmt_headers:set_common_permission_headers(Req, ?MODULE), #context{}}.
+
+variances(Req, Context) ->
+ {[<<"accept-encoding">>, <<"origin">>], Req, Context}.
+
+content_types_provided(ReqData, Context) ->
+ {rabbit_mgmt_util:responder_map(to_json), ReqData, Context}.
+
+resource_exists(ReqData, Context) ->
+ {case protocol(ReqData) of
+ none -> false;
+ _ -> true
+ end, ReqData, Context}.
+
+to_json(ReqData, Context) ->
+ Protocol = normalize_protocol(protocol(ReqData)),
+ Listeners = rabbit_networking:active_listeners(),
+ Local = [L || #listener{node = N} = L <- Listeners, N == node()],
+ ProtoListeners = [L || #listener{protocol = P} = L <- Local, atom_to_list(P) == Protocol],
+ case ProtoListeners of
+ [] ->
+ Msg = <<"No active listener">>,
+ failure(Msg, Protocol, [P || #listener{protocol = P} <- Local], ReqData, Context);
+ _ ->
+ rabbit_mgmt_util:reply([{status, ok},
+ {protocol, list_to_binary(Protocol)}], ReqData, Context)
+ end.
+
+failure(Message, Missing, Protocols, ReqData, Context) ->
+ {Response, ReqData1, Context1} = rabbit_mgmt_util:reply([{status, failed},
+ {reason, Message},
+ {missing, list_to_binary(Missing)},
+ {protocols, Protocols}],
+ ReqData, Context),
+ {stop, cowboy_req:reply(503, #{}, Response, ReqData1), Context1}.
+
+is_authorized(ReqData, Context) ->
+ rabbit_mgmt_util:is_authorized(ReqData, Context).
+
+protocol(ReqData) ->
+ rabbit_mgmt_util:id(protocol, ReqData).
+
+normalize_protocol(Protocol) ->
+ case string:lowercase(binary_to_list(Protocol)) of
+ "amqp091" -> "amqp";
+ "amqp0.9.1" -> "amqp";
+ "amqp0-9-1" -> "amqp";
+ "amqp0_9_1" -> "amqp";
+ "amqp10" -> "amqp";
+ "amqp1.0" -> "amqp";
+ "amqp1-0" -> "amqp";
+ "amqp1_0" -> "amqp";
+ "amqps" -> "amqp/ssl";
+ "mqtt3.1" -> "mqtt";
+ "mqtt3.1.1" -> "mqtt";
+ "mqtt31" -> "mqtt";
+ "mqtt311" -> "mqtt";
+ "mqtt3_1" -> "mqtt";
+ "mqtt3_1_1" -> "mqtt";
+ "mqtts" -> "mqtt/ssl";
+ "mqtt+tls" -> "mqtt/ssl";
+ "mqtt+ssl" -> "mqtt/ssl";
+ "stomp1.0" -> "stomp";
+ "stomp1.1" -> "stomp";
+ "stomp1.2" -> "stomp";
+ "stomp10" -> "stomp";
+ "stomp11" -> "stomp";
+ "stomp12" -> "stomp";
+ "stomp1_0" -> "stomp";
+ "stomp1_1" -> "stomp";
+ "stomp1_2" -> "stomp";
+ "stomps" -> "stomp/ssl";
+ "stomp+tls" -> "stomp/ssl";
+ "stomp+ssl" -> "stomp/ssl";
+ "https" -> "https";
+ "http1" -> "http";
+ "http1.1" -> "http";
+ "http_api" -> "http";
+ "management" -> "http";
+ "management_ui" -> "http";
+ "ui" -> "http";
+ "cli" -> "clustering";
+ "distribution" -> "clustering";
+ "webmqtt" -> "http/web-mqtt";
+ "web-mqtt" -> "http/web-mqtt";
+ "web_mqtt" -> "http/web-mqtt";
+ "webmqtt/tls" -> "https/web-mqtt";
+ "web-mqtt/tls" -> "https/web-mqtt";
+ "webmqtt/ssl" -> "https/web-mqtt";
+ "web-mqtt/ssl" -> "https/web-mqtt";
+ "webmqtt+tls" -> "https/web-mqtt";
+ "web-mqtt+tls" -> "https/web-mqtt";
+ "webmqtt+ssl" -> "https/web-mqtt";
+ "web-mqtt+ssl" -> "https/web-mqtt";
+ "webstomp" -> "http/web-stomp";
+ "web-stomp" -> "http/web-stomp";
+ "web_stomp" -> "http/web-stomp";
+ "webstomp/tls" -> "https/web-stomp";
+ "web-stomp/tls" -> "https/web-stomp";
+ "webstomp/ssl" -> "https/web-stomp";
+ "web-stomp/ssl" -> "https/web-stomp";
+ "webstomp+tls" -> "https/web-stomp";
+ "web-stomp+tls" -> "https/web-stomp";
+ "webstomp+ssl" -> "https/web-stomp";
+ "web-stomp+ssl" -> "https/web-stomp";
+ Any -> Any
+ end.
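+
+%% For example: normalize_protocol(<<"MQTT3.1">>) -> "mqtt" and
+%% normalize_protocol(<<"amqp091">>) -> "amqp"; unrecognised values pass through.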
diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_health_check_virtual_hosts.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_health_check_virtual_hosts.erl
new file mode 100644
index 0000000000..0bc9680adc
--- /dev/null
+++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_health_check_virtual_hosts.erl
@@ -0,0 +1,48 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+%% An HTTP API counterpart of 'rabbitmq-diagnostics check_virtual_hosts'
+-module(rabbit_mgmt_wm_health_check_virtual_hosts).
+
+-export([init/2, to_json/2, content_types_provided/2, is_authorized/2]).
+-export([resource_exists/2]).
+-export([variances/2]).
+
+-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl").
+
+%%--------------------------------------------------------------------
+
+init(Req, _State) ->
+ {cowboy_rest, rabbit_mgmt_headers:set_common_permission_headers(Req, ?MODULE), #context{}}.
+
+variances(Req, Context) ->
+ {[<<"accept-encoding">>, <<"origin">>], Req, Context}.
+
+content_types_provided(ReqData, Context) ->
+ {rabbit_mgmt_util:responder_map(to_json), ReqData, Context}.
+
+resource_exists(ReqData, Context) ->
+ {true, ReqData, Context}.
+
+to_json(ReqData, Context) ->
+ case rabbit_vhost_sup_sup:check() of
+ [] ->
+ rabbit_mgmt_util:reply([{status, ok}], ReqData, Context);
+ Vs when length(Vs) > 0 ->
+ Msg = <<"Some virtual hosts are down">>,
+ failure(Msg, Vs, ReqData, Context)
+ end.
+
+failure(Message, Vs, ReqData, Context) ->
+ {Response, ReqData1, Context1} = rabbit_mgmt_util:reply([{status, failed},
+ {reason, Message},
+ {'virtual-hosts', Vs}],
+ ReqData, Context),
+ {stop, cowboy_req:reply(503, #{}, Response, ReqData1), Context1}.
+
+is_authorized(ReqData, Context) ->
+ rabbit_mgmt_util:is_authorized(ReqData, Context).
diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_healthchecks.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_healthchecks.erl
new file mode 100644
index 0000000000..4fc61bd0a7
--- /dev/null
+++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_healthchecks.erl
@@ -0,0 +1,76 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+%% This original One True Health Check™ has been deprecated as too coarse-grained,
+%% intrusive and prone to false positives under load.
+-module(rabbit_mgmt_wm_healthchecks).
+
+-export([init/2, to_json/2, content_types_provided/2, is_authorized/2]).
+-export([resource_exists/2]).
+-export([variances/2]).
+
+-include("rabbit_mgmt.hrl").
+-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl").
+
+%%--------------------------------------------------------------------
+
+init(Req, _State) ->
+ {cowboy_rest, rabbit_mgmt_headers:set_common_permission_headers(Req, ?MODULE), #context{}}.
+
+variances(Req, Context) ->
+ {[<<"accept-encoding">>, <<"origin">>], Req, Context}.
+
+content_types_provided(ReqData, Context) ->
+ {rabbit_mgmt_util:responder_map(to_json), ReqData, Context}.
+
+resource_exists(ReqData, Context) ->
+ {case node0(ReqData) of
+ not_found -> false;
+ _ -> true
+ end, ReqData, Context}.
+
+to_json(ReqData, Context) ->
+ Node = node0(ReqData),
+ Timeout = case cowboy_req:header(<<"timeout">>, ReqData) of
+ undefined -> 70000;
+ Val -> list_to_integer(binary_to_list(Val))
+ end,
+ case rabbit_health_check:node(Node, Timeout) of
+ ok ->
+ rabbit_mgmt_util:reply([{status, ok}], ReqData, Context);
+ {badrpc, timeout} ->
+ ErrMsg = rabbit_mgmt_format:print("node ~p health check timed out", [Node]),
+ failure(ErrMsg, ReqData, Context);
+ {badrpc, Err} ->
+ failure(rabbit_mgmt_format:print("~p", Err), ReqData, Context);
+ {error_string, Err} ->
+ S = rabbit_mgmt_format:escape_html_tags(
+ rabbit_data_coercion:to_list(rabbit_mgmt_format:print(Err))),
+ failure(S, ReqData, Context)
+ end.
+
+failure(Message, ReqData, Context) ->
+ {Response, ReqData1, Context1} = rabbit_mgmt_util:reply([{status, failed},
+ {reason, Message}],
+ ReqData, Context),
+ {stop, cowboy_req:reply(?HEALTH_CHECK_FAILURE_STATUS, #{}, Response, ReqData1), Context1}.
+
+is_authorized(ReqData, Context) ->
+ rabbit_mgmt_util:is_authorized(ReqData, Context).
+
+node0(ReqData) ->
+ Node = case rabbit_mgmt_util:id(node, ReqData) of
+ none ->
+ node();
+ Node0 ->
+ list_to_atom(binary_to_list(Node0))
+ end,
+ case [N || N <- rabbit_mgmt_wm_nodes:all_nodes(ReqData),
+ proplists:get_value(name, N) == Node] of
+ [] -> not_found;
+ [_] -> Node
+ end.
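+
+%% Illustrative request (the node name is an example):
+%%   GET /api/healthchecks/node/rabbit@target-host
+%% A request without a node id, GET /api/healthchecks/node, checks the local node.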
diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_limit.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_limit.erl
new file mode 100644
index 0000000000..5f0e27c4ba
--- /dev/null
+++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_limit.erl
@@ -0,0 +1,64 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mgmt_wm_limit).
+
+-export([init/2,
+ content_types_accepted/2, is_authorized/2,
+ allowed_methods/2, accept_content/2,
+ delete_resource/2]).
+-export([variances/2]).
+
+-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+%%--------------------------------------------------------------------
+
+init(Req, _State) ->
+ {cowboy_rest, rabbit_mgmt_headers:set_common_permission_headers(Req, ?MODULE), #context{}}.
+
+variances(Req, Context) ->
+ {[<<"accept-encoding">>, <<"origin">>], Req, Context}.
+
+content_types_accepted(ReqData, Context) ->
+ {[{{<<"application">>, <<"json">>, '*'}, accept_content}], ReqData, Context}.
+
+allowed_methods(ReqData, Context) ->
+ {[<<"PUT">>, <<"DELETE">>, <<"OPTIONS">>], ReqData, Context}.
+
+accept_content(ReqData0, Context = #context{user = #user{username = Username}}) ->
+ case rabbit_mgmt_util:vhost(ReqData0) of
+ not_found ->
+ rabbit_mgmt_util:not_found(vhost_not_found, ReqData0, Context);
+ VHost ->
+ rabbit_mgmt_util:with_decode(
+ [value], ReqData0, Context,
+ fun([Value], _Body, ReqData) ->
+ Name = rabbit_mgmt_util:id(name, ReqData),
+ case rabbit_vhost_limit:update_limit(VHost, Name, Value,
+ Username) of
+ ok ->
+ {true, ReqData, Context};
+ {error_string, Reason} ->
+ rabbit_mgmt_util:bad_request(
+ list_to_binary(Reason), ReqData, Context)
+ end
+ end)
+ end.
+
+delete_resource(ReqData, Context = #context{user = #user{username = Username}}) ->
+ ok = rabbit_vhost_limit:clear_limit(rabbit_mgmt_util:vhost(ReqData),
+ name(ReqData), Username),
+ {true, ReqData, Context}.
+
+is_authorized(ReqData, Context) ->
+ rabbit_mgmt_util:is_authorized_admin(ReqData, Context).
+
+%%--------------------------------------------------------------------
+
+name(ReqData) -> rabbit_mgmt_util:id(name, ReqData).
diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_limits.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_limits.erl
new file mode 100644
index 0000000000..96fbf64100
--- /dev/null
+++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_limits.erl
@@ -0,0 +1,59 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mgmt_wm_limits).
+
+-export([init/2, to_json/2, content_types_provided/2,
+ resource_exists/2, is_authorized/2, allowed_methods/2]).
+-export([variances/2]).
+
+-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+%%--------------------------------------------------------------------
+
+init(Req, _State) ->
+ {cowboy_rest, rabbit_mgmt_headers:set_common_permission_headers(Req, ?MODULE), #context{}}.
+
+variances(Req, Context) ->
+ {[<<"accept-encoding">>, <<"origin">>], Req, Context}.
+
+allowed_methods(ReqData, Context) ->
+ {[<<"GET">>, <<"OPTIONS">>], ReqData, Context}.
+
+%% An admin user can see all vhosts; other users only the vhosts visible to them.
+
+is_authorized(ReqData, Context) ->
+ rabbit_mgmt_util:is_authorized_vhost_visible(ReqData, Context).
+
+content_types_provided(ReqData, Context) ->
+ {[{<<"application/json">>, to_json}], ReqData, Context}.
+
+resource_exists(ReqData, Context) ->
+ {case rabbit_mgmt_util:vhost(ReqData) of
+ not_found -> false;
+ _ -> true
+ end, ReqData, Context}.
+
+to_json(ReqData, Context) ->
+ rabbit_mgmt_util:reply_list(limits(ReqData, Context), [], ReqData, Context).
+
+limits(ReqData, Context) ->
+ case rabbit_mgmt_util:vhost(ReqData) of
+ none ->
+ User = Context#context.user,
+ VisibleVhosts = rabbit_mgmt_util:list_visible_vhosts_names(User),
+ [ [{vhost, VHost}, {value, Value}]
+ || {VHost, Value} <- rabbit_vhost_limit:list(),
+ lists:member(VHost, VisibleVhosts) ];
+ VHost when is_binary(VHost) ->
+ case rabbit_vhost_limit:list(VHost) of
+ [] -> [];
+ Value -> [[{vhost, VHost}, {value, Value}]]
+ end
+ end.
+%%--------------------------------------------------------------------
diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_login.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_login.erl
new file mode 100644
index 0000000000..fec7c99643
--- /dev/null
+++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_login.erl
@@ -0,0 +1,53 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mgmt_wm_login).
+
+-export([init/2, is_authorized/2,
+ allowed_methods/2, accept_content/2, content_types_provided/2,
+ content_types_accepted/2]).
+-export([variances/2]).
+
+-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+%%--------------------------------------------------------------------
+
+init(Req, _State) ->
+ {cowboy_rest, rabbit_mgmt_headers:set_common_permission_headers(Req, ?MODULE), #context{}}.
+
+variances(Req, Context) ->
+ {[<<"accept-encoding">>, <<"origin">>], Req, Context}.
+
+allowed_methods(ReqData, Context) ->
+ {[<<"POST">>, <<"OPTIONS">>], ReqData, Context}.
+
+content_types_provided(ReqData, Context) ->
+ {rabbit_mgmt_util:responder_map(to_json), ReqData, Context}.
+
+content_types_accepted(ReqData, Context) ->
+ {[{{<<"application">>, <<"x-www-form-urlencoded">>, '*'}, accept_content}], ReqData, Context}.
+
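+%% On successful authentication the credentials are stored base64-encoded in
+%% an "auth" cookie, presumably so that the management UI can replay them on
+%% subsequent requests.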
+is_authorized(#{method := <<"OPTIONS">>} = ReqData, Context) ->
+ {true, ReqData, Context};
+is_authorized(ReqData0, Context) ->
+ {ok, Body, ReqData} = cowboy_req:read_urlencoded_body(ReqData0),
+ Username = proplists:get_value(<<"username">>, Body),
+ Password = proplists:get_value(<<"password">>, Body),
+ case rabbit_mgmt_util:is_authorized_user(ReqData, Context, Username, Password) of
+ {true, ReqData1, Context1} ->
+ Value = base64:encode(<<Username/binary,":",Password/binary>>),
+ {true, cowboy_req:set_resp_cookie(<<"auth">>, Value, ReqData1), Context1};
+ Other ->
+ Other
+ end.
+
+accept_content(ReqData, Context) ->
+ rabbit_mgmt_util:post_respond(do_login(ReqData, Context)).
+
+do_login(ReqData, Context) ->
+ rabbit_mgmt_util:reply(ok, ReqData, Context).
diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_node.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_node.erl
new file mode 100644
index 0000000000..52c1d4d9bb
--- /dev/null
+++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_node.erl
@@ -0,0 +1,78 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mgmt_wm_node).
+
+-export([init/2, to_json/2, content_types_provided/2, is_authorized/2]).
+-export([resource_exists/2]).
+-export([variances/2]).
+
+-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+%%--------------------------------------------------------------------
+
+init(Req, _State) ->
+ {cowboy_rest, rabbit_mgmt_headers:set_common_permission_headers(Req, ?MODULE), #context{}}.
+
+variances(Req, Context) ->
+ {[<<"accept-encoding">>, <<"origin">>], Req, Context}.
+
+content_types_provided(ReqData, Context) ->
+ {rabbit_mgmt_util:responder_map(to_json), ReqData, Context}.
+
+resource_exists(ReqData, Context) ->
+ {case node0(ReqData) of
+ not_found -> false;
+ _ -> true
+ end, ReqData, Context}.
+
+to_json(ReqData, Context) ->
+ rabbit_mgmt_util:reply(node0(ReqData), ReqData, Context).
+
+is_authorized(ReqData, Context) ->
+ rabbit_mgmt_util:is_authorized_monitor(ReqData, Context).
+
+%%--------------------------------------------------------------------
+
+node0(ReqData) ->
+ Node = list_to_atom(binary_to_list(rabbit_mgmt_util:id(node, ReqData))),
+ [Data] = node_data(Node, ReqData),
+ augment(ReqData, Node, Data).
+
+augment(ReqData, Node, Data) ->
+ lists:foldl(fun (Key, DataN) -> augment(Key, ReqData, Node, DataN) end,
+ Data, [memory, binary]).
+
+augment(Key, ReqData, Node, Data) ->
+ case rabbit_mgmt_util:qs_val(list_to_binary(atom_to_list(Key)), ReqData) of
+ <<"true">> -> Res = case rpc:call(Node, rabbit_vm, Key, [], infinity) of
+ {badrpc, _} -> not_available;
+ Result -> Result
+ end,
+ [{Key, Res} | Data];
+ _ -> Data
+ end.
+
+node_data(Node, ReqData) ->
+ S = rabbit_mnesia:status(),
+ Nodes = proplists:get_value(nodes, S),
+ Running = proplists:get_value(running_nodes, S),
+ Type = find_type(Node, Nodes),
+ Basic = [[{name, Node}, {running, lists:member(Node, Running)}, {type, Type}]],
+ case rabbit_mgmt_util:disable_stats(ReqData) of
+ false ->
+ rabbit_mgmt_db:augment_nodes(Basic, rabbit_mgmt_util:range_ceil(ReqData));
+ true ->
+ Basic
+ end.
+
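+%% Nodes is a proplist of {Type, NodesOfType} pairs as returned by
+%% rabbit_mnesia:status/0; returns the type under which Node appears.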
+find_type(Node, [{Type, Nodes} | Rest]) ->
+ case lists:member(Node, Nodes) of
+ true -> Type;
+ false -> find_type(Node, Rest)
+ end.
diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_node_memory.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_node_memory.erl
new file mode 100644
index 0000000000..19e4237b44
--- /dev/null
+++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_node_memory.erl
@@ -0,0 +1,75 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mgmt_wm_node_memory).
+
+-export([init/2, to_json/2, content_types_provided/2, is_authorized/2]).
+-export([resource_exists/2]).
+-export([variances/2]).
+
+-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+%%--------------------------------------------------------------------
+
+init(Req, [Mode]) ->
+ {cowboy_rest, rabbit_mgmt_headers:set_common_permission_headers(Req, ?MODULE), {Mode, #context{}}}.
+
+variances(Req, Context) ->
+ {[<<"accept-encoding">>, <<"origin">>], Req, Context}.
+
+content_types_provided(ReqData, Context) ->
+ {rabbit_mgmt_util:responder_map(to_json), ReqData, Context}.
+
+resource_exists(ReqData, Context) ->
+ {node_exists(ReqData, get_node(ReqData)), ReqData, Context}.
+
+to_json(ReqData, {Mode, Context}) ->
+ rabbit_mgmt_util:reply(augment(Mode, ReqData), ReqData, {Mode, Context}).
+
+is_authorized(ReqData, {Mode, Context}) ->
+ {Res, RD, C} = rabbit_mgmt_util:is_authorized_monitor(ReqData, Context),
+ {Res, RD, {Mode, C}}.
+
+%%--------------------------------------------------------------------
+get_node(ReqData) ->
+ list_to_atom(binary_to_list(rabbit_mgmt_util:id(node, ReqData))).
+
+node_exists(ReqData, Node) ->
+ case [N || N <- rabbit_mgmt_wm_nodes:all_nodes(ReqData),
+ proplists:get_value(name, N) == Node] of
+ [] -> false;
+ [_] -> true
+ end.
+
+augment(Mode, ReqData) ->
+ Node = get_node(ReqData),
+ case node_exists(ReqData, Node) of
+ false ->
+ not_found;
+ true ->
+ case rpc:call(Node, rabbit_vm, memory, [], infinity) of
+ {badrpc, _} -> [{memory, not_available}];
+ Result -> [{memory, format(Mode, Result)}]
+ end
+ end.
+
+format(absolute, Result) ->
+ Result;
+format(relative, Result) ->
+ {value, {total, Totals}, Rest} = lists:keytake(total, 1, Result),
+ Total = proplists:get_value(rss, Totals),
+ [{total, 100} | [{K, percentage(V, Total)} || {K, V} <- Rest,
+ K =/= strategy]].
+
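+%% Rounds Part/Total to a whole percent, reporting at least 1% for any
+%% non-zero part so that small values are not hidden: percentage(1, 1000)
+%% yields 1 rather than 0, for example.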
+percentage(Part, Total) ->
+ case round((Part/Total) * 100) of
+ 0 when Part =/= 0 ->
+ 1;
+ Int ->
+ Int
+ end.
diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_node_memory_ets.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_node_memory_ets.erl
new file mode 100644
index 0000000000..2bc40d22ce
--- /dev/null
+++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_node_memory_ets.erl
@@ -0,0 +1,85 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mgmt_wm_node_memory_ets).
+
+-export([init/2, to_json/2, content_types_provided/2, is_authorized/2]).
+-export([resource_exists/2]).
+-export([variances/2]).
+
+-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+%%--------------------------------------------------------------------
+
+init(Req, [Mode]) ->
+ {cowboy_rest, rabbit_mgmt_headers:set_common_permission_headers(Req, ?MODULE), {Mode, #context{}}}.
+
+variances(Req, Context) ->
+ {[<<"accept-encoding">>, <<"origin">>], Req, Context}.
+
+content_types_provided(ReqData, Context) ->
+ {rabbit_mgmt_util:responder_map(to_json), ReqData, Context}.
+
+resource_exists(ReqData, Context) ->
+ {node_exists(ReqData, get_node(ReqData)), ReqData, Context}.
+
+to_json(ReqData, {Mode, Context}) ->
+ rabbit_mgmt_util:reply(augment(Mode, ReqData), ReqData, {Mode, Context}).
+
+is_authorized(ReqData, {Mode, Context}) ->
+ {Res, RD, C} = rabbit_mgmt_util:is_authorized_monitor(ReqData, Context),
+ {Res, RD, {Mode, C}}.
+
+%%--------------------------------------------------------------------
+get_node(ReqData) ->
+ list_to_atom(binary_to_list(rabbit_mgmt_util:id(node, ReqData))).
+
+get_filter(ReqData) ->
+ case rabbit_mgmt_util:id(filter, ReqData) of
+ none -> all;
+ <<"management">> -> rabbit_mgmt_storage;
+ Other when is_binary(Other) -> list_to_atom(binary_to_list(Other));
+ _ -> all
+ end.
+
+node_exists(ReqData, Node) ->
+ case [N || N <- rabbit_mgmt_wm_nodes:all_nodes(ReqData),
+ proplists:get_value(name, N) == Node] of
+ [] -> false;
+ [_] -> true
+ end.
+
+augment(Mode, ReqData) ->
+ Node = get_node(ReqData),
+ Filter = get_filter(ReqData),
+ case node_exists(ReqData, Node) of
+ false ->
+ not_found;
+ true ->
+ case rpc:call(Node, rabbit_vm, ets_tables_memory,
+ [Filter], infinity) of
+ {badrpc, _} -> [{ets_tables_memory, not_available}];
+ [] -> [{ets_tables_memory, no_tables}];
+ Result -> [{ets_tables_memory, format(Mode, Result)}]
+ end
+ end.
+
+format(absolute, Result) ->
+ Total = lists:sum([V || {_K,V} <- Result]),
+ [{total, Total} | Result];
+format(relative, Result) ->
+ Total = lists:sum([V || {_K,V} <- Result]),
+ [{total, 100} | [{K, percentage(V, Total)} || {K, V} <- Result]].
+
+percentage(Part, Total) ->
+ case round((Part/Total) * 100) of
+ 0 when Part =/= 0 ->
+ 1;
+ Int ->
+ Int
+ end.
diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_nodes.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_nodes.erl
new file mode 100644
index 0000000000..c3de2e0bd2
--- /dev/null
+++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_nodes.erl
@@ -0,0 +1,56 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mgmt_wm_nodes).
+
+-export([init/2, to_json/2, content_types_provided/2, is_authorized/2]).
+-export([all_nodes/1, all_nodes_raw/0]).
+-export([variances/2]).
+
+-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+%%--------------------------------------------------------------------
+
+init(Req, _State) ->
+ {cowboy_rest, rabbit_mgmt_headers:set_common_permission_headers(Req, ?MODULE), #context{}}.
+
+variances(Req, Context) ->
+ {[<<"accept-encoding">>, <<"origin">>], Req, Context}.
+
+content_types_provided(ReqData, Context) ->
+ {rabbit_mgmt_util:responder_map(to_json), ReqData, Context}.
+
+to_json(ReqData, Context) ->
+ try
+ rabbit_mgmt_util:reply_list(all_nodes(ReqData), ReqData, Context)
+ catch
+ {error, invalid_range_parameters, Reason} ->
+ rabbit_mgmt_util:bad_request(iolist_to_binary(Reason), ReqData, Context)
+ end.
+
+is_authorized(ReqData, Context) ->
+ rabbit_mgmt_util:is_authorized_monitor(ReqData, Context).
+
+%%--------------------------------------------------------------------
+
+all_nodes(ReqData) ->
+ case rabbit_mgmt_util:disable_stats(ReqData) of
+ false ->
+ rabbit_mgmt_db:augment_nodes(
+ all_nodes_raw(), rabbit_mgmt_util:range_ceil(ReqData));
+ true ->
+ all_nodes_raw()
+ end.
+
+all_nodes_raw() ->
+ S = rabbit_mnesia:status(),
+ Nodes = proplists:get_value(nodes, S),
+ Types = proplists:get_keys(Nodes),
+ Running = proplists:get_value(running_nodes, S),
+ [[{name, Node}, {type, Type}, {running, lists:member(Node, Running)}] ||
+ Type <- Types, Node <- proplists:get_value(Type, Nodes)].
diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_operator_policies.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_operator_policies.erl
new file mode 100644
index 0000000000..7490c427dd
--- /dev/null
+++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_operator_policies.erl
@@ -0,0 +1,49 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mgmt_wm_operator_policies).
+
+-export([init/2, to_json/2, content_types_provided/2, is_authorized/2,
+ resource_exists/2, basic/1]).
+-export([variances/2]).
+
+-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+%%--------------------------------------------------------------------
+
+init(Req, _State) ->
+ {cowboy_rest, rabbit_mgmt_headers:set_common_permission_headers(Req, ?MODULE), #context{}}.
+
+variances(Req, Context) ->
+ {[<<"accept-encoding">>, <<"origin">>], Req, Context}.
+
+content_types_provided(ReqData, Context) ->
+ {[{<<"application/json">>, to_json}], ReqData, Context}.
+
+resource_exists(ReqData, Context) ->
+ {case basic(ReqData) of
+ not_found -> false;
+ _ -> true
+ end, ReqData, Context}.
+
+to_json(ReqData, Context) ->
+ rabbit_mgmt_util:reply_list(
+ rabbit_mgmt_util:filter_vhost(basic(ReqData), ReqData, Context),
+ ["priority"], ReqData, Context).
+
+is_authorized(ReqData, Context) ->
+ rabbit_mgmt_util:is_authorized_vhost(ReqData, Context).
+
+%%--------------------------------------------------------------------
+
+basic(ReqData) ->
+ case rabbit_mgmt_util:vhost(ReqData) of
+ not_found -> not_found;
+ none -> rabbit_policy:list_op();
+ VHost -> rabbit_policy:list_op(VHost)
+ end.
diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_operator_policy.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_operator_policy.erl
new file mode 100644
index 0000000000..85bec3631d
--- /dev/null
+++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_operator_policy.erl
@@ -0,0 +1,83 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mgmt_wm_operator_policy).
+
+-export([init/2, resource_exists/2, to_json/2,
+ content_types_provided/2, content_types_accepted/2,
+ is_authorized/2, allowed_methods/2, accept_content/2,
+ delete_resource/2]).
+-export([variances/2]).
+
+
+-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+%%--------------------------------------------------------------------
+
+init(Req, _State) ->
+ {cowboy_rest, rabbit_mgmt_headers:set_common_permission_headers(Req, ?MODULE), #context{}}.
+
+variances(Req, Context) ->
+ {[<<"accept-encoding">>, <<"origin">>], Req, Context}.
+
+content_types_provided(ReqData, Context) ->
+ {[{<<"application/json">>, to_json}], ReqData, Context}.
+
+content_types_accepted(ReqData, Context) ->
+ {[{{<<"application">>, <<"json">>, '*'}, accept_content}], ReqData, Context}.
+
+allowed_methods(ReqData, Context) ->
+ {[<<"HEAD">>, <<"GET">>, <<"PUT">>, <<"DELETE">>, <<"OPTIONS">>], ReqData, Context}.
+
+resource_exists(ReqData, Context) ->
+ {case policy(ReqData) of
+ not_found -> false;
+ _ -> true
+ end, ReqData, Context}.
+
+to_json(ReqData, Context) ->
+ rabbit_mgmt_util:reply(policy(ReqData), ReqData, Context).
+
+accept_content(ReqData0, Context = #context{user = #user{username = Username}}) ->
+ case rabbit_mgmt_util:vhost(ReqData0) of
+ not_found ->
+ rabbit_mgmt_util:not_found(vhost_not_found, ReqData0, Context);
+ VHost ->
+ rabbit_mgmt_util:with_decode(
+ [pattern, definition], ReqData0, Context,
+ fun([Pattern, Definition], Body, ReqData) ->
+ case rabbit_policy:set_op(
+ VHost, name(ReqData), Pattern,
+ maps:to_list(Definition),
+ maps:get(priority, Body, undefined),
+ maps:get('apply-to', Body, undefined),
+ Username) of
+ ok ->
+ {true, ReqData, Context};
+ {error_string, Reason} ->
+ rabbit_mgmt_util:bad_request(
+ list_to_binary(Reason), ReqData, Context)
+ end
+ end)
+ end.
+
+delete_resource(ReqData, Context = #context{user = #user{username = Username}}) ->
+ ok = rabbit_policy:delete_op(
+ rabbit_mgmt_util:vhost(ReqData), name(ReqData), Username),
+ {true, ReqData, Context}.
+
+is_authorized(ReqData, Context) ->
+ rabbit_mgmt_util:is_authorized_admin(ReqData, Context).
+
+%%--------------------------------------------------------------------
+
+policy(ReqData) ->
+ rabbit_policy:lookup_op(
+ rabbit_mgmt_util:vhost(ReqData), name(ReqData)).
+
+name(ReqData) -> rabbit_mgmt_util:id(name, ReqData).
diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_overview.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_overview.erl
new file mode 100644
index 0000000000..3a7e821b7d
--- /dev/null
+++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_overview.erl
@@ -0,0 +1,182 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mgmt_wm_overview).
+
+-export([init/2]).
+-export([to_json/2, content_types_provided/2, is_authorized/2]).
+-export([variances/2]).
+
+-import(rabbit_misc, [pget/2, pget/3]).
+
+-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+%%--------------------------------------------------------------------
+
+init(Req, _State) ->
+ {cowboy_rest, rabbit_mgmt_headers:set_common_permission_headers(Req, ?MODULE), #context{}}.
+
+variances(Req, Context) ->
+ {[<<"accept-encoding">>, <<"origin">>], Req, Context}.
+
+content_types_provided(ReqData, Context) ->
+ {rabbit_mgmt_util:responder_map(to_json), ReqData, Context}.
+
+to_json(ReqData, Context = #context{user = User = #user{tags = Tags}}) ->
+ RatesMode = rabbit_mgmt_agent_config:get_env(rates_mode),
+ SRP = get_sample_retention_policies(),
+    %% NB: this duplicates what's in /nodes, but we want a global view here,
+    %% and /nodes is not accessible to non-monitor users.
+ ExchangeTypes = lists:sort(
+ fun(ET1, ET2) ->
+ proplists:get_value(name, ET1, none)
+ =<
+ proplists:get_value(name, ET2, none)
+ end,
+ rabbit_mgmt_external_stats:list_registry_plugins(exchange)),
+ Overview0 = [{management_version, version(rabbitmq_management)},
+ {rates_mode, RatesMode},
+ {sample_retention_policies, SRP},
+ {exchange_types, ExchangeTypes},
+ {product_version, list_to_binary(rabbit:product_version())},
+ {product_name, list_to_binary(rabbit:product_name())},
+ {rabbitmq_version, list_to_binary(rabbit:base_product_version())},
+ {cluster_name, rabbit_nodes:cluster_name()},
+ {erlang_version, erlang_version()},
+ {erlang_full_version, erlang_full_version()},
+ {disable_stats, rabbit_mgmt_util:disable_stats(ReqData)},
+ {enable_queue_totals, rabbit_mgmt_util:enable_queue_totals(ReqData)}],
+ try
+ case rabbit_mgmt_util:disable_stats(ReqData) of
+ false ->
+ Range = rabbit_mgmt_util:range(ReqData),
+ Overview =
+ case rabbit_mgmt_util:is_monitor(Tags) of
+ true ->
+ Overview0 ++
+ [{K, maybe_map(V)} ||
+ {K,V} <- rabbit_mgmt_db:get_overview(Range)] ++
+ [{node, node()},
+ {listeners, listeners()},
+ {contexts, web_contexts(ReqData)}];
+ _ ->
+ Overview0 ++
+ [{K, maybe_map(V)} ||
+ {K, V} <- rabbit_mgmt_db:get_overview(User, Range)]
+ end,
+ rabbit_mgmt_util:reply(Overview, ReqData, Context);
+ true ->
+ VHosts = case rabbit_mgmt_util:is_monitor(Tags) of
+ true -> rabbit_vhost:list_names();
+ _ -> rabbit_mgmt_util:list_visible_vhosts_names(User)
+ end,
+
+ ObjectTotals = case rabbit_mgmt_util:is_monitor(Tags) of
+ true ->
+ [{queues, rabbit_amqqueue:count()},
+ {exchanges, rabbit_exchange:count()},
+ {connections, rabbit_connection_tracking:count()}];
+ _ ->
+ [{queues, length([Q || V <- VHosts, Q <- rabbit_amqqueue:list(V)])},
+ {exchanges, length([X || V <- VHosts, X <- rabbit_exchange:list(V)])}]
+ end,
+ Overview = Overview0 ++
+ [{node, node()},
+ {object_totals, ObjectTotals}],
+ rabbit_mgmt_util:reply(Overview, ReqData, Context)
+ end
+ catch
+ {error, invalid_range_parameters, Reason} ->
+ rabbit_mgmt_util:bad_request(iolist_to_binary(Reason), ReqData, Context)
+ end.
+
+is_authorized(ReqData, Context) ->
+ rabbit_mgmt_util:is_authorized(ReqData, Context).
+
+%%--------------------------------------------------------------------
+
+version(App) ->
+ {ok, V} = application:get_key(App, vsn),
+ list_to_binary(V).
+
+listeners() ->
+ rabbit_mgmt_util:sort_list(
+ [rabbit_mgmt_format:listener(L)
+ || L <- rabbit_networking:active_listeners()],
+ ["protocol", "port", "node"] ).
+
+maybe_map(L) when is_list(L) -> maps:from_list(L);
+maybe_map(V) -> V.
+
+%%--------------------------------------------------------------------
+
+web_contexts(ReqData) ->
+ rabbit_mgmt_util:sort_list(
+ lists:append(
+ [fmt_contexts(N) || N <- rabbit_mgmt_wm_nodes:all_nodes(ReqData)]),
+ ["description", "port", "node"]).
+
+fmt_contexts(Node) ->
+ [fmt_context(Node, C) || C <- pget(contexts, Node, [])].
+
+fmt_context(Node, C) ->
+ rabbit_mgmt_format:web_context([{node, pget(name, Node)} | C]).
+
+erlang_version() -> list_to_binary(rabbit_misc:otp_release()).
+
+erlang_full_version() ->
+ list_to_binary(rabbit_misc:otp_system_version()).
+
+get_sample_retention_policies() ->
+ P = rabbit_mgmt_agent_config:get_env(sample_retention_policies),
+ get_sample_retention_policies(P).
+
+get_sample_retention_policies(undefined) ->
+ [{global, []}, {basic, []}, {detailed, []}];
+get_sample_retention_policies(Policies) ->
+ [transform_retention_policy(Pol, Policies) || Pol <- [global, basic, detailed]].
+
+transform_retention_policy(Pol, Policies) ->
+ case proplists:lookup(Pol, Policies) of
+ none ->
+ {Pol, []};
+ {Pol, Intervals} ->
+ {Pol, transform_retention_intervals(Intervals, [])}
+ end.
+
+transform_retention_intervals([], Acc) ->
+ lists:sort(Acc);
+transform_retention_intervals([{MaxAgeInSeconds, _}|Rest], Acc) ->
+ %
+ % Seconds | Interval
+ % 60 | last minute
+ % 600 | last 10 minutes
+ % 3600 | last hour
+ % 28800 | last 8 hours
+ % 86400 | last day
+ %
+ % rabbitmq/rabbitmq-management#635
+ %
+    % We check whether the max age in seconds is within 10% of the values
+    % above, because the default values are a bit higher to accommodate
+    % edge cases (see deps/rabbitmq_management_agent/Makefile).
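+    %
+    % For example, a configured max age of 620 seconds falls inside the
+    % 540..660 window (600 +/- 10%) and is normalised to 600 ("last 10
+    % minutes"); a value outside every window is normalised to 0.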
+ AccVal = if
+ MaxAgeInSeconds >= 0 andalso MaxAgeInSeconds =< 66 ->
+ 60;
+ MaxAgeInSeconds >= 540 andalso MaxAgeInSeconds =< 660 ->
+ 600;
+ MaxAgeInSeconds >= 3240 andalso MaxAgeInSeconds =< 3960 ->
+ 3600;
+ MaxAgeInSeconds >= 25920 andalso MaxAgeInSeconds =< 31681 ->
+ 28800;
+ MaxAgeInSeconds >= 77760 andalso MaxAgeInSeconds =< 95041 ->
+ 86400;
+ true ->
+ 0
+ end,
+ transform_retention_intervals(Rest, [AccVal|Acc]).
diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_parameter.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_parameter.erl
new file mode 100644
index 0000000000..17a97ca4bf
--- /dev/null
+++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_parameter.erl
@@ -0,0 +1,88 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mgmt_wm_parameter).
+
+-export([init/2, resource_exists/2, to_json/2,
+ content_types_provided/2, content_types_accepted/2,
+ is_authorized/2, allowed_methods/2, accept_content/2,
+ delete_resource/2]).
+-export([variances/2]).
+
+-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+%%--------------------------------------------------------------------
+
+init(Req, _State) ->
+ {cowboy_rest, rabbit_mgmt_headers:set_common_permission_headers(Req, ?MODULE), #context{}}.
+
+variances(Req, Context) ->
+ {[<<"accept-encoding">>, <<"origin">>], Req, Context}.
+
+content_types_provided(ReqData, Context) ->
+ {rabbit_mgmt_util:responder_map(to_json), ReqData, Context}.
+
+content_types_accepted(ReqData, Context) ->
+ {[{'*', accept_content}], ReqData, Context}.
+
+allowed_methods(ReqData, Context) ->
+ {[<<"HEAD">>, <<"GET">>, <<"PUT">>, <<"DELETE">>, <<"OPTIONS">>], ReqData, Context}.
+
+resource_exists(ReqData, Context) ->
+ {case parameter(ReqData) of
+ not_found -> false;
+ _ -> true
+ end, ReqData, Context}.
+
+to_json(ReqData, Context) ->
+ rabbit_mgmt_util:reply(rabbit_mgmt_format:parameter(
+ rabbit_mgmt_wm_parameters:fix_shovel_publish_properties(parameter(ReqData))),
+ ReqData, Context).
+
+accept_content(ReqData0, Context = #context{user = User}) ->
+ case rabbit_mgmt_util:vhost(ReqData0) of
+ not_found ->
+ rabbit_mgmt_util:not_found(vhost_not_found, ReqData0, Context);
+ VHost ->
+ rabbit_mgmt_util:with_decode(
+ [value], ReqData0, Context,
+ fun([Value], _, ReqData) ->
+ case rabbit_runtime_parameters:set(
+ VHost, component(ReqData), name(ReqData),
+ if
+ is_map(Value) -> maps:to_list(Value);
+ true -> Value
+ end,
+ User) of
+ ok ->
+ {true, ReqData, Context};
+ {error_string, Reason} ->
+ S = rabbit_mgmt_format:escape_html_tags(
+ rabbit_data_coercion:to_list(Reason)),
+ rabbit_mgmt_util:bad_request(
+ rabbit_data_coercion:to_binary(S), ReqData, Context)
+ end
+ end)
+ end.
+
+delete_resource(ReqData, Context = #context{user = #user{username = Username}}) ->
+ ok = rabbit_runtime_parameters:clear(
+ rabbit_mgmt_util:vhost(ReqData), component(ReqData), name(ReqData), Username),
+ {true, ReqData, Context}.
+
+is_authorized(ReqData, Context) ->
+ rabbit_mgmt_util:is_authorized_policies(ReqData, Context).
+
+%%--------------------------------------------------------------------
+
+parameter(ReqData) ->
+ rabbit_runtime_parameters:lookup(
+ rabbit_mgmt_util:vhost(ReqData), component(ReqData), name(ReqData)).
+
+component(ReqData) -> rabbit_mgmt_util:id(component, ReqData).
+name(ReqData) -> rabbit_mgmt_util:id(name, ReqData).
diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_parameters.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_parameters.erl
new file mode 100644
index 0000000000..6acd7a608f
--- /dev/null
+++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_parameters.erl
@@ -0,0 +1,78 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mgmt_wm_parameters).
+
+-export([init/2, to_json/2, content_types_provided/2, is_authorized/2,
+ resource_exists/2, basic/1]).
+-export([fix_shovel_publish_properties/1]).
+-export([variances/2]).
+
+-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+%%--------------------------------------------------------------------
+
+init(Req, _State) ->
+ {cowboy_rest, rabbit_mgmt_headers:set_common_permission_headers(Req, ?MODULE), #context{}}.
+
+variances(Req, Context) ->
+ {[<<"accept-encoding">>, <<"origin">>], Req, Context}.
+
+content_types_provided(ReqData, Context) ->
+ {rabbit_mgmt_util:responder_map(to_json), ReqData, Context}.
+
+resource_exists(ReqData, Context) ->
+ {case basic(ReqData) of
+ not_found -> false;
+ _ -> true
+ end, ReqData, Context}.
+
+to_json(ReqData, Context) ->
+ rabbit_mgmt_util:reply_list(
+ rabbit_mgmt_util:filter_vhost(basic(ReqData), ReqData, Context),
+ ReqData, Context).
+
+is_authorized(ReqData, Context) ->
+ rabbit_mgmt_util:is_authorized_policies(ReqData, Context).
+
+%%--------------------------------------------------------------------
+
+%% Hackish fix to make sure we return a JSON object instead of an empty list
+%% when the publish-properties value is empty. Should be removed in 3.7.0
+%% when we switch to a new JSON library.
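+%%
+%% Illustrative (hypothetical) input: a parameter proplist such as
+%%   [{component, <<"shovel">>}, {value, [{<<"publish-properties">>, []}]}]
+%% has its empty publish-properties list replaced with the empty_struct
+%% marker, which the JSON encoder renders as an object.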
+fix_shovel_publish_properties(P) ->
+ case lists:keyfind(component, 1, P) of
+ {_, <<"shovel">>} ->
+ case lists:keytake(value, 1, P) of
+ {value, {_, Values}, P2} ->
+ case lists:keytake(<<"publish-properties">>, 1, Values) of
+ {_, {_, []}, Values2} ->
+ P2 ++ [{value, Values2 ++ [{<<"publish-properties">>, empty_struct}]}];
+ _ ->
+ P
+ end;
+ _ -> P
+ end;
+ _ -> P
+ end.
+
+basic(ReqData) ->
+ Raw = case rabbit_mgmt_util:id(component, ReqData) of
+ none -> rabbit_runtime_parameters:list();
+ Name -> case rabbit_mgmt_util:vhost(ReqData) of
+ none -> rabbit_runtime_parameters:list_component(
+ Name);
+ not_found -> not_found;
+ VHost -> rabbit_runtime_parameters:list(
+ VHost, Name)
+ end
+ end,
+ case Raw of
+ not_found -> not_found;
+ _ -> [rabbit_mgmt_format:parameter(fix_shovel_publish_properties(P)) || P <- Raw]
+ end.
diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_permission.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_permission.erl
new file mode 100644
index 0000000000..74df5ab960
--- /dev/null
+++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_permission.erl
@@ -0,0 +1,102 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mgmt_wm_permission).
+
+-export([init/2, resource_exists/2, to_json/2,
+ content_types_provided/2, content_types_accepted/2,
+ is_authorized/2, allowed_methods/2, accept_content/2,
+ delete_resource/2]).
+-export([variances/2]).
+
+-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+%%--------------------------------------------------------------------
+
+init(Req, _State) ->
+ {cowboy_rest, rabbit_mgmt_headers:set_common_permission_headers(Req, ?MODULE), #context{}}.
+
+variances(Req, Context) ->
+ {[<<"accept-encoding">>, <<"origin">>], Req, Context}.
+
+content_types_provided(ReqData, Context) ->
+ {rabbit_mgmt_util:responder_map(to_json), ReqData, Context}.
+
+content_types_accepted(ReqData, Context) ->
+ {[{'*', accept_content}], ReqData, Context}.
+
+allowed_methods(ReqData, Context) ->
+ {[<<"HEAD">>, <<"GET">>, <<"PUT">>, <<"DELETE">>, <<"OPTIONS">>], ReqData, Context}.
+
+resource_exists(ReqData, Context) ->
+ {case perms(ReqData) of
+ none -> false;
+ not_found -> false;
+ _ -> true
+ end, ReqData, Context}.
+
+to_json(ReqData, Context) ->
+ rabbit_mgmt_util:reply(perms(ReqData), ReqData, Context).
+
+accept_content(ReqData0, Context = #context{user = #user{username = Username}}) ->
+ case perms(ReqData0) of
+ not_found ->
+ rabbit_mgmt_util:bad_request(vhost_or_user_not_found,
+ ReqData0, Context);
+ _ ->
+ User = rabbit_mgmt_util:id(user, ReqData0),
+ VHost = rabbit_mgmt_util:id(vhost, ReqData0),
+ rabbit_mgmt_util:with_decode(
+ [configure, write, read], ReqData0, Context,
+ fun([Conf, Write, Read], _, ReqData) ->
+ rabbit_auth_backend_internal:set_permissions(
+ User, VHost, Conf, Write, Read, Username),
+ {true, ReqData, Context}
+ end)
+ end.
+
+delete_resource(ReqData, Context = #context{user = #user{username = Username}}) ->
+ User = rabbit_mgmt_util:id(user, ReqData),
+ VHost = rabbit_mgmt_util:id(vhost, ReqData),
+ rabbit_auth_backend_internal:clear_permissions(User, VHost, Username),
+ {true, ReqData, Context}.
+
+is_authorized(ReqData, Context) ->
+ rabbit_mgmt_util:is_authorized_admin(ReqData, Context).
+
+%%--------------------------------------------------------------------
+
+perms(ReqData) ->
+ User = rabbit_mgmt_util:id(user, ReqData),
+ case rabbit_auth_backend_internal:lookup_user(User) of
+ {ok, _} ->
+ case rabbit_mgmt_util:vhost(ReqData) of
+ not_found ->
+ not_found;
+ VHost ->
+ rabbit_mgmt_util:catch_no_such_user_or_vhost(
+ fun() ->
+ Perms =
+ rabbit_auth_backend_internal:list_user_vhost_permissions(
+ User, VHost),
+ case Perms of
+ [Rest] -> [{user, User},
+ {vhost, VHost} | Rest];
+ [] -> none
+ end
+ end,
+ fun() -> not_found end)
+ end;
+ {error, _} ->
+ not_found
+ end.
+
diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_permissions.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_permissions.erl
new file mode 100644
index 0000000000..ad1799b84b
--- /dev/null
+++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_permissions.erl
@@ -0,0 +1,38 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mgmt_wm_permissions).
+
+-export([init/2, to_json/2, content_types_provided/2, is_authorized/2]).
+-export([permissions/0]).
+-export([variances/2]).
+
+-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+%%--------------------------------------------------------------------
+
+init(Req, _State) ->
+ {cowboy_rest, rabbit_mgmt_headers:set_common_permission_headers(Req, ?MODULE), #context{}}.
+
+variances(Req, Context) ->
+ {[<<"accept-encoding">>, <<"origin">>], Req, Context}.
+
+content_types_provided(ReqData, Context) ->
+ {rabbit_mgmt_util:responder_map(to_json), ReqData, Context}.
+
+to_json(ReqData, Context) ->
+ rabbit_mgmt_util:reply_list(permissions(), ["vhost", "user"],
+ ReqData, Context).
+
+is_authorized(ReqData, Context) ->
+ rabbit_mgmt_util:is_authorized_admin(ReqData, Context).
+
+%%--------------------------------------------------------------------
+
+permissions() ->
+ rabbit_auth_backend_internal:list_permissions().
diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_permissions_user.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_permissions_user.erl
new file mode 100644
index 0000000000..ad1a2e5f96
--- /dev/null
+++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_permissions_user.erl
@@ -0,0 +1,47 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mgmt_wm_permissions_user).
+
+-export([init/2, to_json/2, content_types_provided/2, resource_exists/2,
+ is_authorized/2]).
+-export([variances/2]).
+
+-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+%%--------------------------------------------------------------------
+
+init(Req, _State) ->
+ {cowboy_rest, rabbit_mgmt_headers:set_common_permission_headers(Req, ?MODULE), #context{}}.
+
+variances(Req, Context) ->
+ {[<<"accept-encoding">>, <<"origin">>], Req, Context}.
+
+content_types_provided(ReqData, Context) ->
+ {rabbit_mgmt_util:responder_map(to_json), ReqData, Context}.
+
+resource_exists(ReqData, Context) ->
+ {case rabbit_mgmt_wm_user:user(ReqData) of
+ {ok, _} -> true;
+ {error, _} -> false
+ end, ReqData, Context}.
+
+to_json(ReqData, Context) ->
+ User = rabbit_mgmt_util:id(user, ReqData),
+ rabbit_mgmt_util:catch_no_such_user_or_vhost(
+ fun() ->
+ Perms = rabbit_auth_backend_internal:list_user_permissions(User),
+ rabbit_mgmt_util:reply_list([[{user, User} | Rest] || Rest <- Perms],
+ ["vhost", "user"], ReqData, Context)
+ end,
+ fun() ->
+ rabbit_mgmt_util:bad_request(vhost_or_user_not_found, ReqData, Context)
+ end).
+
+is_authorized(ReqData, Context) ->
+ rabbit_mgmt_util:is_authorized_admin(ReqData, Context).
diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_permissions_vhost.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_permissions_vhost.erl
new file mode 100644
index 0000000000..435c15faa4
--- /dev/null
+++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_permissions_vhost.erl
@@ -0,0 +1,44 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mgmt_wm_permissions_vhost).
+
+-export([init/2, to_json/2, content_types_provided/2, resource_exists/2,
+ is_authorized/2]).
+-export([variances/2]).
+
+-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+%%--------------------------------------------------------------------
+
+init(Req, _State) ->
+ {cowboy_rest, rabbit_mgmt_headers:set_common_permission_headers(Req, ?MODULE), #context{}}.
+
+variances(Req, Context) ->
+ {[<<"accept-encoding">>, <<"origin">>], Req, Context}.
+
+content_types_provided(ReqData, Context) ->
+ {rabbit_mgmt_util:responder_map(to_json), ReqData, Context}.
+
+resource_exists(ReqData, Context) ->
+ {rabbit_vhost:exists(rabbit_mgmt_wm_vhost:id(ReqData)), ReqData, Context}.
+
+to_json(ReqData, Context) ->
+ VHost = rabbit_mgmt_util:id(vhost, ReqData),
+ rabbit_mgmt_util:catch_no_such_user_or_vhost(
+ fun() ->
+ Perms = rabbit_auth_backend_internal:list_vhost_permissions(VHost),
+ rabbit_mgmt_util:reply_list([[{vhost, VHost} | Rest] || Rest <- Perms],
+ ["vhost", "user"], ReqData, Context)
+ end,
+ fun() ->
+ rabbit_mgmt_util:bad_request(vhost_or_user_not_found, ReqData, Context)
+ end).
+
+is_authorized(ReqData, Context) ->
+ rabbit_mgmt_util:is_authorized_admin(ReqData, Context).
diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_policies.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_policies.erl
new file mode 100644
index 0000000000..c2b1bf1a1a
--- /dev/null
+++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_policies.erl
@@ -0,0 +1,49 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mgmt_wm_policies).
+
+-export([init/2, to_json/2, content_types_provided/2, is_authorized/2,
+ resource_exists/2, basic/1]).
+-export([variances/2]).
+
+-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+%%--------------------------------------------------------------------
+
+init(Req, _State) ->
+ {cowboy_rest, rabbit_mgmt_headers:set_common_permission_headers(Req, ?MODULE), #context{}}.
+
+variances(Req, Context) ->
+ {[<<"accept-encoding">>, <<"origin">>], Req, Context}.
+
+content_types_provided(ReqData, Context) ->
+ {rabbit_mgmt_util:responder_map(to_json), ReqData, Context}.
+
+resource_exists(ReqData, Context) ->
+ {case basic(ReqData) of
+ not_found -> false;
+ _ -> true
+ end, ReqData, Context}.
+
+to_json(ReqData, Context) ->
+ rabbit_mgmt_util:reply_list(
+ rabbit_mgmt_util:filter_vhost(basic(ReqData), ReqData, Context),
+ ["priority"], ReqData, Context).
+
+is_authorized(ReqData, Context) ->
+ rabbit_mgmt_util:is_authorized_vhost(ReqData, Context).
+
+%%--------------------------------------------------------------------
+
+basic(ReqData) ->
+ case rabbit_mgmt_util:vhost(ReqData) of
+ not_found -> not_found;
+ none -> rabbit_policy:list();
+ VHost -> rabbit_policy:list(VHost)
+ end.
diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_policy.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_policy.erl
new file mode 100644
index 0000000000..5ab8033925
--- /dev/null
+++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_policy.erl
@@ -0,0 +1,82 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mgmt_wm_policy).
+
+-export([init/2, resource_exists/2, to_json/2,
+ content_types_provided/2, content_types_accepted/2,
+ is_authorized/2, allowed_methods/2, accept_content/2,
+ delete_resource/2]).
+-export([variances/2]).
+
+-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+%%--------------------------------------------------------------------
+
+init(Req, _State) ->
+ {cowboy_rest, rabbit_mgmt_headers:set_common_permission_headers(Req, ?MODULE), #context{}}.
+
+variances(Req, Context) ->
+ {[<<"accept-encoding">>, <<"origin">>], Req, Context}.
+
+content_types_provided(ReqData, Context) ->
+ {rabbit_mgmt_util:responder_map(to_json), ReqData, Context}.
+
+content_types_accepted(ReqData, Context) ->
+ {[{'*', accept_content}], ReqData, Context}.
+
+allowed_methods(ReqData, Context) ->
+ {[<<"HEAD">>, <<"GET">>, <<"PUT">>, <<"DELETE">>, <<"OPTIONS">>], ReqData, Context}.
+
+resource_exists(ReqData, Context) ->
+ {case policy(ReqData) of
+ not_found -> false;
+ _ -> true
+ end, ReqData, Context}.
+
+to_json(ReqData, Context) ->
+ rabbit_mgmt_util:reply(policy(ReqData), ReqData, Context).
+
+accept_content(ReqData0, Context = #context{user = #user{username = Username}}) ->
+ case rabbit_mgmt_util:vhost(ReqData0) of
+ not_found ->
+ rabbit_mgmt_util:not_found(vhost_not_found, ReqData0, Context);
+ VHost ->
+ rabbit_mgmt_util:with_decode(
+ [pattern, definition], ReqData0, Context,
+ fun([Pattern, Definition], Body, ReqData) ->
+ case rabbit_policy:set(
+ VHost, name(ReqData), Pattern,
+ maps:to_list(Definition),
+ maps:get(priority, Body, undefined),
+ maps:get('apply-to', Body, undefined),
+ Username) of
+ ok ->
+ {true, ReqData, Context};
+ {error_string, Reason} ->
+ rabbit_mgmt_util:bad_request(
+ rabbit_mgmt_format:escape_html_tags(Reason), ReqData, Context)
+ end
+ end)
+ end.
+
+delete_resource(ReqData, Context = #context{user = #user{username = Username}}) ->
+ ok = rabbit_policy:delete(
+ rabbit_mgmt_util:vhost(ReqData), name(ReqData), Username),
+ {true, ReqData, Context}.
+
+is_authorized(ReqData, Context) ->
+ rabbit_mgmt_util:is_authorized_policies(ReqData, Context).
+
+%%--------------------------------------------------------------------
+
+policy(ReqData) ->
+ rabbit_policy:lookup(
+ rabbit_mgmt_util:vhost(ReqData), name(ReqData)).
+
+name(ReqData) -> rabbit_mgmt_util:id(name, ReqData).
diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_queue.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_queue.erl
new file mode 100644
index 0000000000..6560be1524
--- /dev/null
+++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_queue.erl
@@ -0,0 +1,113 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mgmt_wm_queue).
+
+-export([init/2, resource_exists/2, to_json/2,
+ content_types_provided/2, content_types_accepted/2,
+ is_authorized/2, allowed_methods/2, accept_content/2,
+ delete_resource/2, queue/1, queue/2]).
+-export([variances/2]).
+
+-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+%%--------------------------------------------------------------------
+
+init(Req, _State) ->
+ {cowboy_rest, rabbit_mgmt_headers:set_common_permission_headers(Req, ?MODULE), #context{}}.
+
+variances(Req, Context) ->
+ {[<<"accept-encoding">>, <<"origin">>], Req, Context}.
+
+content_types_provided(ReqData, Context) ->
+ {rabbit_mgmt_util:responder_map(to_json), ReqData, Context}.
+
+content_types_accepted(ReqData, Context) ->
+ {[{'*', accept_content}], ReqData, Context}.
+
+allowed_methods(ReqData, Context) ->
+ {[<<"HEAD">>, <<"GET">>, <<"PUT">>, <<"DELETE">>, <<"OPTIONS">>], ReqData, Context}.
+
+resource_exists(ReqData, Context) ->
+ {case queue(ReqData) of
+ not_found -> false;
+ _ -> true
+ end, ReqData, Context}.
+
+to_json(ReqData, Context) ->
+ try
+ case rabbit_mgmt_util:disable_stats(ReqData) of
+ false ->
+ [Q] = rabbit_mgmt_db:augment_queues(
+ [queue(ReqData)], rabbit_mgmt_util:range_ceil(ReqData),
+ full),
+ Payload = rabbit_mgmt_format:clean_consumer_details(
+ rabbit_mgmt_format:strip_pids(Q)),
+ rabbit_mgmt_util:reply(ensure_defaults(Payload), ReqData, Context);
+ true ->
+ rabbit_mgmt_util:reply(rabbit_mgmt_format:strip_pids(queue(ReqData)),
+ ReqData, Context)
+ end
+ catch
+ {error, invalid_range_parameters, Reason} ->
+ rabbit_mgmt_util:bad_request(iolist_to_binary(Reason), ReqData, Context)
+ end.
+
+accept_content(ReqData, Context) ->
+ Name = rabbit_mgmt_util:id(queue, ReqData),
+ rabbit_mgmt_util:direct_request(
+ 'queue.declare',
+ fun rabbit_mgmt_format:format_accept_content/1,
+ [{queue, Name}], "Declare queue error: ~s", ReqData, Context).
+
+delete_resource(ReqData, Context) ->
+    %% We need to retrieve if-unused and if-empty manually, as the HTTP API
+    %% uses '-' while the record uses '_'.
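+    %% For example (hypothetical request), DELETE
+    %% /api/queues/%2F/my-queue?if-unused=true yields IfUnused = true and
+    %% IfEmpty = false below.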
+ IfUnused = <<"true">> =:= rabbit_mgmt_util:qs_val(<<"if-unused">>, ReqData),
+ IfEmpty = <<"true">> =:= rabbit_mgmt_util:qs_val(<<"if-empty">>, ReqData),
+ Name = rabbit_mgmt_util:id(queue, ReqData),
+ rabbit_mgmt_util:direct_request(
+ 'queue.delete',
+ fun rabbit_mgmt_format:format_accept_content/1,
+ [{queue, Name},
+ {if_unused, IfUnused},
+ {if_empty, IfEmpty}], "Delete queue error: ~s", ReqData, Context).
+
+is_authorized(ReqData, Context) ->
+ rabbit_mgmt_util:is_authorized_vhost(ReqData, Context).
+
+%%--------------------------------------------------------------------
+
+%% This is here to ensure certain data points are always present. When a
+%% queue is moved there can be transient periods where certain advanced
+%% metrics aren't yet available on the new node.
+ensure_defaults(Payload0) ->
+ case lists:keyfind(garbage_collection, 1, Payload0) of
+ {_K, _V} -> Payload0;
+ false ->
+ [{garbage_collection,
+ [{max_heap_size,-1},
+ {min_bin_vheap_size,-1},
+ {min_heap_size,-1},
+ {fullsweep_after,-1},
+ {minor_gcs,-1}]} | Payload0]
+ end.
+
+queue(ReqData) ->
+ case rabbit_mgmt_util:vhost(ReqData) of
+ not_found -> not_found;
+ VHost -> queue(VHost, rabbit_mgmt_util:id(queue, ReqData))
+ end.
+
+
+queue(VHost, QName) ->
+ Name = rabbit_misc:r(VHost, queue, QName),
+ case rabbit_amqqueue:lookup(Name) of
+ {ok, Q} -> rabbit_mgmt_format:queue(Q);
+ {error, not_found} -> not_found
+ end.
diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_queue_actions.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_queue_actions.erl
new file mode 100644
index 0000000000..c3b13f40c2
--- /dev/null
+++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_queue_actions.erl
@@ -0,0 +1,68 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mgmt_wm_queue_actions).
+
+-export([init/2, resource_exists/2, is_authorized/2,
+ allowed_methods/2, content_types_accepted/2, accept_content/2]).
+-export([variances/2]).
+
+-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include_lib("rabbit/include/amqqueue.hrl").
+
+%%--------------------------------------------------------------------
+
+init(Req, _State) ->
+ {cowboy_rest, rabbit_mgmt_headers:set_common_permission_headers(Req, ?MODULE), #context{}}.
+
+variances(Req, Context) ->
+ {[<<"accept-encoding">>, <<"origin">>], Req, Context}.
+
+allowed_methods(ReqData, Context) ->
+ {[<<"POST">>, <<"OPTIONS">>], ReqData, Context}.
+
+resource_exists(ReqData, Context) ->
+ {case rabbit_mgmt_wm_queue:queue(ReqData) of
+ not_found -> false;
+ _ -> true
+ end, ReqData, Context}.
+
+content_types_accepted(ReqData, Context) ->
+ {[{'*', accept_content}], ReqData, Context}.
+
+accept_content(ReqData, Context) ->
+ rabbit_mgmt_util:post_respond(do_it(ReqData, Context)).
+
+do_it(ReqData0, Context) ->
+ VHost = rabbit_mgmt_util:vhost(ReqData0),
+ QName = rabbit_mgmt_util:id(queue, ReqData0),
+ rabbit_mgmt_util:with_decode(
+ [action], ReqData0, Context,
+ fun([Action], _Body, ReqData) ->
+ rabbit_amqqueue:with(
+ rabbit_misc:r(VHost, queue, QName),
+ fun(Q) -> action(Action, Q, ReqData, Context) end)
+ end).
+
+is_authorized(ReqData, Context) ->
+ rabbit_mgmt_util:is_authorized_admin(ReqData, Context).
+
+%%--------------------------------------------------------------------
+
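+%% "sync" is started in a separate process so the HTTP request does not
+%% block while the mirrors synchronise.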
+action(<<"sync">>, Q, ReqData, Context) when ?is_amqqueue(Q) ->
+ QPid = amqqueue:get_pid(Q),
+ spawn(fun() -> rabbit_amqqueue:sync_mirrors(QPid) end),
+ {true, ReqData, Context};
+
+action(<<"cancel_sync">>, Q, ReqData, Context) when ?is_amqqueue(Q) ->
+ QPid = amqqueue:get_pid(Q),
+ _ = rabbit_amqqueue:cancel_sync_mirrors(QPid),
+ {true, ReqData, Context};
+
+action(Else, _Q, ReqData, Context) ->
+ rabbit_mgmt_util:bad_request({unknown, Else}, ReqData, Context).
diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_queue_get.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_queue_get.erl
new file mode 100644
index 0000000000..88ad9a0034
--- /dev/null
+++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_queue_get.erl
@@ -0,0 +1,166 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mgmt_wm_queue_get).
+
+-export([init/2, resource_exists/2, is_authorized/2,
+ allowed_methods/2, accept_content/2, content_types_provided/2,
+ content_types_accepted/2]).
+-export([variances/2]).
+
+-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+%%--------------------------------------------------------------------
+
+init(Req, _State) ->
+ {cowboy_rest, rabbit_mgmt_headers:set_common_permission_headers(Req, ?MODULE), #context{}}.
+
+variances(Req, Context) ->
+ {[<<"accept-encoding">>, <<"origin">>], Req, Context}.
+
+allowed_methods(ReqData, Context) ->
+ {[<<"POST">>, <<"OPTIONS">>], ReqData, Context}.
+
+content_types_provided(ReqData, Context) ->
+ {rabbit_mgmt_util:responder_map(to_json), ReqData, Context}.
+
+resource_exists(ReqData, Context) ->
+ {case rabbit_mgmt_wm_queue:queue(ReqData) of
+ not_found -> false;
+ _ -> true
+ end, ReqData, Context}.
+
+content_types_accepted(ReqData, Context) ->
+ {[{'*', accept_content}], ReqData, Context}.
+
+accept_content(ReqData, Context) ->
+ rabbit_mgmt_util:post_respond(do_it(ReqData, Context)).
+
+do_it(ReqData0, Context) ->
+ VHost = rabbit_mgmt_util:vhost(ReqData0),
+ Q = rabbit_mgmt_util:id(queue, ReqData0),
+ rabbit_mgmt_util:with_decode(
+ [ackmode, count, encoding], ReqData0, Context,
+ fun([AckModeBin, CountBin, EncBin], Body, ReqData) ->
+ rabbit_mgmt_util:with_channel(
+ VHost, ReqData, Context,
+ fun (Ch) ->
+ AckMode = list_to_atom(binary_to_list(AckModeBin)),
+ Count = rabbit_mgmt_util:parse_int(CountBin),
+ Enc = case EncBin of
+ <<"auto">> -> auto;
+ <<"base64">> -> base64;
+ _ -> throw({error, <<"Unsupported encoding. Please use auto or base64.">>})
+ end,
+ Trunc = case maps:get(truncate, Body, undefined) of
+ undefined -> none;
+ TruncBin -> rabbit_mgmt_util:parse_int(
+ TruncBin)
+ end,
+
+ Reply = basic_gets(Count, Ch, Q, AckMode, Enc, Trunc),
+ maybe_rejects(Reply, Ch, AckMode),
+ rabbit_mgmt_util:reply(remove_delivery_tag(Reply),
+ ReqData, Context)
+ end)
+ end).
+
+
+basic_gets(0, _, _, _, _, _) ->
+ [];
+
+basic_gets(Count, Ch, Q, AckMode, Enc, Trunc) ->
+ case basic_get(Ch, Q, AckMode, Enc, Trunc) of
+ none -> [];
+ M -> [M | basic_gets(Count - 1, Ch, Q, AckMode, Enc, Trunc)]
+ end.
+
+ackmode_to_requeue(reject_requeue_false) -> false;
+ackmode_to_requeue(reject_requeue_true) -> true.
+
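+% Maps the requested ackmode onto the no_ack flag of basic.get:
+% ack_requeue_false auto-acks (consuming the message), while the other
+% modes leave the message unacked so it can be requeued or rejected
+% afterwards.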
+parse_ackmode(ack_requeue_false) -> true;
+parse_ackmode(ack_requeue_true) -> false;
+parse_ackmode(reject_requeue_false) -> false;
+parse_ackmode(reject_requeue_true) -> false.
+
+
+% The messages must be rejected later, because we would always get the
+% same message back if they were requeued inside basic_get/5.
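+% (With reject_requeue_true, for instance, rejecting each message right
+% after its basic.get would put it back in the queue, and the next get
+% could fetch the very same message again.)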
+maybe_rejects(R, Ch, AckMode) ->
+ lists:foreach(fun(X) ->
+ maybe_reject(Ch, AckMode,
+ proplists:get_value(delivery_tag, X))
+ end, R).
+
+% Removes the delivery_tag from the reply; it is not needed in the
+% HTTP response.
+remove_delivery_tag([]) -> [];
+remove_delivery_tag([H|T]) ->
+    [proplists:delete(delivery_tag, H) | remove_delivery_tag(T)].
+
+
+maybe_reject(Ch, AckMode, DeliveryTag) when AckMode == reject_requeue_true;
+ AckMode == reject_requeue_false ->
+ amqp_channel:call(Ch,
+ #'basic.reject'{delivery_tag = DeliveryTag,
+ requeue = ackmode_to_requeue(AckMode)});
+maybe_reject(_Ch, _AckMode, _DeliveryTag) -> ok.
+
+
+basic_get(Ch, Q, AckMode, Enc, Trunc) ->
+ case amqp_channel:call(Ch,
+ #'basic.get'{queue = Q,
+ no_ack = parse_ackmode(AckMode)}) of
+ {#'basic.get_ok'{redelivered = Redelivered,
+ exchange = Exchange,
+ routing_key = RoutingKey,
+ message_count = MessageCount,
+ delivery_tag = DeliveryTag},
+ #amqp_msg{props = Props, payload = Payload}} ->
+ [{payload_bytes, size(Payload)},
+ {redelivered, Redelivered},
+ {exchange, Exchange},
+ {routing_key, RoutingKey},
+ {message_count, MessageCount},
+ {delivery_tag, DeliveryTag},
+ {properties, rabbit_mgmt_format:basic_properties(Props)}] ++
+ payload_part(maybe_truncate(Payload, Trunc), Enc);
+ #'basic.get_empty'{} ->
+ none
+ end.
+
+is_authorized(ReqData, Context) ->
+ rabbit_mgmt_util:is_authorized_vhost(ReqData, Context).
+
+%%--------------------------------------------------------------------
+
+maybe_truncate(Payload, none) -> Payload;
+maybe_truncate(Payload, Len) when size(Payload) < Len -> Payload;
+maybe_truncate(Payload, Len) ->
+ <<Start:Len/binary, _Rest/binary>> = Payload,
+ Start.
+
+payload_part(Payload, Enc) ->
+ {PL, E} = case Enc of
+ auto -> case is_utf8(Payload) of
+ true -> {Payload, string};
+ false -> {base64:encode(Payload), base64}
+ end;
+ _ -> {base64:encode(Payload), base64}
+ end,
+ [{payload, PL}, {payload_encoding, E}].
+
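+%% Walks the binary one UTF-8 codepoint at a time; any byte sequence
+%% that is not valid UTF-8 fails the match and yields false.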
+is_utf8(<<>>) -> true;
+is_utf8(<<_/utf8, Rest/bits>>) -> is_utf8(Rest);
+is_utf8(_) -> false.
diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_queue_purge.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_queue_purge.erl
new file mode 100644
index 0000000000..b250984d11
--- /dev/null
+++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_queue_purge.erl
@@ -0,0 +1,42 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mgmt_wm_queue_purge).
+
+-export([init/2, resource_exists/2, is_authorized/2, allowed_methods/2,
+ delete_resource/2]).
+-export([variances/2]).
+
+-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+%%--------------------------------------------------------------------
+
+init(Req, _State) ->
+ {cowboy_rest, rabbit_mgmt_headers:set_common_permission_headers(Req, ?MODULE), #context{}}.
+
+variances(Req, Context) ->
+ {[<<"accept-encoding">>, <<"origin">>], Req, Context}.
+
+allowed_methods(ReqData, Context) ->
+ {[<<"DELETE">>, <<"OPTIONS">>], ReqData, Context}.
+
+resource_exists(ReqData, Context) ->
+ {case rabbit_mgmt_wm_queue:queue(ReqData) of
+ not_found -> false;
+ _ -> true
+ end, ReqData, Context}.
+
+delete_resource(ReqData, Context) ->
+ Name = rabbit_mgmt_util:id(queue, ReqData),
+ rabbit_mgmt_util:direct_request(
+ 'queue.purge',
+ fun rabbit_mgmt_format:format_accept_content/1,
+ [{queue, Name}], "Error purging queue: ~s", ReqData, Context).
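+
+%% Illustrative use: issuing an HTTP DELETE against this resource (the
+%% route itself is defined in the dispatcher, not in this module)
+%% purges the queue via a direct 'queue.purge' call.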
+
+is_authorized(ReqData, Context) ->
+ rabbit_mgmt_util:is_authorized_vhost(ReqData, Context).
diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_queues.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_queues.erl
new file mode 100644
index 0000000000..7df50f61d9
--- /dev/null
+++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_queues.erl
@@ -0,0 +1,113 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mgmt_wm_queues).
+
+-export([init/2, to_json/2, content_types_provided/2, is_authorized/2,
+ resource_exists/2, basic/1]).
+-export([variances/2,
+ augmented/2]).
+
+-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+-include_lib("rabbit/include/amqqueue.hrl").
+
+-define(BASIC_COLUMNS, ["vhost", "name", "durable", "auto_delete", "exclusive",
+ "owner_pid", "arguments", "pid", "state"]).
+
+-define(DEFAULT_SORT, ["vhost", "name"]).
+
+%%--------------------------------------------------------------------
+
+init(Req, _State) ->
+ {cowboy_rest, rabbit_mgmt_headers:set_common_permission_headers(Req, ?MODULE), #context{}}.
+
+variances(Req, Context) ->
+ {[<<"accept-encoding">>, <<"origin">>], Req, Context}.
+
+content_types_provided(ReqData, Context) ->
+ {rabbit_mgmt_util:responder_map(to_json), ReqData, Context}.
+
+resource_exists(ReqData, Context) ->
+ {case queues0(ReqData) of
+ vhost_not_found -> false;
+ _ -> true
+ end, ReqData, Context}.
+
+to_json(ReqData, Context) ->
+ try
+ Basic = basic_vhost_filtered(ReqData, Context),
+ Data = rabbit_mgmt_util:augment_resources(Basic, ?DEFAULT_SORT,
+ ?BASIC_COLUMNS, ReqData,
+ Context, fun augment/2),
+ rabbit_mgmt_util:reply(Data, ReqData, Context)
+ catch
+ {error, invalid_range_parameters, Reason} ->
+ rabbit_mgmt_util:bad_request(iolist_to_binary(Reason), ReqData,
+ Context)
+ end.
+
+is_authorized(ReqData, Context) ->
+ rabbit_mgmt_util:is_authorized_vhost(ReqData, Context).
+
+%%--------------------------------------------------------------------
+%% Exported functions
+
+basic(ReqData) ->
+ case rabbit_mgmt_util:disable_stats(ReqData) of
+ false ->
+ [rabbit_mgmt_format:queue(Q) || Q <- queues0(ReqData)] ++
+ [rabbit_mgmt_format:queue(amqqueue:set_state(Q, down)) ||
+ Q <- down_queues(ReqData)];
+ true ->
+ case rabbit_mgmt_util:enable_queue_totals(ReqData) of
+ false ->
+ [rabbit_mgmt_format:queue(Q) ++ policy(Q) || Q <- queues0(ReqData)] ++
+ [rabbit_mgmt_format:queue(amqqueue:set_state(Q, down)) ||
+ Q <- down_queues(ReqData)];
+ true ->
+ [rabbit_mgmt_format:queue_info(Q) || Q <- queues_with_totals(ReqData)] ++
+ [rabbit_mgmt_format:queue(amqqueue:set_state(Q, down)) ||
+ Q <- down_queues(ReqData)]
+ end
+ end.
+
+augmented(ReqData, Context) ->
+ augment(rabbit_mgmt_util:filter_vhost(basic(ReqData), ReqData, Context), ReqData).
+
+%%--------------------------------------------------------------------
+%% Private helpers
+
+augment(Basic, ReqData) ->
+ case rabbit_mgmt_util:disable_stats(ReqData) of
+ false ->
+ rabbit_mgmt_db:augment_queues(Basic, rabbit_mgmt_util:range_ceil(ReqData),
+ basic);
+ true ->
+ Basic
+ end.
+
+basic_vhost_filtered(ReqData, Context) ->
+ rabbit_mgmt_util:filter_vhost(basic(ReqData), ReqData, Context).
+
+queues0(ReqData) ->
+ rabbit_mgmt_util:all_or_one_vhost(ReqData, fun rabbit_amqqueue:list/1).
+
+queues_with_totals(ReqData) ->
+ rabbit_mgmt_util:all_or_one_vhost(ReqData, fun collect_info_all/1).
+
+collect_info_all(VHostPath) ->
+ rabbit_amqqueue:collect_info_all(VHostPath, [name, durable, auto_delete, exclusive, owner_pid, arguments, type, state, policy, totals, type_specific]).
+
+down_queues(ReqData) ->
+ rabbit_mgmt_util:all_or_one_vhost(ReqData, fun rabbit_amqqueue:list_down/1).
+
+policy(Q) ->
+ case rabbit_policy:name(Q) of
+ none -> [];
+ Policy -> [{policy, Policy}]
+ end.
diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_rebalance_queues.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_rebalance_queues.erl
new file mode 100644
index 0000000000..982655493c
--- /dev/null
+++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_rebalance_queues.erl
@@ -0,0 +1,58 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mgmt_wm_rebalance_queues).
+
+-export([init/2, service_available/2, resource_exists/2,
+ content_types_provided/2, content_types_accepted/2,
+ is_authorized/2, allowed_methods/2, accept_content/2]).
+-export([variances/2]).
+
+-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+%%--------------------------------------------------------------------
+
+init(Req, [Mode]) ->
+ Headers = rabbit_mgmt_headers:set_common_permission_headers(Req, ?MODULE),
+ {cowboy_rest, Headers, {Mode, #context{}}}.
+
+service_available(Req, {{queues, all}, _Context}=State) ->
+ {true, Req, State};
+service_available(Req, State) ->
+ {false, Req, State}.
+
+variances(Req, State) ->
+ {[<<"accept-encoding">>, <<"origin">>], Req, State}.
+
+content_types_provided(Req, State) ->
+ {[{{<<"text">>, <<"plain">>, '*'}, undefined}], Req, State}.
+
+content_types_accepted(Req, State) ->
+ {[{'*', accept_content}], Req, State}.
+
+allowed_methods(Req, State) ->
+ {[<<"POST">>, <<"OPTIONS">>], Req, State}.
+
+resource_exists(Req, State) ->
+ {true, Req, State}.
+
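+%% The rebalance runs in a spawned process, so the POST returns once
+%% the operation has been started, not when it completes.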
+accept_content(Req, {_Mode, #context{user = #user{username = Username}}}=State) ->
+ try
+ rabbit_log:info("User '~s' has initiated a queue rebalance", [Username]),
+ spawn(fun() ->
+ rabbit_amqqueue:rebalance(all, <<".*">>, <<".*">>)
+ end),
+ {true, Req, State}
+ catch
+ {error, Reason} ->
+ rabbit_mgmt_util:bad_request(iolist_to_binary(Reason), Req, State)
+ end.
+
+is_authorized(Req0, {Mode, Context0}) ->
+ {Res, Req1, Context1} = rabbit_mgmt_util:is_authorized_admin(Req0, Context0),
+ {Res, Req1, {Mode, Context1}}.
diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_redirect.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_redirect.erl
new file mode 100644
index 0000000000..c5b3808d5a
--- /dev/null
+++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_redirect.erl
@@ -0,0 +1,13 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2010-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mgmt_wm_redirect).
+-export([init/2]).
+
+init(Req0, RedirectTo) ->
+ Req = cowboy_req:reply(301, #{<<"location">> => RedirectTo}, Req0),
+ {ok, Req, RedirectTo}.
diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_reset.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_reset.erl
new file mode 100644
index 0000000000..ea3d698ece
--- /dev/null
+++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_reset.erl
@@ -0,0 +1,54 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mgmt_wm_reset).
+
+-export([init/2, is_authorized/2, resource_exists/2,
+ allowed_methods/2, delete_resource/2]).
+-export([variances/2]).
+
+-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl").
+
+%%--------------------------------------------------------------------
+
+init(Req, _State) ->
+ {cowboy_rest, rabbit_mgmt_headers:set_common_permission_headers(Req, ?MODULE), #context{}}.
+
+variances(Req, Context) ->
+ {[<<"accept-encoding">>, <<"origin">>], Req, Context}.
+
+allowed_methods(ReqData, Context) ->
+ {[<<"DELETE">>, <<"OPTIONS">>], ReqData, Context}.
+
+resource_exists(ReqData, Context) ->
+ case get_node(ReqData) of
+ none -> {true, ReqData, Context};
+ {ok, Node} -> {lists:member(Node, rabbit_nodes:all_running()),
+ ReqData, Context}
+ end.
+
+delete_resource(ReqData, Context) ->
+ case get_node(ReqData) of
+ none ->
+ rabbit_mgmt_storage:reset_all();
+ {ok, Node} ->
+ rpc:call(Node, rabbit_mgmt_storage, reset, [])
+ end,
+ {true, ReqData, Context}.
+
+is_authorized(ReqData, Context) ->
+ rabbit_mgmt_util:is_authorized_admin(ReqData, Context).
+
+
+get_node(ReqData) ->
+ case rabbit_mgmt_util:id(node, ReqData) of
+ none ->
+ none;
+ Node0 ->
+ Node = list_to_atom(binary_to_list(Node0)),
+ {ok, Node}
+ end.
diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_static.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_static.erl
new file mode 100644
index 0000000000..4a7f608a8a
--- /dev/null
+++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_static.erl
@@ -0,0 +1,65 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2010-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+%% Alias for cowboy_static that accepts a list of directories
+%% where static files can be found.
+
+-module(rabbit_mgmt_wm_static).
+
+-include_lib("kernel/include/file.hrl").
+
+-export([init/2]).
+-export([malformed_request/2]).
+-export([forbidden/2]).
+-export([content_types_provided/2]).
+-export([resource_exists/2]).
+-export([last_modified/2]).
+-export([generate_etag/2]).
+-export([get_file/2]).
+
+
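+%% For a list of {App, Path} pairs (Path is relative to App's priv
+%% dir), the first entry whose directory contains the requested file
+%% is served; later entries act as fallbacks. A hypothetical example:
+%%   [{my_mgmt_extension, "www"}, {rabbitmq_management, "www"}]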
+init(Req0, {priv_file, _App, _Path}=Opts) ->
+ Req1 = rabbit_mgmt_headers:set_common_permission_headers(Req0, ?MODULE),
+ cowboy_static:init(Req1, Opts);
+init(Req0, [{App, Path}]) ->
+ Req1 = rabbit_mgmt_headers:set_common_permission_headers(Req0, ?MODULE),
+ do_init(Req1, App, Path);
+init(Req0, [{App, Path}|Tail]) ->
+ Req1 = rabbit_mgmt_headers:set_common_permission_headers(Req0, ?MODULE),
+ PathInfo = cowboy_req:path_info(Req1),
+ Filepath = filename:join([code:priv_dir(App), Path|PathInfo]),
+ %% We use erl_prim_loader because the file may be inside an .ez archive.
+ FileInfo = erl_prim_loader:read_file_info(binary_to_list(Filepath)),
+ case FileInfo of
+ {ok, #file_info{type = regular}} -> do_init(Req1, App, Path);
+ {ok, #file_info{type = symlink}} -> do_init(Req1, App, Path);
+ _ -> init(Req0, Tail)
+ end.
+
+do_init(Req, App, Path) ->
+ cowboy_static:init(Req, {priv_dir, App, Path}).
+
+malformed_request(Req, State) ->
+ cowboy_static:malformed_request(Req, State).
+
+forbidden(Req, State) ->
+ cowboy_static:forbidden(Req, State).
+
+content_types_provided(Req, State) ->
+ cowboy_static:content_types_provided(Req, State).
+
+resource_exists(Req, State) ->
+ cowboy_static:resource_exists(Req, State).
+
+last_modified(Req, State) ->
+ cowboy_static:last_modified(Req, State).
+
+generate_etag(Req, State) ->
+ cowboy_static:generate_etag(Req, State).
+
+get_file(Req, State) ->
+ cowboy_static:get_file(Req, State).
diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_topic_permission.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_topic_permission.erl
new file mode 100644
index 0000000000..99f25bb69e
--- /dev/null
+++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_topic_permission.erl
@@ -0,0 +1,103 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mgmt_wm_topic_permission).
+
+-export([init/2, resource_exists/2, to_json/2,
+ content_types_provided/2, content_types_accepted/2,
+ is_authorized/2, allowed_methods/2, accept_content/2,
+ delete_resource/2]).
+-export([variances/2]).
+
+-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+%%--------------------------------------------------------------------
+
+init(Req, _State) ->
+ {cowboy_rest, rabbit_mgmt_headers:set_common_permission_headers(Req, ?MODULE), #context{}}.
+
+variances(Req, Context) ->
+ {[<<"accept-encoding">>, <<"origin">>], Req, Context}.
+
+content_types_provided(ReqData, Context) ->
+ {[{<<"application/json">>, to_json}], ReqData, Context}.
+
+content_types_accepted(ReqData, Context) ->
+ {[{'*', accept_content}], ReqData, Context}.
+
+allowed_methods(ReqData, Context) ->
+ {[<<"HEAD">>, <<"GET">>, <<"PUT">>, <<"DELETE">>, <<"OPTIONS">>], ReqData, Context}.
+
+resource_exists(ReqData, Context) ->
+ {case topic_perms(ReqData) of
+ none -> false;
+ not_found -> false;
+ _ -> true
+ end, ReqData, Context}.
+
+to_json(ReqData, Context) ->
+ rabbit_mgmt_util:reply(topic_perms(ReqData), ReqData, Context).
+
+accept_content(ReqData0, Context = #context{user = #user{username = Username}}) ->
+ case topic_perms(ReqData0) of
+ not_found ->
+ rabbit_mgmt_util:bad_request(vhost_or_user_not_found,
+ ReqData0, Context);
+ _ ->
+ User = rabbit_mgmt_util:id(user, ReqData0),
+ VHost = rabbit_mgmt_util:id(vhost, ReqData0),
+ rabbit_mgmt_util:with_decode(
+ [exchange, write, read], ReqData0, Context,
+ fun([Exchange, Write, Read], _, ReqData) ->
+ rabbit_auth_backend_internal:set_topic_permissions(
+ User, VHost, Exchange, Write, Read, Username),
+ {true, ReqData, Context}
+ end)
+ end.
+
+delete_resource(ReqData, Context = #context{user = #user{username = Username}}) ->
+ User = rabbit_mgmt_util:id(user, ReqData),
+ VHost = rabbit_mgmt_util:id(vhost, ReqData),
+ case rabbit_mgmt_util:id(exchange, ReqData) of
+ none ->
+ rabbit_auth_backend_internal:clear_topic_permissions(User, VHost, Username);
+ Exchange ->
+ rabbit_auth_backend_internal:clear_topic_permissions(User, VHost, Exchange,
+ Username)
+ end,
+ {true, ReqData, Context}.
+
+is_authorized(ReqData, Context) ->
+ rabbit_mgmt_util:is_authorized_admin(ReqData, Context).
+
+%%--------------------------------------------------------------------
+
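+%% Returns not_found when the user or the vhost does not exist, and
+%% none when both exist but no topic permissions are set;
+%% resource_exists/2 treats either value as a missing resource.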
+topic_perms(ReqData) ->
+ User = rabbit_mgmt_util:id(user, ReqData),
+ case rabbit_auth_backend_internal:lookup_user(User) of
+ {ok, _} ->
+ case rabbit_mgmt_util:vhost(ReqData) of
+ not_found ->
+ not_found;
+ VHost ->
+ rabbit_mgmt_util:catch_no_such_user_or_vhost(
+ fun() ->
+ Perms =
+ rabbit_auth_backend_internal:list_user_vhost_topic_permissions(
+ User, VHost),
+ case Perms of
+ [] -> none;
+ TopicPermissions -> [[{user, User}, {vhost, VHost} | TopicPermission]
+ || TopicPermission <- TopicPermissions]
+ end
+ end,
+ fun() -> not_found end)
+ end;
+ {error, _} ->
+ not_found
+ end.
diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_topic_permissions.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_topic_permissions.erl
new file mode 100644
index 0000000000..c1406043cd
--- /dev/null
+++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_topic_permissions.erl
@@ -0,0 +1,38 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mgmt_wm_topic_permissions).
+
+-export([init/2, to_json/2, content_types_provided/2, is_authorized/2]).
+-export([topic_permissions/0]).
+-export([variances/2]).
+
+-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+%%--------------------------------------------------------------------
+
+init(Req, _State) ->
+ {cowboy_rest, rabbit_mgmt_headers:set_common_permission_headers(Req, ?MODULE), #context{}}.
+
+variances(Req, Context) ->
+ {[<<"accept-encoding">>, <<"origin">>], Req, Context}.
+
+content_types_provided(ReqData, Context) ->
+ {[{<<"application/json">>, to_json}], ReqData, Context}.
+
+to_json(ReqData, Context) ->
+ rabbit_mgmt_util:reply_list(topic_permissions(), ["vhost", "user"],
+ ReqData, Context).
+
+is_authorized(ReqData, Context) ->
+ rabbit_mgmt_util:is_authorized_admin(ReqData, Context).
+
+%%--------------------------------------------------------------------
+
+topic_permissions() ->
+ rabbit_auth_backend_internal:list_topic_permissions().
diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_topic_permissions_user.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_topic_permissions_user.erl
new file mode 100644
index 0000000000..7d9c2d58fc
--- /dev/null
+++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_topic_permissions_user.erl
@@ -0,0 +1,47 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mgmt_wm_topic_permissions_user).
+
+-export([init/2, to_json/2, content_types_provided/2, resource_exists/2,
+ is_authorized/2]).
+-export([variances/2]).
+
+-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+%%--------------------------------------------------------------------
+
+init(Req, _State) ->
+ {cowboy_rest, rabbit_mgmt_headers:set_common_permission_headers(Req, ?MODULE), #context{}}.
+
+variances(Req, Context) ->
+ {[<<"accept-encoding">>, <<"origin">>], Req, Context}.
+
+content_types_provided(ReqData, Context) ->
+ {[{<<"application/json">>, to_json}], ReqData, Context}.
+
+resource_exists(ReqData, Context) ->
+ {case rabbit_mgmt_wm_user:user(ReqData) of
+ {ok, _} -> true;
+ {error, _} -> false
+ end, ReqData, Context}.
+
+to_json(ReqData, Context) ->
+ User = rabbit_mgmt_util:id(user, ReqData),
+ rabbit_mgmt_util:catch_no_such_user_or_vhost(
+ fun() ->
+ Perms = rabbit_auth_backend_internal:list_user_topic_permissions(User),
+ rabbit_mgmt_util:reply_list([[{user, User} | Rest] || Rest <- Perms],
+ ["vhost", "user"], ReqData, Context)
+ end,
+ fun() ->
+ rabbit_mgmt_util:bad_request(vhost_or_user_not_found, ReqData, Context)
+ end).
+
+is_authorized(ReqData, Context) ->
+ rabbit_mgmt_util:is_authorized_admin(ReqData, Context).
diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_topic_permissions_vhost.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_topic_permissions_vhost.erl
new file mode 100644
index 0000000000..8540ee9a77
--- /dev/null
+++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_topic_permissions_vhost.erl
@@ -0,0 +1,44 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mgmt_wm_topic_permissions_vhost).
+
+-export([init/2, to_json/2, content_types_provided/2, resource_exists/2,
+ is_authorized/2]).
+-export([variances/2]).
+
+-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+%%--------------------------------------------------------------------
+
+init(Req, _State) ->
+ {cowboy_rest, rabbit_mgmt_headers:set_common_permission_headers(Req, ?MODULE), #context{}}.
+
+variances(Req, Context) ->
+ {[<<"accept-encoding">>, <<"origin">>], Req, Context}.
+
+content_types_provided(ReqData, Context) ->
+ {[{<<"application/json">>, to_json}], ReqData, Context}.
+
+resource_exists(ReqData, Context) ->
+ {rabbit_vhost:exists(rabbit_mgmt_wm_vhost:id(ReqData)), ReqData, Context}.
+
+to_json(ReqData, Context) ->
+ VHost = rabbit_mgmt_util:id(vhost, ReqData),
+ rabbit_mgmt_util:catch_no_such_user_or_vhost(
+ fun() ->
+ Perms = rabbit_auth_backend_internal:list_vhost_topic_permissions(VHost),
+ rabbit_mgmt_util:reply_list([[{vhost, VHost} | Rest] || Rest <- Perms],
+ ["vhost", "user"], ReqData, Context)
+ end,
+ fun() ->
+ rabbit_mgmt_util:bad_request(vhost_or_user_not_found, ReqData, Context)
+ end).
+
+is_authorized(ReqData, Context) ->
+ rabbit_mgmt_util:is_authorized_admin(ReqData, Context).
diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_user.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_user.erl
new file mode 100644
index 0000000000..4c014565f1
--- /dev/null
+++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_user.erl
@@ -0,0 +1,73 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mgmt_wm_user).
+
+-export([init/2, resource_exists/2, to_json/2,
+ content_types_provided/2, content_types_accepted/2,
+ is_authorized/2, allowed_methods/2, accept_content/2,
+ delete_resource/2, user/1, put_user/2, put_user/3]).
+-export([variances/2]).
+
+-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+%%--------------------------------------------------------------------
+
+init(Req, _State) ->
+ {cowboy_rest, rabbit_mgmt_headers:set_common_permission_headers(Req, ?MODULE), #context{}}.
+
+variances(Req, Context) ->
+ {[<<"accept-encoding">>, <<"origin">>], Req, Context}.
+
+content_types_provided(ReqData, Context) ->
+ {rabbit_mgmt_util:responder_map(to_json), ReqData, Context}.
+
+content_types_accepted(ReqData, Context) ->
+ {[{'*', accept_content}], ReqData, Context}.
+
+allowed_methods(ReqData, Context) ->
+ {[<<"HEAD">>, <<"GET">>, <<"PUT">>, <<"DELETE">>, <<"OPTIONS">>], ReqData, Context}.
+
+resource_exists(ReqData, Context) ->
+ {case user(ReqData) of
+ {ok, _} -> true;
+ {error, _} -> false
+ end, ReqData, Context}.
+
+to_json(ReqData, Context) ->
+ {ok, User} = user(ReqData),
+ rabbit_mgmt_util:reply(rabbit_mgmt_format:internal_user(User),
+ ReqData, Context).
+
+accept_content(ReqData0, Context = #context{user = #user{username = ActingUser}}) ->
+ Username = rabbit_mgmt_util:id(user, ReqData0),
+ rabbit_mgmt_util:with_decode(
+ [], ReqData0, Context,
+ fun(_, User, ReqData) ->
+ put_user(User#{name => Username}, ActingUser),
+ {true, ReqData, Context}
+ end).
+
+delete_resource(ReqData, Context = #context{user = #user{username = ActingUser}}) ->
+ User = rabbit_mgmt_util:id(user, ReqData),
+ rabbit_auth_backend_internal:delete_user(User, ActingUser),
+ {true, ReqData, Context}.
+
+is_authorized(ReqData, Context) ->
+ rabbit_mgmt_util:is_authorized_admin(ReqData, Context).
+
+%%--------------------------------------------------------------------
+
+user(ReqData) ->
+ rabbit_auth_backend_internal:lookup_user(rabbit_mgmt_util:id(user, ReqData)).
+
+put_user(User, ActingUser) ->
+ put_user(User, undefined, ActingUser).
+
+put_user(User, Version, ActingUser) ->
+ rabbit_auth_backend_internal:put_user(User, Version, ActingUser).
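+
+%% Illustrative PUT body (field names assumed from the internal auth
+%% backend rather than defined in this module):
+%%   {"password": "s3cret", "tags": "administrator"}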
diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_user_limit.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_user_limit.erl
new file mode 100644
index 0000000000..8e59c55371
--- /dev/null
+++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_user_limit.erl
@@ -0,0 +1,62 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mgmt_wm_user_limit).
+
+-export([init/2,
+ content_types_accepted/2, is_authorized/2,
+ allowed_methods/2, accept_content/2,
+ delete_resource/2]).
+-export([variances/2]).
+
+-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+%%--------------------------------------------------------------------
+
+init(Req, _State) ->
+ {cowboy_rest, rabbit_mgmt_headers:set_common_permission_headers(Req, ?MODULE), #context{}}.
+
+variances(Req, Context) ->
+ {[<<"accept-encoding">>, <<"origin">>], Req, Context}.
+
+content_types_accepted(ReqData, Context) ->
+ {[{{<<"application">>, <<"json">>, '*'}, accept_content}], ReqData, Context}.
+
+allowed_methods(ReqData, Context) ->
+ {[<<"PUT">>, <<"DELETE">>, <<"OPTIONS">>], ReqData, Context}.
+
+accept_content(ReqData0, Context = #context{user = #user{username = ActingUser}}) ->
+ case rabbit_mgmt_util:id(user, ReqData0) of
+ not_found ->
+ rabbit_mgmt_util:not_found(user_not_found, ReqData0, Context);
+ Username ->
+ rabbit_mgmt_util:with_decode(
+ [value], ReqData0, Context,
+ fun([Value], _Body, ReqData) ->
+ Limit = #{name(ReqData) => Value},
+ case rabbit_auth_backend_internal:set_user_limits(Username, Limit, ActingUser) of
+ ok ->
+ {true, ReqData, Context};
+ {error_string, Reason} ->
+ rabbit_mgmt_util:bad_request(
+ list_to_binary(Reason), ReqData, Context)
+ end
+ end)
+ end.
+
+delete_resource(ReqData, Context = #context{user = #user{username = ActingUser}}) ->
+ ok = rabbit_auth_backend_internal:clear_user_limits(
+ rabbit_mgmt_util:id(user, ReqData), name(ReqData), ActingUser),
+ {true, ReqData, Context}.
+
+is_authorized(ReqData, Context) ->
+ rabbit_mgmt_util:is_authorized_admin(ReqData, Context).
+
+%%--------------------------------------------------------------------
+
+name(ReqData) -> rabbit_mgmt_util:id(name, ReqData).
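+
+%% The limit name is taken from the URL path; known names at the time
+%% of writing include max-connections and max-channels (validation is
+%% left to the auth backend, not this module).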
diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_user_limits.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_user_limits.erl
new file mode 100644
index 0000000000..7e4d541f32
--- /dev/null
+++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_user_limits.erl
@@ -0,0 +1,63 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mgmt_wm_user_limits).
+
+-export([init/2, to_json/2, content_types_provided/2,
+ resource_exists/2, is_authorized/2, allowed_methods/2]).
+-export([variances/2]).
+
+-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+%%--------------------------------------------------------------------
+
+init(Req, _State) ->
+ {cowboy_rest, rabbit_mgmt_headers:set_common_permission_headers(Req, ?MODULE), #context{}}.
+
+variances(Req, Context) ->
+ {[<<"accept-encoding">>, <<"origin">>], Req, Context}.
+
+allowed_methods(ReqData, Context) ->
+ {[<<"GET">>, <<"OPTIONS">>], ReqData, Context}.
+
+is_authorized(ReqData, Context) ->
+ rabbit_mgmt_util:is_authorized_vhost_visible(ReqData, Context).
+
+content_types_provided(ReqData, Context) ->
+ {[{<<"application/json">>, to_json}], ReqData, Context}.
+
+resource_exists(ReqData, Context) ->
+ {case user(ReqData) of
+ none -> true;
+ Username -> rabbit_auth_backend_internal:exists(Username)
+ end, ReqData, Context}.
+
+to_json(ReqData, Context) ->
+ rabbit_mgmt_util:reply_list(limits(ReqData, Context), [], ReqData, Context).
+
+limits(ReqData, _Context) ->
+ case user(ReqData) of
+ none ->
+ [ [{user, internal_user:get_username(U)}, {value, maps:to_list(internal_user:get_limits(U))}]
+ || U <- rabbit_auth_backend_internal:all_users(),
+ internal_user:get_limits(U) =/= #{}];
+ Username when is_binary(Username) ->
+ case rabbit_auth_backend_internal:get_user_limits(Username) of
+ Value when is_map(Value) ->
+ case maps:size(Value) of
+ 0 -> [];
+ _ -> [[{user, Username}, {value, maps:to_list(Value)}]]
+ end;
+ undefined ->
+ []
+ end
+ end.
+%%--------------------------------------------------------------------
+
+user(ReqData) ->
+ rabbit_mgmt_util:id(user, ReqData).
diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_users.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_users.erl
new file mode 100644
index 0000000000..67b8345ab9
--- /dev/null
+++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_users.erl
@@ -0,0 +1,59 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mgmt_wm_users).
+
+-export([init/2, to_json/2,
+ content_types_provided/2,
+ is_authorized/2, allowed_methods/2]).
+-export([variances/2]).
+-export([users/1]).
+
+-import(rabbit_misc, [pget/2]).
+
+-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+%%--------------------------------------------------------------------
+
+init(Req, [Mode]) ->
+ {cowboy_rest, rabbit_mgmt_headers:set_common_permission_headers(Req, ?MODULE), {Mode, #context{}}}.
+
+variances(Req, Context) ->
+ {[<<"accept-encoding">>, <<"origin">>], Req, Context}.
+
+content_types_provided(ReqData, Context) ->
+ {rabbit_mgmt_util:responder_map(to_json), ReqData, Context}.
+
+allowed_methods(ReqData, Context) ->
+ {[<<"HEAD">>, <<"GET">>, <<"OPTIONS">>], ReqData, Context}.
+
+to_json(ReqData, {Mode, Context}) ->
+ rabbit_mgmt_util:reply_list(users(Mode), ReqData, Context).
+
+is_authorized(ReqData, {Mode, Context}) ->
+ {Res, Req2, Context2} = rabbit_mgmt_util:is_authorized_admin(ReqData, Context),
+ {Res, Req2, {Mode, Context2}}.
+
+%%--------------------------------------------------------------------
+
+users(all) ->
+ [begin
+ {ok, User} = rabbit_auth_backend_internal:lookup_user(pget(user, U)),
+ rabbit_mgmt_format:internal_user(User)
+ end || U <- rabbit_auth_backend_internal:list_users()];
+users(without_permissions) ->
+ lists:foldl(fun(U, Acc) ->
+ Username = pget(user, U),
+ case rabbit_auth_backend_internal:list_user_permissions(Username) of
+ [] ->
+ {ok, User} = rabbit_auth_backend_internal:lookup_user(Username),
+ [rabbit_mgmt_format:internal_user(User) | Acc];
+ _ ->
+ Acc
+ end
+ end, [], rabbit_auth_backend_internal:list_users()).
diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_users_bulk_delete.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_users_bulk_delete.erl
new file mode 100644
index 0000000000..6f6a7863a2
--- /dev/null
+++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_users_bulk_delete.erl
@@ -0,0 +1,47 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mgmt_wm_users_bulk_delete).
+
+-export([init/2,
+ content_types_accepted/2,
+ content_types_provided/2,
+ is_authorized/2,
+ allowed_methods/2, accept_content/2]).
+-export([variances/2]).
+
+-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+%%--------------------------------------------------------------------
+
+init(Req, _State) ->
+ {cowboy_rest, rabbit_mgmt_headers:set_common_permission_headers(Req, ?MODULE), #context{}}.
+
+variances(Req, Context) ->
+ {[<<"accept-encoding">>, <<"origin">>], Req, Context}.
+
+content_types_accepted(ReqData, Context) ->
+ {[{'*', accept_content}], ReqData, Context}.
+
+content_types_provided(ReqData, Context) ->
+ {rabbit_mgmt_util:responder_map(to_json), ReqData, Context}.
+
+allowed_methods(ReqData, Context) ->
+ {[<<"POST">>, <<"OPTIONS">>], ReqData, Context}.
+
+is_authorized(ReqData, Context) ->
+ rabbit_mgmt_util:is_authorized_admin(ReqData, Context).
+
+accept_content(ReqData0, Context = #context{user = #user{username = ActingUser}}) ->
+ rabbit_mgmt_util:with_decode(
+ [users], ReqData0, Context,
+ fun([Users], _, ReqData) ->
+ [rabbit_auth_backend_internal:delete_user(User, ActingUser)
+ || User <- Users],
+ {true, ReqData, Context}
+ end).
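+
+%% Illustrative POST body: {"users": ["user1", "user2"]}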
diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_vhost.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_vhost.erl
new file mode 100644
index 0000000000..e62eecac97
--- /dev/null
+++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_vhost.erl
@@ -0,0 +1,97 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mgmt_wm_vhost).
+
+-export([init/2, resource_exists/2, to_json/2,
+ content_types_provided/2, content_types_accepted/2,
+ is_authorized/2, allowed_methods/2, accept_content/2,
+ delete_resource/2, id/1, put_vhost/5]).
+-export([variances/2]).
+
+-import(rabbit_misc, [pget/2]).
+
+-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+%%--------------------------------------------------------------------
+
+init(Req, _State) ->
+ {cowboy_rest, rabbit_mgmt_headers:set_common_permission_headers(Req, ?MODULE), #context{}}.
+
+variances(Req, Context) ->
+ {[<<"accept-encoding">>, <<"origin">>], Req, Context}.
+
+content_types_provided(ReqData, Context) ->
+ {rabbit_mgmt_util:responder_map(to_json), ReqData, Context}.
+
+content_types_accepted(ReqData, Context) ->
+ {[{'*', accept_content}], ReqData, Context}.
+
+allowed_methods(ReqData, Context) ->
+ {[<<"HEAD">>, <<"GET">>, <<"PUT">>, <<"DELETE">>, <<"OPTIONS">>], ReqData, Context}.
+
+resource_exists(ReqData, Context) ->
+ {rabbit_vhost:exists(id(ReqData)), ReqData, Context}.
+
+to_json(ReqData, Context) ->
+ try
+ Id = id(ReqData),
+ case rabbit_mgmt_util:disable_stats(ReqData) of
+ false ->
+ rabbit_mgmt_util:reply(
+ hd(rabbit_mgmt_db:augment_vhosts(
+ [rabbit_vhost:info(Id)], rabbit_mgmt_util:range(ReqData))),
+ ReqData, Context);
+ true ->
+ rabbit_mgmt_util:reply(rabbit_vhost:info(Id), ReqData, Context)
+ end
+ catch
+ {error, invalid_range_parameters, Reason} ->
+ rabbit_mgmt_util:bad_request(iolist_to_binary(Reason), ReqData, Context)
+ end.
+
+accept_content(ReqData0, Context = #context{user = #user{username = Username}}) ->
+ Name = id(ReqData0),
+ rabbit_mgmt_util:with_decode(
+ [], ReqData0, Context,
+ fun(_, BodyMap, ReqData) ->
+ Trace = rabbit_mgmt_util:parse_bool(maps:get(tracing, BodyMap, undefined)),
+ Description = maps:get(description, BodyMap, <<"">>),
+ Tags = maps:get(tags, BodyMap, <<"">>),
+ case put_vhost(Name, Description, Tags, Trace, Username) of
+ ok ->
+ {true, ReqData, Context};
+ {error, timeout} = E ->
+ rabbit_mgmt_util:internal_server_error(
+ "Timed out while waiting for the vhost to initialise", E,
+ ReqData0, Context)
+ end
+ end).
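+
+%% Illustrative PUT body (keys as read above, all optional):
+%%   {"description": "staging vhost", "tags": "staging,eu",
+%%    "tracing": false}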
+
+delete_resource(ReqData, Context = #context{user = #user{username = Username}}) ->
+ VHost = id(ReqData),
+ try
+ rabbit_vhost:delete(VHost, Username)
+ catch _:{error, {no_such_vhost, _}} ->
+ ok
+ end,
+ {true, ReqData, Context}.
+
+is_authorized(ReqData, Context) ->
+ rabbit_mgmt_util:is_authorized_admin(ReqData, Context).
+
+%%--------------------------------------------------------------------
+
+id(ReqData) ->
+ case rabbit_mgmt_util:id(vhost, ReqData) of
+ [Value] -> Value;
+ Value -> Value
+ end.
+
+put_vhost(Name, Description, Tags, Trace, Username) ->
+ rabbit_vhost:put_vhost(Name, Description, Tags, Trace, Username).
diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_vhost_restart.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_vhost_restart.erl
new file mode 100644
index 0000000000..e22ff058c7
--- /dev/null
+++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_vhost_restart.erl
@@ -0,0 +1,56 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mgmt_wm_vhost_restart).
+
+-export([init/2, resource_exists/2, is_authorized/2,
+ allowed_methods/2, content_types_accepted/2, accept_content/2]).
+-export([variances/2]).
+
+-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+%%--------------------------------------------------------------------
+
+init(Req, _State) ->
+ {cowboy_rest, rabbit_mgmt_headers:set_common_permission_headers(Req, ?MODULE), #context{}}.
+
+variances(Req, Context) ->
+ {[<<"accept-encoding">>, <<"origin">>], Req, Context}.
+
+allowed_methods(ReqData, Context) ->
+ {[<<"POST">>, <<"OPTIONS">>], ReqData, Context}.
+
+resource_exists(ReqData, Context) ->
+ VHost = id(ReqData),
+ {rabbit_vhost:exists(VHost), ReqData, Context}.
+
+content_types_accepted(ReqData, Context) ->
+ {[{'*', accept_content}], ReqData, Context}.
+
+accept_content(ReqData, Context) ->
+ VHost = id(ReqData),
+ NodeB = rabbit_mgmt_util:id(node, ReqData),
+ Node = binary_to_atom(NodeB, utf8),
+ case rabbit_vhost_sup_sup:start_vhost(VHost, Node) of
+ {ok, _} ->
+ {true, ReqData, Context};
+ {error, {already_started, _}} ->
+ {true, ReqData, Context};
+ {error, Err} ->
+ Message = io_lib:format("Request to node ~s failed with ~p",
+ [Node, Err]),
+ rabbit_mgmt_util:bad_request(list_to_binary(Message), ReqData, Context)
+ end.
+
+is_authorized(ReqData, Context) ->
+ rabbit_mgmt_util:is_authorized_admin(ReqData, Context).
+
+%%--------------------------------------------------------------------
+
+id(ReqData) ->
+ rabbit_mgmt_util:id(vhost, ReqData).
diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_vhosts.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_vhosts.erl
new file mode 100644
index 0000000000..7380faa4f0
--- /dev/null
+++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_vhosts.erl
@@ -0,0 +1,69 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mgmt_wm_vhosts).
+
+-export([init/2, to_json/2, content_types_provided/2, is_authorized/2]).
+-export([variances/2]).
+-export([basic/0, augmented/2]).
+
+-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-define(BASIC_COLUMNS, ["name", "tracing", "pid"]).
+
+-define(DEFAULT_SORT, ["name"]).
+
+%%--------------------------------------------------------------------
+
+init(Req, _State) ->
+ {cowboy_rest, rabbit_mgmt_headers:set_common_permission_headers(Req, ?MODULE), #context{}}.
+
+variances(Req, Context) ->
+ {[<<"accept-encoding">>, <<"origin">>], Req, Context}.
+
+content_types_provided(ReqData, Context) ->
+ {rabbit_mgmt_util:responder_map(to_json), ReqData, Context}.
+
+to_json(ReqData, Context = #context{user = User}) ->
+ try
+ Basic = [rabbit_vhost:info(V)
+ || V <- rabbit_mgmt_util:list_visible_vhosts(User)],
+ Data = rabbit_mgmt_util:augment_resources(Basic, ?DEFAULT_SORT,
+ ?BASIC_COLUMNS, ReqData,
+ Context, fun augment/2),
+ rabbit_mgmt_util:reply(Data, ReqData, Context)
+ catch
+ {error, invalid_range_parameters, Reason} ->
+ rabbit_mgmt_util:bad_request(iolist_to_binary(Reason), ReqData, Context)
+ end.
+
+is_authorized(ReqData, Context) ->
+ rabbit_mgmt_util:is_authorized(ReqData, Context).
+
+%%--------------------------------------------------------------------
+
+augment(Basic, ReqData) ->
+ case rabbit_mgmt_util:disable_stats(ReqData) of
+ false ->
+ rabbit_mgmt_db:augment_vhosts(Basic, rabbit_mgmt_util:range(ReqData));
+ true ->
+ Basic
+ end.
+
+augmented(ReqData, #context{user = User}) ->
+ case rabbit_mgmt_util:disable_stats(ReqData) of
+ false ->
+ rabbit_mgmt_db:augment_vhosts(
+ [rabbit_vhost:info(V) || V <- rabbit_mgmt_util:list_visible_vhosts(User)],
+ rabbit_mgmt_util:range(ReqData));
+ true ->
+ [rabbit_vhost:info(V) || V <- rabbit_mgmt_util:list_visible_vhosts(User)]
+ end.
+
+basic() ->
+ rabbit_vhost:info_all([name]).
diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_whoami.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_whoami.erl
new file mode 100644
index 0000000000..22a1063443
--- /dev/null
+++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_whoami.erl
@@ -0,0 +1,36 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mgmt_wm_whoami).
+
+-export([init/2, to_json/2, content_types_provided/2, is_authorized/2]).
+-export([variances/2]).
+
+-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+%%--------------------------------------------------------------------
+
+init(Req, _State) ->
+ {cowboy_rest, rabbit_mgmt_headers:set_common_permission_headers(Req, ?MODULE), #context{}}.
+
+variances(Req, Context) ->
+ {[<<"accept-encoding">>, <<"origin">>], Req, Context}.
+
+content_types_provided(ReqData, Context) ->
+ {rabbit_mgmt_util:responder_map(to_json), ReqData, Context}.
+
+to_json(ReqData, Context = #context{user = User}) ->
+ FormattedUser = rabbit_mgmt_format:user(User),
+ Expiration = case application:get_env(rabbitmq_management, login_session_timeout) of
+ undefined -> [];
+ {ok, Val} -> [{login_session_timeout, Val}]
+ end,
+ rabbit_mgmt_util:reply(FormattedUser ++ Expiration, ReqData, Context).
+
+is_authorized(ReqData, Context) ->
+ rabbit_mgmt_util:is_authorized(ReqData, Context).
diff --git a/deps/rabbitmq_management/test/cache_SUITE.erl b/deps/rabbitmq_management/test/cache_SUITE.erl
new file mode 100644
index 0000000000..00f9cd56c8
--- /dev/null
+++ b/deps/rabbitmq_management/test/cache_SUITE.erl
@@ -0,0 +1,109 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2016-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(cache_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("proper/include/proper.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, non_parallel_tests}
+ ].
+
+groups() ->
+ [
+ {non_parallel_tests, [], [
+ name,
+ fetch,
+ fetch_cached,
+ fetch_stale,
+ fetch_stale_after_expiry,
+ fetch_throws,
+ fetch_cached_with_same_args,
+ fetch_cached_with_different_args_invalidates_cache
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ Config.
+
+end_per_suite(Config) ->
+ Config.
+
+init_per_group(_, Config) -> Config.
+
+end_per_group(_, Config) -> Config.
+
+init_per_testcase(_Testcase, Config) ->
+ {ok, P} = rabbit_mgmt_db_cache:start_link(banana),
+ rabbit_ct_helpers:set_config(Config, {sut, P}).
+
+end_per_testcase(_Testcase, Config) ->
+ P = ?config(sut, Config),
+ _ = gen_server:stop(P),
+ Config.
+
+-define(DEFAULT_CACHE_TIME, 5000).
+
+%% tests
+
+name(Config) ->
+ ct:pal(?LOW_IMPORTANCE, "Priv: ~p", [?config(priv_dir, Config)]),
+ rabbit_mgmt_db_cache_banana = rabbit_mgmt_db_cache:process_name(banana).
+
+fetch_new_key(_Config) ->
+ {error, key_not_found} = rabbit_mgmt_db_cache:fetch(this_is_not_the_key_you_are_looking_for,
+ fun() -> 123 end).
+
+fetch(_Config) ->
+ {ok, 123} = rabbit_mgmt_db_cache:fetch(banana, fun() -> 123 end).
+
+fetch_cached(_Config) ->
+ {ok, 123} = rabbit_mgmt_db_cache:fetch(banana, fun() ->
+ timer:sleep(100),
+ 123 end),
+ {ok, 123} = rabbit_mgmt_db_cache:fetch(banana, fun() -> 321 end).
+
+fetch_stale(Config) ->
+ P = ?config(sut, Config),
+ {ok, 123} = rabbit_mgmt_db_cache:fetch(banana, fun() -> 123 end),
+ ok = gen_server:call(P, purge_cache),
+ {ok, 321} = rabbit_mgmt_db_cache:fetch(banana, fun() -> 321 end).
+
+fetch_stale_after_expiry(_Config) ->
+ {ok, 123} = rabbit_mgmt_db_cache:fetch(banana, fun() -> 123 end), % expire quickly
+ timer:sleep(500),
+ {ok, 321} = rabbit_mgmt_db_cache:fetch(banana, fun() -> 321 end).
+
+fetch_throws(_Config) ->
+ {error, {throw, banana_face}} =
+ rabbit_mgmt_db_cache:fetch(banana, fun() -> throw(banana_face) end),
+ {ok, 123} = rabbit_mgmt_db_cache:fetch(banana, fun() -> 123 end).
+
+fetch_cached_with_same_args(_Config) ->
+ {ok, 123} = rabbit_mgmt_db_cache:fetch(banana, fun(_) ->
+ timer:sleep(100),
+ 123
+ end, [42]),
+ {ok, 123} = rabbit_mgmt_db_cache:fetch(banana, fun(_) -> 321 end, [42]).
+
+fetch_cached_with_different_args_invalidates_cache(_Config) ->
+ {ok, 123} = rabbit_mgmt_db_cache:fetch(banana, fun(_) ->
+ timer:sleep(100),
+ 123
+ end, [42]),
+ {ok, 321} = rabbit_mgmt_db_cache:fetch(banana, fun(_) ->
+ timer:sleep(100),
+ 321 end, [442]),
+ {ok, 321} = rabbit_mgmt_db_cache:fetch(banana, fun(_) -> 456 end, [442]).
diff --git a/deps/rabbitmq_management/test/clustering_SUITE.erl b/deps/rabbitmq_management/test/clustering_SUITE.erl
new file mode 100644
index 0000000000..fc096962af
--- /dev/null
+++ b/deps/rabbitmq_management/test/clustering_SUITE.erl
@@ -0,0 +1,874 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2016-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(clustering_SUITE).
+
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("rabbit_common/include/rabbit_core_metrics.hrl").
+-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_metrics.hrl").
+-include_lib("rabbitmq_ct_helpers/include/rabbit_mgmt_test.hrl").
+
+-import(rabbit_ct_broker_helpers, [get_node_config/3, restart_node/2]).
+-import(rabbit_mgmt_test_util, [http_get/2, http_put/4, http_delete/3]).
+-import(rabbit_misc, [pget/2]).
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, non_parallel_tests}
+ ].
+
+groups() ->
+ [{non_parallel_tests, [], [
+ list_cluster_nodes_test,
+ multi_node_case1_test,
+ ha_queue_hosted_on_other_node,
+ ha_queue_with_multiple_consumers,
+ queue_on_other_node,
+ queue_with_multiple_consumers,
+ queue_consumer_cancelled,
+ queue_consumer_channel_closed,
+ queue,
+ queues_single,
+ queues_multiple,
+ queues_removed,
+ channels_multiple_on_different_nodes,
+ channel_closed,
+ channel,
+ channel_other_node,
+ channel_with_consumer_on_other_node,
+ channel_with_consumer_on_one_node,
+ consumers,
+ connections,
+ exchanges,
+ exchange,
+ vhosts,
+ nodes,
+ overview,
+ disable_plugin
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+merge_app_env(Config) ->
+ Config1 = rabbit_ct_helpers:merge_app_env(Config,
+ {rabbit, [
+ {collect_statistics, fine},
+ {collect_statistics_interval, 500}
+ ]}),
+ rabbit_ct_helpers:merge_app_env(Config1,
+ {rabbitmq_management_agent, [
+ {rates_mode, detailed},
+ {sample_retention_policies,
+ %% List of {MaxAgeInSeconds, SampleEveryNSeconds}
+ [{global, [{605, 5}, {3660, 60}, {29400, 600}, {86400, 1800}]},
+ {basic, [{605, 5}, {3600, 60}]},
+ {detailed, [{10, 5}]}] }]}).
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ inets:start(),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, ?MODULE},
+ {rmq_nodes_count, 2}
+ ]),
+ Config2 = merge_app_env(Config1),
+ rabbit_ct_helpers:run_setup_steps(Config2,
+ rabbit_ct_broker_helpers:setup_steps()).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config,
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_group(_, Config) ->
+ Config.
+
+end_per_group(_, Config) ->
+ Config.
+
+init_per_testcase(multi_node_case1_test = Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase);
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, clear_all_table_data, []),
+ rabbit_ct_broker_helpers:rpc(Config, 1, ?MODULE, clear_all_table_data, []),
+ rabbit_ct_broker_helpers:close_all_connections(Config, 0, <<"clustering_SUITE:init_per_testcase">>),
+ Conn = rabbit_ct_client_helpers:open_unmanaged_connection(Config),
+ Config1 = rabbit_ct_helpers:set_config(Config, {conn, Conn}),
+ rabbit_ct_helpers:testcase_started(Config1, Testcase).
+
+end_per_testcase(multi_node_case1_test = Testcase, Config) ->
+ rabbit_ct_broker_helpers:close_all_connections(Config, 0, <<"clustering_SUITE:end_per_testcase">>),
+ rabbit_ct_helpers:testcase_finished(Config, Testcase);
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_client_helpers:close_connection(?config(conn, Config)),
+ rabbit_ct_broker_helpers:close_all_connections(Config, 0, <<"clustering_SUITE:end_per_testcase">>),
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+list_cluster_nodes_test(Config) ->
+ %% see rmq_nodes_count in init_per_suite
+ ?assertEqual(2, length(http_get(Config, "/nodes"))),
+ passed.
+
+multi_node_case1_test(Config) ->
+ Nodename1 = rabbit_data_coercion:to_binary(get_node_config(Config, 0, nodename)),
+ Nodename2 = rabbit_data_coercion:to_binary(get_node_config(Config, 1, nodename)),
+ Policy = [{pattern, <<".*">>},
+ {definition, [{'ha-mode', <<"all">>}]}],
+ http_put(Config, "/policies/%2F/HA", Policy, [?CREATED, ?NO_CONTENT]),
+ http_delete(Config, "/queues/%2F/multi-node-test-queue", [?NO_CONTENT, ?NOT_FOUND]),
+
+ Conn = rabbit_ct_client_helpers:open_unmanaged_connection(Config, 1),
+ {ok, Chan} = amqp_connection:open_channel(Conn),
+ _ = queue_declare(Chan, <<"multi-node-test-queue">>),
+ Q = wait_for_mirrored_queue(Config, "/queues/%2F/multi-node-test-queue"),
+
+ ?assert(lists:member(maps:get(node, Q), [Nodename1, Nodename2])),
+ [Mirror] = maps:get(slave_nodes, Q),
+ [Mirror] = maps:get(synchronised_slave_nodes, Q),
+ ?assert(lists:member(Mirror, [Nodename1, Nodename2])),
+
+ %% restart node2 so that queue master migrates
+ restart_node(Config, 1),
+
+ Q2 = wait_for_mirrored_queue(Config, "/queues/%2F/multi-node-test-queue"),
+ http_delete(Config, "/queues/%2F/multi-node-test-queue", ?NO_CONTENT),
+ http_delete(Config, "/policies/%2F/HA", ?NO_CONTENT),
+
+ ?assert(lists:member(maps:get(node, Q2), [Nodename1, Nodename2])),
+
+ rabbit_ct_client_helpers:close_connection(Conn),
+
+ passed.
+
+ha_queue_hosted_on_other_node(Config) ->
+ Policy = [{pattern, <<".*">>},
+ {definition, [{'ha-mode', <<"all">>}]}],
+ http_put(Config, "/policies/%2F/HA", Policy, [?CREATED, ?NO_CONTENT]),
+
+ Conn = rabbit_ct_client_helpers:open_unmanaged_connection(Config, 1),
+ {ok, Chan} = amqp_connection:open_channel(Conn),
+ _ = queue_declare_durable(Chan, <<"ha-queue">>),
+ _ = wait_for_mirrored_queue(Config, "/queues/%2F/ha-queue"),
+
+ {ok, Chan2} = amqp_connection:open_channel(?config(conn, Config)),
+ consume(Chan, <<"ha-queue">>),
+
+ timer:sleep(5100),
+ force_stats(),
+ Res = http_get(Config, "/queues/%2F/ha-queue"),
+
+ % assert some basic data is there
+ [Cons] = maps:get(consumer_details, Res),
+ #{} = maps:get(channel_details, Cons), % channel details proplist must not be empty
+ 0 = maps:get(prefetch_count, Cons), % check one of the augmented properties
+ <<"ha-queue">> = maps:get(name, Res),
+
+ amqp_channel:close(Chan),
+ amqp_channel:close(Chan2),
+ rabbit_ct_client_helpers:close_connection(Conn),
+
+ http_delete(Config, "/queues/%2F/ha-queue", ?NO_CONTENT),
+ http_delete(Config, "/policies/%2F/HA", ?NO_CONTENT),
+
+ ok.
+
+ha_queue_with_multiple_consumers(Config) ->
+ Policy = [{pattern, <<".*">>},
+ {definition, [{'ha-mode', <<"all">>}]}],
+ http_put(Config, "/policies/%2F/HA", Policy, [?CREATED, ?NO_CONTENT]),
+
+ {ok, Chan} = amqp_connection:open_channel(?config(conn, Config)),
+ _ = queue_declare_durable(Chan, <<"ha-queue3">>),
+ _ = wait_for_mirrored_queue(Config, "/queues/%2F/ha-queue3"),
+
+ consume(Chan, <<"ha-queue3">>),
+ force_stats(),
+
+ {ok, Chan2} = amqp_connection:open_channel(?config(conn, Config)),
+ consume(Chan2, <<"ha-queue3">>),
+
+ timer:sleep(5100),
+ force_stats(),
+
+ Res = http_get(Config, "/queues/%2F/ha-queue3"),
+
+ % assert some basic data is there
+ [C1, C2] = maps:get(consumer_details, Res),
+ % channel details proplist must not be empty
+ #{} = maps:get(channel_details, C1),
+ #{} = maps:get(channel_details, C2),
+ % check one of the augmented properties
+ 0 = maps:get(prefetch_count, C1),
+ 0 = maps:get(prefetch_count, C2),
+ <<"ha-queue3">> = maps:get(name, Res),
+
+ amqp_channel:close(Chan),
+ amqp_channel:close(Chan2),
+
+ http_delete(Config, "/queues/%2F/ha-queue3", ?NO_CONTENT),
+ http_delete(Config, "/policies/%2F/HA", ?NO_CONTENT),
+
+ ok.
+
+queue_on_other_node(Config) ->
+ Conn = rabbit_ct_client_helpers:open_unmanaged_connection(Config, 1),
+ {ok, Chan} = amqp_connection:open_channel(Conn),
+ _ = queue_declare(Chan, <<"some-queue">>),
+ _ = wait_for_queue(Config, "/queues/%2F/some-queue"),
+
+ {ok, Chan2} = amqp_connection:open_channel(?config(conn, Config)),
+ consume(Chan2, <<"some-queue">>),
+
+ timer:sleep(5100),
+ force_stats(),
+ Res = http_get(Config, "/queues/%2F/some-queue"),
+
+ % assert some basic data is present
+ [Cons] = maps:get(consumer_details, Res),
+ #{} = maps:get(channel_details, Cons), % channel details proplist must not be empty
+ 0 = maps:get(prefetch_count, Cons), % check one of the augmented properties
+ <<"some-queue">> = maps:get(name, Res),
+
+ http_delete(Config, "/queues/%2F/some-queue", ?NO_CONTENT),
+
+ amqp_channel:close(Chan),
+ amqp_channel:close(Chan2),
+ rabbit_ct_client_helpers:close_connection(Conn),
+
+ ok.
+
+queue_with_multiple_consumers(Config) ->
+ {ok, Chan} = amqp_connection:open_channel(?config(conn, Config)),
+ Q = <<"multi-consumer-queue1">>,
+ _ = queue_declare(Chan, Q),
+ _ = wait_for_queue(Config, "/queues/%2F/multi-consumer-queue1"),
+
+ Conn = rabbit_ct_client_helpers:open_unmanaged_connection(Config, 1),
+ {ok, Chan2} = amqp_connection:open_channel(Conn),
+ consume(Chan, Q),
+ consume(Chan2, Q),
+ publish(Chan2, Q),
+ publish(Chan, Q),
+ % ensure a message has been consumed and acked
+ receive
+ {#'basic.deliver'{delivery_tag = T}, _} ->
+ amqp_channel:cast(Chan, #'basic.ack'{delivery_tag = T})
+ end,
+
+ timer:sleep(5100),
+ force_stats(),
+
+ Res = http_get(Config, "/queues/%2F/multi-consumer-queue1"),
+ http_delete(Config, "/queues/%2F/multi-consumer-queue1", ?NO_CONTENT),
+
+ % assert some basic data is there
+ [C1, C2] = maps:get(consumer_details, Res),
+ % channel details must not be empty
+ ?assertNotEqual(#{}, maps:get(channel_details, C1)),
+ ?assertNotEqual(#{}, maps:get(channel_details, C2)),
+ % check one of the augmented properties
+ 0 = maps:get(prefetch_count, C1),
+ 0 = maps:get(prefetch_count, C2),
+ Q = maps:get(name, Res),
+
+ amqp_channel:close(Chan),
+ amqp_channel:close(Chan2),
+ rabbit_ct_client_helpers:close_connection(Conn),
+
+ ok.
+
+queue_consumer_cancelled(Config) ->
+ {ok, Chan} = amqp_connection:open_channel(?config(conn, Config)),
+ _ = queue_declare(Chan, <<"some-queue">>),
+ _ = wait_for_queue(Config, "/queues/%2F/some-queue"),
+
+ Tag = consume(Chan, <<"some-queue">>),
+
+ #'basic.cancel_ok'{} =
+ amqp_channel:call(Chan, #'basic.cancel'{consumer_tag = Tag}),
+ force_stats(),
+ Res = http_get(Config, "/queues/%2F/some-queue"),
+
+ amqp_channel:close(Chan),
+
+ % assert there are no consumer details
+ [] = maps:get(consumer_details, Res),
+ <<"some-queue">> = maps:get(name, Res),
+ http_delete(Config, "/queues/%2F/some-queue", ?NO_CONTENT),
+ ok.
+
+queue_consumer_channel_closed(Config) ->
+ {ok, Chan} = amqp_connection:open_channel(?config(conn, Config)),
+ _ = queue_declare(Chan, <<"some-queue">>),
+ _ = wait_for_queue(Config, "/queues/%2F/some-queue"),
+
+ consume(Chan, <<"some-queue">>),
+ force_stats(), % ensure channel stats have been written
+
+ amqp_channel:close(Chan),
+ force_stats(),
+
+ Res = http_get(Config, "/queues/%2F/some-queue"),
+ % assert there are no consumer details
+ [] = maps:get(consumer_details, Res),
+ <<"some-queue">> = maps:get(name, Res),
+
+ http_delete(Config, "/queues/%2F/some-queue", ?NO_CONTENT),
+ ok.
+
+queue(Config) ->
+ http_put(Config, "/queues/%2F/some-queue", none, [?CREATED, ?NO_CONTENT]),
+ _ = wait_for_queue(Config, "/queues/%2F/some-queue"),
+
+ {ok, Chan} = amqp_connection:open_channel(?config(conn, Config)),
+ {ok, Chan2} = amqp_connection:open_channel(?config(conn, Config)),
+
+ publish(Chan, <<"some-queue">>),
+ basic_get(Chan, <<"some-queue">>),
+ publish(Chan2, <<"some-queue">>),
+ basic_get(Chan2, <<"some-queue">>),
+ force_stats(),
+ timer:sleep(5100),
+ Res = http_get(Config, "/queues/%2F/some-queue"),
+ % assert the queue records at least one delivery
+ [#{} | _] = maps:get(deliveries, Res),
+
+ amqp_channel:close(Chan),
+ amqp_channel:close(Chan2),
+ http_delete(Config, "/queues/%2F/some-queue", ?NO_CONTENT),
+
+ ok.
+
+queues_single(Config) ->
+ http_put(Config, "/queues/%2F/some-queue", none, [?CREATED, ?NO_CONTENT]),
+ _ = wait_for_queue(Config, "/queues/%2F/some-queue"),
+
+ force_stats(),
+ Res = http_get(Config, "/queues/%2F"),
+ http_delete(Config, "/queues/%2F/some-queue", ?NO_CONTENT),
+
+ % assert at least one queue is returned
+ ?assert(length(Res) >= 1),
+
+ ok.
+
+queues_multiple(Config) ->
+ {ok, Chan} = amqp_connection:open_channel(?config(conn, Config)),
+ _ = queue_declare(Chan, <<"some-queue">>),
+ _ = queue_declare(Chan, <<"some-other-queue">>),
+ _ = wait_for_queue(Config, "/queues/%2F/some-queue"),
+ _ = wait_for_queue(Config, "/queues/%2F/some-other-queue"),
+
+ force_stats(),
+ timer:sleep(5100),
+
+ Res = http_get(Config, "/queues/%2F"),
+ [Q1, Q2 | _] = Res,
+
+ % assert some basic data is present
+ http_delete(Config, "/queues/%2F/some-queue", ?NO_CONTENT),
+ http_delete(Config, "/queues/%2F/some-other-queue", ?NO_CONTENT),
+
+ false = (maps:get(name, Q1) =:= maps:get(name, Q2)),
+ amqp_channel:close(Chan),
+
+ ok.
+
+queues_removed(Config) ->
+ http_put(Config, "/queues/%2F/some-queue", none, [?CREATED, ?NO_CONTENT]),
+ force_stats(),
+ N = length(http_get(Config, "/queues/%2F")),
+ http_delete(Config, "/queues/%2F/some-queue", ?NO_CONTENT),
+ force_stats(),
+ ?assertEqual(N - 1, length(http_get(Config, "/queues/%2F"))),
+ ok.
+
+channels_multiple_on_different_nodes(Config) ->
+ Conn = rabbit_ct_client_helpers:open_unmanaged_connection(Config, 1),
+ {ok, Chan} = amqp_connection:open_channel(Conn),
+ _ = queue_declare(Chan, <<"some-queue">>),
+ _ = wait_for_queue(Config, "/queues/%2F/some-queue"),
+
+ Conn2 = rabbit_ct_client_helpers:open_unmanaged_connection(Config, 1),
+ {ok, Chan2} = amqp_connection:open_channel(Conn2),
+ consume(Chan, <<"some-queue">>),
+
+ timer:sleep(5100),
+ force_stats(),
+
+ Res = http_get(Config, "/channels"),
+ % assert two channels are present
+ [_,_] = Res,
+
+ http_delete(Config, "/queues/%2F/some-queue", ?NO_CONTENT),
+
+ amqp_channel:close(Chan),
+ amqp_channel:close(Chan2),
+ rabbit_ct_client_helpers:close_connection(Conn),
+ rabbit_ct_client_helpers:close_connection(Conn2),
+
+ ok.
+
+channel_closed(Config) ->
+ {ok, Chan} = amqp_connection:open_channel(?config(conn, Config)),
+ _ = queue_declare(Chan, <<"some-queue">>),
+ _ = wait_for_queue(Config, "/queues/%2F/some-queue"),
+
+ {ok, Chan2} = amqp_connection:open_channel(?config(conn, Config)),
+ force_stats(),
+
+ consume(Chan2, <<"some-queue">>),
+ amqp_channel:close(Chan),
+
+ timer:sleep(5100),
+ force_stats(),
+
+ Res = http_get(Config, "/channels"),
+ % assert one channel is present
+ [_] = Res,
+
+ http_delete(Config, "/queues/%2F/some-queue", ?NO_CONTENT),
+
+ amqp_channel:close(Chan2),
+
+ ok.
+
+channel(Config) ->
+ {ok, Chan} = amqp_connection:open_channel(?config(conn, Config)),
+ [{_, ChData}] = rabbit_ct_broker_helpers:rpc(Config, 0, ets, tab2list, [channel_created]),
+
+ ChName = uri_string:recompose(#{path => binary_to_list(pget(name, ChData))}),
+ timer:sleep(5100),
+ force_stats(),
+ Res = http_get(Config, "/channels/" ++ ChName),
+ % assert a channel record (a map) is returned
+ #{} = Res,
+
+ amqp_channel:close(Chan),
+ ok.
+
+channel_other_node(Config) ->
+ Q = <<"some-queue">>,
+ http_put(Config, "/queues/%2F/some-queue", none, [?CREATED, ?NO_CONTENT]),
+ Conn = rabbit_ct_client_helpers:open_unmanaged_connection(Config, 1),
+ {ok, Chan} = amqp_connection:open_channel(Conn),
+ [{_, ChData}] = rabbit_ct_broker_helpers:rpc(Config, 1, ets, tab2list,
+ [channel_created]),
+ ChName = uri_string:recompose(#{path => binary_to_list(pget(name, ChData))}),
+ consume(Chan, Q),
+ publish(Chan, Q),
+
+ timer:sleep(5100),
+ force_stats(),
+
+ Res = http_get(Config, "/channels/" ++ ChName),
+ % assert a channel record (a map) is returned
+ #{} = Res,
+ [#{}] = maps:get(deliveries, Res),
+ #{} = maps:get(connection_details, Res),
+
+ http_delete(Config, "/queues/%2F/some-queue", ?NO_CONTENT),
+ amqp_connection:close(Conn),
+
+ ok.
+
+channel_with_consumer_on_other_node(Config) ->
+ {ok, Chan} = amqp_connection:open_channel(?config(conn, Config)),
+ Q = <<"some-queue">>,
+ _ = queue_declare(Chan, Q),
+ _ = wait_for_queue(Config, "/queues/%2F/some-queue"),
+
+ ChName = get_channel_name(Config, 0),
+ consume(Chan, Q),
+ publish(Chan, Q),
+
+ timer:sleep(5100),
+ force_stats(),
+
+ Res = http_get(Config, "/channels/" ++ ChName),
+ http_delete(Config, "/queues/%2F/some-queue", ?NO_CONTENT),
+ % assert a channel record (a map) is returned
+ #{} = Res,
+ [#{}] = maps:get(consumer_details, Res),
+
+ amqp_channel:close(Chan),
+
+ ok.
+
+channel_with_consumer_on_one_node(Config) ->
+ {ok, Chan} = amqp_connection:open_channel(?config(conn, Config)),
+ Q = <<"some-queue">>,
+ _ = queue_declare(Chan, Q),
+ _ = wait_for_queue(Config, "/queues/%2F/some-queue"),
+
+ ChName = get_channel_name(Config, 0),
+ consume(Chan, Q),
+
+ timer:sleep(5100),
+ force_stats(),
+
+ Res = http_get(Config, "/channels/" ++ ChName),
+ amqp_channel:close(Chan),
+
+ http_delete(Config, "/queues/%2F/some-queue", ?NO_CONTENT),
+ % assert a channel record (a map) is returned
+ #{} = Res,
+ [#{}] = maps:get(consumer_details, Res),
+ ok.
+
+consumers(Config) ->
+ Conn = rabbit_ct_client_helpers:open_unmanaged_connection(Config, 0),
+ {ok, Chan} = amqp_connection:open_channel(Conn),
+ _ = queue_declare(Chan, <<"some-queue">>),
+ _ = wait_for_queue(Config, "/queues/%2F/some-queue"),
+
+ Conn2 = rabbit_ct_client_helpers:open_unmanaged_connection(Config, 1),
+ {ok, Chan2} = amqp_connection:open_channel(Conn2),
+ consume(Chan, <<"some-queue">>),
+ consume(Chan2, <<"some-queue">>),
+
+ timer:sleep(5100),
+ force_stats(),
+ Res = http_get(Config, "/consumers"),
+
+ % assert two consumer records are returned
+ [#{} = C1, #{} = C2] = Res,
+ #{} = maps:get(channel_details, C1),
+ #{} = maps:get(channel_details, C2),
+
+ http_delete(Config, "/queues/%2F/some-queue", ?NO_CONTENT),
+
+ amqp_channel:close(Chan),
+ rabbit_ct_client_helpers:close_connection(Conn),
+ rabbit_ct_client_helpers:close_connection(Conn2),
+
+ ok.
+
+connections(Config) ->
+ %% one connection is maintained by CT helpers
+ {ok, Chan} = amqp_connection:open_channel(?config(conn, Config)),
+
+ Conn2 = rabbit_ct_client_helpers:open_unmanaged_connection(Config, 0),
+ {ok, _Chan2} = amqp_connection:open_channel(Conn2),
+
+ %% the stats for the second channel take a little longer to be reported
+ timer:sleep(5100),
+ force_stats(),
+
+ Res = http_get(Config, "/connections"),
+
+ % assert two connection records are returned, each with one channel
+ [#{} = C1, #{} = C2] = Res,
+ 1 = maps:get(channels, C1),
+ 1 = maps:get(channels, C2),
+
+ amqp_channel:close(Chan),
+ rabbit_ct_client_helpers:close_connection(Conn2),
+
+ ok.
+
+exchanges(Config) ->
+ {ok, _Chan0} = amqp_connection:open_channel(?config(conn, Config)),
+ Conn = rabbit_ct_client_helpers:open_unmanaged_connection(Config, 1),
+ {ok, Chan} = amqp_connection:open_channel(Conn),
+ QName = <<"exchanges-test">>,
+ XName = <<"some-exchange">>,
+ Q = queue_declare(Chan, QName),
+ exchange_declare(Chan, XName),
+ queue_bind(Chan, XName, Q, <<"some-key">>),
+ consume(Chan, QName),
+ publish_to(Chan, XName, <<"some-key">>),
+
+ force_stats(),
+ Res = http_get(Config, "/exchanges"),
+ [X] = [X || X <- Res, maps:get(name, X) =:= XName],
+
+ ?assertEqual(<<"direct">>, maps:get(type, X)),
+
+ amqp_channel:close(Chan),
+ rabbit_ct_client_helpers:close_connection(Conn),
+
+ ok.
+
+exchange(Config) ->
+ {ok, _Chan0} = amqp_connection:open_channel(?config(conn, Config)),
+ Conn = rabbit_ct_client_helpers:open_unmanaged_connection(Config, 1),
+ {ok, Chan} = amqp_connection:open_channel(Conn),
+ QName = <<"exchanges-test">>,
+ XName = <<"some-other-exchange">>,
+ Q = queue_declare(Chan, QName),
+ exchange_declare(Chan, XName),
+ queue_bind(Chan, XName, Q, <<"some-key">>),
+ consume(Chan, QName),
+ publish_to(Chan, XName, <<"some-key">>),
+
+ force_stats(),
+ force_stats(),
+ Res = http_get(Config, "/exchanges/%2F/some-other-exchange"),
+
+ ?assertEqual(<<"direct">>, maps:get(type, Res)),
+
+ amqp_channel:close(Chan),
+ rabbit_ct_client_helpers:close_connection(Conn),
+
+ ok.
+
+vhosts(Config) ->
+ Conn = rabbit_ct_client_helpers:open_unmanaged_connection(Config, 0),
+ {ok, Chan} = amqp_connection:open_channel(Conn),
+ _ = queue_declare(Chan, <<"some-queue">>),
+ _ = wait_for_queue(Config, "/queues/%2F/some-queue"),
+
+ Conn2 = rabbit_ct_client_helpers:open_unmanaged_connection(Config, 1),
+ {ok, Chan2} = amqp_connection:open_channel(Conn2),
+ publish(Chan2, <<"some-queue">>),
+ timer:sleep(5100), % TODO force stat emission
+ force_stats(),
+ Res = http_get(Config, "/vhosts"),
+
+ http_delete(Config, "/queues/%2F/some-queue", ?NO_CONTENT),
+ % default vhost
+ [#{} = Vhost] = Res,
+ % assert vhost has some message stats
+ #{} = maps:get(message_stats, Vhost),
+
+ amqp_channel:close(Chan),
+ amqp_channel:close(Chan2),
+ rabbit_ct_client_helpers:close_connection(Conn),
+
+ ok.
+
+nodes(Config) ->
+ Conn = rabbit_ct_client_helpers:open_unmanaged_connection(Config, 0),
+ {ok, Chan} = amqp_connection:open_channel(Conn),
+ _ = queue_declare(Chan, <<"some-queue">>),
+ _ = wait_for_queue(Config, "/queues/%2F/some-queue"),
+
+ {ok, Chan2} = amqp_connection:open_channel(Conn),
+ publish(Chan2, <<"some-queue">>),
+ timer:sleep(5100), % TODO force stat emission
+ force_stats(),
+ Res = http_get(Config, "/nodes"),
+ http_delete(Config, "/queues/%2F/some-queue", ?NO_CONTENT),
+
+ [#{} = N1 , #{} = N2] = Res,
+ ?assert(is_binary(maps:get(name, N1))),
+ ?assert(is_binary(maps:get(name, N2))),
+ [#{} | _] = maps:get(cluster_links, N1),
+ [#{} | _] = maps:get(cluster_links, N2),
+
+ amqp_channel:close(Chan),
+ amqp_channel:close(Chan2),
+ rabbit_ct_client_helpers:close_connection(Conn),
+
+ ok.
+
+overview(Config) ->
+ Conn = rabbit_ct_client_helpers:open_unmanaged_connection(Config, 0),
+ {ok, Chan} = amqp_connection:open_channel(Conn),
+ _ = queue_declare(Chan, <<"queue-n1">>),
+ _ = queue_declare(Chan, <<"queue-n2">>),
+ _ = wait_for_queue(Config, "/queues/%2F/queue-n1"),
+ _ = wait_for_queue(Config, "/queues/%2F/queue-n2"),
+
+ Conn2 = rabbit_ct_client_helpers:open_unmanaged_connection(Config, 1),
+ {ok, Chan2} = amqp_connection:open_channel(Conn2),
+ publish(Chan, <<"queue-n1">>),
+ publish(Chan2, <<"queue-n2">>),
+ timer:sleep(5100), % TODO force stat emission
+ force_stats(), % the second channel takes a little longer to be counted
+ Res = http_get(Config, "/overview"),
+
+ http_delete(Config, "/queues/%2F/queue-n1", ?NO_CONTENT),
+ http_delete(Config, "/queues/%2F/queue-n2", ?NO_CONTENT),
+ % assert the overview reflects the connections, channels and queues created above
+ ObjTots = maps:get(object_totals, Res),
+ ?assert(maps:get(connections, ObjTots) >= 2),
+ ?assert(maps:get(channels, ObjTots) >= 2),
+ #{} = QT = maps:get(queue_totals, Res),
+ ?assert(maps:get(messages_ready, QT) >= 2),
+ MS = maps:get(message_stats, Res),
+ ?assert(maps:get(publish, MS) >= 2),
+ ChurnRates = maps:get(churn_rates, Res),
+ ?assertEqual(2, maps:get(queue_declared, ChurnRates)),
+ ?assertEqual(2, maps:get(queue_created, ChurnRates)),
+ ?assertEqual(0, maps:get(queue_deleted, ChurnRates)),
+ ?assertEqual(2, maps:get(channel_created, ChurnRates)),
+ ?assertEqual(0, maps:get(channel_closed, ChurnRates)),
+ ?assertEqual(0, maps:get(connection_closed, ChurnRates)),
+
+ amqp_channel:close(Chan),
+ amqp_channel:close(Chan2),
+ rabbit_ct_client_helpers:close_connection(Conn),
+ rabbit_ct_client_helpers:close_connection(Conn2),
+
+ ok.
+
+disable_plugin(Config) ->
+ Node = get_node_config(Config, 0, nodename),
+ Status0 = rabbit_ct_broker_helpers:rpc(Config, Node, rabbit, status, []),
+ Listeners0 = proplists:get_value(listeners, Status0),
+ ?assert(lists:member(http, listener_protos(Listeners0))),
+ rabbit_ct_broker_helpers:disable_plugin(Config, Node, 'rabbitmq_web_dispatch'),
+ Status = rabbit_ct_broker_helpers:rpc(Config, Node, rabbit, status, []),
+ Listeners = proplists:get_value(listeners, Status),
+ ?assert(not lists:member(http, listener_protos(Listeners))),
+ rabbit_ct_broker_helpers:enable_plugin(Config, Node, 'rabbitmq_management').
+
+%%----------------------------------------------------------------------------
+%%
+
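+%% Wipe all management metrics state on this node: the raw core metric
+%% tables, the aggregated management tables, every per-resource query cache,
+%% and any old samples still held by the collectors.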
+clear_all_table_data() ->
+ [ets:delete_all_objects(T) || {T, _} <- ?CORE_TABLES],
+ [ets:delete_all_objects(T) || {T, _} <- ?TABLES],
+ [gen_server:call(P, purge_cache)
+ || {_, P, _, _} <- supervisor:which_children(rabbit_mgmt_db_cache_sup)],
+ send_to_all_collectors(purge_old_stats).
+
+get_channel_name(Config, Node) ->
+ [{_, ChData}|_] = rabbit_ct_broker_helpers:rpc(Config, Node, ets, tab2list,
+ [channel_created]),
+ uri_string:recompose(#{path => binary_to_list(pget(name, ChData))}).
+
+consume(Channel, Queue) ->
+ #'basic.consume_ok'{consumer_tag = Tag} =
+ amqp_channel:call(Channel, #'basic.consume'{queue = Queue}),
+ Tag.
+
+publish(Channel, Key) ->
+ Payload = <<"foobar">>,
+ Publish = #'basic.publish'{routing_key = Key},
+ amqp_channel:cast(Channel, Publish, #amqp_msg{payload = Payload}).
+
+basic_get(Channel, Queue) ->
+ Get = #'basic.get'{queue = Queue},
+ amqp_channel:call(Channel, Get).
+
+publish_to(Channel, Exchange, Key) ->
+ Payload = <<"foobar">>,
+ Publish = #'basic.publish'{routing_key = Key,
+ exchange = Exchange},
+ amqp_channel:cast(Channel, Publish, #amqp_msg{payload = Payload}).
+
+exchange_declare(Chan, Name) ->
+ Declare = #'exchange.declare'{exchange = Name},
+ #'exchange.declare_ok'{} = amqp_channel:call(Chan, Declare).
+
+queue_declare(Chan) ->
+ Declare = #'queue.declare'{},
+ #'queue.declare_ok'{queue = Q} = amqp_channel:call(Chan, Declare),
+ Q.
+
+queue_declare(Chan, Name) ->
+ Declare = #'queue.declare'{queue = Name},
+ #'queue.declare_ok'{queue = Q} = amqp_channel:call(Chan, Declare),
+ Q.
+
+queue_declare_durable(Chan, Name) ->
+ Declare = #'queue.declare'{queue = Name, durable = true, exclusive = false},
+ #'queue.declare_ok'{queue = Q} = amqp_channel:call(Chan, Declare),
+ Q.
+
+queue_bind(Chan, Ex, Q, Key) ->
+ Binding = #'queue.bind'{queue = Q,
+ exchange = Ex,
+ routing_key = Key},
+ #'queue.bind_ok'{} = amqp_channel:call(Chan, Binding).
+
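+%% Polling helpers: re-fetch Path until the given keys are populated,
+%% sleeping 10ms between attempts; with the default budget of 1000 attempts
+%% this gives up (and exits) after roughly ten seconds.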
+wait_for_mirrored_queue(Config, Path) ->
+ wait_for_queue(Config, Path, [slave_nodes, synchronised_slave_nodes]).
+
+wait_for_queue(Config, Path) ->
+ wait_for_queue(Config, Path, []).
+
+wait_for_queue(Config, Path, Keys) ->
+ wait_for_queue(Config, Path, Keys, 1000).
+
+wait_for_queue(_Config, Path, Keys, 0) ->
+ exit({timeout, {Path, Keys}});
+
+wait_for_queue(Config, Path, Keys, Count) ->
+ Res = http_get(Config, Path),
+ case present(Keys, Res) of
+ false -> timer:sleep(10),
+ wait_for_queue(Config, Path, Keys, Count - 1);
+ true -> Res
+ end.
+
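+%% present/2 is the readiness predicate: with no keys it always succeeds, so
+%% plain wait_for_queue/2 only waits for the resource to exist; with keys it
+%% also requires each of them to be set to a non-empty value.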
+present([], _Res) ->
+ true;
+present(Keys, Res) ->
+ lists:all(fun (Key) ->
+ X = maps:get(Key, Res, undefined),
+ X =/= [] andalso X =/= undefined
+ end, Keys).
+
+extract_node(N) ->
+ list_to_atom(hd(string:tokens(binary_to_list(N), "@"))).
+
+%% debugging utilities
+
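+%% trace_fun/2 accepts {Module, Function} and {Module, Function, Arity}
+%% tuples. Hypothetical usage (module and function names are illustrative):
+%%   trace_fun(Config, [{rabbit_mgmt_db, some_fun},
+%%                      {rabbit_mgmt_db, other_fun, 2}]).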
+trace_fun(Config, MFs) ->
+ Nodename1 = get_node_config(Config, 0, nodename),
+ Nodename2 = get_node_config(Config, 1, nodename),
+ dbg:tracer(process, {fun(A,_) ->
+ ct:pal(?LOW_IMPORTANCE,
+ "TRACE: ~p", [A])
+ end, ok}),
+ dbg:n(Nodename1),
+ dbg:n(Nodename2),
+ dbg:p(all,c),
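+ %% the generator patterns filter MFs: 2-tuples trace all arities,
+ %% 3-tuples trace one specific arity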
+ [ dbg:tpl(M, F, cx) || {M, F} <- MFs],
+ [ dbg:tpl(M, F, A, cx) || {M, F, A} <- MFs].
+
+dump_table(Config, Table) ->
+ Data = rabbit_ct_broker_helpers:rpc(Config, 0, ets, tab2list, [Table]),
+ ct:pal(?LOW_IMPORTANCE, "Node 0: Dump of table ~p:~n~p~n", [Table, Data]),
+ Data0 = rabbit_ct_broker_helpers:rpc(Config, 1, ets, tab2list, [Table]),
+ ct:pal(?LOW_IMPORTANCE, "Node 1: Dump of table ~p:~n~p~n", [Table, Data0]).
+
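+%% Ask every node's external stats process to emit an update and every
+%% metrics collector to aggregate, then give the management DB a moment
+%% to process the resulting events.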
+force_stats() ->
+ force_all(),
+ timer:sleep(2000).
+
+force_all() ->
+ [begin
+ {rabbit_mgmt_external_stats, N} ! emit_update,
+ timer:sleep(125)
+ end || N <- [node() | nodes()]],
+ send_to_all_collectors(collect_metrics).
+
+send_to_all_collectors(Msg) ->
+ [begin
+ [{rabbit_mgmt_metrics_collector:name(Table), N} ! Msg
+ || {Table, _} <- ?CORE_TABLES]
+ end || N <- [node() | nodes()]].
+
+listener_protos(Listeners) ->
+ [listener_proto(L) || L <- Listeners].
+
+listener_proto(#listener{protocol = Proto}) ->
+ Proto;
+listener_proto(Proto) when is_atom(Proto) ->
+ Proto;
+%% rabbit:status/0 used this formatting before rabbitmq/rabbitmq-cli#340
+listener_proto({Proto, _Port, _Interface}) ->
+ Proto.
diff --git a/deps/rabbitmq_management/test/clustering_prop_SUITE.erl b/deps/rabbitmq_management/test/clustering_prop_SUITE.erl
new file mode 100644
index 0000000000..98790745db
--- /dev/null
+++ b/deps/rabbitmq_management/test/clustering_prop_SUITE.erl
@@ -0,0 +1,280 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2016-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(clustering_prop_SUITE).
+
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include_lib("common_test/include/ct.hrl").
+-include_lib("proper/include/proper.hrl").
+-include_lib("rabbit_common/include/rabbit_core_metrics.hrl").
+-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_metrics.hrl").
+-include_lib("rabbitmq_ct_helpers/include/rabbit_mgmt_test.hrl").
+
+
+-import(rabbit_ct_broker_helpers, [get_node_config/3]).
+-import(rabbit_mgmt_test_util, [http_get/2, http_get_from_node/3]).
+-import(rabbit_misc, [pget/2]).
+
+-compile([export_all, nowarn_format]).
+
+-export_type([rmqnode/0, queues/0]).
+
+all() ->
+ [
+ {group, non_parallel_tests}
+ ].
+
+groups() ->
+ [{non_parallel_tests, [], [
+ prop_connection_channel_counts_test
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+merge_app_env(Config) ->
+ Config1 = rabbit_ct_helpers:merge_app_env(Config,
+ {rabbit, [
+ {collect_statistics, fine},
+ {collect_statistics_interval, 500}
+ ]}),
+ rabbit_ct_helpers:merge_app_env(Config1,
+ {rabbitmq_management, [
+ {rates_mode, detailed},
+ {sample_retention_policies,
+ %% List of {MaxAgeInSeconds, SampleEveryNSeconds}
+ [{global, [{605, 5}, {3660, 60}, {29400, 600}, {86400, 1800}]},
+ {basic, [{605, 5}, {3600, 60}]},
+ {detailed, [{605, 5}]}] }]}).
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ inets:start(),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, ?MODULE},
+ {rmq_nodes_count, 3}
+ ]),
+ Config2 = merge_app_env(Config1),
+ rabbit_ct_helpers:run_setup_steps(Config2,
+ rabbit_ct_broker_helpers:setup_steps()).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config,
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_group(_, Config) ->
+ Config.
+
+end_per_group(_, Config) ->
+ Config.
+
+init_per_testcase(multi_node_case1_test = Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase);
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, clear_all_table_data, []),
+ rabbit_ct_broker_helpers:rpc(Config, 1, ?MODULE, clear_all_table_data, []),
+ rabbit_ct_broker_helpers:rpc(Config, 2, ?MODULE, clear_all_table_data, []),
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+prop_connection_channel_counts_test(Config) ->
+ Fun = fun () -> prop_connection_channel_counts(Config) end,
+ rabbit_ct_proper_helpers:run_proper(Fun, [], 10).
+
+-type rmqnode() :: 0|1|2.
+-type queues() :: qn1 | qn2 | qn3.
+
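+%% The property drives a random sequence of "open a connection with some
+%% channels", "close a connection", "close a channel" and "force stats"
+%% operations, mirroring them in a local model, and then checks that the
+%% HTTP API of every node agrees with the model.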
+prop_connection_channel_counts(Config) ->
+ ?FORALL(Ops, list(frequency([{6, {add_conn, rmqnode(),
+ list(chan)}},
+ {3, rem_conn},
+ {6, rem_chan},
+ {1, force_stats}])),
+ begin
+ % ensure we begin with no connections
+ true = validate_counts(Config, []),
+ Cons = lists:foldl(fun (Op, Agg) ->
+ execute_op(Config, Op, Agg)
+ end, [], Ops),
+ force_stats(),
+ Res = validate_counts(Config, Cons),
+ cleanup(Cons),
+ force_stats(),
+ Res
+ end).
+
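+%% Compare the model against the connection and channel counts reported
+%% over HTTP by each of the three nodes.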
+validate_counts(Config, Conns) ->
+ Expected = length(Conns),
+ ChanCount = lists:sum([length(Chans) || {conn, _, Chans} <- Conns]),
+ C1 = length(http_get_from_node(Config, 0, "/connections")),
+ C2 = length(http_get_from_node(Config, 1, "/connections")),
+ C3 = length(http_get_from_node(Config, 2, "/connections")),
+ Ch1 = length(http_get_from_node(Config, 0, "/channels")),
+ Ch2 = length(http_get_from_node(Config, 1, "/channels")),
+ Ch3 = length(http_get_from_node(Config, 2, "/channels")),
+ [Expected, Expected, Expected, ChanCount, ChanCount, ChanCount]
+ =:= [C1, C2, C3, Ch1, Ch2, Ch3].
+
+cleanup(Conns) ->
+ [rabbit_ct_client_helpers:close_connection(Conn)
+ || {conn, Conn, _} <- Conns].
+
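+%% The model state is a list of {conn, Connection, Channels} tuples.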
+execute_op(Config, {add_conn, Node, Chans}, State) ->
+ Conn = rabbit_ct_client_helpers:open_unmanaged_connection(Config, Node),
+ Chans1 = [begin
+ {ok, Ch} = amqp_connection:open_channel(Conn),
+ Ch
+ end || _ <- Chans],
+ State ++ [{conn, Conn, Chans1}];
+execute_op(_Config, rem_chan, [{conn, Conn, [Ch | Chans]} | Rem]) ->
+ ok = amqp_channel:close(Ch),
+ Rem ++ [{conn, Conn, Chans}];
+execute_op(_Config, rem_chan, State) -> State;
+execute_op(_Config, rem_conn, []) ->
+ [];
+execute_op(_Config, rem_conn, [{conn, Conn, _Chans} | Rem]) ->
+ rabbit_ct_client_helpers:close_connection(Conn),
+ Rem;
+execute_op(_Config, force_stats, State) ->
+ force_stats(),
+ State.
+
+%%----------------------------------------------------------------------------
+%%
+
+force_stats() ->
+ force_all(),
+ timer:sleep(5000).
+
+force_all() ->
+ [begin
+ {rabbit_mgmt_external_stats, N} ! emit_update,
+ timer:sleep(100),
+ [{rabbit_mgmt_metrics_collector:name(Table), N} ! collect_metrics
+ || {Table, _} <- ?CORE_TABLES]
+ end
+ || N <- [node() | nodes()]].
+
+clear_all_table_data() ->
+ [ets:delete_all_objects(T) || {T, _} <- ?CORE_TABLES],
+ [ets:delete_all_objects(T) || {T, _} <- ?TABLES],
+ [gen_server:call(P, purge_cache)
+ || {_, P, _, _} <- supervisor:which_children(rabbit_mgmt_db_cache_sup)].
+
+get_channel_name(Config, Node) ->
+ [{_, ChData}|_] = rabbit_ct_broker_helpers:rpc(Config, Node, ets, tab2list,
+ [channel_created]),
+ uri_string:recompose(#{path => binary_to_list(pget(name, ChData))}).
+
+consume(Channel, Queue) ->
+ #'basic.consume_ok'{consumer_tag = Tag} =
+ amqp_channel:call(Channel, #'basic.consume'{queue = Queue}),
+ Tag.
+
+publish(Channel, Key) ->
+ Payload = <<"foobar">>,
+ Publish = #'basic.publish'{routing_key = Key},
+ amqp_channel:cast(Channel, Publish, #amqp_msg{payload = Payload}).
+
+basic_get(Channel, Queue) ->
+ Get = #'basic.get'{queue = Queue},
+ amqp_channel:call(Channel, Get).
+
+publish_to(Channel, Exchange, Key) ->
+ Payload = <<"foobar">>,
+ Publish = #'basic.publish'{routing_key = Key,
+ exchange = Exchange},
+ amqp_channel:cast(Channel, Publish, #amqp_msg{payload = Payload}).
+
+exchange_declare(Chan, Name) ->
+ Declare = #'exchange.declare'{exchange = Name},
+ #'exchange.declare_ok'{} = amqp_channel:call(Chan, Declare).
+
+queue_declare(Chan) ->
+ Declare = #'queue.declare'{},
+ #'queue.declare_ok'{queue = Q} = amqp_channel:call(Chan, Declare),
+ Q.
+
+queue_declare(Chan, Name) ->
+ Declare = #'queue.declare'{queue = Name},
+ #'queue.declare_ok'{queue = Q} = amqp_channel:call(Chan, Declare),
+ Q.
+
+queue_bind(Chan, Ex, Q, Key) ->
+ Binding = #'queue.bind'{queue = Q,
+ exchange = Ex,
+ routing_key = Key},
+ #'queue.bind_ok'{} = amqp_channel:call(Chan, Binding).
+
+wait_for(Config, Path) ->
+ wait_for(Config, Path, [slave_nodes, synchronised_slave_nodes]).
+
+wait_for(Config, Path, Keys) ->
+ wait_for(Config, Path, Keys, 1000).
+
+wait_for(_Config, Path, Keys, 0) ->
+ exit({timeout, {Path, Keys}});
+
+wait_for(Config, Path, Keys, Count) ->
+ Res = http_get(Config, Path),
+ case present(Keys, Res) of
+ false -> timer:sleep(10),
+ wait_for(Config, Path, Keys, Count - 1);
+ true -> Res
+ end.
+
+present(Keys, Res) ->
+ lists:all(fun (Key) ->
+ X = maps:get(Key, Res, undefined),
+ X =/= [] andalso X =/= undefined
+ end, Keys).
+
+assert_single_node(Exp, Act) ->
+ ?assertEqual(1, length(Act)),
+ assert_node(Exp, hd(Act)).
+
+assert_nodes(Exp, Act0) ->
+ Act = [extract_node(A) || A <- Act0],
+ ?assertEqual(length(Exp), length(Act)),
+ [?assert(lists:member(E, Act)) || E <- Exp].
+
+assert_node(Exp, Act) ->
+ ?assertEqual(Exp, list_to_atom(binary_to_list(Act))).
+
+extract_node(N) ->
+ list_to_atom(hd(string:tokens(binary_to_list(N), "@"))).
+
+%% debugging utilities
+
+trace_fun(Config, MFs) ->
+ Nodename1 = get_node_config(Config, 0, nodename),
+ Nodename2 = get_node_config(Config, 1, nodename),
+ dbg:tracer(process, {fun(A,_) ->
+ ct:pal(?LOW_IMPORTANCE,
+ "TRACE: ~p", [A])
+ end, ok}),
+ dbg:n(Nodename1),
+ dbg:n(Nodename2),
+ dbg:p(all,c),
+ [ dbg:tpl(M, F, cx) || {M, F} <- MFs],
+ [ dbg:tpl(M, F, A, cx) || {M, F, A} <- MFs].
+
+dump_table(Config, Table) ->
+ Data = rabbit_ct_broker_helpers:rpc(Config, 0, ets, tab2list, [Table]),
+ ct:pal(?LOW_IMPORTANCE, "Node 0: Dump of table ~p:~n~p~n", [Table, Data]),
+ Data0 = rabbit_ct_broker_helpers:rpc(Config, 1, ets, tab2list, [Table]),
+ ct:pal(?LOW_IMPORTANCE, "Node 1: Dump of table ~p:~n~p~n", [Table, Data0]).
+
diff --git a/deps/rabbitmq_management/test/config_schema_SUITE.erl b/deps/rabbitmq_management/test/config_schema_SUITE.erl
new file mode 100644
index 0000000000..e40337c60a
--- /dev/null
+++ b/deps/rabbitmq_management/test/config_schema_SUITE.erl
@@ -0,0 +1,55 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2016-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(config_schema_SUITE).
+
+-compile(export_all).
+
+all() ->
+ [
+ run_snippets
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ Config1 = rabbit_ct_helpers:run_setup_steps(Config),
+ rabbit_ct_config_schema:init_schemas(rabbitmq_management, Config1).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, Testcase}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_testcase(Testcase, Config) ->
+ Config1 = rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()),
+ rabbit_ct_helpers:testcase_finished(Config1, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+run_snippets(Config) ->
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, run_snippets1, [Config]).
+
+run_snippets1(Config) ->
+ rabbit_ct_config_schema:run_snippets(Config).
+
diff --git a/deps/rabbitmq_management/test/config_schema_SUITE_data/certs/cacert.pem b/deps/rabbitmq_management/test/config_schema_SUITE_data/certs/cacert.pem
new file mode 100644
index 0000000000..eaf6b67806
--- /dev/null
+++ b/deps/rabbitmq_management/test/config_schema_SUITE_data/certs/cacert.pem
@@ -0,0 +1 @@
+I'm not a certificate
diff --git a/deps/rabbitmq_management/test/config_schema_SUITE_data/certs/cert.pem b/deps/rabbitmq_management/test/config_schema_SUITE_data/certs/cert.pem
new file mode 100644
index 0000000000..eaf6b67806
--- /dev/null
+++ b/deps/rabbitmq_management/test/config_schema_SUITE_data/certs/cert.pem
@@ -0,0 +1 @@
+I'm not a certificate
diff --git a/deps/rabbitmq_management/test/config_schema_SUITE_data/certs/key.pem b/deps/rabbitmq_management/test/config_schema_SUITE_data/certs/key.pem
new file mode 100644
index 0000000000..eaf6b67806
--- /dev/null
+++ b/deps/rabbitmq_management/test/config_schema_SUITE_data/certs/key.pem
@@ -0,0 +1 @@
+I'm not a certificate
diff --git a/deps/rabbitmq_management/test/config_schema_SUITE_data/rabbit-mgmt/access.log b/deps/rabbitmq_management/test/config_schema_SUITE_data/rabbit-mgmt/access.log
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/deps/rabbitmq_management/test/config_schema_SUITE_data/rabbit-mgmt/access.log
diff --git a/deps/rabbitmq_management/test/config_schema_SUITE_data/rabbitmq_management.snippets b/deps/rabbitmq_management/test/config_schema_SUITE_data/rabbitmq_management.snippets
new file mode 100644
index 0000000000..aa7b8b12c7
--- /dev/null
+++ b/deps/rabbitmq_management/test/config_schema_SUITE_data/rabbitmq_management.snippets
@@ -0,0 +1,525 @@
+[{http_log,
+ "listeners.tcp.default = 5672
+ collect_statistics_interval = 10000
+ management.http_log_dir = test/config_schema_SUITE_data/rabbit-mgmt
+ management.rates_mode = basic",
+ [{rabbit,[{tcp_listeners,[5672]},{collect_statistics_interval,10000}]},
+ {rabbitmq_management,
+ [{http_log_dir,"test/config_schema_SUITE_data/rabbit-mgmt"},
+ {rates_mode,basic}]}],
+ [rabbitmq_management]},
+
+ %%
+ %% TCP listener
+ %%
+
+ {tcp_listener_port_only,
+ "management.tcp.port = 15674",
+ [{rabbitmq_management,[
+ {tcp_config,[
+ {port,15674}
+ ]}
+ ]}],
+ [rabbitmq_management]},
+
+ {tcp_listener_interface_port,
+ "management.tcp.ip = 192.168.1.2
+ management.tcp.port = 15674",
+ [{rabbitmq_management,[
+ {tcp_config,[
+ {ip, "192.168.1.2"},
+ {port,15674}
+ ]}
+ ]}],
+ [rabbitmq_management]},
+
+ {tcp_listener_server_opts_compress,
+ "management.tcp.compress = true",
+ [
+ {rabbitmq_management, [
+ {tcp_config, [{cowboy_opts, [{compress, true}]}]}
+ ]}
+ ], [rabbitmq_management]
+ },
+
+ {tcp_listener_server_opts_compress_and_idle_timeout,
+ "management.tcp.compress = true
+ management.tcp.idle_timeout = 123",
+ [
+ {rabbitmq_management, [
+ {tcp_config, [{cowboy_opts, [{compress, true},
+ {idle_timeout, 123}]}]}
+ ]}
+ ], [rabbitmq_management]
+ },
+
+ {tcp_listener_server_opts_compress_and_multiple_timeouts,
+ "management.tcp.compress = true
+ management.tcp.idle_timeout = 123
+ management.tcp.inactivity_timeout = 456
+ management.tcp.request_timeout = 789",
+ [
+ {rabbitmq_management, [
+ {tcp_config, [{cowboy_opts, [{compress, true},
+ {idle_timeout, 123},
+ {inactivity_timeout, 456},
+ {request_timeout, 789}]}]}
+ ]}
+ ], [rabbitmq_management]
+ },
+
+ {tcp_listener_server_opts_multiple_timeouts_only,
+ "management.tcp.idle_timeout = 123
+ management.tcp.inactivity_timeout = 456
+ management.tcp.request_timeout = 789",
+ [
+ {rabbitmq_management, [
+ {tcp_config, [{cowboy_opts, [{idle_timeout, 123},
+ {inactivity_timeout, 456},
+ {request_timeout, 789}]}]}
+ ]}
+ ], [rabbitmq_management]
+ },
+
+ {tcp_listener_server_opts_shutdown_timeout,
+ "management.tcp.shutdown_timeout = 7000",
+ [
+ {rabbitmq_management, [
+ {tcp_config, [{cowboy_opts, [{shutdown_timeout, 7000}]}]}
+ ]}
+ ], [rabbitmq_management]
+ },
+
+ {tcp_listener_server_opts_max_keepalive,
+ "management.tcp.max_keepalive = 120",
+ [
+ {rabbitmq_management, [
+ {tcp_config, [{cowboy_opts, [{max_keepalive, 120}]}]}
+ ]}
+ ], [rabbitmq_management]
+ },
+
+
+ %%
+ %% TLS listener
+ %%
+
+ {tls_listener_port_only,
+ "management.ssl.port = 15671",
+ [{rabbitmq_management,[
+ {ssl_config,[
+ {port,15671}
+ ]}
+ ]}],
+ [rabbitmq_management]},
+
+ {tls_listener_interface_port,
+ "management.ssl.ip = 192.168.1.2
+ management.ssl.port = 15671",
+ [{rabbitmq_management,[
+ {ssl_config,[
+ {ip, "192.168.1.2"},
+ {port,15671}
+ ]}
+ ]}],
+ [rabbitmq_management]},
+
+ {tls_listener,
+ "management.ssl.ip = 192.168.1.2
+ management.ssl.port = 15671
+ management.ssl.cacertfile = test/config_schema_SUITE_data/certs/cacert.pem
+ management.ssl.certfile = test/config_schema_SUITE_data/certs/cert.pem
+ management.ssl.keyfile = test/config_schema_SUITE_data/certs/key.pem
+ management.ssl.verify = verify_none
+ management.ssl.fail_if_no_peer_cert = false",
+ [{rabbitmq_management,[
+ {ssl_config,[
+ {ip, "192.168.1.2"},
+ {port,15671},
+ {cacertfile,"test/config_schema_SUITE_data/certs/cacert.pem"},
+ {certfile,"test/config_schema_SUITE_data/certs/cert.pem"},
+ {keyfile,"test/config_schema_SUITE_data/certs/key.pem"},
+ {verify, verify_none},
+ {fail_if_no_peer_cert, false}
+ ]}
+ ]}],
+ [rabbitmq_management]},
+
+ {tls_listener_cipher_suites,
+ "management.ssl.ip = 192.168.1.2
+ management.ssl.port = 15671
+ management.ssl.cacertfile = test/config_schema_SUITE_data/certs/cacert.pem
+ management.ssl.certfile = test/config_schema_SUITE_data/certs/cert.pem
+ management.ssl.keyfile = test/config_schema_SUITE_data/certs/key.pem
+
+ management.ssl.honor_cipher_order = true
+ management.ssl.honor_ecc_order = true
+ management.ssl.client_renegotiation = false
+ management.ssl.secure_renegotiate = true
+
+ management.ssl.verify = verify_peer
+ management.ssl.fail_if_no_peer_cert = false
+
+ management.ssl.versions.1 = tlsv1.2
+ management.ssl.versions.2 = tlsv1.1
+
+ management.ssl.ciphers.1 = ECDHE-ECDSA-AES256-GCM-SHA384
+ management.ssl.ciphers.2 = ECDHE-RSA-AES256-GCM-SHA384
+ management.ssl.ciphers.3 = ECDHE-ECDSA-AES256-SHA384
+ management.ssl.ciphers.4 = ECDHE-RSA-AES256-SHA384
+ management.ssl.ciphers.5 = ECDH-ECDSA-AES256-GCM-SHA384
+ management.ssl.ciphers.6 = ECDH-RSA-AES256-GCM-SHA384
+ management.ssl.ciphers.7 = ECDH-ECDSA-AES256-SHA384
+ management.ssl.ciphers.8 = ECDH-RSA-AES256-SHA384
+ management.ssl.ciphers.9 = DHE-RSA-AES256-GCM-SHA384",
+ [{rabbitmq_management,[
+ {ssl_config,[
+ {ip, "192.168.1.2"},
+ {port,15671},
+ {cacertfile,"test/config_schema_SUITE_data/certs/cacert.pem"},
+ {certfile,"test/config_schema_SUITE_data/certs/cert.pem"},
+ {keyfile,"test/config_schema_SUITE_data/certs/key.pem"},
+
+ {verify, verify_peer},
+ {fail_if_no_peer_cert, false},
+
+ {honor_cipher_order, true},
+ {honor_ecc_order, true},
+ {client_renegotiation, false},
+ {secure_renegotiate, true},
+
+ {versions,['tlsv1.2','tlsv1.1']},
+ {ciphers, [
+ "ECDHE-ECDSA-AES256-GCM-SHA384",
+ "ECDHE-RSA-AES256-GCM-SHA384",
+ "ECDHE-ECDSA-AES256-SHA384",
+ "ECDHE-RSA-AES256-SHA384",
+ "ECDH-ECDSA-AES256-GCM-SHA384",
+ "ECDH-RSA-AES256-GCM-SHA384",
+ "ECDH-ECDSA-AES256-SHA384",
+ "ECDH-RSA-AES256-SHA384",
+ "DHE-RSA-AES256-GCM-SHA384"
+ ]}
+ ]}
+ ]}],
+ [rabbitmq_management]},
+
+ {tls_listener_server_opts_compress,
+ "management.ssl.compress = true",
+ [
+ {rabbitmq_management, [
+ {ssl_config, [{cowboy_opts, [{compress, true}]}]}
+ ]}
+ ], [rabbitmq_management]
+ },
+
+ {tls_listener_server_opts_compress_and_idle_timeout,
+ "management.ssl.compress = true
+ management.ssl.idle_timeout = 123",
+ [
+ {rabbitmq_management, [
+ {ssl_config, [{cowboy_opts, [{compress, true},
+ {idle_timeout, 123}]}]}
+ ]}
+ ], [rabbitmq_management]
+ },
+
+ {tls_listener_server_opts_compress_and_multiple_timeouts,
+ "management.ssl.compress = true
+ management.ssl.idle_timeout = 123
+ management.ssl.inactivity_timeout = 456
+ management.ssl.request_timeout = 789",
+ [
+ {rabbitmq_management, [
+ {ssl_config, [{cowboy_opts, [{compress, true},
+ {idle_timeout, 123},
+ {inactivity_timeout, 456},
+ {request_timeout, 789}]}]}
+ ]}
+ ], [rabbitmq_management]
+ },
+
+ {tls_listener_server_opts_multiple_timeouts_only,
+ "management.ssl.idle_timeout = 123
+ management.ssl.inactivity_timeout = 456
+ management.ssl.request_timeout = 789",
+ [
+ {rabbitmq_management, [
+ {ssl_config, [{cowboy_opts, [{idle_timeout, 123},
+ {inactivity_timeout, 456},
+ {request_timeout, 789}]}]}
+ ]}
+ ], [rabbitmq_management]
+ },
+
+ {tls_listener_server_opts_shutdown_timeout,
+ "management.ssl.shutdown_timeout = 7000",
+ [
+ {rabbitmq_management, [
+ {ssl_config, [{cowboy_opts, [{shutdown_timeout, 7000}]}]}
+ ]}
+ ], [rabbitmq_management]
+ },
+
+ {tls_listener_server_opts_max_keepalive,
+ "management.ssl.max_keepalive = 120",
+ [
+ {rabbitmq_management, [
+ {ssl_config, [{cowboy_opts, [{max_keepalive, 120}]}]}
+ ]}
+ ], [rabbitmq_management]
+ },
+
+ %%
+ %% Retention Policies
+ %%
+
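+ %% Period names translate to a max age in seconds (minute = 60,
+ %% hour = 3600, day = 86400; a bare number is taken as-is) and each value
+ %% is the sampling interval, giving {MaxAgeSeconds, SampleEverySeconds}
+ %% pairs in the generated config.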
+ {retention_policies,
+ "management.sample_retention_policies.global.minute = 5
+ management.sample_retention_policies.global.hour = 60
+ management.sample_retention_policies.global.day = 1200
+
+ management.sample_retention_policies.basic.minute = 5
+ management.sample_retention_policies.basic.hour = 60
+
+ management.sample_retention_policies.detailed.10 = 5",
+ [{rabbitmq_management,
+ [{sample_retention_policies,
+ [{global,[{60,5},{3600,60},{86400,1200}]},
+ {basic,[{60,5},{3600,60}]},
+ {detailed,[{10,5}]}]}]}],
+ [rabbitmq_management]},
+
+ {path_prefix,
+ "management.path_prefix = /a/prefix",
+ [
+ {rabbitmq_management, [
+ {path_prefix, "/a/prefix"}
+ ]}
+ ], [rabbitmq_management]
+ },
+
+ {login_session_timeout,
+ "management.login_session_timeout = 30",
+ [
+ {rabbitmq_management, [
+ {login_session_timeout, 30}
+ ]}
+ ], [rabbitmq_management]
+ },
+
+ %%
+ %% Inter-node query result caching
+ %%
+
+ {db_cache_multiplier,
+ "management.db_cache_multiplier = 7",
+ [
+ {rabbitmq_management, [
+ {management_db_cache_multiplier, 7}
+ ]}
+ ], [rabbitmq_management]
+ },
+
+ %%
+ %% CORS
+ %%
+
+ {cors_settings,
+ "management.cors.allow_origins.1 = https://origin1.org
+ management.cors.allow_origins.2 = https://origin2.org
+ management.cors.max_age = 3600",
+ [
+ {rabbitmq_management, [
+ {cors_allow_origins, ["https://origin1.org", "https://origin2.org"]},
+ {cors_max_age, 3600}
+ ]}
+ ], [rabbitmq_management]
+ },
+
+ {cors_wildcard,
+ "management.cors.allow_origins.1 = *
+ management.cors.max_age = 3600",
+ [
+ {rabbitmq_management, [
+ {cors_allow_origins, ["*"]},
+ {cors_max_age, 3600}
+ ]}
+ ], [rabbitmq_management]
+ },
+
+
+ %%
+ %% CSP
+ %%
+
+ {csp_policy_case1,
+ "management.csp.policy = default-src 'self'",
+ [
+ {rabbitmq_management, [
+ {content_security_policy, "default-src 'self'"}
+ ]}
+ ], [rabbitmq_management]
+ },
+
+ {csp_policy_case2,
+ "management.csp.policy = default-src https://onlinebanking.examplebank.com",
+ [
+ {rabbitmq_management, [
+ {content_security_policy, "default-src https://onlinebanking.examplebank.com"}
+ ]}
+ ], [rabbitmq_management]
+ },
+
+ {csp_policy_case3,
+ "management.csp.policy = default-src 'self' *.mailsite.com; img-src *",
+ [
+ {rabbitmq_management, [
+ {content_security_policy, "default-src 'self' *.mailsite.com; img-src *"}
+ ]}
+ ], [rabbitmq_management]
+ },
+
+ %%
+ %% HSTS
+ %%
+
+ {hsts_policy_case1,
+ "management.hsts.policy = max-age=31536000; includeSubDomains",
+ [
+ {rabbitmq_management, [
+ {strict_transport_security, "max-age=31536000; includeSubDomains"}
+ ]}
+ ], [rabbitmq_management]
+ },
+
+ {csp_and_hsts_combined,
+ "management.csp.policy = default-src 'self' *.mailsite.com; img-src *
+ management.hsts.policy = max-age=31536000; includeSubDomains",
+ [
+ {rabbitmq_management, [
+ {content_security_policy, "default-src 'self' *.mailsite.com; img-src *"},
+ {strict_transport_security, "max-age=31536000; includeSubDomains"}
+ ]}
+ ], [rabbitmq_management]
+ },
+
+
+ %%
+ %% Legacy listener configuration
+ %%
+
+ {legacy_tcp_listener,
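+ %% management.listener.* keys populate the single legacy `listener' app
+ %% key, whereas the management.tcp.* and management.ssl.* keys above
+ %% populate tcp_config and ssl_config respectively.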
+ "management.listener.port = 12345",
+ [{rabbitmq_management,[{listener,[{port,12345}]}]}],
+ [rabbitmq_management]},
+
+ {legacy_ssl_listener,
+ "management.listener.port = 15671
+ management.listener.ssl = true
+ management.listener.ssl_opts.cacertfile = test/config_schema_SUITE_data/certs/cacert.pem
+ management.listener.ssl_opts.certfile = test/config_schema_SUITE_data/certs/cert.pem
+ management.listener.ssl_opts.keyfile = test/config_schema_SUITE_data/certs/key.pem",
+ [{rabbitmq_management,
+ [{listener,
+ [{port,15671},
+ {ssl,true},
+ {ssl_opts,
+ [{cacertfile,
+ "test/config_schema_SUITE_data/certs/cacert.pem"},
+ {certfile,"test/config_schema_SUITE_data/certs/cert.pem"},
+ {keyfile,
+ "test/config_schema_SUITE_data/certs/key.pem"}]}]}]}],
+ [rabbitmq_management]},
+
+ {legacy_tcp_listener_ip,
+ "management.listener.port = 15672
+ management.listener.ip = 127.0.0.1",
+ [{rabbitmq_management,[{listener,[{port,15672},{ip,"127.0.0.1"}]}]}],
+ [rabbitmq_management]},
+ {legacy_ssl_listener_port,
+ "management.listener.port = 15672
+ management.listener.ssl = true
+
+ management.listener.ssl_opts.cacertfile = test/config_schema_SUITE_data/certs/cacert.pem
+ management.listener.ssl_opts.certfile = test/config_schema_SUITE_data/certs/cert.pem
+ management.listener.ssl_opts.keyfile = test/config_schema_SUITE_data/certs/key.pem",
+ [{rabbitmq_management,
+ [{listener,
+ [{port,15672},
+ {ssl,true},
+ {ssl_opts,
+ [{cacertfile,
+ "test/config_schema_SUITE_data/certs/cacert.pem"},
+ {certfile,"test/config_schema_SUITE_data/certs/cert.pem"},
+ {keyfile,
+ "test/config_schema_SUITE_data/certs/key.pem"}]}]}]}],
+ [rabbitmq_management]},
+
+ {legacy_server_opts_compress,
+ "management.listener.server.compress = true",
+ [
+ {rabbitmq_management, [
+ {listener, [{cowboy_opts, [{compress, true}]}]}
+ ]}
+ ], [rabbitmq_management]
+ },
+
+ {legacy_server_opts_compress_and_idle_timeout,
+ "management.listener.server.compress = true
+ management.listener.server.idle_timeout = 123",
+ [
+ {rabbitmq_management, [
+ {listener, [{cowboy_opts, [{compress, true},
+ {idle_timeout, 123}]}]}
+ ]}
+ ], [rabbitmq_management]
+ },
+
+ {legacy_server_opts_compress_and_multiple_timeouts,
+ "management.listener.server.compress = true
+ management.listener.server.idle_timeout = 123
+ management.listener.server.inactivity_timeout = 456
+ management.listener.server.request_timeout = 789",
+ [
+ {rabbitmq_management, [
+ {listener, [{cowboy_opts, [{compress, true},
+ {idle_timeout, 123},
+ {inactivity_timeout, 456},
+ {request_timeout, 789}]}]}
+ ]}
+ ], [rabbitmq_management]
+ },
+
+ {legacy_server_opts_multiple_timeouts_only,
+ "management.listener.server.idle_timeout = 123
+ management.listener.server.inactivity_timeout = 456
+ management.listener.server.request_timeout = 789",
+ [
+ {rabbitmq_management, [
+ {listener, [{cowboy_opts, [{idle_timeout, 123},
+ {inactivity_timeout, 456},
+ {request_timeout, 789}]}]}
+ ]}
+ ], [rabbitmq_management]
+ },
+
+ {legacy_server_opts_shutdown_timeout,
+ "management.listener.server.shutdown_timeout = 7000",
+ [
+ {rabbitmq_management, [
+ {listener, [{cowboy_opts, [{shutdown_timeout, 7000}]}]}
+ ]}
+ ], [rabbitmq_management]
+ },
+
+ {legacy_server_opts_max_keepalive,
+ "management.listener.server.max_keepalive = 120",
+ [
+ {rabbitmq_management, [
+ {listener, [{cowboy_opts, [{max_keepalive, 120}]}]}
+ ]}
+ ], [rabbitmq_management]
+ }
+
+].
diff --git a/deps/rabbitmq_management/test/listener_config_SUITE.erl b/deps/rabbitmq_management/test/listener_config_SUITE.erl
new file mode 100644
index 0000000000..46d65d2be3
--- /dev/null
+++ b/deps/rabbitmq_management/test/listener_config_SUITE.erl
@@ -0,0 +1,135 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2016-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(listener_config_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, non_parallel_tests}
+ ].
+
+groups() ->
+ [{non_parallel_tests, [], [
+ no_config_defaults,
+ tcp_config_only,
+ ssl_config_only,
+
+ multiple_listeners
+ ]}].
+
+init_per_suite(Config) ->
+ application:load(rabbitmq_management),
+ Config.
+
+end_per_suite(Config) ->
+ Config.
+
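+%% Each case starts from a clean slate: all three listener-related app
+%% environment keys are unset both before and after every test.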
+init_per_testcase(_, Config) ->
+ application:unset_env(rabbitmq_management, listener),
+ application:unset_env(rabbitmq_management, tcp_config),
+ application:unset_env(rabbitmq_management, ssl_config),
+ Config.
+
+end_per_testcase(_, Config) ->
+ application:unset_env(rabbitmq_management, listener),
+ application:unset_env(rabbitmq_management, tcp_config),
+ application:unset_env(rabbitmq_management, ssl_config),
+ Config.
+
+%%
+%% Test Cases
+%%
+
+no_config_defaults(_Config) ->
+ ?assertEqual([
+ [
+ {cowboy_opts,[
+ {sendfile, false}
+ ]},
+ {port, 15672}]
+ ], rabbit_mgmt_app:get_listeners_config()).
+
+
+tcp_config_only(_Config) ->
+ application:set_env(rabbitmq_management, tcp_config, [
+ {port, 999},
+ {cowboy_opts, [
+ {idle_timeout, 10000}
+ ]}
+ ]),
+
+ Expected = [
+ {cowboy_opts,[
+ {idle_timeout, 10000},
+ {sendfile, false}
+ ]},
+ {port, 999}
+ ],
+ ?assertEqual(lists:usort(Expected), get_single_listener_config()).
+
+ssl_config_only(_Config) ->
+ application:set_env(rabbitmq_management, ssl_config, [
+ {port, 999},
+ {idle_timeout, 10000}
+ ]),
+
+ Expected = [
+ {cowboy_opts,[
+ {sendfile,false}
+ ]},
+ {port, 999},
+ {ssl, true},
+ {ssl_opts, [
+ {port, 999},
+ {idle_timeout, 10000}
+ ]}
+ ],
+ ?assertEqual(lists:usort(Expected), get_single_listener_config()).
+
+multiple_listeners(_Config) ->
+ application:set_env(rabbitmq_management, tcp_config, [
+ {port, 998},
+ {cowboy_opts, [
+ {idle_timeout, 10000}
+ ]}
+ ]),
+ application:set_env(rabbitmq_management, ssl_config, [
+ {port, 999},
+ {idle_timeout, 10000}
+ ]),
+ Expected = [
+ [
+ {cowboy_opts, [
+ {idle_timeout, 10000},
+ {sendfile, false}
+ ]},
+ {port,998}
+ ],
+
+ [
+ {cowboy_opts,[
+ {sendfile, false}
+ ]},
+ {port, 999},
+ {ssl, true},
+ {ssl_opts, [
+ {port, 999},
+ {idle_timeout, 10000}
+ ]}
+ ]
+ ],
+ ?assertEqual(lists:usort(Expected), rabbit_mgmt_app:get_listeners_config()).
+
+get_single_listener_config() ->
+ [Config] = rabbit_mgmt_app:get_listeners_config(),
+ lists:usort(Config).
diff --git a/deps/rabbitmq_management/test/rabbit_mgmt_http_SUITE.erl b/deps/rabbitmq_management/test/rabbit_mgmt_http_SUITE.erl
new file mode 100644
index 0000000000..85ae582503
--- /dev/null
+++ b/deps/rabbitmq_management/test/rabbit_mgmt_http_SUITE.erl
@@ -0,0 +1,3545 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2016-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mgmt_http_SUITE).
+
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("rabbitmq_ct_helpers/include/rabbit_mgmt_test.hrl").
+
+-import(rabbit_ct_client_helpers, [close_connection/1, close_channel/1,
+ open_unmanaged_connection/1]).
+-import(rabbit_mgmt_test_util, [assert_list/2, assert_item/2, test_item/2,
+ assert_keys/2, assert_no_keys/2,
+ http_get/2, http_get/3, http_get/5,
+ http_get_no_auth/3,
+ http_get_no_map/2,
+ http_put/4, http_put/6,
+ http_post/4, http_post/6,
+ http_upload_raw/8,
+ http_delete/3, http_delete/5,
+ http_put_raw/4, http_post_accept_json/4,
+ req/4, auth_header/2,
+ assert_permanent_redirect/3,
+ uri_base_from/2, format_for_upload/1,
+ amqp_port/1, req/6]).
+
+-import(rabbit_misc, [pget/2]).
+
+-define(COLLECT_INTERVAL, 1000).
+-define(PATH_PREFIX, "/custom-prefix").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, all_tests_with_prefix},
+ {group, all_tests_without_prefix},
+ {group, user_limits_ff}
+ ].
+
+groups() ->
+ [
+ {all_tests_with_prefix, [], all_tests()},
+ {all_tests_without_prefix, [], all_tests()},
+ {user_limits_ff, [], [
+ user_limits_list_test,
+ user_limit_set_test
+ ]}
+ ].
+
+all_tests() -> [
+ cli_redirect_test,
+ api_redirect_test,
+ stats_redirect_test,
+ overview_test,
+ auth_test,
+ cluster_name_test,
+ nodes_test,
+ memory_test,
+ ets_tables_memory_test,
+ vhosts_test,
+ vhosts_description_test,
+ vhosts_trace_test,
+ users_test,
+ users_legacy_administrator_test,
+ adding_a_user_with_password_test,
+ adding_a_user_with_password_hash_test,
+ adding_a_user_with_permissions_in_single_operation_test,
+ adding_a_user_without_tags_fails_test,
+ adding_a_user_without_password_or_hash_test,
+ adding_a_user_with_both_password_and_hash_fails_test,
+ updating_a_user_without_password_or_hash_clears_password_test,
+ user_credential_validation_accept_everything_succeeds_test,
+ user_credential_validation_min_length_succeeds_test,
+ user_credential_validation_min_length_fails_test,
+ updating_tags_of_a_passwordless_user_test,
+ permissions_validation_test,
+ permissions_list_test,
+ permissions_test,
+ connections_test,
+ multiple_invalid_connections_test,
+ exchanges_test,
+ queues_test,
+ quorum_queues_test,
+ queues_well_formed_json_test,
+ bindings_test,
+ bindings_post_test,
+ bindings_null_routing_key_test,
+ bindings_e2e_test,
+ permissions_administrator_test,
+ permissions_vhost_test,
+ permissions_amqp_test,
+ permissions_connection_channel_consumer_test,
+ consumers_cq_test,
+ consumers_qq_test,
+ definitions_test,
+ definitions_vhost_test,
+ definitions_password_test,
+ definitions_remove_things_test,
+ definitions_server_named_queue_test,
+ definitions_with_charset_test,
+ long_definitions_test,
+ long_definitions_multipart_test,
+ aliveness_test,
+ arguments_test,
+ arguments_table_test,
+ queue_purge_test,
+ queue_actions_test,
+ exclusive_consumer_test,
+ exclusive_queue_test,
+ connections_channels_pagination_test,
+ exchanges_pagination_test,
+ exchanges_pagination_permissions_test,
+ queue_pagination_test,
+ queue_pagination_columns_test,
+ queues_pagination_permissions_test,
+ samples_range_test,
+ sorting_test,
+ format_output_test,
+ columns_test,
+ get_test,
+ get_encoding_test,
+ get_fail_test,
+ publish_test,
+ publish_large_message_test,
+ publish_accept_json_test,
+ publish_fail_test,
+ publish_base64_test,
+ publish_unrouted_test,
+ if_empty_unused_test,
+ parameters_test,
+ global_parameters_test,
+ policy_test,
+ policy_permissions_test,
+ issue67_test,
+ extensions_test,
+ cors_test,
+ vhost_limits_list_test,
+ vhost_limit_set_test,
+ rates_test,
+ single_active_consumer_cq_test,
+ single_active_consumer_qq_test,
+ oauth_test,
+ disable_basic_auth_test,
+ login_test,
+ csp_headers_test,
+ auth_attempts_test
+].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+merge_app_env(Config) ->
+ Config1 = rabbit_ct_helpers:merge_app_env(Config,
+ {rabbit, [
+ {collect_statistics_interval, ?COLLECT_INTERVAL}
+ ]}),
+ rabbit_ct_helpers:merge_app_env(Config1,
+ {rabbitmq_management, [
+ {sample_retention_policies,
+ [{global, [{605, 1}]},
+ {basic, [{605, 1}]},
+ {detailed, [{10, 1}]}]
+ }]}).
+
+start_broker(Config) ->
+ Setup0 = rabbit_ct_broker_helpers:setup_steps(),
+ Setup1 = rabbit_ct_client_helpers:setup_steps(),
+ Steps = Setup0 ++ Setup1,
+ rabbit_ct_helpers:run_setup_steps(Config, Steps).
+
+finish_init(Group, Config) ->
+ rabbit_ct_helpers:log_environment(),
+ inets:start(),
+ NodeConf = [{rmq_nodename_suffix, Group}],
+ Config1 = rabbit_ct_helpers:set_config(Config, NodeConf),
+ merge_app_env(Config1).
+
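+%% Finish group setup, boot the broker, and enable the given feature flag on
+%% all nodes; if any node does not support it, tear the group back down and
+%% skip the whole group.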
+enable_feature_flag_or_skip(FFName, Group, Config0) ->
+ Config1 = finish_init(Group, Config0),
+ Config2 = start_broker(Config1),
+ Nodes = rabbit_ct_broker_helpers:get_node_configs(
+ Config2, nodename),
+ Ret = rabbit_ct_broker_helpers:rpc(
+ Config2, 0,
+ rabbit_feature_flags,
+ is_supported_remotely,
+ [Nodes, [FFName], 60000]),
+ case Ret of
+ true ->
+ ok = rabbit_ct_broker_helpers:rpc(
+ Config2, 0, rabbit_feature_flags, enable, [FFName]),
+ Config2;
+ false ->
+ end_per_group(Group, Config2),
+ {skip, rabbit_misc:format("Feature flag '~s' is not supported", [FFName])}
+ end.
+
+init_per_group(all_tests_with_prefix=Group, Config0) ->
+ PathConfig = {rabbitmq_management, [{path_prefix, ?PATH_PREFIX}]},
+ Config1 = rabbit_ct_helpers:merge_app_env(Config0, PathConfig),
+ Config2 = finish_init(Group, Config1),
+ Config3 = start_broker(Config2),
+ Nodes = rabbit_ct_broker_helpers:get_node_configs(
+ Config3, nodename),
+ Ret = rabbit_ct_broker_helpers:rpc(
+ Config3, 0,
+ rabbit_feature_flags,
+ is_supported_remotely,
+ [Nodes, [quorum_queue], 60000]),
+ case Ret of
+ true ->
+ ok = rabbit_ct_broker_helpers:rpc(
+ Config3, 0, rabbit_feature_flags, enable, [quorum_queue]),
+ Config3;
+ false ->
+ end_per_group(Group, Config3),
+ {skip, "Quorum queues are unsupported"}
+ end;
+init_per_group(user_limits_ff = Group, Config0) ->
+ enable_feature_flag_or_skip(user_limits, Group, Config0);
+init_per_group(Group, Config0) ->
+ enable_feature_flag_or_skip(quorum_queue, Group, Config0).
+
+end_per_group(_, Config) ->
+ inets:stop(),
+ Teardown0 = rabbit_ct_client_helpers:teardown_steps(),
+ Teardown1 = rabbit_ct_broker_helpers:teardown_steps(),
+ Steps = Teardown0 ++ Teardown1,
+ rabbit_ct_helpers:run_teardown_steps(Config, Steps).
+
+init_per_testcase(Testcase = permissions_vhost_test, Config) ->
+ rabbit_ct_broker_helpers:delete_vhost(Config, <<"myvhost">>),
+ rabbit_ct_broker_helpers:delete_vhost(Config, <<"myvhost1">>),
+ rabbit_ct_broker_helpers:delete_vhost(Config, <<"myvhost2">>),
+ rabbit_ct_helpers:testcase_started(Config, Testcase);
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_broker_helpers:close_all_connections(Config, 0, <<"rabbit_mgmt_SUITE:init_per_testcase">>),
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_broker_helpers:close_all_connections(Config, 0, <<"rabbit_mgmt_SUITE:end_per_testcase">>),
+ rabbit_ct_broker_helpers:rpc(Config, 0, application, set_env,
+ [rabbitmq_management, disable_basic_auth, false]),
+ Config1 = end_per_testcase0(Testcase, Config),
+ rabbit_ct_helpers:testcase_finished(Config1, Testcase).
+
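+%% Per-testcase cleanup: delete the vhosts and users that individual
+%% testcases create, so later cases start from a clean broker.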
+end_per_testcase0(T, Config)
+ when T =:= long_definitions_test; T =:= long_definitions_multipart_test ->
+ Vhosts = long_definitions_vhosts(T),
+ [rabbit_ct_broker_helpers:delete_vhost(Config, Name)
+ || #{name := Name} <- Vhosts],
+ Config;
+end_per_testcase0(queues_test, Config) ->
+ rabbit_ct_broker_helpers:delete_vhost(Config, <<"downvhost">>),
+ Config;
+end_per_testcase0(vhost_limits_list_test, Config) ->
+ rabbit_ct_broker_helpers:delete_vhost(Config, <<"limit_test_vhost_1">>),
+ rabbit_ct_broker_helpers:delete_vhost(Config, <<"limit_test_vhost_2">>),
+ rabbit_ct_broker_helpers:delete_user(Config, <<"limit_test_vhost_1_user">>),
+ rabbit_ct_broker_helpers:delete_user(Config, <<"limit_test_vhost_2_user">>),
+ rabbit_ct_broker_helpers:delete_user(Config, <<"no_vhost_user">>),
+ Config;
+end_per_testcase0(vhost_limit_set_test, Config) ->
+ rabbit_ct_broker_helpers:delete_vhost(Config, <<"limit_test_vhost_1">>),
+ rabbit_ct_broker_helpers:delete_user(Config, <<"limit_test_vhost_1_user">>),
+ Config;
+end_per_testcase0(user_limits_list_test, Config) ->
+ rabbit_ct_broker_helpers:delete_vhost(Config, <<"limit_test_vhost_1">>),
+ rabbit_ct_broker_helpers:delete_user(Config, <<"limit_test_user_1_user">>),
+ rabbit_ct_broker_helpers:delete_user(Config, <<"limit_test_user_2_user">>),
+ rabbit_ct_broker_helpers:delete_user(Config, <<"no_vhost_user">>),
+ Config;
+end_per_testcase0(user_limit_set_test, Config) ->
+ rabbit_ct_broker_helpers:delete_vhost(Config, <<"limit_test_vhost_1">>),
+ rabbit_ct_broker_helpers:delete_user(Config, <<"limit_test_user_1_user">>),
+ rabbit_ct_broker_helpers:delete_user(Config, <<"limit_test_vhost_1_user">>),
+ Config;
+end_per_testcase0(permissions_vhost_test, Config) ->
+ rabbit_ct_broker_helpers:delete_vhost(Config, <<"myvhost1">>),
+ rabbit_ct_broker_helpers:delete_vhost(Config, <<"myvhost2">>),
+ rabbit_ct_broker_helpers:delete_user(Config, <<"myuser1">>),
+ rabbit_ct_broker_helpers:delete_user(Config, <<"myuser2">>),
+ Config;
+end_per_testcase0(_, Config) -> Config.
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+overview_test(Config) ->
+ %% Rather crude, but this request doesn't return much detail; at least a
+ %% non-empty listener list means the endpoint didn't blow up.
+ true = 0 < length(maps:get(listeners, http_get(Config, "/overview"))),
+ http_put(Config, "/users/myuser", [{password, <<"myuser">>},
+ {tags, <<"management">>}], {group, '2xx'}),
+ http_get(Config, "/overview", "myuser", "myuser", ?OK),
+ http_delete(Config, "/users/myuser", {group, '2xx'}),
+ passed.
+
+cluster_name_test(Config) ->
+ http_put(Config, "/users/myuser", [{password, <<"myuser">>},
+ {tags, <<"management">>}], {group, '2xx'}),
+ http_put(Config, "/cluster-name", [{name, "foo"}], "myuser", "myuser", ?NOT_AUTHORISED),
+ http_put(Config, "/cluster-name", [{name, "foo"}], {group, '2xx'}),
+ #{name := <<"foo">>} = http_get(Config, "/cluster-name", "myuser", "myuser", ?OK),
+ http_delete(Config, "/users/myuser", {group, '2xx'}),
+ passed.
+
+nodes_test(Config) ->
+ http_put(Config, "/users/user", [{password, <<"user">>},
+ {tags, <<"management">>}], {group, '2xx'}),
+ http_put(Config, "/users/monitor", [{password, <<"monitor">>},
+ {tags, <<"monitoring">>}], {group, '2xx'}),
+ DiscNode = #{type => <<"disc">>, running => true},
+ assert_list([DiscNode], http_get(Config, "/nodes")),
+ assert_list([DiscNode], http_get(Config, "/nodes", "monitor", "monitor", ?OK)),
+ http_get(Config, "/nodes", "user", "user", ?NOT_AUTHORISED),
+ [Node] = http_get(Config, "/nodes"),
+ Path = "/nodes/" ++ binary_to_list(maps:get(name, Node)),
+ assert_item(DiscNode, http_get(Config, Path, ?OK)),
+ assert_item(DiscNode, http_get(Config, Path, "monitor", "monitor", ?OK)),
+ http_get(Config, Path, "user", "user", ?NOT_AUTHORISED),
+ http_delete(Config, "/users/user", {group, '2xx'}),
+ http_delete(Config, "/users/monitor", {group, '2xx'}),
+ passed.
+
+memory_test(Config) ->
+ [Node] = http_get(Config, "/nodes"),
+ Path = "/nodes/" ++ binary_to_list(maps:get(name, Node)) ++ "/memory",
+ Result = http_get(Config, Path, ?OK),
+ assert_keys([memory], Result),
+ Keys = [total, connection_readers, connection_writers, connection_channels,
+ connection_other, queue_procs, queue_slave_procs, plugins,
+ other_proc, mnesia, mgmt_db, msg_index, other_ets, binary, code,
+ atom, other_system, allocated_unused, reserved_unallocated],
+ assert_keys(Keys, maps:get(memory, Result)),
+ http_get(Config, "/nodes/nonode/memory", ?NOT_FOUND),
+ %% Relative memory as a percentage of the total
+ Result1 = http_get(Config, Path ++ "/relative", ?OK),
+ assert_keys([memory], Result1),
+ Breakdown = maps:get(memory, Result1),
+ assert_keys(Keys, Breakdown),
+
+ assert_item(#{total => 100}, Breakdown),
+ %% allocated_unused and reserved_unallocated
+ %% make this test pretty unpredictable
+ assert_percentage(Breakdown, 20),
+ http_get(Config, "/nodes/nonode/memory/relative", ?NOT_FOUND),
+ passed.
+
+ets_tables_memory_test(Config) ->
+ [Node] = http_get(Config, "/nodes"),
+ Path = "/nodes/" ++ binary_to_list(maps:get(name, Node)) ++ "/memory/ets",
+ Result = http_get(Config, Path, ?OK),
+ assert_keys([ets_tables_memory], Result),
+ NonMgmtKeys = [rabbit_vhost, rabbit_user_permission],
+ Keys = [queue_stats, vhost_stats_coarse_conn_stats,
+ connection_created_stats, channel_process_stats, consumer_stats,
+ queue_msg_rates],
+ assert_keys(Keys ++ NonMgmtKeys, maps:get(ets_tables_memory, Result)),
+ http_get(Config, "/nodes/nonode/memory/ets", ?NOT_FOUND),
+ %% Relative memory as a percentage of the total
+ ResultRelative = http_get(Config, Path ++ "/relative", ?OK),
+ assert_keys([ets_tables_memory], ResultRelative),
+ Breakdown = maps:get(ets_tables_memory, ResultRelative),
+ assert_keys(Keys, Breakdown),
+ assert_item(#{total => 100}, Breakdown),
+ assert_percentage(Breakdown),
+ http_get(Config, "/nodes/nonode/memory/ets/relative", ?NOT_FOUND),
+
+ ResultMgmt = http_get(Config, Path ++ "/management", ?OK),
+ assert_keys([ets_tables_memory], ResultMgmt),
+ assert_keys(Keys, maps:get(ets_tables_memory, ResultMgmt)),
+ assert_no_keys(NonMgmtKeys, maps:get(ets_tables_memory, ResultMgmt)),
+
+ ResultMgmtRelative = http_get(Config, Path ++ "/management/relative", ?OK),
+ assert_keys([ets_tables_memory], ResultMgmtRelative),
+ assert_keys(Keys, maps:get(ets_tables_memory, ResultMgmtRelative)),
+ assert_no_keys(NonMgmtKeys, maps:get(ets_tables_memory, ResultMgmtRelative)),
+ assert_item(#{total => 100}, maps:get(ets_tables_memory, ResultMgmtRelative)),
+ assert_percentage(maps:get(ets_tables_memory, ResultMgmtRelative)),
+
+ ResultUnknownFilter = http_get(Config, Path ++ "/blahblah", ?OK),
+ #{ets_tables_memory := <<"no_tables">>} = ResultUnknownFilter,
+ passed.
+
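+%% Checks that the values of a relative (percentage) memory breakdown sum
+%% to roughly 100, tolerating one unit of rounding error per item plus an
+%% optional extra margin.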
+assert_percentage(Breakdown0) ->
+ assert_percentage(Breakdown0, 0).
+
+assert_percentage(Breakdown0, ExtraMargin) ->
+ Breakdown = maps:to_list(Breakdown0),
+ Total = lists:sum([P || {K, P} <- Breakdown, K =/= total]),
+ AcceptableMargin = (length(Breakdown) - 1) + ExtraMargin,
+ %% Rounding up and down can lose some digits. Never more than the number
+ %% of items in the breakdown.
+ case ((Total =< 100 + AcceptableMargin) andalso (Total >= 100 - AcceptableMargin)) of
+ false ->
+ throw({bad_percentage, Total, Breakdown});
+ true ->
+ ok
+ end.
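+
+%% For example, #{total => 100, a => 49, b => 52} sums to 101; with three
+%% entries the default margin is 2, so assert_percentage/1 accepts it.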
+
+auth_test(Config) ->
+ http_put(Config, "/users/user", [{password, <<"user">>},
+ {tags, <<"">>}], {group, '2xx'}),
+ test_auth(Config, ?NOT_AUTHORISED, []),
+ test_auth(Config, ?NOT_AUTHORISED, [auth_header("user", "user")]),
+ test_auth(Config, ?NOT_AUTHORISED, [auth_header("guest", "gust")]),
+ test_auth(Config, ?OK, [auth_header("guest", "guest")]),
+ http_delete(Config, "/users/user", {group, '2xx'}),
+ passed.
+
+%% This test is rather verbose because we are also testing our understanding
+%% of Webmachine.
+vhosts_test(Config) ->
+ assert_list([#{name => <<"/">>}], http_get(Config, "/vhosts")),
+ %% Create a new one
+ http_put(Config, "/vhosts/myvhost", none, {group, '2xx'}),
+ %% PUT should be idempotent
+ http_put(Config, "/vhosts/myvhost", none, {group, '2xx'}),
+ %% Check it's there
+ assert_list([#{name => <<"/">>}, #{name => <<"myvhost">>}],
+ http_get(Config, "/vhosts")),
+ %% Check individually
+ assert_item(#{name => <<"/">>}, http_get(Config, "/vhosts/%2F", ?OK)),
+ assert_item(#{name => <<"myvhost">>},http_get(Config, "/vhosts/myvhost")),
+
+ %% Crash it
+ rabbit_ct_broker_helpers:force_vhost_failure(Config, <<"myvhost">>),
+ [NodeData] = http_get(Config, "/nodes"),
+ Node = binary_to_atom(maps:get(name, NodeData), utf8),
+ assert_item(#{name => <<"myvhost">>, cluster_state => #{Node => <<"stopped">>}},
+ http_get(Config, "/vhosts/myvhost")),
+
+ %% Restart it
+ http_post(Config, "/vhosts/myvhost/start/" ++ atom_to_list(Node), [], {group, '2xx'}),
+ assert_item(#{name => <<"myvhost">>, cluster_state => #{Node => <<"running">>}},
+ http_get(Config, "/vhosts/myvhost")),
+
+ %% Delete it
+ http_delete(Config, "/vhosts/myvhost", {group, '2xx'}),
+ %% It's not there
+ http_get(Config, "/vhosts/myvhost", ?NOT_FOUND),
+ http_delete(Config, "/vhosts/myvhost", ?NOT_FOUND),
+
+ passed.
+
+vhosts_description_test(Config) ->
+ Ret = rabbit_ct_broker_helpers:enable_feature_flag(
+ Config, virtual_host_metadata),
+
+ http_put(Config, "/vhosts/myvhost", [{description, <<"vhost description">>},
+ {tags, <<"tag1,tag2">>}], {group, '2xx'}),
+ Expected = case Ret of
+ {skip, _} ->
+ #{name => <<"myvhost">>};
+ _ ->
+ #{name => <<"myvhost">>,
+ metadata => #{
+ description => <<"vhost description">>,
+ tags => [<<"tag1">>, <<"tag2">>]
+ }}
+ end,
+ assert_item(Expected, http_get(Config, "/vhosts/myvhost")),
+
+ %% Delete it
+ http_delete(Config, "/vhosts/myvhost", {group, '2xx'}),
+
+ passed.
+
+vhosts_trace_test(Config) ->
+ http_put(Config, "/vhosts/myvhost", none, {group, '2xx'}),
+ Disabled = #{name => <<"myvhost">>, tracing => false},
+ Enabled = #{name => <<"myvhost">>, tracing => true},
+ assert_item(Disabled, http_get(Config, "/vhosts/myvhost")),
+ http_put(Config, "/vhosts/myvhost", [{tracing, true}], {group, '2xx'}),
+ assert_item(Enabled, http_get(Config, "/vhosts/myvhost")),
+ http_put(Config, "/vhosts/myvhost", [{tracing, true}], {group, '2xx'}),
+ assert_item(Enabled, http_get(Config, "/vhosts/myvhost")),
+ http_put(Config, "/vhosts/myvhost", [{tracing, false}], {group, '2xx'}),
+ assert_item(Disabled, http_get(Config, "/vhosts/myvhost")),
+ http_delete(Config, "/vhosts/myvhost", {group, '2xx'}),
+
+ passed.
+
+users_test(Config) ->
+ assert_item(#{name => <<"guest">>, tags => <<"administrator">>},
+ http_get(Config, "/whoami")),
+ rabbit_ct_broker_helpers:rpc(Config, 0, application, set_env,
+ [rabbitmq_management, login_session_timeout, 100]),
+ assert_item(#{name => <<"guest">>,
+ tags => <<"administrator">>,
+ login_session_timeout => 100},
+ http_get(Config, "/whoami")),
+ http_get(Config, "/users/myuser", ?NOT_FOUND),
+ http_put_raw(Config, "/users/myuser", "Something not JSON", ?BAD_REQUEST),
+ http_put(Config, "/users/myuser", [{flim, <<"flam">>}], ?BAD_REQUEST),
+ http_put(Config, "/users/myuser", [{tags, <<"management">>},
+ {password, <<"myuser">>}],
+ {group, '2xx'}),
+ http_put(Config, "/users/myuser", [{password_hash, <<"not_hash">>}], ?BAD_REQUEST),
+ http_put(Config, "/users/myuser", [{password_hash,
+ <<"IECV6PZI/Invh0DL187KFpkO5Jc=">>},
+ {tags, <<"management">>}], {group, '2xx'}),
+ assert_item(#{name => <<"myuser">>, tags => <<"management">>,
+ password_hash => <<"IECV6PZI/Invh0DL187KFpkO5Jc=">>,
+ hashing_algorithm => <<"rabbit_password_hashing_sha256">>},
+ http_get(Config, "/users/myuser")),
+
+ http_put(Config, "/users/myuser", [{password_hash,
+ <<"IECV6PZI/Invh0DL187KFpkO5Jc=">>},
+ {hashing_algorithm, <<"rabbit_password_hashing_md5">>},
+ {tags, <<"management">>}], {group, '2xx'}),
+ assert_item(#{name => <<"myuser">>, tags => <<"management">>,
+ password_hash => <<"IECV6PZI/Invh0DL187KFpkO5Jc=">>,
+ hashing_algorithm => <<"rabbit_password_hashing_md5">>},
+ http_get(Config, "/users/myuser")),
+ http_put(Config, "/users/myuser", [{password, <<"password">>},
+ {tags, <<"administrator, foo">>}], {group, '2xx'}),
+ assert_item(#{name => <<"myuser">>, tags => <<"administrator,foo">>},
+ http_get(Config, "/users/myuser")),
+ assert_list(lists:sort([#{name => <<"myuser">>, tags => <<"administrator,foo">>},
+ #{name => <<"guest">>, tags => <<"administrator">>}]),
+ lists:sort(http_get(Config, "/users"))),
+ test_auth(Config, ?OK, [auth_header("myuser", "password")]),
+ http_delete(Config, "/users/myuser", {group, '2xx'}),
+ test_auth(Config, ?NOT_AUTHORISED, [auth_header("myuser", "password")]),
+ http_get(Config, "/users/myuser", ?NOT_FOUND),
+ passed.
+
+without_permissions_users_test(Config) ->
+ assert_item(#{name => <<"guest">>, tags => <<"administrator">>},
+ http_get(Config, "/whoami")),
+ http_put(Config, "/users/myuser", [{password_hash,
+ <<"IECV6PZI/Invh0DL187KFpkO5Jc=">>},
+ {tags, <<"management">>}], {group, '2xx'}),
+ Perms = [{configure, <<".*">>}, {write, <<".*">>}, {read, <<".*">>}],
+ http_put(Config, "/permissions/%2F/myuser", Perms, {group, '2xx'}),
+ http_put(Config, "/users/myuserwithoutpermissions", [{password_hash,
+ <<"IECV6PZI/Invh0DL187KFpkO5Jc=">>},
+ {tags, <<"management">>}], {group, '2xx'}),
+ assert_list([#{name => <<"myuserwithoutpermissions">>, tags => <<"management">>,
+ hashing_algorithm => <<"rabbit_password_hashing_sha256">>,
+ password_hash => <<"IECV6PZI/Invh0DL187KFpkO5Jc=">>}],
+ http_get(Config, "/users/without-permissions")),
+ http_delete(Config, "/users/myuser", {group, '2xx'}),
+ http_delete(Config, "/users/myuserwithoutpermissions", {group, '2xx'}),
+ passed.
+
+users_bulk_delete_test(Config) ->
+ assert_item(#{name => <<"guest">>, tags => <<"administrator">>},
+ http_get(Config, "/whoami")),
+ http_put(Config, "/users/myuser1", [{tags, <<"management">>}, {password, <<"myuser">>}],
+ {group, '2xx'}),
+ http_put(Config, "/users/myuser2", [{tags, <<"management">>}, {password, <<"myuser">>}],
+ {group, '2xx'}),
+ http_put(Config, "/users/myuser3", [{tags, <<"management">>}, {password, <<"myuser">>}],
+ {group, '2xx'}),
+ http_get(Config, "/users/myuser1", {group, '2xx'}),
+ http_get(Config, "/users/myuser2", {group, '2xx'}),
+ http_get(Config, "/users/myuser3", {group, '2xx'}),
+
+ http_post_json(Config, "/users/bulk-delete",
+ "{\"users\": [\"myuser1\", \"myuser2\"]}", {group, '2xx'}),
+ http_get(Config, "/users/myuser1", ?NOT_FOUND),
+ http_get(Config, "/users/myuser2", ?NOT_FOUND),
+ http_get(Config, "/users/myuser3", {group, '2xx'}),
+ http_post_json(Config, "/users/bulk-delete", "{\"users\": [\"myuser3\"]}",
+ {group, '2xx'}),
+ http_get(Config, "/users/myuser3", ?NOT_FOUND),
+ passed.
+
+users_legacy_administrator_test(Config) ->
+ http_put(Config, "/users/myuser1", [{administrator, <<"true">>},
+ {password, <<"myuser1">>}],
+ {group, '2xx'}),
+ http_put(Config, "/users/myuser2", [{administrator, <<"false">>},
+ {password, <<"myuser2">>}],
+ {group, '2xx'}),
+ assert_item(#{name => <<"myuser1">>, tags => <<"administrator">>},
+ http_get(Config, "/users/myuser1")),
+ assert_item(#{name => <<"myuser2">>, tags => <<"">>},
+ http_get(Config, "/users/myuser2")),
+ http_delete(Config, "/users/myuser1", {group, '2xx'}),
+ http_delete(Config, "/users/myuser2", {group, '2xx'}),
+ passed.
+
+adding_a_user_with_password_test(Config) ->
+ http_put(Config, "/users/user10", [{tags, <<"management">>},
+ {password, <<"password">>}],
+ [?CREATED, ?NO_CONTENT]),
+ http_delete(Config, "/users/user10", ?NO_CONTENT).
+adding_a_user_with_password_hash_test(Config) ->
+ http_put(Config, "/users/user11", [{tags, <<"management">>},
+ %% SHA-256 of "secret"
+ {password_hash, <<"2bb80d537b1da3e38bd30361aa855686bde0eacd7162fef6a25fe97bf527a25b">>}],
+ [?CREATED, ?NO_CONTENT]),
+ http_delete(Config, "/users/user11", ?NO_CONTENT).
+
+adding_a_user_with_permissions_in_single_operation_test(Config) ->
+ QArgs = #{},
+ PermArgs = #{configure => <<".*">>,
+ write => <<".*">>,
+ read => <<".*">>},
+ http_delete(Config, "/vhosts/vhost42", [?NO_CONTENT, ?NOT_FOUND]),
+ http_delete(Config, "/vhosts/vhost43", [?NO_CONTENT, ?NOT_FOUND]),
+ http_delete(Config, "/users/user-preconfigured-perms", [?NO_CONTENT, ?NOT_FOUND]),
+
+ http_put(Config, "/vhosts/vhost42", none, [?CREATED, ?NO_CONTENT]),
+ http_put(Config, "/vhosts/vhost43", none, [?CREATED, ?NO_CONTENT]),
+
+ http_put(Config, "/users/user-preconfigured-perms", [{password, <<"user-preconfigured-perms">>},
+ {tags, <<"management">>},
+ {permissions, [
+ {<<"vhost42">>, PermArgs},
+ {<<"vhost43">>, PermArgs}
+ ]}],
+ [?CREATED, ?NO_CONTENT]),
+ assert_list([#{tracing => false, name => <<"vhost42">>},
+ #{tracing => false, name => <<"vhost43">>}],
+ http_get(Config, "/vhosts", "user-preconfigured-perms", "user-preconfigured-perms", ?OK)),
+ http_put(Config, "/queues/vhost42/myqueue", QArgs,
+ "user-preconfigured-perms", "user-preconfigured-perms", [?CREATED, ?NO_CONTENT]),
+ http_put(Config, "/queues/vhost43/myqueue", QArgs,
+ "user-preconfigured-perms", "user-preconfigured-perms", [?CREATED, ?NO_CONTENT]),
+ Test1 =
+ fun(Path) ->
+ http_get(Config, Path, "user-preconfigured-perms", "user-preconfigured-perms", ?OK)
+ end,
+ Test2 =
+ fun(Path1, Path2) ->
+ http_get(Config, Path1 ++ "/vhost42/" ++ Path2, "user-preconfigured-perms", "user-preconfigured-perms",
+ ?OK),
+ http_get(Config, Path1 ++ "/vhost43/" ++ Path2, "user-preconfigured-perms", "user-preconfigured-perms",
+ ?OK)
+ end,
+ Test1("/exchanges"),
+ Test2("/exchanges", ""),
+ Test2("/exchanges", "amq.direct"),
+ Test1("/queues"),
+ Test2("/queues", ""),
+ Test2("/queues", "myqueue"),
+ Test1("/bindings"),
+ Test2("/bindings", ""),
+ Test2("/queues", "myqueue/bindings"),
+ Test2("/exchanges", "amq.default/bindings/source"),
+ Test2("/exchanges", "amq.default/bindings/destination"),
+ Test2("/bindings", "e/amq.default/q/myqueue"),
+ Test2("/bindings", "e/amq.default/q/myqueue/myqueue"),
+ http_delete(Config, "/vhosts/vhost42", ?NO_CONTENT),
+ http_delete(Config, "/vhosts/vhost43", ?NO_CONTENT),
+ http_delete(Config, "/users/user-preconfigured-perms", ?NO_CONTENT),
+ passed.
+
+adding_a_user_without_tags_fails_test(Config) ->
+ http_put(Config, "/users/no-tags", [{password, <<"password">>}], ?BAD_REQUEST).
+
+%% Creating a passwordless user makes sense when X.509 certificates or
+%% another "external" authentication mechanism or backend is used.
+%% See rabbitmq/rabbitmq-management#383.
+adding_a_user_without_password_or_hash_test(Config) ->
+ http_put(Config, "/users/no-pwd", [{tags, <<"management">>}], [?CREATED, ?NO_CONTENT]),
+ http_put(Config, "/users/no-pwd", [{tags, <<"management">>}], [?CREATED, ?NO_CONTENT]),
+ http_delete(Config, "/users/no-pwd", ?NO_CONTENT).
+
+adding_a_user_with_both_password_and_hash_fails_test(Config) ->
+ http_put(Config, "/users/myuser", [{password, <<"password">>},
+ {password_hash, <<"password_hash">>}],
+ ?BAD_REQUEST),
+ http_put(Config, "/users/myuser", [{tags, <<"management">>},
+ {password, <<"password">>},
+ {password_hash, <<"password_hash">>}], ?BAD_REQUEST).
+
+updating_a_user_without_password_or_hash_clears_password_test(Config) ->
+ http_put(Config, "/users/myuser", [{tags, <<"management">>},
+ {password, <<"myuser">>}], [?CREATED, ?NO_CONTENT]),
+ %% In this case, providing neither a password nor a password_hash
+ %% clears the user's credentials.
+ http_put(Config, "/users/myuser", [{tags, <<"management">>}], [?CREATED, ?NO_CONTENT]),
+ assert_item(#{name => <<"myuser">>,
+ tags => <<"management">>,
+ password_hash => <<>>,
+ hashing_algorithm => <<"rabbit_password_hashing_sha256">>},
+ http_get(Config, "/users/myuser")),
+ http_delete(Config, "/users/myuser", ?NO_CONTENT).
+
+-define(NON_GUEST_USERNAME, <<"abc">>).
+
+user_credential_validation_accept_everything_succeeds_test(Config) ->
+ rabbit_ct_broker_helpers:delete_user(Config, ?NON_GUEST_USERNAME),
+ rabbit_ct_broker_helpers:switch_credential_validator(Config, accept_everything),
+ http_put(Config, "/users/abc", [{password, <<"password">>},
+ {tags, <<"management">>}], {group, '2xx'}),
+ rabbit_ct_broker_helpers:delete_user(Config, ?NON_GUEST_USERNAME).
+
+user_credential_validation_min_length_succeeds_test(Config) ->
+ rabbit_ct_broker_helpers:delete_user(Config, ?NON_GUEST_USERNAME),
+ rabbit_ct_broker_helpers:switch_credential_validator(Config, min_length, 5),
+ http_put(Config, "/users/abc", [{password, <<"password">>},
+ {tags, <<"management">>}], {group, '2xx'}),
+ rabbit_ct_broker_helpers:delete_user(Config, ?NON_GUEST_USERNAME),
+ rabbit_ct_broker_helpers:switch_credential_validator(Config, accept_everything).
+
+user_credential_validation_min_length_fails_test(Config) ->
+ rabbit_ct_broker_helpers:delete_user(Config, ?NON_GUEST_USERNAME),
+ rabbit_ct_broker_helpers:switch_credential_validator(Config, min_length, 5),
+ http_put(Config, "/users/abc", [{password, <<"_">>},
+ {tags, <<"management">>}], ?BAD_REQUEST),
+ rabbit_ct_broker_helpers:switch_credential_validator(Config, accept_everything).
+
+updating_tags_of_a_passwordless_user_test(Config) ->
+ rabbit_ct_broker_helpers:delete_user(Config, ?NON_GUEST_USERNAME),
+ http_put(Config, "/users/abc", [{tags, <<"management">>},
+ {password, <<"myuser">>}], [?CREATED, ?NO_CONTENT]),
+
+ %% clear the user's password
+ http_put(Config, "/users/abc", [{tags, <<"management">>}], [?CREATED, ?NO_CONTENT]),
+ assert_item(#{name => ?NON_GUEST_USERNAME,
+ tags => <<"management">>,
+ password_hash => <<>>,
+ hashing_algorithm => <<"rabbit_password_hashing_sha256">>},
+ http_get(Config, "/users/abc")),
+
+ http_put(Config, "/users/abc", [{tags, <<"impersonator">>}], [?CREATED, ?NO_CONTENT]),
+ assert_item(#{name => ?NON_GUEST_USERNAME,
+ tags => <<"impersonator">>,
+ password_hash => <<>>,
+ hashing_algorithm => <<"rabbit_password_hashing_sha256">>},
+ http_get(Config, "/users/abc")),
+
+ http_put(Config, "/users/abc", [{tags, <<"">>}], [?CREATED, ?NO_CONTENT]),
+ assert_item(#{name => ?NON_GUEST_USERNAME,
+ tags => <<"">>,
+ password_hash => <<>>,
+ hashing_algorithm => <<"rabbit_password_hashing_sha256">>},
+ http_get(Config, "/users/abc")),
+
+ http_delete(Config, "/users/abc", ?NO_CONTENT).
+
+permissions_validation_test(Config) ->
+ Good = [{configure, <<".*">>}, {write, <<".*">>}, {read, <<".*">>}],
+ http_put(Config, "/permissions/wrong/guest", Good, ?BAD_REQUEST),
+ http_put(Config, "/permissions/%2F/wrong", Good, ?BAD_REQUEST),
+ http_put(Config, "/permissions/%2F/guest",
+ [{configure, <<"[">>}, {write, <<".*">>}, {read, <<".*">>}],
+ ?BAD_REQUEST),
+ http_put(Config, "/permissions/%2F/guest", Good, {group, '2xx'}),
+ passed.
+
+permissions_list_test(Config) ->
+ AllPerms = http_get(Config, "/permissions"),
+ GuestInDefaultVHost = #{user => <<"guest">>,
+ vhost => <<"/">>,
+ configure => <<".*">>,
+ write => <<".*">>,
+ read => <<".*">>},
+
+ ?assert(lists:member(GuestInDefaultVHost, AllPerms)),
+
+ http_put(Config, "/users/myuser1", [{password, <<"">>}, {tags, <<"administrator">>}],
+ {group, '2xx'}),
+ http_put(Config, "/users/myuser2", [{password, <<"">>}, {tags, <<"administrator">>}],
+ {group, '2xx'}),
+ http_put(Config, "/vhosts/myvhost1", none, {group, '2xx'}),
+ http_put(Config, "/vhosts/myvhost2", none, {group, '2xx'}),
+
+ Perms = [{configure, <<"foo">>}, {write, <<"foo">>}, {read, <<"foo">>}],
+ http_put(Config, "/permissions/myvhost1/myuser1", Perms, {group, '2xx'}),
+ http_put(Config, "/permissions/myvhost2/myuser1", Perms, {group, '2xx'}),
+ http_put(Config, "/permissions/myvhost1/myuser2", Perms, {group, '2xx'}),
+
+ %% The user that creates a vhost automatically gets full permissions on it
+ %% See https://github.com/rabbitmq/rabbitmq-management/issues/444
+ ?assertEqual(6, length(http_get(Config, "/permissions"))),
+ ?assertEqual(2, length(http_get(Config, "/users/myuser1/permissions"))),
+ ?assertEqual(1, length(http_get(Config, "/users/myuser2/permissions"))),
+
+ http_get(Config, "/users/notmyuser/permissions", ?NOT_FOUND),
+ http_get(Config, "/vhosts/notmyvhost/permissions", ?NOT_FOUND),
+
+ http_delete(Config, "/users/myuser1", {group, '2xx'}),
+ http_delete(Config, "/users/myuser2", {group, '2xx'}),
+ http_delete(Config, "/vhosts/myvhost1", {group, '2xx'}),
+ http_delete(Config, "/vhosts/myvhost2", {group, '2xx'}),
+ passed.
+
+permissions_test(Config) ->
+ http_put(Config, "/users/myuser", [{password, <<"myuser">>}, {tags, <<"administrator">>}],
+ {group, '2xx'}),
+ http_put(Config, "/vhosts/myvhost", none, {group, '2xx'}),
+
+ http_put(Config, "/permissions/myvhost/myuser",
+ [{configure, <<"foo">>}, {write, <<"foo">>}, {read, <<"foo">>}],
+ {group, '2xx'}),
+
+ Permission = #{user => <<"myuser">>,
+ vhost => <<"myvhost">>,
+ configure => <<"foo">>,
+ write => <<"foo">>,
+ read => <<"foo">>},
+ %% The user that creates a vhost automatically gets full permissions on it
+ %% See https://github.com/rabbitmq/rabbitmq-management/issues/444
+ PermissionOwner = #{user => <<"guest">>,
+ vhost => <<"myvhost">>,
+ configure => <<".*">>,
+ write => <<".*">>,
+ read => <<".*">>},
+ Default = #{user => <<"guest">>,
+ vhost => <<"/">>,
+ configure => <<".*">>,
+ write => <<".*">>,
+ read => <<".*">>},
+ Permission = http_get(Config, "/permissions/myvhost/myuser"),
+ assert_list(lists:sort([Permission, PermissionOwner, Default]),
+ lists:sort(http_get(Config, "/permissions"))),
+ assert_list([Permission], http_get(Config, "/users/myuser/permissions")),
+ http_delete(Config, "/permissions/myvhost/myuser", {group, '2xx'}),
+ http_get(Config, "/permissions/myvhost/myuser", ?NOT_FOUND),
+
+ http_delete(Config, "/users/myuser", {group, '2xx'}),
+ http_delete(Config, "/vhosts/myvhost", {group, '2xx'}),
+ passed.
+
+topic_permissions_list_test(Config) ->
+ http_put(Config, "/users/myuser1", [{password, <<"">>}, {tags, <<"administrator">>}],
+ {group, '2xx'}),
+ http_put(Config, "/users/myuser2", [{password, <<"">>}, {tags, <<"administrator">>}],
+ {group, '2xx'}),
+ http_put(Config, "/vhosts/myvhost1", none, {group, '2xx'}),
+ http_put(Config, "/vhosts/myvhost2", none, {group, '2xx'}),
+
+ TopicPerms = [{exchange, <<"amq.topic">>}, {write, <<"^a">>}, {read, <<"^b">>}],
+ http_put(Config, "/topic-permissions/myvhost1/myuser1", TopicPerms, {group, '2xx'}),
+ http_put(Config, "/topic-permissions/myvhost2/myuser1", TopicPerms, {group, '2xx'}),
+ http_put(Config, "/topic-permissions/myvhost1/myuser2", TopicPerms, {group, '2xx'}),
+
+ TopicPerms2 = [{exchange, <<"amq.direct">>}, {write, <<"^a">>}, {read, <<"^b">>}],
+ http_put(Config, "/topic-permissions/myvhost1/myuser1", TopicPerms2, {group, '2xx'}),
+
+ 4 = length(http_get(Config, "/topic-permissions")),
+ 3 = length(http_get(Config, "/users/myuser1/topic-permissions")),
+ 1 = length(http_get(Config, "/users/myuser2/topic-permissions")),
+ 3 = length(http_get(Config, "/vhosts/myvhost1/topic-permissions")),
+ 1 = length(http_get(Config, "/vhosts/myvhost2/topic-permissions")),
+
+ http_get(Config, "/users/notmyuser/topic-permissions", ?NOT_FOUND),
+ http_get(Config, "/vhosts/notmyvhost/topic-permissions", ?NOT_FOUND),
+
+ %% Delete permissions for a single vhost-user-exchange combination
+ http_delete(Config, "/topic-permissions/myvhost1/myuser1/amq.direct", {group, '2xx'}),
+ 3 = length(http_get(Config, "/topic-permissions")),
+
+ http_delete(Config, "/users/myuser1", {group, '2xx'}),
+ http_delete(Config, "/users/myuser2", {group, '2xx'}),
+ http_delete(Config, "/vhosts/myvhost1", {group, '2xx'}),
+ http_delete(Config, "/vhosts/myvhost2", {group, '2xx'}),
+ passed.
+
+topic_permissions_test(Config) ->
+ http_put(Config, "/users/myuser1", [{password, <<"">>}, {tags, <<"administrator">>}],
+ {group, '2xx'}),
+ http_put(Config, "/users/myuser2", [{password, <<"">>}, {tags, <<"administrator">>}],
+ {group, '2xx'}),
+ http_put(Config, "/vhosts/myvhost1", none, {group, '2xx'}),
+ http_put(Config, "/vhosts/myvhost2", none, {group, '2xx'}),
+
+ TopicPerms = [{exchange, <<"amq.topic">>}, {write, <<"^a">>}, {read, <<"^b">>}],
+ http_put(Config, "/topic-permissions/myvhost1/myuser1", TopicPerms, {group, '2xx'}),
+ http_put(Config, "/topic-permissions/myvhost2/myuser1", TopicPerms, {group, '2xx'}),
+ http_put(Config, "/topic-permissions/myvhost1/myuser2", TopicPerms, {group, '2xx'}),
+
+ 3 = length(http_get(Config, "/topic-permissions")),
+ 1 = length(http_get(Config, "/topic-permissions/myvhost1/myuser1")),
+ 1 = length(http_get(Config, "/topic-permissions/myvhost2/myuser1")),
+ 1 = length(http_get(Config, "/topic-permissions/myvhost1/myuser2")),
+
+ http_get(Config, "/topic-permissions/myvhost1/notmyuser", ?NOT_FOUND),
+ http_get(Config, "/topic-permissions/notmyvhost/myuser2", ?NOT_FOUND),
+
+ http_delete(Config, "/users/myuser1", {group, '2xx'}),
+ http_delete(Config, "/users/myuser2", {group, '2xx'}),
+ http_delete(Config, "/vhosts/myvhost1", {group, '2xx'}),
+ http_delete(Config, "/vhosts/myvhost2", {group, '2xx'}),
+ passed.
+
+connections_test(Config) ->
+ {Conn, _Ch} = open_connection_and_channel(Config),
+ LocalPort = local_port(Conn),
+ Path = binary_to_list(
+ rabbit_mgmt_format:print(
+ "/connections/127.0.0.1%3A~w%20-%3E%20127.0.0.1%3A~w",
+ [LocalPort, amqp_port(Config)])),
+ timer:sleep(1500),
+ Connection = http_get(Config, Path, ?OK),
+ ?assert(maps:is_key(recv_oct, Connection)),
+ ?assert(maps:is_key(garbage_collection, Connection)),
+ ?assert(maps:is_key(send_oct_details, Connection)),
+ ?assert(maps:is_key(reductions, Connection)),
+ http_delete(Config, Path, {group, '2xx'}),
+ %% TODO rabbit_reader:shutdown/2 returns before the connection is
+ %% closed. It may not be worth fixing.
+ Fun = fun() ->
+ try
+ http_get(Config, Path, ?NOT_FOUND),
+ true
+ catch
+ _:_ ->
+ false
+ end
+ end,
+ wait_until(Fun, 60),
+ close_connection(Conn),
+ passed.
+
+multiple_invalid_connections_test(Config) ->
+ Count = 100,
+ spawn_invalid(Config, Count),
+ Page0 = http_get(Config, "/connections?page=1&page_size=100", ?OK),
+ wait_for_answers(Count),
+ Page1 = http_get(Config, "/connections?page=1&page_size=100", ?OK),
+ ?assertEqual(0, maps:get(total_count, Page0)),
+ ?assertEqual(0, maps:get(total_count, Page1)),
+ passed.
+
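+%% Issues GET /overview with the given auth headers and asserts that the
+%% response has the expected status code.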
+test_auth(Config, Code, Headers) ->
+ {ok, {{_, Code, _}, _, _}} = req(Config, get, "/overview", Headers),
+ passed.
+
+exchanges_test(Config) ->
+ %% Can list exchanges
+ http_get(Config, "/exchanges", {group, '2xx'}),
+ %% Can pass booleans or strings
+ Good = [{type, <<"direct">>}, {durable, <<"true">>}],
+ http_put(Config, "/vhosts/myvhost", none, {group, '2xx'}),
+ http_get(Config, "/exchanges/myvhost/foo", ?NOT_FOUND),
+ http_put(Config, "/exchanges/myvhost/foo", Good, {group, '2xx'}),
+ http_put(Config, "/exchanges/myvhost/foo", Good, {group, '2xx'}),
+ http_get(Config, "/exchanges/%2F/foo", ?NOT_FOUND),
+ assert_item(#{name => <<"foo">>,
+ vhost => <<"myvhost">>,
+ type => <<"direct">>,
+ durable => true,
+ auto_delete => false,
+ internal => false,
+ arguments => #{}},
+ http_get(Config, "/exchanges/myvhost/foo")),
+ http_put(Config, "/exchanges/badvhost/bar", Good, ?NOT_FOUND),
+ http_put(Config, "/exchanges/myvhost/bar", [{type, <<"bad_exchange_type">>}],
+ ?BAD_REQUEST),
+ http_put(Config, "/exchanges/myvhost/bar", [{type, <<"direct">>},
+ {durable, <<"troo">>}],
+ ?BAD_REQUEST),
+ http_put(Config, "/exchanges/myvhost/foo", [{type, <<"direct">>}],
+ ?BAD_REQUEST),
+
+ http_delete(Config, "/exchanges/myvhost/foo", {group, '2xx'}),
+ http_delete(Config, "/exchanges/myvhost/foo", ?NOT_FOUND),
+
+ http_delete(Config, "/vhosts/myvhost", {group, '2xx'}),
+ http_get(Config, "/exchanges/badvhost", ?NOT_FOUND),
+ passed.
+
+queues_test(Config) ->
+ Good = [{durable, true}],
+ http_get(Config, "/queues/%2F/foo", ?NOT_FOUND),
+ http_put(Config, "/queues/%2F/foo", Good, {group, '2xx'}),
+ http_put(Config, "/queues/%2F/foo", Good, {group, '2xx'}),
+ http_get(Config, "/queues/%2F/foo", ?OK),
+
+ rabbit_ct_broker_helpers:add_vhost(Config, <<"downvhost">>),
+ rabbit_ct_broker_helpers:set_full_permissions(Config, <<"downvhost">>),
+ http_put(Config, "/queues/downvhost/foo", Good, {group, '2xx'}),
+ http_put(Config, "/queues/downvhost/bar", Good, {group, '2xx'}),
+
+ rabbit_ct_broker_helpers:force_vhost_failure(Config, <<"downvhost">>),
+ %% The vhost is down
+ Node = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ DownVHost = #{name => <<"downvhost">>, tracing => false, cluster_state => #{Node => <<"stopped">>}},
+ assert_item(DownVHost, http_get(Config, "/vhosts/downvhost")),
+
+ DownQueues = http_get(Config, "/queues/downvhost"),
+ DownQueue = http_get(Config, "/queues/downvhost/foo"),
+
+ assert_list([#{name => <<"bar">>,
+ vhost => <<"downvhost">>,
+ state => <<"stopped">>,
+ durable => true,
+ auto_delete => false,
+ exclusive => false,
+ arguments => #{}},
+ #{name => <<"foo">>,
+ vhost => <<"downvhost">>,
+ state => <<"stopped">>,
+ durable => true,
+ auto_delete => false,
+ exclusive => false,
+ arguments => #{}}], DownQueues),
+ assert_item(#{name => <<"foo">>,
+ vhost => <<"downvhost">>,
+ state => <<"stopped">>,
+ durable => true,
+ auto_delete => false,
+ exclusive => false,
+ arguments => #{}}, DownQueue),
+
+ http_put(Config, "/queues/badvhost/bar", Good, ?NOT_FOUND),
+ http_put(Config, "/queues/%2F/bar",
+ [{durable, <<"troo">>}],
+ ?BAD_REQUEST),
+ http_put(Config, "/queues/%2F/foo",
+ [{durable, false}],
+ ?BAD_REQUEST),
+
+ http_put(Config, "/queues/%2F/baz", Good, {group, '2xx'}),
+ Queues = http_get(Config, "/queues/%2F"),
+ Queue = http_get(Config, "/queues/%2F/foo"),
+ assert_list([#{name => <<"baz">>,
+ vhost => <<"/">>,
+ durable => true,
+ auto_delete => false,
+ exclusive => false,
+ arguments => #{}},
+ #{name => <<"foo">>,
+ vhost => <<"/">>,
+ durable => true,
+ auto_delete => false,
+ exclusive => false,
+ arguments => #{}}], Queues),
+ assert_item(#{name => <<"foo">>,
+ vhost => <<"/">>,
+ durable => true,
+ auto_delete => false,
+ exclusive => false,
+ arguments => #{}}, Queue),
+
+ http_delete(Config, "/queues/%2F/foo", {group, '2xx'}),
+ http_delete(Config, "/queues/%2F/baz", {group, '2xx'}),
+ http_delete(Config, "/queues/%2F/foo", ?NOT_FOUND),
+ http_get(Config, "/queues/badvhost", ?NOT_FOUND),
+
+ http_delete(Config, "/queues/downvhost/foo", {group, '2xx'}),
+ http_delete(Config, "/queues/downvhost/bar", {group, '2xx'}),
+ passed.
+
+quorum_queues_test(Config) ->
+ %% Test in a loop that no metrics are left behind after deleting a queue
+ quorum_queues_test_loop(Config, 5).
+
+quorum_queues_test_loop(_Config, 0) ->
+ passed;
+quorum_queues_test_loop(Config, N) ->
+ Good = [{durable, true}, {arguments, [{'x-queue-type', 'quorum'}]}],
+ http_get(Config, "/queues/%2f/qq", ?NOT_FOUND),
+ http_put(Config, "/queues/%2f/qq", Good, {group, '2xx'}),
+
+ {Conn, Ch} = open_connection_and_channel(Config),
+ Publish = fun() ->
+ amqp_channel:call(
+ Ch, #'basic.publish'{exchange = <<"">>,
+ routing_key = <<"qq">>},
+ #amqp_msg{payload = <<"message">>})
+ end,
+ Publish(),
+ Publish(),
+ wait_until(fun() ->
+ Num = maps:get(messages, http_get(Config, "/queues/%2f/qq?lengths_age=60&lengths_incr=5&msg_rates_age=60&msg_rates_incr=5&data_rates_age=60&data_rates_incr=5"), undefined),
+ ct:pal("wait_until got ~w", [Num]),
+ 2 == Num
+ end, 100),
+
+ http_delete(Config, "/queues/%2f/qq", {group, '2xx'}),
+ http_put(Config, "/queues/%2f/qq", Good, {group, '2xx'}),
+
+ wait_until(fun() ->
+ 0 == maps:get(messages, http_get(Config, "/queues/%2f/qq?lengths_age=60&lengths_incr=5&msg_rates_age=60&msg_rates_incr=5&data_rates_age=60&data_rates_incr=5"), undefined)
+ end, 100),
+
+ http_delete(Config, "/queues/%2f/qq", {group, '2xx'}),
+ close_connection(Conn),
+ quorum_queues_test_loop(Config, N-1).
+
+queues_well_formed_json_test(Config) ->
+ %% TODO This test should be extended to the whole API
+ Good = [{durable, true}],
+ http_put(Config, "/queues/%2F/foo", Good, {group, '2xx'}),
+ http_put(Config, "/queues/%2F/baz", Good, {group, '2xx'}),
+
+ Queues = http_get_no_map(Config, "/queues/%2F"),
+ %% Ensure keys are unique
+ [begin
+ Sorted = lists:sort(Q),
+ Sorted = lists:usort(Q)
+ end || Q <- Queues],
+
+ http_delete(Config, "/queues/%2F/foo", {group, '2xx'}),
+ http_delete(Config, "/queues/%2F/baz", {group, '2xx'}),
+ passed.
+
+bindings_test(Config) ->
+ XArgs = [{type, <<"direct">>}],
+ QArgs = #{},
+ http_put(Config, "/exchanges/%2F/myexchange", XArgs, {group, '2xx'}),
+ http_put(Config, "/queues/%2F/myqueue", QArgs, {group, '2xx'}),
+ BArgs = [{routing_key, <<"routing">>}, {arguments, []}],
+ http_post(Config, "/bindings/%2F/e/myexchange/q/myqueue", BArgs, {group, '2xx'}),
+ http_get(Config, "/bindings/%2F/e/myexchange/q/myqueue/routing", ?OK),
+ http_get(Config, "/bindings/%2F/e/myexchange/q/myqueue/rooting", ?NOT_FOUND),
+ Binding =
+ #{source => <<"myexchange">>,
+ vhost => <<"/">>,
+ destination => <<"myqueue">>,
+ destination_type => <<"queue">>,
+ routing_key => <<"routing">>,
+ arguments => #{},
+ properties_key => <<"routing">>},
+ DBinding =
+ #{source => <<"">>,
+ vhost => <<"/">>,
+ destination => <<"myqueue">>,
+ destination_type => <<"queue">>,
+ routing_key => <<"myqueue">>,
+ arguments => #{},
+ properties_key => <<"myqueue">>},
+ Binding = http_get(Config, "/bindings/%2F/e/myexchange/q/myqueue/routing"),
+ assert_list([Binding],
+ http_get(Config, "/bindings/%2F/e/myexchange/q/myqueue")),
+ assert_list([DBinding, Binding],
+ http_get(Config, "/queues/%2F/myqueue/bindings")),
+ assert_list([Binding],
+ http_get(Config, "/exchanges/%2F/myexchange/bindings/source")),
+ http_delete(Config, "/bindings/%2F/e/myexchange/q/myqueue/routing", {group, '2xx'}),
+ http_delete(Config, "/bindings/%2F/e/myexchange/q/myqueue/routing", ?NOT_FOUND),
+ http_delete(Config, "/exchanges/%2F/myexchange", {group, '2xx'}),
+ http_delete(Config, "/queues/%2F/myqueue", {group, '2xx'}),
+ http_get(Config, "/bindings/badvhost", ?NOT_FOUND),
+ http_get(Config, "/bindings/badvhost/myqueue/myexchange/routing", ?NOT_FOUND),
+ http_get(Config, "/bindings/%2F/e/myexchange/q/myqueue/routing", ?NOT_FOUND),
+ passed.
+
+bindings_post_test(Config) ->
+ XArgs = [{type, <<"direct">>}],
+ QArgs = #{},
+ BArgs = [{routing_key, <<"routing">>}, {arguments, [{foo, <<"bar">>}]}],
+ http_put(Config, "/exchanges/%2F/myexchange", XArgs, {group, '2xx'}),
+ http_put(Config, "/queues/%2F/myqueue", QArgs, {group, '2xx'}),
+ http_post(Config, "/bindings/%2F/e/myexchange/q/badqueue", BArgs, ?NOT_FOUND),
+ http_post(Config, "/bindings/%2F/e/badexchange/q/myqueue", BArgs, ?NOT_FOUND),
+
+ Headers1 = http_post(Config, "/bindings/%2F/e/myexchange/q/myqueue", #{}, {group, '2xx'}),
+ Want0 = "myqueue/\~",
+ ?assertEqual(Want0, pget("location", Headers1)),
+
+ Headers2 = http_post(Config, "/bindings/%2F/e/myexchange/q/myqueue", BArgs, {group, '2xx'}),
+ %% The args hash is calculated from the AMQP field table built from the
+ %% binding arguments.
+ Hash = table_hash([{<<"foo">>,longstr,<<"bar">>}]),
+ PropertiesKey = "routing\~" ++ Hash,
+
+ Want1 = "myqueue/" ++ PropertiesKey,
+ ?assertEqual(Want1, pget("location", Headers2)),
+
+ PropertiesKeyBin = list_to_binary(PropertiesKey),
+ Want2 = #{source => <<"myexchange">>,
+ vhost => <<"/">>,
+ destination => <<"myqueue">>,
+ destination_type => <<"queue">>,
+ routing_key => <<"routing">>,
+ arguments => #{foo => <<"bar">>},
+ properties_key => PropertiesKeyBin},
+ URI = "/bindings/%2F/e/myexchange/q/myqueue/" ++ PropertiesKey,
+ ?assertEqual(Want2, http_get(Config, URI, ?OK)),
+
+ http_get(Config, URI ++ "x", ?NOT_FOUND),
+ http_delete(Config, URI, {group, '2xx'}),
+ http_delete(Config, "/exchanges/%2F/myexchange", {group, '2xx'}),
+ http_delete(Config, "/queues/%2F/myqueue", {group, '2xx'}),
+ passed.
+
+bindings_null_routing_key_test(Config) ->
+ http_delete(Config, "/exchanges/%2F/myexchange", {one_of, [201, 404]}),
+ XArgs = [{type, <<"direct">>}],
+ QArgs = #{},
+ BArgs = [{routing_key, null}, {arguments, #{}}],
+ http_put(Config, "/exchanges/%2F/myexchange", XArgs, {group, '2xx'}),
+ http_put(Config, "/queues/%2F/myqueue", QArgs, {group, '2xx'}),
+ http_post(Config, "/bindings/%2F/e/myexchange/q/badqueue", BArgs, ?NOT_FOUND),
+ http_post(Config, "/bindings/%2F/e/badexchange/q/myqueue", BArgs, ?NOT_FOUND),
+
+ Headers1 = http_post(Config, "/bindings/%2F/e/myexchange/q/myqueue", #{}, {group, '2xx'}),
+ Want0 = "myqueue/\~",
+ ?assertEqual(Want0, pget("location", Headers1)),
+
+ Headers2 = http_post(Config, "/bindings/%2F/e/myexchange/q/myqueue", BArgs, {group, '2xx'}),
+ %% The args hash is calculated from the AMQP field table built from the
+ %% binding arguments.
+ Hash = table_hash([]),
+ PropertiesKey = "null\~" ++ Hash,
+
+ ?assertEqual("myqueue/null", pget("location", Headers2)),
+ Want1 = #{arguments => #{},
+ destination => <<"myqueue">>,
+ destination_type => <<"queue">>,
+ properties_key => <<"null">>,
+ routing_key => null,
+ source => <<"myexchange">>,
+ vhost => <<"/">>},
+ URI = "/bindings/%2F/e/myexchange/q/myqueue/" ++ PropertiesKey,
+ ?assertEqual(Want1, http_get(Config, URI, ?OK)),
+
+ http_get(Config, URI ++ "x", ?NOT_FOUND),
+ http_delete(Config, URI, {group, '2xx'}),
+ http_delete(Config, "/exchanges/%2F/myexchange", {group, '2xx'}),
+ http_delete(Config, "/queues/%2F/myqueue", {group, '2xx'}),
+ passed.
+
+bindings_e2e_test(Config) ->
+ BArgs = [{routing_key, <<"routing">>}, {arguments, []}],
+ http_post(Config, "/bindings/%2F/e/amq.direct/e/badexchange", BArgs, ?NOT_FOUND),
+ http_post(Config, "/bindings/%2F/e/badexchange/e/amq.fanout", BArgs, ?NOT_FOUND),
+ Headers = http_post(Config, "/bindings/%2F/e/amq.direct/e/amq.fanout", BArgs, {group, '2xx'}),
+ "amq.fanout/routing" = pget("location", Headers),
+ #{source := <<"amq.direct">>,
+ vhost := <<"/">>,
+ destination := <<"amq.fanout">>,
+ destination_type := <<"exchange">>,
+ routing_key := <<"routing">>,
+ arguments := #{},
+ properties_key := <<"routing">>} =
+ http_get(Config, "/bindings/%2F/e/amq.direct/e/amq.fanout/routing", ?OK),
+ http_delete(Config, "/bindings/%2F/e/amq.direct/e/amq.fanout/routing", {group, '2xx'}),
+ http_post(Config, "/bindings/%2F/e/amq.direct/e/amq.headers", BArgs, {group, '2xx'}),
+ Binding =
+ #{source => <<"amq.direct">>,
+ vhost => <<"/">>,
+ destination => <<"amq.headers">>,
+ destination_type => <<"exchange">>,
+ routing_key => <<"routing">>,
+ arguments => #{},
+ properties_key => <<"routing">>},
+ Binding = http_get(Config, "/bindings/%2F/e/amq.direct/e/amq.headers/routing"),
+ assert_list([Binding],
+ http_get(Config, "/bindings/%2F/e/amq.direct/e/amq.headers")),
+ assert_list([Binding],
+ http_get(Config, "/exchanges/%2F/amq.direct/bindings/source")),
+ assert_list([Binding],
+ http_get(Config, "/exchanges/%2F/amq.headers/bindings/destination")),
+ http_delete(Config, "/bindings/%2F/e/amq.direct/e/amq.headers/routing", {group, '2xx'}),
+ http_get(Config, "/bindings/%2F/e/amq.direct/e/amq.headers/rooting", ?NOT_FOUND),
+ passed.
+
+permissions_administrator_test(Config) ->
+ http_put(Config, "/users/isadmin", [{password, <<"isadmin">>},
+ {tags, <<"administrator">>}], {group, '2xx'}),
+ http_put(Config, "/users/notadmin", [{password, <<"notadmin">>},
+ {tags, <<"administrator">>}], {group, '2xx'}),
+ http_put(Config, "/users/notadmin", [{password, <<"notadmin">>},
+ {tags, <<"management">>}], {group, '2xx'}),
+ Test =
+ fun(Path) ->
+ http_get(Config, Path, "notadmin", "notadmin", ?NOT_AUTHORISED),
+ http_get(Config, Path, "isadmin", "isadmin", ?OK),
+ http_get(Config, Path, "guest", "guest", ?OK)
+ end,
+ Test("/vhosts/%2F"),
+ Test("/vhosts/%2F/permissions"),
+ Test("/users"),
+ Test("/users/guest"),
+ Test("/users/guest/permissions"),
+ Test("/permissions"),
+ Test("/permissions/%2F/guest"),
+ http_delete(Config, "/users/notadmin", {group, '2xx'}),
+ http_delete(Config, "/users/isadmin", {group, '2xx'}),
+ passed.
+
+permissions_vhost_test(Config) ->
+ QArgs = #{},
+ PermArgs = [{configure, <<".*">>}, {write, <<".*">>}, {read, <<".*">>}],
+ http_put(Config, "/users/myadmin", [{password, <<"myadmin">>},
+ {tags, <<"administrator">>}], {group, '2xx'}),
+ http_put(Config, "/users/myuser", [{password, <<"myuser">>},
+ {tags, <<"management">>}], {group, '2xx'}),
+ http_put(Config, "/vhosts/myvhost1", none, {group, '2xx'}),
+ http_put(Config, "/vhosts/myvhost2", none, {group, '2xx'}),
+ http_put(Config, "/permissions/myvhost1/myuser", PermArgs, {group, '2xx'}),
+ http_put(Config, "/permissions/myvhost1/guest", PermArgs, {group, '2xx'}),
+ http_put(Config, "/permissions/myvhost2/guest", PermArgs, {group, '2xx'}),
+ assert_list([#{name => <<"/">>},
+ #{name => <<"myvhost1">>},
+ #{name => <<"myvhost2">>}], http_get(Config, "/vhosts", ?OK)),
+ assert_list([#{name => <<"myvhost1">>}],
+ http_get(Config, "/vhosts", "myuser", "myuser", ?OK)),
+ http_put(Config, "/queues/myvhost1/myqueue", QArgs, {group, '2xx'}),
+ http_put(Config, "/queues/myvhost2/myqueue", QArgs, {group, '2xx'}),
+ Test1 =
+ fun(Path) ->
+ Results = http_get(Config, Path, "myuser", "myuser", ?OK),
+ [case maps:get(vhost, Result) of
+ <<"myvhost2">> ->
+ throw({got_result_from_vhost2_in, Path, Result});
+ _ ->
+ ok
+ end || Result <- Results]
+ end,
+ Test2 =
+ fun(Path1, Path2) ->
+ http_get(Config, Path1 ++ "/myvhost1/" ++ Path2, "myuser", "myuser",
+ ?OK),
+ http_get(Config, Path1 ++ "/myvhost2/" ++ Path2, "myuser", "myuser",
+ ?NOT_AUTHORISED)
+ end,
+ Test3 =
+ fun(Path1) ->
+ http_get(Config, Path1 ++ "/myvhost1/", "myadmin", "myadmin",
+ ?OK)
+ end,
+ Test1("/exchanges"),
+ Test2("/exchanges", ""),
+ Test2("/exchanges", "amq.direct"),
+ Test3("/exchanges"),
+ Test1("/queues"),
+ Test2("/queues", ""),
+ Test3("/queues"),
+ Test2("/queues", "myqueue"),
+ Test1("/bindings"),
+ Test2("/bindings", ""),
+ Test3("/bindings"),
+ Test2("/queues", "myqueue/bindings"),
+ Test2("/exchanges", "amq.default/bindings/source"),
+ Test2("/exchanges", "amq.default/bindings/destination"),
+ Test2("/bindings", "e/amq.default/q/myqueue"),
+ Test2("/bindings", "e/amq.default/q/myqueue/myqueue"),
+ http_delete(Config, "/vhosts/myvhost1", {group, '2xx'}),
+ http_delete(Config, "/vhosts/myvhost2", {group, '2xx'}),
+ http_delete(Config, "/users/myuser", {group, '2xx'}),
+ http_delete(Config, "/users/myadmin", {group, '2xx'}),
+ passed.
+
+permissions_amqp_test(Config) ->
+ %% Just test that it works at all, not that it works in all possible cases.
+ QArgs = #{},
+ PermArgs = [{configure, <<"foo.*">>}, {write, <<"foo.*">>},
+ {read, <<"foo.*">>}],
+ http_put(Config, "/users/myuser", [{password, <<"myuser">>},
+ {tags, <<"management">>}], {group, '2xx'}),
+ http_put(Config, "/permissions/%2F/myuser", PermArgs, {group, '2xx'}),
+ http_put(Config, "/queues/%2F/bar-queue", QArgs, "myuser", "myuser",
+ ?NOT_AUTHORISED),
+ http_put(Config, "/queues/%2F/bar-queue", QArgs, "nonexistent", "nonexistent",
+ ?NOT_AUTHORISED),
+ http_delete(Config, "/users/myuser", {group, '2xx'}),
+ passed.
+
+%% Opens a new connection and a channel on it.
+%% The channel is not managed by rabbit_ct_client_helpers and
+%% should be explicitly closed by the caller.
+open_connection_and_channel(Config) ->
+ Conn = rabbit_ct_client_helpers:open_connection(Config, 0),
+ {ok, Ch} = amqp_connection:open_channel(Conn),
+ {Conn, Ch}.
+
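+%% Opens an authenticated AMQP connection and returns it together with the
+%% HTTP API paths for the connection, its first channel, and the
+%% connection's channel listing.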
+get_conn(Config, Username, Password) ->
+ Port = amqp_port(Config),
+ {ok, Conn} = amqp_connection:start(#amqp_params_network{
+ port = Port,
+ username = list_to_binary(Username),
+ password = list_to_binary(Password)}),
+ LocalPort = local_port(Conn),
+ ConnPath = rabbit_misc:format(
+ "/connections/127.0.0.1%3A~w%20-%3E%20127.0.0.1%3A~w",
+ [LocalPort, Port]),
+ ChPath = rabbit_misc:format(
+ "/channels/127.0.0.1%3A~w%20-%3E%20127.0.0.1%3A~w%20(1)",
+ [LocalPort, Port]),
+ ConnChPath = rabbit_misc:format(
+ "/connections/127.0.0.1%3A~w%20-%3E%20127.0.0.1%3A~w/channels",
+ [LocalPort, Port]),
+ {Conn, ConnPath, ChPath, ConnChPath}.
+
+permissions_connection_channel_consumer_test(Config) ->
+ PermArgs = [{configure, <<".*">>}, {write, <<".*">>}, {read, <<".*">>}],
+ http_put(Config, "/users/user", [{password, <<"user">>},
+ {tags, <<"management">>}], {group, '2xx'}),
+ http_put(Config, "/permissions/%2F/user", PermArgs, {group, '2xx'}),
+ http_put(Config, "/users/monitor", [{password, <<"monitor">>},
+ {tags, <<"monitoring">>}], {group, '2xx'}),
+ http_put(Config, "/permissions/%2F/monitor", PermArgs, {group, '2xx'}),
+ http_put(Config, "/queues/%2F/test", #{}, {group, '2xx'}),
+
+ {Conn1, UserConn, UserCh, UserConnCh} = get_conn(Config, "user", "user"),
+ {Conn2, MonConn, MonCh, MonConnCh} = get_conn(Config, "monitor", "monitor"),
+ {Conn3, AdmConn, AdmCh, AdmConnCh} = get_conn(Config, "guest", "guest"),
+ {ok, Ch1} = amqp_connection:open_channel(Conn1),
+ {ok, Ch2} = amqp_connection:open_channel(Conn2),
+ {ok, Ch3} = amqp_connection:open_channel(Conn3),
+ [amqp_channel:subscribe(
+ Ch, #'basic.consume'{queue = <<"test">>}, self()) ||
+ Ch <- [Ch1, Ch2, Ch3]],
+ timer:sleep(1500),
+ AssertLength = fun (Path, User, Len) ->
+ Res = http_get(Config, Path, User, User, ?OK),
+ ?assertEqual(Len, length(Res))
+ end,
+ [begin
+ AssertLength(P, "user", 1),
+ AssertLength(P, "monitor", 3),
+ AssertLength(P, "guest", 3)
+ end || P <- ["/connections", "/channels", "/consumers", "/consumers/%2F"]],
+
+ AssertRead = fun(Path, UserStatus) ->
+ http_get(Config, Path, "user", "user", UserStatus),
+ http_get(Config, Path, "monitor", "monitor", ?OK),
+ http_get(Config, Path, ?OK)
+ end,
+ AssertRead(UserConn, ?OK),
+ AssertRead(MonConn, ?NOT_AUTHORISED),
+ AssertRead(AdmConn, ?NOT_AUTHORISED),
+ AssertRead(UserCh, ?OK),
+ AssertRead(MonCh, ?NOT_AUTHORISED),
+ AssertRead(AdmCh, ?NOT_AUTHORISED),
+ AssertRead(UserConnCh, ?OK),
+ AssertRead(MonConnCh, ?NOT_AUTHORISED),
+ AssertRead(AdmConnCh, ?NOT_AUTHORISED),
+
+ AssertClose = fun(Path, User, Status) ->
+ http_delete(Config, Path, User, User, Status)
+ end,
+ AssertClose(UserConn, "monitor", ?NOT_AUTHORISED),
+ AssertClose(MonConn, "user", ?NOT_AUTHORISED),
+ AssertClose(AdmConn, "guest", {group, '2xx'}),
+ AssertClose(MonConn, "guest", {group, '2xx'}),
+ AssertClose(UserConn, "user", {group, '2xx'}),
+
+ http_delete(Config, "/users/user", {group, '2xx'}),
+ http_delete(Config, "/users/monitor", {group, '2xx'}),
+ http_get(Config, "/connections/foo", ?NOT_FOUND),
+ http_get(Config, "/channels/foo", ?NOT_FOUND),
+ http_delete(Config, "/queues/%2F/test", {group, '2xx'}),
+ passed.
+
+consumers_cq_test(Config) ->
+ consumers_test(Config, [{'x-queue-type', <<"classic">>}]).
+
+consumers_qq_test(Config) ->
+ consumers_test(Config, [{'x-queue-type', <<"quorum">>}]).
+
+consumers_test(Config, Args) ->
+ QArgs = [{auto_delete, false}, {durable, true},
+ {arguments, Args}],
+ http_put(Config, "/queues/%2F/test", QArgs, {group, '2xx'}),
+ {Conn, _ConnPath, _ChPath, _ConnChPath} = get_conn(Config, "guest", "guest"),
+ {ok, Ch} = amqp_connection:open_channel(Conn),
+ amqp_channel:subscribe(
+ Ch, #'basic.consume'{queue = <<"test">>,
+ no_ack = false,
+ consumer_tag = <<"my-ctag">> }, self()),
+ timer:sleep(1500),
+ assert_list([#{exclusive => false,
+ ack_required => true,
+ active => true,
+ activity_status => <<"up">>,
+ consumer_tag => <<"my-ctag">>}], http_get(Config, "/consumers")),
+ amqp_connection:close(Conn),
+ http_delete(Config, "/queues/%2F/test", {group, '2xx'}),
+ passed.
+
+single_active_consumer_cq_test(Config) ->
+ single_active_consumer(Config,
+ "/queues/%2F/single-active-consumer-cq",
+ <<"single-active-consumer-cq">>,
+ [{'x-queue-type', <<"classic">>}]).
+
+single_active_consumer_qq_test(Config) ->
+ single_active_consumer(Config,
+ "/queues/%2F/single-active-consumer-qq",
+ <<"single-active-consumer-qq">>,
+ [{'x-queue-type', <<"quorum">>}]).
+
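+%% Declares a single-active-consumer queue, attaches two consumers, and
+%% verifies that only the first is active; once its channel is closed, the
+%% waiting consumer is promoted to single_active.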
+single_active_consumer(Config, Url, QName, Args) ->
+ QArgs = [{auto_delete, false}, {durable, true},
+ {arguments, [{'x-single-active-consumer', true}] ++ Args}],
+ http_put(Config, Url, QArgs, {group, '2xx'}),
+ {Conn, _ConnPath, _ChPath, _ConnChPath} = get_conn(Config, "guest", "guest"),
+ {ok, Ch} = amqp_connection:open_channel(Conn),
+ amqp_channel:subscribe(
+ Ch, #'basic.consume'{queue = QName,
+ no_ack = true,
+ consumer_tag = <<"1">> }, self()),
+ {ok, Ch2} = amqp_connection:open_channel(Conn),
+ amqp_channel:subscribe(
+ Ch2, #'basic.consume'{queue = QName,
+ no_ack = true,
+ consumer_tag = <<"2">> }, self()),
+ timer:sleep(1500),
+ assert_list([#{exclusive => false,
+ ack_required => false,
+ active => true,
+ activity_status => <<"single_active">>,
+ consumer_tag => <<"1">>},
+ #{exclusive => false,
+ ack_required => false,
+ active => false,
+ activity_status => <<"waiting">>,
+ consumer_tag => <<"2">>}], http_get(Config, "/consumers")),
+ amqp_channel:close(Ch),
+ timer:sleep(1500),
+ assert_list([#{exclusive => false,
+ ack_required => false,
+ active => true,
+ activity_status => <<"single_active">>,
+ consumer_tag => <<"2">>}], http_get(Config, "/consumers")),
+ amqp_connection:close(Conn),
+ http_delete(Config, Url, {group, '2xx'}),
+ passed.
+
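+%% Definition export/import round trip: create an item, check that it
+%% appears in GET /definitions, delete it, POST the export back, and verify
+%% the item is recreated in its original form.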
+defs(Config, Key, URI, CreateMethod, Args) ->
+ defs(Config, Key, URI, CreateMethod, Args,
+ fun(URI2) -> http_delete(Config, URI2, {group, '2xx'}) end).
+
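+%% Runs the defs/5 round trip twice: once against the default vhost and
+%% once against a freshly created "test" vhost, substituting <vhost> in
+%% both the URI and the expected args.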
+defs_v(Config, Key, URI, CreateMethod, Args) ->
+ Rep1 = fun (S, S2) -> re:replace(S, "<vhost>", S2, [{return, list}]) end,
+ ReplaceVHostInArgs = fun(M, V2) -> maps:map(fun(vhost, _) -> V2;
+ (_, V1) -> V1 end, M) end,
+
+ %% Test against default vhost
+ defs(Config, Key, Rep1(URI, "%2F"), CreateMethod, ReplaceVHostInArgs(Args, <<"/">>)),
+
+ %% Test against new vhost
+ http_put(Config, "/vhosts/test", none, {group, '2xx'}),
+ PermArgs = [{configure, <<".*">>}, {write, <<".*">>}, {read, <<".*">>}],
+ http_put(Config, "/permissions/test/guest", PermArgs, {group, '2xx'}),
+ DeleteFun0 = fun(URI2) ->
+ http_delete(Config, URI2, {group, '2xx'})
+ end,
+ DeleteFun1 = fun(_) ->
+ http_delete(Config, "/vhosts/test", {group, '2xx'})
+ end,
+ defs(Config, Key, Rep1(URI, "test"),
+ CreateMethod, ReplaceVHostInArgs(Args, <<"test">>),
+ DeleteFun0, DeleteFun1).
+
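+%% Creates the item with PUT or POST and returns the resource URI, taken
+%% from the Location header in the POST case.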
+create(Config, CreateMethod, URI, Args) ->
+ case CreateMethod of
+ put -> http_put(Config, URI, Args, {group, '2xx'}),
+ URI;
+ put_update -> http_put(Config, URI, Args, {group, '2xx'}),
+ URI;
+ post -> Headers = http_post(Config, URI, Args, {group, '2xx'}),
+ rabbit_web_dispatch_util:unrelativise(
+ URI, pget("location", Headers))
+ end.
+
+defs(Config, Key, URI, CreateMethod, Args, DeleteFun) ->
+ defs(Config, Key, URI, CreateMethod, Args, DeleteFun, DeleteFun).
+
+defs(Config, Key, URI, CreateMethod, Args, DeleteFun0, DeleteFun1) ->
+ %% Create the item
+ URI2 = create(Config, CreateMethod, URI, Args),
+ %% Make sure it ends up in definitions
+ Definitions = http_get(Config, "/definitions", ?OK),
+ true = lists:any(fun(I) -> test_item(Args, I) end, maps:get(Key, Definitions)),
+
+ %% Delete it
+ DeleteFun0(URI2),
+
+ %% Post the definitions back, it should get recreated in correct form
+ http_post(Config, "/definitions", Definitions, {group, '2xx'}),
+ assert_item(Args, http_get(Config, URI2, ?OK)),
+
+ %% And delete it again
+ DeleteFun1(URI2),
+
+ passed.
+
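+%% The definitions tests need a runtime parameter component and a policy
+%% validator registered on the broker; these helpers toggle them via RPC.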
+register_parameters_and_policy_validator(Config) ->
+ rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_mgmt_runtime_parameters_util, register, []),
+ rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_mgmt_runtime_parameters_util, register_policy_validator, []).
+
+unregister_parameters_and_policy_validator(Config) ->
+ rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_mgmt_runtime_parameters_util, unregister_policy_validator, []),
+ rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_mgmt_runtime_parameters_util, unregister, []).
+
+definitions_test(Config) ->
+ register_parameters_and_policy_validator(Config),
+
+ defs_v(Config, queues, "/queues/<vhost>/my-queue", put,
+ #{name => <<"my-queue">>,
+ durable => true}),
+ defs_v(Config, exchanges, "/exchanges/<vhost>/my-exchange", put,
+ #{name => <<"my-exchange">>,
+ type => <<"direct">>}),
+ defs_v(Config, bindings, "/bindings/<vhost>/e/amq.direct/e/amq.fanout", post,
+ #{routing_key => <<"routing">>, arguments => #{}}),
+ defs_v(Config, policies, "/policies/<vhost>/my-policy", put,
+ #{vhost => vhost,
+ name => <<"my-policy">>,
+ pattern => <<".*">>,
+ definition => #{testpos => [1, 2, 3]},
+ priority => 1}),
+ defs_v(Config, parameters, "/parameters/test/<vhost>/good", put,
+ #{vhost => vhost,
+ component => <<"test">>,
+ name => <<"good">>,
+ value => <<"ignore">>}),
+ defs(Config, global_parameters, "/global-parameters/good", put,
+ #{name => <<"good">>,
+ value => #{a => <<"b">>}}),
+ defs(Config, users, "/users/myuser", put,
+ #{name => <<"myuser">>,
+ password_hash => <<"WAbU0ZIcvjTpxM3Q3SbJhEAM2tQ=">>,
+ hashing_algorithm => <<"rabbit_password_hashing_sha256">>,
+ tags => <<"management">>}),
+ defs(Config, vhosts, "/vhosts/myvhost", put,
+ #{name => <<"myvhost">>}),
+ defs(Config, permissions, "/permissions/%2F/guest", put,
+ #{user => <<"guest">>,
+ vhost => <<"/">>,
+ configure => <<"c">>,
+ write => <<"w">>,
+ read => <<"r">>}),
+ defs(Config, topic_permissions, "/topic-permissions/%2F/guest", put,
+ #{user => <<"guest">>,
+ vhost => <<"/">>,
+ exchange => <<"amq.topic">>,
+ write => <<"^a">>,
+ read => <<"^b">>}),
+
+ %% We just messed with guest's permissions
+ http_put(Config, "/permissions/%2F/guest",
+ #{configure => <<".*">>,
+ write => <<".*">>,
+ read => <<".*">>}, {group, '2xx'}),
+ BrokenConfig =
+ #{users => [],
+ vhosts => [],
+ permissions => [],
+ queues => [],
+ exchanges => [#{name => <<"not.direct">>,
+ vhost => <<"/">>,
+ type => <<"definitely not direct">>,
+ durable => true,
+ auto_delete => false,
+ arguments => []}
+ ],
+ bindings => []},
+ http_post(Config, "/definitions", BrokenConfig, ?BAD_REQUEST),
+
+ unregister_parameters_and_policy_validator(Config),
+ passed.
+
+long_definitions_test(Config) ->
+    %% Vhosts take time to start. Generate a bunch of them.
+ Vhosts = long_definitions_vhosts(long_definitions_test),
+ LongDefs =
+ #{users => [],
+ vhosts => Vhosts,
+ permissions => [],
+ queues => [],
+ exchanges => [],
+ bindings => []},
+ http_post(Config, "/definitions", LongDefs, {group, '2xx'}),
+ passed.
+
+long_definitions_multipart_test(Config) ->
+    %% Vhosts take time to start. Generate a bunch of them.
+ Vhosts = long_definitions_vhosts(long_definitions_multipart_test),
+ LongDefs =
+ #{users => [],
+ vhosts => Vhosts,
+ permissions => [],
+ queues => [],
+ exchanges => [],
+ bindings => []},
+ Data = binary_to_list(format_for_upload(LongDefs)),
+ CodeExp = {group, '2xx'},
+ Boundary = "------------long_definitions_multipart_test",
+ Body = format_multipart_filedata(Boundary, [{file, "file", Data}]),
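+    %% format_multipart_filedata/2 builds a standard multipart/form-data
+    %% body; schematically (a sketch, the helper's exact output may differ):
+    %%   --Boundary\r\n
+    %%   Content-Disposition: form-data; name="file"; filename="file"\r\n
+    %%   \r\n<definitions JSON>\r\n
+    %%   --Boundary--\r\n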
+ ContentType = lists:concat(["multipart/form-data; boundary=", Boundary]),
+ MoreHeaders = [{"content-type", ContentType}, {"content-length", integer_to_list(length(Body))}],
+ http_upload_raw(Config, post, "/definitions", Body, "guest", "guest", CodeExp, MoreHeaders),
+ passed.
+
+long_definitions_vhosts(long_definitions_test) ->
+ [#{name => <<"long_definitions_test-", (integer_to_binary(N))/binary>>} ||
+ N <- lists:seq(1, 120)];
+long_definitions_vhosts(long_definitions_multipart_test) ->
+ Bin = list_to_binary(lists:flatten(lists:duplicate(524288, "X"))),
+ [#{name => <<"long_definitions_test-", Bin/binary, (integer_to_binary(N))/binary>>} ||
+ N <- lists:seq(1, 16)].
+
+defs_vhost(Config, Key, URI, CreateMethod, Args) ->
+ Rep1 = fun (S, S2) -> re:replace(S, "<vhost>", S2, [{return, list}]) end,
+ ReplaceVHostInArgs = fun(M, V2) -> maps:map(fun(vhost, _) -> V2;
+ (_, V1) -> V1 end, M) end,
+
+ %% Create test vhost
+ http_put(Config, "/vhosts/test", none, {group, '2xx'}),
+ PermArgs = [{configure, <<".*">>}, {write, <<".*">>}, {read, <<".*">>}],
+ http_put(Config, "/permissions/test/guest", PermArgs, {group, '2xx'}),
+
+    %% Export from the default vhost, import into the test vhost
+ defs_vhost(Config, Key, URI, Rep1, "%2F", "test", CreateMethod,
+ ReplaceVHostInArgs(Args, <<"/">>), ReplaceVHostInArgs(Args, <<"test">>),
+ fun(URI2) -> http_delete(Config, URI2, {group, '2xx'}) end),
+
+    %% Export from the test vhost, import into the default vhost
+ defs_vhost(Config, Key, URI, Rep1, "test", "%2F", CreateMethod,
+ ReplaceVHostInArgs(Args, <<"test">>), ReplaceVHostInArgs(Args, <<"/">>),
+ fun(URI2) -> http_delete(Config, URI2, {group, '2xx'}) end),
+
+ %% Remove test vhost
+ http_delete(Config, "/vhosts/test", {group, '2xx'}).
+
+defs_vhost(Config, Key, URI0, Rep1, VHost1, VHost2, CreateMethod, Args1, Args2,
+ DeleteFun) ->
+ %% Create the item
+ URI2 = create(Config, CreateMethod, Rep1(URI0, VHost1), Args1),
+ %% Make sure it ends up in definitions
+ Definitions = http_get(Config, "/definitions/" ++ VHost1, ?OK),
+ true = lists:any(fun(I) -> test_item(Args1, I) end, maps:get(Key, Definitions)),
+
+ %% Make sure it is not in the other vhost
+ Definitions0 = http_get(Config, "/definitions/" ++ VHost2, ?OK),
+ false = lists:any(fun(I) -> test_item(Args2, I) end, maps:get(Key, Definitions0)),
+
+ %% Post the definitions back
+ http_post(Config, "/definitions/" ++ VHost2, Definitions, {group, '2xx'}),
+
+ %% Make sure it is now in the other vhost
+ Definitions1 = http_get(Config, "/definitions/" ++ VHost2, ?OK),
+ true = lists:any(fun(I) -> test_item(Args2, I) end, maps:get(Key, Definitions1)),
+
+ %% Delete it
+ DeleteFun(URI2),
+ URI3 = create(Config, CreateMethod, Rep1(URI0, VHost2), Args2),
+ DeleteFun(URI3),
+ passed.
+
+definitions_vhost_test(Config) ->
+    %% Ensures that definitions can be exported from one virtual host and
+    %% imported into another
+
+ register_parameters_and_policy_validator(Config),
+
+ defs_vhost(Config, queues, "/queues/<vhost>/my-queue", put,
+ #{name => <<"my-queue">>,
+ durable => true}),
+ defs_vhost(Config, exchanges, "/exchanges/<vhost>/my-exchange", put,
+ #{name => <<"my-exchange">>,
+ type => <<"direct">>}),
+ defs_vhost(Config, bindings, "/bindings/<vhost>/e/amq.direct/e/amq.fanout", post,
+ #{routing_key => <<"routing">>, arguments => #{}}),
+ defs_vhost(Config, policies, "/policies/<vhost>/my-policy", put,
+ #{vhost => vhost,
+ name => <<"my-policy">>,
+ pattern => <<".*">>,
+ definition => #{testpos => [1, 2, 3]},
+ priority => 1}),
+
+ defs_vhost(Config, parameters, "/parameters/vhost-limits/<vhost>/limits", put,
+ #{vhost => vhost,
+ name => <<"limits">>,
+ component => <<"vhost-limits">>,
+ value => #{ 'max-connections' => 100 }}),
+ Upload =
+ #{queues => [],
+ exchanges => [],
+ policies => [],
+ parameters => [],
+ bindings => []},
+ http_post(Config, "/definitions/othervhost", Upload, ?NOT_FOUND),
+
+ unregister_parameters_and_policy_validator(Config),
+ passed.
+
+definitions_password_test(Config) ->
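+    %% Hash-algorithm defaults exercised below: definitions exported by
+    %% 3.5.x imply MD5 password hashes; from 3.6.0 on, entries without an
+    %% explicit hashing_algorithm get the broker's configured
+    %% password_hashing_module (SHA-256 by default, SHA-512 after the
+    %% set_env further down).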
+    %% Import definitions from 3.5.x
+ Config35 = #{rabbit_version => <<"3.5.4">>,
+ users => [#{name => <<"myuser">>,
+ password_hash => <<"WAbU0ZIcvjTpxM3Q3SbJhEAM2tQ=">>,
+ tags => <<"management">>}
+ ]},
+ Expected35 = #{name => <<"myuser">>,
+ password_hash => <<"WAbU0ZIcvjTpxM3Q3SbJhEAM2tQ=">>,
+ hashing_algorithm => <<"rabbit_password_hashing_md5">>,
+ tags => <<"management">>},
+ http_post(Config, "/definitions", Config35, {group, '2xx'}),
+ Definitions35 = http_get(Config, "/definitions", ?OK),
+ ct:pal("Definitions35: ~p", [Definitions35]),
+ Users35 = maps:get(users, Definitions35),
+ true = lists:any(fun(I) -> test_item(Expected35, I) end, Users35),
+
+    %% Import definitions from 3.6.0
+ Config36 = #{rabbit_version => <<"3.6.0">>,
+ users => [#{name => <<"myuser">>,
+ password_hash => <<"WAbU0ZIcvjTpxM3Q3SbJhEAM2tQ=">>,
+ tags => <<"management">>}
+ ]},
+ Expected36 = #{name => <<"myuser">>,
+ password_hash => <<"WAbU0ZIcvjTpxM3Q3SbJhEAM2tQ=">>,
+ hashing_algorithm => <<"rabbit_password_hashing_sha256">>,
+ tags => <<"management">>},
+ http_post(Config, "/definitions", Config36, {group, '2xx'}),
+
+ Definitions36 = http_get(Config, "/definitions", ?OK),
+ Users36 = maps:get(users, Definitions36),
+ true = lists:any(fun(I) -> test_item(Expected36, I) end, Users36),
+
+ %% No hashing_algorithm provided
+ ConfigDefault = #{rabbit_version => <<"3.6.1">>,
+ users => [#{name => <<"myuser">>,
+ password_hash => <<"WAbU0ZIcvjTpxM3Q3SbJhEAM2tQ=">>,
+ tags => <<"management">>}
+ ]},
+ rabbit_ct_broker_helpers:rpc(Config, 0, application, set_env, [rabbit,
+ password_hashing_module,
+ rabbit_password_hashing_sha512]),
+
+ ExpectedDefault = #{name => <<"myuser">>,
+ password_hash => <<"WAbU0ZIcvjTpxM3Q3SbJhEAM2tQ=">>,
+ hashing_algorithm => <<"rabbit_password_hashing_sha512">>,
+ tags => <<"management">>},
+ http_post(Config, "/definitions", ConfigDefault, {group, '2xx'}),
+
+ DefinitionsDefault = http_get(Config, "/definitions", ?OK),
+ UsersDefault = maps:get(users, DefinitionsDefault),
+
+ true = lists:any(fun(I) -> test_item(ExpectedDefault, I) end, UsersDefault),
+ passed.
+
+definitions_remove_things_test(Config) ->
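+    %% Exclusive queues must not appear in exported definitions.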
+ {Conn, Ch} = open_connection_and_channel(Config),
+ amqp_channel:call(Ch, #'queue.declare'{ queue = <<"my-exclusive">>,
+ exclusive = true }),
+ http_get(Config, "/queues/%2F/my-exclusive", ?OK),
+ Definitions = http_get(Config, "/definitions", ?OK),
+ [] = maps:get(queues, Definitions),
+ [] = maps:get(exchanges, Definitions),
+ [] = maps:get(bindings, Definitions),
+ amqp_channel:close(Ch),
+ close_connection(Conn),
+ passed.
+
+definitions_server_named_queue_test(Config) ->
+ {Conn, Ch} = open_connection_and_channel(Config),
+    %% Declare a durable server-named queue so that its definition gets exported
+ #'queue.declare_ok'{queue = QName} =
+ amqp_channel:call(Ch, #'queue.declare'{queue = <<"">>, durable = true}),
+ Path = "/queues/%2F/" ++ rabbit_http_util:quote_plus(QName),
+ http_get(Config, Path, ?OK),
+ Definitions = http_get(Config, "/definitions", ?OK),
+ close_channel(Ch),
+ close_connection(Conn),
+ http_delete(Config, Path, {group, '2xx'}),
+ http_get(Config, Path, ?NOT_FOUND),
+ http_post(Config, "/definitions", Definitions, {group, '2xx'}),
+ %% amq.* entities are not imported
+ http_get(Config, Path, ?NOT_FOUND),
+ passed.
+
+definitions_with_charset_test(Config) ->
+ Path = "/definitions",
+ Body0 = http_get(Config, Path, ?OK),
+ Headers = [auth_header("guest", "guest")],
+ Url = uri_base_from(Config, 0) ++ Path,
+ Body1 = format_for_upload(Body0),
+ Request = {Url, Headers, "application/json; charset=utf-8", Body1},
+ {ok, {{_, ?NO_CONTENT, _}, _, []}} = httpc:request(post, Request, ?HTTPC_OPTS, []),
+ passed.
+
+aliveness_test(Config) ->
+ #{status := <<"ok">>} = http_get(Config, "/aliveness-test/%2F", ?OK),
+ http_get(Config, "/aliveness-test/foo", ?NOT_FOUND),
+ http_delete(Config, "/queues/%2F/aliveness-test", {group, '2xx'}),
+ passed.
+
+arguments_test(Config) ->
+ XArgs = [{type, <<"headers">>},
+ {arguments, [{'alternate-exchange', <<"amq.direct">>}]}],
+ QArgs = [{arguments, [{'x-expires', 1800000}]}],
+ BArgs = [{routing_key, <<"">>},
+ {arguments, [{'x-match', <<"all">>},
+ {foo, <<"bar">>}]}],
+ http_delete(Config, "/exchanges/%2F/myexchange", {one_of, [201, 404]}),
+ http_put(Config, "/exchanges/%2F/myexchange", XArgs, {group, '2xx'}),
+ http_put(Config, "/queues/%2F/arguments_test", QArgs, {group, '2xx'}),
+ http_post(Config, "/bindings/%2F/e/myexchange/q/arguments_test", BArgs, {group, '2xx'}),
+ Definitions = http_get(Config, "/definitions", ?OK),
+ http_delete(Config, "/exchanges/%2F/myexchange", {group, '2xx'}),
+ http_delete(Config, "/queues/%2F/arguments_test", {group, '2xx'}),
+ http_post(Config, "/definitions", Definitions, {group, '2xx'}),
+ #{'alternate-exchange' := <<"amq.direct">>} =
+ maps:get(arguments, http_get(Config, "/exchanges/%2F/myexchange", ?OK)),
+ #{'x-expires' := 1800000} =
+ maps:get(arguments, http_get(Config, "/queues/%2F/arguments_test", ?OK)),
+
+    ArgsTable = [{<<"foo">>, longstr, <<"bar">>}, {<<"x-match">>, longstr, <<"all">>}],
+ Hash = table_hash(ArgsTable),
+ PropertiesKey = [$~] ++ Hash,
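+    %% A specific binding is addressed by its "properties key": the routing
+    %% key, "~", and a hash of the arguments; here the routing key is empty.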
+
+ assert_item(
+ #{'x-match' => <<"all">>, foo => <<"bar">>},
+ maps:get(arguments,
+ http_get(Config, "/bindings/%2F/e/myexchange/q/arguments_test/" ++
+ PropertiesKey, ?OK))
+ ),
+ http_delete(Config, "/exchanges/%2F/myexchange", {group, '2xx'}),
+ http_delete(Config, "/queues/%2F/arguments_test", {group, '2xx'}),
+ passed.
+
+table_hash(Table) ->
+ binary_to_list(rabbit_mgmt_format:args_hash(Table)).
+
+arguments_table_test(Config) ->
+ Args = #{'upstreams' => [<<"amqp://localhost/%2F/upstream1">>,
+ <<"amqp://localhost/%2F/upstream2">>]},
+ XArgs = #{type => <<"headers">>,
+ arguments => Args},
+ http_delete(Config, "/exchanges/%2F/myexchange", {one_of, [201, 404]}),
+ http_put(Config, "/exchanges/%2F/myexchange", XArgs, {group, '2xx'}),
+ Definitions = http_get(Config, "/definitions", ?OK),
+ http_delete(Config, "/exchanges/%2F/myexchange", {group, '2xx'}),
+ http_post(Config, "/definitions", Definitions, {group, '2xx'}),
+ Args = maps:get(arguments, http_get(Config, "/exchanges/%2F/myexchange", ?OK)),
+ http_delete(Config, "/exchanges/%2F/myexchange", {group, '2xx'}),
+ passed.
+
+queue_purge_test(Config) ->
+ QArgs = #{},
+ http_put(Config, "/queues/%2F/myqueue", QArgs, {group, '2xx'}),
+ {Conn, Ch} = open_connection_and_channel(Config),
+ Publish = fun() ->
+ amqp_channel:call(
+ Ch, #'basic.publish'{exchange = <<"">>,
+ routing_key = <<"myqueue">>},
+ #amqp_msg{payload = <<"message">>})
+ end,
+ Publish(),
+ Publish(),
+ amqp_channel:call(
+ Ch, #'queue.declare'{queue = <<"exclusive">>, exclusive = true}),
+ {#'basic.get_ok'{}, _} =
+ amqp_channel:call(Ch, #'basic.get'{queue = <<"myqueue">>}),
+ http_delete(Config, "/queues/%2F/myqueue/contents", {group, '2xx'}),
+ http_delete(Config, "/queues/%2F/badqueue/contents", ?NOT_FOUND),
+ http_delete(Config, "/queues/%2F/exclusive/contents", ?BAD_REQUEST),
+ http_delete(Config, "/queues/%2F/exclusive", ?BAD_REQUEST),
+ #'basic.get_empty'{} =
+ amqp_channel:call(Ch, #'basic.get'{queue = <<"myqueue">>}),
+ close_channel(Ch),
+ close_connection(Conn),
+ http_delete(Config, "/queues/%2F/myqueue", {group, '2xx'}),
+ passed.
+
+queue_actions_test(Config) ->
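+    %% sync / cancel_sync trigger and cancel (classic mirrored) queue
+    %% synchronisation; an unknown action such as change_colour is rejected.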
+ http_put(Config, "/queues/%2F/q", #{}, {group, '2xx'}),
+ http_post(Config, "/queues/%2F/q/actions", [{action, sync}], {group, '2xx'}),
+ http_post(Config, "/queues/%2F/q/actions", [{action, cancel_sync}], {group, '2xx'}),
+ http_post(Config, "/queues/%2F/q/actions", [{action, change_colour}], ?BAD_REQUEST),
+ http_delete(Config, "/queues/%2F/q", {group, '2xx'}),
+ passed.
+
+exclusive_consumer_test(Config) ->
+ {Conn, Ch} = open_connection_and_channel(Config),
+ #'queue.declare_ok'{ queue = QName } =
+ amqp_channel:call(Ch, #'queue.declare'{exclusive = true}),
+ amqp_channel:subscribe(Ch, #'basic.consume'{queue = QName,
+ exclusive = true}, self()),
+ timer:sleep(1500), %% Sadly we need to sleep to let the stats update
+ http_get(Config, "/queues/%2F/"), %% Just check we don't blow up
+ close_channel(Ch),
+ close_connection(Conn),
+ passed.
+
+exclusive_queue_test(Config) ->
+ {Conn, Ch} = open_connection_and_channel(Config),
+ #'queue.declare_ok'{ queue = QName } =
+ amqp_channel:call(Ch, #'queue.declare'{exclusive = true}),
+ timer:sleep(1500), %% Sadly we need to sleep to let the stats update
+ Path = "/queues/%2F/" ++ rabbit_http_util:quote_plus(QName),
+ Queue = http_get(Config, Path),
+ assert_item(#{name => QName,
+ vhost => <<"/">>,
+ durable => false,
+ auto_delete => false,
+ exclusive => true,
+ arguments => #{}}, Queue),
+ amqp_channel:close(Ch),
+ close_connection(Conn),
+ passed.
+
+connections_channels_pagination_test(Config) ->
+    %% This test uses connections unmanaged by the Common Test helpers to
+    %% avoid connection caching
+ Conn = open_unmanaged_connection(Config),
+ {ok, Ch} = amqp_connection:open_channel(Conn),
+ Conn1 = open_unmanaged_connection(Config),
+ {ok, Ch1} = amqp_connection:open_channel(Conn1),
+ Conn2 = open_unmanaged_connection(Config),
+ {ok, Ch2} = amqp_connection:open_channel(Conn2),
+
+    %% Wait for stats to update
+ timer:sleep(1500),
+ PageOfTwo = http_get(Config, "/connections?page=1&page_size=2", ?OK),
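+    %% A paginated response is a map of the shape asserted below (sketch):
+    %%   #{total_count => All, filtered_count => Matching, item_count => OnPage,
+    %%     page => P, page_size => S, page_count => C, items => [...]}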
+ ?assertEqual(3, maps:get(total_count, PageOfTwo)),
+ ?assertEqual(3, maps:get(filtered_count, PageOfTwo)),
+ ?assertEqual(2, maps:get(item_count, PageOfTwo)),
+ ?assertEqual(1, maps:get(page, PageOfTwo)),
+ ?assertEqual(2, maps:get(page_size, PageOfTwo)),
+ ?assertEqual(2, maps:get(page_count, PageOfTwo)),
+
+
+ TwoOfTwo = http_get(Config, "/channels?page=2&page_size=2", ?OK),
+ ?assertEqual(3, maps:get(total_count, TwoOfTwo)),
+ ?assertEqual(3, maps:get(filtered_count, TwoOfTwo)),
+ ?assertEqual(1, maps:get(item_count, TwoOfTwo)),
+ ?assertEqual(2, maps:get(page, TwoOfTwo)),
+ ?assertEqual(2, maps:get(page_size, TwoOfTwo)),
+ ?assertEqual(2, maps:get(page_count, TwoOfTwo)),
+
+ amqp_channel:close(Ch),
+ amqp_connection:close(Conn),
+ amqp_channel:close(Ch1),
+ amqp_connection:close(Conn1),
+ amqp_channel:close(Ch2),
+ amqp_connection:close(Conn2),
+
+ passed.
+
+exchanges_pagination_test(Config) ->
+ QArgs = #{},
+ PermArgs = [{configure, <<".*">>}, {write, <<".*">>}, {read, <<".*">>}],
+ http_put(Config, "/vhosts/vh1", none, {group, '2xx'}),
+ http_put(Config, "/permissions/vh1/guest", PermArgs, {group, '2xx'}),
+ http_get(Config, "/exchanges/vh1?page=1&page_size=2", ?OK),
+ http_put(Config, "/exchanges/%2F/test0", QArgs, {group, '2xx'}),
+ http_put(Config, "/exchanges/vh1/test1", QArgs, {group, '2xx'}),
+ http_put(Config, "/exchanges/%2F/test2_reg", QArgs, {group, '2xx'}),
+ http_put(Config, "/exchanges/vh1/reg_test3", QArgs, {group, '2xx'}),
+
+    %% Wait for stats to update
+ timer:sleep(1500),
+
+ Total = length(rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_exchange, list_names, [])),
+
+ PageOfTwo = http_get(Config, "/exchanges?page=1&page_size=2", ?OK),
+ ?assertEqual(Total, maps:get(total_count, PageOfTwo)),
+ ?assertEqual(Total, maps:get(filtered_count, PageOfTwo)),
+ ?assertEqual(2, maps:get(item_count, PageOfTwo)),
+ ?assertEqual(1, maps:get(page, PageOfTwo)),
+ ?assertEqual(2, maps:get(page_size, PageOfTwo)),
+ ?assertEqual(round(Total / 2), maps:get(page_count, PageOfTwo)),
+ assert_list([#{name => <<"">>, vhost => <<"/">>},
+ #{name => <<"amq.direct">>, vhost => <<"/">>}
+ ], maps:get(items, PageOfTwo)),
+
+ ByName = http_get(Config, "/exchanges?page=1&page_size=2&name=reg", ?OK),
+ ?assertEqual(Total, maps:get(total_count, ByName)),
+ ?assertEqual(2, maps:get(filtered_count, ByName)),
+ ?assertEqual(2, maps:get(item_count, ByName)),
+ ?assertEqual(1, maps:get(page, ByName)),
+ ?assertEqual(2, maps:get(page_size, ByName)),
+ ?assertEqual(1, maps:get(page_count, ByName)),
+ assert_list([#{name => <<"test2_reg">>, vhost => <<"/">>},
+ #{name => <<"reg_test3">>, vhost => <<"vh1">>}
+ ], maps:get(items, ByName)),
+
+
+ RegExByName = http_get(Config,
+ "/exchanges?page=1&page_size=2&name=%5E(?=%5Ereg)&use_regex=true",
+ ?OK),
+ ?assertEqual(Total, maps:get(total_count, RegExByName)),
+ ?assertEqual(1, maps:get(filtered_count, RegExByName)),
+ ?assertEqual(1, maps:get(item_count, RegExByName)),
+ ?assertEqual(1, maps:get(page, RegExByName)),
+ ?assertEqual(2, maps:get(page_size, RegExByName)),
+ ?assertEqual(1, maps:get(page_count, RegExByName)),
+ assert_list([#{name => <<"reg_test3">>, vhost => <<"vh1">>}
+ ], maps:get(items, RegExByName)),
+
+
+ http_get(Config, "/exchanges?page=1000", ?BAD_REQUEST),
+ http_get(Config, "/exchanges?page=-1", ?BAD_REQUEST),
+ http_get(Config, "/exchanges?page=not_an_integer_value", ?BAD_REQUEST),
+ http_get(Config, "/exchanges?page=1&page_size=not_an_integer_value", ?BAD_REQUEST),
+ http_get(Config, "/exchanges?page=1&page_size=501", ?BAD_REQUEST), %% max 500 allowed
+ http_get(Config, "/exchanges?page=-1&page_size=-2", ?BAD_REQUEST),
+ http_delete(Config, "/exchanges/%2F/test0", {group, '2xx'}),
+ http_delete(Config, "/exchanges/vh1/test1", {group, '2xx'}),
+ http_delete(Config, "/exchanges/%2F/test2_reg", {group, '2xx'}),
+ http_delete(Config, "/exchanges/vh1/reg_test3", {group, '2xx'}),
+ http_delete(Config, "/vhosts/vh1", {group, '2xx'}),
+ passed.
+
+exchanges_pagination_permissions_test(Config) ->
+ http_put(Config, "/users/admin", [{password, <<"admin">>},
+ {tags, <<"administrator">>}], {group, '2xx'}),
+ http_put(Config, "/users/non-admin", [{password, <<"non-admin">>},
+ {tags, <<"management">>}], {group, '2xx'}),
+ Perms = [{configure, <<".*">>},
+ {write, <<".*">>},
+ {read, <<".*">>}],
+ http_put(Config, "/vhosts/vh1", none, {group, '2xx'}),
+ http_put(Config, "/permissions/vh1/non-admin", Perms, {group, '2xx'}),
+ http_put(Config, "/permissions/%2F/admin", Perms, {group, '2xx'}),
+ http_put(Config, "/permissions/vh1/admin", Perms, {group, '2xx'}),
+ QArgs = #{},
+ http_put(Config, "/exchanges/%2F/test0", QArgs, "admin", "admin", {group, '2xx'}),
+ http_put(Config, "/exchanges/vh1/test1", QArgs, "non-admin", "non-admin", {group, '2xx'}),
+
+    %% Wait for stats to update
+ timer:sleep(1500),
+
+ FirstPage = http_get(Config, "/exchanges?page=1&name=test1", "non-admin", "non-admin", ?OK),
+
+ ?assertEqual(8, maps:get(total_count, FirstPage)),
+ ?assertEqual(1, maps:get(item_count, FirstPage)),
+ ?assertEqual(1, maps:get(page, FirstPage)),
+ ?assertEqual(100, maps:get(page_size, FirstPage)),
+ ?assertEqual(1, maps:get(page_count, FirstPage)),
+ assert_list([#{name => <<"test1">>, vhost => <<"vh1">>}
+ ], maps:get(items, FirstPage)),
+ http_delete(Config, "/exchanges/%2F/test0", {group, '2xx'}),
+ http_delete(Config, "/exchanges/vh1/test1", {group, '2xx'}),
+ http_delete(Config, "/users/admin", {group, '2xx'}),
+ http_delete(Config, "/users/non-admin", {group, '2xx'}),
+ passed.
+
+queue_pagination_test(Config) ->
+ QArgs = #{},
+ PermArgs = [{configure, <<".*">>}, {write, <<".*">>}, {read, <<".*">>}],
+ http_put(Config, "/vhosts/vh1", none, {group, '2xx'}),
+ http_put(Config, "/permissions/vh1/guest", PermArgs, {group, '2xx'}),
+
+ http_get(Config, "/queues/vh1?page=1&page_size=2", ?OK),
+
+ http_put(Config, "/queues/%2F/test0", QArgs, {group, '2xx'}),
+ http_put(Config, "/queues/vh1/test1", QArgs, {group, '2xx'}),
+ http_put(Config, "/queues/%2F/test2_reg", QArgs, {group, '2xx'}),
+ http_put(Config, "/queues/vh1/reg_test3", QArgs, {group, '2xx'}),
+
+    %% Wait for stats to update
+ timer:sleep(1500),
+
+ Total = length(rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_amqqueue, list_names, [])),
+
+ PageOfTwo = http_get(Config, "/queues?page=1&page_size=2", ?OK),
+ ?assertEqual(Total, maps:get(total_count, PageOfTwo)),
+ ?assertEqual(Total, maps:get(filtered_count, PageOfTwo)),
+ ?assertEqual(2, maps:get(item_count, PageOfTwo)),
+ ?assertEqual(1, maps:get(page, PageOfTwo)),
+ ?assertEqual(2, maps:get(page_size, PageOfTwo)),
+ ?assertEqual(2, maps:get(page_count, PageOfTwo)),
+ assert_list([#{name => <<"test0">>, vhost => <<"/">>},
+ #{name => <<"test2_reg">>, vhost => <<"/">>}
+ ], maps:get(items, PageOfTwo)),
+
+ SortedByName = http_get(Config, "/queues?sort=name&page=1&page_size=2", ?OK),
+ ?assertEqual(Total, maps:get(total_count, SortedByName)),
+ ?assertEqual(Total, maps:get(filtered_count, SortedByName)),
+ ?assertEqual(2, maps:get(item_count, SortedByName)),
+ ?assertEqual(1, maps:get(page, SortedByName)),
+ ?assertEqual(2, maps:get(page_size, SortedByName)),
+ ?assertEqual(2, maps:get(page_count, SortedByName)),
+ assert_list([#{name => <<"reg_test3">>, vhost => <<"vh1">>},
+ #{name => <<"test0">>, vhost => <<"/">>}
+ ], maps:get(items, SortedByName)),
+
+
+ FirstPage = http_get(Config, "/queues?page=1", ?OK),
+ ?assertEqual(Total, maps:get(total_count, FirstPage)),
+ ?assertEqual(Total, maps:get(filtered_count, FirstPage)),
+ ?assertEqual(4, maps:get(item_count, FirstPage)),
+ ?assertEqual(1, maps:get(page, FirstPage)),
+ ?assertEqual(100, maps:get(page_size, FirstPage)),
+ ?assertEqual(1, maps:get(page_count, FirstPage)),
+ assert_list([#{name => <<"test0">>, vhost => <<"/">>},
+ #{name => <<"test1">>, vhost => <<"vh1">>},
+ #{name => <<"test2_reg">>, vhost => <<"/">>},
+                 #{name => <<"reg_test3">>, vhost => <<"vh1">>}
+ ], maps:get(items, FirstPage)),
+
+
+ ReverseSortedByName = http_get(Config,
+ "/queues?page=2&page_size=2&sort=name&sort_reverse=true",
+ ?OK),
+ ?assertEqual(Total, maps:get(total_count, ReverseSortedByName)),
+ ?assertEqual(Total, maps:get(filtered_count, ReverseSortedByName)),
+ ?assertEqual(2, maps:get(item_count, ReverseSortedByName)),
+ ?assertEqual(2, maps:get(page, ReverseSortedByName)),
+ ?assertEqual(2, maps:get(page_size, ReverseSortedByName)),
+ ?assertEqual(2, maps:get(page_count, ReverseSortedByName)),
+ assert_list([#{name => <<"test0">>, vhost => <<"/">>},
+ #{name => <<"reg_test3">>, vhost => <<"vh1">>}
+ ], maps:get(items, ReverseSortedByName)),
+
+
+ ByName = http_get(Config, "/queues?page=1&page_size=2&name=reg", ?OK),
+ ?assertEqual(Total, maps:get(total_count, ByName)),
+ ?assertEqual(2, maps:get(filtered_count, ByName)),
+ ?assertEqual(2, maps:get(item_count, ByName)),
+ ?assertEqual(1, maps:get(page, ByName)),
+ ?assertEqual(2, maps:get(page_size, ByName)),
+ ?assertEqual(1, maps:get(page_count, ByName)),
+ assert_list([#{name => <<"test2_reg">>, vhost => <<"/">>},
+ #{name => <<"reg_test3">>, vhost => <<"vh1">>}
+ ], maps:get(items, ByName)),
+
+ RegExByName = http_get(Config,
+ "/queues?page=1&page_size=2&name=%5E(?=%5Ereg)&use_regex=true",
+ ?OK),
+ ?assertEqual(Total, maps:get(total_count, RegExByName)),
+ ?assertEqual(1, maps:get(filtered_count, RegExByName)),
+ ?assertEqual(1, maps:get(item_count, RegExByName)),
+ ?assertEqual(1, maps:get(page, RegExByName)),
+ ?assertEqual(2, maps:get(page_size, RegExByName)),
+ ?assertEqual(1, maps:get(page_count, RegExByName)),
+ assert_list([#{name => <<"reg_test3">>, vhost => <<"vh1">>}
+ ], maps:get(items, RegExByName)),
+
+
+ http_get(Config, "/queues?page=1000", ?BAD_REQUEST),
+ http_get(Config, "/queues?page=-1", ?BAD_REQUEST),
+ http_get(Config, "/queues?page=not_an_integer_value", ?BAD_REQUEST),
+ http_get(Config, "/queues?page=1&page_size=not_an_integer_value", ?BAD_REQUEST),
+ http_get(Config, "/queues?page=1&page_size=501", ?BAD_REQUEST), %% max 500 allowed
+ http_get(Config, "/queues?page=-1&page_size=-2", ?BAD_REQUEST),
+ http_delete(Config, "/queues/%2F/test0", {group, '2xx'}),
+ http_delete(Config, "/queues/vh1/test1", {group, '2xx'}),
+ http_delete(Config, "/queues/%2F/test2_reg", {group, '2xx'}),
+ http_delete(Config, "/queues/vh1/reg_test3", {group, '2xx'}),
+ http_delete(Config, "/vhosts/vh1", {group, '2xx'}),
+ passed.
+
+queue_pagination_columns_test(Config) ->
+ QArgs = #{},
+ PermArgs = [{configure, <<".*">>}, {write, <<".*">>}, {read, <<".*">>}],
+ http_put(Config, "/vhosts/vh1", none, [?CREATED, ?NO_CONTENT]),
+ http_put(Config, "/permissions/vh1/guest", PermArgs, [?CREATED, ?NO_CONTENT]),
+
+ http_get(Config, "/queues/vh1?columns=name&page=1&page_size=2", ?OK),
+ http_put(Config, "/queues/%2F/queue_a", QArgs, {group, '2xx'}),
+ http_put(Config, "/queues/vh1/queue_b", QArgs, {group, '2xx'}),
+ http_put(Config, "/queues/%2F/queue_c", QArgs, {group, '2xx'}),
+ http_put(Config, "/queues/vh1/queue_d", QArgs, {group, '2xx'}),
+ PageOfTwo = http_get(Config, "/queues?columns=name&page=1&page_size=2", ?OK),
+ ?assertEqual(4, maps:get(total_count, PageOfTwo)),
+ ?assertEqual(4, maps:get(filtered_count, PageOfTwo)),
+ ?assertEqual(2, maps:get(item_count, PageOfTwo)),
+ ?assertEqual(1, maps:get(page, PageOfTwo)),
+ ?assertEqual(2, maps:get(page_size, PageOfTwo)),
+ ?assertEqual(2, maps:get(page_count, PageOfTwo)),
+ assert_list([#{name => <<"queue_a">>},
+ #{name => <<"queue_c">>}
+ ], maps:get(items, PageOfTwo)),
+
+ ColumnNameVhost = http_get(Config, "/queues/vh1?columns=name&page=1&page_size=2", ?OK),
+ ?assertEqual(2, maps:get(total_count, ColumnNameVhost)),
+ ?assertEqual(2, maps:get(filtered_count, ColumnNameVhost)),
+ ?assertEqual(2, maps:get(item_count, ColumnNameVhost)),
+ ?assertEqual(1, maps:get(page, ColumnNameVhost)),
+ ?assertEqual(2, maps:get(page_size, ColumnNameVhost)),
+ ?assertEqual(1, maps:get(page_count, ColumnNameVhost)),
+ assert_list([#{name => <<"queue_b">>},
+ #{name => <<"queue_d">>}
+ ], maps:get(items, ColumnNameVhost)),
+
+ ColumnsNameVhost = http_get(Config, "/queues?columns=name,vhost&page=2&page_size=2", ?OK),
+ ?assertEqual(4, maps:get(total_count, ColumnsNameVhost)),
+ ?assertEqual(4, maps:get(filtered_count, ColumnsNameVhost)),
+ ?assertEqual(2, maps:get(item_count, ColumnsNameVhost)),
+ ?assertEqual(2, maps:get(page, ColumnsNameVhost)),
+ ?assertEqual(2, maps:get(page_size, ColumnsNameVhost)),
+ ?assertEqual(2, maps:get(page_count, ColumnsNameVhost)),
+ assert_list([
+ #{name => <<"queue_b">>,
+ vhost => <<"vh1">>},
+ #{name => <<"queue_d">>,
+ vhost => <<"vh1">>}
+ ], maps:get(items, ColumnsNameVhost)),
+
+
+ http_delete(Config, "/queues/%2F/queue_a", {group, '2xx'}),
+ http_delete(Config, "/queues/vh1/queue_b", {group, '2xx'}),
+ http_delete(Config, "/queues/%2F/queue_c", {group, '2xx'}),
+ http_delete(Config, "/queues/vh1/queue_d", {group, '2xx'}),
+ http_delete(Config, "/vhosts/vh1", {group, '2xx'}),
+ passed.
+
+queues_pagination_permissions_test(Config) ->
+ http_put(Config, "/users/non-admin", [{password, <<"non-admin">>},
+ {tags, <<"management">>}], {group, '2xx'}),
+ http_put(Config, "/users/admin", [{password, <<"admin">>},
+ {tags, <<"administrator">>}], {group, '2xx'}),
+ Perms = [{configure, <<".*">>},
+ {write, <<".*">>},
+ {read, <<".*">>}],
+ http_put(Config, "/vhosts/vh1", none, {group, '2xx'}),
+ http_put(Config, "/permissions/vh1/non-admin", Perms, {group, '2xx'}),
+ http_put(Config, "/permissions/%2F/admin", Perms, {group, '2xx'}),
+ http_put(Config, "/permissions/vh1/admin", Perms, {group, '2xx'}),
+ QArgs = #{},
+ http_put(Config, "/queues/%2F/test0", QArgs, {group, '2xx'}),
+ http_put(Config, "/queues/vh1/test1", QArgs, "non-admin","non-admin", {group, '2xx'}),
+ FirstPage = http_get(Config, "/queues?page=1", "non-admin", "non-admin", ?OK),
+ ?assertEqual(1, maps:get(total_count, FirstPage)),
+ ?assertEqual(1, maps:get(item_count, FirstPage)),
+ ?assertEqual(1, maps:get(page, FirstPage)),
+ ?assertEqual(100, maps:get(page_size, FirstPage)),
+ ?assertEqual(1, maps:get(page_count, FirstPage)),
+ assert_list([#{name => <<"test1">>, vhost => <<"vh1">>}
+ ], maps:get(items, FirstPage)),
+
+ FirstPageAdm = http_get(Config, "/queues?page=1", "admin", "admin", ?OK),
+ ?assertEqual(2, maps:get(total_count, FirstPageAdm)),
+ ?assertEqual(2, maps:get(item_count, FirstPageAdm)),
+ ?assertEqual(1, maps:get(page, FirstPageAdm)),
+ ?assertEqual(100, maps:get(page_size, FirstPageAdm)),
+ ?assertEqual(1, maps:get(page_count, FirstPageAdm)),
+ assert_list([#{name => <<"test1">>, vhost => <<"vh1">>},
+ #{name => <<"test0">>, vhost => <<"/">>}
+ ], maps:get(items, FirstPageAdm)),
+
+ http_delete(Config, "/queues/%2F/test0", {group, '2xx'}),
+ http_delete(Config, "/queues/vh1/test1","admin","admin", {group, '2xx'}),
+ http_delete(Config, "/users/admin", {group, '2xx'}),
+ http_delete(Config, "/users/non-admin", {group, '2xx'}),
+ passed.
+
+samples_range_test(Config) ->
+ {Conn, Ch} = open_connection_and_channel(Config),
+
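+    %% lengths_age / lengths_incr select the sampled-stats window (in
+    %% seconds) and its sample increment; an age of 6000 is rejected
+    %% throughout, presumably because it exceeds the sample retention
+    %% configured for this suite.
+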
+ %% Channels
+ timer:sleep(2000),
+ [ConnInfo | _] = http_get(Config, "/channels?lengths_age=60&lengths_incr=1", ?OK),
+ http_get(Config, "/channels?lengths_age=6000&lengths_incr=1", ?BAD_REQUEST),
+
+ ConnDetails = maps:get(connection_details, ConnInfo),
+ ConnName0 = maps:get(name, ConnDetails),
+ ConnName = uri_string:recompose(#{path => binary_to_list(ConnName0)}),
+ ChanName = ConnName ++ uri_string:recompose(#{path => " (1)"}),
+
+ http_get(Config, "/channels/" ++ ChanName ++ "?lengths_age=60&lengths_incr=1", ?OK),
+ http_get(Config, "/channels/" ++ ChanName ++ "?lengths_age=6000&lengths_incr=1", ?BAD_REQUEST),
+
+ http_get(Config, "/vhosts/%2F/channels?lengths_age=60&lengths_incr=1", ?OK),
+ http_get(Config, "/vhosts/%2F/channels?lengths_age=6000&lengths_incr=1", ?BAD_REQUEST),
+
+ %% Connections.
+
+ http_get(Config, "/connections?lengths_age=60&lengths_incr=1", ?OK),
+ http_get(Config, "/connections?lengths_age=6000&lengths_incr=1", ?BAD_REQUEST),
+
+ http_get(Config, "/connections/" ++ ConnName ++ "?lengths_age=60&lengths_incr=1", ?OK),
+ http_get(Config, "/connections/" ++ ConnName ++ "?lengths_age=6000&lengths_incr=1", ?BAD_REQUEST),
+
+ http_get(Config, "/connections/" ++ ConnName ++ "/channels?lengths_age=60&lengths_incr=1", ?OK),
+ http_get(Config, "/connections/" ++ ConnName ++ "/channels?lengths_age=6000&lengths_incr=1", ?BAD_REQUEST),
+
+ http_get(Config, "/vhosts/%2F/connections?lengths_age=60&lengths_incr=1", ?OK),
+ http_get(Config, "/vhosts/%2F/connections?lengths_age=6000&lengths_incr=1", ?BAD_REQUEST),
+
+ amqp_channel:close(Ch),
+ amqp_connection:close(Conn),
+
+ %% Exchanges
+
+ http_get(Config, "/exchanges?lengths_age=60&lengths_incr=1", ?OK),
+ http_get(Config, "/exchanges?lengths_age=6000&lengths_incr=1", ?BAD_REQUEST),
+
+ http_get(Config, "/exchanges/%2F/amq.direct?lengths_age=60&lengths_incr=1", ?OK),
+ http_get(Config, "/exchanges/%2F/amq.direct?lengths_age=6000&lengths_incr=1", ?BAD_REQUEST),
+
+ %% Nodes
+ http_get(Config, "/nodes?lengths_age=60&lengths_incr=1", ?OK),
+ http_get(Config, "/nodes?lengths_age=6000&lengths_incr=1", ?BAD_REQUEST),
+
+ %% Overview
+ http_get(Config, "/overview?lengths_age=60&lengths_incr=1", ?OK),
+ http_get(Config, "/overview?lengths_age=6000&lengths_incr=1", ?BAD_REQUEST),
+
+ %% Queues
+ http_put(Config, "/queues/%2F/test-001", #{}, {group, '2xx'}),
+ timer:sleep(2000),
+
+ http_get(Config, "/queues/%2F?lengths_age=60&lengths_incr=1", ?OK),
+ http_get(Config, "/queues/%2F?lengths_age=6000&lengths_incr=1", ?BAD_REQUEST),
+ http_get(Config, "/queues/%2F/test-001?lengths_age=60&lengths_incr=1", ?OK),
+ http_get(Config, "/queues/%2F/test-001?lengths_age=6000&lengths_incr=1", ?BAD_REQUEST),
+
+ http_delete(Config, "/queues/%2F/test-001", {group, '2xx'}),
+
+ %% Vhosts
+ http_put(Config, "/vhosts/vh1", none, {group, '2xx'}),
+ timer:sleep(2000),
+
+ http_get(Config, "/vhosts?lengths_age=60&lengths_incr=1", ?OK),
+ http_get(Config, "/vhosts?lengths_age=6000&lengths_incr=1", ?BAD_REQUEST),
+ http_get(Config, "/vhosts/vh1?lengths_age=60&lengths_incr=1", ?OK),
+ http_get(Config, "/vhosts/vh1?lengths_age=6000&lengths_incr=1", ?BAD_REQUEST),
+
+ http_delete(Config, "/vhosts/vh1", {group, '2xx'}),
+
+ passed.
+
+sorting_test(Config) ->
+ QArgs = #{},
+ PermArgs = [{configure, <<".*">>}, {write, <<".*">>}, {read, <<".*">>}],
+ http_put(Config, "/vhosts/vh19", none, {group, '2xx'}),
+ http_put(Config, "/permissions/vh19/guest", PermArgs, {group, '2xx'}),
+ http_put(Config, "/queues/%2F/test0", QArgs, {group, '2xx'}),
+ http_put(Config, "/queues/vh19/test1", QArgs, {group, '2xx'}),
+ http_put(Config, "/queues/%2F/test2", QArgs, {group, '2xx'}),
+ http_put(Config, "/queues/vh19/test3", QArgs, {group, '2xx'}),
+ timer:sleep(2000),
+ assert_list([#{name => <<"test0">>},
+ #{name => <<"test2">>},
+ #{name => <<"test1">>},
+ #{name => <<"test3">>}], http_get(Config, "/queues", ?OK)),
+ assert_list([#{name => <<"test0">>},
+ #{name => <<"test1">>},
+ #{name => <<"test2">>},
+ #{name => <<"test3">>}], http_get(Config, "/queues?sort=name", ?OK)),
+ assert_list([#{name => <<"test0">>},
+ #{name => <<"test2">>},
+ #{name => <<"test1">>},
+ #{name => <<"test3">>}], http_get(Config, "/queues?sort=vhost", ?OK)),
+ assert_list([#{name => <<"test3">>},
+ #{name => <<"test1">>},
+ #{name => <<"test2">>},
+ #{name => <<"test0">>}], http_get(Config, "/queues?sort_reverse=true", ?OK)),
+ assert_list([#{name => <<"test3">>},
+ #{name => <<"test2">>},
+ #{name => <<"test1">>},
+ #{name => <<"test0">>}], http_get(Config, "/queues?sort=name&sort_reverse=true", ?OK)),
+ assert_list([#{name => <<"test3">>},
+ #{name => <<"test1">>},
+ #{name => <<"test2">>},
+ #{name => <<"test0">>}], http_get(Config, "/queues?sort=vhost&sort_reverse=true", ?OK)),
+    %% Rather poor coverage, but at least check that sorting on a nested (dotted) key doesn't blow up
+ http_get(Config, "/queues?sort=owner_pid_details.name", ?OK),
+ http_delete(Config, "/queues/%2F/test0", {group, '2xx'}),
+ http_delete(Config, "/queues/vh19/test1", {group, '2xx'}),
+ http_delete(Config, "/queues/%2F/test2", {group, '2xx'}),
+ http_delete(Config, "/queues/vh19/test3", {group, '2xx'}),
+ http_delete(Config, "/vhosts/vh19", {group, '2xx'}),
+ passed.
+
+format_output_test(Config) ->
+ QArgs = #{},
+ PermArgs = [{configure, <<".*">>}, {write, <<".*">>}, {read, <<".*">>}],
+ http_put(Config, "/vhosts/vh129", none, {group, '2xx'}),
+ http_put(Config, "/permissions/vh129/guest", PermArgs, {group, '2xx'}),
+ http_put(Config, "/queues/%2F/test0", QArgs, {group, '2xx'}),
+ timer:sleep(2000),
+ assert_list([#{name => <<"test0">>,
+ consumer_utilisation => null,
+ exclusive_consumer_tag => null,
+ recoverable_slaves => null}], http_get(Config, "/queues", ?OK)),
+ http_delete(Config, "/queues/%2F/test0", {group, '2xx'}),
+ http_delete(Config, "/vhosts/vh129", {group, '2xx'}),
+ passed.
+
+columns_test(Config) ->
+ Path = "/queues/%2F/columns.test",
+ TTL = 30000,
+ http_delete(Config, Path, [{group, '2xx'}, 404]),
+ http_put(Config, Path, [{arguments, [{<<"x-message-ttl">>, TTL}]}],
+ {group, '2xx'}),
+ Item = #{arguments => #{'x-message-ttl' => TTL}, name => <<"columns.test">>},
+ timer:sleep(2000),
+ [Item] = http_get(Config, "/queues?columns=arguments.x-message-ttl,name", ?OK),
+ Item = http_get(Config, "/queues/%2F/columns.test?columns=arguments.x-message-ttl,name", ?OK),
+ http_delete(Config, Path, {group, '2xx'}),
+ passed.
+
+get_test(Config) ->
+ %% Real world example...
+ Headers = [{<<"x-forwarding">>, array,
+ [{table,
+ [{<<"uri">>, longstr,
+ <<"amqp://localhost/%2F/upstream">>}]}]}],
+ http_put(Config, "/queues/%2F/myqueue", #{}, {group, '2xx'}),
+ {Conn, Ch} = open_connection_and_channel(Config),
+ #'confirm.select_ok'{} = amqp_channel:call(Ch, #'confirm.select'{}),
+ Publish = fun (Payload) ->
+ amqp_channel:cast(
+ Ch, #'basic.publish'{exchange = <<>>,
+ routing_key = <<"myqueue">>},
+ #amqp_msg{props = #'P_basic'{headers = Headers},
+ payload = Payload}),
+ amqp_channel:wait_for_confirms_or_die(Ch, 5)
+ end,
+ Publish(<<"1aaa">>),
+ Publish(<<"2aaa">>),
+ Publish(<<"3aaa">>),
+ [Msg] = http_post(Config, "/queues/%2F/myqueue/get", [{ackmode, ack_requeue_false},
+ {count, 1},
+ {encoding, auto},
+ {truncate, 1}], ?OK),
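+    %% truncate=1 caps the returned payload at one byte, so <<"1aaa">> comes
+    %% back as <<"1">>.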
+ false = maps:get(redelivered, Msg),
+ <<>> = maps:get(exchange, Msg),
+ <<"myqueue">> = maps:get(routing_key, Msg),
+ <<"1">> = maps:get(payload, Msg),
+ #{'x-forwarding' :=
+ [#{uri := <<"amqp://localhost/%2F/upstream">>}]} =
+ maps:get(headers, maps:get(properties, Msg)),
+
+ [M2, M3] = http_post(Config, "/queues/%2F/myqueue/get", [{ackmode, ack_requeue_true},
+ {count, 5},
+ {encoding, auto}], ?OK),
+ <<"2aaa">> = maps:get(payload, M2),
+ <<"3aaa">> = maps:get(payload, M3),
+ 2 = length(http_post(Config, "/queues/%2F/myqueue/get", [{ackmode, ack_requeue_false},
+ {count, 5},
+ {encoding, auto}], ?OK)),
+ Publish(<<"4aaa">>),
+ Publish(<<"5aaa">>),
+ [M4, M5] = http_post(Config, "/queues/%2F/myqueue/get",
+ [{ackmode, reject_requeue_true},
+ {count, 5},
+ {encoding, auto}], ?OK),
+
+ <<"4aaa">> = maps:get(payload, M4),
+ <<"5aaa">> = maps:get(payload, M5),
+ 2 = length(http_post(Config, "/queues/%2F/myqueue/get",
+ [{ackmode, ack_requeue_false},
+ {count, 5},
+ {encoding, auto}], ?OK)),
+
+ [] = http_post(Config, "/queues/%2F/myqueue/get", [{ackmode, ack_requeue_false},
+ {count, 5},
+ {encoding, auto}], ?OK),
+ http_delete(Config, "/queues/%2F/myqueue", {group, '2xx'}),
+ amqp_channel:close(Ch),
+ close_connection(Conn),
+
+ passed.
+
+get_encoding_test(Config) ->
+ Utf8Text = <<"Loïc was here!"/utf8>>,
+ Utf8Payload = base64:encode(Utf8Text),
+ BinPayload = base64:encode(<<0:64, 16#ff, 16#fd, 0:64>>),
+ Utf8Msg = msg(<<"get_encoding_test">>, #{}, Utf8Payload, <<"base64">>),
+ BinMsg = msg(<<"get_encoding_test">>, #{}, BinPayload, <<"base64">>),
+ http_put(Config, "/queues/%2F/get_encoding_test", #{}, {group, '2xx'}),
+ http_post(Config, "/exchanges/%2F/amq.default/publish", Utf8Msg, ?OK),
+ http_post(Config, "/exchanges/%2F/amq.default/publish", BinMsg, ?OK),
+ timer:sleep(250),
+ [RecvUtf8Msg1, RecvBinMsg1] = http_post(Config, "/queues/%2F/get_encoding_test/get",
+ [{ackmode, ack_requeue_false},
+ {count, 2},
+ {encoding, auto}], ?OK),
+    %% A UTF-8 payload must be returned as a UTF-8 string when auto encoding is used.
+ ?assertEqual(<<"string">>, maps:get(payload_encoding, RecvUtf8Msg1)),
+ ?assertEqual(Utf8Text, maps:get(payload, RecvUtf8Msg1)),
+ %% Binary payload must be base64-encoded when auto is used.
+ ?assertEqual(<<"base64">>, maps:get(payload_encoding, RecvBinMsg1)),
+ ?assertEqual(BinPayload, maps:get(payload, RecvBinMsg1)),
+ %% Good. Now try forcing the base64 encoding.
+ http_post(Config, "/exchanges/%2F/amq.default/publish", Utf8Msg, ?OK),
+ http_post(Config, "/exchanges/%2F/amq.default/publish", BinMsg, ?OK),
+ [RecvUtf8Msg2, RecvBinMsg2] = http_post(Config, "/queues/%2F/get_encoding_test/get",
+ [{ackmode, ack_requeue_false},
+ {count, 2},
+ {encoding, base64}], ?OK),
+ %% All payloads must be base64-encoded when base64 encoding is used.
+ ?assertEqual(<<"base64">>, maps:get(payload_encoding, RecvUtf8Msg2)),
+ ?assertEqual(Utf8Payload, maps:get(payload, RecvUtf8Msg2)),
+ ?assertEqual(<<"base64">>, maps:get(payload_encoding, RecvBinMsg2)),
+ ?assertEqual(BinPayload, maps:get(payload, RecvBinMsg2)),
+ http_delete(Config, "/queues/%2F/get_encoding_test", {group, '2xx'}),
+ passed.
+
+get_fail_test(Config) ->
+ http_put(Config, "/users/myuser", [{password, <<"password">>},
+ {tags, <<"management">>}], {group, '2xx'}),
+ http_put(Config, "/queues/%2F/myqueue", #{}, {group, '2xx'}),
+ http_post(Config, "/queues/%2F/myqueue/get",
+ [{ackmode, ack_requeue_false},
+ {count, 1},
+ {encoding, auto}], "myuser", "password", ?NOT_AUTHORISED),
+ http_delete(Config, "/queues/%2F/myqueue", {group, '2xx'}),
+ http_delete(Config, "/users/myuser", {group, '2xx'}),
+ passed.
+
+
+-define(LARGE_BODY_BYTES, 25000000).
+
+publish_test(Config) ->
+ Headers = #{'x-forwarding' => [#{uri => <<"amqp://localhost/%2F/upstream">>}]},
+ Msg = msg(<<"publish_test">>, Headers, <<"Hello world">>),
+ http_put(Config, "/queues/%2F/publish_test", #{}, {group, '2xx'}),
+ ?assertEqual(#{routed => true},
+ http_post(Config, "/exchanges/%2F/amq.default/publish", Msg, ?OK)),
+ [Msg2] = http_post(Config, "/queues/%2F/publish_test/get", [{ackmode, ack_requeue_false},
+ {count, 1},
+ {encoding, auto}], ?OK),
+ assert_item(Msg, Msg2),
+ http_post(Config, "/exchanges/%2F/amq.default/publish", Msg2, ?OK),
+ [Msg3] = http_post(Config, "/queues/%2F/publish_test/get", [{ackmode, ack_requeue_false},
+ {count, 1},
+ {encoding, auto}], ?OK),
+ assert_item(Msg, Msg3),
+ http_delete(Config, "/queues/%2F/publish_test", {group, '2xx'}),
+ passed.
+
+publish_large_message_test(Config) ->
+ Headers = #{'x-forwarding' => [#{uri => <<"amqp://localhost/%2F/upstream">>}]},
+ Body = binary:copy(<<"a">>, ?LARGE_BODY_BYTES),
+ Msg = msg(<<"publish_accept_json_test">>, Headers, Body),
+ http_put(Config, "/queues/%2F/publish_accept_json_test", #{}, {group, '2xx'}),
+ ?assertEqual(#{routed => true},
+ http_post_accept_json(Config, "/exchanges/%2F/amq.default/publish",
+ Msg, ?OK)),
+
+ [Msg2] = http_post_accept_json(Config, "/queues/%2F/publish_accept_json_test/get",
+ [{ackmode, ack_requeue_false},
+ {count, 1},
+ {encoding, auto}], ?OK),
+ assert_item(Msg, Msg2),
+ http_post_accept_json(Config, "/exchanges/%2F/amq.default/publish", Msg2, ?OK),
+ [Msg3] = http_post_accept_json(Config, "/queues/%2F/publish_accept_json_test/get",
+ [{ackmode, ack_requeue_false},
+ {count, 1},
+ {encoding, auto}], ?OK),
+ assert_item(Msg, Msg3),
+ http_delete(Config, "/queues/%2F/publish_accept_json_test", {group, '2xx'}),
+ passed.
+
+publish_accept_json_test(Config) ->
+ Headers = #{'x-forwarding' => [#{uri => <<"amqp://localhost/%2F/upstream">>}]},
+ Msg = msg(<<"publish_accept_json_test">>, Headers, <<"Hello world">>),
+ http_put(Config, "/queues/%2F/publish_accept_json_test", #{}, {group, '2xx'}),
+ ?assertEqual(#{routed => true},
+ http_post_accept_json(Config, "/exchanges/%2F/amq.default/publish",
+ Msg, ?OK)),
+
+ [Msg2] = http_post_accept_json(Config, "/queues/%2F/publish_accept_json_test/get",
+ [{ackmode, ack_requeue_false},
+ {count, 1},
+ {encoding, auto}], ?OK),
+ assert_item(Msg, Msg2),
+ http_post_accept_json(Config, "/exchanges/%2F/amq.default/publish", Msg2, ?OK),
+ [Msg3] = http_post_accept_json(Config, "/queues/%2F/publish_accept_json_test/get",
+ [{ackmode, ack_requeue_false},
+ {count, 1},
+ {encoding, auto}], ?OK),
+ assert_item(Msg, Msg3),
+ http_delete(Config, "/queues/%2F/publish_accept_json_test", {group, '2xx'}),
+ passed.
+
+publish_fail_test(Config) ->
+ Msg = msg(<<"publish_fail_test">>, [], <<"Hello world">>),
+ http_put(Config, "/queues/%2F/publish_fail_test", #{}, {group, '2xx'}),
+ http_put(Config, "/users/myuser", [{password, <<"password">>},
+ {tags, <<"management">>}], {group, '2xx'}),
+ http_post(Config, "/exchanges/%2F/amq.default/publish", Msg, "myuser", "password",
+ ?NOT_AUTHORISED),
+ Msg2 = [{exchange, <<"">>},
+ {routing_key, <<"publish_fail_test">>},
+ {properties, [{user_id, <<"foo">>}]},
+ {payload, <<"Hello world">>},
+ {payload_encoding, <<"string">>}],
+ http_post(Config, "/exchanges/%2F/amq.default/publish", Msg2, ?BAD_REQUEST),
+ Msg3 = [{exchange, <<"">>},
+ {routing_key, <<"publish_fail_test">>},
+ {properties, []},
+ {payload, [<<"not a string">>]},
+ {payload_encoding, <<"string">>}],
+ http_post(Config, "/exchanges/%2F/amq.default/publish", Msg3, ?BAD_REQUEST),
+ MsgTemplate = [{exchange, <<"">>},
+ {routing_key, <<"publish_fail_test">>},
+ {payload, <<"Hello world">>},
+ {payload_encoding, <<"string">>}],
+ [http_post(Config, "/exchanges/%2F/amq.default/publish",
+ [{properties, [BadProp]} | MsgTemplate], ?BAD_REQUEST)
+ || BadProp <- [{priority, <<"really high">>},
+ {timestamp, <<"recently">>},
+ {expiration, 1234}]],
+ http_delete(Config, "/queues/%2F/publish_fail_test", {group, '2xx'}),
+ http_delete(Config, "/users/myuser", {group, '2xx'}),
+ passed.
+
+publish_base64_test(Config) ->
+ %% "abcd"
+ %% @todo Note that we used to accept [] instead of {struct, []} when we shouldn't have.
+ %% This is a breaking change and probably needs to be documented.
+ Msg = msg(<<"publish_base64_test">>, #{}, <<"YWJjZA==">>, <<"base64">>),
+ BadMsg1 = msg(<<"publish_base64_test">>, #{}, <<"flibble">>, <<"base64">>),
+ BadMsg2 = msg(<<"publish_base64_test">>, #{}, <<"YWJjZA==">>, <<"base99">>),
+ http_put(Config, "/queues/%2F/publish_base64_test", #{}, {group, '2xx'}),
+ http_post(Config, "/exchanges/%2F/amq.default/publish", Msg, ?OK),
+ http_post(Config, "/exchanges/%2F/amq.default/publish", BadMsg1, ?BAD_REQUEST),
+ http_post(Config, "/exchanges/%2F/amq.default/publish", BadMsg2, ?BAD_REQUEST),
+ timer:sleep(250),
+ [Msg2] = http_post(Config, "/queues/%2F/publish_base64_test/get", [{ackmode, ack_requeue_false},
+ {count, 1},
+ {encoding, auto}], ?OK),
+ ?assertEqual(<<"abcd">>, maps:get(payload, Msg2)),
+ http_delete(Config, "/queues/%2F/publish_base64_test", {group, '2xx'}),
+ passed.
+
+publish_unrouted_test(Config) ->
+ Msg = msg(<<"hmmm">>, #{}, <<"Hello world">>),
+ ?assertEqual(#{routed => false},
+ http_post(Config, "/exchanges/%2F/amq.default/publish", Msg, ?OK)).
+
+if_empty_unused_test(Config) ->
+ http_put(Config, "/exchanges/%2F/test", #{}, {group, '2xx'}),
+ http_put(Config, "/queues/%2F/test", #{}, {group, '2xx'}),
+ http_post(Config, "/bindings/%2F/e/test/q/test", #{}, {group, '2xx'}),
+ http_post(Config, "/exchanges/%2F/amq.default/publish",
+ msg(<<"test">>, #{}, <<"Hello world">>), ?OK),
+ http_delete(Config, "/queues/%2F/test?if-empty=true", ?BAD_REQUEST),
+ http_delete(Config, "/exchanges/%2F/test?if-unused=true", ?BAD_REQUEST),
+ http_delete(Config, "/queues/%2F/test/contents", {group, '2xx'}),
+
+ {Conn, _ConnPath, _ChPath, _ConnChPath} = get_conn(Config, "guest", "guest"),
+ {ok, Ch} = amqp_connection:open_channel(Conn),
+ amqp_channel:subscribe(Ch, #'basic.consume'{queue = <<"test">> }, self()),
+ http_delete(Config, "/queues/%2F/test?if-unused=true", ?BAD_REQUEST),
+ amqp_connection:close(Conn),
+
+ http_delete(Config, "/queues/%2F/test?if-empty=true", {group, '2xx'}),
+ http_delete(Config, "/exchanges/%2F/test?if-unused=true", {group, '2xx'}),
+ passed.
+
+parameters_test(Config) ->
+ register_parameters_and_policy_validator(Config),
+
+ http_put(Config, "/parameters/test/%2F/good", [{value, <<"ignore">>}], {group, '2xx'}),
+ http_put(Config, "/parameters/test/%2F/maybe", [{value, <<"good">>}], {group, '2xx'}),
+ http_put(Config, "/parameters/test/%2F/maybe", [{value, <<"bad">>}], ?BAD_REQUEST),
+ http_put(Config, "/parameters/test/%2F/bad", [{value, <<"good">>}], ?BAD_REQUEST),
+ http_put(Config, "/parameters/test/um/good", [{value, <<"ignore">>}], ?NOT_FOUND),
+
+ Good = #{vhost => <<"/">>,
+ component => <<"test">>,
+ name => <<"good">>,
+ value => <<"ignore">>},
+ Maybe = #{vhost => <<"/">>,
+ component => <<"test">>,
+ name => <<"maybe">>,
+ value => <<"good">>},
+ List = [Good, Maybe],
+
+ assert_list(List, http_get(Config, "/parameters")),
+ assert_list(List, http_get(Config, "/parameters/test")),
+ assert_list(List, http_get(Config, "/parameters/test/%2F")),
+ assert_list([], http_get(Config, "/parameters/oops")),
+ http_get(Config, "/parameters/test/oops", ?NOT_FOUND),
+
+ assert_item(Good, http_get(Config, "/parameters/test/%2F/good", ?OK)),
+ assert_item(Maybe, http_get(Config, "/parameters/test/%2F/maybe", ?OK)),
+
+ http_delete(Config, "/parameters/test/%2F/good", {group, '2xx'}),
+ http_delete(Config, "/parameters/test/%2F/maybe", {group, '2xx'}),
+ http_delete(Config, "/parameters/test/%2F/bad", ?NOT_FOUND),
+
+ 0 = length(http_get(Config, "/parameters")),
+ 0 = length(http_get(Config, "/parameters/test")),
+ 0 = length(http_get(Config, "/parameters/test/%2F")),
+ unregister_parameters_and_policy_validator(Config),
+ passed.
+
+global_parameters_test(Config) ->
+ InitialParameters = http_get(Config, "/global-parameters"),
+ http_put(Config, "/global-parameters/good", [{value, [{a, <<"b">>}]}], {group, '2xx'}),
+ http_put(Config, "/global-parameters/maybe", [{value,[{c, <<"d">>}]}], {group, '2xx'}),
+
+ Good = #{name => <<"good">>,
+ value => #{a => <<"b">>}},
+ Maybe = #{name => <<"maybe">>,
+ value => #{c => <<"d">>}},
+ List = InitialParameters ++ [Good, Maybe],
+
+ assert_list(List, http_get(Config, "/global-parameters")),
+ http_get(Config, "/global-parameters/oops", ?NOT_FOUND),
+
+ assert_item(Good, http_get(Config, "/global-parameters/good", ?OK)),
+ assert_item(Maybe, http_get(Config, "/global-parameters/maybe", ?OK)),
+
+ http_delete(Config, "/global-parameters/good", {group, '2xx'}),
+ http_delete(Config, "/global-parameters/maybe", {group, '2xx'}),
+ http_delete(Config, "/global-parameters/bad", ?NOT_FOUND),
+
+ InitialCount = length(InitialParameters),
+ InitialCount = length(http_get(Config, "/global-parameters")),
+ passed.
+
+policy_test(Config) ->
+ register_parameters_and_policy_validator(Config),
+ PolicyPos = #{vhost => <<"/">>,
+ name => <<"policy_pos">>,
+ pattern => <<".*">>,
+ definition => #{testpos => [1,2,3]},
+ priority => 10},
+ PolicyEven = #{vhost => <<"/">>,
+ name => <<"policy_even">>,
+ pattern => <<".*">>,
+ definition => #{testeven => [1,2,3,4]},
+ priority => 10},
+ http_put(Config,
+ "/policies/%2F/policy_pos",
+ maps:remove(key, PolicyPos),
+ {group, '2xx'}),
+ http_put(Config,
+ "/policies/%2F/policy_even",
+ maps:remove(key, PolicyEven),
+ {group, '2xx'}),
+ assert_item(PolicyPos, http_get(Config, "/policies/%2F/policy_pos", ?OK)),
+ assert_item(PolicyEven, http_get(Config, "/policies/%2F/policy_even", ?OK)),
+ List = [PolicyPos, PolicyEven],
+ assert_list(List, http_get(Config, "/policies", ?OK)),
+ assert_list(List, http_get(Config, "/policies/%2F", ?OK)),
+
+ http_delete(Config, "/policies/%2F/policy_pos", {group, '2xx'}),
+ http_delete(Config, "/policies/%2F/policy_even", {group, '2xx'}),
+ 0 = length(http_get(Config, "/policies")),
+ 0 = length(http_get(Config, "/policies/%2F")),
+ unregister_parameters_and_policy_validator(Config),
+ passed.
+
+policy_permissions_test(Config) ->
+ register_parameters_and_policy_validator(Config),
+
+ http_put(Config, "/users/admin", [{password, <<"admin">>},
+ {tags, <<"administrator">>}], {group, '2xx'}),
+ http_put(Config, "/users/mon", [{password, <<"mon">>},
+ {tags, <<"monitoring">>}], {group, '2xx'}),
+ http_put(Config, "/users/policy", [{password, <<"policy">>},
+ {tags, <<"policymaker">>}], {group, '2xx'}),
+ http_put(Config, "/users/mgmt", [{password, <<"mgmt">>},
+ {tags, <<"management">>}], {group, '2xx'}),
+ Perms = [{configure, <<".*">>},
+ {write, <<".*">>},
+ {read, <<".*">>}],
+ http_put(Config, "/vhosts/v", none, {group, '2xx'}),
+ http_put(Config, "/permissions/v/admin", Perms, {group, '2xx'}),
+ http_put(Config, "/permissions/v/mon", Perms, {group, '2xx'}),
+ http_put(Config, "/permissions/v/policy", Perms, {group, '2xx'}),
+ http_put(Config, "/permissions/v/mgmt", Perms, {group, '2xx'}),
+
+ Policy = [{pattern, <<".*">>},
+ {definition, [{<<"ha-mode">>, <<"all">>}]}],
+ Param = [{value, <<"">>}],
+
+ http_put(Config, "/policies/%2F/HA", Policy, {group, '2xx'}),
+ http_put(Config, "/parameters/test/%2F/good", Param, {group, '2xx'}),
+
+ Pos = fun (U) ->
+ http_put(Config, "/policies/v/HA", Policy, U, U, {group, '2xx'}),
+ http_put(Config, "/parameters/test/v/good", Param, U, U, {group, '2xx'}),
+ http_get(Config, "/policies", U, U, {group, '2xx'}),
+ http_get(Config, "/parameters/test", U, U, {group, '2xx'}),
+ http_get(Config, "/parameters", U, U, {group, '2xx'}),
+ http_get(Config, "/policies/v", U, U, {group, '2xx'}),
+ http_get(Config, "/parameters/test/v", U, U, {group, '2xx'}),
+ http_get(Config, "/policies/v/HA", U, U, {group, '2xx'}),
+ http_get(Config, "/parameters/test/v/good", U, U, {group, '2xx'})
+ end,
+ Neg = fun (U) ->
+ http_put(Config, "/policies/v/HA", Policy, U, U, ?NOT_AUTHORISED),
+ http_put(Config,
+ "/parameters/test/v/good", Param, U, U, ?NOT_AUTHORISED),
+ http_put(Config,
+ "/parameters/test/v/admin", Param, U, U, ?NOT_AUTHORISED),
+ %% Policies are read-only for management and monitoring.
+ http_get(Config, "/policies", U, U, ?OK),
+ http_get(Config, "/policies/v", U, U, ?OK),
+ http_get(Config, "/parameters", U, U, ?NOT_AUTHORISED),
+ http_get(Config, "/parameters/test", U, U, ?NOT_AUTHORISED),
+ http_get(Config, "/parameters/test/v", U, U, ?NOT_AUTHORISED),
+ http_get(Config, "/policies/v/HA", U, U, ?NOT_AUTHORISED),
+ http_get(Config, "/parameters/test/v/good", U, U, ?NOT_AUTHORISED)
+ end,
+ AlwaysNeg =
+ fun (U) ->
+ http_put(Config, "/policies/%2F/HA", Policy, U, U, ?NOT_AUTHORISED),
+ http_put(Config, "/parameters/test/%2F/good", Param, U, U, ?NOT_AUTHORISED),
+ http_get(Config, "/policies/%2F/HA", U, U, ?NOT_AUTHORISED),
+ http_get(Config, "/parameters/test/%2F/good", U, U, ?NOT_AUTHORISED)
+ end,
+ AdminPos =
+ fun (U) ->
+ http_put(Config, "/policies/%2F/HA", Policy, U, U, {group, '2xx'}),
+ http_put(Config, "/parameters/test/%2F/good", Param, U, U, {group, '2xx'}),
+ http_get(Config, "/policies/%2F/HA", U, U, {group, '2xx'}),
+ http_get(Config, "/parameters/test/%2F/good", U, U, {group, '2xx'})
+ end,
+
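+    %% In summary: management/monitoring may only read policies (Neg);
+    %% policymaker and administrator may write where they have vhost access
+    %% (Pos); none of mon/mgmt/policy may touch %2F, where they were granted
+    %% no permissions (AlwaysNeg), while administrators may (AdminPos).
+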
+ [Neg(U) || U <- ["mon", "mgmt"]],
+ [Pos(U) || U <- ["admin", "policy"]],
+ [AlwaysNeg(U) || U <- ["mon", "mgmt", "policy"]],
+ [AdminPos(U) || U <- ["admin"]],
+
+ %% This one is deliberately different between admin and policymaker.
+ http_put(Config, "/parameters/test/v/admin", Param, "admin", "admin", {group, '2xx'}),
+ http_put(Config, "/parameters/test/v/admin", Param, "policy", "policy",
+ ?BAD_REQUEST),
+
+ http_delete(Config, "/vhosts/v", {group, '2xx'}),
+ http_delete(Config, "/users/admin", {group, '2xx'}),
+ http_delete(Config, "/users/mon", {group, '2xx'}),
+ http_delete(Config, "/users/policy", {group, '2xx'}),
+ http_delete(Config, "/users/mgmt", {group, '2xx'}),
+ http_delete(Config, "/policies/%2F/HA", {group, '2xx'}),
+
+ unregister_parameters_and_policy_validator(Config),
+ passed.
+
+issue67_test(Config) ->
+ {ok, {{_, 401, _}, Headers, _}} = req(Config, get, "/queues",
+ [auth_header("user_no_access", "password_no_access")]),
+ ?assertEqual("application/json",
+                 proplists:get_value("content-type", Headers)),
+ passed.
+
+extensions_test(Config) ->
+ [#{javascript := <<"dispatcher.js">>}] = http_get(Config, "/extensions", ?OK),
+ passed.
+
+cors_test(Config) ->
+ %% With CORS disabled. No header should be received.
+ R = req(Config, get, "/overview", [auth_header("guest", "guest")]),
+ io:format("CORS test R: ~p~n", [R]),
+ {ok, {_, HdNoCORS, _}} = R,
+ io:format("CORS test HdNoCORS: ~p~n", [HdNoCORS]),
+ false = lists:keymember("access-control-allow-origin", 1, HdNoCORS),
+ %% The Vary header should include "Origin" regardless of CORS configuration.
+ {_, "accept, accept-encoding, origin"} = lists:keyfind("vary", 1, HdNoCORS),
+ %% Enable CORS.
+ rabbit_ct_broker_helpers:rpc(Config, 0, application, set_env, [rabbitmq_management, cors_allow_origins, ["https://rabbitmq.com"]]),
+ %% We should only receive allow-origin and allow-credentials from GET.
+ {ok, {_, HdGetCORS, _}} = req(Config, get, "/overview",
+ [{"origin", "https://rabbitmq.com"}, auth_header("guest", "guest")]),
+ true = lists:keymember("access-control-allow-origin", 1, HdGetCORS),
+ true = lists:keymember("access-control-allow-credentials", 1, HdGetCORS),
+ false = lists:keymember("access-control-expose-headers", 1, HdGetCORS),
+ false = lists:keymember("access-control-max-age", 1, HdGetCORS),
+ false = lists:keymember("access-control-allow-methods", 1, HdGetCORS),
+ false = lists:keymember("access-control-allow-headers", 1, HdGetCORS),
+ %% We should receive allow-origin, allow-credentials and allow-methods from OPTIONS.
+ {ok, {{_, 200, _}, HdOptionsCORS, _}} = req(Config, options, "/overview",
+ [{"origin", "https://rabbitmq.com"}]),
+ true = lists:keymember("access-control-allow-origin", 1, HdOptionsCORS),
+ true = lists:keymember("access-control-allow-credentials", 1, HdOptionsCORS),
+ false = lists:keymember("access-control-expose-headers", 1, HdOptionsCORS),
+ true = lists:keymember("access-control-max-age", 1, HdOptionsCORS),
+ true = lists:keymember("access-control-allow-methods", 1, HdOptionsCORS),
+ false = lists:keymember("access-control-allow-headers", 1, HdOptionsCORS),
+ %% We should receive allow-headers when request-headers is sent.
+ {ok, {_, HdAllowHeadersCORS, _}} = req(Config, options, "/overview",
+ [{"origin", "https://rabbitmq.com"},
+ auth_header("guest", "guest"),
+ {"access-control-request-headers", "x-piggy-bank"}]),
+ {_, "x-piggy-bank"} = lists:keyfind("access-control-allow-headers", 1, HdAllowHeadersCORS),
+ %% Disable preflight request caching.
+ rabbit_ct_broker_helpers:rpc(Config, 0, application, set_env, [rabbitmq_management, cors_max_age, undefined]),
+ %% We shouldn't receive max-age anymore.
+ {ok, {_, HdNoMaxAgeCORS, _}} = req(Config, options, "/overview",
+ [{"origin", "https://rabbitmq.com"}, auth_header("guest", "guest")]),
+ false = lists:keymember("access-control-max-age", 1, HdNoMaxAgeCORS),
+
+ %% Check OPTIONS method in all paths
+ check_cors_all_endpoints(Config),
+ %% Disable CORS again.
+ rabbit_ct_broker_helpers:rpc(Config, 0, application, set_env, [rabbitmq_management, cors_allow_origins, []]),
+ passed.
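+
+%% For reference, the preflight exchange exercised in cors_test/1 looks
+%% roughly like this (hypothetical request; header values abridged):
+%%
+%%   OPTIONS /api/overview HTTP/1.1
+%%   origin: https://rabbitmq.com
+%%   access-control-request-headers: x-piggy-bank
+%%
+%%   HTTP/1.1 200 OK
+%%   access-control-allow-origin: https://rabbitmq.com
+%%   access-control-allow-credentials: true
+%%   access-control-allow-headers: x-piggy-bank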
+
+check_cors_all_endpoints(Config) ->
+ Endpoints = get_all_http_endpoints(),
+
+ [begin
+ ct:pal("Options for ~p~n", [EP]),
+ {ok, {{_, 200, _}, _, _}} = req(Config, options, EP, [{"origin", "https://rabbitmq.com"}])
+ end
+ || EP <- Endpoints].
+
+get_all_http_endpoints() ->
+ [ Path || {Path, _, _} <- rabbit_mgmt_dispatcher:dispatcher() ].
+
+vhost_limits_list_test(Config) ->
+ [] = http_get(Config, "/vhost-limits", ?OK),
+
+ http_get(Config, "/vhost-limits/limit_test_vhost_1", ?NOT_FOUND),
+ rabbit_ct_broker_helpers:add_vhost(Config, <<"limit_test_vhost_1">>),
+
+ [] = http_get(Config, "/vhost-limits/limit_test_vhost_1", ?OK),
+ http_get(Config, "/vhost-limits/limit_test_vhost_2", ?NOT_FOUND),
+
+ rabbit_ct_broker_helpers:add_vhost(Config, <<"limit_test_vhost_2">>),
+
+ [] = http_get(Config, "/vhost-limits/limit_test_vhost_2", ?OK),
+
+ Limits1 = [#{vhost => <<"limit_test_vhost_1">>,
+ value => #{'max-connections' => 100, 'max-queues' => 100}}],
+ Limits2 = [#{vhost => <<"limit_test_vhost_2">>,
+ value => #{'max-connections' => 200}}],
+
+ Expected = Limits1 ++ Limits2,
+
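+ %% Vhost limits are persisted as the per-vhost runtime parameter
+ %% <<"vhost-limits">>/<<"limits">>, so the atom keys above are converted to
+ %% binaries before being set.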
+ lists:map(
+ fun(#{vhost := VHost, value := Val}) ->
+ Param = [ {atom_to_binary(K, utf8),V} || {K,V} <- maps:to_list(Val) ],
+ ok = rabbit_ct_broker_helpers:set_parameter(Config, 0, VHost, <<"vhost-limits">>, <<"limits">>, Param)
+ end,
+ Expected),
+
+ Expected = http_get(Config, "/vhost-limits", ?OK),
+ Limits1 = http_get(Config, "/vhost-limits/limit_test_vhost_1", ?OK),
+ Limits2 = http_get(Config, "/vhost-limits/limit_test_vhost_2", ?OK),
+
+ NoVhostUser = <<"no_vhost_user">>,
+ rabbit_ct_broker_helpers:add_user(Config, NoVhostUser),
+ rabbit_ct_broker_helpers:set_user_tags(Config, 0, NoVhostUser, [management]),
+ [] = http_get(Config, "/vhost-limits", NoVhostUser, NoVhostUser, ?OK),
+ http_get(Config, "/vhost-limits/limit_test_vhost_1", NoVhostUser, NoVhostUser, ?NOT_AUTHORISED),
+ http_get(Config, "/vhost-limits/limit_test_vhost_2", NoVhostUser, NoVhostUser, ?NOT_AUTHORISED),
+
+ Vhost1User = <<"limit_test_vhost_1_user">>,
+ rabbit_ct_broker_helpers:add_user(Config, Vhost1User),
+ rabbit_ct_broker_helpers:set_user_tags(Config, 0, Vhost1User, [management]),
+ rabbit_ct_broker_helpers:set_full_permissions(Config, Vhost1User, <<"limit_test_vhost_1">>),
+ Limits1 = http_get(Config, "/vhost-limits", Vhost1User, Vhost1User, ?OK),
+ Limits1 = http_get(Config, "/vhost-limits/limit_test_vhost_1", Vhost1User, Vhost1User, ?OK),
+ http_get(Config, "/vhost-limits/limit_test_vhost_2", Vhost1User, Vhost1User, ?NOT_AUTHORISED),
+
+ Vhost2User = <<"limit_test_vhost_2_user">>,
+ rabbit_ct_broker_helpers:add_user(Config, Vhost2User),
+ rabbit_ct_broker_helpers:set_user_tags(Config, 0, Vhost2User, [management]),
+ rabbit_ct_broker_helpers:set_full_permissions(Config, Vhost2User, <<"limit_test_vhost_2">>),
+ Limits2 = http_get(Config, "/vhost-limits", Vhost2User, Vhost2User, ?OK),
+ http_get(Config, "/vhost-limits/limit_test_vhost_1", Vhost2User, Vhost2User, ?NOT_AUTHORISED),
+ Limits2 = http_get(Config, "/vhost-limits/limit_test_vhost_2", Vhost2User, Vhost2User, ?OK).
+
+vhost_limit_set_test(Config) ->
+ [] = http_get(Config, "/vhost-limits", ?OK),
+ rabbit_ct_broker_helpers:add_vhost(Config, <<"limit_test_vhost_1">>),
+ [] = http_get(Config, "/vhost-limits/limit_test_vhost_1", ?OK),
+
+ %% Set a limit
+ http_put(Config, "/vhost-limits/limit_test_vhost_1/max-queues", [{value, 100}], ?NO_CONTENT),
+
+ Limits_Queues = [#{vhost => <<"limit_test_vhost_1">>,
+ value => #{'max-queues' => 100}}],
+
+ Limits_Queues = http_get(Config, "/vhost-limits/limit_test_vhost_1", ?OK),
+
+ %% Set another limit
+ http_put(Config, "/vhost-limits/limit_test_vhost_1/max-connections", [{value, 200}], ?NO_CONTENT),
+
+ Limits_Queues_Connections = [#{vhost => <<"limit_test_vhost_1">>,
+ value => #{'max-connections' => 200, 'max-queues' => 100}}],
+
+ Limits_Queues_Connections = http_get(Config, "/vhost-limits/limit_test_vhost_1", ?OK),
+
+ Limits1 = [#{vhost => <<"limit_test_vhost_1">>,
+ value => #{'max-connections' => 200, 'max-queues' => 100}}],
+ Limits1 = http_get(Config, "/vhost-limits", ?OK),
+
+ %% Update a limit
+ http_put(Config, "/vhost-limits/limit_test_vhost_1/max-connections", [{value, 1000}], ?NO_CONTENT),
+ Limits2 = [#{vhost => <<"limit_test_vhost_1">>,
+ value => #{'max-connections' => 1000, 'max-queues' => 100}}],
+ Limits2 = http_get(Config, "/vhost-limits/limit_test_vhost_1", ?OK),
+
+ Vhost1User = <<"limit_test_vhost_1_user">>,
+ rabbit_ct_broker_helpers:add_user(Config, Vhost1User),
+ rabbit_ct_broker_helpers:set_user_tags(Config, 0, Vhost1User, [management]),
+ rabbit_ct_broker_helpers:set_full_permissions(Config, Vhost1User, <<"limit_test_vhost_1">>),
+
+ Limits3 = [#{vhost => <<"limit_test_vhost_1">>,
+ value => #{'max-connections' => 1000,
+ 'max-queues' => 100}}],
+ Limits3 = http_get(Config, "/vhost-limits/limit_test_vhost_1", Vhost1User, Vhost1User, ?OK),
+
+ %% Only admin can update limits
+ http_put(Config, "/vhost-limits/limit_test_vhost_1/max-connections", [{value, 300}], ?NO_CONTENT),
+
+ %% Clear a limit
+ http_delete(Config, "/vhost-limits/limit_test_vhost_1/max-connections", ?NO_CONTENT),
+ Limits4 = [#{vhost => <<"limit_test_vhost_1">>,
+ value => #{'max-queues' => 100}}],
+ Limits4 = http_get(Config, "/vhost-limits/limit_test_vhost_1", ?OK),
+
+ %% Only admin can clear limits
+ http_delete(Config, "/vhost-limits/limit_test_vhost_1/max-queues", Vhost1User, Vhost1User, ?NOT_AUTHORISED),
+
+ %% Unknown limit error
+ http_put(Config, "/vhost-limits/limit_test_vhost_1/max-channels", [{value, 200}], ?BAD_REQUEST).
+
+user_limits_list_test(Config) ->
+ ?assertEqual([], http_get(Config, "/user-limits", ?OK)),
+
+ Vhost1 = <<"limit_test_vhost_1">>,
+ rabbit_ct_broker_helpers:add_vhost(Config, <<"limit_test_vhost_1">>),
+
+ http_get(Config, "/user-limits/limit_test_user_1", ?NOT_FOUND),
+
+ User1 = <<"limit_test_user_1">>,
+ rabbit_ct_broker_helpers:add_user(Config, User1),
+ rabbit_ct_broker_helpers:set_user_tags(Config, 0, User1, [management]),
+ rabbit_ct_broker_helpers:set_full_permissions(Config, User1, Vhost1),
+
+ ?assertEqual([], http_get(Config, "/user-limits/limit_test_user_1", ?OK)),
+ http_get(Config, "/user-limits/limit_test_user_2", ?NOT_FOUND),
+
+ User2 = <<"limit_test_user_2">>,
+ rabbit_ct_broker_helpers:add_user(Config, User2),
+ rabbit_ct_broker_helpers:set_user_tags(Config, 0, User2, [management]),
+ rabbit_ct_broker_helpers:set_full_permissions(Config, User2, Vhost1),
+
+ ?assertEqual([], http_get(Config, "/user-limits/limit_test_user_2", ?OK)),
+
+ Limits1 = [
+ #{
+ user => User1,
+ value => #{
+ 'max-connections' => 100,
+ 'max-channels' => 100
+ }
+ }],
+ Limits2 = [
+ #{
+ user => User2,
+ value => #{
+ 'max-connections' => 200
+ }
+ }],
+
+ Expected = Limits1 ++ Limits2,
+
+ lists:map(
+ fun(#{user := Username, value := Val}) ->
+ rabbit_ct_broker_helpers:set_user_limits(Config, 0, Username, Val)
+ end,
+ Expected),
+
+ rabbit_ct_helpers:await_condition(
+ fun() ->
+ Expected =:= http_get(Config, "/user-limits", ?OK)
+ end),
+ Limits1 = http_get(Config, "/user-limits/limit_test_user_1", ?OK),
+ Limits2 = http_get(Config, "/user-limits/limit_test_user_2", ?OK),
+
+ %% Clear limits and assert
+ rabbit_ct_broker_helpers:clear_user_limits(Config, 0, User1,
+ <<"max-connections">>),
+
+ Limits3 = [#{user => User1, value => #{'max-channels' => 100}}],
+ ?assertEqual(Limits3, http_get(Config, "/user-limits/limit_test_user_1", ?OK)),
+
+ rabbit_ct_broker_helpers:clear_user_limits(Config, 0, User1,
+ <<"max-channels">>),
+ ?assertEqual([], http_get(Config, "/user-limits/limit_test_user_1", ?OK)),
+
+ rabbit_ct_broker_helpers:clear_user_limits(Config, 0, <<"limit_test_user_2">>,
+ <<"all">>),
+ ?assertEqual([], http_get(Config, "/user-limits/limit_test_user_2", ?OK)),
+
+ %% Limit user with no vhost
+ NoVhostUser = <<"no_vhost_user">>,
+ rabbit_ct_broker_helpers:add_user(Config, NoVhostUser),
+ rabbit_ct_broker_helpers:set_user_tags(Config, 0, NoVhostUser, [management]),
+
+ Limits4 = #{
+ user => NoVhostUser,
+ value => #{
+ 'max-connections' => 150,
+ 'max-channels' => 150
+ }
+ },
+ rabbit_ct_broker_helpers:set_user_limits(Config, 0, NoVhostUser, maps:get(value, Limits4)),
+
+ ?assertEqual([Limits4], http_get(Config, "/user-limits/no_vhost_user", ?OK)).
+
+user_limit_set_test(Config) ->
+ ?assertEqual([], http_get(Config, "/user-limits", ?OK)),
+
+ User1 = <<"limit_test_user_1">>,
+ rabbit_ct_broker_helpers:add_user(Config, User1),
+
+ ?assertEqual([], http_get(Config, "/user-limits/limit_test_user_1", ?OK)),
+
+ %% Set a user limit
+ http_put(Config, "/user-limits/limit_test_user_1/max-channels", [{value, 100}], ?NO_CONTENT),
+
+ MaxChannelsLimit = [#{user => User1, value => #{'max-channels' => 100}}],
+ ?assertEqual(MaxChannelsLimit, http_get(Config, "/user-limits/limit_test_user_1", ?OK)),
+
+ %% Set another user limit
+ http_put(Config, "/user-limits/limit_test_user_1/max-connections", [{value, 200}], ?NO_CONTENT),
+
+ MaxConnectionsAndChannelsLimit = [
+ #{
+ user => User1,
+ value => #{
+ 'max-connections' => 200,
+ 'max-channels' => 100
+ }
+ }
+ ],
+ ?assertEqual(MaxConnectionsAndChannelsLimit, http_get(Config, "/user-limits/limit_test_user_1", ?OK)),
+
+ Limits1 = [
+ #{
+ user => User1,
+ value => #{
+ 'max-connections' => 200,
+ 'max-channels' => 100
+ }
+ }],
+ ?assertEqual(Limits1, http_get(Config, "/user-limits", ?OK)),
+
+ %% Update a user limit
+ http_put(Config, "/user-limits/limit_test_user_1/max-connections", [{value, 1000}], ?NO_CONTENT),
+ Limits2 = [
+ #{
+ user => User1,
+ value => #{
+ 'max-connections' => 1000,
+ 'max-channels' => 100
+ }
+ }],
+ ?assertEqual(Limits2, http_get(Config, "/user-limits/limit_test_user_1", ?OK)),
+
+ Vhost1 = <<"limit_test_vhost_1">>,
+ rabbit_ct_broker_helpers:add_vhost(Config, Vhost1),
+
+ Vhost1User = <<"limit_test_vhost_1_user">>,
+ rabbit_ct_broker_helpers:add_user(Config, Vhost1User),
+ rabbit_ct_broker_helpers:set_user_tags(Config, 0, Vhost1User, [management]),
+ rabbit_ct_broker_helpers:set_full_permissions(Config, Vhost1User, Vhost1),
+
+ Limits3 = [
+ #{
+ user => User1,
+ value => #{
+ 'max-connections' => 1000,
+ 'max-channels' => 100
+ }
+ }],
+ ?assertEqual(Limits3, http_get(Config, "/user-limits/limit_test_user_1", Vhost1User, Vhost1User, ?OK)),
+
+ %% Clear a limit
+ http_delete(Config, "/user-limits/limit_test_user_1/max-connections", ?NO_CONTENT),
+ Limits4 = [#{user => User1, value => #{'max-channels' => 100}}],
+ ?assertEqual(Limits4, http_get(Config, "/user-limits/limit_test_user_1", ?OK)),
+
+ %% Only admin can clear limits
+ http_delete(Config, "/user-limits/limit_test_user_1/max-channels", Vhost1User, Vhost1User, ?NOT_AUTHORISED),
+
+ %% Unknown limit error
+ http_put(Config, "/user-limits/limit_test_user_1/max-unknown", [{value, 200}], ?BAD_REQUEST).
+
+rates_test(Config) ->
+ http_put(Config, "/queues/%2F/myqueue", none, {group, '2xx'}),
+ {Conn, Ch} = open_connection_and_channel(Config),
+ Pid = spawn_link(fun() -> publish(Ch) end),
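+ %% publish/1 (one of the helpers below) publishes in a tight loop, every
+ %% 20ms, until it receives stop_publish; this drives the non-zero rates
+ %% asserted on below.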
+
+ Condition = fun() ->
+ Overview = http_get(Config, "/overview"),
+ MsgStats = maps:get(message_stats, Overview),
+ QueueTotals = maps:get(queue_totals, Overview),
+
+ maps:get(messages_ready, QueueTotals) > 0 andalso
+ maps:get(messages, QueueTotals) > 0 andalso
+ maps:get(publish, MsgStats) > 0 andalso
+ maps:get(rate, maps:get(publish_details, MsgStats)) > 0 andalso
+ maps:get(rate, maps:get(messages_ready_details, QueueTotals)) > 0 andalso
+ maps:get(rate, maps:get(messages_details, QueueTotals)) > 0
+ end,
+ rabbit_ct_helpers:await_condition(Condition, 60000),
+ Pid ! stop_publish,
+ close_channel(Ch),
+ close_connection(Conn),
+ http_delete(Config, "/queues/%2F/myqueue", ?NO_CONTENT),
+ passed.
+
+cli_redirect_test(Config) ->
+ assert_permanent_redirect(Config, "cli", "/cli/index.html"),
+ passed.
+
+api_redirect_test(Config) ->
+ assert_permanent_redirect(Config, "api", "/api/index.html"),
+ passed.
+
+stats_redirect_test(Config) ->
+ assert_permanent_redirect(Config, "doc/stats.html", "/api/index.html"),
+ passed.
+
+oauth_test(Config) ->
+ Map1 = http_get(Config, "/auth", ?OK),
+ %% Defaults
+ ?assertEqual(false, maps:get(enable_uaa, Map1)),
+ ?assertEqual(<<>>, maps:get(uaa_client_id, Map1)),
+ ?assertEqual(<<>>, maps:get(uaa_location, Map1)),
+ %% Misconfiguration
+ rabbit_ct_broker_helpers:rpc(Config, 0, application, set_env,
+ [rabbitmq_management, enable_uaa, true]),
+ Map2 = http_get(Config, "/auth", ?OK),
+ ?assertEqual(false, maps:get(enable_uaa, Map2)),
+ ?assertEqual(<<>>, maps:get(uaa_client_id, Map2)),
+ ?assertEqual(<<>>, maps:get(uaa_location, Map2)),
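+ %% enable_uaa is only reported as true once uaa_client_id and uaa_location
+ %% are also set, hence the response above still shows the defaults.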
+ %% Valid config
+ rabbit_ct_broker_helpers:rpc(Config, 0, application, set_env,
+ [rabbitmq_management, uaa_client_id, "rabbit_user"]),
+ rabbit_ct_broker_helpers:rpc(Config, 0, application, set_env,
+ [rabbitmq_management, uaa_location, "http://localhost:8080/uaa"]),
+ Map3 = http_get(Config, "/auth", ?OK),
+ ?assertEqual(true, maps:get(enable_uaa, Map3)),
+ ?assertEqual(<<"rabbit_user">>, maps:get(uaa_client_id, Map3)),
+ ?assertEqual(<<"http://localhost:8080/uaa">>, maps:get(uaa_location, Map3)),
+ %% cleanup
+ rabbit_ct_broker_helpers:rpc(Config, 0, application, unset_env,
+ [rabbitmq_management, enable_uaa]).
+
+login_test(Config) ->
+ http_put(Config, "/users/myuser", [{password, <<"myuser">>},
+ {tags, <<"management">>}], {group, '2xx'}),
+ %% Let's do a post without any other form of authorization
+ {ok, {{_, CodeAct, _}, Headers, _}} =
+ req(Config, 0, post, "/login",
+ [{"content-type", "application/x-www-form-urlencoded"}],
+ <<"username=myuser&password=myuser">>),
+ ?assertEqual(200, CodeAct),
+
+ %% Extract the auth cookie set by the login endpoint
+ [Cookie, _Version] = binary:split(list_to_binary(proplists:get_value("set-cookie", Headers)),
+ <<";">>, [global]),
+ [_, Auth] = binary:split(Cookie, <<"=">>, []),
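+ %% The cookie value set by /login can be replayed as a Basic authorization
+ %% header, as the next request demonstrates.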
+
+ %% Request the overview with the auth obtained
+ {ok, {{_, CodeAct1, _}, _, _}} =
+ req(Config, get, "/overview", [{"Authorization", "Basic " ++ binary_to_list(Auth)}]),
+ ?assertEqual(200, CodeAct1),
+
+ %% Let's request a login with an unknown user
+ {ok, {{_, CodeAct2, _}, Headers2, _}} =
+ req(Config, 0, post, "/login",
+ [{"content-type", "application/x-www-form-urlencoded"}],
+ <<"username=misteryusernumber1&password=myuser">>),
+ ?assertEqual(401, CodeAct2),
+ ?assert(not proplists:is_defined("set-cookie", Headers2)),
+
+ http_delete(Config, "/users/myuser", {group, '2xx'}),
+ passed.
+
+csp_headers_test(Config) ->
+ AuthHeader = auth_header("guest", "guest"),
+ Headers = [{"origin", "https://rabbitmq.com"}, AuthHeader],
+ {ok, {_, HdGetCsp0, _}} = req(Config, get, "/whoami", Headers),
+ ?assert(lists:keymember("content-security-policy", 1, HdGetCsp0)),
+ {ok, {_, HdGetCsp1, _}} = req(Config, get_static, "/index.html", Headers),
+ ?assert(lists:keymember("content-security-policy", 1, HdGetCsp1)).
+
+disable_basic_auth_test(Config) ->
+ rabbit_ct_broker_helpers:rpc(Config, 0, application, set_env,
+ [rabbitmq_management, disable_basic_auth, true]),
+ http_get(Config, "/overview", ?NOT_AUTHORISED),
+
+ %% Ensure that a request without auth header does not return a basic auth prompt
+ OverviewResponseHeaders = http_get_no_auth(Config, "/overview", ?NOT_AUTHORISED),
+ ?assertEqual(false, lists:keymember("www-authenticate", 1, OverviewResponseHeaders)),
+
+ http_get(Config, "/nodes", ?NOT_AUTHORISED),
+ http_get(Config, "/vhosts", ?NOT_AUTHORISED),
+ http_get(Config, "/vhost-limits", ?NOT_AUTHORISED),
+ http_put(Config, "/queues/%2F/myqueue", none, ?NOT_AUTHORISED),
+ Policy = [{pattern, <<".*">>},
+ {definition, [{<<"ha-mode">>, <<"all">>}]}],
+ http_put(Config, "/policies/%2F/HA", Policy, ?NOT_AUTHORISED),
+ http_delete(Config, "/queues/%2F/myqueue", ?NOT_AUTHORISED),
+ http_get(Config, "/definitions", ?NOT_AUTHORISED),
+ http_post(Config, "/definitions", [], ?NOT_AUTHORISED),
+ rabbit_ct_broker_helpers:rpc(Config, 0, application, set_env,
+ [rabbitmq_management, disable_basic_auth, 50]),
+ %% Defaults to 'false' when config is invalid
+ http_get(Config, "/overview", ?OK).
+
+auth_attempts_test(Config) ->
+ rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_core_metrics, reset_auth_attempt_metrics, []),
+ {Conn, _Ch} = open_connection_and_channel(Config),
+ close_connection(Conn),
+ [NodeData] = http_get(Config, "/nodes"),
+ Node = binary_to_atom(maps:get(name, NodeData), utf8),
+ Map = http_get(Config, "/auth/attempts/" ++ atom_to_list(Node), ?OK),
+ Http = get_auth_attempts(<<"http">>, Map),
+ Amqp091 = get_auth_attempts(<<"amqp091">>, Map),
+ ?assertEqual(false, maps:is_key(remote_address, Amqp091)),
+ ?assertEqual(false, maps:is_key(username, Amqp091)),
+ ?assertEqual(1, maps:get(auth_attempts, Amqp091)),
+ ?assertEqual(1, maps:get(auth_attempts_succeeded, Amqp091)),
+ ?assertEqual(0, maps:get(auth_attempts_failed, Amqp091)),
+ ?assertEqual(false, maps:is_key(remote_address, Http)),
+ ?assertEqual(false, maps:is_key(username, Http)),
+ ?assertEqual(2, maps:get(auth_attempts, Http)),
+ ?assertEqual(2, maps:get(auth_attempts_succeeded, Http)),
+ ?assertEqual(0, maps:get(auth_attempts_failed, Http)),
+
+ rabbit_ct_broker_helpers:rpc(Config, 0, application, set_env,
+ [rabbit, track_auth_attempt_source, true]),
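+ %% With source tracking enabled, per-remote-address and per-username rows
+ %% become available under /auth/attempts/<node>/source.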
+ {Conn2, _Ch2} = open_connection_and_channel(Config),
+ close_connection(Conn2),
+ Map2 = http_get(Config, "/auth/attempts/" ++ atom_to_list(Node) ++ "/source", ?OK),
+ Map3 = http_get(Config, "/auth/attempts/" ++ atom_to_list(Node), ?OK),
+ Http2 = get_auth_attempts(<<"http">>, Map2),
+ Http3 = get_auth_attempts(<<"http">>, Map3),
+ Amqp091_2 = get_auth_attempts(<<"amqp091">>, Map2),
+ Amqp091_3 = get_auth_attempts(<<"amqp091">>, Map3),
+ ?assertEqual(<<"127.0.0.1">>, maps:get(remote_address, Http2)),
+ ?assertEqual(<<"guest">>, maps:get(username, Http2)),
+ ?assertEqual(1, maps:get(auth_attempts, Http2)),
+ ?assertEqual(1, maps:get(auth_attempts_succeeded, Http2)),
+ ?assertEqual(0, maps:get(auth_attempts_failed, Http2)),
+
+ ?assertEqual(false, maps:is_key(remote_address, Http3)),
+ ?assertEqual(false, maps:is_key(username, Http3)),
+ ?assertEqual(4, maps:get(auth_attempts, Http3)),
+ ?assertEqual(4, maps:get(auth_attempts_succeeded, Http3)),
+ ?assertEqual(0, maps:get(auth_attempts_failed, Http3)),
+
+ ?assertEqual(true, <<>> =/= maps:get(remote_address, Amqp091_2)),
+ ?assertEqual(<<"guest">>, maps:get(username, Amqp091_2)),
+ ?assertEqual(1, maps:get(auth_attempts, Amqp091_2)),
+ ?assertEqual(1, maps:get(auth_attempts_succeeded, Amqp091_2)),
+ ?assertEqual(0, maps:get(auth_attempts_failed, Amqp091_2)),
+
+ ?assertEqual(false, maps:is_key(remote_address, Amqp091_3)),
+ ?assertEqual(false, maps:is_key(username, Amqp091_3)),
+ ?assertEqual(2, maps:get(auth_attempts, Amqp091_3)),
+ ?assertEqual(2, maps:get(auth_attempts_succeeded, Amqp091_3)),
+ ?assertEqual(0, maps:get(auth_attempts_failed, Amqp091_3)),
+
+ passed.
+
+%% -------------------------------------------------------------------
+%% Helpers.
+%% -------------------------------------------------------------------
+
+msg(Key, Headers, Body) ->
+ msg(Key, Headers, Body, <<"string">>).
+
+msg(Key, Headers, Body, Enc) ->
+ #{exchange => <<"">>,
+ routing_key => Key,
+ properties => #{delivery_mode => 2,
+ headers => Headers},
+ payload => Body,
+ payload_encoding => Enc}.
+
+local_port(Conn) ->
+ [{sock, Sock}] = amqp_connection:info(Conn, [sock]),
+ {ok, Port} = inet:port(Sock),
+ Port.
+
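+%% spawn_invalid/2 opens N raw TCP connections to the AMQP port and sends
+%% garbage. The broker is expected to reply with the AMQP protocol header
+%% before closing such connections; each spawned process reports back via
+%% receive_msg/1 and the caller collects the replies with wait_for_answers/1.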
+spawn_invalid(_Config, 0) ->
+ ok;
+spawn_invalid(Config, N) ->
+ Self = self(),
+ spawn(fun() ->
+ timer:sleep(rand:uniform(250)),
+ {ok, Sock} = gen_tcp:connect("localhost", amqp_port(Config), [list]),
+ ok = gen_tcp:send(Sock, "Some Data"),
+ receive_msg(Self)
+ end),
+ spawn_invalid(Config, N-1).
+
+receive_msg(Self) ->
+ receive
+ {tcp, _, [$A, $M, $Q, $P | _]} ->
+ Self ! done
+ after
+ 60000 ->
+ Self ! no_reply
+ end.
+
+wait_for_answers(0) ->
+ ok;
+wait_for_answers(N) ->
+ receive
+ done ->
+ wait_for_answers(N-1);
+ no_reply ->
+ throw(no_reply)
+ end.
+
+publish(Ch) ->
+ amqp_channel:call(Ch, #'basic.publish'{exchange = <<"">>,
+ routing_key = <<"myqueue">>},
+ #amqp_msg{payload = <<"message">>}),
+ receive
+ stop_publish ->
+ ok
+ after 20 ->
+ publish(Ch)
+ end.
+
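+%% Polls Fun up to N times, sleeping between attempts, and fails the test if
+%% the condition never holds. On success it sleeps a little longer,
+%% presumably to let collected stats settle before the caller asserts on them.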
+wait_until(_Fun, 0) ->
+ ?assert(wait_failed);
+wait_until(Fun, N) ->
+ case Fun() of
+ true ->
+ timer:sleep(1500);
+ false ->
+ timer:sleep(?COLLECT_INTERVAL + 100),
+ wait_until(Fun, N - 1)
+ end.
+
+http_post_json(Config, Path, Body, Assertion) ->
+ http_upload_raw(Config, post, Path, Body, "guest", "guest",
+ Assertion, [{"content-type", "application/json"}]).
+
+%% @doc Encode files as an HTTP POST multipart/form-data body.
+%% @reference Inspired by <a href="http://code.activestate.com/recipes/146306/">Python implementation</a>.
+format_multipart_filedata(Boundary, Files) ->
+ FileParts = lists:map(fun({FieldName, FileName, FileContent}) ->
+ [lists:concat(["--", Boundary]),
+ lists:concat(["content-disposition: form-data; name=\"", atom_to_list(FieldName), "\"; filename=\"", FileName, "\""]),
+ lists:concat(["content-type: ", "application/octet-stream"]),
+ "",
+ FileContent]
+ end, Files),
+ FileParts2 = lists:append(FileParts),
+ EndingParts = [lists:concat(["--", Boundary, "--"]), ""],
+ Parts = lists:append([FileParts2, EndingParts]),
+ string:join(Parts, "\r\n").
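+
+%% Example (hypothetical values):
+%%   format_multipart_filedata("xyzboundary", [{file, "defs.json", "{}"}])
+%% produces parts separated by "--xyzboundary" and terminated by
+%% "--xyzboundary--".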
+
+get_auth_attempts(Protocol, Map) ->
+ [A] = lists:filter(fun(#{protocol := P}) ->
+ P == Protocol
+ end, Map),
+ A.
diff --git a/deps/rabbitmq_management/test/rabbit_mgmt_http_health_checks_SUITE.erl b/deps/rabbitmq_management/test/rabbit_mgmt_http_health_checks_SUITE.erl
new file mode 100644
index 0000000000..8ef17ed29c
--- /dev/null
+++ b/deps/rabbitmq_management/test/rabbit_mgmt_http_health_checks_SUITE.erl
@@ -0,0 +1,399 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2016-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mgmt_http_health_checks_SUITE).
+
+-include("rabbit_mgmt.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("rabbitmq_ct_helpers/include/rabbit_mgmt_test.hrl").
+
+-import(rabbit_mgmt_test_util, [http_get/3,
+ req/4,
+ auth_header/2]).
+
+-define(COLLECT_INTERVAL, 1000).
+-define(PATH_PREFIX, "/custom-prefix").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, all_tests},
+ {group, single_node}
+ ].
+
+groups() ->
+ [
+ {all_tests, [], all_tests()},
+ {single_node, [], [
+ alarms_test,
+ local_alarms_test,
+ is_quorum_critical_single_node_test,
+ is_mirror_sync_critical_single_node_test]}
+ ].
+
+all_tests() -> [
+ health_checks_test,
+ is_quorum_critical_test,
+ is_mirror_sync_critical_test,
+ virtual_hosts_test,
+ protocol_listener_test,
+ port_listener_test,
+ certificate_expiration_test
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_group(Group, Config0) ->
+ PathConfig = {rabbitmq_management, [{path_prefix, ?PATH_PREFIX}]},
+ Config1 = rabbit_ct_helpers:merge_app_env(Config0, PathConfig),
+ rabbit_ct_helpers:log_environment(),
+ inets:start(),
+ ClusterSize = case Group of
+ all_tests -> 3;
+ single_node -> 1
+ end,
+ NodeConf = [{rmq_nodename_suffix, Group},
+ {rmq_nodes_count, ClusterSize},
+ {tcp_ports_base}],
+ Config2 = rabbit_ct_helpers:set_config(Config1, NodeConf),
+ Ret = rabbit_ct_helpers:run_setup_steps(
+ Config2,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()),
+ case Ret of
+ {skip, _} ->
+ Ret;
+ Config3 ->
+ EnableFF = rabbit_ct_broker_helpers:enable_feature_flag(
+ Config3, quorum_queue),
+ case EnableFF of
+ ok ->
+ Config3;
+ Skip ->
+ end_per_group(Group, Config3),
+ Skip
+ end
+ end.
+
+end_per_group(_, Config) ->
+ inets:stop(),
+ Teardown0 = rabbit_ct_client_helpers:teardown_steps(),
+ Teardown1 = rabbit_ct_broker_helpers:teardown_steps(),
+ Steps = Teardown0 ++ Teardown1,
+ rabbit_ct_helpers:run_teardown_steps(Config, Steps).
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(is_quorum_critical_test = Testcase, Config) ->
+ [_, Server2, Server3] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ _ = rabbit_ct_broker_helpers:start_node(Config, Server2),
+ _ = rabbit_ct_broker_helpers:start_node(Config, Server3),
+ rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, delete_queues, []),
+ rabbit_ct_helpers:testcase_finished(Config, Testcase);
+end_per_testcase(is_mirror_sync_critical_test = Testcase, Config) ->
+ [_, Server2, Server3] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ _ = rabbit_ct_broker_helpers:start_node(Config, Server2),
+ _ = rabbit_ct_broker_helpers:start_node(Config, Server3),
+ ok = rabbit_ct_broker_helpers:clear_policy(Config, 0, <<"ha">>),
+ rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, delete_queues, []),
+ rabbit_ct_helpers:testcase_finished(Config, Testcase);
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+health_checks_test(Config) ->
+ Port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_mgmt),
+ http_get(Config, "/health/checks/certificate-expiration/1/days", ?OK),
+ http_get(Config, io_lib:format("/health/checks/port-listener/~p", [Port]), ?OK),
+ http_get(Config, "/health/checks/protocol-listener/http", ?OK),
+ http_get(Config, "/health/checks/virtual-hosts", ?OK),
+ http_get(Config, "/health/checks/node-is-mirror-sync-critical", ?OK),
+ http_get(Config, "/health/checks/node-is-quorum-critical", ?OK),
+ passed.
+
+alarms_test(Config) ->
+ Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ rabbit_ct_broker_helpers:clear_all_alarms(Config, Server),
+
+ EndpointPath = "/health/checks/alarms",
+ Check0 = http_get(Config, EndpointPath, ?OK),
+ ?assertEqual(<<"ok">>, maps:get(status, Check0)),
+
+ ok = rabbit_ct_broker_helpers:set_alarm(Config, Server, memory),
+ rabbit_ct_helpers:await_condition(
+ fun() -> rabbit_ct_broker_helpers:get_alarms(Config, Server) =/= [] end
+ ),
+
+ Body = http_get_failed(Config, EndpointPath),
+ ?assertEqual(<<"failed">>, maps:get(<<"status">>, Body)),
+ ?assert(is_list(maps:get(<<"alarms">>, Body))),
+
+ rabbit_ct_broker_helpers:clear_all_alarms(Config, Server),
+ rabbit_ct_helpers:await_condition(
+ fun() -> rabbit_ct_broker_helpers:get_alarms(Config, Server) =:= [] end
+ ),
+ ct:pal("Alarms: ~p", [rabbit_ct_broker_helpers:get_alarms(Config, Server)]),
+
+ passed.
+
+local_alarms_test(Config) ->
+ Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ rabbit_ct_broker_helpers:clear_all_alarms(Config, Server),
+
+ EndpointPath = "/health/checks/local-alarms",
+ Check0 = http_get(Config, EndpointPath, ?OK),
+ ?assertEqual(<<"ok">>, maps:get(status, Check0)),
+
+ ok = rabbit_ct_broker_helpers:set_alarm(Config, Server, file_descriptor_limit),
+ rabbit_ct_helpers:await_condition(
+ fun() -> rabbit_ct_broker_helpers:get_alarms(Config, Server) =/= [] end
+ ),
+
+ Body = http_get_failed(Config, EndpointPath),
+ ?assertEqual(<<"failed">>, maps:get(<<"status">>, Body)),
+ ?assert(is_list(maps:get(<<"alarms">>, Body))),
+
+ rabbit_ct_broker_helpers:clear_all_alarms(Config, Server),
+ rabbit_ct_helpers:await_condition(
+ fun() -> rabbit_ct_broker_helpers:get_local_alarms(Config, Server) =:= [] end
+ ),
+
+ passed.
+
+is_quorum_critical_single_node_test(Config) ->
+ Check0 = http_get(Config, "/health/checks/node-is-quorum-critical", ?OK),
+ ?assertEqual(<<"single node cluster">>, maps:get(reason, Check0)),
+ ?assertEqual(<<"ok">>, maps:get(status, Check0)),
+
+ Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ Args = [{<<"x-queue-type">>, longstr, <<"quorum">>}],
+ QName = <<"is_quorum_critical_single_node_test">>,
+ ?assertEqual({'queue.declare_ok', QName, 0, 0},
+ amqp_channel:call(Ch, #'queue.declare'{queue = QName,
+ durable = true,
+ auto_delete = false,
+ arguments = Args})),
+ Check1 = http_get(Config, "/health/checks/node-is-quorum-critical", ?OK),
+ ?assertEqual(<<"single node cluster">>, maps:get(reason, Check1)),
+
+ passed.
+
+is_quorum_critical_test(Config) ->
+ Check0 = http_get(Config, "/health/checks/node-is-quorum-critical", ?OK),
+ ?assertEqual(false, maps:is_key(reason, Check0)),
+ ?assertEqual(<<"ok">>, maps:get(status, Check0)),
+
+ [Server1, Server2, Server3] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server1),
+ Args = [{<<"x-queue-type">>, longstr, <<"quorum">>}],
+ QName = <<"is_quorum_critical_test">>,
+ ?assertEqual({'queue.declare_ok', QName, 0, 0},
+ amqp_channel:call(Ch, #'queue.declare'{queue = QName,
+ durable = true,
+ auto_delete = false,
+ arguments = Args})),
+ Check1 = http_get(Config, "/health/checks/node-is-quorum-critical", ?OK),
+ ?assertEqual(false, maps:is_key(reason, Check1)),
+
+ ok = rabbit_ct_broker_helpers:stop_node(Config, Server2),
+ ok = rabbit_ct_broker_helpers:stop_node(Config, Server3),
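+ %% With two of the three members down, the surviving node holds the only
+ %% available replica, so losing it would make the quorum queue unavailable:
+ %% the node is reported as quorum critical.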
+
+ Body = http_get_failed(Config, "/health/checks/node-is-quorum-critical"),
+ ?assertEqual(<<"failed">>, maps:get(<<"status">>, Body)),
+ ?assertEqual(true, maps:is_key(<<"reason">>, Body)),
+ [Queue] = maps:get(<<"queues">>, Body),
+ ?assertEqual(QName, maps:get(<<"name">>, Queue)),
+
+ passed.
+
+is_mirror_sync_critical_single_node_test(Config) ->
+ Check0 = http_get(Config, "/health/checks/node-is-mirror-sync-critical", ?OK),
+ ?assertEqual(<<"single node cluster">>, maps:get(reason, Check0)),
+ ?assertEqual(<<"ok">>, maps:get(status, Check0)),
+
+ ok = rabbit_ct_broker_helpers:set_policy(
+ Config, 0, <<"ha">>, <<"is_mirror_sync.*">>, <<"queues">>,
+ [{<<"ha-mode">>, <<"all">>}]),
+ Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server),
+ QName = <<"is_mirror_sync_critical_single_node_test">>,
+ ?assertEqual({'queue.declare_ok', QName, 0, 0},
+ amqp_channel:call(Ch, #'queue.declare'{queue = QName,
+ durable = true,
+ auto_delete = false,
+ arguments = []})),
+ Check1 = http_get(Config, "/health/checks/node-is-mirror-sync-critical", ?OK),
+ ?assertEqual(<<"single node cluster">>, maps:get(reason, Check1)),
+
+ passed.
+
+is_mirror_sync_critical_test(Config) ->
+ Path = "/health/checks/node-is-mirror-sync-critical",
+ Check0 = http_get(Config, Path, ?OK),
+ ?assertEqual(false, maps:is_key(reason, Check0)),
+ ?assertEqual(<<"ok">>, maps:get(status, Check0)),
+
+ ok = rabbit_ct_broker_helpers:set_policy(
+ Config, 0, <<"ha">>, <<"is_mirror_sync.*">>, <<"queues">>,
+ [{<<"ha-mode">>, <<"all">>}]),
+ [Server1, Server2, Server3] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Server1),
+ QName = <<"is_mirror_sync_critical_test">>,
+ ?assertEqual({'queue.declare_ok', QName, 0, 0},
+ amqp_channel:call(Ch, #'queue.declare'{queue = QName,
+ durable = true,
+ auto_delete = false,
+ arguments = []})),
+ rabbit_ct_helpers:await_condition(
+ fun() ->
+ {ok, {{_, Code, _}, _, _}} = req(Config, get, Path, [auth_header("guest", "guest")]),
+ Code == ?OK
+ end),
+ Check1 = http_get(Config, Path, ?OK),
+ ?assertEqual(false, maps:is_key(reason, Check1)),
+
+ ok = rabbit_ct_broker_helpers:stop_node(Config, Server2),
+ ok = rabbit_ct_broker_helpers:stop_node(Config, Server3),
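+ %% With both mirrors down a single synchronised copy of the queue remains,
+ %% so the surviving node is reported as mirror-sync critical.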
+
+ Body = http_get_failed(Config, Path),
+ ?assertEqual(<<"failed">>, maps:get(<<"status">>, Body)),
+ ?assertEqual(true, maps:is_key(<<"reason">>, Body)),
+ [Queue] = maps:get(<<"queues">>, Body),
+ ?assertEqual(QName, maps:get(<<"name">>, Queue)),
+
+ passed.
+
+virtual_hosts_test(Config) ->
+ VHost1 = <<"vhost1">>,
+ VHost2 = <<"vhost2">>,
+ add_vhost(Config, VHost1),
+ add_vhost(Config, VHost2),
+
+ Path = "/health/checks/virtual-hosts",
+ Check0 = http_get(Config, Path, ?OK),
+ ?assertEqual(<<"ok">>, maps:get(status, Check0)),
+
+ rabbit_ct_broker_helpers:force_vhost_failure(Config, VHost1),
+
+ Body1 = http_get_failed(Config, Path),
+ ?assertEqual(<<"failed">>, maps:get(<<"status">>, Body1)),
+ ?assertEqual(true, maps:is_key(<<"reason">>, Body1)),
+ ?assertEqual([VHost1], maps:get(<<"virtual-hosts">>, Body1)),
+
+ rabbit_ct_broker_helpers:force_vhost_failure(Config, VHost2),
+
+ Body2 = http_get_failed(Config, Path),
+ ?assertEqual(<<"failed">>, maps:get(<<"status">>, Body2)),
+ ?assertEqual(true, maps:is_key(<<"reason">>, Body2)),
+ VHosts = lists:sort([VHost1, VHost2]),
+ ?assertEqual(VHosts, lists:sort(maps:get(<<"virtual-hosts">>, Body2))),
+
+ rabbit_ct_broker_helpers:delete_vhost(Config, VHost1),
+ rabbit_ct_broker_helpers:delete_vhost(Config, VHost2),
+ http_get(Config, Path, ?OK),
+
+ passed.
+
+protocol_listener_test(Config) ->
+ Check0 = http_get(Config, "/health/checks/protocol-listener/http", ?OK),
+ ?assertEqual(<<"ok">>, maps:get(status, Check0)),
+
+ http_get(Config, "/health/checks/protocol-listener/amqp", ?OK),
+ http_get(Config, "/health/checks/protocol-listener/amqp0.9.1", ?OK),
+ http_get(Config, "/health/checks/protocol-listener/amqp0-9-1", ?OK),
+
+ Body0 = http_get_failed(Config, "/health/checks/protocol-listener/mqtt"),
+ ?assertEqual(<<"failed">>, maps:get(<<"status">>, Body0)),
+ ?assertEqual(true, maps:is_key(<<"reason">>, Body0)),
+ ?assertEqual(<<"mqtt">>, maps:get(<<"missing">>, Body0)),
+ ?assert(lists:member(<<"http">>, maps:get(<<"protocols">>, Body0))),
+ ?assert(lists:member(<<"clustering">>, maps:get(<<"protocols">>, Body0))),
+ ?assert(lists:member(<<"amqp">>, maps:get(<<"protocols">>, Body0))),
+
+ http_get_failed(Config, "/health/checks/protocol-listener/doe"),
+ http_get_failed(Config, "/health/checks/protocol-listener/mqtts"),
+ http_get_failed(Config, "/health/checks/protocol-listener/stomp"),
+ http_get_failed(Config, "/health/checks/protocol-listener/stomp1.0"),
+
+ passed.
+
+port_listener_test(Config) ->
+ AMQP = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp),
+ MGMT = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_mgmt),
+ MQTT = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_mqtt),
+
+ Path = fun(Port) ->
+ lists:flatten(io_lib:format("/health/checks/port-listener/~p", [Port]))
+ end,
+
+ Check0 = http_get(Config, Path(AMQP), ?OK),
+ ?assertEqual(<<"ok">>, maps:get(status, Check0)),
+
+ Check1 = http_get(Config, Path(MGMT), ?OK),
+ ?assertEqual(<<"ok">>, maps:get(status, Check1)),
+
+ http_get(Config, "/health/checks/port-listener/bananas", ?BAD_REQUEST),
+
+ Body0 = http_get_failed(Config, Path(MQTT)),
+ ?assertEqual(<<"failed">>, maps:get(<<"status">>, Body0)),
+ ?assertEqual(true, maps:is_key(<<"reason">>, Body0)),
+ ?assertEqual(MQTT, maps:get(<<"missing">>, Body0)),
+ ?assert(lists:member(AMQP, maps:get(<<"ports">>, Body0))),
+ ?assert(lists:member(MGMT, maps:get(<<"ports">>, Body0))),
+
+ passed.
+
+certificate_expiration_test(Config) ->
+ Check0 = http_get(Config, "/health/checks/certificate-expiration/1/weeks", ?OK),
+ ?assertEqual(<<"ok">>, maps:get(status, Check0)),
+
+ http_get(Config, "/health/checks/certificate-expiration/1/days", ?OK),
+ http_get(Config, "/health/checks/certificate-expiration/1/months", ?OK),
+
+ http_get(Config, "/health/checks/certificate-expiration/two/weeks", ?BAD_REQUEST),
+ http_get(Config, "/health/checks/certificate-expiration/2/week", ?BAD_REQUEST),
+ http_get(Config, "/health/checks/certificate-expiration/2/doe", ?BAD_REQUEST),
+
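+ %% The check fails when any listener certificate expires within the given
+ %% window; ten years comfortably covers the certificates generated for the
+ %% test broker.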
+ Body0 = http_get_failed(Config, "/health/checks/certificate-expiration/10/years"),
+ ?assertEqual(<<"failed">>, maps:get(<<"status">>, Body0)),
+ ?assertEqual(true, maps:is_key(<<"reason">>, Body0)),
+ [Expired] = maps:get(<<"expired">>, Body0),
+ ?assertEqual(<<"amqp/ssl">>, maps:get(<<"protocol">>, Expired)),
+ AMQP_TLS = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp_tls),
+ ?assertEqual(AMQP_TLS, maps:get(<<"port">>, Expired)),
+ Node = atom_to_binary(rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), utf8),
+ ?assertEqual(Node, maps:get(<<"node">>, Expired)),
+ ?assertEqual(true, maps:is_key(<<"cacertfile">>, Expired)),
+ ?assertEqual(true, maps:is_key(<<"certfile">>, Expired)),
+ ?assertEqual(true, maps:is_key(<<"certfile_expires_on">>, Expired)),
+ ?assertEqual(true, maps:is_key(<<"interface">>, Expired)),
+
+ passed.
+
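+%% Issues a GET as guest, asserts the health check failure status code
+%% (?HEALTH_CHECK_FAILURE_STATUS from rabbit_mgmt.hrl) and returns the
+%% decoded JSON body.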
+http_get_failed(Config, Path) ->
+ {ok, {{_, Code, _}, _, ResBody}} = req(Config, get, Path, [auth_header("guest", "guest")]),
+ ?assertEqual(?HEALTH_CHECK_FAILURE_STATUS, Code),
+ rabbit_json:decode(rabbit_data_coercion:to_binary(ResBody)).
+
+delete_queues() ->
+ [rabbit_amqqueue:delete(Q, false, false, <<"dummy">>)
+ || Q <- rabbit_amqqueue:list()].
+
+add_vhost(Config, VHost) ->
+ rabbit_ct_broker_helpers:add_vhost(Config, VHost),
+ rabbit_ct_broker_helpers:set_full_permissions(Config, <<"guest">>, VHost).
diff --git a/deps/rabbitmq_management/test/rabbit_mgmt_only_http_SUITE.erl b/deps/rabbitmq_management/test/rabbit_mgmt_only_http_SUITE.erl
new file mode 100644
index 0000000000..38bb2bac1a
--- /dev/null
+++ b/deps/rabbitmq_management/test/rabbit_mgmt_only_http_SUITE.erl
@@ -0,0 +1,1716 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2016-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mgmt_only_http_SUITE).
+
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("rabbitmq_ct_helpers/include/rabbit_mgmt_test.hrl").
+
+-import(rabbit_ct_client_helpers, [close_connection/1, close_channel/1,
+ open_unmanaged_connection/1]).
+-import(rabbit_mgmt_test_util, [assert_list/2, assert_item/2, test_item/2,
+ assert_keys/2, assert_no_keys/2,
+ http_get/2, http_get/3, http_get/5,
+ http_get_no_map/2,
+ http_put/4, http_put/6,
+ http_post/4, http_post/6,
+ http_upload_raw/8,
+ http_delete/3, http_delete/5,
+ http_put_raw/4, http_post_accept_json/4,
+ req/4, auth_header/2,
+ assert_permanent_redirect/3,
+ uri_base_from/2, format_for_upload/1,
+ amqp_port/1]).
+
+-import(rabbit_misc, [pget/2]).
+
+-define(COLLECT_INTERVAL, 1000).
+-define(PATH_PREFIX, "/custom-prefix").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, all_tests_with_prefix},
+ {group, all_tests_without_prefix},
+ {group, stats_disabled_on_request}
+ ].
+
+groups() ->
+ [
+ {all_tests_with_prefix, [], all_tests()},
+ {all_tests_without_prefix, [], all_tests()},
+ {stats_disabled_on_request, [], [disable_with_disable_stats_parameter_test]},
+ {invalid_config, [], [invalid_config_test]}
+ ].
+
+all_tests() -> [
+ overview_test,
+ nodes_test,
+ vhosts_test,
+ connections_test,
+ exchanges_test,
+ queues_test,
+ mirrored_queues_test,
+ quorum_queues_test,
+ queues_well_formed_json_test,
+ permissions_vhost_test,
+ permissions_connection_channel_consumer_test,
+ consumers_cq_test,
+ consumers_qq_test,
+ arguments_test,
+ queue_actions_test,
+ exclusive_queue_test,
+ connections_channels_pagination_test,
+ exchanges_pagination_test,
+ exchanges_pagination_permissions_test,
+ queue_pagination_test,
+ queue_pagination_columns_test,
+ queues_pagination_permissions_test,
+ samples_range_test,
+ sorting_test,
+ columns_test,
+ if_empty_unused_test,
+ queues_enable_totals_test,
+ double_encoded_json_test
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+merge_app_env(Config, DisableStats) ->
+ Config1 = rabbit_ct_helpers:merge_app_env(Config,
+ {rabbit, [
+ {collect_statistics_interval, ?COLLECT_INTERVAL}
+ ]}),
+ rabbit_ct_helpers:merge_app_env(Config1,
+ {rabbitmq_management, [
+ {disable_management_stats, DisableStats},
+ {sample_retention_policies,
+ [{global, [{605, 1}]},
+ {basic, [{605, 1}]},
+ {detailed, [{10, 1}]}]
+ }]}).
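+
+%% Each sample_retention_policies entry is, as far as we understand the
+%% format, {MaxAgeSeconds, SampleIntervalSeconds}; a single coarse interval
+%% keeps the suite's stats footprint small.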
+
+start_broker(Config) ->
+ Setup0 = rabbit_ct_broker_helpers:setup_steps(),
+ Setup1 = rabbit_ct_client_helpers:setup_steps(),
+ Steps = Setup0 ++ Setup1,
+ rabbit_ct_helpers:run_setup_steps(Config, Steps).
+
+finish_init(Group, Config) when Group == all_tests_with_prefix ->
+ finish_init(Group, Config, true);
+finish_init(Group, Config) when Group == all_tests_without_prefix ->
+ finish_init(Group, Config, true);
+finish_init(Group, Config) when Group == stats_disabled_on_request ->
+ finish_init(Group, Config, false);
+finish_init(Group, Config) when Group == invalid_config ->
+ finish_init(Group, Config, true).
+
+finish_init(Group, Config, DisableStats) ->
+ rabbit_ct_helpers:log_environment(),
+ inets:start(),
+ NodeConf = [{rmq_nodename_suffix, Group}],
+ Config1 = rabbit_ct_helpers:set_config(Config, NodeConf),
+ merge_app_env(Config1, DisableStats).
+
+init_per_group(all_tests_with_prefix=Group, Config0) ->
+ PathConfig = {rabbitmq_management, [{path_prefix, ?PATH_PREFIX}]},
+ Config1 = rabbit_ct_helpers:merge_app_env(Config0, PathConfig),
+ Config2 = finish_init(Group, Config1),
+ Config3 = start_broker(Config2),
+ Nodes = rabbit_ct_broker_helpers:get_node_configs(
+ Config3, nodename),
+ Ret = rabbit_ct_broker_helpers:rpc(
+ Config3, 0,
+ rabbit_feature_flags,
+ is_supported_remotely,
+ [Nodes, [quorum_queue], 60000]),
+ case Ret of
+ true ->
+ ok = rabbit_ct_broker_helpers:rpc(
+ Config3, 0, rabbit_feature_flags, enable, [quorum_queue]),
+ Config3;
+ false ->
+ end_per_group(Group, Config3),
+ {skip, "Quorum queues are unsupported"}
+ end;
+init_per_group(Group, Config0) ->
+ Config1 = finish_init(Group, Config0),
+ Config2 = start_broker(Config1),
+ Nodes = rabbit_ct_broker_helpers:get_node_configs(
+ Config2, nodename),
+ Ret = rabbit_ct_broker_helpers:rpc(
+ Config2, 0,
+ rabbit_feature_flags,
+ is_supported_remotely,
+ [Nodes, [quorum_queue], 60000]),
+ case Ret of
+ true ->
+ ok = rabbit_ct_broker_helpers:rpc(
+ Config2, 0, rabbit_feature_flags, enable, [quorum_queue]),
+ Config2;
+ false ->
+ end_per_group(Group, Config2),
+ {skip, "Quorum queues are unsupported"}
+ end.
+
+end_per_group(_, Config) ->
+ inets:stop(),
+ Teardown0 = rabbit_ct_client_helpers:teardown_steps(),
+ Teardown1 = rabbit_ct_broker_helpers:teardown_steps(),
+ Steps = Teardown0 ++ Teardown1,
+ rabbit_ct_helpers:run_teardown_steps(Config, Steps).
+
+init_per_testcase(Testcase = permissions_vhost_test, Config) ->
+ rabbit_ct_broker_helpers:delete_vhost(Config, <<"myvhost">>),
+ rabbit_ct_broker_helpers:delete_vhost(Config, <<"myvhost1">>),
+ rabbit_ct_broker_helpers:delete_vhost(Config, <<"myvhost2">>),
+ rabbit_ct_helpers:testcase_started(Config, Testcase);
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_broker_helpers:close_all_connections(Config, 0, <<"rabbit_mgmt_only_http_SUITE:init_per_testcase">>),
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_broker_helpers:close_all_connections(Config, 0, <<"rabbit_mgmt_only_http_SUITE:end_per_testcase">>),
+ Config1 = end_per_testcase0(Testcase, Config),
+ rabbit_ct_helpers:testcase_finished(Config1, Testcase).
+
+end_per_testcase0(Testcase = queues_enable_totals_test, Config) ->
+ rabbit_ct_broker_helpers:rpc(Config, 0, application, unset_env,
+ [rabbitmq_management, enable_queue_totals]),
+ rabbit_ct_helpers:testcase_finished(Config, Testcase);
+end_per_testcase0(Testcase = queues_test, Config) ->
+ rabbit_ct_broker_helpers:delete_vhost(Config, <<"downvhost">>),
+ rabbit_ct_helpers:testcase_finished(Config, Testcase);
+end_per_testcase0(Testcase = permissions_vhost_test, Config) ->
+ rabbit_ct_broker_helpers:delete_vhost(Config, <<"myvhost1">>),
+ rabbit_ct_broker_helpers:delete_vhost(Config, <<"myvhost2">>),
+ rabbit_ct_broker_helpers:delete_user(Config, <<"myuser1">>),
+ rabbit_ct_broker_helpers:delete_user(Config, <<"myuser2">>),
+ rabbit_ct_helpers:testcase_finished(Config, Testcase);
+end_per_testcase0(_, Config) ->
+ Config.
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+overview_test(Config) ->
+ Overview = http_get(Config, "/overview"),
+ ?assert(maps:is_key(node, Overview)),
+ ?assert(maps:is_key(object_totals, Overview)),
+ ?assert(not maps:is_key(queue_totals, Overview)),
+ ?assert(not maps:is_key(churn_rates, Overview)),
+ ?assert(not maps:is_key(message_stats, Overview)),
+ http_put(Config, "/users/myuser", [{password, <<"myuser">>},
+ {tags, <<"management">>}], {group, '2xx'}),
+ OverviewU = http_get(Config, "/overview", "myuser", "myuser", ?OK),
+ ?assert(maps:is_key(node, OverviewU)),
+ ?assert(maps:is_key(object_totals, OverviewU)),
+ ?assert(not maps:is_key(queue_totals, OverviewU)),
+ ?assert(not maps:is_key(churn_rates, OverviewU)),
+ ?assert(not maps:is_key(message_stats, OverviewU)),
+ http_delete(Config, "/users/myuser", {group, '2xx'}),
+ passed.
+
+nodes_test(Config) ->
+ http_put(Config, "/users/user", [{password, <<"user">>},
+ {tags, <<"management">>}], {group, '2xx'}),
+ http_put(Config, "/users/monitor", [{password, <<"monitor">>},
+ {tags, <<"monitoring">>}], {group, '2xx'}),
+ DiscNode = #{type => <<"disc">>, running => true},
+ assert_list([DiscNode], http_get(Config, "/nodes")),
+ assert_list([DiscNode], http_get(Config, "/nodes", "monitor", "monitor", ?OK)),
+ http_get(Config, "/nodes", "user", "user", ?NOT_AUTHORISED),
+ [Node] = http_get(Config, "/nodes"),
+ Path = "/nodes/" ++ binary_to_list(maps:get(name, Node)),
+ NodeReply = http_get(Config, Path, ?OK),
+ assert_item(DiscNode, NodeReply),
+ ?assert(not maps:is_key(channel_closed, NodeReply)),
+ ?assert(not maps:is_key(disk_free, NodeReply)),
+ ?assert(not maps:is_key(fd_used, NodeReply)),
+ ?assert(not maps:is_key(io_file_handle_open_attempt_avg_time, NodeReply)),
+ ?assert(maps:is_key(name, NodeReply)),
+ assert_item(DiscNode, http_get(Config, Path, "monitor", "monitor", ?OK)),
+ http_get(Config, Path, "user", "user", ?NOT_AUTHORISED),
+ http_delete(Config, "/users/user", {group, '2xx'}),
+ http_delete(Config, "/users/monitor", {group, '2xx'}),
+ passed.
+
+%% This test is deliberately verbose: we are also exercising our
+%% understanding of Webmachine.
+vhosts_test(Config) ->
+ assert_list([#{name => <<"/">>}], http_get(Config, "/vhosts")),
+ %% Create a new one
+ http_put(Config, "/vhosts/myvhost", none, {group, '2xx'}),
+ %% PUT should be idempotent
+ http_put(Config, "/vhosts/myvhost", none, {group, '2xx'}),
+ %% Check it's there
+ [GetFirst | _] = GetAll = http_get(Config, "/vhosts"),
+ assert_list([#{name => <<"/">>}, #{name => <<"myvhost">>}], GetAll),
+ ?assert(not maps:is_key(message_stats, GetFirst)),
+ ?assert(not maps:is_key(messages_ready_details, GetFirst)),
+ ?assert(not maps:is_key(recv_oct, GetFirst)),
+ ?assert(maps:is_key(cluster_state, GetFirst)),
+
+ %% Check individually
+ Get = http_get(Config, "/vhosts/%2F", ?OK),
+ assert_item(#{name => <<"/">>}, Get),
+ assert_item(#{name => <<"myvhost">>},http_get(Config, "/vhosts/myvhost")),
+ ?assert(not maps:is_key(message_stats, Get)),
+ ?assert(not maps:is_key(messages_ready_details, Get)),
+ ?assert(not maps:is_key(recv_oct, Get)),
+ ?assert(maps:is_key(cluster_state, Get)),
+
+ %% Crash it
+ rabbit_ct_broker_helpers:force_vhost_failure(Config, <<"myvhost">>),
+ [NodeData] = http_get(Config, "/nodes"),
+ Node = binary_to_atom(maps:get(name, NodeData), utf8),
+ assert_item(#{name => <<"myvhost">>, cluster_state => #{Node => <<"stopped">>}},
+ http_get(Config, "/vhosts/myvhost")),
+
+ %% Restart it
+ http_post(Config, "/vhosts/myvhost/start/" ++ atom_to_list(Node), [], {group, '2xx'}),
+ assert_item(#{name => <<"myvhost">>, cluster_state => #{Node => <<"running">>}},
+ http_get(Config, "/vhosts/myvhost")),
+
+ %% Delete it
+ http_delete(Config, "/vhosts/myvhost", {group, '2xx'}),
+ %% It's not there
+ http_get(Config, "/vhosts/myvhost", ?NOT_FOUND),
+ http_delete(Config, "/vhosts/myvhost", ?NOT_FOUND),
+
+ passed.
+
+connections_test(Config) ->
+ {Conn, _Ch} = open_connection_and_channel(Config),
+ LocalPort = local_port(Conn),
+ Path = binary_to_list(
+ rabbit_mgmt_format:print(
+ "/connections/127.0.0.1%3A~w%20-%3E%20127.0.0.1%3A~w",
+ [LocalPort, amqp_port(Config)])),
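+ %% Give the connection a moment to be registered with the management
+ %% plugin before querying it.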
+ timer:sleep(1500),
+ Connection = http_get(Config, Path, ?OK),
+ ?assertEqual(1, maps:size(Connection)),
+ ?assert(maps:is_key(name, Connection)),
+ ?assert(not maps:is_key(recv_oct_details, Connection)),
+ http_delete(Config, Path, {group, '2xx'}),
+ %% TODO rabbit_reader:shutdown/2 returns before the connection is
+ %% closed. It may not be worth fixing.
+ Fun = fun() ->
+ try
+ http_get(Config, Path, ?NOT_FOUND),
+ true
+ catch
+ _:_ ->
+ false
+ end
+ end,
+ wait_until(Fun, 60),
+ close_connection(Conn),
+ passed.
+
+exchanges_test(Config) ->
+ %% Can list exchanges
+ http_get(Config, "/exchanges", {group, '2xx'}),
+ %% Can pass booleans or strings
+ Good = [{type, <<"direct">>}, {durable, <<"true">>}],
+ http_put(Config, "/vhosts/myvhost", none, {group, '2xx'}),
+ http_get(Config, "/exchanges/myvhost/foo", ?NOT_FOUND),
+ http_put(Config, "/exchanges/myvhost/foo", Good, {group, '2xx'}),
+ http_put(Config, "/exchanges/myvhost/foo", Good, {group, '2xx'}),
+ http_get(Config, "/exchanges/%2F/foo", ?NOT_FOUND),
+ Exchange = http_get(Config, "/exchanges/myvhost/foo"),
+ assert_item(#{name => <<"foo">>,
+ vhost => <<"myvhost">>,
+ type => <<"direct">>,
+ durable => true,
+ auto_delete => false,
+ internal => false,
+ arguments => #{}},
+ Exchange),
+ ?assert(not maps:is_key(message_stats, Exchange)),
+ http_put(Config, "/exchanges/badvhost/bar", Good, ?NOT_FOUND),
+ http_put(Config, "/exchanges/myvhost/bar", [{type, <<"bad_exchange_type">>}],
+ ?BAD_REQUEST),
+ http_put(Config, "/exchanges/myvhost/bar", [{type, <<"direct">>},
+ {durable, <<"troo">>}],
+ ?BAD_REQUEST),
+ http_put(Config, "/exchanges/myvhost/foo", [{type, <<"direct">>}],
+ ?BAD_REQUEST),
+
+ http_delete(Config, "/exchanges/myvhost/foo", {group, '2xx'}),
+ http_delete(Config, "/exchanges/myvhost/foo", ?NOT_FOUND),
+
+ http_delete(Config, "/vhosts/myvhost", {group, '2xx'}),
+ http_get(Config, "/exchanges/badvhost", ?NOT_FOUND),
+ passed.
+
+queues_test(Config) ->
+ Good = [{durable, true}],
+ GoodQQ = [{durable, true}, {arguments, [{'x-queue-type', 'quorum'}]}],
+ http_get(Config, "/queues/%2F/foo", ?NOT_FOUND),
+ http_put(Config, "/queues/%2F/foo", GoodQQ, {group, '2xx'}),
+ http_put(Config, "/queues/%2F/foo", GoodQQ, {group, '2xx'}),
+
+ rabbit_ct_broker_helpers:add_vhost(Config, <<"downvhost">>),
+ rabbit_ct_broker_helpers:set_full_permissions(Config, <<"downvhost">>),
+ http_put(Config, "/queues/downvhost/foo", Good, {group, '2xx'}),
+ http_put(Config, "/queues/downvhost/bar", Good, {group, '2xx'}),
+
+ rabbit_ct_broker_helpers:force_vhost_failure(Config, <<"downvhost">>),
+ %% The vhost is down
+ Node = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ DownVHost = #{name => <<"downvhost">>, tracing => false, cluster_state => #{Node => <<"stopped">>}},
+ assert_item(DownVHost, http_get(Config, "/vhosts/downvhost")),
+
+ DownQueues = http_get(Config, "/queues/downvhost"),
+ DownQueue = http_get(Config, "/queues/downvhost/foo"),
+
+ assert_list([#{name => <<"bar">>,
+ vhost => <<"downvhost">>,
+ state => <<"stopped">>,
+ durable => true,
+ auto_delete => false,
+ exclusive => false,
+ arguments => #{}},
+ #{name => <<"foo">>,
+ vhost => <<"downvhost">>,
+ state => <<"stopped">>,
+ durable => true,
+ auto_delete => false,
+ exclusive => false,
+ arguments => #{}}], DownQueues),
+ assert_item(#{name => <<"foo">>,
+ vhost => <<"downvhost">>,
+ state => <<"stopped">>,
+ durable => true,
+ auto_delete => false,
+ exclusive => false,
+ arguments => #{}}, DownQueue),
+
+ http_put(Config, "/queues/badvhost/bar", Good, ?NOT_FOUND),
+ http_put(Config, "/queues/%2F/bar",
+ [{durable, <<"troo">>}],
+ ?BAD_REQUEST),
+ http_put(Config, "/queues/%2F/foo",
+ [{durable, false}],
+ ?BAD_REQUEST),
+
+ Policy = [{pattern, <<"baz">>},
+ {definition, [{<<"ha-mode">>, <<"all">>}]}],
+ http_put(Config, "/policies/%2F/HA", Policy, {group, '2xx'}),
+ http_put(Config, "/queues/%2F/baz", Good, {group, '2xx'}),
+ Queues = http_get(Config, "/queues/%2F"),
+ Queue = http_get(Config, "/queues/%2F/foo"),
+
+ NodeBin = atom_to_binary(Node, utf8),
+ assert_list([#{name => <<"baz">>,
+ vhost => <<"/">>,
+ durable => true,
+ auto_delete => false,
+ exclusive => false,
+ arguments => #{},
+ node => NodeBin,
+ slave_nodes => [],
+ synchronised_slave_nodes => []},
+ #{name => <<"foo">>,
+ vhost => <<"/">>,
+ durable => true,
+ auto_delete => false,
+ exclusive => false,
+ arguments => #{'x-queue-type' => <<"quorum">>},
+ leader => NodeBin,
+ members => [NodeBin]}], Queues),
+ assert_item(#{name => <<"foo">>,
+ vhost => <<"/">>,
+ durable => true,
+ auto_delete => false,
+ exclusive => false,
+ arguments => #{'x-queue-type' => <<"quorum">>},
+ leader => NodeBin,
+ members => [NodeBin]}, Queue),
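+ %% Note: classic queues report node/slave_nodes/synchronised_slave_nodes,
+ %% while quorum queues report leader/members (and online), as asserted here.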
+
+ ?assert(not maps:is_key(messages, Queue)),
+ ?assert(not maps:is_key(message_stats, Queue)),
+ ?assert(not maps:is_key(messages_details, Queue)),
+ ?assert(not maps:is_key(reductions_details, Queue)),
+ ?assertEqual(NodeBin, maps:get(leader, Queue)),
+ ?assertEqual([NodeBin], maps:get(members, Queue)),
+ ?assertEqual([NodeBin], maps:get(online, Queue)),
+
+ http_delete(Config, "/queues/%2F/foo", {group, '2xx'}),
+ http_delete(Config, "/queues/%2F/baz", {group, '2xx'}),
+ http_delete(Config, "/queues/%2F/foo", ?NOT_FOUND),
+ http_get(Config, "/queues/badvhost", ?NOT_FOUND),
+
+ http_delete(Config, "/queues/downvhost/foo", {group, '2xx'}),
+ http_delete(Config, "/queues/downvhost/bar", {group, '2xx'}),
+ passed.
+
+queues_enable_totals_test(Config) ->
+ rabbit_ct_broker_helpers:rpc(Config, 0, application, set_env,
+ [rabbitmq_management, enable_queue_totals, true]),
+
+ Good = [{durable, true}],
+ GoodQQ = [{durable, true}, {arguments, [{'x-queue-type', 'quorum'}]}],
+ http_get(Config, "/queues/%2F/foo", ?NOT_FOUND),
+ http_put(Config, "/queues/%2F/foo", GoodQQ, {group, '2xx'}),
+
+ Policy = [{pattern, <<"baz">>},
+ {definition, [{<<"ha-mode">>, <<"all">>}]}],
+ http_put(Config, "/policies/%2F/HA", Policy, {group, '2xx'}),
+ http_put(Config, "/queues/%2F/baz", Good, {group, '2xx'}),
+
+ {Conn, Ch} = open_connection_and_channel(Config),
+ Publish = fun(Q) ->
+ amqp_channel:call(
+ Ch, #'basic.publish'{exchange = <<"">>,
+ routing_key = Q},
+ #amqp_msg{payload = <<"message">>})
+ end,
+ Publish(<<"baz">>),
+ Publish(<<"foo">>),
+ Publish(<<"foo">>),
+
+ Fun = fun() ->
+ length(rabbit_ct_broker_helpers:rpc(Config, 0, ets, tab2list,
+ [queue_coarse_metrics])) == 2
+ end,
+ wait_until(Fun, 60),
+
+ Queues = http_get(Config, "/queues/%2F"),
+ Queue = http_get(Config, "/queues/%2F/foo"),
+
+ Node = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ NodeBin = atom_to_binary(Node, utf8),
+ assert_list([#{name => <<"baz">>,
+ vhost => <<"/">>,
+ durable => true,
+ auto_delete => false,
+ exclusive => false,
+ arguments => #{},
+ node => NodeBin,
+ slave_nodes => [],
+ messages => 1,
+ messages_ready => 1,
+ messages_unacknowledged => 0,
+ synchronised_slave_nodes => []},
+ #{name => <<"foo">>,
+ vhost => <<"/">>,
+ durable => true,
+ auto_delete => false,
+ exclusive => null,
+ arguments => #{'x-queue-type' => <<"quorum">>},
+ leader => NodeBin,
+ messages => 2,
+ messages_ready => 2,
+ messages_unacknowledged => 0,
+ members => [NodeBin]}], Queues),
+ assert_item(#{name => <<"foo">>,
+ vhost => <<"/">>,
+ durable => true,
+ auto_delete => false,
+ exclusive => false,
+ arguments => #{'x-queue-type' => <<"quorum">>},
+ leader => NodeBin,
+ members => [NodeBin]}, Queue),
+
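+    %% totals are only added to the queue listing; the individual queue
+    %% endpoint still omits them, as asserted below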
+ ?assert(not maps:is_key(messages, Queue)),
+ ?assert(not maps:is_key(messages_ready, Queue)),
+ ?assert(not maps:is_key(messages_unacknowledged, Queue)),
+ ?assert(not maps:is_key(message_stats, Queue)),
+ ?assert(not maps:is_key(messages_details, Queue)),
+ ?assert(not maps:is_key(reductions_details, Queue)),
+
+ http_delete(Config, "/queues/%2F/foo", {group, '2xx'}),
+ http_delete(Config, "/queues/%2F/baz", {group, '2xx'}),
+ close_connection(Conn),
+
+ passed.
+
+mirrored_queues_test(Config) ->
+ Policy = [{pattern, <<".*">>},
+ {definition, [{<<"ha-mode">>, <<"all">>}]}],
+ http_put(Config, "/policies/%2F/HA", Policy, {group, '2xx'}),
+
+ Good = [{durable, true}, {arguments, []}],
+ http_get(Config, "/queues/%2f/ha", ?NOT_FOUND),
+ http_put(Config, "/queues/%2f/ha", Good, {group, '2xx'}),
+
+ {Conn, Ch} = open_connection_and_channel(Config),
+ Publish = fun() ->
+ amqp_channel:call(
+ Ch, #'basic.publish'{exchange = <<"">>,
+ routing_key = <<"ha">>},
+ #amqp_msg{payload = <<"message">>})
+ end,
+ Publish(),
+ Publish(),
+
+ Queue = http_get(Config, "/queues/%2f/ha?lengths_age=60&lengths_incr=5&msg_rates_age=60&msg_rates_incr=5&data_rates_age=60&data_rates_incr=5"),
+
+    %% There is really only one node, but all this test needs to verify is
+    %% that the key exists
+ Nodes = lists:sort(rabbit_ct_broker_helpers:get_node_configs(Config, nodename)),
+
+ ?assert(not maps:is_key(messages, Queue)),
+ ?assert(not maps:is_key(messages_details, Queue)),
+ ?assert(not maps:is_key(reductions_details, Queue)),
+    ?assert(lists:member(maps:get(node, Queue), Nodes)),
+ ?assertEqual([], get_nodes(slave_nodes, Queue)),
+ ?assertEqual([], get_nodes(synchronised_slave_nodes, Queue)),
+
+ http_delete(Config, "/queues/%2f/ha", {group, '2xx'}),
+ close_connection(Conn).
+
+quorum_queues_test(Config) ->
+ Good = [{durable, true}, {arguments, [{'x-queue-type', 'quorum'}]}],
+ http_get(Config, "/queues/%2f/qq", ?NOT_FOUND),
+ http_put(Config, "/queues/%2f/qq", Good, {group, '2xx'}),
+
+ {Conn, Ch} = open_connection_and_channel(Config),
+ Publish = fun() ->
+ amqp_channel:call(
+ Ch, #'basic.publish'{exchange = <<"">>,
+ routing_key = <<"qq">>},
+ #amqp_msg{payload = <<"message">>})
+ end,
+ Publish(),
+ Publish(),
+
+ Queue = http_get(Config, "/queues/%2f/qq?lengths_age=60&lengths_incr=5&msg_rates_age=60&msg_rates_incr=5&data_rates_age=60&data_rates_incr=5"),
+
+    %% There is really only one node, but all this test needs to verify is
+    %% that the key exists
+ Nodes = lists:sort(rabbit_ct_broker_helpers:get_node_configs(Config, nodename)),
+
+ ?assert(not maps:is_key(messages, Queue)),
+ ?assert(not maps:is_key(messages_details, Queue)),
+ ?assert(not maps:is_key(reductions_details, Queue)),
+    ?assert(lists:member(maps:get(leader, Queue, undefined), Nodes)),
+ ?assertEqual(Nodes, get_nodes(members, Queue)),
+ ?assertEqual(Nodes, get_nodes(online, Queue)),
+
+ http_delete(Config, "/queues/%2f/qq", {group, '2xx'}),
+ close_connection(Conn).
+
+get_nodes(Tag, Queue) ->
+ lists:sort([binary_to_atom(B, utf8) || B <- maps:get(Tag, Queue)]).
+
+queues_well_formed_json_test(Config) ->
+ %% TODO This test should be extended to the whole API
+ Good = [{durable, true}],
+ http_put(Config, "/queues/%2F/foo", Good, {group, '2xx'}),
+ http_put(Config, "/queues/%2F/baz", Good, {group, '2xx'}),
+
+ Queues = http_get_no_map(Config, "/queues/%2F"),
+ %% Ensure keys are unique
+ [begin
+ Sorted = lists:sort(Q),
+ Sorted = lists:usort(Q)
+ end || Q <- Queues],
+
+ http_delete(Config, "/queues/%2F/foo", {group, '2xx'}),
+ http_delete(Config, "/queues/%2F/baz", {group, '2xx'}),
+ passed.
+
+permissions_vhost_test(Config) ->
+ QArgs = #{},
+ PermArgs = [{configure, <<".*">>}, {write, <<".*">>}, {read, <<".*">>}],
+ http_put(Config, "/users/myadmin", [{password, <<"myadmin">>},
+ {tags, <<"administrator">>}], {group, '2xx'}),
+ http_put(Config, "/users/myuser", [{password, <<"myuser">>},
+ {tags, <<"management">>}], {group, '2xx'}),
+ http_put(Config, "/vhosts/myvhost1", none, {group, '2xx'}),
+ http_put(Config, "/vhosts/myvhost2", none, {group, '2xx'}),
+ http_put(Config, "/permissions/myvhost1/myuser", PermArgs, {group, '2xx'}),
+ http_put(Config, "/permissions/myvhost1/guest", PermArgs, {group, '2xx'}),
+ http_put(Config, "/permissions/myvhost2/guest", PermArgs, {group, '2xx'}),
+ assert_list([#{name => <<"/">>},
+ #{name => <<"myvhost1">>},
+ #{name => <<"myvhost2">>}], http_get(Config, "/vhosts", ?OK)),
+ assert_list([#{name => <<"myvhost1">>}],
+ http_get(Config, "/vhosts", "myuser", "myuser", ?OK)),
+ http_put(Config, "/queues/myvhost1/myqueue", QArgs, {group, '2xx'}),
+ http_put(Config, "/queues/myvhost2/myqueue", QArgs, {group, '2xx'}),
+ Test1 =
+ fun(Path) ->
+ Results = http_get(Config, Path, "myuser", "myuser", ?OK),
+ [case maps:get(vhost, Result) of
+ <<"myvhost2">> ->
+ throw({got_result_from_vhost2_in, Path, Result});
+ _ ->
+ ok
+ end || Result <- Results]
+ end,
+ Test2 =
+ fun(Path1, Path2) ->
+ http_get(Config, Path1 ++ "/myvhost1/" ++ Path2, "myuser", "myuser",
+ ?OK),
+ http_get(Config, Path1 ++ "/myvhost2/" ++ Path2, "myuser", "myuser",
+ ?NOT_AUTHORISED)
+ end,
+ Test3 =
+ fun(Path1) ->
+ http_get(Config, Path1 ++ "/myvhost1/", "myadmin", "myadmin",
+ ?OK)
+ end,
+ Test1("/exchanges"),
+ Test2("/exchanges", ""),
+ Test2("/exchanges", "amq.direct"),
+ Test3("/exchanges"),
+ Test1("/queues"),
+ Test2("/queues", ""),
+ Test3("/queues"),
+ Test2("/queues", "myqueue"),
+ Test1("/bindings"),
+ Test2("/bindings", ""),
+ Test3("/bindings"),
+ Test2("/queues", "myqueue/bindings"),
+ Test2("/exchanges", "amq.default/bindings/source"),
+ Test2("/exchanges", "amq.default/bindings/destination"),
+ Test2("/bindings", "e/amq.default/q/myqueue"),
+ Test2("/bindings", "e/amq.default/q/myqueue/myqueue"),
+ http_delete(Config, "/vhosts/myvhost1", {group, '2xx'}),
+ http_delete(Config, "/vhosts/myvhost2", {group, '2xx'}),
+ http_delete(Config, "/users/myuser", {group, '2xx'}),
+ http_delete(Config, "/users/myadmin", {group, '2xx'}),
+ passed.
+
+%% Opens a new connection and a channel on it.
+%% The channel is not managed by rabbit_ct_client_helpers and
+%% should be explicitly closed by the caller.
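+%% A minimal usage sketch:
+%%     {Conn, Ch} = open_connection_and_channel(Config),
+%%     ... use Ch ...
+%%     amqp_channel:close(Ch),
+%%     close_connection(Conn).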
+open_connection_and_channel(Config) ->
+ Conn = rabbit_ct_client_helpers:open_connection(Config, 0),
+ {ok, Ch} = amqp_connection:open_channel(Conn),
+ {Conn, Ch}.
+
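+%% Opens a connection with the given credentials and builds the
+%% percent-encoded management API paths for it: the connection itself, its
+%% first channel and its channel listing. For example,
+%% "/connections/127.0.0.1%3A54321%20-%3E%20127.0.0.1%3A5672" decodes to
+%% "127.0.0.1:54321 -> 127.0.0.1:5672" (54321 standing in for the
+%% ephemeral local port).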
+get_conn(Config, Username, Password) ->
+ Port = amqp_port(Config),
+ {ok, Conn} = amqp_connection:start(#amqp_params_network{
+ port = Port,
+ username = list_to_binary(Username),
+ password = list_to_binary(Password)}),
+ LocalPort = local_port(Conn),
+ ConnPath = rabbit_misc:format(
+ "/connections/127.0.0.1%3A~w%20-%3E%20127.0.0.1%3A~w",
+ [LocalPort, Port]),
+ ChPath = rabbit_misc:format(
+ "/channels/127.0.0.1%3A~w%20-%3E%20127.0.0.1%3A~w%20(1)",
+ [LocalPort, Port]),
+ ConnChPath = rabbit_misc:format(
+ "/connections/127.0.0.1%3A~w%20-%3E%20127.0.0.1%3A~w/channels",
+ [LocalPort, Port]),
+ {Conn, ConnPath, ChPath, ConnChPath}.
+
+permissions_connection_channel_consumer_test(Config) ->
+ PermArgs = [{configure, <<".*">>}, {write, <<".*">>}, {read, <<".*">>}],
+ http_put(Config, "/users/user", [{password, <<"user">>},
+ {tags, <<"management">>}], {group, '2xx'}),
+ http_put(Config, "/permissions/%2F/user", PermArgs, {group, '2xx'}),
+ http_put(Config, "/users/monitor", [{password, <<"monitor">>},
+ {tags, <<"monitoring">>}], {group, '2xx'}),
+ http_put(Config, "/permissions/%2F/monitor", PermArgs, {group, '2xx'}),
+ http_put(Config, "/queues/%2F/test", #{}, {group, '2xx'}),
+
+ {Conn1, UserConn, UserCh, UserConnCh} = get_conn(Config, "user", "user"),
+ {Conn2, MonConn, MonCh, MonConnCh} = get_conn(Config, "monitor", "monitor"),
+ {Conn3, AdmConn, AdmCh, AdmConnCh} = get_conn(Config, "guest", "guest"),
+ {ok, Ch1} = amqp_connection:open_channel(Conn1),
+ {ok, Ch2} = amqp_connection:open_channel(Conn2),
+ {ok, Ch3} = amqp_connection:open_channel(Conn3),
+ [amqp_channel:subscribe(
+ Ch, #'basic.consume'{queue = <<"test">>}, self()) ||
+ Ch <- [Ch1, Ch2, Ch3]],
+ timer:sleep(1500),
+    AssertLength = fun (Path, User, Len) ->
+                           %% fetch inside the retried condition so each
+                           %% attempt observes fresh data
+                           rabbit_ct_helpers:await_condition(
+                             fun () ->
+                                     Res = http_get(Config, Path, User, User, ?OK),
+                                     Len =:= length(Res)
+                             end)
+                   end,
+ AssertDisabled = fun(Path) ->
+ http_get(Config, Path, "user", "user", ?BAD_REQUEST),
+ http_get(Config, Path, "monitor", "monitor", ?BAD_REQUEST),
+ http_get(Config, Path, "guest", "guest", ?BAD_REQUEST),
+ http_get(Config, Path, ?BAD_REQUEST)
+ end,
+
+ AssertLength("/connections", "user", 1),
+ AssertLength("/connections", "monitor", 3),
+ AssertLength("/connections", "guest", 3),
+
+ AssertDisabled("/channels"),
+ AssertDisabled("/consumers"),
+ AssertDisabled("/consumers/%2F"),
+
+ AssertRead = fun(Path, UserStatus) ->
+ http_get(Config, Path, "user", "user", UserStatus),
+ http_get(Config, Path, "monitor", "monitor", ?OK),
+ http_get(Config, Path, ?OK)
+ end,
+
+ AssertRead(UserConn, ?OK),
+ AssertRead(MonConn, ?NOT_AUTHORISED),
+ AssertRead(AdmConn, ?NOT_AUTHORISED),
+ AssertDisabled(UserCh),
+ AssertDisabled(MonCh),
+ AssertDisabled(AdmCh),
+ AssertRead(UserConnCh, ?OK),
+ AssertRead(MonConnCh, ?NOT_AUTHORISED),
+ AssertRead(AdmConnCh, ?NOT_AUTHORISED),
+
+ AssertClose = fun(Path, User, Status) ->
+ http_delete(Config, Path, User, User, Status)
+ end,
+ AssertClose(UserConn, "monitor", ?NOT_AUTHORISED),
+ AssertClose(MonConn, "user", ?NOT_AUTHORISED),
+ AssertClose(AdmConn, "guest", {group, '2xx'}),
+ AssertClose(MonConn, "guest", {group, '2xx'}),
+ AssertClose(UserConn, "user", {group, '2xx'}),
+
+ http_delete(Config, "/users/user", {group, '2xx'}),
+ http_delete(Config, "/users/monitor", {group, '2xx'}),
+ http_get(Config, "/connections/foo", ?NOT_FOUND),
+ http_get(Config, "/channels/foo", ?BAD_REQUEST),
+ http_delete(Config, "/queues/%2F/test", {group, '2xx'}),
+ passed.
+
+consumers_cq_test(Config) ->
+ consumers_test(Config, [{'x-queue-type', <<"classic">>}]).
+
+consumers_qq_test(Config) ->
+ consumers_test(Config, [{'x-queue-type', <<"quorum">>}]).
+
+consumers_test(Config, Args) ->
+ http_delete(Config, "/queues/%2F/test", [?NO_CONTENT, ?NOT_FOUND]),
+ QArgs = [{auto_delete, false}, {durable, true},
+ {arguments, Args}],
+ http_put(Config, "/queues/%2F/test", QArgs, {group, '2xx'}),
+ {Conn, _ConnPath, _ChPath, _ConnChPath} = get_conn(Config, "guest", "guest"),
+ {ok, Ch} = amqp_connection:open_channel(Conn),
+ amqp_channel:subscribe(
+ Ch, #'basic.consume'{queue = <<"test">>,
+ no_ack = false,
+ consumer_tag = <<"my-ctag">> }, self()),
+ timer:sleep(1500),
+
+ http_get(Config, "/consumers", ?BAD_REQUEST),
+
+ amqp_connection:close(Conn),
+ http_delete(Config, "/queues/%2F/test", {group, '2xx'}),
+ passed.
+
+defs(Config, Key, URI, CreateMethod, Args) ->
+ defs(Config, Key, URI, CreateMethod, Args,
+ fun(URI2) -> http_delete(Config, URI2, {group, '2xx'}) end).
+
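+%% Runs the definitions round-trip below twice: once against the default
+%% vhost and once against a freshly created "test" vhost, substituting the
+%% "<vhost>" placeholder in the URI template and the vhost key in Args.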
+defs_v(Config, Key, URI, CreateMethod, Args) ->
+ Rep1 = fun (S, S2) -> re:replace(S, "<vhost>", S2, [{return, list}]) end,
+ ReplaceVHostInArgs = fun(M, V2) -> maps:map(fun(vhost, _) -> V2;
+ (_, V1) -> V1 end, M) end,
+
+ %% Test against default vhost
+ defs(Config, Key, Rep1(URI, "%2F"), CreateMethod, ReplaceVHostInArgs(Args, <<"/">>)),
+
+ %% Test against new vhost
+ http_put(Config, "/vhosts/test", none, {group, '2xx'}),
+ PermArgs = [{configure, <<".*">>}, {write, <<".*">>}, {read, <<".*">>}],
+ http_put(Config, "/permissions/test/guest", PermArgs, {group, '2xx'}),
+ DeleteFun0 = fun(URI2) ->
+ http_delete(Config, URI2, {group, '2xx'})
+ end,
+ DeleteFun1 = fun(_) ->
+ http_delete(Config, "/vhosts/test", {group, '2xx'})
+ end,
+ defs(Config, Key, Rep1(URI, "test"),
+ CreateMethod, ReplaceVHostInArgs(Args, <<"test">>),
+ DeleteFun0, DeleteFun1).
+
+create(Config, CreateMethod, URI, Args) ->
+ case CreateMethod of
+ put -> http_put(Config, URI, Args, {group, '2xx'}),
+ URI;
+ put_update -> http_put(Config, URI, Args, {group, '2xx'}),
+ URI;
+ post -> Headers = http_post(Config, URI, Args, {group, '2xx'}),
+ rabbit_web_dispatch_util:unrelativise(
+ URI, pget("location", Headers))
+ end.
+
+defs(Config, Key, URI, CreateMethod, Args, DeleteFun) ->
+ defs(Config, Key, URI, CreateMethod, Args, DeleteFun, DeleteFun).
+
+defs(Config, Key, URI, CreateMethod, Args, DeleteFun0, DeleteFun1) ->
+ %% Create the item
+ URI2 = create(Config, CreateMethod, URI, Args),
+ %% Make sure it ends up in definitions
+ Definitions = http_get(Config, "/definitions", ?OK),
+ true = lists:any(fun(I) -> test_item(Args, I) end, maps:get(Key, Definitions)),
+
+ %% Delete it
+ DeleteFun0(URI2),
+
+ %% Post the definitions back, it should get recreated in correct form
+ http_post(Config, "/definitions", Definitions, {group, '2xx'}),
+ assert_item(Args, http_get(Config, URI2, ?OK)),
+
+ %% And delete it again
+ DeleteFun1(URI2),
+
+ passed.
+
+register_parameters_and_policy_validator(Config) ->
+ rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_mgmt_runtime_parameters_util, register, []),
+ rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_mgmt_runtime_parameters_util, register_policy_validator, []).
+
+unregister_parameters_and_policy_validator(Config) ->
+ rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_mgmt_runtime_parameters_util, unregister_policy_validator, []),
+ rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_mgmt_runtime_parameters_util, unregister, []).
+
+arguments_test(Config) ->
+ XArgs = [{type, <<"headers">>},
+ {arguments, [{'alternate-exchange', <<"amq.direct">>}]}],
+ QArgs = [{arguments, [{'x-expires', 1800000}]}],
+ BArgs = [{routing_key, <<"">>},
+ {arguments, [{'x-match', <<"all">>},
+ {foo, <<"bar">>}]}],
+ http_put(Config, "/exchanges/%2F/myexchange", XArgs, {group, '2xx'}),
+ http_put(Config, "/queues/%2F/arguments_test", QArgs, {group, '2xx'}),
+ http_post(Config, "/bindings/%2F/e/myexchange/q/arguments_test", BArgs, {group, '2xx'}),
+ Definitions = http_get(Config, "/definitions", ?OK),
+ http_delete(Config, "/exchanges/%2F/myexchange", {group, '2xx'}),
+ http_delete(Config, "/queues/%2F/arguments_test", {group, '2xx'}),
+ http_post(Config, "/definitions", Definitions, {group, '2xx'}),
+ #{'alternate-exchange' := <<"amq.direct">>} =
+ maps:get(arguments, http_get(Config, "/exchanges/%2F/myexchange", ?OK)),
+ #{'x-expires' := 1800000} =
+ maps:get(arguments, http_get(Config, "/queues/%2F/arguments_test", ?OK)),
+
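+    %% a binding that carries arguments is addressed by its "properties
+    %% key": "~" followed by a hash of its arguments table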
+ ArgsTable = [{<<"foo">>,longstr,<<"bar">>}, {<<"x-match">>, longstr, <<"all">>}],
+ Hash = table_hash(ArgsTable),
+ PropertiesKey = [$~] ++ Hash,
+
+ assert_item(
+ #{'x-match' => <<"all">>, foo => <<"bar">>},
+ maps:get(arguments,
+ http_get(Config, "/bindings/%2F/e/myexchange/q/arguments_test/" ++
+ PropertiesKey, ?OK))
+ ),
+ http_delete(Config, "/exchanges/%2F/myexchange", {group, '2xx'}),
+ http_delete(Config, "/queues/%2F/arguments_test", {group, '2xx'}),
+ passed.
+
+table_hash(Table) ->
+ binary_to_list(rabbit_mgmt_format:args_hash(Table)).
+
+queue_actions_test(Config) ->
+ http_put(Config, "/queues/%2F/q", #{}, {group, '2xx'}),
+ http_post(Config, "/queues/%2F/q/actions", [{action, sync}], {group, '2xx'}),
+ http_post(Config, "/queues/%2F/q/actions", [{action, cancel_sync}], {group, '2xx'}),
+ http_post(Config, "/queues/%2F/q/actions", [{action, change_colour}], ?BAD_REQUEST),
+ http_delete(Config, "/queues/%2F/q", {group, '2xx'}),
+ passed.
+
+double_encoded_json_test(Config) ->
+ Payload = rabbit_json:encode(rabbit_json:encode(#{<<"durable">> => true, <<"auto_delete">> => false})),
+    %% a double-encoded JSON request body must be rejected as a 4xx, e.g. a Bad Request, and not a 500
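+    %% i.e. the request body on the wire is a JSON string containing JSON,
+    %% "{\"durable\":true,\"auto_delete\":false}" (outer quotes included),
+    %% rather than a JSON object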
+ http_put_raw(Config, "/queues/%2F/q", Payload, {group, '4xx'}),
+ passed.
+
+exclusive_queue_test(Config) ->
+ {Conn, Ch} = open_connection_and_channel(Config),
+ #'queue.declare_ok'{queue = QName} =
+ amqp_channel:call(Ch, #'queue.declare'{exclusive = true}),
+ timer:sleep(2000),
+ Path = "/queues/%2F/" ++ rabbit_http_util:quote_plus(QName),
+ Queue = http_get(Config, Path),
+ assert_item(#{name => QName,
+ vhost => <<"/">>,
+ durable => false,
+ auto_delete => false,
+ exclusive => true,
+ arguments => #{}}, Queue),
+ amqp_channel:close(Ch),
+ close_connection(Conn),
+ passed.
+
+connections_channels_pagination_test(Config) ->
+ %% this test uses "unmanaged" (by Common Test helpers) connections to avoid
+ %% connection caching
+ Conn = open_unmanaged_connection(Config),
+ {ok, Ch} = amqp_connection:open_channel(Conn),
+ Conn1 = open_unmanaged_connection(Config),
+ {ok, Ch1} = amqp_connection:open_channel(Conn1),
+ Conn2 = open_unmanaged_connection(Config),
+ {ok, Ch2} = amqp_connection:open_channel(Conn2),
+
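+    %% pagination metadata: total_count counts all items visible to the
+    %% user, filtered_count those matching the filter, item_count the items
+    %% on this page, and page_count the number of pages at this page_size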
+ rabbit_ct_helpers:await_condition(
+ fun() ->
+ PageOfTwo = http_get(Config, "/connections?page=1&page_size=2", ?OK),
+ 3 == maps:get(total_count, PageOfTwo) andalso
+ 3 == maps:get(filtered_count, PageOfTwo) andalso
+ 2 == maps:get(item_count, PageOfTwo) andalso
+ 1 == maps:get(page, PageOfTwo) andalso
+ 2 == maps:get(page_size, PageOfTwo) andalso
+ 2 == maps:get(page_count, PageOfTwo) andalso
+ lists:all(fun(C) ->
+ not maps:is_key(recv_oct_details, C)
+ end, maps:get(items, PageOfTwo))
+ end),
+
+ http_get(Config, "/channels?page=2&page_size=2", ?BAD_REQUEST),
+
+ amqp_channel:close(Ch),
+ amqp_connection:close(Conn),
+ amqp_channel:close(Ch1),
+ amqp_connection:close(Conn1),
+ amqp_channel:close(Ch2),
+ amqp_connection:close(Conn2),
+
+ passed.
+
+exchanges_pagination_test(Config) ->
+ QArgs = #{},
+ PermArgs = [{configure, <<".*">>}, {write, <<".*">>}, {read, <<".*">>}],
+ http_put(Config, "/vhosts/vh1", none, {group, '2xx'}),
+ http_put(Config, "/permissions/vh1/guest", PermArgs, {group, '2xx'}),
+ http_get(Config, "/exchanges/vh1?page=1&page_size=2", ?OK),
+ http_put(Config, "/exchanges/%2F/test0", QArgs, {group, '2xx'}),
+ http_put(Config, "/exchanges/vh1/test1", QArgs, {group, '2xx'}),
+ http_put(Config, "/exchanges/%2F/test2_reg", QArgs, {group, '2xx'}),
+ http_put(Config, "/exchanges/vh1/reg_test3", QArgs, {group, '2xx'}),
+
+ %% for stats to update
+ timer:sleep(1500),
+
+ Total = length(rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_exchange, list_names, [])),
+
+ PageOfTwo = http_get(Config, "/exchanges?page=1&page_size=2", ?OK),
+ ?assertEqual(Total, maps:get(total_count, PageOfTwo)),
+ ?assertEqual(Total, maps:get(filtered_count, PageOfTwo)),
+ ?assertEqual(2, maps:get(item_count, PageOfTwo)),
+ ?assertEqual(1, maps:get(page, PageOfTwo)),
+ ?assertEqual(2, maps:get(page_size, PageOfTwo)),
+ ?assertEqual(round(Total / 2), maps:get(page_count, PageOfTwo)),
+ Items1 = maps:get(items, PageOfTwo),
+ ?assert(lists:all(fun(E) ->
+ not maps:is_key(message_stats, E)
+ end, Items1)),
+ assert_list([#{name => <<"">>, vhost => <<"/">>},
+ #{name => <<"amq.direct">>, vhost => <<"/">>}
+ ], Items1),
+
+ ByName = http_get(Config, "/exchanges?page=1&page_size=2&name=reg", ?OK),
+ ?assertEqual(Total, maps:get(total_count, ByName)),
+ ?assertEqual(2, maps:get(filtered_count, ByName)),
+ ?assertEqual(2, maps:get(item_count, ByName)),
+ ?assertEqual(1, maps:get(page, ByName)),
+ ?assertEqual(2, maps:get(page_size, ByName)),
+ ?assertEqual(1, maps:get(page_count, ByName)),
+ Items2 = maps:get(items, ByName),
+ ?assert(lists:all(fun(E) ->
+ not maps:is_key(message_stats, E)
+ end, Items2)),
+ assert_list([#{name => <<"test2_reg">>, vhost => <<"/">>},
+ #{name => <<"reg_test3">>, vhost => <<"vh1">>}
+ ], Items2),
+
+ RegExByName = http_get(Config,
+ "/exchanges?page=1&page_size=2&name=%5E(?=%5Ereg)&use_regex=true",
+ ?OK),
+ ?assertEqual(Total, maps:get(total_count, RegExByName)),
+ ?assertEqual(1, maps:get(filtered_count, RegExByName)),
+ ?assertEqual(1, maps:get(item_count, RegExByName)),
+ ?assertEqual(1, maps:get(page, RegExByName)),
+ ?assertEqual(2, maps:get(page_size, RegExByName)),
+ ?assertEqual(1, maps:get(page_count, RegExByName)),
+ Items3 = maps:get(items, RegExByName),
+ ?assert(lists:all(fun(E) ->
+ not maps:is_key(message_stats, E)
+ end, Items3)),
+ assert_list([#{name => <<"reg_test3">>, vhost => <<"vh1">>}
+ ], Items3),
+
+
+ http_get(Config, "/exchanges?page=1000", ?BAD_REQUEST),
+ http_get(Config, "/exchanges?page=-1", ?BAD_REQUEST),
+ http_get(Config, "/exchanges?page=not_an_integer_value", ?BAD_REQUEST),
+ http_get(Config, "/exchanges?page=1&page_size=not_an_integer_value", ?BAD_REQUEST),
+ http_get(Config, "/exchanges?page=1&page_size=501", ?BAD_REQUEST), %% max 500 allowed
+ http_get(Config, "/exchanges?page=-1&page_size=-2", ?BAD_REQUEST),
+ http_delete(Config, "/exchanges/%2F/test0", {group, '2xx'}),
+ http_delete(Config, "/exchanges/vh1/test1", {group, '2xx'}),
+ http_delete(Config, "/exchanges/%2F/test2_reg", {group, '2xx'}),
+ http_delete(Config, "/exchanges/vh1/reg_test3", {group, '2xx'}),
+ http_delete(Config, "/vhosts/vh1", {group, '2xx'}),
+ passed.
+
+exchanges_pagination_permissions_test(Config) ->
+ http_put(Config, "/users/admin", [{password, <<"admin">>},
+ {tags, <<"administrator">>}], {group, '2xx'}),
+ http_put(Config, "/users/non-admin", [{password, <<"non-admin">>},
+ {tags, <<"management">>}], {group, '2xx'}),
+ Perms = [{configure, <<".*">>},
+ {write, <<".*">>},
+ {read, <<".*">>}],
+ http_put(Config, "/vhosts/vh1", none, {group, '2xx'}),
+ http_put(Config, "/permissions/vh1/non-admin", Perms, {group, '2xx'}),
+ http_put(Config, "/permissions/%2F/admin", Perms, {group, '2xx'}),
+ http_put(Config, "/permissions/vh1/admin", Perms, {group, '2xx'}),
+ QArgs = #{},
+ http_put(Config, "/exchanges/%2F/test0", QArgs, "admin", "admin", {group, '2xx'}),
+ http_put(Config, "/exchanges/vh1/test1", QArgs, "non-admin", "non-admin", {group, '2xx'}),
+
+ %% for stats to update
+ timer:sleep(1500),
+
+ FirstPage = http_get(Config, "/exchanges?page=1&name=test1", "non-admin", "non-admin", ?OK),
+
+ ?assertEqual(8, maps:get(total_count, FirstPage)),
+ ?assertEqual(1, maps:get(item_count, FirstPage)),
+ ?assertEqual(1, maps:get(page, FirstPage)),
+ ?assertEqual(100, maps:get(page_size, FirstPage)),
+ ?assertEqual(1, maps:get(page_count, FirstPage)),
+ Items = maps:get(items, FirstPage),
+ ?assert(lists:all(fun(C) ->
+ not maps:is_key(message_stats, C)
+ end, Items)),
+ assert_list([#{name => <<"test1">>, vhost => <<"vh1">>}
+ ], Items),
+ http_delete(Config, "/exchanges/%2F/test0", {group, '2xx'}),
+ http_delete(Config, "/exchanges/vh1/test1", {group, '2xx'}),
+ http_delete(Config, "/users/admin", {group, '2xx'}),
+ http_delete(Config, "/users/non-admin", {group, '2xx'}),
+ passed.
+
+queue_pagination_test(Config) ->
+ QArgs = #{},
+ PermArgs = [{configure, <<".*">>}, {write, <<".*">>}, {read, <<".*">>}],
+ http_put(Config, "/vhosts/vh1", none, {group, '2xx'}),
+ http_put(Config, "/permissions/vh1/guest", PermArgs, {group, '2xx'}),
+
+ http_get(Config, "/queues/vh1?page=1&page_size=2", ?OK),
+
+ http_put(Config, "/queues/%2F/test0", QArgs, {group, '2xx'}),
+ http_put(Config, "/queues/vh1/test1", QArgs, {group, '2xx'}),
+ http_put(Config, "/queues/%2F/test2_reg", QArgs, {group, '2xx'}),
+ http_put(Config, "/queues/vh1/reg_test3", QArgs, {group, '2xx'}),
+
+ %% for stats to update
+ timer:sleep(1500),
+
+ Total = length(rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_amqqueue, list_names, [])),
+
+ PageOfTwo = http_get(Config, "/queues?page=1&page_size=2", ?OK),
+ ?assertEqual(Total, maps:get(total_count, PageOfTwo)),
+ ?assertEqual(Total, maps:get(filtered_count, PageOfTwo)),
+ ?assertEqual(2, maps:get(item_count, PageOfTwo)),
+ ?assertEqual(1, maps:get(page, PageOfTwo)),
+ ?assertEqual(2, maps:get(page_size, PageOfTwo)),
+ ?assertEqual(2, maps:get(page_count, PageOfTwo)),
+ Items1 = maps:get(items, PageOfTwo),
+ ?assert(lists:all(fun(C) ->
+ not maps:is_key(message_stats, C)
+ end, Items1)),
+ assert_list([#{name => <<"test0">>, vhost => <<"/">>},
+ #{name => <<"test2_reg">>, vhost => <<"/">>}
+ ], Items1),
+
+ SortedByName = http_get(Config, "/queues?sort=name&page=1&page_size=2", ?OK),
+ ?assertEqual(Total, maps:get(total_count, SortedByName)),
+ ?assertEqual(Total, maps:get(filtered_count, SortedByName)),
+ ?assertEqual(2, maps:get(item_count, SortedByName)),
+ ?assertEqual(1, maps:get(page, SortedByName)),
+ ?assertEqual(2, maps:get(page_size, SortedByName)),
+ ?assertEqual(2, maps:get(page_count, SortedByName)),
+ Items2 = maps:get(items, SortedByName),
+ ?assert(lists:all(fun(C) ->
+ not maps:is_key(message_stats, C)
+ end, Items2)),
+ assert_list([#{name => <<"reg_test3">>, vhost => <<"vh1">>},
+ #{name => <<"test0">>, vhost => <<"/">>}
+ ], Items2),
+
+ FirstPage = http_get(Config, "/queues?page=1", ?OK),
+ ?assertEqual(Total, maps:get(total_count, FirstPage)),
+ ?assertEqual(Total, maps:get(filtered_count, FirstPage)),
+ ?assertEqual(4, maps:get(item_count, FirstPage)),
+ ?assertEqual(1, maps:get(page, FirstPage)),
+ ?assertEqual(100, maps:get(page_size, FirstPage)),
+ ?assertEqual(1, maps:get(page_count, FirstPage)),
+ Items3 = maps:get(items, FirstPage),
+ ?assert(lists:all(fun(C) ->
+ not maps:is_key(message_stats, C)
+ end, Items3)),
+ assert_list([#{name => <<"test0">>, vhost => <<"/">>},
+ #{name => <<"test1">>, vhost => <<"vh1">>},
+ #{name => <<"test2_reg">>, vhost => <<"/">>},
+ #{name => <<"reg_test3">>, vhost =><<"vh1">>}
+ ], Items3),
+
+ ReverseSortedByName = http_get(Config,
+ "/queues?page=2&page_size=2&sort=name&sort_reverse=true",
+ ?OK),
+ ?assertEqual(Total, maps:get(total_count, ReverseSortedByName)),
+ ?assertEqual(Total, maps:get(filtered_count, ReverseSortedByName)),
+ ?assertEqual(2, maps:get(item_count, ReverseSortedByName)),
+ ?assertEqual(2, maps:get(page, ReverseSortedByName)),
+ ?assertEqual(2, maps:get(page_size, ReverseSortedByName)),
+ ?assertEqual(2, maps:get(page_count, ReverseSortedByName)),
+ Items4 = maps:get(items, ReverseSortedByName),
+ ?assert(lists:all(fun(C) ->
+ not maps:is_key(message_stats, C)
+ end, Items4)),
+ assert_list([#{name => <<"test0">>, vhost => <<"/">>},
+ #{name => <<"reg_test3">>, vhost => <<"vh1">>}
+ ], Items4),
+
+ ByName = http_get(Config, "/queues?page=1&page_size=2&name=reg", ?OK),
+ ?assertEqual(Total, maps:get(total_count, ByName)),
+ ?assertEqual(2, maps:get(filtered_count, ByName)),
+ ?assertEqual(2, maps:get(item_count, ByName)),
+ ?assertEqual(1, maps:get(page, ByName)),
+ ?assertEqual(2, maps:get(page_size, ByName)),
+ ?assertEqual(1, maps:get(page_count, ByName)),
+ Items5 = maps:get(items, ByName),
+ ?assert(lists:all(fun(C) ->
+ not maps:is_key(message_stats, C)
+ end, Items5)),
+ assert_list([#{name => <<"test2_reg">>, vhost => <<"/">>},
+ #{name => <<"reg_test3">>, vhost => <<"vh1">>}
+ ], Items5),
+
+ RegExByName = http_get(Config,
+ "/queues?page=1&page_size=2&name=%5E(?=%5Ereg)&use_regex=true",
+ ?OK),
+ ?assertEqual(Total, maps:get(total_count, RegExByName)),
+ ?assertEqual(1, maps:get(filtered_count, RegExByName)),
+ ?assertEqual(1, maps:get(item_count, RegExByName)),
+ ?assertEqual(1, maps:get(page, RegExByName)),
+ ?assertEqual(2, maps:get(page_size, RegExByName)),
+ ?assertEqual(1, maps:get(page_count, RegExByName)),
+ Items6 = maps:get(items, RegExByName),
+ ?assert(lists:all(fun(C) ->
+ not maps:is_key(message_stats, C)
+ end, Items6)),
+ assert_list([#{name => <<"reg_test3">>, vhost => <<"vh1">>}
+ ], Items6),
+
+ http_get(Config, "/queues?page=1000", ?BAD_REQUEST),
+ http_get(Config, "/queues?page=-1", ?BAD_REQUEST),
+ http_get(Config, "/queues?page=not_an_integer_value", ?BAD_REQUEST),
+ http_get(Config, "/queues?page=1&page_size=not_an_integer_value", ?BAD_REQUEST),
+ http_get(Config, "/queues?page=1&page_size=501", ?BAD_REQUEST), %% max 500 allowed
+ http_get(Config, "/queues?page=-1&page_size=-2", ?BAD_REQUEST),
+ http_delete(Config, "/queues/%2F/test0", {group, '2xx'}),
+ http_delete(Config, "/queues/vh1/test1", {group, '2xx'}),
+ http_delete(Config, "/queues/%2F/test2_reg", {group, '2xx'}),
+ http_delete(Config, "/queues/vh1/reg_test3", {group, '2xx'}),
+ http_delete(Config, "/vhosts/vh1", {group, '2xx'}),
+ passed.
+
+queue_pagination_columns_test(Config) ->
+ QArgs = #{},
+ PermArgs = [{configure, <<".*">>}, {write, <<".*">>}, {read, <<".*">>}],
+ http_put(Config, "/vhosts/vh1", none, [?CREATED, ?NO_CONTENT]),
+ http_put(Config, "/permissions/vh1/guest", PermArgs, [?CREATED, ?NO_CONTENT]),
+
+ http_get(Config, "/queues/vh1?columns=name&page=1&page_size=2", ?OK),
+ http_put(Config, "/queues/%2F/queue_a", QArgs, {group, '2xx'}),
+ http_put(Config, "/queues/vh1/queue_b", QArgs, {group, '2xx'}),
+ http_put(Config, "/queues/%2F/queue_c", QArgs, {group, '2xx'}),
+ http_put(Config, "/queues/vh1/queue_d", QArgs, {group, '2xx'}),
+ PageOfTwo = http_get(Config, "/queues?columns=name&page=1&page_size=2", ?OK),
+ ?assertEqual(4, maps:get(total_count, PageOfTwo)),
+ ?assertEqual(4, maps:get(filtered_count, PageOfTwo)),
+ ?assertEqual(2, maps:get(item_count, PageOfTwo)),
+ ?assertEqual(1, maps:get(page, PageOfTwo)),
+ ?assertEqual(2, maps:get(page_size, PageOfTwo)),
+ ?assertEqual(2, maps:get(page_count, PageOfTwo)),
+ assert_list([#{name => <<"queue_a">>},
+ #{name => <<"queue_c">>}
+ ], maps:get(items, PageOfTwo)),
+
+ ColumnNameVhost = http_get(Config, "/queues/vh1?columns=name&page=1&page_size=2", ?OK),
+ ?assertEqual(2, maps:get(total_count, ColumnNameVhost)),
+ ?assertEqual(2, maps:get(filtered_count, ColumnNameVhost)),
+ ?assertEqual(2, maps:get(item_count, ColumnNameVhost)),
+ ?assertEqual(1, maps:get(page, ColumnNameVhost)),
+ ?assertEqual(2, maps:get(page_size, ColumnNameVhost)),
+ ?assertEqual(1, maps:get(page_count, ColumnNameVhost)),
+ assert_list([#{name => <<"queue_b">>},
+ #{name => <<"queue_d">>}
+ ], maps:get(items, ColumnNameVhost)),
+
+ ColumnsNameVhost = http_get(Config, "/queues?columns=name,vhost&page=2&page_size=2", ?OK),
+ ?assertEqual(4, maps:get(total_count, ColumnsNameVhost)),
+ ?assertEqual(4, maps:get(filtered_count, ColumnsNameVhost)),
+ ?assertEqual(2, maps:get(item_count, ColumnsNameVhost)),
+ ?assertEqual(2, maps:get(page, ColumnsNameVhost)),
+ ?assertEqual(2, maps:get(page_size, ColumnsNameVhost)),
+ ?assertEqual(2, maps:get(page_count, ColumnsNameVhost)),
+ assert_list([
+ #{name => <<"queue_b">>,
+ vhost => <<"vh1">>},
+ #{name => <<"queue_d">>,
+ vhost => <<"vh1">>}
+ ], maps:get(items, ColumnsNameVhost)),
+
+
+ http_delete(Config, "/queues/%2F/queue_a", {group, '2xx'}),
+ http_delete(Config, "/queues/vh1/queue_b", {group, '2xx'}),
+ http_delete(Config, "/queues/%2F/queue_c", {group, '2xx'}),
+ http_delete(Config, "/queues/vh1/queue_d", {group, '2xx'}),
+ http_delete(Config, "/vhosts/vh1", {group, '2xx'}),
+ passed.
+
+queues_pagination_permissions_test(Config) ->
+ http_delete(Config, "/vhosts/vh1", [?NO_CONTENT, ?NOT_FOUND]),
+ http_delete(Config, "/queues/%2F/test0", [?NO_CONTENT, ?NOT_FOUND]),
+ http_delete(Config, "/users/admin", [?NO_CONTENT, ?NOT_FOUND]),
+ http_delete(Config, "/users/non-admin", [?NO_CONTENT, ?NOT_FOUND]),
+
+ http_put(Config, "/users/non-admin", [{password, <<"non-admin">>},
+ {tags, <<"management">>}], {group, '2xx'}),
+ http_put(Config, "/users/admin", [{password, <<"admin">>},
+ {tags, <<"administrator">>}], {group, '2xx'}),
+ Perms = [{configure, <<".*">>},
+ {write, <<".*">>},
+ {read, <<".*">>}],
+ http_put(Config, "/vhosts/vh1", none, {group, '2xx'}),
+ http_put(Config, "/permissions/vh1/non-admin", Perms, {group, '2xx'}),
+ http_put(Config, "/permissions/%2F/admin", Perms, {group, '2xx'}),
+ http_put(Config, "/permissions/vh1/admin", Perms, {group, '2xx'}),
+ QArgs = #{},
+ http_put(Config, "/queues/%2F/test0", QArgs, {group, '2xx'}),
+ http_put(Config, "/queues/vh1/test1", QArgs, "non-admin","non-admin", {group, '2xx'}),
+ FirstPage = http_get(Config, "/queues?page=1", "non-admin", "non-admin", ?OK),
+ ?assertEqual(1, maps:get(total_count, FirstPage)),
+ ?assertEqual(1, maps:get(item_count, FirstPage)),
+ ?assertEqual(1, maps:get(page, FirstPage)),
+ ?assertEqual(100, maps:get(page_size, FirstPage)),
+ ?assertEqual(1, maps:get(page_count, FirstPage)),
+ assert_list([#{name => <<"test1">>, vhost => <<"vh1">>}
+ ], maps:get(items, FirstPage)),
+
+ FirstPageAdm = http_get(Config, "/queues?page=1", "admin", "admin", ?OK),
+ ?assertEqual(2, maps:get(total_count, FirstPageAdm)),
+ ?assertEqual(2, maps:get(item_count, FirstPageAdm)),
+ ?assertEqual(1, maps:get(page, FirstPageAdm)),
+ ?assertEqual(100, maps:get(page_size, FirstPageAdm)),
+ ?assertEqual(1, maps:get(page_count, FirstPageAdm)),
+ assert_list([#{name => <<"test1">>, vhost => <<"vh1">>},
+ #{name => <<"test0">>, vhost => <<"/">>}
+ ], maps:get(items, FirstPageAdm)),
+
+ http_delete(Config, "/queues/%2F/test0", {group, '2xx'}),
+ http_delete(Config, "/queues/vh1/test1","admin","admin", {group, '2xx'}),
+ http_delete(Config, "/users/admin", {group, '2xx'}),
+ http_delete(Config, "/users/non-admin", {group, '2xx'}),
+ passed.
+
+samples_range_test(Config) ->
+ {Conn, Ch} = open_connection_and_channel(Config),
+
+ %% Connections
+ rabbit_ct_helpers:await_condition(
+ fun() ->
+ 1 == length(http_get(Config, "/connections?lengths_age=60&lengths_incr=1", ?OK))
+ end),
+ [Connection] = http_get(Config, "/connections?lengths_age=60&lengths_incr=1", ?OK),
+ ?assert(maps:is_key(name, Connection)),
+
+ amqp_channel:close(Ch),
+ amqp_connection:close(Conn),
+
+ %% Exchanges
+ [Exchange1 | _] = http_get(Config, "/exchanges?lengths_age=60&lengths_incr=1", ?OK),
+ ?assert(not maps:is_key(message_stats, Exchange1)),
+ Exchange2 = http_get(Config, "/exchanges/%2F/amq.direct?lengths_age=60&lengths_incr=1", ?OK),
+ ?assert(not maps:is_key(message_stats, Exchange2)),
+
+ %% Nodes
+ [Node] = http_get(Config, "/nodes?lengths_age=60&lengths_incr=1", ?OK),
+ ?assert(not maps:is_key(channel_closed_details, Node)),
+ ?assert(not maps:is_key(channel_closed, Node)),
+ ?assert(not maps:is_key(disk_free, Node)),
+ ?assert(not maps:is_key(io_read_count, Node)),
+
+ %% Overview
+ Overview = http_get(Config, "/overview?lengths_age=60&lengths_incr=1", ?OK),
+ ?assert(maps:is_key(node, Overview)),
+ ?assert(maps:is_key(object_totals, Overview)),
+ ?assert(not maps:is_key(queue_totals, Overview)),
+ ?assert(not maps:is_key(churn_rates, Overview)),
+ ?assert(not maps:is_key(message_stats, Overview)),
+
+ %% Queues
+ http_put(Config, "/queues/%2F/test0", #{}, {group, '2xx'}),
+
+ rabbit_ct_helpers:await_condition(
+ fun() ->
+ 1 == length(http_get(Config, "/queues/%2F?lengths_age=60&lengths_incr=1", ?OK))
+ end),
+
+ [Queue1] = http_get(Config, "/queues/%2F?lengths_age=60&lengths_incr=1", ?OK),
+ ?assert(not maps:is_key(message_stats, Queue1)),
+ ?assert(not maps:is_key(messages_details, Queue1)),
+ ?assert(not maps:is_key(reductions_details, Queue1)),
+
+ Queue2 = http_get(Config, "/queues/%2F/test0?lengths_age=60&lengths_incr=1", ?OK),
+ ?assert(not maps:is_key(message_stats, Queue2)),
+ ?assert(not maps:is_key(messages_details, Queue2)),
+ ?assert(not maps:is_key(reductions_details, Queue2)),
+
+ http_delete(Config, "/queues/%2F/test0", {group, '2xx'}),
+
+ %% Vhosts
+
+ http_put(Config, "/vhosts/vh1", none, {group, '2xx'}),
+
+ rabbit_ct_helpers:await_condition(
+ fun() ->
+ length(http_get(Config, "/vhosts?lengths_age=60&lengths_incr=1", ?OK)) > 1
+ end),
+
+ [VHost | _] = http_get(Config, "/vhosts?lengths_age=60&lengths_incr=1", ?OK),
+ ?assert(not maps:is_key(message_stats, VHost)),
+ ?assert(not maps:is_key(messages_ready_details, VHost)),
+ ?assert(not maps:is_key(recv_oct, VHost)),
+ ?assert(maps:is_key(cluster_state, VHost)),
+
+ http_delete(Config, "/vhosts/vh1", {group, '2xx'}),
+
+ passed.
+
+disable_with_disable_stats_parameter_test(Config) ->
+ {Conn, Ch} = open_connection_and_channel(Config),
+
+ %% Ensure we have some queue and exchange stats, needed later
+ http_put(Config, "/queues/%2F/test0", #{}, {group, '2xx'}),
+ timer:sleep(1500),
+ amqp_channel:call(Ch, #'basic.publish'{exchange = <<>>,
+ routing_key = <<"test0">>},
+ #amqp_msg{payload = <<"message">>}),
+
+ %% Channels.
+
+ timer:sleep(1500),
+ %% Check first that stats are available
+ http_get(Config, "/channels", ?OK),
+ %% Now we can disable them
+ http_get(Config, "/channels?disable_stats=true", ?BAD_REQUEST),
+
+
+ %% Connections.
+
+ %% Check first that stats are available
+ [ConnectionStats] = http_get(Config, "/connections", ?OK),
+ ?assert(maps:is_key(recv_oct_details, ConnectionStats)),
+ %% Now we can disable them
+ [Connection] = http_get(Config, "/connections?disable_stats=true", ?OK),
+ ?assert(maps:is_key(name, Connection)),
+ ?assert(not maps:is_key(recv_oct_details, Connection)),
+
+ amqp_channel:close(Ch),
+ amqp_connection:close(Conn),
+
+ %% Exchanges.
+
+ %% Check first that stats are available
+    %% Exchange stats aren't published - even as 0 - until some messages have
+    %% gone through. The publish at the start of this test ensures that at
+    %% least the default exchange has something to show.
+ ExchangeStats = http_get(Config, "/exchanges", ?OK),
+ ?assert(lists:any(fun(E) ->
+ maps:is_key(message_stats, E)
+ end, ExchangeStats)),
+ %% Now we can disable them
+ Exchanges = http_get(Config, "/exchanges?disable_stats=true", ?OK),
+ ?assert(lists:all(fun(E) ->
+ not maps:is_key(message_stats, E)
+ end, Exchanges)),
+ Exchange = http_get(Config, "/exchanges/%2F/amq.direct?disable_stats=true&lengths_age=60&lengths_incr=1", ?OK),
+ ?assert(not maps:is_key(message_stats, Exchange)),
+
+ %% Nodes.
+
+ %% Check that stats are available
+ [NodeStats] = http_get(Config, "/nodes", ?OK),
+ ?assert(maps:is_key(channel_closed_details, NodeStats)),
+ %% Now we can disable them
+ [Node] = http_get(Config, "/nodes?disable_stats=true", ?OK),
+ ?assert(not maps:is_key(channel_closed_details, Node)),
+ ?assert(not maps:is_key(channel_closed, Node)),
+ ?assert(not maps:is_key(disk_free, Node)),
+ ?assert(not maps:is_key(io_read_count, Node)),
+
+
+ %% Overview.
+
+ %% Check that stats are available
+ OverviewStats = http_get(Config, "/overview", ?OK),
+ ?assert(maps:is_key(message_stats, OverviewStats)),
+ %% Now we can disable them
+ Overview = http_get(Config, "/overview?disable_stats=true&lengths_age=60&lengths_incr=1", ?OK),
+ ?assert(not maps:is_key(queue_totals, Overview)),
+ ?assert(not maps:is_key(churn_rates, Overview)),
+ ?assert(not maps:is_key(message_stats, Overview)),
+
+ %% Queues.
+
+ %% Check that stats are available
+ [QueueStats] = http_get(Config, "/queues/%2F?lengths_age=60&lengths_incr=1", ?OK),
+ ?assert(maps:is_key(message_stats, QueueStats)),
+ %% Now we can disable them
+ [Queue] = http_get(Config, "/queues/%2F?disable_stats=true", ?OK),
+ ?assert(not maps:is_key(message_stats, Queue)),
+ ?assert(not maps:is_key(messages_details, Queue)),
+ ?assert(not maps:is_key(reductions_details, Queue)),
+
+ http_delete(Config, "/queues/%2F/test0", {group, '2xx'}),
+
+ %% Vhosts.
+
+ %% Check that stats are available
+ VHostStats = http_get(Config, "/vhosts?lengths_age=60&lengths_incr=1", ?OK),
+ ?assert(lists:all(fun(E) ->
+ maps:is_key(message_stats, E) and
+ maps:is_key(messages_ready_details, E)
+ end, VHostStats)),
+ %% Now we can disable them
+ VHosts = http_get(Config, "/vhosts?disable_stats=true&lengths_age=60&lengths_incr=1", ?OK),
+ ?assert(lists:all(fun(E) ->
+ not maps:is_key(message_stats, E) and
+ not maps:is_key(messages_ready_details, E)
+ end, VHosts)),
+
+ passed.
+
+sorting_test(Config) ->
+ QArgs = #{},
+ PermArgs = [{configure, <<".*">>}, {write, <<".*">>}, {read, <<".*">>}],
+ http_put(Config, "/vhosts/vh1", none, {group, '2xx'}),
+ http_put(Config, "/permissions/vh1/guest", PermArgs, {group, '2xx'}),
+ http_put(Config, "/queues/%2F/test0", QArgs, {group, '2xx'}),
+ http_put(Config, "/queues/vh1/test1", QArgs, {group, '2xx'}),
+ http_put(Config, "/queues/%2F/test2", QArgs, {group, '2xx'}),
+ http_put(Config, "/queues/vh1/test3", QArgs, {group, '2xx'}),
+ List1 = http_get(Config, "/queues", ?OK),
+ assert_list([#{name => <<"test0">>},
+ #{name => <<"test2">>},
+ #{name => <<"test1">>},
+ #{name => <<"test3">>}], List1),
+ ?assert(lists:all(fun(E) ->
+ not maps:is_key(message_stats, E)
+ end, List1)),
+ List2 = http_get(Config, "/queues?sort=name", ?OK),
+ assert_list([#{name => <<"test0">>},
+ #{name => <<"test1">>},
+ #{name => <<"test2">>},
+ #{name => <<"test3">>}], List2),
+ ?assert(lists:all(fun(E) ->
+ not maps:is_key(message_stats, E)
+ end, List2)),
+ List3 = http_get(Config, "/queues?sort=vhost", ?OK),
+ assert_list([#{name => <<"test0">>},
+ #{name => <<"test2">>},
+ #{name => <<"test1">>},
+ #{name => <<"test3">>}], List3),
+ ?assert(lists:all(fun(E) ->
+ not maps:is_key(message_stats, E)
+ end, List3)),
+ List4 = http_get(Config, "/queues?sort_reverse=true", ?OK),
+ assert_list([#{name => <<"test3">>},
+ #{name => <<"test1">>},
+ #{name => <<"test2">>},
+ #{name => <<"test0">>}], List4),
+ ?assert(lists:all(fun(E) ->
+ not maps:is_key(message_stats, E)
+ end, List4)),
+ List5 = http_get(Config, "/queues?sort=name&sort_reverse=true", ?OK),
+ assert_list([#{name => <<"test3">>},
+ #{name => <<"test2">>},
+ #{name => <<"test1">>},
+ #{name => <<"test0">>}], List5),
+ ?assert(lists:all(fun(E) ->
+ not maps:is_key(message_stats, E)
+ end, List5)),
+ List6 = http_get(Config, "/queues?sort=vhost&sort_reverse=true", ?OK),
+ assert_list([#{name => <<"test3">>},
+ #{name => <<"test1">>},
+ #{name => <<"test2">>},
+ #{name => <<"test0">>}], List6),
+ ?assert(lists:all(fun(E) ->
+ not maps:is_key(message_stats, E)
+ end, List6)),
+    %% A weak check, but it at least verifies that sorting on a dotted key does not blow up
+ http_get(Config, "/queues?sort=owner_pid_details.name", ?OK),
+ http_delete(Config, "/queues/%2F/test0", {group, '2xx'}),
+ http_delete(Config, "/queues/vh1/test1", {group, '2xx'}),
+ http_delete(Config, "/queues/%2F/test2", {group, '2xx'}),
+ http_delete(Config, "/queues/vh1/test3", {group, '2xx'}),
+ http_delete(Config, "/vhosts/vh1", {group, '2xx'}),
+ passed.
+
+columns_test(Config) ->
+ Path = "/queues/%2F/columns.test",
+ TTL = 30000,
+ http_delete(Config, Path, [{group, '2xx'}, 404]),
+ http_put(Config, Path, [{arguments, [{<<"x-message-ttl">>, TTL}]}],
+ {group, '2xx'}),
+ Item = #{arguments => #{'x-message-ttl' => TTL}, name => <<"columns.test">>},
+ timer:sleep(2000),
+ [Item] = http_get(Config, "/queues?columns=arguments.x-message-ttl,name", ?OK),
+ Item = http_get(Config, "/queues/%2F/columns.test?columns=arguments.x-message-ttl,name", ?OK),
+ ?assert(not maps:is_key(message_stats, Item)),
+ ?assert(not maps:is_key(messages_details, Item)),
+ ?assert(not maps:is_key(reductions_details, Item)),
+ http_delete(Config, Path, {group, '2xx'}),
+ passed.
+
+if_empty_unused_test(Config) ->
+ http_put(Config, "/exchanges/%2F/test", #{}, {group, '2xx'}),
+ http_put(Config, "/queues/%2F/test", #{}, {group, '2xx'}),
+ http_post(Config, "/bindings/%2F/e/test/q/test", #{}, {group, '2xx'}),
+ http_post(Config, "/exchanges/%2F/amq.default/publish",
+ msg(<<"test">>, #{}, <<"Hello world">>), ?OK),
+ http_delete(Config, "/queues/%2F/test?if-empty=true", ?BAD_REQUEST),
+ http_delete(Config, "/exchanges/%2F/test?if-unused=true", ?BAD_REQUEST),
+ http_delete(Config, "/queues/%2F/test/contents", {group, '2xx'}),
+
+ {Conn, _ConnPath, _ChPath, _ConnChPath} = get_conn(Config, "guest", "guest"),
+ {ok, Ch} = amqp_connection:open_channel(Conn),
+ amqp_channel:subscribe(Ch, #'basic.consume'{queue = <<"test">> }, self()),
+ http_delete(Config, "/queues/%2F/test?if-unused=true", ?BAD_REQUEST),
+ amqp_connection:close(Conn),
+
+ http_delete(Config, "/queues/%2F/test?if-empty=true", {group, '2xx'}),
+ http_delete(Config, "/exchanges/%2F/test?if-unused=true", {group, '2xx'}),
+ passed.
+
+invalid_config_test(Config) ->
+ {Conn, _Ch} = open_connection_and_channel(Config),
+
+ timer:sleep(1500),
+
+    %% Check first that stats aren't available (they are disabled in the suite setup)
+ http_get(Config, "/channels", ?BAD_REQUEST),
+ http_get(Config, "/connections", ?BAD_REQUEST),
+ http_get(Config, "/exchanges", ?BAD_REQUEST),
+
+    %% Now set an invalid config; stats become available again because the
+    %% setting falls back to its default ('false')
+ rabbit_ct_broker_helpers:rpc(Config, 0, application, set_env,
+ [rabbitmq_management, disable_management_stats, 50]),
+ http_get(Config, "/channels", ?OK),
+ http_get(Config, "/connections", ?OK),
+ http_get(Config, "/exchanges", ?OK),
+
+ %% Set a valid config again
+ rabbit_ct_broker_helpers:rpc(Config, 0, application, set_env,
+ [rabbitmq_management, disable_management_stats, true]),
+ http_get(Config, "/channels", ?BAD_REQUEST),
+ http_get(Config, "/connections", ?BAD_REQUEST),
+ http_get(Config, "/exchanges", ?BAD_REQUEST),
+
+    %% Now set an invalid config in the agent; stats become available again
+    %% because the setting falls back to its default ('false')
+ rabbit_ct_broker_helpers:rpc(Config, 0, application, set_env,
+ [rabbitmq_management_agent, disable_metrics_collector, "koala"]),
+ http_get(Config, "/channels", ?OK),
+ http_get(Config, "/connections", ?OK),
+ http_get(Config, "/exchanges", ?OK),
+
+ amqp_connection:close(Conn),
+
+ passed.
+
+%% -------------------------------------------------------------------
+%% Helpers.
+%% -------------------------------------------------------------------
+
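+%% Builds the request body for POST /exchanges/<vhost>/<name>/publish;
+%% delivery_mode 2 marks the message as persistent.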
+msg(Key, Headers, Body) ->
+ msg(Key, Headers, Body, <<"string">>).
+
+msg(Key, Headers, Body, Enc) ->
+ #{exchange => <<"">>,
+ routing_key => Key,
+ properties => #{delivery_mode => 2,
+ headers => Headers},
+ payload => Body,
+ payload_encoding => Enc}.
+
+local_port(Conn) ->
+ [{sock, Sock}] = amqp_connection:info(Conn, [sock]),
+ {ok, Port} = inet:port(Sock),
+ Port.
+
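+%% Opens N raw TCP connections and sends junk instead of the AMQP protocol
+%% header; the broker answers such clients with a valid "AMQP..." header
+%% before closing, which receive_msg/1 below waits for.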
+spawn_invalid(_Config, 0) ->
+ ok;
+spawn_invalid(Config, N) ->
+ Self = self(),
+ spawn(fun() ->
+ timer:sleep(rand:uniform(250)),
+ {ok, Sock} = gen_tcp:connect("localhost", amqp_port(Config), [list]),
+ ok = gen_tcp:send(Sock, "Some Data"),
+ receive_msg(Self)
+ end),
+ spawn_invalid(Config, N-1).
+
+receive_msg(Self) ->
+ receive
+ {tcp, _, [$A, $M, $Q, $P | _]} ->
+ Self ! done
+ after
+ 60000 ->
+ Self ! no_reply
+ end.
+
+wait_for_answers(0) ->
+ ok;
+wait_for_answers(N) ->
+ receive
+ done ->
+ wait_for_answers(N-1);
+ no_reply ->
+ throw(no_reply)
+ end.
+
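+%% Publishes to "myqueue" in a loop, roughly every 50 ms, until the caller
+%% sends stop_publish.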
+publish(Ch) ->
+ amqp_channel:call(Ch, #'basic.publish'{exchange = <<"">>,
+ routing_key = <<"myqueue">>},
+ #amqp_msg{payload = <<"message">>}),
+ receive
+ stop_publish ->
+ ok
+ after 50 ->
+ publish(Ch)
+ end.
+
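+%% Polls Fun up to N times, sleeping between attempts, and fails the test
+%% via ?assert/1 once the retries are exhausted. The sleep on success
+%% appears intended to give stats emission time to settle before the
+%% caller proceeds.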
+wait_until(_Fun, 0) ->
+ ?assert(wait_failed);
+wait_until(Fun, N) ->
+ case Fun() of
+ true ->
+ timer:sleep(1500);
+ false ->
+ timer:sleep(?COLLECT_INTERVAL + 100),
+ wait_until(Fun, N - 1)
+ end.
+
+http_post_json(Config, Path, Body, Assertion) ->
+ http_upload_raw(Config, post, Path, Body, "guest", "guest",
+ Assertion, [{"Content-Type", "application/json"}]).
diff --git a/deps/rabbitmq_management/test/rabbit_mgmt_rabbitmqadmin_SUITE.erl b/deps/rabbitmq_management/test/rabbit_mgmt_rabbitmqadmin_SUITE.erl
new file mode 100644
index 0000000000..7a192f225a
--- /dev/null
+++ b/deps/rabbitmq_management/test/rabbit_mgmt_rabbitmqadmin_SUITE.erl
@@ -0,0 +1,512 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2016-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mgmt_rabbitmqadmin_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-compile(export_all).
+
+all() ->
+ [ {group, list_to_atom(Py)} || Py <- find_pythons() ].
+
+groups() ->
+ Tests = [
+ help,
+ host,
+ base_uri,
+ config_file,
+ user,
+ fmt_long,
+ fmt_kvp,
+ fmt_tsv,
+ fmt_table,
+ fmt_bash,
+ vhosts,
+ users,
+ permissions,
+ alt_vhost,
+ exchanges,
+ queues,
+ queues_unicode,
+ bindings,
+ policies,
+ operator_policies,
+ parameters,
+ publish,
+ ignore_vhost,
+ sort
+ ],
+ [ {list_to_atom(Py), [], Tests} || Py <- find_pythons() ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ inets:start(),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, ?MODULE}
+ ]),
+ rabbit_ct_helpers:run_setup_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps() ++
+ [fun (C) ->
+ rabbit_ct_helpers:set_config(C,
+ {rabbitmqadmin_path,
+ rabbitmqadmin(C)})
+ end
+ ]).
+
+end_per_suite(Config) ->
+ ?assertNotEqual(os:getenv("HOME"), ?config(priv_dir, Config)),
+ rabbit_ct_helpers:run_teardown_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_group(python2, Config) ->
+ rabbit_ct_helpers:set_config(Config, {python, "python2"});
+init_per_group(python3, Config) ->
+ rabbit_ct_helpers:set_config(Config, {python, "python3"});
+init_per_group(_, Config) ->
+ Config.
+
+end_per_group(_, Config) ->
+ Config.
+
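+%% The config_file testcase points HOME at the CT private directory,
+%% keeping rabbitmqadmin's HOME-based config lookup inside the test
+%% sandbox; the original value is restored in end_per_testcase/2.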
+init_per_testcase(config_file, Config) ->
+ Home = os:getenv("HOME"),
+ os:putenv("HOME", ?config(priv_dir, Config)),
+ rabbit_ct_helpers:set_config(Config, {env_home, Home});
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(config_file, Config) ->
+ Home = rabbit_ct_helpers:get_config(Config, env_home),
+ os:putenv("HOME", Home);
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+help(Config) ->
+ {ok, _} = run(Config, ["--help"]),
+ {ok, _} = run(Config, ["help", "subcommands"]),
+ {ok, _} = run(Config, ["help", "config"]),
+ {error, _, _} = run(Config, ["help", "astronomy"]).
+
+host(Config) ->
+ {ok, _} = run(Config, ["show", "overview"]),
+ {ok, _} = run(Config, ["--host", "localhost", "show", "overview"]),
+ {error, _, _} = run(Config, ["--host", "some-host-that-does-not-exist",
+ "show", "overview"]).
+
+base_uri(Config) ->
+ {ok, _} = run(Config, ["--base-uri", "http://localhost", "list", "exchanges"]),
+ {ok, _} = run(Config, ["--base-uri", "http://localhost/", "list", "exchanges"]),
+ {ok, _} = run(Config, ["--base-uri", "http://localhost", "--vhost", "/", "list", "exchanges"]),
+ {ok, _} = run(Config, ["--base-uri", "http://localhost/", "--vhost", "/", "list", "exchanges"]),
+ {error, _, _} = run(Config, ["--base-uri", "https://some-host-that-does-not-exist:15672/",
+ "list", "exchanges"]),
+ {error, _, _} = run(Config, ["--base-uri", "http://localhost:15672/", "--vhost", "some-vhost-that-does-not-exist",
+ "list", "exchanges"]).
+
+
+config_file(Config) ->
+ MgmtPort = integer_to_list(http_api_port(Config)),
+ {_DefConf, TestConf} = write_test_config(Config),
+
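+    %% TestConf is an INI-style file with "default", "reachable" and
+    %% "bad_credentials" node sections (written by write_test_config/1);
+    %% a section is picked with --node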
+ %% try using a non-existent config file
+ ?assertMatch({error, _, _}, run(Config, ["--config", "/tmp/no-such-config-file", "show", "overview"])),
+ %% use a config file section with a reachable endpoint and correct credentials
+ ?assertMatch({ok, _}, run(Config, ["--config", TestConf, "--node", "reachable", "show", "overview"])),
+
+    %% The default node section in the config file points at an unreachable
+    %% endpoint. Note that the function that drives rabbitmqadmin always
+    %% passes --port, which overrides the port value from the config file.
+ ?assertMatch({error, _, _}, run(Config, ["--config", TestConf, "show", "overview"])),
+
+ %% overrides hostname and port using --base-uri
+ BaseURI = rabbit_misc:format("http://localhost:~s", [MgmtPort]),
+ ?assertMatch({ok, _}, run(Config, ["--config", TestConf, "--base-uri", BaseURI, "show", "overview"])),
+
+ %% overrides --host and --port on the command line
+ ?assertMatch({ok, _}, run(Config, ["--config", TestConf, "--node", "default", "--host", "localhost", "--port", MgmtPort, "show", "overview"])),
+
+ ?assertMatch({ok, _}, run(Config, ["show", "overview"])),
+ ?assertMatch({error, _, _}, run(Config, ["--node", "bad_credentials", "show", "overview"])),
+ %% overrides --username and --password on the command line with correct credentials
+ ?assertMatch({ok, _}, run(Config, ["--node", "bad_credentials", "--username", "guest", "--password", "guest", "show", "overview"])),
+ %% overrides --username and --password on the command line with incorrect credentials
+ ?assertMatch({error, _, _}, run(Config, ["--node", "bad_credentials", "--username", "gu3st", "--password", "guesTTTT", "show", "overview"])).
+
+user(Config) ->
+ ?assertMatch({ok, _}, run(Config, ["--user", "guest", "--password", "guest", "show", "overview"])),
+ ?assertMatch({error, _, _}, run(Config, ["--user", "no", "--password", "guest", "show", "overview"])),
+ ?assertMatch({error, _, _}, run(Config, ["--user", "guest", "--password", "no", "show", "overview"])).
+
+fmt_long(Config) ->
+ Out = multi_line_string([
+ "",
+ "--------------------------------------------------------------------------------",
+ "",
+ " name: /",
+ "tracing: False",
+ "",
+ "--------------------------------------------------------------------------------",
+ "" ]),
+ ?assertEqual({ok, Out}, run(Config, ["--format", "long", "list", "vhosts", "name", "tracing"])).
+
+fmt_kvp(Config) ->
+ Out = multi_line_string(["name=\"/\" tracing=\"False\""]),
+ ?assertEqual({ok, Out}, run(Config, ["--format", "kvp", "list", "vhosts", "name", "tracing"])).
+
+fmt_tsv(Config) ->
+ Out = multi_line_string([
+ "name\ttracing",
+ "/\tFalse"
+ ]),
+ ?assertEqual({ok, Out}, run(Config, ["--format", "tsv", "list", "vhosts", "name", "tracing"])).
+
+fmt_table(Config) ->
+ Out = multi_line_string([
+ "+------+---------+",
+ "| name | tracing |",
+ "+------+---------+",
+ "| / | False |",
+ "+------+---------+"
+ ]),
+ ?assertEqual({ok, Out}, run(Config, ["list", "vhosts", "name", "tracing"])),
+ ?assertEqual({ok, Out}, run(Config, ["--format", "table", "list",
+ "vhosts", "name", "tracing"])).
+
+fmt_bash(Config) ->
+ {ok, "/\n"} = run(Config, ["--format", "bash", "list",
+ "vhosts", "name", "tracing"]).
+
+vhosts(Config) ->
+ {ok, ["/"]} = run_list(Config, l("vhosts")),
+ {ok, _} = run(Config, ["declare", "vhost", "name=foo"]),
+ {ok, ["/", "foo"]} = run_list(Config, l("vhosts")),
+ {ok, _} = run(Config, ["delete", "vhost", "name=foo"]),
+ {ok, ["/"]} = run_list(Config, l("vhosts")).
+
+users(Config) ->
+ {ok, ["guest"]} = run_list(Config, l("users")),
+ {error, _, _} = run(Config, ["declare", "user", "name=foo"]),
+ {ok, _} = run(Config, ["declare", "user", "name=foo", "password=pass", "tags="]),
+
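+    %% password_hash values must be base64-encoded; the declare with a
+    %% non-base64 value further below is expected to fail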
+ {ok, _} = run(Config, ["declare", "user", "name=with_password_hash1", "password_hash=MmJiODBkNTM3YjFkYTNlMzhiZDMwMzYxYWE4NTU2ODZiZGUwZWFjZDcxNjJmZWY2YTI1ZmU5N2JmNTI3YTI1Yg==",
+ "tags=management"]),
+ {ok, _} = run(Config, ["declare", "user", "name=with_password_hash2",
+ "hashing_algorithm=rabbit_password_hashing_sha256", "password_hash=MmJiODBkNTM3YjFkYTNlMzhiZDMwMzYxYWE4NTU2ODZiZGUwZWFjZDcxNjJmZWY2YTI1ZmU5N2JmNTI3YTI1Yg==",
+ "tags=management"]),
+ {ok, _} = run(Config, ["declare", "user", "name=with_password_hash3",
+ "hashing_algorithm=rabbit_password_hashing_sha512", "password_hash=YmQyYjFhYWY3ZWY0ZjA5YmU5ZjUyY2UyZDhkNTk5Njc0ZDgxYWE5ZDZhNDQyMTY5NmRjNGQ5M2RkMDYxOWQ2ODJjZTU2YjRkNjRhOWVmMDk3NzYxY2VkOTllMGY2NzI2NWI1Zjc2MDg1ZTViMGVlN2NhNDY5NmIyYWQ2ZmUyYjI=",
+ "tags=management"]),
+
+ {error, _, _} = run(Config, ["declare", "user", "name=with_password_hash4",
+ "hashing_algorithm=rabbit_password_hashing_sha256", "password_hash=not-base64-encoded",
+ "tags=management"]),
+
+
+ {ok, ["foo", "guest",
+ "with_password_hash1",
+ "with_password_hash2",
+ "with_password_hash3"]} = run_list(Config, l("users")),
+
+ {ok, _} = run(Config, ["delete", "user", "name=foo"]),
+ {ok, _} = run(Config, ["delete", "user", "name=with_password_hash1"]),
+ {ok, _} = run(Config, ["delete", "user", "name=with_password_hash2"]),
+ {ok, _} = run(Config, ["delete", "user", "name=with_password_hash3"]),
+
+ {ok, ["guest"]} = run_list(Config, l("users")).
+
+permissions(Config) ->
+ {ok, _} = run(Config, ["declare", "vhost", "name=foo"]),
+ {ok, _} = run(Config, ["declare", "user", "name=bar", "password=pass", "tags="]),
+    %% The user that creates a vhost is granted permissions on it automatically
+ %% See https://github.com/rabbitmq/rabbitmq-management/issues/444
+ {ok, [["guest", "/"],
+ ["guest", "foo"]]} = run_table(Config, ["list", "permissions",
+ "user", "vhost"]),
+ {ok, _} = run(Config, ["declare", "permission", "user=bar", "vhost=foo",
+ "configure=.*", "write=.*", "read=.*"]),
+ {ok, [["guest", "/"], ["bar", "foo"], ["guest", "foo"]]}
+ = run_table(Config, ["list", "permissions", "user", "vhost"]),
+ {ok, _} = run(Config, ["delete", "user", "name=bar"]),
+ {ok, _} = run(Config, ["delete", "vhost", "name=foo"]).
+
+alt_vhost(Config) ->
+ {ok, _} = run(Config, ["declare", "vhost", "name=foo"]),
+ {ok, _} = run(Config, ["declare", "permission", "user=guest", "vhost=foo",
+ "configure=.*", "write=.*", "read=.*"]),
+ {ok, _} = run(Config, ["declare", "queue", "name=in_/"]),
+ {ok, _} = run(Config, ["--vhost", "foo", "declare", "queue", "name=in_foo"]),
+ {ok, [["/", "in_/"], ["foo", "in_foo"]]} = run_table(Config, ["list", "queues",
+ "vhost", "name"]),
+ {ok, _} = run(Config, ["--vhost", "foo", "delete", "queue", "name=in_foo"]),
+ {ok, _} = run(Config, ["delete", "queue", "name=in_/"]),
+ {ok, _} = run(Config, ["delete", "vhost", "name=foo"]).
+
+exchanges(Config) ->
+ {ok, _} = run(Config, ["declare", "exchange", "name=foo", "type=direct"]),
+ {ok, ["amq.direct",
+ "amq.fanout",
+ "amq.headers",
+ "amq.match",
+ "amq.rabbitmq.trace",
+ "amq.topic",
+ "foo"]} = run_list(Config, l("exchanges")),
+ {ok, _} = run(Config, ["delete", "exchange", "name=foo"]).
+
+queues(Config) ->
+ {ok, _} = run(Config, ["declare", "queue", "name=foo"]),
+ {ok, ["foo"]} = run_list(Config, l("queues")),
+ {ok, _} = run(Config, ["delete", "queue", "name=foo"]).
+
+queues_unicode(Config) ->
+ {ok, _} = run(Config, ["declare", "queue", "name=ööö"]),
+    %% 'ö' is encoded as 0xC3 0xB6 in UTF-8. We use a list of
+ %% integers here because a binary literal would not work with Erlang
+ %% R16B03.
+ QUEUE_NAME = [195,182, 195,182, 195,182],
+ {ok, [QUEUE_NAME]} = run_list(Config, l("queues")),
+ {ok, _} = run(Config, ["delete", "queue", "name=ööö"]).
+
+bindings(Config) ->
+ {ok, _} = run(Config, ["declare", "queue", "name=foo"]),
+ {ok, _} = run(Config, ["declare", "binding", "source=amq.direct",
+ "destination=foo", "destination_type=queue",
+ "routing_key=test"]),
+ {ok, [["foo", "queue", "foo"],
+ ["amq.direct", "foo", "queue", "test"]
+ ]} = run_table(Config,
+ ["list", "bindings",
+ "source", "destination",
+ "destination_type", "routing_key"]),
+ {ok, _} = run(Config, ["delete", "queue", "name=foo"]).
+
+policies(Config) ->
+ {ok, _} = run(Config, ["declare", "policy", "name=ha",
+ "pattern=.*", "definition={\"ha-mode\":\"all\"}"]),
+ {ok, [["ha", "/", ".*", "{\"ha-mode\": \"all\"}"]]} =
+ run_table(Config, ["list", "policies", "name",
+ "vhost", "pattern", "definition"]),
+ {ok, _} = run(Config, ["delete", "policy", "name=ha"]).
+
+operator_policies(Config) ->
+ {ok, _} = run(Config, ["declare", "operator_policy", "name=len",
+ "pattern=.*", "definition={\"max-length\":100}"]),
+ {ok, [["len", "/", ".*", "{\"max-length\": 100}"]]} =
+ run_table(Config, ["list", "operator_policies", "name",
+ "vhost", "pattern", "definition"]),
+ {ok, _} = run(Config, ["delete", "operator_policy", "name=len"]).
+
+parameters(Config) ->
+ ok = rpc(Config, rabbit_mgmt_runtime_parameters_util, register, []),
+ {ok, _} = run(Config, ["declare", "parameter", "component=test",
+ "name=good", "value=123"]),
+ {ok, [["test", "good", "/", "123"]]} = run_table(Config, ["list",
+ "parameters",
+ "component",
+ "name",
+ "vhost",
+ "value"]),
+ {ok, _} = run(Config, ["delete", "parameter", "component=test", "name=good"]),
+ ok = rpc(Config, rabbit_mgmt_runtime_parameters_util, unregister, []).
+
+publish(Config) ->
+ {ok, _} = run(Config, ["declare", "queue", "name=test"]),
+ {ok, _} = run(Config, ["publish", "routing_key=test", "payload=test_1"]),
+ {ok, _} = run(Config, ["publish", "routing_key=test", "payload=test_2"]),
+ %% publish with stdin
+ %% TODO: this must support Python 3 as well
+ Py = find_python2(),
+ {ok, _} = rabbit_ct_helpers:exec([Py, "-c",
+ publish_with_stdin_python_program(Config, "test_3")],
+ []),
+
+ M = exp_msg("test", 2, "False", "test_1"),
+ {ok, [M]} = run_table(Config, ["get", "queue=test", "ackmode=ack_requeue_false"]),
+ M2 = exp_msg("test", 1, "False", "test_2"),
+ {ok, [M2]} = run_table(Config, ["get", "queue=test", "ackmode=ack_requeue_true"]),
+ M3 = exp_msg("test", 1, "True", "test_2"),
+ {ok, [M3]} = run_table(Config, ["get",
+ "queue=test",
+ "ackmode=ack_requeue_false"]),
+ M4 = exp_msg("test", 0, "False", "test_3"),
+ {ok, [M4]} = run_table(Config, ["get",
+ "queue=test",
+ "ackmode=ack_requeue_false"]),
+ {ok, _} = run(Config, ["publish", "routing_key=test", "payload=test_4"]),
+ Fn = filename:join(?config(priv_dir, Config), "publish_test_4"),
+
+ {ok, _} = run(Config, ["get", "queue=test", "ackmode=ack_requeue_false", "payload_file=" ++ Fn]),
+ {ok, <<"test_4">>} = file:read_file(Fn),
+ {ok, _} = run(Config, ["delete", "queue", "name=test"]).
+
+ignore_vhost(Config) ->
+ {ok, _} = run(Config, ["--vhost", "/", "show", "overview"]),
+ {ok, _} = run(Config, ["--vhost", "/", "list", "users"]),
+ {ok, _} = run(Config, ["--vhost", "/", "list", "vhosts"]),
+ {ok, _} = run(Config, ["--vhost", "/", "list", "nodes"]),
+ {ok, _} = run(Config, ["--vhost", "/", "list", "permissions"]),
+ {ok, _} = run(Config, ["--vhost", "/", "declare", "user",
+ "name=foo", "password=pass", "tags="]),
+ {ok, _} = run(Config, ["delete", "user", "name=foo"]).
+
+sort(Config) ->
+ {ok, _} = run(Config, ["declare", "queue", "name=foo"]),
+ {ok, _} = run(Config, ["declare", "binding", "source=amq.direct",
+ "destination=foo", "destination_type=queue",
+ "routing_key=bbb"]),
+ {ok, _} = run(Config, ["declare", "binding", "source=amq.topic",
+ "destination=foo", "destination_type=queue",
+ "routing_key=aaa"]),
+ {ok, [["foo"],
+ ["amq.direct", "bbb"],
+ ["amq.topic", "aaa"]]} = run_table(Config, ["--sort", "source",
+ "list", "bindings",
+ "source", "routing_key"]),
+ {ok, [["amq.topic", "aaa"],
+ ["amq.direct", "bbb"],
+ ["foo"]]} = run_table(Config, ["--sort", "routing_key",
+ "list", "bindings", "source",
+ "routing_key"]),
+ {ok, [["amq.topic", "aaa"],
+ ["amq.direct", "bbb"],
+ ["foo"]]} = run_table(Config, ["--sort", "source",
+ "--sort-reverse", "list",
+ "bindings", "source",
+ "routing_key"]),
+ {ok, _} = run(Config, ["delete", "queue", "name=foo"]).
+
+%% -------------------------------------------------------------------
+%% Utilities
+%% -------------------------------------------------------------------
+
+exp_msg(Key, Count, Redelivered, Payload) ->
+ % routing_key, message_count,
+ % payload, payload_bytes,
+ % payload_encoding, redelivered
+ [Key, integer_to_list(Count),
+ Payload, integer_to_list(length(Payload)),
+ "string", Redelivered].
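+%% e.g. (our illustration) exp_msg("test", 2, "False", "test_1") =:=
+%%          ["test", "2", "test_1", "6", "string", "False"]
+%% where "6" is the payload byte count of "test_1", i.e. one row as
+%% parsed by run_table/2.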
+
+rpc(Config, M, F, A) ->
+ rabbit_ct_broker_helpers:rpc(Config, 0, M, F, A).
+
+l(Thing) ->
+ ["list", Thing, "name"].
+
+multi_line_string(Lines) ->
+ lists:flatten([string:join(Lines, io_lib:nl()), io_lib:nl()]).
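+%% e.g. multi_line_string(["a", "b"]) yields "a\nb\n" wherever
+%% io_lib:nl() is "\n" (our illustration).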
+
+run_table(Config, Args) ->
+ {ok, Lines} = run_list(Config, Args),
+ Tokens = [string:tokens(L, "\t") || L <- Lines],
+ {ok, Tokens}.
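+%% e.g. {ok, ["guest\t/"]} from run_list/2 becomes {ok, [["guest", "/"]]}
+%% (our illustration).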
+
+run_list(Config, Args) ->
+ A = ["-f", "tsv", "-q"],
+ case run(Config, A ++ Args) of
+ {ok, Out} -> {ok, string:tokens(Out, io_lib:nl())};
+ Err -> Err
+ end.
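+%% e.g. a raw "-f tsv -q" output of "/\nfoo\n" becomes {ok, ["/", "foo"]}
+%% (our illustration).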
+
+run(Config, Args) ->
+ Py = rabbit_ct_helpers:get_config(Config, python),
+ MgmtPort = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_mgmt),
+ RmqAdmin = rabbit_ct_helpers:get_config(Config, rabbitmqadmin_path),
+ rabbit_ct_helpers:exec([Py,
+ RmqAdmin,
+ "-P",
+ integer_to_list(MgmtPort)] ++ Args,
+ [drop_stdout]).
+
+rabbitmqadmin(Config) ->
+ filename:join([?config(current_srcdir, Config), "bin", "rabbitmqadmin"]).
+
+find_pythons() ->
+ Py2 = rabbit_ct_helpers:exec(["python2", "-V"]),
+ Py3 = rabbit_ct_helpers:exec(["python3", "-V"]),
+ case {Py2, Py3} of
+ {{ok, _}, {ok, _}} -> ["python2", "python3"];
+ {{ok, _}, _} -> ["python2"];
+ {_, {ok, _}} -> ["python3"];
+ _ -> erlang:error("python not found")
+ end.
+
+find_python2() ->
+ Py2 = rabbit_ct_helpers:exec(["python2", "-V"]),
+ Py27 = rabbit_ct_helpers:exec(["python2.7", "-V"]),
+ case {Py2, Py27} of
+        {{ok, _}, {ok, _}} -> "python2.7";
+        {{ok, _}, _} -> "python2";
+        {_, {ok, _}} -> "python2.7";
+        _ -> "python2"
+ end.
+
+publish_with_stdin_python_program(Config, In) ->
+ MgmtPort = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_mgmt),
+ RmqAdmin = rabbit_ct_helpers:get_config(Config, rabbitmqadmin_path),
+ Py = find_python2(),
+ "import subprocess;" ++
+ "proc = subprocess.Popen(['" ++ Py ++ "', '" ++ RmqAdmin ++ "', '-P', '" ++ integer_to_list(MgmtPort) ++
+ "', 'publish', 'routing_key=test'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE);" ++
+ "(stdout, stderr) = proc.communicate('" ++ In ++ "');" ++
+ "exit(proc.returncode)".
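+%% For illustration (port and path made up; the real values come from
+%% Config), the generated one-liner corresponds to this Python 2 program:
+%%   import subprocess
+%%   proc = subprocess.Popen(['python2', 'rabbitmqadmin', '-P', '15672',
+%%                            'publish', 'routing_key=test'],
+%%                           stdin=subprocess.PIPE, stdout=subprocess.PIPE,
+%%                           stderr=subprocess.PIPE)
+%%   (stdout, stderr) = proc.communicate('test_3')
+%%   exit(proc.returncode)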
+
+write_test_config(Config) ->
+ MgmtPort = integer_to_list(http_api_port(Config)),
+ PrivDir = ?config(priv_dir, Config),
+ DefaultConfig = [
+ "[bad_credentials]",
+ "hostname = localhost",
+        "port = " ++ MgmtPort,
+ "username = gu/est",
+ "password = gu\\est",
+ "declare_vhost = /",
+ "vhost = /",
+ "",
+ "[bad_host]",
+ "hostname = non-existent.acme.com",
+ "port = " ++ MgmtPort,
+ "username = guest",
+ "password = guest"
+ ],
+ TestConfig = [
+ "[reachable]",
+ "hostname = localhost",
+ "port = " ++ MgmtPort,
+ "username = guest",
+ "password = guest",
+ "declare_vhost = /",
+ "vhost = /",
+ "",
+ "[default]",
+ "hostname = non-existent.acme.com",
+ "port = 99799",
+ "username = guest",
+ "password = guest"
+ ],
+ DefaultConfig1 = [string:join(DefaultConfig, io_lib:nl()), io_lib:nl()],
+ TestConfig1 = [string:join(TestConfig, io_lib:nl()), io_lib:nl()],
+ FnDefault = filename:join(PrivDir, ".rabbitmqadmin.conf"),
+ FnTest = filename:join(PrivDir, "test-config"),
+ file:write_file(FnDefault, DefaultConfig1),
+ file:write_file(FnTest, TestConfig1),
+ {FnDefault, FnTest}.
+
+http_api_port(Config) ->
+ rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_mgmt).
diff --git a/deps/rabbitmq_management/test/rabbit_mgmt_runtime_parameters_util.erl b/deps/rabbitmq_management/test/rabbit_mgmt_runtime_parameters_util.erl
new file mode 100644
index 0000000000..0ac911b7c0
--- /dev/null
+++ b/deps/rabbitmq_management/test/rabbit_mgmt_runtime_parameters_util.erl
@@ -0,0 +1,63 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mgmt_runtime_parameters_util).
+-behaviour(rabbit_runtime_parameter).
+-behaviour(rabbit_policy_validator).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-export([validate/5, notify/5, notify_clear/4]).
+-export([register/0, unregister/0]).
+-export([validate_policy/1]).
+-export([register_policy_validator/0, unregister_policy_validator/0]).
+
+%----------------------------------------------------------------------------
+
+register() ->
+ rabbit_registry:register(runtime_parameter, <<"test">>, ?MODULE).
+
+unregister() ->
+ rabbit_registry:unregister(runtime_parameter, <<"test">>).
+
+validate(_, <<"test">>, <<"good">>, _Term, _User) -> ok;
+validate(_, <<"test">>, <<"maybe">>, <<"good">>, _User) -> ok;
+validate(_, <<"test">>, <<"admin">>, _Term, none) -> ok;
+validate(_, <<"test">>, <<"admin">>, _Term, User) ->
+ case lists:member(administrator, User#user.tags) of
+ true -> ok;
+ false -> {error, "meh", []}
+ end;
+validate(_, <<"test">>, _, _, _) -> {error, "meh", []}.
+
+notify(_, _, _, _, _) -> ok.
+notify_clear(_, _, _, _) -> ok.
+
+%----------------------------------------------------------------------------
+
+register_policy_validator() ->
+ rabbit_registry:register(policy_validator, <<"testeven">>, ?MODULE),
+ rabbit_registry:register(policy_validator, <<"testpos">>, ?MODULE).
+
+unregister_policy_validator() ->
+ rabbit_registry:unregister(policy_validator, <<"testeven">>),
+ rabbit_registry:unregister(policy_validator, <<"testpos">>).
+
+validate_policy([{<<"testeven">>, Terms}]) when is_list(Terms) ->
+ case length(Terms) rem 2 =:= 0 of
+ true -> ok;
+ false -> {error, "meh", []}
+ end;
+
+validate_policy([{<<"testpos">>, Terms}]) when is_list(Terms) ->
+ case lists:all(fun (N) -> is_integer(N) andalso N > 0 end, Terms) of
+ true -> ok;
+ false -> {error, "meh", []}
+ end;
+
+validate_policy(_) ->
+ {error, "meh", []}.
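+
+%% e.g. (our illustration) validate_policy([{<<"testeven">>, [1, 2]}]) =:= ok
+%% since the list has an even number of terms, while
+%% validate_policy([{<<"testpos">>, [0]}]) returns {error, "meh", []}
+%% because 0 is not a positive integer.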
diff --git a/deps/rabbitmq_management/test/rabbit_mgmt_stats_SUITE.erl b/deps/rabbitmq_management/test/rabbit_mgmt_stats_SUITE.erl
new file mode 100644
index 0000000000..7ebb04da69
--- /dev/null
+++ b/deps/rabbitmq_management/test/rabbit_mgmt_stats_SUITE.erl
@@ -0,0 +1,458 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2016-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mgmt_stats_SUITE).
+
+-include_lib("proper/include/proper.hrl").
+-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_metrics.hrl").
+-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl").
+
+-compile(export_all).
+-compile({no_auto_import, [ceil/1]}).
+
+all() ->
+ [
+ {group, parallel_tests}
+ ].
+
+groups() ->
+ [
+ {parallel_tests, [parallel], [
+ format_rate_no_range_test,
+ format_zero_rate_no_range_test,
+ format_incremental_rate_no_range_test,
+ format_incremental_zero_rate_no_range_test,
+ format_total_no_range_test,
+ format_incremental_total_no_range_test,
+ format_rate_range_test,
+ format_incremental_rate_range_test,
+ format_incremental_zero_rate_range_test,
+ format_total_range_test,
+ format_incremental_total_range_test,
+ format_samples_range_test,
+ format_incremental_samples_range_test,
+ format_avg_rate_range_test,
+ format_incremental_avg_rate_range_test,
+ format_avg_range_test,
+ format_incremental_avg_range_test
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ Config.
+
+end_per_suite(Config) ->
+ Config.
+
+init_per_group(_, Config) ->
+ Config.
+
+end_per_group(_, _Config) ->
+ ok.
+
+init_per_testcase(_, Config) ->
+ Config.
+
+end_per_testcase(_, _Config) ->
+ ok.
+
+%% -------------------------------------------------------------------
+%% Generators.
+%% -------------------------------------------------------------------
+elements_gen() ->
+ ?LET(Length, oneof([1, 2, 3, 7, 8, 20]),
+ ?LET(Elements, list(vector(Length, int())),
+ [erlang:list_to_tuple(E) || E <- Elements])).
+
+stats_tables() ->
+ [connection_stats_coarse_conn_stats, vhost_stats_coarse_conn_stats,
+ channel_stats_fine_stats, channel_exchange_stats_fine_stats,
+ channel_queue_stats_deliver_stats, vhost_stats_fine_stats,
+ queue_stats_deliver_stats, vhost_stats_deliver_stats,
+ channel_stats_deliver_stats, channel_process_stats,
+ queue_stats_publish, queue_exchange_stats_publish,
+ exchange_stats_publish_out, exchange_stats_publish_in,
+ queue_msg_stats, vhost_msg_stats, queue_process_stats,
+ node_coarse_stats, node_persister_stats,
+ node_node_coarse_stats, queue_msg_rates, vhost_msg_rates,
+ connection_churn_rates
+ ].
+
+sample_size(large) ->
+ choose(15, 200);
+sample_size(small) ->
+ choose(0, 1).
+
+sample_gen(_Table, 0) ->
+ [];
+sample_gen(Table, 1) ->
+ ?LET(Stats, stats_gen(Table), [Stats || _ <- lists:seq(1, 5)]);
+sample_gen(Table, N) ->
+ vector(N, stats_gen(Table)).
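+%% A sample size of 1 is expanded to five identical samples, so even the
+%% smallest runs feed the slide a constant series and keep the instant
+%% rate at 0.0 (our reading of the generator).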
+
+content_gen(Size) ->
+ ?LET({Table, SampleSize}, {oneof(stats_tables()), sample_size(Size)},
+ ?LET(Stats, sample_gen(Table, SampleSize),
+ {Table, Stats})).
+
+interval_gen() ->
+ %% Keep it at most 150ms, so the test runs in a reasonable time
+ choose(1, 150).
+
+stats_gen(Table) ->
+ ?LET(Vector, vector(length(?stats_per_table(Table)), choose(1, 100)),
+ list_to_tuple(Vector)).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+%% Rates for 3 or more monotonically increasing samples will always be > 0
+format_rate_no_range_test(_Config) ->
+ Fun = fun() ->
+ prop_format(large, rate_check(fun(Rate) -> Rate > 0 end),
+ false, fun no_range/3)
+ end,
+ rabbit_ct_proper_helpers:run_proper(Fun, [], 100).
+
+prop_format(SampleSize, Check, Incremental, RangeFun) ->
+ ?FORALL(
+ {{Table, Data}, Interval}, {content_gen(SampleSize), interval_gen()},
+ begin
+ {LastTS, Slide, Total, Samples}
+ = create_slide(Data, Interval, Incremental, SampleSize),
+ Range = RangeFun(Slide, LastTS, Interval),
+ SamplesFun = fun() -> [Slide] end,
+ InstantRateFun = fun() -> [Slide] end,
+ Results = rabbit_mgmt_stats:format_range(Range, LastTS, Table, 5000,
+ InstantRateFun,
+ SamplesFun),
+      ?WHENFAIL(io:format("Got: ~p~nSlide: ~p~nRange: ~p~n", [Results, Slide, Range]),
+ Check(Results, Total, Samples, Table))
+ end).
+
+%% Rates for 1 or no samples will always be 0.0 as there aren't
+%% enough datapoints to calculate the instant rate
+format_zero_rate_no_range_test(_Config) ->
+ Fun = fun() ->
+ prop_format(small, rate_check(fun(Rate) -> Rate == 0.0 end),
+ false, fun no_range/3)
+ end,
+ rabbit_ct_proper_helpers:run_proper(Fun, [], 100).
+
+%% Rates for 3 or more monotonically increasing incremental samples will always be > 0
+format_incremental_rate_no_range_test(_Config) ->
+ Fun = fun() ->
+ prop_format(large, rate_check(fun(Rate) -> Rate > 0 end),
+ true, fun no_range/3)
+ end,
+ rabbit_ct_proper_helpers:run_proper(Fun, [], 100).
+
+%% Rates for 1 or no samples will always be 0.0 as there aren't
+%% enough datapoints to calculate the instant rate
+format_incremental_zero_rate_no_range_test(_Config) ->
+ Fun = fun() ->
+ prop_format(small, rate_check(fun(Rate) -> Rate == 0.0 end),
+ true, fun no_range/3)
+ end,
+ rabbit_ct_proper_helpers:run_proper(Fun, [], 100).
+
+%% Checking totals
+format_total_no_range_test(_Config) ->
+ Fun = fun() ->
+ prop_format(large, fun check_total/4, false, fun no_range/3)
+ end,
+ rabbit_ct_proper_helpers:run_proper(Fun, [], 100).
+
+format_incremental_total_no_range_test(_Config) ->
+ Fun = fun() ->
+ prop_format(large, fun check_total/4, true, fun no_range/3)
+ end,
+ rabbit_ct_proper_helpers:run_proper(Fun, [], 100).
+
+%%---------------------
+%% Requests using range
+%%---------------------
+format_rate_range_test(_Config) ->
+ %% Request a range bang on the middle, so we ensure no padding is applied
+ Fun = fun() ->
+ prop_format(large, rate_check(fun(Rate) -> Rate > 0 end),
+ false, fun full_range/3)
+ end,
+ rabbit_ct_proper_helpers:run_proper(Fun, [], 100).
+
+%% Rates for 3 or more monotonically increasing incremental samples will always be > 0
+format_incremental_rate_range_test(_Config) ->
+ %% Request a range bang on the middle, so we ensure no padding is applied
+ Fun = fun() ->
+ prop_format(large, rate_check(fun(Rate) -> Rate > 0 end),
+ true, fun full_range/3)
+ end,
+ rabbit_ct_proper_helpers:run_proper(Fun, [], 100).
+
+%% Rates for 1 or no samples will always be 0.0 as there aren't
+%% enough datapoints to calculate the instant rate
+format_incremental_zero_rate_range_test(_Config) ->
+ Fun = fun() ->
+ prop_format(small, rate_check(fun(Rate) -> Rate == 0.0 end),
+ true, fun full_range/3)
+ end,
+ rabbit_ct_proper_helpers:run_proper(Fun, [], 100).
+
+%% Checking totals
+format_total_range_test(_Config) ->
+ Fun = fun() ->
+ prop_format(large, fun check_total/4, false, fun full_range_plus_interval/3)
+ end,
+ rabbit_ct_proper_helpers:run_proper(Fun, [], 100).
+
+format_incremental_total_range_test(_Config) ->
+ Fun = fun() ->
+ prop_format(large, fun check_total/4, true, fun full_range_plus_interval/3)
+ end,
+ rabbit_ct_proper_helpers:run_proper(Fun, [], 100).
+
+format_samples_range_test(_Config) ->
+ Fun = fun() ->
+ prop_format(large, fun check_samples/4, false, fun full_range/3)
+ end,
+ rabbit_ct_proper_helpers:run_proper(Fun, [], 100).
+
+format_incremental_samples_range_test(_Config) ->
+ Fun = fun() ->
+ prop_format(large, fun check_samples/4, true, fun full_range/3)
+ end,
+ rabbit_ct_proper_helpers:run_proper(Fun, [], 100).
+
+format_avg_rate_range_test(_Config) ->
+ Fun = fun() ->
+ prop_format(large, fun check_avg_rate/4, false, fun full_range/3)
+ end,
+ rabbit_ct_proper_helpers:run_proper(Fun, [], 100).
+
+format_incremental_avg_rate_range_test(_Config) ->
+ Fun = fun() ->
+ prop_format(large, fun check_avg_rate/4, true, fun full_range/3)
+ end,
+ rabbit_ct_proper_helpers:run_proper(Fun, [], 100).
+
+format_avg_range_test(_Config) ->
+ Fun = fun() ->
+ prop_format(large, fun check_avg/4, false, fun full_range/3)
+ end,
+ rabbit_ct_proper_helpers:run_proper(Fun, [], 100).
+
+format_incremental_avg_range_test(_Config) ->
+ Fun = fun() ->
+ prop_format(large, fun check_avg/4, true, fun full_range/3)
+ end,
+ rabbit_ct_proper_helpers:run_proper(Fun, [], 100).
+
+%% -------------------------------------------------------------------
+%% Helpers
+%% -------------------------------------------------------------------
+details(Table) ->
+ [list_to_atom(atom_to_list(S) ++ "_details") || S <- ?stats_per_table(Table)].
+
+add(T1, undefined) ->
+ T1;
+add(T1, T2) ->
+ list_to_tuple(lists:zipwith(fun(A, B) -> A + B end, tuple_to_list(T1), tuple_to_list(T2))).
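+%% e.g. add({3, 4}, {1, 2}) =:= {4, 6} and add({3, 4}, undefined) =:= {3, 4}
+%% (our illustration).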
+
+create_slide(Data, Interval, Incremental, SampleSize) ->
+ %% Use the samples as increments for data generation,
+ %% so we have always increasing counters
+ Now = 0,
+ Slide = exometer_slide:new(Now, 60 * 1000,
+ [{interval, Interval}, {incremental, Incremental}]),
+ Sleep = min_wait(Interval, Data),
+ lists:foldl(
+ fun(E, {TS0, Acc, Total, Samples}) ->
+ TS1 = TS0 + Sleep,
+ NewTotal = add(E, Total),
+ %% We use small sample sizes to keep a constant rate
+ Sample = create_sample(E, Incremental, SampleSize, NewTotal),
+ {TS1, exometer_slide:add_element(TS1, Sample, Acc), NewTotal,
+ [NewTotal | Samples]}
+ end, {Now, Slide, undefined, []}, Data).
+
+create_sample(E, Incremental, SampleSize, NewTotal) ->
+ case {Incremental, SampleSize} of
+ {false, small} -> E;
+ {true, small} ->
+ zero_tuple(E);
+ {false, _} ->
+ %% Guarantees a monotonically increasing counter
+ NewTotal;
+ {true, _} -> E
+ end.
+
+zero_tuple(E) ->
+ Length = length(tuple_to_list(E)),
+ list_to_tuple([0 || _ <- lists:seq(1, Length)]).
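+%% e.g. zero_tuple({7, 8, 9}) =:= {0, 0, 0} (our illustration).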
+
+min_wait(_, []) ->
+ 0;
+min_wait(Interval, Data) ->
+ %% Send at constant intervals for Interval * 3 ms. This eventually ensures several samples
+ %% on the same interval, max execution time of Interval * 5 and also enough samples to
+ %% generate a rate.
+ case round((Interval * 3) / length(Data)) of
+ 0 -> 1;
+ Min -> Min
+ end.
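+%% e.g. with Interval = 30 and 10 samples, min_wait/2 returns
+%% round(90 / 10) = 9 ms between additions (our illustration).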
+
+is_average_time(Atom) ->
+ case re:run(atom_to_list(Atom), "_avg_time$") of
+ nomatch ->
+ false;
+ _ ->
+ true
+ end.
+
+rate_check(RateCheck) ->
+ fun(Results, _, _, Table) ->
+ Check =
+ fun(Detail) ->
+ Rate = proplists:get_value(rate, proplists:get_value(Detail, Results), 0),
+ RateCheck(Rate)
+ end,
+ lists:all(Check, details(Table))
+ end.
+
+check_total(Results, Totals, _Samples, Table) ->
+ Expected = lists:zip(?stats_per_table(Table), tuple_to_list(Totals)),
+ lists:all(fun({K, _} = E) ->
+ case is_average_time(K) of
+ false -> lists:member(E, Results);
+ true -> lists:keymember(K, 1, Results)
+ end
+ end, Expected).
+
+is_avg_time_details(Detail) ->
+ match == re:run(atom_to_list(Detail), "avg_time_details$", [{capture, none}]).
+
+check_samples(Results, _Totals, Samples, Table) ->
+ Details = details(Table),
+ %% Lookup list for the position of the key in the stats tuple
+ Pairs = lists:zip(Details, lists:seq(1, length(Details))),
+
+ NonAvgTimeDetails = lists:filter(fun(D) ->
+ not is_avg_time_details(D)
+ end, Details),
+
+ %% Check that all samples in the results match one of the samples in the inputs
+ lists:all(fun(Detail) ->
+ RSamples = get_from_detail(samples, Detail, Results),
+ lists:all(fun(RS) ->
+ Value = proplists:get_value(sample, RS),
+ case Value of
+ 0 ->
+ true;
+ _ ->
+ lists:keymember(Value,
+ proplists:get_value(Detail, Pairs),
+ Samples)
+ end
+ end, RSamples)
+ end, NonAvgTimeDetails)
+ %% ensure that not all samples are 0
+ andalso lists:all(fun(Detail) ->
+ RSamples = get_from_detail(samples, Detail, Results),
+ lists:any(fun(RS) ->
+ 0 =/= proplists:get_value(sample, RS)
+ end, RSamples)
+ end, Details).
+
+check_avg_rate(Results, _Totals, _Samples, Table) ->
+ Details = details(Table),
+
+ NonAvgTimeDetails = lists:filter(fun(D) ->
+ not is_avg_time_details(D)
+ end, Details),
+
+ AvgTimeDetails = lists:filter(fun(D) ->
+ is_avg_time_details(D)
+ end, Details),
+
+ lists:all(fun(Detail) ->
+ AvgRate = get_from_detail(avg_rate, Detail, Results),
+ Samples = get_from_detail(samples, Detail, Results),
+ S2 = proplists:get_value(sample, hd(Samples)),
+ T2 = proplists:get_value(timestamp, hd(Samples)),
+ S1 = proplists:get_value(sample, lists:last(Samples)),
+ T1 = proplists:get_value(timestamp, lists:last(Samples)),
+ AvgRate == ((S2 - S1) * 1000 / (T2 - T1))
+ end, NonAvgTimeDetails) andalso
+ lists:all(fun(Detail) ->
+ Avg = get_from_detail(avg_rate, Detail, Results),
+ Samples = get_from_detail(samples, Detail, Results),
+ First = proplists:get_value(sample, hd(Samples)),
+ Avg == First
+ end, AvgTimeDetails).
+
+check_avg(Results, _Totals, _Samples, Table) ->
+ Details = details(Table),
+
+ NonAvgTimeDetails = lists:filter(fun(D) ->
+ not is_avg_time_details(D)
+ end, Details),
+
+ AvgTimeDetails = lists:filter(fun(D) ->
+ is_avg_time_details(D)
+ end, Details),
+
+ lists:all(fun(Detail) ->
+ Avg = get_from_detail(avg, Detail, Results),
+ Samples = get_from_detail(samples, Detail, Results),
+ Sum = lists:sum([proplists:get_value(sample, S) || S <- Samples]),
+ Avg == (Sum / length(Samples))
+ end, NonAvgTimeDetails) andalso
+ lists:all(fun(Detail) ->
+ Avg = get_from_detail(avg, Detail, Results),
+ Samples = get_from_detail(samples, Detail, Results),
+ First = proplists:get_value(sample, hd(Samples)),
+ Avg == First
+ end, AvgTimeDetails).
+
+get_from_detail(Tag, Detail, Results) ->
+ proplists:get_value(Tag, proplists:get_value(Detail, Results), []).
+
+full_range(Slide, Last, Interval) ->
+ LastTS = case exometer_slide:last_two(Slide) of
+ [] -> Last;
+ [{L, _} | _] -> L
+ end,
+ #range{first = 0, last = LastTS, incr = Interval}.
+
+full_range_plus_interval(Slide, Last, Interval) ->
+ LastTS = case exometer_slide:last_two(Slide) of
+ [] -> Last;
+ [{L, _} | _] -> L
+ end,
+    %% We're adding two intervals here because rounding occasionally pushes the
+    %% last sample into the next time "bucket".
+ #range{first = 0, last = LastTS + Interval + Interval, incr = Interval}.
+
+no_range(_Slide, _LastTS, _Interval) ->
+ no_range.
+
+%% Generate a well-formed interval from Start using Interval steps
+last_ts(First, Last, Interval) ->
+ ceil(((Last - First) / Interval)) * Interval + First.
+
+ceil(X) when X < 0 ->
+ trunc(X);
+ceil(X) ->
+ T = trunc(X),
+ case X - T == 0 of
+ true -> T;
+ false -> T + 1
+ end.
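+
+%% e.g. (our illustration) last_ts(0, 101, 25) =:= ceil(101 / 25) * 25 + 0
+%% =:= 5 * 25 =:= 125, i.e. Last rounded up to the next Interval boundary
+%% measured from First.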
diff --git a/deps/rabbitmq_management/test/rabbit_mgmt_test_db_SUITE.erl b/deps/rabbitmq_management/test/rabbit_mgmt_test_db_SUITE.erl
new file mode 100644
index 0000000000..03a36ee138
--- /dev/null
+++ b/deps/rabbitmq_management/test/rabbit_mgmt_test_db_SUITE.erl
@@ -0,0 +1,469 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2016-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mgmt_test_db_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("rabbit_common/include/rabbit_core_metrics.hrl").
+-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl").
+-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_metrics.hrl").
+-include("rabbit_mgmt.hrl").
+-include_lib("rabbitmq_ct_helpers/include/rabbit_mgmt_test.hrl").
+-import(rabbit_mgmt_test_util, [assert_list/2,
+ reset_management_settings/1]).
+
+-import(rabbit_misc, [pget/2]).
+
+-compile(export_all).
+-compile({no_auto_import, [ceil/1]}).
+
+all() ->
+ [
+ {group, non_parallel_tests}
+ ].
+
+groups() ->
+ [
+ {non_parallel_tests, [], [
+ queue_coarse_test,
+ connection_coarse_test,
+ fine_stats_aggregation_time_test,
+ fine_stats_aggregation_test,
+ all_consumers_test
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ inets:start(),
+ Config.
+
+end_per_suite(Config) ->
+ Config.
+
+init_per_group(_, Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, ?MODULE}
+ ]),
+ Config2 = rabbit_ct_helpers:merge_app_env(
+ rabbit_mgmt_test_util:merge_stats_app_env(Config1, 1000, 1),
+ {rabbitmq_management_agent, [{rates_mode, detailed}]}),
+ rabbit_ct_helpers:run_setup_steps(Config2,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_group(_, Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_testcase(Testcase, Config) ->
+ reset_management_settings(Config),
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ reset_management_settings(Config),
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+trace_fun(Config, MFs) ->
+ Nodename1 = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ dbg:tracer(process, {fun(A,_) ->
+ ct:pal(?LOW_IMPORTANCE,
+ "TRACE: ~p", [A])
+ end, ok}),
+ dbg:n(Nodename1),
+ dbg:p(all,c),
+ [ dbg:tpl(M, F, cx) || {M, F} <- MFs].
+
+queue_coarse_test(Config) ->
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, queue_coarse_test1, [Config]).
+
+queue_coarse_test1(_Config) ->
+ [rabbit_mgmt_metrics_collector:override_lookups(T, [{exchange, fun dummy_lookup/1},
+ {queue, fun dummy_lookup/1}])
+ || {T, _} <- ?CORE_TABLES],
+ First = exometer_slide:timestamp(),
+ stats_series(fun stats_q/2, [[{test, 1}, {test2, 1}], [{test, 10}], [{test, 20}]]),
+ timer:sleep(1150 * 2), %% The x2 factor is arbitrary: it makes CI happy.
+ Last = exometer_slide:timestamp(),
+ Interval = 1,
+ R = range(First, Last, Interval),
+ simple_details(get_q(test, R), messages, 20, R),
+ simple_details(get_vhost(R), messages, 21, R),
+ simple_details(get_overview_q(R), messages, 21, R),
+ delete_q(test),
+ timer:sleep(1150),
+ Next = last_ts(First, Interval),
+ R1 = range(First, Next, Interval),
+ simple_details(get_vhost(R1), messages, 1, R1),
+ simple_details(get_overview_q(R1), messages, 1, R1),
+ delete_q(test2),
+ timer:sleep(1150),
+ Next2 = last_ts(First, Interval),
+ R2 = range(First, Next2, Interval),
+ simple_details(get_vhost(R2), messages, 0, R2),
+ simple_details(get_overview_q(R2), messages, 0, R2),
+ [rabbit_mgmt_metrics_collector:reset_lookups(T) || {T, _} <- ?CORE_TABLES],
+ ok.
+
+%% Generate a well-formed interval from Start using Interval steps
+last_ts(First, Interval) ->
+ Now = exometer_slide:timestamp(),
+ ceil(((Now - First) / Interval * 1000)) * Interval + First.
+
+ceil(X) when X < 0 ->
+ trunc(X);
+ceil(X) ->
+ T = trunc(X),
+ case X - T == 0 of
+ true -> T;
+ false -> T + 1
+ end.
+
+connection_coarse_test(Config) ->
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, connection_coarse_test1, [Config]).
+
+connection_coarse_test1(_Config) ->
+ First = exometer_slide:timestamp(),
+ create_conn(test),
+ create_conn(test2),
+ stats_series(fun stats_conn/2, [[{test, 2}, {test2, 5}], [{test, 5}, {test2, 1}],
+ [{test, 10}]]),
+ Last = last_ts(First, 5),
+ R = range(First, Last, 5),
+ simple_details(get_conn(test, R), recv_oct, 10, R),
+ simple_details(get_conn(test2, R), recv_oct, 1, R),
+ delete_conn(test),
+ delete_conn(test2),
+ timer:sleep(1150),
+ assert_list([], rabbit_mgmt_db:get_all_connections(R)),
+ ok.
+
+fine_stats_aggregation_test(Config) ->
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, fine_stats_aggregation_test1, [Config]).
+
+fine_stats_aggregation_test1(_Config) ->
+ [rabbit_mgmt_metrics_collector:override_lookups(T, [{exchange, fun dummy_lookup/1},
+ {queue, fun dummy_lookup/1}])
+ || {T, _} <- ?CORE_TABLES],
+ First = exometer_slide:timestamp(),
+ create_conn(test),
+ create_conn(test2),
+ create_ch(ch1, [{connection, pid(test)}]),
+ create_ch(ch2, [{connection, pid(test2)}]),
+ %% Publish differences
+ channel_series(ch1, [{[{x, 50}], [{q1, x, 15}, {q2, x, 2}], [{q1, 5}, {q2, 5}]},
+ {[{x, 25}], [{q1, x, 10}, {q2, x, 3}], [{q1, -2}, {q2, -3}]},
+ {[{x, 25}], [{q1, x, 25}, {q2, x, 5}], [{q1, -1}, {q2, -1}]}]),
+ channel_series(ch2, [{[{x, 5}], [{q1, x, 15}, {q2, x, 1}], []},
+ {[{x, 2}], [{q1, x, 10}, {q2, x, 2}], []},
+ {[{x, 3}], [{q1, x, 25}, {q2, x, 2}], []}]),
+ timer:sleep(1150),
+
+ fine_stats_aggregation_test0(true, First),
+ delete_q(q2),
+ timer:sleep(1150),
+ fine_stats_aggregation_test0(false, First),
+ delete_ch(ch1),
+ delete_ch(ch2),
+ delete_conn(test),
+ delete_conn(test2),
+ delete_x(x),
+ delete_v(<<"/">>),
+ [rabbit_mgmt_metrics_collector:reset_lookups(T) || {T, _} <- ?CORE_TABLES],
+ ok.
+
+fine_stats_aggregation_test0(Q2Exists, First) ->
+ Last = exometer_slide:timestamp(),
+ R = range(First, Last, 1),
+ Ch1 = get_ch(ch1, R),
+
+ Ch2 = get_ch(ch2, R),
+ X = get_x(x, R),
+ Q1 = get_q(q1, R),
+ V = get_vhost(R),
+ O = get_overview(R),
+ assert_fine_stats(m, publish, 100, Ch1, R),
+ assert_fine_stats(m, publish, 10, Ch2, R),
+ assert_fine_stats(m, publish_in, 110, X, R),
+ assert_fine_stats(m, publish_out, 115, X, R),
+ assert_fine_stats(m, publish, 100, Q1, R),
+ assert_fine_stats(m, deliver_get, 2, Q1, R),
+ assert_fine_stats(m, deliver_get, 3, Ch1, R),
+ assert_fine_stats(m, publish, 110, V, R),
+ assert_fine_stats(m, deliver_get, 3, V, R),
+ assert_fine_stats(m, publish, 110, O, R),
+ assert_fine_stats(m, deliver_get, 3, O, R),
+ assert_fine_stats({pub, x}, publish, 100, Ch1, R),
+ assert_fine_stats({pub, x}, publish, 10, Ch2, R),
+ assert_fine_stats({in, ch1}, publish, 100, X, R),
+ assert_fine_stats({in, ch2}, publish, 10, X, R),
+ assert_fine_stats({out, q1}, publish, 100, X, R),
+ assert_fine_stats({in, x}, publish, 100, Q1, R),
+ assert_fine_stats({del, ch1}, deliver_get, 2, Q1, R),
+ assert_fine_stats({del, q1}, deliver_get, 2, Ch1, R),
+ case Q2Exists of
+ true -> Q2 = get_q(q2, R),
+ assert_fine_stats(m, publish, 15, Q2, R),
+ assert_fine_stats(m, deliver_get, 1, Q2, R),
+ assert_fine_stats({out, q2}, publish, 15, X, R),
+ assert_fine_stats({in, x}, publish, 15, Q2, R),
+ assert_fine_stats({del, ch1}, deliver_get, 1, Q2, R),
+ assert_fine_stats({del, q2}, deliver_get, 1, Ch1, R);
+ false -> assert_fine_stats_neg({out, q2}, X),
+ assert_fine_stats_neg({del, q2}, Ch1)
+ end,
+ ok.
+
+fine_stats_aggregation_time_test(Config) ->
+ %% trace_fun(Config, [{rabbit_mgmt_db, get_data_from_nodes}]),
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, fine_stats_aggregation_time_test1, [Config]).
+
+fine_stats_aggregation_time_test1(_Config) ->
+ [rabbit_mgmt_metrics_collector:override_lookups(T, [{exchange, fun dummy_lookup/1},
+ {queue, fun dummy_lookup/1}])
+ || {T, _} <- ?CORE_TABLES],
+ First = exometer_slide:timestamp(),
+ create_ch(ch),
+ channel_series(ch, [{[{x, 50}], [{q, x, 15}], [{q, 5}]},
+ {[{x, 25}], [{q, x, 10}], [{q, 5}]},
+ {[{x, 25}], [{q, x, 25}], [{q, 10}]}]),
+ timer:sleep(1150),
+ Last = exometer_slide:timestamp(),
+
+ channel_series(ch, [{[{x, 10}], [{q, x, 5}], [{q, 2}]}]),
+ Next = exometer_slide:timestamp(),
+
+ R1 = range(First, Last, 1),
+ assert_fine_stats(m, publish, 100, get_ch(ch, R1), R1),
+ assert_fine_stats(m, publish, 50, get_q(q, R1), R1),
+ assert_fine_stats(m, deliver_get, 20, get_q(q, R1), R1),
+
+ R2 = range(Last, Next, 1),
+ assert_fine_stats(m, publish, 110, get_ch(ch, R2), R2),
+ assert_fine_stats(m, publish, 55, get_q(q, R2), R2),
+ assert_fine_stats(m, deliver_get, 22, get_q(q, R2), R2),
+
+ delete_q(q),
+ delete_ch(ch),
+ delete_x(x),
+ delete_v(<<"/">>),
+
+ [rabbit_mgmt_metrics_collector:reset_lookups(T) || {T, _} <- ?CORE_TABLES],
+ ok.
+
+assert_fine_stats(m, Type, N, Obj, R) ->
+ Act = pget(message_stats, Obj),
+ simple_details(Act, Type, N, R);
+assert_fine_stats({T2, Name}, Type, N, Obj, R) ->
+ Act = find_detailed_stats(Name, pget(expand(T2), Obj)),
+ simple_details(Act, Type, N, R).
+
+assert_fine_stats_neg({T2, Name}, Obj) ->
+ detailed_stats_absent(Name, pget(expand(T2), Obj)).
+
+%% Shape of a consumer entry, for reference:
+%%   {{{resource, <<"/">>, queue, <<"test_lazy">>},
+%%     <0.953.0>, <<"amq.ctag-sPlBtNl8zwIGkYhNjJrATA">>},
+%%    false, true, 0, []}
+all_consumers_test(Config) ->
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, all_consumers_test1, [Config]).
+
+all_consumers_test1(_Config) ->
+    %% Tests that we can list all the consumers while channels are in the
+    %% process of being deleted, in which case channel details may be missing.
+    %% `merge_channel_into_obj` is also used when listing queues, which may
+    %% likewise report empty channel details while the queue is being deleted,
+    %% but it will not crash.
+ [rabbit_mgmt_metrics_collector:override_lookups(T, [{exchange, fun dummy_lookup/1},
+ {queue, fun dummy_lookup/1}])
+ || {T, _} <- ?CORE_TABLES],
+ create_cons(q1, ch1, <<"ctag">>, false, true, 0, []),
+ timer:sleep(1150),
+ [Consumer] = rabbit_mgmt_db:get_all_consumers(),
+ [] = proplists:get_value(channel_details, Consumer),
+ %% delete_cons(co),
+ [rabbit_mgmt_metrics_collector:reset_lookups(T) || {T, _} <- ?CORE_TABLES],
+ ok.
+%%----------------------------------------------------------------------------
+%% Events in
+%%----------------------------------------------------------------------------
+
+create_conn(Name) ->
+ rabbit_core_metrics:connection_created(pid(Name), [{pid, pid(Name)},
+ {name, a2b(Name)}]).
+
+create_ch(Name, Extra) ->
+ rabbit_core_metrics:channel_created(pid(Name), [{pid, pid(Name)},
+ {name, a2b(Name)}] ++ Extra).
+create_ch(Name) ->
+ create_ch(Name, []).
+
+create_cons(QName, ChName, Tag, Exclusive, AckRequired, PrefetchCount, Args) ->
+ rabbit_core_metrics:consumer_created(pid(ChName), Tag, Exclusive,
+ AckRequired, q(QName),
+ PrefetchCount, false, waiting, Args).
+
+stats_series(Fun, ListsOfPairs) ->
+ [begin
+ [Fun(Name, Msgs) || {Name, Msgs} <- List],
+ timer:sleep(1150)
+ end || List <- ListsOfPairs].
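+%% The 1150 ms sleep presumably exceeds the 1000 ms collection interval
+%% configured in init_per_group, so each batch lands in its own sample
+%% (our note).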
+
+stats_q(Name, Msgs) ->
+ rabbit_core_metrics:queue_stats(q(Name), Msgs, Msgs, Msgs, Msgs).
+
+stats_conn(Name, Oct) ->
+ rabbit_core_metrics:connection_stats(pid(Name), Oct, Oct, Oct).
+
+channel_series(Name, ListOfStats) ->
+ [begin
+ stats_ch(Name, XStats, QXStats, QStats),
+ timer:sleep(1150)
+ end || {XStats, QXStats, QStats} <- ListOfStats].
+
+stats_ch(Name, XStats, QXStats, QStats) ->
+ [rabbit_core_metrics:channel_stats(exchange_stats, publish, {pid(Name), x(XName)}, N)
+ || {XName, N} <- XStats],
+ [rabbit_core_metrics:channel_stats(queue_exchange_stats, publish,
+ {pid(Name), {q(QName), x(XName)}}, N)
+ || {QName, XName, N} <- QXStats],
+ [rabbit_core_metrics:channel_stats(queue_stats, deliver_no_ack, {pid(Name), q(QName)}, N)
+ || {QName, N} <- QStats],
+ ok.
+
+delete_q(Name) ->
+ rabbit_core_metrics:queue_deleted(q(Name)),
+ rabbit_event:notify(queue_deleted, [{name, q(Name)}]).
+
+delete_conn(Name) ->
+ Pid = pid_del(Name),
+ rabbit_core_metrics:connection_closed(Pid),
+ rabbit_event:notify(connection_closed, [{pid, Pid}]).
+
+delete_cons(QName, ChName, Tag) ->
+ Pid = pid_del(ChName),
+ rabbit_core_metrics:consumer_deleted(Pid, Tag, q(QName)),
+ rabbit_event:notify(consumer_deleted, [{channel, Pid},
+ {queue, q(QName)},
+ {consumer_tag, Tag}]).
+
+delete_ch(Name) ->
+ Pid = pid_del(Name),
+ rabbit_core_metrics:channel_closed(Pid),
+ rabbit_core_metrics:channel_exchange_down({Pid, x(x)}),
+ rabbit_event:notify(channel_closed, [{pid, Pid}]).
+
+delete_x(Name) ->
+ rabbit_event:notify(exchange_deleted, [{name, x(Name)}]).
+
+delete_v(Name) ->
+ rabbit_event:notify(vhost_deleted, [{name, Name}]).
+
+%%----------------------------------------------------------------------------
+%% Events out
+%%----------------------------------------------------------------------------
+
+range(F, L, I) ->
+ R = #range{first = F, last = L, incr = I * 1000},
+ {R, R, R, R}.
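+%% The same range is replicated four times to match the four-range tuple
+%% the rabbit_mgmt_db calls in this suite take; incr is converted from
+%% seconds to milliseconds (our reading).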
+
+get_x(Name, Range) ->
+ [X] = rabbit_mgmt_db:augment_exchanges([x2(Name)], Range, full),
+ X.
+
+get_q(Name, Range) ->
+ [Q] = rabbit_mgmt_db:augment_queues([q2(Name)], Range, full),
+ Q.
+
+get_vhost(Range) ->
+ [VHost] = rabbit_mgmt_db:augment_vhosts([[{name, <<"/">>}]], Range),
+ VHost.
+
+get_conn(Name, Range) -> rabbit_mgmt_db:get_connection(a2b(Name), Range).
+get_ch(Name, Range) -> rabbit_mgmt_db:get_channel(a2b(Name), Range).
+
+get_overview(Range) -> rabbit_mgmt_db:get_overview(Range).
+get_overview_q(Range) -> pget(queue_totals, get_overview(Range)).
+
+details0(R, AR, A, L) ->
+ [{rate, R},
+ {samples, [[{sample, S}, {timestamp, T}] || {T, S} <- L]},
+ {avg_rate, AR},
+ {avg, A}].
+
+simple_details(Result, Thing, N, {#range{first = First, last = Last}, _, _, _} = _R) ->
+ ?assertEqual(N, proplists:get_value(Thing, Result)),
+ Details = proplists:get_value(atom_suffix(Thing, "_details"), Result),
+ ?assert(0 =/= proplists:get_value(rate, Details)),
+ Samples = proplists:get_value(samples, Details),
+ TSs = [proplists:get_value(timestamp, S) || S <- Samples],
+ ?assert(First =< lists:min(TSs)),
+ ?assert(Last >= lists:max(TSs)).
+
+atom_suffix(Atom, Suffix) ->
+ list_to_atom(atom_to_list(Atom) ++ Suffix).
+
+find_detailed_stats(Name, List) ->
+ [S] = filter_detailed_stats(Name, List),
+ S.
+
+detailed_stats_absent(Name, List) ->
+ [] = filter_detailed_stats(Name, List).
+
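+%% Each element of List pairs a stats proplist with a details proplist;
+%% we keep the stats of every element whose details name matches Name
+%% (descriptive note ours).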
+filter_detailed_stats(Name, List) ->
+ lists:foldl(fun(L, Acc) ->
+ {[{stats, Stats}], [{_, Details}]} =
+ lists:partition(fun({K, _}) -> K == stats end, L),
+ case (pget(name, Details) =:= a2b(Name)) of
+ true ->
+ [Stats | Acc];
+ false ->
+ Acc
+ end
+ end, [], List).
+
+expand(in) -> incoming;
+expand(out) -> outgoing;
+expand(del) -> deliveries;
+expand(pub) -> publishes.
+
+%%----------------------------------------------------------------------------
+%% Util
+%%----------------------------------------------------------------------------
+
+x(Name) -> rabbit_misc:r(<<"/">>, exchange, a2b(Name)).
+x2(Name) -> q2(Name).
+q(Name) -> rabbit_misc:r(<<"/">>, queue, a2b(Name)).
+q2(Name) -> [{name, a2b(Name)},
+ {pid, self()}, % fake a local pid
+ {vhost, <<"/">>}].
+
+pid(Name) ->
+ case get({pid, Name}) of
+ undefined -> P = spawn(fun() -> ok end),
+ put({pid, Name}, P),
+ P;
+ Pid -> Pid
+ end.
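+%% pid/1 memoises one fake (immediately-terminated) pid per name in the
+%% process dictionary; pid_del/1 below returns it and forgets it
+%% (descriptive note ours).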
+
+pid_del(Name) ->
+ Pid = pid(Name),
+ erase({pid, Name}),
+ Pid.
+
+a2b(A) -> list_to_binary(atom_to_list(A)).
+
+dummy_lookup(_Thing) -> true.
diff --git a/deps/rabbitmq_management/test/rabbit_mgmt_test_unit_SUITE.erl b/deps/rabbitmq_management/test/rabbit_mgmt_test_unit_SUITE.erl
new file mode 100644
index 0000000000..32194bd5c8
--- /dev/null
+++ b/deps/rabbitmq_management/test/rabbit_mgmt_test_unit_SUITE.erl
@@ -0,0 +1,88 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mgmt_test_unit_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, parallel_tests}
+ ].
+
+groups() ->
+ [
+ {parallel_tests, [parallel], [
+ tokenise_test,
+ pack_binding_test,
+ path_prefix_test
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_group(_, Config) ->
+ Config.
+
+end_per_group(_, Config) ->
+ Config.
+
+%% -------------------------------------------------------------------
+%% Test cases.
+%% -------------------------------------------------------------------
+
+tokenise_test(_Config) ->
+ [] = rabbit_mgmt_format:tokenise(""),
+ ["foo"] = rabbit_mgmt_format:tokenise("foo"),
+ ["foo", "bar"] = rabbit_mgmt_format:tokenise("foo~bar"),
+ ["foo", "", "bar"] = rabbit_mgmt_format:tokenise("foo~~bar"),
+ ok.
+
+pack_binding_test(_Config) ->
+ assert_binding(<<"~">>,
+ <<"">>, []),
+ assert_binding(<<"foo">>,
+ <<"foo">>, []),
+ assert_binding(<<"foo%7Ebar%2Fbash">>,
+ <<"foo~bar/bash">>, []),
+ assert_binding(<<"foo%7Ebar%7Ebash">>,
+ <<"foo~bar~bash">>, []),
+ ok.
+
+path_prefix_test(_Config) ->
+ Got0 = rabbit_mgmt_util:get_path_prefix(),
+ ?assertEqual("", Got0),
+
+ Pfx0 = "/custom-prefix",
+ application:set_env(rabbitmq_management, path_prefix, Pfx0),
+ Got1 = rabbit_mgmt_util:get_path_prefix(),
+ ?assertEqual(Pfx0, Got1),
+
+ Pfx1 = "custom-prefix",
+ application:set_env(rabbitmq_management, path_prefix, Pfx1),
+ Got2 = rabbit_mgmt_util:get_path_prefix(),
+ ?assertEqual(Pfx0, Got2),
+
+ Pfx2 = <<"custom-prefix">>,
+ application:set_env(rabbitmq_management, path_prefix, Pfx2),
+ Got3 = rabbit_mgmt_util:get_path_prefix(),
+ ?assertEqual(Pfx0, Got3).
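+
+%% The assertions above indicate that get_path_prefix/0 normalises the
+%% configured value: a missing leading slash is added and binaries are
+%% converted to lists (inference ours).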
+
+%%--------------------------------------------------------------------
+
+assert_binding(Packed, Routing, Args) ->
+ case rabbit_mgmt_format:pack_binding_props(Routing, Args) of
+ Packed ->
+ ok;
+ Act ->
+ throw({pack, Routing, Args, expected, Packed, got, Act})
+ end.
diff --git a/deps/rabbitmq_management/test/stats_SUITE.erl b/deps/rabbitmq_management/test/stats_SUITE.erl
new file mode 100644
index 0000000000..99de1a532e
--- /dev/null
+++ b/deps/rabbitmq_management/test/stats_SUITE.erl
@@ -0,0 +1,178 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2016-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(stats_SUITE).
+
+-include_lib("proper/include/proper.hrl").
+-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl").
+
+-compile(export_all).
+
+-import(rabbit_misc, [pget/2]).
+
+all() ->
+ [
+ {group, parallel_tests}
+ ].
+
+groups() ->
+ [
+ {parallel_tests, [parallel], [
+ format_range_empty_slide,
+ format_range,
+ format_range_missing_middle,
+ format_range_missing_middle_drop,
+ format_range_incremental_pad,
+ format_range_incremental_pad2,
+ format_range_constant
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+init_per_suite(Config) ->
+ Config.
+
+end_per_suite(_Config) ->
+ ok.
+
+init_per_group(_, Config) ->
+ Config.
+
+end_per_group(_, _Config) ->
+ ok.
+
+init_per_testcase(_, Config) ->
+ Config.
+
+end_per_testcase(_, _Config) ->
+ ok.
+
+format_range_empty_slide(_Config) ->
+ Slide = exometer_slide:new(0, 60000, [{incremental, true},
+ {interval, 5000}]),
+ Range = #range{first = 100, last = 200, incr = 10},
+ Table = vhost_stats_fine_stats,
+ SamplesFun = fun() -> [Slide] end,
+ Got = rabbit_mgmt_stats:format_range(Range, 200, Table, 0, fun() ->
+ ok
+ end,
+ SamplesFun),
+ PublishDetails = proplists:get_value(publish_details, Got),
+ Samples = proplists:get_value(samples, PublishDetails),
+ 11 = length(Samples).
+
+format_range(_Config) ->
+ Slide = exometer_slide:new(0, 60000, [{incremental, true},
+ {interval, 10}]),
+ Slide1 = exometer_slide:add_element(197, {10}, Slide),
+ Range = #range{first = 100, last = 200, incr = 10},
+ Table = queue_stats_publish,
+ SamplesFun = fun() -> [Slide1] end,
+ Got = rabbit_mgmt_stats:format_range(Range, 200, Table, 0, fun() ->
+ ok
+ end,
+ SamplesFun),
+ PublishDetails = proplists:get_value(publish_details, Got),
+ [S1, S2 | _Rest] = Samples = proplists:get_value(samples, PublishDetails),
+ 0 = proplists:get_value(sample, S2),
+ 10 = proplists:get_value(sample, S1),
+ 11 = length(Samples).
+
+format_range_missing_middle(_Config) ->
+ Slide = exometer_slide:new(0, 60000, [{incremental, false},
+ {interval, 10}]),
+ Slide1 = exometer_slide:add_element(167, {10}, Slide),
+ Slide2 = exometer_slide:add_element(197, {5}, Slide1),
+ Range = #range{first = 100, last = 200, incr = 10},
+ Table = queue_stats_publish,
+ SamplesFun = fun() -> [Slide2] end,
+ Got = rabbit_mgmt_stats:format_range(Range, 200, Table, 0, fun() ->
+ ok
+ end,
+ SamplesFun),
+ PublishDetails = proplists:get_value(publish_details, Got),
+ [S1, S2, S3, S4 | Rest] = Samples = proplists:get_value(samples, PublishDetails),
+ 10 = proplists:get_value(sample, S4),
+ 10 = proplists:get_value(sample, S3),
+ 10 = proplists:get_value(sample, S2),
+ 5 = proplists:get_value(sample, S1),
+ true = lists:all(fun(P) ->
+ 0 == proplists:get_value(sample, P)
+ end, Rest),
+ 11 = length(Samples).
+
+format_range_missing_middle_drop(_Config) ->
+ Slide = exometer_slide:new(0, 60000, [{incremental, false},
+ {max_n, 12},
+ {interval, 10}]),
+ Slide1 = exometer_slide:add_element(167, {10}, Slide),
+ Slide2 = exometer_slide:add_element(197, {10}, Slide1),
+ Range = #range{first = 100, last = 200, incr = 10},
+ Table = queue_stats_publish,
+ SamplesFun = fun() -> [Slide2] end,
+ Got = rabbit_mgmt_stats:format_range(Range, 200, Table, 0, fun() ->
+ ok
+ end,
+ SamplesFun),
+ PublishDetails = proplists:get_value(publish_details, Got),
+ [S1, S2, S3, S4 | Rest] = Samples = proplists:get_value(samples, PublishDetails),
+ 10 = proplists:get_value(sample, S4),
+ 10 = proplists:get_value(sample, S3),
+ 10 = proplists:get_value(sample, S2),
+ 10 = proplists:get_value(sample, S1),
+ true = lists:all(fun(P) ->
+ 0 == proplists:get_value(sample, P)
+ end, Rest),
+ 11 = length(Samples).
+
+format_range_incremental_pad(_Config) ->
+ Slide = exometer_slide:new(0, 10, [{incremental, true},
+ {interval, 5}]),
+ Slide1 = exometer_slide:add_element(15, {3}, Slide),
+ Range = #range{first = 5, last = 15, incr = 5},
+ Table = queue_stats_publish,
+ SamplesFun = fun() -> [Slide1] end,
+ Got = rabbit_mgmt_stats:format_range(Range, 0, Table, 0, fun() -> ok end,
+ SamplesFun),
+ PublishDetails = proplists:get_value(publish_details, Got),
+ [{3, 15}, {0,10}, {0, 5}] = [{pget(sample, V), pget(timestamp, V)}
+ || V <- pget(samples, PublishDetails)].
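+%% The samples at timestamps 10 and 5 are zero-padded since the slide only
+%% holds a sample at 15 (our reading of the expected output above).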
+
+format_range_incremental_pad2(_Config) ->
+ Slide = exometer_slide:new(0, 10, [{incremental, true},
+ {interval, 5}]),
+ {_, Slide1} = lists:foldl(fun (V, {TS, S}) ->
+ {TS + 5, exometer_slide:add_element(TS, {V}, S)}
+ end, {5, Slide}, [1,1,0,0,0,1]),
+ Range = #range{first = 10, last = 30, incr = 5},
+ Table = queue_stats_publish,
+ SamplesFun = fun() -> [Slide1] end,
+ Got = rabbit_mgmt_stats:format_range(Range, 0, Table, 0, fun() -> ok end,
+ SamplesFun),
+ PublishDetails = pget(publish_details, Got),
+ [{3, 30}, {2, 25}, {2, 20}, {2, 15}, {2, 10}] =
+ [{pget(sample, V), pget(timestamp, V)}
+ || V <- pget(samples, PublishDetails)].
+
+format_range_constant(_Config) ->
+ Now = 0,
+ Slide = exometer_slide:new(0, 20, [{incremental, false},
+ {max_n, 100},
+ {interval, 5}]),
+ Slide1 = lists:foldl(fun(N, Acc) ->
+ exometer_slide:add_element(Now + N, {5}, Acc)
+ end, Slide, lists:seq(0, 100, 5)),
+ Range = #range{first = 5, last = 50, incr = 5},
+ Table = queue_stats_publish,
+ SamplesFun = fun() -> [Slide1] end,
+ Got = rabbit_mgmt_stats:format_range(Range, 0, Table, 0, fun() -> ok end,
+ SamplesFun),
+ 5 = proplists:get_value(publish, Got),
+ PD = proplists:get_value(publish_details, Got),
+ 0.0 = proplists:get_value(rate, PD).
diff --git a/deps/rabbitmq_management_agent/.gitignore b/deps/rabbitmq_management_agent/.gitignore
new file mode 100644
index 0000000000..7b45202588
--- /dev/null
+++ b/deps/rabbitmq_management_agent/.gitignore
@@ -0,0 +1,18 @@
+.sw?
+.*.sw?
+*.beam
+*.plt
+/.erlang.mk/
+/cover/
+/deps/
+/doc/
+/ebin/
+/escript/
+/escript.lock
+/logs/
+/plugins/
+/plugins.lock
+/sbin/
+/sbin.lock
+
+rabbitmq_management_agent.d
diff --git a/deps/rabbitmq_management_agent/.travis.yml b/deps/rabbitmq_management_agent/.travis.yml
new file mode 100644
index 0000000000..47b50c0a97
--- /dev/null
+++ b/deps/rabbitmq_management_agent/.travis.yml
@@ -0,0 +1,61 @@
+# vim:sw=2:et:
+
+os: linux
+dist: xenial
+language: elixir
+notifications:
+ email:
+ recipients:
+ - alerts@rabbitmq.com
+ on_success: never
+ on_failure: always
+addons:
+ apt:
+ packages:
+ - awscli
+cache:
+ apt: true
+env:
+ global:
+ - secure: SvMls+ca5j34cUph3yOaOxjOXHGtzfK00HCZgOgKQVkX5MI835MZkRn/HpmNrmuO++J6h8Rqet5v7ejVS0K41IhpRzJqRzRCUmI+/k0+MfgDrLP1KpszJQP05D22Wx46Bumyx7WKoUqr10jn75VhfeJQzsCf/2gn/+aAcs/2pRE=
+ - secure: OxHYDgMTq4dtLKl4hh7evQeb5cZM6Cce5i/akV5ci14c4pmx2P22oyAuDXqDi84959hfspuLx3IW808vnJL9LNQzLGs7jjWX2Yzisz3u5/429efgEQh50liZczmK+c3L1wPQ+RFwY9dysm9U6/+gX/o82qH0yFkVYXHCpIgjAhc=
+
+ # $base_rmq_ref is used by rabbitmq-components.mk to select the
+ # appropriate branch for dependencies.
+ - base_rmq_ref=master
+
+elixir:
+ - '1.9'
+otp_release:
+ - '21.3'
+ - '22.2'
+
+install:
+ # This project being an Erlang one (we just set language to Elixir
+ # to ensure it is installed), we don't want Travis to run mix(1)
+ # automatically as it will break.
+ skip
+
+script:
+ # $current_rmq_ref is also used by rabbitmq-components.mk to select
+ # the appropriate branch for dependencies.
+ - make check-rabbitmq-components.mk
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+ - make xref
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+ - make tests
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+
+after_failure:
+ - |
+ cd "$TRAVIS_BUILD_DIR"
+ if test -d logs && test "$AWS_ACCESS_KEY_ID" && test "$AWS_SECRET_ACCESS_KEY"; then
+ archive_name="$(basename "$TRAVIS_REPO_SLUG")-$TRAVIS_JOB_NUMBER"
+
+ tar -c --transform "s/^logs/${archive_name}/" -f - logs | \
+ xz > "${archive_name}.tar.xz"
+
+ aws s3 cp "${archive_name}.tar.xz" s3://server-release-pipeline/travis-ci-logs/ \
+ --region eu-west-1 \
+ --acl public-read
+ fi
diff --git a/deps/rabbitmq_management_agent/CODE_OF_CONDUCT.md b/deps/rabbitmq_management_agent/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000000..08697906fd
--- /dev/null
+++ b/deps/rabbitmq_management_agent/CODE_OF_CONDUCT.md
@@ -0,0 +1,44 @@
+# Contributor Code of Conduct
+
+As contributors and maintainers of this project, and in the interest of fostering an open
+and welcoming community, we pledge to respect all people who contribute through reporting
+issues, posting feature requests, updating documentation, submitting pull requests or
+patches, and other activities.
+
+We are committed to making participation in this project a harassment-free experience for
+everyone, regardless of level of experience, gender, gender identity and expression,
+sexual orientation, disability, personal appearance, body size, race, ethnicity, age,
+religion, or nationality.
+
+Examples of unacceptable behavior by participants include:
+
+ * The use of sexualized language or imagery
+ * Personal attacks
+ * Trolling or insulting/derogatory comments
+ * Public or private harassment
+ * Publishing others' private information, such as physical or electronic addresses,
+ without explicit permission
+ * Other unethical or unprofessional conduct
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments,
+commits, code, wiki edits, issues, and other contributions that are not aligned to this
+Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors
+that they deem inappropriate, threatening, offensive, or harmful.
+
+By adopting this Code of Conduct, project maintainers commit themselves to fairly and
+consistently applying these principles to every aspect of managing this project. Project
+maintainers who do not follow or enforce the Code of Conduct may be permanently removed
+from the project team.
+
+This Code of Conduct applies both within project spaces and in public spaces when an
+individual is representing the project or its community.
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by
+contacting a project maintainer at [info@rabbitmq.com](mailto:info@rabbitmq.com). All complaints will
+be reviewed and investigated and will result in a response that is deemed necessary and
+appropriate to the circumstances. Maintainers are obligated to maintain confidentiality
+with regard to the reporter of an incident.
+
+This Code of Conduct is adapted from the
+[Contributor Covenant](https://contributor-covenant.org), version 1.3.0, available at
+[contributor-covenant.org/version/1/3/0/](https://contributor-covenant.org/version/1/3/0/)
diff --git a/deps/rabbitmq_management_agent/CONTRIBUTING.md b/deps/rabbitmq_management_agent/CONTRIBUTING.md
new file mode 100644
index 0000000000..23a92fef9c
--- /dev/null
+++ b/deps/rabbitmq_management_agent/CONTRIBUTING.md
@@ -0,0 +1,38 @@
+## Overview
+
+RabbitMQ projects use pull requests to discuss, collaborate on and accept code contributions.
+Pull requests are the primary place for discussing code changes.
+
+## How to Contribute
+
+The process is fairly standard:
+
+ * Fork the repository or repositories you plan on contributing to
+ * Clone [RabbitMQ umbrella repository](https://github.com/rabbitmq/rabbitmq-public-umbrella)
+ * `cd umbrella`, `make co`
+ * Create a branch with a descriptive name in the relevant repositories
+ * Make your changes, run tests, commit with a [descriptive message](https://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html), push to your fork (see the example session after this list)
+ * Submit pull requests with an explanation of what has been changed and **why**
+ * Submit a filled out and signed [Contributor Agreement](https://github.com/rabbitmq/ca#how-to-submit) if needed (see below)
+ * Be patient. We will get to your pull request eventually
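+
+A minimal example session (a sketch only; the component directory and
+branch names below are placeholders):
+
+```sh
+# Clone the umbrella and check out all component repositories
+git clone https://github.com/rabbitmq/rabbitmq-public-umbrella.git umbrella
+cd umbrella
+make co
+
+# Create a descriptively named branch in the repository you are changing
+cd deps/rabbitmq_management_agent
+git checkout -b my-descriptive-branch-name
+
+# Make your changes, then run the tests
+make tests
+
+# Push the branch to your fork and open a pull request from it
+git push my-fork my-descriptive-branch-name
+```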
+
+If what you are going to work on is a substantial change, please first ask the core team
+for their opinion on the [RabbitMQ mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
+
+
+## Code of Conduct
+
+See [CODE_OF_CONDUCT.md](./CODE_OF_CONDUCT.md).
+
+
+## Contributor Agreement
+
+If you want to contribute a non-trivial change, please submit a signed copy of our
+[Contributor Agreement](https://github.com/rabbitmq/ca#how-to-submit) around the time
+you submit your pull request. This will make it much easier (in some cases, possible)
+for the RabbitMQ team at Pivotal to merge your contribution.
+
+
+## Where to Ask Questions
+
+If something isn't clear, feel free to ask on our [mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
diff --git a/deps/rabbitmq_management_agent/LICENSE b/deps/rabbitmq_management_agent/LICENSE
new file mode 100644
index 0000000000..f2da65d175
--- /dev/null
+++ b/deps/rabbitmq_management_agent/LICENSE
@@ -0,0 +1,4 @@
+This package is licensed under the MPL 2.0. For the MPL 2.0, please see LICENSE-MPL-RabbitMQ.
+
+If you have any questions regarding licensing, please contact us at
+info@rabbitmq.com.
diff --git a/deps/rabbitmq_management_agent/LICENSE-MPL-RabbitMQ b/deps/rabbitmq_management_agent/LICENSE-MPL-RabbitMQ
new file mode 100644
index 0000000000..14e2f777f6
--- /dev/null
+++ b/deps/rabbitmq_management_agent/LICENSE-MPL-RabbitMQ
@@ -0,0 +1,373 @@
+Mozilla Public License Version 2.0
+==================================
+
+1. Definitions
+--------------
+
+1.1. "Contributor"
+ means each individual or legal entity that creates, contributes to
+ the creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+ means the combination of the Contributions of others (if any) used
+ by a Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+ means Source Code Form to which the initial Contributor has attached
+ the notice in Exhibit A, the Executable Form of such Source Code
+ Form, and Modifications of such Source Code Form, in each case
+ including portions thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ (a) that the initial Contributor has attached the notice described
+ in Exhibit B to the Covered Software; or
+
+ (b) that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the
+ terms of a Secondary License.
+
+1.6. "Executable Form"
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+ means a work that combines Covered Software with other material, in
+ a separate file or files, that is not Covered Software.
+
+1.8. "License"
+ means this document.
+
+1.9. "Licensable"
+ means having the right to grant, to the maximum extent possible,
+ whether at the time of the initial grant or subsequently, any and
+ all of the rights conveyed by this License.
+
+1.10. "Modifications"
+ means any of the following:
+
+ (a) any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered
+ Software; or
+
+ (b) any new file in Source Code Form that contains any Covered
+ Software.
+
+1.11. "Patent Claims" of a Contributor
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the
+ License, by the making, using, selling, offering for sale, having
+ made, import, or transfer of either its Contributions or its
+ Contributor Version.
+
+1.12. "Secondary License"
+ means either the GNU General Public License, Version 2.0, the GNU
+ Lesser General Public License, Version 2.1, the GNU Affero General
+ Public License, Version 3.0, or any later versions of those
+ licenses.
+
+1.13. "Source Code Form"
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that
+ controls, is controlled by, or is under common control with You. For
+ purposes of this definition, "control" means (a) the power, direct
+ or indirect, to cause the direction or management of such entity,
+ whether by contract or otherwise, or (b) ownership of more than
+ fifty percent (50%) of the outstanding shares or beneficial
+ ownership of such entity.
+
+2. License Grants and Conditions
+--------------------------------
+
+2.1. Grants
+
+Each Contributor hereby grants You a world-wide, royalty-free,
+non-exclusive license:
+
+(a) under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+(b) under Patent Claims of such Contributor to make, use, sell, offer
+ for sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+The licenses granted in Section 2.1 with respect to any Contribution
+become effective for each Contribution on the date the Contributor first
+distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+The licenses granted in this Section 2 are the only rights granted under
+this License. No additional rights or licenses will be implied from the
+distribution or licensing of Covered Software under this License.
+Notwithstanding Section 2.1(b) above, no patent license is granted by a
+Contributor:
+
+(a) for any code that a Contributor has removed from Covered Software;
+ or
+
+(b) for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+(c) under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+This License does not grant any rights in the trademarks, service marks,
+or logos of any Contributor (except as may be necessary to comply with
+the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+No Contributor makes additional grants as a result of Your choice to
+distribute the Covered Software under a subsequent version of this
+License (see Section 10.2) or under the terms of a Secondary License (if
+permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+Each Contributor represents that the Contributor believes its
+Contributions are its original creation(s) or it has sufficient rights
+to grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+This License is not intended to limit any rights You have under
+applicable copyright doctrines of fair use, fair dealing, or other
+equivalents.
+
+2.7. Conditions
+
+Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
+in Section 2.1.
+
+3. Responsibilities
+-------------------
+
+3.1. Distribution of Source Form
+
+All distribution of Covered Software in Source Code Form, including any
+Modifications that You create or to which You contribute, must be under
+the terms of this License. You must inform recipients that the Source
+Code Form of the Covered Software is governed by the terms of this
+License, and how they can obtain a copy of this License. You may not
+attempt to alter or restrict the recipients' rights in the Source Code
+Form.
+
+3.2. Distribution of Executable Form
+
+If You distribute Covered Software in Executable Form then:
+
+(a) such Covered Software must also be made available in Source Code
+ Form, as described in Section 3.1, and You must inform recipients of
+ the Executable Form how they can obtain a copy of such Source Code
+ Form by reasonable means in a timely manner, at a charge no more
+ than the cost of distribution to the recipient; and
+
+(b) You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter
+ the recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+You may create and distribute a Larger Work under terms of Your choice,
+provided that You also comply with the requirements of this License for
+the Covered Software. If the Larger Work is a combination of Covered
+Software with a work governed by one or more Secondary Licenses, and the
+Covered Software is not Incompatible With Secondary Licenses, this
+License permits You to additionally distribute such Covered Software
+under the terms of such Secondary License(s), so that the recipient of
+the Larger Work may, at their option, further distribute the Covered
+Software under the terms of either this License or such Secondary
+License(s).
+
+3.4. Notices
+
+You may not remove or alter the substance of any license notices
+(including copyright notices, patent notices, disclaimers of warranty,
+or limitations of liability) contained within the Source Code Form of
+the Covered Software, except that You may alter any license notices to
+the extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+You may choose to offer, and to charge a fee for, warranty, support,
+indemnity or liability obligations to one or more recipients of Covered
+Software. However, You may do so only on Your own behalf, and not on
+behalf of any Contributor. You must make it absolutely clear that any
+such warranty, support, indemnity, or liability obligation is offered by
+You alone, and You hereby agree to indemnify every Contributor for any
+liability incurred by such Contributor as a result of warranty, support,
+indemnity or liability terms You offer. You may include additional
+disclaimers of warranty and limitations of liability specific to any
+jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+---------------------------------------------------
+
+If it is impossible for You to comply with any of the terms of this
+License with respect to some or all of the Covered Software due to
+statute, judicial order, or regulation then You must: (a) comply with
+the terms of this License to the maximum extent possible; and (b)
+describe the limitations and the code they affect. Such description must
+be placed in a text file included with all distributions of the Covered
+Software under this License. Except to the extent prohibited by statute
+or regulation, such description must be sufficiently detailed for a
+recipient of ordinary skill to be able to understand it.
+
+5. Termination
+--------------
+
+5.1. The rights granted under this License will terminate automatically
+if You fail to comply with any of its terms. However, if You become
+compliant, then the rights granted under this License from a particular
+Contributor are reinstated (a) provisionally, unless and until such
+Contributor explicitly and finally terminates Your grants, and (b) on an
+ongoing basis, if such Contributor fails to notify You of the
+non-compliance by some reasonable means prior to 60 days after You have
+come back into compliance. Moreover, Your grants from a particular
+Contributor are reinstated on an ongoing basis if such Contributor
+notifies You of the non-compliance by some reasonable means, this is the
+first time You have received notice of non-compliance with this License
+from such Contributor, and You become compliant prior to 30 days after
+Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+infringement claim (excluding declaratory judgment actions,
+counter-claims, and cross-claims) alleging that a Contributor Version
+directly or indirectly infringes any patent, then the rights granted to
+You by any and all Contributors for the Covered Software under Section
+2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all
+end user license agreements (excluding distributors and resellers) which
+have been validly granted by You or Your distributors under this License
+prior to termination shall survive termination.
+
+************************************************************************
+* *
+* 6. Disclaimer of Warranty *
+* ------------------------- *
+* *
+* Covered Software is provided under this License on an "as is" *
+* basis, without warranty of any kind, either expressed, implied, or *
+* statutory, including, without limitation, warranties that the *
+* Covered Software is free of defects, merchantable, fit for a *
+* particular purpose or non-infringing. The entire risk as to the *
+* quality and performance of the Covered Software is with You. *
+* Should any Covered Software prove defective in any respect, You *
+* (not any Contributor) assume the cost of any necessary servicing, *
+* repair, or correction. This disclaimer of warranty constitutes an *
+* essential part of this License. No use of any Covered Software is *
+* authorized under this License except under this disclaimer. *
+* *
+************************************************************************
+
+************************************************************************
+* *
+* 7. Limitation of Liability *
+* -------------------------- *
+* *
+* Under no circumstances and under no legal theory, whether tort *
+* (including negligence), contract, or otherwise, shall any *
+* Contributor, or anyone who distributes Covered Software as *
+* permitted above, be liable to You for any direct, indirect, *
+* special, incidental, or consequential damages of any character *
+* including, without limitation, damages for lost profits, loss of *
+* goodwill, work stoppage, computer failure or malfunction, or any *
+* and all other commercial damages or losses, even if such party *
+* shall have been informed of the possibility of such damages. This *
+* limitation of liability shall not apply to liability for death or *
+* personal injury resulting from such party's negligence to the *
+* extent applicable law prohibits such limitation. Some *
+* jurisdictions do not allow the exclusion or limitation of *
+* incidental or consequential damages, so this exclusion and *
+* limitation may not apply to You. *
+* *
+************************************************************************
+
+8. Litigation
+-------------
+
+Any litigation relating to this License may be brought only in the
+courts of a jurisdiction where the defendant maintains its principal
+place of business and such litigation shall be governed by laws of that
+jurisdiction, without reference to its conflict-of-law provisions.
+Nothing in this Section shall prevent a party's ability to bring
+cross-claims or counter-claims.
+
+9. Miscellaneous
+----------------
+
+This License represents the complete agreement concerning the subject
+matter hereof. If any provision of this License is held to be
+unenforceable, such provision shall be reformed only to the extent
+necessary to make it enforceable. Any law or regulation which provides
+that the language of a contract shall be construed against the drafter
+shall not be used to construe this License against a Contributor.
+
+10. Versions of the License
+---------------------------
+
+10.1. New Versions
+
+Mozilla Foundation is the license steward. Except as provided in Section
+10.3, no one other than the license steward has the right to modify or
+publish new versions of this License. Each version will be given a
+distinguishing version number.
+
+10.2. Effect of New Versions
+
+You may distribute the Covered Software under the terms of the version
+of the License under which You originally received the Covered Software,
+or under the terms of any subsequent version published by the license
+steward.
+
+10.3. Modified Versions
+
+If you create software not governed by this License, and you want to
+create a new license for such software, you may create and use a
+modified version of this License if you rename the license and remove
+any references to the name of the license steward (except to note that
+such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+Licenses
+
+If You choose to distribute Source Code Form that is Incompatible With
+Secondary Licenses under the terms of this version of the License, the
+notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+-------------------------------------------
+
+ This Source Code Form is subject to the terms of the Mozilla Public
+ License, v. 2.0. If a copy of the MPL was not distributed with this
+ file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular
+file, then You may include the notice in a location (such as a LICENSE
+file in a relevant directory) where a recipient would be likely to look
+for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+---------------------------------------------------------
+
+ This Source Code Form is "Incompatible With Secondary Licenses", as
+ defined by the Mozilla Public License, v. 2.0.
diff --git a/deps/rabbitmq_management_agent/Makefile b/deps/rabbitmq_management_agent/Makefile
new file mode 100644
index 0000000000..4b3ebf8f66
--- /dev/null
+++ b/deps/rabbitmq_management_agent/Makefile
@@ -0,0 +1,34 @@
+PROJECT = rabbitmq_management_agent
+PROJECT_DESCRIPTION = RabbitMQ Management Agent
+PROJECT_MOD = rabbit_mgmt_agent_app
+
+define PROJECT_ENV
+[
+ {rates_mode, basic},
+ {sample_retention_policies,
+ %% List of {MaxAgeInSeconds, SampleEveryNSeconds}
+ [{global, [{605, 5}, {3660, 60}, {29400, 600}, {86400, 1800}]},
+ {basic, [{605, 5}, {3600, 60}]},
+ {detailed, [{605, 5}]}]}
+ ]
+endef
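+
+# For illustration only (not used by the build): on a running node the
+# effective policies set above can be inspected from a shell, e.g.:
+#
+#   rabbitmqctl eval 'application:get_env(rabbitmq_management_agent, sample_retention_policies).'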
+
+define PROJECT_APP_EXTRA_KEYS
+ {broker_version_requirements, []}
+endef
+
+DEPS = rabbit_common rabbit
+TEST_DEPS = rabbitmq_ct_helpers rabbitmq_ct_client_helpers
+LOCAL_DEPS += xmerl mnesia ranch ssl crypto public_key
+DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-plugin.mk
+DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk
+
+# FIXME: Use erlang.mk patched for RabbitMQ, while waiting for PRs to be
+# reviewed and merged.
+
+ERLANG_MK_REPO = https://github.com/rabbitmq/erlang.mk.git
+ERLANG_MK_COMMIT = rabbitmq-tmp
+
+include rabbitmq-components.mk
+TEST_DEPS := $(filter-out rabbitmq_test,$(TEST_DEPS))
+include erlang.mk
diff --git a/deps/rabbitmq_management_agent/erlang.mk b/deps/rabbitmq_management_agent/erlang.mk
new file mode 100644
index 0000000000..fce4be0b0a
--- /dev/null
+++ b/deps/rabbitmq_management_agent/erlang.mk
@@ -0,0 +1,7808 @@
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+.PHONY: all app apps deps search rel relup docs install-docs check tests clean distclean help erlang-mk
+
+ERLANG_MK_FILENAME := $(realpath $(lastword $(MAKEFILE_LIST)))
+export ERLANG_MK_FILENAME
+
+ERLANG_MK_VERSION = 2019.07.01-40-geb3e4b0
+ERLANG_MK_WITHOUT =
+
+# Make 3.81 and 3.82 are deprecated.
+
+ifeq ($(MAKELEVEL)$(MAKE_VERSION),03.81)
+$(warning Please upgrade to GNU Make 4 or later: https://erlang.mk/guide/installation.html)
+endif
+
+ifeq ($(MAKELEVEL)$(MAKE_VERSION),03.82)
+$(warning Please upgrade to GNU Make 4 or later: https://erlang.mk/guide/installation.html)
+endif
+
+# Core configuration.
+
+PROJECT ?= $(notdir $(CURDIR))
+PROJECT := $(strip $(PROJECT))
+
+PROJECT_VERSION ?= rolling
+PROJECT_MOD ?= $(PROJECT)_app
+PROJECT_ENV ?= []
+
+# Verbosity.
+
+V ?= 0
+
+verbose_0 = @
+verbose_2 = set -x;
+verbose = $(verbose_$(V))
+
+ifeq ($(V),3)
+SHELL := $(SHELL) -x
+endif
+
+gen_verbose_0 = @echo " GEN " $@;
+gen_verbose_2 = set -x;
+gen_verbose = $(gen_verbose_$(V))
+
+gen_verbose_esc_0 = @echo " GEN " $$@;
+gen_verbose_esc_2 = set -x;
+gen_verbose_esc = $(gen_verbose_esc_$(V))
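+
+# Summary of the levels above: V=0 (the default) hides recipe commands
+# and prints terse " GEN <target>" lines; V=1 echoes the full commands;
+# V=2 additionally prefixes recipes with "set -x;"; V=3 runs the whole
+# $(SHELL) with -x for shell-level tracing.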
+
+# Temporary files directory.
+
+ERLANG_MK_TMP ?= $(CURDIR)/.erlang.mk
+export ERLANG_MK_TMP
+
+# "erl" command.
+
+ERL = erl +A1 -noinput -boot no_dot_erlang
+
+# Platform detection.
+
+ifeq ($(PLATFORM),)
+UNAME_S := $(shell uname -s)
+
+ifeq ($(UNAME_S),Linux)
+PLATFORM = linux
+else ifeq ($(UNAME_S),Darwin)
+PLATFORM = darwin
+else ifeq ($(UNAME_S),SunOS)
+PLATFORM = solaris
+else ifeq ($(UNAME_S),GNU)
+PLATFORM = gnu
+else ifeq ($(UNAME_S),FreeBSD)
+PLATFORM = freebsd
+else ifeq ($(UNAME_S),NetBSD)
+PLATFORM = netbsd
+else ifeq ($(UNAME_S),OpenBSD)
+PLATFORM = openbsd
+else ifeq ($(UNAME_S),DragonFly)
+PLATFORM = dragonfly
+else ifeq ($(shell uname -o),Msys)
+PLATFORM = msys2
+else
+$(error Unable to detect platform. Please open a ticket with the output of uname -a.)
+endif
+
+export PLATFORM
+endif
+
+# Core targets.
+
+all:: deps app rel
+
+# Noop to avoid a Make warning when there's nothing to do.
+rel::
+ $(verbose) :
+
+relup:: deps app
+
+check:: tests
+
+clean:: clean-crashdump
+
+clean-crashdump:
+ifneq ($(wildcard erl_crash.dump),)
+ $(gen_verbose) rm -f erl_crash.dump
+endif
+
+distclean:: clean distclean-tmp
+
+$(ERLANG_MK_TMP):
+ $(verbose) mkdir -p $(ERLANG_MK_TMP)
+
+distclean-tmp:
+ $(gen_verbose) rm -rf $(ERLANG_MK_TMP)
+
+help::
+ $(verbose) printf "%s\n" \
+ "erlang.mk (version $(ERLANG_MK_VERSION)) is distributed under the terms of the ISC License." \
+ "Copyright (c) 2013-2016 Loïc Hoguin <essen@ninenines.eu>" \
+ "" \
+ "Usage: [V=1] $(MAKE) [target]..." \
+ "" \
+ "Core targets:" \
+ " all Run deps, app and rel targets in that order" \
+ " app Compile the project" \
+ " deps Fetch dependencies (if needed) and compile them" \
+ " fetch-deps Fetch dependencies recursively (if needed) without compiling them" \
+ " list-deps List dependencies recursively on stdout" \
+ " search q=... Search for a package in the built-in index" \
+ " rel Build a release for this project, if applicable" \
+ " docs Build the documentation for this project" \
+ " install-docs Install the man pages for this project" \
+ " check Compile and run all tests and analysis for this project" \
+ " tests Run the tests for this project" \
+ " clean Delete temporary and output files from most targets" \
+ " distclean Delete all temporary and output files" \
+ " help Display this help and exit" \
+ " erlang-mk Update erlang.mk to the latest version"
+
+# Core functions.
+
+empty :=
+space := $(empty) $(empty)
+tab := $(empty) $(empty)
+comma := ,
+
+define newline
+
+
+endef
+
+define comma_list
+$(subst $(space),$(comma),$(strip $(1)))
+endef
+
+define escape_dquotes
+$(subst ",\",$1)
+endef
+
+# Adding erlang.mk to make Erlang scripts who call init:get_plain_arguments() happy.
+define erlang
+$(ERL) $2 -pz $(ERLANG_MK_TMP)/rebar/ebin -eval "$(subst $(newline),,$(call escape_dquotes,$1))" -- erlang.mk
+endef
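+
+# Example (sketch): $(call erlang,io:format("hello~n")) runs the given
+# expression in a one-off $(ERL) VM; escape_dquotes and the newline
+# substitution above keep the code valid inside the single -eval string,
+# and an optional second argument is passed through as extra erl flags.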
+
+ifeq ($(PLATFORM),msys2)
+core_native_path = $(shell cygpath -m $1)
+else
+core_native_path = $1
+endif
+
+core_http_get = curl -Lf$(if $(filter-out 0,$(V)),,s)o $(call core_native_path,$1) $2
+
+core_eq = $(and $(findstring $(1),$(2)),$(findstring $(2),$(1)))
+
+# We skip files that contain spaces because they end up causing issues.
+core_find = $(if $(wildcard $1),$(shell find $(1:%/=%) \( -type l -o -type f \) -name $(subst *,\*,$2) | grep -v " "))
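+
+# Example (sketch): $(call core_find,src/,*.erl) lists regular files and
+# symlinks under src/ matching *.erl, skipping any path with a space.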
+
+core_lc = $(subst A,a,$(subst B,b,$(subst C,c,$(subst D,d,$(subst E,e,$(subst F,f,$(subst G,g,$(subst H,h,$(subst I,i,$(subst J,j,$(subst K,k,$(subst L,l,$(subst M,m,$(subst N,n,$(subst O,o,$(subst P,p,$(subst Q,q,$(subst R,r,$(subst S,s,$(subst T,t,$(subst U,u,$(subst V,v,$(subst W,w,$(subst X,x,$(subst Y,y,$(subst Z,z,$(1)))))))))))))))))))))))))))
+
+core_ls = $(filter-out $(1),$(shell echo $(1)))
+
+# @todo Use a solution that does not require using perl.
+core_relpath = $(shell perl -e 'use File::Spec; print File::Spec->abs2rel(@ARGV) . "\n"' $1 $2)
+
+define core_render
+ printf -- '$(subst $(newline),\n,$(subst %,%%,$(subst ','\'',$(subst $(tab),$(WS),$(call $(1))))))\n' > $(2)
+endef
+
+# Automated update.
+
+ERLANG_MK_REPO ?= https://github.com/ninenines/erlang.mk
+ERLANG_MK_COMMIT ?=
+ERLANG_MK_BUILD_CONFIG ?= build.config
+ERLANG_MK_BUILD_DIR ?= .erlang.mk.build
+
+erlang-mk: WITHOUT ?= $(ERLANG_MK_WITHOUT)
+erlang-mk:
+ifdef ERLANG_MK_COMMIT
+ $(verbose) git clone $(ERLANG_MK_REPO) $(ERLANG_MK_BUILD_DIR)
+ $(verbose) cd $(ERLANG_MK_BUILD_DIR) && git checkout $(ERLANG_MK_COMMIT)
+else
+ $(verbose) git clone --depth 1 $(ERLANG_MK_REPO) $(ERLANG_MK_BUILD_DIR)
+endif
+ $(verbose) if [ -f $(ERLANG_MK_BUILD_CONFIG) ]; then cp $(ERLANG_MK_BUILD_CONFIG) $(ERLANG_MK_BUILD_DIR)/build.config; fi
+ $(gen_verbose) $(MAKE) --no-print-directory -C $(ERLANG_MK_BUILD_DIR) WITHOUT='$(strip $(WITHOUT))' UPGRADE=1
+ $(verbose) cp $(ERLANG_MK_BUILD_DIR)/erlang.mk ./erlang.mk
+ $(verbose) rm -rf $(ERLANG_MK_BUILD_DIR)
+ $(verbose) rm -rf $(ERLANG_MK_TMP)
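+
+# Usage (sketch): "make erlang-mk" refreshes this file from the default
+# repository; variables can pin the update, e.g.
+# "make erlang-mk ERLANG_MK_REPO=<fork-url> ERLANG_MK_COMMIT=<ref>".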
+
+# The erlang.mk package index is bundled in the default erlang.mk build.
+# Search for the string "copyright" to skip to the rest of the code.
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-kerl
+
+KERL_INSTALL_DIR ?= $(HOME)/erlang
+
+ifeq ($(strip $(KERL)),)
+KERL := $(ERLANG_MK_TMP)/kerl/kerl
+endif
+
+KERL_DIR = $(ERLANG_MK_TMP)/kerl
+
+export KERL
+
+KERL_GIT ?= https://github.com/kerl/kerl
+KERL_COMMIT ?= master
+
+KERL_MAKEFLAGS ?=
+
+OTP_GIT ?= https://github.com/erlang/otp
+
+define kerl_otp_target
+$(KERL_INSTALL_DIR)/$(1): $(KERL)
+ $(verbose) if [ ! -d $$@ ]; then \
+ MAKEFLAGS="$(KERL_MAKEFLAGS)" $(KERL) build git $(OTP_GIT) $(1) $(1); \
+ $(KERL) install $(1) $(KERL_INSTALL_DIR)/$(1); \
+ fi
+endef
+
+define kerl_hipe_target
+$(KERL_INSTALL_DIR)/$1-native: $(KERL)
+ $(verbose) if [ ! -d $$@ ]; then \
+ KERL_CONFIGURE_OPTIONS=--enable-native-libs \
+ MAKEFLAGS="$(KERL_MAKEFLAGS)" $(KERL) build git $(OTP_GIT) $1 $1-native; \
+ $(KERL) install $1-native $(KERL_INSTALL_DIR)/$1-native; \
+ fi
+endef
+
+$(KERL): $(KERL_DIR)
+
+$(KERL_DIR): | $(ERLANG_MK_TMP)
+ $(gen_verbose) git clone --depth 1 $(KERL_GIT) $(ERLANG_MK_TMP)/kerl
+ $(verbose) cd $(ERLANG_MK_TMP)/kerl && git checkout $(KERL_COMMIT)
+ $(verbose) chmod +x $(KERL)
+
+distclean:: distclean-kerl
+
+distclean-kerl:
+ $(gen_verbose) rm -rf $(KERL_DIR)
+
+# Allow users to select which version of Erlang/OTP to use for a project.
+
+ifneq ($(strip $(LATEST_ERLANG_OTP)),)
+# In some environments it is necessary to filter out master.
+ERLANG_OTP := $(notdir $(lastword $(sort\
+ $(filter-out $(KERL_INSTALL_DIR)/master $(KERL_INSTALL_DIR)/OTP_R%,\
+ $(filter-out %-rc1 %-rc2 %-rc3,$(wildcard $(KERL_INSTALL_DIR)/*[^-native]))))))
+endif
+
+ERLANG_OTP ?=
+ERLANG_HIPE ?=
+
+# Use kerl to enforce a specific Erlang/OTP version for a project.
+ifneq ($(strip $(ERLANG_OTP)),)
+export PATH := $(KERL_INSTALL_DIR)/$(ERLANG_OTP)/bin:$(PATH)
+SHELL := env PATH=$(PATH) $(SHELL)
+$(eval $(call kerl_otp_target,$(ERLANG_OTP)))
+
+# Build Erlang/OTP only if it doesn't already exist.
+ifeq ($(wildcard $(KERL_INSTALL_DIR)/$(ERLANG_OTP))$(BUILD_ERLANG_OTP),)
+$(info Building Erlang/OTP $(ERLANG_OTP)... Please wait...)
+$(shell $(MAKE) $(KERL_INSTALL_DIR)/$(ERLANG_OTP) ERLANG_OTP=$(ERLANG_OTP) BUILD_ERLANG_OTP=1 >&2)
+endif
+
+else
+# Same for a HiPE enabled VM.
+ifneq ($(strip $(ERLANG_HIPE)),)
+export PATH := $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native/bin:$(PATH)
+SHELL := env PATH=$(PATH) $(SHELL)
+$(eval $(call kerl_hipe_target,$(ERLANG_HIPE)))
+
+# Build Erlang/OTP only if it doesn't already exist.
+ifeq ($(wildcard $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native)$(BUILD_ERLANG_OTP),)
+$(info Building HiPE-enabled Erlang/OTP $(ERLANG_HIPE)... Please wait...)
+$(shell $(MAKE) $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native ERLANG_HIPE=$(ERLANG_HIPE) BUILD_ERLANG_OTP=1 >&2)
+endif
+
+endif
+endif
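+
+# Usage (sketch): "make ERLANG_OTP=<ref>" builds that ref from $(OTP_GIT)
+# via kerl if it is not already under $(KERL_INSTALL_DIR), and prepends
+# its bin/ to PATH for this project; the ref must exist in the OTP git
+# repository (for releases, tags of the form OTP-<version>). Setting
+# LATEST_ERLANG_OTP=1 instead selects the newest non-rc, non-master
+# build already installed under $(KERL_INSTALL_DIR).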
+
+PACKAGES += aberth
+pkg_aberth_name = aberth
+pkg_aberth_description = Generic BERT-RPC server in Erlang
+pkg_aberth_homepage = https://github.com/a13x/aberth
+pkg_aberth_fetch = git
+pkg_aberth_repo = https://github.com/a13x/aberth
+pkg_aberth_commit = master
+
+PACKAGES += active
+pkg_active_name = active
+pkg_active_description = Active development for Erlang: rebuild and reload source/binary files while the VM is running
+pkg_active_homepage = https://github.com/proger/active
+pkg_active_fetch = git
+pkg_active_repo = https://github.com/proger/active
+pkg_active_commit = master
+
+PACKAGES += actordb_core
+pkg_actordb_core_name = actordb_core
+pkg_actordb_core_description = ActorDB main source
+pkg_actordb_core_homepage = http://www.actordb.com/
+pkg_actordb_core_fetch = git
+pkg_actordb_core_repo = https://github.com/biokoda/actordb_core
+pkg_actordb_core_commit = master
+
+PACKAGES += actordb_thrift
+pkg_actordb_thrift_name = actordb_thrift
+pkg_actordb_thrift_description = Thrift API for ActorDB
+pkg_actordb_thrift_homepage = http://www.actordb.com/
+pkg_actordb_thrift_fetch = git
+pkg_actordb_thrift_repo = https://github.com/biokoda/actordb_thrift
+pkg_actordb_thrift_commit = master
+
+PACKAGES += aleppo
+pkg_aleppo_name = aleppo
+pkg_aleppo_description = Alternative Erlang Pre-Processor
+pkg_aleppo_homepage = https://github.com/ErlyORM/aleppo
+pkg_aleppo_fetch = git
+pkg_aleppo_repo = https://github.com/ErlyORM/aleppo
+pkg_aleppo_commit = master
+
+PACKAGES += alog
+pkg_alog_name = alog
+pkg_alog_description = Simply the best logging framework for Erlang
+pkg_alog_homepage = https://github.com/siberian-fast-food/alogger
+pkg_alog_fetch = git
+pkg_alog_repo = https://github.com/siberian-fast-food/alogger
+pkg_alog_commit = master
+
+PACKAGES += amqp_client
+pkg_amqp_client_name = amqp_client
+pkg_amqp_client_description = RabbitMQ Erlang AMQP client
+pkg_amqp_client_homepage = https://www.rabbitmq.com/erlang-client-user-guide.html
+pkg_amqp_client_fetch = git
+pkg_amqp_client_repo = https://github.com/rabbitmq/rabbitmq-erlang-client.git
+pkg_amqp_client_commit = master
+
+PACKAGES += annotations
+pkg_annotations_name = annotations
+pkg_annotations_description = Simple code instrumentation utilities
+pkg_annotations_homepage = https://github.com/hyperthunk/annotations
+pkg_annotations_fetch = git
+pkg_annotations_repo = https://github.com/hyperthunk/annotations
+pkg_annotations_commit = master
+
+PACKAGES += antidote
+pkg_antidote_name = antidote
+pkg_antidote_description = Large-scale computation without synchronisation
+pkg_antidote_homepage = https://syncfree.lip6.fr/
+pkg_antidote_fetch = git
+pkg_antidote_repo = https://github.com/SyncFree/antidote
+pkg_antidote_commit = master
+
+PACKAGES += apns
+pkg_apns_name = apns
+pkg_apns_description = Apple Push Notification Server for Erlang
+pkg_apns_homepage = http://inaka.github.com/apns4erl
+pkg_apns_fetch = git
+pkg_apns_repo = https://github.com/inaka/apns4erl
+pkg_apns_commit = master
+
+PACKAGES += asciideck
+pkg_asciideck_name = asciideck
+pkg_asciideck_description = Asciidoc for Erlang.
+pkg_asciideck_homepage = https://ninenines.eu
+pkg_asciideck_fetch = git
+pkg_asciideck_repo = https://github.com/ninenines/asciideck
+pkg_asciideck_commit = master
+
+PACKAGES += azdht
+pkg_azdht_name = azdht
+pkg_azdht_description = Azureus Distributed Hash Table (DHT) in Erlang
+pkg_azdht_homepage = https://github.com/arcusfelis/azdht
+pkg_azdht_fetch = git
+pkg_azdht_repo = https://github.com/arcusfelis/azdht
+pkg_azdht_commit = master
+
+PACKAGES += backoff
+pkg_backoff_name = backoff
+pkg_backoff_description = Simple exponential backoffs in Erlang
+pkg_backoff_homepage = https://github.com/ferd/backoff
+pkg_backoff_fetch = git
+pkg_backoff_repo = https://github.com/ferd/backoff
+pkg_backoff_commit = master
+
+PACKAGES += barrel_tcp
+pkg_barrel_tcp_name = barrel_tcp
+pkg_barrel_tcp_description = barrel is a generic TCP acceptor pool with low latency in Erlang.
+pkg_barrel_tcp_homepage = https://github.com/benoitc-attic/barrel_tcp
+pkg_barrel_tcp_fetch = git
+pkg_barrel_tcp_repo = https://github.com/benoitc-attic/barrel_tcp
+pkg_barrel_tcp_commit = master
+
+PACKAGES += basho_bench
+pkg_basho_bench_name = basho_bench
+pkg_basho_bench_description = A load-generation and testing tool for basically whatever you can write a returning Erlang function for.
+pkg_basho_bench_homepage = https://github.com/basho/basho_bench
+pkg_basho_bench_fetch = git
+pkg_basho_bench_repo = https://github.com/basho/basho_bench
+pkg_basho_bench_commit = master
+
+PACKAGES += bcrypt
+pkg_bcrypt_name = bcrypt
+pkg_bcrypt_description = Bcrypt Erlang / C library
+pkg_bcrypt_homepage = https://github.com/erlangpack/bcrypt
+pkg_bcrypt_fetch = git
+pkg_bcrypt_repo = https://github.com/erlangpack/bcrypt.git
+pkg_bcrypt_commit = master
+
+PACKAGES += beam
+pkg_beam_name = beam
+pkg_beam_description = BEAM emulator written in Erlang
+pkg_beam_homepage = https://github.com/tonyrog/beam
+pkg_beam_fetch = git
+pkg_beam_repo = https://github.com/tonyrog/beam
+pkg_beam_commit = master
+
+PACKAGES += beanstalk
+pkg_beanstalk_name = beanstalk
+pkg_beanstalk_description = An Erlang client for beanstalkd
+pkg_beanstalk_homepage = https://github.com/tim/erlang-beanstalk
+pkg_beanstalk_fetch = git
+pkg_beanstalk_repo = https://github.com/tim/erlang-beanstalk
+pkg_beanstalk_commit = master
+
+PACKAGES += bear
+pkg_bear_name = bear
+pkg_bear_description = a set of statistics functions for erlang
+pkg_bear_homepage = https://github.com/boundary/bear
+pkg_bear_fetch = git
+pkg_bear_repo = https://github.com/boundary/bear
+pkg_bear_commit = master
+
+PACKAGES += bertconf
+pkg_bertconf_name = bertconf
+pkg_bertconf_description = Make ETS tables out of static BERT files that are auto-reloaded
+pkg_bertconf_homepage = https://github.com/ferd/bertconf
+pkg_bertconf_fetch = git
+pkg_bertconf_repo = https://github.com/ferd/bertconf
+pkg_bertconf_commit = master
+
+PACKAGES += bifrost
+pkg_bifrost_name = bifrost
+pkg_bifrost_description = Erlang FTP Server Framework
+pkg_bifrost_homepage = https://github.com/thorstadt/bifrost
+pkg_bifrost_fetch = git
+pkg_bifrost_repo = https://github.com/thorstadt/bifrost
+pkg_bifrost_commit = master
+
+PACKAGES += binpp
+pkg_binpp_name = binpp
+pkg_binpp_description = Erlang Binary Pretty Printer
+pkg_binpp_homepage = https://github.com/jtendo/binpp
+pkg_binpp_fetch = git
+pkg_binpp_repo = https://github.com/jtendo/binpp
+pkg_binpp_commit = master
+
+PACKAGES += bisect
+pkg_bisect_name = bisect
+pkg_bisect_description = Ordered fixed-size binary dictionary in Erlang
+pkg_bisect_homepage = https://github.com/knutin/bisect
+pkg_bisect_fetch = git
+pkg_bisect_repo = https://github.com/knutin/bisect
+pkg_bisect_commit = master
+
+PACKAGES += bitcask
+pkg_bitcask_name = bitcask
+pkg_bitcask_description = because you need another key/value storage engine
+pkg_bitcask_homepage = https://github.com/basho/bitcask
+pkg_bitcask_fetch = git
+pkg_bitcask_repo = https://github.com/basho/bitcask
+pkg_bitcask_commit = develop
+
+PACKAGES += bitstore
+pkg_bitstore_name = bitstore
+pkg_bitstore_description = A document based ontology development environment
+pkg_bitstore_homepage = https://github.com/bdionne/bitstore
+pkg_bitstore_fetch = git
+pkg_bitstore_repo = https://github.com/bdionne/bitstore
+pkg_bitstore_commit = master
+
+PACKAGES += bootstrap
+pkg_bootstrap_name = bootstrap
+pkg_bootstrap_description = A simple, yet powerful Erlang cluster bootstrapping application.
+pkg_bootstrap_homepage = https://github.com/schlagert/bootstrap
+pkg_bootstrap_fetch = git
+pkg_bootstrap_repo = https://github.com/schlagert/bootstrap
+pkg_bootstrap_commit = master
+
+PACKAGES += boss
+pkg_boss_name = boss
+pkg_boss_description = Erlang web MVC, now featuring Comet
+pkg_boss_homepage = https://github.com/ChicagoBoss/ChicagoBoss
+pkg_boss_fetch = git
+pkg_boss_repo = https://github.com/ChicagoBoss/ChicagoBoss
+pkg_boss_commit = master
+
+PACKAGES += boss_db
+pkg_boss_db_name = boss_db
+pkg_boss_db_description = BossDB: a sharded, caching, pooling, evented ORM for Erlang
+pkg_boss_db_homepage = https://github.com/ErlyORM/boss_db
+pkg_boss_db_fetch = git
+pkg_boss_db_repo = https://github.com/ErlyORM/boss_db
+pkg_boss_db_commit = master
+
+PACKAGES += brod
+pkg_brod_name = brod
+pkg_brod_description = Kafka client in Erlang
+pkg_brod_homepage = https://github.com/klarna/brod
+pkg_brod_fetch = git
+pkg_brod_repo = https://github.com/klarna/brod.git
+pkg_brod_commit = master
+
+PACKAGES += bson
+pkg_bson_name = bson
+pkg_bson_description = BSON documents in Erlang, see bsonspec.org
+pkg_bson_homepage = https://github.com/comtihon/bson-erlang
+pkg_bson_fetch = git
+pkg_bson_repo = https://github.com/comtihon/bson-erlang
+pkg_bson_commit = master
+
+PACKAGES += bullet
+pkg_bullet_name = bullet
+pkg_bullet_description = Simple, reliable, efficient streaming for Cowboy.
+pkg_bullet_homepage = http://ninenines.eu
+pkg_bullet_fetch = git
+pkg_bullet_repo = https://github.com/ninenines/bullet
+pkg_bullet_commit = master
+
+PACKAGES += cache
+pkg_cache_name = cache
+pkg_cache_description = Erlang in-memory cache
+pkg_cache_homepage = https://github.com/fogfish/cache
+pkg_cache_fetch = git
+pkg_cache_repo = https://github.com/fogfish/cache
+pkg_cache_commit = master
+
+PACKAGES += cake
+pkg_cake_name = cake
+pkg_cake_description = Really simple terminal colorization
+pkg_cake_homepage = https://github.com/darach/cake-erl
+pkg_cake_fetch = git
+pkg_cake_repo = https://github.com/darach/cake-erl
+pkg_cake_commit = master
+
+PACKAGES += carotene
+pkg_carotene_name = carotene
+pkg_carotene_description = Real-time server
+pkg_carotene_homepage = https://github.com/carotene/carotene
+pkg_carotene_fetch = git
+pkg_carotene_repo = https://github.com/carotene/carotene
+pkg_carotene_commit = master
+
+PACKAGES += cberl
+pkg_cberl_name = cberl
+pkg_cberl_description = NIF based Erlang bindings for Couchbase
+pkg_cberl_homepage = https://github.com/chitika/cberl
+pkg_cberl_fetch = git
+pkg_cberl_repo = https://github.com/chitika/cberl
+pkg_cberl_commit = master
+
+PACKAGES += cecho
+pkg_cecho_name = cecho
+pkg_cecho_description = An ncurses library for Erlang
+pkg_cecho_homepage = https://github.com/mazenharake/cecho
+pkg_cecho_fetch = git
+pkg_cecho_repo = https://github.com/mazenharake/cecho
+pkg_cecho_commit = master
+
+PACKAGES += cferl
+pkg_cferl_name = cferl
+pkg_cferl_description = Rackspace / Open Stack Cloud Files Erlang Client
+pkg_cferl_homepage = https://github.com/ddossot/cferl
+pkg_cferl_fetch = git
+pkg_cferl_repo = https://github.com/ddossot/cferl
+pkg_cferl_commit = master
+
+PACKAGES += chaos_monkey
+pkg_chaos_monkey_name = chaos_monkey
+pkg_chaos_monkey_description = This is The CHAOS MONKEY. It will kill your processes.
+pkg_chaos_monkey_homepage = https://github.com/dLuna/chaos_monkey
+pkg_chaos_monkey_fetch = git
+pkg_chaos_monkey_repo = https://github.com/dLuna/chaos_monkey
+pkg_chaos_monkey_commit = master
+
+PACKAGES += check_node
+pkg_check_node_name = check_node
+pkg_check_node_description = Nagios Scripts for monitoring Riak
+pkg_check_node_homepage = https://github.com/basho-labs/riak_nagios
+pkg_check_node_fetch = git
+pkg_check_node_repo = https://github.com/basho-labs/riak_nagios
+pkg_check_node_commit = master
+
+PACKAGES += chronos
+pkg_chronos_name = chronos
+pkg_chronos_description = Timer module for Erlang that makes it easy to abstract time out of the tests.
+pkg_chronos_homepage = https://github.com/lehoff/chronos
+pkg_chronos_fetch = git
+pkg_chronos_repo = https://github.com/lehoff/chronos
+pkg_chronos_commit = master
+
+PACKAGES += chumak
+pkg_chumak_name = chumak
+pkg_chumak_description = Pure Erlang implementation of ZeroMQ Message Transport Protocol.
+pkg_chumak_homepage = http://choven.ca
+pkg_chumak_fetch = git
+pkg_chumak_repo = https://github.com/chovencorp/chumak
+pkg_chumak_commit = master
+
+PACKAGES += cl
+pkg_cl_name = cl
+pkg_cl_description = OpenCL binding for Erlang
+pkg_cl_homepage = https://github.com/tonyrog/cl
+pkg_cl_fetch = git
+pkg_cl_repo = https://github.com/tonyrog/cl
+pkg_cl_commit = master
+
+PACKAGES += clique
+pkg_clique_name = clique
+pkg_clique_description = CLI Framework for Erlang
+pkg_clique_homepage = https://github.com/basho/clique
+pkg_clique_fetch = git
+pkg_clique_repo = https://github.com/basho/clique
+pkg_clique_commit = develop
+
+PACKAGES += cloudi_core
+pkg_cloudi_core_name = cloudi_core
+pkg_cloudi_core_description = CloudI internal service runtime
+pkg_cloudi_core_homepage = http://cloudi.org/
+pkg_cloudi_core_fetch = git
+pkg_cloudi_core_repo = https://github.com/CloudI/cloudi_core
+pkg_cloudi_core_commit = master
+
+PACKAGES += cloudi_service_api_requests
+pkg_cloudi_service_api_requests_name = cloudi_service_api_requests
+pkg_cloudi_service_api_requests_description = CloudI Service API requests (JSON-RPC/Erlang-term support)
+pkg_cloudi_service_api_requests_homepage = http://cloudi.org/
+pkg_cloudi_service_api_requests_fetch = git
+pkg_cloudi_service_api_requests_repo = https://github.com/CloudI/cloudi_service_api_requests
+pkg_cloudi_service_api_requests_commit = master
+
+PACKAGES += cloudi_service_db
+pkg_cloudi_service_db_name = cloudi_service_db
+pkg_cloudi_service_db_description = CloudI Database (in-memory/testing/generic)
+pkg_cloudi_service_db_homepage = http://cloudi.org/
+pkg_cloudi_service_db_fetch = git
+pkg_cloudi_service_db_repo = https://github.com/CloudI/cloudi_service_db
+pkg_cloudi_service_db_commit = master
+
+PACKAGES += cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_name = cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_description = Cassandra CloudI Service
+pkg_cloudi_service_db_cassandra_homepage = http://cloudi.org/
+pkg_cloudi_service_db_cassandra_fetch = git
+pkg_cloudi_service_db_cassandra_repo = https://github.com/CloudI/cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_commit = master
+
+PACKAGES += cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_name = cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_description = Cassandra CQL CloudI Service
+pkg_cloudi_service_db_cassandra_cql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_cassandra_cql_fetch = git
+pkg_cloudi_service_db_cassandra_cql_repo = https://github.com/CloudI/cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_commit = master
+
+PACKAGES += cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_name = cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_description = CouchDB CloudI Service
+pkg_cloudi_service_db_couchdb_homepage = http://cloudi.org/
+pkg_cloudi_service_db_couchdb_fetch = git
+pkg_cloudi_service_db_couchdb_repo = https://github.com/CloudI/cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_commit = master
+
+PACKAGES += cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_name = cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_description = elasticsearch CloudI Service
+pkg_cloudi_service_db_elasticsearch_homepage = http://cloudi.org/
+pkg_cloudi_service_db_elasticsearch_fetch = git
+pkg_cloudi_service_db_elasticsearch_repo = https://github.com/CloudI/cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_commit = master
+
+PACKAGES += cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_name = cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_description = memcached CloudI Service
+pkg_cloudi_service_db_memcached_homepage = http://cloudi.org/
+pkg_cloudi_service_db_memcached_fetch = git
+pkg_cloudi_service_db_memcached_repo = https://github.com/CloudI/cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_commit = master
+
+PACKAGES += cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_name = cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_description = MySQL CloudI Service
+pkg_cloudi_service_db_mysql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_mysql_fetch = git
+pkg_cloudi_service_db_mysql_repo = https://github.com/CloudI/cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_commit = master
+
+PACKAGES += cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_name = cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_description = PostgreSQL CloudI Service
+pkg_cloudi_service_db_pgsql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_pgsql_fetch = git
+pkg_cloudi_service_db_pgsql_repo = https://github.com/CloudI/cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_commit = master
+
+PACKAGES += cloudi_service_db_riak
+pkg_cloudi_service_db_riak_name = cloudi_service_db_riak
+pkg_cloudi_service_db_riak_description = Riak CloudI Service
+pkg_cloudi_service_db_riak_homepage = http://cloudi.org/
+pkg_cloudi_service_db_riak_fetch = git
+pkg_cloudi_service_db_riak_repo = https://github.com/CloudI/cloudi_service_db_riak
+pkg_cloudi_service_db_riak_commit = master
+
+PACKAGES += cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_name = cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_description = Tokyo Tyrant CloudI Service
+pkg_cloudi_service_db_tokyotyrant_homepage = http://cloudi.org/
+pkg_cloudi_service_db_tokyotyrant_fetch = git
+pkg_cloudi_service_db_tokyotyrant_repo = https://github.com/CloudI/cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_commit = master
+
+PACKAGES += cloudi_service_filesystem
+pkg_cloudi_service_filesystem_name = cloudi_service_filesystem
+pkg_cloudi_service_filesystem_description = Filesystem CloudI Service
+pkg_cloudi_service_filesystem_homepage = http://cloudi.org/
+pkg_cloudi_service_filesystem_fetch = git
+pkg_cloudi_service_filesystem_repo = https://github.com/CloudI/cloudi_service_filesystem
+pkg_cloudi_service_filesystem_commit = master
+
+PACKAGES += cloudi_service_http_client
+pkg_cloudi_service_http_client_name = cloudi_service_http_client
+pkg_cloudi_service_http_client_description = HTTP client CloudI Service
+pkg_cloudi_service_http_client_homepage = http://cloudi.org/
+pkg_cloudi_service_http_client_fetch = git
+pkg_cloudi_service_http_client_repo = https://github.com/CloudI/cloudi_service_http_client
+pkg_cloudi_service_http_client_commit = master
+
+PACKAGES += cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_name = cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_description = cowboy HTTP/HTTPS CloudI Service
+pkg_cloudi_service_http_cowboy_homepage = http://cloudi.org/
+pkg_cloudi_service_http_cowboy_fetch = git
+pkg_cloudi_service_http_cowboy_repo = https://github.com/CloudI/cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_commit = master
+
+PACKAGES += cloudi_service_http_elli
+pkg_cloudi_service_http_elli_name = cloudi_service_http_elli
+pkg_cloudi_service_http_elli_description = elli HTTP CloudI Service
+pkg_cloudi_service_http_elli_homepage = http://cloudi.org/
+pkg_cloudi_service_http_elli_fetch = git
+pkg_cloudi_service_http_elli_repo = https://github.com/CloudI/cloudi_service_http_elli
+pkg_cloudi_service_http_elli_commit = master
+
+PACKAGES += cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_name = cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_description = Map/Reduce CloudI Service
+pkg_cloudi_service_map_reduce_homepage = http://cloudi.org/
+pkg_cloudi_service_map_reduce_fetch = git
+pkg_cloudi_service_map_reduce_repo = https://github.com/CloudI/cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_commit = master
+
+PACKAGES += cloudi_service_oauth1
+pkg_cloudi_service_oauth1_name = cloudi_service_oauth1
+pkg_cloudi_service_oauth1_description = OAuth v1.0 CloudI Service
+pkg_cloudi_service_oauth1_homepage = http://cloudi.org/
+pkg_cloudi_service_oauth1_fetch = git
+pkg_cloudi_service_oauth1_repo = https://github.com/CloudI/cloudi_service_oauth1
+pkg_cloudi_service_oauth1_commit = master
+
+PACKAGES += cloudi_service_queue
+pkg_cloudi_service_queue_name = cloudi_service_queue
+pkg_cloudi_service_queue_description = Persistent Queue Service
+pkg_cloudi_service_queue_homepage = http://cloudi.org/
+pkg_cloudi_service_queue_fetch = git
+pkg_cloudi_service_queue_repo = https://github.com/CloudI/cloudi_service_queue
+pkg_cloudi_service_queue_commit = master
+
+PACKAGES += cloudi_service_quorum
+pkg_cloudi_service_quorum_name = cloudi_service_quorum
+pkg_cloudi_service_quorum_description = CloudI Quorum Service
+pkg_cloudi_service_quorum_homepage = http://cloudi.org/
+pkg_cloudi_service_quorum_fetch = git
+pkg_cloudi_service_quorum_repo = https://github.com/CloudI/cloudi_service_quorum
+pkg_cloudi_service_quorum_commit = master
+
+PACKAGES += cloudi_service_router
+pkg_cloudi_service_router_name = cloudi_service_router
+pkg_cloudi_service_router_description = CloudI Router Service
+pkg_cloudi_service_router_homepage = http://cloudi.org/
+pkg_cloudi_service_router_fetch = git
+pkg_cloudi_service_router_repo = https://github.com/CloudI/cloudi_service_router
+pkg_cloudi_service_router_commit = master
+
+PACKAGES += cloudi_service_tcp
+pkg_cloudi_service_tcp_name = cloudi_service_tcp
+pkg_cloudi_service_tcp_description = TCP CloudI Service
+pkg_cloudi_service_tcp_homepage = http://cloudi.org/
+pkg_cloudi_service_tcp_fetch = git
+pkg_cloudi_service_tcp_repo = https://github.com/CloudI/cloudi_service_tcp
+pkg_cloudi_service_tcp_commit = master
+
+PACKAGES += cloudi_service_timers
+pkg_cloudi_service_timers_name = cloudi_service_timers
+pkg_cloudi_service_timers_description = Timers CloudI Service
+pkg_cloudi_service_timers_homepage = http://cloudi.org/
+pkg_cloudi_service_timers_fetch = git
+pkg_cloudi_service_timers_repo = https://github.com/CloudI/cloudi_service_timers
+pkg_cloudi_service_timers_commit = master
+
+PACKAGES += cloudi_service_udp
+pkg_cloudi_service_udp_name = cloudi_service_udp
+pkg_cloudi_service_udp_description = UDP CloudI Service
+pkg_cloudi_service_udp_homepage = http://cloudi.org/
+pkg_cloudi_service_udp_fetch = git
+pkg_cloudi_service_udp_repo = https://github.com/CloudI/cloudi_service_udp
+pkg_cloudi_service_udp_commit = master
+
+PACKAGES += cloudi_service_validate
+pkg_cloudi_service_validate_name = cloudi_service_validate
+pkg_cloudi_service_validate_description = CloudI Validate Service
+pkg_cloudi_service_validate_homepage = http://cloudi.org/
+pkg_cloudi_service_validate_fetch = git
+pkg_cloudi_service_validate_repo = https://github.com/CloudI/cloudi_service_validate
+pkg_cloudi_service_validate_commit = master
+
+PACKAGES += cloudi_service_zeromq
+pkg_cloudi_service_zeromq_name = cloudi_service_zeromq
+pkg_cloudi_service_zeromq_description = ZeroMQ CloudI Service
+pkg_cloudi_service_zeromq_homepage = http://cloudi.org/
+pkg_cloudi_service_zeromq_fetch = git
+pkg_cloudi_service_zeromq_repo = https://github.com/CloudI/cloudi_service_zeromq
+pkg_cloudi_service_zeromq_commit = master
+
+PACKAGES += cluster_info
+pkg_cluster_info_name = cluster_info
+pkg_cluster_info_description = Fork of Hibari's nifty cluster_info OTP app
+pkg_cluster_info_homepage = https://github.com/basho/cluster_info
+pkg_cluster_info_fetch = git
+pkg_cluster_info_repo = https://github.com/basho/cluster_info
+pkg_cluster_info_commit = master
+
+PACKAGES += color
+pkg_color_name = color
+pkg_color_description = ANSI colors for your Erlang
+pkg_color_homepage = https://github.com/julianduque/erlang-color
+pkg_color_fetch = git
+pkg_color_repo = https://github.com/julianduque/erlang-color
+pkg_color_commit = master
+
+PACKAGES += confetti
+pkg_confetti_name = confetti
+pkg_confetti_description = Erlang configuration provider / application:get_env/2 on steroids
+pkg_confetti_homepage = https://github.com/jtendo/confetti
+pkg_confetti_fetch = git
+pkg_confetti_repo = https://github.com/jtendo/confetti
+pkg_confetti_commit = master
+
+PACKAGES += couchbeam
+pkg_couchbeam_name = couchbeam
+pkg_couchbeam_description = Apache CouchDB client in Erlang
+pkg_couchbeam_homepage = https://github.com/benoitc/couchbeam
+pkg_couchbeam_fetch = git
+pkg_couchbeam_repo = https://github.com/benoitc/couchbeam
+pkg_couchbeam_commit = master
+
+PACKAGES += covertool
+pkg_covertool_name = covertool
+pkg_covertool_description = Tool to convert Erlang cover data files into Cobertura XML reports
+pkg_covertool_homepage = https://github.com/idubrov/covertool
+pkg_covertool_fetch = git
+pkg_covertool_repo = https://github.com/idubrov/covertool
+pkg_covertool_commit = master
+
+PACKAGES += cowboy
+pkg_cowboy_name = cowboy
+pkg_cowboy_description = Small, fast and modular HTTP server.
+pkg_cowboy_homepage = http://ninenines.eu
+pkg_cowboy_fetch = git
+pkg_cowboy_repo = https://github.com/ninenines/cowboy
+pkg_cowboy_commit = 1.0.4
+
+PACKAGES += cowdb
+pkg_cowdb_name = cowdb
+pkg_cowdb_description = Pure Key/Value database library for Erlang Applications
+pkg_cowdb_homepage = https://github.com/refuge/cowdb
+pkg_cowdb_fetch = git
+pkg_cowdb_repo = https://github.com/refuge/cowdb
+pkg_cowdb_commit = master
+
+PACKAGES += cowlib
+pkg_cowlib_name = cowlib
+pkg_cowlib_description = Support library for manipulating Web protocols.
+pkg_cowlib_homepage = http://ninenines.eu
+pkg_cowlib_fetch = git
+pkg_cowlib_repo = https://github.com/ninenines/cowlib
+pkg_cowlib_commit = 1.0.2
+
+PACKAGES += cpg
+pkg_cpg_name = cpg
+pkg_cpg_description = CloudI Process Groups
+pkg_cpg_homepage = https://github.com/okeuday/cpg
+pkg_cpg_fetch = git
+pkg_cpg_repo = https://github.com/okeuday/cpg
+pkg_cpg_commit = master
+
+PACKAGES += cqerl
+pkg_cqerl_name = cqerl
+pkg_cqerl_description = Native Erlang CQL client for Cassandra
+pkg_cqerl_homepage = https://matehat.github.io/cqerl/
+pkg_cqerl_fetch = git
+pkg_cqerl_repo = https://github.com/matehat/cqerl
+pkg_cqerl_commit = master
+
+PACKAGES += cr
+pkg_cr_name = cr
+pkg_cr_description = Chain Replication
+pkg_cr_homepage = https://synrc.com/apps/cr/doc/cr.htm
+pkg_cr_fetch = git
+pkg_cr_repo = https://github.com/spawnproc/cr
+pkg_cr_commit = master
+
+PACKAGES += cuttlefish
+pkg_cuttlefish_name = cuttlefish
+pkg_cuttlefish_description = never lose your childlike sense of wonder baby cuttlefish, promise me?
+pkg_cuttlefish_homepage = https://github.com/basho/cuttlefish
+pkg_cuttlefish_fetch = git
+pkg_cuttlefish_repo = https://github.com/basho/cuttlefish
+pkg_cuttlefish_commit = master
+
+PACKAGES += damocles
+pkg_damocles_name = damocles
+pkg_damocles_description = Erlang library for generating adversarial network conditions for QAing distributed applications/systems on a single Linux box.
+pkg_damocles_homepage = https://github.com/lostcolony/damocles
+pkg_damocles_fetch = git
+pkg_damocles_repo = https://github.com/lostcolony/damocles
+pkg_damocles_commit = master
+
+PACKAGES += debbie
+pkg_debbie_name = debbie
+pkg_debbie_description = .DEB Built In Erlang
+pkg_debbie_homepage = https://github.com/crownedgrouse/debbie
+pkg_debbie_fetch = git
+pkg_debbie_repo = https://github.com/crownedgrouse/debbie
+pkg_debbie_commit = master
+
+PACKAGES += decimal
+pkg_decimal_name = decimal
+pkg_decimal_description = An Erlang decimal arithmetic library
+pkg_decimal_homepage = https://github.com/tim/erlang-decimal
+pkg_decimal_fetch = git
+pkg_decimal_repo = https://github.com/tim/erlang-decimal
+pkg_decimal_commit = master
+
+PACKAGES += detergent
+pkg_detergent_name = detergent
+pkg_detergent_description = An emulsifying Erlang SOAP library
+pkg_detergent_homepage = https://github.com/devinus/detergent
+pkg_detergent_fetch = git
+pkg_detergent_repo = https://github.com/devinus/detergent
+pkg_detergent_commit = master
+
+PACKAGES += detest
+pkg_detest_name = detest
+pkg_detest_description = Tool for running tests on a cluster of erlang nodes
+pkg_detest_homepage = https://github.com/biokoda/detest
+pkg_detest_fetch = git
+pkg_detest_repo = https://github.com/biokoda/detest
+pkg_detest_commit = master
+
+PACKAGES += dh_date
+pkg_dh_date_name = dh_date
+pkg_dh_date_description = Date formatting / parsing library for erlang
+pkg_dh_date_homepage = https://github.com/daleharvey/dh_date
+pkg_dh_date_fetch = git
+pkg_dh_date_repo = https://github.com/daleharvey/dh_date
+pkg_dh_date_commit = master
+
+PACKAGES += dirbusterl
+pkg_dirbusterl_name = dirbusterl
+pkg_dirbusterl_description = DirBuster successor in Erlang
+pkg_dirbusterl_homepage = https://github.com/silentsignal/DirBustErl
+pkg_dirbusterl_fetch = git
+pkg_dirbusterl_repo = https://github.com/silentsignal/DirBustErl
+pkg_dirbusterl_commit = master
+
+PACKAGES += dispcount
+pkg_dispcount_name = dispcount
+pkg_dispcount_description = Erlang task dispatcher based on ETS counters.
+pkg_dispcount_homepage = https://github.com/ferd/dispcount
+pkg_dispcount_fetch = git
+pkg_dispcount_repo = https://github.com/ferd/dispcount
+pkg_dispcount_commit = master
+
+PACKAGES += dlhttpc
+pkg_dlhttpc_name = dlhttpc
+pkg_dlhttpc_description = dispcount-based lhttpc fork for massive amounts of requests to limited endpoints
+pkg_dlhttpc_homepage = https://github.com/ferd/dlhttpc
+pkg_dlhttpc_fetch = git
+pkg_dlhttpc_repo = https://github.com/ferd/dlhttpc
+pkg_dlhttpc_commit = master
+
+PACKAGES += dns
+pkg_dns_name = dns
+pkg_dns_description = Erlang DNS library
+pkg_dns_homepage = https://github.com/aetrion/dns_erlang
+pkg_dns_fetch = git
+pkg_dns_repo = https://github.com/aetrion/dns_erlang
+pkg_dns_commit = master
+
+PACKAGES += dnssd
+pkg_dnssd_name = dnssd
+pkg_dnssd_description = Erlang interface to Apple's Bonjour D NS Service Discovery implementation
+pkg_dnssd_homepage = https://github.com/benoitc/dnssd_erlang
+pkg_dnssd_fetch = git
+pkg_dnssd_repo = https://github.com/benoitc/dnssd_erlang
+pkg_dnssd_commit = master
+
+PACKAGES += dynamic_compile
+pkg_dynamic_compile_name = dynamic_compile
+pkg_dynamic_compile_description = compile and load erlang modules from string input
+pkg_dynamic_compile_homepage = https://github.com/jkvor/dynamic_compile
+pkg_dynamic_compile_fetch = git
+pkg_dynamic_compile_repo = https://github.com/jkvor/dynamic_compile
+pkg_dynamic_compile_commit = master
+
+PACKAGES += e2
+pkg_e2_name = e2
+pkg_e2_description = Library to simplify writing correct OTP applications.
+pkg_e2_homepage = http://e2project.org
+pkg_e2_fetch = git
+pkg_e2_repo = https://github.com/gar1t/e2
+pkg_e2_commit = master
+
+PACKAGES += eamf
+pkg_eamf_name = eamf
+pkg_eamf_description = eAMF provides Action Message Format (AMF) support for Erlang
+pkg_eamf_homepage = https://github.com/mrinalwadhwa/eamf
+pkg_eamf_fetch = git
+pkg_eamf_repo = https://github.com/mrinalwadhwa/eamf
+pkg_eamf_commit = master
+
+PACKAGES += eavro
+pkg_eavro_name = eavro
+pkg_eavro_description = Apache Avro encoder/decoder
+pkg_eavro_homepage = https://github.com/SIfoxDevTeam/eavro
+pkg_eavro_fetch = git
+pkg_eavro_repo = https://github.com/SIfoxDevTeam/eavro
+pkg_eavro_commit = master
+
+PACKAGES += ecapnp
+pkg_ecapnp_name = ecapnp
+pkg_ecapnp_description = Cap'n Proto library for Erlang
+pkg_ecapnp_homepage = https://github.com/kaos/ecapnp
+pkg_ecapnp_fetch = git
+pkg_ecapnp_repo = https://github.com/kaos/ecapnp
+pkg_ecapnp_commit = master
+
+PACKAGES += econfig
+pkg_econfig_name = econfig
+pkg_econfig_description = simple Erlang config handler using INI files
+pkg_econfig_homepage = https://github.com/benoitc/econfig
+pkg_econfig_fetch = git
+pkg_econfig_repo = https://github.com/benoitc/econfig
+pkg_econfig_commit = master
+
+PACKAGES += edate
+pkg_edate_name = edate
+pkg_edate_description = date manipulation library for erlang
+pkg_edate_homepage = https://github.com/dweldon/edate
+pkg_edate_fetch = git
+pkg_edate_repo = https://github.com/dweldon/edate
+pkg_edate_commit = master
+
+PACKAGES += edgar
+pkg_edgar_name = edgar
+pkg_edgar_description = Erlang Does GNU AR
+pkg_edgar_homepage = https://github.com/crownedgrouse/edgar
+pkg_edgar_fetch = git
+pkg_edgar_repo = https://github.com/crownedgrouse/edgar
+pkg_edgar_commit = master
+
+PACKAGES += edis
+pkg_edis_name = edis
+pkg_edis_description = An Erlang implementation of Redis KV Store
+pkg_edis_homepage = http://inaka.github.com/edis/
+pkg_edis_fetch = git
+pkg_edis_repo = https://github.com/inaka/edis
+pkg_edis_commit = master
+
+PACKAGES += edns
+pkg_edns_name = edns
+pkg_edns_description = Erlang/OTP DNS server
+pkg_edns_homepage = https://github.com/hcvst/erlang-dns
+pkg_edns_fetch = git
+pkg_edns_repo = https://github.com/hcvst/erlang-dns
+pkg_edns_commit = master
+
+PACKAGES += edown
+pkg_edown_name = edown
+pkg_edown_description = EDoc extension for generating Github-flavored Markdown
+pkg_edown_homepage = https://github.com/uwiger/edown
+pkg_edown_fetch = git
+pkg_edown_repo = https://github.com/uwiger/edown
+pkg_edown_commit = master
+
+PACKAGES += eep
+pkg_eep_name = eep
+pkg_eep_description = Erlang Easy Profiling (eep) application provides a way to analyze application performance and call hierarchy
+pkg_eep_homepage = https://github.com/virtan/eep
+pkg_eep_fetch = git
+pkg_eep_repo = https://github.com/virtan/eep
+pkg_eep_commit = master
+
+PACKAGES += eep_app
+pkg_eep_app_name = eep_app
+pkg_eep_app_description = Embedded Event Processing
+pkg_eep_app_homepage = https://github.com/darach/eep-erl
+pkg_eep_app_fetch = git
+pkg_eep_app_repo = https://github.com/darach/eep-erl
+pkg_eep_app_commit = master
+
+PACKAGES += efene
+pkg_efene_name = efene
+pkg_efene_description = Alternative syntax for the Erlang Programming Language focusing on simplicity, ease of use and programmer UX
+pkg_efene_homepage = https://github.com/efene/efene
+pkg_efene_fetch = git
+pkg_efene_repo = https://github.com/efene/efene
+pkg_efene_commit = master
+
+PACKAGES += egeoip
+pkg_egeoip_name = egeoip
+pkg_egeoip_description = Erlang IP Geolocation module, currently supporting the MaxMind GeoLite City Database.
+pkg_egeoip_homepage = https://github.com/mochi/egeoip
+pkg_egeoip_fetch = git
+pkg_egeoip_repo = https://github.com/mochi/egeoip
+pkg_egeoip_commit = master
+
+PACKAGES += ehsa
+pkg_ehsa_name = ehsa
+pkg_ehsa_description = Erlang HTTP server basic and digest authentication modules
+pkg_ehsa_homepage = https://bitbucket.org/a12n/ehsa
+pkg_ehsa_fetch = hg
+pkg_ehsa_repo = https://bitbucket.org/a12n/ehsa
+pkg_ehsa_commit = default
+
+PACKAGES += ej
+pkg_ej_name = ej
+pkg_ej_description = Helper module for working with Erlang terms representing JSON
+pkg_ej_homepage = https://github.com/seth/ej
+pkg_ej_fetch = git
+pkg_ej_repo = https://github.com/seth/ej
+pkg_ej_commit = master
+
+PACKAGES += ejabberd
+pkg_ejabberd_name = ejabberd
+pkg_ejabberd_description = Robust, ubiquitous and massively scalable Jabber / XMPP Instant Messaging platform
+pkg_ejabberd_homepage = https://github.com/processone/ejabberd
+pkg_ejabberd_fetch = git
+pkg_ejabberd_repo = https://github.com/processone/ejabberd
+pkg_ejabberd_commit = master
+
+PACKAGES += ejwt
+pkg_ejwt_name = ejwt
+pkg_ejwt_description = erlang library for JSON Web Token
+pkg_ejwt_homepage = https://github.com/artefactop/ejwt
+pkg_ejwt_fetch = git
+pkg_ejwt_repo = https://github.com/artefactop/ejwt
+pkg_ejwt_commit = master
+
+PACKAGES += ekaf
+pkg_ekaf_name = ekaf
+pkg_ekaf_description = A minimal, high-performance Kafka client in Erlang.
+pkg_ekaf_homepage = https://github.com/helpshift/ekaf
+pkg_ekaf_fetch = git
+pkg_ekaf_repo = https://github.com/helpshift/ekaf
+pkg_ekaf_commit = master
+
+PACKAGES += elarm
+pkg_elarm_name = elarm
+pkg_elarm_description = Alarm Manager for Erlang.
+pkg_elarm_homepage = https://github.com/esl/elarm
+pkg_elarm_fetch = git
+pkg_elarm_repo = https://github.com/esl/elarm
+pkg_elarm_commit = master
+
+PACKAGES += eleveldb
+pkg_eleveldb_name = eleveldb
+pkg_eleveldb_description = Erlang LevelDB API
+pkg_eleveldb_homepage = https://github.com/basho/eleveldb
+pkg_eleveldb_fetch = git
+pkg_eleveldb_repo = https://github.com/basho/eleveldb
+pkg_eleveldb_commit = master
+
+PACKAGES += elixir
+pkg_elixir_name = elixir
+pkg_elixir_description = Elixir is a dynamic, functional language designed for building scalable and maintainable applications
+pkg_elixir_homepage = https://elixir-lang.org/
+pkg_elixir_fetch = git
+pkg_elixir_repo = https://github.com/elixir-lang/elixir
+pkg_elixir_commit = master
+
+PACKAGES += elli
+pkg_elli_name = elli
+pkg_elli_description = Simple, robust and performant Erlang web server
+pkg_elli_homepage = https://github.com/elli-lib/elli
+pkg_elli_fetch = git
+pkg_elli_repo = https://github.com/elli-lib/elli
+pkg_elli_commit = master
+
+PACKAGES += elvis
+pkg_elvis_name = elvis
+pkg_elvis_description = Erlang Style Reviewer
+pkg_elvis_homepage = https://github.com/inaka/elvis
+pkg_elvis_fetch = git
+pkg_elvis_repo = https://github.com/inaka/elvis
+pkg_elvis_commit = master
+
+PACKAGES += emagick
+pkg_emagick_name = emagick
+pkg_emagick_description = Wrapper for Graphics/ImageMagick command line tool.
+pkg_emagick_homepage = https://github.com/kivra/emagick
+pkg_emagick_fetch = git
+pkg_emagick_repo = https://github.com/kivra/emagick
+pkg_emagick_commit = master
+
+PACKAGES += emysql
+pkg_emysql_name = emysql
+pkg_emysql_description = Stable, pure Erlang MySQL driver.
+pkg_emysql_homepage = https://github.com/Eonblast/Emysql
+pkg_emysql_fetch = git
+pkg_emysql_repo = https://github.com/Eonblast/Emysql
+pkg_emysql_commit = master
+
+PACKAGES += enm
+pkg_enm_name = enm
+pkg_enm_description = Erlang driver for nanomsg
+pkg_enm_homepage = https://github.com/basho/enm
+pkg_enm_fetch = git
+pkg_enm_repo = https://github.com/basho/enm
+pkg_enm_commit = master
+
+PACKAGES += entop
+pkg_entop_name = entop
+pkg_entop_description = A top-like tool for monitoring an Erlang node
+pkg_entop_homepage = https://github.com/mazenharake/entop
+pkg_entop_fetch = git
+pkg_entop_repo = https://github.com/mazenharake/entop
+pkg_entop_commit = master
+
+PACKAGES += epcap
+pkg_epcap_name = epcap
+pkg_epcap_description = Erlang packet capture interface using pcap
+pkg_epcap_homepage = https://github.com/msantos/epcap
+pkg_epcap_fetch = git
+pkg_epcap_repo = https://github.com/msantos/epcap
+pkg_epcap_commit = master
+
+PACKAGES += eper
+pkg_eper_name = eper
+pkg_eper_description = Erlang performance and debugging tools.
+pkg_eper_homepage = https://github.com/massemanet/eper
+pkg_eper_fetch = git
+pkg_eper_repo = https://github.com/massemanet/eper
+pkg_eper_commit = master
+
+PACKAGES += epgsql
+pkg_epgsql_name = epgsql
+pkg_epgsql_description = Erlang PostgreSQL client library.
+pkg_epgsql_homepage = https://github.com/epgsql/epgsql
+pkg_epgsql_fetch = git
+pkg_epgsql_repo = https://github.com/epgsql/epgsql
+pkg_epgsql_commit = master
+
+PACKAGES += episcina
+pkg_episcina_name = episcina
+pkg_episcina_description = A simple non intrusive resource pool for connections
+pkg_episcina_homepage = https://github.com/erlware/episcina
+pkg_episcina_fetch = git
+pkg_episcina_repo = https://github.com/erlware/episcina
+pkg_episcina_commit = master
+
+PACKAGES += eplot
+pkg_eplot_name = eplot
+pkg_eplot_description = A plot engine written in erlang.
+pkg_eplot_homepage = https://github.com/psyeugenic/eplot
+pkg_eplot_fetch = git
+pkg_eplot_repo = https://github.com/psyeugenic/eplot
+pkg_eplot_commit = master
+
+PACKAGES += epocxy
+pkg_epocxy_name = epocxy
+pkg_epocxy_description = Erlang Patterns of Concurrency
+pkg_epocxy_homepage = https://github.com/duomark/epocxy
+pkg_epocxy_fetch = git
+pkg_epocxy_repo = https://github.com/duomark/epocxy
+pkg_epocxy_commit = master
+
+PACKAGES += epubnub
+pkg_epubnub_name = epubnub
+pkg_epubnub_description = Erlang PubNub API
+pkg_epubnub_homepage = https://github.com/tsloughter/epubnub
+pkg_epubnub_fetch = git
+pkg_epubnub_repo = https://github.com/tsloughter/epubnub
+pkg_epubnub_commit = master
+
+PACKAGES += eqm
+pkg_eqm_name = eqm
+pkg_eqm_description = Erlang pub sub with supply-demand channels
+pkg_eqm_homepage = https://github.com/loucash/eqm
+pkg_eqm_fetch = git
+pkg_eqm_repo = https://github.com/loucash/eqm
+pkg_eqm_commit = master
+
+PACKAGES += eredis
+pkg_eredis_name = eredis
+pkg_eredis_description = Erlang Redis client
+pkg_eredis_homepage = https://github.com/wooga/eredis
+pkg_eredis_fetch = git
+pkg_eredis_repo = https://github.com/wooga/eredis
+pkg_eredis_commit = master
+
+PACKAGES += eredis_pool
+pkg_eredis_pool_name = eredis_pool
+pkg_eredis_pool_description = eredis_pool is Pool of Redis clients, using eredis and poolboy.
+pkg_eredis_pool_homepage = https://github.com/hiroeorz/eredis_pool
+pkg_eredis_pool_fetch = git
+pkg_eredis_pool_repo = https://github.com/hiroeorz/eredis_pool
+pkg_eredis_pool_commit = master
+
+PACKAGES += erl_streams
+pkg_erl_streams_name = erl_streams
+pkg_erl_streams_description = Streams in Erlang
+pkg_erl_streams_homepage = https://github.com/epappas/erl_streams
+pkg_erl_streams_fetch = git
+pkg_erl_streams_repo = https://github.com/epappas/erl_streams
+pkg_erl_streams_commit = master
+
+PACKAGES += erlang_cep
+pkg_erlang_cep_name = erlang_cep
+pkg_erlang_cep_description = A basic CEP package written in erlang
+pkg_erlang_cep_homepage = https://github.com/danmacklin/erlang_cep
+pkg_erlang_cep_fetch = git
+pkg_erlang_cep_repo = https://github.com/danmacklin/erlang_cep
+pkg_erlang_cep_commit = master
+
+PACKAGES += erlang_js
+pkg_erlang_js_name = erlang_js
+pkg_erlang_js_description = A linked-in driver for Erlang to Mozilla's Spidermonkey Javascript runtime.
+pkg_erlang_js_homepage = https://github.com/basho/erlang_js
+pkg_erlang_js_fetch = git
+pkg_erlang_js_repo = https://github.com/basho/erlang_js
+pkg_erlang_js_commit = master
+
+PACKAGES += erlang_localtime
+pkg_erlang_localtime_name = erlang_localtime
+pkg_erlang_localtime_description = Erlang library for conversion from one local time to another
+pkg_erlang_localtime_homepage = https://github.com/dmitryme/erlang_localtime
+pkg_erlang_localtime_fetch = git
+pkg_erlang_localtime_repo = https://github.com/dmitryme/erlang_localtime
+pkg_erlang_localtime_commit = master
+
+PACKAGES += erlang_smtp
+pkg_erlang_smtp_name = erlang_smtp
+pkg_erlang_smtp_description = Erlang SMTP and POP3 server code.
+pkg_erlang_smtp_homepage = https://github.com/tonyg/erlang-smtp
+pkg_erlang_smtp_fetch = git
+pkg_erlang_smtp_repo = https://github.com/tonyg/erlang-smtp
+pkg_erlang_smtp_commit = master
+
+PACKAGES += erlang_term
+pkg_erlang_term_name = erlang_term
+pkg_erlang_term_description = Erlang Term Info
+pkg_erlang_term_homepage = https://github.com/okeuday/erlang_term
+pkg_erlang_term_fetch = git
+pkg_erlang_term_repo = https://github.com/okeuday/erlang_term
+pkg_erlang_term_commit = master
+
+PACKAGES += erlastic_search
+pkg_erlastic_search_name = erlastic_search
+pkg_erlastic_search_description = An Erlang app for communicating with Elastic Search's rest interface.
+pkg_erlastic_search_homepage = https://github.com/tsloughter/erlastic_search
+pkg_erlastic_search_fetch = git
+pkg_erlastic_search_repo = https://github.com/tsloughter/erlastic_search
+pkg_erlastic_search_commit = master
+
+PACKAGES += erlasticsearch
+pkg_erlasticsearch_name = erlasticsearch
+pkg_erlasticsearch_description = Erlang thrift interface to elastic_search
+pkg_erlasticsearch_homepage = https://github.com/dieswaytoofast/erlasticsearch
+pkg_erlasticsearch_fetch = git
+pkg_erlasticsearch_repo = https://github.com/dieswaytoofast/erlasticsearch
+pkg_erlasticsearch_commit = master
+
+PACKAGES += erlbrake
+pkg_erlbrake_name = erlbrake
+pkg_erlbrake_description = Erlang Airbrake notification client
+pkg_erlbrake_homepage = https://github.com/kenpratt/erlbrake
+pkg_erlbrake_fetch = git
+pkg_erlbrake_repo = https://github.com/kenpratt/erlbrake
+pkg_erlbrake_commit = master
+
+PACKAGES += erlcloud
+pkg_erlcloud_name = erlcloud
+pkg_erlcloud_description = Cloud Computing library for erlang (Amazon EC2, S3, SQS, SimpleDB, Mechanical Turk, ELB)
+pkg_erlcloud_homepage = https://github.com/gleber/erlcloud
+pkg_erlcloud_fetch = git
+pkg_erlcloud_repo = https://github.com/gleber/erlcloud
+pkg_erlcloud_commit = master
+
+PACKAGES += erlcron
+pkg_erlcron_name = erlcron
+pkg_erlcron_description = Erlang cronish system
+pkg_erlcron_homepage = https://github.com/erlware/erlcron
+pkg_erlcron_fetch = git
+pkg_erlcron_repo = https://github.com/erlware/erlcron
+pkg_erlcron_commit = master
+
+PACKAGES += erldb
+pkg_erldb_name = erldb
+pkg_erldb_description = ORM (Object-relational mapping) application implemented in Erlang
+pkg_erldb_homepage = http://erldb.org
+pkg_erldb_fetch = git
+pkg_erldb_repo = https://github.com/erldb/erldb
+pkg_erldb_commit = master
+
+PACKAGES += erldis
+pkg_erldis_name = erldis
+pkg_erldis_description = redis erlang client library
+pkg_erldis_homepage = https://github.com/cstar/erldis
+pkg_erldis_fetch = git
+pkg_erldis_repo = https://github.com/cstar/erldis
+pkg_erldis_commit = master
+
+PACKAGES += erldns
+pkg_erldns_name = erldns
+pkg_erldns_description = DNS server, in erlang.
+pkg_erldns_homepage = https://github.com/aetrion/erl-dns
+pkg_erldns_fetch = git
+pkg_erldns_repo = https://github.com/aetrion/erl-dns
+pkg_erldns_commit = master
+
+PACKAGES += erldocker
+pkg_erldocker_name = erldocker
+pkg_erldocker_description = Docker Remote API client for Erlang
+pkg_erldocker_homepage = https://github.com/proger/erldocker
+pkg_erldocker_fetch = git
+pkg_erldocker_repo = https://github.com/proger/erldocker
+pkg_erldocker_commit = master
+
+PACKAGES += erlfsmon
+pkg_erlfsmon_name = erlfsmon
+pkg_erlfsmon_description = Erlang filesystem event watcher for Linux and OSX
+pkg_erlfsmon_homepage = https://github.com/proger/erlfsmon
+pkg_erlfsmon_fetch = git
+pkg_erlfsmon_repo = https://github.com/proger/erlfsmon
+pkg_erlfsmon_commit = master
+
+PACKAGES += erlgit
+pkg_erlgit_name = erlgit
+pkg_erlgit_description = Erlang convenience wrapper around git executable
+pkg_erlgit_homepage = https://github.com/gleber/erlgit
+pkg_erlgit_fetch = git
+pkg_erlgit_repo = https://github.com/gleber/erlgit
+pkg_erlgit_commit = master
+
+PACKAGES += erlguten
+pkg_erlguten_name = erlguten
+pkg_erlguten_description = ErlGuten is a system for high-quality typesetting, written purely in Erlang.
+pkg_erlguten_homepage = https://github.com/richcarl/erlguten
+pkg_erlguten_fetch = git
+pkg_erlguten_repo = https://github.com/richcarl/erlguten
+pkg_erlguten_commit = master
+
+PACKAGES += erlmc
+pkg_erlmc_name = erlmc
+pkg_erlmc_description = Erlang memcached binary protocol client
+pkg_erlmc_homepage = https://github.com/jkvor/erlmc
+pkg_erlmc_fetch = git
+pkg_erlmc_repo = https://github.com/jkvor/erlmc
+pkg_erlmc_commit = master
+
+PACKAGES += erlmongo
+pkg_erlmongo_name = erlmongo
+pkg_erlmongo_description = Record based Erlang driver for MongoDB with gridfs support
+pkg_erlmongo_homepage = https://github.com/SergejJurecko/erlmongo
+pkg_erlmongo_fetch = git
+pkg_erlmongo_repo = https://github.com/SergejJurecko/erlmongo
+pkg_erlmongo_commit = master
+
+PACKAGES += erlog
+pkg_erlog_name = erlog
+pkg_erlog_description = Prolog interpreter in and for Erlang
+pkg_erlog_homepage = https://github.com/rvirding/erlog
+pkg_erlog_fetch = git
+pkg_erlog_repo = https://github.com/rvirding/erlog
+pkg_erlog_commit = master
+
+PACKAGES += erlpass
+pkg_erlpass_name = erlpass
+pkg_erlpass_description = A library to handle password hashing and changing in a safe manner, independent from any kind of storage whatsoever.
+pkg_erlpass_homepage = https://github.com/ferd/erlpass
+pkg_erlpass_fetch = git
+pkg_erlpass_repo = https://github.com/ferd/erlpass
+pkg_erlpass_commit = master
+
+PACKAGES += erlport
+pkg_erlport_name = erlport
+pkg_erlport_description = ErlPort - connect Erlang to other languages
+pkg_erlport_homepage = https://github.com/hdima/erlport
+pkg_erlport_fetch = git
+pkg_erlport_repo = https://github.com/hdima/erlport
+pkg_erlport_commit = master
+
+PACKAGES += erlsh
+pkg_erlsh_name = erlsh
+pkg_erlsh_description = Erlang shell tools
+pkg_erlsh_homepage = https://github.com/proger/erlsh
+pkg_erlsh_fetch = git
+pkg_erlsh_repo = https://github.com/proger/erlsh
+pkg_erlsh_commit = master
+
+PACKAGES += erlsha2
+pkg_erlsha2_name = erlsha2
+pkg_erlsha2_description = SHA-224, SHA-256, SHA-384, SHA-512 implemented in Erlang NIFs.
+pkg_erlsha2_homepage = https://github.com/vinoski/erlsha2
+pkg_erlsha2_fetch = git
+pkg_erlsha2_repo = https://github.com/vinoski/erlsha2
+pkg_erlsha2_commit = master
+
+PACKAGES += erlsom
+pkg_erlsom_name = erlsom
+pkg_erlsom_description = XML parser for Erlang
+pkg_erlsom_homepage = https://github.com/willemdj/erlsom
+pkg_erlsom_fetch = git
+pkg_erlsom_repo = https://github.com/willemdj/erlsom
+pkg_erlsom_commit = master
+
+PACKAGES += erlubi
+pkg_erlubi_name = erlubi
+pkg_erlubi_description = Ubigraph Erlang Client (and Process Visualizer)
+pkg_erlubi_homepage = https://github.com/krestenkrab/erlubi
+pkg_erlubi_fetch = git
+pkg_erlubi_repo = https://github.com/krestenkrab/erlubi
+pkg_erlubi_commit = master
+
+PACKAGES += erlvolt
+pkg_erlvolt_name = erlvolt
+pkg_erlvolt_description = VoltDB Erlang Client Driver
+pkg_erlvolt_homepage = https://github.com/VoltDB/voltdb-client-erlang
+pkg_erlvolt_fetch = git
+pkg_erlvolt_repo = https://github.com/VoltDB/voltdb-client-erlang
+pkg_erlvolt_commit = master
+
+PACKAGES += erlware_commons
+pkg_erlware_commons_name = erlware_commons
+pkg_erlware_commons_description = Erlware Commons is an Erlware project focused on all aspects of reusable Erlang components.
+pkg_erlware_commons_homepage = https://github.com/erlware/erlware_commons
+pkg_erlware_commons_fetch = git
+pkg_erlware_commons_repo = https://github.com/erlware/erlware_commons
+pkg_erlware_commons_commit = master
+
+PACKAGES += erlydtl
+pkg_erlydtl_name = erlydtl
+pkg_erlydtl_description = Django Template Language for Erlang.
+pkg_erlydtl_homepage = https://github.com/erlydtl/erlydtl
+pkg_erlydtl_fetch = git
+pkg_erlydtl_repo = https://github.com/erlydtl/erlydtl
+pkg_erlydtl_commit = master
+
+PACKAGES += errd
+pkg_errd_name = errd
+pkg_errd_description = Erlang RRDTool library
+pkg_errd_homepage = https://github.com/archaelus/errd
+pkg_errd_fetch = git
+pkg_errd_repo = https://github.com/archaelus/errd
+pkg_errd_commit = master
+
+PACKAGES += erserve
+pkg_erserve_name = erserve
+pkg_erserve_description = Erlang/Rserve communication interface
+pkg_erserve_homepage = https://github.com/del/erserve
+pkg_erserve_fetch = git
+pkg_erserve_repo = https://github.com/del/erserve
+pkg_erserve_commit = master
+
+PACKAGES += erwa
+pkg_erwa_name = erwa
+pkg_erwa_description = A WAMP router and client written in Erlang.
+pkg_erwa_homepage = https://github.com/bwegh/erwa
+pkg_erwa_fetch = git
+pkg_erwa_repo = https://github.com/bwegh/erwa
+pkg_erwa_commit = master
+
+PACKAGES += escalus
+pkg_escalus_name = escalus
+pkg_escalus_description = An XMPP client library in Erlang for conveniently testing XMPP servers
+pkg_escalus_homepage = https://github.com/esl/escalus
+pkg_escalus_fetch = git
+pkg_escalus_repo = https://github.com/esl/escalus
+pkg_escalus_commit = master
+
+PACKAGES += esh_mk
+pkg_esh_mk_name = esh_mk
+pkg_esh_mk_description = esh template engine plugin for erlang.mk
+pkg_esh_mk_homepage = https://github.com/crownedgrouse/esh.mk
+pkg_esh_mk_fetch = git
+pkg_esh_mk_repo = https://github.com/crownedgrouse/esh.mk.git
+pkg_esh_mk_commit = master
+
+PACKAGES += espec
+pkg_espec_name = espec
+pkg_espec_description = ESpec: Behaviour driven development framework for Erlang
+pkg_espec_homepage = https://github.com/lucaspiller/espec
+pkg_espec_fetch = git
+pkg_espec_repo = https://github.com/lucaspiller/espec
+pkg_espec_commit = master
+
+PACKAGES += estatsd
+pkg_estatsd_name = estatsd
+pkg_estatsd_description = Erlang stats aggregation app that periodically flushes data to graphite
+pkg_estatsd_homepage = https://github.com/RJ/estatsd
+pkg_estatsd_fetch = git
+pkg_estatsd_repo = https://github.com/RJ/estatsd
+pkg_estatsd_commit = master
+
+PACKAGES += etap
+pkg_etap_name = etap
+pkg_etap_description = etap is a simple erlang testing library that provides TAP compliant output.
+pkg_etap_homepage = https://github.com/ngerakines/etap
+pkg_etap_fetch = git
+pkg_etap_repo = https://github.com/ngerakines/etap
+pkg_etap_commit = master
+
+PACKAGES += etest
+pkg_etest_name = etest
+pkg_etest_description = A lightweight, convention over configuration test framework for Erlang
+pkg_etest_homepage = https://github.com/wooga/etest
+pkg_etest_fetch = git
+pkg_etest_repo = https://github.com/wooga/etest
+pkg_etest_commit = master
+
+PACKAGES += etest_http
+pkg_etest_http_name = etest_http
+pkg_etest_http_description = etest Assertions around HTTP (client-side)
+pkg_etest_http_homepage = https://github.com/wooga/etest_http
+pkg_etest_http_fetch = git
+pkg_etest_http_repo = https://github.com/wooga/etest_http
+pkg_etest_http_commit = master
+
+PACKAGES += etoml
+pkg_etoml_name = etoml
+pkg_etoml_description = TOML language erlang parser
+pkg_etoml_homepage = https://github.com/kalta/etoml
+pkg_etoml_fetch = git
+pkg_etoml_repo = https://github.com/kalta/etoml
+pkg_etoml_commit = master
+
+PACKAGES += eunit
+pkg_eunit_name = eunit
+pkg_eunit_description = The EUnit lightweight unit testing framework for Erlang - this is the canonical development repository.
+pkg_eunit_homepage = https://github.com/richcarl/eunit
+pkg_eunit_fetch = git
+pkg_eunit_repo = https://github.com/richcarl/eunit
+pkg_eunit_commit = master
+
+PACKAGES += eunit_formatters
+pkg_eunit_formatters_name = eunit_formatters
+pkg_eunit_formatters_description = Because eunit's output sucks. Let's make it better.
+pkg_eunit_formatters_homepage = https://github.com/seancribbs/eunit_formatters
+pkg_eunit_formatters_fetch = git
+pkg_eunit_formatters_repo = https://github.com/seancribbs/eunit_formatters
+pkg_eunit_formatters_commit = master
+
+PACKAGES += euthanasia
+pkg_euthanasia_name = euthanasia
+pkg_euthanasia_description = Merciful killer for your Erlang processes
+pkg_euthanasia_homepage = https://github.com/doubleyou/euthanasia
+pkg_euthanasia_fetch = git
+pkg_euthanasia_repo = https://github.com/doubleyou/euthanasia
+pkg_euthanasia_commit = master
+
+PACKAGES += evum
+pkg_evum_name = evum
+pkg_evum_description = Spawn Linux VMs as Erlang processes in the Erlang VM
+pkg_evum_homepage = https://github.com/msantos/evum
+pkg_evum_fetch = git
+pkg_evum_repo = https://github.com/msantos/evum
+pkg_evum_commit = master
+
+PACKAGES += exec
+pkg_exec_name = erlexec
+pkg_exec_description = Execute and control OS processes from Erlang/OTP.
+pkg_exec_homepage = http://saleyn.github.com/erlexec
+pkg_exec_fetch = git
+pkg_exec_repo = https://github.com/saleyn/erlexec
+pkg_exec_commit = master
+
+PACKAGES += exml
+pkg_exml_name = exml
+pkg_exml_description = XML parsing library in Erlang
+pkg_exml_homepage = https://github.com/paulgray/exml
+pkg_exml_fetch = git
+pkg_exml_repo = https://github.com/paulgray/exml
+pkg_exml_commit = master
+
+PACKAGES += exometer
+pkg_exometer_name = exometer
+pkg_exometer_description = Basic measurement objects and probe behavior
+pkg_exometer_homepage = https://github.com/Feuerlabs/exometer
+pkg_exometer_fetch = git
+pkg_exometer_repo = https://github.com/Feuerlabs/exometer
+pkg_exometer_commit = master
+
+PACKAGES += exs1024
+pkg_exs1024_name = exs1024
+pkg_exs1024_description = Xorshift1024star pseudo random number generator for Erlang.
+pkg_exs1024_homepage = https://github.com/jj1bdx/exs1024
+pkg_exs1024_fetch = git
+pkg_exs1024_repo = https://github.com/jj1bdx/exs1024
+pkg_exs1024_commit = master
+
+PACKAGES += exs64
+pkg_exs64_name = exs64
+pkg_exs64_description = Xorshift64star pseudo random number generator for Erlang.
+pkg_exs64_homepage = https://github.com/jj1bdx/exs64
+pkg_exs64_fetch = git
+pkg_exs64_repo = https://github.com/jj1bdx/exs64
+pkg_exs64_commit = master
+
+PACKAGES += exsplus116
+pkg_exsplus116_name = exsplus116
+pkg_exsplus116_description = Xorshift116plus for Erlang
+pkg_exsplus116_homepage = https://github.com/jj1bdx/exsplus116
+pkg_exsplus116_fetch = git
+pkg_exsplus116_repo = https://github.com/jj1bdx/exsplus116
+pkg_exsplus116_commit = master
+
+PACKAGES += exsplus128
+pkg_exsplus128_name = exsplus128
+pkg_exsplus128_description = Xorshift128plus pseudo random number generator for Erlang.
+pkg_exsplus128_homepage = https://github.com/jj1bdx/exsplus128
+pkg_exsplus128_fetch = git
+pkg_exsplus128_repo = https://github.com/jj1bdx/exsplus128
+pkg_exsplus128_commit = master
+
+PACKAGES += ezmq
+pkg_ezmq_name = ezmq
+pkg_ezmq_description = zMQ implemented in Erlang
+pkg_ezmq_homepage = https://github.com/RoadRunnr/ezmq
+pkg_ezmq_fetch = git
+pkg_ezmq_repo = https://github.com/RoadRunnr/ezmq
+pkg_ezmq_commit = master
+
+PACKAGES += ezmtp
+pkg_ezmtp_name = ezmtp
+pkg_ezmtp_description = ZMTP protocol in pure Erlang.
+pkg_ezmtp_homepage = https://github.com/a13x/ezmtp
+pkg_ezmtp_fetch = git
+pkg_ezmtp_repo = https://github.com/a13x/ezmtp
+pkg_ezmtp_commit = master
+
+PACKAGES += fast_disk_log
+pkg_fast_disk_log_name = fast_disk_log
+pkg_fast_disk_log_description = Pool-based asynchronous Erlang disk logger
+pkg_fast_disk_log_homepage = https://github.com/lpgauth/fast_disk_log
+pkg_fast_disk_log_fetch = git
+pkg_fast_disk_log_repo = https://github.com/lpgauth/fast_disk_log
+pkg_fast_disk_log_commit = master
+
+PACKAGES += feeder
+pkg_feeder_name = feeder
+pkg_feeder_description = Stream parse RSS and Atom formatted XML feeds.
+pkg_feeder_homepage = https://github.com/michaelnisi/feeder
+pkg_feeder_fetch = git
+pkg_feeder_repo = https://github.com/michaelnisi/feeder
+pkg_feeder_commit = master
+
+PACKAGES += find_crate
+pkg_find_crate_name = find_crate
+pkg_find_crate_description = Find Rust libs and exes in Erlang application priv directory
+pkg_find_crate_homepage = https://github.com/goertzenator/find_crate
+pkg_find_crate_fetch = git
+pkg_find_crate_repo = https://github.com/goertzenator/find_crate
+pkg_find_crate_commit = master
+
+PACKAGES += fix
+pkg_fix_name = fix
+pkg_fix_description = http://fixprotocol.org/ implementation.
+pkg_fix_homepage = https://github.com/maxlapshin/fix
+pkg_fix_fetch = git
+pkg_fix_repo = https://github.com/maxlapshin/fix
+pkg_fix_commit = master
+
+PACKAGES += flower
+pkg_flower_name = flower
+pkg_flower_description = FlowER - an Erlang OpenFlow development platform
+pkg_flower_homepage = https://github.com/travelping/flower
+pkg_flower_fetch = git
+pkg_flower_repo = https://github.com/travelping/flower
+pkg_flower_commit = master
+
+PACKAGES += fn
+pkg_fn_name = fn
+pkg_fn_description = Function utilities for Erlang
+pkg_fn_homepage = https://github.com/reiddraper/fn
+pkg_fn_fetch = git
+pkg_fn_repo = https://github.com/reiddraper/fn
+pkg_fn_commit = master
+
+PACKAGES += folsom
+pkg_folsom_name = folsom
+pkg_folsom_description = Expose Erlang Events and Metrics
+pkg_folsom_homepage = https://github.com/boundary/folsom
+pkg_folsom_fetch = git
+pkg_folsom_repo = https://github.com/boundary/folsom
+pkg_folsom_commit = master
+
+PACKAGES += folsom_cowboy
+pkg_folsom_cowboy_name = folsom_cowboy
+pkg_folsom_cowboy_description = A Cowboy based Folsom HTTP Wrapper.
+pkg_folsom_cowboy_homepage = https://github.com/boundary/folsom_cowboy
+pkg_folsom_cowboy_fetch = git
+pkg_folsom_cowboy_repo = https://github.com/boundary/folsom_cowboy
+pkg_folsom_cowboy_commit = master
+
+PACKAGES += folsomite
+pkg_folsomite_name = folsomite
+pkg_folsomite_description = blow up your graphite / riemann server with folsom metrics
+pkg_folsomite_homepage = https://github.com/campanja/folsomite
+pkg_folsomite_fetch = git
+pkg_folsomite_repo = https://github.com/campanja/folsomite
+pkg_folsomite_commit = master
+
+PACKAGES += fs
+pkg_fs_name = fs
+pkg_fs_description = Erlang FileSystem Listener
+pkg_fs_homepage = https://github.com/synrc/fs
+pkg_fs_fetch = git
+pkg_fs_repo = https://github.com/synrc/fs
+pkg_fs_commit = master
+
+PACKAGES += fuse
+pkg_fuse_name = fuse
+pkg_fuse_description = A Circuit Breaker for Erlang
+pkg_fuse_homepage = https://github.com/jlouis/fuse
+pkg_fuse_fetch = git
+pkg_fuse_repo = https://github.com/jlouis/fuse
+pkg_fuse_commit = master
+
+PACKAGES += gcm
+pkg_gcm_name = gcm
+pkg_gcm_description = An Erlang application for Google Cloud Messaging
+pkg_gcm_homepage = https://github.com/pdincau/gcm-erlang
+pkg_gcm_fetch = git
+pkg_gcm_repo = https://github.com/pdincau/gcm-erlang
+pkg_gcm_commit = master
+
+PACKAGES += gcprof
+pkg_gcprof_name = gcprof
+pkg_gcprof_description = Garbage Collection profiler for Erlang
+pkg_gcprof_homepage = https://github.com/knutin/gcprof
+pkg_gcprof_fetch = git
+pkg_gcprof_repo = https://github.com/knutin/gcprof
+pkg_gcprof_commit = master
+
+PACKAGES += geas
+pkg_geas_name = geas
+pkg_geas_description = Guess Erlang Application Scattering
+pkg_geas_homepage = https://github.com/crownedgrouse/geas
+pkg_geas_fetch = git
+pkg_geas_repo = https://github.com/crownedgrouse/geas
+pkg_geas_commit = master
+
+PACKAGES += geef
+pkg_geef_name = geef
+pkg_geef_description = Git NEEEEF (Erlang NIF)
+pkg_geef_homepage = https://github.com/carlosmn/geef
+pkg_geef_fetch = git
+pkg_geef_repo = https://github.com/carlosmn/geef
+pkg_geef_commit = master
+
+PACKAGES += gen_coap
+pkg_gen_coap_name = gen_coap
+pkg_gen_coap_description = Generic Erlang CoAP Client/Server
+pkg_gen_coap_homepage = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_fetch = git
+pkg_gen_coap_repo = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_commit = master
+
+PACKAGES += gen_cycle
+pkg_gen_cycle_name = gen_cycle
+pkg_gen_cycle_description = Simple, generic OTP behaviour for recurring tasks
+pkg_gen_cycle_homepage = https://github.com/aerosol/gen_cycle
+pkg_gen_cycle_fetch = git
+pkg_gen_cycle_repo = https://github.com/aerosol/gen_cycle
+pkg_gen_cycle_commit = develop
+
+PACKAGES += gen_icmp
+pkg_gen_icmp_name = gen_icmp
+pkg_gen_icmp_description = Erlang interface to ICMP sockets
+pkg_gen_icmp_homepage = https://github.com/msantos/gen_icmp
+pkg_gen_icmp_fetch = git
+pkg_gen_icmp_repo = https://github.com/msantos/gen_icmp
+pkg_gen_icmp_commit = master
+
+PACKAGES += gen_leader
+pkg_gen_leader_name = gen_leader
+pkg_gen_leader_description = leader election behavior
+pkg_gen_leader_homepage = https://github.com/garret-smith/gen_leader_revival
+pkg_gen_leader_fetch = git
+pkg_gen_leader_repo = https://github.com/garret-smith/gen_leader_revival
+pkg_gen_leader_commit = master
+
+PACKAGES += gen_nb_server
+pkg_gen_nb_server_name = gen_nb_server
+pkg_gen_nb_server_description = OTP behavior for writing non-blocking servers
+pkg_gen_nb_server_homepage = https://github.com/kevsmith/gen_nb_server
+pkg_gen_nb_server_fetch = git
+pkg_gen_nb_server_repo = https://github.com/kevsmith/gen_nb_server
+pkg_gen_nb_server_commit = master
+
+PACKAGES += gen_paxos
+pkg_gen_paxos_name = gen_paxos
+pkg_gen_paxos_description = An Erlang/OTP-style implementation of the PAXOS distributed consensus protocol
+pkg_gen_paxos_homepage = https://github.com/gburd/gen_paxos
+pkg_gen_paxos_fetch = git
+pkg_gen_paxos_repo = https://github.com/gburd/gen_paxos
+pkg_gen_paxos_commit = master
+
+PACKAGES += gen_rpc
+pkg_gen_rpc_name = gen_rpc
+pkg_gen_rpc_description = A scalable RPC library for Erlang-VM based languages
+pkg_gen_rpc_homepage = https://github.com/priestjim/gen_rpc.git
+pkg_gen_rpc_fetch = git
+pkg_gen_rpc_repo = https://github.com/priestjim/gen_rpc.git
+pkg_gen_rpc_commit = master
+
+PACKAGES += gen_smtp
+pkg_gen_smtp_name = gen_smtp
+pkg_gen_smtp_description = A generic Erlang SMTP server and client that can be extended via callback modules
+pkg_gen_smtp_homepage = https://github.com/Vagabond/gen_smtp
+pkg_gen_smtp_fetch = git
+pkg_gen_smtp_repo = https://github.com/Vagabond/gen_smtp
+pkg_gen_smtp_commit = master
+
+PACKAGES += gen_tracker
+pkg_gen_tracker_name = gen_tracker
+pkg_gen_tracker_description = supervisor with ets handling of children and their metadata
+pkg_gen_tracker_homepage = https://github.com/erlyvideo/gen_tracker
+pkg_gen_tracker_fetch = git
+pkg_gen_tracker_repo = https://github.com/erlyvideo/gen_tracker
+pkg_gen_tracker_commit = master
+
+PACKAGES += gen_unix
+pkg_gen_unix_name = gen_unix
+pkg_gen_unix_description = Erlang Unix socket interface
+pkg_gen_unix_homepage = https://github.com/msantos/gen_unix
+pkg_gen_unix_fetch = git
+pkg_gen_unix_repo = https://github.com/msantos/gen_unix
+pkg_gen_unix_commit = master
+
+PACKAGES += geode
+pkg_geode_name = geode
+pkg_geode_description = geohash/proximity lookup in pure, uncut erlang.
+pkg_geode_homepage = https://github.com/bradfordw/geode
+pkg_geode_fetch = git
+pkg_geode_repo = https://github.com/bradfordw/geode
+pkg_geode_commit = master
+
+PACKAGES += getopt
+pkg_getopt_name = getopt
+pkg_getopt_description = Module to parse command line arguments using the GNU getopt syntax
+pkg_getopt_homepage = https://github.com/jcomellas/getopt
+pkg_getopt_fetch = git
+pkg_getopt_repo = https://github.com/jcomellas/getopt
+pkg_getopt_commit = master
+
+PACKAGES += gettext
+pkg_gettext_name = gettext
+pkg_gettext_description = Erlang internationalization library.
+pkg_gettext_homepage = https://github.com/etnt/gettext
+pkg_gettext_fetch = git
+pkg_gettext_repo = https://github.com/etnt/gettext
+pkg_gettext_commit = master
+
+PACKAGES += giallo
+pkg_giallo_name = giallo
+pkg_giallo_description = Small and flexible web framework on top of Cowboy
+pkg_giallo_homepage = https://github.com/kivra/giallo
+pkg_giallo_fetch = git
+pkg_giallo_repo = https://github.com/kivra/giallo
+pkg_giallo_commit = master
+
+PACKAGES += gin
+pkg_gin_name = gin
+pkg_gin_description = The guards and for Erlang parse_transform
+pkg_gin_homepage = https://github.com/mad-cocktail/gin
+pkg_gin_fetch = git
+pkg_gin_repo = https://github.com/mad-cocktail/gin
+pkg_gin_commit = master
+
+PACKAGES += gitty
+pkg_gitty_name = gitty
+pkg_gitty_description = Git access in erlang
+pkg_gitty_homepage = https://github.com/maxlapshin/gitty
+pkg_gitty_fetch = git
+pkg_gitty_repo = https://github.com/maxlapshin/gitty
+pkg_gitty_commit = master
+
+PACKAGES += gold_fever
+pkg_gold_fever_name = gold_fever
+pkg_gold_fever_description = A Treasure Hunt for Erlangers
+pkg_gold_fever_homepage = https://github.com/inaka/gold_fever
+pkg_gold_fever_fetch = git
+pkg_gold_fever_repo = https://github.com/inaka/gold_fever
+pkg_gold_fever_commit = master
+
+PACKAGES += gpb
+pkg_gpb_name = gpb
+pkg_gpb_description = A Google Protobuf implementation for Erlang
+pkg_gpb_homepage = https://github.com/tomas-abrahamsson/gpb
+pkg_gpb_fetch = git
+pkg_gpb_repo = https://github.com/tomas-abrahamsson/gpb
+pkg_gpb_commit = master
+
+PACKAGES += gproc
+pkg_gproc_name = gproc
+pkg_gproc_description = Extended process registry for Erlang
+pkg_gproc_homepage = https://github.com/uwiger/gproc
+pkg_gproc_fetch = git
+pkg_gproc_repo = https://github.com/uwiger/gproc
+pkg_gproc_commit = master
+
+PACKAGES += grapherl
+pkg_grapherl_name = grapherl
+pkg_grapherl_description = Create graphs of Erlang systems and programs
+pkg_grapherl_homepage = https://github.com/eproxus/grapherl
+pkg_grapherl_fetch = git
+pkg_grapherl_repo = https://github.com/eproxus/grapherl
+pkg_grapherl_commit = master
+
+PACKAGES += grpc
+pkg_grpc_name = grpc
+pkg_grpc_description = gRPC server in Erlang
+pkg_grpc_homepage = https://github.com/Bluehouse-Technology/grpc
+pkg_grpc_fetch = git
+pkg_grpc_repo = https://github.com/Bluehouse-Technology/grpc
+pkg_grpc_commit = master
+
+PACKAGES += grpc_client
+pkg_grpc_client_name = grpc_client
+pkg_grpc_client_description = gRPC client in Erlang
+pkg_grpc_client_homepage = https://github.com/Bluehouse-Technology/grpc_client
+pkg_grpc_client_fetch = git
+pkg_grpc_client_repo = https://github.com/Bluehouse-Technology/grpc_client
+pkg_grpc_client_commit = master
+
+PACKAGES += gun
+pkg_gun_name = gun
+pkg_gun_description = Asynchronous SPDY, HTTP and Websocket client written in Erlang.
+pkg_gun_homepage = http://ninenines.eu
+pkg_gun_fetch = git
+pkg_gun_repo = https://github.com/ninenines/gun
+pkg_gun_commit = master
+
+PACKAGES += gut
+pkg_gut_name = gut
+pkg_gut_description = gut is a template printing, aka scaffolding, tool for Erlang. Like rails generate or yeoman
+pkg_gut_homepage = https://github.com/unbalancedparentheses/gut
+pkg_gut_fetch = git
+pkg_gut_repo = https://github.com/unbalancedparentheses/gut
+pkg_gut_commit = master
+
+PACKAGES += hackney
+pkg_hackney_name = hackney
+pkg_hackney_description = simple HTTP client in Erlang
+pkg_hackney_homepage = https://github.com/benoitc/hackney
+pkg_hackney_fetch = git
+pkg_hackney_repo = https://github.com/benoitc/hackney
+pkg_hackney_commit = master
+
+PACKAGES += hamcrest
+pkg_hamcrest_name = hamcrest
+pkg_hamcrest_description = Erlang port of Hamcrest
+pkg_hamcrest_homepage = https://github.com/hyperthunk/hamcrest-erlang
+pkg_hamcrest_fetch = git
+pkg_hamcrest_repo = https://github.com/hyperthunk/hamcrest-erlang
+pkg_hamcrest_commit = master
+
+PACKAGES += hanoidb
+pkg_hanoidb_name = hanoidb
+pkg_hanoidb_description = Erlang LSM BTree Storage
+pkg_hanoidb_homepage = https://github.com/krestenkrab/hanoidb
+pkg_hanoidb_fetch = git
+pkg_hanoidb_repo = https://github.com/krestenkrab/hanoidb
+pkg_hanoidb_commit = master
+
+PACKAGES += hottub
+pkg_hottub_name = hottub
+pkg_hottub_description = Permanent Erlang Worker Pool
+pkg_hottub_homepage = https://github.com/bfrog/hottub
+pkg_hottub_fetch = git
+pkg_hottub_repo = https://github.com/bfrog/hottub
+pkg_hottub_commit = master
+
+PACKAGES += hpack
+pkg_hpack_name = hpack
+pkg_hpack_description = HPACK Implementation for Erlang
+pkg_hpack_homepage = https://github.com/joedevivo/hpack
+pkg_hpack_fetch = git
+pkg_hpack_repo = https://github.com/joedevivo/hpack
+pkg_hpack_commit = master
+
+PACKAGES += hyper
+pkg_hyper_name = hyper
+pkg_hyper_description = Erlang implementation of HyperLogLog
+pkg_hyper_homepage = https://github.com/GameAnalytics/hyper
+pkg_hyper_fetch = git
+pkg_hyper_repo = https://github.com/GameAnalytics/hyper
+pkg_hyper_commit = master
+
+PACKAGES += i18n
+pkg_i18n_name = i18n
+pkg_i18n_description = International components for unicode from Erlang (unicode, date, string, number, format, locale, localization, transliteration, icu4e)
+pkg_i18n_homepage = https://github.com/erlang-unicode/i18n
+pkg_i18n_fetch = git
+pkg_i18n_repo = https://github.com/erlang-unicode/i18n
+pkg_i18n_commit = master
+
+PACKAGES += ibrowse
+pkg_ibrowse_name = ibrowse
+pkg_ibrowse_description = Erlang HTTP client
+pkg_ibrowse_homepage = https://github.com/cmullaparthi/ibrowse
+pkg_ibrowse_fetch = git
+pkg_ibrowse_repo = https://github.com/cmullaparthi/ibrowse
+pkg_ibrowse_commit = master
+
+PACKAGES += idna
+pkg_idna_name = idna
+pkg_idna_description = Erlang IDNA lib
+pkg_idna_homepage = https://github.com/benoitc/erlang-idna
+pkg_idna_fetch = git
+pkg_idna_repo = https://github.com/benoitc/erlang-idna
+pkg_idna_commit = master
+
+PACKAGES += ierlang
+pkg_ierlang_name = ierlang
+pkg_ierlang_description = An Erlang language kernel for IPython.
+pkg_ierlang_homepage = https://github.com/robbielynch/ierlang
+pkg_ierlang_fetch = git
+pkg_ierlang_repo = https://github.com/robbielynch/ierlang
+pkg_ierlang_commit = master
+
+PACKAGES += iota
+pkg_iota_name = iota
+pkg_iota_description = iota (Inter-dependency Objective Testing Apparatus) - a tool to enforce clean separation of responsibilities in Erlang code
+pkg_iota_homepage = https://github.com/jpgneves/iota
+pkg_iota_fetch = git
+pkg_iota_repo = https://github.com/jpgneves/iota
+pkg_iota_commit = master
+
+PACKAGES += irc_lib
+pkg_irc_lib_name = irc_lib
+pkg_irc_lib_description = Erlang irc client library
+pkg_irc_lib_homepage = https://github.com/OtpChatBot/irc_lib
+pkg_irc_lib_fetch = git
+pkg_irc_lib_repo = https://github.com/OtpChatBot/irc_lib
+pkg_irc_lib_commit = master
+
+PACKAGES += ircd
+pkg_ircd_name = ircd
+pkg_ircd_description = A pluggable IRC daemon application/library for Erlang.
+pkg_ircd_homepage = https://github.com/tonyg/erlang-ircd
+pkg_ircd_fetch = git
+pkg_ircd_repo = https://github.com/tonyg/erlang-ircd
+pkg_ircd_commit = master
+
+PACKAGES += iris
+pkg_iris_name = iris
+pkg_iris_description = Iris Erlang binding
+pkg_iris_homepage = https://github.com/project-iris/iris-erl
+pkg_iris_fetch = git
+pkg_iris_repo = https://github.com/project-iris/iris-erl
+pkg_iris_commit = master
+
+PACKAGES += iso8601
+pkg_iso8601_name = iso8601
+pkg_iso8601_description = Erlang ISO 8601 date formatter/parser
+pkg_iso8601_homepage = https://github.com/seansawyer/erlang_iso8601
+pkg_iso8601_fetch = git
+pkg_iso8601_repo = https://github.com/seansawyer/erlang_iso8601
+pkg_iso8601_commit = master
+
+PACKAGES += jamdb_sybase
+pkg_jamdb_sybase_name = jamdb_sybase
+pkg_jamdb_sybase_description = Erlang driver for SAP Sybase ASE
+pkg_jamdb_sybase_homepage = https://github.com/erlangbureau/jamdb_sybase
+pkg_jamdb_sybase_fetch = git
+pkg_jamdb_sybase_repo = https://github.com/erlangbureau/jamdb_sybase
+pkg_jamdb_sybase_commit = master
+
+PACKAGES += jerg
+pkg_jerg_name = jerg
+pkg_jerg_description = JSON Schema to Erlang Records Generator
+pkg_jerg_homepage = https://github.com/ddossot/jerg
+pkg_jerg_fetch = git
+pkg_jerg_repo = https://github.com/ddossot/jerg
+pkg_jerg_commit = master
+
+PACKAGES += jesse
+pkg_jesse_name = jesse
+pkg_jesse_description = jesse (JSon Schema Erlang) is an implementation of a json schema validator for Erlang.
+pkg_jesse_homepage = https://github.com/for-GET/jesse
+pkg_jesse_fetch = git
+pkg_jesse_repo = https://github.com/for-GET/jesse
+pkg_jesse_commit = master
+
+PACKAGES += jiffy
+pkg_jiffy_name = jiffy
+pkg_jiffy_description = JSON NIFs for Erlang.
+pkg_jiffy_homepage = https://github.com/davisp/jiffy
+pkg_jiffy_fetch = git
+pkg_jiffy_repo = https://github.com/davisp/jiffy
+pkg_jiffy_commit = master
+
+PACKAGES += jiffy_v
+pkg_jiffy_v_name = jiffy_v
+pkg_jiffy_v_description = JSON validation utility
+pkg_jiffy_v_homepage = https://github.com/shizzard/jiffy-v
+pkg_jiffy_v_fetch = git
+pkg_jiffy_v_repo = https://github.com/shizzard/jiffy-v
+pkg_jiffy_v_commit = master
+
+PACKAGES += jobs
+pkg_jobs_name = jobs
+pkg_jobs_description = a Job scheduler for load regulation
+pkg_jobs_homepage = https://github.com/esl/jobs
+pkg_jobs_fetch = git
+pkg_jobs_repo = https://github.com/esl/jobs
+pkg_jobs_commit = master
+
+PACKAGES += joxa
+pkg_joxa_name = joxa
+pkg_joxa_description = A Modern Lisp for the Erlang VM
+pkg_joxa_homepage = https://github.com/joxa/joxa
+pkg_joxa_fetch = git
+pkg_joxa_repo = https://github.com/joxa/joxa
+pkg_joxa_commit = master
+
+PACKAGES += json
+pkg_json_name = json
+pkg_json_description = a high level json library for erlang (17.0+)
+pkg_json_homepage = https://github.com/talentdeficit/json
+pkg_json_fetch = git
+pkg_json_repo = https://github.com/talentdeficit/json
+pkg_json_commit = master
+
+PACKAGES += json_rec
+pkg_json_rec_name = json_rec
+pkg_json_rec_description = JSON to erlang record
+pkg_json_rec_homepage = https://github.com/justinkirby/json_rec
+pkg_json_rec_fetch = git
+pkg_json_rec_repo = https://github.com/justinkirby/json_rec
+pkg_json_rec_commit = master
+
+PACKAGES += jsone
+pkg_jsone_name = jsone
+pkg_jsone_description = An Erlang library for encoding, decoding JSON data.
+pkg_jsone_homepage = https://github.com/sile/jsone.git
+pkg_jsone_fetch = git
+pkg_jsone_repo = https://github.com/sile/jsone.git
+pkg_jsone_commit = master
+
+PACKAGES += jsonerl
+pkg_jsonerl_name = jsonerl
+pkg_jsonerl_description = yet another but slightly different erlang <-> json encoder/decoder
+pkg_jsonerl_homepage = https://github.com/lambder/jsonerl
+pkg_jsonerl_fetch = git
+pkg_jsonerl_repo = https://github.com/lambder/jsonerl
+pkg_jsonerl_commit = master
+
+PACKAGES += jsonpath
+pkg_jsonpath_name = jsonpath
+pkg_jsonpath_description = Fast Erlang JSON data retrieval and updates via javascript-like notation
+pkg_jsonpath_homepage = https://github.com/GeneStevens/jsonpath
+pkg_jsonpath_fetch = git
+pkg_jsonpath_repo = https://github.com/GeneStevens/jsonpath
+pkg_jsonpath_commit = master
+
+PACKAGES += jsonx
+pkg_jsonx_name = jsonx
+pkg_jsonx_description = JSONX is an Erlang library for efficiently decoding and encoding JSON, written in C.
+pkg_jsonx_homepage = https://github.com/iskra/jsonx
+pkg_jsonx_fetch = git
+pkg_jsonx_repo = https://github.com/iskra/jsonx
+pkg_jsonx_commit = master
+
+PACKAGES += jsx
+pkg_jsx_name = jsx
+pkg_jsx_description = An Erlang application for consuming, producing and manipulating JSON.
+pkg_jsx_homepage = https://github.com/talentdeficit/jsx
+pkg_jsx_fetch = git
+pkg_jsx_repo = https://github.com/talentdeficit/jsx
+pkg_jsx_commit = master
+
+PACKAGES += kafka
+pkg_kafka_name = kafka
+pkg_kafka_description = Kafka consumer and producer in Erlang
+pkg_kafka_homepage = https://github.com/wooga/kafka-erlang
+pkg_kafka_fetch = git
+pkg_kafka_repo = https://github.com/wooga/kafka-erlang
+pkg_kafka_commit = master
+
+PACKAGES += kafka_protocol
+pkg_kafka_protocol_name = kafka_protocol
+pkg_kafka_protocol_description = Kafka protocol Erlang library
+pkg_kafka_protocol_homepage = https://github.com/klarna/kafka_protocol
+pkg_kafka_protocol_fetch = git
+pkg_kafka_protocol_repo = https://github.com/klarna/kafka_protocol.git
+pkg_kafka_protocol_commit = master
+
+PACKAGES += kai
+pkg_kai_name = kai
+pkg_kai_description = DHT storage by Takeshi Inoue
+pkg_kai_homepage = https://github.com/synrc/kai
+pkg_kai_fetch = git
+pkg_kai_repo = https://github.com/synrc/kai
+pkg_kai_commit = master
+
+PACKAGES += katja
+pkg_katja_name = katja
+pkg_katja_description = A simple Riemann client written in Erlang.
+pkg_katja_homepage = https://github.com/nifoc/katja
+pkg_katja_fetch = git
+pkg_katja_repo = https://github.com/nifoc/katja
+pkg_katja_commit = master
+
+PACKAGES += kdht
+pkg_kdht_name = kdht
+pkg_kdht_description = kdht is an erlang DHT implementation
+pkg_kdht_homepage = https://github.com/kevinlynx/kdht
+pkg_kdht_fetch = git
+pkg_kdht_repo = https://github.com/kevinlynx/kdht
+pkg_kdht_commit = master
+
+PACKAGES += key2value
+pkg_key2value_name = key2value
+pkg_key2value_description = Erlang 2-way map
+pkg_key2value_homepage = https://github.com/okeuday/key2value
+pkg_key2value_fetch = git
+pkg_key2value_repo = https://github.com/okeuday/key2value
+pkg_key2value_commit = master
+
+PACKAGES += keys1value
+pkg_keys1value_name = keys1value
+pkg_keys1value_description = Erlang set associative map for key lists
+pkg_keys1value_homepage = https://github.com/okeuday/keys1value
+pkg_keys1value_fetch = git
+pkg_keys1value_repo = https://github.com/okeuday/keys1value
+pkg_keys1value_commit = master
+
+PACKAGES += kinetic
+pkg_kinetic_name = kinetic
+pkg_kinetic_description = Erlang Kinesis Client
+pkg_kinetic_homepage = https://github.com/AdRoll/kinetic
+pkg_kinetic_fetch = git
+pkg_kinetic_repo = https://github.com/AdRoll/kinetic
+pkg_kinetic_commit = master
+
+PACKAGES += kjell
+pkg_kjell_name = kjell
+pkg_kjell_description = Erlang Shell
+pkg_kjell_homepage = https://github.com/karlll/kjell
+pkg_kjell_fetch = git
+pkg_kjell_repo = https://github.com/karlll/kjell
+pkg_kjell_commit = master
+
+PACKAGES += kraken
+pkg_kraken_name = kraken
+pkg_kraken_description = Distributed Pubsub Server for Realtime Apps
+pkg_kraken_homepage = https://github.com/Asana/kraken
+pkg_kraken_fetch = git
+pkg_kraken_repo = https://github.com/Asana/kraken
+pkg_kraken_commit = master
+
+PACKAGES += kucumberl
+pkg_kucumberl_name = kucumberl
+pkg_kucumberl_description = A pure-erlang, open-source, implementation of Cucumber
+pkg_kucumberl_homepage = https://github.com/openshine/kucumberl
+pkg_kucumberl_fetch = git
+pkg_kucumberl_repo = https://github.com/openshine/kucumberl
+pkg_kucumberl_commit = master
+
+PACKAGES += kvc
+pkg_kvc_name = kvc
+pkg_kvc_description = KVC - Key Value Coding for Erlang data structures
+pkg_kvc_homepage = https://github.com/etrepum/kvc
+pkg_kvc_fetch = git
+pkg_kvc_repo = https://github.com/etrepum/kvc
+pkg_kvc_commit = master
+
+PACKAGES += kvlists
+pkg_kvlists_name = kvlists
+pkg_kvlists_description = Lists of key-value pairs (decoded JSON) in Erlang
+pkg_kvlists_homepage = https://github.com/jcomellas/kvlists
+pkg_kvlists_fetch = git
+pkg_kvlists_repo = https://github.com/jcomellas/kvlists
+pkg_kvlists_commit = master
+
+PACKAGES += kvs
+pkg_kvs_name = kvs
+pkg_kvs_description = Container and Iterator
+pkg_kvs_homepage = https://github.com/synrc/kvs
+pkg_kvs_fetch = git
+pkg_kvs_repo = https://github.com/synrc/kvs
+pkg_kvs_commit = master
+
+PACKAGES += lager
+pkg_lager_name = lager
+pkg_lager_description = A logging framework for Erlang/OTP.
+pkg_lager_homepage = https://github.com/erlang-lager/lager
+pkg_lager_fetch = git
+pkg_lager_repo = https://github.com/erlang-lager/lager
+pkg_lager_commit = master
+
+PACKAGES += lager_amqp_backend
+pkg_lager_amqp_backend_name = lager_amqp_backend
+pkg_lager_amqp_backend_description = AMQP RabbitMQ Lager backend
+pkg_lager_amqp_backend_homepage = https://github.com/jbrisbin/lager_amqp_backend
+pkg_lager_amqp_backend_fetch = git
+pkg_lager_amqp_backend_repo = https://github.com/jbrisbin/lager_amqp_backend
+pkg_lager_amqp_backend_commit = master
+
+PACKAGES += lager_syslog
+pkg_lager_syslog_name = lager_syslog
+pkg_lager_syslog_description = Syslog backend for lager
+pkg_lager_syslog_homepage = https://github.com/erlang-lager/lager_syslog
+pkg_lager_syslog_fetch = git
+pkg_lager_syslog_repo = https://github.com/erlang-lager/lager_syslog
+pkg_lager_syslog_commit = master
+
+PACKAGES += lambdapad
+pkg_lambdapad_name = lambdapad
+pkg_lambdapad_description = Static site generator using Erlang. Yes, Erlang.
+pkg_lambdapad_homepage = https://github.com/gar1t/lambdapad
+pkg_lambdapad_fetch = git
+pkg_lambdapad_repo = https://github.com/gar1t/lambdapad
+pkg_lambdapad_commit = master
+
+PACKAGES += lasp
+pkg_lasp_name = lasp
+pkg_lasp_description = A Language for Distributed, Eventually Consistent Computations
+pkg_lasp_homepage = http://lasp-lang.org/
+pkg_lasp_fetch = git
+pkg_lasp_repo = https://github.com/lasp-lang/lasp
+pkg_lasp_commit = master
+
+PACKAGES += lasse
+pkg_lasse_name = lasse
+pkg_lasse_description = SSE handler for Cowboy
+pkg_lasse_homepage = https://github.com/inaka/lasse
+pkg_lasse_fetch = git
+pkg_lasse_repo = https://github.com/inaka/lasse
+pkg_lasse_commit = master
+
+PACKAGES += ldap
+pkg_ldap_name = ldap
+pkg_ldap_description = LDAP server written in Erlang
+pkg_ldap_homepage = https://github.com/spawnproc/ldap
+pkg_ldap_fetch = git
+pkg_ldap_repo = https://github.com/spawnproc/ldap
+pkg_ldap_commit = master
+
+PACKAGES += lethink
+pkg_lethink_name = lethink
+pkg_lethink_description = erlang driver for rethinkdb
+pkg_lethink_homepage = https://github.com/taybin/lethink
+pkg_lethink_fetch = git
+pkg_lethink_repo = https://github.com/taybin/lethink
+pkg_lethink_commit = master
+
+PACKAGES += lfe
+pkg_lfe_name = lfe
+pkg_lfe_description = Lisp Flavoured Erlang (LFE)
+pkg_lfe_homepage = https://github.com/rvirding/lfe
+pkg_lfe_fetch = git
+pkg_lfe_repo = https://github.com/rvirding/lfe
+pkg_lfe_commit = master
+
+PACKAGES += ling
+pkg_ling_name = ling
+pkg_ling_description = Erlang on Xen
+pkg_ling_homepage = https://github.com/cloudozer/ling
+pkg_ling_fetch = git
+pkg_ling_repo = https://github.com/cloudozer/ling
+pkg_ling_commit = master
+
+PACKAGES += live
+pkg_live_name = live
+pkg_live_description = Automated module and configuration reloader.
+pkg_live_homepage = http://ninenines.eu
+pkg_live_fetch = git
+pkg_live_repo = https://github.com/ninenines/live
+pkg_live_commit = master
+
+PACKAGES += lmq
+pkg_lmq_name = lmq
+pkg_lmq_description = Lightweight Message Queue
+pkg_lmq_homepage = https://github.com/iij/lmq
+pkg_lmq_fetch = git
+pkg_lmq_repo = https://github.com/iij/lmq
+pkg_lmq_commit = master
+
+PACKAGES += locker
+pkg_locker_name = locker
+pkg_locker_description = Atomic distributed 'check and set' for short-lived keys
+pkg_locker_homepage = https://github.com/wooga/locker
+pkg_locker_fetch = git
+pkg_locker_repo = https://github.com/wooga/locker
+pkg_locker_commit = master
+
+PACKAGES += locks
+pkg_locks_name = locks
+pkg_locks_description = A scalable, deadlock-resolving resource locker
+pkg_locks_homepage = https://github.com/uwiger/locks
+pkg_locks_fetch = git
+pkg_locks_repo = https://github.com/uwiger/locks
+pkg_locks_commit = master
+
+PACKAGES += log4erl
+pkg_log4erl_name = log4erl
+pkg_log4erl_description = A logger for erlang in the spirit of Log4J.
+pkg_log4erl_homepage = https://github.com/ahmednawras/log4erl
+pkg_log4erl_fetch = git
+pkg_log4erl_repo = https://github.com/ahmednawras/log4erl
+pkg_log4erl_commit = master
+
+PACKAGES += lol
+pkg_lol_name = lol
+pkg_lol_description = Lisp on erLang, and programming is fun again
+pkg_lol_homepage = https://github.com/b0oh/lol
+pkg_lol_fetch = git
+pkg_lol_repo = https://github.com/b0oh/lol
+pkg_lol_commit = master
+
+PACKAGES += lucid
+pkg_lucid_name = lucid
+pkg_lucid_description = HTTP/2 server written in Erlang
+pkg_lucid_homepage = https://github.com/tatsuhiro-t/lucid
+pkg_lucid_fetch = git
+pkg_lucid_repo = https://github.com/tatsuhiro-t/lucid
+pkg_lucid_commit = master
+
+PACKAGES += luerl
+pkg_luerl_name = luerl
+pkg_luerl_description = Lua in Erlang
+pkg_luerl_homepage = https://github.com/rvirding/luerl
+pkg_luerl_fetch = git
+pkg_luerl_repo = https://github.com/rvirding/luerl
+pkg_luerl_commit = develop
+
+PACKAGES += luwak
+pkg_luwak_name = luwak
+pkg_luwak_description = Large-object storage interface for Riak
+pkg_luwak_homepage = https://github.com/basho/luwak
+pkg_luwak_fetch = git
+pkg_luwak_repo = https://github.com/basho/luwak
+pkg_luwak_commit = master
+
+PACKAGES += lux
+pkg_lux_name = lux
+pkg_lux_description = Lux (LUcid eXpect scripting) simplifies test automation and provides an Expect-style execution of commands
+pkg_lux_homepage = https://github.com/hawk/lux
+pkg_lux_fetch = git
+pkg_lux_repo = https://github.com/hawk/lux
+pkg_lux_commit = master
+
+PACKAGES += machi
+pkg_machi_name = machi
+pkg_machi_description = Machi file store
+pkg_machi_homepage = https://github.com/basho/machi
+pkg_machi_fetch = git
+pkg_machi_repo = https://github.com/basho/machi
+pkg_machi_commit = master
+
+PACKAGES += mad
+pkg_mad_name = mad
+pkg_mad_description = Small and Fast Rebar Replacement
+pkg_mad_homepage = https://github.com/synrc/mad
+pkg_mad_fetch = git
+pkg_mad_repo = https://github.com/synrc/mad
+pkg_mad_commit = master
+
+PACKAGES += marina
+pkg_marina_name = marina
+pkg_marina_description = Non-blocking Erlang Cassandra CQL3 client
+pkg_marina_homepage = https://github.com/lpgauth/marina
+pkg_marina_fetch = git
+pkg_marina_repo = https://github.com/lpgauth/marina
+pkg_marina_commit = master
+
+PACKAGES += mavg
+pkg_mavg_name = mavg
+pkg_mavg_description = Erlang :: Exponential moving average library
+pkg_mavg_homepage = https://github.com/EchoTeam/mavg
+pkg_mavg_fetch = git
+pkg_mavg_repo = https://github.com/EchoTeam/mavg
+pkg_mavg_commit = master
+
+PACKAGES += mc_erl
+pkg_mc_erl_name = mc_erl
+pkg_mc_erl_description = mc-erl is a server for Minecraft 1.4.7 written in Erlang.
+pkg_mc_erl_homepage = https://github.com/clonejo/mc-erl
+pkg_mc_erl_fetch = git
+pkg_mc_erl_repo = https://github.com/clonejo/mc-erl
+pkg_mc_erl_commit = master
+
+PACKAGES += mcd
+pkg_mcd_name = mcd
+pkg_mcd_description = Fast memcached protocol client in pure Erlang
+pkg_mcd_homepage = https://github.com/EchoTeam/mcd
+pkg_mcd_fetch = git
+pkg_mcd_repo = https://github.com/EchoTeam/mcd
+pkg_mcd_commit = master
+
+PACKAGES += mcerlang
+pkg_mcerlang_name = mcerlang
+pkg_mcerlang_description = The McErlang model checker for Erlang
+pkg_mcerlang_homepage = https://github.com/fredlund/McErlang
+pkg_mcerlang_fetch = git
+pkg_mcerlang_repo = https://github.com/fredlund/McErlang
+pkg_mcerlang_commit = master
+
+PACKAGES += meck
+pkg_meck_name = meck
+pkg_meck_description = A mocking library for Erlang
+pkg_meck_homepage = https://github.com/eproxus/meck
+pkg_meck_fetch = git
+pkg_meck_repo = https://github.com/eproxus/meck
+pkg_meck_commit = master
+
+PACKAGES += mekao
+pkg_mekao_name = mekao
+pkg_mekao_description = SQL constructor
+pkg_mekao_homepage = https://github.com/ddosia/mekao
+pkg_mekao_fetch = git
+pkg_mekao_repo = https://github.com/ddosia/mekao
+pkg_mekao_commit = master
+
+PACKAGES += memo
+pkg_memo_name = memo
+pkg_memo_description = Erlang memoization server
+pkg_memo_homepage = https://github.com/tuncer/memo
+pkg_memo_fetch = git
+pkg_memo_repo = https://github.com/tuncer/memo
+pkg_memo_commit = master
+
+PACKAGES += merge_index
+pkg_merge_index_name = merge_index
+pkg_merge_index_description = MergeIndex is an Erlang library for storing ordered sets on disk. It is very similar to an SSTable (in Google's Bigtable) or an HFile (in Hadoop).
+pkg_merge_index_homepage = https://github.com/basho/merge_index
+pkg_merge_index_fetch = git
+pkg_merge_index_repo = https://github.com/basho/merge_index
+pkg_merge_index_commit = master
+
+PACKAGES += merl
+pkg_merl_name = merl
+pkg_merl_description = Metaprogramming in Erlang
+pkg_merl_homepage = https://github.com/richcarl/merl
+pkg_merl_fetch = git
+pkg_merl_repo = https://github.com/richcarl/merl
+pkg_merl_commit = master
+
+PACKAGES += mimerl
+pkg_mimerl_name = mimerl
+pkg_mimerl_description = library to handle mimetypes
+pkg_mimerl_homepage = https://github.com/benoitc/mimerl
+pkg_mimerl_fetch = git
+pkg_mimerl_repo = https://github.com/benoitc/mimerl
+pkg_mimerl_commit = master
+
+PACKAGES += mimetypes
+pkg_mimetypes_name = mimetypes
+pkg_mimetypes_description = Erlang MIME types library
+pkg_mimetypes_homepage = https://github.com/spawngrid/mimetypes
+pkg_mimetypes_fetch = git
+pkg_mimetypes_repo = https://github.com/spawngrid/mimetypes
+pkg_mimetypes_commit = master
+
+PACKAGES += mixer
+pkg_mixer_name = mixer
+pkg_mixer_description = Mix in functions from other modules
+pkg_mixer_homepage = https://github.com/chef/mixer
+pkg_mixer_fetch = git
+pkg_mixer_repo = https://github.com/chef/mixer
+pkg_mixer_commit = master
+
+PACKAGES += mochiweb
+pkg_mochiweb_name = mochiweb
+pkg_mochiweb_description = MochiWeb is an Erlang library for building lightweight HTTP servers.
+pkg_mochiweb_homepage = https://github.com/mochi/mochiweb
+pkg_mochiweb_fetch = git
+pkg_mochiweb_repo = https://github.com/mochi/mochiweb
+pkg_mochiweb_commit = master
+
+PACKAGES += mochiweb_xpath
+pkg_mochiweb_xpath_name = mochiweb_xpath
+pkg_mochiweb_xpath_description = XPath support for mochiweb's html parser
+pkg_mochiweb_xpath_homepage = https://github.com/retnuh/mochiweb_xpath
+pkg_mochiweb_xpath_fetch = git
+pkg_mochiweb_xpath_repo = https://github.com/retnuh/mochiweb_xpath
+pkg_mochiweb_xpath_commit = master
+
+PACKAGES += mockgyver
+pkg_mockgyver_name = mockgyver
+pkg_mockgyver_description = A mocking library for Erlang
+pkg_mockgyver_homepage = https://github.com/klajo/mockgyver
+pkg_mockgyver_fetch = git
+pkg_mockgyver_repo = https://github.com/klajo/mockgyver
+pkg_mockgyver_commit = master
+
+PACKAGES += modlib
+pkg_modlib_name = modlib
+pkg_modlib_description = Web framework based on Erlang's inets httpd
+pkg_modlib_homepage = https://github.com/gar1t/modlib
+pkg_modlib_fetch = git
+pkg_modlib_repo = https://github.com/gar1t/modlib
+pkg_modlib_commit = master
+
+PACKAGES += mongodb
+pkg_mongodb_name = mongodb
+pkg_mongodb_description = MongoDB driver for Erlang
+pkg_mongodb_homepage = https://github.com/comtihon/mongodb-erlang
+pkg_mongodb_fetch = git
+pkg_mongodb_repo = https://github.com/comtihon/mongodb-erlang
+pkg_mongodb_commit = master
+
+PACKAGES += mongooseim
+pkg_mongooseim_name = mongooseim
+pkg_mongooseim_description = Jabber / XMPP server with focus on performance and scalability, by Erlang Solutions
+pkg_mongooseim_homepage = https://www.erlang-solutions.com/products/mongooseim-massively-scalable-ejabberd-platform
+pkg_mongooseim_fetch = git
+pkg_mongooseim_repo = https://github.com/esl/MongooseIM
+pkg_mongooseim_commit = master
+
+PACKAGES += moyo
+pkg_moyo_name = moyo
+pkg_moyo_description = Erlang utility functions library
+pkg_moyo_homepage = https://github.com/dwango/moyo
+pkg_moyo_fetch = git
+pkg_moyo_repo = https://github.com/dwango/moyo
+pkg_moyo_commit = master
+
+PACKAGES += msgpack
+pkg_msgpack_name = msgpack
+pkg_msgpack_description = MessagePack (de)serializer implementation for Erlang
+pkg_msgpack_homepage = https://github.com/msgpack/msgpack-erlang
+pkg_msgpack_fetch = git
+pkg_msgpack_repo = https://github.com/msgpack/msgpack-erlang
+pkg_msgpack_commit = master
+
+PACKAGES += mu2
+pkg_mu2_name = mu2
+pkg_mu2_description = Erlang mutation testing tool
+pkg_mu2_homepage = https://github.com/ramsay-t/mu2
+pkg_mu2_fetch = git
+pkg_mu2_repo = https://github.com/ramsay-t/mu2
+pkg_mu2_commit = master
+
+PACKAGES += mustache
+pkg_mustache_name = mustache
+pkg_mustache_description = Mustache template engine for Erlang.
+pkg_mustache_homepage = https://github.com/mojombo/mustache.erl
+pkg_mustache_fetch = git
+pkg_mustache_repo = https://github.com/mojombo/mustache.erl
+pkg_mustache_commit = master
+
+PACKAGES += myproto
+pkg_myproto_name = myproto
+pkg_myproto_description = MySQL Server Protocol in Erlang
+pkg_myproto_homepage = https://github.com/altenwald/myproto
+pkg_myproto_fetch = git
+pkg_myproto_repo = https://github.com/altenwald/myproto
+pkg_myproto_commit = master
+
+PACKAGES += mysql
+pkg_mysql_name = mysql
+pkg_mysql_description = MySQL client library for Erlang/OTP
+pkg_mysql_homepage = https://github.com/mysql-otp/mysql-otp
+pkg_mysql_fetch = git
+pkg_mysql_repo = https://github.com/mysql-otp/mysql-otp
+pkg_mysql_commit = 1.5.1
+
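+# Note: most index entries track "master", but some, such as mysql above,
+# pin a tag. A project can override any of these defaults with a dep_*
+# line of the form "dep_NAME = fetch_method repo ref"; for example, with
+# an illustrative ref:
+#
+#     DEPS = mysql
+#     dep_mysql = git https://github.com/mysql-otp/mysql-otp 1.6.0
+#
+# dep_NAME takes precedence over the pkg_NAME_* defaults defined here.
+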
+PACKAGES += n2o
+pkg_n2o_name = n2o
+pkg_n2o_description = WebSocket Application Server
+pkg_n2o_homepage = https://github.com/5HT/n2o
+pkg_n2o_fetch = git
+pkg_n2o_repo = https://github.com/5HT/n2o
+pkg_n2o_commit = master
+
+PACKAGES += nat_upnp
+pkg_nat_upnp_name = nat_upnp
+pkg_nat_upnp_description = Erlang library to map your internal port to an external one using UPnP IGD
+pkg_nat_upnp_homepage = https://github.com/benoitc/nat_upnp
+pkg_nat_upnp_fetch = git
+pkg_nat_upnp_repo = https://github.com/benoitc/nat_upnp
+pkg_nat_upnp_commit = master
+
+PACKAGES += neo4j
+pkg_neo4j_name = neo4j
+pkg_neo4j_description = Erlang client library for Neo4J.
+pkg_neo4j_homepage = https://github.com/dmitriid/neo4j-erlang
+pkg_neo4j_fetch = git
+pkg_neo4j_repo = https://github.com/dmitriid/neo4j-erlang
+pkg_neo4j_commit = master
+
+PACKAGES += neotoma
+pkg_neotoma_name = neotoma
+pkg_neotoma_description = Erlang library and packrat parser-generator for parsing expression grammars.
+pkg_neotoma_homepage = https://github.com/seancribbs/neotoma
+pkg_neotoma_fetch = git
+pkg_neotoma_repo = https://github.com/seancribbs/neotoma
+pkg_neotoma_commit = master
+
+PACKAGES += newrelic
+pkg_newrelic_name = newrelic
+pkg_newrelic_description = Erlang library for sending metrics to New Relic
+pkg_newrelic_homepage = https://github.com/wooga/newrelic-erlang
+pkg_newrelic_fetch = git
+pkg_newrelic_repo = https://github.com/wooga/newrelic-erlang
+pkg_newrelic_commit = master
+
+PACKAGES += nifty
+pkg_nifty_name = nifty
+pkg_nifty_description = Erlang NIF wrapper generator
+pkg_nifty_homepage = https://github.com/parapluu/nifty
+pkg_nifty_fetch = git
+pkg_nifty_repo = https://github.com/parapluu/nifty
+pkg_nifty_commit = master
+
+PACKAGES += nitrogen_core
+pkg_nitrogen_core_name = nitrogen_core
+pkg_nitrogen_core_description = The core Nitrogen library.
+pkg_nitrogen_core_homepage = http://nitrogenproject.com/
+pkg_nitrogen_core_fetch = git
+pkg_nitrogen_core_repo = https://github.com/nitrogen/nitrogen_core
+pkg_nitrogen_core_commit = master
+
+PACKAGES += nkbase
+pkg_nkbase_name = nkbase
+pkg_nkbase_description = NkBASE distributed database
+pkg_nkbase_homepage = https://github.com/Nekso/nkbase
+pkg_nkbase_fetch = git
+pkg_nkbase_repo = https://github.com/Nekso/nkbase
+pkg_nkbase_commit = develop
+
+PACKAGES += nkdocker
+pkg_nkdocker_name = nkdocker
+pkg_nkdocker_description = Erlang Docker client
+pkg_nkdocker_homepage = https://github.com/Nekso/nkdocker
+pkg_nkdocker_fetch = git
+pkg_nkdocker_repo = https://github.com/Nekso/nkdocker
+pkg_nkdocker_commit = master
+
+PACKAGES += nkpacket
+pkg_nkpacket_name = nkpacket
+pkg_nkpacket_description = Generic Erlang transport layer
+pkg_nkpacket_homepage = https://github.com/Nekso/nkpacket
+pkg_nkpacket_fetch = git
+pkg_nkpacket_repo = https://github.com/Nekso/nkpacket
+pkg_nkpacket_commit = master
+
+PACKAGES += nksip
+pkg_nksip_name = nksip
+pkg_nksip_description = Erlang SIP application server
+pkg_nksip_homepage = https://github.com/kalta/nksip
+pkg_nksip_fetch = git
+pkg_nksip_repo = https://github.com/kalta/nksip
+pkg_nksip_commit = master
+
+PACKAGES += nodefinder
+pkg_nodefinder_name = nodefinder
+pkg_nodefinder_description = automatic node discovery via UDP multicast
+pkg_nodefinder_homepage = https://github.com/erlanger/nodefinder
+pkg_nodefinder_fetch = git
+pkg_nodefinder_repo = https://github.com/okeuday/nodefinder
+pkg_nodefinder_commit = master
+
+PACKAGES += nprocreg
+pkg_nprocreg_name = nprocreg
+pkg_nprocreg_description = Minimal Distributed Erlang Process Registry
+pkg_nprocreg_homepage = http://nitrogenproject.com/
+pkg_nprocreg_fetch = git
+pkg_nprocreg_repo = https://github.com/nitrogen/nprocreg
+pkg_nprocreg_commit = master
+
+PACKAGES += oauth
+pkg_oauth_name = oauth
+pkg_oauth_description = An Erlang OAuth 1.0 implementation
+pkg_oauth_homepage = https://github.com/tim/erlang-oauth
+pkg_oauth_fetch = git
+pkg_oauth_repo = https://github.com/tim/erlang-oauth
+pkg_oauth_commit = master
+
+PACKAGES += oauth2
+pkg_oauth2_name = oauth2
+pkg_oauth2_description = Erlang OAuth2 implementation
+pkg_oauth2_homepage = https://github.com/kivra/oauth2
+pkg_oauth2_fetch = git
+pkg_oauth2_repo = https://github.com/kivra/oauth2
+pkg_oauth2_commit = master
+
+PACKAGES += observer_cli
+pkg_observer_cli_name = observer_cli
+pkg_observer_cli_description = Visualize Erlang/Elixir Nodes On The Command Line
+pkg_observer_cli_homepage = http://zhongwencool.github.io/observer_cli
+pkg_observer_cli_fetch = git
+pkg_observer_cli_repo = https://github.com/zhongwencool/observer_cli
+pkg_observer_cli_commit = master
+
+PACKAGES += octopus
+pkg_octopus_name = octopus
+pkg_octopus_description = Small and flexible pool manager written in Erlang
+pkg_octopus_homepage = https://github.com/erlangbureau/octopus
+pkg_octopus_fetch = git
+pkg_octopus_repo = https://github.com/erlangbureau/octopus
+pkg_octopus_commit = master
+
+PACKAGES += of_protocol
+pkg_of_protocol_name = of_protocol
+pkg_of_protocol_description = OpenFlow Protocol Library for Erlang
+pkg_of_protocol_homepage = https://github.com/FlowForwarding/of_protocol
+pkg_of_protocol_fetch = git
+pkg_of_protocol_repo = https://github.com/FlowForwarding/of_protocol
+pkg_of_protocol_commit = master
+
+PACKAGES += opencouch
+pkg_opencouch_name = couch
+pkg_opencouch_description = An embeddable document-oriented database compatible with Apache CouchDB
+pkg_opencouch_homepage = https://github.com/benoitc/opencouch
+pkg_opencouch_fetch = git
+pkg_opencouch_repo = https://github.com/benoitc/opencouch
+pkg_opencouch_commit = master
+
+PACKAGES += openflow
+pkg_openflow_name = openflow
+pkg_openflow_description = An OpenFlow controller written in pure erlang
+pkg_openflow_homepage = https://github.com/renatoaguiar/erlang-openflow
+pkg_openflow_fetch = git
+pkg_openflow_repo = https://github.com/renatoaguiar/erlang-openflow
+pkg_openflow_commit = master
+
+PACKAGES += openid
+pkg_openid_name = openid
+pkg_openid_description = Erlang OpenID
+pkg_openid_homepage = https://github.com/brendonh/erl_openid
+pkg_openid_fetch = git
+pkg_openid_repo = https://github.com/brendonh/erl_openid
+pkg_openid_commit = master
+
+PACKAGES += openpoker
+pkg_openpoker_name = openpoker
+pkg_openpoker_description = Genesis Texas hold'em Game Server
+pkg_openpoker_homepage = https://github.com/hpyhacking/openpoker
+pkg_openpoker_fetch = git
+pkg_openpoker_repo = https://github.com/hpyhacking/openpoker
+pkg_openpoker_commit = master
+
+PACKAGES += otpbp
+pkg_otpbp_name = otpbp
+pkg_otpbp_description = Parse transform for using new OTP functions in old Erlang/OTP releases (R15, R16, 17, 18, 19)
+pkg_otpbp_homepage = https://github.com/Ledest/otpbp
+pkg_otpbp_fetch = git
+pkg_otpbp_repo = https://github.com/Ledest/otpbp
+pkg_otpbp_commit = master
+
+PACKAGES += pal
+pkg_pal_name = pal
+pkg_pal_description = Pragmatic Authentication Library
+pkg_pal_homepage = https://github.com/manifest/pal
+pkg_pal_fetch = git
+pkg_pal_repo = https://github.com/manifest/pal
+pkg_pal_commit = master
+
+PACKAGES += parse_trans
+pkg_parse_trans_name = parse_trans
+pkg_parse_trans_description = Parse transform utilities for Erlang
+pkg_parse_trans_homepage = https://github.com/uwiger/parse_trans
+pkg_parse_trans_fetch = git
+pkg_parse_trans_repo = https://github.com/uwiger/parse_trans
+pkg_parse_trans_commit = master
+
+PACKAGES += parsexml
+pkg_parsexml_name = parsexml
+pkg_parsexml_description = Simple DOM XML parser with convenient and very simple API
+pkg_parsexml_homepage = https://github.com/maxlapshin/parsexml
+pkg_parsexml_fetch = git
+pkg_parsexml_repo = https://github.com/maxlapshin/parsexml
+pkg_parsexml_commit = master
+
+PACKAGES += partisan
+pkg_partisan_name = partisan
+pkg_partisan_description = High-performance, high-scalability distributed computing with Erlang and Elixir.
+pkg_partisan_homepage = http://partisan.cloud
+pkg_partisan_fetch = git
+pkg_partisan_repo = https://github.com/lasp-lang/partisan
+pkg_partisan_commit = master
+
+PACKAGES += pegjs
+pkg_pegjs_name = pegjs
+pkg_pegjs_description = An implementation of PEG.js grammar for Erlang.
+pkg_pegjs_homepage = https://github.com/dmitriid/pegjs
+pkg_pegjs_fetch = git
+pkg_pegjs_repo = https://github.com/dmitriid/pegjs
+pkg_pegjs_commit = master
+
+PACKAGES += percept2
+pkg_percept2_name = percept2
+pkg_percept2_description = Concurrent profiling tool for Erlang
+pkg_percept2_homepage = https://github.com/huiqing/percept2
+pkg_percept2_fetch = git
+pkg_percept2_repo = https://github.com/huiqing/percept2
+pkg_percept2_commit = master
+
+PACKAGES += pgo
+pkg_pgo_name = pgo
+pkg_pgo_description = Erlang Postgres client and connection pool
+pkg_pgo_homepage = https://github.com/erleans/pgo.git
+pkg_pgo_fetch = git
+pkg_pgo_repo = https://github.com/erleans/pgo.git
+pkg_pgo_commit = master
+
+PACKAGES += pgsql
+pkg_pgsql_name = pgsql
+pkg_pgsql_description = Erlang PostgreSQL driver
+pkg_pgsql_homepage = https://github.com/semiocast/pgsql
+pkg_pgsql_fetch = git
+pkg_pgsql_repo = https://github.com/semiocast/pgsql
+pkg_pgsql_commit = master
+
+PACKAGES += pkgx
+pkg_pkgx_name = pkgx
+pkg_pkgx_description = Build .deb packages from Erlang releases
+pkg_pkgx_homepage = https://github.com/arjan/pkgx
+pkg_pkgx_fetch = git
+pkg_pkgx_repo = https://github.com/arjan/pkgx
+pkg_pkgx_commit = master
+
+PACKAGES += pkt
+pkg_pkt_name = pkt
+pkg_pkt_description = Erlang network protocol library
+pkg_pkt_homepage = https://github.com/msantos/pkt
+pkg_pkt_fetch = git
+pkg_pkt_repo = https://github.com/msantos/pkt
+pkg_pkt_commit = master
+
+PACKAGES += plain_fsm
+pkg_plain_fsm_name = plain_fsm
+pkg_plain_fsm_description = A behaviour/support library for writing plain Erlang FSMs.
+pkg_plain_fsm_homepage = https://github.com/uwiger/plain_fsm
+pkg_plain_fsm_fetch = git
+pkg_plain_fsm_repo = https://github.com/uwiger/plain_fsm
+pkg_plain_fsm_commit = master
+
+PACKAGES += plumtree
+pkg_plumtree_name = plumtree
+pkg_plumtree_description = Epidemic Broadcast Trees
+pkg_plumtree_homepage = https://github.com/helium/plumtree
+pkg_plumtree_fetch = git
+pkg_plumtree_repo = https://github.com/helium/plumtree
+pkg_plumtree_commit = master
+
+PACKAGES += pmod_transform
+pkg_pmod_transform_name = pmod_transform
+pkg_pmod_transform_description = Parse transform for parameterized modules
+pkg_pmod_transform_homepage = https://github.com/erlang/pmod_transform
+pkg_pmod_transform_fetch = git
+pkg_pmod_transform_repo = https://github.com/erlang/pmod_transform
+pkg_pmod_transform_commit = master
+
+PACKAGES += pobox
+pkg_pobox_name = pobox
+pkg_pobox_description = External buffer processes to protect against mailbox overflow in Erlang
+pkg_pobox_homepage = https://github.com/ferd/pobox
+pkg_pobox_fetch = git
+pkg_pobox_repo = https://github.com/ferd/pobox
+pkg_pobox_commit = master
+
+PACKAGES += ponos
+pkg_ponos_name = ponos
+pkg_ponos_description = ponos is a simple yet powerful load generator written in erlang
+pkg_ponos_homepage = https://github.com/klarna/ponos
+pkg_ponos_fetch = git
+pkg_ponos_repo = https://github.com/klarna/ponos
+pkg_ponos_commit = master
+
+PACKAGES += poolboy
+pkg_poolboy_name = poolboy
+pkg_poolboy_description = A hunky Erlang worker pool factory
+pkg_poolboy_homepage = https://github.com/devinus/poolboy
+pkg_poolboy_fetch = git
+pkg_poolboy_repo = https://github.com/devinus/poolboy
+pkg_poolboy_commit = master
+
+PACKAGES += pooler
+pkg_pooler_name = pooler
+pkg_pooler_description = An OTP Process Pool Application
+pkg_pooler_homepage = https://github.com/seth/pooler
+pkg_pooler_fetch = git
+pkg_pooler_repo = https://github.com/seth/pooler
+pkg_pooler_commit = master
+
+PACKAGES += pqueue
+pkg_pqueue_name = pqueue
+pkg_pqueue_description = Erlang Priority Queues
+pkg_pqueue_homepage = https://github.com/okeuday/pqueue
+pkg_pqueue_fetch = git
+pkg_pqueue_repo = https://github.com/okeuday/pqueue
+pkg_pqueue_commit = master
+
+PACKAGES += procket
+pkg_procket_name = procket
+pkg_procket_description = Erlang interface to low level socket operations
+pkg_procket_homepage = http://blog.listincomprehension.com/search/label/procket
+pkg_procket_fetch = git
+pkg_procket_repo = https://github.com/msantos/procket
+pkg_procket_commit = master
+
+PACKAGES += prometheus
+pkg_prometheus_name = prometheus
+pkg_prometheus_description = Prometheus.io client in Erlang
+pkg_prometheus_homepage = https://github.com/deadtrickster/prometheus.erl
+pkg_prometheus_fetch = git
+pkg_prometheus_repo = https://github.com/deadtrickster/prometheus.erl
+pkg_prometheus_commit = master
+
+PACKAGES += prop
+pkg_prop_name = prop
+pkg_prop_description = An Erlang code scaffolding and generator system.
+pkg_prop_homepage = https://github.com/nuex/prop
+pkg_prop_fetch = git
+pkg_prop_repo = https://github.com/nuex/prop
+pkg_prop_commit = master
+
+PACKAGES += proper
+pkg_proper_name = proper
+pkg_proper_description = PropEr: a QuickCheck-inspired property-based testing tool for Erlang.
+pkg_proper_homepage = http://proper.softlab.ntua.gr
+pkg_proper_fetch = git
+pkg_proper_repo = https://github.com/manopapad/proper
+pkg_proper_commit = master
+
+PACKAGES += props
+pkg_props_name = props
+pkg_props_description = Property structure library
+pkg_props_homepage = https://github.com/greyarea/props
+pkg_props_fetch = git
+pkg_props_repo = https://github.com/greyarea/props
+pkg_props_commit = master
+
+PACKAGES += protobuffs
+pkg_protobuffs_name = protobuffs
+pkg_protobuffs_description = An implementation of Google's Protocol Buffers for Erlang, based on ngerakines/erlang_protobuffs.
+pkg_protobuffs_homepage = https://github.com/basho/erlang_protobuffs
+pkg_protobuffs_fetch = git
+pkg_protobuffs_repo = https://github.com/basho/erlang_protobuffs
+pkg_protobuffs_commit = master
+
+PACKAGES += psycho
+pkg_psycho_name = psycho
+pkg_psycho_description = HTTP server that provides a WSGI-like interface for applications and middleware.
+pkg_psycho_homepage = https://github.com/gar1t/psycho
+pkg_psycho_fetch = git
+pkg_psycho_repo = https://github.com/gar1t/psycho
+pkg_psycho_commit = master
+
+PACKAGES += purity
+pkg_purity_name = purity
+pkg_purity_description = A side-effect analyzer for Erlang
+pkg_purity_homepage = https://github.com/mpitid/purity
+pkg_purity_fetch = git
+pkg_purity_repo = https://github.com/mpitid/purity
+pkg_purity_commit = master
+
+PACKAGES += push_service
+pkg_push_service_name = push_service
+pkg_push_service_description = Push service
+pkg_push_service_homepage = https://github.com/hairyhum/push_service
+pkg_push_service_fetch = git
+pkg_push_service_repo = https://github.com/hairyhum/push_service
+pkg_push_service_commit = master
+
+PACKAGES += qdate
+pkg_qdate_name = qdate
+pkg_qdate_description = Date, time, and timezone parsing, formatting, and conversion for Erlang.
+pkg_qdate_homepage = https://github.com/choptastic/qdate
+pkg_qdate_fetch = git
+pkg_qdate_repo = https://github.com/choptastic/qdate
+pkg_qdate_commit = master
+
+PACKAGES += qrcode
+pkg_qrcode_name = qrcode
+pkg_qrcode_description = QR Code encoder in Erlang
+pkg_qrcode_homepage = https://github.com/komone/qrcode
+pkg_qrcode_fetch = git
+pkg_qrcode_repo = https://github.com/komone/qrcode
+pkg_qrcode_commit = master
+
+PACKAGES += quest
+pkg_quest_name = quest
+pkg_quest_description = Learn Erlang through this set of challenges. An interactive system for getting to know Erlang.
+pkg_quest_homepage = https://github.com/eriksoe/ErlangQuest
+pkg_quest_fetch = git
+pkg_quest_repo = https://github.com/eriksoe/ErlangQuest
+pkg_quest_commit = master
+
+PACKAGES += quickrand
+pkg_quickrand_name = quickrand
+pkg_quickrand_description = Quick Erlang Random Number Generation
+pkg_quickrand_homepage = https://github.com/okeuday/quickrand
+pkg_quickrand_fetch = git
+pkg_quickrand_repo = https://github.com/okeuday/quickrand
+pkg_quickrand_commit = master
+
+PACKAGES += rabbit
+pkg_rabbit_name = rabbit
+pkg_rabbit_description = RabbitMQ Server
+pkg_rabbit_homepage = https://www.rabbitmq.com/
+pkg_rabbit_fetch = git
+pkg_rabbit_repo = https://github.com/rabbitmq/rabbitmq-server.git
+pkg_rabbit_commit = master
+
+PACKAGES += rabbit_exchange_type_riak
+pkg_rabbit_exchange_type_riak_name = rabbit_exchange_type_riak
+pkg_rabbit_exchange_type_riak_description = Custom RabbitMQ exchange type for sticking messages in Riak
+pkg_rabbit_exchange_type_riak_homepage = https://github.com/jbrisbin/riak-exchange
+pkg_rabbit_exchange_type_riak_fetch = git
+pkg_rabbit_exchange_type_riak_repo = https://github.com/jbrisbin/riak-exchange
+pkg_rabbit_exchange_type_riak_commit = master
+
+PACKAGES += rack
+pkg_rack_name = rack
+pkg_rack_description = Rack handler for erlang
+pkg_rack_homepage = https://github.com/erlyvideo/rack
+pkg_rack_fetch = git
+pkg_rack_repo = https://github.com/erlyvideo/rack
+pkg_rack_commit = master
+
+PACKAGES += radierl
+pkg_radierl_name = radierl
+pkg_radierl_description = RADIUS protocol stack implemented in Erlang.
+pkg_radierl_homepage = https://github.com/vances/radierl
+pkg_radierl_fetch = git
+pkg_radierl_repo = https://github.com/vances/radierl
+pkg_radierl_commit = master
+
+PACKAGES += rafter
+pkg_rafter_name = rafter
+pkg_rafter_description = An Erlang library application which implements the Raft consensus protocol
+pkg_rafter_homepage = https://github.com/andrewjstone/rafter
+pkg_rafter_fetch = git
+pkg_rafter_repo = https://github.com/andrewjstone/rafter
+pkg_rafter_commit = master
+
+PACKAGES += ranch
+pkg_ranch_name = ranch
+pkg_ranch_description = Socket acceptor pool for TCP protocols.
+pkg_ranch_homepage = http://ninenines.eu
+pkg_ranch_fetch = git
+pkg_ranch_repo = https://github.com/ninenines/ranch
+pkg_ranch_commit = 1.2.1
+
+PACKAGES += rbeacon
+pkg_rbeacon_name = rbeacon
+pkg_rbeacon_description = LAN discovery and presence in Erlang.
+pkg_rbeacon_homepage = https://github.com/refuge/rbeacon
+pkg_rbeacon_fetch = git
+pkg_rbeacon_repo = https://github.com/refuge/rbeacon
+pkg_rbeacon_commit = master
+
+PACKAGES += rebar
+pkg_rebar_name = rebar
+pkg_rebar_description = Erlang build tool that makes it easy to compile and test Erlang applications, port drivers and releases.
+pkg_rebar_homepage = http://www.rebar3.org
+pkg_rebar_fetch = git
+pkg_rebar_repo = https://github.com/rebar/rebar3
+pkg_rebar_commit = master
+
+PACKAGES += rebus
+pkg_rebus_name = rebus
+pkg_rebus_description = A stupid-simple, internal pub/sub event bus written in and for Erlang.
+pkg_rebus_homepage = https://github.com/olle/rebus
+pkg_rebus_fetch = git
+pkg_rebus_repo = https://github.com/olle/rebus
+pkg_rebus_commit = master
+
+PACKAGES += rec2json
+pkg_rec2json_name = rec2json
+pkg_rec2json_description = Compile erlang record definitions into modules to convert them to/from json easily.
+pkg_rec2json_homepage = https://github.com/lordnull/rec2json
+pkg_rec2json_fetch = git
+pkg_rec2json_repo = https://github.com/lordnull/rec2json
+pkg_rec2json_commit = master
+
+PACKAGES += recon
+pkg_recon_name = recon
+pkg_recon_description = Collection of functions and scripts to debug Erlang in production.
+pkg_recon_homepage = https://github.com/ferd/recon
+pkg_recon_fetch = git
+pkg_recon_repo = https://github.com/ferd/recon
+pkg_recon_commit = master
+
+PACKAGES += record_info
+pkg_record_info_name = record_info
+pkg_record_info_description = Convert between record and proplist
+pkg_record_info_homepage = https://github.com/bipthelin/erlang-record_info
+pkg_record_info_fetch = git
+pkg_record_info_repo = https://github.com/bipthelin/erlang-record_info
+pkg_record_info_commit = master
+
+PACKAGES += redgrid
+pkg_redgrid_name = redgrid
+pkg_redgrid_description = automatic Erlang node discovery via redis
+pkg_redgrid_homepage = https://github.com/jkvor/redgrid
+pkg_redgrid_fetch = git
+pkg_redgrid_repo = https://github.com/jkvor/redgrid
+pkg_redgrid_commit = master
+
+PACKAGES += redo
+pkg_redo_name = redo
+pkg_redo_description = pipelined erlang redis client
+pkg_redo_homepage = https://github.com/jkvor/redo
+pkg_redo_fetch = git
+pkg_redo_repo = https://github.com/jkvor/redo
+pkg_redo_commit = master
+
+PACKAGES += reload_mk
+pkg_reload_mk_name = reload_mk
+pkg_reload_mk_description = Live reload plugin for erlang.mk.
+pkg_reload_mk_homepage = https://github.com/bullno1/reload.mk
+pkg_reload_mk_fetch = git
+pkg_reload_mk_repo = https://github.com/bullno1/reload.mk
+pkg_reload_mk_commit = master
+
+PACKAGES += reltool_util
+pkg_reltool_util_name = reltool_util
+pkg_reltool_util_description = Utility application for Erlang reltool
+pkg_reltool_util_homepage = https://github.com/okeuday/reltool_util
+pkg_reltool_util_fetch = git
+pkg_reltool_util_repo = https://github.com/okeuday/reltool_util
+pkg_reltool_util_commit = master
+
+PACKAGES += relx
+pkg_relx_name = relx
+pkg_relx_description = Sane, simple release creation for Erlang
+pkg_relx_homepage = https://github.com/erlware/relx
+pkg_relx_fetch = git
+pkg_relx_repo = https://github.com/erlware/relx
+pkg_relx_commit = master
+
+PACKAGES += resource_discovery
+pkg_resource_discovery_name = resource_discovery
+pkg_resource_discovery_description = An application used to dynamically discover resources present in an Erlang node cluster.
+pkg_resource_discovery_homepage = http://erlware.org/
+pkg_resource_discovery_fetch = git
+pkg_resource_discovery_repo = https://github.com/erlware/resource_discovery
+pkg_resource_discovery_commit = master
+
+PACKAGES += restc
+pkg_restc_name = restc
+pkg_restc_description = Erlang REST client
+pkg_restc_homepage = https://github.com/kivra/restclient
+pkg_restc_fetch = git
+pkg_restc_repo = https://github.com/kivra/restclient
+pkg_restc_commit = master
+
+PACKAGES += rfc4627_jsonrpc
+pkg_rfc4627_jsonrpc_name = rfc4627_jsonrpc
+pkg_rfc4627_jsonrpc_description = Erlang RFC4627 (JSON) codec and JSON-RPC server implementation.
+pkg_rfc4627_jsonrpc_homepage = https://github.com/tonyg/erlang-rfc4627
+pkg_rfc4627_jsonrpc_fetch = git
+pkg_rfc4627_jsonrpc_repo = https://github.com/tonyg/erlang-rfc4627
+pkg_rfc4627_jsonrpc_commit = master
+
+PACKAGES += riak_control
+pkg_riak_control_name = riak_control
+pkg_riak_control_description = Webmachine-based administration interface for Riak.
+pkg_riak_control_homepage = https://github.com/basho/riak_control
+pkg_riak_control_fetch = git
+pkg_riak_control_repo = https://github.com/basho/riak_control
+pkg_riak_control_commit = master
+
+PACKAGES += riak_core
+pkg_riak_core_name = riak_core
+pkg_riak_core_description = Distributed systems infrastructure used by Riak.
+pkg_riak_core_homepage = https://github.com/basho/riak_core
+pkg_riak_core_fetch = git
+pkg_riak_core_repo = https://github.com/basho/riak_core
+pkg_riak_core_commit = master
+
+PACKAGES += riak_dt
+pkg_riak_dt_name = riak_dt
+pkg_riak_dt_description = Convergent replicated datatypes in Erlang
+pkg_riak_dt_homepage = https://github.com/basho/riak_dt
+pkg_riak_dt_fetch = git
+pkg_riak_dt_repo = https://github.com/basho/riak_dt
+pkg_riak_dt_commit = master
+
+PACKAGES += riak_ensemble
+pkg_riak_ensemble_name = riak_ensemble
+pkg_riak_ensemble_description = Multi-Paxos framework in Erlang
+pkg_riak_ensemble_homepage = https://github.com/basho/riak_ensemble
+pkg_riak_ensemble_fetch = git
+pkg_riak_ensemble_repo = https://github.com/basho/riak_ensemble
+pkg_riak_ensemble_commit = master
+
+PACKAGES += riak_kv
+pkg_riak_kv_name = riak_kv
+pkg_riak_kv_description = Riak Key/Value Store
+pkg_riak_kv_homepage = https://github.com/basho/riak_kv
+pkg_riak_kv_fetch = git
+pkg_riak_kv_repo = https://github.com/basho/riak_kv
+pkg_riak_kv_commit = master
+
+PACKAGES += riak_pg
+pkg_riak_pg_name = riak_pg
+pkg_riak_pg_description = Distributed process groups with riak_core.
+pkg_riak_pg_homepage = https://github.com/cmeiklejohn/riak_pg
+pkg_riak_pg_fetch = git
+pkg_riak_pg_repo = https://github.com/cmeiklejohn/riak_pg
+pkg_riak_pg_commit = master
+
+PACKAGES += riak_pipe
+pkg_riak_pipe_name = riak_pipe
+pkg_riak_pipe_description = Riak Pipelines
+pkg_riak_pipe_homepage = https://github.com/basho/riak_pipe
+pkg_riak_pipe_fetch = git
+pkg_riak_pipe_repo = https://github.com/basho/riak_pipe
+pkg_riak_pipe_commit = master
+
+PACKAGES += riak_sysmon
+pkg_riak_sysmon_name = riak_sysmon
+pkg_riak_sysmon_description = Simple OTP app for managing Erlang VM system_monitor event messages
+pkg_riak_sysmon_homepage = https://github.com/basho/riak_sysmon
+pkg_riak_sysmon_fetch = git
+pkg_riak_sysmon_repo = https://github.com/basho/riak_sysmon
+pkg_riak_sysmon_commit = master
+
+PACKAGES += riak_test
+pkg_riak_test_name = riak_test
+pkg_riak_test_description = I'm in your cluster, testing your riaks
+pkg_riak_test_homepage = https://github.com/basho/riak_test
+pkg_riak_test_fetch = git
+pkg_riak_test_repo = https://github.com/basho/riak_test
+pkg_riak_test_commit = master
+
+PACKAGES += riakc
+pkg_riakc_name = riakc
+pkg_riakc_description = Erlang clients for Riak.
+pkg_riakc_homepage = https://github.com/basho/riak-erlang-client
+pkg_riakc_fetch = git
+pkg_riakc_repo = https://github.com/basho/riak-erlang-client
+pkg_riakc_commit = master
+
+PACKAGES += riakhttpc
+pkg_riakhttpc_name = riakhttpc
+pkg_riakhttpc_description = Riak Erlang client using the HTTP interface
+pkg_riakhttpc_homepage = https://github.com/basho/riak-erlang-http-client
+pkg_riakhttpc_fetch = git
+pkg_riakhttpc_repo = https://github.com/basho/riak-erlang-http-client
+pkg_riakhttpc_commit = master
+
+PACKAGES += riaknostic
+pkg_riaknostic_name = riaknostic
+pkg_riaknostic_description = A diagnostic tool for Riak installations, to find common errors asap
+pkg_riaknostic_homepage = https://github.com/basho/riaknostic
+pkg_riaknostic_fetch = git
+pkg_riaknostic_repo = https://github.com/basho/riaknostic
+pkg_riaknostic_commit = master
+
+PACKAGES += riakpool
+pkg_riakpool_name = riakpool
+pkg_riakpool_description = erlang riak client pool
+pkg_riakpool_homepage = https://github.com/dweldon/riakpool
+pkg_riakpool_fetch = git
+pkg_riakpool_repo = https://github.com/dweldon/riakpool
+pkg_riakpool_commit = master
+
+PACKAGES += rivus_cep
+pkg_rivus_cep_name = rivus_cep
+pkg_rivus_cep_description = Complex event processing in Erlang
+pkg_rivus_cep_homepage = https://github.com/vascokk/rivus_cep
+pkg_rivus_cep_fetch = git
+pkg_rivus_cep_repo = https://github.com/vascokk/rivus_cep
+pkg_rivus_cep_commit = master
+
+PACKAGES += rlimit
+pkg_rlimit_name = rlimit
+pkg_rlimit_description = Magnus Klaar's rate limiter code from etorrent
+pkg_rlimit_homepage = https://github.com/jlouis/rlimit
+pkg_rlimit_fetch = git
+pkg_rlimit_repo = https://github.com/jlouis/rlimit
+pkg_rlimit_commit = master
+
+PACKAGES += rust_mk
+pkg_rust_mk_name = rust_mk
+pkg_rust_mk_description = Build Rust crates in an Erlang application
+pkg_rust_mk_homepage = https://github.com/goertzenator/rust.mk
+pkg_rust_mk_fetch = git
+pkg_rust_mk_repo = https://github.com/goertzenator/rust.mk
+pkg_rust_mk_commit = master
+
+PACKAGES += safetyvalve
+pkg_safetyvalve_name = safetyvalve
+pkg_safetyvalve_description = A safety valve for your erlang node
+pkg_safetyvalve_homepage = https://github.com/jlouis/safetyvalve
+pkg_safetyvalve_fetch = git
+pkg_safetyvalve_repo = https://github.com/jlouis/safetyvalve
+pkg_safetyvalve_commit = master
+
+PACKAGES += seestar
+pkg_seestar_name = seestar
+pkg_seestar_description = The Erlang client for Cassandra 1.2+ binary protocol
+pkg_seestar_homepage = https://github.com/iamaleksey/seestar
+pkg_seestar_fetch = git
+pkg_seestar_repo = https://github.com/iamaleksey/seestar
+pkg_seestar_commit = master
+
+PACKAGES += service
+pkg_service_name = service
+pkg_service_description = A minimal Erlang behavior for creating CloudI internal services
+pkg_service_homepage = http://cloudi.org/
+pkg_service_fetch = git
+pkg_service_repo = https://github.com/CloudI/service
+pkg_service_commit = master
+
+PACKAGES += setup
+pkg_setup_name = setup
+pkg_setup_description = Generic setup utility for Erlang-based systems
+pkg_setup_homepage = https://github.com/uwiger/setup
+pkg_setup_fetch = git
+pkg_setup_repo = https://github.com/uwiger/setup
+pkg_setup_commit = master
+
+PACKAGES += sext
+pkg_sext_name = sext
+pkg_sext_description = Sortable Erlang Term Serialization
+pkg_sext_homepage = https://github.com/uwiger/sext
+pkg_sext_fetch = git
+pkg_sext_repo = https://github.com/uwiger/sext
+pkg_sext_commit = master
+
+PACKAGES += sfmt
+pkg_sfmt_name = sfmt
+pkg_sfmt_description = SFMT pseudo random number generator for Erlang.
+pkg_sfmt_homepage = https://github.com/jj1bdx/sfmt-erlang
+pkg_sfmt_fetch = git
+pkg_sfmt_repo = https://github.com/jj1bdx/sfmt-erlang
+pkg_sfmt_commit = master
+
+PACKAGES += sgte
+pkg_sgte_name = sgte
+pkg_sgte_description = A simple Erlang Template Engine
+pkg_sgte_homepage = https://github.com/filippo/sgte
+pkg_sgte_fetch = git
+pkg_sgte_repo = https://github.com/filippo/sgte
+pkg_sgte_commit = master
+
+PACKAGES += sheriff
+pkg_sheriff_name = sheriff
+pkg_sheriff_description = Parse transform for type based validation.
+pkg_sheriff_homepage = http://ninenines.eu
+pkg_sheriff_fetch = git
+pkg_sheriff_repo = https://github.com/extend/sheriff
+pkg_sheriff_commit = master
+
+PACKAGES += shotgun
+pkg_shotgun_name = shotgun
+pkg_shotgun_description = better than just a gun
+pkg_shotgun_homepage = https://github.com/inaka/shotgun
+pkg_shotgun_fetch = git
+pkg_shotgun_repo = https://github.com/inaka/shotgun
+pkg_shotgun_commit = master
+
+PACKAGES += sidejob
+pkg_sidejob_name = sidejob
+pkg_sidejob_description = Parallel worker and capacity limiting library for Erlang
+pkg_sidejob_homepage = https://github.com/basho/sidejob
+pkg_sidejob_fetch = git
+pkg_sidejob_repo = https://github.com/basho/sidejob
+pkg_sidejob_commit = master
+
+PACKAGES += sieve
+pkg_sieve_name = sieve
+pkg_sieve_description = sieve is a simple TCP routing proxy (layer 7) in erlang
+pkg_sieve_homepage = https://github.com/benoitc/sieve
+pkg_sieve_fetch = git
+pkg_sieve_repo = https://github.com/benoitc/sieve
+pkg_sieve_commit = master
+
+PACKAGES += sighandler
+pkg_sighandler_name = sighandler
+pkg_sighandler_description = Handle UNIX signals in Erlang
+pkg_sighandler_homepage = https://github.com/jkingsbery/sighandler
+pkg_sighandler_fetch = git
+pkg_sighandler_repo = https://github.com/jkingsbery/sighandler
+pkg_sighandler_commit = master
+
+PACKAGES += simhash
+pkg_simhash_name = simhash
+pkg_simhash_description = Simhashing for Erlang -- hashing algorithm to find near-duplicates in binary data.
+pkg_simhash_homepage = https://github.com/ferd/simhash
+pkg_simhash_fetch = git
+pkg_simhash_repo = https://github.com/ferd/simhash
+pkg_simhash_commit = master
+
+PACKAGES += simple_bridge
+pkg_simple_bridge_name = simple_bridge
+pkg_simple_bridge_description = A simple, standardized interface library to Erlang HTTP Servers.
+pkg_simple_bridge_homepage = https://github.com/nitrogen/simple_bridge
+pkg_simple_bridge_fetch = git
+pkg_simple_bridge_repo = https://github.com/nitrogen/simple_bridge
+pkg_simple_bridge_commit = master
+
+PACKAGES += simple_oauth2
+pkg_simple_oauth2_name = simple_oauth2
+pkg_simple_oauth2_description = Simple erlang OAuth2 client module for any http server framework (Google, Facebook, Yandex, Vkontakte are preconfigured)
+pkg_simple_oauth2_homepage = https://github.com/virtan/simple_oauth2
+pkg_simple_oauth2_fetch = git
+pkg_simple_oauth2_repo = https://github.com/virtan/simple_oauth2
+pkg_simple_oauth2_commit = master
+
+PACKAGES += skel
+pkg_skel_name = skel
+pkg_skel_description = A Streaming Process-based Skeleton Library for Erlang
+pkg_skel_homepage = https://github.com/ParaPhrase/skel
+pkg_skel_fetch = git
+pkg_skel_repo = https://github.com/ParaPhrase/skel
+pkg_skel_commit = master
+
+PACKAGES += slack
+pkg_slack_name = slack
+pkg_slack_description = Minimal slack notification OTP library.
+pkg_slack_homepage = https://github.com/DonBranson/slack
+pkg_slack_fetch = git
+pkg_slack_repo = https://github.com/DonBranson/slack.git
+pkg_slack_commit = master
+
+PACKAGES += smother
+pkg_smother_name = smother
+pkg_smother_description = Extended code coverage metrics for Erlang.
+pkg_smother_homepage = https://ramsay-t.github.io/Smother/
+pkg_smother_fetch = git
+pkg_smother_repo = https://github.com/ramsay-t/Smother
+pkg_smother_commit = master
+
+PACKAGES += snappyer
+pkg_snappyer_name = snappyer
+pkg_snappyer_description = Snappy as a NIF for Erlang
+pkg_snappyer_homepage = https://github.com/zmstone/snappyer
+pkg_snappyer_fetch = git
+pkg_snappyer_repo = https://github.com/zmstone/snappyer.git
+pkg_snappyer_commit = master
+
+PACKAGES += social
+pkg_social_name = social
+pkg_social_description = Cowboy handler for social login via OAuth2 providers
+pkg_social_homepage = https://github.com/dvv/social
+pkg_social_fetch = git
+pkg_social_repo = https://github.com/dvv/social
+pkg_social_commit = master
+
+PACKAGES += spapi_router
+pkg_spapi_router_name = spapi_router
+pkg_spapi_router_description = Partially-connected Erlang clustering
+pkg_spapi_router_homepage = https://github.com/spilgames/spapi-router
+pkg_spapi_router_fetch = git
+pkg_spapi_router_repo = https://github.com/spilgames/spapi-router
+pkg_spapi_router_commit = master
+
+PACKAGES += sqerl
+pkg_sqerl_name = sqerl
+pkg_sqerl_description = An Erlang-flavoured SQL DSL
+pkg_sqerl_homepage = https://github.com/hairyhum/sqerl
+pkg_sqerl_fetch = git
+pkg_sqerl_repo = https://github.com/hairyhum/sqerl
+pkg_sqerl_commit = master
+
+PACKAGES += srly
+pkg_srly_name = srly
+pkg_srly_description = Native Erlang Unix serial interface
+pkg_srly_homepage = https://github.com/msantos/srly
+pkg_srly_fetch = git
+pkg_srly_repo = https://github.com/msantos/srly
+pkg_srly_commit = master
+
+PACKAGES += sshrpc
+pkg_sshrpc_name = sshrpc
+pkg_sshrpc_description = Erlang SSH RPC module (experimental)
+pkg_sshrpc_homepage = https://github.com/jj1bdx/sshrpc
+pkg_sshrpc_fetch = git
+pkg_sshrpc_repo = https://github.com/jj1bdx/sshrpc
+pkg_sshrpc_commit = master
+
+PACKAGES += stable
+pkg_stable_name = stable
+pkg_stable_description = Library of assorted helpers for Cowboy web server.
+pkg_stable_homepage = https://github.com/dvv/stable
+pkg_stable_fetch = git
+pkg_stable_repo = https://github.com/dvv/stable
+pkg_stable_commit = master
+
+PACKAGES += statebox
+pkg_statebox_name = statebox
+pkg_statebox_description = Erlang state monad with merge/conflict-resolution capabilities. Useful for Riak.
+pkg_statebox_homepage = https://github.com/mochi/statebox
+pkg_statebox_fetch = git
+pkg_statebox_repo = https://github.com/mochi/statebox
+pkg_statebox_commit = master
+
+PACKAGES += statebox_riak
+pkg_statebox_riak_name = statebox_riak
+pkg_statebox_riak_description = Convenience library that makes it easier to use statebox with riak, extracted from best practices in our production code at Mochi Media.
+pkg_statebox_riak_homepage = https://github.com/mochi/statebox_riak
+pkg_statebox_riak_fetch = git
+pkg_statebox_riak_repo = https://github.com/mochi/statebox_riak
+pkg_statebox_riak_commit = master
+
+PACKAGES += statman
+pkg_statman_name = statman
+pkg_statman_description = Efficiently collect massive volumes of metrics inside the Erlang VM
+pkg_statman_homepage = https://github.com/knutin/statman
+pkg_statman_fetch = git
+pkg_statman_repo = https://github.com/knutin/statman
+pkg_statman_commit = master
+
+PACKAGES += statsderl
+pkg_statsderl_name = statsderl
+pkg_statsderl_description = StatsD client (erlang)
+pkg_statsderl_homepage = https://github.com/lpgauth/statsderl
+pkg_statsderl_fetch = git
+pkg_statsderl_repo = https://github.com/lpgauth/statsderl
+pkg_statsderl_commit = master
+
+PACKAGES += stdinout_pool
+pkg_stdinout_pool_name = stdinout_pool
+pkg_stdinout_pool_description = stdinout_pool : stuff goes in, stuff goes out. there's never any miscommunication.
+pkg_stdinout_pool_homepage = https://github.com/mattsta/erlang-stdinout-pool
+pkg_stdinout_pool_fetch = git
+pkg_stdinout_pool_repo = https://github.com/mattsta/erlang-stdinout-pool
+pkg_stdinout_pool_commit = master
+
+PACKAGES += stockdb
+pkg_stockdb_name = stockdb
+pkg_stockdb_description = Database for storing Stock Exchange quotes in erlang
+pkg_stockdb_homepage = https://github.com/maxlapshin/stockdb
+pkg_stockdb_fetch = git
+pkg_stockdb_repo = https://github.com/maxlapshin/stockdb
+pkg_stockdb_commit = master
+
+PACKAGES += stripe
+pkg_stripe_name = stripe
+pkg_stripe_description = Erlang interface to the stripe.com API
+pkg_stripe_homepage = https://github.com/mattsta/stripe-erlang
+pkg_stripe_fetch = git
+pkg_stripe_repo = https://github.com/mattsta/stripe-erlang
+pkg_stripe_commit = v1
+
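+# The PACKAGES list accumulated throughout this index also backs the
+# erlang.mk package search target, which matches against the pkg_* name
+# and description fields. For example, from a project that includes
+# erlang.mk:
+#
+#     make search q=stripe
+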
+PACKAGES += subproc
+pkg_subproc_name = subproc
+pkg_subproc_description = UNIX subprocess manager with {active,once|false} modes
+pkg_subproc_homepage = http://dozzie.jarowit.net/trac/wiki/subproc
+pkg_subproc_fetch = git
+pkg_subproc_repo = https://github.com/dozzie/subproc
+pkg_subproc_commit = v0.1.0
+
+PACKAGES += supervisor3
+pkg_supervisor3_name = supervisor3
+pkg_supervisor3_description = OTP supervisor with additional strategies
+pkg_supervisor3_homepage = https://github.com/klarna/supervisor3
+pkg_supervisor3_fetch = git
+pkg_supervisor3_repo = https://github.com/klarna/supervisor3.git
+pkg_supervisor3_commit = master
+
+PACKAGES += surrogate
+pkg_surrogate_name = surrogate
+pkg_surrogate_description = Proxy server written in erlang. Supports reverse proxy load balancing and forward proxy with http (including CONNECT), socks4, socks5, and transparent proxy modes.
+pkg_surrogate_homepage = https://github.com/skruger/Surrogate
+pkg_surrogate_fetch = git
+pkg_surrogate_repo = https://github.com/skruger/Surrogate
+pkg_surrogate_commit = master
+
+PACKAGES += swab
+pkg_swab_name = swab
+pkg_swab_description = General purpose buffer handling module
+pkg_swab_homepage = https://github.com/crownedgrouse/swab
+pkg_swab_fetch = git
+pkg_swab_repo = https://github.com/crownedgrouse/swab
+pkg_swab_commit = master
+
+PACKAGES += swarm
+pkg_swarm_name = swarm
+pkg_swarm_description = Fast and simple acceptor pool for Erlang
+pkg_swarm_homepage = https://github.com/jeremey/swarm
+pkg_swarm_fetch = git
+pkg_swarm_repo = https://github.com/jeremey/swarm
+pkg_swarm_commit = master
+
+PACKAGES += switchboard
+pkg_switchboard_name = switchboard
+pkg_switchboard_description = A framework for processing email using worker plugins.
+pkg_switchboard_homepage = https://github.com/thusfresh/switchboard
+pkg_switchboard_fetch = git
+pkg_switchboard_repo = https://github.com/thusfresh/switchboard
+pkg_switchboard_commit = master
+
+PACKAGES += syn
+pkg_syn_name = syn
+pkg_syn_description = A global Process Registry and Process Group manager for Erlang.
+pkg_syn_homepage = https://github.com/ostinelli/syn
+pkg_syn_fetch = git
+pkg_syn_repo = https://github.com/ostinelli/syn
+pkg_syn_commit = master
+
+PACKAGES += sync
+pkg_sync_name = sync
+pkg_sync_description = On-the-fly recompiling and reloading in Erlang.
+pkg_sync_homepage = https://github.com/rustyio/sync
+pkg_sync_fetch = git
+pkg_sync_repo = https://github.com/rustyio/sync
+pkg_sync_commit = master
+
+PACKAGES += syntaxerl
+pkg_syntaxerl_name = syntaxerl
+pkg_syntaxerl_description = Syntax checker for Erlang
+pkg_syntaxerl_homepage = https://github.com/ten0s/syntaxerl
+pkg_syntaxerl_fetch = git
+pkg_syntaxerl_repo = https://github.com/ten0s/syntaxerl
+pkg_syntaxerl_commit = master
+
+PACKAGES += syslog
+pkg_syslog_name = syslog
+pkg_syslog_description = Erlang port driver for interacting with syslog via syslog(3)
+pkg_syslog_homepage = https://github.com/Vagabond/erlang-syslog
+pkg_syslog_fetch = git
+pkg_syslog_repo = https://github.com/Vagabond/erlang-syslog
+pkg_syslog_commit = master
+
+PACKAGES += taskforce
+pkg_taskforce_name = taskforce
+pkg_taskforce_description = Erlang worker pools for controlled parallelisation of arbitrary tasks.
+pkg_taskforce_homepage = https://github.com/g-andrade/taskforce
+pkg_taskforce_fetch = git
+pkg_taskforce_repo = https://github.com/g-andrade/taskforce
+pkg_taskforce_commit = master
+
+PACKAGES += tddreloader
+pkg_tddreloader_name = tddreloader
+pkg_tddreloader_description = Shell utility for recompiling, reloading, and testing code as it changes
+pkg_tddreloader_homepage = https://github.com/version2beta/tddreloader
+pkg_tddreloader_fetch = git
+pkg_tddreloader_repo = https://github.com/version2beta/tddreloader
+pkg_tddreloader_commit = master
+
+PACKAGES += tempo
+pkg_tempo_name = tempo
+pkg_tempo_description = NIF-based date and time parsing and formatting for Erlang.
+pkg_tempo_homepage = https://github.com/selectel/tempo
+pkg_tempo_fetch = git
+pkg_tempo_repo = https://github.com/selectel/tempo
+pkg_tempo_commit = master
+
+PACKAGES += ticktick
+pkg_ticktick_name = ticktick
+pkg_ticktick_description = Ticktick is an ID generator for message services.
+pkg_ticktick_homepage = https://github.com/ericliang/ticktick
+pkg_ticktick_fetch = git
+pkg_ticktick_repo = https://github.com/ericliang/ticktick
+pkg_ticktick_commit = master
+
+PACKAGES += tinymq
+pkg_tinymq_name = tinymq
+pkg_tinymq_description = TinyMQ - a diminutive, in-memory message queue
+pkg_tinymq_homepage = https://github.com/ChicagoBoss/tinymq
+pkg_tinymq_fetch = git
+pkg_tinymq_repo = https://github.com/ChicagoBoss/tinymq
+pkg_tinymq_commit = master
+
+PACKAGES += tinymt
+pkg_tinymt_name = tinymt
+pkg_tinymt_description = TinyMT pseudo random number generator for Erlang.
+pkg_tinymt_homepage = https://github.com/jj1bdx/tinymt-erlang
+pkg_tinymt_fetch = git
+pkg_tinymt_repo = https://github.com/jj1bdx/tinymt-erlang
+pkg_tinymt_commit = master
+
+PACKAGES += tirerl
+pkg_tirerl_name = tirerl
+pkg_tirerl_description = Erlang interface to Elasticsearch
+pkg_tirerl_homepage = https://github.com/inaka/tirerl
+pkg_tirerl_fetch = git
+pkg_tirerl_repo = https://github.com/inaka/tirerl
+pkg_tirerl_commit = master
+
+PACKAGES += toml
+pkg_toml_name = toml
+pkg_toml_description = TOML (0.4.0) config parser
+pkg_toml_homepage = http://dozzie.jarowit.net/trac/wiki/TOML
+pkg_toml_fetch = git
+pkg_toml_repo = https://github.com/dozzie/toml
+pkg_toml_commit = v0.2.0
+
+PACKAGES += traffic_tools
+pkg_traffic_tools_name = traffic_tools
+pkg_traffic_tools_description = Simple traffic limiting library
+pkg_traffic_tools_homepage = https://github.com/systra/traffic_tools
+pkg_traffic_tools_fetch = git
+pkg_traffic_tools_repo = https://github.com/systra/traffic_tools
+pkg_traffic_tools_commit = master
+
+PACKAGES += trails
+pkg_trails_name = trails
+pkg_trails_description = A couple of improvements over Cowboy Routes
+pkg_trails_homepage = http://inaka.github.io/cowboy-trails/
+pkg_trails_fetch = git
+pkg_trails_repo = https://github.com/inaka/cowboy-trails
+pkg_trails_commit = master
+
+PACKAGES += trane
+pkg_trane_name = trane
+pkg_trane_description = SAX-style parser for broken HTML, in Erlang
+pkg_trane_homepage = https://github.com/massemanet/trane
+pkg_trane_fetch = git
+pkg_trane_repo = https://github.com/massemanet/trane
+pkg_trane_commit = master
+
+PACKAGES += transit
+pkg_transit_name = transit
+pkg_transit_description = transit format for erlang
+pkg_transit_homepage = https://github.com/isaiah/transit-erlang
+pkg_transit_fetch = git
+pkg_transit_repo = https://github.com/isaiah/transit-erlang
+pkg_transit_commit = master
+
+PACKAGES += trie
+pkg_trie_name = trie
+pkg_trie_description = Erlang Trie Implementation
+pkg_trie_homepage = https://github.com/okeuday/trie
+pkg_trie_fetch = git
+pkg_trie_repo = https://github.com/okeuday/trie
+pkg_trie_commit = master
+
+PACKAGES += triq
+pkg_triq_name = triq
+pkg_triq_description = Trifork QuickCheck
+pkg_triq_homepage = https://triq.gitlab.io
+pkg_triq_fetch = git
+pkg_triq_repo = https://gitlab.com/triq/triq.git
+pkg_triq_commit = master
+
+PACKAGES += tunctl
+pkg_tunctl_name = tunctl
+pkg_tunctl_description = Erlang TUN/TAP interface
+pkg_tunctl_homepage = https://github.com/msantos/tunctl
+pkg_tunctl_fetch = git
+pkg_tunctl_repo = https://github.com/msantos/tunctl
+pkg_tunctl_commit = master
+
+PACKAGES += twerl
+pkg_twerl_name = twerl
+pkg_twerl_description = Erlang client for the Twitter Streaming API
+pkg_twerl_homepage = https://github.com/lucaspiller/twerl
+pkg_twerl_fetch = git
+pkg_twerl_repo = https://github.com/lucaspiller/twerl
+pkg_twerl_commit = oauth
+
+PACKAGES += twitter_erlang
+pkg_twitter_erlang_name = twitter_erlang
+pkg_twitter_erlang_description = An Erlang twitter client
+pkg_twitter_erlang_homepage = https://github.com/ngerakines/erlang_twitter
+pkg_twitter_erlang_fetch = git
+pkg_twitter_erlang_repo = https://github.com/ngerakines/erlang_twitter
+pkg_twitter_erlang_commit = master
+
+PACKAGES += ucol_nif
+pkg_ucol_nif_name = ucol_nif
+pkg_ucol_nif_description = ICU based collation Erlang module
+pkg_ucol_nif_homepage = https://github.com/refuge/ucol_nif
+pkg_ucol_nif_fetch = git
+pkg_ucol_nif_repo = https://github.com/refuge/ucol_nif
+pkg_ucol_nif_commit = master
+
+PACKAGES += unicorn
+pkg_unicorn_name = unicorn
+pkg_unicorn_description = Generic configuration server
+pkg_unicorn_homepage = https://github.com/shizzard/unicorn
+pkg_unicorn_fetch = git
+pkg_unicorn_repo = https://github.com/shizzard/unicorn
+pkg_unicorn_commit = master
+
+PACKAGES += unsplit
+pkg_unsplit_name = unsplit
+pkg_unsplit_description = Resolves conflicts in Mnesia after network splits
+pkg_unsplit_homepage = https://github.com/uwiger/unsplit
+pkg_unsplit_fetch = git
+pkg_unsplit_repo = https://github.com/uwiger/unsplit
+pkg_unsplit_commit = master
+
+PACKAGES += uuid
+pkg_uuid_name = uuid
+pkg_uuid_description = Erlang UUID Implementation
+pkg_uuid_homepage = https://github.com/okeuday/uuid
+pkg_uuid_fetch = git
+pkg_uuid_repo = https://github.com/okeuday/uuid
+pkg_uuid_commit = master
+
+PACKAGES += ux
+pkg_ux_name = ux
+pkg_ux_description = Unicode eXtension for Erlang (Strings, Collation)
+pkg_ux_homepage = https://github.com/erlang-unicode/ux
+pkg_ux_fetch = git
+pkg_ux_repo = https://github.com/erlang-unicode/ux
+pkg_ux_commit = master
+
+PACKAGES += vert
+pkg_vert_name = vert
+pkg_vert_description = Erlang binding to libvirt virtualization API
+pkg_vert_homepage = https://github.com/msantos/erlang-libvirt
+pkg_vert_fetch = git
+pkg_vert_repo = https://github.com/msantos/erlang-libvirt
+pkg_vert_commit = master
+
+PACKAGES += verx
+pkg_verx_name = verx
+pkg_verx_description = Erlang implementation of the libvirtd remote protocol
+pkg_verx_homepage = https://github.com/msantos/verx
+pkg_verx_fetch = git
+pkg_verx_repo = https://github.com/msantos/verx
+pkg_verx_commit = master
+
+PACKAGES += vmq_acl
+pkg_vmq_acl_name = vmq_acl
+pkg_vmq_acl_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_acl_homepage = https://verne.mq/
+pkg_vmq_acl_fetch = git
+pkg_vmq_acl_repo = https://github.com/erlio/vmq_acl
+pkg_vmq_acl_commit = master
+
+PACKAGES += vmq_bridge
+pkg_vmq_bridge_name = vmq_bridge
+pkg_vmq_bridge_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_bridge_homepage = https://verne.mq/
+pkg_vmq_bridge_fetch = git
+pkg_vmq_bridge_repo = https://github.com/erlio/vmq_bridge
+pkg_vmq_bridge_commit = master
+
+PACKAGES += vmq_graphite
+pkg_vmq_graphite_name = vmq_graphite
+pkg_vmq_graphite_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_graphite_homepage = https://verne.mq/
+pkg_vmq_graphite_fetch = git
+pkg_vmq_graphite_repo = https://github.com/erlio/vmq_graphite
+pkg_vmq_graphite_commit = master
+
+PACKAGES += vmq_passwd
+pkg_vmq_passwd_name = vmq_passwd
+pkg_vmq_passwd_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_passwd_homepage = https://verne.mq/
+pkg_vmq_passwd_fetch = git
+pkg_vmq_passwd_repo = https://github.com/erlio/vmq_passwd
+pkg_vmq_passwd_commit = master
+
+PACKAGES += vmq_server
+pkg_vmq_server_name = vmq_server
+pkg_vmq_server_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_server_homepage = https://verne.mq/
+pkg_vmq_server_fetch = git
+pkg_vmq_server_repo = https://github.com/erlio/vmq_server
+pkg_vmq_server_commit = master
+
+PACKAGES += vmq_snmp
+pkg_vmq_snmp_name = vmq_snmp
+pkg_vmq_snmp_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_snmp_homepage = https://verne.mq/
+pkg_vmq_snmp_fetch = git
+pkg_vmq_snmp_repo = https://github.com/erlio/vmq_snmp
+pkg_vmq_snmp_commit = master
+
+PACKAGES += vmq_systree
+pkg_vmq_systree_name = vmq_systree
+pkg_vmq_systree_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_systree_homepage = https://verne.mq/
+pkg_vmq_systree_fetch = git
+pkg_vmq_systree_repo = https://github.com/erlio/vmq_systree
+pkg_vmq_systree_commit = master
+
+PACKAGES += vmstats
+pkg_vmstats_name = vmstats
+pkg_vmstats_description = Tiny Erlang app that works with statsderl to generate Erlang VM metrics for graphite logs.
+pkg_vmstats_homepage = https://github.com/ferd/vmstats
+pkg_vmstats_fetch = git
+pkg_vmstats_repo = https://github.com/ferd/vmstats
+pkg_vmstats_commit = master
+
+PACKAGES += walrus
+pkg_walrus_name = walrus
+pkg_walrus_description = Walrus - Mustache-like Templating
+pkg_walrus_homepage = https://github.com/devinus/walrus
+pkg_walrus_fetch = git
+pkg_walrus_repo = https://github.com/devinus/walrus
+pkg_walrus_commit = master
+
+PACKAGES += webmachine
+pkg_webmachine_name = webmachine
+pkg_webmachine_description = A REST-based system for building web applications.
+pkg_webmachine_homepage = https://github.com/basho/webmachine
+pkg_webmachine_fetch = git
+pkg_webmachine_repo = https://github.com/basho/webmachine
+pkg_webmachine_commit = master
+
+PACKAGES += websocket_client
+pkg_websocket_client_name = websocket_client
+pkg_websocket_client_description = Erlang websocket client (ws and wss supported)
+pkg_websocket_client_homepage = https://github.com/jeremyong/websocket_client
+pkg_websocket_client_fetch = git
+pkg_websocket_client_repo = https://github.com/jeremyong/websocket_client
+pkg_websocket_client_commit = master
+
+PACKAGES += worker_pool
+pkg_worker_pool_name = worker_pool
+pkg_worker_pool_description = A simple Erlang worker pool
+pkg_worker_pool_homepage = https://github.com/inaka/worker_pool
+pkg_worker_pool_fetch = git
+pkg_worker_pool_repo = https://github.com/inaka/worker_pool
+pkg_worker_pool_commit = master
+
+PACKAGES += wrangler
+pkg_wrangler_name = wrangler
+pkg_wrangler_description = Import of the Wrangler svn repository.
+pkg_wrangler_homepage = http://www.cs.kent.ac.uk/projects/wrangler/Home.html
+pkg_wrangler_fetch = git
+pkg_wrangler_repo = https://github.com/RefactoringTools/wrangler
+pkg_wrangler_commit = master
+
+PACKAGES += wsock
+pkg_wsock_name = wsock
+pkg_wsock_description = Erlang library to build WebSocket clients and servers
+pkg_wsock_homepage = https://github.com/madtrick/wsock
+pkg_wsock_fetch = git
+pkg_wsock_repo = https://github.com/madtrick/wsock
+pkg_wsock_commit = master
+
+PACKAGES += xhttpc
+pkg_xhttpc_name = xhttpc
+pkg_xhttpc_description = Extensible HTTP Client for Erlang
+pkg_xhttpc_homepage = https://github.com/seriyps/xhttpc
+pkg_xhttpc_fetch = git
+pkg_xhttpc_repo = https://github.com/seriyps/xhttpc
+pkg_xhttpc_commit = master
+
+PACKAGES += xref_runner
+pkg_xref_runner_name = xref_runner
+pkg_xref_runner_description = Erlang Xref Runner (inspired by rebar xref)
+pkg_xref_runner_homepage = https://github.com/inaka/xref_runner
+pkg_xref_runner_fetch = git
+pkg_xref_runner_repo = https://github.com/inaka/xref_runner
+pkg_xref_runner_commit = master
+
+PACKAGES += yamerl
+pkg_yamerl_name = yamerl
+pkg_yamerl_description = YAML 1.2 parser in pure Erlang
+pkg_yamerl_homepage = https://github.com/yakaz/yamerl
+pkg_yamerl_fetch = git
+pkg_yamerl_repo = https://github.com/yakaz/yamerl
+pkg_yamerl_commit = master
+
+PACKAGES += yamler
+pkg_yamler_name = yamler
+pkg_yamler_description = libyaml-based YAML loader for Erlang
+pkg_yamler_homepage = https://github.com/goertzenator/yamler
+pkg_yamler_fetch = git
+pkg_yamler_repo = https://github.com/goertzenator/yamler
+pkg_yamler_commit = master
+
+PACKAGES += yaws
+pkg_yaws_name = yaws
+pkg_yaws_description = Yaws webserver
+pkg_yaws_homepage = http://yaws.hyber.org
+pkg_yaws_fetch = git
+pkg_yaws_repo = https://github.com/klacke/yaws
+pkg_yaws_commit = master
+
+PACKAGES += zab_engine
+pkg_zab_engine_name = zab_engine
+pkg_zab_engine_description = ZAB protocol implemented in Erlang
+pkg_zab_engine_homepage = https://github.com/xinmingyao/zab_engine
+pkg_zab_engine_fetch = git
+pkg_zab_engine_repo = https://github.com/xinmingyao/zab_engine
+pkg_zab_engine_commit = master
+
+PACKAGES += zabbix_sender
+pkg_zabbix_sender_name = zabbix_sender
+pkg_zabbix_sender_description = Zabbix trapper for sending data to Zabbix in pure Erlang
+pkg_zabbix_sender_homepage = https://github.com/stalkermn/zabbix_sender
+pkg_zabbix_sender_fetch = git
+pkg_zabbix_sender_repo = https://github.com/stalkermn/zabbix_sender.git
+pkg_zabbix_sender_commit = master
+
+PACKAGES += zeta
+pkg_zeta_name = zeta
+pkg_zeta_description = HTTP access log parser in Erlang
+pkg_zeta_homepage = https://github.com/s1n4/zeta
+pkg_zeta_fetch = git
+pkg_zeta_repo = https://github.com/s1n4/zeta
+pkg_zeta_commit = master
+
+PACKAGES += zippers
+pkg_zippers_name = zippers
+pkg_zippers_description = A library for functional zipper data structures in Erlang
+pkg_zippers_homepage = https://github.com/ferd/zippers
+pkg_zippers_fetch = git
+pkg_zippers_repo = https://github.com/ferd/zippers
+pkg_zippers_commit = master
+
+PACKAGES += zlists
+pkg_zlists_name = zlists
+pkg_zlists_description = Erlang lazy lists library.
+pkg_zlists_homepage = https://github.com/vjache/erlang-zlists
+pkg_zlists_fetch = git
+pkg_zlists_repo = https://github.com/vjache/erlang-zlists
+pkg_zlists_commit = master
+
+PACKAGES += zraft_lib
+pkg_zraft_lib_name = zraft_lib
+pkg_zraft_lib_description = Erlang raft consensus protocol implementation
+pkg_zraft_lib_homepage = https://github.com/dreyk/zraft_lib
+pkg_zraft_lib_fetch = git
+pkg_zraft_lib_repo = https://github.com/dreyk/zraft_lib
+pkg_zraft_lib_commit = master
+
+PACKAGES += zucchini
+pkg_zucchini_name = zucchini
+pkg_zucchini_description = An Erlang INI parser
+pkg_zucchini_homepage = https://github.com/devinus/zucchini
+pkg_zucchini_fetch = git
+pkg_zucchini_repo = https://github.com/devinus/zucchini
+pkg_zucchini_commit = master
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: search
+
+define pkg_print
+ $(verbose) printf "%s\n" \
+ $(if $(call core_eq,$(1),$(pkg_$(1)_name)),,"Pkg name: $(1)") \
+ "App name: $(pkg_$(1)_name)" \
+ "Description: $(pkg_$(1)_description)" \
+ "Home page: $(pkg_$(1)_homepage)" \
+ "Fetch with: $(pkg_$(1)_fetch)" \
+ "Repository: $(pkg_$(1)_repo)" \
+ "Commit: $(pkg_$(1)_commit)" \
+ ""
+
+endef
+
+search:
+ifdef q
+ $(foreach p,$(PACKAGES), \
+ $(if $(findstring $(call core_lc,$(q)),$(call core_lc,$(pkg_$(p)_name) $(pkg_$(p)_description))), \
+ $(call pkg_print,$(p))))
+else
+ $(foreach p,$(PACKAGES),$(call pkg_print,$(p)))
+endif
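+
+# Usage sketch (hypothetical query string): the search target above is
+# driven by the q variable on the command line, for example:
+#
+#     make search q=pool
+#
+# This prints every package whose name or description contains "pool";
+# without q, the entire index is listed.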
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-deps clean-tmp-deps.log
+
+# Configuration.
+
+ifdef OTP_DEPS
+$(warning The variable OTP_DEPS is deprecated in favor of LOCAL_DEPS.)
+endif
+
+IGNORE_DEPS ?=
+export IGNORE_DEPS
+
+APPS_DIR ?= $(CURDIR)/apps
+export APPS_DIR
+
+DEPS_DIR ?= $(CURDIR)/deps
+export DEPS_DIR
+
+REBAR_DEPS_DIR = $(DEPS_DIR)
+export REBAR_DEPS_DIR
+
+REBAR_GIT ?= https://github.com/rebar/rebar
+REBAR_COMMIT ?= 576e12171ab8d69b048b827b92aa65d067deea01
+
+# External "early" plugins (see core/plugins.mk for regular plugins).
+# They both use the core_dep_plugin macro.
+
+define core_dep_plugin
+ifeq ($(2),$(PROJECT))
+-include $$(patsubst $(PROJECT)/%,%,$(1))
+else
+-include $(DEPS_DIR)/$(1)
+
+$(DEPS_DIR)/$(1): $(DEPS_DIR)/$(2) ;
+endif
+endef
+
+DEP_EARLY_PLUGINS ?=
+
+$(foreach p,$(DEP_EARLY_PLUGINS),\
+ $(eval $(if $(findstring /,$p),\
+ $(call core_dep_plugin,$p,$(firstword $(subst /, ,$p))),\
+ $(call core_dep_plugin,$p/early-plugins.mk,$p))))
+
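+# Example (hypothetical dependency name): a bare name loads that
+# dependency's early-plugins.mk, while a path form loads the given
+# file from within it:
+#
+#     DEP_EARLY_PLUGINS = my_dep
+#     DEP_EARLY_PLUGINS = my_dep/mk/early.mk
+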
+# Query functions.
+
+query_fetch_method = $(if $(dep_$(1)),$(call _qfm_dep,$(word 1,$(dep_$(1)))),$(call _qfm_pkg,$(1)))
+_qfm_dep = $(if $(dep_fetch_$(1)),$(1),$(if $(IS_DEP),legacy,fail))
+_qfm_pkg = $(if $(pkg_$(1)_fetch),$(pkg_$(1)_fetch),fail)
+
+query_name = $(if $(dep_$(1)),$(1),$(if $(pkg_$(1)_name),$(pkg_$(1)_name),$(1)))
+
+query_repo = $(call _qr,$(1),$(call query_fetch_method,$(1)))
+_qr = $(if $(query_repo_$(2)),$(call query_repo_$(2),$(1)),$(call dep_repo,$(1)))
+
+query_repo_default = $(if $(dep_$(1)),$(word 2,$(dep_$(1))),$(pkg_$(1)_repo))
+query_repo_git = $(patsubst git://github.com/%,https://github.com/%,$(call query_repo_default,$(1)))
+query_repo_git-subfolder = $(call query_repo_git,$(1))
+query_repo_git-submodule = -
+query_repo_hg = $(call query_repo_default,$(1))
+query_repo_svn = $(call query_repo_default,$(1))
+query_repo_cp = $(call query_repo_default,$(1))
+query_repo_ln = $(call query_repo_default,$(1))
+query_repo_hex = https://hex.pm/packages/$(if $(word 3,$(dep_$(1))),$(word 3,$(dep_$(1))),$(1))
+query_repo_fail = -
+query_repo_legacy = -
+
+query_version = $(call _qv,$(1),$(call query_fetch_method,$(1)))
+_qv = $(if $(query_version_$(2)),$(call query_version_$(2),$(1)),$(call dep_commit,$(1)))
+
+query_version_default = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(word 3,$(dep_$(1))),$(pkg_$(1)_commit)))
+query_version_git = $(call query_version_default,$(1))
+query_version_git-subfolder = $(call query_version_git,$(1))
+query_version_git-submodule = -
+query_version_hg = $(call query_version_default,$(1))
+query_version_svn = -
+query_version_cp = -
+query_version_ln = -
+query_version_hex = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(word 2,$(dep_$(1))),$(pkg_$(1)_commit)))
+query_version_fail = -
+query_version_legacy = -
+
+query_extra = $(call _qe,$(1),$(call query_fetch_method,$(1)))
+_qe = $(if $(query_extra_$(2)),$(call query_extra_$(2),$(1)),-)
+
+query_extra_git = -
+query_extra_git-subfolder = $(if $(dep_$(1)),subfolder=$(word 4,$(dep_$(1))),-)
+query_extra_git-submodule = -
+query_extra_hg = -
+query_extra_svn = -
+query_extra_cp = -
+query_extra_ln = -
+query_extra_hex = $(if $(dep_$(1)),package-name=$(word 3,$(dep_$(1))),-)
+query_extra_fail = -
+query_extra_legacy = -
+
+query_absolute_path = $(addprefix $(DEPS_DIR)/,$(call query_name,$(1)))
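+
+# Worked example (hypothetical dependency): given
+#
+#     dep_my_dep = git https://github.com/example/my_dep 1.0.0
+#
+# query_fetch_method yields git, query_repo the URL above, query_version
+# yields 1.0.0, and query_absolute_path yields $(DEPS_DIR)/my_dep.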
+
+# Deprecated legacy query functions.
+dep_fetch = $(call query_fetch_method,$(1))
+dep_name = $(call query_name,$(1))
+dep_repo = $(call query_repo_git,$(1))
+dep_commit = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(if $(filter hex,$(word 1,$(dep_$(1)))),$(word 2,$(dep_$(1))),$(word 3,$(dep_$(1)))),$(pkg_$(1)_commit)))
+
+LOCAL_DEPS_DIRS = $(foreach a,$(LOCAL_DEPS),$(if $(wildcard $(APPS_DIR)/$(a)),$(APPS_DIR)/$(a)))
+ALL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(foreach dep,$(filter-out $(IGNORE_DEPS),$(BUILD_DEPS) $(DEPS)),$(call dep_name,$(dep))))
+
+# When we are calling an app directly we don't want to include it here;
+# otherwise it would be treated both as an app and as the top-level project.
+ALL_APPS_DIRS = $(if $(wildcard $(APPS_DIR)/),$(filter-out $(APPS_DIR),$(shell find $(APPS_DIR) -maxdepth 1 -type d)))
+ifdef ROOT_DIR
+ifndef IS_APP
+ALL_APPS_DIRS := $(filter-out $(APPS_DIR)/$(notdir $(CURDIR)),$(ALL_APPS_DIRS))
+endif
+endif
+
+ifeq ($(filter $(APPS_DIR) $(DEPS_DIR),$(subst :, ,$(ERL_LIBS))),)
+ifeq ($(ERL_LIBS),)
+ ERL_LIBS = $(APPS_DIR):$(DEPS_DIR)
+else
+ ERL_LIBS := $(ERL_LIBS):$(APPS_DIR):$(DEPS_DIR)
+endif
+endif
+export ERL_LIBS
+
+export NO_AUTOPATCH
+
+# Verbosity.
+
+dep_verbose_0 = @echo " DEP $1 ($(call dep_commit,$1))";
+dep_verbose_2 = set -x;
+dep_verbose = $(dep_verbose_$(V))
+
+# Optimization: don't recompile deps unless truly necessary.
+
+ifndef IS_DEP
+ifneq ($(MAKELEVEL),0)
+$(shell rm -f ebin/dep_built)
+endif
+endif
+
+# Core targets.
+
+ALL_APPS_DIRS_TO_BUILD = $(if $(LOCAL_DEPS_DIRS)$(IS_APP),$(LOCAL_DEPS_DIRS),$(ALL_APPS_DIRS))
+
+apps:: $(ALL_APPS_DIRS) clean-tmp-deps.log | $(ERLANG_MK_TMP)
+# Create ebin directory for all apps to make sure Erlang recognizes them
+# as proper OTP applications when using -include_lib. This is a temporary
+# fix; a proper fix would be to compile apps/* in the right order.
+ifndef IS_APP
+ifneq ($(ALL_APPS_DIRS),)
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ mkdir -p $$dep/ebin; \
+ done
+endif
+endif
+# At the top level: if LOCAL_DEPS is defined with at least one local app,
+# only compile that list of apps. Otherwise, compile everything.
+# Within an app: compile all LOCAL_DEPS that are (uncompiled) local apps.
+ifneq ($(ALL_APPS_DIRS_TO_BUILD),)
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS_TO_BUILD); do \
+ if grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/apps.log; then \
+ :; \
+ else \
+ echo $$dep >> $(ERLANG_MK_TMP)/apps.log; \
+ $(MAKE) -C $$dep $(if $(IS_TEST),test-build-app) IS_APP=1; \
+ fi \
+ done
+endif
+
+clean-tmp-deps.log:
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_TMP)/apps.log $(ERLANG_MK_TMP)/deps.log
+endif
+
+# Erlang.mk does not rebuild dependencies after they have been
+# compiled once. If a developer is working on the top-level project
+# and some dependencies at the same time, they may want to change
+# this behavior. There are two solutions:
+#     1. Set `FULL=1` so that all dependencies are visited and
+#        recursively recompiled if necessary.
+#     2. Set `FORCE_REBUILD=` to the specific list of dependencies that
+#        should be recompiled (instead of the whole set).
+
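+# Hypothetical invocations, for illustration only:
+#
+#     make FULL=1
+#     make FORCE_REBUILD="my_dep other_dep"
+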
+FORCE_REBUILD ?=
+
+ifeq ($(origin FULL),undefined)
+ifneq ($(strip $(force_rebuild_dep)$(FORCE_REBUILD)),)
+define force_rebuild_dep
+echo "$(FORCE_REBUILD)" | grep -qw "$$(basename "$1")"
+endef
+endif
+endif
+
+ifneq ($(SKIP_DEPS),)
+deps::
+else
+deps:: $(ALL_DEPS_DIRS) apps clean-tmp-deps.log | $(ERLANG_MK_TMP)
+ifneq ($(ALL_DEPS_DIRS),)
+ $(verbose) set -e; for dep in $(ALL_DEPS_DIRS); do \
+ if grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/deps.log; then \
+ :; \
+ else \
+ echo $$dep >> $(ERLANG_MK_TMP)/deps.log; \
+ if [ -z "$(strip $(FULL))" ] $(if $(force_rebuild_dep),&& ! ($(call force_rebuild_dep,$$dep)),) && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ elif [ -f $$dep/GNUmakefile ] || [ -f $$dep/makefile ] || [ -f $$dep/Makefile ]; then \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ else \
+ echo "Error: No Makefile to build dependency $$dep." >&2; \
+ exit 2; \
+ fi \
+ fi \
+ done
+endif
+endif
+
+# Deps related targets.
+
+# @todo rename GNUmakefile and makefile into Makefile first, if they exist
+# While the Makefile could also be named GNUmakefile or makefile,
+# in practice only Makefile has been needed so far.
+define dep_autopatch
+ if [ -f $(DEPS_DIR)/$(1)/erlang.mk ]; then \
+ rm -rf $(DEPS_DIR)/$1/ebin/; \
+ $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+ $(call dep_autopatch_erlang_mk,$(1)); \
+ elif [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+ if [ -f $(DEPS_DIR)/$1/rebar.lock ]; then \
+ $(call dep_autopatch2,$1); \
+ elif [ 0 != `grep -c "include ../\w*\.mk" $(DEPS_DIR)/$(1)/Makefile` ]; then \
+ $(call dep_autopatch2,$(1)); \
+ elif [ 0 != `grep -ci "^[^#].*rebar" $(DEPS_DIR)/$(1)/Makefile` ]; then \
+ $(call dep_autopatch2,$(1)); \
+ elif [ -n "`find $(DEPS_DIR)/$(1)/ -type f -name \*.mk -not -name erlang.mk -exec grep -i "^[^#].*rebar" '{}' \;`" ]; then \
+ $(call dep_autopatch2,$(1)); \
+ fi \
+ else \
+ if [ ! -d $(DEPS_DIR)/$(1)/src/ ]; then \
+ $(call dep_autopatch_noop,$(1)); \
+ else \
+ $(call dep_autopatch2,$(1)); \
+ fi \
+ fi
+endef
+
+define dep_autopatch2
+ ! test -f $(DEPS_DIR)/$1/ebin/$1.app || \
+ mv -n $(DEPS_DIR)/$1/ebin/$1.app $(DEPS_DIR)/$1/src/$1.app.src; \
+ rm -f $(DEPS_DIR)/$1/ebin/$1.app; \
+ if [ -f $(DEPS_DIR)/$1/src/$1.app.src.script ]; then \
+ $(call erlang,$(call dep_autopatch_appsrc_script.erl,$(1))); \
+ fi; \
+ $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+ if [ -f $(DEPS_DIR)/$(1)/rebar -o -f $(DEPS_DIR)/$(1)/rebar.config -o -f $(DEPS_DIR)/$(1)/rebar.config.script -o -f $(DEPS_DIR)/$1/rebar.lock ]; then \
+ $(call dep_autopatch_fetch_rebar); \
+ $(call dep_autopatch_rebar,$(1)); \
+ else \
+ $(call dep_autopatch_gen,$(1)); \
+ fi
+endef
+
+define dep_autopatch_noop
+ printf "noop:\n" > $(DEPS_DIR)/$(1)/Makefile
+endef
+
+# Replace "include erlang.mk" with a line that will load the parent Erlang.mk
+# if given. Do it for all 3 possible Makefile file names.
+ifeq ($(NO_AUTOPATCH_ERLANG_MK),)
+define dep_autopatch_erlang_mk
+ for f in Makefile makefile GNUmakefile; do \
+ if [ -f $(DEPS_DIR)/$1/$$f ]; then \
+ sed -i.bak s/'include *erlang.mk'/'include $$(if $$(ERLANG_MK_FILENAME),$$(ERLANG_MK_FILENAME),erlang.mk)'/ $(DEPS_DIR)/$1/$$f; \
+ fi \
+ done
+endef
+else
+define dep_autopatch_erlang_mk
+ :
+endef
+endif
+
+define dep_autopatch_gen
+ printf "%s\n" \
+ "ERLC_OPTS = +debug_info" \
+ "include ../../erlang.mk" > $(DEPS_DIR)/$(1)/Makefile
+endef
+
+# We use flock/lockf when available to avoid concurrency issues.
+define dep_autopatch_fetch_rebar
+ if command -v flock >/dev/null; then \
+ flock $(ERLANG_MK_TMP)/rebar.lock sh -c "$(call dep_autopatch_fetch_rebar2)"; \
+ elif command -v lockf >/dev/null; then \
+ lockf $(ERLANG_MK_TMP)/rebar.lock sh -c "$(call dep_autopatch_fetch_rebar2)"; \
+ else \
+ $(call dep_autopatch_fetch_rebar2); \
+ fi
+endef
+
+define dep_autopatch_fetch_rebar2
+ if [ ! -d $(ERLANG_MK_TMP)/rebar ]; then \
+ git clone -q -n -- $(REBAR_GIT) $(ERLANG_MK_TMP)/rebar; \
+ cd $(ERLANG_MK_TMP)/rebar; \
+ git checkout -q $(REBAR_COMMIT); \
+ ./bootstrap; \
+ cd -; \
+ fi
+endef
+
+define dep_autopatch_rebar
+ if [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+ mv $(DEPS_DIR)/$(1)/Makefile $(DEPS_DIR)/$(1)/Makefile.orig.mk; \
+ fi; \
+ $(call erlang,$(call dep_autopatch_rebar.erl,$(1))); \
+ rm -f $(DEPS_DIR)/$(1)/ebin/$(1).app
+endef
+
+define dep_autopatch_rebar.erl
+ application:load(rebar),
+ application:set_env(rebar, log_level, debug),
+ rmemo:start(),
+ Conf1 = case file:consult("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config)") of
+ {ok, Conf0} -> Conf0;
+ _ -> []
+ end,
+ {Conf, OsEnv} = fun() ->
+ case filelib:is_file("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)") of
+ false -> {Conf1, []};
+ true ->
+ Bindings0 = erl_eval:new_bindings(),
+ Bindings1 = erl_eval:add_binding('CONFIG', Conf1, Bindings0),
+ Bindings = erl_eval:add_binding('SCRIPT', "$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings1),
+ Before = os:getenv(),
+ {ok, Conf2} = file:script("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings),
+ {Conf2, lists:foldl(fun(E, Acc) -> lists:delete(E, Acc) end, os:getenv(), Before)}
+ end
+ end(),
+ Write = fun (Text) ->
+ file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/Makefile)", Text, [append])
+ end,
+ Escape = fun (Text) ->
+ re:replace(Text, "\\\\$$", "\$$$$", [global, {return, list}])
+ end,
+ Write("IGNORE_DEPS += edown eper eunit_formatters meck node_package "
+ "rebar_lock_deps_plugin rebar_vsn_plugin reltool_util\n"),
+ Write("C_SRC_DIR = /path/do/not/exist\n"),
+ Write("C_SRC_TYPE = rebar\n"),
+ Write("DRV_CFLAGS = -fPIC\nexport DRV_CFLAGS\n"),
+ Write(["ERLANG_ARCH = ", rebar_utils:wordsize(), "\nexport ERLANG_ARCH\n"]),
+ ToList = fun
+ (V) when is_atom(V) -> atom_to_list(V);
+ (V) when is_list(V) -> "'\\"" ++ V ++ "\\"'"
+ end,
+ fun() ->
+ Write("ERLC_OPTS = +debug_info\nexport ERLC_OPTS\n"),
+ case lists:keyfind(erl_opts, 1, Conf) of
+ false -> ok;
+ {_, ErlOpts} ->
+ lists:foreach(fun
+ ({d, D}) ->
+ Write("ERLC_OPTS += -D" ++ ToList(D) ++ "=1\n");
+ ({d, DKey, DVal}) ->
+ Write("ERLC_OPTS += -D" ++ ToList(DKey) ++ "=" ++ ToList(DVal) ++ "\n");
+ ({i, I}) ->
+ Write(["ERLC_OPTS += -I ", I, "\n"]);
+ ({platform_define, Regex, D}) ->
+ case rebar_utils:is_arch(Regex) of
+ true -> Write("ERLC_OPTS += -D" ++ ToList(D) ++ "=1\n");
+ false -> ok
+ end;
+ ({parse_transform, PT}) ->
+ Write("ERLC_OPTS += +'{parse_transform, " ++ ToList(PT) ++ "}'\n");
+ (_) -> ok
+ end, ErlOpts)
+ end,
+ Write("\n")
+ end(),
+ GetHexVsn = fun(N, NP) ->
+ case file:consult("$(call core_native_path,$(DEPS_DIR)/$1/rebar.lock)") of
+ {ok, Lock} ->
+ io:format("~p~n", [Lock]),
+ case lists:keyfind("1.1.0", 1, Lock) of
+ {_, LockPkgs} ->
+ io:format("~p~n", [LockPkgs]),
+ case lists:keyfind(atom_to_binary(N, latin1), 1, LockPkgs) of
+ {_, {pkg, _, Vsn}, _} ->
+ io:format("~p~n", [Vsn]),
+ {N, {hex, NP, binary_to_list(Vsn)}};
+ _ ->
+ false
+ end;
+ _ ->
+ false
+ end;
+ _ ->
+ false
+ end
+ end,
+ SemVsn = fun
+ ("~>" ++ S0) ->
+ S = case S0 of
+ " " ++ S1 -> S1;
+ _ -> S0
+ end,
+ case length([ok || $$. <- S]) of
+ 0 -> S ++ ".0.0";
+ 1 -> S ++ ".0";
+ _ -> S
+ end;
+ (S) -> S
+ end,
+ fun() ->
+ File = case lists:keyfind(deps, 1, Conf) of
+ false -> [];
+ {_, Deps} ->
+ [begin case case Dep of
+ N when is_atom(N) -> GetHexVsn(N, N);
+ {N, S} when is_atom(N), is_list(S) -> {N, {hex, N, SemVsn(S)}};
+ {N, {pkg, NP}} when is_atom(N) -> GetHexVsn(N, NP);
+ {N, S, {pkg, NP}} -> {N, {hex, NP, S}};
+ {N, S} when is_tuple(S) -> {N, S};
+ {N, _, S} -> {N, S};
+ {N, _, S, _} -> {N, S};
+ _ -> false
+ end of
+ false -> ok;
+ {Name, Source} ->
+ {Method, Repo, Commit} = case Source of
+ {hex, NPV, V} -> {hex, V, NPV};
+ {git, R} -> {git, R, master};
+ {M, R, {branch, C}} -> {M, R, C};
+ {M, R, {ref, C}} -> {M, R, C};
+ {M, R, {tag, C}} -> {M, R, C};
+ {M, R, C} -> {M, R, C}
+ end,
+ Write(io_lib:format("DEPS += ~s\ndep_~s = ~s ~s ~s~n", [Name, Name, Method, Repo, Commit]))
+ end end || Dep <- Deps]
+ end
+ end(),
+ fun() ->
+ case lists:keyfind(erl_first_files, 1, Conf) of
+ false -> ok;
+ {_, Files} ->
+ Names = [[" ", case lists:reverse(F) of
+ "lre." ++ Elif -> lists:reverse(Elif);
+ "lrx." ++ Elif -> lists:reverse(Elif);
+ "lry." ++ Elif -> lists:reverse(Elif);
+ Elif -> lists:reverse(Elif)
+ end] || "src/" ++ F <- Files],
+ Write(io_lib:format("COMPILE_FIRST +=~s\n", [Names]))
+ end
+ end(),
+ Write("\n\nrebar_dep: preprocess pre-deps deps pre-app app\n"),
+ Write("\npreprocess::\n"),
+ Write("\npre-deps::\n"),
+ Write("\npre-app::\n"),
+ PatchHook = fun(Cmd) ->
+ Cmd2 = re:replace(Cmd, "^([g]?make)(.*)( -C.*)", "\\\\1\\\\3\\\\2", [{return, list}]),
+ case Cmd2 of
+ "make -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1);
+ "gmake -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1);
+ "make " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1);
+ "gmake " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1);
+ _ -> Escape(Cmd)
+ end
+ end,
+ fun() ->
+ case lists:keyfind(pre_hooks, 1, Conf) of
+ false -> ok;
+ {_, Hooks} ->
+ [case H of
+ {'get-deps', Cmd} ->
+ Write("\npre-deps::\n\t" ++ PatchHook(Cmd) ++ "\n");
+ {compile, Cmd} ->
+ Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n");
+ {Regex, compile, Cmd} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n");
+ false -> ok
+ end;
+ _ -> ok
+ end || H <- Hooks]
+ end
+ end(),
+ ShellToMk = fun(V0) ->
+ V1 = re:replace(V0, "[$$][(]", "$$\(shell ", [global]),
+ V = re:replace(V1, "([$$])(?![(])(\\\\w*)", "\\\\1(\\\\2)", [global]),
+ re:replace(V, "-Werror\\\\b", "", [{return, list}, global])
+ end,
+ PortSpecs = fun() ->
+ case lists:keyfind(port_specs, 1, Conf) of
+ false ->
+ case filelib:is_dir("$(call core_native_path,$(DEPS_DIR)/$1/c_src)") of
+ false -> [];
+ true ->
+ [{"priv/" ++ proplists:get_value(so_name, Conf, "$(1)_drv.so"),
+ proplists:get_value(port_sources, Conf, ["c_src/*.c"]), []}]
+ end;
+ {_, Specs} ->
+ lists:flatten([case S of
+ {Output, Input} -> {ShellToMk(Output), Input, []};
+ {Regex, Output, Input} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {ShellToMk(Output), Input, []};
+ false -> []
+ end;
+ {Regex, Output, Input, [{env, Env}]} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {ShellToMk(Output), Input, Env};
+ false -> []
+ end
+ end || S <- Specs])
+ end
+ end(),
+ PortSpecWrite = fun (Text) ->
+ file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/c_src/Makefile.erlang.mk)", Text, [append])
+ end,
+ case PortSpecs of
+ [] -> ok;
+ _ ->
+ Write("\npre-app::\n\t@$$\(MAKE) --no-print-directory -f c_src/Makefile.erlang.mk\n"),
+ PortSpecWrite(io_lib:format("ERL_CFLAGS ?= -finline-functions -Wall -fPIC -I \\"~s/erts-~s/include\\" -I \\"~s\\"\n",
+ [code:root_dir(), erlang:system_info(version), code:lib_dir(erl_interface, include)])),
+ PortSpecWrite(io_lib:format("ERL_LDFLAGS ?= -L \\"~s\\" -lei\n",
+ [code:lib_dir(erl_interface, lib)])),
+ [PortSpecWrite(["\n", E, "\n"]) || E <- OsEnv],
+ FilterEnv = fun(Env) ->
+ lists:flatten([case E of
+ {_, _} -> E;
+ {Regex, K, V} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {K, V};
+ false -> []
+ end
+ end || E <- Env])
+ end,
+ MergeEnv = fun(Env) ->
+ lists:foldl(fun ({K, V}, Acc) ->
+ case lists:keyfind(K, 1, Acc) of
+ false -> [{K, rebar_utils:expand_env_variable(V, K, "")}|Acc];
+ {_, V0} -> [{K, rebar_utils:expand_env_variable(V, K, V0)}|Acc]
+ end
+ end, [], Env)
+ end,
+ PortEnv = case lists:keyfind(port_env, 1, Conf) of
+ false -> [];
+ {_, PortEnv0} -> FilterEnv(PortEnv0)
+ end,
+ PortSpec = fun ({Output, Input0, Env}) ->
+ filelib:ensure_dir("$(call core_native_path,$(DEPS_DIR)/$1/)" ++ Output),
+ Input = [[" ", I] || I <- Input0],
+ PortSpecWrite([
+ [["\n", K, " = ", ShellToMk(V)] || {K, V} <- lists:reverse(MergeEnv(PortEnv))],
+ case $(PLATFORM) of
+ darwin -> "\n\nLDFLAGS += -flat_namespace -undefined suppress";
+ _ -> ""
+ end,
+ "\n\nall:: ", Output, "\n\t@:\n\n",
+ "%.o: %.c\n\t$$\(CC) -c -o $$\@ $$\< $$\(CFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.C\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.cc\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.cpp\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ [[Output, ": ", K, " += ", ShellToMk(V), "\n"] || {K, V} <- lists:reverse(MergeEnv(FilterEnv(Env)))],
+ Output, ": $$\(foreach ext,.c .C .cc .cpp,",
+ "$$\(patsubst %$$\(ext),%.o,$$\(filter %$$\(ext),$$\(wildcard", Input, "))))\n",
+ "\t$$\(CC) -o $$\@ $$\? $$\(LDFLAGS) $$\(ERL_LDFLAGS) $$\(DRV_LDFLAGS) $$\(EXE_LDFLAGS)",
+ case {filename:extension(Output), $(PLATFORM)} of
+ {[], _} -> "\n";
+ {_, darwin} -> "\n";
+ _ -> " -shared\n"
+ end])
+ end,
+ [PortSpec(S) || S <- PortSpecs]
+ end,
+ fun() ->
+ case lists:keyfind(plugins, 1, Conf) of
+ false -> ok;
+ {_, Plugins0} ->
+ Plugins = [P || P <- Plugins0, is_tuple(P)],
+ case lists:keyfind('lfe-compile', 1, Plugins) of
+ false -> ok;
+ _ -> Write("\nBUILD_DEPS = lfe lfe.mk\ndep_lfe.mk = git https://github.com/ninenines/lfe.mk master\nDEP_PLUGINS = lfe.mk\n")
+ end
+ end
+ end(),
+ Write("\ninclude $$\(if $$\(ERLANG_MK_FILENAME),$$\(ERLANG_MK_FILENAME),erlang.mk)"),
+ RunPlugin = fun(Plugin, Step) ->
+ case erlang:function_exported(Plugin, Step, 2) of
+ false -> ok;
+ true ->
+ c:cd("$(call core_native_path,$(DEPS_DIR)/$1/)"),
+ Ret = Plugin:Step({config, "", Conf, dict:new(), dict:new(), dict:new(),
+ dict:store(base_dir, "", dict:new())}, undefined),
+ io:format("rebar plugin ~p step ~p ret ~p~n", [Plugin, Step, Ret])
+ end
+ end,
+ fun() ->
+ case lists:keyfind(plugins, 1, Conf) of
+ false -> ok;
+ {_, Plugins0} ->
+ Plugins = [P || P <- Plugins0, is_atom(P)],
+ [begin
+ case lists:keyfind(deps, 1, Conf) of
+ false -> ok;
+ {_, Deps} ->
+ case lists:keyfind(P, 1, Deps) of
+ false -> ok;
+ _ ->
+ Path = "$(call core_native_path,$(DEPS_DIR)/)" ++ atom_to_list(P),
+ io:format("~s", [os:cmd("$(MAKE) -C $(call core_native_path,$(DEPS_DIR)/$1) " ++ Path)]),
+ io:format("~s", [os:cmd("$(MAKE) -C " ++ Path ++ " IS_DEP=1")]),
+ code:add_patha(Path ++ "/ebin")
+ end
+ end
+ end || P <- Plugins],
+ [case code:load_file(P) of
+ {module, P} -> ok;
+ _ ->
+ case lists:keyfind(plugin_dir, 1, Conf) of
+ false -> ok;
+ {_, PluginsDir} ->
+ ErlFile = "$(call core_native_path,$(DEPS_DIR)/$1/)" ++ PluginsDir ++ "/" ++ atom_to_list(P) ++ ".erl",
+ {ok, P, Bin} = compile:file(ErlFile, [binary]),
+ {module, P} = code:load_binary(P, ErlFile, Bin)
+ end
+ end || P <- Plugins],
+ [RunPlugin(P, preprocess) || P <- Plugins],
+ [RunPlugin(P, pre_compile) || P <- Plugins],
+ [RunPlugin(P, compile) || P <- Plugins]
+ end
+ end(),
+ halt()
+endef
+
+define dep_autopatch_appsrc_script.erl
+ AppSrc = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+ AppSrcScript = AppSrc ++ ".script",
+ {ok, Conf0} = file:consult(AppSrc),
+ Bindings0 = erl_eval:new_bindings(),
+ Bindings1 = erl_eval:add_binding('CONFIG', Conf0, Bindings0),
+ Bindings = erl_eval:add_binding('SCRIPT', AppSrcScript, Bindings1),
+ Conf = case file:script(AppSrcScript, Bindings) of
+ {ok, [C]} -> C;
+ {ok, C} -> C
+ end,
+ ok = file:write_file(AppSrc, io_lib:format("~p.~n", [Conf])),
+ halt()
+endef
+
+define dep_autopatch_appsrc.erl
+ AppSrcOut = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+ AppSrcIn = case filelib:is_regular(AppSrcOut) of false -> "$(call core_native_path,$(DEPS_DIR)/$1/ebin/$1.app)"; true -> AppSrcOut end,
+ case filelib:is_regular(AppSrcIn) of
+ false -> ok;
+ true ->
+ {ok, [{application, $(1), L0}]} = file:consult(AppSrcIn),
+ L1 = lists:keystore(modules, 1, L0, {modules, []}),
+ L2 = case lists:keyfind(vsn, 1, L1) of
+ {_, git} -> lists:keyreplace(vsn, 1, L1, {vsn, lists:droplast(os:cmd("git -C $(DEPS_DIR)/$1 describe --dirty --tags --always"))});
+ {_, {cmd, _}} -> lists:keyreplace(vsn, 1, L1, {vsn, "cmd"});
+ _ -> L1
+ end,
+ L3 = case lists:keyfind(registered, 1, L2) of false -> [{registered, []}|L2]; _ -> L2 end,
+ ok = file:write_file(AppSrcOut, io_lib:format("~p.~n", [{application, $(1), L3}])),
+ case AppSrcOut of AppSrcIn -> ok; _ -> ok = file:delete(AppSrcIn) end
+ end,
+ halt()
+endef
+
+define dep_fetch_git
+ git clone -q -n -- $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && git checkout -q $(call dep_commit,$(1));
+endef
+
+define dep_fetch_git-subfolder
+ mkdir -p $(ERLANG_MK_TMP)/git-subfolder; \
+ git clone -q -n -- $(call dep_repo,$1) \
+ $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1); \
+ cd $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1) \
+ && git checkout -q $(call dep_commit,$1); \
+ ln -s $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1)/$(word 4,$(dep_$(1))) \
+ $(DEPS_DIR)/$(call dep_name,$1);
+endef
+
+define dep_fetch_git-submodule
+ git submodule update --init -- $(DEPS_DIR)/$1;
+endef
+
+define dep_fetch_hg
+ hg clone -q -U $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && hg update -q $(call dep_commit,$(1));
+endef
+
+define dep_fetch_svn
+ svn checkout -q $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+define dep_fetch_cp
+ cp -R $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+define dep_fetch_ln
+ ln -s $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+# Hex dependencies only carry a package version, so there is no need to
+# look them up in the Erlang.mk package index.
+define dep_fetch_hex
+ mkdir -p $(ERLANG_MK_TMP)/hex $(DEPS_DIR)/$1; \
+ $(call core_http_get,$(ERLANG_MK_TMP)/hex/$1.tar,\
+ https://repo.hex.pm/tarballs/$(if $(word 3,$(dep_$1)),$(word 3,$(dep_$1)),$1)-$(strip $(word 2,$(dep_$1))).tar); \
+ tar -xOf $(ERLANG_MK_TMP)/hex/$1.tar contents.tar.gz | tar -C $(DEPS_DIR)/$1 -xzf -;
+endef
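+
+# For instance (hypothetical package and version), a hex dependency
+# declares its version as the second word, and optionally a package
+# name differing from the application name as the third:
+#
+#     dep_my_dep = hex 1.0.0
+#     dep_my_dep = hex 1.0.0 my_pkg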
+
+define dep_fetch_fail
+ echo "Error: Unknown or invalid dependency: $(1)." >&2; \
+ exit 78;
+endef
+
+# Kept for compatibility with older Erlang.mk configurations.
+define dep_fetch_legacy
+ $(warning WARNING: '$(1)' dependency configuration uses deprecated format.) \
+ git clone -q -n -- $(word 1,$(dep_$(1))) $(DEPS_DIR)/$(1); \
+ cd $(DEPS_DIR)/$(1) && git checkout -q $(if $(word 2,$(dep_$(1))),$(word 2,$(dep_$(1))),master);
+endef
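+
+# The deprecated format above is a bare repository URL followed by an
+# optional commit (hypothetical names):
+#
+#     dep_my_dep = https://github.com/example/my_dep master
+#
+# Such entries are cloned with git and default to the master branch.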
+
+define dep_target
+$(DEPS_DIR)/$(call dep_name,$1): | $(ERLANG_MK_TMP)
+ $(eval DEP_NAME := $(call dep_name,$1))
+ $(eval DEP_STR := $(if $(filter $1,$(DEP_NAME)),$1,"$1 ($(DEP_NAME))"))
+ $(verbose) if test -d $(APPS_DIR)/$(DEP_NAME); then \
+ echo "Error: Dependency" $(DEP_STR) "conflicts with application found in $(APPS_DIR)/$(DEP_NAME)." >&2; \
+ exit 17; \
+ fi
+ $(verbose) mkdir -p $(DEPS_DIR)
+ $(dep_verbose) $(call dep_fetch_$(strip $(call dep_fetch,$(1))),$(1))
+ $(verbose) if [ -f $(DEPS_DIR)/$(1)/configure.ac -o -f $(DEPS_DIR)/$(1)/configure.in ] \
+ && [ ! -f $(DEPS_DIR)/$(1)/configure ]; then \
+ echo " AUTO " $(DEP_STR); \
+ cd $(DEPS_DIR)/$(1) && autoreconf -Wall -vif -I m4; \
+ fi
+ - $(verbose) if [ -f $(DEPS_DIR)/$(DEP_NAME)/configure ]; then \
+ echo " CONF " $(DEP_STR); \
+ cd $(DEPS_DIR)/$(DEP_NAME) && ./configure; \
+ fi
+ifeq ($(filter $(1),$(NO_AUTOPATCH)),)
+ $(verbose) $$(MAKE) --no-print-directory autopatch-$(DEP_NAME)
+endif
+
+.PHONY: autopatch-$(call dep_name,$1)
+
+autopatch-$(call dep_name,$1)::
+ $(verbose) if [ "$(1)" = "amqp_client" -a "$(RABBITMQ_CLIENT_PATCH)" ]; then \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \
+ echo " PATCH Downloading rabbitmq-codegen"; \
+ git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \
+ fi; \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-server ]; then \
+ echo " PATCH Downloading rabbitmq-server"; \
+ git clone https://github.com/rabbitmq/rabbitmq-server.git $(DEPS_DIR)/rabbitmq-server; \
+ fi; \
+ ln -s $(DEPS_DIR)/amqp_client/deps/rabbit_common-0.0.0 $(DEPS_DIR)/rabbit_common; \
+ elif [ "$(1)" = "rabbit" -a "$(RABBITMQ_SERVER_PATCH)" ]; then \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \
+ echo " PATCH Downloading rabbitmq-codegen"; \
+ git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \
+ fi \
+ elif [ "$1" = "elixir" -a "$(ELIXIR_PATCH)" ]; then \
+ ln -s lib/elixir/ebin $(DEPS_DIR)/elixir/; \
+ else \
+ $$(call dep_autopatch,$(call dep_name,$1)) \
+ fi
+endef
+
+$(foreach dep,$(BUILD_DEPS) $(DEPS),$(eval $(call dep_target,$(dep))))
+
+ifndef IS_APP
+clean:: clean-apps
+
+clean-apps:
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ $(MAKE) -C $$dep clean IS_APP=1; \
+ done
+
+distclean:: distclean-apps
+
+distclean-apps:
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ $(MAKE) -C $$dep distclean IS_APP=1; \
+ done
+endif
+
+ifndef SKIP_DEPS
+distclean:: distclean-deps
+
+distclean-deps:
+ $(gen_verbose) rm -rf $(DEPS_DIR)
+endif
+
+# Forward-declare variables used in core/deps-tools.mk. This is required
+# in case plugins use them.
+
+ERLANG_MK_RECURSIVE_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-deps-list.log
+ERLANG_MK_RECURSIVE_DOC_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-doc-deps-list.log
+ERLANG_MK_RECURSIVE_REL_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-rel-deps-list.log
+ERLANG_MK_RECURSIVE_TEST_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-test-deps-list.log
+ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-shell-deps-list.log
+
+ERLANG_MK_QUERY_DEPS_FILE = $(ERLANG_MK_TMP)/query-deps.log
+ERLANG_MK_QUERY_DOC_DEPS_FILE = $(ERLANG_MK_TMP)/query-doc-deps.log
+ERLANG_MK_QUERY_REL_DEPS_FILE = $(ERLANG_MK_TMP)/query-rel-deps.log
+ERLANG_MK_QUERY_TEST_DEPS_FILE = $(ERLANG_MK_TMP)/query-test-deps.log
+ERLANG_MK_QUERY_SHELL_DEPS_FILE = $(ERLANG_MK_TMP)/query-shell-deps.log
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: clean-app
+
+# Configuration.
+
+ERLC_OPTS ?= -Werror +debug_info +warn_export_vars +warn_shadow_vars \
+ +warn_obsolete_guard # +bin_opt_info +warn_export_all +warn_missing_spec
+COMPILE_FIRST ?=
+COMPILE_FIRST_PATHS = $(addprefix src/,$(addsuffix .erl,$(COMPILE_FIRST)))
+ERLC_EXCLUDE ?=
+ERLC_EXCLUDE_PATHS = $(addprefix src/,$(addsuffix .erl,$(ERLC_EXCLUDE)))
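+
+# For example (hypothetical module names), a project might list a
+# behaviour module first and exclude a generated module:
+#
+#     COMPILE_FIRST = my_behaviour
+#     ERLC_EXCLUDE = my_generated_module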
+
+ERLC_ASN1_OPTS ?=
+
+ERLC_MIB_OPTS ?=
+COMPILE_MIB_FIRST ?=
+COMPILE_MIB_FIRST_PATHS = $(addprefix mibs/,$(addsuffix .mib,$(COMPILE_MIB_FIRST)))
+
+# Verbosity.
+
+app_verbose_0 = @echo " APP " $(PROJECT);
+app_verbose_2 = set -x;
+app_verbose = $(app_verbose_$(V))
+
+appsrc_verbose_0 = @echo " APP " $(PROJECT).app.src;
+appsrc_verbose_2 = set -x;
+appsrc_verbose = $(appsrc_verbose_$(V))
+
+makedep_verbose_0 = @echo " DEPEND" $(PROJECT).d;
+makedep_verbose_2 = set -x;
+makedep_verbose = $(makedep_verbose_$(V))
+
+erlc_verbose_0 = @echo " ERLC " $(filter-out $(patsubst %,%.erl,$(ERLC_EXCLUDE)),\
+ $(filter %.erl %.core,$(?F)));
+erlc_verbose_2 = set -x;
+erlc_verbose = $(erlc_verbose_$(V))
+
+xyrl_verbose_0 = @echo " XYRL " $(filter %.xrl %.yrl,$(?F));
+xyrl_verbose_2 = set -x;
+xyrl_verbose = $(xyrl_verbose_$(V))
+
+asn1_verbose_0 = @echo " ASN1 " $(filter %.asn1,$(?F));
+asn1_verbose_2 = set -x;
+asn1_verbose = $(asn1_verbose_$(V))
+
+mib_verbose_0 = @echo " MIB " $(filter %.bin %.mib,$(?F));
+mib_verbose_2 = set -x;
+mib_verbose = $(mib_verbose_$(V))
+
+ifneq ($(wildcard src/),)
+
+# Targets.
+
+app:: $(if $(wildcard ebin/test),clean) deps
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d
+ $(verbose) $(MAKE) --no-print-directory app-build
+
+ifeq ($(wildcard src/$(PROJECT_MOD).erl),)
+define app_file
+{application, '$(PROJECT)', [
+ {description, "$(PROJECT_DESCRIPTION)"},
+ {vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP),
+ {id$(comma)$(space)"$(1)"}$(comma))
+ {modules, [$(call comma_list,$(2))]},
+ {registered, []},
+ {applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(foreach dep,$(DEPS),$(call dep_name,$(dep))))]},
+ {env, $(subst \,\\,$(PROJECT_ENV))}$(if $(findstring {,$(PROJECT_APP_EXTRA_KEYS)),$(comma)$(newline)$(tab)$(subst \,\\,$(PROJECT_APP_EXTRA_KEYS)),)
+]}.
+endef
+else
+define app_file
+{application, '$(PROJECT)', [
+ {description, "$(PROJECT_DESCRIPTION)"},
+ {vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP),
+ {id$(comma)$(space)"$(1)"}$(comma))
+ {modules, [$(call comma_list,$(2))]},
+ {registered, [$(call comma_list,$(PROJECT)_sup $(PROJECT_REGISTERED))]},
+ {applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(foreach dep,$(DEPS),$(call dep_name,$(dep))))]},
+ {mod, {$(PROJECT_MOD), []}},
+ {env, $(subst \,\\,$(PROJECT_ENV))}$(if $(findstring {,$(PROJECT_APP_EXTRA_KEYS)),$(comma)$(newline)$(tab)$(subst \,\\,$(PROJECT_APP_EXTRA_KEYS)),)
+]}.
+endef
+endif
+
+app-build: ebin/$(PROJECT).app
+ $(verbose) :
+
+# Source files.
+
+ALL_SRC_FILES := $(sort $(call core_find,src/,*))
+
+ERL_FILES := $(filter %.erl,$(ALL_SRC_FILES))
+CORE_FILES := $(filter %.core,$(ALL_SRC_FILES))
+
+# ASN.1 files.
+
+ifneq ($(wildcard asn1/),)
+ASN1_FILES = $(sort $(call core_find,asn1/,*.asn1))
+ERL_FILES += $(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES))))
+
+define compile_asn1
+ $(verbose) mkdir -p include/
+ $(asn1_verbose) erlc -v -I include/ -o asn1/ +noobj $(ERLC_ASN1_OPTS) $(1)
+ $(verbose) mv asn1/*.erl src/
+ -$(verbose) mv asn1/*.hrl include/
+ $(verbose) mv asn1/*.asn1db include/
+endef
+
+$(PROJECT).d:: $(ASN1_FILES)
+ $(if $(strip $?),$(call compile_asn1,$?))
+endif
+
+# SNMP MIB files.
+
+ifneq ($(wildcard mibs/),)
+MIB_FILES = $(sort $(call core_find,mibs/,*.mib))
+
+$(PROJECT).d:: $(COMPILE_MIB_FIRST_PATHS) $(MIB_FILES)
+ $(verbose) mkdir -p include/ priv/mibs/
+ $(mib_verbose) erlc -v $(ERLC_MIB_OPTS) -o priv/mibs/ -I priv/mibs/ $?
+ $(mib_verbose) erlc -o include/ -- $(addprefix priv/mibs/,$(patsubst %.mib,%.bin,$(notdir $?)))
+endif
+
+# Leex and Yecc files.
+
+XRL_FILES := $(filter %.xrl,$(ALL_SRC_FILES))
+XRL_ERL_FILES = $(addprefix src/,$(patsubst %.xrl,%.erl,$(notdir $(XRL_FILES))))
+ERL_FILES += $(XRL_ERL_FILES)
+
+YRL_FILES := $(filter %.yrl,$(ALL_SRC_FILES))
+YRL_ERL_FILES = $(addprefix src/,$(patsubst %.yrl,%.erl,$(notdir $(YRL_FILES))))
+ERL_FILES += $(YRL_ERL_FILES)
+
+$(PROJECT).d:: $(XRL_FILES) $(YRL_FILES)
+ $(if $(strip $?),$(xyrl_verbose) erlc -v -o src/ $(YRL_ERLC_OPTS) $?)
+
+# Erlang and Core Erlang files.
+
+define makedep.erl
+ E = ets:new(makedep, [bag]),
+ G = digraph:new([acyclic]),
+ ErlFiles = lists:usort(string:tokens("$(ERL_FILES)", " ")),
+ DepsDir = "$(call core_native_path,$(DEPS_DIR))",
+ AppsDir = "$(call core_native_path,$(APPS_DIR))",
+ DepsDirsSrc = "$(if $(wildcard $(DEPS_DIR)/*/src), $(call core_native_path,$(wildcard $(DEPS_DIR)/*/src)))",
+ DepsDirsInc = "$(if $(wildcard $(DEPS_DIR)/*/include), $(call core_native_path,$(wildcard $(DEPS_DIR)/*/include)))",
+ AppsDirsSrc = "$(if $(wildcard $(APPS_DIR)/*/src), $(call core_native_path,$(wildcard $(APPS_DIR)/*/src)))",
+ AppsDirsInc = "$(if $(wildcard $(APPS_DIR)/*/include), $(call core_native_path,$(wildcard $(APPS_DIR)/*/include)))",
+ DepsDirs = lists:usort(string:tokens(DepsDirsSrc++DepsDirsInc, " ")),
+ AppsDirs = lists:usort(string:tokens(AppsDirsSrc++AppsDirsInc, " ")),
+ Modules = [{list_to_atom(filename:basename(F, ".erl")), F} || F <- ErlFiles],
+ Add = fun (Mod, Dep) ->
+ case lists:keyfind(Dep, 1, Modules) of
+ false -> ok;
+ {_, DepFile} ->
+ {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+ ets:insert(E, {ModFile, DepFile}),
+ digraph:add_vertex(G, Mod),
+ digraph:add_vertex(G, Dep),
+ digraph:add_edge(G, Mod, Dep)
+ end
+ end,
+ AddHd = fun (F, Mod, DepFile) ->
+ case file:open(DepFile, [read]) of
+ {error, enoent} ->
+ ok;
+ {ok, Fd} ->
+ {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+ case ets:match(E, {ModFile, DepFile}) of
+ [] ->
+ ets:insert(E, {ModFile, DepFile}),
+ F(F, Fd, Mod,0);
+ _ -> ok
+ end
+ end
+ end,
+ SearchHrl = fun
+ F(_Hrl, []) -> {error,enoent};
+ F(Hrl, [Dir|Dirs]) ->
+ HrlF = filename:join([Dir,Hrl]),
+ case filelib:is_file(HrlF) of
+ true ->
+ {ok, HrlF};
+ false -> F(Hrl,Dirs)
+ end
+ end,
+ Attr = fun
+ (_F, Mod, behavior, Dep) ->
+ Add(Mod, Dep);
+ (_F, Mod, behaviour, Dep) ->
+ Add(Mod, Dep);
+ (_F, Mod, compile, {parse_transform, Dep}) ->
+ Add(Mod, Dep);
+ (_F, Mod, compile, Opts) when is_list(Opts) ->
+ case proplists:get_value(parse_transform, Opts) of
+ undefined -> ok;
+ Dep -> Add(Mod, Dep)
+ end;
+ (F, Mod, include, Hrl) ->
+ case SearchHrl(Hrl, ["src", "include",AppsDir,DepsDir]++AppsDirs++DepsDirs) of
+ {ok, FoundHrl} -> AddHd(F, Mod, FoundHrl);
+ {error, _} -> false
+ end;
+ (F, Mod, include_lib, Hrl) ->
+ case SearchHrl(Hrl, ["src", "include",AppsDir,DepsDir]++AppsDirs++DepsDirs) of
+ {ok, FoundHrl} -> AddHd(F, Mod, FoundHrl);
+ {error, _} -> false
+ end;
+ (F, Mod, import, {Imp, _}) ->
+ IsFile =
+ case lists:keyfind(Imp, 1, Modules) of
+ false -> false;
+ {_, FilePath} -> filelib:is_file(FilePath)
+ end,
+ case IsFile of
+ false -> ok;
+ true -> Add(Mod, Imp)
+ end;
+ (_, _, _, _) -> ok
+ end,
+ MakeDepend = fun
+ (F, Fd, Mod, StartLocation) ->
+ {ok, Filename} = file:pid2name(Fd),
+ case io:parse_erl_form(Fd, undefined, StartLocation) of
+ {ok, AbsData, EndLocation} ->
+ case AbsData of
+ {attribute, _, Key, Value} ->
+ Attr(F, Mod, Key, Value),
+ F(F, Fd, Mod, EndLocation);
+ _ -> F(F, Fd, Mod, EndLocation)
+ end;
+ {eof, _ } -> file:close(Fd);
+ {error, ErrorDescription } ->
+ file:close(Fd);
+ {error, ErrorInfo, ErrorLocation} ->
+ F(F, Fd, Mod, ErrorLocation)
+ end,
+ ok
+ end,
+ [begin
+ Mod = list_to_atom(filename:basename(F, ".erl")),
+ case file:open(F, [read]) of
+ {ok, Fd} -> MakeDepend(MakeDepend, Fd, Mod,0);
+ {error, enoent} -> ok
+ end
+ end || F <- ErlFiles],
+ Depend = sofs:to_external(sofs:relation_to_family(sofs:relation(ets:tab2list(E)))),
+ CompileFirst = [X || X <- lists:reverse(digraph_utils:topsort(G)), [] =/= digraph:in_neighbours(G, X)],
+ TargetPath = fun(Target) ->
+ case lists:keyfind(Target, 1, Modules) of
+ false -> "";
+ {_, DepFile} ->
+ DirSubname = tl(string:tokens(filename:dirname(DepFile), "/")),
+ string:join(DirSubname ++ [atom_to_list(Target)], "/")
+ end
+ end,
+ Output0 = [
+ "# Generated by Erlang.mk. Edit at your own risk!\n\n",
+ [[F, "::", [[" ", D] || D <- Deps], "; @touch \$$@\n"] || {F, Deps} <- Depend],
+ "\nCOMPILE_FIRST +=", [[" ", TargetPath(CF)] || CF <- CompileFirst], "\n"
+ ],
+ Output = case "é" of
+ [233] -> unicode:characters_to_binary(Output0);
+ _ -> Output0
+ end,
+ ok = file:write_file("$(1)", Output),
+ halt()
+endef
+
+ifeq ($(if $(NO_MAKEDEP),$(wildcard $(PROJECT).d),),)
+$(PROJECT).d:: $(ERL_FILES) $(call core_find,include/,*.hrl) $(MAKEFILE_LIST)
+ $(makedep_verbose) $(call erlang,$(call makedep.erl,$@))
+endif
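+
+# Sketch of the effect (hypothetical invocation): running
+#
+#     make NO_MAKEDEP=1
+#
+# reuses an existing $(PROJECT).d as-is instead of regenerating it from
+# the source files.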
+
+ifeq ($(IS_APP)$(IS_DEP),)
+ifneq ($(words $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES)),0)
+# Rebuild everything when the Makefile changes.
+$(ERLANG_MK_TMP)/last-makefile-change: $(MAKEFILE_LIST) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES); \
+ touch -c $(PROJECT).d; \
+ fi
+ $(verbose) touch $@
+
+$(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES):: $(ERLANG_MK_TMP)/last-makefile-change
+ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change
+endif
+endif
+
+$(PROJECT).d::
+ $(verbose) :
+
+include $(wildcard $(PROJECT).d)
+
+ebin/$(PROJECT).app:: ebin/
+
+ebin/:
+ $(verbose) mkdir -p ebin/
+
+define compile_erl
+ $(erlc_verbose) erlc -v $(if $(IS_DEP),$(filter-out -Werror,$(ERLC_OPTS)),$(ERLC_OPTS)) -o ebin/ \
+ -pa ebin/ -I include/ $(filter-out $(ERLC_EXCLUDE_PATHS),$(COMPILE_FIRST_PATHS) $(1))
+endef
+
+define validate_app_file
+ case file:consult("ebin/$(PROJECT).app") of
+ {ok, _} -> halt();
+ _ -> halt(1)
+ end
+endef
+
+ebin/$(PROJECT).app:: $(ERL_FILES) $(CORE_FILES) $(wildcard src/$(PROJECT).app.src)
+ $(eval FILES_TO_COMPILE := $(filter-out src/$(PROJECT).app.src,$?))
+ $(if $(strip $(FILES_TO_COMPILE)),$(call compile_erl,$(FILES_TO_COMPILE)))
+# Older git versions do not have the --first-parent flag. Do without in that case.
+ $(eval GITDESCRIBE := $(shell git describe --dirty --abbrev=7 --tags --always --first-parent 2>/dev/null \
+ || git describe --dirty --abbrev=7 --tags --always 2>/dev/null || true))
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(filter-out $(ERLC_EXCLUDE_PATHS),$(ERL_FILES) $(CORE_FILES) $(BEAM_FILES)))))))
+ifeq ($(wildcard src/$(PROJECT).app.src),)
+ $(app_verbose) printf '$(subst %,%%,$(subst $(newline),\n,$(subst ','\'',$(call app_file,$(GITDESCRIBE),$(MODULES)))))' \
+ > ebin/$(PROJECT).app
+ $(verbose) if ! $(call erlang,$(call validate_app_file)); then \
+ echo "The .app file produced is invalid. Please verify the value of PROJECT_ENV." >&2; \
+ exit 1; \
+ fi
+else
+ $(verbose) if [ -z "$$(grep -e '^[^%]*{\s*modules\s*,' src/$(PROJECT).app.src)" ]; then \
+ echo "Empty modules entry not found in $(PROJECT).app.src. Please consult the erlang.mk documentation for instructions." >&2; \
+ exit 1; \
+ fi
+ $(appsrc_verbose) cat src/$(PROJECT).app.src \
+ | sed "s/{[[:space:]]*modules[[:space:]]*,[[:space:]]*\[\]}/{modules, \[$(call comma_list,$(MODULES))\]}/" \
+ | sed "s/{id,[[:space:]]*\"git\"}/{id, \"$(subst /,\/,$(GITDESCRIBE))\"}/" \
+ > ebin/$(PROJECT).app
+endif
+ifneq ($(wildcard src/$(PROJECT).appup),)
+ $(verbose) cp src/$(PROJECT).appup ebin/
+endif
+
+clean:: clean-app
+
+clean-app:
+ $(gen_verbose) rm -rf $(PROJECT).d ebin/ priv/mibs/ $(XRL_ERL_FILES) $(YRL_ERL_FILES) \
+ $(addprefix include/,$(patsubst %.mib,%.hrl,$(notdir $(MIB_FILES)))) \
+ $(addprefix include/,$(patsubst %.asn1,%.hrl,$(notdir $(ASN1_FILES)))) \
+ $(addprefix include/,$(patsubst %.asn1,%.asn1db,$(notdir $(ASN1_FILES)))) \
+ $(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES))))
+
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Viktor Söderqvist <viktor@zuiderkwast.se>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: docs-deps
+
+# Configuration.
+
+ALL_DOC_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(DOC_DEPS))
+
+# Targets.
+
+$(foreach dep,$(DOC_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+doc-deps:
+else
+doc-deps: $(ALL_DOC_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_DOC_DEPS_DIRS) ; do $(MAKE) -C $$dep IS_DEP=1; done
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: rel-deps
+
+# Configuration.
+
+ALL_REL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(REL_DEPS))
+
+# Targets.
+
+$(foreach dep,$(REL_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+rel-deps:
+else
+rel-deps: $(ALL_REL_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_REL_DEPS_DIRS) ; do $(MAKE) -C $$dep; done
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: test-deps test-dir test-build clean-test-dir
+
+# Configuration.
+
+TEST_DIR ?= $(CURDIR)/test
+
+ALL_TEST_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(TEST_DEPS))
+
+TEST_ERLC_OPTS ?= +debug_info +warn_export_vars +warn_shadow_vars +warn_obsolete_guard
+TEST_ERLC_OPTS += -DTEST=1
+
+# Targets.
+
+$(foreach dep,$(TEST_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+test-deps:
+else
+test-deps: $(ALL_TEST_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_TEST_DEPS_DIRS) ; do \
+ if [ -z "$(strip $(FULL))" ] && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ else \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ fi \
+ done
+endif
+
+ifneq ($(wildcard $(TEST_DIR)),)
+test-dir: $(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build
+ @:
+
+test_erlc_verbose_0 = @echo " ERLC " $(filter-out $(patsubst %,%.erl,$(ERLC_EXCLUDE)),\
+ $(filter %.erl %.core,$(notdir $(FILES_TO_COMPILE))));
+test_erlc_verbose_2 = set -x;
+test_erlc_verbose = $(test_erlc_verbose_$(V))
+
+define compile_test_erl
+ $(test_erlc_verbose) erlc -v $(TEST_ERLC_OPTS) -o $(TEST_DIR) \
+ -pa ebin/ -I include/ $(1)
+endef
+
+ERL_TEST_FILES = $(call core_find,$(TEST_DIR)/,*.erl)
+$(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build: $(ERL_TEST_FILES) $(MAKEFILE_LIST)
+ $(eval FILES_TO_COMPILE := $(if $(filter $(MAKEFILE_LIST),$?),$(filter $(ERL_TEST_FILES),$^),$?))
+ $(if $(strip $(FILES_TO_COMPILE)),$(call compile_test_erl,$(FILES_TO_COMPILE)) && touch $@)
+endif
+
+test-build:: IS_TEST=1
+test-build:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build:: $(if $(wildcard src),$(if $(wildcard ebin/test),,clean)) $(if $(IS_APP),,deps test-deps)
+# We already compiled everything when IS_APP=1.
+ifndef IS_APP
+ifneq ($(wildcard src),)
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(verbose) $(MAKE) --no-print-directory app-build ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(gen_verbose) touch ebin/test
+endif
+ifneq ($(wildcard $(TEST_DIR)),)
+ $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+endif
+endif
+
+# Roughly the same as test-build, but when IS_APP=1.
+# We only care about compiling the current application.
+ifdef IS_APP
+test-build-app:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build-app:: deps test-deps
+ifneq ($(wildcard src),)
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(verbose) $(MAKE) --no-print-directory app-build ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(gen_verbose) touch ebin/test
+endif
+ifneq ($(wildcard $(TEST_DIR)),)
+ $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+endif
+endif
+
+clean:: clean-test-dir
+
+clean-test-dir:
+ifneq ($(wildcard $(TEST_DIR)/*.beam),)
+ $(gen_verbose) rm -f $(TEST_DIR)/*.beam $(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: rebar.config
+
+# We strip out -Werror because we don't want to fail due to
+# warnings when used as a dependency.
+
+compat_prepare_erlc_opts = $(shell echo "$1" | sed 's/, */,/g')
+
+define compat_convert_erlc_opts
+$(if $(filter-out -Werror,$1),\
+ $(if $(findstring +,$1),\
+ $(shell echo $1 | cut -b 2-)))
+endef
+
+define compat_erlc_opts_to_list
+[$(call comma_list,$(foreach o,$(call compat_prepare_erlc_opts,$1),$(call compat_convert_erlc_opts,$o)))]
+endef
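+
+# Sketch of the conversion (hypothetical options): an ERLC_OPTS value of
+#
+#     -Werror +debug_info +warn_export_vars
+#
+# becomes [debug_info,warn_export_vars] in the generated rebar.config,
+# with -Werror dropped as explained above.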
+
+define compat_rebar_config
+{deps, [
+$(call comma_list,$(foreach d,$(DEPS),\
+ $(if $(filter hex,$(call dep_fetch,$d)),\
+ {$(call dep_name,$d)$(comma)"$(call dep_repo,$d)"},\
+ {$(call dep_name,$d)$(comma)".*"$(comma){git,"$(call dep_repo,$d)"$(comma)"$(call dep_commit,$d)"}})))
+]}.
+{erl_opts, $(call compat_erlc_opts_to_list,$(ERLC_OPTS))}.
+endef
+
+rebar.config:
+ $(gen_verbose) $(call core_render,compat_rebar_config,rebar.config)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter asciideck,$(DEPS) $(DOC_DEPS)),asciideck)
+
+.PHONY: asciidoc asciidoc-guide asciidoc-manual install-asciidoc distclean-asciidoc-guide distclean-asciidoc-manual
+
+# Core targets.
+
+docs:: asciidoc
+
+distclean:: distclean-asciidoc-guide distclean-asciidoc-manual
+
+# Plugin-specific targets.
+
+asciidoc: asciidoc-guide asciidoc-manual
+
+# User guide.
+
+ifeq ($(wildcard doc/src/guide/book.asciidoc),)
+asciidoc-guide:
+else
+asciidoc-guide: distclean-asciidoc-guide doc-deps
+ a2x -v -f pdf doc/src/guide/book.asciidoc && mv doc/src/guide/book.pdf doc/guide.pdf
+ a2x -v -f chunked doc/src/guide/book.asciidoc && mv doc/src/guide/book.chunked/ doc/html/
+
+distclean-asciidoc-guide:
+ $(gen_verbose) rm -rf doc/html/ doc/guide.pdf
+endif
+
+# Man pages.
+
+ASCIIDOC_MANUAL_FILES := $(wildcard doc/src/manual/*.asciidoc)
+
+ifeq ($(ASCIIDOC_MANUAL_FILES),)
+asciidoc-manual:
+else
+
+# Configuration.
+
+MAN_INSTALL_PATH ?= /usr/local/share/man
+MAN_SECTIONS ?= 3 7
+MAN_PROJECT ?= $(shell echo $(PROJECT) | sed 's/^./\U&\E/')
+MAN_VERSION ?= $(PROJECT_VERSION)
+
+# Plugin-specific targets.
+
+define asciidoc2man.erl
+try
+ [begin
+ io:format(" ADOC ~s~n", [F]),
+ ok = asciideck:to_manpage(asciideck:parse_file(F), #{
+ compress => gzip,
+ outdir => filename:dirname(F),
+ extra2 => "$(MAN_PROJECT) $(MAN_VERSION)",
+ extra3 => "$(MAN_PROJECT) Function Reference"
+ })
+ end || F <- [$(shell echo $(addprefix $(comma)\",$(addsuffix \",$1)) | sed 's/^.//')]],
+ halt(0)
+catch C:E ->
+ io:format("Exception ~p:~p~nStacktrace: ~p~n", [C, E, erlang:get_stacktrace()]),
+ halt(1)
+end.
+endef
+
+asciidoc-manual:: doc-deps
+
+asciidoc-manual:: $(ASCIIDOC_MANUAL_FILES)
+ $(gen_verbose) $(call erlang,$(call asciidoc2man.erl,$?))
+ $(verbose) $(foreach s,$(MAN_SECTIONS),mkdir -p doc/man$s/ && mv doc/src/manual/*.$s.gz doc/man$s/;)
+
+install-docs:: install-asciidoc
+
+install-asciidoc: asciidoc-manual
+ $(foreach s,$(MAN_SECTIONS),\
+ mkdir -p $(MAN_INSTALL_PATH)/man$s/ && \
+ install -g `id -g` -o `id -u` -m 0644 doc/man$s/*.gz $(MAN_INSTALL_PATH)/man$s/;)
+
+distclean-asciidoc-manual:
+ $(gen_verbose) rm -rf $(addprefix doc/man,$(MAN_SECTIONS))
+endif
+endif
+
+# Copyright (c) 2014-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: bootstrap bootstrap-lib bootstrap-rel new list-templates
+
+# Core targets.
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Bootstrap targets:" \
+ " bootstrap Generate a skeleton of an OTP application" \
+ " bootstrap-lib Generate a skeleton of an OTP library" \
+ " bootstrap-rel Generate the files needed to build a release" \
+ " new-app in=NAME Create a new local OTP application NAME" \
+ " new-lib in=NAME Create a new local OTP library NAME" \
+ " new t=TPL n=NAME Generate a module NAME based on the template TPL" \
+ " new t=T n=N in=APP Generate a module NAME based on the template TPL in APP" \
+ " list-templates List available templates"
+
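+# Illustrative usage (application and module names below are examples):
+#   make -f erlang.mk bootstrap        # create a new OTP application skeleton
+#   make new t=gen_server n=my_server  # render a template into src/my_server.erl
+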
+# Bootstrap templates.
+
+define bs_appsrc
+{application, $p, [
+ {description, ""},
+ {vsn, "0.1.0"},
+ {id, "git"},
+ {modules, []},
+ {registered, []},
+ {applications, [
+ kernel,
+ stdlib
+ ]},
+ {mod, {$p_app, []}},
+ {env, []}
+]}.
+endef
+
+define bs_appsrc_lib
+{application, $p, [
+ {description, ""},
+ {vsn, "0.1.0"},
+ {id, "git"},
+ {modules, []},
+ {registered, []},
+ {applications, [
+ kernel,
+ stdlib
+ ]}
+]}.
+endef
+
+# To prevent autocompletion issues with ZSH, we add "include erlang.mk"
+# separately during the actual bootstrap.
+define bs_Makefile
+PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.1.0
+$(if $(SP),
+# Whitespace to be used when creating files from templates.
+SP = $(SP)
+)
+endef
+
+define bs_apps_Makefile
+PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.1.0
+$(if $(SP),
+# Whitespace to be used when creating files from templates.
+SP = $(SP)
+)
+# Make sure we know where the applications are located.
+ROOT_DIR ?= $(call core_relpath,$(dir $(ERLANG_MK_FILENAME)),$(APPS_DIR)/app)
+APPS_DIR ?= ..
+DEPS_DIR ?= $(call core_relpath,$(DEPS_DIR),$(APPS_DIR)/app)
+
+include $$(ROOT_DIR)/erlang.mk
+endef
+
+define bs_app
+-module($p_app).
+-behaviour(application).
+
+-export([start/2]).
+-export([stop/1]).
+
+start(_Type, _Args) ->
+ $p_sup:start_link().
+
+stop(_State) ->
+ ok.
+endef
+
+define bs_relx_config
+{release, {$p_release, "1"}, [$p, sasl, runtime_tools]}.
+{extended_start_script, true}.
+{sys_config, "config/sys.config"}.
+{vm_args, "config/vm.args"}.
+endef
+
+define bs_sys_config
+[
+].
+endef
+
+define bs_vm_args
+-name $p@127.0.0.1
+-setcookie $p
+-heart
+endef
+
+# Normal templates.
+
+define tpl_supervisor
+-module($(n)).
+-behaviour(supervisor).
+
+-export([start_link/0]).
+-export([init/1]).
+
+start_link() ->
+ supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+init([]) ->
+ Procs = [],
+ {ok, {{one_for_one, 1, 5}, Procs}}.
+endef
+
+define tpl_gen_server
+-module($(n)).
+-behaviour(gen_server).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_server.
+-export([init/1]).
+-export([handle_call/3]).
+-export([handle_cast/2]).
+-export([handle_info/2]).
+-export([terminate/2]).
+-export([code_change/3]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_server:start_link(?MODULE, [], []).
+
+%% gen_server.
+
+init([]) ->
+ {ok, #state{}}.
+
+handle_call(_Request, _From, State) ->
+ {reply, ignored, State}.
+
+handle_cast(_Msg, State) ->
+ {noreply, State}.
+
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+endef
+
+define tpl_module
+-module($(n)).
+-export([]).
+endef
+
+define tpl_cowboy_http
+-module($(n)).
+-behaviour(cowboy_http_handler).
+
+-export([init/3]).
+-export([handle/2]).
+-export([terminate/3]).
+
+-record(state, {
+}).
+
+init(_, Req, _Opts) ->
+ {ok, Req, #state{}}.
+
+handle(Req, State=#state{}) ->
+ {ok, Req2} = cowboy_req:reply(200, Req),
+ {ok, Req2, State}.
+
+terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_gen_fsm
+-module($(n)).
+-behaviour(gen_fsm).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_fsm.
+-export([init/1]).
+-export([state_name/2]).
+-export([handle_event/3]).
+-export([state_name/3]).
+-export([handle_sync_event/4]).
+-export([handle_info/3]).
+-export([terminate/3]).
+-export([code_change/4]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_fsm:start_link(?MODULE, [], []).
+
+%% gen_fsm.
+
+init([]) ->
+ {ok, state_name, #state{}}.
+
+state_name(_Event, StateData) ->
+ {next_state, state_name, StateData}.
+
+handle_event(_Event, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+state_name(_Event, _From, StateData) ->
+ {reply, ignored, state_name, StateData}.
+
+handle_sync_event(_Event, _From, StateName, StateData) ->
+ {reply, ignored, StateName, StateData}.
+
+handle_info(_Info, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+terminate(_Reason, _StateName, _StateData) ->
+ ok.
+
+code_change(_OldVsn, StateName, StateData, _Extra) ->
+ {ok, StateName, StateData}.
+endef
+
+define tpl_gen_statem
+-module($(n)).
+-behaviour(gen_statem).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_statem.
+-export([callback_mode/0]).
+-export([init/1]).
+-export([state_name/3]).
+-export([handle_event/4]).
+-export([terminate/3]).
+-export([code_change/4]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_statem:start_link(?MODULE, [], []).
+
+%% gen_statem.
+
+callback_mode() ->
+ state_functions.
+
+init([]) ->
+ {ok, state_name, #state{}}.
+
+state_name(_EventType, _EventData, StateData) ->
+ {next_state, state_name, StateData}.
+
+handle_event(_EventType, _EventData, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+terminate(_Reason, _StateName, _StateData) ->
+ ok.
+
+code_change(_OldVsn, StateName, StateData, _Extra) ->
+ {ok, StateName, StateData}.
+endef
+
+define tpl_cowboy_loop
+-module($(n)).
+-behaviour(cowboy_loop_handler).
+
+-export([init/3]).
+-export([info/3]).
+-export([terminate/3]).
+
+-record(state, {
+}).
+
+init(_, Req, _Opts) ->
+ {loop, Req, #state{}, 5000, hibernate}.
+
+info(_Info, Req, State) ->
+ {loop, Req, State, hibernate}.
+
+terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_cowboy_rest
+-module($(n)).
+
+-export([init/3]).
+-export([content_types_provided/2]).
+-export([get_html/2]).
+
+init(_, _Req, _Opts) ->
+ {upgrade, protocol, cowboy_rest}.
+
+content_types_provided(Req, State) ->
+ {[{{<<"text">>, <<"html">>, '*'}, get_html}], Req, State}.
+
+get_html(Req, State) ->
+ {<<"<html><body>This is REST!</body></html>">>, Req, State}.
+endef
+
+define tpl_cowboy_ws
+-module($(n)).
+-behaviour(cowboy_websocket_handler).
+
+-export([init/3]).
+-export([websocket_init/3]).
+-export([websocket_handle/3]).
+-export([websocket_info/3]).
+-export([websocket_terminate/3]).
+
+-record(state, {
+}).
+
+init(_, _, _) ->
+ {upgrade, protocol, cowboy_websocket}.
+
+websocket_init(_, Req, _Opts) ->
+ Req2 = cowboy_req:compact(Req),
+ {ok, Req2, #state{}}.
+
+websocket_handle({text, Data}, Req, State) ->
+ {reply, {text, Data}, Req, State};
+websocket_handle({binary, Data}, Req, State) ->
+ {reply, {binary, Data}, Req, State};
+websocket_handle(_Frame, Req, State) ->
+ {ok, Req, State}.
+
+websocket_info(_Info, Req, State) ->
+ {ok, Req, State}.
+
+websocket_terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_ranch_protocol
+-module($(n)).
+-behaviour(ranch_protocol).
+
+-export([start_link/4]).
+-export([init/4]).
+
+-type opts() :: [].
+-export_type([opts/0]).
+
+-record(state, {
+ socket :: inet:socket(),
+ transport :: module()
+}).
+
+start_link(Ref, Socket, Transport, Opts) ->
+ Pid = spawn_link(?MODULE, init, [Ref, Socket, Transport, Opts]),
+ {ok, Pid}.
+
+-spec init(ranch:ref(), inet:socket(), module(), opts()) -> ok.
+init(Ref, Socket, Transport, _Opts) ->
+ ok = ranch:accept_ack(Ref),
+ loop(#state{socket=Socket, transport=Transport}).
+
+loop(State) ->
+ loop(State).
+endef
+
+# Plugin-specific targets.
+
+ifndef WS
+ifdef SP
+WS = $(subst a,,a $(wordlist 1,$(SP),a a a a a a a a a a a a a a a a a a a a))
+else
+WS = $(tab)
+endif
+endif
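+
+# WS holds the whitespace written into generated files: with SP=4 (for
+# example) it is four spaces, otherwise a single tab.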
+
+bootstrap:
+ifneq ($(wildcard src/),)
+ $(error Error: src/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(eval n := $(PROJECT)_sup)
+ $(verbose) $(call core_render,bs_Makefile,Makefile)
+ $(verbose) echo "include erlang.mk" >> Makefile
+ $(verbose) mkdir src/
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc,src/$(PROJECT).app.src)
+endif
+ $(verbose) $(call core_render,bs_app,src/$(PROJECT)_app.erl)
+ $(verbose) $(call core_render,tpl_supervisor,src/$(PROJECT)_sup.erl)
+
+bootstrap-lib:
+ifneq ($(wildcard src/),)
+ $(error Error: src/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(verbose) $(call core_render,bs_Makefile,Makefile)
+ $(verbose) echo "include erlang.mk" >> Makefile
+ $(verbose) mkdir src/
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc_lib,src/$(PROJECT).app.src)
+endif
+
+bootstrap-rel:
+ifneq ($(wildcard relx.config),)
+ $(error Error: relx.config already exists)
+endif
+ifneq ($(wildcard config/),)
+ $(error Error: config/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(verbose) $(call core_render,bs_relx_config,relx.config)
+ $(verbose) mkdir config/
+ $(verbose) $(call core_render,bs_sys_config,config/sys.config)
+ $(verbose) $(call core_render,bs_vm_args,config/vm.args)
+
+new-app:
+ifndef in
+ $(error Usage: $(MAKE) new-app in=APP)
+endif
+ifneq ($(wildcard $(APPS_DIR)/$in),)
+ $(error Error: Application $in already exists)
+endif
+ $(eval p := $(in))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(eval n := $(in)_sup)
+ $(verbose) mkdir -p $(APPS_DIR)/$p/src/
+ $(verbose) $(call core_render,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile)
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc,$(APPS_DIR)/$p/src/$p.app.src)
+endif
+ $(verbose) $(call core_render,bs_app,$(APPS_DIR)/$p/src/$p_app.erl)
+ $(verbose) $(call core_render,tpl_supervisor,$(APPS_DIR)/$p/src/$p_sup.erl)
+
+new-lib:
+ifndef in
+ $(error Usage: $(MAKE) new-lib in=APP)
+endif
+ifneq ($(wildcard $(APPS_DIR)/$in),)
+ $(error Error: Application $in already exists)
+endif
+ $(eval p := $(in))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(verbose) mkdir -p $(APPS_DIR)/$p/src/
+ $(verbose) $(call core_render,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile)
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc_lib,$(APPS_DIR)/$p/src/$p.app.src)
+endif
+
+new:
+ifeq ($(wildcard src/)$(in),)
+ $(error Error: src/ directory does not exist)
+endif
+ifndef t
+ $(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP])
+endif
+ifndef n
+ $(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP])
+endif
+ifdef in
+ $(verbose) $(call core_render,tpl_$(t),$(APPS_DIR)/$(in)/src/$(n).erl)
+else
+ $(verbose) $(call core_render,tpl_$(t),src/$(n).erl)
+endif
+
+list-templates:
+	$(verbose) echo Available templates:
+ $(verbose) printf " %s\n" $(sort $(patsubst tpl_%,%,$(filter tpl_%,$(.VARIABLES))))
+
+# Copyright (c) 2014-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: clean-c_src distclean-c_src-env
+
+# Configuration.
+
+C_SRC_DIR ?= $(CURDIR)/c_src
+C_SRC_ENV ?= $(C_SRC_DIR)/env.mk
+C_SRC_OUTPUT ?= $(CURDIR)/priv/$(PROJECT)
+C_SRC_TYPE ?= shared
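+# Any value other than "shared" (for instance "executable") links an
+# executable instead of a shared library; see the C_SRC_TYPE check below.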
+
+# System type and C compiler/flags.
+
+ifeq ($(PLATFORM),msys2)
+ C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?= .exe
+ C_SRC_OUTPUT_SHARED_EXTENSION ?= .dll
+else
+ C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?=
+ C_SRC_OUTPUT_SHARED_EXTENSION ?= .so
+endif
+
+ifeq ($(C_SRC_TYPE),shared)
+ C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_SHARED_EXTENSION)
+else
+ C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_EXECUTABLE_EXTENSION)
+endif
+
+ifeq ($(PLATFORM),msys2)
+# We hardcode the compiler used on MSYS2. The default CC=cc does
+# not produce working code. Neither does the "gcc" MSYS2 package.
+ CC = /mingw64/bin/gcc
+ export CC
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),darwin)
+ CC ?= cc
+ CFLAGS ?= -O3 -std=c99 -arch x86_64 -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -arch x86_64 -Wall
+ LDFLAGS ?= -arch x86_64 -flat_namespace -undefined suppress
+else ifeq ($(PLATFORM),freebsd)
+ CC ?= cc
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),linux)
+ CC ?= gcc
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+endif
+
+ifneq ($(PLATFORM),msys2)
+ CFLAGS += -fPIC
+ CXXFLAGS += -fPIC
+endif
+
+CFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
+CXXFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
+
+LDLIBS += -L"$(ERL_INTERFACE_LIB_DIR)" -lei
+
+# Verbosity.
+
+c_verbose_0 = @echo " C " $(filter-out $(notdir $(MAKEFILE_LIST) $(C_SRC_ENV)),$(^F));
+c_verbose = $(c_verbose_$(V))
+
+cpp_verbose_0 = @echo " CPP " $(filter-out $(notdir $(MAKEFILE_LIST) $(C_SRC_ENV)),$(^F));
+cpp_verbose = $(cpp_verbose_$(V))
+
+link_verbose_0 = @echo " LD " $(@F);
+link_verbose = $(link_verbose_$(V))
+
+# Targets.
+
+ifeq ($(wildcard $(C_SRC_DIR)),)
+else ifneq ($(wildcard $(C_SRC_DIR)/Makefile),)
+app:: app-c_src
+
+test-build:: app-c_src
+
+app-c_src:
+ $(MAKE) -C $(C_SRC_DIR)
+
+clean::
+ $(MAKE) -C $(C_SRC_DIR) clean
+
+else
+
+ifeq ($(SOURCES),)
+SOURCES := $(sort $(foreach pat,*.c *.C *.cc *.cpp,$(call core_find,$(C_SRC_DIR)/,$(pat))))
+endif
+OBJECTS = $(addsuffix .o, $(basename $(SOURCES)))
+
+COMPILE_C = $(c_verbose) $(CC) $(CFLAGS) $(CPPFLAGS) -c
+COMPILE_CPP = $(cpp_verbose) $(CXX) $(CXXFLAGS) $(CPPFLAGS) -c
+
+app:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
+
+test-build:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
+
+$(C_SRC_OUTPUT_FILE): $(OBJECTS)
+ $(verbose) mkdir -p $(dir $@)
+ $(link_verbose) $(CC) $(OBJECTS) \
+ $(LDFLAGS) $(if $(filter $(C_SRC_TYPE),shared),-shared) $(LDLIBS) \
+ -o $(C_SRC_OUTPUT_FILE)
+
+$(OBJECTS): $(MAKEFILE_LIST) $(C_SRC_ENV)
+
+%.o: %.c
+ $(COMPILE_C) $(OUTPUT_OPTION) $<
+
+%.o: %.cc
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+%.o: %.C
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+%.o: %.cpp
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+clean:: clean-c_src
+
+clean-c_src:
+ $(gen_verbose) rm -f $(C_SRC_OUTPUT_FILE) $(OBJECTS)
+
+endif
+
+ifneq ($(wildcard $(C_SRC_DIR)),)
+ERL_ERTS_DIR = $(shell $(ERL) -eval 'io:format("~s~n", [code:lib_dir(erts)]), halt().')
+
+$(C_SRC_ENV):
+ $(verbose) $(ERL) -eval "file:write_file(\"$(call core_native_path,$(C_SRC_ENV))\", \
+ io_lib:format( \
+ \"# Generated by Erlang.mk. Edit at your own risk!~n~n\" \
+ \"ERTS_INCLUDE_DIR ?= ~s/erts-~s/include/~n\" \
+ \"ERL_INTERFACE_INCLUDE_DIR ?= ~s~n\" \
+ \"ERL_INTERFACE_LIB_DIR ?= ~s~n\" \
+ \"ERTS_DIR ?= $(ERL_ERTS_DIR)~n\", \
+ [code:root_dir(), erlang:system_info(version), \
+ code:lib_dir(erl_interface, include), \
+ code:lib_dir(erl_interface, lib)])), \
+ halt()."
+
+distclean:: distclean-c_src-env
+
+distclean-c_src-env:
+ $(gen_verbose) rm -f $(C_SRC_ENV)
+
+-include $(C_SRC_ENV)
+
+ifneq ($(ERL_ERTS_DIR),$(ERTS_DIR))
+$(shell rm -f $(C_SRC_ENV))
+endif
+endif
+
+# Templates.
+
+define bs_c_nif
+#include "erl_nif.h"
+
+static int loads = 0;
+
+static int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
+{
+ /* Initialize private data. */
+ *priv_data = NULL;
+
+ loads++;
+
+ return 0;
+}
+
+static int upgrade(ErlNifEnv* env, void** priv_data, void** old_priv_data, ERL_NIF_TERM load_info)
+{
+ /* Convert the private data to the new version. */
+ *priv_data = *old_priv_data;
+
+ loads++;
+
+ return 0;
+}
+
+static void unload(ErlNifEnv* env, void* priv_data)
+{
+ if (loads == 1) {
+ /* Destroy the private data. */
+ }
+
+ loads--;
+}
+
+static ERL_NIF_TERM hello(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ if (enif_is_atom(env, argv[0])) {
+ return enif_make_tuple2(env,
+ enif_make_atom(env, "hello"),
+ argv[0]);
+ }
+
+ return enif_make_tuple2(env,
+ enif_make_atom(env, "error"),
+ enif_make_atom(env, "badarg"));
+}
+
+static ErlNifFunc nif_funcs[] = {
+ {"hello", 1, hello}
+};
+
+ERL_NIF_INIT($n, nif_funcs, load, NULL, upgrade, unload)
+endef
+
+define bs_erl_nif
+-module($n).
+
+-export([hello/1]).
+
+-on_load(on_load/0).
+on_load() ->
+ PrivDir = case code:priv_dir(?MODULE) of
+ {error, _} ->
+ AppPath = filename:dirname(filename:dirname(code:which(?MODULE))),
+ filename:join(AppPath, "priv");
+ Path ->
+ Path
+ end,
+ erlang:load_nif(filename:join(PrivDir, atom_to_list(?MODULE)), 0).
+
+hello(_) ->
+ erlang:nif_error({not_loaded, ?MODULE}).
+endef
+
+new-nif:
+ifneq ($(wildcard $(C_SRC_DIR)/$n.c),)
+ $(error Error: $(C_SRC_DIR)/$n.c already exists)
+endif
+ifneq ($(wildcard src/$n.erl),)
+ $(error Error: src/$n.erl already exists)
+endif
+ifndef n
+ $(error Usage: $(MAKE) new-nif n=NAME [in=APP])
+endif
+ifdef in
+ $(verbose) $(MAKE) -C $(APPS_DIR)/$(in)/ new-nif n=$n in=
+else
+ $(verbose) mkdir -p $(C_SRC_DIR) src/
+ $(verbose) $(call core_render,bs_c_nif,$(C_SRC_DIR)/$n.c)
+ $(verbose) $(call core_render,bs_erl_nif,src/$n.erl)
+endif
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: ci ci-prepare ci-setup
+
+CI_OTP ?=
+CI_HIPE ?=
+CI_ERLLVM ?=
+
+ifeq ($(CI_VM),native)
+ERLC_OPTS += +native
+TEST_ERLC_OPTS += +native
+else ifeq ($(CI_VM),erllvm)
+ERLC_OPTS += +native +'{hipe, [to_llvm]}'
+TEST_ERLC_OPTS += +native +'{hipe, [to_llvm]}'
+endif
+
+ifeq ($(strip $(CI_OTP) $(CI_HIPE) $(CI_ERLLVM)),)
+ci::
+else
+
+ci:: $(addprefix ci-,$(CI_OTP) $(addsuffix -native,$(CI_HIPE)) $(addsuffix -erllvm,$(CI_ERLLVM)))
+
+ci-prepare: $(addprefix $(KERL_INSTALL_DIR)/,$(CI_OTP) $(addsuffix -native,$(CI_HIPE)))
+
+ci-setup::
+ $(verbose) :
+
+ci-extra::
+ $(verbose) :
+
+ci_verbose_0 = @echo " CI " $(1);
+ci_verbose = $(ci_verbose_$(V))
+
+define ci_target
+ci-$1: $(KERL_INSTALL_DIR)/$2
+ $(verbose) $(MAKE) --no-print-directory clean
+ $(ci_verbose) \
+ PATH="$(KERL_INSTALL_DIR)/$2/bin:$(PATH)" \
+ CI_OTP_RELEASE="$1" \
+ CT_OPTS="-label $1" \
+ CI_VM="$3" \
+ $(MAKE) ci-setup tests
+ $(verbose) $(MAKE) --no-print-directory ci-extra
+endef
+
+$(foreach otp,$(CI_OTP),$(eval $(call ci_target,$(otp),$(otp),otp)))
+$(foreach otp,$(CI_HIPE),$(eval $(call ci_target,$(otp)-native,$(otp)-native,native)))
+$(foreach otp,$(CI_ERLLVM),$(eval $(call ci_target,$(otp)-erllvm,$(otp)-native,erllvm)))
+
+$(foreach otp,$(filter-out $(ERLANG_OTP),$(CI_OTP)),$(eval $(call kerl_otp_target,$(otp))))
+$(foreach otp,$(filter-out $(ERLANG_HIPE),$(sort $(CI_HIPE) $(CI_ERLLVM))),$(eval $(call kerl_hipe_target,$(otp))))
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Continuous Integration targets:" \
+ " ci Run '$(MAKE) tests' on all configured Erlang versions." \
+ "" \
+ "The CI_OTP variable must be defined with the Erlang versions" \
+ "that must be tested. For example: CI_OTP = OTP-17.3.4 OTP-17.5.3"
+
+endif
+
+# Copyright (c) 2020, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifdef CONCUERROR_TESTS
+
+.PHONY: concuerror distclean-concuerror
+
+# Configuration
+
+CONCUERROR_LOGS_DIR ?= $(CURDIR)/logs
+CONCUERROR_OPTS ?=
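+# CONCUERROR_TESTS takes module:function pairs, for example (hypothetical
+# names): CONCUERROR_TESTS = my_mod:my_test other_mod:my_test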
+
+# Core targets.
+
+check:: concuerror
+
+ifndef KEEP_LOGS
+distclean:: distclean-concuerror
+endif
+
+# Plugin-specific targets.
+
+$(ERLANG_MK_TMP)/Concuerror/bin/concuerror: | $(ERLANG_MK_TMP)
+ $(verbose) git clone https://github.com/parapluu/Concuerror $(ERLANG_MK_TMP)/Concuerror
+ $(verbose) $(MAKE) -C $(ERLANG_MK_TMP)/Concuerror
+
+$(CONCUERROR_LOGS_DIR):
+ $(verbose) mkdir -p $(CONCUERROR_LOGS_DIR)
+
+define concuerror_html_report
+<!DOCTYPE html>
+<html lang="en">
+<head>
+<meta charset="utf-8">
+<title>Concuerror HTML report</title>
+</head>
+<body>
+<h1>Concuerror HTML report</h1>
+<p>Generated on $(concuerror_date)</p>
+<ul>
+$(foreach t,$(concuerror_targets),<li><a href="$(t).txt">$(t)</a></li>)
+</ul>
+</body>
+</html>
+endef
+
+concuerror: $(addprefix concuerror-,$(subst :,-,$(CONCUERROR_TESTS)))
+ $(eval concuerror_date := $(shell date))
+ $(eval concuerror_targets := $^)
+ $(verbose) $(call core_render,concuerror_html_report,$(CONCUERROR_LOGS_DIR)/concuerror.html)
+
+define concuerror_target
+.PHONY: concuerror-$1-$2
+
+concuerror-$1-$2: test-build | $(ERLANG_MK_TMP)/Concuerror/bin/concuerror $(CONCUERROR_LOGS_DIR)
+ $(ERLANG_MK_TMP)/Concuerror/bin/concuerror \
+ --pa $(CURDIR)/ebin --pa $(TEST_DIR) \
+ -o $(CONCUERROR_LOGS_DIR)/concuerror-$1-$2.txt \
+ $$(CONCUERROR_OPTS) -m $1 -t $2
+endef
+
+$(foreach test,$(CONCUERROR_TESTS),$(eval $(call concuerror_target,$(firstword $(subst :, ,$(test))),$(lastword $(subst :, ,$(test))))))
+
+distclean-concuerror:
+ $(gen_verbose) rm -rf $(CONCUERROR_LOGS_DIR)
+
+endif
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: ct apps-ct distclean-ct
+
+# Configuration.
+
+CT_OPTS ?=
+
+ifneq ($(wildcard $(TEST_DIR)),)
+ifndef CT_SUITES
+CT_SUITES := $(sort $(subst _SUITE.erl,,$(notdir $(call core_find,$(TEST_DIR)/,*_SUITE.erl))))
+endif
+endif
+CT_SUITES ?=
+CT_LOGS_DIR ?= $(CURDIR)/logs
+
+# Core targets.
+
+tests:: ct
+
+ifndef KEEP_LOGS
+distclean:: distclean-ct
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Common_test targets:" \
+ " ct Run all the common_test suites for this project" \
+ "" \
+ "All your common_test suites have their associated targets." \
+	"A suite named http_SUITE can be run using the ct-http target."
+
+# Plugin-specific targets.
+
+CT_RUN = ct_run \
+ -no_auto_compile \
+ -noinput \
+ -pa $(CURDIR)/ebin $(TEST_DIR) \
+ -dir $(TEST_DIR) \
+ -logdir $(CT_LOGS_DIR)
+
+ifeq ($(CT_SUITES),)
+ct: $(if $(IS_APP)$(ROOT_DIR),,apps-ct)
+else
+# We do not run tests if we are in an apps/* directory that has no test directory.
+ifneq ($(IS_APP)$(wildcard $(TEST_DIR)),1)
+ct: test-build $(if $(IS_APP)$(ROOT_DIR),,apps-ct)
+ $(verbose) mkdir -p $(CT_LOGS_DIR)
+ $(gen_verbose) $(CT_RUN) -sname ct_$(PROJECT) -suite $(addsuffix _SUITE,$(CT_SUITES)) $(CT_OPTS)
+endif
+endif
+
+ifneq ($(ALL_APPS_DIRS),)
+define ct_app_target
+apps-ct-$1: test-build
+ $$(MAKE) -C $1 ct IS_APP=1
+endef
+
+$(foreach app,$(ALL_APPS_DIRS),$(eval $(call ct_app_target,$(app))))
+
+apps-ct: $(addprefix apps-ct-,$(ALL_APPS_DIRS))
+endif
+
+ifdef t
+ifeq (,$(findstring :,$t))
+CT_EXTRA = -group $t
+else
+t_words = $(subst :, ,$t)
+CT_EXTRA = -group $(firstword $(t_words)) -case $(lastword $(t_words))
+endif
+else
+ifdef c
+CT_EXTRA = -case $c
+else
+CT_EXTRA =
+endif
+endif
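+
+# Example invocations of the per-suite targets (group and case names are
+# hypothetical):
+#   make ct-http                      # run http_SUITE only
+#   make ct-http t=my_group           # run one group
+#   make ct-http t=my_group:my_case   # run one case within a group
+#   make ct-http c=my_case            # run one case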
+
+define ct_suite_target
+ct-$(1): test-build
+ $(verbose) mkdir -p $(CT_LOGS_DIR)
+ $(gen_verbose_esc) $(CT_RUN) -sname ct_$(PROJECT) -suite $(addsuffix _SUITE,$(1)) $(CT_EXTRA) $(CT_OPTS)
+endef
+
+$(foreach test,$(CT_SUITES),$(eval $(call ct_suite_target,$(test))))
+
+distclean-ct:
+ $(gen_verbose) rm -rf $(CT_LOGS_DIR)
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: plt distclean-plt dialyze
+
+# Configuration.
+
+DIALYZER_PLT ?= $(CURDIR)/.$(PROJECT).plt
+export DIALYZER_PLT
+
+PLT_APPS ?=
+DIALYZER_DIRS ?= --src -r $(wildcard src) $(ALL_APPS_DIRS)
+DIALYZER_OPTS ?= -Werror_handling -Wrace_conditions -Wunmatched_returns # -Wunderspecs
+DIALYZER_PLT_OPTS ?=
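+# PLT_APPS lists extra applications to add to the PLT, for example
+# (illustrative): PLT_APPS = ssl crypto inets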
+
+# Core targets.
+
+check:: dialyze
+
+distclean:: distclean-plt
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Dialyzer targets:" \
+ " plt Build a PLT file for this project" \
+ " dialyze Analyze the project using Dialyzer"
+
+# Plugin-specific targets.
+
+define filter_opts.erl
+ Opts = init:get_plain_arguments(),
+ {Filtered, _} = lists:foldl(fun
+ (O, {Os, true}) -> {[O|Os], false};
+ (O = "-D", {Os, _}) -> {[O|Os], true};
+ (O = [\\$$-, \\$$D, _ | _], {Os, _}) -> {[O|Os], false};
+ (O = "-I", {Os, _}) -> {[O|Os], true};
+ (O = [\\$$-, \\$$I, _ | _], {Os, _}) -> {[O|Os], false};
+ (O = "-pa", {Os, _}) -> {[O|Os], true};
+ (_, Acc) -> Acc
+ end, {[], false}, Opts),
+ io:format("~s~n", [string:join(lists:reverse(Filtered), " ")]),
+ halt().
+endef
+
+# DIALYZER_PLT is a variable understood directly by Dialyzer.
+#
+# We append the path to erts at the end of the PLT. This works
+# because the PLT file is in the external term format and the
+# function binary_to_term/1 ignores any trailing data.
+$(DIALYZER_PLT): deps app
+ $(eval DEPS_LOG := $(shell test -f $(ERLANG_MK_TMP)/deps.log && \
+ while read p; do test -d $$p/ebin && echo $$p/ebin; done <$(ERLANG_MK_TMP)/deps.log))
+ $(verbose) dialyzer --build_plt $(DIALYZER_PLT_OPTS) --apps \
+ erts kernel stdlib $(PLT_APPS) $(OTP_DEPS) $(LOCAL_DEPS) $(DEPS_LOG) || test $$? -eq 2
+ $(verbose) $(ERL) -eval 'io:format("~n~s~n", [code:lib_dir(erts)]), halt().' >> $@
+
+plt: $(DIALYZER_PLT)
+
+distclean-plt:
+ $(gen_verbose) rm -f $(DIALYZER_PLT)
+
+ifneq ($(wildcard $(DIALYZER_PLT)),)
+dialyze: $(if $(filter --src,$(DIALYZER_DIRS)),,deps app)
+ $(verbose) if ! tail -n1 $(DIALYZER_PLT) | \
+ grep -q "^`$(ERL) -eval 'io:format("~s", [code:lib_dir(erts)]), halt().'`$$"; then \
+ rm $(DIALYZER_PLT); \
+ $(MAKE) plt; \
+ fi
+else
+dialyze: $(DIALYZER_PLT)
+endif
+ $(verbose) dialyzer --no_native `$(ERL) \
+ -eval "$(subst $(newline),,$(call escape_dquotes,$(call filter_opts.erl)))" \
+ -extra $(ERLC_OPTS)` $(DIALYZER_DIRS) $(DIALYZER_OPTS) $(if $(wildcard ebin/),-pa ebin/)
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-edoc edoc
+
+# Configuration.
+
+EDOC_OPTS ?=
+EDOC_SRC_DIRS ?=
+EDOC_OUTPUT ?= doc
+
+define edoc.erl
+ SrcPaths = lists:foldl(fun(P, Acc) ->
+ filelib:wildcard(atom_to_list(P) ++ "/{src,c_src}") ++ Acc
+ end, [], [$(call comma_list,$(patsubst %,'%',$(call core_native_path,$(EDOC_SRC_DIRS))))]),
+ DefaultOpts = [{dir, "$(EDOC_OUTPUT)"}, {source_path, SrcPaths}, {subpackages, false}],
+ edoc:application($(1), ".", [$(2)] ++ DefaultOpts),
+ halt(0).
+endef
+
+# Core targets.
+
+ifneq ($(strip $(EDOC_SRC_DIRS)$(wildcard doc/overview.edoc)),)
+docs:: edoc
+endif
+
+distclean:: distclean-edoc
+
+# Plugin-specific targets.
+
+edoc: distclean-edoc doc-deps
+ $(gen_verbose) $(call erlang,$(call edoc.erl,$(PROJECT),$(EDOC_OPTS)))
+
+distclean-edoc:
+ $(gen_verbose) rm -f $(EDOC_OUTPUT)/*.css $(EDOC_OUTPUT)/*.html $(EDOC_OUTPUT)/*.png $(EDOC_OUTPUT)/edoc-info
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Configuration.
+
+DTL_FULL_PATH ?=
+DTL_PATH ?= templates/
+DTL_PREFIX ?=
+DTL_SUFFIX ?= _dtl
+DTL_OPTS ?=
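+# With the defaults, templates/home.dtl (name illustrative) compiles to the
+# module home_dtl in ebin/. When DTL_FULL_PATH is set, nested paths become
+# part of the module name: templates/users/home.dtl -> users_home_dtl.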
+
+# Verbosity.
+
+dtl_verbose_0 = @echo " DTL " $(filter %.dtl,$(?F));
+dtl_verbose = $(dtl_verbose_$(V))
+
+# Core targets.
+
+DTL_PATH := $(abspath $(DTL_PATH))
+DTL_FILES := $(sort $(call core_find,$(DTL_PATH),*.dtl))
+
+ifneq ($(DTL_FILES),)
+
+DTL_NAMES = $(addprefix $(DTL_PREFIX),$(addsuffix $(DTL_SUFFIX),$(DTL_FILES:$(DTL_PATH)/%.dtl=%)))
+DTL_MODULES = $(if $(DTL_FULL_PATH),$(subst /,_,$(DTL_NAMES)),$(notdir $(DTL_NAMES)))
+BEAM_FILES += $(addsuffix .beam,$(addprefix ebin/,$(DTL_MODULES)))
+
+ifneq ($(words $(DTL_FILES)),0)
+# Rebuild templates when the Makefile changes.
+$(ERLANG_MK_TMP)/last-makefile-change-erlydtl: $(MAKEFILE_LIST) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(DTL_FILES); \
+ fi
+ $(verbose) touch $@
+
+ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change-erlydtl
+endif
+
+define erlydtl_compile.erl
+ [begin
+ Module0 = case "$(strip $(DTL_FULL_PATH))" of
+ "" ->
+ filename:basename(F, ".dtl");
+ _ ->
+ "$(call core_native_path,$(DTL_PATH))/" ++ F2 = filename:rootname(F, ".dtl"),
+ re:replace(F2, "/", "_", [{return, list}, global])
+ end,
+ Module = list_to_atom("$(DTL_PREFIX)" ++ string:to_lower(Module0) ++ "$(DTL_SUFFIX)"),
+ case erlydtl:compile(F, Module, [$(DTL_OPTS)] ++ [{out_dir, "ebin/"}, return_errors]) of
+ ok -> ok;
+ {ok, _} -> ok
+ end
+ end || F <- string:tokens("$(1)", " ")],
+ halt().
+endef
+
+ebin/$(PROJECT).app:: $(DTL_FILES) | ebin/
+ $(if $(strip $?),\
+ $(dtl_verbose) $(call erlang,$(call erlydtl_compile.erl,$(call core_native_path,$?)),\
+ -pa ebin/))
+
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, Dave Cottlehuber <dch@skunkwerks.at>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-escript escript escript-zip
+
+# Configuration.
+
+ESCRIPT_NAME ?= $(PROJECT)
+ESCRIPT_FILE ?= $(ESCRIPT_NAME)
+
+ESCRIPT_SHEBANG ?= /usr/bin/env escript
+ESCRIPT_COMMENT ?= This is an -*- erlang -*- file
+ESCRIPT_EMU_ARGS ?= -escript main $(ESCRIPT_NAME)
+
+ESCRIPT_ZIP ?= 7z a -tzip -mx=9 -mtc=off $(if $(filter-out 0,$(V)),,> /dev/null)
+ESCRIPT_ZIP_FILE ?= $(ERLANG_MK_TMP)/escript.zip
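+# Typical overrides (illustrative): setting ESCRIPT_NAME = mytool renames the
+# generated script and, through ESCRIPT_EMU_ARGS, the escript entry module.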
+
+# Core targets.
+
+distclean:: distclean-escript
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Escript targets:" \
+	"  escript     Build an executable escript archive"
+
+# Plugin-specific targets.
+
+escript-zip:: FULL=1
+escript-zip:: deps app
+	$(verbose) mkdir -p $(dir $(ESCRIPT_ZIP_FILE))
+ $(verbose) rm -f $(ESCRIPT_ZIP_FILE)
+ $(gen_verbose) cd .. && $(ESCRIPT_ZIP) $(ESCRIPT_ZIP_FILE) $(PROJECT)/ebin/*
+ifneq ($(DEPS),)
+ $(verbose) cd $(DEPS_DIR) && $(ESCRIPT_ZIP) $(ESCRIPT_ZIP_FILE) \
+ $(subst $(DEPS_DIR)/,,$(addsuffix /*,$(wildcard \
+ $(addsuffix /ebin,$(shell cat $(ERLANG_MK_TMP)/deps.log)))))
+endif
+
+escript:: escript-zip
+ $(gen_verbose) printf "%s\n" \
+ "#!$(ESCRIPT_SHEBANG)" \
+ "%% $(ESCRIPT_COMMENT)" \
+ "%%! $(ESCRIPT_EMU_ARGS)" > $(ESCRIPT_FILE)
+ $(verbose) cat $(ESCRIPT_ZIP_FILE) >> $(ESCRIPT_FILE)
+ $(verbose) chmod +x $(ESCRIPT_FILE)
+
+distclean-escript:
+ $(gen_verbose) rm -f $(ESCRIPT_FILE)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, Enrique Fernandez <enrique.fernandez@erlang-solutions.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: eunit apps-eunit
+
+# Configuration
+
+EUNIT_OPTS ?=
+EUNIT_ERL_OPTS ?=
+
+# Core targets.
+
+tests:: eunit
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "EUnit targets:" \
+ " eunit Run all the EUnit tests for this project"
+
+# Plugin-specific targets.
+
+define eunit.erl
+ $(call cover.erl)
+ CoverSetup(),
+ case eunit:test($1, [$(EUNIT_OPTS)]) of
+ ok -> ok;
+ error -> halt(2)
+ end,
+ CoverExport("$(call core_native_path,$(COVER_DATA_DIR))/eunit.coverdata"),
+ halt()
+endef
+
+EUNIT_ERL_OPTS += -pa $(TEST_DIR) $(CURDIR)/ebin
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+eunit: test-build cover-data-dir
+ $(gen_verbose) $(call erlang,$(call eunit.erl,['$(t)']),$(EUNIT_ERL_OPTS))
+else
+eunit: test-build cover-data-dir
+ $(gen_verbose) $(call erlang,$(call eunit.erl,fun $(t)/0),$(EUNIT_ERL_OPTS))
+endif
+else
+EUNIT_EBIN_MODS = $(notdir $(basename $(ERL_FILES) $(BEAM_FILES)))
+EUNIT_TEST_MODS = $(notdir $(basename $(call core_find,$(TEST_DIR)/,*.erl)))
+
+EUNIT_MODS = $(foreach mod,$(EUNIT_EBIN_MODS) $(filter-out \
+ $(patsubst %,%_tests,$(EUNIT_EBIN_MODS)),$(EUNIT_TEST_MODS)),'$(mod)')
+
+eunit: test-build $(if $(IS_APP)$(ROOT_DIR),,apps-eunit) cover-data-dir
+ifneq ($(wildcard src/ $(TEST_DIR)),)
+ $(gen_verbose) $(call erlang,$(call eunit.erl,[$(call comma_list,$(EUNIT_MODS))]),$(EUNIT_ERL_OPTS))
+endif
+
+ifneq ($(ALL_APPS_DIRS),)
+apps-eunit: test-build
+ $(verbose) eunit_retcode=0 ; for app in $(ALL_APPS_DIRS); do $(MAKE) -C $$app eunit IS_APP=1; \
+ [ $$? -ne 0 ] && eunit_retcode=1 ; done ; \
+ exit $$eunit_retcode
+endif
+endif
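+
+# Example invocations (module and test names are hypothetical):
+#   make eunit                       # run all EUnit tests
+#   make eunit t=my_module           # run one module's tests
+#   make eunit t=my_module:my_test   # run a single arity-0 test function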
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter proper,$(DEPS) $(TEST_DEPS)),proper)
+.PHONY: proper
+
+# Targets.
+
+tests:: proper
+
+define proper_check.erl
+ $(call cover.erl)
+ code:add_pathsa([
+ "$(call core_native_path,$(CURDIR)/ebin)",
+ "$(call core_native_path,$(DEPS_DIR)/*/ebin)",
+ "$(call core_native_path,$(TEST_DIR))"]),
+ Module = fun(M) ->
+ [true] =:= lists:usort([
+ case atom_to_list(F) of
+ "prop_" ++ _ ->
+ io:format("Testing ~p:~p/0~n", [M, F]),
+ proper:quickcheck(M:F(), nocolors);
+ _ ->
+ true
+ end
+ || {F, 0} <- M:module_info(exports)])
+ end,
+ try begin
+ CoverSetup(),
+ Res = case $(1) of
+ all -> [true] =:= lists:usort([Module(M) || M <- [$(call comma_list,$(3))]]);
+ module -> Module($(2));
+ function -> proper:quickcheck($(2), nocolors)
+ end,
+ CoverExport("$(COVER_DATA_DIR)/proper.coverdata"),
+ Res
+ end of
+ true -> halt(0);
+ _ -> halt(1)
+ catch error:undef ->
+ io:format("Undefined property or module?~n~p~n", [erlang:get_stacktrace()]),
+ halt(0)
+ end.
+endef
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+proper: test-build cover-data-dir
+ $(verbose) $(call erlang,$(call proper_check.erl,module,$(t)))
+else
+proper: test-build cover-data-dir
+ $(verbose) echo Testing $(t)/0
+ $(verbose) $(call erlang,$(call proper_check.erl,function,$(t)()))
+endif
+else
+proper: test-build cover-data-dir
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(wildcard ebin/*.beam) $(call core_find,$(TEST_DIR)/,*.beam))))))
+ $(gen_verbose) $(call erlang,$(call proper_check.erl,all,undefined,$(MODULES)))
+endif
+endif
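+
+# Example invocations (module and property names are hypothetical):
+#   make proper                       # check every prop_* property found
+#   make proper t=prop_my_mod         # check all properties of one module
+#   make proper t=prop_my_mod:prop_x  # check a single property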
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Verbosity.
+
+proto_verbose_0 = @echo " PROTO " $(filter %.proto,$(?F));
+proto_verbose = $(proto_verbose_$(V))
+
+# Core targets.
+
+ifneq ($(wildcard src/),)
+ifneq ($(filter gpb protobuffs,$(BUILD_DEPS) $(DEPS)),)
+PROTO_FILES := $(filter %.proto,$(ALL_SRC_FILES))
+ERL_FILES += $(addprefix src/,$(patsubst %.proto,%_pb.erl,$(notdir $(PROTO_FILES))))
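+# For example, src/messages.proto (name illustrative) yields src/messages_pb.erl,
+# generated by gpb or protobuffs depending on which one is listed in DEPS.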
+
+ifeq ($(PROTO_FILES),)
+$(ERLANG_MK_TMP)/last-makefile-change-protobuffs:
+ $(verbose) :
+else
+# Rebuild proto files when the Makefile changes.
+# We exclude $(PROJECT).d to avoid a circular dependency.
+$(ERLANG_MK_TMP)/last-makefile-change-protobuffs: $(filter-out $(PROJECT).d,$(MAKEFILE_LIST)) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(PROTO_FILES); \
+ fi
+ $(verbose) touch $@
+
+$(PROJECT).d:: $(ERLANG_MK_TMP)/last-makefile-change-protobuffs
+endif
+
+ifeq ($(filter gpb,$(BUILD_DEPS) $(DEPS)),)
+define compile_proto.erl
+ [begin
+ protobuffs_compile:generate_source(F, [
+ {output_include_dir, "./include"},
+ {output_src_dir, "./src"}])
+ end || F <- string:tokens("$1", " ")],
+ halt().
+endef
+else
+define compile_proto.erl
+ [begin
+ gpb_compile:file(F, [
+ {include_as_lib, true},
+ {module_name_suffix, "_pb"},
+ {o_hrl, "./include"},
+ {o_erl, "./src"}])
+ end || F <- string:tokens("$1", " ")],
+ halt().
+endef
+endif
+
+ifneq ($(PROTO_FILES),)
+$(PROJECT).d:: $(PROTO_FILES)
+ $(verbose) mkdir -p ebin/ include/
+ $(if $(strip $?),$(proto_verbose) $(call erlang,$(call compile_proto.erl,$?)))
+endif
+endif
+endif
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: relx-rel relx-relup distclean-relx-rel run
+
+# Configuration.
+
+RELX ?= $(ERLANG_MK_TMP)/relx
+RELX_CONFIG ?= $(CURDIR)/relx.config
+
+RELX_URL ?= https://erlang.mk/res/relx-v3.27.0
+RELX_OPTS ?=
+RELX_OUTPUT_DIR ?= _rel
+RELX_REL_EXT ?=
+RELX_TAR ?= 1
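+# Setting RELX_TAR to anything but 1 skips building the release tarball; note
+# that SFX (below) forces RELX_TAR back to 1 because the archive is needed.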
+
+ifdef SFX
+ RELX_TAR = 1
+endif
+
+ifeq ($(firstword $(RELX_OPTS)),-o)
+ RELX_OUTPUT_DIR = $(word 2,$(RELX_OPTS))
+else
+ RELX_OPTS += -o $(RELX_OUTPUT_DIR)
+endif
+
+# Core targets.
+
+ifeq ($(IS_DEP),)
+ifneq ($(wildcard $(RELX_CONFIG)),)
+rel:: relx-rel
+
+relup:: relx-relup
+endif
+endif
+
+distclean:: distclean-relx-rel
+
+# Plugin-specific targets.
+
+$(RELX): | $(ERLANG_MK_TMP)
+ $(gen_verbose) $(call core_http_get,$(RELX),$(RELX_URL))
+ $(verbose) chmod +x $(RELX)
+
+relx-rel: $(RELX) rel-deps app
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) release
+ $(verbose) $(MAKE) relx-post-rel
+ifeq ($(RELX_TAR),1)
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) tar
+endif
+
+relx-relup: $(RELX) rel-deps app
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) release
+ $(MAKE) relx-post-rel
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) relup $(if $(filter 1,$(RELX_TAR)),tar)
+
+distclean-relx-rel:
+ $(gen_verbose) rm -rf $(RELX_OUTPUT_DIR)
+
+# Default hooks.
+relx-post-rel::
+ $(verbose) :
+
+# Run target.
+
+ifeq ($(wildcard $(RELX_CONFIG)),)
+run::
+else
+
+define get_relx_release.erl
+ {ok, Config} = file:consult("$(call core_native_path,$(RELX_CONFIG))"),
+ {release, {Name, Vsn0}, _} = lists:keyfind(release, 1, Config),
+ Vsn = case Vsn0 of
+ {cmd, Cmd} -> os:cmd(Cmd);
+ semver -> "";
+ {semver, _} -> "";
+		_ -> Vsn0
+ end,
+ Extended = case lists:keyfind(extended_start_script, 1, Config) of
+ {_, true} -> "1";
+ _ -> ""
+ end,
+ io:format("~s ~s ~s", [Name, Vsn, Extended]),
+ halt(0).
+endef
+
+RELX_REL := $(shell $(call erlang,$(get_relx_release.erl)))
+RELX_REL_NAME := $(word 1,$(RELX_REL))
+RELX_REL_VSN := $(word 2,$(RELX_REL))
+RELX_REL_CMD := $(if $(word 3,$(RELX_REL)),console)
+
+ifeq ($(PLATFORM),msys2)
+RELX_REL_EXT := .cmd
+endif
+
+run:: all
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) $(RELX_REL_CMD)
+
+ifdef RELOAD
+rel::
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) ping
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) \
+ eval "io:format(\"~p~n\", [c:lm()])"
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Relx targets:" \
+ " run Compile the project, build the release and run it"
+
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, M Robert Martin <rob@version2beta.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: shell
+
+# Configuration.
+
+SHELL_ERL ?= erl
+SHELL_PATHS ?= $(CURDIR)/ebin $(TEST_DIR)
+SHELL_OPTS ?=
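+# A common setup (illustrative): start the project's applications on entry,
+# e.g. SHELL_OPTS = -eval 'application:ensure_all_started($(PROJECT))'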
+
+ALL_SHELL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(SHELL_DEPS))
+
+# Core targets
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Shell targets:" \
+	"  shell       Run an Erlang shell with SHELL_OPTS or reasonable defaults"
+
+# Plugin-specific targets.
+
+$(foreach dep,$(SHELL_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+build-shell-deps:
+else
+build-shell-deps: $(ALL_SHELL_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_SHELL_DEPS_DIRS) ; do \
+ if [ -z "$(strip $(FULL))" ] && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ else \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ fi \
+ done
+endif
+
+shell:: build-shell-deps
+ $(gen_verbose) $(SHELL_ERL) -pa $(SHELL_PATHS) $(SHELL_OPTS)
+
+# Copyright 2017, Stanislaw Klekot <dozzie@jarowit.net>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-sphinx sphinx
+
+# Configuration.
+
+SPHINX_BUILD ?= sphinx-build
+SPHINX_SOURCE ?= doc
+SPHINX_CONFDIR ?=
+SPHINX_FORMATS ?= html
+SPHINX_DOCTREES ?= $(ERLANG_MK_TMP)/sphinx.doctrees
+SPHINX_OPTS ?=
+
+#sphinx_html_opts =
+#sphinx_html_output = html
+#sphinx_man_opts =
+#sphinx_man_output = man
+#sphinx_latex_opts =
+#sphinx_latex_output = latex
+
+# Helpers.
+
+sphinx_build_0 = @echo " SPHINX" $1; $(SPHINX_BUILD) -N -q
+sphinx_build_1 = $(SPHINX_BUILD) -N
+sphinx_build_2 = set -x; $(SPHINX_BUILD)
+sphinx_build = $(sphinx_build_$(V))
+
+define sphinx.build
+$(call sphinx_build,$1) -b $1 -d $(SPHINX_DOCTREES) $(if $(SPHINX_CONFDIR),-c $(SPHINX_CONFDIR)) $(SPHINX_OPTS) $(sphinx_$1_opts) -- $(SPHINX_SOURCE) $(call sphinx.output,$1)
+
+endef
+
+define sphinx.output
+$(if $(sphinx_$1_output),$(sphinx_$1_output),$1)
+endef
+
+# Targets.
+
+ifneq ($(wildcard $(if $(SPHINX_CONFDIR),$(SPHINX_CONFDIR),$(SPHINX_SOURCE))/conf.py),)
+docs:: sphinx
+distclean:: distclean-sphinx
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Sphinx targets:" \
+ " sphinx Generate Sphinx documentation." \
+ "" \
+	"ReST sources and the 'conf.py' file are expected in the directory pointed to by" \
+	"SPHINX_SOURCE ('doc' by default). SPHINX_FORMATS lists the formats to build (only" \
+	"the 'html' format is generated by default); the target directory can be specified by" \
+ 'setting sphinx_$${format}_output, for example: sphinx_html_output = output/html' \
+ "Additional Sphinx options can be set in SPHINX_OPTS."
+
+# Plugin-specific targets.
+
+sphinx:
+ $(foreach F,$(SPHINX_FORMATS),$(call sphinx.build,$F))
+
+distclean-sphinx:
+ $(gen_verbose) rm -rf $(filter-out $(SPHINX_SOURCE),$(foreach F,$(SPHINX_FORMATS),$(call sphinx.output,$F)))
+
+# Copyright (c) 2017, Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: show-ERL_LIBS show-ERLC_OPTS show-TEST_ERLC_OPTS
+
+show-ERL_LIBS:
+ @echo $(ERL_LIBS)
+
+show-ERLC_OPTS:
+ @$(foreach opt,$(ERLC_OPTS) -pa ebin -I include,echo "$(opt)";)
+
+show-TEST_ERLC_OPTS:
+ @$(foreach opt,$(TEST_ERLC_OPTS) -pa ebin -I include,echo "$(opt)";)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter triq,$(DEPS) $(TEST_DEPS)),triq)
+.PHONY: triq
+
+# Targets.
+
+tests:: triq
+
+define triq_check.erl
+ $(call cover.erl)
+ code:add_pathsa([
+ "$(call core_native_path,$(CURDIR)/ebin)",
+ "$(call core_native_path,$(DEPS_DIR)/*/ebin)",
+ "$(call core_native_path,$(TEST_DIR))"]),
+ try begin
+ CoverSetup(),
+ Res = case $(1) of
+ all -> [true] =:= lists:usort([triq:check(M) || M <- [$(call comma_list,$(3))]]);
+ module -> triq:check($(2));
+ function -> triq:check($(2))
+ end,
+ CoverExport("$(COVER_DATA_DIR)/triq.coverdata"),
+ Res
+ end of
+ true -> halt(0);
+ _ -> halt(1)
+ catch error:undef ->
+ io:format("Undefined property or module?~n~p~n", [erlang:get_stacktrace()]),
+ halt(0)
+ end.
+endef
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+triq: test-build cover-data-dir
+ $(verbose) $(call erlang,$(call triq_check.erl,module,$(t)))
+else
+triq: test-build cover-data-dir
+ $(verbose) echo Testing $(t)/0
+ $(verbose) $(call erlang,$(call triq_check.erl,function,$(t)()))
+endif
+else
+triq: test-build cover-data-dir
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(wildcard ebin/*.beam) $(call core_find,$(TEST_DIR)/,*.beam))))))
+ $(gen_verbose) $(call erlang,$(call triq_check.erl,all,undefined,$(MODULES)))
+endif
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Erlang Solutions Ltd.
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: xref distclean-xref
+
+# Configuration.
+
+ifeq ($(XREF_CONFIG),)
+ XREFR_ARGS :=
+else
+ XREFR_ARGS := -c $(XREF_CONFIG)
+endif
+
+XREFR ?= $(CURDIR)/xrefr
+export XREFR
+
+XREFR_URL ?= https://github.com/inaka/xref_runner/releases/download/1.1.0/xrefr
+
+# Core targets.
+
+help::
+ $(verbose) printf '%s\n' '' \
+ 'Xref targets:' \
+ ' xref Run Xrefr using $$XREF_CONFIG as config file if defined'
+
+distclean:: distclean-xref
+
+# Plugin-specific targets.
+
+$(XREFR):
+ $(gen_verbose) $(call core_http_get,$(XREFR),$(XREFR_URL))
+ $(verbose) chmod +x $(XREFR)
+
+xref: deps app $(XREFR)
+ $(gen_verbose) $(XREFR) $(XREFR_ARGS)
+
+distclean-xref:
+ $(gen_verbose) rm -rf $(XREFR)
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Viktor Söderqvist <viktor@zuiderkwast.se>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+COVER_REPORT_DIR ?= cover
+COVER_DATA_DIR ?= $(COVER_REPORT_DIR)
+
+ifdef COVER
+COVER_APPS ?= $(notdir $(ALL_APPS_DIRS))
+COVER_DEPS ?=
+endif
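+
+# Example (illustrative): "make tests COVER=1" collects coverage from the
+# eunit and ct runs; "make all.coverdata" then merges the resulting files.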
+
+# Code coverage for Common Test.
+
+ifdef COVER
+ifdef CT_RUN
+ifneq ($(wildcard $(TEST_DIR)),)
+test-build:: $(TEST_DIR)/ct.cover.spec
+
+$(TEST_DIR)/ct.cover.spec: cover-data-dir
+ $(gen_verbose) printf "%s\n" \
+ "{incl_app, '$(PROJECT)', details}." \
+ "{incl_dirs, '$(PROJECT)', [\"$(call core_native_path,$(CURDIR)/ebin)\" \
+ $(foreach a,$(COVER_APPS),$(comma) \"$(call core_native_path,$(APPS_DIR)/$a/ebin)\") \
+ $(foreach d,$(COVER_DEPS),$(comma) \"$(call core_native_path,$(DEPS_DIR)/$d/ebin)\")]}." \
+ '{export,"$(call core_native_path,$(abspath $(COVER_DATA_DIR))/ct.coverdata)"}.' > $@
+
+CT_RUN += -cover $(TEST_DIR)/ct.cover.spec
+endif
+endif
+endif
+
+# Code coverage for other tools.
+
+ifdef COVER
+define cover.erl
+ CoverSetup = fun() ->
+ Dirs = ["$(call core_native_path,$(CURDIR)/ebin)"
+ $(foreach a,$(COVER_APPS),$(comma) "$(call core_native_path,$(APPS_DIR)/$a/ebin)")
+ $(foreach d,$(COVER_DEPS),$(comma) "$(call core_native_path,$(DEPS_DIR)/$d/ebin)")],
+ [begin
+ case filelib:is_dir(Dir) of
+ false -> false;
+ true ->
+ case cover:compile_beam_directory(Dir) of
+ {error, _} -> halt(1);
+ _ -> true
+ end
+ end
+ end || Dir <- Dirs]
+ end,
+ CoverExport = fun(Filename) -> cover:export(Filename) end,
+endef
+else
+define cover.erl
+ CoverSetup = fun() -> ok end,
+ CoverExport = fun(_) -> ok end,
+endef
+endif
+
+# Core targets
+
+ifdef COVER
+ifneq ($(COVER_REPORT_DIR),)
+tests::
+ $(verbose) $(MAKE) --no-print-directory cover-report
+endif
+
+cover-data-dir: | $(COVER_DATA_DIR)
+
+$(COVER_DATA_DIR):
+ $(verbose) mkdir -p $(COVER_DATA_DIR)
+else
+cover-data-dir:
+endif
+
+clean:: coverdata-clean
+
+ifneq ($(COVER_REPORT_DIR),)
+distclean:: cover-report-clean
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Cover targets:" \
+	"  cover-report  Generate an HTML coverage report from previously collected" \
+ " cover data." \
+ " all.coverdata Merge all coverdata files into all.coverdata." \
+ "" \
+ "If COVER=1 is set, coverage data is generated by the targets eunit and ct. The" \
+	"target tests additionally generates an HTML coverage report from the combined" \
+ "coverdata files from each of these testing tools. HTML reports can be disabled" \
+ "by setting COVER_REPORT_DIR to empty."
+
+# Plugin specific targets
+
+COVERDATA = $(filter-out $(COVER_DATA_DIR)/all.coverdata,$(wildcard $(COVER_DATA_DIR)/*.coverdata))
+
+.PHONY: coverdata-clean
+coverdata-clean:
+ $(gen_verbose) rm -f $(COVER_DATA_DIR)/*.coverdata $(TEST_DIR)/ct.cover.spec
+
+# Merge all coverdata files into one.
+define cover_export.erl
+ $(foreach f,$(COVERDATA),cover:import("$(f)") == ok orelse halt(1),)
+ cover:export("$(COVER_DATA_DIR)/$@"), halt(0).
+endef
+
+all.coverdata: $(COVERDATA) cover-data-dir
+ $(gen_verbose) $(call erlang,$(cover_export.erl))
+
+# These are only defined if COVER_REPORT_DIR is non-empty. Set COVER_REPORT_DIR to
+# empty if you want the coverdata files but not the HTML report.
+ifneq ($(COVER_REPORT_DIR),)
+
+.PHONY: cover-report-clean cover-report
+
+cover-report-clean:
+ $(gen_verbose) rm -rf $(COVER_REPORT_DIR)
+ifneq ($(COVER_REPORT_DIR),$(COVER_DATA_DIR))
+ $(if $(shell ls -A $(COVER_DATA_DIR)/),,$(verbose) rmdir $(COVER_DATA_DIR))
+endif
+
+ifeq ($(COVERDATA),)
+cover-report:
+else
+
+# Modules which include eunit.hrl always contain one line without coverage
+# because eunit defines test/0 which is never called. We compensate for this.
+EUNIT_HRL_MODS = $(subst $(space),$(comma),$(shell \
+ grep -H -e '^\s*-include.*include/eunit\.hrl"' src/*.erl \
+ | sed "s/^src\/\(.*\)\.erl:.*/'\1'/" | uniq))
+
+define cover_report.erl
+ $(foreach f,$(COVERDATA),cover:import("$(f)") == ok orelse halt(1),)
+ Ms = cover:imported_modules(),
+ [cover:analyse_to_file(M, "$(COVER_REPORT_DIR)/" ++ atom_to_list(M)
+ ++ ".COVER.html", [html]) || M <- Ms],
+ Report = [begin {ok, R} = cover:analyse(M, module), R end || M <- Ms],
+ EunitHrlMods = [$(EUNIT_HRL_MODS)],
+ Report1 = [{M, {Y, case lists:member(M, EunitHrlMods) of
+ true -> N - 1; false -> N end}} || {M, {Y, N}} <- Report],
+ TotalY = lists:sum([Y || {_, {Y, _}} <- Report1]),
+ TotalN = lists:sum([N || {_, {_, N}} <- Report1]),
+ Perc = fun(Y, N) -> case Y + N of 0 -> 100; S -> round(100 * Y / S) end end,
+ TotalPerc = Perc(TotalY, TotalN),
+ {ok, F} = file:open("$(COVER_REPORT_DIR)/index.html", [write]),
+ io:format(F, "<!DOCTYPE html><html>~n"
+ "<head><meta charset=\"UTF-8\">~n"
+ "<title>Coverage report</title></head>~n"
+ "<body>~n", []),
+ io:format(F, "<h1>Coverage</h1>~n<p>Total: ~p%</p>~n", [TotalPerc]),
+ io:format(F, "<table><tr><th>Module</th><th>Coverage</th></tr>~n", []),
+ [io:format(F, "<tr><td><a href=\"~p.COVER.html\">~p</a></td>"
+ "<td>~p%</td></tr>~n",
+ [M, M, Perc(Y, N)]) || {M, {Y, N}} <- Report1],
+ How = "$(subst $(space),$(comma)$(space),$(basename $(COVERDATA)))",
+ Date = "$(shell date -u "+%Y-%m-%dT%H:%M:%SZ")",
+ io:format(F, "</table>~n"
+ "<p>Generated using ~s and erlang.mk on ~s.</p>~n"
+ "</body></html>", [How, Date]),
+ halt().
+endef
+
+cover-report:
+ $(verbose) mkdir -p $(COVER_REPORT_DIR)
+ $(gen_verbose) $(call erlang,$(cover_report.erl))
+
+endif
+endif # ifneq ($(COVER_REPORT_DIR),)
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: sfx
+
+ifdef RELX_REL
+ifdef SFX
+
+# Configuration.
+
+SFX_ARCHIVE ?= $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/$(RELX_REL_NAME)-$(RELX_REL_VSN).tar.gz
+SFX_OUTPUT_FILE ?= $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME).run
+
+# Core targets.
+
+rel:: sfx
+
+# Plugin-specific targets.
+
+define sfx_stub
+#!/bin/sh
+
+TMPDIR=`mktemp -d`
+ARCHIVE=`awk '/^__ARCHIVE_BELOW__$$/ {print NR + 1; exit 0;}' $$0`
+FILENAME=$$(basename $$0)
+REL=$${FILENAME%.*}
+
+tail -n+$$ARCHIVE $$0 | tar -xzf - -C $$TMPDIR
+
+$$TMPDIR/bin/$$REL console
+RET=$$?
+
+rm -rf $$TMPDIR
+
+exit $$RET
+
+__ARCHIVE_BELOW__
+endef
+
+sfx:
+ $(verbose) $(call core_render,sfx_stub,$(SFX_OUTPUT_FILE))
+ $(gen_verbose) cat $(SFX_ARCHIVE) >> $(SFX_OUTPUT_FILE)
+ $(verbose) chmod +x $(SFX_OUTPUT_FILE)
+
+endif
+endif
+
+# Copyright (c) 2013-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# External plugins.
+
+DEP_PLUGINS ?=
+
+$(foreach p,$(DEP_PLUGINS),\
+ $(eval $(if $(findstring /,$p),\
+ $(call core_dep_plugin,$p,$(firstword $(subst /, ,$p))),\
+ $(call core_dep_plugin,$p/plugins.mk,$p))))
+
+help:: help-plugins
+
+help-plugins::
+ $(verbose) :
+
+# Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015-2016, Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Fetch dependencies recursively (without building them).
+
+.PHONY: fetch-deps fetch-doc-deps fetch-rel-deps fetch-test-deps \
+ fetch-shell-deps
+
+.PHONY: $(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+fetch-deps: $(ERLANG_MK_RECURSIVE_DEPS_LIST)
+fetch-doc-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST)
+fetch-rel-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST)
+fetch-test-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST)
+fetch-shell-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+ifneq ($(SKIP_DEPS),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST):
+ $(verbose) :> $@
+else
+# By default, we fetch "normal" dependencies. They are also included no
+# matter the type of requested dependencies.
+#
+# $(ALL_DEPS_DIRS) includes $(BUILD_DEPS).
+
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_DOC_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_REL_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_TEST_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_SHELL_DEPS_DIRS)
+
+# Allow to use fetch-deps and $(DEP_TYPES) to fetch multiple types of
+# dependencies with a single target.
+ifneq ($(filter doc,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_DOC_DEPS_DIRS)
+endif
+ifneq ($(filter rel,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_REL_DEPS_DIRS)
+endif
+ifneq ($(filter test,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_TEST_DEPS_DIRS)
+endif
+ifneq ($(filter shell,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_SHELL_DEPS_DIRS)
+endif
+
+ERLANG_MK_RECURSIVE_TMP_LIST := $(abspath $(ERLANG_MK_TMP)/recursive-tmp-deps-$(shell echo $$PPID).log)
+
+$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): | $(ERLANG_MK_TMP)
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_RECURSIVE_TMP_LIST)
+endif
+ $(verbose) touch $(ERLANG_MK_RECURSIVE_TMP_LIST)
+ $(verbose) set -e; for dep in $^ ; do \
+ if ! grep -qs ^$$dep$$ $(ERLANG_MK_RECURSIVE_TMP_LIST); then \
+ echo $$dep >> $(ERLANG_MK_RECURSIVE_TMP_LIST); \
+ if grep -qs -E "^[[:blank:]]*include[[:blank:]]+(erlang\.mk|.*/erlang\.mk|.*ERLANG_MK_FILENAME.*)$$" \
+ $$dep/GNUmakefile $$dep/makefile $$dep/Makefile; then \
+ $(MAKE) -C $$dep fetch-deps \
+ IS_DEP=1 \
+ ERLANG_MK_RECURSIVE_TMP_LIST=$(ERLANG_MK_RECURSIVE_TMP_LIST); \
+ fi \
+ fi \
+ done
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) sort < $(ERLANG_MK_RECURSIVE_TMP_LIST) | \
+ uniq > $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted
+ $(verbose) cmp -s $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted $@ \
+ || mv $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted $@
+ $(verbose) rm -f $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted
+ $(verbose) rm $(ERLANG_MK_RECURSIVE_TMP_LIST)
+endif
+endif # ifneq ($(SKIP_DEPS),)
+
+# List dependencies recursively.
+
+.PHONY: list-deps list-doc-deps list-rel-deps list-test-deps \
+ list-shell-deps
+
+list-deps: $(ERLANG_MK_RECURSIVE_DEPS_LIST)
+list-doc-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST)
+list-rel-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST)
+list-test-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST)
+list-shell-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+list-deps list-doc-deps list-rel-deps list-test-deps list-shell-deps:
+ $(verbose) cat $^
+
+# Query dependencies recursively.
+
+.PHONY: query-deps query-doc-deps query-rel-deps query-test-deps \
+ query-shell-deps
+
+QUERY ?= name fetch_method repo version
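+# Example (illustrative): "make query-deps QUERY='name repo'" prints, for each
+# dependency, the project name followed by that dependency's name and repository.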
+
+define query_target
+$(1): $(2) clean-tmp-query.log
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(4)
+endif
+ $(verbose) $(foreach dep,$(3),\
+ echo $(PROJECT): $(foreach q,$(QUERY),$(call query_$(q),$(dep))) >> $(4) ;)
+ $(if $(filter-out query-deps,$(1)),,\
+ $(verbose) set -e; for dep in $(3) ; do \
+ if grep -qs ^$$$$dep$$$$ $(ERLANG_MK_TMP)/query.log; then \
+ :; \
+ else \
+ echo $$$$dep >> $(ERLANG_MK_TMP)/query.log; \
+ $(MAKE) -C $(DEPS_DIR)/$$$$dep $$@ QUERY="$(QUERY)" IS_DEP=1 || true; \
+ fi \
+ done)
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) touch $(4)
+ $(verbose) cat $(4)
+endif
+endef
+
+clean-tmp-query.log:
+ifeq ($(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_TMP)/query.log
+endif
+
+$(eval $(call query_target,query-deps,$(ERLANG_MK_RECURSIVE_DEPS_LIST),$(BUILD_DEPS) $(DEPS),$(ERLANG_MK_QUERY_DEPS_FILE)))
+$(eval $(call query_target,query-doc-deps,$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST),$(DOC_DEPS),$(ERLANG_MK_QUERY_DOC_DEPS_FILE)))
+$(eval $(call query_target,query-rel-deps,$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST),$(REL_DEPS),$(ERLANG_MK_QUERY_REL_DEPS_FILE)))
+$(eval $(call query_target,query-test-deps,$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST),$(TEST_DEPS),$(ERLANG_MK_QUERY_TEST_DEPS_FILE)))
+$(eval $(call query_target,query-shell-deps,$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST),$(SHELL_DEPS),$(ERLANG_MK_QUERY_SHELL_DEPS_FILE)))
diff --git a/deps/rabbitmq_management_agent/include/rabbit_mgmt_metrics.hrl b/deps/rabbitmq_management_agent/include/rabbit_mgmt_metrics.hrl
new file mode 100644
index 0000000000..cb2d20db91
--- /dev/null
+++ b/deps/rabbitmq_management_agent/include/rabbit_mgmt_metrics.hrl
@@ -0,0 +1,198 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% https://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is Pivotal Software, Inc.
+%% Copyright (c) 2010-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-type(event_type() :: queue_stats | queue_exchange_stats | vhost_stats
+ | channel_queue_stats | channel_stats
+ | channel_exchange_stats | exchange_stats
+ | node_stats | node_node_stats | connection_stats).
+-type(type() :: deliver_get | fine_stats | queue_msg_rates | queue_msg_counts
+ | coarse_node_stats | coarse_node_node_stats | coarse_conn_stats
+ | process_stats).
+
+-type(table_name() :: atom()).
+
+-define(TABLES, [{connection_stats_coarse_conn_stats, set},
+ {vhost_stats_coarse_conn_stats, set},
+ {connection_created_stats, set},
+ {connection_stats, set},
+ {channel_created_stats, set},
+ {channel_stats, set},
+ {channel_stats_fine_stats, set},
+ {channel_exchange_stats_fine_stats, set},
+ {channel_queue_stats_deliver_stats, set},
+ {vhost_stats_fine_stats, set},
+ {queue_stats_deliver_stats, set},
+ {vhost_stats_deliver_stats, set},
+ {channel_stats_deliver_stats, set},
+ {channel_process_stats, set},
+ {queue_stats_publish, set},
+ {queue_exchange_stats_publish, set},
+ {exchange_stats_publish_out, set},
+ {exchange_stats_publish_in, set},
+ {consumer_stats, set},
+ {queue_stats, set},
+ {queue_msg_stats, set},
+ {vhost_msg_stats, set},
+ {queue_process_stats, set},
+ {node_stats, set},
+ {node_coarse_stats, set},
+ {node_persister_stats, set},
+ {node_node_stats, set},
+ {node_node_coarse_stats, set},
+ {queue_msg_rates, set},
+ {vhost_msg_rates, set},
+ {connection_churn_rates, set}]).
+
+-define(INDEX_TABLES, [consumer_stats_queue_index,
+ consumer_stats_channel_index,
+ channel_exchange_stats_fine_stats_exchange_index,
+ channel_exchange_stats_fine_stats_channel_index,
+ channel_queue_stats_deliver_stats_queue_index,
+ channel_queue_stats_deliver_stats_channel_index,
+ queue_exchange_stats_publish_queue_index,
+ queue_exchange_stats_publish_exchange_index,
+ node_node_coarse_stats_node_index]).
+
+-define(GC_EVENTS, [connection_closed, channel_closed, consumer_deleted,
+ exchange_deleted, queue_deleted, vhost_deleted,
+ node_node_deleted, channel_consumer_deleted]).
+
+-define(DELEGATE_PREFIX, "delegate_management_").
+
+%%------------------------------------------------------------------------------
+%% These macros exist only for documentation and testing purposes, so that
+%% we keep track of the number and order of the metrics.
+-define(connection_stats_coarse_conn_stats(Recv_oct, Send_oct, Reductions),
+ {Recv_oct, Send_oct, Reductions}).
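+%% For example (illustrative), ?connection_stats_coarse_conn_stats(10, 20, 5)
+%% expands to the plain tuple {10, 20, 5}; the macro name merely documents
+%% the field order.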
+-define(vhost_stats_coarse_conn_stats(Recv_oct, Send_oct), {Recv_oct, Send_oct}).
+-define(connection_created_stats(Id, Name, Props), {Id, Name, Props}).
+-define(connection_stats(Id, Props), {Id, Props}).
+-define(channel_created_stats(Id, Name, Props), {Id, Name, Props}).
+-define(channel_consumer_created_stats(Queue, ChPid, ConsumerTag),
+ {Queue, {ChPid, ConsumerTag}}).
+-define(channel_stats(Id, Props), {Id, Props}).
+-define(channel_stats_fine_stats(Publish, Confirm, ReturnUnroutable, DropUnroutable),
+ {Publish, Confirm, ReturnUnroutable, DropUnroutable}).
+-define(channel_exchange_stats_fine_stats(Publish, Confirm, ReturnUnroutable, DropUnroutable),
+ {Publish, Confirm, ReturnUnroutable, DropUnroutable}).
+-define(channel_queue_stats_deliver_stats(Get, Get_no_ack, Deliver, Deliver_no_ack,
+ Redeliver, Ack, Deliver_get, Get_empty),
+ {Get, Get_no_ack, Deliver, Deliver_no_ack, Redeliver, Ack, Deliver_get,
+ Get_empty}).
+-define(vhost_stats_fine_stats(Publish, Confirm, ReturnUnroutable, DropUnroutable),
+ {Publish, Confirm, ReturnUnroutable, DropUnroutable}).
+-define(queue_stats_deliver_stats(Get, Get_no_ack, Deliver, Deliver_no_ack,
+ Redeliver, Ack, Deliver_get, Get_empty),
+ {Get, Get_no_ack, Deliver, Deliver_no_ack, Redeliver, Ack, Deliver_get,
+ Get_empty}).
+-define(vhost_stats_deliver_stats(Get, Get_no_ack, Deliver, Deliver_no_ack,
+ Redeliver, Ack, Deliver_get, Get_empty),
+ {Get, Get_no_ack, Deliver, Deliver_no_ack, Redeliver, Ack, Deliver_get,
+ Get_empty}).
+-define(channel_stats_deliver_stats(Get, Get_no_ack, Deliver, Deliver_no_ack,
+ Redeliver, Ack, Deliver_get, Get_empty),
+ {Get, Get_no_ack, Deliver, Deliver_no_ack, Redeliver, Ack, Deliver_get,
+ Get_empty}).
+-define(channel_process_stats(Reductions), {Reductions}).
+-define(queue_stats_publish(Publish), {Publish}).
+-define(queue_exchange_stats_publish(Publish), {Publish}).
+-define(exchange_stats_publish_out(Publish_out), {Publish_out}).
+-define(exchange_stats_publish_in(Publish_in), {Publish_in}).
+-define(consumer_stats(Id, Props), {Id, Props}).
+-define(queue_stats(Id, Props), {Id, Props}).
+-define(queue_msg_stats(Messages_ready, Messages_unacknowledged, Messages),
+ {Messages_ready, Messages_unacknowledged, Messages}).
+-define(vhost_msg_stats(Messages_ready, Messages_unacknowledged, Messages),
+ {Messages_ready, Messages_unacknowledged, Messages}).
+-define(queue_process_stats(Reductions), {Reductions}).
+-define(node_stats(Id, Props), {Id, Props}).
+-define(node_coarse_stats(Fd_used, Sockets_used, Mem_used, Disk_free, Proc_used,
+ Gc_num, Gc_bytes_reclaimed, Context_switches),
+ {Fd_used, Sockets_used, Mem_used, Disk_free, Proc_used, Gc_num,
+ Gc_bytes_reclaimed, Context_switches}).
+-define(node_persister_stats(Io_read_count, Io_read_bytes, Io_read_avg_time, Io_write_count,
+ Io_write_bytes, Io_write_avg_time, Io_sync_count, Io_sync_avg_time,
+ Io_seek_count, Io_seek_avg_time, Io_reopen_count, Mnesia_ram_tx_count,
+ Mnesia_disk_tx_count, Msg_store_read_count, Msg_store_write_count,
+ Queue_index_journal_write_count, Queue_index_write_count,
+ Queue_index_read_count, Io_file_handle_open_attempt_count,
+ Io_file_handle_open_attempt_avg_time),
+ {Io_read_count, Io_read_bytes, Io_read_avg_time, Io_write_count, Io_write_bytes,
+ Io_write_avg_time, Io_sync_count, Io_sync_avg_time, Io_seek_count, Io_seek_avg_time,
+ Io_reopen_count, Mnesia_ram_tx_count, Mnesia_disk_tx_count, Msg_store_read_count,
+ Msg_store_write_count, Queue_index_journal_write_count, Queue_index_write_count,
+ Queue_index_read_count, Io_file_handle_open_attempt_count,
+ Io_file_handle_open_attempt_avg_time}).
+-define(node_node_stats(Send_bytes, Recv_bytes), {Send_bytes, Recv_bytes}).
+-define(node_node_coarse_stats(Send_bytes, Recv_bytes), {Send_bytes, Recv_bytes}).
+-define(queue_msg_rates(Disk_reads, Disk_writes), {Disk_reads, Disk_writes}).
+-define(vhost_msg_rates(Disk_reads, Disk_writes), {Disk_reads, Disk_writes}).
+-define(old_aggr_stats(Id, Stats), {Id, Stats}).
+-define(connection_churn_rates(Connection_created, Connection_closed, Channel_created,
+ Channel_closed, Queue_declared, Queue_created,
+ Queue_deleted),
+ {Connection_created, Connection_closed, Channel_created, Channel_closed,
+ Queue_declared, Queue_created, Queue_deleted}).
+
+-define(stats_per_table(Table),
+ case Table of
+ connection_stats_coarse_conn_stats ->
+ [recv_oct, send_oct, reductions];
+ vhost_stats_coarse_conn_stats ->
+ [recv_oct, send_oct];
+ T when T =:= channel_stats_fine_stats;
+ T =:= channel_exchange_stats_fine_stats;
+ T =:= vhost_stats_fine_stats ->
+ [publish, confirm, return_unroutable, drop_unroutable];
+ T when T =:= channel_queue_stats_deliver_stats;
+ T =:= queue_stats_deliver_stats;
+ T =:= vhost_stats_deliver_stats;
+ T =:= channel_stats_deliver_stats ->
+ [get, get_no_ack, deliver, deliver_no_ack, redeliver, ack, deliver_get,
+ get_empty];
+ T when T =:= channel_process_stats;
+ T =:= queue_process_stats ->
+ [reductions];
+ T when T =:= queue_stats_publish;
+ T =:= queue_exchange_stats_publish ->
+ [publish];
+ exchange_stats_publish_out ->
+ [publish_out];
+ exchange_stats_publish_in ->
+ [publish_in];
+ T when T =:= queue_msg_stats;
+ T =:= vhost_msg_stats ->
+ [messages_ready, messages_unacknowledged, messages];
+ node_coarse_stats ->
+ [fd_used, sockets_used, mem_used, disk_free, proc_used, gc_num,
+ gc_bytes_reclaimed, context_switches];
+ node_persister_stats ->
+ [io_read_count, io_read_bytes, io_read_avg_time, io_write_count,
+ io_write_bytes, io_write_avg_time, io_sync_count, io_sync_avg_time,
+ io_seek_count, io_seek_avg_time, io_reopen_count, mnesia_ram_tx_count,
+ mnesia_disk_tx_count, msg_store_read_count, msg_store_write_count,
+ queue_index_journal_write_count, queue_index_write_count,
+ queue_index_read_count, io_file_handle_open_attempt_count,
+ io_file_handle_open_attempt_avg_time];
+ node_node_coarse_stats ->
+ [send_bytes, recv_bytes];
+ T when T =:= queue_msg_rates;
+ T =:= vhost_msg_rates ->
+ [disk_reads, disk_writes];
+ T when T =:= connection_churn_rates ->
+ [connection_created, connection_closed, channel_created, channel_closed, queue_declared, queue_created, queue_deleted]
+ end).
+%%------------------------------------------------------------------------------
diff --git a/deps/rabbitmq_management_agent/include/rabbit_mgmt_records.hrl b/deps/rabbitmq_management_agent/include/rabbit_mgmt_records.hrl
new file mode 100644
index 0000000000..92e26b5357
--- /dev/null
+++ b/deps/rabbitmq_management_agent/include/rabbit_mgmt_records.hrl
@@ -0,0 +1,25 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% https://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ Management Console.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-record(context, {user,
+ password = none,
+ impl}). % storage for a context of the resource handler
+
+-record(range, {first :: integer(),
+ last :: integer(),
+ incr :: integer()}).
+
+
diff --git a/deps/rabbitmq_management_agent/priv/schema/rabbitmq_management_agent.schema b/deps/rabbitmq_management_agent/priv/schema/rabbitmq_management_agent.schema
new file mode 100644
index 0000000000..fa8a76725a
--- /dev/null
+++ b/deps/rabbitmq_management_agent/priv/schema/rabbitmq_management_agent.schema
@@ -0,0 +1,4 @@
+%% Agent collectors won't start if metrics collection is disabled; only external stats are enabled.
+%% The management application will also refuse to start if metrics collection is disabled.
+{mapping, "management_agent.disable_metrics_collector", "rabbitmq_management_agent.disable_metrics_collector",
+ [{datatype, {enum, [true, false]}}]}.
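+%% Example rabbitmq.conf setting (illustrative):
+%%   management_agent.disable_metrics_collector = true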
diff --git a/deps/rabbitmq_management_agent/rabbitmq-components.mk b/deps/rabbitmq_management_agent/rabbitmq-components.mk
new file mode 100644
index 0000000000..b2a3be8b35
--- /dev/null
+++ b/deps/rabbitmq_management_agent/rabbitmq-components.mk
@@ -0,0 +1,359 @@
+ifeq ($(.DEFAULT_GOAL),)
+# Define the default goal as `all` because this file defines some targets
+# before the inclusion of erlang.mk, which would otherwise cause the wrong
+# target to become the default.
+.DEFAULT_GOAL = all
+endif
+
+# PROJECT_VERSION defaults to:
+# 1. the version exported by rabbitmq-server-release;
+# 2. the version stored in `git-revisions.txt`, if it exists;
+# 3. a version based on git-describe(1), if it is a Git clone;
+# 4. 0.0.0
+
+PROJECT_VERSION := $(RABBITMQ_VERSION)
+
+ifeq ($(PROJECT_VERSION),)
+PROJECT_VERSION := $(shell \
+if test -f git-revisions.txt; then \
+ head -n1 git-revisions.txt | \
+ awk '{print $$$(words $(PROJECT_DESCRIPTION) version);}'; \
+else \
+ (git describe --dirty --abbrev=7 --tags --always --first-parent \
+ 2>/dev/null || echo rabbitmq_v0_0_0) | \
+ sed -e 's/^rabbitmq_v//' -e 's/^v//' -e 's/_/./g' -e 's/-/+/' \
+ -e 's/-/./g'; \
+fi)
+endif
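+
+# For instance (a sketch): in a Git clone at tag v3.8.9 with no
+# git-revisions.txt, the git-describe(1) fallback and the sed rewrites above
+# would yield PROJECT_VERSION = 3.8.9.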
+
+# --------------------------------------------------------------------
+# RabbitMQ components.
+# --------------------------------------------------------------------
+
+# For RabbitMQ repositories, we want to check out branches which match
+# the parent project. For instance, if the parent project is on a
+# release tag, dependencies must be on the same release tag. If the
+# parent project is on a topic branch, dependencies must be on the same
+# topic branch, or fall back to `stable` or `master`, whichever was the
+# base of the topic branch.
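+#
+# For example (illustrative): with this repository checked out on a topic
+# branch `my-feature`, fetching dep_rabbit below tries `my-feature` first,
+# then $(base_rmq_ref), then master.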
+
+dep_amqp_client = git_rmq rabbitmq-erlang-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_amqp10_client = git_rmq rabbitmq-amqp1.0-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_amqp10_common = git_rmq rabbitmq-amqp1.0-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbit = git_rmq rabbitmq-server $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbit_common = git_rmq rabbitmq-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_amqp1_0 = git_rmq rabbitmq-amqp1.0 $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_amqp = git_rmq rabbitmq-auth-backend-amqp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_cache = git_rmq rabbitmq-auth-backend-cache $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_http = git_rmq rabbitmq-auth-backend-http $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_ldap = git_rmq rabbitmq-auth-backend-ldap $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_oauth2 = git_rmq rabbitmq-auth-backend-oauth2 $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_mechanism_ssl = git_rmq rabbitmq-auth-mechanism-ssl $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_aws = git_rmq rabbitmq-aws $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_boot_steps_visualiser = git_rmq rabbitmq-boot-steps-visualiser $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_cli = git_rmq rabbitmq-cli $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_codegen = git_rmq rabbitmq-codegen $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_consistent_hash_exchange = git_rmq rabbitmq-consistent-hash-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_ct_client_helpers = git_rmq rabbitmq-ct-client-helpers $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_ct_helpers = git_rmq rabbitmq-ct-helpers $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_delayed_message_exchange = git_rmq rabbitmq-delayed-message-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_dotnet_client = git_rmq rabbitmq-dotnet-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_event_exchange = git_rmq rabbitmq-event-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_federation = git_rmq rabbitmq-federation $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_federation_management = git_rmq rabbitmq-federation-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_java_client = git_rmq rabbitmq-java-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_client = git_rmq rabbitmq-jms-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_cts = git_rmq rabbitmq-jms-cts $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_topic_exchange = git_rmq rabbitmq-jms-topic-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_lvc_exchange = git_rmq rabbitmq-lvc-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management = git_rmq rabbitmq-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_agent = git_rmq rabbitmq-management-agent $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_exchange = git_rmq rabbitmq-management-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_themes = git_rmq rabbitmq-management-themes $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_message_timestamp = git_rmq rabbitmq-message-timestamp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_metronome = git_rmq rabbitmq-metronome $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_mqtt = git_rmq rabbitmq-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_objc_client = git_rmq rabbitmq-objc-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_aws = git_rmq rabbitmq-peer-discovery-aws $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_common = git_rmq rabbitmq-peer-discovery-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_consul = git_rmq rabbitmq-peer-discovery-consul $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_etcd = git_rmq rabbitmq-peer-discovery-etcd $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_k8s = git_rmq rabbitmq-peer-discovery-k8s $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_prometheus = git_rmq rabbitmq-prometheus $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_random_exchange = git_rmq rabbitmq-random-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_recent_history_exchange = git_rmq rabbitmq-recent-history-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_routing_node_stamp = git_rmq rabbitmq-routing-node-stamp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_rtopic_exchange = git_rmq rabbitmq-rtopic-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_server_release = git_rmq rabbitmq-server-release $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_sharding = git_rmq rabbitmq-sharding $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_shovel = git_rmq rabbitmq-shovel $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_shovel_management = git_rmq rabbitmq-shovel-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_stomp = git_rmq rabbitmq-stomp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_stream = git_rmq rabbitmq-stream $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_toke = git_rmq rabbitmq-toke $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_top = git_rmq rabbitmq-top $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_tracing = git_rmq rabbitmq-tracing $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_trust_store = git_rmq rabbitmq-trust-store $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_test = git_rmq rabbitmq-test $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_dispatch = git_rmq rabbitmq-web-dispatch $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_stomp = git_rmq rabbitmq-web-stomp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_stomp_examples = git_rmq rabbitmq-web-stomp-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt = git_rmq rabbitmq-web-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt_examples = git_rmq rabbitmq-web-mqtt-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_website = git_rmq rabbitmq-website $(current_rmq_ref) $(base_rmq_ref) live master
+dep_toke = git_rmq toke $(current_rmq_ref) $(base_rmq_ref) master
+
+dep_rabbitmq_public_umbrella = git_rmq rabbitmq-public-umbrella $(current_rmq_ref) $(base_rmq_ref) master
+
+# Third-party dependencies version pinning.
+#
+# We do that in this file, which is copied in all projects, to ensure
+# all projects use the same versions. It avoids conflicts and makes it
+# possible to work with rabbitmq-public-umbrella.
+
+dep_accept = hex 0.3.5
+dep_cowboy = hex 2.8.0
+dep_cowlib = hex 2.9.1
+dep_jsx = hex 2.11.0
+dep_lager = hex 3.8.0
+dep_prometheus = git https://github.com/deadtrickster/prometheus.erl.git master
+dep_ra = git https://github.com/rabbitmq/ra.git master
+dep_ranch = hex 1.7.1
+dep_recon = hex 2.5.1
+dep_observer_cli = hex 1.5.4
+dep_stdout_formatter = hex 0.2.4
+dep_sysmon_handler = hex 1.3.0
+
+RABBITMQ_COMPONENTS = amqp_client \
+ amqp10_common \
+ amqp10_client \
+ rabbit \
+ rabbit_common \
+ rabbitmq_amqp1_0 \
+ rabbitmq_auth_backend_amqp \
+ rabbitmq_auth_backend_cache \
+ rabbitmq_auth_backend_http \
+ rabbitmq_auth_backend_ldap \
+ rabbitmq_auth_backend_oauth2 \
+ rabbitmq_auth_mechanism_ssl \
+ rabbitmq_aws \
+ rabbitmq_boot_steps_visualiser \
+ rabbitmq_cli \
+ rabbitmq_codegen \
+ rabbitmq_consistent_hash_exchange \
+ rabbitmq_ct_client_helpers \
+ rabbitmq_ct_helpers \
+ rabbitmq_delayed_message_exchange \
+ rabbitmq_dotnet_client \
+ rabbitmq_event_exchange \
+ rabbitmq_federation \
+ rabbitmq_federation_management \
+ rabbitmq_java_client \
+ rabbitmq_jms_client \
+ rabbitmq_jms_cts \
+ rabbitmq_jms_topic_exchange \
+ rabbitmq_lvc_exchange \
+ rabbitmq_management \
+ rabbitmq_management_agent \
+ rabbitmq_management_exchange \
+ rabbitmq_management_themes \
+ rabbitmq_message_timestamp \
+ rabbitmq_metronome \
+ rabbitmq_mqtt \
+ rabbitmq_objc_client \
+ rabbitmq_peer_discovery_aws \
+ rabbitmq_peer_discovery_common \
+ rabbitmq_peer_discovery_consul \
+ rabbitmq_peer_discovery_etcd \
+ rabbitmq_peer_discovery_k8s \
+ rabbitmq_prometheus \
+ rabbitmq_random_exchange \
+ rabbitmq_recent_history_exchange \
+ rabbitmq_routing_node_stamp \
+ rabbitmq_rtopic_exchange \
+ rabbitmq_server_release \
+ rabbitmq_sharding \
+ rabbitmq_shovel \
+ rabbitmq_shovel_management \
+ rabbitmq_stomp \
+ rabbitmq_stream \
+ rabbitmq_toke \
+ rabbitmq_top \
+ rabbitmq_tracing \
+ rabbitmq_trust_store \
+ rabbitmq_web_dispatch \
+ rabbitmq_web_mqtt \
+ rabbitmq_web_mqtt_examples \
+ rabbitmq_web_stomp \
+ rabbitmq_web_stomp_examples \
+ rabbitmq_website
+
+# Erlang.mk does not rebuild dependencies after they have been compiled
+# once, except for those listed in the `$(FORCE_REBUILD)` variable.
+#
+# We want all RabbitMQ components to always be rebuilt: this eases
+# working on several components at the same time.
+
+FORCE_REBUILD = $(RABBITMQ_COMPONENTS)
+
+# Several components have a custom erlang.mk/build.config, mainly
+# to disable eunit. Therefore, we can't use the top-level project's
+# erlang.mk copy.
+NO_AUTOPATCH += $(RABBITMQ_COMPONENTS)
+
+ifeq ($(origin current_rmq_ref),undefined)
+ifneq ($(wildcard .git),)
+current_rmq_ref := $(shell (\
+ ref=$$(LANG=C git branch --list | awk '/^\* \(.*detached / {ref=$$0; sub(/.*detached [^ ]+ /, "", ref); sub(/\)$$/, "", ref); print ref; exit;} /^\* / {ref=$$0; sub(/^\* /, "", ref); print ref; exit}');\
+ if test "$$(git rev-parse --short HEAD)" != "$$ref"; then echo "$$ref"; fi))
+else
+current_rmq_ref := master
+endif
+endif
+export current_rmq_ref
+
+ifeq ($(origin base_rmq_ref),undefined)
+ifneq ($(wildcard .git),)
+possible_base_rmq_ref := master
+ifeq ($(possible_base_rmq_ref),$(current_rmq_ref))
+base_rmq_ref := $(current_rmq_ref)
+else
+base_rmq_ref := $(shell \
+ (git rev-parse --verify -q master >/dev/null && \
+ git rev-parse --verify -q $(possible_base_rmq_ref) >/dev/null && \
+ git merge-base --is-ancestor $$(git merge-base master HEAD) $(possible_base_rmq_ref) && \
+ echo $(possible_base_rmq_ref)) || \
+ echo master)
+endif
+else
+base_rmq_ref := master
+endif
+endif
+export base_rmq_ref
+
+# Repository URL selection.
+#
+# First, we infer other components' location from the current project
+# repository URL, if it's a Git repository:
+# - We take the "origin" remote URL as the base
+# - The current project name and repository name are replaced by the
+# target's properties:
+# eg. rabbitmq-common is replaced by rabbitmq-codegen
+# eg. rabbit_common is replaced by rabbitmq_codegen
+#
+# If cloning from this computed location fails, we fall back to the
+# RabbitMQ upstream on GitHub.
+
+# Macro to transform eg. "rabbit_common" to "rabbitmq-common".
+rmq_cmp_repo_name = $(word 2,$(dep_$(1)))
+
+# Upstream URL for the current project.
+RABBITMQ_COMPONENT_REPO_NAME := $(call rmq_cmp_repo_name,$(PROJECT))
+RABBITMQ_UPSTREAM_FETCH_URL ?= https://github.com/rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git
+RABBITMQ_UPSTREAM_PUSH_URL ?= git@github.com:rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git
+
+# Current URL for the current project. If this is not a Git clone,
+# default to the upstream Git repository.
+ifneq ($(wildcard .git),)
+git_origin_fetch_url := $(shell git config remote.origin.url)
+git_origin_push_url := $(shell git config remote.origin.pushurl || git config remote.origin.url)
+RABBITMQ_CURRENT_FETCH_URL ?= $(git_origin_fetch_url)
+RABBITMQ_CURRENT_PUSH_URL ?= $(git_origin_push_url)
+else
+RABBITMQ_CURRENT_FETCH_URL ?= $(RABBITMQ_UPSTREAM_FETCH_URL)
+RABBITMQ_CURRENT_PUSH_URL ?= $(RABBITMQ_UPSTREAM_PUSH_URL)
+endif
+
+# Macro to replace the following pattern:
+# 1. /foo.git -> /bar.git
+# 2. /foo -> /bar
+# 3. /foo/ -> /bar/
+subst_repo_name = $(patsubst %/$(1)/%,%/$(2)/%,$(patsubst %/$(1),%/$(2),$(patsubst %/$(1).git,%/$(2).git,$(3))))
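+
+# For example (illustrative):
+#   $(call subst_repo_name,rabbitmq-common,rabbitmq-codegen,https://github.com/rabbitmq/rabbitmq-common.git)
+# yields https://github.com/rabbitmq/rabbitmq-codegen.git.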
+
+# Macro to replace both the project's name (eg. "rabbit_common") and
+# repository name (eg. "rabbitmq-common") by the target's equivalent.
+#
+# This macro is kept on one line because we don't want whitespace in
+# the returned value, as it's used in $(dep_fetch_git_rmq) in a shell
+# single-quoted string.
+dep_rmq_repo = $(if $(dep_$(2)),$(call subst_repo_name,$(PROJECT),$(2),$(call subst_repo_name,$(RABBITMQ_COMPONENT_REPO_NAME),$(call rmq_cmp_repo_name,$(2)),$(1))),$(pkg_$(1)_repo))
+
+dep_rmq_commits = $(if $(dep_$(1)), \
+ $(wordlist 3,$(words $(dep_$(1))),$(dep_$(1))), \
+ $(pkg_$(1)_commit))
+
+define dep_fetch_git_rmq
+ fetch_url1='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_FETCH_URL),$(1))'; \
+ fetch_url2='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_FETCH_URL),$(1))'; \
+ if test "$$$$fetch_url1" != '$(RABBITMQ_CURRENT_FETCH_URL)' && \
+ git clone -q -n -- "$$$$fetch_url1" $(DEPS_DIR)/$(call dep_name,$(1)); then \
+ fetch_url="$$$$fetch_url1"; \
+ push_url='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_PUSH_URL),$(1))'; \
+ elif git clone -q -n -- "$$$$fetch_url2" $(DEPS_DIR)/$(call dep_name,$(1)); then \
+ fetch_url="$$$$fetch_url2"; \
+ push_url='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_PUSH_URL),$(1))'; \
+ fi; \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && ( \
+ $(foreach ref,$(call dep_rmq_commits,$(1)), \
+ git checkout -q $(ref) >/dev/null 2>&1 || \
+ ) \
+ (echo "error: no valid pathspec among: $(call dep_rmq_commits,$(1))" \
+ 1>&2 && false) ) && \
+ (test "$$$$fetch_url" = "$$$$push_url" || \
+ git remote set-url --push origin "$$$$push_url")
+endef
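+
+# Sketch of the resulting behaviour (illustrative): fetching amqp_client
+# first tries the URL derived from this clone's "origin" remote; if that
+# clone fails, it falls back to
+# https://github.com/rabbitmq/rabbitmq-erlang-client.git, then checks out
+# the first valid ref among $(current_rmq_ref), $(base_rmq_ref) and master.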
+
+# --------------------------------------------------------------------
+# Component distribution.
+# --------------------------------------------------------------------
+
+list-dist-deps::
+ @:
+
+prepare-dist::
+ @:
+
+# --------------------------------------------------------------------
+# Umbrella-specific settings.
+# --------------------------------------------------------------------
+
+# If the top-level project is a RabbitMQ component, we override
+# $(DEPS_DIR) for this project to point to the top-level's one.
+#
+# We also verify that the guessed DEPS_DIR is actually named `deps`,
+# to rule out any situation where it is a coincidence that we found a
+# `rabbitmq-components.mk` in upper directories.
+
+possible_deps_dir_1 = $(abspath ..)
+possible_deps_dir_2 = $(abspath ../../..)
+
+ifeq ($(notdir $(possible_deps_dir_1)),deps)
+ifneq ($(wildcard $(possible_deps_dir_1)/../rabbitmq-components.mk),)
+deps_dir_overriden = 1
+DEPS_DIR ?= $(possible_deps_dir_1)
+DISABLE_DISTCLEAN = 1
+endif
+endif
+
+ifeq ($(deps_dir_overriden),)
+ifeq ($(notdir $(possible_deps_dir_2)),deps)
+ifneq ($(wildcard $(possible_deps_dir_2)/../rabbitmq-components.mk),)
+deps_dir_overriden = 1
+DEPS_DIR ?= $(possible_deps_dir_2)
+DISABLE_DISTCLEAN = 1
+endif
+endif
+endif
+
+ifneq ($(wildcard UMBRELLA.md),)
+DISABLE_DISTCLEAN = 1
+endif
+
+# We disable `make distclean` so $(DEPS_DIR) is not accidentally removed.
+
+ifeq ($(DISABLE_DISTCLEAN),1)
+ifneq ($(filter distclean distclean-deps,$(MAKECMDGOALS)),)
+SKIP_DEPS = 1
+endif
+endif
diff --git a/deps/rabbitmq_management_agent/src/Elixir.RabbitMQ.CLI.Ctl.Commands.ResetStatsDbCommand.erl b/deps/rabbitmq_management_agent/src/Elixir.RabbitMQ.CLI.Ctl.Commands.ResetStatsDbCommand.erl
new file mode 100644
index 0000000000..bc6bdbdc25
--- /dev/null
+++ b/deps/rabbitmq_management_agent/src/Elixir.RabbitMQ.CLI.Ctl.Commands.ResetStatsDbCommand.erl
@@ -0,0 +1,54 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module('Elixir.RabbitMQ.CLI.Ctl.Commands.ResetStatsDbCommand').
+
+-behaviour('Elixir.RabbitMQ.CLI.CommandBehaviour').
+
+-export([
+ usage/0,
+ validate/2,
+ merge_defaults/2,
+ banner/2,
+ run/2,
+ output/2,
+ switches/0,
+ description/0
+ ]).
+
+
+%%----------------------------------------------------------------------------
+%% Callbacks
+%%----------------------------------------------------------------------------
+usage() ->
+ <<"reset_stats_db [--all]">>.
+
+validate(_, _) ->
+ ok.
+
+merge_defaults(A, Opts) ->
+ {A, maps:merge(#{all => false}, Opts)}.
+
+switches() ->
+ [{all, boolean}].
+
+run(_Args, #{node := Node, all := true}) ->
+ rabbit_misc:rpc_call(Node, rabbit_mgmt_storage, reset_all, []);
+run(_Args, #{node := Node, all := false}) ->
+ rabbit_misc:rpc_call(Node, rabbit_mgmt_storage, reset, []).
+
+output(Output, _Opts) ->
+ 'Elixir.RabbitMQ.CLI.DefaultOutput':output(Output).
+
+banner(_, #{all := true}) ->
+ <<"Resetting statistics database in all nodes">>;
+banner(_, #{node := Node}) ->
+ erlang:iolist_to_binary([<<"Resetting statistics database on node ">>,
+ atom_to_binary(Node, utf8)]).
+
+description() ->
+ <<"Resets statistics database. This will remove all metrics data!">>.
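+
+%% Usage sketch (illustrative): `rabbitmqctl reset_stats_db --all` reaches
+%% run/2 with #{all := true}, which calls rabbit_mgmt_storage:reset_all/0 on
+%% the target node via RPC.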
diff --git a/deps/rabbitmq_management_agent/src/exometer_slide.erl b/deps/rabbitmq_management_agent/src/exometer_slide.erl
new file mode 100644
index 0000000000..2c4e4c6d35
--- /dev/null
+++ b/deps/rabbitmq_management_agent/src/exometer_slide.erl
@@ -0,0 +1,551 @@
+%% This file is a copy of exometer_slide.erl from https://github.com/Feuerlabs/exometer_core,
+%% with the following modifications:
+%%
+%% 1) The elements are tuples of numbers.
+%%
+%% 2) Only one element for each expected interval point is added; intermediate values
+%%    are discarded. Thus, if we have a window of 60s and an interval of 5s, at most
+%%    12 elements are stored.
+%%
+%% 3) Additions can be provided as increments to the last value stored.
+%%
+%% 4) sum/1 implements the sum of several slides, generating a new timestamp sequence based
+%%    on the given intervals. Elements in each window are added to the closest interval point.
+%%
+%% Original commit: https://github.com/Feuerlabs/exometer_core/commit/2759edc804211b5245867b32c9a20c8fe1d93441
+%%
+%% -------------------------------------------------------------------
+%%
+%% Copyright (c) 2014 Basho Technologies, Inc. All Rights Reserved.
+%%
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% -------------------------------------------------------------------
+%%
+%% @author Tony Rogvall <tony@rogvall.se>
+%% @author Ulf Wiger <ulf@feuerlabs.com>
+%% @author Magnus Feuer <magnus@feuerlabs.com>
+%%
+%% @doc Efficient sliding-window buffer
+%%
+%% Initial implementation: 29 Sep 2009 by Tony Rogvall
+%%
+%% This module implements an efficient sliding window, maintaining
+%% two lists - a primary and a secondary. Values are paired with a
+%% timestamp (millisecond resolution, see `timestamp/0')
+%% and prepended to the primary list. When the time span between the oldest
+%% and the newest entry in the primary list exceeds the given window size,
+%% the primary list is shifted into the secondary list position, and the
+%% new entry is added to a new (empty) primary list.
+%%
+%% The window can be converted to a list using `to_list/1'.
+%% @end
+%%
+%%
+%% All modifications are (C) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%% The Initial Developer of the Original Code is Basho Technologies, Inc.
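+
+%% Usage sketch (illustrative, not part of the original documentation):
+%%   Now = exometer_slide:timestamp(),
+%%   S0  = exometer_slide:new(Now, 60000, [{interval, 5000}, {incremental, true}]),
+%%   S1  = exometer_slide:add_element(Now + 5000, {1, 2}, S0),
+%%   exometer_slide:to_list(Now + 10000, S1).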
+
+-module(exometer_slide).
+
+-export([new/2, new/3,
+ reset/1,
+ add_element/3,
+ to_list/2,
+ to_list/3,
+ foldl/5,
+ map/2,
+ to_normalized_list/5]).
+
+-export([timestamp/0,
+ last_two/1,
+ last/1]).
+
+-export([sum/1,
+ sum/2,
+ sum/5,
+ optimize/1]).
+
+%% For testing
+-export([buffer/1]).
+
+-compile(inline).
+-compile(inline_list_funcs).
+
+
+-type value() :: tuple().
+-type internal_value() :: tuple() | drop.
+-type timestamp() :: non_neg_integer().
+
+-type fold_acc() :: any().
+-type fold_fun() :: fun(({timestamp(), internal_value()}, fold_acc()) -> fold_acc()).
+
+%% Fixed size event buffer
+-record(slide, {size = 0 :: integer(), % ms window
+ n = 0 :: integer(), % number of elements in buf1
+ max_n :: infinity | integer(), % max no of elements
+ incremental = false :: boolean(),
+ interval :: integer(),
+ last = 0 :: integer(), % millisecond timestamp
+ first = undefined :: undefined | integer(), % millisecond timestamp
+ buf1 = [] :: [internal_value()],
+ buf2 = [] :: [internal_value()],
+ total :: undefined | value()}).
+
+-opaque slide() :: #slide{}.
+
+-export_type([slide/0, timestamp/0]).
+
+-spec timestamp() -> timestamp().
+%% @doc Generate a millisecond-resolution timestamp.
+%%
+%% This timestamp format is used e.g. by the `exometer_slide' and
+%% `exometer_histogram' implementations.
+%% @end
+timestamp() ->
+ os:system_time(milli_seconds).
+
+-spec new(_Size::integer(), _Options::list()) -> slide().
+%% @doc Create a new sliding-window buffer.
+%%
+%% `Size' determines the size in milliseconds of the sliding window.
+%% The implementation prepends values into a primary list until the oldest
+%% element in the list is `Size' ms older than the current value. It then
+%% swaps the primary list into a secondary list, and starts prepending to
+%% a new primary list. This means that more data than fits inside the window
+%% will be kept - upwards of twice as much. On the other hand, updating the
+%% buffer is very cheap.
+%% @end
+new(Size, Opts) -> new(timestamp(), Size, Opts).
+
+-spec new(Timestamp :: timestamp(), Size::integer(), Options::list()) -> slide().
+new(TS, Size, Opts) ->
+ #slide{size = Size,
+ max_n = proplists:get_value(max_n, Opts, infinity),
+ interval = proplists:get_value(interval, Opts, infinity),
+ last = TS,
+ first = undefined,
+ incremental = proplists:get_value(incremental, Opts, false),
+ buf1 = [],
+ buf2 = []}.
+
+-spec reset(slide()) -> slide().
+
+%% @doc Empty the buffer
+%%
+reset(Slide) ->
+ Slide#slide{n = 0, buf1 = [], buf2 = [], last = 0, first = undefined}.
+
+%% @doc Add an element to the buffer, tagged with the given timestamp.
+%%
+%% Apart from the explicitly specified timestamp, this function works just
+%% like `add_element/2' in the original exometer_core module (not exported
+%% by this copy).
+%% @end
+-spec add_element(timestamp(), value(), slide()) -> slide().
+add_element(_TS, _Evt, Slide) when Slide#slide.size == 0 ->
+ Slide;
+add_element(TS, Evt, #slide{last = Last, interval = Interval, total = Total0,
+ incremental = true} = Slide)
+ when (TS - Last) < Interval ->
+ Total = add_to_total(Evt, Total0),
+ Slide#slide{total = Total};
+add_element(TS, Evt, #slide{last = Last, interval = Interval} = Slide)
+ when (TS - Last) < Interval ->
+ Slide#slide{total = Evt};
+add_element(TS, Evt, #slide{last = Last, size = Sz, incremental = true,
+ n = N, max_n = MaxN, total = Total0,
+ buf1 = Buf1} = Slide) ->
+ N1 = N+1,
+ Total = add_to_total(Evt, Total0),
+    %% Total could be the same as the last sample, by adding and subtracting
+    %% the same amount to the totals. That is not strictly a drop, but should
+ %% generate new samples.
+ %% I.e. 0, 0, -14, 14 (total = 0, samples = 14, -14, 0, drop)
+ case {is_zeros(Evt), Buf1} of
+ {_, []} ->
+ Slide#slide{n = N1, first = TS, buf1 = [{TS, Total} | Buf1],
+ last = TS, total = Total};
+ _ when TS - Last > Sz; N1 > MaxN ->
+ %% swap
+ Slide#slide{last = TS, n = 1, buf1 = [{TS, Total}],
+ buf2 = Buf1, total = Total};
+ {true, [{_, Total}, {_, drop} = Drop | Tail]} ->
+ %% Memory optimisation
+ Slide#slide{buf1 = [{TS, Total}, Drop | Tail],
+ n = N1, last = TS};
+ {true, [{DropTS, Total} | Tail]} ->
+ %% Memory optimisation
+ Slide#slide{buf1 = [{TS, Total}, {DropTS, drop} | Tail],
+ n = N1, last = TS};
+ _ ->
+ Slide#slide{n = N1, buf1 = [{TS, Total} | Buf1],
+ last = TS, total = Total}
+ end;
+add_element(TS, Evt, #slide{last = Last, size = Sz, n = N, max_n = MaxN,
+ buf1 = Buf1} = Slide)
+ when TS - Last > Sz; N + 1 > MaxN ->
+ Slide#slide{last = TS, n = 1, buf1 = [{TS, Evt}],
+ buf2 = Buf1, total = Evt};
+add_element(TS, Evt, #slide{buf1 = [{_, Evt}, {_, drop} = Drop | Tail],
+ n = N} = Slide) ->
+ %% Memory optimisation
+ Slide#slide{buf1 = [{TS, Evt}, Drop | Tail], n = N + 1, last = TS};
+add_element(TS, Evt, #slide{buf1 = [{DropTS, Evt} | Tail], n = N} = Slide) ->
+ %% Memory optimisation
+ Slide#slide{buf1 = [{TS, Evt}, {DropTS, drop} | Tail],
+ n = N + 1, last = TS};
+add_element(TS, Evt, #slide{n = N, buf1 = Buf1} = Slide) ->
+ N1 = N+1,
+ case Buf1 of
+ [] ->
+ Slide#slide{n = N1, buf1 = [{TS, Evt} | Buf1],
+ last = TS, first = TS, total = Evt};
+ _ ->
+ Slide#slide{n = N1, buf1 = [{TS, Evt} | Buf1],
+ last = TS, total = Evt}
+ end.
+
+add_to_total(Evt, undefined) ->
+ Evt;
+add_to_total({A0}, {B0}) ->
+ {B0 + A0};
+add_to_total({A0, A1}, {B0, B1}) ->
+ {B0 + A0, B1 + A1};
+add_to_total({A0, A1, A2}, {B0, B1, B2}) ->
+ {B0 + A0, B1 + A1, B2 + A2};
+add_to_total({A0, A1, A2, A3}, {B0, B1, B2, B3}) ->
+ {B0 + A0, B1 + A1, B2 + A2, B3 + A3};
+add_to_total({A0, A1, A2, A3, A4}, {B0, B1, B2, B3, B4}) ->
+ {B0 + A0, B1 + A1, B2 + A2, B3 + A3, B4 + A4};
+add_to_total({A0, A1, A2, A3, A4, A5}, {B0, B1, B2, B3, B4, B5}) ->
+ {B0 + A0, B1 + A1, B2 + A2, B3 + A3, B4 + A4, B5 + A5};
+add_to_total({A0, A1, A2, A3, A4, A5, A6}, {B0, B1, B2, B3, B4, B5, B6}) ->
+ {B0 + A0, B1 + A1, B2 + A2, B3 + A3, B4 + A4, B5 + A5, B6 + A6};
+add_to_total({A0, A1, A2, A3, A4, A5, A6, A7}, {B0, B1, B2, B3, B4, B5, B6, B7}) ->
+ {B0 + A0, B1 + A1, B2 + A2, B3 + A3, B4 + A4, B5 + A5, B6 + A6, B7 + A7};
+add_to_total({A0, A1, A2, A3, A4, A5, A6, A7, A8}, {B0, B1, B2, B3, B4, B5, B6, B7, B8}) ->
+ {B0 + A0, B1 + A1, B2 + A2, B3 + A3, B4 + A4, B5 + A5, B6 + A6, B7 + A7, B8 + A8};
+add_to_total({A0, A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14,
+ A15, A16, A17, A18, A19},
+ {B0, B1, B2, B3, B4, B5, B6, B7, B8, B9, B10, B11, B12, B13, B14,
+ B15, B16, B17, B18, B19}) ->
+ {B0 + A0, B1 + A1, B2 + A2, B3 + A3, B4 + A4, B5 + A5, B6 + A6, B7 + A7, B8 + A8,
+ B9 + A9, B10 + A10, B11 + A11, B12 + A12, B13 + A13, B14 + A14, B15 + A15, B16 + A16,
+ B17 + A17, B18 + A18, B19 + A19}.
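+
+%% For example (illustrative): add_to_total({1, 2}, {10, 20}) returns {11, 22},
+%% and add_to_total(Evt, undefined) returns Evt unchanged.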
+
+is_zeros({0}) ->
+ true;
+is_zeros({0, 0}) ->
+ true;
+is_zeros({0, 0, 0}) ->
+ true;
+is_zeros({0, 0, 0, 0}) ->
+ true;
+is_zeros({0, 0, 0, 0, 0}) ->
+ true;
+is_zeros({0, 0, 0, 0, 0, 0}) ->
+ true;
+is_zeros({0, 0, 0, 0, 0, 0, 0}) ->
+ true;
+is_zeros({0, 0, 0, 0, 0, 0, 0, 0, 0}) ->
+ true;
+is_zeros({0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}) ->
+ true;
+is_zeros(_) ->
+ false.
+
+-spec optimize(slide()) -> slide().
+optimize(#slide{buf2 = []} = Slide) ->
+ Slide;
+optimize(#slide{buf1 = Buf1, buf2 = Buf2, max_n = MaxN, n = N} = Slide)
+ when is_integer(MaxN) andalso length(Buf1) < MaxN ->
+ Slide#slide{buf1 = Buf1,
+ buf2 = lists:sublist(Buf2, n_diff(MaxN, N) + 1)};
+optimize(Slide) -> Slide.
+
+snd(T) when is_tuple(T) ->
+ element(2, T).
+
+
+-spec to_list(timestamp(), slide()) -> [{timestamp(), value()}].
+%% @doc Convert the sliding window into a list of timestamped values.
+%% @end
+to_list(_Now, #slide{size = Sz}) when Sz == 0 ->
+ [];
+to_list(Now, #slide{size = Sz} = Slide) ->
+ snd(to_list_from(Now, Now - Sz, Slide)).
+
+to_list(Now, Start, Slide) ->
+ snd(to_list_from(Now, Start, Slide)).
+
+to_list_from(Now, Start0, #slide{max_n = MaxN, buf2 = Buf2, first = FirstTS,
+ interval = Interval} = Slide) ->
+
+ {NewN, Buf1} = maybe_add_last_sample(Now, Slide),
+ Start = first_max(FirstTS, Start0),
+ {Prev0, Buf1_1} = take_since(Buf1, Now, Start, first_max(MaxN, NewN), [], Interval),
+ case take_since(Buf2, Now, Start, first_max(MaxN, NewN), Buf1_1, Interval) of
+ {undefined, Buf1_1} ->
+ {Prev0, Buf1_1};
+ {_Prev, Buf1_1} = Res ->
+ case Prev0 of
+ undefined ->
+ Res;
+ _ ->
+ %% If take_since returns the same buffer, that means we don't
+                    %% need Buf2 at all. We might be returning a too-old sample
+                    %% as the previous value, so we must use the one from Buf1.
+ {Prev0, Buf1_1}
+ end;
+ Res ->
+ Res
+ end.
+
+first_max(F, X) when is_integer(F) -> max(F, X);
+first_max(_, X) -> X.
+
+-spec last_two(slide()) -> [{timestamp(), value()}].
+%% @doc Returns the newest two elements in the sample.
+last_two(#slide{buf1 = [{TS, Evt} = H1, {_, drop} | _], interval = Interval}) ->
+ [H1, {TS - Interval, Evt}];
+last_two(#slide{buf1 = [H1, H2_0 | _], interval = Interval}) ->
+ H2 = adjust_timestamp(H1, H2_0, Interval),
+ [H1, H2];
+last_two(#slide{buf1 = [H1], buf2 = [H2_0 | _],
+ interval = Interval}) ->
+ H2 = adjust_timestamp(H1, H2_0, Interval),
+ [H1, H2];
+last_two(#slide{buf1 = [H1], buf2 = []}) ->
+ [H1];
+last_two(_) ->
+ [].
+
+adjust_timestamp({TS1, _}, {TS2, V2}, Interval) ->
+ case TS1 - TS2 > Interval of
+ true -> {TS1 - Interval, V2};
+ false -> {TS2, V2}
+ end.
+
+-spec last(slide()) -> value() | undefined.
+last(#slide{total = T}) when T =/= undefined ->
+ T;
+last(#slide{buf1 = [{_TS, T} | _]}) ->
+ T;
+last(#slide{buf2 = [{_TS, T} | _]}) ->
+ T;
+last(_) ->
+ undefined.
+
+-spec foldl(timestamp(), timestamp(), fold_fun(), fold_acc(), slide()) -> fold_acc().
+%% @doc Fold over the sliding window, starting from `Timestamp'.
+%% Now provides a reference point to evaluate whether to include
+%% partial, unrealised sample values in the sequence. Unrealised values will be
+%% appended to the sequence when Now >= LastTS + Interval.
+%%
+%% The fun should be of the form `fun({Timestamp, Value}, Acc) -> NewAcc'.
+%% The values are processed in order from oldest to newest.
+%% @end
+foldl(_Now, _Timestamp, _Fun, _Acc, #slide{size = Sz}) when Sz == 0 ->
+ [];
+foldl(Now, Start0, Fun, Acc, #slide{max_n = _MaxN, buf2 = _Buf2,
+ interval = _Interval} = Slide) ->
+ lists:foldl(Fun, Acc, element(2, to_list_from(Now, Start0, Slide)) ++ [last]).
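+
+%% Usage sketch (illustrative): summing the single field of every sample in
+%% the window, remembering that the atom `last' is appended to the sequence:
+%%   Sum = exometer_slide:foldl(Now, Now - 60000,
+%%                              fun({_TS, {V}}, Acc) -> Acc + V;
+%%                                 (last, Acc) -> Acc
+%%                              end, 0, Slide).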
+
+map(Fun, #slide{buf1 = Buf1, buf2 = Buf2, total = Total} = Slide) ->
+ BufFun = fun({Timestamp, Value}) ->
+ {Timestamp, Fun(Value)}
+ end,
+ MappedBuf1 = lists:map(BufFun, Buf1),
+ MappedBuf2 = lists:map(BufFun, Buf2),
+ MappedTotal = Fun(Total),
+ Slide#slide{buf1 = MappedBuf1, buf2 = MappedBuf2, total = MappedTotal}.
+
+maybe_add_last_sample(_Now, #slide{total = T, n = N,
+ buf1 = [{_, T} | _] = Buf1}) ->
+ {N, Buf1};
+maybe_add_last_sample(Now, #slide{total = T,
+ n = N,
+ last = Last,
+ interval = I,
+ buf1 = Buf1})
+ when T =/= undefined andalso Now >= Last + I ->
+ {N + 1, [{Last + I, T} | Buf1]};
+maybe_add_last_sample(_Now, #slide{buf1 = Buf1, n = N}) ->
+ {N, Buf1}.
+
+
+create_normalized_lookup(Start, Interval, RoundFun, Samples) ->
+ lists:foldl(fun({TS, Value}, Acc) when TS - Start >= 0 ->
+ NewTS = map_timestamp(TS, Start, Interval, RoundFun),
+ maps:update_with(NewTS,
+ fun({T, V}) when T > TS ->
+ {T, V};
+ (_) ->
+ {TS, Value}
+ end, {TS, Value}, Acc);
+ (_, Acc) ->
+ Acc
+ end, #{}, Samples).
+
+-spec to_normalized_list(timestamp(), timestamp(), integer(), slide(),
+ no_pad | tuple()) -> [tuple()].
+to_normalized_list(Now, Start, Interval, Slide, Empty) ->
+ to_normalized_list(Now, Start, Interval, Slide, Empty, fun ceil/1).
+
+to_normalized_list(Now, Start, Interval, #slide{first = FirstTS0,
+ total = Total} = Slide,
+ Empty, RoundFun) ->
+
+ RoundTSFun = fun (TS) -> map_timestamp(TS, Start, Interval, RoundFun) end,
+
+ % add interval as we don't want to miss a sample due to rounding
+ {Prev, Samples} = to_list_from(Now + Interval, Start, Slide),
+ Lookup = create_normalized_lookup(Start, Interval, RoundFun, Samples),
+
+ NowRound = RoundTSFun(Now),
+
+ Pad = case Samples of
+ _ when Empty =:= no_pad ->
+ [];
+ [{TS, _} | _] when Prev =/= undefined, Start =< TS ->
+ [{T, snd(Prev)}
+ || T <- lists:seq(RoundTSFun(TS) - Interval, Start,
+ -Interval)];
+ [{TS, _} | _] when is_number(FirstTS0) andalso Start < FirstTS0 ->
+ % only if we know there is nothing in the past can we
+ % generate a 0 pad
+ [{T, Empty} || T <- lists:seq(RoundTSFun(TS) - Interval, Start,
+ -Interval)];
+ _ when FirstTS0 =:= undefined andalso Total =:= undefined ->
+ [{T, Empty} || T <- lists:seq(NowRound, Start, -Interval)];
+ [] -> % samples have been seen, use the total to pad
+ [{T, Total} || T <- lists:seq(NowRound, Start, -Interval)];
+ _ -> []
+ end,
+
+ {_, Res1} = lists:foldl(
+ fun(T, {Last, Acc}) ->
+ case maps:find(T, Lookup) of
+ {ok, {_, V}} ->
+ {V, [{T, V} | Acc]};
+ error when Last =:= undefined ->
+ {Last, Acc};
+ error -> % this pads the last value into the future
+ {Last, [{T, Last} | Acc]}
+ end
+ end, {undefined, []},
+ lists:seq(Start, NowRound, Interval)),
+ Res1 ++ Pad.
+
+
+%% @doc Sums a list of slides
+%%
+%% Takes the last known timestamp and creates a template version of the
+%% sliding window. Timestamps are then truncated and summed with the value
+%% in the template slide.
+%% @end
+-spec sum([slide()]) -> slide().
+sum(Slides) -> sum(Slides, no_pad).
+
+sum([#slide{size = Size, interval = Interval} | _] = Slides, Pad) ->
+ % take the freshest timestamp as reference point for summing operation
+ Now = lists:max([Last || #slide{last = Last} <- Slides]),
+ Start = Now - Size,
+ sum(Now, Start, Interval, Slides, Pad).
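+
+%% Usage sketch (illustrative): merging per-object slides into one:
+%%   Combined = exometer_slide:sum([SlideA, SlideB]),
+%%   Latest   = exometer_slide:last(Combined).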
+
+
+sum(Now, Start, Interval, [Slide | _ ] = All, Pad) ->
+ Fun = fun({TS, Value}, Acc) ->
+ maps:update_with(TS, fun(V) -> add_to_total(V, Value) end,
+ Value, Acc)
+ end,
+ {Total, Dict} =
+ lists:foldl(fun(#slide{total = T} = S, {Tot, Acc}) ->
+ Samples = to_normalized_list(Now, Start, Interval, S,
+ Pad, fun ceil/1),
+ Total = add_to_total(T, Tot),
+ Folded = lists:foldl(Fun, Acc, Samples),
+ {Total, Folded}
+ end, {undefined, #{}}, All),
+
+ {First, Buffer} = case lists:sort(maps:to_list(Dict)) of
+ [] ->
+ F = case [TS || #slide{first = TS} <- All,
+ is_integer(TS)] of
+ [] -> undefined;
+ FS -> lists:min(FS)
+ end,
+ {F, []};
+ [{F, _} | _ ] = B ->
+ {F, lists:reverse(B)}
+ end,
+ Slide#slide{buf1 = Buffer, buf2 = [], total = Total, n = length(Buffer),
+ first = First, last = Now}.
+
+
+truncated_seq(_First, _Last, _Incr, 0) ->
+ [];
+truncated_seq(TS, TS, _Incr, MaxN) when MaxN > 0 ->
+ [TS];
+truncated_seq(First, Last, Incr, MaxN) when First =< Last andalso MaxN > 0 ->
+ End = min(Last, First + (MaxN * Incr) - Incr),
+ lists:seq(First, End, Incr);
+truncated_seq(First, Last, Incr, MaxN) ->
+ End = max(Last, First + (MaxN * Incr) - Incr),
+ lists:seq(First, End, Incr).
+
+
+take_since([{DropTS, drop} | T], Now, Start, N, [{TS, Evt} | _] = Acc,
+ Interval) ->
+ case T of
+ [] ->
+ Fill = [{TS0, Evt} || TS0 <- truncated_seq(TS - Interval,
+ max(DropTS, Start),
+ -Interval, N)],
+ {undefined, lists:reverse(Fill) ++ Acc};
+ [{TS0, _} = E | Rest] when TS0 >= Start, N > 0 ->
+ Fill = [{TS1, Evt} || TS1 <- truncated_seq(TS0 + Interval,
+ max(TS0 + Interval, TS - Interval),
+ Interval, N)],
+ take_since(Rest, Now, Start, decr(N), [E | Fill ++ Acc], Interval);
+ [Prev | _] -> % next sample is out of range so needs to be filled from Start
+ Fill = [{TS1, Evt} || TS1 <- truncated_seq(Start, max(Start, TS - Interval),
+ Interval, N)],
+ {Prev, Fill ++ Acc}
+ end;
+take_since([{TS, V} = H | T], Now, Start, N, Acc, Interval) when TS >= Start,
+ N > 0,
+ TS =< Now,
+ is_tuple(V) ->
+ take_since(T, Now, Start, decr(N), [H|Acc], Interval);
+take_since([{TS,_} | T], Now, Start, N, Acc, Interval) when TS >= Start, N > 0 ->
+ take_since(T, Now, Start, decr(N), Acc, Interval);
+take_since([Prev | _], _, _, _, Acc, _) ->
+ {Prev, Acc};
+take_since(_, _, _, _, Acc, _) ->
+ %% Don't reverse; already the wanted order.
+ {undefined, Acc}.
+
+decr(N) when is_integer(N) ->
+ N-1;
+decr(N) -> N.
+
+n_diff(A, B) when is_integer(A) ->
+ A - B.
+
+ceil(X) when X < 0 ->
+ trunc(X);
+ceil(X) ->
+ T = trunc(X),
+ case X - T == 0 of
+ true -> T;
+ false -> T + 1
+ end.
+
+map_timestamp(TS, Start, Interval, Round) ->
+ Factor = Round((TS - Start) / Interval),
+ Start + Interval * Factor.
+
+buffer(#slide{buf1 = Buf1, buf2 = Buf2}) ->
+ Buf1 ++ Buf2.
diff --git a/deps/rabbitmq_management_agent/src/rabbit_mgmt_agent_app.erl b/deps/rabbitmq_management_agent/src/rabbit_mgmt_agent_app.erl
new file mode 100644
index 0000000000..e889815c2f
--- /dev/null
+++ b/deps/rabbitmq_management_agent/src/rabbit_mgmt_agent_app.erl
@@ -0,0 +1,17 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mgmt_agent_app).
+
+-behaviour(application).
+-export([start/2, stop/1]).
+
+start(_Type, _StartArgs) ->
+ rabbit_mgmt_agent_sup_sup:start_link().
+
+stop(_State) ->
+ ok.
diff --git a/deps/rabbitmq_management_agent/src/rabbit_mgmt_agent_config.erl b/deps/rabbitmq_management_agent/src/rabbit_mgmt_agent_config.erl
new file mode 100644
index 0000000000..e8d074e891
--- /dev/null
+++ b/deps/rabbitmq_management_agent/src/rabbit_mgmt_agent_config.erl
@@ -0,0 +1,22 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+-module(rabbit_mgmt_agent_config).
+
+-export([get_env/1, get_env/2]).
+
+%% Some deployments only run with the agent enabled: make it possible
+%% for them to configure key management application settings such as
+%% rates_mode.
+get_env(Key) ->
+ rabbit_misc:get_env(rabbitmq_management, Key,
+ rabbit_misc:get_env(rabbitmq_management_agent, Key,
+ undefined)).
+
+get_env(Key, Default) ->
+ rabbit_misc:get_env(rabbitmq_management, Key,
+ rabbit_misc:get_env(rabbitmq_management_agent, Key,
+ Default)).
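+
+%% For example (illustrative):
+%% rabbit_mgmt_agent_config:get_env(rates_mode, basic) returns the
+%% rabbitmq_management value if set, otherwise the rabbitmq_management_agent
+%% value, otherwise the default `basic'.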
diff --git a/deps/rabbitmq_management_agent/src/rabbit_mgmt_agent_sup.erl b/deps/rabbitmq_management_agent/src/rabbit_mgmt_agent_sup.erl
new file mode 100644
index 0000000000..0c4a5465e9
--- /dev/null
+++ b/deps/rabbitmq_management_agent/src/rabbit_mgmt_agent_sup.erl
@@ -0,0 +1,55 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mgmt_agent_sup).
+
+%% pg2 is deprecated in OTP 23.
+-compile(nowarn_deprecated_function).
+
+-behaviour(supervisor).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+-include_lib("rabbit_common/include/rabbit_core_metrics.hrl").
+-include("rabbit_mgmt_metrics.hrl").
+
+-export([init/1]).
+-export([start_link/0]).
+
+init([]) ->
+ MCs = maybe_enable_metrics_collector(),
+ ExternalStats = {rabbit_mgmt_external_stats,
+ {rabbit_mgmt_external_stats, start_link, []},
+ permanent, 5000, worker, [rabbit_mgmt_external_stats]},
+ {ok, {{one_for_one, 100, 10}, [ExternalStats] ++ MCs}}.
+
+start_link() ->
+ supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+
+maybe_enable_metrics_collector() ->
+ case application:get_env(rabbitmq_management_agent, disable_metrics_collector, false) of
+ false ->
+ pg2:create(management_db),
+ ok = pg2:join(management_db, self()),
+ ST = {rabbit_mgmt_storage, {rabbit_mgmt_storage, start_link, []},
+ permanent, ?WORKER_WAIT, worker, [rabbit_mgmt_storage]},
+ MD = {delegate_management_sup, {delegate_sup, start_link, [5, ?DELEGATE_PREFIX]},
+ permanent, ?SUPERVISOR_WAIT, supervisor, [delegate_sup]},
+ MC = [{rabbit_mgmt_metrics_collector:name(Table),
+ {rabbit_mgmt_metrics_collector, start_link, [Table]},
+ permanent, ?WORKER_WAIT, worker, [rabbit_mgmt_metrics_collector]}
+ || {Table, _} <- ?CORE_TABLES],
+ MGC = [{rabbit_mgmt_metrics_gc:name(Event),
+ {rabbit_mgmt_metrics_gc, start_link, [Event]},
+ permanent, ?WORKER_WAIT, worker, [rabbit_mgmt_metrics_gc]}
+ || Event <- ?GC_EVENTS],
+ GC = {rabbit_mgmt_gc, {rabbit_mgmt_gc, start_link, []},
+ permanent, ?WORKER_WAIT, worker, [rabbit_mgmt_gc]},
+ [ST, MD, GC | MC ++ MGC];
+ true ->
+ []
+ end.
diff --git a/deps/rabbitmq_management_agent/src/rabbit_mgmt_agent_sup_sup.erl b/deps/rabbitmq_management_agent/src/rabbit_mgmt_agent_sup_sup.erl
new file mode 100644
index 0000000000..17ffa35307
--- /dev/null
+++ b/deps/rabbitmq_management_agent/src/rabbit_mgmt_agent_sup_sup.erl
@@ -0,0 +1,28 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mgmt_agent_sup_sup).
+
+-behaviour(supervisor2).
+
+-export([init/1]).
+-export([start_link/0, start_child/0]).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+start_child() ->
+ supervisor2:start_child(?MODULE, sup()).
+
+sup() ->
+ {rabbit_mgmt_agent_sup, {rabbit_mgmt_agent_sup, start_link, []},
+ temporary, ?SUPERVISOR_WAIT, supervisor, [rabbit_mgmt_agent_sup]}.
+
+init([]) ->
+ {ok, {{one_for_one, 0, 1}, [sup()]}}.
+
+start_link() ->
+ supervisor2:start_link({local, ?MODULE}, ?MODULE, []).
diff --git a/deps/rabbitmq_management_agent/src/rabbit_mgmt_data.erl b/deps/rabbitmq_management_agent/src/rabbit_mgmt_data.erl
new file mode 100644
index 0000000000..d73c8a3819
--- /dev/null
+++ b/deps/rabbitmq_management_agent/src/rabbit_mgmt_data.erl
@@ -0,0 +1,572 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2016-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mgmt_data).
+
+-include("rabbit_mgmt_records.hrl").
+-include("rabbit_mgmt_metrics.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+-include_lib("rabbit_common/include/rabbit_core_metrics.hrl").
+
+-export([empty/2, pick_range/2]).
+
+% delegate api
+-export([overview_data/4,
+ consumer_data/2,
+ all_list_queue_data/3,
+ all_detail_queue_data/3,
+ all_exchange_data/3,
+ all_connection_data/3,
+ all_list_channel_data/3,
+ all_detail_channel_data/3,
+ all_vhost_data/3,
+ all_node_data/3,
+ augmented_created_stats/2,
+ augmented_created_stats/3,
+ augment_channel_pids/2,
+ augment_details/2,
+ lookup_element/2,
+ lookup_element/3
+ ]).
+
+-import(rabbit_misc, [pget/2]).
+
+-type maybe_slide() :: exometer_slide:slide() | not_found.
+-type ranges() :: {maybe_range(), maybe_range(), maybe_range(), maybe_range()}.
+-type maybe_range() :: no_range | #range{}.
+
+%%----------------------------------------------------------------------------
+%% Internal, query-time - node-local operations
+%%----------------------------------------------------------------------------
+
+created_stats(Name, Type) ->
+ case ets:select(Type, [{{'_', '$2', '$3'}, [{'==', Name, '$2'}], ['$3']}]) of
+ [] -> not_found;
+ [Elem] -> Elem
+ end.
+
+created_stats(Type) ->
+ %% TODO better tab2list?
+ ets:select(Type, [{{'_', '_', '$3'}, [], ['$3']}]).
+
+-spec all_detail_queue_data(pid(), [any()], ranges()) -> #{atom() => any()}.
+all_detail_queue_data(_Pid, Ids, Ranges) ->
+ lists:foldl(fun (Id, Acc) ->
+ Data = detail_queue_data(Ranges, Id),
+ maps:put(Id, Data, Acc)
+ end, #{}, Ids).
+
+all_list_queue_data(_Pid, Ids, Ranges) ->
+ lists:foldl(fun (Id, Acc) ->
+ Data = list_queue_data(Ranges, Id),
+ maps:put(Id, Data, Acc)
+ end, #{}, Ids).
+
+all_detail_channel_data(_Pid, Ids, Ranges) ->
+ lists:foldl(fun (Id, Acc) ->
+ Data = detail_channel_data(Ranges, Id),
+ maps:put(Id, Data, Acc)
+ end, #{}, Ids).
+
+all_list_channel_data(_Pid, Ids, Ranges) ->
+ lists:foldl(fun (Id, Acc) ->
+ Data = list_channel_data(Ranges, Id),
+ maps:put(Id, Data, Acc)
+ end, #{}, Ids).
+
+connection_data(Ranges, Id) ->
+ maps:from_list([raw_message_data(connection_stats_coarse_conn_stats,
+ pick_range(coarse_conn_stats, Ranges), Id),
+ {connection_stats, lookup_element(connection_stats, Id)}]).
+
+exchange_data(Ranges, Id) ->
+ maps:from_list(
+ exchange_raw_detail_stats_data(Ranges, Id) ++
+ [raw_message_data(exchange_stats_publish_out,
+ pick_range(fine_stats, Ranges), Id),
+ raw_message_data(exchange_stats_publish_in,
+ pick_range(fine_stats, Ranges), Id)]).
+
+vhost_data(Ranges, Id) ->
+ maps:from_list([raw_message_data(vhost_stats_coarse_conn_stats,
+ pick_range(coarse_conn_stats, Ranges), Id),
+ raw_message_data(vhost_msg_stats,
+ pick_range(queue_msg_rates, Ranges), Id),
+ raw_message_data(vhost_stats_fine_stats,
+ pick_range(fine_stats, Ranges), Id),
+ raw_message_data(vhost_stats_deliver_stats,
+ pick_range(deliver_get, Ranges), Id)]).
+
+node_data(Ranges, Id) ->
+ maps:from_list(
+ [{mgmt_stats, mgmt_queue_length_stats(Id)}] ++
+ [{node_node_metrics, node_node_metrics()}] ++
+ node_raw_detail_stats_data(Ranges, Id) ++
+ [raw_message_data(node_coarse_stats,
+ pick_range(coarse_node_stats, Ranges), Id),
+ raw_message_data(node_persister_stats,
+ pick_range(coarse_node_stats, Ranges), Id),
+ {node_stats, lookup_element(node_stats, Id)}] ++
+ node_connection_churn_rates_data(Ranges, Id)).
+
+overview_data(_Pid, User, Ranges, VHosts) ->
+ Raw = [raw_all_message_data(vhost_msg_stats, pick_range(queue_msg_counts, Ranges), VHosts),
+ raw_all_message_data(vhost_stats_fine_stats, pick_range(fine_stats, Ranges), VHosts),
+ raw_all_message_data(vhost_msg_rates, pick_range(queue_msg_rates, Ranges), VHosts),
+ raw_all_message_data(vhost_stats_deliver_stats, pick_range(deliver_get, Ranges), VHosts),
+ raw_message_data(connection_churn_rates, pick_range(queue_msg_rates, Ranges), node())],
+ maps:from_list(Raw ++
+ [{connections_count, count_created_stats(connection_created_stats, User)},
+ {channels_count, count_created_stats(channel_created_stats, User)},
+ {consumers_count, ets:info(consumer_stats, size)}]).
+
+consumer_data(_Pid, VHost) ->
+ maps:from_list(
+ [{C, augment_msg_stats(augment_consumer(C))}
+ || C <- consumers_by_vhost(VHost)]).
+
+all_connection_data(_Pid, Ids, Ranges) ->
+ maps:from_list([{Id, connection_data(Ranges, Id)} || Id <- Ids]).
+
+all_exchange_data(_Pid, Ids, Ranges) ->
+ maps:from_list([{Id, exchange_data(Ranges, Id)} || Id <- Ids]).
+
+all_vhost_data(_Pid, Ids, Ranges) ->
+ maps:from_list([{Id, vhost_data(Ranges, Id)} || Id <- Ids]).
+
+all_node_data(_Pid, Ids, Ranges) ->
+ maps:from_list([{Id, node_data(Ranges, Id)} || Id <- Ids]).
+
+channel_raw_message_data(Ranges, Id) ->
+ [raw_message_data(channel_stats_fine_stats, pick_range(fine_stats, Ranges), Id),
+ raw_message_data(channel_stats_deliver_stats, pick_range(deliver_get, Ranges), Id),
+ raw_message_data(channel_process_stats, pick_range(process_stats, Ranges), Id)].
+
+queue_raw_message_data(Ranges, Id) ->
+ [raw_message_data(queue_stats_publish, pick_range(fine_stats, Ranges), Id),
+ raw_message_data(queue_stats_deliver_stats, pick_range(deliver_get, Ranges), Id),
+ raw_message_data(queue_process_stats, pick_range(process_stats, Ranges), Id),
+ raw_message_data(queue_msg_stats, pick_range(queue_msg_counts, Ranges), Id)].
+
+queue_raw_deliver_stats_data(Ranges, Id) ->
+ [raw_message_data2(channel_queue_stats_deliver_stats,
+ pick_range(deliver_get, Ranges), Key)
+ || Key <- get_table_keys(channel_queue_stats_deliver_stats, second(Id))] ++
+ [raw_message_data2(queue_exchange_stats_publish,
+ pick_range(fine_stats, Ranges), Key)
+ || Key <- get_table_keys(queue_exchange_stats_publish, first(Id))].
+
+node_raw_detail_stats_data(Ranges, Id) ->
+ [raw_message_data2(node_node_coarse_stats,
+ pick_range(coarse_node_node_stats, Ranges), Key)
+ || Key <- get_table_keys(node_node_coarse_stats, first(Id))].
+
+node_connection_churn_rates_data(Ranges, Id) ->
+ [raw_message_data(connection_churn_rates,
+ pick_range(churn_rates, Ranges), Id)].
+
+exchange_raw_detail_stats_data(Ranges, Id) ->
+ [raw_message_data2(channel_exchange_stats_fine_stats,
+ pick_range(fine_stats, Ranges), Key)
+ || Key <- get_table_keys(channel_exchange_stats_fine_stats, second(Id))] ++
+ [raw_message_data2(queue_exchange_stats_publish,
+ pick_range(fine_stats, Ranges), Key)
+ || Key <- get_table_keys(queue_exchange_stats_publish, second(Id))].
+
+channel_raw_detail_stats_data(Ranges, Id) ->
+ [raw_message_data2(channel_exchange_stats_fine_stats,
+ pick_range(fine_stats, Ranges), Key)
+ || Key <- get_table_keys(channel_exchange_stats_fine_stats, first(Id))] ++
+ [raw_message_data2(channel_queue_stats_deliver_stats,
+ pick_range(fine_stats, Ranges), Key)
+ || Key <- get_table_keys(channel_queue_stats_deliver_stats, first(Id))].
+
+raw_message_data2(Table, no_range, Id) ->
+ SmallSample = lookup_smaller_sample(Table, Id),
+ {{Table, Id}, {SmallSample, not_found}};
+raw_message_data2(Table, Range, Id) ->
+ SmallSample = lookup_smaller_sample(Table, Id),
+ Samples = lookup_samples(Table, Id, Range),
+ {{Table, Id}, {SmallSample, Samples}}.
+
+detail_queue_data(Ranges, Id) ->
+ maps:from_list(queue_raw_message_data(Ranges, Id) ++
+ queue_raw_deliver_stats_data(Ranges, Id) ++
+ [{queue_stats, lookup_element(queue_stats, Id)},
+ {consumer_stats, get_queue_consumer_stats(Id)}]).
+
+list_queue_data(Ranges, Id) ->
+ maps:from_list(queue_raw_message_data(Ranges, Id) ++
+ queue_raw_deliver_stats_data(Ranges, Id) ++
+ [{queue_stats, lookup_element(queue_stats, Id)}]).
+
+detail_channel_data(Ranges, Id) ->
+ maps:from_list(channel_raw_message_data(Ranges, Id) ++
+ channel_raw_detail_stats_data(Ranges, Id) ++
+ [{channel_stats, lookup_element(channel_stats, Id)},
+ {consumer_stats, get_consumer_stats(Id)}]).
+
+list_channel_data(Ranges, Id) ->
+ maps:from_list(channel_raw_message_data(Ranges, Id) ++
+ channel_raw_detail_stats_data(Ranges, Id) ++
+ [{channel_stats, lookup_element(channel_stats, Id)}]).
+
+-spec raw_message_data(atom(), maybe_range(), any()) ->
+ {atom(), {maybe_slide(), maybe_slide()}}.
+raw_message_data(Table, no_range, Id) ->
+ SmallSample = lookup_smaller_sample(Table, Id),
+ {Table, {SmallSample, not_found}};
+raw_message_data(Table, Range, Id) ->
+ SmallSample = lookup_smaller_sample(Table, Id),
+ Samples = lookup_samples(Table, Id, Range),
+ {Table, {SmallSample, Samples}}.
+
+raw_all_message_data(Table, Range, VHosts) ->
+ SmallSample = lookup_all(Table, VHosts, select_smaller_sample(Table)),
+ RangeSample = case Range of
+ no_range -> not_found;
+ _ ->
+ lookup_all(Table, VHosts, select_range_sample(Table,
+ Range))
+ end,
+ {Table, {SmallSample, RangeSample}}.
+
+get_queue_consumer_stats(Id) ->
+ Consumers = ets:select(consumer_stats, match_queue_consumer_spec(Id)),
+ [augment_consumer(C) || C <- Consumers].
+
+get_consumer_stats(Id) ->
+ Consumers = ets:select(consumer_stats, match_consumer_spec(Id)),
+ [augment_consumer(C) || C <- Consumers].
+
+count_created_stats(Type, all) ->
+ ets:info(Type, size);
+count_created_stats(Type, User) ->
+ length(filter_user(created_stats(Type), User)).
+
+augment_consumer({{Q, Ch, CTag}, Props}) ->
+ [{queue, format_resource(Q)},
+ {channel_details, augment_channel_pid(Ch)},
+ {channel_pid, Ch},
+ {consumer_tag, CTag} | Props].
+
+consumers_by_vhost(VHost) ->
+ ets:select(consumer_stats,
+ [{{{#resource{virtual_host = '$1', _ = '_'}, '_', '_'}, '_'},
+ [{'orelse', {'==', 'all', VHost}, {'==', VHost, '$1'}}],
+ ['$_']}]).
+
+augment_msg_stats(Props) ->
+ augment_details(Props, []) ++ Props.
+
+augment_details([{_, none} | T], Acc) ->
+ augment_details(T, Acc);
+augment_details([{_, unknown} | T], Acc) ->
+ augment_details(T, Acc);
+augment_details([{connection, Value} | T], Acc) ->
+ augment_details(T, [{connection_details, augment_connection_pid(Value)} | Acc]);
+augment_details([{channel, Value} | T], Acc) ->
+ augment_details(T, [{channel_details, augment_channel_pid(Value)} | Acc]);
+augment_details([{owner_pid, Value} | T], Acc) ->
+ augment_details(T, [{owner_pid_details, augment_connection_pid(Value)} | Acc]);
+augment_details([_ | T], Acc) ->
+ augment_details(T, Acc);
+augment_details([], Acc) ->
+ Acc.
+
+augment_channel_pids(_Pid, ChPids) ->
+ lists:map(fun (ChPid) -> augment_channel_pid(ChPid) end, ChPids).
+
+augment_channel_pid(Pid) ->
+ Ch = lookup_element(channel_created_stats, Pid, 3),
+ Conn = lookup_element(connection_created_stats, pget(connection, Ch), 3),
+ case Conn of
+ [] -> %% If the connection has just been opened, we might not yet have the data
+ [];
+ _ ->
+ [{name, pget(name, Ch)},
+ {pid, pget(pid, Ch)},
+ {number, pget(number, Ch)},
+ {user, pget(user, Ch)},
+ {connection_name, pget(name, Conn)},
+ {peer_port, pget(peer_port, Conn)},
+ {peer_host, pget(peer_host, Conn)}]
+ end.
+
+augment_connection_pid(Pid) ->
+ Conn = lookup_element(connection_created_stats, Pid, 3),
+ case Conn of
+ [] -> %% If the connection has just been opened, we might not yet have the data
+ [];
+ _ ->
+ [{name, pget(name, Conn)},
+ {peer_port, pget(peer_port, Conn)},
+ {peer_host, pget(peer_host, Conn)}]
+ end.
+
+augmented_created_stats(_Pid, Key, Type) ->
+ case created_stats(Key, Type) of
+ not_found -> not_found;
+ S -> augment_msg_stats(S)
+ end.
+
+augmented_created_stats(_Pid, Type) ->
+ [ augment_msg_stats(S) || S <- created_stats(Type) ].
+
+match_consumer_spec(Id) ->
+ [{{{'_', '$1', '_'}, '_'}, [{'==', Id, '$1'}], ['$_']}].
+
+match_queue_consumer_spec(Id) ->
+ [{{{'$1', '_', '_'}, '_'}, [{'==', {Id}, '$1'}], ['$_']}].
+
+lookup_element(Table, Key) -> lookup_element(Table, Key, 2).
+
+lookup_element(Table, Key, Pos) ->
+ try ets:lookup_element(Table, Key, Pos)
+ catch error:badarg -> []
+ end.
+
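+%% lookup_element/2,3 swallow the badarg raised when the table or key is
+%% absent, e.g. lookup_element(no_such_table, a_key) simply returns [].
+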
+-spec lookup_smaller_sample(atom(), any()) -> maybe_slide().
+lookup_smaller_sample(Table, Id) ->
+ case ets:lookup(Table, {Id, select_smaller_sample(Table)}) of
+ [] ->
+ not_found;
+ [{_, Slide}] ->
+ Slide1 = exometer_slide:optimize(Slide),
+ maybe_convert_for_compatibility(Table, Slide1)
+ end.
+
+-spec lookup_samples(atom(), any(), #range{}) -> maybe_slide().
+lookup_samples(Table, Id, Range) ->
+ case ets:lookup(Table, {Id, select_range_sample(Table, Range)}) of
+ [] ->
+ not_found;
+ [{_, Slide}] ->
+ Slide1 = exometer_slide:optimize(Slide),
+ maybe_convert_for_compatibility(Table, Slide1)
+ end.
+
+lookup_all(Table, Ids, SecondKey) ->
+ Slides = lists:foldl(fun(Id, Acc) ->
+ case ets:lookup(Table, {Id, SecondKey}) of
+ [] ->
+ Acc;
+ [{_, Slide}] ->
+ [Slide | Acc]
+ end
+ end, [], Ids),
+ case Slides of
+ [] ->
+ not_found;
+ _ ->
+ Slide = exometer_slide:sum(Slides, empty(Table, 0)),
+ maybe_convert_for_compatibility(Table, Slide)
+ end.
+
+maybe_convert_for_compatibility(Table, Slide)
+ when Table =:= channel_stats_fine_stats orelse
+ Table =:= channel_exchange_stats_fine_stats orelse
+ Table =:= vhost_stats_fine_stats ->
+ ConversionNeeded = rabbit_feature_flags:is_disabled(
+ drop_unroutable_metric),
+ case ConversionNeeded of
+ false ->
+ Slide;
+ true ->
+            %% The doubled "drop" is intentional: the metric being dropped is named "drop_unroutable"
+ rabbit_mgmt_data_compat:drop_drop_unroutable_metric(Slide)
+ end;
+maybe_convert_for_compatibility(Table, Slide)
+ when Table =:= channel_queue_stats_deliver_stats orelse
+ Table =:= channel_stats_deliver_stats orelse
+ Table =:= queue_stats_deliver_stats orelse
+ Table =:= vhost_stats_deliver_stats ->
+ ConversionNeeded = rabbit_feature_flags:is_disabled(
+ empty_basic_get_metric),
+ case ConversionNeeded of
+ false ->
+ Slide;
+ true ->
+ rabbit_mgmt_data_compat:drop_get_empty_queue_metric(Slide)
+ end;
+maybe_convert_for_compatibility(_, Slide) ->
+ Slide.
+
+get_table_keys(Table, Id0) ->
+ ets:select(Table, match_spec_keys(Id0)).
+
+match_spec_keys(Id) ->
+ MatchCondition = to_match_condition(Id),
+ MatchHead = {{{'$1', '$2'}, '_'}, '_'},
+ [{MatchHead, [MatchCondition], [{{'$1', '$2'}}]}].
+
+to_match_condition({'_', Id1}) when is_tuple(Id1) ->
+ {'==', {Id1}, '$2'};
+to_match_condition({'_', Id1}) ->
+ {'==', Id1, '$2'};
+to_match_condition({Id0, '_'}) when is_tuple(Id0) ->
+ {'==', {Id0}, '$1'};
+to_match_condition({Id0, '_'}) ->
+ {'==', Id0, '$1'}.
+
+mgmt_queue_length_stats(Id) when Id =:= node() ->
+ GCsQueueLengths = lists:map(fun (T) ->
+ case whereis(rabbit_mgmt_metrics_gc:name(T)) of
+ P when is_pid(P) ->
+ {message_queue_len, Len} =
+ erlang:process_info(P, message_queue_len),
+ {T, Len};
+ _ -> {T, 0}
+ end
+ end,
+ ?GC_EVENTS),
+ [{metrics_gc_queue_length, GCsQueueLengths}];
+mgmt_queue_length_stats(_Id) ->
+    % if it isn't for the current node, just return an empty list
+ [].
+
+node_node_metrics() ->
+ maps:from_list(ets:tab2list(node_node_metrics)).
+
+select_range_sample(Table, #range{first = First, last = Last}) ->
+ Range = Last - First,
+ Policies = rabbit_mgmt_agent_config:get_env(sample_retention_policies),
+ Policy = retention_policy(Table),
+ [T | _] = TablePolicies = lists:sort(proplists:get_value(Policy, Policies)),
+ {_, Sample} = select_smallest_above(T, TablePolicies, Range),
+ Sample.
+
+select_smaller_sample(Table) ->
+ Policies = rabbit_mgmt_agent_config:get_env(sample_retention_policies),
+ Policy = retention_policy(Table),
+ TablePolicies = proplists:get_value(Policy, Policies),
+ [V | _] = lists:sort([I || {_, I} <- TablePolicies]),
+ V.
+
+select_smallest_above(V, [], _) ->
+ V;
+select_smallest_above(_, [{H, _} = S | _T], Interval) when (H * 1000) > Interval ->
+ S;
+select_smallest_above(_, [H | T], Interval) ->
+ select_smallest_above(H, T, Interval).
+
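+%% Worked example, assuming a hypothetical policy list [{605, 5}, {3660, 60}]
+%% (sorted {Duration, Interval} pairs, in seconds): for a requested range of
+%% 1000000 ms, 605 * 1000 is not above it but 3660 * 1000 is, so samples
+%% aggregated at 60-second intervals are selected. If no policy covers the
+%% range, the last (largest) one is used.
+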
+pick_range(queue_msg_counts, {RangeL, _RangeM, _RangeD, _RangeN}) ->
+ RangeL;
+pick_range(K, {_RangeL, RangeM, _RangeD, _RangeN}) when K == fine_stats;
+ K == deliver_get;
+ K == queue_msg_rates ->
+ RangeM;
+pick_range(K, {_RangeL, _RangeM, RangeD, _RangeN}) when K == coarse_conn_stats;
+ K == process_stats ->
+ RangeD;
+pick_range(K, {_RangeL, _RangeM, _RangeD, RangeN})
+ when K == coarse_node_stats;
+ K == coarse_node_node_stats;
+ K == churn_rates ->
+ RangeN.
+
+first(Id) ->
+ {Id, '_'}.
+
+second(Id) ->
+ {'_', Id}.
+
+empty(Type, V) when Type =:= connection_stats_coarse_conn_stats;
+ Type =:= queue_msg_stats;
+ Type =:= vhost_msg_stats ->
+ {V, V, V};
+empty(Type, V) when Type =:= channel_stats_fine_stats;
+ Type =:= channel_exchange_stats_fine_stats;
+ Type =:= vhost_stats_fine_stats ->
+ {V, V, V, V};
+empty(Type, V) when Type =:= channel_queue_stats_deliver_stats;
+ Type =:= queue_stats_deliver_stats;
+ Type =:= vhost_stats_deliver_stats;
+ Type =:= channel_stats_deliver_stats ->
+ {V, V, V, V, V, V, V, V};
+empty(Type, V) when Type =:= channel_process_stats;
+ Type =:= queue_process_stats;
+ Type =:= queue_stats_publish;
+ Type =:= queue_exchange_stats_publish;
+ Type =:= exchange_stats_publish_out;
+ Type =:= exchange_stats_publish_in ->
+ {V};
+empty(node_coarse_stats, V) ->
+ {V, V, V, V, V, V, V, V};
+empty(node_persister_stats, V) ->
+ {V, V, V, V, V, V, V, V, V, V, V, V, V, V, V, V, V, V, V, V};
+empty(Type, V) when Type =:= node_node_coarse_stats;
+ Type =:= vhost_stats_coarse_conn_stats;
+ Type =:= queue_msg_rates;
+ Type =:= vhost_msg_rates ->
+ {V, V};
+empty(connection_churn_rates, V) ->
+ {V, V, V, V, V, V, V}.
+
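+%% e.g. empty(queue_msg_stats, 0) returns {0, 0, 0}: a blank sample of the
+%% arity that exometer_slide:sum/2 expects for that table (see lookup_all/3).
+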
+retention_policy(connection_stats_coarse_conn_stats) ->
+ basic;
+retention_policy(channel_stats_fine_stats) ->
+ basic;
+retention_policy(channel_queue_stats_deliver_stats) ->
+ detailed;
+retention_policy(channel_exchange_stats_fine_stats) ->
+ detailed;
+retention_policy(channel_process_stats) ->
+ basic;
+retention_policy(vhost_stats_fine_stats) ->
+ global;
+retention_policy(vhost_stats_deliver_stats) ->
+ global;
+retention_policy(vhost_stats_coarse_conn_stats) ->
+ global;
+retention_policy(vhost_msg_rates) ->
+ global;
+retention_policy(channel_stats_deliver_stats) ->
+ basic;
+retention_policy(queue_stats_deliver_stats) ->
+ basic;
+retention_policy(queue_stats_publish) ->
+ basic;
+retention_policy(queue_exchange_stats_publish) ->
+ basic;
+retention_policy(exchange_stats_publish_out) ->
+ basic;
+retention_policy(exchange_stats_publish_in) ->
+ basic;
+retention_policy(queue_process_stats) ->
+ basic;
+retention_policy(queue_msg_stats) ->
+ basic;
+retention_policy(queue_msg_rates) ->
+ basic;
+retention_policy(vhost_msg_stats) ->
+ global;
+retention_policy(node_coarse_stats) ->
+ global;
+retention_policy(node_persister_stats) ->
+ global;
+retention_policy(node_node_coarse_stats) ->
+ global;
+retention_policy(connection_churn_rates) ->
+ global.
+
+format_resource(unknown) -> unknown;
+format_resource(Res) -> format_resource(name, Res).
+
+format_resource(_, unknown) ->
+ unknown;
+format_resource(NameAs, #resource{name = Name, virtual_host = VHost}) ->
+ [{NameAs, Name}, {vhost, VHost}].
+
+filter_user(List, #user{username = Username, tags = Tags}) ->
+ case is_monitor(Tags) of
+ true -> List;
+ false -> [I || I <- List, pget(user, I) == Username]
+ end.
+
+is_monitor(T) -> intersects(T, [administrator, monitoring]).
+intersects(A, B) -> lists:any(fun(I) -> lists:member(I, B) end, A).
diff --git a/deps/rabbitmq_management_agent/src/rabbit_mgmt_data_compat.erl b/deps/rabbitmq_management_agent/src/rabbit_mgmt_data_compat.erl
new file mode 100644
index 0000000000..9fd127aff5
--- /dev/null
+++ b/deps/rabbitmq_management_agent/src/rabbit_mgmt_data_compat.erl
@@ -0,0 +1,80 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2018-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mgmt_data_compat).
+
+-export([fill_get_empty_queue_metric/1,
+ drop_get_empty_queue_metric/1,
+ fill_consumer_active_fields/1,
+ fill_drop_unroutable_metric/1,
+ drop_drop_unroutable_metric/1]).
+
+fill_get_empty_queue_metric(Slide) ->
+ exometer_slide:map(
+ fun
+ (Value) when is_tuple(Value) andalso size(Value) =:= 8 ->
+ Value;
+ (Value) when is_tuple(Value) andalso size(Value) =:= 7 ->
+ %% Inject a 0 for the new metric
+ list_to_tuple(
+ tuple_to_list(Value) ++ [0]);
+ (Value) ->
+ Value
+ end, Slide).
+
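+%% For example, a 7-tuple sample such as {1,2,3,4,5,6,7} becomes
+%% {1,2,3,4,5,6,7,0}; 8-tuples are already in the new format and pass
+%% through unchanged.
+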
+drop_get_empty_queue_metric(Slide) ->
+ exometer_slide:map(
+ fun
+ (Value) when is_tuple(Value) andalso size(Value) =:= 8 ->
+ %% We want to remove the last element, which is
+ %% the count of basic.get on empty queues.
+ list_to_tuple(
+ lists:sublist(
+ tuple_to_list(Value), size(Value) - 1));
+ (Value) when is_tuple(Value) andalso size(Value) =:= 7 ->
+ Value;
+ (Value) ->
+ Value
+ end, Slide).
+
+fill_drop_unroutable_metric(Slide) ->
+ exometer_slide:map(
+ fun
+ (Value) when is_tuple(Value) andalso size(Value) =:= 4 ->
+ Value;
+ (Value) when is_tuple(Value) andalso size(Value) =:= 3 ->
+ %% Inject a 0
+ list_to_tuple(
+ tuple_to_list(Value) ++ [0]);
+ (Value) ->
+ Value
+ end, Slide).
+
+drop_drop_unroutable_metric(Slide) ->
+ exometer_slide:map(
+ fun
+ (Value) when is_tuple(Value) andalso size(Value) =:= 4 ->
+ %% Remove the last element.
+ list_to_tuple(
+ lists:sublist(
+ tuple_to_list(Value), size(Value) - 1));
+ (Value) when is_tuple(Value) andalso size(Value) =:= 3 ->
+ Value;
+ (Value) ->
+ Value
+ end, Slide).
+
+fill_consumer_active_fields(ConsumersStats) ->
+ [case proplists:get_value(active, ConsumerStats) of
+ undefined ->
+ [{active, true},
+ {activity_status, up}
+ | ConsumerStats];
+ _ ->
+ ConsumerStats
+ end
+ || ConsumerStats <- ConsumersStats].
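+
+%% For example, a consumer proplist without the 'active' key, such as
+%% [{consumer_tag, <<"ctag1">>}] (tag value illustrative), comes back as
+%% [{active, true}, {activity_status, up}, {consumer_tag, <<"ctag1">>}].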
diff --git a/deps/rabbitmq_management_agent/src/rabbit_mgmt_db_handler.erl b/deps/rabbitmq_management_agent/src/rabbit_mgmt_db_handler.erl
new file mode 100644
index 0000000000..c1e43223d7
--- /dev/null
+++ b/deps/rabbitmq_management_agent/src/rabbit_mgmt_db_handler.erl
@@ -0,0 +1,99 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mgmt_db_handler).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+%% Make sure our database is hooked in *before* listening on the network or
+%% recovering queues (i.e. so there can't be any events fired before it starts).
+-rabbit_boot_step({rabbit_mgmt_db_handler,
+ [{description, "management agent"},
+ {mfa, {?MODULE, add_handler, []}},
+ {cleanup, {gen_event, delete_handler,
+ [rabbit_event, ?MODULE, []]}},
+ {requires, rabbit_event},
+ {enables, recovery}]}).
+
+-behaviour(gen_event).
+
+-export([add_handler/0, gc/0, rates_mode/0]).
+
+-export([init/1, handle_call/2, handle_event/2, handle_info/2,
+ terminate/2, code_change/3]).
+
+%%----------------------------------------------------------------------------
+
+add_handler() ->
+ ok = ensure_statistics_enabled(),
+ gen_event:add_handler(rabbit_event, ?MODULE, []).
+
+gc() ->
+ erlang:garbage_collect(whereis(rabbit_event)).
+
+rates_mode() ->
+ case rabbit_mgmt_agent_config:get_env(rates_mode) of
+ undefined -> basic;
+ Mode -> Mode
+ end.
+
+handle_force_fine_statistics() ->
+ case rabbit_mgmt_agent_config:get_env(force_fine_statistics) of
+ undefined ->
+ ok;
+ X ->
+ rabbit_log:warning(
+ "force_fine_statistics set to ~p; ignored.~n"
+ "Replaced by {rates_mode, none} in the rabbitmq_management "
+ "application.~n", [X])
+ end.
+
+%%----------------------------------------------------------------------------
+
+ensure_statistics_enabled() ->
+ ForceStats = rates_mode() =/= none,
+ handle_force_fine_statistics(),
+ {ok, StatsLevel} = application:get_env(rabbit, collect_statistics),
+ rabbit_log:info("Management plugin: using rates mode '~p'~n", [rates_mode()]),
+ case {ForceStats, StatsLevel} of
+ {true, fine} ->
+ ok;
+ {true, _} ->
+ application:set_env(rabbit, collect_statistics, fine);
+ {false, none} ->
+ application:set_env(rabbit, collect_statistics, coarse);
+ {_, _} ->
+ ok
+ end,
+ ok = rabbit:force_event_refresh(erlang:make_ref()).
+
+%%----------------------------------------------------------------------------
+
+init([]) ->
+ {ok, []}.
+
+handle_call(_Request, State) ->
+ {ok, not_understood, State}.
+
+handle_event(#event{type = Type} = Event, State)
+ when Type == connection_closed; Type == channel_closed; Type == queue_deleted;
+ Type == exchange_deleted; Type == vhost_deleted;
+ Type == consumer_deleted; Type == node_node_deleted;
+ Type == channel_consumer_deleted ->
+ gen_server:cast(rabbit_mgmt_metrics_gc:name(Type), {event, Event}),
+ {ok, State};
+handle_event(_, State) ->
+ {ok, State}.
+
+handle_info(_Info, State) ->
+ {ok, State}.
+
+terminate(_Arg, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
diff --git a/deps/rabbitmq_management_agent/src/rabbit_mgmt_external_stats.erl b/deps/rabbitmq_management_agent/src/rabbit_mgmt_external_stats.erl
new file mode 100644
index 0000000000..5e92d8394c
--- /dev/null
+++ b/deps/rabbitmq_management_agent/src/rabbit_mgmt_external_stats.erl
@@ -0,0 +1,501 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mgmt_external_stats).
+
+%% Transitional step until we can require Erlang/OTP 21 and
+%% use the now recommended try/catch syntax for obtaining the stack trace.
+-compile(nowarn_deprecated_function).
+
+-behaviour(gen_server).
+
+-export([start_link/0]).
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
+ code_change/3]).
+
+-export([list_registry_plugins/1]).
+
+-import(rabbit_misc, [pget/2]).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-define(METRICS_KEYS, [fd_used, sockets_used, mem_used, disk_free, proc_used, gc_num,
+ gc_bytes_reclaimed, context_switches]).
+
+-define(PERSISTER_KEYS, [persister_stats]).
+
+-define(OTHER_KEYS, [name, partitions, os_pid, fd_total, sockets_total, mem_limit,
+ mem_alarm, disk_free_limit, disk_free_alarm, proc_total,
+ rates_mode, uptime, run_queue, processors, exchange_types,
+ auth_mechanisms, applications, contexts, log_files,
+ db_dir, config_files, net_ticktime, enabled_plugins,
+ mem_calculation_strategy, ra_open_file_metrics]).
+
+-define(TEN_MINUTES_AS_SECONDS, 600).
+
+%%--------------------------------------------------------------------
+
+-record(state, {
+ fd_total,
+ fhc_stats,
+ node_owners,
+ last_ts,
+ interval,
+ error_logged_time
+}).
+
+%%--------------------------------------------------------------------
+
+start_link() ->
+ gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
+
+%%--------------------------------------------------------------------
+
+get_used_fd(State0) ->
+ try
+ case get_used_fd(os:type(), State0) of
+ {State1, UsedFd} when is_number(UsedFd) ->
+ {State1, UsedFd};
+ {State1, _Other} ->
+ %% Defaults to 0 if data is not available
+ {State1, 0}
+ end
+ catch
+ _:Error ->
+ State2 = log_fd_error("Could not infer the number of file handles used: ~p~n", [Error], State0),
+ {State2, 0}
+ end.
+
+get_used_fd({unix, linux}, State0) ->
+ case file:list_dir("/proc/" ++ os:getpid() ++ "/fd") of
+ {ok, Files} ->
+ {State0, length(Files)};
+ {error, _} ->
+ get_used_fd({unix, generic}, State0)
+ end;
+
+get_used_fd({unix, BSD}, State0)
+ when BSD == openbsd; BSD == freebsd; BSD == netbsd ->
+ IsDigit = fun (D) -> lists:member(D, "0123456789*") end,
+ Output = os:cmd("fstat -p " ++ os:getpid()),
+ try
+ F = fun (Line) ->
+ lists:all(IsDigit, lists:nth(4, string:tokens(Line, " ")))
+ end,
+ UsedFd = length(lists:filter(F, string:tokens(Output, "\n"))),
+ {State0, UsedFd}
+ catch _:Error:Stacktrace ->
+ State1 = log_fd_error("Could not parse fstat output:~n~s~n~p~n",
+ [Output, {Error, Stacktrace}], State0),
+ {State1, 0}
+ end;
+
+get_used_fd({unix, _}, State0) ->
+ Cmd = rabbit_misc:format(
+ "lsof -d \"0-9999999\" -lna -p ~s || echo failed", [os:getpid()]),
+ Res = os:cmd(Cmd),
+ case string:right(Res, 7) of
+ "failed\n" ->
+ State1 = log_fd_error("Could not obtain lsof output~n", [], State0),
+ {State1, 0};
+ _ ->
+ UsedFd = string:words(Res, $\n) - 1,
+ {State0, UsedFd}
+ end;
+
+%% handle.exe can be obtained from
+%% https://technet.microsoft.com/en-us/sysinternals/bb896655.aspx
+
+%% Output looks like:
+
+%% Handle v3.42
+%% Copyright (C) 1997-2008 Mark Russinovich
+%% Sysinternals - www.sysinternals.com
+%%
+%% Handle type summary:
+%% ALPC Port : 2
+%% Desktop : 1
+%% Directory : 1
+%% Event : 108
+%% File : 25
+%% IoCompletion : 3
+%% Key : 7
+%% KeyedEvent : 1
+%% Mutant : 1
+%% Process : 3
+%% Process : 38
+%% Thread : 41
+%% Timer : 3
+%% TpWorkerFactory : 2
+%% WindowStation : 2
+%% Total handles: 238
+
+%% Nthandle v4.22 - Handle viewer
+%% Copyright (C) 1997-2019 Mark Russinovich
+%% Sysinternals - www.sysinternals.com
+%%
+%% Handle type summary:
+%% <Unknown type> : 1
+%% <Unknown type> : 166
+%% ALPC Port : 11
+%% Desktop : 1
+%% Directory : 2
+%% Event : 226
+%% File : 122
+%% IoCompletion : 8
+%% IRTimer : 6
+%% Key : 42
+%% Mutant : 7
+%% Process : 3
+%% Section : 2
+%% Semaphore : 43
+%% Thread : 36
+%% TpWorkerFactory : 3
+%% WaitCompletionPacket: 25
+%% WindowStation : 2
+%% Total handles: 706
+
+%% Note that the "File" number appears to include network sockets too; I assume
+%% that's the number we care about. Note also that if you omit "-s" you will
+%% see a list of file handles *without* network sockets. If you then add "-a"
+%% you will see a list of handles of various types, including network sockets
+%% shown as file handles to \Device\Afd.
+
+get_used_fd({win32, _}, State0) ->
+ Handle = rabbit_misc:os_cmd(
+ "handle.exe /accepteula -s -p " ++ os:getpid() ++ " 2> nul"),
+ case Handle of
+ [] ->
+ State1 = log_fd_error("Could not find handle.exe, please install from sysinternals~n", [], State0),
+ {State1, 0};
+ _ ->
+ case find_files_line(string:tokens(Handle, "\r\n")) of
+ unknown ->
+ State1 = log_fd_error("handle.exe output did not contain "
+ "a line beginning with ' File ', unable "
+ "to determine used file descriptor "
+ "count: ~p~n", [Handle], State0),
+ {State1, 0};
+ UsedFd ->
+ {State0, UsedFd}
+ end
+ end.
+
+find_files_line([]) ->
+ unknown;
+find_files_line([" File " ++ Rest | _T]) ->
+ [Files] = string:tokens(Rest, ": "),
+ list_to_integer(Files);
+find_files_line([_H | T]) ->
+ find_files_line(T).
+
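+%% For example, given the "Handle type summary" output above, a line such
+%% as " File : 122" yields 122; if no such line is present, unknown is
+%% returned and reported by the caller.
+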
+-define(SAFE_CALL(Fun, NoProcFailResult),
+ try
+ Fun
+ catch exit:{noproc, _} -> NoProcFailResult
+ end).
+
+get_disk_free_limit() -> ?SAFE_CALL(rabbit_disk_monitor:get_disk_free_limit(),
+ disk_free_monitoring_disabled).
+
+get_disk_free() -> ?SAFE_CALL(rabbit_disk_monitor:get_disk_free(),
+ disk_free_monitoring_disabled).
+
+log_fd_error(Fmt, Args, #state{error_logged_time = undefined}=State) ->
+ % rabbitmq/rabbitmq-management#90
+ % no errors have been logged, so log it and make a note of when
+ Now = erlang:monotonic_time(second),
+ ok = rabbit_log:error(Fmt, Args),
+ State#state{error_logged_time = Now};
+log_fd_error(Fmt, Args, #state{error_logged_time = Time}=State) ->
+ Now = erlang:monotonic_time(second),
+ case Now >= Time + ?TEN_MINUTES_AS_SECONDS of
+ true ->
+ % rabbitmq/rabbitmq-management#90
+ % it has been longer than 10 minutes,
+ % re-log the error
+ ok = rabbit_log:error(Fmt, Args),
+ State#state{error_logged_time = Now};
+ _ ->
+ % 10 minutes have not yet passed
+ State
+ end.
+%%--------------------------------------------------------------------
+
+infos([], Acc, State) ->
+ {State, lists:reverse(Acc)};
+infos([Item|T], Acc0, State0) ->
+ {State1, Infos} = i(Item, State0),
+ Acc1 = [{Item, Infos}|Acc0],
+ infos(T, Acc1, State1).
+
+i(name, State) ->
+ {State, node()};
+i(partitions, State) ->
+ {State, rabbit_node_monitor:partitions()};
+i(fd_used, State) ->
+ get_used_fd(State);
+i(fd_total, #state{fd_total = FdTotal}=State) ->
+ {State, FdTotal};
+i(sockets_used, State) ->
+ {State, proplists:get_value(sockets_used, file_handle_cache:info([sockets_used]))};
+i(sockets_total, State) ->
+ {State, proplists:get_value(sockets_limit, file_handle_cache:info([sockets_limit]))};
+i(os_pid, State) ->
+ {State, list_to_binary(os:getpid())};
+i(mem_used, State) ->
+ {State, vm_memory_monitor:get_process_memory()};
+i(mem_calculation_strategy, State) ->
+ {State, vm_memory_monitor:get_memory_calculation_strategy()};
+i(mem_limit, State) ->
+ {State, vm_memory_monitor:get_memory_limit()};
+i(mem_alarm, State) ->
+ {State, resource_alarm_set(memory)};
+i(proc_used, State) ->
+ {State, erlang:system_info(process_count)};
+i(proc_total, State) ->
+ {State, erlang:system_info(process_limit)};
+i(run_queue, State) ->
+ {State, erlang:statistics(run_queue)};
+i(processors, State) ->
+ {State, erlang:system_info(logical_processors)};
+i(disk_free_limit, State) ->
+ {State, get_disk_free_limit()};
+i(disk_free, State) ->
+ {State, get_disk_free()};
+i(disk_free_alarm, State) ->
+ {State, resource_alarm_set(disk)};
+i(contexts, State) ->
+ {State, rabbit_web_dispatch_contexts()};
+i(uptime, State) ->
+ {Total, _} = erlang:statistics(wall_clock),
+ {State, Total};
+i(rates_mode, State) ->
+ {State, rabbit_mgmt_db_handler:rates_mode()};
+i(exchange_types, State) ->
+ {State, list_registry_plugins(exchange)};
+i(log_files, State) ->
+ {State, [list_to_binary(F) || F <- rabbit:log_locations()]};
+i(db_dir, State) ->
+ {State, list_to_binary(rabbit_mnesia:dir())};
+i(config_files, State) ->
+ {State, [list_to_binary(F) || F <- rabbit:config_files()]};
+i(net_ticktime, State) ->
+ {State, net_kernel:get_net_ticktime()};
+i(persister_stats, State) ->
+ {State, persister_stats(State)};
+i(enabled_plugins, State) ->
+ {ok, Dir} = application:get_env(rabbit, enabled_plugins_file),
+ {State, rabbit_plugins:read_enabled(Dir)};
+i(auth_mechanisms, State) ->
+ {ok, Mechanisms} = application:get_env(rabbit, auth_mechanisms),
+ F = fun (N) ->
+ lists:member(list_to_atom(binary_to_list(N)), Mechanisms)
+ end,
+ {State, list_registry_plugins(auth_mechanism, F)};
+i(applications, State) ->
+ {State, [format_application(A) || A <- lists:keysort(1, rabbit_misc:which_applications())]};
+i(gc_num, State) ->
+ {GCs, _, _} = erlang:statistics(garbage_collection),
+ {State, GCs};
+i(gc_bytes_reclaimed, State) ->
+ {_, Words, _} = erlang:statistics(garbage_collection),
+ {State, Words * erlang:system_info(wordsize)};
+i(context_switches, State) ->
+ {Sw, 0} = erlang:statistics(context_switches),
+ {State, Sw};
+i(ra_open_file_metrics, State) ->
+ {State, [{ra_log_wal, ra_metrics(ra_log_wal)},
+ {ra_log_segment_writer, ra_metrics(ra_log_segment_writer)}]}.
+
+ra_metrics(K) ->
+ try
+ case ets:lookup(ra_open_file_metrics, whereis(K)) of
+ [] -> 0;
+ [{_, C}] -> C
+ end
+ catch
+ error:badarg ->
+            %% On startup the management agent might start before Ra does
+ 0
+ end.
+
+resource_alarm_set(Source) ->
+ lists:member({{resource_limit, Source, node()},[]},
+ rabbit_alarm:get_alarms()).
+
+list_registry_plugins(Type) ->
+ list_registry_plugins(Type, fun(_) -> true end).
+
+list_registry_plugins(Type, Fun) ->
+ [registry_plugin_enabled(set_plugin_name(Name, Module), Fun) ||
+ {Name, Module} <- rabbit_registry:lookup_all(Type)].
+
+registry_plugin_enabled(Desc, Fun) ->
+ Desc ++ [{enabled, Fun(proplists:get_value(name, Desc))}].
+
+format_application({Application, Description, Version}) ->
+ [{name, Application},
+ {description, list_to_binary(Description)},
+ {version, list_to_binary(Version)}].
+
+set_plugin_name(Name, Module) ->
+ [{name, list_to_binary(atom_to_list(Name))} |
+ proplists:delete(name, Module:description())].
+
+persister_stats(#state{fhc_stats = FHC}) ->
+ [{flatten_key(K), V} || {{_Op, _Type} = K, V} <- FHC].
+
+flatten_key({A, B}) ->
+ list_to_atom(atom_to_list(A) ++ "_" ++ atom_to_list(B)).
+
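+%% For example, assuming file_handle_cache_stats keys of the form
+%% {Op, Type}, flatten_key({io_read, count}) returns io_read_count.
+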
+cluster_links() ->
+ {ok, Items} = net_kernel:nodes_info(),
+ [Link || Item <- Items,
+ Link <- [format_nodes_info(Item)], Link =/= undefined].
+
+format_nodes_info({Node, Info}) ->
+ Owner = proplists:get_value(owner, Info),
+ case catch process_info(Owner, links) of
+ {links, Links} ->
+ case [Link || Link <- Links, is_port(Link)] of
+ [Port] ->
+ {Node, Owner, format_nodes_info1(Port)};
+ _ ->
+ undefined
+ end;
+ _ ->
+ undefined
+ end.
+
+format_nodes_info1(Port) ->
+ case {rabbit_net:socket_ends(Port, inbound),
+ rabbit_net:getstat(Port, [recv_oct, send_oct])} of
+ {{ok, {PeerAddr, PeerPort, SockAddr, SockPort}}, {ok, Stats}} ->
+ [{peer_addr, maybe_ntoab(PeerAddr)},
+ {peer_port, PeerPort},
+ {sock_addr, maybe_ntoab(SockAddr)},
+ {sock_port, SockPort},
+ {recv_bytes, pget(recv_oct, Stats)},
+ {send_bytes, pget(send_oct, Stats)}];
+ _ ->
+ []
+ end.
+
+maybe_ntoab(A) when is_tuple(A) -> list_to_binary(rabbit_misc:ntoab(A));
+maybe_ntoab(H) -> H.
+
+%%--------------------------------------------------------------------
+
+%% This is slightly icky in that we introduce knowledge of
+%% rabbit_web_dispatch, which is not a dependency. But the last thing I
+%% want to do is create a rabbitmq_mochiweb_management_agent plugin.
+rabbit_web_dispatch_contexts() ->
+ [format_context(C) || C <- rabbit_web_dispatch_registry_list_all()].
+
+%% For similar reasons we don't declare a dependency on
+%% rabbitmq_mochiweb - so at startup there's no guarantee it will be
+%% running. So we have to catch this noproc.
+rabbit_web_dispatch_registry_list_all() ->
+ case code:is_loaded(rabbit_web_dispatch_registry) of
+ false -> [];
+ _ -> try
+ M = rabbit_web_dispatch_registry, %% Fool xref
+ M:list_all()
+ catch exit:{noproc, _} ->
+ []
+ end
+ end.
+
+format_context({Path, Description, Rest}) ->
+ [{description, list_to_binary(Description)},
+ {path, list_to_binary("/" ++ Path)} |
+ format_mochiweb_option_list(Rest)].
+
+format_mochiweb_option_list(C) ->
+ [{K, format_mochiweb_option(K, V)} || {K, V} <- C].
+
+format_mochiweb_option(ssl_opts, V) ->
+ format_mochiweb_option_list(V);
+format_mochiweb_option(_K, V) ->
+ case io_lib:printable_list(V) of
+ true -> list_to_binary(V);
+ false -> list_to_binary(rabbit_misc:format("~w", [V]))
+ end.
+
+%%--------------------------------------------------------------------
+
+init([]) ->
+ {ok, Interval} = application:get_env(rabbit, collect_statistics_interval),
+ State = #state{fd_total = file_handle_cache:ulimit(),
+ fhc_stats = get_fhc_stats(),
+ node_owners = sets:new(),
+ interval = Interval},
+ %% We can update stats straight away as they need to be available
+ %% when the mgmt plugin starts a collector
+ {ok, emit_update(State)}.
+
+handle_call(_Req, _From, State) ->
+ {reply, unknown_request, State}.
+
+handle_cast(_C, State) ->
+ {noreply, State}.
+
+handle_info(emit_update, State) ->
+ {noreply, emit_update(State)};
+
+handle_info(_I, State) ->
+ {noreply, State}.
+
+terminate(_, _) -> ok.
+
+code_change(_, State, _) -> {ok, State}.
+
+%%--------------------------------------------------------------------
+
+emit_update(State0) ->
+ State1 = update_state(State0),
+ {State2, MStats} = infos(?METRICS_KEYS, [], State1),
+ {State3, PStats} = infos(?PERSISTER_KEYS, [], State2),
+ {State4, OStats} = infos(?OTHER_KEYS, [], State3),
+ [{persister_stats, PStats0}] = PStats,
+ [{name, _Name} | OStats0] = OStats,
+ rabbit_core_metrics:node_stats(persister_metrics, PStats0),
+ rabbit_core_metrics:node_stats(coarse_metrics, MStats),
+ rabbit_core_metrics:node_stats(node_metrics, OStats0),
+ rabbit_event:notify(node_stats, PStats ++ MStats ++ OStats),
+ erlang:send_after(State4#state.interval, self(), emit_update),
+ emit_node_node_stats(State4).
+
+emit_node_node_stats(State = #state{node_owners = Owners}) ->
+ Links = cluster_links(),
+ NewOwners = sets:from_list([{Node, Owner} || {Node, Owner, _} <- Links]),
+ Dead = sets:to_list(sets:subtract(Owners, NewOwners)),
+ [rabbit_event:notify(
+ node_node_deleted, [{route, Route}]) || {Node, _Owner} <- Dead,
+ Route <- [{node(), Node},
+ {Node, node()}]],
+ [begin
+ rabbit_core_metrics:node_node_stats({node(), Node}, Stats),
+ rabbit_event:notify(
+ node_node_stats, [{route, {node(), Node}} | Stats])
+ end || {Node, _Owner, Stats} <- Links],
+ State#state{node_owners = NewOwners}.
+
+update_state(State0) ->
+    %% Store raw data; the average operation time is calculated at query
+    %% time from the accumulated total
+ FHC = get_fhc_stats(),
+ State0#state{fhc_stats = FHC}.
+
+get_fhc_stats() ->
+ dict:to_list(dict:merge(fun(_, V1, V2) -> V1 + V2 end,
+ dict:from_list(file_handle_cache_stats:get()),
+ dict:from_list(get_ra_io_metrics()))).
+
+get_ra_io_metrics() ->
+ lists:sort(ets:tab2list(ra_io_metrics)).
diff --git a/deps/rabbitmq_management_agent/src/rabbit_mgmt_ff.erl b/deps/rabbitmq_management_agent/src/rabbit_mgmt_ff.erl
new file mode 100644
index 0000000000..c8173c1244
--- /dev/null
+++ b/deps/rabbitmq_management_agent/src/rabbit_mgmt_ff.erl
@@ -0,0 +1,20 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2018-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mgmt_ff).
+
+-rabbit_feature_flag(
+ {empty_basic_get_metric,
+ #{desc => "Count AMQP `basic.get` on empty queues in stats",
+ stability => stable
+ }}).
+
+-rabbit_feature_flag(
+ {drop_unroutable_metric,
+ #{desc => "Count unroutable publishes to be dropped in stats",
+ stability => stable
+ }}).
diff --git a/deps/rabbitmq_management_agent/src/rabbit_mgmt_format.erl b/deps/rabbitmq_management_agent/src/rabbit_mgmt_format.erl
new file mode 100644
index 0000000000..4c9e8c189f
--- /dev/null
+++ b/deps/rabbitmq_management_agent/src/rabbit_mgmt_format.erl
@@ -0,0 +1,559 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mgmt_format).
+
+-export([format/2, ip/1, ipb/1, amqp_table/1, tuple/1]).
+-export([parameter/1, now_to_str/1, now_to_str_ms/1, strip_pids/1]).
+-export([protocol/1, resource/1, queue/1, queue_state/1, queue_info/1]).
+-export([exchange/1, user/1, internal_user/1, binding/1, url/2]).
+-export([pack_binding_props/2, tokenise/1]).
+-export([to_amqp_table/1, listener/1, web_context/1, properties/1, basic_properties/1]).
+-export([record/2, to_basic_properties/1]).
+-export([addr/1, port/1]).
+-export([format_nulls/1, escape_html_tags/1]).
+-export([print/2, print/1]).
+
+-export([format_queue_stats/1, format_channel_stats/1,
+ format_consumer_arguments/1,
+ format_connection_created/1,
+ format_accept_content/1, format_args/1]).
+
+-export([strip_queue_pids/1]).
+
+-export([clean_consumer_details/1, clean_channel_details/1]).
+
+-export([args_hash/1]).
+
+-import(rabbit_misc, [pget/2, pget/3, pset/3]).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+-include_lib("rabbit_common/include/rabbit_framing.hrl").
+-include_lib("rabbit/include/amqqueue.hrl").
+
+%%--------------------------------------------------------------------
+
+format(Stats, {[], _}) ->
+ [Stat || {_Name, Value} = Stat <- Stats, Value =/= unknown];
+format(Stats, {Fs, true}) ->
+ [Fs(Stat) || {_Name, Value} = Stat <- Stats, Value =/= unknown];
+format(Stats, {Fs, false}) ->
+ lists:concat([Fs(Stat) || {_Name, Value} = Stat <- Stats,
+ Value =/= unknown]).
+
+format_queue_stats({reductions, _}) ->
+ [];
+format_queue_stats({exclusive_consumer_pid, _}) ->
+ [];
+format_queue_stats({single_active_consumer_pid, _}) ->
+ [];
+format_queue_stats({slave_pids, ''}) ->
+ [];
+format_queue_stats({slave_pids, Pids}) ->
+ [{slave_nodes, [node(Pid) || Pid <- Pids]}];
+format_queue_stats({leader, Leader}) ->
+ [{node, Leader}];
+format_queue_stats({synchronised_slave_pids, ''}) ->
+ [];
+format_queue_stats({effective_policy_definition, []}) ->
+ [{effective_policy_definition, #{}}];
+format_queue_stats({synchronised_slave_pids, Pids}) ->
+ [{synchronised_slave_nodes, [node(Pid) || Pid <- Pids]}];
+format_queue_stats({backing_queue_status, Value}) ->
+ [{backing_queue_status, properties(Value)}];
+format_queue_stats({idle_since, Value}) ->
+ [{idle_since, now_to_str(Value)}];
+format_queue_stats({state, Value}) ->
+ queue_state(Value);
+format_queue_stats({disk_reads, _}) ->
+ [];
+format_queue_stats({disk_writes, _}) ->
+ [];
+format_queue_stats(Stat) ->
+ [Stat].
+
+format_channel_stats([{idle_since, Value} | Rest]) ->
+ [{idle_since, now_to_str(Value)} | Rest];
+format_channel_stats(Stats) ->
+ Stats.
+
+%% Converts an HTTP API request payload value
+%% to an AMQP 0-9-1 arguments table
+format_args({arguments, []}) ->
+ {arguments, []};
+format_args({arguments, Value}) ->
+ {arguments, to_amqp_table(Value)};
+format_args(Stat) ->
+ Stat.
+
+format_connection_created({host, Value}) ->
+ {host, addr(Value)};
+format_connection_created({peer_host, Value}) ->
+ {peer_host, addr(Value)};
+format_connection_created({port, Value}) ->
+ {port, port(Value)};
+format_connection_created({peer_port, Value}) ->
+ {peer_port, port(Value)};
+format_connection_created({protocol, Value}) ->
+ {protocol, protocol(Value)};
+format_connection_created({client_properties, Value}) ->
+ {client_properties, amqp_table(Value)};
+format_connection_created(Stat) ->
+ Stat.
+
+format_exchange_and_queue({policy, Value}) ->
+ policy(Value);
+format_exchange_and_queue({arguments, Value}) ->
+ [{arguments, amqp_table(Value)}];
+format_exchange_and_queue({name, Value}) ->
+ resource(Value);
+format_exchange_and_queue(Stat) ->
+ [Stat].
+
+format_binding({source, Value}) ->
+ resource(source, Value);
+format_binding({arguments, Value}) ->
+ [{arguments, amqp_table(Value)}];
+format_binding(Stat) ->
+ [Stat].
+
+format_basic_properties({headers, Value}) ->
+ {headers, amqp_table(Value)};
+format_basic_properties(Stat) ->
+ Stat.
+
+format_accept_content({durable, Value}) ->
+ {durable, parse_bool(Value)};
+format_accept_content({auto_delete, Value}) ->
+ {auto_delete, parse_bool(Value)};
+format_accept_content({internal, Value}) ->
+ {internal, parse_bool(Value)};
+format_accept_content(Stat) ->
+ Stat.
+
+print(Fmt, Val) when is_list(Val) ->
+ list_to_binary(lists:flatten(io_lib:format(Fmt, Val)));
+print(Fmt, Val) ->
+ print(Fmt, [Val]).
+
+print(Val) when is_list(Val) ->
+ list_to_binary(lists:flatten(Val));
+print(Val) ->
+ Val.
+
+%% TODO - can we remove all these "unknown" cases? Coverage never hits them.
+
+ip(unknown) -> unknown;
+ip(IP) -> list_to_binary(rabbit_misc:ntoa(IP)).
+
+ipb(unknown) -> unknown;
+ipb(IP) -> list_to_binary(rabbit_misc:ntoab(IP)).
+
+addr(S) when is_list(S); is_atom(S); is_binary(S) -> print("~s", S);
+addr(Addr) when is_tuple(Addr) -> ip(Addr).
+
+port(Port) when is_number(Port) -> Port;
+port(Port) -> print("~w", Port).
+
+properties(unknown) -> unknown;
+properties(Table) -> maps:from_list([{Name, tuple(Value)} ||
+ {Name, Value} <- Table]).
+
+amqp_table(Value) -> rabbit_misc:amqp_table(Value).
+
+parameter(P) -> pset(value, pget(value, P), P).
+
+tuple(unknown) -> unknown;
+tuple(Tuple) when is_tuple(Tuple) -> [tuple(E) || E <- tuple_to_list(Tuple)];
+tuple(Term) -> Term.
+
+protocol(unknown) ->
+ unknown;
+protocol(Version = {_Major, _Minor, _Revision}) ->
+ protocol({'AMQP', Version});
+protocol({Family, Version}) ->
+ print("~s ~s", [Family, protocol_version(Version)]).
+
+protocol_version(Arbitrary)
+ when is_list(Arbitrary) -> Arbitrary;
+protocol_version({Major, Minor}) -> io_lib:format("~B-~B", [Major, Minor]);
+protocol_version({Major, Minor, 0}) -> protocol_version({Major, Minor});
+protocol_version({Major, Minor, Revision}) -> io_lib:format("~B-~B-~B",
+ [Major, Minor, Revision]).
+
+now_to_str(unknown) ->
+ unknown;
+now_to_str(MilliSeconds) ->
+ BaseDate = calendar:datetime_to_gregorian_seconds({{1970, 1, 1},
+ {0, 0, 0}}),
+ Seconds = BaseDate + (MilliSeconds div 1000),
+ {{Y, M, D}, {H, Min, S}} = calendar:gregorian_seconds_to_datetime(Seconds),
+ print("~w-~2.2.0w-~2.2.0w ~w:~2.2.0w:~2.2.0w", [Y, M, D, H, Min, S]).
+
+now_to_str_ms(unknown) ->
+ unknown;
+now_to_str_ms(MilliSeconds) ->
+ print("~s:~3.3.0w", [now_to_str(MilliSeconds), MilliSeconds rem 1000]).
+
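+%% Timestamps are milliseconds since the Unix epoch, so for example
+%% now_to_str(0) -> <<"1970-01-01 0:00:00">> and
+%% now_to_str_ms(1500) -> <<"1970-01-01 0:00:01:500">>.
+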
+resource(unknown) -> unknown;
+resource(Res) -> resource(name, Res).
+
+resource(_, unknown) ->
+ unknown;
+resource(NameAs, #resource{name = Name, virtual_host = VHost}) ->
+ [{NameAs, Name}, {vhost, VHost}].
+
+policy('') -> [];
+policy(Policy) -> [{policy, Policy}].
+
+internal_user(User) ->
+ [{name, internal_user:get_username(User)},
+ {password_hash, base64:encode(internal_user:get_password_hash(User))},
+ {hashing_algorithm, rabbit_auth_backend_internal:hashing_module_for_user(
+ User)},
+ {tags, tags(internal_user:get_tags(User))},
+ {limits, internal_user:get_limits(User)}].
+
+user(User) ->
+ [{name, User#user.username},
+ {tags, tags(User#user.tags)}].
+
+tags(Tags) ->
+ list_to_binary(string:join([atom_to_list(T) || T <- Tags], ",")).
+
+listener(#listener{node = Node, protocol = Protocol,
+ ip_address = IPAddress, port = Port, opts=Opts}) ->
+ [{node, Node},
+ {protocol, Protocol},
+ {ip_address, ip(IPAddress)},
+ {port, Port},
+ {socket_opts, format_socket_opts(Opts)}].
+
+web_context(Props0) ->
+ SslOpts = pget(ssl_opts, Props0, []),
+ Props = proplists:delete(ssl_opts, Props0),
+ [{ssl_opts, format_socket_opts(SslOpts)} | Props].
+
+format_socket_opts(Opts) ->
+ format_socket_opts(Opts, []).
+
+format_socket_opts([], Acc) ->
+ lists:reverse(Acc);
+%% for HTTP API listeners this will be included in
+%% socket_opts
+format_socket_opts([{ssl_opts, Value} | Tail], Acc) ->
+ format_socket_opts(Tail, [{ssl_opts, format_socket_opts(Value)} | Acc]);
+%% exclude options that have values that are nested
+%% data structures or may include functions. They are fairly
+%% obscure and not worth reporting via HTTP API.
+format_socket_opts([{verify_fun, _Value} | Tail], Acc) ->
+ format_socket_opts(Tail, Acc);
+format_socket_opts([{crl_cache, _Value} | Tail], Acc) ->
+ format_socket_opts(Tail, Acc);
+format_socket_opts([{partial_chain, _Value} | Tail], Acc) ->
+ format_socket_opts(Tail, Acc);
+format_socket_opts([{user_lookup_fun, _Value} | Tail], Acc) ->
+ format_socket_opts(Tail, Acc);
+format_socket_opts([{sni_fun, _Value} | Tail], Acc) ->
+ format_socket_opts(Tail, Acc);
+%% we do not report SNI host details in the UI,
+%% so skip this option and avoid some recursive formatting
+%% complexity
+format_socket_opts([{sni_hosts, _Value} | Tail], Acc) ->
+ format_socket_opts(Tail, Acc);
+format_socket_opts([{reuse_session, _Value} | Tail], Acc) ->
+ format_socket_opts(Tail, Acc);
+%% we do not want to report configured cipher suites, even
+%% though formatting them is straightforward
+format_socket_opts([{ciphers, _Value} | Tail], Acc) ->
+ format_socket_opts(Tail, Acc);
+%% single atom options, e.g. `binary`
+format_socket_opts([Head | Tail], Acc) when is_atom(Head) ->
+ format_socket_opts(Tail, [{Head, true} | Acc]);
+%% verify_fun's value is a tuple that includes a function; note that this
+%% clause is unreachable, as {verify_fun, _} is already dropped above
+format_socket_opts([_Head = {verify_fun, _Value} | Tail], Acc) ->
+    format_socket_opts(Tail, Acc);
+format_socket_opts([Head = {Name, Value} | Tail], Acc) when is_list(Value) ->
+ case io_lib:printable_unicode_list(Value) of
+ true -> format_socket_opts(Tail, [{Name, unicode:characters_to_binary(Value)} | Acc]);
+ false -> format_socket_opts(Tail, [Head | Acc])
+ end;
+format_socket_opts([{Name, Value} | Tail], Acc) when is_tuple(Value) ->
+ format_socket_opts(Tail, [{Name, tuple_to_list(Value)} | Acc]);
+%% exclude functions from JSON encoding
+format_socket_opts([_Head = {_Name, Value} | Tail], Acc) when is_function(Value) ->
+ format_socket_opts(Tail, Acc);
+format_socket_opts([Head | Tail], Acc) ->
+ format_socket_opts(Tail, [Head | Acc]).
+
+pack_binding_props(<<"">>, []) ->
+ <<"~">>;
+pack_binding_props(Key, []) ->
+ list_to_binary(quote_binding(Key));
+pack_binding_props(Key, Args) ->
+ ArgsEnc = args_hash(Args),
+ list_to_binary(quote_binding(Key) ++ "~" ++ quote_binding(ArgsEnc)).
+
+quote_binding(Name) ->
+ re:replace(rabbit_http_util:quote_plus(Name), "~", "%7E", [global]).
+
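+%% For example, pack_binding_props(<<"key">>, []) -> <<"key">>, while a
+%% non-empty argument list appends "~" plus the args hash, e.g.
+%% <<"key~XOEJnLqW">> (hash value hypothetical).
+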
+%% Unfortunately string:tokens("foo~~bar", "~") -> ["foo","bar"]: we lose
+%% the fact that there's a double ~.
+tokenise("") ->
+ [];
+tokenise(Str) ->
+ Count = string:cspan(Str, "~"),
+ case length(Str) of
+ Count -> [Str];
+ _ -> [string:sub_string(Str, 1, Count) |
+ tokenise(string:sub_string(Str, Count + 2))]
+ end.
+
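+%% For example, tokenise("foo~bar") -> ["foo","bar"], while
+%% tokenise("foo~~bar") -> ["foo","","bar"]; the empty token keeps the
+%% double ~ recoverable.
+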
+to_amqp_table(V) -> rabbit_misc:to_amqp_table(V).
+
+url(Fmt, Vals) ->
+ print(Fmt, [rabbit_http_util:quote_plus(V) || V <- Vals]).
+
+exchange(X) ->
+ format(X, {fun format_exchange_and_queue/1, false}).
+
+%% We get queues using rabbit_amqqueue:list/1 rather than :info_all/1 since
+%% the latter wakes up each queue. Therefore we have a record rather than a
+%% proplist to deal with.
+queue(Q) when ?is_amqqueue(Q) ->
+ Name = amqqueue:get_name(Q),
+ Durable = amqqueue:is_durable(Q),
+ AutoDelete = amqqueue:is_auto_delete(Q),
+ ExclusiveOwner = amqqueue:get_exclusive_owner(Q),
+ Arguments = amqqueue:get_arguments(Q),
+ Pid = amqqueue:get_pid(Q),
+ State = amqqueue:get_state(Q),
+ %% TODO: in the future queue types should be registered with their
+ %% full and short names and this hard-coded translation should not be
+ %% necessary
+ Type = case amqqueue:get_type(Q) of
+ rabbit_classic_queue -> classic;
+ rabbit_quorum_queue -> quorum;
+ T -> T
+ end,
+ format(
+ [{name, Name},
+ {durable, Durable},
+ {auto_delete, AutoDelete},
+ {exclusive, is_pid(ExclusiveOwner)},
+ {owner_pid, ExclusiveOwner},
+ {arguments, Arguments},
+ {pid, Pid},
+ {type, Type},
+ {state, State}] ++ rabbit_amqqueue:format(Q),
+ {fun format_exchange_and_queue/1, false}).
+
+queue_info(List) ->
+ format(List, {fun format_exchange_and_queue/1, false}).
+
+queue_state({syncing, Msgs}) -> [{state, syncing},
+ {sync_messages, Msgs}];
+queue_state({terminated_by, Name}) ->
+ [{state, terminated},
+ {terminated_by, Name}];
+queue_state(Status) -> [{state, Status}].
+
+%% We get bindings using rabbit_binding:list_*/1 rather than :info_all/1 since
+%% there are no per-exchange / queue / etc variants for the latter. Therefore
+%% we have a record rather than a proplist to deal with.
+binding(#binding{source = S,
+ key = Key,
+ destination = D,
+ args = Args}) ->
+ format(
+ [{source, S},
+ {destination, D#resource.name},
+ {destination_type, D#resource.kind},
+ {routing_key, Key},
+ {arguments, Args},
+ {properties_key, pack_binding_props(Key, Args)}],
+ {fun format_binding/1, false}).
+
+basic_properties(Props = #'P_basic'{}) ->
+ Res = record(Props, record_info(fields, 'P_basic')),
+ format(Res, {fun format_basic_properties/1, true}).
+
+record(Record, Fields) ->
+ {Res, _Ix} = lists:foldl(fun (K, {L, Ix}) ->
+ {case element(Ix, Record) of
+ undefined -> L;
+ V -> [{K, V}|L]
+ end, Ix + 1}
+ end, {[], 2}, Fields),
+ Res.
+
+to_basic_properties(Props) when is_map(Props) ->
+ E = fun err/2,
+ Fmt = fun (headers, H) -> to_amqp_table(H);
+ (delivery_mode, V) when is_integer(V) -> V;
+ (delivery_mode, _V) -> E(not_int,delivery_mode);
+ (priority, V) when is_integer(V) -> V;
+ (priority, _V) -> E(not_int, priority);
+ (timestamp, V) when is_integer(V) -> V;
+ (timestamp, _V) -> E(not_int, timestamp);
+ (_, V) when is_binary(V) -> V;
+ (K, _V) -> E(not_string, K)
+ end,
+ {Res, _Ix} = lists:foldl(
+ fun (K, {P, Ix}) ->
+ {case maps:get(a2b(K), Props, undefined) of
+ undefined -> P;
+ V -> setelement(Ix, P, Fmt(K, V))
+ end, Ix + 1}
+ end, {#'P_basic'{}, 2},
+ record_info(fields, 'P_basic')),
+ Res.
+
+-spec err(term(), term()) -> no_return().
+err(A, B) ->
+ throw({error, {A, B}}).
+
+a2b(A) ->
+ list_to_binary(atom_to_list(A)).
+
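+%% For example, to_basic_properties(#{<<"delivery_mode">> => 2}) yields
+%% #'P_basic'{delivery_mode = 2}, whereas a non-integer value such as
+%% #{<<"priority">> => <<"high">>} throws {error, {not_int, priority}}.
+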
+strip_queue_pids(Item) ->
+ strip_queue_pids(Item, []).
+
+strip_queue_pids([{_, unknown} | T], Acc) ->
+ strip_queue_pids(T, Acc);
+strip_queue_pids([{pid, Pid} | T], Acc0) when is_pid(Pid) ->
+ Acc = case proplists:is_defined(node, Acc0) of
+ false -> [{node, node(Pid)} | Acc0];
+ true -> Acc0
+ end,
+ strip_queue_pids(T, Acc);
+strip_queue_pids([{pid, _} | T], Acc) ->
+ strip_queue_pids(T, Acc);
+strip_queue_pids([{owner_pid, _} | T], Acc) ->
+ strip_queue_pids(T, Acc);
+strip_queue_pids([Any | T], Acc) ->
+ strip_queue_pids(T, [Any | Acc]);
+strip_queue_pids([], Acc) ->
+ Acc.
+
+%% Items can be connections, channels, consumers or queues, hence
+%% strip_pids/1 must accept items of various shapes.
+strip_pids(Item = [T | _]) when is_tuple(T) ->
+ lists:usort(strip_pids(Item, []));
+
+strip_pids(Items) -> [lists:usort(strip_pids(I)) || I <- Items].
+
+strip_pids([{_, unknown} | T], Acc) ->
+ strip_pids(T, Acc);
+strip_pids([{pid, Pid} | T], Acc) when is_pid(Pid) ->
+ strip_pids(T, [{node, node(Pid)} | Acc]);
+strip_pids([{pid, _} | T], Acc) ->
+ strip_pids(T, Acc);
+strip_pids([{connection, _} | T], Acc) ->
+ strip_pids(T, Acc);
+strip_pids([{owner_pid, _} | T], Acc) ->
+ strip_pids(T, Acc);
+strip_pids([{channel, _} | T], Acc) ->
+ strip_pids(T, Acc);
+strip_pids([{channel_pid, _} | T], Acc) ->
+ strip_pids(T, Acc);
+strip_pids([{exclusive_consumer_pid, _} | T], Acc) ->
+ strip_pids(T, Acc);
+strip_pids([{slave_pids, ''} | T], Acc) ->
+ strip_pids(T, Acc);
+strip_pids([{slave_pids, Pids} | T], Acc) ->
+ strip_pids(T, [{slave_nodes, [node(Pid) || Pid <- Pids]} | Acc]);
+strip_pids([{synchronised_slave_pids, ''} | T], Acc) ->
+ strip_pids(T, Acc);
+strip_pids([{synchronised_slave_pids, Pids} | T], Acc) ->
+ strip_pids(T, [{synchronised_slave_nodes, [node(Pid) || Pid <- Pids]} | Acc]);
+strip_pids([{K, [P|_] = Nested} | T], Acc) when is_tuple(P) -> % recurse
+ strip_pids(T, [{K, strip_pids(Nested)} | Acc]);
+strip_pids([{K, [L|_] = Nested} | T], Acc) when is_list(L) -> % recurse
+ strip_pids(T, [{K, strip_pids(Nested)} | Acc]);
+strip_pids([Any | T], Acc) ->
+ strip_pids(T, [Any | Acc]);
+strip_pids([], Acc) ->
+ Acc.
+
+%% Format for JSON replies. Transforms '' into null
+format_nulls(Items) when is_list(Items) ->
+ [format_null_item(Pair) || Pair <- Items];
+format_nulls(Item) ->
+ format_null_item(Item).
+
+format_null_item({Key, ''}) ->
+ {Key, null};
+format_null_item({Key, Value}) when is_list(Value) ->
+ {Key, format_nulls(Value)};
+format_null_item({Key, Value}) ->
+ {Key, Value};
+format_null_item([{_K, _V} | _T] = L) ->
+ format_nulls(L);
+format_null_item(Value) ->
+ Value.
+
+
+-spec escape_html_tags(string()) -> binary().
+
+escape_html_tags(S) ->
+ escape_html_tags(rabbit_data_coercion:to_list(S), []).
+
+
+-spec escape_html_tags(string(), string()) -> binary().
+
+escape_html_tags([], Acc) ->
+ rabbit_data_coercion:to_binary(lists:reverse(Acc));
+escape_html_tags("<" ++ Rest, Acc) ->
+ escape_html_tags(Rest, lists:reverse("&lt;", Acc));
+escape_html_tags(">" ++ Rest, Acc) ->
+ escape_html_tags(Rest, lists:reverse("&gt;", Acc));
+escape_html_tags("&" ++ Rest, Acc) ->
+ escape_html_tags(Rest, lists:reverse("&amp;", Acc));
+escape_html_tags([C | Rest], Acc) ->
+ escape_html_tags(Rest, [C | Acc]).
+
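+%% For example, escape_html_tags("<b>") -> <<"&lt;b&gt;">>.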
+
+-spec clean_consumer_details(proplists:proplist()) -> proplists:proplist().
+clean_consumer_details(Obj) ->
+ case pget(consumer_details, Obj) of
+ undefined -> Obj;
+ Cds ->
+ Cons = [format_consumer_arguments(clean_channel_details(Con)) || Con <- Cds],
+ pset(consumer_details, Cons, Obj)
+ end.
+
+-spec clean_channel_details(proplists:proplist()) -> proplists:proplist().
+clean_channel_details(Obj) ->
+ Obj0 = lists:keydelete(channel_pid, 1, Obj),
+ case pget(channel_details, Obj0) of
+ undefined -> Obj0;
+ Chd ->
+ pset(channel_details,
+ lists:keydelete(pid, 1, Chd),
+ Obj0)
+ end.
+
+-spec format_consumer_arguments(proplists:proplist()) -> proplists:proplist().
+format_consumer_arguments(Obj) ->
+ case pget(arguments, Obj) of
+ undefined -> Obj;
+ #{} -> Obj;
+ [] -> pset(arguments, #{}, Obj);
+ Args -> pset(arguments, amqp_table(Args), Obj)
+ end.
+
+
+parse_bool(<<"true">>) -> true;
+parse_bool(<<"false">>) -> false;
+parse_bool(true) -> true;
+parse_bool(false) -> false;
+parse_bool(undefined) -> undefined;
+parse_bool(V) -> throw({error, {not_boolean, V}}).
+
+args_hash(Args) ->
+ list_to_binary(rabbit_misc:base64url(<<(erlang:phash2(Args, 1 bsl 32)):32>>)).
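+
+%% Note (illustrative): args_hash/1 derives a deterministic, URL-safe
+%% Base64 digest from the 32-bit erlang:phash2/2 hash of Args, so equal
+%% argument lists always map to equal digests.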
diff --git a/deps/rabbitmq_management_agent/src/rabbit_mgmt_gc.erl b/deps/rabbitmq_management_agent/src/rabbit_mgmt_gc.erl
new file mode 100644
index 0000000000..99ddc89a8e
--- /dev/null
+++ b/deps/rabbitmq_management_agent/src/rabbit_mgmt_gc.erl
@@ -0,0 +1,230 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+-module(rabbit_mgmt_gc).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-record(state, {timer,
+ interval
+ }).
+
+-spec start_link() -> rabbit_types:ok_pid_or_error().
+
+-export([start_link/0]).
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
+ code_change/3]).
+
+start_link() ->
+ gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
+
+init(_) ->
+ Interval = rabbit_misc:get_env(rabbitmq_management_agent, metrics_gc_interval, 120000),
+ {ok, start_timer(#state{interval = Interval})}.
+
+handle_call(test, _From, State) ->
+ {reply, ok, State}.
+
+handle_cast(_Request, State) ->
+ {noreply, State}.
+
+handle_info(start_gc, State) ->
+ gc_connections(),
+ gc_vhosts(),
+ gc_channels(),
+ gc_queues(),
+ gc_exchanges(),
+ gc_nodes(),
+ {noreply, start_timer(State)}.
+
+terminate(_Reason, #state{timer = TRef}) ->
+ _ = erlang:cancel_timer(TRef),
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+start_timer(#state{interval = Interval} = St) ->
+ TRef = erlang:send_after(Interval, self(), start_gc),
+ St#state{timer = TRef}.
+
+gc_connections() ->
+ gc_process(connection_stats_coarse_conn_stats),
+ gc_process(connection_created_stats),
+ gc_process(connection_stats).
+
+gc_vhosts() ->
+ VHosts = rabbit_vhost:list(),
+ GbSet = gb_sets:from_list(VHosts),
+ gc_entity(vhost_stats_coarse_conn_stats, GbSet),
+ gc_entity(vhost_stats_fine_stats, GbSet),
+ gc_entity(vhost_msg_stats, GbSet),
+ gc_entity(vhost_msg_rates, GbSet),
+ gc_entity(vhost_stats_deliver_stats, GbSet).
+
+gc_channels() ->
+ gc_process(channel_created_stats),
+ gc_process(channel_stats),
+ gc_process(channel_stats_fine_stats),
+ gc_process(channel_process_stats),
+ gc_process(channel_stats_deliver_stats),
+ ok.
+
+gc_queues() ->
+ Queues = rabbit_amqqueue:list_names(),
+ GbSet = gb_sets:from_list(Queues),
+ LocalQueues = rabbit_amqqueue:list_local_names(),
+ LocalGbSet = gb_sets:from_list(LocalQueues),
+ gc_entity(queue_stats_publish, GbSet),
+ gc_entity(queue_stats, LocalGbSet),
+ gc_entity(queue_msg_stats, LocalGbSet),
+ gc_entity(queue_process_stats, LocalGbSet),
+ gc_entity(queue_msg_rates, LocalGbSet),
+ gc_entity(queue_stats_deliver_stats, GbSet),
+ gc_process_and_entity(channel_queue_stats_deliver_stats_queue_index, GbSet),
+ gc_process_and_entity(consumer_stats_queue_index, GbSet),
+ gc_process_and_entity(consumer_stats_channel_index, GbSet),
+ gc_process_and_entity(consumer_stats, GbSet),
+ gc_process_and_entity(channel_exchange_stats_fine_stats_channel_index, GbSet),
+ gc_process_and_entity(channel_queue_stats_deliver_stats, GbSet),
+ gc_process_and_entity(channel_queue_stats_deliver_stats_channel_index, GbSet),
+ ExchangeGbSet = gb_sets:from_list(rabbit_exchange:list_names()),
+ gc_entities(queue_exchange_stats_publish, GbSet, ExchangeGbSet),
+ gc_entities(queue_exchange_stats_publish_queue_index, GbSet, ExchangeGbSet),
+ gc_entities(queue_exchange_stats_publish_exchange_index, GbSet, ExchangeGbSet).
+
+gc_exchanges() ->
+ Exchanges = rabbit_exchange:list_names(),
+ GbSet = gb_sets:from_list(Exchanges),
+ gc_entity(exchange_stats_publish_in, GbSet),
+ gc_entity(exchange_stats_publish_out, GbSet),
+ gc_entity(channel_exchange_stats_fine_stats_exchange_index, GbSet),
+ gc_process_and_entity(channel_exchange_stats_fine_stats, GbSet).
+
+gc_nodes() ->
+ Nodes = rabbit_mnesia:cluster_nodes(all),
+ GbSet = gb_sets:from_list(Nodes),
+ gc_entity(node_stats, GbSet),
+ gc_entity(node_coarse_stats, GbSet),
+ gc_entity(node_persister_stats, GbSet),
+ gc_entity(node_node_coarse_stats_node_index, GbSet),
+ gc_entity(node_node_stats, GbSet),
+ gc_entity(node_node_coarse_stats, GbSet).
+
+gc_process(Table) ->
+ ets:foldl(fun({{Pid, _} = Key, _}, none) ->
+ gc_process(Pid, Table, Key);
+ ({Pid = Key, _}, none) ->
+ gc_process(Pid, Table, Key);
+ ({Pid = Key, _, _}, none) ->
+ gc_process(Pid, Table, Key);
+ ({{Pid, _} = Key, _, _, _, _}, none) ->
+ gc_process(Pid, Table, Key)
+ end, none, Table).
+
+gc_process(Pid, Table, Key) ->
+ case rabbit_misc:is_process_alive(Pid) of
+ true ->
+ none;
+ false ->
+ ets:delete(Table, Key),
+ none
+ end.
+
+gc_entity(Table, GbSet) ->
+ ets:foldl(fun({{_, Id} = Key, _}, none) when Table == node_node_stats ->
+ gc_entity(Id, Table, Key, GbSet);
+ ({{{_, Id}, _} = Key, _}, none) when Table == node_node_coarse_stats ->
+ gc_entity(Id, Table, Key, GbSet);
+ ({{Id, _} = Key, _}, none) ->
+ gc_entity(Id, Table, Key, GbSet);
+ ({Id = Key, _}, none) ->
+                      gc_entity(Id, Table, Key, GbSet)
+ end, none, Table).
+
+gc_entity(Id, Table, Key, GbSet) ->
+ case gb_sets:is_member(Id, GbSet) of
+ true ->
+ none;
+ false ->
+ ets:delete(Table, Key),
+ none
+ end.
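+
+%% Illustrative example: a row keyed by a queue name that is no longer
+%% returned by rabbit_amqqueue:list_names/0 is absent from GbSet and is
+%% therefore deleted from the stats table.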
+
+gc_process_and_entity(Table, GbSet) ->
+ ets:foldl(fun({{Id, Pid, _} = Key, _}, none) when Table == consumer_stats ->
+ gc_process_and_entity(Id, Pid, Table, Key, GbSet);
+ ({Id = Key, {_, Pid, _}} = Object, none)
+ when Table == consumer_stats_queue_index ->
+ gc_object(Pid, Table, Object),
+ gc_entity(Id, Table, Key, GbSet);
+ ({Pid = Key, {Id, _, _}} = Object, none)
+ when Table == consumer_stats_channel_index ->
+ gc_object(Id, Table, Object, GbSet),
+ gc_process(Pid, Table, Key);
+ ({Id = Key, {{Pid, _}, _}} = Object, none)
+ when Table == channel_exchange_stats_fine_stats_exchange_index;
+ Table == channel_queue_stats_deliver_stats_queue_index ->
+ gc_object(Pid, Table, Object),
+ gc_entity(Id, Table, Key, GbSet);
+ ({Pid = Key, {{_, Id}, _}} = Object, none)
+ when Table == channel_exchange_stats_fine_stats_channel_index;
+ Table == channel_queue_stats_deliver_stats_channel_index ->
+ gc_object(Id, Table, Object, GbSet),
+ gc_process(Pid, Table, Key);
+ ({{{Pid, Id}, _} = Key, _}, none)
+ when Table == channel_queue_stats_deliver_stats;
+ Table == channel_exchange_stats_fine_stats ->
+ gc_process_and_entity(Id, Pid, Table, Key, GbSet);
+ ({{{Pid, Id}, _} = Key, _, _, _, _, _, _, _, _}, none) ->
+ gc_process_and_entity(Id, Pid, Table, Key, GbSet);
+ ({{{Pid, Id}, _} = Key, _, _, _, _}, none) ->
+ gc_process_and_entity(Id, Pid, Table, Key, GbSet)
+ end, none, Table).
+
+gc_process_and_entity(Id, Pid, Table, Key, GbSet) ->
+ case rabbit_misc:is_process_alive(Pid) andalso gb_sets:is_member(Id, GbSet) of
+ true ->
+ none;
+ false ->
+ ets:delete(Table, Key),
+ none
+ end.
+
+gc_object(Pid, Table, Object) ->
+ case rabbit_misc:is_process_alive(Pid) of
+ true ->
+ none;
+ false ->
+ ets:delete_object(Table, Object),
+ none
+ end.
+
+gc_object(Id, Table, Object, GbSet) ->
+ case gb_sets:is_member(Id, GbSet) of
+ true ->
+ none;
+ false ->
+ ets:delete_object(Table, Object),
+ none
+ end.
+
+gc_entities(Table, QueueGbSet, ExchangeGbSet) ->
+ ets:foldl(fun({{{Q, X}, _} = Key, _}, none)
+ when Table == queue_exchange_stats_publish ->
+ gc_entity(Q, Table, Key, QueueGbSet),
+ gc_entity(X, Table, Key, ExchangeGbSet);
+ ({Q, {{_, X}, _}} = Object, none)
+ when Table == queue_exchange_stats_publish_queue_index ->
+ gc_object(X, Table, Object, ExchangeGbSet),
+ gc_entity(Q, Table, Q, QueueGbSet);
+ ({X, {{Q, _}, _}} = Object, none)
+ when Table == queue_exchange_stats_publish_exchange_index ->
+ gc_object(Q, Table, Object, QueueGbSet),
+ gc_entity(X, Table, X, ExchangeGbSet)
+ end, none, Table).
diff --git a/deps/rabbitmq_management_agent/src/rabbit_mgmt_metrics_collector.erl b/deps/rabbitmq_management_agent/src/rabbit_mgmt_metrics_collector.erl
new file mode 100644
index 0000000000..298f17a18d
--- /dev/null
+++ b/deps/rabbitmq_management_agent/src/rabbit_mgmt_metrics_collector.erl
@@ -0,0 +1,712 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+-module(rabbit_mgmt_metrics_collector).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+-include_lib("rabbit_common/include/rabbit_core_metrics.hrl").
+-include("rabbit_mgmt_metrics.hrl").
+
+-behaviour(gen_server).
+-compile({no_auto_import, [ceil/1]}).
+
+-spec start_link(atom()) -> rabbit_types:ok_pid_or_error().
+
+-export([name/1]).
+-export([start_link/1]).
+-export([override_lookups/2, reset_lookups/1]).
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
+ code_change/3]).
+-export([index_table/2]).
+-export([reset_all/0]).
+
+-import(rabbit_mgmt_data, [lookup_element/3]).
+
+-record(state, {table, interval, policies, rates_mode, lookup_queue,
+ lookup_exchange, old_aggr_stats}).
+
+%% Data is stored in ETS tables:
+%% * One ETS table per metric (queue_stats, channel_stats_deliver_stats...)
+%% (see ?TABLES in rabbit_mgmt_metrics.hrl)
+%% * Stats are stored as key value pairs where the key is a tuple of
+%% some value (such as a channel pid) and the retention interval.
+%% The value is an instance of an exometer_slide providing a sliding window
+%% of samples for some {Object, Interval}.
+%% * Each slide can store multiple stats. See stats_per_table in
+%% rabbit_mgmt_metrics.hrl for a map of which stats are recorded in which
+%% table.
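+%%
+%% Illustrative sketch of the resulting layout (shapes inferred from
+%% insert_entry/7 below, not an API guarantee):
+%%   ets:lookup(queue_msg_stats, {QName, 5}) ->
+%%       [{{QName, 5}, Slide}]  %% one row per {Id, Interval}
+%%   where Slide is an exometer_slide whose samples here are
+%%   {Ready, Unacked, Total} tuples.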
+
+reset_all() ->
+ [reset(Table) || {Table, _} <- ?CORE_TABLES].
+
+reset(Table) ->
+ gen_server:cast(name(Table), reset).
+
+name(Table) ->
+ list_to_atom((atom_to_list(Table) ++ "_metrics_collector")).
+
+
+start_link(Table) ->
+ gen_server:start_link({local, name(Table)}, ?MODULE, [Table], []).
+
+override_lookups(Table, Lookups) ->
+ gen_server:call(name(Table), {override_lookups, Lookups}, infinity).
+
+reset_lookups(Table) ->
+ gen_server:call(name(Table), reset_lookups, infinity).
+
+init([Table]) ->
+ {RatesMode, Policies} = load_config(),
+ Policy = retention_policy(Table),
+ Interval = take_smaller(proplists:get_value(Policy, Policies, [])) * 1000,
+ erlang:send_after(Interval, self(), collect_metrics),
+ {ok, #state{table = Table, interval = Interval,
+ policies = {proplists:get_value(basic, Policies),
+ proplists:get_value(detailed, Policies),
+ proplists:get_value(global, Policies)},
+ rates_mode = RatesMode,
+ old_aggr_stats = #{},
+ lookup_queue = fun queue_exists/1,
+ lookup_exchange = fun exchange_exists/1}}.
+
+handle_call(reset_lookups, _From, State) ->
+ {reply, ok, State#state{lookup_queue = fun queue_exists/1,
+ lookup_exchange = fun exchange_exists/1}};
+handle_call({override_lookups, Lookups}, _From, State) ->
+ {reply, ok, State#state{lookup_queue = pget(queue, Lookups),
+ lookup_exchange = pget(exchange, Lookups)}};
+handle_call({submit, Fun}, _From, State) ->
+ {reply, Fun(), State};
+handle_call(_Request, _From, State) ->
+ {noreply, State}.
+
+handle_cast(reset, State) ->
+ {noreply, State#state{old_aggr_stats = #{}}};
+handle_cast(_Msg, State) ->
+ {noreply, State}.
+
+
+handle_info(collect_metrics, #state{interval = Interval} = State0) ->
+ Timestamp = exometer_slide:timestamp(),
+ State = aggregate_metrics(Timestamp, State0),
+ erlang:send_after(Interval, self(), collect_metrics),
+ {noreply, State};
+handle_info(purge_old_stats, State) ->
+ {noreply, State#state{old_aggr_stats = #{}}};
+handle_info(_Msg, State) ->
+ {noreply, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+retention_policy(connection_created) -> basic; %% really nothing
+retention_policy(connection_metrics) -> basic;
+retention_policy(connection_coarse_metrics) -> basic;
+retention_policy(channel_created) -> basic;
+retention_policy(channel_metrics) -> basic;
+retention_policy(channel_queue_exchange_metrics) -> detailed;
+retention_policy(channel_exchange_metrics) -> detailed;
+retention_policy(channel_queue_metrics) -> detailed;
+retention_policy(channel_process_metrics) -> basic;
+retention_policy(consumer_created) -> basic;
+retention_policy(queue_metrics) -> basic;
+retention_policy(queue_coarse_metrics) -> basic;
+retention_policy(node_persister_metrics) -> global;
+retention_policy(node_coarse_metrics) -> global;
+retention_policy(node_metrics) -> basic;
+retention_policy(node_node_metrics) -> global;
+retention_policy(connection_churn_metrics) -> basic.
+
+take_smaller(Policies) ->
+ Intervals = [I || {_, I} <- Policies],
+ case Intervals of
+ [] -> throw(missing_sample_retention_policies);
+ _ -> lists:min(Intervals)
+ end.
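+
+%% The policies originate from the sample_retention_policies application
+%% environment (see load_config/0). A hypothetical value, shown for its
+%% shape only:
+%%   [{basic,    [{605, 5}, {3600, 60}]},
+%%    {detailed, [{605, 5}]},
+%%    {global,   [{605, 5}, {3600, 60}]}]
+%% i.e. {RetentionSecs, IntervalSecs} pairs per policy name;
+%% take_smaller/1 picks the smallest interval for the collection timer.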
+
+insert_old_aggr_stats(NextStats, Id, Stat) ->
+ NextStats#{Id => Stat}.
+
+handle_deleted_queues(queue_coarse_metrics, Remainders,
+ #state{policies = {BPolicies, _, GPolicies}}) ->
+ TS = exometer_slide:timestamp(),
+ lists:foreach(fun ({Queue, {R, U, M}}) ->
+ NegStats = ?vhost_msg_stats(-R, -U, -M),
+ [insert_entry(vhost_msg_stats, vhost(Queue), TS,
+ NegStats, Size, Interval, true)
+ || {Size, Interval} <- GPolicies],
+ % zero out msg stats to avoid duplicating msg
+ % stats when a master queue is migrated
+ QNegStats = ?queue_msg_stats(0, 0, 0),
+ [insert_entry(queue_msg_stats, Queue, TS,
+ QNegStats, Size, Interval, false)
+ || {Size, Interval} <- BPolicies],
+ ets:delete(queue_stats, Queue),
+ ets:delete(queue_process_stats, Queue)
+ end, maps:to_list(Remainders));
+handle_deleted_queues(_T, _R, _P) -> ok.
+
+aggregate_metrics(Timestamp, #state{table = Table} = State0) ->
+ {Next, Ops, #state{old_aggr_stats = Remainders}} =
+ ets:foldl(fun(R, {NextStats, O, State}) ->
+ aggregate_entry(R, NextStats, O, State)
+ end, {#{}, #{}, State0}, Table),
+ maps:fold(fun(Tbl, TblOps, Acc) ->
+ _ = exec_table_ops(Tbl, Timestamp, TblOps),
+ Acc
+ end, no_acc, Ops),
+
+ handle_deleted_queues(Table, Remainders, State0),
+ State0#state{old_aggr_stats = Next}.
+
+exec_table_ops(Table, Timestamp, TableOps) ->
+ maps:fold(fun(_Id, {insert, Entry}, A) ->
+ ets:insert(Table, Entry),
+ A;
+ (Id, {insert_with_index, Entry}, A) ->
+ insert_with_index(Table, Id, Entry),
+ A;
+ ({Id, Size, Interval, Incremental},
+ {insert_entry, Entry}, A) ->
+ insert_entry(Table, Id, Timestamp, Entry, Size,
+ Interval, Incremental),
+ A
+ end, no_acc, TableOps).
+
+
+aggregate_entry({Id, Metrics}, NextStats, Ops0,
+ #state{table = connection_created} = State) ->
+ case ets:lookup(connection_created_stats, Id) of
+ [] ->
+ Ftd = rabbit_mgmt_format:format(
+ Metrics,
+ {fun rabbit_mgmt_format:format_connection_created/1, true}),
+ Entry = ?connection_created_stats(Id, pget(name, Ftd, unknown), Ftd),
+ Ops = insert_op(connection_created_stats, Id, Entry, Ops0),
+ {NextStats, Ops, State};
+ _ ->
+ {NextStats, Ops0, State}
+ end;
+aggregate_entry({Id, Metrics}, NextStats, Ops0,
+ #state{table = connection_metrics} = State) ->
+ Entry = ?connection_stats(Id, Metrics),
+ Ops = insert_op(connection_stats, Id, Entry, Ops0),
+ {NextStats, Ops, State};
+aggregate_entry({Id, RecvOct, SendOct, Reductions, 0}, NextStats, Ops0,
+ #state{table = connection_coarse_metrics,
+ policies = {BPolicies, _, GPolicies}} = State) ->
+ Stats = ?vhost_stats_coarse_conn_stats(RecvOct, SendOct),
+ Diff = get_difference(Id, Stats, State),
+
+ Ops1 = insert_entry_ops(vhost_stats_coarse_conn_stats,
+ vhost({connection_created, Id}), true, Diff, Ops0,
+ GPolicies),
+
+ Entry = ?connection_stats_coarse_conn_stats(RecvOct, SendOct, Reductions),
+ Ops2 = insert_entry_ops(connection_stats_coarse_conn_stats, Id, false, Entry,
+ Ops1, BPolicies),
+ {insert_old_aggr_stats(NextStats, Id, Stats), Ops2, State};
+aggregate_entry({Id, RecvOct, SendOct, _Reductions, 1}, NextStats, Ops0,
+ #state{table = connection_coarse_metrics,
+ policies = {_BPolicies, _, GPolicies}} = State) ->
+ Stats = ?vhost_stats_coarse_conn_stats(RecvOct, SendOct),
+ Diff = get_difference(Id, Stats, State),
+ Ops1 = insert_entry_ops(vhost_stats_coarse_conn_stats,
+ vhost({connection_created, Id}), true, Diff, Ops0,
+ GPolicies),
+ rabbit_core_metrics:delete(connection_coarse_metrics, Id),
+ {NextStats, Ops1, State};
+aggregate_entry({Id, Metrics}, NextStats, Ops0,
+ #state{table = channel_created} = State) ->
+ case ets:lookup(channel_created_stats, Id) of
+ [] ->
+ Ftd = rabbit_mgmt_format:format(Metrics, {[], false}),
+ Entry = ?channel_created_stats(Id, pget(name, Ftd, unknown), Ftd),
+ Ops = insert_op(channel_created_stats, Id, Entry, Ops0),
+ {NextStats, Ops, State};
+ _ ->
+ {NextStats, Ops0, State}
+ end;
+aggregate_entry({Id, Metrics}, NextStats, Ops0,
+ #state{table = channel_metrics} = State) ->
+ %% First metric must be `idle_since` (if available), as expected by
+ %% `rabbit_mgmt_format:format_channel_stats`. This is a performance
+ %% optimisation that avoids traversing the whole list when only
+ %% one element has to be formatted.
+ Ftd = rabbit_mgmt_format:format_channel_stats(Metrics),
+ Entry = ?channel_stats(Id, Ftd),
+ Ops = insert_op(channel_stats, Id, Entry, Ops0),
+ {NextStats, Ops, State};
+aggregate_entry({{Ch, X} = Id, Publish0, Confirm, ReturnUnroutable, DropUnroutable, 0},
+ NextStats, Ops0,
+ #state{table = channel_exchange_metrics,
+ policies = {BPolicies, DPolicies, GPolicies},
+ rates_mode = RatesMode,
+ lookup_exchange = ExchangeFun} = State) ->
+ Stats = ?channel_stats_fine_stats(Publish0, Confirm, ReturnUnroutable, DropUnroutable),
+ {Publish, _, _, _} = Diff = get_difference(Id, Stats, State),
+
+ Ops1 = insert_entry_ops(channel_stats_fine_stats, Ch, true, Diff, Ops0,
+ BPolicies),
+ Ops2 = insert_entry_ops(vhost_stats_fine_stats, vhost(X), true, Diff, Ops1,
+ GPolicies),
+ Ops3 = case {ExchangeFun(X), RatesMode} of
+ {true, basic} ->
+ Entry = ?exchange_stats_publish_in(Publish),
+ insert_entry_ops(exchange_stats_publish_in, X, true, Entry,
+ Ops2, DPolicies);
+ {true, _} ->
+ Entry = ?exchange_stats_publish_in(Publish),
+ O = insert_entry_ops(exchange_stats_publish_in, X, true,
+ Entry, Ops2, DPolicies),
+ insert_entry_ops(channel_exchange_stats_fine_stats, Id,
+ false, Stats, O, DPolicies);
+ _ ->
+ Ops2
+ end,
+ {insert_old_aggr_stats(NextStats, Id, Stats), Ops3, State};
+aggregate_entry({{_Ch, X} = Id, Publish0, Confirm, ReturnUnroutable, DropUnroutable, 1},
+ NextStats, Ops0,
+ #state{table = channel_exchange_metrics,
+ policies = {_BPolicies, DPolicies, GPolicies},
+ lookup_exchange = ExchangeFun} = State) ->
+ Stats = ?channel_stats_fine_stats(Publish0, Confirm, ReturnUnroutable, DropUnroutable),
+ {Publish, _, _, _} = Diff = get_difference(Id, Stats, State),
+ Ops1 = insert_entry_ops(vhost_stats_fine_stats, vhost(X), true, Diff, Ops0,
+ GPolicies),
+ Ops2 = case ExchangeFun(X) of
+ true ->
+ Entry = ?exchange_stats_publish_in(Publish),
+ insert_entry_ops(exchange_stats_publish_in, X, true, Entry,
+ Ops1, DPolicies);
+ _ ->
+ Ops1
+ end,
+ rabbit_core_metrics:delete(channel_exchange_metrics, Id),
+ {NextStats, Ops2, State};
+aggregate_entry({{Ch, Q} = Id, Get, GetNoAck, Deliver, DeliverNoAck,
+ Redeliver, Ack, GetEmpty, 0}, NextStats, Ops0,
+ #state{table = channel_queue_metrics,
+ policies = {BPolicies, DPolicies, GPolicies},
+ rates_mode = RatesMode,
+ lookup_queue = QueueFun} = State) ->
+ Stats = ?vhost_stats_deliver_stats(Get, GetNoAck, Deliver, DeliverNoAck,
+ Redeliver, Ack,
+ Deliver + DeliverNoAck + Get + GetNoAck,
+ GetEmpty),
+ Diff = get_difference(Id, Stats, State),
+
+ Ops1 = insert_entry_ops(vhost_stats_deliver_stats, vhost(Q), true, Diff,
+ Ops0, GPolicies),
+
+ Ops2 = insert_entry_ops(channel_stats_deliver_stats, Ch, true, Diff, Ops1,
+ BPolicies),
+
+ Ops3 = case {QueueFun(Q), RatesMode} of
+ {true, basic} ->
+ insert_entry_ops(queue_stats_deliver_stats, Q, true, Diff,
+ Ops2, BPolicies);
+ {true, _} ->
+ O = insert_entry_ops(queue_stats_deliver_stats, Q, true,
+ Diff, Ops2, BPolicies),
+ insert_entry_ops(channel_queue_stats_deliver_stats, Id,
+ false, Stats, O, DPolicies);
+ _ ->
+ Ops2
+ end,
+ {insert_old_aggr_stats(NextStats, Id, Stats), Ops3, State};
+aggregate_entry({{_, Q} = Id, Get, GetNoAck, Deliver, DeliverNoAck,
+ Redeliver, Ack, GetEmpty, 1}, NextStats, Ops0,
+ #state{table = channel_queue_metrics,
+ policies = {BPolicies, _, GPolicies},
+ lookup_queue = QueueFun} = State) ->
+ Stats = ?vhost_stats_deliver_stats(Get, GetNoAck, Deliver, DeliverNoAck,
+ Redeliver, Ack,
+ Deliver + DeliverNoAck + Get + GetNoAck,
+ GetEmpty),
+ Diff = get_difference(Id, Stats, State),
+
+ Ops1 = insert_entry_ops(vhost_stats_deliver_stats, vhost(Q), true, Diff,
+ Ops0, GPolicies),
+ Ops2 = case QueueFun(Q) of
+ true ->
+ insert_entry_ops(queue_stats_deliver_stats, Q, true, Diff,
+ Ops1, BPolicies);
+ _ ->
+ Ops1
+ end,
+ rabbit_core_metrics:delete(channel_queue_metrics, Id),
+ {NextStats, Ops2, State};
+aggregate_entry({{_Ch, {Q, X}} = Id, Publish, ToDelete}, NextStats, Ops0,
+ #state{table = channel_queue_exchange_metrics,
+ policies = {BPolicies, _, _},
+ rates_mode = RatesMode,
+ lookup_queue = QueueFun,
+ lookup_exchange = ExchangeFun} = State) ->
+ Stats = ?queue_stats_publish(Publish),
+ Diff = get_difference(Id, Stats, State),
+ Ops1 = case {QueueFun(Q), ExchangeFun(X), RatesMode, ToDelete} of
+ {true, false, _, _} ->
+ insert_entry_ops(queue_stats_publish, Q, true, Diff,
+ Ops0, BPolicies);
+ {false, true, _, _} ->
+ insert_entry_ops(exchange_stats_publish_out, X, true, Diff,
+ Ops0, BPolicies);
+ {true, true, basic, _} ->
+ O = insert_entry_ops(queue_stats_publish, Q, true, Diff,
+ Ops0, BPolicies),
+ insert_entry_ops(exchange_stats_publish_out, X, true, Diff,
+ O, BPolicies);
+ {true, true, _, 0} ->
+ O1 = insert_entry_ops(queue_stats_publish, Q, true, Diff,
+ Ops0, BPolicies),
+ O2 = insert_entry_ops(exchange_stats_publish_out, X, true,
+ Diff, O1, BPolicies),
+ insert_entry_ops(queue_exchange_stats_publish, {Q, X},
+ true, Diff, O2, BPolicies);
+ {true, true, _, 1} ->
+ O = insert_entry_ops(queue_stats_publish, Q, true, Diff,
+ Ops0, BPolicies),
+ insert_entry_ops(exchange_stats_publish_out, X, true,
+ Diff, O, BPolicies);
+ _ ->
+ Ops0
+ end,
+ case ToDelete of
+ 0 ->
+ {insert_old_aggr_stats(NextStats, Id, Stats), Ops1, State};
+ 1 ->
+ rabbit_core_metrics:delete(channel_queue_exchange_metrics, Id),
+ {NextStats, Ops1, State}
+ end;
+aggregate_entry({Id, Reductions}, NextStats, Ops0,
+ #state{table = channel_process_metrics,
+ policies = {BPolicies, _, _}} = State) ->
+ Entry = ?channel_process_stats(Reductions),
+ Ops = insert_entry_ops(channel_process_stats, Id, false,
+ Entry, Ops0, BPolicies),
+ {NextStats, Ops, State};
+aggregate_entry({Id, Exclusive, AckRequired, PrefetchCount,
+ Active, ActivityStatus, Args},
+ NextStats, Ops0,
+ #state{table = consumer_created} = State) ->
+ case ets:lookup(consumer_stats, Id) of
+ [] ->
+ Fmt = rabbit_mgmt_format:format([{exclusive, Exclusive},
+ {ack_required, AckRequired},
+ {prefetch_count, PrefetchCount},
+ {active, Active},
+ {activity_status, ActivityStatus},
+ {arguments, Args}], {[], false}),
+ Entry = ?consumer_stats(Id, Fmt),
+ Ops = insert_with_index_op(consumer_stats, Id, Entry, Ops0),
+            {NextStats, Ops, State};
+ [{_K, V}] ->
+ CurrentActive = proplists:get_value(active, V, undefined),
+ case Active =:= CurrentActive of
+ false ->
+ Fmt = rabbit_mgmt_format:format([{exclusive, Exclusive},
+ {ack_required, AckRequired},
+ {prefetch_count, PrefetchCount},
+ {active, Active},
+ {activity_status, ActivityStatus},
+ {arguments, Args}], {[], false}),
+ Entry = ?consumer_stats(Id, Fmt),
+ Ops = insert_with_index_op(consumer_stats, Id, Entry, Ops0),
+                    {NextStats, Ops, State};
+ _ ->
+ {NextStats, Ops0, State}
+ end;
+ _ ->
+ {NextStats, Ops0, State}
+ end;
+aggregate_entry({Id, Metrics, 0}, NextStats, Ops0,
+ #state{table = queue_metrics,
+ policies = {BPolicies, _, GPolicies},
+ lookup_queue = QueueFun} = State) ->
+ Stats = ?queue_msg_rates(pget(disk_reads, Metrics, 0),
+ pget(disk_writes, Metrics, 0)),
+ Diff = get_difference(Id, Stats, State),
+ Ops1 = insert_entry_ops(vhost_msg_rates, vhost(Id), true, Diff, Ops0,
+ GPolicies),
+ Ops2 = case QueueFun(Id) of
+ true ->
+ O = insert_entry_ops(queue_msg_rates, Id, false, Stats, Ops1,
+ BPolicies),
+ Fmt = rabbit_mgmt_format:format(
+ Metrics,
+ {fun rabbit_mgmt_format:format_queue_stats/1, false}),
+ insert_op(queue_stats, Id, ?queue_stats(Id, Fmt), O);
+ false ->
+ Ops1
+ end,
+ {insert_old_aggr_stats(NextStats, Id, Stats), Ops2, State};
+aggregate_entry({Id, Metrics, 1}, NextStats, Ops0,
+ #state{table = queue_metrics,
+ policies = {_, _, GPolicies}} = State) ->
+ Stats = ?queue_msg_rates(pget(disk_reads, Metrics, 0),
+ pget(disk_writes, Metrics, 0)),
+ Diff = get_difference(Id, Stats, State),
+ Ops = insert_entry_ops(vhost_msg_rates, vhost(Id), true, Diff, Ops0,
+ GPolicies),
+ rabbit_core_metrics:delete(queue_metrics, Id),
+ {NextStats, Ops, State};
+aggregate_entry({Name, Ready, Unack, Msgs, Red}, NextStats, Ops0,
+ #state{table = queue_coarse_metrics,
+ old_aggr_stats = Old,
+ policies = {BPolicies, _, GPolicies},
+ lookup_queue = QueueFun} = State) ->
+ Stats = ?vhost_msg_stats(Ready, Unack, Msgs),
+ Diff = get_difference(Name, Stats, State),
+ Ops1 = insert_entry_ops(vhost_msg_stats, vhost(Name), true, Diff, Ops0,
+ GPolicies),
+ Ops2 = case QueueFun(Name) of
+ true ->
+                   QPS = ?queue_process_stats(Red),
+ O1 = insert_entry_ops(queue_process_stats, Name, false, QPS,
+ Ops1, BPolicies),
+ QMS = ?queue_msg_stats(Ready, Unack, Msgs),
+ insert_entry_ops(queue_msg_stats, Name, false, QMS,
+ O1, BPolicies);
+ _ ->
+ Ops1
+ end,
+ State1 = State#state{old_aggr_stats = maps:remove(Name, Old)},
+ {insert_old_aggr_stats(NextStats, Name, Stats), Ops2, State1};
+aggregate_entry({Id, Metrics}, NextStats, Ops0,
+ #state{table = node_metrics} = State) ->
+ Ops = insert_op(node_stats, Id, {Id, Metrics}, Ops0),
+ {NextStats, Ops, State};
+aggregate_entry({Id, Metrics}, NextStats, Ops0,
+ #state{table = node_coarse_metrics,
+ policies = {_, _, GPolicies}} = State) ->
+ Stats = ?node_coarse_stats(
+ pget(fd_used, Metrics, 0), pget(sockets_used, Metrics, 0),
+ pget(mem_used, Metrics, 0), pget(disk_free, Metrics, 0),
+ pget(proc_used, Metrics, 0), pget(gc_num, Metrics, 0),
+ pget(gc_bytes_reclaimed, Metrics, 0),
+ pget(context_switches, Metrics, 0)),
+ Ops = insert_entry_ops(node_coarse_stats, Id, false, Stats, Ops0,
+ GPolicies),
+ {NextStats, Ops, State};
+aggregate_entry({Id, Metrics}, NextStats, Ops0,
+ #state{table = node_persister_metrics,
+ policies = {_, _, GPolicies}} = State) ->
+ Stats = ?node_persister_stats(
+ pget(io_read_count, Metrics, 0), pget(io_read_bytes, Metrics, 0),
+ pget(io_read_time, Metrics, 0), pget(io_write_count, Metrics, 0),
+ pget(io_write_bytes, Metrics, 0), pget(io_write_time, Metrics, 0),
+ pget(io_sync_count, Metrics, 0), pget(io_sync_time, Metrics, 0),
+ pget(io_seek_count, Metrics, 0), pget(io_seek_time, Metrics, 0),
+ pget(io_reopen_count, Metrics, 0), pget(mnesia_ram_tx_count, Metrics, 0),
+ pget(mnesia_disk_tx_count, Metrics, 0), pget(msg_store_read_count, Metrics, 0),
+ pget(msg_store_write_count, Metrics, 0),
+ pget(queue_index_journal_write_count, Metrics, 0),
+ pget(queue_index_write_count, Metrics, 0), pget(queue_index_read_count, Metrics, 0),
+ pget(io_file_handle_open_attempt_count, Metrics, 0),
+ pget(io_file_handle_open_attempt_time, Metrics, 0)),
+ Ops = insert_entry_ops(node_persister_stats, Id, false, Stats, Ops0,
+ GPolicies),
+ {NextStats, Ops, State};
+aggregate_entry({Id, Metrics}, NextStats, Ops0,
+ #state{table = node_node_metrics,
+ policies = {_, _, GPolicies}} = State) ->
+ Stats = ?node_node_coarse_stats(pget(send_bytes, Metrics, 0),
+ pget(recv_bytes, Metrics, 0)),
+ CleanMetrics = lists:keydelete(recv_bytes, 1,
+ lists:keydelete(send_bytes, 1, Metrics)),
+ Ops1 = insert_op(node_node_stats, Id, ?node_node_stats(Id, CleanMetrics),
+ Ops0),
+ Ops = insert_entry_ops(node_node_coarse_stats, Id, false, Stats, Ops1,
+ GPolicies),
+ {NextStats, Ops, State};
+aggregate_entry({Id, ConnCreated, ConnClosed, ChCreated, ChClosed,
+ QueueDeclared, QueueCreated, QueueDeleted}, NextStats, Ops0,
+ #state{table = connection_churn_metrics,
+ policies = {_, _, GPolicies}} = State) ->
+    %% Id is the local node. There is only one entry in each of these ETS tables.
+ Stats = ?connection_churn_rates(ConnCreated, ConnClosed, ChCreated, ChClosed,
+ QueueDeclared, QueueCreated, QueueDeleted),
+ Diff = get_difference(Id, Stats, State),
+ Ops = insert_entry_ops(connection_churn_rates, Id, true, Diff, Ops0,
+ GPolicies),
+ {insert_old_aggr_stats(NextStats, Id, Stats), Ops, State}.
+
+insert_entry(Table, Id, TS, Entry, Size, Interval0, Incremental) ->
+ Key = {Id, Interval0},
+ Slide =
+ case ets:lookup(Table, Key) of
+ [{Key, S}] ->
+ S;
+ [] ->
+ IntervalMs = Interval0 * 1000,
+ % add some margin to Size and max_n to reduce chances of off-by-one errors
+ exometer_slide:new(TS - IntervalMs, (Size + Interval0) * 1000,
+ [{interval, IntervalMs},
+ {max_n, ceil(Size / Interval0) + 1},
+ {incremental, Incremental}])
+ end,
+ insert_with_index(Table, Key, {Key, exometer_slide:add_element(TS, Entry,
+ Slide)}).
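+
+%% Worked example (assumed policy values): with {Size, Interval0} = {605, 5}
+%% the slide spans (605 + 5) * 1000 ms and max_n allows
+%% ceil(605 / 5) + 1 = 122 samples, one more than strictly required.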
+
+update_op(Table, Key, Op, Ops) ->
+ TableOps = case maps:find(Table, Ops) of
+ {ok, Inner} ->
+ maps:put(Key, Op, Inner);
+ error ->
+ Inner = #{},
+ maps:put(Key, Op, Inner)
+ end,
+ maps:put(Table, TableOps, Ops).
+
+insert_with_index_op(Table, Key, Entry, Ops) ->
+ update_op(Table, Key, {insert_with_index, Entry}, Ops).
+
+insert_op(Table, Key, Entry, Ops) ->
+ update_op(Table, Key, {insert, Entry}, Ops).
+
+insert_entry_op(Table, Key, Entry, Ops) ->
+ TableOps0 = case maps:find(Table, Ops) of
+ {ok, Inner} -> Inner;
+ error -> #{}
+ end,
+ TableOps = maps:update_with(Key, fun({insert_entry, Entry0}) ->
+ {insert_entry, sum_entry(Entry0, Entry)}
+ end, {insert_entry, Entry}, TableOps0),
+ maps:put(Table, TableOps, Ops).
+
+insert_entry_ops(Table, Id, Incr, Entry, Ops, Policies) ->
+ lists:foldl(fun({Size, Interval}, Acc) ->
+ Key = {Id, Size, Interval, Incr},
+ insert_entry_op(Table, Key, Entry, Acc)
+ end, Ops, Policies).
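+
+%% The accumulated Ops value is a map of maps (see update_op/4 and
+%% exec_table_ops/3 above):
+%%   #{Table => #{Key => {insert, Entry}
+%%                     | {insert_with_index, Entry}
+%%                     | {insert_entry, Entry}}}
+%% where insert_entry keys also carry their retention policy:
+%%   {Id, Size, Interval, Incremental}.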
+
+get_difference(Id, Stats, #state{old_aggr_stats = OldStats}) ->
+ case maps:find(Id, OldStats) of
+ error ->
+ Stats;
+ {ok, OldStat} ->
+ difference(OldStat, Stats)
+ end.
+
+sum_entry({A0}, {B0}) ->
+ {B0 + A0};
+sum_entry({A0, A1}, {B0, B1}) ->
+ {B0 + A0, B1 + A1};
+sum_entry({A0, A1, A2}, {B0, B1, B2}) ->
+ {B0 + A0, B1 + A1, B2 + A2};
+sum_entry({A0, A1, A2, A3}, {B0, B1, B2, B3}) ->
+ {B0 + A0, B1 + A1, B2 + A2, B3 + A3};
+sum_entry({A0, A1, A2, A3, A4}, {B0, B1, B2, B3, B4}) ->
+ {B0 + A0, B1 + A1, B2 + A2, B3 + A3, B4 + A4};
+sum_entry({A0, A1, A2, A3, A4, A5}, {B0, B1, B2, B3, B4, B5}) ->
+ {B0 + A0, B1 + A1, B2 + A2, B3 + A3, B4 + A4, B5 + A5};
+sum_entry({A0, A1, A2, A3, A4, A5, A6}, {B0, B1, B2, B3, B4, B5, B6}) ->
+ {B0 + A0, B1 + A1, B2 + A2, B3 + A3, B4 + A4, B5 + A5, B6 + A6};
+sum_entry({A0, A1, A2, A3, A4, A5, A6, A7}, {B0, B1, B2, B3, B4, B5, B6, B7}) ->
+ {B0 + A0, B1 + A1, B2 + A2, B3 + A3, B4 + A4, B5 + A5, B6 + A6, B7 + A7}.
+
+difference({A0}, {B0}) ->
+ {B0 - A0};
+difference({A0, A1}, {B0, B1}) ->
+ {B0 - A0, B1 - A1};
+difference({A0, A1, A2}, {B0, B1, B2}) ->
+ {B0 - A0, B1 - A1, B2 - A2};
+difference({A0, A1, A2, A3}, {B0, B1, B2, B3}) ->
+ {B0 - A0, B1 - A1, B2 - A2, B3 - A3};
+difference({A0, A1, A2, A3, A4}, {B0, B1, B2, B3, B4}) ->
+ {B0 - A0, B1 - A1, B2 - A2, B3 - A3, B4 - A4};
+difference({A0, A1, A2, A3, A4, A5}, {B0, B1, B2, B3, B4, B5}) ->
+ {B0 - A0, B1 - A1, B2 - A2, B3 - A3, B4 - A4, B5 - A5};
+difference({A0, A1, A2, A3, A4, A5, A6}, {B0, B1, B2, B3, B4, B5, B6}) ->
+ {B0 - A0, B1 - A1, B2 - A2, B3 - A3, B4 - A4, B5 - A5, B6 - A6};
+difference({A0, A1, A2, A3, A4, A5, A6, A7}, {B0, B1, B2, B3, B4, B5, B6, B7}) ->
+ {B0 - A0, B1 - A1, B2 - A2, B3 - A3, B4 - A4, B5 - A5, B6 - A6, B7 - A7}.
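+
+%% Both sum_entry/2 and difference/2 operate element-wise on equal-arity
+%% tuples, e.g. sum_entry({1, 2}, {3, 4}) =:= {4, 6} and
+%% difference({1, 2}, {3, 4}) =:= {2, 2}.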
+
+vhost(#resource{virtual_host = VHost}) ->
+ VHost;
+vhost({queue_stats, #resource{virtual_host = VHost}}) ->
+ VHost;
+vhost({TName, Pid}) ->
+ pget(vhost, lookup_element(TName, Pid, 2)).
+
+exchange_exists(Name) ->
+ case rabbit_exchange:lookup(Name) of
+ {ok, _} ->
+ true;
+ _ ->
+ false
+ end.
+
+queue_exists(Name) ->
+ case rabbit_amqqueue:lookup(Name) of
+ {ok, _} ->
+ true;
+ _ ->
+ false
+ end.
+
+insert_with_index(Table, Key, Tuple) ->
+ Insert = ets:insert(Table, Tuple),
+ insert_index(Table, Key),
+ Insert.
+
+insert_index(consumer_stats, {Q, Ch, _} = Key) ->
+ ets:insert(index_table(consumer_stats, queue), {Q, Key}),
+ ets:insert(index_table(consumer_stats, channel), {Ch, Key});
+insert_index(channel_exchange_stats_fine_stats, {{Ch, Ex}, _} = Key) ->
+ ets:insert(index_table(channel_exchange_stats_fine_stats, exchange), {Ex, Key}),
+ ets:insert(index_table(channel_exchange_stats_fine_stats, channel), {Ch, Key});
+insert_index(channel_queue_stats_deliver_stats, {{Ch, Q}, _} = Key) ->
+ ets:insert(index_table(channel_queue_stats_deliver_stats, queue), {Q, Key}),
+ ets:insert(index_table(channel_queue_stats_deliver_stats, channel), {Ch, Key});
+insert_index(queue_exchange_stats_publish, {{Q, Ex}, _} = Key) ->
+ ets:insert(index_table(queue_exchange_stats_publish, queue), {Q, Key}),
+ ets:insert(index_table(queue_exchange_stats_publish, exchange), {Ex, Key});
+insert_index(node_node_coarse_stats, {{_, Node}, _} = Key) ->
+ ets:insert(index_table(node_node_coarse_stats, node), {Node, Key});
+insert_index(_, _) -> ok.
+
+index_table(consumer_stats, queue) -> consumer_stats_queue_index;
+index_table(consumer_stats, channel) -> consumer_stats_channel_index;
+index_table(channel_exchange_stats_fine_stats, exchange) -> channel_exchange_stats_fine_stats_exchange_index;
+index_table(channel_exchange_stats_fine_stats, channel) -> channel_exchange_stats_fine_stats_channel_index;
+index_table(channel_queue_stats_deliver_stats, queue) -> channel_queue_stats_deliver_stats_queue_index;
+index_table(channel_queue_stats_deliver_stats, channel) -> channel_queue_stats_deliver_stats_channel_index;
+index_table(queue_exchange_stats_publish, queue) -> queue_exchange_stats_publish_queue_index;
+index_table(queue_exchange_stats_publish, exchange) -> queue_exchange_stats_publish_exchange_index;
+index_table(node_node_coarse_stats, node) -> node_node_coarse_stats_node_index.
+
+load_config() ->
+ RatesMode = rabbit_mgmt_agent_config:get_env(rates_mode),
+ Policies = rabbit_mgmt_agent_config:get_env(sample_retention_policies, []),
+ {RatesMode, Policies}.
+
+ceil(X) when X < 0 ->
+ trunc(X);
+ceil(X) ->
+ T = trunc(X),
+ case X - T == 0 of
+ true -> T;
+ false -> T + 1
+ end.
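+
+%% Rounds up for positive non-integers and truncates toward zero for
+%% negatives: ceil(2.1) =:= 3, ceil(-2.1) =:= -2, ceil(4.0) =:= 4.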
+
+pget(Key, List) -> pget(Key, List, unknown).
+
+pget(Key, List, Default) when is_number(Default) ->
+ case rabbit_misc:pget(Key, List) of
+ Number when is_number(Number) ->
+ Number;
+ _Other ->
+ Default
+ end;
+pget(Key, List, Default) ->
+ rabbit_misc:pget(Key, List, Default).
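+
+%% With a numeric default, any non-numeric stored value falls back to the
+%% default, e.g. pget(count, [{count, undefined}], 0) =:= 0.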
diff --git a/deps/rabbitmq_management_agent/src/rabbit_mgmt_metrics_gc.erl b/deps/rabbitmq_management_agent/src/rabbit_mgmt_metrics_gc.erl
new file mode 100644
index 0000000000..f1ae48e0e4
--- /dev/null
+++ b/deps/rabbitmq_management_agent/src/rabbit_mgmt_metrics_gc.erl
@@ -0,0 +1,175 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+-module(rabbit_mgmt_metrics_gc).
+
+-record(state, {basic_i,
+ detailed_i,
+ global_i}).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-spec start_link(atom()) -> rabbit_types:ok_pid_or_error().
+
+-export([name/1]).
+-export([start_link/1]).
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
+ code_change/3]).
+
+name(EventType) ->
+ list_to_atom((atom_to_list(EventType) ++ "_metrics_gc")).
+
+start_link(EventType) ->
+ gen_server:start_link({local, name(EventType)}, ?MODULE, [], []).
+
+init(_) ->
+ Policies = rabbit_mgmt_agent_config:get_env(sample_retention_policies),
+ {ok, #state{basic_i = intervals(basic, Policies),
+ global_i = intervals(global, Policies),
+ detailed_i = intervals(detailed, Policies)}}.
+
+handle_call(_Request, _From, State) ->
+ {noreply, State}.
+
+handle_cast({event, #event{type = connection_closed, props = Props}},
+ State = #state{basic_i = BIntervals}) ->
+ Pid = pget(pid, Props),
+ remove_connection(Pid, BIntervals),
+ {noreply, State};
+handle_cast({event, #event{type = channel_closed, props = Props}},
+ State = #state{basic_i = BIntervals}) ->
+ Pid = pget(pid, Props),
+ remove_channel(Pid, BIntervals),
+ {noreply, State};
+handle_cast({event, #event{type = consumer_deleted, props = Props}}, State) ->
+ remove_consumer(Props),
+ {noreply, State};
+handle_cast({event, #event{type = exchange_deleted, props = Props}},
+ State = #state{basic_i = BIntervals}) ->
+ Name = pget(name, Props),
+ remove_exchange(Name, BIntervals),
+ {noreply, State};
+handle_cast({event, #event{type = queue_deleted, props = Props}},
+ State = #state{basic_i = BIntervals}) ->
+ Name = pget(name, Props),
+ remove_queue(Name, BIntervals),
+ {noreply, State};
+handle_cast({event, #event{type = vhost_deleted, props = Props}},
+ State = #state{global_i = GIntervals}) ->
+ Name = pget(name, Props),
+ remove_vhost(Name, GIntervals),
+ {noreply, State};
+handle_cast({event, #event{type = node_node_deleted, props = Props}}, State) ->
+ Name = pget(route, Props),
+ remove_node_node(Name),
+ {noreply, State}.
+
+handle_info(_Msg, State) ->
+ {noreply, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+remove_connection(Id, BIntervals) ->
+ ets:delete(connection_created_stats, Id),
+ ets:delete(connection_stats, Id),
+ delete_samples(connection_stats_coarse_conn_stats, Id, BIntervals),
+ ok.
+
+remove_channel(Id, BIntervals) ->
+ ets:delete(channel_created_stats, Id),
+ ets:delete(channel_stats, Id),
+ delete_samples(channel_process_stats, Id, BIntervals),
+ delete_samples(channel_stats_fine_stats, Id, BIntervals),
+ delete_samples(channel_stats_deliver_stats, Id, BIntervals),
+ index_delete(consumer_stats, channel, Id),
+ index_delete(channel_exchange_stats_fine_stats, channel, Id),
+ index_delete(channel_queue_stats_deliver_stats, channel, Id),
+ ok.
+
+remove_consumer(Props) ->
+ Id = {pget(queue, Props), pget(channel, Props), pget(consumer_tag, Props)},
+ ets:delete(consumer_stats, Id),
+ cleanup_index(consumer_stats, Id),
+ ok.
+
+remove_exchange(Name, BIntervals) ->
+ delete_samples(exchange_stats_publish_out, Name, BIntervals),
+ delete_samples(exchange_stats_publish_in, Name, BIntervals),
+ index_delete(queue_exchange_stats_publish, exchange, Name),
+ index_delete(channel_exchange_stats_fine_stats, exchange, Name),
+ ok.
+
+remove_queue(Name, BIntervals) ->
+ ets:delete(queue_stats, Name),
+ delete_samples(queue_stats_publish, Name, BIntervals),
+ delete_samples(queue_stats_deliver_stats, Name, BIntervals),
+ delete_samples(queue_process_stats, Name, BIntervals),
+ delete_samples(queue_msg_stats, Name, BIntervals),
+ delete_samples(queue_msg_rates, Name, BIntervals),
+ index_delete(channel_queue_stats_deliver_stats, queue, Name),
+ index_delete(queue_exchange_stats_publish, queue, Name),
+ index_delete(consumer_stats, queue, Name),
+
+ ok.
+
+remove_vhost(Name, GIntervals) ->
+ delete_samples(vhost_stats_coarse_conn_stats, Name, GIntervals),
+ delete_samples(vhost_stats_fine_stats, Name, GIntervals),
+ delete_samples(vhost_stats_deliver_stats, Name, GIntervals),
+ ok.
+
+remove_node_node(Name) ->
+ index_delete(node_node_coarse_stats, node, Name),
+ ok.
+
+intervals(Type, Policies) ->
+ [I || {_, I} <- proplists:get_value(Type, Policies)].
+
+delete_samples(Table, Id, Intervals) ->
+ [ets:delete(Table, {Id, I}) || I <- Intervals],
+ ok.
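+
+%% Sample rows are keyed by {Id, Interval} (one row per retention
+%% interval, see the collector's insert_entry/7), hence the per-interval
+%% deletes above.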
+
+index_delete(Table, Type, Id) ->
+ IndexTable = rabbit_mgmt_metrics_collector:index_table(Table, Type),
+ Keys = ets:lookup(IndexTable, Id),
+ [ begin
+ ets:delete(Table, Key),
+ cleanup_index(Table, Key)
+ end
+ || {_Index, Key} <- Keys ],
+ ets:delete(IndexTable, Id),
+ ok.
+
+cleanup_index(consumer_stats, {Q, Ch, _} = Key) ->
+ delete_index(consumer_stats, queue, {Q, Key}),
+ delete_index(consumer_stats, channel, {Ch, Key}),
+ ok;
+cleanup_index(channel_exchange_stats_fine_stats, {{Ch, Ex}, _} = Key) ->
+ delete_index(channel_exchange_stats_fine_stats, exchange, {Ex, Key}),
+ delete_index(channel_exchange_stats_fine_stats, channel, {Ch, Key}),
+ ok;
+cleanup_index(channel_queue_stats_deliver_stats, {{Ch, Q}, _} = Key) ->
+ delete_index(channel_queue_stats_deliver_stats, queue, {Q, Key}),
+ delete_index(channel_queue_stats_deliver_stats, channel, {Ch, Key}),
+ ok;
+cleanup_index(queue_exchange_stats_publish, {{Q, Ex}, _} = Key) ->
+ delete_index(queue_exchange_stats_publish, queue, {Q, Key}),
+ delete_index(queue_exchange_stats_publish, exchange, {Ex, Key}),
+ ok;
+cleanup_index(node_node_coarse_stats, {{_, Node}, _} = Key) ->
+ delete_index(node_node_coarse_stats, node, {Node, Key}),
+ ok;
+cleanup_index(_, _) -> ok.
+
+delete_index(Table, Index, Obj) ->
+ ets:delete_object(rabbit_mgmt_metrics_collector:index_table(Table, Index),
+ Obj).
+
+pget(Key, List) -> rabbit_misc:pget(Key, List, unknown).
diff --git a/deps/rabbitmq_management_agent/src/rabbit_mgmt_storage.erl b/deps/rabbitmq_management_agent/src/rabbit_mgmt_storage.erl
new file mode 100644
index 0000000000..4c5c8c18ef
--- /dev/null
+++ b/deps/rabbitmq_management_agent/src/rabbit_mgmt_storage.erl
@@ -0,0 +1,57 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+-module(rabbit_mgmt_storage).
+-behaviour(gen_server2).
+-record(state, {}).
+
+-spec start_link() -> rabbit_types:ok_pid_or_error().
+
+-export([start_link/0]).
+-export([reset/0, reset_all/0]).
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
+ code_change/3]).
+
+-include("rabbit_mgmt_metrics.hrl").
+
+%% ETS owner
+start_link() ->
+ gen_server2:start_link({local, ?MODULE}, ?MODULE, [], []).
+
+reset() ->
+ rabbit_log:warning("Resetting RabbitMQ management storage"),
+ [ets:delete_all_objects(IndexTable) || IndexTable <- ?INDEX_TABLES],
+ [ets:delete_all_objects(Table) || {Table, _} <- ?TABLES],
+ _ = rabbit_mgmt_metrics_collector:reset_all(),
+ ok.
+
+reset_all() ->
+ _ = [rpc:call(Node, rabbit_mgmt_storage, reset, [])
+ || Node <- rabbit_nodes:all_running()],
+ ok.
+
+init(_) ->
+ _ = [ets:new(IndexTable, [public, bag, named_table])
+ || IndexTable <- ?INDEX_TABLES],
+ _ = [ets:new(Table, [public, Type, named_table])
+ || {Table, Type} <- ?TABLES],
+ _ = ets:new(rabbit_mgmt_db_cache, [public, set, named_table]),
+ {ok, #state{}}.
+
+handle_call(_Request, _From, State) ->
+ {noreply, State}.
+
+handle_cast(_Msg, State) ->
+ {noreply, State}.
+
+handle_info(_Msg, State) ->
+ {noreply, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
diff --git a/deps/rabbitmq_management_agent/test/exometer_slide_SUITE.erl b/deps/rabbitmq_management_agent/test/exometer_slide_SUITE.erl
new file mode 100644
index 0000000000..abdf24853d
--- /dev/null
+++ b/deps/rabbitmq_management_agent/test/exometer_slide_SUITE.erl
@@ -0,0 +1,631 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2016-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(exometer_slide_SUITE).
+
+-include_lib("proper/include/proper.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, parallel_tests}
+ ].
+
+groups() ->
+ [
+ {parallel_tests, [parallel], [
+ incremental_add_element_basics,
+ last_two_normalises_old_sample_timestamp,
+ incremental_last_two_returns_last_two_completed_samples,
+ incremental_sum,
+ incremental_sum_stale,
+ incremental_sum_stale2,
+ incremental_sum_with_drop,
+ incremental_sum_with_total,
+ foldl_realises_partial_sample,
+ foldl_and_to_list,
+ foldl_and_to_list_incremental,
+ optimize,
+ stale_to_list,
+ to_list_single_after_drop,
+ to_list_drop_and_roll,
+ to_list_with_drop,
+ to_list_simple,
+ foldl_with_drop,
+ sum_single,
+ to_normalized_list,
+ to_normalized_list_no_padding,
+ to_list_in_the_past,
+ sum_mgmt_352,
+ sum_mgmt_352_extra,
+ sum_mgmt_352_peak
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+init_per_suite(Config) ->
+ Config.
+
+end_per_suite(_Config) ->
+ ok.
+
+init_per_group(_, Config) ->
+ Config.
+
+end_per_group(_, _Config) ->
+ ok.
+
+init_per_testcase(_, Config) ->
+ Config.
+
+end_per_testcase(_, _Config) ->
+ ok.
+
+%% -------------------------------------------------------------------
+%% Generators.
+%% -------------------------------------------------------------------
+elements_gen() ->
+ ?LET(Length, oneof([1, 2, 3, 7, 8, 20]),
+ ?LET(Elements, list(vector(Length, int())),
+ [erlang:list_to_tuple(E) || E <- Elements])).
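+
+%% elements_gen/0 produces lists of integer tuples whose arity is drawn
+%% from [1, 2, 3, 7, 8, 20]; each generated list uses a single arity,
+%% mirroring the fixed-arity tuples exometer_slide aggregates.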
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+%% TODO: turn tests into properties
+
+incremental_add_element_basics(_Config) ->
+ Now = exometer_slide:timestamp(),
+ S0 = exometer_slide:new(Now, 10, [{incremental, true},
+ {interval, 100}]),
+
+ [] = exometer_slide:to_list(Now, S0),
+ % add element before next interval
+ S1 = exometer_slide:add_element(Now + 10, {1}, S0),
+
+ [] = exometer_slide:to_list(Now + 20, S1),
+
+ %% to_list is not empty, as we take the 'real' total if a full interval has passed
+ Now100 = Now + 100,
+ [{Now100, {1}}] = exometer_slide:to_list(Now100, S1),
+
+ Then = Now + 101,
+ % add element after interval
+ S2 = exometer_slide:add_element(Then, {1}, S1),
+
+ % contains single element with incremented value
+ [{Then, {2}}] = exometer_slide:to_list(Then, S2).
+
+last_two_normalises_old_sample_timestamp(_Config) ->
+ Now = 0,
+ S0 = exometer_slide:new(Now, 10, [{incremental, true},
+ {interval, 100}]),
+
+ S1 = exometer_slide:add_element(100, {1}, S0),
+ S2 = exometer_slide:add_element(500, {1}, S1),
+
+ [{500, {2}}, {400, {1}}] = exometer_slide:last_two(S2).
+
+
+incremental_last_two_returns_last_two_completed_samples(_Config) ->
+ Now = exometer_slide:timestamp(),
+ S0 = exometer_slide:new(Now, 10, [{incremental, true},
+ {interval, 100}]),
+
+ % add two full elements then a partial
+ Now100 = Now + 100,
+ Now200 = Now + 200,
+ S1 = exometer_slide:add_element(Now100, {1}, S0),
+ S2 = exometer_slide:add_element(Now200, {1}, S1),
+ S3 = exometer_slide:add_element(Now + 210, {1}, S2),
+
+ [{Now200, {2}}, {Now100, {1}}] = exometer_slide:last_two(S3).
+
+incremental_sum(_Config) ->
+ Now = exometer_slide:timestamp(),
+ S1 = lists:foldl(fun (Next, S) ->
+ exometer_slide:add_element(Now + Next, {1}, S)
+ end,
+ exometer_slide:new(Now, 1000, [{incremental, true}, {interval, 100}]),
+ lists:seq(100, 1000, 100)),
+ Now50 = Now - 50,
+ S2 = lists:foldl(fun (Next, S) ->
+ exometer_slide:add_element(Now50 + Next, {1}, S)
+ end,
+ exometer_slide:new(Now50, 1000, [{incremental, true}, {interval, 100}]),
+ lists:seq(100, 1000, 100)),
+ S3 = exometer_slide:sum([S1, S2]),
+
+ 10 = length(exometer_slide:to_list(Now + 1000, S1)),
+ 10 = length(exometer_slide:to_list(Now + 1000, S2)),
+ 10 = length(exometer_slide:to_list(Now + 1000, S3)).
+
+incremental_sum_stale(_Config) ->
+ Now = 0,
+ Slide = exometer_slide:new(Now, 25, [{incremental, true}, {interval, 5}]),
+
+ S1 = lists:foldl(fun (Next, S) ->
+ exometer_slide:add_element(Now + Next, {1}, S)
+ end, Slide, [1, 8, 15, 21, 27]),
+
+ S2 = lists:foldl(fun (Next, S) ->
+ exometer_slide:add_element(Now + Next, {1}, S)
+ end, Slide, [2, 7, 14, 20, 25]),
+ S3 = exometer_slide:sum([S1, S2]),
+ [27,22,17,12,7] = lists:reverse([T || {T, _} <- exometer_slide:to_list(27, S3)]),
+ [10,8,6,4,2] = lists:reverse([V || {_, {V}} <- exometer_slide:to_list(27, S3)]).
+
+incremental_sum_stale2(_Config) ->
+ Now = 0,
+ Slide = exometer_slide:new(Now, 25, [{incremental, true},
+ {max_n, 5},
+ {interval, 5}]),
+
+ S1 = lists:foldl(fun (Next, S) ->
+ exometer_slide:add_element(Now + Next, {1}, S)
+ end, Slide, [5]),
+
+ S2 = lists:foldl(fun (Next, S) ->
+ exometer_slide:add_element(Now + Next, {1}, S)
+ end, Slide, [500, 505, 510, 515, 520, 525, 527]),
+ S3 = exometer_slide:sum([S1, S2], {0}),
+ [500, 505, 510, 515, 520, 525] = [T || {T, _} <- exometer_slide:to_list(525, S3)],
+ [7,6,5,4,3,2] = lists:reverse([V || {_, {V}} <- exometer_slide:to_list(525, S3)]).
+
+incremental_sum_with_drop(_Config) ->
+ Now = 0,
+ Slide = exometer_slide:new(Now, 25, [{incremental, true},
+ {max_n, 5},
+ {interval, 5}]),
+
+ S1 = lists:foldl(fun ({Next, Incr}, S) ->
+ exometer_slide:add_element(Now + Next, {Incr}, S)
+ end, Slide, [{1, 1}, {8, 0}, {15, 0}, {21, 1}, {27, 0}]),
+
+ S2 = lists:foldl(fun (Next, S) ->
+ exometer_slide:add_element(Now + Next, {1}, S)
+ end, Slide, [2, 7, 14, 20, 25]),
+ S3 = exometer_slide:sum([S1, S2]),
+ [27,22,17,12,7] = lists:reverse([T || {T, _} <- exometer_slide:to_list(27, S3)]),
+ [7,6,4,3,2] = lists:reverse([V || {_, {V}} <- exometer_slide:to_list(27, S3)]).
+
+incremental_sum_with_total(_Config) ->
+ Now = 0,
+ Slide = exometer_slide:new(Now, 50, [{incremental, true}, {interval, 5}]),
+
+ S1 = lists:foldl(fun (Next, S) ->
+ exometer_slide:add_element(Now + Next, {1}, S)
+ end, Slide, [5, 10, 15, 20, 25]),
+
+ S2 = lists:foldl(fun (Next, S) ->
+ exometer_slide:add_element(Now + Next, {1}, S)
+ end, Slide, [7, 12, 17, 22, 23]),
+ S3 = exometer_slide:sum([S1, S2]),
+ {10} = exometer_slide:last(S3),
+ [25,20,15,10,5] = lists:reverse([T || {T, _} <- exometer_slide:to_list(26, S3)]),
+ [ 9, 7, 5, 3,1] = lists:reverse([V || {_, {V}} <- exometer_slide:to_list(26, S3)]).
+
+foldl_realises_partial_sample(_Config) ->
+ Now = 0,
+ Slide = exometer_slide:new(Now, 25, [{incremental, true}, {interval, 5}]),
+ S = lists:foldl(fun (Next, S) ->
+ exometer_slide:add_element(Now + Next, {1}, S)
+ end, Slide, [5, 10, 15, 20, 23]),
+ Fun = fun(last, Acc) -> Acc;
+ ({TS, {X}}, Acc) -> [{TS, X} | Acc]
+ end,
+
+ [{25, 5}, {20, 4}, {15, 3}, {10, 2}, {5, 1}] =
+ exometer_slide:foldl(25, 5, Fun, [], S),
+ [{20, 4}, {15, 3}, {10, 2}, {5, 1}] =
+ exometer_slide:foldl(20, 5, Fun, [], S),
+ % do not realise sample unless Now is at least an interval beyond the last
+ % full sample
+ [{20, 4}, {15, 3}, {10, 2}, {5, 1}] =
+ exometer_slide:foldl(23, 5, Fun, [], S).
+
+optimize(_Config) ->
+ Now = 0,
+ Slide = exometer_slide:new(Now, 25, [{interval, 5}, {max_n, 5}]),
+ S = lists:foldl(fun (Next, S) ->
+ exometer_slide:add_element(Now + Next, {Next}, S)
+ end, Slide, [5, 10, 15, 20, 25, 30, 35]),
+ OS = exometer_slide:optimize(S),
+ SRes = exometer_slide:to_list(35, S),
+ OSRes = exometer_slide:to_list(35, OS),
+ SRes = OSRes,
+ ?assert(S =/= OS).
+
+to_list_with_drop(_Config) ->
+ Now = 0,
+ Slide = exometer_slide:new(Now, 25, [{interval, 5},
+ {incremental, true},
+ {max_n, 5}]),
+ S = exometer_slide:add_element(30, {1}, Slide),
+ S2 = exometer_slide:add_element(35, {1}, S),
+ S3 = exometer_slide:add_element(40, {0}, S2),
+ S4 = exometer_slide:add_element(45, {0}, S3),
+ [{30, {1}}, {35, {2}}, {40, {2}}, {45, {2}}] = exometer_slide:to_list(45, S4).
+
+to_list_simple(_Config) ->
+ Now = 0,
+ Slide = exometer_slide:new(Now, 25, [{interval, 5},
+ {incremental, true},
+ {max_n, 5}]),
+ S = exometer_slide:add_element(30, {0}, Slide),
+ S2 = exometer_slide:add_element(35, {0}, S),
+ [{30, {0}}, {35, {0}}] = exometer_slide:to_list(38, S2).
+
+foldl_with_drop(_Config) ->
+ Now = 0,
+ Slide = exometer_slide:new(Now, 25, [{interval, 5},
+ {incremental, true},
+ {max_n, 5}]),
+ S = exometer_slide:add_element(30, {1}, Slide),
+ S2 = exometer_slide:add_element(35, {1}, S),
+ S3 = exometer_slide:add_element(40, {0}, S2),
+ S4 = exometer_slide:add_element(45, {0}, S3),
+ Fun = fun(last, Acc) -> Acc;
+ ({TS, {X}}, Acc) -> [{TS, X} | Acc]
+ end,
+ [{45, 2}, {40, 2}, {35, 2}, {30, 1}] =
+ exometer_slide:foldl(45, 30, Fun, [], S4).
+
+foldl_and_to_list(_Config) ->
+ Now = 0,
+ Tests = [ % {input, expected, query range}
+ {[],
+ [],
+ {0, 10}},
+ {[{5, 1}],
+ [{5, {1}}],
+ {0, 5}},
+ {[{10, 1}],
+ [{10, {1}}],
+ {0, 10}},
+ {[{5, 1}, {10, 2}],
+ [{10, {2}}, {5, {1}}],
+ {0, 10}},
+ {[{5, 0}, {10, 0}], % drop 1
+ [{10, {0}}, {5, {0}}],
+ {0, 10}},
+ {[{5, 2}, {10, 1}, {15, 1}], % drop 2
+ [{15, {1}}, {10, {1}}, {5, {2}}],
+ {0, 15}},
+ {[{10, 0}, {15, 0}, {20, 0}], % drop
+ [{20, {0}}, {15, {0}}, {10, {0}}],
+ {0, 20}},
+ {[{5, 1}, {10, 5}, {15, 5}, {20, 0}], % drop middle
+ [{20, {0}}, {15, {5}}, {10, {5}}, {5, {1}}],
+ {0, 20}},
+ {[{5, 1}, {10, 5}, {15, 5}, {20, 1}], % drop middle filtered
+ [{20, {1}}, {15, {5}}, {10, {5}}],
+ {10, 20}},
+ {[{5, 1}, {10, 2}, {15, 3}, {20, 4}, {25, 4}, {30, 5}], % buffer roll over
+ [{30, {5}}, {25, {4}}, {20, {4}}, {15, {3}}, {10, {2}}, {5, {1}}],
+ {5, 30}}
+ ],
+ Slide = exometer_slide:new(Now, 25, [{interval, 5},
+ {max_n, 5}]),
+ Fun = fun(last, Acc) -> Acc;
+ (V, Acc) -> [V | Acc]
+ end,
+ [begin
+ S = lists:foldl(fun ({T, V}, Acc) ->
+ exometer_slide:add_element(T, {V}, Acc)
+ end, Slide, Inputs),
+ Expected = exometer_slide:foldl(To, From, Fun, [], S),
+ ExpRev = lists:reverse(Expected),
+ ExpRev = exometer_slide:to_list(To, From, S)
+ end || {Inputs, Expected, {From, To}} <- Tests].
+
+foldl_and_to_list_incremental(_Config) ->
+ Now = 0,
+ Tests = [ % {input, expected, query range}
+ {[],
+ [],
+ {0, 10}},
+ {[{5, 1}],
+ [{5, {1}}],
+ {0, 5}},
+ {[{10, 1}],
+ [{10, {1}}],
+ {0, 10}},
+ {[{5, 1}, {10, 1}],
+ [{10, {2}}, {5, {1}}],
+ {0, 10}},
+ {[{5, 0}, {10, 0}], % drop 1
+ [{10, {0}}, {5, {0}}],
+ {0, 10}},
+ {[{5, 1}, {10, 0}, {15, 0}], % drop 2
+ [{15, {1}}, {10, {1}}, {5, {1}}],
+ {0, 15}},
+ {[{10, 0}, {15, 0}, {20, 0}], % drop
+ [{20, {0}}, {15, {0}}, {10, {0}}],
+ {0, 20}},
+ {[{5, 1}, {10, 0}, {15, 0}, {20, 1}], % drop middle
+ [{20, {2}}, {15, {1}}, {10, {1}}, {5, {1}}],
+ {0, 20}},
+ {[{5, 1}, {10, 0}, {15, 0}, {20, 1}], % drop middle filtered
+ [{20, {2}}, {15, {1}}, {10, {1}}],
+ {10, 20}},
+ {[{5, 1}, {10, 1}, {15, 1}, {20, 1}, {25, 0}, {30, 1}], % buffer roll over
+ [{30, {5}}, {25, {4}}, {20, {4}}, {15, {3}}, {10, {2}}, {5, {1}}],
+ {5, 30}}
+ ],
+ Slide = exometer_slide:new(Now, 25, [{interval, 5},
+ {incremental, true},
+ {max_n, 5}]),
+ Fun = fun(last, Acc) -> Acc;
+ (V, Acc) -> [V | Acc]
+ end,
+ [begin
+ S = lists:foldl(fun ({T, V}, Acc) ->
+ exometer_slide:add_element(T, {V}, Acc)
+ end, Slide, Inputs),
+ Expected = exometer_slide:foldl(To, From, Fun, [], S),
+ ExpRev = lists:reverse(Expected),
+ ExpRev = exometer_slide:to_list(To, From, S)
+ end || {Inputs, Expected, {From, To}} <- Tests].
+
+stale_to_list(_Config) ->
+ Now = 0,
+ Slide = exometer_slide:new(Now, 25, [{interval, 5}, {max_n, 5}]),
+ S = exometer_slide:add_element(50, {1}, Slide),
+ S2 = exometer_slide:add_element(55, {1}, S),
+ [] = exometer_slide:to_list(100, S2).
+
+to_list_single_after_drop(_Config) ->
+ Now = 0,
+ Slide = exometer_slide:new(Now, 25, [{interval, 5},
+ {incremental, true},
+ {max_n, 5}]),
+ S = exometer_slide:add_element(5, {0}, Slide),
+ S2 = exometer_slide:add_element(10, {0}, S),
+ S3 = exometer_slide:add_element(15, {1}, S2),
+ Res = exometer_slide:to_list(17, S3),
+ [{5,{0}},{10,{0}},{15,{1}}] = Res.
+
+
+to_list_drop_and_roll(_Config) ->
+ Now = 0,
+ Slide = exometer_slide:new(Now, 10, [{interval, 5},
+ {incremental, true},
+ {max_n, 5}]),
+ S = exometer_slide:add_element(5, {0}, Slide),
+ S2 = exometer_slide:add_element(10, {0}, S),
+ S3 = exometer_slide:add_element(15, {0}, S2),
+ [{10, {0}}, {15, {0}}] = exometer_slide:to_list(17, S3).
+
+
+sum_single(_Config) ->
+ Now = 0,
+ Slide = exometer_slide:new(Now, 25, [{interval, 5},
+ {incremental, true},
+ {max_n, 5}]),
+ S = exometer_slide:add_element(Now + 5, {0}, Slide),
+ S2 = exometer_slide:add_element(Now + 10, {0}, S),
+ Summed = exometer_slide:sum([S2]),
+ [_,_] = exometer_slide:to_list(15, Summed).
+
+
+to_normalized_list(_Config) ->
+ Interval = 5,
+ Tests = [ % {input, expected, query range}
+ {[], % zero pad when slide has never seen any samples
+ [{10, {0}}, {5, {0}}, {0, {0}}],
+ {0, 10}},
+ {[{5, 1}], % zero pad before first known sample
+ [{5, {1}}, {0, {0}}],
+ {0, 5}},
+ {[{10, 1}, {15, 1}], % zero pad before last known sample
+ [{15, {2}}, {10, {1}}, {5, {0}}],
+ {5, 15}},
+ {[{5, 1}, {15, 1}], % insert missing sample using previous total
+ [{15, {2}}, {10, {1}}, {5, {1}}],
+ {5, 15}},
+ % {[{6, 1}, {11, 1}, {16, 1}], % align timestamps with query
+ % [{15, {3}}, {10, {2}}, {5, {1}}, {0, {0}}],
+ % {0, 15}},
+ {[{5, 1}, {10, 1}, {15, 1}, {20, 1}, {25, 1}, {30, 1}], % outside of max_n
+ [{30, {6}}, {25, {5}}, {20, {4}}, {15, {3}}, {10, {2}}], % we cannot possibly be expected to deduce what 10 should be
+ {10, 30}},
+ {[{5, 1}, {20, 1}, {25, 1}], % as long as the past TS 5 sample still exists we should use it for padding
+ [{25, {3}}, {20, {2}}, {15, {1}}, {10, {1}}],
+ {10, 25}},
+ {[{5, 1}, {10, 1}], % pad based on total
+ [{35, {2}}, {30, {2}}],
+ {30, 35}},
+ {[{5, 1}], % make up future values to fill the window
+ [{10, {1}}, {5, {1}}],
+ {5, 10}},
+ {[{5, 1}, {7, 1}], % realise last sample
+ [{10, {2}}, {5, {1}}],
+ {5, 10}}
+ ],
+
+ Slide = exometer_slide:new(0, 20, [{interval, 5},
+ {incremental, true},
+ {max_n, 4}]),
+ [begin
+ S0 = lists:foldl(fun ({T, V}, Acc) ->
+ exometer_slide:add_element(T, {V}, Acc)
+ end, Slide, Inputs),
+ Expected = exometer_slide:to_normalized_list(To, From, Interval, S0, {0}),
+ S = exometer_slide:sum([exometer_slide:optimize(S0)], {0}), % also test it post sum
+ Expected = exometer_slide:to_normalized_list(To, From, Interval, S, {0})
+ end || {Inputs, Expected, {From, To}} <- Tests].
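+
+%% In summary: to_normalized_list/5 aligns samples to the query interval,
+%% fills gaps from the previous known total, and pads missing leading
+%% samples with the supplied empty value ({0} above). Passing no_pad, as
+%% the next test does, disables that padding.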
+
+to_normalized_list_no_padding(_Config) ->
+ Interval = 5,
+ Tests = [ % {input, expected, query range}
+ {[],
+ [],
+ {0, 10}},
+ {[{5, 1}],
+ [{5, {1}}],
+ {0, 5}},
+ {[{5, 1}, {15, 1}],
+ [{15, {2}}, {10, {1}}, {5, {1}}],
+ {5, 15}},
+ {[{10, 1}, {15, 1}],
+ [{15, {2}}, {10, {1}}],
+ {5, 15}},
+ {[{5, 1}, {20, 1}], % NB: as 5 is outside of the query range we can't pick the value up
+ [{20, {2}}],
+ {10, 20}}
+ ],
+
+ Slide = exometer_slide:new(0, 20, [{interval, 5},
+ {incremental, true},
+ {max_n, 4}]),
+ [begin
+ S = lists:foldl(fun ({T, V}, Acc) ->
+ exometer_slide:add_element(T, {V}, Acc)
+ end, Slide, Inputs),
+ Expected = exometer_slide:to_normalized_list(To, From, Interval, S, no_pad)
+ end || {Inputs, Expected, {From, To}} <- Tests].
+
+to_list_in_the_past(_Config) ->
+ Slide = exometer_slide:new(0, 20, [{interval, 5},
+ {incremental, true},
+ {max_n, 4}]),
+ % ensure firstTS is way in the past
+ S0 = exometer_slide:add_element(5, {1}, Slide),
+ S1 = exometer_slide:add_element(105, {0}, S0),
+ S = exometer_slide:add_element(110, {0}, S1), % create drop
+ % query into the past
+ % this could happen if a node with an incorrect clock joins the cluster
+ [] = exometer_slide:to_list(50, 10, S).
+
+sum_mgmt_352(_Config) ->
+ %% In bug mgmt#352 all the samples returned have the same value
+ Slide = sum_mgmt_352_slide(),
+ Last = 1487689330000,
+ First = 1487689270000,
+ Incr = 5000,
+ Empty = {0},
+ Sum = exometer_slide:sum(Last, First, Incr, [Slide], Empty),
+ Values = sets:to_list(sets:from_list(
+ [V || {_, V} <- exometer_slide:buffer(Sum)])),
+ true = (length(Values) == 12),
+ ok.
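+
+%% The buggy code collapsed the returned samples into one repeated value,
+%% so asserting 12 distinct values across the queried range is enough to
+%% catch the regression.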
+
+sum_mgmt_352_extra(_Config) ->
+ %% Tests the case clause preceding the one that fixes mgmt#352
+ %% exometer_slide.erl#L463
+ %% In the buggy version, all but the last sample are the same
+ Slide = sum_mgmt_352_slide_extra(),
+ Last = 1487689330000,
+ First = 1487689260000,
+ Incr = 5000,
+ Empty = {0},
+ Sum = exometer_slide:sum(Last, First, Incr, [Slide], Empty),
+ Values = sets:to_list(sets:from_list(
+ [V || {_, V} <- exometer_slide:buffer(Sum)])),
+ true = (length(Values) == 13),
+ ok.
+
+sum_mgmt_352_peak(_Config) ->
+ %% When buf2 contains data, we were returning a too old sample that
+ %% created a massive rate peak at the beginning of the graph.
+ %% The sample used was captured during a debug session.
+ Slide = sum_mgmt_352_slide_peak(),
+ Last = 1487752040000,
+ First = 1487751980000,
+ Incr = 5000,
+ Empty = {0},
+ Sum = exometer_slide:sum(Last, First, Incr, [Slide], Empty),
+ [{LastV}, {BLastV} | _] =
+ lists:reverse([V || {_, V} <- exometer_slide:buffer(Sum)]),
+ Rate = (BLastV - LastV) div 5,
+ true = (Rate < 20000),
+ ok.
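+
+%% The summed samples are 5 seconds apart (Incr = 5000 ms), so the
+%% difference between two adjacent samples divided by 5 approximates a
+%% per-second rate; the buggy version paired a far older buf2 sample with
+%% the query start, producing an artificial peak well above 20000.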
+
+%% -------------------------------------------------------------------
+%% Util
+%% -------------------------------------------------------------------
+
+ele(TS, V) -> {TS, {V}}.
+
+%% -------------------------------------------------------------------
+%% Data
+%% -------------------------------------------------------------------
+sum_mgmt_352_slide() ->
+ %% Provide slide as is, from a debug session triggering mgmt-352 bug
+ {slide,610000,45,122,true,5000,1487689328468,1487689106834,
+ [{1487689328468,{1574200}},
+ {1487689323467,{1538800}},
+ {1487689318466,{1500800}},
+ {1487689313465,{1459138}},
+ {1487689308463,{1419200}},
+ {1487689303462,{1379600}},
+ {1487689298461,{1340000}},
+ {1487689293460,{1303400}},
+ {1487689288460,{1265600}},
+ {1487689283458,{1231400}},
+ {1487689278457,{1215800}},
+ {1487689273456,{1215200}},
+ {1487689262487,drop},
+ {1487689257486,{1205600}}],
+ [],
+ {1591000}}.
+
+sum_mgmt_352_slide_extra() ->
+ {slide,610000,45,122,true,5000,1487689328468,1487689106834,
+ [{1487689328468,{1574200}},
+ {1487689323467,{1538800}},
+ {1487689318466,{1500800}},
+ {1487689313465,{1459138}},
+ {1487689308463,{1419200}},
+ {1487689303462,{1379600}},
+ {1487689298461,{1340000}},
+ {1487689293460,{1303400}},
+ {1487689288460,{1265600}},
+ {1487689283458,{1231400}},
+ {1487689278457,{1215800}},
+ {1487689273456,{1215200}},
+ {1487689272487,drop},
+ {1487689269486,{1205600}}],
+ [],
+ {1591000}}.
+
+sum_mgmt_352_slide_peak() ->
+ {slide,610000,96,122,true,5000,1487752038481,1487750936863,
+ [{1487752038481,{11994024}},
+ {1487752033480,{11923200}},
+ {1487752028476,{11855800}},
+ {1487752023474,{11765800}},
+ {1487752018473,{11702431}},
+ {1487752013472,{11636200}},
+ {1487752008360,{11579800}},
+ {1487752003355,{11494800}},
+ {1487751998188,{11441400}},
+ {1487751993184,{11381000}},
+ {1487751988180,{11320000}},
+ {1487751983178,{11263000}},
+ {1487751978177,{11187600}},
+ {1487751973172,{11123375}},
+ {1487751968167,{11071800}},
+ {1487751963166,{11006200}},
+ {1487751958162,{10939477}},
+ {1487751953161,{10882400}},
+ {1487751948140,{10819600}},
+ {1487751943138,{10751200}},
+ {1487751938134,{10744400}},
+ {1487751933129,drop},
+ {1487751927807,{10710200}},
+ {1487751922803,{10670000}}],
+ [{1487751553386,{6655800}},
+ {1487751548385,{6580365}},
+ {1487751543384,{6509358}}],
+ {11994024}}.
diff --git a/deps/rabbitmq_management_agent/test/metrics_SUITE.erl b/deps/rabbitmq_management_agent/test/metrics_SUITE.erl
new file mode 100644
index 0000000000..227a04b21c
--- /dev/null
+++ b/deps/rabbitmq_management_agent/test/metrics_SUITE.erl
@@ -0,0 +1,157 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2016-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+-module(metrics_SUITE).
+-compile(export_all).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+all() ->
+ [
+ {group, non_parallel_tests}
+ ].
+
+groups() ->
+ [
+ {non_parallel_tests, [], [
+ node,
+ storage_reset
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+merge_app_env(Config) ->
+ _Config1 = rabbit_ct_helpers:merge_app_env(Config,
+ {rabbit, [
+ {collect_statistics, fine},
+ {collect_statistics_interval, 500}
+ ]}).
+ %% rabbit_ct_helpers:merge_app_env(
+ %% Config1, {rabbitmq_management_agent, [{sample_retention_policies,
+ %% [{global, [{605, 500}]},
+ %% {basic, [{605, 500}]},
+ %% {detailed, [{10, 500}]}] }]}).
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, ?MODULE},
+ {rmq_nodes_count, 2}
+ ]),
+ rabbit_ct_helpers:run_setup_steps(Config1,
+ [ fun merge_app_env/1 ] ++
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_group(_, Config) ->
+ Config.
+
+end_per_group(_, Config) ->
+ Config.
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+read_table_rpc(Config, Table) ->
+ rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, read_table, [Table]).
+
+read_table(Table) ->
+ ets:tab2list(Table).
+
+force_stats() ->
+ rabbit_mgmt_external_stats ! emit_update.
+
+node(Config) ->
+ [A, B] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ % force multiple stats refreshes
+ [ rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, force_stats, [])
+ || _ <- lists:seq(0, 10)],
+ [_] = read_table_rpc(Config, node_persister_metrics),
+ [_] = read_table_rpc(Config, node_coarse_metrics),
+ [_] = read_table_rpc(Config, node_metrics),
+ true = wait_until(
+ fun() ->
+ Tab = read_table_rpc(Config, node_node_metrics),
+ lists:keymember({A, B}, 1, Tab)
+ end, 10).
+
+
+storage_reset(Config) ->
+ %% Ensures that core stats are reset, otherwise consume generates negative values
+ %% Doesn't really test if the reset does anything!
+ {Ch, Q} = publish_msg(Config),
+ wait_until(fun() ->
+ {1, 0, 1} == get_vhost_stats(Config)
+ end),
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_mgmt_storage, reset, []),
+ wait_until(fun() ->
+ {1, 0, 1} == get_vhost_stats(Config)
+ end),
+ consume_msg(Ch, Q),
+ wait_until(fun() ->
+ {0, 0, 0} == get_vhost_stats(Config)
+ end),
+ rabbit_ct_client_helpers:close_channel(Ch).
+
+%% -------------------------------------------------------------------
+%% Helpers
+%% -------------------------------------------------------------------
+
+publish_msg(Config) ->
+ Ch = rabbit_ct_client_helpers:open_channel(Config, 0),
+ #'queue.declare_ok'{queue = Q} =
+ amqp_channel:call(Ch, #'queue.declare'{exclusive = true}),
+ amqp_channel:cast(Ch, #'basic.publish'{routing_key = Q},
+ #amqp_msg{props = #'P_basic'{delivery_mode = 2},
+ payload = Q}),
+ {Ch, Q}.
+
+consume_msg(Ch, Q) ->
+ amqp_channel:subscribe(Ch, #'basic.consume'{queue = Q,
+ no_ack = true}, self()),
+ receive #'basic.consume_ok'{} -> ok
+ end,
+ receive {#'basic.deliver'{}, #amqp_msg{payload = Q}} ->
+ ok
+ end.
+
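+%% Polls Fun once per second until it returns true, giving up (and
+%% returning false) after N attempts; the default allows two minutes.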
+wait_until(Fun) ->
+ wait_until(Fun, 120).
+
+wait_until(_, 0) ->
+ false;
+wait_until(Fun, N) ->
+ case Fun() of
+ true ->
+ true;
+ false ->
+ timer:sleep(1000),
+ wait_until(Fun, N-1)
+ end.
+
+get_vhost_stats(Config) ->
+ Dict = rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_mgmt_data, overview_data,
+ [none, all ,{no_range, no_range, no_range,no_range},
+ [<<"/">>]]),
+ {ok, {VhostMsgStats, _}} = maps:find(vhost_msg_stats, Dict),
+ exometer_slide:last(VhostMsgStats).
diff --git a/deps/rabbitmq_management_agent/test/rabbit_mgmt_gc_SUITE.erl b/deps/rabbitmq_management_agent/test/rabbit_mgmt_gc_SUITE.erl
new file mode 100644
index 0000000000..b5ee4e9ce2
--- /dev/null
+++ b/deps/rabbitmq_management_agent/test/rabbit_mgmt_gc_SUITE.erl
@@ -0,0 +1,626 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mgmt_gc_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include("rabbit_mgmt_metrics.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, non_parallel_tests}
+ ].
+
+groups() ->
+ [
+ {non_parallel_tests, [],
+ [ queue_stats,
+ quorum_queue_stats,
+ connection_stats,
+ channel_stats,
+ vhost_stats,
+ exchange_stats,
+ node_stats,
+ consumer_stats
+ ]
+ }
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+merge_app_env(Config) ->
+ Config1 = rabbit_ct_helpers:merge_app_env(
+ Config,
+ {rabbitmq_management_agent, [
+ {metrics_gc_interval, 6000000},
+ {rates_mode, detailed},
+ {sample_retention_policies,
+ %% List of {MaxAgeInSeconds, SampleEveryNSeconds}
+ [{global, [{605, 1}, {3660, 60}]},
+ {basic, [{605, 1}, {3600, 60}]},
+ {detailed, [{605, 1}]}] }]}),
+ rabbit_ct_helpers:merge_app_env(Config1,
+ {rabbit, [
+ {collect_statistics_interval, 100},
+ {collect_statistics, fine}
+ ]}).
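+
+%% A retention policy entry such as {605, 1} keeps 605 seconds of samples
+%% taken every second. metrics_gc_interval is set very high so the gc
+%% never fires on its own and each test triggers it explicitly by sending
+%% start_gc to rabbit_mgmt_gc.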
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, ?MODULE},
+ {rmq_nodes_count, 2},
+ {rmq_nodes_clustered, true}
+ ]),
+ rabbit_ct_helpers:run_setup_steps(
+ Config1,
+ [ fun merge_app_env/1 ] ++ rabbit_ct_broker_helpers:setup_steps()).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(
+ Config,
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_group(_, Config) ->
+ Config.
+
+end_per_group(_, Config) ->
+ Config.
+
+init_per_testcase(quorum_queue_stats = Testcase, Config) ->
+ case rabbit_ct_broker_helpers:enable_feature_flag(Config, quorum_queue) of
+ ok ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase),
+ rabbit_ct_helpers:run_steps(
+ Config, rabbit_ct_client_helpers:setup_steps());
+ Skip ->
+ Skip
+ end;
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase),
+ rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase),
+ rabbit_ct_helpers:run_teardown_steps(
+ Config,
+ rabbit_ct_client_helpers:teardown_steps()).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+queue_stats(Config) ->
+ A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, A),
+
+ amqp_channel:call(Ch, #'queue.declare'{queue = <<"queue_stats">>}),
+ amqp_channel:cast(Ch, #'basic.publish'{routing_key = <<"queue_stats">>},
+ #amqp_msg{payload = <<"hello">>}),
+ {#'basic.get_ok'{}, _} = amqp_channel:call(Ch, #'basic.get'{queue = <<"queue_stats">>,
+ no_ack = true}),
+ timer:sleep(1150),
+
+ Q = q(<<"myqueue">>),
+ X = x(<<"">>),
+
+ rabbit_ct_broker_helpers:rpc(Config, A, ets, insert,
+ [queue_stats, {Q, infos}]),
+ rabbit_ct_broker_helpers:rpc(Config, A, ets, insert,
+ [queue_stats_publish, {{Q, 5}, slide}]),
+ rabbit_ct_broker_helpers:rpc(Config, A, ets, insert,
+ [queue_msg_stats, {{Q, 5}, slide}]),
+ rabbit_ct_broker_helpers:rpc(Config, A, ets, insert,
+ [queue_process_stats, {{Q, 5}, slide}]),
+ rabbit_ct_broker_helpers:rpc(Config, A, ets, insert,
+ [queue_msg_rates, {{Q, 5}, slide}]),
+ rabbit_ct_broker_helpers:rpc(Config, A, ets, insert,
+ [queue_stats_deliver_stats, {{Q, 5}, slide}]),
+ rabbit_ct_broker_helpers:rpc(Config, A, ets, insert,
+ [queue_exchange_stats_publish,
+ {{{Q, X}, 5}, slide}]),
+
+ [_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [queue_stats, Q]),
+ [_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [queue_stats_publish, {Q, 5}]),
+ [_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [queue_msg_stats, {Q, 5}]),
+ [_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [queue_process_stats, {Q, 5}]),
+ [_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [queue_msg_rates, {Q, 5}]),
+ [_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [queue_stats_deliver_stats, {Q, 5}]),
+ [_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [queue_exchange_stats_publish, {{Q, X}, 5}]),
+
+ %% Trigger gc. When the gen_server:call returns, the gc has already finished.
+ rabbit_ct_broker_helpers:rpc(Config, A, erlang, send, [rabbit_mgmt_gc, start_gc]),
+ rabbit_ct_broker_helpers:rpc(Config, A, gen_server, call, [rabbit_mgmt_gc, test]),
+
+ [_|_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, tab2list,
+ [queue_stats]),
+ [_|_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, tab2list,
+ [queue_stats_publish]),
+ [_|_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, tab2list,
+ [queue_msg_stats]),
+ [_|_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, tab2list,
+ [queue_process_stats]),
+ [_|_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, tab2list,
+ [queue_msg_rates]),
+ [_|_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, tab2list,
+ [queue_stats_deliver_stats]),
+ [_|_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, tab2list,
+ [queue_exchange_stats_publish]),
+
+ [] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [queue_stats, Q]),
+ [] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [queue_stats_publish, {Q, 5}]),
+ [] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [queue_msg_stats, {Q, 5}]),
+ [] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [queue_process_stats, {Q, 5}]),
+ [] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [queue_msg_rates, {Q, 5}]),
+ [] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [queue_stats_deliver_stats, {Q, 5}]),
+ [] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [queue_exchange_stats_publish, {{Q, X}, 5}]),
+
+ amqp_channel:call(Ch, #'queue.delete'{queue = <<"queue_stats">>}),
+ rabbit_ct_client_helpers:close_channel(Ch),
+
+ ok.
+
+quorum_queue_stats(Config) ->
+ A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ B = rabbit_ct_broker_helpers:get_node_config(Config, 1, nodename),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, A),
+
+ amqp_channel:call(Ch,
+ #'queue.declare'{queue = <<"quorum_queue_stats">>,
+ durable = true,
+ arguments = [{<<"x-queue-type">>, longstr, <<"quorum">>}]}),
+ timer:sleep(1150),
+
+ Q = q(<<"quorum_queue_stats">>),
+
+ rabbit_ct_broker_helpers:rpc(Config, A, ets, insert,
+ [queue_stats, {Q, infos}]),
+ rabbit_ct_broker_helpers:rpc(Config, A, ets, insert,
+ [queue_msg_stats, {{Q, 5}, slide}]),
+ rabbit_ct_broker_helpers:rpc(Config, A, ets, insert,
+ [queue_process_stats, {{Q, 5}, slide}]),
+ rabbit_ct_broker_helpers:rpc(Config, A, ets, insert,
+ [queue_msg_rates, {{Q, 5}, slide}]),
+
+ rabbit_ct_broker_helpers:rpc(Config, B, ets, insert,
+ [queue_stats, {Q, infos}]),
+ rabbit_ct_broker_helpers:rpc(Config, B, ets, insert,
+ [queue_msg_stats, {{Q, 5}, slide}]),
+ rabbit_ct_broker_helpers:rpc(Config, B, ets, insert,
+ [queue_process_stats, {{Q, 5}, slide}]),
+ rabbit_ct_broker_helpers:rpc(Config, B, ets, insert,
+ [queue_msg_rates, {{Q, 5}, slide}]),
+
+ [_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [queue_stats, Q]),
+ [_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [queue_msg_stats, {Q, 5}]),
+ [_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [queue_process_stats, {Q, 5}]),
+ [_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [queue_msg_rates, {Q, 5}]),
+
+ [_] = rabbit_ct_broker_helpers:rpc(Config, B, ets, lookup,
+ [queue_stats, Q]),
+ [_] = rabbit_ct_broker_helpers:rpc(Config, B, ets, lookup,
+ [queue_msg_stats, {Q, 5}]),
+ [_] = rabbit_ct_broker_helpers:rpc(Config, B, ets, lookup,
+ [queue_process_stats, {Q, 5}]),
+ [_] = rabbit_ct_broker_helpers:rpc(Config, B, ets, lookup,
+ [queue_msg_rates, {Q, 5}]),
+
+ {ok, _, {_, Leader}} = rabbit_ct_broker_helpers:rpc(Config, B, ra, members,
+ [{'%2F_quorum_queue_stats', B}]),
+ [Follower] = [A, B] -- [Leader],
+
+ %% Trigger gc. When the gen_server:call returns, the gc has already finished.
+ rabbit_ct_broker_helpers:rpc(Config, Leader, erlang, send, [rabbit_mgmt_gc, start_gc]),
+ rabbit_ct_broker_helpers:rpc(Config, Leader, gen_server, call, [rabbit_mgmt_gc, test]),
+ rabbit_ct_broker_helpers:rpc(Config, Follower, erlang, send, [rabbit_mgmt_gc, start_gc]),
+ rabbit_ct_broker_helpers:rpc(Config, Follower, gen_server, call, [rabbit_mgmt_gc, test]),
+
+ [] = rabbit_ct_broker_helpers:rpc(Config, Follower, ets, lookup,
+ [queue_stats, Q]),
+ [] = rabbit_ct_broker_helpers:rpc(Config, Follower, ets, lookup,
+ [queue_msg_stats, {Q, 5}]),
+ [] = rabbit_ct_broker_helpers:rpc(Config, Follower, ets, lookup,
+ [queue_process_stats, {Q, 5}]),
+ [] = rabbit_ct_broker_helpers:rpc(Config, Follower, ets, lookup,
+ [queue_msg_rates, {Q, 5}]),
+
+ [_] = rabbit_ct_broker_helpers:rpc(Config, Leader, ets, lookup,
+ [queue_stats, Q]),
+ [_] = rabbit_ct_broker_helpers:rpc(Config, Leader, ets, lookup,
+ [queue_msg_stats, {Q, 5}]),
+ [_] = rabbit_ct_broker_helpers:rpc(Config, Leader, ets, lookup,
+ [queue_process_stats, {Q, 5}]),
+ [_] = rabbit_ct_broker_helpers:rpc(Config, Leader, ets, lookup,
+ [queue_msg_rates, {Q, 5}]),
+
+ amqp_channel:call(Ch, #'queue.delete'{queue = <<"quorum_queue_stats">>}),
+ rabbit_ct_client_helpers:close_channel(Ch),
+
+ ok.
+
+connection_stats(Config) ->
+ A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, A),
+
+ amqp_channel:call(Ch, #'queue.declare'{queue = <<"queue_stats">>}),
+ amqp_channel:cast(Ch, #'basic.publish'{routing_key = <<"queue_stats">>},
+ #amqp_msg{payload = <<"hello">>}),
+ timer:sleep(1150),
+
+ DeadPid = rabbit_ct_broker_helpers:rpc(Config, A, ?MODULE, dead_pid, []),
+
+ rabbit_ct_broker_helpers:rpc(Config, A, ets, insert,
+ [connection_stats_coarse_conn_stats,
+ {{DeadPid, 5}, slide}]),
+ rabbit_ct_broker_helpers:rpc(Config, A, ets, insert,
+ [connection_created_stats, {DeadPid, name, infos}]),
+ rabbit_ct_broker_helpers:rpc(Config, A, ets, insert,
+ [connection_stats, {DeadPid, infos}]),
+
+ [_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [connection_stats_coarse_conn_stats, {DeadPid, 5}]),
+ [_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [connection_created_stats, DeadPid]),
+ [_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [connection_stats, DeadPid]),
+
+ %% Trigger gc. When the gen_server:call returns, the gc has already finished.
+ rabbit_ct_broker_helpers:rpc(Config, A, erlang, send, [rabbit_mgmt_gc, start_gc]),
+ rabbit_ct_broker_helpers:rpc(Config, A, gen_server, call, [rabbit_mgmt_gc, test]),
+
+ [] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [connection_stats_coarse_conn_stats, {DeadPid, 5}]),
+ [] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [connection_created_stats, DeadPid]),
+ [] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [connection_stats, DeadPid]),
+
+ [_|_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, tab2list,
+ [connection_stats_coarse_conn_stats]),
+ [_|_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, tab2list,
+ [connection_created_stats]),
+ [_|_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, tab2list,
+ [connection_stats]),
+
+ amqp_channel:call(Ch, #'queue.delete'{queue = <<"queue_stats">>}),
+ rabbit_ct_client_helpers:close_channel(Ch),
+
+ ok.
+
+channel_stats(Config) ->
+ A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, A),
+
+ amqp_channel:call(Ch, #'queue.declare'{queue = <<"queue_stats">>}),
+ amqp_channel:cast(Ch, #'basic.publish'{routing_key = <<"queue_stats">>},
+ #amqp_msg{payload = <<"hello">>}),
+ {#'basic.get_ok'{}, _} = amqp_channel:call(Ch, #'basic.get'{queue = <<"queue_stats">>,
+ no_ack=true}),
+ timer:sleep(1150),
+
+ DeadPid = rabbit_ct_broker_helpers:rpc(Config, A, ?MODULE, dead_pid, []),
+
+ X = x(<<"myexchange">>),
+
+ rabbit_ct_broker_helpers:rpc(Config, A, ets, insert,
+ [channel_created_stats, {DeadPid, name, infos}]),
+ rabbit_ct_broker_helpers:rpc(Config, A, ets, insert,
+ [channel_stats, {DeadPid, infos}]),
+ rabbit_ct_broker_helpers:rpc(Config, A, ets, insert,
+ [channel_stats_fine_stats, {{DeadPid, 5}, slide}]),
+ rabbit_ct_broker_helpers:rpc(Config, A, ets, insert,
+ [channel_exchange_stats_fine_stats,
+ {{{DeadPid, X}, 5}, slide}]),
+ rabbit_ct_broker_helpers:rpc(Config, A, ets, insert,
+ [channel_queue_stats_deliver_stats,
+ {{{DeadPid, X}, 5}, slide}]),
+ rabbit_ct_broker_helpers:rpc(Config, A, ets, insert,
+ [channel_stats_deliver_stats,
+ {{DeadPid, 5}, slide}]),
+ rabbit_ct_broker_helpers:rpc(Config, A, ets, insert,
+ [channel_process_stats,
+ {{DeadPid, 5}, slide}]),
+
+ [_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [channel_created_stats, DeadPid]),
+ [_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [channel_stats, DeadPid]),
+ [_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [channel_stats_fine_stats, {DeadPid, 5}]),
+ [_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [channel_exchange_stats_fine_stats,
+ {{DeadPid, X}, 5}]),
+ [_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [channel_queue_stats_deliver_stats,
+ {{DeadPid, X}, 5}]),
+ [_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [channel_stats_deliver_stats,
+ {DeadPid, 5}]),
+ [_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [channel_process_stats,
+ {DeadPid, 5}]),
+
+ %% Trigger gc. When the gen_server:call returns, the gc has already finished.
+ rabbit_ct_broker_helpers:rpc(Config, A, erlang, send, [rabbit_mgmt_gc, start_gc]),
+ rabbit_ct_broker_helpers:rpc(Config, A, gen_server, call, [rabbit_mgmt_gc, test]),
+
+ [_|_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, tab2list,
+ [channel_created_stats]),
+ [_|_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, tab2list,
+ [channel_stats]),
+ [_|_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, tab2list,
+ [channel_stats_fine_stats]),
+ [_|_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, tab2list,
+ [channel_exchange_stats_fine_stats]),
+ [_|_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, tab2list,
+ [channel_queue_stats_deliver_stats]),
+ [_|_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, tab2list,
+ [channel_stats_deliver_stats]),
+ [_|_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, tab2list,
+ [channel_process_stats]),
+
+ [] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [channel_created_stats, DeadPid]),
+ [] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [channel_stats, DeadPid]),
+ [] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [channel_stats_fine_stats, {DeadPid, 5}]),
+ [] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [channel_exchange_stats_fine_stats,
+ {{DeadPid, X}, 5}]),
+ [] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [channel_queue_stats_deliver_stats,
+ {{DeadPid, X}, 5}]),
+ [] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [channel_stats_deliver_stats,
+ {DeadPid, 5}]),
+ [] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [channel_process_stats,
+ {DeadPid, 5}]),
+
+ amqp_channel:call(Ch, #'queue.delete'{queue = <<"queue_stats">>}),
+ rabbit_ct_client_helpers:close_channel(Ch),
+
+ ok.
+
+vhost_stats(Config) ->
+ A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, A),
+
+ amqp_channel:call(Ch, #'queue.declare'{queue = <<"queue_stats">>}),
+ amqp_channel:cast(Ch, #'basic.publish'{routing_key = <<"queue_stats">>},
+ #amqp_msg{payload = <<"hello">>}),
+ {#'basic.get_ok'{}, _} = amqp_channel:call(Ch, #'basic.get'{queue = <<"queue_stats">>,
+ no_ack=true}),
+ timer:sleep(1150),
+
+ VHost = <<"myvhost">>,
+
+ rabbit_ct_broker_helpers:rpc(Config, A, ets, insert,
+ [vhost_stats_coarse_conn_stats,
+ {{VHost, 5}, slide}]),
+ rabbit_ct_broker_helpers:rpc(Config, A, ets, insert,
+ [vhost_stats_fine_stats,
+ {{VHost, 5}, slide}]),
+ rabbit_ct_broker_helpers:rpc(Config, A, ets, insert,
+ [vhost_stats_deliver_stats,
+ {{VHost, 5}, slide}]),
+ rabbit_ct_broker_helpers:rpc(Config, A, ets, insert, [vhost_msg_stats,
+ {{VHost, 5}, slide}]),
+ rabbit_ct_broker_helpers:rpc(Config, A, ets, insert, [vhost_msg_rates,
+ {{VHost, 5}, slide}]),
+
+ [_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [vhost_stats_coarse_conn_stats, {VHost, 5}]),
+ [_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [vhost_stats_fine_stats, {VHost, 5}]),
+ [_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [vhost_stats_deliver_stats, {VHost, 5}]),
+ [_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [vhost_msg_stats, {VHost, 5}]),
+ [_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [vhost_msg_rates, {VHost, 5}]),
+
+ %% Trigger gc. When the gen_server:call returns, the gc has already finished.
+ rabbit_ct_broker_helpers:rpc(Config, A, erlang, send, [rabbit_mgmt_gc, start_gc]),
+ rabbit_ct_broker_helpers:rpc(Config, A, gen_server, call, [rabbit_mgmt_gc, test]),
+
+ [] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [vhost_stats_coarse_conn_stats, {VHost, 5}]),
+ [] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [vhost_stats_fine_stats, {VHost, 5}]),
+ [] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [vhost_stats_deliver_stats, {VHost, 5}]),
+ [] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [vhost_msg_stats, {VHost, 5}]),
+ [] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [vhost_msg_rates, {VHost, 5}]),
+
+ [_|_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, tab2list,
+ [vhost_stats_coarse_conn_stats]),
+ [_|_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, tab2list,
+ [vhost_stats_fine_stats]),
+ [_|_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, tab2list,
+ [vhost_stats_deliver_stats]),
+ [_|_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, tab2list,
+ [vhost_msg_stats]),
+ [_|_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, tab2list,
+ [vhost_msg_rates]),
+
+ amqp_channel:call(Ch, #'queue.delete'{queue = <<"queue_stats">>}),
+ rabbit_ct_client_helpers:close_channel(Ch),
+
+ ok.
+
+exchange_stats(Config) ->
+ A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, A),
+
+ amqp_channel:call(Ch, #'queue.declare'{queue = <<"queue_stats">>}),
+ amqp_channel:cast(Ch, #'basic.publish'{routing_key = <<"queue_stats">>},
+ #amqp_msg{payload = <<"hello">>}),
+ {#'basic.get_ok'{}, _} = amqp_channel:call(Ch, #'basic.get'{queue = <<"queue_stats">>,
+ no_ack=true}),
+ timer:sleep(1150),
+
+ Exchange = x(<<"myexchange">>),
+
+ rabbit_ct_broker_helpers:rpc(Config, A, ets, insert,
+ [exchange_stats_publish_out,
+ {{Exchange, 5}, slide}]),
+ rabbit_ct_broker_helpers:rpc(Config, A, ets, insert,
+ [exchange_stats_publish_in,
+ {{Exchange, 5}, slide}]),
+
+ [_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [exchange_stats_publish_out, {Exchange, 5}]),
+ [_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [exchange_stats_publish_in, {Exchange, 5}]),
+
+ %% Trigger gc. When the gen_server:call returns, the gc has already finished.
+ rabbit_ct_broker_helpers:rpc(Config, A, erlang, send, [rabbit_mgmt_gc, start_gc]),
+ rabbit_ct_broker_helpers:rpc(Config, A, gen_server, call, [rabbit_mgmt_gc, test]),
+
+ [] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [exchange_stats_publish_out, {Exchange, 5}]),
+ [] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [exchange_stats_publish_in, {Exchange, 5}]),
+
+ [_|_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, tab2list,
+ [exchange_stats_publish_out]),
+ [_|_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, tab2list,
+ [exchange_stats_publish_in]),
+
+ amqp_channel:call(Ch, #'queue.delete'{queue = <<"queue_stats">>}),
+ rabbit_ct_client_helpers:close_channel(Ch),
+
+ ok.
+
+node_stats(Config) ->
+ A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+
+ timer:sleep(150),
+
+ Node = 'mynode',
+
+ rabbit_ct_broker_helpers:rpc(Config, A, ets, insert, [node_stats, {Node, infos}]),
+ rabbit_ct_broker_helpers:rpc(Config, A, ets, insert, [node_coarse_stats,
+ {{Node, 5}, slide}]),
+ rabbit_ct_broker_helpers:rpc(Config, A, ets, insert, [node_persister_stats,
+ {{Node, 5}, slide}]),
+ rabbit_ct_broker_helpers:rpc(Config, A, ets, insert, [node_node_stats,
+ {{A, Node}, infos}]),
+ rabbit_ct_broker_helpers:rpc(Config, A, ets, insert, [node_node_coarse_stats,
+ {{{A, Node}, 5}, slide}]),
+
+ [_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [node_stats, Node]),
+ [_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [node_coarse_stats, {Node, 5}]),
+ [_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [node_persister_stats, {Node, 5}]),
+ [_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [node_node_stats, {A, Node}]),
+ [_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [node_node_coarse_stats, {{A, Node}, 5}]),
+
+ %% Trigger gc. When the gen_server:call returns, the gc has already finished.
+ rabbit_ct_broker_helpers:rpc(Config, A, erlang, send, [rabbit_mgmt_gc, start_gc]),
+ rabbit_ct_broker_helpers:rpc(Config, A, gen_server, call, [rabbit_mgmt_gc, test]),
+
+ [] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [node_stats, Node]),
+ [] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [node_coarse_stats, {Node, 5}]),
+ [] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [node_persister_stats, {Node, 5}]),
+ [] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [node_node_stats, {A, Node}]),
+ [] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup,
+ [node_node_coarse_stats, {{A, Node}, 5}]),
+
+ [_|_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, tab2list,
+ [node_stats]),
+ [_|_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, tab2list,
+ [node_coarse_stats]),
+ [_|_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, tab2list,
+ [node_persister_stats]),
+ [_|_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, tab2list,
+ [node_node_stats]),
+ [_|_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, tab2list,
+ [node_node_coarse_stats]),
+
+ ok.
+
+consumer_stats(Config) ->
+ A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, A),
+
+ amqp_channel:call(Ch, #'queue.declare'{queue = <<"queue_stats">>}),
+ amqp_channel:call(Ch, #'basic.consume'{queue = <<"queue_stats">>}),
+ timer:sleep(1150),
+
+ DeadPid = rabbit_ct_broker_helpers:rpc(Config, A, ?MODULE, dead_pid, []),
+ Q = q(<<"queue_stats">>),
+
+ Id = {Q, DeadPid, tag},
+ rabbit_ct_broker_helpers:rpc(Config, A, ets, insert,
+ [consumer_stats, {Id, infos}]),
+
+ [_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup, [consumer_stats, Id]),
+
+ %% Trigger gc. When the gen_server:call returns, the gc has already finished.
+ rabbit_ct_broker_helpers:rpc(Config, A, erlang, send, [rabbit_mgmt_gc, start_gc]),
+ rabbit_ct_broker_helpers:rpc(Config, A, gen_server, call, [rabbit_mgmt_gc, test]),
+
+ [_|_] = rabbit_ct_broker_helpers:rpc(Config, A, ets, tab2list,
+ [consumer_stats]),
+
+ [] = rabbit_ct_broker_helpers:rpc(Config, A, ets, lookup, [consumer_stats, Id]),
+
+ amqp_channel:call(Ch, #'queue.delete'{queue = <<"queue_stats">>}),
+ rabbit_ct_client_helpers:close_channel(Ch),
+
+ ok.
+
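+%% Returns the pid of a process that exits immediately, so by the time
+%% the stats rows are inserted it refers to a dead process.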
+dead_pid() ->
+ spawn(fun() -> ok end).
+
+q(Name) ->
+ #resource{ virtual_host = <<"/">>,
+ kind = queue,
+ name = Name }.
+
+x(Name) ->
+ #resource{ virtual_host = <<"/">>,
+ kind = exchange,
+ name = Name }.
diff --git a/deps/rabbitmq_management_agent/test/rabbit_mgmt_slide_SUITE.erl b/deps/rabbitmq_management_agent/test/rabbit_mgmt_slide_SUITE.erl
new file mode 100644
index 0000000000..0261606bd5
--- /dev/null
+++ b/deps/rabbitmq_management_agent/test/rabbit_mgmt_slide_SUITE.erl
@@ -0,0 +1,174 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2016-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mgmt_slide_SUITE).
+
+-include_lib("proper/include/proper.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, parallel_tests}
+ ].
+
+groups() ->
+ [
+ {parallel_tests, [parallel], [
+ last_two_test,
+ last_two_incremental_test,
+ sum_test,
+ sum_incremental_test
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ Config.
+
+end_per_suite(_Config) ->
+ ok.
+
+init_per_group(_, Config) ->
+ Config.
+
+end_per_group(_, _Config) ->
+ ok.
+
+init_per_testcase(_, Config) ->
+ Config.
+
+end_per_testcase(_, _Config) ->
+ ok.
+
+%% -------------------------------------------------------------------
+%% Generators.
+%% -------------------------------------------------------------------
+elements_gen() ->
+ ?LET(Length, oneof([1, 2, 3, 7, 8, 20]),
+ ?LET(Elements, list(vector(Length, int())),
+ [erlang:list_to_tuple(E) || E <- Elements])).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+last_two_test(_Config) ->
+ rabbit_ct_proper_helpers:run_proper(fun prop_last_two/0, [], 100).
+
+prop_last_two() ->
+ ?FORALL(
+ Elements, elements_gen(),
+ begin
+ Interval = 1,
+ Incremental = false,
+ {_LastTS, Slide} = new_slide(Interval, Incremental, Elements),
+ Expected = last_two(Elements),
+ ValuesOnly = [V || {_Timestamp, V} <- exometer_slide:last_two(Slide)],
+ ?WHENFAIL(io:format("Last two values obtained: ~p~nExpected: ~p~n"
+ "Slide: ~p~n", [ValuesOnly, Expected, Slide]),
+ Expected == ValuesOnly)
+ end).
+
+last_two_incremental_test(_Config) ->
+ rabbit_ct_proper_helpers:run_proper(fun prop_last_two_incremental/0, [], 100).
+
+prop_last_two_incremental() ->
+ ?FORALL(
+ Elements, non_empty(elements_gen()),
+ begin
+ Interval = 1,
+ Incremental = true,
+ {_LastTS, Slide} = new_slide(Interval, Incremental, Elements),
+ [{_Timestamp, Values} | _] = exometer_slide:last_two(Slide),
+ Expected = add_elements(Elements),
+ ?WHENFAIL(io:format("Expected a total of: ~p~nGot: ~p~n"
+ "Slide: ~p~n", [Expected, Values, Slide]),
+ Values == Expected)
+ end).
+
+sum_incremental_test(_Config) ->
+ rabbit_ct_proper_helpers:run_proper(fun prop_sum/1, [true], 100).
+
+sum_test(_Config) ->
+ rabbit_ct_proper_helpers:run_proper(fun prop_sum/1, [false], 100).
+
+prop_sum(Inc) ->
+ ?FORALL(
+ {Elements, Number}, {non_empty(elements_gen()), ?SUCHTHAT(I, int(), I > 0)},
+ begin
+ Interval = 1,
+ {LastTS, Slide} = new_slide(Interval, Inc, Elements),
+ %% Add the same so the timestamp matches. As the timestamps are handled
+ %% internally, we cannot guarantee on which interval they go otherwise
+ %% (unless we manually manipulate the slide content).
+ Sum = exometer_slide:sum([Slide || _ <- lists:seq(1, Number)]),
+ Values = [V || {_TS, V} <- exometer_slide:to_list(LastTS + 1, Sum)],
+ Expected = expected_sum(Slide, LastTS + 1, Number, Interval, Inc),
+ ?WHENFAIL(io:format("Expected: ~p~nGot: ~p~nSlide:~p~n",
+ [Expected, Values, Slide]),
+ Values == Expected)
+ end).
+
+expected_sum(Slide, Now, Number, _Int, false) ->
+ [sum_n_times(V, Number) || {_TS, V} <- exometer_slide:to_list(Now, Slide)];
+expected_sum(Slide, Now, Number, Int, true) ->
+ [{TSfirst, First} = F | Rest] = All = exometer_slide:to_list(Now, Slide),
+ {TSlast, _Last} = case Rest of
+ [] ->
+ F;
+ _ ->
+ lists:last(Rest)
+ end,
+ Seq = lists:seq(TSfirst, TSlast, Int),
+ {Expected, _} = lists:foldl(fun(TS0, {Acc, Previous}) ->
+ Actual = proplists:get_value(TS0, All, Previous),
+ {[sum_n_times(Actual, Number) | Acc], Actual}
+ end, {[], First}, Seq),
+ lists:reverse(Expected).
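+
+%% expected_sum/5 walks every interval between the first and last
+%% timestamps, carrying the previous sample forward where a timestamp is
+%% missing, and sums each sample with itself Number times (one addend per
+%% copy of the slide passed to exometer_slide:sum/1).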
+%% -------------------------------------------------------------------
+%% Helpers
+%% -------------------------------------------------------------------
+new_slide(Interval, Incremental, Elements) ->
+ new_slide(Interval, Interval, Incremental, Elements).
+
+new_slide(PublishInterval, Interval, Incremental, Elements) ->
+ Now = 0,
+ Slide = exometer_slide:new(Now, 60 * 1000, [{interval, Interval},
+ {incremental, Incremental}]),
+ lists:foldl(
+ fun(E, {TS0, Acc}) ->
+ TS1 = TS0 + PublishInterval,
+ {TS1, exometer_slide:add_element(TS1, E, Acc)}
+ end, {Now, Slide}, Elements).
+
+last_two(Elements) when length(Elements) >= 2 ->
+ [F, S | _] = lists:reverse(Elements),
+ [F, S];
+last_two(Elements) ->
+ Elements.
+
+add_elements([H | T]) ->
+ add_elements(T, H).
+
+add_elements([], Acc) ->
+ Acc;
+add_elements([Tuple | T], Acc) ->
+ add_elements(T, sum(Tuple, Acc)).
+
+sum(T1, T2) ->
+ list_to_tuple(lists:zipwith(fun(A, B) -> A + B end, tuple_to_list(T1), tuple_to_list(T2))).
+
+sum_n_times(V, N) ->
+ sum_n_times(V, V, N - 1).
+
+sum_n_times(_V, Acc, 0) ->
+ Acc;
+sum_n_times(V, Acc, N) ->
+ sum_n_times(V, sum(V, Acc), N-1).
diff --git a/deps/rabbitmq_mqtt/.gitignore b/deps/rabbitmq_mqtt/.gitignore
new file mode 100644
index 0000000000..7f34fc74e2
--- /dev/null
+++ b/deps/rabbitmq_mqtt/.gitignore
@@ -0,0 +1,24 @@
+.sw?
+.*.sw?
+*.beam
+.idea/*
+/.erlang.mk/
+/cover/
+/deps/
+/doc/
+/ebin/
+/escript/
+/escript.lock
+/log/
+/logs/
+/plugins/
+/plugins.lock
+/sbin/
+/sbin.lock
+/xrefr
+debug/*
+
+test/config_schema_SUITE_data/schema/
+test/.idea/*
+
+rabbitmq_mqtt.d
diff --git a/deps/rabbitmq_mqtt/.travis.yml b/deps/rabbitmq_mqtt/.travis.yml
new file mode 100644
index 0000000000..0eb9abafd6
--- /dev/null
+++ b/deps/rabbitmq_mqtt/.travis.yml
@@ -0,0 +1,62 @@
+# vim:sw=2:et:
+
+os: linux
+dist: xenial
+language: elixir
+notifications:
+ email:
+ recipients:
+ - alerts@rabbitmq.com
+ on_success: never
+ on_failure: always
+addons:
+ apt:
+ packages:
+ - awscli
+ - maven
+cache:
+ apt: true
+env:
+ global:
+ - secure: Jw9u8yRg1vhWLtMCSrUFTxVnBuXvy6wd+voso4GlwGjeRzyc4YzSHvCAElENULHiWDJetbJGU/D9oGBYkRnYR2nwjPu5bvW9rsZYYUKWHE3V20ddTHHX+7lU5WeTWDkFy9VlD2nUiUplCNX8zuTU9T59KaP2ifcOv1Gr47Wo02o=
+ - secure: ZaLpWOIx0uR/yqQRKMx8pZ0+AhJx/GIczZmJhq1m0huGhJwqdCefK5iwUwzTXXN3MX+WBv8Jj1rCnmsKqtpoNFMM3Twyl518KBOGzrzg4asPefH+pj5QNvpcmL4PKe8KuLAV0zxmFzrj9OoV8P4MAMHq83MNfjan56cd9SLjbD0=
+
+ # $base_rmq_ref is used by rabbitmq-components.mk to select the
+ # appropriate branch for dependencies.
+ - base_rmq_ref=master
+
+elixir:
+ - '1.9'
+otp_release:
+ - '21.3'
+ - '22.2'
+
+install:
+ # This project being an Erlang one (we just set language to Elixir
+ # to ensure it is installed), we don't want Travis to run mix(1)
+ # automatically as it will break.
+ skip
+
+script:
+ # $current_rmq_ref is also used by rabbitmq-components.mk to select
+ # the appropriate branch for dependencies.
+ - make check-rabbitmq-components.mk
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+ - make xref
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+ - make tests
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+
+after_failure:
+ - |
+ cd "$TRAVIS_BUILD_DIR"
+ if test -d logs && test "$AWS_ACCESS_KEY_ID" && test "$AWS_SECRET_ACCESS_KEY"; then
+ archive_name="$(basename "$TRAVIS_REPO_SLUG")-$TRAVIS_JOB_NUMBER"
+
+ tar -c --transform "s/^logs/${archive_name}/" -f - logs | \
+ xz > "${archive_name}.tar.xz"
+
+ aws s3 cp "${archive_name}.tar.xz" s3://server-release-pipeline/travis-ci-logs/ \
+ --region eu-west-1 \
+ --acl public-read
+ fi
diff --git a/deps/rabbitmq_mqtt/.travis.yml.patch b/deps/rabbitmq_mqtt/.travis.yml.patch
new file mode 100644
index 0000000000..b7ddd9414e
--- /dev/null
+++ b/deps/rabbitmq_mqtt/.travis.yml.patch
@@ -0,0 +1,10 @@
+--- ../rabbit_common/.travis.yml 2019-09-13 13:48:46.258483000 +0200
++++ .travis.yml 2020-03-03 13:53:33.515196000 +0100
+@@ -13,6 +13,7 @@
+ apt:
+ packages:
+ - awscli
++ - maven
+ cache:
+ apt: true
+ env:
diff --git a/deps/rabbitmq_mqtt/CODE_OF_CONDUCT.md b/deps/rabbitmq_mqtt/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000000..08697906fd
--- /dev/null
+++ b/deps/rabbitmq_mqtt/CODE_OF_CONDUCT.md
@@ -0,0 +1,44 @@
+# Contributor Code of Conduct
+
+As contributors and maintainers of this project, and in the interest of fostering an open
+and welcoming community, we pledge to respect all people who contribute through reporting
+issues, posting feature requests, updating documentation, submitting pull requests or
+patches, and other activities.
+
+We are committed to making participation in this project a harassment-free experience for
+everyone, regardless of level of experience, gender, gender identity and expression,
+sexual orientation, disability, personal appearance, body size, race, ethnicity, age,
+religion, or nationality.
+
+Examples of unacceptable behavior by participants include:
+
+ * The use of sexualized language or imagery
+ * Personal attacks
+ * Trolling or insulting/derogatory comments
+ * Public or private harassment
+ * Publishing others' private information, such as physical or electronic addresses,
+ without explicit permission
+ * Other unethical or unprofessional conduct
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments,
+commits, code, wiki edits, issues, and other contributions that are not aligned to this
+Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors
+that they deem inappropriate, threatening, offensive, or harmful.
+
+By adopting this Code of Conduct, project maintainers commit themselves to fairly and
+consistently applying these principles to every aspect of managing this project. Project
+maintainers who do not follow or enforce the Code of Conduct may be permanently removed
+from the project team.
+
+This Code of Conduct applies both within project spaces and in public spaces when an
+individual is representing the project or its community.
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by
+contacting a project maintainer at [info@rabbitmq.com](mailto:info@rabbitmq.com). All complaints will
+be reviewed and investigated and will result in a response that is deemed necessary and
+appropriate to the circumstances. Maintainers are obligated to maintain confidentiality
+with regard to the reporter of an incident.
+
+This Code of Conduct is adapted from the
+[Contributor Covenant](https://contributor-covenant.org), version 1.3.0, available at
+[contributor-covenant.org/version/1/3/0/](https://contributor-covenant.org/version/1/3/0/)
diff --git a/deps/rabbitmq_mqtt/CONTRIBUTING.md b/deps/rabbitmq_mqtt/CONTRIBUTING.md
new file mode 100644
index 0000000000..592e7ced57
--- /dev/null
+++ b/deps/rabbitmq_mqtt/CONTRIBUTING.md
@@ -0,0 +1,99 @@
+Thank you for using RabbitMQ and for taking the time to contribute to the project.
+This document has two main parts:
+
+ * when and how to file GitHub issues for RabbitMQ projects
+ * how to submit pull requests
+
+They are intended to save you and the RabbitMQ maintainers some time, so please
+take a moment to read through them.
+
+## Overview
+
+### GitHub issues
+
+The RabbitMQ team uses GitHub issues for _specific actionable items_ that
+engineers can work on. This assumes the following:
+
+* GitHub issues are not used for questions, investigations, root cause
+ analysis, discussions of potential issues, etc (as defined by this team)
+* Enough information is provided by the reporter for maintainers to work with
+
+The team receives many questions through various venues every single
+day. Frequently, these questions do not include the necessary details
+the team needs to begin useful work. GitHub issues can very quickly
+turn into something impossible to navigate and make sense
+of. Because of this, questions, investigations, root cause analysis,
+and discussions of potential features are all considered to be
+[mailing list][rmq-users] material. If you are unsure where to begin,
+the [RabbitMQ users mailing list][rmq-users] is the right place.
+
+Getting all the details necessary to reproduce an issue, make a
+conclusion or even form a hypothesis about what's happening can take a
+fair amount of time. Please help others help you by providing a way to
+reproduce the behavior you're observing, or at least sharing as much
+relevant information as possible on the [RabbitMQ users mailing
+list][rmq-users].
+
+Please provide versions of the software used:
+
+ * RabbitMQ server
+ * Erlang
+ * Operating system version (and distribution, if applicable)
+ * All client libraries used
+ * RabbitMQ plugins (if applicable)
+
+The following information greatly helps in investigating and reproducing issues:
+
+ * RabbitMQ server logs
+ * A code example or terminal transcript that can be used to reproduce
+ * Full exception stack traces (a single line message is not enough!)
+ * `rabbitmqctl report` and `rabbitmqctl environment` output
+ * Other relevant details about the environment and workload, e.g. a traffic capture
+ * Feel free to edit out hostnames and other potentially sensitive information.
+
+To make collecting much of this and other environment information easier, use
+the [`rabbitmq-collect-env`][rmq-collect-env] script. It will produce an archive with
+server logs, operating system logs, output of certain diagnostics commands and so on.
+Please note that **no effort is made to scrub any information that may be sensitive**.
+
+### Pull Requests
+
+RabbitMQ projects use pull requests to discuss, collaborate on and accept code contributions.
+Pull requests are the primary place for discussing code changes.
+
+Here's the recommended workflow:
+
+ * [Fork the repository][github-fork] or repositories you plan on contributing to. If multiple
+ repositories are involved in addressing the same issue, please use the same branch name
+ in each repository
+ * Create a branch with a descriptive name in the relevant repositories
+ * Make your changes, run tests (usually with `make tests`), commit with a
+ [descriptive message][git-commit-msgs], push to your fork
+ * Submit pull requests with an explanation of what has been changed and **why**
+ * Submit a filled out and signed [Contributor Agreement][ca-agreement] if needed (see below)
+ * Be patient. We will get to your pull request eventually
+
+If what you are going to work on is a substantial change, please first
+ask the core team for their opinion on the [RabbitMQ users mailing list][rmq-users].
+
+## Code of Conduct
+
+See [CODE_OF_CONDUCT.md](./CODE_OF_CONDUCT.md).
+
+## Contributor Agreement
+
+If you want to contribute a non-trivial change, please submit a signed
+copy of our [Contributor Agreement][ca-agreement] around the time you
+submit your pull request. This will make it much easier (in some
+cases, possible) for the RabbitMQ team at Pivotal to merge your
+contribution.
+
+## Where to Ask Questions
+
+If something isn't clear, feel free to ask on our [mailing list][rmq-users].
+
+[rmq-collect-env]: https://github.com/rabbitmq/support-tools/blob/master/scripts/rabbitmq-collect-env
+[git-commit-msgs]: https://chris.beams.io/posts/git-commit/
+[rmq-users]: https://groups.google.com/forum/#!forum/rabbitmq-users
+[ca-agreement]: https://cla.pivotal.io/sign/rabbitmq
+[github-fork]: https://help.github.com/articles/fork-a-repo/
diff --git a/deps/rabbitmq_mqtt/LICENSE b/deps/rabbitmq_mqtt/LICENSE
new file mode 100644
index 0000000000..f2da65d175
--- /dev/null
+++ b/deps/rabbitmq_mqtt/LICENSE
@@ -0,0 +1,4 @@
+This package is licensed under the MPL 2.0. For the MPL 2.0, please see LICENSE-MPL-RabbitMQ.
+
+If you have any questions regarding licensing, please contact us at
+info@rabbitmq.com.
diff --git a/deps/rabbitmq_mqtt/LICENSE-MPL-RabbitMQ b/deps/rabbitmq_mqtt/LICENSE-MPL-RabbitMQ
new file mode 100644
index 0000000000..14e2f777f6
--- /dev/null
+++ b/deps/rabbitmq_mqtt/LICENSE-MPL-RabbitMQ
@@ -0,0 +1,373 @@
+Mozilla Public License Version 2.0
+==================================
+
+1. Definitions
+--------------
+
+1.1. "Contributor"
+ means each individual or legal entity that creates, contributes to
+ the creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+ means the combination of the Contributions of others (if any) used
+ by a Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+ means Source Code Form to which the initial Contributor has attached
+ the notice in Exhibit A, the Executable Form of such Source Code
+ Form, and Modifications of such Source Code Form, in each case
+ including portions thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ (a) that the initial Contributor has attached the notice described
+ in Exhibit B to the Covered Software; or
+
+ (b) that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the
+ terms of a Secondary License.
+
+1.6. "Executable Form"
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+ means a work that combines Covered Software with other material, in
+ a separate file or files, that is not Covered Software.
+
+1.8. "License"
+ means this document.
+
+1.9. "Licensable"
+ means having the right to grant, to the maximum extent possible,
+ whether at the time of the initial grant or subsequently, any and
+ all of the rights conveyed by this License.
+
+1.10. "Modifications"
+ means any of the following:
+
+ (a) any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered
+ Software; or
+
+ (b) any new file in Source Code Form that contains any Covered
+ Software.
+
+1.11. "Patent Claims" of a Contributor
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the
+ License, by the making, using, selling, offering for sale, having
+ made, import, or transfer of either its Contributions or its
+ Contributor Version.
+
+1.12. "Secondary License"
+ means either the GNU General Public License, Version 2.0, the GNU
+ Lesser General Public License, Version 2.1, the GNU Affero General
+ Public License, Version 3.0, or any later versions of those
+ licenses.
+
+1.13. "Source Code Form"
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that
+ controls, is controlled by, or is under common control with You. For
+ purposes of this definition, "control" means (a) the power, direct
+ or indirect, to cause the direction or management of such entity,
+ whether by contract or otherwise, or (b) ownership of more than
+ fifty percent (50%) of the outstanding shares or beneficial
+ ownership of such entity.
+
+2. License Grants and Conditions
+--------------------------------
+
+2.1. Grants
+
+Each Contributor hereby grants You a world-wide, royalty-free,
+non-exclusive license:
+
+(a) under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+(b) under Patent Claims of such Contributor to make, use, sell, offer
+ for sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+The licenses granted in Section 2.1 with respect to any Contribution
+become effective for each Contribution on the date the Contributor first
+distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+The licenses granted in this Section 2 are the only rights granted under
+this License. No additional rights or licenses will be implied from the
+distribution or licensing of Covered Software under this License.
+Notwithstanding Section 2.1(b) above, no patent license is granted by a
+Contributor:
+
+(a) for any code that a Contributor has removed from Covered Software;
+ or
+
+(b) for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+(c) under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+This License does not grant any rights in the trademarks, service marks,
+or logos of any Contributor (except as may be necessary to comply with
+the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+No Contributor makes additional grants as a result of Your choice to
+distribute the Covered Software under a subsequent version of this
+License (see Section 10.2) or under the terms of a Secondary License (if
+permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+Each Contributor represents that the Contributor believes its
+Contributions are its original creation(s) or it has sufficient rights
+to grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+This License is not intended to limit any rights You have under
+applicable copyright doctrines of fair use, fair dealing, or other
+equivalents.
+
+2.7. Conditions
+
+Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
+in Section 2.1.
+
+3. Responsibilities
+-------------------
+
+3.1. Distribution of Source Form
+
+All distribution of Covered Software in Source Code Form, including any
+Modifications that You create or to which You contribute, must be under
+the terms of this License. You must inform recipients that the Source
+Code Form of the Covered Software is governed by the terms of this
+License, and how they can obtain a copy of this License. You may not
+attempt to alter or restrict the recipients' rights in the Source Code
+Form.
+
+3.2. Distribution of Executable Form
+
+If You distribute Covered Software in Executable Form then:
+
+(a) such Covered Software must also be made available in Source Code
+ Form, as described in Section 3.1, and You must inform recipients of
+ the Executable Form how they can obtain a copy of such Source Code
+ Form by reasonable means in a timely manner, at a charge no more
+ than the cost of distribution to the recipient; and
+
+(b) You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter
+ the recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+You may create and distribute a Larger Work under terms of Your choice,
+provided that You also comply with the requirements of this License for
+the Covered Software. If the Larger Work is a combination of Covered
+Software with a work governed by one or more Secondary Licenses, and the
+Covered Software is not Incompatible With Secondary Licenses, this
+License permits You to additionally distribute such Covered Software
+under the terms of such Secondary License(s), so that the recipient of
+the Larger Work may, at their option, further distribute the Covered
+Software under the terms of either this License or such Secondary
+License(s).
+
+3.4. Notices
+
+You may not remove or alter the substance of any license notices
+(including copyright notices, patent notices, disclaimers of warranty,
+or limitations of liability) contained within the Source Code Form of
+the Covered Software, except that You may alter any license notices to
+the extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+You may choose to offer, and to charge a fee for, warranty, support,
+indemnity or liability obligations to one or more recipients of Covered
+Software. However, You may do so only on Your own behalf, and not on
+behalf of any Contributor. You must make it absolutely clear that any
+such warranty, support, indemnity, or liability obligation is offered by
+You alone, and You hereby agree to indemnify every Contributor for any
+liability incurred by such Contributor as a result of warranty, support,
+indemnity or liability terms You offer. You may include additional
+disclaimers of warranty and limitations of liability specific to any
+jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+---------------------------------------------------
+
+If it is impossible for You to comply with any of the terms of this
+License with respect to some or all of the Covered Software due to
+statute, judicial order, or regulation then You must: (a) comply with
+the terms of this License to the maximum extent possible; and (b)
+describe the limitations and the code they affect. Such description must
+be placed in a text file included with all distributions of the Covered
+Software under this License. Except to the extent prohibited by statute
+or regulation, such description must be sufficiently detailed for a
+recipient of ordinary skill to be able to understand it.
+
+5. Termination
+--------------
+
+5.1. The rights granted under this License will terminate automatically
+if You fail to comply with any of its terms. However, if You become
+compliant, then the rights granted under this License from a particular
+Contributor are reinstated (a) provisionally, unless and until such
+Contributor explicitly and finally terminates Your grants, and (b) on an
+ongoing basis, if such Contributor fails to notify You of the
+non-compliance by some reasonable means prior to 60 days after You have
+come back into compliance. Moreover, Your grants from a particular
+Contributor are reinstated on an ongoing basis if such Contributor
+notifies You of the non-compliance by some reasonable means, this is the
+first time You have received notice of non-compliance with this License
+from such Contributor, and You become compliant prior to 30 days after
+Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+infringement claim (excluding declaratory judgment actions,
+counter-claims, and cross-claims) alleging that a Contributor Version
+directly or indirectly infringes any patent, then the rights granted to
+You by any and all Contributors for the Covered Software under Section
+2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all
+end user license agreements (excluding distributors and resellers) which
+have been validly granted by You or Your distributors under this License
+prior to termination shall survive termination.
+
+************************************************************************
+* *
+* 6. Disclaimer of Warranty *
+* ------------------------- *
+* *
+* Covered Software is provided under this License on an "as is" *
+* basis, without warranty of any kind, either expressed, implied, or *
+* statutory, including, without limitation, warranties that the *
+* Covered Software is free of defects, merchantable, fit for a *
+* particular purpose or non-infringing. The entire risk as to the *
+* quality and performance of the Covered Software is with You. *
+* Should any Covered Software prove defective in any respect, You *
+* (not any Contributor) assume the cost of any necessary servicing, *
+* repair, or correction. This disclaimer of warranty constitutes an *
+* essential part of this License. No use of any Covered Software is *
+* authorized under this License except under this disclaimer. *
+* *
+************************************************************************
+
+************************************************************************
+* *
+* 7. Limitation of Liability *
+* -------------------------- *
+* *
+* Under no circumstances and under no legal theory, whether tort *
+* (including negligence), contract, or otherwise, shall any *
+* Contributor, or anyone who distributes Covered Software as *
+* permitted above, be liable to You for any direct, indirect, *
+* special, incidental, or consequential damages of any character *
+* including, without limitation, damages for lost profits, loss of *
+* goodwill, work stoppage, computer failure or malfunction, or any *
+* and all other commercial damages or losses, even if such party *
+* shall have been informed of the possibility of such damages. This *
+* limitation of liability shall not apply to liability for death or *
+* personal injury resulting from such party's negligence to the *
+* extent applicable law prohibits such limitation. Some *
+* jurisdictions do not allow the exclusion or limitation of *
+* incidental or consequential damages, so this exclusion and *
+* limitation may not apply to You. *
+* *
+************************************************************************
+
+8. Litigation
+-------------
+
+Any litigation relating to this License may be brought only in the
+courts of a jurisdiction where the defendant maintains its principal
+place of business and such litigation shall be governed by laws of that
+jurisdiction, without reference to its conflict-of-law provisions.
+Nothing in this Section shall prevent a party's ability to bring
+cross-claims or counter-claims.
+
+9. Miscellaneous
+----------------
+
+This License represents the complete agreement concerning the subject
+matter hereof. If any provision of this License is held to be
+unenforceable, such provision shall be reformed only to the extent
+necessary to make it enforceable. Any law or regulation which provides
+that the language of a contract shall be construed against the drafter
+shall not be used to construe this License against a Contributor.
+
+10. Versions of the License
+---------------------------
+
+10.1. New Versions
+
+Mozilla Foundation is the license steward. Except as provided in Section
+10.3, no one other than the license steward has the right to modify or
+publish new versions of this License. Each version will be given a
+distinguishing version number.
+
+10.2. Effect of New Versions
+
+You may distribute the Covered Software under the terms of the version
+of the License under which You originally received the Covered Software,
+or under the terms of any subsequent version published by the license
+steward.
+
+10.3. Modified Versions
+
+If you create software not governed by this License, and you want to
+create a new license for such software, you may create and use a
+modified version of this License if you rename the license and remove
+any references to the name of the license steward (except to note that
+such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+Licenses
+
+If You choose to distribute Source Code Form that is Incompatible With
+Secondary Licenses under the terms of this version of the License, the
+notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+-------------------------------------------
+
+ This Source Code Form is subject to the terms of the Mozilla Public
+ License, v. 2.0. If a copy of the MPL was not distributed with this
+ file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular
+file, then You may include the notice in a location (such as a LICENSE
+file in a relevant directory) where a recipient would be likely to look
+for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+---------------------------------------------------------
+
+ This Source Code Form is "Incompatible With Secondary Licenses", as
+ defined by the Mozilla Public License, v. 2.0.
diff --git a/deps/rabbitmq_mqtt/Makefile b/deps/rabbitmq_mqtt/Makefile
new file mode 100644
index 0000000000..1dbbfe037d
--- /dev/null
+++ b/deps/rabbitmq_mqtt/Makefile
@@ -0,0 +1,54 @@
+PROJECT = rabbitmq_mqtt
+PROJECT_DESCRIPTION = RabbitMQ MQTT Adapter
+PROJECT_MOD = rabbit_mqtt
+
+define PROJECT_ENV
+[
+ {default_user, <<"guest">>},
+ {default_pass, <<"guest">>},
+ {ssl_cert_login,false},
+ %% To satisfy an unfortunate expectation from popular MQTT clients.
+ {allow_anonymous, true},
+ {vhost, <<"/">>},
+ {exchange, <<"amq.topic">>},
+ {subscription_ttl, 86400000}, %% 24 hours
+ {retained_message_store, rabbit_mqtt_retained_msg_store_dets},
+ %% only used by DETS store
+ {retained_message_store_dets_sync_interval, 2000},
+ {prefetch, 10},
+ {ssl_listeners, []},
+ {tcp_listeners, [1883]},
+ {num_tcp_acceptors, 10},
+ {num_ssl_acceptors, 10},
+ {tcp_listen_options, [{backlog, 128},
+ {nodelay, true}]},
+ {proxy_protocol, false},
+ {sparkplug, false}
+ ]
+endef
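+
+# These defaults populate the rabbitmq_mqtt application environment; they can
+# be overridden at runtime. For instance (illustrative), an advanced.config
+# entry such as [{rabbitmq_mqtt, [{allow_anonymous, false}]}]. takes
+# precedence over the values above.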
+
+define PROJECT_APP_EXTRA_KEYS
+ {broker_version_requirements, []}
+endef
+
+DEPS = ranch rabbit_common rabbit amqp_client ra
+TEST_DEPS = emqttc ct_helper rabbitmq_ct_helpers rabbitmq_ct_client_helpers
+
+dep_ct_helper = git https://github.com/extend/ct_helper.git master
+dep_emqttc = git https://github.com/rabbitmq/emqttc.git remove-logging
+
+DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-plugin.mk
+DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk
+
+# FIXME: Use erlang.mk patched for RabbitMQ, while waiting for PRs to be
+# reviewed and merged.
+
+ERLANG_MK_REPO = https://github.com/rabbitmq/erlang.mk.git
+ERLANG_MK_COMMIT = rabbitmq-tmp
+
+include rabbitmq-components.mk
+include erlang.mk
+
+
+clean::
+ if test -d test/java_SUITE_data; then cd test/java_SUITE_data && $(MAKE) clean; fi
diff --git a/deps/rabbitmq_mqtt/README.md b/deps/rabbitmq_mqtt/README.md
new file mode 100644
index 0000000000..f0ba5d6b03
--- /dev/null
+++ b/deps/rabbitmq_mqtt/README.md
@@ -0,0 +1,38 @@
+# RabbitMQ MQTT Plugin
+
+## Getting Started
+
+This is an MQTT plugin for RabbitMQ.
+
+The plugin is included in the RabbitMQ distribution. To enable
+it, use [rabbitmq-plugins](https://www.rabbitmq.com/man/rabbitmq-plugins.1.man.html):
+
+ rabbitmq-plugins enable rabbitmq_mqtt
+
+The default port used by the plugin is `1883`.
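+
+The listeners can be changed through the node's configuration; for example, a
+`rabbitmq.conf` entry along these lines (an illustrative sketch, the port
+number is made up) moves the TCP listener:
+
+    # hypothetical example
+    mqtt.listeners.tcp.default = 12345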
+
+## Documentation
+
+[MQTT plugin documentation](https://www.rabbitmq.com/mqtt.html) is available
+from rabbitmq.com.
+
+## Contributing
+
+See [CONTRIBUTING.md](https://github.com/rabbitmq/rabbitmq-mqtt/blob/master/CONTRIBUTING.md).
+
+### Running Tests
+
+After cloning the RabbitMQ umbrella repository, change into the `rabbitmq-mqtt` directory
+and run
+
+ make tests
+
+This will bring up a RabbitMQ node with the plugin enabled and run integration tests
+against it. Note that there must be no other MQTT server running on ports `1883` and `8883`.
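+
+A quick way to confirm those ports are free (assuming `lsof` is available):
+
+    lsof -i :1883 -i :8883   # no output means nothing is listening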
+
+## Copyright and License
+
+(c) 2007-2020 VMware, Inc. or its affiliates.
+
+Released under the [Mozilla Public License](https://www.rabbitmq.com/mpl.html),
+the same as RabbitMQ.
diff --git a/deps/rabbitmq_mqtt/erlang.mk b/deps/rabbitmq_mqtt/erlang.mk
new file mode 100644
index 0000000000..fce4be0b0a
--- /dev/null
+++ b/deps/rabbitmq_mqtt/erlang.mk
@@ -0,0 +1,7808 @@
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+.PHONY: all app apps deps search rel relup docs install-docs check tests clean distclean help erlang-mk
+
+ERLANG_MK_FILENAME := $(realpath $(lastword $(MAKEFILE_LIST)))
+export ERLANG_MK_FILENAME
+
+ERLANG_MK_VERSION = 2019.07.01-40-geb3e4b0
+ERLANG_MK_WITHOUT =
+
+# Make 3.81 and 3.82 are deprecated.
+
+ifeq ($(MAKELEVEL)$(MAKE_VERSION),03.81)
+$(warning Please upgrade to GNU Make 4 or later: https://erlang.mk/guide/installation.html)
+endif
+
+ifeq ($(MAKELEVEL)$(MAKE_VERSION),03.82)
+$(warning Please upgrade to GNU Make 4 or later: https://erlang.mk/guide/installation.html)
+endif
+
+# Core configuration.
+
+PROJECT ?= $(notdir $(CURDIR))
+PROJECT := $(strip $(PROJECT))
+
+PROJECT_VERSION ?= rolling
+PROJECT_MOD ?= $(PROJECT)_app
+PROJECT_ENV ?= []
+
+# Verbosity.
+
+V ?= 0
+
+verbose_0 = @
+verbose_2 = set -x;
+verbose = $(verbose_$(V))
+
+ifeq ($(V),3)
+SHELL := $(SHELL) -x
+endif
+
+gen_verbose_0 = @echo " GEN " $@;
+gen_verbose_2 = set -x;
+gen_verbose = $(gen_verbose_$(V))
+
+gen_verbose_esc_0 = @echo " GEN " $$@;
+gen_verbose_esc_2 = set -x;
+gen_verbose_esc = $(gen_verbose_esc_$(V))
+
+# Temporary files directory.
+
+ERLANG_MK_TMP ?= $(CURDIR)/.erlang.mk
+export ERLANG_MK_TMP
+
+# "erl" command.
+
+ERL = erl +A1 -noinput -boot no_dot_erlang
+
+# Platform detection.
+
+ifeq ($(PLATFORM),)
+UNAME_S := $(shell uname -s)
+
+ifeq ($(UNAME_S),Linux)
+PLATFORM = linux
+else ifeq ($(UNAME_S),Darwin)
+PLATFORM = darwin
+else ifeq ($(UNAME_S),SunOS)
+PLATFORM = solaris
+else ifeq ($(UNAME_S),GNU)
+PLATFORM = gnu
+else ifeq ($(UNAME_S),FreeBSD)
+PLATFORM = freebsd
+else ifeq ($(UNAME_S),NetBSD)
+PLATFORM = netbsd
+else ifeq ($(UNAME_S),OpenBSD)
+PLATFORM = openbsd
+else ifeq ($(UNAME_S),DragonFly)
+PLATFORM = dragonfly
+else ifeq ($(shell uname -o),Msys)
+PLATFORM = msys2
+else
+$(error Unable to detect platform. Please open a ticket with the output of uname -a.)
+endif
+
+export PLATFORM
+endif
+
+# Core targets.
+
+all:: deps app rel
+
+# Noop to avoid a Make warning when there's nothing to do.
+rel::
+ $(verbose) :
+
+relup:: deps app
+
+check:: tests
+
+clean:: clean-crashdump
+
+clean-crashdump:
+ifneq ($(wildcard erl_crash.dump),)
+ $(gen_verbose) rm -f erl_crash.dump
+endif
+
+distclean:: clean distclean-tmp
+
+$(ERLANG_MK_TMP):
+ $(verbose) mkdir -p $(ERLANG_MK_TMP)
+
+distclean-tmp:
+ $(gen_verbose) rm -rf $(ERLANG_MK_TMP)
+
+help::
+ $(verbose) printf "%s\n" \
+ "erlang.mk (version $(ERLANG_MK_VERSION)) is distributed under the terms of the ISC License." \
+ "Copyright (c) 2013-2016 Loïc Hoguin <essen@ninenines.eu>" \
+ "" \
+ "Usage: [V=1] $(MAKE) [target]..." \
+ "" \
+ "Core targets:" \
+ " all Run deps, app and rel targets in that order" \
+ " app Compile the project" \
+ " deps Fetch dependencies (if needed) and compile them" \
+ " fetch-deps Fetch dependencies recursively (if needed) without compiling them" \
+ " list-deps List dependencies recursively on stdout" \
+ " search q=... Search for a package in the built-in index" \
+ " rel Build a release for this project, if applicable" \
+ " docs Build the documentation for this project" \
+ " install-docs Install the man pages for this project" \
+ " check Compile and run all tests and analysis for this project" \
+ " tests Run the tests for this project" \
+ " clean Delete temporary and output files from most targets" \
+ " distclean Delete all temporary and output files" \
+ " help Display this help and exit" \
+ " erlang-mk Update erlang.mk to the latest version"
+
+# Core functions.
+
+empty :=
+space := $(empty) $(empty)
+tab := $(empty) $(empty)
+comma := ,
+
+define newline
+
+
+endef
+
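+# For example (illustrative): $(call comma_list,a b c) expands to a,b,c.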
+define comma_list
+$(subst $(space),$(comma),$(strip $(1)))
+endef
+
+define escape_dquotes
+$(subst ",\",$1)
+endef
+
+# Adding erlang.mk to make Erlang scripts that call init:get_plain_arguments() happy.
+define erlang
+$(ERL) $2 -pz $(ERLANG_MK_TMP)/rebar/ebin -eval "$(subst $(newline),,$(call escape_dquotes,$1))" -- erlang.mk
+endef
+
+ifeq ($(PLATFORM),msys2)
+core_native_path = $(shell cygpath -m $1)
+else
+core_native_path = $1
+endif
+
+core_http_get = curl -Lf$(if $(filter-out 0,$(V)),,s)o $(call core_native_path,$1) $2
+
+core_eq = $(and $(findstring $(1),$(2)),$(findstring $(2),$(1)))
+
+# We skip files that contain spaces because they end up causing issues.
+core_find = $(if $(wildcard $1),$(shell find $(1:%/=%) \( -type l -o -type f \) -name $(subst *,\*,$2) | grep -v " "))
+
+core_lc = $(subst A,a,$(subst B,b,$(subst C,c,$(subst D,d,$(subst E,e,$(subst F,f,$(subst G,g,$(subst H,h,$(subst I,i,$(subst J,j,$(subst K,k,$(subst L,l,$(subst M,m,$(subst N,n,$(subst O,o,$(subst P,p,$(subst Q,q,$(subst R,r,$(subst S,s,$(subst T,t,$(subst U,u,$(subst V,v,$(subst W,w,$(subst X,x,$(subst Y,y,$(subst Z,z,$(1)))))))))))))))))))))))))))
+
+core_ls = $(filter-out $(1),$(shell echo $(1)))
+
+# @todo Use a solution that does not require using perl.
+core_relpath = $(shell perl -e 'use File::Spec; print File::Spec->abs2rel(@ARGV) . "\n"' $1 $2)
+
+define core_render
+ printf -- '$(subst $(newline),\n,$(subst %,%%,$(subst ','\'',$(subst $(tab),$(WS),$(call $(1))))))\n' > $(2)
+endef
+
+# Automated update.
+
+ERLANG_MK_REPO ?= https://github.com/ninenines/erlang.mk
+ERLANG_MK_COMMIT ?=
+ERLANG_MK_BUILD_CONFIG ?= build.config
+ERLANG_MK_BUILD_DIR ?= .erlang.mk.build
+
+erlang-mk: WITHOUT ?= $(ERLANG_MK_WITHOUT)
+erlang-mk:
+ifdef ERLANG_MK_COMMIT
+ $(verbose) git clone $(ERLANG_MK_REPO) $(ERLANG_MK_BUILD_DIR)
+ $(verbose) cd $(ERLANG_MK_BUILD_DIR) && git checkout $(ERLANG_MK_COMMIT)
+else
+ $(verbose) git clone --depth 1 $(ERLANG_MK_REPO) $(ERLANG_MK_BUILD_DIR)
+endif
+ $(verbose) if [ -f $(ERLANG_MK_BUILD_CONFIG) ]; then cp $(ERLANG_MK_BUILD_CONFIG) $(ERLANG_MK_BUILD_DIR)/build.config; fi
+ $(gen_verbose) $(MAKE) --no-print-directory -C $(ERLANG_MK_BUILD_DIR) WITHOUT='$(strip $(WITHOUT))' UPGRADE=1
+ $(verbose) cp $(ERLANG_MK_BUILD_DIR)/erlang.mk ./erlang.mk
+ $(verbose) rm -rf $(ERLANG_MK_BUILD_DIR)
+ $(verbose) rm -rf $(ERLANG_MK_TMP)
+
+# The erlang.mk package index is bundled in the default erlang.mk build.
+# Search for the string "copyright" to skip to the rest of the code.
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-kerl
+
+KERL_INSTALL_DIR ?= $(HOME)/erlang
+
+ifeq ($(strip $(KERL)),)
+KERL := $(ERLANG_MK_TMP)/kerl/kerl
+endif
+
+KERL_DIR = $(ERLANG_MK_TMP)/kerl
+
+export KERL
+
+KERL_GIT ?= https://github.com/kerl/kerl
+KERL_COMMIT ?= master
+
+KERL_MAKEFLAGS ?=
+
+OTP_GIT ?= https://github.com/erlang/otp
+
+define kerl_otp_target
+$(KERL_INSTALL_DIR)/$(1): $(KERL)
+ $(verbose) if [ ! -d $$@ ]; then \
+ MAKEFLAGS="$(KERL_MAKEFLAGS)" $(KERL) build git $(OTP_GIT) $(1) $(1); \
+ $(KERL) install $(1) $(KERL_INSTALL_DIR)/$(1); \
+ fi
+endef
+
+define kerl_hipe_target
+$(KERL_INSTALL_DIR)/$1-native: $(KERL)
+ $(verbose) if [ ! -d $$@ ]; then \
+ KERL_CONFIGURE_OPTIONS=--enable-native-libs \
+ MAKEFLAGS="$(KERL_MAKEFLAGS)" $(KERL) build git $(OTP_GIT) $1 $1-native; \
+ $(KERL) install $1-native $(KERL_INSTALL_DIR)/$1-native; \
+ fi
+endef
+
+$(KERL): $(KERL_DIR)
+
+$(KERL_DIR): | $(ERLANG_MK_TMP)
+ $(gen_verbose) git clone --depth 1 $(KERL_GIT) $(ERLANG_MK_TMP)/kerl
+ $(verbose) cd $(ERLANG_MK_TMP)/kerl && git checkout $(KERL_COMMIT)
+ $(verbose) chmod +x $(KERL)
+
+distclean:: distclean-kerl
+
+distclean-kerl:
+ $(gen_verbose) rm -rf $(KERL_DIR)
+
+# Allow users to select which version of Erlang/OTP to use for a project.
+
+ifneq ($(strip $(LATEST_ERLANG_OTP)),)
+# In some environments it is necessary to filter out master.
+ERLANG_OTP := $(notdir $(lastword $(sort\
+ $(filter-out $(KERL_INSTALL_DIR)/master $(KERL_INSTALL_DIR)/OTP_R%,\
+ $(filter-out %-rc1 %-rc2 %-rc3,$(wildcard $(KERL_INSTALL_DIR)/*[^-native]))))))
+endif
+
+ERLANG_OTP ?=
+ERLANG_HIPE ?=
+
+# Use kerl to enforce a specific Erlang/OTP version for a project.
+ifneq ($(strip $(ERLANG_OTP)),)
+export PATH := $(KERL_INSTALL_DIR)/$(ERLANG_OTP)/bin:$(PATH)
+SHELL := env PATH=$(PATH) $(SHELL)
+$(eval $(call kerl_otp_target,$(ERLANG_OTP)))
+
+# Build Erlang/OTP only if it doesn't already exist.
+ifeq ($(wildcard $(KERL_INSTALL_DIR)/$(ERLANG_OTP))$(BUILD_ERLANG_OTP),)
+$(info Building Erlang/OTP $(ERLANG_OTP)... Please wait...)
+$(shell $(MAKE) $(KERL_INSTALL_DIR)/$(ERLANG_OTP) ERLANG_OTP=$(ERLANG_OTP) BUILD_ERLANG_OTP=1 >&2)
+endif
+
+else
+# Same for a HiPE enabled VM.
+ifneq ($(strip $(ERLANG_HIPE)),)
+export PATH := $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native/bin:$(PATH)
+SHELL := env PATH=$(PATH) $(SHELL)
+$(eval $(call kerl_hipe_target,$(ERLANG_HIPE)))
+
+# Build Erlang/OTP only if it doesn't already exist.
+ifeq ($(wildcard $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native)$(BUILD_ERLANG_OTP),)
+$(info Building HiPE-enabled Erlang/OTP $(ERLANG_HIPE)... Please wait...)
+$(shell $(MAKE) $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native ERLANG_HIPE=$(ERLANG_HIPE) BUILD_ERLANG_OTP=1 >&2)
+endif
+
+endif
+endif
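+
+# Illustrative usage: `make ERLANG_OTP=OTP-22.3` (the version name is an
+# example) builds that release with kerl under $(KERL_INSTALL_DIR) if it is
+# missing and puts its bin/ directory first on PATH for this project;
+# ERLANG_HIPE does the same for a HiPE-enabled build.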
+
+PACKAGES += aberth
+pkg_aberth_name = aberth
+pkg_aberth_description = Generic BERT-RPC server in Erlang
+pkg_aberth_homepage = https://github.com/a13x/aberth
+pkg_aberth_fetch = git
+pkg_aberth_repo = https://github.com/a13x/aberth
+pkg_aberth_commit = master
+
+PACKAGES += active
+pkg_active_name = active
+pkg_active_description = Active development for Erlang: rebuild and reload source/binary files while the VM is running
+pkg_active_homepage = https://github.com/proger/active
+pkg_active_fetch = git
+pkg_active_repo = https://github.com/proger/active
+pkg_active_commit = master
+
+PACKAGES += actordb_core
+pkg_actordb_core_name = actordb_core
+pkg_actordb_core_description = ActorDB main source
+pkg_actordb_core_homepage = http://www.actordb.com/
+pkg_actordb_core_fetch = git
+pkg_actordb_core_repo = https://github.com/biokoda/actordb_core
+pkg_actordb_core_commit = master
+
+PACKAGES += actordb_thrift
+pkg_actordb_thrift_name = actordb_thrift
+pkg_actordb_thrift_description = Thrift API for ActorDB
+pkg_actordb_thrift_homepage = http://www.actordb.com/
+pkg_actordb_thrift_fetch = git
+pkg_actordb_thrift_repo = https://github.com/biokoda/actordb_thrift
+pkg_actordb_thrift_commit = master
+
+PACKAGES += aleppo
+pkg_aleppo_name = aleppo
+pkg_aleppo_description = Alternative Erlang Pre-Processor
+pkg_aleppo_homepage = https://github.com/ErlyORM/aleppo
+pkg_aleppo_fetch = git
+pkg_aleppo_repo = https://github.com/ErlyORM/aleppo
+pkg_aleppo_commit = master
+
+PACKAGES += alog
+pkg_alog_name = alog
+pkg_alog_description = Simply the best logging framework for Erlang
+pkg_alog_homepage = https://github.com/siberian-fast-food/alogger
+pkg_alog_fetch = git
+pkg_alog_repo = https://github.com/siberian-fast-food/alogger
+pkg_alog_commit = master
+
+PACKAGES += amqp_client
+pkg_amqp_client_name = amqp_client
+pkg_amqp_client_description = RabbitMQ Erlang AMQP client
+pkg_amqp_client_homepage = https://www.rabbitmq.com/erlang-client-user-guide.html
+pkg_amqp_client_fetch = git
+pkg_amqp_client_repo = https://github.com/rabbitmq/rabbitmq-erlang-client.git
+pkg_amqp_client_commit = master
+
+PACKAGES += annotations
+pkg_annotations_name = annotations
+pkg_annotations_description = Simple code instrumentation utilities
+pkg_annotations_homepage = https://github.com/hyperthunk/annotations
+pkg_annotations_fetch = git
+pkg_annotations_repo = https://github.com/hyperthunk/annotations
+pkg_annotations_commit = master
+
+PACKAGES += antidote
+pkg_antidote_name = antidote
+pkg_antidote_description = Large-scale computation without synchronisation
+pkg_antidote_homepage = https://syncfree.lip6.fr/
+pkg_antidote_fetch = git
+pkg_antidote_repo = https://github.com/SyncFree/antidote
+pkg_antidote_commit = master
+
+PACKAGES += apns
+pkg_apns_name = apns
+pkg_apns_description = Apple Push Notification Server for Erlang
+pkg_apns_homepage = http://inaka.github.com/apns4erl
+pkg_apns_fetch = git
+pkg_apns_repo = https://github.com/inaka/apns4erl
+pkg_apns_commit = master
+
+PACKAGES += asciideck
+pkg_asciideck_name = asciideck
+pkg_asciideck_description = Asciidoc for Erlang.
+pkg_asciideck_homepage = https://ninenines.eu
+pkg_asciideck_fetch = git
+pkg_asciideck_repo = https://github.com/ninenines/asciideck
+pkg_asciideck_commit = master
+
+PACKAGES += azdht
+pkg_azdht_name = azdht
+pkg_azdht_description = Azureus Distributed Hash Table (DHT) in Erlang
+pkg_azdht_homepage = https://github.com/arcusfelis/azdht
+pkg_azdht_fetch = git
+pkg_azdht_repo = https://github.com/arcusfelis/azdht
+pkg_azdht_commit = master
+
+PACKAGES += backoff
+pkg_backoff_name = backoff
+pkg_backoff_description = Simple exponential backoffs in Erlang
+pkg_backoff_homepage = https://github.com/ferd/backoff
+pkg_backoff_fetch = git
+pkg_backoff_repo = https://github.com/ferd/backoff
+pkg_backoff_commit = master
+
+PACKAGES += barrel_tcp
+pkg_barrel_tcp_name = barrel_tcp
+pkg_barrel_tcp_description = barrel is a generic TCP acceptor pool with low latency in Erlang.
+pkg_barrel_tcp_homepage = https://github.com/benoitc-attic/barrel_tcp
+pkg_barrel_tcp_fetch = git
+pkg_barrel_tcp_repo = https://github.com/benoitc-attic/barrel_tcp
+pkg_barrel_tcp_commit = master
+
+PACKAGES += basho_bench
+pkg_basho_bench_name = basho_bench
+pkg_basho_bench_description = A load-generation and testing tool for basically whatever you can write a returning Erlang function for.
+pkg_basho_bench_homepage = https://github.com/basho/basho_bench
+pkg_basho_bench_fetch = git
+pkg_basho_bench_repo = https://github.com/basho/basho_bench
+pkg_basho_bench_commit = master
+
+PACKAGES += bcrypt
+pkg_bcrypt_name = bcrypt
+pkg_bcrypt_description = Bcrypt Erlang / C library
+pkg_bcrypt_homepage = https://github.com/erlangpack/bcrypt
+pkg_bcrypt_fetch = git
+pkg_bcrypt_repo = https://github.com/erlangpack/bcrypt.git
+pkg_bcrypt_commit = master
+
+PACKAGES += beam
+pkg_beam_name = beam
+pkg_beam_description = BEAM emulator written in Erlang
+pkg_beam_homepage = https://github.com/tonyrog/beam
+pkg_beam_fetch = git
+pkg_beam_repo = https://github.com/tonyrog/beam
+pkg_beam_commit = master
+
+PACKAGES += beanstalk
+pkg_beanstalk_name = beanstalk
+pkg_beanstalk_description = An Erlang client for beanstalkd
+pkg_beanstalk_homepage = https://github.com/tim/erlang-beanstalk
+pkg_beanstalk_fetch = git
+pkg_beanstalk_repo = https://github.com/tim/erlang-beanstalk
+pkg_beanstalk_commit = master
+
+PACKAGES += bear
+pkg_bear_name = bear
+pkg_bear_description = a set of statistics functions for erlang
+pkg_bear_homepage = https://github.com/boundary/bear
+pkg_bear_fetch = git
+pkg_bear_repo = https://github.com/boundary/bear
+pkg_bear_commit = master
+
+PACKAGES += bertconf
+pkg_bertconf_name = bertconf
+pkg_bertconf_description = Make ETS tables out of static BERT files that are auto-reloaded
+pkg_bertconf_homepage = https://github.com/ferd/bertconf
+pkg_bertconf_fetch = git
+pkg_bertconf_repo = https://github.com/ferd/bertconf
+pkg_bertconf_commit = master
+
+PACKAGES += bifrost
+pkg_bifrost_name = bifrost
+pkg_bifrost_description = Erlang FTP Server Framework
+pkg_bifrost_homepage = https://github.com/thorstadt/bifrost
+pkg_bifrost_fetch = git
+pkg_bifrost_repo = https://github.com/thorstadt/bifrost
+pkg_bifrost_commit = master
+
+PACKAGES += binpp
+pkg_binpp_name = binpp
+pkg_binpp_description = Erlang Binary Pretty Printer
+pkg_binpp_homepage = https://github.com/jtendo/binpp
+pkg_binpp_fetch = git
+pkg_binpp_repo = https://github.com/jtendo/binpp
+pkg_binpp_commit = master
+
+PACKAGES += bisect
+pkg_bisect_name = bisect
+pkg_bisect_description = Ordered fixed-size binary dictionary in Erlang
+pkg_bisect_homepage = https://github.com/knutin/bisect
+pkg_bisect_fetch = git
+pkg_bisect_repo = https://github.com/knutin/bisect
+pkg_bisect_commit = master
+
+PACKAGES += bitcask
+pkg_bitcask_name = bitcask
+pkg_bitcask_description = because you need another key/value storage engine
+pkg_bitcask_homepage = https://github.com/basho/bitcask
+pkg_bitcask_fetch = git
+pkg_bitcask_repo = https://github.com/basho/bitcask
+pkg_bitcask_commit = develop
+
+PACKAGES += bitstore
+pkg_bitstore_name = bitstore
+pkg_bitstore_description = A document based ontology development environment
+pkg_bitstore_homepage = https://github.com/bdionne/bitstore
+pkg_bitstore_fetch = git
+pkg_bitstore_repo = https://github.com/bdionne/bitstore
+pkg_bitstore_commit = master
+
+PACKAGES += bootstrap
+pkg_bootstrap_name = bootstrap
+pkg_bootstrap_description = A simple, yet powerful Erlang cluster bootstrapping application.
+pkg_bootstrap_homepage = https://github.com/schlagert/bootstrap
+pkg_bootstrap_fetch = git
+pkg_bootstrap_repo = https://github.com/schlagert/bootstrap
+pkg_bootstrap_commit = master
+
+PACKAGES += boss
+pkg_boss_name = boss
+pkg_boss_description = Erlang web MVC, now featuring Comet
+pkg_boss_homepage = https://github.com/ChicagoBoss/ChicagoBoss
+pkg_boss_fetch = git
+pkg_boss_repo = https://github.com/ChicagoBoss/ChicagoBoss
+pkg_boss_commit = master
+
+PACKAGES += boss_db
+pkg_boss_db_name = boss_db
+pkg_boss_db_description = BossDB: a sharded, caching, pooling, evented ORM for Erlang
+pkg_boss_db_homepage = https://github.com/ErlyORM/boss_db
+pkg_boss_db_fetch = git
+pkg_boss_db_repo = https://github.com/ErlyORM/boss_db
+pkg_boss_db_commit = master
+
+PACKAGES += brod
+pkg_brod_name = brod
+pkg_brod_description = Kafka client in Erlang
+pkg_brod_homepage = https://github.com/klarna/brod
+pkg_brod_fetch = git
+pkg_brod_repo = https://github.com/klarna/brod.git
+pkg_brod_commit = master
+
+PACKAGES += bson
+pkg_bson_name = bson
+pkg_bson_description = BSON documents in Erlang, see bsonspec.org
+pkg_bson_homepage = https://github.com/comtihon/bson-erlang
+pkg_bson_fetch = git
+pkg_bson_repo = https://github.com/comtihon/bson-erlang
+pkg_bson_commit = master
+
+PACKAGES += bullet
+pkg_bullet_name = bullet
+pkg_bullet_description = Simple, reliable, efficient streaming for Cowboy.
+pkg_bullet_homepage = http://ninenines.eu
+pkg_bullet_fetch = git
+pkg_bullet_repo = https://github.com/ninenines/bullet
+pkg_bullet_commit = master
+
+PACKAGES += cache
+pkg_cache_name = cache
+pkg_cache_description = Erlang in-memory cache
+pkg_cache_homepage = https://github.com/fogfish/cache
+pkg_cache_fetch = git
+pkg_cache_repo = https://github.com/fogfish/cache
+pkg_cache_commit = master
+
+PACKAGES += cake
+pkg_cake_name = cake
+pkg_cake_description = Really simple terminal colorization
+pkg_cake_homepage = https://github.com/darach/cake-erl
+pkg_cake_fetch = git
+pkg_cake_repo = https://github.com/darach/cake-erl
+pkg_cake_commit = master
+
+PACKAGES += carotene
+pkg_carotene_name = carotene
+pkg_carotene_description = Real-time server
+pkg_carotene_homepage = https://github.com/carotene/carotene
+pkg_carotene_fetch = git
+pkg_carotene_repo = https://github.com/carotene/carotene
+pkg_carotene_commit = master
+
+PACKAGES += cberl
+pkg_cberl_name = cberl
+pkg_cberl_description = NIF based Erlang bindings for Couchbase
+pkg_cberl_homepage = https://github.com/chitika/cberl
+pkg_cberl_fetch = git
+pkg_cberl_repo = https://github.com/chitika/cberl
+pkg_cberl_commit = master
+
+PACKAGES += cecho
+pkg_cecho_name = cecho
+pkg_cecho_description = An ncurses library for Erlang
+pkg_cecho_homepage = https://github.com/mazenharake/cecho
+pkg_cecho_fetch = git
+pkg_cecho_repo = https://github.com/mazenharake/cecho
+pkg_cecho_commit = master
+
+PACKAGES += cferl
+pkg_cferl_name = cferl
+pkg_cferl_description = Rackspace / OpenStack Cloud Files Erlang Client
+pkg_cferl_homepage = https://github.com/ddossot/cferl
+pkg_cferl_fetch = git
+pkg_cferl_repo = https://github.com/ddossot/cferl
+pkg_cferl_commit = master
+
+PACKAGES += chaos_monkey
+pkg_chaos_monkey_name = chaos_monkey
+pkg_chaos_monkey_description = This is The CHAOS MONKEY. It will kill your processes.
+pkg_chaos_monkey_homepage = https://github.com/dLuna/chaos_monkey
+pkg_chaos_monkey_fetch = git
+pkg_chaos_monkey_repo = https://github.com/dLuna/chaos_monkey
+pkg_chaos_monkey_commit = master
+
+PACKAGES += check_node
+pkg_check_node_name = check_node
+pkg_check_node_description = Nagios Scripts for monitoring Riak
+pkg_check_node_homepage = https://github.com/basho-labs/riak_nagios
+pkg_check_node_fetch = git
+pkg_check_node_repo = https://github.com/basho-labs/riak_nagios
+pkg_check_node_commit = master
+
+PACKAGES += chronos
+pkg_chronos_name = chronos
+pkg_chronos_description = Timer module for Erlang that makes it easy to abstract time out of the tests.
+pkg_chronos_homepage = https://github.com/lehoff/chronos
+pkg_chronos_fetch = git
+pkg_chronos_repo = https://github.com/lehoff/chronos
+pkg_chronos_commit = master
+
+PACKAGES += chumak
+pkg_chumak_name = chumak
+pkg_chumak_description = Pure Erlang implementation of ZeroMQ Message Transport Protocol.
+pkg_chumak_homepage = http://choven.ca
+pkg_chumak_fetch = git
+pkg_chumak_repo = https://github.com/chovencorp/chumak
+pkg_chumak_commit = master
+
+PACKAGES += cl
+pkg_cl_name = cl
+pkg_cl_description = OpenCL binding for Erlang
+pkg_cl_homepage = https://github.com/tonyrog/cl
+pkg_cl_fetch = git
+pkg_cl_repo = https://github.com/tonyrog/cl
+pkg_cl_commit = master
+
+PACKAGES += clique
+pkg_clique_name = clique
+pkg_clique_description = CLI Framework for Erlang
+pkg_clique_homepage = https://github.com/basho/clique
+pkg_clique_fetch = git
+pkg_clique_repo = https://github.com/basho/clique
+pkg_clique_commit = develop
+
+PACKAGES += cloudi_core
+pkg_cloudi_core_name = cloudi_core
+pkg_cloudi_core_description = CloudI internal service runtime
+pkg_cloudi_core_homepage = http://cloudi.org/
+pkg_cloudi_core_fetch = git
+pkg_cloudi_core_repo = https://github.com/CloudI/cloudi_core
+pkg_cloudi_core_commit = master
+
+PACKAGES += cloudi_service_api_requests
+pkg_cloudi_service_api_requests_name = cloudi_service_api_requests
+pkg_cloudi_service_api_requests_description = CloudI Service API requests (JSON-RPC/Erlang-term support)
+pkg_cloudi_service_api_requests_homepage = http://cloudi.org/
+pkg_cloudi_service_api_requests_fetch = git
+pkg_cloudi_service_api_requests_repo = https://github.com/CloudI/cloudi_service_api_requests
+pkg_cloudi_service_api_requests_commit = master
+
+PACKAGES += cloudi_service_db
+pkg_cloudi_service_db_name = cloudi_service_db
+pkg_cloudi_service_db_description = CloudI Database (in-memory/testing/generic)
+pkg_cloudi_service_db_homepage = http://cloudi.org/
+pkg_cloudi_service_db_fetch = git
+pkg_cloudi_service_db_repo = https://github.com/CloudI/cloudi_service_db
+pkg_cloudi_service_db_commit = master
+
+PACKAGES += cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_name = cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_description = Cassandra CloudI Service
+pkg_cloudi_service_db_cassandra_homepage = http://cloudi.org/
+pkg_cloudi_service_db_cassandra_fetch = git
+pkg_cloudi_service_db_cassandra_repo = https://github.com/CloudI/cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_commit = master
+
+PACKAGES += cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_name = cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_description = Cassandra CQL CloudI Service
+pkg_cloudi_service_db_cassandra_cql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_cassandra_cql_fetch = git
+pkg_cloudi_service_db_cassandra_cql_repo = https://github.com/CloudI/cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_commit = master
+
+PACKAGES += cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_name = cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_description = CouchDB CloudI Service
+pkg_cloudi_service_db_couchdb_homepage = http://cloudi.org/
+pkg_cloudi_service_db_couchdb_fetch = git
+pkg_cloudi_service_db_couchdb_repo = https://github.com/CloudI/cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_commit = master
+
+PACKAGES += cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_name = cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_description = elasticsearch CloudI Service
+pkg_cloudi_service_db_elasticsearch_homepage = http://cloudi.org/
+pkg_cloudi_service_db_elasticsearch_fetch = git
+pkg_cloudi_service_db_elasticsearch_repo = https://github.com/CloudI/cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_commit = master
+
+PACKAGES += cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_name = cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_description = memcached CloudI Service
+pkg_cloudi_service_db_memcached_homepage = http://cloudi.org/
+pkg_cloudi_service_db_memcached_fetch = git
+pkg_cloudi_service_db_memcached_repo = https://github.com/CloudI/cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_commit = master
+
+PACKAGES += cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_name = cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_description = MySQL CloudI Service
+pkg_cloudi_service_db_mysql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_mysql_fetch = git
+pkg_cloudi_service_db_mysql_repo = https://github.com/CloudI/cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_commit = master
+
+PACKAGES += cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_name = cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_description = PostgreSQL CloudI Service
+pkg_cloudi_service_db_pgsql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_pgsql_fetch = git
+pkg_cloudi_service_db_pgsql_repo = https://github.com/CloudI/cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_commit = master
+
+PACKAGES += cloudi_service_db_riak
+pkg_cloudi_service_db_riak_name = cloudi_service_db_riak
+pkg_cloudi_service_db_riak_description = Riak CloudI Service
+pkg_cloudi_service_db_riak_homepage = http://cloudi.org/
+pkg_cloudi_service_db_riak_fetch = git
+pkg_cloudi_service_db_riak_repo = https://github.com/CloudI/cloudi_service_db_riak
+pkg_cloudi_service_db_riak_commit = master
+
+PACKAGES += cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_name = cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_description = Tokyo Tyrant CloudI Service
+pkg_cloudi_service_db_tokyotyrant_homepage = http://cloudi.org/
+pkg_cloudi_service_db_tokyotyrant_fetch = git
+pkg_cloudi_service_db_tokyotyrant_repo = https://github.com/CloudI/cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_commit = master
+
+PACKAGES += cloudi_service_filesystem
+pkg_cloudi_service_filesystem_name = cloudi_service_filesystem
+pkg_cloudi_service_filesystem_description = Filesystem CloudI Service
+pkg_cloudi_service_filesystem_homepage = http://cloudi.org/
+pkg_cloudi_service_filesystem_fetch = git
+pkg_cloudi_service_filesystem_repo = https://github.com/CloudI/cloudi_service_filesystem
+pkg_cloudi_service_filesystem_commit = master
+
+PACKAGES += cloudi_service_http_client
+pkg_cloudi_service_http_client_name = cloudi_service_http_client
+pkg_cloudi_service_http_client_description = HTTP client CloudI Service
+pkg_cloudi_service_http_client_homepage = http://cloudi.org/
+pkg_cloudi_service_http_client_fetch = git
+pkg_cloudi_service_http_client_repo = https://github.com/CloudI/cloudi_service_http_client
+pkg_cloudi_service_http_client_commit = master
+
+PACKAGES += cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_name = cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_description = cowboy HTTP/HTTPS CloudI Service
+pkg_cloudi_service_http_cowboy_homepage = http://cloudi.org/
+pkg_cloudi_service_http_cowboy_fetch = git
+pkg_cloudi_service_http_cowboy_repo = https://github.com/CloudI/cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_commit = master
+
+PACKAGES += cloudi_service_http_elli
+pkg_cloudi_service_http_elli_name = cloudi_service_http_elli
+pkg_cloudi_service_http_elli_description = elli HTTP CloudI Service
+pkg_cloudi_service_http_elli_homepage = http://cloudi.org/
+pkg_cloudi_service_http_elli_fetch = git
+pkg_cloudi_service_http_elli_repo = https://github.com/CloudI/cloudi_service_http_elli
+pkg_cloudi_service_http_elli_commit = master
+
+PACKAGES += cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_name = cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_description = Map/Reduce CloudI Service
+pkg_cloudi_service_map_reduce_homepage = http://cloudi.org/
+pkg_cloudi_service_map_reduce_fetch = git
+pkg_cloudi_service_map_reduce_repo = https://github.com/CloudI/cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_commit = master
+
+PACKAGES += cloudi_service_oauth1
+pkg_cloudi_service_oauth1_name = cloudi_service_oauth1
+pkg_cloudi_service_oauth1_description = OAuth v1.0 CloudI Service
+pkg_cloudi_service_oauth1_homepage = http://cloudi.org/
+pkg_cloudi_service_oauth1_fetch = git
+pkg_cloudi_service_oauth1_repo = https://github.com/CloudI/cloudi_service_oauth1
+pkg_cloudi_service_oauth1_commit = master
+
+PACKAGES += cloudi_service_queue
+pkg_cloudi_service_queue_name = cloudi_service_queue
+pkg_cloudi_service_queue_description = Persistent Queue Service
+pkg_cloudi_service_queue_homepage = http://cloudi.org/
+pkg_cloudi_service_queue_fetch = git
+pkg_cloudi_service_queue_repo = https://github.com/CloudI/cloudi_service_queue
+pkg_cloudi_service_queue_commit = master
+
+PACKAGES += cloudi_service_quorum
+pkg_cloudi_service_quorum_name = cloudi_service_quorum
+pkg_cloudi_service_quorum_description = CloudI Quorum Service
+pkg_cloudi_service_quorum_homepage = http://cloudi.org/
+pkg_cloudi_service_quorum_fetch = git
+pkg_cloudi_service_quorum_repo = https://github.com/CloudI/cloudi_service_quorum
+pkg_cloudi_service_quorum_commit = master
+
+PACKAGES += cloudi_service_router
+pkg_cloudi_service_router_name = cloudi_service_router
+pkg_cloudi_service_router_description = CloudI Router Service
+pkg_cloudi_service_router_homepage = http://cloudi.org/
+pkg_cloudi_service_router_fetch = git
+pkg_cloudi_service_router_repo = https://github.com/CloudI/cloudi_service_router
+pkg_cloudi_service_router_commit = master
+
+PACKAGES += cloudi_service_tcp
+pkg_cloudi_service_tcp_name = cloudi_service_tcp
+pkg_cloudi_service_tcp_description = TCP CloudI Service
+pkg_cloudi_service_tcp_homepage = http://cloudi.org/
+pkg_cloudi_service_tcp_fetch = git
+pkg_cloudi_service_tcp_repo = https://github.com/CloudI/cloudi_service_tcp
+pkg_cloudi_service_tcp_commit = master
+
+PACKAGES += cloudi_service_timers
+pkg_cloudi_service_timers_name = cloudi_service_timers
+pkg_cloudi_service_timers_description = Timers CloudI Service
+pkg_cloudi_service_timers_homepage = http://cloudi.org/
+pkg_cloudi_service_timers_fetch = git
+pkg_cloudi_service_timers_repo = https://github.com/CloudI/cloudi_service_timers
+pkg_cloudi_service_timers_commit = master
+
+PACKAGES += cloudi_service_udp
+pkg_cloudi_service_udp_name = cloudi_service_udp
+pkg_cloudi_service_udp_description = UDP CloudI Service
+pkg_cloudi_service_udp_homepage = http://cloudi.org/
+pkg_cloudi_service_udp_fetch = git
+pkg_cloudi_service_udp_repo = https://github.com/CloudI/cloudi_service_udp
+pkg_cloudi_service_udp_commit = master
+
+PACKAGES += cloudi_service_validate
+pkg_cloudi_service_validate_name = cloudi_service_validate
+pkg_cloudi_service_validate_description = CloudI Validate Service
+pkg_cloudi_service_validate_homepage = http://cloudi.org/
+pkg_cloudi_service_validate_fetch = git
+pkg_cloudi_service_validate_repo = https://github.com/CloudI/cloudi_service_validate
+pkg_cloudi_service_validate_commit = master
+
+PACKAGES += cloudi_service_zeromq
+pkg_cloudi_service_zeromq_name = cloudi_service_zeromq
+pkg_cloudi_service_zeromq_description = ZeroMQ CloudI Service
+pkg_cloudi_service_zeromq_homepage = http://cloudi.org/
+pkg_cloudi_service_zeromq_fetch = git
+pkg_cloudi_service_zeromq_repo = https://github.com/CloudI/cloudi_service_zeromq
+pkg_cloudi_service_zeromq_commit = master
+
+PACKAGES += cluster_info
+pkg_cluster_info_name = cluster_info
+pkg_cluster_info_description = Fork of Hibari's nifty cluster_info OTP app
+pkg_cluster_info_homepage = https://github.com/basho/cluster_info
+pkg_cluster_info_fetch = git
+pkg_cluster_info_repo = https://github.com/basho/cluster_info
+pkg_cluster_info_commit = master
+
+PACKAGES += color
+pkg_color_name = color
+pkg_color_description = ANSI colors for your Erlang
+pkg_color_homepage = https://github.com/julianduque/erlang-color
+pkg_color_fetch = git
+pkg_color_repo = https://github.com/julianduque/erlang-color
+pkg_color_commit = master
+
+PACKAGES += confetti
+pkg_confetti_name = confetti
+pkg_confetti_description = Erlang configuration provider / application:get_env/2 on steroids
+pkg_confetti_homepage = https://github.com/jtendo/confetti
+pkg_confetti_fetch = git
+pkg_confetti_repo = https://github.com/jtendo/confetti
+pkg_confetti_commit = master
+
+PACKAGES += couchbeam
+pkg_couchbeam_name = couchbeam
+pkg_couchbeam_description = Apache CouchDB client in Erlang
+pkg_couchbeam_homepage = https://github.com/benoitc/couchbeam
+pkg_couchbeam_fetch = git
+pkg_couchbeam_repo = https://github.com/benoitc/couchbeam
+pkg_couchbeam_commit = master
+
+PACKAGES += covertool
+pkg_covertool_name = covertool
+pkg_covertool_description = Tool to convert Erlang cover data files into Cobertura XML reports
+pkg_covertool_homepage = https://github.com/idubrov/covertool
+pkg_covertool_fetch = git
+pkg_covertool_repo = https://github.com/idubrov/covertool
+pkg_covertool_commit = master
+
+PACKAGES += cowboy
+pkg_cowboy_name = cowboy
+pkg_cowboy_description = Small, fast and modular HTTP server.
+pkg_cowboy_homepage = http://ninenines.eu
+pkg_cowboy_fetch = git
+pkg_cowboy_repo = https://github.com/ninenines/cowboy
+pkg_cowboy_commit = 1.0.4
+
+PACKAGES += cowdb
+pkg_cowdb_name = cowdb
+pkg_cowdb_description = Pure Key/Value database library for Erlang Applications
+pkg_cowdb_homepage = https://github.com/refuge/cowdb
+pkg_cowdb_fetch = git
+pkg_cowdb_repo = https://github.com/refuge/cowdb
+pkg_cowdb_commit = master
+
+PACKAGES += cowlib
+pkg_cowlib_name = cowlib
+pkg_cowlib_description = Support library for manipulating Web protocols.
+pkg_cowlib_homepage = http://ninenines.eu
+pkg_cowlib_fetch = git
+pkg_cowlib_repo = https://github.com/ninenines/cowlib
+pkg_cowlib_commit = 1.0.2
+
+PACKAGES += cpg
+pkg_cpg_name = cpg
+pkg_cpg_description = CloudI Process Groups
+pkg_cpg_homepage = https://github.com/okeuday/cpg
+pkg_cpg_fetch = git
+pkg_cpg_repo = https://github.com/okeuday/cpg
+pkg_cpg_commit = master
+
+PACKAGES += cqerl
+pkg_cqerl_name = cqerl
+pkg_cqerl_description = Native Erlang CQL client for Cassandra
+pkg_cqerl_homepage = https://matehat.github.io/cqerl/
+pkg_cqerl_fetch = git
+pkg_cqerl_repo = https://github.com/matehat/cqerl
+pkg_cqerl_commit = master
+
+PACKAGES += cr
+pkg_cr_name = cr
+pkg_cr_description = Chain Replication
+pkg_cr_homepage = https://synrc.com/apps/cr/doc/cr.htm
+pkg_cr_fetch = git
+pkg_cr_repo = https://github.com/spawnproc/cr
+pkg_cr_commit = master
+
+PACKAGES += cuttlefish
+pkg_cuttlefish_name = cuttlefish
+pkg_cuttlefish_description = never lose your childlike sense of wonder baby cuttlefish, promise me?
+pkg_cuttlefish_homepage = https://github.com/basho/cuttlefish
+pkg_cuttlefish_fetch = git
+pkg_cuttlefish_repo = https://github.com/basho/cuttlefish
+pkg_cuttlefish_commit = master
+
+PACKAGES += damocles
+pkg_damocles_name = damocles
+pkg_damocles_description = Erlang library for generating adversarial network conditions for QAing distributed applications/systems on a single Linux box.
+pkg_damocles_homepage = https://github.com/lostcolony/damocles
+pkg_damocles_fetch = git
+pkg_damocles_repo = https://github.com/lostcolony/damocles
+pkg_damocles_commit = master
+
+PACKAGES += debbie
+pkg_debbie_name = debbie
+pkg_debbie_description = .DEB Built In Erlang
+pkg_debbie_homepage = https://github.com/crownedgrouse/debbie
+pkg_debbie_fetch = git
+pkg_debbie_repo = https://github.com/crownedgrouse/debbie
+pkg_debbie_commit = master
+
+PACKAGES += decimal
+pkg_decimal_name = decimal
+pkg_decimal_description = An Erlang decimal arithmetic library
+pkg_decimal_homepage = https://github.com/tim/erlang-decimal
+pkg_decimal_fetch = git
+pkg_decimal_repo = https://github.com/tim/erlang-decimal
+pkg_decimal_commit = master
+
+PACKAGES += detergent
+pkg_detergent_name = detergent
+pkg_detergent_description = An emulsifying Erlang SOAP library
+pkg_detergent_homepage = https://github.com/devinus/detergent
+pkg_detergent_fetch = git
+pkg_detergent_repo = https://github.com/devinus/detergent
+pkg_detergent_commit = master
+
+PACKAGES += detest
+pkg_detest_name = detest
+pkg_detest_description = Tool for running tests on a cluster of erlang nodes
+pkg_detest_homepage = https://github.com/biokoda/detest
+pkg_detest_fetch = git
+pkg_detest_repo = https://github.com/biokoda/detest
+pkg_detest_commit = master
+
+PACKAGES += dh_date
+pkg_dh_date_name = dh_date
+pkg_dh_date_description = Date formatting / parsing library for erlang
+pkg_dh_date_homepage = https://github.com/daleharvey/dh_date
+pkg_dh_date_fetch = git
+pkg_dh_date_repo = https://github.com/daleharvey/dh_date
+pkg_dh_date_commit = master
+
+PACKAGES += dirbusterl
+pkg_dirbusterl_name = dirbusterl
+pkg_dirbusterl_description = DirBuster successor in Erlang
+pkg_dirbusterl_homepage = https://github.com/silentsignal/DirBustErl
+pkg_dirbusterl_fetch = git
+pkg_dirbusterl_repo = https://github.com/silentsignal/DirBustErl
+pkg_dirbusterl_commit = master
+
+PACKAGES += dispcount
+pkg_dispcount_name = dispcount
+pkg_dispcount_description = Erlang task dispatcher based on ETS counters.
+pkg_dispcount_homepage = https://github.com/ferd/dispcount
+pkg_dispcount_fetch = git
+pkg_dispcount_repo = https://github.com/ferd/dispcount
+pkg_dispcount_commit = master
+
+PACKAGES += dlhttpc
+pkg_dlhttpc_name = dlhttpc
+pkg_dlhttpc_description = dispcount-based lhttpc fork for massive amounts of requests to limited endpoints
+pkg_dlhttpc_homepage = https://github.com/ferd/dlhttpc
+pkg_dlhttpc_fetch = git
+pkg_dlhttpc_repo = https://github.com/ferd/dlhttpc
+pkg_dlhttpc_commit = master
+
+PACKAGES += dns
+pkg_dns_name = dns
+pkg_dns_description = Erlang DNS library
+pkg_dns_homepage = https://github.com/aetrion/dns_erlang
+pkg_dns_fetch = git
+pkg_dns_repo = https://github.com/aetrion/dns_erlang
+pkg_dns_commit = master
+
+PACKAGES += dnssd
+pkg_dnssd_name = dnssd
+pkg_dnssd_description = Erlang interface to Apple's Bonjour DNS Service Discovery implementation
+pkg_dnssd_homepage = https://github.com/benoitc/dnssd_erlang
+pkg_dnssd_fetch = git
+pkg_dnssd_repo = https://github.com/benoitc/dnssd_erlang
+pkg_dnssd_commit = master
+
+PACKAGES += dynamic_compile
+pkg_dynamic_compile_name = dynamic_compile
+pkg_dynamic_compile_description = compile and load erlang modules from string input
+pkg_dynamic_compile_homepage = https://github.com/jkvor/dynamic_compile
+pkg_dynamic_compile_fetch = git
+pkg_dynamic_compile_repo = https://github.com/jkvor/dynamic_compile
+pkg_dynamic_compile_commit = master
+
+PACKAGES += e2
+pkg_e2_name = e2
+pkg_e2_description = Library to simplify writing correct OTP applications.
+pkg_e2_homepage = http://e2project.org
+pkg_e2_fetch = git
+pkg_e2_repo = https://github.com/gar1t/e2
+pkg_e2_commit = master
+
+PACKAGES += eamf
+pkg_eamf_name = eamf
+pkg_eamf_description = eAMF provides Action Message Format (AMF) support for Erlang
+pkg_eamf_homepage = https://github.com/mrinalwadhwa/eamf
+pkg_eamf_fetch = git
+pkg_eamf_repo = https://github.com/mrinalwadhwa/eamf
+pkg_eamf_commit = master
+
+PACKAGES += eavro
+pkg_eavro_name = eavro
+pkg_eavro_description = Apache Avro encoder/decoder
+pkg_eavro_homepage = https://github.com/SIfoxDevTeam/eavro
+pkg_eavro_fetch = git
+pkg_eavro_repo = https://github.com/SIfoxDevTeam/eavro
+pkg_eavro_commit = master
+
+PACKAGES += ecapnp
+pkg_ecapnp_name = ecapnp
+pkg_ecapnp_description = Cap'n Proto library for Erlang
+pkg_ecapnp_homepage = https://github.com/kaos/ecapnp
+pkg_ecapnp_fetch = git
+pkg_ecapnp_repo = https://github.com/kaos/ecapnp
+pkg_ecapnp_commit = master
+
+PACKAGES += econfig
+pkg_econfig_name = econfig
+pkg_econfig_description = simple Erlang config handler using INI files
+pkg_econfig_homepage = https://github.com/benoitc/econfig
+pkg_econfig_fetch = git
+pkg_econfig_repo = https://github.com/benoitc/econfig
+pkg_econfig_commit = master
+
+PACKAGES += edate
+pkg_edate_name = edate
+pkg_edate_description = date manipulation library for erlang
+pkg_edate_homepage = https://github.com/dweldon/edate
+pkg_edate_fetch = git
+pkg_edate_repo = https://github.com/dweldon/edate
+pkg_edate_commit = master
+
+PACKAGES += edgar
+pkg_edgar_name = edgar
+pkg_edgar_description = Erlang Does GNU AR
+pkg_edgar_homepage = https://github.com/crownedgrouse/edgar
+pkg_edgar_fetch = git
+pkg_edgar_repo = https://github.com/crownedgrouse/edgar
+pkg_edgar_commit = master
+
+PACKAGES += edis
+pkg_edis_name = edis
+pkg_edis_description = An Erlang implementation of Redis KV Store
+pkg_edis_homepage = http://inaka.github.com/edis/
+pkg_edis_fetch = git
+pkg_edis_repo = https://github.com/inaka/edis
+pkg_edis_commit = master
+
+PACKAGES += edns
+pkg_edns_name = edns
+pkg_edns_description = Erlang/OTP DNS server
+pkg_edns_homepage = https://github.com/hcvst/erlang-dns
+pkg_edns_fetch = git
+pkg_edns_repo = https://github.com/hcvst/erlang-dns
+pkg_edns_commit = master
+
+PACKAGES += edown
+pkg_edown_name = edown
+pkg_edown_description = EDoc extension for generating Github-flavored Markdown
+pkg_edown_homepage = https://github.com/uwiger/edown
+pkg_edown_fetch = git
+pkg_edown_repo = https://github.com/uwiger/edown
+pkg_edown_commit = master
+
+PACKAGES += eep
+pkg_eep_name = eep
+pkg_eep_description = Erlang Easy Profiling (eep) application provides a way to analyze application performance and call hierarchy
+pkg_eep_homepage = https://github.com/virtan/eep
+pkg_eep_fetch = git
+pkg_eep_repo = https://github.com/virtan/eep
+pkg_eep_commit = master
+
+PACKAGES += eep_app
+pkg_eep_app_name = eep_app
+pkg_eep_app_description = Embedded Event Processing
+pkg_eep_app_homepage = https://github.com/darach/eep-erl
+pkg_eep_app_fetch = git
+pkg_eep_app_repo = https://github.com/darach/eep-erl
+pkg_eep_app_commit = master
+
+PACKAGES += efene
+pkg_efene_name = efene
+pkg_efene_description = Alternative syntax for the Erlang Programming Language focusing on simplicity, ease of use and programmer UX
+pkg_efene_homepage = https://github.com/efene/efene
+pkg_efene_fetch = git
+pkg_efene_repo = https://github.com/efene/efene
+pkg_efene_commit = master
+
+PACKAGES += egeoip
+pkg_egeoip_name = egeoip
+pkg_egeoip_description = Erlang IP Geolocation module, currently supporting the MaxMind GeoLite City Database.
+pkg_egeoip_homepage = https://github.com/mochi/egeoip
+pkg_egeoip_fetch = git
+pkg_egeoip_repo = https://github.com/mochi/egeoip
+pkg_egeoip_commit = master
+
+PACKAGES += ehsa
+pkg_ehsa_name = ehsa
+pkg_ehsa_description = Erlang HTTP server basic and digest authentication modules
+pkg_ehsa_homepage = https://bitbucket.org/a12n/ehsa
+pkg_ehsa_fetch = hg
+pkg_ehsa_repo = https://bitbucket.org/a12n/ehsa
+pkg_ehsa_commit = default
+
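+# The fetch field selects the VCS: most entries use git, while Mercurial-hosted
+# packages such as ehsa above use hg with the "default" branch as the ref. A
+# minimal pinning sketch for a consuming project Makefile (assuming stock
+# erlang.mk; the ref value below is hypothetical):
+#
+#   DEPS = ehsa
+#   dep_ehsa_commit = 4.0.2
+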
+PACKAGES += ej
+pkg_ej_name = ej
+pkg_ej_description = Helper module for working with Erlang terms representing JSON
+pkg_ej_homepage = https://github.com/seth/ej
+pkg_ej_fetch = git
+pkg_ej_repo = https://github.com/seth/ej
+pkg_ej_commit = master
+
+PACKAGES += ejabberd
+pkg_ejabberd_name = ejabberd
+pkg_ejabberd_description = Robust, ubiquitous and massively scalable Jabber / XMPP Instant Messaging platform
+pkg_ejabberd_homepage = https://github.com/processone/ejabberd
+pkg_ejabberd_fetch = git
+pkg_ejabberd_repo = https://github.com/processone/ejabberd
+pkg_ejabberd_commit = master
+
+PACKAGES += ejwt
+pkg_ejwt_name = ejwt
+pkg_ejwt_description = erlang library for JSON Web Token
+pkg_ejwt_homepage = https://github.com/artefactop/ejwt
+pkg_ejwt_fetch = git
+pkg_ejwt_repo = https://github.com/artefactop/ejwt
+pkg_ejwt_commit = master
+
+PACKAGES += ekaf
+pkg_ekaf_name = ekaf
+pkg_ekaf_description = A minimal, high-performance Kafka client in Erlang.
+pkg_ekaf_homepage = https://github.com/helpshift/ekaf
+pkg_ekaf_fetch = git
+pkg_ekaf_repo = https://github.com/helpshift/ekaf
+pkg_ekaf_commit = master
+
+PACKAGES += elarm
+pkg_elarm_name = elarm
+pkg_elarm_description = Alarm Manager for Erlang.
+pkg_elarm_homepage = https://github.com/esl/elarm
+pkg_elarm_fetch = git
+pkg_elarm_repo = https://github.com/esl/elarm
+pkg_elarm_commit = master
+
+PACKAGES += eleveldb
+pkg_eleveldb_name = eleveldb
+pkg_eleveldb_description = Erlang LevelDB API
+pkg_eleveldb_homepage = https://github.com/basho/eleveldb
+pkg_eleveldb_fetch = git
+pkg_eleveldb_repo = https://github.com/basho/eleveldb
+pkg_eleveldb_commit = master
+
+PACKAGES += elixir
+pkg_elixir_name = elixir
+pkg_elixir_description = Elixir is a dynamic, functional language designed for building scalable and maintainable applications
+pkg_elixir_homepage = https://elixir-lang.org/
+pkg_elixir_fetch = git
+pkg_elixir_repo = https://github.com/elixir-lang/elixir
+pkg_elixir_commit = master
+
+PACKAGES += elli
+pkg_elli_name = elli
+pkg_elli_description = Simple, robust and performant Erlang web server
+pkg_elli_homepage = https://github.com/elli-lib/elli
+pkg_elli_fetch = git
+pkg_elli_repo = https://github.com/elli-lib/elli
+pkg_elli_commit = master
+
+PACKAGES += elvis
+pkg_elvis_name = elvis
+pkg_elvis_description = Erlang Style Reviewer
+pkg_elvis_homepage = https://github.com/inaka/elvis
+pkg_elvis_fetch = git
+pkg_elvis_repo = https://github.com/inaka/elvis
+pkg_elvis_commit = master
+
+PACKAGES += emagick
+pkg_emagick_name = emagick
+pkg_emagick_description = Wrapper for Graphics/ImageMagick command line tool.
+pkg_emagick_homepage = https://github.com/kivra/emagick
+pkg_emagick_fetch = git
+pkg_emagick_repo = https://github.com/kivra/emagick
+pkg_emagick_commit = master
+
+PACKAGES += emysql
+pkg_emysql_name = emysql
+pkg_emysql_description = Stable, pure Erlang MySQL driver.
+pkg_emysql_homepage = https://github.com/Eonblast/Emysql
+pkg_emysql_fetch = git
+pkg_emysql_repo = https://github.com/Eonblast/Emysql
+pkg_emysql_commit = master
+
+PACKAGES += enm
+pkg_enm_name = enm
+pkg_enm_description = Erlang driver for nanomsg
+pkg_enm_homepage = https://github.com/basho/enm
+pkg_enm_fetch = git
+pkg_enm_repo = https://github.com/basho/enm
+pkg_enm_commit = master
+
+PACKAGES += entop
+pkg_entop_name = entop
+pkg_entop_description = A top-like tool for monitoring an Erlang node
+pkg_entop_homepage = https://github.com/mazenharake/entop
+pkg_entop_fetch = git
+pkg_entop_repo = https://github.com/mazenharake/entop
+pkg_entop_commit = master
+
+PACKAGES += epcap
+pkg_epcap_name = epcap
+pkg_epcap_description = Erlang packet capture interface using pcap
+pkg_epcap_homepage = https://github.com/msantos/epcap
+pkg_epcap_fetch = git
+pkg_epcap_repo = https://github.com/msantos/epcap
+pkg_epcap_commit = master
+
+PACKAGES += eper
+pkg_eper_name = eper
+pkg_eper_description = Erlang performance and debugging tools.
+pkg_eper_homepage = https://github.com/massemanet/eper
+pkg_eper_fetch = git
+pkg_eper_repo = https://github.com/massemanet/eper
+pkg_eper_commit = master
+
+PACKAGES += epgsql
+pkg_epgsql_name = epgsql
+pkg_epgsql_description = Erlang PostgreSQL client library.
+pkg_epgsql_homepage = https://github.com/epgsql/epgsql
+pkg_epgsql_fetch = git
+pkg_epgsql_repo = https://github.com/epgsql/epgsql
+pkg_epgsql_commit = master
+
+PACKAGES += episcina
+pkg_episcina_name = episcina
+pkg_episcina_description = A simple, non-intrusive resource pool for connections
+pkg_episcina_homepage = https://github.com/erlware/episcina
+pkg_episcina_fetch = git
+pkg_episcina_repo = https://github.com/erlware/episcina
+pkg_episcina_commit = master
+
+PACKAGES += eplot
+pkg_eplot_name = eplot
+pkg_eplot_description = A plot engine written in erlang.
+pkg_eplot_homepage = https://github.com/psyeugenic/eplot
+pkg_eplot_fetch = git
+pkg_eplot_repo = https://github.com/psyeugenic/eplot
+pkg_eplot_commit = master
+
+PACKAGES += epocxy
+pkg_epocxy_name = epocxy
+pkg_epocxy_description = Erlang Patterns of Concurrency
+pkg_epocxy_homepage = https://github.com/duomark/epocxy
+pkg_epocxy_fetch = git
+pkg_epocxy_repo = https://github.com/duomark/epocxy
+pkg_epocxy_commit = master
+
+PACKAGES += epubnub
+pkg_epubnub_name = epubnub
+pkg_epubnub_description = Erlang PubNub API
+pkg_epubnub_homepage = https://github.com/tsloughter/epubnub
+pkg_epubnub_fetch = git
+pkg_epubnub_repo = https://github.com/tsloughter/epubnub
+pkg_epubnub_commit = master
+
+PACKAGES += eqm
+pkg_eqm_name = eqm
+pkg_eqm_description = Erlang pub sub with supply-demand channels
+pkg_eqm_homepage = https://github.com/loucash/eqm
+pkg_eqm_fetch = git
+pkg_eqm_repo = https://github.com/loucash/eqm
+pkg_eqm_commit = master
+
+PACKAGES += eredis
+pkg_eredis_name = eredis
+pkg_eredis_description = Erlang Redis client
+pkg_eredis_homepage = https://github.com/wooga/eredis
+pkg_eredis_fetch = git
+pkg_eredis_repo = https://github.com/wooga/eredis
+pkg_eredis_commit = master
+
+PACKAGES += eredis_pool
+pkg_eredis_pool_name = eredis_pool
+pkg_eredis_pool_description = eredis_pool is a pool of Redis clients, using eredis and poolboy.
+pkg_eredis_pool_homepage = https://github.com/hiroeorz/eredis_pool
+pkg_eredis_pool_fetch = git
+pkg_eredis_pool_repo = https://github.com/hiroeorz/eredis_pool
+pkg_eredis_pool_commit = master
+
+PACKAGES += erl_streams
+pkg_erl_streams_name = erl_streams
+pkg_erl_streams_description = Streams in Erlang
+pkg_erl_streams_homepage = https://github.com/epappas/erl_streams
+pkg_erl_streams_fetch = git
+pkg_erl_streams_repo = https://github.com/epappas/erl_streams
+pkg_erl_streams_commit = master
+
+PACKAGES += erlang_cep
+pkg_erlang_cep_name = erlang_cep
+pkg_erlang_cep_description = A basic CEP package written in erlang
+pkg_erlang_cep_homepage = https://github.com/danmacklin/erlang_cep
+pkg_erlang_cep_fetch = git
+pkg_erlang_cep_repo = https://github.com/danmacklin/erlang_cep
+pkg_erlang_cep_commit = master
+
+PACKAGES += erlang_js
+pkg_erlang_js_name = erlang_js
+pkg_erlang_js_description = A linked-in driver for Erlang to Mozilla's Spidermonkey Javascript runtime.
+pkg_erlang_js_homepage = https://github.com/basho/erlang_js
+pkg_erlang_js_fetch = git
+pkg_erlang_js_repo = https://github.com/basho/erlang_js
+pkg_erlang_js_commit = master
+
+PACKAGES += erlang_localtime
+pkg_erlang_localtime_name = erlang_localtime
+pkg_erlang_localtime_description = Erlang library for conversion from one local time to another
+pkg_erlang_localtime_homepage = https://github.com/dmitryme/erlang_localtime
+pkg_erlang_localtime_fetch = git
+pkg_erlang_localtime_repo = https://github.com/dmitryme/erlang_localtime
+pkg_erlang_localtime_commit = master
+
+PACKAGES += erlang_smtp
+pkg_erlang_smtp_name = erlang_smtp
+pkg_erlang_smtp_description = Erlang SMTP and POP3 server code.
+pkg_erlang_smtp_homepage = https://github.com/tonyg/erlang-smtp
+pkg_erlang_smtp_fetch = git
+pkg_erlang_smtp_repo = https://github.com/tonyg/erlang-smtp
+pkg_erlang_smtp_commit = master
+
+PACKAGES += erlang_term
+pkg_erlang_term_name = erlang_term
+pkg_erlang_term_description = Erlang Term Info
+pkg_erlang_term_homepage = https://github.com/okeuday/erlang_term
+pkg_erlang_term_fetch = git
+pkg_erlang_term_repo = https://github.com/okeuday/erlang_term
+pkg_erlang_term_commit = master
+
+PACKAGES += erlastic_search
+pkg_erlastic_search_name = erlastic_search
+pkg_erlastic_search_description = An Erlang app for communicating with Elastic Search's rest interface.
+pkg_erlastic_search_homepage = https://github.com/tsloughter/erlastic_search
+pkg_erlastic_search_fetch = git
+pkg_erlastic_search_repo = https://github.com/tsloughter/erlastic_search
+pkg_erlastic_search_commit = master
+
+PACKAGES += erlasticsearch
+pkg_erlasticsearch_name = erlasticsearch
+pkg_erlasticsearch_description = Erlang thrift interface to elastic_search
+pkg_erlasticsearch_homepage = https://github.com/dieswaytoofast/erlasticsearch
+pkg_erlasticsearch_fetch = git
+pkg_erlasticsearch_repo = https://github.com/dieswaytoofast/erlasticsearch
+pkg_erlasticsearch_commit = master
+
+PACKAGES += erlbrake
+pkg_erlbrake_name = erlbrake
+pkg_erlbrake_description = Erlang Airbrake notification client
+pkg_erlbrake_homepage = https://github.com/kenpratt/erlbrake
+pkg_erlbrake_fetch = git
+pkg_erlbrake_repo = https://github.com/kenpratt/erlbrake
+pkg_erlbrake_commit = master
+
+PACKAGES += erlcloud
+pkg_erlcloud_name = erlcloud
+pkg_erlcloud_description = Cloud Computing library for erlang (Amazon EC2, S3, SQS, SimpleDB, Mechanical Turk, ELB)
+pkg_erlcloud_homepage = https://github.com/gleber/erlcloud
+pkg_erlcloud_fetch = git
+pkg_erlcloud_repo = https://github.com/gleber/erlcloud
+pkg_erlcloud_commit = master
+
+PACKAGES += erlcron
+pkg_erlcron_name = erlcron
+pkg_erlcron_description = Erlang cronish system
+pkg_erlcron_homepage = https://github.com/erlware/erlcron
+pkg_erlcron_fetch = git
+pkg_erlcron_repo = https://github.com/erlware/erlcron
+pkg_erlcron_commit = master
+
+PACKAGES += erldb
+pkg_erldb_name = erldb
+pkg_erldb_description = ORM (Object-relational mapping) application implemented in Erlang
+pkg_erldb_homepage = http://erldb.org
+pkg_erldb_fetch = git
+pkg_erldb_repo = https://github.com/erldb/erldb
+pkg_erldb_commit = master
+
+PACKAGES += erldis
+pkg_erldis_name = erldis
+pkg_erldis_description = redis erlang client library
+pkg_erldis_homepage = https://github.com/cstar/erldis
+pkg_erldis_fetch = git
+pkg_erldis_repo = https://github.com/cstar/erldis
+pkg_erldis_commit = master
+
+PACKAGES += erldns
+pkg_erldns_name = erldns
+pkg_erldns_description = DNS server, in erlang.
+pkg_erldns_homepage = https://github.com/aetrion/erl-dns
+pkg_erldns_fetch = git
+pkg_erldns_repo = https://github.com/aetrion/erl-dns
+pkg_erldns_commit = master
+
+PACKAGES += erldocker
+pkg_erldocker_name = erldocker
+pkg_erldocker_description = Docker Remote API client for Erlang
+pkg_erldocker_homepage = https://github.com/proger/erldocker
+pkg_erldocker_fetch = git
+pkg_erldocker_repo = https://github.com/proger/erldocker
+pkg_erldocker_commit = master
+
+PACKAGES += erlfsmon
+pkg_erlfsmon_name = erlfsmon
+pkg_erlfsmon_description = Erlang filesystem event watcher for Linux and OSX
+pkg_erlfsmon_homepage = https://github.com/proger/erlfsmon
+pkg_erlfsmon_fetch = git
+pkg_erlfsmon_repo = https://github.com/proger/erlfsmon
+pkg_erlfsmon_commit = master
+
+PACKAGES += erlgit
+pkg_erlgit_name = erlgit
+pkg_erlgit_description = Erlang convenience wrapper around git executable
+pkg_erlgit_homepage = https://github.com/gleber/erlgit
+pkg_erlgit_fetch = git
+pkg_erlgit_repo = https://github.com/gleber/erlgit
+pkg_erlgit_commit = master
+
+PACKAGES += erlguten
+pkg_erlguten_name = erlguten
+pkg_erlguten_description = ErlGuten is a system for high-quality typesetting, written purely in Erlang.
+pkg_erlguten_homepage = https://github.com/richcarl/erlguten
+pkg_erlguten_fetch = git
+pkg_erlguten_repo = https://github.com/richcarl/erlguten
+pkg_erlguten_commit = master
+
+PACKAGES += erlmc
+pkg_erlmc_name = erlmc
+pkg_erlmc_description = Erlang memcached binary protocol client
+pkg_erlmc_homepage = https://github.com/jkvor/erlmc
+pkg_erlmc_fetch = git
+pkg_erlmc_repo = https://github.com/jkvor/erlmc
+pkg_erlmc_commit = master
+
+PACKAGES += erlmongo
+pkg_erlmongo_name = erlmongo
+pkg_erlmongo_description = Record based Erlang driver for MongoDB with gridfs support
+pkg_erlmongo_homepage = https://github.com/SergejJurecko/erlmongo
+pkg_erlmongo_fetch = git
+pkg_erlmongo_repo = https://github.com/SergejJurecko/erlmongo
+pkg_erlmongo_commit = master
+
+PACKAGES += erlog
+pkg_erlog_name = erlog
+pkg_erlog_description = Prolog interpreter in and for Erlang
+pkg_erlog_homepage = https://github.com/rvirding/erlog
+pkg_erlog_fetch = git
+pkg_erlog_repo = https://github.com/rvirding/erlog
+pkg_erlog_commit = master
+
+PACKAGES += erlpass
+pkg_erlpass_name = erlpass
+pkg_erlpass_description = A library to handle password hashing and changing in a safe manner, independent from any kind of storage whatsoever.
+pkg_erlpass_homepage = https://github.com/ferd/erlpass
+pkg_erlpass_fetch = git
+pkg_erlpass_repo = https://github.com/ferd/erlpass
+pkg_erlpass_commit = master
+
+PACKAGES += erlport
+pkg_erlport_name = erlport
+pkg_erlport_description = ErlPort - connect Erlang to other languages
+pkg_erlport_homepage = https://github.com/hdima/erlport
+pkg_erlport_fetch = git
+pkg_erlport_repo = https://github.com/hdima/erlport
+pkg_erlport_commit = master
+
+PACKAGES += erlsh
+pkg_erlsh_name = erlsh
+pkg_erlsh_description = Erlang shell tools
+pkg_erlsh_homepage = https://github.com/proger/erlsh
+pkg_erlsh_fetch = git
+pkg_erlsh_repo = https://github.com/proger/erlsh
+pkg_erlsh_commit = master
+
+PACKAGES += erlsha2
+pkg_erlsha2_name = erlsha2
+pkg_erlsha2_description = SHA-224, SHA-256, SHA-384, SHA-512 implemented in Erlang NIFs.
+pkg_erlsha2_homepage = https://github.com/vinoski/erlsha2
+pkg_erlsha2_fetch = git
+pkg_erlsha2_repo = https://github.com/vinoski/erlsha2
+pkg_erlsha2_commit = master
+
+PACKAGES += erlsom
+pkg_erlsom_name = erlsom
+pkg_erlsom_description = XML parser for Erlang
+pkg_erlsom_homepage = https://github.com/willemdj/erlsom
+pkg_erlsom_fetch = git
+pkg_erlsom_repo = https://github.com/willemdj/erlsom
+pkg_erlsom_commit = master
+
+PACKAGES += erlubi
+pkg_erlubi_name = erlubi
+pkg_erlubi_description = Ubigraph Erlang Client (and Process Visualizer)
+pkg_erlubi_homepage = https://github.com/krestenkrab/erlubi
+pkg_erlubi_fetch = git
+pkg_erlubi_repo = https://github.com/krestenkrab/erlubi
+pkg_erlubi_commit = master
+
+PACKAGES += erlvolt
+pkg_erlvolt_name = erlvolt
+pkg_erlvolt_description = VoltDB Erlang Client Driver
+pkg_erlvolt_homepage = https://github.com/VoltDB/voltdb-client-erlang
+pkg_erlvolt_fetch = git
+pkg_erlvolt_repo = https://github.com/VoltDB/voltdb-client-erlang
+pkg_erlvolt_commit = master
+
+PACKAGES += erlware_commons
+pkg_erlware_commons_name = erlware_commons
+pkg_erlware_commons_description = Erlware Commons is an Erlware project focused on all aspects of reusable Erlang components.
+pkg_erlware_commons_homepage = https://github.com/erlware/erlware_commons
+pkg_erlware_commons_fetch = git
+pkg_erlware_commons_repo = https://github.com/erlware/erlware_commons
+pkg_erlware_commons_commit = master
+
+PACKAGES += erlydtl
+pkg_erlydtl_name = erlydtl
+pkg_erlydtl_description = Django Template Language for Erlang.
+pkg_erlydtl_homepage = https://github.com/erlydtl/erlydtl
+pkg_erlydtl_fetch = git
+pkg_erlydtl_repo = https://github.com/erlydtl/erlydtl
+pkg_erlydtl_commit = master
+
+PACKAGES += errd
+pkg_errd_name = errd
+pkg_errd_description = Erlang RRDTool library
+pkg_errd_homepage = https://github.com/archaelus/errd
+pkg_errd_fetch = git
+pkg_errd_repo = https://github.com/archaelus/errd
+pkg_errd_commit = master
+
+PACKAGES += erserve
+pkg_erserve_name = erserve
+pkg_erserve_description = Erlang/Rserve communication interface
+pkg_erserve_homepage = https://github.com/del/erserve
+pkg_erserve_fetch = git
+pkg_erserve_repo = https://github.com/del/erserve
+pkg_erserve_commit = master
+
+PACKAGES += erwa
+pkg_erwa_name = erwa
+pkg_erwa_description = A WAMP router and client written in Erlang.
+pkg_erwa_homepage = https://github.com/bwegh/erwa
+pkg_erwa_fetch = git
+pkg_erwa_repo = https://github.com/bwegh/erwa
+pkg_erwa_commit = master
+
+PACKAGES += escalus
+pkg_escalus_name = escalus
+pkg_escalus_description = An XMPP client library in Erlang for conveniently testing XMPP servers
+pkg_escalus_homepage = https://github.com/esl/escalus
+pkg_escalus_fetch = git
+pkg_escalus_repo = https://github.com/esl/escalus
+pkg_escalus_commit = master
+
+PACKAGES += esh_mk
+pkg_esh_mk_name = esh_mk
+pkg_esh_mk_description = esh template engine plugin for erlang.mk
+pkg_esh_mk_homepage = https://github.com/crownedgrouse/esh.mk
+pkg_esh_mk_fetch = git
+pkg_esh_mk_repo = https://github.com/crownedgrouse/esh.mk.git
+pkg_esh_mk_commit = master
+
+PACKAGES += espec
+pkg_espec_name = espec
+pkg_espec_description = ESpec: Behaviour driven development framework for Erlang
+pkg_espec_homepage = https://github.com/lucaspiller/espec
+pkg_espec_fetch = git
+pkg_espec_repo = https://github.com/lucaspiller/espec
+pkg_espec_commit = master
+
+PACKAGES += estatsd
+pkg_estatsd_name = estatsd
+pkg_estatsd_description = Erlang stats aggregation app that periodically flushes data to graphite
+pkg_estatsd_homepage = https://github.com/RJ/estatsd
+pkg_estatsd_fetch = git
+pkg_estatsd_repo = https://github.com/RJ/estatsd
+pkg_estatsd_commit = master
+
+PACKAGES += etap
+pkg_etap_name = etap
+pkg_etap_description = etap is a simple erlang testing library that provides TAP compliant output.
+pkg_etap_homepage = https://github.com/ngerakines/etap
+pkg_etap_fetch = git
+pkg_etap_repo = https://github.com/ngerakines/etap
+pkg_etap_commit = master
+
+PACKAGES += etest
+pkg_etest_name = etest
+pkg_etest_description = A lightweight, convention over configuration test framework for Erlang
+pkg_etest_homepage = https://github.com/wooga/etest
+pkg_etest_fetch = git
+pkg_etest_repo = https://github.com/wooga/etest
+pkg_etest_commit = master
+
+PACKAGES += etest_http
+pkg_etest_http_name = etest_http
+pkg_etest_http_description = etest Assertions around HTTP (client-side)
+pkg_etest_http_homepage = https://github.com/wooga/etest_http
+pkg_etest_http_fetch = git
+pkg_etest_http_repo = https://github.com/wooga/etest_http
+pkg_etest_http_commit = master
+
+PACKAGES += etoml
+pkg_etoml_name = etoml
+pkg_etoml_description = TOML parser for Erlang
+pkg_etoml_homepage = https://github.com/kalta/etoml
+pkg_etoml_fetch = git
+pkg_etoml_repo = https://github.com/kalta/etoml
+pkg_etoml_commit = master
+
+PACKAGES += eunit
+pkg_eunit_name = eunit
+pkg_eunit_description = The EUnit lightweight unit testing framework for Erlang - this is the canonical development repository.
+pkg_eunit_homepage = https://github.com/richcarl/eunit
+pkg_eunit_fetch = git
+pkg_eunit_repo = https://github.com/richcarl/eunit
+pkg_eunit_commit = master
+
+PACKAGES += eunit_formatters
+pkg_eunit_formatters_name = eunit_formatters
+pkg_eunit_formatters_description = Because eunit's output sucks. Let's make it better.
+pkg_eunit_formatters_homepage = https://github.com/seancribbs/eunit_formatters
+pkg_eunit_formatters_fetch = git
+pkg_eunit_formatters_repo = https://github.com/seancribbs/eunit_formatters
+pkg_eunit_formatters_commit = master
+
+PACKAGES += euthanasia
+pkg_euthanasia_name = euthanasia
+pkg_euthanasia_description = Merciful killer for your Erlang processes
+pkg_euthanasia_homepage = https://github.com/doubleyou/euthanasia
+pkg_euthanasia_fetch = git
+pkg_euthanasia_repo = https://github.com/doubleyou/euthanasia
+pkg_euthanasia_commit = master
+
+PACKAGES += evum
+pkg_evum_name = evum
+pkg_evum_description = Spawn Linux VMs as Erlang processes in the Erlang VM
+pkg_evum_homepage = https://github.com/msantos/evum
+pkg_evum_fetch = git
+pkg_evum_repo = https://github.com/msantos/evum
+pkg_evum_commit = master
+
+PACKAGES += exec
+pkg_exec_name = erlexec
+pkg_exec_description = Execute and control OS processes from Erlang/OTP.
+pkg_exec_homepage = http://saleyn.github.com/erlexec
+pkg_exec_fetch = git
+pkg_exec_repo = https://github.com/saleyn/erlexec
+pkg_exec_commit = master
+
+PACKAGES += exml
+pkg_exml_name = exml
+pkg_exml_description = XML parsing library in Erlang
+pkg_exml_homepage = https://github.com/paulgray/exml
+pkg_exml_fetch = git
+pkg_exml_repo = https://github.com/paulgray/exml
+pkg_exml_commit = master
+
+PACKAGES += exometer
+pkg_exometer_name = exometer
+pkg_exometer_description = Basic measurement objects and probe behavior
+pkg_exometer_homepage = https://github.com/Feuerlabs/exometer
+pkg_exometer_fetch = git
+pkg_exometer_repo = https://github.com/Feuerlabs/exometer
+pkg_exometer_commit = master
+
+PACKAGES += exs1024
+pkg_exs1024_name = exs1024
+pkg_exs1024_description = Xorshift1024star pseudo random number generator for Erlang.
+pkg_exs1024_homepage = https://github.com/jj1bdx/exs1024
+pkg_exs1024_fetch = git
+pkg_exs1024_repo = https://github.com/jj1bdx/exs1024
+pkg_exs1024_commit = master
+
+PACKAGES += exs64
+pkg_exs64_name = exs64
+pkg_exs64_description = Xorshift64star pseudo random number generator for Erlang.
+pkg_exs64_homepage = https://github.com/jj1bdx/exs64
+pkg_exs64_fetch = git
+pkg_exs64_repo = https://github.com/jj1bdx/exs64
+pkg_exs64_commit = master
+
+PACKAGES += exsplus116
+pkg_exsplus116_name = exsplus116
+pkg_exsplus116_description = Xorshift116plus for Erlang
+pkg_exsplus116_homepage = https://github.com/jj1bdx/exsplus116
+pkg_exsplus116_fetch = git
+pkg_exsplus116_repo = https://github.com/jj1bdx/exsplus116
+pkg_exsplus116_commit = master
+
+PACKAGES += exsplus128
+pkg_exsplus128_name = exsplus128
+pkg_exsplus128_description = Xorshift128plus pseudo random number generator for Erlang.
+pkg_exsplus128_homepage = https://github.com/jj1bdx/exsplus128
+pkg_exsplus128_fetch = git
+pkg_exsplus128_repo = https://github.com/jj1bdx/exsplus128
+pkg_exsplus128_commit = master
+
+PACKAGES += ezmq
+pkg_ezmq_name = ezmq
+pkg_ezmq_description = zMQ implemented in Erlang
+pkg_ezmq_homepage = https://github.com/RoadRunnr/ezmq
+pkg_ezmq_fetch = git
+pkg_ezmq_repo = https://github.com/RoadRunnr/ezmq
+pkg_ezmq_commit = master
+
+PACKAGES += ezmtp
+pkg_ezmtp_name = ezmtp
+pkg_ezmtp_description = ZMTP protocol in pure Erlang.
+pkg_ezmtp_homepage = https://github.com/a13x/ezmtp
+pkg_ezmtp_fetch = git
+pkg_ezmtp_repo = https://github.com/a13x/ezmtp
+pkg_ezmtp_commit = master
+
+PACKAGES += fast_disk_log
+pkg_fast_disk_log_name = fast_disk_log
+pkg_fast_disk_log_description = Pool-based asynchronous Erlang disk logger
+pkg_fast_disk_log_homepage = https://github.com/lpgauth/fast_disk_log
+pkg_fast_disk_log_fetch = git
+pkg_fast_disk_log_repo = https://github.com/lpgauth/fast_disk_log
+pkg_fast_disk_log_commit = master
+
+PACKAGES += feeder
+pkg_feeder_name = feeder
+pkg_feeder_description = Stream parse RSS and Atom formatted XML feeds.
+pkg_feeder_homepage = https://github.com/michaelnisi/feeder
+pkg_feeder_fetch = git
+pkg_feeder_repo = https://github.com/michaelnisi/feeder
+pkg_feeder_commit = master
+
+PACKAGES += find_crate
+pkg_find_crate_name = find_crate
+pkg_find_crate_description = Find Rust libs and exes in Erlang application priv directory
+pkg_find_crate_homepage = https://github.com/goertzenator/find_crate
+pkg_find_crate_fetch = git
+pkg_find_crate_repo = https://github.com/goertzenator/find_crate
+pkg_find_crate_commit = master
+
+PACKAGES += fix
+pkg_fix_name = fix
+pkg_fix_description = http://fixprotocol.org/ implementation.
+pkg_fix_homepage = https://github.com/maxlapshin/fix
+pkg_fix_fetch = git
+pkg_fix_repo = https://github.com/maxlapshin/fix
+pkg_fix_commit = master
+
+PACKAGES += flower
+pkg_flower_name = flower
+pkg_flower_description = FlowER - an Erlang OpenFlow development platform
+pkg_flower_homepage = https://github.com/travelping/flower
+pkg_flower_fetch = git
+pkg_flower_repo = https://github.com/travelping/flower
+pkg_flower_commit = master
+
+PACKAGES += fn
+pkg_fn_name = fn
+pkg_fn_description = Function utilities for Erlang
+pkg_fn_homepage = https://github.com/reiddraper/fn
+pkg_fn_fetch = git
+pkg_fn_repo = https://github.com/reiddraper/fn
+pkg_fn_commit = master
+
+PACKAGES += folsom
+pkg_folsom_name = folsom
+pkg_folsom_description = Expose Erlang Events and Metrics
+pkg_folsom_homepage = https://github.com/boundary/folsom
+pkg_folsom_fetch = git
+pkg_folsom_repo = https://github.com/boundary/folsom
+pkg_folsom_commit = master
+
+PACKAGES += folsom_cowboy
+pkg_folsom_cowboy_name = folsom_cowboy
+pkg_folsom_cowboy_description = A Cowboy based Folsom HTTP Wrapper.
+pkg_folsom_cowboy_homepage = https://github.com/boundary/folsom_cowboy
+pkg_folsom_cowboy_fetch = git
+pkg_folsom_cowboy_repo = https://github.com/boundary/folsom_cowboy
+pkg_folsom_cowboy_commit = master
+
+PACKAGES += folsomite
+pkg_folsomite_name = folsomite
+pkg_folsomite_description = blow up your graphite / riemann server with folsom metrics
+pkg_folsomite_homepage = https://github.com/campanja/folsomite
+pkg_folsomite_fetch = git
+pkg_folsomite_repo = https://github.com/campanja/folsomite
+pkg_folsomite_commit = master
+
+PACKAGES += fs
+pkg_fs_name = fs
+pkg_fs_description = Erlang FileSystem Listener
+pkg_fs_homepage = https://github.com/synrc/fs
+pkg_fs_fetch = git
+pkg_fs_repo = https://github.com/synrc/fs
+pkg_fs_commit = master
+
+PACKAGES += fuse
+pkg_fuse_name = fuse
+pkg_fuse_description = A Circuit Breaker for Erlang
+pkg_fuse_homepage = https://github.com/jlouis/fuse
+pkg_fuse_fetch = git
+pkg_fuse_repo = https://github.com/jlouis/fuse
+pkg_fuse_commit = master
+
+PACKAGES += gcm
+pkg_gcm_name = gcm
+pkg_gcm_description = An Erlang application for Google Cloud Messaging
+pkg_gcm_homepage = https://github.com/pdincau/gcm-erlang
+pkg_gcm_fetch = git
+pkg_gcm_repo = https://github.com/pdincau/gcm-erlang
+pkg_gcm_commit = master
+
+PACKAGES += gcprof
+pkg_gcprof_name = gcprof
+pkg_gcprof_description = Garbage Collection profiler for Erlang
+pkg_gcprof_homepage = https://github.com/knutin/gcprof
+pkg_gcprof_fetch = git
+pkg_gcprof_repo = https://github.com/knutin/gcprof
+pkg_gcprof_commit = master
+
+PACKAGES += geas
+pkg_geas_name = geas
+pkg_geas_description = Guess Erlang Application Scattering
+pkg_geas_homepage = https://github.com/crownedgrouse/geas
+pkg_geas_fetch = git
+pkg_geas_repo = https://github.com/crownedgrouse/geas
+pkg_geas_commit = master
+
+PACKAGES += geef
+pkg_geef_name = geef
+pkg_geef_description = Git NEEEEF (Erlang NIF)
+pkg_geef_homepage = https://github.com/carlosmn/geef
+pkg_geef_fetch = git
+pkg_geef_repo = https://github.com/carlosmn/geef
+pkg_geef_commit = master
+
+PACKAGES += gen_coap
+pkg_gen_coap_name = gen_coap
+pkg_gen_coap_description = Generic Erlang CoAP Client/Server
+pkg_gen_coap_homepage = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_fetch = git
+pkg_gen_coap_repo = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_commit = master
+
+PACKAGES += gen_cycle
+pkg_gen_cycle_name = gen_cycle
+pkg_gen_cycle_description = Simple, generic OTP behaviour for recurring tasks
+pkg_gen_cycle_homepage = https://github.com/aerosol/gen_cycle
+pkg_gen_cycle_fetch = git
+pkg_gen_cycle_repo = https://github.com/aerosol/gen_cycle
+pkg_gen_cycle_commit = develop
+
+PACKAGES += gen_icmp
+pkg_gen_icmp_name = gen_icmp
+pkg_gen_icmp_description = Erlang interface to ICMP sockets
+pkg_gen_icmp_homepage = https://github.com/msantos/gen_icmp
+pkg_gen_icmp_fetch = git
+pkg_gen_icmp_repo = https://github.com/msantos/gen_icmp
+pkg_gen_icmp_commit = master
+
+PACKAGES += gen_leader
+pkg_gen_leader_name = gen_leader
+pkg_gen_leader_description = leader election behavior
+pkg_gen_leader_homepage = https://github.com/garret-smith/gen_leader_revival
+pkg_gen_leader_fetch = git
+pkg_gen_leader_repo = https://github.com/garret-smith/gen_leader_revival
+pkg_gen_leader_commit = master
+
+PACKAGES += gen_nb_server
+pkg_gen_nb_server_name = gen_nb_server
+pkg_gen_nb_server_description = OTP behavior for writing non-blocking servers
+pkg_gen_nb_server_homepage = https://github.com/kevsmith/gen_nb_server
+pkg_gen_nb_server_fetch = git
+pkg_gen_nb_server_repo = https://github.com/kevsmith/gen_nb_server
+pkg_gen_nb_server_commit = master
+
+PACKAGES += gen_paxos
+pkg_gen_paxos_name = gen_paxos
+pkg_gen_paxos_description = An Erlang/OTP-style implementation of the PAXOS distributed consensus protocol
+pkg_gen_paxos_homepage = https://github.com/gburd/gen_paxos
+pkg_gen_paxos_fetch = git
+pkg_gen_paxos_repo = https://github.com/gburd/gen_paxos
+pkg_gen_paxos_commit = master
+
+PACKAGES += gen_rpc
+pkg_gen_rpc_name = gen_rpc
+pkg_gen_rpc_description = A scalable RPC library for Erlang-VM based languages
+pkg_gen_rpc_homepage = https://github.com/priestjim/gen_rpc
+pkg_gen_rpc_fetch = git
+pkg_gen_rpc_repo = https://github.com/priestjim/gen_rpc.git
+pkg_gen_rpc_commit = master
+
+PACKAGES += gen_smtp
+pkg_gen_smtp_name = gen_smtp
+pkg_gen_smtp_description = A generic Erlang SMTP server and client that can be extended via callback modules
+pkg_gen_smtp_homepage = https://github.com/Vagabond/gen_smtp
+pkg_gen_smtp_fetch = git
+pkg_gen_smtp_repo = https://github.com/Vagabond/gen_smtp
+pkg_gen_smtp_commit = master
+
+PACKAGES += gen_tracker
+pkg_gen_tracker_name = gen_tracker
+pkg_gen_tracker_description = supervisor with ets handling of children and their metadata
+pkg_gen_tracker_homepage = https://github.com/erlyvideo/gen_tracker
+pkg_gen_tracker_fetch = git
+pkg_gen_tracker_repo = https://github.com/erlyvideo/gen_tracker
+pkg_gen_tracker_commit = master
+
+PACKAGES += gen_unix
+pkg_gen_unix_name = gen_unix
+pkg_gen_unix_description = Erlang Unix socket interface
+pkg_gen_unix_homepage = https://github.com/msantos/gen_unix
+pkg_gen_unix_fetch = git
+pkg_gen_unix_repo = https://github.com/msantos/gen_unix
+pkg_gen_unix_commit = master
+
+PACKAGES += geode
+pkg_geode_name = geode
+pkg_geode_description = geohash/proximity lookup in pure, uncut erlang.
+pkg_geode_homepage = https://github.com/bradfordw/geode
+pkg_geode_fetch = git
+pkg_geode_repo = https://github.com/bradfordw/geode
+pkg_geode_commit = master
+
+PACKAGES += getopt
+pkg_getopt_name = getopt
+pkg_getopt_description = Module to parse command line arguments using the GNU getopt syntax
+pkg_getopt_homepage = https://github.com/jcomellas/getopt
+pkg_getopt_fetch = git
+pkg_getopt_repo = https://github.com/jcomellas/getopt
+pkg_getopt_commit = master
+
+PACKAGES += gettext
+pkg_gettext_name = gettext
+pkg_gettext_description = Erlang internationalization library.
+pkg_gettext_homepage = https://github.com/etnt/gettext
+pkg_gettext_fetch = git
+pkg_gettext_repo = https://github.com/etnt/gettext
+pkg_gettext_commit = master
+
+PACKAGES += giallo
+pkg_giallo_name = giallo
+pkg_giallo_description = Small and flexible web framework on top of Cowboy
+pkg_giallo_homepage = https://github.com/kivra/giallo
+pkg_giallo_fetch = git
+pkg_giallo_repo = https://github.com/kivra/giallo
+pkg_giallo_commit = master
+
+PACKAGES += gin
+pkg_gin_name = gin
+pkg_gin_description = Guard helpers for Erlang, implemented as a parse_transform
+pkg_gin_homepage = https://github.com/mad-cocktail/gin
+pkg_gin_fetch = git
+pkg_gin_repo = https://github.com/mad-cocktail/gin
+pkg_gin_commit = master
+
+PACKAGES += gitty
+pkg_gitty_name = gitty
+pkg_gitty_description = Git access in erlang
+pkg_gitty_homepage = https://github.com/maxlapshin/gitty
+pkg_gitty_fetch = git
+pkg_gitty_repo = https://github.com/maxlapshin/gitty
+pkg_gitty_commit = master
+
+PACKAGES += gold_fever
+pkg_gold_fever_name = gold_fever
+pkg_gold_fever_description = A Treasure Hunt for Erlangers
+pkg_gold_fever_homepage = https://github.com/inaka/gold_fever
+pkg_gold_fever_fetch = git
+pkg_gold_fever_repo = https://github.com/inaka/gold_fever
+pkg_gold_fever_commit = master
+
+PACKAGES += gpb
+pkg_gpb_name = gpb
+pkg_gpb_description = A Google Protobuf implementation for Erlang
+pkg_gpb_homepage = https://github.com/tomas-abrahamsson/gpb
+pkg_gpb_fetch = git
+pkg_gpb_repo = https://github.com/tomas-abrahamsson/gpb
+pkg_gpb_commit = master
+
+PACKAGES += gproc
+pkg_gproc_name = gproc
+pkg_gproc_description = Extended process registry for Erlang
+pkg_gproc_homepage = https://github.com/uwiger/gproc
+pkg_gproc_fetch = git
+pkg_gproc_repo = https://github.com/uwiger/gproc
+pkg_gproc_commit = master
+
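+# Usage sketch (assuming a standard erlang.mk project Makefile): naming a
+# package in DEPS pulls its fetch/repo/commit from this index, and a dep_*
+# variable overrides the default ref (the tag below is hypothetical):
+#
+#   DEPS = gproc
+#   dep_gproc_commit = 0.9.0
+#
+#   include erlang.mk
+#
+# The full override form bypasses the index entry entirely:
+#
+#   dep_gproc = git https://github.com/uwiger/gproc 0.9.0
+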
+PACKAGES += grapherl
+pkg_grapherl_name = grapherl
+pkg_grapherl_description = Create graphs of Erlang systems and programs
+pkg_grapherl_homepage = https://github.com/eproxus/grapherl
+pkg_grapherl_fetch = git
+pkg_grapherl_repo = https://github.com/eproxus/grapherl
+pkg_grapherl_commit = master
+
+PACKAGES += grpc
+pkg_grpc_name = grpc
+pkg_grpc_description = gRPC server in Erlang
+pkg_grpc_homepage = https://github.com/Bluehouse-Technology/grpc
+pkg_grpc_fetch = git
+pkg_grpc_repo = https://github.com/Bluehouse-Technology/grpc
+pkg_grpc_commit = master
+
+PACKAGES += grpc_client
+pkg_grpc_client_name = grpc_client
+pkg_grpc_client_description = gRPC client in Erlang
+pkg_grpc_client_homepage = https://github.com/Bluehouse-Technology/grpc_client
+pkg_grpc_client_fetch = git
+pkg_grpc_client_repo = https://github.com/Bluehouse-Technology/grpc_client
+pkg_grpc_client_commit = master
+
+PACKAGES += gun
+pkg_gun_name = gun
+pkg_gun_description = Asynchronous SPDY, HTTP and Websocket client written in Erlang.
+pkg_gun_homepage = http://ninenines.eu
+pkg_gun_fetch = git
+pkg_gun_repo = https://github.com/ninenines/gun
+pkg_gun_commit = master
+
+PACKAGES += gut
+pkg_gut_name = gut
+pkg_gut_description = gut is a template printing, aka scaffolding, tool for Erlang. Like rails generate or yeoman
+pkg_gut_homepage = https://github.com/unbalancedparentheses/gut
+pkg_gut_fetch = git
+pkg_gut_repo = https://github.com/unbalancedparentheses/gut
+pkg_gut_commit = master
+
+PACKAGES += hackney
+pkg_hackney_name = hackney
+pkg_hackney_description = simple HTTP client in Erlang
+pkg_hackney_homepage = https://github.com/benoitc/hackney
+pkg_hackney_fetch = git
+pkg_hackney_repo = https://github.com/benoitc/hackney
+pkg_hackney_commit = master
+
+PACKAGES += hamcrest
+pkg_hamcrest_name = hamcrest
+pkg_hamcrest_description = Erlang port of Hamcrest
+pkg_hamcrest_homepage = https://github.com/hyperthunk/hamcrest-erlang
+pkg_hamcrest_fetch = git
+pkg_hamcrest_repo = https://github.com/hyperthunk/hamcrest-erlang
+pkg_hamcrest_commit = master
+
+PACKAGES += hanoidb
+pkg_hanoidb_name = hanoidb
+pkg_hanoidb_description = Erlang LSM BTree Storage
+pkg_hanoidb_homepage = https://github.com/krestenkrab/hanoidb
+pkg_hanoidb_fetch = git
+pkg_hanoidb_repo = https://github.com/krestenkrab/hanoidb
+pkg_hanoidb_commit = master
+
+PACKAGES += hottub
+pkg_hottub_name = hottub
+pkg_hottub_description = Permanent Erlang Worker Pool
+pkg_hottub_homepage = https://github.com/bfrog/hottub
+pkg_hottub_fetch = git
+pkg_hottub_repo = https://github.com/bfrog/hottub
+pkg_hottub_commit = master
+
+PACKAGES += hpack
+pkg_hpack_name = hpack
+pkg_hpack_description = HPACK Implementation for Erlang
+pkg_hpack_homepage = https://github.com/joedevivo/hpack
+pkg_hpack_fetch = git
+pkg_hpack_repo = https://github.com/joedevivo/hpack
+pkg_hpack_commit = master
+
+PACKAGES += hyper
+pkg_hyper_name = hyper
+pkg_hyper_description = Erlang implementation of HyperLogLog
+pkg_hyper_homepage = https://github.com/GameAnalytics/hyper
+pkg_hyper_fetch = git
+pkg_hyper_repo = https://github.com/GameAnalytics/hyper
+pkg_hyper_commit = master
+
+PACKAGES += i18n
+pkg_i18n_name = i18n
+pkg_i18n_description = International components for unicode from Erlang (unicode, date, string, number, format, locale, localization, transliteration, icu4e)
+pkg_i18n_homepage = https://github.com/erlang-unicode/i18n
+pkg_i18n_fetch = git
+pkg_i18n_repo = https://github.com/erlang-unicode/i18n
+pkg_i18n_commit = master
+
+PACKAGES += ibrowse
+pkg_ibrowse_name = ibrowse
+pkg_ibrowse_description = Erlang HTTP client
+pkg_ibrowse_homepage = https://github.com/cmullaparthi/ibrowse
+pkg_ibrowse_fetch = git
+pkg_ibrowse_repo = https://github.com/cmullaparthi/ibrowse
+pkg_ibrowse_commit = master
+
+PACKAGES += idna
+pkg_idna_name = idna
+pkg_idna_description = Erlang IDNA lib
+pkg_idna_homepage = https://github.com/benoitc/erlang-idna
+pkg_idna_fetch = git
+pkg_idna_repo = https://github.com/benoitc/erlang-idna
+pkg_idna_commit = master
+
+PACKAGES += ierlang
+pkg_ierlang_name = ierlang
+pkg_ierlang_description = An Erlang language kernel for IPython.
+pkg_ierlang_homepage = https://github.com/robbielynch/ierlang
+pkg_ierlang_fetch = git
+pkg_ierlang_repo = https://github.com/robbielynch/ierlang
+pkg_ierlang_commit = master
+
+PACKAGES += iota
+pkg_iota_name = iota
+pkg_iota_description = iota (Inter-dependency Objective Testing Apparatus) - a tool to enforce clean separation of responsibilities in Erlang code
+pkg_iota_homepage = https://github.com/jpgneves/iota
+pkg_iota_fetch = git
+pkg_iota_repo = https://github.com/jpgneves/iota
+pkg_iota_commit = master
+
+PACKAGES += irc_lib
+pkg_irc_lib_name = irc_lib
+pkg_irc_lib_description = Erlang irc client library
+pkg_irc_lib_homepage = https://github.com/OtpChatBot/irc_lib
+pkg_irc_lib_fetch = git
+pkg_irc_lib_repo = https://github.com/OtpChatBot/irc_lib
+pkg_irc_lib_commit = master
+
+PACKAGES += ircd
+pkg_ircd_name = ircd
+pkg_ircd_description = A pluggable IRC daemon application/library for Erlang.
+pkg_ircd_homepage = https://github.com/tonyg/erlang-ircd
+pkg_ircd_fetch = git
+pkg_ircd_repo = https://github.com/tonyg/erlang-ircd
+pkg_ircd_commit = master
+
+PACKAGES += iris
+pkg_iris_name = iris
+pkg_iris_description = Iris Erlang binding
+pkg_iris_homepage = https://github.com/project-iris/iris-erl
+pkg_iris_fetch = git
+pkg_iris_repo = https://github.com/project-iris/iris-erl
+pkg_iris_commit = master
+
+PACKAGES += iso8601
+pkg_iso8601_name = iso8601
+pkg_iso8601_description = Erlang ISO 8601 date formatter/parser
+pkg_iso8601_homepage = https://github.com/seansawyer/erlang_iso8601
+pkg_iso8601_fetch = git
+pkg_iso8601_repo = https://github.com/seansawyer/erlang_iso8601
+pkg_iso8601_commit = master
+
+PACKAGES += jamdb_sybase
+pkg_jamdb_sybase_name = jamdb_sybase
+pkg_jamdb_sybase_description = Erlang driver for SAP Sybase ASE
+pkg_jamdb_sybase_homepage = https://github.com/erlangbureau/jamdb_sybase
+pkg_jamdb_sybase_fetch = git
+pkg_jamdb_sybase_repo = https://github.com/erlangbureau/jamdb_sybase
+pkg_jamdb_sybase_commit = master
+
+PACKAGES += jerg
+pkg_jerg_name = jerg
+pkg_jerg_description = JSON Schema to Erlang Records Generator
+pkg_jerg_homepage = https://github.com/ddossot/jerg
+pkg_jerg_fetch = git
+pkg_jerg_repo = https://github.com/ddossot/jerg
+pkg_jerg_commit = master
+
+PACKAGES += jesse
+pkg_jesse_name = jesse
+pkg_jesse_description = jesse (JSon Schema Erlang) is an implementation of a json schema validator for Erlang.
+pkg_jesse_homepage = https://github.com/for-GET/jesse
+pkg_jesse_fetch = git
+pkg_jesse_repo = https://github.com/for-GET/jesse
+pkg_jesse_commit = master
+
+PACKAGES += jiffy
+pkg_jiffy_name = jiffy
+pkg_jiffy_description = JSON NIFs for Erlang.
+pkg_jiffy_homepage = https://github.com/davisp/jiffy
+pkg_jiffy_fetch = git
+pkg_jiffy_repo = https://github.com/davisp/jiffy
+pkg_jiffy_commit = master
+
+PACKAGES += jiffy_v
+pkg_jiffy_v_name = jiffy_v
+pkg_jiffy_v_description = JSON validation utility
+pkg_jiffy_v_homepage = https://github.com/shizzard/jiffy-v
+pkg_jiffy_v_fetch = git
+pkg_jiffy_v_repo = https://github.com/shizzard/jiffy-v
+pkg_jiffy_v_commit = master
+
+PACKAGES += jobs
+pkg_jobs_name = jobs
+pkg_jobs_description = A job scheduler for load regulation
+pkg_jobs_homepage = https://github.com/esl/jobs
+pkg_jobs_fetch = git
+pkg_jobs_repo = https://github.com/esl/jobs
+pkg_jobs_commit = master
+
+PACKAGES += joxa
+pkg_joxa_name = joxa
+pkg_joxa_description = A Modern Lisp for the Erlang VM
+pkg_joxa_homepage = https://github.com/joxa/joxa
+pkg_joxa_fetch = git
+pkg_joxa_repo = https://github.com/joxa/joxa
+pkg_joxa_commit = master
+
+PACKAGES += json
+pkg_json_name = json
+pkg_json_description = a high level json library for erlang (17.0+)
+pkg_json_homepage = https://github.com/talentdeficit/json
+pkg_json_fetch = git
+pkg_json_repo = https://github.com/talentdeficit/json
+pkg_json_commit = master
+
+PACKAGES += json_rec
+pkg_json_rec_name = json_rec
+pkg_json_rec_description = JSON to erlang record
+pkg_json_rec_homepage = https://github.com/justinkirby/json_rec
+pkg_json_rec_fetch = git
+pkg_json_rec_repo = https://github.com/justinkirby/json_rec
+pkg_json_rec_commit = master
+
+PACKAGES += jsone
+pkg_jsone_name = jsone
+pkg_jsone_description = An Erlang library for encoding, decoding JSON data.
+pkg_jsone_homepage = https://github.com/sile/jsone
+pkg_jsone_fetch = git
+pkg_jsone_repo = https://github.com/sile/jsone.git
+pkg_jsone_commit = master
+
+PACKAGES += jsonerl
+pkg_jsonerl_name = jsonerl
+pkg_jsonerl_description = yet another but slightly different erlang <-> json encoder/decoder
+pkg_jsonerl_homepage = https://github.com/lambder/jsonerl
+pkg_jsonerl_fetch = git
+pkg_jsonerl_repo = https://github.com/lambder/jsonerl
+pkg_jsonerl_commit = master
+
+PACKAGES += jsonpath
+pkg_jsonpath_name = jsonpath
+pkg_jsonpath_description = Fast Erlang JSON data retrieval and updates via javascript-like notation
+pkg_jsonpath_homepage = https://github.com/GeneStevens/jsonpath
+pkg_jsonpath_fetch = git
+pkg_jsonpath_repo = https://github.com/GeneStevens/jsonpath
+pkg_jsonpath_commit = master
+
+PACKAGES += jsonx
+pkg_jsonx_name = jsonx
+pkg_jsonx_description = JSONX is an Erlang library for efficiently decoding and encoding JSON, written in C.
+pkg_jsonx_homepage = https://github.com/iskra/jsonx
+pkg_jsonx_fetch = git
+pkg_jsonx_repo = https://github.com/iskra/jsonx
+pkg_jsonx_commit = master
+
+PACKAGES += jsx
+pkg_jsx_name = jsx
+pkg_jsx_description = An Erlang application for consuming, producing and manipulating JSON.
+pkg_jsx_homepage = https://github.com/talentdeficit/jsx
+pkg_jsx_fetch = git
+pkg_jsx_repo = https://github.com/talentdeficit/jsx
+pkg_jsx_commit = master
+
+PACKAGES += kafka
+pkg_kafka_name = kafka
+pkg_kafka_description = Kafka consumer and producer in Erlang
+pkg_kafka_homepage = https://github.com/wooga/kafka-erlang
+pkg_kafka_fetch = git
+pkg_kafka_repo = https://github.com/wooga/kafka-erlang
+pkg_kafka_commit = master
+
+PACKAGES += kafka_protocol
+pkg_kafka_protocol_name = kafka_protocol
+pkg_kafka_protocol_description = Kafka protocol Erlang library
+pkg_kafka_protocol_homepage = https://github.com/klarna/kafka_protocol
+pkg_kafka_protocol_fetch = git
+pkg_kafka_protocol_repo = https://github.com/klarna/kafka_protocol.git
+pkg_kafka_protocol_commit = master
+
+PACKAGES += kai
+pkg_kai_name = kai
+pkg_kai_description = DHT storage by Takeshi Inoue
+pkg_kai_homepage = https://github.com/synrc/kai
+pkg_kai_fetch = git
+pkg_kai_repo = https://github.com/synrc/kai
+pkg_kai_commit = master
+
+PACKAGES += katja
+pkg_katja_name = katja
+pkg_katja_description = A simple Riemann client written in Erlang.
+pkg_katja_homepage = https://github.com/nifoc/katja
+pkg_katja_fetch = git
+pkg_katja_repo = https://github.com/nifoc/katja
+pkg_katja_commit = master
+
+PACKAGES += kdht
+pkg_kdht_name = kdht
+pkg_kdht_description = kdht is an erlang DHT implementation
+pkg_kdht_homepage = https://github.com/kevinlynx/kdht
+pkg_kdht_fetch = git
+pkg_kdht_repo = https://github.com/kevinlynx/kdht
+pkg_kdht_commit = master
+
+PACKAGES += key2value
+pkg_key2value_name = key2value
+pkg_key2value_description = Erlang 2-way map
+pkg_key2value_homepage = https://github.com/okeuday/key2value
+pkg_key2value_fetch = git
+pkg_key2value_repo = https://github.com/okeuday/key2value
+pkg_key2value_commit = master
+
+PACKAGES += keys1value
+pkg_keys1value_name = keys1value
+pkg_keys1value_description = Erlang set associative map for key lists
+pkg_keys1value_homepage = https://github.com/okeuday/keys1value
+pkg_keys1value_fetch = git
+pkg_keys1value_repo = https://github.com/okeuday/keys1value
+pkg_keys1value_commit = master
+
+PACKAGES += kinetic
+pkg_kinetic_name = kinetic
+pkg_kinetic_description = Erlang Kinesis Client
+pkg_kinetic_homepage = https://github.com/AdRoll/kinetic
+pkg_kinetic_fetch = git
+pkg_kinetic_repo = https://github.com/AdRoll/kinetic
+pkg_kinetic_commit = master
+
+PACKAGES += kjell
+pkg_kjell_name = kjell
+pkg_kjell_description = Erlang Shell
+pkg_kjell_homepage = https://github.com/karlll/kjell
+pkg_kjell_fetch = git
+pkg_kjell_repo = https://github.com/karlll/kjell
+pkg_kjell_commit = master
+
+PACKAGES += kraken
+pkg_kraken_name = kraken
+pkg_kraken_description = Distributed Pubsub Server for Realtime Apps
+pkg_kraken_homepage = https://github.com/Asana/kraken
+pkg_kraken_fetch = git
+pkg_kraken_repo = https://github.com/Asana/kraken
+pkg_kraken_commit = master
+
+PACKAGES += kucumberl
+pkg_kucumberl_name = kucumberl
+pkg_kucumberl_description = A pure-Erlang, open-source implementation of Cucumber
+pkg_kucumberl_homepage = https://github.com/openshine/kucumberl
+pkg_kucumberl_fetch = git
+pkg_kucumberl_repo = https://github.com/openshine/kucumberl
+pkg_kucumberl_commit = master
+
+PACKAGES += kvc
+pkg_kvc_name = kvc
+pkg_kvc_description = KVC - Key Value Coding for Erlang data structures
+pkg_kvc_homepage = https://github.com/etrepum/kvc
+pkg_kvc_fetch = git
+pkg_kvc_repo = https://github.com/etrepum/kvc
+pkg_kvc_commit = master
+
+PACKAGES += kvlists
+pkg_kvlists_name = kvlists
+pkg_kvlists_description = Lists of key-value pairs (decoded JSON) in Erlang
+pkg_kvlists_homepage = https://github.com/jcomellas/kvlists
+pkg_kvlists_fetch = git
+pkg_kvlists_repo = https://github.com/jcomellas/kvlists
+pkg_kvlists_commit = master
+
+PACKAGES += kvs
+pkg_kvs_name = kvs
+pkg_kvs_description = Container and Iterator
+pkg_kvs_homepage = https://github.com/synrc/kvs
+pkg_kvs_fetch = git
+pkg_kvs_repo = https://github.com/synrc/kvs
+pkg_kvs_commit = master
+
+PACKAGES += lager
+pkg_lager_name = lager
+pkg_lager_description = A logging framework for Erlang/OTP.
+pkg_lager_homepage = https://github.com/erlang-lager/lager
+pkg_lager_fetch = git
+pkg_lager_repo = https://github.com/erlang-lager/lager
+pkg_lager_commit = master
+
+PACKAGES += lager_amqp_backend
+pkg_lager_amqp_backend_name = lager_amqp_backend
+pkg_lager_amqp_backend_description = AMQP RabbitMQ Lager backend
+pkg_lager_amqp_backend_homepage = https://github.com/jbrisbin/lager_amqp_backend
+pkg_lager_amqp_backend_fetch = git
+pkg_lager_amqp_backend_repo = https://github.com/jbrisbin/lager_amqp_backend
+pkg_lager_amqp_backend_commit = master
+
+PACKAGES += lager_syslog
+pkg_lager_syslog_name = lager_syslog
+pkg_lager_syslog_description = Syslog backend for lager
+pkg_lager_syslog_homepage = https://github.com/erlang-lager/lager_syslog
+pkg_lager_syslog_fetch = git
+pkg_lager_syslog_repo = https://github.com/erlang-lager/lager_syslog
+pkg_lager_syslog_commit = master
+
+PACKAGES += lambdapad
+pkg_lambdapad_name = lambdapad
+pkg_lambdapad_description = Static site generator using Erlang. Yes, Erlang.
+pkg_lambdapad_homepage = https://github.com/gar1t/lambdapad
+pkg_lambdapad_fetch = git
+pkg_lambdapad_repo = https://github.com/gar1t/lambdapad
+pkg_lambdapad_commit = master
+
+PACKAGES += lasp
+pkg_lasp_name = lasp
+pkg_lasp_description = A Language for Distributed, Eventually Consistent Computations
+pkg_lasp_homepage = http://lasp-lang.org/
+pkg_lasp_fetch = git
+pkg_lasp_repo = https://github.com/lasp-lang/lasp
+pkg_lasp_commit = master
+
+PACKAGES += lasse
+pkg_lasse_name = lasse
+pkg_lasse_description = SSE handler for Cowboy
+pkg_lasse_homepage = https://github.com/inaka/lasse
+pkg_lasse_fetch = git
+pkg_lasse_repo = https://github.com/inaka/lasse
+pkg_lasse_commit = master
+
+PACKAGES += ldap
+pkg_ldap_name = ldap
+pkg_ldap_description = LDAP server written in Erlang
+pkg_ldap_homepage = https://github.com/spawnproc/ldap
+pkg_ldap_fetch = git
+pkg_ldap_repo = https://github.com/spawnproc/ldap
+pkg_ldap_commit = master
+
+PACKAGES += lethink
+pkg_lethink_name = lethink
+pkg_lethink_description = erlang driver for rethinkdb
+pkg_lethink_homepage = https://github.com/taybin/lethink
+pkg_lethink_fetch = git
+pkg_lethink_repo = https://github.com/taybin/lethink
+pkg_lethink_commit = master
+
+PACKAGES += lfe
+pkg_lfe_name = lfe
+pkg_lfe_description = Lisp Flavoured Erlang (LFE)
+pkg_lfe_homepage = https://github.com/rvirding/lfe
+pkg_lfe_fetch = git
+pkg_lfe_repo = https://github.com/rvirding/lfe
+pkg_lfe_commit = master
+
+PACKAGES += ling
+pkg_ling_name = ling
+pkg_ling_description = Erlang on Xen
+pkg_ling_homepage = https://github.com/cloudozer/ling
+pkg_ling_fetch = git
+pkg_ling_repo = https://github.com/cloudozer/ling
+pkg_ling_commit = master
+
+PACKAGES += live
+pkg_live_name = live
+pkg_live_description = Automated module and configuration reloader.
+pkg_live_homepage = http://ninenines.eu
+pkg_live_fetch = git
+pkg_live_repo = https://github.com/ninenines/live
+pkg_live_commit = master
+
+PACKAGES += lmq
+pkg_lmq_name = lmq
+pkg_lmq_description = Lightweight Message Queue
+pkg_lmq_homepage = https://github.com/iij/lmq
+pkg_lmq_fetch = git
+pkg_lmq_repo = https://github.com/iij/lmq
+pkg_lmq_commit = master
+
+PACKAGES += locker
+pkg_locker_name = locker
+pkg_locker_description = Atomic distributed 'check and set' for short-lived keys
+pkg_locker_homepage = https://github.com/wooga/locker
+pkg_locker_fetch = git
+pkg_locker_repo = https://github.com/wooga/locker
+pkg_locker_commit = master
+
+PACKAGES += locks
+pkg_locks_name = locks
+pkg_locks_description = A scalable, deadlock-resolving resource locker
+pkg_locks_homepage = https://github.com/uwiger/locks
+pkg_locks_fetch = git
+pkg_locks_repo = https://github.com/uwiger/locks
+pkg_locks_commit = master
+
+PACKAGES += log4erl
+pkg_log4erl_name = log4erl
+pkg_log4erl_description = A logger for erlang in the spirit of Log4J.
+pkg_log4erl_homepage = https://github.com/ahmednawras/log4erl
+pkg_log4erl_fetch = git
+pkg_log4erl_repo = https://github.com/ahmednawras/log4erl
+pkg_log4erl_commit = master
+
+PACKAGES += lol
+pkg_lol_name = lol
+pkg_lol_description = Lisp on erLang, and programming is fun again
+pkg_lol_homepage = https://github.com/b0oh/lol
+pkg_lol_fetch = git
+pkg_lol_repo = https://github.com/b0oh/lol
+pkg_lol_commit = master
+
+PACKAGES += lucid
+pkg_lucid_name = lucid
+pkg_lucid_description = HTTP/2 server written in Erlang
+pkg_lucid_homepage = https://github.com/tatsuhiro-t/lucid
+pkg_lucid_fetch = git
+pkg_lucid_repo = https://github.com/tatsuhiro-t/lucid
+pkg_lucid_commit = master
+
+PACKAGES += luerl
+pkg_luerl_name = luerl
+pkg_luerl_description = Lua in Erlang
+pkg_luerl_homepage = https://github.com/rvirding/luerl
+pkg_luerl_fetch = git
+pkg_luerl_repo = https://github.com/rvirding/luerl
+pkg_luerl_commit = develop
+
+PACKAGES += luwak
+pkg_luwak_name = luwak
+pkg_luwak_description = Large-object storage interface for Riak
+pkg_luwak_homepage = https://github.com/basho/luwak
+pkg_luwak_fetch = git
+pkg_luwak_repo = https://github.com/basho/luwak
+pkg_luwak_commit = master
+
+PACKAGES += lux
+pkg_lux_name = lux
+pkg_lux_description = Lux (LUcid eXpect scripting) simplifies test automation and provides an Expect-style execution of commands
+pkg_lux_homepage = https://github.com/hawk/lux
+pkg_lux_fetch = git
+pkg_lux_repo = https://github.com/hawk/lux
+pkg_lux_commit = master
+
+PACKAGES += machi
+pkg_machi_name = machi
+pkg_machi_description = Machi file store
+pkg_machi_homepage = https://github.com/basho/machi
+pkg_machi_fetch = git
+pkg_machi_repo = https://github.com/basho/machi
+pkg_machi_commit = master
+
+PACKAGES += mad
+pkg_mad_name = mad
+pkg_mad_description = Small and Fast Rebar Replacement
+pkg_mad_homepage = https://github.com/synrc/mad
+pkg_mad_fetch = git
+pkg_mad_repo = https://github.com/synrc/mad
+pkg_mad_commit = master
+
+PACKAGES += marina
+pkg_marina_name = marina
+pkg_marina_description = Non-blocking Erlang Cassandra CQL3 client
+pkg_marina_homepage = https://github.com/lpgauth/marina
+pkg_marina_fetch = git
+pkg_marina_repo = https://github.com/lpgauth/marina
+pkg_marina_commit = master
+
+PACKAGES += mavg
+pkg_mavg_name = mavg
+pkg_mavg_description = Erlang :: Exponential moving average library
+pkg_mavg_homepage = https://github.com/EchoTeam/mavg
+pkg_mavg_fetch = git
+pkg_mavg_repo = https://github.com/EchoTeam/mavg
+pkg_mavg_commit = master
+
+PACKAGES += mc_erl
+pkg_mc_erl_name = mc_erl
+pkg_mc_erl_description = mc-erl is a server for Minecraft 1.4.7 written in Erlang.
+pkg_mc_erl_homepage = https://github.com/clonejo/mc-erl
+pkg_mc_erl_fetch = git
+pkg_mc_erl_repo = https://github.com/clonejo/mc-erl
+pkg_mc_erl_commit = master
+
+PACKAGES += mcd
+pkg_mcd_name = mcd
+pkg_mcd_description = Fast memcached protocol client in pure Erlang
+pkg_mcd_homepage = https://github.com/EchoTeam/mcd
+pkg_mcd_fetch = git
+pkg_mcd_repo = https://github.com/EchoTeam/mcd
+pkg_mcd_commit = master
+
+PACKAGES += mcerlang
+pkg_mcerlang_name = mcerlang
+pkg_mcerlang_description = The McErlang model checker for Erlang
+pkg_mcerlang_homepage = https://github.com/fredlund/McErlang
+pkg_mcerlang_fetch = git
+pkg_mcerlang_repo = https://github.com/fredlund/McErlang
+pkg_mcerlang_commit = master
+
+PACKAGES += meck
+pkg_meck_name = meck
+pkg_meck_description = A mocking library for Erlang
+pkg_meck_homepage = https://github.com/eproxus/meck
+pkg_meck_fetch = git
+pkg_meck_repo = https://github.com/eproxus/meck
+pkg_meck_commit = master
+
+PACKAGES += mekao
+pkg_mekao_name = mekao
+pkg_mekao_description = SQL constructor
+pkg_mekao_homepage = https://github.com/ddosia/mekao
+pkg_mekao_fetch = git
+pkg_mekao_repo = https://github.com/ddosia/mekao
+pkg_mekao_commit = master
+
+PACKAGES += memo
+pkg_memo_name = memo
+pkg_memo_description = Erlang memoization server
+pkg_memo_homepage = https://github.com/tuncer/memo
+pkg_memo_fetch = git
+pkg_memo_repo = https://github.com/tuncer/memo
+pkg_memo_commit = master
+
+PACKAGES += merge_index
+pkg_merge_index_name = merge_index
+pkg_merge_index_description = MergeIndex is an Erlang library for storing ordered sets on disk. It is very similar to an SSTable (in Google's Bigtable) or an HFile (in Hadoop).
+pkg_merge_index_homepage = https://github.com/basho/merge_index
+pkg_merge_index_fetch = git
+pkg_merge_index_repo = https://github.com/basho/merge_index
+pkg_merge_index_commit = master
+
+PACKAGES += merl
+pkg_merl_name = merl
+pkg_merl_description = Metaprogramming in Erlang
+pkg_merl_homepage = https://github.com/richcarl/merl
+pkg_merl_fetch = git
+pkg_merl_repo = https://github.com/richcarl/merl
+pkg_merl_commit = master
+
+PACKAGES += mimerl
+pkg_mimerl_name = mimerl
+pkg_mimerl_description = library to handle mimetypes
+pkg_mimerl_homepage = https://github.com/benoitc/mimerl
+pkg_mimerl_fetch = git
+pkg_mimerl_repo = https://github.com/benoitc/mimerl
+pkg_mimerl_commit = master
+
+PACKAGES += mimetypes
+pkg_mimetypes_name = mimetypes
+pkg_mimetypes_description = Erlang MIME types library
+pkg_mimetypes_homepage = https://github.com/spawngrid/mimetypes
+pkg_mimetypes_fetch = git
+pkg_mimetypes_repo = https://github.com/spawngrid/mimetypes
+pkg_mimetypes_commit = master
+
+PACKAGES += mixer
+pkg_mixer_name = mixer
+pkg_mixer_description = Mix in functions from other modules
+pkg_mixer_homepage = https://github.com/chef/mixer
+pkg_mixer_fetch = git
+pkg_mixer_repo = https://github.com/chef/mixer
+pkg_mixer_commit = master
+
+PACKAGES += mochiweb
+pkg_mochiweb_name = mochiweb
+pkg_mochiweb_description = MochiWeb is an Erlang library for building lightweight HTTP servers.
+pkg_mochiweb_homepage = https://github.com/mochi/mochiweb
+pkg_mochiweb_fetch = git
+pkg_mochiweb_repo = https://github.com/mochi/mochiweb
+pkg_mochiweb_commit = master
+
+PACKAGES += mochiweb_xpath
+pkg_mochiweb_xpath_name = mochiweb_xpath
+pkg_mochiweb_xpath_description = XPath support for mochiweb's html parser
+pkg_mochiweb_xpath_homepage = https://github.com/retnuh/mochiweb_xpath
+pkg_mochiweb_xpath_fetch = git
+pkg_mochiweb_xpath_repo = https://github.com/retnuh/mochiweb_xpath
+pkg_mochiweb_xpath_commit = master
+
+PACKAGES += mockgyver
+pkg_mockgyver_name = mockgyver
+pkg_mockgyver_description = A mocking library for Erlang
+pkg_mockgyver_homepage = https://github.com/klajo/mockgyver
+pkg_mockgyver_fetch = git
+pkg_mockgyver_repo = https://github.com/klajo/mockgyver
+pkg_mockgyver_commit = master
+
+PACKAGES += modlib
+pkg_modlib_name = modlib
+pkg_modlib_description = Web framework based on Erlang's inets httpd
+pkg_modlib_homepage = https://github.com/gar1t/modlib
+pkg_modlib_fetch = git
+pkg_modlib_repo = https://github.com/gar1t/modlib
+pkg_modlib_commit = master
+
+PACKAGES += mongodb
+pkg_mongodb_name = mongodb
+pkg_mongodb_description = MongoDB driver for Erlang
+pkg_mongodb_homepage = https://github.com/comtihon/mongodb-erlang
+pkg_mongodb_fetch = git
+pkg_mongodb_repo = https://github.com/comtihon/mongodb-erlang
+pkg_mongodb_commit = master
+
+PACKAGES += mongooseim
+pkg_mongooseim_name = mongooseim
+pkg_mongooseim_description = Jabber / XMPP server with focus on performance and scalability, by Erlang Solutions
+pkg_mongooseim_homepage = https://www.erlang-solutions.com/products/mongooseim-massively-scalable-ejabberd-platform
+pkg_mongooseim_fetch = git
+pkg_mongooseim_repo = https://github.com/esl/MongooseIM
+pkg_mongooseim_commit = master
+
+PACKAGES += moyo
+pkg_moyo_name = moyo
+pkg_moyo_description = Erlang utility functions library
+pkg_moyo_homepage = https://github.com/dwango/moyo
+pkg_moyo_fetch = git
+pkg_moyo_repo = https://github.com/dwango/moyo
+pkg_moyo_commit = master
+
+PACKAGES += msgpack
+pkg_msgpack_name = msgpack
+pkg_msgpack_description = MessagePack (de)serializer implementation for Erlang
+pkg_msgpack_homepage = https://github.com/msgpack/msgpack-erlang
+pkg_msgpack_fetch = git
+pkg_msgpack_repo = https://github.com/msgpack/msgpack-erlang
+pkg_msgpack_commit = master
+
+PACKAGES += mu2
+pkg_mu2_name = mu2
+pkg_mu2_description = Erlang mutation testing tool
+pkg_mu2_homepage = https://github.com/ramsay-t/mu2
+pkg_mu2_fetch = git
+pkg_mu2_repo = https://github.com/ramsay-t/mu2
+pkg_mu2_commit = master
+
+PACKAGES += mustache
+pkg_mustache_name = mustache
+pkg_mustache_description = Mustache template engine for Erlang.
+pkg_mustache_homepage = https://github.com/mojombo/mustache.erl
+pkg_mustache_fetch = git
+pkg_mustache_repo = https://github.com/mojombo/mustache.erl
+pkg_mustache_commit = master
+
+PACKAGES += myproto
+pkg_myproto_name = myproto
+pkg_myproto_description = MySQL Server Protocol in Erlang
+pkg_myproto_homepage = https://github.com/altenwald/myproto
+pkg_myproto_fetch = git
+pkg_myproto_repo = https://github.com/altenwald/myproto
+pkg_myproto_commit = master
+
+PACKAGES += mysql
+pkg_mysql_name = mysql
+pkg_mysql_description = MySQL client library for Erlang/OTP
+pkg_mysql_homepage = https://github.com/mysql-otp/mysql-otp
+pkg_mysql_fetch = git
+pkg_mysql_repo = https://github.com/mysql-otp/mysql-otp
+pkg_mysql_commit = 1.5.1
+
+PACKAGES += n2o
+pkg_n2o_name = n2o
+pkg_n2o_description = WebSocket Application Server
+pkg_n2o_homepage = https://github.com/5HT/n2o
+pkg_n2o_fetch = git
+pkg_n2o_repo = https://github.com/5HT/n2o
+pkg_n2o_commit = master
+
+PACKAGES += nat_upnp
+pkg_nat_upnp_name = nat_upnp
+pkg_nat_upnp_description = Erlang library to map an internal port to an external one using UPnP IGD
+pkg_nat_upnp_homepage = https://github.com/benoitc/nat_upnp
+pkg_nat_upnp_fetch = git
+pkg_nat_upnp_repo = https://github.com/benoitc/nat_upnp
+pkg_nat_upnp_commit = master
+
+PACKAGES += neo4j
+pkg_neo4j_name = neo4j
+pkg_neo4j_description = Erlang client library for Neo4J.
+pkg_neo4j_homepage = https://github.com/dmitriid/neo4j-erlang
+pkg_neo4j_fetch = git
+pkg_neo4j_repo = https://github.com/dmitriid/neo4j-erlang
+pkg_neo4j_commit = master
+
+PACKAGES += neotoma
+pkg_neotoma_name = neotoma
+pkg_neotoma_description = Erlang library and packrat parser-generator for parsing expression grammars.
+pkg_neotoma_homepage = https://github.com/seancribbs/neotoma
+pkg_neotoma_fetch = git
+pkg_neotoma_repo = https://github.com/seancribbs/neotoma
+pkg_neotoma_commit = master
+
+PACKAGES += newrelic
+pkg_newrelic_name = newrelic
+pkg_newrelic_description = Erlang library for sending metrics to New Relic
+pkg_newrelic_homepage = https://github.com/wooga/newrelic-erlang
+pkg_newrelic_fetch = git
+pkg_newrelic_repo = https://github.com/wooga/newrelic-erlang
+pkg_newrelic_commit = master
+
+PACKAGES += nifty
+pkg_nifty_name = nifty
+pkg_nifty_description = Erlang NIF wrapper generator
+pkg_nifty_homepage = https://github.com/parapluu/nifty
+pkg_nifty_fetch = git
+pkg_nifty_repo = https://github.com/parapluu/nifty
+pkg_nifty_commit = master
+
+PACKAGES += nitrogen_core
+pkg_nitrogen_core_name = nitrogen_core
+pkg_nitrogen_core_description = The core Nitrogen library.
+pkg_nitrogen_core_homepage = http://nitrogenproject.com/
+pkg_nitrogen_core_fetch = git
+pkg_nitrogen_core_repo = https://github.com/nitrogen/nitrogen_core
+pkg_nitrogen_core_commit = master
+
+PACKAGES += nkbase
+pkg_nkbase_name = nkbase
+pkg_nkbase_description = NkBASE distributed database
+pkg_nkbase_homepage = https://github.com/Nekso/nkbase
+pkg_nkbase_fetch = git
+pkg_nkbase_repo = https://github.com/Nekso/nkbase
+pkg_nkbase_commit = develop
+
+PACKAGES += nkdocker
+pkg_nkdocker_name = nkdocker
+pkg_nkdocker_description = Erlang Docker client
+pkg_nkdocker_homepage = https://github.com/Nekso/nkdocker
+pkg_nkdocker_fetch = git
+pkg_nkdocker_repo = https://github.com/Nekso/nkdocker
+pkg_nkdocker_commit = master
+
+PACKAGES += nkpacket
+pkg_nkpacket_name = nkpacket
+pkg_nkpacket_description = Generic Erlang transport layer
+pkg_nkpacket_homepage = https://github.com/Nekso/nkpacket
+pkg_nkpacket_fetch = git
+pkg_nkpacket_repo = https://github.com/Nekso/nkpacket
+pkg_nkpacket_commit = master
+
+PACKAGES += nksip
+pkg_nksip_name = nksip
+pkg_nksip_description = Erlang SIP application server
+pkg_nksip_homepage = https://github.com/kalta/nksip
+pkg_nksip_fetch = git
+pkg_nksip_repo = https://github.com/kalta/nksip
+pkg_nksip_commit = master
+
+PACKAGES += nodefinder
+pkg_nodefinder_name = nodefinder
+pkg_nodefinder_description = automatic node discovery via UDP multicast
+pkg_nodefinder_homepage = https://github.com/erlanger/nodefinder
+pkg_nodefinder_fetch = git
+pkg_nodefinder_repo = https://github.com/okeuday/nodefinder
+pkg_nodefinder_commit = master
+
+PACKAGES += nprocreg
+pkg_nprocreg_name = nprocreg
+pkg_nprocreg_description = Minimal Distributed Erlang Process Registry
+pkg_nprocreg_homepage = http://nitrogenproject.com/
+pkg_nprocreg_fetch = git
+pkg_nprocreg_repo = https://github.com/nitrogen/nprocreg
+pkg_nprocreg_commit = master
+
+PACKAGES += oauth
+pkg_oauth_name = oauth
+pkg_oauth_description = An Erlang OAuth 1.0 implementation
+pkg_oauth_homepage = https://github.com/tim/erlang-oauth
+pkg_oauth_fetch = git
+pkg_oauth_repo = https://github.com/tim/erlang-oauth
+pkg_oauth_commit = master
+
+PACKAGES += oauth2
+pkg_oauth2_name = oauth2
+pkg_oauth2_description = Erlang OAuth2 implementation
+pkg_oauth2_homepage = https://github.com/kivra/oauth2
+pkg_oauth2_fetch = git
+pkg_oauth2_repo = https://github.com/kivra/oauth2
+pkg_oauth2_commit = master
+
+PACKAGES += observer_cli
+pkg_observer_cli_name = observer_cli
+pkg_observer_cli_description = Visualize Erlang/Elixir Nodes On The Command Line
+pkg_observer_cli_homepage = http://zhongwencool.github.io/observer_cli
+pkg_observer_cli_fetch = git
+pkg_observer_cli_repo = https://github.com/zhongwencool/observer_cli
+pkg_observer_cli_commit = master
+
+PACKAGES += octopus
+pkg_octopus_name = octopus
+pkg_octopus_description = Small and flexible pool manager written in Erlang
+pkg_octopus_homepage = https://github.com/erlangbureau/octopus
+pkg_octopus_fetch = git
+pkg_octopus_repo = https://github.com/erlangbureau/octopus
+pkg_octopus_commit = master
+
+PACKAGES += of_protocol
+pkg_of_protocol_name = of_protocol
+pkg_of_protocol_description = OpenFlow Protocol Library for Erlang
+pkg_of_protocol_homepage = https://github.com/FlowForwarding/of_protocol
+pkg_of_protocol_fetch = git
+pkg_of_protocol_repo = https://github.com/FlowForwarding/of_protocol
+pkg_of_protocol_commit = master
+
+PACKAGES += opencouch
+pkg_opencouch_name = couch
+pkg_opencouch_description = An embeddable document-oriented database compatible with Apache CouchDB
+pkg_opencouch_homepage = https://github.com/benoitc/opencouch
+pkg_opencouch_fetch = git
+pkg_opencouch_repo = https://github.com/benoitc/opencouch
+pkg_opencouch_commit = master
+
+PACKAGES += openflow
+pkg_openflow_name = openflow
+pkg_openflow_description = An OpenFlow controller written in pure Erlang
+pkg_openflow_homepage = https://github.com/renatoaguiar/erlang-openflow
+pkg_openflow_fetch = git
+pkg_openflow_repo = https://github.com/renatoaguiar/erlang-openflow
+pkg_openflow_commit = master
+
+PACKAGES += openid
+pkg_openid_name = openid
+pkg_openid_description = Erlang OpenID
+pkg_openid_homepage = https://github.com/brendonh/erl_openid
+pkg_openid_fetch = git
+pkg_openid_repo = https://github.com/brendonh/erl_openid
+pkg_openid_commit = master
+
+PACKAGES += openpoker
+pkg_openpoker_name = openpoker
+pkg_openpoker_description = Genesis Texas hold'em Game Server
+pkg_openpoker_homepage = https://github.com/hpyhacking/openpoker
+pkg_openpoker_fetch = git
+pkg_openpoker_repo = https://github.com/hpyhacking/openpoker
+pkg_openpoker_commit = master
+
+PACKAGES += otpbp
+pkg_otpbp_name = otpbp
+pkg_otpbp_description = Parse transform for using new OTP functions in old Erlang/OTP releases (R15, R16, 17, 18, 19)
+pkg_otpbp_homepage = https://github.com/Ledest/otpbp
+pkg_otpbp_fetch = git
+pkg_otpbp_repo = https://github.com/Ledest/otpbp
+pkg_otpbp_commit = master
+
+PACKAGES += pal
+pkg_pal_name = pal
+pkg_pal_description = Pragmatic Authentication Library
+pkg_pal_homepage = https://github.com/manifest/pal
+pkg_pal_fetch = git
+pkg_pal_repo = https://github.com/manifest/pal
+pkg_pal_commit = master
+
+PACKAGES += parse_trans
+pkg_parse_trans_name = parse_trans
+pkg_parse_trans_description = Parse transform utilities for Erlang
+pkg_parse_trans_homepage = https://github.com/uwiger/parse_trans
+pkg_parse_trans_fetch = git
+pkg_parse_trans_repo = https://github.com/uwiger/parse_trans
+pkg_parse_trans_commit = master
+
+PACKAGES += parsexml
+pkg_parsexml_name = parsexml
+pkg_parsexml_description = Simple DOM XML parser with convenient and very simple API
+pkg_parsexml_homepage = https://github.com/maxlapshin/parsexml
+pkg_parsexml_fetch = git
+pkg_parsexml_repo = https://github.com/maxlapshin/parsexml
+pkg_parsexml_commit = master
+
+PACKAGES += partisan
+pkg_partisan_name = partisan
+pkg_partisan_description = High-performance, high-scalability distributed computing with Erlang and Elixir.
+pkg_partisan_homepage = http://partisan.cloud
+pkg_partisan_fetch = git
+pkg_partisan_repo = https://github.com/lasp-lang/partisan
+pkg_partisan_commit = master
+
+PACKAGES += pegjs
+pkg_pegjs_name = pegjs
+pkg_pegjs_description = An implementation of PEG.js grammar for Erlang.
+pkg_pegjs_homepage = https://github.com/dmitriid/pegjs
+pkg_pegjs_fetch = git
+pkg_pegjs_repo = https://github.com/dmitriid/pegjs
+pkg_pegjs_commit = master
+
+PACKAGES += percept2
+pkg_percept2_name = percept2
+pkg_percept2_description = Concurrent profiling tool for Erlang
+pkg_percept2_homepage = https://github.com/huiqing/percept2
+pkg_percept2_fetch = git
+pkg_percept2_repo = https://github.com/huiqing/percept2
+pkg_percept2_commit = master
+
+PACKAGES += pgo
+pkg_pgo_name = pgo
+pkg_pgo_description = Erlang Postgres client and connection pool
+pkg_pgo_homepage = https://github.com/erleans/pgo.git
+pkg_pgo_fetch = git
+pkg_pgo_repo = https://github.com/erleans/pgo.git
+pkg_pgo_commit = master
+
+PACKAGES += pgsql
+pkg_pgsql_name = pgsql
+pkg_pgsql_description = Erlang PostgreSQL driver
+pkg_pgsql_homepage = https://github.com/semiocast/pgsql
+pkg_pgsql_fetch = git
+pkg_pgsql_repo = https://github.com/semiocast/pgsql
+pkg_pgsql_commit = master
+
+PACKAGES += pkgx
+pkg_pkgx_name = pkgx
+pkg_pkgx_description = Build .deb packages from Erlang releases
+pkg_pkgx_homepage = https://github.com/arjan/pkgx
+pkg_pkgx_fetch = git
+pkg_pkgx_repo = https://github.com/arjan/pkgx
+pkg_pkgx_commit = master
+
+PACKAGES += pkt
+pkg_pkt_name = pkt
+pkg_pkt_description = Erlang network protocol library
+pkg_pkt_homepage = https://github.com/msantos/pkt
+pkg_pkt_fetch = git
+pkg_pkt_repo = https://github.com/msantos/pkt
+pkg_pkt_commit = master
+
+PACKAGES += plain_fsm
+pkg_plain_fsm_name = plain_fsm
+pkg_plain_fsm_description = A behaviour/support library for writing plain Erlang FSMs.
+pkg_plain_fsm_homepage = https://github.com/uwiger/plain_fsm
+pkg_plain_fsm_fetch = git
+pkg_plain_fsm_repo = https://github.com/uwiger/plain_fsm
+pkg_plain_fsm_commit = master
+
+PACKAGES += plumtree
+pkg_plumtree_name = plumtree
+pkg_plumtree_description = Epidemic Broadcast Trees
+pkg_plumtree_homepage = https://github.com/helium/plumtree
+pkg_plumtree_fetch = git
+pkg_plumtree_repo = https://github.com/helium/plumtree
+pkg_plumtree_commit = master
+
+PACKAGES += pmod_transform
+pkg_pmod_transform_name = pmod_transform
+pkg_pmod_transform_description = Parse transform for parameterized modules
+pkg_pmod_transform_homepage = https://github.com/erlang/pmod_transform
+pkg_pmod_transform_fetch = git
+pkg_pmod_transform_repo = https://github.com/erlang/pmod_transform
+pkg_pmod_transform_commit = master
+
+PACKAGES += pobox
+pkg_pobox_name = pobox
+pkg_pobox_description = External buffer processes to protect against mailbox overflow in Erlang
+pkg_pobox_homepage = https://github.com/ferd/pobox
+pkg_pobox_fetch = git
+pkg_pobox_repo = https://github.com/ferd/pobox
+pkg_pobox_commit = master
+
+PACKAGES += ponos
+pkg_ponos_name = ponos
+pkg_ponos_description = ponos is a simple yet powerful load generator written in Erlang
+pkg_ponos_homepage = https://github.com/klarna/ponos
+pkg_ponos_fetch = git
+pkg_ponos_repo = https://github.com/klarna/ponos
+pkg_ponos_commit = master
+
+PACKAGES += poolboy
+pkg_poolboy_name = poolboy
+pkg_poolboy_description = A hunky Erlang worker pool factory
+pkg_poolboy_homepage = https://github.com/devinus/poolboy
+pkg_poolboy_fetch = git
+pkg_poolboy_repo = https://github.com/devinus/poolboy
+pkg_poolboy_commit = master
+
+PACKAGES += pooler
+pkg_pooler_name = pooler
+pkg_pooler_description = An OTP Process Pool Application
+pkg_pooler_homepage = https://github.com/seth/pooler
+pkg_pooler_fetch = git
+pkg_pooler_repo = https://github.com/seth/pooler
+pkg_pooler_commit = master
+
+PACKAGES += pqueue
+pkg_pqueue_name = pqueue
+pkg_pqueue_description = Erlang Priority Queues
+pkg_pqueue_homepage = https://github.com/okeuday/pqueue
+pkg_pqueue_fetch = git
+pkg_pqueue_repo = https://github.com/okeuday/pqueue
+pkg_pqueue_commit = master
+
+PACKAGES += procket
+pkg_procket_name = procket
+pkg_procket_description = Erlang interface to low level socket operations
+pkg_procket_homepage = http://blog.listincomprehension.com/search/label/procket
+pkg_procket_fetch = git
+pkg_procket_repo = https://github.com/msantos/procket
+pkg_procket_commit = master
+
+PACKAGES += prometheus
+pkg_prometheus_name = prometheus
+pkg_prometheus_description = Prometheus.io client in Erlang
+pkg_prometheus_homepage = https://github.com/deadtrickster/prometheus.erl
+pkg_prometheus_fetch = git
+pkg_prometheus_repo = https://github.com/deadtrickster/prometheus.erl
+pkg_prometheus_commit = master
+
+PACKAGES += prop
+pkg_prop_name = prop
+pkg_prop_description = An Erlang code scaffolding and generator system.
+pkg_prop_homepage = https://github.com/nuex/prop
+pkg_prop_fetch = git
+pkg_prop_repo = https://github.com/nuex/prop
+pkg_prop_commit = master
+
+PACKAGES += proper
+pkg_proper_name = proper
+pkg_proper_description = PropEr: a QuickCheck-inspired property-based testing tool for Erlang.
+pkg_proper_homepage = http://proper.softlab.ntua.gr
+pkg_proper_fetch = git
+pkg_proper_repo = https://github.com/manopapad/proper
+pkg_proper_commit = master
+
+PACKAGES += props
+pkg_props_name = props
+pkg_props_description = Property structure library
+pkg_props_homepage = https://github.com/greyarea/props
+pkg_props_fetch = git
+pkg_props_repo = https://github.com/greyarea/props
+pkg_props_commit = master
+
+PACKAGES += protobuffs
+pkg_protobuffs_name = protobuffs
+pkg_protobuffs_description = An implementation of Google's Protocol Buffers for Erlang, based on ngerakines/erlang_protobuffs.
+pkg_protobuffs_homepage = https://github.com/basho/erlang_protobuffs
+pkg_protobuffs_fetch = git
+pkg_protobuffs_repo = https://github.com/basho/erlang_protobuffs
+pkg_protobuffs_commit = master
+
+PACKAGES += psycho
+pkg_psycho_name = psycho
+pkg_psycho_description = HTTP server that provides a WSGI-like interface for applications and middleware.
+pkg_psycho_homepage = https://github.com/gar1t/psycho
+pkg_psycho_fetch = git
+pkg_psycho_repo = https://github.com/gar1t/psycho
+pkg_psycho_commit = master
+
+PACKAGES += purity
+pkg_purity_name = purity
+pkg_purity_description = A side-effect analyzer for Erlang
+pkg_purity_homepage = https://github.com/mpitid/purity
+pkg_purity_fetch = git
+pkg_purity_repo = https://github.com/mpitid/purity
+pkg_purity_commit = master
+
+PACKAGES += push_service
+pkg_push_service_name = push_service
+pkg_push_service_description = Push service
+pkg_push_service_homepage = https://github.com/hairyhum/push_service
+pkg_push_service_fetch = git
+pkg_push_service_repo = https://github.com/hairyhum/push_service
+pkg_push_service_commit = master
+
+PACKAGES += qdate
+pkg_qdate_name = qdate
+pkg_qdate_description = Date, time, and timezone parsing, formatting, and conversion for Erlang.
+pkg_qdate_homepage = https://github.com/choptastic/qdate
+pkg_qdate_fetch = git
+pkg_qdate_repo = https://github.com/choptastic/qdate
+pkg_qdate_commit = master
+
+PACKAGES += qrcode
+pkg_qrcode_name = qrcode
+pkg_qrcode_description = QR Code encoder in Erlang
+pkg_qrcode_homepage = https://github.com/komone/qrcode
+pkg_qrcode_fetch = git
+pkg_qrcode_repo = https://github.com/komone/qrcode
+pkg_qrcode_commit = master
+
+PACKAGES += quest
+pkg_quest_name = quest
+pkg_quest_description = Learn Erlang through this set of challenges. An interactive system for getting to know Erlang.
+pkg_quest_homepage = https://github.com/eriksoe/ErlangQuest
+pkg_quest_fetch = git
+pkg_quest_repo = https://github.com/eriksoe/ErlangQuest
+pkg_quest_commit = master
+
+PACKAGES += quickrand
+pkg_quickrand_name = quickrand
+pkg_quickrand_description = Quick Erlang Random Number Generation
+pkg_quickrand_homepage = https://github.com/okeuday/quickrand
+pkg_quickrand_fetch = git
+pkg_quickrand_repo = https://github.com/okeuday/quickrand
+pkg_quickrand_commit = master
+
+PACKAGES += rabbit
+pkg_rabbit_name = rabbit
+pkg_rabbit_description = RabbitMQ Server
+pkg_rabbit_homepage = https://www.rabbitmq.com/
+pkg_rabbit_fetch = git
+pkg_rabbit_repo = https://github.com/rabbitmq/rabbitmq-server.git
+pkg_rabbit_commit = master
+
+PACKAGES += rabbit_exchange_type_riak
+pkg_rabbit_exchange_type_riak_name = rabbit_exchange_type_riak
+pkg_rabbit_exchange_type_riak_description = Custom RabbitMQ exchange type for sticking messages in Riak
+pkg_rabbit_exchange_type_riak_homepage = https://github.com/jbrisbin/riak-exchange
+pkg_rabbit_exchange_type_riak_fetch = git
+pkg_rabbit_exchange_type_riak_repo = https://github.com/jbrisbin/riak-exchange
+pkg_rabbit_exchange_type_riak_commit = master
+
+PACKAGES += rack
+pkg_rack_name = rack
+pkg_rack_description = Rack handler for Erlang
+pkg_rack_homepage = https://github.com/erlyvideo/rack
+pkg_rack_fetch = git
+pkg_rack_repo = https://github.com/erlyvideo/rack
+pkg_rack_commit = master
+
+PACKAGES += radierl
+pkg_radierl_name = radierl
+pkg_radierl_description = RADIUS protocol stack implemented in Erlang.
+pkg_radierl_homepage = https://github.com/vances/radierl
+pkg_radierl_fetch = git
+pkg_radierl_repo = https://github.com/vances/radierl
+pkg_radierl_commit = master
+
+PACKAGES += rafter
+pkg_rafter_name = rafter
+pkg_rafter_description = An Erlang library application which implements the Raft consensus protocol
+pkg_rafter_homepage = https://github.com/andrewjstone/rafter
+pkg_rafter_fetch = git
+pkg_rafter_repo = https://github.com/andrewjstone/rafter
+pkg_rafter_commit = master
+
+PACKAGES += ranch
+pkg_ranch_name = ranch
+pkg_ranch_description = Socket acceptor pool for TCP protocols.
+pkg_ranch_homepage = http://ninenines.eu
+pkg_ranch_fetch = git
+pkg_ranch_repo = https://github.com/ninenines/ranch
+pkg_ranch_commit = 1.2.1
+
+PACKAGES += rbeacon
+pkg_rbeacon_name = rbeacon
+pkg_rbeacon_description = LAN discovery and presence in Erlang.
+pkg_rbeacon_homepage = https://github.com/refuge/rbeacon
+pkg_rbeacon_fetch = git
+pkg_rbeacon_repo = https://github.com/refuge/rbeacon
+pkg_rbeacon_commit = master
+
+PACKAGES += rebar
+pkg_rebar_name = rebar
+pkg_rebar_description = Erlang build tool that makes it easy to compile and test Erlang applications, port drivers and releases.
+pkg_rebar_homepage = http://www.rebar3.org
+pkg_rebar_fetch = git
+pkg_rebar_repo = https://github.com/rebar/rebar3
+pkg_rebar_commit = master
+
+PACKAGES += rebus
+pkg_rebus_name = rebus
+pkg_rebus_description = A stupid simple, internal, pub/sub event bus written in- and for Erlang.
+pkg_rebus_homepage = https://github.com/olle/rebus
+pkg_rebus_fetch = git
+pkg_rebus_repo = https://github.com/olle/rebus
+pkg_rebus_commit = master
+
+PACKAGES += rec2json
+pkg_rec2json_name = rec2json
+pkg_rec2json_description = Compile Erlang record definitions into modules to convert them to/from JSON easily.
+pkg_rec2json_homepage = https://github.com/lordnull/rec2json
+pkg_rec2json_fetch = git
+pkg_rec2json_repo = https://github.com/lordnull/rec2json
+pkg_rec2json_commit = master
+
+PACKAGES += recon
+pkg_recon_name = recon
+pkg_recon_description = Collection of functions and scripts to debug Erlang in production.
+pkg_recon_homepage = https://github.com/ferd/recon
+pkg_recon_fetch = git
+pkg_recon_repo = https://github.com/ferd/recon
+pkg_recon_commit = master
+
+PACKAGES += record_info
+pkg_record_info_name = record_info
+pkg_record_info_description = Convert between record and proplist
+pkg_record_info_homepage = https://github.com/bipthelin/erlang-record_info
+pkg_record_info_fetch = git
+pkg_record_info_repo = https://github.com/bipthelin/erlang-record_info
+pkg_record_info_commit = master
+
+PACKAGES += redgrid
+pkg_redgrid_name = redgrid
+pkg_redgrid_description = automatic Erlang node discovery via redis
+pkg_redgrid_homepage = https://github.com/jkvor/redgrid
+pkg_redgrid_fetch = git
+pkg_redgrid_repo = https://github.com/jkvor/redgrid
+pkg_redgrid_commit = master
+
+PACKAGES += redo
+pkg_redo_name = redo
+pkg_redo_description = Pipelined Erlang Redis client
+pkg_redo_homepage = https://github.com/jkvor/redo
+pkg_redo_fetch = git
+pkg_redo_repo = https://github.com/jkvor/redo
+pkg_redo_commit = master
+
+PACKAGES += reload_mk
+pkg_reload_mk_name = reload_mk
+pkg_reload_mk_description = Live reload plugin for erlang.mk.
+pkg_reload_mk_homepage = https://github.com/bullno1/reload.mk
+pkg_reload_mk_fetch = git
+pkg_reload_mk_repo = https://github.com/bullno1/reload.mk
+pkg_reload_mk_commit = master
+
+PACKAGES += reltool_util
+pkg_reltool_util_name = reltool_util
+pkg_reltool_util_description = Erlang reltool utility functionality application
+pkg_reltool_util_homepage = https://github.com/okeuday/reltool_util
+pkg_reltool_util_fetch = git
+pkg_reltool_util_repo = https://github.com/okeuday/reltool_util
+pkg_reltool_util_commit = master
+
+PACKAGES += relx
+pkg_relx_name = relx
+pkg_relx_description = Sane, simple release creation for Erlang
+pkg_relx_homepage = https://github.com/erlware/relx
+pkg_relx_fetch = git
+pkg_relx_repo = https://github.com/erlware/relx
+pkg_relx_commit = master
+
+PACKAGES += resource_discovery
+pkg_resource_discovery_name = resource_discovery
+pkg_resource_discovery_description = An application used to dynamically discover resources present in an Erlang node cluster.
+pkg_resource_discovery_homepage = http://erlware.org/
+pkg_resource_discovery_fetch = git
+pkg_resource_discovery_repo = https://github.com/erlware/resource_discovery
+pkg_resource_discovery_commit = master
+
+PACKAGES += restc
+pkg_restc_name = restc
+pkg_restc_description = Erlang REST client
+pkg_restc_homepage = https://github.com/kivra/restclient
+pkg_restc_fetch = git
+pkg_restc_repo = https://github.com/kivra/restclient
+pkg_restc_commit = master
+
+PACKAGES += rfc4627_jsonrpc
+pkg_rfc4627_jsonrpc_name = rfc4627_jsonrpc
+pkg_rfc4627_jsonrpc_description = Erlang RFC4627 (JSON) codec and JSON-RPC server implementation.
+pkg_rfc4627_jsonrpc_homepage = https://github.com/tonyg/erlang-rfc4627
+pkg_rfc4627_jsonrpc_fetch = git
+pkg_rfc4627_jsonrpc_repo = https://github.com/tonyg/erlang-rfc4627
+pkg_rfc4627_jsonrpc_commit = master
+
+PACKAGES += riak_control
+pkg_riak_control_name = riak_control
+pkg_riak_control_description = Webmachine-based administration interface for Riak.
+pkg_riak_control_homepage = https://github.com/basho/riak_control
+pkg_riak_control_fetch = git
+pkg_riak_control_repo = https://github.com/basho/riak_control
+pkg_riak_control_commit = master
+
+PACKAGES += riak_core
+pkg_riak_core_name = riak_core
+pkg_riak_core_description = Distributed systems infrastructure used by Riak.
+pkg_riak_core_homepage = https://github.com/basho/riak_core
+pkg_riak_core_fetch = git
+pkg_riak_core_repo = https://github.com/basho/riak_core
+pkg_riak_core_commit = master
+
+PACKAGES += riak_dt
+pkg_riak_dt_name = riak_dt
+pkg_riak_dt_description = Convergent replicated datatypes in Erlang
+pkg_riak_dt_homepage = https://github.com/basho/riak_dt
+pkg_riak_dt_fetch = git
+pkg_riak_dt_repo = https://github.com/basho/riak_dt
+pkg_riak_dt_commit = master
+
+PACKAGES += riak_ensemble
+pkg_riak_ensemble_name = riak_ensemble
+pkg_riak_ensemble_description = Multi-Paxos framework in Erlang
+pkg_riak_ensemble_homepage = https://github.com/basho/riak_ensemble
+pkg_riak_ensemble_fetch = git
+pkg_riak_ensemble_repo = https://github.com/basho/riak_ensemble
+pkg_riak_ensemble_commit = master
+
+PACKAGES += riak_kv
+pkg_riak_kv_name = riak_kv
+pkg_riak_kv_description = Riak Key/Value Store
+pkg_riak_kv_homepage = https://github.com/basho/riak_kv
+pkg_riak_kv_fetch = git
+pkg_riak_kv_repo = https://github.com/basho/riak_kv
+pkg_riak_kv_commit = master
+
+PACKAGES += riak_pg
+pkg_riak_pg_name = riak_pg
+pkg_riak_pg_description = Distributed process groups with riak_core.
+pkg_riak_pg_homepage = https://github.com/cmeiklejohn/riak_pg
+pkg_riak_pg_fetch = git
+pkg_riak_pg_repo = https://github.com/cmeiklejohn/riak_pg
+pkg_riak_pg_commit = master
+
+PACKAGES += riak_pipe
+pkg_riak_pipe_name = riak_pipe
+pkg_riak_pipe_description = Riak Pipelines
+pkg_riak_pipe_homepage = https://github.com/basho/riak_pipe
+pkg_riak_pipe_fetch = git
+pkg_riak_pipe_repo = https://github.com/basho/riak_pipe
+pkg_riak_pipe_commit = master
+
+PACKAGES += riak_sysmon
+pkg_riak_sysmon_name = riak_sysmon
+pkg_riak_sysmon_description = Simple OTP app for managing Erlang VM system_monitor event messages
+pkg_riak_sysmon_homepage = https://github.com/basho/riak_sysmon
+pkg_riak_sysmon_fetch = git
+pkg_riak_sysmon_repo = https://github.com/basho/riak_sysmon
+pkg_riak_sysmon_commit = master
+
+PACKAGES += riak_test
+pkg_riak_test_name = riak_test
+pkg_riak_test_description = I'm in your cluster, testing your riaks
+pkg_riak_test_homepage = https://github.com/basho/riak_test
+pkg_riak_test_fetch = git
+pkg_riak_test_repo = https://github.com/basho/riak_test
+pkg_riak_test_commit = master
+
+PACKAGES += riakc
+pkg_riakc_name = riakc
+pkg_riakc_description = Erlang clients for Riak.
+pkg_riakc_homepage = https://github.com/basho/riak-erlang-client
+pkg_riakc_fetch = git
+pkg_riakc_repo = https://github.com/basho/riak-erlang-client
+pkg_riakc_commit = master
+
+PACKAGES += riakhttpc
+pkg_riakhttpc_name = riakhttpc
+pkg_riakhttpc_description = Riak Erlang client using the HTTP interface
+pkg_riakhttpc_homepage = https://github.com/basho/riak-erlang-http-client
+pkg_riakhttpc_fetch = git
+pkg_riakhttpc_repo = https://github.com/basho/riak-erlang-http-client
+pkg_riakhttpc_commit = master
+
+PACKAGES += riaknostic
+pkg_riaknostic_name = riaknostic
+pkg_riaknostic_description = A diagnostic tool for Riak installations, to find common errors asap
+pkg_riaknostic_homepage = https://github.com/basho/riaknostic
+pkg_riaknostic_fetch = git
+pkg_riaknostic_repo = https://github.com/basho/riaknostic
+pkg_riaknostic_commit = master
+
+PACKAGES += riakpool
+pkg_riakpool_name = riakpool
+pkg_riakpool_description = Erlang Riak client pool
+pkg_riakpool_homepage = https://github.com/dweldon/riakpool
+pkg_riakpool_fetch = git
+pkg_riakpool_repo = https://github.com/dweldon/riakpool
+pkg_riakpool_commit = master
+
+PACKAGES += rivus_cep
+pkg_rivus_cep_name = rivus_cep
+pkg_rivus_cep_description = Complex event processing in Erlang
+pkg_rivus_cep_homepage = https://github.com/vascokk/rivus_cep
+pkg_rivus_cep_fetch = git
+pkg_rivus_cep_repo = https://github.com/vascokk/rivus_cep
+pkg_rivus_cep_commit = master
+
+PACKAGES += rlimit
+pkg_rlimit_name = rlimit
+pkg_rlimit_description = Magnus Klaar's rate limiter code from etorrent
+pkg_rlimit_homepage = https://github.com/jlouis/rlimit
+pkg_rlimit_fetch = git
+pkg_rlimit_repo = https://github.com/jlouis/rlimit
+pkg_rlimit_commit = master
+
+PACKAGES += rust_mk
+pkg_rust_mk_name = rust_mk
+pkg_rust_mk_description = Build Rust crates in an Erlang application
+pkg_rust_mk_homepage = https://github.com/goertzenator/rust.mk
+pkg_rust_mk_fetch = git
+pkg_rust_mk_repo = https://github.com/goertzenator/rust.mk
+pkg_rust_mk_commit = master
+
+PACKAGES += safetyvalve
+pkg_safetyvalve_name = safetyvalve
+pkg_safetyvalve_description = A safety valve for your erlang node
+pkg_safetyvalve_homepage = https://github.com/jlouis/safetyvalve
+pkg_safetyvalve_fetch = git
+pkg_safetyvalve_repo = https://github.com/jlouis/safetyvalve
+pkg_safetyvalve_commit = master
+
+PACKAGES += seestar
+pkg_seestar_name = seestar
+pkg_seestar_description = The Erlang client for Cassandra 1.2+ binary protocol
+pkg_seestar_homepage = https://github.com/iamaleksey/seestar
+pkg_seestar_fetch = git
+pkg_seestar_repo = https://github.com/iamaleksey/seestar
+pkg_seestar_commit = master
+
+PACKAGES += service
+pkg_service_name = service
+pkg_service_description = A minimal Erlang behavior for creating CloudI internal services
+pkg_service_homepage = http://cloudi.org/
+pkg_service_fetch = git
+pkg_service_repo = https://github.com/CloudI/service
+pkg_service_commit = master
+
+PACKAGES += setup
+pkg_setup_name = setup
+pkg_setup_description = Generic setup utility for Erlang-based systems
+pkg_setup_homepage = https://github.com/uwiger/setup
+pkg_setup_fetch = git
+pkg_setup_repo = https://github.com/uwiger/setup
+pkg_setup_commit = master
+
+PACKAGES += sext
+pkg_sext_name = sext
+pkg_sext_description = Sortable Erlang Term Serialization
+pkg_sext_homepage = https://github.com/uwiger/sext
+pkg_sext_fetch = git
+pkg_sext_repo = https://github.com/uwiger/sext
+pkg_sext_commit = master
+
+PACKAGES += sfmt
+pkg_sfmt_name = sfmt
+pkg_sfmt_description = SFMT pseudo random number generator for Erlang.
+pkg_sfmt_homepage = https://github.com/jj1bdx/sfmt-erlang
+pkg_sfmt_fetch = git
+pkg_sfmt_repo = https://github.com/jj1bdx/sfmt-erlang
+pkg_sfmt_commit = master
+
+PACKAGES += sgte
+pkg_sgte_name = sgte
+pkg_sgte_description = A simple Erlang Template Engine
+pkg_sgte_homepage = https://github.com/filippo/sgte
+pkg_sgte_fetch = git
+pkg_sgte_repo = https://github.com/filippo/sgte
+pkg_sgte_commit = master
+
+PACKAGES += sheriff
+pkg_sheriff_name = sheriff
+pkg_sheriff_description = Parse transform for type based validation.
+pkg_sheriff_homepage = http://ninenines.eu
+pkg_sheriff_fetch = git
+pkg_sheriff_repo = https://github.com/extend/sheriff
+pkg_sheriff_commit = master
+
+PACKAGES += shotgun
+pkg_shotgun_name = shotgun
+pkg_shotgun_description = better than just a gun
+pkg_shotgun_homepage = https://github.com/inaka/shotgun
+pkg_shotgun_fetch = git
+pkg_shotgun_repo = https://github.com/inaka/shotgun
+pkg_shotgun_commit = master
+
+PACKAGES += sidejob
+pkg_sidejob_name = sidejob
+pkg_sidejob_description = Parallel worker and capacity limiting library for Erlang
+pkg_sidejob_homepage = https://github.com/basho/sidejob
+pkg_sidejob_fetch = git
+pkg_sidejob_repo = https://github.com/basho/sidejob
+pkg_sidejob_commit = master
+
+PACKAGES += sieve
+pkg_sieve_name = sieve
+pkg_sieve_description = sieve is a simple TCP routing proxy (layer 7) in Erlang
+pkg_sieve_homepage = https://github.com/benoitc/sieve
+pkg_sieve_fetch = git
+pkg_sieve_repo = https://github.com/benoitc/sieve
+pkg_sieve_commit = master
+
+PACKAGES += sighandler
+pkg_sighandler_name = sighandler
+pkg_sighandler_description = Handle UNIX signals in Erlang
+pkg_sighandler_homepage = https://github.com/jkingsbery/sighandler
+pkg_sighandler_fetch = git
+pkg_sighandler_repo = https://github.com/jkingsbery/sighandler
+pkg_sighandler_commit = master
+
+PACKAGES += simhash
+pkg_simhash_name = simhash
+pkg_simhash_description = Simhashing for Erlang -- hashing algorithm to find near-duplicates in binary data.
+pkg_simhash_homepage = https://github.com/ferd/simhash
+pkg_simhash_fetch = git
+pkg_simhash_repo = https://github.com/ferd/simhash
+pkg_simhash_commit = master
+
+PACKAGES += simple_bridge
+pkg_simple_bridge_name = simple_bridge
+pkg_simple_bridge_description = A simple, standardized interface library to Erlang HTTP Servers.
+pkg_simple_bridge_homepage = https://github.com/nitrogen/simple_bridge
+pkg_simple_bridge_fetch = git
+pkg_simple_bridge_repo = https://github.com/nitrogen/simple_bridge
+pkg_simple_bridge_commit = master
+
+PACKAGES += simple_oauth2
+pkg_simple_oauth2_name = simple_oauth2
+pkg_simple_oauth2_description = Simple erlang OAuth2 client module for any http server framework (Google, Facebook, Yandex, Vkontakte are preconfigured)
+pkg_simple_oauth2_homepage = https://github.com/virtan/simple_oauth2
+pkg_simple_oauth2_fetch = git
+pkg_simple_oauth2_repo = https://github.com/virtan/simple_oauth2
+pkg_simple_oauth2_commit = master
+
+PACKAGES += skel
+pkg_skel_name = skel
+pkg_skel_description = A Streaming Process-based Skeleton Library for Erlang
+pkg_skel_homepage = https://github.com/ParaPhrase/skel
+pkg_skel_fetch = git
+pkg_skel_repo = https://github.com/ParaPhrase/skel
+pkg_skel_commit = master
+
+PACKAGES += slack
+pkg_slack_name = slack
+pkg_slack_description = Minimal slack notification OTP library.
+pkg_slack_homepage = https://github.com/DonBranson/slack
+pkg_slack_fetch = git
+pkg_slack_repo = https://github.com/DonBranson/slack.git
+pkg_slack_commit = master
+
+PACKAGES += smother
+pkg_smother_name = smother
+pkg_smother_description = Extended code coverage metrics for Erlang.
+pkg_smother_homepage = https://ramsay-t.github.io/Smother/
+pkg_smother_fetch = git
+pkg_smother_repo = https://github.com/ramsay-t/Smother
+pkg_smother_commit = master
+
+PACKAGES += snappyer
+pkg_snappyer_name = snappyer
+pkg_snappyer_description = Snappy as nif for Erlang
+pkg_snappyer_homepage = https://github.com/zmstone/snappyer
+pkg_snappyer_fetch = git
+pkg_snappyer_repo = https://github.com/zmstone/snappyer.git
+pkg_snappyer_commit = master
+
+PACKAGES += social
+pkg_social_name = social
+pkg_social_description = Cowboy handler for social login via OAuth2 providers
+pkg_social_homepage = https://github.com/dvv/social
+pkg_social_fetch = git
+pkg_social_repo = https://github.com/dvv/social
+pkg_social_commit = master
+
+PACKAGES += spapi_router
+pkg_spapi_router_name = spapi_router
+pkg_spapi_router_description = Partially-connected Erlang clustering
+pkg_spapi_router_homepage = https://github.com/spilgames/spapi-router
+pkg_spapi_router_fetch = git
+pkg_spapi_router_repo = https://github.com/spilgames/spapi-router
+pkg_spapi_router_commit = master
+
+PACKAGES += sqerl
+pkg_sqerl_name = sqerl
+pkg_sqerl_description = An Erlang-flavoured SQL DSL
+pkg_sqerl_homepage = https://github.com/hairyhum/sqerl
+pkg_sqerl_fetch = git
+pkg_sqerl_repo = https://github.com/hairyhum/sqerl
+pkg_sqerl_commit = master
+
+PACKAGES += srly
+pkg_srly_name = srly
+pkg_srly_description = Native Erlang Unix serial interface
+pkg_srly_homepage = https://github.com/msantos/srly
+pkg_srly_fetch = git
+pkg_srly_repo = https://github.com/msantos/srly
+pkg_srly_commit = master
+
+PACKAGES += sshrpc
+pkg_sshrpc_name = sshrpc
+pkg_sshrpc_description = Erlang SSH RPC module (experimental)
+pkg_sshrpc_homepage = https://github.com/jj1bdx/sshrpc
+pkg_sshrpc_fetch = git
+pkg_sshrpc_repo = https://github.com/jj1bdx/sshrpc
+pkg_sshrpc_commit = master
+
+PACKAGES += stable
+pkg_stable_name = stable
+pkg_stable_description = Library of assorted helpers for Cowboy web server.
+pkg_stable_homepage = https://github.com/dvv/stable
+pkg_stable_fetch = git
+pkg_stable_repo = https://github.com/dvv/stable
+pkg_stable_commit = master
+
+PACKAGES += statebox
+pkg_statebox_name = statebox
+pkg_statebox_description = Erlang state monad with merge/conflict-resolution capabilities. Useful for Riak.
+pkg_statebox_homepage = https://github.com/mochi/statebox
+pkg_statebox_fetch = git
+pkg_statebox_repo = https://github.com/mochi/statebox
+pkg_statebox_commit = master
+
+PACKAGES += statebox_riak
+pkg_statebox_riak_name = statebox_riak
+pkg_statebox_riak_description = Convenience library that makes it easier to use statebox with riak, extracted from best practices in our production code at Mochi Media.
+pkg_statebox_riak_homepage = https://github.com/mochi/statebox_riak
+pkg_statebox_riak_fetch = git
+pkg_statebox_riak_repo = https://github.com/mochi/statebox_riak
+pkg_statebox_riak_commit = master
+
+PACKAGES += statman
+pkg_statman_name = statman
+pkg_statman_description = Efficiently collect massive volumes of metrics inside the Erlang VM
+pkg_statman_homepage = https://github.com/knutin/statman
+pkg_statman_fetch = git
+pkg_statman_repo = https://github.com/knutin/statman
+pkg_statman_commit = master
+
+PACKAGES += statsderl
+pkg_statsderl_name = statsderl
+pkg_statsderl_description = StatsD client (erlang)
+pkg_statsderl_homepage = https://github.com/lpgauth/statsderl
+pkg_statsderl_fetch = git
+pkg_statsderl_repo = https://github.com/lpgauth/statsderl
+pkg_statsderl_commit = master
+
+PACKAGES += stdinout_pool
+pkg_stdinout_pool_name = stdinout_pool
+pkg_stdinout_pool_description = stdinout_pool : stuff goes in, stuff goes out. there's never any miscommunication.
+pkg_stdinout_pool_homepage = https://github.com/mattsta/erlang-stdinout-pool
+pkg_stdinout_pool_fetch = git
+pkg_stdinout_pool_repo = https://github.com/mattsta/erlang-stdinout-pool
+pkg_stdinout_pool_commit = master
+
+PACKAGES += stockdb
+pkg_stockdb_name = stockdb
+pkg_stockdb_description = Database for storing stock exchange quotes in Erlang
+pkg_stockdb_homepage = https://github.com/maxlapshin/stockdb
+pkg_stockdb_fetch = git
+pkg_stockdb_repo = https://github.com/maxlapshin/stockdb
+pkg_stockdb_commit = master
+
+PACKAGES += stripe
+pkg_stripe_name = stripe
+pkg_stripe_description = Erlang interface to the stripe.com API
+pkg_stripe_homepage = https://github.com/mattsta/stripe-erlang
+pkg_stripe_fetch = git
+pkg_stripe_repo = https://github.com/mattsta/stripe-erlang
+pkg_stripe_commit = v1
+
+PACKAGES += subproc
+pkg_subproc_name = subproc
+pkg_subproc_description = unix subprocess manager with {active,once|false} modes
+pkg_subproc_homepage = http://dozzie.jarowit.net/trac/wiki/subproc
+pkg_subproc_fetch = git
+pkg_subproc_repo = https://github.com/dozzie/subproc
+pkg_subproc_commit = v0.1.0
+
+PACKAGES += supervisor3
+pkg_supervisor3_name = supervisor3
+pkg_supervisor3_description = OTP supervisor with additional strategies
+pkg_supervisor3_homepage = https://github.com/klarna/supervisor3
+pkg_supervisor3_fetch = git
+pkg_supervisor3_repo = https://github.com/klarna/supervisor3.git
+pkg_supervisor3_commit = master
+
+PACKAGES += surrogate
+pkg_surrogate_name = surrogate
+pkg_surrogate_description = Proxy server written in erlang. Supports reverse proxy load balancing and forward proxy with http (including CONNECT), socks4, socks5, and transparent proxy modes.
+pkg_surrogate_homepage = https://github.com/skruger/Surrogate
+pkg_surrogate_fetch = git
+pkg_surrogate_repo = https://github.com/skruger/Surrogate
+pkg_surrogate_commit = master
+
+PACKAGES += swab
+pkg_swab_name = swab
+pkg_swab_description = General purpose buffer handling module
+pkg_swab_homepage = https://github.com/crownedgrouse/swab
+pkg_swab_fetch = git
+pkg_swab_repo = https://github.com/crownedgrouse/swab
+pkg_swab_commit = master
+
+PACKAGES += swarm
+pkg_swarm_name = swarm
+pkg_swarm_description = Fast and simple acceptor pool for Erlang
+pkg_swarm_homepage = https://github.com/jeremey/swarm
+pkg_swarm_fetch = git
+pkg_swarm_repo = https://github.com/jeremey/swarm
+pkg_swarm_commit = master
+
+PACKAGES += switchboard
+pkg_switchboard_name = switchboard
+pkg_switchboard_description = A framework for processing email using worker plugins.
+pkg_switchboard_homepage = https://github.com/thusfresh/switchboard
+pkg_switchboard_fetch = git
+pkg_switchboard_repo = https://github.com/thusfresh/switchboard
+pkg_switchboard_commit = master
+
+PACKAGES += syn
+pkg_syn_name = syn
+pkg_syn_description = A global Process Registry and Process Group manager for Erlang.
+pkg_syn_homepage = https://github.com/ostinelli/syn
+pkg_syn_fetch = git
+pkg_syn_repo = https://github.com/ostinelli/syn
+pkg_syn_commit = master
+
+PACKAGES += sync
+pkg_sync_name = sync
+pkg_sync_description = On-the-fly recompiling and reloading in Erlang.
+pkg_sync_homepage = https://github.com/rustyio/sync
+pkg_sync_fetch = git
+pkg_sync_repo = https://github.com/rustyio/sync
+pkg_sync_commit = master
+
+PACKAGES += syntaxerl
+pkg_syntaxerl_name = syntaxerl
+pkg_syntaxerl_description = Syntax checker for Erlang
+pkg_syntaxerl_homepage = https://github.com/ten0s/syntaxerl
+pkg_syntaxerl_fetch = git
+pkg_syntaxerl_repo = https://github.com/ten0s/syntaxerl
+pkg_syntaxerl_commit = master
+
+PACKAGES += syslog
+pkg_syslog_name = syslog
+pkg_syslog_description = Erlang port driver for interacting with syslog via syslog(3)
+pkg_syslog_homepage = https://github.com/Vagabond/erlang-syslog
+pkg_syslog_fetch = git
+pkg_syslog_repo = https://github.com/Vagabond/erlang-syslog
+pkg_syslog_commit = master
+
+PACKAGES += taskforce
+pkg_taskforce_name = taskforce
+pkg_taskforce_description = Erlang worker pools for controlled parallelisation of arbitrary tasks.
+pkg_taskforce_homepage = https://github.com/g-andrade/taskforce
+pkg_taskforce_fetch = git
+pkg_taskforce_repo = https://github.com/g-andrade/taskforce
+pkg_taskforce_commit = master
+
+PACKAGES += tddreloader
+pkg_tddreloader_name = tddreloader
+pkg_tddreloader_description = Shell utility for recompiling, reloading, and testing code as it changes
+pkg_tddreloader_homepage = https://github.com/version2beta/tddreloader
+pkg_tddreloader_fetch = git
+pkg_tddreloader_repo = https://github.com/version2beta/tddreloader
+pkg_tddreloader_commit = master
+
+PACKAGES += tempo
+pkg_tempo_name = tempo
+pkg_tempo_description = NIF-based date and time parsing and formatting for Erlang.
+pkg_tempo_homepage = https://github.com/selectel/tempo
+pkg_tempo_fetch = git
+pkg_tempo_repo = https://github.com/selectel/tempo
+pkg_tempo_commit = master
+
+PACKAGES += ticktick
+pkg_ticktick_name = ticktick
+pkg_ticktick_description = Ticktick is an ID generator for message services.
+pkg_ticktick_homepage = https://github.com/ericliang/ticktick
+pkg_ticktick_fetch = git
+pkg_ticktick_repo = https://github.com/ericliang/ticktick
+pkg_ticktick_commit = master
+
+PACKAGES += tinymq
+pkg_tinymq_name = tinymq
+pkg_tinymq_description = TinyMQ - a diminutive, in-memory message queue
+pkg_tinymq_homepage = https://github.com/ChicagoBoss/tinymq
+pkg_tinymq_fetch = git
+pkg_tinymq_repo = https://github.com/ChicagoBoss/tinymq
+pkg_tinymq_commit = master
+
+PACKAGES += tinymt
+pkg_tinymt_name = tinymt
+pkg_tinymt_description = TinyMT pseudo random number generator for Erlang.
+pkg_tinymt_homepage = https://github.com/jj1bdx/tinymt-erlang
+pkg_tinymt_fetch = git
+pkg_tinymt_repo = https://github.com/jj1bdx/tinymt-erlang
+pkg_tinymt_commit = master
+
+PACKAGES += tirerl
+pkg_tirerl_name = tirerl
+pkg_tirerl_description = Erlang interface to Elastic Search
+pkg_tirerl_homepage = https://github.com/inaka/tirerl
+pkg_tirerl_fetch = git
+pkg_tirerl_repo = https://github.com/inaka/tirerl
+pkg_tirerl_commit = master
+
+PACKAGES += toml
+pkg_toml_name = toml
+pkg_toml_description = TOML (0.4.0) config parser
+pkg_toml_homepage = http://dozzie.jarowit.net/trac/wiki/TOML
+pkg_toml_fetch = git
+pkg_toml_repo = https://github.com/dozzie/toml
+pkg_toml_commit = v0.2.0
+
+PACKAGES += traffic_tools
+pkg_traffic_tools_name = traffic_tools
+pkg_traffic_tools_description = Simple traffic limiting library
+pkg_traffic_tools_homepage = https://github.com/systra/traffic_tools
+pkg_traffic_tools_fetch = git
+pkg_traffic_tools_repo = https://github.com/systra/traffic_tools
+pkg_traffic_tools_commit = master
+
+PACKAGES += trails
+pkg_trails_name = trails
+pkg_trails_description = A couple of improvements over Cowboy Routes
+pkg_trails_homepage = http://inaka.github.io/cowboy-trails/
+pkg_trails_fetch = git
+pkg_trails_repo = https://github.com/inaka/cowboy-trails
+pkg_trails_commit = master
+
+PACKAGES += trane
+pkg_trane_name = trane
+pkg_trane_description = SAX-style parser for broken HTML, in Erlang
+pkg_trane_homepage = https://github.com/massemanet/trane
+pkg_trane_fetch = git
+pkg_trane_repo = https://github.com/massemanet/trane
+pkg_trane_commit = master
+
+PACKAGES += transit
+pkg_transit_name = transit
+pkg_transit_description = Transit format for Erlang
+pkg_transit_homepage = https://github.com/isaiah/transit-erlang
+pkg_transit_fetch = git
+pkg_transit_repo = https://github.com/isaiah/transit-erlang
+pkg_transit_commit = master
+
+PACKAGES += trie
+pkg_trie_name = trie
+pkg_trie_description = Erlang Trie Implementation
+pkg_trie_homepage = https://github.com/okeuday/trie
+pkg_trie_fetch = git
+pkg_trie_repo = https://github.com/okeuday/trie
+pkg_trie_commit = master
+
+PACKAGES += triq
+pkg_triq_name = triq
+pkg_triq_description = Trifork QuickCheck
+pkg_triq_homepage = https://triq.gitlab.io
+pkg_triq_fetch = git
+pkg_triq_repo = https://gitlab.com/triq/triq.git
+pkg_triq_commit = master
+
+PACKAGES += tunctl
+pkg_tunctl_name = tunctl
+pkg_tunctl_description = Erlang TUN/TAP interface
+pkg_tunctl_homepage = https://github.com/msantos/tunctl
+pkg_tunctl_fetch = git
+pkg_tunctl_repo = https://github.com/msantos/tunctl
+pkg_tunctl_commit = master
+
+PACKAGES += twerl
+pkg_twerl_name = twerl
+pkg_twerl_description = Erlang client for the Twitter Streaming API
+pkg_twerl_homepage = https://github.com/lucaspiller/twerl
+pkg_twerl_fetch = git
+pkg_twerl_repo = https://github.com/lucaspiller/twerl
+pkg_twerl_commit = oauth
+
+PACKAGES += twitter_erlang
+pkg_twitter_erlang_name = twitter_erlang
+pkg_twitter_erlang_description = An Erlang twitter client
+pkg_twitter_erlang_homepage = https://github.com/ngerakines/erlang_twitter
+pkg_twitter_erlang_fetch = git
+pkg_twitter_erlang_repo = https://github.com/ngerakines/erlang_twitter
+pkg_twitter_erlang_commit = master
+
+PACKAGES += ucol_nif
+pkg_ucol_nif_name = ucol_nif
+pkg_ucol_nif_description = ICU based collation Erlang module
+pkg_ucol_nif_homepage = https://github.com/refuge/ucol_nif
+pkg_ucol_nif_fetch = git
+pkg_ucol_nif_repo = https://github.com/refuge/ucol_nif
+pkg_ucol_nif_commit = master
+
+PACKAGES += unicorn
+pkg_unicorn_name = unicorn
+pkg_unicorn_description = Generic configuration server
+pkg_unicorn_homepage = https://github.com/shizzard/unicorn
+pkg_unicorn_fetch = git
+pkg_unicorn_repo = https://github.com/shizzard/unicorn
+pkg_unicorn_commit = master
+
+PACKAGES += unsplit
+pkg_unsplit_name = unsplit
+pkg_unsplit_description = Resolves conflicts in Mnesia after network splits
+pkg_unsplit_homepage = https://github.com/uwiger/unsplit
+pkg_unsplit_fetch = git
+pkg_unsplit_repo = https://github.com/uwiger/unsplit
+pkg_unsplit_commit = master
+
+PACKAGES += uuid
+pkg_uuid_name = uuid
+pkg_uuid_description = Erlang UUID Implementation
+pkg_uuid_homepage = https://github.com/okeuday/uuid
+pkg_uuid_fetch = git
+pkg_uuid_repo = https://github.com/okeuday/uuid
+pkg_uuid_commit = master
+
+PACKAGES += ux
+pkg_ux_name = ux
+pkg_ux_description = Unicode eXtention for Erlang (Strings, Collation)
+pkg_ux_homepage = https://github.com/erlang-unicode/ux
+pkg_ux_fetch = git
+pkg_ux_repo = https://github.com/erlang-unicode/ux
+pkg_ux_commit = master
+
+PACKAGES += vert
+pkg_vert_name = vert
+pkg_vert_description = Erlang binding to libvirt virtualization API
+pkg_vert_homepage = https://github.com/msantos/erlang-libvirt
+pkg_vert_fetch = git
+pkg_vert_repo = https://github.com/msantos/erlang-libvirt
+pkg_vert_commit = master
+
+PACKAGES += verx
+pkg_verx_name = verx
+pkg_verx_description = Erlang implementation of the libvirtd remote protocol
+pkg_verx_homepage = https://github.com/msantos/verx
+pkg_verx_fetch = git
+pkg_verx_repo = https://github.com/msantos/verx
+pkg_verx_commit = master
+
+PACKAGES += vmq_acl
+pkg_vmq_acl_name = vmq_acl
+pkg_vmq_acl_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_acl_homepage = https://verne.mq/
+pkg_vmq_acl_fetch = git
+pkg_vmq_acl_repo = https://github.com/erlio/vmq_acl
+pkg_vmq_acl_commit = master
+
+PACKAGES += vmq_bridge
+pkg_vmq_bridge_name = vmq_bridge
+pkg_vmq_bridge_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_bridge_homepage = https://verne.mq/
+pkg_vmq_bridge_fetch = git
+pkg_vmq_bridge_repo = https://github.com/erlio/vmq_bridge
+pkg_vmq_bridge_commit = master
+
+PACKAGES += vmq_graphite
+pkg_vmq_graphite_name = vmq_graphite
+pkg_vmq_graphite_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_graphite_homepage = https://verne.mq/
+pkg_vmq_graphite_fetch = git
+pkg_vmq_graphite_repo = https://github.com/erlio/vmq_graphite
+pkg_vmq_graphite_commit = master
+
+PACKAGES += vmq_passwd
+pkg_vmq_passwd_name = vmq_passwd
+pkg_vmq_passwd_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_passwd_homepage = https://verne.mq/
+pkg_vmq_passwd_fetch = git
+pkg_vmq_passwd_repo = https://github.com/erlio/vmq_passwd
+pkg_vmq_passwd_commit = master
+
+PACKAGES += vmq_server
+pkg_vmq_server_name = vmq_server
+pkg_vmq_server_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_server_homepage = https://verne.mq/
+pkg_vmq_server_fetch = git
+pkg_vmq_server_repo = https://github.com/erlio/vmq_server
+pkg_vmq_server_commit = master
+
+PACKAGES += vmq_snmp
+pkg_vmq_snmp_name = vmq_snmp
+pkg_vmq_snmp_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_snmp_homepage = https://verne.mq/
+pkg_vmq_snmp_fetch = git
+pkg_vmq_snmp_repo = https://github.com/erlio/vmq_snmp
+pkg_vmq_snmp_commit = master
+
+PACKAGES += vmq_systree
+pkg_vmq_systree_name = vmq_systree
+pkg_vmq_systree_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_systree_homepage = https://verne.mq/
+pkg_vmq_systree_fetch = git
+pkg_vmq_systree_repo = https://github.com/erlio/vmq_systree
+pkg_vmq_systree_commit = master
+
+PACKAGES += vmstats
+pkg_vmstats_name = vmstats
+pkg_vmstats_description = tiny Erlang app that works in conjunction with statsderl in order to generate information on the Erlang VM for graphite logs.
+pkg_vmstats_homepage = https://github.com/ferd/vmstats
+pkg_vmstats_fetch = git
+pkg_vmstats_repo = https://github.com/ferd/vmstats
+pkg_vmstats_commit = master
+
+PACKAGES += walrus
+pkg_walrus_name = walrus
+pkg_walrus_description = Walrus - Mustache-like Templating
+pkg_walrus_homepage = https://github.com/devinus/walrus
+pkg_walrus_fetch = git
+pkg_walrus_repo = https://github.com/devinus/walrus
+pkg_walrus_commit = master
+
+PACKAGES += webmachine
+pkg_webmachine_name = webmachine
+pkg_webmachine_description = A REST-based system for building web applications.
+pkg_webmachine_homepage = https://github.com/basho/webmachine
+pkg_webmachine_fetch = git
+pkg_webmachine_repo = https://github.com/basho/webmachine
+pkg_webmachine_commit = master
+
+PACKAGES += websocket_client
+pkg_websocket_client_name = websocket_client
+pkg_websocket_client_description = Erlang websocket client (ws and wss supported)
+pkg_websocket_client_homepage = https://github.com/jeremyong/websocket_client
+pkg_websocket_client_fetch = git
+pkg_websocket_client_repo = https://github.com/jeremyong/websocket_client
+pkg_websocket_client_commit = master
+
+PACKAGES += worker_pool
+pkg_worker_pool_name = worker_pool
+pkg_worker_pool_description = A simple Erlang worker pool
+pkg_worker_pool_homepage = https://github.com/inaka/worker_pool
+pkg_worker_pool_fetch = git
+pkg_worker_pool_repo = https://github.com/inaka/worker_pool
+pkg_worker_pool_commit = master
+
+PACKAGES += wrangler
+pkg_wrangler_name = wrangler
+pkg_wrangler_description = Import of the Wrangler svn repository.
+pkg_wrangler_homepage = http://www.cs.kent.ac.uk/projects/wrangler/Home.html
+pkg_wrangler_fetch = git
+pkg_wrangler_repo = https://github.com/RefactoringTools/wrangler
+pkg_wrangler_commit = master
+
+PACKAGES += wsock
+pkg_wsock_name = wsock
+pkg_wsock_description = Erlang library to build WebSocket clients and servers
+pkg_wsock_homepage = https://github.com/madtrick/wsock
+pkg_wsock_fetch = git
+pkg_wsock_repo = https://github.com/madtrick/wsock
+pkg_wsock_commit = master
+
+PACKAGES += xhttpc
+pkg_xhttpc_name = xhttpc
+pkg_xhttpc_description = Extensible HTTP Client for Erlang
+pkg_xhttpc_homepage = https://github.com/seriyps/xhttpc
+pkg_xhttpc_fetch = git
+pkg_xhttpc_repo = https://github.com/seriyps/xhttpc
+pkg_xhttpc_commit = master
+
+PACKAGES += xref_runner
+pkg_xref_runner_name = xref_runner
+pkg_xref_runner_description = Erlang Xref Runner (inspired by rebar xref)
+pkg_xref_runner_homepage = https://github.com/inaka/xref_runner
+pkg_xref_runner_fetch = git
+pkg_xref_runner_repo = https://github.com/inaka/xref_runner
+pkg_xref_runner_commit = master
+
+PACKAGES += yamerl
+pkg_yamerl_name = yamerl
+pkg_yamerl_description = YAML 1.2 parser in pure Erlang
+pkg_yamerl_homepage = https://github.com/yakaz/yamerl
+pkg_yamerl_fetch = git
+pkg_yamerl_repo = https://github.com/yakaz/yamerl
+pkg_yamerl_commit = master
+
+PACKAGES += yamler
+pkg_yamler_name = yamler
+pkg_yamler_description = libyaml-based YAML loader for Erlang
+pkg_yamler_homepage = https://github.com/goertzenator/yamler
+pkg_yamler_fetch = git
+pkg_yamler_repo = https://github.com/goertzenator/yamler
+pkg_yamler_commit = master
+
+PACKAGES += yaws
+pkg_yaws_name = yaws
+pkg_yaws_description = Yaws webserver
+pkg_yaws_homepage = http://yaws.hyber.org
+pkg_yaws_fetch = git
+pkg_yaws_repo = https://github.com/klacke/yaws
+pkg_yaws_commit = master
+
+PACKAGES += zab_engine
+pkg_zab_engine_name = zab_engine
+pkg_zab_engine_description = ZAB protocol implementation in Erlang
+pkg_zab_engine_homepage = https://github.com/xinmingyao/zab_engine
+pkg_zab_engine_fetch = git
+pkg_zab_engine_repo = https://github.com/xinmingyao/zab_engine
+pkg_zab_engine_commit = master
+
+PACKAGES += zabbix_sender
+pkg_zabbix_sender_name = zabbix_sender
+pkg_zabbix_sender_description = Zabbix trapper for sending data to Zabbix in pure Erlang
+pkg_zabbix_sender_homepage = https://github.com/stalkermn/zabbix_sender
+pkg_zabbix_sender_fetch = git
+pkg_zabbix_sender_repo = https://github.com/stalkermn/zabbix_sender.git
+pkg_zabbix_sender_commit = master
+
+PACKAGES += zeta
+pkg_zeta_name = zeta
+pkg_zeta_description = HTTP access log parser in Erlang
+pkg_zeta_homepage = https://github.com/s1n4/zeta
+pkg_zeta_fetch = git
+pkg_zeta_repo = https://github.com/s1n4/zeta
+pkg_zeta_commit = master
+
+PACKAGES += zippers
+pkg_zippers_name = zippers
+pkg_zippers_description = A library for functional zipper data structures in Erlang
+pkg_zippers_homepage = https://github.com/ferd/zippers
+pkg_zippers_fetch = git
+pkg_zippers_repo = https://github.com/ferd/zippers
+pkg_zippers_commit = master
+
+PACKAGES += zlists
+pkg_zlists_name = zlists
+pkg_zlists_description = Erlang lazy lists library.
+pkg_zlists_homepage = https://github.com/vjache/erlang-zlists
+pkg_zlists_fetch = git
+pkg_zlists_repo = https://github.com/vjache/erlang-zlists
+pkg_zlists_commit = master
+
+PACKAGES += zraft_lib
+pkg_zraft_lib_name = zraft_lib
+pkg_zraft_lib_description = Erlang raft consensus protocol implementation
+pkg_zraft_lib_homepage = https://github.com/dreyk/zraft_lib
+pkg_zraft_lib_fetch = git
+pkg_zraft_lib_repo = https://github.com/dreyk/zraft_lib
+pkg_zraft_lib_commit = master
+
+PACKAGES += zucchini
+pkg_zucchini_name = zucchini
+pkg_zucchini_description = An Erlang INI parser
+pkg_zucchini_homepage = https://github.com/devinus/zucchini
+pkg_zucchini_fetch = git
+pkg_zucchini_repo = https://github.com/devinus/zucchini
+pkg_zucchini_commit = master
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: search
+
+define pkg_print
+ $(verbose) printf "%s\n" \
+ $(if $(call core_eq,$(1),$(pkg_$(1)_name)),,"Pkg name: $(1)") \
+ "App name: $(pkg_$(1)_name)" \
+ "Description: $(pkg_$(1)_description)" \
+ "Home page: $(pkg_$(1)_homepage)" \
+ "Fetch with: $(pkg_$(1)_fetch)" \
+ "Repository: $(pkg_$(1)_repo)" \
+ "Commit: $(pkg_$(1)_commit)" \
+ ""
+
+endef
+
+search:
+ifdef q
+ $(foreach p,$(PACKAGES), \
+ $(if $(findstring $(call core_lc,$(q)),$(call core_lc,$(pkg_$(p)_name) $(pkg_$(p)_description))), \
+ $(call pkg_print,$(p))))
+else
+ $(foreach p,$(PACKAGES),$(call pkg_print,$(p)))
+endif
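+
+# Example usage (illustrative query string): `make search q=websocket` prints
+# every package whose name or description contains "websocket"; the match is
+# case-insensitive because both sides are lowercased with core_lc above.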
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-deps clean-tmp-deps.log
+
+# Configuration.
+
+ifdef OTP_DEPS
+$(warning The variable OTP_DEPS is deprecated in favor of LOCAL_DEPS.)
+endif
+
+IGNORE_DEPS ?=
+export IGNORE_DEPS
+
+APPS_DIR ?= $(CURDIR)/apps
+export APPS_DIR
+
+DEPS_DIR ?= $(CURDIR)/deps
+export DEPS_DIR
+
+REBAR_DEPS_DIR = $(DEPS_DIR)
+export REBAR_DEPS_DIR
+
+REBAR_GIT ?= https://github.com/rebar/rebar
+REBAR_COMMIT ?= 576e12171ab8d69b048b827b92aa65d067deea01
+
+# External "early" plugins (see core/plugins.mk for regular plugins).
+# They both use the core_dep_plugin macro.
+
+define core_dep_plugin
+ifeq ($(2),$(PROJECT))
+-include $$(patsubst $(PROJECT)/%,%,$(1))
+else
+-include $(DEPS_DIR)/$(1)
+
+$(DEPS_DIR)/$(1): $(DEPS_DIR)/$(2) ;
+endif
+endef
+
+DEP_EARLY_PLUGINS ?=
+
+$(foreach p,$(DEP_EARLY_PLUGINS),\
+ $(eval $(if $(findstring /,$p),\
+ $(call core_dep_plugin,$p,$(firstword $(subst /, ,$p))),\
+ $(call core_dep_plugin,$p/early-plugins.mk,$p))))
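+
+# A project Makefile might set, for instance (illustrative values):
+#   DEP_EARLY_PLUGINS = my_dep               # loads $(DEPS_DIR)/my_dep/early-plugins.mk
+#   DEP_EARLY_PLUGINS = my_dep/mk/early.mk   # loads that exact file from the dep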
+
+# Query functions.
+
+query_fetch_method = $(if $(dep_$(1)),$(call _qfm_dep,$(word 1,$(dep_$(1)))),$(call _qfm_pkg,$(1)))
+_qfm_dep = $(if $(dep_fetch_$(1)),$(1),$(if $(IS_DEP),legacy,fail))
+_qfm_pkg = $(if $(pkg_$(1)_fetch),$(pkg_$(1)_fetch),fail)
+
+query_name = $(if $(dep_$(1)),$(1),$(if $(pkg_$(1)_name),$(pkg_$(1)_name),$(1)))
+
+query_repo = $(call _qr,$(1),$(call query_fetch_method,$(1)))
+_qr = $(if $(query_repo_$(2)),$(call query_repo_$(2),$(1)),$(call dep_repo,$(1)))
+
+query_repo_default = $(if $(dep_$(1)),$(word 2,$(dep_$(1))),$(pkg_$(1)_repo))
+query_repo_git = $(patsubst git://github.com/%,https://github.com/%,$(call query_repo_default,$(1)))
+query_repo_git-subfolder = $(call query_repo_git,$(1))
+query_repo_git-submodule = -
+query_repo_hg = $(call query_repo_default,$(1))
+query_repo_svn = $(call query_repo_default,$(1))
+query_repo_cp = $(call query_repo_default,$(1))
+query_repo_ln = $(call query_repo_default,$(1))
+query_repo_hex = https://hex.pm/packages/$(if $(word 3,$(dep_$(1))),$(word 3,$(dep_$(1))),$(1))
+query_repo_fail = -
+query_repo_legacy = -
+
+query_version = $(call _qv,$(1),$(call query_fetch_method,$(1)))
+_qv = $(if $(query_version_$(2)),$(call query_version_$(2),$(1)),$(call dep_commit,$(1)))
+
+query_version_default = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(word 3,$(dep_$(1))),$(pkg_$(1)_commit)))
+query_version_git = $(call query_version_default,$(1))
+query_version_git-subfolder = $(call query_version_git,$(1))
+query_version_git-submodule = -
+query_version_hg = $(call query_version_default,$(1))
+query_version_svn = -
+query_version_cp = -
+query_version_ln = -
+query_version_hex = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(word 2,$(dep_$(1))),$(pkg_$(1)_commit)))
+query_version_fail = -
+query_version_legacy = -
+
+query_extra = $(call _qe,$(1),$(call query_fetch_method,$(1)))
+_qe = $(if $(query_extra_$(2)),$(call query_extra_$(2),$(1)),-)
+
+query_extra_git = -
+query_extra_git-subfolder = $(if $(dep_$(1)),subfolder=$(word 4,$(dep_$(1))),-)
+query_extra_git-submodule = -
+query_extra_hg = -
+query_extra_svn = -
+query_extra_cp = -
+query_extra_ln = -
+query_extra_hex = $(if $(dep_$(1)),package-name=$(word 3,$(dep_$(1))),-)
+query_extra_fail = -
+query_extra_legacy = -
+
+query_absolute_path = $(addprefix $(DEPS_DIR)/,$(call query_name,$(1)))
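+
+# As an illustration (hypothetical dependency), given:
+#   dep_cowboy = git https://github.com/ninenines/cowboy 2.9.0
+# the query functions resolve as:
+#   $(call query_fetch_method,cowboy) -> git
+#   $(call query_repo,cowboy)         -> https://github.com/ninenines/cowboy
+#   $(call query_version,cowboy)      -> 2.9.0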
+
+# Deprecated legacy query functions.
+dep_fetch = $(call query_fetch_method,$(1))
+dep_name = $(call query_name,$(1))
+dep_repo = $(call query_repo_git,$(1))
+dep_commit = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(if $(filter hex,$(word 1,$(dep_$(1)))),$(word 2,$(dep_$(1))),$(word 3,$(dep_$(1)))),$(pkg_$(1)_commit)))
+
+LOCAL_DEPS_DIRS = $(foreach a,$(LOCAL_DEPS),$(if $(wildcard $(APPS_DIR)/$(a)),$(APPS_DIR)/$(a)))
+ALL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(foreach dep,$(filter-out $(IGNORE_DEPS),$(BUILD_DEPS) $(DEPS)),$(call dep_name,$(dep))))
+
+# When we are calling an app directly we don't want to include it here,
+# otherwise it'll be treated both as an app and as the top-level project.
+ALL_APPS_DIRS = $(if $(wildcard $(APPS_DIR)/),$(filter-out $(APPS_DIR),$(shell find $(APPS_DIR) -maxdepth 1 -type d)))
+ifdef ROOT_DIR
+ifndef IS_APP
+ALL_APPS_DIRS := $(filter-out $(APPS_DIR)/$(notdir $(CURDIR)),$(ALL_APPS_DIRS))
+endif
+endif
+
+ifeq ($(filter $(APPS_DIR) $(DEPS_DIR),$(subst :, ,$(ERL_LIBS))),)
+ifeq ($(ERL_LIBS),)
+ ERL_LIBS = $(APPS_DIR):$(DEPS_DIR)
+else
+ ERL_LIBS := $(ERL_LIBS):$(APPS_DIR):$(DEPS_DIR)
+endif
+endif
+export ERL_LIBS
+
+export NO_AUTOPATCH
+
+# Verbosity.
+
+dep_verbose_0 = @echo " DEP $1 ($(call dep_commit,$1))";
+dep_verbose_2 = set -x;
+dep_verbose = $(dep_verbose_$(V))
+
+# Optimization: don't recompile deps unless truly necessary.
+
+ifndef IS_DEP
+ifneq ($(MAKELEVEL),0)
+$(shell rm -f ebin/dep_built)
+endif
+endif
+
+# Core targets.
+
+ALL_APPS_DIRS_TO_BUILD = $(if $(LOCAL_DEPS_DIRS)$(IS_APP),$(LOCAL_DEPS_DIRS),$(ALL_APPS_DIRS))
+
+apps:: $(ALL_APPS_DIRS) clean-tmp-deps.log | $(ERLANG_MK_TMP)
+# Create an ebin directory for all apps to make sure Erlang recognizes them
+# as proper OTP applications when using -include_lib. This is a temporary
+# fix; a proper fix would be to compile apps/* in the right order.
+ifndef IS_APP
+ifneq ($(ALL_APPS_DIRS),)
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ mkdir -p $$dep/ebin; \
+ done
+endif
+endif
+# At the toplevel: if LOCAL_DEPS is defined with at least one local app, only
+# compile that list of apps. Otherwise, compile everything.
+# Within an app: compile all LOCAL_DEPS that are (uncompiled) local apps.
+ifneq ($(ALL_APPS_DIRS_TO_BUILD),)
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS_TO_BUILD); do \
+ if grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/apps.log; then \
+ :; \
+ else \
+ echo $$dep >> $(ERLANG_MK_TMP)/apps.log; \
+ $(MAKE) -C $$dep $(if $(IS_TEST),test-build-app) IS_APP=1; \
+ fi \
+ done
+endif
+
+clean-tmp-deps.log:
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_TMP)/apps.log $(ERLANG_MK_TMP)/deps.log
+endif
+
+# Erlang.mk does not rebuild dependencies once they have been compiled.
+# Developers working on the top-level project and some of its dependencies
+# at the same time may want to change this behavior. There are two
+# solutions (see the example invocations below):
+# 1. Set `FULL=1` so that all dependencies are visited and
+#    recursively recompiled if necessary.
+# 2. Set `FORCE_REBUILD=` to the specific list of dependencies that
+#    should be recompiled (instead of the whole set).
+
+FORCE_REBUILD ?=
+
+ifeq ($(origin FULL),undefined)
+ifneq ($(strip $(force_rebuild_dep)$(FORCE_REBUILD)),)
+define force_rebuild_dep
+echo "$(FORCE_REBUILD)" | grep -qw "$$(basename "$1")"
+endef
+endif
+endif
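+
+# Example invocations (illustrative dependency names):
+#   make FULL=1                        # visit and rebuild all dependencies
+#   make FORCE_REBUILD="cowlib ranch"  # rebuild only these dependencies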
+
+ifneq ($(SKIP_DEPS),)
+deps::
+else
+deps:: $(ALL_DEPS_DIRS) apps clean-tmp-deps.log | $(ERLANG_MK_TMP)
+ifneq ($(ALL_DEPS_DIRS),)
+ $(verbose) set -e; for dep in $(ALL_DEPS_DIRS); do \
+ if grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/deps.log; then \
+ :; \
+ else \
+ echo $$dep >> $(ERLANG_MK_TMP)/deps.log; \
+ if [ -z "$(strip $(FULL))" ] $(if $(force_rebuild_dep),&& ! ($(call force_rebuild_dep,$$dep)),) && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ elif [ -f $$dep/GNUmakefile ] || [ -f $$dep/makefile ] || [ -f $$dep/Makefile ]; then \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ else \
+ echo "Error: No Makefile to build dependency $$dep." >&2; \
+ exit 2; \
+ fi \
+ fi \
+ done
+endif
+endif
+
+# Deps related targets.
+
+# @todo rename GNUmakefile and makefile into Makefile first, if they exist
+# While the Makefile could also be named GNUmakefile or makefile,
+# in practice only Makefile has been needed so far.
+define dep_autopatch
+ if [ -f $(DEPS_DIR)/$(1)/erlang.mk ]; then \
+ rm -rf $(DEPS_DIR)/$1/ebin/; \
+ $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+ $(call dep_autopatch_erlang_mk,$(1)); \
+ elif [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+ if [ -f $(DEPS_DIR)/$1/rebar.lock ]; then \
+ $(call dep_autopatch2,$1); \
+ elif [ 0 != `grep -c "include ../\w*\.mk" $(DEPS_DIR)/$(1)/Makefile` ]; then \
+ $(call dep_autopatch2,$(1)); \
+ elif [ 0 != `grep -ci "^[^#].*rebar" $(DEPS_DIR)/$(1)/Makefile` ]; then \
+ $(call dep_autopatch2,$(1)); \
+ elif [ -n "`find $(DEPS_DIR)/$(1)/ -type f -name \*.mk -not -name erlang.mk -exec grep -i "^[^#].*rebar" '{}' \;`" ]; then \
+ $(call dep_autopatch2,$(1)); \
+ fi \
+ else \
+ if [ ! -d $(DEPS_DIR)/$(1)/src/ ]; then \
+ $(call dep_autopatch_noop,$(1)); \
+ else \
+ $(call dep_autopatch2,$(1)); \
+ fi \
+ fi
+endef
+
+define dep_autopatch2
+ ! test -f $(DEPS_DIR)/$1/ebin/$1.app || \
+ mv -n $(DEPS_DIR)/$1/ebin/$1.app $(DEPS_DIR)/$1/src/$1.app.src; \
+ rm -f $(DEPS_DIR)/$1/ebin/$1.app; \
+ if [ -f $(DEPS_DIR)/$1/src/$1.app.src.script ]; then \
+ $(call erlang,$(call dep_autopatch_appsrc_script.erl,$(1))); \
+ fi; \
+ $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+ if [ -f $(DEPS_DIR)/$(1)/rebar -o -f $(DEPS_DIR)/$(1)/rebar.config -o -f $(DEPS_DIR)/$(1)/rebar.config.script -o -f $(DEPS_DIR)/$1/rebar.lock ]; then \
+ $(call dep_autopatch_fetch_rebar); \
+ $(call dep_autopatch_rebar,$(1)); \
+ else \
+ $(call dep_autopatch_gen,$(1)); \
+ fi
+endef
+
+define dep_autopatch_noop
+ printf "noop:\n" > $(DEPS_DIR)/$(1)/Makefile
+endef
+
+# Replace "include erlang.mk" with a line that will load the parent Erlang.mk
+# if given. Do it for all 3 possible Makefile file names.
+ifeq ($(NO_AUTOPATCH_ERLANG_MK),)
+define dep_autopatch_erlang_mk
+ for f in Makefile makefile GNUmakefile; do \
+ if [ -f $(DEPS_DIR)/$1/$$f ]; then \
+ sed -i.bak s/'include *erlang.mk'/'include $$(if $$(ERLANG_MK_FILENAME),$$(ERLANG_MK_FILENAME),erlang.mk)'/ $(DEPS_DIR)/$1/$$f; \
+ fi \
+ done
+endef
+else
+define dep_autopatch_erlang_mk
+ :
+endef
+endif
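+
+# In other words, a dependency's `include erlang.mk` line becomes
+# `include $(if $(ERLANG_MK_FILENAME),$(ERLANG_MK_FILENAME),erlang.mk)`
+# so the parent project's copy of Erlang.mk is reused when available.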
+
+define dep_autopatch_gen
+ printf "%s\n" \
+ "ERLC_OPTS = +debug_info" \
+ "include ../../erlang.mk" > $(DEPS_DIR)/$(1)/Makefile
+endef
+
+# We use flock/lockf when available to avoid concurrency issues.
+define dep_autopatch_fetch_rebar
+ if command -v flock >/dev/null; then \
+ flock $(ERLANG_MK_TMP)/rebar.lock sh -c "$(call dep_autopatch_fetch_rebar2)"; \
+ elif command -v lockf >/dev/null; then \
+ lockf $(ERLANG_MK_TMP)/rebar.lock sh -c "$(call dep_autopatch_fetch_rebar2)"; \
+ else \
+ $(call dep_autopatch_fetch_rebar2); \
+ fi
+endef
+
+define dep_autopatch_fetch_rebar2
+ if [ ! -d $(ERLANG_MK_TMP)/rebar ]; then \
+ git clone -q -n -- $(REBAR_GIT) $(ERLANG_MK_TMP)/rebar; \
+ cd $(ERLANG_MK_TMP)/rebar; \
+ git checkout -q $(REBAR_COMMIT); \
+ ./bootstrap; \
+ cd -; \
+ fi
+endef
+
+define dep_autopatch_rebar
+ if [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+ mv $(DEPS_DIR)/$(1)/Makefile $(DEPS_DIR)/$(1)/Makefile.orig.mk; \
+ fi; \
+ $(call erlang,$(call dep_autopatch_rebar.erl,$(1))); \
+ rm -f $(DEPS_DIR)/$(1)/ebin/$(1).app
+endef
+
+define dep_autopatch_rebar.erl
+ application:load(rebar),
+ application:set_env(rebar, log_level, debug),
+ rmemo:start(),
+ Conf1 = case file:consult("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config)") of
+ {ok, Conf0} -> Conf0;
+ _ -> []
+ end,
+ {Conf, OsEnv} = fun() ->
+ case filelib:is_file("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)") of
+ false -> {Conf1, []};
+ true ->
+ Bindings0 = erl_eval:new_bindings(),
+ Bindings1 = erl_eval:add_binding('CONFIG', Conf1, Bindings0),
+ Bindings = erl_eval:add_binding('SCRIPT', "$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings1),
+ Before = os:getenv(),
+ {ok, Conf2} = file:script("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings),
+ {Conf2, lists:foldl(fun(E, Acc) -> lists:delete(E, Acc) end, os:getenv(), Before)}
+ end
+ end(),
+ Write = fun (Text) ->
+ file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/Makefile)", Text, [append])
+ end,
+ Escape = fun (Text) ->
+ re:replace(Text, "\\\\$$", "\$$$$", [global, {return, list}])
+ end,
+ Write("IGNORE_DEPS += edown eper eunit_formatters meck node_package "
+ "rebar_lock_deps_plugin rebar_vsn_plugin reltool_util\n"),
+ Write("C_SRC_DIR = /path/do/not/exist\n"),
+ Write("C_SRC_TYPE = rebar\n"),
+ Write("DRV_CFLAGS = -fPIC\nexport DRV_CFLAGS\n"),
+ Write(["ERLANG_ARCH = ", rebar_utils:wordsize(), "\nexport ERLANG_ARCH\n"]),
+ ToList = fun
+ (V) when is_atom(V) -> atom_to_list(V);
+ (V) when is_list(V) -> "'\\"" ++ V ++ "\\"'"
+ end,
+ fun() ->
+ Write("ERLC_OPTS = +debug_info\nexport ERLC_OPTS\n"),
+ case lists:keyfind(erl_opts, 1, Conf) of
+ false -> ok;
+ {_, ErlOpts} ->
+ lists:foreach(fun
+ ({d, D}) ->
+ Write("ERLC_OPTS += -D" ++ ToList(D) ++ "=1\n");
+ ({d, DKey, DVal}) ->
+ Write("ERLC_OPTS += -D" ++ ToList(DKey) ++ "=" ++ ToList(DVal) ++ "\n");
+ ({i, I}) ->
+ Write(["ERLC_OPTS += -I ", I, "\n"]);
+ ({platform_define, Regex, D}) ->
+ case rebar_utils:is_arch(Regex) of
+ true -> Write("ERLC_OPTS += -D" ++ ToList(D) ++ "=1\n");
+ false -> ok
+ end;
+ ({parse_transform, PT}) ->
+ Write("ERLC_OPTS += +'{parse_transform, " ++ ToList(PT) ++ "}'\n");
+ (_) -> ok
+ end, ErlOpts)
+ end,
+ Write("\n")
+ end(),
+ GetHexVsn = fun(N, NP) ->
+ case file:consult("$(call core_native_path,$(DEPS_DIR)/$1/rebar.lock)") of
+ {ok, Lock} ->
+ io:format("~p~n", [Lock]),
+ case lists:keyfind("1.1.0", 1, Lock) of
+ {_, LockPkgs} ->
+ io:format("~p~n", [LockPkgs]),
+ case lists:keyfind(atom_to_binary(N, latin1), 1, LockPkgs) of
+ {_, {pkg, _, Vsn}, _} ->
+ io:format("~p~n", [Vsn]),
+ {N, {hex, NP, binary_to_list(Vsn)}};
+ _ ->
+ false
+ end;
+ _ ->
+ false
+ end;
+ _ ->
+ false
+ end
+ end,
+ SemVsn = fun
+ ("~>" ++ S0) ->
+ S = case S0 of
+ " " ++ S1 -> S1;
+ _ -> S0
+ end,
+ case length([ok || $$. <- S]) of
+ 0 -> S ++ ".0.0";
+ 1 -> S ++ ".0";
+ _ -> S
+ end;
+ (S) -> S
+ end,
+ fun() ->
+ File = case lists:keyfind(deps, 1, Conf) of
+ false -> [];
+ {_, Deps} ->
+ [begin case case Dep of
+ N when is_atom(N) -> GetHexVsn(N, N);
+ {N, S} when is_atom(N), is_list(S) -> {N, {hex, N, SemVsn(S)}};
+ {N, {pkg, NP}} when is_atom(N) -> GetHexVsn(N, NP);
+ {N, S, {pkg, NP}} -> {N, {hex, NP, S}};
+ {N, S} when is_tuple(S) -> {N, S};
+ {N, _, S} -> {N, S};
+ {N, _, S, _} -> {N, S};
+ _ -> false
+ end of
+ false -> ok;
+ {Name, Source} ->
+ {Method, Repo, Commit} = case Source of
+ {hex, NPV, V} -> {hex, V, NPV};
+ {git, R} -> {git, R, master};
+ {M, R, {branch, C}} -> {M, R, C};
+ {M, R, {ref, C}} -> {M, R, C};
+ {M, R, {tag, C}} -> {M, R, C};
+ {M, R, C} -> {M, R, C}
+ end,
+ Write(io_lib:format("DEPS += ~s\ndep_~s = ~s ~s ~s~n", [Name, Name, Method, Repo, Commit]))
+ end end || Dep <- Deps]
+ end
+ end(),
+ fun() ->
+ case lists:keyfind(erl_first_files, 1, Conf) of
+ false -> ok;
+ {_, Files} ->
+ Names = [[" ", case lists:reverse(F) of
+ "lre." ++ Elif -> lists:reverse(Elif);
+ "lrx." ++ Elif -> lists:reverse(Elif);
+ "lry." ++ Elif -> lists:reverse(Elif);
+ Elif -> lists:reverse(Elif)
+ end] || "src/" ++ F <- Files],
+ Write(io_lib:format("COMPILE_FIRST +=~s\n", [Names]))
+ end
+ end(),
+ Write("\n\nrebar_dep: preprocess pre-deps deps pre-app app\n"),
+ Write("\npreprocess::\n"),
+ Write("\npre-deps::\n"),
+ Write("\npre-app::\n"),
+ PatchHook = fun(Cmd) ->
+ Cmd2 = re:replace(Cmd, "^([g]?make)(.*)( -C.*)", "\\\\1\\\\3\\\\2", [{return, list}]),
+ case Cmd2 of
+ "make -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1);
+ "gmake -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1);
+ "make " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1);
+ "gmake " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1);
+ _ -> Escape(Cmd)
+ end
+ end,
+ fun() ->
+ case lists:keyfind(pre_hooks, 1, Conf) of
+ false -> ok;
+ {_, Hooks} ->
+ [case H of
+ {'get-deps', Cmd} ->
+ Write("\npre-deps::\n\t" ++ PatchHook(Cmd) ++ "\n");
+ {compile, Cmd} ->
+ Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n");
+ {Regex, compile, Cmd} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n");
+ false -> ok
+ end;
+ _ -> ok
+ end || H <- Hooks]
+ end
+ end(),
+ ShellToMk = fun(V0) ->
+ V1 = re:replace(V0, "[$$][(]", "$$\(shell ", [global]),
+ V = re:replace(V1, "([$$])(?![(])(\\\\w*)", "\\\\1(\\\\2)", [global]),
+ re:replace(V, "-Werror\\\\b", "", [{return, list}, global])
+ end,
+ PortSpecs = fun() ->
+ case lists:keyfind(port_specs, 1, Conf) of
+ false ->
+ case filelib:is_dir("$(call core_native_path,$(DEPS_DIR)/$1/c_src)") of
+ false -> [];
+ true ->
+ [{"priv/" ++ proplists:get_value(so_name, Conf, "$(1)_drv.so"),
+ proplists:get_value(port_sources, Conf, ["c_src/*.c"]), []}]
+ end;
+ {_, Specs} ->
+ lists:flatten([case S of
+ {Output, Input} -> {ShellToMk(Output), Input, []};
+ {Regex, Output, Input} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {ShellToMk(Output), Input, []};
+ false -> []
+ end;
+ {Regex, Output, Input, [{env, Env}]} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {ShellToMk(Output), Input, Env};
+ false -> []
+ end
+ end || S <- Specs])
+ end
+ end(),
+ PortSpecWrite = fun (Text) ->
+ file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/c_src/Makefile.erlang.mk)", Text, [append])
+ end,
+ case PortSpecs of
+ [] -> ok;
+ _ ->
+ Write("\npre-app::\n\t@$$\(MAKE) --no-print-directory -f c_src/Makefile.erlang.mk\n"),
+ PortSpecWrite(io_lib:format("ERL_CFLAGS ?= -finline-functions -Wall -fPIC -I \\"~s/erts-~s/include\\" -I \\"~s\\"\n",
+ [code:root_dir(), erlang:system_info(version), code:lib_dir(erl_interface, include)])),
+ PortSpecWrite(io_lib:format("ERL_LDFLAGS ?= -L \\"~s\\" -lei\n",
+ [code:lib_dir(erl_interface, lib)])),
+ [PortSpecWrite(["\n", E, "\n"]) || E <- OsEnv],
+ FilterEnv = fun(Env) ->
+ lists:flatten([case E of
+ {_, _} -> E;
+ {Regex, K, V} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {K, V};
+ false -> []
+ end
+ end || E <- Env])
+ end,
+ MergeEnv = fun(Env) ->
+ lists:foldl(fun ({K, V}, Acc) ->
+ case lists:keyfind(K, 1, Acc) of
+ false -> [{K, rebar_utils:expand_env_variable(V, K, "")}|Acc];
+ {_, V0} -> [{K, rebar_utils:expand_env_variable(V, K, V0)}|Acc]
+ end
+ end, [], Env)
+ end,
+ PortEnv = case lists:keyfind(port_env, 1, Conf) of
+ false -> [];
+ {_, PortEnv0} -> FilterEnv(PortEnv0)
+ end,
+ PortSpec = fun ({Output, Input0, Env}) ->
+ filelib:ensure_dir("$(call core_native_path,$(DEPS_DIR)/$1/)" ++ Output),
+ Input = [[" ", I] || I <- Input0],
+ PortSpecWrite([
+ [["\n", K, " = ", ShellToMk(V)] || {K, V} <- lists:reverse(MergeEnv(PortEnv))],
+ case $(PLATFORM) of
+ darwin -> "\n\nLDFLAGS += -flat_namespace -undefined suppress";
+ _ -> ""
+ end,
+ "\n\nall:: ", Output, "\n\t@:\n\n",
+ "%.o: %.c\n\t$$\(CC) -c -o $$\@ $$\< $$\(CFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.C\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.cc\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.cpp\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ [[Output, ": ", K, " += ", ShellToMk(V), "\n"] || {K, V} <- lists:reverse(MergeEnv(FilterEnv(Env)))],
+ Output, ": $$\(foreach ext,.c .C .cc .cpp,",
+ "$$\(patsubst %$$\(ext),%.o,$$\(filter %$$\(ext),$$\(wildcard", Input, "))))\n",
+ "\t$$\(CC) -o $$\@ $$\? $$\(LDFLAGS) $$\(ERL_LDFLAGS) $$\(DRV_LDFLAGS) $$\(EXE_LDFLAGS)",
+ case {filename:extension(Output), $(PLATFORM)} of
+ {[], _} -> "\n";
+ {_, darwin} -> "\n";
+ _ -> " -shared\n"
+ end])
+ end,
+ [PortSpec(S) || S <- PortSpecs]
+ end,
+ fun() ->
+ case lists:keyfind(plugins, 1, Conf) of
+ false -> ok;
+ {_, Plugins0} ->
+ Plugins = [P || P <- Plugins0, is_tuple(P)],
+ case lists:keyfind('lfe-compile', 1, Plugins) of
+ false -> ok;
+ _ -> Write("\nBUILD_DEPS = lfe lfe.mk\ndep_lfe.mk = git https://github.com/ninenines/lfe.mk master\nDEP_PLUGINS = lfe.mk\n")
+ end
+ end
+ end(),
+ Write("\ninclude $$\(if $$\(ERLANG_MK_FILENAME),$$\(ERLANG_MK_FILENAME),erlang.mk)"),
+ RunPlugin = fun(Plugin, Step) ->
+ case erlang:function_exported(Plugin, Step, 2) of
+ false -> ok;
+ true ->
+ c:cd("$(call core_native_path,$(DEPS_DIR)/$1/)"),
+ Ret = Plugin:Step({config, "", Conf, dict:new(), dict:new(), dict:new(),
+ dict:store(base_dir, "", dict:new())}, undefined),
+ io:format("rebar plugin ~p step ~p ret ~p~n", [Plugin, Step, Ret])
+ end
+ end,
+ fun() ->
+ case lists:keyfind(plugins, 1, Conf) of
+ false -> ok;
+ {_, Plugins0} ->
+ Plugins = [P || P <- Plugins0, is_atom(P)],
+ [begin
+ case lists:keyfind(deps, 1, Conf) of
+ false -> ok;
+ {_, Deps} ->
+ case lists:keyfind(P, 1, Deps) of
+ false -> ok;
+ _ ->
+ Path = "$(call core_native_path,$(DEPS_DIR)/)" ++ atom_to_list(P),
+ io:format("~s", [os:cmd("$(MAKE) -C $(call core_native_path,$(DEPS_DIR)/$1) " ++ Path)]),
+ io:format("~s", [os:cmd("$(MAKE) -C " ++ Path ++ " IS_DEP=1")]),
+ code:add_patha(Path ++ "/ebin")
+ end
+ end
+ end || P <- Plugins],
+ [case code:load_file(P) of
+ {module, P} -> ok;
+ _ ->
+ case lists:keyfind(plugin_dir, 1, Conf) of
+ false -> ok;
+ {_, PluginsDir} ->
+ ErlFile = "$(call core_native_path,$(DEPS_DIR)/$1/)" ++ PluginsDir ++ "/" ++ atom_to_list(P) ++ ".erl",
+ {ok, P, Bin} = compile:file(ErlFile, [binary]),
+ {module, P} = code:load_binary(P, ErlFile, Bin)
+ end
+ end || P <- Plugins],
+ [RunPlugin(P, preprocess) || P <- Plugins],
+ [RunPlugin(P, pre_compile) || P <- Plugins],
+ [RunPlugin(P, compile) || P <- Plugins]
+ end
+ end(),
+ halt()
+endef
+
+define dep_autopatch_appsrc_script.erl
+ AppSrc = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+ AppSrcScript = AppSrc ++ ".script",
+ {ok, Conf0} = file:consult(AppSrc),
+ Bindings0 = erl_eval:new_bindings(),
+ Bindings1 = erl_eval:add_binding('CONFIG', Conf0, Bindings0),
+ Bindings = erl_eval:add_binding('SCRIPT', AppSrcScript, Bindings1),
+ Conf = case file:script(AppSrcScript, Bindings) of
+ {ok, [C]} -> C;
+ {ok, C} -> C
+ end,
+ ok = file:write_file(AppSrc, io_lib:format("~p.~n", [Conf])),
+ halt()
+endef
+
+define dep_autopatch_appsrc.erl
+ AppSrcOut = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+ AppSrcIn = case filelib:is_regular(AppSrcOut) of false -> "$(call core_native_path,$(DEPS_DIR)/$1/ebin/$1.app)"; true -> AppSrcOut end,
+ case filelib:is_regular(AppSrcIn) of
+ false -> ok;
+ true ->
+ {ok, [{application, $(1), L0}]} = file:consult(AppSrcIn),
+ L1 = lists:keystore(modules, 1, L0, {modules, []}),
+ L2 = case lists:keyfind(vsn, 1, L1) of
+ {_, git} -> lists:keyreplace(vsn, 1, L1, {vsn, lists:droplast(os:cmd("git -C $(DEPS_DIR)/$1 describe --dirty --tags --always"))});
+ {_, {cmd, _}} -> lists:keyreplace(vsn, 1, L1, {vsn, "cmd"});
+ _ -> L1
+ end,
+ L3 = case lists:keyfind(registered, 1, L2) of false -> [{registered, []}|L2]; _ -> L2 end,
+ ok = file:write_file(AppSrcOut, io_lib:format("~p.~n", [{application, $(1), L3}])),
+ case AppSrcOut of AppSrcIn -> ok; _ -> ok = file:delete(AppSrcIn) end
+ end,
+ halt()
+endef
+
+define dep_fetch_git
+ git clone -q -n -- $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && git checkout -q $(call dep_commit,$(1));
+endef
+
+define dep_fetch_git-subfolder
+ mkdir -p $(ERLANG_MK_TMP)/git-subfolder; \
+ git clone -q -n -- $(call dep_repo,$1) \
+ $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1); \
+ cd $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1) \
+ && git checkout -q $(call dep_commit,$1); \
+ ln -s $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1)/$(word 4,$(dep_$(1))) \
+ $(DEPS_DIR)/$(call dep_name,$1);
+endef
+
+define dep_fetch_git-submodule
+ git submodule update --init -- $(DEPS_DIR)/$1;
+endef
+
+define dep_fetch_hg
+ hg clone -q -U $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && hg update -q $(call dep_commit,$(1));
+endef
+
+define dep_fetch_svn
+ svn checkout -q $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+define dep_fetch_cp
+ cp -R $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+define dep_fetch_ln
+ ln -s $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+# Hex only has a package version. No need to look in the Erlang.mk packages.
+define dep_fetch_hex
+ mkdir -p $(ERLANG_MK_TMP)/hex $(DEPS_DIR)/$1; \
+ $(call core_http_get,$(ERLANG_MK_TMP)/hex/$1.tar,\
+ https://repo.hex.pm/tarballs/$(if $(word 3,$(dep_$1)),$(word 3,$(dep_$1)),$1)-$(strip $(word 2,$(dep_$1))).tar); \
+ tar -xOf $(ERLANG_MK_TMP)/hex/$1.tar contents.tar.gz | tar -C $(DEPS_DIR)/$1 -xzf -;
+endef
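+
+# A Hex dependency is therefore declared with its version only, for example
+# (hypothetical package and version):
+#   DEPS = jsx
+#   dep_jsx = hex 2.9.0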
+
+define dep_fetch_fail
+ echo "Error: Unknown or invalid dependency: $(1)." >&2; \
+ exit 78;
+endef
+
+# Kept for compatibility purposes with older Erlang.mk configuration.
+define dep_fetch_legacy
+ $(warning WARNING: '$(1)' dependency configuration uses deprecated format.) \
+ git clone -q -n -- $(word 1,$(dep_$(1))) $(DEPS_DIR)/$(1); \
+ cd $(DEPS_DIR)/$(1) && git checkout -q $(if $(word 2,$(dep_$(1))),$(word 2,$(dep_$(1))),master);
+endef
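+
+# The deprecated format above is `dep_NAME = REPO COMMIT`; the current
+# equivalent, `dep_NAME = git REPO COMMIT`, names the fetch method explicitly.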
+
+define dep_target
+$(DEPS_DIR)/$(call dep_name,$1): | $(ERLANG_MK_TMP)
+ $(eval DEP_NAME := $(call dep_name,$1))
+ $(eval DEP_STR := $(if $(filter $1,$(DEP_NAME)),$1,"$1 ($(DEP_NAME))"))
+ $(verbose) if test -d $(APPS_DIR)/$(DEP_NAME); then \
+ echo "Error: Dependency" $(DEP_STR) "conflicts with application found in $(APPS_DIR)/$(DEP_NAME)." >&2; \
+ exit 17; \
+ fi
+ $(verbose) mkdir -p $(DEPS_DIR)
+ $(dep_verbose) $(call dep_fetch_$(strip $(call dep_fetch,$(1))),$(1))
+ $(verbose) if [ -f $(DEPS_DIR)/$(1)/configure.ac -o -f $(DEPS_DIR)/$(1)/configure.in ] \
+ && [ ! -f $(DEPS_DIR)/$(1)/configure ]; then \
+ echo " AUTO " $(DEP_STR); \
+ cd $(DEPS_DIR)/$(1) && autoreconf -Wall -vif -I m4; \
+ fi
+ - $(verbose) if [ -f $(DEPS_DIR)/$(DEP_NAME)/configure ]; then \
+ echo " CONF " $(DEP_STR); \
+ cd $(DEPS_DIR)/$(DEP_NAME) && ./configure; \
+ fi
+ifeq ($(filter $(1),$(NO_AUTOPATCH)),)
+ $(verbose) $$(MAKE) --no-print-directory autopatch-$(DEP_NAME)
+endif
+
+.PHONY: autopatch-$(call dep_name,$1)
+
+autopatch-$(call dep_name,$1)::
+ $(verbose) if [ "$(1)" = "amqp_client" -a "$(RABBITMQ_CLIENT_PATCH)" ]; then \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \
+ echo " PATCH Downloading rabbitmq-codegen"; \
+ git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \
+ fi; \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-server ]; then \
+ echo " PATCH Downloading rabbitmq-server"; \
+ git clone https://github.com/rabbitmq/rabbitmq-server.git $(DEPS_DIR)/rabbitmq-server; \
+ fi; \
+ ln -s $(DEPS_DIR)/amqp_client/deps/rabbit_common-0.0.0 $(DEPS_DIR)/rabbit_common; \
+ elif [ "$(1)" = "rabbit" -a "$(RABBITMQ_SERVER_PATCH)" ]; then \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \
+ echo " PATCH Downloading rabbitmq-codegen"; \
+ git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \
+ fi \
+ elif [ "$1" = "elixir" -a "$(ELIXIR_PATCH)" ]; then \
+ ln -s lib/elixir/ebin $(DEPS_DIR)/elixir/; \
+ else \
+ $$(call dep_autopatch,$(call dep_name,$1)) \
+ fi
+endef
+
+$(foreach dep,$(BUILD_DEPS) $(DEPS),$(eval $(call dep_target,$(dep))))
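+
+# Autopatching can be disabled for specific dependencies, for example
+# (illustrative name): NO_AUTOPATCH = cowlib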
+
+ifndef IS_APP
+clean:: clean-apps
+
+clean-apps:
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ $(MAKE) -C $$dep clean IS_APP=1; \
+ done
+
+distclean:: distclean-apps
+
+distclean-apps:
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ $(MAKE) -C $$dep distclean IS_APP=1; \
+ done
+endif
+
+ifndef SKIP_DEPS
+distclean:: distclean-deps
+
+distclean-deps:
+ $(gen_verbose) rm -rf $(DEPS_DIR)
+endif
+
+# Forward-declare variables used in core/deps-tools.mk. This is required
+# in case plugins use them.
+
+ERLANG_MK_RECURSIVE_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-deps-list.log
+ERLANG_MK_RECURSIVE_DOC_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-doc-deps-list.log
+ERLANG_MK_RECURSIVE_REL_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-rel-deps-list.log
+ERLANG_MK_RECURSIVE_TEST_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-test-deps-list.log
+ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-shell-deps-list.log
+
+ERLANG_MK_QUERY_DEPS_FILE = $(ERLANG_MK_TMP)/query-deps.log
+ERLANG_MK_QUERY_DOC_DEPS_FILE = $(ERLANG_MK_TMP)/query-doc-deps.log
+ERLANG_MK_QUERY_REL_DEPS_FILE = $(ERLANG_MK_TMP)/query-rel-deps.log
+ERLANG_MK_QUERY_TEST_DEPS_FILE = $(ERLANG_MK_TMP)/query-test-deps.log
+ERLANG_MK_QUERY_SHELL_DEPS_FILE = $(ERLANG_MK_TMP)/query-shell-deps.log
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: clean-app
+
+# Configuration.
+
+ERLC_OPTS ?= -Werror +debug_info +warn_export_vars +warn_shadow_vars \
+ +warn_obsolete_guard # +bin_opt_info +warn_export_all +warn_missing_spec
+COMPILE_FIRST ?=
+COMPILE_FIRST_PATHS = $(addprefix src/,$(addsuffix .erl,$(COMPILE_FIRST)))
+ERLC_EXCLUDE ?=
+ERLC_EXCLUDE_PATHS = $(addprefix src/,$(addsuffix .erl,$(ERLC_EXCLUDE)))
+
+ERLC_ASN1_OPTS ?=
+
+ERLC_MIB_OPTS ?=
+COMPILE_MIB_FIRST ?=
+COMPILE_MIB_FIRST_PATHS = $(addprefix mibs/,$(addsuffix .mib,$(COMPILE_MIB_FIRST)))
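+
+# These settings are meant to be overridden from the project Makefile,
+# for example (illustrative values):
+#   ERLC_OPTS = +debug_info +warn_missing_spec
+#   COMPILE_FIRST = my_behaviour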
+
+# Verbosity.
+
+app_verbose_0 = @echo " APP " $(PROJECT);
+app_verbose_2 = set -x;
+app_verbose = $(app_verbose_$(V))
+
+appsrc_verbose_0 = @echo " APP " $(PROJECT).app.src;
+appsrc_verbose_2 = set -x;
+appsrc_verbose = $(appsrc_verbose_$(V))
+
+makedep_verbose_0 = @echo " DEPEND" $(PROJECT).d;
+makedep_verbose_2 = set -x;
+makedep_verbose = $(makedep_verbose_$(V))
+
+erlc_verbose_0 = @echo " ERLC " $(filter-out $(patsubst %,%.erl,$(ERLC_EXCLUDE)),\
+ $(filter %.erl %.core,$(?F)));
+erlc_verbose_2 = set -x;
+erlc_verbose = $(erlc_verbose_$(V))
+
+xyrl_verbose_0 = @echo " XYRL " $(filter %.xrl %.yrl,$(?F));
+xyrl_verbose_2 = set -x;
+xyrl_verbose = $(xyrl_verbose_$(V))
+
+asn1_verbose_0 = @echo " ASN1 " $(filter %.asn1,$(?F));
+asn1_verbose_2 = set -x;
+asn1_verbose = $(asn1_verbose_$(V))
+
+mib_verbose_0 = @echo " MIB " $(filter %.bin %.mib,$(?F));
+mib_verbose_2 = set -x;
+mib_verbose = $(mib_verbose_$(V))
+
+ifneq ($(wildcard src/),)
+
+# Targets.
+
+app:: $(if $(wildcard ebin/test),clean) deps
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d
+ $(verbose) $(MAKE) --no-print-directory app-build
+
+ifeq ($(wildcard src/$(PROJECT_MOD).erl),)
+define app_file
+{application, '$(PROJECT)', [
+ {description, "$(PROJECT_DESCRIPTION)"},
+ {vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP),
+ {id$(comma)$(space)"$(1)"}$(comma))
+ {modules, [$(call comma_list,$(2))]},
+ {registered, []},
+ {applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(foreach dep,$(DEPS),$(call dep_name,$(dep))))]},
+ {env, $(subst \,\\,$(PROJECT_ENV))}$(if $(findstring {,$(PROJECT_APP_EXTRA_KEYS)),$(comma)$(newline)$(tab)$(subst \,\\,$(PROJECT_APP_EXTRA_KEYS)),)
+]}.
+endef
+else
+define app_file
+{application, '$(PROJECT)', [
+ {description, "$(PROJECT_DESCRIPTION)"},
+ {vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP),
+ {id$(comma)$(space)"$(1)"}$(comma))
+ {modules, [$(call comma_list,$(2))]},
+ {registered, [$(call comma_list,$(PROJECT)_sup $(PROJECT_REGISTERED))]},
+ {applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(foreach dep,$(DEPS),$(call dep_name,$(dep))))]},
+ {mod, {$(PROJECT_MOD), []}},
+ {env, $(subst \,\\,$(PROJECT_ENV))}$(if $(findstring {,$(PROJECT_APP_EXTRA_KEYS)),$(comma)$(newline)$(tab)$(subst \,\\,$(PROJECT_APP_EXTRA_KEYS)),)
+]}.
+endef
+endif
+
+app-build: ebin/$(PROJECT).app
+ $(verbose) :
+
+# Source files.
+
+ALL_SRC_FILES := $(sort $(call core_find,src/,*))
+
+ERL_FILES := $(filter %.erl,$(ALL_SRC_FILES))
+CORE_FILES := $(filter %.core,$(ALL_SRC_FILES))
+
+# ASN.1 files.
+
+ifneq ($(wildcard asn1/),)
+ASN1_FILES = $(sort $(call core_find,asn1/,*.asn1))
+ERL_FILES += $(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES))))
+
+define compile_asn1
+ $(verbose) mkdir -p include/
+ $(asn1_verbose) erlc -v -I include/ -o asn1/ +noobj $(ERLC_ASN1_OPTS) $(1)
+ $(verbose) mv asn1/*.erl src/
+ -$(verbose) mv asn1/*.hrl include/
+ $(verbose) mv asn1/*.asn1db include/
+endef
+
+$(PROJECT).d:: $(ASN1_FILES)
+ $(if $(strip $?),$(call compile_asn1,$?))
+endif
+
+# SNMP MIB files.
+
+ifneq ($(wildcard mibs/),)
+MIB_FILES = $(sort $(call core_find,mibs/,*.mib))
+
+$(PROJECT).d:: $(COMPILE_MIB_FIRST_PATHS) $(MIB_FILES)
+ $(verbose) mkdir -p include/ priv/mibs/
+ $(mib_verbose) erlc -v $(ERLC_MIB_OPTS) -o priv/mibs/ -I priv/mibs/ $?
+ $(mib_verbose) erlc -o include/ -- $(addprefix priv/mibs/,$(patsubst %.mib,%.bin,$(notdir $?)))
+endif
+
+# Leex and Yecc files.
+
+XRL_FILES := $(filter %.xrl,$(ALL_SRC_FILES))
+XRL_ERL_FILES = $(addprefix src/,$(patsubst %.xrl,%.erl,$(notdir $(XRL_FILES))))
+ERL_FILES += $(XRL_ERL_FILES)
+
+YRL_FILES := $(filter %.yrl,$(ALL_SRC_FILES))
+YRL_ERL_FILES = $(addprefix src/,$(patsubst %.yrl,%.erl,$(notdir $(YRL_FILES))))
+ERL_FILES += $(YRL_ERL_FILES)
+
+$(PROJECT).d:: $(XRL_FILES) $(YRL_FILES)
+ $(if $(strip $?),$(xyrl_verbose) erlc -v -o src/ $(YRL_ERLC_OPTS) $?)
+
+# Erlang and Core Erlang files.
+
+define makedep.erl
+ E = ets:new(makedep, [bag]),
+ G = digraph:new([acyclic]),
+ ErlFiles = lists:usort(string:tokens("$(ERL_FILES)", " ")),
+ DepsDir = "$(call core_native_path,$(DEPS_DIR))",
+ AppsDir = "$(call core_native_path,$(APPS_DIR))",
+ DepsDirsSrc = "$(if $(wildcard $(DEPS_DIR)/*/src), $(call core_native_path,$(wildcard $(DEPS_DIR)/*/src)))",
+ DepsDirsInc = "$(if $(wildcard $(DEPS_DIR)/*/include), $(call core_native_path,$(wildcard $(DEPS_DIR)/*/include)))",
+ AppsDirsSrc = "$(if $(wildcard $(APPS_DIR)/*/src), $(call core_native_path,$(wildcard $(APPS_DIR)/*/src)))",
+ AppsDirsInc = "$(if $(wildcard $(APPS_DIR)/*/include), $(call core_native_path,$(wildcard $(APPS_DIR)/*/include)))",
+ DepsDirs = lists:usort(string:tokens(DepsDirsSrc++DepsDirsInc, " ")),
+ AppsDirs = lists:usort(string:tokens(AppsDirsSrc++AppsDirsInc, " ")),
+ Modules = [{list_to_atom(filename:basename(F, ".erl")), F} || F <- ErlFiles],
+ Add = fun (Mod, Dep) ->
+ case lists:keyfind(Dep, 1, Modules) of
+ false -> ok;
+ {_, DepFile} ->
+ {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+ ets:insert(E, {ModFile, DepFile}),
+ digraph:add_vertex(G, Mod),
+ digraph:add_vertex(G, Dep),
+ digraph:add_edge(G, Mod, Dep)
+ end
+ end,
+ AddHd = fun (F, Mod, DepFile) ->
+ case file:open(DepFile, [read]) of
+ {error, enoent} ->
+ ok;
+ {ok, Fd} ->
+ {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+ case ets:match(E, {ModFile, DepFile}) of
+ [] ->
+ ets:insert(E, {ModFile, DepFile}),
+ F(F, Fd, Mod,0);
+ _ -> ok
+ end
+ end
+ end,
+ SearchHrl = fun
+ F(_Hrl, []) -> {error,enoent};
+ F(Hrl, [Dir|Dirs]) ->
+ HrlF = filename:join([Dir,Hrl]),
+ case filelib:is_file(HrlF) of
+ true ->
+ {ok, HrlF};
+ false -> F(Hrl,Dirs)
+ end
+ end,
+ Attr = fun
+ (_F, Mod, behavior, Dep) ->
+ Add(Mod, Dep);
+ (_F, Mod, behaviour, Dep) ->
+ Add(Mod, Dep);
+ (_F, Mod, compile, {parse_transform, Dep}) ->
+ Add(Mod, Dep);
+ (_F, Mod, compile, Opts) when is_list(Opts) ->
+ case proplists:get_value(parse_transform, Opts) of
+ undefined -> ok;
+ Dep -> Add(Mod, Dep)
+ end;
+ (F, Mod, include, Hrl) ->
+ case SearchHrl(Hrl, ["src", "include",AppsDir,DepsDir]++AppsDirs++DepsDirs) of
+ {ok, FoundHrl} -> AddHd(F, Mod, FoundHrl);
+ {error, _} -> false
+ end;
+ (F, Mod, include_lib, Hrl) ->
+ case SearchHrl(Hrl, ["src", "include",AppsDir,DepsDir]++AppsDirs++DepsDirs) of
+ {ok, FoundHrl} -> AddHd(F, Mod, FoundHrl);
+ {error, _} -> false
+ end;
+ (F, Mod, import, {Imp, _}) ->
+ IsFile =
+ case lists:keyfind(Imp, 1, Modules) of
+ false -> false;
+ {_, FilePath} -> filelib:is_file(FilePath)
+ end,
+ case IsFile of
+ false -> ok;
+ true -> Add(Mod, Imp)
+ end;
+ (_, _, _, _) -> ok
+ end,
+ MakeDepend = fun
+ (F, Fd, Mod, StartLocation) ->
+ {ok, Filename} = file:pid2name(Fd),
+ case io:parse_erl_form(Fd, undefined, StartLocation) of
+ {ok, AbsData, EndLocation} ->
+ case AbsData of
+ {attribute, _, Key, Value} ->
+ Attr(F, Mod, Key, Value),
+ F(F, Fd, Mod, EndLocation);
+ _ -> F(F, Fd, Mod, EndLocation)
+ end;
+ {eof, _ } -> file:close(Fd);
+ {error, ErrorDescription } ->
+ file:close(Fd);
+ {error, ErrorInfo, ErrorLocation} ->
+ F(F, Fd, Mod, ErrorLocation)
+ end,
+ ok
+ end,
+ [begin
+ Mod = list_to_atom(filename:basename(F, ".erl")),
+ case file:open(F, [read]) of
+ {ok, Fd} -> MakeDepend(MakeDepend, Fd, Mod,0);
+ {error, enoent} -> ok
+ end
+ end || F <- ErlFiles],
+ Depend = sofs:to_external(sofs:relation_to_family(sofs:relation(ets:tab2list(E)))),
+ CompileFirst = [X || X <- lists:reverse(digraph_utils:topsort(G)), [] =/= digraph:in_neighbours(G, X)],
+ TargetPath = fun(Target) ->
+ case lists:keyfind(Target, 1, Modules) of
+ false -> "";
+ {_, DepFile} ->
+ DirSubname = tl(string:tokens(filename:dirname(DepFile), "/")),
+ string:join(DirSubname ++ [atom_to_list(Target)], "/")
+ end
+ end,
+ Output0 = [
+ "# Generated by Erlang.mk. Edit at your own risk!\n\n",
+ [[F, "::", [[" ", D] || D <- Deps], "; @touch \$$@\n"] || {F, Deps} <- Depend],
+ "\nCOMPILE_FIRST +=", [[" ", TargetPath(CF)] || CF <- CompileFirst], "\n"
+ ],
+ Output = case "é" of
+ [233] -> unicode:characters_to_binary(Output0);
+ _ -> Output0
+ end,
+ ok = file:write_file("$(1)", Output),
+ halt()
+endef
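+
+# The generated $(PROJECT).d file contains rules of the form (illustrative):
+#   src/my_mod.erl:: src/my_dep.erl include/my_hdr.hrl; @touch $@
+# plus a COMPILE_FIRST += line that orders modules by their dependencies.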
+
+ifeq ($(if $(NO_MAKEDEP),$(wildcard $(PROJECT).d),),)
+$(PROJECT).d:: $(ERL_FILES) $(call core_find,include/,*.hrl) $(MAKEFILE_LIST)
+ $(makedep_verbose) $(call erlang,$(call makedep.erl,$@))
+endif
+
+ifeq ($(IS_APP)$(IS_DEP),)
+ifneq ($(words $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES)),0)
+# Rebuild everything when the Makefile changes.
+$(ERLANG_MK_TMP)/last-makefile-change: $(MAKEFILE_LIST) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES); \
+ touch -c $(PROJECT).d; \
+ fi
+ $(verbose) touch $@
+
+$(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES):: $(ERLANG_MK_TMP)/last-makefile-change
+ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change
+endif
+endif
+
+$(PROJECT).d::
+ $(verbose) :
+
+include $(wildcard $(PROJECT).d)
+
+ebin/$(PROJECT).app:: ebin/
+
+ebin/:
+ $(verbose) mkdir -p ebin/
+
+define compile_erl
+ $(erlc_verbose) erlc -v $(if $(IS_DEP),$(filter-out -Werror,$(ERLC_OPTS)),$(ERLC_OPTS)) -o ebin/ \
+ -pa ebin/ -I include/ $(filter-out $(ERLC_EXCLUDE_PATHS),$(COMPILE_FIRST_PATHS) $(1))
+endef
+
+define validate_app_file
+ case file:consult("ebin/$(PROJECT).app") of
+ {ok, _} -> halt();
+ _ -> halt(1)
+ end
+endef
+
+ebin/$(PROJECT).app:: $(ERL_FILES) $(CORE_FILES) $(wildcard src/$(PROJECT).app.src)
+ $(eval FILES_TO_COMPILE := $(filter-out src/$(PROJECT).app.src,$?))
+ $(if $(strip $(FILES_TO_COMPILE)),$(call compile_erl,$(FILES_TO_COMPILE)))
+# Older git versions do not have the --first-parent flag. Do without in that case.
+ $(eval GITDESCRIBE := $(shell git describe --dirty --abbrev=7 --tags --always --first-parent 2>/dev/null \
+ || git describe --dirty --abbrev=7 --tags --always 2>/dev/null || true))
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(filter-out $(ERLC_EXCLUDE_PATHS),$(ERL_FILES) $(CORE_FILES) $(BEAM_FILES)))))))
+ifeq ($(wildcard src/$(PROJECT).app.src),)
+ $(app_verbose) printf '$(subst %,%%,$(subst $(newline),\n,$(subst ','\'',$(call app_file,$(GITDESCRIBE),$(MODULES)))))' \
+ > ebin/$(PROJECT).app
+ $(verbose) if ! $(call erlang,$(call validate_app_file)); then \
+ echo "The .app file produced is invalid. Please verify the value of PROJECT_ENV." >&2; \
+ exit 1; \
+ fi
+else
+ $(verbose) if [ -z "$$(grep -e '^[^%]*{\s*modules\s*,' src/$(PROJECT).app.src)" ]; then \
+ echo "Empty modules entry not found in $(PROJECT).app.src. Please consult the erlang.mk documentation for instructions." >&2; \
+ exit 1; \
+ fi
+ $(appsrc_verbose) cat src/$(PROJECT).app.src \
+ | sed "s/{[[:space:]]*modules[[:space:]]*,[[:space:]]*\[\]}/{modules, \[$(call comma_list,$(MODULES))\]}/" \
+ | sed "s/{id,[[:space:]]*\"git\"}/{id, \"$(subst /,\/,$(GITDESCRIBE))\"}/" \
+ > ebin/$(PROJECT).app
+endif
+ifneq ($(wildcard src/$(PROJECT).appup),)
+ $(verbose) cp src/$(PROJECT).appup ebin/
+endif
+
+clean:: clean-app
+
+clean-app:
+ $(gen_verbose) rm -rf $(PROJECT).d ebin/ priv/mibs/ $(XRL_ERL_FILES) $(YRL_ERL_FILES) \
+ $(addprefix include/,$(patsubst %.mib,%.hrl,$(notdir $(MIB_FILES)))) \
+ $(addprefix include/,$(patsubst %.asn1,%.hrl,$(notdir $(ASN1_FILES)))) \
+ $(addprefix include/,$(patsubst %.asn1,%.asn1db,$(notdir $(ASN1_FILES)))) \
+ $(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES))))
+
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Viktor Söderqvist <viktor@zuiderkwast.se>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: docs-deps
+
+# Configuration.
+
+ALL_DOC_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(DOC_DEPS))
+
+# Targets.
+
+$(foreach dep,$(DOC_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+doc-deps:
+else
+doc-deps: $(ALL_DOC_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_DOC_DEPS_DIRS) ; do $(MAKE) -C $$dep IS_DEP=1; done
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: rel-deps
+
+# Configuration.
+
+ALL_REL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(REL_DEPS))
+
+# Targets.
+
+$(foreach dep,$(REL_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+rel-deps:
+else
+rel-deps: $(ALL_REL_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_REL_DEPS_DIRS) ; do $(MAKE) -C $$dep; done
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: test-deps test-dir test-build clean-test-dir
+
+# Configuration.
+
+TEST_DIR ?= $(CURDIR)/test
+
+ALL_TEST_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(TEST_DEPS))
+
+TEST_ERLC_OPTS ?= +debug_info +warn_export_vars +warn_shadow_vars +warn_obsolete_guard
+TEST_ERLC_OPTS += -DTEST=1
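+
+# Since -DTEST=1 is always appended, test-only code can be guarded with the
+# preprocessor, e.g. (hypothetical export):
+#   -ifdef(TEST).
+#   -export([inspect_state/1]).
+#   -endif.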
+
+# Targets.
+
+$(foreach dep,$(TEST_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+test-deps:
+else
+test-deps: $(ALL_TEST_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_TEST_DEPS_DIRS) ; do \
+ if [ -z "$(strip $(FULL))" ] && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ else \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ fi \
+ done
+endif
+
+ifneq ($(wildcard $(TEST_DIR)),)
+test-dir: $(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build
+ @:
+
+test_erlc_verbose_0 = @echo " ERLC " $(filter-out $(patsubst %,%.erl,$(ERLC_EXCLUDE)),\
+ $(filter %.erl %.core,$(notdir $(FILES_TO_COMPILE))));
+test_erlc_verbose_2 = set -x;
+test_erlc_verbose = $(test_erlc_verbose_$(V))
+
+define compile_test_erl
+ $(test_erlc_verbose) erlc -v $(TEST_ERLC_OPTS) -o $(TEST_DIR) \
+ -pa ebin/ -I include/ $(1)
+endef
+
+ERL_TEST_FILES = $(call core_find,$(TEST_DIR)/,*.erl)
+$(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build: $(ERL_TEST_FILES) $(MAKEFILE_LIST)
+ $(eval FILES_TO_COMPILE := $(if $(filter $(MAKEFILE_LIST),$?),$(filter $(ERL_TEST_FILES),$^),$?))
+ $(if $(strip $(FILES_TO_COMPILE)),$(call compile_test_erl,$(FILES_TO_COMPILE)) && touch $@)
+endif
+
+test-build:: IS_TEST=1
+test-build:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build:: $(if $(wildcard src),$(if $(wildcard ebin/test),,clean)) $(if $(IS_APP),,deps test-deps)
+# We already compiled everything when IS_APP=1.
+ifndef IS_APP
+ifneq ($(wildcard src),)
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(verbose) $(MAKE) --no-print-directory app-build ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(gen_verbose) touch ebin/test
+endif
+ifneq ($(wildcard $(TEST_DIR)),)
+ $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+endif
+endif
+
+# Roughly the same as test-build, but when IS_APP=1.
+# We only care about compiling the current application.
+ifdef IS_APP
+test-build-app:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build-app:: deps test-deps
+ifneq ($(wildcard src),)
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(verbose) $(MAKE) --no-print-directory app-build ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(gen_verbose) touch ebin/test
+endif
+ifneq ($(wildcard $(TEST_DIR)),)
+ $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+endif
+endif
+
+clean:: clean-test-dir
+
+clean-test-dir:
+ifneq ($(wildcard $(TEST_DIR)/*.beam),)
+ $(gen_verbose) rm -f $(TEST_DIR)/*.beam $(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: rebar.config
+
+# We strip out -Werror because we don't want to fail due to
+# warnings when used as a dependency.
+
+compat_prepare_erlc_opts = $(shell echo "$1" | sed 's/, */,/g')
+
+define compat_convert_erlc_opts
+$(if $(filter-out -Werror,$1),\
+ $(if $(findstring +,$1),\
+ $(shell echo $1 | cut -b 2-)))
+endef
+
+define compat_erlc_opts_to_list
+[$(call comma_list,$(foreach o,$(call compat_prepare_erlc_opts,$1),$(call compat_convert_erlc_opts,$o)))]
+endef
+
+define compat_rebar_config
+{deps, [
+$(call comma_list,$(foreach d,$(DEPS),\
+ $(if $(filter hex,$(call dep_fetch,$d)),\
+ {$(call dep_name,$d)$(comma)"$(call dep_repo,$d)"},\
+ {$(call dep_name,$d)$(comma)".*"$(comma){git,"$(call dep_repo,$d)"$(comma)"$(call dep_commit,$d)"}})))
+]}.
+{erl_opts, $(call compat_erlc_opts_to_list,$(ERLC_OPTS))}.
+endef
+
+rebar.config:
+ $(gen_verbose) $(call core_render,compat_rebar_config,rebar.config)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter asciideck,$(DEPS) $(DOC_DEPS)),asciideck)
+
+.PHONY: asciidoc asciidoc-guide asciidoc-manual install-asciidoc distclean-asciidoc-guide distclean-asciidoc-manual
+
+# Core targets.
+
+docs:: asciidoc
+
+distclean:: distclean-asciidoc-guide distclean-asciidoc-manual
+
+# Plugin-specific targets.
+
+asciidoc: asciidoc-guide asciidoc-manual
+
+# User guide.
+
+ifeq ($(wildcard doc/src/guide/book.asciidoc),)
+asciidoc-guide:
+else
+asciidoc-guide: distclean-asciidoc-guide doc-deps
+ a2x -v -f pdf doc/src/guide/book.asciidoc && mv doc/src/guide/book.pdf doc/guide.pdf
+ a2x -v -f chunked doc/src/guide/book.asciidoc && mv doc/src/guide/book.chunked/ doc/html/
+
+distclean-asciidoc-guide:
+ $(gen_verbose) rm -rf doc/html/ doc/guide.pdf
+endif
+
+# Man pages.
+
+ASCIIDOC_MANUAL_FILES := $(wildcard doc/src/manual/*.asciidoc)
+
+ifeq ($(ASCIIDOC_MANUAL_FILES),)
+asciidoc-manual:
+else
+
+# Configuration.
+
+MAN_INSTALL_PATH ?= /usr/local/share/man
+MAN_SECTIONS ?= 3 7
+MAN_PROJECT ?= $(shell echo $(PROJECT) | sed 's/^./\U&\E/')
+MAN_VERSION ?= $(PROJECT_VERSION)
+
+# Plugin-specific targets.
+
+define asciidoc2man.erl
+try
+ [begin
+ io:format(" ADOC ~s~n", [F]),
+ ok = asciideck:to_manpage(asciideck:parse_file(F), #{
+ compress => gzip,
+ outdir => filename:dirname(F),
+ extra2 => "$(MAN_PROJECT) $(MAN_VERSION)",
+ extra3 => "$(MAN_PROJECT) Function Reference"
+ })
+ end || F <- [$(shell echo $(addprefix $(comma)\",$(addsuffix \",$1)) | sed 's/^.//')]],
+ halt(0)
+catch C:E ->
+ io:format("Exception ~p:~p~nStacktrace: ~p~n", [C, E, erlang:get_stacktrace()]),
+ halt(1)
+end.
+endef
+
+asciidoc-manual:: doc-deps
+
+asciidoc-manual:: $(ASCIIDOC_MANUAL_FILES)
+ $(gen_verbose) $(call erlang,$(call asciidoc2man.erl,$?))
+ $(verbose) $(foreach s,$(MAN_SECTIONS),mkdir -p doc/man$s/ && mv doc/src/manual/*.$s.gz doc/man$s/;)
+
+install-docs:: install-asciidoc
+
+install-asciidoc: asciidoc-manual
+ $(foreach s,$(MAN_SECTIONS),\
+ mkdir -p $(MAN_INSTALL_PATH)/man$s/ && \
+ install -g `id -g` -o `id -u` -m 0644 doc/man$s/*.gz $(MAN_INSTALL_PATH)/man$s/;)
+
+distclean-asciidoc-manual:
+ $(gen_verbose) rm -rf $(addprefix doc/man,$(MAN_SECTIONS))
+endif
+endif
+
+# Copyright (c) 2014-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: bootstrap bootstrap-lib bootstrap-rel new list-templates
+
+# Core targets.
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Bootstrap targets:" \
+ " bootstrap Generate a skeleton of an OTP application" \
+ " bootstrap-lib Generate a skeleton of an OTP library" \
+ " bootstrap-rel Generate the files needed to build a release" \
+ " new-app in=NAME Create a new local OTP application NAME" \
+ " new-lib in=NAME Create a new local OTP library NAME" \
+ " new t=TPL n=NAME Generate a module NAME based on the template TPL" \
+ " new t=T n=N in=APP Generate a module NAME based on the template TPL in APP" \
+ " list-templates List available templates"
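+
+# For example (illustrative names): `make bootstrap` scaffolds a new OTP
+# application, and `make new t=gen_server n=my_server` renders the
+# gen_server template below into the project's src/ directory.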
+
+# Bootstrap templates.
+
+define bs_appsrc
+{application, $p, [
+ {description, ""},
+ {vsn, "0.1.0"},
+ {id, "git"},
+ {modules, []},
+ {registered, []},
+ {applications, [
+ kernel,
+ stdlib
+ ]},
+ {mod, {$p_app, []}},
+ {env, []}
+]}.
+endef
+
+define bs_appsrc_lib
+{application, $p, [
+ {description, ""},
+ {vsn, "0.1.0"},
+ {id, "git"},
+ {modules, []},
+ {registered, []},
+ {applications, [
+ kernel,
+ stdlib
+ ]}
+]}.
+endef
+
+# To prevent autocompletion issues with ZSH, we add "include erlang.mk"
+# separately during the actual bootstrap.
+define bs_Makefile
+PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.1.0
+$(if $(SP),
+# Whitespace to be used when creating files from templates.
+SP = $(SP)
+)
+endef
+
+define bs_apps_Makefile
+PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.1.0
+$(if $(SP),
+# Whitespace to be used when creating files from templates.
+SP = $(SP)
+)
+# Make sure we know where the applications are located.
+ROOT_DIR ?= $(call core_relpath,$(dir $(ERLANG_MK_FILENAME)),$(APPS_DIR)/app)
+APPS_DIR ?= ..
+DEPS_DIR ?= $(call core_relpath,$(DEPS_DIR),$(APPS_DIR)/app)
+
+include $$(ROOT_DIR)/erlang.mk
+endef
+
+define bs_app
+-module($p_app).
+-behaviour(application).
+
+-export([start/2]).
+-export([stop/1]).
+
+start(_Type, _Args) ->
+ $p_sup:start_link().
+
+stop(_State) ->
+ ok.
+endef
+
+define bs_relx_config
+{release, {$p_release, "1"}, [$p, sasl, runtime_tools]}.
+{extended_start_script, true}.
+{sys_config, "config/sys.config"}.
+{vm_args, "config/vm.args"}.
+endef
+
+define bs_sys_config
+[
+].
+endef
+
+define bs_vm_args
+-name $p@127.0.0.1
+-setcookie $p
+-heart
+endef
+
+# Normal templates.
+
+define tpl_supervisor
+-module($(n)).
+-behaviour(supervisor).
+
+-export([start_link/0]).
+-export([init/1]).
+
+start_link() ->
+ supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+init([]) ->
+ Procs = [],
+ {ok, {{one_for_one, 1, 5}, Procs}}.
+endef
+
+define tpl_gen_server
+-module($(n)).
+-behaviour(gen_server).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_server.
+-export([init/1]).
+-export([handle_call/3]).
+-export([handle_cast/2]).
+-export([handle_info/2]).
+-export([terminate/2]).
+-export([code_change/3]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_server:start_link(?MODULE, [], []).
+
+%% gen_server.
+
+init([]) ->
+ {ok, #state{}}.
+
+handle_call(_Request, _From, State) ->
+ {reply, ignored, State}.
+
+handle_cast(_Msg, State) ->
+ {noreply, State}.
+
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+endef
+
+define tpl_module
+-module($(n)).
+-export([]).
+endef
+
+define tpl_cowboy_http
+-module($(n)).
+-behaviour(cowboy_http_handler).
+
+-export([init/3]).
+-export([handle/2]).
+-export([terminate/3]).
+
+-record(state, {
+}).
+
+init(_, Req, _Opts) ->
+ {ok, Req, #state{}}.
+
+handle(Req, State=#state{}) ->
+ {ok, Req2} = cowboy_req:reply(200, Req),
+ {ok, Req2, State}.
+
+terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_gen_fsm
+-module($(n)).
+-behaviour(gen_fsm).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_fsm.
+-export([init/1]).
+-export([state_name/2]).
+-export([handle_event/3]).
+-export([state_name/3]).
+-export([handle_sync_event/4]).
+-export([handle_info/3]).
+-export([terminate/3]).
+-export([code_change/4]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_fsm:start_link(?MODULE, [], []).
+
+%% gen_fsm.
+
+init([]) ->
+ {ok, state_name, #state{}}.
+
+state_name(_Event, StateData) ->
+ {next_state, state_name, StateData}.
+
+handle_event(_Event, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+state_name(_Event, _From, StateData) ->
+ {reply, ignored, state_name, StateData}.
+
+handle_sync_event(_Event, _From, StateName, StateData) ->
+ {reply, ignored, StateName, StateData}.
+
+handle_info(_Info, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+terminate(_Reason, _StateName, _StateData) ->
+ ok.
+
+code_change(_OldVsn, StateName, StateData, _Extra) ->
+ {ok, StateName, StateData}.
+endef
+
+define tpl_gen_statem
+-module($(n)).
+-behaviour(gen_statem).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_statem.
+-export([callback_mode/0]).
+-export([init/1]).
+-export([state_name/3]).
+-export([handle_event/4]).
+-export([terminate/3]).
+-export([code_change/4]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_statem:start_link(?MODULE, [], []).
+
+%% gen_statem.
+
+callback_mode() ->
+ state_functions.
+
+init([]) ->
+ {ok, state_name, #state{}}.
+
+state_name(_EventType, _EventData, StateData) ->
+ {next_state, state_name, StateData}.
+
+handle_event(_EventType, _EventData, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+terminate(_Reason, _StateName, _StateData) ->
+ ok.
+
+code_change(_OldVsn, StateName, StateData, _Extra) ->
+ {ok, StateName, StateData}.
+endef
+
+define tpl_cowboy_loop
+-module($(n)).
+-behaviour(cowboy_loop_handler).
+
+-export([init/3]).
+-export([info/3]).
+-export([terminate/3]).
+
+-record(state, {
+}).
+
+init(_, Req, _Opts) ->
+ {loop, Req, #state{}, 5000, hibernate}.
+
+info(_Info, Req, State) ->
+ {loop, Req, State, hibernate}.
+
+terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_cowboy_rest
+-module($(n)).
+
+-export([init/3]).
+-export([content_types_provided/2]).
+-export([get_html/2]).
+
+init(_, _Req, _Opts) ->
+ {upgrade, protocol, cowboy_rest}.
+
+content_types_provided(Req, State) ->
+ {[{{<<"text">>, <<"html">>, '*'}, get_html}], Req, State}.
+
+get_html(Req, State) ->
+ {<<"<html><body>This is REST!</body></html>">>, Req, State}.
+endef
+
+define tpl_cowboy_ws
+-module($(n)).
+-behaviour(cowboy_websocket_handler).
+
+-export([init/3]).
+-export([websocket_init/3]).
+-export([websocket_handle/3]).
+-export([websocket_info/3]).
+-export([websocket_terminate/3]).
+
+-record(state, {
+}).
+
+init(_, _, _) ->
+ {upgrade, protocol, cowboy_websocket}.
+
+websocket_init(_, Req, _Opts) ->
+ Req2 = cowboy_req:compact(Req),
+ {ok, Req2, #state{}}.
+
+websocket_handle({text, Data}, Req, State) ->
+ {reply, {text, Data}, Req, State};
+websocket_handle({binary, Data}, Req, State) ->
+ {reply, {binary, Data}, Req, State};
+websocket_handle(_Frame, Req, State) ->
+ {ok, Req, State}.
+
+websocket_info(_Info, Req, State) ->
+ {ok, Req, State}.
+
+websocket_terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_ranch_protocol
+-module($(n)).
+-behaviour(ranch_protocol).
+
+-export([start_link/4]).
+-export([init/4]).
+
+-type opts() :: [].
+-export_type([opts/0]).
+
+-record(state, {
+ socket :: inet:socket(),
+ transport :: module()
+}).
+
+start_link(Ref, Socket, Transport, Opts) ->
+ Pid = spawn_link(?MODULE, init, [Ref, Socket, Transport, Opts]),
+ {ok, Pid}.
+
+-spec init(ranch:ref(), inet:socket(), module(), opts()) -> ok.
+init(Ref, Socket, Transport, _Opts) ->
+ ok = ranch:accept_ack(Ref),
+ loop(#state{socket=Socket, transport=Transport}).
+
+loop(State) ->
+ loop(State).
+endef
+
+# Plugin-specific targets.
+
+ifndef WS
+ifdef SP
+WS = $(subst a,,a $(wordlist 1,$(SP),a a a a a a a a a a a a a a a a a a a a))
+else
+WS = $(tab)
+endif
+endif
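+# A note on the trick above: the wordlist expands to $(SP) extra "a" words,
+# and the subst then removes every "a", leaving only the separating spaces,
+# so generated files are indented with $(SP) spaces instead of a tab.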
+
+bootstrap:
+ifneq ($(wildcard src/),)
+ $(error Error: src/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(eval n := $(PROJECT)_sup)
+ $(verbose) $(call core_render,bs_Makefile,Makefile)
+ $(verbose) echo "include erlang.mk" >> Makefile
+ $(verbose) mkdir src/
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc,src/$(PROJECT).app.src)
+endif
+ $(verbose) $(call core_render,bs_app,src/$(PROJECT)_app.erl)
+ $(verbose) $(call core_render,tpl_supervisor,src/$(PROJECT)_sup.erl)
+
+bootstrap-lib:
+ifneq ($(wildcard src/),)
+ $(error Error: src/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(verbose) $(call core_render,bs_Makefile,Makefile)
+ $(verbose) echo "include erlang.mk" >> Makefile
+ $(verbose) mkdir src/
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc_lib,src/$(PROJECT).app.src)
+endif
+
+bootstrap-rel:
+ifneq ($(wildcard relx.config),)
+ $(error Error: relx.config already exists)
+endif
+ifneq ($(wildcard config/),)
+ $(error Error: config/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(verbose) $(call core_render,bs_relx_config,relx.config)
+ $(verbose) mkdir config/
+ $(verbose) $(call core_render,bs_sys_config,config/sys.config)
+ $(verbose) $(call core_render,bs_vm_args,config/vm.args)
+
+new-app:
+ifndef in
+ $(error Usage: $(MAKE) new-app in=APP)
+endif
+ifneq ($(wildcard $(APPS_DIR)/$in),)
+ $(error Error: Application $in already exists)
+endif
+ $(eval p := $(in))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(eval n := $(in)_sup)
+ $(verbose) mkdir -p $(APPS_DIR)/$p/src/
+ $(verbose) $(call core_render,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile)
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc,$(APPS_DIR)/$p/src/$p.app.src)
+endif
+ $(verbose) $(call core_render,bs_app,$(APPS_DIR)/$p/src/$p_app.erl)
+ $(verbose) $(call core_render,tpl_supervisor,$(APPS_DIR)/$p/src/$p_sup.erl)
+
+new-lib:
+ifndef in
+ $(error Usage: $(MAKE) new-lib in=APP)
+endif
+ifneq ($(wildcard $(APPS_DIR)/$in),)
+ $(error Error: Application $in already exists)
+endif
+ $(eval p := $(in))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(verbose) mkdir -p $(APPS_DIR)/$p/src/
+ $(verbose) $(call core_render,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile)
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc_lib,$(APPS_DIR)/$p/src/$p.app.src)
+endif
+
+new:
+ifeq ($(wildcard src/)$(in),)
+ $(error Error: src/ directory does not exist)
+endif
+ifndef t
+ $(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP])
+endif
+ifndef n
+ $(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP])
+endif
+ifdef in
+ $(verbose) $(call core_render,tpl_$(t),$(APPS_DIR)/$(in)/src/$(n).erl)
+else
+ $(verbose) $(call core_render,tpl_$(t),src/$(n).erl)
+endif
+
+list-templates:
+ $(verbose) @echo Available templates:
+ $(verbose) printf " %s\n" $(sort $(patsubst tpl_%,%,$(filter tpl_%,$(.VARIABLES))))
+
+# Copyright (c) 2014-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: clean-c_src distclean-c_src-env
+
+# Configuration.
+
+C_SRC_DIR ?= $(CURDIR)/c_src
+C_SRC_ENV ?= $(C_SRC_DIR)/env.mk
+C_SRC_OUTPUT ?= $(CURDIR)/priv/$(PROJECT)
+C_SRC_TYPE ?= shared
+
+# System type and C compiler/flags.
+
+ifeq ($(PLATFORM),msys2)
+ C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?= .exe
+ C_SRC_OUTPUT_SHARED_EXTENSION ?= .dll
+else
+ C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?=
+ C_SRC_OUTPUT_SHARED_EXTENSION ?= .so
+endif
+
+ifeq ($(C_SRC_TYPE),shared)
+ C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_SHARED_EXTENSION)
+else
+ C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_EXECUTABLE_EXTENSION)
+endif
+
+ifeq ($(PLATFORM),msys2)
+# We hardcode the compiler used on MSYS2. The default CC=cc does
+# not produce working code. The "gcc" MSYS2 package also doesn't.
+ CC = /mingw64/bin/gcc
+ export CC
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),darwin)
+ CC ?= cc
+ CFLAGS ?= -O3 -std=c99 -arch x86_64 -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -arch x86_64 -Wall
+ LDFLAGS ?= -arch x86_64 -flat_namespace -undefined suppress
+else ifeq ($(PLATFORM),freebsd)
+ CC ?= cc
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),linux)
+ CC ?= gcc
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+endif
+
+ifneq ($(PLATFORM),msys2)
+ CFLAGS += -fPIC
+ CXXFLAGS += -fPIC
+endif
+
+CFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
+CXXFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
+
+LDLIBS += -L"$(ERL_INTERFACE_LIB_DIR)" -lei
+
+# Verbosity.
+
+c_verbose_0 = @echo " C " $(filter-out $(notdir $(MAKEFILE_LIST) $(C_SRC_ENV)),$(^F));
+c_verbose = $(c_verbose_$(V))
+
+cpp_verbose_0 = @echo " CPP " $(filter-out $(notdir $(MAKEFILE_LIST) $(C_SRC_ENV)),$(^F));
+cpp_verbose = $(cpp_verbose_$(V))
+
+link_verbose_0 = @echo " LD " $(@F);
+link_verbose = $(link_verbose_$(V))
+
+# Targets.
+
+ifeq ($(wildcard $(C_SRC_DIR)),)
+else ifneq ($(wildcard $(C_SRC_DIR)/Makefile),)
+app:: app-c_src
+
+test-build:: app-c_src
+
+app-c_src:
+ $(MAKE) -C $(C_SRC_DIR)
+
+clean::
+ $(MAKE) -C $(C_SRC_DIR) clean
+
+else
+
+ifeq ($(SOURCES),)
+SOURCES := $(sort $(foreach pat,*.c *.C *.cc *.cpp,$(call core_find,$(C_SRC_DIR)/,$(pat))))
+endif
+OBJECTS = $(addsuffix .o, $(basename $(SOURCES)))
+
+COMPILE_C = $(c_verbose) $(CC) $(CFLAGS) $(CPPFLAGS) -c
+COMPILE_CPP = $(cpp_verbose) $(CXX) $(CXXFLAGS) $(CPPFLAGS) -c
+
+app:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
+
+test-build:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
+
+$(C_SRC_OUTPUT_FILE): $(OBJECTS)
+ $(verbose) mkdir -p $(dir $@)
+ $(link_verbose) $(CC) $(OBJECTS) \
+ $(LDFLAGS) $(if $(filter $(C_SRC_TYPE),shared),-shared) $(LDLIBS) \
+ -o $(C_SRC_OUTPUT_FILE)
+
+$(OBJECTS): $(MAKEFILE_LIST) $(C_SRC_ENV)
+
+%.o: %.c
+ $(COMPILE_C) $(OUTPUT_OPTION) $<
+
+%.o: %.cc
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+%.o: %.C
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+%.o: %.cpp
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+clean:: clean-c_src
+
+clean-c_src:
+ $(gen_verbose) rm -f $(C_SRC_OUTPUT_FILE) $(OBJECTS)
+
+endif
+
+ifneq ($(wildcard $(C_SRC_DIR)),)
+ERL_ERTS_DIR = $(shell $(ERL) -eval 'io:format("~s~n", [code:lib_dir(erts)]), halt().')
+
+$(C_SRC_ENV):
+ $(verbose) $(ERL) -eval "file:write_file(\"$(call core_native_path,$(C_SRC_ENV))\", \
+ io_lib:format( \
+ \"# Generated by Erlang.mk. Edit at your own risk!~n~n\" \
+ \"ERTS_INCLUDE_DIR ?= ~s/erts-~s/include/~n\" \
+ \"ERL_INTERFACE_INCLUDE_DIR ?= ~s~n\" \
+ \"ERL_INTERFACE_LIB_DIR ?= ~s~n\" \
+ \"ERTS_DIR ?= $(ERL_ERTS_DIR)~n\", \
+ [code:root_dir(), erlang:system_info(version), \
+ code:lib_dir(erl_interface, include), \
+ code:lib_dir(erl_interface, lib)])), \
+ halt()."
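+# For reference, the generated env.mk resembles the following (hypothetical
+# paths; the real values depend on the local OTP installation):
+#   ERTS_INCLUDE_DIR ?= /usr/lib/erlang/erts-11.1/include/
+#   ERL_INTERFACE_INCLUDE_DIR ?= /usr/lib/erlang/lib/erl_interface-4.0/include
+#   ERL_INTERFACE_LIB_DIR ?= /usr/lib/erlang/lib/erl_interface-4.0/lib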
+
+distclean:: distclean-c_src-env
+
+distclean-c_src-env:
+ $(gen_verbose) rm -f $(C_SRC_ENV)
+
+-include $(C_SRC_ENV)
+
+ifneq ($(ERL_ERTS_DIR),$(ERTS_DIR))
+$(shell rm -f $(C_SRC_ENV))
+endif
+endif
+
+# Templates.
+
+define bs_c_nif
+#include "erl_nif.h"
+
+static int loads = 0;
+
+static int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
+{
+ /* Initialize private data. */
+ *priv_data = NULL;
+
+ loads++;
+
+ return 0;
+}
+
+static int upgrade(ErlNifEnv* env, void** priv_data, void** old_priv_data, ERL_NIF_TERM load_info)
+{
+ /* Convert the private data to the new version. */
+ *priv_data = *old_priv_data;
+
+ loads++;
+
+ return 0;
+}
+
+static void unload(ErlNifEnv* env, void* priv_data)
+{
+ if (loads == 1) {
+ /* Destroy the private data. */
+ }
+
+ loads--;
+}
+
+static ERL_NIF_TERM hello(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ if (enif_is_atom(env, argv[0])) {
+ return enif_make_tuple2(env,
+ enif_make_atom(env, "hello"),
+ argv[0]);
+ }
+
+ return enif_make_tuple2(env,
+ enif_make_atom(env, "error"),
+ enif_make_atom(env, "badarg"));
+}
+
+static ErlNifFunc nif_funcs[] = {
+ {"hello", 1, hello}
+};
+
+ERL_NIF_INIT($n, nif_funcs, load, NULL, upgrade, unload)
+endef
+
+define bs_erl_nif
+-module($n).
+
+-export([hello/1]).
+
+-on_load(on_load/0).
+on_load() ->
+ PrivDir = case code:priv_dir(?MODULE) of
+ {error, _} ->
+ AppPath = filename:dirname(filename:dirname(code:which(?MODULE))),
+ filename:join(AppPath, "priv");
+ Path ->
+ Path
+ end,
+ erlang:load_nif(filename:join(PrivDir, atom_to_list(?MODULE)), 0).
+
+hello(_) ->
+ erlang:nif_error({not_loaded, ?MODULE}).
+endef
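+# `make new-nif n=NAME` renders the two templates above: the C source into
+# $(C_SRC_DIR)/NAME.c and the Erlang stub into src/NAME.erl.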
+
+new-nif:
+ifneq ($(wildcard $(C_SRC_DIR)/$n.c),)
+ $(error Error: $(C_SRC_DIR)/$n.c already exists)
+endif
+ifneq ($(wildcard src/$n.erl),)
+ $(error Error: src/$n.erl already exists)
+endif
+ifndef n
+ $(error Usage: $(MAKE) new-nif n=NAME [in=APP])
+endif
+ifdef in
+ $(verbose) $(MAKE) -C $(APPS_DIR)/$(in)/ new-nif n=$n in=
+else
+ $(verbose) mkdir -p $(C_SRC_DIR) src/
+ $(verbose) $(call core_render,bs_c_nif,$(C_SRC_DIR)/$n.c)
+ $(verbose) $(call core_render,bs_erl_nif,src/$n.erl)
+endif
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: ci ci-prepare ci-setup
+
+CI_OTP ?=
+CI_HIPE ?=
+CI_ERLLVM ?=
+
+ifeq ($(CI_VM),native)
+ERLC_OPTS += +native
+TEST_ERLC_OPTS += +native
+else ifeq ($(CI_VM),erllvm)
+ERLC_OPTS += +native +'{hipe, [to_llvm]}'
+TEST_ERLC_OPTS += +native +'{hipe, [to_llvm]}'
+endif
+
+ifeq ($(strip $(CI_OTP) $(CI_HIPE) $(CI_ERLLVM)),)
+ci::
+else
+
+ci:: $(addprefix ci-,$(CI_OTP) $(addsuffix -native,$(CI_HIPE)) $(addsuffix -erllvm,$(CI_ERLLVM)))
+
+ci-prepare: $(addprefix $(KERL_INSTALL_DIR)/,$(CI_OTP) $(addsuffix -native,$(CI_HIPE)))
+
+ci-setup::
+ $(verbose) :
+
+ci-extra::
+ $(verbose) :
+
+ci_verbose_0 = @echo " CI " $(1);
+ci_verbose = $(ci_verbose_$(V))
+
+define ci_target
+ci-$1: $(KERL_INSTALL_DIR)/$2
+ $(verbose) $(MAKE) --no-print-directory clean
+ $(ci_verbose) \
+ PATH="$(KERL_INSTALL_DIR)/$2/bin:$(PATH)" \
+ CI_OTP_RELEASE="$1" \
+ CT_OPTS="-label $1" \
+ CI_VM="$3" \
+ $(MAKE) ci-setup tests
+ $(verbose) $(MAKE) --no-print-directory ci-extra
+endef
+
+$(foreach otp,$(CI_OTP),$(eval $(call ci_target,$(otp),$(otp),otp)))
+$(foreach otp,$(CI_HIPE),$(eval $(call ci_target,$(otp)-native,$(otp)-native,native)))
+$(foreach otp,$(CI_ERLLVM),$(eval $(call ci_target,$(otp)-erllvm,$(otp)-native,erllvm)))
+
+$(foreach otp,$(filter-out $(ERLANG_OTP),$(CI_OTP)),$(eval $(call kerl_otp_target,$(otp))))
+$(foreach otp,$(filter-out $(ERLANG_HIPE),$(sort $(CI_HIPE) $(CI_ERLLVM))),$(eval $(call kerl_hipe_target,$(otp))))
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Continuous Integration targets:" \
+ " ci Run '$(MAKE) tests' on all configured Erlang versions." \
+ "" \
+ "The CI_OTP variable must be defined with the Erlang versions" \
+		"that should be tested. For example: CI_OTP = OTP-17.3.4 OTP-17.5.3"
+
+endif
+
+# Copyright (c) 2020, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifdef CONCUERROR_TESTS
+
+.PHONY: concuerror distclean-concuerror
+
+# Configuration
+
+CONCUERROR_LOGS_DIR ?= $(CURDIR)/logs
+CONCUERROR_OPTS ?=
+
+# Core targets.
+
+check:: concuerror
+
+ifndef KEEP_LOGS
+distclean:: distclean-concuerror
+endif
+
+# Plugin-specific targets.
+
+$(ERLANG_MK_TMP)/Concuerror/bin/concuerror: | $(ERLANG_MK_TMP)
+ $(verbose) git clone https://github.com/parapluu/Concuerror $(ERLANG_MK_TMP)/Concuerror
+ $(verbose) $(MAKE) -C $(ERLANG_MK_TMP)/Concuerror
+
+$(CONCUERROR_LOGS_DIR):
+ $(verbose) mkdir -p $(CONCUERROR_LOGS_DIR)
+
+define concuerror_html_report
+<!DOCTYPE html>
+<html lang="en">
+<head>
+<meta charset="utf-8">
+<title>Concuerror HTML report</title>
+</head>
+<body>
+<h1>Concuerror HTML report</h1>
+<p>Generated on $(concuerror_date)</p>
+<ul>
+$(foreach t,$(concuerror_targets),<li><a href="$(t).txt">$(t)</a></li>)
+</ul>
+</body>
+</html>
+endef
+
+concuerror: $(addprefix concuerror-,$(subst :,-,$(CONCUERROR_TESTS)))
+ $(eval concuerror_date := $(shell date))
+ $(eval concuerror_targets := $^)
+ $(verbose) $(call core_render,concuerror_html_report,$(CONCUERROR_LOGS_DIR)/concuerror.html)
+
+define concuerror_target
+.PHONY: concuerror-$1-$2
+
+concuerror-$1-$2: test-build | $(ERLANG_MK_TMP)/Concuerror/bin/concuerror $(CONCUERROR_LOGS_DIR)
+ $(ERLANG_MK_TMP)/Concuerror/bin/concuerror \
+ --pa $(CURDIR)/ebin --pa $(TEST_DIR) \
+ -o $(CONCUERROR_LOGS_DIR)/concuerror-$1-$2.txt \
+ $$(CONCUERROR_OPTS) -m $1 -t $2
+endef
+
+$(foreach test,$(CONCUERROR_TESTS),$(eval $(call concuerror_target,$(firstword $(subst :, ,$(test))),$(lastword $(subst :, ,$(test))))))
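+# CONCUERROR_TESTS entries take the form module:function, e.g. a value of
+# "my_tests:race" (hypothetical) defines the target concuerror-my_tests-race.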
+
+distclean-concuerror:
+ $(gen_verbose) rm -rf $(CONCUERROR_LOGS_DIR)
+
+endif
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: ct apps-ct distclean-ct
+
+# Configuration.
+
+CT_OPTS ?=
+
+ifneq ($(wildcard $(TEST_DIR)),)
+ifndef CT_SUITES
+CT_SUITES := $(sort $(subst _SUITE.erl,,$(notdir $(call core_find,$(TEST_DIR)/,*_SUITE.erl))))
+endif
+endif
+CT_SUITES ?=
+CT_LOGS_DIR ?= $(CURDIR)/logs
+
+# Core targets.
+
+tests:: ct
+
+ifndef KEEP_LOGS
+distclean:: distclean-ct
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Common_test targets:" \
+ " ct Run all the common_test suites for this project" \
+ "" \
+ "All your common_test suites have their associated targets." \
+		"A suite named http_SUITE can be run using the ct-http target."
+
+# Plugin-specific targets.
+
+CT_RUN = ct_run \
+ -no_auto_compile \
+ -noinput \
+ -pa $(CURDIR)/ebin $(TEST_DIR) \
+ -dir $(TEST_DIR) \
+ -logdir $(CT_LOGS_DIR)
+
+ifeq ($(CT_SUITES),)
+ct: $(if $(IS_APP)$(ROOT_DIR),,apps-ct)
+else
+# We do not run tests if we are in an apps/* directory with no test directory.
+ifneq ($(IS_APP)$(wildcard $(TEST_DIR)),1)
+ct: test-build $(if $(IS_APP)$(ROOT_DIR),,apps-ct)
+ $(verbose) mkdir -p $(CT_LOGS_DIR)
+ $(gen_verbose) $(CT_RUN) -sname ct_$(PROJECT) -suite $(addsuffix _SUITE,$(CT_SUITES)) $(CT_OPTS)
+endif
+endif
+
+ifneq ($(ALL_APPS_DIRS),)
+define ct_app_target
+apps-ct-$1: test-build
+ $$(MAKE) -C $1 ct IS_APP=1
+endef
+
+$(foreach app,$(ALL_APPS_DIRS),$(eval $(call ct_app_target,$(app))))
+
+apps-ct: $(addprefix apps-ct-,$(ALL_APPS_DIRS))
+endif
+
+ifdef t
+ifeq (,$(findstring :,$t))
+CT_EXTRA = -group $t
+else
+t_words = $(subst :, ,$t)
+CT_EXTRA = -group $(firstword $(t_words)) -case $(lastword $(t_words))
+endif
+else
+ifdef c
+CT_EXTRA = -case $c
+else
+CT_EXTRA =
+endif
+endif
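+# Example (hypothetical group and case names): `make ct-http t=queries` runs
+# a single group, `make ct-http t=queries:get` a single case within it, and
+# `make ct-http c=get` a case without naming its group.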
+
+define ct_suite_target
+ct-$(1): test-build
+ $(verbose) mkdir -p $(CT_LOGS_DIR)
+ $(gen_verbose_esc) $(CT_RUN) -sname ct_$(PROJECT) -suite $(addsuffix _SUITE,$(1)) $(CT_EXTRA) $(CT_OPTS)
+endef
+
+$(foreach test,$(CT_SUITES),$(eval $(call ct_suite_target,$(test))))
+
+distclean-ct:
+ $(gen_verbose) rm -rf $(CT_LOGS_DIR)
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: plt distclean-plt dialyze
+
+# Configuration.
+
+DIALYZER_PLT ?= $(CURDIR)/.$(PROJECT).plt
+export DIALYZER_PLT
+
+PLT_APPS ?=
+DIALYZER_DIRS ?= --src -r $(wildcard src) $(ALL_APPS_DIRS)
+DIALYZER_OPTS ?= -Werror_handling -Wrace_conditions -Wunmatched_returns # -Wunderspecs
+DIALYZER_PLT_OPTS ?=
+
+# Core targets.
+
+check:: dialyze
+
+distclean:: distclean-plt
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Dialyzer targets:" \
+ " plt Build a PLT file for this project" \
+ " dialyze Analyze the project using Dialyzer"
+
+# Plugin-specific targets.
+
+define filter_opts.erl
+ Opts = init:get_plain_arguments(),
+ {Filtered, _} = lists:foldl(fun
+ (O, {Os, true}) -> {[O|Os], false};
+ (O = "-D", {Os, _}) -> {[O|Os], true};
+ (O = [\\$$-, \\$$D, _ | _], {Os, _}) -> {[O|Os], false};
+ (O = "-I", {Os, _}) -> {[O|Os], true};
+ (O = [\\$$-, \\$$I, _ | _], {Os, _}) -> {[O|Os], false};
+ (O = "-pa", {Os, _}) -> {[O|Os], true};
+ (_, Acc) -> Acc
+ end, {[], false}, Opts),
+ io:format("~s~n", [string:join(lists:reverse(Filtered), " ")]),
+ halt().
+endef
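+# filter_opts.erl keeps only the -D, -I and -pa arguments (and their values)
+# from the plain arguments, so Dialyzer sees the same macro definitions and
+# include/code paths that erlc was given via ERLC_OPTS.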
+
+# DIALYZER_PLT is a variable understood directly by Dialyzer.
+#
+# We append the path to erts at the end of the PLT. This works
+# because the PLT file is in the external term format and the
+# function binary_to_term/1 ignores any trailing data.
+$(DIALYZER_PLT): deps app
+ $(eval DEPS_LOG := $(shell test -f $(ERLANG_MK_TMP)/deps.log && \
+ while read p; do test -d $$p/ebin && echo $$p/ebin; done <$(ERLANG_MK_TMP)/deps.log))
+ $(verbose) dialyzer --build_plt $(DIALYZER_PLT_OPTS) --apps \
+ erts kernel stdlib $(PLT_APPS) $(OTP_DEPS) $(LOCAL_DEPS) $(DEPS_LOG) || test $$? -eq 2
+ $(verbose) $(ERL) -eval 'io:format("~n~s~n", [code:lib_dir(erts)]), halt().' >> $@
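+# Illustration (not part of the build): since binary_to_term/1 stops at the
+# end of the first term, something like
+#   {ok, Bin} = file:read_file(PltPath), _ = binary_to_term(Bin)
+# still succeeds despite the erts path appended above; only the tail check
+# in the dialyze target below looks at that extra line.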
+
+plt: $(DIALYZER_PLT)
+
+distclean-plt:
+ $(gen_verbose) rm -f $(DIALYZER_PLT)
+
+ifneq ($(wildcard $(DIALYZER_PLT)),)
+dialyze: $(if $(filter --src,$(DIALYZER_DIRS)),,deps app)
+ $(verbose) if ! tail -n1 $(DIALYZER_PLT) | \
+ grep -q "^`$(ERL) -eval 'io:format("~s", [code:lib_dir(erts)]), halt().'`$$"; then \
+ rm $(DIALYZER_PLT); \
+ $(MAKE) plt; \
+ fi
+else
+dialyze: $(DIALYZER_PLT)
+endif
+ $(verbose) dialyzer --no_native `$(ERL) \
+ -eval "$(subst $(newline),,$(call escape_dquotes,$(call filter_opts.erl)))" \
+ -extra $(ERLC_OPTS)` $(DIALYZER_DIRS) $(DIALYZER_OPTS) $(if $(wildcard ebin/),-pa ebin/)
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-edoc edoc
+
+# Configuration.
+
+EDOC_OPTS ?=
+EDOC_SRC_DIRS ?=
+EDOC_OUTPUT ?= doc
+
+define edoc.erl
+ SrcPaths = lists:foldl(fun(P, Acc) ->
+ filelib:wildcard(atom_to_list(P) ++ "/{src,c_src}") ++ Acc
+ end, [], [$(call comma_list,$(patsubst %,'%',$(call core_native_path,$(EDOC_SRC_DIRS))))]),
+ DefaultOpts = [{dir, "$(EDOC_OUTPUT)"}, {source_path, SrcPaths}, {subpackages, false}],
+ edoc:application($(1), ".", [$(2)] ++ DefaultOpts),
+ halt(0).
+endef
+
+# Core targets.
+
+ifneq ($(strip $(EDOC_SRC_DIRS)$(wildcard doc/overview.edoc)),)
+docs:: edoc
+endif
+
+distclean:: distclean-edoc
+
+# Plugin-specific targets.
+
+edoc: distclean-edoc doc-deps
+ $(gen_verbose) $(call erlang,$(call edoc.erl,$(PROJECT),$(EDOC_OPTS)))
+
+distclean-edoc:
+ $(gen_verbose) rm -f $(EDOC_OUTPUT)/*.css $(EDOC_OUTPUT)/*.html $(EDOC_OUTPUT)/*.png $(EDOC_OUTPUT)/edoc-info
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Configuration.
+
+DTL_FULL_PATH ?=
+DTL_PATH ?= templates/
+DTL_PREFIX ?=
+DTL_SUFFIX ?= _dtl
+DTL_OPTS ?=
+
+# Verbosity.
+
+dtl_verbose_0 = @echo " DTL " $(filter %.dtl,$(?F));
+dtl_verbose = $(dtl_verbose_$(V))
+
+# Core targets.
+
+DTL_PATH := $(abspath $(DTL_PATH))
+DTL_FILES := $(sort $(call core_find,$(DTL_PATH),*.dtl))
+
+ifneq ($(DTL_FILES),)
+
+DTL_NAMES = $(addprefix $(DTL_PREFIX),$(addsuffix $(DTL_SUFFIX),$(DTL_FILES:$(DTL_PATH)/%.dtl=%)))
+DTL_MODULES = $(if $(DTL_FULL_PATH),$(subst /,_,$(DTL_NAMES)),$(notdir $(DTL_NAMES)))
+BEAM_FILES += $(addsuffix .beam,$(addprefix ebin/,$(DTL_MODULES)))
+
+ifneq ($(words $(DTL_FILES)),0)
+# Rebuild templates when the Makefile changes.
+$(ERLANG_MK_TMP)/last-makefile-change-erlydtl: $(MAKEFILE_LIST) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(DTL_FILES); \
+ fi
+ $(verbose) touch $@
+
+ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change-erlydtl
+endif
+
+define erlydtl_compile.erl
+ [begin
+ Module0 = case "$(strip $(DTL_FULL_PATH))" of
+ "" ->
+ filename:basename(F, ".dtl");
+ _ ->
+ "$(call core_native_path,$(DTL_PATH))/" ++ F2 = filename:rootname(F, ".dtl"),
+ re:replace(F2, "/", "_", [{return, list}, global])
+ end,
+ Module = list_to_atom("$(DTL_PREFIX)" ++ string:to_lower(Module0) ++ "$(DTL_SUFFIX)"),
+ case erlydtl:compile(F, Module, [$(DTL_OPTS)] ++ [{out_dir, "ebin/"}, return_errors]) of
+ ok -> ok;
+ {ok, _} -> ok
+ end
+ end || F <- string:tokens("$(1)", " ")],
+ halt().
+endef
+
+ebin/$(PROJECT).app:: $(DTL_FILES) | ebin/
+ $(if $(strip $?),\
+ $(dtl_verbose) $(call erlang,$(call erlydtl_compile.erl,$(call core_native_path,$?)),\
+ -pa ebin/))
+
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, Dave Cottlehuber <dch@skunkwerks.at>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-escript escript escript-zip
+
+# Configuration.
+
+ESCRIPT_NAME ?= $(PROJECT)
+ESCRIPT_FILE ?= $(ESCRIPT_NAME)
+
+ESCRIPT_SHEBANG ?= /usr/bin/env escript
+ESCRIPT_COMMENT ?= This is an -*- erlang -*- file
+ESCRIPT_EMU_ARGS ?= -escript main $(ESCRIPT_NAME)
+
+ESCRIPT_ZIP ?= 7z a -tzip -mx=9 -mtc=off $(if $(filter-out 0,$(V)),,> /dev/null)
+ESCRIPT_ZIP_FILE ?= $(ERLANG_MK_TMP)/escript.zip
+
+# Core targets.
+
+distclean:: distclean-escript
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Escript targets:" \
+		"  escript     Build an executable escript archive"
+
+# Plugin-specific targets.
+
+escript-zip:: FULL=1
+escript-zip:: deps app
+ $(verbose) mkdir -p $(dir $(ESCRIPT_ZIP))
+ $(verbose) rm -f $(ESCRIPT_ZIP_FILE)
+ $(gen_verbose) cd .. && $(ESCRIPT_ZIP) $(ESCRIPT_ZIP_FILE) $(PROJECT)/ebin/*
+ifneq ($(DEPS),)
+ $(verbose) cd $(DEPS_DIR) && $(ESCRIPT_ZIP) $(ESCRIPT_ZIP_FILE) \
+ $(subst $(DEPS_DIR)/,,$(addsuffix /*,$(wildcard \
+ $(addsuffix /ebin,$(shell cat $(ERLANG_MK_TMP)/deps.log)))))
+endif
+
+escript:: escript-zip
+ $(gen_verbose) printf "%s\n" \
+ "#!$(ESCRIPT_SHEBANG)" \
+ "%% $(ESCRIPT_COMMENT)" \
+ "%%! $(ESCRIPT_EMU_ARGS)" > $(ESCRIPT_FILE)
+ $(verbose) cat $(ESCRIPT_ZIP_FILE) >> $(ESCRIPT_FILE)
+ $(verbose) chmod +x $(ESCRIPT_FILE)
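+# The result is a regular escript: the shebang, comment and emulator-args
+# header lines above, followed by the zip archive produced by escript-zip.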
+
+distclean-escript:
+ $(gen_verbose) rm -f $(ESCRIPT_FILE)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, Enrique Fernandez <enrique.fernandez@erlang-solutions.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: eunit apps-eunit
+
+# Configuration
+
+EUNIT_OPTS ?=
+EUNIT_ERL_OPTS ?=
+
+# Core targets.
+
+tests:: eunit
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "EUnit targets:" \
+ " eunit Run all the EUnit tests for this project"
+
+# Plugin-specific targets.
+
+define eunit.erl
+ $(call cover.erl)
+ CoverSetup(),
+ case eunit:test($1, [$(EUNIT_OPTS)]) of
+ ok -> ok;
+ error -> halt(2)
+ end,
+ CoverExport("$(call core_native_path,$(COVER_DATA_DIR))/eunit.coverdata"),
+ halt()
+endef
+
+EUNIT_ERL_OPTS += -pa $(TEST_DIR) $(CURDIR)/ebin
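+# Example (hypothetical names): `make eunit t=my_mod` runs one module's tests
+# below, while `make eunit t=my_mod:my_test` runs a single 0-arity test fun.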
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+eunit: test-build cover-data-dir
+ $(gen_verbose) $(call erlang,$(call eunit.erl,['$(t)']),$(EUNIT_ERL_OPTS))
+else
+eunit: test-build cover-data-dir
+ $(gen_verbose) $(call erlang,$(call eunit.erl,fun $(t)/0),$(EUNIT_ERL_OPTS))
+endif
+else
+EUNIT_EBIN_MODS = $(notdir $(basename $(ERL_FILES) $(BEAM_FILES)))
+EUNIT_TEST_MODS = $(notdir $(basename $(call core_find,$(TEST_DIR)/,*.erl)))
+
+EUNIT_MODS = $(foreach mod,$(EUNIT_EBIN_MODS) $(filter-out \
+ $(patsubst %,%_tests,$(EUNIT_EBIN_MODS)),$(EUNIT_TEST_MODS)),'$(mod)')
+
+eunit: test-build $(if $(IS_APP)$(ROOT_DIR),,apps-eunit) cover-data-dir
+ifneq ($(wildcard src/ $(TEST_DIR)),)
+ $(gen_verbose) $(call erlang,$(call eunit.erl,[$(call comma_list,$(EUNIT_MODS))]),$(EUNIT_ERL_OPTS))
+endif
+
+ifneq ($(ALL_APPS_DIRS),)
+apps-eunit: test-build
+ $(verbose) eunit_retcode=0 ; for app in $(ALL_APPS_DIRS); do $(MAKE) -C $$app eunit IS_APP=1; \
+ [ $$? -ne 0 ] && eunit_retcode=1 ; done ; \
+ exit $$eunit_retcode
+endif
+endif
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter proper,$(DEPS) $(TEST_DEPS)),proper)
+.PHONY: proper
+
+# Targets.
+
+tests:: proper
+
+define proper_check.erl
+ $(call cover.erl)
+ code:add_pathsa([
+ "$(call core_native_path,$(CURDIR)/ebin)",
+ "$(call core_native_path,$(DEPS_DIR)/*/ebin)",
+ "$(call core_native_path,$(TEST_DIR))"]),
+ Module = fun(M) ->
+ [true] =:= lists:usort([
+ case atom_to_list(F) of
+ "prop_" ++ _ ->
+ io:format("Testing ~p:~p/0~n", [M, F]),
+ proper:quickcheck(M:F(), nocolors);
+ _ ->
+ true
+ end
+ || {F, 0} <- M:module_info(exports)])
+ end,
+ try begin
+ CoverSetup(),
+ Res = case $(1) of
+ all -> [true] =:= lists:usort([Module(M) || M <- [$(call comma_list,$(3))]]);
+ module -> Module($(2));
+ function -> proper:quickcheck($(2), nocolors)
+ end,
+ CoverExport("$(COVER_DATA_DIR)/proper.coverdata"),
+ Res
+ end of
+ true -> halt(0);
+ _ -> halt(1)
+ catch error:undef ->
+ io:format("Undefined property or module?~n~p~n", [erlang:get_stacktrace()]),
+ halt(0)
+ end.
+endef
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+proper: test-build cover-data-dir
+ $(verbose) $(call erlang,$(call proper_check.erl,module,$(t)))
+else
+proper: test-build cover-data-dir
+ $(verbose) echo Testing $(t)/0
+ $(verbose) $(call erlang,$(call proper_check.erl,function,$(t)()))
+endif
+else
+proper: test-build cover-data-dir
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(wildcard ebin/*.beam) $(call core_find,$(TEST_DIR)/,*.beam))))))
+ $(gen_verbose) $(call erlang,$(call proper_check.erl,all,undefined,$(MODULES)))
+endif
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Verbosity.
+
+proto_verbose_0 = @echo " PROTO " $(filter %.proto,$(?F));
+proto_verbose = $(proto_verbose_$(V))
+
+# Core targets.
+
+ifneq ($(wildcard src/),)
+ifneq ($(filter gpb protobuffs,$(BUILD_DEPS) $(DEPS)),)
+PROTO_FILES := $(filter %.proto,$(ALL_SRC_FILES))
+ERL_FILES += $(addprefix src/,$(patsubst %.proto,%_pb.erl,$(notdir $(PROTO_FILES))))
+
+ifeq ($(PROTO_FILES),)
+$(ERLANG_MK_TMP)/last-makefile-change-protobuffs:
+ $(verbose) :
+else
+# Rebuild proto files when the Makefile changes.
+# We exclude $(PROJECT).d to avoid a circular dependency.
+$(ERLANG_MK_TMP)/last-makefile-change-protobuffs: $(filter-out $(PROJECT).d,$(MAKEFILE_LIST)) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(PROTO_FILES); \
+ fi
+ $(verbose) touch $@
+
+$(PROJECT).d:: $(ERLANG_MK_TMP)/last-makefile-change-protobuffs
+endif
+
+ifeq ($(filter gpb,$(BUILD_DEPS) $(DEPS)),)
+define compile_proto.erl
+ [begin
+ protobuffs_compile:generate_source(F, [
+ {output_include_dir, "./include"},
+ {output_src_dir, "./src"}])
+ end || F <- string:tokens("$1", " ")],
+ halt().
+endef
+else
+define compile_proto.erl
+ [begin
+ gpb_compile:file(F, [
+ {include_as_lib, true},
+ {module_name_suffix, "_pb"},
+ {o_hrl, "./include"},
+ {o_erl, "./src"}])
+ end || F <- string:tokens("$1", " ")],
+ halt().
+endef
+endif
+
+ifneq ($(PROTO_FILES),)
+$(PROJECT).d:: $(PROTO_FILES)
+ $(verbose) mkdir -p ebin/ include/
+ $(if $(strip $?),$(proto_verbose) $(call erlang,$(call compile_proto.erl,$?)))
+endif
+endif
+endif
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: relx-rel relx-relup distclean-relx-rel run
+
+# Configuration.
+
+RELX ?= $(ERLANG_MK_TMP)/relx
+RELX_CONFIG ?= $(CURDIR)/relx.config
+
+RELX_URL ?= https://erlang.mk/res/relx-v3.27.0
+RELX_OPTS ?=
+RELX_OUTPUT_DIR ?= _rel
+RELX_REL_EXT ?=
+RELX_TAR ?= 1
+
+ifdef SFX
+ RELX_TAR = 1
+endif
+
+ifeq ($(firstword $(RELX_OPTS)),-o)
+ RELX_OUTPUT_DIR = $(word 2,$(RELX_OPTS))
+else
+ RELX_OPTS += -o $(RELX_OUTPUT_DIR)
+endif
+
+# Core targets.
+
+ifeq ($(IS_DEP),)
+ifneq ($(wildcard $(RELX_CONFIG)),)
+rel:: relx-rel
+
+relup:: relx-relup
+endif
+endif
+
+distclean:: distclean-relx-rel
+
+# Plugin-specific targets.
+
+$(RELX): | $(ERLANG_MK_TMP)
+ $(gen_verbose) $(call core_http_get,$(RELX),$(RELX_URL))
+ $(verbose) chmod +x $(RELX)
+
+relx-rel: $(RELX) rel-deps app
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) release
+ $(verbose) $(MAKE) relx-post-rel
+ifeq ($(RELX_TAR),1)
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) tar
+endif
+
+relx-relup: $(RELX) rel-deps app
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) release
+ $(MAKE) relx-post-rel
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) relup $(if $(filter 1,$(RELX_TAR)),tar)
+
+distclean-relx-rel:
+ $(gen_verbose) rm -rf $(RELX_OUTPUT_DIR)
+
+# Default hooks.
+relx-post-rel::
+ $(verbose) :
+
+# Run target.
+
+ifeq ($(wildcard $(RELX_CONFIG)),)
+run::
+else
+
+define get_relx_release.erl
+ {ok, Config} = file:consult("$(call core_native_path,$(RELX_CONFIG))"),
+ {release, {Name, Vsn0}, _} = lists:keyfind(release, 1, Config),
+ Vsn = case Vsn0 of
+ {cmd, Cmd} -> os:cmd(Cmd);
+ semver -> "";
+ {semver, _} -> "";
+ VsnStr -> Vsn0
+ end,
+ Extended = case lists:keyfind(extended_start_script, 1, Config) of
+ {_, true} -> "1";
+ _ -> ""
+ end,
+ io:format("~s ~s ~s", [Name, Vsn, Extended]),
+ halt(0).
+endef
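+# Example (hypothetical release): with {release, {myapp, "1.0.0"}, [myapp]}
+# in relx.config, RELX_REL_NAME becomes "myapp" and RELX_REL_VSN "1.0.0"; the
+# third word is present only when extended_start_script is set to true.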
+
+RELX_REL := $(shell $(call erlang,$(get_relx_release.erl)))
+RELX_REL_NAME := $(word 1,$(RELX_REL))
+RELX_REL_VSN := $(word 2,$(RELX_REL))
+RELX_REL_CMD := $(if $(word 3,$(RELX_REL)),console)
+
+ifeq ($(PLATFORM),msys2)
+RELX_REL_EXT := .cmd
+endif
+
+run:: all
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) $(RELX_REL_CMD)
+
+ifdef RELOAD
+rel::
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) ping
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) \
+ eval "io:format(\"~p~n\", [c:lm()])"
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Relx targets:" \
+ " run Compile the project, build the release and run it"
+
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, M Robert Martin <rob@version2beta.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: shell
+
+# Configuration.
+
+SHELL_ERL ?= erl
+SHELL_PATHS ?= $(CURDIR)/ebin $(TEST_DIR)
+SHELL_OPTS ?=
+
+ALL_SHELL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(SHELL_DEPS))
+
+# Core targets
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Shell targets:" \
+		"  shell              Run an Erlang shell with SHELL_OPTS or reasonable defaults"
+
+# Plugin-specific targets.
+
+$(foreach dep,$(SHELL_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+build-shell-deps:
+else
+build-shell-deps: $(ALL_SHELL_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_SHELL_DEPS_DIRS) ; do \
+ if [ -z "$(strip $(FULL))" ] && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ else \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ fi \
+ done
+endif
+
+shell:: build-shell-deps
+ $(gen_verbose) $(SHELL_ERL) -pa $(SHELL_PATHS) $(SHELL_OPTS)
+
+# Copyright 2017, Stanislaw Klekot <dozzie@jarowit.net>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-sphinx sphinx
+
+# Configuration.
+
+SPHINX_BUILD ?= sphinx-build
+SPHINX_SOURCE ?= doc
+SPHINX_CONFDIR ?=
+SPHINX_FORMATS ?= html
+SPHINX_DOCTREES ?= $(ERLANG_MK_TMP)/sphinx.doctrees
+SPHINX_OPTS ?=
+
+#sphinx_html_opts =
+#sphinx_html_output = html
+#sphinx_man_opts =
+#sphinx_man_output = man
+#sphinx_latex_opts =
+#sphinx_latex_output = latex
+
+# Helpers.
+
+sphinx_build_0 = @echo " SPHINX" $1; $(SPHINX_BUILD) -N -q
+sphinx_build_1 = $(SPHINX_BUILD) -N
+sphinx_build_2 = set -x; $(SPHINX_BUILD)
+sphinx_build = $(sphinx_build_$(V))
+
+define sphinx.build
+$(call sphinx_build,$1) -b $1 -d $(SPHINX_DOCTREES) $(if $(SPHINX_CONFDIR),-c $(SPHINX_CONFDIR)) $(SPHINX_OPTS) $(sphinx_$1_opts) -- $(SPHINX_SOURCE) $(call sphinx.output,$1)
+
+endef
+
+define sphinx.output
+$(if $(sphinx_$1_output),$(sphinx_$1_output),$1)
+endef
+
+# Targets.
+
+ifneq ($(wildcard $(if $(SPHINX_CONFDIR),$(SPHINX_CONFDIR),$(SPHINX_SOURCE))/conf.py),)
+docs:: sphinx
+distclean:: distclean-sphinx
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Sphinx targets:" \
+ " sphinx Generate Sphinx documentation." \
+ "" \
+		"ReST sources and the 'conf.py' file are expected in the directory pointed to by" \
+		"SPHINX_SOURCE ('doc' by default). SPHINX_FORMATS lists the formats to build (only" \
+		"the 'html' format is generated by default); the target directory can be specified by" \
+ 'setting sphinx_$${format}_output, for example: sphinx_html_output = output/html' \
+ "Additional Sphinx options can be set in SPHINX_OPTS."
+
+# Plugin-specific targets.
+
+sphinx:
+ $(foreach F,$(SPHINX_FORMATS),$(call sphinx.build,$F))
+
+distclean-sphinx:
+ $(gen_verbose) rm -rf $(filter-out $(SPHINX_SOURCE),$(foreach F,$(SPHINX_FORMATS),$(call sphinx.output,$F)))
+
+# Copyright (c) 2017, Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: show-ERL_LIBS show-ERLC_OPTS show-TEST_ERLC_OPTS
+
+show-ERL_LIBS:
+ @echo $(ERL_LIBS)
+
+show-ERLC_OPTS:
+ @$(foreach opt,$(ERLC_OPTS) -pa ebin -I include,echo "$(opt)";)
+
+show-TEST_ERLC_OPTS:
+ @$(foreach opt,$(TEST_ERLC_OPTS) -pa ebin -I include,echo "$(opt)";)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter triq,$(DEPS) $(TEST_DEPS)),triq)
+.PHONY: triq
+
+# Targets.
+
+tests:: triq
+
+define triq_check.erl
+ $(call cover.erl)
+ code:add_pathsa([
+ "$(call core_native_path,$(CURDIR)/ebin)",
+ "$(call core_native_path,$(DEPS_DIR)/*/ebin)",
+ "$(call core_native_path,$(TEST_DIR))"]),
+ try begin
+ CoverSetup(),
+ Res = case $(1) of
+ all -> [true] =:= lists:usort([triq:check(M) || M <- [$(call comma_list,$(3))]]);
+ module -> triq:check($(2));
+ function -> triq:check($(2))
+ end,
+ CoverExport("$(COVER_DATA_DIR)/triq.coverdata"),
+ Res
+ end of
+ true -> halt(0);
+ _ -> halt(1)
+ catch error:undef ->
+ io:format("Undefined property or module?~n~p~n", [erlang:get_stacktrace()]),
+ halt(0)
+ end.
+endef
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+triq: test-build cover-data-dir
+ $(verbose) $(call erlang,$(call triq_check.erl,module,$(t)))
+else
+triq: test-build cover-data-dir
+ $(verbose) echo Testing $(t)/0
+ $(verbose) $(call erlang,$(call triq_check.erl,function,$(t)()))
+endif
+else
+triq: test-build cover-data-dir
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(wildcard ebin/*.beam) $(call core_find,$(TEST_DIR)/,*.beam))))))
+ $(gen_verbose) $(call erlang,$(call triq_check.erl,all,undefined,$(MODULES)))
+endif
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Erlang Solutions Ltd.
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: xref distclean-xref
+
+# Configuration.
+
+ifeq ($(XREF_CONFIG),)
+ XREFR_ARGS :=
+else
+ XREFR_ARGS := -c $(XREF_CONFIG)
+endif
+
+XREFR ?= $(CURDIR)/xrefr
+export XREFR
+
+XREFR_URL ?= https://github.com/inaka/xref_runner/releases/download/1.1.0/xrefr
+
+# Core targets.
+
+help::
+ $(verbose) printf '%s\n' '' \
+ 'Xref targets:' \
+ ' xref Run Xrefr using $$XREF_CONFIG as config file if defined'
+
+distclean:: distclean-xref
+
+# Plugin-specific targets.
+
+$(XREFR):
+ $(gen_verbose) $(call core_http_get,$(XREFR),$(XREFR_URL))
+ $(verbose) chmod +x $(XREFR)
+
+xref: deps app $(XREFR)
+ $(gen_verbose) $(XREFR) $(XREFR_ARGS)
+
+distclean-xref:
+ $(gen_verbose) rm -rf $(XREFR)
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Viktor Söderqvist <viktor@zuiderkwast.se>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+COVER_REPORT_DIR ?= cover
+COVER_DATA_DIR ?= $(COVER_REPORT_DIR)
+
+ifdef COVER
+COVER_APPS ?= $(notdir $(ALL_APPS_DIRS))
+COVER_DEPS ?=
+endif
+
+# Code coverage for Common Test.
+
+ifdef COVER
+ifdef CT_RUN
+ifneq ($(wildcard $(TEST_DIR)),)
+test-build:: $(TEST_DIR)/ct.cover.spec
+
+$(TEST_DIR)/ct.cover.spec: cover-data-dir
+ $(gen_verbose) printf "%s\n" \
+ "{incl_app, '$(PROJECT)', details}." \
+ "{incl_dirs, '$(PROJECT)', [\"$(call core_native_path,$(CURDIR)/ebin)\" \
+ $(foreach a,$(COVER_APPS),$(comma) \"$(call core_native_path,$(APPS_DIR)/$a/ebin)\") \
+ $(foreach d,$(COVER_DEPS),$(comma) \"$(call core_native_path,$(DEPS_DIR)/$d/ebin)\")]}." \
+ '{export,"$(call core_native_path,$(abspath $(COVER_DATA_DIR))/ct.coverdata)"}.' > $@
+
+CT_RUN += -cover $(TEST_DIR)/ct.cover.spec
+endif
+endif
+endif
+
+# Code coverage for other tools.
+
+ifdef COVER
+define cover.erl
+ CoverSetup = fun() ->
+ Dirs = ["$(call core_native_path,$(CURDIR)/ebin)"
+ $(foreach a,$(COVER_APPS),$(comma) "$(call core_native_path,$(APPS_DIR)/$a/ebin)")
+ $(foreach d,$(COVER_DEPS),$(comma) "$(call core_native_path,$(DEPS_DIR)/$d/ebin)")],
+ [begin
+ case filelib:is_dir(Dir) of
+ false -> false;
+ true ->
+ case cover:compile_beam_directory(Dir) of
+ {error, _} -> halt(1);
+ _ -> true
+ end
+ end
+ end || Dir <- Dirs]
+ end,
+ CoverExport = fun(Filename) -> cover:export(Filename) end,
+endef
+else
+define cover.erl
+ CoverSetup = fun() -> ok end,
+ CoverExport = fun(_) -> ok end,
+endef
+endif
+
+# Core targets
+
+ifdef COVER
+ifneq ($(COVER_REPORT_DIR),)
+tests::
+ $(verbose) $(MAKE) --no-print-directory cover-report
+endif
+
+cover-data-dir: | $(COVER_DATA_DIR)
+
+$(COVER_DATA_DIR):
+ $(verbose) mkdir -p $(COVER_DATA_DIR)
+else
+cover-data-dir:
+endif
+
+clean:: coverdata-clean
+
+ifneq ($(COVER_REPORT_DIR),)
+distclean:: cover-report-clean
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Cover targets:" \
+		"  cover-report  Generate an HTML coverage report from previously collected" \
+ " cover data." \
+ " all.coverdata Merge all coverdata files into all.coverdata." \
+ "" \
+		"If COVER=1 is set, coverage data is generated by the eunit and ct targets. The" \
+		"tests target additionally generates an HTML coverage report from the combined" \
+ "coverdata files from each of these testing tools. HTML reports can be disabled" \
+ "by setting COVER_REPORT_DIR to empty."
+
+# Plugin specific targets
+
+COVERDATA = $(filter-out $(COVER_DATA_DIR)/all.coverdata,$(wildcard $(COVER_DATA_DIR)/*.coverdata))
+
+.PHONY: coverdata-clean
+coverdata-clean:
+ $(gen_verbose) rm -f $(COVER_DATA_DIR)/*.coverdata $(TEST_DIR)/ct.cover.spec
+
+# Merge all coverdata files into one.
+define cover_export.erl
+ $(foreach f,$(COVERDATA),cover:import("$(f)") == ok orelse halt(1),)
+ cover:export("$(COVER_DATA_DIR)/$@"), halt(0).
+endef
+
+all.coverdata: $(COVERDATA) cover-data-dir
+ $(gen_verbose) $(call erlang,$(cover_export.erl))
+
+# These are only defined if COVER_REPORT_DIR is non-empty. Set COVER_REPORT_DIR to
+# empty if you want the coverdata files but not the HTML report.
+ifneq ($(COVER_REPORT_DIR),)
+
+.PHONY: cover-report-clean cover-report
+
+cover-report-clean:
+ $(gen_verbose) rm -rf $(COVER_REPORT_DIR)
+ifneq ($(COVER_REPORT_DIR),$(COVER_DATA_DIR))
+ $(if $(shell ls -A $(COVER_DATA_DIR)/),,$(verbose) rmdir $(COVER_DATA_DIR))
+endif
+
+ifeq ($(COVERDATA),)
+cover-report:
+else
+
+# Modules which include eunit.hrl always contain one line without coverage
+# because eunit defines test/0 which is never called. We compensate for this.
+EUNIT_HRL_MODS = $(subst $(space),$(comma),$(shell \
+ grep -H -e '^\s*-include.*include/eunit\.hrl"' src/*.erl \
+ | sed "s/^src\/\(.*\)\.erl:.*/'\1'/" | uniq))
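+# e.g. a src/foo.erl that includes eunit.hrl contributes 'foo' to this list;
+# one uncovered line (the auto-generated test/0) is then subtracted from its
+# not-covered count in the report below.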
+
+define cover_report.erl
+ $(foreach f,$(COVERDATA),cover:import("$(f)") == ok orelse halt(1),)
+ Ms = cover:imported_modules(),
+ [cover:analyse_to_file(M, "$(COVER_REPORT_DIR)/" ++ atom_to_list(M)
+ ++ ".COVER.html", [html]) || M <- Ms],
+ Report = [begin {ok, R} = cover:analyse(M, module), R end || M <- Ms],
+ EunitHrlMods = [$(EUNIT_HRL_MODS)],
+ Report1 = [{M, {Y, case lists:member(M, EunitHrlMods) of
+ true -> N - 1; false -> N end}} || {M, {Y, N}} <- Report],
+ TotalY = lists:sum([Y || {_, {Y, _}} <- Report1]),
+ TotalN = lists:sum([N || {_, {_, N}} <- Report1]),
+ Perc = fun(Y, N) -> case Y + N of 0 -> 100; S -> round(100 * Y / S) end end,
+ TotalPerc = Perc(TotalY, TotalN),
+ {ok, F} = file:open("$(COVER_REPORT_DIR)/index.html", [write]),
+ io:format(F, "<!DOCTYPE html><html>~n"
+ "<head><meta charset=\"UTF-8\">~n"
+ "<title>Coverage report</title></head>~n"
+ "<body>~n", []),
+ io:format(F, "<h1>Coverage</h1>~n<p>Total: ~p%</p>~n", [TotalPerc]),
+ io:format(F, "<table><tr><th>Module</th><th>Coverage</th></tr>~n", []),
+ [io:format(F, "<tr><td><a href=\"~p.COVER.html\">~p</a></td>"
+ "<td>~p%</td></tr>~n",
+ [M, M, Perc(Y, N)]) || {M, {Y, N}} <- Report1],
+ How = "$(subst $(space),$(comma)$(space),$(basename $(COVERDATA)))",
+ Date = "$(shell date -u "+%Y-%m-%dT%H:%M:%SZ")",
+ io:format(F, "</table>~n"
+ "<p>Generated using ~s and erlang.mk on ~s.</p>~n"
+ "</body></html>", [How, Date]),
+ halt().
+endef
+
+cover-report:
+ $(verbose) mkdir -p $(COVER_REPORT_DIR)
+ $(gen_verbose) $(call erlang,$(cover_report.erl))
+
+endif
+endif # ifneq ($(COVER_REPORT_DIR),)
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: sfx
+
+ifdef RELX_REL
+ifdef SFX
+
+# Configuration.
+
+SFX_ARCHIVE ?= $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/$(RELX_REL_NAME)-$(RELX_REL_VSN).tar.gz
+SFX_OUTPUT_FILE ?= $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME).run
+
+# Core targets.
+
+rel:: sfx
+
+# Plugin-specific targets.
+
+define sfx_stub
+#!/bin/sh
+
+TMPDIR=`mktemp -d`
+ARCHIVE=`awk '/^__ARCHIVE_BELOW__$$/ {print NR + 1; exit 0;}' $$0`
+FILENAME=$$(basename $$0)
+REL=$${FILENAME%.*}
+
+tail -n+$$ARCHIVE $$0 | tar -xzf - -C $$TMPDIR
+
+$$TMPDIR/bin/$$REL console
+RET=$$?
+
+rm -rf $$TMPDIR
+
+exit $$RET
+
+__ARCHIVE_BELOW__
+endef
+
+sfx:
+ $(verbose) $(call core_render,sfx_stub,$(SFX_OUTPUT_FILE))
+ $(gen_verbose) cat $(SFX_ARCHIVE) >> $(SFX_OUTPUT_FILE)
+ $(verbose) chmod +x $(SFX_OUTPUT_FILE)
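+# The stub locates the payload via the __ARCHIVE_BELOW__ marker, extracts the
+# tarball appended by `cat` above into a temporary directory, runs the
+# release console from there, and removes the directory on exit.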
+
+endif
+endif
+
+# Copyright (c) 2013-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# External plugins.
+
+DEP_PLUGINS ?=
+
+$(foreach p,$(DEP_PLUGINS),\
+ $(eval $(if $(findstring /,$p),\
+ $(call core_dep_plugin,$p,$(firstword $(subst /, ,$p))),\
+ $(call core_dep_plugin,$p/plugins.mk,$p))))
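+# e.g. DEP_PLUGINS = cowboy loads the file plugins.mk from the cowboy
+# dependency, while a value with a slash, such as cowboy/mk/dist.mk, loads
+# that exact file (the first path component names the dependency).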
+
+help:: help-plugins
+
+help-plugins::
+ $(verbose) :
+
+# Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015-2016, Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Fetch dependencies recursively (without building them).
+
+.PHONY: fetch-deps fetch-doc-deps fetch-rel-deps fetch-test-deps \
+ fetch-shell-deps
+
+.PHONY: $(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+fetch-deps: $(ERLANG_MK_RECURSIVE_DEPS_LIST)
+fetch-doc-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST)
+fetch-rel-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST)
+fetch-test-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST)
+fetch-shell-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+ifneq ($(SKIP_DEPS),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST):
+ $(verbose) :> $@
+else
+# By default, we fetch "normal" dependencies. They are also included
+# regardless of the type of dependencies requested.
+#
+# $(ALL_DEPS_DIRS) includes $(BUILD_DEPS).
+
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_DOC_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_REL_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_TEST_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_SHELL_DEPS_DIRS)
+
+# Allow the use of fetch-deps together with $(DEP_TYPES) to fetch multiple
+# types of dependencies with a single target.
+ifneq ($(filter doc,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_DOC_DEPS_DIRS)
+endif
+ifneq ($(filter rel,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_REL_DEPS_DIRS)
+endif
+ifneq ($(filter test,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_TEST_DEPS_DIRS)
+endif
+ifneq ($(filter shell,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_SHELL_DEPS_DIRS)
+endif
+
+ERLANG_MK_RECURSIVE_TMP_LIST := $(abspath $(ERLANG_MK_TMP)/recursive-tmp-deps-$(shell echo $$PPID).log)
+
+$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): | $(ERLANG_MK_TMP)
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_RECURSIVE_TMP_LIST)
+endif
+ $(verbose) touch $(ERLANG_MK_RECURSIVE_TMP_LIST)
+ $(verbose) set -e; for dep in $^ ; do \
+ if ! grep -qs ^$$dep$$ $(ERLANG_MK_RECURSIVE_TMP_LIST); then \
+ echo $$dep >> $(ERLANG_MK_RECURSIVE_TMP_LIST); \
+ if grep -qs -E "^[[:blank:]]*include[[:blank:]]+(erlang\.mk|.*/erlang\.mk|.*ERLANG_MK_FILENAME.*)$$" \
+ $$dep/GNUmakefile $$dep/makefile $$dep/Makefile; then \
+ $(MAKE) -C $$dep fetch-deps \
+ IS_DEP=1 \
+ ERLANG_MK_RECURSIVE_TMP_LIST=$(ERLANG_MK_RECURSIVE_TMP_LIST); \
+ fi \
+ fi \
+ done
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) sort < $(ERLANG_MK_RECURSIVE_TMP_LIST) | \
+ uniq > $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted
+ $(verbose) cmp -s $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted $@ \
+ || mv $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted $@
+ $(verbose) rm -f $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted
+ $(verbose) rm $(ERLANG_MK_RECURSIVE_TMP_LIST)
+endif
+endif # ifneq ($(SKIP_DEPS),)
+
+# List dependencies recursively.
+
+.PHONY: list-deps list-doc-deps list-rel-deps list-test-deps \
+ list-shell-deps
+
+list-deps: $(ERLANG_MK_RECURSIVE_DEPS_LIST)
+list-doc-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST)
+list-rel-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST)
+list-test-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST)
+list-shell-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+list-deps list-doc-deps list-rel-deps list-test-deps list-shell-deps:
+ $(verbose) cat $^
+
+# Query dependencies recursively.
+
+.PHONY: query-deps query-doc-deps query-rel-deps query-test-deps \
+ query-shell-deps
+
+QUERY ?= name fetch_method repo version
+
+define query_target
+$(1): $(2) clean-tmp-query.log
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(4)
+endif
+ $(verbose) $(foreach dep,$(3),\
+ echo $(PROJECT): $(foreach q,$(QUERY),$(call query_$(q),$(dep))) >> $(4) ;)
+ $(if $(filter-out query-deps,$(1)),,\
+ $(verbose) set -e; for dep in $(3) ; do \
+ if grep -qs ^$$$$dep$$$$ $(ERLANG_MK_TMP)/query.log; then \
+ :; \
+ else \
+ echo $$$$dep >> $(ERLANG_MK_TMP)/query.log; \
+ $(MAKE) -C $(DEPS_DIR)/$$$$dep $$@ QUERY="$(QUERY)" IS_DEP=1 || true; \
+ fi \
+ done)
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) touch $(4)
+ $(verbose) cat $(4)
+endif
+endef
+
+clean-tmp-query.log:
+ifeq ($(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_TMP)/query.log
+endif
+
+$(eval $(call query_target,query-deps,$(ERLANG_MK_RECURSIVE_DEPS_LIST),$(BUILD_DEPS) $(DEPS),$(ERLANG_MK_QUERY_DEPS_FILE)))
+$(eval $(call query_target,query-doc-deps,$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST),$(DOC_DEPS),$(ERLANG_MK_QUERY_DOC_DEPS_FILE)))
+$(eval $(call query_target,query-rel-deps,$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST),$(REL_DEPS),$(ERLANG_MK_QUERY_REL_DEPS_FILE)))
+$(eval $(call query_target,query-test-deps,$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST),$(TEST_DEPS),$(ERLANG_MK_QUERY_TEST_DEPS_FILE)))
+$(eval $(call query_target,query-shell-deps,$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST),$(SHELL_DEPS),$(ERLANG_MK_QUERY_SHELL_DEPS_FILE)))
diff --git a/deps/rabbitmq_mqtt/include/mqtt_machine.hrl b/deps/rabbitmq_mqtt/include/mqtt_machine.hrl
new file mode 100644
index 0000000000..b670c7b32e
--- /dev/null
+++ b/deps/rabbitmq_mqtt/include/mqtt_machine.hrl
@@ -0,0 +1,8 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-record(machine_state, {client_ids = #{}}).
diff --git a/deps/rabbitmq_mqtt/include/rabbit_mqtt.hrl b/deps/rabbitmq_mqtt/include/rabbit_mqtt.hrl
new file mode 100644
index 0000000000..912f5ad46f
--- /dev/null
+++ b/deps/rabbitmq_mqtt/include/rabbit_mqtt.hrl
@@ -0,0 +1,92 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-define(CLIENT_ID_MAXLEN, 23).
+
+%% reader state
+-record(state, { socket,
+ conn_name,
+ await_recv,
+ deferred_recv,
+ received_connect_frame,
+ connection_state,
+ keepalive,
+ keepalive_sup,
+ conserve,
+ parse_state,
+ proc_state,
+ connection,
+ stats_timer }).
+
+%% processor state
+-record(proc_state, { socket,
+ subscriptions,
+ consumer_tags,
+ unacked_pubs,
+ awaiting_ack,
+ awaiting_seqno,
+ message_id,
+ client_id,
+ clean_sess,
+ will_msg,
+ channels,
+ connection,
+ exchange,
+ adapter_info,
+ ssl_login_name,
+ %% Retained messages handler. See rabbit_mqtt_retainer_sup
+ %% and rabbit_mqtt_retainer.
+ retainer_pid,
+ auth_state,
+ send_fun,
+ peer_addr,
+ mqtt2amqp_fun,
+ amqp2mqtt_fun,
+ register_state }).
+
+-record(auth_state, {username,
+ user,
+ vhost}).
+
+%% does not include the vhost: the vhost is part of
+%% the table name
+-record(retained_message, {topic,
+ mqtt_msg}).
+
+-define(INFO_ITEMS,
+ [host,
+ port,
+ peer_host,
+ peer_port,
+ protocol,
+ channels,
+ channel_max,
+ frame_max,
+ client_properties,
+ ssl,
+ ssl_protocol,
+ ssl_key_exchange,
+ ssl_cipher,
+ ssl_hash,
+ conn_name,
+ connection_state,
+ connection,
+ consumer_tags,
+ unacked_pubs,
+ awaiting_ack,
+ awaiting_seqno,
+ message_id,
+ client_id,
+ clean_sess,
+ will_msg,
+ exchange,
+ ssl_login_name,
+ retainer_pid,
+ user,
+ vhost]).
+
+-define(MQTT_GUIDE_URL, <<"https://rabbitmq.com/mqtt.html">>).
diff --git a/deps/rabbitmq_mqtt/include/rabbit_mqtt_frame.hrl b/deps/rabbitmq_mqtt/include/rabbit_mqtt_frame.hrl
new file mode 100644
index 0000000000..2b06da502b
--- /dev/null
+++ b/deps/rabbitmq_mqtt/include/rabbit_mqtt_frame.hrl
@@ -0,0 +1,90 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-define(PROTOCOL_NAMES, [{3, "MQIsdp"}, {4, "MQTT"}]).
+
+%% frame types
+
+-define(CONNECT, 1).
+-define(CONNACK, 2).
+-define(PUBLISH, 3).
+-define(PUBACK, 4).
+-define(PUBREC, 5).
+-define(PUBREL, 6).
+-define(PUBCOMP, 7).
+-define(SUBSCRIBE, 8).
+-define(SUBACK, 9).
+-define(UNSUBSCRIBE, 10).
+-define(UNSUBACK, 11).
+-define(PINGREQ, 12).
+-define(PINGRESP, 13).
+-define(DISCONNECT, 14).
+
+%% connect return codes
+
+-define(CONNACK_ACCEPT, 0).
+-define(CONNACK_PROTO_VER, 1). %% unacceptable protocol version
+-define(CONNACK_INVALID_ID, 2). %% identifier rejected
+-define(CONNACK_SERVER, 3). %% server unavailable
+-define(CONNACK_CREDENTIALS, 4). %% bad user name or password
+-define(CONNACK_AUTH, 5). %% not authorized
+
+%% qos levels
+
+-define(QOS_0, 0).
+-define(QOS_1, 1).
+-define(QOS_2, 2).
+
+%% TODO
+-type message_id() :: any().
+
+-record(mqtt_frame, {fixed,
+ variable,
+ payload}).
+
+-record(mqtt_frame_fixed, {type = 0,
+ dup = 0,
+ qos = 0,
+ retain = 0}).
+
+-record(mqtt_frame_connect, {proto_ver,
+ will_retain,
+ will_qos,
+ will_flag,
+ clean_sess,
+ keep_alive,
+ client_id,
+ will_topic,
+ will_msg,
+ username,
+ password}).
+
+-record(mqtt_frame_connack, {session_present,
+ return_code}).
+
+-record(mqtt_frame_publish, {topic_name,
+ message_id}).
+
+-record(mqtt_frame_subscribe,{message_id,
+ topic_table}).
+
+-record(mqtt_frame_suback, {message_id,
+ qos_table = []}).
+
+-record(mqtt_topic, {name,
+ qos}).
+
+-record(mqtt_frame_other, {other}).
+
+-record(mqtt_msg, {retain :: boolean(),
+ qos :: ?QOS_0 | ?QOS_1 | ?QOS_2,
+ topic :: string(),
+ dup :: boolean(),
+ message_id :: message_id(),
+ payload :: binary()}).
+
+-type mqtt_msg() :: #mqtt_msg{}.
diff --git a/deps/rabbitmq_mqtt/include/rabbit_mqtt_retained_msg_store.hrl b/deps/rabbitmq_mqtt/include/rabbit_mqtt_retained_msg_store.hrl
new file mode 100644
index 0000000000..52b61b5924
--- /dev/null
+++ b/deps/rabbitmq_mqtt/include/rabbit_mqtt_retained_msg_store.hrl
@@ -0,0 +1,6 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
diff --git a/deps/rabbitmq_mqtt/priv/schema/rabbitmq_mqtt.schema b/deps/rabbitmq_mqtt/priv/schema/rabbitmq_mqtt.schema
new file mode 100644
index 0000000000..317f5bb04f
--- /dev/null
+++ b/deps/rabbitmq_mqtt/priv/schema/rabbitmq_mqtt.schema
@@ -0,0 +1,259 @@
+%% ----------------------------------------------------------------------------
+%% RabbitMQ MQTT Adapter
+%%
+%% See https://github.com/rabbitmq/rabbitmq-mqtt/blob/stable/README.md
+%% for details
+%% ----------------------------------------------------------------------------
+
+% {rabbitmq_mqtt,
+% [%% Set the default user name and password. Will be used as the default login
+%% if a connecting client provides no other login details.
+%%
+%% Please note that setting this will allow clients to connect without
+%% authenticating!
+%%
+%% {default_user, <<"guest">>},
+%% {default_pass, <<"guest">>},
+
+{mapping, "mqtt.default_user", "rabbitmq_mqtt.default_user", [
+ {datatype, string}
+]}.
+
+{mapping, "mqtt.default_pass", "rabbitmq_mqtt.default_pass", [
+ {datatype, string}
+]}.
+
+{translation, "rabbitmq_mqtt.default_user",
+fun(Conf) ->
+ list_to_binary(cuttlefish:conf_get("mqtt.default_user", Conf))
+end}.
+
+{translation, "rabbitmq_mqtt.default_pass",
+fun(Conf) ->
+ list_to_binary(cuttlefish:conf_get("mqtt.default_pass", Conf))
+end}.
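+%% Example (hypothetical value): the configuration line
+%%   mqtt.default_user = guest
+%% becomes {default_user, <<"guest">>} in the rabbitmq_mqtt application env.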
+
+%% Enable anonymous access. If this is set to false, clients MUST provide
+%% login information in order to connect. See the default_user/default_pass
+%% configuration elements for managing logins without authentication.
+%%
+%% {allow_anonymous, true},
+
+{mapping, "mqtt.allow_anonymous", "rabbitmq_mqtt.allow_anonymous",
+ [{datatype, {enum, [true, false]}}]}.
+
+%% If you have multiple vhosts, specify the one to which the
+%% adapter connects.
+%%
+%% {vhost, <<"/">>},
+
+{mapping, "mqtt.vhost", "rabbitmq_mqtt.vhost", [{datatype, string}]}.
+
+{translation, "rabbitmq_mqtt.vhost",
+fun(Conf) ->
+ list_to_binary(cuttlefish:conf_get("mqtt.vhost", Conf))
+end}.
+
+%% Specify the exchange to which messages from MQTT clients are published.
+%%
+%% {exchange, <<"amq.topic">>},
+
+{mapping, "mqtt.exchange", "rabbitmq_mqtt.exchange", [{datatype, string}]}.
+
+{translation, "rabbitmq_mqtt.exchange",
+fun(Conf) ->
+ list_to_binary(cuttlefish:conf_get("mqtt.exchange", Conf))
+end}.
+
+%% Specify TTL (time to live) to control the lifetime of non-clean sessions.
+%%
+%% {subscription_ttl, 1800000},
+{mapping, "mqtt.subscription_ttl", "rabbitmq_mqtt.subscription_ttl", [
+ {datatype, [{enum, [undefined, infinity]}, integer]}
+]}.
+
+{translation, "rabbitmq_mqtt.subscription_ttl",
+fun(Conf) ->
+ case cuttlefish:conf_get("mqtt.subscription_ttl", Conf, undefined) of
+ undefined -> undefined;
+ infinity -> undefined;
+ Ms -> Ms
+ end
+end}.
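+%% For example, in rabbitmq.conf (milliseconds; the value below is
+%% illustrative, not a recommended default):
+%%
+%%   mqtt.subscription_ttl = 86400000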
+
+%% Set the prefetch count (governing the maximum number of unacknowledged
+%% messages that will be delivered).
+%%
+%% {prefetch, 10},
+{mapping, "mqtt.prefetch", "rabbitmq_mqtt.prefetch",
+ [{datatype, integer}]}.
+
+%% Enable "Sparkplug B" namespace recognition so that the dot in the
+%% namespace is not translated to a slash
+%%
+%% {sparkplug, true},
+{mapping, "mqtt.sparkplug", "rabbitmq_mqtt.sparkplug",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "mqtt.retained_message_store", "rabbitmq_mqtt.retained_message_store",
+ [{datatype, atom}]}.
+
+{mapping, "mqtt.retained_message_store_dets_sync_interval", "rabbitmq_mqtt.retained_message_store_dets_sync_interval",
+ [{datatype, integer}]}.
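+%% A sketch of typical values (the dets-based store module name is the
+%% one commonly shipped with this plugin; treat both lines as
+%% illustrative):
+%%
+%%   mqtt.retained_message_store = rabbit_mqtt_retained_msg_store_dets
+%%   mqtt.retained_message_store_dets_sync_interval = 2000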
+
+%% Whether or not to enable proxy protocol support.
+%%
+%% {proxy_protocol, false}
+
+{mapping, "mqtt.proxy_protocol", "rabbitmq_mqtt.proxy_protocol",
+ [{datatype, {enum, [true, false]}}]}.
+
+%% TCP/SSL Configuration (as per the broker configuration).
+%%
+%% {tcp_listeners, [1883]},
+%% {ssl_listeners, []},
+
+{mapping, "mqtt.listeners.tcp", "rabbitmq_mqtt.tcp_listeners",[
+ {datatype, {enum, [none]}}
+]}.
+
+{mapping, "mqtt.listeners.tcp.$name", "rabbitmq_mqtt.tcp_listeners",[
+ {datatype, [integer, ip]}
+]}.
+
+{translation, "rabbitmq_mqtt.tcp_listeners",
+fun(Conf) ->
+ case cuttlefish:conf_get("mqtt.listeners.tcp", Conf, undefined) of
+ none -> [];
+ _ ->
+ Settings = cuttlefish_variable:filter_by_prefix("mqtt.listeners.tcp", Conf),
+ [ V || {_, V} <- Settings ]
+ end
+end}.
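+%% A sketch of the corresponding rabbitmq.conf syntax ("default" and
+%% "secondary" are arbitrary listener names, and the addresses are
+%% illustrative):
+%%
+%%   mqtt.listeners.tcp.default   = 1883
+%%   mqtt.listeners.tcp.secondary = 127.0.0.1:11883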
+
+{mapping, "mqtt.listeners.ssl", "rabbitmq_mqtt.ssl_listeners",[
+ {datatype, {enum, [none]}}
+]}.
+
+{mapping, "mqtt.listeners.ssl.$name", "rabbitmq_mqtt.ssl_listeners",[
+ {datatype, [integer, ip]}
+]}.
+
+{translation, "rabbitmq_mqtt.ssl_listeners",
+fun(Conf) ->
+ case cuttlefish:conf_get("mqtt.listeners.ssl", Conf, undefined) of
+ none -> [];
+ _ ->
+ Settings = cuttlefish_variable:filter_by_prefix("mqtt.listeners.ssl", Conf),
+ [ V || {_, V} <- Settings ]
+ end
+end}.
+
+%% Number of Erlang processes that will accept connections for the TCP
+%% and SSL listeners.
+%%
+%% {num_tcp_acceptors, 10},
+%% {num_ssl_acceptors, 10},
+
+{mapping, "mqtt.num_acceptors.ssl", "rabbitmq_mqtt.num_ssl_acceptors", [
+ {datatype, integer}
+]}.
+
+{mapping, "mqtt.num_acceptors.tcp", "rabbitmq_mqtt.num_tcp_acceptors", [
+ {datatype, integer}
+]}.
+
+{mapping, "mqtt.ssl_cert_login", "rabbitmq_mqtt.ssl_cert_login", [
+ {datatype, {enum, [true, false]}}]}.
+
+
+%% TCP/Socket options (as per the broker configuration).
+%%
+%% {tcp_listen_options, [{backlog, 128},
+%% {nodelay, true}]}
+% ]},
+
+%% TCP listener section ======================================================
+
+{mapping, "mqtt.tcp_listen_options", "rabbitmq_mqtt.rabbit.tcp_listen_options", [
+ {datatype, {enum, [none]}}]}.
+
+{translation, "rabbitmq_mqtt.rabbit.tcp_listen_options",
+fun(Conf) ->
+ case cuttlefish:conf_get("mqtt.tcp_listen_options") of
+ none -> [];
+ _ -> cuttlefish:invalid("Invalid mqtt.tcp_listen_options")
+ end
+end}.
+
+{mapping, "mqtt.tcp_listen_options.backlog", "rabbitmq_mqtt.tcp_listen_options.backlog", [
+ {datatype, integer}
+]}.
+
+{mapping, "mqtt.tcp_listen_options.nodelay", "rabbitmq_mqtt.tcp_listen_options.nodelay", [
+ {datatype, {enum, [true, false]}}
+]}.
+
+{mapping, "mqtt.tcp_listen_options.buffer", "rabbitmq_mqtt.tcp_listen_options.buffer",
+ [{datatype, integer}]}.
+
+{mapping, "mqtt.tcp_listen_options.delay_send", "rabbitmq_mqtt.tcp_listen_options.delay_send",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "mqtt.tcp_listen_options.dontroute", "rabbitmq_mqtt.tcp_listen_options.dontroute",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "mqtt.tcp_listen_options.exit_on_close", "rabbitmq_mqtt.tcp_listen_options.exit_on_close",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "mqtt.tcp_listen_options.fd", "rabbitmq_mqtt.tcp_listen_options.fd",
+ [{datatype, integer}]}.
+
+{mapping, "mqtt.tcp_listen_options.high_msgq_watermark", "rabbitmq_mqtt.tcp_listen_options.high_msgq_watermark",
+ [{datatype, integer}]}.
+
+{mapping, "mqtt.tcp_listen_options.high_watermark", "rabbitmq_mqtt.tcp_listen_options.high_watermark",
+ [{datatype, integer}]}.
+
+{mapping, "mqtt.tcp_listen_options.keepalive", "rabbitmq_mqtt.tcp_listen_options.keepalive",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "mqtt.tcp_listen_options.low_msgq_watermark", "rabbitmq_mqtt.tcp_listen_options.low_msgq_watermark",
+ [{datatype, integer}]}.
+
+{mapping, "mqtt.tcp_listen_options.low_watermark", "rabbitmq_mqtt.tcp_listen_options.low_watermark",
+ [{datatype, integer}]}.
+
+{mapping, "mqtt.tcp_listen_options.port", "rabbitmq_mqtt.tcp_listen_options.port",
+ [{datatype, integer}, {validators, ["port"]}]}.
+
+{mapping, "mqtt.tcp_listen_options.priority", "rabbitmq_mqtt.tcp_listen_options.priority",
+ [{datatype, integer}]}.
+
+{mapping, "mqtt.tcp_listen_options.recbuf", "rabbitmq_mqtt.tcp_listen_options.recbuf",
+ [{datatype, integer}]}.
+
+{mapping, "mqtt.tcp_listen_options.send_timeout", "rabbitmq_mqtt.tcp_listen_options.send_timeout",
+ [{datatype, integer}]}.
+
+{mapping, "mqtt.tcp_listen_options.send_timeout_close", "rabbitmq_mqtt.tcp_listen_options.send_timeout_close",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "mqtt.tcp_listen_options.sndbuf", "rabbitmq_mqtt.tcp_listen_options.sndbuf",
+ [{datatype, integer}]}.
+
+{mapping, "mqtt.tcp_listen_options.tos", "rabbitmq_mqtt.tcp_listen_options.tos",
+ [{datatype, integer}]}.
+
+{mapping, "mqtt.tcp_listen_options.linger.on", "rabbitmq_mqtt.tcp_listen_options.linger",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "mqtt.tcp_listen_options.linger.timeout", "rabbitmq_mqtt.tcp_listen_options.linger",
+ [{datatype, integer}, {validators, ["non_negative_integer"]}]}.
+
+{translation, "rabbitmq_mqtt.tcp_listen_options.linger",
+fun(Conf) ->
+ LingerOn = cuttlefish:conf_get("mqtt.tcp_listen_options.linger.on", Conf, false),
+ LingerTimeout = cuttlefish:conf_get("mqtt.tcp_listen_options.linger.timeout", Conf, 0),
+ {LingerOn, LingerTimeout}
+end}.
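+%% For example, enabling SO_LINGER with a 10-second timeout
+%% (illustrative values):
+%%
+%%   mqtt.tcp_listen_options.linger.on      = true
+%%   mqtt.tcp_listen_options.linger.timeout = 10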
diff --git a/deps/rabbitmq_mqtt/rabbitmq-components.mk b/deps/rabbitmq_mqtt/rabbitmq-components.mk
new file mode 100644
index 0000000000..b2a3be8b35
--- /dev/null
+++ b/deps/rabbitmq_mqtt/rabbitmq-components.mk
@@ -0,0 +1,359 @@
+ifeq ($(.DEFAULT_GOAL),)
+# Define default goal to `all` because this file defines some targets
+# before the inclusion of erlang.mk leading to the wrong target becoming
+# the default.
+.DEFAULT_GOAL = all
+endif
+
+# PROJECT_VERSION defaults to:
+# 1. the version exported by rabbitmq-server-release;
+# 2. the version stored in `git-revisions.txt`, if it exists;
+# 3. a version based on git-describe(1), if it is a Git clone;
+# 4. 0.0.0
+
+PROJECT_VERSION := $(RABBITMQ_VERSION)
+
+ifeq ($(PROJECT_VERSION),)
+PROJECT_VERSION := $(shell \
+if test -f git-revisions.txt; then \
+ head -n1 git-revisions.txt | \
+ awk '{print $$$(words $(PROJECT_DESCRIPTION) version);}'; \
+else \
+ (git describe --dirty --abbrev=7 --tags --always --first-parent \
+ 2>/dev/null || echo rabbitmq_v0_0_0) | \
+ sed -e 's/^rabbitmq_v//' -e 's/^v//' -e 's/_/./g' -e 's/-/+/' \
+ -e 's/-/./g'; \
+fi)
+endif
+
+# --------------------------------------------------------------------
+# RabbitMQ components.
+# --------------------------------------------------------------------
+
+# For RabbitMQ repositories, we want to check out branches which match
+# the parent project. For instance, if the parent project is on a
+# release tag, dependencies must be on the same release tag. If the
+# parent project is on a topic branch, dependencies must be on the same
+# topic branch, or fall back to `stable` or `master`, whichever was the
+# base of the topic branch.
+
+dep_amqp_client = git_rmq rabbitmq-erlang-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_amqp10_client = git_rmq rabbitmq-amqp1.0-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_amqp10_common = git_rmq rabbitmq-amqp1.0-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbit = git_rmq rabbitmq-server $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbit_common = git_rmq rabbitmq-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_amqp1_0 = git_rmq rabbitmq-amqp1.0 $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_amqp = git_rmq rabbitmq-auth-backend-amqp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_cache = git_rmq rabbitmq-auth-backend-cache $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_http = git_rmq rabbitmq-auth-backend-http $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_ldap = git_rmq rabbitmq-auth-backend-ldap $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_oauth2 = git_rmq rabbitmq-auth-backend-oauth2 $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_mechanism_ssl = git_rmq rabbitmq-auth-mechanism-ssl $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_aws = git_rmq rabbitmq-aws $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_boot_steps_visualiser = git_rmq rabbitmq-boot-steps-visualiser $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_cli = git_rmq rabbitmq-cli $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_codegen = git_rmq rabbitmq-codegen $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_consistent_hash_exchange = git_rmq rabbitmq-consistent-hash-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_ct_client_helpers = git_rmq rabbitmq-ct-client-helpers $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_ct_helpers = git_rmq rabbitmq-ct-helpers $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_delayed_message_exchange = git_rmq rabbitmq-delayed-message-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_dotnet_client = git_rmq rabbitmq-dotnet-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_event_exchange = git_rmq rabbitmq-event-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_federation = git_rmq rabbitmq-federation $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_federation_management = git_rmq rabbitmq-federation-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_java_client = git_rmq rabbitmq-java-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_client = git_rmq rabbitmq-jms-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_cts = git_rmq rabbitmq-jms-cts $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_topic_exchange = git_rmq rabbitmq-jms-topic-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_lvc_exchange = git_rmq rabbitmq-lvc-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management = git_rmq rabbitmq-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_agent = git_rmq rabbitmq-management-agent $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_exchange = git_rmq rabbitmq-management-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_themes = git_rmq rabbitmq-management-themes $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_message_timestamp = git_rmq rabbitmq-message-timestamp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_metronome = git_rmq rabbitmq-metronome $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_mqtt = git_rmq rabbitmq-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_objc_client = git_rmq rabbitmq-objc-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_aws = git_rmq rabbitmq-peer-discovery-aws $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_common = git_rmq rabbitmq-peer-discovery-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_consul = git_rmq rabbitmq-peer-discovery-consul $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_etcd = git_rmq rabbitmq-peer-discovery-etcd $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_k8s = git_rmq rabbitmq-peer-discovery-k8s $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_prometheus = git_rmq rabbitmq-prometheus $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_random_exchange = git_rmq rabbitmq-random-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_recent_history_exchange = git_rmq rabbitmq-recent-history-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_routing_node_stamp = git_rmq rabbitmq-routing-node-stamp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_rtopic_exchange = git_rmq rabbitmq-rtopic-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_server_release = git_rmq rabbitmq-server-release $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_sharding = git_rmq rabbitmq-sharding $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_shovel = git_rmq rabbitmq-shovel $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_shovel_management = git_rmq rabbitmq-shovel-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_stomp = git_rmq rabbitmq-stomp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_stream = git_rmq rabbitmq-stream $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_toke = git_rmq rabbitmq-toke $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_top = git_rmq rabbitmq-top $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_tracing = git_rmq rabbitmq-tracing $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_trust_store = git_rmq rabbitmq-trust-store $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_test = git_rmq rabbitmq-test $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_dispatch = git_rmq rabbitmq-web-dispatch $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_stomp = git_rmq rabbitmq-web-stomp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_stomp_examples = git_rmq rabbitmq-web-stomp-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt = git_rmq rabbitmq-web-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt_examples = git_rmq rabbitmq-web-mqtt-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_website = git_rmq rabbitmq-website $(current_rmq_ref) $(base_rmq_ref) live master
+dep_toke = git_rmq toke $(current_rmq_ref) $(base_rmq_ref) master
+
+dep_rabbitmq_public_umbrella = git_rmq rabbitmq-public-umbrella $(current_rmq_ref) $(base_rmq_ref) master
+
+# Third-party dependencies version pinning.
+#
+# We do that in this file, which is copied in all projects, to ensure
+# all projects use the same versions. It avoids conflicts and makes it
+# possible to work with rabbitmq-public-umbrella.
+
+dep_accept = hex 0.3.5
+dep_cowboy = hex 2.8.0
+dep_cowlib = hex 2.9.1
+dep_jsx = hex 2.11.0
+dep_lager = hex 3.8.0
+dep_prometheus = git https://github.com/deadtrickster/prometheus.erl.git master
+dep_ra = git https://github.com/rabbitmq/ra.git master
+dep_ranch = hex 1.7.1
+dep_recon = hex 2.5.1
+dep_observer_cli = hex 1.5.4
+dep_stdout_formatter = hex 0.2.4
+dep_sysmon_handler = hex 1.3.0
+
+RABBITMQ_COMPONENTS = amqp_client \
+ amqp10_common \
+ amqp10_client \
+ rabbit \
+ rabbit_common \
+ rabbitmq_amqp1_0 \
+ rabbitmq_auth_backend_amqp \
+ rabbitmq_auth_backend_cache \
+ rabbitmq_auth_backend_http \
+ rabbitmq_auth_backend_ldap \
+ rabbitmq_auth_backend_oauth2 \
+ rabbitmq_auth_mechanism_ssl \
+ rabbitmq_aws \
+ rabbitmq_boot_steps_visualiser \
+ rabbitmq_cli \
+ rabbitmq_codegen \
+ rabbitmq_consistent_hash_exchange \
+ rabbitmq_ct_client_helpers \
+ rabbitmq_ct_helpers \
+ rabbitmq_delayed_message_exchange \
+ rabbitmq_dotnet_client \
+ rabbitmq_event_exchange \
+ rabbitmq_federation \
+ rabbitmq_federation_management \
+ rabbitmq_java_client \
+ rabbitmq_jms_client \
+ rabbitmq_jms_cts \
+ rabbitmq_jms_topic_exchange \
+ rabbitmq_lvc_exchange \
+ rabbitmq_management \
+ rabbitmq_management_agent \
+ rabbitmq_management_exchange \
+ rabbitmq_management_themes \
+ rabbitmq_message_timestamp \
+ rabbitmq_metronome \
+ rabbitmq_mqtt \
+ rabbitmq_objc_client \
+ rabbitmq_peer_discovery_aws \
+ rabbitmq_peer_discovery_common \
+ rabbitmq_peer_discovery_consul \
+ rabbitmq_peer_discovery_etcd \
+ rabbitmq_peer_discovery_k8s \
+ rabbitmq_prometheus \
+ rabbitmq_random_exchange \
+ rabbitmq_recent_history_exchange \
+ rabbitmq_routing_node_stamp \
+ rabbitmq_rtopic_exchange \
+ rabbitmq_server_release \
+ rabbitmq_sharding \
+ rabbitmq_shovel \
+ rabbitmq_shovel_management \
+ rabbitmq_stomp \
+ rabbitmq_stream \
+ rabbitmq_toke \
+ rabbitmq_top \
+ rabbitmq_tracing \
+ rabbitmq_trust_store \
+ rabbitmq_web_dispatch \
+ rabbitmq_web_mqtt \
+ rabbitmq_web_mqtt_examples \
+ rabbitmq_web_stomp \
+ rabbitmq_web_stomp_examples \
+ rabbitmq_website
+
+# Erlang.mk does not rebuild dependencies by default once they have
+# been compiled, except for those listed in the `$(FORCE_REBUILD)`
+# variable.
+#
+# We want all RabbitMQ components to always be rebuilt: this eases
+# the work on several components at the same time.
+
+FORCE_REBUILD = $(RABBITMQ_COMPONENTS)
+
+# Several components have a custom erlang.mk/build.config, mainly
+# to disable eunit. Therefore, we can't use the top-level project's
+# erlang.mk copy.
+NO_AUTOPATCH += $(RABBITMQ_COMPONENTS)
+
+ifeq ($(origin current_rmq_ref),undefined)
+ifneq ($(wildcard .git),)
+current_rmq_ref := $(shell (\
+ ref=$$(LANG=C git branch --list | awk '/^\* \(.*detached / {ref=$$0; sub(/.*detached [^ ]+ /, "", ref); sub(/\)$$/, "", ref); print ref; exit;} /^\* / {ref=$$0; sub(/^\* /, "", ref); print ref; exit}');\
+ if test "$$(git rev-parse --short HEAD)" != "$$ref"; then echo "$$ref"; fi))
+else
+current_rmq_ref := master
+endif
+endif
+export current_rmq_ref
+
+ifeq ($(origin base_rmq_ref),undefined)
+ifneq ($(wildcard .git),)
+possible_base_rmq_ref := master
+ifeq ($(possible_base_rmq_ref),$(current_rmq_ref))
+base_rmq_ref := $(current_rmq_ref)
+else
+base_rmq_ref := $(shell \
+ (git rev-parse --verify -q master >/dev/null && \
+ git rev-parse --verify -q $(possible_base_rmq_ref) >/dev/null && \
+ git merge-base --is-ancestor $$(git merge-base master HEAD) $(possible_base_rmq_ref) && \
+ echo $(possible_base_rmq_ref)) || \
+ echo master)
+endif
+else
+base_rmq_ref := master
+endif
+endif
+export base_rmq_ref
+
+# Repository URL selection.
+#
+# First, we infer other components' location from the current project
+# repository URL, if it's a Git repository:
+# - We take the "origin" remote URL as the base
+# - The current project name and repository name are replaced by the
+#   target's properties:
+# eg. rabbitmq-common is replaced by rabbitmq-codegen
+# eg. rabbit_common is replaced by rabbitmq_codegen
+#
+# If cloning from this computed location fails, we fall back to the
+# RabbitMQ upstream, which is on GitHub.
+
+# Macro to transform eg. "rabbit_common" to "rabbitmq-common".
+rmq_cmp_repo_name = $(word 2,$(dep_$(1)))
+
+# Upstream URL for the current project.
+RABBITMQ_COMPONENT_REPO_NAME := $(call rmq_cmp_repo_name,$(PROJECT))
+RABBITMQ_UPSTREAM_FETCH_URL ?= https://github.com/rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git
+RABBITMQ_UPSTREAM_PUSH_URL ?= git@github.com:rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git
+
+# Current URL for the current project. If this is not a Git clone,
+# default to the upstream Git repository.
+ifneq ($(wildcard .git),)
+git_origin_fetch_url := $(shell git config remote.origin.url)
+git_origin_push_url := $(shell git config remote.origin.pushurl || git config remote.origin.url)
+RABBITMQ_CURRENT_FETCH_URL ?= $(git_origin_fetch_url)
+RABBITMQ_CURRENT_PUSH_URL ?= $(git_origin_push_url)
+else
+RABBITMQ_CURRENT_FETCH_URL ?= $(RABBITMQ_UPSTREAM_FETCH_URL)
+RABBITMQ_CURRENT_PUSH_URL ?= $(RABBITMQ_UPSTREAM_PUSH_URL)
+endif
+
+# Macro to replace the following pattern:
+# 1. /foo.git -> /bar.git
+# 2. /foo -> /bar
+# 3. /foo/ -> /bar/
+subst_repo_name = $(patsubst %/$(1)/%,%/$(2)/%,$(patsubst %/$(1),%/$(2),$(patsubst %/$(1).git,%/$(2).git,$(3))))
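+# For instance (illustrative):
+#   $(call subst_repo_name,rabbitmq-common,rabbitmq-codegen,https://github.com/rabbitmq/rabbitmq-common.git)
+# expands to "https://github.com/rabbitmq/rabbitmq-codegen.git".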
+
+# Macro to replace both the project's name (eg. "rabbit_common") and
+# repository name (eg. "rabbitmq-common") by the target's equivalent.
+#
+# This macro is kept on one line because we don't want whitespaces in
+# the returned value, as it's used in $(dep_fetch_git_rmq) in a shell
+# single-quoted string.
+dep_rmq_repo = $(if $(dep_$(2)),$(call subst_repo_name,$(PROJECT),$(2),$(call subst_repo_name,$(RABBITMQ_COMPONENT_REPO_NAME),$(call rmq_cmp_repo_name,$(2)),$(1))),$(pkg_$(1)_repo))
+
+dep_rmq_commits = $(if $(dep_$(1)), \
+ $(wordlist 3,$(words $(dep_$(1))),$(dep_$(1))), \
+ $(pkg_$(1)_commit))
+
+define dep_fetch_git_rmq
+ fetch_url1='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_FETCH_URL),$(1))'; \
+ fetch_url2='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_FETCH_URL),$(1))'; \
+ if test "$$$$fetch_url1" != '$(RABBITMQ_CURRENT_FETCH_URL)' && \
+ git clone -q -n -- "$$$$fetch_url1" $(DEPS_DIR)/$(call dep_name,$(1)); then \
+ fetch_url="$$$$fetch_url1"; \
+ push_url='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_PUSH_URL),$(1))'; \
+ elif git clone -q -n -- "$$$$fetch_url2" $(DEPS_DIR)/$(call dep_name,$(1)); then \
+ fetch_url="$$$$fetch_url2"; \
+ push_url='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_PUSH_URL),$(1))'; \
+ fi; \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && ( \
+ $(foreach ref,$(call dep_rmq_commits,$(1)), \
+ git checkout -q $(ref) >/dev/null 2>&1 || \
+ ) \
+ (echo "error: no valid pathspec among: $(call dep_rmq_commits,$(1))" \
+ 1>&2 && false) ) && \
+ (test "$$$$fetch_url" = "$$$$push_url" || \
+ git remote set-url --push origin "$$$$push_url")
+endef
+
+# --------------------------------------------------------------------
+# Component distribution.
+# --------------------------------------------------------------------
+
+list-dist-deps::
+ @:
+
+prepare-dist::
+ @:
+
+# --------------------------------------------------------------------
+# Umbrella-specific settings.
+# --------------------------------------------------------------------
+
+# If the top-level project is a RabbitMQ component, we override
+# $(DEPS_DIR) for this project to point to the top-level's one.
+#
+# We also verify that the guessed DEPS_DIR is actually named `deps`,
+# to rule out any situation where we found a `rabbitmq-components.mk`
+# in an upper directory by coincidence.
+
+possible_deps_dir_1 = $(abspath ..)
+possible_deps_dir_2 = $(abspath ../../..)
+
+ifeq ($(notdir $(possible_deps_dir_1)),deps)
+ifneq ($(wildcard $(possible_deps_dir_1)/../rabbitmq-components.mk),)
+deps_dir_overriden = 1
+DEPS_DIR ?= $(possible_deps_dir_1)
+DISABLE_DISTCLEAN = 1
+endif
+endif
+
+ifeq ($(deps_dir_overriden),)
+ifeq ($(notdir $(possible_deps_dir_2)),deps)
+ifneq ($(wildcard $(possible_deps_dir_2)/../rabbitmq-components.mk),)
+deps_dir_overriden = 1
+DEPS_DIR ?= $(possible_deps_dir_2)
+DISABLE_DISTCLEAN = 1
+endif
+endif
+endif
+
+ifneq ($(wildcard UMBRELLA.md),)
+DISABLE_DISTCLEAN = 1
+endif
+
+# We disable `make distclean` so $(DEPS_DIR) is not accidentally removed.
+
+ifeq ($(DISABLE_DISTCLEAN),1)
+ifneq ($(filter distclean distclean-deps,$(MAKECMDGOALS)),)
+SKIP_DEPS = 1
+endif
+endif
diff --git a/deps/rabbitmq_mqtt/src/Elixir.RabbitMQ.CLI.Ctl.Commands.DecommissionMqttNodeCommand.erl b/deps/rabbitmq_mqtt/src/Elixir.RabbitMQ.CLI.Ctl.Commands.DecommissionMqttNodeCommand.erl
new file mode 100644
index 0000000000..f0aefb526b
--- /dev/null
+++ b/deps/rabbitmq_mqtt/src/Elixir.RabbitMQ.CLI.Ctl.Commands.DecommissionMqttNodeCommand.erl
@@ -0,0 +1,68 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+-module('Elixir.RabbitMQ.CLI.Ctl.Commands.DecommissionMqttNodeCommand').
+
+-include("rabbit_mqtt.hrl").
+
+-behaviour('Elixir.RabbitMQ.CLI.CommandBehaviour').
+
+-export([scopes/0,
+ switches/0,
+ aliases/0,
+ usage/0,
+ usage_doc_guides/0,
+ banner/2,
+ validate/2,
+ merge_defaults/2,
+ run/2,
+ output/2,
+ description/0,
+ help_section/0]).
+
+scopes() -> [ctl].
+switches() -> [].
+aliases() -> [].
+
+description() -> <<"Removes cluster member and permanently deletes its cluster-wide MQTT state">>.
+
+help_section() ->
+ {plugin, mqtt}.
+
+validate([], _Opts) ->
+ {validation_failure, not_enough_args};
+validate([_, _ | _], _Opts) ->
+ {validation_failure, too_many_args};
+validate([_], _) ->
+ ok.
+
+merge_defaults(Args, Opts) ->
+ {Args, Opts}.
+
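+%% Invoked via the CLI, e.g. (node name is an arbitrary example):
+%%   rabbitmqctl decommission_mqtt_node rabbit@node2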
+usage() ->
+ <<"decommission_mqtt_node <node>">>.
+
+usage_doc_guides() ->
+ [?MQTT_GUIDE_URL].
+
+run([Node], #{node := NodeName,
+ timeout := Timeout}) ->
+ case rabbit_misc:rpc_call(NodeName, rabbit_mqtt_collector, leave, [Node], Timeout) of
+ {badrpc, _} = Error ->
+ Error;
+ nodedown ->
+ {ok, list_to_binary(io_lib:format("Node ~s is down but has been successfully removed"
+ " from the cluster", [Node]))};
+ Result ->
+ %% 'ok' or 'timeout'
+ %% TODO: Ra will timeout if the node is not a cluster member - should this be fixed??
+ Result
+ end.
+
+banner([Node], _) -> list_to_binary(io_lib:format("Removing node ~s from the list of MQTT nodes...", [Node])).
+
+output(Result, _Opts) ->
+ 'Elixir.RabbitMQ.CLI.DefaultOutput':output(Result).
diff --git a/deps/rabbitmq_mqtt/src/Elixir.RabbitMQ.CLI.Ctl.Commands.ListMqttConnectionsCommand.erl b/deps/rabbitmq_mqtt/src/Elixir.RabbitMQ.CLI.Ctl.Commands.ListMqttConnectionsCommand.erl
new file mode 100644
index 0000000000..a5745a7f58
--- /dev/null
+++ b/deps/rabbitmq_mqtt/src/Elixir.RabbitMQ.CLI.Ctl.Commands.ListMqttConnectionsCommand.erl
@@ -0,0 +1,87 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+-module('Elixir.RabbitMQ.CLI.Ctl.Commands.ListMqttConnectionsCommand').
+
+-include("rabbit_mqtt.hrl").
+
+-behaviour('Elixir.RabbitMQ.CLI.CommandBehaviour').
+
+-export([formatter/0,
+ scopes/0,
+ switches/0,
+ aliases/0,
+ usage/0,
+ usage_additional/0,
+ usage_doc_guides/0,
+ banner/2,
+ validate/2,
+ merge_defaults/2,
+ run/2,
+ output/2,
+ description/0,
+ help_section/0]).
+
+formatter() -> 'Elixir.RabbitMQ.CLI.Formatters.Table'.
+scopes() -> [ctl, diagnostics].
+switches() -> [{verbose, boolean}].
+aliases() -> [{'V', verbose}].
+
+description() -> <<"Lists MQTT connections on the target node">>.
+
+help_section() ->
+ {plugin, mqtt}.
+
+validate(Args, _) ->
+ case 'Elixir.RabbitMQ.CLI.Ctl.InfoKeys':validate_info_keys(Args,
+ ?INFO_ITEMS) of
+ {ok, _} -> ok;
+ Error -> Error
+ end.
+
+merge_defaults([], Opts) ->
+ merge_defaults([<<"client_id">>, <<"conn_name">>], Opts);
+merge_defaults(Args, Opts) ->
+ {Args, maps:merge(#{verbose => false}, Opts)}.
+
+usage() ->
+ <<"list_mqtt_connections [<column> ...]">>.
+
+usage_additional() ->
+ Prefix = <<" must be one of ">>,
+ InfoItems = 'Elixir.Enum':join(lists:usort(?INFO_ITEMS), <<", ">>),
+ [
+ {<<"<column>">>, <<Prefix/binary, InfoItems/binary>>}
+ ].
+
+usage_doc_guides() ->
+ [?MQTT_GUIDE_URL].
+
+run(Args, #{node := NodeName,
+ timeout := Timeout,
+ verbose := Verbose}) ->
+ InfoKeys = case Verbose of
+ true -> ?INFO_ITEMS;
+ false -> 'Elixir.RabbitMQ.CLI.Ctl.InfoKeys':prepare_info_keys(Args)
+ end,
+
+ %% A node uses the Raft-based collector to list connections. The collector
+ %% knows about all connections in the cluster, so there is no need to reach
+ %% out to every node.
+ Nodes = [NodeName],
+
+ 'Elixir.RabbitMQ.CLI.Ctl.RpcStream':receive_list_items(
+ NodeName,
+ rabbit_mqtt,
+ emit_connection_info_all,
+ [Nodes, InfoKeys],
+ Timeout,
+ InfoKeys,
+ length(Nodes)).
+
+banner(_, _) -> <<"Listing MQTT connections ...">>.
+
+output(Result, _Opts) ->
+ 'Elixir.RabbitMQ.CLI.DefaultOutput':output(Result).
diff --git a/deps/rabbitmq_mqtt/src/mqtt_machine.erl b/deps/rabbitmq_mqtt/src/mqtt_machine.erl
new file mode 100644
index 0000000000..334aa9e32c
--- /dev/null
+++ b/deps/rabbitmq_mqtt/src/mqtt_machine.erl
@@ -0,0 +1,134 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+-module(mqtt_machine).
+-behaviour(ra_machine).
+
+-include("mqtt_machine.hrl").
+
+-export([init/1,
+ apply/3,
+ state_enter/2,
+ notify_connection/2]).
+
+-type state() :: #machine_state{}.
+
+-type config() :: map().
+
+-type reply() :: {ok, term()} | {error, term()}.
+-type client_id() :: term().
+
+-type command() :: {register, client_id(), pid()} |
+ {unregister, client_id(), pid()} |
+ list.
+
+-spec init(config()) -> state().
+init(_Conf) ->
+ #machine_state{}.
+
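+%% MQTT requires client IDs to be unique: when a second connection
+%% registers an ID already held by a live pid, the old pid is sent a
+%% duplicate_id notification and dropped in favour of the new one.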
+-spec apply(map(), command(), state()) ->
+ {state(), reply(), ra_machine:effects()}.
+apply(_Meta, {register, ClientId, Pid}, #machine_state{client_ids = Ids} = State0) ->
+ {Effects, Ids1} =
+ case maps:find(ClientId, Ids) of
+ {ok, OldPid} when Pid =/= OldPid ->
+ Effects0 = [{demonitor, process, OldPid},
+ {monitor, process, Pid},
+ {mod_call, ?MODULE, notify_connection, [OldPid, duplicate_id]}],
+ {Effects0, maps:remove(ClientId, Ids)};
+ _ ->
+ Effects0 = [{monitor, process, Pid}],
+ {Effects0, Ids}
+ end,
+ State = State0#machine_state{client_ids = maps:put(ClientId, Pid, Ids1)},
+ {State, ok, Effects};
+
+apply(Meta, {unregister, ClientId, Pid}, #machine_state{client_ids = Ids} = State0) ->
+ State = case maps:find(ClientId, Ids) of
+ {ok, Pid} -> State0#machine_state{client_ids = maps:remove(ClientId, Ids)};
+ %% don't delete client id that might belong to a newer connection
+ %% that kicked the one with Pid out
+ {ok, _AnotherPid} -> State0;
+ error -> State0
+ end,
+ Effects0 = [{demonitor, process, Pid}],
+ %% snapshot only when the map has changed
+ Effects = case State of
+ State0 -> Effects0;
+ _ -> Effects0 ++ snapshot_effects(Meta, State)
+ end,
+ {State, ok, Effects};
+
+apply(_Meta, {down, DownPid, noconnection}, State) ->
+ %% Monitor the node the pid is on (see {nodeup, Node} below)
+ %% so that we can detect when the node is re-connected and discover the
+ %% actual fate of the connection processes on it
+ Effect = {monitor, node, node(DownPid)},
+ {State, ok, Effect};
+
+apply(Meta, {down, DownPid, _}, #machine_state{client_ids = Ids} = State0) ->
+ Ids1 = maps:filter(fun (_ClientId, Pid) when Pid =:= DownPid ->
+ false;
+ (_, _) ->
+ true
+ end, Ids),
+ State = State0#machine_state{client_ids = Ids1},
+ Delta = maps:keys(Ids) -- maps:keys(Ids1),
+ Effects = lists:map(fun(Id) ->
+ [{mod_call, rabbit_log, debug,
+ ["MQTT connection with client id '~s' failed", [Id]]}] end, Delta),
+ {State, ok, Effects ++ snapshot_effects(Meta, State)};
+
+apply(_Meta, {nodeup, Node}, State) ->
+ %% Work out if any pids that were disconnected are still
+ %% alive.
+ %% Re-request the monitor for the pids on the now-back node.
+ Effects = [{monitor, process, Pid} || Pid <- all_pids(State), node(Pid) == Node],
+ {State, ok, Effects};
+apply(_Meta, {nodedown, _Node}, State) ->
+ {State, ok};
+
+apply(Meta, {leave, Node}, #machine_state{client_ids = Ids} = State0) ->
+ Ids1 = maps:filter(fun (_ClientId, Pid) -> node(Pid) =/= Node end, Ids),
+ Delta = maps:keys(Ids) -- maps:keys(Ids1),
+
+ Effects = lists:foldl(fun (ClientId, Acc) ->
+ Pid = maps:get(ClientId, Ids),
+ [
+ {demonitor, process, Pid},
+ {mod_call, ?MODULE, notify_connection, [Pid, decommission_node]},
+ {mod_call, rabbit_log, debug,
+ ["MQTT will remove client ID '~s' from known "
+ "as its node has been decommissioned", [ClientId]]}
+ ] ++ Acc
+ end, [], Delta),
+
+ State = State0#machine_state{client_ids = Ids1},
+ {State, ok, Effects ++ snapshot_effects(Meta, State)};
+
+apply(_Meta, Unknown, State) ->
+ error_logger:error_msg("MQTT Raft state machine received unknown command ~p~n", [Unknown]),
+ {State, {error, {unknown_command, Unknown}}, []}.
+
+state_enter(leader, State) ->
+ %% Re-request monitors for all known pids. This cleans up records for
+ %% connections that are no longer around, e.g. right after a node restart.
+ [{monitor, process, Pid} || Pid <- all_pids(State)];
+state_enter(_, _) ->
+ [].
+
+%% ==========================
+
+%% Avoids blocking the Raft leader.
+notify_connection(Pid, Reason) ->
+ spawn(fun() -> gen_server2:cast(Pid, Reason) end).
+
+-spec snapshot_effects(map(), state()) -> ra_machine:effects().
+snapshot_effects(#{index := RaftIdx}, State) ->
+ [{release_cursor, RaftIdx, State}].
+
+all_pids(#machine_state{client_ids = Ids}) ->
+ maps:values(Ids).
diff --git a/deps/rabbitmq_mqtt/src/mqtt_node.erl b/deps/rabbitmq_mqtt/src/mqtt_node.erl
new file mode 100644
index 0000000000..84dcd9b3a4
--- /dev/null
+++ b/deps/rabbitmq_mqtt/src/mqtt_node.erl
@@ -0,0 +1,132 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+-module(mqtt_node).
+
+-export([start/0, node_id/0, server_id/0, all_node_ids/0, leave/1, trigger_election/0]).
+
+-define(ID_NAME, mqtt_node).
+-define(START_TIMEOUT, 100000).
+-define(RETRY_INTERVAL, 5000).
+-define(RA_OPERATION_TIMEOUT, 60000).
+
+node_id() ->
+ server_id(node()).
+
+server_id() ->
+ server_id(node()).
+
+server_id(Node) ->
+ {?ID_NAME, Node}.
+
+all_node_ids() ->
+ [server_id(N) || N <- rabbit_mnesia:cluster_nodes(all),
+ can_participate_in_clientid_tracking(N)].
+
+start() ->
+ %% 3s to 6s randomized
+ Repetitions = rand:uniform(10) + 10,
+ start(300, Repetitions).
+
+start(_Delay, AttemptsLeft) when AttemptsLeft =< 0 ->
+ start_server(),
+ trigger_election();
+start(Delay, AttemptsLeft) ->
+ NodeId = server_id(),
+ Nodes = compatible_peer_servers(),
+ case ra_directory:uid_of(?ID_NAME) of
+ undefined ->
+ case Nodes of
+ [] ->
+ %% Since cluster members are not known ahead of time and initial boot can be happening in parallel,
+ %% we wait and check a few times (up to a few seconds) to see if we can discover any peers to
+ %% join before forming a cluster. This reduces the probability of N independent clusters being
+ %% formed in the common scenario of N nodes booting in parallel e.g. because they were started
+ %% at the same time by a deployment tool.
+ %%
+ %% This scenario does not guarantee single cluster formation but without knowing the list of members
+ %% ahead of time, this is a best effort workaround. Multi-node consensus is apparently hard
+ %% to achieve without having consensus around expected cluster members.
+ rabbit_log:info("MQTT: will wait for ~p more ms for cluster members to join before triggering a Raft leader election", [Delay]),
+ timer:sleep(Delay),
+ start(Delay, AttemptsLeft - 1);
+ Peers ->
+ %% Trigger an election.
+ %% This is required when we start a node for the first time.
+ %% Using the default timeout because it is supposed to reply fast.
+ rabbit_log:info("MQTT: discovered ~p cluster peers that support client ID tracking", [length(Peers)]),
+ start_server(),
+ join_peers(NodeId, Peers),
+ ra:trigger_election(NodeId, ?RA_OPERATION_TIMEOUT)
+ end;
+ _ ->
+ join_peers(NodeId, Nodes),
+ ra:restart_server(NodeId),
+ ra:trigger_election(NodeId)
+ end,
+ ok.
+
+compatible_peer_servers() ->
+ all_node_ids() -- [node_id()].
+
+start_server() ->
+ NodeId = node_id(),
+ Nodes = compatible_peer_servers(),
+ UId = ra:new_uid(ra_lib:to_binary(?ID_NAME)),
+ Timeout = application:get_env(kernel, net_ticktime, 60) + 5,
+ Conf = #{cluster_name => ?ID_NAME,
+ id => NodeId,
+ uid => UId,
+ friendly_name => ?ID_NAME,
+ initial_members => Nodes,
+ log_init_args => #{uid => UId},
+ tick_timeout => Timeout,
+ machine => {module, mqtt_machine, #{}}
+ },
+ ra:start_server(Conf).
+
+trigger_election() ->
+ ra:trigger_election(server_id()).
+
+join_peers(_NodeId, []) ->
+ ok;
+join_peers(NodeId, Nodes) ->
+ join_peers(NodeId, Nodes, 100).
+join_peers(_NodeId, [], _RetriesLeft) ->
+ ok;
+join_peers(_NodeId, _Nodes, RetriesLeft) when RetriesLeft =:= 0 ->
+ rabbit_log:error("MQTT: exhausted all attempts while trying to rejoin cluster peers");
+join_peers(NodeId, Nodes, RetriesLeft) ->
+ case ra:members(Nodes, ?START_TIMEOUT) of
+ {ok, Members, _} ->
+ case lists:member(NodeId, Members) of
+ true -> ok;
+ false -> ra:add_member(Members, NodeId)
+ end;
+ {timeout, _} ->
+ rabbit_log:debug("MQTT: timed out contacting cluster peers, %s retries left", [RetriesLeft]),
+ timer:sleep(?RETRY_INTERVAL),
+ join_peers(NodeId, Nodes, RetriesLeft - 1);
+ Err ->
+ Err
+ end.
+
+-spec leave(node()) -> 'ok' | 'timeout' | 'nodedown'.
+leave(Node) ->
+ NodeId = server_id(),
+ ToLeave = server_id(Node),
+ try
+ ra:leave_and_delete_server(NodeId, ToLeave)
+ catch
+ exit:{{nodedown, Node}, _} ->
+ nodedown
+ end.
+
+can_participate_in_clientid_tracking(Node) ->
+ case rpc:call(Node, mqtt_machine, module_info, []) of
+ {badrpc, _} -> false;
+ _ -> true
+ end.
diff --git a/deps/rabbitmq_mqtt/src/rabbit_mqtt.erl b/deps/rabbitmq_mqtt/src/rabbit_mqtt.erl
new file mode 100644
index 0000000000..192f8a7fee
--- /dev/null
+++ b/deps/rabbitmq_mqtt/src/rabbit_mqtt.erl
@@ -0,0 +1,55 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mqtt).
+
+-behaviour(application).
+-export([start/2, stop/1]).
+-export([connection_info_local/1,
+ emit_connection_info_local/3,
+ emit_connection_info_all/4,
+ close_all_client_connections/1]).
+
+start(normal, []) ->
+ {ok, Listeners} = application:get_env(tcp_listeners),
+ {ok, SslListeners} = application:get_env(ssl_listeners),
+ ok = mqtt_node:start(),
+ Result = rabbit_mqtt_sup:start_link({Listeners, SslListeners}, []),
+ EMPid = case rabbit_event:start_link() of
+ {ok, Pid} -> Pid;
+ {error, {already_started, Pid}} -> Pid
+ end,
+ gen_event:add_handler(EMPid, rabbit_mqtt_internal_event_handler, []),
+ Result.
+
+stop(_) ->
+ rabbit_mqtt_sup:stop_listeners().
+
+-spec close_all_client_connections(string() | binary()) -> {'ok', non_neg_integer()}.
+close_all_client_connections(Reason) ->
+ Connections = rabbit_mqtt_collector:list(),
+ [rabbit_mqtt_reader:close_connection(Pid, Reason) || {_, Pid} <- Connections],
+ {ok, length(Connections)}.
+
+emit_connection_info_all(Nodes, Items, Ref, AggregatorPid) ->
+ Pids = [spawn_link(Node, rabbit_mqtt, emit_connection_info_local,
+ [Items, Ref, AggregatorPid])
+ || Node <- Nodes],
+ rabbit_control_misc:await_emitters_termination(Pids),
+ ok.
+
+emit_connection_info_local(Items, Ref, AggregatorPid) ->
+ rabbit_control_misc:emitting_map_with_exit_handler(
+ AggregatorPid, Ref, fun({_, Pid}) ->
+ rabbit_mqtt_reader:info(Pid, Items)
+ end,
+ rabbit_mqtt_collector:list()).
+
+connection_info_local(Items) ->
+ Connections = rabbit_mqtt_collector:list(),
+ [rabbit_mqtt_reader:info(Pid, Items)
+ || {_, Pid} <- Connections].
diff --git a/deps/rabbitmq_mqtt/src/rabbit_mqtt_collector.erl b/deps/rabbitmq_mqtt/src/rabbit_mqtt_collector.erl
new file mode 100644
index 0000000000..341ee46850
--- /dev/null
+++ b/deps/rabbitmq_mqtt/src/rabbit_mqtt_collector.erl
@@ -0,0 +1,88 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mqtt_collector).
+
+-include("mqtt_machine.hrl").
+
+-export([register/2, register/3, unregister/2, list/0, leave/1]).
+
+%%----------------------------------------------------------------------------
+-spec register(term(), pid()) -> {ok, reference()} | {error, term()}.
+register(ClientId, Pid) ->
+ {ClusterName, _} = NodeId = mqtt_node:server_id(),
+ case ra_leaderboard:lookup_leader(ClusterName) of
+ undefined ->
+ case ra:members(NodeId) of
+ {ok, _, Leader} ->
+ register(Leader, ClientId, Pid);
+ _ = Error ->
+ Error
+ end;
+ Leader ->
+ register(Leader, ClientId, Pid)
+ end.
+
+-spec register(ra:server_id(), term(), pid()) ->
+ {ok, reference()} | {error, term()}.
+register(ServerId, ClientId, Pid) ->
+ Corr = make_ref(),
+ send_ra_command(ServerId, {register, ClientId, Pid}, Corr),
+ erlang:send_after(5000, self(), {ra_event, undefined, register_timeout}),
+ {ok, Corr}.
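+%% Note: the command above is pipelined rather than awaited, so the
+%% caller gets the correlation reference back immediately and later
+%% receives a matching {ra_event, ...} message; the send_after acts as
+%% a local timeout in case no such event ever arrives.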
+
+unregister(ClientId, Pid) ->
+ {ClusterName, _} = mqtt_node:server_id(),
+ case ra_leaderboard:lookup_leader(ClusterName) of
+ undefined ->
+ ok;
+ Leader ->
+ send_ra_command(Leader, {unregister, ClientId, Pid}, no_correlation)
+ end.
+
+list() ->
+ {ClusterName, _} = mqtt_node:server_id(),
+ QF = fun (#machine_state{client_ids = Ids}) -> maps:to_list(Ids) end,
+ case ra_leaderboard:lookup_leader(ClusterName) of
+ undefined ->
+ NodeIds = mqtt_node:all_node_ids(),
+ case ra:leader_query(NodeIds, QF) of
+ {ok, {_, Ids}, _} -> Ids;
+ {timeout, _} ->
+ rabbit_log:debug("~s:list/0 leader query timed out",
+ [?MODULE]),
+ []
+ end;
+ Leader ->
+ case ra:leader_query(Leader, QF) of
+ {ok, {_, Ids}, _} -> Ids;
+ {error, _} ->
+ [];
+ {timeout, _} ->
+ rabbit_log:debug("~s:list/0 leader query timed out",
+ [?MODULE]),
+ []
+ end
+ end.
+
+leave(NodeBin) ->
+ Node = binary_to_atom(NodeBin, utf8),
+ ServerId = mqtt_node:server_id(),
+ run_ra_command(ServerId, {leave, Node}),
+ mqtt_node:leave(Node).
+
+%%----------------------------------------------------------------------------
+-spec run_ra_command(term(), term()) -> term() | {error, term()}.
+run_ra_command(ServerId, RaCommand) ->
+ case ra:process_command(ServerId, RaCommand) of
+ {ok, Result, _} -> Result;
+ _ = Error -> Error
+ end.
+
+-spec send_ra_command(term(), term(), term()) -> ok.
+send_ra_command(ServerId, RaCommand, Correlation) ->
+ ok = ra:pipeline_command(ServerId, RaCommand, Correlation, normal).
diff --git a/deps/rabbitmq_mqtt/src/rabbit_mqtt_connection_info.erl b/deps/rabbitmq_mqtt/src/rabbit_mqtt_connection_info.erl
new file mode 100644
index 0000000000..4e73a19253
--- /dev/null
+++ b/deps/rabbitmq_mqtt/src/rabbit_mqtt_connection_info.erl
@@ -0,0 +1,25 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2017-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+-module(rabbit_mqtt_connection_info).
+
+%% Module to add the MQTT client ID to authentication properties
+
+%% API
+-export([additional_authn_params/4]).
+
+additional_authn_params(_Creds, _VHost, _Pid, Infos) ->
+ case proplists:get_value(variable_map, Infos, undefined) of
+ VariableMap when is_map(VariableMap) ->
+ case maps:get(<<"client_id">>, VariableMap, []) of
+ ClientId when is_binary(ClientId) ->
+ [{client_id, ClientId}];
+ [] ->
+ []
+ end;
+ _ ->
+ []
+ end.
diff --git a/deps/rabbitmq_mqtt/src/rabbit_mqtt_connection_sup.erl b/deps/rabbitmq_mqtt/src/rabbit_mqtt_connection_sup.erl
new file mode 100644
index 0000000000..0a150caa38
--- /dev/null
+++ b/deps/rabbitmq_mqtt/src/rabbit_mqtt_connection_sup.erl
@@ -0,0 +1,43 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mqtt_connection_sup).
+
+-behaviour(supervisor2).
+-behaviour(ranch_protocol).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-export([start_link/4, start_keepalive_link/0]).
+
+-export([init/1]).
+
+%%----------------------------------------------------------------------------
+
+start_link(Ref, _Sock, _Transport, []) ->
+ {ok, SupPid} = supervisor2:start_link(?MODULE, []),
+ {ok, KeepaliveSup} = supervisor2:start_child(
+ SupPid,
+ {rabbit_mqtt_keepalive_sup,
+ {rabbit_mqtt_connection_sup, start_keepalive_link, []},
+ intrinsic, infinity, supervisor, [rabbit_keepalive_sup]}),
+ {ok, ReaderPid} = supervisor2:start_child(
+ SupPid,
+ {rabbit_mqtt_reader,
+ {rabbit_mqtt_reader, start_link, [KeepaliveSup, Ref]},
+ intrinsic, ?WORKER_WAIT, worker, [rabbit_mqtt_reader]}),
+ {ok, SupPid, ReaderPid}.
+
+start_keepalive_link() ->
+ supervisor2:start_link(?MODULE, []).
+
+%%----------------------------------------------------------------------------
+
+init([]) ->
+ {ok, {{one_for_all, 0, 1}, []}}.
+
+
diff --git a/deps/rabbitmq_mqtt/src/rabbit_mqtt_frame.erl b/deps/rabbitmq_mqtt/src/rabbit_mqtt_frame.erl
new file mode 100644
index 0000000000..950c5bd6c4
--- /dev/null
+++ b/deps/rabbitmq_mqtt/src/rabbit_mqtt_frame.erl
@@ -0,0 +1,224 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mqtt_frame).
+
+-export([parse/2, initial_state/0]).
+-export([serialise/1]).
+
+-include("rabbit_mqtt_frame.hrl").
+
+-define(RESERVED, 0).
+-define(MAX_LEN, 16#fffffff).
+-define(HIGHBIT, 2#10000000).
+-define(LOWBITS, 2#01111111).
+
+initial_state() -> none.
+
+parse(<<>>, none) ->
+ {more, fun(Bin) -> parse(Bin, none) end};
+parse(<<MessageType:4, Dup:1, QoS:2, Retain:1, Rest/binary>>, none) ->
+ parse_remaining_len(Rest, #mqtt_frame_fixed{ type = MessageType,
+ dup = bool(Dup),
+ qos = QoS,
+ retain = bool(Retain) });
+parse(Bin, Cont) -> Cont(Bin).
+
+parse_remaining_len(<<>>, Fixed) ->
+ {more, fun(Bin) -> parse_remaining_len(Bin, Fixed) end};
+parse_remaining_len(Rest, Fixed) ->
+ parse_remaining_len(Rest, Fixed, 1, 0).
+
+parse_remaining_len(_Bin, _Fixed, _Multiplier, Length)
+ when Length > ?MAX_LEN ->
+ {error, invalid_mqtt_frame_len};
+parse_remaining_len(<<>>, Fixed, Multiplier, Length) ->
+ {more, fun(Bin) -> parse_remaining_len(Bin, Fixed, Multiplier, Length) end};
+parse_remaining_len(<<1:1, Len:7, Rest/binary>>, Fixed, Multiplier, Value) ->
+ parse_remaining_len(Rest, Fixed, Multiplier * ?HIGHBIT, Value + Len * Multiplier);
+parse_remaining_len(<<0:1, Len:7, Rest/binary>>, Fixed, Multiplier, Value) ->
+ parse_frame(Rest, Fixed, Value + Len * Multiplier).
+
+parse_frame(Bin, #mqtt_frame_fixed{ type = Type,
+ qos = Qos } = Fixed, Length) ->
+ case {Type, Bin} of
+ {?CONNECT, <<FrameBin:Length/binary, Rest/binary>>} ->
+ {ProtoName, Rest1} = parse_utf(FrameBin),
+ <<ProtoVersion : 8, Rest2/binary>> = Rest1,
+ <<UsernameFlag : 1,
+ PasswordFlag : 1,
+ WillRetain : 1,
+ WillQos : 2,
+ WillFlag : 1,
+ CleanSession : 1,
+ _Reserved : 1,
+ KeepAlive : 16/big,
+ Rest3/binary>> = Rest2,
+ {ClientId, Rest4} = parse_utf(Rest3),
+ {WillTopic, Rest5} = parse_utf(Rest4, WillFlag),
+ {WillMsg, Rest6} = parse_msg(Rest5, WillFlag),
+ {UserName, Rest7} = parse_utf(Rest6, UsernameFlag),
+ {PassWord, <<>>} = parse_utf(Rest7, PasswordFlag),
+ case protocol_name_approved(ProtoVersion, ProtoName) of
+ true ->
+ wrap(Fixed,
+ #mqtt_frame_connect{
+ proto_ver = ProtoVersion,
+ will_retain = bool(WillRetain),
+ will_qos = WillQos,
+ will_flag = bool(WillFlag),
+ clean_sess = bool(CleanSession),
+ keep_alive = KeepAlive,
+ client_id = ClientId,
+ will_topic = WillTopic,
+ will_msg = WillMsg,
+ username = UserName,
+ password = PassWord}, Rest);
+ false ->
+ {error, protocol_header_corrupt}
+ end;
+ {?PUBLISH, <<FrameBin:Length/binary, Rest/binary>>} ->
+ {TopicName, Rest1} = parse_utf(FrameBin),
+ {MessageId, Payload} = case Qos of
+ 0 -> {undefined, Rest1};
+ _ -> <<M:16/big, R/binary>> = Rest1,
+ {M, R}
+ end,
+ wrap(Fixed, #mqtt_frame_publish { topic_name = TopicName,
+ message_id = MessageId },
+ Payload, Rest);
+ {?PUBACK, <<FrameBin:Length/binary, Rest/binary>>} ->
+ <<MessageId:16/big>> = FrameBin,
+ wrap(Fixed, #mqtt_frame_publish { message_id = MessageId }, Rest);
+ {Subs, <<FrameBin:Length/binary, Rest/binary>>}
+ when Subs =:= ?SUBSCRIBE orelse Subs =:= ?UNSUBSCRIBE ->
+ 1 = Qos,
+ <<MessageId:16/big, Rest1/binary>> = FrameBin,
+ Topics = parse_topics(Subs, Rest1, []),
+ wrap(Fixed, #mqtt_frame_subscribe { message_id = MessageId,
+ topic_table = Topics }, Rest);
+ {Minimal, Rest}
+ when Minimal =:= ?DISCONNECT orelse Minimal =:= ?PINGREQ ->
+ Length = 0,
+ wrap(Fixed, Rest);
+ {_, TooShortBin} ->
+ {more, fun(BinMore) ->
+ parse_frame(<<TooShortBin/binary, BinMore/binary>>,
+ Fixed, Length)
+ end}
+ end.
+
+parse_topics(_, <<>>, Topics) ->
+ Topics;
+parse_topics(?SUBSCRIBE = Sub, Bin, Topics) ->
+ {Name, <<_:6, QoS:2, Rest/binary>>} = parse_utf(Bin),
+ parse_topics(Sub, Rest, [#mqtt_topic { name = Name, qos = QoS } | Topics]);
+parse_topics(?UNSUBSCRIBE = Sub, Bin, Topics) ->
+ {Name, <<Rest/binary>>} = parse_utf(Bin),
+ parse_topics(Sub, Rest, [#mqtt_topic { name = Name } | Topics]).
+
+wrap(Fixed, Variable, Payload, Rest) ->
+ {ok, #mqtt_frame { variable = Variable, fixed = Fixed, payload = Payload }, Rest}.
+wrap(Fixed, Variable, Rest) ->
+ {ok, #mqtt_frame { variable = Variable, fixed = Fixed }, Rest}.
+wrap(Fixed, Rest) ->
+ {ok, #mqtt_frame { fixed = Fixed }, Rest}.
+
+parse_utf(Bin, 0) ->
+ {undefined, Bin};
+parse_utf(Bin, _) ->
+ parse_utf(Bin).
+
+parse_utf(<<Len:16/big, Str:Len/binary, Rest/binary>>) ->
+ {binary_to_list(Str), Rest}.
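+%% e.g. parse_utf(<<0, 4, "MQTT", Rest/binary>>) yields {"MQTT", Rest}:
+%% a 16-bit big-endian length prefix followed by that many bytes.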
+
+parse_msg(Bin, 0) ->
+ {undefined, Bin};
+parse_msg(<<Len:16/big, Msg:Len/binary, Rest/binary>>, _) ->
+ {Msg, Rest}.
+
+bool(0) -> false;
+bool(1) -> true.
+
+%% serialisation
+
+serialise(#mqtt_frame{ fixed = Fixed,
+ variable = Variable,
+ payload = Payload }) ->
+ serialise_variable(Fixed, Variable, serialise_payload(Payload)).
+
+serialise_payload(undefined) -> <<>>;
+serialise_payload(B) when is_binary(B) -> B.
+
+serialise_variable(#mqtt_frame_fixed { type = ?CONNACK } = Fixed,
+ #mqtt_frame_connack { session_present = SessionPresent,
+ return_code = ReturnCode },
+ <<>> = PayloadBin) ->
+ VariableBin = <<?RESERVED:7, (opt(SessionPresent)):1, ReturnCode:8>>,
+ serialise_fixed(Fixed, VariableBin, PayloadBin);
+
+serialise_variable(#mqtt_frame_fixed { type = SubAck } = Fixed,
+ #mqtt_frame_suback { message_id = MessageId,
+ qos_table = Qos },
+ <<>> = _PayloadBin)
+ when SubAck =:= ?SUBACK orelse SubAck =:= ?UNSUBACK ->
+ VariableBin = <<MessageId:16/big>>,
+ QosBin = << <<?RESERVED:6, Q:2>> || Q <- Qos >>,
+ serialise_fixed(Fixed, VariableBin, QosBin);
+
+serialise_variable(#mqtt_frame_fixed { type = ?PUBLISH,
+ qos = Qos } = Fixed,
+ #mqtt_frame_publish { topic_name = TopicName,
+ message_id = MessageId },
+ PayloadBin) ->
+ TopicBin = serialise_utf(TopicName),
+ MessageIdBin = case Qos of
+ 0 -> <<>>;
+ 1 -> <<MessageId:16/big>>
+ end,
+ serialise_fixed(Fixed, <<TopicBin/binary, MessageIdBin/binary>>, PayloadBin);
+
+serialise_variable(#mqtt_frame_fixed { type = ?PUBACK } = Fixed,
+ #mqtt_frame_publish { message_id = MessageId },
+ PayloadBin) ->
+ MessageIdBin = <<MessageId:16/big>>,
+ serialise_fixed(Fixed, MessageIdBin, PayloadBin);
+
+serialise_variable(#mqtt_frame_fixed {} = Fixed,
+ undefined,
+ <<>> = _PayloadBin) ->
+ serialise_fixed(Fixed, <<>>, <<>>).
+
+serialise_fixed(#mqtt_frame_fixed{ type = Type,
+ dup = Dup,
+ qos = Qos,
+ retain = Retain }, VariableBin, PayloadBin)
+ when is_integer(Type) andalso ?CONNECT =< Type andalso Type =< ?DISCONNECT ->
+ Len = size(VariableBin) + size(PayloadBin),
+ true = (Len =< ?MAX_LEN),
+ LenBin = serialise_len(Len),
+ <<Type:4, (opt(Dup)):1, (opt(Qos)):2, (opt(Retain)):1,
+ LenBin/binary, VariableBin/binary, PayloadBin/binary>>.
+
+serialise_utf(String) ->
+ StringBin = unicode:characters_to_binary(String),
+ Len = size(StringBin),
+ true = (Len =< 16#ffff),
+ <<Len:16/big, StringBin/binary>>.
+
+serialise_len(N) when N =< ?LOWBITS ->
+ <<0:1, N:7>>;
+serialise_len(N) ->
+ <<1:1, (N rem ?HIGHBIT):7, (serialise_len(N div ?HIGHBIT))/binary>>.
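+%% Worked example: serialise_len(321) (321 = 2*128 + 65) produces
+%% <<2#11000001, 2#00000010>> = <<193, 2>>: the high bit of each byte
+%% flags a continuation, and the low 7 bits carry the value, least
+%% significant group first.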
+
+opt(undefined) -> ?RESERVED;
+opt(false) -> 0;
+opt(true) -> 1;
+opt(X) when is_integer(X) -> X.
+
+protocol_name_approved(Ver, Name) ->
+ lists:member({Ver, Name}, ?PROTOCOL_NAMES).
diff --git a/deps/rabbitmq_mqtt/src/rabbit_mqtt_internal_event_handler.erl b/deps/rabbitmq_mqtt/src/rabbit_mqtt_internal_event_handler.erl
new file mode 100644
index 0000000000..2a371b4142
--- /dev/null
+++ b/deps/rabbitmq_mqtt/src/rabbit_mqtt_internal_event_handler.erl
@@ -0,0 +1,45 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mqtt_internal_event_handler).
+
+-behaviour(gen_event).
+
+-export([init/1, handle_event/2, handle_call/2, handle_info/2, terminate/2, code_change/3]).
+
+-import(rabbit_misc, [pget/2]).
+
+init([]) ->
+ {ok, []}.
+
+handle_event({event, vhost_created, Info, _, _}, State) ->
+ Name = pget(name, Info),
+ rabbit_mqtt_retainer_sup:child_for_vhost(Name),
+ {ok, State};
+handle_event({event, vhost_deleted, Info, _, _}, State) ->
+ Name = pget(name, Info),
+ rabbit_mqtt_retainer_sup:delete_child(Name),
+ {ok, State};
+handle_event({event, maintenance_connections_closed, _Info, _, _}, State) ->
+ %% we should close our connections
+ {ok, NConnections} = rabbit_mqtt:close_all_client_connections("node is being put into maintenance mode"),
+ rabbit_log:alert("Closed ~b local MQTT client connections", [NConnections]),
+ {ok, State};
+handle_event(_Event, State) ->
+ {ok, State}.
+
+handle_call(_Request, State) ->
+ {ok, State}.
+
+handle_info(_Info, State) ->
+ {ok, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
diff --git a/deps/rabbitmq_mqtt/src/rabbit_mqtt_processor.erl b/deps/rabbitmq_mqtt/src/rabbit_mqtt_processor.erl
new file mode 100644
index 0000000000..c3a25096e6
--- /dev/null
+++ b/deps/rabbitmq_mqtt/src/rabbit_mqtt_processor.erl
@@ -0,0 +1,1054 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mqtt_processor).
+
+-export([info/2, initial_state/2, initial_state/5,
+ process_frame/2, amqp_pub/2, amqp_callback/2, send_will/1,
+ close_connection/1, handle_pre_hibernate/0,
+ handle_ra_event/2]).
+
+%% for testing purposes
+-export([get_vhost_username/1, get_vhost/3, get_vhost_from_user_mapping/2,
+ add_client_id_to_adapter_info/2]).
+
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include("rabbit_mqtt_frame.hrl").
+-include("rabbit_mqtt.hrl").
+
+-define(APP, rabbitmq_mqtt).
+-define(FRAME_TYPE(Frame, Type),
+ Frame = #mqtt_frame{ fixed = #mqtt_frame_fixed{ type = Type }}).
+-define(MAX_TOPIC_PERMISSION_CACHE_SIZE, 12).
+
+initial_state(Socket, SSLLoginName) ->
+ RealSocket = rabbit_net:unwrap_socket(Socket),
+ {ok, {PeerAddr, _PeerPort}} = rabbit_net:peername(RealSocket),
+ initial_state(RealSocket, SSLLoginName,
+ adapter_info(Socket, 'MQTT'),
+ fun serialise_and_send_to_client/2, PeerAddr).
+
+initial_state(Socket, SSLLoginName,
+ AdapterInfo0 = #amqp_adapter_info{additional_info = Extra},
+ SendFun, PeerAddr) ->
+ {ok, {mqtt2amqp_fun, M2A}, {amqp2mqtt_fun, A2M}} =
+ rabbit_mqtt_util:get_topic_translation_funs(),
+ %% MQTT connections use exactly one channel. The frame max is not
+ %% applicable and there is no way to know what client is used.
+ AdapterInfo = AdapterInfo0#amqp_adapter_info{additional_info = [
+ {channels, 1},
+ {channel_max, 1},
+ {frame_max, 0},
+ {client_properties,
+ [{<<"product">>, longstr, <<"MQTT client">>}]} | Extra]},
+ #proc_state{ unacked_pubs = gb_trees:empty(),
+ awaiting_ack = gb_trees:empty(),
+ message_id = 1,
+ subscriptions = #{},
+ consumer_tags = {undefined, undefined},
+ channels = {undefined, undefined},
+ exchange = rabbit_mqtt_util:env(exchange),
+ socket = Socket,
+ adapter_info = AdapterInfo,
+ ssl_login_name = SSLLoginName,
+ send_fun = SendFun,
+ peer_addr = PeerAddr,
+ mqtt2amqp_fun = M2A,
+ amqp2mqtt_fun = A2M}.
+
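+%% Every frame except CONNECT is rejected until a connection has been
+%% established; on success the caller is also handed the connection pid.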
+process_frame(#mqtt_frame{ fixed = #mqtt_frame_fixed{ type = Type }},
+ PState = #proc_state{ connection = undefined } )
+ when Type =/= ?CONNECT ->
+ {error, connect_expected, PState};
+process_frame(Frame = #mqtt_frame{ fixed = #mqtt_frame_fixed{ type = Type }},
+ PState) ->
+ case process_request(Type, Frame, PState) of
+ {ok, PState1} -> {ok, PState1, PState1#proc_state.connection};
+ Ret -> Ret
+ end.
+
+add_client_id_to_adapter_info(ClientId, #amqp_adapter_info{additional_info = AdditionalInfo0} = AdapterInfo) ->
+ AdditionalInfo1 = [{variable_map, #{<<"client_id">> => ClientId}}
+ | AdditionalInfo0],
+ ClientProperties = proplists:get_value(client_properties, AdditionalInfo1, [])
+ ++ [{client_id, longstr, ClientId}],
+ AdditionalInfo2 = case lists:keysearch(client_properties, 1, AdditionalInfo1) of
+ {value, _} ->
+ lists:keyreplace(client_properties,
+ 1,
+ AdditionalInfo1,
+ {client_properties, ClientProperties});
+ false ->
+ [{client_properties, ClientProperties} | AdditionalInfo1]
+ end,
+ AdapterInfo#amqp_adapter_info{additional_info = AdditionalInfo2}.
+
+process_request(?CONNECT,
+ #mqtt_frame{ variable = #mqtt_frame_connect{
+ username = Username,
+ password = Password,
+ proto_ver = ProtoVersion,
+ clean_sess = CleanSess,
+ client_id = ClientId0,
+ keep_alive = Keepalive} = Var},
+ PState0 = #proc_state{ ssl_login_name = SSLLoginName,
+ send_fun = SendFun,
+ adapter_info = AdapterInfo,
+ peer_addr = Addr}) ->
+ ClientId = case ClientId0 of
+ [] -> rabbit_mqtt_util:gen_client_id();
+ [_|_] -> ClientId0
+ end,
+ rabbit_log_connection:debug("Received a CONNECT, client ID: ~p (expanded to ~p), username: ~p, "
+ "clean session: ~p, protocol version: ~p, keepalive: ~p",
+ [ClientId0, ClientId, Username, CleanSess, ProtoVersion, Keepalive]),
+ AdapterInfo1 = add_client_id_to_adapter_info(rabbit_data_coercion:to_binary(ClientId), AdapterInfo),
+ PState1 = PState0#proc_state{adapter_info = AdapterInfo1},
+ Ip = list_to_binary(inet:ntoa(Addr)),
+ {Return, PState5} =
+ case {lists:member(ProtoVersion, proplists:get_keys(?PROTOCOL_NAMES)),
+ ClientId0 =:= [] andalso CleanSess =:= false} of
+ {false, _} ->
+ {?CONNACK_PROTO_VER, PState1};
+ {_, true} ->
+ {?CONNACK_INVALID_ID, PState1};
+ _ ->
+ case creds(Username, Password, SSLLoginName) of
+ nocreds ->
+ rabbit_core_metrics:auth_attempt_failed(Ip, <<>>, mqtt),
+ rabbit_log_connection:error("MQTT login failed: no credentials provided~n"),
+ {?CONNACK_CREDENTIALS, PState1};
+ {invalid_creds, {undefined, Pass}} when is_list(Pass) ->
+ rabbit_core_metrics:auth_attempt_failed(Ip, <<>>, mqtt),
+                    rabbit_log_connection:error("MQTT login failed: no username provided"),
+ {?CONNACK_CREDENTIALS, PState1};
+ {invalid_creds, {User, undefined}} when is_list(User) ->
+ rabbit_core_metrics:auth_attempt_failed(Ip, User, mqtt),
+ rabbit_log_connection:error("MQTT login failed for user '~p': no password provided", [User]),
+ {?CONNACK_CREDENTIALS, PState1};
+ {UserBin, PassBin} ->
+ case process_login(UserBin, PassBin, ProtoVersion, PState1) of
+ connack_dup_auth ->
+ {SessionPresent0, PState2} = maybe_clean_sess(PState1),
+ {{?CONNACK_ACCEPT, SessionPresent0}, PState2};
+ {?CONNACK_ACCEPT, Conn, VHost, AState} ->
+ case rabbit_mqtt_collector:register(ClientId, self()) of
+ {ok, Corr} ->
+ RetainerPid = rabbit_mqtt_retainer_sup:child_for_vhost(VHost),
+ link(Conn),
+ {ok, Ch} = amqp_connection:open_channel(Conn),
+ link(Ch),
+ amqp_channel:enable_delivery_flow_control(Ch),
+ Prefetch = rabbit_mqtt_util:env(prefetch),
+ #'basic.qos_ok'{} = amqp_channel:call(Ch,
+ #'basic.qos'{prefetch_count = Prefetch}),
+ rabbit_mqtt_reader:start_keepalive(self(), Keepalive),
+ PState3 = PState1#proc_state{
+ will_msg = make_will_msg(Var),
+ clean_sess = CleanSess,
+ channels = {Ch, undefined},
+ connection = Conn,
+ client_id = ClientId,
+ retainer_pid = RetainerPid,
+ auth_state = AState,
+ register_state = {pending, Corr}},
+ {SessionPresent1, PState4} = maybe_clean_sess(PState3),
+ {{?CONNACK_ACCEPT, SessionPresent1}, PState4};
+ %% e.g. this node was removed from the MQTT cluster members
+ {error, _} = Err ->
+ rabbit_log_connection:error("MQTT cannot accept a connection: "
+ "client ID tracker is unavailable: ~p", [Err]),
+ %% ignore all exceptions, we are shutting down
+ catch amqp_connection:close(Conn),
+ {?CONNACK_SERVER, PState1};
+ {timeout, _} ->
+ rabbit_log_connection:error("MQTT cannot accept a connection: "
+ "client ID registration timed out"),
+ %% ignore all exceptions, we are shutting down
+ catch amqp_connection:close(Conn),
+ {?CONNACK_SERVER, PState1}
+ end;
+ ConnAck -> {ConnAck, PState1}
+ end
+ end
+ end,
+ {ReturnCode, SessionPresent} = case Return of
+ {?CONNACK_ACCEPT, Bool} -> {?CONNACK_ACCEPT, Bool};
+ Other -> {Other, false}
+ end,
+ SendFun(#mqtt_frame{fixed = #mqtt_frame_fixed{type = ?CONNACK},
+ variable = #mqtt_frame_connack{
+ session_present = SessionPresent,
+ return_code = ReturnCode}},
+ PState5),
+ case ReturnCode of
+ ?CONNACK_ACCEPT -> {ok, PState5};
+ ?CONNACK_CREDENTIALS -> {error, unauthenticated, PState5};
+ ?CONNACK_AUTH -> {error, unauthorized, PState5};
+ ?CONNACK_SERVER -> {error, unavailable, PState5};
+ ?CONNACK_INVALID_ID -> {error, invalid_client_id, PState5};
+ ?CONNACK_PROTO_VER -> {error, unsupported_protocol_version, PState5}
+ end;
+
+process_request(?PUBACK,
+ #mqtt_frame{
+ variable = #mqtt_frame_publish{ message_id = MessageId }},
+ #proc_state{ channels = {Channel, _},
+ awaiting_ack = Awaiting } = PState) ->
+ %% tag can be missing because of bogus clients and QoS downgrades
+ case gb_trees:is_defined(MessageId, Awaiting) of
+ false ->
+ {ok, PState};
+ true ->
+ Tag = gb_trees:get(MessageId, Awaiting),
+ amqp_channel:cast(Channel, #'basic.ack'{ delivery_tag = Tag }),
+ {ok, PState#proc_state{ awaiting_ack = gb_trees:delete(MessageId, Awaiting) }}
+ end;
+
+process_request(?PUBLISH,
+ Frame = #mqtt_frame{
+ fixed = Fixed = #mqtt_frame_fixed{ qos = ?QOS_2 }},
+ PState) ->
+ % Downgrade QOS_2 to QOS_1
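+    % (the plugin does not support QoS 2, so such publishes are
+    % treated as QoS 1)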
+ process_request(?PUBLISH,
+ Frame#mqtt_frame{
+ fixed = Fixed#mqtt_frame_fixed{ qos = ?QOS_1 }},
+ PState);
+process_request(?PUBLISH,
+ #mqtt_frame{
+ fixed = #mqtt_frame_fixed{ qos = Qos,
+ retain = Retain,
+ dup = Dup },
+ variable = #mqtt_frame_publish{ topic_name = Topic,
+ message_id = MessageId },
+ payload = Payload },
+ PState = #proc_state{retainer_pid = RPid,
+ amqp2mqtt_fun = Amqp2MqttFun}) ->
+ check_publish(Topic, fun() ->
+ Msg = #mqtt_msg{retain = Retain,
+ qos = Qos,
+ topic = Topic,
+ dup = Dup,
+ message_id = MessageId,
+ payload = Payload},
+ Result = amqp_pub(Msg, PState),
+ case Retain of
+ false -> ok;
+ true -> hand_off_to_retainer(RPid, Amqp2MqttFun, Topic, Msg)
+ end,
+ {ok, Result}
+ end, PState);
+
+process_request(?SUBSCRIBE,
+ #mqtt_frame{
+ variable = #mqtt_frame_subscribe{
+ message_id = SubscribeMsgId,
+ topic_table = Topics},
+ payload = undefined},
+ #proc_state{channels = {Channel, _},
+ exchange = Exchange,
+ retainer_pid = RPid,
+ send_fun = SendFun,
+ message_id = StateMsgId,
+ mqtt2amqp_fun = Mqtt2AmqpFun} = PState0) ->
+ rabbit_log_connection:debug("Received a SUBSCRIBE for topic(s) ~p", [Topics]),
+ check_subscribe(Topics, fun() ->
+ {QosResponse, PState1} =
+ lists:foldl(fun (#mqtt_topic{name = TopicName,
+ qos = Qos}, {QosList, PState}) ->
+ SupportedQos = supported_subs_qos(Qos),
+ {Queue, #proc_state{subscriptions = Subs} = PState1} =
+ ensure_queue(SupportedQos, PState),
+ RoutingKey = Mqtt2AmqpFun(TopicName),
+ Binding = #'queue.bind'{
+ queue = Queue,
+ exchange = Exchange,
+ routing_key = RoutingKey},
+ #'queue.bind_ok'{} = amqp_channel:call(Channel, Binding),
+ SupportedQosList = case maps:find(TopicName, Subs) of
+ {ok, L} -> [SupportedQos|L];
+ error -> [SupportedQos]
+ end,
+ {[SupportedQos | QosList],
+ PState1 #proc_state{
+ subscriptions =
+ maps:put(TopicName, SupportedQosList, Subs)}}
+ end, {[], PState0}, Topics),
+ SendFun(#mqtt_frame{fixed = #mqtt_frame_fixed{type = ?SUBACK},
+ variable = #mqtt_frame_suback{
+ message_id = SubscribeMsgId,
+ qos_table = QosResponse}}, PState1),
+ %% we may need to send up to length(Topics) messages.
+ %% if QoS is > 0 then we need to generate a message id,
+ %% and increment the counter.
+ StartMsgId = safe_max_id(SubscribeMsgId, StateMsgId),
+ N = lists:foldl(fun (Topic, Acc) ->
+ case maybe_send_retained_message(RPid, Topic, Acc, PState1) of
+ {true, X} -> Acc + X;
+ false -> Acc
+ end
+ end, StartMsgId, Topics),
+ {ok, PState1#proc_state{message_id = N}}
+ end, PState0);
+
+process_request(?UNSUBSCRIBE,
+ #mqtt_frame{
+ variable = #mqtt_frame_subscribe{ message_id = MessageId,
+ topic_table = Topics },
+ payload = undefined }, #proc_state{ channels = {Channel, _},
+ exchange = Exchange,
+ client_id = ClientId,
+ subscriptions = Subs0,
+ send_fun = SendFun,
+ mqtt2amqp_fun = Mqtt2AmqpFun } = PState) ->
+ rabbit_log_connection:debug("Received an UNSUBSCRIBE for topic(s) ~p", [Topics]),
+ Queues = rabbit_mqtt_util:subcription_queue_name(ClientId),
+ Subs1 =
+ lists:foldl(
+ fun (#mqtt_topic{ name = TopicName }, Subs) ->
+ QosSubs = case maps:find(TopicName, Subs) of
+ {ok, Val} when is_list(Val) -> lists:usort(Val);
+ error -> []
+ end,
+ RoutingKey = Mqtt2AmqpFun(TopicName),
+ lists:foreach(
+ fun (QosSub) ->
+ Queue = element(QosSub + 1, Queues),
+ Binding = #'queue.unbind'{
+ queue = Queue,
+ exchange = Exchange,
+ routing_key = RoutingKey},
+ #'queue.unbind_ok'{} = amqp_channel:call(Channel, Binding)
+ end, QosSubs),
+ maps:remove(TopicName, Subs)
+ end, Subs0, Topics),
+ SendFun(#mqtt_frame{ fixed = #mqtt_frame_fixed { type = ?UNSUBACK },
+ variable = #mqtt_frame_suback{ message_id = MessageId }},
+ PState),
+ {ok, PState #proc_state{ subscriptions = Subs1 }};
+
+process_request(?PINGREQ, #mqtt_frame{}, #proc_state{ send_fun = SendFun } = PState) ->
+ rabbit_log_connection:debug("Received a PINGREQ"),
+ SendFun(#mqtt_frame{ fixed = #mqtt_frame_fixed{ type = ?PINGRESP }},
+ PState),
+ rabbit_log_connection:debug("Sent a PINGRESP"),
+ {ok, PState};
+
+process_request(?DISCONNECT, #mqtt_frame{}, PState) ->
+ rabbit_log_connection:debug("Received a DISCONNECT"),
+ {stop, PState}.
+
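+%% Per the MQTT spec, a retained publish with an empty payload clears
+%% the retained message for that topic rather than storing one.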
+hand_off_to_retainer(RetainerPid, Amqp2MqttFun, Topic0, #mqtt_msg{payload = <<"">>}) ->
+ Topic1 = Amqp2MqttFun(Topic0),
+ rabbit_mqtt_retainer:clear(RetainerPid, Topic1),
+ ok;
+hand_off_to_retainer(RetainerPid, Amqp2MqttFun, Topic0, Msg) ->
+ Topic1 = Amqp2MqttFun(Topic0),
+ rabbit_mqtt_retainer:retain(RetainerPid, Topic1, Msg),
+ ok.
+
+maybe_send_retained_message(RPid, #mqtt_topic{name = Topic0, qos = SubscribeQos}, MsgId,
+ #proc_state{ send_fun = SendFun,
+ amqp2mqtt_fun = Amqp2MqttFun } = PState) ->
+ Topic1 = Amqp2MqttFun(Topic0),
+ case rabbit_mqtt_retainer:fetch(RPid, Topic1) of
+ undefined -> false;
+ Msg ->
+ %% calculate effective QoS as the lower value of SUBSCRIBE frame QoS
+ %% and retained message QoS. The spec isn't super clear on this, we
+ %% do what Mosquitto does, per user feedback.
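+            %% e.g. a QoS 1 retained message matched by a QoS 0
+            %% subscription is delivered at QoS 0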
+ Qos = erlang:min(SubscribeQos, Msg#mqtt_msg.qos),
+ Id = case Qos of
+ ?QOS_0 -> undefined;
+ ?QOS_1 -> MsgId
+ end,
+ SendFun(#mqtt_frame{fixed = #mqtt_frame_fixed{
+ type = ?PUBLISH,
+ qos = Qos,
+ dup = false,
+ retain = Msg#mqtt_msg.retain
+ }, variable = #mqtt_frame_publish{
+ message_id = Id,
+ topic_name = Topic1
+ },
+ payload = Msg#mqtt_msg.payload}, PState),
+ case Qos of
+ ?QOS_0 -> false;
+ ?QOS_1 -> {true, 1}
+ end
+ end.
+
+amqp_callback({#'basic.deliver'{ consumer_tag = ConsumerTag,
+ delivery_tag = DeliveryTag,
+ routing_key = RoutingKey },
+ #amqp_msg{ props = #'P_basic'{ headers = Headers },
+ payload = Payload },
+ DeliveryCtx} = Delivery,
+ #proc_state{ channels = {Channel, _},
+ awaiting_ack = Awaiting,
+ message_id = MsgId,
+ send_fun = SendFun,
+ amqp2mqtt_fun = Amqp2MqttFun } = PState) ->
+ amqp_channel:notify_received(DeliveryCtx),
+ case {delivery_dup(Delivery), delivery_qos(ConsumerTag, Headers, PState)} of
+ {true, {?QOS_0, ?QOS_1}} ->
+ amqp_channel:cast(
+ Channel, #'basic.ack'{ delivery_tag = DeliveryTag }),
+ {ok, PState};
+ {true, {?QOS_0, ?QOS_0}} ->
+ {ok, PState};
+ {Dup, {DeliveryQos, _SubQos} = Qos} ->
+ TopicName = Amqp2MqttFun(RoutingKey),
+ SendFun(
+ #mqtt_frame{ fixed = #mqtt_frame_fixed{
+ type = ?PUBLISH,
+ qos = DeliveryQos,
+ dup = Dup },
+ variable = #mqtt_frame_publish{
+ message_id =
+ case DeliveryQos of
+ ?QOS_0 -> undefined;
+ ?QOS_1 -> MsgId
+ end,
+ topic_name = TopicName },
+ payload = Payload}, PState),
+ case Qos of
+ {?QOS_0, ?QOS_0} ->
+ {ok, PState};
+ {?QOS_1, ?QOS_1} ->
+ Awaiting1 = gb_trees:insert(MsgId, DeliveryTag, Awaiting),
+ PState1 = PState#proc_state{ awaiting_ack = Awaiting1 },
+ PState2 = next_msg_id(PState1),
+ {ok, PState2};
+ {?QOS_0, ?QOS_1} ->
+ amqp_channel:cast(
+ Channel, #'basic.ack'{ delivery_tag = DeliveryTag }),
+ {ok, PState}
+ end
+ end;
+
+amqp_callback(#'basic.ack'{ multiple = true, delivery_tag = Tag } = Ack,
+ PState = #proc_state{ unacked_pubs = UnackedPubs,
+ send_fun = SendFun }) ->
+ case gb_trees:size(UnackedPubs) > 0 andalso
+ gb_trees:take_smallest(UnackedPubs) of
+ {TagSmall, MsgId, UnackedPubs1} when TagSmall =< Tag ->
+ SendFun(
+ #mqtt_frame{ fixed = #mqtt_frame_fixed{ type = ?PUBACK },
+ variable = #mqtt_frame_publish{ message_id = MsgId }},
+ PState),
+ amqp_callback(Ack, PState #proc_state{ unacked_pubs = UnackedPubs1 });
+ _ ->
+ {ok, PState}
+ end;
+
+amqp_callback(#'basic.ack'{ multiple = false, delivery_tag = Tag },
+ PState = #proc_state{ unacked_pubs = UnackedPubs,
+ send_fun = SendFun }) ->
+ SendFun(
+ #mqtt_frame{ fixed = #mqtt_frame_fixed{ type = ?PUBACK },
+ variable = #mqtt_frame_publish{
+ message_id = gb_trees:get(
+ Tag, UnackedPubs) }}, PState),
+ {ok, PState #proc_state{ unacked_pubs = gb_trees:delete(Tag, UnackedPubs) }}.
+
+delivery_dup({#'basic.deliver'{ redelivered = Redelivered },
+ #amqp_msg{ props = #'P_basic'{ headers = Headers }},
+ _DeliveryCtx}) ->
+ case rabbit_mqtt_util:table_lookup(Headers, <<"x-mqtt-dup">>) of
+ undefined -> Redelivered;
+ {bool, Dup} -> Redelivered orelse Dup
+ end.
+
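+%% MQTT message identifiers are 16-bit and must be non-zero, so the
+%% counter wraps back to 1.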
+ensure_valid_mqtt_message_id(Id) when Id >= 16#ffff ->
+ 1;
+ensure_valid_mqtt_message_id(Id) ->
+ Id.
+
+safe_max_id(Id0, Id1) ->
+ ensure_valid_mqtt_message_id(erlang:max(Id0, Id1)).
+
+next_msg_id(PState = #proc_state{ message_id = MsgId0 }) ->
+ MsgId1 = ensure_valid_mqtt_message_id(MsgId0 + 1),
+ PState#proc_state{ message_id = MsgId1 }.
+
+%% decide at which qos level to deliver based on subscription
+%% and the message publish qos level. non-MQTT publishes are
+%% assumed to be qos 1, regardless of delivery_mode.
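+%% Returns {EffectiveDeliveryQos, SubscriptionQos}.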
+delivery_qos(Tag, _Headers, #proc_state{ consumer_tags = {Tag, _} }) ->
+ {?QOS_0, ?QOS_0};
+delivery_qos(Tag, Headers, #proc_state{ consumer_tags = {_, Tag} }) ->
+ case rabbit_mqtt_util:table_lookup(Headers, <<"x-mqtt-publish-qos">>) of
+ {byte, Qos} -> {lists:min([Qos, ?QOS_1]), ?QOS_1};
+ undefined -> {?QOS_1, ?QOS_1}
+ end.
+
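+%% With a clean session the QoS 1 queue is deleted and no session is
+%% reported as present; otherwise the queue's prior existence determines
+%% the session-present flag and the queue is (re)declared.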
+maybe_clean_sess(PState = #proc_state { clean_sess = false,
+ connection = Conn,
+ client_id = ClientId }) ->
+ SessionPresent = session_present(Conn, ClientId),
+ {_Queue, PState1} = ensure_queue(?QOS_1, PState),
+ {SessionPresent, PState1};
+maybe_clean_sess(PState = #proc_state { clean_sess = true,
+ connection = Conn,
+ client_id = ClientId }) ->
+ {_, Queue} = rabbit_mqtt_util:subcription_queue_name(ClientId),
+ {ok, Channel} = amqp_connection:open_channel(Conn),
+ ok = try amqp_channel:call(Channel, #'queue.delete'{ queue = Queue }) of
+ #'queue.delete_ok'{} -> ok
+ catch
+ exit:_Error -> ok
+ after
+ amqp_channel:close(Channel)
+ end,
+ {false, PState}.
+
+session_present(Conn, ClientId) ->
+ {_, QueueQ1} = rabbit_mqtt_util:subcription_queue_name(ClientId),
+ Declare = #'queue.declare'{queue = QueueQ1,
+ passive = true},
+ {ok, Channel} = amqp_connection:open_channel(Conn),
+ try
+ amqp_channel:call(Channel, Declare),
+ amqp_channel:close(Channel),
+ true
+ catch exit:{{shutdown, {server_initiated_close, ?NOT_FOUND, _Text}}, _} ->
+ false
+ end.
+
+make_will_msg(#mqtt_frame_connect{ will_flag = false }) ->
+ undefined;
+make_will_msg(#mqtt_frame_connect{ will_retain = Retain,
+ will_qos = Qos,
+ will_topic = Topic,
+ will_msg = Msg }) ->
+ #mqtt_msg{ retain = Retain,
+ qos = Qos,
+ topic = Topic,
+ dup = false,
+ payload = Msg }.
+
+process_login(_UserBin, _PassBin, _ProtoVersion,
+ #proc_state{channels = {Channel, _},
+ peer_addr = Addr,
+ auth_state = #auth_state{username = Username,
+ vhost = VHost}}) when is_pid(Channel) ->
+ UsernameStr = rabbit_data_coercion:to_list(Username),
+ VHostStr = rabbit_data_coercion:to_list(VHost),
+ rabbit_core_metrics:auth_attempt_failed(list_to_binary(inet:ntoa(Addr)), Username, mqtt),
+ rabbit_log_connection:warning("MQTT detected duplicate connect/login attempt for user ~p, vhost ~p",
+ [UsernameStr, VHostStr]),
+ connack_dup_auth;
+process_login(UserBin, PassBin, ProtoVersion,
+ #proc_state{channels = {undefined, undefined},
+ socket = Sock,
+ adapter_info = AdapterInfo,
+ ssl_login_name = SslLoginName,
+ peer_addr = Addr}) ->
+ {ok, {_, _, _, ToPort}} = rabbit_net:socket_ends(Sock, inbound),
+ {VHostPickedUsing, {VHost, UsernameBin}} = get_vhost(UserBin, SslLoginName, ToPort),
+ rabbit_log_connection:info(
+ "MQTT vhost picked using ~s~n",
+ [human_readable_vhost_lookup_strategy(VHostPickedUsing)]),
+ RemoteAddress = list_to_binary(inet:ntoa(Addr)),
+ case rabbit_vhost:exists(VHost) of
+ true ->
+ case amqp_connection:start(#amqp_params_direct{
+ username = UsernameBin,
+ password = PassBin,
+ virtual_host = VHost,
+ adapter_info = set_proto_version(AdapterInfo, ProtoVersion)}) of
+ {ok, Connection} ->
+ case rabbit_access_control:check_user_loopback(UsernameBin, Addr) of
+ ok ->
+ rabbit_core_metrics:auth_attempt_succeeded(RemoteAddress, UsernameBin,
+ mqtt),
+ [{internal_user, InternalUser}] = amqp_connection:info(
+ Connection, [internal_user]),
+ {?CONNACK_ACCEPT, Connection, VHost,
+ #auth_state{user = InternalUser,
+ username = UsernameBin,
+ vhost = VHost}};
+ not_allowed ->
+ rabbit_core_metrics:auth_attempt_failed(RemoteAddress, UsernameBin,
+ mqtt),
+ amqp_connection:close(Connection),
+ rabbit_log_connection:warning(
+                          "MQTT login failed for user '~p': access_refused "
+ "(access must be from localhost)~n",
+ [binary_to_list(UsernameBin)]),
+ ?CONNACK_AUTH
+ end;
+ {error, {auth_failure, Explanation}} ->
+ rabbit_core_metrics:auth_attempt_failed(RemoteAddress, UsernameBin, mqtt),
+ rabbit_log_connection:error("MQTT login failed for user '~p' auth_failure: ~s~n",
+ [binary_to_list(UserBin), Explanation]),
+ ?CONNACK_CREDENTIALS;
+ {error, access_refused} ->
+ rabbit_core_metrics:auth_attempt_failed(RemoteAddress, UsernameBin, mqtt),
+ rabbit_log_connection:warning("MQTT login failed for user '~p': access_refused "
+ "(vhost access not allowed)~n",
+ [binary_to_list(UserBin)]),
+ ?CONNACK_AUTH;
+ {error, not_allowed} ->
+ rabbit_core_metrics:auth_attempt_failed(RemoteAddress, UsernameBin, mqtt),
+ %% when vhost allowed for TLS connection
+                    rabbit_log_connection:warning("MQTT login failed for user '~p': access_refused "
+ "(vhost access not allowed)~n",
+ [binary_to_list(UserBin)]),
+ ?CONNACK_AUTH
+ end;
+ false ->
+ rabbit_core_metrics:auth_attempt_failed(RemoteAddress, UsernameBin, mqtt),
+ rabbit_log_connection:error("MQTT login failed for user '~p' auth_failure: vhost ~s does not exist~n",
+ [binary_to_list(UserBin), VHost]),
+ ?CONNACK_CREDENTIALS
+ end.
+
+get_vhost(UserBin, none, Port) ->
+ get_vhost_no_ssl(UserBin, Port);
+get_vhost(UserBin, undefined, Port) ->
+ get_vhost_no_ssl(UserBin, Port);
+get_vhost(UserBin, SslLogin, Port) ->
+ get_vhost_ssl(UserBin, SslLogin, Port).
+
+get_vhost_no_ssl(UserBin, Port) ->
+ case vhost_in_username(UserBin) of
+ true ->
+ {vhost_in_username_or_default, get_vhost_username(UserBin)};
+ false ->
+ PortVirtualHostMapping = rabbit_runtime_parameters:value_global(
+ mqtt_port_to_vhost_mapping
+ ),
+ case get_vhost_from_port_mapping(Port, PortVirtualHostMapping) of
+ undefined ->
+ {default_vhost, {rabbit_mqtt_util:env(vhost), UserBin}};
+ VHost ->
+ {port_to_vhost_mapping, {VHost, UserBin}}
+ end
+ end.
+
+get_vhost_ssl(UserBin, SslLoginName, Port) ->
+ UserVirtualHostMapping = rabbit_runtime_parameters:value_global(
+ mqtt_default_vhosts
+ ),
+ case get_vhost_from_user_mapping(SslLoginName, UserVirtualHostMapping) of
+ undefined ->
+ PortVirtualHostMapping = rabbit_runtime_parameters:value_global(
+ mqtt_port_to_vhost_mapping
+ ),
+ case get_vhost_from_port_mapping(Port, PortVirtualHostMapping) of
+ undefined ->
+ {vhost_in_username_or_default, get_vhost_username(UserBin)};
+ VHostFromPortMapping ->
+ {port_to_vhost_mapping, {VHostFromPortMapping, UserBin}}
+ end;
+ VHostFromCertMapping ->
+ {cert_to_vhost_mapping, {VHostFromCertMapping, UserBin}}
+ end.
+
+vhost_in_username(UserBin) ->
+ case application:get_env(?APP, ignore_colons_in_username) of
+ {ok, true} -> false;
+ _ ->
+ %% split at the last colon, disallowing colons in username
+ case re:split(UserBin, ":(?!.*?:)") of
+ [_, _] -> true;
+ [UserBin] -> false
+ end
+ end.
+
+get_vhost_username(UserBin) ->
+ Default = {rabbit_mqtt_util:env(vhost), UserBin},
+ case application:get_env(?APP, ignore_colons_in_username) of
+ {ok, true} -> Default;
+ _ ->
+ %% split at the last colon, disallowing colons in username
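+            %% e.g. <<"my-vhost:guest">> yields {<<"my-vhost">>, <<"guest">>}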
+ case re:split(UserBin, ":(?!.*?:)") of
+ [Vhost, UserName] -> {Vhost, UserName};
+ [UserBin] -> Default
+ end
+ end.
+
+get_vhost_from_user_mapping(_User, not_found) ->
+ undefined;
+get_vhost_from_user_mapping(User, Mapping) ->
+ M = rabbit_data_coercion:to_proplist(Mapping),
+ case rabbit_misc:pget(User, M) of
+ undefined ->
+ undefined;
+ VHost ->
+ VHost
+ end.
+
+get_vhost_from_port_mapping(_Port, not_found) ->
+ undefined;
+get_vhost_from_port_mapping(Port, Mapping) ->
+ M = rabbit_data_coercion:to_proplist(Mapping),
+ Res = case rabbit_misc:pget(rabbit_data_coercion:to_binary(Port), M) of
+ undefined ->
+ undefined;
+ VHost ->
+ VHost
+ end,
+ Res.
+
+human_readable_vhost_lookup_strategy(vhost_in_username_or_default) ->
+ "vhost in username or default";
+human_readable_vhost_lookup_strategy(port_to_vhost_mapping) ->
+ "MQTT port to vhost mapping";
+human_readable_vhost_lookup_strategy(cert_to_vhost_mapping) ->
+ "client certificate to vhost mapping";
+human_readable_vhost_lookup_strategy(default_vhost) ->
+ "plugin configuration or default";
+human_readable_vhost_lookup_strategy(Val) ->
+ atom_to_list(Val).
+
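+%% Resolve the effective credentials from the CONNECT frame, the TLS
+%% certificate login and the configured defaults, in that order of
+%% priority; e.g. creds("guest", "guest", none) yields {<<"guest">>, <<"guest">>}.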
+creds(User, Pass, SSLLoginName) ->
+ DefaultUser = rabbit_mqtt_util:env(default_user),
+ DefaultPass = rabbit_mqtt_util:env(default_pass),
+ {ok, Anon} = application:get_env(?APP, allow_anonymous),
+ {ok, TLSAuth} = application:get_env(?APP, ssl_cert_login),
+ HaveDefaultCreds = Anon =:= true andalso
+ is_binary(DefaultUser) andalso
+ is_binary(DefaultPass),
+
+ CredentialsProvided = User =/= undefined orelse
+ Pass =/= undefined,
+
+ CorrectCredentials = is_list(User) andalso
+ is_list(Pass),
+
+ SSLLoginProvided = TLSAuth =:= true andalso
+ SSLLoginName =/= none,
+
+ case {CredentialsProvided, CorrectCredentials, SSLLoginProvided, HaveDefaultCreds} of
+ %% Username and password take priority
+ {true, true, _, _} -> {list_to_binary(User),
+ list_to_binary(Pass)};
+ %% Either username or password is provided
+ {true, false, _, _} -> {invalid_creds, {User, Pass}};
+ %% rabbitmq_mqtt.ssl_cert_login is true. SSL user name provided.
+ %% Authenticating using username only.
+ {false, false, true, _} -> {SSLLoginName, none};
+ %% Anonymous connection uses default credentials
+ {false, false, false, true} -> {DefaultUser, DefaultPass};
+ _ -> nocreds
+ end.
+
+supported_subs_qos(?QOS_0) -> ?QOS_0;
+supported_subs_qos(?QOS_1) -> ?QOS_1;
+supported_subs_qos(?QOS_2) -> ?QOS_1.
+
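+%% Map MQTT QoS onto AMQP delivery modes: QoS 0 becomes transient (1),
+%% QoS 1 and 2 become persistent (2).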
+delivery_mode(?QOS_0) -> 1;
+delivery_mode(?QOS_1) -> 2;
+delivery_mode(?QOS_2) -> 2.
+
+%% different qos subscriptions are received in different queues
+%% with appropriate durability and timeout arguments
+%% this will lead to duplicate messages for overlapping subscriptions
+%% with different qos values - todo: prevent duplicates
+ensure_queue(Qos, #proc_state{ channels = {Channel, _},
+ client_id = ClientId,
+ clean_sess = CleanSess,
+ consumer_tags = {TagQ0, TagQ1} = Tags} = PState) ->
+ {QueueQ0, QueueQ1} = rabbit_mqtt_util:subcription_queue_name(ClientId),
+ Qos1Args = case {rabbit_mqtt_util:env(subscription_ttl), CleanSess} of
+ {undefined, _} ->
+ [];
+ {Ms, false} when is_integer(Ms) ->
+ [{<<"x-expires">>, long, Ms}];
+ _ ->
+ []
+ end,
+ QueueSetup =
+ case {TagQ0, TagQ1, Qos} of
+ {undefined, _, ?QOS_0} ->
+ {QueueQ0,
+ #'queue.declare'{ queue = QueueQ0,
+ durable = false,
+ auto_delete = true },
+ #'basic.consume'{ queue = QueueQ0,
+ no_ack = true }};
+ {_, undefined, ?QOS_1} ->
+ {QueueQ1,
+ #'queue.declare'{ queue = QueueQ1,
+ durable = true,
+ %% Clean session means a transient connection,
+ %% translating into auto-delete.
+ %%
+ %% see rabbitmq/rabbitmq-mqtt#37
+ auto_delete = CleanSess,
+ arguments = Qos1Args },
+ #'basic.consume'{ queue = QueueQ1,
+ no_ack = false }};
+ {_, _, ?QOS_0} ->
+ {exists, QueueQ0};
+ {_, _, ?QOS_1} ->
+ {exists, QueueQ1}
+ end,
+ case QueueSetup of
+ {Queue, Declare, Consume} ->
+ #'queue.declare_ok'{} = amqp_channel:call(Channel, Declare),
+ #'basic.consume_ok'{ consumer_tag = Tag } =
+ amqp_channel:call(Channel, Consume),
+ {Queue, PState #proc_state{ consumer_tags = setelement(Qos+1, Tags, Tag) }};
+ {exists, Q} ->
+ {Q, PState}
+ end.
+
+send_will(PState = #proc_state{will_msg = undefined}) ->
+ PState;
+
+send_will(PState = #proc_state{will_msg = WillMsg = #mqtt_msg{retain = Retain,
+ topic = Topic},
+ retainer_pid = RPid,
+ channels = {ChQos0, ChQos1},
+ amqp2mqtt_fun = Amqp2MqttFun}) ->
+ case check_topic_access(Topic, write, PState) of
+ ok ->
+ amqp_pub(WillMsg, PState),
+ case Retain of
+ false -> ok;
+ true ->
+ hand_off_to_retainer(RPid, Amqp2MqttFun, Topic, WillMsg)
+ end;
+ Error ->
+ rabbit_log:warning(
+ "Could not send last will: ~p~n",
+ [Error])
+ end,
+ case ChQos1 of
+ undefined -> ok;
+ _ -> amqp_channel:close(ChQos1)
+ end,
+ case ChQos0 of
+ undefined -> ok;
+ _ -> amqp_channel:close(ChQos0)
+ end,
+ PState #proc_state{ channels = {undefined, undefined} }.
+
+amqp_pub(undefined, PState) ->
+ PState;
+
+%% set up a qos1 publishing channel if necessary
+%% this channel will only be used for publishing, not consuming
+amqp_pub(Msg = #mqtt_msg{ qos = ?QOS_1 },
+ PState = #proc_state{ channels = {ChQos0, undefined},
+ awaiting_seqno = undefined,
+ connection = Conn }) ->
+ {ok, Channel} = amqp_connection:open_channel(Conn),
+ #'confirm.select_ok'{} = amqp_channel:call(Channel, #'confirm.select'{}),
+ amqp_channel:register_confirm_handler(Channel, self()),
+ amqp_pub(Msg, PState #proc_state{ channels = {ChQos0, Channel},
+ awaiting_seqno = 1 });
+
+amqp_pub(#mqtt_msg{ qos = Qos,
+ topic = Topic,
+ dup = Dup,
+ message_id = MessageId,
+ payload = Payload },
+ PState = #proc_state{ channels = {ChQos0, ChQos1},
+ exchange = Exchange,
+ unacked_pubs = UnackedPubs,
+ awaiting_seqno = SeqNo,
+ mqtt2amqp_fun = Mqtt2AmqpFun }) ->
+ RoutingKey = Mqtt2AmqpFun(Topic),
+ Method = #'basic.publish'{ exchange = Exchange,
+ routing_key = RoutingKey },
+ Headers = [{<<"x-mqtt-publish-qos">>, byte, Qos},
+ {<<"x-mqtt-dup">>, bool, Dup}],
+ Msg = #amqp_msg{ props = #'P_basic'{ headers = Headers,
+ delivery_mode = delivery_mode(Qos)},
+ payload = Payload },
+ {UnackedPubs1, Ch, SeqNo1} =
+ case Qos =:= ?QOS_1 andalso MessageId =/= undefined of
+ true -> {gb_trees:enter(SeqNo, MessageId, UnackedPubs), ChQos1,
+ SeqNo + 1};
+ false -> {UnackedPubs, ChQos0, SeqNo}
+ end,
+ amqp_channel:cast_flow(Ch, Method, Msg),
+ PState #proc_state{ unacked_pubs = UnackedPubs1,
+ awaiting_seqno = SeqNo1 }.
+
+adapter_info(Sock, ProtoName) ->
+ amqp_connection:socket_adapter_info(Sock, {ProtoName, "N/A"}).
+
+set_proto_version(AdapterInfo = #amqp_adapter_info{protocol = {Proto, _}}, Vsn) ->
+ AdapterInfo#amqp_adapter_info{protocol = {Proto,
+ human_readable_mqtt_version(Vsn)}}.
+
+human_readable_mqtt_version(3) ->
+ "3.1.0";
+human_readable_mqtt_version(4) ->
+ "3.1.1";
+human_readable_mqtt_version(_) ->
+ "N/A".
+
+serialise_and_send_to_client(Frame, #proc_state{ socket = Sock }) ->
+ try rabbit_net:port_command(Sock, rabbit_mqtt_frame:serialise(Frame)) of
+ Res ->
+ Res
+ catch _:Error ->
+ rabbit_log_connection:error("MQTT: a socket write failed, the socket might already be closed"),
+ rabbit_log_connection:debug("Failed to write to socket ~p, error: ~p, frame: ~p",
+ [Sock, Error, Frame])
+ end.
+
+close_connection(PState = #proc_state{ connection = undefined }) ->
+ PState;
+close_connection(PState = #proc_state{ connection = Connection,
+ client_id = ClientId }) ->
+ % todo: maybe clean session
+ case ClientId of
+ undefined -> ok;
+ _ ->
+ case rabbit_mqtt_collector:unregister(ClientId, self()) of
+ ok -> ok;
+ %% ignore as we are shutting down
+ {timeout, _} -> ok
+ end
+ end,
+ %% ignore noproc or other exceptions, we are shutting down
+ catch amqp_connection:close(Connection),
+ PState #proc_state{ channels = {undefined, undefined},
+ connection = undefined }.
+
+handle_pre_hibernate() ->
+ erase(topic_permission_cache),
+ ok.
+
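+%% Ra events track the asynchronous client ID registration: a pending
+%% correlation is either applied, retried against the current leader,
+%% or re-issued after a timeout.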
+handle_ra_event({applied, [{Corr, ok}]},
+ PState = #proc_state{register_state = {pending, Corr}}) ->
+    %% success: the command was applied; transition into the registered state
+ PState#proc_state{register_state = registered};
+handle_ra_event({not_leader, Leader, Corr},
+ PState = #proc_state{register_state = {pending, Corr},
+ client_id = ClientId}) ->
+ %% retry command against actual leader
+ {ok, NewCorr} = rabbit_mqtt_collector:register(Leader, ClientId, self()),
+ PState#proc_state{register_state = {pending, NewCorr}};
+handle_ra_event(register_timeout,
+ PState = #proc_state{register_state = {pending, _Corr},
+ client_id = ClientId}) ->
+ {ok, NewCorr} = rabbit_mqtt_collector:register(ClientId, self()),
+ PState#proc_state{register_state = {pending, NewCorr}};
+handle_ra_event(register_timeout, PState) ->
+ PState;
+handle_ra_event(Evt, PState) ->
+    %% any other Ra event is logged at debug level and otherwise ignored
+    rabbit_log:debug("unhandled ra_event: ~w~n", [Evt]),
+ PState.
+
+%% NB: check_*: the MQTT spec says we should ack normally, i.e. pretend there
+%% was no auth error, but here we close the connection with an error instead.
+%% This is what happens anyway on an authorization failure at the AMQP 0-9-1 level.
+
+check_publish(TopicName, Fn, PState) ->
+ case check_topic_access(TopicName, write, PState) of
+ ok -> Fn();
+ _ -> {error, unauthorized, PState}
+ end.
+
+check_subscribe([], Fn, _) ->
+ Fn();
+
+check_subscribe([#mqtt_topic{name = TopicName} | Topics], Fn, PState) ->
+ case check_topic_access(TopicName, read, PState) of
+ ok -> check_subscribe(Topics, Fn, PState);
+ _ -> {error, unauthorized, PState}
+ end.
+
+check_topic_access(TopicName, Access,
+ #proc_state{
+ auth_state = #auth_state{user = User = #user{username = Username},
+ vhost = VHost},
+ exchange = Exchange,
+ client_id = ClientId,
+ mqtt2amqp_fun = Mqtt2AmqpFun }) ->
+ Cache =
+ case get(topic_permission_cache) of
+ undefined -> [];
+ Other -> Other
+ end,
+
+ Key = {TopicName, Username, ClientId, VHost, Exchange, Access},
+ case lists:member(Key, Cache) of
+ true ->
+ ok;
+ false ->
+ Resource = #resource{virtual_host = VHost,
+ kind = topic,
+ name = Exchange},
+
+ RoutingKey = Mqtt2AmqpFun(TopicName),
+ Context = #{routing_key => RoutingKey,
+ variable_map => #{
+ <<"username">> => Username,
+ <<"vhost">> => VHost,
+ <<"client_id">> => rabbit_data_coercion:to_binary(ClientId)
+ }
+ },
+
+ try rabbit_access_control:check_topic_access(User, Resource, Access, Context) of
+ ok ->
+ CacheTail = lists:sublist(Cache, ?MAX_TOPIC_PERMISSION_CACHE_SIZE - 1),
+ put(topic_permission_cache, [Key | CacheTail]),
+ ok;
+ R ->
+ R
+ catch
+ _:{amqp_error, access_refused, Msg, _} ->
+ rabbit_log:error("operation resulted in an error (access_refused): ~p~n", [Msg]),
+ {error, access_refused};
+ _:Error ->
+ rabbit_log:error("~p~n", [Error]),
+ {error, access_refused}
+ end
+ end.
+
+info(consumer_tags, #proc_state{consumer_tags = Val}) -> Val;
+info(unacked_pubs, #proc_state{unacked_pubs = Val}) -> Val;
+info(awaiting_ack, #proc_state{awaiting_ack = Val}) -> Val;
+info(awaiting_seqno, #proc_state{awaiting_seqno = Val}) -> Val;
+info(message_id, #proc_state{message_id = Val}) -> Val;
+info(client_id, #proc_state{client_id = Val}) ->
+ rabbit_data_coercion:to_binary(Val);
+info(clean_sess, #proc_state{clean_sess = Val}) -> Val;
+info(will_msg, #proc_state{will_msg = Val}) -> Val;
+info(channels, #proc_state{channels = Val}) -> Val;
+info(exchange, #proc_state{exchange = Val}) -> Val;
+info(adapter_info, #proc_state{adapter_info = Val}) -> Val;
+info(ssl_login_name, #proc_state{ssl_login_name = Val}) -> Val;
+info(retainer_pid, #proc_state{retainer_pid = Val}) -> Val;
+info(user, #proc_state{auth_state = #auth_state{username = Val}}) -> Val;
+info(vhost, #proc_state{auth_state = #auth_state{vhost = Val}}) -> Val;
+info(host, #proc_state{adapter_info = #amqp_adapter_info{host = Val}}) -> Val;
+info(port, #proc_state{adapter_info = #amqp_adapter_info{port = Val}}) -> Val;
+info(peer_host, #proc_state{adapter_info = #amqp_adapter_info{peer_host = Val}}) -> Val;
+info(peer_port, #proc_state{adapter_info = #amqp_adapter_info{peer_port = Val}}) -> Val;
+info(protocol, #proc_state{adapter_info = #amqp_adapter_info{protocol = Val}}) ->
+ case Val of
+ {Proto, Version} -> {Proto, rabbit_data_coercion:to_binary(Version)};
+ Other -> Other
+ end;
+info(channels, PState) -> additional_info(channels, PState);
+info(channel_max, PState) -> additional_info(channel_max, PState);
+info(frame_max, PState) -> additional_info(frame_max, PState);
+info(client_properties, PState) -> additional_info(client_properties, PState);
+info(ssl, PState) -> additional_info(ssl, PState);
+info(ssl_protocol, PState) -> additional_info(ssl_protocol, PState);
+info(ssl_key_exchange, PState) -> additional_info(ssl_key_exchange, PState);
+info(ssl_cipher, PState) -> additional_info(ssl_cipher, PState);
+info(ssl_hash, PState) -> additional_info(ssl_hash, PState);
+info(Other, _) -> throw({bad_argument, Other}).
+
+
+additional_info(Key,
+ #proc_state{adapter_info =
+ #amqp_adapter_info{additional_info = AddInfo}}) ->
+ proplists:get_value(Key, AddInfo).
diff --git a/deps/rabbitmq_mqtt/src/rabbit_mqtt_reader.erl b/deps/rabbitmq_mqtt/src/rabbit_mqtt_reader.erl
new file mode 100644
index 0000000000..39c0761321
--- /dev/null
+++ b/deps/rabbitmq_mqtt/src/rabbit_mqtt_reader.erl
@@ -0,0 +1,480 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mqtt_reader).
+
+%% Transitional step until we can require Erlang/OTP 21 and
+%% use the now recommended try/catch syntax for obtaining the stack trace.
+-compile(nowarn_deprecated_function).
+
+-behaviour(gen_server2).
+
+-export([start_link/2]).
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
+ code_change/3, terminate/2, handle_pre_hibernate/1]).
+
+-export([conserve_resources/3, start_keepalive/2,
+ close_connection/2]).
+
+-export([ssl_login_name/1]).
+-export([info/2]).
+
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include("rabbit_mqtt.hrl").
+
+-define(SIMPLE_METRICS, [pid, recv_oct, send_oct, reductions]).
+-define(OTHER_METRICS, [recv_cnt, send_cnt, send_pend, garbage_collection, state]).
+
+%%----------------------------------------------------------------------------
+
+start_link(KeepaliveSup, Ref) ->
+ Pid = proc_lib:spawn_link(?MODULE, init,
+ [[KeepaliveSup, Ref]]),
+
+ {ok, Pid}.
+
+conserve_resources(Pid, _, {_, Conserve, _}) ->
+ Pid ! {conserve_resources, Conserve},
+ ok.
+
+info(Pid, InfoItems) ->
+ case InfoItems -- ?INFO_ITEMS of
+ [] -> gen_server2:call(Pid, {info, InfoItems});
+ UnknownItems -> throw({bad_argument, UnknownItems})
+ end.
+
+close_connection(Pid, Reason) ->
+ gen_server:cast(Pid, {close_connection, Reason}).
+
+%%----------------------------------------------------------------------------
+
+init([KeepaliveSup, Ref]) ->
+ process_flag(trap_exit, true),
+ {ok, Sock} = rabbit_networking:handshake(Ref,
+ application:get_env(rabbitmq_mqtt, proxy_protocol, false)),
+ RealSocket = rabbit_net:unwrap_socket(Sock),
+ case rabbit_net:connection_string(Sock, inbound) of
+ {ok, ConnStr} ->
+ rabbit_log_connection:debug("MQTT accepting TCP connection ~p (~s)~n", [self(), ConnStr]),
+ rabbit_alarm:register(
+ self(), {?MODULE, conserve_resources, []}),
+            ProcessorState = rabbit_mqtt_processor:initial_state(Sock, ssl_login_name(RealSocket)),
+ gen_server2:enter_loop(?MODULE, [],
+ rabbit_event:init_stats_timer(
+ control_throttle(
+ #state{socket = RealSocket,
+ conn_name = ConnStr,
+ await_recv = false,
+ connection_state = running,
+ received_connect_frame = false,
+ keepalive = {none, none},
+ keepalive_sup = KeepaliveSup,
+ conserve = false,
+ parse_state = rabbit_mqtt_frame:initial_state(),
+ proc_state = ProcessorState }), #state.stats_timer),
+ {backoff, 1000, 1000, 10000});
+ {network_error, Reason} ->
+ rabbit_net:fast_close(RealSocket),
+ terminate({shutdown, Reason}, undefined);
+ {error, enotconn} ->
+ rabbit_net:fast_close(RealSocket),
+ terminate(shutdown, undefined);
+ {error, Reason} ->
+ rabbit_net:fast_close(RealSocket),
+ terminate({network_error, Reason}, undefined)
+ end.
+
+handle_call({info, InfoItems}, _From, State) ->
+ Infos = lists:map(
+ fun(InfoItem) ->
+ {InfoItem, info_internal(InfoItem, State)}
+ end,
+ InfoItems),
+ {reply, Infos, State};
+
+handle_call(Msg, From, State) ->
+ {stop, {mqtt_unexpected_call, Msg, From}, State}.
+
+handle_cast(duplicate_id,
+ State = #state{ proc_state = PState,
+ conn_name = ConnName }) ->
+ rabbit_log_connection:warning("MQTT disconnecting client ~p with duplicate id '~s'~n",
+ [ConnName, rabbit_mqtt_processor:info(client_id, PState)]),
+ {stop, {shutdown, duplicate_id}, State};
+
+handle_cast(decommission_node,
+ State = #state{ proc_state = PState,
+ conn_name = ConnName }) ->
+ rabbit_log_connection:warning("MQTT disconnecting client ~p with client ID '~s' as its node is about"
+ " to be decommissioned~n",
+ [ConnName, rabbit_mqtt_processor:info(client_id, PState)]),
+ {stop, {shutdown, decommission_node}, State};
+
+handle_cast({close_connection, Reason},
+ State = #state{conn_name = ConnName, proc_state = PState}) ->
+ rabbit_log_connection:warning("MQTT disconnecting client ~p with client ID '~s', reason: ~s",
+ [ConnName, rabbit_mqtt_processor:info(client_id, PState), Reason]),
+ {stop, {shutdown, server_initiated_close}, State};
+
+handle_cast(Msg, State) ->
+ {stop, {mqtt_unexpected_cast, Msg}, State}.
+
+handle_info({#'basic.deliver'{}, #amqp_msg{}, _DeliveryCtx} = Delivery,
+ State = #state{ proc_state = ProcState }) ->
+ callback_reply(State, rabbit_mqtt_processor:amqp_callback(Delivery,
+ ProcState));
+
+handle_info(#'basic.ack'{} = Ack, State = #state{ proc_state = ProcState }) ->
+ callback_reply(State, rabbit_mqtt_processor:amqp_callback(Ack, ProcState));
+
+handle_info(#'basic.consume_ok'{}, State) ->
+ {noreply, State, hibernate};
+
+handle_info(#'basic.cancel'{}, State) ->
+ {stop, {shutdown, subscription_cancelled}, State};
+
+handle_info({'EXIT', _Conn, Reason}, State) ->
+ {stop, {connection_died, Reason}, State};
+
+handle_info({Tag, Sock, Data},
+ State = #state{ socket = Sock, connection_state = blocked })
+ when Tag =:= tcp; Tag =:= ssl ->
+ {noreply, State#state{ deferred_recv = Data }, hibernate};
+
+handle_info({Tag, Sock, Data},
+ State = #state{ socket = Sock, connection_state = running })
+ when Tag =:= tcp; Tag =:= ssl ->
+ process_received_bytes(
+ Data, control_throttle(State #state{ await_recv = false }));
+
+handle_info({Tag, Sock}, State = #state{socket = Sock})
+ when Tag =:= tcp_closed; Tag =:= ssl_closed ->
+ network_error(closed, State);
+
+handle_info({Tag, Sock, Reason}, State = #state{socket = Sock})
+ when Tag =:= tcp_error; Tag =:= ssl_error ->
+ network_error(Reason, State);
+
+handle_info({inet_reply, Sock, ok}, State = #state{socket = Sock}) ->
+ {noreply, State, hibernate};
+
+handle_info({inet_reply, Sock, {error, Reason}}, State = #state{socket = Sock}) ->
+ network_error(Reason, State);
+
+handle_info({conserve_resources, Conserve}, State) ->
+ maybe_process_deferred_recv(
+ control_throttle(State #state{ conserve = Conserve }));
+
+handle_info({bump_credit, Msg}, State) ->
+ credit_flow:handle_bump_msg(Msg),
+ maybe_process_deferred_recv(control_throttle(State));
+
+handle_info({start_keepalives, Keepalive},
+ State = #state { keepalive_sup = KeepaliveSup, socket = Sock }) ->
+    %% Only the client is responsible for sending keepalives
+ SendFun = fun() -> ok end,
+ Parent = self(),
+ ReceiveFun = fun() -> Parent ! keepalive_timeout end,
+ Heartbeater = rabbit_heartbeat:start(
+ KeepaliveSup, Sock, 0, SendFun, Keepalive, ReceiveFun),
+ {noreply, State #state { keepalive = Heartbeater }};
+
+handle_info(keepalive_timeout, State = #state {conn_name = ConnStr,
+ proc_state = PState}) ->
+ rabbit_log_connection:error("closing MQTT connection ~p (keepalive timeout)~n", [ConnStr]),
+ send_will_and_terminate(PState, {shutdown, keepalive_timeout}, State);
+
+handle_info(emit_stats, State) ->
+ {noreply, emit_stats(State), hibernate};
+
+handle_info({ra_event, _From, Evt},
+ #state{proc_state = PState} = State) ->
+ %% handle applied event to ensure registration command actually got applied
+ %% handle not_leader notification in case we send the command to a non-leader
+ PState1 = rabbit_mqtt_processor:handle_ra_event(Evt, PState),
+ {noreply, State#state{proc_state = PState1}, hibernate};
+
+handle_info(Msg, State) ->
+ {stop, {mqtt_unexpected_msg, Msg}, State}.
+
+terminate(Reason, State) ->
+ maybe_emit_stats(State),
+ do_terminate(Reason, State).
+
+handle_pre_hibernate(State) ->
+ rabbit_mqtt_processor:handle_pre_hibernate(),
+ {hibernate, State}.
+
+do_terminate({network_error, {ssl_upgrade_error, closed}, ConnStr}, _State) ->
+ rabbit_log_connection:error("MQTT detected TLS upgrade error on ~s: connection closed~n",
+ [ConnStr]);
+
+do_terminate({network_error,
+ {ssl_upgrade_error,
+ {tls_alert, "handshake failure"}}, ConnStr}, _State) ->
+ log_tls_alert(handshake_failure, ConnStr);
+do_terminate({network_error,
+ {ssl_upgrade_error,
+ {tls_alert, "unknown ca"}}, ConnStr}, _State) ->
+ log_tls_alert(unknown_ca, ConnStr);
+do_terminate({network_error,
+ {ssl_upgrade_error,
+ {tls_alert, {Err, _}}}, ConnStr}, _State) ->
+ log_tls_alert(Err, ConnStr);
+do_terminate({network_error,
+ {ssl_upgrade_error,
+ {tls_alert, Alert}}, ConnStr}, _State) ->
+ log_tls_alert(Alert, ConnStr);
+do_terminate({network_error, {ssl_upgrade_error, Reason}, ConnStr}, _State) ->
+ rabbit_log_connection:error("MQTT detected TLS upgrade error on ~s: ~p~n",
+ [ConnStr, Reason]);
+
+do_terminate({network_error, Reason, ConnStr}, _State) ->
+ rabbit_log_connection:error("MQTT detected network error on ~s: ~p~n",
+ [ConnStr, Reason]);
+
+do_terminate({network_error, Reason}, _State) ->
+ rabbit_log_connection:error("MQTT detected network error: ~p~n", [Reason]);
+
+do_terminate(normal, #state{proc_state = ProcState,
+ conn_name = ConnName}) ->
+ rabbit_mqtt_processor:close_connection(ProcState),
+ rabbit_log_connection:info("closing MQTT connection ~p (~s)~n", [self(), ConnName]),
+ ok;
+
+do_terminate(_Reason, #state{proc_state = ProcState}) ->
+ rabbit_mqtt_processor:close_connection(ProcState),
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+ssl_login_name(Sock) ->
+ case rabbit_net:peercert(Sock) of
+ {ok, C} -> case rabbit_ssl:peer_cert_auth_name(C) of
+ unsafe -> none;
+ not_found -> none;
+ Name -> Name
+ end;
+ {error, no_peercert} -> none;
+ nossl -> none
+ end.
+
+%%----------------------------------------------------------------------------
+
+log_tls_alert(handshake_failure, ConnStr) ->
+ rabbit_log_connection:error("MQTT detected TLS upgrade error on ~s: handshake failure~n",
+ [ConnStr]);
+log_tls_alert(unknown_ca, ConnStr) ->
+ rabbit_log_connection:error("MQTT detected TLS certificate verification error on ~s: alert 'unknown CA'~n",
+ [ConnStr]);
+log_tls_alert(Alert, ConnStr) ->
+ rabbit_log_connection:error("MQTT detected TLS upgrade error on ~s: alert ~s~n",
+ [ConnStr, Alert]).
+
+log_new_connection(#state{conn_name = ConnStr, proc_state = PState}) ->
+ rabbit_log_connection:info("accepting MQTT connection ~p (~s, client id: ~s)~n",
+ [self(), ConnStr, rabbit_mqtt_processor:info(client_id, PState)]).
+
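+%% Once the receive buffer first drains with a connection present, the
+%% CONNECT frame has been accepted: log the new connection exactly once.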
+process_received_bytes(<<>>, State = #state{proc_state = ProcState,
+ received_connect_frame = false}) ->
+ MqttConn = ProcState#proc_state.connection,
+ case MqttConn of
+ undefined -> ok;
+ _ -> log_new_connection(State)
+ end,
+ {noreply, ensure_stats_timer(State#state{ received_connect_frame = true }), hibernate};
+process_received_bytes(<<>>, State) ->
+ {noreply, ensure_stats_timer(State), hibernate};
+process_received_bytes(Bytes,
+ State = #state{ parse_state = ParseState,
+ proc_state = ProcState,
+ conn_name = ConnStr }) ->
+ case parse(Bytes, ParseState) of
+ {more, ParseState1} ->
+ {noreply,
+ ensure_stats_timer( State #state{ parse_state = ParseState1 }),
+ hibernate};
+ {ok, Frame, Rest} ->
+ case rabbit_mqtt_processor:process_frame(Frame, ProcState) of
+ {ok, ProcState1, ConnPid} ->
+ PS = rabbit_mqtt_frame:initial_state(),
+ process_received_bytes(
+ Rest,
+ State #state{ parse_state = PS,
+ proc_state = ProcState1,
+ connection = ConnPid });
+            %% authorization failures arise from PUBLISH and SUBSCRIBE frames
+ {error, unauthorized = Reason, ProcState1} ->
+ rabbit_log_connection:error("MQTT connection ~s is closing due to an authorization failure~n", [ConnStr]),
+ {stop, {shutdown, Reason}, pstate(State, ProcState1)};
+ %% CONNECT frames only
+ {error, unauthenticated = Reason, ProcState1} ->
+ rabbit_log_connection:error("MQTT connection ~s is closing due to an authentication failure~n", [ConnStr]),
+ {stop, {shutdown, Reason}, pstate(State, ProcState1)};
+ %% CONNECT frames only
+ {error, invalid_client_id = Reason, ProcState1} ->
+ rabbit_log_connection:error("MQTT cannot accept connection ~s: client uses an invalid ID~n", [ConnStr]),
+ {stop, {shutdown, Reason}, pstate(State, ProcState1)};
+ %% CONNECT frames only
+ {error, unsupported_protocol_version = Reason, ProcState1} ->
+ rabbit_log_connection:error("MQTT cannot accept connection ~s: incompatible protocol version~n", [ConnStr]),
+ {stop, {shutdown, Reason}, pstate(State, ProcState1)};
+ {error, unavailable = Reason, ProcState1} ->
+ rabbit_log_connection:error("MQTT cannot accept connection ~s due to an internal error or unavailable component~n",
+ [ConnStr]),
+ {stop, {shutdown, Reason}, pstate(State, ProcState1)};
+ {error, Reason, ProcState1} ->
+ rabbit_log_connection:error("MQTT protocol error on connection ~s: ~p~n",
+ [ConnStr, Reason]),
+ {stop, {shutdown, Reason}, pstate(State, ProcState1)};
+ {error, Error} ->
+ rabbit_log_connection:error("MQTT detected a framing error on connection ~s: ~p~n",
+ [ConnStr, Error]),
+ {stop, {shutdown, Error}, State};
+ {stop, ProcState1} ->
+ {stop, normal, pstate(State, ProcState1)}
+ end;
+ {error, {cannot_parse, Error, Stacktrace}} ->
+ rabbit_log_connection:error("MQTT cannot parse a frame on connection '~s', unparseable payload: ~p, error: {~p, ~p} ~n",
+ [ConnStr, Bytes, Error, Stacktrace]),
+ {stop, {shutdown, Error}, State};
+ {error, Error} ->
+ rabbit_log_connection:error("MQTT detected a framing error on connection ~s: ~p~n",
+ [ConnStr, Error]),
+ {stop, {shutdown, Error}, State}
+ end.
+
+callback_reply(State, {ok, ProcState}) ->
+ {noreply, pstate(State, ProcState), hibernate};
+callback_reply(State, {error, Reason, ProcState}) ->
+ {stop, Reason, pstate(State, ProcState)}.
+
+start_keepalive(_, 0 ) -> ok;
+start_keepalive(Pid, Keepalive) -> Pid ! {start_keepalives, Keepalive}.
+
+pstate(State = #state {}, PState = #proc_state{}) ->
+ State #state{ proc_state = PState }.
+
+%%----------------------------------------------------------------------------
+parse(Bytes, ParseState) ->
+ try
+ rabbit_mqtt_frame:parse(Bytes, ParseState)
+ catch
+ _:Reason:Stacktrace ->
+ {error, {cannot_parse, Reason, Stacktrace}}
+ end.
+
+send_will_and_terminate(PState, State) ->
+ send_will_and_terminate(PState, {shutdown, conn_closed}, State).
+
+send_will_and_terminate(PState, Reason, State = #state{conn_name = ConnStr}) ->
+ rabbit_mqtt_processor:send_will(PState),
+ rabbit_log_connection:debug("MQTT: about to send will message (if any) on connection ~p", [ConnStr]),
+ % todo: flush channel after publish
+ {stop, Reason, State}.
+
+network_error(closed,
+ State = #state{conn_name = ConnStr,
+ proc_state = PState}) ->
+ MqttConn = PState#proc_state.connection,
+ Fmt = "MQTT connection ~p will terminate because peer closed TCP connection~n",
+ Args = [ConnStr],
+ case MqttConn of
+ undefined -> rabbit_log_connection:debug(Fmt, Args);
+ _ -> rabbit_log_connection:info(Fmt, Args)
+ end,
+ send_will_and_terminate(PState, State);
+
+network_error(Reason,
+ State = #state{conn_name = ConnStr,
+ proc_state = PState}) ->
+ rabbit_log_connection:info("MQTT detected network error for ~p: ~p~n",
+ [ConnStr, Reason]),
+ send_will_and_terminate(PState, State).
+
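+%% Ask the socket for the next packet with {active, once}, unless reads
+%% are blocked, deferred data is pending, or a receive is outstanding.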
+run_socket(State = #state{ connection_state = blocked }) ->
+ State;
+run_socket(State = #state{ deferred_recv = Data }) when Data =/= undefined ->
+ State;
+run_socket(State = #state{ await_recv = true }) ->
+ State;
+run_socket(State = #state{ socket = Sock }) ->
+ rabbit_net:setopts(Sock, [{active, once}]),
+ State#state{ await_recv = true }.
+
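+%% Pause reads and the keepalive monitor while memory/disk alarms or
+%% credit flow block this connection; resume both once unblocked.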
+control_throttle(State = #state{ connection_state = Flow,
+ conserve = Conserve }) ->
+ case {Flow, Conserve orelse credit_flow:blocked()} of
+ {running, true} -> ok = rabbit_heartbeat:pause_monitor(
+ State#state.keepalive),
+ State #state{ connection_state = blocked };
+ {blocked, false} -> ok = rabbit_heartbeat:resume_monitor(
+ State#state.keepalive),
+ run_socket(State #state{
+ connection_state = running });
+ {_, _} -> run_socket(State)
+ end.
+
+maybe_process_deferred_recv(State = #state{ deferred_recv = undefined }) ->
+ {noreply, State, hibernate};
+maybe_process_deferred_recv(State = #state{ deferred_recv = Data, socket = Sock }) ->
+ handle_info({tcp, Sock, Data},
+ State#state{ deferred_recv = undefined }).
+
+maybe_emit_stats(undefined) ->
+ ok;
+maybe_emit_stats(State) ->
+ rabbit_event:if_enabled(State, #state.stats_timer,
+ fun() -> emit_stats(State) end).
+
+emit_stats(State=#state{connection = C}) when C == none; C == undefined ->
+    %% Avoid emitting stats on terminate when the connection has not yet been
+    %% established, as this would create orphan entries in the stats database
+ State1 = rabbit_event:reset_stats_timer(State, #state.stats_timer),
+ ensure_stats_timer(State1);
+emit_stats(State) ->
+ [{_, Pid}, {_, Recv_oct}, {_, Send_oct}, {_, Reductions}] = I
+ = infos(?SIMPLE_METRICS, State),
+ Infos = infos(?OTHER_METRICS, State),
+ rabbit_core_metrics:connection_stats(Pid, Infos),
+ rabbit_core_metrics:connection_stats(Pid, Recv_oct, Send_oct, Reductions),
+ rabbit_event:notify(connection_stats, Infos ++ I),
+ State1 = rabbit_event:reset_stats_timer(State, #state.stats_timer),
+ ensure_stats_timer(State1).
+
+ensure_stats_timer(State = #state{}) ->
+ rabbit_event:ensure_stats_timer(State, #state.stats_timer, emit_stats).
+
+infos(Items, State) -> [{Item, info_internal(Item, State)} || Item <- Items].
+
+info_internal(pid, State) -> info_internal(connection, State);
+info_internal(SockStat, #state{socket = Sock}) when SockStat =:= recv_oct;
+ SockStat =:= recv_cnt;
+ SockStat =:= send_oct;
+ SockStat =:= send_cnt;
+ SockStat =:= send_pend ->
+ case rabbit_net:getstat(Sock, [SockStat]) of
+ {ok, [{_, N}]} when is_number(N) -> N;
+ _ -> 0
+ end;
+info_internal(state, State) -> info_internal(connection_state, State);
+info_internal(garbage_collection, _State) ->
+ rabbit_misc:get_gc_info(self());
+info_internal(reductions, _State) ->
+ {reductions, Reductions} = erlang:process_info(self(), reductions),
+ Reductions;
+info_internal(conn_name, #state{conn_name = Val}) ->
+ rabbit_data_coercion:to_binary(Val);
+info_internal(connection_state, #state{received_connect_frame = false}) ->
+ starting;
+info_internal(connection_state, #state{connection_state = Val}) ->
+ Val;
+info_internal(connection, #state{connection = Val}) ->
+ Val;
+info_internal(Key, #state{proc_state = ProcState}) ->
+ rabbit_mqtt_processor:info(Key, ProcState).
diff --git a/deps/rabbitmq_mqtt/src/rabbit_mqtt_retained_msg_store.erl b/deps/rabbitmq_mqtt/src/rabbit_mqtt_retained_msg_store.erl
new file mode 100644
index 0000000000..4b3ee95743
--- /dev/null
+++ b/deps/rabbitmq_mqtt/src/rabbit_mqtt_retained_msg_store.erl
@@ -0,0 +1,23 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mqtt_retained_msg_store).
+
+-export([behaviour_info/1, table_name_for/1]).
+
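+%% Behaviour contract for pluggable retained-message stores; see the
+%% DETS, ETS and no-op implementations that accompany this module.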
+behaviour_info(callbacks) ->
+ [{new, 2},
+ {recover, 2},
+ {insert, 3},
+ {lookup, 2},
+ {delete, 2},
+ {terminate, 1}];
+behaviour_info(_Other) ->
+ undefined.
+
+table_name_for(VHost) ->
+ rabbit_mqtt_util:vhost_name_to_table_name(VHost).
diff --git a/deps/rabbitmq_mqtt/src/rabbit_mqtt_retained_msg_store_dets.erl b/deps/rabbitmq_mqtt/src/rabbit_mqtt_retained_msg_store_dets.erl
new file mode 100644
index 0000000000..03c5942d35
--- /dev/null
+++ b/deps/rabbitmq_mqtt/src/rabbit_mqtt_retained_msg_store_dets.erl
@@ -0,0 +1,54 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mqtt_retained_msg_store_dets).
+
+-behaviour(rabbit_mqtt_retained_msg_store).
+-include("rabbit_mqtt.hrl").
+
+-export([new/2, recover/2, insert/3, lookup/2, delete/2, terminate/1]).
+
+-record(store_state, {
+ %% DETS table name
+ table
+}).
+
+
+new(Dir, VHost) ->
+  %% dets:open_file/2 returns {ok, Name}; match it out so the state
+  %% holds the dets table name itself rather than the result tuple
+  {ok, Tid} = open_table(Dir, VHost),
+  #store_state{table = Tid}.
+
+recover(Dir, VHost) ->
+ case open_table(Dir, VHost) of
+ {error, _} -> {error, uninitialized};
+ {ok, Tid} -> {ok, #store_state{table = Tid}}
+ end.
+
+insert(Topic, Msg, #store_state{table = T}) ->
+ ok = dets:insert(T, #retained_message{topic = Topic, mqtt_msg = Msg}).
+
+lookup(Topic, #store_state{table = T}) ->
+ case dets:lookup(T, Topic) of
+ [] -> not_found;
+ [Entry] -> Entry
+ end.
+
+delete(Topic, #store_state{table = T}) ->
+ ok = dets:delete(T, Topic).
+
+terminate(#store_state{table = T}) ->
+ ok = dets:close(T).
+
+open_table(Dir, VHost) ->
+ dets:open_file(rabbit_mqtt_retained_msg_store:table_name_for(VHost),
+ table_options(rabbit_mqtt_util:path_for(Dir, VHost, ".dets"))).
+
+table_options(Path) ->
+ [{type, set}, {keypos, #retained_message.topic},
+ {file, Path}, {ram_file, true}, {repair, true},
+ {auto_save, rabbit_misc:get_env(rabbit_mqtt,
+ retained_message_store_dets_sync_interval, 2000)}].
diff --git a/deps/rabbitmq_mqtt/src/rabbit_mqtt_retained_msg_store_ets.erl b/deps/rabbitmq_mqtt/src/rabbit_mqtt_retained_msg_store_ets.erl
new file mode 100644
index 0000000000..9080a6f4cf
--- /dev/null
+++ b/deps/rabbitmq_mqtt/src/rabbit_mqtt_retained_msg_store_ets.erl
@@ -0,0 +1,54 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mqtt_retained_msg_store_ets).
+
+-behaviour(rabbit_mqtt_retained_msg_store).
+-include("rabbit_mqtt.hrl").
+
+-export([new/2, recover/2, insert/3, lookup/2, delete/2, terminate/1]).
+
+-record(store_state, {
+ %% ETS table ID
+ table,
+ %% where the table is stored on disk
+ filename
+}).
+
+
+new(Dir, VHost) ->
+ Path = rabbit_mqtt_util:path_for(Dir, VHost),
+ TableName = rabbit_mqtt_retained_msg_store:table_name_for(VHost),
+ file:delete(Path),
+ Tid = ets:new(TableName, [set, public, {keypos, #retained_message.topic}]),
+ #store_state{table = Tid, filename = Path}.
+
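+%% terminate/1 below snapshots the table to disk with ets:tab2file/3, so a
+%% readable snapshot implies a clean shutdown. The file is deleted right
+%% after loading, presumably so a later crash cannot resurrect stale data.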
+recover(Dir, VHost) ->
+ Path = rabbit_mqtt_util:path_for(Dir, VHost),
+ case ets:file2tab(Path) of
+ {ok, Tid} -> file:delete(Path),
+ {ok, #store_state{table = Tid, filename = Path}};
+ {error, _} -> {error, uninitialized}
+ end.
+
+insert(Topic, Msg, #store_state{table = T}) ->
+ true = ets:insert(T, #retained_message{topic = Topic, mqtt_msg = Msg}),
+ ok.
+
+lookup(Topic, #store_state{table = T}) ->
+ case ets:lookup(T, Topic) of
+ [] -> not_found;
+ [Entry] -> Entry
+ end.
+
+delete(Topic, #store_state{table = T}) ->
+ true = ets:delete(T, Topic),
+ ok.
+
+terminate(#store_state{table = T, filename = Path}) ->
+ ok = ets:tab2file(T, Path,
+ [{extended_info, [object_count]}]).
diff --git a/deps/rabbitmq_mqtt/src/rabbit_mqtt_retained_msg_store_noop.erl b/deps/rabbitmq_mqtt/src/rabbit_mqtt_retained_msg_store_noop.erl
new file mode 100644
index 0000000000..382ffbc63d
--- /dev/null
+++ b/deps/rabbitmq_mqtt/src/rabbit_mqtt_retained_msg_store_noop.erl
@@ -0,0 +1,31 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mqtt_retained_msg_store_noop).
+
+-behaviour(rabbit_mqtt_retained_msg_store).
+-include("rabbit_mqtt.hrl").
+
+-export([new/2, recover/2, insert/3, lookup/2, delete/2, terminate/1]).
+
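+%% No-op backend: retained messages are not stored and every lookup misses.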
+new(_Dir, _VHost) ->
+ ok.
+
+recover(_Dir, _VHost) ->
+ {ok, ok}.
+
+insert(_Topic, _Msg, _State) ->
+ ok.
+
+lookup(_Topic, _State) ->
+ not_found.
+
+delete(_Topic, _State) ->
+ ok.
+
+terminate(_State) ->
+ ok.
diff --git a/deps/rabbitmq_mqtt/src/rabbit_mqtt_retainer.erl b/deps/rabbitmq_mqtt/src/rabbit_mqtt_retainer.erl
new file mode 100644
index 0000000000..2aa873ecfb
--- /dev/null
+++ b/deps/rabbitmq_mqtt/src/rabbit_mqtt_retainer.erl
@@ -0,0 +1,98 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mqtt_retainer).
+
+-behaviour(gen_server2).
+-include("rabbit_mqtt.hrl").
+-include("rabbit_mqtt_frame.hrl").
+
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
+ terminate/2, code_change/3, start_link/2]).
+
+-export([retain/3, fetch/2, clear/2, store_module/0]).
+
+-define(SERVER, ?MODULE).
+-define(TIMEOUT, 30000).
+
+-record(retainer_state, {store_mod,
+ store}).
+
+-spec retain(pid(), string(), mqtt_msg()) -> ok.
+
+%%----------------------------------------------------------------------------
+
+start_link(RetainStoreMod, VHost) ->
+ gen_server2:start_link(?MODULE, [RetainStoreMod, VHost], []).
+
+retain(Pid, Topic, Msg = #mqtt_msg{retain = true}) ->
+ gen_server2:cast(Pid, {retain, Topic, Msg});
+
+retain(_Pid, _Topic, Msg = #mqtt_msg{retain = false}) ->
+ throw({error, {retain_is_false, Msg}}).
+
+fetch(Pid, Topic) ->
+ gen_server2:call(Pid, {fetch, Topic}, ?TIMEOUT).
+
+clear(Pid, Topic) ->
+ gen_server2:cast(Pid, {clear, Topic}).
+
+%%----------------------------------------------------------------------------
+
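+%% Recover state persisted by a previous incarnation of the store when
+%% possible, otherwise start with a fresh (empty) store.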
+init([StoreMod, VHost]) ->
+ process_flag(trap_exit, true),
+ State = case StoreMod:recover(store_dir(), VHost) of
+ {ok, Store} -> #retainer_state{store = Store,
+ store_mod = StoreMod};
+ {error, _} -> #retainer_state{store = StoreMod:new(store_dir(), VHost),
+ store_mod = StoreMod}
+ end,
+ {ok, State}.
+
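+%% The backend is configurable, e.g. in the new-style config format:
+%%   mqtt.retained_message_store = rabbit_mqtt_retained_msg_store_dets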
+store_module() ->
+ case application:get_env(rabbitmq_mqtt, retained_message_store) of
+ {ok, Mod} -> Mod;
+ undefined -> undefined
+ end.
+
+%%----------------------------------------------------------------------------
+
+handle_cast({retain, Topic, Msg},
+ State = #retainer_state{store = Store, store_mod = Mod}) ->
+ ok = Mod:insert(Topic, Msg, Store),
+ {noreply, State};
+handle_cast({clear, Topic},
+ State = #retainer_state{store = Store, store_mod = Mod}) ->
+ ok = Mod:delete(Topic, Store),
+ {noreply, State}.
+
+handle_call({fetch, Topic}, _From,
+ State = #retainer_state{store = Store, store_mod = Mod}) ->
+ Reply = case Mod:lookup(Topic, Store) of
+ #retained_message{mqtt_msg = Msg} -> Msg;
+ not_found -> undefined
+ end,
+ {reply, Reply, State}.
+
+handle_info(stop, State) ->
+ {stop, normal, State};
+
+handle_info(Info, State) ->
+ {stop, {unknown_info, Info}, State}.
+
+store_dir() ->
+ rabbit_mnesia:dir().
+
+terminate(_Reason, #retainer_state{store = Store, store_mod = Mod}) ->
+ Mod:terminate(Store),
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
diff --git a/deps/rabbitmq_mqtt/src/rabbit_mqtt_retainer_sup.erl b/deps/rabbitmq_mqtt/src/rabbit_mqtt_retainer_sup.erl
new file mode 100644
index 0000000000..86b54ce3d7
--- /dev/null
+++ b/deps/rabbitmq_mqtt/src/rabbit_mqtt_retainer_sup.erl
@@ -0,0 +1,60 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mqtt_retainer_sup).
+-behaviour(supervisor2).
+
+-export([start_link/1, init/1, start_child/2,start_child/1, child_for_vhost/1,
+ delete_child/1]).
+
+-define(ENCODING, utf8).
+
+-spec start_child(binary()) -> supervisor2:startchild_ret().
+-spec start_child(term(), binary()) -> supervisor2:startchild_ret().
+
+start_link(SupName) ->
+ supervisor2:start_link(SupName, ?MODULE, []).
+
+child_for_vhost(VHost) when is_binary(VHost) ->
+ case rabbit_mqtt_retainer_sup:start_child(VHost) of
+ {ok, Pid} -> Pid;
+ {error, {already_started, Pid}} -> Pid
+ end.
+
+start_child(VHost) when is_binary(VHost) ->
+ start_child(rabbit_mqtt_retainer:store_module(), VHost).
+
+start_child(RetainStoreMod, VHost) ->
+ supervisor2:start_child(?MODULE,
+ {vhost_to_atom(VHost),
+ {rabbit_mqtt_retainer, start_link, [RetainStoreMod, VHost]},
+ permanent, 60, worker, [rabbit_mqtt_retainer]}).
+
+delete_child(VHost) ->
+ Id = vhost_to_atom(VHost),
+ ok = supervisor2:terminate_child(?MODULE, Id),
+ ok = supervisor2:delete_child(?MODULE, Id).
+
+init([]) ->
+ Mod = rabbit_mqtt_retainer:store_module(),
+ rabbit_log:info("MQTT retained message store: ~p~n",
+ [Mod]),
+ {ok, {{one_for_one, 5, 5}, child_specs(Mod, rabbit_vhost:list_names())}}.
+
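+%% One retainer per vhost known at boot; vhosts created later get their
+%% child on demand via child_for_vhost/1.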
+child_specs(Mod, VHosts) ->
+ %% see start_child/2
+ [{vhost_to_atom(V),
+ {rabbit_mqtt_retainer, start_link, [Mod, V]},
+ permanent, infinity, worker, [rabbit_mqtt_retainer]} || V <- VHosts].
+
+vhost_to_atom(VHost) ->
+  %% avoid any conversion here: this atom is not meant to be
+  %% human-readable, only unique. Coercing via latin1 avoids noisy
+  %% process restarts with the unusual vhost names used by various
+  %% HTTP API test suites.
+ rabbit_data_coercion:to_atom(VHost, latin1).
diff --git a/deps/rabbitmq_mqtt/src/rabbit_mqtt_sup.erl b/deps/rabbitmq_mqtt/src/rabbit_mqtt_sup.erl
new file mode 100644
index 0000000000..c00be457d3
--- /dev/null
+++ b/deps/rabbitmq_mqtt/src/rabbit_mqtt_sup.erl
@@ -0,0 +1,73 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mqtt_sup).
+-behaviour(supervisor2).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-export([start_link/2, init/1, stop_listeners/0]).
+
+-define(TCP_PROTOCOL, 'mqtt').
+-define(TLS_PROTOCOL, 'mqtt/ssl').
+
+start_link(Listeners, []) ->
+ supervisor2:start_link({local, ?MODULE}, ?MODULE, [Listeners]).
+
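+%% TLS listeners are only set up when configured, and are dropped entirely
+%% when rabbit_networking:poodle_check/1 flags the TLS setup as unsafe.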
+init([{Listeners, SslListeners0}]) ->
+ NumTcpAcceptors = application:get_env(rabbitmq_mqtt, num_tcp_acceptors, 10),
+ {ok, SocketOpts} = application:get_env(rabbitmq_mqtt, tcp_listen_options),
+ {SslOpts, NumSslAcceptors, SslListeners}
+ = case SslListeners0 of
+ [] -> {none, 0, []};
+ _ -> {rabbit_networking:ensure_ssl(),
+ application:get_env(rabbitmq_mqtt, num_ssl_acceptors, 10),
+ case rabbit_networking:poodle_check('MQTT') of
+ ok -> SslListeners0;
+ danger -> []
+ end}
+ end,
+ {ok, {{one_for_all, 10, 10},
+ [{rabbit_mqtt_retainer_sup,
+ {rabbit_mqtt_retainer_sup, start_link, [{local, rabbit_mqtt_retainer_sup}]},
+ transient, ?SUPERVISOR_WAIT, supervisor, [rabbit_mqtt_retainer_sup]} |
+ listener_specs(fun tcp_listener_spec/1,
+ [SocketOpts, NumTcpAcceptors], Listeners) ++
+ listener_specs(fun ssl_listener_spec/1,
+ [SocketOpts, SslOpts, NumSslAcceptors], SslListeners)]}}.
+
+stop_listeners() ->
+ rabbit_networking:stop_ranch_listener_of_protocol(?TCP_PROTOCOL),
+ rabbit_networking:stop_ranch_listener_of_protocol(?TLS_PROTOCOL),
+ ok.
+
+%%
+%% Implementation
+%%
+
+listener_specs(Fun, Args, Listeners) ->
+ [Fun([Address | Args]) ||
+ Listener <- Listeners,
+ Address <- rabbit_networking:tcp_listener_addresses(Listener)].
+
+tcp_listener_spec([Address, SocketOpts, NumAcceptors]) ->
+ rabbit_networking:tcp_listener_spec(
+ rabbit_mqtt_listener_sup, Address, SocketOpts,
+ transport(?TCP_PROTOCOL), rabbit_mqtt_connection_sup, [],
+ mqtt, NumAcceptors, "MQTT TCP listener").
+
+ssl_listener_spec([Address, SocketOpts, SslOpts, NumAcceptors]) ->
+ rabbit_networking:tcp_listener_spec(
+ rabbit_mqtt_listener_sup, Address, SocketOpts ++ SslOpts,
+ transport(?TLS_PROTOCOL), rabbit_mqtt_connection_sup, [],
+ 'mqtt/ssl', NumAcceptors, "MQTT TLS listener").
+
+transport(Protocol) ->
+ case Protocol of
+ ?TCP_PROTOCOL -> ranch_tcp;
+ ?TLS_PROTOCOL -> ranch_ssl
+ end.
diff --git a/deps/rabbitmq_mqtt/src/rabbit_mqtt_util.erl b/deps/rabbitmq_mqtt/src/rabbit_mqtt_util.erl
new file mode 100644
index 0000000000..0fbe7e8a85
--- /dev/null
+++ b/deps/rabbitmq_mqtt/src/rabbit_mqtt_util.erl
@@ -0,0 +1,139 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_mqtt_util).
+
+-include("rabbit_mqtt.hrl").
+
+-export([subcription_queue_name/1,
+ gen_client_id/0,
+ env/1,
+ table_lookup/2,
+ path_for/2,
+ path_for/3,
+ vhost_name_to_table_name/1,
+ get_topic_translation_funs/0
+ ]).
+
+-define(MAX_TOPIC_TRANSLATION_CACHE_SIZE, 12).
+
+subcription_queue_name(ClientId) ->
+ Base = "mqtt-subscription-" ++ ClientId ++ "qos",
+ {list_to_binary(Base ++ "0"), list_to_binary(Base ++ "1")}.
+
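+%% Memoise Fun(Arg) in the process dictionary, keeping at most
+%% ?MAX_TOPIC_TRANSLATION_CACHE_SIZE entries (newest first).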
+cached(CacheName, Fun, Arg) ->
+ Cache =
+ case get(CacheName) of
+ undefined ->
+ [];
+ Other ->
+ Other
+ end,
+ case lists:keyfind(Arg, 1, Cache) of
+ {_, V} ->
+ V;
+ false ->
+ V = Fun(Arg),
+ CacheTail = lists:sublist(Cache, ?MAX_TOPIC_TRANSLATION_CACHE_SIZE - 1),
+ put(CacheName, [{Arg, V} | CacheTail]),
+ V
+ end.
+
+to_amqp(T0) ->
+ T1 = string:replace(T0, "/", ".", all),
+ T2 = string:replace(T1, "+", "*", all),
+ erlang:iolist_to_binary(T2).
+
+to_mqtt(T0) ->
+ T1 = string:replace(T0, "*", "+", all),
+ T2 = string:replace(T1, ".", "/", all),
+ erlang:iolist_to_binary(T2).
+
+%% amqp mqtt descr
+%% * + match one topic level
+%% # # match multiple topic levels
+%% . / topic level separator
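+%%
+%% With Sparkplug B support enabled, namespace prefixes such as spBv1.0/
+%% contain a literal dot that must survive the '.' <-> '/' translation, so
+%% the prefix dot is rewritten to "___" on the way in and restored on the
+%% way out.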
+get_topic_translation_funs() ->
+ SparkplugB = env(sparkplug),
+ ToAmqpFun = fun(Topic) ->
+ cached(mta_cache, fun to_amqp/1, Topic)
+ end,
+ ToMqttFun = fun(Topic) ->
+ cached(atm_cache, fun to_mqtt/1, Topic)
+ end,
+ {M2AFun, A2MFun} = case SparkplugB of
+ true ->
+ {ok, M2A_SpRe} = re:compile("^sp[AB]v\\d+\\.\\d+/"),
+ {ok, A2M_SpRe} = re:compile("^sp[AB]v\\d+___\\d+\\."),
+ M2A = fun(T0) ->
+ case re:run(T0, M2A_SpRe) of
+ nomatch ->
+ ToAmqpFun(T0);
+ {match, _} ->
+ T1 = string:replace(T0, ".", "___", leading),
+ ToAmqpFun(T1)
+ end
+ end,
+ A2M = fun(T0) ->
+ case re:run(T0, A2M_SpRe) of
+ nomatch ->
+ ToMqttFun(T0);
+ {match, _} ->
+ T1 = ToMqttFun(T0),
+ T2 = string:replace(T1, "___", ".", leading),
+ erlang:iolist_to_binary(T2)
+ end
+ end,
+ {M2A, A2M};
+ _ ->
+ M2A = fun(T) ->
+ ToAmqpFun(T)
+ end,
+ A2M = fun(T) ->
+ ToMqttFun(T)
+ end,
+ {M2A, A2M}
+ end,
+ {ok, {mqtt2amqp_fun, M2AFun}, {amqp2mqtt_fun, A2MFun}}.
+
+gen_client_id() ->
+ lists:nthtail(1, rabbit_guid:string(rabbit_guid:gen_secure(), [])).
+
+env(Key) ->
+ case application:get_env(rabbitmq_mqtt, Key) of
+ {ok, Val} -> coerce_env_value(Key, Val);
+ undefined -> undefined
+ end.
+
+%% TODO: move to rabbit_common
+coerce_env_value(default_pass, Val) -> rabbit_data_coercion:to_binary(Val);
+coerce_env_value(default_user, Val) -> rabbit_data_coercion:to_binary(Val);
+coerce_env_value(exchange, Val) -> rabbit_data_coercion:to_binary(Val);
+coerce_env_value(vhost, Val) -> rabbit_data_coercion:to_binary(Val);
+coerce_env_value(_, Val) -> Val.
+
+table_lookup(undefined, _Key) ->
+ undefined;
+table_lookup(Table, Key) ->
+ rabbit_misc:table_lookup(Table, Key).
+
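+%% Vhost names may contain characters that are unsafe in file names, so an
+%% MD5 hash of the name, rendered as zero-padded hex, is used instead.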
+vhost_name_to_dir_name(VHost) ->
+ vhost_name_to_dir_name(VHost, ".ets").
+vhost_name_to_dir_name(VHost, Suffix) ->
+ <<Num:128>> = erlang:md5(VHost),
+ "mqtt_retained_" ++ rabbit_misc:format("~36.16.0b", [Num]) ++ Suffix.
+
+path_for(Dir, VHost) ->
+ filename:join(Dir, vhost_name_to_dir_name(VHost)).
+
+path_for(Dir, VHost, Suffix) ->
+ filename:join(Dir, vhost_name_to_dir_name(VHost, Suffix)).
+
+vhost_name_to_table_name(VHost) ->
+ <<Num:128>> = erlang:md5(VHost),
+ list_to_atom("rabbit_mqtt_retained_" ++ rabbit_misc:format("~36.16.0b", [Num])).
diff --git a/deps/rabbitmq_mqtt/test/auth_SUITE.erl b/deps/rabbitmq_mqtt/test/auth_SUITE.erl
new file mode 100644
index 0000000000..7368139d95
--- /dev/null
+++ b/deps/rabbitmq_mqtt/test/auth_SUITE.erl
@@ -0,0 +1,493 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+-module(auth_SUITE).
+-compile([export_all]).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-define(CONNECT_TIMEOUT, 10000).
+
+all() ->
+ [{group, anonymous_no_ssl_user},
+ {group, anonymous_ssl_user},
+ {group, no_ssl_user},
+ {group, ssl_user},
+ {group, client_id_propagation}].
+
+groups() ->
+ [{anonymous_ssl_user, [],
+ [anonymous_auth_success,
+ user_credentials_auth,
+ ssl_user_auth_success,
+ ssl_user_vhost_not_allowed,
+ ssl_user_vhost_parameter_mapping_success,
+ ssl_user_vhost_parameter_mapping_not_allowed,
+ ssl_user_vhost_parameter_mapping_vhost_does_not_exist,
+ ssl_user_port_vhost_mapping_takes_precedence_over_cert_vhost_mapping
+ ]},
+ {anonymous_no_ssl_user, [],
+ [anonymous_auth_success,
+ user_credentials_auth,
+ port_vhost_mapping_success,
+ port_vhost_mapping_success_no_mapping,
+ port_vhost_mapping_not_allowed,
+ port_vhost_mapping_vhost_does_not_exist
+    %% no ssl_user_auth_failure case here: with allow_anonymous enabled,
+    %% a connection that presents no usable credentials succeeds anyway
+ ]},
+ {ssl_user, [],
+ [anonymous_auth_failure,
+ user_credentials_auth,
+ ssl_user_auth_success,
+ ssl_user_vhost_not_allowed,
+ ssl_user_vhost_parameter_mapping_success,
+ ssl_user_vhost_parameter_mapping_not_allowed,
+ ssl_user_vhost_parameter_mapping_vhost_does_not_exist,
+ ssl_user_port_vhost_mapping_takes_precedence_over_cert_vhost_mapping
+ ]},
+ {no_ssl_user, [],
+ [anonymous_auth_failure,
+ user_credentials_auth,
+ ssl_user_auth_failure,
+ port_vhost_mapping_success,
+ port_vhost_mapping_success_no_mapping,
+ port_vhost_mapping_not_allowed,
+ port_vhost_mapping_vhost_does_not_exist
+ ]},
+ {client_id_propagation, [],
+ [client_id_propagation]
+ }
+ ].
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ Config.
+
+end_per_suite(Config) ->
+ Config.
+
+init_per_group(Group, Config) ->
+ Suffix = rabbit_ct_helpers:testcase_absname(Config, "", "-"),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, Suffix},
+ {rmq_certspwd, "bunnychow"}
+ ]),
+ MqttConfig = mqtt_config(Group),
+ AuthConfig = auth_config(Group),
+ rabbit_ct_helpers:run_setup_steps(Config1,
+ [ fun(Conf) -> merge_app_env(MqttConfig, Conf) end ] ++
+ [ fun(Conf) -> case AuthConfig of
+ undefined -> Conf;
+ _ -> merge_app_env(AuthConfig, Conf)
+ end
+ end ] ++
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_group(_, Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+merge_app_env(MqttConfig, Config) ->
+ rabbit_ct_helpers:merge_app_env(Config, MqttConfig).
+
+mqtt_config(anonymous_ssl_user) ->
+ {rabbitmq_mqtt, [{ssl_cert_login, true},
+ {allow_anonymous, true}]};
+mqtt_config(anonymous_no_ssl_user) ->
+ {rabbitmq_mqtt, [{ssl_cert_login, false},
+ {allow_anonymous, true}]};
+mqtt_config(ssl_user) ->
+ {rabbitmq_mqtt, [{ssl_cert_login, true},
+ {allow_anonymous, false}]};
+mqtt_config(no_ssl_user) ->
+ {rabbitmq_mqtt, [{ssl_cert_login, false},
+ {allow_anonymous, false}]};
+mqtt_config(client_id_propagation) ->
+ {rabbitmq_mqtt, [{ssl_cert_login, true},
+ {allow_anonymous, true}]}.
+
+auth_config(client_id_propagation) ->
+ {rabbit, [
+ {auth_backends, [rabbit_auth_backend_mqtt_mock]}
+ ]
+ };
+auth_config(_) ->
+ undefined.
+
+init_per_testcase(Testcase, Config) when Testcase == ssl_user_auth_success;
+ Testcase == ssl_user_auth_failure ->
+ Config1 = set_cert_user_on_default_vhost(Config),
+ rabbit_ct_helpers:testcase_started(Config1, Testcase);
+init_per_testcase(ssl_user_vhost_parameter_mapping_success, Config) ->
+ Config1 = set_cert_user_on_default_vhost(Config),
+ User = ?config(temp_ssl_user, Config1),
+ ok = rabbit_ct_broker_helpers:clear_permissions(Config1, User, <<"/">>),
+ Config2 = set_vhost_for_cert_user(Config1, User),
+ rabbit_ct_helpers:testcase_started(Config2, ssl_user_vhost_parameter_mapping_success);
+init_per_testcase(ssl_user_vhost_parameter_mapping_not_allowed, Config) ->
+ Config1 = set_cert_user_on_default_vhost(Config),
+ User = ?config(temp_ssl_user, Config1),
+ Config2 = set_vhost_for_cert_user(Config1, User),
+ VhostForCertUser = ?config(temp_vhost_for_ssl_user, Config2),
+ ok = rabbit_ct_broker_helpers:clear_permissions(Config2, User, VhostForCertUser),
+ rabbit_ct_helpers:testcase_started(Config2, ssl_user_vhost_parameter_mapping_not_allowed);
+init_per_testcase(user_credentials_auth, Config) ->
+ User = <<"new-user">>,
+ Pass = <<"new-user-pass">>,
+ ok = rabbit_ct_broker_helpers:add_user(Config, 0, User, Pass),
+ ok = rabbit_ct_broker_helpers:set_full_permissions(Config, User, <<"/">>),
+ Config1 = rabbit_ct_helpers:set_config(Config, [{new_user, User},
+ {new_user_pass, Pass}]),
+ rabbit_ct_helpers:testcase_started(Config1, user_credentials_auth);
+init_per_testcase(ssl_user_vhost_not_allowed, Config) ->
+ Config1 = set_cert_user_on_default_vhost(Config),
+ User = ?config(temp_ssl_user, Config1),
+ ok = rabbit_ct_broker_helpers:clear_permissions(Config1, User, <<"/">>),
+ rabbit_ct_helpers:testcase_started(Config1, ssl_user_vhost_not_allowed);
+init_per_testcase(ssl_user_vhost_parameter_mapping_vhost_does_not_exist, Config) ->
+ Config1 = set_cert_user_on_default_vhost(Config),
+ User = ?config(temp_ssl_user, Config1),
+ Config2 = set_vhost_for_cert_user(Config1, User),
+ VhostForCertUser = ?config(temp_vhost_for_ssl_user, Config2),
+ ok = rabbit_ct_broker_helpers:delete_vhost(Config, VhostForCertUser),
+ rabbit_ct_helpers:testcase_started(Config1, ssl_user_vhost_parameter_mapping_vhost_does_not_exist);
+init_per_testcase(port_vhost_mapping_success, Config) ->
+ User = <<"guest">>,
+ Config1 = set_vhost_for_port_vhost_mapping_user(Config, User),
+ rabbit_ct_broker_helpers:clear_permissions(Config1, User, <<"/">>),
+ rabbit_ct_helpers:testcase_started(Config1, port_vhost_mapping_success);
+init_per_testcase(port_vhost_mapping_success_no_mapping, Config) ->
+ User = <<"guest">>,
+ Config1 = set_vhost_for_port_vhost_mapping_user(Config, User),
+ PortToVHostMappingParameter = [
+ {<<"1">>, <<"unlikely to exist">>},
+ {<<"2">>, <<"unlikely to exist">>}],
+ ok = rabbit_ct_broker_helpers:set_global_parameter(Config, mqtt_port_to_vhost_mapping, PortToVHostMappingParameter),
+ VHost = ?config(temp_vhost_for_port_mapping, Config1),
+ rabbit_ct_broker_helpers:clear_permissions(Config1, User, VHost),
+ rabbit_ct_helpers:testcase_started(Config1, port_vhost_mapping_success_no_mapping);
+init_per_testcase(port_vhost_mapping_not_allowed, Config) ->
+ User = <<"guest">>,
+ Config1 = set_vhost_for_port_vhost_mapping_user(Config, User),
+ rabbit_ct_broker_helpers:clear_permissions(Config1, User, <<"/">>),
+ VHost = ?config(temp_vhost_for_port_mapping, Config1),
+ rabbit_ct_broker_helpers:clear_permissions(Config1, User, VHost),
+ rabbit_ct_helpers:testcase_started(Config1, port_vhost_mapping_not_allowed);
+init_per_testcase(port_vhost_mapping_vhost_does_not_exist, Config) ->
+ User = <<"guest">>,
+ Config1 = set_vhost_for_port_vhost_mapping_user(Config, User),
+ rabbit_ct_broker_helpers:clear_permissions(Config1, User, <<"/">>),
+ VHost = ?config(temp_vhost_for_port_mapping, Config1),
+ rabbit_ct_broker_helpers:delete_vhost(Config1, VHost),
+ rabbit_ct_helpers:testcase_started(Config1, port_vhost_mapping_vhost_does_not_exist);
+init_per_testcase(ssl_user_port_vhost_mapping_takes_precedence_over_cert_vhost_mapping, Config) ->
+ Config1 = set_cert_user_on_default_vhost(Config),
+ User = ?config(temp_ssl_user, Config1),
+ Config2 = set_vhost_for_cert_user(Config1, User),
+
+ Config3 = set_vhost_for_port_vhost_mapping_user(Config2, User),
+    VhostForCertUser = ?config(temp_vhost_for_ssl_user, Config2),
+    rabbit_ct_broker_helpers:clear_permissions(Config3, User, VhostForCertUser),
+
+ rabbit_ct_broker_helpers:clear_permissions(Config3, User, <<"/">>),
+ rabbit_ct_helpers:testcase_started(Config3, ssl_user_port_vhost_mapping_takes_precedence_over_cert_vhost_mapping);
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+set_cert_user_on_default_vhost(Config) ->
+ CertsDir = ?config(rmq_certsdir, Config),
+ CertFile = filename:join([CertsDir, "client", "cert.pem"]),
+ {ok, CertBin} = file:read_file(CertFile),
+ [{'Certificate', Cert, not_encrypted}] = public_key:pem_decode(CertBin),
+ UserBin = rabbit_ct_broker_helpers:rpc(Config, 0,
+ rabbit_ssl,
+ peer_cert_auth_name,
+ [Cert]),
+ User = binary_to_list(UserBin),
+ ok = rabbit_ct_broker_helpers:add_user(Config, 0, User, ""),
+ ok = rabbit_ct_broker_helpers:set_full_permissions(Config, User, <<"/">>),
+ rabbit_ct_helpers:set_config(Config, [{temp_ssl_user, User}]).
+
+set_vhost_for_cert_user(Config, User) ->
+ VhostForCertUser = <<"vhost_for_cert_user">>,
+ UserToVHostMappingParameter = [
+ {rabbit_data_coercion:to_binary(User), VhostForCertUser},
+ {<<"O=client,CN=unlikelytoexistuser">>, <<"vhost2">>}
+ ],
+ ok = rabbit_ct_broker_helpers:add_vhost(Config, VhostForCertUser),
+ ok = rabbit_ct_broker_helpers:set_full_permissions(Config, User, VhostForCertUser),
+ ok = rabbit_ct_broker_helpers:set_global_parameter(Config, mqtt_default_vhosts, UserToVHostMappingParameter),
+ rabbit_ct_helpers:set_config(Config, [{temp_vhost_for_ssl_user, VhostForCertUser}]).
+
+set_vhost_for_port_vhost_mapping_user(Config, User) ->
+ VhostForPortMapping = <<"vhost_for_port_vhost_mapping">>,
+ Port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_mqtt),
+ TlsPort = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_mqtt_tls),
+ PortToVHostMappingParameter = [
+ {integer_to_binary(Port), VhostForPortMapping},
+ {<<"1884">>, <<"vhost2">>},
+ {integer_to_binary(TlsPort), VhostForPortMapping},
+ {<<"8884">>, <<"vhost2">>}
+ ],
+ ok = rabbit_ct_broker_helpers:add_vhost(Config, VhostForPortMapping),
+ ok = rabbit_ct_broker_helpers:set_full_permissions(Config, User, VhostForPortMapping),
+ ok = rabbit_ct_broker_helpers:set_global_parameter(Config, mqtt_port_to_vhost_mapping, PortToVHostMappingParameter),
+ rabbit_ct_helpers:set_config(Config, [{temp_vhost_for_port_mapping, VhostForPortMapping}]).
+
+end_per_testcase(Testcase, Config) when Testcase == ssl_user_auth_success;
+ Testcase == ssl_user_auth_failure;
+ Testcase == ssl_user_vhost_not_allowed ->
+ delete_cert_user(Config),
+ rabbit_ct_helpers:testcase_finished(Config, Testcase);
+end_per_testcase(TestCase, Config) when TestCase == ssl_user_vhost_parameter_mapping_success;
+ TestCase == ssl_user_vhost_parameter_mapping_not_allowed ->
+ delete_cert_user(Config),
+ VhostForCertUser = ?config(temp_vhost_for_ssl_user, Config),
+ ok = rabbit_ct_broker_helpers:delete_vhost(Config, VhostForCertUser),
+ ok = rabbit_ct_broker_helpers:clear_global_parameter(Config, mqtt_default_vhosts),
+ rabbit_ct_helpers:testcase_finished(Config, TestCase);
+end_per_testcase(user_credentials_auth, Config) ->
+ User = ?config(new_user, Config),
+ {ok,_} = rabbit_ct_broker_helpers:rabbitmqctl(Config, 0, ["delete_user", User]),
+ rabbit_ct_helpers:testcase_finished(Config, user_credentials_auth);
+end_per_testcase(ssl_user_vhost_parameter_mapping_vhost_does_not_exist, Config) ->
+ delete_cert_user(Config),
+ ok = rabbit_ct_broker_helpers:clear_global_parameter(Config, mqtt_default_vhosts),
+ rabbit_ct_helpers:testcase_finished(Config, ssl_user_vhost_parameter_mapping_vhost_does_not_exist);
+end_per_testcase(Testcase, Config) when Testcase == port_vhost_mapping_success;
+ Testcase == port_vhost_mapping_not_allowed;
+ Testcase == port_vhost_mapping_success_no_mapping ->
+ User = <<"guest">>,
+ rabbit_ct_broker_helpers:set_full_permissions(Config, User, <<"/">>),
+ VHost = ?config(temp_vhost_for_port_mapping, Config),
+ ok = rabbit_ct_broker_helpers:delete_vhost(Config, VHost),
+ ok = rabbit_ct_broker_helpers:clear_global_parameter(Config, mqtt_port_to_vhost_mapping),
+ rabbit_ct_helpers:testcase_finished(Config, Testcase);
+end_per_testcase(port_vhost_mapping_vhost_does_not_exist, Config) ->
+ User = <<"guest">>,
+ ok = rabbit_ct_broker_helpers:set_full_permissions(Config, User, <<"/">>),
+ ok = rabbit_ct_broker_helpers:clear_global_parameter(Config, mqtt_port_to_vhost_mapping),
+ rabbit_ct_helpers:testcase_finished(Config, port_vhost_mapping_vhost_does_not_exist);
+end_per_testcase(ssl_user_port_vhost_mapping_takes_precedence_over_cert_vhost_mapping, Config) ->
+ delete_cert_user(Config),
+ VhostForCertUser = ?config(temp_vhost_for_ssl_user, Config),
+ ok = rabbit_ct_broker_helpers:delete_vhost(Config, VhostForCertUser),
+ ok = rabbit_ct_broker_helpers:clear_global_parameter(Config, mqtt_default_vhosts),
+
+ VHostForPortVHostMapping = ?config(temp_vhost_for_port_mapping, Config),
+ ok = rabbit_ct_broker_helpers:delete_vhost(Config, VHostForPortVHostMapping),
+ ok = rabbit_ct_broker_helpers:clear_global_parameter(Config, mqtt_port_to_vhost_mapping),
+ rabbit_ct_helpers:testcase_finished(Config, ssl_user_port_vhost_mapping_takes_precedence_over_cert_vhost_mapping);
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+delete_cert_user(Config) ->
+ User = ?config(temp_ssl_user, Config),
+ {ok,_} = rabbit_ct_broker_helpers:rabbitmqctl(Config, 0, ["delete_user", User]).
+
+anonymous_auth_success(Config) ->
+ expect_successful_connection(fun connect_anonymous/1, Config).
+
+anonymous_auth_failure(Config) ->
+ expect_authentication_failure(fun connect_anonymous/1, Config).
+
+
+ssl_user_auth_success(Config) ->
+ expect_successful_connection(fun connect_ssl/1, Config).
+
+ssl_user_auth_failure(Config) ->
+ expect_authentication_failure(fun connect_ssl/1, Config).
+
+user_credentials_auth(Config) ->
+ NewUser = ?config(new_user, Config),
+ NewUserPass = ?config(new_user_pass, Config),
+
+ expect_successful_connection(
+ fun(Conf) -> connect_user(NewUser, NewUserPass, Conf) end,
+ Config),
+
+ expect_successful_connection(
+ fun(Conf) -> connect_user(<<"guest">>, <<"guest">>, Conf) end,
+ Config),
+
+ expect_successful_connection(
+ fun(Conf) -> connect_user(<<"/:guest">>, <<"guest">>, Conf) end,
+ Config),
+
+ expect_authentication_failure(
+ fun(Conf) -> connect_user(NewUser, <<"invalid_pass">>, Conf) end,
+ Config),
+
+ expect_authentication_failure(
+ fun(Conf) -> connect_user(undefined, <<"pass">>, Conf) end,
+ Config),
+
+ expect_authentication_failure(
+ fun(Conf) -> connect_user(NewUser, undefined, Conf) end,
+ Config),
+
+ expect_authentication_failure(
+ fun(Conf) -> connect_user(<<"non-existing-vhost:guest">>, <<"guest">>, Conf) end,
+ Config).
+
+ssl_user_vhost_parameter_mapping_success(Config) ->
+ expect_successful_connection(fun connect_ssl/1, Config).
+
+ssl_user_vhost_parameter_mapping_not_allowed(Config) ->
+ expect_authentication_failure(fun connect_ssl/1, Config).
+
+ssl_user_vhost_not_allowed(Config) ->
+ expect_authentication_failure(fun connect_ssl/1, Config).
+
+ssl_user_vhost_parameter_mapping_vhost_does_not_exist(Config) ->
+ expect_authentication_failure(fun connect_ssl/1, Config).
+
+port_vhost_mapping_success(Config) ->
+ expect_successful_connection(
+ fun(Conf) -> connect_user(<<"guest">>, <<"guest">>, Conf) end,
+ Config).
+
+port_vhost_mapping_success_no_mapping(Config) ->
+ %% no vhost mapping for the port, falling back to default vhost
+ %% where the user can connect
+ expect_successful_connection(
+ fun(Conf) -> connect_user(<<"guest">>, <<"guest">>, Conf) end,
+ Config
+ ).
+
+port_vhost_mapping_not_allowed(Config) ->
+ expect_authentication_failure(
+ fun(Conf) -> connect_user(<<"guest">>, <<"guest">>, Conf) end,
+ Config
+ ).
+
+port_vhost_mapping_vhost_does_not_exist(Config) ->
+ expect_authentication_failure(
+ fun(Conf) -> connect_user(<<"guest">>, <<"guest">>, Conf) end,
+ Config
+ ).
+
+ssl_user_port_vhost_mapping_takes_precedence_over_cert_vhost_mapping(Config) ->
+ expect_successful_connection(fun connect_ssl/1, Config).
+
+connect_anonymous(Config) ->
+ P = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_mqtt),
+ emqttc:start_link([{host, "localhost"},
+ {port, P},
+ {client_id, <<"simpleClient">>},
+ {proto_ver, 3},
+ {logger, info}]).
+
+connect_ssl(Config) ->
+ CertsDir = ?config(rmq_certsdir, Config),
+ SSLConfig = [{cacertfile, filename:join([CertsDir, "testca", "cacert.pem"])},
+ {certfile, filename:join([CertsDir, "client", "cert.pem"])},
+ {keyfile, filename:join([CertsDir, "client", "key.pem"])}],
+ P = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_mqtt_tls),
+ emqttc:start_link([{host, "localhost"},
+ {port, P},
+ {client_id, <<"simpleClient">>},
+ {proto_ver, 3},
+ {logger, info},
+ {ssl, SSLConfig}]).
+
+client_id_propagation(Config) ->
+ ok = rabbit_ct_broker_helpers:add_code_path_to_all_nodes(Config,
+ rabbit_auth_backend_mqtt_mock),
+ ClientId = <<"client-id-propagation">>,
+ {ok, C} = connect_user(<<"client-id-propagation">>, <<"client-id-propagation">>,
+ Config, ClientId),
+ receive {mqttc, C, connected} -> ok
+ after ?CONNECT_TIMEOUT -> exit(emqttc_connection_timeout)
+ end,
+ emqttc:subscribe(C, <<"TopicA">>, qos0),
+ [{authentication, AuthProps}] = rabbit_ct_broker_helpers:rpc(Config, 0,
+ rabbit_auth_backend_mqtt_mock,
+ get,
+ [authentication]),
+ ?assertEqual(ClientId, proplists:get_value(client_id, AuthProps)),
+
+ [{vhost_access, AuthzData}] = rabbit_ct_broker_helpers:rpc(Config, 0,
+ rabbit_auth_backend_mqtt_mock,
+ get,
+ [vhost_access]),
+ ?assertEqual(ClientId, maps:get(<<"client_id">>, AuthzData)),
+
+ [{resource_access, AuthzContext}] = rabbit_ct_broker_helpers:rpc(Config, 0,
+ rabbit_auth_backend_mqtt_mock,
+ get,
+ [resource_access]),
+ ?assertEqual(true, maps:size(AuthzContext) > 0),
+ ?assertEqual(ClientId, maps:get(<<"client_id">>, AuthzContext)),
+
+ [{topic_access, TopicContext}] = rabbit_ct_broker_helpers:rpc(Config, 0,
+ rabbit_auth_backend_mqtt_mock,
+ get,
+ [topic_access]),
+ VariableMap = maps:get(variable_map, TopicContext),
+ ?assertEqual(ClientId, maps:get(<<"client_id">>, VariableMap)),
+
+ emqttc:disconnect(C).
+
+connect_user(User, Pass, Config) ->
+ connect_user(User, Pass, Config, User).
+connect_user(User, Pass, Config, ClientID) ->
+ Creds = case User of
+ undefined -> [];
+ _ -> [{username, User}]
+ end ++ case Pass of
+ undefined -> [];
+ _ -> [{password, Pass}]
+ end,
+ P = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_mqtt),
+ emqttc:start_link([{host, "localhost"},
+ {port, P},
+ {client_id, ClientID},
+ {proto_ver, 3},
+ {logger, info}] ++ Creds).
+
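+%% Both helpers also verify the auth attempt metrics: attempts appear to be
+%% aggregated per protocol only, hence no remote_address or username keys.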
+expect_successful_connection(ConnectFun, Config) ->
+ rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_core_metrics, reset_auth_attempt_metrics, []),
+ {ok, C} = ConnectFun(Config),
+ receive {mqttc, C, connected} -> emqttc:disconnect(C)
+ after ?CONNECT_TIMEOUT -> exit(emqttc_connection_timeout)
+ end,
+ [Attempt] =
+ rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_core_metrics, get_auth_attempts, []),
+ ?assertEqual(false, proplists:is_defined(remote_address, Attempt)),
+ ?assertEqual(false, proplists:is_defined(username, Attempt)),
+ ?assertEqual(proplists:get_value(protocol, Attempt), <<"mqtt">>),
+ ?assertEqual(proplists:get_value(auth_attempts, Attempt), 1),
+ ?assertEqual(proplists:get_value(auth_attempts_failed, Attempt), 0),
+ ?assertEqual(proplists:get_value(auth_attempts_succeeded, Attempt), 1).
+
+expect_authentication_failure(ConnectFun, Config) ->
+ process_flag(trap_exit, true),
+ rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_core_metrics, reset_auth_attempt_metrics, []),
+ {ok, C} = ConnectFun(Config),
+ Result = receive
+ {mqttc, C, connected} -> {error, unexpected_anonymous_connection};
+ {'EXIT', C, {shutdown,{connack_error,'CONNACK_AUTH'}}} -> ok;
+ {'EXIT', C, {shutdown,{connack_error,'CONNACK_CREDENTIALS'}}} -> ok
+ after
+ ?CONNECT_TIMEOUT -> {error, emqttc_connection_timeout}
+ end,
+ [Attempt] =
+ rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_core_metrics, get_auth_attempts, []),
+    ?assertEqual(false, proplists:is_defined(remote_address, Attempt)),
+ ?assertEqual(false, proplists:is_defined(username, Attempt)),
+ ?assertEqual(proplists:get_value(protocol, Attempt), <<"mqtt">>),
+ ?assertEqual(proplists:get_value(auth_attempts, Attempt), 1),
+ ?assertEqual(proplists:get_value(auth_attempts_failed, Attempt), 1),
+ ?assertEqual(proplists:get_value(auth_attempts_succeeded, Attempt), 0),
+ process_flag(trap_exit, false),
+ case Result of
+ ok -> ok;
+ {error, Err} -> exit(Err)
+ end.
diff --git a/deps/rabbitmq_mqtt/test/cluster_SUITE.erl b/deps/rabbitmq_mqtt/test/cluster_SUITE.erl
new file mode 100644
index 0000000000..941b195ced
--- /dev/null
+++ b/deps/rabbitmq_mqtt/test/cluster_SUITE.erl
@@ -0,0 +1,188 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+-module(cluster_SUITE).
+-compile([export_all]).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+all() ->
+ [
+ {group, non_parallel_tests}
+ ].
+
+groups() ->
+ [
+ {non_parallel_tests, [], [
+ connection_id_tracking,
+ connection_id_tracking_on_nodedown,
+ connection_id_tracking_with_decommissioned_node
+ ]}
+ ].
+
+suite() ->
+ [{timetrap, {minutes, 5}}].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+merge_app_env(Config) ->
+ rabbit_ct_helpers:merge_app_env(Config,
+ {rabbit, [
+ {collect_statistics, basic},
+ {collect_statistics_interval, 100}
+ ]}).
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(_, Config) ->
+ Config.
+
+end_per_group(_, Config) ->
+ Config.
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase),
+ rabbit_ct_helpers:log_environment(),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, Testcase},
+ {rmq_extra_tcp_ports, [tcp_port_mqtt_extra,
+ tcp_port_mqtt_tls_extra]},
+ {rmq_nodes_clustered, true},
+ {rmq_nodes_count, 5}
+ ]),
+ rabbit_ct_helpers:run_setup_steps(Config1,
+ [ fun merge_app_env/1 ] ++
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()),
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+%% -------------------------------------------------------------------
+%% Test cases
+%% -------------------------------------------------------------------
+
+%% Note about running this testsuite in a mixed-versions cluster:
+%% All even-numbered nodes will use the same code base when using a
+%% secondary Umbrella. Odd-numbered nodes might use an incompatible code
+%% base. When cluster-wide client ID tracking was introduced, it was not
+%% put behind a feature flag because there was no need for one. Here, we
+%% don't have a way to ensure that all nodes participate in client ID
+%% tracking. However, those using the same code should. That's why we
+%% limit our RPC calls to those nodes.
+%%
+%% That is also why we use a 5-node cluster: nodes 2 and 4 might not
+%% participate, which leaves nodes 1, 3 and 5, i.e. the minimum of three
+%% nodes needed to run Ra in proper conditions.
+
+connection_id_tracking(Config) ->
+ ID = <<"duplicate-id">>,
+ {ok, MRef1, C1} = connect_to_node(Config, 0, ID),
+ emqttc:subscribe(C1, <<"TopicA">>, qos0),
+ emqttc:publish(C1, <<"TopicA">>, <<"Payload">>),
+ expect_publishes(<<"TopicA">>, [<<"Payload">>]),
+
+ %% there's one connection
+ assert_connection_count(Config, 10, 2, 1),
+
+ %% connect to the same node (A or 0)
+ {ok, MRef2, _C2} = connect_to_node(Config, 0, ID),
+
+ %% C1 is disconnected
+ await_disconnection(MRef1),
+
+ %% connect to a different node (C or 2)
+ {ok, _, C3} = connect_to_node(Config, 2, ID),
+ assert_connection_count(Config, 10, 2, 1),
+
+ %% C2 is disconnected
+ await_disconnection(MRef2),
+
+ emqttc:disconnect(C3).
+
+connection_id_tracking_on_nodedown(Config) ->
+ Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ {ok, MRef, C} = connect_to_node(Config, 0, <<"simpleClient">>),
+ emqttc:subscribe(C, <<"TopicA">>, qos0),
+ emqttc:publish(C, <<"TopicA">>, <<"Payload">>),
+ expect_publishes(<<"TopicA">>, [<<"Payload">>]),
+ assert_connection_count(Config, 10, 2, 1),
+ ok = rabbit_ct_broker_helpers:stop_node(Config, Server),
+ await_disconnection(MRef),
+ assert_connection_count(Config, 10, 2, 0),
+ ok.
+
+connection_id_tracking_with_decommissioned_node(Config) ->
+ Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ {ok, MRef, C} = connect_to_node(Config, 0, <<"simpleClient">>),
+ emqttc:subscribe(C, <<"TopicA">>, qos0),
+ emqttc:publish(C, <<"TopicA">>, <<"Payload">>),
+ expect_publishes(<<"TopicA">>, [<<"Payload">>]),
+
+ assert_connection_count(Config, 10, 2, 1),
+ {ok, _} = rabbit_ct_broker_helpers:rabbitmqctl(Config, 0, ["decommission_mqtt_node", Server]),
+ await_disconnection(MRef),
+ assert_connection_count(Config, 10, 2, 0),
+ ok.
+
+%%
+%% Helpers
+%%
+
+assert_connection_count(_Config, 0, _, _) ->
+    ct:fail("rabbit_mqtt_collector:list/0 did not return the expected number of connections after all retries");
+assert_connection_count(Config, Retries, NodeId, NumElements) ->
+ List = rabbit_ct_broker_helpers:rpc(Config, NodeId, rabbit_mqtt_collector, list, []),
+ case length(List) == NumElements of
+ true ->
+ ok;
+ false ->
+ timer:sleep(200),
+ assert_connection_count(Config, Retries-1, NodeId, NumElements)
+ end.
+
+connect_to_node(Config, Node, ClientID) ->
+ Port = rabbit_ct_broker_helpers:get_node_config(Config, Node, tcp_port_mqtt),
+ {ok, C} = connect(Port, ClientID),
+ MRef = erlang:monitor(process, C),
+ {ok, MRef, C}.
+
+connect(Port, ClientID) ->
+ {ok, C} = emqttc:start_link([{host, "localhost"},
+ {port, Port},
+ {client_id, ClientID},
+ {proto_ver, 3},
+ {logger, info},
+ {puback_timeout, 1}]),
+ unlink(C),
+ {ok, C}.
+
+await_disconnection(Ref) ->
+ receive
+ {'DOWN', Ref, _, _, _} -> ok
+ after 30000 -> exit(missing_down_message)
+ end.
+
+expect_publishes(_Topic, []) -> ok;
+expect_publishes(Topic, [Payload|Rest]) ->
+ receive
+ {publish, Topic, Payload} -> expect_publishes(Topic, Rest)
+ after 5000 ->
+ throw({publish_not_delivered, Payload})
+ end.
diff --git a/deps/rabbitmq_mqtt/test/command_SUITE.erl b/deps/rabbitmq_mqtt/test/command_SUITE.erl
new file mode 100644
index 0000000000..a15c3789f7
--- /dev/null
+++ b/deps/rabbitmq_mqtt/test/command_SUITE.erl
@@ -0,0 +1,158 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+
+-module(command_SUITE).
+-compile([export_all]).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include("rabbit_mqtt.hrl").
+
+
+-define(COMMAND, 'Elixir.RabbitMQ.CLI.Ctl.Commands.ListMqttConnectionsCommand').
+
+all() ->
+ [
+ {group, non_parallel_tests}
+ ].
+
+groups() ->
+ [
+ {non_parallel_tests, [], [
+ merge_defaults,
+ run
+ ]}
+ ].
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, ?MODULE},
+ {rmq_extra_tcp_ports, [tcp_port_mqtt_extra,
+ tcp_port_mqtt_tls_extra]},
+ {rmq_nodes_clustered, true},
+ {rmq_nodes_count, 3}
+ ]),
+ rabbit_ct_helpers:run_setup_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_group(_, Config) ->
+ Config.
+
+end_per_group(_, Config) ->
+ Config.
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
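+%% With no columns requested the command defaults to client_id and conn_name
+%% in non-verbose mode; explicitly requested columns pass through unchanged.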
+merge_defaults(_Config) ->
+ {[<<"client_id">>, <<"conn_name">>], #{verbose := false}} =
+ ?COMMAND:merge_defaults([], #{}),
+
+ {[<<"other_key">>], #{verbose := true}} =
+ ?COMMAND:merge_defaults([<<"other_key">>], #{verbose => true}),
+
+ {[<<"other_key">>], #{verbose := false}} =
+ ?COMMAND:merge_defaults([<<"other_key">>], #{verbose => false}).
+
+
+run(Config) ->
+ Node = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ Opts = #{node => Node, timeout => 10000, verbose => false},
+
+ %% No connections
+ [] = 'Elixir.Enum':to_list(?COMMAND:run([], Opts)),
+
+ P = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_mqtt),
+ {ok, _} = emqttc:start_link([{host, "localhost"},
+ {port, P},
+ {client_id, <<"simpleClient">>},
+ {proto_ver, 3},
+ {logger, info},
+ {puback_timeout, 1}]),
+ ct:sleep(100),
+
+ [[{client_id, <<"simpleClient">>}]] =
+ 'Elixir.Enum':to_list(?COMMAND:run([<<"client_id">>], Opts)),
+
+
+ {ok, _} = emqttc:start_link([{host, "localhost"},
+ {port, P},
+ {client_id, <<"simpleClient1">>},
+ {proto_ver, 3},
+ {logger, info},
+ {username, <<"guest">>},
+ {password, <<"guest">>},
+ {puback_timeout, 1}]),
+ ct:sleep(200),
+
+ [[{client_id, <<"simpleClient">>}, {user, <<"guest">>}],
+ [{client_id, <<"simpleClient1">>}, {user, <<"guest">>}]] =
+ lists:sort(
+ 'Elixir.Enum':to_list(?COMMAND:run([<<"client_id">>, <<"user">>],
+ Opts))),
+
+ Port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp),
+ start_amqp_connection(network, Node, Port),
+
+ %% There are still just two connections
+ [[{client_id, <<"simpleClient">>}],
+ [{client_id, <<"simpleClient1">>}]] =
+ lists:sort('Elixir.Enum':to_list(?COMMAND:run([<<"client_id">>], Opts))),
+
+ start_amqp_connection(direct, Node, Port),
+ ct:sleep(200),
+
+ %% Still two MQTT connections, one direct AMQP 0-9-1 connection
+ [[{client_id, <<"simpleClient">>}],
+ [{client_id, <<"simpleClient1">>}]] =
+ lists:sort('Elixir.Enum':to_list(?COMMAND:run([<<"client_id">>], Opts))),
+
+ %% Verbose returns all keys
+ Infos = lists:map(fun(El) -> atom_to_binary(El, utf8) end, ?INFO_ITEMS),
+ AllKeys1 = 'Elixir.Enum':to_list(?COMMAND:run(Infos, Opts)),
+ AllKeys2 = 'Elixir.Enum':to_list(?COMMAND:run([], Opts#{verbose => true})),
+
+ %% There are two connections
+ [FirstPL, _] = AllKeys1,
+ [SecondPL, _] = AllKeys2,
+
+ First = maps:from_list(lists:usort(FirstPL)),
+ Second = maps:from_list(lists:usort(SecondPL)),
+
+ %% Keys are INFO_ITEMS
+ KeysCount = length(?INFO_ITEMS),
+ ?assert(KeysCount =:= maps:size(First)),
+ ?assert(KeysCount =:= maps:size(Second)),
+
+ Keys = maps:keys(First),
+
+ [] = Keys -- ?INFO_ITEMS,
+ [] = ?INFO_ITEMS -- Keys.
+
+
+start_amqp_connection(Type, Node, Port) ->
+ amqp_connection:start(amqp_params(Type, Node, Port)).
+
+amqp_params(network, _, Port) ->
+ #amqp_params_network{port = Port};
+amqp_params(direct, Node, _) ->
+ #amqp_params_direct{node = Node}.
+
diff --git a/deps/rabbitmq_mqtt/test/config_schema_SUITE.erl b/deps/rabbitmq_mqtt/test/config_schema_SUITE.erl
new file mode 100644
index 0000000000..c760148cad
--- /dev/null
+++ b/deps/rabbitmq_mqtt/test/config_schema_SUITE.erl
@@ -0,0 +1,55 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2016-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(config_schema_SUITE).
+
+-compile(export_all).
+
+all() ->
+ [
+ run_snippets
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ Config1 = rabbit_ct_helpers:run_setup_steps(Config),
+ rabbit_ct_config_schema:init_schemas(rabbitmq_mqtt, Config1).
+
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, Testcase}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_testcase(Testcase, Config) ->
+ Config1 = rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()),
+ rabbit_ct_helpers:testcase_finished(Config1, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+run_snippets(Config) ->
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, run_snippets1, [Config]).
+
+run_snippets1(Config) ->
+ rabbit_ct_config_schema:run_snippets(Config).
+
diff --git a/deps/rabbitmq_mqtt/test/config_schema_SUITE_data/certs/cacert.pem b/deps/rabbitmq_mqtt/test/config_schema_SUITE_data/certs/cacert.pem
new file mode 100644
index 0000000000..eaf6b67806
--- /dev/null
+++ b/deps/rabbitmq_mqtt/test/config_schema_SUITE_data/certs/cacert.pem
@@ -0,0 +1 @@
+I'm not a certificate
diff --git a/deps/rabbitmq_mqtt/test/config_schema_SUITE_data/certs/cert.pem b/deps/rabbitmq_mqtt/test/config_schema_SUITE_data/certs/cert.pem
new file mode 100644
index 0000000000..eaf6b67806
--- /dev/null
+++ b/deps/rabbitmq_mqtt/test/config_schema_SUITE_data/certs/cert.pem
@@ -0,0 +1 @@
+I'm not a certificate
diff --git a/deps/rabbitmq_mqtt/test/config_schema_SUITE_data/certs/key.pem b/deps/rabbitmq_mqtt/test/config_schema_SUITE_data/certs/key.pem
new file mode 100644
index 0000000000..eaf6b67806
--- /dev/null
+++ b/deps/rabbitmq_mqtt/test/config_schema_SUITE_data/certs/key.pem
@@ -0,0 +1 @@
+I'm not a certificate
diff --git a/deps/rabbitmq_mqtt/test/config_schema_SUITE_data/rabbitmq_mqtt.snippets b/deps/rabbitmq_mqtt/test/config_schema_SUITE_data/rabbitmq_mqtt.snippets
new file mode 100644
index 0000000000..032cce01f9
--- /dev/null
+++ b/deps/rabbitmq_mqtt/test/config_schema_SUITE_data/rabbitmq_mqtt.snippets
@@ -0,0 +1,144 @@
+[{defaults,
+ "listeners.tcp.default = 5672
+ mqtt.default_user = guest
+ mqtt.default_pass = guest
+ mqtt.allow_anonymous = true
+ mqtt.vhost = /
+ mqtt.exchange = amq.topic
+ mqtt.subscription_ttl = 1800000
+ mqtt.prefetch = 10
+ mqtt.sparkplug = true
+ mqtt.listeners.ssl = none
+## Default MQTT with TLS port is 8883
+# mqtt.listeners.ssl.default = 8883
+ mqtt.listeners.tcp.default = 1883
+ mqtt.tcp_listen_options.backlog = 128
+ mqtt.tcp_listen_options.nodelay = true
+ mqtt.proxy_protocol = false",
+ [{rabbit,[{tcp_listeners,[5672]}]},
+ {rabbitmq_mqtt,
+ [{default_user,<<"guest">>},
+ {default_pass,<<"guest">>},
+ {allow_anonymous,true},
+ {vhost,<<"/">>},
+ {exchange,<<"amq.topic">>},
+ {subscription_ttl,1800000},
+ {prefetch,10},
+ {sparkplug,true},
+ {ssl_listeners,[]},
+ {tcp_listeners,[1883]},
+ {tcp_listen_options,[{backlog,128},{nodelay,true}]},
+ {proxy_protocol,false}]}],
+ [rabbitmq_mqtt]},
+
+ {listener_tcp_options,
+ "mqtt.listeners.tcp.1 = 127.0.0.1:61613
+ mqtt.listeners.tcp.2 = ::1:61613
+
+ mqtt.tcp_listen_options.backlog = 2048
+ mqtt.tcp_listen_options.recbuf = 8192
+ mqtt.tcp_listen_options.sndbuf = 8192
+
+ mqtt.tcp_listen_options.keepalive = true
+ mqtt.tcp_listen_options.nodelay = true
+
+ mqtt.tcp_listen_options.exit_on_close = true
+
+ mqtt.tcp_listen_options.send_timeout = 120
+",
+ [{rabbitmq_mqtt,[
+ {tcp_listeners,[
+ {"127.0.0.1",61613},
+ {"::1",61613}
+ ]}
+ , {tcp_listen_options, [
+ {backlog, 2048},
+ {exit_on_close, true},
+
+ {recbuf, 8192},
+ {sndbuf, 8192},
+
+ {send_timeout, 120},
+
+ {keepalive, true},
+ {nodelay, true}
+ ]}
+ ]}],
+ [rabbitmq_mqtt]},
+
+
+ {ssl,
+ "ssl_options.cacertfile = test/config_schema_SUITE_data/certs/cacert.pem
+ ssl_options.certfile = test/config_schema_SUITE_data/certs/cert.pem
+ ssl_options.keyfile = test/config_schema_SUITE_data/certs/key.pem
+ ssl_options.verify = verify_peer
+ ssl_options.fail_if_no_peer_cert = true
+
+ mqtt.listeners.ssl.default = 8883
+ mqtt.listeners.tcp.default = 1883",
+ [{rabbit,
+ [{ssl_options,
+ [{cacertfile,"test/config_schema_SUITE_data/certs/cacert.pem"},
+ {certfile,"test/config_schema_SUITE_data/certs/cert.pem"},
+ {keyfile,"test/config_schema_SUITE_data/certs/key.pem"},
+ {verify,verify_peer},
+ {fail_if_no_peer_cert,true}]}]},
+ {rabbitmq_mqtt,[{ssl_listeners,[8883]},{tcp_listeners,[1883]}]}],
+ [rabbitmq_mqtt]},
+ {ssl_cert_login,
+ "mqtt.ssl_cert_login = true",
+ [{rabbitmq_mqtt,[{ssl_cert_login,true}]}],
+ [rabbitmq_mqtt]},
+ {ssl_cert_login_from,
+ "ssl_cert_login_from = common_name",
+ [{rabbit,[{ssl_cert_login_from,common_name}]}],
+ [rabbitmq_mqtt]},
+ {proxy_protocol,
+ "listeners.tcp.default = 5672
+ mqtt.default_user = guest
+ mqtt.default_pass = guest
+ mqtt.allow_anonymous = true
+ mqtt.vhost = /
+ mqtt.exchange = amq.topic
+ mqtt.subscription_ttl = undefined
+ mqtt.prefetch = 10
+ mqtt.proxy_protocol = true",
+ [{rabbit,[{tcp_listeners,[5672]}]},
+ {rabbitmq_mqtt,
+ [{default_user,<<"guest">>},
+ {default_pass,<<"guest">>},
+ {allow_anonymous,true},
+ {vhost,<<"/">>},
+ {exchange,<<"amq.topic">>},
+ {subscription_ttl,undefined},
+ {prefetch,10},
+ {proxy_protocol,true}]}],
+ [rabbitmq_mqtt]},
+ {prefetch_retained_msg_store,
+ "mqtt.default_user = guest
+ mqtt.default_pass = guest
+ mqtt.allow_anonymous = true
+ mqtt.vhost = /
+ mqtt.exchange = amq.topic
+ mqtt.subscription_ttl = 1800000
+ mqtt.prefetch = 10
+## use DETS (disk-based) store for retained messages
+ mqtt.retained_message_store = rabbit_mqtt_retained_msg_store_dets
+## only used by DETS store
+ mqtt.retained_message_store_dets_sync_interval = 2000
+
+ mqtt.listeners.ssl = none
+ mqtt.listeners.tcp.default = 1883",
+ [{rabbitmq_mqtt,
+ [{default_user,<<"guest">>},
+ {default_pass,<<"guest">>},
+ {allow_anonymous,true},
+ {vhost,<<"/">>},
+ {exchange,<<"amq.topic">>},
+ {subscription_ttl,1800000},
+ {prefetch,10},
+ {retained_message_store,rabbit_mqtt_retained_msg_store_dets},
+ {retained_message_store_dets_sync_interval,2000},
+ {ssl_listeners,[]},
+ {tcp_listeners,[1883]}]}],
+ [rabbitmq_mqtt]}].
diff --git a/deps/rabbitmq_mqtt/test/java_SUITE.erl b/deps/rabbitmq_mqtt/test/java_SUITE.erl
new file mode 100644
index 0000000000..34ec8dac19
--- /dev/null
+++ b/deps/rabbitmq_mqtt/test/java_SUITE.erl
@@ -0,0 +1,127 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(java_SUITE).
+-compile([export_all]).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-define(BASE_CONF_RABBIT, {rabbit, [{ssl_options, [{fail_if_no_peer_cert, false}]}]}).
+-define(BASE_CONF_MQTT,
+ {rabbitmq_mqtt, [
+ {ssl_cert_login, true},
+ {allow_anonymous, false},
+ {sparkplug, true},
+ {tcp_listeners, []},
+ {ssl_listeners, []}
+ ]}).
+
+all() ->
+ [
+ {group, non_parallel_tests}
+ ].
+
+groups() ->
+ [
+ {non_parallel_tests, [], [
+ java
+ ]}
+ ].
+
+suite() ->
+ [{timetrap, {seconds, 600}}].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+merge_app_env(Config) ->
+ {ok, Ssl} = q(Config, [erlang_node_config, rabbit, ssl_options]),
+ Ssl1 = lists:keyreplace(fail_if_no_peer_cert, 1, Ssl, {fail_if_no_peer_cert, false}),
+ Config1 = rabbit_ct_helpers:merge_app_env(Config, {rabbit, [{ssl_options, Ssl1}]}),
+ rabbit_ct_helpers:merge_app_env(Config1, ?BASE_CONF_MQTT).
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, ?MODULE},
+ {rmq_certspwd, "bunnychow"},
+ {rmq_nodes_clustered, true},
+ {rmq_nodes_count, 3}
+ ]),
+ rabbit_ct_helpers:run_setup_steps(Config1,
+ [ fun merge_app_env/1 ] ++
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_group(_, Config) ->
+ Config.
+
+end_per_group(_, Config) ->
+ Config.
+
+init_per_testcase(Testcase, Config) ->
+ CertsDir = ?config(rmq_certsdir, Config),
+ CertFile = filename:join([CertsDir, "client", "cert.pem"]),
+ {ok, CertBin} = file:read_file(CertFile),
+ [{'Certificate', Cert, not_encrypted}] = public_key:pem_decode(CertBin),
+ UserBin = rabbit_ct_broker_helpers:rpc(Config, 0,
+ rabbit_ssl,
+ peer_cert_auth_name,
+ [Cert]),
+ User = binary_to_list(UserBin),
+ {ok,_} = rabbit_ct_broker_helpers:rabbitmqctl(Config, 0, ["add_user", User, ""]),
+ {ok, _} = rabbit_ct_broker_helpers:rabbitmqctl(Config, 0, ["set_permissions", "-p", "/", User, ".*", ".*", ".*"]),
+ {ok, _} = rabbit_ct_broker_helpers:rabbitmqctl(Config, 0,
+ ["set_topic_permissions", "-p", "/", "guest", "amq.topic",
+ % Write permission
+ "test-topic|test-retained-topic|{username}.{client_id}.a|^sp[AB]v\\d+___\\d+",
+ % Read permission
+ "test-topic|test-retained-topic|last-will|{username}.{client_id}.a|^sp[AB]v\\d+___\\d+"]),
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+
+%% -------------------------------------------------------------------
+%% Testsuite cases
+%% -------------------------------------------------------------------
+
+java(Config) ->
+ CertsDir = rabbit_ct_helpers:get_config(Config, rmq_certsdir),
+ MqttPort = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_mqtt),
+ MqttPort2 = rabbit_ct_broker_helpers:get_node_config(Config, 1, tcp_port_mqtt),
+ MqttPort3 = rabbit_ct_broker_helpers:get_node_config(Config, 2, tcp_port_mqtt),
+ MqttSslPort = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_mqtt_tls),
+ AmqpPort = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp),
+ os:putenv("SSL_CERTS_DIR", CertsDir),
+ os:putenv("MQTT_SSL_PORT", erlang:integer_to_list(MqttSslPort)),
+ os:putenv("MQTT_PORT", erlang:integer_to_list(MqttPort)),
+ os:putenv("MQTT_PORT_2", erlang:integer_to_list(MqttPort2)),
+ os:putenv("MQTT_PORT_3", erlang:integer_to_list(MqttPort3)),
+ os:putenv("AMQP_PORT", erlang:integer_to_list(AmqpPort)),
+ DataDir = rabbit_ct_helpers:get_config(Config, data_dir),
+ MakeResult = rabbit_ct_helpers:make(Config, DataDir, ["tests"]),
+ {ok, _} = MakeResult.
+
+rpc(Config, M, F, A) ->
+ rabbit_ct_broker_helpers:rpc(Config, 0, M, F, A).
+
+q(P, [K | Rem]) ->
+ case proplists:get_value(K, P) of
+ undefined -> undefined;
+ V -> q(V, Rem)
+ end;
+q(P, []) -> {ok, P}.
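+%% Minimal usage sketch for q/2 (a nested proplist lookup over the
+%% common_test config), with hypothetical keys for illustration:
+%%   q([{a, [{b, 1}]}], [a, b]) %% => {ok, 1}
+%%   q([{a, [{b, 1}]}], [a, x]) %% => undefined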
+
diff --git a/deps/rabbitmq_mqtt/test/java_SUITE_data/.gitignore b/deps/rabbitmq_mqtt/test/java_SUITE_data/.gitignore
new file mode 100644
index 0000000000..4c70cdb707
--- /dev/null
+++ b/deps/rabbitmq_mqtt/test/java_SUITE_data/.gitignore
@@ -0,0 +1,3 @@
+/build/
+/lib/
+/target/
diff --git a/deps/rabbitmq_mqtt/test/java_SUITE_data/.mvn/wrapper/MavenWrapperDownloader.java b/deps/rabbitmq_mqtt/test/java_SUITE_data/.mvn/wrapper/MavenWrapperDownloader.java
new file mode 100755
index 0000000000..2e394d5b34
--- /dev/null
+++ b/deps/rabbitmq_mqtt/test/java_SUITE_data/.mvn/wrapper/MavenWrapperDownloader.java
@@ -0,0 +1,110 @@
+/*
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ https://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an
+"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied. See the License for the
+specific language governing permissions and limitations
+under the License.
+*/
+
+import java.net.*;
+import java.io.*;
+import java.nio.channels.*;
+import java.util.Properties;
+
+public class MavenWrapperDownloader {
+
+ /**
+ * Default URL to download the maven-wrapper.jar from, if no 'wrapperUrl' override is provided.
+ */
+ private static final String DEFAULT_DOWNLOAD_URL =
+ "https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.4.2/maven-wrapper-0.4.2.jar";
+
+ /**
+ * Path to the maven-wrapper.properties file, which might contain a wrapperUrl property to
+ * use instead of the default one.
+ */
+ private static final String MAVEN_WRAPPER_PROPERTIES_PATH =
+ ".mvn/wrapper/maven-wrapper.properties";
+
+ /**
+ * Path where the maven-wrapper.jar will be saved to.
+ */
+ private static final String MAVEN_WRAPPER_JAR_PATH =
+ ".mvn/wrapper/maven-wrapper.jar";
+
+ /**
+ * Name of the property which should be used to override the default download url for the wrapper.
+ */
+ private static final String PROPERTY_NAME_WRAPPER_URL = "wrapperUrl";
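+
+ /*
+ * Example override in .mvn/wrapper/maven-wrapper.properties (the mirror
+ * URL below is hypothetical, shown only to illustrate the format):
+ *
+ * wrapperUrl=https://mirror.example.com/maven2/io/takari/maven-wrapper/0.4.2/maven-wrapper-0.4.2.jar
+ */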
+
+ public static void main(String args[]) {
+ System.out.println("- Downloader started");
+ File baseDirectory = new File(args[0]);
+ System.out.println("- Using base directory: " + baseDirectory.getAbsolutePath());
+
+ // If the maven-wrapper.properties exists, read it and check if it contains a custom
+ // wrapperUrl parameter.
+ File mavenWrapperPropertyFile = new File(baseDirectory, MAVEN_WRAPPER_PROPERTIES_PATH);
+ String url = DEFAULT_DOWNLOAD_URL;
+ if(mavenWrapperPropertyFile.exists()) {
+ FileInputStream mavenWrapperPropertyFileInputStream = null;
+ try {
+ mavenWrapperPropertyFileInputStream = new FileInputStream(mavenWrapperPropertyFile);
+ Properties mavenWrapperProperties = new Properties();
+ mavenWrapperProperties.load(mavenWrapperPropertyFileInputStream);
+ url = mavenWrapperProperties.getProperty(PROPERTY_NAME_WRAPPER_URL, url);
+ } catch (IOException e) {
+ System.out.println("- ERROR loading '" + MAVEN_WRAPPER_PROPERTIES_PATH + "'");
+ } finally {
+ try {
+ if(mavenWrapperPropertyFileInputStream != null) {
+ mavenWrapperPropertyFileInputStream.close();
+ }
+ } catch (IOException e) {
+ // Ignore ...
+ }
+ }
+ }
+ System.out.println("- Downloading from: : " + url);
+
+ File outputFile = new File(baseDirectory.getAbsolutePath(), MAVEN_WRAPPER_JAR_PATH);
+ if(!outputFile.getParentFile().exists()) {
+ if(!outputFile.getParentFile().mkdirs()) {
+ System.out.println(
+ "- ERROR creating output direcrory '" + outputFile.getParentFile().getAbsolutePath() + "'");
+ }
+ }
+ System.out.println("- Downloading to: " + outputFile.getAbsolutePath());
+ try {
+ downloadFileFromURL(url, outputFile);
+ System.out.println("Done");
+ System.exit(0);
+ } catch (Throwable e) {
+ System.out.println("- Error downloading");
+ e.printStackTrace();
+ System.exit(1);
+ }
+ }
+
+ private static void downloadFileFromURL(String urlString, File destination) throws Exception {
+ URL website = new URL(urlString);
+ ReadableByteChannel rbc;
+ rbc = Channels.newChannel(website.openStream());
+ FileOutputStream fos = new FileOutputStream(destination);
+ fos.getChannel().transferFrom(rbc, 0, Long.MAX_VALUE);
+ fos.close();
+ rbc.close();
+ }
+
+}
diff --git a/deps/rabbitmq_mqtt/test/java_SUITE_data/.mvn/wrapper/maven-wrapper.jar b/deps/rabbitmq_mqtt/test/java_SUITE_data/.mvn/wrapper/maven-wrapper.jar
new file mode 100755
index 0000000000..01e6799737
--- /dev/null
+++ b/deps/rabbitmq_mqtt/test/java_SUITE_data/.mvn/wrapper/maven-wrapper.jar
Binary files differ
diff --git a/deps/rabbitmq_mqtt/test/java_SUITE_data/.mvn/wrapper/maven-wrapper.properties b/deps/rabbitmq_mqtt/test/java_SUITE_data/.mvn/wrapper/maven-wrapper.properties
new file mode 100755
index 0000000000..00d32aab1d
--- /dev/null
+++ b/deps/rabbitmq_mqtt/test/java_SUITE_data/.mvn/wrapper/maven-wrapper.properties
@@ -0,0 +1 @@
+distributionUrl=https://repo.maven.apache.org/maven2/org/apache/maven/apache-maven/3.5.4/apache-maven-3.5.4-bin.zip
\ No newline at end of file
diff --git a/deps/rabbitmq_mqtt/test/java_SUITE_data/Makefile b/deps/rabbitmq_mqtt/test/java_SUITE_data/Makefile
new file mode 100644
index 0000000000..e2f9748eb2
--- /dev/null
+++ b/deps/rabbitmq_mqtt/test/java_SUITE_data/Makefile
@@ -0,0 +1,27 @@
+export PATH :=$(CURDIR):$(PATH)
+HOSTNAME := $(shell hostname)
+MVN_FLAGS += -Ddeps.dir="$(abspath $(DEPS_DIR))" \
+ -Dhostname=$(HOSTNAME) \
+ -Dcerts.dir=$(SSL_CERTS_DIR) \
+ -Dmqtt.ssl.port=$(MQTT_SSL_PORT) \
+ -Dmqtt.port=$(MQTT_PORT) \
+ -Dmqtt.port.2=$(MQTT_PORT_2) \
+ -Dmqtt.port.3=$(MQTT_PORT_3) \
+ -Damqp.port=$(AMQP_PORT)
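+# The -D flags above are consumed by the maven-surefire-plugin
+# systemPropertyVariables section in pom.xml; MQTT_PORT and the other
+# variables are exported into the environment by java_SUITE.erl before it
+# runs "make tests".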
+
+.PHONY: deps tests clean distclean
+
+deps:
+ mkdir -p lib
+ @mvnw dependency:copy-dependencies -DoutputDirectory=lib
+
+tests:
+ # Note: to run a single test
+ # @mvnw -q $(MVN_FLAGS) -Dtest=MqttTest#subscribeMultiple test
+ @mvnw -q $(MVN_FLAGS) test
+
+clean:
+ @mvnw clean
+
+distclean: clean
+ rm -f lib/*.jar
diff --git a/deps/rabbitmq_mqtt/test/java_SUITE_data/mvnw b/deps/rabbitmq_mqtt/test/java_SUITE_data/mvnw
new file mode 100755
index 0000000000..8b9da3b8b6
--- /dev/null
+++ b/deps/rabbitmq_mqtt/test/java_SUITE_data/mvnw
@@ -0,0 +1,286 @@
+#!/bin/sh
+# ----------------------------------------------------------------------------
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+# ----------------------------------------------------------------------------
+
+# ----------------------------------------------------------------------------
+# Maven2 Start Up Batch script
+#
+# Required ENV vars:
+# ------------------
+# JAVA_HOME - location of a JDK home dir
+#
+# Optional ENV vars
+# -----------------
+# M2_HOME - location of maven2's installed home dir
+# MAVEN_OPTS - parameters passed to the Java VM when running Maven
+# e.g. to debug Maven itself, use
+# set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000
+# MAVEN_SKIP_RC - flag to disable loading of mavenrc files
+# ----------------------------------------------------------------------------
+
+if [ -z "$MAVEN_SKIP_RC" ] ; then
+
+ if [ -f /etc/mavenrc ] ; then
+ . /etc/mavenrc
+ fi
+
+ if [ -f "$HOME/.mavenrc" ] ; then
+ . "$HOME/.mavenrc"
+ fi
+
+fi
+
+# OS specific support. $var _must_ be set to either true or false.
+cygwin=false;
+darwin=false;
+mingw=false
+case "`uname`" in
+ CYGWIN*) cygwin=true ;;
+ MINGW*) mingw=true;;
+ Darwin*) darwin=true
+ # Use /usr/libexec/java_home if available, otherwise fall back to /Library/Java/Home
+ # See https://developer.apple.com/library/mac/qa/qa1170/_index.html
+ if [ -z "$JAVA_HOME" ]; then
+ if [ -x "/usr/libexec/java_home" ]; then
+ export JAVA_HOME="`/usr/libexec/java_home`"
+ else
+ export JAVA_HOME="/Library/Java/Home"
+ fi
+ fi
+ ;;
+esac
+
+if [ -z "$JAVA_HOME" ] ; then
+ if [ -r /etc/gentoo-release ] ; then
+ JAVA_HOME=`java-config --jre-home`
+ fi
+fi
+
+if [ -z "$M2_HOME" ] ; then
+ ## resolve links - $0 may be a link to maven's home
+ PRG="$0"
+
+ # need this for relative symlinks
+ while [ -h "$PRG" ] ; do
+ ls=`ls -ld "$PRG"`
+ link=`expr "$ls" : '.*-> \(.*\)$'`
+ if expr "$link" : '/.*' > /dev/null; then
+ PRG="$link"
+ else
+ PRG="`dirname "$PRG"`/$link"
+ fi
+ done
+
+ saveddir=`pwd`
+
+ M2_HOME=`dirname "$PRG"`/..
+
+ # make it fully qualified
+ M2_HOME=`cd "$M2_HOME" && pwd`
+
+ cd "$saveddir"
+ # echo Using m2 at $M2_HOME
+fi
+
+# For Cygwin, ensure paths are in UNIX format before anything is touched
+if $cygwin ; then
+ [ -n "$M2_HOME" ] &&
+ M2_HOME=`cygpath --unix "$M2_HOME"`
+ [ -n "$JAVA_HOME" ] &&
+ JAVA_HOME=`cygpath --unix "$JAVA_HOME"`
+ [ -n "$CLASSPATH" ] &&
+ CLASSPATH=`cygpath --path --unix "$CLASSPATH"`
+fi
+
+# For Mingw, ensure paths are in UNIX format before anything is touched
+if $mingw ; then
+ [ -n "$M2_HOME" ] &&
+ M2_HOME="`(cd "$M2_HOME"; pwd)`"
+ [ -n "$JAVA_HOME" ] &&
+ JAVA_HOME="`(cd "$JAVA_HOME"; pwd)`"
+ # TODO classpath?
+fi
+
+if [ -z "$JAVA_HOME" ]; then
+ javaExecutable="`which javac`"
+ if [ -n "$javaExecutable" ] && ! [ "`expr \"$javaExecutable\" : '\([^ ]*\)'`" = "no" ]; then
+ # readlink(1) is not available as standard on Solaris 10.
+ readLink=`which readlink`
+ if [ ! `expr "$readLink" : '\([^ ]*\)'` = "no" ]; then
+ if $darwin ; then
+ javaHome="`dirname \"$javaExecutable\"`"
+ javaExecutable="`cd \"$javaHome\" && pwd -P`/javac"
+ else
+ javaExecutable="`readlink -f \"$javaExecutable\"`"
+ fi
+ javaHome="`dirname \"$javaExecutable\"`"
+ javaHome=`expr "$javaHome" : '\(.*\)/bin'`
+ JAVA_HOME="$javaHome"
+ export JAVA_HOME
+ fi
+ fi
+fi
+
+if [ -z "$JAVACMD" ] ; then
+ if [ -n "$JAVA_HOME" ] ; then
+ if [ -x "$JAVA_HOME/jre/sh/java" ] ; then
+ # IBM's JDK on AIX uses strange locations for the executables
+ JAVACMD="$JAVA_HOME/jre/sh/java"
+ else
+ JAVACMD="$JAVA_HOME/bin/java"
+ fi
+ else
+ JAVACMD="`which java`"
+ fi
+fi
+
+if [ ! -x "$JAVACMD" ] ; then
+ echo "Error: JAVA_HOME is not defined correctly." >&2
+ echo " We cannot execute $JAVACMD" >&2
+ exit 1
+fi
+
+if [ -z "$JAVA_HOME" ] ; then
+ echo "Warning: JAVA_HOME environment variable is not set."
+fi
+
+CLASSWORLDS_LAUNCHER=org.codehaus.plexus.classworlds.launcher.Launcher
+
+# traverses directory structure from process work directory to filesystem root
+# first directory with .mvn subdirectory is considered project base directory
+find_maven_basedir() {
+
+ if [ -z "$1" ]
+ then
+ echo "Path not specified to find_maven_basedir"
+ return 1
+ fi
+
+ basedir="$1"
+ wdir="$1"
+ while [ "$wdir" != '/' ] ; do
+ if [ -d "$wdir"/.mvn ] ; then
+ basedir=$wdir
+ break
+ fi
+ # workaround for JBEAP-8937 (on Solaris 10/Sparc)
+ if [ -d "${wdir}" ]; then
+ wdir=`cd "$wdir/.."; pwd`
+ fi
+ # end of workaround
+ done
+ echo "${basedir}"
+}
+
+# concatenates all lines of a file
+concat_lines() {
+ if [ -f "$1" ]; then
+ echo "$(tr -s '\n' ' ' < "$1")"
+ fi
+}
+
+BASE_DIR=`find_maven_basedir "$(pwd)"`
+if [ -z "$BASE_DIR" ]; then
+ exit 1;
+fi
+
+##########################################################################################
+# Extension to allow automatically downloading the maven-wrapper.jar from Maven-central
+# This allows using the maven wrapper in projects that prohibit checking in binary data.
+##########################################################################################
+if [ -r "$BASE_DIR/.mvn/wrapper/maven-wrapper.jar" ]; then
+ if [ "$MVNW_VERBOSE" = true ]; then
+ echo "Found .mvn/wrapper/maven-wrapper.jar"
+ fi
+else
+ if [ "$MVNW_VERBOSE" = true ]; then
+ echo "Couldn't find .mvn/wrapper/maven-wrapper.jar, downloading it ..."
+ fi
+ jarUrl="https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.4.2/maven-wrapper-0.4.2.jar"
+ while IFS="=" read key value; do
+ case "$key" in (wrapperUrl) jarUrl="$value"; break ;;
+ esac
+ done < "$BASE_DIR/.mvn/wrapper/maven-wrapper.properties"
+ if [ "$MVNW_VERBOSE" = true ]; then
+ echo "Downloading from: $jarUrl"
+ fi
+ wrapperJarPath="$BASE_DIR/.mvn/wrapper/maven-wrapper.jar"
+
+ if command -v wget > /dev/null; then
+ if [ "$MVNW_VERBOSE" = true ]; then
+ echo "Found wget ... using wget"
+ fi
+ wget "$jarUrl" -O "$wrapperJarPath"
+ elif command -v curl > /dev/null; then
+ if [ "$MVNW_VERBOSE" = true ]; then
+ echo "Found curl ... using curl"
+ fi
+ curl -o "$wrapperJarPath" "$jarUrl"
+ else
+ if [ "$MVNW_VERBOSE" = true ]; then
+ echo "Falling back to using Java to download"
+ fi
+ javaClass="$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.java"
+ if [ -e "$javaClass" ]; then
+ if [ ! -e "$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.class" ]; then
+ if [ "$MVNW_VERBOSE" = true ]; then
+ echo " - Compiling MavenWrapperDownloader.java ..."
+ fi
+ # Compiling the Java class
+ ("$JAVA_HOME/bin/javac" "$javaClass")
+ fi
+ if [ -e "$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.class" ]; then
+ # Running the downloader
+ if [ "$MVNW_VERBOSE" = true ]; then
+ echo " - Running MavenWrapperDownloader.java ..."
+ fi
+ ("$JAVA_HOME/bin/java" -cp .mvn/wrapper MavenWrapperDownloader "$MAVEN_PROJECTBASEDIR")
+ fi
+ fi
+ fi
+fi
+##########################################################################################
+# End of extension
+##########################################################################################
+
+export MAVEN_PROJECTBASEDIR=${MAVEN_BASEDIR:-"$BASE_DIR"}
+if [ "$MVNW_VERBOSE" = true ]; then
+ echo $MAVEN_PROJECTBASEDIR
+fi
+MAVEN_OPTS="$(concat_lines "$MAVEN_PROJECTBASEDIR/.mvn/jvm.config") $MAVEN_OPTS"
+
+# For Cygwin, switch paths to Windows format before running java
+if $cygwin; then
+ [ -n "$M2_HOME" ] &&
+ M2_HOME=`cygpath --path --windows "$M2_HOME"`
+ [ -n "$JAVA_HOME" ] &&
+ JAVA_HOME=`cygpath --path --windows "$JAVA_HOME"`
+ [ -n "$CLASSPATH" ] &&
+ CLASSPATH=`cygpath --path --windows "$CLASSPATH"`
+ [ -n "$MAVEN_PROJECTBASEDIR" ] &&
+ MAVEN_PROJECTBASEDIR=`cygpath --path --windows "$MAVEN_PROJECTBASEDIR"`
+fi
+
+WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain
+
+exec "$JAVACMD" \
+ $MAVEN_OPTS \
+ -classpath "$MAVEN_PROJECTBASEDIR/.mvn/wrapper/maven-wrapper.jar" \
+ "-Dmaven.home=${M2_HOME}" "-Dmaven.multiModuleProjectDirectory=${MAVEN_PROJECTBASEDIR}" \
+ ${WRAPPER_LAUNCHER} $MAVEN_CONFIG "$@"
diff --git a/deps/rabbitmq_mqtt/test/java_SUITE_data/mvnw.cmd b/deps/rabbitmq_mqtt/test/java_SUITE_data/mvnw.cmd
new file mode 100755
index 0000000000..a5284c7939
--- /dev/null
+++ b/deps/rabbitmq_mqtt/test/java_SUITE_data/mvnw.cmd
@@ -0,0 +1,161 @@
+@REM ----------------------------------------------------------------------------
+@REM Licensed to the Apache Software Foundation (ASF) under one
+@REM or more contributor license agreements. See the NOTICE file
+@REM distributed with this work for additional information
+@REM regarding copyright ownership. The ASF licenses this file
+@REM to you under the Apache License, Version 2.0 (the
+@REM "License"); you may not use this file except in compliance
+@REM with the License. You may obtain a copy of the License at
+@REM
+@REM https://www.apache.org/licenses/LICENSE-2.0
+@REM
+@REM Unless required by applicable law or agreed to in writing,
+@REM software distributed under the License is distributed on an
+@REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+@REM KIND, either express or implied. See the License for the
+@REM specific language governing permissions and limitations
+@REM under the License.
+@REM ----------------------------------------------------------------------------
+
+@REM ----------------------------------------------------------------------------
+@REM Maven2 Start Up Batch script
+@REM
+@REM Required ENV vars:
+@REM JAVA_HOME - location of a JDK home dir
+@REM
+@REM Optional ENV vars
+@REM M2_HOME - location of maven2's installed home dir
+@REM MAVEN_BATCH_ECHO - set to 'on' to enable the echoing of the batch commands
+@REM MAVEN_BATCH_PAUSE - set to 'on' to wait for a key stroke before ending
+@REM MAVEN_OPTS - parameters passed to the Java VM when running Maven
+@REM e.g. to debug Maven itself, use
+@REM set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000
+@REM MAVEN_SKIP_RC - flag to disable loading of mavenrc files
+@REM ----------------------------------------------------------------------------
+
+@REM Begin all REM lines with '@' in case MAVEN_BATCH_ECHO is 'on'
+@echo off
+@REM set title of command window
+title %0
+@REM enable echoing by setting MAVEN_BATCH_ECHO to 'on'
+@if "%MAVEN_BATCH_ECHO%" == "on" echo %MAVEN_BATCH_ECHO%
+
+@REM set %HOME% to equivalent of $HOME
+if "%HOME%" == "" (set "HOME=%HOMEDRIVE%%HOMEPATH%")
+
+@REM Execute a user defined script before this one
+if not "%MAVEN_SKIP_RC%" == "" goto skipRcPre
+@REM check for pre script, once with legacy .bat ending and once with .cmd ending
+if exist "%HOME%\mavenrc_pre.bat" call "%HOME%\mavenrc_pre.bat"
+if exist "%HOME%\mavenrc_pre.cmd" call "%HOME%\mavenrc_pre.cmd"
+:skipRcPre
+
+@setlocal
+
+set ERROR_CODE=0
+
+@REM To isolate internal variables from possible post scripts, we use another setlocal
+@setlocal
+
+@REM ==== START VALIDATION ====
+if not "%JAVA_HOME%" == "" goto OkJHome
+
+echo.
+echo Error: JAVA_HOME not found in your environment. >&2
+echo Please set the JAVA_HOME variable in your environment to match the >&2
+echo location of your Java installation. >&2
+echo.
+goto error
+
+:OkJHome
+if exist "%JAVA_HOME%\bin\java.exe" goto init
+
+echo.
+echo Error: JAVA_HOME is set to an invalid directory. >&2
+echo JAVA_HOME = "%JAVA_HOME%" >&2
+echo Please set the JAVA_HOME variable in your environment to match the >&2
+echo location of your Java installation. >&2
+echo.
+goto error
+
+@REM ==== END VALIDATION ====
+
+:init
+
+@REM Find the project base dir, i.e. the directory that contains the folder ".mvn".
+@REM Fallback to current working directory if not found.
+
+set MAVEN_PROJECTBASEDIR=%MAVEN_BASEDIR%
+IF NOT "%MAVEN_PROJECTBASEDIR%"=="" goto endDetectBaseDir
+
+set EXEC_DIR=%CD%
+set WDIR=%EXEC_DIR%
+:findBaseDir
+IF EXIST "%WDIR%"\.mvn goto baseDirFound
+cd ..
+IF "%WDIR%"=="%CD%" goto baseDirNotFound
+set WDIR=%CD%
+goto findBaseDir
+
+:baseDirFound
+set MAVEN_PROJECTBASEDIR=%WDIR%
+cd "%EXEC_DIR%"
+goto endDetectBaseDir
+
+:baseDirNotFound
+set MAVEN_PROJECTBASEDIR=%EXEC_DIR%
+cd "%EXEC_DIR%"
+
+:endDetectBaseDir
+
+IF NOT EXIST "%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config" goto endReadAdditionalConfig
+
+@setlocal EnableExtensions EnableDelayedExpansion
+for /F "usebackq delims=" %%a in ("%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config") do set JVM_CONFIG_MAVEN_PROPS=!JVM_CONFIG_MAVEN_PROPS! %%a
+@endlocal & set JVM_CONFIG_MAVEN_PROPS=%JVM_CONFIG_MAVEN_PROPS%
+
+:endReadAdditionalConfig
+
+SET MAVEN_JAVA_EXE="%JAVA_HOME%\bin\java.exe"
+set WRAPPER_JAR="%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.jar"
+set WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain
+
+set DOWNLOAD_URL="https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.4.2/maven-wrapper-0.4.2.jar"
+FOR /F "tokens=1,2 delims==" %%A IN (%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.properties) DO (
+ IF "%%A"=="wrapperUrl" SET DOWNLOAD_URL=%%B
+)
+
+@REM Extension to allow automatically downloading the maven-wrapper.jar from Maven-central
+@REM This allows using the maven wrapper in projects that prohibit checking in binary data.
+if exist %WRAPPER_JAR% (
+ echo Found %WRAPPER_JAR%
+) else (
+ echo Couldn't find %WRAPPER_JAR%, downloading it ...
+ echo Downloading from: %DOWNLOAD_URL%
+ powershell -Command "(New-Object Net.WebClient).DownloadFile('%DOWNLOAD_URL%', '%WRAPPER_JAR%')"
+ echo Finished downloading %WRAPPER_JAR%
+)
+@REM End of extension
+
+%MAVEN_JAVA_EXE% %JVM_CONFIG_MAVEN_PROPS% %MAVEN_OPTS% %MAVEN_DEBUG_OPTS% -classpath %WRAPPER_JAR% "-Dmaven.multiModuleProjectDirectory=%MAVEN_PROJECTBASEDIR%" %WRAPPER_LAUNCHER% %MAVEN_CONFIG% %*
+if ERRORLEVEL 1 goto error
+goto end
+
+:error
+set ERROR_CODE=1
+
+:end
+@endlocal & set ERROR_CODE=%ERROR_CODE%
+
+if not "%MAVEN_SKIP_RC%" == "" goto skipRcPost
+@REM check for post script, once with legacy .bat ending and once with .cmd ending
+if exist "%HOME%\mavenrc_post.bat" call "%HOME%\mavenrc_post.bat"
+if exist "%HOME%\mavenrc_post.cmd" call "%HOME%\mavenrc_post.cmd"
+:skipRcPost
+
+@REM pause the script if MAVEN_BATCH_PAUSE is set to 'on'
+if "%MAVEN_BATCH_PAUSE%" == "on" pause
+
+if "%MAVEN_TERMINATE_CMD%" == "on" exit %ERROR_CODE%
+
+exit /B %ERROR_CODE%
diff --git a/deps/rabbitmq_mqtt/test/java_SUITE_data/pom.xml b/deps/rabbitmq_mqtt/test/java_SUITE_data/pom.xml
new file mode 100644
index 0000000000..b27b58c172
--- /dev/null
+++ b/deps/rabbitmq_mqtt/test/java_SUITE_data/pom.xml
@@ -0,0 +1,137 @@
+<?xml version="1.0"?>
+<project xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd"
+ xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
+ <modelVersion>4.0.0</modelVersion>
+
+ <groupId>com.rabbitmq</groupId>
+ <artifactId>amqp-client-mqtt</artifactId>
+ <version>3.8.0-SNAPSHOT</version>
+ <packaging>jar</packaging>
+
+ <name>RabbitMQ MQTT plugin dependencies list</name>
+ <description>Fetches test dependencies only.</description>
+ <url>https://www.rabbitmq.com</url>
+
+ <dependencies>
+ <dependency>
+ <groupId>org.eclipse.paho</groupId>
+ <artifactId>org.eclipse.paho.client.mqttv3</artifactId>
+ <version>[1.2.1,)</version>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>com.rabbitmq</groupId>
+ <artifactId>amqp-client</artifactId>
+ <version>5.7.3</version>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.junit.jupiter</groupId>
+ <artifactId>junit-jupiter</artifactId>
+ <version>5.5.2</version>
+ <scope>test</scope>
+ </dependency>
+ </dependencies>
+
+ <properties>
+ <test-keystore.ca>${project.build.directory}/ca.keystore</test-keystore.ca>
+ <test-keystore.password>bunnychow</test-keystore.password>
+ <groovy-scripts.dir>${basedir}/src/test/scripts</groovy-scripts.dir>
+ </properties>
+
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-surefire-plugin</artifactId>
+ <version>2.22.2</version>
+ <configuration>
+ <environmentVariables>
+ <DEPS_DIR>${deps.dir}</DEPS_DIR>
+ </environmentVariables>
+ <systemPropertyVariables>
+ <hostname>${hostname}</hostname>
+ <certs.dir>${certs.dir}</certs.dir>
+ <mqtt.ssl.port>${mqtt.ssl.port}</mqtt.ssl.port>
+ <mqtt.port>${mqtt.port}</mqtt.port>
+ <mqtt.port.2>${mqtt.port.2}</mqtt.port.2>
+ <amqp.port>${amqp.port}</amqp.port>
+
+ <test-keystore.ca>${test-keystore.ca}</test-keystore.ca>
+ <test-keystore.password>${test-keystore.password}</test-keystore.password>
+ <test-client-cert.path>${certs.dir}/client/keycert.p12</test-client-cert.path>
+ <test-client-cert.password>bunnychow</test-client-cert.password>
+
+ </systemPropertyVariables>
+ <!--
+ needed because of bug in OpenJDK 8 u181 on Debian distros
+ see https://stackoverflow.com/questions/53010200/maven-surefire-could-not-find-forkedbooter-class
+ -->
+ <argLine>-Djdk.net.URLClassPath.disableClassPathURLCheck=true</argLine>
+ </configuration>
+ </plugin>
+ <plugin>
+ <groupId>org.codehaus.gmaven</groupId>
+ <artifactId>groovy-maven-plugin</artifactId>
+ <version>2.1.1</version>
+ <dependencies>
+ <dependency>
+ <groupId>org.codehaus.groovy</groupId>
+ <artifactId>groovy-all</artifactId>
+ <version>2.4.17</version>
+ </dependency>
+ </dependencies>
+ <executions>
+ <execution>
+ <phase>generate-test-resources</phase>
+ <id>remove-old-test-keystores</id>
+ <goals>
+ <goal>execute</goal>
+ </goals>
+ <configuration>
+ <source>
+ ${groovy-scripts.dir}/remove_old_test_keystores.groovy
+ </source>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+
+ <plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>keytool-maven-plugin</artifactId>
+ <version>1.5</version>
+ <executions>
+ <execution>
+ <id>generate-test-ca-keystore</id>
+ <phase>generate-test-resources</phase>
+ <goals>
+ <goal>importCertificate</goal>
+ </goals>
+ <configuration>
+ <file>${certs.dir}/testca/cacert.pem</file>
+ <keystore>${test-keystore.ca}</keystore>
+ <storepass>${test-keystore.password}</storepass>
+ <noprompt>true</noprompt>
+ <alias>server1</alias>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+
+ <plugin>
+ <artifactId>maven-compiler-plugin</artifactId>
+ <version>3.8.1</version>
+ <configuration>
+ <source>1.8</source>
+ <target>1.8</target>
+ <compilerArgs>
+ <arg>-Xlint:deprecation</arg>
+ <arg>-Xlint:unchecked</arg>
+ </compilerArgs>
+ </configuration>
+ </plugin>
+
+ </plugins>
+ </build>
+</project>
diff --git a/deps/rabbitmq_mqtt/test/java_SUITE_data/src/test.config b/deps/rabbitmq_mqtt/test/java_SUITE_data/src/test.config
new file mode 100644
index 0000000000..3d6bafff86
--- /dev/null
+++ b/deps/rabbitmq_mqtt/test/java_SUITE_data/src/test.config
@@ -0,0 +1,14 @@
+[{rabbitmq_mqtt, [
+ {ssl_cert_login, true},
+ {allow_anonymous, true},
+ {tcp_listeners, [1883]},
+ {ssl_listeners, [8883]}
+ ]},
+ {rabbit, [{ssl_options, [{cacertfile,"%%CERTS_DIR%%/testca/cacert.pem"},
+ {certfile,"%%CERTS_DIR%%/server/cert.pem"},
+ {keyfile,"%%CERTS_DIR%%/server/key.pem"},
+ {verify,verify_peer},
+ {fail_if_no_peer_cert,false}
+ ]}
+ ]}
+].
diff --git a/deps/rabbitmq_mqtt/test/java_SUITE_data/src/test/java/com/rabbitmq/mqtt/test/MqttTest.java b/deps/rabbitmq_mqtt/test/java_SUITE_data/src/test/java/com/rabbitmq/mqtt/test/MqttTest.java
new file mode 100644
index 0000000000..24c4a0be14
--- /dev/null
+++ b/deps/rabbitmq_mqtt/test/java_SUITE_data/src/test/java/com/rabbitmq/mqtt/test/MqttTest.java
@@ -0,0 +1,1030 @@
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at https://mozilla.org/MPL/2.0/.
+//
+// Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+//
+
+package com.rabbitmq.mqtt.test;
+
+import com.rabbitmq.client.*;
+import org.eclipse.paho.client.mqttv3.*;
+import org.eclipse.paho.client.mqttv3.internal.NetworkModule;
+import org.eclipse.paho.client.mqttv3.internal.TCPNetworkModule;
+import org.eclipse.paho.client.mqttv3.internal.wire.MqttPingReq;
+import org.eclipse.paho.client.mqttv3.internal.wire.MqttWireMessage;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.TestInfo;
+
+import javax.net.SocketFactory;
+import java.io.*;
+import java.net.InetAddress;
+import java.net.Socket;
+import java.nio.charset.StandardCharsets;
+import java.time.Duration;
+import java.time.temporal.ChronoUnit;
+import java.util.*;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.function.BooleanSupplier;
+
+import static org.junit.jupiter.api.Assertions.*;
+
+/**
+ * MQTT v3.1 tests.
+ */
+
+public class MqttTest implements MqttCallback {
+
+ private static final Duration EXPECT_TIMEOUT = Duration.ofSeconds(10);
+
+ private final String host = "localhost";
+ private final String brokerUrl = "tcp://" + host + ":" + getPort();
+ private final String brokerThreeUrl = "tcp://" + host + ":" + getThirdPort();
+ private volatile List<MqttMessage> receivedMessages;
+
+ private final byte[] payload = "payload".getBytes();
+ private final String topic = "test-topic";
+ private final String retainedTopic = "test-retained-topic";
+ private int testDelay = 2000;
+
+ private volatile long lastReceipt;
+ private volatile boolean expectConnectionFailure;
+ private volatile boolean failOnDelivery = false;
+
+ private Connection conn;
+ private Channel ch;
+
+ private static int getPort() {
+ Object port = System.getProperty("mqtt.port", "1883");
+ assertNotNull(port);
+ return Integer.parseInt(port.toString());
+ }
+
+ private static int getThirdPort() {
+ Object port = System.getProperty("mqtt.port.3", "1883");
+ assertNotNull(port);
+ return Integer.parseInt(port.toString());
+ }
+
+ private static int getAmqpPort() {
+ Object port = System.getProperty("amqp.port", "5672");
+ assertNotNull(port);
+ return Integer.parseInt(port.toString());
+ }
+
+ // override the 10s limit
+ private class TestMqttConnectOptions extends MqttConnectOptions {
+ private int keepAliveInterval = 60;
+ private final String user_name = "guest";
+ private final String password = "guest";
+
+ public TestMqttConnectOptions() {
+ super.setUserName(user_name);
+ super.setPassword(password.toCharArray());
+ super.setCleanSession(true);
+ super.setKeepAliveInterval(60);
+ // PublishMultiple overwhelms Paho defaults
+ super.setMaxInflight(15000);
+ }
+
+ @Override
+ public void setKeepAliveInterval(int keepAliveInterval) {
+ this.keepAliveInterval = keepAliveInterval;
+ }
+
+ @Override
+ public int getKeepAliveInterval() {
+ return this.keepAliveInterval;
+ }
+ }
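+
+ // Usage sketch: tests build these options (guest/guest, clean session, a
+ // large in-flight window) and may still tighten the keep-alive afterwards,
+ // e.g. ping() calls setKeepAliveInterval(1) before connecting.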
+
+ private MqttClient newClient(TestInfo testInfo) throws MqttException {
+ return newClient(clientId(testInfo));
+ }
+
+ private MqttClient newClient(String client_id) throws MqttException {
+ return newClient(brokerUrl, client_id);
+ }
+
+ private MqttClient newClient(String uri, TestInfo testInfo) throws MqttException {
+ return newClient(uri, clientId(testInfo));
+ }
+
+ private MqttClient newClient(String uri, String client_id) throws MqttException {
+ return new MqttClient(uri, client_id, null);
+ }
+
+ private MqttClient newConnectedClient(TestInfo testInfo, MqttConnectOptions conOpt) throws MqttException {
+ return newConnectedClient(clientId(testInfo), conOpt);
+ }
+
+ private MqttClient newConnectedClient(String client_id, MqttConnectOptions conOpt) throws MqttException {
+ MqttClient client = newClient(brokerUrl, client_id);
+ client.connect(conOpt);
+ return client;
+ }
+
+ private static String clientId(TestInfo info) {
+ return "test-" + info.getTestMethod().get().getName();
+ }
+
+ private void disconnect(MqttClient client) {
+ try {
+ if (client.isConnected()) {
+ client.disconnect(5000);
+ }
+ } catch (Exception ignored) {}
+ }
+
+ @BeforeEach
+ public void setUp() {
+ receivedMessages = Collections.synchronizedList(new ArrayList<>());
+ expectConnectionFailure = false;
+ }
+
+ @AfterEach
+ public void tearDown() {
+ // clean any sticky sessions
+ receivedMessages.clear();
+ }
+
+ private void setUpAmqp() throws IOException, TimeoutException {
+ int port = getAmqpPort();
+ ConnectionFactory cf = new ConnectionFactory();
+ cf.setHost(host);
+ cf.setPort(port);
+ conn = cf.newConnection();
+ ch = conn.createChannel();
+ }
+
+ private void tearDownAmqp() throws IOException {
+ if (conn.isOpen()) {
+ conn.close();
+ }
+ }
+
+ @Test
+ public void connectFirst() throws MqttException, IOException {
+ NetworkModule networkModule = new TCPNetworkModule(SocketFactory.getDefault(), host, getPort(), "");
+ networkModule.start();
+ DataInputStream in = new DataInputStream(networkModule.getInputStream());
+ OutputStream out = networkModule.getOutputStream();
+
+ MqttWireMessage message = new MqttPingReq();
+
+ try {
+ // ---8<---
+ // Copy/pasted from write() in MqttOutputStream.java.
+ byte[] bytes = message.getHeader();
+ byte[] pl = message.getPayload();
+ out.write(bytes,0,bytes.length);
+
+ int offset = 0;
+ int chunkSize = 1024;
+ while (offset < pl.length) {
+ int length = Math.min(chunkSize, pl.length - offset);
+ out.write(pl, offset, length);
+ offset += chunkSize;
+ }
+ // ---8<---
+
+ // ---8<---
+ // Copy/pasted from flush() in MqttOutputStream.java.
+ out.flush();
+ // ---8<---
+
+ // ---8<---
+ // Copy/pasted from readMqttWireMessage() in MqttInputStream.java.
+ ByteArrayOutputStream bais = new ByteArrayOutputStream();
+ byte first = in.readByte();
+ // ---8<---
+
+ fail("Error expected if CONNECT is not first packet");
+ } catch (IOException ignored) {}
+ }
+
+ @Test public void invalidUser(TestInfo info) throws MqttException {
+ MqttConnectOptions client_opts = new TestMqttConnectOptions();
+ client_opts.setUserName("invalid-user");
+ MqttClient client = newClient(info);
+ try {
+ client.connect(client_opts);
+ fail("Authentication failure expected");
+ } catch (MqttException ex) {
+ assertEquals(MqttException.REASON_CODE_FAILED_AUTHENTICATION, ex.getReasonCode());
+ } finally {
+ if (client.isConnected()) {
+ disconnect(client);
+ }
+ }
+ }
+
+ // rabbitmq/rabbitmq-mqtt#37: QoS 1, clean session = false
+ @Test public void qos1AndCleanSessionUnset()
+ throws MqttException, IOException, TimeoutException, InterruptedException {
+ testQueuePropertiesWithCleanSessionUnset("qos1-no-clean-session", 1, true, false);
+ }
+
+ protected void testQueuePropertiesWithCleanSessionSet(String cid, int qos, boolean durable, boolean autoDelete)
+ throws IOException, MqttException, TimeoutException, InterruptedException {
+ testQueuePropertiesWithCleanSession(true, cid, qos, durable, autoDelete);
+ }
+
+ protected void testQueuePropertiesWithCleanSessionUnset(String cid, int qos, boolean durable, boolean autoDelete)
+ throws IOException, MqttException, TimeoutException, InterruptedException {
+ testQueuePropertiesWithCleanSession(false, cid, qos, durable, autoDelete);
+ }
+
+ protected void testQueuePropertiesWithCleanSession(boolean cleanSession, String cid, int qos,
+ boolean durable, boolean autoDelete)
+ throws MqttException, IOException, TimeoutException {
+ MqttClient c = newClient(brokerUrl, cid);
+ MqttConnectOptions opts = new TestMqttConnectOptions();
+ opts.setUserName("guest");
+ opts.setPassword("guest".toCharArray());
+ opts.setCleanSession(cleanSession);
+ c.connect(opts);
+
+ setUpAmqp();
+ Channel tmpCh = conn.createChannel();
+
+ String q = "mqtt-subscription-" + cid + "qos" + qos;
+
+ c.subscribe(topic, qos);
+ // there is no server-sent notification about subscription
+ // success so we inject a delay
+ waitForTestDelay();
+
+ // ensure the queue is declared with the arguments we expect
+ // e.g. mqtt-subscription-client-3aqos0
+ try {
+ // first ensure the queue exists
+ tmpCh.queueDeclarePassive(q);
+ // then assert on properties
+ Map<String, Object> args = new HashMap<>();
+ args.put("x-expires", 86400000);
+ tmpCh.queueDeclare(q, durable, autoDelete, false, args);
+ } finally {
+ if (c.isConnected()) {
+ c.disconnect(3000);
+ }
+
+ Channel tmpCh2 = conn.createChannel();
+ tmpCh2.queueDelete(q);
+ tmpCh2.close();
+ tearDownAmqp();
+ }
+ }
+
+ @Test public void invalidPassword(TestInfo info) throws MqttException {
+ MqttConnectOptions client_opts = new TestMqttConnectOptions();
+ client_opts.setUserName("invalid-user");
+ client_opts.setPassword("invalid-password".toCharArray());
+ MqttClient client = newClient(info);
+ try {
+ client.connect(client_opts);
+ fail("Authentication failure expected");
+ } catch (MqttException ex) {
+ assertEquals(MqttException.REASON_CODE_FAILED_AUTHENTICATION, ex.getReasonCode());
+ } finally {
+ if (client.isConnected()) {
+ disconnect(client);
+ }
+ }
+ }
+
+ @Test public void emptyPassword(TestInfo info) throws MqttException {
+ MqttConnectOptions client_opts = new TestMqttConnectOptions();
+ client_opts.setPassword("".toCharArray());
+
+ MqttClient client = newClient(info);
+ try {
+ client.connect(client_opts);
+ fail("Authentication failure expected");
+ } catch (MqttException ex) {
+ assertEquals(MqttException.REASON_CODE_FAILED_AUTHENTICATION, ex.getReasonCode());
+ }
+ }
+
+
+ @Test public void subscribeQos0(TestInfo info) throws MqttException, InterruptedException {
+ MqttConnectOptions client_opts = new TestMqttConnectOptions();
+ MqttClient client = newClient(info);
+ client.connect(client_opts);
+ client.setCallback(this);
+ client.subscribe(topic, 0);
+
+ publish(client, topic, 0, payload);
+ waitAtMost(() -> receivedMessagesSize() == 1);
+ assertArrayEquals(receivedMessages.get(0).getPayload(), payload);
+ assertEquals(0, receivedMessages.get(0).getQos());
+ disconnect(client);
+ }
+
+ @Test public void subscribeUnsubscribe(TestInfo info) throws MqttException, InterruptedException {
+ MqttConnectOptions client_opts = new TestMqttConnectOptions();
+ MqttClient client = newClient(info);
+ client.connect(client_opts);
+ client.setCallback(this);
+ client.subscribe(topic, 0);
+
+ publish(client, topic, 1, payload);
+
+ waitAtMost(() -> receivedMessagesSize() == 1);
+ assertArrayEquals(receivedMessages.get(0).getPayload(), payload);
+ assertEquals(0, receivedMessages.get(0).getQos());
+
+ client.unsubscribe(topic);
+ publish(client, topic, 0, payload);
+ waitAtMost(() -> receivedMessagesSize() == 1);
+ disconnect(client);
+ }
+
+ @Test public void subscribeQos1(TestInfo info) throws MqttException, InterruptedException {
+ MqttConnectOptions client_opts = new TestMqttConnectOptions();
+ MqttClient client = newClient(info);
+ client.connect(client_opts);
+ client.setCallback(this);
+ client.subscribe(topic, 1);
+
+ publish(client, topic, 0, payload);
+ publish(client, topic, 1, payload);
+ publish(client, topic, 2, payload);
+
+ waitAtMost(() -> receivedMessagesSize() == 3);
+
+ MqttMessage msg1 = receivedMessages.get(0);
+ MqttMessage msg2 = receivedMessages.get(1);
+ MqttMessage msg3 = receivedMessages.get(2);
+
+ assertArrayEquals(msg1.getPayload(), payload);
+ assertEquals(0, msg1.getQos());
+
+ assertArrayEquals(msg2.getPayload(), payload);
+ assertEquals(1, msg2.getQos());
+
+ // Downgraded QoS 2 to QoS 1
+ assertArrayEquals(msg3.getPayload(), payload);
+ assertEquals(1, msg3.getQos());
+
+ disconnect(client);
+ }
+
+ @Test public void subscribeReceivesRetainedMessagesWithMatchingQoS(TestInfo info)
+ throws MqttException, InterruptedException {
+ MqttConnectOptions client_opts = new TestMqttConnectOptions();
+ MqttClient client = newClient(info);
+ client.connect(client_opts);
+ client.setCallback(this);
+ clearRetained(client, retainedTopic);
+ client.subscribe(retainedTopic, 1);
+
+ publishRetained(client, retainedTopic, 1, "retain 1".getBytes(StandardCharsets.UTF_8));
+ publishRetained(client, retainedTopic, 1, "retain 2".getBytes(StandardCharsets.UTF_8));
+
+ waitAtMost(() -> receivedMessagesSize() == 2);
+ MqttMessage lastMsg = receivedMessages.get(1);
+
+ client.unsubscribe(retainedTopic);
+ receivedMessages.clear();
+ client.subscribe(retainedTopic, 1);
+ waitAtMost(() -> receivedMessagesSize() == 1);
+ final MqttMessage retainedMsg = receivedMessages.get(0);
+ assertEquals(new String(lastMsg.getPayload()),
+ new String(retainedMsg.getPayload()));
+
+ disconnect(client);
+ }
+
+ @Test public void subscribeReceivesRetainedMessagesWithDowngradedQoS(TestInfo info)
+ throws MqttException, InterruptedException {
+ MqttConnectOptions clientOpts = new TestMqttConnectOptions();
+ MqttClient client = newConnectedClient(info, clientOpts);
+ client.setCallback(this);
+ clearRetained(client, retainedTopic);
+ client.subscribe(retainedTopic, 1);
+
+ publishRetained(client, retainedTopic, 1, "retain 1".getBytes(StandardCharsets.UTF_8));
+
+ waitAtMost(() -> receivedMessagesSize() == 1);
+ MqttMessage lastMsg = receivedMessages.get(0);
+
+ client.unsubscribe(retainedTopic);
+ receivedMessages.clear();
+ final int subscribeQoS = 0;
+ client.subscribe(retainedTopic, subscribeQoS);
+
+ waitAtMost(() -> receivedMessagesSize() == 1);
+ final MqttMessage retainedMsg = receivedMessages.get(0);
+ assertEquals(new String(lastMsg.getPayload()),
+ new String(retainedMsg.getPayload()));
+ assertEquals(subscribeQoS, retainedMsg.getQos());
+
+ disconnect(client);
+ }
+
+ @Test public void publishWithEmptyMessageClearsRetained(TestInfo info)
+ throws MqttException, InterruptedException {
+ MqttConnectOptions client_opts = new TestMqttConnectOptions();
+ MqttClient client = newConnectedClient(info, client_opts);
+ client.setCallback(this);
+ clearRetained(client, retainedTopic);
+ client.subscribe(retainedTopic, 1);
+
+ publishRetained(client, retainedTopic, 1, "retain 1".getBytes(StandardCharsets.UTF_8));
+ publishRetained(client, retainedTopic, 1, "retain 2".getBytes(StandardCharsets.UTF_8));
+
+ waitAtMost(() -> receivedMessagesSize() == 2);
+ client.unsubscribe(retainedTopic);
+ receivedMessages.clear();
+
+ clearRetained(client, retainedTopic);
+ client.subscribe(retainedTopic, 1);
+ waitAtMost(() -> receivedMessagesSize() == 0);
+
+ disconnect(client);
+ }
+
+ @Test public void topics(TestInfo info) throws MqttException, InterruptedException {
+ MqttConnectOptions client_opts = new TestMqttConnectOptions();
+ MqttClient client = newConnectedClient(info, client_opts);
+ client.setCallback(this);
+ client.subscribe("/+/test-topic/#");
+ String[] cases = new String[]{"/pre/test-topic2", "/test-topic", "/a/test-topic/b/c/d", "/frob/test-topic"};
+ List<String> expected = Arrays.asList("/a/test-topic/b/c/d", "/frob/test-topic");
+ for(String example : cases){
+ publish(client, example, 0, example.getBytes());
+ }
+ waitAtMost(() -> receivedMessagesSize() == expected.size());
+ for (MqttMessage m : receivedMessages){
+ assertTrue(expected.contains(new String(m.getPayload())));
+ }
+ disconnect(client);
+ }
+
+ @Test public void sparkplugTopics(TestInfo info) throws MqttException, IOException, InterruptedException, TimeoutException {
+ final String amqp091Topic = "spBv1___0.MACLab.DDATA.Opto22.CLX";
+ final String sparkplugTopic = "spBv1.0/MACLab/+/Opto22/CLX";
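+ // With {sparkplug, true} enabled in java_SUITE.erl, the plugin maps the
+ // MQTT "spBv1.0" prefix to "spBv1___0" on the AMQP 0-9-1 side (and "/" to
+ // "."), matching the ^sp[AB]v\d+___\d+ topic permissions granted there.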
+
+ MqttConnectOptions client_opts = new TestMqttConnectOptions();
+ MqttClient client = newConnectedClient(info, client_opts);
+ client.setCallback(this);
+ client.subscribe(sparkplugTopic);
+
+ setUpAmqp();
+ ch.basicPublish("amq.topic", amqp091Topic, MessageProperties.MINIMAL_BASIC, payload);
+ tearDownAmqp();
+
+ waitAtMost(() -> receivedMessagesSize() == 1);
+ disconnect(client);
+ }
+
+ @Test public void nonCleanSession(TestInfo info) throws MqttException, InterruptedException {
+ String clientIdBase = clientId(info);
+ MqttConnectOptions client_opts = new TestMqttConnectOptions();
+ client_opts.setCleanSession(false);
+ MqttClient client = newConnectedClient(clientIdBase + "-1", client_opts);
+ client.subscribe(topic, 1);
+ client.disconnect();
+
+ MqttClient client2 = newConnectedClient(clientIdBase + "-2", client_opts);
+ publish(client2, topic, 1, payload);
+ client2.disconnect();
+
+ client.setCallback(this);
+ client.connect(client_opts);
+
+ waitAtMost(() -> receivedMessagesSize() == 1);
+ assertArrayEquals(receivedMessages.get(0).getPayload(), payload);
+ disconnect(client);
+ }
+
+ @Test public void sessionRedelivery(TestInfo info) throws MqttException, InterruptedException {
+ MqttConnectOptions client_opts = new TestMqttConnectOptions();
+ client_opts.setCleanSession(false);
+ MqttClient client = newConnectedClient(info, client_opts);
+ client.subscribe(topic, 1);
+ disconnect(client);
+
+ MqttClient client2 = newConnectedClient(info, client_opts);
+ publish(client2, topic, 1, payload);
+ disconnect(client2);
+
+ failOnDelivery = true;
+
+ // Connection should fail. Messages will be redelivered.
+ client.setCallback(this);
+ client.connect(client_opts);
+
+ // Message has been delivered but connection has failed.
+ waitAtMost(() -> receivedMessagesSize() == 1);
+ assertArrayEquals(receivedMessages.get(0).getPayload(), payload);
+
+ assertFalse(client.isConnected());
+
+ receivedMessages.clear();
+ failOnDelivery = false;
+
+ client.setCallback(this);
+ client.connect(client_opts);
+
+ // Message has been redelivered after session resume
+ waitAtMost(() -> receivedMessagesSize() == 1);
+ assertArrayEquals(receivedMessages.get(0).getPayload(), payload);
+ assertTrue(client.isConnected());
+ disconnect(client);
+
+ receivedMessages.clear();
+
+ client.setCallback(this);
+ waitAtMost(() -> client.isConnected() == false);
+ client.connect(client_opts);
+
+ // This time messages are acknowledged and won't be redelivered
+ waitAtMost(() -> receivedMessagesSize() == 0);
+ assertEquals(0, receivedMessages.size());
+
+ disconnect(client);
+ }
+
+ @Test public void cleanSession(TestInfo info) throws MqttException, InterruptedException {
+ MqttConnectOptions client_opts = new TestMqttConnectOptions();
+ client_opts.setCleanSession(false);
+ MqttClient client = newConnectedClient(info, client_opts);
+ client.subscribe(topic, 1);
+ client.disconnect();
+
+ MqttClient client2 = newConnectedClient(info, client_opts);
+ publish(client2, topic, 1, payload);
+ disconnect(client2);
+
+ client_opts.setCleanSession(true);
+ client.connect(client_opts);
+ client.setCallback(this);
+ client.subscribe(topic, 1);
+
+ waitAtMost(() -> receivedMessagesSize() == 0);
+ client.unsubscribe(topic);
+ disconnect(client);
+ }
+
+ @Test public void multipleClientIds(TestInfo info) throws MqttException, InterruptedException {
+ MqttConnectOptions client_opts = new TestMqttConnectOptions();
+ MqttClient client = newConnectedClient(info, client_opts);
+ // uses duplicate client ID
+ MqttClient client2 = newConnectedClient(info, client_opts);
+ // the older connection with this client ID will be closed
+ waitAtMost(() -> client.isConnected() == false);
+ disconnect(client2);
+ }
+
+ @Test public void multipleClusterClientIds(TestInfo info) throws MqttException, InterruptedException {
+ MqttConnectOptions client_opts = new TestMqttConnectOptions();
+ MqttClient client = newConnectedClient(info, client_opts);
+ MqttClient client3 = newClient(brokerThreeUrl, info);
+ client3.connect(client_opts);
+ waitAtMost(() -> client.isConnected() == false);
+ disconnect(client3);
+ }
+
+ @Test public void ping(TestInfo info) throws MqttException, InterruptedException {
+ MqttConnectOptions client_opts = new TestMqttConnectOptions();
+ client_opts.setKeepAliveInterval(1);
+ MqttClient client = newConnectedClient(info, client_opts);
+ waitAtMost(() -> client.isConnected());
+ disconnect(client);
+ }
+
+ @Test public void will(TestInfo info) throws MqttException, InterruptedException, IOException {
+ String clientIdBase = clientId(info);
+ MqttConnectOptions client_opts = new TestMqttConnectOptions();
+ MqttClient client2 = newConnectedClient(clientIdBase + "-2", client_opts);
+ client2.subscribe(topic);
+ client2.setCallback(this);
+
+ final SocketFactory factory = SocketFactory.getDefault();
+ final ArrayList<Socket> sockets = new ArrayList<>();
+ SocketFactory testFactory = new SocketFactory() {
+ public Socket createSocket(String s, int i) throws IOException {
+ Socket sock = factory.createSocket(s, i);
+ sockets.add(sock);
+ return sock;
+ }
+ public Socket createSocket(String s, int i, InetAddress a, int i1) {
+ return null;
+ }
+ public Socket createSocket(InetAddress a, int i) {
+ return null;
+ }
+ public Socket createSocket(InetAddress a, int i, InetAddress a1, int i1) {
+ return null;
+ }
+ @Override
+ public Socket createSocket() {
+ Socket sock = new Socket();
+ sockets.add(sock);
+ return sock;
+ }
+ };
+
+ MqttClient client = newClient(clientIdBase + "-1");
+ MqttTopic willTopic = client.getTopic(topic);
+
+ MqttConnectOptions opts = new TestMqttConnectOptions();
+ opts.setSocketFactory(testFactory);
+ opts.setWill(willTopic, payload, 0, false);
+ opts.setCleanSession(false);
+
+ client.connect(opts);
+
+ assertTrue(sockets.size() >= 1);
+ expectConnectionFailure = true;
+ sockets.get(0).close();
+
+ waitAtMost(() -> receivedMessagesSize() == 1);
+ assertArrayEquals(receivedMessages.get(0).getPayload(), payload);
+ client2.unsubscribe(topic);
+ disconnect(client2);
+ }
+
+ @Test public void willIsRetained(TestInfo info) throws MqttException, InterruptedException, IOException {
+ String clientIdBase = clientId(info);
+ MqttConnectOptions client2_opts = new TestMqttConnectOptions();
+ client2_opts.setCleanSession(true);
+ MqttClient client2 = newConnectedClient(clientIdBase + "-2", client2_opts);
+ client2.setCallback(this);
+
+ clearRetained(client2, retainedTopic);
+ client2.subscribe(retainedTopic, 1);
+ disconnect(client2);
+
+ final SocketFactory factory = SocketFactory.getDefault();
+ final ArrayList<Socket> sockets = new ArrayList<>();
+ SocketFactory testFactory = new SocketFactory() {
+ public Socket createSocket(String s, int i) throws IOException {
+ Socket sock = factory.createSocket(s, i);
+ sockets.add(sock);
+ return sock;
+ }
+ public Socket createSocket(String s, int i, InetAddress a, int i1) {
+ return null;
+ }
+ public Socket createSocket(InetAddress a, int i) {
+ return null;
+ }
+ public Socket createSocket(InetAddress a, int i, InetAddress a1, int i1) {
+ return null;
+ }
+ @Override
+ public Socket createSocket() {
+ Socket sock = new Socket();
+ sockets.add(sock);
+ return sock;
+ }
+ };
+
+ MqttConnectOptions client_opts = new TestMqttConnectOptions();
+
+ MqttClient client = newClient(clientIdBase + "-1");
+ MqttTopic willTopic = client.getTopic(retainedTopic);
+ byte[] willPayload = "willpayload".getBytes();
+
+ client_opts.setSocketFactory(testFactory);
+ client_opts.setWill(willTopic, willPayload, 1, true);
+
+ client.connect(client_opts);
+
+ assertEquals(1, sockets.size());
+ sockets.get(0).close();
+
+ // let last will propagate after disconnection
+ waitForTestDelay();
+
+ client2.connect(client2_opts);
+ client2.setCallback(this);
+ client2.subscribe(retainedTopic, 1);
+
+ waitAtMost(() -> receivedMessagesSize() == 1);
+ assertArrayEquals(receivedMessages.get(0).getPayload(), willPayload);
+ client2.unsubscribe(retainedTopic);
+ disconnect(client2);
+ }
+
+ @Test public void subscribeMultiple(TestInfo info) throws MqttException, InterruptedException {
+ String clientIdBase = clientId(info);
+ MqttConnectOptions client_opts = new TestMqttConnectOptions();
+ MqttClient client = newConnectedClient(clientIdBase + "-1", client_opts);
+ publish(client, "/test-topic/1", 1, "msq1-qos1".getBytes());
+
+ MqttClient client2 = newConnectedClient(clientIdBase + "-2", client_opts);
+ client2.setCallback(this);
+ client2.subscribe("/test-topic/#");
+ client2.subscribe("/test-topic/#");
+
+ publish(client, "/test-topic/2", 0, "msq2-qos0".getBytes());
+ publish(client, "/test-topic/3", 1, "msq3-qos1".getBytes());
+ publish(client, "/test-topic/4", 2, "msq3-qos2".getBytes());
+ publish(client, topic, 0, "msq4-qos0".getBytes());
+ publish(client, topic, 1, "msq4-qos1".getBytes());
+
+ waitAtMost(() -> receivedMessagesSize() == 3);
+ assertEquals(3, receivedMessages.size());
+ disconnect(client);
+ disconnect(client2);
+ }
+
+ @Test public void publishMultiple() throws MqttException, InterruptedException {
+ int pubCount = 50;
+ for (int subQos=0; subQos <= 2; subQos++){
+ for (int pubQos=0; pubQos <= 2; pubQos++){
+ // avoid reusing the client in this test as a shared
+ // client cannot handle connection churn very well. MK.
+ String cid = "test-sub-qos-" + subQos + "-pub-qos-" + pubQos;
+ MqttConnectOptions client_opts = new TestMqttConnectOptions();
+ MqttClient client = newClient(brokerUrl, cid);
+ client.connect(client_opts);
+ client.subscribe(topic, subQos);
+ client.setCallback(this);
+ long start = System.currentTimeMillis();
+ for (int i=0; i<pubCount; i++){
+ publish(client, topic, pubQos, payload);
+ }
+
+ waitAtMost(() -> receivedMessagesSize() == pubCount);
+ System.out.println("publish QOS" + pubQos + " subscribe QOS" + subQos +
+ ", " + pubCount + " msgs took " +
+ (lastReceipt - start)/1000.0 + "sec");
+ client.disconnect(5000);
+ receivedMessages.clear();
+ }
+ }
+ }
+
+ @Test public void topicAuthorisationPublish(TestInfo info) throws Exception {
+ MqttConnectOptions client_opts = new TestMqttConnectOptions();
+ MqttClient client = newConnectedClient(info, client_opts);
+ client.setCallback(this);
+ client.subscribe("some/test-topic");
+ publish(client, "some/test-topic", 1, "content".getBytes());
+ waitAtMost(() -> receivedMessagesSize() == 1);
+ assertTrue(client.isConnected());
+ try {
+ publish(client, "forbidden-topic", 1, "content".getBytes());
+ fail("Publishing on a forbidden topic, an exception should have been thrown");
+ client.disconnect();
+ } catch(Exception e) {
+ // OK
+ }
+ }
+
+ @Test public void topicAuthorisationSubscribe(TestInfo info) throws Exception {
+ MqttConnectOptions client_opts = new TestMqttConnectOptions();
+ MqttClient client = newConnectedClient(info, client_opts);
+ client.setCallback(this);
+ client.subscribe("some/test-topic");
+ try {
+ client.subscribe("forbidden-topic");
+ fail("Subscribing to a forbidden topic, an exception should have been thrown");
+ client.disconnect();
+ } catch(Exception e) {
+ // OK
+ e.printStackTrace();
+ }
+ }
+
+ @Test public void lastWillDowngradesQoS2(TestInfo info) throws Exception {
+ String lastWillTopic = "test-topic-will-downgrades-qos";
+
+ MqttConnectOptions client2Opts = new TestMqttConnectOptions();
+ MqttClient client2 = newConnectedClient(info, client2Opts);
+ client2.subscribe(lastWillTopic);
+ client2.setCallback(this);
+
+ final SocketFactory factory = SocketFactory.getDefault();
+ final ArrayList<Socket> sockets = new ArrayList<>();
+ SocketFactory testFactory = new SocketFactory() {
+ public Socket createSocket(String s, int i) throws IOException {
+ Socket sock = factory.createSocket(s, i);
+ sockets.add(sock);
+ return sock;
+ }
+ public Socket createSocket(String s, int i, InetAddress a, int i1) {
+ return null;
+ }
+ public Socket createSocket(InetAddress a, int i) {
+ return null;
+ }
+ public Socket createSocket(InetAddress a, int i, InetAddress a1, int i1) {
+ return null;
+ }
+ @Override
+ public Socket createSocket() {
+ Socket sock = new Socket();
+ sockets.add(sock);
+ return sock;
+ }
+ };
+
+ MqttConnectOptions clientOpts = new TestMqttConnectOptions();
+
+ MqttClient client = newClient("test-topic-will-downgrades-qos");
+ clientOpts.setSocketFactory(testFactory);
+ MqttTopic willTopic = client.getTopic(lastWillTopic);
+ clientOpts.setWill(willTopic, payload, 2, false);
+ clientOpts.setCleanSession(false);
+ client.connect(clientOpts);
+
+ waitAtMost(() -> sockets.size() == 1);
+ expectConnectionFailure = true;
+ sockets.get(0).close();
+
+ // let some time after disconnection
+ waitAtMost(() -> receivedMessagesSize() == 1);
+ assertEquals(1, receivedMessages.size());
+ disconnect(client2);
+ }
+
+ @Test public void lastWillNotSentOnRestrictedTopic(TestInfo info) throws Exception {
+ MqttConnectOptions client2_opts = new TestMqttConnectOptions();
+
+ MqttClient client2 = newConnectedClient(info, client2_opts);
+ // topic authorized for subscription, restricted for publishing
+ String lastWillTopic = "last-will";
+ client2.subscribe(lastWillTopic);
+ client2.setCallback(this);
+
+ final SocketFactory factory = SocketFactory.getDefault();
+ final ArrayList<Socket> sockets = new ArrayList<>();
+ SocketFactory testFactory = new SocketFactory() {
+ public Socket createSocket(String s, int i) throws IOException {
+ Socket sock = factory.createSocket(s, i);
+ sockets.add(sock);
+ return sock;
+ }
+ public Socket createSocket(String s, int i, InetAddress a, int i1) {
+ return null;
+ }
+ public Socket createSocket(InetAddress a, int i) {
+ return null;
+ }
+ public Socket createSocket(InetAddress a, int i, InetAddress a1, int i1) {
+ return null;
+ }
+ @Override
+ public Socket createSocket() {
+ Socket sock = new Socket();
+ sockets.add(sock);
+ return sock;
+ }
+ };
+
+ MqttConnectOptions client_opts = new TestMqttConnectOptions();
+
+ MqttClient client = newClient("last-will-not-sent-on-restricted-topic");
+ client_opts.setSocketFactory(testFactory);
+ MqttTopic willTopic = client.getTopic(lastWillTopic);
+ client_opts.setWill(willTopic, payload, 0, false);
+ client_opts.setCleanSession(false);
+ client.connect(client_opts);
+
+ assertEquals(1, sockets.size());
+ expectConnectionFailure = true;
+ sockets.get(0).close();
+
+ // allow some time after the disconnection before checking that no last will arrived
+ waitForTestDelay();
+ assertEquals(0, receivedMessages.size());
+ disconnect(client2);
+ }
+
+ @Test public void topicAuthorisationVariableExpansion(TestInfo info) throws Exception {
+ final String client_id = clientId(info);
+ MqttConnectOptions client_opts = new TestMqttConnectOptions();
+ MqttClient client = newConnectedClient(client_id, client_opts);
+ client.setCallback(this);
+ String topicWithExpandedVariables = "guest/" + client_id + "/a";
+ client.subscribe(topicWithExpandedVariables);
+ publish(client, topicWithExpandedVariables, 1, "content".getBytes());
+ waitAtMost(() -> receivedMessagesSize() == 1);
+ assertTrue(client.isConnected());
+ try {
+ publish(client, "guest/WrongClientId/a", 1, "content".getBytes());
+ fail("Publishing on a forbidden topic, an exception should have been thrown");
+ client.disconnect();
+ } catch(Exception e) {
+ // OK
+ }
+ }
+
+ @Test public void interopM2A(TestInfo info) throws MqttException, IOException, InterruptedException, TimeoutException {
+ setUpAmqp();
+ String queue = ch.queueDeclare().getQueue();
+ ch.queueBind(queue, "amq.topic", topic);
+
+ byte[] interopPayload = "interop-body".getBytes();
+ MqttConnectOptions client_opts = new TestMqttConnectOptions();
+ MqttClient client = newConnectedClient(info, client_opts);
+ publish(client, topic, 1, interopPayload);
+ disconnect(client);
+
+ final CountDownLatch latch = new CountDownLatch(1);
+ final AtomicReference<byte[]> messageBody = new AtomicReference<>();
+ ch.basicConsume(queue, true, new DefaultConsumer(ch) {
+ @Override
+ public void handleDelivery(String consumerTag, Envelope envelope, AMQP.BasicProperties properties, byte[] body) throws IOException {
+ messageBody.set(body);
+ latch.countDown();
+ }
+ });
+ assertTrue(latch.await(EXPECT_TIMEOUT.toMillis(), TimeUnit.MILLISECONDS));
+ assertEquals(new String(interopPayload), new String(messageBody.get()));
+ assertNull(ch.basicGet(queue, true));
+ tearDownAmqp();
+ }
+
+ @Test public void interopA2M(TestInfo info) throws MqttException, IOException, InterruptedException, TimeoutException {
+ MqttConnectOptions client_opts = new TestMqttConnectOptions();
+ MqttClient client = newConnectedClient(info, client_opts);
+ client.setCallback(this);
+ client.subscribe(topic, 1);
+
+ setUpAmqp();
+ ch.basicPublish("amq.topic", topic, MessageProperties.MINIMAL_BASIC, payload);
+ tearDownAmqp();
+
+ waitAtMost(() -> receivedMessagesSize() == 1);
+ client.disconnect();
+ }
+
+ private void publish(MqttClient client, String topicName, int qos, byte[] payload) throws MqttException {
+ publish(client, topicName, qos, payload, false);
+ }
+
+ private void publish(MqttClient client, String topicName, int qos, byte[] payload, boolean retained) throws MqttException {
+ MqttTopic topic = client.getTopic(topicName);
+ MqttMessage message = new MqttMessage(payload);
+ message.setQos(qos);
+ message.setRetained(retained);
+ MqttDeliveryToken token = topic.publish(message);
+ token.waitForCompletion();
+ }
+
+ private void publishRetained(MqttClient client, String topicName, int qos, byte[] payload) throws MqttException {
+ publish(client, topicName, qos, payload, true);
+ }
+
+ private void clearRetained(MqttClient client, String topicName) throws MqttException {
+ publishRetained(client, topicName, 1, "".getBytes());
+ }
+
+ public void connectionLost(Throwable cause) {
+ if (!expectConnectionFailure)
+ fail("Connection unexpectedly lost");
+ }
+
+ public void messageArrived(String topic, MqttMessage message) throws Exception {
+ lastReceipt = System.currentTimeMillis();
+ receivedMessages.add(message);
+ if (failOnDelivery) {
+ throw new Exception("unexpected delivery on topic " + topic);
+ }
+ }
+
+ public void deliveryComplete(IMqttDeliveryToken token) {
+ }
+
+ private Integer receivedMessagesSize() {
+ return receivedMessages.size();
+ }
+
+ private void waitForTestDelay() {
+ try {
+ Thread.sleep(testDelay);
+ } catch (InterruptedException e) {
+ throw new RuntimeException(e);
+ }
+ }
+
+ public static void waitAtMost(BooleanSupplier condition) throws InterruptedException {
+ if (condition.getAsBoolean()) {
+ return;
+ }
+ int waitTime = 100;
+ int waitedTime = 0;
+ long timeoutInMs = EXPECT_TIMEOUT.toMillis();
+ while (waitedTime <= timeoutInMs) {
+ Thread.sleep(waitTime);
+ if (condition.getAsBoolean()) {
+ return;
+ }
+ waitedTime += waitTime;
+ }
+ fail("Waited " + EXPECT_TIMEOUT.get(ChronoUnit.SECONDS) + " second(s), condition never got true");
+ }
+}
diff --git a/deps/rabbitmq_mqtt/test/java_SUITE_data/src/test/java/com/rabbitmq/mqtt/test/rabbit-test.sh b/deps/rabbitmq_mqtt/test/java_SUITE_data/src/test/java/com/rabbitmq/mqtt/test/rabbit-test.sh
new file mode 100755
index 0000000000..cba9bcd493
--- /dev/null
+++ b/deps/rabbitmq_mqtt/test/java_SUITE_data/src/test/java/com/rabbitmq/mqtt/test/rabbit-test.sh
@@ -0,0 +1,8 @@
+#!/bin/sh
+CTL=$1
+USER="O=client,CN=$(hostname)"
+
+# Test direct connections
+$CTL add_user "$USER" ''
+$CTL set_permissions -p / "$USER" ".*" ".*" ".*"
+$CTL set_topic_permissions -p / "$USER" "amq.topic" "test-topic|test-retained-topic|.*topic.*" "test-topic|test-retained-topic|.*topic.*|last-will"
diff --git a/deps/rabbitmq_mqtt/test/java_SUITE_data/src/test/java/com/rabbitmq/mqtt/test/setup-rabbit-test.sh b/deps/rabbitmq_mqtt/test/java_SUITE_data/src/test/java/com/rabbitmq/mqtt/test/setup-rabbit-test.sh
new file mode 100644
index 0000000000..2e2282ee07
--- /dev/null
+++ b/deps/rabbitmq_mqtt/test/java_SUITE_data/src/test/java/com/rabbitmq/mqtt/test/setup-rabbit-test.sh
@@ -0,0 +1,2 @@
+#!/bin/sh -e
+sh -e `dirname $0`/rabbit-test.sh "$DEPS_DIR/rabbit/scripts/rabbitmqctl -n $RABBITMQ_NODENAME"
diff --git a/deps/rabbitmq_mqtt/test/java_SUITE_data/src/test/java/com/rabbitmq/mqtt/test/tls/MqttSSLTest.java b/deps/rabbitmq_mqtt/test/java_SUITE_data/src/test/java/com/rabbitmq/mqtt/test/tls/MqttSSLTest.java
new file mode 100644
index 0000000000..2ea4c7a638
--- /dev/null
+++ b/deps/rabbitmq_mqtt/test/java_SUITE_data/src/test/java/com/rabbitmq/mqtt/test/tls/MqttSSLTest.java
@@ -0,0 +1,157 @@
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at https://mozilla.org/MPL/2.0/.
+//
+// Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+//
+
+package com.rabbitmq.mqtt.test.tls;
+
+import org.eclipse.paho.client.mqttv3.*;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+import static org.junit.jupiter.api.Assertions.*;
+
+
+/**
+ * MQTT v3.1 tests over TLS.
+ */
+
+public class MqttSSLTest implements MqttCallback {
+
+ private final String brokerUrl = "ssl://" + getHost() + ":" + getPort();
+ private String clientId;
+ private String clientId2;
+ private MqttClient client;
+ private MqttClient client2;
+ private MqttConnectOptions conOpt;
+
+ private volatile List<MqttMessage> receivedMessages;
+ private volatile boolean expectConnectionFailure;
+
+ private static String getPort() {
+ Object port = System.getProperty("mqtt.ssl.port");
+ assertNotNull(port);
+ return port.toString();
+ }
+
+ private static String getHost() {
+ Object host = System.getProperty("hostname");
+ assertNotNull(host);
+ return host.toString();
+ }
+
+ // override 10s limit
+ private class MyConnOpts extends MqttConnectOptions {
+ private int keepAliveInterval = 60;
+
+ @Override
+ public void setKeepAliveInterval(int keepAliveInterval) {
+ this.keepAliveInterval = keepAliveInterval;
+ }
+
+ @Override
+ public int getKeepAliveInterval() {
+ return keepAliveInterval;
+ }
+ }
+
+
+ @BeforeEach
+ public void setUp() throws MqttException, IOException {
+ clientId = getClass().getSimpleName() + ((int) (10000 * Math.random()));
+ clientId2 = clientId + "-2";
+ client = new MqttClient(brokerUrl, clientId, null);
+ client2 = new MqttClient(brokerUrl, clientId2, null);
+ conOpt = new MyConnOpts();
+ conOpt.setSocketFactory(MutualAuth.getSSLContextWithoutCert().getSocketFactory());
+ setConOpts(conOpt);
+ receivedMessages = Collections.synchronizedList(new ArrayList<MqttMessage>());
+ expectConnectionFailure = false;
+ }
+
+ @AfterEach
+ public void tearDown() throws MqttException {
+ // clean any sticky sessions
+ setConOpts(conOpt);
+ client = new MqttClient(brokerUrl, clientId, null);
+ try {
+ client.connect(conOpt);
+ client.disconnect();
+ } catch (Exception ignored) {
+ }
+
+ client2 = new MqttClient(brokerUrl, clientId2, null);
+ try {
+ client2.connect(conOpt);
+ client2.disconnect();
+ } catch (Exception ignored) {
+ }
+ }
+
+
+ private void setConOpts(MqttConnectOptions conOpts) {
+ conOpts.setCleanSession(true);
+ conOpts.setKeepAliveInterval(60);
+ }
+
+ @Test
+ public void certLogin() throws MqttException {
+ try {
+ conOpt.setSocketFactory(MutualAuth.getSSLContextWithClientCert().getSocketFactory());
+ client.connect(conOpt);
+ } catch (Exception e) {
+ e.printStackTrace();
+ fail("Exception: " + e.getMessage());
+ }
+ }
+
+
+ @Test public void invalidUser() throws MqttException {
+ conOpt.setUserName("invalid-user");
+ try {
+ client.connect(conOpt);
+ fail("Authentication failure expected");
+ } catch (MqttException ex) {
+ assertEquals(MqttException.REASON_CODE_FAILED_AUTHENTICATION, ex.getReasonCode());
+ } catch (Exception e) {
+ e.printStackTrace();
+ fail("Exception: " + e.getMessage());
+ }
+ }
+
+ @Test public void invalidPassword() throws MqttException {
+ conOpt.setUserName("invalid-user");
+ conOpt.setPassword("invalid-password".toCharArray());
+ try {
+ client.connect(conOpt);
+ fail("Authentication failure expected");
+ } catch (MqttException ex) {
+ assertEquals(MqttException.REASON_CODE_FAILED_AUTHENTICATION, ex.getReasonCode());
+ } catch (Exception e) {
+ e.printStackTrace();
+ fail("Exception: " + e.getMessage());
+ }
+ }
+
+
+ public void connectionLost(Throwable cause) {
+ if (!expectConnectionFailure)
+ fail("Connection unexpectedly lost");
+ }
+
+ public void messageArrived(String topic, MqttMessage message) throws Exception {
+ receivedMessages.add(message);
+ }
+
+ public void deliveryComplete(IMqttDeliveryToken token) {
+ }
+}
diff --git a/deps/rabbitmq_mqtt/test/java_SUITE_data/src/test/java/com/rabbitmq/mqtt/test/tls/MutualAuth.java b/deps/rabbitmq_mqtt/test/java_SUITE_data/src/test/java/com/rabbitmq/mqtt/test/tls/MutualAuth.java
new file mode 100644
index 0000000000..081cae4052
--- /dev/null
+++ b/deps/rabbitmq_mqtt/test/java_SUITE_data/src/test/java/com/rabbitmq/mqtt/test/tls/MutualAuth.java
@@ -0,0 +1,89 @@
+package com.rabbitmq.mqtt.test.tls;
+
+import javax.net.ssl.KeyManagerFactory;
+import javax.net.ssl.SSLContext;
+import javax.net.ssl.TrustManagerFactory;
+import java.io.IOException;
+import java.security.KeyStore;
+import java.security.KeyStoreException;
+import java.security.NoSuchAlgorithmException;
+import java.security.cert.CertificateException;
+import java.util.Arrays;
+import java.util.List;
+import java.io.FileInputStream;
+
+
+public class MutualAuth {
+
+ private MutualAuth() {
+
+ }
+
+ private static String getStringProperty(String propertyName) throws IllegalArgumentException {
+ Object value = System.getProperty(propertyName);
+ if (value == null) throw new IllegalArgumentException("Property: " + propertyName + " not found");
+ return value.toString();
+ }
+
+ private static TrustManagerFactory getServerTrustManagerFactory() throws NoSuchAlgorithmException, CertificateException, IOException, KeyStoreException {
+ String keystorePath = System.getProperty("test-keystore.ca");
+ char[] trustPhrase = getStringProperty("test-keystore.password").toCharArray();
+
+ // Server TrustStore
+ KeyStore tks = KeyStore.getInstance("JKS");
+ tks.load(new FileInputStream(keystorePath), trustPhrase);
+
+ TrustManagerFactory tmf = TrustManagerFactory.getInstance("SunX509");
+ tmf.init(tks);
+
+ return tmf;
+ }
+
+ public static SSLContext getSSLContextWithClientCert() throws IOException {
+
+ char[] clientPhrase = getStringProperty("test-client-cert.password").toCharArray();
+
+ String p12Path = System.getProperty("test-client-cert.path");
+
+ try {
+ SSLContext sslContext = getVanillaSSLContext();
+ // Client Keystore
+ KeyStore ks = KeyStore.getInstance("PKCS12");
+ ks.load(new FileInputStream(p12Path), clientPhrase);
+ KeyManagerFactory kmf = KeyManagerFactory.getInstance("SunX509");
+ kmf.init(ks, clientPhrase);
+
+ sslContext.init(kmf.getKeyManagers(), getServerTrustManagerFactory().getTrustManagers(), null);
+ return sslContext;
+ } catch (Exception e) {
+ throw new IOException(e);
+ }
+
+ }
+
+ private static SSLContext getVanillaSSLContext() throws NoSuchAlgorithmException {
+ SSLContext result = null;
+ List<String> xs = Arrays.asList("TLSv1.2", "TLSv1.1", "TLSv1");
+ for(String x : xs) {
+ try {
+ return SSLContext.getInstance(x);
+ } catch (NoSuchAlgorithmException nae) {
+ // keep trying
+ }
+ }
+ throw new NoSuchAlgorithmException("Could not obtain an SSLContext for TLS 1.0-1.2");
+ }
+
+ public static SSLContext getSSLContextWithoutCert() throws IOException {
+ try {
+ SSLContext sslContext = getVanillaSSLContext();
+ sslContext.init(null, getServerTrustManagerFactory().getTrustManagers(), null);
+ return sslContext;
+ } catch (Exception e) {
+ throw new IOException(e);
+ }
+ }
+
+}
diff --git a/deps/rabbitmq_mqtt/test/java_SUITE_data/src/test/scripts/remove_old_test_keystores.groovy b/deps/rabbitmq_mqtt/test/java_SUITE_data/src/test/scripts/remove_old_test_keystores.groovy
new file mode 100644
index 0000000000..6864a41e29
--- /dev/null
+++ b/deps/rabbitmq_mqtt/test/java_SUITE_data/src/test/scripts/remove_old_test_keystores.groovy
@@ -0,0 +1,10 @@
+def dir = new File(project.build.directory)
+
+dir.mkdir()
+
+// The pattern starts with `.*` because the file name filter performs a
+// full match, so a bare /\.keystore$/ would never match anything.
+def pattern = ~/.*\.keystore$/
+dir.eachFileMatch(pattern) { file ->
+ file.delete()
+}
diff --git a/deps/rabbitmq_mqtt/test/mqtt_machine_SUITE.erl b/deps/rabbitmq_mqtt/test/mqtt_machine_SUITE.erl
new file mode 100644
index 0000000000..abdc3506dc
--- /dev/null
+++ b/deps/rabbitmq_mqtt/test/mqtt_machine_SUITE.erl
@@ -0,0 +1,73 @@
+-module(mqtt_machine_SUITE).
+
+-compile(export_all).
+
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include("mqtt_machine.hrl").
+
+%%%===================================================================
+%%% Common Test callbacks
+%%%===================================================================
+
+all() ->
+ [
+ {group, tests}
+ ].
+
+
+all_tests() ->
+ [
+ basics
+ ].
+
+groups() ->
+ [
+ {tests, [], all_tests()}
+ ].
+
+init_per_suite(Config) ->
+ Config.
+
+end_per_suite(_Config) ->
+ ok.
+
+init_per_group(_Group, Config) ->
+ Config.
+
+end_per_group(_Group, _Config) ->
+ ok.
+
+init_per_testcase(_TestCase, Config) ->
+ Config.
+
+end_per_testcase(_TestCase, _Config) ->
+ ok.
+
+%%%===================================================================
+%%% Test cases
+%%%===================================================================
+
+basics(_Config) ->
+ S0 = mqtt_machine:init(#{}),
+ ClientId = <<"id1">>,
+ {S1, ok, _} = mqtt_machine:apply(meta(1), {register, ClientId, self()}, S0),
+ ?assertMatch(#machine_state{client_ids = Ids} when map_size(Ids) == 1, S1),
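+ %% registering the same client id again must not create a duplicate entry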
+ {S2, ok, _} = mqtt_machine:apply(meta(2), {register, ClientId, self()}, S1),
+ ?assertMatch(#machine_state{client_ids = Ids} when map_size(Ids) == 1, S2),
+ {S3, ok, _} = mqtt_machine:apply(meta(3), {down, self(), noproc}, S2),
+ ?assertMatch(#machine_state{client_ids = Ids} when map_size(Ids) == 0, S3),
+ {S4, ok, _} = mqtt_machine:apply(meta(3), {unregister, ClientId, self()}, S2),
+ ?assertMatch(#machine_state{client_ids = Ids} when map_size(Ids) == 0, S4),
+
+ ok.
+
+%% Utility
+
+meta(Idx) ->
+ #{index => Idx,
+ term => 1,
+ ts => erlang:system_time(millisecond)}.
diff --git a/deps/rabbitmq_mqtt/test/processor_SUITE.erl b/deps/rabbitmq_mqtt/test/processor_SUITE.erl
new file mode 100644
index 0000000000..e38a1d5318
--- /dev/null
+++ b/deps/rabbitmq_mqtt/test/processor_SUITE.erl
@@ -0,0 +1,211 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+
+-module(processor_SUITE).
+-compile([export_all]).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+all() ->
+ [
+ {group, non_parallel_tests}
+ ].
+
+groups() ->
+ [
+ {non_parallel_tests, [], [
+ ignores_colons_in_username_if_option_set,
+ interprets_colons_in_username_if_option_not_set,
+ get_vhosts_from_global_runtime_parameter,
+ get_vhost,
+ add_client_id_to_adapter_info
+ ]}
+ ].
+
+suite() ->
+ [{timetrap, {seconds, 60}}].
+
+init_per_suite(Config) ->
+ ok = application:load(rabbitmq_mqtt),
+ Config.
+end_per_suite(Config) ->
+ ok = application:unload(rabbitmq_mqtt),
+ Config.
+init_per_group(_, Config) -> Config.
+end_per_group(_, Config) -> Config.
+init_per_testcase(get_vhost, Config) ->
+ mnesia:start(),
+ mnesia:create_table(rabbit_runtime_parameters, [
+ {attributes, record_info(fields, runtime_parameters)},
+ {record_name, runtime_parameters}]),
+ Config;
+init_per_testcase(_, Config) -> Config.
+end_per_testcase(get_vhost, Config) ->
+ mnesia:stop(),
+ Config;
+end_per_testcase(_, Config) -> Config.
+
+ignore_colons(B) -> application:set_env(rabbitmq_mqtt, ignore_colons_in_username, B).
+
+ignores_colons_in_username_if_option_set(_Config) ->
+ ignore_colons(true),
+ ?assertEqual({rabbit_mqtt_util:env(vhost), <<"a:b:c">>},
+ rabbit_mqtt_processor:get_vhost_username(<<"a:b:c">>)).
+
+interprets_colons_in_username_if_option_not_set(_Config) ->
+ ignore_colons(false),
+ ?assertEqual({<<"a:b">>, <<"c">>},
+ rabbit_mqtt_processor:get_vhost_username(<<"a:b:c">>)).
+
+get_vhosts_from_global_runtime_parameter(_Config) ->
+ MappingParameter = [
+ {<<"O=client,CN=dummy1">>, <<"vhost1">>},
+ {<<"O=client,CN=dummy2">>, <<"vhost2">>}
+ ],
+ <<"vhost1">> = rabbit_mqtt_processor:get_vhost_from_user_mapping(<<"O=client,CN=dummy1">>, MappingParameter),
+ <<"vhost2">> = rabbit_mqtt_processor:get_vhost_from_user_mapping(<<"O=client,CN=dummy2">>, MappingParameter),
+ undefined = rabbit_mqtt_processor:get_vhost_from_user_mapping(<<"O=client,CN=dummy3">>, MappingParameter),
+ undefined = rabbit_mqtt_processor:get_vhost_from_user_mapping(<<"O=client,CN=dummy3">>, not_found).
+
+get_vhost(_Config) ->
+ clear_vhost_global_parameters(),
+
+ %% not a certificate user, no cert/vhost mapping, no vhost in user
+ %% should use default vhost
+ {_, {<<"/">>, <<"guest">>}} = rabbit_mqtt_processor:get_vhost(<<"guest">>, none, 1883),
+ {_, {<<"/">>, <<"guest">>}} = rabbit_mqtt_processor:get_vhost(<<"guest">>, undefined, 1883),
+ clear_vhost_global_parameters(),
+
+ %% not a certificate user, no cert/vhost mapping, vhost in user
+ %% should use vhost in user
+ {_, {<<"somevhost">>, <<"guest">>}} = rabbit_mqtt_processor:get_vhost(<<"somevhost:guest">>, none, 1883),
+ clear_vhost_global_parameters(),
+
+ %% certificate user, no cert/vhost mapping
+ %% should use default vhost
+ {_, {<<"/">>, <<"guest">>}} = rabbit_mqtt_processor:get_vhost(<<"guest">>, <<"O=client,CN=dummy">>, 1883),
+ clear_vhost_global_parameters(),
+
+ %% certificate user, cert/vhost mapping with global runtime parameter
+ %% should use mapping
+ set_global_parameter(mqtt_default_vhosts, [
+ {<<"O=client,CN=dummy">>, <<"somevhost">>},
+ {<<"O=client,CN=otheruser">>, <<"othervhost">>}
+ ]),
+ {_, {<<"somevhost">>, <<"guest">>}} = rabbit_mqtt_processor:get_vhost(<<"guest">>, <<"O=client,CN=dummy">>, 1883),
+ clear_vhost_global_parameters(),
+
+ %% certificate user, cert/vhost mapping with global runtime parameter, but no key for the user
+ %% should use default vhost
+ set_global_parameter(mqtt_default_vhosts, [{<<"O=client,CN=otheruser">>, <<"somevhost">>}]),
+ {_, {<<"/">>, <<"guest">>}} = rabbit_mqtt_processor:get_vhost(<<"guest">>, <<"O=client,CN=dummy">>, 1883),
+ clear_vhost_global_parameters(),
+
+ %% not a certificate user, port/vhost mapping
+ %% should use mapping
+ set_global_parameter(mqtt_port_to_vhost_mapping, [
+ {<<"1883">>, <<"somevhost">>},
+ {<<"1884">>, <<"othervhost">>}
+ ]),
+ {_, {<<"somevhost">>, <<"guest">>}} = rabbit_mqtt_processor:get_vhost(<<"guest">>, none, 1883),
+ clear_vhost_global_parameters(),
+
+ %% not a certificate user, port/vhost mapping, but vhost in username
+ %% vhost in username should take precedence
+ set_global_parameter(mqtt_port_to_vhost_mapping, [
+ {<<"1883">>, <<"somevhost">>},
+ {<<"1884">>, <<"othervhost">>}
+ ]),
+ {_, {<<"vhostinusername">>, <<"guest">>}} = rabbit_mqtt_processor:get_vhost(<<"vhostinusername:guest">>, none, 1883),
+ clear_vhost_global_parameters(),
+
+ %% not a certificate user, port/vhost mapping, but no mapping for this port
+ %% should use default vhost
+ set_global_parameter(mqtt_port_to_vhost_mapping, [
+ {<<"1884">>, <<"othervhost">>}
+ ]),
+ {_, {<<"/">>, <<"guest">>}} = rabbit_mqtt_processor:get_vhost(<<"guest">>, none, 1883),
+ clear_vhost_global_parameters(),
+
+ %% certificate user, port/vhost parameter, mapping, no cert/vhost mapping
+ %% should use port/vhost mapping
+ set_global_parameter(mqtt_port_to_vhost_mapping, [
+ {<<"1883">>, <<"somevhost">>},
+ {<<"1884">>, <<"othervhost">>}
+ ]),
+ {_, {<<"somevhost">>, <<"guest">>}} = rabbit_mqtt_processor:get_vhost(<<"guest">>, <<"O=client,CN=dummy">>, 1883),
+ clear_vhost_global_parameters(),
+
+ %% certificate user, port/vhost parameter but no mapping, cert/vhost mapping
+ %% should use cert/vhost mapping
+ set_global_parameter(mqtt_default_vhosts, [
+ {<<"O=client,CN=dummy">>, <<"somevhost">>},
+ {<<"O=client,CN=otheruser">>, <<"othervhost">>}
+ ]),
+ set_global_parameter(mqtt_port_to_vhost_mapping, [
+ {<<"1884">>, <<"othervhost">>}
+ ]),
+ {_, {<<"somevhost">>, <<"guest">>}} = rabbit_mqtt_processor:get_vhost(<<"guest">>, <<"O=client,CN=dummy">>, 1883),
+ clear_vhost_global_parameters(),
+
+ %% certificate user, port/vhost parameter, cert/vhost parameter
+ %% cert/vhost parameter takes precedence
+ set_global_parameter(mqtt_default_vhosts, [
+ {<<"O=client,CN=dummy">>, <<"cert-somevhost">>},
+ {<<"O=client,CN=otheruser">>, <<"othervhost">>}
+ ]),
+ set_global_parameter(mqtt_port_to_vhost_mapping, [
+ {<<"1883">>, <<"port-vhost">>},
+ {<<"1884">>, <<"othervhost">>}
+ ]),
+ {_, {<<"cert-somevhost">>, <<"guest">>}} = rabbit_mqtt_processor:get_vhost(<<"guest">>, <<"O=client,CN=dummy">>, 1883),
+ clear_vhost_global_parameters(),
+
+ %% certificate user, no port/vhost or cert/vhost mapping, vhost in username
+ %% should use vhost in username
+ {_, {<<"vhostinusername">>, <<"guest">>}} = rabbit_mqtt_processor:get_vhost(<<"vhostinusername:guest">>, <<"O=client,CN=dummy">>, 1883),
+
+ %% not a certificate user, port/vhost parameter, cert/vhost parameter
+ %% port/vhost mapping is used, as cert/vhost should not be used
+ set_global_parameter(mqtt_default_vhosts, [
+ {<<"O=cert">>, <<"cert-somevhost">>},
+ {<<"O=client,CN=otheruser">>, <<"othervhost">>}
+ ]),
+ set_global_parameter(mqtt_port_to_vhost_mapping, [
+ {<<"1883">>, <<"port-vhost">>},
+ {<<"1884">>, <<"othervhost">>}
+ ]),
+ {_, {<<"port-vhost">>, <<"guest">>}} = rabbit_mqtt_processor:get_vhost(<<"guest">>, none, 1883),
+ clear_vhost_global_parameters(),
+ ok.
+
+add_client_id_to_adapter_info(_Config) ->
+ TestFun = fun(AdapterInfo) ->
+ Info0 = rabbit_mqtt_processor:add_client_id_to_adapter_info(<<"my-client-id">>, AdapterInfo),
+ AdditionalInfo0 = Info0#amqp_adapter_info.additional_info,
+ ?assertEqual(#{<<"client_id">> => <<"my-client-id">>}, proplists:get_value(variable_map, AdditionalInfo0)),
+ ClientProperties = proplists:get_value(client_properties, AdditionalInfo0),
+ ?assertEqual([{client_id,longstr,<<"my-client-id">>}], ClientProperties)
+ end,
+ lists:foreach(TestFun, [#amqp_adapter_info{}, #amqp_adapter_info{additional_info = [{client_properties, []}]}]),
+ ok.
+
+set_global_parameter(Key, Term) ->
+ InsertParameterFun = fun () ->
+ mnesia:write(rabbit_runtime_parameters, #runtime_parameters{key = Key, value = Term}, write)
+ end,
+
+ {atomic, ok} = mnesia:transaction(InsertParameterFun).
+
+clear_vhost_global_parameters() ->
+ DeleteParameterFun = fun () ->
+ ok = mnesia:delete(rabbit_runtime_parameters, mqtt_default_vhosts, write),
+ ok = mnesia:delete(rabbit_runtime_parameters, mqtt_port_to_vhost_mapping, write)
+ end,
+ {atomic, ok} = mnesia:transaction(DeleteParameterFun).
diff --git a/deps/rabbitmq_mqtt/test/proxy_protocol_SUITE.erl b/deps/rabbitmq_mqtt/test/proxy_protocol_SUITE.erl
new file mode 100644
index 0000000000..5403de23d3
--- /dev/null
+++ b/deps/rabbitmq_mqtt/test/proxy_protocol_SUITE.erl
@@ -0,0 +1,125 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+-module(proxy_protocol_SUITE).
+-compile([export_all]).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-define(TIMEOUT, 5000).
+
+all() ->
+ [
+ {group, non_parallel_tests}
+ ].
+
+groups() ->
+ [
+ {non_parallel_tests, [], [
+ proxy_protocol,
+ proxy_protocol_tls
+ ]}
+ ].
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ Suffix = rabbit_ct_helpers:testcase_absname(Config, "", "-"),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, Suffix},
+ {rmq_certspwd, "bunnychow"},
+ {rabbitmq_ct_tls_verify, verify_none}
+ ]),
+ MqttConfig = mqtt_config(),
+ rabbit_ct_helpers:run_setup_steps(Config1,
+ [ fun(Conf) -> merge_app_env(MqttConfig, Conf) end ] ++
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+mqtt_config() ->
+ {rabbitmq_mqtt, [
+ {proxy_protocol, true},
+ {ssl_cert_login, true},
+ {allow_anonymous, true}]}.
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_group(_, Config) -> Config.
+end_per_group(_, Config) -> Config.
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+proxy_protocol(Config) ->
+ Port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_mqtt),
+ {ok, Socket} = gen_tcp:connect({127,0,0,1}, Port,
+ [binary, {active, false}, {packet, raw}]),
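+ %% proxy protocol v1 preamble; the declared source address/port
+ %% (192.168.1.1:80) should appear in the connection name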
+ ok = inet:send(Socket, "PROXY TCP4 192.168.1.1 192.168.1.2 80 81\r\n"),
+ ok = inet:send(Socket, mqtt_3_1_1_connect_frame()),
+ {ok, _Packet} = gen_tcp:recv(Socket, 0, ?TIMEOUT),
+ ConnectionName = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, connection_name, []),
+ match = re:run(ConnectionName, <<"^192.168.1.1:80 ">>, [{capture, none}]),
+ gen_tcp:close(Socket),
+ ok.
+
+proxy_protocol_tls(Config) ->
+ app_utils:start_applications([asn1, crypto, public_key, ssl]),
+ Port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_mqtt_tls),
+ {ok, Socket} = gen_tcp:connect({127,0,0,1}, Port,
+ [binary, {active, false}, {packet, raw}]),
+ ok = inet:send(Socket, "PROXY TCP4 192.168.1.1 192.168.1.2 80 81\r\n"),
+ {ok, SslSocket} = ssl:connect(Socket, [], ?TIMEOUT),
+ ok = ssl:send(SslSocket, mqtt_3_1_1_connect_frame()),
+ {ok, _Packet} = ssl:recv(SslSocket, 0, ?TIMEOUT),
+ ConnectionName = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, connection_name, []),
+ match = re:run(ConnectionName, <<"^192.168.1.1:80 ">>, [{capture, none}]),
+ gen_tcp:close(Socket),
+ ok.
+
+connection_name() ->
+ Connections = ets:tab2list(connection_created),
+ {_Key, Values} = lists:nth(1, Connections),
+ {_, Name} = lists:keyfind(name, 1, Values),
+ Name.
+
+merge_app_env(MqttConfig, Config) ->
+ rabbit_ct_helpers:merge_app_env(Config, MqttConfig).
+
+mqtt_3_1_1_connect_frame() ->
+ %% CONNECT, remaining length 24; protocol name "MQTT", protocol
+ %% level 4 (3.1.1), connect flags 2 (clean session), keep alive 60s,
+ %% client id "TestConsumer"
+ <<16, 24,
+ 0, 4, "MQTT",
+ 4,
+ 2,
+ 0, 60,
+ 0, 12, "TestConsumer">>.
diff --git a/deps/rabbitmq_mqtt/test/rabbit_auth_backend_mqtt_mock.erl b/deps/rabbitmq_mqtt/test/rabbit_auth_backend_mqtt_mock.erl
new file mode 100644
index 0000000000..5272138c6b
--- /dev/null
+++ b/deps/rabbitmq_mqtt/test/rabbit_auth_backend_mqtt_mock.erl
@@ -0,0 +1,45 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2019-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+%% A mock authn/authz that records information during calls. For testing purposes only.
+
+-module(rabbit_auth_backend_mqtt_mock).
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-behaviour(rabbit_authn_backend).
+-behaviour(rabbit_authz_backend).
+
+-export([user_login_authentication/2, user_login_authorization/2,
+ check_vhost_access/3, check_resource_access/4, check_topic_access/4,
+ state_can_expire/0,
+ get/1]).
+
+user_login_authentication(_, AuthProps) ->
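+ %% NB: creates a named table, so this mock assumes one login per node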
+ ets:new(?MODULE, [set, public, named_table]),
+ ets:insert(?MODULE, {authentication, AuthProps}),
+ {ok, #auth_user{username = <<"dummy">>,
+ tags = [],
+ impl = none}}.
+
+user_login_authorization(_, _) ->
+ io:format("login authorization"),
+ {ok, does_not_matter}.
+
+check_vhost_access(#auth_user{}, _VHostPath, AuthzData) ->
+ ets:insert(?MODULE, {vhost_access, AuthzData}),
+ true.
+check_resource_access(#auth_user{}, #resource{}, _Permission, AuthzContext) ->
+ ets:insert(?MODULE, {resource_access, AuthzContext}),
+ true.
+check_topic_access(#auth_user{}, #resource{}, _Permission, TopicContext) ->
+ ets:insert(?MODULE, {topic_access, TopicContext}),
+ true.
+
+state_can_expire() -> false.
+
+get(K) ->
+ ets:lookup(?MODULE, K).
diff --git a/deps/rabbitmq_mqtt/test/rabbitmq_mqtt.app b/deps/rabbitmq_mqtt/test/rabbitmq_mqtt.app
new file mode 100644
index 0000000000..c4083ec5fc
--- /dev/null
+++ b/deps/rabbitmq_mqtt/test/rabbitmq_mqtt.app
@@ -0,0 +1,19 @@
+{application, rabbitmq_mqtt,
+ [{description, "RabbitMQ MQTT Adapter"},
+ {vsn, "%%VSN%%"},
+ {modules, []},
+ {registered, []},
+ {mod, {rabbit_mqtt, []}},
+ {env, [{default_user, "guest_user"},
+ {default_pass, "guest_pass"},
+ {ssl_cert_login,false},
+ {allow_anonymous, true},
+ {vhost, "/"},
+ {exchange, "amq.topic"},
+ {subscription_ttl, 1800000}, % 30 min
+ {prefetch, 10},
+ {ssl_listeners, []},
+ {tcp_listeners, [1883]},
+ {tcp_listen_options, [{backlog, 128},
+ {nodelay, true}]}]},
+ {applications, [kernel, stdlib, rabbit, amqp_client]}]}.
diff --git a/deps/rabbitmq_mqtt/test/reader_SUITE.erl b/deps/rabbitmq_mqtt/test/reader_SUITE.erl
new file mode 100644
index 0000000000..b94fdb5920
--- /dev/null
+++ b/deps/rabbitmq_mqtt/test/reader_SUITE.erl
@@ -0,0 +1,166 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+-module(reader_SUITE).
+-compile([export_all]).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+all() ->
+ [
+ {group, non_parallel_tests}
+ ].
+
+groups() ->
+ [
+ {non_parallel_tests, [], [
+ block,
+ handle_invalid_frames,
+ stats
+ ]}
+ ].
+
+suite() ->
+ [{timetrap, {seconds, 60}}].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+merge_app_env(Config) ->
+ rabbit_ct_helpers:merge_app_env(Config,
+ {rabbit, [
+ {collect_statistics, basic},
+ {collect_statistics_interval, 100}
+ ]}).
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, ?MODULE},
+ {rmq_extra_tcp_ports, [tcp_port_mqtt_extra,
+ tcp_port_mqtt_tls_extra]}
+ ]),
+ rabbit_ct_helpers:run_setup_steps(Config1,
+ [ fun merge_app_env/1 ] ++
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_group(_, Config) ->
+ Config.
+
+end_per_group(_, Config) ->
+ Config.
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+
+%% -------------------------------------------------------------------
+%% Testsuite cases
+%% -------------------------------------------------------------------
+
+block(Config) ->
+ P = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_mqtt),
+ {ok, C} = emqttc:start_link([{host, "localhost"},
+ {port, P},
+ {client_id, <<"simpleClient">>},
+ {proto_ver, 3},
+ {logger, info},
+ {puback_timeout, 1}]),
+ %% Only here to ensure the connection is really up
+ emqttc:subscribe(C, <<"TopicA">>, qos0),
+ emqttc:publish(C, <<"TopicA">>, <<"Payload">>),
+ expect_publishes(<<"TopicA">>, [<<"Payload">>]),
+ emqttc:unsubscribe(C, [<<"TopicA">>]),
+
+ emqttc:subscribe(C, <<"Topic1">>, qos0),
+
+ %% Not blocked
+ {ok, _} = emqttc:sync_publish(C, <<"Topic1">>, <<"Not blocked yet">>,
+ [{qos, 1}]),
+
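+ %% force a memory alarm so the broker blocks the publishing connection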
+ ok = rpc(Config, vm_memory_monitor, set_vm_memory_high_watermark, [0.00000001]),
+ ok = rpc(Config, rabbit_alarm, set_alarm, [{{resource_limit, memory, node()}, []}]),
+
+ %% Let it block
+ timer:sleep(100),
+ %% Blocked: the publish is still sent, but the ack times out
+ {error, ack_timeout} = emqttc:sync_publish(C, <<"Topic1">>, <<"Now blocked">>,
+ [{qos, 1}]),
+
+ %% Blocked
+ {error, ack_timeout} = emqttc:sync_publish(C, <<"Topic1">>,
+ <<"Blocked">>, [{qos, 1}]),
+
+ rpc(Config, vm_memory_monitor, set_vm_memory_high_watermark, [0.4]),
+ rpc(Config, rabbit_alarm, clear_alarm, [{resource_limit, memory, node()}]),
+
+ %% Let alarms clear
+ timer:sleep(1000),
+
+ expect_publishes(<<"Topic1">>, [<<"Not blocked yet">>,
+ <<"Now blocked">>,
+ <<"Blocked">>]),
+
+ emqttc:disconnect(C).
+
+handle_invalid_frames(Config) ->
+ N = rpc(Config, ets, info, [connection_metrics, size]),
+ P = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_mqtt),
+ {ok, C} = gen_tcp:connect("localhost", P, []),
+ Bin = <<"GET / HTTP/1.1\r\nHost: www.rabbitmq.com\r\nUser-Agent: curl/7.43.0\r\nAccept: */*">>,
+ gen_tcp:send(C, Bin),
+ gen_tcp:close(C),
+ %% No new stats entries should be inserted as connection never got to initialize
+ N = rpc(Config, ets, info, [connection_metrics, size]).
+
+stats(Config) ->
+ P = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_mqtt),
+ %% CMN = rpc(Config, ets, info, [connection_metrics, size]),
+ %% CCMN = rpc(Config, ets, info, [connection_coarse_metrics, size]),
+ {ok, C} = emqttc:start_link([{host, "localhost"},
+ {port, P},
+ {client_id, <<"simpleClient">>},
+ {proto_ver, 3},
+ {logger, info},
+ {puback_timeout, 1}]),
+ %% Ensure that there are some stats
+ emqttc:subscribe(C, <<"TopicA">>, qos0),
+ emqttc:publish(C, <<"TopicA">>, <<"Payload">>),
+ expect_publishes(<<"TopicA">>, [<<"Payload">>]),
+ emqttc:unsubscribe(C, [<<"TopicA">>]),
+ timer:sleep(1000), %% wait for stats, which are emitted every 100ms
+ %% Retrieve the connection Pid
+ [{_, Reader}] = rpc(Config, rabbit_mqtt_collector, list, []),
+ [{_, Pid}] = rpc(Config, rabbit_mqtt_reader, info, [Reader, [connection]]),
+ %% Verify the content of the metrics, garbage_collection must be present
+ [{Pid, Props}] = rpc(Config, ets, lookup, [connection_metrics, Pid]),
+ true = proplists:is_defined(garbage_collection, Props),
+ %% If the coarse entry is present, stats were successfully emitted
+ [{Pid, _, _, _, _}] = rpc(Config, ets, lookup,
+ [connection_coarse_metrics, Pid]),
+ emqttc:disconnect(C).
+
+expect_publishes(_Topic, []) -> ok;
+expect_publishes(Topic, [Payload|Rest]) ->
+ receive
+ {publish, Topic, Payload} -> expect_publishes(Topic, Rest)
+ after 5000 ->
+ throw({publish_not_delivered, Payload})
+ end.
+
+rpc(Config, M, F, A) ->
+ rabbit_ct_broker_helpers:rpc(Config, 0, M, F, A).
diff --git a/deps/rabbitmq_mqtt/test/retainer_SUITE.erl b/deps/rabbitmq_mqtt/test/retainer_SUITE.erl
new file mode 100644
index 0000000000..22b72a8d87
--- /dev/null
+++ b/deps/rabbitmq_mqtt/test/retainer_SUITE.erl
@@ -0,0 +1,144 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+-module(retainer_SUITE).
+-compile([export_all]).
+
+-include_lib("common_test/include/ct.hrl").
+
+all() ->
+ [
+ {group, non_parallel_tests}
+ ].
+
+groups() ->
+ [
+ {non_parallel_tests, [], [
+ coerce_configuration_data,
+ should_translate_amqp2mqtt_on_publish,
+ should_translate_amqp2mqtt_on_retention,
+ should_translate_amqp2mqtt_on_retention_search
+ ]}
+ ].
+
+suite() ->
+ [{timetrap, {seconds, 600}}].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, ?MODULE},
+ {rmq_extra_tcp_ports, [tcp_port_mqtt_extra,
+ tcp_port_mqtt_tls_extra]}
+ ]),
+ % see https://github.com/rabbitmq/rabbitmq-mqtt/issues/86
+ RabbitConfig = {rabbit, [
+ {default_user, "guest"},
+ {default_pass, "guest"},
+ {default_vhost, "/"},
+ {default_permissions, [".*", ".*", ".*"]}
+ ]},
+ rabbit_ct_helpers:run_setup_steps(Config1,
+ [ fun(Conf) -> merge_app_env(RabbitConfig, Conf) end ] ++
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+merge_app_env(MqttConfig, Config) ->
+ rabbit_ct_helpers:merge_app_env(Config, MqttConfig).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_group(_, Config) ->
+ Config.
+
+end_per_group(_, Config) ->
+ Config.
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+
+%% -------------------------------------------------------------------
+%% Testsuite cases
+%% -------------------------------------------------------------------
+
+coerce_configuration_data(Config) ->
+ P = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_mqtt),
+ {ok, C} = emqttc:start_link(connection_opts(P)),
+
+ emqttc:subscribe(C, <<"TopicA">>, qos0),
+ emqttc:publish(C, <<"TopicA">>, <<"Payload">>),
+ expect_publishes(<<"TopicA">>, [<<"Payload">>]),
+
+ emqttc:disconnect(C),
+ ok.
+
+%% -------------------------------------------------------------------
+%% When a client is subscribed to TopicA/Device.Field and another
+%% client publishes to TopicA/Device.Field, the subscriber should
+%% receive messages on the translated topic (TopicA/Device/Field)
+%% -------------------------------------------------------------------
+should_translate_amqp2mqtt_on_publish(Config) ->
+ P = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_mqtt),
+ {ok, C} = emqttc:start_link(connection_opts(P)),
+ %% there's an active consumer
+ emqttc:subscribe(C, <<"TopicA/Device.Field">>, qos1),
+ emqttc:publish(C, <<"TopicA/Device.Field">>, <<"Payload">>, [{retain, true}]),
+ expect_publishes(<<"TopicA/Device/Field">>, [<<"Payload">>]),
+ emqttc:disconnect(C).
+
+%% -------------------------------------------------------------------
+%% If a client publishes a retained message to TopicA/Device.Field and another
+%% client subscribes to TopicA/Device.Field, the subscriber should be
+%% sent the retained message for the translated topic (TopicA/Device/Field)
+%% -------------------------------------------------------------------
+should_translate_amqp2mqtt_on_retention(Config) ->
+ P = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_mqtt),
+ {ok, C} = emqttc:start_link(connection_opts(P)),
+ %% publish with retain = true before a consumer comes around
+ emqttc:publish(C, <<"TopicA/Device.Field">>, <<"Payload">>, [{retain, true}]),
+ emqttc:subscribe(C, <<"TopicA/Device.Field">>, qos1),
+ expect_publishes(<<"TopicA/Device/Field">>, [<<"Payload">>]),
+ emqttc:disconnect(C).
+
+%% -------------------------------------------------------------------
+%% If a client publishes a retained message to TopicA/Device.Field and another
+%% client subscribes to TopicA/Device/Field, the subscriber should be
+%% sent the retained message for the translated topic (TopicA/Device/Field)
+%% -------------------------------------------------------------------
+should_translate_amqp2mqtt_on_retention_search(Config) ->
+ P = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_mqtt),
+ {ok, C} = emqttc:start_link(connection_opts(P)),
+ emqttc:publish(C, <<"TopicA/Device.Field">>, <<"Payload">>, [{retain, true}]),
+ emqttc:subscribe(C, <<"TopicA/Device/Field">>, qos1),
+ expect_publishes(<<"TopicA/Device/Field">>, [<<"Payload">>]),
+ emqttc:disconnect(C).
+
+connection_opts(Port) ->
+ [{host, "localhost"},
+ {port, Port},
+ {client_id, <<"simpleClientRetainer">>},
+ {proto_ver,3},
+ {logger, info},
+ {puback_timeout, 1}].
+
+expect_publishes(_Topic, []) -> ok;
+expect_publishes(Topic, [Payload | Rest]) ->
+ receive
+ {publish, Topic, Payload} -> expect_publishes(Topic, Rest)
+ after 1500 ->
+ throw({publish_not_delivered, Payload})
+ end.
diff --git a/deps/rabbitmq_mqtt/test/util_SUITE.erl b/deps/rabbitmq_mqtt/test/util_SUITE.erl
new file mode 100644
index 0000000000..6694498595
--- /dev/null
+++ b/deps/rabbitmq_mqtt/test/util_SUITE.erl
@@ -0,0 +1,80 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+-module(util_SUITE).
+-compile([export_all]).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+all() ->
+ [
+ {group, util_tests}
+ ].
+
+groups() ->
+ [
+ {util_tests, [parallel], [
+ coerce_exchange,
+ coerce_vhost,
+ coerce_default_user,
+ coerce_default_pass,
+ mqtt_amqp_topic_translation
+ ]
+ }
+ ].
+
+suite() ->
+ [{timetrap, {seconds, 60}}].
+
+init_per_suite(Config) ->
+ ok = application:load(rabbitmq_mqtt),
+ Config.
+end_per_suite(Config) ->
+ ok = application:unload(rabbitmq_mqtt),
+ Config.
+init_per_group(_, Config) -> Config.
+end_per_group(_, Config) -> Config.
+init_per_testcase(_, Config) -> Config.
+end_per_testcase(_, Config) -> Config.
+
+coerce_exchange(_) ->
+ ?assertEqual(<<"amq.topic">>, rabbit_mqtt_util:env(exchange)).
+
+coerce_vhost(_) ->
+ ?assertEqual(<<"/">>, rabbit_mqtt_util:env(vhost)).
+
+coerce_default_user(_) ->
+ ?assertEqual(<<"guest_user">>, rabbit_mqtt_util:env(default_user)).
+
+coerce_default_pass(_) ->
+ ?assertEqual(<<"guest_pass">>, rabbit_mqtt_util:env(default_pass)).
+
+mqtt_amqp_topic_translation(_) ->
+ ok = application:set_env(rabbitmq_mqtt, sparkplug, true),
+ {ok, {mqtt2amqp_fun, Mqtt2AmqpFun}, {amqp2mqtt_fun, Amqp2MqttFun}} =
+ rabbit_mqtt_util:get_topic_translation_funs(),
+
+ T0 = "/foo/bar/+/baz",
+ T0_As_Amqp = <<".foo.bar.*.baz">>,
+ T0_As_Mqtt = <<"/foo/bar/+/baz">>,
+ ?assertEqual(T0_As_Amqp, Mqtt2AmqpFun(T0)),
+ ?assertEqual(T0_As_Mqtt, Amqp2MqttFun(T0_As_Amqp)),
+
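+ %% Sparkplug topics carry a version prefix ("spAv1.0/", "spBvX.Y/");
+ %% the dot in the prefix must survive the AMQP round trip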
+ T1 = "spAv1.0/foo/bar/+/baz",
+ T1_As_Amqp = <<"spAv1___0.foo.bar.*.baz">>,
+ T1_As_Mqtt = <<"spAv1.0/foo/bar/+/baz">>,
+ ?assertEqual(T1_As_Amqp, Mqtt2AmqpFun(T1)),
+ ?assertEqual(T1_As_Mqtt, Amqp2MqttFun(T1_As_Amqp)),
+
+ T2 = "spBv2.90/foo/bar/+/baz",
+ T2_As_Amqp = <<"spBv2___90.foo.bar.*.baz">>,
+ T2_As_Mqtt = <<"spBv2.90/foo/bar/+/baz">>,
+ ?assertEqual(T2_As_Amqp, Mqtt2AmqpFun(T2)),
+ ?assertEqual(T2_As_Mqtt, Amqp2MqttFun(T2_As_Amqp)),
+
+ ok = application:unset_env(rabbitmq_mqtt, sparkplug),
+ ok.
diff --git a/deps/rabbitmq_peer_discovery_aws/.gitignore b/deps/rabbitmq_peer_discovery_aws/.gitignore
new file mode 100644
index 0000000000..08630c4a36
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_aws/.gitignore
@@ -0,0 +1,25 @@
+*~
+.sw?
+.*.sw?
+*.beam
+*.coverdata
+/.erlang.mk/
+/cover/
+/deps/
+/doc/
+/ebin/
+/escript/
+/escript.lock
+/git-revisions.txt
+/logs/
+/plugins/
+/plugins.lock
+/rebar.config
+/rebar.lock
+/sbin/
+/sbin.lock
+/test/config_schema_SUITE_data/schema/
+/test/ct.cover.spec
+/xrefr
+
+/rabbitmq_peer_discovery_aws.d
diff --git a/deps/rabbitmq_peer_discovery_aws/.travis.yml b/deps/rabbitmq_peer_discovery_aws/.travis.yml
new file mode 100644
index 0000000000..790cda2cbc
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_aws/.travis.yml
@@ -0,0 +1,61 @@
+# vim:sw=2:et:
+
+os: linux
+dist: xenial
+language: elixir
+notifications:
+ email:
+ recipients:
+ - alerts@rabbitmq.com
+ on_success: never
+ on_failure: always
+addons:
+ apt:
+ packages:
+ - awscli
+cache:
+ apt: true
+env:
+ global:
+ - secure: t2tKVW6mWZYHwW/ps6sKVIUm3Lk/t+7B4wRLiQzMYEhbAr7AQr/qnvT1cyEm5GVNohLB6yx2U2Wr7TIMAjGgEhLKPWG0djAYjQEcak3U8/6fd/+9YfDb396/pOryNA7d3YcgcUzpH1qglxxrxWcE9tf4heb5+V16lyJ767qpbtDH6B76ywOJmYK8GTsmpaje6YxG0t3xsrIH8HwIHDom2bummJZui9R+ndCYLFccqyR/z0ui0pdjw5+V6SuggQUSh4R9PGez7YVyHVSKiYTXOTrcJ98/ZlBD4KNlNWeqTB2UbSf85ngWdV2thncQy9Mgof2dtMFQHEH/QMEwdKZwqf0Pc8fO0Gzrn7U73JgLp/v1sJxmCKhkm5HJNaQd/3XYCturAN9TfKBKQkiz28tnTJWKppisdrwcV3wl/ZI1Tm0y2xqG2TBvx4OotwwkaMGhGAwDhRrd7x4jXcVtfAyT458gXEtQUaGP0kZFoPSah35iBRiLxzNLMrMRTSHBBJILYxiyVsWL/UEh2aQwfluNc673AqFTm6MggKWX/TpjXqpcJ6mT+Q7p/Dd17RNAUWeSMtHvTDAqkbzDujM/7RRMuf3ZX4TVSvC+aq41UMFU+8K+u47X5UUEtPlyQNHDp2lliWyBj0fQkizYDCME8yuEEA+azcKY73G6jIbt6FWxMpw=
+ - secure: UJx8ibllZKBn2N1ixoSXrZ3N6D08iajBjjPK3s5hqWgY+39rv9Q2FzzjlmUMc4q2ui0skL0JmPt/EyA/TEr9OEkSpHaUkYPCUMQS51fJmhbKoghQ4OjrtBhmyMTzm4a9BqPYl8jNNbKu9U1mARH3JzmeUUc2odfK1Gf1RLraKymSMx+fYHrVE1h4Puwy2Xm/se39LgVgv+RWTpofVN+IzY41uTeJ0aeTkwRs8XvFLSRBCW2ye8rz+4VhSZB9xHJF+ySJVUuWz99Q4VMVGjuMfevZgZGGDKTS7yDL2L1uhRrMgzwBse1iZXxOYyBXAw6ewydbg8wCvwlU6JAs8uaT5vGmvUc8OW/Dp58m8Uv4vzcrR1cYb7V7SIctV23juxso8KkDs0dsHIlpfT4PIosJdVmrgFfwibTPc698nyIbmXx8bY1K1cUP81+s4ZzQ6nTWcBRii1o4L+PAUtkc8G3Uca8UwAilMdf4+bEfGCF3SppT/Uq7s81i4sPWY7G6gdXlEtCViqMFsLGTx9vOmXJCIyIVGcXYMKexYDjOebSJ06tPcjox5y8ZIPQbraUt7L974gHbRn0OeqifoopUfHhgg8bPhXS5os6zljmwnCJebvTw0N/YGQUKetcVuBUANypkLekJE/1/TQ2cUpKeLDuCVkvPhpgP71lm0gqmXcp9rcY=
+
+ # $base_rmq_ref is used by rabbitmq-components.mk to select the
+ # appropriate branch for dependencies.
+ - base_rmq_ref=master
+
+elixir:
+ - '1.9'
+otp_release:
+ - '21.3'
+ - '22.2'
+
+install:
+ # This project being an Erlang one (we just set language to Elixir
+ # to ensure it is installed), we don't want Travis to run mix(1)
+ # automatically as it will break.
+ skip
+
+script:
+ # $current_rmq_ref is also used by rabbitmq-components.mk to select
+ # the appropriate branch for dependencies.
+ - make check-rabbitmq-components.mk
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+ - make xref
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+ - make tests
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+
+after_failure:
+ - |
+ cd "$TRAVIS_BUILD_DIR"
+ if test -d logs && test "$AWS_ACCESS_KEY_ID" && test "$AWS_SECRET_ACCESS_KEY"; then
+ archive_name="$(basename "$TRAVIS_REPO_SLUG")-$TRAVIS_JOB_NUMBER"
+
+ tar -c --transform "s/^logs/${archive_name}/" -f - logs | \
+ xz > "${archive_name}.tar.xz"
+
+ aws s3 cp "${archive_name}.tar.xz" s3://server-release-pipeline/travis-ci-logs/ \
+ --region eu-west-1 \
+ --acl public-read
+ fi
diff --git a/deps/rabbitmq_peer_discovery_aws/CODE_OF_CONDUCT.md b/deps/rabbitmq_peer_discovery_aws/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000000..08697906fd
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_aws/CODE_OF_CONDUCT.md
@@ -0,0 +1,44 @@
+# Contributor Code of Conduct
+
+As contributors and maintainers of this project, and in the interest of fostering an open
+and welcoming community, we pledge to respect all people who contribute through reporting
+issues, posting feature requests, updating documentation, submitting pull requests or
+patches, and other activities.
+
+We are committed to making participation in this project a harassment-free experience for
+everyone, regardless of level of experience, gender, gender identity and expression,
+sexual orientation, disability, personal appearance, body size, race, ethnicity, age,
+religion, or nationality.
+
+Examples of unacceptable behavior by participants include:
+
+ * The use of sexualized language or imagery
+ * Personal attacks
+ * Trolling or insulting/derogatory comments
+ * Public or private harassment
+ * Publishing other's private information, such as physical or electronic addresses,
+ without explicit permission
+ * Other unethical or unprofessional conduct
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments,
+commits, code, wiki edits, issues, and other contributions that are not aligned to this
+Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors
+that they deem inappropriate, threatening, offensive, or harmful.
+
+By adopting this Code of Conduct, project maintainers commit themselves to fairly and
+consistently applying these principles to every aspect of managing this project. Project
+maintainers who do not follow or enforce the Code of Conduct may be permanently removed
+from the project team.
+
+This Code of Conduct applies both within project spaces and in public spaces when an
+individual is representing the project or its community.
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by
+contacting a project maintainer at [info@rabbitmq.com](mailto:info@rabbitmq.com). All complaints will
+be reviewed and investigated and will result in a response that is deemed necessary and
+appropriate to the circumstances. Maintainers are obligated to maintain confidentiality
+with regard to the reporter of an incident.
+
+This Code of Conduct is adapted from the
+[Contributor Covenant](https://contributor-covenant.org), version 1.3.0, available at
+[contributor-covenant.org/version/1/3/0/](https://contributor-covenant.org/version/1/3/0/)
diff --git a/deps/rabbitmq_peer_discovery_aws/CONTRIBUTING.md b/deps/rabbitmq_peer_discovery_aws/CONTRIBUTING.md
new file mode 100644
index 0000000000..1fee78b7e8
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_aws/CONTRIBUTING.md
@@ -0,0 +1,128 @@
+Thank you for using RabbitMQ and for taking the time to contribute to the project.
+This document has two main parts:
+
+ * when and how to file GitHub issues for RabbitMQ projects
+ * how to submit pull requests
+
+They intend to save you and RabbitMQ maintainers some time, so please
+take a moment to read through them.
+
+## Overview
+
+### GitHub issues
+
+The RabbitMQ team uses GitHub issues for _specific actionable items_ that
+engineers can work on. This assumes the following:
+
+* GitHub issues are not used for questions, investigations, root cause
+ analysis, discussions of potential issues, etc (as defined by this team)
+* Enough information is provided by the reporter for maintainers to work with
+
+The team receives many questions through various venues every single
+day. Frequently, these questions do not include the necessary details
+the team needs to begin useful work. GitHub issues can very quickly
+turn into something impossible to navigate and make sense
+of. Because of this, questions, investigations, root cause analysis,
+and discussions of potential features are all considered to be
+[mailing list][rmq-users] material. If you are unsure where to begin,
+the [RabbitMQ users mailing list][rmq-users] is the right place.
+
+Getting all the details necessary to reproduce an issue, make a
+conclusion or even form a hypothesis about what's happening can take a
+fair amount of time. Please help others help you by providing a way to
+reproduce the behavior you're observing, or at least sharing as much
+relevant information as possible on the [RabbitMQ users mailing
+list][rmq-users].
+
+Please provide versions of the software used:
+
+ * RabbitMQ server
+ * Erlang
+ * Operating system version (and distribution, if applicable)
+ * All client libraries used
+ * RabbitMQ plugins (if applicable)
+
+The following information greatly helps in investigating and reproducing issues:
+
+ * RabbitMQ server logs
+ * A code example or terminal transcript that can be used to reproduce
+ * Full exception stack traces (a single line message is not enough!)
+ * `rabbitmqctl report` and `rabbitmqctl environment` output
+ * Other relevant details about the environment and workload, e.g. a traffic capture
+ * Feel free to edit out hostnames and other potentially sensitive information.
+
+To make collecting much of this and other environment information easier, use
+the [`rabbitmq-collect-env`][rmq-collect-env] script. It will produce an archive with
+server logs, operating system logs, output of certain diagnostics commands and so on.
+Please note that **no effort is made to scrub any information that may be sensitive**.
+
+### Pull Requests
+
+RabbitMQ projects use pull requests to discuss, collaborate on and accept code contributions.
+Pull requests are the primary place for discussing code changes.
+
+Here's the recommended workflow:
+
+ * [Fork the repository][github-fork] or repositories you plan on contributing to. If multiple
+ repositories are involved in addressing the same issue, please use the same branch name
+ in each repository
+ * Create a branch with a descriptive name in the relevant repositories
+ * Make your changes, run tests (usually with `make tests`), commit with a
+ [descriptive message][git-commit-msgs], push to your fork
+ * Submit pull requests with an explanation what has been changed and **why**
+ * Submit a filled out and signed [Contributor Agreement][ca-agreement] if needed (see below)
+ * Be patient. We will get to your pull request eventually
+
+If what you are going to work on is a substantial change, please first
+ask the core team for their opinion on the [RabbitMQ users mailing list][rmq-users].
+
+## Running Tests
+
+
+Integration tests of this plugin need AWS credentials and an SSH key pair.
+The credentials can be set in two ways:
+
+ * Via a `$HOME/.aws/credentials` file
+ * Using the `AWS_SECRET_ACCESS_KEY` and `AWS_ACCESS_KEY_ID` environment variables
+
+Below is an example of a `.aws/credentials` file:
+
+``` ini
+[default]
+aws_access_key_id = EXAMPLEACCESSKEYID
+aws_secret_access_key = a-secret-access-key
+```
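+
+Alternatively, the same credentials can be exported as environment variables
+(the values shown are placeholders):
+
+``` sh
+export AWS_ACCESS_KEY_ID=EXAMPLEACCESSKEYID
+export AWS_SECRET_ACCESS_KEY=a-secret-access-key
+```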
+
+A private SSH key is also required to transfer a locally compiled version
+of the plugin to all remote nodes. The key is set via the `SSH_KEY` environment
+variable.
+
+ make ct-integration
+
+will run integration tests (on AWS) only.
+
+ make tests
+
+will run all suites.
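+
+For example, assuming credentials come from `$HOME/.aws/credentials` and a
+hypothetical key path:
+
+    SSH_KEY=$HOME/.ssh/aws_test_key make ct-integration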
+
+## Code of Conduct
+
+See [CODE_OF_CONDUCT.md](./CODE_OF_CONDUCT.md).
+
+## Contributor Agreement
+
+If you want to contribute a non-trivial change, please submit a signed
+copy of our [Contributor Agreement][ca-agreement] around the time you
+submit your pull request. This will make it much easier (and in some
+cases, possible at all) for the RabbitMQ team at Pivotal to merge your
+contribution.
+
+## Where to Ask Questions
+
+If something isn't clear, feel free to ask on our [mailing list][rmq-users].
+
+[rmq-collect-env]: https://github.com/rabbitmq/support-tools/blob/master/scripts/rabbitmq-collect-env
+[git-commit-msgs]: https://chris.beams.io/posts/git-commit/
+[rmq-users]: https://groups.google.com/forum/#!forum/rabbitmq-users
+[ca-agreement]: https://cla.pivotal.io/sign/rabbitmq
+[github-fork]: https://help.github.com/articles/fork-a-repo/
diff --git a/deps/rabbitmq_peer_discovery_aws/LICENSE b/deps/rabbitmq_peer_discovery_aws/LICENSE
new file mode 100644
index 0000000000..f2da65d175
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_aws/LICENSE
@@ -0,0 +1,4 @@
+This package is licensed under the MPL 2.0. For the MPL 2.0, please see LICENSE-MPL-RabbitMQ.
+
+If you have any questions regarding licensing, please contact us at
+info@rabbitmq.com.
diff --git a/deps/rabbitmq_peer_discovery_aws/LICENSE-MPL-RabbitMQ b/deps/rabbitmq_peer_discovery_aws/LICENSE-MPL-RabbitMQ
new file mode 100644
index 0000000000..14e2f777f6
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_aws/LICENSE-MPL-RabbitMQ
@@ -0,0 +1,373 @@
+Mozilla Public License Version 2.0
+==================================
+
+1. Definitions
+--------------
+
+1.1. "Contributor"
+ means each individual or legal entity that creates, contributes to
+ the creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+ means the combination of the Contributions of others (if any) used
+ by a Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+ means Source Code Form to which the initial Contributor has attached
+ the notice in Exhibit A, the Executable Form of such Source Code
+ Form, and Modifications of such Source Code Form, in each case
+ including portions thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ (a) that the initial Contributor has attached the notice described
+ in Exhibit B to the Covered Software; or
+
+ (b) that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the
+ terms of a Secondary License.
+
+1.6. "Executable Form"
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+ means a work that combines Covered Software with other material, in
+ a separate file or files, that is not Covered Software.
+
+1.8. "License"
+ means this document.
+
+1.9. "Licensable"
+ means having the right to grant, to the maximum extent possible,
+ whether at the time of the initial grant or subsequently, any and
+ all of the rights conveyed by this License.
+
+1.10. "Modifications"
+ means any of the following:
+
+ (a) any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered
+ Software; or
+
+ (b) any new file in Source Code Form that contains any Covered
+ Software.
+
+1.11. "Patent Claims" of a Contributor
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the
+ License, by the making, using, selling, offering for sale, having
+ made, import, or transfer of either its Contributions or its
+ Contributor Version.
+
+1.12. "Secondary License"
+ means either the GNU General Public License, Version 2.0, the GNU
+ Lesser General Public License, Version 2.1, the GNU Affero General
+ Public License, Version 3.0, or any later versions of those
+ licenses.
+
+1.13. "Source Code Form"
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that
+ controls, is controlled by, or is under common control with You. For
+ purposes of this definition, "control" means (a) the power, direct
+ or indirect, to cause the direction or management of such entity,
+ whether by contract or otherwise, or (b) ownership of more than
+ fifty percent (50%) of the outstanding shares or beneficial
+ ownership of such entity.
+
+2. License Grants and Conditions
+--------------------------------
+
+2.1. Grants
+
+Each Contributor hereby grants You a world-wide, royalty-free,
+non-exclusive license:
+
+(a) under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+(b) under Patent Claims of such Contributor to make, use, sell, offer
+ for sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+The licenses granted in Section 2.1 with respect to any Contribution
+become effective for each Contribution on the date the Contributor first
+distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+The licenses granted in this Section 2 are the only rights granted under
+this License. No additional rights or licenses will be implied from the
+distribution or licensing of Covered Software under this License.
+Notwithstanding Section 2.1(b) above, no patent license is granted by a
+Contributor:
+
+(a) for any code that a Contributor has removed from Covered Software;
+ or
+
+(b) for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+(c) under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+This License does not grant any rights in the trademarks, service marks,
+or logos of any Contributor (except as may be necessary to comply with
+the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+No Contributor makes additional grants as a result of Your choice to
+distribute the Covered Software under a subsequent version of this
+License (see Section 10.2) or under the terms of a Secondary License (if
+permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+Each Contributor represents that the Contributor believes its
+Contributions are its original creation(s) or it has sufficient rights
+to grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+This License is not intended to limit any rights You have under
+applicable copyright doctrines of fair use, fair dealing, or other
+equivalents.
+
+2.7. Conditions
+
+Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
+in Section 2.1.
+
+3. Responsibilities
+-------------------
+
+3.1. Distribution of Source Form
+
+All distribution of Covered Software in Source Code Form, including any
+Modifications that You create or to which You contribute, must be under
+the terms of this License. You must inform recipients that the Source
+Code Form of the Covered Software is governed by the terms of this
+License, and how they can obtain a copy of this License. You may not
+attempt to alter or restrict the recipients' rights in the Source Code
+Form.
+
+3.2. Distribution of Executable Form
+
+If You distribute Covered Software in Executable Form then:
+
+(a) such Covered Software must also be made available in Source Code
+ Form, as described in Section 3.1, and You must inform recipients of
+ the Executable Form how they can obtain a copy of such Source Code
+ Form by reasonable means in a timely manner, at a charge no more
+ than the cost of distribution to the recipient; and
+
+(b) You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter
+ the recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+You may create and distribute a Larger Work under terms of Your choice,
+provided that You also comply with the requirements of this License for
+the Covered Software. If the Larger Work is a combination of Covered
+Software with a work governed by one or more Secondary Licenses, and the
+Covered Software is not Incompatible With Secondary Licenses, this
+License permits You to additionally distribute such Covered Software
+under the terms of such Secondary License(s), so that the recipient of
+the Larger Work may, at their option, further distribute the Covered
+Software under the terms of either this License or such Secondary
+License(s).
+
+3.4. Notices
+
+You may not remove or alter the substance of any license notices
+(including copyright notices, patent notices, disclaimers of warranty,
+or limitations of liability) contained within the Source Code Form of
+the Covered Software, except that You may alter any license notices to
+the extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+You may choose to offer, and to charge a fee for, warranty, support,
+indemnity or liability obligations to one or more recipients of Covered
+Software. However, You may do so only on Your own behalf, and not on
+behalf of any Contributor. You must make it absolutely clear that any
+such warranty, support, indemnity, or liability obligation is offered by
+You alone, and You hereby agree to indemnify every Contributor for any
+liability incurred by such Contributor as a result of warranty, support,
+indemnity or liability terms You offer. You may include additional
+disclaimers of warranty and limitations of liability specific to any
+jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+---------------------------------------------------
+
+If it is impossible for You to comply with any of the terms of this
+License with respect to some or all of the Covered Software due to
+statute, judicial order, or regulation then You must: (a) comply with
+the terms of this License to the maximum extent possible; and (b)
+describe the limitations and the code they affect. Such description must
+be placed in a text file included with all distributions of the Covered
+Software under this License. Except to the extent prohibited by statute
+or regulation, such description must be sufficiently detailed for a
+recipient of ordinary skill to be able to understand it.
+
+5. Termination
+--------------
+
+5.1. The rights granted under this License will terminate automatically
+if You fail to comply with any of its terms. However, if You become
+compliant, then the rights granted under this License from a particular
+Contributor are reinstated (a) provisionally, unless and until such
+Contributor explicitly and finally terminates Your grants, and (b) on an
+ongoing basis, if such Contributor fails to notify You of the
+non-compliance by some reasonable means prior to 60 days after You have
+come back into compliance. Moreover, Your grants from a particular
+Contributor are reinstated on an ongoing basis if such Contributor
+notifies You of the non-compliance by some reasonable means, this is the
+first time You have received notice of non-compliance with this License
+from such Contributor, and You become compliant prior to 30 days after
+Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+infringement claim (excluding declaratory judgment actions,
+counter-claims, and cross-claims) alleging that a Contributor Version
+directly or indirectly infringes any patent, then the rights granted to
+You by any and all Contributors for the Covered Software under Section
+2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all
+end user license agreements (excluding distributors and resellers) which
+have been validly granted by You or Your distributors under this License
+prior to termination shall survive termination.
+
+************************************************************************
+* *
+* 6. Disclaimer of Warranty *
+* ------------------------- *
+* *
+* Covered Software is provided under this License on an "as is" *
+* basis, without warranty of any kind, either expressed, implied, or *
+* statutory, including, without limitation, warranties that the *
+* Covered Software is free of defects, merchantable, fit for a *
+* particular purpose or non-infringing. The entire risk as to the *
+* quality and performance of the Covered Software is with You. *
+* Should any Covered Software prove defective in any respect, You *
+* (not any Contributor) assume the cost of any necessary servicing, *
+* repair, or correction. This disclaimer of warranty constitutes an *
+* essential part of this License. No use of any Covered Software is *
+* authorized under this License except under this disclaimer. *
+* *
+************************************************************************
+
+************************************************************************
+* *
+* 7. Limitation of Liability *
+* -------------------------- *
+* *
+* Under no circumstances and under no legal theory, whether tort *
+* (including negligence), contract, or otherwise, shall any *
+* Contributor, or anyone who distributes Covered Software as *
+* permitted above, be liable to You for any direct, indirect, *
+* special, incidental, or consequential damages of any character *
+* including, without limitation, damages for lost profits, loss of *
+* goodwill, work stoppage, computer failure or malfunction, or any *
+* and all other commercial damages or losses, even if such party *
+* shall have been informed of the possibility of such damages. This *
+* limitation of liability shall not apply to liability for death or *
+* personal injury resulting from such party's negligence to the *
+* extent applicable law prohibits such limitation. Some *
+* jurisdictions do not allow the exclusion or limitation of *
+* incidental or consequential damages, so this exclusion and *
+* limitation may not apply to You. *
+* *
+************************************************************************
+
+8. Litigation
+-------------
+
+Any litigation relating to this License may be brought only in the
+courts of a jurisdiction where the defendant maintains its principal
+place of business and such litigation shall be governed by laws of that
+jurisdiction, without reference to its conflict-of-law provisions.
+Nothing in this Section shall prevent a party's ability to bring
+cross-claims or counter-claims.
+
+9. Miscellaneous
+----------------
+
+This License represents the complete agreement concerning the subject
+matter hereof. If any provision of this License is held to be
+unenforceable, such provision shall be reformed only to the extent
+necessary to make it enforceable. Any law or regulation which provides
+that the language of a contract shall be construed against the drafter
+shall not be used to construe this License against a Contributor.
+
+10. Versions of the License
+---------------------------
+
+10.1. New Versions
+
+Mozilla Foundation is the license steward. Except as provided in Section
+10.3, no one other than the license steward has the right to modify or
+publish new versions of this License. Each version will be given a
+distinguishing version number.
+
+10.2. Effect of New Versions
+
+You may distribute the Covered Software under the terms of the version
+of the License under which You originally received the Covered Software,
+or under the terms of any subsequent version published by the license
+steward.
+
+10.3. Modified Versions
+
+If you create software not governed by this License, and you want to
+create a new license for such software, you may create and use a
+modified version of this License if you rename the license and remove
+any references to the name of the license steward (except to note that
+such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+Licenses
+
+If You choose to distribute Source Code Form that is Incompatible With
+Secondary Licenses under the terms of this version of the License, the
+notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+-------------------------------------------
+
+ This Source Code Form is subject to the terms of the Mozilla Public
+ License, v. 2.0. If a copy of the MPL was not distributed with this
+ file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular
+file, then You may include the notice in a location (such as a LICENSE
+file in a relevant directory) where a recipient would be likely to look
+for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+---------------------------------------------------------
+
+ This Source Code Form is "Incompatible With Secondary Licenses", as
+ defined by the Mozilla Public License, v. 2.0.
diff --git a/deps/rabbitmq_peer_discovery_aws/Makefile b/deps/rabbitmq_peer_discovery_aws/Makefile
new file mode 100644
index 0000000000..411afa867a
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_aws/Makefile
@@ -0,0 +1,19 @@
+PROJECT = rabbitmq_peer_discovery_aws
+PROJECT_DESCRIPTION = AWS-based RabbitMQ peer discovery backend
+
+LOCAL_DEPS = inets
+DEPS = rabbit_common rabbitmq_peer_discovery_common rabbitmq_aws rabbit
+TEST_DEPS = rabbitmq_ct_helpers rabbitmq_ct_client_helpers ct_helper
+dep_ct_helper = git https://github.com/extend/ct_helper.git master
+
+DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-plugin.mk
+DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk
+
+# FIXME: Use erlang.mk patched for RabbitMQ, while waiting for PRs to be
+# reviewed and merged.
+
+ERLANG_MK_REPO = https://github.com/rabbitmq/erlang.mk.git
+ERLANG_MK_COMMIT = rabbitmq-tmp
+
+include rabbitmq-components.mk
+include erlang.mk
diff --git a/deps/rabbitmq_peer_discovery_aws/README.md b/deps/rabbitmq_peer_discovery_aws/README.md
new file mode 100644
index 0000000000..3e755de69a
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_aws/README.md
@@ -0,0 +1,56 @@
+# RabbitMQ Peer Discovery AWS
+
+This is an AWS-based implementation of the RabbitMQ [peer discovery interface](https://www.rabbitmq.com/blog/2018/02/12/peer-discovery-subsystem-in-rabbitmq-3-7/)
+(new in 3.7.0, previously available in the [rabbitmq-autocluster plugin](https://github.com/rabbitmq/rabbitmq-autocluster)
+by Gavin Roy).
+
+This plugin only performs peer discovery and the most basic node health monitoring
+using AWS (EC2) APIs as a data source.
+Please get familiar with [RabbitMQ clustering fundamentals](https://rabbitmq.com/clustering.html) before attempting
+to use it.
+
+While it may seem at times that this is a RabbitMQ cluster management solution,
+it is not. Cluster provisioning and most Day 2 operations, such as [proper monitoring](https://rabbitmq.com/monitoring.html),
+are not in scope for this plugin.
+
+
+## Supported RabbitMQ Versions
+
+This plugin requires RabbitMQ 3.7.0 or later.
+
+For an AWS-based peer discovery and cluster formation
+mechanism that supports 3.6.x, see [rabbitmq-autocluster](https://github.com/rabbitmq/rabbitmq-autocluster).
+
+
+## Installation
+
+This plugin ships with [supported RabbitMQ versions](https://www.rabbitmq.com/versions.html).
+There is no need to install it separately.
+
+As with any [plugin](https://rabbitmq.com/plugins.html), it must be enabled before it
+can be used. For peer discovery plugins, this means they must be [enabled](https://rabbitmq.com/plugins.html#basics) or [preconfigured](https://rabbitmq.com/plugins.html#enabled-plugins-file)
+before first node boot:
+
+```
+rabbitmq-plugins --offline enable rabbitmq_peer_discovery_aws
+```
+
+
+## Documentation
+
+See [RabbitMQ Cluster Formation guide](https://www.rabbitmq.com/cluster-formation.html).
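+
+Below is a minimal `rabbitmq.conf` sketch for enabling this backend; the
+region value is an example and the full set of supported keys is covered
+in the guide above:
+
+``` ini
+cluster_formation.peer_discovery_backend = rabbit_peer_discovery_aws
+cluster_formation.aws.region = us-east-1
+# discover peers in the same autoscaling group as this node
+cluster_formation.aws.use_autoscaling_group = true
+```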
+
+
+## Contributing
+
+See [CONTRIBUTING.md](./CONTRIBUTING.md) and our [development process overview](https://www.rabbitmq.com/github.html).
+
+
+## License
+
+[Licensed under the MPL](LICENSE-MPL-RabbitMQ), same as RabbitMQ server.
+
+
+## Copyright
+
+(c) 2007-2020 VMware, Inc. or its affiliates.
diff --git a/deps/rabbitmq_peer_discovery_aws/erlang.mk b/deps/rabbitmq_peer_discovery_aws/erlang.mk
new file mode 100644
index 0000000000..fce4be0b0a
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_aws/erlang.mk
@@ -0,0 +1,7808 @@
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+.PHONY: all app apps deps search rel relup docs install-docs check tests clean distclean help erlang-mk
+
+ERLANG_MK_FILENAME := $(realpath $(lastword $(MAKEFILE_LIST)))
+export ERLANG_MK_FILENAME
+
+ERLANG_MK_VERSION = 2019.07.01-40-geb3e4b0
+ERLANG_MK_WITHOUT =
+
+# Make 3.81 and 3.82 are deprecated.
+
+ifeq ($(MAKELEVEL)$(MAKE_VERSION),03.81)
+$(warning Please upgrade to GNU Make 4 or later: https://erlang.mk/guide/installation.html)
+endif
+
+ifeq ($(MAKELEVEL)$(MAKE_VERSION),03.82)
+$(warning Please upgrade to GNU Make 4 or later: https://erlang.mk/guide/installation.html)
+endif
+
+# Core configuration.
+
+PROJECT ?= $(notdir $(CURDIR))
+PROJECT := $(strip $(PROJECT))
+
+PROJECT_VERSION ?= rolling
+PROJECT_MOD ?= $(PROJECT)_app
+PROJECT_ENV ?= []
+
+# Verbosity.
+
+V ?= 0
+
+verbose_0 = @
+verbose_2 = set -x;
+verbose = $(verbose_$(V))
+
+ifeq ($(V),3)
+SHELL := $(SHELL) -x
+endif
+
+gen_verbose_0 = @echo " GEN " $@;
+gen_verbose_2 = set -x;
+gen_verbose = $(gen_verbose_$(V))
+
+gen_verbose_esc_0 = @echo " GEN " $$@;
+gen_verbose_esc_2 = set -x;
+gen_verbose_esc = $(gen_verbose_esc_$(V))
+
+# Temporary files directory.
+
+ERLANG_MK_TMP ?= $(CURDIR)/.erlang.mk
+export ERLANG_MK_TMP
+
+# "erl" command.
+
+ERL = erl +A1 -noinput -boot no_dot_erlang
+
+# Platform detection.
+
+ifeq ($(PLATFORM),)
+UNAME_S := $(shell uname -s)
+
+ifeq ($(UNAME_S),Linux)
+PLATFORM = linux
+else ifeq ($(UNAME_S),Darwin)
+PLATFORM = darwin
+else ifeq ($(UNAME_S),SunOS)
+PLATFORM = solaris
+else ifeq ($(UNAME_S),GNU)
+PLATFORM = gnu
+else ifeq ($(UNAME_S),FreeBSD)
+PLATFORM = freebsd
+else ifeq ($(UNAME_S),NetBSD)
+PLATFORM = netbsd
+else ifeq ($(UNAME_S),OpenBSD)
+PLATFORM = openbsd
+else ifeq ($(UNAME_S),DragonFly)
+PLATFORM = dragonfly
+else ifeq ($(shell uname -o),Msys)
+PLATFORM = msys2
+else
+$(error Unable to detect platform. Please open a ticket with the output of uname -a.)
+endif
+
+export PLATFORM
+endif
+
+# Core targets.
+
+all:: deps app rel
+
+# Noop to avoid a Make warning when there's nothing to do.
+rel::
+ $(verbose) :
+
+relup:: deps app
+
+check:: tests
+
+clean:: clean-crashdump
+
+clean-crashdump:
+ifneq ($(wildcard erl_crash.dump),)
+ $(gen_verbose) rm -f erl_crash.dump
+endif
+
+distclean:: clean distclean-tmp
+
+$(ERLANG_MK_TMP):
+ $(verbose) mkdir -p $(ERLANG_MK_TMP)
+
+distclean-tmp:
+ $(gen_verbose) rm -rf $(ERLANG_MK_TMP)
+
+help::
+ $(verbose) printf "%s\n" \
+ "erlang.mk (version $(ERLANG_MK_VERSION)) is distributed under the terms of the ISC License." \
+ "Copyright (c) 2013-2016 Loïc Hoguin <essen@ninenines.eu>" \
+ "" \
+ "Usage: [V=1] $(MAKE) [target]..." \
+ "" \
+ "Core targets:" \
+ " all Run deps, app and rel targets in that order" \
+ " app Compile the project" \
+ " deps Fetch dependencies (if needed) and compile them" \
+ " fetch-deps Fetch dependencies recursively (if needed) without compiling them" \
+ " list-deps List dependencies recursively on stdout" \
+ " search q=... Search for a package in the built-in index" \
+ " rel Build a release for this project, if applicable" \
+ " docs Build the documentation for this project" \
+ " install-docs Install the man pages for this project" \
+ " check Compile and run all tests and analysis for this project" \
+ " tests Run the tests for this project" \
+ " clean Delete temporary and output files from most targets" \
+ " distclean Delete all temporary and output files" \
+ " help Display this help and exit" \
+ " erlang-mk Update erlang.mk to the latest version"
+
+# Core functions.
+
+empty :=
+space := $(empty) $(empty)
+tab := $(empty) $(empty)
+comma := ,
+
+define newline
+
+
+endef
+
+define comma_list
+$(subst $(space),$(comma),$(strip $(1)))
+endef
+
+define escape_dquotes
+$(subst ",\",$1)
+endef
+
+# Adding erlang.mk to make Erlang scripts that call init:get_plain_arguments() happy.
+define erlang
+$(ERL) $2 -pz $(ERLANG_MK_TMP)/rebar/ebin -eval "$(subst $(newline),,$(call escape_dquotes,$1))" -- erlang.mk
+endef
+
+ifeq ($(PLATFORM),msys2)
+core_native_path = $(shell cygpath -m $1)
+else
+core_native_path = $1
+endif
+
+core_http_get = curl -Lf$(if $(filter-out 0,$(V)),,s)o $(call core_native_path,$1) $2
+
+core_eq = $(and $(findstring $(1),$(2)),$(findstring $(2),$(1)))
+
+# We skip files that contain spaces because they end up causing issues.
+core_find = $(if $(wildcard $1),$(shell find $(1:%/=%) \( -type l -o -type f \) -name $(subst *,\*,$2) | grep -v " "))
+
+core_lc = $(subst A,a,$(subst B,b,$(subst C,c,$(subst D,d,$(subst E,e,$(subst F,f,$(subst G,g,$(subst H,h,$(subst I,i,$(subst J,j,$(subst K,k,$(subst L,l,$(subst M,m,$(subst N,n,$(subst O,o,$(subst P,p,$(subst Q,q,$(subst R,r,$(subst S,s,$(subst T,t,$(subst U,u,$(subst V,v,$(subst W,w,$(subst X,x,$(subst Y,y,$(subst Z,z,$(1)))))))))))))))))))))))))))
+
+core_ls = $(filter-out $(1),$(shell echo $(1)))
+
+# @todo Use a solution that does not require using perl.
+core_relpath = $(shell perl -e 'use File::Spec; print File::Spec->abs2rel(@ARGV) . "\n"' $1 $2)
+
+define core_render
+ printf -- '$(subst $(newline),\n,$(subst %,%%,$(subst ','\'',$(subst $(tab),$(WS),$(call $(1))))))\n' > $(2)
+endef
+
+# Automated update.
+
+ERLANG_MK_REPO ?= https://github.com/ninenines/erlang.mk
+ERLANG_MK_COMMIT ?=
+ERLANG_MK_BUILD_CONFIG ?= build.config
+ERLANG_MK_BUILD_DIR ?= .erlang.mk.build
+
+erlang-mk: WITHOUT ?= $(ERLANG_MK_WITHOUT)
+erlang-mk:
+ifdef ERLANG_MK_COMMIT
+ $(verbose) git clone $(ERLANG_MK_REPO) $(ERLANG_MK_BUILD_DIR)
+ $(verbose) cd $(ERLANG_MK_BUILD_DIR) && git checkout $(ERLANG_MK_COMMIT)
+else
+ $(verbose) git clone --depth 1 $(ERLANG_MK_REPO) $(ERLANG_MK_BUILD_DIR)
+endif
+ $(verbose) if [ -f $(ERLANG_MK_BUILD_CONFIG) ]; then cp $(ERLANG_MK_BUILD_CONFIG) $(ERLANG_MK_BUILD_DIR)/build.config; fi
+ $(gen_verbose) $(MAKE) --no-print-directory -C $(ERLANG_MK_BUILD_DIR) WITHOUT='$(strip $(WITHOUT))' UPGRADE=1
+ $(verbose) cp $(ERLANG_MK_BUILD_DIR)/erlang.mk ./erlang.mk
+ $(verbose) rm -rf $(ERLANG_MK_BUILD_DIR)
+ $(verbose) rm -rf $(ERLANG_MK_TMP)
+
+# The erlang.mk package index is bundled in the default erlang.mk build.
+# Search for the string "copyright" to skip to the rest of the code.
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-kerl
+
+KERL_INSTALL_DIR ?= $(HOME)/erlang
+
+ifeq ($(strip $(KERL)),)
+KERL := $(ERLANG_MK_TMP)/kerl/kerl
+endif
+
+KERL_DIR = $(ERLANG_MK_TMP)/kerl
+
+export KERL
+
+KERL_GIT ?= https://github.com/kerl/kerl
+KERL_COMMIT ?= master
+
+KERL_MAKEFLAGS ?=
+
+OTP_GIT ?= https://github.com/erlang/otp
+
+define kerl_otp_target
+$(KERL_INSTALL_DIR)/$(1): $(KERL)
+ $(verbose) if [ ! -d $$@ ]; then \
+ MAKEFLAGS="$(KERL_MAKEFLAGS)" $(KERL) build git $(OTP_GIT) $(1) $(1); \
+ $(KERL) install $(1) $(KERL_INSTALL_DIR)/$(1); \
+ fi
+endef
+
+define kerl_hipe_target
+$(KERL_INSTALL_DIR)/$1-native: $(KERL)
+ $(verbose) if [ ! -d $$@ ]; then \
+ KERL_CONFIGURE_OPTIONS=--enable-native-libs \
+ MAKEFLAGS="$(KERL_MAKEFLAGS)" $(KERL) build git $(OTP_GIT) $1 $1-native; \
+ $(KERL) install $1-native $(KERL_INSTALL_DIR)/$1-native; \
+ fi
+endef
+
+$(KERL): $(KERL_DIR)
+
+$(KERL_DIR): | $(ERLANG_MK_TMP)
+ $(gen_verbose) git clone --depth 1 $(KERL_GIT) $(ERLANG_MK_TMP)/kerl
+ $(verbose) cd $(ERLANG_MK_TMP)/kerl && git checkout $(KERL_COMMIT)
+ $(verbose) chmod +x $(KERL)
+
+distclean:: distclean-kerl
+
+distclean-kerl:
+ $(gen_verbose) rm -rf $(KERL_DIR)
+
+# Allow users to select which version of Erlang/OTP to use for a project.
+
+ifneq ($(strip $(LATEST_ERLANG_OTP)),)
+# In some environments it is necessary to filter out master.
+ERLANG_OTP := $(notdir $(lastword $(sort\
+ $(filter-out $(KERL_INSTALL_DIR)/master $(KERL_INSTALL_DIR)/OTP_R%,\
+ $(filter-out %-rc1 %-rc2 %-rc3,$(wildcard $(KERL_INSTALL_DIR)/*[^-native]))))))
+endif
+
+ERLANG_OTP ?=
+ERLANG_HIPE ?=
+
+# Use kerl to enforce a specific Erlang/OTP version for a project.
+ifneq ($(strip $(ERLANG_OTP)),)
+export PATH := $(KERL_INSTALL_DIR)/$(ERLANG_OTP)/bin:$(PATH)
+SHELL := env PATH=$(PATH) $(SHELL)
+$(eval $(call kerl_otp_target,$(ERLANG_OTP)))
+
+# Build Erlang/OTP only if it doesn't already exist.
+ifeq ($(wildcard $(KERL_INSTALL_DIR)/$(ERLANG_OTP))$(BUILD_ERLANG_OTP),)
+$(info Building Erlang/OTP $(ERLANG_OTP)... Please wait...)
+$(shell $(MAKE) $(KERL_INSTALL_DIR)/$(ERLANG_OTP) ERLANG_OTP=$(ERLANG_OTP) BUILD_ERLANG_OTP=1 >&2)
+endif
+
+else
+# Same for a HiPE enabled VM.
+ifneq ($(strip $(ERLANG_HIPE)),)
+export PATH := $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native/bin:$(PATH)
+SHELL := env PATH=$(PATH) $(SHELL)
+$(eval $(call kerl_hipe_target,$(ERLANG_HIPE)))
+
+# Build Erlang/OTP only if it doesn't already exist.
+ifeq ($(wildcard $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native)$(BUILD_ERLANG_OTP),)
+$(info Building HiPE-enabled Erlang/OTP $(ERLANG_OTP)... Please wait...)
+$(shell $(MAKE) $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native ERLANG_HIPE=$(ERLANG_HIPE) BUILD_ERLANG_OTP=1 >&2)
+endif
+
+endif
+endif
+
+PACKAGES += aberth
+pkg_aberth_name = aberth
+pkg_aberth_description = Generic BERT-RPC server in Erlang
+pkg_aberth_homepage = https://github.com/a13x/aberth
+pkg_aberth_fetch = git
+pkg_aberth_repo = https://github.com/a13x/aberth
+pkg_aberth_commit = master
+
+PACKAGES += active
+pkg_active_name = active
+pkg_active_description = Active development for Erlang: rebuild and reload source/binary files while the VM is running
+pkg_active_homepage = https://github.com/proger/active
+pkg_active_fetch = git
+pkg_active_repo = https://github.com/proger/active
+pkg_active_commit = master
+
+PACKAGES += actordb_core
+pkg_actordb_core_name = actordb_core
+pkg_actordb_core_description = ActorDB main source
+pkg_actordb_core_homepage = http://www.actordb.com/
+pkg_actordb_core_fetch = git
+pkg_actordb_core_repo = https://github.com/biokoda/actordb_core
+pkg_actordb_core_commit = master
+
+PACKAGES += actordb_thrift
+pkg_actordb_thrift_name = actordb_thrift
+pkg_actordb_thrift_description = Thrift API for ActorDB
+pkg_actordb_thrift_homepage = http://www.actordb.com/
+pkg_actordb_thrift_fetch = git
+pkg_actordb_thrift_repo = https://github.com/biokoda/actordb_thrift
+pkg_actordb_thrift_commit = master
+
+PACKAGES += aleppo
+pkg_aleppo_name = aleppo
+pkg_aleppo_description = Alternative Erlang Pre-Processor
+pkg_aleppo_homepage = https://github.com/ErlyORM/aleppo
+pkg_aleppo_fetch = git
+pkg_aleppo_repo = https://github.com/ErlyORM/aleppo
+pkg_aleppo_commit = master
+
+PACKAGES += alog
+pkg_alog_name = alog
+pkg_alog_description = Simply the best logging framework for Erlang
+pkg_alog_homepage = https://github.com/siberian-fast-food/alogger
+pkg_alog_fetch = git
+pkg_alog_repo = https://github.com/siberian-fast-food/alogger
+pkg_alog_commit = master
+
+PACKAGES += amqp_client
+pkg_amqp_client_name = amqp_client
+pkg_amqp_client_description = RabbitMQ Erlang AMQP client
+pkg_amqp_client_homepage = https://www.rabbitmq.com/erlang-client-user-guide.html
+pkg_amqp_client_fetch = git
+pkg_amqp_client_repo = https://github.com/rabbitmq/rabbitmq-erlang-client.git
+pkg_amqp_client_commit = master
+
+PACKAGES += annotations
+pkg_annotations_name = annotations
+pkg_annotations_description = Simple code instrumentation utilities
+pkg_annotations_homepage = https://github.com/hyperthunk/annotations
+pkg_annotations_fetch = git
+pkg_annotations_repo = https://github.com/hyperthunk/annotations
+pkg_annotations_commit = master
+
+PACKAGES += antidote
+pkg_antidote_name = antidote
+pkg_antidote_description = Large-scale computation without synchronisation
+pkg_antidote_homepage = https://syncfree.lip6.fr/
+pkg_antidote_fetch = git
+pkg_antidote_repo = https://github.com/SyncFree/antidote
+pkg_antidote_commit = master
+
+PACKAGES += apns
+pkg_apns_name = apns
+pkg_apns_description = Apple Push Notification Server for Erlang
+pkg_apns_homepage = http://inaka.github.com/apns4erl
+pkg_apns_fetch = git
+pkg_apns_repo = https://github.com/inaka/apns4erl
+pkg_apns_commit = master
+
+PACKAGES += asciideck
+pkg_asciideck_name = asciideck
+pkg_asciideck_description = Asciidoc for Erlang.
+pkg_asciideck_homepage = https://ninenines.eu
+pkg_asciideck_fetch = git
+pkg_asciideck_repo = https://github.com/ninenines/asciideck
+pkg_asciideck_commit = master
+
+PACKAGES += azdht
+pkg_azdht_name = azdht
+pkg_azdht_description = Azureus Distributed Hash Table (DHT) in Erlang
+pkg_azdht_homepage = https://github.com/arcusfelis/azdht
+pkg_azdht_fetch = git
+pkg_azdht_repo = https://github.com/arcusfelis/azdht
+pkg_azdht_commit = master
+
+PACKAGES += backoff
+pkg_backoff_name = backoff
+pkg_backoff_description = Simple exponential backoffs in Erlang
+pkg_backoff_homepage = https://github.com/ferd/backoff
+pkg_backoff_fetch = git
+pkg_backoff_repo = https://github.com/ferd/backoff
+pkg_backoff_commit = master
+
+PACKAGES += barrel_tcp
+pkg_barrel_tcp_name = barrel_tcp
+pkg_barrel_tcp_description = barrel is a generic TCP acceptor pool with low latency in Erlang.
+pkg_barrel_tcp_homepage = https://github.com/benoitc-attic/barrel_tcp
+pkg_barrel_tcp_fetch = git
+pkg_barrel_tcp_repo = https://github.com/benoitc-attic/barrel_tcp
+pkg_barrel_tcp_commit = master
+
+PACKAGES += basho_bench
+pkg_basho_bench_name = basho_bench
+pkg_basho_bench_description = A load-generation and testing tool for basically whatever you can write a returning Erlang function for.
+pkg_basho_bench_homepage = https://github.com/basho/basho_bench
+pkg_basho_bench_fetch = git
+pkg_basho_bench_repo = https://github.com/basho/basho_bench
+pkg_basho_bench_commit = master
+
+PACKAGES += bcrypt
+pkg_bcrypt_name = bcrypt
+pkg_bcrypt_description = Bcrypt Erlang / C library
+pkg_bcrypt_homepage = https://github.com/erlangpack/bcrypt
+pkg_bcrypt_fetch = git
+pkg_bcrypt_repo = https://github.com/erlangpack/bcrypt.git
+pkg_bcrypt_commit = master
+
+PACKAGES += beam
+pkg_beam_name = beam
+pkg_beam_description = BEAM emulator written in Erlang
+pkg_beam_homepage = https://github.com/tonyrog/beam
+pkg_beam_fetch = git
+pkg_beam_repo = https://github.com/tonyrog/beam
+pkg_beam_commit = master
+
+PACKAGES += beanstalk
+pkg_beanstalk_name = beanstalk
+pkg_beanstalk_description = An Erlang client for beanstalkd
+pkg_beanstalk_homepage = https://github.com/tim/erlang-beanstalk
+pkg_beanstalk_fetch = git
+pkg_beanstalk_repo = https://github.com/tim/erlang-beanstalk
+pkg_beanstalk_commit = master
+
+PACKAGES += bear
+pkg_bear_name = bear
+pkg_bear_description = a set of statistics functions for erlang
+pkg_bear_homepage = https://github.com/boundary/bear
+pkg_bear_fetch = git
+pkg_bear_repo = https://github.com/boundary/bear
+pkg_bear_commit = master
+
+PACKAGES += bertconf
+pkg_bertconf_name = bertconf
+pkg_bertconf_description = Make ETS tables out of static BERT files that are auto-reloaded
+pkg_bertconf_homepage = https://github.com/ferd/bertconf
+pkg_bertconf_fetch = git
+pkg_bertconf_repo = https://github.com/ferd/bertconf
+pkg_bertconf_commit = master
+
+PACKAGES += bifrost
+pkg_bifrost_name = bifrost
+pkg_bifrost_description = Erlang FTP Server Framework
+pkg_bifrost_homepage = https://github.com/thorstadt/bifrost
+pkg_bifrost_fetch = git
+pkg_bifrost_repo = https://github.com/thorstadt/bifrost
+pkg_bifrost_commit = master
+
+PACKAGES += binpp
+pkg_binpp_name = binpp
+pkg_binpp_description = Erlang Binary Pretty Printer
+pkg_binpp_homepage = https://github.com/jtendo/binpp
+pkg_binpp_fetch = git
+pkg_binpp_repo = https://github.com/jtendo/binpp
+pkg_binpp_commit = master
+
+PACKAGES += bisect
+pkg_bisect_name = bisect
+pkg_bisect_description = Ordered fixed-size binary dictionary in Erlang
+pkg_bisect_homepage = https://github.com/knutin/bisect
+pkg_bisect_fetch = git
+pkg_bisect_repo = https://github.com/knutin/bisect
+pkg_bisect_commit = master
+
+PACKAGES += bitcask
+pkg_bitcask_name = bitcask
+pkg_bitcask_description = because you need another a key/value storage engine
+pkg_bitcask_homepage = https://github.com/basho/bitcask
+pkg_bitcask_fetch = git
+pkg_bitcask_repo = https://github.com/basho/bitcask
+pkg_bitcask_commit = develop
+
+PACKAGES += bitstore
+pkg_bitstore_name = bitstore
+pkg_bitstore_description = A document based ontology development environment
+pkg_bitstore_homepage = https://github.com/bdionne/bitstore
+pkg_bitstore_fetch = git
+pkg_bitstore_repo = https://github.com/bdionne/bitstore
+pkg_bitstore_commit = master
+
+PACKAGES += bootstrap
+pkg_bootstrap_name = bootstrap
+pkg_bootstrap_description = A simple, yet powerful Erlang cluster bootstrapping application.
+pkg_bootstrap_homepage = https://github.com/schlagert/bootstrap
+pkg_bootstrap_fetch = git
+pkg_bootstrap_repo = https://github.com/schlagert/bootstrap
+pkg_bootstrap_commit = master
+
+PACKAGES += boss
+pkg_boss_name = boss
+pkg_boss_description = Erlang web MVC, now featuring Comet
+pkg_boss_homepage = https://github.com/ChicagoBoss/ChicagoBoss
+pkg_boss_fetch = git
+pkg_boss_repo = https://github.com/ChicagoBoss/ChicagoBoss
+pkg_boss_commit = master
+
+PACKAGES += boss_db
+pkg_boss_db_name = boss_db
+pkg_boss_db_description = BossDB: a sharded, caching, pooling, evented ORM for Erlang
+pkg_boss_db_homepage = https://github.com/ErlyORM/boss_db
+pkg_boss_db_fetch = git
+pkg_boss_db_repo = https://github.com/ErlyORM/boss_db
+pkg_boss_db_commit = master
+
+PACKAGES += brod
+pkg_brod_name = brod
+pkg_brod_description = Kafka client in Erlang
+pkg_brod_homepage = https://github.com/klarna/brod
+pkg_brod_fetch = git
+pkg_brod_repo = https://github.com/klarna/brod.git
+pkg_brod_commit = master
+
+PACKAGES += bson
+pkg_bson_name = bson
+pkg_bson_description = BSON documents in Erlang, see bsonspec.org
+pkg_bson_homepage = https://github.com/comtihon/bson-erlang
+pkg_bson_fetch = git
+pkg_bson_repo = https://github.com/comtihon/bson-erlang
+pkg_bson_commit = master
+
+PACKAGES += bullet
+pkg_bullet_name = bullet
+pkg_bullet_description = Simple, reliable, efficient streaming for Cowboy.
+pkg_bullet_homepage = http://ninenines.eu
+pkg_bullet_fetch = git
+pkg_bullet_repo = https://github.com/ninenines/bullet
+pkg_bullet_commit = master
+
+PACKAGES += cache
+pkg_cache_name = cache
+pkg_cache_description = Erlang in-memory cache
+pkg_cache_homepage = https://github.com/fogfish/cache
+pkg_cache_fetch = git
+pkg_cache_repo = https://github.com/fogfish/cache
+pkg_cache_commit = master
+
+PACKAGES += cake
+pkg_cake_name = cake
+pkg_cake_description = Really simple terminal colorization
+pkg_cake_homepage = https://github.com/darach/cake-erl
+pkg_cake_fetch = git
+pkg_cake_repo = https://github.com/darach/cake-erl
+pkg_cake_commit = master
+
+PACKAGES += carotene
+pkg_carotene_name = carotene
+pkg_carotene_description = Real-time server
+pkg_carotene_homepage = https://github.com/carotene/carotene
+pkg_carotene_fetch = git
+pkg_carotene_repo = https://github.com/carotene/carotene
+pkg_carotene_commit = master
+
+PACKAGES += cberl
+pkg_cberl_name = cberl
+pkg_cberl_description = NIF based Erlang bindings for Couchbase
+pkg_cberl_homepage = https://github.com/chitika/cberl
+pkg_cberl_fetch = git
+pkg_cberl_repo = https://github.com/chitika/cberl
+pkg_cberl_commit = master
+
+PACKAGES += cecho
+pkg_cecho_name = cecho
+pkg_cecho_description = An ncurses library for Erlang
+pkg_cecho_homepage = https://github.com/mazenharake/cecho
+pkg_cecho_fetch = git
+pkg_cecho_repo = https://github.com/mazenharake/cecho
+pkg_cecho_commit = master
+
+PACKAGES += cferl
+pkg_cferl_name = cferl
+pkg_cferl_description = Rackspace / Open Stack Cloud Files Erlang Client
+pkg_cferl_homepage = https://github.com/ddossot/cferl
+pkg_cferl_fetch = git
+pkg_cferl_repo = https://github.com/ddossot/cferl
+pkg_cferl_commit = master
+
+PACKAGES += chaos_monkey
+pkg_chaos_monkey_name = chaos_monkey
+pkg_chaos_monkey_description = This is The CHAOS MONKEY. It will kill your processes.
+pkg_chaos_monkey_homepage = https://github.com/dLuna/chaos_monkey
+pkg_chaos_monkey_fetch = git
+pkg_chaos_monkey_repo = https://github.com/dLuna/chaos_monkey
+pkg_chaos_monkey_commit = master
+
+PACKAGES += check_node
+pkg_check_node_name = check_node
+pkg_check_node_description = Nagios Scripts for monitoring Riak
+pkg_check_node_homepage = https://github.com/basho-labs/riak_nagios
+pkg_check_node_fetch = git
+pkg_check_node_repo = https://github.com/basho-labs/riak_nagios
+pkg_check_node_commit = master
+
+PACKAGES += chronos
+pkg_chronos_name = chronos
+pkg_chronos_description = Timer module for Erlang that makes it easy to abstract time out of the tests.
+pkg_chronos_homepage = https://github.com/lehoff/chronos
+pkg_chronos_fetch = git
+pkg_chronos_repo = https://github.com/lehoff/chronos
+pkg_chronos_commit = master
+
+PACKAGES += chumak
+pkg_chumak_name = chumak
+pkg_chumak_description = Pure Erlang implementation of ZeroMQ Message Transport Protocol.
+pkg_chumak_homepage = http://choven.ca
+pkg_chumak_fetch = git
+pkg_chumak_repo = https://github.com/chovencorp/chumak
+pkg_chumak_commit = master
+
+PACKAGES += cl
+pkg_cl_name = cl
+pkg_cl_description = OpenCL binding for Erlang
+pkg_cl_homepage = https://github.com/tonyrog/cl
+pkg_cl_fetch = git
+pkg_cl_repo = https://github.com/tonyrog/cl
+pkg_cl_commit = master
+
+PACKAGES += clique
+pkg_clique_name = clique
+pkg_clique_description = CLI Framework for Erlang
+pkg_clique_homepage = https://github.com/basho/clique
+pkg_clique_fetch = git
+pkg_clique_repo = https://github.com/basho/clique
+pkg_clique_commit = develop
+
+PACKAGES += cloudi_core
+pkg_cloudi_core_name = cloudi_core
+pkg_cloudi_core_description = CloudI internal service runtime
+pkg_cloudi_core_homepage = http://cloudi.org/
+pkg_cloudi_core_fetch = git
+pkg_cloudi_core_repo = https://github.com/CloudI/cloudi_core
+pkg_cloudi_core_commit = master
+
+PACKAGES += cloudi_service_api_requests
+pkg_cloudi_service_api_requests_name = cloudi_service_api_requests
+pkg_cloudi_service_api_requests_description = CloudI Service API requests (JSON-RPC/Erlang-term support)
+pkg_cloudi_service_api_requests_homepage = http://cloudi.org/
+pkg_cloudi_service_api_requests_fetch = git
+pkg_cloudi_service_api_requests_repo = https://github.com/CloudI/cloudi_service_api_requests
+pkg_cloudi_service_api_requests_commit = master
+
+PACKAGES += cloudi_service_db
+pkg_cloudi_service_db_name = cloudi_service_db
+pkg_cloudi_service_db_description = CloudI Database (in-memory/testing/generic)
+pkg_cloudi_service_db_homepage = http://cloudi.org/
+pkg_cloudi_service_db_fetch = git
+pkg_cloudi_service_db_repo = https://github.com/CloudI/cloudi_service_db
+pkg_cloudi_service_db_commit = master
+
+PACKAGES += cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_name = cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_description = Cassandra CloudI Service
+pkg_cloudi_service_db_cassandra_homepage = http://cloudi.org/
+pkg_cloudi_service_db_cassandra_fetch = git
+pkg_cloudi_service_db_cassandra_repo = https://github.com/CloudI/cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_commit = master
+
+PACKAGES += cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_name = cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_description = Cassandra CQL CloudI Service
+pkg_cloudi_service_db_cassandra_cql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_cassandra_cql_fetch = git
+pkg_cloudi_service_db_cassandra_cql_repo = https://github.com/CloudI/cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_commit = master
+
+PACKAGES += cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_name = cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_description = CouchDB CloudI Service
+pkg_cloudi_service_db_couchdb_homepage = http://cloudi.org/
+pkg_cloudi_service_db_couchdb_fetch = git
+pkg_cloudi_service_db_couchdb_repo = https://github.com/CloudI/cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_commit = master
+
+PACKAGES += cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_name = cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_description = elasticsearch CloudI Service
+pkg_cloudi_service_db_elasticsearch_homepage = http://cloudi.org/
+pkg_cloudi_service_db_elasticsearch_fetch = git
+pkg_cloudi_service_db_elasticsearch_repo = https://github.com/CloudI/cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_commit = master
+
+PACKAGES += cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_name = cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_description = memcached CloudI Service
+pkg_cloudi_service_db_memcached_homepage = http://cloudi.org/
+pkg_cloudi_service_db_memcached_fetch = git
+pkg_cloudi_service_db_memcached_repo = https://github.com/CloudI/cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_commit = master
+
+PACKAGES += cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_name = cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_description = MySQL CloudI Service
+pkg_cloudi_service_db_mysql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_mysql_fetch = git
+pkg_cloudi_service_db_mysql_repo = https://github.com/CloudI/cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_commit = master
+
+PACKAGES += cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_name = cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_description = PostgreSQL CloudI Service
+pkg_cloudi_service_db_pgsql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_pgsql_fetch = git
+pkg_cloudi_service_db_pgsql_repo = https://github.com/CloudI/cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_commit = master
+
+PACKAGES += cloudi_service_db_riak
+pkg_cloudi_service_db_riak_name = cloudi_service_db_riak
+pkg_cloudi_service_db_riak_description = Riak CloudI Service
+pkg_cloudi_service_db_riak_homepage = http://cloudi.org/
+pkg_cloudi_service_db_riak_fetch = git
+pkg_cloudi_service_db_riak_repo = https://github.com/CloudI/cloudi_service_db_riak
+pkg_cloudi_service_db_riak_commit = master
+
+PACKAGES += cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_name = cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_description = Tokyo Tyrant CloudI Service
+pkg_cloudi_service_db_tokyotyrant_homepage = http://cloudi.org/
+pkg_cloudi_service_db_tokyotyrant_fetch = git
+pkg_cloudi_service_db_tokyotyrant_repo = https://github.com/CloudI/cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_commit = master
+
+PACKAGES += cloudi_service_filesystem
+pkg_cloudi_service_filesystem_name = cloudi_service_filesystem
+pkg_cloudi_service_filesystem_description = Filesystem CloudI Service
+pkg_cloudi_service_filesystem_homepage = http://cloudi.org/
+pkg_cloudi_service_filesystem_fetch = git
+pkg_cloudi_service_filesystem_repo = https://github.com/CloudI/cloudi_service_filesystem
+pkg_cloudi_service_filesystem_commit = master
+
+PACKAGES += cloudi_service_http_client
+pkg_cloudi_service_http_client_name = cloudi_service_http_client
+pkg_cloudi_service_http_client_description = HTTP client CloudI Service
+pkg_cloudi_service_http_client_homepage = http://cloudi.org/
+pkg_cloudi_service_http_client_fetch = git
+pkg_cloudi_service_http_client_repo = https://github.com/CloudI/cloudi_service_http_client
+pkg_cloudi_service_http_client_commit = master
+
+PACKAGES += cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_name = cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_description = cowboy HTTP/HTTPS CloudI Service
+pkg_cloudi_service_http_cowboy_homepage = http://cloudi.org/
+pkg_cloudi_service_http_cowboy_fetch = git
+pkg_cloudi_service_http_cowboy_repo = https://github.com/CloudI/cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_commit = master
+
+PACKAGES += cloudi_service_http_elli
+pkg_cloudi_service_http_elli_name = cloudi_service_http_elli
+pkg_cloudi_service_http_elli_description = elli HTTP CloudI Service
+pkg_cloudi_service_http_elli_homepage = http://cloudi.org/
+pkg_cloudi_service_http_elli_fetch = git
+pkg_cloudi_service_http_elli_repo = https://github.com/CloudI/cloudi_service_http_elli
+pkg_cloudi_service_http_elli_commit = master
+
+PACKAGES += cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_name = cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_description = Map/Reduce CloudI Service
+pkg_cloudi_service_map_reduce_homepage = http://cloudi.org/
+pkg_cloudi_service_map_reduce_fetch = git
+pkg_cloudi_service_map_reduce_repo = https://github.com/CloudI/cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_commit = master
+
+PACKAGES += cloudi_service_oauth1
+pkg_cloudi_service_oauth1_name = cloudi_service_oauth1
+pkg_cloudi_service_oauth1_description = OAuth v1.0 CloudI Service
+pkg_cloudi_service_oauth1_homepage = http://cloudi.org/
+pkg_cloudi_service_oauth1_fetch = git
+pkg_cloudi_service_oauth1_repo = https://github.com/CloudI/cloudi_service_oauth1
+pkg_cloudi_service_oauth1_commit = master
+
+PACKAGES += cloudi_service_queue
+pkg_cloudi_service_queue_name = cloudi_service_queue
+pkg_cloudi_service_queue_description = Persistent Queue Service
+pkg_cloudi_service_queue_homepage = http://cloudi.org/
+pkg_cloudi_service_queue_fetch = git
+pkg_cloudi_service_queue_repo = https://github.com/CloudI/cloudi_service_queue
+pkg_cloudi_service_queue_commit = master
+
+PACKAGES += cloudi_service_quorum
+pkg_cloudi_service_quorum_name = cloudi_service_quorum
+pkg_cloudi_service_quorum_description = CloudI Quorum Service
+pkg_cloudi_service_quorum_homepage = http://cloudi.org/
+pkg_cloudi_service_quorum_fetch = git
+pkg_cloudi_service_quorum_repo = https://github.com/CloudI/cloudi_service_quorum
+pkg_cloudi_service_quorum_commit = master
+
+PACKAGES += cloudi_service_router
+pkg_cloudi_service_router_name = cloudi_service_router
+pkg_cloudi_service_router_description = CloudI Router Service
+pkg_cloudi_service_router_homepage = http://cloudi.org/
+pkg_cloudi_service_router_fetch = git
+pkg_cloudi_service_router_repo = https://github.com/CloudI/cloudi_service_router
+pkg_cloudi_service_router_commit = master
+
+PACKAGES += cloudi_service_tcp
+pkg_cloudi_service_tcp_name = cloudi_service_tcp
+pkg_cloudi_service_tcp_description = TCP CloudI Service
+pkg_cloudi_service_tcp_homepage = http://cloudi.org/
+pkg_cloudi_service_tcp_fetch = git
+pkg_cloudi_service_tcp_repo = https://github.com/CloudI/cloudi_service_tcp
+pkg_cloudi_service_tcp_commit = master
+
+PACKAGES += cloudi_service_timers
+pkg_cloudi_service_timers_name = cloudi_service_timers
+pkg_cloudi_service_timers_description = Timers CloudI Service
+pkg_cloudi_service_timers_homepage = http://cloudi.org/
+pkg_cloudi_service_timers_fetch = git
+pkg_cloudi_service_timers_repo = https://github.com/CloudI/cloudi_service_timers
+pkg_cloudi_service_timers_commit = master
+
+PACKAGES += cloudi_service_udp
+pkg_cloudi_service_udp_name = cloudi_service_udp
+pkg_cloudi_service_udp_description = UDP CloudI Service
+pkg_cloudi_service_udp_homepage = http://cloudi.org/
+pkg_cloudi_service_udp_fetch = git
+pkg_cloudi_service_udp_repo = https://github.com/CloudI/cloudi_service_udp
+pkg_cloudi_service_udp_commit = master
+
+PACKAGES += cloudi_service_validate
+pkg_cloudi_service_validate_name = cloudi_service_validate
+pkg_cloudi_service_validate_description = CloudI Validate Service
+pkg_cloudi_service_validate_homepage = http://cloudi.org/
+pkg_cloudi_service_validate_fetch = git
+pkg_cloudi_service_validate_repo = https://github.com/CloudI/cloudi_service_validate
+pkg_cloudi_service_validate_commit = master
+
+PACKAGES += cloudi_service_zeromq
+pkg_cloudi_service_zeromq_name = cloudi_service_zeromq
+pkg_cloudi_service_zeromq_description = ZeroMQ CloudI Service
+pkg_cloudi_service_zeromq_homepage = http://cloudi.org/
+pkg_cloudi_service_zeromq_fetch = git
+pkg_cloudi_service_zeromq_repo = https://github.com/CloudI/cloudi_service_zeromq
+pkg_cloudi_service_zeromq_commit = master
+
+PACKAGES += cluster_info
+pkg_cluster_info_name = cluster_info
+pkg_cluster_info_description = Fork of Hibari's nifty cluster_info OTP app
+pkg_cluster_info_homepage = https://github.com/basho/cluster_info
+pkg_cluster_info_fetch = git
+pkg_cluster_info_repo = https://github.com/basho/cluster_info
+pkg_cluster_info_commit = master
+
+PACKAGES += color
+pkg_color_name = color
+pkg_color_description = ANSI colors for your Erlang
+pkg_color_homepage = https://github.com/julianduque/erlang-color
+pkg_color_fetch = git
+pkg_color_repo = https://github.com/julianduque/erlang-color
+pkg_color_commit = master
+
+PACKAGES += confetti
+pkg_confetti_name = confetti
+pkg_confetti_description = Erlang configuration provider / application:get_env/2 on steroids
+pkg_confetti_homepage = https://github.com/jtendo/confetti
+pkg_confetti_fetch = git
+pkg_confetti_repo = https://github.com/jtendo/confetti
+pkg_confetti_commit = master
+
+PACKAGES += couchbeam
+pkg_couchbeam_name = couchbeam
+pkg_couchbeam_description = Apache CouchDB client in Erlang
+pkg_couchbeam_homepage = https://github.com/benoitc/couchbeam
+pkg_couchbeam_fetch = git
+pkg_couchbeam_repo = https://github.com/benoitc/couchbeam
+pkg_couchbeam_commit = master
+
+PACKAGES += covertool
+pkg_covertool_name = covertool
+pkg_covertool_description = Tool to convert Erlang cover data files into Cobertura XML reports
+pkg_covertool_homepage = https://github.com/idubrov/covertool
+pkg_covertool_fetch = git
+pkg_covertool_repo = https://github.com/idubrov/covertool
+pkg_covertool_commit = master
+
+PACKAGES += cowboy
+pkg_cowboy_name = cowboy
+pkg_cowboy_description = Small, fast and modular HTTP server.
+pkg_cowboy_homepage = http://ninenines.eu
+pkg_cowboy_fetch = git
+pkg_cowboy_repo = https://github.com/ninenines/cowboy
+pkg_cowboy_commit = 1.0.4
+
+PACKAGES += cowdb
+pkg_cowdb_name = cowdb
+pkg_cowdb_description = Pure Key/Value database library for Erlang Applications
+pkg_cowdb_homepage = https://github.com/refuge/cowdb
+pkg_cowdb_fetch = git
+pkg_cowdb_repo = https://github.com/refuge/cowdb
+pkg_cowdb_commit = master
+
+PACKAGES += cowlib
+pkg_cowlib_name = cowlib
+pkg_cowlib_description = Support library for manipulating Web protocols.
+pkg_cowlib_homepage = http://ninenines.eu
+pkg_cowlib_fetch = git
+pkg_cowlib_repo = https://github.com/ninenines/cowlib
+pkg_cowlib_commit = 1.0.2
+
+PACKAGES += cpg
+pkg_cpg_name = cpg
+pkg_cpg_description = CloudI Process Groups
+pkg_cpg_homepage = https://github.com/okeuday/cpg
+pkg_cpg_fetch = git
+pkg_cpg_repo = https://github.com/okeuday/cpg
+pkg_cpg_commit = master
+
+PACKAGES += cqerl
+pkg_cqerl_name = cqerl
+pkg_cqerl_description = Native Erlang CQL client for Cassandra
+pkg_cqerl_homepage = https://matehat.github.io/cqerl/
+pkg_cqerl_fetch = git
+pkg_cqerl_repo = https://github.com/matehat/cqerl
+pkg_cqerl_commit = master
+
+PACKAGES += cr
+pkg_cr_name = cr
+pkg_cr_description = Chain Replication
+pkg_cr_homepage = https://synrc.com/apps/cr/doc/cr.htm
+pkg_cr_fetch = git
+pkg_cr_repo = https://github.com/spawnproc/cr
+pkg_cr_commit = master
+
+PACKAGES += cuttlefish
+pkg_cuttlefish_name = cuttlefish
+pkg_cuttlefish_description = never lose your childlike sense of wonder baby cuttlefish, promise me?
+pkg_cuttlefish_homepage = https://github.com/basho/cuttlefish
+pkg_cuttlefish_fetch = git
+pkg_cuttlefish_repo = https://github.com/basho/cuttlefish
+pkg_cuttlefish_commit = master
+
+PACKAGES += damocles
+pkg_damocles_name = damocles
+pkg_damocles_description = Erlang library for generating adversarial network conditions for QAing distributed applications/systems on a single Linux box.
+pkg_damocles_homepage = https://github.com/lostcolony/damocles
+pkg_damocles_fetch = git
+pkg_damocles_repo = https://github.com/lostcolony/damocles
+pkg_damocles_commit = master
+
+PACKAGES += debbie
+pkg_debbie_name = debbie
+pkg_debbie_description = .DEB Built In Erlang
+pkg_debbie_homepage = https://github.com/crownedgrouse/debbie
+pkg_debbie_fetch = git
+pkg_debbie_repo = https://github.com/crownedgrouse/debbie
+pkg_debbie_commit = master
+
+PACKAGES += decimal
+pkg_decimal_name = decimal
+pkg_decimal_description = An Erlang decimal arithmetic library
+pkg_decimal_homepage = https://github.com/tim/erlang-decimal
+pkg_decimal_fetch = git
+pkg_decimal_repo = https://github.com/tim/erlang-decimal
+pkg_decimal_commit = master
+
+PACKAGES += detergent
+pkg_detergent_name = detergent
+pkg_detergent_description = An emulsifying Erlang SOAP library
+pkg_detergent_homepage = https://github.com/devinus/detergent
+pkg_detergent_fetch = git
+pkg_detergent_repo = https://github.com/devinus/detergent
+pkg_detergent_commit = master
+
+PACKAGES += detest
+pkg_detest_name = detest
+pkg_detest_description = Tool for running tests on a cluster of erlang nodes
+pkg_detest_homepage = https://github.com/biokoda/detest
+pkg_detest_fetch = git
+pkg_detest_repo = https://github.com/biokoda/detest
+pkg_detest_commit = master
+
+PACKAGES += dh_date
+pkg_dh_date_name = dh_date
+pkg_dh_date_description = Date formatting / parsing library for erlang
+pkg_dh_date_homepage = https://github.com/daleharvey/dh_date
+pkg_dh_date_fetch = git
+pkg_dh_date_repo = https://github.com/daleharvey/dh_date
+pkg_dh_date_commit = master
+
+PACKAGES += dirbusterl
+pkg_dirbusterl_name = dirbusterl
+pkg_dirbusterl_description = DirBuster successor in Erlang
+pkg_dirbusterl_homepage = https://github.com/silentsignal/DirBustErl
+pkg_dirbusterl_fetch = git
+pkg_dirbusterl_repo = https://github.com/silentsignal/DirBustErl
+pkg_dirbusterl_commit = master
+
+PACKAGES += dispcount
+pkg_dispcount_name = dispcount
+pkg_dispcount_description = Erlang task dispatcher based on ETS counters.
+pkg_dispcount_homepage = https://github.com/ferd/dispcount
+pkg_dispcount_fetch = git
+pkg_dispcount_repo = https://github.com/ferd/dispcount
+pkg_dispcount_commit = master
+
+PACKAGES += dlhttpc
+pkg_dlhttpc_name = dlhttpc
+pkg_dlhttpc_description = dispcount-based lhttpc fork for massive amounts of requests to limited endpoints
+pkg_dlhttpc_homepage = https://github.com/ferd/dlhttpc
+pkg_dlhttpc_fetch = git
+pkg_dlhttpc_repo = https://github.com/ferd/dlhttpc
+pkg_dlhttpc_commit = master
+
+PACKAGES += dns
+pkg_dns_name = dns
+pkg_dns_description = Erlang DNS library
+pkg_dns_homepage = https://github.com/aetrion/dns_erlang
+pkg_dns_fetch = git
+pkg_dns_repo = https://github.com/aetrion/dns_erlang
+pkg_dns_commit = master
+
+PACKAGES += dnssd
+pkg_dnssd_name = dnssd
+pkg_dnssd_description = Erlang interface to Apple's Bonjour DNS Service Discovery implementation
+pkg_dnssd_homepage = https://github.com/benoitc/dnssd_erlang
+pkg_dnssd_fetch = git
+pkg_dnssd_repo = https://github.com/benoitc/dnssd_erlang
+pkg_dnssd_commit = master
+
+PACKAGES += dynamic_compile
+pkg_dynamic_compile_name = dynamic_compile
+pkg_dynamic_compile_description = compile and load erlang modules from string input
+pkg_dynamic_compile_homepage = https://github.com/jkvor/dynamic_compile
+pkg_dynamic_compile_fetch = git
+pkg_dynamic_compile_repo = https://github.com/jkvor/dynamic_compile
+pkg_dynamic_compile_commit = master
+
+PACKAGES += e2
+pkg_e2_name = e2
+pkg_e2_description = Library to simply writing correct OTP applications.
+pkg_e2_homepage = http://e2project.org
+pkg_e2_fetch = git
+pkg_e2_repo = https://github.com/gar1t/e2
+pkg_e2_commit = master
+
+PACKAGES += eamf
+pkg_eamf_name = eamf
+pkg_eamf_description = eAMF provides Action Message Format (AMF) support for Erlang
+pkg_eamf_homepage = https://github.com/mrinalwadhwa/eamf
+pkg_eamf_fetch = git
+pkg_eamf_repo = https://github.com/mrinalwadhwa/eamf
+pkg_eamf_commit = master
+
+PACKAGES += eavro
+pkg_eavro_name = eavro
+pkg_eavro_description = Apache Avro encoder/decoder
+pkg_eavro_homepage = https://github.com/SIfoxDevTeam/eavro
+pkg_eavro_fetch = git
+pkg_eavro_repo = https://github.com/SIfoxDevTeam/eavro
+pkg_eavro_commit = master
+
+PACKAGES += ecapnp
+pkg_ecapnp_name = ecapnp
+pkg_ecapnp_description = Cap'n Proto library for Erlang
+pkg_ecapnp_homepage = https://github.com/kaos/ecapnp
+pkg_ecapnp_fetch = git
+pkg_ecapnp_repo = https://github.com/kaos/ecapnp
+pkg_ecapnp_commit = master
+
+PACKAGES += econfig
+pkg_econfig_name = econfig
+pkg_econfig_description = simple Erlang config handler using INI files
+pkg_econfig_homepage = https://github.com/benoitc/econfig
+pkg_econfig_fetch = git
+pkg_econfig_repo = https://github.com/benoitc/econfig
+pkg_econfig_commit = master
+
+PACKAGES += edate
+pkg_edate_name = edate
+pkg_edate_description = date manipulation library for erlang
+pkg_edate_homepage = https://github.com/dweldon/edate
+pkg_edate_fetch = git
+pkg_edate_repo = https://github.com/dweldon/edate
+pkg_edate_commit = master
+
+PACKAGES += edgar
+pkg_edgar_name = edgar
+pkg_edgar_description = Erlang Does GNU AR
+pkg_edgar_homepage = https://github.com/crownedgrouse/edgar
+pkg_edgar_fetch = git
+pkg_edgar_repo = https://github.com/crownedgrouse/edgar
+pkg_edgar_commit = master
+
+PACKAGES += edis
+pkg_edis_name = edis
+pkg_edis_description = An Erlang implementation of Redis KV Store
+pkg_edis_homepage = http://inaka.github.com/edis/
+pkg_edis_fetch = git
+pkg_edis_repo = https://github.com/inaka/edis
+pkg_edis_commit = master
+
+PACKAGES += edns
+pkg_edns_name = edns
+pkg_edns_description = Erlang/OTP DNS server
+pkg_edns_homepage = https://github.com/hcvst/erlang-dns
+pkg_edns_fetch = git
+pkg_edns_repo = https://github.com/hcvst/erlang-dns
+pkg_edns_commit = master
+
+PACKAGES += edown
+pkg_edown_name = edown
+pkg_edown_description = EDoc extension for generating Github-flavored Markdown
+pkg_edown_homepage = https://github.com/uwiger/edown
+pkg_edown_fetch = git
+pkg_edown_repo = https://github.com/uwiger/edown
+pkg_edown_commit = master
+
+PACKAGES += eep
+pkg_eep_name = eep
+pkg_eep_description = Erlang Easy Profiling (eep) application provides a way to analyze application performance and call hierarchy
+pkg_eep_homepage = https://github.com/virtan/eep
+pkg_eep_fetch = git
+pkg_eep_repo = https://github.com/virtan/eep
+pkg_eep_commit = master
+
+PACKAGES += eep_app
+pkg_eep_app_name = eep_app
+pkg_eep_app_description = Embedded Event Processing
+pkg_eep_app_homepage = https://github.com/darach/eep-erl
+pkg_eep_app_fetch = git
+pkg_eep_app_repo = https://github.com/darach/eep-erl
+pkg_eep_app_commit = master
+
+PACKAGES += efene
+pkg_efene_name = efene
+pkg_efene_description = Alternative syntax for the Erlang Programming Language focusing on simplicity, ease of use and programmer UX
+pkg_efene_homepage = https://github.com/efene/efene
+pkg_efene_fetch = git
+pkg_efene_repo = https://github.com/efene/efene
+pkg_efene_commit = master
+
+PACKAGES += egeoip
+pkg_egeoip_name = egeoip
+pkg_egeoip_description = Erlang IP Geolocation module, currently supporting the MaxMind GeoLite City Database.
+pkg_egeoip_homepage = https://github.com/mochi/egeoip
+pkg_egeoip_fetch = git
+pkg_egeoip_repo = https://github.com/mochi/egeoip
+pkg_egeoip_commit = master
+
+PACKAGES += ehsa
+pkg_ehsa_name = ehsa
+pkg_ehsa_description = Erlang HTTP server basic and digest authentication modules
+pkg_ehsa_homepage = https://bitbucket.org/a12n/ehsa
+pkg_ehsa_fetch = hg
+pkg_ehsa_repo = https://bitbucket.org/a12n/ehsa
+pkg_ehsa_commit = default
+
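+# ehsa above is one of the few entries fetched with Mercurial (fetch = hg)
+# instead of git. A user Makefile can also bypass this index entirely with a
+# dep_* override that uses the same <fetch method> <repo> <commit> triple;
+# a sketch, reusing the upstream URL:
+#
+#     DEPS = ehsa
+#     dep_ehsa = hg https://bitbucket.org/a12n/ehsa default
+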
+PACKAGES += ej
+pkg_ej_name = ej
+pkg_ej_description = Helper module for working with Erlang terms representing JSON
+pkg_ej_homepage = https://github.com/seth/ej
+pkg_ej_fetch = git
+pkg_ej_repo = https://github.com/seth/ej
+pkg_ej_commit = master
+
+PACKAGES += ejabberd
+pkg_ejabberd_name = ejabberd
+pkg_ejabberd_description = Robust, ubiquitous and massively scalable Jabber / XMPP Instant Messaging platform
+pkg_ejabberd_homepage = https://github.com/processone/ejabberd
+pkg_ejabberd_fetch = git
+pkg_ejabberd_repo = https://github.com/processone/ejabberd
+pkg_ejabberd_commit = master
+
+PACKAGES += ejwt
+pkg_ejwt_name = ejwt
+pkg_ejwt_description = erlang library for JSON Web Token
+pkg_ejwt_homepage = https://github.com/artefactop/ejwt
+pkg_ejwt_fetch = git
+pkg_ejwt_repo = https://github.com/artefactop/ejwt
+pkg_ejwt_commit = master
+
+PACKAGES += ekaf
+pkg_ekaf_name = ekaf
+pkg_ekaf_description = A minimal, high-performance Kafka client in Erlang.
+pkg_ekaf_homepage = https://github.com/helpshift/ekaf
+pkg_ekaf_fetch = git
+pkg_ekaf_repo = https://github.com/helpshift/ekaf
+pkg_ekaf_commit = master
+
+PACKAGES += elarm
+pkg_elarm_name = elarm
+pkg_elarm_description = Alarm Manager for Erlang.
+pkg_elarm_homepage = https://github.com/esl/elarm
+pkg_elarm_fetch = git
+pkg_elarm_repo = https://github.com/esl/elarm
+pkg_elarm_commit = master
+
+PACKAGES += eleveldb
+pkg_eleveldb_name = eleveldb
+pkg_eleveldb_description = Erlang LevelDB API
+pkg_eleveldb_homepage = https://github.com/basho/eleveldb
+pkg_eleveldb_fetch = git
+pkg_eleveldb_repo = https://github.com/basho/eleveldb
+pkg_eleveldb_commit = master
+
+PACKAGES += elixir
+pkg_elixir_name = elixir
+pkg_elixir_description = Elixir is a dynamic, functional language designed for building scalable and maintainable applications
+pkg_elixir_homepage = https://elixir-lang.org/
+pkg_elixir_fetch = git
+pkg_elixir_repo = https://github.com/elixir-lang/elixir
+pkg_elixir_commit = master
+
+PACKAGES += elli
+pkg_elli_name = elli
+pkg_elli_description = Simple, robust and performant Erlang web server
+pkg_elli_homepage = https://github.com/elli-lib/elli
+pkg_elli_fetch = git
+pkg_elli_repo = https://github.com/elli-lib/elli
+pkg_elli_commit = master
+
+PACKAGES += elvis
+pkg_elvis_name = elvis
+pkg_elvis_description = Erlang Style Reviewer
+pkg_elvis_homepage = https://github.com/inaka/elvis
+pkg_elvis_fetch = git
+pkg_elvis_repo = https://github.com/inaka/elvis
+pkg_elvis_commit = master
+
+PACKAGES += emagick
+pkg_emagick_name = emagick
+pkg_emagick_description = Wrapper for Graphics/ImageMagick command line tool.
+pkg_emagick_homepage = https://github.com/kivra/emagick
+pkg_emagick_fetch = git
+pkg_emagick_repo = https://github.com/kivra/emagick
+pkg_emagick_commit = master
+
+PACKAGES += emysql
+pkg_emysql_name = emysql
+pkg_emysql_description = Stable, pure Erlang MySQL driver.
+pkg_emysql_homepage = https://github.com/Eonblast/Emysql
+pkg_emysql_fetch = git
+pkg_emysql_repo = https://github.com/Eonblast/Emysql
+pkg_emysql_commit = master
+
+PACKAGES += enm
+pkg_enm_name = enm
+pkg_enm_description = Erlang driver for nanomsg
+pkg_enm_homepage = https://github.com/basho/enm
+pkg_enm_fetch = git
+pkg_enm_repo = https://github.com/basho/enm
+pkg_enm_commit = master
+
+PACKAGES += entop
+pkg_entop_name = entop
+pkg_entop_description = A top-like tool for monitoring an Erlang node
+pkg_entop_homepage = https://github.com/mazenharake/entop
+pkg_entop_fetch = git
+pkg_entop_repo = https://github.com/mazenharake/entop
+pkg_entop_commit = master
+
+PACKAGES += epcap
+pkg_epcap_name = epcap
+pkg_epcap_description = Erlang packet capture interface using pcap
+pkg_epcap_homepage = https://github.com/msantos/epcap
+pkg_epcap_fetch = git
+pkg_epcap_repo = https://github.com/msantos/epcap
+pkg_epcap_commit = master
+
+PACKAGES += eper
+pkg_eper_name = eper
+pkg_eper_description = Erlang performance and debugging tools.
+pkg_eper_homepage = https://github.com/massemanet/eper
+pkg_eper_fetch = git
+pkg_eper_repo = https://github.com/massemanet/eper
+pkg_eper_commit = master
+
+PACKAGES += epgsql
+pkg_epgsql_name = epgsql
+pkg_epgsql_description = Erlang PostgreSQL client library.
+pkg_epgsql_homepage = https://github.com/epgsql/epgsql
+pkg_epgsql_fetch = git
+pkg_epgsql_repo = https://github.com/epgsql/epgsql
+pkg_epgsql_commit = master
+
+PACKAGES += episcina
+pkg_episcina_name = episcina
+pkg_episcina_description = A simple, non-intrusive resource pool for connections
+pkg_episcina_homepage = https://github.com/erlware/episcina
+pkg_episcina_fetch = git
+pkg_episcina_repo = https://github.com/erlware/episcina
+pkg_episcina_commit = master
+
+PACKAGES += eplot
+pkg_eplot_name = eplot
+pkg_eplot_description = A plot engine written in erlang.
+pkg_eplot_homepage = https://github.com/psyeugenic/eplot
+pkg_eplot_fetch = git
+pkg_eplot_repo = https://github.com/psyeugenic/eplot
+pkg_eplot_commit = master
+
+PACKAGES += epocxy
+pkg_epocxy_name = epocxy
+pkg_epocxy_description = Erlang Patterns of Concurrency
+pkg_epocxy_homepage = https://github.com/duomark/epocxy
+pkg_epocxy_fetch = git
+pkg_epocxy_repo = https://github.com/duomark/epocxy
+pkg_epocxy_commit = master
+
+PACKAGES += epubnub
+pkg_epubnub_name = epubnub
+pkg_epubnub_description = Erlang PubNub API
+pkg_epubnub_homepage = https://github.com/tsloughter/epubnub
+pkg_epubnub_fetch = git
+pkg_epubnub_repo = https://github.com/tsloughter/epubnub
+pkg_epubnub_commit = master
+
+PACKAGES += eqm
+pkg_eqm_name = eqm
+pkg_eqm_description = Erlang pub sub with supply-demand channels
+pkg_eqm_homepage = https://github.com/loucash/eqm
+pkg_eqm_fetch = git
+pkg_eqm_repo = https://github.com/loucash/eqm
+pkg_eqm_commit = master
+
+PACKAGES += eredis
+pkg_eredis_name = eredis
+pkg_eredis_description = Erlang Redis client
+pkg_eredis_homepage = https://github.com/wooga/eredis
+pkg_eredis_fetch = git
+pkg_eredis_repo = https://github.com/wooga/eredis
+pkg_eredis_commit = master
+
+PACKAGES += eredis_pool
+pkg_eredis_pool_name = eredis_pool
+pkg_eredis_pool_description = eredis_pool is a pool of Redis clients, using eredis and poolboy.
+pkg_eredis_pool_homepage = https://github.com/hiroeorz/eredis_pool
+pkg_eredis_pool_fetch = git
+pkg_eredis_pool_repo = https://github.com/hiroeorz/eredis_pool
+pkg_eredis_pool_commit = master
+
+PACKAGES += erl_streams
+pkg_erl_streams_name = erl_streams
+pkg_erl_streams_description = Streams in Erlang
+pkg_erl_streams_homepage = https://github.com/epappas/erl_streams
+pkg_erl_streams_fetch = git
+pkg_erl_streams_repo = https://github.com/epappas/erl_streams
+pkg_erl_streams_commit = master
+
+PACKAGES += erlang_cep
+pkg_erlang_cep_name = erlang_cep
+pkg_erlang_cep_description = A basic CEP package written in erlang
+pkg_erlang_cep_homepage = https://github.com/danmacklin/erlang_cep
+pkg_erlang_cep_fetch = git
+pkg_erlang_cep_repo = https://github.com/danmacklin/erlang_cep
+pkg_erlang_cep_commit = master
+
+PACKAGES += erlang_js
+pkg_erlang_js_name = erlang_js
+pkg_erlang_js_description = A linked-in driver for Erlang to Mozilla's Spidermonkey Javascript runtime.
+pkg_erlang_js_homepage = https://github.com/basho/erlang_js
+pkg_erlang_js_fetch = git
+pkg_erlang_js_repo = https://github.com/basho/erlang_js
+pkg_erlang_js_commit = master
+
+PACKAGES += erlang_localtime
+pkg_erlang_localtime_name = erlang_localtime
+pkg_erlang_localtime_description = Erlang library for conversion from one local time to another
+pkg_erlang_localtime_homepage = https://github.com/dmitryme/erlang_localtime
+pkg_erlang_localtime_fetch = git
+pkg_erlang_localtime_repo = https://github.com/dmitryme/erlang_localtime
+pkg_erlang_localtime_commit = master
+
+PACKAGES += erlang_smtp
+pkg_erlang_smtp_name = erlang_smtp
+pkg_erlang_smtp_description = Erlang SMTP and POP3 server code.
+pkg_erlang_smtp_homepage = https://github.com/tonyg/erlang-smtp
+pkg_erlang_smtp_fetch = git
+pkg_erlang_smtp_repo = https://github.com/tonyg/erlang-smtp
+pkg_erlang_smtp_commit = master
+
+PACKAGES += erlang_term
+pkg_erlang_term_name = erlang_term
+pkg_erlang_term_description = Erlang Term Info
+pkg_erlang_term_homepage = https://github.com/okeuday/erlang_term
+pkg_erlang_term_fetch = git
+pkg_erlang_term_repo = https://github.com/okeuday/erlang_term
+pkg_erlang_term_commit = master
+
+PACKAGES += erlastic_search
+pkg_erlastic_search_name = erlastic_search
+pkg_erlastic_search_description = An Erlang app for communicating with Elastic Search's REST interface.
+pkg_erlastic_search_homepage = https://github.com/tsloughter/erlastic_search
+pkg_erlastic_search_fetch = git
+pkg_erlastic_search_repo = https://github.com/tsloughter/erlastic_search
+pkg_erlastic_search_commit = master
+
+PACKAGES += erlasticsearch
+pkg_erlasticsearch_name = erlasticsearch
+pkg_erlasticsearch_description = Erlang thrift interface to elastic_search
+pkg_erlasticsearch_homepage = https://github.com/dieswaytoofast/erlasticsearch
+pkg_erlasticsearch_fetch = git
+pkg_erlasticsearch_repo = https://github.com/dieswaytoofast/erlasticsearch
+pkg_erlasticsearch_commit = master
+
+PACKAGES += erlbrake
+pkg_erlbrake_name = erlbrake
+pkg_erlbrake_description = Erlang Airbrake notification client
+pkg_erlbrake_homepage = https://github.com/kenpratt/erlbrake
+pkg_erlbrake_fetch = git
+pkg_erlbrake_repo = https://github.com/kenpratt/erlbrake
+pkg_erlbrake_commit = master
+
+PACKAGES += erlcloud
+pkg_erlcloud_name = erlcloud
+pkg_erlcloud_description = Cloud Computing library for erlang (Amazon EC2, S3, SQS, SimpleDB, Mechanical Turk, ELB)
+pkg_erlcloud_homepage = https://github.com/gleber/erlcloud
+pkg_erlcloud_fetch = git
+pkg_erlcloud_repo = https://github.com/gleber/erlcloud
+pkg_erlcloud_commit = master
+
+PACKAGES += erlcron
+pkg_erlcron_name = erlcron
+pkg_erlcron_description = Erlang cronish system
+pkg_erlcron_homepage = https://github.com/erlware/erlcron
+pkg_erlcron_fetch = git
+pkg_erlcron_repo = https://github.com/erlware/erlcron
+pkg_erlcron_commit = master
+
+PACKAGES += erldb
+pkg_erldb_name = erldb
+pkg_erldb_description = ORM (Object-relational mapping) application implemented in Erlang
+pkg_erldb_homepage = http://erldb.org
+pkg_erldb_fetch = git
+pkg_erldb_repo = https://github.com/erldb/erldb
+pkg_erldb_commit = master
+
+PACKAGES += erldis
+pkg_erldis_name = erldis
+pkg_erldis_description = redis erlang client library
+pkg_erldis_homepage = https://github.com/cstar/erldis
+pkg_erldis_fetch = git
+pkg_erldis_repo = https://github.com/cstar/erldis
+pkg_erldis_commit = master
+
+PACKAGES += erldns
+pkg_erldns_name = erldns
+pkg_erldns_description = DNS server, in erlang.
+pkg_erldns_homepage = https://github.com/aetrion/erl-dns
+pkg_erldns_fetch = git
+pkg_erldns_repo = https://github.com/aetrion/erl-dns
+pkg_erldns_commit = master
+
+PACKAGES += erldocker
+pkg_erldocker_name = erldocker
+pkg_erldocker_description = Docker Remote API client for Erlang
+pkg_erldocker_homepage = https://github.com/proger/erldocker
+pkg_erldocker_fetch = git
+pkg_erldocker_repo = https://github.com/proger/erldocker
+pkg_erldocker_commit = master
+
+PACKAGES += erlfsmon
+pkg_erlfsmon_name = erlfsmon
+pkg_erlfsmon_description = Erlang filesystem event watcher for Linux and OSX
+pkg_erlfsmon_homepage = https://github.com/proger/erlfsmon
+pkg_erlfsmon_fetch = git
+pkg_erlfsmon_repo = https://github.com/proger/erlfsmon
+pkg_erlfsmon_commit = master
+
+PACKAGES += erlgit
+pkg_erlgit_name = erlgit
+pkg_erlgit_description = Erlang convenience wrapper around git executable
+pkg_erlgit_homepage = https://github.com/gleber/erlgit
+pkg_erlgit_fetch = git
+pkg_erlgit_repo = https://github.com/gleber/erlgit
+pkg_erlgit_commit = master
+
+PACKAGES += erlguten
+pkg_erlguten_name = erlguten
+pkg_erlguten_description = ErlGuten is a system for high-quality typesetting, written purely in Erlang.
+pkg_erlguten_homepage = https://github.com/richcarl/erlguten
+pkg_erlguten_fetch = git
+pkg_erlguten_repo = https://github.com/richcarl/erlguten
+pkg_erlguten_commit = master
+
+PACKAGES += erlmc
+pkg_erlmc_name = erlmc
+pkg_erlmc_description = Erlang memcached binary protocol client
+pkg_erlmc_homepage = https://github.com/jkvor/erlmc
+pkg_erlmc_fetch = git
+pkg_erlmc_repo = https://github.com/jkvor/erlmc
+pkg_erlmc_commit = master
+
+PACKAGES += erlmongo
+pkg_erlmongo_name = erlmongo
+pkg_erlmongo_description = Record based Erlang driver for MongoDB with gridfs support
+pkg_erlmongo_homepage = https://github.com/SergejJurecko/erlmongo
+pkg_erlmongo_fetch = git
+pkg_erlmongo_repo = https://github.com/SergejJurecko/erlmongo
+pkg_erlmongo_commit = master
+
+PACKAGES += erlog
+pkg_erlog_name = erlog
+pkg_erlog_description = Prolog interpreter in and for Erlang
+pkg_erlog_homepage = https://github.com/rvirding/erlog
+pkg_erlog_fetch = git
+pkg_erlog_repo = https://github.com/rvirding/erlog
+pkg_erlog_commit = master
+
+PACKAGES += erlpass
+pkg_erlpass_name = erlpass
+pkg_erlpass_description = A library to handle password hashing and changing in a safe manner, independent from any kind of storage whatsoever.
+pkg_erlpass_homepage = https://github.com/ferd/erlpass
+pkg_erlpass_fetch = git
+pkg_erlpass_repo = https://github.com/ferd/erlpass
+pkg_erlpass_commit = master
+
+PACKAGES += erlport
+pkg_erlport_name = erlport
+pkg_erlport_description = ErlPort - connect Erlang to other languages
+pkg_erlport_homepage = https://github.com/hdima/erlport
+pkg_erlport_fetch = git
+pkg_erlport_repo = https://github.com/hdima/erlport
+pkg_erlport_commit = master
+
+PACKAGES += erlsh
+pkg_erlsh_name = erlsh
+pkg_erlsh_description = Erlang shell tools
+pkg_erlsh_homepage = https://github.com/proger/erlsh
+pkg_erlsh_fetch = git
+pkg_erlsh_repo = https://github.com/proger/erlsh
+pkg_erlsh_commit = master
+
+PACKAGES += erlsha2
+pkg_erlsha2_name = erlsha2
+pkg_erlsha2_description = SHA-224, SHA-256, SHA-384, SHA-512 implemented in Erlang NIFs.
+pkg_erlsha2_homepage = https://github.com/vinoski/erlsha2
+pkg_erlsha2_fetch = git
+pkg_erlsha2_repo = https://github.com/vinoski/erlsha2
+pkg_erlsha2_commit = master
+
+PACKAGES += erlsom
+pkg_erlsom_name = erlsom
+pkg_erlsom_description = XML parser for Erlang
+pkg_erlsom_homepage = https://github.com/willemdj/erlsom
+pkg_erlsom_fetch = git
+pkg_erlsom_repo = https://github.com/willemdj/erlsom
+pkg_erlsom_commit = master
+
+PACKAGES += erlubi
+pkg_erlubi_name = erlubi
+pkg_erlubi_description = Ubigraph Erlang Client (and Process Visualizer)
+pkg_erlubi_homepage = https://github.com/krestenkrab/erlubi
+pkg_erlubi_fetch = git
+pkg_erlubi_repo = https://github.com/krestenkrab/erlubi
+pkg_erlubi_commit = master
+
+PACKAGES += erlvolt
+pkg_erlvolt_name = erlvolt
+pkg_erlvolt_description = VoltDB Erlang Client Driver
+pkg_erlvolt_homepage = https://github.com/VoltDB/voltdb-client-erlang
+pkg_erlvolt_fetch = git
+pkg_erlvolt_repo = https://github.com/VoltDB/voltdb-client-erlang
+pkg_erlvolt_commit = master
+
+PACKAGES += erlware_commons
+pkg_erlware_commons_name = erlware_commons
+pkg_erlware_commons_description = Erlware Commons is an Erlware project focused on all aspects of reusable Erlang components.
+pkg_erlware_commons_homepage = https://github.com/erlware/erlware_commons
+pkg_erlware_commons_fetch = git
+pkg_erlware_commons_repo = https://github.com/erlware/erlware_commons
+pkg_erlware_commons_commit = master
+
+PACKAGES += erlydtl
+pkg_erlydtl_name = erlydtl
+pkg_erlydtl_description = Django Template Language for Erlang.
+pkg_erlydtl_homepage = https://github.com/erlydtl/erlydtl
+pkg_erlydtl_fetch = git
+pkg_erlydtl_repo = https://github.com/erlydtl/erlydtl
+pkg_erlydtl_commit = master
+
+PACKAGES += errd
+pkg_errd_name = errd
+pkg_errd_description = Erlang RRDTool library
+pkg_errd_homepage = https://github.com/archaelus/errd
+pkg_errd_fetch = git
+pkg_errd_repo = https://github.com/archaelus/errd
+pkg_errd_commit = master
+
+PACKAGES += erserve
+pkg_erserve_name = erserve
+pkg_erserve_description = Erlang/Rserve communication interface
+pkg_erserve_homepage = https://github.com/del/erserve
+pkg_erserve_fetch = git
+pkg_erserve_repo = https://github.com/del/erserve
+pkg_erserve_commit = master
+
+PACKAGES += erwa
+pkg_erwa_name = erwa
+pkg_erwa_description = A WAMP router and client written in Erlang.
+pkg_erwa_homepage = https://github.com/bwegh/erwa
+pkg_erwa_fetch = git
+pkg_erwa_repo = https://github.com/bwegh/erwa
+pkg_erwa_commit = master
+
+PACKAGES += escalus
+pkg_escalus_name = escalus
+pkg_escalus_description = An XMPP client library in Erlang for conveniently testing XMPP servers
+pkg_escalus_homepage = https://github.com/esl/escalus
+pkg_escalus_fetch = git
+pkg_escalus_repo = https://github.com/esl/escalus
+pkg_escalus_commit = master
+
+PACKAGES += esh_mk
+pkg_esh_mk_name = esh_mk
+pkg_esh_mk_description = esh template engine plugin for erlang.mk
+pkg_esh_mk_homepage = https://github.com/crownedgrouse/esh.mk
+pkg_esh_mk_fetch = git
+pkg_esh_mk_repo = https://github.com/crownedgrouse/esh.mk.git
+pkg_esh_mk_commit = master
+
+PACKAGES += espec
+pkg_espec_name = espec
+pkg_espec_description = ESpec: Behaviour driven development framework for Erlang
+pkg_espec_homepage = https://github.com/lucaspiller/espec
+pkg_espec_fetch = git
+pkg_espec_repo = https://github.com/lucaspiller/espec
+pkg_espec_commit = master
+
+PACKAGES += estatsd
+pkg_estatsd_name = estatsd
+pkg_estatsd_description = Erlang stats aggregation app that periodically flushes data to graphite
+pkg_estatsd_homepage = https://github.com/RJ/estatsd
+pkg_estatsd_fetch = git
+pkg_estatsd_repo = https://github.com/RJ/estatsd
+pkg_estatsd_commit = master
+
+PACKAGES += etap
+pkg_etap_name = etap
+pkg_etap_description = etap is a simple erlang testing library that provides TAP compliant output.
+pkg_etap_homepage = https://github.com/ngerakines/etap
+pkg_etap_fetch = git
+pkg_etap_repo = https://github.com/ngerakines/etap
+pkg_etap_commit = master
+
+PACKAGES += etest
+pkg_etest_name = etest
+pkg_etest_description = A lightweight, convention over configuration test framework for Erlang
+pkg_etest_homepage = https://github.com/wooga/etest
+pkg_etest_fetch = git
+pkg_etest_repo = https://github.com/wooga/etest
+pkg_etest_commit = master
+
+PACKAGES += etest_http
+pkg_etest_http_name = etest_http
+pkg_etest_http_description = etest Assertions around HTTP (client-side)
+pkg_etest_http_homepage = https://github.com/wooga/etest_http
+pkg_etest_http_fetch = git
+pkg_etest_http_repo = https://github.com/wooga/etest_http
+pkg_etest_http_commit = master
+
+PACKAGES += etoml
+pkg_etoml_name = etoml
+pkg_etoml_description = TOML language erlang parser
+pkg_etoml_homepage = https://github.com/kalta/etoml
+pkg_etoml_fetch = git
+pkg_etoml_repo = https://github.com/kalta/etoml
+pkg_etoml_commit = master
+
+PACKAGES += eunit
+pkg_eunit_name = eunit
+pkg_eunit_description = The EUnit lightweight unit testing framework for Erlang - this is the canonical development repository.
+pkg_eunit_homepage = https://github.com/richcarl/eunit
+pkg_eunit_fetch = git
+pkg_eunit_repo = https://github.com/richcarl/eunit
+pkg_eunit_commit = master
+
+PACKAGES += eunit_formatters
+pkg_eunit_formatters_name = eunit_formatters
+pkg_eunit_formatters_description = Because eunit's output sucks. Let's make it better.
+pkg_eunit_formatters_homepage = https://github.com/seancribbs/eunit_formatters
+pkg_eunit_formatters_fetch = git
+pkg_eunit_formatters_repo = https://github.com/seancribbs/eunit_formatters
+pkg_eunit_formatters_commit = master
+
+PACKAGES += euthanasia
+pkg_euthanasia_name = euthanasia
+pkg_euthanasia_description = Merciful killer for your Erlang processes
+pkg_euthanasia_homepage = https://github.com/doubleyou/euthanasia
+pkg_euthanasia_fetch = git
+pkg_euthanasia_repo = https://github.com/doubleyou/euthanasia
+pkg_euthanasia_commit = master
+
+PACKAGES += evum
+pkg_evum_name = evum
+pkg_evum_description = Spawn Linux VMs as Erlang processes in the Erlang VM
+pkg_evum_homepage = https://github.com/msantos/evum
+pkg_evum_fetch = git
+pkg_evum_repo = https://github.com/msantos/evum
+pkg_evum_commit = master
+
+PACKAGES += exec
+pkg_exec_name = erlexec
+pkg_exec_description = Execute and control OS processes from Erlang/OTP.
+pkg_exec_homepage = http://saleyn.github.com/erlexec
+pkg_exec_fetch = git
+pkg_exec_repo = https://github.com/saleyn/erlexec
+pkg_exec_commit = master
+
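+# The index key (exec) and the application name recorded in pkg_exec_name
+# (erlexec) can differ; the key is what goes into DEPS. The same fields also
+# power package discovery from a project using erlang.mk, for example:
+#
+#     make search q=exec
+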
+PACKAGES += exml
+pkg_exml_name = exml
+pkg_exml_description = XML parsing library in Erlang
+pkg_exml_homepage = https://github.com/paulgray/exml
+pkg_exml_fetch = git
+pkg_exml_repo = https://github.com/paulgray/exml
+pkg_exml_commit = master
+
+PACKAGES += exometer
+pkg_exometer_name = exometer
+pkg_exometer_description = Basic measurement objects and probe behavior
+pkg_exometer_homepage = https://github.com/Feuerlabs/exometer
+pkg_exometer_fetch = git
+pkg_exometer_repo = https://github.com/Feuerlabs/exometer
+pkg_exometer_commit = master
+
+PACKAGES += exs1024
+pkg_exs1024_name = exs1024
+pkg_exs1024_description = Xorshift1024star pseudo random number generator for Erlang.
+pkg_exs1024_homepage = https://github.com/jj1bdx/exs1024
+pkg_exs1024_fetch = git
+pkg_exs1024_repo = https://github.com/jj1bdx/exs1024
+pkg_exs1024_commit = master
+
+PACKAGES += exs64
+pkg_exs64_name = exs64
+pkg_exs64_description = Xorshift64star pseudo random number generator for Erlang.
+pkg_exs64_homepage = https://github.com/jj1bdx/exs64
+pkg_exs64_fetch = git
+pkg_exs64_repo = https://github.com/jj1bdx/exs64
+pkg_exs64_commit = master
+
+PACKAGES += exsplus116
+pkg_exsplus116_name = exsplus116
+pkg_exsplus116_description = Xorshift116plus for Erlang
+pkg_exsplus116_homepage = https://github.com/jj1bdx/exsplus116
+pkg_exsplus116_fetch = git
+pkg_exsplus116_repo = https://github.com/jj1bdx/exsplus116
+pkg_exsplus116_commit = master
+
+PACKAGES += exsplus128
+pkg_exsplus128_name = exsplus128
+pkg_exsplus128_description = Xorshift128plus pseudo random number generator for Erlang.
+pkg_exsplus128_homepage = https://github.com/jj1bdx/exsplus128
+pkg_exsplus128_fetch = git
+pkg_exsplus128_repo = https://github.com/jj1bdx/exsplus128
+pkg_exsplus128_commit = master
+
+PACKAGES += ezmq
+pkg_ezmq_name = ezmq
+pkg_ezmq_description = zMQ implemented in Erlang
+pkg_ezmq_homepage = https://github.com/RoadRunnr/ezmq
+pkg_ezmq_fetch = git
+pkg_ezmq_repo = https://github.com/RoadRunnr/ezmq
+pkg_ezmq_commit = master
+
+PACKAGES += ezmtp
+pkg_ezmtp_name = ezmtp
+pkg_ezmtp_description = ZMTP protocol in pure Erlang.
+pkg_ezmtp_homepage = https://github.com/a13x/ezmtp
+pkg_ezmtp_fetch = git
+pkg_ezmtp_repo = https://github.com/a13x/ezmtp
+pkg_ezmtp_commit = master
+
+PACKAGES += fast_disk_log
+pkg_fast_disk_log_name = fast_disk_log
+pkg_fast_disk_log_description = Pool-based asynchronous Erlang disk logger
+pkg_fast_disk_log_homepage = https://github.com/lpgauth/fast_disk_log
+pkg_fast_disk_log_fetch = git
+pkg_fast_disk_log_repo = https://github.com/lpgauth/fast_disk_log
+pkg_fast_disk_log_commit = master
+
+PACKAGES += feeder
+pkg_feeder_name = feeder
+pkg_feeder_description = Stream parse RSS and Atom formatted XML feeds.
+pkg_feeder_homepage = https://github.com/michaelnisi/feeder
+pkg_feeder_fetch = git
+pkg_feeder_repo = https://github.com/michaelnisi/feeder
+pkg_feeder_commit = master
+
+PACKAGES += find_crate
+pkg_find_crate_name = find_crate
+pkg_find_crate_description = Find Rust libs and exes in Erlang application priv directory
+pkg_find_crate_homepage = https://github.com/goertzenator/find_crate
+pkg_find_crate_fetch = git
+pkg_find_crate_repo = https://github.com/goertzenator/find_crate
+pkg_find_crate_commit = master
+
+PACKAGES += fix
+pkg_fix_name = fix
+pkg_fix_description = http://fixprotocol.org/ implementation.
+pkg_fix_homepage = https://github.com/maxlapshin/fix
+pkg_fix_fetch = git
+pkg_fix_repo = https://github.com/maxlapshin/fix
+pkg_fix_commit = master
+
+PACKAGES += flower
+pkg_flower_name = flower
+pkg_flower_description = FlowER - an Erlang OpenFlow development platform
+pkg_flower_homepage = https://github.com/travelping/flower
+pkg_flower_fetch = git
+pkg_flower_repo = https://github.com/travelping/flower
+pkg_flower_commit = master
+
+PACKAGES += fn
+pkg_fn_name = fn
+pkg_fn_description = Function utilities for Erlang
+pkg_fn_homepage = https://github.com/reiddraper/fn
+pkg_fn_fetch = git
+pkg_fn_repo = https://github.com/reiddraper/fn
+pkg_fn_commit = master
+
+PACKAGES += folsom
+pkg_folsom_name = folsom
+pkg_folsom_description = Expose Erlang Events and Metrics
+pkg_folsom_homepage = https://github.com/boundary/folsom
+pkg_folsom_fetch = git
+pkg_folsom_repo = https://github.com/boundary/folsom
+pkg_folsom_commit = master
+
+PACKAGES += folsom_cowboy
+pkg_folsom_cowboy_name = folsom_cowboy
+pkg_folsom_cowboy_description = A Cowboy based Folsom HTTP Wrapper.
+pkg_folsom_cowboy_homepage = https://github.com/boundary/folsom_cowboy
+pkg_folsom_cowboy_fetch = git
+pkg_folsom_cowboy_repo = https://github.com/boundary/folsom_cowboy
+pkg_folsom_cowboy_commit = master
+
+PACKAGES += folsomite
+pkg_folsomite_name = folsomite
+pkg_folsomite_description = blow up your graphite / riemann server with folsom metrics
+pkg_folsomite_homepage = https://github.com/campanja/folsomite
+pkg_folsomite_fetch = git
+pkg_folsomite_repo = https://github.com/campanja/folsomite
+pkg_folsomite_commit = master
+
+PACKAGES += fs
+pkg_fs_name = fs
+pkg_fs_description = Erlang FileSystem Listener
+pkg_fs_homepage = https://github.com/synrc/fs
+pkg_fs_fetch = git
+pkg_fs_repo = https://github.com/synrc/fs
+pkg_fs_commit = master
+
+PACKAGES += fuse
+pkg_fuse_name = fuse
+pkg_fuse_description = A Circuit Breaker for Erlang
+pkg_fuse_homepage = https://github.com/jlouis/fuse
+pkg_fuse_fetch = git
+pkg_fuse_repo = https://github.com/jlouis/fuse
+pkg_fuse_commit = master
+
+PACKAGES += gcm
+pkg_gcm_name = gcm
+pkg_gcm_description = An Erlang application for Google Cloud Messaging
+pkg_gcm_homepage = https://github.com/pdincau/gcm-erlang
+pkg_gcm_fetch = git
+pkg_gcm_repo = https://github.com/pdincau/gcm-erlang
+pkg_gcm_commit = master
+
+PACKAGES += gcprof
+pkg_gcprof_name = gcprof
+pkg_gcprof_description = Garbage Collection profiler for Erlang
+pkg_gcprof_homepage = https://github.com/knutin/gcprof
+pkg_gcprof_fetch = git
+pkg_gcprof_repo = https://github.com/knutin/gcprof
+pkg_gcprof_commit = master
+
+PACKAGES += geas
+pkg_geas_name = geas
+pkg_geas_description = Guess Erlang Application Scattering
+pkg_geas_homepage = https://github.com/crownedgrouse/geas
+pkg_geas_fetch = git
+pkg_geas_repo = https://github.com/crownedgrouse/geas
+pkg_geas_commit = master
+
+PACKAGES += geef
+pkg_geef_name = geef
+pkg_geef_description = Git NEEEEF (Erlang NIF)
+pkg_geef_homepage = https://github.com/carlosmn/geef
+pkg_geef_fetch = git
+pkg_geef_repo = https://github.com/carlosmn/geef
+pkg_geef_commit = master
+
+PACKAGES += gen_coap
+pkg_gen_coap_name = gen_coap
+pkg_gen_coap_description = Generic Erlang CoAP Client/Server
+pkg_gen_coap_homepage = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_fetch = git
+pkg_gen_coap_repo = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_commit = master
+
+PACKAGES += gen_cycle
+pkg_gen_cycle_name = gen_cycle
+pkg_gen_cycle_description = Simple, generic OTP behaviour for recurring tasks
+pkg_gen_cycle_homepage = https://github.com/aerosol/gen_cycle
+pkg_gen_cycle_fetch = git
+pkg_gen_cycle_repo = https://github.com/aerosol/gen_cycle
+pkg_gen_cycle_commit = develop
+
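+# gen_cycle above pins the develop branch rather than master. When only the
+# revision needs to change, erlang.mk also accepts a commit-only override in
+# the user Makefile, keeping the fetch method and repo from this index (the
+# tag shown is hypothetical):
+#
+#     DEPS = gen_cycle
+#     dep_gen_cycle_commit = v1.0.0
+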
+PACKAGES += gen_icmp
+pkg_gen_icmp_name = gen_icmp
+pkg_gen_icmp_description = Erlang interface to ICMP sockets
+pkg_gen_icmp_homepage = https://github.com/msantos/gen_icmp
+pkg_gen_icmp_fetch = git
+pkg_gen_icmp_repo = https://github.com/msantos/gen_icmp
+pkg_gen_icmp_commit = master
+
+PACKAGES += gen_leader
+pkg_gen_leader_name = gen_leader
+pkg_gen_leader_description = leader election behavior
+pkg_gen_leader_homepage = https://github.com/garret-smith/gen_leader_revival
+pkg_gen_leader_fetch = git
+pkg_gen_leader_repo = https://github.com/garret-smith/gen_leader_revival
+pkg_gen_leader_commit = master
+
+PACKAGES += gen_nb_server
+pkg_gen_nb_server_name = gen_nb_server
+pkg_gen_nb_server_description = OTP behavior for writing non-blocking servers
+pkg_gen_nb_server_homepage = https://github.com/kevsmith/gen_nb_server
+pkg_gen_nb_server_fetch = git
+pkg_gen_nb_server_repo = https://github.com/kevsmith/gen_nb_server
+pkg_gen_nb_server_commit = master
+
+PACKAGES += gen_paxos
+pkg_gen_paxos_name = gen_paxos
+pkg_gen_paxos_description = An Erlang/OTP-style implementation of the PAXOS distributed consensus protocol
+pkg_gen_paxos_homepage = https://github.com/gburd/gen_paxos
+pkg_gen_paxos_fetch = git
+pkg_gen_paxos_repo = https://github.com/gburd/gen_paxos
+pkg_gen_paxos_commit = master
+
+PACKAGES += gen_rpc
+pkg_gen_rpc_name = gen_rpc
+pkg_gen_rpc_description = A scalable RPC library for Erlang-VM based languages
+pkg_gen_rpc_homepage = https://github.com/priestjim/gen_rpc.git
+pkg_gen_rpc_fetch = git
+pkg_gen_rpc_repo = https://github.com/priestjim/gen_rpc.git
+pkg_gen_rpc_commit = master
+
+PACKAGES += gen_smtp
+pkg_gen_smtp_name = gen_smtp
+pkg_gen_smtp_description = A generic Erlang SMTP server and client that can be extended via callback modules
+pkg_gen_smtp_homepage = https://github.com/Vagabond/gen_smtp
+pkg_gen_smtp_fetch = git
+pkg_gen_smtp_repo = https://github.com/Vagabond/gen_smtp
+pkg_gen_smtp_commit = master
+
+PACKAGES += gen_tracker
+pkg_gen_tracker_name = gen_tracker
+pkg_gen_tracker_description = supervisor with ets handling of children and their metadata
+pkg_gen_tracker_homepage = https://github.com/erlyvideo/gen_tracker
+pkg_gen_tracker_fetch = git
+pkg_gen_tracker_repo = https://github.com/erlyvideo/gen_tracker
+pkg_gen_tracker_commit = master
+
+PACKAGES += gen_unix
+pkg_gen_unix_name = gen_unix
+pkg_gen_unix_description = Erlang Unix socket interface
+pkg_gen_unix_homepage = https://github.com/msantos/gen_unix
+pkg_gen_unix_fetch = git
+pkg_gen_unix_repo = https://github.com/msantos/gen_unix
+pkg_gen_unix_commit = master
+
+PACKAGES += geode
+pkg_geode_name = geode
+pkg_geode_description = geohash/proximity lookup in pure, uncut erlang.
+pkg_geode_homepage = https://github.com/bradfordw/geode
+pkg_geode_fetch = git
+pkg_geode_repo = https://github.com/bradfordw/geode
+pkg_geode_commit = master
+
+PACKAGES += getopt
+pkg_getopt_name = getopt
+pkg_getopt_description = Module to parse command line arguments using the GNU getopt syntax
+pkg_getopt_homepage = https://github.com/jcomellas/getopt
+pkg_getopt_fetch = git
+pkg_getopt_repo = https://github.com/jcomellas/getopt
+pkg_getopt_commit = master
+
+PACKAGES += gettext
+pkg_gettext_name = gettext
+pkg_gettext_description = Erlang internationalization library.
+pkg_gettext_homepage = https://github.com/etnt/gettext
+pkg_gettext_fetch = git
+pkg_gettext_repo = https://github.com/etnt/gettext
+pkg_gettext_commit = master
+
+PACKAGES += giallo
+pkg_giallo_name = giallo
+pkg_giallo_description = Small and flexible web framework on top of Cowboy
+pkg_giallo_homepage = https://github.com/kivra/giallo
+pkg_giallo_fetch = git
+pkg_giallo_repo = https://github.com/kivra/giallo
+pkg_giallo_commit = master
+
+PACKAGES += gin
+pkg_gin_name = gin
+pkg_gin_description = Guard helpers for Erlang, implemented as a parse_transform
+pkg_gin_homepage = https://github.com/mad-cocktail/gin
+pkg_gin_fetch = git
+pkg_gin_repo = https://github.com/mad-cocktail/gin
+pkg_gin_commit = master
+
+PACKAGES += gitty
+pkg_gitty_name = gitty
+pkg_gitty_description = Git access in erlang
+pkg_gitty_homepage = https://github.com/maxlapshin/gitty
+pkg_gitty_fetch = git
+pkg_gitty_repo = https://github.com/maxlapshin/gitty
+pkg_gitty_commit = master
+
+PACKAGES += gold_fever
+pkg_gold_fever_name = gold_fever
+pkg_gold_fever_description = A Treasure Hunt for Erlangers
+pkg_gold_fever_homepage = https://github.com/inaka/gold_fever
+pkg_gold_fever_fetch = git
+pkg_gold_fever_repo = https://github.com/inaka/gold_fever
+pkg_gold_fever_commit = master
+
+PACKAGES += gpb
+pkg_gpb_name = gpb
+pkg_gpb_description = A Google Protobuf implementation for Erlang
+pkg_gpb_homepage = https://github.com/tomas-abrahamsson/gpb
+pkg_gpb_fetch = git
+pkg_gpb_repo = https://github.com/tomas-abrahamsson/gpb
+pkg_gpb_commit = master
+
+PACKAGES += gproc
+pkg_gproc_name = gproc
+pkg_gproc_description = Extended process registry for Erlang
+pkg_gproc_homepage = https://github.com/uwiger/gproc
+pkg_gproc_fetch = git
+pkg_gproc_repo = https://github.com/uwiger/gproc
+pkg_gproc_commit = master
+
+PACKAGES += grapherl
+pkg_grapherl_name = grapherl
+pkg_grapherl_description = Create graphs of Erlang systems and programs
+pkg_grapherl_homepage = https://github.com/eproxus/grapherl
+pkg_grapherl_fetch = git
+pkg_grapherl_repo = https://github.com/eproxus/grapherl
+pkg_grapherl_commit = master
+
+PACKAGES += grpc
+pkg_grpc_name = grpc
+pkg_grpc_description = gRPC server in Erlang
+pkg_grpc_homepage = https://github.com/Bluehouse-Technology/grpc
+pkg_grpc_fetch = git
+pkg_grpc_repo = https://github.com/Bluehouse-Technology/grpc
+pkg_grpc_commit = master
+
+PACKAGES += grpc_client
+pkg_grpc_client_name = grpc_client
+pkg_grpc_client_description = gRPC client in Erlang
+pkg_grpc_client_homepage = https://github.com/Bluehouse-Technology/grpc_client
+pkg_grpc_client_fetch = git
+pkg_grpc_client_repo = https://github.com/Bluehouse-Technology/grpc_client
+pkg_grpc_client_commit = master
+
+PACKAGES += gun
+pkg_gun_name = gun
+pkg_gun_description = Asynchronous SPDY, HTTP and Websocket client written in Erlang.
+pkg_gun_homepage = http://ninenines.eu
+pkg_gun_fetch = git
+pkg_gun_repo = https://github.com/ninenines/gun
+pkg_gun_commit = master
+
+PACKAGES += gut
+pkg_gut_name = gut
+pkg_gut_description = gut is a template printing (aka scaffolding) tool for Erlang, like rails generate or yeoman.
+pkg_gut_homepage = https://github.com/unbalancedparentheses/gut
+pkg_gut_fetch = git
+pkg_gut_repo = https://github.com/unbalancedparentheses/gut
+pkg_gut_commit = master
+
+PACKAGES += hackney
+pkg_hackney_name = hackney
+pkg_hackney_description = simple HTTP client in Erlang
+pkg_hackney_homepage = https://github.com/benoitc/hackney
+pkg_hackney_fetch = git
+pkg_hackney_repo = https://github.com/benoitc/hackney
+pkg_hackney_commit = master
+
+PACKAGES += hamcrest
+pkg_hamcrest_name = hamcrest
+pkg_hamcrest_description = Erlang port of Hamcrest
+pkg_hamcrest_homepage = https://github.com/hyperthunk/hamcrest-erlang
+pkg_hamcrest_fetch = git
+pkg_hamcrest_repo = https://github.com/hyperthunk/hamcrest-erlang
+pkg_hamcrest_commit = master
+
+PACKAGES += hanoidb
+pkg_hanoidb_name = hanoidb
+pkg_hanoidb_description = Erlang LSM BTree Storage
+pkg_hanoidb_homepage = https://github.com/krestenkrab/hanoidb
+pkg_hanoidb_fetch = git
+pkg_hanoidb_repo = https://github.com/krestenkrab/hanoidb
+pkg_hanoidb_commit = master
+
+PACKAGES += hottub
+pkg_hottub_name = hottub
+pkg_hottub_description = Permanent Erlang Worker Pool
+pkg_hottub_homepage = https://github.com/bfrog/hottub
+pkg_hottub_fetch = git
+pkg_hottub_repo = https://github.com/bfrog/hottub
+pkg_hottub_commit = master
+
+PACKAGES += hpack
+pkg_hpack_name = hpack
+pkg_hpack_description = HPACK Implementation for Erlang
+pkg_hpack_homepage = https://github.com/joedevivo/hpack
+pkg_hpack_fetch = git
+pkg_hpack_repo = https://github.com/joedevivo/hpack
+pkg_hpack_commit = master
+
+PACKAGES += hyper
+pkg_hyper_name = hyper
+pkg_hyper_description = Erlang implementation of HyperLogLog
+pkg_hyper_homepage = https://github.com/GameAnalytics/hyper
+pkg_hyper_fetch = git
+pkg_hyper_repo = https://github.com/GameAnalytics/hyper
+pkg_hyper_commit = master
+
+PACKAGES += i18n
+pkg_i18n_name = i18n
+pkg_i18n_description = International components for unicode from Erlang (unicode, date, string, number, format, locale, localization, transliteration, icu4e)
+pkg_i18n_homepage = https://github.com/erlang-unicode/i18n
+pkg_i18n_fetch = git
+pkg_i18n_repo = https://github.com/erlang-unicode/i18n
+pkg_i18n_commit = master
+
+PACKAGES += ibrowse
+pkg_ibrowse_name = ibrowse
+pkg_ibrowse_description = Erlang HTTP client
+pkg_ibrowse_homepage = https://github.com/cmullaparthi/ibrowse
+pkg_ibrowse_fetch = git
+pkg_ibrowse_repo = https://github.com/cmullaparthi/ibrowse
+pkg_ibrowse_commit = master
+
+PACKAGES += idna
+pkg_idna_name = idna
+pkg_idna_description = Erlang IDNA lib
+pkg_idna_homepage = https://github.com/benoitc/erlang-idna
+pkg_idna_fetch = git
+pkg_idna_repo = https://github.com/benoitc/erlang-idna
+pkg_idna_commit = master
+
+PACKAGES += ierlang
+pkg_ierlang_name = ierlang
+pkg_ierlang_description = An Erlang language kernel for IPython.
+pkg_ierlang_homepage = https://github.com/robbielynch/ierlang
+pkg_ierlang_fetch = git
+pkg_ierlang_repo = https://github.com/robbielynch/ierlang
+pkg_ierlang_commit = master
+
+PACKAGES += iota
+pkg_iota_name = iota
+pkg_iota_description = iota (Inter-dependency Objective Testing Apparatus) - a tool to enforce clean separation of responsibilities in Erlang code
+pkg_iota_homepage = https://github.com/jpgneves/iota
+pkg_iota_fetch = git
+pkg_iota_repo = https://github.com/jpgneves/iota
+pkg_iota_commit = master
+
+PACKAGES += irc_lib
+pkg_irc_lib_name = irc_lib
+pkg_irc_lib_description = Erlang irc client library
+pkg_irc_lib_homepage = https://github.com/OtpChatBot/irc_lib
+pkg_irc_lib_fetch = git
+pkg_irc_lib_repo = https://github.com/OtpChatBot/irc_lib
+pkg_irc_lib_commit = master
+
+PACKAGES += ircd
+pkg_ircd_name = ircd
+pkg_ircd_description = A pluggable IRC daemon application/library for Erlang.
+pkg_ircd_homepage = https://github.com/tonyg/erlang-ircd
+pkg_ircd_fetch = git
+pkg_ircd_repo = https://github.com/tonyg/erlang-ircd
+pkg_ircd_commit = master
+
+PACKAGES += iris
+pkg_iris_name = iris
+pkg_iris_description = Iris Erlang binding
+pkg_iris_homepage = https://github.com/project-iris/iris-erl
+pkg_iris_fetch = git
+pkg_iris_repo = https://github.com/project-iris/iris-erl
+pkg_iris_commit = master
+
+PACKAGES += iso8601
+pkg_iso8601_name = iso8601
+pkg_iso8601_description = Erlang ISO 8601 date formatter/parser
+pkg_iso8601_homepage = https://github.com/seansawyer/erlang_iso8601
+pkg_iso8601_fetch = git
+pkg_iso8601_repo = https://github.com/seansawyer/erlang_iso8601
+pkg_iso8601_commit = master
+
+PACKAGES += jamdb_sybase
+pkg_jamdb_sybase_name = jamdb_sybase
+pkg_jamdb_sybase_description = Erlang driver for SAP Sybase ASE
+pkg_jamdb_sybase_homepage = https://github.com/erlangbureau/jamdb_sybase
+pkg_jamdb_sybase_fetch = git
+pkg_jamdb_sybase_repo = https://github.com/erlangbureau/jamdb_sybase
+pkg_jamdb_sybase_commit = master
+
+PACKAGES += jerg
+pkg_jerg_name = jerg
+pkg_jerg_description = JSON Schema to Erlang Records Generator
+pkg_jerg_homepage = https://github.com/ddossot/jerg
+pkg_jerg_fetch = git
+pkg_jerg_repo = https://github.com/ddossot/jerg
+pkg_jerg_commit = master
+
+PACKAGES += jesse
+pkg_jesse_name = jesse
+pkg_jesse_description = jesse (JSon Schema Erlang) is an implementation of a json schema validator for Erlang.
+pkg_jesse_homepage = https://github.com/for-GET/jesse
+pkg_jesse_fetch = git
+pkg_jesse_repo = https://github.com/for-GET/jesse
+pkg_jesse_commit = master
+
+PACKAGES += jiffy
+pkg_jiffy_name = jiffy
+pkg_jiffy_description = JSON NIFs for Erlang.
+pkg_jiffy_homepage = https://github.com/davisp/jiffy
+pkg_jiffy_fetch = git
+pkg_jiffy_repo = https://github.com/davisp/jiffy
+pkg_jiffy_commit = master
+
+PACKAGES += jiffy_v
+pkg_jiffy_v_name = jiffy_v
+pkg_jiffy_v_description = JSON validation utility
+pkg_jiffy_v_homepage = https://github.com/shizzard/jiffy-v
+pkg_jiffy_v_fetch = git
+pkg_jiffy_v_repo = https://github.com/shizzard/jiffy-v
+pkg_jiffy_v_commit = master
+
+PACKAGES += jobs
+pkg_jobs_name = jobs
+pkg_jobs_description = a Job scheduler for load regulation
+pkg_jobs_homepage = https://github.com/esl/jobs
+pkg_jobs_fetch = git
+pkg_jobs_repo = https://github.com/esl/jobs
+pkg_jobs_commit = master
+
+PACKAGES += joxa
+pkg_joxa_name = joxa
+pkg_joxa_description = A Modern Lisp for the Erlang VM
+pkg_joxa_homepage = https://github.com/joxa/joxa
+pkg_joxa_fetch = git
+pkg_joxa_repo = https://github.com/joxa/joxa
+pkg_joxa_commit = master
+
+PACKAGES += json
+pkg_json_name = json
+pkg_json_description = a high level json library for erlang (17.0+)
+pkg_json_homepage = https://github.com/talentdeficit/json
+pkg_json_fetch = git
+pkg_json_repo = https://github.com/talentdeficit/json
+pkg_json_commit = master
+
+PACKAGES += json_rec
+pkg_json_rec_name = json_rec
+pkg_json_rec_description = JSON to erlang record
+pkg_json_rec_homepage = https://github.com/justinkirby/json_rec
+pkg_json_rec_fetch = git
+pkg_json_rec_repo = https://github.com/justinkirby/json_rec
+pkg_json_rec_commit = master
+
+PACKAGES += jsone
+pkg_jsone_name = jsone
+pkg_jsone_description = An Erlang library for encoding and decoding JSON data.
+pkg_jsone_homepage = https://github.com/sile/jsone.git
+pkg_jsone_fetch = git
+pkg_jsone_repo = https://github.com/sile/jsone.git
+pkg_jsone_commit = master
+
+PACKAGES += jsonerl
+pkg_jsonerl_name = jsonerl
+pkg_jsonerl_description = yet another but slightly different erlang <-> json encoder/decoder
+pkg_jsonerl_homepage = https://github.com/lambder/jsonerl
+pkg_jsonerl_fetch = git
+pkg_jsonerl_repo = https://github.com/lambder/jsonerl
+pkg_jsonerl_commit = master
+
+PACKAGES += jsonpath
+pkg_jsonpath_name = jsonpath
+pkg_jsonpath_description = Fast Erlang JSON data retrieval and updates via javascript-like notation
+pkg_jsonpath_homepage = https://github.com/GeneStevens/jsonpath
+pkg_jsonpath_fetch = git
+pkg_jsonpath_repo = https://github.com/GeneStevens/jsonpath
+pkg_jsonpath_commit = master
+
+PACKAGES += jsonx
+pkg_jsonx_name = jsonx
+pkg_jsonx_description = JSONX is an Erlang library, written in C, for efficiently decoding and encoding JSON.
+pkg_jsonx_homepage = https://github.com/iskra/jsonx
+pkg_jsonx_fetch = git
+pkg_jsonx_repo = https://github.com/iskra/jsonx
+pkg_jsonx_commit = master
+
+PACKAGES += jsx
+pkg_jsx_name = jsx
+pkg_jsx_description = An Erlang application for consuming, producing and manipulating JSON.
+pkg_jsx_homepage = https://github.com/talentdeficit/jsx
+pkg_jsx_fetch = git
+pkg_jsx_repo = https://github.com/talentdeficit/jsx
+pkg_jsx_commit = master
+
+PACKAGES += kafka
+pkg_kafka_name = kafka
+pkg_kafka_description = Kafka consumer and producer in Erlang
+pkg_kafka_homepage = https://github.com/wooga/kafka-erlang
+pkg_kafka_fetch = git
+pkg_kafka_repo = https://github.com/wooga/kafka-erlang
+pkg_kafka_commit = master
+
+PACKAGES += kafka_protocol
+pkg_kafka_protocol_name = kafka_protocol
+pkg_kafka_protocol_description = Kafka protocol Erlang library
+pkg_kafka_protocol_homepage = https://github.com/klarna/kafka_protocol
+pkg_kafka_protocol_fetch = git
+pkg_kafka_protocol_repo = https://github.com/klarna/kafka_protocol.git
+pkg_kafka_protocol_commit = master
+
+PACKAGES += kai
+pkg_kai_name = kai
+pkg_kai_description = DHT storage by Takeshi Inoue
+pkg_kai_homepage = https://github.com/synrc/kai
+pkg_kai_fetch = git
+pkg_kai_repo = https://github.com/synrc/kai
+pkg_kai_commit = master
+
+PACKAGES += katja
+pkg_katja_name = katja
+pkg_katja_description = A simple Riemann client written in Erlang.
+pkg_katja_homepage = https://github.com/nifoc/katja
+pkg_katja_fetch = git
+pkg_katja_repo = https://github.com/nifoc/katja
+pkg_katja_commit = master
+
+PACKAGES += kdht
+pkg_kdht_name = kdht
+pkg_kdht_description = kdht is an erlang DHT implementation
+pkg_kdht_homepage = https://github.com/kevinlynx/kdht
+pkg_kdht_fetch = git
+pkg_kdht_repo = https://github.com/kevinlynx/kdht
+pkg_kdht_commit = master
+
+PACKAGES += key2value
+pkg_key2value_name = key2value
+pkg_key2value_description = Erlang 2-way map
+pkg_key2value_homepage = https://github.com/okeuday/key2value
+pkg_key2value_fetch = git
+pkg_key2value_repo = https://github.com/okeuday/key2value
+pkg_key2value_commit = master
+
+PACKAGES += keys1value
+pkg_keys1value_name = keys1value
+pkg_keys1value_description = Erlang set associative map for key lists
+pkg_keys1value_homepage = https://github.com/okeuday/keys1value
+pkg_keys1value_fetch = git
+pkg_keys1value_repo = https://github.com/okeuday/keys1value
+pkg_keys1value_commit = master
+
+PACKAGES += kinetic
+pkg_kinetic_name = kinetic
+pkg_kinetic_description = Erlang Kinesis Client
+pkg_kinetic_homepage = https://github.com/AdRoll/kinetic
+pkg_kinetic_fetch = git
+pkg_kinetic_repo = https://github.com/AdRoll/kinetic
+pkg_kinetic_commit = master
+
+PACKAGES += kjell
+pkg_kjell_name = kjell
+pkg_kjell_description = Erlang Shell
+pkg_kjell_homepage = https://github.com/karlll/kjell
+pkg_kjell_fetch = git
+pkg_kjell_repo = https://github.com/karlll/kjell
+pkg_kjell_commit = master
+
+PACKAGES += kraken
+pkg_kraken_name = kraken
+pkg_kraken_description = Distributed Pubsub Server for Realtime Apps
+pkg_kraken_homepage = https://github.com/Asana/kraken
+pkg_kraken_fetch = git
+pkg_kraken_repo = https://github.com/Asana/kraken
+pkg_kraken_commit = master
+
+PACKAGES += kucumberl
+pkg_kucumberl_name = kucumberl
+pkg_kucumberl_description = A pure-erlang, open-source, implementation of Cucumber
+pkg_kucumberl_homepage = https://github.com/openshine/kucumberl
+pkg_kucumberl_fetch = git
+pkg_kucumberl_repo = https://github.com/openshine/kucumberl
+pkg_kucumberl_commit = master
+
+PACKAGES += kvc
+pkg_kvc_name = kvc
+pkg_kvc_description = KVC - Key Value Coding for Erlang data structures
+pkg_kvc_homepage = https://github.com/etrepum/kvc
+pkg_kvc_fetch = git
+pkg_kvc_repo = https://github.com/etrepum/kvc
+pkg_kvc_commit = master
+
+PACKAGES += kvlists
+pkg_kvlists_name = kvlists
+pkg_kvlists_description = Lists of key-value pairs (decoded JSON) in Erlang
+pkg_kvlists_homepage = https://github.com/jcomellas/kvlists
+pkg_kvlists_fetch = git
+pkg_kvlists_repo = https://github.com/jcomellas/kvlists
+pkg_kvlists_commit = master
+
+PACKAGES += kvs
+pkg_kvs_name = kvs
+pkg_kvs_description = Container and Iterator
+pkg_kvs_homepage = https://github.com/synrc/kvs
+pkg_kvs_fetch = git
+pkg_kvs_repo = https://github.com/synrc/kvs
+pkg_kvs_commit = master
+
+PACKAGES += lager
+pkg_lager_name = lager
+pkg_lager_description = A logging framework for Erlang/OTP.
+pkg_lager_homepage = https://github.com/erlang-lager/lager
+pkg_lager_fetch = git
+pkg_lager_repo = https://github.com/erlang-lager/lager
+pkg_lager_commit = master
+
+PACKAGES += lager_amqp_backend
+pkg_lager_amqp_backend_name = lager_amqp_backend
+pkg_lager_amqp_backend_description = AMQP RabbitMQ Lager backend
+pkg_lager_amqp_backend_homepage = https://github.com/jbrisbin/lager_amqp_backend
+pkg_lager_amqp_backend_fetch = git
+pkg_lager_amqp_backend_repo = https://github.com/jbrisbin/lager_amqp_backend
+pkg_lager_amqp_backend_commit = master
+
+PACKAGES += lager_syslog
+pkg_lager_syslog_name = lager_syslog
+pkg_lager_syslog_description = Syslog backend for lager
+pkg_lager_syslog_homepage = https://github.com/erlang-lager/lager_syslog
+pkg_lager_syslog_fetch = git
+pkg_lager_syslog_repo = https://github.com/erlang-lager/lager_syslog
+pkg_lager_syslog_commit = master
+
+PACKAGES += lambdapad
+pkg_lambdapad_name = lambdapad
+pkg_lambdapad_description = Static site generator using Erlang. Yes, Erlang.
+pkg_lambdapad_homepage = https://github.com/gar1t/lambdapad
+pkg_lambdapad_fetch = git
+pkg_lambdapad_repo = https://github.com/gar1t/lambdapad
+pkg_lambdapad_commit = master
+
+PACKAGES += lasp
+pkg_lasp_name = lasp
+pkg_lasp_description = A Language for Distributed, Eventually Consistent Computations
+pkg_lasp_homepage = http://lasp-lang.org/
+pkg_lasp_fetch = git
+pkg_lasp_repo = https://github.com/lasp-lang/lasp
+pkg_lasp_commit = master
+
+PACKAGES += lasse
+pkg_lasse_name = lasse
+pkg_lasse_description = SSE handler for Cowboy
+pkg_lasse_homepage = https://github.com/inaka/lasse
+pkg_lasse_fetch = git
+pkg_lasse_repo = https://github.com/inaka/lasse
+pkg_lasse_commit = master
+
+PACKAGES += ldap
+pkg_ldap_name = ldap
+pkg_ldap_description = LDAP server written in Erlang
+pkg_ldap_homepage = https://github.com/spawnproc/ldap
+pkg_ldap_fetch = git
+pkg_ldap_repo = https://github.com/spawnproc/ldap
+pkg_ldap_commit = master
+
+PACKAGES += lethink
+pkg_lethink_name = lethink
+pkg_lethink_description = erlang driver for rethinkdb
+pkg_lethink_homepage = https://github.com/taybin/lethink
+pkg_lethink_fetch = git
+pkg_lethink_repo = https://github.com/taybin/lethink
+pkg_lethink_commit = master
+
+PACKAGES += lfe
+pkg_lfe_name = lfe
+pkg_lfe_description = Lisp Flavoured Erlang (LFE)
+pkg_lfe_homepage = https://github.com/rvirding/lfe
+pkg_lfe_fetch = git
+pkg_lfe_repo = https://github.com/rvirding/lfe
+pkg_lfe_commit = master
+
+PACKAGES += ling
+pkg_ling_name = ling
+pkg_ling_description = Erlang on Xen
+pkg_ling_homepage = https://github.com/cloudozer/ling
+pkg_ling_fetch = git
+pkg_ling_repo = https://github.com/cloudozer/ling
+pkg_ling_commit = master
+
+PACKAGES += live
+pkg_live_name = live
+pkg_live_description = Automated module and configuration reloader.
+pkg_live_homepage = http://ninenines.eu
+pkg_live_fetch = git
+pkg_live_repo = https://github.com/ninenines/live
+pkg_live_commit = master
+
+PACKAGES += lmq
+pkg_lmq_name = lmq
+pkg_lmq_description = Lightweight Message Queue
+pkg_lmq_homepage = https://github.com/iij/lmq
+pkg_lmq_fetch = git
+pkg_lmq_repo = https://github.com/iij/lmq
+pkg_lmq_commit = master
+
+PACKAGES += locker
+pkg_locker_name = locker
+pkg_locker_description = Atomic distributed 'check and set' for short-lived keys
+pkg_locker_homepage = https://github.com/wooga/locker
+pkg_locker_fetch = git
+pkg_locker_repo = https://github.com/wooga/locker
+pkg_locker_commit = master
+
+PACKAGES += locks
+pkg_locks_name = locks
+pkg_locks_description = A scalable, deadlock-resolving resource locker
+pkg_locks_homepage = https://github.com/uwiger/locks
+pkg_locks_fetch = git
+pkg_locks_repo = https://github.com/uwiger/locks
+pkg_locks_commit = master
+
+PACKAGES += log4erl
+pkg_log4erl_name = log4erl
+pkg_log4erl_description = A logger for erlang in the spirit of Log4J.
+pkg_log4erl_homepage = https://github.com/ahmednawras/log4erl
+pkg_log4erl_fetch = git
+pkg_log4erl_repo = https://github.com/ahmednawras/log4erl
+pkg_log4erl_commit = master
+
+PACKAGES += lol
+pkg_lol_name = lol
+pkg_lol_description = Lisp on erLang, and programming is fun again
+pkg_lol_homepage = https://github.com/b0oh/lol
+pkg_lol_fetch = git
+pkg_lol_repo = https://github.com/b0oh/lol
+pkg_lol_commit = master
+
+PACKAGES += lucid
+pkg_lucid_name = lucid
+pkg_lucid_description = HTTP/2 server written in Erlang
+pkg_lucid_homepage = https://github.com/tatsuhiro-t/lucid
+pkg_lucid_fetch = git
+pkg_lucid_repo = https://github.com/tatsuhiro-t/lucid
+pkg_lucid_commit = master
+
+PACKAGES += luerl
+pkg_luerl_name = luerl
+pkg_luerl_description = Lua in Erlang
+pkg_luerl_homepage = https://github.com/rvirding/luerl
+pkg_luerl_fetch = git
+pkg_luerl_repo = https://github.com/rvirding/luerl
+pkg_luerl_commit = develop
+
+PACKAGES += luwak
+pkg_luwak_name = luwak
+pkg_luwak_description = Large-object storage interface for Riak
+pkg_luwak_homepage = https://github.com/basho/luwak
+pkg_luwak_fetch = git
+pkg_luwak_repo = https://github.com/basho/luwak
+pkg_luwak_commit = master
+
+PACKAGES += lux
+pkg_lux_name = lux
+pkg_lux_description = Lux (LUcid eXpect scripting) simplifies test automation and provides an Expect-style execution of commands
+pkg_lux_homepage = https://github.com/hawk/lux
+pkg_lux_fetch = git
+pkg_lux_repo = https://github.com/hawk/lux
+pkg_lux_commit = master
+
+PACKAGES += machi
+pkg_machi_name = machi
+pkg_machi_description = Machi file store
+pkg_machi_homepage = https://github.com/basho/machi
+pkg_machi_fetch = git
+pkg_machi_repo = https://github.com/basho/machi
+pkg_machi_commit = master
+
+PACKAGES += mad
+pkg_mad_name = mad
+pkg_mad_description = Small and Fast Rebar Replacement
+pkg_mad_homepage = https://github.com/synrc/mad
+pkg_mad_fetch = git
+pkg_mad_repo = https://github.com/synrc/mad
+pkg_mad_commit = master
+
+PACKAGES += marina
+pkg_marina_name = marina
+pkg_marina_description = Non-blocking Erlang Cassandra CQL3 client
+pkg_marina_homepage = https://github.com/lpgauth/marina
+pkg_marina_fetch = git
+pkg_marina_repo = https://github.com/lpgauth/marina
+pkg_marina_commit = master
+
+PACKAGES += mavg
+pkg_mavg_name = mavg
+pkg_mavg_description = Erlang :: Exponential moving average library
+pkg_mavg_homepage = https://github.com/EchoTeam/mavg
+pkg_mavg_fetch = git
+pkg_mavg_repo = https://github.com/EchoTeam/mavg
+pkg_mavg_commit = master
+
+PACKAGES += mc_erl
+pkg_mc_erl_name = mc_erl
+pkg_mc_erl_description = mc-erl is a server for Minecraft 1.4.7 written in Erlang.
+pkg_mc_erl_homepage = https://github.com/clonejo/mc-erl
+pkg_mc_erl_fetch = git
+pkg_mc_erl_repo = https://github.com/clonejo/mc-erl
+pkg_mc_erl_commit = master
+
+PACKAGES += mcd
+pkg_mcd_name = mcd
+pkg_mcd_description = Fast memcached protocol client in pure Erlang
+pkg_mcd_homepage = https://github.com/EchoTeam/mcd
+pkg_mcd_fetch = git
+pkg_mcd_repo = https://github.com/EchoTeam/mcd
+pkg_mcd_commit = master
+
+PACKAGES += mcerlang
+pkg_mcerlang_name = mcerlang
+pkg_mcerlang_description = The McErlang model checker for Erlang
+pkg_mcerlang_homepage = https://github.com/fredlund/McErlang
+pkg_mcerlang_fetch = git
+pkg_mcerlang_repo = https://github.com/fredlund/McErlang
+pkg_mcerlang_commit = master
+
+PACKAGES += meck
+pkg_meck_name = meck
+pkg_meck_description = A mocking library for Erlang
+pkg_meck_homepage = https://github.com/eproxus/meck
+pkg_meck_fetch = git
+pkg_meck_repo = https://github.com/eproxus/meck
+pkg_meck_commit = master
+
+PACKAGES += mekao
+pkg_mekao_name = mekao
+pkg_mekao_description = SQL constructor
+pkg_mekao_homepage = https://github.com/ddosia/mekao
+pkg_mekao_fetch = git
+pkg_mekao_repo = https://github.com/ddosia/mekao
+pkg_mekao_commit = master
+
+PACKAGES += memo
+pkg_memo_name = memo
+pkg_memo_description = Erlang memoization server
+pkg_memo_homepage = https://github.com/tuncer/memo
+pkg_memo_fetch = git
+pkg_memo_repo = https://github.com/tuncer/memo
+pkg_memo_commit = master
+
+PACKAGES += merge_index
+pkg_merge_index_name = merge_index
+pkg_merge_index_description = MergeIndex is an Erlang library for storing ordered sets on disk. It is very similar to an SSTable (in Google's Bigtable) or an HFile (in Hadoop).
+pkg_merge_index_homepage = https://github.com/basho/merge_index
+pkg_merge_index_fetch = git
+pkg_merge_index_repo = https://github.com/basho/merge_index
+pkg_merge_index_commit = master
+
+PACKAGES += merl
+pkg_merl_name = merl
+pkg_merl_description = Metaprogramming in Erlang
+pkg_merl_homepage = https://github.com/richcarl/merl
+pkg_merl_fetch = git
+pkg_merl_repo = https://github.com/richcarl/merl
+pkg_merl_commit = master
+
+PACKAGES += mimerl
+pkg_mimerl_name = mimerl
+pkg_mimerl_description = library to handle mimetypes
+pkg_mimerl_homepage = https://github.com/benoitc/mimerl
+pkg_mimerl_fetch = git
+pkg_mimerl_repo = https://github.com/benoitc/mimerl
+pkg_mimerl_commit = master
+
+PACKAGES += mimetypes
+pkg_mimetypes_name = mimetypes
+pkg_mimetypes_description = Erlang MIME types library
+pkg_mimetypes_homepage = https://github.com/spawngrid/mimetypes
+pkg_mimetypes_fetch = git
+pkg_mimetypes_repo = https://github.com/spawngrid/mimetypes
+pkg_mimetypes_commit = master
+
+PACKAGES += mixer
+pkg_mixer_name = mixer
+pkg_mixer_description = Mix in functions from other modules
+pkg_mixer_homepage = https://github.com/chef/mixer
+pkg_mixer_fetch = git
+pkg_mixer_repo = https://github.com/chef/mixer
+pkg_mixer_commit = master
+
+PACKAGES += mochiweb
+pkg_mochiweb_name = mochiweb
+pkg_mochiweb_description = MochiWeb is an Erlang library for building lightweight HTTP servers.
+pkg_mochiweb_homepage = https://github.com/mochi/mochiweb
+pkg_mochiweb_fetch = git
+pkg_mochiweb_repo = https://github.com/mochi/mochiweb
+pkg_mochiweb_commit = master
+
+PACKAGES += mochiweb_xpath
+pkg_mochiweb_xpath_name = mochiweb_xpath
+pkg_mochiweb_xpath_description = XPath support for mochiweb's html parser
+pkg_mochiweb_xpath_homepage = https://github.com/retnuh/mochiweb_xpath
+pkg_mochiweb_xpath_fetch = git
+pkg_mochiweb_xpath_repo = https://github.com/retnuh/mochiweb_xpath
+pkg_mochiweb_xpath_commit = master
+
+PACKAGES += mockgyver
+pkg_mockgyver_name = mockgyver
+pkg_mockgyver_description = A mocking library for Erlang
+pkg_mockgyver_homepage = https://github.com/klajo/mockgyver
+pkg_mockgyver_fetch = git
+pkg_mockgyver_repo = https://github.com/klajo/mockgyver
+pkg_mockgyver_commit = master
+
+PACKAGES += modlib
+pkg_modlib_name = modlib
+pkg_modlib_description = Web framework based on Erlang's inets httpd
+pkg_modlib_homepage = https://github.com/gar1t/modlib
+pkg_modlib_fetch = git
+pkg_modlib_repo = https://github.com/gar1t/modlib
+pkg_modlib_commit = master
+
+PACKAGES += mongodb
+pkg_mongodb_name = mongodb
+pkg_mongodb_description = MongoDB driver for Erlang
+pkg_mongodb_homepage = https://github.com/comtihon/mongodb-erlang
+pkg_mongodb_fetch = git
+pkg_mongodb_repo = https://github.com/comtihon/mongodb-erlang
+pkg_mongodb_commit = master
+
+PACKAGES += mongooseim
+pkg_mongooseim_name = mongooseim
+pkg_mongooseim_description = Jabber / XMPP server with focus on performance and scalability, by Erlang Solutions
+pkg_mongooseim_homepage = https://www.erlang-solutions.com/products/mongooseim-massively-scalable-ejabberd-platform
+pkg_mongooseim_fetch = git
+pkg_mongooseim_repo = https://github.com/esl/MongooseIM
+pkg_mongooseim_commit = master
+
+PACKAGES += moyo
+pkg_moyo_name = moyo
+pkg_moyo_description = Erlang utility functions library
+pkg_moyo_homepage = https://github.com/dwango/moyo
+pkg_moyo_fetch = git
+pkg_moyo_repo = https://github.com/dwango/moyo
+pkg_moyo_commit = master
+
+PACKAGES += msgpack
+pkg_msgpack_name = msgpack
+pkg_msgpack_description = MessagePack (de)serializer implementation for Erlang
+pkg_msgpack_homepage = https://github.com/msgpack/msgpack-erlang
+pkg_msgpack_fetch = git
+pkg_msgpack_repo = https://github.com/msgpack/msgpack-erlang
+pkg_msgpack_commit = master
+
+PACKAGES += mu2
+pkg_mu2_name = mu2
+pkg_mu2_description = Erlang mutation testing tool
+pkg_mu2_homepage = https://github.com/ramsay-t/mu2
+pkg_mu2_fetch = git
+pkg_mu2_repo = https://github.com/ramsay-t/mu2
+pkg_mu2_commit = master
+
+PACKAGES += mustache
+pkg_mustache_name = mustache
+pkg_mustache_description = Mustache template engine for Erlang.
+pkg_mustache_homepage = https://github.com/mojombo/mustache.erl
+pkg_mustache_fetch = git
+pkg_mustache_repo = https://github.com/mojombo/mustache.erl
+pkg_mustache_commit = master
+
+PACKAGES += myproto
+pkg_myproto_name = myproto
+pkg_myproto_description = MySQL Server Protocol in Erlang
+pkg_myproto_homepage = https://github.com/altenwald/myproto
+pkg_myproto_fetch = git
+pkg_myproto_repo = https://github.com/altenwald/myproto
+pkg_myproto_commit = master
+
+PACKAGES += mysql
+pkg_mysql_name = mysql
+pkg_mysql_description = MySQL client library for Erlang/OTP
+pkg_mysql_homepage = https://github.com/mysql-otp/mysql-otp
+pkg_mysql_fetch = git
+pkg_mysql_repo = https://github.com/mysql-otp/mysql-otp
+pkg_mysql_commit = 1.5.1
+
+PACKAGES += n2o
+pkg_n2o_name = n2o
+pkg_n2o_description = WebSocket Application Server
+pkg_n2o_homepage = https://github.com/5HT/n2o
+pkg_n2o_fetch = git
+pkg_n2o_repo = https://github.com/5HT/n2o
+pkg_n2o_commit = master
+
+PACKAGES += nat_upnp
+pkg_nat_upnp_name = nat_upnp
+pkg_nat_upnp_description = Erlang library to map your internal port to an external one using UPnP IGD
+pkg_nat_upnp_homepage = https://github.com/benoitc/nat_upnp
+pkg_nat_upnp_fetch = git
+pkg_nat_upnp_repo = https://github.com/benoitc/nat_upnp
+pkg_nat_upnp_commit = master
+
+PACKAGES += neo4j
+pkg_neo4j_name = neo4j
+pkg_neo4j_description = Erlang client library for Neo4J.
+pkg_neo4j_homepage = https://github.com/dmitriid/neo4j-erlang
+pkg_neo4j_fetch = git
+pkg_neo4j_repo = https://github.com/dmitriid/neo4j-erlang
+pkg_neo4j_commit = master
+
+PACKAGES += neotoma
+pkg_neotoma_name = neotoma
+pkg_neotoma_description = Erlang library and packrat parser-generator for parsing expression grammars.
+pkg_neotoma_homepage = https://github.com/seancribbs/neotoma
+pkg_neotoma_fetch = git
+pkg_neotoma_repo = https://github.com/seancribbs/neotoma
+pkg_neotoma_commit = master
+
+PACKAGES += newrelic
+pkg_newrelic_name = newrelic
+pkg_newrelic_description = Erlang library for sending metrics to New Relic
+pkg_newrelic_homepage = https://github.com/wooga/newrelic-erlang
+pkg_newrelic_fetch = git
+pkg_newrelic_repo = https://github.com/wooga/newrelic-erlang
+pkg_newrelic_commit = master
+
+PACKAGES += nifty
+pkg_nifty_name = nifty
+pkg_nifty_description = Erlang NIF wrapper generator
+pkg_nifty_homepage = https://github.com/parapluu/nifty
+pkg_nifty_fetch = git
+pkg_nifty_repo = https://github.com/parapluu/nifty
+pkg_nifty_commit = master
+
+PACKAGES += nitrogen_core
+pkg_nitrogen_core_name = nitrogen_core
+pkg_nitrogen_core_description = The core Nitrogen library.
+pkg_nitrogen_core_homepage = http://nitrogenproject.com/
+pkg_nitrogen_core_fetch = git
+pkg_nitrogen_core_repo = https://github.com/nitrogen/nitrogen_core
+pkg_nitrogen_core_commit = master
+
+PACKAGES += nkbase
+pkg_nkbase_name = nkbase
+pkg_nkbase_description = NkBASE distributed database
+pkg_nkbase_homepage = https://github.com/Nekso/nkbase
+pkg_nkbase_fetch = git
+pkg_nkbase_repo = https://github.com/Nekso/nkbase
+pkg_nkbase_commit = develop
+
+PACKAGES += nkdocker
+pkg_nkdocker_name = nkdocker
+pkg_nkdocker_description = Erlang Docker client
+pkg_nkdocker_homepage = https://github.com/Nekso/nkdocker
+pkg_nkdocker_fetch = git
+pkg_nkdocker_repo = https://github.com/Nekso/nkdocker
+pkg_nkdocker_commit = master
+
+PACKAGES += nkpacket
+pkg_nkpacket_name = nkpacket
+pkg_nkpacket_description = Generic Erlang transport layer
+pkg_nkpacket_homepage = https://github.com/Nekso/nkpacket
+pkg_nkpacket_fetch = git
+pkg_nkpacket_repo = https://github.com/Nekso/nkpacket
+pkg_nkpacket_commit = master
+
+PACKAGES += nksip
+pkg_nksip_name = nksip
+pkg_nksip_description = Erlang SIP application server
+pkg_nksip_homepage = https://github.com/kalta/nksip
+pkg_nksip_fetch = git
+pkg_nksip_repo = https://github.com/kalta/nksip
+pkg_nksip_commit = master
+
+PACKAGES += nodefinder
+pkg_nodefinder_name = nodefinder
+pkg_nodefinder_description = automatic node discovery via UDP multicast
+pkg_nodefinder_homepage = https://github.com/erlanger/nodefinder
+pkg_nodefinder_fetch = git
+pkg_nodefinder_repo = https://github.com/okeuday/nodefinder
+pkg_nodefinder_commit = master
+
+PACKAGES += nprocreg
+pkg_nprocreg_name = nprocreg
+pkg_nprocreg_description = Minimal Distributed Erlang Process Registry
+pkg_nprocreg_homepage = http://nitrogenproject.com/
+pkg_nprocreg_fetch = git
+pkg_nprocreg_repo = https://github.com/nitrogen/nprocreg
+pkg_nprocreg_commit = master
+
+PACKAGES += oauth
+pkg_oauth_name = oauth
+pkg_oauth_description = An Erlang OAuth 1.0 implementation
+pkg_oauth_homepage = https://github.com/tim/erlang-oauth
+pkg_oauth_fetch = git
+pkg_oauth_repo = https://github.com/tim/erlang-oauth
+pkg_oauth_commit = master
+
+PACKAGES += oauth2
+pkg_oauth2_name = oauth2
+pkg_oauth2_description = Erlang Oauth2 implementation
+pkg_oauth2_homepage = https://github.com/kivra/oauth2
+pkg_oauth2_fetch = git
+pkg_oauth2_repo = https://github.com/kivra/oauth2
+pkg_oauth2_commit = master
+
+PACKAGES += observer_cli
+pkg_observer_cli_name = observer_cli
+pkg_observer_cli_description = Visualize Erlang/Elixir Nodes On The Command Line
+pkg_observer_cli_homepage = http://zhongwencool.github.io/observer_cli
+pkg_observer_cli_fetch = git
+pkg_observer_cli_repo = https://github.com/zhongwencool/observer_cli
+pkg_observer_cli_commit = master
+
+PACKAGES += octopus
+pkg_octopus_name = octopus
+pkg_octopus_description = Small and flexible pool manager written in Erlang
+pkg_octopus_homepage = https://github.com/erlangbureau/octopus
+pkg_octopus_fetch = git
+pkg_octopus_repo = https://github.com/erlangbureau/octopus
+pkg_octopus_commit = master
+
+PACKAGES += of_protocol
+pkg_of_protocol_name = of_protocol
+pkg_of_protocol_description = OpenFlow Protocol Library for Erlang
+pkg_of_protocol_homepage = https://github.com/FlowForwarding/of_protocol
+pkg_of_protocol_fetch = git
+pkg_of_protocol_repo = https://github.com/FlowForwarding/of_protocol
+pkg_of_protocol_commit = master
+
+PACKAGES += opencouch
+pkg_opencouch_name = couch
+pkg_opencouch_description = An embeddable document-oriented database compatible with Apache CouchDB
+pkg_opencouch_homepage = https://github.com/benoitc/opencouch
+pkg_opencouch_fetch = git
+pkg_opencouch_repo = https://github.com/benoitc/opencouch
+pkg_opencouch_commit = master
+
+PACKAGES += openflow
+pkg_openflow_name = openflow
+pkg_openflow_description = An OpenFlow controller written in pure erlang
+pkg_openflow_homepage = https://github.com/renatoaguiar/erlang-openflow
+pkg_openflow_fetch = git
+pkg_openflow_repo = https://github.com/renatoaguiar/erlang-openflow
+pkg_openflow_commit = master
+
+PACKAGES += openid
+pkg_openid_name = openid
+pkg_openid_description = Erlang OpenID
+pkg_openid_homepage = https://github.com/brendonh/erl_openid
+pkg_openid_fetch = git
+pkg_openid_repo = https://github.com/brendonh/erl_openid
+pkg_openid_commit = master
+
+PACKAGES += openpoker
+pkg_openpoker_name = openpoker
+pkg_openpoker_description = Genesis Texas hold'em Game Server
+pkg_openpoker_homepage = https://github.com/hpyhacking/openpoker
+pkg_openpoker_fetch = git
+pkg_openpoker_repo = https://github.com/hpyhacking/openpoker
+pkg_openpoker_commit = master
+
+PACKAGES += otpbp
+pkg_otpbp_name = otpbp
+pkg_otpbp_description = Parse transformer for using new OTP functions in old Erlang/OTP releases (R15, R16, 17, 18, 19)
+pkg_otpbp_homepage = https://github.com/Ledest/otpbp
+pkg_otpbp_fetch = git
+pkg_otpbp_repo = https://github.com/Ledest/otpbp
+pkg_otpbp_commit = master
+
+PACKAGES += pal
+pkg_pal_name = pal
+pkg_pal_description = Pragmatic Authentication Library
+pkg_pal_homepage = https://github.com/manifest/pal
+pkg_pal_fetch = git
+pkg_pal_repo = https://github.com/manifest/pal
+pkg_pal_commit = master
+
+PACKAGES += parse_trans
+pkg_parse_trans_name = parse_trans
+pkg_parse_trans_description = Parse transform utilities for Erlang
+pkg_parse_trans_homepage = https://github.com/uwiger/parse_trans
+pkg_parse_trans_fetch = git
+pkg_parse_trans_repo = https://github.com/uwiger/parse_trans
+pkg_parse_trans_commit = master
+
+PACKAGES += parsexml
+pkg_parsexml_name = parsexml
+pkg_parsexml_description = Simple DOM XML parser with convenient and very simple API
+pkg_parsexml_homepage = https://github.com/maxlapshin/parsexml
+pkg_parsexml_fetch = git
+pkg_parsexml_repo = https://github.com/maxlapshin/parsexml
+pkg_parsexml_commit = master
+
+PACKAGES += partisan
+pkg_partisan_name = partisan
+pkg_partisan_description = High-performance, high-scalability distributed computing with Erlang and Elixir.
+pkg_partisan_homepage = http://partisan.cloud
+pkg_partisan_fetch = git
+pkg_partisan_repo = https://github.com/lasp-lang/partisan
+pkg_partisan_commit = master
+
+PACKAGES += pegjs
+pkg_pegjs_name = pegjs
+pkg_pegjs_description = An implementation of PEG.js grammar for Erlang.
+pkg_pegjs_homepage = https://github.com/dmitriid/pegjs
+pkg_pegjs_fetch = git
+pkg_pegjs_repo = https://github.com/dmitriid/pegjs
+pkg_pegjs_commit = master
+
+PACKAGES += percept2
+pkg_percept2_name = percept2
+pkg_percept2_description = Concurrent profiling tool for Erlang
+pkg_percept2_homepage = https://github.com/huiqing/percept2
+pkg_percept2_fetch = git
+pkg_percept2_repo = https://github.com/huiqing/percept2
+pkg_percept2_commit = master
+
+PACKAGES += pgo
+pkg_pgo_name = pgo
+pkg_pgo_description = Erlang Postgres client and connection pool
+pkg_pgo_homepage = https://github.com/erleans/pgo.git
+pkg_pgo_fetch = git
+pkg_pgo_repo = https://github.com/erleans/pgo.git
+pkg_pgo_commit = master
+
+PACKAGES += pgsql
+pkg_pgsql_name = pgsql
+pkg_pgsql_description = Erlang PostgreSQL driver
+pkg_pgsql_homepage = https://github.com/semiocast/pgsql
+pkg_pgsql_fetch = git
+pkg_pgsql_repo = https://github.com/semiocast/pgsql
+pkg_pgsql_commit = master
+
+PACKAGES += pkgx
+pkg_pkgx_name = pkgx
+pkg_pkgx_description = Build .deb packages from Erlang releases
+pkg_pkgx_homepage = https://github.com/arjan/pkgx
+pkg_pkgx_fetch = git
+pkg_pkgx_repo = https://github.com/arjan/pkgx
+pkg_pkgx_commit = master
+
+PACKAGES += pkt
+pkg_pkt_name = pkt
+pkg_pkt_description = Erlang network protocol library
+pkg_pkt_homepage = https://github.com/msantos/pkt
+pkg_pkt_fetch = git
+pkg_pkt_repo = https://github.com/msantos/pkt
+pkg_pkt_commit = master
+
+PACKAGES += plain_fsm
+pkg_plain_fsm_name = plain_fsm
+pkg_plain_fsm_description = A behaviour/support library for writing plain Erlang FSMs.
+pkg_plain_fsm_homepage = https://github.com/uwiger/plain_fsm
+pkg_plain_fsm_fetch = git
+pkg_plain_fsm_repo = https://github.com/uwiger/plain_fsm
+pkg_plain_fsm_commit = master
+
+PACKAGES += plumtree
+pkg_plumtree_name = plumtree
+pkg_plumtree_description = Epidemic Broadcast Trees
+pkg_plumtree_homepage = https://github.com/helium/plumtree
+pkg_plumtree_fetch = git
+pkg_plumtree_repo = https://github.com/helium/plumtree
+pkg_plumtree_commit = master
+
+PACKAGES += pmod_transform
+pkg_pmod_transform_name = pmod_transform
+pkg_pmod_transform_description = Parse transform for parameterized modules
+pkg_pmod_transform_homepage = https://github.com/erlang/pmod_transform
+pkg_pmod_transform_fetch = git
+pkg_pmod_transform_repo = https://github.com/erlang/pmod_transform
+pkg_pmod_transform_commit = master
+
+PACKAGES += pobox
+pkg_pobox_name = pobox
+pkg_pobox_description = External buffer processes to protect against mailbox overflow in Erlang
+pkg_pobox_homepage = https://github.com/ferd/pobox
+pkg_pobox_fetch = git
+pkg_pobox_repo = https://github.com/ferd/pobox
+pkg_pobox_commit = master
+
+PACKAGES += ponos
+pkg_ponos_name = ponos
+pkg_ponos_description = ponos is a simple yet powerful load generator written in erlang
+pkg_ponos_homepage = https://github.com/klarna/ponos
+pkg_ponos_fetch = git
+pkg_ponos_repo = https://github.com/klarna/ponos
+pkg_ponos_commit = master
+
+PACKAGES += poolboy
+pkg_poolboy_name = poolboy
+pkg_poolboy_description = A hunky Erlang worker pool factory
+pkg_poolboy_homepage = https://github.com/devinus/poolboy
+pkg_poolboy_fetch = git
+pkg_poolboy_repo = https://github.com/devinus/poolboy
+pkg_poolboy_commit = master
+
+PACKAGES += pooler
+pkg_pooler_name = pooler
+pkg_pooler_description = An OTP Process Pool Application
+pkg_pooler_homepage = https://github.com/seth/pooler
+pkg_pooler_fetch = git
+pkg_pooler_repo = https://github.com/seth/pooler
+pkg_pooler_commit = master
+
+PACKAGES += pqueue
+pkg_pqueue_name = pqueue
+pkg_pqueue_description = Erlang Priority Queues
+pkg_pqueue_homepage = https://github.com/okeuday/pqueue
+pkg_pqueue_fetch = git
+pkg_pqueue_repo = https://github.com/okeuday/pqueue
+pkg_pqueue_commit = master
+
+PACKAGES += procket
+pkg_procket_name = procket
+pkg_procket_description = Erlang interface to low level socket operations
+pkg_procket_homepage = http://blog.listincomprehension.com/search/label/procket
+pkg_procket_fetch = git
+pkg_procket_repo = https://github.com/msantos/procket
+pkg_procket_commit = master
+
+PACKAGES += prometheus
+pkg_prometheus_name = prometheus
+pkg_prometheus_description = Prometheus.io client in Erlang
+pkg_prometheus_homepage = https://github.com/deadtrickster/prometheus.erl
+pkg_prometheus_fetch = git
+pkg_prometheus_repo = https://github.com/deadtrickster/prometheus.erl
+pkg_prometheus_commit = master
+
+PACKAGES += prop
+pkg_prop_name = prop
+pkg_prop_description = An Erlang code scaffolding and generator system.
+pkg_prop_homepage = https://github.com/nuex/prop
+pkg_prop_fetch = git
+pkg_prop_repo = https://github.com/nuex/prop
+pkg_prop_commit = master
+
+PACKAGES += proper
+pkg_proper_name = proper
+pkg_proper_description = PropEr: a QuickCheck-inspired property-based testing tool for Erlang.
+pkg_proper_homepage = http://proper.softlab.ntua.gr
+pkg_proper_fetch = git
+pkg_proper_repo = https://github.com/manopapad/proper
+pkg_proper_commit = master
+
+PACKAGES += props
+pkg_props_name = props
+pkg_props_description = Property structure library
+pkg_props_homepage = https://github.com/greyarea/props
+pkg_props_fetch = git
+pkg_props_repo = https://github.com/greyarea/props
+pkg_props_commit = master
+
+PACKAGES += protobuffs
+pkg_protobuffs_name = protobuffs
+pkg_protobuffs_description = An implementation of Google's Protocol Buffers for Erlang, based on ngerakines/erlang_protobuffs.
+pkg_protobuffs_homepage = https://github.com/basho/erlang_protobuffs
+pkg_protobuffs_fetch = git
+pkg_protobuffs_repo = https://github.com/basho/erlang_protobuffs
+pkg_protobuffs_commit = master
+
+PACKAGES += psycho
+pkg_psycho_name = psycho
+pkg_psycho_description = HTTP server that provides a WSGI-like interface for applications and middleware.
+pkg_psycho_homepage = https://github.com/gar1t/psycho
+pkg_psycho_fetch = git
+pkg_psycho_repo = https://github.com/gar1t/psycho
+pkg_psycho_commit = master
+
+PACKAGES += purity
+pkg_purity_name = purity
+pkg_purity_description = A side-effect analyzer for Erlang
+pkg_purity_homepage = https://github.com/mpitid/purity
+pkg_purity_fetch = git
+pkg_purity_repo = https://github.com/mpitid/purity
+pkg_purity_commit = master
+
+PACKAGES += push_service
+pkg_push_service_name = push_service
+pkg_push_service_description = Push service
+pkg_push_service_homepage = https://github.com/hairyhum/push_service
+pkg_push_service_fetch = git
+pkg_push_service_repo = https://github.com/hairyhum/push_service
+pkg_push_service_commit = master
+
+PACKAGES += qdate
+pkg_qdate_name = qdate
+pkg_qdate_description = Date, time, and timezone parsing, formatting, and conversion for Erlang.
+pkg_qdate_homepage = https://github.com/choptastic/qdate
+pkg_qdate_fetch = git
+pkg_qdate_repo = https://github.com/choptastic/qdate
+pkg_qdate_commit = master
+
+PACKAGES += qrcode
+pkg_qrcode_name = qrcode
+pkg_qrcode_description = QR Code encoder in Erlang
+pkg_qrcode_homepage = https://github.com/komone/qrcode
+pkg_qrcode_fetch = git
+pkg_qrcode_repo = https://github.com/komone/qrcode
+pkg_qrcode_commit = master
+
+PACKAGES += quest
+pkg_quest_name = quest
+pkg_quest_description = Learn Erlang through this set of challenges. An interactive system for getting to know Erlang.
+pkg_quest_homepage = https://github.com/eriksoe/ErlangQuest
+pkg_quest_fetch = git
+pkg_quest_repo = https://github.com/eriksoe/ErlangQuest
+pkg_quest_commit = master
+
+PACKAGES += quickrand
+pkg_quickrand_name = quickrand
+pkg_quickrand_description = Quick Erlang Random Number Generation
+pkg_quickrand_homepage = https://github.com/okeuday/quickrand
+pkg_quickrand_fetch = git
+pkg_quickrand_repo = https://github.com/okeuday/quickrand
+pkg_quickrand_commit = master
+
+PACKAGES += rabbit
+pkg_rabbit_name = rabbit
+pkg_rabbit_description = RabbitMQ Server
+pkg_rabbit_homepage = https://www.rabbitmq.com/
+pkg_rabbit_fetch = git
+pkg_rabbit_repo = https://github.com/rabbitmq/rabbitmq-server.git
+pkg_rabbit_commit = master
+
+PACKAGES += rabbit_exchange_type_riak
+pkg_rabbit_exchange_type_riak_name = rabbit_exchange_type_riak
+pkg_rabbit_exchange_type_riak_description = Custom RabbitMQ exchange type for sticking messages in Riak
+pkg_rabbit_exchange_type_riak_homepage = https://github.com/jbrisbin/riak-exchange
+pkg_rabbit_exchange_type_riak_fetch = git
+pkg_rabbit_exchange_type_riak_repo = https://github.com/jbrisbin/riak-exchange
+pkg_rabbit_exchange_type_riak_commit = master
+
+PACKAGES += rack
+pkg_rack_name = rack
+pkg_rack_description = Rack handler for erlang
+pkg_rack_homepage = https://github.com/erlyvideo/rack
+pkg_rack_fetch = git
+pkg_rack_repo = https://github.com/erlyvideo/rack
+pkg_rack_commit = master
+
+PACKAGES += radierl
+pkg_radierl_name = radierl
+pkg_radierl_description = RADIUS protocol stack implemented in Erlang.
+pkg_radierl_homepage = https://github.com/vances/radierl
+pkg_radierl_fetch = git
+pkg_radierl_repo = https://github.com/vances/radierl
+pkg_radierl_commit = master
+
+PACKAGES += rafter
+pkg_rafter_name = rafter
+pkg_rafter_description = An Erlang library application which implements the Raft consensus protocol
+pkg_rafter_homepage = https://github.com/andrewjstone/rafter
+pkg_rafter_fetch = git
+pkg_rafter_repo = https://github.com/andrewjstone/rafter
+pkg_rafter_commit = master
+
+PACKAGES += ranch
+pkg_ranch_name = ranch
+pkg_ranch_description = Socket acceptor pool for TCP protocols.
+pkg_ranch_homepage = http://ninenines.eu
+pkg_ranch_fetch = git
+pkg_ranch_repo = https://github.com/ninenines/ranch
+pkg_ranch_commit = 1.2.1
+
+PACKAGES += rbeacon
+pkg_rbeacon_name = rbeacon
+pkg_rbeacon_description = LAN discovery and presence in Erlang.
+pkg_rbeacon_homepage = https://github.com/refuge/rbeacon
+pkg_rbeacon_fetch = git
+pkg_rbeacon_repo = https://github.com/refuge/rbeacon
+pkg_rbeacon_commit = master
+
+PACKAGES += rebar
+pkg_rebar_name = rebar
+pkg_rebar_description = Erlang build tool that makes it easy to compile and test Erlang applications, port drivers and releases.
+pkg_rebar_homepage = http://www.rebar3.org
+pkg_rebar_fetch = git
+pkg_rebar_repo = https://github.com/rebar/rebar3
+pkg_rebar_commit = master
+
+PACKAGES += rebus
+pkg_rebus_name = rebus
+pkg_rebus_description = A stupid simple, internal, pub/sub event bus written in- and for Erlang.
+pkg_rebus_homepage = https://github.com/olle/rebus
+pkg_rebus_fetch = git
+pkg_rebus_repo = https://github.com/olle/rebus
+pkg_rebus_commit = master
+
+PACKAGES += rec2json
+pkg_rec2json_name = rec2json
+pkg_rec2json_description = Compile Erlang record definitions into modules to convert them to/from JSON easily.
+pkg_rec2json_homepage = https://github.com/lordnull/rec2json
+pkg_rec2json_fetch = git
+pkg_rec2json_repo = https://github.com/lordnull/rec2json
+pkg_rec2json_commit = master
+
+PACKAGES += recon
+pkg_recon_name = recon
+pkg_recon_description = Collection of functions and scripts to debug Erlang in production.
+pkg_recon_homepage = https://github.com/ferd/recon
+pkg_recon_fetch = git
+pkg_recon_repo = https://github.com/ferd/recon
+pkg_recon_commit = master
+
+PACKAGES += record_info
+pkg_record_info_name = record_info
+pkg_record_info_description = Convert between record and proplist
+pkg_record_info_homepage = https://github.com/bipthelin/erlang-record_info
+pkg_record_info_fetch = git
+pkg_record_info_repo = https://github.com/bipthelin/erlang-record_info
+pkg_record_info_commit = master
+
+PACKAGES += redgrid
+pkg_redgrid_name = redgrid
+pkg_redgrid_description = automatic Erlang node discovery via redis
+pkg_redgrid_homepage = https://github.com/jkvor/redgrid
+pkg_redgrid_fetch = git
+pkg_redgrid_repo = https://github.com/jkvor/redgrid
+pkg_redgrid_commit = master
+
+PACKAGES += redo
+pkg_redo_name = redo
+pkg_redo_description = pipelined erlang redis client
+pkg_redo_homepage = https://github.com/jkvor/redo
+pkg_redo_fetch = git
+pkg_redo_repo = https://github.com/jkvor/redo
+pkg_redo_commit = master
+
+PACKAGES += reload_mk
+pkg_reload_mk_name = reload_mk
+pkg_reload_mk_description = Live reload plugin for erlang.mk.
+pkg_reload_mk_homepage = https://github.com/bullno1/reload.mk
+pkg_reload_mk_fetch = git
+pkg_reload_mk_repo = https://github.com/bullno1/reload.mk
+pkg_reload_mk_commit = master
+
+PACKAGES += reltool_util
+pkg_reltool_util_name = reltool_util
+pkg_reltool_util_description = Erlang reltool utility functionality application
+pkg_reltool_util_homepage = https://github.com/okeuday/reltool_util
+pkg_reltool_util_fetch = git
+pkg_reltool_util_repo = https://github.com/okeuday/reltool_util
+pkg_reltool_util_commit = master
+
+PACKAGES += relx
+pkg_relx_name = relx
+pkg_relx_description = Sane, simple release creation for Erlang
+pkg_relx_homepage = https://github.com/erlware/relx
+pkg_relx_fetch = git
+pkg_relx_repo = https://github.com/erlware/relx
+pkg_relx_commit = master
+
+PACKAGES += resource_discovery
+pkg_resource_discovery_name = resource_discovery
+pkg_resource_discovery_description = An application used to dynamically discover resources present in an Erlang node cluster.
+pkg_resource_discovery_homepage = http://erlware.org/
+pkg_resource_discovery_fetch = git
+pkg_resource_discovery_repo = https://github.com/erlware/resource_discovery
+pkg_resource_discovery_commit = master
+
+PACKAGES += restc
+pkg_restc_name = restc
+pkg_restc_description = Erlang REST client
+pkg_restc_homepage = https://github.com/kivra/restclient
+pkg_restc_fetch = git
+pkg_restc_repo = https://github.com/kivra/restclient
+pkg_restc_commit = master
+
+PACKAGES += rfc4627_jsonrpc
+pkg_rfc4627_jsonrpc_name = rfc4627_jsonrpc
+pkg_rfc4627_jsonrpc_description = Erlang RFC4627 (JSON) codec and JSON-RPC server implementation.
+pkg_rfc4627_jsonrpc_homepage = https://github.com/tonyg/erlang-rfc4627
+pkg_rfc4627_jsonrpc_fetch = git
+pkg_rfc4627_jsonrpc_repo = https://github.com/tonyg/erlang-rfc4627
+pkg_rfc4627_jsonrpc_commit = master
+
+PACKAGES += riak_control
+pkg_riak_control_name = riak_control
+pkg_riak_control_description = Webmachine-based administration interface for Riak.
+pkg_riak_control_homepage = https://github.com/basho/riak_control
+pkg_riak_control_fetch = git
+pkg_riak_control_repo = https://github.com/basho/riak_control
+pkg_riak_control_commit = master
+
+PACKAGES += riak_core
+pkg_riak_core_name = riak_core
+pkg_riak_core_description = Distributed systems infrastructure used by Riak.
+pkg_riak_core_homepage = https://github.com/basho/riak_core
+pkg_riak_core_fetch = git
+pkg_riak_core_repo = https://github.com/basho/riak_core
+pkg_riak_core_commit = master
+
+PACKAGES += riak_dt
+pkg_riak_dt_name = riak_dt
+pkg_riak_dt_description = Convergent replicated datatypes in Erlang
+pkg_riak_dt_homepage = https://github.com/basho/riak_dt
+pkg_riak_dt_fetch = git
+pkg_riak_dt_repo = https://github.com/basho/riak_dt
+pkg_riak_dt_commit = master
+
+PACKAGES += riak_ensemble
+pkg_riak_ensemble_name = riak_ensemble
+pkg_riak_ensemble_description = Multi-Paxos framework in Erlang
+pkg_riak_ensemble_homepage = https://github.com/basho/riak_ensemble
+pkg_riak_ensemble_fetch = git
+pkg_riak_ensemble_repo = https://github.com/basho/riak_ensemble
+pkg_riak_ensemble_commit = master
+
+PACKAGES += riak_kv
+pkg_riak_kv_name = riak_kv
+pkg_riak_kv_description = Riak Key/Value Store
+pkg_riak_kv_homepage = https://github.com/basho/riak_kv
+pkg_riak_kv_fetch = git
+pkg_riak_kv_repo = https://github.com/basho/riak_kv
+pkg_riak_kv_commit = master
+
+PACKAGES += riak_pg
+pkg_riak_pg_name = riak_pg
+pkg_riak_pg_description = Distributed process groups with riak_core.
+pkg_riak_pg_homepage = https://github.com/cmeiklejohn/riak_pg
+pkg_riak_pg_fetch = git
+pkg_riak_pg_repo = https://github.com/cmeiklejohn/riak_pg
+pkg_riak_pg_commit = master
+
+PACKAGES += riak_pipe
+pkg_riak_pipe_name = riak_pipe
+pkg_riak_pipe_description = Riak Pipelines
+pkg_riak_pipe_homepage = https://github.com/basho/riak_pipe
+pkg_riak_pipe_fetch = git
+pkg_riak_pipe_repo = https://github.com/basho/riak_pipe
+pkg_riak_pipe_commit = master
+
+PACKAGES += riak_sysmon
+pkg_riak_sysmon_name = riak_sysmon
+pkg_riak_sysmon_description = Simple OTP app for managing Erlang VM system_monitor event messages
+pkg_riak_sysmon_homepage = https://github.com/basho/riak_sysmon
+pkg_riak_sysmon_fetch = git
+pkg_riak_sysmon_repo = https://github.com/basho/riak_sysmon
+pkg_riak_sysmon_commit = master
+
+PACKAGES += riak_test
+pkg_riak_test_name = riak_test
+pkg_riak_test_description = I'm in your cluster, testing your riaks
+pkg_riak_test_homepage = https://github.com/basho/riak_test
+pkg_riak_test_fetch = git
+pkg_riak_test_repo = https://github.com/basho/riak_test
+pkg_riak_test_commit = master
+
+PACKAGES += riakc
+pkg_riakc_name = riakc
+pkg_riakc_description = Erlang clients for Riak.
+pkg_riakc_homepage = https://github.com/basho/riak-erlang-client
+pkg_riakc_fetch = git
+pkg_riakc_repo = https://github.com/basho/riak-erlang-client
+pkg_riakc_commit = master
+
+PACKAGES += riakhttpc
+pkg_riakhttpc_name = riakhttpc
+pkg_riakhttpc_description = Riak Erlang client using the HTTP interface
+pkg_riakhttpc_homepage = https://github.com/basho/riak-erlang-http-client
+pkg_riakhttpc_fetch = git
+pkg_riakhttpc_repo = https://github.com/basho/riak-erlang-http-client
+pkg_riakhttpc_commit = master
+
+PACKAGES += riaknostic
+pkg_riaknostic_name = riaknostic
+pkg_riaknostic_description = A diagnostic tool for Riak installations, to find common errors asap
+pkg_riaknostic_homepage = https://github.com/basho/riaknostic
+pkg_riaknostic_fetch = git
+pkg_riaknostic_repo = https://github.com/basho/riaknostic
+pkg_riaknostic_commit = master
+
+PACKAGES += riakpool
+pkg_riakpool_name = riakpool
+pkg_riakpool_description = erlang riak client pool
+pkg_riakpool_homepage = https://github.com/dweldon/riakpool
+pkg_riakpool_fetch = git
+pkg_riakpool_repo = https://github.com/dweldon/riakpool
+pkg_riakpool_commit = master
+
+PACKAGES += rivus_cep
+pkg_rivus_cep_name = rivus_cep
+pkg_rivus_cep_description = Complex event processing in Erlang
+pkg_rivus_cep_homepage = https://github.com/vascokk/rivus_cep
+pkg_rivus_cep_fetch = git
+pkg_rivus_cep_repo = https://github.com/vascokk/rivus_cep
+pkg_rivus_cep_commit = master
+
+PACKAGES += rlimit
+pkg_rlimit_name = rlimit
+pkg_rlimit_description = Magnus Klaar's rate limiter code from etorrent
+pkg_rlimit_homepage = https://github.com/jlouis/rlimit
+pkg_rlimit_fetch = git
+pkg_rlimit_repo = https://github.com/jlouis/rlimit
+pkg_rlimit_commit = master
+
+PACKAGES += rust_mk
+pkg_rust_mk_name = rust_mk
+pkg_rust_mk_description = Build Rust crates in an Erlang application
+pkg_rust_mk_homepage = https://github.com/goertzenator/rust.mk
+pkg_rust_mk_fetch = git
+pkg_rust_mk_repo = https://github.com/goertzenator/rust.mk
+pkg_rust_mk_commit = master
+
+PACKAGES += safetyvalve
+pkg_safetyvalve_name = safetyvalve
+pkg_safetyvalve_description = A safety valve for your erlang node
+pkg_safetyvalve_homepage = https://github.com/jlouis/safetyvalve
+pkg_safetyvalve_fetch = git
+pkg_safetyvalve_repo = https://github.com/jlouis/safetyvalve
+pkg_safetyvalve_commit = master
+
+PACKAGES += seestar
+pkg_seestar_name = seestar
+pkg_seestar_description = The Erlang client for Cassandra 1.2+ binary protocol
+pkg_seestar_homepage = https://github.com/iamaleksey/seestar
+pkg_seestar_fetch = git
+pkg_seestar_repo = https://github.com/iamaleksey/seestar
+pkg_seestar_commit = master
+
+PACKAGES += service
+pkg_service_name = service
+pkg_service_description = A minimal Erlang behavior for creating CloudI internal services
+pkg_service_homepage = http://cloudi.org/
+pkg_service_fetch = git
+pkg_service_repo = https://github.com/CloudI/service
+pkg_service_commit = master
+
+PACKAGES += setup
+pkg_setup_name = setup
+pkg_setup_description = Generic setup utility for Erlang-based systems
+pkg_setup_homepage = https://github.com/uwiger/setup
+pkg_setup_fetch = git
+pkg_setup_repo = https://github.com/uwiger/setup
+pkg_setup_commit = master
+
+PACKAGES += sext
+pkg_sext_name = sext
+pkg_sext_description = Sortable Erlang Term Serialization
+pkg_sext_homepage = https://github.com/uwiger/sext
+pkg_sext_fetch = git
+pkg_sext_repo = https://github.com/uwiger/sext
+pkg_sext_commit = master
+
+PACKAGES += sfmt
+pkg_sfmt_name = sfmt
+pkg_sfmt_description = SFMT pseudo random number generator for Erlang.
+pkg_sfmt_homepage = https://github.com/jj1bdx/sfmt-erlang
+pkg_sfmt_fetch = git
+pkg_sfmt_repo = https://github.com/jj1bdx/sfmt-erlang
+pkg_sfmt_commit = master
+
+PACKAGES += sgte
+pkg_sgte_name = sgte
+pkg_sgte_description = A simple Erlang Template Engine
+pkg_sgte_homepage = https://github.com/filippo/sgte
+pkg_sgte_fetch = git
+pkg_sgte_repo = https://github.com/filippo/sgte
+pkg_sgte_commit = master
+
+PACKAGES += sheriff
+pkg_sheriff_name = sheriff
+pkg_sheriff_description = Parse transform for type based validation.
+pkg_sheriff_homepage = http://ninenines.eu
+pkg_sheriff_fetch = git
+pkg_sheriff_repo = https://github.com/extend/sheriff
+pkg_sheriff_commit = master
+
+PACKAGES += shotgun
+pkg_shotgun_name = shotgun
+pkg_shotgun_description = better than just a gun
+pkg_shotgun_homepage = https://github.com/inaka/shotgun
+pkg_shotgun_fetch = git
+pkg_shotgun_repo = https://github.com/inaka/shotgun
+pkg_shotgun_commit = master
+
+PACKAGES += sidejob
+pkg_sidejob_name = sidejob
+pkg_sidejob_description = Parallel worker and capacity limiting library for Erlang
+pkg_sidejob_homepage = https://github.com/basho/sidejob
+pkg_sidejob_fetch = git
+pkg_sidejob_repo = https://github.com/basho/sidejob
+pkg_sidejob_commit = master
+
+PACKAGES += sieve
+pkg_sieve_name = sieve
+pkg_sieve_description = sieve is a simple TCP routing proxy (layer 7) in erlang
+pkg_sieve_homepage = https://github.com/benoitc/sieve
+pkg_sieve_fetch = git
+pkg_sieve_repo = https://github.com/benoitc/sieve
+pkg_sieve_commit = master
+
+PACKAGES += sighandler
+pkg_sighandler_name = sighandler
+pkg_sighandler_description = Handle UNIX signals in Erlang
+pkg_sighandler_homepage = https://github.com/jkingsbery/sighandler
+pkg_sighandler_fetch = git
+pkg_sighandler_repo = https://github.com/jkingsbery/sighandler
+pkg_sighandler_commit = master
+
+PACKAGES += simhash
+pkg_simhash_name = simhash
+pkg_simhash_description = Simhashing for Erlang -- hashing algorithm to find near-duplicates in binary data.
+pkg_simhash_homepage = https://github.com/ferd/simhash
+pkg_simhash_fetch = git
+pkg_simhash_repo = https://github.com/ferd/simhash
+pkg_simhash_commit = master
+
+PACKAGES += simple_bridge
+pkg_simple_bridge_name = simple_bridge
+pkg_simple_bridge_description = A simple, standardized interface library to Erlang HTTP Servers.
+pkg_simple_bridge_homepage = https://github.com/nitrogen/simple_bridge
+pkg_simple_bridge_fetch = git
+pkg_simple_bridge_repo = https://github.com/nitrogen/simple_bridge
+pkg_simple_bridge_commit = master
+
+PACKAGES += simple_oauth2
+pkg_simple_oauth2_name = simple_oauth2
+pkg_simple_oauth2_description = Simple erlang OAuth2 client module for any http server framework (Google, Facebook, Yandex, Vkontakte are preconfigured)
+pkg_simple_oauth2_homepage = https://github.com/virtan/simple_oauth2
+pkg_simple_oauth2_fetch = git
+pkg_simple_oauth2_repo = https://github.com/virtan/simple_oauth2
+pkg_simple_oauth2_commit = master
+
+PACKAGES += skel
+pkg_skel_name = skel
+pkg_skel_description = A Streaming Process-based Skeleton Library for Erlang
+pkg_skel_homepage = https://github.com/ParaPhrase/skel
+pkg_skel_fetch = git
+pkg_skel_repo = https://github.com/ParaPhrase/skel
+pkg_skel_commit = master
+
+PACKAGES += slack
+pkg_slack_name = slack
+pkg_slack_description = Minimal slack notification OTP library.
+pkg_slack_homepage = https://github.com/DonBranson/slack
+pkg_slack_fetch = git
+pkg_slack_repo = https://github.com/DonBranson/slack.git
+pkg_slack_commit = master
+
+PACKAGES += smother
+pkg_smother_name = smother
+pkg_smother_description = Extended code coverage metrics for Erlang.
+pkg_smother_homepage = https://ramsay-t.github.io/Smother/
+pkg_smother_fetch = git
+pkg_smother_repo = https://github.com/ramsay-t/Smother
+pkg_smother_commit = master
+
+PACKAGES += snappyer
+pkg_snappyer_name = snappyer
+pkg_snappyer_description = Snappy as nif for Erlang
+pkg_snappyer_homepage = https://github.com/zmstone/snappyer
+pkg_snappyer_fetch = git
+pkg_snappyer_repo = https://github.com/zmstone/snappyer.git
+pkg_snappyer_commit = master
+
+PACKAGES += social
+pkg_social_name = social
+pkg_social_description = Cowboy handler for social login via OAuth2 providers
+pkg_social_homepage = https://github.com/dvv/social
+pkg_social_fetch = git
+pkg_social_repo = https://github.com/dvv/social
+pkg_social_commit = master
+
+PACKAGES += spapi_router
+pkg_spapi_router_name = spapi_router
+pkg_spapi_router_description = Partially-connected Erlang clustering
+pkg_spapi_router_homepage = https://github.com/spilgames/spapi-router
+pkg_spapi_router_fetch = git
+pkg_spapi_router_repo = https://github.com/spilgames/spapi-router
+pkg_spapi_router_commit = master
+
+PACKAGES += sqerl
+pkg_sqerl_name = sqerl
+pkg_sqerl_description = An Erlang-flavoured SQL DSL
+pkg_sqerl_homepage = https://github.com/hairyhum/sqerl
+pkg_sqerl_fetch = git
+pkg_sqerl_repo = https://github.com/hairyhum/sqerl
+pkg_sqerl_commit = master
+
+PACKAGES += srly
+pkg_srly_name = srly
+pkg_srly_description = Native Erlang Unix serial interface
+pkg_srly_homepage = https://github.com/msantos/srly
+pkg_srly_fetch = git
+pkg_srly_repo = https://github.com/msantos/srly
+pkg_srly_commit = master
+
+PACKAGES += sshrpc
+pkg_sshrpc_name = sshrpc
+pkg_sshrpc_description = Erlang SSH RPC module (experimental)
+pkg_sshrpc_homepage = https://github.com/jj1bdx/sshrpc
+pkg_sshrpc_fetch = git
+pkg_sshrpc_repo = https://github.com/jj1bdx/sshrpc
+pkg_sshrpc_commit = master
+
+PACKAGES += stable
+pkg_stable_name = stable
+pkg_stable_description = Library of assorted helpers for Cowboy web server.
+pkg_stable_homepage = https://github.com/dvv/stable
+pkg_stable_fetch = git
+pkg_stable_repo = https://github.com/dvv/stable
+pkg_stable_commit = master
+
+PACKAGES += statebox
+pkg_statebox_name = statebox
+pkg_statebox_description = Erlang state monad with merge/conflict-resolution capabilities. Useful for Riak.
+pkg_statebox_homepage = https://github.com/mochi/statebox
+pkg_statebox_fetch = git
+pkg_statebox_repo = https://github.com/mochi/statebox
+pkg_statebox_commit = master
+
+PACKAGES += statebox_riak
+pkg_statebox_riak_name = statebox_riak
+pkg_statebox_riak_description = Convenience library that makes it easier to use statebox with riak, extracted from best practices in our production code at Mochi Media.
+pkg_statebox_riak_homepage = https://github.com/mochi/statebox_riak
+pkg_statebox_riak_fetch = git
+pkg_statebox_riak_repo = https://github.com/mochi/statebox_riak
+pkg_statebox_riak_commit = master
+
+PACKAGES += statman
+pkg_statman_name = statman
+pkg_statman_description = Efficiently collect massive volumes of metrics inside the Erlang VM
+pkg_statman_homepage = https://github.com/knutin/statman
+pkg_statman_fetch = git
+pkg_statman_repo = https://github.com/knutin/statman
+pkg_statman_commit = master
+
+PACKAGES += statsderl
+pkg_statsderl_name = statsderl
+pkg_statsderl_description = StatsD client (erlang)
+pkg_statsderl_homepage = https://github.com/lpgauth/statsderl
+pkg_statsderl_fetch = git
+pkg_statsderl_repo = https://github.com/lpgauth/statsderl
+pkg_statsderl_commit = master
+
+PACKAGES += stdinout_pool
+pkg_stdinout_pool_name = stdinout_pool
+pkg_stdinout_pool_description = stdinout_pool : stuff goes in, stuff goes out. there's never any miscommunication.
+pkg_stdinout_pool_homepage = https://github.com/mattsta/erlang-stdinout-pool
+pkg_stdinout_pool_fetch = git
+pkg_stdinout_pool_repo = https://github.com/mattsta/erlang-stdinout-pool
+pkg_stdinout_pool_commit = master
+
+PACKAGES += stockdb
+pkg_stockdb_name = stockdb
+pkg_stockdb_description = Database for storing Stock Exchange quotes in erlang
+pkg_stockdb_homepage = https://github.com/maxlapshin/stockdb
+pkg_stockdb_fetch = git
+pkg_stockdb_repo = https://github.com/maxlapshin/stockdb
+pkg_stockdb_commit = master
+
+PACKAGES += stripe
+pkg_stripe_name = stripe
+pkg_stripe_description = Erlang interface to the stripe.com API
+pkg_stripe_homepage = https://github.com/mattsta/stripe-erlang
+pkg_stripe_fetch = git
+pkg_stripe_repo = https://github.com/mattsta/stripe-erlang
+pkg_stripe_commit = v1
+
+PACKAGES += subproc
+pkg_subproc_name = subproc
+pkg_subproc_description = UNIX subprocess manager with {active,once|false} modes
+pkg_subproc_homepage = http://dozzie.jarowit.net/trac/wiki/subproc
+pkg_subproc_fetch = git
+pkg_subproc_repo = https://github.com/dozzie/subproc
+pkg_subproc_commit = v0.1.0
+
+PACKAGES += supervisor3
+pkg_supervisor3_name = supervisor3
+pkg_supervisor3_description = OTP supervisor with additional strategies
+pkg_supervisor3_homepage = https://github.com/klarna/supervisor3
+pkg_supervisor3_fetch = git
+pkg_supervisor3_repo = https://github.com/klarna/supervisor3.git
+pkg_supervisor3_commit = master
+
+PACKAGES += surrogate
+pkg_surrogate_name = surrogate
+pkg_surrogate_description = Proxy server written in erlang. Supports reverse proxy load balancing and forward proxy with http (including CONNECT), socks4, socks5, and transparent proxy modes.
+pkg_surrogate_homepage = https://github.com/skruger/Surrogate
+pkg_surrogate_fetch = git
+pkg_surrogate_repo = https://github.com/skruger/Surrogate
+pkg_surrogate_commit = master
+
+PACKAGES += swab
+pkg_swab_name = swab
+pkg_swab_description = General purpose buffer handling module
+pkg_swab_homepage = https://github.com/crownedgrouse/swab
+pkg_swab_fetch = git
+pkg_swab_repo = https://github.com/crownedgrouse/swab
+pkg_swab_commit = master
+
+PACKAGES += swarm
+pkg_swarm_name = swarm
+pkg_swarm_description = Fast and simple acceptor pool for Erlang
+pkg_swarm_homepage = https://github.com/jeremey/swarm
+pkg_swarm_fetch = git
+pkg_swarm_repo = https://github.com/jeremey/swarm
+pkg_swarm_commit = master
+
+PACKAGES += switchboard
+pkg_switchboard_name = switchboard
+pkg_switchboard_description = A framework for processing email using worker plugins.
+pkg_switchboard_homepage = https://github.com/thusfresh/switchboard
+pkg_switchboard_fetch = git
+pkg_switchboard_repo = https://github.com/thusfresh/switchboard
+pkg_switchboard_commit = master
+
+PACKAGES += syn
+pkg_syn_name = syn
+pkg_syn_description = A global Process Registry and Process Group manager for Erlang.
+pkg_syn_homepage = https://github.com/ostinelli/syn
+pkg_syn_fetch = git
+pkg_syn_repo = https://github.com/ostinelli/syn
+pkg_syn_commit = master
+
+PACKAGES += sync
+pkg_sync_name = sync
+pkg_sync_description = On-the-fly recompiling and reloading in Erlang.
+pkg_sync_homepage = https://github.com/rustyio/sync
+pkg_sync_fetch = git
+pkg_sync_repo = https://github.com/rustyio/sync
+pkg_sync_commit = master
+
+PACKAGES += syntaxerl
+pkg_syntaxerl_name = syntaxerl
+pkg_syntaxerl_description = Syntax checker for Erlang
+pkg_syntaxerl_homepage = https://github.com/ten0s/syntaxerl
+pkg_syntaxerl_fetch = git
+pkg_syntaxerl_repo = https://github.com/ten0s/syntaxerl
+pkg_syntaxerl_commit = master
+
+PACKAGES += syslog
+pkg_syslog_name = syslog
+pkg_syslog_description = Erlang port driver for interacting with syslog via syslog(3)
+pkg_syslog_homepage = https://github.com/Vagabond/erlang-syslog
+pkg_syslog_fetch = git
+pkg_syslog_repo = https://github.com/Vagabond/erlang-syslog
+pkg_syslog_commit = master
+
+PACKAGES += taskforce
+pkg_taskforce_name = taskforce
+pkg_taskforce_description = Erlang worker pools for controlled parallelisation of arbitrary tasks.
+pkg_taskforce_homepage = https://github.com/g-andrade/taskforce
+pkg_taskforce_fetch = git
+pkg_taskforce_repo = https://github.com/g-andrade/taskforce
+pkg_taskforce_commit = master
+
+PACKAGES += tddreloader
+pkg_tddreloader_name = tddreloader
+pkg_tddreloader_description = Shell utility for recompiling, reloading, and testing code as it changes
+pkg_tddreloader_homepage = https://github.com/version2beta/tddreloader
+pkg_tddreloader_fetch = git
+pkg_tddreloader_repo = https://github.com/version2beta/tddreloader
+pkg_tddreloader_commit = master
+
+PACKAGES += tempo
+pkg_tempo_name = tempo
+pkg_tempo_description = NIF-based date and time parsing and formatting for Erlang.
+pkg_tempo_homepage = https://github.com/selectel/tempo
+pkg_tempo_fetch = git
+pkg_tempo_repo = https://github.com/selectel/tempo
+pkg_tempo_commit = master
+
+PACKAGES += ticktick
+pkg_ticktick_name = ticktick
+pkg_ticktick_description = Ticktick is an ID generator for message services.
+pkg_ticktick_homepage = https://github.com/ericliang/ticktick
+pkg_ticktick_fetch = git
+pkg_ticktick_repo = https://github.com/ericliang/ticktick
+pkg_ticktick_commit = master
+
+PACKAGES += tinymq
+pkg_tinymq_name = tinymq
+pkg_tinymq_description = TinyMQ - a diminutive, in-memory message queue
+pkg_tinymq_homepage = https://github.com/ChicagoBoss/tinymq
+pkg_tinymq_fetch = git
+pkg_tinymq_repo = https://github.com/ChicagoBoss/tinymq
+pkg_tinymq_commit = master
+
+PACKAGES += tinymt
+pkg_tinymt_name = tinymt
+pkg_tinymt_description = TinyMT pseudo random number generator for Erlang.
+pkg_tinymt_homepage = https://github.com/jj1bdx/tinymt-erlang
+pkg_tinymt_fetch = git
+pkg_tinymt_repo = https://github.com/jj1bdx/tinymt-erlang
+pkg_tinymt_commit = master
+
+PACKAGES += tirerl
+pkg_tirerl_name = tirerl
+pkg_tirerl_description = Erlang interface to Elastic Search
+pkg_tirerl_homepage = https://github.com/inaka/tirerl
+pkg_tirerl_fetch = git
+pkg_tirerl_repo = https://github.com/inaka/tirerl
+pkg_tirerl_commit = master
+
+PACKAGES += toml
+pkg_toml_name = toml
+pkg_toml_description = TOML (0.4.0) config parser
+pkg_toml_homepage = http://dozzie.jarowit.net/trac/wiki/TOML
+pkg_toml_fetch = git
+pkg_toml_repo = https://github.com/dozzie/toml
+pkg_toml_commit = v0.2.0
+
+PACKAGES += traffic_tools
+pkg_traffic_tools_name = traffic_tools
+pkg_traffic_tools_description = Simple traffic limiting library
+pkg_traffic_tools_homepage = https://github.com/systra/traffic_tools
+pkg_traffic_tools_fetch = git
+pkg_traffic_tools_repo = https://github.com/systra/traffic_tools
+pkg_traffic_tools_commit = master
+
+PACKAGES += trails
+pkg_trails_name = trails
+pkg_trails_description = A couple of improvements over Cowboy Routes
+pkg_trails_homepage = http://inaka.github.io/cowboy-trails/
+pkg_trails_fetch = git
+pkg_trails_repo = https://github.com/inaka/cowboy-trails
+pkg_trails_commit = master
+
+PACKAGES += trane
+pkg_trane_name = trane
+pkg_trane_description = SAX style broken HTML parser in Erlang
+pkg_trane_homepage = https://github.com/massemanet/trane
+pkg_trane_fetch = git
+pkg_trane_repo = https://github.com/massemanet/trane
+pkg_trane_commit = master
+
+PACKAGES += transit
+pkg_transit_name = transit
+pkg_transit_description = transit format for erlang
+pkg_transit_homepage = https://github.com/isaiah/transit-erlang
+pkg_transit_fetch = git
+pkg_transit_repo = https://github.com/isaiah/transit-erlang
+pkg_transit_commit = master
+
+PACKAGES += trie
+pkg_trie_name = trie
+pkg_trie_description = Erlang Trie Implementation
+pkg_trie_homepage = https://github.com/okeuday/trie
+pkg_trie_fetch = git
+pkg_trie_repo = https://github.com/okeuday/trie
+pkg_trie_commit = master
+
+PACKAGES += triq
+pkg_triq_name = triq
+pkg_triq_description = Trifork QuickCheck
+pkg_triq_homepage = https://triq.gitlab.io
+pkg_triq_fetch = git
+pkg_triq_repo = https://gitlab.com/triq/triq.git
+pkg_triq_commit = master
+
+PACKAGES += tunctl
+pkg_tunctl_name = tunctl
+pkg_tunctl_description = Erlang TUN/TAP interface
+pkg_tunctl_homepage = https://github.com/msantos/tunctl
+pkg_tunctl_fetch = git
+pkg_tunctl_repo = https://github.com/msantos/tunctl
+pkg_tunctl_commit = master
+
+PACKAGES += twerl
+pkg_twerl_name = twerl
+pkg_twerl_description = Erlang client for the Twitter Streaming API
+pkg_twerl_homepage = https://github.com/lucaspiller/twerl
+pkg_twerl_fetch = git
+pkg_twerl_repo = https://github.com/lucaspiller/twerl
+pkg_twerl_commit = oauth
+
+PACKAGES += twitter_erlang
+pkg_twitter_erlang_name = twitter_erlang
+pkg_twitter_erlang_description = An Erlang twitter client
+pkg_twitter_erlang_homepage = https://github.com/ngerakines/erlang_twitter
+pkg_twitter_erlang_fetch = git
+pkg_twitter_erlang_repo = https://github.com/ngerakines/erlang_twitter
+pkg_twitter_erlang_commit = master
+
+PACKAGES += ucol_nif
+pkg_ucol_nif_name = ucol_nif
+pkg_ucol_nif_description = ICU based collation Erlang module
+pkg_ucol_nif_homepage = https://github.com/refuge/ucol_nif
+pkg_ucol_nif_fetch = git
+pkg_ucol_nif_repo = https://github.com/refuge/ucol_nif
+pkg_ucol_nif_commit = master
+
+PACKAGES += unicorn
+pkg_unicorn_name = unicorn
+pkg_unicorn_description = Generic configuration server
+pkg_unicorn_homepage = https://github.com/shizzard/unicorn
+pkg_unicorn_fetch = git
+pkg_unicorn_repo = https://github.com/shizzard/unicorn
+pkg_unicorn_commit = master
+
+PACKAGES += unsplit
+pkg_unsplit_name = unsplit
+pkg_unsplit_description = Resolves conflicts in Mnesia after network splits
+pkg_unsplit_homepage = https://github.com/uwiger/unsplit
+pkg_unsplit_fetch = git
+pkg_unsplit_repo = https://github.com/uwiger/unsplit
+pkg_unsplit_commit = master
+
+PACKAGES += uuid
+pkg_uuid_name = uuid
+pkg_uuid_description = Erlang UUID Implementation
+pkg_uuid_homepage = https://github.com/okeuday/uuid
+pkg_uuid_fetch = git
+pkg_uuid_repo = https://github.com/okeuday/uuid
+pkg_uuid_commit = master
+
+PACKAGES += ux
+pkg_ux_name = ux
+pkg_ux_description = Unicode eXtention for Erlang (Strings, Collation)
+pkg_ux_homepage = https://github.com/erlang-unicode/ux
+pkg_ux_fetch = git
+pkg_ux_repo = https://github.com/erlang-unicode/ux
+pkg_ux_commit = master
+
+PACKAGES += vert
+pkg_vert_name = vert
+pkg_vert_description = Erlang binding to libvirt virtualization API
+pkg_vert_homepage = https://github.com/msantos/erlang-libvirt
+pkg_vert_fetch = git
+pkg_vert_repo = https://github.com/msantos/erlang-libvirt
+pkg_vert_commit = master
+
+PACKAGES += verx
+pkg_verx_name = verx
+pkg_verx_description = Erlang implementation of the libvirtd remote protocol
+pkg_verx_homepage = https://github.com/msantos/verx
+pkg_verx_fetch = git
+pkg_verx_repo = https://github.com/msantos/verx
+pkg_verx_commit = master
+
+PACKAGES += vmq_acl
+pkg_vmq_acl_name = vmq_acl
+pkg_vmq_acl_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_acl_homepage = https://verne.mq/
+pkg_vmq_acl_fetch = git
+pkg_vmq_acl_repo = https://github.com/erlio/vmq_acl
+pkg_vmq_acl_commit = master
+
+PACKAGES += vmq_bridge
+pkg_vmq_bridge_name = vmq_bridge
+pkg_vmq_bridge_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_bridge_homepage = https://verne.mq/
+pkg_vmq_bridge_fetch = git
+pkg_vmq_bridge_repo = https://github.com/erlio/vmq_bridge
+pkg_vmq_bridge_commit = master
+
+PACKAGES += vmq_graphite
+pkg_vmq_graphite_name = vmq_graphite
+pkg_vmq_graphite_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_graphite_homepage = https://verne.mq/
+pkg_vmq_graphite_fetch = git
+pkg_vmq_graphite_repo = https://github.com/erlio/vmq_graphite
+pkg_vmq_graphite_commit = master
+
+PACKAGES += vmq_passwd
+pkg_vmq_passwd_name = vmq_passwd
+pkg_vmq_passwd_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_passwd_homepage = https://verne.mq/
+pkg_vmq_passwd_fetch = git
+pkg_vmq_passwd_repo = https://github.com/erlio/vmq_passwd
+pkg_vmq_passwd_commit = master
+
+PACKAGES += vmq_server
+pkg_vmq_server_name = vmq_server
+pkg_vmq_server_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_server_homepage = https://verne.mq/
+pkg_vmq_server_fetch = git
+pkg_vmq_server_repo = https://github.com/erlio/vmq_server
+pkg_vmq_server_commit = master
+
+PACKAGES += vmq_snmp
+pkg_vmq_snmp_name = vmq_snmp
+pkg_vmq_snmp_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_snmp_homepage = https://verne.mq/
+pkg_vmq_snmp_fetch = git
+pkg_vmq_snmp_repo = https://github.com/erlio/vmq_snmp
+pkg_vmq_snmp_commit = master
+
+PACKAGES += vmq_systree
+pkg_vmq_systree_name = vmq_systree
+pkg_vmq_systree_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_systree_homepage = https://verne.mq/
+pkg_vmq_systree_fetch = git
+pkg_vmq_systree_repo = https://github.com/erlio/vmq_systree
+pkg_vmq_systree_commit = master
+
+PACKAGES += vmstats
+pkg_vmstats_name = vmstats
+pkg_vmstats_description = Tiny Erlang app that works with statsderl to ship Erlang VM metrics to Graphite
+pkg_vmstats_homepage = https://github.com/ferd/vmstats
+pkg_vmstats_fetch = git
+pkg_vmstats_repo = https://github.com/ferd/vmstats
+pkg_vmstats_commit = master
+
+PACKAGES += walrus
+pkg_walrus_name = walrus
+pkg_walrus_description = Walrus - Mustache-like Templating
+pkg_walrus_homepage = https://github.com/devinus/walrus
+pkg_walrus_fetch = git
+pkg_walrus_repo = https://github.com/devinus/walrus
+pkg_walrus_commit = master
+
+PACKAGES += webmachine
+pkg_webmachine_name = webmachine
+pkg_webmachine_description = A REST-based system for building web applications.
+pkg_webmachine_homepage = https://github.com/basho/webmachine
+pkg_webmachine_fetch = git
+pkg_webmachine_repo = https://github.com/basho/webmachine
+pkg_webmachine_commit = master
+
+PACKAGES += websocket_client
+pkg_websocket_client_name = websocket_client
+pkg_websocket_client_description = Erlang websocket client (ws and wss supported)
+pkg_websocket_client_homepage = https://github.com/jeremyong/websocket_client
+pkg_websocket_client_fetch = git
+pkg_websocket_client_repo = https://github.com/jeremyong/websocket_client
+pkg_websocket_client_commit = master
+
+PACKAGES += worker_pool
+pkg_worker_pool_name = worker_pool
+pkg_worker_pool_description = A simple Erlang worker pool
+pkg_worker_pool_homepage = https://github.com/inaka/worker_pool
+pkg_worker_pool_fetch = git
+pkg_worker_pool_repo = https://github.com/inaka/worker_pool
+pkg_worker_pool_commit = master
+
+PACKAGES += wrangler
+pkg_wrangler_name = wrangler
+pkg_wrangler_description = Import of the Wrangler svn repository.
+pkg_wrangler_homepage = http://www.cs.kent.ac.uk/projects/wrangler/Home.html
+pkg_wrangler_fetch = git
+pkg_wrangler_repo = https://github.com/RefactoringTools/wrangler
+pkg_wrangler_commit = master
+
+PACKAGES += wsock
+pkg_wsock_name = wsock
+pkg_wsock_description = Erlang library to build WebSocket clients and servers
+pkg_wsock_homepage = https://github.com/madtrick/wsock
+pkg_wsock_fetch = git
+pkg_wsock_repo = https://github.com/madtrick/wsock
+pkg_wsock_commit = master
+
+PACKAGES += xhttpc
+pkg_xhttpc_name = xhttpc
+pkg_xhttpc_description = Extensible HTTP Client for Erlang
+pkg_xhttpc_homepage = https://github.com/seriyps/xhttpc
+pkg_xhttpc_fetch = git
+pkg_xhttpc_repo = https://github.com/seriyps/xhttpc
+pkg_xhttpc_commit = master
+
+PACKAGES += xref_runner
+pkg_xref_runner_name = xref_runner
+pkg_xref_runner_description = Erlang Xref Runner (inspired by rebar xref)
+pkg_xref_runner_homepage = https://github.com/inaka/xref_runner
+pkg_xref_runner_fetch = git
+pkg_xref_runner_repo = https://github.com/inaka/xref_runner
+pkg_xref_runner_commit = master
+
+PACKAGES += yamerl
+pkg_yamerl_name = yamerl
+pkg_yamerl_description = YAML 1.2 parser in pure Erlang
+pkg_yamerl_homepage = https://github.com/yakaz/yamerl
+pkg_yamerl_fetch = git
+pkg_yamerl_repo = https://github.com/yakaz/yamerl
+pkg_yamerl_commit = master
+
+PACKAGES += yamler
+pkg_yamler_name = yamler
+pkg_yamler_description = libyaml-based YAML loader for Erlang
+pkg_yamler_homepage = https://github.com/goertzenator/yamler
+pkg_yamler_fetch = git
+pkg_yamler_repo = https://github.com/goertzenator/yamler
+pkg_yamler_commit = master
+
+PACKAGES += yaws
+pkg_yaws_name = yaws
+pkg_yaws_description = Yaws webserver
+pkg_yaws_homepage = http://yaws.hyber.org
+pkg_yaws_fetch = git
+pkg_yaws_repo = https://github.com/klacke/yaws
+pkg_yaws_commit = master
+
+PACKAGES += zab_engine
+pkg_zab_engine_name = zab_engine
+pkg_zab_engine_description = ZAB protocol implementation in Erlang
+pkg_zab_engine_homepage = https://github.com/xinmingyao/zab_engine
+pkg_zab_engine_fetch = git
+pkg_zab_engine_repo = https://github.com/xinmingyao/zab_engine
+pkg_zab_engine_commit = master
+
+PACKAGES += zabbix_sender
+pkg_zabbix_sender_name = zabbix_sender
+pkg_zabbix_sender_description = Zabbix trapper for sending data to Zabbix in pure Erlang
+pkg_zabbix_sender_homepage = https://github.com/stalkermn/zabbix_sender
+pkg_zabbix_sender_fetch = git
+pkg_zabbix_sender_repo = https://github.com/stalkermn/zabbix_sender.git
+pkg_zabbix_sender_commit = master
+
+PACKAGES += zeta
+pkg_zeta_name = zeta
+pkg_zeta_description = HTTP access log parser in Erlang
+pkg_zeta_homepage = https://github.com/s1n4/zeta
+pkg_zeta_fetch = git
+pkg_zeta_repo = https://github.com/s1n4/zeta
+pkg_zeta_commit = master
+
+PACKAGES += zippers
+pkg_zippers_name = zippers
+pkg_zippers_description = A library for functional zipper data structures in Erlang
+pkg_zippers_homepage = https://github.com/ferd/zippers
+pkg_zippers_fetch = git
+pkg_zippers_repo = https://github.com/ferd/zippers
+pkg_zippers_commit = master
+
+PACKAGES += zlists
+pkg_zlists_name = zlists
+pkg_zlists_description = Erlang lazy lists library.
+pkg_zlists_homepage = https://github.com/vjache/erlang-zlists
+pkg_zlists_fetch = git
+pkg_zlists_repo = https://github.com/vjache/erlang-zlists
+pkg_zlists_commit = master
+
+PACKAGES += zraft_lib
+pkg_zraft_lib_name = zraft_lib
+pkg_zraft_lib_description = Erlang raft consensus protocol implementation
+pkg_zraft_lib_homepage = https://github.com/dreyk/zraft_lib
+pkg_zraft_lib_fetch = git
+pkg_zraft_lib_repo = https://github.com/dreyk/zraft_lib
+pkg_zraft_lib_commit = master
+
+PACKAGES += zucchini
+pkg_zucchini_name = zucchini
+pkg_zucchini_description = An Erlang INI parser
+pkg_zucchini_homepage = https://github.com/devinus/zucchini
+pkg_zucchini_fetch = git
+pkg_zucchini_repo = https://github.com/devinus/zucchini
+pkg_zucchini_commit = master
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: search
+
+define pkg_print
+ $(verbose) printf "%s\n" \
+ $(if $(call core_eq,$(1),$(pkg_$(1)_name)),,"Pkg name: $(1)") \
+ "App name: $(pkg_$(1)_name)" \
+ "Description: $(pkg_$(1)_description)" \
+ "Home page: $(pkg_$(1)_homepage)" \
+ "Fetch with: $(pkg_$(1)_fetch)" \
+ "Repository: $(pkg_$(1)_repo)" \
+ "Commit: $(pkg_$(1)_commit)" \
+ ""
+
+endef
+
+search:
+ifdef q
+ $(foreach p,$(PACKAGES), \
+ $(if $(findstring $(call core_lc,$(q)),$(call core_lc,$(pkg_$(p)_name) $(pkg_$(p)_description))), \
+ $(call pkg_print,$(p))))
+else
+ $(foreach p,$(PACKAGES),$(call pkg_print,$(p)))
+endif
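+
+# Example usage of the search target above (illustrative):
+#   make search q=websocket  # list packages matching "websocket" in their
+#                            # name or description (case-insensitive)
+#   make search              # print the entire package index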
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-deps clean-tmp-deps.log
+
+# Configuration.
+
+ifdef OTP_DEPS
+$(warning The variable OTP_DEPS is deprecated in favor of LOCAL_DEPS.)
+endif
+
+IGNORE_DEPS ?=
+export IGNORE_DEPS
+
+APPS_DIR ?= $(CURDIR)/apps
+export APPS_DIR
+
+DEPS_DIR ?= $(CURDIR)/deps
+export DEPS_DIR
+
+REBAR_DEPS_DIR = $(DEPS_DIR)
+export REBAR_DEPS_DIR
+
+REBAR_GIT ?= https://github.com/rebar/rebar
+REBAR_COMMIT ?= 576e12171ab8d69b048b827b92aa65d067deea01
+
+# External "early" plugins (see core/plugins.mk for regular plugins).
+# They both use the core_dep_plugin macro.
+
+define core_dep_plugin
+ifeq ($(2),$(PROJECT))
+-include $$(patsubst $(PROJECT)/%,%,$(1))
+else
+-include $(DEPS_DIR)/$(1)
+
+$(DEPS_DIR)/$(1): $(DEPS_DIR)/$(2) ;
+endif
+endef
+
+DEP_EARLY_PLUGINS ?=
+
+$(foreach p,$(DEP_EARLY_PLUGINS),\
+ $(eval $(if $(findstring /,$p),\
+ $(call core_dep_plugin,$p,$(firstword $(subst /, ,$p))),\
+ $(call core_dep_plugin,$p/early-plugins.mk,$p))))
+
+# Query functions.
+
+query_fetch_method = $(if $(dep_$(1)),$(call _qfm_dep,$(word 1,$(dep_$(1)))),$(call _qfm_pkg,$(1)))
+_qfm_dep = $(if $(dep_fetch_$(1)),$(1),$(if $(IS_DEP),legacy,fail))
+_qfm_pkg = $(if $(pkg_$(1)_fetch),$(pkg_$(1)_fetch),fail)
+
+query_name = $(if $(dep_$(1)),$(1),$(if $(pkg_$(1)_name),$(pkg_$(1)_name),$(1)))
+
+query_repo = $(call _qr,$(1),$(call query_fetch_method,$(1)))
+_qr = $(if $(query_repo_$(2)),$(call query_repo_$(2),$(1)),$(call dep_repo,$(1)))
+
+query_repo_default = $(if $(dep_$(1)),$(word 2,$(dep_$(1))),$(pkg_$(1)_repo))
+query_repo_git = $(patsubst git://github.com/%,https://github.com/%,$(call query_repo_default,$(1)))
+query_repo_git-subfolder = $(call query_repo_git,$(1))
+query_repo_git-submodule = -
+query_repo_hg = $(call query_repo_default,$(1))
+query_repo_svn = $(call query_repo_default,$(1))
+query_repo_cp = $(call query_repo_default,$(1))
+query_repo_ln = $(call query_repo_default,$(1))
+query_repo_hex = https://hex.pm/packages/$(if $(word 3,$(dep_$(1))),$(word 3,$(dep_$(1))),$(1))
+query_repo_fail = -
+query_repo_legacy = -
+
+query_version = $(call _qv,$(1),$(call query_fetch_method,$(1)))
+_qv = $(if $(query_version_$(2)),$(call query_version_$(2),$(1)),$(call dep_commit,$(1)))
+
+query_version_default = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(word 3,$(dep_$(1))),$(pkg_$(1)_commit)))
+query_version_git = $(call query_version_default,$(1))
+query_version_git-subfolder = $(call query_version_git,$(1))
+query_version_git-submodule = -
+query_version_hg = $(call query_version_default,$(1))
+query_version_svn = -
+query_version_cp = -
+query_version_ln = -
+query_version_hex = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(word 2,$(dep_$(1))),$(pkg_$(1)_commit)))
+query_version_fail = -
+query_version_legacy = -
+
+query_extra = $(call _qe,$(1),$(call query_fetch_method,$(1)))
+_qe = $(if $(query_extra_$(2)),$(call query_extra_$(2),$(1)),-)
+
+query_extra_git = -
+query_extra_git-subfolder = $(if $(dep_$(1)),subfolder=$(word 4,$(dep_$(1))),-)
+query_extra_git-submodule = -
+query_extra_hg = -
+query_extra_svn = -
+query_extra_cp = -
+query_extra_ln = -
+query_extra_hex = $(if $(dep_$(1)),package-name=$(word 3,$(dep_$(1))),-)
+query_extra_fail = -
+query_extra_legacy = -
+
+query_absolute_path = $(addprefix $(DEPS_DIR)/,$(call query_name,$(1)))
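+
+# Worked example for the query functions above (hypothetical dependency):
+# given "dep_cowlib = git https://github.com/ninenines/cowlib 2.9.1",
+#   $(call query_fetch_method,cowlib) -> git
+#   $(call query_repo,cowlib)         -> https://github.com/ninenines/cowlib
+#   $(call query_version,cowlib)      -> 2.9.1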
+
+# Deprecated legacy query functions.
+dep_fetch = $(call query_fetch_method,$(1))
+dep_name = $(call query_name,$(1))
+dep_repo = $(call query_repo_git,$(1))
+dep_commit = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(if $(filter hex,$(word 1,$(dep_$(1)))),$(word 2,$(dep_$(1))),$(word 3,$(dep_$(1)))),$(pkg_$(1)_commit)))
+
+LOCAL_DEPS_DIRS = $(foreach a,$(LOCAL_DEPS),$(if $(wildcard $(APPS_DIR)/$(a)),$(APPS_DIR)/$(a)))
+ALL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(foreach dep,$(filter-out $(IGNORE_DEPS),$(BUILD_DEPS) $(DEPS)),$(call dep_name,$(dep))))
+
+# When an app is being built directly we don't want to include it here,
+# otherwise it would be treated both as an app and as the top-level project.
+ALL_APPS_DIRS = $(if $(wildcard $(APPS_DIR)/),$(filter-out $(APPS_DIR),$(shell find $(APPS_DIR) -maxdepth 1 -type d)))
+ifdef ROOT_DIR
+ifndef IS_APP
+ALL_APPS_DIRS := $(filter-out $(APPS_DIR)/$(notdir $(CURDIR)),$(ALL_APPS_DIRS))
+endif
+endif
+
+ifeq ($(filter $(APPS_DIR) $(DEPS_DIR),$(subst :, ,$(ERL_LIBS))),)
+ifeq ($(ERL_LIBS),)
+ ERL_LIBS = $(APPS_DIR):$(DEPS_DIR)
+else
+ ERL_LIBS := $(ERL_LIBS):$(APPS_DIR):$(DEPS_DIR)
+endif
+endif
+export ERL_LIBS
+
+export NO_AUTOPATCH
+
+# Verbosity.
+
+dep_verbose_0 = @echo " DEP $1 ($(call dep_commit,$1))";
+dep_verbose_2 = set -x;
+dep_verbose = $(dep_verbose_$(V))
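+# Like the other *_verbose helpers in this file, this keys off V: with V=0 a
+# short " DEP name (commit)" label is printed and the command itself is
+# hidden; V=2 traces the raw commands with set -x.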
+
+# Optimization: don't recompile deps unless truly necessary.
+
+ifndef IS_DEP
+ifneq ($(MAKELEVEL),0)
+$(shell rm -f ebin/dep_built)
+endif
+endif
+
+# Core targets.
+
+ALL_APPS_DIRS_TO_BUILD = $(if $(LOCAL_DEPS_DIRS)$(IS_APP),$(LOCAL_DEPS_DIRS),$(ALL_APPS_DIRS))
+
+apps:: $(ALL_APPS_DIRS) clean-tmp-deps.log | $(ERLANG_MK_TMP)
+# Create an ebin directory for all apps so that Erlang recognizes them
+# as proper OTP applications when using -include_lib. This is a temporary
+# fix; a proper fix would be to compile apps/* in the right order.
+ifndef IS_APP
+ifneq ($(ALL_APPS_DIRS),)
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ mkdir -p $$dep/ebin; \
+ done
+endif
+endif
+# At the top level: if LOCAL_DEPS is defined with at least one local app, only
+# compile that list of apps. Otherwise, compile everything.
+# Within an app: compile all LOCAL_DEPS that are (uncompiled) local apps.
+ifneq ($(ALL_APPS_DIRS_TO_BUILD),)
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS_TO_BUILD); do \
+ if grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/apps.log; then \
+ :; \
+ else \
+ echo $$dep >> $(ERLANG_MK_TMP)/apps.log; \
+ $(MAKE) -C $$dep $(if $(IS_TEST),test-build-app) IS_APP=1; \
+ fi \
+ done
+endif
+
+clean-tmp-deps.log:
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_TMP)/apps.log $(ERLANG_MK_TMP)/deps.log
+endif
+
+# Erlang.mk does not rebuild dependencies after they have been compiled
+# once. Developers working on the top-level project and some of its
+# dependencies at the same time may want to change this behavior.
+# There are two solutions (see the examples below):
+# 1. Set `FULL=1` so that all dependencies are visited and
+# recursively recompiled if necessary.
+# 2. Set `FORCE_REBUILD=` to the specific list of dependencies that
+# should be recompiled (instead of the whole set).
+
+FORCE_REBUILD ?=
+
+ifeq ($(origin FULL),undefined)
+ifneq ($(strip $(force_rebuild_dep)$(FORCE_REBUILD)),)
+define force_rebuild_dep
+echo "$(FORCE_REBUILD)" | grep -qw "$$(basename "$1")"
+endef
+endif
+endif
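+
+# Examples (illustrative; the dependency names are hypothetical):
+#   make FULL=1                        # revisit and rebuild all dependencies
+#   make FORCE_REBUILD="ranch cowlib"  # force a rebuild of only these two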
+
+ifneq ($(SKIP_DEPS),)
+deps::
+else
+deps:: $(ALL_DEPS_DIRS) apps clean-tmp-deps.log | $(ERLANG_MK_TMP)
+ifneq ($(ALL_DEPS_DIRS),)
+ $(verbose) set -e; for dep in $(ALL_DEPS_DIRS); do \
+ if grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/deps.log; then \
+ :; \
+ else \
+ echo $$dep >> $(ERLANG_MK_TMP)/deps.log; \
+ if [ -z "$(strip $(FULL))" ] $(if $(force_rebuild_dep),&& ! ($(call force_rebuild_dep,$$dep)),) && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ elif [ -f $$dep/GNUmakefile ] || [ -f $$dep/makefile ] || [ -f $$dep/Makefile ]; then \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ else \
+ echo "Error: No Makefile to build dependency $$dep." >&2; \
+ exit 2; \
+ fi \
+ fi \
+ done
+endif
+endif
+
+# Deps related targets.
+
+# @todo rename GNUmakefile and makefile into Makefile first, if they exist
+# While the Makefile could also be named GNUmakefile or makefile,
+# in practice only Makefile has been needed so far.
+define dep_autopatch
+ if [ -f $(DEPS_DIR)/$(1)/erlang.mk ]; then \
+ rm -rf $(DEPS_DIR)/$1/ebin/; \
+ $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+ $(call dep_autopatch_erlang_mk,$(1)); \
+ elif [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+ if [ -f $(DEPS_DIR)/$1/rebar.lock ]; then \
+ $(call dep_autopatch2,$1); \
+ elif [ 0 != `grep -c "include ../\w*\.mk" $(DEPS_DIR)/$(1)/Makefile` ]; then \
+ $(call dep_autopatch2,$(1)); \
+ elif [ 0 != `grep -ci "^[^#].*rebar" $(DEPS_DIR)/$(1)/Makefile` ]; then \
+ $(call dep_autopatch2,$(1)); \
+ elif [ -n "`find $(DEPS_DIR)/$(1)/ -type f -name \*.mk -not -name erlang.mk -exec grep -i "^[^#].*rebar" '{}' \;`" ]; then \
+ $(call dep_autopatch2,$(1)); \
+ fi \
+ else \
+ if [ ! -d $(DEPS_DIR)/$(1)/src/ ]; then \
+ $(call dep_autopatch_noop,$(1)); \
+ else \
+ $(call dep_autopatch2,$(1)); \
+ fi \
+ fi
+endef
+
+define dep_autopatch2
+ ! test -f $(DEPS_DIR)/$1/ebin/$1.app || \
+ mv -n $(DEPS_DIR)/$1/ebin/$1.app $(DEPS_DIR)/$1/src/$1.app.src; \
+ rm -f $(DEPS_DIR)/$1/ebin/$1.app; \
+ if [ -f $(DEPS_DIR)/$1/src/$1.app.src.script ]; then \
+ $(call erlang,$(call dep_autopatch_appsrc_script.erl,$(1))); \
+ fi; \
+ $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+ if [ -f $(DEPS_DIR)/$(1)/rebar -o -f $(DEPS_DIR)/$(1)/rebar.config -o -f $(DEPS_DIR)/$(1)/rebar.config.script -o -f $(DEPS_DIR)/$1/rebar.lock ]; then \
+ $(call dep_autopatch_fetch_rebar); \
+ $(call dep_autopatch_rebar,$(1)); \
+ else \
+ $(call dep_autopatch_gen,$(1)); \
+ fi
+endef
+
+define dep_autopatch_noop
+ printf "noop:\n" > $(DEPS_DIR)/$(1)/Makefile
+endef
+
+# Replace "include erlang.mk" with a line that will load the parent Erlang.mk
+# if given. Do it for all 3 possible Makefile file names.
+ifeq ($(NO_AUTOPATCH_ERLANG_MK),)
+define dep_autopatch_erlang_mk
+ for f in Makefile makefile GNUmakefile; do \
+ if [ -f $(DEPS_DIR)/$1/$$f ]; then \
+ sed -i.bak s/'include *erlang.mk'/'include $$(if $$(ERLANG_MK_FILENAME),$$(ERLANG_MK_FILENAME),erlang.mk)'/ $(DEPS_DIR)/$1/$$f; \
+ fi \
+ done
+endef
+else
+define dep_autopatch_erlang_mk
+ :
+endef
+endif
+
+define dep_autopatch_gen
+ printf "%s\n" \
+ "ERLC_OPTS = +debug_info" \
+ "include ../../erlang.mk" > $(DEPS_DIR)/$(1)/Makefile
+endef
+
+# We use flock/lockf when available to avoid concurrency issues.
+define dep_autopatch_fetch_rebar
+ if command -v flock >/dev/null; then \
+ flock $(ERLANG_MK_TMP)/rebar.lock sh -c "$(call dep_autopatch_fetch_rebar2)"; \
+ elif command -v lockf >/dev/null; then \
+ lockf $(ERLANG_MK_TMP)/rebar.lock sh -c "$(call dep_autopatch_fetch_rebar2)"; \
+ else \
+ $(call dep_autopatch_fetch_rebar2); \
+ fi
+endef
+
+define dep_autopatch_fetch_rebar2
+ if [ ! -d $(ERLANG_MK_TMP)/rebar ]; then \
+ git clone -q -n -- $(REBAR_GIT) $(ERLANG_MK_TMP)/rebar; \
+ cd $(ERLANG_MK_TMP)/rebar; \
+ git checkout -q $(REBAR_COMMIT); \
+ ./bootstrap; \
+ cd -; \
+ fi
+endef
+
+define dep_autopatch_rebar
+ if [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+ mv $(DEPS_DIR)/$(1)/Makefile $(DEPS_DIR)/$(1)/Makefile.orig.mk; \
+ fi; \
+ $(call erlang,$(call dep_autopatch_rebar.erl,$(1))); \
+ rm -f $(DEPS_DIR)/$(1)/ebin/$(1).app
+endef
+
+define dep_autopatch_rebar.erl
+ application:load(rebar),
+ application:set_env(rebar, log_level, debug),
+ rmemo:start(),
+ Conf1 = case file:consult("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config)") of
+ {ok, Conf0} -> Conf0;
+ _ -> []
+ end,
+ {Conf, OsEnv} = fun() ->
+ case filelib:is_file("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)") of
+ false -> {Conf1, []};
+ true ->
+ Bindings0 = erl_eval:new_bindings(),
+ Bindings1 = erl_eval:add_binding('CONFIG', Conf1, Bindings0),
+ Bindings = erl_eval:add_binding('SCRIPT', "$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings1),
+ Before = os:getenv(),
+ {ok, Conf2} = file:script("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings),
+ {Conf2, lists:foldl(fun(E, Acc) -> lists:delete(E, Acc) end, os:getenv(), Before)}
+ end
+ end(),
+ Write = fun (Text) ->
+ file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/Makefile)", Text, [append])
+ end,
+ Escape = fun (Text) ->
+ re:replace(Text, "\\\\$$", "\$$$$", [global, {return, list}])
+ end,
+ Write("IGNORE_DEPS += edown eper eunit_formatters meck node_package "
+ "rebar_lock_deps_plugin rebar_vsn_plugin reltool_util\n"),
+ Write("C_SRC_DIR = /path/do/not/exist\n"),
+ Write("C_SRC_TYPE = rebar\n"),
+ Write("DRV_CFLAGS = -fPIC\nexport DRV_CFLAGS\n"),
+ Write(["ERLANG_ARCH = ", rebar_utils:wordsize(), "\nexport ERLANG_ARCH\n"]),
+ ToList = fun
+ (V) when is_atom(V) -> atom_to_list(V);
+ (V) when is_list(V) -> "'\\"" ++ V ++ "\\"'"
+ end,
+ fun() ->
+ Write("ERLC_OPTS = +debug_info\nexport ERLC_OPTS\n"),
+ case lists:keyfind(erl_opts, 1, Conf) of
+ false -> ok;
+ {_, ErlOpts} ->
+ lists:foreach(fun
+ ({d, D}) ->
+ Write("ERLC_OPTS += -D" ++ ToList(D) ++ "=1\n");
+ ({d, DKey, DVal}) ->
+ Write("ERLC_OPTS += -D" ++ ToList(DKey) ++ "=" ++ ToList(DVal) ++ "\n");
+ ({i, I}) ->
+ Write(["ERLC_OPTS += -I ", I, "\n"]);
+ ({platform_define, Regex, D}) ->
+ case rebar_utils:is_arch(Regex) of
+ true -> Write("ERLC_OPTS += -D" ++ ToList(D) ++ "=1\n");
+ false -> ok
+ end;
+ ({parse_transform, PT}) ->
+ Write("ERLC_OPTS += +'{parse_transform, " ++ ToList(PT) ++ "}'\n");
+ (_) -> ok
+ end, ErlOpts)
+ end,
+ Write("\n")
+ end(),
+ GetHexVsn = fun(N, NP) ->
+ case file:consult("$(call core_native_path,$(DEPS_DIR)/$1/rebar.lock)") of
+ {ok, Lock} ->
+ io:format("~p~n", [Lock]),
+ case lists:keyfind("1.1.0", 1, Lock) of
+ {_, LockPkgs} ->
+ io:format("~p~n", [LockPkgs]),
+ case lists:keyfind(atom_to_binary(N, latin1), 1, LockPkgs) of
+ {_, {pkg, _, Vsn}, _} ->
+ io:format("~p~n", [Vsn]),
+ {N, {hex, NP, binary_to_list(Vsn)}};
+ _ ->
+ false
+ end;
+ _ ->
+ false
+ end;
+ _ ->
+ false
+ end
+ end,
+ SemVsn = fun
+ ("~>" ++ S0) ->
+ S = case S0 of
+ " " ++ S1 -> S1;
+ _ -> S0
+ end,
+ case length([ok || $$. <- S]) of
+ 0 -> S ++ ".0.0";
+ 1 -> S ++ ".0";
+ _ -> S
+ end;
+ (S) -> S
+ end,
+ fun() ->
+ File = case lists:keyfind(deps, 1, Conf) of
+ false -> [];
+ {_, Deps} ->
+ [begin case case Dep of
+ N when is_atom(N) -> GetHexVsn(N, N);
+ {N, S} when is_atom(N), is_list(S) -> {N, {hex, N, SemVsn(S)}};
+ {N, {pkg, NP}} when is_atom(N) -> GetHexVsn(N, NP);
+ {N, S, {pkg, NP}} -> {N, {hex, NP, S}};
+ {N, S} when is_tuple(S) -> {N, S};
+ {N, _, S} -> {N, S};
+ {N, _, S, _} -> {N, S};
+ _ -> false
+ end of
+ false -> ok;
+ {Name, Source} ->
+ {Method, Repo, Commit} = case Source of
+ {hex, NPV, V} -> {hex, V, NPV};
+ {git, R} -> {git, R, master};
+ {M, R, {branch, C}} -> {M, R, C};
+ {M, R, {ref, C}} -> {M, R, C};
+ {M, R, {tag, C}} -> {M, R, C};
+ {M, R, C} -> {M, R, C}
+ end,
+ Write(io_lib:format("DEPS += ~s\ndep_~s = ~s ~s ~s~n", [Name, Name, Method, Repo, Commit]))
+ end end || Dep <- Deps]
+ end
+ end(),
+ fun() ->
+ case lists:keyfind(erl_first_files, 1, Conf) of
+ false -> ok;
+ {_, Files} ->
+ Names = [[" ", case lists:reverse(F) of
+ "lre." ++ Elif -> lists:reverse(Elif);
+ "lrx." ++ Elif -> lists:reverse(Elif);
+ "lry." ++ Elif -> lists:reverse(Elif);
+ Elif -> lists:reverse(Elif)
+ end] || "src/" ++ F <- Files],
+ Write(io_lib:format("COMPILE_FIRST +=~s\n", [Names]))
+ end
+ end(),
+ Write("\n\nrebar_dep: preprocess pre-deps deps pre-app app\n"),
+ Write("\npreprocess::\n"),
+ Write("\npre-deps::\n"),
+ Write("\npre-app::\n"),
+ PatchHook = fun(Cmd) ->
+ Cmd2 = re:replace(Cmd, "^([g]?make)(.*)( -C.*)", "\\\\1\\\\3\\\\2", [{return, list}]),
+ case Cmd2 of
+ "make -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1);
+ "gmake -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1);
+ "make " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1);
+ "gmake " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1);
+ _ -> Escape(Cmd)
+ end
+ end,
+ fun() ->
+ case lists:keyfind(pre_hooks, 1, Conf) of
+ false -> ok;
+ {_, Hooks} ->
+ [case H of
+ {'get-deps', Cmd} ->
+ Write("\npre-deps::\n\t" ++ PatchHook(Cmd) ++ "\n");
+ {compile, Cmd} ->
+ Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n");
+ {Regex, compile, Cmd} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n");
+ false -> ok
+ end;
+ _ -> ok
+ end || H <- Hooks]
+ end
+ end(),
+ ShellToMk = fun(V0) ->
+ V1 = re:replace(V0, "[$$][(]", "$$\(shell ", [global]),
+ V = re:replace(V1, "([$$])(?![(])(\\\\w*)", "\\\\1(\\\\2)", [global]),
+ re:replace(V, "-Werror\\\\b", "", [{return, list}, global])
+ end,
+ PortSpecs = fun() ->
+ case lists:keyfind(port_specs, 1, Conf) of
+ false ->
+ case filelib:is_dir("$(call core_native_path,$(DEPS_DIR)/$1/c_src)") of
+ false -> [];
+ true ->
+ [{"priv/" ++ proplists:get_value(so_name, Conf, "$(1)_drv.so"),
+ proplists:get_value(port_sources, Conf, ["c_src/*.c"]), []}]
+ end;
+ {_, Specs} ->
+ lists:flatten([case S of
+ {Output, Input} -> {ShellToMk(Output), Input, []};
+ {Regex, Output, Input} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {ShellToMk(Output), Input, []};
+ false -> []
+ end;
+ {Regex, Output, Input, [{env, Env}]} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {ShellToMk(Output), Input, Env};
+ false -> []
+ end
+ end || S <- Specs])
+ end
+ end(),
+ PortSpecWrite = fun (Text) ->
+ file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/c_src/Makefile.erlang.mk)", Text, [append])
+ end,
+ case PortSpecs of
+ [] -> ok;
+ _ ->
+ Write("\npre-app::\n\t@$$\(MAKE) --no-print-directory -f c_src/Makefile.erlang.mk\n"),
+ PortSpecWrite(io_lib:format("ERL_CFLAGS ?= -finline-functions -Wall -fPIC -I \\"~s/erts-~s/include\\" -I \\"~s\\"\n",
+ [code:root_dir(), erlang:system_info(version), code:lib_dir(erl_interface, include)])),
+ PortSpecWrite(io_lib:format("ERL_LDFLAGS ?= -L \\"~s\\" -lei\n",
+ [code:lib_dir(erl_interface, lib)])),
+ [PortSpecWrite(["\n", E, "\n"]) || E <- OsEnv],
+ FilterEnv = fun(Env) ->
+ lists:flatten([case E of
+ {_, _} -> E;
+ {Regex, K, V} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {K, V};
+ false -> []
+ end
+ end || E <- Env])
+ end,
+ MergeEnv = fun(Env) ->
+ lists:foldl(fun ({K, V}, Acc) ->
+ case lists:keyfind(K, 1, Acc) of
+ false -> [{K, rebar_utils:expand_env_variable(V, K, "")}|Acc];
+ {_, V0} -> [{K, rebar_utils:expand_env_variable(V, K, V0)}|Acc]
+ end
+ end, [], Env)
+ end,
+ PortEnv = case lists:keyfind(port_env, 1, Conf) of
+ false -> [];
+ {_, PortEnv0} -> FilterEnv(PortEnv0)
+ end,
+ PortSpec = fun ({Output, Input0, Env}) ->
+ filelib:ensure_dir("$(call core_native_path,$(DEPS_DIR)/$1/)" ++ Output),
+ Input = [[" ", I] || I <- Input0],
+ PortSpecWrite([
+ [["\n", K, " = ", ShellToMk(V)] || {K, V} <- lists:reverse(MergeEnv(PortEnv))],
+ case $(PLATFORM) of
+ darwin -> "\n\nLDFLAGS += -flat_namespace -undefined suppress";
+ _ -> ""
+ end,
+ "\n\nall:: ", Output, "\n\t@:\n\n",
+ "%.o: %.c\n\t$$\(CC) -c -o $$\@ $$\< $$\(CFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.C\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.cc\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.cpp\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ [[Output, ": ", K, " += ", ShellToMk(V), "\n"] || {K, V} <- lists:reverse(MergeEnv(FilterEnv(Env)))],
+ Output, ": $$\(foreach ext,.c .C .cc .cpp,",
+ "$$\(patsubst %$$\(ext),%.o,$$\(filter %$$\(ext),$$\(wildcard", Input, "))))\n",
+ "\t$$\(CC) -o $$\@ $$\? $$\(LDFLAGS) $$\(ERL_LDFLAGS) $$\(DRV_LDFLAGS) $$\(EXE_LDFLAGS)",
+ case {filename:extension(Output), $(PLATFORM)} of
+ {[], _} -> "\n";
+ {_, darwin} -> "\n";
+ _ -> " -shared\n"
+ end])
+ end,
+ [PortSpec(S) || S <- PortSpecs]
+ end,
+ fun() ->
+ case lists:keyfind(plugins, 1, Conf) of
+ false -> ok;
+ {_, Plugins0} ->
+ Plugins = [P || P <- Plugins0, is_tuple(P)],
+ case lists:keyfind('lfe-compile', 1, Plugins) of
+ false -> ok;
+ _ -> Write("\nBUILD_DEPS = lfe lfe.mk\ndep_lfe.mk = git https://github.com/ninenines/lfe.mk master\nDEP_PLUGINS = lfe.mk\n")
+ end
+ end
+ end(),
+ Write("\ninclude $$\(if $$\(ERLANG_MK_FILENAME),$$\(ERLANG_MK_FILENAME),erlang.mk)"),
+ RunPlugin = fun(Plugin, Step) ->
+ case erlang:function_exported(Plugin, Step, 2) of
+ false -> ok;
+ true ->
+ c:cd("$(call core_native_path,$(DEPS_DIR)/$1/)"),
+ Ret = Plugin:Step({config, "", Conf, dict:new(), dict:new(), dict:new(),
+ dict:store(base_dir, "", dict:new())}, undefined),
+ io:format("rebar plugin ~p step ~p ret ~p~n", [Plugin, Step, Ret])
+ end
+ end,
+ fun() ->
+ case lists:keyfind(plugins, 1, Conf) of
+ false -> ok;
+ {_, Plugins0} ->
+ Plugins = [P || P <- Plugins0, is_atom(P)],
+ [begin
+ case lists:keyfind(deps, 1, Conf) of
+ false -> ok;
+ {_, Deps} ->
+ case lists:keyfind(P, 1, Deps) of
+ false -> ok;
+ _ ->
+ Path = "$(call core_native_path,$(DEPS_DIR)/)" ++ atom_to_list(P),
+ io:format("~s", [os:cmd("$(MAKE) -C $(call core_native_path,$(DEPS_DIR)/$1) " ++ Path)]),
+ io:format("~s", [os:cmd("$(MAKE) -C " ++ Path ++ " IS_DEP=1")]),
+ code:add_patha(Path ++ "/ebin")
+ end
+ end
+ end || P <- Plugins],
+ [case code:load_file(P) of
+ {module, P} -> ok;
+ _ ->
+ case lists:keyfind(plugin_dir, 1, Conf) of
+ false -> ok;
+ {_, PluginsDir} ->
+ ErlFile = "$(call core_native_path,$(DEPS_DIR)/$1/)" ++ PluginsDir ++ "/" ++ atom_to_list(P) ++ ".erl",
+ {ok, P, Bin} = compile:file(ErlFile, [binary]),
+ {module, P} = code:load_binary(P, ErlFile, Bin)
+ end
+ end || P <- Plugins],
+ [RunPlugin(P, preprocess) || P <- Plugins],
+ [RunPlugin(P, pre_compile) || P <- Plugins],
+ [RunPlugin(P, compile) || P <- Plugins]
+ end
+ end(),
+ halt()
+endef
+
+define dep_autopatch_appsrc_script.erl
+ AppSrc = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+ AppSrcScript = AppSrc ++ ".script",
+ {ok, Conf0} = file:consult(AppSrc),
+ Bindings0 = erl_eval:new_bindings(),
+ Bindings1 = erl_eval:add_binding('CONFIG', Conf0, Bindings0),
+ Bindings = erl_eval:add_binding('SCRIPT', AppSrcScript, Bindings1),
+ Conf = case file:script(AppSrcScript, Bindings) of
+ {ok, [C]} -> C;
+ {ok, C} -> C
+ end,
+ ok = file:write_file(AppSrc, io_lib:format("~p.~n", [Conf])),
+ halt()
+endef
+
+define dep_autopatch_appsrc.erl
+ AppSrcOut = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+ AppSrcIn = case filelib:is_regular(AppSrcOut) of false -> "$(call core_native_path,$(DEPS_DIR)/$1/ebin/$1.app)"; true -> AppSrcOut end,
+ case filelib:is_regular(AppSrcIn) of
+ false -> ok;
+ true ->
+ {ok, [{application, $(1), L0}]} = file:consult(AppSrcIn),
+ L1 = lists:keystore(modules, 1, L0, {modules, []}),
+ L2 = case lists:keyfind(vsn, 1, L1) of
+ {_, git} -> lists:keyreplace(vsn, 1, L1, {vsn, lists:droplast(os:cmd("git -C $(DEPS_DIR)/$1 describe --dirty --tags --always"))});
+ {_, {cmd, _}} -> lists:keyreplace(vsn, 1, L1, {vsn, "cmd"});
+ _ -> L1
+ end,
+ L3 = case lists:keyfind(registered, 1, L2) of false -> [{registered, []}|L2]; _ -> L2 end,
+ ok = file:write_file(AppSrcOut, io_lib:format("~p.~n", [{application, $(1), L3}])),
+ case AppSrcOut of AppSrcIn -> ok; _ -> ok = file:delete(AppSrcIn) end
+ end,
+ halt()
+endef
+
+define dep_fetch_git
+ git clone -q -n -- $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && git checkout -q $(call dep_commit,$(1));
+endef
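+
+# Example declaration resolved by the fetch method above (hypothetical dep):
+#   DEPS = cowboy
+#   dep_cowboy = git https://github.com/ninenines/cowboy 2.9.0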
+
+define dep_fetch_git-subfolder
+ mkdir -p $(ERLANG_MK_TMP)/git-subfolder; \
+ git clone -q -n -- $(call dep_repo,$1) \
+ $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1); \
+ cd $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1) \
+ && git checkout -q $(call dep_commit,$1); \
+ ln -s $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1)/$(word 4,$(dep_$(1))) \
+ $(DEPS_DIR)/$(call dep_name,$1);
+endef
+
+define dep_fetch_git-submodule
+ git submodule update --init -- $(DEPS_DIR)/$1;
+endef
+
+define dep_fetch_hg
+ hg clone -q -U $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && hg update -q $(call dep_commit,$(1));
+endef
+
+define dep_fetch_svn
+ svn checkout -q $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+define dep_fetch_cp
+ cp -R $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+define dep_fetch_ln
+ ln -s $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+# Hex dependencies only carry a package version, so there is no need to
+# consult the Erlang.mk package index.
+define dep_fetch_hex
+ mkdir -p $(ERLANG_MK_TMP)/hex $(DEPS_DIR)/$1; \
+ $(call core_http_get,$(ERLANG_MK_TMP)/hex/$1.tar,\
+ https://repo.hex.pm/tarballs/$(if $(word 3,$(dep_$1)),$(word 3,$(dep_$1)),$1)-$(strip $(word 2,$(dep_$1))).tar); \
+ tar -xOf $(ERLANG_MK_TMP)/hex/$1.tar contents.tar.gz | tar -C $(DEPS_DIR)/$1 -xzf -;
+endef
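+
+# Example declarations (hypothetical), format "dep_NAME = hex Version [Package]":
+#   dep_jsx = hex 2.9.0       # hex package name defaults to the dep name
+#   dep_pc  = hex 1.10.0 pc   # optional third word overrides the package name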
+
+define dep_fetch_fail
+ echo "Error: Unknown or invalid dependency: $(1)." >&2; \
+ exit 78;
+endef
+
+# Kept for compatibility with older Erlang.mk configurations.
+define dep_fetch_legacy
+ $(warning WARNING: '$(1)' dependency configuration uses deprecated format.) \
+ git clone -q -n -- $(word 1,$(dep_$(1))) $(DEPS_DIR)/$(1); \
+ cd $(DEPS_DIR)/$(1) && git checkout -q $(if $(word 2,$(dep_$(1))),$(word 2,$(dep_$(1))),master);
+endef
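+
+# Deprecated format handled above, for reference only:
+#   dep_NAME = <repository URL> [commit]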
+
+define dep_target
+$(DEPS_DIR)/$(call dep_name,$1): | $(ERLANG_MK_TMP)
+ $(eval DEP_NAME := $(call dep_name,$1))
+ $(eval DEP_STR := $(if $(filter $1,$(DEP_NAME)),$1,"$1 ($(DEP_NAME))"))
+ $(verbose) if test -d $(APPS_DIR)/$(DEP_NAME); then \
+ echo "Error: Dependency" $(DEP_STR) "conflicts with application found in $(APPS_DIR)/$(DEP_NAME)." >&2; \
+ exit 17; \
+ fi
+ $(verbose) mkdir -p $(DEPS_DIR)
+ $(dep_verbose) $(call dep_fetch_$(strip $(call dep_fetch,$(1))),$(1))
+ $(verbose) if [ -f $(DEPS_DIR)/$(1)/configure.ac -o -f $(DEPS_DIR)/$(1)/configure.in ] \
+ && [ ! -f $(DEPS_DIR)/$(1)/configure ]; then \
+ echo " AUTO " $(DEP_STR); \
+ cd $(DEPS_DIR)/$(1) && autoreconf -Wall -vif -I m4; \
+ fi
+ - $(verbose) if [ -f $(DEPS_DIR)/$(DEP_NAME)/configure ]; then \
+ echo " CONF " $(DEP_STR); \
+ cd $(DEPS_DIR)/$(DEP_NAME) && ./configure; \
+ fi
+ifeq ($(filter $(1),$(NO_AUTOPATCH)),)
+ $(verbose) $$(MAKE) --no-print-directory autopatch-$(DEP_NAME)
+endif
+
+.PHONY: autopatch-$(call dep_name,$1)
+
+autopatch-$(call dep_name,$1)::
+ $(verbose) if [ "$(1)" = "amqp_client" -a "$(RABBITMQ_CLIENT_PATCH)" ]; then \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \
+ echo " PATCH Downloading rabbitmq-codegen"; \
+ git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \
+ fi; \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-server ]; then \
+ echo " PATCH Downloading rabbitmq-server"; \
+ git clone https://github.com/rabbitmq/rabbitmq-server.git $(DEPS_DIR)/rabbitmq-server; \
+ fi; \
+ ln -s $(DEPS_DIR)/amqp_client/deps/rabbit_common-0.0.0 $(DEPS_DIR)/rabbit_common; \
+ elif [ "$(1)" = "rabbit" -a "$(RABBITMQ_SERVER_PATCH)" ]; then \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \
+ echo " PATCH Downloading rabbitmq-codegen"; \
+ git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \
+ fi \
+ elif [ "$1" = "elixir" -a "$(ELIXIR_PATCH)" ]; then \
+ ln -s lib/elixir/ebin $(DEPS_DIR)/elixir/; \
+ else \
+ $$(call dep_autopatch,$(call dep_name,$1)) \
+ fi
+endef
+
+$(foreach dep,$(BUILD_DEPS) $(DEPS),$(eval $(call dep_target,$(dep))))
+
+ifndef IS_APP
+clean:: clean-apps
+
+clean-apps:
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ $(MAKE) -C $$dep clean IS_APP=1; \
+ done
+
+distclean:: distclean-apps
+
+distclean-apps:
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ $(MAKE) -C $$dep distclean IS_APP=1; \
+ done
+endif
+
+ifndef SKIP_DEPS
+distclean:: distclean-deps
+
+distclean-deps:
+ $(gen_verbose) rm -rf $(DEPS_DIR)
+endif
+
+# Forward-declare variables used in core/deps-tools.mk. This is required
+# in case plugins use them.
+
+ERLANG_MK_RECURSIVE_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-deps-list.log
+ERLANG_MK_RECURSIVE_DOC_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-doc-deps-list.log
+ERLANG_MK_RECURSIVE_REL_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-rel-deps-list.log
+ERLANG_MK_RECURSIVE_TEST_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-test-deps-list.log
+ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-shell-deps-list.log
+
+ERLANG_MK_QUERY_DEPS_FILE = $(ERLANG_MK_TMP)/query-deps.log
+ERLANG_MK_QUERY_DOC_DEPS_FILE = $(ERLANG_MK_TMP)/query-doc-deps.log
+ERLANG_MK_QUERY_REL_DEPS_FILE = $(ERLANG_MK_TMP)/query-rel-deps.log
+ERLANG_MK_QUERY_TEST_DEPS_FILE = $(ERLANG_MK_TMP)/query-test-deps.log
+ERLANG_MK_QUERY_SHELL_DEPS_FILE = $(ERLANG_MK_TMP)/query-shell-deps.log
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: clean-app
+
+# Configuration.
+
+ERLC_OPTS ?= -Werror +debug_info +warn_export_vars +warn_shadow_vars \
+ +warn_obsolete_guard # +bin_opt_info +warn_export_all +warn_missing_spec
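+# A project may override or extend these defaults in its own Makefile, e.g.:
+#   ERLC_OPTS = +debug_info +warn_missing_spec
+#   ERLC_OPTS += +bin_opt_info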
+COMPILE_FIRST ?=
+COMPILE_FIRST_PATHS = $(addprefix src/,$(addsuffix .erl,$(COMPILE_FIRST)))
+ERLC_EXCLUDE ?=
+ERLC_EXCLUDE_PATHS = $(addprefix src/,$(addsuffix .erl,$(ERLC_EXCLUDE)))
+
+ERLC_ASN1_OPTS ?=
+
+ERLC_MIB_OPTS ?=
+COMPILE_MIB_FIRST ?=
+COMPILE_MIB_FIRST_PATHS = $(addprefix mibs/,$(addsuffix .mib,$(COMPILE_MIB_FIRST)))
+
+# Verbosity.
+
+app_verbose_0 = @echo " APP " $(PROJECT);
+app_verbose_2 = set -x;
+app_verbose = $(app_verbose_$(V))
+
+appsrc_verbose_0 = @echo " APP " $(PROJECT).app.src;
+appsrc_verbose_2 = set -x;
+appsrc_verbose = $(appsrc_verbose_$(V))
+
+makedep_verbose_0 = @echo " DEPEND" $(PROJECT).d;
+makedep_verbose_2 = set -x;
+makedep_verbose = $(makedep_verbose_$(V))
+
+erlc_verbose_0 = @echo " ERLC " $(filter-out $(patsubst %,%.erl,$(ERLC_EXCLUDE)),\
+ $(filter %.erl %.core,$(?F)));
+erlc_verbose_2 = set -x;
+erlc_verbose = $(erlc_verbose_$(V))
+
+xyrl_verbose_0 = @echo " XYRL " $(filter %.xrl %.yrl,$(?F));
+xyrl_verbose_2 = set -x;
+xyrl_verbose = $(xyrl_verbose_$(V))
+
+asn1_verbose_0 = @echo " ASN1 " $(filter %.asn1,$(?F));
+asn1_verbose_2 = set -x;
+asn1_verbose = $(asn1_verbose_$(V))
+
+mib_verbose_0 = @echo " MIB " $(filter %.bin %.mib,$(?F));
+mib_verbose_2 = set -x;
+mib_verbose = $(mib_verbose_$(V))
+
+ifneq ($(wildcard src/),)
+
+# Targets.
+
+app:: $(if $(wildcard ebin/test),clean) deps
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d
+ $(verbose) $(MAKE) --no-print-directory app-build
+
+ifeq ($(wildcard src/$(PROJECT_MOD).erl),)
+define app_file
+{application, '$(PROJECT)', [
+ {description, "$(PROJECT_DESCRIPTION)"},
+ {vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP),
+ {id$(comma)$(space)"$(1)"}$(comma))
+ {modules, [$(call comma_list,$(2))]},
+ {registered, []},
+ {applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(foreach dep,$(DEPS),$(call dep_name,$(dep))))]},
+ {env, $(subst \,\\,$(PROJECT_ENV))}$(if $(findstring {,$(PROJECT_APP_EXTRA_KEYS)),$(comma)$(newline)$(tab)$(subst \,\\,$(PROJECT_APP_EXTRA_KEYS)),)
+]}.
+endef
+else
+define app_file
+{application, '$(PROJECT)', [
+ {description, "$(PROJECT_DESCRIPTION)"},
+ {vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP),
+ {id$(comma)$(space)"$(1)"}$(comma))
+ {modules, [$(call comma_list,$(2))]},
+ {registered, [$(call comma_list,$(PROJECT)_sup $(PROJECT_REGISTERED))]},
+ {applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(foreach dep,$(DEPS),$(call dep_name,$(dep))))]},
+ {mod, {$(PROJECT_MOD), []}},
+ {env, $(subst \,\\,$(PROJECT_ENV))}$(if $(findstring {,$(PROJECT_APP_EXTRA_KEYS)),$(comma)$(newline)$(tab)$(subst \,\\,$(PROJECT_APP_EXTRA_KEYS)),)
+]}.
+endef
+endif
+
+app-build: ebin/$(PROJECT).app
+ $(verbose) :
+
+# Source files.
+
+ALL_SRC_FILES := $(sort $(call core_find,src/,*))
+
+ERL_FILES := $(filter %.erl,$(ALL_SRC_FILES))
+CORE_FILES := $(filter %.core,$(ALL_SRC_FILES))
+
+# ASN.1 files.
+
+ifneq ($(wildcard asn1/),)
+ASN1_FILES = $(sort $(call core_find,asn1/,*.asn1))
+ERL_FILES += $(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES))))
+
+define compile_asn1
+ $(verbose) mkdir -p include/
+ $(asn1_verbose) erlc -v -I include/ -o asn1/ +noobj $(ERLC_ASN1_OPTS) $(1)
+ $(verbose) mv asn1/*.erl src/
+ -$(verbose) mv asn1/*.hrl include/
+ $(verbose) mv asn1/*.asn1db include/
+endef
+
+$(PROJECT).d:: $(ASN1_FILES)
+ $(if $(strip $?),$(call compile_asn1,$?))
+endif
+
+# SNMP MIB files.
+
+ifneq ($(wildcard mibs/),)
+MIB_FILES = $(sort $(call core_find,mibs/,*.mib))
+
+$(PROJECT).d:: $(COMPILE_MIB_FIRST_PATHS) $(MIB_FILES)
+ $(verbose) mkdir -p include/ priv/mibs/
+ $(mib_verbose) erlc -v $(ERLC_MIB_OPTS) -o priv/mibs/ -I priv/mibs/ $?
+ $(mib_verbose) erlc -o include/ -- $(addprefix priv/mibs/,$(patsubst %.mib,%.bin,$(notdir $?)))
+endif
+
+# Leex and Yecc files.
+
+XRL_FILES := $(filter %.xrl,$(ALL_SRC_FILES))
+XRL_ERL_FILES = $(addprefix src/,$(patsubst %.xrl,%.erl,$(notdir $(XRL_FILES))))
+ERL_FILES += $(XRL_ERL_FILES)
+
+YRL_FILES := $(filter %.yrl,$(ALL_SRC_FILES))
+YRL_ERL_FILES = $(addprefix src/,$(patsubst %.yrl,%.erl,$(notdir $(YRL_FILES))))
+ERL_FILES += $(YRL_ERL_FILES)
+
+$(PROJECT).d:: $(XRL_FILES) $(YRL_FILES)
+ $(if $(strip $?),$(xyrl_verbose) erlc -v -o src/ $(YRL_ERLC_OPTS) $?)
+
+# Erlang and Core Erlang files.
+
+define makedep.erl
+ E = ets:new(makedep, [bag]),
+ G = digraph:new([acyclic]),
+ ErlFiles = lists:usort(string:tokens("$(ERL_FILES)", " ")),
+ DepsDir = "$(call core_native_path,$(DEPS_DIR))",
+ AppsDir = "$(call core_native_path,$(APPS_DIR))",
+ DepsDirsSrc = "$(if $(wildcard $(DEPS_DIR)/*/src), $(call core_native_path,$(wildcard $(DEPS_DIR)/*/src)))",
+ DepsDirsInc = "$(if $(wildcard $(DEPS_DIR)/*/include), $(call core_native_path,$(wildcard $(DEPS_DIR)/*/include)))",
+ AppsDirsSrc = "$(if $(wildcard $(APPS_DIR)/*/src), $(call core_native_path,$(wildcard $(APPS_DIR)/*/src)))",
+ AppsDirsInc = "$(if $(wildcard $(APPS_DIR)/*/include), $(call core_native_path,$(wildcard $(APPS_DIR)/*/include)))",
+ DepsDirs = lists:usort(string:tokens(DepsDirsSrc++DepsDirsInc, " ")),
+ AppsDirs = lists:usort(string:tokens(AppsDirsSrc++AppsDirsInc, " ")),
+ Modules = [{list_to_atom(filename:basename(F, ".erl")), F} || F <- ErlFiles],
+ Add = fun (Mod, Dep) ->
+ case lists:keyfind(Dep, 1, Modules) of
+ false -> ok;
+ {_, DepFile} ->
+ {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+ ets:insert(E, {ModFile, DepFile}),
+ digraph:add_vertex(G, Mod),
+ digraph:add_vertex(G, Dep),
+ digraph:add_edge(G, Mod, Dep)
+ end
+ end,
+ AddHd = fun (F, Mod, DepFile) ->
+ case file:open(DepFile, [read]) of
+ {error, enoent} ->
+ ok;
+ {ok, Fd} ->
+ {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+ case ets:match(E, {ModFile, DepFile}) of
+ [] ->
+ ets:insert(E, {ModFile, DepFile}),
+ F(F, Fd, Mod,0);
+ _ -> ok
+ end
+ end
+ end,
+ SearchHrl = fun
+ F(_Hrl, []) -> {error,enoent};
+ F(Hrl, [Dir|Dirs]) ->
+ HrlF = filename:join([Dir,Hrl]),
+ case filelib:is_file(HrlF) of
+ true ->
+ {ok, HrlF};
+ false -> F(Hrl,Dirs)
+ end
+ end,
+ Attr = fun
+ (_F, Mod, behavior, Dep) ->
+ Add(Mod, Dep);
+ (_F, Mod, behaviour, Dep) ->
+ Add(Mod, Dep);
+ (_F, Mod, compile, {parse_transform, Dep}) ->
+ Add(Mod, Dep);
+ (_F, Mod, compile, Opts) when is_list(Opts) ->
+ case proplists:get_value(parse_transform, Opts) of
+ undefined -> ok;
+ Dep -> Add(Mod, Dep)
+ end;
+ (F, Mod, include, Hrl) ->
+ case SearchHrl(Hrl, ["src", "include",AppsDir,DepsDir]++AppsDirs++DepsDirs) of
+ {ok, FoundHrl} -> AddHd(F, Mod, FoundHrl);
+ {error, _} -> false
+ end;
+ (F, Mod, include_lib, Hrl) ->
+ case SearchHrl(Hrl, ["src", "include",AppsDir,DepsDir]++AppsDirs++DepsDirs) of
+ {ok, FoundHrl} -> AddHd(F, Mod, FoundHrl);
+ {error, _} -> false
+ end;
+ (F, Mod, import, {Imp, _}) ->
+ IsFile =
+ case lists:keyfind(Imp, 1, Modules) of
+ false -> false;
+ {_, FilePath} -> filelib:is_file(FilePath)
+ end,
+ case IsFile of
+ false -> ok;
+ true -> Add(Mod, Imp)
+ end;
+ (_, _, _, _) -> ok
+ end,
+ MakeDepend = fun
+ (F, Fd, Mod, StartLocation) ->
+ {ok, Filename} = file:pid2name(Fd),
+ case io:parse_erl_form(Fd, undefined, StartLocation) of
+ {ok, AbsData, EndLocation} ->
+ case AbsData of
+ {attribute, _, Key, Value} ->
+ Attr(F, Mod, Key, Value),
+ F(F, Fd, Mod, EndLocation);
+ _ -> F(F, Fd, Mod, EndLocation)
+ end;
+ {eof, _ } -> file:close(Fd);
+ {error, ErrorDescription } ->
+ file:close(Fd);
+ {error, ErrorInfo, ErrorLocation} ->
+ F(F, Fd, Mod, ErrorLocation)
+ end,
+ ok
+ end,
+ [begin
+ Mod = list_to_atom(filename:basename(F, ".erl")),
+ case file:open(F, [read]) of
+ {ok, Fd} -> MakeDepend(MakeDepend, Fd, Mod,0);
+ {error, enoent} -> ok
+ end
+ end || F <- ErlFiles],
+ Depend = sofs:to_external(sofs:relation_to_family(sofs:relation(ets:tab2list(E)))),
+ CompileFirst = [X || X <- lists:reverse(digraph_utils:topsort(G)), [] =/= digraph:in_neighbours(G, X)],
+ TargetPath = fun(Target) ->
+ case lists:keyfind(Target, 1, Modules) of
+ false -> "";
+ {_, DepFile} ->
+ DirSubname = tl(string:tokens(filename:dirname(DepFile), "/")),
+ string:join(DirSubname ++ [atom_to_list(Target)], "/")
+ end
+ end,
+ Output0 = [
+ "# Generated by Erlang.mk. Edit at your own risk!\n\n",
+ [[F, "::", [[" ", D] || D <- Deps], "; @touch \$$@\n"] || {F, Deps} <- Depend],
+ "\nCOMPILE_FIRST +=", [[" ", TargetPath(CF)] || CF <- CompileFirst], "\n"
+ ],
+ Output = case "é" of
+ [233] -> unicode:characters_to_binary(Output0);
+ _ -> Output0
+ end,
+ ok = file:write_file("$(1)", Output),
+ halt()
+endef
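+
+# For reference: the generated $(PROJECT).d contains one "<file>:: <deps>"
+# rule per source file (with a "@touch $@" recipe) plus a COMPILE_FIRST list
+# computed from the topologically sorted module dependency graph.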
+
+ifeq ($(if $(NO_MAKEDEP),$(wildcard $(PROJECT).d),),)
+$(PROJECT).d:: $(ERL_FILES) $(call core_find,include/,*.hrl) $(MAKEFILE_LIST)
+ $(makedep_verbose) $(call erlang,$(call makedep.erl,$@))
+endif
+
+ifeq ($(IS_APP)$(IS_DEP),)
+ifneq ($(words $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES)),0)
+# Rebuild everything when the Makefile changes.
+$(ERLANG_MK_TMP)/last-makefile-change: $(MAKEFILE_LIST) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES); \
+ touch -c $(PROJECT).d; \
+ fi
+ $(verbose) touch $@
+
+$(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES):: $(ERLANG_MK_TMP)/last-makefile-change
+ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change
+endif
+endif
+
+$(PROJECT).d::
+ $(verbose) :
+
+include $(wildcard $(PROJECT).d)
+
+ebin/$(PROJECT).app:: ebin/
+
+ebin/:
+ $(verbose) mkdir -p ebin/
+
+define compile_erl
+ $(erlc_verbose) erlc -v $(if $(IS_DEP),$(filter-out -Werror,$(ERLC_OPTS)),$(ERLC_OPTS)) -o ebin/ \
+ -pa ebin/ -I include/ $(filter-out $(ERLC_EXCLUDE_PATHS),$(COMPILE_FIRST_PATHS) $(1))
+endef
+
+define validate_app_file
+ case file:consult("ebin/$(PROJECT).app") of
+ {ok, _} -> halt();
+ _ -> halt(1)
+ end
+endef
+
+ebin/$(PROJECT).app:: $(ERL_FILES) $(CORE_FILES) $(wildcard src/$(PROJECT).app.src)
+ $(eval FILES_TO_COMPILE := $(filter-out src/$(PROJECT).app.src,$?))
+ $(if $(strip $(FILES_TO_COMPILE)),$(call compile_erl,$(FILES_TO_COMPILE)))
+# Older git versions do not have the --first-parent flag. Do without in that case.
+ $(eval GITDESCRIBE := $(shell git describe --dirty --abbrev=7 --tags --always --first-parent 2>/dev/null \
+ || git describe --dirty --abbrev=7 --tags --always 2>/dev/null || true))
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(filter-out $(ERLC_EXCLUDE_PATHS),$(ERL_FILES) $(CORE_FILES) $(BEAM_FILES)))))))
+ifeq ($(wildcard src/$(PROJECT).app.src),)
+ $(app_verbose) printf '$(subst %,%%,$(subst $(newline),\n,$(subst ','\'',$(call app_file,$(GITDESCRIBE),$(MODULES)))))' \
+ > ebin/$(PROJECT).app
+ $(verbose) if ! $(call erlang,$(call validate_app_file)); then \
+ echo "The .app file produced is invalid. Please verify the value of PROJECT_ENV." >&2; \
+ exit 1; \
+ fi
+else
+ $(verbose) if [ -z "$$(grep -e '^[^%]*{\s*modules\s*,' src/$(PROJECT).app.src)" ]; then \
+ echo "Empty modules entry not found in $(PROJECT).app.src. Please consult the erlang.mk documentation for instructions." >&2; \
+ exit 1; \
+ fi
+ $(appsrc_verbose) cat src/$(PROJECT).app.src \
+ | sed "s/{[[:space:]]*modules[[:space:]]*,[[:space:]]*\[\]}/{modules, \[$(call comma_list,$(MODULES))\]}/" \
+ | sed "s/{id,[[:space:]]*\"git\"}/{id, \"$(subst /,\/,$(GITDESCRIBE))\"}/" \
+ > ebin/$(PROJECT).app
+endif
+ifneq ($(wildcard src/$(PROJECT).appup),)
+ $(verbose) cp src/$(PROJECT).appup ebin/
+endif
+
+clean:: clean-app
+
+clean-app:
+ $(gen_verbose) rm -rf $(PROJECT).d ebin/ priv/mibs/ $(XRL_ERL_FILES) $(YRL_ERL_FILES) \
+ $(addprefix include/,$(patsubst %.mib,%.hrl,$(notdir $(MIB_FILES)))) \
+ $(addprefix include/,$(patsubst %.asn1,%.hrl,$(notdir $(ASN1_FILES)))) \
+ $(addprefix include/,$(patsubst %.asn1,%.asn1db,$(notdir $(ASN1_FILES)))) \
+ $(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES))))
+
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Viktor Söderqvist <viktor@zuiderkwast.se>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: docs-deps
+
+# Configuration.
+
+ALL_DOC_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(DOC_DEPS))
+
+# Targets.
+
+$(foreach dep,$(DOC_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+doc-deps:
+else
+doc-deps: $(ALL_DOC_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_DOC_DEPS_DIRS) ; do $(MAKE) -C $$dep IS_DEP=1; done
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: rel-deps
+
+# Configuration.
+
+ALL_REL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(REL_DEPS))
+
+# Targets.
+
+$(foreach dep,$(REL_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+rel-deps:
+else
+rel-deps: $(ALL_REL_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_REL_DEPS_DIRS) ; do $(MAKE) -C $$dep; done
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: test-deps test-dir test-build clean-test-dir
+
+# Configuration.
+
+TEST_DIR ?= $(CURDIR)/test
+
+ALL_TEST_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(TEST_DEPS))
+
+TEST_ERLC_OPTS ?= +debug_info +warn_export_vars +warn_shadow_vars +warn_obsolete_guard
+TEST_ERLC_OPTS += -DTEST=1
+
+# Targets.
+
+$(foreach dep,$(TEST_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+test-deps:
+else
+test-deps: $(ALL_TEST_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_TEST_DEPS_DIRS) ; do \
+ if [ -z "$(strip $(FULL))" ] && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ else \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ fi \
+ done
+endif
+
+ifneq ($(wildcard $(TEST_DIR)),)
+test-dir: $(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build
+ @:
+
+test_erlc_verbose_0 = @echo " ERLC " $(filter-out $(patsubst %,%.erl,$(ERLC_EXCLUDE)),\
+ $(filter %.erl %.core,$(notdir $(FILES_TO_COMPILE))));
+test_erlc_verbose_2 = set -x;
+test_erlc_verbose = $(test_erlc_verbose_$(V))
+
+define compile_test_erl
+ $(test_erlc_verbose) erlc -v $(TEST_ERLC_OPTS) -o $(TEST_DIR) \
+ -pa ebin/ -I include/ $(1)
+endef
+
+ERL_TEST_FILES = $(call core_find,$(TEST_DIR)/,*.erl)
+$(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build: $(ERL_TEST_FILES) $(MAKEFILE_LIST)
+ $(eval FILES_TO_COMPILE := $(if $(filter $(MAKEFILE_LIST),$?),$(filter $(ERL_TEST_FILES),$^),$?))
+ $(if $(strip $(FILES_TO_COMPILE)),$(call compile_test_erl,$(FILES_TO_COMPILE)) && touch $@)
+endif
+
+test-build:: IS_TEST=1
+test-build:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build:: $(if $(wildcard src),$(if $(wildcard ebin/test),,clean)) $(if $(IS_APP),,deps test-deps)
+# We already compiled everything when IS_APP=1.
+ifndef IS_APP
+ifneq ($(wildcard src),)
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(verbose) $(MAKE) --no-print-directory app-build ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(gen_verbose) touch ebin/test
+endif
+ifneq ($(wildcard $(TEST_DIR)),)
+ $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+endif
+endif
+
+# Roughly the same as test-build, but when IS_APP=1.
+# We only care about compiling the current application.
+ifdef IS_APP
+test-build-app:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build-app:: deps test-deps
+ifneq ($(wildcard src),)
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(verbose) $(MAKE) --no-print-directory app-build ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(gen_verbose) touch ebin/test
+endif
+ifneq ($(wildcard $(TEST_DIR)),)
+ $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+endif
+endif
+
+clean:: clean-test-dir
+
+clean-test-dir:
+ifneq ($(wildcard $(TEST_DIR)/*.beam),)
+ $(gen_verbose) rm -f $(TEST_DIR)/*.beam $(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: rebar.config
+
+# We strip out -Werror because we don't want to fail due to
+# warnings when used as a dependency.
+
+compat_prepare_erlc_opts = $(shell echo "$1" | sed 's/, */,/g')
+
+define compat_convert_erlc_opts
+$(if $(filter-out -Werror,$1),\
+ $(if $(findstring +,$1),\
+ $(shell echo $1 | cut -b 2-)))
+endef
+
+define compat_erlc_opts_to_list
+[$(call comma_list,$(foreach o,$(call compat_prepare_erlc_opts,$1),$(call compat_convert_erlc_opts,$o)))]
+endef
+
+define compat_rebar_config
+{deps, [
+$(call comma_list,$(foreach d,$(DEPS),\
+ $(if $(filter hex,$(call dep_fetch,$d)),\
+ {$(call dep_name,$d)$(comma)"$(call dep_repo,$d)"},\
+ {$(call dep_name,$d)$(comma)".*"$(comma){git,"$(call dep_repo,$d)"$(comma)"$(call dep_commit,$d)"}})))
+]}.
+{erl_opts, $(call compat_erlc_opts_to_list,$(ERLC_OPTS))}.
+endef
+
+rebar.config:
+ $(gen_verbose) $(call core_render,compat_rebar_config,rebar.config)
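+
+# Illustrative usage: running "make rebar.config" in a project renders the
+# template above, letting rebar-based projects consume it as a dependency.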
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter asciideck,$(DEPS) $(DOC_DEPS)),asciideck)
+
+.PHONY: asciidoc asciidoc-guide asciidoc-manual install-asciidoc distclean-asciidoc-guide distclean-asciidoc-manual
+
+# Core targets.
+
+docs:: asciidoc
+
+distclean:: distclean-asciidoc-guide distclean-asciidoc-manual
+
+# Plugin-specific targets.
+
+asciidoc: asciidoc-guide asciidoc-manual
+
+# User guide.
+
+ifeq ($(wildcard doc/src/guide/book.asciidoc),)
+asciidoc-guide:
+else
+asciidoc-guide: distclean-asciidoc-guide doc-deps
+ a2x -v -f pdf doc/src/guide/book.asciidoc && mv doc/src/guide/book.pdf doc/guide.pdf
+ a2x -v -f chunked doc/src/guide/book.asciidoc && mv doc/src/guide/book.chunked/ doc/html/
+
+distclean-asciidoc-guide:
+ $(gen_verbose) rm -rf doc/html/ doc/guide.pdf
+endif
+
+# Man pages.
+
+ASCIIDOC_MANUAL_FILES := $(wildcard doc/src/manual/*.asciidoc)
+
+ifeq ($(ASCIIDOC_MANUAL_FILES),)
+asciidoc-manual:
+else
+
+# Configuration.
+
+MAN_INSTALL_PATH ?= /usr/local/share/man
+MAN_SECTIONS ?= 3 7
+MAN_PROJECT ?= $(shell echo $(PROJECT) | sed 's/^./\U&\E/')
+MAN_VERSION ?= $(PROJECT_VERSION)
+
+# Plugin-specific targets.
+
+define asciidoc2man.erl
+try
+ [begin
+ io:format(" ADOC ~s~n", [F]),
+ ok = asciideck:to_manpage(asciideck:parse_file(F), #{
+ compress => gzip,
+ outdir => filename:dirname(F),
+ extra2 => "$(MAN_PROJECT) $(MAN_VERSION)",
+ extra3 => "$(MAN_PROJECT) Function Reference"
+ })
+ end || F <- [$(shell echo $(addprefix $(comma)\",$(addsuffix \",$1)) | sed 's/^.//')]],
+ halt(0)
+catch C:E ->
+ io:format("Exception ~p:~p~nStacktrace: ~p~n", [C, E, erlang:get_stacktrace()]),
+ halt(1)
+end.
+endef
+
+asciidoc-manual:: doc-deps
+
+asciidoc-manual:: $(ASCIIDOC_MANUAL_FILES)
+ $(gen_verbose) $(call erlang,$(call asciidoc2man.erl,$?))
+ $(verbose) $(foreach s,$(MAN_SECTIONS),mkdir -p doc/man$s/ && mv doc/src/manual/*.$s.gz doc/man$s/;)
+
+install-docs:: install-asciidoc
+
+install-asciidoc: asciidoc-manual
+ $(foreach s,$(MAN_SECTIONS),\
+ mkdir -p $(MAN_INSTALL_PATH)/man$s/ && \
+ install -g `id -g` -o `id -u` -m 0644 doc/man$s/*.gz $(MAN_INSTALL_PATH)/man$s/;)
+
+distclean-asciidoc-manual:
+ $(gen_verbose) rm -rf $(addprefix doc/man,$(MAN_SECTIONS))
+endif
+endif
+
+# Copyright (c) 2014-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: bootstrap bootstrap-lib bootstrap-rel new list-templates
+
+# Core targets.
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Bootstrap targets:" \
+ " bootstrap Generate a skeleton of an OTP application" \
+ " bootstrap-lib Generate a skeleton of an OTP library" \
+ " bootstrap-rel Generate the files needed to build a release" \
+ " new-app in=NAME Create a new local OTP application NAME" \
+ " new-lib in=NAME Create a new local OTP library NAME" \
+ " new t=TPL n=NAME Generate a module NAME based on the template TPL" \
+	" new t=T n=N in=APP Generate a module N based on the template T in application APP" \
+ " list-templates List available templates"
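+
+# Example invocations (application and module names are illustrative):
+#   make -f erlang.mk bootstrap           # skeleton OTP application
+#   make new t=gen_server n=my_server     # module from a template
+#   make new-app in=webui                 # application under $(APPS_DIR)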
+
+# Bootstrap templates.
+
+define bs_appsrc
+{application, $p, [
+ {description, ""},
+ {vsn, "0.1.0"},
+ {id, "git"},
+ {modules, []},
+ {registered, []},
+ {applications, [
+ kernel,
+ stdlib
+ ]},
+ {mod, {$p_app, []}},
+ {env, []}
+]}.
+endef
+
+define bs_appsrc_lib
+{application, $p, [
+ {description, ""},
+ {vsn, "0.1.0"},
+ {id, "git"},
+ {modules, []},
+ {registered, []},
+ {applications, [
+ kernel,
+ stdlib
+ ]}
+]}.
+endef
+
+# To prevent autocompletion issues with ZSH, we add "include erlang.mk"
+# separately during the actual bootstrap.
+define bs_Makefile
+PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.1.0
+$(if $(SP),
+# Whitespace to be used when creating files from templates.
+SP = $(SP)
+)
+endef
+
+define bs_apps_Makefile
+PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.1.0
+$(if $(SP),
+# Whitespace to be used when creating files from templates.
+SP = $(SP)
+)
+# Make sure we know where the applications are located.
+ROOT_DIR ?= $(call core_relpath,$(dir $(ERLANG_MK_FILENAME)),$(APPS_DIR)/app)
+APPS_DIR ?= ..
+DEPS_DIR ?= $(call core_relpath,$(DEPS_DIR),$(APPS_DIR)/app)
+
+include $$(ROOT_DIR)/erlang.mk
+endef
+
+define bs_app
+-module($p_app).
+-behaviour(application).
+
+-export([start/2]).
+-export([stop/1]).
+
+start(_Type, _Args) ->
+ $p_sup:start_link().
+
+stop(_State) ->
+ ok.
+endef
+
+define bs_relx_config
+{release, {$p_release, "1"}, [$p, sasl, runtime_tools]}.
+{extended_start_script, true}.
+{sys_config, "config/sys.config"}.
+{vm_args, "config/vm.args"}.
+endef
+
+define bs_sys_config
+[
+].
+endef
+
+define bs_vm_args
+-name $p@127.0.0.1
+-setcookie $p
+-heart
+endef
+
+# Normal templates.
+
+define tpl_supervisor
+-module($(n)).
+-behaviour(supervisor).
+
+-export([start_link/0]).
+-export([init/1]).
+
+start_link() ->
+ supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+init([]) ->
+ Procs = [],
+ {ok, {{one_for_one, 1, 5}, Procs}}.
+endef
+
+define tpl_gen_server
+-module($(n)).
+-behaviour(gen_server).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_server.
+-export([init/1]).
+-export([handle_call/3]).
+-export([handle_cast/2]).
+-export([handle_info/2]).
+-export([terminate/2]).
+-export([code_change/3]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_server:start_link(?MODULE, [], []).
+
+%% gen_server.
+
+init([]) ->
+ {ok, #state{}}.
+
+handle_call(_Request, _From, State) ->
+ {reply, ignored, State}.
+
+handle_cast(_Msg, State) ->
+ {noreply, State}.
+
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+endef
+
+define tpl_module
+-module($(n)).
+-export([]).
+endef
+
+define tpl_cowboy_http
+-module($(n)).
+-behaviour(cowboy_http_handler).
+
+-export([init/3]).
+-export([handle/2]).
+-export([terminate/3]).
+
+-record(state, {
+}).
+
+init(_, Req, _Opts) ->
+ {ok, Req, #state{}}.
+
+handle(Req, State=#state{}) ->
+ {ok, Req2} = cowboy_req:reply(200, Req),
+ {ok, Req2, State}.
+
+terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_gen_fsm
+-module($(n)).
+-behaviour(gen_fsm).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_fsm.
+-export([init/1]).
+-export([state_name/2]).
+-export([handle_event/3]).
+-export([state_name/3]).
+-export([handle_sync_event/4]).
+-export([handle_info/3]).
+-export([terminate/3]).
+-export([code_change/4]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_fsm:start_link(?MODULE, [], []).
+
+%% gen_fsm.
+
+init([]) ->
+ {ok, state_name, #state{}}.
+
+state_name(_Event, StateData) ->
+ {next_state, state_name, StateData}.
+
+handle_event(_Event, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+state_name(_Event, _From, StateData) ->
+ {reply, ignored, state_name, StateData}.
+
+handle_sync_event(_Event, _From, StateName, StateData) ->
+ {reply, ignored, StateName, StateData}.
+
+handle_info(_Info, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+terminate(_Reason, _StateName, _StateData) ->
+ ok.
+
+code_change(_OldVsn, StateName, StateData, _Extra) ->
+ {ok, StateName, StateData}.
+endef
+
+define tpl_gen_statem
+-module($(n)).
+-behaviour(gen_statem).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_statem.
+-export([callback_mode/0]).
+-export([init/1]).
+-export([state_name/3]).
+-export([handle_event/4]).
+-export([terminate/3]).
+-export([code_change/4]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_statem:start_link(?MODULE, [], []).
+
+%% gen_statem.
+
+callback_mode() ->
+ state_functions.
+
+init([]) ->
+ {ok, state_name, #state{}}.
+
+state_name(_EventType, _EventData, StateData) ->
+ {next_state, state_name, StateData}.
+
+handle_event(_EventType, _EventData, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+terminate(_Reason, _StateName, _StateData) ->
+ ok.
+
+code_change(_OldVsn, StateName, StateData, _Extra) ->
+ {ok, StateName, StateData}.
+endef
+
+define tpl_cowboy_loop
+-module($(n)).
+-behaviour(cowboy_loop_handler).
+
+-export([init/3]).
+-export([info/3]).
+-export([terminate/3]).
+
+-record(state, {
+}).
+
+init(_, Req, _Opts) ->
+ {loop, Req, #state{}, 5000, hibernate}.
+
+info(_Info, Req, State) ->
+ {loop, Req, State, hibernate}.
+
+terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_cowboy_rest
+-module($(n)).
+
+-export([init/3]).
+-export([content_types_provided/2]).
+-export([get_html/2]).
+
+init(_, _Req, _Opts) ->
+ {upgrade, protocol, cowboy_rest}.
+
+content_types_provided(Req, State) ->
+ {[{{<<"text">>, <<"html">>, '*'}, get_html}], Req, State}.
+
+get_html(Req, State) ->
+ {<<"<html><body>This is REST!</body></html>">>, Req, State}.
+endef
+
+define tpl_cowboy_ws
+-module($(n)).
+-behaviour(cowboy_websocket_handler).
+
+-export([init/3]).
+-export([websocket_init/3]).
+-export([websocket_handle/3]).
+-export([websocket_info/3]).
+-export([websocket_terminate/3]).
+
+-record(state, {
+}).
+
+init(_, _, _) ->
+ {upgrade, protocol, cowboy_websocket}.
+
+websocket_init(_, Req, _Opts) ->
+ Req2 = cowboy_req:compact(Req),
+ {ok, Req2, #state{}}.
+
+websocket_handle({text, Data}, Req, State) ->
+ {reply, {text, Data}, Req, State};
+websocket_handle({binary, Data}, Req, State) ->
+ {reply, {binary, Data}, Req, State};
+websocket_handle(_Frame, Req, State) ->
+ {ok, Req, State}.
+
+websocket_info(_Info, Req, State) ->
+ {ok, Req, State}.
+
+websocket_terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_ranch_protocol
+-module($(n)).
+-behaviour(ranch_protocol).
+
+-export([start_link/4]).
+-export([init/4]).
+
+-type opts() :: [].
+-export_type([opts/0]).
+
+-record(state, {
+ socket :: inet:socket(),
+ transport :: module()
+}).
+
+start_link(Ref, Socket, Transport, Opts) ->
+ Pid = spawn_link(?MODULE, init, [Ref, Socket, Transport, Opts]),
+ {ok, Pid}.
+
+-spec init(ranch:ref(), inet:socket(), module(), opts()) -> ok.
+init(Ref, Socket, Transport, _Opts) ->
+ ok = ranch:accept_ack(Ref),
+ loop(#state{socket=Socket, transport=Transport}).
+
+loop(State) ->
+ loop(State).
+endef
+
+# Plugin-specific targets.
+
+ifndef WS
+ifdef SP
+WS = $(subst a,,a $(wordlist 1,$(SP),a a a a a a a a a a a a a a a a a a a a))
+else
+WS = $(tab)
+endif
+endif
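+
+# For example, "make bootstrap SP=2" should render the generated files
+# with two-space indentation instead of the default tab.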
+
+bootstrap:
+ifneq ($(wildcard src/),)
+ $(error Error: src/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(eval n := $(PROJECT)_sup)
+ $(verbose) $(call core_render,bs_Makefile,Makefile)
+ $(verbose) echo "include erlang.mk" >> Makefile
+ $(verbose) mkdir src/
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc,src/$(PROJECT).app.src)
+endif
+ $(verbose) $(call core_render,bs_app,src/$(PROJECT)_app.erl)
+ $(verbose) $(call core_render,tpl_supervisor,src/$(PROJECT)_sup.erl)
+
+bootstrap-lib:
+ifneq ($(wildcard src/),)
+ $(error Error: src/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(verbose) $(call core_render,bs_Makefile,Makefile)
+ $(verbose) echo "include erlang.mk" >> Makefile
+ $(verbose) mkdir src/
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc_lib,src/$(PROJECT).app.src)
+endif
+
+bootstrap-rel:
+ifneq ($(wildcard relx.config),)
+ $(error Error: relx.config already exists)
+endif
+ifneq ($(wildcard config/),)
+ $(error Error: config/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(verbose) $(call core_render,bs_relx_config,relx.config)
+ $(verbose) mkdir config/
+ $(verbose) $(call core_render,bs_sys_config,config/sys.config)
+ $(verbose) $(call core_render,bs_vm_args,config/vm.args)
+
+new-app:
+ifndef in
+ $(error Usage: $(MAKE) new-app in=APP)
+endif
+ifneq ($(wildcard $(APPS_DIR)/$in),)
+ $(error Error: Application $in already exists)
+endif
+ $(eval p := $(in))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(eval n := $(in)_sup)
+ $(verbose) mkdir -p $(APPS_DIR)/$p/src/
+ $(verbose) $(call core_render,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile)
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc,$(APPS_DIR)/$p/src/$p.app.src)
+endif
+ $(verbose) $(call core_render,bs_app,$(APPS_DIR)/$p/src/$p_app.erl)
+ $(verbose) $(call core_render,tpl_supervisor,$(APPS_DIR)/$p/src/$p_sup.erl)
+
+new-lib:
+ifndef in
+ $(error Usage: $(MAKE) new-lib in=APP)
+endif
+ifneq ($(wildcard $(APPS_DIR)/$in),)
+ $(error Error: Application $in already exists)
+endif
+ $(eval p := $(in))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(verbose) mkdir -p $(APPS_DIR)/$p/src/
+ $(verbose) $(call core_render,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile)
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc_lib,$(APPS_DIR)/$p/src/$p.app.src)
+endif
+
+new:
+ifeq ($(wildcard src/)$(in),)
+ $(error Error: src/ directory does not exist)
+endif
+ifndef t
+ $(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP])
+endif
+ifndef n
+ $(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP])
+endif
+ifdef in
+ $(verbose) $(call core_render,tpl_$(t),$(APPS_DIR)/$(in)/src/$(n).erl)
+else
+ $(verbose) $(call core_render,tpl_$(t),src/$(n).erl)
+endif
+
+list-templates:
+	$(verbose) echo Available templates:
+ $(verbose) printf " %s\n" $(sort $(patsubst tpl_%,%,$(filter tpl_%,$(.VARIABLES))))
+
+# Copyright (c) 2014-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: clean-c_src distclean-c_src-env
+
+# Configuration.
+
+C_SRC_DIR ?= $(CURDIR)/c_src
+C_SRC_ENV ?= $(C_SRC_DIR)/env.mk
+C_SRC_OUTPUT ?= $(CURDIR)/priv/$(PROJECT)
+C_SRC_TYPE ?= shared
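+
+# C_SRC_TYPE selects what is built from c_src/: "shared" produces a
+# NIF-style shared library, any other value an executable (e.g. a port
+# program). A project Makefile could override it with:
+#
+#   C_SRC_TYPE = executable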
+
+# System type and C compiler/flags.
+
+ifeq ($(PLATFORM),msys2)
+ C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?= .exe
+ C_SRC_OUTPUT_SHARED_EXTENSION ?= .dll
+else
+ C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?=
+ C_SRC_OUTPUT_SHARED_EXTENSION ?= .so
+endif
+
+ifeq ($(C_SRC_TYPE),shared)
+ C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_SHARED_EXTENSION)
+else
+ C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_EXECUTABLE_EXTENSION)
+endif
+
+ifeq ($(PLATFORM),msys2)
+# We hardcode the compiler used on MSYS2. The default CC=cc does
+# not produce working code. The "gcc" MSYS2 package also doesn't.
+ CC = /mingw64/bin/gcc
+ export CC
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),darwin)
+ CC ?= cc
+ CFLAGS ?= -O3 -std=c99 -arch x86_64 -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -arch x86_64 -Wall
+ LDFLAGS ?= -arch x86_64 -flat_namespace -undefined suppress
+else ifeq ($(PLATFORM),freebsd)
+ CC ?= cc
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),linux)
+ CC ?= gcc
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+endif
+
+ifneq ($(PLATFORM),msys2)
+ CFLAGS += -fPIC
+ CXXFLAGS += -fPIC
+endif
+
+CFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
+CXXFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
+
+LDLIBS += -L"$(ERL_INTERFACE_LIB_DIR)" -lei
+
+# Verbosity.
+
+c_verbose_0 = @echo " C " $(filter-out $(notdir $(MAKEFILE_LIST) $(C_SRC_ENV)),$(^F));
+c_verbose = $(c_verbose_$(V))
+
+cpp_verbose_0 = @echo " CPP " $(filter-out $(notdir $(MAKEFILE_LIST) $(C_SRC_ENV)),$(^F));
+cpp_verbose = $(cpp_verbose_$(V))
+
+link_verbose_0 = @echo " LD " $(@F);
+link_verbose = $(link_verbose_$(V))
+
+# Targets.
+
+ifeq ($(wildcard $(C_SRC_DIR)),)
+else ifneq ($(wildcard $(C_SRC_DIR)/Makefile),)
+app:: app-c_src
+
+test-build:: app-c_src
+
+app-c_src:
+ $(MAKE) -C $(C_SRC_DIR)
+
+clean::
+ $(MAKE) -C $(C_SRC_DIR) clean
+
+else
+
+ifeq ($(SOURCES),)
+SOURCES := $(sort $(foreach pat,*.c *.C *.cc *.cpp,$(call core_find,$(C_SRC_DIR)/,$(pat))))
+endif
+OBJECTS = $(addsuffix .o, $(basename $(SOURCES)))
+
+COMPILE_C = $(c_verbose) $(CC) $(CFLAGS) $(CPPFLAGS) -c
+COMPILE_CPP = $(cpp_verbose) $(CXX) $(CXXFLAGS) $(CPPFLAGS) -c
+
+app:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
+
+test-build:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
+
+$(C_SRC_OUTPUT_FILE): $(OBJECTS)
+ $(verbose) mkdir -p $(dir $@)
+ $(link_verbose) $(CC) $(OBJECTS) \
+ $(LDFLAGS) $(if $(filter $(C_SRC_TYPE),shared),-shared) $(LDLIBS) \
+ -o $(C_SRC_OUTPUT_FILE)
+
+$(OBJECTS): $(MAKEFILE_LIST) $(C_SRC_ENV)
+
+%.o: %.c
+ $(COMPILE_C) $(OUTPUT_OPTION) $<
+
+%.o: %.cc
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+%.o: %.C
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+%.o: %.cpp
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+clean:: clean-c_src
+
+clean-c_src:
+ $(gen_verbose) rm -f $(C_SRC_OUTPUT_FILE) $(OBJECTS)
+
+endif
+
+ifneq ($(wildcard $(C_SRC_DIR)),)
+ERL_ERTS_DIR = $(shell $(ERL) -eval 'io:format("~s~n", [code:lib_dir(erts)]), halt().')
+
+$(C_SRC_ENV):
+ $(verbose) $(ERL) -eval "file:write_file(\"$(call core_native_path,$(C_SRC_ENV))\", \
+ io_lib:format( \
+ \"# Generated by Erlang.mk. Edit at your own risk!~n~n\" \
+ \"ERTS_INCLUDE_DIR ?= ~s/erts-~s/include/~n\" \
+ \"ERL_INTERFACE_INCLUDE_DIR ?= ~s~n\" \
+ \"ERL_INTERFACE_LIB_DIR ?= ~s~n\" \
+ \"ERTS_DIR ?= $(ERL_ERTS_DIR)~n\", \
+ [code:root_dir(), erlang:system_info(version), \
+ code:lib_dir(erl_interface, include), \
+ code:lib_dir(erl_interface, lib)])), \
+ halt()."
+
+distclean:: distclean-c_src-env
+
+distclean-c_src-env:
+ $(gen_verbose) rm -f $(C_SRC_ENV)
+
+-include $(C_SRC_ENV)
+
+ifneq ($(ERL_ERTS_DIR),$(ERTS_DIR))
+$(shell rm -f $(C_SRC_ENV))
+endif
+endif
+
+# Templates.
+
+define bs_c_nif
+#include "erl_nif.h"
+
+static int loads = 0;
+
+static int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
+{
+ /* Initialize private data. */
+ *priv_data = NULL;
+
+ loads++;
+
+ return 0;
+}
+
+static int upgrade(ErlNifEnv* env, void** priv_data, void** old_priv_data, ERL_NIF_TERM load_info)
+{
+ /* Convert the private data to the new version. */
+ *priv_data = *old_priv_data;
+
+ loads++;
+
+ return 0;
+}
+
+static void unload(ErlNifEnv* env, void* priv_data)
+{
+ if (loads == 1) {
+ /* Destroy the private data. */
+ }
+
+ loads--;
+}
+
+static ERL_NIF_TERM hello(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ if (enif_is_atom(env, argv[0])) {
+ return enif_make_tuple2(env,
+ enif_make_atom(env, "hello"),
+ argv[0]);
+ }
+
+ return enif_make_tuple2(env,
+ enif_make_atom(env, "error"),
+ enif_make_atom(env, "badarg"));
+}
+
+static ErlNifFunc nif_funcs[] = {
+ {"hello", 1, hello}
+};
+
+ERL_NIF_INIT($n, nif_funcs, load, NULL, upgrade, unload)
+endef
+
+define bs_erl_nif
+-module($n).
+
+-export([hello/1]).
+
+-on_load(on_load/0).
+on_load() ->
+ PrivDir = case code:priv_dir(?MODULE) of
+ {error, _} ->
+ AppPath = filename:dirname(filename:dirname(code:which(?MODULE))),
+ filename:join(AppPath, "priv");
+ Path ->
+ Path
+ end,
+ erlang:load_nif(filename:join(PrivDir, atom_to_list(?MODULE)), 0).
+
+hello(_) ->
+ erlang:nif_error({not_loaded, ?MODULE}).
+endef
+
+# Check n first so a missing name produces the usage error rather than a
+# misleading "already exists" check against an incomplete file name.
+new-nif:
+ifndef n
+	$(error Usage: $(MAKE) new-nif n=NAME [in=APP])
+endif
+ifneq ($(wildcard $(C_SRC_DIR)/$n.c),)
+	$(error Error: $(C_SRC_DIR)/$n.c already exists)
+endif
+ifneq ($(wildcard src/$n.erl),)
+	$(error Error: src/$n.erl already exists)
+endif
+ifdef in
+ $(verbose) $(MAKE) -C $(APPS_DIR)/$(in)/ new-nif n=$n in=
+else
+ $(verbose) mkdir -p $(C_SRC_DIR) src/
+ $(verbose) $(call core_render,bs_c_nif,$(C_SRC_DIR)/$n.c)
+ $(verbose) $(call core_render,bs_erl_nif,src/$n.erl)
+endif
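+
+# Example: "make new-nif n=hello" (the name is illustrative) renders both
+# templates above; a subsequent "make" compiles the NIF into priv/
+# (e.g. priv/$(PROJECT).so on Linux).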
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: ci ci-prepare ci-setup
+
+CI_OTP ?=
+CI_HIPE ?=
+CI_ERLLVM ?=
+
+ifeq ($(CI_VM),native)
+ERLC_OPTS += +native
+TEST_ERLC_OPTS += +native
+else ifeq ($(CI_VM),erllvm)
+ERLC_OPTS += +native +'{hipe, [to_llvm]}'
+TEST_ERLC_OPTS += +native +'{hipe, [to_llvm]}'
+endif
+
+ifeq ($(strip $(CI_OTP) $(CI_HIPE) $(CI_ERLLVM)),)
+ci::
+else
+
+ci:: $(addprefix ci-,$(CI_OTP) $(addsuffix -native,$(CI_HIPE)) $(addsuffix -erllvm,$(CI_ERLLVM)))
+
+ci-prepare: $(addprefix $(KERL_INSTALL_DIR)/,$(CI_OTP) $(addsuffix -native,$(CI_HIPE)))
+
+ci-setup::
+ $(verbose) :
+
+ci-extra::
+ $(verbose) :
+
+ci_verbose_0 = @echo " CI " $(1);
+ci_verbose = $(ci_verbose_$(V))
+
+define ci_target
+ci-$1: $(KERL_INSTALL_DIR)/$2
+ $(verbose) $(MAKE) --no-print-directory clean
+ $(ci_verbose) \
+ PATH="$(KERL_INSTALL_DIR)/$2/bin:$(PATH)" \
+ CI_OTP_RELEASE="$1" \
+ CT_OPTS="-label $1" \
+ CI_VM="$3" \
+ $(MAKE) ci-setup tests
+ $(verbose) $(MAKE) --no-print-directory ci-extra
+endef
+
+$(foreach otp,$(CI_OTP),$(eval $(call ci_target,$(otp),$(otp),otp)))
+$(foreach otp,$(CI_HIPE),$(eval $(call ci_target,$(otp)-native,$(otp)-native,native)))
+$(foreach otp,$(CI_ERLLVM),$(eval $(call ci_target,$(otp)-erllvm,$(otp)-native,erllvm)))
+
+$(foreach otp,$(filter-out $(ERLANG_OTP),$(CI_OTP)),$(eval $(call kerl_otp_target,$(otp))))
+$(foreach otp,$(filter-out $(ERLANG_HIPE),$(sort $(CI_HIPE) $(CI_ERLLVM))),$(eval $(call kerl_hipe_target,$(otp))))
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Continuous Integration targets:" \
+ " ci Run '$(MAKE) tests' on all configured Erlang versions." \
+ "" \
+	"The CI_OTP variable must list the Erlang versions to be tested." \
+	"For example: CI_OTP = OTP-17.3.4 OTP-17.5.3"
+
+endif
+
+# Copyright (c) 2020, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifdef CONCUERROR_TESTS
+
+.PHONY: concuerror distclean-concuerror
+
+# Configuration
+
+CONCUERROR_LOGS_DIR ?= $(CURDIR)/logs
+CONCUERROR_OPTS ?=
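+
+# CONCUERROR_TESTS takes space-separated module:function pairs; the names
+# below are illustrative:
+#
+#   CONCUERROR_TESTS = my_tests:race_test my_tests:deadlock_test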
+
+# Core targets.
+
+check:: concuerror
+
+ifndef KEEP_LOGS
+distclean:: distclean-concuerror
+endif
+
+# Plugin-specific targets.
+
+$(ERLANG_MK_TMP)/Concuerror/bin/concuerror: | $(ERLANG_MK_TMP)
+ $(verbose) git clone https://github.com/parapluu/Concuerror $(ERLANG_MK_TMP)/Concuerror
+ $(verbose) $(MAKE) -C $(ERLANG_MK_TMP)/Concuerror
+
+$(CONCUERROR_LOGS_DIR):
+ $(verbose) mkdir -p $(CONCUERROR_LOGS_DIR)
+
+define concuerror_html_report
+<!DOCTYPE html>
+<html lang="en">
+<head>
+<meta charset="utf-8">
+<title>Concuerror HTML report</title>
+</head>
+<body>
+<h1>Concuerror HTML report</h1>
+<p>Generated on $(concuerror_date)</p>
+<ul>
+$(foreach t,$(concuerror_targets),<li><a href="$(t).txt">$(t)</a></li>)
+</ul>
+</body>
+</html>
+endef
+
+concuerror: $(addprefix concuerror-,$(subst :,-,$(CONCUERROR_TESTS)))
+ $(eval concuerror_date := $(shell date))
+ $(eval concuerror_targets := $^)
+ $(verbose) $(call core_render,concuerror_html_report,$(CONCUERROR_LOGS_DIR)/concuerror.html)
+
+define concuerror_target
+.PHONY: concuerror-$1-$2
+
+concuerror-$1-$2: test-build | $(ERLANG_MK_TMP)/Concuerror/bin/concuerror $(CONCUERROR_LOGS_DIR)
+ $(ERLANG_MK_TMP)/Concuerror/bin/concuerror \
+ --pa $(CURDIR)/ebin --pa $(TEST_DIR) \
+ -o $(CONCUERROR_LOGS_DIR)/concuerror-$1-$2.txt \
+ $$(CONCUERROR_OPTS) -m $1 -t $2
+endef
+
+$(foreach test,$(CONCUERROR_TESTS),$(eval $(call concuerror_target,$(firstword $(subst :, ,$(test))),$(lastword $(subst :, ,$(test))))))
+
+distclean-concuerror:
+ $(gen_verbose) rm -rf $(CONCUERROR_LOGS_DIR)
+
+endif
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: ct apps-ct distclean-ct
+
+# Configuration.
+
+CT_OPTS ?=
+
+ifneq ($(wildcard $(TEST_DIR)),)
+ifndef CT_SUITES
+CT_SUITES := $(sort $(subst _SUITE.erl,,$(notdir $(call core_find,$(TEST_DIR)/,*_SUITE.erl))))
+endif
+endif
+CT_SUITES ?=
+CT_LOGS_DIR ?= $(CURDIR)/logs
+
+# Core targets.
+
+tests:: ct
+
+ifndef KEEP_LOGS
+distclean:: distclean-ct
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Common_test targets:" \
+ " ct Run all the common_test suites for this project" \
+ "" \
+	"Each of your common_test suites has an associated target." \
+	"A suite named http_SUITE can be run using the ct-http target."
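+
+# Example invocations (suite, group and case names are illustrative):
+#   make ct                    # run every suite
+#   make ct-http               # run http_SUITE only
+#   make ct-http t=slow        # run group "slow"
+#   make ct-http t=slow:fetch  # run case "fetch" in group "slow"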
+
+# Plugin-specific targets.
+
+CT_RUN = ct_run \
+ -no_auto_compile \
+ -noinput \
+ -pa $(CURDIR)/ebin $(TEST_DIR) \
+ -dir $(TEST_DIR) \
+ -logdir $(CT_LOGS_DIR)
+
+ifeq ($(CT_SUITES),)
+ct: $(if $(IS_APP)$(ROOT_DIR),,apps-ct)
+else
+# We do not run tests if we are in an apps/* directory that has no test directory.
+ifneq ($(IS_APP)$(wildcard $(TEST_DIR)),1)
+ct: test-build $(if $(IS_APP)$(ROOT_DIR),,apps-ct)
+ $(verbose) mkdir -p $(CT_LOGS_DIR)
+ $(gen_verbose) $(CT_RUN) -sname ct_$(PROJECT) -suite $(addsuffix _SUITE,$(CT_SUITES)) $(CT_OPTS)
+endif
+endif
+
+ifneq ($(ALL_APPS_DIRS),)
+define ct_app_target
+apps-ct-$1: test-build
+ $$(MAKE) -C $1 ct IS_APP=1
+endef
+
+$(foreach app,$(ALL_APPS_DIRS),$(eval $(call ct_app_target,$(app))))
+
+apps-ct: $(addprefix apps-ct-,$(ALL_APPS_DIRS))
+endif
+
+ifdef t
+ifeq (,$(findstring :,$t))
+CT_EXTRA = -group $t
+else
+t_words = $(subst :, ,$t)
+CT_EXTRA = -group $(firstword $(t_words)) -case $(lastword $(t_words))
+endif
+else
+ifdef c
+CT_EXTRA = -case $c
+else
+CT_EXTRA =
+endif
+endif
+
+define ct_suite_target
+ct-$(1): test-build
+ $(verbose) mkdir -p $(CT_LOGS_DIR)
+ $(gen_verbose_esc) $(CT_RUN) -sname ct_$(PROJECT) -suite $(addsuffix _SUITE,$(1)) $(CT_EXTRA) $(CT_OPTS)
+endef
+
+$(foreach test,$(CT_SUITES),$(eval $(call ct_suite_target,$(test))))
+
+distclean-ct:
+ $(gen_verbose) rm -rf $(CT_LOGS_DIR)
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: plt distclean-plt dialyze
+
+# Configuration.
+
+DIALYZER_PLT ?= $(CURDIR)/.$(PROJECT).plt
+export DIALYZER_PLT
+
+PLT_APPS ?=
+DIALYZER_DIRS ?= --src -r $(wildcard src) $(ALL_APPS_DIRS)
+DIALYZER_OPTS ?= -Werror_handling -Wrace_conditions -Wunmatched_returns # -Wunderspecs
+DIALYZER_PLT_OPTS ?=
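+
+# PLT_APPS lists additional applications to include in the PLT; for
+# example, a project using ssl and mnesia might set:
+#
+#   PLT_APPS = ssl mnesia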
+
+# Core targets.
+
+check:: dialyze
+
+distclean:: distclean-plt
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Dialyzer targets:" \
+ " plt Build a PLT file for this project" \
+ " dialyze Analyze the project using Dialyzer"
+
+# Plugin-specific targets.
+
+define filter_opts.erl
+ Opts = init:get_plain_arguments(),
+ {Filtered, _} = lists:foldl(fun
+ (O, {Os, true}) -> {[O|Os], false};
+ (O = "-D", {Os, _}) -> {[O|Os], true};
+ (O = [\\$$-, \\$$D, _ | _], {Os, _}) -> {[O|Os], false};
+ (O = "-I", {Os, _}) -> {[O|Os], true};
+ (O = [\\$$-, \\$$I, _ | _], {Os, _}) -> {[O|Os], false};
+ (O = "-pa", {Os, _}) -> {[O|Os], true};
+ (_, Acc) -> Acc
+ end, {[], false}, Opts),
+ io:format("~s~n", [string:join(lists:reverse(Filtered), " ")]),
+ halt().
+endef
+
+# DIALYZER_PLT is a variable understood directly by Dialyzer.
+#
+# We append the path to erts at the end of the PLT. This works
+# because the PLT file is in the external term format and the
+# function binary_to_term/1 ignores any trailing data.
+$(DIALYZER_PLT): deps app
+ $(eval DEPS_LOG := $(shell test -f $(ERLANG_MK_TMP)/deps.log && \
+ while read p; do test -d $$p/ebin && echo $$p/ebin; done <$(ERLANG_MK_TMP)/deps.log))
+ $(verbose) dialyzer --build_plt $(DIALYZER_PLT_OPTS) --apps \
+ erts kernel stdlib $(PLT_APPS) $(OTP_DEPS) $(LOCAL_DEPS) $(DEPS_LOG) || test $$? -eq 2
+ $(verbose) $(ERL) -eval 'io:format("~n~s~n", [code:lib_dir(erts)]), halt().' >> $@
+
+plt: $(DIALYZER_PLT)
+
+distclean-plt:
+ $(gen_verbose) rm -f $(DIALYZER_PLT)
+
+ifneq ($(wildcard $(DIALYZER_PLT)),)
+dialyze: $(if $(filter --src,$(DIALYZER_DIRS)),,deps app)
+ $(verbose) if ! tail -n1 $(DIALYZER_PLT) | \
+ grep -q "^`$(ERL) -eval 'io:format("~s", [code:lib_dir(erts)]), halt().'`$$"; then \
+ rm $(DIALYZER_PLT); \
+ $(MAKE) plt; \
+ fi
+else
+dialyze: $(DIALYZER_PLT)
+endif
+ $(verbose) dialyzer --no_native `$(ERL) \
+ -eval "$(subst $(newline),,$(call escape_dquotes,$(call filter_opts.erl)))" \
+ -extra $(ERLC_OPTS)` $(DIALYZER_DIRS) $(DIALYZER_OPTS) $(if $(wildcard ebin/),-pa ebin/)
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-edoc edoc
+
+# Configuration.
+
+EDOC_OPTS ?=
+EDOC_SRC_DIRS ?=
+EDOC_OUTPUT ?= doc
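+
+# EDOC_OPTS takes regular EDoc options, comma-separated; for example:
+#
+#   EDOC_OPTS = {preprocess, true}, {todo, true}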
+
+define edoc.erl
+ SrcPaths = lists:foldl(fun(P, Acc) ->
+ filelib:wildcard(atom_to_list(P) ++ "/{src,c_src}") ++ Acc
+ end, [], [$(call comma_list,$(patsubst %,'%',$(call core_native_path,$(EDOC_SRC_DIRS))))]),
+ DefaultOpts = [{dir, "$(EDOC_OUTPUT)"}, {source_path, SrcPaths}, {subpackages, false}],
+ edoc:application($(1), ".", [$(2)] ++ DefaultOpts),
+ halt(0).
+endef
+
+# Core targets.
+
+ifneq ($(strip $(EDOC_SRC_DIRS)$(wildcard doc/overview.edoc)),)
+docs:: edoc
+endif
+
+distclean:: distclean-edoc
+
+# Plugin-specific targets.
+
+edoc: distclean-edoc doc-deps
+ $(gen_verbose) $(call erlang,$(call edoc.erl,$(PROJECT),$(EDOC_OPTS)))
+
+distclean-edoc:
+ $(gen_verbose) rm -f $(EDOC_OUTPUT)/*.css $(EDOC_OUTPUT)/*.html $(EDOC_OUTPUT)/*.png $(EDOC_OUTPUT)/edoc-info
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Configuration.
+
+DTL_FULL_PATH ?=
+DTL_PATH ?= templates/
+DTL_PREFIX ?=
+DTL_SUFFIX ?= _dtl
+DTL_OPTS ?=
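+
+# With the defaults above, templates/hello.dtl compiles to the module
+# hello_dtl (ebin/hello_dtl.beam). Setting DTL_FULL_PATH=1 encodes
+# subdirectories into the name, e.g. users/show.dtl -> users_show_dtl.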
+
+# Verbosity.
+
+dtl_verbose_0 = @echo " DTL " $(filter %.dtl,$(?F));
+dtl_verbose = $(dtl_verbose_$(V))
+
+# Core targets.
+
+DTL_PATH := $(abspath $(DTL_PATH))
+DTL_FILES := $(sort $(call core_find,$(DTL_PATH),*.dtl))
+
+ifneq ($(DTL_FILES),)
+
+DTL_NAMES = $(addprefix $(DTL_PREFIX),$(addsuffix $(DTL_SUFFIX),$(DTL_FILES:$(DTL_PATH)/%.dtl=%)))
+DTL_MODULES = $(if $(DTL_FULL_PATH),$(subst /,_,$(DTL_NAMES)),$(notdir $(DTL_NAMES)))
+BEAM_FILES += $(addsuffix .beam,$(addprefix ebin/,$(DTL_MODULES)))
+
+ifneq ($(words $(DTL_FILES)),0)
+# Rebuild templates when the Makefile changes.
+$(ERLANG_MK_TMP)/last-makefile-change-erlydtl: $(MAKEFILE_LIST) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(DTL_FILES); \
+ fi
+ $(verbose) touch $@
+
+ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change-erlydtl
+endif
+
+define erlydtl_compile.erl
+ [begin
+ Module0 = case "$(strip $(DTL_FULL_PATH))" of
+ "" ->
+ filename:basename(F, ".dtl");
+ _ ->
+ "$(call core_native_path,$(DTL_PATH))/" ++ F2 = filename:rootname(F, ".dtl"),
+ re:replace(F2, "/", "_", [{return, list}, global])
+ end,
+ Module = list_to_atom("$(DTL_PREFIX)" ++ string:to_lower(Module0) ++ "$(DTL_SUFFIX)"),
+ case erlydtl:compile(F, Module, [$(DTL_OPTS)] ++ [{out_dir, "ebin/"}, return_errors]) of
+ ok -> ok;
+ {ok, _} -> ok
+ end
+ end || F <- string:tokens("$(1)", " ")],
+ halt().
+endef
+
+ebin/$(PROJECT).app:: $(DTL_FILES) | ebin/
+ $(if $(strip $?),\
+ $(dtl_verbose) $(call erlang,$(call erlydtl_compile.erl,$(call core_native_path,$?)),\
+ -pa ebin/))
+
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, Dave Cottlehuber <dch@skunkwerks.at>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-escript escript escript-zip
+
+# Configuration.
+
+ESCRIPT_NAME ?= $(PROJECT)
+ESCRIPT_FILE ?= $(ESCRIPT_NAME)
+
+ESCRIPT_SHEBANG ?= /usr/bin/env escript
+ESCRIPT_COMMENT ?= This is an -*- erlang -*- file
+ESCRIPT_EMU_ARGS ?= -escript main $(ESCRIPT_NAME)
+
+ESCRIPT_ZIP ?= 7z a -tzip -mx=9 -mtc=off $(if $(filter-out 0,$(V)),,> /dev/null)
+ESCRIPT_ZIP_FILE ?= $(ERLANG_MK_TMP)/escript.zip
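+
+# "make escript" builds a self-contained executable named after the
+# project; the default emulator arguments above make $(ESCRIPT_NAME):main/1
+# the entry point.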
+
+# Core targets.
+
+distclean:: distclean-escript
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Escript targets:" \
+	" escript Build an executable escript archive"
+
+# Plugin-specific targets.
+
+escript-zip:: FULL=1
+escript-zip:: deps app
+ $(verbose) mkdir -p $(dir $(ESCRIPT_ZIP))
+ $(verbose) rm -f $(ESCRIPT_ZIP_FILE)
+ $(gen_verbose) cd .. && $(ESCRIPT_ZIP) $(ESCRIPT_ZIP_FILE) $(PROJECT)/ebin/*
+ifneq ($(DEPS),)
+ $(verbose) cd $(DEPS_DIR) && $(ESCRIPT_ZIP) $(ESCRIPT_ZIP_FILE) \
+ $(subst $(DEPS_DIR)/,,$(addsuffix /*,$(wildcard \
+ $(addsuffix /ebin,$(shell cat $(ERLANG_MK_TMP)/deps.log)))))
+endif
+
+escript:: escript-zip
+ $(gen_verbose) printf "%s\n" \
+ "#!$(ESCRIPT_SHEBANG)" \
+ "%% $(ESCRIPT_COMMENT)" \
+ "%%! $(ESCRIPT_EMU_ARGS)" > $(ESCRIPT_FILE)
+ $(verbose) cat $(ESCRIPT_ZIP_FILE) >> $(ESCRIPT_FILE)
+ $(verbose) chmod +x $(ESCRIPT_FILE)
+
+distclean-escript:
+ $(gen_verbose) rm -f $(ESCRIPT_FILE)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, Enrique Fernandez <enrique.fernandez@erlang-solutions.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: eunit apps-eunit
+
+# Configuration
+
+EUNIT_OPTS ?=
+EUNIT_ERL_OPTS ?=
+
+# Core targets.
+
+tests:: eunit
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "EUnit targets:" \
+ " eunit Run all the EUnit tests for this project"
+
+# Plugin-specific targets.
+
+define eunit.erl
+ $(call cover.erl)
+ CoverSetup(),
+ case eunit:test($1, [$(EUNIT_OPTS)]) of
+ ok -> ok;
+ error -> halt(2)
+ end,
+ CoverExport("$(call core_native_path,$(COVER_DATA_DIR))/eunit.coverdata"),
+ halt()
+endef
+
+EUNIT_ERL_OPTS += -pa $(TEST_DIR) $(CURDIR)/ebin
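+
+# Example invocations (module and function names are illustrative):
+#   make eunit                       # all EUnit tests
+#   make eunit t=my_module           # one module's tests
+#   make eunit t=my_module:a_test    # a single zero-arity test function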
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+eunit: test-build cover-data-dir
+ $(gen_verbose) $(call erlang,$(call eunit.erl,['$(t)']),$(EUNIT_ERL_OPTS))
+else
+eunit: test-build cover-data-dir
+ $(gen_verbose) $(call erlang,$(call eunit.erl,fun $(t)/0),$(EUNIT_ERL_OPTS))
+endif
+else
+EUNIT_EBIN_MODS = $(notdir $(basename $(ERL_FILES) $(BEAM_FILES)))
+EUNIT_TEST_MODS = $(notdir $(basename $(call core_find,$(TEST_DIR)/,*.erl)))
+
+EUNIT_MODS = $(foreach mod,$(EUNIT_EBIN_MODS) $(filter-out \
+ $(patsubst %,%_tests,$(EUNIT_EBIN_MODS)),$(EUNIT_TEST_MODS)),'$(mod)')
+
+eunit: test-build $(if $(IS_APP)$(ROOT_DIR),,apps-eunit) cover-data-dir
+ifneq ($(wildcard src/ $(TEST_DIR)),)
+ $(gen_verbose) $(call erlang,$(call eunit.erl,[$(call comma_list,$(EUNIT_MODS))]),$(EUNIT_ERL_OPTS))
+endif
+
+ifneq ($(ALL_APPS_DIRS),)
+apps-eunit: test-build
+ $(verbose) eunit_retcode=0 ; for app in $(ALL_APPS_DIRS); do $(MAKE) -C $$app eunit IS_APP=1; \
+ [ $$? -ne 0 ] && eunit_retcode=1 ; done ; \
+ exit $$eunit_retcode
+endif
+endif
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter proper,$(DEPS) $(TEST_DEPS)),proper)
+.PHONY: proper
+
+# Targets.
+
+tests:: proper
+
+define proper_check.erl
+ $(call cover.erl)
+ code:add_pathsa([
+ "$(call core_native_path,$(CURDIR)/ebin)",
+ "$(call core_native_path,$(DEPS_DIR)/*/ebin)",
+ "$(call core_native_path,$(TEST_DIR))"]),
+ Module = fun(M) ->
+ [true] =:= lists:usort([
+ case atom_to_list(F) of
+ "prop_" ++ _ ->
+ io:format("Testing ~p:~p/0~n", [M, F]),
+ proper:quickcheck(M:F(), nocolors);
+ _ ->
+ true
+ end
+ || {F, 0} <- M:module_info(exports)])
+ end,
+ try begin
+ CoverSetup(),
+ Res = case $(1) of
+ all -> [true] =:= lists:usort([Module(M) || M <- [$(call comma_list,$(3))]]);
+ module -> Module($(2));
+ function -> proper:quickcheck($(2), nocolors)
+ end,
+ CoverExport("$(COVER_DATA_DIR)/proper.coverdata"),
+ Res
+ end of
+ true -> halt(0);
+ _ -> halt(1)
+ catch error:undef ->
+ io:format("Undefined property or module?~n~p~n", [erlang:get_stacktrace()]),
+ halt(0)
+ end.
+endef
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+proper: test-build cover-data-dir
+ $(verbose) $(call erlang,$(call proper_check.erl,module,$(t)))
+else
+proper: test-build cover-data-dir
+ $(verbose) echo Testing $(t)/0
+ $(verbose) $(call erlang,$(call proper_check.erl,function,$(t)()))
+endif
+else
+proper: test-build cover-data-dir
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(wildcard ebin/*.beam) $(call core_find,$(TEST_DIR)/,*.beam))))))
+ $(gen_verbose) $(call erlang,$(call proper_check.erl,all,undefined,$(MODULES)))
+endif
+endif
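+
+# Example invocations (module and property names are illustrative):
+#   make proper                         # check every prop_* property
+#   make proper t=prop_queue            # one module
+#   make proper t=prop_queue:prop_push  # one property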
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Verbosity.
+
+proto_verbose_0 = @echo " PROTO " $(filter %.proto,$(?F));
+proto_verbose = $(proto_verbose_$(V))
+
+# Core targets.
+
+ifneq ($(wildcard src/),)
+ifneq ($(filter gpb protobuffs,$(BUILD_DEPS) $(DEPS)),)
+PROTO_FILES := $(filter %.proto,$(ALL_SRC_FILES))
+ERL_FILES += $(addprefix src/,$(patsubst %.proto,%_pb.erl,$(notdir $(PROTO_FILES))))
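+
+# For example, src/messages.proto is compiled to src/messages_pb.erl (plus
+# include/messages_pb.hrl) by whichever of gpb/protobuffs is a dependency.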
+
+ifeq ($(PROTO_FILES),)
+$(ERLANG_MK_TMP)/last-makefile-change-protobuffs:
+ $(verbose) :
+else
+# Rebuild proto files when the Makefile changes.
+# We exclude $(PROJECT).d to avoid a circular dependency.
+$(ERLANG_MK_TMP)/last-makefile-change-protobuffs: $(filter-out $(PROJECT).d,$(MAKEFILE_LIST)) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(PROTO_FILES); \
+ fi
+ $(verbose) touch $@
+
+$(PROJECT).d:: $(ERLANG_MK_TMP)/last-makefile-change-protobuffs
+endif
+
+ifeq ($(filter gpb,$(BUILD_DEPS) $(DEPS)),)
+define compile_proto.erl
+ [begin
+ protobuffs_compile:generate_source(F, [
+ {output_include_dir, "./include"},
+ {output_src_dir, "./src"}])
+ end || F <- string:tokens("$1", " ")],
+ halt().
+endef
+else
+define compile_proto.erl
+ [begin
+ gpb_compile:file(F, [
+ {include_as_lib, true},
+ {module_name_suffix, "_pb"},
+ {o_hrl, "./include"},
+ {o_erl, "./src"}])
+ end || F <- string:tokens("$1", " ")],
+ halt().
+endef
+endif
+
+ifneq ($(PROTO_FILES),)
+$(PROJECT).d:: $(PROTO_FILES)
+ $(verbose) mkdir -p ebin/ include/
+ $(if $(strip $?),$(proto_verbose) $(call erlang,$(call compile_proto.erl,$?)))
+endif
+endif
+endif
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: relx-rel relx-relup distclean-relx-rel run
+
+# Configuration.
+
+RELX ?= $(ERLANG_MK_TMP)/relx
+RELX_CONFIG ?= $(CURDIR)/relx.config
+
+RELX_URL ?= https://erlang.mk/res/relx-v3.27.0
+RELX_OPTS ?=
+RELX_OUTPUT_DIR ?= _rel
+RELX_REL_EXT ?=
+RELX_TAR ?= 1
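+
+# Example: "make rel" downloads relx if necessary and assembles (and, with
+# RELX_TAR=1, tars) the release under $(RELX_OUTPUT_DIR).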
+
+ifdef SFX
+ RELX_TAR = 1
+endif
+
+ifeq ($(firstword $(RELX_OPTS)),-o)
+ RELX_OUTPUT_DIR = $(word 2,$(RELX_OPTS))
+else
+ RELX_OPTS += -o $(RELX_OUTPUT_DIR)
+endif
+
+# Core targets.
+
+ifeq ($(IS_DEP),)
+ifneq ($(wildcard $(RELX_CONFIG)),)
+rel:: relx-rel
+
+relup:: relx-relup
+endif
+endif
+
+distclean:: distclean-relx-rel
+
+# Plugin-specific targets.
+
+$(RELX): | $(ERLANG_MK_TMP)
+ $(gen_verbose) $(call core_http_get,$(RELX),$(RELX_URL))
+ $(verbose) chmod +x $(RELX)
+
+relx-rel: $(RELX) rel-deps app
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) release
+ $(verbose) $(MAKE) relx-post-rel
+ifeq ($(RELX_TAR),1)
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) tar
+endif
+
+relx-relup: $(RELX) rel-deps app
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) release
+ $(MAKE) relx-post-rel
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) relup $(if $(filter 1,$(RELX_TAR)),tar)
+
+distclean-relx-rel:
+ $(gen_verbose) rm -rf $(RELX_OUTPUT_DIR)
+
+# Default hooks.
+relx-post-rel::
+ $(verbose) :
+
+# Run target.
+
+ifeq ($(wildcard $(RELX_CONFIG)),)
+run::
+else
+
+define get_relx_release.erl
+ {ok, Config} = file:consult("$(call core_native_path,$(RELX_CONFIG))"),
+ {release, {Name, Vsn0}, _} = lists:keyfind(release, 1, Config),
+ Vsn = case Vsn0 of
+ {cmd, Cmd} -> os:cmd(Cmd);
+ semver -> "";
+ {semver, _} -> "";
+	_ -> Vsn0
+ end,
+ Extended = case lists:keyfind(extended_start_script, 1, Config) of
+ {_, true} -> "1";
+ _ -> ""
+ end,
+ io:format("~s ~s ~s", [Name, Vsn, Extended]),
+ halt(0).
+endef
+
+RELX_REL := $(shell $(call erlang,$(get_relx_release.erl)))
+RELX_REL_NAME := $(word 1,$(RELX_REL))
+RELX_REL_VSN := $(word 2,$(RELX_REL))
+RELX_REL_CMD := $(if $(word 3,$(RELX_REL)),console)
+
+ifeq ($(PLATFORM),msys2)
+RELX_REL_EXT := .cmd
+endif
+
+run:: all
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) $(RELX_REL_CMD)
+
+ifdef RELOAD
+rel::
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) ping
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) \
+ eval "io:format(\"~p~n\", [c:lm()])"
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Relx targets:" \
+ " run Compile the project, build the release and run it"
+
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, M Robert Martin <rob@version2beta.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: shell
+
+# Configuration.
+
+SHELL_ERL ?= erl
+SHELL_PATHS ?= $(CURDIR)/ebin $(TEST_DIR)
+SHELL_OPTS ?=
+
+ALL_SHELL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(SHELL_DEPS))
+
+# Core targets
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Shell targets:" \
+	" shell Run an Erlang shell with SHELL_OPTS or a reasonable default"
+
+# Plugin-specific targets.
+
+$(foreach dep,$(SHELL_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+build-shell-deps:
+else
+build-shell-deps: $(ALL_SHELL_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_SHELL_DEPS_DIRS) ; do \
+ if [ -z "$(strip $(FULL))" ] && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ else \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ fi \
+ done
+endif
+
+shell:: build-shell-deps
+ $(gen_verbose) $(SHELL_ERL) -pa $(SHELL_PATHS) $(SHELL_OPTS)
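+
+# Example (node name is illustrative):
+#   make shell SHELL_OPTS='-sname dev'
+# starts a named development node with ebin/ and $(TEST_DIR) in the path.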
+
+# Copyright 2017, Stanislaw Klekot <dozzie@jarowit.net>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-sphinx sphinx
+
+# Configuration.
+
+SPHINX_BUILD ?= sphinx-build
+SPHINX_SOURCE ?= doc
+SPHINX_CONFDIR ?=
+SPHINX_FORMATS ?= html
+SPHINX_DOCTREES ?= $(ERLANG_MK_TMP)/sphinx.doctrees
+SPHINX_OPTS ?=
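+
+# For example, to build both HTML and man pages, with the man pages going
+# to a custom directory:
+#
+#   SPHINX_FORMATS = html man
+#   sphinx_man_output = doc/man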
+
+#sphinx_html_opts =
+#sphinx_html_output = html
+#sphinx_man_opts =
+#sphinx_man_output = man
+#sphinx_latex_opts =
+#sphinx_latex_output = latex
+
+# Helpers.
+
+sphinx_build_0 = @echo " SPHINX" $1; $(SPHINX_BUILD) -N -q
+sphinx_build_1 = $(SPHINX_BUILD) -N
+sphinx_build_2 = set -x; $(SPHINX_BUILD)
+sphinx_build = $(sphinx_build_$(V))
+
+define sphinx.build
+$(call sphinx_build,$1) -b $1 -d $(SPHINX_DOCTREES) $(if $(SPHINX_CONFDIR),-c $(SPHINX_CONFDIR)) $(SPHINX_OPTS) $(sphinx_$1_opts) -- $(SPHINX_SOURCE) $(call sphinx.output,$1)
+
+endef
+
+define sphinx.output
+$(if $(sphinx_$1_output),$(sphinx_$1_output),$1)
+endef
+
+# Targets.
+
+ifneq ($(wildcard $(if $(SPHINX_CONFDIR),$(SPHINX_CONFDIR),$(SPHINX_SOURCE))/conf.py),)
+docs:: sphinx
+distclean:: distclean-sphinx
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Sphinx targets:" \
+ " sphinx Generate Sphinx documentation." \
+ "" \
+	"ReST sources and the 'conf.py' file are expected in the directory pointed" \
+	"to by SPHINX_SOURCE ('doc' by default). SPHINX_FORMATS lists the formats to" \
+	"build (only the 'html' format is generated by default); the target directory" \
+	'can be specified by setting sphinx_$${format}_output, for example:' \
+	'sphinx_html_output = output/html' \
+	"Additional Sphinx options can be set in SPHINX_OPTS."
+
+# Plugin-specific targets.
+
+sphinx:
+ $(foreach F,$(SPHINX_FORMATS),$(call sphinx.build,$F))
+
+distclean-sphinx:
+ $(gen_verbose) rm -rf $(filter-out $(SPHINX_SOURCE),$(foreach F,$(SPHINX_FORMATS),$(call sphinx.output,$F)))
+
+# Copyright (c) 2017, Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: show-ERL_LIBS show-ERLC_OPTS show-TEST_ERLC_OPTS
+
+show-ERL_LIBS:
+ @echo $(ERL_LIBS)
+
+show-ERLC_OPTS:
+ @$(foreach opt,$(ERLC_OPTS) -pa ebin -I include,echo "$(opt)";)
+
+show-TEST_ERLC_OPTS:
+ @$(foreach opt,$(TEST_ERLC_OPTS) -pa ebin -I include,echo "$(opt)";)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter triq,$(DEPS) $(TEST_DEPS)),triq)
+.PHONY: triq
+
+# Targets.
+
+tests:: triq
+
+define triq_check.erl
+ $(call cover.erl)
+ code:add_pathsa([
+ "$(call core_native_path,$(CURDIR)/ebin)",
+ "$(call core_native_path,$(DEPS_DIR)/*/ebin)",
+ "$(call core_native_path,$(TEST_DIR))"]),
+ try begin
+ CoverSetup(),
+ Res = case $(1) of
+ all -> [true] =:= lists:usort([triq:check(M) || M <- [$(call comma_list,$(3))]]);
+ module -> triq:check($(2));
+ function -> triq:check($(2))
+ end,
+ CoverExport("$(COVER_DATA_DIR)/triq.coverdata"),
+ Res
+ end of
+ true -> halt(0);
+ _ -> halt(1)
+ catch error:undef ->
+ io:format("Undefined property or module?~n~p~n", [erlang:get_stacktrace()]),
+ halt(0)
+ end.
+endef
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+triq: test-build cover-data-dir
+ $(verbose) $(call erlang,$(call triq_check.erl,module,$(t)))
+else
+triq: test-build cover-data-dir
+ $(verbose) echo Testing $(t)/0
+ $(verbose) $(call erlang,$(call triq_check.erl,function,$(t)()))
+endif
+else
+triq: test-build cover-data-dir
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(wildcard ebin/*.beam) $(call core_find,$(TEST_DIR)/,*.beam))))))
+ $(gen_verbose) $(call erlang,$(call triq_check.erl,all,undefined,$(MODULES)))
+endif
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Erlang Solutions Ltd.
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: xref distclean-xref
+
+# Configuration.
+
+ifeq ($(XREF_CONFIG),)
+ XREFR_ARGS :=
+else
+ XREFR_ARGS := -c $(XREF_CONFIG)
+endif
+
+XREFR ?= $(CURDIR)/xrefr
+export XREFR
+
+XREFR_URL ?= https://github.com/inaka/xref_runner/releases/download/1.1.0/xrefr
+
+# Core targets.
+
+help::
+ $(verbose) printf '%s\n' '' \
+ 'Xref targets:' \
+	' xref Run Xrefr using $$XREF_CONFIG as the config file, if defined'
+
+distclean:: distclean-xref
+
+# Plugin-specific targets.
+
+$(XREFR):
+ $(gen_verbose) $(call core_http_get,$(XREFR),$(XREFR_URL))
+ $(verbose) chmod +x $(XREFR)
+
+xref: deps app $(XREFR)
+ $(gen_verbose) $(XREFR) $(XREFR_ARGS)
+
+distclean-xref:
+ $(gen_verbose) rm -rf $(XREFR)
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Viktor Söderqvist <viktor@zuiderkwast.se>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+COVER_REPORT_DIR ?= cover
+COVER_DATA_DIR ?= $(COVER_REPORT_DIR)
+
+ifdef COVER
+COVER_APPS ?= $(notdir $(ALL_APPS_DIRS))
+COVER_DEPS ?=
+endif
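+
+# Example: "make ct COVER=1" instruments the project (plus any COVER_APPS
+# and COVER_DEPS) and leaves ct.coverdata in $(COVER_DATA_DIR); "make tests
+# COVER=1" additionally renders the HTML report.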
+
+# Code coverage for Common Test.
+
+ifdef COVER
+ifdef CT_RUN
+ifneq ($(wildcard $(TEST_DIR)),)
+test-build:: $(TEST_DIR)/ct.cover.spec
+
+$(TEST_DIR)/ct.cover.spec: cover-data-dir
+ $(gen_verbose) printf "%s\n" \
+ "{incl_app, '$(PROJECT)', details}." \
+ "{incl_dirs, '$(PROJECT)', [\"$(call core_native_path,$(CURDIR)/ebin)\" \
+ $(foreach a,$(COVER_APPS),$(comma) \"$(call core_native_path,$(APPS_DIR)/$a/ebin)\") \
+ $(foreach d,$(COVER_DEPS),$(comma) \"$(call core_native_path,$(DEPS_DIR)/$d/ebin)\")]}." \
+ '{export,"$(call core_native_path,$(abspath $(COVER_DATA_DIR))/ct.coverdata)"}.' > $@
+
+CT_RUN += -cover $(TEST_DIR)/ct.cover.spec
+endif
+endif
+endif
+
+# Code coverage for other tools.
+
+ifdef COVER
+define cover.erl
+ CoverSetup = fun() ->
+ Dirs = ["$(call core_native_path,$(CURDIR)/ebin)"
+ $(foreach a,$(COVER_APPS),$(comma) "$(call core_native_path,$(APPS_DIR)/$a/ebin)")
+ $(foreach d,$(COVER_DEPS),$(comma) "$(call core_native_path,$(DEPS_DIR)/$d/ebin)")],
+ [begin
+ case filelib:is_dir(Dir) of
+ false -> false;
+ true ->
+ case cover:compile_beam_directory(Dir) of
+ {error, _} -> halt(1);
+ _ -> true
+ end
+ end
+ end || Dir <- Dirs]
+ end,
+ CoverExport = fun(Filename) -> cover:export(Filename) end,
+endef
+else
+define cover.erl
+ CoverSetup = fun() -> ok end,
+ CoverExport = fun(_) -> ok end,
+endef
+endif
+
+# Core targets
+
+ifdef COVER
+ifneq ($(COVER_REPORT_DIR),)
+tests::
+ $(verbose) $(MAKE) --no-print-directory cover-report
+endif
+
+cover-data-dir: | $(COVER_DATA_DIR)
+
+$(COVER_DATA_DIR):
+ $(verbose) mkdir -p $(COVER_DATA_DIR)
+else
+cover-data-dir:
+endif
+
+clean:: coverdata-clean
+
+ifneq ($(COVER_REPORT_DIR),)
+distclean:: cover-report-clean
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Cover targets:" \
+	" cover-report Generate an HTML coverage report from previously collected" \
+ " cover data." \
+ " all.coverdata Merge all coverdata files into all.coverdata." \
+ "" \
+	"If COVER=1 is set, coverage data is generated by the eunit and ct targets." \
+	"The tests target additionally generates an HTML coverage report from the" \
+	"combined coverdata files produced by each of these testing tools. HTML" \
+	"reports can be disabled by setting COVER_REPORT_DIR to an empty value."
+
+# Plugin specific targets
+
+COVERDATA = $(filter-out $(COVER_DATA_DIR)/all.coverdata,$(wildcard $(COVER_DATA_DIR)/*.coverdata))
+
+.PHONY: coverdata-clean
+coverdata-clean:
+ $(gen_verbose) rm -f $(COVER_DATA_DIR)/*.coverdata $(TEST_DIR)/ct.cover.spec
+
+# Merge all coverdata files into one.
+define cover_export.erl
+ $(foreach f,$(COVERDATA),cover:import("$(f)") == ok orelse halt(1),)
+ cover:export("$(COVER_DATA_DIR)/$@"), halt(0).
+endef
+
+all.coverdata: $(COVERDATA) cover-data-dir
+ $(gen_verbose) $(call erlang,$(cover_export.erl))
+
+# These are only defined if COVER_REPORT_DIR is non-empty. Set COVER_REPORT_DIR to
+# empty if you want the coverdata files but not the HTML report.
+ifneq ($(COVER_REPORT_DIR),)
+
+.PHONY: cover-report-clean cover-report
+
+cover-report-clean:
+ $(gen_verbose) rm -rf $(COVER_REPORT_DIR)
+ifneq ($(COVER_REPORT_DIR),$(COVER_DATA_DIR))
+ $(if $(shell ls -A $(COVER_DATA_DIR)/),,$(verbose) rmdir $(COVER_DATA_DIR))
+endif
+
+ifeq ($(COVERDATA),)
+cover-report:
+else
+
+# Modules which include eunit.hrl always contain one line without coverage
+# because eunit defines test/0 which is never called. We compensate for this.
+EUNIT_HRL_MODS = $(subst $(space),$(comma),$(shell \
+ grep -H -e '^\s*-include.*include/eunit\.hrl"' src/*.erl \
+ | sed "s/^src\/\(.*\)\.erl:.*/'\1'/" | uniq))
+
+define cover_report.erl
+ $(foreach f,$(COVERDATA),cover:import("$(f)") == ok orelse halt(1),)
+ Ms = cover:imported_modules(),
+ [cover:analyse_to_file(M, "$(COVER_REPORT_DIR)/" ++ atom_to_list(M)
+ ++ ".COVER.html", [html]) || M <- Ms],
+ Report = [begin {ok, R} = cover:analyse(M, module), R end || M <- Ms],
+ EunitHrlMods = [$(EUNIT_HRL_MODS)],
+ Report1 = [{M, {Y, case lists:member(M, EunitHrlMods) of
+ true -> N - 1; false -> N end}} || {M, {Y, N}} <- Report],
+ TotalY = lists:sum([Y || {_, {Y, _}} <- Report1]),
+ TotalN = lists:sum([N || {_, {_, N}} <- Report1]),
+ Perc = fun(Y, N) -> case Y + N of 0 -> 100; S -> round(100 * Y / S) end end,
+ TotalPerc = Perc(TotalY, TotalN),
+ {ok, F} = file:open("$(COVER_REPORT_DIR)/index.html", [write]),
+ io:format(F, "<!DOCTYPE html><html>~n"
+ "<head><meta charset=\"UTF-8\">~n"
+ "<title>Coverage report</title></head>~n"
+ "<body>~n", []),
+ io:format(F, "<h1>Coverage</h1>~n<p>Total: ~p%</p>~n", [TotalPerc]),
+ io:format(F, "<table><tr><th>Module</th><th>Coverage</th></tr>~n", []),
+ [io:format(F, "<tr><td><a href=\"~p.COVER.html\">~p</a></td>"
+ "<td>~p%</td></tr>~n",
+ [M, M, Perc(Y, N)]) || {M, {Y, N}} <- Report1],
+ How = "$(subst $(space),$(comma)$(space),$(basename $(COVERDATA)))",
+ Date = "$(shell date -u "+%Y-%m-%dT%H:%M:%SZ")",
+ io:format(F, "</table>~n"
+ "<p>Generated using ~s and erlang.mk on ~s.</p>~n"
+ "</body></html>", [How, Date]),
+ halt().
+endef
+
+cover-report:
+ $(verbose) mkdir -p $(COVER_REPORT_DIR)
+ $(gen_verbose) $(call erlang,$(cover_report.erl))
+
+endif
+endif # ifneq ($(COVER_REPORT_DIR),)
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: sfx
+
+ifdef RELX_REL
+ifdef SFX
+
+# Configuration.
+
+SFX_ARCHIVE ?= $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/$(RELX_REL_NAME)-$(RELX_REL_VSN).tar.gz
+SFX_OUTPUT_FILE ?= $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME).run
+
+# Core targets.
+
+rel:: sfx
+
+# Plugin-specific targets.
+
+define sfx_stub
+#!/bin/sh
+
+TMPDIR=`mktemp -d`
+ARCHIVE=`awk '/^__ARCHIVE_BELOW__$$/ {print NR + 1; exit 0;}' $$0`
+FILENAME=$$(basename $$0)
+REL=$${FILENAME%.*}
+
+tail -n+$$ARCHIVE $$0 | tar -xzf - -C $$TMPDIR
+
+$$TMPDIR/bin/$$REL console
+RET=$$?
+
+rm -rf $$TMPDIR
+
+exit $$RET
+
+__ARCHIVE_BELOW__
+endef
+
+sfx:
+ $(verbose) $(call core_render,sfx_stub,$(SFX_OUTPUT_FILE))
+ $(gen_verbose) cat $(SFX_ARCHIVE) >> $(SFX_OUTPUT_FILE)
+ $(verbose) chmod +x $(SFX_OUTPUT_FILE)
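+
+# Example: "make rel SFX=1" produces $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME).run,
+# a self-extracting script that unpacks the release to a temporary
+# directory and starts its console.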
+
+endif
+endif
+
+# Copyright (c) 2013-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# External plugins.
+
+DEP_PLUGINS ?=
+
+$(foreach p,$(DEP_PLUGINS),\
+ $(eval $(if $(findstring /,$p),\
+ $(call core_dep_plugin,$p,$(firstword $(subst /, ,$p))),\
+ $(call core_dep_plugin,$p/plugins.mk,$p))))
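+
+# Examples (dependency names are illustrative): DEP_PLUGINS = cowboy loads
+# $(DEPS_DIR)/cowboy/plugins.mk, while DEP_PLUGINS = cowboy/mk/dist.mk
+# loads that specific file from the cowboy dependency.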
+
+help:: help-plugins
+
+help-plugins::
+ $(verbose) :
+
+# Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015-2016, Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Fetch dependencies recursively (without building them).
+
+.PHONY: fetch-deps fetch-doc-deps fetch-rel-deps fetch-test-deps \
+ fetch-shell-deps
+
+.PHONY: $(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+fetch-deps: $(ERLANG_MK_RECURSIVE_DEPS_LIST)
+fetch-doc-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST)
+fetch-rel-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST)
+fetch-test-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST)
+fetch-shell-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+ifneq ($(SKIP_DEPS),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST):
+ $(verbose) :> $@
+else
+# By default, we fetch "normal" dependencies. They are also included
+# regardless of the type of dependencies requested.
+#
+# $(ALL_DEPS_DIRS) includes $(BUILD_DEPS).
+
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_DOC_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_REL_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_TEST_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_SHELL_DEPS_DIRS)
+
+# Allow fetch-deps to be used with $(DEP_TYPES) to fetch multiple types of
+# dependencies with a single target.
+ifneq ($(filter doc,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_DOC_DEPS_DIRS)
+endif
+ifneq ($(filter rel,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_REL_DEPS_DIRS)
+endif
+ifneq ($(filter test,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_TEST_DEPS_DIRS)
+endif
+ifneq ($(filter shell,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_SHELL_DEPS_DIRS)
+endif
+
+ERLANG_MK_RECURSIVE_TMP_LIST := $(abspath $(ERLANG_MK_TMP)/recursive-tmp-deps-$(shell echo $$PPID).log)
+
+$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): | $(ERLANG_MK_TMP)
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_RECURSIVE_TMP_LIST)
+endif
+ $(verbose) touch $(ERLANG_MK_RECURSIVE_TMP_LIST)
+ $(verbose) set -e; for dep in $^ ; do \
+ if ! grep -qs ^$$dep$$ $(ERLANG_MK_RECURSIVE_TMP_LIST); then \
+ echo $$dep >> $(ERLANG_MK_RECURSIVE_TMP_LIST); \
+ if grep -qs -E "^[[:blank:]]*include[[:blank:]]+(erlang\.mk|.*/erlang\.mk|.*ERLANG_MK_FILENAME.*)$$" \
+ $$dep/GNUmakefile $$dep/makefile $$dep/Makefile; then \
+ $(MAKE) -C $$dep fetch-deps \
+ IS_DEP=1 \
+ ERLANG_MK_RECURSIVE_TMP_LIST=$(ERLANG_MK_RECURSIVE_TMP_LIST); \
+ fi \
+ fi \
+ done
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) sort < $(ERLANG_MK_RECURSIVE_TMP_LIST) | \
+ uniq > $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted
+ $(verbose) cmp -s $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted $@ \
+ || mv $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted $@
+ $(verbose) rm -f $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted
+ $(verbose) rm $(ERLANG_MK_RECURSIVE_TMP_LIST)
+endif
+endif # ifneq ($(SKIP_DEPS),)
+
+# List dependencies recursively.
+
+.PHONY: list-deps list-doc-deps list-rel-deps list-test-deps \
+ list-shell-deps
+
+list-deps: $(ERLANG_MK_RECURSIVE_DEPS_LIST)
+list-doc-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST)
+list-rel-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST)
+list-test-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST)
+list-shell-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+list-deps list-doc-deps list-rel-deps list-test-deps list-shell-deps:
+ $(verbose) cat $^
+
+# Query dependencies recursively.
+
+.PHONY: query-deps query-doc-deps query-rel-deps query-test-deps \
+ query-shell-deps
+
+QUERY ?= name fetch_method repo version
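+
+# Example: "make query-deps QUERY='name repo'" prints one line per
+# recursively discovered dependency with just those two fields.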
+
+define query_target
+$(1): $(2) clean-tmp-query.log
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(4)
+endif
+ $(verbose) $(foreach dep,$(3),\
+ echo $(PROJECT): $(foreach q,$(QUERY),$(call query_$(q),$(dep))) >> $(4) ;)
+ $(if $(filter-out query-deps,$(1)),,\
+ $(verbose) set -e; for dep in $(3) ; do \
+ if grep -qs ^$$$$dep$$$$ $(ERLANG_MK_TMP)/query.log; then \
+ :; \
+ else \
+ echo $$$$dep >> $(ERLANG_MK_TMP)/query.log; \
+ $(MAKE) -C $(DEPS_DIR)/$$$$dep $$@ QUERY="$(QUERY)" IS_DEP=1 || true; \
+ fi \
+ done)
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) touch $(4)
+ $(verbose) cat $(4)
+endif
+endef
+
+clean-tmp-query.log:
+ifeq ($(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_TMP)/query.log
+endif
+
+$(eval $(call query_target,query-deps,$(ERLANG_MK_RECURSIVE_DEPS_LIST),$(BUILD_DEPS) $(DEPS),$(ERLANG_MK_QUERY_DEPS_FILE)))
+$(eval $(call query_target,query-doc-deps,$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST),$(DOC_DEPS),$(ERLANG_MK_QUERY_DOC_DEPS_FILE)))
+$(eval $(call query_target,query-rel-deps,$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST),$(REL_DEPS),$(ERLANG_MK_QUERY_REL_DEPS_FILE)))
+$(eval $(call query_target,query-test-deps,$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST),$(TEST_DEPS),$(ERLANG_MK_QUERY_TEST_DEPS_FILE)))
+$(eval $(call query_target,query-shell-deps,$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST),$(SHELL_DEPS),$(ERLANG_MK_QUERY_SHELL_DEPS_FILE)))
diff --git a/deps/rabbitmq_peer_discovery_aws/priv/schema/rabbitmq_peer_discovery_aws.schema b/deps/rabbitmq_peer_discovery_aws/priv/schema/rabbitmq_peer_discovery_aws.schema
new file mode 100644
index 0000000000..85a55404fd
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_aws/priv/schema/rabbitmq_peer_discovery_aws.schema
@@ -0,0 +1,96 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+%% region
+
+{mapping, "cluster_formation.aws.region", "rabbit.cluster_formation.peer_discovery_aws.aws_ec2_region", [
+ {datatype, string}
+]}.
+
+{translation, "rabbit.cluster_formation.peer_discovery_aws.aws_ec2_region",
+fun(Conf) ->
+ case cuttlefish:conf_get("cluster_formation.aws.region", Conf, undefined) of
+ undefined -> cuttlefish:unset();
+ Value -> Value
+ end
+end}.
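+
+%% For example (see the config_schema test snippets in this patch),
+%%   cluster_formation.aws.region = us-west-1
+%% results in {aws_ec2_region, "us-west-1"}.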
+
+%% use_private_ip
+
+{mapping, "cluster_formation.aws.use_private_ip", "rabbit.cluster_formation.peer_discovery_aws.aws_use_private_ip", [
+ {datatype, {enum, [true, false]}}
+]}.
+
+{translation, "rabbit.cluster_formation.peer_discovery_aws.aws_use_private_ip",
+fun(Conf) ->
+ case cuttlefish:conf_get("cluster_formation.aws.use_private_ip", Conf, undefined) of
+ undefined -> cuttlefish:unset();
+ Value -> Value
+ end
+end}.
+
+%% access_key_id
+
+{mapping, "cluster_formation.aws.access_key_id", "rabbit.cluster_formation.peer_discovery_aws.aws_access_key", [
+ {datatype, string}
+]}.
+
+{translation, "rabbit.cluster_formation.peer_discovery_aws.aws_access_key",
+fun(Conf) ->
+ case cuttlefish:conf_get("cluster_formation.aws.access_key_id", Conf, undefined) of
+ undefined -> cuttlefish:unset();
+ Value -> Value
+ end
+end}.
+
+%% secret_key
+
+{mapping, "cluster_formation.aws.secret_key", "rabbit.cluster_formation.peer_discovery_aws.aws_secret_key", [
+ {datatype, string}
+]}.
+
+{translation, "rabbit.cluster_formation.peer_discovery_aws.aws_secret_key",
+fun(Conf) ->
+ case cuttlefish:conf_get("cluster_formation.aws.secret_key", Conf, undefined) of
+ undefined -> cuttlefish:unset();
+ Value -> Value
+ end
+end}.
+
+%% use_autoscaling_group
+
+{mapping, "cluster_formation.aws.use_autoscaling_group", "rabbit.cluster_formation.peer_discovery_aws.aws_autoscaling", [
+ {datatype, {enum, [true, false]}}
+]}.
+
+{translation, "rabbit.cluster_formation.peer_discovery_aws.aws_autoscaling",
+fun(Conf) ->
+ case cuttlefish:conf_get("cluster_formation.aws.use_autoscaling_group", Conf, undefined) of
+ undefined -> cuttlefish:unset();
+ Value -> Value
+ end
+end}.
+
+%% ec2_tags
+
+{mapping, "cluster_formation.aws.instance_tags", "rabbit.cluster_formation.peer_discovery_aws.aws_ec2_tags", [
+ {datatype, {enum, [none]}}
+]}.
+
+{mapping, "cluster_formation.aws.instance_tags.$name", "rabbit.cluster_formation.peer_discovery_aws.aws_ec2_tags", [
+ {datatype, string}
+]}.
+
+{translation, "rabbit.cluster_formation.peer_discovery_aws.aws_ec2_tags",
+fun(Conf) ->
+ case cuttlefish:conf_get("cluster_formation.aws.instance_tags", Conf, undefined) of
+ none -> [];
+ _ ->
+ Settings = cuttlefish_variable:filter_by_prefix("cluster_formation.aws.instance_tags", Conf),
+ [ {lists:last(K), V} || {K, V} <- Settings ]
+ end
+end}.
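+
+%% For example (see the config_schema test snippets in this patch),
+%%   cluster_formation.aws.instance_tags.tag1 = value1
+%%   cluster_formation.aws.instance_tags.tag2 = value2
+%% results in aws_ec2_tags = [{"tag1", "value1"}, {"tag2", "value2"}].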
diff --git a/deps/rabbitmq_peer_discovery_aws/rabbitmq-components.mk b/deps/rabbitmq_peer_discovery_aws/rabbitmq-components.mk
new file mode 100644
index 0000000000..b2a3be8b35
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_aws/rabbitmq-components.mk
@@ -0,0 +1,359 @@
+ifeq ($(.DEFAULT_GOAL),)
+# Define the default goal as `all` because this file defines some
+# targets before the inclusion of erlang.mk, leading to the wrong
+# target becoming the default.
+.DEFAULT_GOAL = all
+endif
+
+# PROJECT_VERSION defaults to:
+# 1. the version exported by rabbitmq-server-release;
+# 2. the version stored in `git-revisions.txt`, if it exists;
+# 3. a version based on git-describe(1), if it is a Git clone;
+# 4. 0.0.0
+
+PROJECT_VERSION := $(RABBITMQ_VERSION)
+
+ifeq ($(PROJECT_VERSION),)
+PROJECT_VERSION := $(shell \
+if test -f git-revisions.txt; then \
+ head -n1 git-revisions.txt | \
+ awk '{print $$$(words $(PROJECT_DESCRIPTION) version);}'; \
+else \
+ (git describe --dirty --abbrev=7 --tags --always --first-parent \
+ 2>/dev/null || echo rabbitmq_v0_0_0) | \
+ sed -e 's/^rabbitmq_v//' -e 's/^v//' -e 's/_/./g' -e 's/-/+/' \
+ -e 's/-/./g'; \
+fi)
+endif
+
+# --------------------------------------------------------------------
+# RabbitMQ components.
+# --------------------------------------------------------------------
+
+# For RabbitMQ repositories, we want to checkout branches which match
+# the parent project. For instance, if the parent project is on a
+# release tag, dependencies must be on the same release tag. If the
+# parent project is on a topic branch, dependencies must be on the same
+# topic branch or fallback to `stable` or `master` whichever was the
+# base of the topic branch.
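+#
+# For example (illustrative): on a topic branch `my-feature` created off
+# `master`, $(current_rmq_ref) is `my-feature` and $(base_rmq_ref) is
+# `master`, so each dependency below is tried at `my-feature` first,
+# then at `master`.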
+
+dep_amqp_client = git_rmq rabbitmq-erlang-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_amqp10_client = git_rmq rabbitmq-amqp1.0-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_amqp10_common = git_rmq rabbitmq-amqp1.0-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbit = git_rmq rabbitmq-server $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbit_common = git_rmq rabbitmq-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_amqp1_0 = git_rmq rabbitmq-amqp1.0 $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_amqp = git_rmq rabbitmq-auth-backend-amqp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_cache = git_rmq rabbitmq-auth-backend-cache $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_http = git_rmq rabbitmq-auth-backend-http $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_ldap = git_rmq rabbitmq-auth-backend-ldap $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_oauth2 = git_rmq rabbitmq-auth-backend-oauth2 $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_mechanism_ssl = git_rmq rabbitmq-auth-mechanism-ssl $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_aws = git_rmq rabbitmq-aws $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_boot_steps_visualiser = git_rmq rabbitmq-boot-steps-visualiser $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_cli = git_rmq rabbitmq-cli $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_codegen = git_rmq rabbitmq-codegen $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_consistent_hash_exchange = git_rmq rabbitmq-consistent-hash-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_ct_client_helpers = git_rmq rabbitmq-ct-client-helpers $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_ct_helpers = git_rmq rabbitmq-ct-helpers $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_delayed_message_exchange = git_rmq rabbitmq-delayed-message-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_dotnet_client = git_rmq rabbitmq-dotnet-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_event_exchange = git_rmq rabbitmq-event-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_federation = git_rmq rabbitmq-federation $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_federation_management = git_rmq rabbitmq-federation-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_java_client = git_rmq rabbitmq-java-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_client = git_rmq rabbitmq-jms-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_cts = git_rmq rabbitmq-jms-cts $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_topic_exchange = git_rmq rabbitmq-jms-topic-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_lvc_exchange = git_rmq rabbitmq-lvc-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management = git_rmq rabbitmq-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_agent = git_rmq rabbitmq-management-agent $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_exchange = git_rmq rabbitmq-management-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_themes = git_rmq rabbitmq-management-themes $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_message_timestamp = git_rmq rabbitmq-message-timestamp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_metronome = git_rmq rabbitmq-metronome $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_mqtt = git_rmq rabbitmq-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_objc_client = git_rmq rabbitmq-objc-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_aws = git_rmq rabbitmq-peer-discovery-aws $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_common = git_rmq rabbitmq-peer-discovery-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_consul = git_rmq rabbitmq-peer-discovery-consul $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_etcd = git_rmq rabbitmq-peer-discovery-etcd $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_k8s = git_rmq rabbitmq-peer-discovery-k8s $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_prometheus = git_rmq rabbitmq-prometheus $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_random_exchange = git_rmq rabbitmq-random-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_recent_history_exchange = git_rmq rabbitmq-recent-history-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_routing_node_stamp = git_rmq rabbitmq-routing-node-stamp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_rtopic_exchange = git_rmq rabbitmq-rtopic-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_server_release = git_rmq rabbitmq-server-release $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_sharding = git_rmq rabbitmq-sharding $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_shovel = git_rmq rabbitmq-shovel $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_shovel_management = git_rmq rabbitmq-shovel-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_stomp = git_rmq rabbitmq-stomp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_stream = git_rmq rabbitmq-stream $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_toke = git_rmq rabbitmq-toke $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_top = git_rmq rabbitmq-top $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_tracing = git_rmq rabbitmq-tracing $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_trust_store = git_rmq rabbitmq-trust-store $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_test = git_rmq rabbitmq-test $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_dispatch = git_rmq rabbitmq-web-dispatch $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_stomp = git_rmq rabbitmq-web-stomp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_stomp_examples = git_rmq rabbitmq-web-stomp-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt = git_rmq rabbitmq-web-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt_examples = git_rmq rabbitmq-web-mqtt-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_website = git_rmq rabbitmq-website $(current_rmq_ref) $(base_rmq_ref) live master
+dep_toke = git_rmq toke $(current_rmq_ref) $(base_rmq_ref) master
+
+dep_rabbitmq_public_umbrella = git_rmq rabbitmq-public-umbrella $(current_rmq_ref) $(base_rmq_ref) master
+
+# Third-party dependencies version pinning.
+#
+# We do that in this file, which is copied in all projects, to ensure
+# all projects use the same versions. It avoids conflicts and makes it
+# possible to work with rabbitmq-public-umbrella.
+
+dep_accept = hex 0.3.5
+dep_cowboy = hex 2.8.0
+dep_cowlib = hex 2.9.1
+dep_jsx = hex 2.11.0
+dep_lager = hex 3.8.0
+dep_prometheus = git https://github.com/deadtrickster/prometheus.erl.git master
+dep_ra = git https://github.com/rabbitmq/ra.git master
+dep_ranch = hex 1.7.1
+dep_recon = hex 2.5.1
+dep_observer_cli = hex 1.5.4
+dep_stdout_formatter = hex 0.2.4
+dep_sysmon_handler = hex 1.3.0
+
+RABBITMQ_COMPONENTS = amqp_client \
+ amqp10_common \
+ amqp10_client \
+ rabbit \
+ rabbit_common \
+ rabbitmq_amqp1_0 \
+ rabbitmq_auth_backend_amqp \
+ rabbitmq_auth_backend_cache \
+ rabbitmq_auth_backend_http \
+ rabbitmq_auth_backend_ldap \
+ rabbitmq_auth_backend_oauth2 \
+ rabbitmq_auth_mechanism_ssl \
+ rabbitmq_aws \
+ rabbitmq_boot_steps_visualiser \
+ rabbitmq_cli \
+ rabbitmq_codegen \
+ rabbitmq_consistent_hash_exchange \
+ rabbitmq_ct_client_helpers \
+ rabbitmq_ct_helpers \
+ rabbitmq_delayed_message_exchange \
+ rabbitmq_dotnet_client \
+ rabbitmq_event_exchange \
+ rabbitmq_federation \
+ rabbitmq_federation_management \
+ rabbitmq_java_client \
+ rabbitmq_jms_client \
+ rabbitmq_jms_cts \
+ rabbitmq_jms_topic_exchange \
+ rabbitmq_lvc_exchange \
+ rabbitmq_management \
+ rabbitmq_management_agent \
+ rabbitmq_management_exchange \
+ rabbitmq_management_themes \
+ rabbitmq_message_timestamp \
+ rabbitmq_metronome \
+ rabbitmq_mqtt \
+ rabbitmq_objc_client \
+ rabbitmq_peer_discovery_aws \
+ rabbitmq_peer_discovery_common \
+ rabbitmq_peer_discovery_consul \
+ rabbitmq_peer_discovery_etcd \
+ rabbitmq_peer_discovery_k8s \
+ rabbitmq_prometheus \
+ rabbitmq_random_exchange \
+ rabbitmq_recent_history_exchange \
+ rabbitmq_routing_node_stamp \
+ rabbitmq_rtopic_exchange \
+ rabbitmq_server_release \
+ rabbitmq_sharding \
+ rabbitmq_shovel \
+ rabbitmq_shovel_management \
+ rabbitmq_stomp \
+ rabbitmq_stream \
+ rabbitmq_toke \
+ rabbitmq_top \
+ rabbitmq_tracing \
+ rabbitmq_trust_store \
+ rabbitmq_web_dispatch \
+ rabbitmq_web_mqtt \
+ rabbitmq_web_mqtt_examples \
+ rabbitmq_web_stomp \
+ rabbitmq_web_stomp_examples \
+ rabbitmq_website
+
+# Erlang.mk does not rebuild dependencies by default once they have
+# been compiled, except for those listed in the `$(FORCE_REBUILD)`
+# variable.
+#
+# We want all RabbitMQ components to always be rebuilt: this eases
+# the work on several components at the same time.
+
+FORCE_REBUILD = $(RABBITMQ_COMPONENTS)
+
+# Several components have a custom erlang.mk/build.config, mainly
+# to disable eunit. Therefore, we can't use the top-level project's
+# erlang.mk copy.
+NO_AUTOPATCH += $(RABBITMQ_COMPONENTS)
+
+ifeq ($(origin current_rmq_ref),undefined)
+ifneq ($(wildcard .git),)
+current_rmq_ref := $(shell (\
+ ref=$$(LANG=C git branch --list | awk '/^\* \(.*detached / {ref=$$0; sub(/.*detached [^ ]+ /, "", ref); sub(/\)$$/, "", ref); print ref; exit;} /^\* / {ref=$$0; sub(/^\* /, "", ref); print ref; exit}');\
+ if test "$$(git rev-parse --short HEAD)" != "$$ref"; then echo "$$ref"; fi))
+else
+current_rmq_ref := master
+endif
+endif
+export current_rmq_ref
+
+ifeq ($(origin base_rmq_ref),undefined)
+ifneq ($(wildcard .git),)
+possible_base_rmq_ref := master
+ifeq ($(possible_base_rmq_ref),$(current_rmq_ref))
+base_rmq_ref := $(current_rmq_ref)
+else
+base_rmq_ref := $(shell \
+ (git rev-parse --verify -q master >/dev/null && \
+ git rev-parse --verify -q $(possible_base_rmq_ref) >/dev/null && \
+ git merge-base --is-ancestor $$(git merge-base master HEAD) $(possible_base_rmq_ref) && \
+ echo $(possible_base_rmq_ref)) || \
+ echo master)
+endif
+else
+base_rmq_ref := master
+endif
+endif
+export base_rmq_ref
+
+# Repository URL selection.
+#
+# First, we infer other components' location from the current project
+# repository URL, if it's a Git repository:
+# - We take the "origin" remote URL as the base
+# - The current project name and repository name are replaced by the
+#   target's properties:
+#   e.g. rabbitmq-common is replaced by rabbitmq-codegen
+#   e.g. rabbit_common is replaced by rabbitmq_codegen
+#
+# If cloning from this computed location fails, we fall back to the
+# RabbitMQ upstream, which is on GitHub.
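+#
+# For example (illustrative): if this clone's "origin" URL is
+# git@github.com:me/rabbitmq-peer-discovery-aws.git, the computed
+# location for rabbit_common is git@github.com:me/rabbitmq-common.git;
+# if cloning from there fails, we use
+# https://github.com/rabbitmq/rabbitmq-common.git instead.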
+
+# Macro to transform e.g. "rabbit_common" to "rabbitmq-common".
+rmq_cmp_repo_name = $(word 2,$(dep_$(1)))
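+# For example, $(call rmq_cmp_repo_name,rabbit_common) expands to
+# "rabbitmq-common", taken from the dep_rabbit_common definition above.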
+
+# Upstream URL for the current project.
+RABBITMQ_COMPONENT_REPO_NAME := $(call rmq_cmp_repo_name,$(PROJECT))
+RABBITMQ_UPSTREAM_FETCH_URL ?= https://github.com/rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git
+RABBITMQ_UPSTREAM_PUSH_URL ?= git@github.com:rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git
+
+# Current URL for the current project. If this is not a Git clone,
+# default to the upstream Git repository.
+ifneq ($(wildcard .git),)
+git_origin_fetch_url := $(shell git config remote.origin.url)
+git_origin_push_url := $(shell git config remote.origin.pushurl || git config remote.origin.url)
+RABBITMQ_CURRENT_FETCH_URL ?= $(git_origin_fetch_url)
+RABBITMQ_CURRENT_PUSH_URL ?= $(git_origin_push_url)
+else
+RABBITMQ_CURRENT_FETCH_URL ?= $(RABBITMQ_UPSTREAM_FETCH_URL)
+RABBITMQ_CURRENT_PUSH_URL ?= $(RABBITMQ_UPSTREAM_PUSH_URL)
+endif
+
+# Macro to replace the following pattern:
+# 1. /foo.git -> /bar.git
+# 2. /foo -> /bar
+# 3. /foo/ -> /bar/
+subst_repo_name = $(patsubst %/$(1)/%,%/$(2)/%,$(patsubst %/$(1),%/$(2),$(patsubst %/$(1).git,%/$(2).git,$(3))))
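+# For example (illustrative):
+#   $(call subst_repo_name,rabbitmq-common,rabbitmq-codegen,https://github.com/rabbitmq/rabbitmq-common.git)
+# yields https://github.com/rabbitmq/rabbitmq-codegen.git.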
+
+# Macro to replace both the project's name (eg. "rabbit_common") and
+# repository name (eg. "rabbitmq-common") by the target's equivalent.
+#
+# This macro is kept on one line because we don't want whitespace in
+# the returned value, as it's used in $(dep_fetch_git_rmq) in a shell
+# single-quoted string.
+dep_rmq_repo = $(if $(dep_$(2)),$(call subst_repo_name,$(PROJECT),$(2),$(call subst_repo_name,$(RABBITMQ_COMPONENT_REPO_NAME),$(call rmq_cmp_repo_name,$(2)),$(1))),$(pkg_$(1)_repo))
+
+dep_rmq_commits = $(if $(dep_$(1)), \
+ $(wordlist 3,$(words $(dep_$(1))),$(dep_$(1))), \
+ $(pkg_$(1)_commit))
+
+define dep_fetch_git_rmq
+ fetch_url1='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_FETCH_URL),$(1))'; \
+ fetch_url2='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_FETCH_URL),$(1))'; \
+ if test "$$$$fetch_url1" != '$(RABBITMQ_CURRENT_FETCH_URL)' && \
+ git clone -q -n -- "$$$$fetch_url1" $(DEPS_DIR)/$(call dep_name,$(1)); then \
+ fetch_url="$$$$fetch_url1"; \
+ push_url='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_PUSH_URL),$(1))'; \
+ elif git clone -q -n -- "$$$$fetch_url2" $(DEPS_DIR)/$(call dep_name,$(1)); then \
+ fetch_url="$$$$fetch_url2"; \
+ push_url='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_PUSH_URL),$(1))'; \
+ fi; \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && ( \
+ $(foreach ref,$(call dep_rmq_commits,$(1)), \
+ git checkout -q $(ref) >/dev/null 2>&1 || \
+ ) \
+ (echo "error: no valid pathspec among: $(call dep_rmq_commits,$(1))" \
+ 1>&2 && false) ) && \
+ (test "$$$$fetch_url" = "$$$$push_url" || \
+ git remote set-url --push origin "$$$$push_url")
+endef
+
+# --------------------------------------------------------------------
+# Component distribution.
+# --------------------------------------------------------------------
+
+list-dist-deps::
+ @:
+
+prepare-dist::
+ @:
+
+# --------------------------------------------------------------------
+# Umbrella-specific settings.
+# --------------------------------------------------------------------
+
+# If the top-level project is a RabbitMQ component, we override
+# $(DEPS_DIR) for this project to point to the top-level's one.
+#
+# We also verify that the guessed DEPS_DIR is actually named `deps`,
+# to rule out any situation where it is a coincidence that we found a
+# `rabbitmq-components.mk` in upper directories.
+
+possible_deps_dir_1 = $(abspath ..)
+possible_deps_dir_2 = $(abspath ../../..)
+
+ifeq ($(notdir $(possible_deps_dir_1)),deps)
+ifneq ($(wildcard $(possible_deps_dir_1)/../rabbitmq-components.mk),)
+deps_dir_overriden = 1
+DEPS_DIR ?= $(possible_deps_dir_1)
+DISABLE_DISTCLEAN = 1
+endif
+endif
+
+ifeq ($(deps_dir_overriden),)
+ifeq ($(notdir $(possible_deps_dir_2)),deps)
+ifneq ($(wildcard $(possible_deps_dir_2)/../rabbitmq-components.mk),)
+deps_dir_overriden = 1
+DEPS_DIR ?= $(possible_deps_dir_2)
+DISABLE_DISTCLEAN = 1
+endif
+endif
+endif
+
+ifneq ($(wildcard UMBRELLA.md),)
+DISABLE_DISTCLEAN = 1
+endif
+
+# We disable `make distclean` so $(DEPS_DIR) is not accidentally removed.
+
+ifeq ($(DISABLE_DISTCLEAN),1)
+ifneq ($(filter distclean distclean-deps,$(MAKECMDGOALS)),)
+SKIP_DEPS = 1
+endif
+endif
diff --git a/deps/rabbitmq_peer_discovery_aws/src/rabbit_peer_discovery_aws.erl b/deps/rabbitmq_peer_discovery_aws/src/rabbit_peer_discovery_aws.erl
new file mode 100644
index 0000000000..a814b0ac2f
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_aws/src/rabbit_peer_discovery_aws.erl
@@ -0,0 +1,379 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% The Initial Developer of the Original Code is AWeber Communications.
+%% Copyright (c) 2015-2016 AWeber Communications
+%% Copyright (c) 2016-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_peer_discovery_aws).
+-behaviour(rabbit_peer_discovery_backend).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+-include_lib("rabbitmq_peer_discovery_common/include/rabbit_peer_discovery.hrl").
+
+-export([init/0, list_nodes/0, supports_registration/0, register/0, unregister/0,
+ post_registration/0, lock/1, unlock/1]).
+
+-type tags() :: map().
+-type filters() :: [{string(), string()}].
+
+-ifdef(TEST).
+-compile(export_all).
+-endif.
+
+% rabbitmq/rabbitmq-peer-discovery-aws#25
+
+% Note: this timeout must not be greater than the default
+% gen_server:call timeout of 5000 ms. `timeout`, when set, is used by
+% `httpc` first as the connect timeout and then as the request timeout.
+-define(INSTANCE_ID_TIMEOUT, 2250).
+-define(INSTANCE_ID_URL,
+ "http://169.254.169.254/latest/meta-data/instance-id").
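+
+%% For troubleshooting, the same metadata endpoint can be queried
+%% manually from an EC2 instance, e.g.:
+%%   curl http://169.254.169.254/latest/meta-data/instance-id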
+
+-define(CONFIG_MODULE, rabbit_peer_discovery_config).
+-define(UTIL_MODULE, rabbit_peer_discovery_util).
+
+-define(BACKEND_CONFIG_KEY, peer_discovery_aws).
+
+-define(CONFIG_MAPPING,
+ #{
+ aws_autoscaling => #peer_discovery_config_entry_meta{
+ type = atom,
+ env_variable = "AWS_AUTOSCALING",
+ default_value = false
+ },
+ aws_ec2_tags => #peer_discovery_config_entry_meta{
+ type = map,
+ env_variable = "AWS_EC2_TAGS",
+ default_value = #{}
+ },
+ aws_access_key => #peer_discovery_config_entry_meta{
+ type = string,
+ env_variable = "AWS_ACCESS_KEY_ID",
+ default_value = "undefined"
+ },
+ aws_secret_key => #peer_discovery_config_entry_meta{
+ type = string,
+ env_variable = "AWS_SECRET_ACCESS_KEY",
+ default_value = "undefined"
+ },
+ aws_ec2_region => #peer_discovery_config_entry_meta{
+ type = string,
+ env_variable = "AWS_EC2_REGION",
+ default_value = "undefined"
+ },
+ aws_use_private_ip => #peer_discovery_config_entry_meta{
+ type = atom,
+ env_variable = "AWS_USE_PRIVATE_IP",
+ default_value = false
+ }
+ }).
+
+%%
+%% API
+%%
+
+init() ->
+ rabbit_log:debug("Peer discovery AWS: initialising..."),
+ ok = application:ensure_started(inets),
+ %% we cannot start this plugin yet since it depends on the rabbit app,
+ %% which is in the process of being started by the time this function is called
+ application:load(rabbitmq_peer_discovery_common),
+ rabbit_peer_discovery_httpc:maybe_configure_proxy(),
+ rabbit_peer_discovery_httpc:maybe_configure_inet6().
+
+-spec list_nodes() -> {ok, {Nodes :: list(), NodeType :: rabbit_types:node_type()}} |
+ {error, Reason :: string()}.
+
+list_nodes() ->
+ M = ?CONFIG_MODULE:config_map(?BACKEND_CONFIG_KEY),
+ {ok, _} = application:ensure_all_started(rabbitmq_aws),
+ rabbit_log:debug("Started rabbitmq_aws"),
+ rabbit_log:debug("Will use AWS access key of '~s'", [get_config_key(aws_access_key, M)]),
+ ok = maybe_set_region(get_config_key(aws_ec2_region, M)),
+ ok = maybe_set_credentials(get_config_key(aws_access_key, M),
+ get_config_key(aws_secret_key, M)),
+ case get_config_key(aws_autoscaling, M) of
+ true ->
+ get_autoscaling_group_node_list(instance_id(), get_tags());
+ false ->
+ get_node_list_from_tags(get_tags())
+ end.
+
+-spec supports_registration() -> boolean().
+
+supports_registration() ->
+ %% see rabbitmq-peer-discovery-aws#17
+ true.
+
+
+-spec register() -> ok.
+register() ->
+ ok.
+
+-spec unregister() -> ok.
+unregister() ->
+ ok.
+
+-spec post_registration() -> ok | {error, Reason :: string()}.
+
+post_registration() ->
+ ok.
+
+-spec lock(Node :: atom()) -> not_supported.
+
+lock(_Node) ->
+ not_supported.
+
+-spec unlock(Data :: term()) -> ok.
+
+unlock(_Data) ->
+ ok.
+
+%%
+%% Implementation
+%%
+-spec get_config_key(Key :: atom(), Map :: #{atom() => peer_discovery_config_value()})
+ -> peer_discovery_config_value().
+
+get_config_key(Key, Map) ->
+ ?CONFIG_MODULE:get(Key, ?CONFIG_MAPPING, Map).
+
+-spec maybe_set_credentials(AccessKey :: string(),
+ SecretKey :: string()) -> ok.
+%% @private
+%% @doc Set the API credentials if they are set in configuration.
+%% @end
+%%
+maybe_set_credentials("undefined", _) -> ok;
+maybe_set_credentials(_, "undefined") -> ok;
+maybe_set_credentials(AccessKey, SecretKey) ->
+ rabbit_log:debug("Setting AWS credentials, access key: '~s'", [AccessKey]),
+ rabbitmq_aws:set_credentials(AccessKey, SecretKey).
+
+
+-spec maybe_set_region(Region :: string()) -> ok.
+%% @private
+%% @doc Set the region from the configuration value, if it was set.
+%% @end
+%%
+maybe_set_region("undefined") -> ok;
+maybe_set_region(Value) ->
+ rabbit_log:debug("Setting AWS region to ~p", [Value]),
+ rabbitmq_aws:set_region(Value).
+
+get_autoscaling_group_node_list(error, _) ->
+ rabbit_log:warning("Cannot discover any nodes: failed to fetch this node's EC2 "
+ "instance id from ~s", [?INSTANCE_ID_URL]),
+ {ok, {[], disc}};
+get_autoscaling_group_node_list(Instance, Tag) ->
+ case get_all_autoscaling_instances([]) of
+ {ok, Instances} ->
+ case find_autoscaling_group(Instances, Instance) of
+ {ok, Group} ->
+ rabbit_log:debug("Performing autoscaling group discovery, group: ~p", [Group]),
+ Values = get_autoscaling_instances(Instances, Group, []),
+ rabbit_log:debug("Performing autoscaling group discovery, found instances: ~p", [Values]),
+ case get_hostname_by_instance_ids(Values, Tag) of
+ error ->
+ Msg = "Cannot discover any nodes: DescribeInstances API call failed",
+ rabbit_log:error(Msg),
+ {error, Msg};
+ Names ->
+ rabbit_log:debug("Performing autoscaling group-based discovery, hostnames: ~p", [Names]),
+ {ok, {[?UTIL_MODULE:node_name(N) || N <- Names], disc}}
+ end;
+ error ->
+ rabbit_log:warning("Cannot discover any nodes because no AWS "
+ "autoscaling group could be found in "
+ "the instance description. Make sure that this instance"
+ " belongs to an autoscaling group.", []),
+ {ok, {[], disc}}
+ end;
+ _ ->
+ Msg = "Cannot discover any nodes because AWS autoscaling group description API call failed",
+ rabbit_log:warning(Msg),
+ {error, Msg}
+ end.
+
+get_autoscaling_instances([], _, Accum) -> Accum;
+get_autoscaling_instances([H|T], Group, Accum) ->
+ GroupName = proplists:get_value("AutoScalingGroupName", H),
+ case GroupName == Group of
+ true ->
+ Node = proplists:get_value("InstanceId", H),
+ get_autoscaling_instances(T, Group, lists:append([Node], Accum));
+ false ->
+ get_autoscaling_instances(T, Group, Accum)
+ end.
+
+get_all_autoscaling_instances(Accum) ->
+ QArgs = [{"Action", "DescribeAutoScalingInstances"}, {"Version", "2011-01-01"}],
+ fetch_all_autoscaling_instances(QArgs, Accum).
+
+get_all_autoscaling_instances(Accum, 'undefined') -> {ok, Accum};
+get_all_autoscaling_instances(Accum, NextToken) ->
+ QArgs = [{"Action", "DescribeAutoScalingInstances"}, {"Version", "2011-01-01"},
+ {"NextToken", NextToken}],
+ fetch_all_autoscaling_instances(QArgs, Accum).
+
+fetch_all_autoscaling_instances(QArgs, Accum) ->
+ Path = "/?" ++ rabbitmq_aws_urilib:build_query_string(QArgs),
+ case api_get_request("autoscaling", Path) of
+ {ok, Payload} ->
+ Instances = flatten_autoscaling_datastructure(Payload),
+ NextToken = get_next_token(Payload),
+ get_all_autoscaling_instances(lists:append(Instances, Accum), NextToken);
+ {error, Reason} = Error ->
+ rabbit_log:error("Error fetching autoscaling group instance list: ~p", [Reason]),
+ Error
+ end.
+
+flatten_autoscaling_datastructure(Value) ->
+ Response = proplists:get_value("DescribeAutoScalingInstancesResponse", Value),
+ Result = proplists:get_value("DescribeAutoScalingInstancesResult", Response),
+ Instances = proplists:get_value("AutoScalingInstances", Result),
+ [Instance || {_, Instance} <- Instances].
+
+get_next_token(Value) ->
+ Response = proplists:get_value("DescribeAutoScalingInstancesResponse", Value),
+ Result = proplists:get_value("DescribeAutoScalingInstancesResult", Response),
+ NextToken = proplists:get_value("NextToken", Result),
+ NextToken.
+
+api_get_request(Service, Path) ->
+ case rabbitmq_aws:get(Service, Path) of
+ {ok, {_Headers, Payload}} ->
+ rabbit_log:debug("AWS request: ~s~nResponse: ~p~n",
+ [Path, Payload]),
+ {ok, Payload};
+ {error, {credentials, _}} -> {error, credentials};
+ {error, Message, _} -> {error, Message}
+ end.
+
+-spec find_autoscaling_group(Instances :: list(), Instance :: string())
+ -> string() | error.
+%% @private
+%% @doc Attempt to find the Auto Scaling Group ID by finding the current
+%% instance in the list of instances returned by the autoscaling API
+%% endpoint.
+%% @end
+%%
+find_autoscaling_group([], _) -> error;
+find_autoscaling_group([H|T], Instance) ->
+ case proplists:get_value("InstanceId", H) == Instance of
+ true ->
+ {ok, proplists:get_value("AutoScalingGroupName", H)};
+ false ->
+ find_autoscaling_group(T, Instance)
+ end.
+
+get_hostname_by_instance_ids(Instances, Tag) ->
+ QArgs = build_instance_list_qargs(Instances,
+ [{"Action", "DescribeInstances"},
+ {"Version", "2015-10-01"}]),
+ QArgs2 = lists:keysort(1, maybe_add_tag_filters(Tag, QArgs, 1)),
+ Path = "/?" ++ rabbitmq_aws_urilib:build_query_string(QArgs2),
+ get_hostname_names(Path).
+
+-spec build_instance_list_qargs(Instances :: list(), Accum :: list()) -> list().
+%% @private
+%% @doc Build the Query args for filtering instances by InstanceID.
+%% @end
+%%
+build_instance_list_qargs([], Accum) -> Accum;
+build_instance_list_qargs([H|T], Accum) ->
+ Key = "InstanceId." ++ integer_to_list(length(Accum) + 1),
+ build_instance_list_qargs(T, lists:append([{Key, H}], Accum)).
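+
+%% Illustration: with an empty accumulator,
+%%   build_instance_list_qargs(["i-0123456789abcdef0"], [])
+%% yields [{"InstanceId.1", "i-0123456789abcdef0"}].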
+
+-spec maybe_add_tag_filters(tags(), filters(), integer()) -> filters().
+maybe_add_tag_filters(Tags, QArgs, Num) ->
+ {Filters, _} =
+ maps:fold(fun(Key, Value, {AccQ, AccN}) ->
+ {lists:append(
+ [{"Filter." ++ integer_to_list(AccN) ++ ".Name", "tag:" ++ Key},
+ {"Filter." ++ integer_to_list(AccN) ++ ".Value.1", Value}],
+ AccQ),
+ AccN + 1}
+ end, {QArgs, Num}, Tags),
+ Filters.
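+
+%% For example (as exercised by rabbitmq_peer_discovery_aws_SUITE),
+%% #{"service" => "rabbitmq"} with Num = 1 appends
+%% {"Filter.1.Name", "tag:service"} and
+%% {"Filter.1.Value.1", "rabbitmq"} to QArgs.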
+
+-spec get_node_list_from_tags(tags()) -> {ok, {[node()], disc}}.
+get_node_list_from_tags([]) ->
+ rabbit_log:warning("Cannot discover any nodes because AWS tags are not configured!", []),
+ {ok, {[], disc}};
+get_node_list_from_tags(Tags) ->
+ {ok, {[?UTIL_MODULE:node_name(N) || N <- get_hostname_by_tags(Tags)], disc}}.
+
+get_hostname_name_from_reservation_set([], Accum) -> Accum;
+get_hostname_name_from_reservation_set([{"item", RI}|T], Accum) ->
+ InstancesSet = proplists:get_value("instancesSet", RI),
+ Items = [Item || {"item", Item} <- InstancesSet],
+ HostnameKey = select_hostname(),
+ Hostnames = [Hostname || Item <- Items,
+ {HKey, Hostname} <- Item,
+ HKey == HostnameKey,
+ Hostname =/= ""],
+ get_hostname_name_from_reservation_set(T, Accum ++ Hostnames).
+
+get_hostname_names(Path) ->
+ case api_get_request("ec2", Path) of
+ {ok, Payload} ->
+ Response = proplists:get_value("DescribeInstancesResponse", Payload),
+ ReservationSet = proplists:get_value("reservationSet", Response),
+ get_hostname_name_from_reservation_set(ReservationSet, []);
+ {error, Reason} ->
+ rabbit_log:error("Error fetching node list via EC2 API, request path: ~s, error: ~p", [Path, Reason]),
+ error
+ end.
+
+get_hostname_by_tags(Tags) ->
+ QArgs = [{"Action", "DescribeInstances"}, {"Version", "2015-10-01"}],
+ QArgs2 = lists:keysort(1, maybe_add_tag_filters(Tags, QArgs, 1)),
+ Path = "/?" ++ rabbitmq_aws_urilib:build_query_string(QArgs2),
+ case get_hostname_names(Path) of
+ error ->
+ rabbit_log:warning("Cannot discover any nodes because AWS "
+ "instance description with tags ~p failed", [Tags]),
+ [];
+ Names ->
+ Names
+ end.
+
+-spec select_hostname() -> string().
+select_hostname() ->
+ case get_config_key(aws_use_private_ip, ?CONFIG_MODULE:config_map(?BACKEND_CONFIG_KEY)) of
+ true -> "privateIpAddress";
+ false -> "privateDnsName";
+ _ -> "privateDnsName"
+ end.
+
+-spec instance_id() -> string() | error.
+%% @private
+%% @doc Return the local instance ID from the EC2 metadata service
+%% @end
+%%
+instance_id() ->
+ case httpc:request(get, {?INSTANCE_ID_URL, []},
+ [{timeout, ?INSTANCE_ID_TIMEOUT}], []) of
+ {ok, {{_, 200, _}, _, Value}} ->
+ rabbit_log:debug("Fetched EC2 instance ID from ~p: ~p",
+ [?INSTANCE_ID_URL, Value]),
+ Value;
+ Other ->
+ rabbit_log:error("Failed to fetch EC2 instance ID from ~p: ~p",
+ [?INSTANCE_ID_URL, Other]),
+ error
+ end.
+
+-spec get_tags() -> tags().
+get_tags() ->
+ Tags = get_config_key(aws_ec2_tags, ?CONFIG_MODULE:config_map(?BACKEND_CONFIG_KEY)),
+ case Tags of
+ Value when is_list(Value) ->
+ maps:from_list(Value);
+ _ -> Tags
+ end.
diff --git a/deps/rabbitmq_peer_discovery_aws/src/rabbitmq_peer_discovery_aws.erl b/deps/rabbitmq_peer_discovery_aws/src/rabbitmq_peer_discovery_aws.erl
new file mode 100644
index 0000000000..f60667dfd3
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_aws/src/rabbitmq_peer_discovery_aws.erl
@@ -0,0 +1,54 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+%% This module exists as an alias for rabbit_peer_discovery_aws.
+%% Some users assume that the discovery module is the same as the
+%% plugin name. This module fills the naming gap between module and
+%% plugin names.
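+%%
+%% With this alias in place, the plugin name can be used directly as the
+%% backend, e.g. (illustrative rabbitmq.conf line):
+%%   cluster_formation.peer_discovery_backend = rabbitmq_peer_discovery_aws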
+-module(rabbitmq_peer_discovery_aws).
+-behaviour(rabbit_peer_discovery_backend).
+
+-export([init/0, list_nodes/0, supports_registration/0, register/0, unregister/0,
+ post_registration/0, lock/1, unlock/1]).
+
+-define(DELEGATE, rabbit_peer_discovery_aws).
+
+%%
+%% API
+%%
+
+init() ->
+ ?DELEGATE:init().
+
+-spec list_nodes() -> {ok, {Nodes :: list(), NodeType :: rabbit_types:node_type()}} |
+ {error, Reason :: string()}.
+list_nodes() ->
+ ?DELEGATE:list_nodes().
+
+-spec supports_registration() -> boolean().
+supports_registration() ->
+ ?DELEGATE:supports_registration().
+
+
+-spec register() -> ok.
+register() ->
+ ?DELEGATE:register().
+
+-spec unregister() -> ok.
+unregister() ->
+ ?DELEGATE:unregister().
+
+-spec post_registration() -> ok | {error, Reason :: string()}.
+post_registration() ->
+ ?DELEGATE:post_registration().
+
+-spec lock(Node :: atom()) -> not_supported.
+lock(Node) ->
+ ?DELEGATE:lock(Node).
+
+-spec unlock(Data :: term()) -> ok.
+unlock(Data) ->
+ ?DELEGATE:unlock(Data).
diff --git a/deps/rabbitmq_peer_discovery_aws/test/config_schema_SUITE.erl b/deps/rabbitmq_peer_discovery_aws/test/config_schema_SUITE.erl
new file mode 100644
index 0000000000..87a6edded7
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_aws/test/config_schema_SUITE.erl
@@ -0,0 +1,55 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(config_schema_SUITE).
+
+-compile(export_all).
+
+all() ->
+ [
+ run_snippets
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ Config1 = rabbit_ct_helpers:run_setup_steps(Config),
+ rabbit_ct_config_schema:init_schemas(rabbitmq_peer_discovery_aws, Config1).
+
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, Testcase}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_testcase(Testcase, Config) ->
+ Config1 = rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()),
+ rabbit_ct_helpers:testcase_finished(Config1, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+run_snippets(Config) ->
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, run_snippets1, [Config]).
+
+run_snippets1(Config) ->
+ rabbit_ct_config_schema:run_snippets(Config).
+
diff --git a/deps/rabbitmq_peer_discovery_aws/test/config_schema_SUITE_data/rabbitmq_peer_discovery_aws.snippets b/deps/rabbitmq_peer_discovery_aws/test/config_schema_SUITE_data/rabbitmq_peer_discovery_aws.snippets
new file mode 100644
index 0000000000..d1fa86ae00
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_aws/test/config_schema_SUITE_data/rabbitmq_peer_discovery_aws.snippets
@@ -0,0 +1,95 @@
+[
+ %% discovery mechanism
+ {aws_ec2_discovery_mechanism_as_module,
+ "cluster_formation.peer_discovery_backend = rabbit_peer_discovery_aws
+ cluster_formation.aws.region = us-west-1", [
+ {rabbit, [
+ {cluster_formation, [
+ {peer_discovery_backend, rabbit_peer_discovery_aws},
+ {peer_discovery_aws, [
+ {aws_ec2_region, "us-west-1"}
+ ]}
+ ]}
+ ]}
+ ], [rabbitmq_peer_discovery_aws]
+ },
+ {aws_ec2_discovery_mechanism_as_alias,
+ "cluster_formation.peer_discovery_backend = aws
+ cluster_formation.aws.region = us-west-1", [
+ {rabbit, [
+ {cluster_formation, [
+ {peer_discovery_backend, rabbit_peer_discovery_aws},
+ {peer_discovery_aws, [
+ {aws_ec2_region, "us-west-1"}
+ ]}
+ ]}
+ ]}
+ ], [rabbitmq_peer_discovery_aws]
+ },
+
+ %% region
+ {aws_ec2_region, "cluster_formation.aws.region = us-west-1", [
+ {rabbit, [
+ {cluster_formation, [
+ {peer_discovery_aws, [
+ {aws_ec2_region, "us-west-1"}
+ ]}
+ ]}
+ ]}
+ ], [rabbitmq_peer_discovery_aws]
+ },
+ {aws_use_private_ip, "cluster_formation.aws.use_private_ip = true", [
+ {rabbit, [
+ {cluster_formation, [
+ {peer_discovery_aws, [
+ {aws_use_private_ip, true}
+ ]}
+ ]}
+ ]}
+ ], [rabbitmq_peer_discovery_aws]
+ },
+ {aws_access_key_id, "cluster_formation.aws.access_key_id = AKIAIOSFODNN7EXAMPLE", [
+ {rabbit, [
+ {cluster_formation, [
+ {peer_discovery_aws, [
+ {aws_access_key, "AKIAIOSFODNN7EXAMPLE"}
+ ]}
+ ]}
+ ]}
+ ], [rabbitmq_peer_discovery_aws]
+ },
+ {aws_secret_key, "cluster_formation.aws.secret_key = wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", [
+ {rabbit, [
+ {cluster_formation, [
+ {peer_discovery_aws, [
+ {aws_secret_key, "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"}
+ ]}
+ ]}
+ ]}
+ ], [rabbitmq_peer_discovery_aws]
+ },
+ {aws_use_autoscaling_group, "cluster_formation.aws.use_autoscaling_group = true", [
+ {rabbit, [
+ {cluster_formation, [
+ {peer_discovery_aws, [
+ {aws_autoscaling, true}
+ ]}
+ ]}
+ ]}
+ ], [rabbitmq_peer_discovery_aws]
+ },
+ {aws_ec2_tags,
+ "cluster_formation.aws.instance_tags.tag1 = value1
+ cluster_formation.aws.instance_tags.tag2 = value2
+ cluster_formation.aws.instance_tags.tag3 = value3",
+ [
+ {rabbit, [
+ {cluster_formation, [
+ {peer_discovery_aws, [
+ {aws_ec2_tags, [{"tag1", "value1"}, {"tag2", "value2"}, {"tag3", "value3"}]}
+ ]}
+ ]}
+ ]}
+ ], [rabbitmq_peer_discovery_aws]
+ }
+].
diff --git a/deps/rabbitmq_peer_discovery_aws/test/integration_SUITE.erl b/deps/rabbitmq_peer_discovery_aws/test/integration_SUITE.erl
new file mode 100644
index 0000000000..e574f3c00c
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_aws/test/integration_SUITE.erl
@@ -0,0 +1,190 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(integration_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-export([all/0,
+ suite/0,
+ groups/0,
+ init_per_suite/1, end_per_suite/1,
+ init_per_group/2, end_per_group/2,
+ init_per_testcase/2, end_per_testcase/2,
+
+ cluster_was_formed/1
+ ]).
+
+all() ->
+ [
+ {group, using_tags},
+ {group, using_autoscaling_group}
+ ].
+
+suite() ->
+ [
+ {timetrap, {hours, 1}}
+ ].
+
+groups() ->
+ [
+ {using_tags, [parallel], [cluster_was_formed]},
+ {using_autoscaling_group, [parallel], [cluster_was_formed]}
+ ].
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ Config1 = rabbit_ct_helpers:set_config(
+ Config,
+ [
+ {terraform_files_suffix, rabbit_ct_helpers:random_term_checksum()},
+ {terraform_aws_ec2_region, "eu-west-1"},
+ {rmq_nodes_clustered, false}
+ ]),
+ Config2 = init_aws_credentials(Config1),
+ rabbit_ct_helpers:run_setup_steps(Config2).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(using_tags, Config) ->
+ TfConfigDir = rabbit_ct_vm_helpers:aws_autoscaling_group_module(Config),
+ AccessKeyId = ?config(aws_access_key_id, Config),
+ SecretAccessKey = ?config(aws_secret_access_key, Config),
+ Suffix = ?config(terraform_files_suffix, Config),
+ Config1 = rabbit_ct_helpers:set_config(
+ Config, {terraform_config_dir, TfConfigDir}),
+ rabbit_ct_helpers:merge_app_env(
+ Config1,
+ {rabbit,
+ [{cluster_formation,
+ [{peer_discovery_backend, rabbit_peer_discovery_aws},
+ {peer_discovery_aws,
+ [
+ {aws_ec2_region, ?config(terraform_aws_ec2_region, Config)},
+ {aws_access_key, AccessKeyId},
+ {aws_secret_key, SecretAccessKey},
+ {aws_ec2_tags, [{"rabbitmq-testing-suffix", Suffix}]}
+ ]}]}]});
+init_per_group(using_autoscaling_group, Config) ->
+ TfConfigDir = rabbit_ct_vm_helpers:aws_autoscaling_group_module(Config),
+ AccessKeyId = ?config(aws_access_key_id, Config),
+ SecretAccessKey = ?config(aws_secret_access_key, Config),
+ Config1 = rabbit_ct_helpers:set_config(
+ Config, {terraform_config_dir, TfConfigDir}),
+ rabbit_ct_helpers:merge_app_env(
+ Config1,
+ {rabbit,
+ [{cluster_formation,
+ [{peer_discovery_backend, rabbit_peer_discovery_aws},
+ {peer_discovery_aws,
+ [
+ {aws_ec2_region, ?config(terraform_aws_ec2_region, Config)},
+ {aws_access_key, AccessKeyId},
+ {aws_secret_key, SecretAccessKey},
+ {aws_autoscaling, true}
+ ]}]}]}).
+
+end_per_group(_Group, Config) ->
+ Config.
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase),
+ InstanceName = rabbit_ct_helpers:testcase_absname(Config, Testcase),
+ InstanceCount = 2,
+ ClusterSize = InstanceCount,
+ Config1 = rabbit_ct_helpers:set_config(
+ Config,
+ [{terraform_instance_name, InstanceName},
+ {terraform_instance_count, InstanceCount},
+ {rmq_nodename_suffix, Testcase},
+ {rmq_nodes_count, ClusterSize}]),
+ rabbit_ct_helpers:run_steps(
+ Config1,
+ [fun rabbit_ct_broker_helpers:run_make_dist/1] ++
+ rabbit_ct_vm_helpers:setup_steps() ++
+ rabbit_ct_broker_helpers:setup_steps_for_vms()).
+
+end_per_testcase(Testcase, Config) ->
+ Config1 = rabbit_ct_helpers:run_steps(
+ Config,
+ rabbit_ct_broker_helpers:teardown_steps_for_vms() ++
+ rabbit_ct_vm_helpers:teardown_steps()),
+ rabbit_ct_helpers:testcase_finished(Config1, Testcase).
+
+init_aws_credentials(Config) ->
+ AccessKeyId = get_env_var_or_awscli_config_key(
+ "AWS_ACCESS_KEY_ID", "aws_access_key_id"),
+ SecretAccessKey = get_env_var_or_awscli_config_key(
+ "AWS_SECRET_ACCESS_KEY", "aws_secret_access_key"),
+ rabbit_ct_helpers:set_config(
+ Config,
+ [
+ {aws_access_key_id, AccessKeyId},
+ {aws_secret_access_key, SecretAccessKey}
+ ]).
+
+get_env_var_or_awscli_config_key(EnvVar, AwscliKey) ->
+ case os:getenv(EnvVar) of
+ false -> get_awscli_config_key(AwscliKey);
+ Value -> Value
+ end.
+
+get_awscli_config_key(AwscliKey) ->
+ AwscliConfig = read_awscli_config(),
+ maps:get(AwscliKey, AwscliConfig, undefined).
+
+read_awscli_config() ->
+ Filename = filename:join([os:getenv("HOME"), ".aws", "credentials"]),
+ case filelib:is_regular(Filename) of
+ true -> read_awscli_config(Filename);
+ false -> #{}
+ end.
+
+read_awscli_config(Filename) ->
+ {ok, Content} = file:read_file(Filename),
+ Lines = string:tokens(binary_to_list(Content), "\n"),
+ read_awscli_config(Lines, #{}).
+
+read_awscli_config([Line | Rest], AwscliConfig) ->
+ Line1 = string:strip(Line),
+ case Line1 of
+ [$# | _] ->
+ read_awscli_config(Rest, AwscliConfig);
+ [$[ | _] ->
+ read_awscli_config(Rest, AwscliConfig);
+ _ ->
+ [Key, Value] = string:tokens(Line1, "="),
+ Key1 = string:strip(Key),
+ Value1 = string:strip(Value),
+ read_awscli_config(Rest, AwscliConfig#{Key1 => Value1})
+ end;
+read_awscli_config([], AwscliConfig) ->
+ AwscliConfig.
+
+%% -------------------------------------------------------------------
+%% Run arbitrary code.
+%% -------------------------------------------------------------------
+
+cluster_was_formed(Config) ->
+ CTPeers = rabbit_ct_vm_helpers:get_ct_peers(Config),
+ ?assertEqual(lists:duplicate(length(CTPeers), false),
+ [rabbit:is_running(CTPeer) || CTPeer <- CTPeers]),
+ RabbitMQNodes = lists:sort(
+ rabbit_ct_broker_helpers:get_node_configs(
+ Config, nodename)),
+ ?assertEqual(lists:duplicate(length(RabbitMQNodes), true),
+ [rabbit:is_running(Node) || Node <- RabbitMQNodes]),
+
+ ?assertEqual(lists:duplicate(length(RabbitMQNodes), true),
+ rabbit_ct_broker_helpers:rpc_all(
+ Config, rabbit_mnesia, is_clustered, [])),
+ ClusteredNodes = lists:sort(
+ rabbit_ct_broker_helpers:rpc(
+ Config, 0, rabbit_mnesia, cluster_nodes, [running])),
+ ?assertEqual(ClusteredNodes, RabbitMQNodes).
diff --git a/deps/rabbitmq_peer_discovery_aws/test/rabbitmq_peer_discovery_aws_SUITE.erl b/deps/rabbitmq_peer_discovery_aws/test/rabbitmq_peer_discovery_aws_SUITE.erl
new file mode 100644
index 0000000000..3894e99d34
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_aws/test/rabbitmq_peer_discovery_aws_SUITE.erl
@@ -0,0 +1,116 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbitmq_peer_discovery_aws_SUITE).
+
+-compile(export_all).
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+
+all() ->
+ [
+ {group, unit}
+ ].
+
+groups() ->
+ [
+ {unit, [], [
+ maybe_add_tag_filters,
+ get_hostname_name_from_reservation_set,
+ registration_support
+ ]}].
+
+%%%
+%%% Testcases
+%%%
+
+maybe_add_tag_filters(_Config) ->
+ Tags = maps:from_list([{"region", "us-west-2"}, {"service", "rabbitmq"}]),
+ Expectation = lists:sort(
+ [{"Filter.2.Name", "tag:service"},
+ {"Filter.2.Value.1", "rabbitmq"},
+ {"Filter.1.Name", "tag:region"},
+ {"Filter.1.Value.1", "us-west-2"}]),
+ Result = lists:sort(rabbit_peer_discovery_aws:maybe_add_tag_filters(Tags, [], 1)),
+ ?assertEqual(Expectation, Result).
+
+get_hostname_name_from_reservation_set(_Config) ->
+ {
+ foreach,
+ fun on_start/0,
+ fun on_finish/1,
+ [{"from private DNS",
+ fun() ->
+ Expectation = ["ip-10-0-16-31.eu-west-1.compute.internal",
+ "ip-10-0-16-29.eu-west-1.compute.internal"],
+ ?assertEqual(Expectation,
+ rabbit_peer_discovery_aws:get_hostname_name_from_reservation_set(
+ reservation_set(), []))
+ end},
+ {"from private IP",
+ fun() ->
+ os:putenv("AWS_USE_PRIVATE_IP", "true"),
+ Expectation = ["10.0.16.31", "10.0.16.29"],
+ ?assertEqual(Expectation,
+ rabbit_peer_discovery_aws:get_hostname_name_from_reservation_set(
+ reservation_set(), []))
+ end}]
+ }.
+
+registration_support(_Config) ->
+ ?assertEqual(rabbit_peer_discovery_aws:supports_registration(), true).
+
+%%%
+%%% Implementation
+%%%
+
+on_start() ->
+ reset().
+
+on_finish(_Config) ->
+ reset().
+
+reset() ->
+ application:unset_env(rabbit, cluster_formation),
+ os:unsetenv("AWS_USE_PRIVATE_IP").
+
+reservation_set() ->
+ [{"item", [{"reservationId","r-006cfdbf8d04c5f01"},
+ {"ownerId","248536293561"},
+ {"groupSet",[]},
+ {"instancesSet",
+ [{"item",
+ [{"instanceId","i-0c6d048641f09cad2"},
+ {"imageId","ami-ef4c7989"},
+ {"instanceState",
+ [{"code","16"},{"name","running"}]},
+ {"privateDnsName",
+ "ip-10-0-16-29.eu-west-1.compute.internal"},
+ {"dnsName",[]},
+ {"instanceType","c4.large"},
+ {"launchTime","2017-04-07T12:05:10"},
+ {"subnetId","subnet-61ff660"},
+ {"vpcId","vpc-4fe1562b"},
+ {"privateIpAddress","10.0.16.29"}]}]}]},
+ {"item", [{"reservationId","r-006cfdbf8d04c5f01"},
+ {"ownerId","248536293561"},
+ {"groupSet",[]},
+ {"instancesSet",
+ [{"item",
+ [{"instanceId","i-1c6d048641f09cad2"},
+ {"imageId","ami-af4c7989"},
+ {"instanceState",
+ [{"code","16"},{"name","running"}]},
+ {"privateDnsName",
+ "ip-10-0-16-31.eu-west-1.compute.internal"},
+ {"dnsName",[]},
+ {"instanceType","c4.large"},
+ {"launchTime","2017-04-07T12:05:10"},
+ {"subnetId","subnet-61ff660"},
+ {"vpcId","vpc-4fe1562b"},
+ {"privateIpAddress","10.0.16.31"}]}]}]}].
diff --git a/deps/rabbitmq_peer_discovery_common/.gitignore b/deps/rabbitmq_peer_discovery_common/.gitignore
new file mode 100644
index 0000000000..11537ad822
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_common/.gitignore
@@ -0,0 +1,25 @@
+*~
+.sw?
+.*.sw?
+*.beam
+*.coverdata
+/.erlang.mk/
+/cover/
+/deps/
+/doc/
+/ebin/
+/escript/
+/escript.lock
+/git-revisions.txt
+/logs/
+/plugins/
+/plugins.lock
+/rebar.config
+/rebar.lock
+/sbin/
+/sbin.lock
+/test/config_schema_SUITE_data/schema/
+/test/ct.cover.spec
+/xrefr
+
+/rabbitmq_peer_discovery_common.d
diff --git a/deps/rabbitmq_peer_discovery_common/.travis.yml b/deps/rabbitmq_peer_discovery_common/.travis.yml
new file mode 100644
index 0000000000..65d0389c21
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_common/.travis.yml
@@ -0,0 +1,61 @@
+# vim:sw=2:et:
+
+os: linux
+dist: xenial
+language: elixir
+notifications:
+ email:
+ recipients:
+ - alerts@rabbitmq.com
+ on_success: never
+ on_failure: always
+addons:
+ apt:
+ packages:
+ - awscli
+cache:
+ apt: true
+env:
+ global:
+ - secure: GSpypPCRiJGa9bSlSseoQtnrSWqER9LP6L42204omhPVIXXXg0Yksdfjh5iDropylZBO86A1XBQcWPgb7kFZFNDZfKe6qVefuoTMKITAUW7qxihhFWJTqL0Y3Hbsadtc5OQ69Uu/nKL2V820pQ93ksyJApBRVoWLb66uxzkyvbY2bjEL2qpnnKjh232IAgI+wiHSo95lrg+zx/GMNafdU4IsGMJ7IgLK2frKynYM7Qg2iVkQw7+//rnZXhtIGY5LtMtlF2sgGkFmcAg8H/cIhI2cCuam3FLnDZp1eWoYBs12Y7Bsthik2culpHaV21mzankyO3KedBNKUwNhDVz02wzBmbakxBUTxLHi91x2t5GLrB8qk5deBmSaMuqq1XgO2qVWi1/oJ9FKiaRgJ2q9T7ZhgPmwj9WsAhrMuCTWbECD+Z/xEJL13OGTfpD9gcQ/emZWLmbinf1WGRWFMnin1QedLtzyXChItXdnbVdyBENm0XaHEj5i2SPpin4xw3P9f7GS/nh9UP2qaTp18Vtwj/Ub4MyXtWz/rR0E8YjDf7llGKl0yiG/0IqR3dyJ0KKq2MU9qbosSYHZOoygFtKLKfKpCNW6QXTfTbylsaw45mnQwwhor1+9VgpyysDET8B1XJAkgjMkbS+cKvLwcxJxy6UyqZA27P2pIuZOWfqCCPo=
+ - secure: DFzxIJaJJVWc4h5baROCRS6+graLmXjPVXGflx/eTWXhWHMvcSPnRCNEbIGhwbmX1OE+J9SogOjAi6Ga/9l0x5K/n5pQqsazc8gbmXXLcExmskzS/k0lfHKD0rCblTv64XOiYIGkQzGzZfOL8MeDROmqOdQNrIv7JT9Jxsdg9ycivfdb86hLWonyyUQTJbDTKzfq97JDKFFiuA4g3xN0UMrX+/R3qx9ajoKrPmf5UTrFSfSWvCPB02Edj5of9gnIYeCusy/DgBhSNygMQ50A0RqrBRhXjwJQNvJKANYGyk7FzSDBGH0nPZYjOVH61LNjyz/cVqdUddkUOGEOGTcxObJwUV7WfxH75AHCsrtD4OZZT95Z7kxLT4MGhDTqQWGI3HHNQxhuo3shfzvU2g8mXlGyilly2Gfygsau+de9CAmUUbtzTJ8nXdxz54DB8rrhGDA45yjt2trgKEQwPP+9gcOm/4rpJb/AOb5eHttoyroKmXL7CqhAd2y1+U0pR4oyX3f4IJyQG794vPv83K+yaUTc8mzRjP3hVJnt9ugJXVhQw4vsOnOeQ7eYuhNBNLTbk3eVEK10FF9C8JcUVz/Ij/viTHjT+RcQXdukBE+p0cDCyz3wRbF5X69ABl3i5u0TxjkCz5E3CmZ4YfLSPv2dDweqSwFvBjATe6wD04PB74Y=
+
+ # $base_rmq_ref is used by rabbitmq-components.mk to select the
+ # appropriate branch for dependencies.
+ - base_rmq_ref=master
+
+elixir:
+ - '1.9'
+otp_release:
+ - '21.3'
+ - '22.2'
+
+install:
+  # Since this is an Erlang project (we set the language to Elixir only
+  # to ensure Elixir is installed), we don't want Travis to run mix(1)
+  # automatically, as it would break.
+ skip
+
+script:
+ # $current_rmq_ref is also used by rabbitmq-components.mk to select
+ # the appropriate branch for dependencies.
+ - make check-rabbitmq-components.mk
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+ - make xref
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+ - make tests
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+
+after_failure:
+ - |
+ cd "$TRAVIS_BUILD_DIR"
+ if test -d logs && test "$AWS_ACCESS_KEY_ID" && test "$AWS_SECRET_ACCESS_KEY"; then
+ archive_name="$(basename "$TRAVIS_REPO_SLUG")-$TRAVIS_JOB_NUMBER"
+
+ tar -c --transform "s/^logs/${archive_name}/" -f - logs | \
+ xz > "${archive_name}.tar.xz"
+
+ aws s3 cp "${archive_name}.tar.xz" s3://server-release-pipeline/travis-ci-logs/ \
+ --region eu-west-1 \
+ --acl public-read
+ fi
diff --git a/deps/rabbitmq_peer_discovery_common/CODE_OF_CONDUCT.md b/deps/rabbitmq_peer_discovery_common/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000000..08697906fd
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_common/CODE_OF_CONDUCT.md
@@ -0,0 +1,44 @@
+# Contributor Code of Conduct
+
+As contributors and maintainers of this project, and in the interest of fostering an open
+and welcoming community, we pledge to respect all people who contribute through reporting
+issues, posting feature requests, updating documentation, submitting pull requests or
+patches, and other activities.
+
+We are committed to making participation in this project a harassment-free experience for
+everyone, regardless of level of experience, gender, gender identity and expression,
+sexual orientation, disability, personal appearance, body size, race, ethnicity, age,
+religion, or nationality.
+
+Examples of unacceptable behavior by participants include:
+
+ * The use of sexualized language or imagery
+ * Personal attacks
+ * Trolling or insulting/derogatory comments
+ * Public or private harassment
+ * Publishing others' private information, such as physical or electronic addresses,
+ without explicit permission
+ * Other unethical or unprofessional conduct
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments,
+commits, code, wiki edits, issues, and other contributions that are not aligned to this
+Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors
+that they deem inappropriate, threatening, offensive, or harmful.
+
+By adopting this Code of Conduct, project maintainers commit themselves to fairly and
+consistently applying these principles to every aspect of managing this project. Project
+maintainers who do not follow or enforce the Code of Conduct may be permanently removed
+from the project team.
+
+This Code of Conduct applies both within project spaces and in public spaces when an
+individual is representing the project or its community.
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by
+contacting a project maintainer at [info@rabbitmq.com](mailto:info@rabbitmq.com). All complaints will
+be reviewed and investigated and will result in a response that is deemed necessary and
+appropriate to the circumstances. Maintainers are obligated to maintain confidentiality
+with regard to the reporter of an incident.
+
+This Code of Conduct is adapted from the
+[Contributor Covenant](https://contributor-covenant.org), version 1.3.0, available at
+[contributor-covenant.org/version/1/3/0/](https://contributor-covenant.org/version/1/3/0/)
diff --git a/deps/rabbitmq_peer_discovery_common/CONTRIBUTING.md b/deps/rabbitmq_peer_discovery_common/CONTRIBUTING.md
new file mode 100644
index 0000000000..42af1f7517
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_common/CONTRIBUTING.md
@@ -0,0 +1,123 @@
+Thank you for using RabbitMQ and for taking the time to contribute to the project.
+This document has two main parts:
+
+ * when and how to file GitHub issues for RabbitMQ projects
+ * how to submit pull requests
+
+They are intended to save you and the RabbitMQ maintainers some time,
+so please take a moment to read through them.
+
+## Overview
+
+### GitHub issues
+
+The RabbitMQ team uses GitHub issues for _specific actionable items_ that
+engineers can work on. This assumes the following:
+
+* GitHub issues are not used for questions, investigations, root cause
+  analysis, discussions of potential issues, etc. (as defined by this team)
+* Enough information is provided by the reporter for maintainers to work with
+
+The team receives many questions through various venues every single
+day. Frequently, these questions do not include the necessary details
+the team needs to begin useful work. GitHub issues can very quickly
+turn into something impossible to navigate and make sense
+of. Because of this, questions, investigations, root cause analysis,
+and discussions of potential features are all considered to be
+[mailing list][rmq-users] material. If you are unsure where to begin,
+the [RabbitMQ users mailing list][rmq-users] is the right place.
+
+Getting all the details necessary to reproduce an issue, make a
+conclusion or even form a hypothesis about what's happening can take a
+fair amount of time. Please help others help you by providing a way to
+reproduce the behavior you're observing, or at least sharing as much
+relevant information as possible on the [RabbitMQ users mailing
+list][rmq-users].
+
+Please provide versions of the software used:
+
+ * RabbitMQ server
+ * Erlang
+ * Operating system version (and distribution, if applicable)
+ * All client libraries used
+ * RabbitMQ plugins (if applicable)
+
+The following information greatly helps in investigating and reproducing issues:
+
+ * RabbitMQ server logs
+ * A code example or terminal transcript that can be used to reproduce
+ * Full exception stack traces (a single line message is not enough!)
+ * `rabbitmqctl report` and `rabbitmqctl environment` output
+ * Other relevant details about the environment and workload, e.g. a traffic capture
+ * Feel free to edit out hostnames and other potentially sensitive information.
+
+To make collecting much of this and other environment information easier, use
+the [`rabbitmq-collect-env`][rmq-collect-env] script. It will produce an archive with
+server logs, operating system logs, output of certain diagnostics commands and so on.
+Please note that **no effort is made to scrub any information that may be sensitive**.
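+
+For example, a minimal run might look like this (assuming the script has been
+downloaded to the node's host; available options may vary between versions):
+
+    chmod +x rabbitmq-collect-env
+    sudo ./rabbitmq-collect-env   # writes an archive to the current directory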
+
+### Pull Requests
+
+RabbitMQ projects use pull requests to discuss, collaborate on and accept code contributions.
+Pull requests are the primary place for discussing code changes.
+
+Here's the recommended workflow; a shell sketch follows the list:
+
+ * [Fork the repository][github-fork] or repositories you plan on contributing to. If multiple
+ repositories are involved in addressing the same issue, please use the same branch name
+ in each repository
+ * Create a branch with a descriptive name in the relevant repositories
+ * Make your changes, run tests (usually with `make tests`), commit with a
+ [descriptive message][git-commit-msgs], push to your fork
+ * Submit pull requests with an explanation of what has been changed and **why**
+ * Submit a filled out and signed [Contributor Agreement][ca-agreement] if needed (see below)
+ * Be patient. We will get to your pull request eventually
+
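+A typical sequence might look like the following (the fork URL and branch
+name here are examples only):
+
+    git clone https://github.com/<your-github-username>/rabbitmq-peer-discovery-common.git
+    cd rabbitmq-peer-discovery-common
+    git checkout -b descriptive-branch-name
+    # ... make your changes ...
+    make tests
+    git commit -a -m "Explain what changed and why"
+    git push origin descriptive-branch-name
+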
+If what you are going to work on is a substantial change, please first
+ask the core team for their opinion on the [RabbitMQ users mailing list][rmq-users].
+
+## Running Tests
+
+To run a "fast suite" (a subset of tests):
+
+ make ct-fast
+
+To run a "slow suite" (a subset of tests that take much longer to run):
+
+ make ct-slow
+
+To run a particular suite:
+
+ make ct-$suite_name
+
+for example, to run the `backing_queue` suite:
+
+ make ct-backing_queue
+
+Finally,
+
+ make tests
+
+will run all suites.
+
+## Code of Conduct
+
+See [CODE_OF_CONDUCT.md](./CODE_OF_CONDUCT.md).
+
+## Contributor Agreement
+
+If you want to contribute a non-trivial change, please submit a signed
+copy of our [Contributor Agreement][ca-agreement] around the time you
+submit your pull request. This will make it much easier (in some
+cases, possible) for the RabbitMQ team at Pivotal to merge your
+contribution.
+
+## Where to Ask Questions
+
+If something isn't clear, feel free to ask on our [mailing list][rmq-users].
+
+[rmq-collect-env]: https://github.com/rabbitmq/support-tools/blob/master/scripts/rabbitmq-collect-env
+[git-commit-msgs]: https://chris.beams.io/posts/git-commit/
+[rmq-users]: https://groups.google.com/forum/#!forum/rabbitmq-users
+[ca-agreement]: https://cla.pivotal.io/sign/rabbitmq
+[github-fork]: https://help.github.com/articles/fork-a-repo/
diff --git a/deps/rabbitmq_peer_discovery_common/LICENSE b/deps/rabbitmq_peer_discovery_common/LICENSE
new file mode 100644
index 0000000000..f2da65d175
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_common/LICENSE
@@ -0,0 +1,4 @@
+This package is licensed under the MPL 2.0. For the MPL 2.0, please see LICENSE-MPL-RabbitMQ.
+
+If you have any questions regarding licensing, please contact us at
+info@rabbitmq.com.
diff --git a/deps/rabbitmq_peer_discovery_common/LICENSE-MPL-RabbitMQ b/deps/rabbitmq_peer_discovery_common/LICENSE-MPL-RabbitMQ
new file mode 100644
index 0000000000..14e2f777f6
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_common/LICENSE-MPL-RabbitMQ
@@ -0,0 +1,373 @@
+Mozilla Public License Version 2.0
+==================================
+
+1. Definitions
+--------------
+
+1.1. "Contributor"
+ means each individual or legal entity that creates, contributes to
+ the creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+ means the combination of the Contributions of others (if any) used
+ by a Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+ means Source Code Form to which the initial Contributor has attached
+ the notice in Exhibit A, the Executable Form of such Source Code
+ Form, and Modifications of such Source Code Form, in each case
+ including portions thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ (a) that the initial Contributor has attached the notice described
+ in Exhibit B to the Covered Software; or
+
+ (b) that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the
+ terms of a Secondary License.
+
+1.6. "Executable Form"
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+ means a work that combines Covered Software with other material, in
+ a separate file or files, that is not Covered Software.
+
+1.8. "License"
+ means this document.
+
+1.9. "Licensable"
+ means having the right to grant, to the maximum extent possible,
+ whether at the time of the initial grant or subsequently, any and
+ all of the rights conveyed by this License.
+
+1.10. "Modifications"
+ means any of the following:
+
+ (a) any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered
+ Software; or
+
+ (b) any new file in Source Code Form that contains any Covered
+ Software.
+
+1.11. "Patent Claims" of a Contributor
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the
+ License, by the making, using, selling, offering for sale, having
+ made, import, or transfer of either its Contributions or its
+ Contributor Version.
+
+1.12. "Secondary License"
+ means either the GNU General Public License, Version 2.0, the GNU
+ Lesser General Public License, Version 2.1, the GNU Affero General
+ Public License, Version 3.0, or any later versions of those
+ licenses.
+
+1.13. "Source Code Form"
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that
+ controls, is controlled by, or is under common control with You. For
+ purposes of this definition, "control" means (a) the power, direct
+ or indirect, to cause the direction or management of such entity,
+ whether by contract or otherwise, or (b) ownership of more than
+ fifty percent (50%) of the outstanding shares or beneficial
+ ownership of such entity.
+
+2. License Grants and Conditions
+--------------------------------
+
+2.1. Grants
+
+Each Contributor hereby grants You a world-wide, royalty-free,
+non-exclusive license:
+
+(a) under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+(b) under Patent Claims of such Contributor to make, use, sell, offer
+ for sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+The licenses granted in Section 2.1 with respect to any Contribution
+become effective for each Contribution on the date the Contributor first
+distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+The licenses granted in this Section 2 are the only rights granted under
+this License. No additional rights or licenses will be implied from the
+distribution or licensing of Covered Software under this License.
+Notwithstanding Section 2.1(b) above, no patent license is granted by a
+Contributor:
+
+(a) for any code that a Contributor has removed from Covered Software;
+ or
+
+(b) for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+(c) under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+This License does not grant any rights in the trademarks, service marks,
+or logos of any Contributor (except as may be necessary to comply with
+the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+No Contributor makes additional grants as a result of Your choice to
+distribute the Covered Software under a subsequent version of this
+License (see Section 10.2) or under the terms of a Secondary License (if
+permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+Each Contributor represents that the Contributor believes its
+Contributions are its original creation(s) or it has sufficient rights
+to grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+This License is not intended to limit any rights You have under
+applicable copyright doctrines of fair use, fair dealing, or other
+equivalents.
+
+2.7. Conditions
+
+Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
+in Section 2.1.
+
+3. Responsibilities
+-------------------
+
+3.1. Distribution of Source Form
+
+All distribution of Covered Software in Source Code Form, including any
+Modifications that You create or to which You contribute, must be under
+the terms of this License. You must inform recipients that the Source
+Code Form of the Covered Software is governed by the terms of this
+License, and how they can obtain a copy of this License. You may not
+attempt to alter or restrict the recipients' rights in the Source Code
+Form.
+
+3.2. Distribution of Executable Form
+
+If You distribute Covered Software in Executable Form then:
+
+(a) such Covered Software must also be made available in Source Code
+ Form, as described in Section 3.1, and You must inform recipients of
+ the Executable Form how they can obtain a copy of such Source Code
+ Form by reasonable means in a timely manner, at a charge no more
+ than the cost of distribution to the recipient; and
+
+(b) You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter
+ the recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+You may create and distribute a Larger Work under terms of Your choice,
+provided that You also comply with the requirements of this License for
+the Covered Software. If the Larger Work is a combination of Covered
+Software with a work governed by one or more Secondary Licenses, and the
+Covered Software is not Incompatible With Secondary Licenses, this
+License permits You to additionally distribute such Covered Software
+under the terms of such Secondary License(s), so that the recipient of
+the Larger Work may, at their option, further distribute the Covered
+Software under the terms of either this License or such Secondary
+License(s).
+
+3.4. Notices
+
+You may not remove or alter the substance of any license notices
+(including copyright notices, patent notices, disclaimers of warranty,
+or limitations of liability) contained within the Source Code Form of
+the Covered Software, except that You may alter any license notices to
+the extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+You may choose to offer, and to charge a fee for, warranty, support,
+indemnity or liability obligations to one or more recipients of Covered
+Software. However, You may do so only on Your own behalf, and not on
+behalf of any Contributor. You must make it absolutely clear that any
+such warranty, support, indemnity, or liability obligation is offered by
+You alone, and You hereby agree to indemnify every Contributor for any
+liability incurred by such Contributor as a result of warranty, support,
+indemnity or liability terms You offer. You may include additional
+disclaimers of warranty and limitations of liability specific to any
+jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+---------------------------------------------------
+
+If it is impossible for You to comply with any of the terms of this
+License with respect to some or all of the Covered Software due to
+statute, judicial order, or regulation then You must: (a) comply with
+the terms of this License to the maximum extent possible; and (b)
+describe the limitations and the code they affect. Such description must
+be placed in a text file included with all distributions of the Covered
+Software under this License. Except to the extent prohibited by statute
+or regulation, such description must be sufficiently detailed for a
+recipient of ordinary skill to be able to understand it.
+
+5. Termination
+--------------
+
+5.1. The rights granted under this License will terminate automatically
+if You fail to comply with any of its terms. However, if You become
+compliant, then the rights granted under this License from a particular
+Contributor are reinstated (a) provisionally, unless and until such
+Contributor explicitly and finally terminates Your grants, and (b) on an
+ongoing basis, if such Contributor fails to notify You of the
+non-compliance by some reasonable means prior to 60 days after You have
+come back into compliance. Moreover, Your grants from a particular
+Contributor are reinstated on an ongoing basis if such Contributor
+notifies You of the non-compliance by some reasonable means, this is the
+first time You have received notice of non-compliance with this License
+from such Contributor, and You become compliant prior to 30 days after
+Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+infringement claim (excluding declaratory judgment actions,
+counter-claims, and cross-claims) alleging that a Contributor Version
+directly or indirectly infringes any patent, then the rights granted to
+You by any and all Contributors for the Covered Software under Section
+2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all
+end user license agreements (excluding distributors and resellers) which
+have been validly granted by You or Your distributors under this License
+prior to termination shall survive termination.
+
+************************************************************************
+* *
+* 6. Disclaimer of Warranty *
+* ------------------------- *
+* *
+* Covered Software is provided under this License on an "as is" *
+* basis, without warranty of any kind, either expressed, implied, or *
+* statutory, including, without limitation, warranties that the *
+* Covered Software is free of defects, merchantable, fit for a *
+* particular purpose or non-infringing. The entire risk as to the *
+* quality and performance of the Covered Software is with You. *
+* Should any Covered Software prove defective in any respect, You *
+* (not any Contributor) assume the cost of any necessary servicing, *
+* repair, or correction. This disclaimer of warranty constitutes an *
+* essential part of this License. No use of any Covered Software is *
+* authorized under this License except under this disclaimer. *
+* *
+************************************************************************
+
+************************************************************************
+* *
+* 7. Limitation of Liability *
+* -------------------------- *
+* *
+* Under no circumstances and under no legal theory, whether tort *
+* (including negligence), contract, or otherwise, shall any *
+* Contributor, or anyone who distributes Covered Software as *
+* permitted above, be liable to You for any direct, indirect, *
+* special, incidental, or consequential damages of any character *
+* including, without limitation, damages for lost profits, loss of *
+* goodwill, work stoppage, computer failure or malfunction, or any *
+* and all other commercial damages or losses, even if such party *
+* shall have been informed of the possibility of such damages. This *
+* limitation of liability shall not apply to liability for death or *
+* personal injury resulting from such party's negligence to the *
+* extent applicable law prohibits such limitation. Some *
+* jurisdictions do not allow the exclusion or limitation of *
+* incidental or consequential damages, so this exclusion and *
+* limitation may not apply to You. *
+* *
+************************************************************************
+
+8. Litigation
+-------------
+
+Any litigation relating to this License may be brought only in the
+courts of a jurisdiction where the defendant maintains its principal
+place of business and such litigation shall be governed by laws of that
+jurisdiction, without reference to its conflict-of-law provisions.
+Nothing in this Section shall prevent a party's ability to bring
+cross-claims or counter-claims.
+
+9. Miscellaneous
+----------------
+
+This License represents the complete agreement concerning the subject
+matter hereof. If any provision of this License is held to be
+unenforceable, such provision shall be reformed only to the extent
+necessary to make it enforceable. Any law or regulation which provides
+that the language of a contract shall be construed against the drafter
+shall not be used to construe this License against a Contributor.
+
+10. Versions of the License
+---------------------------
+
+10.1. New Versions
+
+Mozilla Foundation is the license steward. Except as provided in Section
+10.3, no one other than the license steward has the right to modify or
+publish new versions of this License. Each version will be given a
+distinguishing version number.
+
+10.2. Effect of New Versions
+
+You may distribute the Covered Software under the terms of the version
+of the License under which You originally received the Covered Software,
+or under the terms of any subsequent version published by the license
+steward.
+
+10.3. Modified Versions
+
+If you create software not governed by this License, and you want to
+create a new license for such software, you may create and use a
+modified version of this License if you rename the license and remove
+any references to the name of the license steward (except to note that
+such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+Licenses
+
+If You choose to distribute Source Code Form that is Incompatible With
+Secondary Licenses under the terms of this version of the License, the
+notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+-------------------------------------------
+
+ This Source Code Form is subject to the terms of the Mozilla Public
+ License, v. 2.0. If a copy of the MPL was not distributed with this
+ file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular
+file, then You may include the notice in a location (such as a LICENSE
+file in a relevant directory) where a recipient would be likely to look
+for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+---------------------------------------------------------
+
+ This Source Code Form is "Incompatible With Secondary Licenses", as
+ defined by the Mozilla Public License, v. 2.0.
diff --git a/deps/rabbitmq_peer_discovery_common/Makefile b/deps/rabbitmq_peer_discovery_common/Makefile
new file mode 100644
index 0000000000..c1d3dcaffa
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_common/Makefile
@@ -0,0 +1,20 @@
+PROJECT = rabbitmq_peer_discovery_common
+PROJECT_DESCRIPTION = Modules shared by various peer discovery backends
+PROJECT_MOD = rabbit_peer_discovery_common_app
+
+LOCAL_DEPS = inets
+DEPS = rabbit_common rabbit
+TEST_DEPS = rabbitmq_ct_helpers rabbitmq_ct_client_helpers ct_helper
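+# erlang.mk dependency spec: dep_<name> = <fetch method> <repository URL> <version or ref>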
+dep_ct_helper = git https://github.com/extend/ct_helper.git master
+
+DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-plugin.mk
+DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk
+
+# FIXME: Use erlang.mk patched for RabbitMQ, while waiting for PRs to be
+# reviewed and merged.
+
+ERLANG_MK_REPO = https://github.com/rabbitmq/erlang.mk.git
+ERLANG_MK_COMMIT = rabbitmq-tmp
+
+include rabbitmq-components.mk
+include erlang.mk
diff --git a/deps/rabbitmq_peer_discovery_common/README.md b/deps/rabbitmq_peer_discovery_common/README.md
new file mode 100644
index 0000000000..74958a27e8
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_common/README.md
@@ -0,0 +1,27 @@
+# RabbitMQ Peer Discovery Commons
+
+This library is similar in purpose to [rabbitmq-common](https://github.com/rabbitmq/rabbitmq-common) but focuses exclusively
+on [RabbitMQ peer discovery backends](https://www.rabbitmq.com/cluster-formation.html) (available as [a plugin](https://github.com/rabbitmq/rabbitmq-autocluster) for 3.6.x releases).
+
+It is not meant to be enabled, disabled, or used directly by users; it serves only as a
+dependency of other plugins.
+
+
+## Project Maturity
+
+This library [shipped with RabbitMQ 3.7.0](https://github.com/rabbitmq/rabbitmq-server/releases/tag/v3.7.0).
+
+
+## Contributing
+
+See [CONTRIBUTING.md](./CONTRIBUTING.md) and our [development process overview](https://www.rabbitmq.com/github.html).
+
+
+## License
+
+[Licensed under the MPL](LICENSE-MPL-RabbitMQ), same as RabbitMQ server.
+
+
+## Copyright
+
+(c) 2007-2020 VMware, Inc. or its affiliates.
diff --git a/deps/rabbitmq_peer_discovery_common/erlang.mk b/deps/rabbitmq_peer_discovery_common/erlang.mk
new file mode 100644
index 0000000000..fce4be0b0a
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_common/erlang.mk
@@ -0,0 +1,7808 @@
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+.PHONY: all app apps deps search rel relup docs install-docs check tests clean distclean help erlang-mk
+
+ERLANG_MK_FILENAME := $(realpath $(lastword $(MAKEFILE_LIST)))
+export ERLANG_MK_FILENAME
+
+ERLANG_MK_VERSION = 2019.07.01-40-geb3e4b0
+ERLANG_MK_WITHOUT =
+
+# Make 3.81 and 3.82 are deprecated.
+
+ifeq ($(MAKELEVEL)$(MAKE_VERSION),03.81)
+$(warning Please upgrade to GNU Make 4 or later: https://erlang.mk/guide/installation.html)
+endif
+
+ifeq ($(MAKELEVEL)$(MAKE_VERSION),03.82)
+$(warning Please upgrade to GNU Make 4 or later: https://erlang.mk/guide/installation.html)
+endif
+
+# Core configuration.
+
+PROJECT ?= $(notdir $(CURDIR))
+PROJECT := $(strip $(PROJECT))
+
+PROJECT_VERSION ?= rolling
+PROJECT_MOD ?= $(PROJECT)_app
+PROJECT_ENV ?= []
+
+# Verbosity.
+
+V ?= 0
+
+verbose_0 = @
+verbose_2 = set -x;
+verbose = $(verbose_$(V))
+
+ifeq ($(V),3)
+SHELL := $(SHELL) -x
+endif
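+
+# For example: "make" runs quietly, "make V=1" echoes the full commands, and
+# "make V=3" additionally traces shell execution.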
+
+gen_verbose_0 = @echo " GEN " $@;
+gen_verbose_2 = set -x;
+gen_verbose = $(gen_verbose_$(V))
+
+gen_verbose_esc_0 = @echo " GEN " $$@;
+gen_verbose_esc_2 = set -x;
+gen_verbose_esc = $(gen_verbose_esc_$(V))
+
+# Temporary files directory.
+
+ERLANG_MK_TMP ?= $(CURDIR)/.erlang.mk
+export ERLANG_MK_TMP
+
+# "erl" command.
+
+ERL = erl +A1 -noinput -boot no_dot_erlang
+
+# Platform detection.
+
+ifeq ($(PLATFORM),)
+UNAME_S := $(shell uname -s)
+
+ifeq ($(UNAME_S),Linux)
+PLATFORM = linux
+else ifeq ($(UNAME_S),Darwin)
+PLATFORM = darwin
+else ifeq ($(UNAME_S),SunOS)
+PLATFORM = solaris
+else ifeq ($(UNAME_S),GNU)
+PLATFORM = gnu
+else ifeq ($(UNAME_S),FreeBSD)
+PLATFORM = freebsd
+else ifeq ($(UNAME_S),NetBSD)
+PLATFORM = netbsd
+else ifeq ($(UNAME_S),OpenBSD)
+PLATFORM = openbsd
+else ifeq ($(UNAME_S),DragonFly)
+PLATFORM = dragonfly
+else ifeq ($(shell uname -o),Msys)
+PLATFORM = msys2
+else
+$(error Unable to detect platform. Please open a ticket with the output of uname -a.)
+endif
+
+export PLATFORM
+endif
+
+# Core targets.
+
+all:: deps app rel
+
+# Noop to avoid a Make warning when there's nothing to do.
+rel::
+ $(verbose) :
+
+relup:: deps app
+
+check:: tests
+
+clean:: clean-crashdump
+
+clean-crashdump:
+ifneq ($(wildcard erl_crash.dump),)
+ $(gen_verbose) rm -f erl_crash.dump
+endif
+
+distclean:: clean distclean-tmp
+
+$(ERLANG_MK_TMP):
+ $(verbose) mkdir -p $(ERLANG_MK_TMP)
+
+distclean-tmp:
+ $(gen_verbose) rm -rf $(ERLANG_MK_TMP)
+
+help::
+ $(verbose) printf "%s\n" \
+ "erlang.mk (version $(ERLANG_MK_VERSION)) is distributed under the terms of the ISC License." \
+ "Copyright (c) 2013-2016 Loïc Hoguin <essen@ninenines.eu>" \
+ "" \
+ "Usage: [V=1] $(MAKE) [target]..." \
+ "" \
+ "Core targets:" \
+ " all Run deps, app and rel targets in that order" \
+ " app Compile the project" \
+ " deps Fetch dependencies (if needed) and compile them" \
+ " fetch-deps Fetch dependencies recursively (if needed) without compiling them" \
+ " list-deps List dependencies recursively on stdout" \
+ " search q=... Search for a package in the built-in index" \
+ " rel Build a release for this project, if applicable" \
+ " docs Build the documentation for this project" \
+ " install-docs Install the man pages for this project" \
+ " check Compile and run all tests and analysis for this project" \
+ " tests Run the tests for this project" \
+ " clean Delete temporary and output files from most targets" \
+ " distclean Delete all temporary and output files" \
+ " help Display this help and exit" \
+ " erlang-mk Update erlang.mk to the latest version"
+
+# Core functions.
+
+empty :=
+space := $(empty) $(empty)
+tab := $(empty)	$(empty)
+comma := ,
+
+define newline
+
+
+endef
+
+define comma_list
+$(subst $(space),$(comma),$(strip $(1)))
+endef
+
+define escape_dquotes
+$(subst ",\",$1)
+endef
+
+# Adding erlang.mk to make Erlang scripts that call init:get_plain_arguments() happy.
+define erlang
+$(ERL) $2 -pz $(ERLANG_MK_TMP)/rebar/ebin -eval "$(subst $(newline),,$(call escape_dquotes,$1))" -- erlang.mk
+endef
+
+ifeq ($(PLATFORM),msys2)
+core_native_path = $(shell cygpath -m $1)
+else
+core_native_path = $1
+endif
+
+core_http_get = curl -Lf$(if $(filter-out 0,$(V)),,s)o $(call core_native_path,$1) $2
+
+core_eq = $(and $(findstring $(1),$(2)),$(findstring $(2),$(1)))
+
+# We skip files that contain spaces because they end up causing issues.
+core_find = $(if $(wildcard $1),$(shell find $(1:%/=%) \( -type l -o -type f \) -name $(subst *,\*,$2) | grep -v " "))
+
+core_lc = $(subst A,a,$(subst B,b,$(subst C,c,$(subst D,d,$(subst E,e,$(subst F,f,$(subst G,g,$(subst H,h,$(subst I,i,$(subst J,j,$(subst K,k,$(subst L,l,$(subst M,m,$(subst N,n,$(subst O,o,$(subst P,p,$(subst Q,q,$(subst R,r,$(subst S,s,$(subst T,t,$(subst U,u,$(subst V,v,$(subst W,w,$(subst X,x,$(subst Y,y,$(subst Z,z,$(1)))))))))))))))))))))))))))
+
+core_ls = $(filter-out $(1),$(shell echo $(1)))
+
+# @todo Use a solution that does not require using perl.
+core_relpath = $(shell perl -e 'use File::Spec; print File::Spec->abs2rel(@ARGV) . "\n"' $1 $2)
+
+define core_render
+ printf -- '$(subst $(newline),\n,$(subst %,%%,$(subst ','\'',$(subst $(tab),$(WS),$(call $(1))))))\n' > $(2)
+endef
+
+# Automated update.
+
+ERLANG_MK_REPO ?= https://github.com/ninenines/erlang.mk
+ERLANG_MK_COMMIT ?=
+ERLANG_MK_BUILD_CONFIG ?= build.config
+ERLANG_MK_BUILD_DIR ?= .erlang.mk.build
+
+erlang-mk: WITHOUT ?= $(ERLANG_MK_WITHOUT)
+erlang-mk:
+ifdef ERLANG_MK_COMMIT
+ $(verbose) git clone $(ERLANG_MK_REPO) $(ERLANG_MK_BUILD_DIR)
+ $(verbose) cd $(ERLANG_MK_BUILD_DIR) && git checkout $(ERLANG_MK_COMMIT)
+else
+ $(verbose) git clone --depth 1 $(ERLANG_MK_REPO) $(ERLANG_MK_BUILD_DIR)
+endif
+ $(verbose) if [ -f $(ERLANG_MK_BUILD_CONFIG) ]; then cp $(ERLANG_MK_BUILD_CONFIG) $(ERLANG_MK_BUILD_DIR)/build.config; fi
+ $(gen_verbose) $(MAKE) --no-print-directory -C $(ERLANG_MK_BUILD_DIR) WITHOUT='$(strip $(WITHOUT))' UPGRADE=1
+ $(verbose) cp $(ERLANG_MK_BUILD_DIR)/erlang.mk ./erlang.mk
+ $(verbose) rm -rf $(ERLANG_MK_BUILD_DIR)
+ $(verbose) rm -rf $(ERLANG_MK_TMP)
+
+# The erlang.mk package index is bundled in the default erlang.mk build.
+# Search for the string "copyright" to skip to the rest of the code.
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-kerl
+
+KERL_INSTALL_DIR ?= $(HOME)/erlang
+
+ifeq ($(strip $(KERL)),)
+KERL := $(ERLANG_MK_TMP)/kerl/kerl
+endif
+
+KERL_DIR = $(ERLANG_MK_TMP)/kerl
+
+export KERL
+
+KERL_GIT ?= https://github.com/kerl/kerl
+KERL_COMMIT ?= master
+
+KERL_MAKEFLAGS ?=
+
+OTP_GIT ?= https://github.com/erlang/otp
+
+define kerl_otp_target
+$(KERL_INSTALL_DIR)/$(1): $(KERL)
+ $(verbose) if [ ! -d $$@ ]; then \
+ MAKEFLAGS="$(KERL_MAKEFLAGS)" $(KERL) build git $(OTP_GIT) $(1) $(1); \
+ $(KERL) install $(1) $(KERL_INSTALL_DIR)/$(1); \
+ fi
+endef
+
+define kerl_hipe_target
+$(KERL_INSTALL_DIR)/$1-native: $(KERL)
+ $(verbose) if [ ! -d $$@ ]; then \
+ KERL_CONFIGURE_OPTIONS=--enable-native-libs \
+ MAKEFLAGS="$(KERL_MAKEFLAGS)" $(KERL) build git $(OTP_GIT) $1 $1-native; \
+ $(KERL) install $1-native $(KERL_INSTALL_DIR)/$1-native; \
+ fi
+endef
+
+$(KERL): $(KERL_DIR)
+
+$(KERL_DIR): | $(ERLANG_MK_TMP)
+ $(gen_verbose) git clone --depth 1 $(KERL_GIT) $(ERLANG_MK_TMP)/kerl
+ $(verbose) cd $(ERLANG_MK_TMP)/kerl && git checkout $(KERL_COMMIT)
+ $(verbose) chmod +x $(KERL)
+
+distclean:: distclean-kerl
+
+distclean-kerl:
+ $(gen_verbose) rm -rf $(KERL_DIR)
+
+# Allow users to select which version of Erlang/OTP to use for a project.
+
+ifneq ($(strip $(LATEST_ERLANG_OTP)),)
+# In some environments it is necessary to filter out master.
+ERLANG_OTP := $(notdir $(lastword $(sort\
+ $(filter-out $(KERL_INSTALL_DIR)/master $(KERL_INSTALL_DIR)/OTP_R%,\
+ $(filter-out %-rc1 %-rc2 %-rc3,$(wildcard $(KERL_INSTALL_DIR)/*[^-native]))))))
+endif
+
+ERLANG_OTP ?=
+ERLANG_HIPE ?=
+
+# Use kerl to enforce a specific Erlang/OTP version for a project.
+ifneq ($(strip $(ERLANG_OTP)),)
+export PATH := $(KERL_INSTALL_DIR)/$(ERLANG_OTP)/bin:$(PATH)
+SHELL := env PATH=$(PATH) $(SHELL)
+$(eval $(call kerl_otp_target,$(ERLANG_OTP)))
+
+# Build Erlang/OTP only if it doesn't already exist.
+ifeq ($(wildcard $(KERL_INSTALL_DIR)/$(ERLANG_OTP))$(BUILD_ERLANG_OTP),)
+$(info Building Erlang/OTP $(ERLANG_OTP)... Please wait...)
+$(shell $(MAKE) $(KERL_INSTALL_DIR)/$(ERLANG_OTP) ERLANG_OTP=$(ERLANG_OTP) BUILD_ERLANG_OTP=1 >&2)
+endif
+
+else
+# Same for a HiPE enabled VM.
+ifneq ($(strip $(ERLANG_HIPE)),)
+export PATH := $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native/bin:$(PATH)
+SHELL := env PATH=$(PATH) $(SHELL)
+$(eval $(call kerl_hipe_target,$(ERLANG_HIPE)))
+
+# Build Erlang/OTP only if it doesn't already exist.
+ifeq ($(wildcard $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native)$(BUILD_ERLANG_OTP),)
+$(info Building HiPE-enabled Erlang/OTP $(ERLANG_HIPE)... Please wait...)
+$(shell $(MAKE) $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native ERLANG_HIPE=$(ERLANG_HIPE) BUILD_ERLANG_OTP=1 >&2)
+endif
+
+endif
+endif
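+
+# Usage sketch (assumes kerl can build the given git ref from $(OTP_GIT)):
+#   make ERLANG_OTP=OTP-22.3    # build (if needed) and use that Erlang/OTP
+#   make ERLANG_HIPE=OTP-22.3   # same, but with a HiPE-enabled build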
+
+PACKAGES += aberth
+pkg_aberth_name = aberth
+pkg_aberth_description = Generic BERT-RPC server in Erlang
+pkg_aberth_homepage = https://github.com/a13x/aberth
+pkg_aberth_fetch = git
+pkg_aberth_repo = https://github.com/a13x/aberth
+pkg_aberth_commit = master
+
+PACKAGES += active
+pkg_active_name = active
+pkg_active_description = Active development for Erlang: rebuild and reload source/binary files while the VM is running
+pkg_active_homepage = https://github.com/proger/active
+pkg_active_fetch = git
+pkg_active_repo = https://github.com/proger/active
+pkg_active_commit = master
+
+PACKAGES += actordb_core
+pkg_actordb_core_name = actordb_core
+pkg_actordb_core_description = ActorDB main source
+pkg_actordb_core_homepage = http://www.actordb.com/
+pkg_actordb_core_fetch = git
+pkg_actordb_core_repo = https://github.com/biokoda/actordb_core
+pkg_actordb_core_commit = master
+
+PACKAGES += actordb_thrift
+pkg_actordb_thrift_name = actordb_thrift
+pkg_actordb_thrift_description = Thrift API for ActorDB
+pkg_actordb_thrift_homepage = http://www.actordb.com/
+pkg_actordb_thrift_fetch = git
+pkg_actordb_thrift_repo = https://github.com/biokoda/actordb_thrift
+pkg_actordb_thrift_commit = master
+
+PACKAGES += aleppo
+pkg_aleppo_name = aleppo
+pkg_aleppo_description = Alternative Erlang Pre-Processor
+pkg_aleppo_homepage = https://github.com/ErlyORM/aleppo
+pkg_aleppo_fetch = git
+pkg_aleppo_repo = https://github.com/ErlyORM/aleppo
+pkg_aleppo_commit = master
+
+PACKAGES += alog
+pkg_alog_name = alog
+pkg_alog_description = Simply the best logging framework for Erlang
+pkg_alog_homepage = https://github.com/siberian-fast-food/alogger
+pkg_alog_fetch = git
+pkg_alog_repo = https://github.com/siberian-fast-food/alogger
+pkg_alog_commit = master
+
+PACKAGES += amqp_client
+pkg_amqp_client_name = amqp_client
+pkg_amqp_client_description = RabbitMQ Erlang AMQP client
+pkg_amqp_client_homepage = https://www.rabbitmq.com/erlang-client-user-guide.html
+pkg_amqp_client_fetch = git
+pkg_amqp_client_repo = https://github.com/rabbitmq/rabbitmq-erlang-client.git
+pkg_amqp_client_commit = master
+
+PACKAGES += annotations
+pkg_annotations_name = annotations
+pkg_annotations_description = Simple code instrumentation utilities
+pkg_annotations_homepage = https://github.com/hyperthunk/annotations
+pkg_annotations_fetch = git
+pkg_annotations_repo = https://github.com/hyperthunk/annotations
+pkg_annotations_commit = master
+
+PACKAGES += antidote
+pkg_antidote_name = antidote
+pkg_antidote_description = Large-scale computation without synchronisation
+pkg_antidote_homepage = https://syncfree.lip6.fr/
+pkg_antidote_fetch = git
+pkg_antidote_repo = https://github.com/SyncFree/antidote
+pkg_antidote_commit = master
+
+PACKAGES += apns
+pkg_apns_name = apns
+pkg_apns_description = Apple Push Notification Server for Erlang
+pkg_apns_homepage = http://inaka.github.com/apns4erl
+pkg_apns_fetch = git
+pkg_apns_repo = https://github.com/inaka/apns4erl
+pkg_apns_commit = master
+
+PACKAGES += asciideck
+pkg_asciideck_name = asciideck
+pkg_asciideck_description = Asciidoc for Erlang.
+pkg_asciideck_homepage = https://ninenines.eu
+pkg_asciideck_fetch = git
+pkg_asciideck_repo = https://github.com/ninenines/asciideck
+pkg_asciideck_commit = master
+
+PACKAGES += azdht
+pkg_azdht_name = azdht
+pkg_azdht_description = Azureus Distributed Hash Table (DHT) in Erlang
+pkg_azdht_homepage = https://github.com/arcusfelis/azdht
+pkg_azdht_fetch = git
+pkg_azdht_repo = https://github.com/arcusfelis/azdht
+pkg_azdht_commit = master
+
+PACKAGES += backoff
+pkg_backoff_name = backoff
+pkg_backoff_description = Simple exponential backoffs in Erlang
+pkg_backoff_homepage = https://github.com/ferd/backoff
+pkg_backoff_fetch = git
+pkg_backoff_repo = https://github.com/ferd/backoff
+pkg_backoff_commit = master
+
+PACKAGES += barrel_tcp
+pkg_barrel_tcp_name = barrel_tcp
+pkg_barrel_tcp_description = barrel is a generic TCP acceptor pool with low latency in Erlang.
+pkg_barrel_tcp_homepage = https://github.com/benoitc-attic/barrel_tcp
+pkg_barrel_tcp_fetch = git
+pkg_barrel_tcp_repo = https://github.com/benoitc-attic/barrel_tcp
+pkg_barrel_tcp_commit = master
+
+PACKAGES += basho_bench
+pkg_basho_bench_name = basho_bench
+pkg_basho_bench_description = A load-generation and testing tool for basically whatever you can write a returning Erlang function for.
+pkg_basho_bench_homepage = https://github.com/basho/basho_bench
+pkg_basho_bench_fetch = git
+pkg_basho_bench_repo = https://github.com/basho/basho_bench
+pkg_basho_bench_commit = master
+
+PACKAGES += bcrypt
+pkg_bcrypt_name = bcrypt
+pkg_bcrypt_description = Bcrypt Erlang / C library
+pkg_bcrypt_homepage = https://github.com/erlangpack/bcrypt
+pkg_bcrypt_fetch = git
+pkg_bcrypt_repo = https://github.com/erlangpack/bcrypt.git
+pkg_bcrypt_commit = master
+
+PACKAGES += beam
+pkg_beam_name = beam
+pkg_beam_description = BEAM emulator written in Erlang
+pkg_beam_homepage = https://github.com/tonyrog/beam
+pkg_beam_fetch = git
+pkg_beam_repo = https://github.com/tonyrog/beam
+pkg_beam_commit = master
+
+PACKAGES += beanstalk
+pkg_beanstalk_name = beanstalk
+pkg_beanstalk_description = An Erlang client for beanstalkd
+pkg_beanstalk_homepage = https://github.com/tim/erlang-beanstalk
+pkg_beanstalk_fetch = git
+pkg_beanstalk_repo = https://github.com/tim/erlang-beanstalk
+pkg_beanstalk_commit = master
+
+PACKAGES += bear
+pkg_bear_name = bear
+pkg_bear_description = a set of statistics functions for erlang
+pkg_bear_homepage = https://github.com/boundary/bear
+pkg_bear_fetch = git
+pkg_bear_repo = https://github.com/boundary/bear
+pkg_bear_commit = master
+
+PACKAGES += bertconf
+pkg_bertconf_name = bertconf
+pkg_bertconf_description = Make ETS tables out of static BERT files that are auto-reloaded
+pkg_bertconf_homepage = https://github.com/ferd/bertconf
+pkg_bertconf_fetch = git
+pkg_bertconf_repo = https://github.com/ferd/bertconf
+pkg_bertconf_commit = master
+
+PACKAGES += bifrost
+pkg_bifrost_name = bifrost
+pkg_bifrost_description = Erlang FTP Server Framework
+pkg_bifrost_homepage = https://github.com/thorstadt/bifrost
+pkg_bifrost_fetch = git
+pkg_bifrost_repo = https://github.com/thorstadt/bifrost
+pkg_bifrost_commit = master
+
+PACKAGES += binpp
+pkg_binpp_name = binpp
+pkg_binpp_description = Erlang Binary Pretty Printer
+pkg_binpp_homepage = https://github.com/jtendo/binpp
+pkg_binpp_fetch = git
+pkg_binpp_repo = https://github.com/jtendo/binpp
+pkg_binpp_commit = master
+
+PACKAGES += bisect
+pkg_bisect_name = bisect
+pkg_bisect_description = Ordered fixed-size binary dictionary in Erlang
+pkg_bisect_homepage = https://github.com/knutin/bisect
+pkg_bisect_fetch = git
+pkg_bisect_repo = https://github.com/knutin/bisect
+pkg_bisect_commit = master
+
+PACKAGES += bitcask
+pkg_bitcask_name = bitcask
+pkg_bitcask_description = because you need another key/value storage engine
+pkg_bitcask_homepage = https://github.com/basho/bitcask
+pkg_bitcask_fetch = git
+pkg_bitcask_repo = https://github.com/basho/bitcask
+pkg_bitcask_commit = develop
+
+PACKAGES += bitstore
+pkg_bitstore_name = bitstore
+pkg_bitstore_description = A document based ontology development environment
+pkg_bitstore_homepage = https://github.com/bdionne/bitstore
+pkg_bitstore_fetch = git
+pkg_bitstore_repo = https://github.com/bdionne/bitstore
+pkg_bitstore_commit = master
+
+PACKAGES += bootstrap
+pkg_bootstrap_name = bootstrap
+pkg_bootstrap_description = A simple, yet powerful Erlang cluster bootstrapping application.
+pkg_bootstrap_homepage = https://github.com/schlagert/bootstrap
+pkg_bootstrap_fetch = git
+pkg_bootstrap_repo = https://github.com/schlagert/bootstrap
+pkg_bootstrap_commit = master
+
+PACKAGES += boss
+pkg_boss_name = boss
+pkg_boss_description = Erlang web MVC, now featuring Comet
+pkg_boss_homepage = https://github.com/ChicagoBoss/ChicagoBoss
+pkg_boss_fetch = git
+pkg_boss_repo = https://github.com/ChicagoBoss/ChicagoBoss
+pkg_boss_commit = master
+
+PACKAGES += boss_db
+pkg_boss_db_name = boss_db
+pkg_boss_db_description = BossDB: a sharded, caching, pooling, evented ORM for Erlang
+pkg_boss_db_homepage = https://github.com/ErlyORM/boss_db
+pkg_boss_db_fetch = git
+pkg_boss_db_repo = https://github.com/ErlyORM/boss_db
+pkg_boss_db_commit = master
+
+PACKAGES += brod
+pkg_brod_name = brod
+pkg_brod_description = Kafka client in Erlang
+pkg_brod_homepage = https://github.com/klarna/brod
+pkg_brod_fetch = git
+pkg_brod_repo = https://github.com/klarna/brod.git
+pkg_brod_commit = master
+
+PACKAGES += bson
+pkg_bson_name = bson
+pkg_bson_description = BSON documents in Erlang, see bsonspec.org
+pkg_bson_homepage = https://github.com/comtihon/bson-erlang
+pkg_bson_fetch = git
+pkg_bson_repo = https://github.com/comtihon/bson-erlang
+pkg_bson_commit = master
+
+PACKAGES += bullet
+pkg_bullet_name = bullet
+pkg_bullet_description = Simple, reliable, efficient streaming for Cowboy.
+pkg_bullet_homepage = http://ninenines.eu
+pkg_bullet_fetch = git
+pkg_bullet_repo = https://github.com/ninenines/bullet
+pkg_bullet_commit = master
+
+PACKAGES += cache
+pkg_cache_name = cache
+pkg_cache_description = Erlang in-memory cache
+pkg_cache_homepage = https://github.com/fogfish/cache
+pkg_cache_fetch = git
+pkg_cache_repo = https://github.com/fogfish/cache
+pkg_cache_commit = master
+
+PACKAGES += cake
+pkg_cake_name = cake
+pkg_cake_description = Really simple terminal colorization
+pkg_cake_homepage = https://github.com/darach/cake-erl
+pkg_cake_fetch = git
+pkg_cake_repo = https://github.com/darach/cake-erl
+pkg_cake_commit = master
+
+PACKAGES += carotene
+pkg_carotene_name = carotene
+pkg_carotene_description = Real-time server
+pkg_carotene_homepage = https://github.com/carotene/carotene
+pkg_carotene_fetch = git
+pkg_carotene_repo = https://github.com/carotene/carotene
+pkg_carotene_commit = master
+
+PACKAGES += cberl
+pkg_cberl_name = cberl
+pkg_cberl_description = NIF based Erlang bindings for Couchbase
+pkg_cberl_homepage = https://github.com/chitika/cberl
+pkg_cberl_fetch = git
+pkg_cberl_repo = https://github.com/chitika/cberl
+pkg_cberl_commit = master
+
+PACKAGES += cecho
+pkg_cecho_name = cecho
+pkg_cecho_description = An ncurses library for Erlang
+pkg_cecho_homepage = https://github.com/mazenharake/cecho
+pkg_cecho_fetch = git
+pkg_cecho_repo = https://github.com/mazenharake/cecho
+pkg_cecho_commit = master
+
+PACKAGES += cferl
+pkg_cferl_name = cferl
+pkg_cferl_description = Rackspace / Open Stack Cloud Files Erlang Client
+pkg_cferl_homepage = https://github.com/ddossot/cferl
+pkg_cferl_fetch = git
+pkg_cferl_repo = https://github.com/ddossot/cferl
+pkg_cferl_commit = master
+
+PACKAGES += chaos_monkey
+pkg_chaos_monkey_name = chaos_monkey
+pkg_chaos_monkey_description = This is The CHAOS MONKEY. It will kill your processes.
+pkg_chaos_monkey_homepage = https://github.com/dLuna/chaos_monkey
+pkg_chaos_monkey_fetch = git
+pkg_chaos_monkey_repo = https://github.com/dLuna/chaos_monkey
+pkg_chaos_monkey_commit = master
+
+PACKAGES += check_node
+pkg_check_node_name = check_node
+pkg_check_node_description = Nagios Scripts for monitoring Riak
+pkg_check_node_homepage = https://github.com/basho-labs/riak_nagios
+pkg_check_node_fetch = git
+pkg_check_node_repo = https://github.com/basho-labs/riak_nagios
+pkg_check_node_commit = master
+
+PACKAGES += chronos
+pkg_chronos_name = chronos
+pkg_chronos_description = Timer module for Erlang that makes it easy to abstract time out of the tests.
+pkg_chronos_homepage = https://github.com/lehoff/chronos
+pkg_chronos_fetch = git
+pkg_chronos_repo = https://github.com/lehoff/chronos
+pkg_chronos_commit = master
+
+PACKAGES += chumak
+pkg_chumak_name = chumak
+pkg_chumak_description = Pure Erlang implementation of ZeroMQ Message Transport Protocol.
+pkg_chumak_homepage = http://choven.ca
+pkg_chumak_fetch = git
+pkg_chumak_repo = https://github.com/chovencorp/chumak
+pkg_chumak_commit = master
+
+PACKAGES += cl
+pkg_cl_name = cl
+pkg_cl_description = OpenCL binding for Erlang
+pkg_cl_homepage = https://github.com/tonyrog/cl
+pkg_cl_fetch = git
+pkg_cl_repo = https://github.com/tonyrog/cl
+pkg_cl_commit = master
+
+PACKAGES += clique
+pkg_clique_name = clique
+pkg_clique_description = CLI Framework for Erlang
+pkg_clique_homepage = https://github.com/basho/clique
+pkg_clique_fetch = git
+pkg_clique_repo = https://github.com/basho/clique
+pkg_clique_commit = develop
+
+PACKAGES += cloudi_core
+pkg_cloudi_core_name = cloudi_core
+pkg_cloudi_core_description = CloudI internal service runtime
+pkg_cloudi_core_homepage = http://cloudi.org/
+pkg_cloudi_core_fetch = git
+pkg_cloudi_core_repo = https://github.com/CloudI/cloudi_core
+pkg_cloudi_core_commit = master
+
+PACKAGES += cloudi_service_api_requests
+pkg_cloudi_service_api_requests_name = cloudi_service_api_requests
+pkg_cloudi_service_api_requests_description = CloudI Service API requests (JSON-RPC/Erlang-term support)
+pkg_cloudi_service_api_requests_homepage = http://cloudi.org/
+pkg_cloudi_service_api_requests_fetch = git
+pkg_cloudi_service_api_requests_repo = https://github.com/CloudI/cloudi_service_api_requests
+pkg_cloudi_service_api_requests_commit = master
+
+PACKAGES += cloudi_service_db
+pkg_cloudi_service_db_name = cloudi_service_db
+pkg_cloudi_service_db_description = CloudI Database (in-memory/testing/generic)
+pkg_cloudi_service_db_homepage = http://cloudi.org/
+pkg_cloudi_service_db_fetch = git
+pkg_cloudi_service_db_repo = https://github.com/CloudI/cloudi_service_db
+pkg_cloudi_service_db_commit = master
+
+PACKAGES += cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_name = cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_description = Cassandra CloudI Service
+pkg_cloudi_service_db_cassandra_homepage = http://cloudi.org/
+pkg_cloudi_service_db_cassandra_fetch = git
+pkg_cloudi_service_db_cassandra_repo = https://github.com/CloudI/cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_commit = master
+
+PACKAGES += cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_name = cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_description = Cassandra CQL CloudI Service
+pkg_cloudi_service_db_cassandra_cql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_cassandra_cql_fetch = git
+pkg_cloudi_service_db_cassandra_cql_repo = https://github.com/CloudI/cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_commit = master
+
+PACKAGES += cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_name = cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_description = CouchDB CloudI Service
+pkg_cloudi_service_db_couchdb_homepage = http://cloudi.org/
+pkg_cloudi_service_db_couchdb_fetch = git
+pkg_cloudi_service_db_couchdb_repo = https://github.com/CloudI/cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_commit = master
+
+PACKAGES += cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_name = cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_description = elasticsearch CloudI Service
+pkg_cloudi_service_db_elasticsearch_homepage = http://cloudi.org/
+pkg_cloudi_service_db_elasticsearch_fetch = git
+pkg_cloudi_service_db_elasticsearch_repo = https://github.com/CloudI/cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_commit = master
+
+PACKAGES += cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_name = cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_description = memcached CloudI Service
+pkg_cloudi_service_db_memcached_homepage = http://cloudi.org/
+pkg_cloudi_service_db_memcached_fetch = git
+pkg_cloudi_service_db_memcached_repo = https://github.com/CloudI/cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_commit = master
+
+PACKAGES += cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_name = cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_description = MySQL CloudI Service
+pkg_cloudi_service_db_mysql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_mysql_fetch = git
+pkg_cloudi_service_db_mysql_repo = https://github.com/CloudI/cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_commit = master
+
+PACKAGES += cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_name = cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_description = PostgreSQL CloudI Service
+pkg_cloudi_service_db_pgsql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_pgsql_fetch = git
+pkg_cloudi_service_db_pgsql_repo = https://github.com/CloudI/cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_commit = master
+
+PACKAGES += cloudi_service_db_riak
+pkg_cloudi_service_db_riak_name = cloudi_service_db_riak
+pkg_cloudi_service_db_riak_description = Riak CloudI Service
+pkg_cloudi_service_db_riak_homepage = http://cloudi.org/
+pkg_cloudi_service_db_riak_fetch = git
+pkg_cloudi_service_db_riak_repo = https://github.com/CloudI/cloudi_service_db_riak
+pkg_cloudi_service_db_riak_commit = master
+
+PACKAGES += cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_name = cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_description = Tokyo Tyrant CloudI Service
+pkg_cloudi_service_db_tokyotyrant_homepage = http://cloudi.org/
+pkg_cloudi_service_db_tokyotyrant_fetch = git
+pkg_cloudi_service_db_tokyotyrant_repo = https://github.com/CloudI/cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_commit = master
+
+PACKAGES += cloudi_service_filesystem
+pkg_cloudi_service_filesystem_name = cloudi_service_filesystem
+pkg_cloudi_service_filesystem_description = Filesystem CloudI Service
+pkg_cloudi_service_filesystem_homepage = http://cloudi.org/
+pkg_cloudi_service_filesystem_fetch = git
+pkg_cloudi_service_filesystem_repo = https://github.com/CloudI/cloudi_service_filesystem
+pkg_cloudi_service_filesystem_commit = master
+
+PACKAGES += cloudi_service_http_client
+pkg_cloudi_service_http_client_name = cloudi_service_http_client
+pkg_cloudi_service_http_client_description = HTTP client CloudI Service
+pkg_cloudi_service_http_client_homepage = http://cloudi.org/
+pkg_cloudi_service_http_client_fetch = git
+pkg_cloudi_service_http_client_repo = https://github.com/CloudI/cloudi_service_http_client
+pkg_cloudi_service_http_client_commit = master
+
+PACKAGES += cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_name = cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_description = cowboy HTTP/HTTPS CloudI Service
+pkg_cloudi_service_http_cowboy_homepage = http://cloudi.org/
+pkg_cloudi_service_http_cowboy_fetch = git
+pkg_cloudi_service_http_cowboy_repo = https://github.com/CloudI/cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_commit = master
+
+PACKAGES += cloudi_service_http_elli
+pkg_cloudi_service_http_elli_name = cloudi_service_http_elli
+pkg_cloudi_service_http_elli_description = elli HTTP CloudI Service
+pkg_cloudi_service_http_elli_homepage = http://cloudi.org/
+pkg_cloudi_service_http_elli_fetch = git
+pkg_cloudi_service_http_elli_repo = https://github.com/CloudI/cloudi_service_http_elli
+pkg_cloudi_service_http_elli_commit = master
+
+PACKAGES += cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_name = cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_description = Map/Reduce CloudI Service
+pkg_cloudi_service_map_reduce_homepage = http://cloudi.org/
+pkg_cloudi_service_map_reduce_fetch = git
+pkg_cloudi_service_map_reduce_repo = https://github.com/CloudI/cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_commit = master
+
+PACKAGES += cloudi_service_oauth1
+pkg_cloudi_service_oauth1_name = cloudi_service_oauth1
+pkg_cloudi_service_oauth1_description = OAuth v1.0 CloudI Service
+pkg_cloudi_service_oauth1_homepage = http://cloudi.org/
+pkg_cloudi_service_oauth1_fetch = git
+pkg_cloudi_service_oauth1_repo = https://github.com/CloudI/cloudi_service_oauth1
+pkg_cloudi_service_oauth1_commit = master
+
+PACKAGES += cloudi_service_queue
+pkg_cloudi_service_queue_name = cloudi_service_queue
+pkg_cloudi_service_queue_description = Persistent Queue Service
+pkg_cloudi_service_queue_homepage = http://cloudi.org/
+pkg_cloudi_service_queue_fetch = git
+pkg_cloudi_service_queue_repo = https://github.com/CloudI/cloudi_service_queue
+pkg_cloudi_service_queue_commit = master
+
+PACKAGES += cloudi_service_quorum
+pkg_cloudi_service_quorum_name = cloudi_service_quorum
+pkg_cloudi_service_quorum_description = CloudI Quorum Service
+pkg_cloudi_service_quorum_homepage = http://cloudi.org/
+pkg_cloudi_service_quorum_fetch = git
+pkg_cloudi_service_quorum_repo = https://github.com/CloudI/cloudi_service_quorum
+pkg_cloudi_service_quorum_commit = master
+
+PACKAGES += cloudi_service_router
+pkg_cloudi_service_router_name = cloudi_service_router
+pkg_cloudi_service_router_description = CloudI Router Service
+pkg_cloudi_service_router_homepage = http://cloudi.org/
+pkg_cloudi_service_router_fetch = git
+pkg_cloudi_service_router_repo = https://github.com/CloudI/cloudi_service_router
+pkg_cloudi_service_router_commit = master
+
+PACKAGES += cloudi_service_tcp
+pkg_cloudi_service_tcp_name = cloudi_service_tcp
+pkg_cloudi_service_tcp_description = TCP CloudI Service
+pkg_cloudi_service_tcp_homepage = http://cloudi.org/
+pkg_cloudi_service_tcp_fetch = git
+pkg_cloudi_service_tcp_repo = https://github.com/CloudI/cloudi_service_tcp
+pkg_cloudi_service_tcp_commit = master
+
+PACKAGES += cloudi_service_timers
+pkg_cloudi_service_timers_name = cloudi_service_timers
+pkg_cloudi_service_timers_description = Timers CloudI Service
+pkg_cloudi_service_timers_homepage = http://cloudi.org/
+pkg_cloudi_service_timers_fetch = git
+pkg_cloudi_service_timers_repo = https://github.com/CloudI/cloudi_service_timers
+pkg_cloudi_service_timers_commit = master
+
+PACKAGES += cloudi_service_udp
+pkg_cloudi_service_udp_name = cloudi_service_udp
+pkg_cloudi_service_udp_description = UDP CloudI Service
+pkg_cloudi_service_udp_homepage = http://cloudi.org/
+pkg_cloudi_service_udp_fetch = git
+pkg_cloudi_service_udp_repo = https://github.com/CloudI/cloudi_service_udp
+pkg_cloudi_service_udp_commit = master
+
+PACKAGES += cloudi_service_validate
+pkg_cloudi_service_validate_name = cloudi_service_validate
+pkg_cloudi_service_validate_description = CloudI Validate Service
+pkg_cloudi_service_validate_homepage = http://cloudi.org/
+pkg_cloudi_service_validate_fetch = git
+pkg_cloudi_service_validate_repo = https://github.com/CloudI/cloudi_service_validate
+pkg_cloudi_service_validate_commit = master
+
+PACKAGES += cloudi_service_zeromq
+pkg_cloudi_service_zeromq_name = cloudi_service_zeromq
+pkg_cloudi_service_zeromq_description = ZeroMQ CloudI Service
+pkg_cloudi_service_zeromq_homepage = http://cloudi.org/
+pkg_cloudi_service_zeromq_fetch = git
+pkg_cloudi_service_zeromq_repo = https://github.com/CloudI/cloudi_service_zeromq
+pkg_cloudi_service_zeromq_commit = master
+
+PACKAGES += cluster_info
+pkg_cluster_info_name = cluster_info
+pkg_cluster_info_description = Fork of Hibari's nifty cluster_info OTP app
+pkg_cluster_info_homepage = https://github.com/basho/cluster_info
+pkg_cluster_info_fetch = git
+pkg_cluster_info_repo = https://github.com/basho/cluster_info
+pkg_cluster_info_commit = master
+
+PACKAGES += color
+pkg_color_name = color
+pkg_color_description = ANSI colors for your Erlang
+pkg_color_homepage = https://github.com/julianduque/erlang-color
+pkg_color_fetch = git
+pkg_color_repo = https://github.com/julianduque/erlang-color
+pkg_color_commit = master
+
+PACKAGES += confetti
+pkg_confetti_name = confetti
+pkg_confetti_description = Erlang configuration provider / application:get_env/2 on steroids
+pkg_confetti_homepage = https://github.com/jtendo/confetti
+pkg_confetti_fetch = git
+pkg_confetti_repo = https://github.com/jtendo/confetti
+pkg_confetti_commit = master
+
+PACKAGES += couchbeam
+pkg_couchbeam_name = couchbeam
+pkg_couchbeam_description = Apache CouchDB client in Erlang
+pkg_couchbeam_homepage = https://github.com/benoitc/couchbeam
+pkg_couchbeam_fetch = git
+pkg_couchbeam_repo = https://github.com/benoitc/couchbeam
+pkg_couchbeam_commit = master
+
+PACKAGES += covertool
+pkg_covertool_name = covertool
+pkg_covertool_description = Tool to convert Erlang cover data files into Cobertura XML reports
+pkg_covertool_homepage = https://github.com/idubrov/covertool
+pkg_covertool_fetch = git
+pkg_covertool_repo = https://github.com/idubrov/covertool
+pkg_covertool_commit = master
+
+PACKAGES += cowboy
+pkg_cowboy_name = cowboy
+pkg_cowboy_description = Small, fast and modular HTTP server.
+pkg_cowboy_homepage = http://ninenines.eu
+pkg_cowboy_fetch = git
+pkg_cowboy_repo = https://github.com/ninenines/cowboy
+pkg_cowboy_commit = 1.0.4
+
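+# Usage sketch (an assumption for illustration, not part of the index itself):
+# a project consumes one of these packages by naming it in DEPS in its own
+# Makefile; erlang.mk then uses the pkg_cowboy_* defaults above to fetch it.
+#
+#     PROJECT = my_app
+#     DEPS = cowboy
+#     include erlang.mk
+#
+# The default ref (pkg_cowboy_commit = 1.0.4 above) can be overridden per
+# project, e.g. dep_cowboy_commit = master.
+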
+PACKAGES += cowdb
+pkg_cowdb_name = cowdb
+pkg_cowdb_description = Pure Key/Value database library for Erlang Applications
+pkg_cowdb_homepage = https://github.com/refuge/cowdb
+pkg_cowdb_fetch = git
+pkg_cowdb_repo = https://github.com/refuge/cowdb
+pkg_cowdb_commit = master
+
+PACKAGES += cowlib
+pkg_cowlib_name = cowlib
+pkg_cowlib_description = Support library for manipulating Web protocols.
+pkg_cowlib_homepage = http://ninenines.eu
+pkg_cowlib_fetch = git
+pkg_cowlib_repo = https://github.com/ninenines/cowlib
+pkg_cowlib_commit = 1.0.2
+
+PACKAGES += cpg
+pkg_cpg_name = cpg
+pkg_cpg_description = CloudI Process Groups
+pkg_cpg_homepage = https://github.com/okeuday/cpg
+pkg_cpg_fetch = git
+pkg_cpg_repo = https://github.com/okeuday/cpg
+pkg_cpg_commit = master
+
+PACKAGES += cqerl
+pkg_cqerl_name = cqerl
+pkg_cqerl_description = Native Erlang CQL client for Cassandra
+pkg_cqerl_homepage = https://matehat.github.io/cqerl/
+pkg_cqerl_fetch = git
+pkg_cqerl_repo = https://github.com/matehat/cqerl
+pkg_cqerl_commit = master
+
+PACKAGES += cr
+pkg_cr_name = cr
+pkg_cr_description = Chain Replication
+pkg_cr_homepage = https://synrc.com/apps/cr/doc/cr.htm
+pkg_cr_fetch = git
+pkg_cr_repo = https://github.com/spawnproc/cr
+pkg_cr_commit = master
+
+PACKAGES += cuttlefish
+pkg_cuttlefish_name = cuttlefish
+pkg_cuttlefish_description = never lose your childlike sense of wonder baby cuttlefish, promise me?
+pkg_cuttlefish_homepage = https://github.com/basho/cuttlefish
+pkg_cuttlefish_fetch = git
+pkg_cuttlefish_repo = https://github.com/basho/cuttlefish
+pkg_cuttlefish_commit = master
+
+PACKAGES += damocles
+pkg_damocles_name = damocles
+pkg_damocles_description = Erlang library for generating adversarial network conditions for QAing distributed applications/systems on a single Linux box.
+pkg_damocles_homepage = https://github.com/lostcolony/damocles
+pkg_damocles_fetch = git
+pkg_damocles_repo = https://github.com/lostcolony/damocles
+pkg_damocles_commit = master
+
+PACKAGES += debbie
+pkg_debbie_name = debbie
+pkg_debbie_description = .DEB Built In Erlang
+pkg_debbie_homepage = https://github.com/crownedgrouse/debbie
+pkg_debbie_fetch = git
+pkg_debbie_repo = https://github.com/crownedgrouse/debbie
+pkg_debbie_commit = master
+
+PACKAGES += decimal
+pkg_decimal_name = decimal
+pkg_decimal_description = An Erlang decimal arithmetic library
+pkg_decimal_homepage = https://github.com/tim/erlang-decimal
+pkg_decimal_fetch = git
+pkg_decimal_repo = https://github.com/tim/erlang-decimal
+pkg_decimal_commit = master
+
+PACKAGES += detergent
+pkg_detergent_name = detergent
+pkg_detergent_description = An emulsifying Erlang SOAP library
+pkg_detergent_homepage = https://github.com/devinus/detergent
+pkg_detergent_fetch = git
+pkg_detergent_repo = https://github.com/devinus/detergent
+pkg_detergent_commit = master
+
+PACKAGES += detest
+pkg_detest_name = detest
+pkg_detest_description = Tool for running tests on a cluster of erlang nodes
+pkg_detest_homepage = https://github.com/biokoda/detest
+pkg_detest_fetch = git
+pkg_detest_repo = https://github.com/biokoda/detest
+pkg_detest_commit = master
+
+PACKAGES += dh_date
+pkg_dh_date_name = dh_date
+pkg_dh_date_description = Date formatting / parsing library for erlang
+pkg_dh_date_homepage = https://github.com/daleharvey/dh_date
+pkg_dh_date_fetch = git
+pkg_dh_date_repo = https://github.com/daleharvey/dh_date
+pkg_dh_date_commit = master
+
+PACKAGES += dirbusterl
+pkg_dirbusterl_name = dirbusterl
+pkg_dirbusterl_description = DirBuster successor in Erlang
+pkg_dirbusterl_homepage = https://github.com/silentsignal/DirBustErl
+pkg_dirbusterl_fetch = git
+pkg_dirbusterl_repo = https://github.com/silentsignal/DirBustErl
+pkg_dirbusterl_commit = master
+
+PACKAGES += dispcount
+pkg_dispcount_name = dispcount
+pkg_dispcount_description = Erlang task dispatcher based on ETS counters.
+pkg_dispcount_homepage = https://github.com/ferd/dispcount
+pkg_dispcount_fetch = git
+pkg_dispcount_repo = https://github.com/ferd/dispcount
+pkg_dispcount_commit = master
+
+PACKAGES += dlhttpc
+pkg_dlhttpc_name = dlhttpc
+pkg_dlhttpc_description = dispcount-based lhttpc fork for massive amounts of requests to limited endpoints
+pkg_dlhttpc_homepage = https://github.com/ferd/dlhttpc
+pkg_dlhttpc_fetch = git
+pkg_dlhttpc_repo = https://github.com/ferd/dlhttpc
+pkg_dlhttpc_commit = master
+
+PACKAGES += dns
+pkg_dns_name = dns
+pkg_dns_description = Erlang DNS library
+pkg_dns_homepage = https://github.com/aetrion/dns_erlang
+pkg_dns_fetch = git
+pkg_dns_repo = https://github.com/aetrion/dns_erlang
+pkg_dns_commit = master
+
+PACKAGES += dnssd
+pkg_dnssd_name = dnssd
+pkg_dnssd_description = Erlang interface to Apple's Bonjour DNS Service Discovery implementation
+pkg_dnssd_homepage = https://github.com/benoitc/dnssd_erlang
+pkg_dnssd_fetch = git
+pkg_dnssd_repo = https://github.com/benoitc/dnssd_erlang
+pkg_dnssd_commit = master
+
+PACKAGES += dynamic_compile
+pkg_dynamic_compile_name = dynamic_compile
+pkg_dynamic_compile_description = compile and load erlang modules from string input
+pkg_dynamic_compile_homepage = https://github.com/jkvor/dynamic_compile
+pkg_dynamic_compile_fetch = git
+pkg_dynamic_compile_repo = https://github.com/jkvor/dynamic_compile
+pkg_dynamic_compile_commit = master
+
+PACKAGES += e2
+pkg_e2_name = e2
+pkg_e2_description = Library to simplify writing correct OTP applications.
+pkg_e2_homepage = http://e2project.org
+pkg_e2_fetch = git
+pkg_e2_repo = https://github.com/gar1t/e2
+pkg_e2_commit = master
+
+PACKAGES += eamf
+pkg_eamf_name = eamf
+pkg_eamf_description = eAMF provides Action Message Format (AMF) support for Erlang
+pkg_eamf_homepage = https://github.com/mrinalwadhwa/eamf
+pkg_eamf_fetch = git
+pkg_eamf_repo = https://github.com/mrinalwadhwa/eamf
+pkg_eamf_commit = master
+
+PACKAGES += eavro
+pkg_eavro_name = eavro
+pkg_eavro_description = Apache Avro encoder/decoder
+pkg_eavro_homepage = https://github.com/SIfoxDevTeam/eavro
+pkg_eavro_fetch = git
+pkg_eavro_repo = https://github.com/SIfoxDevTeam/eavro
+pkg_eavro_commit = master
+
+PACKAGES += ecapnp
+pkg_ecapnp_name = ecapnp
+pkg_ecapnp_description = Cap'n Proto library for Erlang
+pkg_ecapnp_homepage = https://github.com/kaos/ecapnp
+pkg_ecapnp_fetch = git
+pkg_ecapnp_repo = https://github.com/kaos/ecapnp
+pkg_ecapnp_commit = master
+
+PACKAGES += econfig
+pkg_econfig_name = econfig
+pkg_econfig_description = simple Erlang config handler using INI files
+pkg_econfig_homepage = https://github.com/benoitc/econfig
+pkg_econfig_fetch = git
+pkg_econfig_repo = https://github.com/benoitc/econfig
+pkg_econfig_commit = master
+
+PACKAGES += edate
+pkg_edate_name = edate
+pkg_edate_description = date manipulation library for erlang
+pkg_edate_homepage = https://github.com/dweldon/edate
+pkg_edate_fetch = git
+pkg_edate_repo = https://github.com/dweldon/edate
+pkg_edate_commit = master
+
+PACKAGES += edgar
+pkg_edgar_name = edgar
+pkg_edgar_description = Erlang Does GNU AR
+pkg_edgar_homepage = https://github.com/crownedgrouse/edgar
+pkg_edgar_fetch = git
+pkg_edgar_repo = https://github.com/crownedgrouse/edgar
+pkg_edgar_commit = master
+
+PACKAGES += edis
+pkg_edis_name = edis
+pkg_edis_description = An Erlang implementation of Redis KV Store
+pkg_edis_homepage = http://inaka.github.com/edis/
+pkg_edis_fetch = git
+pkg_edis_repo = https://github.com/inaka/edis
+pkg_edis_commit = master
+
+PACKAGES += edns
+pkg_edns_name = edns
+pkg_edns_description = Erlang/OTP DNS server
+pkg_edns_homepage = https://github.com/hcvst/erlang-dns
+pkg_edns_fetch = git
+pkg_edns_repo = https://github.com/hcvst/erlang-dns
+pkg_edns_commit = master
+
+PACKAGES += edown
+pkg_edown_name = edown
+pkg_edown_description = EDoc extension for generating GitHub-flavored Markdown
+pkg_edown_homepage = https://github.com/uwiger/edown
+pkg_edown_fetch = git
+pkg_edown_repo = https://github.com/uwiger/edown
+pkg_edown_commit = master
+
+PACKAGES += eep
+pkg_eep_name = eep
+pkg_eep_description = Erlang Easy Profiling (eep) application provides a way to analyze application performance and call hierarchy
+pkg_eep_homepage = https://github.com/virtan/eep
+pkg_eep_fetch = git
+pkg_eep_repo = https://github.com/virtan/eep
+pkg_eep_commit = master
+
+PACKAGES += eep_app
+pkg_eep_app_name = eep_app
+pkg_eep_app_description = Embedded Event Processing
+pkg_eep_app_homepage = https://github.com/darach/eep-erl
+pkg_eep_app_fetch = git
+pkg_eep_app_repo = https://github.com/darach/eep-erl
+pkg_eep_app_commit = master
+
+PACKAGES += efene
+pkg_efene_name = efene
+pkg_efene_description = Alternative syntax for the Erlang Programming Language focusing on simplicity, ease of use and programmer UX
+pkg_efene_homepage = https://github.com/efene/efene
+pkg_efene_fetch = git
+pkg_efene_repo = https://github.com/efene/efene
+pkg_efene_commit = master
+
+PACKAGES += egeoip
+pkg_egeoip_name = egeoip
+pkg_egeoip_description = Erlang IP Geolocation module, currently supporting the MaxMind GeoLite City Database.
+pkg_egeoip_homepage = https://github.com/mochi/egeoip
+pkg_egeoip_fetch = git
+pkg_egeoip_repo = https://github.com/mochi/egeoip
+pkg_egeoip_commit = master
+
+PACKAGES += ehsa
+pkg_ehsa_name = ehsa
+pkg_ehsa_description = Erlang HTTP server basic and digest authentication modules
+pkg_ehsa_homepage = https://bitbucket.org/a12n/ehsa
+pkg_ehsa_fetch = hg
+pkg_ehsa_repo = https://bitbucket.org/a12n/ehsa
+pkg_ehsa_commit = default
+
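+# Note: unlike the git-based entries, ehsa above is fetched with Mercurial
+# (fetch = hg), where "default" is the conventional branch name analogous to
+# git's master. A minimal sketch of pinning it, assuming the same dep_*
+# override convention as the git packages:
+#
+#     DEPS = ehsa
+#     dep_ehsa_commit = default
+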
+PACKAGES += ej
+pkg_ej_name = ej
+pkg_ej_description = Helper module for working with Erlang terms representing JSON
+pkg_ej_homepage = https://github.com/seth/ej
+pkg_ej_fetch = git
+pkg_ej_repo = https://github.com/seth/ej
+pkg_ej_commit = master
+
+PACKAGES += ejabberd
+pkg_ejabberd_name = ejabberd
+pkg_ejabberd_description = Robust, ubiquitous and massively scalable Jabber / XMPP Instant Messaging platform
+pkg_ejabberd_homepage = https://github.com/processone/ejabberd
+pkg_ejabberd_fetch = git
+pkg_ejabberd_repo = https://github.com/processone/ejabberd
+pkg_ejabberd_commit = master
+
+PACKAGES += ejwt
+pkg_ejwt_name = ejwt
+pkg_ejwt_description = erlang library for JSON Web Token
+pkg_ejwt_homepage = https://github.com/artefactop/ejwt
+pkg_ejwt_fetch = git
+pkg_ejwt_repo = https://github.com/artefactop/ejwt
+pkg_ejwt_commit = master
+
+PACKAGES += ekaf
+pkg_ekaf_name = ekaf
+pkg_ekaf_description = A minimal, high-performance Kafka client in Erlang.
+pkg_ekaf_homepage = https://github.com/helpshift/ekaf
+pkg_ekaf_fetch = git
+pkg_ekaf_repo = https://github.com/helpshift/ekaf
+pkg_ekaf_commit = master
+
+PACKAGES += elarm
+pkg_elarm_name = elarm
+pkg_elarm_description = Alarm Manager for Erlang.
+pkg_elarm_homepage = https://github.com/esl/elarm
+pkg_elarm_fetch = git
+pkg_elarm_repo = https://github.com/esl/elarm
+pkg_elarm_commit = master
+
+PACKAGES += eleveldb
+pkg_eleveldb_name = eleveldb
+pkg_eleveldb_description = Erlang LevelDB API
+pkg_eleveldb_homepage = https://github.com/basho/eleveldb
+pkg_eleveldb_fetch = git
+pkg_eleveldb_repo = https://github.com/basho/eleveldb
+pkg_eleveldb_commit = master
+
+PACKAGES += elixir
+pkg_elixir_name = elixir
+pkg_elixir_description = Elixir is a dynamic, functional language designed for building scalable and maintainable applications
+pkg_elixir_homepage = https://elixir-lang.org/
+pkg_elixir_fetch = git
+pkg_elixir_repo = https://github.com/elixir-lang/elixir
+pkg_elixir_commit = master
+
+PACKAGES += elli
+pkg_elli_name = elli
+pkg_elli_description = Simple, robust and performant Erlang web server
+pkg_elli_homepage = https://github.com/elli-lib/elli
+pkg_elli_fetch = git
+pkg_elli_repo = https://github.com/elli-lib/elli
+pkg_elli_commit = master
+
+PACKAGES += elvis
+pkg_elvis_name = elvis
+pkg_elvis_description = Erlang Style Reviewer
+pkg_elvis_homepage = https://github.com/inaka/elvis
+pkg_elvis_fetch = git
+pkg_elvis_repo = https://github.com/inaka/elvis
+pkg_elvis_commit = master
+
+PACKAGES += emagick
+pkg_emagick_name = emagick
+pkg_emagick_description = Wrapper for Graphics/ImageMagick command line tool.
+pkg_emagick_homepage = https://github.com/kivra/emagick
+pkg_emagick_fetch = git
+pkg_emagick_repo = https://github.com/kivra/emagick
+pkg_emagick_commit = master
+
+PACKAGES += emysql
+pkg_emysql_name = emysql
+pkg_emysql_description = Stable, pure Erlang MySQL driver.
+pkg_emysql_homepage = https://github.com/Eonblast/Emysql
+pkg_emysql_fetch = git
+pkg_emysql_repo = https://github.com/Eonblast/Emysql
+pkg_emysql_commit = master
+
+PACKAGES += enm
+pkg_enm_name = enm
+pkg_enm_description = Erlang driver for nanomsg
+pkg_enm_homepage = https://github.com/basho/enm
+pkg_enm_fetch = git
+pkg_enm_repo = https://github.com/basho/enm
+pkg_enm_commit = master
+
+PACKAGES += entop
+pkg_entop_name = entop
+pkg_entop_description = A top-like tool for monitoring an Erlang node
+pkg_entop_homepage = https://github.com/mazenharake/entop
+pkg_entop_fetch = git
+pkg_entop_repo = https://github.com/mazenharake/entop
+pkg_entop_commit = master
+
+PACKAGES += epcap
+pkg_epcap_name = epcap
+pkg_epcap_description = Erlang packet capture interface using pcap
+pkg_epcap_homepage = https://github.com/msantos/epcap
+pkg_epcap_fetch = git
+pkg_epcap_repo = https://github.com/msantos/epcap
+pkg_epcap_commit = master
+
+PACKAGES += eper
+pkg_eper_name = eper
+pkg_eper_description = Erlang performance and debugging tools.
+pkg_eper_homepage = https://github.com/massemanet/eper
+pkg_eper_fetch = git
+pkg_eper_repo = https://github.com/massemanet/eper
+pkg_eper_commit = master
+
+PACKAGES += epgsql
+pkg_epgsql_name = epgsql
+pkg_epgsql_description = Erlang PostgreSQL client library.
+pkg_epgsql_homepage = https://github.com/epgsql/epgsql
+pkg_epgsql_fetch = git
+pkg_epgsql_repo = https://github.com/epgsql/epgsql
+pkg_epgsql_commit = master
+
+PACKAGES += episcina
+pkg_episcina_name = episcina
+pkg_episcina_description = A simple non intrusive resource pool for connections
+pkg_episcina_homepage = https://github.com/erlware/episcina
+pkg_episcina_fetch = git
+pkg_episcina_repo = https://github.com/erlware/episcina
+pkg_episcina_commit = master
+
+PACKAGES += eplot
+pkg_eplot_name = eplot
+pkg_eplot_description = A plot engine written in erlang.
+pkg_eplot_homepage = https://github.com/psyeugenic/eplot
+pkg_eplot_fetch = git
+pkg_eplot_repo = https://github.com/psyeugenic/eplot
+pkg_eplot_commit = master
+
+PACKAGES += epocxy
+pkg_epocxy_name = epocxy
+pkg_epocxy_description = Erlang Patterns of Concurrency
+pkg_epocxy_homepage = https://github.com/duomark/epocxy
+pkg_epocxy_fetch = git
+pkg_epocxy_repo = https://github.com/duomark/epocxy
+pkg_epocxy_commit = master
+
+PACKAGES += epubnub
+pkg_epubnub_name = epubnub
+pkg_epubnub_description = Erlang PubNub API
+pkg_epubnub_homepage = https://github.com/tsloughter/epubnub
+pkg_epubnub_fetch = git
+pkg_epubnub_repo = https://github.com/tsloughter/epubnub
+pkg_epubnub_commit = master
+
+PACKAGES += eqm
+pkg_eqm_name = eqm
+pkg_eqm_description = Erlang pub/sub with supply-demand channels
+pkg_eqm_homepage = https://github.com/loucash/eqm
+pkg_eqm_fetch = git
+pkg_eqm_repo = https://github.com/loucash/eqm
+pkg_eqm_commit = master
+
+PACKAGES += eredis
+pkg_eredis_name = eredis
+pkg_eredis_description = Erlang Redis client
+pkg_eredis_homepage = https://github.com/wooga/eredis
+pkg_eredis_fetch = git
+pkg_eredis_repo = https://github.com/wooga/eredis
+pkg_eredis_commit = master
+
+PACKAGES += eredis_pool
+pkg_eredis_pool_name = eredis_pool
+pkg_eredis_pool_description = eredis_pool is a pool of Redis clients, using eredis and poolboy.
+pkg_eredis_pool_homepage = https://github.com/hiroeorz/eredis_pool
+pkg_eredis_pool_fetch = git
+pkg_eredis_pool_repo = https://github.com/hiroeorz/eredis_pool
+pkg_eredis_pool_commit = master
+
+PACKAGES += erl_streams
+pkg_erl_streams_name = erl_streams
+pkg_erl_streams_description = Streams in Erlang
+pkg_erl_streams_homepage = https://github.com/epappas/erl_streams
+pkg_erl_streams_fetch = git
+pkg_erl_streams_repo = https://github.com/epappas/erl_streams
+pkg_erl_streams_commit = master
+
+PACKAGES += erlang_cep
+pkg_erlang_cep_name = erlang_cep
+pkg_erlang_cep_description = A basic CEP package written in erlang
+pkg_erlang_cep_homepage = https://github.com/danmacklin/erlang_cep
+pkg_erlang_cep_fetch = git
+pkg_erlang_cep_repo = https://github.com/danmacklin/erlang_cep
+pkg_erlang_cep_commit = master
+
+PACKAGES += erlang_js
+pkg_erlang_js_name = erlang_js
+pkg_erlang_js_description = A linked-in driver for Erlang to Mozilla's SpiderMonkey JavaScript runtime.
+pkg_erlang_js_homepage = https://github.com/basho/erlang_js
+pkg_erlang_js_fetch = git
+pkg_erlang_js_repo = https://github.com/basho/erlang_js
+pkg_erlang_js_commit = master
+
+PACKAGES += erlang_localtime
+pkg_erlang_localtime_name = erlang_localtime
+pkg_erlang_localtime_description = Erlang library for conversion from one local time to another
+pkg_erlang_localtime_homepage = https://github.com/dmitryme/erlang_localtime
+pkg_erlang_localtime_fetch = git
+pkg_erlang_localtime_repo = https://github.com/dmitryme/erlang_localtime
+pkg_erlang_localtime_commit = master
+
+PACKAGES += erlang_smtp
+pkg_erlang_smtp_name = erlang_smtp
+pkg_erlang_smtp_description = Erlang SMTP and POP3 server code.
+pkg_erlang_smtp_homepage = https://github.com/tonyg/erlang-smtp
+pkg_erlang_smtp_fetch = git
+pkg_erlang_smtp_repo = https://github.com/tonyg/erlang-smtp
+pkg_erlang_smtp_commit = master
+
+PACKAGES += erlang_term
+pkg_erlang_term_name = erlang_term
+pkg_erlang_term_description = Erlang Term Info
+pkg_erlang_term_homepage = https://github.com/okeuday/erlang_term
+pkg_erlang_term_fetch = git
+pkg_erlang_term_repo = https://github.com/okeuday/erlang_term
+pkg_erlang_term_commit = master
+
+PACKAGES += erlastic_search
+pkg_erlastic_search_name = erlastic_search
+pkg_erlastic_search_description = An Erlang app for communicating with Elasticsearch's REST interface.
+pkg_erlastic_search_homepage = https://github.com/tsloughter/erlastic_search
+pkg_erlastic_search_fetch = git
+pkg_erlastic_search_repo = https://github.com/tsloughter/erlastic_search
+pkg_erlastic_search_commit = master
+
+PACKAGES += erlasticsearch
+pkg_erlasticsearch_name = erlasticsearch
+pkg_erlasticsearch_description = Erlang thrift interface to elastic_search
+pkg_erlasticsearch_homepage = https://github.com/dieswaytoofast/erlasticsearch
+pkg_erlasticsearch_fetch = git
+pkg_erlasticsearch_repo = https://github.com/dieswaytoofast/erlasticsearch
+pkg_erlasticsearch_commit = master
+
+PACKAGES += erlbrake
+pkg_erlbrake_name = erlbrake
+pkg_erlbrake_description = Erlang Airbrake notification client
+pkg_erlbrake_homepage = https://github.com/kenpratt/erlbrake
+pkg_erlbrake_fetch = git
+pkg_erlbrake_repo = https://github.com/kenpratt/erlbrake
+pkg_erlbrake_commit = master
+
+PACKAGES += erlcloud
+pkg_erlcloud_name = erlcloud
+pkg_erlcloud_description = Cloud Computing library for erlang (Amazon EC2, S3, SQS, SimpleDB, Mechanical Turk, ELB)
+pkg_erlcloud_homepage = https://github.com/gleber/erlcloud
+pkg_erlcloud_fetch = git
+pkg_erlcloud_repo = https://github.com/gleber/erlcloud
+pkg_erlcloud_commit = master
+
+PACKAGES += erlcron
+pkg_erlcron_name = erlcron
+pkg_erlcron_description = Erlang cronish system
+pkg_erlcron_homepage = https://github.com/erlware/erlcron
+pkg_erlcron_fetch = git
+pkg_erlcron_repo = https://github.com/erlware/erlcron
+pkg_erlcron_commit = master
+
+PACKAGES += erldb
+pkg_erldb_name = erldb
+pkg_erldb_description = ORM (Object-relational mapping) application implemented in Erlang
+pkg_erldb_homepage = http://erldb.org
+pkg_erldb_fetch = git
+pkg_erldb_repo = https://github.com/erldb/erldb
+pkg_erldb_commit = master
+
+PACKAGES += erldis
+pkg_erldis_name = erldis
+pkg_erldis_description = redis erlang client library
+pkg_erldis_homepage = https://github.com/cstar/erldis
+pkg_erldis_fetch = git
+pkg_erldis_repo = https://github.com/cstar/erldis
+pkg_erldis_commit = master
+
+PACKAGES += erldns
+pkg_erldns_name = erldns
+pkg_erldns_description = DNS server, in erlang.
+pkg_erldns_homepage = https://github.com/aetrion/erl-dns
+pkg_erldns_fetch = git
+pkg_erldns_repo = https://github.com/aetrion/erl-dns
+pkg_erldns_commit = master
+
+PACKAGES += erldocker
+pkg_erldocker_name = erldocker
+pkg_erldocker_description = Docker Remote API client for Erlang
+pkg_erldocker_homepage = https://github.com/proger/erldocker
+pkg_erldocker_fetch = git
+pkg_erldocker_repo = https://github.com/proger/erldocker
+pkg_erldocker_commit = master
+
+PACKAGES += erlfsmon
+pkg_erlfsmon_name = erlfsmon
+pkg_erlfsmon_description = Erlang filesystem event watcher for Linux and OSX
+pkg_erlfsmon_homepage = https://github.com/proger/erlfsmon
+pkg_erlfsmon_fetch = git
+pkg_erlfsmon_repo = https://github.com/proger/erlfsmon
+pkg_erlfsmon_commit = master
+
+PACKAGES += erlgit
+pkg_erlgit_name = erlgit
+pkg_erlgit_description = Erlang convenience wrapper around git executable
+pkg_erlgit_homepage = https://github.com/gleber/erlgit
+pkg_erlgit_fetch = git
+pkg_erlgit_repo = https://github.com/gleber/erlgit
+pkg_erlgit_commit = master
+
+PACKAGES += erlguten
+pkg_erlguten_name = erlguten
+pkg_erlguten_description = ErlGuten is a system for high-quality typesetting, written purely in Erlang.
+pkg_erlguten_homepage = https://github.com/richcarl/erlguten
+pkg_erlguten_fetch = git
+pkg_erlguten_repo = https://github.com/richcarl/erlguten
+pkg_erlguten_commit = master
+
+PACKAGES += erlmc
+pkg_erlmc_name = erlmc
+pkg_erlmc_description = Erlang memcached binary protocol client
+pkg_erlmc_homepage = https://github.com/jkvor/erlmc
+pkg_erlmc_fetch = git
+pkg_erlmc_repo = https://github.com/jkvor/erlmc
+pkg_erlmc_commit = master
+
+PACKAGES += erlmongo
+pkg_erlmongo_name = erlmongo
+pkg_erlmongo_description = Record based Erlang driver for MongoDB with gridfs support
+pkg_erlmongo_homepage = https://github.com/SergejJurecko/erlmongo
+pkg_erlmongo_fetch = git
+pkg_erlmongo_repo = https://github.com/SergejJurecko/erlmongo
+pkg_erlmongo_commit = master
+
+PACKAGES += erlog
+pkg_erlog_name = erlog
+pkg_erlog_description = Prolog interpreter in and for Erlang
+pkg_erlog_homepage = https://github.com/rvirding/erlog
+pkg_erlog_fetch = git
+pkg_erlog_repo = https://github.com/rvirding/erlog
+pkg_erlog_commit = master
+
+PACKAGES += erlpass
+pkg_erlpass_name = erlpass
+pkg_erlpass_description = A library to handle password hashing and changing in a safe manner, independent from any kind of storage whatsoever.
+pkg_erlpass_homepage = https://github.com/ferd/erlpass
+pkg_erlpass_fetch = git
+pkg_erlpass_repo = https://github.com/ferd/erlpass
+pkg_erlpass_commit = master
+
+PACKAGES += erlport
+pkg_erlport_name = erlport
+pkg_erlport_description = ErlPort - connect Erlang to other languages
+pkg_erlport_homepage = https://github.com/hdima/erlport
+pkg_erlport_fetch = git
+pkg_erlport_repo = https://github.com/hdima/erlport
+pkg_erlport_commit = master
+
+PACKAGES += erlsh
+pkg_erlsh_name = erlsh
+pkg_erlsh_description = Erlang shell tools
+pkg_erlsh_homepage = https://github.com/proger/erlsh
+pkg_erlsh_fetch = git
+pkg_erlsh_repo = https://github.com/proger/erlsh
+pkg_erlsh_commit = master
+
+PACKAGES += erlsha2
+pkg_erlsha2_name = erlsha2
+pkg_erlsha2_description = SHA-224, SHA-256, SHA-384, SHA-512 implemented in Erlang NIFs.
+pkg_erlsha2_homepage = https://github.com/vinoski/erlsha2
+pkg_erlsha2_fetch = git
+pkg_erlsha2_repo = https://github.com/vinoski/erlsha2
+pkg_erlsha2_commit = master
+
+PACKAGES += erlsom
+pkg_erlsom_name = erlsom
+pkg_erlsom_description = XML parser for Erlang
+pkg_erlsom_homepage = https://github.com/willemdj/erlsom
+pkg_erlsom_fetch = git
+pkg_erlsom_repo = https://github.com/willemdj/erlsom
+pkg_erlsom_commit = master
+
+PACKAGES += erlubi
+pkg_erlubi_name = erlubi
+pkg_erlubi_description = Ubigraph Erlang Client (and Process Visualizer)
+pkg_erlubi_homepage = https://github.com/krestenkrab/erlubi
+pkg_erlubi_fetch = git
+pkg_erlubi_repo = https://github.com/krestenkrab/erlubi
+pkg_erlubi_commit = master
+
+PACKAGES += erlvolt
+pkg_erlvolt_name = erlvolt
+pkg_erlvolt_description = VoltDB Erlang Client Driver
+pkg_erlvolt_homepage = https://github.com/VoltDB/voltdb-client-erlang
+pkg_erlvolt_fetch = git
+pkg_erlvolt_repo = https://github.com/VoltDB/voltdb-client-erlang
+pkg_erlvolt_commit = master
+
+PACKAGES += erlware_commons
+pkg_erlware_commons_name = erlware_commons
+pkg_erlware_commons_description = Erlware Commons is an Erlware project focused on all aspects of reusable Erlang components.
+pkg_erlware_commons_homepage = https://github.com/erlware/erlware_commons
+pkg_erlware_commons_fetch = git
+pkg_erlware_commons_repo = https://github.com/erlware/erlware_commons
+pkg_erlware_commons_commit = master
+
+PACKAGES += erlydtl
+pkg_erlydtl_name = erlydtl
+pkg_erlydtl_description = Django Template Language for Erlang.
+pkg_erlydtl_homepage = https://github.com/erlydtl/erlydtl
+pkg_erlydtl_fetch = git
+pkg_erlydtl_repo = https://github.com/erlydtl/erlydtl
+pkg_erlydtl_commit = master
+
+PACKAGES += errd
+pkg_errd_name = errd
+pkg_errd_description = Erlang RRDTool library
+pkg_errd_homepage = https://github.com/archaelus/errd
+pkg_errd_fetch = git
+pkg_errd_repo = https://github.com/archaelus/errd
+pkg_errd_commit = master
+
+PACKAGES += erserve
+pkg_erserve_name = erserve
+pkg_erserve_description = Erlang/Rserve communication interface
+pkg_erserve_homepage = https://github.com/del/erserve
+pkg_erserve_fetch = git
+pkg_erserve_repo = https://github.com/del/erserve
+pkg_erserve_commit = master
+
+PACKAGES += erwa
+pkg_erwa_name = erwa
+pkg_erwa_description = A WAMP router and client written in Erlang.
+pkg_erwa_homepage = https://github.com/bwegh/erwa
+pkg_erwa_fetch = git
+pkg_erwa_repo = https://github.com/bwegh/erwa
+pkg_erwa_commit = master
+
+PACKAGES += escalus
+pkg_escalus_name = escalus
+pkg_escalus_description = An XMPP client library in Erlang for conveniently testing XMPP servers
+pkg_escalus_homepage = https://github.com/esl/escalus
+pkg_escalus_fetch = git
+pkg_escalus_repo = https://github.com/esl/escalus
+pkg_escalus_commit = master
+
+PACKAGES += esh_mk
+pkg_esh_mk_name = esh_mk
+pkg_esh_mk_description = esh template engine plugin for erlang.mk
+pkg_esh_mk_homepage = https://github.com/crownedgrouse/esh.mk
+pkg_esh_mk_fetch = git
+pkg_esh_mk_repo = https://github.com/crownedgrouse/esh.mk.git
+pkg_esh_mk_commit = master
+
+PACKAGES += espec
+pkg_espec_name = espec
+pkg_espec_description = ESpec: Behaviour driven development framework for Erlang
+pkg_espec_homepage = https://github.com/lucaspiller/espec
+pkg_espec_fetch = git
+pkg_espec_repo = https://github.com/lucaspiller/espec
+pkg_espec_commit = master
+
+PACKAGES += estatsd
+pkg_estatsd_name = estatsd
+pkg_estatsd_description = Erlang stats aggregation app that periodically flushes data to graphite
+pkg_estatsd_homepage = https://github.com/RJ/estatsd
+pkg_estatsd_fetch = git
+pkg_estatsd_repo = https://github.com/RJ/estatsd
+pkg_estatsd_commit = master
+
+PACKAGES += etap
+pkg_etap_name = etap
+pkg_etap_description = etap is a simple erlang testing library that provides TAP compliant output.
+pkg_etap_homepage = https://github.com/ngerakines/etap
+pkg_etap_fetch = git
+pkg_etap_repo = https://github.com/ngerakines/etap
+pkg_etap_commit = master
+
+PACKAGES += etest
+pkg_etest_name = etest
+pkg_etest_description = A lightweight, convention over configuration test framework for Erlang
+pkg_etest_homepage = https://github.com/wooga/etest
+pkg_etest_fetch = git
+pkg_etest_repo = https://github.com/wooga/etest
+pkg_etest_commit = master
+
+PACKAGES += etest_http
+pkg_etest_http_name = etest_http
+pkg_etest_http_description = etest Assertions around HTTP (client-side)
+pkg_etest_http_homepage = https://github.com/wooga/etest_http
+pkg_etest_http_fetch = git
+pkg_etest_http_repo = https://github.com/wooga/etest_http
+pkg_etest_http_commit = master
+
+PACKAGES += etoml
+pkg_etoml_name = etoml
+pkg_etoml_description = TOML parser for Erlang
+pkg_etoml_homepage = https://github.com/kalta/etoml
+pkg_etoml_fetch = git
+pkg_etoml_repo = https://github.com/kalta/etoml
+pkg_etoml_commit = master
+
+PACKAGES += eunit
+pkg_eunit_name = eunit
+pkg_eunit_description = The EUnit lightweight unit testing framework for Erlang - this is the canonical development repository.
+pkg_eunit_homepage = https://github.com/richcarl/eunit
+pkg_eunit_fetch = git
+pkg_eunit_repo = https://github.com/richcarl/eunit
+pkg_eunit_commit = master
+
+PACKAGES += eunit_formatters
+pkg_eunit_formatters_name = eunit_formatters
+pkg_eunit_formatters_description = Because eunit's output sucks. Let's make it better.
+pkg_eunit_formatters_homepage = https://github.com/seancribbs/eunit_formatters
+pkg_eunit_formatters_fetch = git
+pkg_eunit_formatters_repo = https://github.com/seancribbs/eunit_formatters
+pkg_eunit_formatters_commit = master
+
+PACKAGES += euthanasia
+pkg_euthanasia_name = euthanasia
+pkg_euthanasia_description = Merciful killer for your Erlang processes
+pkg_euthanasia_homepage = https://github.com/doubleyou/euthanasia
+pkg_euthanasia_fetch = git
+pkg_euthanasia_repo = https://github.com/doubleyou/euthanasia
+pkg_euthanasia_commit = master
+
+PACKAGES += evum
+pkg_evum_name = evum
+pkg_evum_description = Spawn Linux VMs as Erlang processes in the Erlang VM
+pkg_evum_homepage = https://github.com/msantos/evum
+pkg_evum_fetch = git
+pkg_evum_repo = https://github.com/msantos/evum
+pkg_evum_commit = master
+
+PACKAGES += exec
+pkg_exec_name = erlexec
+pkg_exec_description = Execute and control OS processes from Erlang/OTP.
+pkg_exec_homepage = http://saleyn.github.com/erlexec
+pkg_exec_fetch = git
+pkg_exec_repo = https://github.com/saleyn/erlexec
+pkg_exec_commit = master
+
+PACKAGES += exml
+pkg_exml_name = exml
+pkg_exml_description = XML parsing library in Erlang
+pkg_exml_homepage = https://github.com/paulgray/exml
+pkg_exml_fetch = git
+pkg_exml_repo = https://github.com/paulgray/exml
+pkg_exml_commit = master
+
+PACKAGES += exometer
+pkg_exometer_name = exometer
+pkg_exometer_description = Basic measurement objects and probe behavior
+pkg_exometer_homepage = https://github.com/Feuerlabs/exometer
+pkg_exometer_fetch = git
+pkg_exometer_repo = https://github.com/Feuerlabs/exometer
+pkg_exometer_commit = master
+
+PACKAGES += exs1024
+pkg_exs1024_name = exs1024
+pkg_exs1024_description = Xorshift1024star pseudo random number generator for Erlang.
+pkg_exs1024_homepage = https://github.com/jj1bdx/exs1024
+pkg_exs1024_fetch = git
+pkg_exs1024_repo = https://github.com/jj1bdx/exs1024
+pkg_exs1024_commit = master
+
+PACKAGES += exs64
+pkg_exs64_name = exs64
+pkg_exs64_description = Xorshift64star pseudo random number generator for Erlang.
+pkg_exs64_homepage = https://github.com/jj1bdx/exs64
+pkg_exs64_fetch = git
+pkg_exs64_repo = https://github.com/jj1bdx/exs64
+pkg_exs64_commit = master
+
+PACKAGES += exsplus116
+pkg_exsplus116_name = exsplus116
+pkg_exsplus116_description = Xorshift116plus for Erlang
+pkg_exsplus116_homepage = https://github.com/jj1bdx/exsplus116
+pkg_exsplus116_fetch = git
+pkg_exsplus116_repo = https://github.com/jj1bdx/exsplus116
+pkg_exsplus116_commit = master
+
+PACKAGES += exsplus128
+pkg_exsplus128_name = exsplus128
+pkg_exsplus128_description = Xorshift128plus pseudo random number generator for Erlang.
+pkg_exsplus128_homepage = https://github.com/jj1bdx/exsplus128
+pkg_exsplus128_fetch = git
+pkg_exsplus128_repo = https://github.com/jj1bdx/exsplus128
+pkg_exsplus128_commit = master
+
+PACKAGES += ezmq
+pkg_ezmq_name = ezmq
+pkg_ezmq_description = zMQ implemented in Erlang
+pkg_ezmq_homepage = https://github.com/RoadRunnr/ezmq
+pkg_ezmq_fetch = git
+pkg_ezmq_repo = https://github.com/RoadRunnr/ezmq
+pkg_ezmq_commit = master
+
+PACKAGES += ezmtp
+pkg_ezmtp_name = ezmtp
+pkg_ezmtp_description = ZMTP protocol in pure Erlang.
+pkg_ezmtp_homepage = https://github.com/a13x/ezmtp
+pkg_ezmtp_fetch = git
+pkg_ezmtp_repo = https://github.com/a13x/ezmtp
+pkg_ezmtp_commit = master
+
+PACKAGES += fast_disk_log
+pkg_fast_disk_log_name = fast_disk_log
+pkg_fast_disk_log_description = Pool-based asynchronous Erlang disk logger
+pkg_fast_disk_log_homepage = https://github.com/lpgauth/fast_disk_log
+pkg_fast_disk_log_fetch = git
+pkg_fast_disk_log_repo = https://github.com/lpgauth/fast_disk_log
+pkg_fast_disk_log_commit = master
+
+PACKAGES += feeder
+pkg_feeder_name = feeder
+pkg_feeder_description = Stream parse RSS and Atom formatted XML feeds.
+pkg_feeder_homepage = https://github.com/michaelnisi/feeder
+pkg_feeder_fetch = git
+pkg_feeder_repo = https://github.com/michaelnisi/feeder
+pkg_feeder_commit = master
+
+PACKAGES += find_crate
+pkg_find_crate_name = find_crate
+pkg_find_crate_description = Find Rust libs and exes in Erlang application priv directory
+pkg_find_crate_homepage = https://github.com/goertzenator/find_crate
+pkg_find_crate_fetch = git
+pkg_find_crate_repo = https://github.com/goertzenator/find_crate
+pkg_find_crate_commit = master
+
+PACKAGES += fix
+pkg_fix_name = fix
+pkg_fix_description = FIX protocol (http://fixprotocol.org/) implementation.
+pkg_fix_homepage = https://github.com/maxlapshin/fix
+pkg_fix_fetch = git
+pkg_fix_repo = https://github.com/maxlapshin/fix
+pkg_fix_commit = master
+
+PACKAGES += flower
+pkg_flower_name = flower
+pkg_flower_description = FlowER - an Erlang OpenFlow development platform
+pkg_flower_homepage = https://github.com/travelping/flower
+pkg_flower_fetch = git
+pkg_flower_repo = https://github.com/travelping/flower
+pkg_flower_commit = master
+
+PACKAGES += fn
+pkg_fn_name = fn
+pkg_fn_description = Function utilities for Erlang
+pkg_fn_homepage = https://github.com/reiddraper/fn
+pkg_fn_fetch = git
+pkg_fn_repo = https://github.com/reiddraper/fn
+pkg_fn_commit = master
+
+PACKAGES += folsom
+pkg_folsom_name = folsom
+pkg_folsom_description = Expose Erlang Events and Metrics
+pkg_folsom_homepage = https://github.com/boundary/folsom
+pkg_folsom_fetch = git
+pkg_folsom_repo = https://github.com/boundary/folsom
+pkg_folsom_commit = master
+
+PACKAGES += folsom_cowboy
+pkg_folsom_cowboy_name = folsom_cowboy
+pkg_folsom_cowboy_description = A Cowboy based Folsom HTTP Wrapper.
+pkg_folsom_cowboy_homepage = https://github.com/boundary/folsom_cowboy
+pkg_folsom_cowboy_fetch = git
+pkg_folsom_cowboy_repo = https://github.com/boundary/folsom_cowboy
+pkg_folsom_cowboy_commit = master
+
+PACKAGES += folsomite
+pkg_folsomite_name = folsomite
+pkg_folsomite_description = blow up your graphite / riemann server with folsom metrics
+pkg_folsomite_homepage = https://github.com/campanja/folsomite
+pkg_folsomite_fetch = git
+pkg_folsomite_repo = https://github.com/campanja/folsomite
+pkg_folsomite_commit = master
+
+PACKAGES += fs
+pkg_fs_name = fs
+pkg_fs_description = Erlang FileSystem Listener
+pkg_fs_homepage = https://github.com/synrc/fs
+pkg_fs_fetch = git
+pkg_fs_repo = https://github.com/synrc/fs
+pkg_fs_commit = master
+
+PACKAGES += fuse
+pkg_fuse_name = fuse
+pkg_fuse_description = A Circuit Breaker for Erlang
+pkg_fuse_homepage = https://github.com/jlouis/fuse
+pkg_fuse_fetch = git
+pkg_fuse_repo = https://github.com/jlouis/fuse
+pkg_fuse_commit = master
+
+PACKAGES += gcm
+pkg_gcm_name = gcm
+pkg_gcm_description = An Erlang application for Google Cloud Messaging
+pkg_gcm_homepage = https://github.com/pdincau/gcm-erlang
+pkg_gcm_fetch = git
+pkg_gcm_repo = https://github.com/pdincau/gcm-erlang
+pkg_gcm_commit = master
+
+PACKAGES += gcprof
+pkg_gcprof_name = gcprof
+pkg_gcprof_description = Garbage Collection profiler for Erlang
+pkg_gcprof_homepage = https://github.com/knutin/gcprof
+pkg_gcprof_fetch = git
+pkg_gcprof_repo = https://github.com/knutin/gcprof
+pkg_gcprof_commit = master
+
+PACKAGES += geas
+pkg_geas_name = geas
+pkg_geas_description = Guess Erlang Application Scattering
+pkg_geas_homepage = https://github.com/crownedgrouse/geas
+pkg_geas_fetch = git
+pkg_geas_repo = https://github.com/crownedgrouse/geas
+pkg_geas_commit = master
+
+PACKAGES += geef
+pkg_geef_name = geef
+pkg_geef_description = Git NEEEEF (Erlang NIF)
+pkg_geef_homepage = https://github.com/carlosmn/geef
+pkg_geef_fetch = git
+pkg_geef_repo = https://github.com/carlosmn/geef
+pkg_geef_commit = master
+
+PACKAGES += gen_coap
+pkg_gen_coap_name = gen_coap
+pkg_gen_coap_description = Generic Erlang CoAP Client/Server
+pkg_gen_coap_homepage = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_fetch = git
+pkg_gen_coap_repo = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_commit = master
+
+PACKAGES += gen_cycle
+pkg_gen_cycle_name = gen_cycle
+pkg_gen_cycle_description = Simple, generic OTP behaviour for recurring tasks
+pkg_gen_cycle_homepage = https://github.com/aerosol/gen_cycle
+pkg_gen_cycle_fetch = git
+pkg_gen_cycle_repo = https://github.com/aerosol/gen_cycle
+pkg_gen_cycle_commit = develop
+
+PACKAGES += gen_icmp
+pkg_gen_icmp_name = gen_icmp
+pkg_gen_icmp_description = Erlang interface to ICMP sockets
+pkg_gen_icmp_homepage = https://github.com/msantos/gen_icmp
+pkg_gen_icmp_fetch = git
+pkg_gen_icmp_repo = https://github.com/msantos/gen_icmp
+pkg_gen_icmp_commit = master
+
+PACKAGES += gen_leader
+pkg_gen_leader_name = gen_leader
+pkg_gen_leader_description = leader election behavior
+pkg_gen_leader_homepage = https://github.com/garret-smith/gen_leader_revival
+pkg_gen_leader_fetch = git
+pkg_gen_leader_repo = https://github.com/garret-smith/gen_leader_revival
+pkg_gen_leader_commit = master
+
+PACKAGES += gen_nb_server
+pkg_gen_nb_server_name = gen_nb_server
+pkg_gen_nb_server_description = OTP behavior for writing non-blocking servers
+pkg_gen_nb_server_homepage = https://github.com/kevsmith/gen_nb_server
+pkg_gen_nb_server_fetch = git
+pkg_gen_nb_server_repo = https://github.com/kevsmith/gen_nb_server
+pkg_gen_nb_server_commit = master
+
+PACKAGES += gen_paxos
+pkg_gen_paxos_name = gen_paxos
+pkg_gen_paxos_description = An Erlang/OTP-style implementation of the PAXOS distributed consensus protocol
+pkg_gen_paxos_homepage = https://github.com/gburd/gen_paxos
+pkg_gen_paxos_fetch = git
+pkg_gen_paxos_repo = https://github.com/gburd/gen_paxos
+pkg_gen_paxos_commit = master
+
+PACKAGES += gen_rpc
+pkg_gen_rpc_name = gen_rpc
+pkg_gen_rpc_description = A scalable RPC library for Erlang-VM based languages
+pkg_gen_rpc_homepage = https://github.com/priestjim/gen_rpc
+pkg_gen_rpc_fetch = git
+pkg_gen_rpc_repo = https://github.com/priestjim/gen_rpc.git
+pkg_gen_rpc_commit = master
+
+PACKAGES += gen_smtp
+pkg_gen_smtp_name = gen_smtp
+pkg_gen_smtp_description = A generic Erlang SMTP server and client that can be extended via callback modules
+pkg_gen_smtp_homepage = https://github.com/Vagabond/gen_smtp
+pkg_gen_smtp_fetch = git
+pkg_gen_smtp_repo = https://github.com/Vagabond/gen_smtp
+pkg_gen_smtp_commit = master
+
+PACKAGES += gen_tracker
+pkg_gen_tracker_name = gen_tracker
+pkg_gen_tracker_description = supervisor with ets handling of children and their metadata
+pkg_gen_tracker_homepage = https://github.com/erlyvideo/gen_tracker
+pkg_gen_tracker_fetch = git
+pkg_gen_tracker_repo = https://github.com/erlyvideo/gen_tracker
+pkg_gen_tracker_commit = master
+
+PACKAGES += gen_unix
+pkg_gen_unix_name = gen_unix
+pkg_gen_unix_description = Erlang Unix socket interface
+pkg_gen_unix_homepage = https://github.com/msantos/gen_unix
+pkg_gen_unix_fetch = git
+pkg_gen_unix_repo = https://github.com/msantos/gen_unix
+pkg_gen_unix_commit = master
+
+PACKAGES += geode
+pkg_geode_name = geode
+pkg_geode_description = geohash/proximity lookup in pure, uncut erlang.
+pkg_geode_homepage = https://github.com/bradfordw/geode
+pkg_geode_fetch = git
+pkg_geode_repo = https://github.com/bradfordw/geode
+pkg_geode_commit = master
+
+PACKAGES += getopt
+pkg_getopt_name = getopt
+pkg_getopt_description = Module to parse command line arguments using the GNU getopt syntax
+pkg_getopt_homepage = https://github.com/jcomellas/getopt
+pkg_getopt_fetch = git
+pkg_getopt_repo = https://github.com/jcomellas/getopt
+pkg_getopt_commit = master
+
+PACKAGES += gettext
+pkg_gettext_name = gettext
+pkg_gettext_description = Erlang internationalization library.
+pkg_gettext_homepage = https://github.com/etnt/gettext
+pkg_gettext_fetch = git
+pkg_gettext_repo = https://github.com/etnt/gettext
+pkg_gettext_commit = master
+
+PACKAGES += giallo
+pkg_giallo_name = giallo
+pkg_giallo_description = Small and flexible web framework on top of Cowboy
+pkg_giallo_homepage = https://github.com/kivra/giallo
+pkg_giallo_fetch = git
+pkg_giallo_repo = https://github.com/kivra/giallo
+pkg_giallo_commit = master
+
+PACKAGES += gin
+pkg_gin_name = gin
+pkg_gin_description = Guard expression helpers for Erlang, implemented as a parse_transform
+pkg_gin_homepage = https://github.com/mad-cocktail/gin
+pkg_gin_fetch = git
+pkg_gin_repo = https://github.com/mad-cocktail/gin
+pkg_gin_commit = master
+
+PACKAGES += gitty
+pkg_gitty_name = gitty
+pkg_gitty_description = Git access in erlang
+pkg_gitty_homepage = https://github.com/maxlapshin/gitty
+pkg_gitty_fetch = git
+pkg_gitty_repo = https://github.com/maxlapshin/gitty
+pkg_gitty_commit = master
+
+PACKAGES += gold_fever
+pkg_gold_fever_name = gold_fever
+pkg_gold_fever_description = A Treasure Hunt for Erlangers
+pkg_gold_fever_homepage = https://github.com/inaka/gold_fever
+pkg_gold_fever_fetch = git
+pkg_gold_fever_repo = https://github.com/inaka/gold_fever
+pkg_gold_fever_commit = master
+
+PACKAGES += gpb
+pkg_gpb_name = gpb
+pkg_gpb_description = A Google Protobuf implementation for Erlang
+pkg_gpb_homepage = https://github.com/tomas-abrahamsson/gpb
+pkg_gpb_fetch = git
+pkg_gpb_repo = https://github.com/tomas-abrahamsson/gpb
+pkg_gpb_commit = master
+
+PACKAGES += gproc
+pkg_gproc_name = gproc
+pkg_gproc_description = Extended process registry for Erlang
+pkg_gproc_homepage = https://github.com/uwiger/gproc
+pkg_gproc_fetch = git
+pkg_gproc_repo = https://github.com/uwiger/gproc
+pkg_gproc_commit = master
+
+PACKAGES += grapherl
+pkg_grapherl_name = grapherl
+pkg_grapherl_description = Create graphs of Erlang systems and programs
+pkg_grapherl_homepage = https://github.com/eproxus/grapherl
+pkg_grapherl_fetch = git
+pkg_grapherl_repo = https://github.com/eproxus/grapherl
+pkg_grapherl_commit = master
+
+PACKAGES += grpc
+pkg_grpc_name = grpc
+pkg_grpc_description = gRPC server in Erlang
+pkg_grpc_homepage = https://github.com/Bluehouse-Technology/grpc
+pkg_grpc_fetch = git
+pkg_grpc_repo = https://github.com/Bluehouse-Technology/grpc
+pkg_grpc_commit = master
+
+PACKAGES += grpc_client
+pkg_grpc_client_name = grpc_client
+pkg_grpc_client_description = gRPC client in Erlang
+pkg_grpc_client_homepage = https://github.com/Bluehouse-Technology/grpc_client
+pkg_grpc_client_fetch = git
+pkg_grpc_client_repo = https://github.com/Bluehouse-Technology/grpc_client
+pkg_grpc_client_commit = master
+
+PACKAGES += gun
+pkg_gun_name = gun
+pkg_gun_description = Asynchronous SPDY, HTTP and Websocket client written in Erlang.
+pkg_gun_homepage = http://ninenines.eu
+pkg_gun_fetch = git
+pkg_gun_repo = https://github.com/ninenines/gun
+pkg_gun_commit = master
+
+PACKAGES += gut
+pkg_gut_name = gut
+pkg_gut_description = gut is a template printing, aka scaffolding, tool for Erlang. Like rails generate or yeoman
+pkg_gut_homepage = https://github.com/unbalancedparentheses/gut
+pkg_gut_fetch = git
+pkg_gut_repo = https://github.com/unbalancedparentheses/gut
+pkg_gut_commit = master
+
+PACKAGES += hackney
+pkg_hackney_name = hackney
+pkg_hackney_description = simple HTTP client in Erlang
+pkg_hackney_homepage = https://github.com/benoitc/hackney
+pkg_hackney_fetch = git
+pkg_hackney_repo = https://github.com/benoitc/hackney
+pkg_hackney_commit = master
+
+PACKAGES += hamcrest
+pkg_hamcrest_name = hamcrest
+pkg_hamcrest_description = Erlang port of Hamcrest
+pkg_hamcrest_homepage = https://github.com/hyperthunk/hamcrest-erlang
+pkg_hamcrest_fetch = git
+pkg_hamcrest_repo = https://github.com/hyperthunk/hamcrest-erlang
+pkg_hamcrest_commit = master
+
+PACKAGES += hanoidb
+pkg_hanoidb_name = hanoidb
+pkg_hanoidb_description = Erlang LSM BTree Storage
+pkg_hanoidb_homepage = https://github.com/krestenkrab/hanoidb
+pkg_hanoidb_fetch = git
+pkg_hanoidb_repo = https://github.com/krestenkrab/hanoidb
+pkg_hanoidb_commit = master
+
+PACKAGES += hottub
+pkg_hottub_name = hottub
+pkg_hottub_description = Permanent Erlang Worker Pool
+pkg_hottub_homepage = https://github.com/bfrog/hottub
+pkg_hottub_fetch = git
+pkg_hottub_repo = https://github.com/bfrog/hottub
+pkg_hottub_commit = master
+
+PACKAGES += hpack
+pkg_hpack_name = hpack
+pkg_hpack_description = HPACK Implementation for Erlang
+pkg_hpack_homepage = https://github.com/joedevivo/hpack
+pkg_hpack_fetch = git
+pkg_hpack_repo = https://github.com/joedevivo/hpack
+pkg_hpack_commit = master
+
+PACKAGES += hyper
+pkg_hyper_name = hyper
+pkg_hyper_description = Erlang implementation of HyperLogLog
+pkg_hyper_homepage = https://github.com/GameAnalytics/hyper
+pkg_hyper_fetch = git
+pkg_hyper_repo = https://github.com/GameAnalytics/hyper
+pkg_hyper_commit = master
+
+PACKAGES += i18n
+pkg_i18n_name = i18n
+pkg_i18n_description = International components for unicode from Erlang (unicode, date, string, number, format, locale, localization, transliteration, icu4e)
+pkg_i18n_homepage = https://github.com/erlang-unicode/i18n
+pkg_i18n_fetch = git
+pkg_i18n_repo = https://github.com/erlang-unicode/i18n
+pkg_i18n_commit = master
+
+PACKAGES += ibrowse
+pkg_ibrowse_name = ibrowse
+pkg_ibrowse_description = Erlang HTTP client
+pkg_ibrowse_homepage = https://github.com/cmullaparthi/ibrowse
+pkg_ibrowse_fetch = git
+pkg_ibrowse_repo = https://github.com/cmullaparthi/ibrowse
+pkg_ibrowse_commit = master
+
+PACKAGES += idna
+pkg_idna_name = idna
+pkg_idna_description = Erlang IDNA lib
+pkg_idna_homepage = https://github.com/benoitc/erlang-idna
+pkg_idna_fetch = git
+pkg_idna_repo = https://github.com/benoitc/erlang-idna
+pkg_idna_commit = master
+
+PACKAGES += ierlang
+pkg_ierlang_name = ierlang
+pkg_ierlang_description = An Erlang language kernel for IPython.
+pkg_ierlang_homepage = https://github.com/robbielynch/ierlang
+pkg_ierlang_fetch = git
+pkg_ierlang_repo = https://github.com/robbielynch/ierlang
+pkg_ierlang_commit = master
+
+PACKAGES += iota
+pkg_iota_name = iota
+pkg_iota_description = iota (Inter-dependency Objective Testing Apparatus) - a tool to enforce clean separation of responsibilities in Erlang code
+pkg_iota_homepage = https://github.com/jpgneves/iota
+pkg_iota_fetch = git
+pkg_iota_repo = https://github.com/jpgneves/iota
+pkg_iota_commit = master
+
+PACKAGES += irc_lib
+pkg_irc_lib_name = irc_lib
+pkg_irc_lib_description = Erlang IRC client library
+pkg_irc_lib_homepage = https://github.com/OtpChatBot/irc_lib
+pkg_irc_lib_fetch = git
+pkg_irc_lib_repo = https://github.com/OtpChatBot/irc_lib
+pkg_irc_lib_commit = master
+
+PACKAGES += ircd
+pkg_ircd_name = ircd
+pkg_ircd_description = A pluggable IRC daemon application/library for Erlang.
+pkg_ircd_homepage = https://github.com/tonyg/erlang-ircd
+pkg_ircd_fetch = git
+pkg_ircd_repo = https://github.com/tonyg/erlang-ircd
+pkg_ircd_commit = master
+
+PACKAGES += iris
+pkg_iris_name = iris
+pkg_iris_description = Iris Erlang binding
+pkg_iris_homepage = https://github.com/project-iris/iris-erl
+pkg_iris_fetch = git
+pkg_iris_repo = https://github.com/project-iris/iris-erl
+pkg_iris_commit = master
+
+PACKAGES += iso8601
+pkg_iso8601_name = iso8601
+pkg_iso8601_description = Erlang ISO 8601 date formatter/parser
+pkg_iso8601_homepage = https://github.com/seansawyer/erlang_iso8601
+pkg_iso8601_fetch = git
+pkg_iso8601_repo = https://github.com/seansawyer/erlang_iso8601
+pkg_iso8601_commit = master
+
+PACKAGES += jamdb_sybase
+pkg_jamdb_sybase_name = jamdb_sybase
+pkg_jamdb_sybase_description = Erlang driver for SAP Sybase ASE
+pkg_jamdb_sybase_homepage = https://github.com/erlangbureau/jamdb_sybase
+pkg_jamdb_sybase_fetch = git
+pkg_jamdb_sybase_repo = https://github.com/erlangbureau/jamdb_sybase
+pkg_jamdb_sybase_commit = master
+
+PACKAGES += jerg
+pkg_jerg_name = jerg
+pkg_jerg_description = JSON Schema to Erlang Records Generator
+pkg_jerg_homepage = https://github.com/ddossot/jerg
+pkg_jerg_fetch = git
+pkg_jerg_repo = https://github.com/ddossot/jerg
+pkg_jerg_commit = master
+
+PACKAGES += jesse
+pkg_jesse_name = jesse
+pkg_jesse_description = jesse (JSON Schema Erlang) is an implementation of a JSON Schema validator for Erlang.
+pkg_jesse_homepage = https://github.com/for-GET/jesse
+pkg_jesse_fetch = git
+pkg_jesse_repo = https://github.com/for-GET/jesse
+pkg_jesse_commit = master
+
+PACKAGES += jiffy
+pkg_jiffy_name = jiffy
+pkg_jiffy_description = JSON NIFs for Erlang.
+pkg_jiffy_homepage = https://github.com/davisp/jiffy
+pkg_jiffy_fetch = git
+pkg_jiffy_repo = https://github.com/davisp/jiffy
+pkg_jiffy_commit = master
+
+PACKAGES += jiffy_v
+pkg_jiffy_v_name = jiffy_v
+pkg_jiffy_v_description = JSON validation utility
+pkg_jiffy_v_homepage = https://github.com/shizzard/jiffy-v
+pkg_jiffy_v_fetch = git
+pkg_jiffy_v_repo = https://github.com/shizzard/jiffy-v
+pkg_jiffy_v_commit = master
+
+PACKAGES += jobs
+pkg_jobs_name = jobs
+pkg_jobs_description = A job scheduler for load regulation
+pkg_jobs_homepage = https://github.com/esl/jobs
+pkg_jobs_fetch = git
+pkg_jobs_repo = https://github.com/esl/jobs
+pkg_jobs_commit = master
+
+PACKAGES += joxa
+pkg_joxa_name = joxa
+pkg_joxa_description = A Modern Lisp for the Erlang VM
+pkg_joxa_homepage = https://github.com/joxa/joxa
+pkg_joxa_fetch = git
+pkg_joxa_repo = https://github.com/joxa/joxa
+pkg_joxa_commit = master
+
+PACKAGES += json
+pkg_json_name = json
+pkg_json_description = a high level json library for erlang (17.0+)
+pkg_json_homepage = https://github.com/talentdeficit/json
+pkg_json_fetch = git
+pkg_json_repo = https://github.com/talentdeficit/json
+pkg_json_commit = master
+
+PACKAGES += json_rec
+pkg_json_rec_name = json_rec
+pkg_json_rec_description = JSON to erlang record
+pkg_json_rec_homepage = https://github.com/justinkirby/json_rec
+pkg_json_rec_fetch = git
+pkg_json_rec_repo = https://github.com/justinkirby/json_rec
+pkg_json_rec_commit = master
+
+PACKAGES += jsone
+pkg_jsone_name = jsone
+pkg_jsone_description = An Erlang library for encoding and decoding JSON data.
+pkg_jsone_homepage = https://github.com/sile/jsone
+pkg_jsone_fetch = git
+pkg_jsone_repo = https://github.com/sile/jsone.git
+pkg_jsone_commit = master
+
+PACKAGES += jsonerl
+pkg_jsonerl_name = jsonerl
+pkg_jsonerl_description = yet another but slightly different erlang <-> json encoder/decoder
+pkg_jsonerl_homepage = https://github.com/lambder/jsonerl
+pkg_jsonerl_fetch = git
+pkg_jsonerl_repo = https://github.com/lambder/jsonerl
+pkg_jsonerl_commit = master
+
+PACKAGES += jsonpath
+pkg_jsonpath_name = jsonpath
+pkg_jsonpath_description = Fast Erlang JSON data retrieval and updates via javascript-like notation
+pkg_jsonpath_homepage = https://github.com/GeneStevens/jsonpath
+pkg_jsonpath_fetch = git
+pkg_jsonpath_repo = https://github.com/GeneStevens/jsonpath
+pkg_jsonpath_commit = master
+
+PACKAGES += jsonx
+pkg_jsonx_name = jsonx
+pkg_jsonx_description = JSONX is an Erlang library, written in C, for efficiently decoding and encoding JSON.
+pkg_jsonx_homepage = https://github.com/iskra/jsonx
+pkg_jsonx_fetch = git
+pkg_jsonx_repo = https://github.com/iskra/jsonx
+pkg_jsonx_commit = master
+
+PACKAGES += jsx
+pkg_jsx_name = jsx
+pkg_jsx_description = An Erlang application for consuming, producing and manipulating JSON.
+pkg_jsx_homepage = https://github.com/talentdeficit/jsx
+pkg_jsx_fetch = git
+pkg_jsx_repo = https://github.com/talentdeficit/jsx
+pkg_jsx_commit = master
+
+PACKAGES += kafka
+pkg_kafka_name = kafka
+pkg_kafka_description = Kafka consumer and producer in Erlang
+pkg_kafka_homepage = https://github.com/wooga/kafka-erlang
+pkg_kafka_fetch = git
+pkg_kafka_repo = https://github.com/wooga/kafka-erlang
+pkg_kafka_commit = master
+
+PACKAGES += kafka_protocol
+pkg_kafka_protocol_name = kafka_protocol
+pkg_kafka_protocol_description = Kafka protocol Erlang library
+pkg_kafka_protocol_homepage = https://github.com/klarna/kafka_protocol
+pkg_kafka_protocol_fetch = git
+pkg_kafka_protocol_repo = https://github.com/klarna/kafka_protocol.git
+pkg_kafka_protocol_commit = master
+
+PACKAGES += kai
+pkg_kai_name = kai
+pkg_kai_description = DHT storage by Takeshi Inoue
+pkg_kai_homepage = https://github.com/synrc/kai
+pkg_kai_fetch = git
+pkg_kai_repo = https://github.com/synrc/kai
+pkg_kai_commit = master
+
+PACKAGES += katja
+pkg_katja_name = katja
+pkg_katja_description = A simple Riemann client written in Erlang.
+pkg_katja_homepage = https://github.com/nifoc/katja
+pkg_katja_fetch = git
+pkg_katja_repo = https://github.com/nifoc/katja
+pkg_katja_commit = master
+
+PACKAGES += kdht
+pkg_kdht_name = kdht
+pkg_kdht_description = kdht is an erlang DHT implementation
+pkg_kdht_homepage = https://github.com/kevinlynx/kdht
+pkg_kdht_fetch = git
+pkg_kdht_repo = https://github.com/kevinlynx/kdht
+pkg_kdht_commit = master
+
+PACKAGES += key2value
+pkg_key2value_name = key2value
+pkg_key2value_description = Erlang 2-way map
+pkg_key2value_homepage = https://github.com/okeuday/key2value
+pkg_key2value_fetch = git
+pkg_key2value_repo = https://github.com/okeuday/key2value
+pkg_key2value_commit = master
+
+PACKAGES += keys1value
+pkg_keys1value_name = keys1value
+pkg_keys1value_description = Erlang set associative map for key lists
+pkg_keys1value_homepage = https://github.com/okeuday/keys1value
+pkg_keys1value_fetch = git
+pkg_keys1value_repo = https://github.com/okeuday/keys1value
+pkg_keys1value_commit = master
+
+PACKAGES += kinetic
+pkg_kinetic_name = kinetic
+pkg_kinetic_description = Erlang Kinesis Client
+pkg_kinetic_homepage = https://github.com/AdRoll/kinetic
+pkg_kinetic_fetch = git
+pkg_kinetic_repo = https://github.com/AdRoll/kinetic
+pkg_kinetic_commit = master
+
+PACKAGES += kjell
+pkg_kjell_name = kjell
+pkg_kjell_description = Erlang Shell
+pkg_kjell_homepage = https://github.com/karlll/kjell
+pkg_kjell_fetch = git
+pkg_kjell_repo = https://github.com/karlll/kjell
+pkg_kjell_commit = master
+
+PACKAGES += kraken
+pkg_kraken_name = kraken
+pkg_kraken_description = Distributed Pubsub Server for Realtime Apps
+pkg_kraken_homepage = https://github.com/Asana/kraken
+pkg_kraken_fetch = git
+pkg_kraken_repo = https://github.com/Asana/kraken
+pkg_kraken_commit = master
+
+PACKAGES += kucumberl
+pkg_kucumberl_name = kucumberl
+pkg_kucumberl_description = A pure-Erlang, open-source implementation of Cucumber
+pkg_kucumberl_homepage = https://github.com/openshine/kucumberl
+pkg_kucumberl_fetch = git
+pkg_kucumberl_repo = https://github.com/openshine/kucumberl
+pkg_kucumberl_commit = master
+
+PACKAGES += kvc
+pkg_kvc_name = kvc
+pkg_kvc_description = KVC - Key Value Coding for Erlang data structures
+pkg_kvc_homepage = https://github.com/etrepum/kvc
+pkg_kvc_fetch = git
+pkg_kvc_repo = https://github.com/etrepum/kvc
+pkg_kvc_commit = master
+
+PACKAGES += kvlists
+pkg_kvlists_name = kvlists
+pkg_kvlists_description = Lists of key-value pairs (decoded JSON) in Erlang
+pkg_kvlists_homepage = https://github.com/jcomellas/kvlists
+pkg_kvlists_fetch = git
+pkg_kvlists_repo = https://github.com/jcomellas/kvlists
+pkg_kvlists_commit = master
+
+PACKAGES += kvs
+pkg_kvs_name = kvs
+pkg_kvs_description = Container and Iterator
+pkg_kvs_homepage = https://github.com/synrc/kvs
+pkg_kvs_fetch = git
+pkg_kvs_repo = https://github.com/synrc/kvs
+pkg_kvs_commit = master
+
+PACKAGES += lager
+pkg_lager_name = lager
+pkg_lager_description = A logging framework for Erlang/OTP.
+pkg_lager_homepage = https://github.com/erlang-lager/lager
+pkg_lager_fetch = git
+pkg_lager_repo = https://github.com/erlang-lager/lager
+pkg_lager_commit = master
+
+PACKAGES += lager_amqp_backend
+pkg_lager_amqp_backend_name = lager_amqp_backend
+pkg_lager_amqp_backend_description = AMQP RabbitMQ Lager backend
+pkg_lager_amqp_backend_homepage = https://github.com/jbrisbin/lager_amqp_backend
+pkg_lager_amqp_backend_fetch = git
+pkg_lager_amqp_backend_repo = https://github.com/jbrisbin/lager_amqp_backend
+pkg_lager_amqp_backend_commit = master
+
+PACKAGES += lager_syslog
+pkg_lager_syslog_name = lager_syslog
+pkg_lager_syslog_description = Syslog backend for lager
+pkg_lager_syslog_homepage = https://github.com/erlang-lager/lager_syslog
+pkg_lager_syslog_fetch = git
+pkg_lager_syslog_repo = https://github.com/erlang-lager/lager_syslog
+pkg_lager_syslog_commit = master
+
+PACKAGES += lambdapad
+pkg_lambdapad_name = lambdapad
+pkg_lambdapad_description = Static site generator using Erlang. Yes, Erlang.
+pkg_lambdapad_homepage = https://github.com/gar1t/lambdapad
+pkg_lambdapad_fetch = git
+pkg_lambdapad_repo = https://github.com/gar1t/lambdapad
+pkg_lambdapad_commit = master
+
+PACKAGES += lasp
+pkg_lasp_name = lasp
+pkg_lasp_description = A Language for Distributed, Eventually Consistent Computations
+pkg_lasp_homepage = http://lasp-lang.org/
+pkg_lasp_fetch = git
+pkg_lasp_repo = https://github.com/lasp-lang/lasp
+pkg_lasp_commit = master
+
+PACKAGES += lasse
+pkg_lasse_name = lasse
+pkg_lasse_description = SSE handler for Cowboy
+pkg_lasse_homepage = https://github.com/inaka/lasse
+pkg_lasse_fetch = git
+pkg_lasse_repo = https://github.com/inaka/lasse
+pkg_lasse_commit = master
+
+PACKAGES += ldap
+pkg_ldap_name = ldap
+pkg_ldap_description = LDAP server written in Erlang
+pkg_ldap_homepage = https://github.com/spawnproc/ldap
+pkg_ldap_fetch = git
+pkg_ldap_repo = https://github.com/spawnproc/ldap
+pkg_ldap_commit = master
+
+PACKAGES += lethink
+pkg_lethink_name = lethink
+pkg_lethink_description = Erlang driver for RethinkDB
+pkg_lethink_homepage = https://github.com/taybin/lethink
+pkg_lethink_fetch = git
+pkg_lethink_repo = https://github.com/taybin/lethink
+pkg_lethink_commit = master
+
+PACKAGES += lfe
+pkg_lfe_name = lfe
+pkg_lfe_description = Lisp Flavoured Erlang (LFE)
+pkg_lfe_homepage = https://github.com/rvirding/lfe
+pkg_lfe_fetch = git
+pkg_lfe_repo = https://github.com/rvirding/lfe
+pkg_lfe_commit = master
+
+PACKAGES += ling
+pkg_ling_name = ling
+pkg_ling_description = Erlang on Xen
+pkg_ling_homepage = https://github.com/cloudozer/ling
+pkg_ling_fetch = git
+pkg_ling_repo = https://github.com/cloudozer/ling
+pkg_ling_commit = master
+
+PACKAGES += live
+pkg_live_name = live
+pkg_live_description = Automated module and configuration reloader.
+pkg_live_homepage = http://ninenines.eu
+pkg_live_fetch = git
+pkg_live_repo = https://github.com/ninenines/live
+pkg_live_commit = master
+
+PACKAGES += lmq
+pkg_lmq_name = lmq
+pkg_lmq_description = Lightweight Message Queue
+pkg_lmq_homepage = https://github.com/iij/lmq
+pkg_lmq_fetch = git
+pkg_lmq_repo = https://github.com/iij/lmq
+pkg_lmq_commit = master
+
+PACKAGES += locker
+pkg_locker_name = locker
+pkg_locker_description = Atomic distributed 'check and set' for short-lived keys
+pkg_locker_homepage = https://github.com/wooga/locker
+pkg_locker_fetch = git
+pkg_locker_repo = https://github.com/wooga/locker
+pkg_locker_commit = master
+
+PACKAGES += locks
+pkg_locks_name = locks
+pkg_locks_description = A scalable, deadlock-resolving resource locker
+pkg_locks_homepage = https://github.com/uwiger/locks
+pkg_locks_fetch = git
+pkg_locks_repo = https://github.com/uwiger/locks
+pkg_locks_commit = master
+
+PACKAGES += log4erl
+pkg_log4erl_name = log4erl
+pkg_log4erl_description = A logger for Erlang in the spirit of Log4J.
+pkg_log4erl_homepage = https://github.com/ahmednawras/log4erl
+pkg_log4erl_fetch = git
+pkg_log4erl_repo = https://github.com/ahmednawras/log4erl
+pkg_log4erl_commit = master
+
+PACKAGES += lol
+pkg_lol_name = lol
+pkg_lol_description = Lisp on erLang, and programming is fun again
+pkg_lol_homepage = https://github.com/b0oh/lol
+pkg_lol_fetch = git
+pkg_lol_repo = https://github.com/b0oh/lol
+pkg_lol_commit = master
+
+PACKAGES += lucid
+pkg_lucid_name = lucid
+pkg_lucid_description = HTTP/2 server written in Erlang
+pkg_lucid_homepage = https://github.com/tatsuhiro-t/lucid
+pkg_lucid_fetch = git
+pkg_lucid_repo = https://github.com/tatsuhiro-t/lucid
+pkg_lucid_commit = master
+
+PACKAGES += luerl
+pkg_luerl_name = luerl
+pkg_luerl_description = Lua in Erlang
+pkg_luerl_homepage = https://github.com/rvirding/luerl
+pkg_luerl_fetch = git
+pkg_luerl_repo = https://github.com/rvirding/luerl
+pkg_luerl_commit = develop
+
+PACKAGES += luwak
+pkg_luwak_name = luwak
+pkg_luwak_description = Large-object storage interface for Riak
+pkg_luwak_homepage = https://github.com/basho/luwak
+pkg_luwak_fetch = git
+pkg_luwak_repo = https://github.com/basho/luwak
+pkg_luwak_commit = master
+
+PACKAGES += lux
+pkg_lux_name = lux
+pkg_lux_description = Lux (LUcid eXpect scripting) simplifies test automation and provides an Expect-style execution of commands
+pkg_lux_homepage = https://github.com/hawk/lux
+pkg_lux_fetch = git
+pkg_lux_repo = https://github.com/hawk/lux
+pkg_lux_commit = master
+
+PACKAGES += machi
+pkg_machi_name = machi
+pkg_machi_description = Machi file store
+pkg_machi_homepage = https://github.com/basho/machi
+pkg_machi_fetch = git
+pkg_machi_repo = https://github.com/basho/machi
+pkg_machi_commit = master
+
+PACKAGES += mad
+pkg_mad_name = mad
+pkg_mad_description = Small and Fast Rebar Replacement
+pkg_mad_homepage = https://github.com/synrc/mad
+pkg_mad_fetch = git
+pkg_mad_repo = https://github.com/synrc/mad
+pkg_mad_commit = master
+
+PACKAGES += marina
+pkg_marina_name = marina
+pkg_marina_description = Non-blocking Erlang Cassandra CQL3 client
+pkg_marina_homepage = https://github.com/lpgauth/marina
+pkg_marina_fetch = git
+pkg_marina_repo = https://github.com/lpgauth/marina
+pkg_marina_commit = master
+
+PACKAGES += mavg
+pkg_mavg_name = mavg
+pkg_mavg_description = Erlang :: Exponential moving average library
+pkg_mavg_homepage = https://github.com/EchoTeam/mavg
+pkg_mavg_fetch = git
+pkg_mavg_repo = https://github.com/EchoTeam/mavg
+pkg_mavg_commit = master
+
+PACKAGES += mc_erl
+pkg_mc_erl_name = mc_erl
+pkg_mc_erl_description = mc-erl is a server for Minecraft 1.4.7 written in Erlang.
+pkg_mc_erl_homepage = https://github.com/clonejo/mc-erl
+pkg_mc_erl_fetch = git
+pkg_mc_erl_repo = https://github.com/clonejo/mc-erl
+pkg_mc_erl_commit = master
+
+PACKAGES += mcd
+pkg_mcd_name = mcd
+pkg_mcd_description = Fast memcached protocol client in pure Erlang
+pkg_mcd_homepage = https://github.com/EchoTeam/mcd
+pkg_mcd_fetch = git
+pkg_mcd_repo = https://github.com/EchoTeam/mcd
+pkg_mcd_commit = master
+
+PACKAGES += mcerlang
+pkg_mcerlang_name = mcerlang
+pkg_mcerlang_description = The McErlang model checker for Erlang
+pkg_mcerlang_homepage = https://github.com/fredlund/McErlang
+pkg_mcerlang_fetch = git
+pkg_mcerlang_repo = https://github.com/fredlund/McErlang
+pkg_mcerlang_commit = master
+
+PACKAGES += meck
+pkg_meck_name = meck
+pkg_meck_description = A mocking library for Erlang
+pkg_meck_homepage = https://github.com/eproxus/meck
+pkg_meck_fetch = git
+pkg_meck_repo = https://github.com/eproxus/meck
+pkg_meck_commit = master
+
+PACKAGES += mekao
+pkg_mekao_name = mekao
+pkg_mekao_description = SQL constructor
+pkg_mekao_homepage = https://github.com/ddosia/mekao
+pkg_mekao_fetch = git
+pkg_mekao_repo = https://github.com/ddosia/mekao
+pkg_mekao_commit = master
+
+PACKAGES += memo
+pkg_memo_name = memo
+pkg_memo_description = Erlang memoization server
+pkg_memo_homepage = https://github.com/tuncer/memo
+pkg_memo_fetch = git
+pkg_memo_repo = https://github.com/tuncer/memo
+pkg_memo_commit = master
+
+PACKAGES += merge_index
+pkg_merge_index_name = merge_index
+pkg_merge_index_description = MergeIndex is an Erlang library for storing ordered sets on disk. It is very similar to an SSTable (in Google's Bigtable) or an HFile (in Hadoop).
+pkg_merge_index_homepage = https://github.com/basho/merge_index
+pkg_merge_index_fetch = git
+pkg_merge_index_repo = https://github.com/basho/merge_index
+pkg_merge_index_commit = master
+
+PACKAGES += merl
+pkg_merl_name = merl
+pkg_merl_description = Metaprogramming in Erlang
+pkg_merl_homepage = https://github.com/richcarl/merl
+pkg_merl_fetch = git
+pkg_merl_repo = https://github.com/richcarl/merl
+pkg_merl_commit = master
+
+PACKAGES += mimerl
+pkg_mimerl_name = mimerl
+pkg_mimerl_description = Library to handle MIME types
+pkg_mimerl_homepage = https://github.com/benoitc/mimerl
+pkg_mimerl_fetch = git
+pkg_mimerl_repo = https://github.com/benoitc/mimerl
+pkg_mimerl_commit = master
+
+PACKAGES += mimetypes
+pkg_mimetypes_name = mimetypes
+pkg_mimetypes_description = Erlang MIME types library
+pkg_mimetypes_homepage = https://github.com/spawngrid/mimetypes
+pkg_mimetypes_fetch = git
+pkg_mimetypes_repo = https://github.com/spawngrid/mimetypes
+pkg_mimetypes_commit = master
+
+PACKAGES += mixer
+pkg_mixer_name = mixer
+pkg_mixer_description = Mix in functions from other modules
+pkg_mixer_homepage = https://github.com/chef/mixer
+pkg_mixer_fetch = git
+pkg_mixer_repo = https://github.com/chef/mixer
+pkg_mixer_commit = master
+
+PACKAGES += mochiweb
+pkg_mochiweb_name = mochiweb
+pkg_mochiweb_description = MochiWeb is an Erlang library for building lightweight HTTP servers.
+pkg_mochiweb_homepage = https://github.com/mochi/mochiweb
+pkg_mochiweb_fetch = git
+pkg_mochiweb_repo = https://github.com/mochi/mochiweb
+pkg_mochiweb_commit = master
+
+PACKAGES += mochiweb_xpath
+pkg_mochiweb_xpath_name = mochiweb_xpath
+pkg_mochiweb_xpath_description = XPath support for mochiweb's html parser
+pkg_mochiweb_xpath_homepage = https://github.com/retnuh/mochiweb_xpath
+pkg_mochiweb_xpath_fetch = git
+pkg_mochiweb_xpath_repo = https://github.com/retnuh/mochiweb_xpath
+pkg_mochiweb_xpath_commit = master
+
+PACKAGES += mockgyver
+pkg_mockgyver_name = mockgyver
+pkg_mockgyver_description = A mocking library for Erlang
+pkg_mockgyver_homepage = https://github.com/klajo/mockgyver
+pkg_mockgyver_fetch = git
+pkg_mockgyver_repo = https://github.com/klajo/mockgyver
+pkg_mockgyver_commit = master
+
+PACKAGES += modlib
+pkg_modlib_name = modlib
+pkg_modlib_description = Web framework based on Erlang's inets httpd
+pkg_modlib_homepage = https://github.com/gar1t/modlib
+pkg_modlib_fetch = git
+pkg_modlib_repo = https://github.com/gar1t/modlib
+pkg_modlib_commit = master
+
+PACKAGES += mongodb
+pkg_mongodb_name = mongodb
+pkg_mongodb_description = MongoDB driver for Erlang
+pkg_mongodb_homepage = https://github.com/comtihon/mongodb-erlang
+pkg_mongodb_fetch = git
+pkg_mongodb_repo = https://github.com/comtihon/mongodb-erlang
+pkg_mongodb_commit = master
+
+PACKAGES += mongooseim
+pkg_mongooseim_name = mongooseim
+pkg_mongooseim_description = Jabber / XMPP server with focus on performance and scalability, by Erlang Solutions
+pkg_mongooseim_homepage = https://www.erlang-solutions.com/products/mongooseim-massively-scalable-ejabberd-platform
+pkg_mongooseim_fetch = git
+pkg_mongooseim_repo = https://github.com/esl/MongooseIM
+pkg_mongooseim_commit = master
+
+PACKAGES += moyo
+pkg_moyo_name = moyo
+pkg_moyo_description = Erlang utility functions library
+pkg_moyo_homepage = https://github.com/dwango/moyo
+pkg_moyo_fetch = git
+pkg_moyo_repo = https://github.com/dwango/moyo
+pkg_moyo_commit = master
+
+PACKAGES += msgpack
+pkg_msgpack_name = msgpack
+pkg_msgpack_description = MessagePack (de)serializer implementation for Erlang
+pkg_msgpack_homepage = https://github.com/msgpack/msgpack-erlang
+pkg_msgpack_fetch = git
+pkg_msgpack_repo = https://github.com/msgpack/msgpack-erlang
+pkg_msgpack_commit = master
+
+PACKAGES += mu2
+pkg_mu2_name = mu2
+pkg_mu2_description = Erlang mutation testing tool
+pkg_mu2_homepage = https://github.com/ramsay-t/mu2
+pkg_mu2_fetch = git
+pkg_mu2_repo = https://github.com/ramsay-t/mu2
+pkg_mu2_commit = master
+
+PACKAGES += mustache
+pkg_mustache_name = mustache
+pkg_mustache_description = Mustache template engine for Erlang.
+pkg_mustache_homepage = https://github.com/mojombo/mustache.erl
+pkg_mustache_fetch = git
+pkg_mustache_repo = https://github.com/mojombo/mustache.erl
+pkg_mustache_commit = master
+
+PACKAGES += myproto
+pkg_myproto_name = myproto
+pkg_myproto_description = MySQL Server Protocol in Erlang
+pkg_myproto_homepage = https://github.com/altenwald/myproto
+pkg_myproto_fetch = git
+pkg_myproto_repo = https://github.com/altenwald/myproto
+pkg_myproto_commit = master
+
+PACKAGES += mysql
+pkg_mysql_name = mysql
+pkg_mysql_description = MySQL client library for Erlang/OTP
+pkg_mysql_homepage = https://github.com/mysql-otp/mysql-otp
+pkg_mysql_fetch = git
+pkg_mysql_repo = https://github.com/mysql-otp/mysql-otp
+pkg_mysql_commit = 1.5.1
+
+PACKAGES += n2o
+pkg_n2o_name = n2o
+pkg_n2o_description = WebSocket Application Server
+pkg_n2o_homepage = https://github.com/5HT/n2o
+pkg_n2o_fetch = git
+pkg_n2o_repo = https://github.com/5HT/n2o
+pkg_n2o_commit = master
+
+PACKAGES += nat_upnp
+pkg_nat_upnp_name = nat_upnp
+pkg_nat_upnp_description = Erlang library to map your internal port to an external one using UPnP IGD
+pkg_nat_upnp_homepage = https://github.com/benoitc/nat_upnp
+pkg_nat_upnp_fetch = git
+pkg_nat_upnp_repo = https://github.com/benoitc/nat_upnp
+pkg_nat_upnp_commit = master
+
+PACKAGES += neo4j
+pkg_neo4j_name = neo4j
+pkg_neo4j_description = Erlang client library for Neo4J.
+pkg_neo4j_homepage = https://github.com/dmitriid/neo4j-erlang
+pkg_neo4j_fetch = git
+pkg_neo4j_repo = https://github.com/dmitriid/neo4j-erlang
+pkg_neo4j_commit = master
+
+PACKAGES += neotoma
+pkg_neotoma_name = neotoma
+pkg_neotoma_description = Erlang library and packrat parser-generator for parsing expression grammars.
+pkg_neotoma_homepage = https://github.com/seancribbs/neotoma
+pkg_neotoma_fetch = git
+pkg_neotoma_repo = https://github.com/seancribbs/neotoma
+pkg_neotoma_commit = master
+
+PACKAGES += newrelic
+pkg_newrelic_name = newrelic
+pkg_newrelic_description = Erlang library for sending metrics to New Relic
+pkg_newrelic_homepage = https://github.com/wooga/newrelic-erlang
+pkg_newrelic_fetch = git
+pkg_newrelic_repo = https://github.com/wooga/newrelic-erlang
+pkg_newrelic_commit = master
+
+PACKAGES += nifty
+pkg_nifty_name = nifty
+pkg_nifty_description = Erlang NIF wrapper generator
+pkg_nifty_homepage = https://github.com/parapluu/nifty
+pkg_nifty_fetch = git
+pkg_nifty_repo = https://github.com/parapluu/nifty
+pkg_nifty_commit = master
+
+PACKAGES += nitrogen_core
+pkg_nitrogen_core_name = nitrogen_core
+pkg_nitrogen_core_description = The core Nitrogen library.
+pkg_nitrogen_core_homepage = http://nitrogenproject.com/
+pkg_nitrogen_core_fetch = git
+pkg_nitrogen_core_repo = https://github.com/nitrogen/nitrogen_core
+pkg_nitrogen_core_commit = master
+
+PACKAGES += nkbase
+pkg_nkbase_name = nkbase
+pkg_nkbase_description = NkBASE distributed database
+pkg_nkbase_homepage = https://github.com/Nekso/nkbase
+pkg_nkbase_fetch = git
+pkg_nkbase_repo = https://github.com/Nekso/nkbase
+pkg_nkbase_commit = develop
+
+PACKAGES += nkdocker
+pkg_nkdocker_name = nkdocker
+pkg_nkdocker_description = Erlang Docker client
+pkg_nkdocker_homepage = https://github.com/Nekso/nkdocker
+pkg_nkdocker_fetch = git
+pkg_nkdocker_repo = https://github.com/Nekso/nkdocker
+pkg_nkdocker_commit = master
+
+PACKAGES += nkpacket
+pkg_nkpacket_name = nkpacket
+pkg_nkpacket_description = Generic Erlang transport layer
+pkg_nkpacket_homepage = https://github.com/Nekso/nkpacket
+pkg_nkpacket_fetch = git
+pkg_nkpacket_repo = https://github.com/Nekso/nkpacket
+pkg_nkpacket_commit = master
+
+PACKAGES += nksip
+pkg_nksip_name = nksip
+pkg_nksip_description = Erlang SIP application server
+pkg_nksip_homepage = https://github.com/kalta/nksip
+pkg_nksip_fetch = git
+pkg_nksip_repo = https://github.com/kalta/nksip
+pkg_nksip_commit = master
+
+PACKAGES += nodefinder
+pkg_nodefinder_name = nodefinder
+pkg_nodefinder_description = Automatic node discovery via UDP multicast
+pkg_nodefinder_homepage = https://github.com/erlanger/nodefinder
+pkg_nodefinder_fetch = git
+pkg_nodefinder_repo = https://github.com/okeuday/nodefinder
+pkg_nodefinder_commit = master
+
+PACKAGES += nprocreg
+pkg_nprocreg_name = nprocreg
+pkg_nprocreg_description = Minimal Distributed Erlang Process Registry
+pkg_nprocreg_homepage = http://nitrogenproject.com/
+pkg_nprocreg_fetch = git
+pkg_nprocreg_repo = https://github.com/nitrogen/nprocreg
+pkg_nprocreg_commit = master
+
+PACKAGES += oauth
+pkg_oauth_name = oauth
+pkg_oauth_description = An Erlang OAuth 1.0 implementation
+pkg_oauth_homepage = https://github.com/tim/erlang-oauth
+pkg_oauth_fetch = git
+pkg_oauth_repo = https://github.com/tim/erlang-oauth
+pkg_oauth_commit = master
+
+PACKAGES += oauth2
+pkg_oauth2_name = oauth2
+pkg_oauth2_description = Erlang OAuth2 implementation
+pkg_oauth2_homepage = https://github.com/kivra/oauth2
+pkg_oauth2_fetch = git
+pkg_oauth2_repo = https://github.com/kivra/oauth2
+pkg_oauth2_commit = master
+
+PACKAGES += observer_cli
+pkg_observer_cli_name = observer_cli
+pkg_observer_cli_description = Visualize Erlang/Elixir Nodes On The Command Line
+pkg_observer_cli_homepage = http://zhongwencool.github.io/observer_cli
+pkg_observer_cli_fetch = git
+pkg_observer_cli_repo = https://github.com/zhongwencool/observer_cli
+pkg_observer_cli_commit = master
+
+PACKAGES += octopus
+pkg_octopus_name = octopus
+pkg_octopus_description = Small and flexible pool manager written in Erlang
+pkg_octopus_homepage = https://github.com/erlangbureau/octopus
+pkg_octopus_fetch = git
+pkg_octopus_repo = https://github.com/erlangbureau/octopus
+pkg_octopus_commit = master
+
+PACKAGES += of_protocol
+pkg_of_protocol_name = of_protocol
+pkg_of_protocol_description = OpenFlow Protocol Library for Erlang
+pkg_of_protocol_homepage = https://github.com/FlowForwarding/of_protocol
+pkg_of_protocol_fetch = git
+pkg_of_protocol_repo = https://github.com/FlowForwarding/of_protocol
+pkg_of_protocol_commit = master
+
+PACKAGES += opencouch
+pkg_opencouch_name = couch
+pkg_opencouch_description = An embeddable document-oriented database compatible with Apache CouchDB
+pkg_opencouch_homepage = https://github.com/benoitc/opencouch
+pkg_opencouch_fetch = git
+pkg_opencouch_repo = https://github.com/benoitc/opencouch
+pkg_opencouch_commit = master
+
+PACKAGES += openflow
+pkg_openflow_name = openflow
+pkg_openflow_description = An OpenFlow controller written in pure Erlang
+pkg_openflow_homepage = https://github.com/renatoaguiar/erlang-openflow
+pkg_openflow_fetch = git
+pkg_openflow_repo = https://github.com/renatoaguiar/erlang-openflow
+pkg_openflow_commit = master
+
+PACKAGES += openid
+pkg_openid_name = openid
+pkg_openid_description = Erlang OpenID
+pkg_openid_homepage = https://github.com/brendonh/erl_openid
+pkg_openid_fetch = git
+pkg_openid_repo = https://github.com/brendonh/erl_openid
+pkg_openid_commit = master
+
+PACKAGES += openpoker
+pkg_openpoker_name = openpoker
+pkg_openpoker_description = Genesis Texas hold'em Game Server
+pkg_openpoker_homepage = https://github.com/hpyhacking/openpoker
+pkg_openpoker_fetch = git
+pkg_openpoker_repo = https://github.com/hpyhacking/openpoker
+pkg_openpoker_commit = master
+
+PACKAGES += otpbp
+pkg_otpbp_name = otpbp
+pkg_otpbp_description = Parse transformer for using new OTP functions in old Erlang/OTP releases (R15, R16, 17, 18, 19)
+pkg_otpbp_homepage = https://github.com/Ledest/otpbp
+pkg_otpbp_fetch = git
+pkg_otpbp_repo = https://github.com/Ledest/otpbp
+pkg_otpbp_commit = master
+
+PACKAGES += pal
+pkg_pal_name = pal
+pkg_pal_description = Pragmatic Authentication Library
+pkg_pal_homepage = https://github.com/manifest/pal
+pkg_pal_fetch = git
+pkg_pal_repo = https://github.com/manifest/pal
+pkg_pal_commit = master
+
+PACKAGES += parse_trans
+pkg_parse_trans_name = parse_trans
+pkg_parse_trans_description = Parse transform utilities for Erlang
+pkg_parse_trans_homepage = https://github.com/uwiger/parse_trans
+pkg_parse_trans_fetch = git
+pkg_parse_trans_repo = https://github.com/uwiger/parse_trans
+pkg_parse_trans_commit = master
+
+PACKAGES += parsexml
+pkg_parsexml_name = parsexml
+pkg_parsexml_description = Simple DOM XML parser with a convenient and very simple API
+pkg_parsexml_homepage = https://github.com/maxlapshin/parsexml
+pkg_parsexml_fetch = git
+pkg_parsexml_repo = https://github.com/maxlapshin/parsexml
+pkg_parsexml_commit = master
+
+PACKAGES += partisan
+pkg_partisan_name = partisan
+pkg_partisan_description = High-performance, high-scalability distributed computing with Erlang and Elixir.
+pkg_partisan_homepage = http://partisan.cloud
+pkg_partisan_fetch = git
+pkg_partisan_repo = https://github.com/lasp-lang/partisan
+pkg_partisan_commit = master
+
+PACKAGES += pegjs
+pkg_pegjs_name = pegjs
+pkg_pegjs_description = An implementation of PEG.js grammar for Erlang.
+pkg_pegjs_homepage = https://github.com/dmitriid/pegjs
+pkg_pegjs_fetch = git
+pkg_pegjs_repo = https://github.com/dmitriid/pegjs
+pkg_pegjs_commit = master
+
+PACKAGES += percept2
+pkg_percept2_name = percept2
+pkg_percept2_description = Concurrent profiling tool for Erlang
+pkg_percept2_homepage = https://github.com/huiqing/percept2
+pkg_percept2_fetch = git
+pkg_percept2_repo = https://github.com/huiqing/percept2
+pkg_percept2_commit = master
+
+PACKAGES += pgo
+pkg_pgo_name = pgo
+pkg_pgo_description = Erlang Postgres client and connection pool
+pkg_pgo_homepage = https://github.com/erleans/pgo.git
+pkg_pgo_fetch = git
+pkg_pgo_repo = https://github.com/erleans/pgo.git
+pkg_pgo_commit = master
+
+PACKAGES += pgsql
+pkg_pgsql_name = pgsql
+pkg_pgsql_description = Erlang PostgreSQL driver
+pkg_pgsql_homepage = https://github.com/semiocast/pgsql
+pkg_pgsql_fetch = git
+pkg_pgsql_repo = https://github.com/semiocast/pgsql
+pkg_pgsql_commit = master
+
+PACKAGES += pkgx
+pkg_pkgx_name = pkgx
+pkg_pkgx_description = Build .deb packages from Erlang releases
+pkg_pkgx_homepage = https://github.com/arjan/pkgx
+pkg_pkgx_fetch = git
+pkg_pkgx_repo = https://github.com/arjan/pkgx
+pkg_pkgx_commit = master
+
+PACKAGES += pkt
+pkg_pkt_name = pkt
+pkg_pkt_description = Erlang network protocol library
+pkg_pkt_homepage = https://github.com/msantos/pkt
+pkg_pkt_fetch = git
+pkg_pkt_repo = https://github.com/msantos/pkt
+pkg_pkt_commit = master
+
+PACKAGES += plain_fsm
+pkg_plain_fsm_name = plain_fsm
+pkg_plain_fsm_description = A behaviour/support library for writing plain Erlang FSMs.
+pkg_plain_fsm_homepage = https://github.com/uwiger/plain_fsm
+pkg_plain_fsm_fetch = git
+pkg_plain_fsm_repo = https://github.com/uwiger/plain_fsm
+pkg_plain_fsm_commit = master
+
+PACKAGES += plumtree
+pkg_plumtree_name = plumtree
+pkg_plumtree_description = Epidemic Broadcast Trees
+pkg_plumtree_homepage = https://github.com/helium/plumtree
+pkg_plumtree_fetch = git
+pkg_plumtree_repo = https://github.com/helium/plumtree
+pkg_plumtree_commit = master
+
+PACKAGES += pmod_transform
+pkg_pmod_transform_name = pmod_transform
+pkg_pmod_transform_description = Parse transform for parameterized modules
+pkg_pmod_transform_homepage = https://github.com/erlang/pmod_transform
+pkg_pmod_transform_fetch = git
+pkg_pmod_transform_repo = https://github.com/erlang/pmod_transform
+pkg_pmod_transform_commit = master
+
+PACKAGES += pobox
+pkg_pobox_name = pobox
+pkg_pobox_description = External buffer processes to protect against mailbox overflow in Erlang
+pkg_pobox_homepage = https://github.com/ferd/pobox
+pkg_pobox_fetch = git
+pkg_pobox_repo = https://github.com/ferd/pobox
+pkg_pobox_commit = master
+
+PACKAGES += ponos
+pkg_ponos_name = ponos
+pkg_ponos_description = ponos is a simple yet powerful load generator written in Erlang
+pkg_ponos_homepage = https://github.com/klarna/ponos
+pkg_ponos_fetch = git
+pkg_ponos_repo = https://github.com/klarna/ponos
+pkg_ponos_commit = master
+
+PACKAGES += poolboy
+pkg_poolboy_name = poolboy
+pkg_poolboy_description = A hunky Erlang worker pool factory
+pkg_poolboy_homepage = https://github.com/devinus/poolboy
+pkg_poolboy_fetch = git
+pkg_poolboy_repo = https://github.com/devinus/poolboy
+pkg_poolboy_commit = master
+
+PACKAGES += pooler
+pkg_pooler_name = pooler
+pkg_pooler_description = An OTP Process Pool Application
+pkg_pooler_homepage = https://github.com/seth/pooler
+pkg_pooler_fetch = git
+pkg_pooler_repo = https://github.com/seth/pooler
+pkg_pooler_commit = master
+
+PACKAGES += pqueue
+pkg_pqueue_name = pqueue
+pkg_pqueue_description = Erlang Priority Queues
+pkg_pqueue_homepage = https://github.com/okeuday/pqueue
+pkg_pqueue_fetch = git
+pkg_pqueue_repo = https://github.com/okeuday/pqueue
+pkg_pqueue_commit = master
+
+PACKAGES += procket
+pkg_procket_name = procket
+pkg_procket_description = Erlang interface to low level socket operations
+pkg_procket_homepage = http://blog.listincomprehension.com/search/label/procket
+pkg_procket_fetch = git
+pkg_procket_repo = https://github.com/msantos/procket
+pkg_procket_commit = master
+
+PACKAGES += prometheus
+pkg_prometheus_name = prometheus
+pkg_prometheus_description = Prometheus.io client in Erlang
+pkg_prometheus_homepage = https://github.com/deadtrickster/prometheus.erl
+pkg_prometheus_fetch = git
+pkg_prometheus_repo = https://github.com/deadtrickster/prometheus.erl
+pkg_prometheus_commit = master
+
+PACKAGES += prop
+pkg_prop_name = prop
+pkg_prop_description = An Erlang code scaffolding and generator system.
+pkg_prop_homepage = https://github.com/nuex/prop
+pkg_prop_fetch = git
+pkg_prop_repo = https://github.com/nuex/prop
+pkg_prop_commit = master
+
+PACKAGES += proper
+pkg_proper_name = proper
+pkg_proper_description = PropEr: a QuickCheck-inspired property-based testing tool for Erlang.
+pkg_proper_homepage = http://proper.softlab.ntua.gr
+pkg_proper_fetch = git
+pkg_proper_repo = https://github.com/manopapad/proper
+pkg_proper_commit = master
+
+PACKAGES += props
+pkg_props_name = props
+pkg_props_description = Property structure library
+pkg_props_homepage = https://github.com/greyarea/props
+pkg_props_fetch = git
+pkg_props_repo = https://github.com/greyarea/props
+pkg_props_commit = master
+
+PACKAGES += protobuffs
+pkg_protobuffs_name = protobuffs
+pkg_protobuffs_description = An implementation of Google's Protocol Buffers for Erlang, based on ngerakines/erlang_protobuffs.
+pkg_protobuffs_homepage = https://github.com/basho/erlang_protobuffs
+pkg_protobuffs_fetch = git
+pkg_protobuffs_repo = https://github.com/basho/erlang_protobuffs
+pkg_protobuffs_commit = master
+
+PACKAGES += psycho
+pkg_psycho_name = psycho
+pkg_psycho_description = HTTP server that provides a WSGI-like interface for applications and middleware.
+pkg_psycho_homepage = https://github.com/gar1t/psycho
+pkg_psycho_fetch = git
+pkg_psycho_repo = https://github.com/gar1t/psycho
+pkg_psycho_commit = master
+
+PACKAGES += purity
+pkg_purity_name = purity
+pkg_purity_description = A side-effect analyzer for Erlang
+pkg_purity_homepage = https://github.com/mpitid/purity
+pkg_purity_fetch = git
+pkg_purity_repo = https://github.com/mpitid/purity
+pkg_purity_commit = master
+
+PACKAGES += push_service
+pkg_push_service_name = push_service
+pkg_push_service_description = Push service
+pkg_push_service_homepage = https://github.com/hairyhum/push_service
+pkg_push_service_fetch = git
+pkg_push_service_repo = https://github.com/hairyhum/push_service
+pkg_push_service_commit = master
+
+PACKAGES += qdate
+pkg_qdate_name = qdate
+pkg_qdate_description = Date, time, and timezone parsing, formatting, and conversion for Erlang.
+pkg_qdate_homepage = https://github.com/choptastic/qdate
+pkg_qdate_fetch = git
+pkg_qdate_repo = https://github.com/choptastic/qdate
+pkg_qdate_commit = master
+
+PACKAGES += qrcode
+pkg_qrcode_name = qrcode
+pkg_qrcode_description = QR Code encoder in Erlang
+pkg_qrcode_homepage = https://github.com/komone/qrcode
+pkg_qrcode_fetch = git
+pkg_qrcode_repo = https://github.com/komone/qrcode
+pkg_qrcode_commit = master
+
+PACKAGES += quest
+pkg_quest_name = quest
+pkg_quest_description = Learn Erlang through this set of challenges. An interactive system for getting to know Erlang.
+pkg_quest_homepage = https://github.com/eriksoe/ErlangQuest
+pkg_quest_fetch = git
+pkg_quest_repo = https://github.com/eriksoe/ErlangQuest
+pkg_quest_commit = master
+
+PACKAGES += quickrand
+pkg_quickrand_name = quickrand
+pkg_quickrand_description = Quick Erlang Random Number Generation
+pkg_quickrand_homepage = https://github.com/okeuday/quickrand
+pkg_quickrand_fetch = git
+pkg_quickrand_repo = https://github.com/okeuday/quickrand
+pkg_quickrand_commit = master
+
+PACKAGES += rabbit
+pkg_rabbit_name = rabbit
+pkg_rabbit_description = RabbitMQ Server
+pkg_rabbit_homepage = https://www.rabbitmq.com/
+pkg_rabbit_fetch = git
+pkg_rabbit_repo = https://github.com/rabbitmq/rabbitmq-server.git
+pkg_rabbit_commit = master
+
+PACKAGES += rabbit_exchange_type_riak
+pkg_rabbit_exchange_type_riak_name = rabbit_exchange_type_riak
+pkg_rabbit_exchange_type_riak_description = Custom RabbitMQ exchange type for sticking messages in Riak
+pkg_rabbit_exchange_type_riak_homepage = https://github.com/jbrisbin/riak-exchange
+pkg_rabbit_exchange_type_riak_fetch = git
+pkg_rabbit_exchange_type_riak_repo = https://github.com/jbrisbin/riak-exchange
+pkg_rabbit_exchange_type_riak_commit = master
+
+PACKAGES += rack
+pkg_rack_name = rack
+pkg_rack_description = Rack handler for Erlang
+pkg_rack_homepage = https://github.com/erlyvideo/rack
+pkg_rack_fetch = git
+pkg_rack_repo = https://github.com/erlyvideo/rack
+pkg_rack_commit = master
+
+PACKAGES += radierl
+pkg_radierl_name = radierl
+pkg_radierl_description = RADIUS protocol stack implemented in Erlang.
+pkg_radierl_homepage = https://github.com/vances/radierl
+pkg_radierl_fetch = git
+pkg_radierl_repo = https://github.com/vances/radierl
+pkg_radierl_commit = master
+
+PACKAGES += rafter
+pkg_rafter_name = rafter
+pkg_rafter_description = An Erlang library application which implements the Raft consensus protocol
+pkg_rafter_homepage = https://github.com/andrewjstone/rafter
+pkg_rafter_fetch = git
+pkg_rafter_repo = https://github.com/andrewjstone/rafter
+pkg_rafter_commit = master
+
+PACKAGES += ranch
+pkg_ranch_name = ranch
+pkg_ranch_description = Socket acceptor pool for TCP protocols.
+pkg_ranch_homepage = http://ninenines.eu
+pkg_ranch_fetch = git
+pkg_ranch_repo = https://github.com/ninenines/ranch
+pkg_ranch_commit = 1.2.1
+
+PACKAGES += rbeacon
+pkg_rbeacon_name = rbeacon
+pkg_rbeacon_description = LAN discovery and presence in Erlang.
+pkg_rbeacon_homepage = https://github.com/refuge/rbeacon
+pkg_rbeacon_fetch = git
+pkg_rbeacon_repo = https://github.com/refuge/rbeacon
+pkg_rbeacon_commit = master
+
+PACKAGES += rebar
+pkg_rebar_name = rebar
+pkg_rebar_description = Erlang build tool that makes it easy to compile and test Erlang applications, port drivers and releases.
+pkg_rebar_homepage = http://www.rebar3.org
+pkg_rebar_fetch = git
+pkg_rebar_repo = https://github.com/rebar/rebar3
+pkg_rebar_commit = master
+
+PACKAGES += rebus
+pkg_rebus_name = rebus
+pkg_rebus_description = A stupid simple, internal, pub/sub event bus written in and for Erlang.
+pkg_rebus_homepage = https://github.com/olle/rebus
+pkg_rebus_fetch = git
+pkg_rebus_repo = https://github.com/olle/rebus
+pkg_rebus_commit = master
+
+PACKAGES += rec2json
+pkg_rec2json_name = rec2json
+pkg_rec2json_description = Compile Erlang record definitions into modules to convert them to/from JSON easily.
+pkg_rec2json_homepage = https://github.com/lordnull/rec2json
+pkg_rec2json_fetch = git
+pkg_rec2json_repo = https://github.com/lordnull/rec2json
+pkg_rec2json_commit = master
+
+PACKAGES += recon
+pkg_recon_name = recon
+pkg_recon_description = Collection of functions and scripts to debug Erlang in production.
+pkg_recon_homepage = https://github.com/ferd/recon
+pkg_recon_fetch = git
+pkg_recon_repo = https://github.com/ferd/recon
+pkg_recon_commit = master
+
+PACKAGES += record_info
+pkg_record_info_name = record_info
+pkg_record_info_description = Convert between record and proplist
+pkg_record_info_homepage = https://github.com/bipthelin/erlang-record_info
+pkg_record_info_fetch = git
+pkg_record_info_repo = https://github.com/bipthelin/erlang-record_info
+pkg_record_info_commit = master
+
+PACKAGES += redgrid
+pkg_redgrid_name = redgrid
+pkg_redgrid_description = Automatic Erlang node discovery via Redis
+pkg_redgrid_homepage = https://github.com/jkvor/redgrid
+pkg_redgrid_fetch = git
+pkg_redgrid_repo = https://github.com/jkvor/redgrid
+pkg_redgrid_commit = master
+
+PACKAGES += redo
+pkg_redo_name = redo
+pkg_redo_description = Pipelined Erlang Redis client
+pkg_redo_homepage = https://github.com/jkvor/redo
+pkg_redo_fetch = git
+pkg_redo_repo = https://github.com/jkvor/redo
+pkg_redo_commit = master
+
+PACKAGES += reload_mk
+pkg_reload_mk_name = reload_mk
+pkg_reload_mk_description = Live reload plugin for erlang.mk.
+pkg_reload_mk_homepage = https://github.com/bullno1/reload.mk
+pkg_reload_mk_fetch = git
+pkg_reload_mk_repo = https://github.com/bullno1/reload.mk
+pkg_reload_mk_commit = master
+
+PACKAGES += reltool_util
+pkg_reltool_util_name = reltool_util
+pkg_reltool_util_description = Erlang reltool utility functionality application
+pkg_reltool_util_homepage = https://github.com/okeuday/reltool_util
+pkg_reltool_util_fetch = git
+pkg_reltool_util_repo = https://github.com/okeuday/reltool_util
+pkg_reltool_util_commit = master
+
+PACKAGES += relx
+pkg_relx_name = relx
+pkg_relx_description = Sane, simple release creation for Erlang
+pkg_relx_homepage = https://github.com/erlware/relx
+pkg_relx_fetch = git
+pkg_relx_repo = https://github.com/erlware/relx
+pkg_relx_commit = master
+
+PACKAGES += resource_discovery
+pkg_resource_discovery_name = resource_discovery
+pkg_resource_discovery_description = An application used to dynamically discover resources present in an Erlang node cluster.
+pkg_resource_discovery_homepage = http://erlware.org/
+pkg_resource_discovery_fetch = git
+pkg_resource_discovery_repo = https://github.com/erlware/resource_discovery
+pkg_resource_discovery_commit = master
+
+PACKAGES += restc
+pkg_restc_name = restc
+pkg_restc_description = Erlang REST client
+pkg_restc_homepage = https://github.com/kivra/restclient
+pkg_restc_fetch = git
+pkg_restc_repo = https://github.com/kivra/restclient
+pkg_restc_commit = master
+
+PACKAGES += rfc4627_jsonrpc
+pkg_rfc4627_jsonrpc_name = rfc4627_jsonrpc
+pkg_rfc4627_jsonrpc_description = Erlang RFC4627 (JSON) codec and JSON-RPC server implementation.
+pkg_rfc4627_jsonrpc_homepage = https://github.com/tonyg/erlang-rfc4627
+pkg_rfc4627_jsonrpc_fetch = git
+pkg_rfc4627_jsonrpc_repo = https://github.com/tonyg/erlang-rfc4627
+pkg_rfc4627_jsonrpc_commit = master
+
+PACKAGES += riak_control
+pkg_riak_control_name = riak_control
+pkg_riak_control_description = Webmachine-based administration interface for Riak.
+pkg_riak_control_homepage = https://github.com/basho/riak_control
+pkg_riak_control_fetch = git
+pkg_riak_control_repo = https://github.com/basho/riak_control
+pkg_riak_control_commit = master
+
+PACKAGES += riak_core
+pkg_riak_core_name = riak_core
+pkg_riak_core_description = Distributed systems infrastructure used by Riak.
+pkg_riak_core_homepage = https://github.com/basho/riak_core
+pkg_riak_core_fetch = git
+pkg_riak_core_repo = https://github.com/basho/riak_core
+pkg_riak_core_commit = master
+
+PACKAGES += riak_dt
+pkg_riak_dt_name = riak_dt
+pkg_riak_dt_description = Convergent replicated datatypes in Erlang
+pkg_riak_dt_homepage = https://github.com/basho/riak_dt
+pkg_riak_dt_fetch = git
+pkg_riak_dt_repo = https://github.com/basho/riak_dt
+pkg_riak_dt_commit = master
+
+PACKAGES += riak_ensemble
+pkg_riak_ensemble_name = riak_ensemble
+pkg_riak_ensemble_description = Multi-Paxos framework in Erlang
+pkg_riak_ensemble_homepage = https://github.com/basho/riak_ensemble
+pkg_riak_ensemble_fetch = git
+pkg_riak_ensemble_repo = https://github.com/basho/riak_ensemble
+pkg_riak_ensemble_commit = master
+
+PACKAGES += riak_kv
+pkg_riak_kv_name = riak_kv
+pkg_riak_kv_description = Riak Key/Value Store
+pkg_riak_kv_homepage = https://github.com/basho/riak_kv
+pkg_riak_kv_fetch = git
+pkg_riak_kv_repo = https://github.com/basho/riak_kv
+pkg_riak_kv_commit = master
+
+PACKAGES += riak_pg
+pkg_riak_pg_name = riak_pg
+pkg_riak_pg_description = Distributed process groups with riak_core.
+pkg_riak_pg_homepage = https://github.com/cmeiklejohn/riak_pg
+pkg_riak_pg_fetch = git
+pkg_riak_pg_repo = https://github.com/cmeiklejohn/riak_pg
+pkg_riak_pg_commit = master
+
+PACKAGES += riak_pipe
+pkg_riak_pipe_name = riak_pipe
+pkg_riak_pipe_description = Riak Pipelines
+pkg_riak_pipe_homepage = https://github.com/basho/riak_pipe
+pkg_riak_pipe_fetch = git
+pkg_riak_pipe_repo = https://github.com/basho/riak_pipe
+pkg_riak_pipe_commit = master
+
+PACKAGES += riak_sysmon
+pkg_riak_sysmon_name = riak_sysmon
+pkg_riak_sysmon_description = Simple OTP app for managing Erlang VM system_monitor event messages
+pkg_riak_sysmon_homepage = https://github.com/basho/riak_sysmon
+pkg_riak_sysmon_fetch = git
+pkg_riak_sysmon_repo = https://github.com/basho/riak_sysmon
+pkg_riak_sysmon_commit = master
+
+PACKAGES += riak_test
+pkg_riak_test_name = riak_test
+pkg_riak_test_description = I'm in your cluster, testing your riaks
+pkg_riak_test_homepage = https://github.com/basho/riak_test
+pkg_riak_test_fetch = git
+pkg_riak_test_repo = https://github.com/basho/riak_test
+pkg_riak_test_commit = master
+
+PACKAGES += riakc
+pkg_riakc_name = riakc
+pkg_riakc_description = Erlang clients for Riak.
+pkg_riakc_homepage = https://github.com/basho/riak-erlang-client
+pkg_riakc_fetch = git
+pkg_riakc_repo = https://github.com/basho/riak-erlang-client
+pkg_riakc_commit = master
+
+PACKAGES += riakhttpc
+pkg_riakhttpc_name = riakhttpc
+pkg_riakhttpc_description = Riak Erlang client using the HTTP interface
+pkg_riakhttpc_homepage = https://github.com/basho/riak-erlang-http-client
+pkg_riakhttpc_fetch = git
+pkg_riakhttpc_repo = https://github.com/basho/riak-erlang-http-client
+pkg_riakhttpc_commit = master
+
+PACKAGES += riaknostic
+pkg_riaknostic_name = riaknostic
+pkg_riaknostic_description = A diagnostic tool for Riak installations, to find common errors ASAP
+pkg_riaknostic_homepage = https://github.com/basho/riaknostic
+pkg_riaknostic_fetch = git
+pkg_riaknostic_repo = https://github.com/basho/riaknostic
+pkg_riaknostic_commit = master
+
+PACKAGES += riakpool
+pkg_riakpool_name = riakpool
+pkg_riakpool_description = Erlang Riak client pool
+pkg_riakpool_homepage = https://github.com/dweldon/riakpool
+pkg_riakpool_fetch = git
+pkg_riakpool_repo = https://github.com/dweldon/riakpool
+pkg_riakpool_commit = master
+
+PACKAGES += rivus_cep
+pkg_rivus_cep_name = rivus_cep
+pkg_rivus_cep_description = Complex event processing in Erlang
+pkg_rivus_cep_homepage = https://github.com/vascokk/rivus_cep
+pkg_rivus_cep_fetch = git
+pkg_rivus_cep_repo = https://github.com/vascokk/rivus_cep
+pkg_rivus_cep_commit = master
+
+PACKAGES += rlimit
+pkg_rlimit_name = rlimit
+pkg_rlimit_description = Magnus Klaar's rate limiter code from etorrent
+pkg_rlimit_homepage = https://github.com/jlouis/rlimit
+pkg_rlimit_fetch = git
+pkg_rlimit_repo = https://github.com/jlouis/rlimit
+pkg_rlimit_commit = master
+
+PACKAGES += rust_mk
+pkg_rust_mk_name = rust_mk
+pkg_rust_mk_description = Build Rust crates in an Erlang application
+pkg_rust_mk_homepage = https://github.com/goertzenator/rust.mk
+pkg_rust_mk_fetch = git
+pkg_rust_mk_repo = https://github.com/goertzenator/rust.mk
+pkg_rust_mk_commit = master
+
+PACKAGES += safetyvalve
+pkg_safetyvalve_name = safetyvalve
+pkg_safetyvalve_description = A safety valve for your Erlang node
+pkg_safetyvalve_homepage = https://github.com/jlouis/safetyvalve
+pkg_safetyvalve_fetch = git
+pkg_safetyvalve_repo = https://github.com/jlouis/safetyvalve
+pkg_safetyvalve_commit = master
+
+PACKAGES += seestar
+pkg_seestar_name = seestar
+pkg_seestar_description = The Erlang client for Cassandra 1.2+ binary protocol
+pkg_seestar_homepage = https://github.com/iamaleksey/seestar
+pkg_seestar_fetch = git
+pkg_seestar_repo = https://github.com/iamaleksey/seestar
+pkg_seestar_commit = master
+
+PACKAGES += service
+pkg_service_name = service
+pkg_service_description = A minimal Erlang behavior for creating CloudI internal services
+pkg_service_homepage = http://cloudi.org/
+pkg_service_fetch = git
+pkg_service_repo = https://github.com/CloudI/service
+pkg_service_commit = master
+
+PACKAGES += setup
+pkg_setup_name = setup
+pkg_setup_description = Generic setup utility for Erlang-based systems
+pkg_setup_homepage = https://github.com/uwiger/setup
+pkg_setup_fetch = git
+pkg_setup_repo = https://github.com/uwiger/setup
+pkg_setup_commit = master
+
+PACKAGES += sext
+pkg_sext_name = sext
+pkg_sext_description = Sortable Erlang Term Serialization
+pkg_sext_homepage = https://github.com/uwiger/sext
+pkg_sext_fetch = git
+pkg_sext_repo = https://github.com/uwiger/sext
+pkg_sext_commit = master
+
+PACKAGES += sfmt
+pkg_sfmt_name = sfmt
+pkg_sfmt_description = SFMT pseudo random number generator for Erlang.
+pkg_sfmt_homepage = https://github.com/jj1bdx/sfmt-erlang
+pkg_sfmt_fetch = git
+pkg_sfmt_repo = https://github.com/jj1bdx/sfmt-erlang
+pkg_sfmt_commit = master
+
+PACKAGES += sgte
+pkg_sgte_name = sgte
+pkg_sgte_description = A simple Erlang Template Engine
+pkg_sgte_homepage = https://github.com/filippo/sgte
+pkg_sgte_fetch = git
+pkg_sgte_repo = https://github.com/filippo/sgte
+pkg_sgte_commit = master
+
+PACKAGES += sheriff
+pkg_sheriff_name = sheriff
+pkg_sheriff_description = Parse transform for type based validation.
+pkg_sheriff_homepage = http://ninenines.eu
+pkg_sheriff_fetch = git
+pkg_sheriff_repo = https://github.com/extend/sheriff
+pkg_sheriff_commit = master
+
+PACKAGES += shotgun
+pkg_shotgun_name = shotgun
+pkg_shotgun_description = better than just a gun
+pkg_shotgun_homepage = https://github.com/inaka/shotgun
+pkg_shotgun_fetch = git
+pkg_shotgun_repo = https://github.com/inaka/shotgun
+pkg_shotgun_commit = master
+
+PACKAGES += sidejob
+pkg_sidejob_name = sidejob
+pkg_sidejob_description = Parallel worker and capacity limiting library for Erlang
+pkg_sidejob_homepage = https://github.com/basho/sidejob
+pkg_sidejob_fetch = git
+pkg_sidejob_repo = https://github.com/basho/sidejob
+pkg_sidejob_commit = master
+
+PACKAGES += sieve
+pkg_sieve_name = sieve
+pkg_sieve_description = sieve is a simple TCP routing proxy (layer 7) in Erlang
+pkg_sieve_homepage = https://github.com/benoitc/sieve
+pkg_sieve_fetch = git
+pkg_sieve_repo = https://github.com/benoitc/sieve
+pkg_sieve_commit = master
+
+PACKAGES += sighandler
+pkg_sighandler_name = sighandler
+pkg_sighandler_description = Handle UNIX signals in Erlang
+pkg_sighandler_homepage = https://github.com/jkingsbery/sighandler
+pkg_sighandler_fetch = git
+pkg_sighandler_repo = https://github.com/jkingsbery/sighandler
+pkg_sighandler_commit = master
+
+PACKAGES += simhash
+pkg_simhash_name = simhash
+pkg_simhash_description = Simhashing for Erlang -- hashing algorithm to find near-duplicates in binary data.
+pkg_simhash_homepage = https://github.com/ferd/simhash
+pkg_simhash_fetch = git
+pkg_simhash_repo = https://github.com/ferd/simhash
+pkg_simhash_commit = master
+
+PACKAGES += simple_bridge
+pkg_simple_bridge_name = simple_bridge
+pkg_simple_bridge_description = A simple, standardized interface library to Erlang HTTP Servers.
+pkg_simple_bridge_homepage = https://github.com/nitrogen/simple_bridge
+pkg_simple_bridge_fetch = git
+pkg_simple_bridge_repo = https://github.com/nitrogen/simple_bridge
+pkg_simple_bridge_commit = master
+
+PACKAGES += simple_oauth2
+pkg_simple_oauth2_name = simple_oauth2
+pkg_simple_oauth2_description = Simple Erlang OAuth2 client module for any HTTP server framework (Google, Facebook, Yandex, Vkontakte are preconfigured)
+pkg_simple_oauth2_homepage = https://github.com/virtan/simple_oauth2
+pkg_simple_oauth2_fetch = git
+pkg_simple_oauth2_repo = https://github.com/virtan/simple_oauth2
+pkg_simple_oauth2_commit = master
+
+PACKAGES += skel
+pkg_skel_name = skel
+pkg_skel_description = A Streaming Process-based Skeleton Library for Erlang
+pkg_skel_homepage = https://github.com/ParaPhrase/skel
+pkg_skel_fetch = git
+pkg_skel_repo = https://github.com/ParaPhrase/skel
+pkg_skel_commit = master
+
+PACKAGES += slack
+pkg_slack_name = slack
+pkg_slack_description = Minimal Slack notification OTP library.
+pkg_slack_homepage = https://github.com/DonBranson/slack
+pkg_slack_fetch = git
+pkg_slack_repo = https://github.com/DonBranson/slack.git
+pkg_slack_commit = master
+
+PACKAGES += smother
+pkg_smother_name = smother
+pkg_smother_description = Extended code coverage metrics for Erlang.
+pkg_smother_homepage = https://ramsay-t.github.io/Smother/
+pkg_smother_fetch = git
+pkg_smother_repo = https://github.com/ramsay-t/Smother
+pkg_smother_commit = master
+
+PACKAGES += snappyer
+pkg_snappyer_name = snappyer
+pkg_snappyer_description = Snappy as a NIF for Erlang
+pkg_snappyer_homepage = https://github.com/zmstone/snappyer
+pkg_snappyer_fetch = git
+pkg_snappyer_repo = https://github.com/zmstone/snappyer.git
+pkg_snappyer_commit = master
+
+PACKAGES += social
+pkg_social_name = social
+pkg_social_description = Cowboy handler for social login via OAuth2 providers
+pkg_social_homepage = https://github.com/dvv/social
+pkg_social_fetch = git
+pkg_social_repo = https://github.com/dvv/social
+pkg_social_commit = master
+
+PACKAGES += spapi_router
+pkg_spapi_router_name = spapi_router
+pkg_spapi_router_description = Partially-connected Erlang clustering
+pkg_spapi_router_homepage = https://github.com/spilgames/spapi-router
+pkg_spapi_router_fetch = git
+pkg_spapi_router_repo = https://github.com/spilgames/spapi-router
+pkg_spapi_router_commit = master
+
+PACKAGES += sqerl
+pkg_sqerl_name = sqerl
+pkg_sqerl_description = An Erlang-flavoured SQL DSL
+pkg_sqerl_homepage = https://github.com/hairyhum/sqerl
+pkg_sqerl_fetch = git
+pkg_sqerl_repo = https://github.com/hairyhum/sqerl
+pkg_sqerl_commit = master
+
+PACKAGES += srly
+pkg_srly_name = srly
+pkg_srly_description = Native Erlang Unix serial interface
+pkg_srly_homepage = https://github.com/msantos/srly
+pkg_srly_fetch = git
+pkg_srly_repo = https://github.com/msantos/srly
+pkg_srly_commit = master
+
+PACKAGES += sshrpc
+pkg_sshrpc_name = sshrpc
+pkg_sshrpc_description = Erlang SSH RPC module (experimental)
+pkg_sshrpc_homepage = https://github.com/jj1bdx/sshrpc
+pkg_sshrpc_fetch = git
+pkg_sshrpc_repo = https://github.com/jj1bdx/sshrpc
+pkg_sshrpc_commit = master
+
+PACKAGES += stable
+pkg_stable_name = stable
+pkg_stable_description = Library of assorted helpers for Cowboy web server.
+pkg_stable_homepage = https://github.com/dvv/stable
+pkg_stable_fetch = git
+pkg_stable_repo = https://github.com/dvv/stable
+pkg_stable_commit = master
+
+PACKAGES += statebox
+pkg_statebox_name = statebox
+pkg_statebox_description = Erlang state monad with merge/conflict-resolution capabilities. Useful for Riak.
+pkg_statebox_homepage = https://github.com/mochi/statebox
+pkg_statebox_fetch = git
+pkg_statebox_repo = https://github.com/mochi/statebox
+pkg_statebox_commit = master
+
+PACKAGES += statebox_riak
+pkg_statebox_riak_name = statebox_riak
+pkg_statebox_riak_description = Convenience library that makes it easier to use statebox with riak, extracted from best practices in our production code at Mochi Media.
+pkg_statebox_riak_homepage = https://github.com/mochi/statebox_riak
+pkg_statebox_riak_fetch = git
+pkg_statebox_riak_repo = https://github.com/mochi/statebox_riak
+pkg_statebox_riak_commit = master
+
+PACKAGES += statman
+pkg_statman_name = statman
+pkg_statman_description = Efficiently collect massive volumes of metrics inside the Erlang VM
+pkg_statman_homepage = https://github.com/knutin/statman
+pkg_statman_fetch = git
+pkg_statman_repo = https://github.com/knutin/statman
+pkg_statman_commit = master
+
+PACKAGES += statsderl
+pkg_statsderl_name = statsderl
+pkg_statsderl_description = StatsD client (Erlang)
+pkg_statsderl_homepage = https://github.com/lpgauth/statsderl
+pkg_statsderl_fetch = git
+pkg_statsderl_repo = https://github.com/lpgauth/statsderl
+pkg_statsderl_commit = master
+
+PACKAGES += stdinout_pool
+pkg_stdinout_pool_name = stdinout_pool
+pkg_stdinout_pool_description = stdinout_pool : stuff goes in, stuff goes out. there's never any miscommunication.
+pkg_stdinout_pool_homepage = https://github.com/mattsta/erlang-stdinout-pool
+pkg_stdinout_pool_fetch = git
+pkg_stdinout_pool_repo = https://github.com/mattsta/erlang-stdinout-pool
+pkg_stdinout_pool_commit = master
+
+PACKAGES += stockdb
+pkg_stockdb_name = stockdb
+pkg_stockdb_description = Database for storing stock exchange quotes in Erlang
+pkg_stockdb_homepage = https://github.com/maxlapshin/stockdb
+pkg_stockdb_fetch = git
+pkg_stockdb_repo = https://github.com/maxlapshin/stockdb
+pkg_stockdb_commit = master
+
+PACKAGES += stripe
+pkg_stripe_name = stripe
+pkg_stripe_description = Erlang interface to the stripe.com API
+pkg_stripe_homepage = https://github.com/mattsta/stripe-erlang
+pkg_stripe_fetch = git
+pkg_stripe_repo = https://github.com/mattsta/stripe-erlang
+pkg_stripe_commit = v1
+
+PACKAGES += subproc
+pkg_subproc_name = subproc
+pkg_subproc_description = UNIX subprocess manager with {active,once|false} modes
+pkg_subproc_homepage = http://dozzie.jarowit.net/trac/wiki/subproc
+pkg_subproc_fetch = git
+pkg_subproc_repo = https://github.com/dozzie/subproc
+pkg_subproc_commit = v0.1.0
+
+PACKAGES += supervisor3
+pkg_supervisor3_name = supervisor3
+pkg_supervisor3_description = OTP supervisor with additional strategies
+pkg_supervisor3_homepage = https://github.com/klarna/supervisor3
+pkg_supervisor3_fetch = git
+pkg_supervisor3_repo = https://github.com/klarna/supervisor3.git
+pkg_supervisor3_commit = master
+
+PACKAGES += surrogate
+pkg_surrogate_name = surrogate
+pkg_surrogate_description = Proxy server written in Erlang. Supports reverse proxy load balancing and forward proxy with HTTP (including CONNECT), SOCKS4, SOCKS5, and transparent proxy modes.
+pkg_surrogate_homepage = https://github.com/skruger/Surrogate
+pkg_surrogate_fetch = git
+pkg_surrogate_repo = https://github.com/skruger/Surrogate
+pkg_surrogate_commit = master
+
+PACKAGES += swab
+pkg_swab_name = swab
+pkg_swab_description = General purpose buffer handling module
+pkg_swab_homepage = https://github.com/crownedgrouse/swab
+pkg_swab_fetch = git
+pkg_swab_repo = https://github.com/crownedgrouse/swab
+pkg_swab_commit = master
+
+PACKAGES += swarm
+pkg_swarm_name = swarm
+pkg_swarm_description = Fast and simple acceptor pool for Erlang
+pkg_swarm_homepage = https://github.com/jeremey/swarm
+pkg_swarm_fetch = git
+pkg_swarm_repo = https://github.com/jeremey/swarm
+pkg_swarm_commit = master
+
+PACKAGES += switchboard
+pkg_switchboard_name = switchboard
+pkg_switchboard_description = A framework for processing email using worker plugins.
+pkg_switchboard_homepage = https://github.com/thusfresh/switchboard
+pkg_switchboard_fetch = git
+pkg_switchboard_repo = https://github.com/thusfresh/switchboard
+pkg_switchboard_commit = master
+
+PACKAGES += syn
+pkg_syn_name = syn
+pkg_syn_description = A global Process Registry and Process Group manager for Erlang.
+pkg_syn_homepage = https://github.com/ostinelli/syn
+pkg_syn_fetch = git
+pkg_syn_repo = https://github.com/ostinelli/syn
+pkg_syn_commit = master
+
+PACKAGES += sync
+pkg_sync_name = sync
+pkg_sync_description = On-the-fly recompiling and reloading in Erlang.
+pkg_sync_homepage = https://github.com/rustyio/sync
+pkg_sync_fetch = git
+pkg_sync_repo = https://github.com/rustyio/sync
+pkg_sync_commit = master
+
+PACKAGES += syntaxerl
+pkg_syntaxerl_name = syntaxerl
+pkg_syntaxerl_description = Syntax checker for Erlang
+pkg_syntaxerl_homepage = https://github.com/ten0s/syntaxerl
+pkg_syntaxerl_fetch = git
+pkg_syntaxerl_repo = https://github.com/ten0s/syntaxerl
+pkg_syntaxerl_commit = master
+
+PACKAGES += syslog
+pkg_syslog_name = syslog
+pkg_syslog_description = Erlang port driver for interacting with syslog via syslog(3)
+pkg_syslog_homepage = https://github.com/Vagabond/erlang-syslog
+pkg_syslog_fetch = git
+pkg_syslog_repo = https://github.com/Vagabond/erlang-syslog
+pkg_syslog_commit = master
+
+PACKAGES += taskforce
+pkg_taskforce_name = taskforce
+pkg_taskforce_description = Erlang worker pools for controlled parallelisation of arbitrary tasks.
+pkg_taskforce_homepage = https://github.com/g-andrade/taskforce
+pkg_taskforce_fetch = git
+pkg_taskforce_repo = https://github.com/g-andrade/taskforce
+pkg_taskforce_commit = master
+
+PACKAGES += tddreloader
+pkg_tddreloader_name = tddreloader
+pkg_tddreloader_description = Shell utility for recompiling, reloading, and testing code as it changes
+pkg_tddreloader_homepage = https://github.com/version2beta/tddreloader
+pkg_tddreloader_fetch = git
+pkg_tddreloader_repo = https://github.com/version2beta/tddreloader
+pkg_tddreloader_commit = master
+
+PACKAGES += tempo
+pkg_tempo_name = tempo
+pkg_tempo_description = NIF-based date and time parsing and formatting for Erlang.
+pkg_tempo_homepage = https://github.com/selectel/tempo
+pkg_tempo_fetch = git
+pkg_tempo_repo = https://github.com/selectel/tempo
+pkg_tempo_commit = master
+
+PACKAGES += ticktick
+pkg_ticktick_name = ticktick
+pkg_ticktick_description = Ticktick is an ID generator for message services.
+pkg_ticktick_homepage = https://github.com/ericliang/ticktick
+pkg_ticktick_fetch = git
+pkg_ticktick_repo = https://github.com/ericliang/ticktick
+pkg_ticktick_commit = master
+
+PACKAGES += tinymq
+pkg_tinymq_name = tinymq
+pkg_tinymq_description = TinyMQ - a diminutive, in-memory message queue
+pkg_tinymq_homepage = https://github.com/ChicagoBoss/tinymq
+pkg_tinymq_fetch = git
+pkg_tinymq_repo = https://github.com/ChicagoBoss/tinymq
+pkg_tinymq_commit = master
+
+PACKAGES += tinymt
+pkg_tinymt_name = tinymt
+pkg_tinymt_description = TinyMT pseudo random number generator for Erlang.
+pkg_tinymt_homepage = https://github.com/jj1bdx/tinymt-erlang
+pkg_tinymt_fetch = git
+pkg_tinymt_repo = https://github.com/jj1bdx/tinymt-erlang
+pkg_tinymt_commit = master
+
+PACKAGES += tirerl
+pkg_tirerl_name = tirerl
+pkg_tirerl_description = Erlang interface to Elasticsearch
+pkg_tirerl_homepage = https://github.com/inaka/tirerl
+pkg_tirerl_fetch = git
+pkg_tirerl_repo = https://github.com/inaka/tirerl
+pkg_tirerl_commit = master
+
+PACKAGES += toml
+pkg_toml_name = toml
+pkg_toml_description = TOML (0.4.0) config parser
+pkg_toml_homepage = http://dozzie.jarowit.net/trac/wiki/TOML
+pkg_toml_fetch = git
+pkg_toml_repo = https://github.com/dozzie/toml
+pkg_toml_commit = v0.2.0
+
+PACKAGES += traffic_tools
+pkg_traffic_tools_name = traffic_tools
+pkg_traffic_tools_description = Simple traffic limiting library
+pkg_traffic_tools_homepage = https://github.com/systra/traffic_tools
+pkg_traffic_tools_fetch = git
+pkg_traffic_tools_repo = https://github.com/systra/traffic_tools
+pkg_traffic_tools_commit = master
+
+PACKAGES += trails
+pkg_trails_name = trails
+pkg_trails_description = A couple of improvements over Cowboy Routes
+pkg_trails_homepage = http://inaka.github.io/cowboy-trails/
+pkg_trails_fetch = git
+pkg_trails_repo = https://github.com/inaka/cowboy-trails
+pkg_trails_commit = master
+
+PACKAGES += trane
+pkg_trane_name = trane
+pkg_trane_description = SAX-style broken-HTML parser in Erlang
+pkg_trane_homepage = https://github.com/massemanet/trane
+pkg_trane_fetch = git
+pkg_trane_repo = https://github.com/massemanet/trane
+pkg_trane_commit = master
+
+PACKAGES += transit
+pkg_transit_name = transit
+pkg_transit_description = Transit format for Erlang
+pkg_transit_homepage = https://github.com/isaiah/transit-erlang
+pkg_transit_fetch = git
+pkg_transit_repo = https://github.com/isaiah/transit-erlang
+pkg_transit_commit = master
+
+PACKAGES += trie
+pkg_trie_name = trie
+pkg_trie_description = Erlang Trie Implementation
+pkg_trie_homepage = https://github.com/okeuday/trie
+pkg_trie_fetch = git
+pkg_trie_repo = https://github.com/okeuday/trie
+pkg_trie_commit = master
+
+PACKAGES += triq
+pkg_triq_name = triq
+pkg_triq_description = Trifork QuickCheck
+pkg_triq_homepage = https://triq.gitlab.io
+pkg_triq_fetch = git
+pkg_triq_repo = https://gitlab.com/triq/triq.git
+pkg_triq_commit = master
+
+PACKAGES += tunctl
+pkg_tunctl_name = tunctl
+pkg_tunctl_description = Erlang TUN/TAP interface
+pkg_tunctl_homepage = https://github.com/msantos/tunctl
+pkg_tunctl_fetch = git
+pkg_tunctl_repo = https://github.com/msantos/tunctl
+pkg_tunctl_commit = master
+
+PACKAGES += twerl
+pkg_twerl_name = twerl
+pkg_twerl_description = Erlang client for the Twitter Streaming API
+pkg_twerl_homepage = https://github.com/lucaspiller/twerl
+pkg_twerl_fetch = git
+pkg_twerl_repo = https://github.com/lucaspiller/twerl
+pkg_twerl_commit = oauth
+
+PACKAGES += twitter_erlang
+pkg_twitter_erlang_name = twitter_erlang
+pkg_twitter_erlang_description = An Erlang Twitter client
+pkg_twitter_erlang_homepage = https://github.com/ngerakines/erlang_twitter
+pkg_twitter_erlang_fetch = git
+pkg_twitter_erlang_repo = https://github.com/ngerakines/erlang_twitter
+pkg_twitter_erlang_commit = master
+
+PACKAGES += ucol_nif
+pkg_ucol_nif_name = ucol_nif
+pkg_ucol_nif_description = ICU-based collation module for Erlang
+pkg_ucol_nif_homepage = https://github.com/refuge/ucol_nif
+pkg_ucol_nif_fetch = git
+pkg_ucol_nif_repo = https://github.com/refuge/ucol_nif
+pkg_ucol_nif_commit = master
+
+PACKAGES += unicorn
+pkg_unicorn_name = unicorn
+pkg_unicorn_description = Generic configuration server
+pkg_unicorn_homepage = https://github.com/shizzard/unicorn
+pkg_unicorn_fetch = git
+pkg_unicorn_repo = https://github.com/shizzard/unicorn
+pkg_unicorn_commit = master
+
+PACKAGES += unsplit
+pkg_unsplit_name = unsplit
+pkg_unsplit_description = Resolves conflicts in Mnesia after network splits
+pkg_unsplit_homepage = https://github.com/uwiger/unsplit
+pkg_unsplit_fetch = git
+pkg_unsplit_repo = https://github.com/uwiger/unsplit
+pkg_unsplit_commit = master
+
+PACKAGES += uuid
+pkg_uuid_name = uuid
+pkg_uuid_description = Erlang UUID Implementation
+pkg_uuid_homepage = https://github.com/okeuday/uuid
+pkg_uuid_fetch = git
+pkg_uuid_repo = https://github.com/okeuday/uuid
+pkg_uuid_commit = master
+
+PACKAGES += ux
+pkg_ux_name = ux
+pkg_ux_description = Unicode eXtension for Erlang (Strings, Collation)
+pkg_ux_homepage = https://github.com/erlang-unicode/ux
+pkg_ux_fetch = git
+pkg_ux_repo = https://github.com/erlang-unicode/ux
+pkg_ux_commit = master
+
+PACKAGES += vert
+pkg_vert_name = vert
+pkg_vert_description = Erlang binding to libvirt virtualization API
+pkg_vert_homepage = https://github.com/msantos/erlang-libvirt
+pkg_vert_fetch = git
+pkg_vert_repo = https://github.com/msantos/erlang-libvirt
+pkg_vert_commit = master
+
+PACKAGES += verx
+pkg_verx_name = verx
+pkg_verx_description = Erlang implementation of the libvirtd remote protocol
+pkg_verx_homepage = https://github.com/msantos/verx
+pkg_verx_fetch = git
+pkg_verx_repo = https://github.com/msantos/verx
+pkg_verx_commit = master
+
+PACKAGES += vmq_acl
+pkg_vmq_acl_name = vmq_acl
+pkg_vmq_acl_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_acl_homepage = https://verne.mq/
+pkg_vmq_acl_fetch = git
+pkg_vmq_acl_repo = https://github.com/erlio/vmq_acl
+pkg_vmq_acl_commit = master
+
+PACKAGES += vmq_bridge
+pkg_vmq_bridge_name = vmq_bridge
+pkg_vmq_bridge_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_bridge_homepage = https://verne.mq/
+pkg_vmq_bridge_fetch = git
+pkg_vmq_bridge_repo = https://github.com/erlio/vmq_bridge
+pkg_vmq_bridge_commit = master
+
+PACKAGES += vmq_graphite
+pkg_vmq_graphite_name = vmq_graphite
+pkg_vmq_graphite_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_graphite_homepage = https://verne.mq/
+pkg_vmq_graphite_fetch = git
+pkg_vmq_graphite_repo = https://github.com/erlio/vmq_graphite
+pkg_vmq_graphite_commit = master
+
+PACKAGES += vmq_passwd
+pkg_vmq_passwd_name = vmq_passwd
+pkg_vmq_passwd_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_passwd_homepage = https://verne.mq/
+pkg_vmq_passwd_fetch = git
+pkg_vmq_passwd_repo = https://github.com/erlio/vmq_passwd
+pkg_vmq_passwd_commit = master
+
+PACKAGES += vmq_server
+pkg_vmq_server_name = vmq_server
+pkg_vmq_server_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_server_homepage = https://verne.mq/
+pkg_vmq_server_fetch = git
+pkg_vmq_server_repo = https://github.com/erlio/vmq_server
+pkg_vmq_server_commit = master
+
+PACKAGES += vmq_snmp
+pkg_vmq_snmp_name = vmq_snmp
+pkg_vmq_snmp_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_snmp_homepage = https://verne.mq/
+pkg_vmq_snmp_fetch = git
+pkg_vmq_snmp_repo = https://github.com/erlio/vmq_snmp
+pkg_vmq_snmp_commit = master
+
+PACKAGES += vmq_systree
+pkg_vmq_systree_name = vmq_systree
+pkg_vmq_systree_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_systree_homepage = https://verne.mq/
+pkg_vmq_systree_fetch = git
+pkg_vmq_systree_repo = https://github.com/erlio/vmq_systree
+pkg_vmq_systree_commit = master
+
+PACKAGES += vmstats
+pkg_vmstats_name = vmstats
+pkg_vmstats_description = Tiny Erlang app that works in conjunction with statsderl to generate Erlang VM information for Graphite logs.
+pkg_vmstats_homepage = https://github.com/ferd/vmstats
+pkg_vmstats_fetch = git
+pkg_vmstats_repo = https://github.com/ferd/vmstats
+pkg_vmstats_commit = master
+
+PACKAGES += walrus
+pkg_walrus_name = walrus
+pkg_walrus_description = Walrus - Mustache-like Templating
+pkg_walrus_homepage = https://github.com/devinus/walrus
+pkg_walrus_fetch = git
+pkg_walrus_repo = https://github.com/devinus/walrus
+pkg_walrus_commit = master
+
+PACKAGES += webmachine
+pkg_webmachine_name = webmachine
+pkg_webmachine_description = A REST-based system for building web applications.
+pkg_webmachine_homepage = https://github.com/basho/webmachine
+pkg_webmachine_fetch = git
+pkg_webmachine_repo = https://github.com/basho/webmachine
+pkg_webmachine_commit = master
+
+PACKAGES += websocket_client
+pkg_websocket_client_name = websocket_client
+pkg_websocket_client_description = Erlang websocket client (ws and wss supported)
+pkg_websocket_client_homepage = https://github.com/jeremyong/websocket_client
+pkg_websocket_client_fetch = git
+pkg_websocket_client_repo = https://github.com/jeremyong/websocket_client
+pkg_websocket_client_commit = master
+
+PACKAGES += worker_pool
+pkg_worker_pool_name = worker_pool
+pkg_worker_pool_description = A simple Erlang worker pool
+pkg_worker_pool_homepage = https://github.com/inaka/worker_pool
+pkg_worker_pool_fetch = git
+pkg_worker_pool_repo = https://github.com/inaka/worker_pool
+pkg_worker_pool_commit = master
+
+PACKAGES += wrangler
+pkg_wrangler_name = wrangler
+pkg_wrangler_description = Import of the Wrangler SVN repository.
+pkg_wrangler_homepage = http://www.cs.kent.ac.uk/projects/wrangler/Home.html
+pkg_wrangler_fetch = git
+pkg_wrangler_repo = https://github.com/RefactoringTools/wrangler
+pkg_wrangler_commit = master
+
+PACKAGES += wsock
+pkg_wsock_name = wsock
+pkg_wsock_description = Erlang library to build WebSocket clients and servers
+pkg_wsock_homepage = https://github.com/madtrick/wsock
+pkg_wsock_fetch = git
+pkg_wsock_repo = https://github.com/madtrick/wsock
+pkg_wsock_commit = master
+
+PACKAGES += xhttpc
+pkg_xhttpc_name = xhttpc
+pkg_xhttpc_description = Extensible HTTP Client for Erlang
+pkg_xhttpc_homepage = https://github.com/seriyps/xhttpc
+pkg_xhttpc_fetch = git
+pkg_xhttpc_repo = https://github.com/seriyps/xhttpc
+pkg_xhttpc_commit = master
+
+PACKAGES += xref_runner
+pkg_xref_runner_name = xref_runner
+pkg_xref_runner_description = Erlang Xref Runner (inspired by rebar xref)
+pkg_xref_runner_homepage = https://github.com/inaka/xref_runner
+pkg_xref_runner_fetch = git
+pkg_xref_runner_repo = https://github.com/inaka/xref_runner
+pkg_xref_runner_commit = master
+
+PACKAGES += yamerl
+pkg_yamerl_name = yamerl
+pkg_yamerl_description = YAML 1.2 parser in pure Erlang
+pkg_yamerl_homepage = https://github.com/yakaz/yamerl
+pkg_yamerl_fetch = git
+pkg_yamerl_repo = https://github.com/yakaz/yamerl
+pkg_yamerl_commit = master
+
+PACKAGES += yamler
+pkg_yamler_name = yamler
+pkg_yamler_description = libyaml-based YAML loader for Erlang
+pkg_yamler_homepage = https://github.com/goertzenator/yamler
+pkg_yamler_fetch = git
+pkg_yamler_repo = https://github.com/goertzenator/yamler
+pkg_yamler_commit = master
+
+PACKAGES += yaws
+pkg_yaws_name = yaws
+pkg_yaws_description = Yaws webserver
+pkg_yaws_homepage = http://yaws.hyber.org
+pkg_yaws_fetch = git
+pkg_yaws_repo = https://github.com/klacke/yaws
+pkg_yaws_commit = master
+
+PACKAGES += zab_engine
+pkg_zab_engine_name = zab_engine
+pkg_zab_engine_description = ZAB protocol implementation in Erlang
+pkg_zab_engine_homepage = https://github.com/xinmingyao/zab_engine
+pkg_zab_engine_fetch = git
+pkg_zab_engine_repo = https://github.com/xinmingyao/zab_engine
+pkg_zab_engine_commit = master
+
+PACKAGES += zabbix_sender
+pkg_zabbix_sender_name = zabbix_sender
+pkg_zabbix_sender_description = Zabbix trapper for sending data to Zabbix in pure Erlang
+pkg_zabbix_sender_homepage = https://github.com/stalkermn/zabbix_sender
+pkg_zabbix_sender_fetch = git
+pkg_zabbix_sender_repo = https://github.com/stalkermn/zabbix_sender.git
+pkg_zabbix_sender_commit = master
+
+PACKAGES += zeta
+pkg_zeta_name = zeta
+pkg_zeta_description = HTTP access log parser in Erlang
+pkg_zeta_homepage = https://github.com/s1n4/zeta
+pkg_zeta_fetch = git
+pkg_zeta_repo = https://github.com/s1n4/zeta
+pkg_zeta_commit = master
+
+PACKAGES += zippers
+pkg_zippers_name = zippers
+pkg_zippers_description = A library for functional zipper data structures in Erlang.
+pkg_zippers_homepage = https://github.com/ferd/zippers
+pkg_zippers_fetch = git
+pkg_zippers_repo = https://github.com/ferd/zippers
+pkg_zippers_commit = master
+
+PACKAGES += zlists
+pkg_zlists_name = zlists
+pkg_zlists_description = Erlang lazy lists library.
+pkg_zlists_homepage = https://github.com/vjache/erlang-zlists
+pkg_zlists_fetch = git
+pkg_zlists_repo = https://github.com/vjache/erlang-zlists
+pkg_zlists_commit = master
+
+PACKAGES += zraft_lib
+pkg_zraft_lib_name = zraft_lib
+pkg_zraft_lib_description = Erlang Raft consensus protocol implementation
+pkg_zraft_lib_homepage = https://github.com/dreyk/zraft_lib
+pkg_zraft_lib_fetch = git
+pkg_zraft_lib_repo = https://github.com/dreyk/zraft_lib
+pkg_zraft_lib_commit = master
+
+PACKAGES += zucchini
+pkg_zucchini_name = zucchini
+pkg_zucchini_description = An Erlang INI parser
+pkg_zucchini_homepage = https://github.com/devinus/zucchini
+pkg_zucchini_fetch = git
+pkg_zucchini_repo = https://github.com/devinus/zucchini
+pkg_zucchini_commit = master
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: search
+
+define pkg_print
+ $(verbose) printf "%s\n" \
+ $(if $(call core_eq,$(1),$(pkg_$(1)_name)),,"Pkg name: $(1)") \
+ "App name: $(pkg_$(1)_name)" \
+ "Description: $(pkg_$(1)_description)" \
+ "Home page: $(pkg_$(1)_homepage)" \
+ "Fetch with: $(pkg_$(1)_fetch)" \
+ "Repository: $(pkg_$(1)_repo)" \
+ "Commit: $(pkg_$(1)_commit)" \
+ ""
+
+endef
+
+search:
+ifdef q
+ $(foreach p,$(PACKAGES), \
+ $(if $(findstring $(call core_lc,$(q)),$(call core_lc,$(pkg_$(p)_name) $(pkg_$(p)_description))), \
+ $(call pkg_print,$(p))))
+else
+ $(foreach p,$(PACKAGES),$(call pkg_print,$(p)))
+endif
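+
+# Usage sketch (illustrative, not part of the generated help): `q`
+# filters the package index case-insensitively by name and description.
+#
+#   make search q=webserver   # e.g. matches yaws ("Yaws webserver")
+#   make search               # without q, prints the entire index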
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-deps clean-tmp-deps.log
+
+# Configuration.
+
+ifdef OTP_DEPS
+$(warning The variable OTP_DEPS is deprecated in favor of LOCAL_DEPS.)
+endif
+
+IGNORE_DEPS ?=
+export IGNORE_DEPS
+
+APPS_DIR ?= $(CURDIR)/apps
+export APPS_DIR
+
+DEPS_DIR ?= $(CURDIR)/deps
+export DEPS_DIR
+
+REBAR_DEPS_DIR = $(DEPS_DIR)
+export REBAR_DEPS_DIR
+
+REBAR_GIT ?= https://github.com/rebar/rebar
+REBAR_COMMIT ?= 576e12171ab8d69b048b827b92aa65d067deea01
+
+# External "early" plugins (see core/plugins.mk for regular plugins).
+# They both use the core_dep_plugin macro.
+
+define core_dep_plugin
+ifeq ($(2),$(PROJECT))
+-include $$(patsubst $(PROJECT)/%,%,$(1))
+else
+-include $(DEPS_DIR)/$(1)
+
+$(DEPS_DIR)/$(1): $(DEPS_DIR)/$(2) ;
+endif
+endef
+
+DEP_EARLY_PLUGINS ?=
+
+$(foreach p,$(DEP_EARLY_PLUGINS),\
+ $(eval $(if $(findstring /,$p),\
+ $(call core_dep_plugin,$p,$(firstword $(subst /, ,$p))),\
+ $(call core_dep_plugin,$p/early-plugins.mk,$p))))
+
+# Query functions.
+
+query_fetch_method = $(if $(dep_$(1)),$(call _qfm_dep,$(word 1,$(dep_$(1)))),$(call _qfm_pkg,$(1)))
+_qfm_dep = $(if $(dep_fetch_$(1)),$(1),$(if $(IS_DEP),legacy,fail))
+_qfm_pkg = $(if $(pkg_$(1)_fetch),$(pkg_$(1)_fetch),fail)
+
+query_name = $(if $(dep_$(1)),$(1),$(if $(pkg_$(1)_name),$(pkg_$(1)_name),$(1)))
+
+query_repo = $(call _qr,$(1),$(call query_fetch_method,$(1)))
+_qr = $(if $(query_repo_$(2)),$(call query_repo_$(2),$(1)),$(call dep_repo,$(1)))
+
+query_repo_default = $(if $(dep_$(1)),$(word 2,$(dep_$(1))),$(pkg_$(1)_repo))
+query_repo_git = $(patsubst git://github.com/%,https://github.com/%,$(call query_repo_default,$(1)))
+query_repo_git-subfolder = $(call query_repo_git,$(1))
+query_repo_git-submodule = -
+query_repo_hg = $(call query_repo_default,$(1))
+query_repo_svn = $(call query_repo_default,$(1))
+query_repo_cp = $(call query_repo_default,$(1))
+query_repo_ln = $(call query_repo_default,$(1))
+query_repo_hex = https://hex.pm/packages/$(if $(word 3,$(dep_$(1))),$(word 3,$(dep_$(1))),$(1))
+query_repo_fail = -
+query_repo_legacy = -
+
+query_version = $(call _qv,$(1),$(call query_fetch_method,$(1)))
+_qv = $(if $(query_version_$(2)),$(call query_version_$(2),$(1)),$(call dep_commit,$(1)))
+
+query_version_default = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(word 3,$(dep_$(1))),$(pkg_$(1)_commit)))
+query_version_git = $(call query_version_default,$(1))
+query_version_git-subfolder = $(call query_version_git,$(1))
+query_version_git-submodule = -
+query_version_hg = $(call query_version_default,$(1))
+query_version_svn = -
+query_version_cp = -
+query_version_ln = -
+query_version_hex = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(word 2,$(dep_$(1))),$(pkg_$(1)_commit)))
+query_version_fail = -
+query_version_legacy = -
+
+query_extra = $(call _qe,$(1),$(call query_fetch_method,$(1)))
+_qe = $(if $(query_extra_$(2)),$(call query_extra_$(2),$(1)),-)
+
+query_extra_git = -
+query_extra_git-subfolder = $(if $(dep_$(1)),subfolder=$(word 4,$(dep_$(1))),-)
+query_extra_git-submodule = -
+query_extra_hg = -
+query_extra_svn = -
+query_extra_cp = -
+query_extra_ln = -
+query_extra_hex = $(if $(dep_$(1)),package-name=$(word 3,$(dep_$(1))),-)
+query_extra_fail = -
+query_extra_legacy = -
+
+query_absolute_path = $(addprefix $(DEPS_DIR)/,$(call query_name,$(1)))
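+
+# Worked example (hypothetical dependency, for illustration only). Given:
+#
+#   dep_mydep = git https://github.com/user/mydep v1.0
+#
+# the query functions resolve to:
+#
+#   $(call query_fetch_method,mydep)  -> git
+#   $(call query_repo,mydep)          -> https://github.com/user/mydep
+#   $(call query_version,mydep)       -> v1.0
+#   $(call query_absolute_path,mydep) -> $(DEPS_DIR)/mydep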
+
+# Deprecated legacy query functions.
+dep_fetch = $(call query_fetch_method,$(1))
+dep_name = $(call query_name,$(1))
+dep_repo = $(call query_repo_git,$(1))
+dep_commit = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(if $(filter hex,$(word 1,$(dep_$(1)))),$(word 2,$(dep_$(1))),$(word 3,$(dep_$(1)))),$(pkg_$(1)_commit)))
+
+LOCAL_DEPS_DIRS = $(foreach a,$(LOCAL_DEPS),$(if $(wildcard $(APPS_DIR)/$(a)),$(APPS_DIR)/$(a)))
+ALL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(foreach dep,$(filter-out $(IGNORE_DEPS),$(BUILD_DEPS) $(DEPS)),$(call dep_name,$(dep))))
+
+# When we are calling an app directly we don't want to include it here,
+# otherwise it'll be treated both as an app and as a top-level project.
+ALL_APPS_DIRS = $(if $(wildcard $(APPS_DIR)/),$(filter-out $(APPS_DIR),$(shell find $(APPS_DIR) -maxdepth 1 -type d)))
+ifdef ROOT_DIR
+ifndef IS_APP
+ALL_APPS_DIRS := $(filter-out $(APPS_DIR)/$(notdir $(CURDIR)),$(ALL_APPS_DIRS))
+endif
+endif
+
+ifeq ($(filter $(APPS_DIR) $(DEPS_DIR),$(subst :, ,$(ERL_LIBS))),)
+ifeq ($(ERL_LIBS),)
+ ERL_LIBS = $(APPS_DIR):$(DEPS_DIR)
+else
+ ERL_LIBS := $(ERL_LIBS):$(APPS_DIR):$(DEPS_DIR)
+endif
+endif
+export ERL_LIBS
+
+export NO_AUTOPATCH
+
+# Verbosity.
+
+dep_verbose_0 = @echo " DEP $1 ($(call dep_commit,$1))";
+dep_verbose_2 = set -x;
+dep_verbose = $(dep_verbose_$(V))
+
+# Optimization: don't recompile deps unless truly necessary.
+
+ifndef IS_DEP
+ifneq ($(MAKELEVEL),0)
+$(shell rm -f ebin/dep_built)
+endif
+endif
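+
+# (ebin/dep_built is the marker file consulted by the `deps` recipe
+# below; removing it in recursive invocations that are not dependency
+# builds means only true dependencies can skip recompilation.)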
+
+# Core targets.
+
+ALL_APPS_DIRS_TO_BUILD = $(if $(LOCAL_DEPS_DIRS)$(IS_APP),$(LOCAL_DEPS_DIRS),$(ALL_APPS_DIRS))
+
+apps:: $(ALL_APPS_DIRS) clean-tmp-deps.log | $(ERLANG_MK_TMP)
+# Create the ebin directory for all apps to make sure Erlang recognizes
+# them as proper OTP applications when using -include_lib. This is a
+# temporary fix; a proper fix would be to compile apps/* in the right order.
+ifndef IS_APP
+ifneq ($(ALL_APPS_DIRS),)
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ mkdir -p $$dep/ebin; \
+ done
+endif
+endif
+# At the top level: if LOCAL_DEPS is defined with at least one local app,
+# only compile that list of apps. Otherwise, compile everything.
+# Within an app: compile all LOCAL_DEPS that are (uncompiled) local apps.
+ifneq ($(ALL_APPS_DIRS_TO_BUILD),)
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS_TO_BUILD); do \
+ if grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/apps.log; then \
+ :; \
+ else \
+ echo $$dep >> $(ERLANG_MK_TMP)/apps.log; \
+ $(MAKE) -C $$dep $(if $(IS_TEST),test-build-app) IS_APP=1; \
+ fi \
+ done
+endif
+
+clean-tmp-deps.log:
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_TMP)/apps.log $(ERLANG_MK_TMP)/deps.log
+endif
+
+# Erlang.mk does not rebuild dependencies once they have been compiled.
+# A developer working on the top-level project and some of its
+# dependencies at the same time may want to change this behavior.
+# There are two solutions (see the usage sketch further below):
+#     1. Set `FULL=1` so that all dependencies are visited and
+#        recursively recompiled if necessary.
+#     2. Set `FORCE_REBUILD=` to the specific list of dependencies that
+#        should be recompiled (instead of the whole set).
+
+FORCE_REBUILD ?=
+
+ifeq ($(origin FULL),undefined)
+ifneq ($(strip $(force_rebuild_dep)$(FORCE_REBUILD)),)
+define force_rebuild_dep
+echo "$(FORCE_REBUILD)" | grep -qw "$$(basename "$1")"
+endef
+endif
+endif
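+
+# Usage sketch for the two solutions above (dependency names are
+# illustrative):
+#
+#   make FULL=1                          # visit and recompile all deps
+#   make FORCE_REBUILD="mydep otherdep"  # recompile only these deps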
+
+ifneq ($(SKIP_DEPS),)
+deps::
+else
+deps:: $(ALL_DEPS_DIRS) apps clean-tmp-deps.log | $(ERLANG_MK_TMP)
+ifneq ($(ALL_DEPS_DIRS),)
+ $(verbose) set -e; for dep in $(ALL_DEPS_DIRS); do \
+ if grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/deps.log; then \
+ :; \
+ else \
+ echo $$dep >> $(ERLANG_MK_TMP)/deps.log; \
+ if [ -z "$(strip $(FULL))" ] $(if $(force_rebuild_dep),&& ! ($(call force_rebuild_dep,$$dep)),) && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ elif [ -f $$dep/GNUmakefile ] || [ -f $$dep/makefile ] || [ -f $$dep/Makefile ]; then \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ else \
+ echo "Error: No Makefile to build dependency $$dep." >&2; \
+ exit 2; \
+ fi \
+ fi \
+ done
+endif
+endif
+
+# Deps related targets.
+
+# @todo rename GNUmakefile and makefile into Makefile first, if they exist
+# While the Makefile could also be named GNUmakefile or makefile,
+# in practice only Makefile has been needed so far.
+define dep_autopatch
+ if [ -f $(DEPS_DIR)/$(1)/erlang.mk ]; then \
+ rm -rf $(DEPS_DIR)/$1/ebin/; \
+ $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+ $(call dep_autopatch_erlang_mk,$(1)); \
+ elif [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+ if [ -f $(DEPS_DIR)/$1/rebar.lock ]; then \
+ $(call dep_autopatch2,$1); \
+ elif [ 0 != `grep -c "include ../\w*\.mk" $(DEPS_DIR)/$(1)/Makefile` ]; then \
+ $(call dep_autopatch2,$(1)); \
+ elif [ 0 != `grep -ci "^[^#].*rebar" $(DEPS_DIR)/$(1)/Makefile` ]; then \
+ $(call dep_autopatch2,$(1)); \
+ elif [ -n "`find $(DEPS_DIR)/$(1)/ -type f -name \*.mk -not -name erlang.mk -exec grep -i "^[^#].*rebar" '{}' \;`" ]; then \
+ $(call dep_autopatch2,$(1)); \
+ fi \
+ else \
+ if [ ! -d $(DEPS_DIR)/$(1)/src/ ]; then \
+ $(call dep_autopatch_noop,$(1)); \
+ else \
+ $(call dep_autopatch2,$(1)); \
+ fi \
+ fi
+endef
+
+define dep_autopatch2
+ ! test -f $(DEPS_DIR)/$1/ebin/$1.app || \
+ mv -n $(DEPS_DIR)/$1/ebin/$1.app $(DEPS_DIR)/$1/src/$1.app.src; \
+ rm -f $(DEPS_DIR)/$1/ebin/$1.app; \
+ if [ -f $(DEPS_DIR)/$1/src/$1.app.src.script ]; then \
+ $(call erlang,$(call dep_autopatch_appsrc_script.erl,$(1))); \
+ fi; \
+ $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+ if [ -f $(DEPS_DIR)/$(1)/rebar -o -f $(DEPS_DIR)/$(1)/rebar.config -o -f $(DEPS_DIR)/$(1)/rebar.config.script -o -f $(DEPS_DIR)/$1/rebar.lock ]; then \
+ $(call dep_autopatch_fetch_rebar); \
+ $(call dep_autopatch_rebar,$(1)); \
+ else \
+ $(call dep_autopatch_gen,$(1)); \
+ fi
+endef
+
+define dep_autopatch_noop
+ printf "noop:\n" > $(DEPS_DIR)/$(1)/Makefile
+endef
+
+# Replace "include erlang.mk" with a line that will load the parent
+# Erlang.mk if given. Do this for all 3 possible Makefile file names.
+ifeq ($(NO_AUTOPATCH_ERLANG_MK),)
+define dep_autopatch_erlang_mk
+ for f in Makefile makefile GNUmakefile; do \
+ if [ -f $(DEPS_DIR)/$1/$$f ]; then \
+ sed -i.bak s/'include *erlang.mk'/'include $$(if $$(ERLANG_MK_FILENAME),$$(ERLANG_MK_FILENAME),erlang.mk)'/ $(DEPS_DIR)/$1/$$f; \
+ fi \
+ done
+endef
+else
+define dep_autopatch_erlang_mk
+ :
+endef
+endif
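+
+# Effect of the patch above on a dependency's Makefile (sketch):
+#
+#   include erlang.mk
+#
+# becomes:
+#
+#   include $(if $(ERLANG_MK_FILENAME),$(ERLANG_MK_FILENAME),erlang.mk)
+#
+# so the dependency reuses the parent project's Erlang.mk when one is
+# set. Define NO_AUTOPATCH_ERLANG_MK to any non-empty value to skip
+# this patch.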
+
+define dep_autopatch_gen
+ printf "%s\n" \
+ "ERLC_OPTS = +debug_info" \
+ "include ../../erlang.mk" > $(DEPS_DIR)/$(1)/Makefile
+endef
+
+# We use flock/lockf when available to avoid concurrency issues.
+define dep_autopatch_fetch_rebar
+ if command -v flock >/dev/null; then \
+ flock $(ERLANG_MK_TMP)/rebar.lock sh -c "$(call dep_autopatch_fetch_rebar2)"; \
+ elif command -v lockf >/dev/null; then \
+ lockf $(ERLANG_MK_TMP)/rebar.lock sh -c "$(call dep_autopatch_fetch_rebar2)"; \
+ else \
+ $(call dep_autopatch_fetch_rebar2); \
+ fi
+endef
+
+define dep_autopatch_fetch_rebar2
+ if [ ! -d $(ERLANG_MK_TMP)/rebar ]; then \
+ git clone -q -n -- $(REBAR_GIT) $(ERLANG_MK_TMP)/rebar; \
+ cd $(ERLANG_MK_TMP)/rebar; \
+ git checkout -q $(REBAR_COMMIT); \
+ ./bootstrap; \
+ cd -; \
+ fi
+endef
+
+define dep_autopatch_rebar
+ if [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+ mv $(DEPS_DIR)/$(1)/Makefile $(DEPS_DIR)/$(1)/Makefile.orig.mk; \
+ fi; \
+ $(call erlang,$(call dep_autopatch_rebar.erl,$(1))); \
+ rm -f $(DEPS_DIR)/$(1)/ebin/$(1).app
+endef
+
+define dep_autopatch_rebar.erl
+ application:load(rebar),
+ application:set_env(rebar, log_level, debug),
+ rmemo:start(),
+ Conf1 = case file:consult("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config)") of
+ {ok, Conf0} -> Conf0;
+ _ -> []
+ end,
+ {Conf, OsEnv} = fun() ->
+ case filelib:is_file("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)") of
+ false -> {Conf1, []};
+ true ->
+ Bindings0 = erl_eval:new_bindings(),
+ Bindings1 = erl_eval:add_binding('CONFIG', Conf1, Bindings0),
+ Bindings = erl_eval:add_binding('SCRIPT', "$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings1),
+ Before = os:getenv(),
+ {ok, Conf2} = file:script("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings),
+ {Conf2, lists:foldl(fun(E, Acc) -> lists:delete(E, Acc) end, os:getenv(), Before)}
+ end
+ end(),
+ Write = fun (Text) ->
+ file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/Makefile)", Text, [append])
+ end,
+ Escape = fun (Text) ->
+ re:replace(Text, "\\\\$$", "\$$$$", [global, {return, list}])
+ end,
+ Write("IGNORE_DEPS += edown eper eunit_formatters meck node_package "
+ "rebar_lock_deps_plugin rebar_vsn_plugin reltool_util\n"),
+ Write("C_SRC_DIR = /path/do/not/exist\n"),
+ Write("C_SRC_TYPE = rebar\n"),
+ Write("DRV_CFLAGS = -fPIC\nexport DRV_CFLAGS\n"),
+ Write(["ERLANG_ARCH = ", rebar_utils:wordsize(), "\nexport ERLANG_ARCH\n"]),
+ ToList = fun
+ (V) when is_atom(V) -> atom_to_list(V);
+ (V) when is_list(V) -> "'\\"" ++ V ++ "\\"'"
+ end,
+ fun() ->
+ Write("ERLC_OPTS = +debug_info\nexport ERLC_OPTS\n"),
+ case lists:keyfind(erl_opts, 1, Conf) of
+ false -> ok;
+ {_, ErlOpts} ->
+ lists:foreach(fun
+ ({d, D}) ->
+ Write("ERLC_OPTS += -D" ++ ToList(D) ++ "=1\n");
+ ({d, DKey, DVal}) ->
+ Write("ERLC_OPTS += -D" ++ ToList(DKey) ++ "=" ++ ToList(DVal) ++ "\n");
+ ({i, I}) ->
+ Write(["ERLC_OPTS += -I ", I, "\n"]);
+ ({platform_define, Regex, D}) ->
+ case rebar_utils:is_arch(Regex) of
+ true -> Write("ERLC_OPTS += -D" ++ ToList(D) ++ "=1\n");
+ false -> ok
+ end;
+ ({parse_transform, PT}) ->
+ Write("ERLC_OPTS += +'{parse_transform, " ++ ToList(PT) ++ "}'\n");
+ (_) -> ok
+ end, ErlOpts)
+ end,
+ Write("\n")
+ end(),
+ GetHexVsn = fun(N, NP) ->
+ case file:consult("$(call core_native_path,$(DEPS_DIR)/$1/rebar.lock)") of
+ {ok, Lock} ->
+ io:format("~p~n", [Lock]),
+ case lists:keyfind("1.1.0", 1, Lock) of
+ {_, LockPkgs} ->
+ io:format("~p~n", [LockPkgs]),
+ case lists:keyfind(atom_to_binary(N, latin1), 1, LockPkgs) of
+ {_, {pkg, _, Vsn}, _} ->
+ io:format("~p~n", [Vsn]),
+ {N, {hex, NP, binary_to_list(Vsn)}};
+ _ ->
+ false
+ end;
+ _ ->
+ false
+ end;
+ _ ->
+ false
+ end
+ end,
+ SemVsn = fun
+ ("~>" ++ S0) ->
+ S = case S0 of
+ " " ++ S1 -> S1;
+ _ -> S0
+ end,
+ case length([ok || $$. <- S]) of
+ 0 -> S ++ ".0.0";
+ 1 -> S ++ ".0";
+ _ -> S
+ end;
+ (S) -> S
+ end,
+ fun() ->
+ File = case lists:keyfind(deps, 1, Conf) of
+ false -> [];
+ {_, Deps} ->
+ [begin case case Dep of
+ N when is_atom(N) -> GetHexVsn(N, N);
+ {N, S} when is_atom(N), is_list(S) -> {N, {hex, N, SemVsn(S)}};
+ {N, {pkg, NP}} when is_atom(N) -> GetHexVsn(N, NP);
+ {N, S, {pkg, NP}} -> {N, {hex, NP, S}};
+ {N, S} when is_tuple(S) -> {N, S};
+ {N, _, S} -> {N, S};
+ {N, _, S, _} -> {N, S};
+ _ -> false
+ end of
+ false -> ok;
+ {Name, Source} ->
+ {Method, Repo, Commit} = case Source of
+ {hex, NPV, V} -> {hex, V, NPV};
+ {git, R} -> {git, R, master};
+ {M, R, {branch, C}} -> {M, R, C};
+ {M, R, {ref, C}} -> {M, R, C};
+ {M, R, {tag, C}} -> {M, R, C};
+ {M, R, C} -> {M, R, C}
+ end,
+ Write(io_lib:format("DEPS += ~s\ndep_~s = ~s ~s ~s~n", [Name, Name, Method, Repo, Commit]))
+ end end || Dep <- Deps]
+ end
+ end(),
+ fun() ->
+ case lists:keyfind(erl_first_files, 1, Conf) of
+ false -> ok;
+ {_, Files} ->
+ Names = [[" ", case lists:reverse(F) of
+ "lre." ++ Elif -> lists:reverse(Elif);
+ "lrx." ++ Elif -> lists:reverse(Elif);
+ "lry." ++ Elif -> lists:reverse(Elif);
+ Elif -> lists:reverse(Elif)
+ end] || "src/" ++ F <- Files],
+ Write(io_lib:format("COMPILE_FIRST +=~s\n", [Names]))
+ end
+ end(),
+ Write("\n\nrebar_dep: preprocess pre-deps deps pre-app app\n"),
+ Write("\npreprocess::\n"),
+ Write("\npre-deps::\n"),
+ Write("\npre-app::\n"),
+ PatchHook = fun(Cmd) ->
+ Cmd2 = re:replace(Cmd, "^([g]?make)(.*)( -C.*)", "\\\\1\\\\3\\\\2", [{return, list}]),
+ case Cmd2 of
+ "make -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1);
+ "gmake -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1);
+ "make " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1);
+ "gmake " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1);
+ _ -> Escape(Cmd)
+ end
+ end,
+ fun() ->
+ case lists:keyfind(pre_hooks, 1, Conf) of
+ false -> ok;
+ {_, Hooks} ->
+ [case H of
+ {'get-deps', Cmd} ->
+ Write("\npre-deps::\n\t" ++ PatchHook(Cmd) ++ "\n");
+ {compile, Cmd} ->
+ Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n");
+ {Regex, compile, Cmd} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n");
+ false -> ok
+ end;
+ _ -> ok
+ end || H <- Hooks]
+ end
+ end(),
+ ShellToMk = fun(V0) ->
+ V1 = re:replace(V0, "[$$][(]", "$$\(shell ", [global]),
+ V = re:replace(V1, "([$$])(?![(])(\\\\w*)", "\\\\1(\\\\2)", [global]),
+ re:replace(V, "-Werror\\\\b", "", [{return, list}, global])
+ end,
+ PortSpecs = fun() ->
+ case lists:keyfind(port_specs, 1, Conf) of
+ false ->
+ case filelib:is_dir("$(call core_native_path,$(DEPS_DIR)/$1/c_src)") of
+ false -> [];
+ true ->
+ [{"priv/" ++ proplists:get_value(so_name, Conf, "$(1)_drv.so"),
+ proplists:get_value(port_sources, Conf, ["c_src/*.c"]), []}]
+ end;
+ {_, Specs} ->
+ lists:flatten([case S of
+ {Output, Input} -> {ShellToMk(Output), Input, []};
+ {Regex, Output, Input} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {ShellToMk(Output), Input, []};
+ false -> []
+ end;
+ {Regex, Output, Input, [{env, Env}]} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {ShellToMk(Output), Input, Env};
+ false -> []
+ end
+ end || S <- Specs])
+ end
+ end(),
+ PortSpecWrite = fun (Text) ->
+ file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/c_src/Makefile.erlang.mk)", Text, [append])
+ end,
+ case PortSpecs of
+ [] -> ok;
+ _ ->
+ Write("\npre-app::\n\t@$$\(MAKE) --no-print-directory -f c_src/Makefile.erlang.mk\n"),
+ PortSpecWrite(io_lib:format("ERL_CFLAGS ?= -finline-functions -Wall -fPIC -I \\"~s/erts-~s/include\\" -I \\"~s\\"\n",
+ [code:root_dir(), erlang:system_info(version), code:lib_dir(erl_interface, include)])),
+ PortSpecWrite(io_lib:format("ERL_LDFLAGS ?= -L \\"~s\\" -lei\n",
+ [code:lib_dir(erl_interface, lib)])),
+ [PortSpecWrite(["\n", E, "\n"]) || E <- OsEnv],
+ FilterEnv = fun(Env) ->
+ lists:flatten([case E of
+ {_, _} -> E;
+ {Regex, K, V} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {K, V};
+ false -> []
+ end
+ end || E <- Env])
+ end,
+ MergeEnv = fun(Env) ->
+ lists:foldl(fun ({K, V}, Acc) ->
+ case lists:keyfind(K, 1, Acc) of
+ false -> [{K, rebar_utils:expand_env_variable(V, K, "")}|Acc];
+ {_, V0} -> [{K, rebar_utils:expand_env_variable(V, K, V0)}|Acc]
+ end
+ end, [], Env)
+ end,
+ PortEnv = case lists:keyfind(port_env, 1, Conf) of
+ false -> [];
+ {_, PortEnv0} -> FilterEnv(PortEnv0)
+ end,
+ PortSpec = fun ({Output, Input0, Env}) ->
+ filelib:ensure_dir("$(call core_native_path,$(DEPS_DIR)/$1/)" ++ Output),
+ Input = [[" ", I] || I <- Input0],
+ PortSpecWrite([
+ [["\n", K, " = ", ShellToMk(V)] || {K, V} <- lists:reverse(MergeEnv(PortEnv))],
+ case $(PLATFORM) of
+ darwin -> "\n\nLDFLAGS += -flat_namespace -undefined suppress";
+ _ -> ""
+ end,
+ "\n\nall:: ", Output, "\n\t@:\n\n",
+ "%.o: %.c\n\t$$\(CC) -c -o $$\@ $$\< $$\(CFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.C\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.cc\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.cpp\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ [[Output, ": ", K, " += ", ShellToMk(V), "\n"] || {K, V} <- lists:reverse(MergeEnv(FilterEnv(Env)))],
+ Output, ": $$\(foreach ext,.c .C .cc .cpp,",
+ "$$\(patsubst %$$\(ext),%.o,$$\(filter %$$\(ext),$$\(wildcard", Input, "))))\n",
+ "\t$$\(CC) -o $$\@ $$\? $$\(LDFLAGS) $$\(ERL_LDFLAGS) $$\(DRV_LDFLAGS) $$\(EXE_LDFLAGS)",
+ case {filename:extension(Output), $(PLATFORM)} of
+ {[], _} -> "\n";
+ {_, darwin} -> "\n";
+ _ -> " -shared\n"
+ end])
+ end,
+ [PortSpec(S) || S <- PortSpecs]
+ end,
+ fun() ->
+ case lists:keyfind(plugins, 1, Conf) of
+ false -> ok;
+ {_, Plugins0} ->
+ Plugins = [P || P <- Plugins0, is_tuple(P)],
+ case lists:keyfind('lfe-compile', 1, Plugins) of
+ false -> ok;
+ _ -> Write("\nBUILD_DEPS = lfe lfe.mk\ndep_lfe.mk = git https://github.com/ninenines/lfe.mk master\nDEP_PLUGINS = lfe.mk\n")
+ end
+ end
+ end(),
+ Write("\ninclude $$\(if $$\(ERLANG_MK_FILENAME),$$\(ERLANG_MK_FILENAME),erlang.mk)"),
+ RunPlugin = fun(Plugin, Step) ->
+ case erlang:function_exported(Plugin, Step, 2) of
+ false -> ok;
+ true ->
+ c:cd("$(call core_native_path,$(DEPS_DIR)/$1/)"),
+ Ret = Plugin:Step({config, "", Conf, dict:new(), dict:new(), dict:new(),
+ dict:store(base_dir, "", dict:new())}, undefined),
+ io:format("rebar plugin ~p step ~p ret ~p~n", [Plugin, Step, Ret])
+ end
+ end,
+ fun() ->
+ case lists:keyfind(plugins, 1, Conf) of
+ false -> ok;
+ {_, Plugins0} ->
+ Plugins = [P || P <- Plugins0, is_atom(P)],
+ [begin
+ case lists:keyfind(deps, 1, Conf) of
+ false -> ok;
+ {_, Deps} ->
+ case lists:keyfind(P, 1, Deps) of
+ false -> ok;
+ _ ->
+ Path = "$(call core_native_path,$(DEPS_DIR)/)" ++ atom_to_list(P),
+ io:format("~s", [os:cmd("$(MAKE) -C $(call core_native_path,$(DEPS_DIR)/$1) " ++ Path)]),
+ io:format("~s", [os:cmd("$(MAKE) -C " ++ Path ++ " IS_DEP=1")]),
+ code:add_patha(Path ++ "/ebin")
+ end
+ end
+ end || P <- Plugins],
+ [case code:load_file(P) of
+ {module, P} -> ok;
+ _ ->
+ case lists:keyfind(plugin_dir, 1, Conf) of
+ false -> ok;
+ {_, PluginsDir} ->
+ ErlFile = "$(call core_native_path,$(DEPS_DIR)/$1/)" ++ PluginsDir ++ "/" ++ atom_to_list(P) ++ ".erl",
+ {ok, P, Bin} = compile:file(ErlFile, [binary]),
+ {module, P} = code:load_binary(P, ErlFile, Bin)
+ end
+ end || P <- Plugins],
+ [RunPlugin(P, preprocess) || P <- Plugins],
+ [RunPlugin(P, pre_compile) || P <- Plugins],
+ [RunPlugin(P, compile) || P <- Plugins]
+ end
+ end(),
+ halt()
+endef
+
+define dep_autopatch_appsrc_script.erl
+ AppSrc = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+ AppSrcScript = AppSrc ++ ".script",
+ {ok, Conf0} = file:consult(AppSrc),
+ Bindings0 = erl_eval:new_bindings(),
+ Bindings1 = erl_eval:add_binding('CONFIG', Conf0, Bindings0),
+ Bindings = erl_eval:add_binding('SCRIPT', AppSrcScript, Bindings1),
+ Conf = case file:script(AppSrcScript, Bindings) of
+ {ok, [C]} -> C;
+ {ok, C} -> C
+ end,
+ ok = file:write_file(AppSrc, io_lib:format("~p.~n", [Conf])),
+ halt()
+endef
+
+define dep_autopatch_appsrc.erl
+ AppSrcOut = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+ AppSrcIn = case filelib:is_regular(AppSrcOut) of false -> "$(call core_native_path,$(DEPS_DIR)/$1/ebin/$1.app)"; true -> AppSrcOut end,
+ case filelib:is_regular(AppSrcIn) of
+ false -> ok;
+ true ->
+ {ok, [{application, $(1), L0}]} = file:consult(AppSrcIn),
+ L1 = lists:keystore(modules, 1, L0, {modules, []}),
+ L2 = case lists:keyfind(vsn, 1, L1) of
+ {_, git} -> lists:keyreplace(vsn, 1, L1, {vsn, lists:droplast(os:cmd("git -C $(DEPS_DIR)/$1 describe --dirty --tags --always"))});
+ {_, {cmd, _}} -> lists:keyreplace(vsn, 1, L1, {vsn, "cmd"});
+ _ -> L1
+ end,
+ L3 = case lists:keyfind(registered, 1, L2) of false -> [{registered, []}|L2]; _ -> L2 end,
+ ok = file:write_file(AppSrcOut, io_lib:format("~p.~n", [{application, $(1), L3}])),
+ case AppSrcOut of AppSrcIn -> ok; _ -> ok = file:delete(AppSrcIn) end
+ end,
+ halt()
+endef
+
+define dep_fetch_git
+ git clone -q -n -- $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && git checkout -q $(call dep_commit,$(1));
+endef
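+
+# A dependency using this fetch method is declared as below (the name
+# and URL are illustrative); the three words are the fetch method, the
+# repository and the commit/tag/branch to check out:
+#
+#   DEPS += mydep
+#   dep_mydep = git https://github.com/user/mydep master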
+
+define dep_fetch_git-subfolder
+ mkdir -p $(ERLANG_MK_TMP)/git-subfolder; \
+ git clone -q -n -- $(call dep_repo,$1) \
+ $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1); \
+ cd $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1) \
+ && git checkout -q $(call dep_commit,$1); \
+ ln -s $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1)/$(word 4,$(dep_$(1))) \
+ $(DEPS_DIR)/$(call dep_name,$1);
+endef
+
+define dep_fetch_git-submodule
+ git submodule update --init -- $(DEPS_DIR)/$1;
+endef
+
+define dep_fetch_hg
+ hg clone -q -U $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && hg update -q $(call dep_commit,$(1));
+endef
+
+define dep_fetch_svn
+ svn checkout -q $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+define dep_fetch_cp
+ cp -R $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+define dep_fetch_ln
+ ln -s $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+# Hex dependencies carry their own package version, so there is no need
+# to look them up in the Erlang.mk package index.
+define dep_fetch_hex
+ mkdir -p $(ERLANG_MK_TMP)/hex $(DEPS_DIR)/$1; \
+ $(call core_http_get,$(ERLANG_MK_TMP)/hex/$1.tar,\
+ https://repo.hex.pm/tarballs/$(if $(word 3,$(dep_$1)),$(word 3,$(dep_$1)),$1)-$(strip $(word 2,$(dep_$1))).tar); \
+ tar -xOf $(ERLANG_MK_TMP)/hex/$1.tar contents.tar.gz | tar -C $(DEPS_DIR)/$1 -xzf -;
+endef
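+
+# Hex dependency sketch (package name and version are illustrative);
+# the optional third word overrides the package name on hex.pm:
+#
+#   DEPS += mydep
+#   dep_mydep = hex 1.0.0
+#   dep_mydep = hex 1.0.0 my_pkg   # when the hex.pm name differs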
+
+define dep_fetch_fail
+ echo "Error: Unknown or invalid dependency: $(1)." >&2; \
+ exit 78;
+endef
+
+# Kept for compatibility with older Erlang.mk configurations.
+define dep_fetch_legacy
+ $(warning WARNING: '$(1)' dependency configuration uses deprecated format.) \
+ git clone -q -n -- $(word 1,$(dep_$(1))) $(DEPS_DIR)/$(1); \
+ cd $(DEPS_DIR)/$(1) && git checkout -q $(if $(word 2,$(dep_$(1))),$(word 2,$(dep_$(1))),master);
+endef
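+
+# The deprecated format handled above omits the fetch method word; it
+# is just a repository followed by an optional commit (illustrative):
+#
+#   dep_mydep = https://github.com/user/mydep v1.0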
+
+define dep_target
+$(DEPS_DIR)/$(call dep_name,$1): | $(ERLANG_MK_TMP)
+ $(eval DEP_NAME := $(call dep_name,$1))
+ $(eval DEP_STR := $(if $(filter $1,$(DEP_NAME)),$1,"$1 ($(DEP_NAME))"))
+ $(verbose) if test -d $(APPS_DIR)/$(DEP_NAME); then \
+ echo "Error: Dependency" $(DEP_STR) "conflicts with application found in $(APPS_DIR)/$(DEP_NAME)." >&2; \
+ exit 17; \
+ fi
+ $(verbose) mkdir -p $(DEPS_DIR)
+ $(dep_verbose) $(call dep_fetch_$(strip $(call dep_fetch,$(1))),$(1))
+ $(verbose) if [ -f $(DEPS_DIR)/$(1)/configure.ac -o -f $(DEPS_DIR)/$(1)/configure.in ] \
+ && [ ! -f $(DEPS_DIR)/$(1)/configure ]; then \
+ echo " AUTO " $(DEP_STR); \
+ cd $(DEPS_DIR)/$(1) && autoreconf -Wall -vif -I m4; \
+ fi
+ - $(verbose) if [ -f $(DEPS_DIR)/$(DEP_NAME)/configure ]; then \
+ echo " CONF " $(DEP_STR); \
+ cd $(DEPS_DIR)/$(DEP_NAME) && ./configure; \
+ fi
+ifeq ($(filter $(1),$(NO_AUTOPATCH)),)
+ $(verbose) $$(MAKE) --no-print-directory autopatch-$(DEP_NAME)
+endif
+
+.PHONY: autopatch-$(call dep_name,$1)
+
+autopatch-$(call dep_name,$1)::
+ $(verbose) if [ "$(1)" = "amqp_client" -a "$(RABBITMQ_CLIENT_PATCH)" ]; then \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \
+ echo " PATCH Downloading rabbitmq-codegen"; \
+ git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \
+ fi; \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-server ]; then \
+ echo " PATCH Downloading rabbitmq-server"; \
+ git clone https://github.com/rabbitmq/rabbitmq-server.git $(DEPS_DIR)/rabbitmq-server; \
+ fi; \
+ ln -s $(DEPS_DIR)/amqp_client/deps/rabbit_common-0.0.0 $(DEPS_DIR)/rabbit_common; \
+ elif [ "$(1)" = "rabbit" -a "$(RABBITMQ_SERVER_PATCH)" ]; then \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \
+ echo " PATCH Downloading rabbitmq-codegen"; \
+ git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \
+ fi \
+ elif [ "$1" = "elixir" -a "$(ELIXIR_PATCH)" ]; then \
+ ln -s lib/elixir/ebin $(DEPS_DIR)/elixir/; \
+ else \
+ $$(call dep_autopatch,$(call dep_name,$1)) \
+ fi
+endef
+
+$(foreach dep,$(BUILD_DEPS) $(DEPS),$(eval $(call dep_target,$(dep))))
+
+ifndef IS_APP
+clean:: clean-apps
+
+clean-apps:
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ $(MAKE) -C $$dep clean IS_APP=1; \
+ done
+
+distclean:: distclean-apps
+
+distclean-apps:
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ $(MAKE) -C $$dep distclean IS_APP=1; \
+ done
+endif
+
+ifndef SKIP_DEPS
+distclean:: distclean-deps
+
+distclean-deps:
+ $(gen_verbose) rm -rf $(DEPS_DIR)
+endif
+
+# Forward-declare variables used in core/deps-tools.mk. This is required
+# in case plugins use them.
+
+ERLANG_MK_RECURSIVE_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-deps-list.log
+ERLANG_MK_RECURSIVE_DOC_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-doc-deps-list.log
+ERLANG_MK_RECURSIVE_REL_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-rel-deps-list.log
+ERLANG_MK_RECURSIVE_TEST_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-test-deps-list.log
+ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-shell-deps-list.log
+
+ERLANG_MK_QUERY_DEPS_FILE = $(ERLANG_MK_TMP)/query-deps.log
+ERLANG_MK_QUERY_DOC_DEPS_FILE = $(ERLANG_MK_TMP)/query-doc-deps.log
+ERLANG_MK_QUERY_REL_DEPS_FILE = $(ERLANG_MK_TMP)/query-rel-deps.log
+ERLANG_MK_QUERY_TEST_DEPS_FILE = $(ERLANG_MK_TMP)/query-test-deps.log
+ERLANG_MK_QUERY_SHELL_DEPS_FILE = $(ERLANG_MK_TMP)/query-shell-deps.log
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: clean-app
+
+# Configuration.
+
+ERLC_OPTS ?= -Werror +debug_info +warn_export_vars +warn_shadow_vars \
+ +warn_obsolete_guard # +bin_opt_info +warn_export_all +warn_missing_spec
+COMPILE_FIRST ?=
+COMPILE_FIRST_PATHS = $(addprefix src/,$(addsuffix .erl,$(COMPILE_FIRST)))
+ERLC_EXCLUDE ?=
+ERLC_EXCLUDE_PATHS = $(addprefix src/,$(addsuffix .erl,$(ERLC_EXCLUDE)))
+
+ERLC_ASN1_OPTS ?=
+
+ERLC_MIB_OPTS ?=
+COMPILE_MIB_FIRST ?=
+COMPILE_MIB_FIRST_PATHS = $(addprefix mibs/,$(addsuffix .mib,$(COMPILE_MIB_FIRST)))
+
+# Verbosity.
+
+app_verbose_0 = @echo " APP " $(PROJECT);
+app_verbose_2 = set -x;
+app_verbose = $(app_verbose_$(V))
+
+appsrc_verbose_0 = @echo " APP " $(PROJECT).app.src;
+appsrc_verbose_2 = set -x;
+appsrc_verbose = $(appsrc_verbose_$(V))
+
+makedep_verbose_0 = @echo " DEPEND" $(PROJECT).d;
+makedep_verbose_2 = set -x;
+makedep_verbose = $(makedep_verbose_$(V))
+
+erlc_verbose_0 = @echo " ERLC " $(filter-out $(patsubst %,%.erl,$(ERLC_EXCLUDE)),\
+ $(filter %.erl %.core,$(?F)));
+erlc_verbose_2 = set -x;
+erlc_verbose = $(erlc_verbose_$(V))
+
+xyrl_verbose_0 = @echo " XYRL " $(filter %.xrl %.yrl,$(?F));
+xyrl_verbose_2 = set -x;
+xyrl_verbose = $(xyrl_verbose_$(V))
+
+asn1_verbose_0 = @echo " ASN1 " $(filter %.asn1,$(?F));
+asn1_verbose_2 = set -x;
+asn1_verbose = $(asn1_verbose_$(V))
+
+mib_verbose_0 = @echo " MIB " $(filter %.bin %.mib,$(?F));
+mib_verbose_2 = set -x;
+mib_verbose = $(mib_verbose_$(V))
+
+ifneq ($(wildcard src/),)
+
+# Targets.
+
+app:: $(if $(wildcard ebin/test),clean) deps
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d
+ $(verbose) $(MAKE) --no-print-directory app-build
+
+ifeq ($(wildcard src/$(PROJECT_MOD).erl),)
+define app_file
+{application, '$(PROJECT)', [
+ {description, "$(PROJECT_DESCRIPTION)"},
+ {vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP),
+ {id$(comma)$(space)"$(1)"}$(comma))
+ {modules, [$(call comma_list,$(2))]},
+ {registered, []},
+ {applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(foreach dep,$(DEPS),$(call dep_name,$(dep))))]},
+ {env, $(subst \,\\,$(PROJECT_ENV))}$(if $(findstring {,$(PROJECT_APP_EXTRA_KEYS)),$(comma)$(newline)$(tab)$(subst \,\\,$(PROJECT_APP_EXTRA_KEYS)),)
+]}.
+endef
+else
+define app_file
+{application, '$(PROJECT)', [
+ {description, "$(PROJECT_DESCRIPTION)"},
+ {vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP),
+ {id$(comma)$(space)"$(1)"}$(comma))
+ {modules, [$(call comma_list,$(2))]},
+ {registered, [$(call comma_list,$(PROJECT)_sup $(PROJECT_REGISTERED))]},
+ {applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(foreach dep,$(DEPS),$(call dep_name,$(dep))))]},
+ {mod, {$(PROJECT_MOD), []}},
+ {env, $(subst \,\\,$(PROJECT_ENV))}$(if $(findstring {,$(PROJECT_APP_EXTRA_KEYS)),$(comma)$(newline)$(tab)$(subst \,\\,$(PROJECT_APP_EXTRA_KEYS)),)
+]}.
+endef
+endif
+
+app-build: ebin/$(PROJECT).app
+ $(verbose) :
+
+# Source files.
+
+ALL_SRC_FILES := $(sort $(call core_find,src/,*))
+
+ERL_FILES := $(filter %.erl,$(ALL_SRC_FILES))
+CORE_FILES := $(filter %.core,$(ALL_SRC_FILES))
+
+# ASN.1 files.
+
+ifneq ($(wildcard asn1/),)
+ASN1_FILES = $(sort $(call core_find,asn1/,*.asn1))
+ERL_FILES += $(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES))))
+
+define compile_asn1
+ $(verbose) mkdir -p include/
+ $(asn1_verbose) erlc -v -I include/ -o asn1/ +noobj $(ERLC_ASN1_OPTS) $(1)
+ $(verbose) mv asn1/*.erl src/
+ -$(verbose) mv asn1/*.hrl include/
+ $(verbose) mv asn1/*.asn1db include/
+endef
+
+$(PROJECT).d:: $(ASN1_FILES)
+ $(if $(strip $?),$(call compile_asn1,$?))
+endif
+
+# SNMP MIB files.
+
+ifneq ($(wildcard mibs/),)
+MIB_FILES = $(sort $(call core_find,mibs/,*.mib))
+
+$(PROJECT).d:: $(COMPILE_MIB_FIRST_PATHS) $(MIB_FILES)
+ $(verbose) mkdir -p include/ priv/mibs/
+ $(mib_verbose) erlc -v $(ERLC_MIB_OPTS) -o priv/mibs/ -I priv/mibs/ $?
+ $(mib_verbose) erlc -o include/ -- $(addprefix priv/mibs/,$(patsubst %.mib,%.bin,$(notdir $?)))
+endif
+
+# Leex and Yecc files.
+
+XRL_FILES := $(filter %.xrl,$(ALL_SRC_FILES))
+XRL_ERL_FILES = $(addprefix src/,$(patsubst %.xrl,%.erl,$(notdir $(XRL_FILES))))
+ERL_FILES += $(XRL_ERL_FILES)
+
+YRL_FILES := $(filter %.yrl,$(ALL_SRC_FILES))
+YRL_ERL_FILES = $(addprefix src/,$(patsubst %.yrl,%.erl,$(notdir $(YRL_FILES))))
+ERL_FILES += $(YRL_ERL_FILES)
+
+$(PROJECT).d:: $(XRL_FILES) $(YRL_FILES)
+ $(if $(strip $?),$(xyrl_verbose) erlc -v -o src/ $(YRL_ERLC_OPTS) $?)
+
+# Erlang and Core Erlang files.
+
+define makedep.erl
+ E = ets:new(makedep, [bag]),
+ G = digraph:new([acyclic]),
+ ErlFiles = lists:usort(string:tokens("$(ERL_FILES)", " ")),
+ DepsDir = "$(call core_native_path,$(DEPS_DIR))",
+ AppsDir = "$(call core_native_path,$(APPS_DIR))",
+ DepsDirsSrc = "$(if $(wildcard $(DEPS_DIR)/*/src), $(call core_native_path,$(wildcard $(DEPS_DIR)/*/src)))",
+ DepsDirsInc = "$(if $(wildcard $(DEPS_DIR)/*/include), $(call core_native_path,$(wildcard $(DEPS_DIR)/*/include)))",
+ AppsDirsSrc = "$(if $(wildcard $(APPS_DIR)/*/src), $(call core_native_path,$(wildcard $(APPS_DIR)/*/src)))",
+ AppsDirsInc = "$(if $(wildcard $(APPS_DIR)/*/include), $(call core_native_path,$(wildcard $(APPS_DIR)/*/include)))",
+ DepsDirs = lists:usort(string:tokens(DepsDirsSrc++DepsDirsInc, " ")),
+ AppsDirs = lists:usort(string:tokens(AppsDirsSrc++AppsDirsInc, " ")),
+ Modules = [{list_to_atom(filename:basename(F, ".erl")), F} || F <- ErlFiles],
+ Add = fun (Mod, Dep) ->
+ case lists:keyfind(Dep, 1, Modules) of
+ false -> ok;
+ {_, DepFile} ->
+ {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+ ets:insert(E, {ModFile, DepFile}),
+ digraph:add_vertex(G, Mod),
+ digraph:add_vertex(G, Dep),
+ digraph:add_edge(G, Mod, Dep)
+ end
+ end,
+ AddHd = fun (F, Mod, DepFile) ->
+ case file:open(DepFile, [read]) of
+ {error, enoent} ->
+ ok;
+ {ok, Fd} ->
+ {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+ case ets:match(E, {ModFile, DepFile}) of
+ [] ->
+ ets:insert(E, {ModFile, DepFile}),
+				F(F, Fd, Mod, 0);
+ _ -> ok
+ end
+ end
+ end,
+ SearchHrl = fun
+ F(_Hrl, []) -> {error,enoent};
+ F(Hrl, [Dir|Dirs]) ->
+ HrlF = filename:join([Dir,Hrl]),
+ case filelib:is_file(HrlF) of
+ true ->
+ {ok, HrlF};
+ false -> F(Hrl,Dirs)
+ end
+ end,
+ Attr = fun
+ (_F, Mod, behavior, Dep) ->
+ Add(Mod, Dep);
+ (_F, Mod, behaviour, Dep) ->
+ Add(Mod, Dep);
+ (_F, Mod, compile, {parse_transform, Dep}) ->
+ Add(Mod, Dep);
+ (_F, Mod, compile, Opts) when is_list(Opts) ->
+ case proplists:get_value(parse_transform, Opts) of
+ undefined -> ok;
+ Dep -> Add(Mod, Dep)
+ end;
+ (F, Mod, include, Hrl) ->
+ case SearchHrl(Hrl, ["src", "include",AppsDir,DepsDir]++AppsDirs++DepsDirs) of
+ {ok, FoundHrl} -> AddHd(F, Mod, FoundHrl);
+ {error, _} -> false
+ end;
+ (F, Mod, include_lib, Hrl) ->
+ case SearchHrl(Hrl, ["src", "include",AppsDir,DepsDir]++AppsDirs++DepsDirs) of
+ {ok, FoundHrl} -> AddHd(F, Mod, FoundHrl);
+ {error, _} -> false
+ end;
+ (F, Mod, import, {Imp, _}) ->
+ IsFile =
+ case lists:keyfind(Imp, 1, Modules) of
+ false -> false;
+ {_, FilePath} -> filelib:is_file(FilePath)
+ end,
+ case IsFile of
+ false -> ok;
+ true -> Add(Mod, Imp)
+ end;
+ (_, _, _, _) -> ok
+ end,
+ MakeDepend = fun
+ (F, Fd, Mod, StartLocation) ->
+ {ok, Filename} = file:pid2name(Fd),
+ case io:parse_erl_form(Fd, undefined, StartLocation) of
+ {ok, AbsData, EndLocation} ->
+ case AbsData of
+ {attribute, _, Key, Value} ->
+ Attr(F, Mod, Key, Value),
+ F(F, Fd, Mod, EndLocation);
+ _ -> F(F, Fd, Mod, EndLocation)
+ end;
+			{eof, _} -> file:close(Fd);
+			{error, _ErrorDescription} ->
+				file:close(Fd);
+ {error, ErrorInfo, ErrorLocation} ->
+ F(F, Fd, Mod, ErrorLocation)
+ end,
+ ok
+ end,
+ [begin
+ Mod = list_to_atom(filename:basename(F, ".erl")),
+ case file:open(F, [read]) of
+		{ok, Fd} -> MakeDepend(MakeDepend, Fd, Mod, 0);
+ {error, enoent} -> ok
+ end
+ end || F <- ErlFiles],
+ Depend = sofs:to_external(sofs:relation_to_family(sofs:relation(ets:tab2list(E)))),
+ CompileFirst = [X || X <- lists:reverse(digraph_utils:topsort(G)), [] =/= digraph:in_neighbours(G, X)],
+ TargetPath = fun(Target) ->
+ case lists:keyfind(Target, 1, Modules) of
+ false -> "";
+ {_, DepFile} ->
+ DirSubname = tl(string:tokens(filename:dirname(DepFile), "/")),
+ string:join(DirSubname ++ [atom_to_list(Target)], "/")
+ end
+ end,
+ Output0 = [
+ "# Generated by Erlang.mk. Edit at your own risk!\n\n",
+ [[F, "::", [[" ", D] || D <- Deps], "; @touch \$$@\n"] || {F, Deps} <- Depend],
+ "\nCOMPILE_FIRST +=", [[" ", TargetPath(CF)] || CF <- CompileFirst], "\n"
+ ],
+ Output = case "é" of
+ [233] -> unicode:characters_to_binary(Output0);
+ _ -> Output0
+ end,
+ ok = file:write_file("$(1)", Output),
+ halt()
+endef
+
+ifeq ($(if $(NO_MAKEDEP),$(wildcard $(PROJECT).d),),)
+$(PROJECT).d:: $(ERL_FILES) $(call core_find,include/,*.hrl) $(MAKEFILE_LIST)
+ $(makedep_verbose) $(call erlang,$(call makedep.erl,$@))
+endif
+
+ifeq ($(IS_APP)$(IS_DEP),)
+ifneq ($(words $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES)),0)
+# Rebuild everything when the Makefile changes.
+$(ERLANG_MK_TMP)/last-makefile-change: $(MAKEFILE_LIST) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES); \
+ touch -c $(PROJECT).d; \
+ fi
+ $(verbose) touch $@
+
+$(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES):: $(ERLANG_MK_TMP)/last-makefile-change
+ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change
+endif
+endif
+
+$(PROJECT).d::
+ $(verbose) :
+
+include $(wildcard $(PROJECT).d)
+
+ebin/$(PROJECT).app:: ebin/
+
+ebin/:
+ $(verbose) mkdir -p ebin/
+
+define compile_erl
+ $(erlc_verbose) erlc -v $(if $(IS_DEP),$(filter-out -Werror,$(ERLC_OPTS)),$(ERLC_OPTS)) -o ebin/ \
+ -pa ebin/ -I include/ $(filter-out $(ERLC_EXCLUDE_PATHS),$(COMPILE_FIRST_PATHS) $(1))
+endef
+
+define validate_app_file
+ case file:consult("ebin/$(PROJECT).app") of
+ {ok, _} -> halt();
+ _ -> halt(1)
+ end
+endef
+
+ebin/$(PROJECT).app:: $(ERL_FILES) $(CORE_FILES) $(wildcard src/$(PROJECT).app.src)
+ $(eval FILES_TO_COMPILE := $(filter-out src/$(PROJECT).app.src,$?))
+ $(if $(strip $(FILES_TO_COMPILE)),$(call compile_erl,$(FILES_TO_COMPILE)))
+# Older git versions do not have the --first-parent flag. Do without in that case.
+ $(eval GITDESCRIBE := $(shell git describe --dirty --abbrev=7 --tags --always --first-parent 2>/dev/null \
+ || git describe --dirty --abbrev=7 --tags --always 2>/dev/null || true))
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(filter-out $(ERLC_EXCLUDE_PATHS),$(ERL_FILES) $(CORE_FILES) $(BEAM_FILES)))))))
+ifeq ($(wildcard src/$(PROJECT).app.src),)
+ $(app_verbose) printf '$(subst %,%%,$(subst $(newline),\n,$(subst ','\'',$(call app_file,$(GITDESCRIBE),$(MODULES)))))' \
+ > ebin/$(PROJECT).app
+ $(verbose) if ! $(call erlang,$(call validate_app_file)); then \
+ echo "The .app file produced is invalid. Please verify the value of PROJECT_ENV." >&2; \
+ exit 1; \
+ fi
+else
+ $(verbose) if [ -z "$$(grep -e '^[^%]*{\s*modules\s*,' src/$(PROJECT).app.src)" ]; then \
+ echo "Empty modules entry not found in $(PROJECT).app.src. Please consult the erlang.mk documentation for instructions." >&2; \
+ exit 1; \
+ fi
+ $(appsrc_verbose) cat src/$(PROJECT).app.src \
+ | sed "s/{[[:space:]]*modules[[:space:]]*,[[:space:]]*\[\]}/{modules, \[$(call comma_list,$(MODULES))\]}/" \
+ | sed "s/{id,[[:space:]]*\"git\"}/{id, \"$(subst /,\/,$(GITDESCRIBE))\"}/" \
+ > ebin/$(PROJECT).app
+endif
+ifneq ($(wildcard src/$(PROJECT).appup),)
+ $(verbose) cp src/$(PROJECT).appup ebin/
+endif
+
+clean:: clean-app
+
+clean-app:
+ $(gen_verbose) rm -rf $(PROJECT).d ebin/ priv/mibs/ $(XRL_ERL_FILES) $(YRL_ERL_FILES) \
+ $(addprefix include/,$(patsubst %.mib,%.hrl,$(notdir $(MIB_FILES)))) \
+ $(addprefix include/,$(patsubst %.asn1,%.hrl,$(notdir $(ASN1_FILES)))) \
+ $(addprefix include/,$(patsubst %.asn1,%.asn1db,$(notdir $(ASN1_FILES)))) \
+ $(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES))))
+
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Viktor Söderqvist <viktor@zuiderkwast.se>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: docs-deps
+
+# Configuration.
+
+ALL_DOC_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(DOC_DEPS))
+
+# Targets.
+
+$(foreach dep,$(DOC_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+doc-deps:
+else
+doc-deps: $(ALL_DOC_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_DOC_DEPS_DIRS) ; do $(MAKE) -C $$dep IS_DEP=1; done
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: rel-deps
+
+# Configuration.
+
+ALL_REL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(REL_DEPS))
+
+# Targets.
+
+$(foreach dep,$(REL_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+rel-deps:
+else
+rel-deps: $(ALL_REL_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_REL_DEPS_DIRS) ; do $(MAKE) -C $$dep; done
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: test-deps test-dir test-build clean-test-dir
+
+# Configuration.
+
+TEST_DIR ?= $(CURDIR)/test
+
+ALL_TEST_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(TEST_DEPS))
+
+TEST_ERLC_OPTS ?= +debug_info +warn_export_vars +warn_shadow_vars +warn_obsolete_guard
+TEST_ERLC_OPTS += -DTEST=1
+
+# Targets.
+
+$(foreach dep,$(TEST_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+test-deps:
+else
+test-deps: $(ALL_TEST_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_TEST_DEPS_DIRS) ; do \
+ if [ -z "$(strip $(FULL))" ] && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ else \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ fi \
+ done
+endif
+
+ifneq ($(wildcard $(TEST_DIR)),)
+test-dir: $(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build
+ @:
+
+test_erlc_verbose_0 = @echo " ERLC " $(filter-out $(patsubst %,%.erl,$(ERLC_EXCLUDE)),\
+ $(filter %.erl %.core,$(notdir $(FILES_TO_COMPILE))));
+test_erlc_verbose_2 = set -x;
+test_erlc_verbose = $(test_erlc_verbose_$(V))
+
+define compile_test_erl
+ $(test_erlc_verbose) erlc -v $(TEST_ERLC_OPTS) -o $(TEST_DIR) \
+ -pa ebin/ -I include/ $(1)
+endef
+
+ERL_TEST_FILES = $(call core_find,$(TEST_DIR)/,*.erl)
+$(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build: $(ERL_TEST_FILES) $(MAKEFILE_LIST)
+ $(eval FILES_TO_COMPILE := $(if $(filter $(MAKEFILE_LIST),$?),$(filter $(ERL_TEST_FILES),$^),$?))
+ $(if $(strip $(FILES_TO_COMPILE)),$(call compile_test_erl,$(FILES_TO_COMPILE)) && touch $@)
+endif
+
+test-build:: IS_TEST=1
+test-build:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build:: $(if $(wildcard src),$(if $(wildcard ebin/test),,clean)) $(if $(IS_APP),,deps test-deps)
+# We already compiled everything when IS_APP=1.
+ifndef IS_APP
+ifneq ($(wildcard src),)
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(verbose) $(MAKE) --no-print-directory app-build ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(gen_verbose) touch ebin/test
+endif
+ifneq ($(wildcard $(TEST_DIR)),)
+ $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+endif
+endif
+
+# Roughly the same as test-build, but when IS_APP=1.
+# We only care about compiling the current application.
+ifdef IS_APP
+test-build-app:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build-app:: deps test-deps
+ifneq ($(wildcard src),)
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(verbose) $(MAKE) --no-print-directory app-build ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(gen_verbose) touch ebin/test
+endif
+ifneq ($(wildcard $(TEST_DIR)),)
+ $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+endif
+endif
+
+clean:: clean-test-dir
+
+clean-test-dir:
+ifneq ($(wildcard $(TEST_DIR)/*.beam),)
+ $(gen_verbose) rm -f $(TEST_DIR)/*.beam $(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: rebar.config
+
+# We strip out -Werror because we don't want to fail due to
+# warnings when used as a dependency.
+
+compat_prepare_erlc_opts = $(shell echo "$1" | sed 's/, */,/g')
+
+define compat_convert_erlc_opts
+$(if $(filter-out -Werror,$1),\
+ $(if $(findstring +,$1),\
+ $(shell echo $1 | cut -b 2-)))
+endef
+
+define compat_erlc_opts_to_list
+[$(call comma_list,$(foreach o,$(call compat_prepare_erlc_opts,$1),$(call compat_convert_erlc_opts,$o)))]
+endef
+
+define compat_rebar_config
+{deps, [
+$(call comma_list,$(foreach d,$(DEPS),\
+ $(if $(filter hex,$(call dep_fetch,$d)),\
+ {$(call dep_name,$d)$(comma)"$(call dep_repo,$d)"},\
+ {$(call dep_name,$d)$(comma)".*"$(comma){git,"$(call dep_repo,$d)"$(comma)"$(call dep_commit,$d)"}})))
+]}.
+{erl_opts, $(call compat_erlc_opts_to_list,$(ERLC_OPTS))}.
+endef
+
+rebar.config:
+ $(gen_verbose) $(call core_render,compat_rebar_config,rebar.config)
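+
+# Usage sketch: `make rebar.config` renders the template above. For a
+# project with a single git dependency (names illustrative) and the
+# default ERLC_OPTS, the output looks roughly like:
+#
+#   {deps, [
+#   {mydep,".*",{git,"https://github.com/user/mydep","master"}}
+#   ]}.
+#   {erl_opts, [debug_info,warn_export_vars,warn_shadow_vars,warn_obsolete_guard]}.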
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter asciideck,$(DEPS) $(DOC_DEPS)),asciideck)
+
+.PHONY: asciidoc asciidoc-guide asciidoc-manual install-asciidoc distclean-asciidoc-guide distclean-asciidoc-manual
+
+# Core targets.
+
+docs:: asciidoc
+
+distclean:: distclean-asciidoc-guide distclean-asciidoc-manual
+
+# Plugin-specific targets.
+
+asciidoc: asciidoc-guide asciidoc-manual
+
+# User guide.
+
+ifeq ($(wildcard doc/src/guide/book.asciidoc),)
+asciidoc-guide:
+else
+asciidoc-guide: distclean-asciidoc-guide doc-deps
+ a2x -v -f pdf doc/src/guide/book.asciidoc && mv doc/src/guide/book.pdf doc/guide.pdf
+ a2x -v -f chunked doc/src/guide/book.asciidoc && mv doc/src/guide/book.chunked/ doc/html/
+
+distclean-asciidoc-guide:
+ $(gen_verbose) rm -rf doc/html/ doc/guide.pdf
+endif
+
+# Man pages.
+
+ASCIIDOC_MANUAL_FILES := $(wildcard doc/src/manual/*.asciidoc)
+
+ifeq ($(ASCIIDOC_MANUAL_FILES),)
+asciidoc-manual:
+else
+
+# Configuration.
+
+MAN_INSTALL_PATH ?= /usr/local/share/man
+MAN_SECTIONS ?= 3 7
+MAN_PROJECT ?= $(shell echo $(PROJECT) | sed 's/^./\U&\E/')
+MAN_VERSION ?= $(PROJECT_VERSION)
+
+# Plugin-specific targets.
+
+define asciidoc2man.erl
+try
+ [begin
+ io:format(" ADOC ~s~n", [F]),
+ ok = asciideck:to_manpage(asciideck:parse_file(F), #{
+ compress => gzip,
+ outdir => filename:dirname(F),
+ extra2 => "$(MAN_PROJECT) $(MAN_VERSION)",
+ extra3 => "$(MAN_PROJECT) Function Reference"
+ })
+ end || F <- [$(shell echo $(addprefix $(comma)\",$(addsuffix \",$1)) | sed 's/^.//')]],
+ halt(0)
+catch C:E ->
+ io:format("Exception ~p:~p~nStacktrace: ~p~n", [C, E, erlang:get_stacktrace()]),
+ halt(1)
+end.
+endef
+
+asciidoc-manual:: doc-deps
+
+asciidoc-manual:: $(ASCIIDOC_MANUAL_FILES)
+ $(gen_verbose) $(call erlang,$(call asciidoc2man.erl,$?))
+ $(verbose) $(foreach s,$(MAN_SECTIONS),mkdir -p doc/man$s/ && mv doc/src/manual/*.$s.gz doc/man$s/;)
+
+install-docs:: install-asciidoc
+
+install-asciidoc: asciidoc-manual
+ $(foreach s,$(MAN_SECTIONS),\
+ mkdir -p $(MAN_INSTALL_PATH)/man$s/ && \
+ install -g `id -g` -o `id -u` -m 0644 doc/man$s/*.gz $(MAN_INSTALL_PATH)/man$s/;)
+
+distclean-asciidoc-manual:
+ $(gen_verbose) rm -rf $(addprefix doc/man,$(MAN_SECTIONS))
+endif
+endif
+
+# Copyright (c) 2014-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: bootstrap bootstrap-lib bootstrap-rel new list-templates
+
+# Core targets.
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Bootstrap targets:" \
+ " bootstrap Generate a skeleton of an OTP application" \
+ " bootstrap-lib Generate a skeleton of an OTP library" \
+ " bootstrap-rel Generate the files needed to build a release" \
+ " new-app in=NAME Create a new local OTP application NAME" \
+ " new-lib in=NAME Create a new local OTP library NAME" \
+ " new t=TPL n=NAME Generate a module NAME based on the template TPL" \
+ " new t=T n=N in=APP Generate a module NAME based on the template TPL in APP" \
+ " list-templates List available templates"
+
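+# Illustrative invocations; the application and module names used here are
+# placeholders, not part of erlang.mk:
+#
+#   make bootstrap                        # OTP application skeleton
+#   make new t=gen_server n=my_server     # module from the gen_server template
+#   make new t=cowboy_http n=my_handler in=my_app
+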
+# Bootstrap templates.
+
+define bs_appsrc
+{application, $p, [
+ {description, ""},
+ {vsn, "0.1.0"},
+ {id, "git"},
+ {modules, []},
+ {registered, []},
+ {applications, [
+ kernel,
+ stdlib
+ ]},
+ {mod, {$p_app, []}},
+ {env, []}
+]}.
+endef
+
+define bs_appsrc_lib
+{application, $p, [
+ {description, ""},
+ {vsn, "0.1.0"},
+ {id, "git"},
+ {modules, []},
+ {registered, []},
+ {applications, [
+ kernel,
+ stdlib
+ ]}
+]}.
+endef
+
+# To prevent autocompletion issues with ZSH, we add "include erlang.mk"
+# separately during the actual bootstrap.
+define bs_Makefile
+PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.1.0
+$(if $(SP),
+# Whitespace to be used when creating files from templates.
+SP = $(SP)
+)
+endef
+
+define bs_apps_Makefile
+PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.1.0
+$(if $(SP),
+# Whitespace to be used when creating files from templates.
+SP = $(SP)
+)
+# Make sure we know where the applications are located.
+ROOT_DIR ?= $(call core_relpath,$(dir $(ERLANG_MK_FILENAME)),$(APPS_DIR)/app)
+APPS_DIR ?= ..
+DEPS_DIR ?= $(call core_relpath,$(DEPS_DIR),$(APPS_DIR)/app)
+
+include $$(ROOT_DIR)/erlang.mk
+endef
+
+define bs_app
+-module($p_app).
+-behaviour(application).
+
+-export([start/2]).
+-export([stop/1]).
+
+start(_Type, _Args) ->
+ $p_sup:start_link().
+
+stop(_State) ->
+ ok.
+endef
+
+define bs_relx_config
+{release, {$p_release, "1"}, [$p, sasl, runtime_tools]}.
+{extended_start_script, true}.
+{sys_config, "config/sys.config"}.
+{vm_args, "config/vm.args"}.
+endef
+
+define bs_sys_config
+[
+].
+endef
+
+define bs_vm_args
+-name $p@127.0.0.1
+-setcookie $p
+-heart
+endef
+
+# Normal templates.
+
+define tpl_supervisor
+-module($(n)).
+-behaviour(supervisor).
+
+-export([start_link/0]).
+-export([init/1]).
+
+start_link() ->
+ supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+init([]) ->
+ Procs = [],
+ {ok, {{one_for_one, 1, 5}, Procs}}.
+endef
+
+define tpl_gen_server
+-module($(n)).
+-behaviour(gen_server).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_server.
+-export([init/1]).
+-export([handle_call/3]).
+-export([handle_cast/2]).
+-export([handle_info/2]).
+-export([terminate/2]).
+-export([code_change/3]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_server:start_link(?MODULE, [], []).
+
+%% gen_server.
+
+init([]) ->
+ {ok, #state{}}.
+
+handle_call(_Request, _From, State) ->
+ {reply, ignored, State}.
+
+handle_cast(_Msg, State) ->
+ {noreply, State}.
+
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+endef
+
+define tpl_module
+-module($(n)).
+-export([]).
+endef
+
+define tpl_cowboy_http
+-module($(n)).
+-behaviour(cowboy_http_handler).
+
+-export([init/3]).
+-export([handle/2]).
+-export([terminate/3]).
+
+-record(state, {
+}).
+
+init(_, Req, _Opts) ->
+ {ok, Req, #state{}}.
+
+handle(Req, State=#state{}) ->
+ {ok, Req2} = cowboy_req:reply(200, Req),
+ {ok, Req2, State}.
+
+terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_gen_fsm
+-module($(n)).
+-behaviour(gen_fsm).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_fsm.
+-export([init/1]).
+-export([state_name/2]).
+-export([handle_event/3]).
+-export([state_name/3]).
+-export([handle_sync_event/4]).
+-export([handle_info/3]).
+-export([terminate/3]).
+-export([code_change/4]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_fsm:start_link(?MODULE, [], []).
+
+%% gen_fsm.
+
+init([]) ->
+ {ok, state_name, #state{}}.
+
+state_name(_Event, StateData) ->
+ {next_state, state_name, StateData}.
+
+handle_event(_Event, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+state_name(_Event, _From, StateData) ->
+ {reply, ignored, state_name, StateData}.
+
+handle_sync_event(_Event, _From, StateName, StateData) ->
+ {reply, ignored, StateName, StateData}.
+
+handle_info(_Info, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+terminate(_Reason, _StateName, _StateData) ->
+ ok.
+
+code_change(_OldVsn, StateName, StateData, _Extra) ->
+ {ok, StateName, StateData}.
+endef
+
+define tpl_gen_statem
+-module($(n)).
+-behaviour(gen_statem).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_statem.
+-export([callback_mode/0]).
+-export([init/1]).
+-export([state_name/3]).
+-export([handle_event/4]).
+-export([terminate/3]).
+-export([code_change/4]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_statem:start_link(?MODULE, [], []).
+
+%% gen_statem.
+
+callback_mode() ->
+ state_functions.
+
+init([]) ->
+ {ok, state_name, #state{}}.
+
+state_name(_EventType, _EventData, StateData) ->
+ {next_state, state_name, StateData}.
+
+handle_event(_EventType, _EventData, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+terminate(_Reason, _StateName, _StateData) ->
+ ok.
+
+code_change(_OldVsn, StateName, StateData, _Extra) ->
+ {ok, StateName, StateData}.
+endef
+
+define tpl_cowboy_loop
+-module($(n)).
+-behaviour(cowboy_loop_handler).
+
+-export([init/3]).
+-export([info/3]).
+-export([terminate/3]).
+
+-record(state, {
+}).
+
+init(_, Req, _Opts) ->
+ {loop, Req, #state{}, 5000, hibernate}.
+
+info(_Info, Req, State) ->
+ {loop, Req, State, hibernate}.
+
+terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_cowboy_rest
+-module($(n)).
+
+-export([init/3]).
+-export([content_types_provided/2]).
+-export([get_html/2]).
+
+init(_, _Req, _Opts) ->
+ {upgrade, protocol, cowboy_rest}.
+
+content_types_provided(Req, State) ->
+ {[{{<<"text">>, <<"html">>, '*'}, get_html}], Req, State}.
+
+get_html(Req, State) ->
+ {<<"<html><body>This is REST!</body></html>">>, Req, State}.
+endef
+
+define tpl_cowboy_ws
+-module($(n)).
+-behaviour(cowboy_websocket_handler).
+
+-export([init/3]).
+-export([websocket_init/3]).
+-export([websocket_handle/3]).
+-export([websocket_info/3]).
+-export([websocket_terminate/3]).
+
+-record(state, {
+}).
+
+init(_, _, _) ->
+ {upgrade, protocol, cowboy_websocket}.
+
+websocket_init(_, Req, _Opts) ->
+ Req2 = cowboy_req:compact(Req),
+ {ok, Req2, #state{}}.
+
+websocket_handle({text, Data}, Req, State) ->
+ {reply, {text, Data}, Req, State};
+websocket_handle({binary, Data}, Req, State) ->
+ {reply, {binary, Data}, Req, State};
+websocket_handle(_Frame, Req, State) ->
+ {ok, Req, State}.
+
+websocket_info(_Info, Req, State) ->
+ {ok, Req, State}.
+
+websocket_terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_ranch_protocol
+-module($(n)).
+-behaviour(ranch_protocol).
+
+-export([start_link/4]).
+-export([init/4]).
+
+-type opts() :: [].
+-export_type([opts/0]).
+
+-record(state, {
+ socket :: inet:socket(),
+ transport :: module()
+}).
+
+start_link(Ref, Socket, Transport, Opts) ->
+ Pid = spawn_link(?MODULE, init, [Ref, Socket, Transport, Opts]),
+ {ok, Pid}.
+
+-spec init(ranch:ref(), inet:socket(), module(), opts()) -> ok.
+init(Ref, Socket, Transport, _Opts) ->
+ ok = ranch:accept_ack(Ref),
+ loop(#state{socket=Socket, transport=Transport}).
+
+loop(State) ->
+ loop(State).
+endef
+
+# Plugin-specific targets.
+
+ifndef WS
+ifdef SP
+WS = $(subst a,,a $(wordlist 1,$(SP),a a a a a a a a a a a a a a a a a a a a))
+else
+WS = $(tab)
+endif
+endif
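+
+# For example, "make bootstrap SP=2" (illustrative) renders the generated
+# files indented with two spaces instead of tabs.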
+
+bootstrap:
+ifneq ($(wildcard src/),)
+ $(error Error: src/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(eval n := $(PROJECT)_sup)
+ $(verbose) $(call core_render,bs_Makefile,Makefile)
+ $(verbose) echo "include erlang.mk" >> Makefile
+ $(verbose) mkdir src/
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc,src/$(PROJECT).app.src)
+endif
+ $(verbose) $(call core_render,bs_app,src/$(PROJECT)_app.erl)
+ $(verbose) $(call core_render,tpl_supervisor,src/$(PROJECT)_sup.erl)
+
+bootstrap-lib:
+ifneq ($(wildcard src/),)
+ $(error Error: src/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(verbose) $(call core_render,bs_Makefile,Makefile)
+ $(verbose) echo "include erlang.mk" >> Makefile
+ $(verbose) mkdir src/
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc_lib,src/$(PROJECT).app.src)
+endif
+
+bootstrap-rel:
+ifneq ($(wildcard relx.config),)
+ $(error Error: relx.config already exists)
+endif
+ifneq ($(wildcard config/),)
+ $(error Error: config/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(verbose) $(call core_render,bs_relx_config,relx.config)
+ $(verbose) mkdir config/
+ $(verbose) $(call core_render,bs_sys_config,config/sys.config)
+ $(verbose) $(call core_render,bs_vm_args,config/vm.args)
+
+new-app:
+ifndef in
+ $(error Usage: $(MAKE) new-app in=APP)
+endif
+ifneq ($(wildcard $(APPS_DIR)/$in),)
+ $(error Error: Application $in already exists)
+endif
+ $(eval p := $(in))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(eval n := $(in)_sup)
+ $(verbose) mkdir -p $(APPS_DIR)/$p/src/
+ $(verbose) $(call core_render,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile)
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc,$(APPS_DIR)/$p/src/$p.app.src)
+endif
+ $(verbose) $(call core_render,bs_app,$(APPS_DIR)/$p/src/$p_app.erl)
+ $(verbose) $(call core_render,tpl_supervisor,$(APPS_DIR)/$p/src/$p_sup.erl)
+
+new-lib:
+ifndef in
+ $(error Usage: $(MAKE) new-lib in=APP)
+endif
+ifneq ($(wildcard $(APPS_DIR)/$in),)
+ $(error Error: Application $in already exists)
+endif
+ $(eval p := $(in))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(verbose) mkdir -p $(APPS_DIR)/$p/src/
+ $(verbose) $(call core_render,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile)
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc_lib,$(APPS_DIR)/$p/src/$p.app.src)
+endif
+
+new:
+ifeq ($(wildcard src/)$(in),)
+ $(error Error: src/ directory does not exist)
+endif
+ifndef t
+ $(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP])
+endif
+ifndef n
+ $(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP])
+endif
+ifdef in
+ $(verbose) $(call core_render,tpl_$(t),$(APPS_DIR)/$(in)/src/$(n).erl)
+else
+ $(verbose) $(call core_render,tpl_$(t),src/$(n).erl)
+endif
+
+list-templates:
+	$(verbose) echo Available templates:
+ $(verbose) printf " %s\n" $(sort $(patsubst tpl_%,%,$(filter tpl_%,$(.VARIABLES))))
+
+# Copyright (c) 2014-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: clean-c_src distclean-c_src-env
+
+# Configuration.
+
+C_SRC_DIR ?= $(CURDIR)/c_src
+C_SRC_ENV ?= $(C_SRC_DIR)/env.mk
+C_SRC_OUTPUT ?= $(CURDIR)/priv/$(PROJECT)
+C_SRC_TYPE ?= shared
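+
+# When C_SRC_TYPE is "shared" the output gets the platform's shared library
+# extension (for example priv/$(PROJECT).so, as used for NIFs); any other
+# value builds a standalone executable at the same path.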
+
+# System type and C compiler/flags.
+
+ifeq ($(PLATFORM),msys2)
+ C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?= .exe
+ C_SRC_OUTPUT_SHARED_EXTENSION ?= .dll
+else
+ C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?=
+ C_SRC_OUTPUT_SHARED_EXTENSION ?= .so
+endif
+
+ifeq ($(C_SRC_TYPE),shared)
+ C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_SHARED_EXTENSION)
+else
+ C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_EXECUTABLE_EXTENSION)
+endif
+
+ifeq ($(PLATFORM),msys2)
+# We hardcode the compiler used on MSYS2. The default CC=cc does
+# not produce working code. The "gcc" MSYS2 package also doesn't.
+ CC = /mingw64/bin/gcc
+ export CC
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),darwin)
+ CC ?= cc
+ CFLAGS ?= -O3 -std=c99 -arch x86_64 -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -arch x86_64 -Wall
+ LDFLAGS ?= -arch x86_64 -flat_namespace -undefined suppress
+else ifeq ($(PLATFORM),freebsd)
+ CC ?= cc
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),linux)
+ CC ?= gcc
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+endif
+
+ifneq ($(PLATFORM),msys2)
+ CFLAGS += -fPIC
+ CXXFLAGS += -fPIC
+endif
+
+CFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
+CXXFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
+
+LDLIBS += -L"$(ERL_INTERFACE_LIB_DIR)" -lei
+
+# Verbosity.
+
+c_verbose_0 = @echo " C " $(filter-out $(notdir $(MAKEFILE_LIST) $(C_SRC_ENV)),$(^F));
+c_verbose = $(c_verbose_$(V))
+
+cpp_verbose_0 = @echo " CPP " $(filter-out $(notdir $(MAKEFILE_LIST) $(C_SRC_ENV)),$(^F));
+cpp_verbose = $(cpp_verbose_$(V))
+
+link_verbose_0 = @echo " LD " $(@F);
+link_verbose = $(link_verbose_$(V))
+
+# Targets.
+
+ifeq ($(wildcard $(C_SRC_DIR)),)
+else ifneq ($(wildcard $(C_SRC_DIR)/Makefile),)
+app:: app-c_src
+
+test-build:: app-c_src
+
+app-c_src:
+ $(MAKE) -C $(C_SRC_DIR)
+
+clean::
+ $(MAKE) -C $(C_SRC_DIR) clean
+
+else
+
+ifeq ($(SOURCES),)
+SOURCES := $(sort $(foreach pat,*.c *.C *.cc *.cpp,$(call core_find,$(C_SRC_DIR)/,$(pat))))
+endif
+OBJECTS = $(addsuffix .o, $(basename $(SOURCES)))
+
+COMPILE_C = $(c_verbose) $(CC) $(CFLAGS) $(CPPFLAGS) -c
+COMPILE_CPP = $(cpp_verbose) $(CXX) $(CXXFLAGS) $(CPPFLAGS) -c
+
+app:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
+
+test-build:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
+
+$(C_SRC_OUTPUT_FILE): $(OBJECTS)
+ $(verbose) mkdir -p $(dir $@)
+ $(link_verbose) $(CC) $(OBJECTS) \
+ $(LDFLAGS) $(if $(filter $(C_SRC_TYPE),shared),-shared) $(LDLIBS) \
+ -o $(C_SRC_OUTPUT_FILE)
+
+$(OBJECTS): $(MAKEFILE_LIST) $(C_SRC_ENV)
+
+%.o: %.c
+ $(COMPILE_C) $(OUTPUT_OPTION) $<
+
+%.o: %.cc
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+%.o: %.C
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+%.o: %.cpp
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+clean:: clean-c_src
+
+clean-c_src:
+ $(gen_verbose) rm -f $(C_SRC_OUTPUT_FILE) $(OBJECTS)
+
+endif
+
+ifneq ($(wildcard $(C_SRC_DIR)),)
+ERL_ERTS_DIR = $(shell $(ERL) -eval 'io:format("~s~n", [code:lib_dir(erts)]), halt().')
+
+$(C_SRC_ENV):
+ $(verbose) $(ERL) -eval "file:write_file(\"$(call core_native_path,$(C_SRC_ENV))\", \
+ io_lib:format( \
+ \"# Generated by Erlang.mk. Edit at your own risk!~n~n\" \
+ \"ERTS_INCLUDE_DIR ?= ~s/erts-~s/include/~n\" \
+ \"ERL_INTERFACE_INCLUDE_DIR ?= ~s~n\" \
+ \"ERL_INTERFACE_LIB_DIR ?= ~s~n\" \
+ \"ERTS_DIR ?= $(ERL_ERTS_DIR)~n\", \
+ [code:root_dir(), erlang:system_info(version), \
+ code:lib_dir(erl_interface, include), \
+ code:lib_dir(erl_interface, lib)])), \
+ halt()."
+
+distclean:: distclean-c_src-env
+
+distclean-c_src-env:
+ $(gen_verbose) rm -f $(C_SRC_ENV)
+
+-include $(C_SRC_ENV)
+
+ifneq ($(ERL_ERTS_DIR),$(ERTS_DIR))
+$(shell rm -f $(C_SRC_ENV))
+endif
+endif
+
+# Templates.
+
+define bs_c_nif
+#include "erl_nif.h"
+
+static int loads = 0;
+
+static int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
+{
+ /* Initialize private data. */
+ *priv_data = NULL;
+
+ loads++;
+
+ return 0;
+}
+
+static int upgrade(ErlNifEnv* env, void** priv_data, void** old_priv_data, ERL_NIF_TERM load_info)
+{
+ /* Convert the private data to the new version. */
+ *priv_data = *old_priv_data;
+
+ loads++;
+
+ return 0;
+}
+
+static void unload(ErlNifEnv* env, void* priv_data)
+{
+ if (loads == 1) {
+ /* Destroy the private data. */
+ }
+
+ loads--;
+}
+
+static ERL_NIF_TERM hello(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ if (enif_is_atom(env, argv[0])) {
+ return enif_make_tuple2(env,
+ enif_make_atom(env, "hello"),
+ argv[0]);
+ }
+
+ return enif_make_tuple2(env,
+ enif_make_atom(env, "error"),
+ enif_make_atom(env, "badarg"));
+}
+
+static ErlNifFunc nif_funcs[] = {
+ {"hello", 1, hello}
+};
+
+ERL_NIF_INIT($n, nif_funcs, load, NULL, upgrade, unload)
+endef
+
+define bs_erl_nif
+-module($n).
+
+-export([hello/1]).
+
+-on_load(on_load/0).
+on_load() ->
+ PrivDir = case code:priv_dir(?MODULE) of
+ {error, _} ->
+ AppPath = filename:dirname(filename:dirname(code:which(?MODULE))),
+ filename:join(AppPath, "priv");
+ Path ->
+ Path
+ end,
+ erlang:load_nif(filename:join(PrivDir, atom_to_list(?MODULE)), 0).
+
+hello(_) ->
+ erlang:nif_error({not_loaded, ?MODULE}).
+endef
+
+new-nif:
+ifneq ($(wildcard $(C_SRC_DIR)/$n.c),)
+ $(error Error: $(C_SRC_DIR)/$n.c already exists)
+endif
+ifneq ($(wildcard src/$n.erl),)
+ $(error Error: src/$n.erl already exists)
+endif
+ifndef n
+ $(error Usage: $(MAKE) new-nif n=NAME [in=APP])
+endif
+ifdef in
+ $(verbose) $(MAKE) -C $(APPS_DIR)/$(in)/ new-nif n=$n in=
+else
+ $(verbose) mkdir -p $(C_SRC_DIR) src/
+ $(verbose) $(call core_render,bs_c_nif,$(C_SRC_DIR)/$n.c)
+ $(verbose) $(call core_render,bs_erl_nif,src/$n.erl)
+endif
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: ci ci-prepare ci-setup
+
+CI_OTP ?=
+CI_HIPE ?=
+CI_ERLLVM ?=
+
+ifeq ($(CI_VM),native)
+ERLC_OPTS += +native
+TEST_ERLC_OPTS += +native
+else ifeq ($(CI_VM),erllvm)
+ERLC_OPTS += +native +'{hipe, [to_llvm]}'
+TEST_ERLC_OPTS += +native +'{hipe, [to_llvm]}'
+endif
+
+ifeq ($(strip $(CI_OTP) $(CI_HIPE) $(CI_ERLLVM)),)
+ci::
+else
+
+ci:: $(addprefix ci-,$(CI_OTP) $(addsuffix -native,$(CI_HIPE)) $(addsuffix -erllvm,$(CI_ERLLVM)))
+
+ci-prepare: $(addprefix $(KERL_INSTALL_DIR)/,$(CI_OTP) $(addsuffix -native,$(CI_HIPE)))
+
+ci-setup::
+ $(verbose) :
+
+ci-extra::
+ $(verbose) :
+
+ci_verbose_0 = @echo " CI " $(1);
+ci_verbose = $(ci_verbose_$(V))
+
+define ci_target
+ci-$1: $(KERL_INSTALL_DIR)/$2
+ $(verbose) $(MAKE) --no-print-directory clean
+ $(ci_verbose) \
+ PATH="$(KERL_INSTALL_DIR)/$2/bin:$(PATH)" \
+ CI_OTP_RELEASE="$1" \
+ CT_OPTS="-label $1" \
+ CI_VM="$3" \
+ $(MAKE) ci-setup tests
+ $(verbose) $(MAKE) --no-print-directory ci-extra
+endef
+
+$(foreach otp,$(CI_OTP),$(eval $(call ci_target,$(otp),$(otp),otp)))
+$(foreach otp,$(CI_HIPE),$(eval $(call ci_target,$(otp)-native,$(otp)-native,native)))
+$(foreach otp,$(CI_ERLLVM),$(eval $(call ci_target,$(otp)-erllvm,$(otp)-native,erllvm)))
+
+$(foreach otp,$(filter-out $(ERLANG_OTP),$(CI_OTP)),$(eval $(call kerl_otp_target,$(otp))))
+$(foreach otp,$(filter-out $(ERLANG_HIPE),$(sort $(CI_HIPE) $(CI_ERLLVM))),$(eval $(call kerl_hipe_target,$(otp))))
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Continuous Integration targets:" \
+ " ci Run '$(MAKE) tests' on all configured Erlang versions." \
+ "" \
+ "The CI_OTP variable must be defined with the Erlang versions" \
+ "that must be tested. For example: CI_OTP = OTP-17.3.4 OTP-17.5.3"
+
+endif
+
+# Copyright (c) 2020, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifdef CONCUERROR_TESTS
+
+.PHONY: concuerror distclean-concuerror
+
+# Configuration
+
+CONCUERROR_LOGS_DIR ?= $(CURDIR)/logs
+CONCUERROR_OPTS ?=
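+
+# Each entry in CONCUERROR_TESTS is a module:function pair, for example
+# (placeholder names): CONCUERROR_TESTS = my_module:my_test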
+
+# Core targets.
+
+check:: concuerror
+
+ifndef KEEP_LOGS
+distclean:: distclean-concuerror
+endif
+
+# Plugin-specific targets.
+
+$(ERLANG_MK_TMP)/Concuerror/bin/concuerror: | $(ERLANG_MK_TMP)
+ $(verbose) git clone https://github.com/parapluu/Concuerror $(ERLANG_MK_TMP)/Concuerror
+ $(verbose) $(MAKE) -C $(ERLANG_MK_TMP)/Concuerror
+
+$(CONCUERROR_LOGS_DIR):
+ $(verbose) mkdir -p $(CONCUERROR_LOGS_DIR)
+
+define concuerror_html_report
+<!DOCTYPE html>
+<html lang="en">
+<head>
+<meta charset="utf-8">
+<title>Concuerror HTML report</title>
+</head>
+<body>
+<h1>Concuerror HTML report</h1>
+<p>Generated on $(concuerror_date)</p>
+<ul>
+$(foreach t,$(concuerror_targets),<li><a href="$(t).txt">$(t)</a></li>)
+</ul>
+</body>
+</html>
+endef
+
+concuerror: $(addprefix concuerror-,$(subst :,-,$(CONCUERROR_TESTS)))
+ $(eval concuerror_date := $(shell date))
+ $(eval concuerror_targets := $^)
+ $(verbose) $(call core_render,concuerror_html_report,$(CONCUERROR_LOGS_DIR)/concuerror.html)
+
+define concuerror_target
+.PHONY: concuerror-$1-$2
+
+concuerror-$1-$2: test-build | $(ERLANG_MK_TMP)/Concuerror/bin/concuerror $(CONCUERROR_LOGS_DIR)
+ $(ERLANG_MK_TMP)/Concuerror/bin/concuerror \
+ --pa $(CURDIR)/ebin --pa $(TEST_DIR) \
+ -o $(CONCUERROR_LOGS_DIR)/concuerror-$1-$2.txt \
+ $$(CONCUERROR_OPTS) -m $1 -t $2
+endef
+
+$(foreach test,$(CONCUERROR_TESTS),$(eval $(call concuerror_target,$(firstword $(subst :, ,$(test))),$(lastword $(subst :, ,$(test))))))
+
+distclean-concuerror:
+ $(gen_verbose) rm -rf $(CONCUERROR_LOGS_DIR)
+
+endif
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: ct apps-ct distclean-ct
+
+# Configuration.
+
+CT_OPTS ?=
+
+ifneq ($(wildcard $(TEST_DIR)),)
+ifndef CT_SUITES
+CT_SUITES := $(sort $(subst _SUITE.erl,,$(notdir $(call core_find,$(TEST_DIR)/,*_SUITE.erl))))
+endif
+endif
+CT_SUITES ?=
+CT_LOGS_DIR ?= $(CURDIR)/logs
+
+# Core targets.
+
+tests:: ct
+
+ifndef KEEP_LOGS
+distclean:: distclean-ct
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Common_test targets:" \
+ " ct Run all the common_test suites for this project" \
+ "" \
+ "All your common_test suites have their associated targets." \
+ "A suite named http_SUITE can be ran using the ct-http target."
+
+# Plugin-specific targets.
+
+CT_RUN = ct_run \
+ -no_auto_compile \
+ -noinput \
+ -pa $(CURDIR)/ebin $(TEST_DIR) \
+ -dir $(TEST_DIR) \
+ -logdir $(CT_LOGS_DIR)
+
+ifeq ($(CT_SUITES),)
+ct: $(if $(IS_APP)$(ROOT_DIR),,apps-ct)
+else
+# We do not run tests if we are in an apps/* with no test directory.
+ifneq ($(IS_APP)$(wildcard $(TEST_DIR)),1)
+ct: test-build $(if $(IS_APP)$(ROOT_DIR),,apps-ct)
+ $(verbose) mkdir -p $(CT_LOGS_DIR)
+ $(gen_verbose) $(CT_RUN) -sname ct_$(PROJECT) -suite $(addsuffix _SUITE,$(CT_SUITES)) $(CT_OPTS)
+endif
+endif
+
+ifneq ($(ALL_APPS_DIRS),)
+define ct_app_target
+apps-ct-$1: test-build
+ $$(MAKE) -C $1 ct IS_APP=1
+endef
+
+$(foreach app,$(ALL_APPS_DIRS),$(eval $(call ct_app_target,$(app))))
+
+apps-ct: $(addprefix apps-ct-,$(ALL_APPS_DIRS))
+endif
+
+ifdef t
+ifeq (,$(findstring :,$t))
+CT_EXTRA = -group $t
+else
+t_words = $(subst :, ,$t)
+CT_EXTRA = -group $(firstword $(t_words)) -case $(lastword $(t_words))
+endif
+else
+ifdef c
+CT_EXTRA = -case $c
+else
+CT_EXTRA =
+endif
+endif
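+
+# Illustrative examples, with placeholder group/case names:
+#
+#   make ct-http                     # run http_SUITE
+#   make ct-http t=my_group          # a single group
+#   make ct-http t=my_group:my_case  # a single case within a group
+#   make ct-http c=my_case           # a single case, any group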
+
+define ct_suite_target
+ct-$(1): test-build
+ $(verbose) mkdir -p $(CT_LOGS_DIR)
+ $(gen_verbose_esc) $(CT_RUN) -sname ct_$(PROJECT) -suite $(addsuffix _SUITE,$(1)) $(CT_EXTRA) $(CT_OPTS)
+endef
+
+$(foreach test,$(CT_SUITES),$(eval $(call ct_suite_target,$(test))))
+
+distclean-ct:
+ $(gen_verbose) rm -rf $(CT_LOGS_DIR)
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: plt distclean-plt dialyze
+
+# Configuration.
+
+DIALYZER_PLT ?= $(CURDIR)/.$(PROJECT).plt
+export DIALYZER_PLT
+
+PLT_APPS ?=
+DIALYZER_DIRS ?= --src -r $(wildcard src) $(ALL_APPS_DIRS)
+DIALYZER_OPTS ?= -Werror_handling -Wrace_conditions -Wunmatched_returns # -Wunderspecs
+DIALYZER_PLT_OPTS ?=
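+
+# PLT_APPS lists additional applications to include in the PLT, for example
+# (illustrative): PLT_APPS = mnesia ssl inets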
+
+# Core targets.
+
+check:: dialyze
+
+distclean:: distclean-plt
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Dialyzer targets:" \
+ " plt Build a PLT file for this project" \
+ " dialyze Analyze the project using Dialyzer"
+
+# Plugin-specific targets.
+
+define filter_opts.erl
+ Opts = init:get_plain_arguments(),
+ {Filtered, _} = lists:foldl(fun
+ (O, {Os, true}) -> {[O|Os], false};
+ (O = "-D", {Os, _}) -> {[O|Os], true};
+ (O = [\\$$-, \\$$D, _ | _], {Os, _}) -> {[O|Os], false};
+ (O = "-I", {Os, _}) -> {[O|Os], true};
+ (O = [\\$$-, \\$$I, _ | _], {Os, _}) -> {[O|Os], false};
+ (O = "-pa", {Os, _}) -> {[O|Os], true};
+ (_, Acc) -> Acc
+ end, {[], false}, Opts),
+ io:format("~s~n", [string:join(lists:reverse(Filtered), " ")]),
+ halt().
+endef
+
+# DIALYZER_PLT is a variable understood directly by Dialyzer.
+#
+# We append the path to erts at the end of the PLT. This works
+# because the PLT file is in the external term format and the
+# function binary_to_term/1 ignores any trailing data.
+$(DIALYZER_PLT): deps app
+ $(eval DEPS_LOG := $(shell test -f $(ERLANG_MK_TMP)/deps.log && \
+ while read p; do test -d $$p/ebin && echo $$p/ebin; done <$(ERLANG_MK_TMP)/deps.log))
+ $(verbose) dialyzer --build_plt $(DIALYZER_PLT_OPTS) --apps \
+ erts kernel stdlib $(PLT_APPS) $(OTP_DEPS) $(LOCAL_DEPS) $(DEPS_LOG) || test $$? -eq 2
+ $(verbose) $(ERL) -eval 'io:format("~n~s~n", [code:lib_dir(erts)]), halt().' >> $@
+
+plt: $(DIALYZER_PLT)
+
+distclean-plt:
+ $(gen_verbose) rm -f $(DIALYZER_PLT)
+
+ifneq ($(wildcard $(DIALYZER_PLT)),)
+dialyze: $(if $(filter --src,$(DIALYZER_DIRS)),,deps app)
+ $(verbose) if ! tail -n1 $(DIALYZER_PLT) | \
+ grep -q "^`$(ERL) -eval 'io:format("~s", [code:lib_dir(erts)]), halt().'`$$"; then \
+ rm $(DIALYZER_PLT); \
+ $(MAKE) plt; \
+ fi
+else
+dialyze: $(DIALYZER_PLT)
+endif
+ $(verbose) dialyzer --no_native `$(ERL) \
+ -eval "$(subst $(newline),,$(call escape_dquotes,$(call filter_opts.erl)))" \
+ -extra $(ERLC_OPTS)` $(DIALYZER_DIRS) $(DIALYZER_OPTS) $(if $(wildcard ebin/),-pa ebin/)
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-edoc edoc
+
+# Configuration.
+
+EDOC_OPTS ?=
+EDOC_SRC_DIRS ?=
+EDOC_OUTPUT ?= doc
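+
+# EDOC_OPTS is passed through to edoc:application/3, for example
+# (illustrative): EDOC_OPTS = {preprocess, true}, {todo, true}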
+
+define edoc.erl
+ SrcPaths = lists:foldl(fun(P, Acc) ->
+ filelib:wildcard(atom_to_list(P) ++ "/{src,c_src}") ++ Acc
+ end, [], [$(call comma_list,$(patsubst %,'%',$(call core_native_path,$(EDOC_SRC_DIRS))))]),
+ DefaultOpts = [{dir, "$(EDOC_OUTPUT)"}, {source_path, SrcPaths}, {subpackages, false}],
+ edoc:application($(1), ".", [$(2)] ++ DefaultOpts),
+ halt(0).
+endef
+
+# Core targets.
+
+ifneq ($(strip $(EDOC_SRC_DIRS)$(wildcard doc/overview.edoc)),)
+docs:: edoc
+endif
+
+distclean:: distclean-edoc
+
+# Plugin-specific targets.
+
+edoc: distclean-edoc doc-deps
+ $(gen_verbose) $(call erlang,$(call edoc.erl,$(PROJECT),$(EDOC_OPTS)))
+
+distclean-edoc:
+ $(gen_verbose) rm -f $(EDOC_OUTPUT)/*.css $(EDOC_OUTPUT)/*.html $(EDOC_OUTPUT)/*.png $(EDOC_OUTPUT)/edoc-info
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Configuration.
+
+DTL_FULL_PATH ?=
+DTL_PATH ?= templates/
+DTL_PREFIX ?=
+DTL_SUFFIX ?= _dtl
+DTL_OPTS ?=
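+
+# With the defaults, a template such as templates/hello.dtl (hypothetical)
+# compiles to the module hello_dtl in ebin/. With DTL_FULL_PATH=1,
+# subdirectories become part of the module name: users/show.dtl compiles
+# to users_show_dtl.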
+
+# Verbosity.
+
+dtl_verbose_0 = @echo " DTL " $(filter %.dtl,$(?F));
+dtl_verbose = $(dtl_verbose_$(V))
+
+# Core targets.
+
+DTL_PATH := $(abspath $(DTL_PATH))
+DTL_FILES := $(sort $(call core_find,$(DTL_PATH),*.dtl))
+
+ifneq ($(DTL_FILES),)
+
+DTL_NAMES = $(addprefix $(DTL_PREFIX),$(addsuffix $(DTL_SUFFIX),$(DTL_FILES:$(DTL_PATH)/%.dtl=%)))
+DTL_MODULES = $(if $(DTL_FULL_PATH),$(subst /,_,$(DTL_NAMES)),$(notdir $(DTL_NAMES)))
+BEAM_FILES += $(addsuffix .beam,$(addprefix ebin/,$(DTL_MODULES)))
+
+ifneq ($(words $(DTL_FILES)),0)
+# Rebuild templates when the Makefile changes.
+$(ERLANG_MK_TMP)/last-makefile-change-erlydtl: $(MAKEFILE_LIST) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(DTL_FILES); \
+ fi
+ $(verbose) touch $@
+
+ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change-erlydtl
+endif
+
+define erlydtl_compile.erl
+ [begin
+ Module0 = case "$(strip $(DTL_FULL_PATH))" of
+ "" ->
+ filename:basename(F, ".dtl");
+ _ ->
+ "$(call core_native_path,$(DTL_PATH))/" ++ F2 = filename:rootname(F, ".dtl"),
+ re:replace(F2, "/", "_", [{return, list}, global])
+ end,
+ Module = list_to_atom("$(DTL_PREFIX)" ++ string:to_lower(Module0) ++ "$(DTL_SUFFIX)"),
+ case erlydtl:compile(F, Module, [$(DTL_OPTS)] ++ [{out_dir, "ebin/"}, return_errors]) of
+ ok -> ok;
+ {ok, _} -> ok
+ end
+ end || F <- string:tokens("$(1)", " ")],
+ halt().
+endef
+
+ebin/$(PROJECT).app:: $(DTL_FILES) | ebin/
+ $(if $(strip $?),\
+ $(dtl_verbose) $(call erlang,$(call erlydtl_compile.erl,$(call core_native_path,$?)),\
+ -pa ebin/))
+
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, Dave Cottlehuber <dch@skunkwerks.at>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-escript escript escript-zip
+
+# Configuration.
+
+ESCRIPT_NAME ?= $(PROJECT)
+ESCRIPT_FILE ?= $(ESCRIPT_NAME)
+
+ESCRIPT_SHEBANG ?= /usr/bin/env escript
+ESCRIPT_COMMENT ?= This is an -*- erlang -*- file
+ESCRIPT_EMU_ARGS ?= -escript main $(ESCRIPT_NAME)
+
+ESCRIPT_ZIP ?= 7z a -tzip -mx=9 -mtc=off $(if $(filter-out 0,$(V)),,> /dev/null)
+ESCRIPT_ZIP_FILE ?= $(ERLANG_MK_TMP)/escript.zip
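+
+# "make escript" zips ebin/ (and the dependencies' ebin/ directories) with
+# 7z, then prepends the shebang, comment and emulator-argument lines defined
+# above to produce the executable $(ESCRIPT_FILE).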
+
+# Core targets.
+
+distclean:: distclean-escript
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Escript targets:" \
+ " escript Build an executable escript archive" \
+
+# Plugin-specific targets.
+
+escript-zip:: FULL=1
+escript-zip:: deps app
+	$(verbose) mkdir -p $(dir $(ESCRIPT_ZIP_FILE))
+ $(verbose) rm -f $(ESCRIPT_ZIP_FILE)
+ $(gen_verbose) cd .. && $(ESCRIPT_ZIP) $(ESCRIPT_ZIP_FILE) $(PROJECT)/ebin/*
+ifneq ($(DEPS),)
+ $(verbose) cd $(DEPS_DIR) && $(ESCRIPT_ZIP) $(ESCRIPT_ZIP_FILE) \
+ $(subst $(DEPS_DIR)/,,$(addsuffix /*,$(wildcard \
+ $(addsuffix /ebin,$(shell cat $(ERLANG_MK_TMP)/deps.log)))))
+endif
+
+escript:: escript-zip
+ $(gen_verbose) printf "%s\n" \
+ "#!$(ESCRIPT_SHEBANG)" \
+ "%% $(ESCRIPT_COMMENT)" \
+ "%%! $(ESCRIPT_EMU_ARGS)" > $(ESCRIPT_FILE)
+ $(verbose) cat $(ESCRIPT_ZIP_FILE) >> $(ESCRIPT_FILE)
+ $(verbose) chmod +x $(ESCRIPT_FILE)
+
+distclean-escript:
+ $(gen_verbose) rm -f $(ESCRIPT_FILE)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, Enrique Fernandez <enrique.fernandez@erlang-solutions.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: eunit apps-eunit
+
+# Configuration
+
+EUNIT_OPTS ?=
+EUNIT_ERL_OPTS ?=
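+
+# The t variable narrows the run, for example (placeholder names):
+#
+#   make eunit t=my_module          # eunit:test(['my_module'])
+#   make eunit t=my_module:my_test  # runs fun my_module:my_test/0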
+
+# Core targets.
+
+tests:: eunit
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "EUnit targets:" \
+ " eunit Run all the EUnit tests for this project"
+
+# Plugin-specific targets.
+
+define eunit.erl
+ $(call cover.erl)
+ CoverSetup(),
+ case eunit:test($1, [$(EUNIT_OPTS)]) of
+ ok -> ok;
+ error -> halt(2)
+ end,
+ CoverExport("$(call core_native_path,$(COVER_DATA_DIR))/eunit.coverdata"),
+ halt()
+endef
+
+EUNIT_ERL_OPTS += -pa $(TEST_DIR) $(CURDIR)/ebin
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+eunit: test-build cover-data-dir
+ $(gen_verbose) $(call erlang,$(call eunit.erl,['$(t)']),$(EUNIT_ERL_OPTS))
+else
+eunit: test-build cover-data-dir
+ $(gen_verbose) $(call erlang,$(call eunit.erl,fun $(t)/0),$(EUNIT_ERL_OPTS))
+endif
+else
+EUNIT_EBIN_MODS = $(notdir $(basename $(ERL_FILES) $(BEAM_FILES)))
+EUNIT_TEST_MODS = $(notdir $(basename $(call core_find,$(TEST_DIR)/,*.erl)))
+
+EUNIT_MODS = $(foreach mod,$(EUNIT_EBIN_MODS) $(filter-out \
+ $(patsubst %,%_tests,$(EUNIT_EBIN_MODS)),$(EUNIT_TEST_MODS)),'$(mod)')
+
+eunit: test-build $(if $(IS_APP)$(ROOT_DIR),,apps-eunit) cover-data-dir
+ifneq ($(wildcard src/ $(TEST_DIR)),)
+ $(gen_verbose) $(call erlang,$(call eunit.erl,[$(call comma_list,$(EUNIT_MODS))]),$(EUNIT_ERL_OPTS))
+endif
+
+ifneq ($(ALL_APPS_DIRS),)
+apps-eunit: test-build
+ $(verbose) eunit_retcode=0 ; for app in $(ALL_APPS_DIRS); do $(MAKE) -C $$app eunit IS_APP=1; \
+ [ $$? -ne 0 ] && eunit_retcode=1 ; done ; \
+ exit $$eunit_retcode
+endif
+endif
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter proper,$(DEPS) $(TEST_DEPS)),proper)
+.PHONY: proper
+
+# Targets.
+
+tests:: proper
+
+define proper_check.erl
+ $(call cover.erl)
+ code:add_pathsa([
+ "$(call core_native_path,$(CURDIR)/ebin)",
+ "$(call core_native_path,$(DEPS_DIR)/*/ebin)",
+ "$(call core_native_path,$(TEST_DIR))"]),
+ Module = fun(M) ->
+ [true] =:= lists:usort([
+ case atom_to_list(F) of
+ "prop_" ++ _ ->
+ io:format("Testing ~p:~p/0~n", [M, F]),
+ proper:quickcheck(M:F(), nocolors);
+ _ ->
+ true
+ end
+ || {F, 0} <- M:module_info(exports)])
+ end,
+ try begin
+ CoverSetup(),
+ Res = case $(1) of
+ all -> [true] =:= lists:usort([Module(M) || M <- [$(call comma_list,$(3))]]);
+ module -> Module($(2));
+ function -> proper:quickcheck($(2), nocolors)
+ end,
+ CoverExport("$(COVER_DATA_DIR)/proper.coverdata"),
+ Res
+ end of
+ true -> halt(0);
+ _ -> halt(1)
+ catch error:undef ->
+ io:format("Undefined property or module?~n~p~n", [erlang:get_stacktrace()]),
+ halt(0)
+ end.
+endef
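+
+# As with eunit, t narrows the run, for example (placeholder names):
+#
+#   make proper t=prop_my_module           # all properties of a module
+#   make proper t=prop_my_module:prop_foo  # a single property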
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+proper: test-build cover-data-dir
+ $(verbose) $(call erlang,$(call proper_check.erl,module,$(t)))
+else
+proper: test-build cover-data-dir
+ $(verbose) echo Testing $(t)/0
+ $(verbose) $(call erlang,$(call proper_check.erl,function,$(t)()))
+endif
+else
+proper: test-build cover-data-dir
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(wildcard ebin/*.beam) $(call core_find,$(TEST_DIR)/,*.beam))))))
+ $(gen_verbose) $(call erlang,$(call proper_check.erl,all,undefined,$(MODULES)))
+endif
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Verbosity.
+
+proto_verbose_0 = @echo " PROTO " $(filter %.proto,$(?F));
+proto_verbose = $(proto_verbose_$(V))
+
+# Core targets.
+
+ifneq ($(wildcard src/),)
+ifneq ($(filter gpb protobuffs,$(BUILD_DEPS) $(DEPS)),)
+PROTO_FILES := $(filter %.proto,$(ALL_SRC_FILES))
+ERL_FILES += $(addprefix src/,$(patsubst %.proto,%_pb.erl,$(notdir $(PROTO_FILES))))
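+
+# A file foo.proto (hypothetical) thus compiles to src/foo_pb.erl, with the
+# generated header placed under include/.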
+
+ifeq ($(PROTO_FILES),)
+$(ERLANG_MK_TMP)/last-makefile-change-protobuffs:
+ $(verbose) :
+else
+# Rebuild proto files when the Makefile changes.
+# We exclude $(PROJECT).d to avoid a circular dependency.
+$(ERLANG_MK_TMP)/last-makefile-change-protobuffs: $(filter-out $(PROJECT).d,$(MAKEFILE_LIST)) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(PROTO_FILES); \
+ fi
+ $(verbose) touch $@
+
+$(PROJECT).d:: $(ERLANG_MK_TMP)/last-makefile-change-protobuffs
+endif
+
+ifeq ($(filter gpb,$(BUILD_DEPS) $(DEPS)),)
+define compile_proto.erl
+ [begin
+ protobuffs_compile:generate_source(F, [
+ {output_include_dir, "./include"},
+ {output_src_dir, "./src"}])
+ end || F <- string:tokens("$1", " ")],
+ halt().
+endef
+else
+define compile_proto.erl
+ [begin
+ gpb_compile:file(F, [
+ {include_as_lib, true},
+ {module_name_suffix, "_pb"},
+ {o_hrl, "./include"},
+ {o_erl, "./src"}])
+ end || F <- string:tokens("$1", " ")],
+ halt().
+endef
+endif
+
+ifneq ($(PROTO_FILES),)
+$(PROJECT).d:: $(PROTO_FILES)
+ $(verbose) mkdir -p ebin/ include/
+ $(if $(strip $?),$(proto_verbose) $(call erlang,$(call compile_proto.erl,$?)))
+endif
+endif
+endif
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: relx-rel relx-relup distclean-relx-rel run
+
+# Configuration.
+
+RELX ?= $(ERLANG_MK_TMP)/relx
+RELX_CONFIG ?= $(CURDIR)/relx.config
+
+RELX_URL ?= https://erlang.mk/res/relx-v3.27.0
+RELX_OPTS ?=
+RELX_OUTPUT_DIR ?= _rel
+RELX_REL_EXT ?=
+RELX_TAR ?= 1
+
+ifdef SFX
+ RELX_TAR = 1
+endif
+
+ifeq ($(firstword $(RELX_OPTS)),-o)
+ RELX_OUTPUT_DIR = $(word 2,$(RELX_OPTS))
+else
+ RELX_OPTS += -o $(RELX_OUTPUT_DIR)
+endif
+
+# Core targets.
+
+ifeq ($(IS_DEP),)
+ifneq ($(wildcard $(RELX_CONFIG)),)
+rel:: relx-rel
+
+relup:: relx-relup
+endif
+endif
+
+distclean:: distclean-relx-rel
+
+# Plugin-specific targets.
+
+$(RELX): | $(ERLANG_MK_TMP)
+ $(gen_verbose) $(call core_http_get,$(RELX),$(RELX_URL))
+ $(verbose) chmod +x $(RELX)
+
+relx-rel: $(RELX) rel-deps app
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) release
+ $(verbose) $(MAKE) relx-post-rel
+ifeq ($(RELX_TAR),1)
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) tar
+endif
+
+relx-relup: $(RELX) rel-deps app
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) release
+ $(MAKE) relx-post-rel
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) relup $(if $(filter 1,$(RELX_TAR)),tar)
+
+distclean-relx-rel:
+ $(gen_verbose) rm -rf $(RELX_OUTPUT_DIR)
+
+# Default hooks.
+relx-post-rel::
+ $(verbose) :
+
+# Run target.
+
+ifeq ($(wildcard $(RELX_CONFIG)),)
+run::
+else
+
+define get_relx_release.erl
+ {ok, Config} = file:consult("$(call core_native_path,$(RELX_CONFIG))"),
+ {release, {Name, Vsn0}, _} = lists:keyfind(release, 1, Config),
+ Vsn = case Vsn0 of
+ {cmd, Cmd} -> os:cmd(Cmd);
+ semver -> "";
+ {semver, _} -> "";
+ VsnStr -> Vsn0
+ end,
+ Extended = case lists:keyfind(extended_start_script, 1, Config) of
+ {_, true} -> "1";
+ _ -> ""
+ end,
+ io:format("~s ~s ~s", [Name, Vsn, Extended]),
+ halt(0).
+endef
+
+RELX_REL := $(shell $(call erlang,$(get_relx_release.erl)))
+RELX_REL_NAME := $(word 1,$(RELX_REL))
+RELX_REL_VSN := $(word 2,$(RELX_REL))
+RELX_REL_CMD := $(if $(word 3,$(RELX_REL)),console)
+
+ifeq ($(PLATFORM),msys2)
+RELX_REL_EXT := .cmd
+endif
+
+run:: all
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) $(RELX_REL_CMD)
+
+ifdef RELOAD
+rel::
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) ping
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) \
+ eval "io:format(\"~p~n\", [c:lm()])"
+endif
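+
+# With the release already running, "make rel RELOAD=1" pings the node and
+# reloads any changed modules via c:lm().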
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Relx targets:" \
+ " run Compile the project, build the release and run it"
+
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, M Robert Martin <rob@version2beta.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: shell
+
+# Configuration.
+
+SHELL_ERL ?= erl
+SHELL_PATHS ?= $(CURDIR)/ebin $(TEST_DIR)
+SHELL_OPTS ?=
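+
+# For example (illustrative):
+#   make shell SHELL_OPTS="-eval 'application:ensure_all_started(my_app)'"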
+
+ALL_SHELL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(SHELL_DEPS))
+
+# Core targets
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Shell targets:" \
+ " shell Run an erlang shell with SHELL_OPTS or reasonable default"
+
+# Plugin-specific targets.
+
+$(foreach dep,$(SHELL_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+build-shell-deps:
+else
+build-shell-deps: $(ALL_SHELL_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_SHELL_DEPS_DIRS) ; do \
+ if [ -z "$(strip $(FULL))" ] && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ else \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ fi \
+ done
+endif
+
+shell:: build-shell-deps
+ $(gen_verbose) $(SHELL_ERL) -pa $(SHELL_PATHS) $(SHELL_OPTS)
+
+# Copyright 2017, Stanislaw Klekot <dozzie@jarowit.net>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-sphinx sphinx
+
+# Configuration.
+
+SPHINX_BUILD ?= sphinx-build
+SPHINX_SOURCE ?= doc
+SPHINX_CONFDIR ?=
+SPHINX_FORMATS ?= html
+SPHINX_DOCTREES ?= $(ERLANG_MK_TMP)/sphinx.doctrees
+SPHINX_OPTS ?=
+
+#sphinx_html_opts =
+#sphinx_html_output = html
+#sphinx_man_opts =
+#sphinx_man_output = man
+#sphinx_latex_opts =
+#sphinx_latex_output = latex
+
+# Helpers.
+
+sphinx_build_0 = @echo " SPHINX" $1; $(SPHINX_BUILD) -N -q
+sphinx_build_1 = $(SPHINX_BUILD) -N
+sphinx_build_2 = set -x; $(SPHINX_BUILD)
+sphinx_build = $(sphinx_build_$(V))
+
+define sphinx.build
+$(call sphinx_build,$1) -b $1 -d $(SPHINX_DOCTREES) $(if $(SPHINX_CONFDIR),-c $(SPHINX_CONFDIR)) $(SPHINX_OPTS) $(sphinx_$1_opts) -- $(SPHINX_SOURCE) $(call sphinx.output,$1)
+
+endef
+
+define sphinx.output
+$(if $(sphinx_$1_output),$(sphinx_$1_output),$1)
+endef
+
+# Targets.
+
+ifneq ($(wildcard $(if $(SPHINX_CONFDIR),$(SPHINX_CONFDIR),$(SPHINX_SOURCE))/conf.py),)
+docs:: sphinx
+distclean:: distclean-sphinx
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Sphinx targets:" \
+ " sphinx Generate Sphinx documentation." \
+ "" \
+ "ReST sources and 'conf.py' file are expected in directory pointed by" \
+ "SPHINX_SOURCE ('doc' by default). SPHINX_FORMATS lists formats to build (only" \
+ "'html' format is generated by default); target directory can be specified by" \
+ 'setting sphinx_$${format}_output, for example: sphinx_html_output = output/html' \
+ "Additional Sphinx options can be set in SPHINX_OPTS."
+
+# Plugin-specific targets.
+
+sphinx:
+ $(foreach F,$(SPHINX_FORMATS),$(call sphinx.build,$F))
+
+distclean-sphinx:
+ $(gen_verbose) rm -rf $(filter-out $(SPHINX_SOURCE),$(foreach F,$(SPHINX_FORMATS),$(call sphinx.output,$F)))
+
+# Copyright (c) 2017, Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: show-ERL_LIBS show-ERLC_OPTS show-TEST_ERLC_OPTS
+
+show-ERL_LIBS:
+ @echo $(ERL_LIBS)
+
+show-ERLC_OPTS:
+ @$(foreach opt,$(ERLC_OPTS) -pa ebin -I include,echo "$(opt)";)
+
+show-TEST_ERLC_OPTS:
+ @$(foreach opt,$(TEST_ERLC_OPTS) -pa ebin -I include,echo "$(opt)";)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter triq,$(DEPS) $(TEST_DEPS)),triq)
+.PHONY: triq
+
+# Targets.
+
+tests:: triq
+
+define triq_check.erl
+ $(call cover.erl)
+ code:add_pathsa([
+ "$(call core_native_path,$(CURDIR)/ebin)",
+ "$(call core_native_path,$(DEPS_DIR)/*/ebin)",
+ "$(call core_native_path,$(TEST_DIR))"]),
+ try begin
+ CoverSetup(),
+ Res = case $(1) of
+ all -> [true] =:= lists:usort([triq:check(M) || M <- [$(call comma_list,$(3))]]);
+ module -> triq:check($(2));
+ function -> triq:check($(2))
+ end,
+ CoverExport("$(COVER_DATA_DIR)/triq.coverdata"),
+ Res
+ end of
+ true -> halt(0);
+ _ -> halt(1)
+ catch error:undef ->
+ io:format("Undefined property or module?~n~p~n", [erlang:get_stacktrace()]),
+ halt(0)
+ end.
+endef
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+triq: test-build cover-data-dir
+ $(verbose) $(call erlang,$(call triq_check.erl,module,$(t)))
+else
+triq: test-build cover-data-dir
+ $(verbose) echo Testing $(t)/0
+ $(verbose) $(call erlang,$(call triq_check.erl,function,$(t)()))
+endif
+else
+triq: test-build cover-data-dir
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(wildcard ebin/*.beam) $(call core_find,$(TEST_DIR)/,*.beam))))))
+ $(gen_verbose) $(call erlang,$(call triq_check.erl,all,undefined,$(MODULES)))
+endif
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Erlang Solutions Ltd.
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: xref distclean-xref
+
+# Configuration.
+
+ifeq ($(XREF_CONFIG),)
+ XREFR_ARGS :=
+else
+ XREFR_ARGS := -c $(XREF_CONFIG)
+endif
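+
+# For example, "make xref XREF_CONFIG=xref.config" (illustrative) makes
+# xrefr read its configuration from that file.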
+
+XREFR ?= $(CURDIR)/xrefr
+export XREFR
+
+XREFR_URL ?= https://github.com/inaka/xref_runner/releases/download/1.1.0/xrefr
+
+# Core targets.
+
+help::
+ $(verbose) printf '%s\n' '' \
+ 'Xref targets:' \
+		' xref Run Xrefr using $$XREF_CONFIG as the config file, if defined'
+
+distclean:: distclean-xref
+
+# Plugin-specific targets.
+
+$(XREFR):
+ $(gen_verbose) $(call core_http_get,$(XREFR),$(XREFR_URL))
+ $(verbose) chmod +x $(XREFR)
+
+xref: deps app $(XREFR)
+ $(gen_verbose) $(XREFR) $(XREFR_ARGS)
+
+distclean-xref:
+ $(gen_verbose) rm -rf $(XREFR)
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Viktor Söderqvist <viktor@zuiderkwast.se>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+COVER_REPORT_DIR ?= cover
+COVER_DATA_DIR ?= $(COVER_REPORT_DIR)
+
+ifdef COVER
+COVER_APPS ?= $(notdir $(ALL_APPS_DIRS))
+COVER_DEPS ?=
+endif
+
+# Code coverage for Common Test.
+
+ifdef COVER
+ifdef CT_RUN
+ifneq ($(wildcard $(TEST_DIR)),)
+test-build:: $(TEST_DIR)/ct.cover.spec
+
+$(TEST_DIR)/ct.cover.spec: cover-data-dir
+ $(gen_verbose) printf "%s\n" \
+ "{incl_app, '$(PROJECT)', details}." \
+ "{incl_dirs, '$(PROJECT)', [\"$(call core_native_path,$(CURDIR)/ebin)\" \
+ $(foreach a,$(COVER_APPS),$(comma) \"$(call core_native_path,$(APPS_DIR)/$a/ebin)\") \
+ $(foreach d,$(COVER_DEPS),$(comma) \"$(call core_native_path,$(DEPS_DIR)/$d/ebin)\")]}." \
+ '{export,"$(call core_native_path,$(abspath $(COVER_DATA_DIR))/ct.coverdata)"}.' > $@
+
+CT_RUN += -cover $(TEST_DIR)/ct.cover.spec
+endif
+endif
+endif
+
+# Code coverage for other tools.
+
+ifdef COVER
+define cover.erl
+ CoverSetup = fun() ->
+ Dirs = ["$(call core_native_path,$(CURDIR)/ebin)"
+ $(foreach a,$(COVER_APPS),$(comma) "$(call core_native_path,$(APPS_DIR)/$a/ebin)")
+ $(foreach d,$(COVER_DEPS),$(comma) "$(call core_native_path,$(DEPS_DIR)/$d/ebin)")],
+ [begin
+ case filelib:is_dir(Dir) of
+ false -> false;
+ true ->
+ case cover:compile_beam_directory(Dir) of
+ {error, _} -> halt(1);
+ _ -> true
+ end
+ end
+ end || Dir <- Dirs]
+ end,
+ CoverExport = fun(Filename) -> cover:export(Filename) end,
+endef
+else
+define cover.erl
+ CoverSetup = fun() -> ok end,
+ CoverExport = fun(_) -> ok end,
+endef
+endif
+
+# Core targets
+
+ifdef COVER
+ifneq ($(COVER_REPORT_DIR),)
+tests::
+ $(verbose) $(MAKE) --no-print-directory cover-report
+endif
+
+cover-data-dir: | $(COVER_DATA_DIR)
+
+$(COVER_DATA_DIR):
+ $(verbose) mkdir -p $(COVER_DATA_DIR)
+else
+cover-data-dir:
+endif
+
+clean:: coverdata-clean
+
+ifneq ($(COVER_REPORT_DIR),)
+distclean:: cover-report-clean
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Cover targets:" \
+ " cover-report Generate a HTML coverage report from previously collected" \
+ " cover data." \
+ " all.coverdata Merge all coverdata files into all.coverdata." \
+ "" \
+ "If COVER=1 is set, coverage data is generated by the targets eunit and ct. The" \
+ "target tests additionally generates a HTML coverage report from the combined" \
+ "coverdata files from each of these testing tools. HTML reports can be disabled" \
+ "by setting COVER_REPORT_DIR to empty."
+
+# Plugin-specific targets.
+
+COVERDATA = $(filter-out $(COVER_DATA_DIR)/all.coverdata,$(wildcard $(COVER_DATA_DIR)/*.coverdata))
+
+.PHONY: coverdata-clean
+coverdata-clean:
+ $(gen_verbose) rm -f $(COVER_DATA_DIR)/*.coverdata $(TEST_DIR)/ct.cover.spec
+
+# Merge all coverdata files into one.
+define cover_export.erl
+ $(foreach f,$(COVERDATA),cover:import("$(f)") == ok orelse halt(1),)
+ cover:export("$(COVER_DATA_DIR)/$@"), halt(0).
+endef
+
+all.coverdata: $(COVERDATA) cover-data-dir
+ $(gen_verbose) $(call erlang,$(cover_export.erl))
+
+# These are only defined if COVER_REPORT_DIR is non-empty. Set COVER_REPORT_DIR to
+# empty if you want the coverdata files but not the HTML report.
+ifneq ($(COVER_REPORT_DIR),)
+
+.PHONY: cover-report-clean cover-report
+
+cover-report-clean:
+ $(gen_verbose) rm -rf $(COVER_REPORT_DIR)
+ifneq ($(COVER_REPORT_DIR),$(COVER_DATA_DIR))
+ $(if $(shell ls -A $(COVER_DATA_DIR)/),,$(verbose) rmdir $(COVER_DATA_DIR))
+endif
+
+ifeq ($(COVERDATA),)
+cover-report:
+else
+
+# Modules which include eunit.hrl always contain one line without coverage
+# because eunit defines test/0 which is never called. We compensate for this.
+EUNIT_HRL_MODS = $(subst $(space),$(comma),$(shell \
+ grep -H -e '^\s*-include.*include/eunit\.hrl"' src/*.erl \
+ | sed "s/^src\/\(.*\)\.erl:.*/'\1'/" | uniq))
+
+define cover_report.erl
+ $(foreach f,$(COVERDATA),cover:import("$(f)") == ok orelse halt(1),)
+ Ms = cover:imported_modules(),
+ [cover:analyse_to_file(M, "$(COVER_REPORT_DIR)/" ++ atom_to_list(M)
+ ++ ".COVER.html", [html]) || M <- Ms],
+ Report = [begin {ok, R} = cover:analyse(M, module), R end || M <- Ms],
+ EunitHrlMods = [$(EUNIT_HRL_MODS)],
+ Report1 = [{M, {Y, case lists:member(M, EunitHrlMods) of
+ true -> N - 1; false -> N end}} || {M, {Y, N}} <- Report],
+ TotalY = lists:sum([Y || {_, {Y, _}} <- Report1]),
+ TotalN = lists:sum([N || {_, {_, N}} <- Report1]),
+ Perc = fun(Y, N) -> case Y + N of 0 -> 100; S -> round(100 * Y / S) end end,
+ TotalPerc = Perc(TotalY, TotalN),
+ {ok, F} = file:open("$(COVER_REPORT_DIR)/index.html", [write]),
+ io:format(F, "<!DOCTYPE html><html>~n"
+ "<head><meta charset=\"UTF-8\">~n"
+ "<title>Coverage report</title></head>~n"
+ "<body>~n", []),
+ io:format(F, "<h1>Coverage</h1>~n<p>Total: ~p%</p>~n", [TotalPerc]),
+ io:format(F, "<table><tr><th>Module</th><th>Coverage</th></tr>~n", []),
+ [io:format(F, "<tr><td><a href=\"~p.COVER.html\">~p</a></td>"
+ "<td>~p%</td></tr>~n",
+ [M, M, Perc(Y, N)]) || {M, {Y, N}} <- Report1],
+ How = "$(subst $(space),$(comma)$(space),$(basename $(COVERDATA)))",
+ Date = "$(shell date -u "+%Y-%m-%dT%H:%M:%SZ")",
+ io:format(F, "</table>~n"
+ "<p>Generated using ~s and erlang.mk on ~s.</p>~n"
+ "</body></html>", [How, Date]),
+ halt().
+endef
+
+cover-report:
+ $(verbose) mkdir -p $(COVER_REPORT_DIR)
+ $(gen_verbose) $(call erlang,$(cover_report.erl))
+
+endif
+endif # ifneq ($(COVER_REPORT_DIR),)
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: sfx
+
+ifdef RELX_REL
+ifdef SFX
+
+# Configuration.
+
+SFX_ARCHIVE ?= $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/$(RELX_REL_NAME)-$(RELX_REL_VSN).tar.gz
+SFX_OUTPUT_FILE ?= $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME).run
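+
+# For example, with SFX=1 "make rel" appends the release tarball to the stub
+# below, producing $(SFX_OUTPUT_FILE): a script that unpacks itself into a
+# temporary directory and starts the release console.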
+
+# Core targets.
+
+rel:: sfx
+
+# Plugin-specific targets.
+
+define sfx_stub
+#!/bin/sh
+
+TMPDIR=`mktemp -d`
+ARCHIVE=`awk '/^__ARCHIVE_BELOW__$$/ {print NR + 1; exit 0;}' $$0`
+FILENAME=$$(basename $$0)
+REL=$${FILENAME%.*}
+
+tail -n+$$ARCHIVE $$0 | tar -xzf - -C $$TMPDIR
+
+$$TMPDIR/bin/$$REL console
+RET=$$?
+
+rm -rf $$TMPDIR
+
+exit $$RET
+
+__ARCHIVE_BELOW__
+endef
+
+sfx:
+ $(verbose) $(call core_render,sfx_stub,$(SFX_OUTPUT_FILE))
+ $(gen_verbose) cat $(SFX_ARCHIVE) >> $(SFX_OUTPUT_FILE)
+ $(verbose) chmod +x $(SFX_OUTPUT_FILE)
+
+endif
+endif
+
+# Copyright (c) 2013-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# External plugins.
+
+DEP_PLUGINS ?=
+
+$(foreach p,$(DEP_PLUGINS),\
+ $(eval $(if $(findstring /,$p),\
+ $(call core_dep_plugin,$p,$(firstword $(subst /, ,$p))),\
+ $(call core_dep_plugin,$p/plugins.mk,$p))))
+
+help:: help-plugins
+
+help-plugins::
+ $(verbose) :
+
+# Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015-2016, Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Fetch dependencies recursively (without building them).
+
+.PHONY: fetch-deps fetch-doc-deps fetch-rel-deps fetch-test-deps \
+ fetch-shell-deps
+
+.PHONY: $(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+fetch-deps: $(ERLANG_MK_RECURSIVE_DEPS_LIST)
+fetch-doc-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST)
+fetch-rel-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST)
+fetch-test-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST)
+fetch-shell-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+ifneq ($(SKIP_DEPS),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST):
+ $(verbose) :> $@
+else
+# By default, we fetch "normal" dependencies. They are also included no
+# matter the type of requested dependencies.
+#
+# $(ALL_DEPS_DIRS) includes $(BUILD_DEPS).
+
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_DOC_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_REL_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_TEST_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_SHELL_DEPS_DIRS)
+
+# Allow fetch-deps to be combined with $(DEP_TYPES) so that multiple types
+# of dependencies can be fetched with a single target.
+ifneq ($(filter doc,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_DOC_DEPS_DIRS)
+endif
+ifneq ($(filter rel,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_REL_DEPS_DIRS)
+endif
+ifneq ($(filter test,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_TEST_DEPS_DIRS)
+endif
+ifneq ($(filter shell,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_SHELL_DEPS_DIRS)
+endif
+
+ERLANG_MK_RECURSIVE_TMP_LIST := $(abspath $(ERLANG_MK_TMP)/recursive-tmp-deps-$(shell echo $$PPID).log)
+
+$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): | $(ERLANG_MK_TMP)
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_RECURSIVE_TMP_LIST)
+endif
+ $(verbose) touch $(ERLANG_MK_RECURSIVE_TMP_LIST)
+ $(verbose) set -e; for dep in $^ ; do \
+ if ! grep -qs ^$$dep$$ $(ERLANG_MK_RECURSIVE_TMP_LIST); then \
+ echo $$dep >> $(ERLANG_MK_RECURSIVE_TMP_LIST); \
+ if grep -qs -E "^[[:blank:]]*include[[:blank:]]+(erlang\.mk|.*/erlang\.mk|.*ERLANG_MK_FILENAME.*)$$" \
+ $$dep/GNUmakefile $$dep/makefile $$dep/Makefile; then \
+ $(MAKE) -C $$dep fetch-deps \
+ IS_DEP=1 \
+ ERLANG_MK_RECURSIVE_TMP_LIST=$(ERLANG_MK_RECURSIVE_TMP_LIST); \
+ fi \
+ fi \
+ done
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) sort < $(ERLANG_MK_RECURSIVE_TMP_LIST) | \
+ uniq > $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted
+ $(verbose) cmp -s $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted $@ \
+ || mv $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted $@
+ $(verbose) rm -f $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted
+ $(verbose) rm $(ERLANG_MK_RECURSIVE_TMP_LIST)
+endif
+endif # ifneq ($(SKIP_DEPS),)
+
+# List dependencies recursively.
+
+.PHONY: list-deps list-doc-deps list-rel-deps list-test-deps \
+ list-shell-deps
+
+list-deps: $(ERLANG_MK_RECURSIVE_DEPS_LIST)
+list-doc-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST)
+list-rel-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST)
+list-test-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST)
+list-shell-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+list-deps list-doc-deps list-rel-deps list-test-deps list-shell-deps:
+ $(verbose) cat $^
+
+# Query dependencies recursively.
+
+.PHONY: query-deps query-doc-deps query-rel-deps query-test-deps \
+ query-shell-deps
+
+QUERY ?= name fetch_method repo version
+
+define query_target
+$(1): $(2) clean-tmp-query.log
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(4)
+endif
+ $(verbose) $(foreach dep,$(3),\
+ echo $(PROJECT): $(foreach q,$(QUERY),$(call query_$(q),$(dep))) >> $(4) ;)
+ $(if $(filter-out query-deps,$(1)),,\
+ $(verbose) set -e; for dep in $(3) ; do \
+ if grep -qs ^$$$$dep$$$$ $(ERLANG_MK_TMP)/query.log; then \
+ :; \
+ else \
+ echo $$$$dep >> $(ERLANG_MK_TMP)/query.log; \
+ $(MAKE) -C $(DEPS_DIR)/$$$$dep $$@ QUERY="$(QUERY)" IS_DEP=1 || true; \
+ fi \
+ done)
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) touch $(4)
+ $(verbose) cat $(4)
+endif
+endef
+
+clean-tmp-query.log:
+ifeq ($(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_TMP)/query.log
+endif
+
+$(eval $(call query_target,query-deps,$(ERLANG_MK_RECURSIVE_DEPS_LIST),$(BUILD_DEPS) $(DEPS),$(ERLANG_MK_QUERY_DEPS_FILE)))
+$(eval $(call query_target,query-doc-deps,$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST),$(DOC_DEPS),$(ERLANG_MK_QUERY_DOC_DEPS_FILE)))
+$(eval $(call query_target,query-rel-deps,$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST),$(REL_DEPS),$(ERLANG_MK_QUERY_REL_DEPS_FILE)))
+$(eval $(call query_target,query-test-deps,$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST),$(TEST_DEPS),$(ERLANG_MK_QUERY_TEST_DEPS_FILE)))
+$(eval $(call query_target,query-shell-deps,$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST),$(SHELL_DEPS),$(ERLANG_MK_QUERY_SHELL_DEPS_FILE)))
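+
+# Example (illustrative): the QUERY variable restricts the fields
+# printed for each dependency, e.g. to show only names and
+# repositories:
+#
+#   $ make query-deps QUERY="name repo"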
diff --git a/deps/rabbitmq_peer_discovery_common/include/rabbit_peer_discovery.hrl b/deps/rabbitmq_peer_discovery_common/include/rabbit_peer_discovery.hrl
new file mode 100644
index 0000000000..14aa6ad3b8
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_common/include/rabbit_peer_discovery.hrl
@@ -0,0 +1,26 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+% rabbitmq/rabbitmq-peer-discovery-aws#25
+% Note: this timeout must not be greater than the default
+% gen_server:call timeout of 5000ms. When set, this `timeout`
+% is used by `httpc` first as the connect timeout and then as
+% the request timeout.
+-define(DEFAULT_HTTP_TIMEOUT, 2250).
+
+-type peer_discovery_config_value() :: atom() | integer() | string() | undefined.
+
+-record(peer_discovery_config_entry_meta,
+ {env_variable :: string(),
+ default_value :: peer_discovery_config_value(),
+ type :: atom()}).
+
+-type(peer_discovery_config_entry_meta() :: #peer_discovery_config_entry_meta{
+ type :: atom(),
+ env_variable :: string(),
+ default_value :: peer_discovery_config_value()
+ }).
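+
+%% Example (illustrative): metadata for a hypothetical "interval"
+%% setting that can be overridden with the INTERVAL environment
+%% variable and defaults to 60:
+%%
+%%   #peer_discovery_config_entry_meta{env_variable  = "INTERVAL",
+%%                                     default_value = 60,
+%%                                     type          = integer}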
diff --git a/deps/rabbitmq_peer_discovery_common/priv/schema/rabbitmq_peer_discovery_common.schema b/deps/rabbitmq_peer_discovery_common/priv/schema/rabbitmq_peer_discovery_common.schema
new file mode 100644
index 0000000000..59936e02b0
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_common/priv/schema/rabbitmq_peer_discovery_common.schema
@@ -0,0 +1,75 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+%% interval
+
+{mapping, "cluster_formation.node_cleanup.interval", "rabbit.cluster_formation.node_cleanup.cleanup_interval", [
+ {datatype, integer}, {validators, ["non_negative_integer"]}
+]}.
+
+{translation, "rabbit.cluster_formation.node_cleanup.cleanup_interval",
+fun(Conf) ->
+ case cuttlefish:conf_get("cluster_formation.node_cleanup.interval", Conf, undefined) of
+ undefined -> cuttlefish:unset();
+ Value -> Value
+ end
+end}.
+
+
+%% only log warnings?
+
+{mapping, "cluster_formation.node_cleanup.only_log_warning", "rabbit.cluster_formation.node_cleanup.cleanup_only_log_warning", [
+ {datatype, {enum, [true, false]}}
+]}.
+
+{translation, "rabbit.cluster_formation.node_cleanup.cleanup_only_log_warning",
+fun(Conf) ->
+ case cuttlefish:conf_get("cluster_formation.node_cleanup.only_log_warning", Conf, undefined) of
+ undefined -> cuttlefish:unset();
+ Value -> Value
+ end
+end}.
+
+
+%% HTTP proxy host
+
+{mapping, "cluster_formation.proxy.http_proxy", "rabbit.cluster_formation.proxy.http_proxy", [
+ {datatype, string}
+]}.
+
+{translation, "rabbit.cluster_formation.proxy.http_proxy",
+fun(Conf) ->
+ case cuttlefish:conf_get("cluster_formation.proxy.http_proxy", Conf, undefined) of
+ undefined -> cuttlefish:unset();
+ Value -> Value
+ end
+end}.
+
+%% HTTPS proxy host
+
+{mapping, "cluster_formation.proxy.https_proxy", "rabbit.cluster_formation.proxy.https_proxy", [
+ {datatype, string}
+]}.
+
+{translation, "rabbit.cluster_formation.proxy.https_proxy",
+fun(Conf) ->
+ case cuttlefish:conf_get("cluster_formation.proxy.https_proxy", Conf, undefined) of
+ undefined -> cuttlefish:unset();
+ Value -> Value
+ end
+end}.
+
+%% Proxy exclusion list
+
+{mapping, "cluster_formation.proxy.proxy_exclusions.$name", "rabbit.cluster_formation.proxy.proxy_exclusions",
+ [{datatype, string}]}.
+
+{translation, "rabbit.cluster_formation.proxy.proxy_exclusions",
+fun(Conf) ->
+ Settings = cuttlefish_variable:filter_by_prefix("cluster_formation.proxy.proxy_exclusions", Conf),
+ [ V || {_, V} <- Settings ]
+end}.
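+
+%% Example (illustrative): the mapping above collects every
+%% `cluster_formation.proxy.proxy_exclusions.$name` entry into one
+%% list, so a rabbitmq.conf containing
+%%
+%%   cluster_formation.proxy.proxy_exclusions.1 = localhost
+%%   cluster_formation.proxy.proxy_exclusions.2 = 192.168.0.1
+%%
+%% translates to the Erlang term ["localhost", "192.168.0.1"].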
diff --git a/deps/rabbitmq_peer_discovery_common/rabbitmq-components.mk b/deps/rabbitmq_peer_discovery_common/rabbitmq-components.mk
new file mode 100644
index 0000000000..b2a3be8b35
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_common/rabbitmq-components.mk
@@ -0,0 +1,359 @@
+ifeq ($(.DEFAULT_GOAL),)
+# Set the default goal to `all` because this file defines some targets
+# before the inclusion of erlang.mk, which would otherwise cause the
+# wrong target to become the default.
+.DEFAULT_GOAL = all
+endif
+
+# PROJECT_VERSION defaults to:
+# 1. the version exported by rabbitmq-server-release;
+# 2. the version stored in `git-revisions.txt`, if it exists;
+# 3. a version based on git-describe(1), if it is a Git clone;
+# 4. 0.0.0
+
+PROJECT_VERSION := $(RABBITMQ_VERSION)
+
+ifeq ($(PROJECT_VERSION),)
+PROJECT_VERSION := $(shell \
+if test -f git-revisions.txt; then \
+ head -n1 git-revisions.txt | \
+ awk '{print $$$(words $(PROJECT_DESCRIPTION) version);}'; \
+else \
+ (git describe --dirty --abbrev=7 --tags --always --first-parent \
+ 2>/dev/null || echo rabbitmq_v0_0_0) | \
+ sed -e 's/^rabbitmq_v//' -e 's/^v//' -e 's/_/./g' -e 's/-/+/' \
+ -e 's/-/./g'; \
+fi)
+endif
+
+# --------------------------------------------------------------------
+# RabbitMQ components.
+# --------------------------------------------------------------------
+
+# For RabbitMQ repositories, we want to check out branches which match
+# the parent project. For instance, if the parent project is on a
+# release tag, dependencies must be on the same release tag. If the
+# parent project is on a topic branch, dependencies must be on the same
+# topic branch, or fall back to `stable` or `master`, whichever was the
+# base of the topic branch.
+
+dep_amqp_client = git_rmq rabbitmq-erlang-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_amqp10_client = git_rmq rabbitmq-amqp1.0-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_amqp10_common = git_rmq rabbitmq-amqp1.0-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbit = git_rmq rabbitmq-server $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbit_common = git_rmq rabbitmq-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_amqp1_0 = git_rmq rabbitmq-amqp1.0 $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_amqp = git_rmq rabbitmq-auth-backend-amqp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_cache = git_rmq rabbitmq-auth-backend-cache $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_http = git_rmq rabbitmq-auth-backend-http $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_ldap = git_rmq rabbitmq-auth-backend-ldap $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_oauth2 = git_rmq rabbitmq-auth-backend-oauth2 $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_mechanism_ssl = git_rmq rabbitmq-auth-mechanism-ssl $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_aws = git_rmq rabbitmq-aws $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_boot_steps_visualiser = git_rmq rabbitmq-boot-steps-visualiser $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_cli = git_rmq rabbitmq-cli $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_codegen = git_rmq rabbitmq-codegen $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_consistent_hash_exchange = git_rmq rabbitmq-consistent-hash-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_ct_client_helpers = git_rmq rabbitmq-ct-client-helpers $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_ct_helpers = git_rmq rabbitmq-ct-helpers $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_delayed_message_exchange = git_rmq rabbitmq-delayed-message-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_dotnet_client = git_rmq rabbitmq-dotnet-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_event_exchange = git_rmq rabbitmq-event-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_federation = git_rmq rabbitmq-federation $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_federation_management = git_rmq rabbitmq-federation-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_java_client = git_rmq rabbitmq-java-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_client = git_rmq rabbitmq-jms-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_cts = git_rmq rabbitmq-jms-cts $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_topic_exchange = git_rmq rabbitmq-jms-topic-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_lvc_exchange = git_rmq rabbitmq-lvc-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management = git_rmq rabbitmq-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_agent = git_rmq rabbitmq-management-agent $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_exchange = git_rmq rabbitmq-management-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_themes = git_rmq rabbitmq-management-themes $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_message_timestamp = git_rmq rabbitmq-message-timestamp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_metronome = git_rmq rabbitmq-metronome $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_mqtt = git_rmq rabbitmq-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_objc_client = git_rmq rabbitmq-objc-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_aws = git_rmq rabbitmq-peer-discovery-aws $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_common = git_rmq rabbitmq-peer-discovery-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_consul = git_rmq rabbitmq-peer-discovery-consul $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_etcd = git_rmq rabbitmq-peer-discovery-etcd $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_k8s = git_rmq rabbitmq-peer-discovery-k8s $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_prometheus = git_rmq rabbitmq-prometheus $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_random_exchange = git_rmq rabbitmq-random-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_recent_history_exchange = git_rmq rabbitmq-recent-history-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_routing_node_stamp = git_rmq rabbitmq-routing-node-stamp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_rtopic_exchange = git_rmq rabbitmq-rtopic-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_server_release = git_rmq rabbitmq-server-release $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_sharding = git_rmq rabbitmq-sharding $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_shovel = git_rmq rabbitmq-shovel $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_shovel_management = git_rmq rabbitmq-shovel-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_stomp = git_rmq rabbitmq-stomp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_stream = git_rmq rabbitmq-stream $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_toke = git_rmq rabbitmq-toke $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_top = git_rmq rabbitmq-top $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_tracing = git_rmq rabbitmq-tracing $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_trust_store = git_rmq rabbitmq-trust-store $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_test = git_rmq rabbitmq-test $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_dispatch = git_rmq rabbitmq-web-dispatch $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_stomp = git_rmq rabbitmq-web-stomp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_stomp_examples = git_rmq rabbitmq-web-stomp-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt = git_rmq rabbitmq-web-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt_examples = git_rmq rabbitmq-web-mqtt-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_website = git_rmq rabbitmq-website $(current_rmq_ref) $(base_rmq_ref) live master
+dep_toke = git_rmq toke $(current_rmq_ref) $(base_rmq_ref) master
+
+dep_rabbitmq_public_umbrella = git_rmq rabbitmq-public-umbrella $(current_rmq_ref) $(base_rmq_ref) master
+
+# Third-party dependencies version pinning.
+#
+# We pin them in this file, which is copied to all projects, to ensure
+# that all projects use the same versions. This avoids conflicts and
+# makes it possible to work with rabbitmq-public-umbrella.
+
+dep_accept = hex 0.3.5
+dep_cowboy = hex 2.8.0
+dep_cowlib = hex 2.9.1
+dep_jsx = hex 2.11.0
+dep_lager = hex 3.8.0
+dep_prometheus = git https://github.com/deadtrickster/prometheus.erl.git master
+dep_ra = git https://github.com/rabbitmq/ra.git master
+dep_ranch = hex 1.7.1
+dep_recon = hex 2.5.1
+dep_observer_cli = hex 1.5.4
+dep_stdout_formatter = hex 0.2.4
+dep_sysmon_handler = hex 1.3.0
+
+RABBITMQ_COMPONENTS = amqp_client \
+ amqp10_common \
+ amqp10_client \
+ rabbit \
+ rabbit_common \
+ rabbitmq_amqp1_0 \
+ rabbitmq_auth_backend_amqp \
+ rabbitmq_auth_backend_cache \
+ rabbitmq_auth_backend_http \
+ rabbitmq_auth_backend_ldap \
+ rabbitmq_auth_backend_oauth2 \
+ rabbitmq_auth_mechanism_ssl \
+ rabbitmq_aws \
+ rabbitmq_boot_steps_visualiser \
+ rabbitmq_cli \
+ rabbitmq_codegen \
+ rabbitmq_consistent_hash_exchange \
+ rabbitmq_ct_client_helpers \
+ rabbitmq_ct_helpers \
+ rabbitmq_delayed_message_exchange \
+ rabbitmq_dotnet_client \
+ rabbitmq_event_exchange \
+ rabbitmq_federation \
+ rabbitmq_federation_management \
+ rabbitmq_java_client \
+ rabbitmq_jms_client \
+ rabbitmq_jms_cts \
+ rabbitmq_jms_topic_exchange \
+ rabbitmq_lvc_exchange \
+ rabbitmq_management \
+ rabbitmq_management_agent \
+ rabbitmq_management_exchange \
+ rabbitmq_management_themes \
+ rabbitmq_message_timestamp \
+ rabbitmq_metronome \
+ rabbitmq_mqtt \
+ rabbitmq_objc_client \
+ rabbitmq_peer_discovery_aws \
+ rabbitmq_peer_discovery_common \
+ rabbitmq_peer_discovery_consul \
+ rabbitmq_peer_discovery_etcd \
+ rabbitmq_peer_discovery_k8s \
+ rabbitmq_prometheus \
+ rabbitmq_random_exchange \
+ rabbitmq_recent_history_exchange \
+ rabbitmq_routing_node_stamp \
+ rabbitmq_rtopic_exchange \
+ rabbitmq_server_release \
+ rabbitmq_sharding \
+ rabbitmq_shovel \
+ rabbitmq_shovel_management \
+ rabbitmq_stomp \
+ rabbitmq_stream \
+ rabbitmq_toke \
+ rabbitmq_top \
+ rabbitmq_tracing \
+ rabbitmq_trust_store \
+ rabbitmq_web_dispatch \
+ rabbitmq_web_mqtt \
+ rabbitmq_web_mqtt_examples \
+ rabbitmq_web_stomp \
+ rabbitmq_web_stomp_examples \
+ rabbitmq_website
+
+# Erlang.mk does not rebuild dependencies once they have been
+# compiled, except for those listed in the `$(FORCE_REBUILD)`
+# variable.
+#
+# We want all RabbitMQ components to always be rebuilt: this makes it
+# easier to work on several components at the same time.
+
+FORCE_REBUILD = $(RABBITMQ_COMPONENTS)
+
+# Several components have a custom erlang.mk/build.config, mainly
+# to disable eunit. Therefore, we can't use the top-level project's
+# erlang.mk copy.
+NO_AUTOPATCH += $(RABBITMQ_COMPONENTS)
+
+ifeq ($(origin current_rmq_ref),undefined)
+ifneq ($(wildcard .git),)
+current_rmq_ref := $(shell (\
+ ref=$$(LANG=C git branch --list | awk '/^\* \(.*detached / {ref=$$0; sub(/.*detached [^ ]+ /, "", ref); sub(/\)$$/, "", ref); print ref; exit;} /^\* / {ref=$$0; sub(/^\* /, "", ref); print ref; exit}');\
+ if test "$$(git rev-parse --short HEAD)" != "$$ref"; then echo "$$ref"; fi))
+else
+current_rmq_ref := master
+endif
+endif
+export current_rmq_ref
+
+ifeq ($(origin base_rmq_ref),undefined)
+ifneq ($(wildcard .git),)
+possible_base_rmq_ref := master
+ifeq ($(possible_base_rmq_ref),$(current_rmq_ref))
+base_rmq_ref := $(current_rmq_ref)
+else
+base_rmq_ref := $(shell \
+ (git rev-parse --verify -q master >/dev/null && \
+ git rev-parse --verify -q $(possible_base_rmq_ref) >/dev/null && \
+ git merge-base --is-ancestor $$(git merge-base master HEAD) $(possible_base_rmq_ref) && \
+ echo $(possible_base_rmq_ref)) || \
+ echo master)
+endif
+else
+base_rmq_ref := master
+endif
+endif
+export base_rmq_ref
+
+# Repository URL selection.
+#
+# First, we infer other components' location from the current project
+# repository URL, if it's a Git repository:
+# - We take the "origin" remote URL as the base
+# - The current project name and repository name are replaced by the
+#   target's properties:
+#   e.g. rabbitmq-common is replaced by rabbitmq-codegen
+#   e.g. rabbit_common is replaced by rabbitmq_codegen
+#
+# If cloning from this computed location fails, we fall back to the
+# RabbitMQ upstream on GitHub.
+
+# Macro to transform e.g. "rabbit_common" into "rabbitmq-common".
+rmq_cmp_repo_name = $(word 2,$(dep_$(1)))
+
+# Upstream URL for the current project.
+RABBITMQ_COMPONENT_REPO_NAME := $(call rmq_cmp_repo_name,$(PROJECT))
+RABBITMQ_UPSTREAM_FETCH_URL ?= https://github.com/rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git
+RABBITMQ_UPSTREAM_PUSH_URL ?= git@github.com:rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git
+
+# Current URL for the current project. If this is not a Git clone,
+# default to the upstream Git repository.
+ifneq ($(wildcard .git),)
+git_origin_fetch_url := $(shell git config remote.origin.url)
+git_origin_push_url := $(shell git config remote.origin.pushurl || git config remote.origin.url)
+RABBITMQ_CURRENT_FETCH_URL ?= $(git_origin_fetch_url)
+RABBITMQ_CURRENT_PUSH_URL ?= $(git_origin_push_url)
+else
+RABBITMQ_CURRENT_FETCH_URL ?= $(RABBITMQ_UPSTREAM_FETCH_URL)
+RABBITMQ_CURRENT_PUSH_URL ?= $(RABBITMQ_UPSTREAM_PUSH_URL)
+endif
+
+# Macro to replace the following patterns:
+# 1. /foo.git -> /bar.git
+# 2. /foo -> /bar
+# 3. /foo/ -> /bar/
+subst_repo_name = $(patsubst %/$(1)/%,%/$(2)/%,$(patsubst %/$(1),%/$(2),$(patsubst %/$(1).git,%/$(2).git,$(3))))
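+
+# Example (illustrative):
+#   $(call subst_repo_name,rabbitmq-common,rabbitmq-codegen,https://example.com/rabbitmq-common.git)
+# expands to:
+#   https://example.com/rabbitmq-codegen.git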
+
+# Macro to replace both the project's name (e.g. "rabbit_common") and
+# repository name (e.g. "rabbitmq-common") with the target's equivalent.
+#
+# This macro is kept on one line because we don't want whitespaces in
+# the returned value, as it's used in $(dep_fetch_git_rmq) in a shell
+# single-quoted string.
+dep_rmq_repo = $(if $(dep_$(2)),$(call subst_repo_name,$(PROJECT),$(2),$(call subst_repo_name,$(RABBITMQ_COMPONENT_REPO_NAME),$(call rmq_cmp_repo_name,$(2)),$(1))),$(pkg_$(1)_repo))
+
+dep_rmq_commits = $(if $(dep_$(1)), \
+ $(wordlist 3,$(words $(dep_$(1))),$(dep_$(1))), \
+ $(pkg_$(1)_commit))
+
+define dep_fetch_git_rmq
+ fetch_url1='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_FETCH_URL),$(1))'; \
+ fetch_url2='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_FETCH_URL),$(1))'; \
+ if test "$$$$fetch_url1" != '$(RABBITMQ_CURRENT_FETCH_URL)' && \
+ git clone -q -n -- "$$$$fetch_url1" $(DEPS_DIR)/$(call dep_name,$(1)); then \
+ fetch_url="$$$$fetch_url1"; \
+ push_url='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_PUSH_URL),$(1))'; \
+ elif git clone -q -n -- "$$$$fetch_url2" $(DEPS_DIR)/$(call dep_name,$(1)); then \
+ fetch_url="$$$$fetch_url2"; \
+ push_url='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_PUSH_URL),$(1))'; \
+ fi; \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && ( \
+ $(foreach ref,$(call dep_rmq_commits,$(1)), \
+ git checkout -q $(ref) >/dev/null 2>&1 || \
+ ) \
+ (echo "error: no valid pathspec among: $(call dep_rmq_commits,$(1))" \
+ 1>&2 && false) ) && \
+ (test "$$$$fetch_url" = "$$$$push_url" || \
+ git remote set-url --push origin "$$$$push_url")
+endef
+
+# --------------------------------------------------------------------
+# Component distribution.
+# --------------------------------------------------------------------
+
+list-dist-deps::
+ @:
+
+prepare-dist::
+ @:
+
+# --------------------------------------------------------------------
+# Umbrella-specific settings.
+# --------------------------------------------------------------------
+
+# If the top-level project is a RabbitMQ component, we override
+# $(DEPS_DIR) for this project to point to the top-level's one.
+#
+# We also verify that the guessed DEPS_DIR is actually named `deps`,
+# to rule out any situation where finding a `rabbitmq-components.mk`
+# in upper directories is a mere coincidence.
+
+possible_deps_dir_1 = $(abspath ..)
+possible_deps_dir_2 = $(abspath ../../..)
+
+ifeq ($(notdir $(possible_deps_dir_1)),deps)
+ifneq ($(wildcard $(possible_deps_dir_1)/../rabbitmq-components.mk),)
+deps_dir_overriden = 1
+DEPS_DIR ?= $(possible_deps_dir_1)
+DISABLE_DISTCLEAN = 1
+endif
+endif
+
+ifeq ($(deps_dir_overriden),)
+ifeq ($(notdir $(possible_deps_dir_2)),deps)
+ifneq ($(wildcard $(possible_deps_dir_2)/../rabbitmq-components.mk),)
+deps_dir_overriden = 1
+DEPS_DIR ?= $(possible_deps_dir_2)
+DISABLE_DISTCLEAN = 1
+endif
+endif
+endif
+
+ifneq ($(wildcard UMBRELLA.md),)
+DISABLE_DISTCLEAN = 1
+endif
+
+# We disable `make distclean` so $(DEPS_DIR) is not accidentally removed.
+
+ifeq ($(DISABLE_DISTCLEAN),1)
+ifneq ($(filter distclean distclean-deps,$(MAKECMDGOALS)),)
+SKIP_DEPS = 1
+endif
+endif
diff --git a/deps/rabbitmq_peer_discovery_common/src/rabbit_peer_discovery_cleanup.erl b/deps/rabbitmq_peer_discovery_common/src/rabbit_peer_discovery_cleanup.erl
new file mode 100644
index 0000000000..9adc972b14
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_common/src/rabbit_peer_discovery_cleanup.erl
@@ -0,0 +1,298 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% The Initial Developer of the Original Code is AWeber Communications.
+%% Copyright (c) 2015-2016 AWeber Communications
+%% Copyright (c) 2016-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+-module(rabbit_peer_discovery_cleanup).
+
+-behaviour(gen_server).
+
+-include("include/rabbit_peer_discovery.hrl").
+
+-export([start_link/0,
+ check_cluster/0]).
+
+%% gen_server callbacks
+-export([init/1,
+ handle_call/3,
+ handle_cast/2,
+ handle_info/2,
+ terminate/2,
+ code_change/3]).
+
+-ifdef(TEST).
+-compile(export_all).
+-endif.
+
+-define(CONFIG_MODULE, rabbit_peer_discovery_config).
+-define(CONFIG_KEY, node_cleanup).
+
+-define(CONFIG_MAPPING,
+ #{
+ cleanup_interval => #peer_discovery_config_entry_meta{
+ type = integer,
+ env_variable = "CLEANUP_INTERVAL",
+ default_value = 60
+ },
+ cleanup_only_log_warning => #peer_discovery_config_entry_meta{
+ type = atom,
+ env_variable = "CLEANUP_ONLY_LOG_WARNING",
+ default_value = true
+ }
+ }).
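+
+%% Example (illustrative): with the cuttlefish schema shipped by this
+%% plugin, the following rabbitmq.conf settings
+%%
+%%   cluster_formation.node_cleanup.interval = 30
+%%   cluster_formation.node_cleanup.only_log_warning = false
+%%
+%% make config_map(node_cleanup) return a map from which the lookups
+%% in init/1 below yield 30 and false. The CLEANUP_INTERVAL and
+%% CLEANUP_ONLY_LOG_WARNING environment variables take precedence
+%% over the config file.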
+
+-record(state, {interval, warn_only, timer}).
+
+%%%===================================================================
+%%% API
+%%%===================================================================
+
+-spec(start_link() ->
+ {ok, Pid :: pid()} | ignore | {error, Reason :: term()}).
+start_link() ->
+ gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
+
+
+-spec(check_cluster() -> ok).
+check_cluster() ->
+ ok = gen_server:call(?MODULE, check_cluster).
+
+
+%%%===================================================================
+%%% gen_server callbacks
+%%%===================================================================
+
+%%--------------------------------------------------------------------
+%% @private
+%% @doc
+%% Initializes the server
+%%
+%% @spec init(Args) -> {ok, State} |
+%% {ok, State, Timeout} |
+%% ignore |
+%% {stop, Reason}
+%% @end
+%%--------------------------------------------------------------------
+-spec(init(Args :: term()) ->
+ {ok, State :: #state{}} |
+ {ok, State :: #state{}, timeout() | hibernate} |
+ {stop, Reason :: term()} | ignore).
+init([]) ->
+ Map = ?CONFIG_MODULE:config_map(?CONFIG_KEY),
+ case map_size(Map) of
+ 0 ->
+ rabbit_log:info("Peer discovery: node cleanup is disabled", []),
+ {ok, #state{}};
+ _ ->
+ Interval = ?CONFIG_MODULE:get(cleanup_interval, ?CONFIG_MAPPING, Map),
+ WarnOnly = ?CONFIG_MODULE:get(cleanup_only_log_warning, ?CONFIG_MAPPING, Map),
+ State = #state{interval = Interval,
+ warn_only = WarnOnly,
+ timer = apply_interval(Interval)},
+ WarnMsg = case WarnOnly of
+ true -> "will only log warnings";
+ false -> "will remove nodes not known to the discovery backend"
+ end,
+ rabbit_log:info("Peer discovery: enabling node cleanup (~s). Check interval: ~p seconds.",
+ [WarnMsg, State#state.interval]),
+ {ok, State}
+ end.
+
+%%--------------------------------------------------------------------
+%% @private
+%% @doc
+%% Handling call messages
+%%
+%% @end
+%%--------------------------------------------------------------------
+-spec(handle_call(Request :: term(), From :: {pid(), Tag :: term()},
+ State :: #state{}) ->
+ {reply, Reply :: term(), NewState :: #state{}} |
+ {reply, Reply :: term(), NewState :: #state{}, timeout() | hibernate} |
+ {noreply, NewState :: #state{}} |
+ {noreply, NewState :: #state{}, timeout() | hibernate} |
+ {stop, Reason :: term(), Reply :: term(), NewState :: #state{}} |
+ {stop, Reason :: term(), NewState :: #state{}}).
+
+handle_call(check_cluster, _From, State) ->
+ rabbit_log:debug("Peer discovery: checking for partitioned nodes to clean up."),
+ maybe_cleanup(State),
+ {reply, ok, State};
+handle_call(_Request, _From, State) ->
+ {reply, ok, State}.
+
+%%--------------------------------------------------------------------
+%% @private
+%% @doc
+%% Handling cast messages
+%%
+%% @end
+%%--------------------------------------------------------------------
+-spec(handle_cast(Request :: term(), State :: #state{}) ->
+ {noreply, NewState :: #state{}} |
+ {noreply, NewState :: #state{}, timeout() | hibernate} |
+ {stop, Reason :: term(), NewState :: #state{}}).
+handle_cast(_Request, State) ->
+ {noreply, State}.
+
+%%--------------------------------------------------------------------
+%% @private
+%% @doc
+%% Handling all non call/cast messages
+%%
+%% @spec handle_info(Info, State) -> {noreply, State} |
+%% {noreply, State, Timeout} |
+%% {stop, Reason, State}
+%% @end
+%%--------------------------------------------------------------------
+-spec(handle_info(Info :: timeout() | term(), State :: #state{}) ->
+ {noreply, NewState :: #state{}} |
+ {noreply, NewState :: #state{}, timeout() | hibernate} |
+ {stop, Reason :: term(), NewState :: #state{}}).
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+%%--------------------------------------------------------------------
+%% @private
+%% @doc
+%% This function is called by a gen_server when it is about to
+%% terminate. It should be the opposite of Module:init/1 and do any
+%% necessary cleaning up. When it returns, the gen_server terminates
+%% with Reason. The return value is ignored.
+%%
+%% @spec terminate(Reason, State) -> void()
+%% @end
+%%--------------------------------------------------------------------
+-spec(terminate(Reason :: (normal | shutdown | {shutdown, term()} | term()),
+ State :: #state{}) -> term()).
+terminate(_Reason, _State) ->
+ ok.
+
+%%--------------------------------------------------------------------
+%% @private
+%% @doc
+%% Convert process state when code is changed
+%%
+%% @spec code_change(OldVsn, State, Extra) -> {ok, NewState}
+%% @end
+%%--------------------------------------------------------------------
+-spec(code_change(OldVsn :: term() | {down, term()}, State :: #state{},
+ Extra :: term()) ->
+ {ok, NewState :: #state{}} | {error, Reason :: term()}).
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+%%%===================================================================
+%%% Internal functions
+%%%===================================================================
+
+%%--------------------------------------------------------------------
+%% @private
+%% @doc Create the timer that invokes check_cluster/0 (a gen_server
+%% call to this module, which in turn runs maybe_cleanup/1) every N
+%% seconds.
+%% @spec apply_interval(integer()) -> timer:tref()
+%% @end
+%%--------------------------------------------------------------------
+-spec apply_interval(integer()) -> timer:tref().
+apply_interval(Seconds) ->
+ {ok, TRef} = timer:apply_interval(Seconds * 1000, ?MODULE,
+ check_cluster, []),
+ TRef.
+
+%%--------------------------------------------------------------------
+%% @private
+%% @doc Fetch the list of nodes from service discovery and all of the
+%% unreachable nodes in RabbitMQ, removing any node from the
+%% unreachable list that exists in the service discovery list.
+%% @spec maybe_cleanup(State :: #state{}) -> ok
+%% @end
+%%--------------------------------------------------------------------
+-spec maybe_cleanup(State :: #state{}) -> ok.
+maybe_cleanup(State) ->
+ maybe_cleanup(State, unreachable_nodes()).
+
+%%--------------------------------------------------------------------
+%% @private
+%% @doc Fetch the list of nodes from service discovery and all of the
+%% unreachable nodes in RabbitMQ, removing any node from the
+%% unreachable list that exists in the service discovery list.
+%% @spec maybe_cleanup(State :: #state{},
+%% UnreachableNodes :: [node()]) -> ok
+%% @end
+%%--------------------------------------------------------------------
+-spec maybe_cleanup(State :: #state{},
+ UnreachableNodes :: [node()]) -> ok.
+maybe_cleanup(_, []) ->
+ rabbit_log:debug("Peer discovery: all known cluster nodes are up.");
+maybe_cleanup(State, UnreachableNodes) ->
+ rabbit_log:debug("Peer discovery: cleanup discovered unreachable nodes: ~p",
+ [UnreachableNodes]),
+ case lists:subtract(UnreachableNodes, service_discovery_nodes()) of
+ [] ->
+ rabbit_log:debug("Peer discovery: all unreachable nodes are still "
+ "registered with the discovery backend ~p",
+ [rabbit_peer_discovery:backend()]),
+ ok;
+ Nodes ->
+ rabbit_log:debug("Peer discovery: unreachable nodes are not registered "
+ "with the discovery backend ~p", [Nodes]),
+ maybe_remove_nodes(Nodes, State#state.warn_only)
+ end.
+
+%%--------------------------------------------------------------------
+%% @private
+%% @doc Iterate over the list of partitioned nodes, either logging the
+%% node that would be removed or actually removing it.
+%% @spec maybe_remove_nodes(PartitionedNodes :: [node()],
+%% WarnOnly :: true | false) -> ok
+%% @end
+%%--------------------------------------------------------------------
+-spec maybe_remove_nodes(PartitionedNodes :: [node()],
+ WarnOnly :: true | false) -> ok.
+maybe_remove_nodes([], _) -> ok;
+maybe_remove_nodes([Node | Nodes], true) ->
+ rabbit_log:warning("Peer discovery: node ~s is unreachable", [Node]),
+ maybe_remove_nodes(Nodes, true);
+maybe_remove_nodes([Node | Nodes], false) ->
+ rabbit_log:warning("Peer discovery: removing unknown node ~s from the cluster", [Node]),
+ rabbit_mnesia:forget_cluster_node(Node, false),
+ maybe_remove_nodes(Nodes, false).
+
+%%--------------------------------------------------------------------
+%% @private
+%% @doc Return nodes that are known to the RabbitMQ cluster but are
+%% not currently running.
+%% @spec unreachable_nodes() -> [node()]
+%% @end
+%%--------------------------------------------------------------------
+-spec unreachable_nodes() -> [node()].
+unreachable_nodes() ->
+ Status = rabbit_mnesia:status(),
+ Nodes = proplists:get_value(nodes, Status, []),
+ Running = proplists:get_value(running_nodes, Status, []),
+ All = lists:merge(proplists:get_value(disc, Nodes, []),
+ proplists:get_value(ram, Nodes, [])),
+ lists:subtract(All, Running).
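+
+%% Example (illustrative): given a simplified rabbit_mnesia:status() of
+%%
+%%   [{nodes, [{disc, ['rabbit@host1', 'rabbit@host2']}, {ram, []}]},
+%%    {running_nodes, ['rabbit@host1']}]
+%%
+%% unreachable_nodes/0 returns ['rabbit@host2'].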
+
+
+%%--------------------------------------------------------------------
+%% @private
+%% @doc Return the nodes that the service discovery backend knows about
+%% @spec service_discovery_nodes() -> [node()]
+%% @end
+%%--------------------------------------------------------------------
+-spec service_discovery_nodes() -> [node()].
+service_discovery_nodes() ->
+ Module = rabbit_peer_discovery:backend(),
+ case rabbit_peer_discovery:normalize(Module:list_nodes()) of
+ {ok, {Nodes, _Type}} ->
+ rabbit_log:debug("Peer discovery cleanup: ~p returned ~p",
+ [Module, Nodes]),
+ Nodes;
+ {error, Reason} ->
+ rabbit_log:debug("Peer discovery cleanup: ~p returned error ~p",
+ [Module, Reason]),
+ []
+ end.
diff --git a/deps/rabbitmq_peer_discovery_common/src/rabbit_peer_discovery_common_app.erl b/deps/rabbitmq_peer_discovery_common/src/rabbit_peer_discovery_common_app.erl
new file mode 100644
index 0000000000..ee9cc02b7a
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_common/src/rabbit_peer_discovery_common_app.erl
@@ -0,0 +1,19 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_peer_discovery_common_app).
+
+-behaviour(application).
+-export([start/2, stop/1]).
+
+start(_Type, _StartArgs) ->
+ rabbit_peer_discovery_httpc:maybe_configure_proxy(),
+ rabbit_peer_discovery_httpc:maybe_configure_inet6(),
+ rabbit_peer_discovery_common_sup:start_link().
+
+stop(_State) ->
+ ok.
diff --git a/deps/rabbitmq_peer_discovery_common/src/rabbit_peer_discovery_common_sup.erl b/deps/rabbitmq_peer_discovery_common/src/rabbit_peer_discovery_common_sup.erl
new file mode 100644
index 0000000000..8166be5013
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_common/src/rabbit_peer_discovery_common_sup.erl
@@ -0,0 +1,24 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_peer_discovery_common_sup).
+
+-behaviour(supervisor).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-export([init/1]).
+-export([start_link/0]).
+
+start_link() ->
+ supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+init([]) ->
+ Cleanup = {rabbit_peer_discovery_cleanup,
+ {rabbit_peer_discovery_cleanup, start_link, []},
+ permanent, ?WORKER_WAIT, worker, [rabbit_peer_discovery_cleanup]},
+ {ok, {{one_for_one, 10, 10}, [Cleanup]}}.
diff --git a/deps/rabbitmq_peer_discovery_common/src/rabbit_peer_discovery_config.erl b/deps/rabbitmq_peer_discovery_common/src/rabbit_peer_discovery_config.erl
new file mode 100644
index 0000000000..e5e14aa67f
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_common/src/rabbit_peer_discovery_config.erl
@@ -0,0 +1,131 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% The Initial Developer of the Original Code is AWeber Communications.
+%% Copyright (c) 2015-2016 AWeber Communications
+%% Copyright (c) 2016-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_peer_discovery_config).
+
+-include("rabbit_peer_discovery.hrl").
+
+-export([get/3, get_integer/3, config_map/1]).
+
+%%
+%% API
+%%
+
+-spec get(Key :: atom(),
+ Mapping :: #{atom() => peer_discovery_config_entry_meta()},
+ Config :: #{atom() => peer_discovery_config_value()}) -> peer_discovery_config_value().
+
+get(Key, Mapping, Config) ->
+ case maps:is_key(Key, Mapping) of
+ false ->
+ rabbit_log:error("Key ~s is not found in peer discovery config mapping ~p!", [Key, Mapping]),
+ throw({badkey, Key});
+ true ->
+ get_with_entry_meta(Key, maps:get(Key, Mapping), Config)
+ end.
+
+-spec get_integer(Key :: atom(),
+ Mapping :: #{atom() => peer_discovery_config_entry_meta()},
+ Config :: #{atom() => peer_discovery_config_value()}) -> integer().
+
+get_integer(Key, Mapping, Config) ->
+ case maps:is_key(Key, Mapping) of
+ false ->
+ rabbit_log:error("Key ~s is not found in peer discovery config mapping ~p!", [Key, Mapping]),
+ throw({badkey, Key});
+ true ->
+ get_integer_with_entry_meta(Key, maps:get(Key, Mapping), Config)
+ end.
+
+-spec config_map(atom()) -> #{atom() => peer_discovery_config_value()}.
+
+config_map(BackendConfigKey) ->
+ case application:get_env(rabbit, cluster_formation) of
+ undefined -> #{};
+ {ok, ClusterFormation} ->
+ case proplists:get_value(BackendConfigKey, ClusterFormation) of
+ undefined -> #{};
+ Proplist -> maps:from_list(Proplist)
+ end
+ end.
+
+%%
+%% Implementation
+%%
+
+-spec get_with_entry_meta(Key :: atom(),
+ EntryMeta :: #peer_discovery_config_entry_meta{},
+ Map :: #{atom() => peer_discovery_config_value()}) -> peer_discovery_config_value().
+
+get_with_entry_meta(Key, #peer_discovery_config_entry_meta{env_variable = EV,
+ default_value = Default,
+ type = Type}, Map) ->
+ normalize(Type, get_from_env_variable_or_map(Map, EV, Key, Default)).
+
+-spec get_integer_with_entry_meta(Key :: atom(),
+ EntryMeta :: #peer_discovery_config_entry_meta{},
+ Map :: #{atom() => peer_discovery_config_value()}) -> integer().
+
+get_integer_with_entry_meta(Key, #peer_discovery_config_entry_meta{env_variable = EV,
+ default_value = Default,
+ type = Type}, Map) ->
+ normalize(Type, get_integer_from_env_variable_or_map(Map, EV, Key, Default)).
+
+
+%%--------------------------------------------------------------------
+%% @private
+%% @doc
+%% Return the value of the OS environment variable if it is set,
+%% otherwise the value from the provided map, falling back to the
+%% default value.
+%% @end
+%%--------------------------------------------------------------------
+-spec get_from_env_variable_or_map(Map :: map(), OSKey :: string(), AppKey :: atom(),
+ Default :: atom() | integer() | string())
+ -> atom() | integer() | string().
+get_from_env_variable_or_map(Map, OSKey, AppKey, Default) ->
+ case rabbit_peer_discovery_util:getenv(OSKey) of
+ false -> maps:get(AppKey, Map, Default);
+ Value -> Value
+ end.
+
+-spec get_integer_from_env_variable_or_map(Map :: map(), OSKey :: string(), AppKey :: atom(),
+ Default :: integer())
+ -> integer().
+get_integer_from_env_variable_or_map(Map, OSKey, AppKey, Default) ->
+ case rabbit_peer_discovery_util:getenv(OSKey) of
+ false -> maps:get(AppKey, Map, Default);
+ "" -> Default;
+ Value -> Value
+ end.
+
+%%--------------------------------------------------------------------
+%% @private
+%% @doc
+%% Return the value normalized to the proper data type.
+%% @end
+%%--------------------------------------------------------------------
+-spec normalize(Type :: atom(),
+ Value :: atom() | boolean() | integer() | string() | list()) ->
+ atom() | integer() | string().
+%% TODO: switch these to use delegate to rabbit_data_coercion:*
+normalize(Type, Value) when Type =:= port ->
+ rabbit_peer_discovery_util:parse_port(Value);
+normalize(Type, Value) when Type =:= atom ->
+ rabbit_peer_discovery_util:as_atom(Value);
+normalize(Type, Value) when Type =:= list ->
+ rabbit_data_coercion:to_list(Value);
+normalize(Type, Value) when Type =:= integer ->
+ rabbit_peer_discovery_util:as_integer(Value);
+normalize(Type, Value) when Type =:= string ->
+ rabbit_peer_discovery_util:as_string(Value);
+normalize(Type, Value) when Type =:= proplist ->
+ rabbit_peer_discovery_util:as_proplist(Value);
+normalize(Type, Value) when Type =:= map ->
+ rabbit_peer_discovery_util:as_map(Value).
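+
+%% Examples (illustrative):
+%%
+%%   normalize(integer, "42")    -> 42
+%%   normalize(atom, <<"true">>) -> true
+%%   normalize(string, rabbit)   -> "rabbit"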
diff --git a/deps/rabbitmq_peer_discovery_common/src/rabbit_peer_discovery_httpc.erl b/deps/rabbitmq_peer_discovery_common/src/rabbit_peer_discovery_httpc.erl
new file mode 100644
index 0000000000..44062465c6
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_common/src/rabbit_peer_discovery_httpc.erl
@@ -0,0 +1,421 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% The Initial Developer of the Original Code is AWeber Communications.
+%% Copyright (c) 2015-2016 AWeber Communications
+%% Copyright (c) 2016-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_peer_discovery_httpc).
+
+-include("include/rabbit_peer_discovery.hrl").
+
+%%
+%% API
+%%
+
+-export([build_query/1,
+ build_uri/5,
+ build_path/1,
+ delete/6,
+ get/5,
+ get/7,
+ post/6,
+ post/8,
+ put/6,
+ put/7,
+ maybe_configure_proxy/0,
+ maybe_configure_inet6/0]).
+
+%% Export all for unit tests
+-ifdef(TEST).
+-compile(export_all).
+-endif.
+
+
+-define(CONFIG_MODULE, rabbit_peer_discovery_config).
+-define(CONFIG_KEY, proxy).
+
+-define(CONFIG_MAPPING,
+ #{
+ http_proxy => #peer_discovery_config_entry_meta{
+ type = string,
+ env_variable = "HTTP_PROXY",
+ default_value = undefined
+ },
+ https_proxy => #peer_discovery_config_entry_meta{
+ type = string,
+ env_variable = "HTTPS_PROXY",
+ default_value = undefined
+ },
+ proxy_exclusions => #peer_discovery_config_entry_meta{
+ type = list,
+ env_variable = "PROXY_EXCLUSIONS",
+ default_value = []
+ }
+ }).
+
+
+-define(CONTENT_JSON, "application/json").
+-define(CONTENT_JSON_WITH_CHARSET, "application/json; charset=utf-8").
+-define(CONTENT_URLENCODED, "application/x-www-form-urlencoded").
+
+
+%% @public
+%% @spec build_uri(Scheme, Host, Port, Path, QArgs) -> string()
+%% where Scheme = string()
+%% Host = string()
+%% Port = integer()
+%% Path = string()
+%% QArgs = proplist()
+%% @doc Build the request URI
+%% @end
+%%
+build_uri(Scheme, Host, Port, Path0, []) ->
+ Path = case string:slice(Path0, 0, 1) of
+ "/" -> Path0;
+ _ -> "/" ++ Path0
+ end,
+ uri_string:recompose(#{scheme => Scheme,
+ host => Host,
+ port => rabbit_data_coercion:to_integer(Port),
+ path => Path});
+build_uri(Scheme, Host, Port, Path0, QArgs) ->
+ Path = case string:slice(Path0, 0, 1) of
+ "/" -> Path0;
+ _ -> "/" ++ Path0
+ end,
+ uri_string:recompose(#{scheme => Scheme,
+ host => Host,
+ port => rabbit_data_coercion:to_integer(Port),
+ path => Path,
+ query => build_query(QArgs)}).
+
+-spec build_path(list()) -> string().
+
+build_path(Segments0) when is_list(Segments0) ->
+ Segments = [rabbit_data_coercion:to_list(PS) || PS <- Segments0],
+ uri_string:recompose(#{path => string:join(Segments, "/")}).
+
+%% @public
+%% @spec build_query(proplist()) -> string()
+%% @doc Build the query parameters string from a proplist
+%% @end
+%%
+build_query(Args) when is_list(Args) ->
+ Normalized = [{rabbit_data_coercion:to_list(K), rabbit_data_coercion:to_list(V)} || {K, V} <- Args],
+ uri_string:compose_query(Normalized).
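+
+%% Examples (illustrative, with made-up host and path values):
+%%
+%%   build_uri("https", "consul.node", 8501, "v1/health", [{"passing", "true"}])
+%%     -> "https://consul.node:8501/v1/health?passing=true"
+%%
+%%   build_path(["v1", "kv", <<"rabbitmq">>])
+%%     -> "v1/kv/rabbitmq"
+%%
+%%   build_query([{"recurse", "true"}, {"wait", "10s"}])
+%%     -> "recurse=true&wait=10s"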
+
+
+%% @public
+%% @spec get(Scheme, Host, Port, Path, Args) -> Result
+%% @where Scheme = string(),
+%% Host = string(),
+%% Port = integer(),
+%% Path = string(),
+%% Args = proplist(),
+%% Result = {ok, mixed}|{error, Reason::string()}
+%% @doc Perform an HTTP GET request
+%% @end
+%%
+get(Scheme, Host, Port, Path, Args) ->
+ get(Scheme, Host, Port, Path, Args, [], []).
+
+
+%% @public
+%% @spec get(Scheme, Host, Port, Path, Args, Headers, HttpOpts) -> Result
+%% @where Scheme = string(),
+%% Host = string(),
+%% Port = integer(),
+%% Path = string(),
+%% Args = proplist(),
+%% Headers = proplist(),
+%% HttpOpts = proplist(),
+%% Result = {ok, mixed}|{error, Reason::string()}
+%% @doc Perform an HTTP GET request
+%% @end
+%%
+get(Scheme, Host, Port, Path, Args, Headers, HttpOpts) ->
+ URL = build_uri(Scheme, Host, Port, Path, Args),
+ rabbit_log:debug("GET ~s", [URL]),
+ HttpOpts1 = ensure_timeout(HttpOpts),
+ Response = httpc:request(get, {URL, Headers}, HttpOpts1, []),
+ rabbit_log:debug("Response: ~p", [Response]),
+ parse_response(Response).
+
+
+%% @public
+%% @spec post(Scheme, Host, Port, Path, Args, Body) -> Result
+%% @where Scheme = string(),
+%% Host = string(),
+%% Port = integer(),
+%% Path = string(),
+%% Args = proplist(),
+%% Body = string(),
+%% Result = {ok, mixed}|{error, Reason::string()}
+%% @doc Perform an HTTP POST request
+%% @end
+%%
+post(Scheme, Host, Port, Path, Args, Body) ->
+ post(Scheme, Host, Port, Path, Args, [], [], Body).
+
+%% @public
+%% @spec post(Scheme, Host, Port, Path, Args, Headers, HttpOpts, Body) -> Result
+%% @where Scheme = string(),
+%% Host = string(),
+%% Port = integer(),
+%% Path = string(),
+%% Args = proplist(),
+%% Headers = proplist(),
+%% HttpOpts = proplist(),
+%% Body = string(),
+%% Result = {ok, mixed}|{error, Reason::string()}
+%% @doc Perform an HTTP POST request
+%% @end
+%%
+post(Scheme, Host, Port, Path, Args, Headers, HttpOpts, Body) ->
+ URL = build_uri(Scheme, Host, Port, Path, Args),
+ rabbit_log:debug("POST ~s [~p]", [URL, Body]),
+ HttpOpts1 = ensure_timeout(HttpOpts),
+ Response = httpc:request(post, {URL, Headers, ?CONTENT_JSON, Body}, HttpOpts1, []),
+ rabbit_log:debug("Response: [~p]", [Response]),
+ parse_response(Response).
+
+
+%% @public
+%% @spec put(Scheme, Host, Port, Path, Args, Body) -> Result
+%% @where Scheme = string(),
+%% Host = string(),
+%% Port = integer(),
+%% Path = string(),
+%% Args = proplist(),
+%% Body = string(),
+%% Result = {ok, mixed}|{error, Reason::string()}
+%% @doc Perform an HTTP PUT request
+%% @end
+%%
+
+-spec put(Scheme, Host, Port, Path, Args, Body) -> {ok, string()} | {error, any()} when
+ Scheme :: atom() | string(),
+ Host :: string() | binary(),
+ Port :: integer(),
+ Path :: string() | binary(),
+ Args :: list(),
+ Body :: string() | binary() | tuple().
+put(Scheme, Host, Port, Path, Args, Body) ->
+ URL = build_uri(Scheme, Host, Port, Path, Args),
+ rabbit_log:debug("PUT ~s [~p]", [URL, Body]),
+ HttpOpts = ensure_timeout(),
+ Response = httpc:request(put, {URL, [], ?CONTENT_URLENCODED, Body}, HttpOpts, []),
+ rabbit_log:debug("Response: [~p]", [Response]),
+ parse_response(Response).
+
+
+%% @spec put(Scheme, Host, Port, Path, Args, Headers, Body) -> Result
+%% @where Scheme = string(),
+%% Host = string(),
+%% Port = integer(),
+%% Path = string(),
+%% Args = proplist(),
+%% Headers = proplist(),
+%% Body = string(),
+%% Result = {ok, mixed}|{error, Reason::string()}
+%% @doc Perform an HTTP PUT request
+%% @end
+%%
+-spec put(Scheme, Host, Port, Path, Args, Headers, Body) -> {ok, string()} | {error, any()} when
+ Scheme :: atom() | string(),
+ Host :: string() | binary(),
+ Port :: integer(),
+ Path :: string() | binary(),
+ Args :: list(),
+ Headers :: list(),
+ Body :: string() | binary() | tuple().
+put(Scheme, Host, Port, Path, Args, Headers, Body) ->
+ URL = build_uri(Scheme, Host, Port, Path, Args),
+ rabbit_log:debug("PUT ~s [~p] [~p]", [URL, Headers, Body]),
+ HttpOpts = ensure_timeout(),
+ Response = httpc:request(put, {URL, Headers, ?CONTENT_URLENCODED, Body}, HttpOpts, []),
+ rabbit_log:debug("Response: [~p]", [Response]),
+ parse_response(Response).
+
+
+%% @public
+%% @spec delete(Scheme, Host, Port, Path, Args, Body) -> Result
+%% @where Scheme = string(),
+%% Host = string(),
+%% Port = integer(),
+%% Path = string(),
+%% Args = proplist(),
+%% Body = string(),
+%% Result = {ok, mixed}|{error, Reason::string()}
+%% @doc Perform an HTTP DELETE request
+%% @end
+%%
+delete(Scheme, Host, Port, PathSegments, Args, Body) when is_list(PathSegments) ->
+ Path = uri_string:recompose(#{path => lists:join("/", [rabbit_data_coercion:to_list(PS) || PS <- PathSegments])}),
+ delete(Scheme, Host, Port, Path, Args, Body);
+delete(Scheme, Host, Port, Path, Args, Body) ->
+ URL = build_uri(Scheme, Host, Port, Path, Args),
+ rabbit_log:debug("DELETE ~s [~p]", [URL, Body]),
+ HttpOpts = ensure_timeout(),
+ Response = httpc:request(delete, {URL, [], ?CONTENT_URLENCODED, Body}, HttpOpts, []),
+ rabbit_log:debug("Response: [~p]", [Response]),
+ parse_response(Response).
+
+
+%% @public
+%% @spec maybe_configure_proxy() -> Result
+%% @where Result = ok.
+%% @doc Configures HTTP[S] proxy settings in httpc, if necessary
+%% @end
+-spec maybe_configure_proxy() -> ok.
+maybe_configure_proxy() ->
+ Map = ?CONFIG_MODULE:config_map(?CONFIG_KEY),
+ case map_size(Map) of
+ 0 ->
+ rabbit_log:debug("HTTP client proxy is not configured"),
+ ok;
+ _ ->
+ HttpProxy = ?CONFIG_MODULE:get(http_proxy, ?CONFIG_MAPPING, Map),
+ HttpsProxy = ?CONFIG_MODULE:get(https_proxy, ?CONFIG_MAPPING, Map),
+ ProxyExclusions = ?CONFIG_MODULE:get(proxy_exclusions, ?CONFIG_MAPPING, Map),
+ rabbit_log:debug("Configured HTTP proxy: ~p, HTTPS proxy: ~p, exclusions: ~p",
+ [HttpProxy, HttpsProxy, ProxyExclusions]),
+ maybe_set_proxy(proxy, HttpProxy, ProxyExclusions),
+ maybe_set_proxy(https_proxy, HttpsProxy, ProxyExclusions),
+ ok
+ end.
+
+%% @public
+%% @spec maybe_configure_inet6() -> Result
+%% @where Result = ok.
+%% @doc Configures HTTP[S] inet6 settings in httpc, if necessary
+%% @end
+-spec maybe_configure_inet6() -> ok.
+maybe_configure_inet6() ->
+ IpFamily = case proplists:get_value(inet6, inet:get_rc(), false) of
+ true -> inet6;
+ false -> inet
+ end,
+ httpc:set_option(ipfamily, IpFamily).
+
+%%--------------------------------------------------------------------
+%% @private
+%% @doc Set httpc proxy options. Note that an unconfigured proxy
+%% reaches this point as the string "undefined" (the `undefined`
+%% default normalized to a string by rabbit_peer_discovery_config),
+%% hence the first clause below.
+%% @end
+%%--------------------------------------------------------------------
+-spec maybe_set_proxy(Option :: atom(),
+ ProxyUrl :: string(),
+ ProxyExclusions :: list()) -> ok | {error, Reason :: term()}.
+maybe_set_proxy(_Option, "undefined", _ProxyExclusions) -> ok;
+maybe_set_proxy(Option, ProxyUrl, ProxyExclusions) ->
+ case parse_proxy_uri(Option, ProxyUrl) of
+ UriMap ->
+ Host = maps:get(host, UriMap),
+ Port = maps:get(port, UriMap, 80),
+ rabbit_log:debug(
+ "Configuring HTTP client's ~s setting: ~p, exclusions: ~p",
+ [Option, {Host, Port}, ProxyExclusions]),
+ httpc:set_option(Option, {{Host, Port}, ProxyExclusions})
+ end.
+
+%%--------------------------------------------------------------------
+%% @private
+%% @doc Support defining proxy address with or without uri scheme.
+%% @end
+%%--------------------------------------------------------------------
+-spec parse_proxy_uri(ProxyType :: atom(), ProxyUrl :: string()) -> uri_string:uri_map().
+parse_proxy_uri(ProxyType, ProxyUrl) ->
+ case ProxyType of
+ proxy ->
+ case string:slice(ProxyUrl, 0, 4) of
+ "http" -> uri_string:parse(ProxyUrl);
+ _ -> uri_string:parse("http://" ++ ProxyUrl)
+ end;
+ https_proxy ->
+ case string:slice(ProxyUrl, 0, 5) of
+ "https" -> uri_string:parse(ProxyUrl);
+ _ -> uri_string:parse("https://" ++ ProxyUrl)
+ end
+ end.
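+
+%% Example (illustrative): a proxy configured without a URI scheme gets
+%% one matching the proxy type, so
+%%
+%%   parse_proxy_uri(proxy, "proxy.internal:3128")
+%%
+%% parses "http://proxy.internal:3128" and yields a URI map with host
+%% "proxy.internal" and port 3128.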
+
+
+
+%% @private
+%% @spec decode_body(ContentType, Body) -> term()
+%% @doc Decode the response body and return the decoded JSON term,
+%% or [] if the body is empty or cannot be decoded
+%% @end
+%%
+
+-spec decode_body(string(), string() | binary() | term()) -> 'false' | 'null' | 'true' |
+ binary() | [any()] | number() | map().
+
+decode_body(_, []) -> [];
+decode_body(?CONTENT_JSON_WITH_CHARSET, Body) ->
+ decode_body(?CONTENT_JSON, Body);
+decode_body(?CONTENT_JSON, Body) ->
+ case rabbit_json:try_decode(rabbit_data_coercion:to_binary(Body)) of
+ {ok, Value} ->
+ Value;
+ {error, Err} ->
+ rabbit_log:error("HTTP client could not decode a JSON payload "
+ "(JSON parser returned an error): ~p.~n",
+ [Err]),
+ []
+ end.
+
+%% @private
+%% @spec parse_response(Response) -> {ok, string()} | {error, mixed}
+%% @where Response = {status_line(), headers(), Body} | {status_code(), Body}
+%% @doc Parse the HTTP response, decoding the body of successful
+%% responses and turning other status codes into errors
+%% @end
+%%
+-spec parse_response({ok, integer(), string()} | {error, any()}) -> {ok, string()} | {error, any()}.
+
+parse_response({error, Reason}) ->
+ rabbit_log:debug("HTTP error ~p", [Reason]),
+ {error, lists:flatten(io_lib:format("~p", [Reason]))};
+
+parse_response({ok, 200, Body}) -> {ok, decode_body(?CONTENT_JSON, Body)};
+parse_response({ok, 201, Body}) -> {ok, decode_body(?CONTENT_JSON, Body)};
+parse_response({ok, 204, _}) -> {ok, []};
+parse_response({ok, Code, Body}) ->
+ rabbit_log:debug("HTTP Response (~p) ~s", [Code, Body]),
+ {error, integer_to_list(Code)};
+
+parse_response({ok, {{_,200,_}, Headers, Body}}) ->
+ {ok, decode_body(proplists:get_value("content-type", Headers, ?CONTENT_JSON), Body)};
+parse_response({ok,{{_,201,_}, Headers, Body}}) ->
+ {ok, decode_body(proplists:get_value("content-type", Headers, ?CONTENT_JSON), Body)};
+parse_response({ok,{{_,204,_}, _, _}}) -> {ok, []};
+parse_response({ok,{{_Vsn,Code,_Reason},_,Body}}) ->
+ rabbit_log:debug("HTTP Response (~p) ~s", [Code, Body]),
+ {error, integer_to_list(Code)}.
+
+%% @private
+%% @doc Ensure that the timeout option is set.
+%% @end
+%%
+-spec ensure_timeout() -> list({atom(), term()}).
+ensure_timeout() ->
+ [{timeout, ?DEFAULT_HTTP_TIMEOUT}].
+
+%% @private
+%% @doc Ensure that the timeout option is set, and does not exceed
+%% about 1/2 of the default gen_server:call timeout. This gives
+%% enough time for a long connect and request phase to succeed.
+%% @end
+%%
+-spec ensure_timeout(Options :: list({atom(), term()})) -> list({atom(), term()}).
+ensure_timeout(Options) ->
+ case proplists:get_value(timeout, Options) of
+ undefined ->
+ Options ++ [{timeout, ?DEFAULT_HTTP_TIMEOUT}];
+ Value when is_integer(Value) andalso Value >= 0 andalso Value =< ?DEFAULT_HTTP_TIMEOUT ->
+ Options;
+ _ ->
+ Options1 = proplists:delete(timeout, Options),
+ Options1 ++ [{timeout, ?DEFAULT_HTTP_TIMEOUT}]
+ end.
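+
+%% Examples (illustrative; ?DEFAULT_HTTP_TIMEOUT is 2250):
+%%
+%%   ensure_timeout([])                 -> [{timeout, 2250}]
+%%   ensure_timeout([{timeout, 1000}])  -> [{timeout, 1000}]
+%%   ensure_timeout([{timeout, 60000}]) -> [{timeout, 2250}]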
diff --git a/deps/rabbitmq_peer_discovery_common/src/rabbit_peer_discovery_util.erl b/deps/rabbitmq_peer_discovery_common/src/rabbit_peer_discovery_util.erl
new file mode 100644
index 0000000000..90584f13d8
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_common/src/rabbit_peer_discovery_util.erl
@@ -0,0 +1,404 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% The Initial Developer of the Original Code is AWeber Communications.
+%% Copyright (c) 2015-2016 AWeber Communications
+%% Copyright (c) 2016-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+-module(rabbit_peer_discovery_util).
+
+%% API
+-export([getenv/1,
+ as_atom/1,
+ as_integer/1,
+ as_string/1,
+ as_list/1,
+ nic_ipv4/1,
+ node_hostname/1,
+ node_name/1,
+ parse_port/1,
+ as_proplist/1,
+ as_map/1,
+ stringify_error/1,
+ maybe_backend_configured/4
+ ]).
+
+
+%% Export all for unit tests
+-ifdef(TEST).
+-compile(export_all).
+-endif.
+
+-type ifopt() :: {flag,[atom()]} | {addr, inet:ip_address()} |
+ {netmask,inet:ip_address()} | {broadaddr,inet:ip_address()} |
+ {dstaddr,inet:ip_address()} | {hwaddr,[byte()]}.
+
+-type stringifyable() :: atom() | binary() | string() | integer().
+
+
+-spec getenv(Key :: string() | undefined) -> string() | false.
+getenv(undefined) ->
+ false;
+getenv(Key) ->
+ process_getenv_value(Key, os:getenv(Key)).
+
+-define(DEFAULT_NODE_PREFIX, "rabbit").
+
+%%--------------------------------------------------------------------
+%% @private
+%% @doc
+%% Check the response of os:getenv/1; if it is false, chain down to
+%% maybe_getenv_with_subkey/2, which checks whether the variable name
+%% has a RABBITMQ_ prefix and, if so, retries the lookup without the
+%% prefix.
+%% @end
+%%--------------------------------------------------------------------
+-spec process_getenv_value(Key :: string(), Value :: string() | false)
+ -> string() | false.
+process_getenv_value(Key, false) ->
+ maybe_getenv_with_subkey(Key, string:left(Key, 9));
+process_getenv_value(_, Value) -> Value.
+
+
+%%--------------------------------------------------------------------
+%% @private
+%% @doc
+%% Check whether the OS environment variable name starts with RABBITMQ_
+%% and, if so, try to fetch the value from an environment variable
+%% with the prefix removed.
+%% @end
+%%--------------------------------------------------------------------
+-spec maybe_getenv_with_subkey(Key :: string(), Prefix :: string())
+ -> string() | false.
+maybe_getenv_with_subkey(Key, "RABBITMQ_") ->
+ os:getenv(string:sub_string(Key, 10));
+maybe_getenv_with_subkey(_, _) ->
+ false.
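+
+%% Illustration of the fallback behaviour with a hypothetical variable:
+%% if RABBITMQ_SVC_HOST is unset but SVC_HOST=consul.local is set, then
+%%
+%%   getenv("RABBITMQ_SVC_HOST") => "consul.local"
+%%   getenv("NO_SUCH_VARIABLE")  => false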
+
+
+%%--------------------------------------------------------------------
+%% @doc
+%% Return the passed in value as an atom.
+%% @end
+%%--------------------------------------------------------------------
+-spec as_atom(atom() | binary() | string()) -> atom().
+as_atom(Value) when is_atom(Value) ->
+ Value;
+as_atom(Value) when is_binary(Value) ->
+ list_to_atom(binary_to_list(Value));
+as_atom(Value) when is_list(Value) ->
+ list_to_atom(Value);
+as_atom(Value) ->
+ rabbit_log:error("Unexpected data type for atom value: ~p~n",
+ [Value]),
+ Value.
+
+
+%%--------------------------------------------------------------------
+%% @doc
+%% Return the passed in value as an integer.
+%% @end
+%%--------------------------------------------------------------------
+-spec as_integer(binary() | integer() | string()) -> integer() | undefined.
+as_integer([]) -> undefined;
+as_integer(Value) when is_binary(Value) ->
+ list_to_integer(as_string(Value));
+as_integer(Value) when is_list(Value) ->
+ list_to_integer(Value);
+as_integer(Value) when is_integer(Value) ->
+ Value;
+as_integer(Value) ->
+ rabbit_log:error("Unexpected data type for integer value: ~p~n",
+ [Value]),
+ Value.
+
+
+%%--------------------------------------------------------------------
+%% @doc
+%% Return the passed in value as a string.
+%% @end
+%%--------------------------------------------------------------------
+-spec as_string(Value :: atom() | binary() | integer() | string())
+ -> string().
+as_string([]) -> "";
+as_string(Value) when is_atom(Value) ->
+ as_string(atom_to_list(Value));
+as_string(Value) when is_binary(Value) ->
+ as_string(binary_to_list(Value));
+as_string(Value) when is_integer(Value) ->
+ as_string(integer_to_list(Value));
+as_string(Value) when is_list(Value) ->
+ lists:flatten(Value);
+as_string(Value) ->
+ rabbit_log:error("Unexpected data type for string value: ~p~n",
+ [Value]),
+ Value.
+
+
+%%--------------------------------------------------------------------
+%% @doc
+%% Return the IPv4 address for the current node for the specified
+%% network interface controller.
+%% @end
+%%--------------------------------------------------------------------
+-spec nic_ipv4(Device :: string())
+ -> {ok, string()} | {error, not_found}.
+nic_ipv4(Device) ->
+ {ok, Interfaces} = inet:getifaddrs(),
+ nic_ipv4(Device, Interfaces).
+
+
+%%--------------------------------------------------------------------
+%% @doc
+%% Parse the interface out of the list of interfaces, returning the
+%% IPv4 address if found.
+%% @end
+%%--------------------------------------------------------------------
+-spec nic_ipv4(Device :: string(), Interfaces :: list())
+ -> {ok, string()} | {error, not_found}.
+nic_ipv4(_, []) -> {error, not_found};
+nic_ipv4(Device, [{Interface, Opts}|_]) when Interface =:= Device ->
+ nic_ipv4_address(Opts);
+nic_ipv4(Device, [_|T]) ->
+ nic_ipv4(Device,T).
+
+
+%%--------------------------------------------------------------------
+%% @doc
+%% Return the formatted IPv4 address out of the list of addresses
+%% for the interface.
+%% @end
+%%--------------------------------------------------------------------
+-spec nic_ipv4_address([ifopt()]) -> {ok, string()} | {error, not_found}.
+nic_ipv4_address([]) -> {error, not_found};
+nic_ipv4_address([{addr, {A,B,C,D}}|_]) ->
+ {ok, inet_parse:ntoa({A,B,C,D})};
+nic_ipv4_address([_|T]) ->
+ nic_ipv4_address(T).
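+
+%% For example, with an interface list in the shape returned by
+%% inet:getifaddrs/0 (the addresses below are made up):
+%%
+%%   nic_ipv4("eth0", [{"eth0", [{addr, {192,168,1,10}}]}]) => {ok, "192.168.1.10"}
+%%   nic_ipv4("eth1", [{"eth0", [{addr, {192,168,1,10}}]}]) => {error, not_found}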
+
+
+%%--------------------------------------------------------------------
+%% @doc
+%% Return the hostname for the current node as a bare string, without
+%% the {ok, _} tuple returned by inet:gethostname/0. The hostname can be
+%% taken either from the network stack or from the node name.
+%% @end
+%%--------------------------------------------------------------------
+-spec node_hostname(boolean()) -> string().
+node_hostname(false = _FromNodename) ->
+ {ok, Hostname} = inet:gethostname(),
+ Hostname;
+node_hostname(true = _FromNodename) ->
+ {match, [Hostname]} = re:run(atom_to_list(node()), "@(.*)",
+ [{capture, [1], list}]),
+ Hostname.
+
+%%--------------------------------------------------------------------
+%% @doc
+%% Return the proper node name for clustering purposes.
+%% @end
+%%--------------------------------------------------------------------
+-spec node_name(Value :: atom() | binary() | string()) -> atom().
+node_name(Value) when is_atom(Value) ->
+ node_name(atom_to_list(Value));
+node_name(Value) when is_binary(Value) ->
+ node_name(binary_to_list(Value));
+node_name(Value) when is_list(Value) ->
+ case lists:member($@, Value) of
+ true ->
+ list_to_atom(Value);
+ false ->
+ list_to_atom(string:join([node_prefix(),
+ node_name_parse(as_string(Value))],
+ "@"))
+ end.
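+
+%% A sketch of the expected results, assuming neither RABBITMQ_NODENAME
+%% nor NODENAME is set (so node_prefix/0 returns "rabbit") and short
+%% node names are in use:
+%%
+%%   node_name("rabbit@host.example.com") => 'rabbit@host.example.com'
+%%   node_name("10.0.0.10")               => 'rabbit@10.0.0.10'
+%%   node_name("host.example.com")        => rabbit@host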
+
+%%--------------------------------------------------------------------
+%% @doc
+%% Parse the value passed in as the node name, checking whether it is an
+%% IPv4 address. If so, return it as-is. If not, check whether long name
+%% support is turned on: if it is, return the value unchanged; otherwise,
+%% extract the leftmost part of the period-delimited name.
+%% @end
+%%--------------------------------------------------------------------
+-spec node_name_parse(Value :: string()) -> string().
+node_name_parse(Value) ->
+ %% TODO: support IPv6 here
+ case inet:parse_ipv4strict_address(Value) of
+ {ok, _} ->
+ Value;
+ {error, einval} ->
+ node_name_parse(net_kernel:longnames(), Value)
+ end.
+
+
+%%--------------------------------------------------------------------
+%% @doc
+%% Continue the parsing logic from node_name_parse/1. At this point the
+%% value is known not to be an IPv4 address, so the decision is based on
+%% whether long node names are in use.
+%% @end
+%%--------------------------------------------------------------------
+-spec node_name_parse(UsesLongnames :: boolean(), Value :: string())
+ -> string().
+node_name_parse(true, Value) -> Value;
+node_name_parse(false, Value) ->
+ Parts = string:tokens(Value, "."),
+ node_name_parse(length(Parts), Value, Parts).
+
+
+%%--------------------------------------------------------------------
+%% @doc
+%% Return the correct hostname part: if the name is made up of multiple
+%% segments, such as www.rabbitmq.com, return the first one (www); if it
+%% is a single segment, return it as-is.
+%% @end
+%%--------------------------------------------------------------------
+-spec node_name_parse(Segments :: integer(),
+ Value :: string(),
+ Parts :: [string()])
+ -> string().
+node_name_parse(1, Value, _) -> Value;
+node_name_parse(_, _, Parts) ->
+ as_string(lists:nth(1, Parts)).
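+
+%% For instance, with short node names in use, the parsing chain yields:
+%%
+%%   node_name_parse("www.rabbitmq.com") => "www"
+%%   node_name_parse("localhost")        => "localhost"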
+
+
+%%--------------------------------------------------------------------
+%% @doc
+%% Extract the "local part" of the ``RABBITMQ_NODENAME`` environment
+%% variable, if set, otherwise use the default node name value
+%% (rabbit).
+%% @end
+%%--------------------------------------------------------------------
+-spec node_prefix() -> string().
+node_prefix() ->
+ case getenv("RABBITMQ_NODENAME") of
+ false -> ?DEFAULT_NODE_PREFIX;
+ Value ->
+ %% split the node name on "@" and keep the local part
+ lists:nth(1, string:tokens(rabbit_data_coercion:to_list(Value), "@"))
+ end.
+
+
+%%--------------------------------------------------------------------
+%% @doc
+%% Returns the port, even if Docker linking has overwritten the
+%% configured value with a URI instead of a numeric value.
+%% @end
+%%--------------------------------------------------------------------
+-spec parse_port(Value :: integer() | string()) -> integer().
+parse_port(Value) when is_list(Value) ->
+ as_integer(lists:last(string:tokens(Value, ":")));
+parse_port(Value) -> as_integer(Value).
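+
+%% For example (the URI below is a made-up Docker-style value):
+%%
+%%   parse_port("tcp://172.17.0.2:5672") => 5672
+%%   parse_port("5672")                  => 5672
+%%   parse_port(5672)                    => 5672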
+
+
+%%--------------------------------------------------------------------
+%% @doc
+%% Returns a proplist from a JSON structure (environment variable) or
+%% the settings key (already a proplist).
+%% @end
+%%--------------------------------------------------------------------
+-spec as_proplist(Value :: string() | [{string(), string()}]) ->
+ [{string(), string()}].
+as_proplist([Tuple | _] = Json) when is_tuple(Tuple) ->
+ Json;
+as_proplist([]) ->
+ [];
+as_proplist(List) when is_list(List) ->
+ Value = rabbit_data_coercion:to_binary(List),
+ case rabbit_json:try_decode(Value) of
+ {ok, Map} ->
+ [{binary_to_list(K), binary_to_list(V)}
+ || {K, V} <- maps:to_list(Map)];
+ {error, Error} ->
+ rabbit_log:error("Unexpected data type for proplist value: ~p. JSON parser returned an error: ~p!~n",
+ [Value, Error]),
+ []
+ end;
+as_proplist(Value) ->
+ rabbit_log:error("Unexpected data type for proplist value: ~p.~n",
+ [Value]),
+ [].
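+
+%% For example, with a hypothetical JSON value:
+%%
+%%   as_proplist("{\"region\": \"us-east-1\"}") => [{"region", "us-east-1"}]
+%%   as_proplist([{"region", "us-east-1"}])     => [{"region", "us-east-1"}]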
+
+%%--------------------------------------------------------------------
+%% @doc
+%% Returns a map from a JSON structure (environment variable) or from
+%% the settings key, converting proplists to maps as needed.
+%% @end
+%%--------------------------------------------------------------------
+-spec as_map(Value :: string() | [{string(), string()}] | map()) -> map().
+as_map([Tuple | _] = Json) when is_tuple(Tuple) ->
+ maps:from_list(Json);
+as_map([]) ->
+ #{};
+as_map(List) when is_list(List) ->
+ Value = rabbit_data_coercion:to_binary(List),
+ case rabbit_json:try_decode(Value) of
+ {ok, Map} ->
+ Map;
+ {error, Error} ->
+ rabbit_log:error("Unexpected data type for map value: ~p. JSON parser returned an error: ~p!~n",
+ [Value, Error]),
+ #{}
+ end;
+as_map(Map) when is_map(Map) ->
+ Map;
+as_map(Value) ->
+ rabbit_log:error("Unexpected data type for map value: ~p.~n",
+ [Value]),
+ #{}.
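+
+%% For example, with a hypothetical JSON value:
+%%
+%%   as_map("{\"region\": \"us-east-1\"}") => #{<<"region">> => <<"us-east-1">>}
+%%   as_map([{region, "us-east-1"}])       => #{region => "us-east-1"}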
+
+-spec stringify_error({ok, term()} | {error, term()}) -> {ok, term()} | {error, string()}.
+stringify_error({ok, Val}) ->
+ {ok, Val};
+stringify_error({error, Str}) when is_list(Str) ->
+ {error, Str};
+stringify_error({error, Term}) ->
+ {error, lists:flatten(io_lib:format("~p", [Term]))}.
+
+-spec maybe_backend_configured(BackendConfigKey :: atom(),
+ ClusterFormationUndefinedFun :: fun(() -> {ok, term()} | ok),
+ BackendUndefinedFun :: fun(() -> {ok, term()} | ok),
+ ConfiguredFun :: fun((list()) -> {ok, term()})) -> {ok, term()}.
+maybe_backend_configured(BackendConfigKey,
+ ClusterFormationUndefinedFun,
+ BackendUndefinedFun,
+ ConfiguredFun) ->
+ case application:get_env(rabbit, cluster_formation) of
+ undefined ->
+ ClusterFormationUndefinedFun();
+ {ok, ClusterFormation} ->
+ rabbit_log:debug("Peer discovery: translated cluster formation configuration: ~p", [ClusterFormation]),
+ case proplists:get_value(BackendConfigKey, ClusterFormation) of
+ undefined ->
+ BackendUndefinedFun();
+ Proplist ->
+ rabbit_log:debug("Peer discovery: cluster formation backend configuration: ~p", [Proplist]),
+ ConfiguredFun(Proplist)
+ end
+ end.
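+
+%% A usage sketch (the backend key and funs below are illustrative only):
+%%
+%%   maybe_backend_configured(
+%%     peer_discovery_consul,
+%%     fun() -> {ok, no_cluster_formation} end,
+%%     fun() -> {ok, backend_not_configured} end,
+%%     fun(Proplist) -> {ok, proplists:get_value(consul_host, Proplist)} end)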
+
+%%--------------------------------------------------------------------
+%% @doc
+%% Return the passed in value as a list of strings.
+%% @end
+%%--------------------------------------------------------------------
+-spec as_list(Value :: stringifyable() | list()) -> list().
+as_list([]) -> [];
+as_list(Value) when is_atom(Value); is_integer(Value); is_binary(Value) ->
+ [Value];
+as_list(Value) when is_list(Value) ->
+ case io_lib:printable_list(Value) orelse io_lib:printable_unicode_list(Value) of
+ true -> [case string:to_float(S) of
+ {Float, []} -> Float;
+ _ -> case string:to_integer(S) of
+ {Integer, []} -> Integer;
+ _ -> string:strip(S)
+ end
+ end || S <- string:tokens(Value, ",")];
+ false -> Value
+ end;
+as_list(Value) ->
+ rabbit_log:error("Unexpected data type for list value: ~p~n",
+ [Value]),
+ Value.
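+
+%% For example:
+%%
+%%   as_list("a,1,2.5") => ["a", 1, 2.5]
+%%   as_list(<<"a">>)   => [<<"a">>]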
diff --git a/deps/rabbitmq_peer_discovery_common/test/config_schema_SUITE.erl b/deps/rabbitmq_peer_discovery_common/test/config_schema_SUITE.erl
new file mode 100644
index 0000000000..4b9dea3f9e
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_common/test/config_schema_SUITE.erl
@@ -0,0 +1,54 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(config_schema_SUITE).
+
+-compile(export_all).
+
+all() ->
+ [
+ run_snippets
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ Config1 = rabbit_ct_helpers:run_setup_steps(Config),
+ rabbit_ct_config_schema:init_schemas(rabbitmq_peer_discovery_common, Config1).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, Testcase}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_testcase(Testcase, Config) ->
+ Config1 = rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()),
+ rabbit_ct_helpers:testcase_finished(Config1, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+run_snippets(Config) ->
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, run_snippets1, [Config]).
+
+run_snippets1(Config) ->
+ rabbit_ct_config_schema:run_snippets(Config).
+
diff --git a/deps/rabbitmq_peer_discovery_common/test/config_schema_SUITE_data/rabbitmq_peer_discovery_common.snippets b/deps/rabbitmq_peer_discovery_common/test/config_schema_SUITE_data/rabbitmq_peer_discovery_common.snippets
new file mode 100644
index 0000000000..9d285b62eb
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_common/test/config_schema_SUITE_data/rabbitmq_peer_discovery_common.snippets
@@ -0,0 +1,68 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+[
+ %% interval
+ {host_proxy, "cluster_formation.node_cleanup.interval = 60", [
+ {rabbit, [
+ {cluster_formation, [
+ {node_cleanup, [
+ {cleanup_interval, 60}
+ ]}
+ ]}
+ ]}
+ ], [rabbitmq_peer_discovery_common]
+ }
+ %% only log warnings?
+ , {host_proxy, "cluster_formation.node_cleanup.only_log_warning = true", [
+ {rabbit, [
+ {cluster_formation, [
+ {node_cleanup, [
+ {cleanup_only_log_warning, true}
+ ]}
+ ]}
+ ]}
+ ], [rabbitmq_peer_discovery_common]
+ }
+ %% HTTP proxy host
+ , {host_proxy, "cluster_formation.proxy.http_proxy = proxy.example.local", [
+ {rabbit, [
+ {cluster_formation, [
+ {proxy, [
+ {http_proxy, "proxy.example.local"}
+ ]}
+ ]}
+ ]}
+ ], [rabbitmq_peer_discovery_common]
+ }
+ %% HTTPS proxy host
+ , {host_proxy, "cluster_formation.proxy.https_proxy = proxy.example.local", [
+ {rabbit, [
+ {cluster_formation, [
+ {proxy, [
+ {https_proxy, "proxy.example.local"}
+ ]}
+ ]}
+ ]}
+ ], [rabbitmq_peer_discovery_common]
+ }
+ %% proxy exclusions
+ , {host_proxy,
+"
+cluster_formation.proxy.proxy_exclusions.1 = host.a
+cluster_formation.proxy.proxy_exclusions.2 = host.b
+cluster_formation.proxy.proxy_exclusions.3 = host.c", [
+ {rabbit, [
+ {cluster_formation, [
+ {proxy, [
+ {proxy_exclusions, ["host.a", "host.b", "host.c"]}
+ ]}
+ ]}
+ ]}
+ ], [rabbitmq_peer_discovery_common]
+ }
+].
diff --git a/deps/rabbitmq_peer_discovery_consul/.gitignore b/deps/rabbitmq_peer_discovery_consul/.gitignore
new file mode 100644
index 0000000000..5f4c759658
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_consul/.gitignore
@@ -0,0 +1,25 @@
+*~
+.sw?
+.*.sw?
+*.beam
+*.coverdata
+/.erlang.mk/
+/cover/
+/deps/
+/doc/
+/ebin/
+/escript/
+/escript.lock
+/git-revisions.txt
+/logs/
+/plugins/
+/plugins.lock
+/rebar.config
+/rebar.lock
+/sbin/
+/sbin.lock
+/test/config_schema_SUITE_data/schema/
+/test/ct.cover.spec
+/xrefr
+
+/rabbitmq_peer_discovery_consul.d
diff --git a/deps/rabbitmq_peer_discovery_consul/.travis.yml b/deps/rabbitmq_peer_discovery_consul/.travis.yml
new file mode 100644
index 0000000000..be2805cfb5
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_consul/.travis.yml
@@ -0,0 +1,61 @@
+# vim:sw=2:et:
+
+os: linux
+dist: xenial
+language: elixir
+notifications:
+ email:
+ recipients:
+ - alerts@rabbitmq.com
+ on_success: never
+ on_failure: always
+addons:
+ apt:
+ packages:
+ - awscli
+cache:
+ apt: true
+env:
+ global:
+ - secure: muidjSYyIPBi3e01ZdBloqCIxj1tAdeQiCCkSP93a1Mm46sA/cfYqn3EOVU/JyyJ+z7DEeqaNcIUaLxO17vvfYU6osf8DaGBFqH9jT0FYvwyysEP8eHvooXXwbFyxcg5wRXmTWkHX74WPDb07lISc9u6/EWTXLvbWD4hqrzFoo6b40v1l/spZtbv0vFf1tOkqmdx8YKlenjY7sGXdZZfly4wqFoML/+5NDZPoxytmjgmAat+O392pGGpXmqLHS2sA6r0TGfUn47H+OmyN2RE7FFojvWOJYz0/t8E1E6ISYXze/fEfQuYocIhhbtTSrB3OJ+Y5UBWieQUdJr9z6DYGFTb/yPiifSbF1fgJQiMGyywWYIWwc396rKxltNZHSGBbQLyZ0onl/zDz4kiRaGd++4HWkFuLP/vWqMZWx69MWcGlcjunFB+oECZi59olmgr2qrrAgVMrropbMSgm4LC5h9hHyRHHMCcEOaSdJjwzVUbJw70EnwBPEhIl+s/9VBQd6px99Ua9SFxVJLsCYaM7LsBZAb6sbcuQtgo5Z1p91zNroHs1YyceEl5Cmi226w9snvnUYgl5uNxbKjRp9eLYyWSJbc7M83i0nN/NgDgsryPWY+cxKcgoO8IBxE+vjWqwhXCgTu9fmbbe6V2MEJ/xMlHPje3GlGrDZORSQzTA70=
+ - secure: ABGIg6n0yJeAyvIbaAtsm5UeJYE6F09B6rSdaf054lnkDTuB9Rm5Rqx4PUckd3CBzo8rGeYrbJuCSvyBUmfR4nFZsPMUOaSJlV7a6L4oRowkaeE7OepevSN8YaBXSv2mw3uflCaCBfvRdzLYl3gX/8WpT32JBNIhw5UDBisoeu/1Iwjp28VFwrOKyap4D0VRperr/M2Mn3zvkiJ6pHLNOOokqkGYJDES+aIMuXSWTPm4PPeSgXxFFdHQo1lM+10nqX7Dm1HsIX6U6ST4ibbR4MAu4IGUR1kEpMOjqmoVPdFsxKaYMWhewHQ0xJYXIskPHjDXFTDz38o+9KanUfnINdxMeuC7XNGwQZ0CNkWYF0dfxwOuCSzBIUxbjhYhwEyUF6O4qICQk429QJcd4UfKQGNuDhVp0ufAQsPeXpBgP2YsVvwWgk00s6KR3XC/I7raJtQXQ9dU8cEEXuzGmW+WTSd5YfcyBJTyxw7YPO/a0Zk+51O6F6lpsS4q4WH7e4VHwOq8lGwsI7BcpCvOPb4M6EdaE76fc2uDn/lqL0Q4w3hV01ctOI6vqjjV3gglhuGZZ+p0JWa5IHBUifP6Kou2KRyDFHbIfXs0HyFvq7APAnDZSoUCQOjKro3ui8vgE/wYJL4XzEWbY3bn0LHbNe4ZUpzACUShjUNIuG3+3DgaPM4=
+
+ # $base_rmq_ref is used by rabbitmq-components.mk to select the
+ # appropriate branch for dependencies.
+ - base_rmq_ref=master
+
+elixir:
+ - '1.9'
+otp_release:
+ - '21.3'
+ - '22.2'
+
+install:
+ # This project being an Erlang one (we just set language to Elixir
+ # to ensure it is installed), we don't want Travis to run mix(1)
+ # automatically as it will break.
+ skip
+
+script:
+ # $current_rmq_ref is also used by rabbitmq-components.mk to select
+ # the appropriate branch for dependencies.
+ - make check-rabbitmq-components.mk
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+ - make xref
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+ - make tests
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+
+after_failure:
+ - |
+ cd "$TRAVIS_BUILD_DIR"
+ if test -d logs && test "$AWS_ACCESS_KEY_ID" && test "$AWS_SECRET_ACCESS_KEY"; then
+ archive_name="$(basename "$TRAVIS_REPO_SLUG")-$TRAVIS_JOB_NUMBER"
+
+ tar -c --transform "s/^logs/${archive_name}/" -f - logs | \
+ xz > "${archive_name}.tar.xz"
+
+ aws s3 cp "${archive_name}.tar.xz" s3://server-release-pipeline/travis-ci-logs/ \
+ --region eu-west-1 \
+ --acl public-read
+ fi
diff --git a/deps/rabbitmq_peer_discovery_consul/CODE_OF_CONDUCT.md b/deps/rabbitmq_peer_discovery_consul/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000000..08697906fd
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_consul/CODE_OF_CONDUCT.md
@@ -0,0 +1,44 @@
+# Contributor Code of Conduct
+
+As contributors and maintainers of this project, and in the interest of fostering an open
+and welcoming community, we pledge to respect all people who contribute through reporting
+issues, posting feature requests, updating documentation, submitting pull requests or
+patches, and other activities.
+
+We are committed to making participation in this project a harassment-free experience for
+everyone, regardless of level of experience, gender, gender identity and expression,
+sexual orientation, disability, personal appearance, body size, race, ethnicity, age,
+religion, or nationality.
+
+Examples of unacceptable behavior by participants include:
+
+ * The use of sexualized language or imagery
+ * Personal attacks
+ * Trolling or insulting/derogatory comments
+ * Public or private harassment
+ * Publishing others' private information, such as physical or electronic addresses,
+ without explicit permission
+ * Other unethical or unprofessional conduct
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments,
+commits, code, wiki edits, issues, and other contributions that are not aligned to this
+Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors
+that they deem inappropriate, threatening, offensive, or harmful.
+
+By adopting this Code of Conduct, project maintainers commit themselves to fairly and
+consistently applying these principles to every aspect of managing this project. Project
+maintainers who do not follow or enforce the Code of Conduct may be permanently removed
+from the project team.
+
+This Code of Conduct applies both within project spaces and in public spaces when an
+individual is representing the project or its community.
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by
+contacting a project maintainer at [info@rabbitmq.com](mailto:info@rabbitmq.com). All complaints will
+be reviewed and investigated and will result in a response that is deemed necessary and
+appropriate to the circumstances. Maintainers are obligated to maintain confidentiality
+with regard to the reporter of an incident.
+
+This Code of Conduct is adapted from the
+[Contributor Covenant](https://contributor-covenant.org), version 1.3.0, available at
+[contributor-covenant.org/version/1/3/0/](https://contributor-covenant.org/version/1/3/0/)
diff --git a/deps/rabbitmq_peer_discovery_consul/CONTRIBUTING.md b/deps/rabbitmq_peer_discovery_consul/CONTRIBUTING.md
new file mode 100644
index 0000000000..42af1f7517
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_consul/CONTRIBUTING.md
@@ -0,0 +1,123 @@
+Thank you for using RabbitMQ and for taking the time to contribute to the project.
+This document has two main parts:
+
+ * when and how to file GitHub issues for RabbitMQ projects
+ * how to submit pull requests
+
+They intend to save you and RabbitMQ maintainers some time, so please
+take a moment to read through them.
+
+## Overview
+
+### GitHub issues
+
+The RabbitMQ team uses GitHub issues for _specific actionable items_ that
+engineers can work on. This assumes the following:
+
+* GitHub issues are not used for questions, investigations, root cause
+ analysis, discussions of potential issues, etc. (as defined by this team)
+* Enough information is provided by the reporter for maintainers to work with
+
+The team receives many questions through various venues every single
+day. Frequently, these questions do not include the necessary details
+the team needs to begin useful work. GitHub issues can very quickly
+turn into a something impossible to navigate and make sense
+of. Because of this, questions, investigations, root cause analysis,
+and discussions of potential features are all considered to be
+[mailing list][rmq-users] material. If you are unsure where to begin,
+the [RabbitMQ users mailing list][rmq-users] is the right place.
+
+Getting all the details necessary to reproduce an issue, make a
+conclusion or even form a hypothesis about what's happening can take a
+fair amount of time. Please help others help you by providing a way to
+reproduce the behavior you're observing, or at least sharing as much
+relevant information as possible on the [RabbitMQ users mailing
+list][rmq-users].
+
+Please provide versions of the software used:
+
+ * RabbitMQ server
+ * Erlang
+ * Operating system version (and distribution, if applicable)
+ * All client libraries used
+ * RabbitMQ plugins (if applicable)
+
+The following information greatly helps in investigating and reproducing issues:
+
+ * RabbitMQ server logs
+ * A code example or terminal transcript that can be used to reproduce
+ * Full exception stack traces (a single line message is not enough!)
+ * `rabbitmqctl report` and `rabbitmqctl environment` output
+ * Other relevant details about the environment and workload, e.g. a traffic capture
+ * Feel free to edit out hostnames and other potentially sensitive information.
+
+To make collecting much of this and other environment information easier, use
+the [`rabbitmq-collect-env`][rmq-collect-env] script. It will produce an archive with
+server logs, operating system logs, output of certain diagnostics commands and so on.
+Please note that **no effort is made to scrub any information that may be sensitive**.
+
+### Pull Requests
+
+RabbitMQ projects use pull requests to discuss, collaborate on and accept code contributions.
+Pull requests are the primary place for discussing code changes.
+
+Here's the recommended workflow:
+
+ * [Fork the repository][github-fork] or repositories you plan on contributing to. If multiple
+ repositories are involved in addressing the same issue, please use the same branch name
+ in each repository
+ * Create a branch with a descriptive name in the relevant repositories
+ * Make your changes, run tests (usually with `make tests`), commit with a
+ [descriptive message][git-commit-msgs], push to your fork
+ * Submit pull requests with an explanation of what has been changed and **why**
+ * Submit a filled out and signed [Contributor Agreement][ca-agreement] if needed (see below)
+ * Be patient. We will get to your pull request eventually
+
+If what you are going to work on is a substantial change, please first
+ask the core team for their opinion on the [RabbitMQ users mailing list][rmq-users].
+
+## Running Tests
+
+To run a "fast suite" (a subset of tests):
+
+ make ct-fast
+
+To run a "slow suite" (a subset of tests that take much longer to run):
+
+ make ct-slow
+
+To run a particular suite:
+
+ make ct-$suite_name
+
+for example, to run the `backing_queue` suite:
+
+ make ct-backing_queue
+
+Finally,
+
+ make tests
+
+will run all suites.
+
+## Code of Conduct
+
+See [CODE_OF_CONDUCT.md](./CODE_OF_CONDUCT.md).
+
+## Contributor Agreement
+
+If you want to contribute a non-trivial change, please submit a signed
+copy of our [Contributor Agreement][ca-agreement] around the time you
+submit your pull request. This will make it much easier (in some
+cases, possible) for the RabbitMQ team at Pivotal to merge your
+contribution.
+
+## Where to Ask Questions
+
+If something isn't clear, feel free to ask on our [mailing list][rmq-users].
+
+[rmq-collect-env]: https://github.com/rabbitmq/support-tools/blob/master/scripts/rabbitmq-collect-env
+[git-commit-msgs]: https://chris.beams.io/posts/git-commit/
+[rmq-users]: https://groups.google.com/forum/#!forum/rabbitmq-users
+[ca-agreement]: https://cla.pivotal.io/sign/rabbitmq
+[github-fork]: https://help.github.com/articles/fork-a-repo/
diff --git a/deps/rabbitmq_peer_discovery_consul/LICENSE b/deps/rabbitmq_peer_discovery_consul/LICENSE
new file mode 100644
index 0000000000..f2da65d175
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_consul/LICENSE
@@ -0,0 +1,4 @@
+This package is licensed under the MPL 2.0. For the MPL 2.0, please see LICENSE-MPL-RabbitMQ.
+
+If you have any questions regarding licensing, please contact us at
+info@rabbitmq.com.
diff --git a/deps/rabbitmq_peer_discovery_consul/LICENSE-MPL-RabbitMQ b/deps/rabbitmq_peer_discovery_consul/LICENSE-MPL-RabbitMQ
new file mode 100644
index 0000000000..14e2f777f6
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_consul/LICENSE-MPL-RabbitMQ
@@ -0,0 +1,373 @@
+Mozilla Public License Version 2.0
+==================================
+
+1. Definitions
+--------------
+
+1.1. "Contributor"
+ means each individual or legal entity that creates, contributes to
+ the creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+ means the combination of the Contributions of others (if any) used
+ by a Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+ means Source Code Form to which the initial Contributor has attached
+ the notice in Exhibit A, the Executable Form of such Source Code
+ Form, and Modifications of such Source Code Form, in each case
+ including portions thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ (a) that the initial Contributor has attached the notice described
+ in Exhibit B to the Covered Software; or
+
+ (b) that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the
+ terms of a Secondary License.
+
+1.6. "Executable Form"
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+ means a work that combines Covered Software with other material, in
+ a separate file or files, that is not Covered Software.
+
+1.8. "License"
+ means this document.
+
+1.9. "Licensable"
+ means having the right to grant, to the maximum extent possible,
+ whether at the time of the initial grant or subsequently, any and
+ all of the rights conveyed by this License.
+
+1.10. "Modifications"
+ means any of the following:
+
+ (a) any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered
+ Software; or
+
+ (b) any new file in Source Code Form that contains any Covered
+ Software.
+
+1.11. "Patent Claims" of a Contributor
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the
+ License, by the making, using, selling, offering for sale, having
+ made, import, or transfer of either its Contributions or its
+ Contributor Version.
+
+1.12. "Secondary License"
+ means either the GNU General Public License, Version 2.0, the GNU
+ Lesser General Public License, Version 2.1, the GNU Affero General
+ Public License, Version 3.0, or any later versions of those
+ licenses.
+
+1.13. "Source Code Form"
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that
+ controls, is controlled by, or is under common control with You. For
+ purposes of this definition, "control" means (a) the power, direct
+ or indirect, to cause the direction or management of such entity,
+ whether by contract or otherwise, or (b) ownership of more than
+ fifty percent (50%) of the outstanding shares or beneficial
+ ownership of such entity.
+
+2. License Grants and Conditions
+--------------------------------
+
+2.1. Grants
+
+Each Contributor hereby grants You a world-wide, royalty-free,
+non-exclusive license:
+
+(a) under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+(b) under Patent Claims of such Contributor to make, use, sell, offer
+ for sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+The licenses granted in Section 2.1 with respect to any Contribution
+become effective for each Contribution on the date the Contributor first
+distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+The licenses granted in this Section 2 are the only rights granted under
+this License. No additional rights or licenses will be implied from the
+distribution or licensing of Covered Software under this License.
+Notwithstanding Section 2.1(b) above, no patent license is granted by a
+Contributor:
+
+(a) for any code that a Contributor has removed from Covered Software;
+ or
+
+(b) for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+(c) under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+This License does not grant any rights in the trademarks, service marks,
+or logos of any Contributor (except as may be necessary to comply with
+the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+No Contributor makes additional grants as a result of Your choice to
+distribute the Covered Software under a subsequent version of this
+License (see Section 10.2) or under the terms of a Secondary License (if
+permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+Each Contributor represents that the Contributor believes its
+Contributions are its original creation(s) or it has sufficient rights
+to grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+This License is not intended to limit any rights You have under
+applicable copyright doctrines of fair use, fair dealing, or other
+equivalents.
+
+2.7. Conditions
+
+Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
+in Section 2.1.
+
+3. Responsibilities
+-------------------
+
+3.1. Distribution of Source Form
+
+All distribution of Covered Software in Source Code Form, including any
+Modifications that You create or to which You contribute, must be under
+the terms of this License. You must inform recipients that the Source
+Code Form of the Covered Software is governed by the terms of this
+License, and how they can obtain a copy of this License. You may not
+attempt to alter or restrict the recipients' rights in the Source Code
+Form.
+
+3.2. Distribution of Executable Form
+
+If You distribute Covered Software in Executable Form then:
+
+(a) such Covered Software must also be made available in Source Code
+ Form, as described in Section 3.1, and You must inform recipients of
+ the Executable Form how they can obtain a copy of such Source Code
+ Form by reasonable means in a timely manner, at a charge no more
+ than the cost of distribution to the recipient; and
+
+(b) You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter
+ the recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+You may create and distribute a Larger Work under terms of Your choice,
+provided that You also comply with the requirements of this License for
+the Covered Software. If the Larger Work is a combination of Covered
+Software with a work governed by one or more Secondary Licenses, and the
+Covered Software is not Incompatible With Secondary Licenses, this
+License permits You to additionally distribute such Covered Software
+under the terms of such Secondary License(s), so that the recipient of
+the Larger Work may, at their option, further distribute the Covered
+Software under the terms of either this License or such Secondary
+License(s).
+
+3.4. Notices
+
+You may not remove or alter the substance of any license notices
+(including copyright notices, patent notices, disclaimers of warranty,
+or limitations of liability) contained within the Source Code Form of
+the Covered Software, except that You may alter any license notices to
+the extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+You may choose to offer, and to charge a fee for, warranty, support,
+indemnity or liability obligations to one or more recipients of Covered
+Software. However, You may do so only on Your own behalf, and not on
+behalf of any Contributor. You must make it absolutely clear that any
+such warranty, support, indemnity, or liability obligation is offered by
+You alone, and You hereby agree to indemnify every Contributor for any
+liability incurred by such Contributor as a result of warranty, support,
+indemnity or liability terms You offer. You may include additional
+disclaimers of warranty and limitations of liability specific to any
+jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+---------------------------------------------------
+
+If it is impossible for You to comply with any of the terms of this
+License with respect to some or all of the Covered Software due to
+statute, judicial order, or regulation then You must: (a) comply with
+the terms of this License to the maximum extent possible; and (b)
+describe the limitations and the code they affect. Such description must
+be placed in a text file included with all distributions of the Covered
+Software under this License. Except to the extent prohibited by statute
+or regulation, such description must be sufficiently detailed for a
+recipient of ordinary skill to be able to understand it.
+
+5. Termination
+--------------
+
+5.1. The rights granted under this License will terminate automatically
+if You fail to comply with any of its terms. However, if You become
+compliant, then the rights granted under this License from a particular
+Contributor are reinstated (a) provisionally, unless and until such
+Contributor explicitly and finally terminates Your grants, and (b) on an
+ongoing basis, if such Contributor fails to notify You of the
+non-compliance by some reasonable means prior to 60 days after You have
+come back into compliance. Moreover, Your grants from a particular
+Contributor are reinstated on an ongoing basis if such Contributor
+notifies You of the non-compliance by some reasonable means, this is the
+first time You have received notice of non-compliance with this License
+from such Contributor, and You become compliant prior to 30 days after
+Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+infringement claim (excluding declaratory judgment actions,
+counter-claims, and cross-claims) alleging that a Contributor Version
+directly or indirectly infringes any patent, then the rights granted to
+You by any and all Contributors for the Covered Software under Section
+2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all
+end user license agreements (excluding distributors and resellers) which
+have been validly granted by You or Your distributors under this License
+prior to termination shall survive termination.
+
+************************************************************************
+* *
+* 6. Disclaimer of Warranty *
+* ------------------------- *
+* *
+* Covered Software is provided under this License on an "as is" *
+* basis, without warranty of any kind, either expressed, implied, or *
+* statutory, including, without limitation, warranties that the *
+* Covered Software is free of defects, merchantable, fit for a *
+* particular purpose or non-infringing. The entire risk as to the *
+* quality and performance of the Covered Software is with You. *
+* Should any Covered Software prove defective in any respect, You *
+* (not any Contributor) assume the cost of any necessary servicing, *
+* repair, or correction. This disclaimer of warranty constitutes an *
+* essential part of this License. No use of any Covered Software is *
+* authorized under this License except under this disclaimer. *
+* *
+************************************************************************
+
+************************************************************************
+* *
+* 7. Limitation of Liability *
+* -------------------------- *
+* *
+* Under no circumstances and under no legal theory, whether tort *
+* (including negligence), contract, or otherwise, shall any *
+* Contributor, or anyone who distributes Covered Software as *
+* permitted above, be liable to You for any direct, indirect, *
+* special, incidental, or consequential damages of any character *
+* including, without limitation, damages for lost profits, loss of *
+* goodwill, work stoppage, computer failure or malfunction, or any *
+* and all other commercial damages or losses, even if such party *
+* shall have been informed of the possibility of such damages. This *
+* limitation of liability shall not apply to liability for death or *
+* personal injury resulting from such party's negligence to the *
+* extent applicable law prohibits such limitation. Some *
+* jurisdictions do not allow the exclusion or limitation of *
+* incidental or consequential damages, so this exclusion and *
+* limitation may not apply to You. *
+* *
+************************************************************************
+
+8. Litigation
+-------------
+
+Any litigation relating to this License may be brought only in the
+courts of a jurisdiction where the defendant maintains its principal
+place of business and such litigation shall be governed by laws of that
+jurisdiction, without reference to its conflict-of-law provisions.
+Nothing in this Section shall prevent a party's ability to bring
+cross-claims or counter-claims.
+
+9. Miscellaneous
+----------------
+
+This License represents the complete agreement concerning the subject
+matter hereof. If any provision of this License is held to be
+unenforceable, such provision shall be reformed only to the extent
+necessary to make it enforceable. Any law or regulation which provides
+that the language of a contract shall be construed against the drafter
+shall not be used to construe this License against a Contributor.
+
+10. Versions of the License
+---------------------------
+
+10.1. New Versions
+
+Mozilla Foundation is the license steward. Except as provided in Section
+10.3, no one other than the license steward has the right to modify or
+publish new versions of this License. Each version will be given a
+distinguishing version number.
+
+10.2. Effect of New Versions
+
+You may distribute the Covered Software under the terms of the version
+of the License under which You originally received the Covered Software,
+or under the terms of any subsequent version published by the license
+steward.
+
+10.3. Modified Versions
+
+If you create software not governed by this License, and you want to
+create a new license for such software, you may create and use a
+modified version of this License if you rename the license and remove
+any references to the name of the license steward (except to note that
+such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+Licenses
+
+If You choose to distribute Source Code Form that is Incompatible With
+Secondary Licenses under the terms of this version of the License, the
+notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+-------------------------------------------
+
+ This Source Code Form is subject to the terms of the Mozilla Public
+ License, v. 2.0. If a copy of the MPL was not distributed with this
+ file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular
+file, then You may include the notice in a location (such as a LICENSE
+file in a relevant directory) where a recipient would be likely to look
+for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+---------------------------------------------------------
+
+ This Source Code Form is "Incompatible With Secondary Licenses", as
+ defined by the Mozilla Public License, v. 2.0.
diff --git a/deps/rabbitmq_peer_discovery_consul/Makefile b/deps/rabbitmq_peer_discovery_consul/Makefile
new file mode 100644
index 0000000000..48b2f4477c
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_consul/Makefile
@@ -0,0 +1,19 @@
+PROJECT = rabbitmq_peer_discovery_consul
+PROJECT_DESCRIPTION = Consul-based RabbitMQ peer discovery backend
+PROJECT_MOD = rabbitmq_peer_discovery_consul_app
+
+DEPS = rabbit_common rabbitmq_peer_discovery_common rabbit
+TEST_DEPS = rabbitmq_ct_helpers rabbitmq_ct_client_helpers ct_helper meck
+dep_ct_helper = git https://github.com/extend/ct_helper.git master
+
+DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-plugin.mk
+DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk
+
+# FIXME: Use erlang.mk patched for RabbitMQ, while waiting for PRs to be
+# reviewed and merged.
+
+ERLANG_MK_REPO = https://github.com/rabbitmq/erlang.mk.git
+ERLANG_MK_COMMIT = rabbitmq-tmp
+
+include rabbitmq-components.mk
+include erlang.mk
diff --git a/deps/rabbitmq_peer_discovery_consul/README.md b/deps/rabbitmq_peer_discovery_consul/README.md
new file mode 100644
index 0000000000..448e15c007
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_consul/README.md
@@ -0,0 +1,56 @@
+# RabbitMQ Peer Discovery Consul
+
+This is a Consul-based implementation of RabbitMQ [peer discovery interface](https://www.rabbitmq.com/blog/2018/02/12/peer-discovery-subsystem-in-rabbitmq-3-7/)
+(new in 3.7.0, previously available in the [rabbitmq-autocluster plugin](https://github.com/rabbitmq/rabbitmq-autocluster)
+by Gavin Roy).
+
+This plugin only performs peer discovery and most basic node health monitoring
+using [Consul](https://www.consul.io/) as a data source.
+Please get familiar with [RabbitMQ clustering fundamentals](https://rabbitmq.com/clustering.html) before attempting
+to use it.
+
+While it may seem at times that this is a RabbitMQ cluster management solution,
+it is not. Cluster provisioning and most of Day 2 operations such as [proper monitoring](https://rabbitmq.com/monitoring.html)
+are not in scope for this plugin.
+
+
+## Supported RabbitMQ Versions
+
+This plugin requires RabbitMQ 3.7.0 or later.
+
+For a Consul-based peer discovery and cluster formation
+mechanism that supports 3.6.x, see [rabbitmq-autocluster](https://github.com/rabbitmq/rabbitmq-autocluster).
+
+
+## Installation
+
+This plugin ships with [supported RabbitMQ versions](https://www.rabbitmq.com/versions.html).
+There is no need to install it separately.
+
+As with any [plugin](https://rabbitmq.com/plugins.html), it must be enabled before it
+can be used. For peer discovery plugins, this means they must be [enabled](https://rabbitmq.com/plugins.html#basics) or [preconfigured](https://rabbitmq.com/plugins.html#enabled-plugins-file)
+before first node boot:
+
+```
+rabbitmq-plugins --offline enable rabbitmq_peer_discovery_consul
+```
+
+
+## Documentation
+
+See [RabbitMQ Cluster Formation guide](https://www.rabbitmq.com/cluster-formation.html).
+
+
+## Contributing
+
+See [CONTRIBUTING.md](./CONTRIBUTING.md) and our [development process overview](https://www.rabbitmq.com/github.html).
+
+
+## License
+
+[Licensed under the MPL](LICENSE-MPL-RabbitMQ), same as RabbitMQ server.
+
+
+## Copyright
+
+(c) 2007-2020 VMware, Inc. or its affiliates.
diff --git a/deps/rabbitmq_peer_discovery_consul/erlang.mk b/deps/rabbitmq_peer_discovery_consul/erlang.mk
new file mode 100644
index 0000000000..fce4be0b0a
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_consul/erlang.mk
@@ -0,0 +1,7808 @@
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+.PHONY: all app apps deps search rel relup docs install-docs check tests clean distclean help erlang-mk
+
+ERLANG_MK_FILENAME := $(realpath $(lastword $(MAKEFILE_LIST)))
+export ERLANG_MK_FILENAME
+
+ERLANG_MK_VERSION = 2019.07.01-40-geb3e4b0
+ERLANG_MK_WITHOUT =
+
+# Make 3.81 and 3.82 are deprecated.
+
+ifeq ($(MAKELEVEL)$(MAKE_VERSION),03.81)
+$(warning Please upgrade to GNU Make 4 or later: https://erlang.mk/guide/installation.html)
+endif
+
+ifeq ($(MAKELEVEL)$(MAKE_VERSION),03.82)
+$(warning Please upgrade to GNU Make 4 or later: https://erlang.mk/guide/installation.html)
+endif
+
+# Core configuration.
+
+PROJECT ?= $(notdir $(CURDIR))
+PROJECT := $(strip $(PROJECT))
+
+PROJECT_VERSION ?= rolling
+PROJECT_MOD ?= $(PROJECT)_app
+PROJECT_ENV ?= []
+
+# Verbosity.
+
+V ?= 0
+
+verbose_0 = @
+verbose_2 = set -x;
+verbose = $(verbose_$(V))
+
+ifeq ($(V),3)
+SHELL := $(SHELL) -x
+endif
+
+gen_verbose_0 = @echo " GEN " $@;
+gen_verbose_2 = set -x;
+gen_verbose = $(gen_verbose_$(V))
+
+gen_verbose_esc_0 = @echo " GEN " $$@;
+gen_verbose_esc_2 = set -x;
+gen_verbose_esc = $(gen_verbose_esc_$(V))
+
+# Temporary files directory.
+
+ERLANG_MK_TMP ?= $(CURDIR)/.erlang.mk
+export ERLANG_MK_TMP
+
+# "erl" command.
+
+ERL = erl +A1 -noinput -boot no_dot_erlang
+
+# Platform detection.
+
+ifeq ($(PLATFORM),)
+UNAME_S := $(shell uname -s)
+
+ifeq ($(UNAME_S),Linux)
+PLATFORM = linux
+else ifeq ($(UNAME_S),Darwin)
+PLATFORM = darwin
+else ifeq ($(UNAME_S),SunOS)
+PLATFORM = solaris
+else ifeq ($(UNAME_S),GNU)
+PLATFORM = gnu
+else ifeq ($(UNAME_S),FreeBSD)
+PLATFORM = freebsd
+else ifeq ($(UNAME_S),NetBSD)
+PLATFORM = netbsd
+else ifeq ($(UNAME_S),OpenBSD)
+PLATFORM = openbsd
+else ifeq ($(UNAME_S),DragonFly)
+PLATFORM = dragonfly
+else ifeq ($(shell uname -o),Msys)
+PLATFORM = msys2
+else
+$(error Unable to detect platform. Please open a ticket with the output of uname -a.)
+endif
+
+export PLATFORM
+endif
+
+# Core targets.
+
+all:: deps app rel
+
+# Noop to avoid a Make warning when there's nothing to do.
+rel::
+ $(verbose) :
+
+relup:: deps app
+
+check:: tests
+
+clean:: clean-crashdump
+
+clean-crashdump:
+ifneq ($(wildcard erl_crash.dump),)
+ $(gen_verbose) rm -f erl_crash.dump
+endif
+
+distclean:: clean distclean-tmp
+
+$(ERLANG_MK_TMP):
+ $(verbose) mkdir -p $(ERLANG_MK_TMP)
+
+distclean-tmp:
+ $(gen_verbose) rm -rf $(ERLANG_MK_TMP)
+
+help::
+ $(verbose) printf "%s\n" \
+ "erlang.mk (version $(ERLANG_MK_VERSION)) is distributed under the terms of the ISC License." \
+ "Copyright (c) 2013-2016 Loïc Hoguin <essen@ninenines.eu>" \
+ "" \
+ "Usage: [V=1] $(MAKE) [target]..." \
+ "" \
+ "Core targets:" \
+ " all Run deps, app and rel targets in that order" \
+ " app Compile the project" \
+ " deps Fetch dependencies (if needed) and compile them" \
+ " fetch-deps Fetch dependencies recursively (if needed) without compiling them" \
+ " list-deps List dependencies recursively on stdout" \
+ " search q=... Search for a package in the built-in index" \
+ " rel Build a release for this project, if applicable" \
+ " docs Build the documentation for this project" \
+ " install-docs Install the man pages for this project" \
+ " check Compile and run all tests and analysis for this project" \
+ " tests Run the tests for this project" \
+ " clean Delete temporary and output files from most targets" \
+ " distclean Delete all temporary and output files" \
+ " help Display this help and exit" \
+ " erlang-mk Update erlang.mk to the latest version"
+
+# Core functions.
+
+empty :=
+space := $(empty) $(empty)
+tab := $(empty) $(empty)
+comma := ,
+
+define newline
+
+
+endef
+
+define comma_list
+$(subst $(space),$(comma),$(strip $(1)))
+endef
+
+define escape_dquotes
+$(subst ",\",$1)
+endef
+
+# Adding erlang.mk to make Erlang scripts that call init:get_plain_arguments() happy.
+define erlang
+$(ERL) $2 -pz $(ERLANG_MK_TMP)/rebar/ebin -eval "$(subst $(newline),,$(call escape_dquotes,$1))" -- erlang.mk
+endef
+
+ifeq ($(PLATFORM),msys2)
+core_native_path = $(shell cygpath -m $1)
+else
+core_native_path = $1
+endif
+
+core_http_get = curl -Lf$(if $(filter-out 0,$(V)),,s)o $(call core_native_path,$1) $2
+
+core_eq = $(and $(findstring $(1),$(2)),$(findstring $(2),$(1)))
+
+# We skip files that contain spaces because they end up causing issues.
+core_find = $(if $(wildcard $1),$(shell find $(1:%/=%) \( -type l -o -type f \) -name $(subst *,\*,$2) | grep -v " "))
+
+core_lc = $(subst A,a,$(subst B,b,$(subst C,c,$(subst D,d,$(subst E,e,$(subst F,f,$(subst G,g,$(subst H,h,$(subst I,i,$(subst J,j,$(subst K,k,$(subst L,l,$(subst M,m,$(subst N,n,$(subst O,o,$(subst P,p,$(subst Q,q,$(subst R,r,$(subst S,s,$(subst T,t,$(subst U,u,$(subst V,v,$(subst W,w,$(subst X,x,$(subst Y,y,$(subst Z,z,$(1)))))))))))))))))))))))))))
+
+core_ls = $(filter-out $(1),$(shell echo $(1)))
+
+# @todo Use a solution that does not require using perl.
+core_relpath = $(shell perl -e 'use File::Spec; print File::Spec->abs2rel(@ARGV) . "\n"' $1 $2)
+
+define core_render
+ printf -- '$(subst $(newline),\n,$(subst %,%%,$(subst ','\'',$(subst $(tab),$(WS),$(call $(1))))))\n' > $(2)
+endef
+
+# Automated update.
+
+ERLANG_MK_REPO ?= https://github.com/ninenines/erlang.mk
+ERLANG_MK_COMMIT ?=
+ERLANG_MK_BUILD_CONFIG ?= build.config
+ERLANG_MK_BUILD_DIR ?= .erlang.mk.build
+
+erlang-mk: WITHOUT ?= $(ERLANG_MK_WITHOUT)
+erlang-mk:
+ifdef ERLANG_MK_COMMIT
+ $(verbose) git clone $(ERLANG_MK_REPO) $(ERLANG_MK_BUILD_DIR)
+ $(verbose) cd $(ERLANG_MK_BUILD_DIR) && git checkout $(ERLANG_MK_COMMIT)
+else
+ $(verbose) git clone --depth 1 $(ERLANG_MK_REPO) $(ERLANG_MK_BUILD_DIR)
+endif
+ $(verbose) if [ -f $(ERLANG_MK_BUILD_CONFIG) ]; then cp $(ERLANG_MK_BUILD_CONFIG) $(ERLANG_MK_BUILD_DIR)/build.config; fi
+ $(gen_verbose) $(MAKE) --no-print-directory -C $(ERLANG_MK_BUILD_DIR) WITHOUT='$(strip $(WITHOUT))' UPGRADE=1
+ $(verbose) cp $(ERLANG_MK_BUILD_DIR)/erlang.mk ./erlang.mk
+ $(verbose) rm -rf $(ERLANG_MK_BUILD_DIR)
+ $(verbose) rm -rf $(ERLANG_MK_TMP)
+
+# The erlang.mk package index is bundled in the default erlang.mk build.
+# Search for the string "copyright" to skip to the rest of the code.
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-kerl
+
+KERL_INSTALL_DIR ?= $(HOME)/erlang
+
+ifeq ($(strip $(KERL)),)
+KERL := $(ERLANG_MK_TMP)/kerl/kerl
+endif
+
+KERL_DIR = $(ERLANG_MK_TMP)/kerl
+
+export KERL
+
+KERL_GIT ?= https://github.com/kerl/kerl
+KERL_COMMIT ?= master
+
+KERL_MAKEFLAGS ?=
+
+OTP_GIT ?= https://github.com/erlang/otp
+
+define kerl_otp_target
+$(KERL_INSTALL_DIR)/$(1): $(KERL)
+ $(verbose) if [ ! -d $$@ ]; then \
+ MAKEFLAGS="$(KERL_MAKEFLAGS)" $(KERL) build git $(OTP_GIT) $(1) $(1); \
+ $(KERL) install $(1) $(KERL_INSTALL_DIR)/$(1); \
+ fi
+endef
+
+define kerl_hipe_target
+$(KERL_INSTALL_DIR)/$1-native: $(KERL)
+ $(verbose) if [ ! -d $$@ ]; then \
+ KERL_CONFIGURE_OPTIONS=--enable-native-libs \
+ MAKEFLAGS="$(KERL_MAKEFLAGS)" $(KERL) build git $(OTP_GIT) $1 $1-native; \
+ $(KERL) install $1-native $(KERL_INSTALL_DIR)/$1-native; \
+ fi
+endef
+
+$(KERL): $(KERL_DIR)
+
+$(KERL_DIR): | $(ERLANG_MK_TMP)
+ $(gen_verbose) git clone --depth 1 $(KERL_GIT) $(ERLANG_MK_TMP)/kerl
+ $(verbose) cd $(ERLANG_MK_TMP)/kerl && git checkout $(KERL_COMMIT)
+ $(verbose) chmod +x $(KERL)
+
+distclean:: distclean-kerl
+
+distclean-kerl:
+ $(gen_verbose) rm -rf $(KERL_DIR)
+
+# Allow users to select which version of Erlang/OTP to use for a project.
+
+ifneq ($(strip $(LATEST_ERLANG_OTP)),)
+# In some environments it is necessary to filter out master.
+ERLANG_OTP := $(notdir $(lastword $(sort\
+ $(filter-out $(KERL_INSTALL_DIR)/master $(KERL_INSTALL_DIR)/OTP_R%,\
+ $(filter-out %-rc1 %-rc2 %-rc3,$(wildcard $(KERL_INSTALL_DIR)/*[^-native]))))))
+endif
+
+ERLANG_OTP ?=
+ERLANG_HIPE ?=
+
+# Use kerl to enforce a specific Erlang/OTP version for a project.
+ifneq ($(strip $(ERLANG_OTP)),)
+export PATH := $(KERL_INSTALL_DIR)/$(ERLANG_OTP)/bin:$(PATH)
+SHELL := env PATH=$(PATH) $(SHELL)
+$(eval $(call kerl_otp_target,$(ERLANG_OTP)))
+
+# Build Erlang/OTP only if it doesn't already exist.
+ifeq ($(wildcard $(KERL_INSTALL_DIR)/$(ERLANG_OTP))$(BUILD_ERLANG_OTP),)
+$(info Building Erlang/OTP $(ERLANG_OTP)... Please wait...)
+$(shell $(MAKE) $(KERL_INSTALL_DIR)/$(ERLANG_OTP) ERLANG_OTP=$(ERLANG_OTP) BUILD_ERLANG_OTP=1 >&2)
+endif
+
+else
+# Same for a HiPE enabled VM.
+ifneq ($(strip $(ERLANG_HIPE)),)
+export PATH := $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native/bin:$(PATH)
+SHELL := env PATH=$(PATH) $(SHELL)
+$(eval $(call kerl_hipe_target,$(ERLANG_HIPE)))
+
+# Build Erlang/OTP only if it doesn't already exist.
+ifeq ($(wildcard $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native)$(BUILD_ERLANG_OTP),)
+$(info Building HiPE-enabled Erlang/OTP $(ERLANG_HIPE)... Please wait...)
+$(shell $(MAKE) $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native ERLANG_HIPE=$(ERLANG_HIPE) BUILD_ERLANG_OTP=1 >&2)
+endif
+
+endif
+endif
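+
+# Usage sketch: a project pins its Erlang/OTP version by setting the
+# variable before erlang.mk is processed, e.g. (the version is illustrative):
+#
+#     ERLANG_OTP = 22.0
+#
+# PATH is then prefixed with that kerl installation's bin directory and the
+# version is built on first use if missing; ERLANG_HIPE works the same way
+# for a HiPE-enabled VM.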
+
+PACKAGES += aberth
+pkg_aberth_name = aberth
+pkg_aberth_description = Generic BERT-RPC server in Erlang
+pkg_aberth_homepage = https://github.com/a13x/aberth
+pkg_aberth_fetch = git
+pkg_aberth_repo = https://github.com/a13x/aberth
+pkg_aberth_commit = master
+
+PACKAGES += active
+pkg_active_name = active
+pkg_active_description = Active development for Erlang: rebuild and reload source/binary files while the VM is running
+pkg_active_homepage = https://github.com/proger/active
+pkg_active_fetch = git
+pkg_active_repo = https://github.com/proger/active
+pkg_active_commit = master
+
+PACKAGES += actordb_core
+pkg_actordb_core_name = actordb_core
+pkg_actordb_core_description = ActorDB main source
+pkg_actordb_core_homepage = http://www.actordb.com/
+pkg_actordb_core_fetch = git
+pkg_actordb_core_repo = https://github.com/biokoda/actordb_core
+pkg_actordb_core_commit = master
+
+PACKAGES += actordb_thrift
+pkg_actordb_thrift_name = actordb_thrift
+pkg_actordb_thrift_description = Thrift API for ActorDB
+pkg_actordb_thrift_homepage = http://www.actordb.com/
+pkg_actordb_thrift_fetch = git
+pkg_actordb_thrift_repo = https://github.com/biokoda/actordb_thrift
+pkg_actordb_thrift_commit = master
+
+PACKAGES += aleppo
+pkg_aleppo_name = aleppo
+pkg_aleppo_description = Alternative Erlang Pre-Processor
+pkg_aleppo_homepage = https://github.com/ErlyORM/aleppo
+pkg_aleppo_fetch = git
+pkg_aleppo_repo = https://github.com/ErlyORM/aleppo
+pkg_aleppo_commit = master
+
+PACKAGES += alog
+pkg_alog_name = alog
+pkg_alog_description = Simply the best logging framework for Erlang
+pkg_alog_homepage = https://github.com/siberian-fast-food/alogger
+pkg_alog_fetch = git
+pkg_alog_repo = https://github.com/siberian-fast-food/alogger
+pkg_alog_commit = master
+
+PACKAGES += amqp_client
+pkg_amqp_client_name = amqp_client
+pkg_amqp_client_description = RabbitMQ Erlang AMQP client
+pkg_amqp_client_homepage = https://www.rabbitmq.com/erlang-client-user-guide.html
+pkg_amqp_client_fetch = git
+pkg_amqp_client_repo = https://github.com/rabbitmq/rabbitmq-erlang-client.git
+pkg_amqp_client_commit = master
+
+PACKAGES += annotations
+pkg_annotations_name = annotations
+pkg_annotations_description = Simple code instrumentation utilities
+pkg_annotations_homepage = https://github.com/hyperthunk/annotations
+pkg_annotations_fetch = git
+pkg_annotations_repo = https://github.com/hyperthunk/annotations
+pkg_annotations_commit = master
+
+PACKAGES += antidote
+pkg_antidote_name = antidote
+pkg_antidote_description = Large-scale computation without synchronisation
+pkg_antidote_homepage = https://syncfree.lip6.fr/
+pkg_antidote_fetch = git
+pkg_antidote_repo = https://github.com/SyncFree/antidote
+pkg_antidote_commit = master
+
+PACKAGES += apns
+pkg_apns_name = apns
+pkg_apns_description = Apple Push Notification Server for Erlang
+pkg_apns_homepage = http://inaka.github.com/apns4erl
+pkg_apns_fetch = git
+pkg_apns_repo = https://github.com/inaka/apns4erl
+pkg_apns_commit = master
+
+PACKAGES += asciideck
+pkg_asciideck_name = asciideck
+pkg_asciideck_description = Asciidoc for Erlang.
+pkg_asciideck_homepage = https://ninenines.eu
+pkg_asciideck_fetch = git
+pkg_asciideck_repo = https://github.com/ninenines/asciideck
+pkg_asciideck_commit = master
+
+PACKAGES += azdht
+pkg_azdht_name = azdht
+pkg_azdht_description = Azureus Distributed Hash Table (DHT) in Erlang
+pkg_azdht_homepage = https://github.com/arcusfelis/azdht
+pkg_azdht_fetch = git
+pkg_azdht_repo = https://github.com/arcusfelis/azdht
+pkg_azdht_commit = master
+
+PACKAGES += backoff
+pkg_backoff_name = backoff
+pkg_backoff_description = Simple exponential backoffs in Erlang
+pkg_backoff_homepage = https://github.com/ferd/backoff
+pkg_backoff_fetch = git
+pkg_backoff_repo = https://github.com/ferd/backoff
+pkg_backoff_commit = master
+
+PACKAGES += barrel_tcp
+pkg_barrel_tcp_name = barrel_tcp
+pkg_barrel_tcp_description = barrel is a generic TCP acceptor pool with low latency in Erlang.
+pkg_barrel_tcp_homepage = https://github.com/benoitc-attic/barrel_tcp
+pkg_barrel_tcp_fetch = git
+pkg_barrel_tcp_repo = https://github.com/benoitc-attic/barrel_tcp
+pkg_barrel_tcp_commit = master
+
+PACKAGES += basho_bench
+pkg_basho_bench_name = basho_bench
+pkg_basho_bench_description = A load-generation and testing tool for basically whatever you can write a returning Erlang function for.
+pkg_basho_bench_homepage = https://github.com/basho/basho_bench
+pkg_basho_bench_fetch = git
+pkg_basho_bench_repo = https://github.com/basho/basho_bench
+pkg_basho_bench_commit = master
+
+PACKAGES += bcrypt
+pkg_bcrypt_name = bcrypt
+pkg_bcrypt_description = Bcrypt Erlang / C library
+pkg_bcrypt_homepage = https://github.com/erlangpack/bcrypt
+pkg_bcrypt_fetch = git
+pkg_bcrypt_repo = https://github.com/erlangpack/bcrypt.git
+pkg_bcrypt_commit = master
+
+PACKAGES += beam
+pkg_beam_name = beam
+pkg_beam_description = BEAM emulator written in Erlang
+pkg_beam_homepage = https://github.com/tonyrog/beam
+pkg_beam_fetch = git
+pkg_beam_repo = https://github.com/tonyrog/beam
+pkg_beam_commit = master
+
+PACKAGES += beanstalk
+pkg_beanstalk_name = beanstalk
+pkg_beanstalk_description = An Erlang client for beanstalkd
+pkg_beanstalk_homepage = https://github.com/tim/erlang-beanstalk
+pkg_beanstalk_fetch = git
+pkg_beanstalk_repo = https://github.com/tim/erlang-beanstalk
+pkg_beanstalk_commit = master
+
+PACKAGES += bear
+pkg_bear_name = bear
+pkg_bear_description = a set of statistics functions for erlang
+pkg_bear_homepage = https://github.com/boundary/bear
+pkg_bear_fetch = git
+pkg_bear_repo = https://github.com/boundary/bear
+pkg_bear_commit = master
+
+PACKAGES += bertconf
+pkg_bertconf_name = bertconf
+pkg_bertconf_description = Make ETS tables out of static BERT files that are auto-reloaded
+pkg_bertconf_homepage = https://github.com/ferd/bertconf
+pkg_bertconf_fetch = git
+pkg_bertconf_repo = https://github.com/ferd/bertconf
+pkg_bertconf_commit = master
+
+PACKAGES += bifrost
+pkg_bifrost_name = bifrost
+pkg_bifrost_description = Erlang FTP Server Framework
+pkg_bifrost_homepage = https://github.com/thorstadt/bifrost
+pkg_bifrost_fetch = git
+pkg_bifrost_repo = https://github.com/thorstadt/bifrost
+pkg_bifrost_commit = master
+
+PACKAGES += binpp
+pkg_binpp_name = binpp
+pkg_binpp_description = Erlang Binary Pretty Printer
+pkg_binpp_homepage = https://github.com/jtendo/binpp
+pkg_binpp_fetch = git
+pkg_binpp_repo = https://github.com/jtendo/binpp
+pkg_binpp_commit = master
+
+PACKAGES += bisect
+pkg_bisect_name = bisect
+pkg_bisect_description = Ordered fixed-size binary dictionary in Erlang
+pkg_bisect_homepage = https://github.com/knutin/bisect
+pkg_bisect_fetch = git
+pkg_bisect_repo = https://github.com/knutin/bisect
+pkg_bisect_commit = master
+
+PACKAGES += bitcask
+pkg_bitcask_name = bitcask
+pkg_bitcask_description = because you need another key/value storage engine
+pkg_bitcask_homepage = https://github.com/basho/bitcask
+pkg_bitcask_fetch = git
+pkg_bitcask_repo = https://github.com/basho/bitcask
+pkg_bitcask_commit = develop
+
+PACKAGES += bitstore
+pkg_bitstore_name = bitstore
+pkg_bitstore_description = A document based ontology development environment
+pkg_bitstore_homepage = https://github.com/bdionne/bitstore
+pkg_bitstore_fetch = git
+pkg_bitstore_repo = https://github.com/bdionne/bitstore
+pkg_bitstore_commit = master
+
+PACKAGES += bootstrap
+pkg_bootstrap_name = bootstrap
+pkg_bootstrap_description = A simple, yet powerful Erlang cluster bootstrapping application.
+pkg_bootstrap_homepage = https://github.com/schlagert/bootstrap
+pkg_bootstrap_fetch = git
+pkg_bootstrap_repo = https://github.com/schlagert/bootstrap
+pkg_bootstrap_commit = master
+
+PACKAGES += boss
+pkg_boss_name = boss
+pkg_boss_description = Erlang web MVC, now featuring Comet
+pkg_boss_homepage = https://github.com/ChicagoBoss/ChicagoBoss
+pkg_boss_fetch = git
+pkg_boss_repo = https://github.com/ChicagoBoss/ChicagoBoss
+pkg_boss_commit = master
+
+PACKAGES += boss_db
+pkg_boss_db_name = boss_db
+pkg_boss_db_description = BossDB: a sharded, caching, pooling, evented ORM for Erlang
+pkg_boss_db_homepage = https://github.com/ErlyORM/boss_db
+pkg_boss_db_fetch = git
+pkg_boss_db_repo = https://github.com/ErlyORM/boss_db
+pkg_boss_db_commit = master
+
+PACKAGES += brod
+pkg_brod_name = brod
+pkg_brod_description = Kafka client in Erlang
+pkg_brod_homepage = https://github.com/klarna/brod
+pkg_brod_fetch = git
+pkg_brod_repo = https://github.com/klarna/brod.git
+pkg_brod_commit = master
+
+PACKAGES += bson
+pkg_bson_name = bson
+pkg_bson_description = BSON documents in Erlang, see bsonspec.org
+pkg_bson_homepage = https://github.com/comtihon/bson-erlang
+pkg_bson_fetch = git
+pkg_bson_repo = https://github.com/comtihon/bson-erlang
+pkg_bson_commit = master
+
+PACKAGES += bullet
+pkg_bullet_name = bullet
+pkg_bullet_description = Simple, reliable, efficient streaming for Cowboy.
+pkg_bullet_homepage = http://ninenines.eu
+pkg_bullet_fetch = git
+pkg_bullet_repo = https://github.com/ninenines/bullet
+pkg_bullet_commit = master
+
+PACKAGES += cache
+pkg_cache_name = cache
+pkg_cache_description = Erlang in-memory cache
+pkg_cache_homepage = https://github.com/fogfish/cache
+pkg_cache_fetch = git
+pkg_cache_repo = https://github.com/fogfish/cache
+pkg_cache_commit = master
+
+PACKAGES += cake
+pkg_cake_name = cake
+pkg_cake_description = Really simple terminal colorization
+pkg_cake_homepage = https://github.com/darach/cake-erl
+pkg_cake_fetch = git
+pkg_cake_repo = https://github.com/darach/cake-erl
+pkg_cake_commit = master
+
+PACKAGES += carotene
+pkg_carotene_name = carotene
+pkg_carotene_description = Real-time server
+pkg_carotene_homepage = https://github.com/carotene/carotene
+pkg_carotene_fetch = git
+pkg_carotene_repo = https://github.com/carotene/carotene
+pkg_carotene_commit = master
+
+PACKAGES += cberl
+pkg_cberl_name = cberl
+pkg_cberl_description = NIF based Erlang bindings for Couchbase
+pkg_cberl_homepage = https://github.com/chitika/cberl
+pkg_cberl_fetch = git
+pkg_cberl_repo = https://github.com/chitika/cberl
+pkg_cberl_commit = master
+
+PACKAGES += cecho
+pkg_cecho_name = cecho
+pkg_cecho_description = An ncurses library for Erlang
+pkg_cecho_homepage = https://github.com/mazenharake/cecho
+pkg_cecho_fetch = git
+pkg_cecho_repo = https://github.com/mazenharake/cecho
+pkg_cecho_commit = master
+
+PACKAGES += cferl
+pkg_cferl_name = cferl
+pkg_cferl_description = Rackspace / OpenStack Cloud Files Erlang Client
+pkg_cferl_homepage = https://github.com/ddossot/cferl
+pkg_cferl_fetch = git
+pkg_cferl_repo = https://github.com/ddossot/cferl
+pkg_cferl_commit = master
+
+PACKAGES += chaos_monkey
+pkg_chaos_monkey_name = chaos_monkey
+pkg_chaos_monkey_description = This is The CHAOS MONKEY. It will kill your processes.
+pkg_chaos_monkey_homepage = https://github.com/dLuna/chaos_monkey
+pkg_chaos_monkey_fetch = git
+pkg_chaos_monkey_repo = https://github.com/dLuna/chaos_monkey
+pkg_chaos_monkey_commit = master
+
+PACKAGES += check_node
+pkg_check_node_name = check_node
+pkg_check_node_description = Nagios Scripts for monitoring Riak
+pkg_check_node_homepage = https://github.com/basho-labs/riak_nagios
+pkg_check_node_fetch = git
+pkg_check_node_repo = https://github.com/basho-labs/riak_nagios
+pkg_check_node_commit = master
+
+PACKAGES += chronos
+pkg_chronos_name = chronos
+pkg_chronos_description = Timer module for Erlang that makes it easy to abstract time out of the tests.
+pkg_chronos_homepage = https://github.com/lehoff/chronos
+pkg_chronos_fetch = git
+pkg_chronos_repo = https://github.com/lehoff/chronos
+pkg_chronos_commit = master
+
+PACKAGES += chumak
+pkg_chumak_name = chumak
+pkg_chumak_description = Pure Erlang implementation of ZeroMQ Message Transport Protocol.
+pkg_chumak_homepage = http://choven.ca
+pkg_chumak_fetch = git
+pkg_chumak_repo = https://github.com/chovencorp/chumak
+pkg_chumak_commit = master
+
+PACKAGES += cl
+pkg_cl_name = cl
+pkg_cl_description = OpenCL binding for Erlang
+pkg_cl_homepage = https://github.com/tonyrog/cl
+pkg_cl_fetch = git
+pkg_cl_repo = https://github.com/tonyrog/cl
+pkg_cl_commit = master
+
+PACKAGES += clique
+pkg_clique_name = clique
+pkg_clique_description = CLI Framework for Erlang
+pkg_clique_homepage = https://github.com/basho/clique
+pkg_clique_fetch = git
+pkg_clique_repo = https://github.com/basho/clique
+pkg_clique_commit = develop
+
+PACKAGES += cloudi_core
+pkg_cloudi_core_name = cloudi_core
+pkg_cloudi_core_description = CloudI internal service runtime
+pkg_cloudi_core_homepage = http://cloudi.org/
+pkg_cloudi_core_fetch = git
+pkg_cloudi_core_repo = https://github.com/CloudI/cloudi_core
+pkg_cloudi_core_commit = master
+
+PACKAGES += cloudi_service_api_requests
+pkg_cloudi_service_api_requests_name = cloudi_service_api_requests
+pkg_cloudi_service_api_requests_description = CloudI Service API requests (JSON-RPC/Erlang-term support)
+pkg_cloudi_service_api_requests_homepage = http://cloudi.org/
+pkg_cloudi_service_api_requests_fetch = git
+pkg_cloudi_service_api_requests_repo = https://github.com/CloudI/cloudi_service_api_requests
+pkg_cloudi_service_api_requests_commit = master
+
+PACKAGES += cloudi_service_db
+pkg_cloudi_service_db_name = cloudi_service_db
+pkg_cloudi_service_db_description = CloudI Database (in-memory/testing/generic)
+pkg_cloudi_service_db_homepage = http://cloudi.org/
+pkg_cloudi_service_db_fetch = git
+pkg_cloudi_service_db_repo = https://github.com/CloudI/cloudi_service_db
+pkg_cloudi_service_db_commit = master
+
+PACKAGES += cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_name = cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_description = Cassandra CloudI Service
+pkg_cloudi_service_db_cassandra_homepage = http://cloudi.org/
+pkg_cloudi_service_db_cassandra_fetch = git
+pkg_cloudi_service_db_cassandra_repo = https://github.com/CloudI/cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_commit = master
+
+PACKAGES += cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_name = cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_description = Cassandra CQL CloudI Service
+pkg_cloudi_service_db_cassandra_cql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_cassandra_cql_fetch = git
+pkg_cloudi_service_db_cassandra_cql_repo = https://github.com/CloudI/cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_commit = master
+
+PACKAGES += cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_name = cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_description = CouchDB CloudI Service
+pkg_cloudi_service_db_couchdb_homepage = http://cloudi.org/
+pkg_cloudi_service_db_couchdb_fetch = git
+pkg_cloudi_service_db_couchdb_repo = https://github.com/CloudI/cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_commit = master
+
+PACKAGES += cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_name = cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_description = elasticsearch CloudI Service
+pkg_cloudi_service_db_elasticsearch_homepage = http://cloudi.org/
+pkg_cloudi_service_db_elasticsearch_fetch = git
+pkg_cloudi_service_db_elasticsearch_repo = https://github.com/CloudI/cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_commit = master
+
+PACKAGES += cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_name = cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_description = memcached CloudI Service
+pkg_cloudi_service_db_memcached_homepage = http://cloudi.org/
+pkg_cloudi_service_db_memcached_fetch = git
+pkg_cloudi_service_db_memcached_repo = https://github.com/CloudI/cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_commit = master
+
+PACKAGES += cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_name = cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_description = MySQL CloudI Service
+pkg_cloudi_service_db_mysql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_mysql_fetch = git
+pkg_cloudi_service_db_mysql_repo = https://github.com/CloudI/cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_commit = master
+
+PACKAGES += cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_name = cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_description = PostgreSQL CloudI Service
+pkg_cloudi_service_db_pgsql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_pgsql_fetch = git
+pkg_cloudi_service_db_pgsql_repo = https://github.com/CloudI/cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_commit = master
+
+PACKAGES += cloudi_service_db_riak
+pkg_cloudi_service_db_riak_name = cloudi_service_db_riak
+pkg_cloudi_service_db_riak_description = Riak CloudI Service
+pkg_cloudi_service_db_riak_homepage = http://cloudi.org/
+pkg_cloudi_service_db_riak_fetch = git
+pkg_cloudi_service_db_riak_repo = https://github.com/CloudI/cloudi_service_db_riak
+pkg_cloudi_service_db_riak_commit = master
+
+PACKAGES += cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_name = cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_description = Tokyo Tyrant CloudI Service
+pkg_cloudi_service_db_tokyotyrant_homepage = http://cloudi.org/
+pkg_cloudi_service_db_tokyotyrant_fetch = git
+pkg_cloudi_service_db_tokyotyrant_repo = https://github.com/CloudI/cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_commit = master
+
+PACKAGES += cloudi_service_filesystem
+pkg_cloudi_service_filesystem_name = cloudi_service_filesystem
+pkg_cloudi_service_filesystem_description = Filesystem CloudI Service
+pkg_cloudi_service_filesystem_homepage = http://cloudi.org/
+pkg_cloudi_service_filesystem_fetch = git
+pkg_cloudi_service_filesystem_repo = https://github.com/CloudI/cloudi_service_filesystem
+pkg_cloudi_service_filesystem_commit = master
+
+PACKAGES += cloudi_service_http_client
+pkg_cloudi_service_http_client_name = cloudi_service_http_client
+pkg_cloudi_service_http_client_description = HTTP client CloudI Service
+pkg_cloudi_service_http_client_homepage = http://cloudi.org/
+pkg_cloudi_service_http_client_fetch = git
+pkg_cloudi_service_http_client_repo = https://github.com/CloudI/cloudi_service_http_client
+pkg_cloudi_service_http_client_commit = master
+
+PACKAGES += cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_name = cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_description = cowboy HTTP/HTTPS CloudI Service
+pkg_cloudi_service_http_cowboy_homepage = http://cloudi.org/
+pkg_cloudi_service_http_cowboy_fetch = git
+pkg_cloudi_service_http_cowboy_repo = https://github.com/CloudI/cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_commit = master
+
+PACKAGES += cloudi_service_http_elli
+pkg_cloudi_service_http_elli_name = cloudi_service_http_elli
+pkg_cloudi_service_http_elli_description = elli HTTP CloudI Service
+pkg_cloudi_service_http_elli_homepage = http://cloudi.org/
+pkg_cloudi_service_http_elli_fetch = git
+pkg_cloudi_service_http_elli_repo = https://github.com/CloudI/cloudi_service_http_elli
+pkg_cloudi_service_http_elli_commit = master
+
+PACKAGES += cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_name = cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_description = Map/Reduce CloudI Service
+pkg_cloudi_service_map_reduce_homepage = http://cloudi.org/
+pkg_cloudi_service_map_reduce_fetch = git
+pkg_cloudi_service_map_reduce_repo = https://github.com/CloudI/cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_commit = master
+
+PACKAGES += cloudi_service_oauth1
+pkg_cloudi_service_oauth1_name = cloudi_service_oauth1
+pkg_cloudi_service_oauth1_description = OAuth v1.0 CloudI Service
+pkg_cloudi_service_oauth1_homepage = http://cloudi.org/
+pkg_cloudi_service_oauth1_fetch = git
+pkg_cloudi_service_oauth1_repo = https://github.com/CloudI/cloudi_service_oauth1
+pkg_cloudi_service_oauth1_commit = master
+
+PACKAGES += cloudi_service_queue
+pkg_cloudi_service_queue_name = cloudi_service_queue
+pkg_cloudi_service_queue_description = Persistent Queue Service
+pkg_cloudi_service_queue_homepage = http://cloudi.org/
+pkg_cloudi_service_queue_fetch = git
+pkg_cloudi_service_queue_repo = https://github.com/CloudI/cloudi_service_queue
+pkg_cloudi_service_queue_commit = master
+
+PACKAGES += cloudi_service_quorum
+pkg_cloudi_service_quorum_name = cloudi_service_quorum
+pkg_cloudi_service_quorum_description = CloudI Quorum Service
+pkg_cloudi_service_quorum_homepage = http://cloudi.org/
+pkg_cloudi_service_quorum_fetch = git
+pkg_cloudi_service_quorum_repo = https://github.com/CloudI/cloudi_service_quorum
+pkg_cloudi_service_quorum_commit = master
+
+PACKAGES += cloudi_service_router
+pkg_cloudi_service_router_name = cloudi_service_router
+pkg_cloudi_service_router_description = CloudI Router Service
+pkg_cloudi_service_router_homepage = http://cloudi.org/
+pkg_cloudi_service_router_fetch = git
+pkg_cloudi_service_router_repo = https://github.com/CloudI/cloudi_service_router
+pkg_cloudi_service_router_commit = master
+
+PACKAGES += cloudi_service_tcp
+pkg_cloudi_service_tcp_name = cloudi_service_tcp
+pkg_cloudi_service_tcp_description = TCP CloudI Service
+pkg_cloudi_service_tcp_homepage = http://cloudi.org/
+pkg_cloudi_service_tcp_fetch = git
+pkg_cloudi_service_tcp_repo = https://github.com/CloudI/cloudi_service_tcp
+pkg_cloudi_service_tcp_commit = master
+
+PACKAGES += cloudi_service_timers
+pkg_cloudi_service_timers_name = cloudi_service_timers
+pkg_cloudi_service_timers_description = Timers CloudI Service
+pkg_cloudi_service_timers_homepage = http://cloudi.org/
+pkg_cloudi_service_timers_fetch = git
+pkg_cloudi_service_timers_repo = https://github.com/CloudI/cloudi_service_timers
+pkg_cloudi_service_timers_commit = master
+
+PACKAGES += cloudi_service_udp
+pkg_cloudi_service_udp_name = cloudi_service_udp
+pkg_cloudi_service_udp_description = UDP CloudI Service
+pkg_cloudi_service_udp_homepage = http://cloudi.org/
+pkg_cloudi_service_udp_fetch = git
+pkg_cloudi_service_udp_repo = https://github.com/CloudI/cloudi_service_udp
+pkg_cloudi_service_udp_commit = master
+
+PACKAGES += cloudi_service_validate
+pkg_cloudi_service_validate_name = cloudi_service_validate
+pkg_cloudi_service_validate_description = CloudI Validate Service
+pkg_cloudi_service_validate_homepage = http://cloudi.org/
+pkg_cloudi_service_validate_fetch = git
+pkg_cloudi_service_validate_repo = https://github.com/CloudI/cloudi_service_validate
+pkg_cloudi_service_validate_commit = master
+
+PACKAGES += cloudi_service_zeromq
+pkg_cloudi_service_zeromq_name = cloudi_service_zeromq
+pkg_cloudi_service_zeromq_description = ZeroMQ CloudI Service
+pkg_cloudi_service_zeromq_homepage = http://cloudi.org/
+pkg_cloudi_service_zeromq_fetch = git
+pkg_cloudi_service_zeromq_repo = https://github.com/CloudI/cloudi_service_zeromq
+pkg_cloudi_service_zeromq_commit = master
+
+PACKAGES += cluster_info
+pkg_cluster_info_name = cluster_info
+pkg_cluster_info_description = Fork of Hibari's nifty cluster_info OTP app
+pkg_cluster_info_homepage = https://github.com/basho/cluster_info
+pkg_cluster_info_fetch = git
+pkg_cluster_info_repo = https://github.com/basho/cluster_info
+pkg_cluster_info_commit = master
+
+PACKAGES += color
+pkg_color_name = color
+pkg_color_description = ANSI colors for your Erlang
+pkg_color_homepage = https://github.com/julianduque/erlang-color
+pkg_color_fetch = git
+pkg_color_repo = https://github.com/julianduque/erlang-color
+pkg_color_commit = master
+
+PACKAGES += confetti
+pkg_confetti_name = confetti
+pkg_confetti_description = Erlang configuration provider / application:get_env/2 on steroids
+pkg_confetti_homepage = https://github.com/jtendo/confetti
+pkg_confetti_fetch = git
+pkg_confetti_repo = https://github.com/jtendo/confetti
+pkg_confetti_commit = master
+
+PACKAGES += couchbeam
+pkg_couchbeam_name = couchbeam
+pkg_couchbeam_description = Apache CouchDB client in Erlang
+pkg_couchbeam_homepage = https://github.com/benoitc/couchbeam
+pkg_couchbeam_fetch = git
+pkg_couchbeam_repo = https://github.com/benoitc/couchbeam
+pkg_couchbeam_commit = master
+
+PACKAGES += covertool
+pkg_covertool_name = covertool
+pkg_covertool_description = Tool to convert Erlang cover data files into Cobertura XML reports
+pkg_covertool_homepage = https://github.com/idubrov/covertool
+pkg_covertool_fetch = git
+pkg_covertool_repo = https://github.com/idubrov/covertool
+pkg_covertool_commit = master
+
+PACKAGES += cowboy
+pkg_cowboy_name = cowboy
+pkg_cowboy_description = Small, fast and modular HTTP server.
+pkg_cowboy_homepage = http://ninenines.eu
+pkg_cowboy_fetch = git
+pkg_cowboy_repo = https://github.com/ninenines/cowboy
+pkg_cowboy_commit = 1.0.4
+
+PACKAGES += cowdb
+pkg_cowdb_name = cowdb
+pkg_cowdb_description = Pure Key/Value database library for Erlang Applications
+pkg_cowdb_homepage = https://github.com/refuge/cowdb
+pkg_cowdb_fetch = git
+pkg_cowdb_repo = https://github.com/refuge/cowdb
+pkg_cowdb_commit = master
+
+PACKAGES += cowlib
+pkg_cowlib_name = cowlib
+pkg_cowlib_description = Support library for manipulating Web protocols.
+pkg_cowlib_homepage = http://ninenines.eu
+pkg_cowlib_fetch = git
+pkg_cowlib_repo = https://github.com/ninenines/cowlib
+pkg_cowlib_commit = 1.0.2
+
+PACKAGES += cpg
+pkg_cpg_name = cpg
+pkg_cpg_description = CloudI Process Groups
+pkg_cpg_homepage = https://github.com/okeuday/cpg
+pkg_cpg_fetch = git
+pkg_cpg_repo = https://github.com/okeuday/cpg
+pkg_cpg_commit = master
+
+PACKAGES += cqerl
+pkg_cqerl_name = cqerl
+pkg_cqerl_description = Native Erlang CQL client for Cassandra
+pkg_cqerl_homepage = https://matehat.github.io/cqerl/
+pkg_cqerl_fetch = git
+pkg_cqerl_repo = https://github.com/matehat/cqerl
+pkg_cqerl_commit = master
+
+PACKAGES += cr
+pkg_cr_name = cr
+pkg_cr_description = Chain Replication
+pkg_cr_homepage = https://synrc.com/apps/cr/doc/cr.htm
+pkg_cr_fetch = git
+pkg_cr_repo = https://github.com/spawnproc/cr
+pkg_cr_commit = master
+
+PACKAGES += cuttlefish
+pkg_cuttlefish_name = cuttlefish
+pkg_cuttlefish_description = never lose your childlike sense of wonder baby cuttlefish, promise me?
+pkg_cuttlefish_homepage = https://github.com/basho/cuttlefish
+pkg_cuttlefish_fetch = git
+pkg_cuttlefish_repo = https://github.com/basho/cuttlefish
+pkg_cuttlefish_commit = master
+
+PACKAGES += damocles
+pkg_damocles_name = damocles
+pkg_damocles_description = Erlang library for generating adversarial network conditions for QAing distributed applications/systems on a single Linux box.
+pkg_damocles_homepage = https://github.com/lostcolony/damocles
+pkg_damocles_fetch = git
+pkg_damocles_repo = https://github.com/lostcolony/damocles
+pkg_damocles_commit = master
+
+PACKAGES += debbie
+pkg_debbie_name = debbie
+pkg_debbie_description = .DEB Built In Erlang
+pkg_debbie_homepage = https://github.com/crownedgrouse/debbie
+pkg_debbie_fetch = git
+pkg_debbie_repo = https://github.com/crownedgrouse/debbie
+pkg_debbie_commit = master
+
+PACKAGES += decimal
+pkg_decimal_name = decimal
+pkg_decimal_description = An Erlang decimal arithmetic library
+pkg_decimal_homepage = https://github.com/tim/erlang-decimal
+pkg_decimal_fetch = git
+pkg_decimal_repo = https://github.com/tim/erlang-decimal
+pkg_decimal_commit = master
+
+PACKAGES += detergent
+pkg_detergent_name = detergent
+pkg_detergent_description = An emulsifying Erlang SOAP library
+pkg_detergent_homepage = https://github.com/devinus/detergent
+pkg_detergent_fetch = git
+pkg_detergent_repo = https://github.com/devinus/detergent
+pkg_detergent_commit = master
+
+PACKAGES += detest
+pkg_detest_name = detest
+pkg_detest_description = Tool for running tests on a cluster of erlang nodes
+pkg_detest_homepage = https://github.com/biokoda/detest
+pkg_detest_fetch = git
+pkg_detest_repo = https://github.com/biokoda/detest
+pkg_detest_commit = master
+
+PACKAGES += dh_date
+pkg_dh_date_name = dh_date
+pkg_dh_date_description = Date formatting / parsing library for erlang
+pkg_dh_date_homepage = https://github.com/daleharvey/dh_date
+pkg_dh_date_fetch = git
+pkg_dh_date_repo = https://github.com/daleharvey/dh_date
+pkg_dh_date_commit = master
+
+PACKAGES += dirbusterl
+pkg_dirbusterl_name = dirbusterl
+pkg_dirbusterl_description = DirBuster successor in Erlang
+pkg_dirbusterl_homepage = https://github.com/silentsignal/DirBustErl
+pkg_dirbusterl_fetch = git
+pkg_dirbusterl_repo = https://github.com/silentsignal/DirBustErl
+pkg_dirbusterl_commit = master
+
+PACKAGES += dispcount
+pkg_dispcount_name = dispcount
+pkg_dispcount_description = Erlang task dispatcher based on ETS counters.
+pkg_dispcount_homepage = https://github.com/ferd/dispcount
+pkg_dispcount_fetch = git
+pkg_dispcount_repo = https://github.com/ferd/dispcount
+pkg_dispcount_commit = master
+
+PACKAGES += dlhttpc
+pkg_dlhttpc_name = dlhttpc
+pkg_dlhttpc_description = dispcount-based lhttpc fork for massive amounts of requests to limited endpoints
+pkg_dlhttpc_homepage = https://github.com/ferd/dlhttpc
+pkg_dlhttpc_fetch = git
+pkg_dlhttpc_repo = https://github.com/ferd/dlhttpc
+pkg_dlhttpc_commit = master
+
+PACKAGES += dns
+pkg_dns_name = dns
+pkg_dns_description = Erlang DNS library
+pkg_dns_homepage = https://github.com/aetrion/dns_erlang
+pkg_dns_fetch = git
+pkg_dns_repo = https://github.com/aetrion/dns_erlang
+pkg_dns_commit = master
+
+PACKAGES += dnssd
+pkg_dnssd_name = dnssd
+pkg_dnssd_description = Erlang interface to Apple's Bonjour DNS Service Discovery implementation
+pkg_dnssd_homepage = https://github.com/benoitc/dnssd_erlang
+pkg_dnssd_fetch = git
+pkg_dnssd_repo = https://github.com/benoitc/dnssd_erlang
+pkg_dnssd_commit = master
+
+PACKAGES += dynamic_compile
+pkg_dynamic_compile_name = dynamic_compile
+pkg_dynamic_compile_description = compile and load erlang modules from string input
+pkg_dynamic_compile_homepage = https://github.com/jkvor/dynamic_compile
+pkg_dynamic_compile_fetch = git
+pkg_dynamic_compile_repo = https://github.com/jkvor/dynamic_compile
+pkg_dynamic_compile_commit = master
+
+PACKAGES += e2
+pkg_e2_name = e2
+pkg_e2_description = Library to simplify writing correct OTP applications.
+pkg_e2_homepage = http://e2project.org
+pkg_e2_fetch = git
+pkg_e2_repo = https://github.com/gar1t/e2
+pkg_e2_commit = master
+
+PACKAGES += eamf
+pkg_eamf_name = eamf
+pkg_eamf_description = eAMF provides Action Message Format (AMF) support for Erlang
+pkg_eamf_homepage = https://github.com/mrinalwadhwa/eamf
+pkg_eamf_fetch = git
+pkg_eamf_repo = https://github.com/mrinalwadhwa/eamf
+pkg_eamf_commit = master
+
+PACKAGES += eavro
+pkg_eavro_name = eavro
+pkg_eavro_description = Apache Avro encoder/decoder
+pkg_eavro_homepage = https://github.com/SIfoxDevTeam/eavro
+pkg_eavro_fetch = git
+pkg_eavro_repo = https://github.com/SIfoxDevTeam/eavro
+pkg_eavro_commit = master
+
+PACKAGES += ecapnp
+pkg_ecapnp_name = ecapnp
+pkg_ecapnp_description = Cap'n Proto library for Erlang
+pkg_ecapnp_homepage = https://github.com/kaos/ecapnp
+pkg_ecapnp_fetch = git
+pkg_ecapnp_repo = https://github.com/kaos/ecapnp
+pkg_ecapnp_commit = master
+
+PACKAGES += econfig
+pkg_econfig_name = econfig
+pkg_econfig_description = simple Erlang config handler using INI files
+pkg_econfig_homepage = https://github.com/benoitc/econfig
+pkg_econfig_fetch = git
+pkg_econfig_repo = https://github.com/benoitc/econfig
+pkg_econfig_commit = master
+
+PACKAGES += edate
+pkg_edate_name = edate
+pkg_edate_description = date manipulation library for erlang
+pkg_edate_homepage = https://github.com/dweldon/edate
+pkg_edate_fetch = git
+pkg_edate_repo = https://github.com/dweldon/edate
+pkg_edate_commit = master
+
+PACKAGES += edgar
+pkg_edgar_name = edgar
+pkg_edgar_description = Erlang Does GNU AR
+pkg_edgar_homepage = https://github.com/crownedgrouse/edgar
+pkg_edgar_fetch = git
+pkg_edgar_repo = https://github.com/crownedgrouse/edgar
+pkg_edgar_commit = master
+
+PACKAGES += edis
+pkg_edis_name = edis
+pkg_edis_description = An Erlang implementation of Redis KV Store
+pkg_edis_homepage = http://inaka.github.com/edis/
+pkg_edis_fetch = git
+pkg_edis_repo = https://github.com/inaka/edis
+pkg_edis_commit = master
+
+PACKAGES += edns
+pkg_edns_name = edns
+pkg_edns_description = Erlang/OTP DNS server
+pkg_edns_homepage = https://github.com/hcvst/erlang-dns
+pkg_edns_fetch = git
+pkg_edns_repo = https://github.com/hcvst/erlang-dns
+pkg_edns_commit = master
+
+PACKAGES += edown
+pkg_edown_name = edown
+pkg_edown_description = EDoc extension for generating Github-flavored Markdown
+pkg_edown_homepage = https://github.com/uwiger/edown
+pkg_edown_fetch = git
+pkg_edown_repo = https://github.com/uwiger/edown
+pkg_edown_commit = master
+
+PACKAGES += eep
+pkg_eep_name = eep
+pkg_eep_description = Erlang Easy Profiling (eep) application provides a way to analyze application performance and call hierarchy
+pkg_eep_homepage = https://github.com/virtan/eep
+pkg_eep_fetch = git
+pkg_eep_repo = https://github.com/virtan/eep
+pkg_eep_commit = master
+
+PACKAGES += eep_app
+pkg_eep_app_name = eep_app
+pkg_eep_app_description = Embedded Event Processing
+pkg_eep_app_homepage = https://github.com/darach/eep-erl
+pkg_eep_app_fetch = git
+pkg_eep_app_repo = https://github.com/darach/eep-erl
+pkg_eep_app_commit = master
+
+PACKAGES += efene
+pkg_efene_name = efene
+pkg_efene_description = Alternative syntax for the Erlang Programming Language focusing on simplicity, ease of use and programmer UX
+pkg_efene_homepage = https://github.com/efene/efene
+pkg_efene_fetch = git
+pkg_efene_repo = https://github.com/efene/efene
+pkg_efene_commit = master
+
+PACKAGES += egeoip
+pkg_egeoip_name = egeoip
+pkg_egeoip_description = Erlang IP Geolocation module, currently supporting the MaxMind GeoLite City Database.
+pkg_egeoip_homepage = https://github.com/mochi/egeoip
+pkg_egeoip_fetch = git
+pkg_egeoip_repo = https://github.com/mochi/egeoip
+pkg_egeoip_commit = master
+
+PACKAGES += ehsa
+pkg_ehsa_name = ehsa
+pkg_ehsa_description = Erlang HTTP server basic and digest authentication modules
+pkg_ehsa_homepage = https://bitbucket.org/a12n/ehsa
+pkg_ehsa_fetch = hg
+pkg_ehsa_repo = https://bitbucket.org/a12n/ehsa
+pkg_ehsa_commit = default
+
+PACKAGES += ej
+pkg_ej_name = ej
+pkg_ej_description = Helper module for working with Erlang terms representing JSON
+pkg_ej_homepage = https://github.com/seth/ej
+pkg_ej_fetch = git
+pkg_ej_repo = https://github.com/seth/ej
+pkg_ej_commit = master
+
+PACKAGES += ejabberd
+pkg_ejabberd_name = ejabberd
+pkg_ejabberd_description = Robust, ubiquitous and massively scalable Jabber / XMPP Instant Messaging platform
+pkg_ejabberd_homepage = https://github.com/processone/ejabberd
+pkg_ejabberd_fetch = git
+pkg_ejabberd_repo = https://github.com/processone/ejabberd
+pkg_ejabberd_commit = master
+
+PACKAGES += ejwt
+pkg_ejwt_name = ejwt
+pkg_ejwt_description = erlang library for JSON Web Token
+pkg_ejwt_homepage = https://github.com/artefactop/ejwt
+pkg_ejwt_fetch = git
+pkg_ejwt_repo = https://github.com/artefactop/ejwt
+pkg_ejwt_commit = master
+
+PACKAGES += ekaf
+pkg_ekaf_name = ekaf
+pkg_ekaf_description = A minimal, high-performance Kafka client in Erlang.
+pkg_ekaf_homepage = https://github.com/helpshift/ekaf
+pkg_ekaf_fetch = git
+pkg_ekaf_repo = https://github.com/helpshift/ekaf
+pkg_ekaf_commit = master
+
+PACKAGES += elarm
+pkg_elarm_name = elarm
+pkg_elarm_description = Alarm Manager for Erlang.
+pkg_elarm_homepage = https://github.com/esl/elarm
+pkg_elarm_fetch = git
+pkg_elarm_repo = https://github.com/esl/elarm
+pkg_elarm_commit = master
+
+PACKAGES += eleveldb
+pkg_eleveldb_name = eleveldb
+pkg_eleveldb_description = Erlang LevelDB API
+pkg_eleveldb_homepage = https://github.com/basho/eleveldb
+pkg_eleveldb_fetch = git
+pkg_eleveldb_repo = https://github.com/basho/eleveldb
+pkg_eleveldb_commit = master
+
+PACKAGES += elixir
+pkg_elixir_name = elixir
+pkg_elixir_description = Elixir is a dynamic, functional language designed for building scalable and maintainable applications
+pkg_elixir_homepage = https://elixir-lang.org/
+pkg_elixir_fetch = git
+pkg_elixir_repo = https://github.com/elixir-lang/elixir
+pkg_elixir_commit = master
+
+PACKAGES += elli
+pkg_elli_name = elli
+pkg_elli_description = Simple, robust and performant Erlang web server
+pkg_elli_homepage = https://github.com/elli-lib/elli
+pkg_elli_fetch = git
+pkg_elli_repo = https://github.com/elli-lib/elli
+pkg_elli_commit = master
+
+PACKAGES += elvis
+pkg_elvis_name = elvis
+pkg_elvis_description = Erlang Style Reviewer
+pkg_elvis_homepage = https://github.com/inaka/elvis
+pkg_elvis_fetch = git
+pkg_elvis_repo = https://github.com/inaka/elvis
+pkg_elvis_commit = master
+
+PACKAGES += emagick
+pkg_emagick_name = emagick
+pkg_emagick_description = Wrapper for Graphics/ImageMagick command line tool.
+pkg_emagick_homepage = https://github.com/kivra/emagick
+pkg_emagick_fetch = git
+pkg_emagick_repo = https://github.com/kivra/emagick
+pkg_emagick_commit = master
+
+PACKAGES += emysql
+pkg_emysql_name = emysql
+pkg_emysql_description = Stable, pure Erlang MySQL driver.
+pkg_emysql_homepage = https://github.com/Eonblast/Emysql
+pkg_emysql_fetch = git
+pkg_emysql_repo = https://github.com/Eonblast/Emysql
+pkg_emysql_commit = master
+
+PACKAGES += enm
+pkg_enm_name = enm
+pkg_enm_description = Erlang driver for nanomsg
+pkg_enm_homepage = https://github.com/basho/enm
+pkg_enm_fetch = git
+pkg_enm_repo = https://github.com/basho/enm
+pkg_enm_commit = master
+
+PACKAGES += entop
+pkg_entop_name = entop
+pkg_entop_description = A top-like tool for monitoring an Erlang node
+pkg_entop_homepage = https://github.com/mazenharake/entop
+pkg_entop_fetch = git
+pkg_entop_repo = https://github.com/mazenharake/entop
+pkg_entop_commit = master
+
+PACKAGES += epcap
+pkg_epcap_name = epcap
+pkg_epcap_description = Erlang packet capture interface using pcap
+pkg_epcap_homepage = https://github.com/msantos/epcap
+pkg_epcap_fetch = git
+pkg_epcap_repo = https://github.com/msantos/epcap
+pkg_epcap_commit = master
+
+PACKAGES += eper
+pkg_eper_name = eper
+pkg_eper_description = Erlang performance and debugging tools.
+pkg_eper_homepage = https://github.com/massemanet/eper
+pkg_eper_fetch = git
+pkg_eper_repo = https://github.com/massemanet/eper
+pkg_eper_commit = master
+
+PACKAGES += epgsql
+pkg_epgsql_name = epgsql
+pkg_epgsql_description = Erlang PostgreSQL client library.
+pkg_epgsql_homepage = https://github.com/epgsql/epgsql
+pkg_epgsql_fetch = git
+pkg_epgsql_repo = https://github.com/epgsql/epgsql
+pkg_epgsql_commit = master
+
+PACKAGES += episcina
+pkg_episcina_name = episcina
+pkg_episcina_description = A simple non-intrusive resource pool for connections
+pkg_episcina_homepage = https://github.com/erlware/episcina
+pkg_episcina_fetch = git
+pkg_episcina_repo = https://github.com/erlware/episcina
+pkg_episcina_commit = master
+
+PACKAGES += eplot
+pkg_eplot_name = eplot
+pkg_eplot_description = A plot engine written in erlang.
+pkg_eplot_homepage = https://github.com/psyeugenic/eplot
+pkg_eplot_fetch = git
+pkg_eplot_repo = https://github.com/psyeugenic/eplot
+pkg_eplot_commit = master
+
+PACKAGES += epocxy
+pkg_epocxy_name = epocxy
+pkg_epocxy_description = Erlang Patterns of Concurrency
+pkg_epocxy_homepage = https://github.com/duomark/epocxy
+pkg_epocxy_fetch = git
+pkg_epocxy_repo = https://github.com/duomark/epocxy
+pkg_epocxy_commit = master
+
+PACKAGES += epubnub
+pkg_epubnub_name = epubnub
+pkg_epubnub_description = Erlang PubNub API
+pkg_epubnub_homepage = https://github.com/tsloughter/epubnub
+pkg_epubnub_fetch = git
+pkg_epubnub_repo = https://github.com/tsloughter/epubnub
+pkg_epubnub_commit = master
+
+PACKAGES += eqm
+pkg_eqm_name = eqm
+pkg_eqm_description = Erlang pub sub with supply-demand channels
+pkg_eqm_homepage = https://github.com/loucash/eqm
+pkg_eqm_fetch = git
+pkg_eqm_repo = https://github.com/loucash/eqm
+pkg_eqm_commit = master
+
+PACKAGES += eredis
+pkg_eredis_name = eredis
+pkg_eredis_description = Erlang Redis client
+pkg_eredis_homepage = https://github.com/wooga/eredis
+pkg_eredis_fetch = git
+pkg_eredis_repo = https://github.com/wooga/eredis
+pkg_eredis_commit = master
+
+PACKAGES += eredis_pool
+pkg_eredis_pool_name = eredis_pool
+pkg_eredis_pool_description = eredis_pool is a pool of Redis clients, using eredis and poolboy.
+pkg_eredis_pool_homepage = https://github.com/hiroeorz/eredis_pool
+pkg_eredis_pool_fetch = git
+pkg_eredis_pool_repo = https://github.com/hiroeorz/eredis_pool
+pkg_eredis_pool_commit = master
+
+PACKAGES += erl_streams
+pkg_erl_streams_name = erl_streams
+pkg_erl_streams_description = Streams in Erlang
+pkg_erl_streams_homepage = https://github.com/epappas/erl_streams
+pkg_erl_streams_fetch = git
+pkg_erl_streams_repo = https://github.com/epappas/erl_streams
+pkg_erl_streams_commit = master
+
+PACKAGES += erlang_cep
+pkg_erlang_cep_name = erlang_cep
+pkg_erlang_cep_description = A basic CEP package written in erlang
+pkg_erlang_cep_homepage = https://github.com/danmacklin/erlang_cep
+pkg_erlang_cep_fetch = git
+pkg_erlang_cep_repo = https://github.com/danmacklin/erlang_cep
+pkg_erlang_cep_commit = master
+
+PACKAGES += erlang_js
+pkg_erlang_js_name = erlang_js
+pkg_erlang_js_description = A linked-in driver for Erlang to Mozilla's Spidermonkey Javascript runtime.
+pkg_erlang_js_homepage = https://github.com/basho/erlang_js
+pkg_erlang_js_fetch = git
+pkg_erlang_js_repo = https://github.com/basho/erlang_js
+pkg_erlang_js_commit = master
+
+PACKAGES += erlang_localtime
+pkg_erlang_localtime_name = erlang_localtime
+pkg_erlang_localtime_description = Erlang library for conversion from one local time to another
+pkg_erlang_localtime_homepage = https://github.com/dmitryme/erlang_localtime
+pkg_erlang_localtime_fetch = git
+pkg_erlang_localtime_repo = https://github.com/dmitryme/erlang_localtime
+pkg_erlang_localtime_commit = master
+
+PACKAGES += erlang_smtp
+pkg_erlang_smtp_name = erlang_smtp
+pkg_erlang_smtp_description = Erlang SMTP and POP3 server code.
+pkg_erlang_smtp_homepage = https://github.com/tonyg/erlang-smtp
+pkg_erlang_smtp_fetch = git
+pkg_erlang_smtp_repo = https://github.com/tonyg/erlang-smtp
+pkg_erlang_smtp_commit = master
+
+PACKAGES += erlang_term
+pkg_erlang_term_name = erlang_term
+pkg_erlang_term_description = Erlang Term Info
+pkg_erlang_term_homepage = https://github.com/okeuday/erlang_term
+pkg_erlang_term_fetch = git
+pkg_erlang_term_repo = https://github.com/okeuday/erlang_term
+pkg_erlang_term_commit = master
+
+PACKAGES += erlastic_search
+pkg_erlastic_search_name = erlastic_search
+pkg_erlastic_search_description = An Erlang app for communicating with Elasticsearch's REST interface.
+pkg_erlastic_search_homepage = https://github.com/tsloughter/erlastic_search
+pkg_erlastic_search_fetch = git
+pkg_erlastic_search_repo = https://github.com/tsloughter/erlastic_search
+pkg_erlastic_search_commit = master
+
+PACKAGES += erlasticsearch
+pkg_erlasticsearch_name = erlasticsearch
+pkg_erlasticsearch_description = Erlang thrift interface to elastic_search
+pkg_erlasticsearch_homepage = https://github.com/dieswaytoofast/erlasticsearch
+pkg_erlasticsearch_fetch = git
+pkg_erlasticsearch_repo = https://github.com/dieswaytoofast/erlasticsearch
+pkg_erlasticsearch_commit = master
+
+PACKAGES += erlbrake
+pkg_erlbrake_name = erlbrake
+pkg_erlbrake_description = Erlang Airbrake notification client
+pkg_erlbrake_homepage = https://github.com/kenpratt/erlbrake
+pkg_erlbrake_fetch = git
+pkg_erlbrake_repo = https://github.com/kenpratt/erlbrake
+pkg_erlbrake_commit = master
+
+PACKAGES += erlcloud
+pkg_erlcloud_name = erlcloud
+pkg_erlcloud_description = Cloud Computing library for erlang (Amazon EC2, S3, SQS, SimpleDB, Mechanical Turk, ELB)
+pkg_erlcloud_homepage = https://github.com/gleber/erlcloud
+pkg_erlcloud_fetch = git
+pkg_erlcloud_repo = https://github.com/gleber/erlcloud
+pkg_erlcloud_commit = master
+
+PACKAGES += erlcron
+pkg_erlcron_name = erlcron
+pkg_erlcron_description = Erlang cronish system
+pkg_erlcron_homepage = https://github.com/erlware/erlcron
+pkg_erlcron_fetch = git
+pkg_erlcron_repo = https://github.com/erlware/erlcron
+pkg_erlcron_commit = master
+
+PACKAGES += erldb
+pkg_erldb_name = erldb
+pkg_erldb_description = ORM (Object-relational mapping) application implemented in Erlang
+pkg_erldb_homepage = http://erldb.org
+pkg_erldb_fetch = git
+pkg_erldb_repo = https://github.com/erldb/erldb
+pkg_erldb_commit = master
+
+PACKAGES += erldis
+pkg_erldis_name = erldis
+pkg_erldis_description = redis erlang client library
+pkg_erldis_homepage = https://github.com/cstar/erldis
+pkg_erldis_fetch = git
+pkg_erldis_repo = https://github.com/cstar/erldis
+pkg_erldis_commit = master
+
+PACKAGES += erldns
+pkg_erldns_name = erldns
+pkg_erldns_description = DNS server, in erlang.
+pkg_erldns_homepage = https://github.com/aetrion/erl-dns
+pkg_erldns_fetch = git
+pkg_erldns_repo = https://github.com/aetrion/erl-dns
+pkg_erldns_commit = master
+
+PACKAGES += erldocker
+pkg_erldocker_name = erldocker
+pkg_erldocker_description = Docker Remote API client for Erlang
+pkg_erldocker_homepage = https://github.com/proger/erldocker
+pkg_erldocker_fetch = git
+pkg_erldocker_repo = https://github.com/proger/erldocker
+pkg_erldocker_commit = master
+
+PACKAGES += erlfsmon
+pkg_erlfsmon_name = erlfsmon
+pkg_erlfsmon_description = Erlang filesystem event watcher for Linux and OSX
+pkg_erlfsmon_homepage = https://github.com/proger/erlfsmon
+pkg_erlfsmon_fetch = git
+pkg_erlfsmon_repo = https://github.com/proger/erlfsmon
+pkg_erlfsmon_commit = master
+
+PACKAGES += erlgit
+pkg_erlgit_name = erlgit
+pkg_erlgit_description = Erlang convenience wrapper around git executable
+pkg_erlgit_homepage = https://github.com/gleber/erlgit
+pkg_erlgit_fetch = git
+pkg_erlgit_repo = https://github.com/gleber/erlgit
+pkg_erlgit_commit = master
+
+PACKAGES += erlguten
+pkg_erlguten_name = erlguten
+pkg_erlguten_description = ErlGuten is a system for high-quality typesetting, written purely in Erlang.
+pkg_erlguten_homepage = https://github.com/richcarl/erlguten
+pkg_erlguten_fetch = git
+pkg_erlguten_repo = https://github.com/richcarl/erlguten
+pkg_erlguten_commit = master
+
+PACKAGES += erlmc
+pkg_erlmc_name = erlmc
+pkg_erlmc_description = Erlang memcached binary protocol client
+pkg_erlmc_homepage = https://github.com/jkvor/erlmc
+pkg_erlmc_fetch = git
+pkg_erlmc_repo = https://github.com/jkvor/erlmc
+pkg_erlmc_commit = master
+
+PACKAGES += erlmongo
+pkg_erlmongo_name = erlmongo
+pkg_erlmongo_description = Record based Erlang driver for MongoDB with gridfs support
+pkg_erlmongo_homepage = https://github.com/SergejJurecko/erlmongo
+pkg_erlmongo_fetch = git
+pkg_erlmongo_repo = https://github.com/SergejJurecko/erlmongo
+pkg_erlmongo_commit = master
+
+PACKAGES += erlog
+pkg_erlog_name = erlog
+pkg_erlog_description = Prolog interpreter in and for Erlang
+pkg_erlog_homepage = https://github.com/rvirding/erlog
+pkg_erlog_fetch = git
+pkg_erlog_repo = https://github.com/rvirding/erlog
+pkg_erlog_commit = master
+
+PACKAGES += erlpass
+pkg_erlpass_name = erlpass
+pkg_erlpass_description = A library to handle password hashing and changing in a safe manner, independent from any kind of storage whatsoever.
+pkg_erlpass_homepage = https://github.com/ferd/erlpass
+pkg_erlpass_fetch = git
+pkg_erlpass_repo = https://github.com/ferd/erlpass
+pkg_erlpass_commit = master
+
+PACKAGES += erlport
+pkg_erlport_name = erlport
+pkg_erlport_description = ErlPort - connect Erlang to other languages
+pkg_erlport_homepage = https://github.com/hdima/erlport
+pkg_erlport_fetch = git
+pkg_erlport_repo = https://github.com/hdima/erlport
+pkg_erlport_commit = master
+
+PACKAGES += erlsh
+pkg_erlsh_name = erlsh
+pkg_erlsh_description = Erlang shell tools
+pkg_erlsh_homepage = https://github.com/proger/erlsh
+pkg_erlsh_fetch = git
+pkg_erlsh_repo = https://github.com/proger/erlsh
+pkg_erlsh_commit = master
+
+PACKAGES += erlsha2
+pkg_erlsha2_name = erlsha2
+pkg_erlsha2_description = SHA-224, SHA-256, SHA-384, SHA-512 implemented in Erlang NIFs.
+pkg_erlsha2_homepage = https://github.com/vinoski/erlsha2
+pkg_erlsha2_fetch = git
+pkg_erlsha2_repo = https://github.com/vinoski/erlsha2
+pkg_erlsha2_commit = master
+
+PACKAGES += erlsom
+pkg_erlsom_name = erlsom
+pkg_erlsom_description = XML parser for Erlang
+pkg_erlsom_homepage = https://github.com/willemdj/erlsom
+pkg_erlsom_fetch = git
+pkg_erlsom_repo = https://github.com/willemdj/erlsom
+pkg_erlsom_commit = master
+
+PACKAGES += erlubi
+pkg_erlubi_name = erlubi
+pkg_erlubi_description = Ubigraph Erlang Client (and Process Visualizer)
+pkg_erlubi_homepage = https://github.com/krestenkrab/erlubi
+pkg_erlubi_fetch = git
+pkg_erlubi_repo = https://github.com/krestenkrab/erlubi
+pkg_erlubi_commit = master
+
+PACKAGES += erlvolt
+pkg_erlvolt_name = erlvolt
+pkg_erlvolt_description = VoltDB Erlang Client Driver
+pkg_erlvolt_homepage = https://github.com/VoltDB/voltdb-client-erlang
+pkg_erlvolt_fetch = git
+pkg_erlvolt_repo = https://github.com/VoltDB/voltdb-client-erlang
+pkg_erlvolt_commit = master
+
+PACKAGES += erlware_commons
+pkg_erlware_commons_name = erlware_commons
+pkg_erlware_commons_description = Erlware Commons is an Erlware project focused on all aspects of reusable Erlang components.
+pkg_erlware_commons_homepage = https://github.com/erlware/erlware_commons
+pkg_erlware_commons_fetch = git
+pkg_erlware_commons_repo = https://github.com/erlware/erlware_commons
+pkg_erlware_commons_commit = master
+
+PACKAGES += erlydtl
+pkg_erlydtl_name = erlydtl
+pkg_erlydtl_description = Django Template Language for Erlang.
+pkg_erlydtl_homepage = https://github.com/erlydtl/erlydtl
+pkg_erlydtl_fetch = git
+pkg_erlydtl_repo = https://github.com/erlydtl/erlydtl
+pkg_erlydtl_commit = master
+
+PACKAGES += errd
+pkg_errd_name = errd
+pkg_errd_description = Erlang RRDTool library
+pkg_errd_homepage = https://github.com/archaelus/errd
+pkg_errd_fetch = git
+pkg_errd_repo = https://github.com/archaelus/errd
+pkg_errd_commit = master
+
+PACKAGES += erserve
+pkg_erserve_name = erserve
+pkg_erserve_description = Erlang/Rserve communication interface
+pkg_erserve_homepage = https://github.com/del/erserve
+pkg_erserve_fetch = git
+pkg_erserve_repo = https://github.com/del/erserve
+pkg_erserve_commit = master
+
+PACKAGES += erwa
+pkg_erwa_name = erwa
+pkg_erwa_description = A WAMP router and client written in Erlang.
+pkg_erwa_homepage = https://github.com/bwegh/erwa
+pkg_erwa_fetch = git
+pkg_erwa_repo = https://github.com/bwegh/erwa
+pkg_erwa_commit = master
+
+PACKAGES += escalus
+pkg_escalus_name = escalus
+pkg_escalus_description = An XMPP client library in Erlang for conveniently testing XMPP servers
+pkg_escalus_homepage = https://github.com/esl/escalus
+pkg_escalus_fetch = git
+pkg_escalus_repo = https://github.com/esl/escalus
+pkg_escalus_commit = master
+
+PACKAGES += esh_mk
+pkg_esh_mk_name = esh_mk
+pkg_esh_mk_description = esh template engine plugin for erlang.mk
+pkg_esh_mk_homepage = https://github.com/crownedgrouse/esh.mk
+pkg_esh_mk_fetch = git
+pkg_esh_mk_repo = https://github.com/crownedgrouse/esh.mk.git
+pkg_esh_mk_commit = master
+
+PACKAGES += espec
+pkg_espec_name = espec
+pkg_espec_description = ESpec: Behaviour driven development framework for Erlang
+pkg_espec_homepage = https://github.com/lucaspiller/espec
+pkg_espec_fetch = git
+pkg_espec_repo = https://github.com/lucaspiller/espec
+pkg_espec_commit = master
+
+PACKAGES += estatsd
+pkg_estatsd_name = estatsd
+pkg_estatsd_description = Erlang stats aggregation app that periodically flushes data to graphite
+pkg_estatsd_homepage = https://github.com/RJ/estatsd
+pkg_estatsd_fetch = git
+pkg_estatsd_repo = https://github.com/RJ/estatsd
+pkg_estatsd_commit = master
+
+PACKAGES += etap
+pkg_etap_name = etap
+pkg_etap_description = etap is a simple erlang testing library that provides TAP compliant output.
+pkg_etap_homepage = https://github.com/ngerakines/etap
+pkg_etap_fetch = git
+pkg_etap_repo = https://github.com/ngerakines/etap
+pkg_etap_commit = master
+
+PACKAGES += etest
+pkg_etest_name = etest
+pkg_etest_description = A lightweight, convention over configuration test framework for Erlang
+pkg_etest_homepage = https://github.com/wooga/etest
+pkg_etest_fetch = git
+pkg_etest_repo = https://github.com/wooga/etest
+pkg_etest_commit = master
+
+PACKAGES += etest_http
+pkg_etest_http_name = etest_http
+pkg_etest_http_description = etest Assertions around HTTP (client-side)
+pkg_etest_http_homepage = https://github.com/wooga/etest_http
+pkg_etest_http_fetch = git
+pkg_etest_http_repo = https://github.com/wooga/etest_http
+pkg_etest_http_commit = master
+
+PACKAGES += etoml
+pkg_etoml_name = etoml
+pkg_etoml_description = TOML parser for Erlang
+pkg_etoml_homepage = https://github.com/kalta/etoml
+pkg_etoml_fetch = git
+pkg_etoml_repo = https://github.com/kalta/etoml
+pkg_etoml_commit = master
+
+PACKAGES += eunit
+pkg_eunit_name = eunit
+pkg_eunit_description = The EUnit lightweight unit testing framework for Erlang - this is the canonical development repository.
+pkg_eunit_homepage = https://github.com/richcarl/eunit
+pkg_eunit_fetch = git
+pkg_eunit_repo = https://github.com/richcarl/eunit
+pkg_eunit_commit = master
+
+PACKAGES += eunit_formatters
+pkg_eunit_formatters_name = eunit_formatters
+pkg_eunit_formatters_description = Because eunit's output sucks. Let's make it better.
+pkg_eunit_formatters_homepage = https://github.com/seancribbs/eunit_formatters
+pkg_eunit_formatters_fetch = git
+pkg_eunit_formatters_repo = https://github.com/seancribbs/eunit_formatters
+pkg_eunit_formatters_commit = master
+
+PACKAGES += euthanasia
+pkg_euthanasia_name = euthanasia
+pkg_euthanasia_description = Merciful killer for your Erlang processes
+pkg_euthanasia_homepage = https://github.com/doubleyou/euthanasia
+pkg_euthanasia_fetch = git
+pkg_euthanasia_repo = https://github.com/doubleyou/euthanasia
+pkg_euthanasia_commit = master
+
+PACKAGES += evum
+pkg_evum_name = evum
+pkg_evum_description = Spawn Linux VMs as Erlang processes in the Erlang VM
+pkg_evum_homepage = https://github.com/msantos/evum
+pkg_evum_fetch = git
+pkg_evum_repo = https://github.com/msantos/evum
+pkg_evum_commit = master
+
+PACKAGES += exec
+pkg_exec_name = erlexec
+pkg_exec_description = Execute and control OS processes from Erlang/OTP.
+pkg_exec_homepage = http://saleyn.github.com/erlexec
+pkg_exec_fetch = git
+pkg_exec_repo = https://github.com/saleyn/erlexec
+pkg_exec_commit = master
+
+PACKAGES += exml
+pkg_exml_name = exml
+pkg_exml_description = XML parsing library in Erlang
+pkg_exml_homepage = https://github.com/paulgray/exml
+pkg_exml_fetch = git
+pkg_exml_repo = https://github.com/paulgray/exml
+pkg_exml_commit = master
+
+PACKAGES += exometer
+pkg_exometer_name = exometer
+pkg_exometer_description = Basic measurement objects and probe behavior
+pkg_exometer_homepage = https://github.com/Feuerlabs/exometer
+pkg_exometer_fetch = git
+pkg_exometer_repo = https://github.com/Feuerlabs/exometer
+pkg_exometer_commit = master
+
+PACKAGES += exs1024
+pkg_exs1024_name = exs1024
+pkg_exs1024_description = Xorshift1024star pseudo random number generator for Erlang.
+pkg_exs1024_homepage = https://github.com/jj1bdx/exs1024
+pkg_exs1024_fetch = git
+pkg_exs1024_repo = https://github.com/jj1bdx/exs1024
+pkg_exs1024_commit = master
+
+PACKAGES += exs64
+pkg_exs64_name = exs64
+pkg_exs64_description = Xorshift64star pseudo random number generator for Erlang.
+pkg_exs64_homepage = https://github.com/jj1bdx/exs64
+pkg_exs64_fetch = git
+pkg_exs64_repo = https://github.com/jj1bdx/exs64
+pkg_exs64_commit = master
+
+PACKAGES += exsplus116
+pkg_exsplus116_name = exsplus116
+pkg_exsplus116_description = Xorshift116plus for Erlang
+pkg_exsplus116_homepage = https://github.com/jj1bdx/exsplus116
+pkg_exsplus116_fetch = git
+pkg_exsplus116_repo = https://github.com/jj1bdx/exsplus116
+pkg_exsplus116_commit = master
+
+PACKAGES += exsplus128
+pkg_exsplus128_name = exsplus128
+pkg_exsplus128_description = Xorshift128plus pseudo random number generator for Erlang.
+pkg_exsplus128_homepage = https://github.com/jj1bdx/exsplus128
+pkg_exsplus128_fetch = git
+pkg_exsplus128_repo = https://github.com/jj1bdx/exsplus128
+pkg_exsplus128_commit = master
+
+PACKAGES += ezmq
+pkg_ezmq_name = ezmq
+pkg_ezmq_description = zMQ implemented in Erlang
+pkg_ezmq_homepage = https://github.com/RoadRunnr/ezmq
+pkg_ezmq_fetch = git
+pkg_ezmq_repo = https://github.com/RoadRunnr/ezmq
+pkg_ezmq_commit = master
+
+PACKAGES += ezmtp
+pkg_ezmtp_name = ezmtp
+pkg_ezmtp_description = ZMTP protocol in pure Erlang.
+pkg_ezmtp_homepage = https://github.com/a13x/ezmtp
+pkg_ezmtp_fetch = git
+pkg_ezmtp_repo = https://github.com/a13x/ezmtp
+pkg_ezmtp_commit = master
+
+PACKAGES += fast_disk_log
+pkg_fast_disk_log_name = fast_disk_log
+pkg_fast_disk_log_description = Pool-based asynchronous Erlang disk logger
+pkg_fast_disk_log_homepage = https://github.com/lpgauth/fast_disk_log
+pkg_fast_disk_log_fetch = git
+pkg_fast_disk_log_repo = https://github.com/lpgauth/fast_disk_log
+pkg_fast_disk_log_commit = master
+
+PACKAGES += feeder
+pkg_feeder_name = feeder
+pkg_feeder_description = Stream parse RSS and Atom formatted XML feeds.
+pkg_feeder_homepage = https://github.com/michaelnisi/feeder
+pkg_feeder_fetch = git
+pkg_feeder_repo = https://github.com/michaelnisi/feeder
+pkg_feeder_commit = master
+
+PACKAGES += find_crate
+pkg_find_crate_name = find_crate
+pkg_find_crate_description = Find Rust libs and exes in Erlang application priv directory
+pkg_find_crate_homepage = https://github.com/goertzenator/find_crate
+pkg_find_crate_fetch = git
+pkg_find_crate_repo = https://github.com/goertzenator/find_crate
+pkg_find_crate_commit = master
+
+PACKAGES += fix
+pkg_fix_name = fix
+pkg_fix_description = http://fixprotocol.org/ implementation.
+pkg_fix_homepage = https://github.com/maxlapshin/fix
+pkg_fix_fetch = git
+pkg_fix_repo = https://github.com/maxlapshin/fix
+pkg_fix_commit = master
+
+PACKAGES += flower
+pkg_flower_name = flower
+pkg_flower_description = FlowER - an Erlang OpenFlow development platform
+pkg_flower_homepage = https://github.com/travelping/flower
+pkg_flower_fetch = git
+pkg_flower_repo = https://github.com/travelping/flower
+pkg_flower_commit = master
+
+PACKAGES += fn
+pkg_fn_name = fn
+pkg_fn_description = Function utilities for Erlang
+pkg_fn_homepage = https://github.com/reiddraper/fn
+pkg_fn_fetch = git
+pkg_fn_repo = https://github.com/reiddraper/fn
+pkg_fn_commit = master
+
+PACKAGES += folsom
+pkg_folsom_name = folsom
+pkg_folsom_description = Expose Erlang Events and Metrics
+pkg_folsom_homepage = https://github.com/boundary/folsom
+pkg_folsom_fetch = git
+pkg_folsom_repo = https://github.com/boundary/folsom
+pkg_folsom_commit = master
+
+PACKAGES += folsom_cowboy
+pkg_folsom_cowboy_name = folsom_cowboy
+pkg_folsom_cowboy_description = A Cowboy based Folsom HTTP Wrapper.
+pkg_folsom_cowboy_homepage = https://github.com/boundary/folsom_cowboy
+pkg_folsom_cowboy_fetch = git
+pkg_folsom_cowboy_repo = https://github.com/boundary/folsom_cowboy
+pkg_folsom_cowboy_commit = master
+
+PACKAGES += folsomite
+pkg_folsomite_name = folsomite
+pkg_folsomite_description = blow up your graphite / riemann server with folsom metrics
+pkg_folsomite_homepage = https://github.com/campanja/folsomite
+pkg_folsomite_fetch = git
+pkg_folsomite_repo = https://github.com/campanja/folsomite
+pkg_folsomite_commit = master
+
+PACKAGES += fs
+pkg_fs_name = fs
+pkg_fs_description = Erlang FileSystem Listener
+pkg_fs_homepage = https://github.com/synrc/fs
+pkg_fs_fetch = git
+pkg_fs_repo = https://github.com/synrc/fs
+pkg_fs_commit = master
+
+PACKAGES += fuse
+pkg_fuse_name = fuse
+pkg_fuse_description = A Circuit Breaker for Erlang
+pkg_fuse_homepage = https://github.com/jlouis/fuse
+pkg_fuse_fetch = git
+pkg_fuse_repo = https://github.com/jlouis/fuse
+pkg_fuse_commit = master
+
+PACKAGES += gcm
+pkg_gcm_name = gcm
+pkg_gcm_description = An Erlang application for Google Cloud Messaging
+pkg_gcm_homepage = https://github.com/pdincau/gcm-erlang
+pkg_gcm_fetch = git
+pkg_gcm_repo = https://github.com/pdincau/gcm-erlang
+pkg_gcm_commit = master
+
+PACKAGES += gcprof
+pkg_gcprof_name = gcprof
+pkg_gcprof_description = Garbage Collection profiler for Erlang
+pkg_gcprof_homepage = https://github.com/knutin/gcprof
+pkg_gcprof_fetch = git
+pkg_gcprof_repo = https://github.com/knutin/gcprof
+pkg_gcprof_commit = master
+
+PACKAGES += geas
+pkg_geas_name = geas
+pkg_geas_description = Guess Erlang Application Scattering
+pkg_geas_homepage = https://github.com/crownedgrouse/geas
+pkg_geas_fetch = git
+pkg_geas_repo = https://github.com/crownedgrouse/geas
+pkg_geas_commit = master
+
+PACKAGES += geef
+pkg_geef_name = geef
+pkg_geef_description = Git NEEEEF (Erlang NIF)
+pkg_geef_homepage = https://github.com/carlosmn/geef
+pkg_geef_fetch = git
+pkg_geef_repo = https://github.com/carlosmn/geef
+pkg_geef_commit = master
+
+PACKAGES += gen_coap
+pkg_gen_coap_name = gen_coap
+pkg_gen_coap_description = Generic Erlang CoAP Client/Server
+pkg_gen_coap_homepage = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_fetch = git
+pkg_gen_coap_repo = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_commit = master
+
+PACKAGES += gen_cycle
+pkg_gen_cycle_name = gen_cycle
+pkg_gen_cycle_description = Simple, generic OTP behaviour for recurring tasks
+pkg_gen_cycle_homepage = https://github.com/aerosol/gen_cycle
+pkg_gen_cycle_fetch = git
+pkg_gen_cycle_repo = https://github.com/aerosol/gen_cycle
+pkg_gen_cycle_commit = develop
+
+PACKAGES += gen_icmp
+pkg_gen_icmp_name = gen_icmp
+pkg_gen_icmp_description = Erlang interface to ICMP sockets
+pkg_gen_icmp_homepage = https://github.com/msantos/gen_icmp
+pkg_gen_icmp_fetch = git
+pkg_gen_icmp_repo = https://github.com/msantos/gen_icmp
+pkg_gen_icmp_commit = master
+
+PACKAGES += gen_leader
+pkg_gen_leader_name = gen_leader
+pkg_gen_leader_description = leader election behavior
+pkg_gen_leader_homepage = https://github.com/garret-smith/gen_leader_revival
+pkg_gen_leader_fetch = git
+pkg_gen_leader_repo = https://github.com/garret-smith/gen_leader_revival
+pkg_gen_leader_commit = master
+
+PACKAGES += gen_nb_server
+pkg_gen_nb_server_name = gen_nb_server
+pkg_gen_nb_server_description = OTP behavior for writing non-blocking servers
+pkg_gen_nb_server_homepage = https://github.com/kevsmith/gen_nb_server
+pkg_gen_nb_server_fetch = git
+pkg_gen_nb_server_repo = https://github.com/kevsmith/gen_nb_server
+pkg_gen_nb_server_commit = master
+
+PACKAGES += gen_paxos
+pkg_gen_paxos_name = gen_paxos
+pkg_gen_paxos_description = An Erlang/OTP-style implementation of the PAXOS distributed consensus protocol
+pkg_gen_paxos_homepage = https://github.com/gburd/gen_paxos
+pkg_gen_paxos_fetch = git
+pkg_gen_paxos_repo = https://github.com/gburd/gen_paxos
+pkg_gen_paxos_commit = master
+
+PACKAGES += gen_rpc
+pkg_gen_rpc_name = gen_rpc
+pkg_gen_rpc_description = A scalable RPC library for Erlang-VM based languages
+pkg_gen_rpc_homepage = https://github.com/priestjim/gen_rpc.git
+pkg_gen_rpc_fetch = git
+pkg_gen_rpc_repo = https://github.com/priestjim/gen_rpc.git
+pkg_gen_rpc_commit = master
+
+PACKAGES += gen_smtp
+pkg_gen_smtp_name = gen_smtp
+pkg_gen_smtp_description = A generic Erlang SMTP server and client that can be extended via callback modules
+pkg_gen_smtp_homepage = https://github.com/Vagabond/gen_smtp
+pkg_gen_smtp_fetch = git
+pkg_gen_smtp_repo = https://github.com/Vagabond/gen_smtp
+pkg_gen_smtp_commit = master
+
+PACKAGES += gen_tracker
+pkg_gen_tracker_name = gen_tracker
+pkg_gen_tracker_description = supervisor with ets handling of children and their metadata
+pkg_gen_tracker_homepage = https://github.com/erlyvideo/gen_tracker
+pkg_gen_tracker_fetch = git
+pkg_gen_tracker_repo = https://github.com/erlyvideo/gen_tracker
+pkg_gen_tracker_commit = master
+
+PACKAGES += gen_unix
+pkg_gen_unix_name = gen_unix
+pkg_gen_unix_description = Erlang Unix socket interface
+pkg_gen_unix_homepage = https://github.com/msantos/gen_unix
+pkg_gen_unix_fetch = git
+pkg_gen_unix_repo = https://github.com/msantos/gen_unix
+pkg_gen_unix_commit = master
+
+PACKAGES += geode
+pkg_geode_name = geode
+pkg_geode_description = geohash/proximity lookup in pure, uncut erlang.
+pkg_geode_homepage = https://github.com/bradfordw/geode
+pkg_geode_fetch = git
+pkg_geode_repo = https://github.com/bradfordw/geode
+pkg_geode_commit = master
+
+PACKAGES += getopt
+pkg_getopt_name = getopt
+pkg_getopt_description = Module to parse command line arguments using the GNU getopt syntax
+pkg_getopt_homepage = https://github.com/jcomellas/getopt
+pkg_getopt_fetch = git
+pkg_getopt_repo = https://github.com/jcomellas/getopt
+pkg_getopt_commit = master
+
+PACKAGES += gettext
+pkg_gettext_name = gettext
+pkg_gettext_description = Erlang internationalization library.
+pkg_gettext_homepage = https://github.com/etnt/gettext
+pkg_gettext_fetch = git
+pkg_gettext_repo = https://github.com/etnt/gettext
+pkg_gettext_commit = master
+
+PACKAGES += giallo
+pkg_giallo_name = giallo
+pkg_giallo_description = Small and flexible web framework on top of Cowboy
+pkg_giallo_homepage = https://github.com/kivra/giallo
+pkg_giallo_fetch = git
+pkg_giallo_repo = https://github.com/kivra/giallo
+pkg_giallo_commit = master
+
+PACKAGES += gin
+pkg_gin_name = gin
+pkg_gin_description = The guards and for Erlang parse_transform
+pkg_gin_homepage = https://github.com/mad-cocktail/gin
+pkg_gin_fetch = git
+pkg_gin_repo = https://github.com/mad-cocktail/gin
+pkg_gin_commit = master
+
+PACKAGES += gitty
+pkg_gitty_name = gitty
+pkg_gitty_description = Git access in erlang
+pkg_gitty_homepage = https://github.com/maxlapshin/gitty
+pkg_gitty_fetch = git
+pkg_gitty_repo = https://github.com/maxlapshin/gitty
+pkg_gitty_commit = master
+
+PACKAGES += gold_fever
+pkg_gold_fever_name = gold_fever
+pkg_gold_fever_description = A Treasure Hunt for Erlangers
+pkg_gold_fever_homepage = https://github.com/inaka/gold_fever
+pkg_gold_fever_fetch = git
+pkg_gold_fever_repo = https://github.com/inaka/gold_fever
+pkg_gold_fever_commit = master
+
+PACKAGES += gpb
+pkg_gpb_name = gpb
+pkg_gpb_description = A Google Protobuf implementation for Erlang
+pkg_gpb_homepage = https://github.com/tomas-abrahamsson/gpb
+pkg_gpb_fetch = git
+pkg_gpb_repo = https://github.com/tomas-abrahamsson/gpb
+pkg_gpb_commit = master
+
+PACKAGES += gproc
+pkg_gproc_name = gproc
+pkg_gproc_description = Extended process registry for Erlang
+pkg_gproc_homepage = https://github.com/uwiger/gproc
+pkg_gproc_fetch = git
+pkg_gproc_repo = https://github.com/uwiger/gproc
+pkg_gproc_commit = master
+
+PACKAGES += grapherl
+pkg_grapherl_name = grapherl
+pkg_grapherl_description = Create graphs of Erlang systems and programs
+pkg_grapherl_homepage = https://github.com/eproxus/grapherl
+pkg_grapherl_fetch = git
+pkg_grapherl_repo = https://github.com/eproxus/grapherl
+pkg_grapherl_commit = master
+
+PACKAGES += grpc
+pkg_grpc_name = grpc
+pkg_grpc_description = gRPC server in Erlang
+pkg_grpc_homepage = https://github.com/Bluehouse-Technology/grpc
+pkg_grpc_fetch = git
+pkg_grpc_repo = https://github.com/Bluehouse-Technology/grpc
+pkg_grpc_commit = master
+
+PACKAGES += grpc_client
+pkg_grpc_client_name = grpc_client
+pkg_grpc_client_description = gRPC client in Erlang
+pkg_grpc_client_homepage = https://github.com/Bluehouse-Technology/grpc_client
+pkg_grpc_client_fetch = git
+pkg_grpc_client_repo = https://github.com/Bluehouse-Technology/grpc_client
+pkg_grpc_client_commit = master
+
+PACKAGES += gun
+pkg_gun_name = gun
+pkg_gun_description = Asynchronous SPDY, HTTP and Websocket client written in Erlang.
+pkg_gun_homepage = http://ninenines.eu
+pkg_gun_fetch = git
+pkg_gun_repo = https://github.com/ninenines/gun
+pkg_gun_commit = master
+
+PACKAGES += gut
+pkg_gut_name = gut
+pkg_gut_description = gut is a template printing, aka scaffolding, tool for Erlang. Like rails generate or yeoman
+pkg_gut_homepage = https://github.com/unbalancedparentheses/gut
+pkg_gut_fetch = git
+pkg_gut_repo = https://github.com/unbalancedparentheses/gut
+pkg_gut_commit = master
+
+PACKAGES += hackney
+pkg_hackney_name = hackney
+pkg_hackney_description = simple HTTP client in Erlang
+pkg_hackney_homepage = https://github.com/benoitc/hackney
+pkg_hackney_fetch = git
+pkg_hackney_repo = https://github.com/benoitc/hackney
+pkg_hackney_commit = master
+
+PACKAGES += hamcrest
+pkg_hamcrest_name = hamcrest
+pkg_hamcrest_description = Erlang port of Hamcrest
+pkg_hamcrest_homepage = https://github.com/hyperthunk/hamcrest-erlang
+pkg_hamcrest_fetch = git
+pkg_hamcrest_repo = https://github.com/hyperthunk/hamcrest-erlang
+pkg_hamcrest_commit = master
+
+PACKAGES += hanoidb
+pkg_hanoidb_name = hanoidb
+pkg_hanoidb_description = Erlang LSM BTree Storage
+pkg_hanoidb_homepage = https://github.com/krestenkrab/hanoidb
+pkg_hanoidb_fetch = git
+pkg_hanoidb_repo = https://github.com/krestenkrab/hanoidb
+pkg_hanoidb_commit = master
+
+PACKAGES += hottub
+pkg_hottub_name = hottub
+pkg_hottub_description = Permanent Erlang Worker Pool
+pkg_hottub_homepage = https://github.com/bfrog/hottub
+pkg_hottub_fetch = git
+pkg_hottub_repo = https://github.com/bfrog/hottub
+pkg_hottub_commit = master
+
+PACKAGES += hpack
+pkg_hpack_name = hpack
+pkg_hpack_description = HPACK Implementation for Erlang
+pkg_hpack_homepage = https://github.com/joedevivo/hpack
+pkg_hpack_fetch = git
+pkg_hpack_repo = https://github.com/joedevivo/hpack
+pkg_hpack_commit = master
+
+PACKAGES += hyper
+pkg_hyper_name = hyper
+pkg_hyper_description = Erlang implementation of HyperLogLog
+pkg_hyper_homepage = https://github.com/GameAnalytics/hyper
+pkg_hyper_fetch = git
+pkg_hyper_repo = https://github.com/GameAnalytics/hyper
+pkg_hyper_commit = master
+
+PACKAGES += i18n
+pkg_i18n_name = i18n
+pkg_i18n_description = International components for unicode from Erlang (unicode, date, string, number, format, locale, localization, transliteration, icu4e)
+pkg_i18n_homepage = https://github.com/erlang-unicode/i18n
+pkg_i18n_fetch = git
+pkg_i18n_repo = https://github.com/erlang-unicode/i18n
+pkg_i18n_commit = master
+
+PACKAGES += ibrowse
+pkg_ibrowse_name = ibrowse
+pkg_ibrowse_description = Erlang HTTP client
+pkg_ibrowse_homepage = https://github.com/cmullaparthi/ibrowse
+pkg_ibrowse_fetch = git
+pkg_ibrowse_repo = https://github.com/cmullaparthi/ibrowse
+pkg_ibrowse_commit = master
+
+PACKAGES += idna
+pkg_idna_name = idna
+pkg_idna_description = Erlang IDNA lib
+pkg_idna_homepage = https://github.com/benoitc/erlang-idna
+pkg_idna_fetch = git
+pkg_idna_repo = https://github.com/benoitc/erlang-idna
+pkg_idna_commit = master
+
+PACKAGES += ierlang
+pkg_ierlang_name = ierlang
+pkg_ierlang_description = An Erlang language kernel for IPython.
+pkg_ierlang_homepage = https://github.com/robbielynch/ierlang
+pkg_ierlang_fetch = git
+pkg_ierlang_repo = https://github.com/robbielynch/ierlang
+pkg_ierlang_commit = master
+
+PACKAGES += iota
+pkg_iota_name = iota
+pkg_iota_description = iota (Inter-dependency Objective Testing Apparatus) - a tool to enforce clean separation of responsibilities in Erlang code
+pkg_iota_homepage = https://github.com/jpgneves/iota
+pkg_iota_fetch = git
+pkg_iota_repo = https://github.com/jpgneves/iota
+pkg_iota_commit = master
+
+PACKAGES += irc_lib
+pkg_irc_lib_name = irc_lib
+pkg_irc_lib_description = Erlang irc client library
+pkg_irc_lib_homepage = https://github.com/OtpChatBot/irc_lib
+pkg_irc_lib_fetch = git
+pkg_irc_lib_repo = https://github.com/OtpChatBot/irc_lib
+pkg_irc_lib_commit = master
+
+PACKAGES += ircd
+pkg_ircd_name = ircd
+pkg_ircd_description = A pluggable IRC daemon application/library for Erlang.
+pkg_ircd_homepage = https://github.com/tonyg/erlang-ircd
+pkg_ircd_fetch = git
+pkg_ircd_repo = https://github.com/tonyg/erlang-ircd
+pkg_ircd_commit = master
+
+PACKAGES += iris
+pkg_iris_name = iris
+pkg_iris_description = Iris Erlang binding
+pkg_iris_homepage = https://github.com/project-iris/iris-erl
+pkg_iris_fetch = git
+pkg_iris_repo = https://github.com/project-iris/iris-erl
+pkg_iris_commit = master
+
+PACKAGES += iso8601
+pkg_iso8601_name = iso8601
+pkg_iso8601_description = Erlang ISO 8601 date formatter/parser
+pkg_iso8601_homepage = https://github.com/seansawyer/erlang_iso8601
+pkg_iso8601_fetch = git
+pkg_iso8601_repo = https://github.com/seansawyer/erlang_iso8601
+pkg_iso8601_commit = master
+
+PACKAGES += jamdb_sybase
+pkg_jamdb_sybase_name = jamdb_sybase
+pkg_jamdb_sybase_description = Erlang driver for SAP Sybase ASE
+pkg_jamdb_sybase_homepage = https://github.com/erlangbureau/jamdb_sybase
+pkg_jamdb_sybase_fetch = git
+pkg_jamdb_sybase_repo = https://github.com/erlangbureau/jamdb_sybase
+pkg_jamdb_sybase_commit = master
+
+PACKAGES += jerg
+pkg_jerg_name = jerg
+pkg_jerg_description = JSON Schema to Erlang Records Generator
+pkg_jerg_homepage = https://github.com/ddossot/jerg
+pkg_jerg_fetch = git
+pkg_jerg_repo = https://github.com/ddossot/jerg
+pkg_jerg_commit = master
+
+PACKAGES += jesse
+pkg_jesse_name = jesse
+pkg_jesse_description = jesse (JSon Schema Erlang) is an implementation of a json schema validator for Erlang.
+pkg_jesse_homepage = https://github.com/for-GET/jesse
+pkg_jesse_fetch = git
+pkg_jesse_repo = https://github.com/for-GET/jesse
+pkg_jesse_commit = master
+
+PACKAGES += jiffy
+pkg_jiffy_name = jiffy
+pkg_jiffy_description = JSON NIFs for Erlang.
+pkg_jiffy_homepage = https://github.com/davisp/jiffy
+pkg_jiffy_fetch = git
+pkg_jiffy_repo = https://github.com/davisp/jiffy
+pkg_jiffy_commit = master
+
+PACKAGES += jiffy_v
+pkg_jiffy_v_name = jiffy_v
+pkg_jiffy_v_description = JSON validation utility
+pkg_jiffy_v_homepage = https://github.com/shizzard/jiffy-v
+pkg_jiffy_v_fetch = git
+pkg_jiffy_v_repo = https://github.com/shizzard/jiffy-v
+pkg_jiffy_v_commit = master
+
+PACKAGES += jobs
+pkg_jobs_name = jobs
+pkg_jobs_description = a Job scheduler for load regulation
+pkg_jobs_homepage = https://github.com/esl/jobs
+pkg_jobs_fetch = git
+pkg_jobs_repo = https://github.com/esl/jobs
+pkg_jobs_commit = master
+
+PACKAGES += joxa
+pkg_joxa_name = joxa
+pkg_joxa_description = A Modern Lisp for the Erlang VM
+pkg_joxa_homepage = https://github.com/joxa/joxa
+pkg_joxa_fetch = git
+pkg_joxa_repo = https://github.com/joxa/joxa
+pkg_joxa_commit = master
+
+PACKAGES += json
+pkg_json_name = json
+pkg_json_description = a high level json library for erlang (17.0+)
+pkg_json_homepage = https://github.com/talentdeficit/json
+pkg_json_fetch = git
+pkg_json_repo = https://github.com/talentdeficit/json
+pkg_json_commit = master
+
+PACKAGES += json_rec
+pkg_json_rec_name = json_rec
+pkg_json_rec_description = JSON to erlang record
+pkg_json_rec_homepage = https://github.com/justinkirby/json_rec
+pkg_json_rec_fetch = git
+pkg_json_rec_repo = https://github.com/justinkirby/json_rec
+pkg_json_rec_commit = master
+
+PACKAGES += jsone
+pkg_jsone_name = jsone
+pkg_jsone_description = An Erlang library for encoding and decoding JSON data.
+pkg_jsone_homepage = https://github.com/sile/jsone.git
+pkg_jsone_fetch = git
+pkg_jsone_repo = https://github.com/sile/jsone.git
+pkg_jsone_commit = master
+
+PACKAGES += jsonerl
+pkg_jsonerl_name = jsonerl
+pkg_jsonerl_description = yet another but slightly different erlang <-> json encoder/decoder
+pkg_jsonerl_homepage = https://github.com/lambder/jsonerl
+pkg_jsonerl_fetch = git
+pkg_jsonerl_repo = https://github.com/lambder/jsonerl
+pkg_jsonerl_commit = master
+
+PACKAGES += jsonpath
+pkg_jsonpath_name = jsonpath
+pkg_jsonpath_description = Fast Erlang JSON data retrieval and updates via javascript-like notation
+pkg_jsonpath_homepage = https://github.com/GeneStevens/jsonpath
+pkg_jsonpath_fetch = git
+pkg_jsonpath_repo = https://github.com/GeneStevens/jsonpath
+pkg_jsonpath_commit = master
+
+PACKAGES += jsonx
+pkg_jsonx_name = jsonx
+pkg_jsonx_description = JSONX is an Erlang library for efficiently decoding and encoding JSON, written in C.
+pkg_jsonx_homepage = https://github.com/iskra/jsonx
+pkg_jsonx_fetch = git
+pkg_jsonx_repo = https://github.com/iskra/jsonx
+pkg_jsonx_commit = master
+
+PACKAGES += jsx
+pkg_jsx_name = jsx
+pkg_jsx_description = An Erlang application for consuming, producing and manipulating JSON.
+pkg_jsx_homepage = https://github.com/talentdeficit/jsx
+pkg_jsx_fetch = git
+pkg_jsx_repo = https://github.com/talentdeficit/jsx
+pkg_jsx_commit = master
+
+PACKAGES += kafka
+pkg_kafka_name = kafka
+pkg_kafka_description = Kafka consumer and producer in Erlang
+pkg_kafka_homepage = https://github.com/wooga/kafka-erlang
+pkg_kafka_fetch = git
+pkg_kafka_repo = https://github.com/wooga/kafka-erlang
+pkg_kafka_commit = master
+
+PACKAGES += kafka_protocol
+pkg_kafka_protocol_name = kafka_protocol
+pkg_kafka_protocol_description = Kafka protocol Erlang library
+pkg_kafka_protocol_homepage = https://github.com/klarna/kafka_protocol
+pkg_kafka_protocol_fetch = git
+pkg_kafka_protocol_repo = https://github.com/klarna/kafka_protocol.git
+pkg_kafka_protocol_commit = master
+
+PACKAGES += kai
+pkg_kai_name = kai
+pkg_kai_description = DHT storage by Takeshi Inoue
+pkg_kai_homepage = https://github.com/synrc/kai
+pkg_kai_fetch = git
+pkg_kai_repo = https://github.com/synrc/kai
+pkg_kai_commit = master
+
+PACKAGES += katja
+pkg_katja_name = katja
+pkg_katja_description = A simple Riemann client written in Erlang.
+pkg_katja_homepage = https://github.com/nifoc/katja
+pkg_katja_fetch = git
+pkg_katja_repo = https://github.com/nifoc/katja
+pkg_katja_commit = master
+
+PACKAGES += kdht
+pkg_kdht_name = kdht
+pkg_kdht_description = kdht is an erlang DHT implementation
+pkg_kdht_homepage = https://github.com/kevinlynx/kdht
+pkg_kdht_fetch = git
+pkg_kdht_repo = https://github.com/kevinlynx/kdht
+pkg_kdht_commit = master
+
+PACKAGES += key2value
+pkg_key2value_name = key2value
+pkg_key2value_description = Erlang 2-way map
+pkg_key2value_homepage = https://github.com/okeuday/key2value
+pkg_key2value_fetch = git
+pkg_key2value_repo = https://github.com/okeuday/key2value
+pkg_key2value_commit = master
+
+PACKAGES += keys1value
+pkg_keys1value_name = keys1value
+pkg_keys1value_description = Erlang set associative map for key lists
+pkg_keys1value_homepage = https://github.com/okeuday/keys1value
+pkg_keys1value_fetch = git
+pkg_keys1value_repo = https://github.com/okeuday/keys1value
+pkg_keys1value_commit = master
+
+PACKAGES += kinetic
+pkg_kinetic_name = kinetic
+pkg_kinetic_description = Erlang Kinesis Client
+pkg_kinetic_homepage = https://github.com/AdRoll/kinetic
+pkg_kinetic_fetch = git
+pkg_kinetic_repo = https://github.com/AdRoll/kinetic
+pkg_kinetic_commit = master
+
+PACKAGES += kjell
+pkg_kjell_name = kjell
+pkg_kjell_description = Erlang Shell
+pkg_kjell_homepage = https://github.com/karlll/kjell
+pkg_kjell_fetch = git
+pkg_kjell_repo = https://github.com/karlll/kjell
+pkg_kjell_commit = master
+
+PACKAGES += kraken
+pkg_kraken_name = kraken
+pkg_kraken_description = Distributed Pubsub Server for Realtime Apps
+pkg_kraken_homepage = https://github.com/Asana/kraken
+pkg_kraken_fetch = git
+pkg_kraken_repo = https://github.com/Asana/kraken
+pkg_kraken_commit = master
+
+PACKAGES += kucumberl
+pkg_kucumberl_name = kucumberl
+pkg_kucumberl_description = A pure-Erlang, open-source implementation of Cucumber
+pkg_kucumberl_homepage = https://github.com/openshine/kucumberl
+pkg_kucumberl_fetch = git
+pkg_kucumberl_repo = https://github.com/openshine/kucumberl
+pkg_kucumberl_commit = master
+
+PACKAGES += kvc
+pkg_kvc_name = kvc
+pkg_kvc_description = KVC - Key Value Coding for Erlang data structures
+pkg_kvc_homepage = https://github.com/etrepum/kvc
+pkg_kvc_fetch = git
+pkg_kvc_repo = https://github.com/etrepum/kvc
+pkg_kvc_commit = master
+
+PACKAGES += kvlists
+pkg_kvlists_name = kvlists
+pkg_kvlists_description = Lists of key-value pairs (decoded JSON) in Erlang
+pkg_kvlists_homepage = https://github.com/jcomellas/kvlists
+pkg_kvlists_fetch = git
+pkg_kvlists_repo = https://github.com/jcomellas/kvlists
+pkg_kvlists_commit = master
+
+PACKAGES += kvs
+pkg_kvs_name = kvs
+pkg_kvs_description = Container and Iterator
+pkg_kvs_homepage = https://github.com/synrc/kvs
+pkg_kvs_fetch = git
+pkg_kvs_repo = https://github.com/synrc/kvs
+pkg_kvs_commit = master
+
+PACKAGES += lager
+pkg_lager_name = lager
+pkg_lager_description = A logging framework for Erlang/OTP.
+pkg_lager_homepage = https://github.com/erlang-lager/lager
+pkg_lager_fetch = git
+pkg_lager_repo = https://github.com/erlang-lager/lager
+pkg_lager_commit = master
+
+PACKAGES += lager_amqp_backend
+pkg_lager_amqp_backend_name = lager_amqp_backend
+pkg_lager_amqp_backend_description = AMQP RabbitMQ Lager backend
+pkg_lager_amqp_backend_homepage = https://github.com/jbrisbin/lager_amqp_backend
+pkg_lager_amqp_backend_fetch = git
+pkg_lager_amqp_backend_repo = https://github.com/jbrisbin/lager_amqp_backend
+pkg_lager_amqp_backend_commit = master
+
+PACKAGES += lager_syslog
+pkg_lager_syslog_name = lager_syslog
+pkg_lager_syslog_description = Syslog backend for lager
+pkg_lager_syslog_homepage = https://github.com/erlang-lager/lager_syslog
+pkg_lager_syslog_fetch = git
+pkg_lager_syslog_repo = https://github.com/erlang-lager/lager_syslog
+pkg_lager_syslog_commit = master
+
+PACKAGES += lambdapad
+pkg_lambdapad_name = lambdapad
+pkg_lambdapad_description = Static site generator using Erlang. Yes, Erlang.
+pkg_lambdapad_homepage = https://github.com/gar1t/lambdapad
+pkg_lambdapad_fetch = git
+pkg_lambdapad_repo = https://github.com/gar1t/lambdapad
+pkg_lambdapad_commit = master
+
+PACKAGES += lasp
+pkg_lasp_name = lasp
+pkg_lasp_description = A Language for Distributed, Eventually Consistent Computations
+pkg_lasp_homepage = http://lasp-lang.org/
+pkg_lasp_fetch = git
+pkg_lasp_repo = https://github.com/lasp-lang/lasp
+pkg_lasp_commit = master
+
+PACKAGES += lasse
+pkg_lasse_name = lasse
+pkg_lasse_description = SSE handler for Cowboy
+pkg_lasse_homepage = https://github.com/inaka/lasse
+pkg_lasse_fetch = git
+pkg_lasse_repo = https://github.com/inaka/lasse
+pkg_lasse_commit = master
+
+PACKAGES += ldap
+pkg_ldap_name = ldap
+pkg_ldap_description = LDAP server written in Erlang
+pkg_ldap_homepage = https://github.com/spawnproc/ldap
+pkg_ldap_fetch = git
+pkg_ldap_repo = https://github.com/spawnproc/ldap
+pkg_ldap_commit = master
+
+PACKAGES += lethink
+pkg_lethink_name = lethink
+pkg_lethink_description = erlang driver for rethinkdb
+pkg_lethink_homepage = https://github.com/taybin/lethink
+pkg_lethink_fetch = git
+pkg_lethink_repo = https://github.com/taybin/lethink
+pkg_lethink_commit = master
+
+PACKAGES += lfe
+pkg_lfe_name = lfe
+pkg_lfe_description = Lisp Flavoured Erlang (LFE)
+pkg_lfe_homepage = https://github.com/rvirding/lfe
+pkg_lfe_fetch = git
+pkg_lfe_repo = https://github.com/rvirding/lfe
+pkg_lfe_commit = master
+
+PACKAGES += ling
+pkg_ling_name = ling
+pkg_ling_description = Erlang on Xen
+pkg_ling_homepage = https://github.com/cloudozer/ling
+pkg_ling_fetch = git
+pkg_ling_repo = https://github.com/cloudozer/ling
+pkg_ling_commit = master
+
+PACKAGES += live
+pkg_live_name = live
+pkg_live_description = Automated module and configuration reloader.
+pkg_live_homepage = http://ninenines.eu
+pkg_live_fetch = git
+pkg_live_repo = https://github.com/ninenines/live
+pkg_live_commit = master
+
+PACKAGES += lmq
+pkg_lmq_name = lmq
+pkg_lmq_description = Lightweight Message Queue
+pkg_lmq_homepage = https://github.com/iij/lmq
+pkg_lmq_fetch = git
+pkg_lmq_repo = https://github.com/iij/lmq
+pkg_lmq_commit = master
+
+PACKAGES += locker
+pkg_locker_name = locker
+pkg_locker_description = Atomic distributed 'check and set' for short-lived keys
+pkg_locker_homepage = https://github.com/wooga/locker
+pkg_locker_fetch = git
+pkg_locker_repo = https://github.com/wooga/locker
+pkg_locker_commit = master
+
+PACKAGES += locks
+pkg_locks_name = locks
+pkg_locks_description = A scalable, deadlock-resolving resource locker
+pkg_locks_homepage = https://github.com/uwiger/locks
+pkg_locks_fetch = git
+pkg_locks_repo = https://github.com/uwiger/locks
+pkg_locks_commit = master
+
+PACKAGES += log4erl
+pkg_log4erl_name = log4erl
+pkg_log4erl_description = A logger for erlang in the spirit of Log4J.
+pkg_log4erl_homepage = https://github.com/ahmednawras/log4erl
+pkg_log4erl_fetch = git
+pkg_log4erl_repo = https://github.com/ahmednawras/log4erl
+pkg_log4erl_commit = master
+
+PACKAGES += lol
+pkg_lol_name = lol
+pkg_lol_description = Lisp on erLang, and programming is fun again
+pkg_lol_homepage = https://github.com/b0oh/lol
+pkg_lol_fetch = git
+pkg_lol_repo = https://github.com/b0oh/lol
+pkg_lol_commit = master
+
+PACKAGES += lucid
+pkg_lucid_name = lucid
+pkg_lucid_description = HTTP/2 server written in Erlang
+pkg_lucid_homepage = https://github.com/tatsuhiro-t/lucid
+pkg_lucid_fetch = git
+pkg_lucid_repo = https://github.com/tatsuhiro-t/lucid
+pkg_lucid_commit = master
+
+PACKAGES += luerl
+pkg_luerl_name = luerl
+pkg_luerl_description = Lua in Erlang
+pkg_luerl_homepage = https://github.com/rvirding/luerl
+pkg_luerl_fetch = git
+pkg_luerl_repo = https://github.com/rvirding/luerl
+pkg_luerl_commit = develop
+
+PACKAGES += luwak
+pkg_luwak_name = luwak
+pkg_luwak_description = Large-object storage interface for Riak
+pkg_luwak_homepage = https://github.com/basho/luwak
+pkg_luwak_fetch = git
+pkg_luwak_repo = https://github.com/basho/luwak
+pkg_luwak_commit = master
+
+PACKAGES += lux
+pkg_lux_name = lux
+pkg_lux_description = Lux (LUcid eXpect scripting) simplifies test automation and provides an Expect-style execution of commands
+pkg_lux_homepage = https://github.com/hawk/lux
+pkg_lux_fetch = git
+pkg_lux_repo = https://github.com/hawk/lux
+pkg_lux_commit = master
+
+PACKAGES += machi
+pkg_machi_name = machi
+pkg_machi_description = Machi file store
+pkg_machi_homepage = https://github.com/basho/machi
+pkg_machi_fetch = git
+pkg_machi_repo = https://github.com/basho/machi
+pkg_machi_commit = master
+
+PACKAGES += mad
+pkg_mad_name = mad
+pkg_mad_description = Small and Fast Rebar Replacement
+pkg_mad_homepage = https://github.com/synrc/mad
+pkg_mad_fetch = git
+pkg_mad_repo = https://github.com/synrc/mad
+pkg_mad_commit = master
+
+PACKAGES += marina
+pkg_marina_name = marina
+pkg_marina_description = Non-blocking Erlang Cassandra CQL3 client
+pkg_marina_homepage = https://github.com/lpgauth/marina
+pkg_marina_fetch = git
+pkg_marina_repo = https://github.com/lpgauth/marina
+pkg_marina_commit = master
+
+PACKAGES += mavg
+pkg_mavg_name = mavg
+pkg_mavg_description = Erlang :: Exponential moving average library
+pkg_mavg_homepage = https://github.com/EchoTeam/mavg
+pkg_mavg_fetch = git
+pkg_mavg_repo = https://github.com/EchoTeam/mavg
+pkg_mavg_commit = master
+
+PACKAGES += mc_erl
+pkg_mc_erl_name = mc_erl
+pkg_mc_erl_description = mc-erl is a server for Minecraft 1.4.7 written in Erlang.
+pkg_mc_erl_homepage = https://github.com/clonejo/mc-erl
+pkg_mc_erl_fetch = git
+pkg_mc_erl_repo = https://github.com/clonejo/mc-erl
+pkg_mc_erl_commit = master
+
+PACKAGES += mcd
+pkg_mcd_name = mcd
+pkg_mcd_description = Fast memcached protocol client in pure Erlang
+pkg_mcd_homepage = https://github.com/EchoTeam/mcd
+pkg_mcd_fetch = git
+pkg_mcd_repo = https://github.com/EchoTeam/mcd
+pkg_mcd_commit = master
+
+PACKAGES += mcerlang
+pkg_mcerlang_name = mcerlang
+pkg_mcerlang_description = The McErlang model checker for Erlang
+pkg_mcerlang_homepage = https://github.com/fredlund/McErlang
+pkg_mcerlang_fetch = git
+pkg_mcerlang_repo = https://github.com/fredlund/McErlang
+pkg_mcerlang_commit = master
+
+PACKAGES += meck
+pkg_meck_name = meck
+pkg_meck_description = A mocking library for Erlang
+pkg_meck_homepage = https://github.com/eproxus/meck
+pkg_meck_fetch = git
+pkg_meck_repo = https://github.com/eproxus/meck
+pkg_meck_commit = master
+
+PACKAGES += mekao
+pkg_mekao_name = mekao
+pkg_mekao_description = SQL constructor
+pkg_mekao_homepage = https://github.com/ddosia/mekao
+pkg_mekao_fetch = git
+pkg_mekao_repo = https://github.com/ddosia/mekao
+pkg_mekao_commit = master
+
+PACKAGES += memo
+pkg_memo_name = memo
+pkg_memo_description = Erlang memoization server
+pkg_memo_homepage = https://github.com/tuncer/memo
+pkg_memo_fetch = git
+pkg_memo_repo = https://github.com/tuncer/memo
+pkg_memo_commit = master
+
+PACKAGES += merge_index
+pkg_merge_index_name = merge_index
+pkg_merge_index_description = MergeIndex is an Erlang library for storing ordered sets on disk. It is very similar to an SSTable (in Google's Bigtable) or an HFile (in Hadoop).
+pkg_merge_index_homepage = https://github.com/basho/merge_index
+pkg_merge_index_fetch = git
+pkg_merge_index_repo = https://github.com/basho/merge_index
+pkg_merge_index_commit = master
+
+PACKAGES += merl
+pkg_merl_name = merl
+pkg_merl_description = Metaprogramming in Erlang
+pkg_merl_homepage = https://github.com/richcarl/merl
+pkg_merl_fetch = git
+pkg_merl_repo = https://github.com/richcarl/merl
+pkg_merl_commit = master
+
+PACKAGES += mimerl
+pkg_mimerl_name = mimerl
+pkg_mimerl_description = library to handle mimetypes
+pkg_mimerl_homepage = https://github.com/benoitc/mimerl
+pkg_mimerl_fetch = git
+pkg_mimerl_repo = https://github.com/benoitc/mimerl
+pkg_mimerl_commit = master
+
+PACKAGES += mimetypes
+pkg_mimetypes_name = mimetypes
+pkg_mimetypes_description = Erlang MIME types library
+pkg_mimetypes_homepage = https://github.com/spawngrid/mimetypes
+pkg_mimetypes_fetch = git
+pkg_mimetypes_repo = https://github.com/spawngrid/mimetypes
+pkg_mimetypes_commit = master
+
+PACKAGES += mixer
+pkg_mixer_name = mixer
+pkg_mixer_description = Mix in functions from other modules
+pkg_mixer_homepage = https://github.com/chef/mixer
+pkg_mixer_fetch = git
+pkg_mixer_repo = https://github.com/chef/mixer
+pkg_mixer_commit = master
+
+PACKAGES += mochiweb
+pkg_mochiweb_name = mochiweb
+pkg_mochiweb_description = MochiWeb is an Erlang library for building lightweight HTTP servers.
+pkg_mochiweb_homepage = https://github.com/mochi/mochiweb
+pkg_mochiweb_fetch = git
+pkg_mochiweb_repo = https://github.com/mochi/mochiweb
+pkg_mochiweb_commit = master
+
+PACKAGES += mochiweb_xpath
+pkg_mochiweb_xpath_name = mochiweb_xpath
+pkg_mochiweb_xpath_description = XPath support for mochiweb's html parser
+pkg_mochiweb_xpath_homepage = https://github.com/retnuh/mochiweb_xpath
+pkg_mochiweb_xpath_fetch = git
+pkg_mochiweb_xpath_repo = https://github.com/retnuh/mochiweb_xpath
+pkg_mochiweb_xpath_commit = master
+
+PACKAGES += mockgyver
+pkg_mockgyver_name = mockgyver
+pkg_mockgyver_description = A mocking library for Erlang
+pkg_mockgyver_homepage = https://github.com/klajo/mockgyver
+pkg_mockgyver_fetch = git
+pkg_mockgyver_repo = https://github.com/klajo/mockgyver
+pkg_mockgyver_commit = master
+
+PACKAGES += modlib
+pkg_modlib_name = modlib
+pkg_modlib_description = Web framework based on Erlang's inets httpd
+pkg_modlib_homepage = https://github.com/gar1t/modlib
+pkg_modlib_fetch = git
+pkg_modlib_repo = https://github.com/gar1t/modlib
+pkg_modlib_commit = master
+
+PACKAGES += mongodb
+pkg_mongodb_name = mongodb
+pkg_mongodb_description = MongoDB driver for Erlang
+pkg_mongodb_homepage = https://github.com/comtihon/mongodb-erlang
+pkg_mongodb_fetch = git
+pkg_mongodb_repo = https://github.com/comtihon/mongodb-erlang
+pkg_mongodb_commit = master
+
+PACKAGES += mongooseim
+pkg_mongooseim_name = mongooseim
+pkg_mongooseim_description = Jabber / XMPP server with focus on performance and scalability, by Erlang Solutions
+pkg_mongooseim_homepage = https://www.erlang-solutions.com/products/mongooseim-massively-scalable-ejabberd-platform
+pkg_mongooseim_fetch = git
+pkg_mongooseim_repo = https://github.com/esl/MongooseIM
+pkg_mongooseim_commit = master
+
+PACKAGES += moyo
+pkg_moyo_name = moyo
+pkg_moyo_description = Erlang utility functions library
+pkg_moyo_homepage = https://github.com/dwango/moyo
+pkg_moyo_fetch = git
+pkg_moyo_repo = https://github.com/dwango/moyo
+pkg_moyo_commit = master
+
+PACKAGES += msgpack
+pkg_msgpack_name = msgpack
+pkg_msgpack_description = MessagePack (de)serializer implementation for Erlang
+pkg_msgpack_homepage = https://github.com/msgpack/msgpack-erlang
+pkg_msgpack_fetch = git
+pkg_msgpack_repo = https://github.com/msgpack/msgpack-erlang
+pkg_msgpack_commit = master
+
+PACKAGES += mu2
+pkg_mu2_name = mu2
+pkg_mu2_description = Erlang mutation testing tool
+pkg_mu2_homepage = https://github.com/ramsay-t/mu2
+pkg_mu2_fetch = git
+pkg_mu2_repo = https://github.com/ramsay-t/mu2
+pkg_mu2_commit = master
+
+PACKAGES += mustache
+pkg_mustache_name = mustache
+pkg_mustache_description = Mustache template engine for Erlang.
+pkg_mustache_homepage = https://github.com/mojombo/mustache.erl
+pkg_mustache_fetch = git
+pkg_mustache_repo = https://github.com/mojombo/mustache.erl
+pkg_mustache_commit = master
+
+PACKAGES += myproto
+pkg_myproto_name = myproto
+pkg_myproto_description = MySQL Server Protocol in Erlang
+pkg_myproto_homepage = https://github.com/altenwald/myproto
+pkg_myproto_fetch = git
+pkg_myproto_repo = https://github.com/altenwald/myproto
+pkg_myproto_commit = master
+
+PACKAGES += mysql
+pkg_mysql_name = mysql
+pkg_mysql_description = MySQL client library for Erlang/OTP
+pkg_mysql_homepage = https://github.com/mysql-otp/mysql-otp
+pkg_mysql_fetch = git
+pkg_mysql_repo = https://github.com/mysql-otp/mysql-otp
+pkg_mysql_commit = 1.5.1
+
+PACKAGES += n2o
+pkg_n2o_name = n2o
+pkg_n2o_description = WebSocket Application Server
+pkg_n2o_homepage = https://github.com/5HT/n2o
+pkg_n2o_fetch = git
+pkg_n2o_repo = https://github.com/5HT/n2o
+pkg_n2o_commit = master
+
+PACKAGES += nat_upnp
+pkg_nat_upnp_name = nat_upnp
+pkg_nat_upnp_description = Erlang library to map your internal port to an external one using UPnP IGD
+pkg_nat_upnp_homepage = https://github.com/benoitc/nat_upnp
+pkg_nat_upnp_fetch = git
+pkg_nat_upnp_repo = https://github.com/benoitc/nat_upnp
+pkg_nat_upnp_commit = master
+
+PACKAGES += neo4j
+pkg_neo4j_name = neo4j
+pkg_neo4j_description = Erlang client library for Neo4J.
+pkg_neo4j_homepage = https://github.com/dmitriid/neo4j-erlang
+pkg_neo4j_fetch = git
+pkg_neo4j_repo = https://github.com/dmitriid/neo4j-erlang
+pkg_neo4j_commit = master
+
+PACKAGES += neotoma
+pkg_neotoma_name = neotoma
+pkg_neotoma_description = Erlang library and packrat parser-generator for parsing expression grammars.
+pkg_neotoma_homepage = https://github.com/seancribbs/neotoma
+pkg_neotoma_fetch = git
+pkg_neotoma_repo = https://github.com/seancribbs/neotoma
+pkg_neotoma_commit = master
+
+PACKAGES += newrelic
+pkg_newrelic_name = newrelic
+pkg_newrelic_description = Erlang library for sending metrics to New Relic
+pkg_newrelic_homepage = https://github.com/wooga/newrelic-erlang
+pkg_newrelic_fetch = git
+pkg_newrelic_repo = https://github.com/wooga/newrelic-erlang
+pkg_newrelic_commit = master
+
+PACKAGES += nifty
+pkg_nifty_name = nifty
+pkg_nifty_description = Erlang NIF wrapper generator
+pkg_nifty_homepage = https://github.com/parapluu/nifty
+pkg_nifty_fetch = git
+pkg_nifty_repo = https://github.com/parapluu/nifty
+pkg_nifty_commit = master
+
+PACKAGES += nitrogen_core
+pkg_nitrogen_core_name = nitrogen_core
+pkg_nitrogen_core_description = The core Nitrogen library.
+pkg_nitrogen_core_homepage = http://nitrogenproject.com/
+pkg_nitrogen_core_fetch = git
+pkg_nitrogen_core_repo = https://github.com/nitrogen/nitrogen_core
+pkg_nitrogen_core_commit = master
+
+PACKAGES += nkbase
+pkg_nkbase_name = nkbase
+pkg_nkbase_description = NkBASE distributed database
+pkg_nkbase_homepage = https://github.com/Nekso/nkbase
+pkg_nkbase_fetch = git
+pkg_nkbase_repo = https://github.com/Nekso/nkbase
+pkg_nkbase_commit = develop
+
+PACKAGES += nkdocker
+pkg_nkdocker_name = nkdocker
+pkg_nkdocker_description = Erlang Docker client
+pkg_nkdocker_homepage = https://github.com/Nekso/nkdocker
+pkg_nkdocker_fetch = git
+pkg_nkdocker_repo = https://github.com/Nekso/nkdocker
+pkg_nkdocker_commit = master
+
+PACKAGES += nkpacket
+pkg_nkpacket_name = nkpacket
+pkg_nkpacket_description = Generic Erlang transport layer
+pkg_nkpacket_homepage = https://github.com/Nekso/nkpacket
+pkg_nkpacket_fetch = git
+pkg_nkpacket_repo = https://github.com/Nekso/nkpacket
+pkg_nkpacket_commit = master
+
+PACKAGES += nksip
+pkg_nksip_name = nksip
+pkg_nksip_description = Erlang SIP application server
+pkg_nksip_homepage = https://github.com/kalta/nksip
+pkg_nksip_fetch = git
+pkg_nksip_repo = https://github.com/kalta/nksip
+pkg_nksip_commit = master
+
+PACKAGES += nodefinder
+pkg_nodefinder_name = nodefinder
+pkg_nodefinder_description = automatic node discovery via UDP multicast
+pkg_nodefinder_homepage = https://github.com/erlanger/nodefinder
+pkg_nodefinder_fetch = git
+pkg_nodefinder_repo = https://github.com/okeuday/nodefinder
+pkg_nodefinder_commit = master
+
+PACKAGES += nprocreg
+pkg_nprocreg_name = nprocreg
+pkg_nprocreg_description = Minimal Distributed Erlang Process Registry
+pkg_nprocreg_homepage = http://nitrogenproject.com/
+pkg_nprocreg_fetch = git
+pkg_nprocreg_repo = https://github.com/nitrogen/nprocreg
+pkg_nprocreg_commit = master
+
+PACKAGES += oauth
+pkg_oauth_name = oauth
+pkg_oauth_description = An Erlang OAuth 1.0 implementation
+pkg_oauth_homepage = https://github.com/tim/erlang-oauth
+pkg_oauth_fetch = git
+pkg_oauth_repo = https://github.com/tim/erlang-oauth
+pkg_oauth_commit = master
+
+PACKAGES += oauth2
+pkg_oauth2_name = oauth2
+pkg_oauth2_description = Erlang OAuth2 implementation
+pkg_oauth2_homepage = https://github.com/kivra/oauth2
+pkg_oauth2_fetch = git
+pkg_oauth2_repo = https://github.com/kivra/oauth2
+pkg_oauth2_commit = master
+
+PACKAGES += observer_cli
+pkg_observer_cli_name = observer_cli
+pkg_observer_cli_description = Visualize Erlang/Elixir Nodes On The Command Line
+pkg_observer_cli_homepage = http://zhongwencool.github.io/observer_cli
+pkg_observer_cli_fetch = git
+pkg_observer_cli_repo = https://github.com/zhongwencool/observer_cli
+pkg_observer_cli_commit = master
+
+PACKAGES += octopus
+pkg_octopus_name = octopus
+pkg_octopus_description = Small and flexible pool manager written in Erlang
+pkg_octopus_homepage = https://github.com/erlangbureau/octopus
+pkg_octopus_fetch = git
+pkg_octopus_repo = https://github.com/erlangbureau/octopus
+pkg_octopus_commit = master
+
+PACKAGES += of_protocol
+pkg_of_protocol_name = of_protocol
+pkg_of_protocol_description = OpenFlow Protocol Library for Erlang
+pkg_of_protocol_homepage = https://github.com/FlowForwarding/of_protocol
+pkg_of_protocol_fetch = git
+pkg_of_protocol_repo = https://github.com/FlowForwarding/of_protocol
+pkg_of_protocol_commit = master
+
+PACKAGES += opencouch
+pkg_opencouch_name = couch
+pkg_opencouch_description = An embeddable document-oriented database compatible with Apache CouchDB
+pkg_opencouch_homepage = https://github.com/benoitc/opencouch
+pkg_opencouch_fetch = git
+pkg_opencouch_repo = https://github.com/benoitc/opencouch
+pkg_opencouch_commit = master
+
+PACKAGES += openflow
+pkg_openflow_name = openflow
+pkg_openflow_description = An OpenFlow controller written in pure erlang
+pkg_openflow_homepage = https://github.com/renatoaguiar/erlang-openflow
+pkg_openflow_fetch = git
+pkg_openflow_repo = https://github.com/renatoaguiar/erlang-openflow
+pkg_openflow_commit = master
+
+PACKAGES += openid
+pkg_openid_name = openid
+pkg_openid_description = Erlang OpenID
+pkg_openid_homepage = https://github.com/brendonh/erl_openid
+pkg_openid_fetch = git
+pkg_openid_repo = https://github.com/brendonh/erl_openid
+pkg_openid_commit = master
+
+PACKAGES += openpoker
+pkg_openpoker_name = openpoker
+pkg_openpoker_description = Genesis Texas hold'em Game Server
+pkg_openpoker_homepage = https://github.com/hpyhacking/openpoker
+pkg_openpoker_fetch = git
+pkg_openpoker_repo = https://github.com/hpyhacking/openpoker
+pkg_openpoker_commit = master
+
+PACKAGES += otpbp
+pkg_otpbp_name = otpbp
+pkg_otpbp_description = Parse transformer for using new OTP functions in old Erlang/OTP releases (R15, R16, 17, 18, 19)
+pkg_otpbp_homepage = https://github.com/Ledest/otpbp
+pkg_otpbp_fetch = git
+pkg_otpbp_repo = https://github.com/Ledest/otpbp
+pkg_otpbp_commit = master
+
+PACKAGES += pal
+pkg_pal_name = pal
+pkg_pal_description = Pragmatic Authentication Library
+pkg_pal_homepage = https://github.com/manifest/pal
+pkg_pal_fetch = git
+pkg_pal_repo = https://github.com/manifest/pal
+pkg_pal_commit = master
+
+PACKAGES += parse_trans
+pkg_parse_trans_name = parse_trans
+pkg_parse_trans_description = Parse transform utilities for Erlang
+pkg_parse_trans_homepage = https://github.com/uwiger/parse_trans
+pkg_parse_trans_fetch = git
+pkg_parse_trans_repo = https://github.com/uwiger/parse_trans
+pkg_parse_trans_commit = master
+
+PACKAGES += parsexml
+pkg_parsexml_name = parsexml
+pkg_parsexml_description = Simple DOM XML parser with a convenient and very simple API
+pkg_parsexml_homepage = https://github.com/maxlapshin/parsexml
+pkg_parsexml_fetch = git
+pkg_parsexml_repo = https://github.com/maxlapshin/parsexml
+pkg_parsexml_commit = master
+
+PACKAGES += partisan
+pkg_partisan_name = partisan
+pkg_partisan_description = High-performance, high-scalability distributed computing with Erlang and Elixir.
+pkg_partisan_homepage = http://partisan.cloud
+pkg_partisan_fetch = git
+pkg_partisan_repo = https://github.com/lasp-lang/partisan
+pkg_partisan_commit = master
+
+PACKAGES += pegjs
+pkg_pegjs_name = pegjs
+pkg_pegjs_description = An implementation of PEG.js grammar for Erlang.
+pkg_pegjs_homepage = https://github.com/dmitriid/pegjs
+pkg_pegjs_fetch = git
+pkg_pegjs_repo = https://github.com/dmitriid/pegjs
+pkg_pegjs_commit = master
+
+PACKAGES += percept2
+pkg_percept2_name = percept2
+pkg_percept2_description = Concurrent profiling tool for Erlang
+pkg_percept2_homepage = https://github.com/huiqing/percept2
+pkg_percept2_fetch = git
+pkg_percept2_repo = https://github.com/huiqing/percept2
+pkg_percept2_commit = master
+
+PACKAGES += pgo
+pkg_pgo_name = pgo
+pkg_pgo_description = Erlang Postgres client and connection pool
+pkg_pgo_homepage = https://github.com/erleans/pgo.git
+pkg_pgo_fetch = git
+pkg_pgo_repo = https://github.com/erleans/pgo.git
+pkg_pgo_commit = master
+
+PACKAGES += pgsql
+pkg_pgsql_name = pgsql
+pkg_pgsql_description = Erlang PostgreSQL driver
+pkg_pgsql_homepage = https://github.com/semiocast/pgsql
+pkg_pgsql_fetch = git
+pkg_pgsql_repo = https://github.com/semiocast/pgsql
+pkg_pgsql_commit = master
+
+PACKAGES += pkgx
+pkg_pkgx_name = pkgx
+pkg_pkgx_description = Build .deb packages from Erlang releases
+pkg_pkgx_homepage = https://github.com/arjan/pkgx
+pkg_pkgx_fetch = git
+pkg_pkgx_repo = https://github.com/arjan/pkgx
+pkg_pkgx_commit = master
+
+PACKAGES += pkt
+pkg_pkt_name = pkt
+pkg_pkt_description = Erlang network protocol library
+pkg_pkt_homepage = https://github.com/msantos/pkt
+pkg_pkt_fetch = git
+pkg_pkt_repo = https://github.com/msantos/pkt
+pkg_pkt_commit = master
+
+PACKAGES += plain_fsm
+pkg_plain_fsm_name = plain_fsm
+pkg_plain_fsm_description = A behaviour/support library for writing plain Erlang FSMs.
+pkg_plain_fsm_homepage = https://github.com/uwiger/plain_fsm
+pkg_plain_fsm_fetch = git
+pkg_plain_fsm_repo = https://github.com/uwiger/plain_fsm
+pkg_plain_fsm_commit = master
+
+PACKAGES += plumtree
+pkg_plumtree_name = plumtree
+pkg_plumtree_description = Epidemic Broadcast Trees
+pkg_plumtree_homepage = https://github.com/helium/plumtree
+pkg_plumtree_fetch = git
+pkg_plumtree_repo = https://github.com/helium/plumtree
+pkg_plumtree_commit = master
+
+PACKAGES += pmod_transform
+pkg_pmod_transform_name = pmod_transform
+pkg_pmod_transform_description = Parse transform for parameterized modules
+pkg_pmod_transform_homepage = https://github.com/erlang/pmod_transform
+pkg_pmod_transform_fetch = git
+pkg_pmod_transform_repo = https://github.com/erlang/pmod_transform
+pkg_pmod_transform_commit = master
+
+PACKAGES += pobox
+pkg_pobox_name = pobox
+pkg_pobox_description = External buffer processes to protect against mailbox overflow in Erlang
+pkg_pobox_homepage = https://github.com/ferd/pobox
+pkg_pobox_fetch = git
+pkg_pobox_repo = https://github.com/ferd/pobox
+pkg_pobox_commit = master
+
+PACKAGES += ponos
+pkg_ponos_name = ponos
+pkg_ponos_description = ponos is a simple yet powerful load generator written in erlang
+pkg_ponos_homepage = https://github.com/klarna/ponos
+pkg_ponos_fetch = git
+pkg_ponos_repo = https://github.com/klarna/ponos
+pkg_ponos_commit = master
+
+PACKAGES += poolboy
+pkg_poolboy_name = poolboy
+pkg_poolboy_description = A hunky Erlang worker pool factory
+pkg_poolboy_homepage = https://github.com/devinus/poolboy
+pkg_poolboy_fetch = git
+pkg_poolboy_repo = https://github.com/devinus/poolboy
+pkg_poolboy_commit = master
+
+PACKAGES += pooler
+pkg_pooler_name = pooler
+pkg_pooler_description = An OTP Process Pool Application
+pkg_pooler_homepage = https://github.com/seth/pooler
+pkg_pooler_fetch = git
+pkg_pooler_repo = https://github.com/seth/pooler
+pkg_pooler_commit = master
+
+PACKAGES += pqueue
+pkg_pqueue_name = pqueue
+pkg_pqueue_description = Erlang Priority Queues
+pkg_pqueue_homepage = https://github.com/okeuday/pqueue
+pkg_pqueue_fetch = git
+pkg_pqueue_repo = https://github.com/okeuday/pqueue
+pkg_pqueue_commit = master
+
+PACKAGES += procket
+pkg_procket_name = procket
+pkg_procket_description = Erlang interface to low level socket operations
+pkg_procket_homepage = http://blog.listincomprehension.com/search/label/procket
+pkg_procket_fetch = git
+pkg_procket_repo = https://github.com/msantos/procket
+pkg_procket_commit = master
+
+PACKAGES += prometheus
+pkg_prometheus_name = prometheus
+pkg_prometheus_description = Prometheus.io client in Erlang
+pkg_prometheus_homepage = https://github.com/deadtrickster/prometheus.erl
+pkg_prometheus_fetch = git
+pkg_prometheus_repo = https://github.com/deadtrickster/prometheus.erl
+pkg_prometheus_commit = master
+
+PACKAGES += prop
+pkg_prop_name = prop
+pkg_prop_description = An Erlang code scaffolding and generator system.
+pkg_prop_homepage = https://github.com/nuex/prop
+pkg_prop_fetch = git
+pkg_prop_repo = https://github.com/nuex/prop
+pkg_prop_commit = master
+
+PACKAGES += proper
+pkg_proper_name = proper
+pkg_proper_description = PropEr: a QuickCheck-inspired property-based testing tool for Erlang.
+pkg_proper_homepage = http://proper.softlab.ntua.gr
+pkg_proper_fetch = git
+pkg_proper_repo = https://github.com/manopapad/proper
+pkg_proper_commit = master
+
+PACKAGES += props
+pkg_props_name = props
+pkg_props_description = Property structure library
+pkg_props_homepage = https://github.com/greyarea/props
+pkg_props_fetch = git
+pkg_props_repo = https://github.com/greyarea/props
+pkg_props_commit = master
+
+PACKAGES += protobuffs
+pkg_protobuffs_name = protobuffs
+pkg_protobuffs_description = An implementation of Google's Protocol Buffers for Erlang, based on ngerakines/erlang_protobuffs.
+pkg_protobuffs_homepage = https://github.com/basho/erlang_protobuffs
+pkg_protobuffs_fetch = git
+pkg_protobuffs_repo = https://github.com/basho/erlang_protobuffs
+pkg_protobuffs_commit = master
+
+PACKAGES += psycho
+pkg_psycho_name = psycho
+pkg_psycho_description = HTTP server that provides a WSGI-like interface for applications and middleware.
+pkg_psycho_homepage = https://github.com/gar1t/psycho
+pkg_psycho_fetch = git
+pkg_psycho_repo = https://github.com/gar1t/psycho
+pkg_psycho_commit = master
+
+PACKAGES += purity
+pkg_purity_name = purity
+pkg_purity_description = A side-effect analyzer for Erlang
+pkg_purity_homepage = https://github.com/mpitid/purity
+pkg_purity_fetch = git
+pkg_purity_repo = https://github.com/mpitid/purity
+pkg_purity_commit = master
+
+PACKAGES += push_service
+pkg_push_service_name = push_service
+pkg_push_service_description = Push service
+pkg_push_service_homepage = https://github.com/hairyhum/push_service
+pkg_push_service_fetch = git
+pkg_push_service_repo = https://github.com/hairyhum/push_service
+pkg_push_service_commit = master
+
+PACKAGES += qdate
+pkg_qdate_name = qdate
+pkg_qdate_description = Date, time, and timezone parsing, formatting, and conversion for Erlang.
+pkg_qdate_homepage = https://github.com/choptastic/qdate
+pkg_qdate_fetch = git
+pkg_qdate_repo = https://github.com/choptastic/qdate
+pkg_qdate_commit = master
+
+PACKAGES += qrcode
+pkg_qrcode_name = qrcode
+pkg_qrcode_description = QR Code encoder in Erlang
+pkg_qrcode_homepage = https://github.com/komone/qrcode
+pkg_qrcode_fetch = git
+pkg_qrcode_repo = https://github.com/komone/qrcode
+pkg_qrcode_commit = master
+
+PACKAGES += quest
+pkg_quest_name = quest
+pkg_quest_description = Learn Erlang through this set of challenges. An interactive system for getting to know Erlang.
+pkg_quest_homepage = https://github.com/eriksoe/ErlangQuest
+pkg_quest_fetch = git
+pkg_quest_repo = https://github.com/eriksoe/ErlangQuest
+pkg_quest_commit = master
+
+PACKAGES += quickrand
+pkg_quickrand_name = quickrand
+pkg_quickrand_description = Quick Erlang Random Number Generation
+pkg_quickrand_homepage = https://github.com/okeuday/quickrand
+pkg_quickrand_fetch = git
+pkg_quickrand_repo = https://github.com/okeuday/quickrand
+pkg_quickrand_commit = master
+
+PACKAGES += rabbit
+pkg_rabbit_name = rabbit
+pkg_rabbit_description = RabbitMQ Server
+pkg_rabbit_homepage = https://www.rabbitmq.com/
+pkg_rabbit_fetch = git
+pkg_rabbit_repo = https://github.com/rabbitmq/rabbitmq-server.git
+pkg_rabbit_commit = master
+
+PACKAGES += rabbit_exchange_type_riak
+pkg_rabbit_exchange_type_riak_name = rabbit_exchange_type_riak
+pkg_rabbit_exchange_type_riak_description = Custom RabbitMQ exchange type for sticking messages in Riak
+pkg_rabbit_exchange_type_riak_homepage = https://github.com/jbrisbin/riak-exchange
+pkg_rabbit_exchange_type_riak_fetch = git
+pkg_rabbit_exchange_type_riak_repo = https://github.com/jbrisbin/riak-exchange
+pkg_rabbit_exchange_type_riak_commit = master
+
+PACKAGES += rack
+pkg_rack_name = rack
+pkg_rack_description = Rack handler for erlang
+pkg_rack_homepage = https://github.com/erlyvideo/rack
+pkg_rack_fetch = git
+pkg_rack_repo = https://github.com/erlyvideo/rack
+pkg_rack_commit = master
+
+PACKAGES += radierl
+pkg_radierl_name = radierl
+pkg_radierl_description = RADIUS protocol stack implemented in Erlang.
+pkg_radierl_homepage = https://github.com/vances/radierl
+pkg_radierl_fetch = git
+pkg_radierl_repo = https://github.com/vances/radierl
+pkg_radierl_commit = master
+
+PACKAGES += rafter
+pkg_rafter_name = rafter
+pkg_rafter_description = An Erlang library application which implements the Raft consensus protocol
+pkg_rafter_homepage = https://github.com/andrewjstone/rafter
+pkg_rafter_fetch = git
+pkg_rafter_repo = https://github.com/andrewjstone/rafter
+pkg_rafter_commit = master
+
+PACKAGES += ranch
+pkg_ranch_name = ranch
+pkg_ranch_description = Socket acceptor pool for TCP protocols.
+pkg_ranch_homepage = http://ninenines.eu
+pkg_ranch_fetch = git
+pkg_ranch_repo = https://github.com/ninenines/ranch
+pkg_ranch_commit = 1.2.1
+
+PACKAGES += rbeacon
+pkg_rbeacon_name = rbeacon
+pkg_rbeacon_description = LAN discovery and presence in Erlang.
+pkg_rbeacon_homepage = https://github.com/refuge/rbeacon
+pkg_rbeacon_fetch = git
+pkg_rbeacon_repo = https://github.com/refuge/rbeacon
+pkg_rbeacon_commit = master
+
+PACKAGES += rebar
+pkg_rebar_name = rebar
+pkg_rebar_description = Erlang build tool that makes it easy to compile and test Erlang applications, port drivers and releases.
+pkg_rebar_homepage = http://www.rebar3.org
+pkg_rebar_fetch = git
+pkg_rebar_repo = https://github.com/rebar/rebar3
+pkg_rebar_commit = master
+
+PACKAGES += rebus
+pkg_rebus_name = rebus
+pkg_rebus_description = A stupid simple, internal pub/sub event bus written in and for Erlang.
+pkg_rebus_homepage = https://github.com/olle/rebus
+pkg_rebus_fetch = git
+pkg_rebus_repo = https://github.com/olle/rebus
+pkg_rebus_commit = master
+
+PACKAGES += rec2json
+pkg_rec2json_name = rec2json
+pkg_rec2json_description = Compile erlang record definitions into modules to convert them to/from json easily.
+pkg_rec2json_homepage = https://github.com/lordnull/rec2json
+pkg_rec2json_fetch = git
+pkg_rec2json_repo = https://github.com/lordnull/rec2json
+pkg_rec2json_commit = master
+
+PACKAGES += recon
+pkg_recon_name = recon
+pkg_recon_description = Collection of functions and scripts to debug Erlang in production.
+pkg_recon_homepage = https://github.com/ferd/recon
+pkg_recon_fetch = git
+pkg_recon_repo = https://github.com/ferd/recon
+pkg_recon_commit = master
+
+PACKAGES += record_info
+pkg_record_info_name = record_info
+pkg_record_info_description = Convert between record and proplist
+pkg_record_info_homepage = https://github.com/bipthelin/erlang-record_info
+pkg_record_info_fetch = git
+pkg_record_info_repo = https://github.com/bipthelin/erlang-record_info
+pkg_record_info_commit = master
+
+PACKAGES += redgrid
+pkg_redgrid_name = redgrid
+pkg_redgrid_description = automatic Erlang node discovery via redis
+pkg_redgrid_homepage = https://github.com/jkvor/redgrid
+pkg_redgrid_fetch = git
+pkg_redgrid_repo = https://github.com/jkvor/redgrid
+pkg_redgrid_commit = master
+
+PACKAGES += redo
+pkg_redo_name = redo
+pkg_redo_description = pipelined erlang redis client
+pkg_redo_homepage = https://github.com/jkvor/redo
+pkg_redo_fetch = git
+pkg_redo_repo = https://github.com/jkvor/redo
+pkg_redo_commit = master
+
+PACKAGES += reload_mk
+pkg_reload_mk_name = reload_mk
+pkg_reload_mk_description = Live reload plugin for erlang.mk.
+pkg_reload_mk_homepage = https://github.com/bullno1/reload.mk
+pkg_reload_mk_fetch = git
+pkg_reload_mk_repo = https://github.com/bullno1/reload.mk
+pkg_reload_mk_commit = master
+
+PACKAGES += reltool_util
+pkg_reltool_util_name = reltool_util
+pkg_reltool_util_description = Erlang reltool utility functionality application
+pkg_reltool_util_homepage = https://github.com/okeuday/reltool_util
+pkg_reltool_util_fetch = git
+pkg_reltool_util_repo = https://github.com/okeuday/reltool_util
+pkg_reltool_util_commit = master
+
+PACKAGES += relx
+pkg_relx_name = relx
+pkg_relx_description = Sane, simple release creation for Erlang
+pkg_relx_homepage = https://github.com/erlware/relx
+pkg_relx_fetch = git
+pkg_relx_repo = https://github.com/erlware/relx
+pkg_relx_commit = master
+
+PACKAGES += resource_discovery
+pkg_resource_discovery_name = resource_discovery
+pkg_resource_discovery_description = An application used to dynamically discover resources present in an Erlang node cluster.
+pkg_resource_discovery_homepage = http://erlware.org/
+pkg_resource_discovery_fetch = git
+pkg_resource_discovery_repo = https://github.com/erlware/resource_discovery
+pkg_resource_discovery_commit = master
+
+PACKAGES += restc
+pkg_restc_name = restc
+pkg_restc_description = Erlang Rest Client
+pkg_restc_homepage = https://github.com/kivra/restclient
+pkg_restc_fetch = git
+pkg_restc_repo = https://github.com/kivra/restclient
+pkg_restc_commit = master
+
+PACKAGES += rfc4627_jsonrpc
+pkg_rfc4627_jsonrpc_name = rfc4627_jsonrpc
+pkg_rfc4627_jsonrpc_description = Erlang RFC4627 (JSON) codec and JSON-RPC server implementation.
+pkg_rfc4627_jsonrpc_homepage = https://github.com/tonyg/erlang-rfc4627
+pkg_rfc4627_jsonrpc_fetch = git
+pkg_rfc4627_jsonrpc_repo = https://github.com/tonyg/erlang-rfc4627
+pkg_rfc4627_jsonrpc_commit = master
+
+PACKAGES += riak_control
+pkg_riak_control_name = riak_control
+pkg_riak_control_description = Webmachine-based administration interface for Riak.
+pkg_riak_control_homepage = https://github.com/basho/riak_control
+pkg_riak_control_fetch = git
+pkg_riak_control_repo = https://github.com/basho/riak_control
+pkg_riak_control_commit = master
+
+PACKAGES += riak_core
+pkg_riak_core_name = riak_core
+pkg_riak_core_description = Distributed systems infrastructure used by Riak.
+pkg_riak_core_homepage = https://github.com/basho/riak_core
+pkg_riak_core_fetch = git
+pkg_riak_core_repo = https://github.com/basho/riak_core
+pkg_riak_core_commit = master
+
+PACKAGES += riak_dt
+pkg_riak_dt_name = riak_dt
+pkg_riak_dt_description = Convergent replicated datatypes in Erlang
+pkg_riak_dt_homepage = https://github.com/basho/riak_dt
+pkg_riak_dt_fetch = git
+pkg_riak_dt_repo = https://github.com/basho/riak_dt
+pkg_riak_dt_commit = master
+
+PACKAGES += riak_ensemble
+pkg_riak_ensemble_name = riak_ensemble
+pkg_riak_ensemble_description = Multi-Paxos framework in Erlang
+pkg_riak_ensemble_homepage = https://github.com/basho/riak_ensemble
+pkg_riak_ensemble_fetch = git
+pkg_riak_ensemble_repo = https://github.com/basho/riak_ensemble
+pkg_riak_ensemble_commit = master
+
+PACKAGES += riak_kv
+pkg_riak_kv_name = riak_kv
+pkg_riak_kv_description = Riak Key/Value Store
+pkg_riak_kv_homepage = https://github.com/basho/riak_kv
+pkg_riak_kv_fetch = git
+pkg_riak_kv_repo = https://github.com/basho/riak_kv
+pkg_riak_kv_commit = master
+
+PACKAGES += riak_pg
+pkg_riak_pg_name = riak_pg
+pkg_riak_pg_description = Distributed process groups with riak_core.
+pkg_riak_pg_homepage = https://github.com/cmeiklejohn/riak_pg
+pkg_riak_pg_fetch = git
+pkg_riak_pg_repo = https://github.com/cmeiklejohn/riak_pg
+pkg_riak_pg_commit = master
+
+PACKAGES += riak_pipe
+pkg_riak_pipe_name = riak_pipe
+pkg_riak_pipe_description = Riak Pipelines
+pkg_riak_pipe_homepage = https://github.com/basho/riak_pipe
+pkg_riak_pipe_fetch = git
+pkg_riak_pipe_repo = https://github.com/basho/riak_pipe
+pkg_riak_pipe_commit = master
+
+PACKAGES += riak_sysmon
+pkg_riak_sysmon_name = riak_sysmon
+pkg_riak_sysmon_description = Simple OTP app for managing Erlang VM system_monitor event messages
+pkg_riak_sysmon_homepage = https://github.com/basho/riak_sysmon
+pkg_riak_sysmon_fetch = git
+pkg_riak_sysmon_repo = https://github.com/basho/riak_sysmon
+pkg_riak_sysmon_commit = master
+
+PACKAGES += riak_test
+pkg_riak_test_name = riak_test
+pkg_riak_test_description = I'm in your cluster, testing your riaks
+pkg_riak_test_homepage = https://github.com/basho/riak_test
+pkg_riak_test_fetch = git
+pkg_riak_test_repo = https://github.com/basho/riak_test
+pkg_riak_test_commit = master
+
+PACKAGES += riakc
+pkg_riakc_name = riakc
+pkg_riakc_description = Erlang clients for Riak.
+pkg_riakc_homepage = https://github.com/basho/riak-erlang-client
+pkg_riakc_fetch = git
+pkg_riakc_repo = https://github.com/basho/riak-erlang-client
+pkg_riakc_commit = master
+
+PACKAGES += riakhttpc
+pkg_riakhttpc_name = riakhttpc
+pkg_riakhttpc_description = Riak Erlang client using the HTTP interface
+pkg_riakhttpc_homepage = https://github.com/basho/riak-erlang-http-client
+pkg_riakhttpc_fetch = git
+pkg_riakhttpc_repo = https://github.com/basho/riak-erlang-http-client
+pkg_riakhttpc_commit = master
+
+PACKAGES += riaknostic
+pkg_riaknostic_name = riaknostic
+pkg_riaknostic_description = A diagnostic tool for Riak installations, to find common errors asap
+pkg_riaknostic_homepage = https://github.com/basho/riaknostic
+pkg_riaknostic_fetch = git
+pkg_riaknostic_repo = https://github.com/basho/riaknostic
+pkg_riaknostic_commit = master
+
+PACKAGES += riakpool
+pkg_riakpool_name = riakpool
+pkg_riakpool_description = erlang riak client pool
+pkg_riakpool_homepage = https://github.com/dweldon/riakpool
+pkg_riakpool_fetch = git
+pkg_riakpool_repo = https://github.com/dweldon/riakpool
+pkg_riakpool_commit = master
+
+PACKAGES += rivus_cep
+pkg_rivus_cep_name = rivus_cep
+pkg_rivus_cep_description = Complex event processing in Erlang
+pkg_rivus_cep_homepage = https://github.com/vascokk/rivus_cep
+pkg_rivus_cep_fetch = git
+pkg_rivus_cep_repo = https://github.com/vascokk/rivus_cep
+pkg_rivus_cep_commit = master
+
+PACKAGES += rlimit
+pkg_rlimit_name = rlimit
+pkg_rlimit_description = Magnus Klaar's rate limiter code from etorrent
+pkg_rlimit_homepage = https://github.com/jlouis/rlimit
+pkg_rlimit_fetch = git
+pkg_rlimit_repo = https://github.com/jlouis/rlimit
+pkg_rlimit_commit = master
+
+PACKAGES += rust_mk
+pkg_rust_mk_name = rust_mk
+pkg_rust_mk_description = Build Rust crates in an Erlang application
+pkg_rust_mk_homepage = https://github.com/goertzenator/rust.mk
+pkg_rust_mk_fetch = git
+pkg_rust_mk_repo = https://github.com/goertzenator/rust.mk
+pkg_rust_mk_commit = master
+
+PACKAGES += safetyvalve
+pkg_safetyvalve_name = safetyvalve
+pkg_safetyvalve_description = A safety valve for your erlang node
+pkg_safetyvalve_homepage = https://github.com/jlouis/safetyvalve
+pkg_safetyvalve_fetch = git
+pkg_safetyvalve_repo = https://github.com/jlouis/safetyvalve
+pkg_safetyvalve_commit = master
+
+PACKAGES += seestar
+pkg_seestar_name = seestar
+pkg_seestar_description = The Erlang client for Cassandra 1.2+ binary protocol
+pkg_seestar_homepage = https://github.com/iamaleksey/seestar
+pkg_seestar_fetch = git
+pkg_seestar_repo = https://github.com/iamaleksey/seestar
+pkg_seestar_commit = master
+
+PACKAGES += service
+pkg_service_name = service
+pkg_service_description = A minimal Erlang behavior for creating CloudI internal services
+pkg_service_homepage = http://cloudi.org/
+pkg_service_fetch = git
+pkg_service_repo = https://github.com/CloudI/service
+pkg_service_commit = master
+
+PACKAGES += setup
+pkg_setup_name = setup
+pkg_setup_description = Generic setup utility for Erlang-based systems
+pkg_setup_homepage = https://github.com/uwiger/setup
+pkg_setup_fetch = git
+pkg_setup_repo = https://github.com/uwiger/setup
+pkg_setup_commit = master
+
+PACKAGES += sext
+pkg_sext_name = sext
+pkg_sext_description = Sortable Erlang Term Serialization
+pkg_sext_homepage = https://github.com/uwiger/sext
+pkg_sext_fetch = git
+pkg_sext_repo = https://github.com/uwiger/sext
+pkg_sext_commit = master
+
+PACKAGES += sfmt
+pkg_sfmt_name = sfmt
+pkg_sfmt_description = SFMT pseudo random number generator for Erlang.
+pkg_sfmt_homepage = https://github.com/jj1bdx/sfmt-erlang
+pkg_sfmt_fetch = git
+pkg_sfmt_repo = https://github.com/jj1bdx/sfmt-erlang
+pkg_sfmt_commit = master
+
+PACKAGES += sgte
+pkg_sgte_name = sgte
+pkg_sgte_description = A simple Erlang Template Engine
+pkg_sgte_homepage = https://github.com/filippo/sgte
+pkg_sgte_fetch = git
+pkg_sgte_repo = https://github.com/filippo/sgte
+pkg_sgte_commit = master
+
+PACKAGES += sheriff
+pkg_sheriff_name = sheriff
+pkg_sheriff_description = Parse transform for type based validation.
+pkg_sheriff_homepage = http://ninenines.eu
+pkg_sheriff_fetch = git
+pkg_sheriff_repo = https://github.com/extend/sheriff
+pkg_sheriff_commit = master
+
+PACKAGES += shotgun
+pkg_shotgun_name = shotgun
+pkg_shotgun_description = better than just a gun
+pkg_shotgun_homepage = https://github.com/inaka/shotgun
+pkg_shotgun_fetch = git
+pkg_shotgun_repo = https://github.com/inaka/shotgun
+pkg_shotgun_commit = master
+
+PACKAGES += sidejob
+pkg_sidejob_name = sidejob
+pkg_sidejob_description = Parallel worker and capacity limiting library for Erlang
+pkg_sidejob_homepage = https://github.com/basho/sidejob
+pkg_sidejob_fetch = git
+pkg_sidejob_repo = https://github.com/basho/sidejob
+pkg_sidejob_commit = master
+
+PACKAGES += sieve
+pkg_sieve_name = sieve
+pkg_sieve_description = sieve is a simple TCP routing proxy (layer 7) in erlang
+pkg_sieve_homepage = https://github.com/benoitc/sieve
+pkg_sieve_fetch = git
+pkg_sieve_repo = https://github.com/benoitc/sieve
+pkg_sieve_commit = master
+
+PACKAGES += sighandler
+pkg_sighandler_name = sighandler
+pkg_sighandler_description = Handle UNIX signals in Erlang
+pkg_sighandler_homepage = https://github.com/jkingsbery/sighandler
+pkg_sighandler_fetch = git
+pkg_sighandler_repo = https://github.com/jkingsbery/sighandler
+pkg_sighandler_commit = master
+
+PACKAGES += simhash
+pkg_simhash_name = simhash
+pkg_simhash_description = Simhashing for Erlang -- hashing algorithm to find near-duplicates in binary data.
+pkg_simhash_homepage = https://github.com/ferd/simhash
+pkg_simhash_fetch = git
+pkg_simhash_repo = https://github.com/ferd/simhash
+pkg_simhash_commit = master
+
+PACKAGES += simple_bridge
+pkg_simple_bridge_name = simple_bridge
+pkg_simple_bridge_description = A simple, standardized interface library to Erlang HTTP Servers.
+pkg_simple_bridge_homepage = https://github.com/nitrogen/simple_bridge
+pkg_simple_bridge_fetch = git
+pkg_simple_bridge_repo = https://github.com/nitrogen/simple_bridge
+pkg_simple_bridge_commit = master
+
+PACKAGES += simple_oauth2
+pkg_simple_oauth2_name = simple_oauth2
+pkg_simple_oauth2_description = Simple erlang OAuth2 client module for any http server framework (Google, Facebook, Yandex, Vkontakte are preconfigured)
+pkg_simple_oauth2_homepage = https://github.com/virtan/simple_oauth2
+pkg_simple_oauth2_fetch = git
+pkg_simple_oauth2_repo = https://github.com/virtan/simple_oauth2
+pkg_simple_oauth2_commit = master
+
+PACKAGES += skel
+pkg_skel_name = skel
+pkg_skel_description = A Streaming Process-based Skeleton Library for Erlang
+pkg_skel_homepage = https://github.com/ParaPhrase/skel
+pkg_skel_fetch = git
+pkg_skel_repo = https://github.com/ParaPhrase/skel
+pkg_skel_commit = master
+
+PACKAGES += slack
+pkg_slack_name = slack
+pkg_slack_description = Minimal slack notification OTP library.
+pkg_slack_homepage = https://github.com/DonBranson/slack
+pkg_slack_fetch = git
+pkg_slack_repo = https://github.com/DonBranson/slack.git
+pkg_slack_commit = master
+
+PACKAGES += smother
+pkg_smother_name = smother
+pkg_smother_description = Extended code coverage metrics for Erlang.
+pkg_smother_homepage = https://ramsay-t.github.io/Smother/
+pkg_smother_fetch = git
+pkg_smother_repo = https://github.com/ramsay-t/Smother
+pkg_smother_commit = master
+
+PACKAGES += snappyer
+pkg_snappyer_name = snappyer
+pkg_snappyer_description = Snappy as nif for Erlang
+pkg_snappyer_homepage = https://github.com/zmstone/snappyer
+pkg_snappyer_fetch = git
+pkg_snappyer_repo = https://github.com/zmstone/snappyer.git
+pkg_snappyer_commit = master
+
+PACKAGES += social
+pkg_social_name = social
+pkg_social_description = Cowboy handler for social login via OAuth2 providers
+pkg_social_homepage = https://github.com/dvv/social
+pkg_social_fetch = git
+pkg_social_repo = https://github.com/dvv/social
+pkg_social_commit = master
+
+PACKAGES += spapi_router
+pkg_spapi_router_name = spapi_router
+pkg_spapi_router_description = Partially-connected Erlang clustering
+pkg_spapi_router_homepage = https://github.com/spilgames/spapi-router
+pkg_spapi_router_fetch = git
+pkg_spapi_router_repo = https://github.com/spilgames/spapi-router
+pkg_spapi_router_commit = master
+
+PACKAGES += sqerl
+pkg_sqerl_name = sqerl
+pkg_sqerl_description = An Erlang-flavoured SQL DSL
+pkg_sqerl_homepage = https://github.com/hairyhum/sqerl
+pkg_sqerl_fetch = git
+pkg_sqerl_repo = https://github.com/hairyhum/sqerl
+pkg_sqerl_commit = master
+
+PACKAGES += srly
+pkg_srly_name = srly
+pkg_srly_description = Native Erlang Unix serial interface
+pkg_srly_homepage = https://github.com/msantos/srly
+pkg_srly_fetch = git
+pkg_srly_repo = https://github.com/msantos/srly
+pkg_srly_commit = master
+
+PACKAGES += sshrpc
+pkg_sshrpc_name = sshrpc
+pkg_sshrpc_description = Erlang SSH RPC module (experimental)
+pkg_sshrpc_homepage = https://github.com/jj1bdx/sshrpc
+pkg_sshrpc_fetch = git
+pkg_sshrpc_repo = https://github.com/jj1bdx/sshrpc
+pkg_sshrpc_commit = master
+
+PACKAGES += stable
+pkg_stable_name = stable
+pkg_stable_description = Library of assorted helpers for Cowboy web server.
+pkg_stable_homepage = https://github.com/dvv/stable
+pkg_stable_fetch = git
+pkg_stable_repo = https://github.com/dvv/stable
+pkg_stable_commit = master
+
+PACKAGES += statebox
+pkg_statebox_name = statebox
+pkg_statebox_description = Erlang state monad with merge/conflict-resolution capabilities. Useful for Riak.
+pkg_statebox_homepage = https://github.com/mochi/statebox
+pkg_statebox_fetch = git
+pkg_statebox_repo = https://github.com/mochi/statebox
+pkg_statebox_commit = master
+
+PACKAGES += statebox_riak
+pkg_statebox_riak_name = statebox_riak
+pkg_statebox_riak_description = Convenience library that makes it easier to use statebox with riak, extracted from best practices in our production code at Mochi Media.
+pkg_statebox_riak_homepage = https://github.com/mochi/statebox_riak
+pkg_statebox_riak_fetch = git
+pkg_statebox_riak_repo = https://github.com/mochi/statebox_riak
+pkg_statebox_riak_commit = master
+
+PACKAGES += statman
+pkg_statman_name = statman
+pkg_statman_description = Efficiently collect massive volumes of metrics inside the Erlang VM
+pkg_statman_homepage = https://github.com/knutin/statman
+pkg_statman_fetch = git
+pkg_statman_repo = https://github.com/knutin/statman
+pkg_statman_commit = master
+
+PACKAGES += statsderl
+pkg_statsderl_name = statsderl
+pkg_statsderl_description = StatsD client (erlang)
+pkg_statsderl_homepage = https://github.com/lpgauth/statsderl
+pkg_statsderl_fetch = git
+pkg_statsderl_repo = https://github.com/lpgauth/statsderl
+pkg_statsderl_commit = master
+
+PACKAGES += stdinout_pool
+pkg_stdinout_pool_name = stdinout_pool
+pkg_stdinout_pool_description = stdinout_pool : stuff goes in, stuff goes out. there's never any miscommunication.
+pkg_stdinout_pool_homepage = https://github.com/mattsta/erlang-stdinout-pool
+pkg_stdinout_pool_fetch = git
+pkg_stdinout_pool_repo = https://github.com/mattsta/erlang-stdinout-pool
+pkg_stdinout_pool_commit = master
+
+PACKAGES += stockdb
+pkg_stockdb_name = stockdb
+pkg_stockdb_description = Database for storing Stock Exchange quotes in erlang
+pkg_stockdb_homepage = https://github.com/maxlapshin/stockdb
+pkg_stockdb_fetch = git
+pkg_stockdb_repo = https://github.com/maxlapshin/stockdb
+pkg_stockdb_commit = master
+
+PACKAGES += stripe
+pkg_stripe_name = stripe
+pkg_stripe_description = Erlang interface to the stripe.com API
+pkg_stripe_homepage = https://github.com/mattsta/stripe-erlang
+pkg_stripe_fetch = git
+pkg_stripe_repo = https://github.com/mattsta/stripe-erlang
+pkg_stripe_commit = v1
+
+PACKAGES += subproc
+pkg_subproc_name = subproc
+pkg_subproc_description = unix subprocess manager with {active,once|false} modes
+pkg_subproc_homepage = http://dozzie.jarowit.net/trac/wiki/subproc
+pkg_subproc_fetch = git
+pkg_subproc_repo = https://github.com/dozzie/subproc
+pkg_subproc_commit = v0.1.0
+
+PACKAGES += supervisor3
+pkg_supervisor3_name = supervisor3
+pkg_supervisor3_description = OTP supervisor with additional strategies
+pkg_supervisor3_homepage = https://github.com/klarna/supervisor3
+pkg_supervisor3_fetch = git
+pkg_supervisor3_repo = https://github.com/klarna/supervisor3.git
+pkg_supervisor3_commit = master
+
+PACKAGES += surrogate
+pkg_surrogate_name = surrogate
+pkg_surrogate_description = Proxy server written in erlang. Supports reverse proxy load balancing and forward proxy with http (including CONNECT), socks4, socks5, and transparent proxy modes.
+pkg_surrogate_homepage = https://github.com/skruger/Surrogate
+pkg_surrogate_fetch = git
+pkg_surrogate_repo = https://github.com/skruger/Surrogate
+pkg_surrogate_commit = master
+
+PACKAGES += swab
+pkg_swab_name = swab
+pkg_swab_description = General purpose buffer handling module
+pkg_swab_homepage = https://github.com/crownedgrouse/swab
+pkg_swab_fetch = git
+pkg_swab_repo = https://github.com/crownedgrouse/swab
+pkg_swab_commit = master
+
+PACKAGES += swarm
+pkg_swarm_name = swarm
+pkg_swarm_description = Fast and simple acceptor pool for Erlang
+pkg_swarm_homepage = https://github.com/jeremey/swarm
+pkg_swarm_fetch = git
+pkg_swarm_repo = https://github.com/jeremey/swarm
+pkg_swarm_commit = master
+
+PACKAGES += switchboard
+pkg_switchboard_name = switchboard
+pkg_switchboard_description = A framework for processing email using worker plugins.
+pkg_switchboard_homepage = https://github.com/thusfresh/switchboard
+pkg_switchboard_fetch = git
+pkg_switchboard_repo = https://github.com/thusfresh/switchboard
+pkg_switchboard_commit = master
+
+PACKAGES += syn
+pkg_syn_name = syn
+pkg_syn_description = A global Process Registry and Process Group manager for Erlang.
+pkg_syn_homepage = https://github.com/ostinelli/syn
+pkg_syn_fetch = git
+pkg_syn_repo = https://github.com/ostinelli/syn
+pkg_syn_commit = master
+
+PACKAGES += sync
+pkg_sync_name = sync
+pkg_sync_description = On-the-fly recompiling and reloading in Erlang.
+pkg_sync_homepage = https://github.com/rustyio/sync
+pkg_sync_fetch = git
+pkg_sync_repo = https://github.com/rustyio/sync
+pkg_sync_commit = master
+
+PACKAGES += syntaxerl
+pkg_syntaxerl_name = syntaxerl
+pkg_syntaxerl_description = Syntax checker for Erlang
+pkg_syntaxerl_homepage = https://github.com/ten0s/syntaxerl
+pkg_syntaxerl_fetch = git
+pkg_syntaxerl_repo = https://github.com/ten0s/syntaxerl
+pkg_syntaxerl_commit = master
+
+PACKAGES += syslog
+pkg_syslog_name = syslog
+pkg_syslog_description = Erlang port driver for interacting with syslog via syslog(3)
+pkg_syslog_homepage = https://github.com/Vagabond/erlang-syslog
+pkg_syslog_fetch = git
+pkg_syslog_repo = https://github.com/Vagabond/erlang-syslog
+pkg_syslog_commit = master
+
+PACKAGES += taskforce
+pkg_taskforce_name = taskforce
+pkg_taskforce_description = Erlang worker pools for controlled parallelisation of arbitrary tasks.
+pkg_taskforce_homepage = https://github.com/g-andrade/taskforce
+pkg_taskforce_fetch = git
+pkg_taskforce_repo = https://github.com/g-andrade/taskforce
+pkg_taskforce_commit = master
+
+PACKAGES += tddreloader
+pkg_tddreloader_name = tddreloader
+pkg_tddreloader_description = Shell utility for recompiling, reloading, and testing code as it changes
+pkg_tddreloader_homepage = https://github.com/version2beta/tddreloader
+pkg_tddreloader_fetch = git
+pkg_tddreloader_repo = https://github.com/version2beta/tddreloader
+pkg_tddreloader_commit = master
+
+PACKAGES += tempo
+pkg_tempo_name = tempo
+pkg_tempo_description = NIF-based date and time parsing and formatting for Erlang.
+pkg_tempo_homepage = https://github.com/selectel/tempo
+pkg_tempo_fetch = git
+pkg_tempo_repo = https://github.com/selectel/tempo
+pkg_tempo_commit = master
+
+PACKAGES += ticktick
+pkg_ticktick_name = ticktick
+pkg_ticktick_description = Ticktick is an ID generator for message services.
+pkg_ticktick_homepage = https://github.com/ericliang/ticktick
+pkg_ticktick_fetch = git
+pkg_ticktick_repo = https://github.com/ericliang/ticktick
+pkg_ticktick_commit = master
+
+PACKAGES += tinymq
+pkg_tinymq_name = tinymq
+pkg_tinymq_description = TinyMQ - a diminutive, in-memory message queue
+pkg_tinymq_homepage = https://github.com/ChicagoBoss/tinymq
+pkg_tinymq_fetch = git
+pkg_tinymq_repo = https://github.com/ChicagoBoss/tinymq
+pkg_tinymq_commit = master
+
+PACKAGES += tinymt
+pkg_tinymt_name = tinymt
+pkg_tinymt_description = TinyMT pseudo random number generator for Erlang.
+pkg_tinymt_homepage = https://github.com/jj1bdx/tinymt-erlang
+pkg_tinymt_fetch = git
+pkg_tinymt_repo = https://github.com/jj1bdx/tinymt-erlang
+pkg_tinymt_commit = master
+
+PACKAGES += tirerl
+pkg_tirerl_name = tirerl
+pkg_tirerl_description = Erlang interface to Elastic Search
+pkg_tirerl_homepage = https://github.com/inaka/tirerl
+pkg_tirerl_fetch = git
+pkg_tirerl_repo = https://github.com/inaka/tirerl
+pkg_tirerl_commit = master
+
+PACKAGES += toml
+pkg_toml_name = toml
+pkg_toml_description = TOML (0.4.0) config parser
+pkg_toml_homepage = http://dozzie.jarowit.net/trac/wiki/TOML
+pkg_toml_fetch = git
+pkg_toml_repo = https://github.com/dozzie/toml
+pkg_toml_commit = v0.2.0
+
+PACKAGES += traffic_tools
+pkg_traffic_tools_name = traffic_tools
+pkg_traffic_tools_description = Simple traffic limiting library
+pkg_traffic_tools_homepage = https://github.com/systra/traffic_tools
+pkg_traffic_tools_fetch = git
+pkg_traffic_tools_repo = https://github.com/systra/traffic_tools
+pkg_traffic_tools_commit = master
+
+PACKAGES += trails
+pkg_trails_name = trails
+pkg_trails_description = A couple of improvements over Cowboy Routes
+pkg_trails_homepage = http://inaka.github.io/cowboy-trails/
+pkg_trails_fetch = git
+pkg_trails_repo = https://github.com/inaka/cowboy-trails
+pkg_trails_commit = master
+
+PACKAGES += trane
+pkg_trane_name = trane
+pkg_trane_description = SAX style broken HTML parser in Erlang
+pkg_trane_homepage = https://github.com/massemanet/trane
+pkg_trane_fetch = git
+pkg_trane_repo = https://github.com/massemanet/trane
+pkg_trane_commit = master
+
+PACKAGES += transit
+pkg_transit_name = transit
+pkg_transit_description = transit format for erlang
+pkg_transit_homepage = https://github.com/isaiah/transit-erlang
+pkg_transit_fetch = git
+pkg_transit_repo = https://github.com/isaiah/transit-erlang
+pkg_transit_commit = master
+
+PACKAGES += trie
+pkg_trie_name = trie
+pkg_trie_description = Erlang Trie Implementation
+pkg_trie_homepage = https://github.com/okeuday/trie
+pkg_trie_fetch = git
+pkg_trie_repo = https://github.com/okeuday/trie
+pkg_trie_commit = master
+
+PACKAGES += triq
+pkg_triq_name = triq
+pkg_triq_description = Trifork QuickCheck
+pkg_triq_homepage = https://triq.gitlab.io
+pkg_triq_fetch = git
+pkg_triq_repo = https://gitlab.com/triq/triq.git
+pkg_triq_commit = master
+
+PACKAGES += tunctl
+pkg_tunctl_name = tunctl
+pkg_tunctl_description = Erlang TUN/TAP interface
+pkg_tunctl_homepage = https://github.com/msantos/tunctl
+pkg_tunctl_fetch = git
+pkg_tunctl_repo = https://github.com/msantos/tunctl
+pkg_tunctl_commit = master
+
+PACKAGES += twerl
+pkg_twerl_name = twerl
+pkg_twerl_description = Erlang client for the Twitter Streaming API
+pkg_twerl_homepage = https://github.com/lucaspiller/twerl
+pkg_twerl_fetch = git
+pkg_twerl_repo = https://github.com/lucaspiller/twerl
+pkg_twerl_commit = oauth
+
+PACKAGES += twitter_erlang
+pkg_twitter_erlang_name = twitter_erlang
+pkg_twitter_erlang_description = An Erlang twitter client
+pkg_twitter_erlang_homepage = https://github.com/ngerakines/erlang_twitter
+pkg_twitter_erlang_fetch = git
+pkg_twitter_erlang_repo = https://github.com/ngerakines/erlang_twitter
+pkg_twitter_erlang_commit = master
+
+PACKAGES += ucol_nif
+pkg_ucol_nif_name = ucol_nif
+pkg_ucol_nif_description = ICU based collation Erlang module
+pkg_ucol_nif_homepage = https://github.com/refuge/ucol_nif
+pkg_ucol_nif_fetch = git
+pkg_ucol_nif_repo = https://github.com/refuge/ucol_nif
+pkg_ucol_nif_commit = master
+
+PACKAGES += unicorn
+pkg_unicorn_name = unicorn
+pkg_unicorn_description = Generic configuration server
+pkg_unicorn_homepage = https://github.com/shizzard/unicorn
+pkg_unicorn_fetch = git
+pkg_unicorn_repo = https://github.com/shizzard/unicorn
+pkg_unicorn_commit = master
+
+PACKAGES += unsplit
+pkg_unsplit_name = unsplit
+pkg_unsplit_description = Resolves conflicts in Mnesia after network splits
+pkg_unsplit_homepage = https://github.com/uwiger/unsplit
+pkg_unsplit_fetch = git
+pkg_unsplit_repo = https://github.com/uwiger/unsplit
+pkg_unsplit_commit = master
+
+PACKAGES += uuid
+pkg_uuid_name = uuid
+pkg_uuid_description = Erlang UUID Implementation
+pkg_uuid_homepage = https://github.com/okeuday/uuid
+pkg_uuid_fetch = git
+pkg_uuid_repo = https://github.com/okeuday/uuid
+pkg_uuid_commit = master
+
+PACKAGES += ux
+pkg_ux_name = ux
+pkg_ux_description = Unicode eXtension for Erlang (Strings, Collation)
+pkg_ux_homepage = https://github.com/erlang-unicode/ux
+pkg_ux_fetch = git
+pkg_ux_repo = https://github.com/erlang-unicode/ux
+pkg_ux_commit = master
+
+PACKAGES += vert
+pkg_vert_name = vert
+pkg_vert_description = Erlang binding to libvirt virtualization API
+pkg_vert_homepage = https://github.com/msantos/erlang-libvirt
+pkg_vert_fetch = git
+pkg_vert_repo = https://github.com/msantos/erlang-libvirt
+pkg_vert_commit = master
+
+PACKAGES += verx
+pkg_verx_name = verx
+pkg_verx_description = Erlang implementation of the libvirtd remote protocol
+pkg_verx_homepage = https://github.com/msantos/verx
+pkg_verx_fetch = git
+pkg_verx_repo = https://github.com/msantos/verx
+pkg_verx_commit = master
+
+PACKAGES += vmq_acl
+pkg_vmq_acl_name = vmq_acl
+pkg_vmq_acl_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_acl_homepage = https://verne.mq/
+pkg_vmq_acl_fetch = git
+pkg_vmq_acl_repo = https://github.com/erlio/vmq_acl
+pkg_vmq_acl_commit = master
+
+PACKAGES += vmq_bridge
+pkg_vmq_bridge_name = vmq_bridge
+pkg_vmq_bridge_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_bridge_homepage = https://verne.mq/
+pkg_vmq_bridge_fetch = git
+pkg_vmq_bridge_repo = https://github.com/erlio/vmq_bridge
+pkg_vmq_bridge_commit = master
+
+PACKAGES += vmq_graphite
+pkg_vmq_graphite_name = vmq_graphite
+pkg_vmq_graphite_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_graphite_homepage = https://verne.mq/
+pkg_vmq_graphite_fetch = git
+pkg_vmq_graphite_repo = https://github.com/erlio/vmq_graphite
+pkg_vmq_graphite_commit = master
+
+PACKAGES += vmq_passwd
+pkg_vmq_passwd_name = vmq_passwd
+pkg_vmq_passwd_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_passwd_homepage = https://verne.mq/
+pkg_vmq_passwd_fetch = git
+pkg_vmq_passwd_repo = https://github.com/erlio/vmq_passwd
+pkg_vmq_passwd_commit = master
+
+PACKAGES += vmq_server
+pkg_vmq_server_name = vmq_server
+pkg_vmq_server_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_server_homepage = https://verne.mq/
+pkg_vmq_server_fetch = git
+pkg_vmq_server_repo = https://github.com/erlio/vmq_server
+pkg_vmq_server_commit = master
+
+PACKAGES += vmq_snmp
+pkg_vmq_snmp_name = vmq_snmp
+pkg_vmq_snmp_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_snmp_homepage = https://verne.mq/
+pkg_vmq_snmp_fetch = git
+pkg_vmq_snmp_repo = https://github.com/erlio/vmq_snmp
+pkg_vmq_snmp_commit = master
+
+PACKAGES += vmq_systree
+pkg_vmq_systree_name = vmq_systree
+pkg_vmq_systree_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_systree_homepage = https://verne.mq/
+pkg_vmq_systree_fetch = git
+pkg_vmq_systree_repo = https://github.com/erlio/vmq_systree
+pkg_vmq_systree_commit = master
+
+PACKAGES += vmstats
+pkg_vmstats_name = vmstats
+pkg_vmstats_description = tiny Erlang app that works in conjunction with statsderl in order to generate information on the Erlang VM for graphite logs.
+pkg_vmstats_homepage = https://github.com/ferd/vmstats
+pkg_vmstats_fetch = git
+pkg_vmstats_repo = https://github.com/ferd/vmstats
+pkg_vmstats_commit = master
+
+PACKAGES += walrus
+pkg_walrus_name = walrus
+pkg_walrus_description = Walrus - Mustache-like Templating
+pkg_walrus_homepage = https://github.com/devinus/walrus
+pkg_walrus_fetch = git
+pkg_walrus_repo = https://github.com/devinus/walrus
+pkg_walrus_commit = master
+
+PACKAGES += webmachine
+pkg_webmachine_name = webmachine
+pkg_webmachine_description = A REST-based system for building web applications.
+pkg_webmachine_homepage = https://github.com/basho/webmachine
+pkg_webmachine_fetch = git
+pkg_webmachine_repo = https://github.com/basho/webmachine
+pkg_webmachine_commit = master
+
+PACKAGES += websocket_client
+pkg_websocket_client_name = websocket_client
+pkg_websocket_client_description = Erlang websocket client (ws and wss supported)
+pkg_websocket_client_homepage = https://github.com/jeremyong/websocket_client
+pkg_websocket_client_fetch = git
+pkg_websocket_client_repo = https://github.com/jeremyong/websocket_client
+pkg_websocket_client_commit = master
+
+PACKAGES += worker_pool
+pkg_worker_pool_name = worker_pool
+pkg_worker_pool_description = a simple erlang worker pool
+pkg_worker_pool_homepage = https://github.com/inaka/worker_pool
+pkg_worker_pool_fetch = git
+pkg_worker_pool_repo = https://github.com/inaka/worker_pool
+pkg_worker_pool_commit = master
+
+PACKAGES += wrangler
+pkg_wrangler_name = wrangler
+pkg_wrangler_description = Import of the Wrangler svn repository.
+pkg_wrangler_homepage = http://www.cs.kent.ac.uk/projects/wrangler/Home.html
+pkg_wrangler_fetch = git
+pkg_wrangler_repo = https://github.com/RefactoringTools/wrangler
+pkg_wrangler_commit = master
+
+PACKAGES += wsock
+pkg_wsock_name = wsock
+pkg_wsock_description = Erlang library to build WebSocket clients and servers
+pkg_wsock_homepage = https://github.com/madtrick/wsock
+pkg_wsock_fetch = git
+pkg_wsock_repo = https://github.com/madtrick/wsock
+pkg_wsock_commit = master
+
+PACKAGES += xhttpc
+pkg_xhttpc_name = xhttpc
+pkg_xhttpc_description = Extensible HTTP Client for Erlang
+pkg_xhttpc_homepage = https://github.com/seriyps/xhttpc
+pkg_xhttpc_fetch = git
+pkg_xhttpc_repo = https://github.com/seriyps/xhttpc
+pkg_xhttpc_commit = master
+
+PACKAGES += xref_runner
+pkg_xref_runner_name = xref_runner
+pkg_xref_runner_description = Erlang Xref Runner (inspired by rebar xref)
+pkg_xref_runner_homepage = https://github.com/inaka/xref_runner
+pkg_xref_runner_fetch = git
+pkg_xref_runner_repo = https://github.com/inaka/xref_runner
+pkg_xref_runner_commit = master
+
+PACKAGES += yamerl
+pkg_yamerl_name = yamerl
+pkg_yamerl_description = YAML 1.2 parser in pure Erlang
+pkg_yamerl_homepage = https://github.com/yakaz/yamerl
+pkg_yamerl_fetch = git
+pkg_yamerl_repo = https://github.com/yakaz/yamerl
+pkg_yamerl_commit = master
+
+PACKAGES += yamler
+pkg_yamler_name = yamler
+pkg_yamler_description = libyaml-based yaml loader for Erlang
+pkg_yamler_homepage = https://github.com/goertzenator/yamler
+pkg_yamler_fetch = git
+pkg_yamler_repo = https://github.com/goertzenator/yamler
+pkg_yamler_commit = master
+
+PACKAGES += yaws
+pkg_yaws_name = yaws
+pkg_yaws_description = Yaws webserver
+pkg_yaws_homepage = http://yaws.hyber.org
+pkg_yaws_fetch = git
+pkg_yaws_repo = https://github.com/klacke/yaws
+pkg_yaws_commit = master
+
+PACKAGES += zab_engine
+pkg_zab_engine_name = zab_engine
+pkg_zab_engine_description = ZAB protocol implementation in Erlang
+pkg_zab_engine_homepage = https://github.com/xinmingyao/zab_engine
+pkg_zab_engine_fetch = git
+pkg_zab_engine_repo = https://github.com/xinmingyao/zab_engine
+pkg_zab_engine_commit = master
+
+PACKAGES += zabbix_sender
+pkg_zabbix_sender_name = zabbix_sender
+pkg_zabbix_sender_description = Zabbix trapper for sending data to Zabbix in pure Erlang
+pkg_zabbix_sender_homepage = https://github.com/stalkermn/zabbix_sender
+pkg_zabbix_sender_fetch = git
+pkg_zabbix_sender_repo = https://github.com/stalkermn/zabbix_sender.git
+pkg_zabbix_sender_commit = master
+
+PACKAGES += zeta
+pkg_zeta_name = zeta
+pkg_zeta_description = HTTP access log parser in Erlang
+pkg_zeta_homepage = https://github.com/s1n4/zeta
+pkg_zeta_fetch = git
+pkg_zeta_repo = https://github.com/s1n4/zeta
+pkg_zeta_commit = master
+
+PACKAGES += zippers
+pkg_zippers_name = zippers
+pkg_zippers_description = A library for functional zipper data structures in Erlang.
+pkg_zippers_homepage = https://github.com/ferd/zippers
+pkg_zippers_fetch = git
+pkg_zippers_repo = https://github.com/ferd/zippers
+pkg_zippers_commit = master
+
+PACKAGES += zlists
+pkg_zlists_name = zlists
+pkg_zlists_description = Erlang lazy lists library.
+pkg_zlists_homepage = https://github.com/vjache/erlang-zlists
+pkg_zlists_fetch = git
+pkg_zlists_repo = https://github.com/vjache/erlang-zlists
+pkg_zlists_commit = master
+
+PACKAGES += zraft_lib
+pkg_zraft_lib_name = zraft_lib
+pkg_zraft_lib_description = Erlang raft consensus protocol implementation
+pkg_zraft_lib_homepage = https://github.com/dreyk/zraft_lib
+pkg_zraft_lib_fetch = git
+pkg_zraft_lib_repo = https://github.com/dreyk/zraft_lib
+pkg_zraft_lib_commit = master
+
+PACKAGES += zucchini
+pkg_zucchini_name = zucchini
+pkg_zucchini_description = An Erlang INI parser
+pkg_zucchini_homepage = https://github.com/devinus/zucchini
+pkg_zucchini_fetch = git
+pkg_zucchini_repo = https://github.com/devinus/zucchini
+pkg_zucchini_commit = master
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: search
+
+define pkg_print
+ $(verbose) printf "%s\n" \
+ $(if $(call core_eq,$(1),$(pkg_$(1)_name)),,"Pkg name: $(1)") \
+ "App name: $(pkg_$(1)_name)" \
+ "Description: $(pkg_$(1)_description)" \
+ "Home page: $(pkg_$(1)_homepage)" \
+ "Fetch with: $(pkg_$(1)_fetch)" \
+ "Repository: $(pkg_$(1)_repo)" \
+ "Commit: $(pkg_$(1)_commit)" \
+ ""
+
+endef
+
+search:
+ifdef q
+ $(foreach p,$(PACKAGES), \
+ $(if $(findstring $(call core_lc,$(q)),$(call core_lc,$(pkg_$(p)_name) $(pkg_$(p)_description))), \
+ $(call pkg_print,$(p))))
+else
+ $(foreach p,$(PACKAGES),$(call pkg_print,$(p)))
+endif
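+
+# Example (illustrative): `make search q=pool` prints every package whose
+# name or description contains "pool" (matched case-insensitively), while a
+# plain `make search` lists all known packages.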
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-deps clean-tmp-deps.log
+
+# Configuration.
+
+ifdef OTP_DEPS
+$(warning The variable OTP_DEPS is deprecated in favor of LOCAL_DEPS.)
+endif
+
+IGNORE_DEPS ?=
+export IGNORE_DEPS
+
+APPS_DIR ?= $(CURDIR)/apps
+export APPS_DIR
+
+DEPS_DIR ?= $(CURDIR)/deps
+export DEPS_DIR
+
+REBAR_DEPS_DIR = $(DEPS_DIR)
+export REBAR_DEPS_DIR
+
+REBAR_GIT ?= https://github.com/rebar/rebar
+REBAR_COMMIT ?= 576e12171ab8d69b048b827b92aa65d067deea01
+
+# External "early" plugins (see core/plugins.mk for regular plugins).
+# Both kinds of plugins use the core_dep_plugin macro.
+
+define core_dep_plugin
+ifeq ($(2),$(PROJECT))
+-include $$(patsubst $(PROJECT)/%,%,$(1))
+else
+-include $(DEPS_DIR)/$(1)
+
+$(DEPS_DIR)/$(1): $(DEPS_DIR)/$(2) ;
+endif
+endef
+
+DEP_EARLY_PLUGINS ?=
+
+$(foreach p,$(DEP_EARLY_PLUGINS),\
+ $(eval $(if $(findstring /,$p),\
+ $(call core_dep_plugin,$p,$(firstword $(subst /, ,$p))),\
+ $(call core_dep_plugin,$p/early-plugins.mk,$p))))
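+
+# For example (hypothetical dependency names): DEP_EARLY_PLUGINS = mydep
+# loads $(DEPS_DIR)/mydep/early-plugins.mk, while DEP_EARLY_PLUGINS =
+# mydep/rules.mk loads that specific file from the mydep dependency.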
+
+# Query functions.
+
+query_fetch_method = $(if $(dep_$(1)),$(call _qfm_dep,$(word 1,$(dep_$(1)))),$(call _qfm_pkg,$(1)))
+_qfm_dep = $(if $(dep_fetch_$(1)),$(1),$(if $(IS_DEP),legacy,fail))
+_qfm_pkg = $(if $(pkg_$(1)_fetch),$(pkg_$(1)_fetch),fail)
+
+query_name = $(if $(dep_$(1)),$(1),$(if $(pkg_$(1)_name),$(pkg_$(1)_name),$(1)))
+
+query_repo = $(call _qr,$(1),$(call query_fetch_method,$(1)))
+_qr = $(if $(query_repo_$(2)),$(call query_repo_$(2),$(1)),$(call dep_repo,$(1)))
+
+query_repo_default = $(if $(dep_$(1)),$(word 2,$(dep_$(1))),$(pkg_$(1)_repo))
+query_repo_git = $(patsubst git://github.com/%,https://github.com/%,$(call query_repo_default,$(1)))
+query_repo_git-subfolder = $(call query_repo_git,$(1))
+query_repo_git-submodule = -
+query_repo_hg = $(call query_repo_default,$(1))
+query_repo_svn = $(call query_repo_default,$(1))
+query_repo_cp = $(call query_repo_default,$(1))
+query_repo_ln = $(call query_repo_default,$(1))
+query_repo_hex = https://hex.pm/packages/$(if $(word 3,$(dep_$(1))),$(word 3,$(dep_$(1))),$(1))
+query_repo_fail = -
+query_repo_legacy = -
+
+query_version = $(call _qv,$(1),$(call query_fetch_method,$(1)))
+_qv = $(if $(query_version_$(2)),$(call query_version_$(2),$(1)),$(call dep_commit,$(1)))
+
+query_version_default = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(word 3,$(dep_$(1))),$(pkg_$(1)_commit)))
+query_version_git = $(call query_version_default,$(1))
+query_version_git-subfolder = $(call query_version_git,$(1))
+query_version_git-submodule = -
+query_version_hg = $(call query_version_default,$(1))
+query_version_svn = -
+query_version_cp = -
+query_version_ln = -
+query_version_hex = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(word 2,$(dep_$(1))),$(pkg_$(1)_commit)))
+query_version_fail = -
+query_version_legacy = -
+
+query_extra = $(call _qe,$(1),$(call query_fetch_method,$(1)))
+_qe = $(if $(query_extra_$(2)),$(call query_extra_$(2),$(1)),-)
+
+query_extra_git = -
+query_extra_git-subfolder = $(if $(dep_$(1)),subfolder=$(word 4,$(dep_$(1))),-)
+query_extra_git-submodule = -
+query_extra_hg = -
+query_extra_svn = -
+query_extra_cp = -
+query_extra_ln = -
+query_extra_hex = $(if $(dep_$(1)),package-name=$(word 3,$(dep_$(1))),-)
+query_extra_fail = -
+query_extra_legacy = -
+
+query_absolute_path = $(addprefix $(DEPS_DIR)/,$(call query_name,$(1)))
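+
+# For example, `$(call query_repo,cowboy)` resolves the repository URL of a
+# dependency declared either through a dep_* variable or through the package
+# index above ("cowboy" is only an illustrative name here).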
+
+# Deprecated legacy query functions.
+dep_fetch = $(call query_fetch_method,$(1))
+dep_name = $(call query_name,$(1))
+dep_repo = $(call query_repo_git,$(1))
+dep_commit = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(if $(filter hex,$(word 1,$(dep_$(1)))),$(word 2,$(dep_$(1))),$(word 3,$(dep_$(1)))),$(pkg_$(1)_commit)))
+
+LOCAL_DEPS_DIRS = $(foreach a,$(LOCAL_DEPS),$(if $(wildcard $(APPS_DIR)/$(a)),$(APPS_DIR)/$(a)))
+ALL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(foreach dep,$(filter-out $(IGNORE_DEPS),$(BUILD_DEPS) $(DEPS)),$(call dep_name,$(dep))))
+
+# When we are calling an app directly we don't want to include it here;
+# otherwise it'll be treated both as an app and as a top-level project.
+ALL_APPS_DIRS = $(if $(wildcard $(APPS_DIR)/),$(filter-out $(APPS_DIR),$(shell find $(APPS_DIR) -maxdepth 1 -type d)))
+ifdef ROOT_DIR
+ifndef IS_APP
+ALL_APPS_DIRS := $(filter-out $(APPS_DIR)/$(notdir $(CURDIR)),$(ALL_APPS_DIRS))
+endif
+endif
+
+ifeq ($(filter $(APPS_DIR) $(DEPS_DIR),$(subst :, ,$(ERL_LIBS))),)
+ifeq ($(ERL_LIBS),)
+ ERL_LIBS = $(APPS_DIR):$(DEPS_DIR)
+else
+ ERL_LIBS := $(ERL_LIBS):$(APPS_DIR):$(DEPS_DIR)
+endif
+endif
+export ERL_LIBS
+
+export NO_AUTOPATCH
+
+# Verbosity.
+
+dep_verbose_0 = @echo " DEP $1 ($(call dep_commit,$1))";
+dep_verbose_2 = set -x;
+dep_verbose = $(dep_verbose_$(V))
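+
+# With V=0 (the default) a fetch prints a short " DEP <name> (<commit>)"
+# line; with V=2 the underlying commands are traced via `set -x`.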
+
+# Optimization: don't recompile deps unless truly necessary.
+
+ifndef IS_DEP
+ifneq ($(MAKELEVEL),0)
+$(shell rm -f ebin/dep_built)
+endif
+endif
+
+# Core targets.
+
+ALL_APPS_DIRS_TO_BUILD = $(if $(LOCAL_DEPS_DIRS)$(IS_APP),$(LOCAL_DEPS_DIRS),$(ALL_APPS_DIRS))
+
+apps:: $(ALL_APPS_DIRS) clean-tmp-deps.log | $(ERLANG_MK_TMP)
+# Create ebin directory for all apps to make sure Erlang recognizes them
+# as proper OTP applications when using -include_lib. This is a temporary
+# fix; a proper fix would be to compile apps/* in the right order.
+ifndef IS_APP
+ifneq ($(ALL_APPS_DIRS),)
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ mkdir -p $$dep/ebin; \
+ done
+endif
+endif
+# At the toplevel: if LOCAL_DEPS is defined with at least one local app, only
+# compile that list of apps. Otherwise, compile everything.
+# Within an app: compile all LOCAL_DEPS that are (uncompiled) local apps.
+ifneq ($(ALL_APPS_DIRS_TO_BUILD),)
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS_TO_BUILD); do \
+ if grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/apps.log; then \
+ :; \
+ else \
+ echo $$dep >> $(ERLANG_MK_TMP)/apps.log; \
+ $(MAKE) -C $$dep $(if $(IS_TEST),test-build-app) IS_APP=1; \
+ fi \
+ done
+endif
+
+clean-tmp-deps.log:
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_TMP)/apps.log $(ERLANG_MK_TMP)/deps.log
+endif
+
+# Erlang.mk does not rebuild dependencies after they have been compiled
+# once. A developer working on the top-level project and some of its
+# dependencies at the same time may want to change this behavior (see the
+# examples below). There are two solutions:
+# 1. Set `FULL=1` so that all dependencies are visited and
+# recursively recompiled if necessary.
+# 2. Set `FORCE_REBUILD=` to the specific list of dependencies that
+# should be recompiled (instead of the whole set).
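+#
+# For example (illustrative dependency names):
+#   $ make FULL=1
+#   $ make FORCE_REBUILD="cowlib ranch"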
+
+FORCE_REBUILD ?=
+
+ifeq ($(origin FULL),undefined)
+ifneq ($(strip $(force_rebuild_dep)$(FORCE_REBUILD)),)
+define force_rebuild_dep
+echo "$(FORCE_REBUILD)" | grep -qw "$$(basename "$1")"
+endef
+endif
+endif
+
+ifneq ($(SKIP_DEPS),)
+deps::
+else
+deps:: $(ALL_DEPS_DIRS) apps clean-tmp-deps.log | $(ERLANG_MK_TMP)
+ifneq ($(ALL_DEPS_DIRS),)
+ $(verbose) set -e; for dep in $(ALL_DEPS_DIRS); do \
+ if grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/deps.log; then \
+ :; \
+ else \
+ echo $$dep >> $(ERLANG_MK_TMP)/deps.log; \
+ if [ -z "$(strip $(FULL))" ] $(if $(force_rebuild_dep),&& ! ($(call force_rebuild_dep,$$dep)),) && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ elif [ -f $$dep/GNUmakefile ] || [ -f $$dep/makefile ] || [ -f $$dep/Makefile ]; then \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ else \
+ echo "Error: No Makefile to build dependency $$dep." >&2; \
+ exit 2; \
+ fi \
+ fi \
+ done
+endif
+endif
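+
+# Note: `make SKIP_DEPS=1` turns the deps target into a no-op, which can
+# speed up rebuilds when dependencies are known to be up to date.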
+
+# Deps related targets.
+
+# @todo rename GNUmakefile and makefile into Makefile first, if they exist
+# While the Makefile could also be named GNUmakefile or makefile,
+# in practice only Makefile has been needed so far.
+define dep_autopatch
+ if [ -f $(DEPS_DIR)/$(1)/erlang.mk ]; then \
+ rm -rf $(DEPS_DIR)/$1/ebin/; \
+ $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+ $(call dep_autopatch_erlang_mk,$(1)); \
+ elif [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+ if [ -f $(DEPS_DIR)/$1/rebar.lock ]; then \
+ $(call dep_autopatch2,$1); \
+ elif [ 0 != `grep -c "include ../\w*\.mk" $(DEPS_DIR)/$(1)/Makefile` ]; then \
+ $(call dep_autopatch2,$(1)); \
+ elif [ 0 != `grep -ci "^[^#].*rebar" $(DEPS_DIR)/$(1)/Makefile` ]; then \
+ $(call dep_autopatch2,$(1)); \
+ elif [ -n "`find $(DEPS_DIR)/$(1)/ -type f -name \*.mk -not -name erlang.mk -exec grep -i "^[^#].*rebar" '{}' \;`" ]; then \
+ $(call dep_autopatch2,$(1)); \
+ fi \
+ else \
+ if [ ! -d $(DEPS_DIR)/$(1)/src/ ]; then \
+ $(call dep_autopatch_noop,$(1)); \
+ else \
+ $(call dep_autopatch2,$(1)); \
+ fi \
+ fi
+endef
+
+define dep_autopatch2
+ ! test -f $(DEPS_DIR)/$1/ebin/$1.app || \
+ mv -n $(DEPS_DIR)/$1/ebin/$1.app $(DEPS_DIR)/$1/src/$1.app.src; \
+ rm -f $(DEPS_DIR)/$1/ebin/$1.app; \
+ if [ -f $(DEPS_DIR)/$1/src/$1.app.src.script ]; then \
+ $(call erlang,$(call dep_autopatch_appsrc_script.erl,$(1))); \
+ fi; \
+ $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+ if [ -f $(DEPS_DIR)/$(1)/rebar -o -f $(DEPS_DIR)/$(1)/rebar.config -o -f $(DEPS_DIR)/$(1)/rebar.config.script -o -f $(DEPS_DIR)/$1/rebar.lock ]; then \
+ $(call dep_autopatch_fetch_rebar); \
+ $(call dep_autopatch_rebar,$(1)); \
+ else \
+ $(call dep_autopatch_gen,$(1)); \
+ fi
+endef
+
+define dep_autopatch_noop
+ printf "noop:\n" > $(DEPS_DIR)/$(1)/Makefile
+endef
+
+# Replace "include erlang.mk" with a line that will load the parent Erlang.mk
+# if given. Do it for all 3 possible Makefile file names.
+ifeq ($(NO_AUTOPATCH_ERLANG_MK),)
+define dep_autopatch_erlang_mk
+ for f in Makefile makefile GNUmakefile; do \
+ if [ -f $(DEPS_DIR)/$1/$$f ]; then \
+ sed -i.bak s/'include *erlang.mk'/'include $$(if $$(ERLANG_MK_FILENAME),$$(ERLANG_MK_FILENAME),erlang.mk)'/ $(DEPS_DIR)/$1/$$f; \
+ fi \
+ done
+endef
+else
+define dep_autopatch_erlang_mk
+ :
+endef
+endif
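+
+# Setting NO_AUTOPATCH_ERLANG_MK to any non-empty value (for example on the
+# make command line) leaves the dependency's own `include erlang.mk` intact.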
+
+define dep_autopatch_gen
+ printf "%s\n" \
+ "ERLC_OPTS = +debug_info" \
+ "include ../../erlang.mk" > $(DEPS_DIR)/$(1)/Makefile
+endef
+
+# We use flock/lockf when available to avoid concurrency issues.
+define dep_autopatch_fetch_rebar
+ if command -v flock >/dev/null; then \
+ flock $(ERLANG_MK_TMP)/rebar.lock sh -c "$(call dep_autopatch_fetch_rebar2)"; \
+ elif command -v lockf >/dev/null; then \
+ lockf $(ERLANG_MK_TMP)/rebar.lock sh -c "$(call dep_autopatch_fetch_rebar2)"; \
+ else \
+ $(call dep_autopatch_fetch_rebar2); \
+ fi
+endef
+
+define dep_autopatch_fetch_rebar2
+ if [ ! -d $(ERLANG_MK_TMP)/rebar ]; then \
+ git clone -q -n -- $(REBAR_GIT) $(ERLANG_MK_TMP)/rebar; \
+ cd $(ERLANG_MK_TMP)/rebar; \
+ git checkout -q $(REBAR_COMMIT); \
+ ./bootstrap; \
+ cd -; \
+ fi
+endef
+
+define dep_autopatch_rebar
+ if [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+ mv $(DEPS_DIR)/$(1)/Makefile $(DEPS_DIR)/$(1)/Makefile.orig.mk; \
+ fi; \
+ $(call erlang,$(call dep_autopatch_rebar.erl,$(1))); \
+ rm -f $(DEPS_DIR)/$(1)/ebin/$(1).app
+endef
+
+define dep_autopatch_rebar.erl
+ application:load(rebar),
+ application:set_env(rebar, log_level, debug),
+ rmemo:start(),
+ Conf1 = case file:consult("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config)") of
+ {ok, Conf0} -> Conf0;
+ _ -> []
+ end,
+ {Conf, OsEnv} = fun() ->
+ case filelib:is_file("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)") of
+ false -> {Conf1, []};
+ true ->
+ Bindings0 = erl_eval:new_bindings(),
+ Bindings1 = erl_eval:add_binding('CONFIG', Conf1, Bindings0),
+ Bindings = erl_eval:add_binding('SCRIPT', "$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings1),
+ Before = os:getenv(),
+ {ok, Conf2} = file:script("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings),
+ {Conf2, lists:foldl(fun(E, Acc) -> lists:delete(E, Acc) end, os:getenv(), Before)}
+ end
+ end(),
+ Write = fun (Text) ->
+ file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/Makefile)", Text, [append])
+ end,
+ Escape = fun (Text) ->
+ re:replace(Text, "\\\\$$", "\$$$$", [global, {return, list}])
+ end,
+ Write("IGNORE_DEPS += edown eper eunit_formatters meck node_package "
+ "rebar_lock_deps_plugin rebar_vsn_plugin reltool_util\n"),
+ Write("C_SRC_DIR = /path/do/not/exist\n"),
+ Write("C_SRC_TYPE = rebar\n"),
+ Write("DRV_CFLAGS = -fPIC\nexport DRV_CFLAGS\n"),
+ Write(["ERLANG_ARCH = ", rebar_utils:wordsize(), "\nexport ERLANG_ARCH\n"]),
+ ToList = fun
+ (V) when is_atom(V) -> atom_to_list(V);
+ (V) when is_list(V) -> "'\\"" ++ V ++ "\\"'"
+ end,
+ fun() ->
+ Write("ERLC_OPTS = +debug_info\nexport ERLC_OPTS\n"),
+ case lists:keyfind(erl_opts, 1, Conf) of
+ false -> ok;
+ {_, ErlOpts} ->
+ lists:foreach(fun
+ ({d, D}) ->
+ Write("ERLC_OPTS += -D" ++ ToList(D) ++ "=1\n");
+ ({d, DKey, DVal}) ->
+ Write("ERLC_OPTS += -D" ++ ToList(DKey) ++ "=" ++ ToList(DVal) ++ "\n");
+ ({i, I}) ->
+ Write(["ERLC_OPTS += -I ", I, "\n"]);
+ ({platform_define, Regex, D}) ->
+ case rebar_utils:is_arch(Regex) of
+ true -> Write("ERLC_OPTS += -D" ++ ToList(D) ++ "=1\n");
+ false -> ok
+ end;
+ ({parse_transform, PT}) ->
+ Write("ERLC_OPTS += +'{parse_transform, " ++ ToList(PT) ++ "}'\n");
+ (_) -> ok
+ end, ErlOpts)
+ end,
+ Write("\n")
+ end(),
+ GetHexVsn = fun(N, NP) ->
+ case file:consult("$(call core_native_path,$(DEPS_DIR)/$1/rebar.lock)") of
+ {ok, Lock} ->
+ io:format("~p~n", [Lock]),
+ case lists:keyfind("1.1.0", 1, Lock) of
+ {_, LockPkgs} ->
+ io:format("~p~n", [LockPkgs]),
+ case lists:keyfind(atom_to_binary(N, latin1), 1, LockPkgs) of
+ {_, {pkg, _, Vsn}, _} ->
+ io:format("~p~n", [Vsn]),
+ {N, {hex, NP, binary_to_list(Vsn)}};
+ _ ->
+ false
+ end;
+ _ ->
+ false
+ end;
+ _ ->
+ false
+ end
+ end,
+ SemVsn = fun
+ ("~>" ++ S0) ->
+ S = case S0 of
+ " " ++ S1 -> S1;
+ _ -> S0
+ end,
+ case length([ok || $$. <- S]) of
+ 0 -> S ++ ".0.0";
+ 1 -> S ++ ".0";
+ _ -> S
+ end;
+ (S) -> S
+ end,
+ fun() ->
+ File = case lists:keyfind(deps, 1, Conf) of
+ false -> [];
+ {_, Deps} ->
+ [begin case case Dep of
+ N when is_atom(N) -> GetHexVsn(N, N);
+ {N, S} when is_atom(N), is_list(S) -> {N, {hex, N, SemVsn(S)}};
+ {N, {pkg, NP}} when is_atom(N) -> GetHexVsn(N, NP);
+ {N, S, {pkg, NP}} -> {N, {hex, NP, S}};
+ {N, S} when is_tuple(S) -> {N, S};
+ {N, _, S} -> {N, S};
+ {N, _, S, _} -> {N, S};
+ _ -> false
+ end of
+ false -> ok;
+ {Name, Source} ->
+ {Method, Repo, Commit} = case Source of
+ {hex, NPV, V} -> {hex, V, NPV};
+ {git, R} -> {git, R, master};
+ {M, R, {branch, C}} -> {M, R, C};
+ {M, R, {ref, C}} -> {M, R, C};
+ {M, R, {tag, C}} -> {M, R, C};
+ {M, R, C} -> {M, R, C}
+ end,
+ Write(io_lib:format("DEPS += ~s\ndep_~s = ~s ~s ~s~n", [Name, Name, Method, Repo, Commit]))
+ end end || Dep <- Deps]
+ end
+ end(),
+ fun() ->
+ case lists:keyfind(erl_first_files, 1, Conf) of
+ false -> ok;
+ {_, Files} ->
+ Names = [[" ", case lists:reverse(F) of
+ "lre." ++ Elif -> lists:reverse(Elif);
+ "lrx." ++ Elif -> lists:reverse(Elif);
+ "lry." ++ Elif -> lists:reverse(Elif);
+ Elif -> lists:reverse(Elif)
+ end] || "src/" ++ F <- Files],
+ Write(io_lib:format("COMPILE_FIRST +=~s\n", [Names]))
+ end
+ end(),
+ Write("\n\nrebar_dep: preprocess pre-deps deps pre-app app\n"),
+ Write("\npreprocess::\n"),
+ Write("\npre-deps::\n"),
+ Write("\npre-app::\n"),
+ PatchHook = fun(Cmd) ->
+ Cmd2 = re:replace(Cmd, "^([g]?make)(.*)( -C.*)", "\\\\1\\\\3\\\\2", [{return, list}]),
+ case Cmd2 of
+ "make -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1);
+ "gmake -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1);
+ "make " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1);
+ "gmake " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1);
+ _ -> Escape(Cmd)
+ end
+ end,
+ fun() ->
+ case lists:keyfind(pre_hooks, 1, Conf) of
+ false -> ok;
+ {_, Hooks} ->
+ [case H of
+ {'get-deps', Cmd} ->
+ Write("\npre-deps::\n\t" ++ PatchHook(Cmd) ++ "\n");
+ {compile, Cmd} ->
+ Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n");
+ {Regex, compile, Cmd} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n");
+ false -> ok
+ end;
+ _ -> ok
+ end || H <- Hooks]
+ end
+ end(),
+ ShellToMk = fun(V0) ->
+ V1 = re:replace(V0, "[$$][(]", "$$\(shell ", [global]),
+ V = re:replace(V1, "([$$])(?![(])(\\\\w*)", "\\\\1(\\\\2)", [global]),
+ re:replace(V, "-Werror\\\\b", "", [{return, list}, global])
+ end,
+ PortSpecs = fun() ->
+ case lists:keyfind(port_specs, 1, Conf) of
+ false ->
+ case filelib:is_dir("$(call core_native_path,$(DEPS_DIR)/$1/c_src)") of
+ false -> [];
+ true ->
+ [{"priv/" ++ proplists:get_value(so_name, Conf, "$(1)_drv.so"),
+ proplists:get_value(port_sources, Conf, ["c_src/*.c"]), []}]
+ end;
+ {_, Specs} ->
+ lists:flatten([case S of
+ {Output, Input} -> {ShellToMk(Output), Input, []};
+ {Regex, Output, Input} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {ShellToMk(Output), Input, []};
+ false -> []
+ end;
+ {Regex, Output, Input, [{env, Env}]} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {ShellToMk(Output), Input, Env};
+ false -> []
+ end
+ end || S <- Specs])
+ end
+ end(),
+ PortSpecWrite = fun (Text) ->
+ file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/c_src/Makefile.erlang.mk)", Text, [append])
+ end,
+ case PortSpecs of
+ [] -> ok;
+ _ ->
+ Write("\npre-app::\n\t@$$\(MAKE) --no-print-directory -f c_src/Makefile.erlang.mk\n"),
+ PortSpecWrite(io_lib:format("ERL_CFLAGS ?= -finline-functions -Wall -fPIC -I \\"~s/erts-~s/include\\" -I \\"~s\\"\n",
+ [code:root_dir(), erlang:system_info(version), code:lib_dir(erl_interface, include)])),
+ PortSpecWrite(io_lib:format("ERL_LDFLAGS ?= -L \\"~s\\" -lei\n",
+ [code:lib_dir(erl_interface, lib)])),
+ [PortSpecWrite(["\n", E, "\n"]) || E <- OsEnv],
+ FilterEnv = fun(Env) ->
+ lists:flatten([case E of
+ {_, _} -> E;
+ {Regex, K, V} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {K, V};
+ false -> []
+ end
+ end || E <- Env])
+ end,
+ MergeEnv = fun(Env) ->
+ lists:foldl(fun ({K, V}, Acc) ->
+ case lists:keyfind(K, 1, Acc) of
+ false -> [{K, rebar_utils:expand_env_variable(V, K, "")}|Acc];
+ {_, V0} -> [{K, rebar_utils:expand_env_variable(V, K, V0)}|Acc]
+ end
+ end, [], Env)
+ end,
+ PortEnv = case lists:keyfind(port_env, 1, Conf) of
+ false -> [];
+ {_, PortEnv0} -> FilterEnv(PortEnv0)
+ end,
+ PortSpec = fun ({Output, Input0, Env}) ->
+ filelib:ensure_dir("$(call core_native_path,$(DEPS_DIR)/$1/)" ++ Output),
+ Input = [[" ", I] || I <- Input0],
+ PortSpecWrite([
+ [["\n", K, " = ", ShellToMk(V)] || {K, V} <- lists:reverse(MergeEnv(PortEnv))],
+ case $(PLATFORM) of
+ darwin -> "\n\nLDFLAGS += -flat_namespace -undefined suppress";
+ _ -> ""
+ end,
+ "\n\nall:: ", Output, "\n\t@:\n\n",
+ "%.o: %.c\n\t$$\(CC) -c -o $$\@ $$\< $$\(CFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.C\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.cc\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.cpp\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ [[Output, ": ", K, " += ", ShellToMk(V), "\n"] || {K, V} <- lists:reverse(MergeEnv(FilterEnv(Env)))],
+ Output, ": $$\(foreach ext,.c .C .cc .cpp,",
+ "$$\(patsubst %$$\(ext),%.o,$$\(filter %$$\(ext),$$\(wildcard", Input, "))))\n",
+ "\t$$\(CC) -o $$\@ $$\? $$\(LDFLAGS) $$\(ERL_LDFLAGS) $$\(DRV_LDFLAGS) $$\(EXE_LDFLAGS)",
+ case {filename:extension(Output), $(PLATFORM)} of
+ {[], _} -> "\n";
+ {_, darwin} -> "\n";
+ _ -> " -shared\n"
+ end])
+ end,
+ [PortSpec(S) || S <- PortSpecs]
+ end,
+ fun() ->
+ case lists:keyfind(plugins, 1, Conf) of
+ false -> ok;
+ {_, Plugins0} ->
+ Plugins = [P || P <- Plugins0, is_tuple(P)],
+ case lists:keyfind('lfe-compile', 1, Plugins) of
+ false -> ok;
+ _ -> Write("\nBUILD_DEPS = lfe lfe.mk\ndep_lfe.mk = git https://github.com/ninenines/lfe.mk master\nDEP_PLUGINS = lfe.mk\n")
+ end
+ end
+ end(),
+ Write("\ninclude $$\(if $$\(ERLANG_MK_FILENAME),$$\(ERLANG_MK_FILENAME),erlang.mk)"),
+ RunPlugin = fun(Plugin, Step) ->
+ case erlang:function_exported(Plugin, Step, 2) of
+ false -> ok;
+ true ->
+ c:cd("$(call core_native_path,$(DEPS_DIR)/$1/)"),
+ Ret = Plugin:Step({config, "", Conf, dict:new(), dict:new(), dict:new(),
+ dict:store(base_dir, "", dict:new())}, undefined),
+ io:format("rebar plugin ~p step ~p ret ~p~n", [Plugin, Step, Ret])
+ end
+ end,
+ fun() ->
+ case lists:keyfind(plugins, 1, Conf) of
+ false -> ok;
+ {_, Plugins0} ->
+ Plugins = [P || P <- Plugins0, is_atom(P)],
+ [begin
+ case lists:keyfind(deps, 1, Conf) of
+ false -> ok;
+ {_, Deps} ->
+ case lists:keyfind(P, 1, Deps) of
+ false -> ok;
+ _ ->
+ Path = "$(call core_native_path,$(DEPS_DIR)/)" ++ atom_to_list(P),
+ io:format("~s", [os:cmd("$(MAKE) -C $(call core_native_path,$(DEPS_DIR)/$1) " ++ Path)]),
+ io:format("~s", [os:cmd("$(MAKE) -C " ++ Path ++ " IS_DEP=1")]),
+ code:add_patha(Path ++ "/ebin")
+ end
+ end
+ end || P <- Plugins],
+ [case code:load_file(P) of
+ {module, P} -> ok;
+ _ ->
+ case lists:keyfind(plugin_dir, 1, Conf) of
+ false -> ok;
+ {_, PluginsDir} ->
+ ErlFile = "$(call core_native_path,$(DEPS_DIR)/$1/)" ++ PluginsDir ++ "/" ++ atom_to_list(P) ++ ".erl",
+ {ok, P, Bin} = compile:file(ErlFile, [binary]),
+ {module, P} = code:load_binary(P, ErlFile, Bin)
+ end
+ end || P <- Plugins],
+ [RunPlugin(P, preprocess) || P <- Plugins],
+ [RunPlugin(P, pre_compile) || P <- Plugins],
+ [RunPlugin(P, compile) || P <- Plugins]
+ end
+ end(),
+ halt()
+endef
+
+define dep_autopatch_appsrc_script.erl
+ AppSrc = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+ AppSrcScript = AppSrc ++ ".script",
+ {ok, Conf0} = file:consult(AppSrc),
+ Bindings0 = erl_eval:new_bindings(),
+ Bindings1 = erl_eval:add_binding('CONFIG', Conf0, Bindings0),
+ Bindings = erl_eval:add_binding('SCRIPT', AppSrcScript, Bindings1),
+ Conf = case file:script(AppSrcScript, Bindings) of
+ {ok, [C]} -> C;
+ {ok, C} -> C
+ end,
+ ok = file:write_file(AppSrc, io_lib:format("~p.~n", [Conf])),
+ halt()
+endef
+
+define dep_autopatch_appsrc.erl
+ AppSrcOut = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+ AppSrcIn = case filelib:is_regular(AppSrcOut) of false -> "$(call core_native_path,$(DEPS_DIR)/$1/ebin/$1.app)"; true -> AppSrcOut end,
+ case filelib:is_regular(AppSrcIn) of
+ false -> ok;
+ true ->
+ {ok, [{application, $(1), L0}]} = file:consult(AppSrcIn),
+ L1 = lists:keystore(modules, 1, L0, {modules, []}),
+ L2 = case lists:keyfind(vsn, 1, L1) of
+ {_, git} -> lists:keyreplace(vsn, 1, L1, {vsn, lists:droplast(os:cmd("git -C $(DEPS_DIR)/$1 describe --dirty --tags --always"))});
+ {_, {cmd, _}} -> lists:keyreplace(vsn, 1, L1, {vsn, "cmd"});
+ _ -> L1
+ end,
+ L3 = case lists:keyfind(registered, 1, L2) of false -> [{registered, []}|L2]; _ -> L2 end,
+ ok = file:write_file(AppSrcOut, io_lib:format("~p.~n", [{application, $(1), L3}])),
+ case AppSrcOut of AppSrcIn -> ok; _ -> ok = file:delete(AppSrcIn) end
+ end,
+ halt()
+endef
+
+define dep_fetch_git
+ git clone -q -n -- $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && git checkout -q $(call dep_commit,$(1));
+endef
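+
+# For instance, a git dependency names the fetch method, the repository
+# and the commit or tag to check out (values below are illustrative):
+#
+#   DEPS = cowboy
+#   dep_cowboy = git https://github.com/ninenines/cowboy 2.9.0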
+
+define dep_fetch_git-subfolder
+ mkdir -p $(ERLANG_MK_TMP)/git-subfolder; \
+ git clone -q -n -- $(call dep_repo,$1) \
+ $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1); \
+ cd $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1) \
+ && git checkout -q $(call dep_commit,$1); \
+ ln -s $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1)/$(word 4,$(dep_$(1))) \
+ $(DEPS_DIR)/$(call dep_name,$1);
+endef
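+
+# A git-subfolder dependency additionally names, as the fourth word, the
+# subfolder that gets linked into DEPS_DIR (all values hypothetical):
+#
+#   dep_myapp = git-subfolder https://example.com/monorepo master apps/myapp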
+
+define dep_fetch_git-submodule
+ git submodule update --init -- $(DEPS_DIR)/$1;
+endef
+
+define dep_fetch_hg
+ hg clone -q -U $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && hg update -q $(call dep_commit,$(1));
+endef
+
+define dep_fetch_svn
+ svn checkout -q $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+define dep_fetch_cp
+ cp -R $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+define dep_fetch_ln
+ ln -s $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+# A Hex dependency only specifies a package version, so there is no need
+# to look it up in the Erlang.mk package index.
+define dep_fetch_hex
+ mkdir -p $(ERLANG_MK_TMP)/hex $(DEPS_DIR)/$1; \
+ $(call core_http_get,$(ERLANG_MK_TMP)/hex/$1.tar,\
+ https://repo.hex.pm/tarballs/$(if $(word 3,$(dep_$1)),$(word 3,$(dep_$1)),$1)-$(strip $(word 2,$(dep_$1))).tar); \
+ tar -xOf $(ERLANG_MK_TMP)/hex/$1.tar contents.tar.gz | tar -C $(DEPS_DIR)/$1 -xzf -;
+endef
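+
+# For example, a Hex dependency is declared with its package version and,
+# as an optional third word, a package name when it differs from the
+# application name (name and version below are illustrative):
+#
+#   DEPS = jsx
+#   dep_jsx = hex 3.1.0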
+
+define dep_fetch_fail
+ echo "Error: Unknown or invalid dependency: $(1)." >&2; \
+ exit 78;
+endef
+
+# Kept for compatibility with older Erlang.mk configurations.
+define dep_fetch_legacy
+ $(warning WARNING: '$(1)' dependency configuration uses deprecated format.) \
+ git clone -q -n -- $(word 1,$(dep_$(1))) $(DEPS_DIR)/$(1); \
+ cd $(DEPS_DIR)/$(1) && git checkout -q $(if $(word 2,$(dep_$(1))),$(word 2,$(dep_$(1))),master);
+endef
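+
+# The deprecated format listed only a repository and an optional commit,
+# without a fetch method, for example (repository URL illustrative):
+#
+#   dep_cowboy = https://github.com/ninenines/cowboy 1.0.0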
+
+define dep_target
+$(DEPS_DIR)/$(call dep_name,$1): | $(ERLANG_MK_TMP)
+ $(eval DEP_NAME := $(call dep_name,$1))
+ $(eval DEP_STR := $(if $(filter $1,$(DEP_NAME)),$1,"$1 ($(DEP_NAME))"))
+ $(verbose) if test -d $(APPS_DIR)/$(DEP_NAME); then \
+ echo "Error: Dependency" $(DEP_STR) "conflicts with application found in $(APPS_DIR)/$(DEP_NAME)." >&2; \
+ exit 17; \
+ fi
+ $(verbose) mkdir -p $(DEPS_DIR)
+ $(dep_verbose) $(call dep_fetch_$(strip $(call dep_fetch,$(1))),$(1))
+ $(verbose) if [ -f $(DEPS_DIR)/$(1)/configure.ac -o -f $(DEPS_DIR)/$(1)/configure.in ] \
+ && [ ! -f $(DEPS_DIR)/$(1)/configure ]; then \
+ echo " AUTO " $(DEP_STR); \
+ cd $(DEPS_DIR)/$(1) && autoreconf -Wall -vif -I m4; \
+ fi
+ - $(verbose) if [ -f $(DEPS_DIR)/$(DEP_NAME)/configure ]; then \
+ echo " CONF " $(DEP_STR); \
+ cd $(DEPS_DIR)/$(DEP_NAME) && ./configure; \
+ fi
+ifeq ($(filter $(1),$(NO_AUTOPATCH)),)
+ $(verbose) $$(MAKE) --no-print-directory autopatch-$(DEP_NAME)
+endif
+
+.PHONY: autopatch-$(call dep_name,$1)
+
+autopatch-$(call dep_name,$1)::
+ $(verbose) if [ "$(1)" = "amqp_client" -a "$(RABBITMQ_CLIENT_PATCH)" ]; then \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \
+ echo " PATCH Downloading rabbitmq-codegen"; \
+ git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \
+ fi; \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-server ]; then \
+ echo " PATCH Downloading rabbitmq-server"; \
+ git clone https://github.com/rabbitmq/rabbitmq-server.git $(DEPS_DIR)/rabbitmq-server; \
+ fi; \
+ ln -s $(DEPS_DIR)/amqp_client/deps/rabbit_common-0.0.0 $(DEPS_DIR)/rabbit_common; \
+ elif [ "$(1)" = "rabbit" -a "$(RABBITMQ_SERVER_PATCH)" ]; then \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \
+ echo " PATCH Downloading rabbitmq-codegen"; \
+ git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \
+		fi; \
+ elif [ "$1" = "elixir" -a "$(ELIXIR_PATCH)" ]; then \
+ ln -s lib/elixir/ebin $(DEPS_DIR)/elixir/; \
+ else \
+ $$(call dep_autopatch,$(call dep_name,$1)) \
+ fi
+endef
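+
+# Autopatching can be skipped for individual dependencies by listing
+# them in NO_AUTOPATCH, for example (dependency name hypothetical):
+#
+#   NO_AUTOPATCH = my_raw_dep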
+
+$(foreach dep,$(BUILD_DEPS) $(DEPS),$(eval $(call dep_target,$(dep))))
+
+ifndef IS_APP
+clean:: clean-apps
+
+clean-apps:
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ $(MAKE) -C $$dep clean IS_APP=1; \
+ done
+
+distclean:: distclean-apps
+
+distclean-apps:
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ $(MAKE) -C $$dep distclean IS_APP=1; \
+ done
+endif
+
+ifndef SKIP_DEPS
+distclean:: distclean-deps
+
+distclean-deps:
+ $(gen_verbose) rm -rf $(DEPS_DIR)
+endif
+
+# Forward-declare variables used in core/deps-tools.mk. This is required
+# in case plugins use them.
+
+ERLANG_MK_RECURSIVE_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-deps-list.log
+ERLANG_MK_RECURSIVE_DOC_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-doc-deps-list.log
+ERLANG_MK_RECURSIVE_REL_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-rel-deps-list.log
+ERLANG_MK_RECURSIVE_TEST_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-test-deps-list.log
+ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-shell-deps-list.log
+
+ERLANG_MK_QUERY_DEPS_FILE = $(ERLANG_MK_TMP)/query-deps.log
+ERLANG_MK_QUERY_DOC_DEPS_FILE = $(ERLANG_MK_TMP)/query-doc-deps.log
+ERLANG_MK_QUERY_REL_DEPS_FILE = $(ERLANG_MK_TMP)/query-rel-deps.log
+ERLANG_MK_QUERY_TEST_DEPS_FILE = $(ERLANG_MK_TMP)/query-test-deps.log
+ERLANG_MK_QUERY_SHELL_DEPS_FILE = $(ERLANG_MK_TMP)/query-shell-deps.log
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: clean-app
+
+# Configuration.
+
+ERLC_OPTS ?= -Werror +debug_info +warn_export_vars +warn_shadow_vars \
+ +warn_obsolete_guard # +bin_opt_info +warn_export_all +warn_missing_spec
+COMPILE_FIRST ?=
+COMPILE_FIRST_PATHS = $(addprefix src/,$(addsuffix .erl,$(COMPILE_FIRST)))
+ERLC_EXCLUDE ?=
+ERLC_EXCLUDE_PATHS = $(addprefix src/,$(addsuffix .erl,$(ERLC_EXCLUDE)))
+
+ERLC_ASN1_OPTS ?=
+
+ERLC_MIB_OPTS ?=
+COMPILE_MIB_FIRST ?=
+COMPILE_MIB_FIRST_PATHS = $(addprefix mibs/,$(addsuffix .mib,$(COMPILE_MIB_FIRST)))
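+
+# A project Makefile can override these before including erlang.mk; the
+# values below are only an illustration:
+#
+#   ERLC_OPTS = +debug_info +warn_missing_spec
+#   COMPILE_FIRST = my_behaviour
+#   ERLC_EXCLUDE = my_generated_module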
+
+# Verbosity.
+
+app_verbose_0 = @echo " APP " $(PROJECT);
+app_verbose_2 = set -x;
+app_verbose = $(app_verbose_$(V))
+
+appsrc_verbose_0 = @echo " APP " $(PROJECT).app.src;
+appsrc_verbose_2 = set -x;
+appsrc_verbose = $(appsrc_verbose_$(V))
+
+makedep_verbose_0 = @echo " DEPEND" $(PROJECT).d;
+makedep_verbose_2 = set -x;
+makedep_verbose = $(makedep_verbose_$(V))
+
+erlc_verbose_0 = @echo " ERLC " $(filter-out $(patsubst %,%.erl,$(ERLC_EXCLUDE)),\
+ $(filter %.erl %.core,$(?F)));
+erlc_verbose_2 = set -x;
+erlc_verbose = $(erlc_verbose_$(V))
+
+xyrl_verbose_0 = @echo " XYRL " $(filter %.xrl %.yrl,$(?F));
+xyrl_verbose_2 = set -x;
+xyrl_verbose = $(xyrl_verbose_$(V))
+
+asn1_verbose_0 = @echo " ASN1 " $(filter %.asn1,$(?F));
+asn1_verbose_2 = set -x;
+asn1_verbose = $(asn1_verbose_$(V))
+
+mib_verbose_0 = @echo " MIB " $(filter %.bin %.mib,$(?F));
+mib_verbose_2 = set -x;
+mib_verbose = $(mib_verbose_$(V))
+
+ifneq ($(wildcard src/),)
+
+# Targets.
+
+app:: $(if $(wildcard ebin/test),clean) deps
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d
+ $(verbose) $(MAKE) --no-print-directory app-build
+
+ifeq ($(wildcard src/$(PROJECT_MOD).erl),)
+define app_file
+{application, '$(PROJECT)', [
+ {description, "$(PROJECT_DESCRIPTION)"},
+ {vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP),
+ {id$(comma)$(space)"$(1)"}$(comma))
+ {modules, [$(call comma_list,$(2))]},
+ {registered, []},
+ {applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(foreach dep,$(DEPS),$(call dep_name,$(dep))))]},
+ {env, $(subst \,\\,$(PROJECT_ENV))}$(if $(findstring {,$(PROJECT_APP_EXTRA_KEYS)),$(comma)$(newline)$(tab)$(subst \,\\,$(PROJECT_APP_EXTRA_KEYS)),)
+]}.
+endef
+else
+define app_file
+{application, '$(PROJECT)', [
+ {description, "$(PROJECT_DESCRIPTION)"},
+ {vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP),
+ {id$(comma)$(space)"$(1)"}$(comma))
+ {modules, [$(call comma_list,$(2))]},
+ {registered, [$(call comma_list,$(PROJECT)_sup $(PROJECT_REGISTERED))]},
+ {applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(foreach dep,$(DEPS),$(call dep_name,$(dep))))]},
+ {mod, {$(PROJECT_MOD), []}},
+ {env, $(subst \,\\,$(PROJECT_ENV))}$(if $(findstring {,$(PROJECT_APP_EXTRA_KEYS)),$(comma)$(newline)$(tab)$(subst \,\\,$(PROJECT_APP_EXTRA_KEYS)),)
+]}.
+endef
+endif
+
+app-build: ebin/$(PROJECT).app
+ $(verbose) :
+
+# Source files.
+
+ALL_SRC_FILES := $(sort $(call core_find,src/,*))
+
+ERL_FILES := $(filter %.erl,$(ALL_SRC_FILES))
+CORE_FILES := $(filter %.core,$(ALL_SRC_FILES))
+
+# ASN.1 files.
+
+ifneq ($(wildcard asn1/),)
+ASN1_FILES = $(sort $(call core_find,asn1/,*.asn1))
+ERL_FILES += $(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES))))
+
+define compile_asn1
+ $(verbose) mkdir -p include/
+ $(asn1_verbose) erlc -v -I include/ -o asn1/ +noobj $(ERLC_ASN1_OPTS) $(1)
+ $(verbose) mv asn1/*.erl src/
+ -$(verbose) mv asn1/*.hrl include/
+ $(verbose) mv asn1/*.asn1db include/
+endef
+
+$(PROJECT).d:: $(ASN1_FILES)
+ $(if $(strip $?),$(call compile_asn1,$?))
+endif
+
+# SNMP MIB files.
+
+ifneq ($(wildcard mibs/),)
+MIB_FILES = $(sort $(call core_find,mibs/,*.mib))
+
+$(PROJECT).d:: $(COMPILE_MIB_FIRST_PATHS) $(MIB_FILES)
+ $(verbose) mkdir -p include/ priv/mibs/
+ $(mib_verbose) erlc -v $(ERLC_MIB_OPTS) -o priv/mibs/ -I priv/mibs/ $?
+ $(mib_verbose) erlc -o include/ -- $(addprefix priv/mibs/,$(patsubst %.mib,%.bin,$(notdir $?)))
+endif
+
+# Leex and Yecc files.
+
+XRL_FILES := $(filter %.xrl,$(ALL_SRC_FILES))
+XRL_ERL_FILES = $(addprefix src/,$(patsubst %.xrl,%.erl,$(notdir $(XRL_FILES))))
+ERL_FILES += $(XRL_ERL_FILES)
+
+YRL_FILES := $(filter %.yrl,$(ALL_SRC_FILES))
+YRL_ERL_FILES = $(addprefix src/,$(patsubst %.yrl,%.erl,$(notdir $(YRL_FILES))))
+ERL_FILES += $(YRL_ERL_FILES)
+
+$(PROJECT).d:: $(XRL_FILES) $(YRL_FILES)
+ $(if $(strip $?),$(xyrl_verbose) erlc -v -o src/ $(YRL_ERLC_OPTS) $?)
+
+# Erlang and Core Erlang files.
+
+define makedep.erl
+ E = ets:new(makedep, [bag]),
+ G = digraph:new([acyclic]),
+ ErlFiles = lists:usort(string:tokens("$(ERL_FILES)", " ")),
+ DepsDir = "$(call core_native_path,$(DEPS_DIR))",
+ AppsDir = "$(call core_native_path,$(APPS_DIR))",
+ DepsDirsSrc = "$(if $(wildcard $(DEPS_DIR)/*/src), $(call core_native_path,$(wildcard $(DEPS_DIR)/*/src)))",
+ DepsDirsInc = "$(if $(wildcard $(DEPS_DIR)/*/include), $(call core_native_path,$(wildcard $(DEPS_DIR)/*/include)))",
+ AppsDirsSrc = "$(if $(wildcard $(APPS_DIR)/*/src), $(call core_native_path,$(wildcard $(APPS_DIR)/*/src)))",
+ AppsDirsInc = "$(if $(wildcard $(APPS_DIR)/*/include), $(call core_native_path,$(wildcard $(APPS_DIR)/*/include)))",
+ DepsDirs = lists:usort(string:tokens(DepsDirsSrc++DepsDirsInc, " ")),
+ AppsDirs = lists:usort(string:tokens(AppsDirsSrc++AppsDirsInc, " ")),
+ Modules = [{list_to_atom(filename:basename(F, ".erl")), F} || F <- ErlFiles],
+ Add = fun (Mod, Dep) ->
+ case lists:keyfind(Dep, 1, Modules) of
+ false -> ok;
+ {_, DepFile} ->
+ {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+ ets:insert(E, {ModFile, DepFile}),
+ digraph:add_vertex(G, Mod),
+ digraph:add_vertex(G, Dep),
+ digraph:add_edge(G, Mod, Dep)
+ end
+ end,
+ AddHd = fun (F, Mod, DepFile) ->
+ case file:open(DepFile, [read]) of
+ {error, enoent} ->
+ ok;
+ {ok, Fd} ->
+ {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+ case ets:match(E, {ModFile, DepFile}) of
+ [] ->
+ ets:insert(E, {ModFile, DepFile}),
+ F(F, Fd, Mod,0);
+ _ -> ok
+ end
+ end
+ end,
+ SearchHrl = fun
+ F(_Hrl, []) -> {error,enoent};
+ F(Hrl, [Dir|Dirs]) ->
+ HrlF = filename:join([Dir,Hrl]),
+ case filelib:is_file(HrlF) of
+ true ->
+ {ok, HrlF};
+ false -> F(Hrl,Dirs)
+ end
+ end,
+ Attr = fun
+ (_F, Mod, behavior, Dep) ->
+ Add(Mod, Dep);
+ (_F, Mod, behaviour, Dep) ->
+ Add(Mod, Dep);
+ (_F, Mod, compile, {parse_transform, Dep}) ->
+ Add(Mod, Dep);
+ (_F, Mod, compile, Opts) when is_list(Opts) ->
+ case proplists:get_value(parse_transform, Opts) of
+ undefined -> ok;
+ Dep -> Add(Mod, Dep)
+ end;
+ (F, Mod, include, Hrl) ->
+ case SearchHrl(Hrl, ["src", "include",AppsDir,DepsDir]++AppsDirs++DepsDirs) of
+ {ok, FoundHrl} -> AddHd(F, Mod, FoundHrl);
+ {error, _} -> false
+ end;
+ (F, Mod, include_lib, Hrl) ->
+ case SearchHrl(Hrl, ["src", "include",AppsDir,DepsDir]++AppsDirs++DepsDirs) of
+ {ok, FoundHrl} -> AddHd(F, Mod, FoundHrl);
+ {error, _} -> false
+ end;
+ (F, Mod, import, {Imp, _}) ->
+ IsFile =
+ case lists:keyfind(Imp, 1, Modules) of
+ false -> false;
+ {_, FilePath} -> filelib:is_file(FilePath)
+ end,
+ case IsFile of
+ false -> ok;
+ true -> Add(Mod, Imp)
+ end;
+ (_, _, _, _) -> ok
+ end,
+ MakeDepend = fun
+ (F, Fd, Mod, StartLocation) ->
+ {ok, Filename} = file:pid2name(Fd),
+ case io:parse_erl_form(Fd, undefined, StartLocation) of
+ {ok, AbsData, EndLocation} ->
+ case AbsData of
+ {attribute, _, Key, Value} ->
+ Attr(F, Mod, Key, Value),
+ F(F, Fd, Mod, EndLocation);
+ _ -> F(F, Fd, Mod, EndLocation)
+ end;
+ {eof, _ } -> file:close(Fd);
+ {error, ErrorDescription } ->
+ file:close(Fd);
+ {error, ErrorInfo, ErrorLocation} ->
+ F(F, Fd, Mod, ErrorLocation)
+ end,
+ ok
+ end,
+ [begin
+ Mod = list_to_atom(filename:basename(F, ".erl")),
+ case file:open(F, [read]) of
+ {ok, Fd} -> MakeDepend(MakeDepend, Fd, Mod,0);
+ {error, enoent} -> ok
+ end
+ end || F <- ErlFiles],
+ Depend = sofs:to_external(sofs:relation_to_family(sofs:relation(ets:tab2list(E)))),
+ CompileFirst = [X || X <- lists:reverse(digraph_utils:topsort(G)), [] =/= digraph:in_neighbours(G, X)],
+ TargetPath = fun(Target) ->
+ case lists:keyfind(Target, 1, Modules) of
+ false -> "";
+ {_, DepFile} ->
+ DirSubname = tl(string:tokens(filename:dirname(DepFile), "/")),
+ string:join(DirSubname ++ [atom_to_list(Target)], "/")
+ end
+ end,
+ Output0 = [
+ "# Generated by Erlang.mk. Edit at your own risk!\n\n",
+ [[F, "::", [[" ", D] || D <- Deps], "; @touch \$$@\n"] || {F, Deps} <- Depend],
+ "\nCOMPILE_FIRST +=", [[" ", TargetPath(CF)] || CF <- CompileFirst], "\n"
+ ],
+ Output = case "é" of
+ [233] -> unicode:characters_to_binary(Output0);
+ _ -> Output0
+ end,
+ ok = file:write_file("$(1)", Output),
+ halt()
+endef
+
+ifeq ($(if $(NO_MAKEDEP),$(wildcard $(PROJECT).d),),)
+$(PROJECT).d:: $(ERL_FILES) $(call core_find,include/,*.hrl) $(MAKEFILE_LIST)
+ $(makedep_verbose) $(call erlang,$(call makedep.erl,$@))
+endif
+
+ifeq ($(IS_APP)$(IS_DEP),)
+ifneq ($(words $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES)),0)
+# Rebuild everything when the Makefile changes.
+$(ERLANG_MK_TMP)/last-makefile-change: $(MAKEFILE_LIST) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES); \
+ touch -c $(PROJECT).d; \
+ fi
+ $(verbose) touch $@
+
+$(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES):: $(ERLANG_MK_TMP)/last-makefile-change
+ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change
+endif
+endif
+
+$(PROJECT).d::
+ $(verbose) :
+
+include $(wildcard $(PROJECT).d)
+
+ebin/$(PROJECT).app:: ebin/
+
+ebin/:
+ $(verbose) mkdir -p ebin/
+
+define compile_erl
+ $(erlc_verbose) erlc -v $(if $(IS_DEP),$(filter-out -Werror,$(ERLC_OPTS)),$(ERLC_OPTS)) -o ebin/ \
+ -pa ebin/ -I include/ $(filter-out $(ERLC_EXCLUDE_PATHS),$(COMPILE_FIRST_PATHS) $(1))
+endef
+
+define validate_app_file
+ case file:consult("ebin/$(PROJECT).app") of
+ {ok, _} -> halt();
+ _ -> halt(1)
+ end
+endef
+
+ebin/$(PROJECT).app:: $(ERL_FILES) $(CORE_FILES) $(wildcard src/$(PROJECT).app.src)
+ $(eval FILES_TO_COMPILE := $(filter-out src/$(PROJECT).app.src,$?))
+ $(if $(strip $(FILES_TO_COMPILE)),$(call compile_erl,$(FILES_TO_COMPILE)))
+# Older git versions do not have the --first-parent flag. Do without in that case.
+ $(eval GITDESCRIBE := $(shell git describe --dirty --abbrev=7 --tags --always --first-parent 2>/dev/null \
+ || git describe --dirty --abbrev=7 --tags --always 2>/dev/null || true))
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(filter-out $(ERLC_EXCLUDE_PATHS),$(ERL_FILES) $(CORE_FILES) $(BEAM_FILES)))))))
+ifeq ($(wildcard src/$(PROJECT).app.src),)
+ $(app_verbose) printf '$(subst %,%%,$(subst $(newline),\n,$(subst ','\'',$(call app_file,$(GITDESCRIBE),$(MODULES)))))' \
+ > ebin/$(PROJECT).app
+ $(verbose) if ! $(call erlang,$(call validate_app_file)); then \
+ echo "The .app file produced is invalid. Please verify the value of PROJECT_ENV." >&2; \
+ exit 1; \
+ fi
+else
+ $(verbose) if [ -z "$$(grep -e '^[^%]*{\s*modules\s*,' src/$(PROJECT).app.src)" ]; then \
+ echo "Empty modules entry not found in $(PROJECT).app.src. Please consult the erlang.mk documentation for instructions." >&2; \
+ exit 1; \
+ fi
+ $(appsrc_verbose) cat src/$(PROJECT).app.src \
+ | sed "s/{[[:space:]]*modules[[:space:]]*,[[:space:]]*\[\]}/{modules, \[$(call comma_list,$(MODULES))\]}/" \
+ | sed "s/{id,[[:space:]]*\"git\"}/{id, \"$(subst /,\/,$(GITDESCRIBE))\"}/" \
+ > ebin/$(PROJECT).app
+endif
+ifneq ($(wildcard src/$(PROJECT).appup),)
+ $(verbose) cp src/$(PROJECT).appup ebin/
+endif
+
+clean:: clean-app
+
+clean-app:
+ $(gen_verbose) rm -rf $(PROJECT).d ebin/ priv/mibs/ $(XRL_ERL_FILES) $(YRL_ERL_FILES) \
+ $(addprefix include/,$(patsubst %.mib,%.hrl,$(notdir $(MIB_FILES)))) \
+ $(addprefix include/,$(patsubst %.asn1,%.hrl,$(notdir $(ASN1_FILES)))) \
+ $(addprefix include/,$(patsubst %.asn1,%.asn1db,$(notdir $(ASN1_FILES)))) \
+ $(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES))))
+
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Viktor Söderqvist <viktor@zuiderkwast.se>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: docs-deps
+
+# Configuration.
+
+ALL_DOC_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(DOC_DEPS))
+
+# Targets.
+
+$(foreach dep,$(DOC_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+doc-deps:
+else
+doc-deps: $(ALL_DOC_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_DOC_DEPS_DIRS) ; do $(MAKE) -C $$dep IS_DEP=1; done
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: rel-deps
+
+# Configuration.
+
+ALL_REL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(REL_DEPS))
+
+# Targets.
+
+$(foreach dep,$(REL_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+rel-deps:
+else
+rel-deps: $(ALL_REL_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_REL_DEPS_DIRS) ; do $(MAKE) -C $$dep; done
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: test-deps test-dir test-build clean-test-dir
+
+# Configuration.
+
+TEST_DIR ?= $(CURDIR)/test
+
+ALL_TEST_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(TEST_DEPS))
+
+TEST_ERLC_OPTS ?= +debug_info +warn_export_vars +warn_shadow_vars +warn_obsolete_guard
+TEST_ERLC_OPTS += -DTEST=1
+
+# Targets.
+
+$(foreach dep,$(TEST_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+test-deps:
+else
+test-deps: $(ALL_TEST_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_TEST_DEPS_DIRS) ; do \
+ if [ -z "$(strip $(FULL))" ] && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ else \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ fi \
+ done
+endif
+
+ifneq ($(wildcard $(TEST_DIR)),)
+test-dir: $(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build
+ @:
+
+test_erlc_verbose_0 = @echo " ERLC " $(filter-out $(patsubst %,%.erl,$(ERLC_EXCLUDE)),\
+ $(filter %.erl %.core,$(notdir $(FILES_TO_COMPILE))));
+test_erlc_verbose_2 = set -x;
+test_erlc_verbose = $(test_erlc_verbose_$(V))
+
+define compile_test_erl
+ $(test_erlc_verbose) erlc -v $(TEST_ERLC_OPTS) -o $(TEST_DIR) \
+ -pa ebin/ -I include/ $(1)
+endef
+
+ERL_TEST_FILES = $(call core_find,$(TEST_DIR)/,*.erl)
+$(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build: $(ERL_TEST_FILES) $(MAKEFILE_LIST)
+ $(eval FILES_TO_COMPILE := $(if $(filter $(MAKEFILE_LIST),$?),$(filter $(ERL_TEST_FILES),$^),$?))
+ $(if $(strip $(FILES_TO_COMPILE)),$(call compile_test_erl,$(FILES_TO_COMPILE)) && touch $@)
+endif
+
+test-build:: IS_TEST=1
+test-build:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build:: $(if $(wildcard src),$(if $(wildcard ebin/test),,clean)) $(if $(IS_APP),,deps test-deps)
+# We already compiled everything when IS_APP=1.
+ifndef IS_APP
+ifneq ($(wildcard src),)
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(verbose) $(MAKE) --no-print-directory app-build ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(gen_verbose) touch ebin/test
+endif
+ifneq ($(wildcard $(TEST_DIR)),)
+ $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+endif
+endif
+
+# Roughly the same as test-build, but when IS_APP=1.
+# We only care about compiling the current application.
+ifdef IS_APP
+test-build-app:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build-app:: deps test-deps
+ifneq ($(wildcard src),)
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(verbose) $(MAKE) --no-print-directory app-build ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(gen_verbose) touch ebin/test
+endif
+ifneq ($(wildcard $(TEST_DIR)),)
+ $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+endif
+endif
+
+clean:: clean-test-dir
+
+clean-test-dir:
+ifneq ($(wildcard $(TEST_DIR)/*.beam),)
+ $(gen_verbose) rm -f $(TEST_DIR)/*.beam $(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: rebar.config
+
+# We strip out -Werror because we don't want the build to fail due
+# to warnings when this project is built as a dependency.
+
+compat_prepare_erlc_opts = $(shell echo "$1" | sed 's/, */,/g')
+
+define compat_convert_erlc_opts
+$(if $(filter-out -Werror,$1),\
+ $(if $(findstring +,$1),\
+ $(shell echo $1 | cut -b 2-)))
+endef
+
+define compat_erlc_opts_to_list
+[$(call comma_list,$(foreach o,$(call compat_prepare_erlc_opts,$1),$(call compat_convert_erlc_opts,$o)))]
+endef
+
+define compat_rebar_config
+{deps, [
+$(call comma_list,$(foreach d,$(DEPS),\
+ $(if $(filter hex,$(call dep_fetch,$d)),\
+ {$(call dep_name,$d)$(comma)"$(call dep_repo,$d)"},\
+ {$(call dep_name,$d)$(comma)".*"$(comma){git,"$(call dep_repo,$d)"$(comma)"$(call dep_commit,$d)"}})))
+]}.
+{erl_opts, $(call compat_erlc_opts_to_list,$(ERLC_OPTS))}.
+endef
+
+rebar.config:
+ $(gen_verbose) $(call core_render,compat_rebar_config,rebar.config)
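+
+# For a hypothetical project with DEPS = cowboy, the rendered rebar.config
+# would look roughly like:
+#
+#   {deps, [
+#   {cowboy,".*",{git,"https://github.com/ninenines/cowboy","2.9.0"}}
+#   ]}.
+#   {erl_opts, [debug_info,warn_export_vars,warn_shadow_vars,warn_obsolete_guard]}.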
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter asciideck,$(DEPS) $(DOC_DEPS)),asciideck)
+
+.PHONY: asciidoc asciidoc-guide asciidoc-manual install-asciidoc distclean-asciidoc-guide distclean-asciidoc-manual
+
+# Core targets.
+
+docs:: asciidoc
+
+distclean:: distclean-asciidoc-guide distclean-asciidoc-manual
+
+# Plugin-specific targets.
+
+asciidoc: asciidoc-guide asciidoc-manual
+
+# User guide.
+
+ifeq ($(wildcard doc/src/guide/book.asciidoc),)
+asciidoc-guide:
+else
+asciidoc-guide: distclean-asciidoc-guide doc-deps
+ a2x -v -f pdf doc/src/guide/book.asciidoc && mv doc/src/guide/book.pdf doc/guide.pdf
+ a2x -v -f chunked doc/src/guide/book.asciidoc && mv doc/src/guide/book.chunked/ doc/html/
+
+distclean-asciidoc-guide:
+ $(gen_verbose) rm -rf doc/html/ doc/guide.pdf
+endif
+
+# Man pages.
+
+ASCIIDOC_MANUAL_FILES := $(wildcard doc/src/manual/*.asciidoc)
+
+ifeq ($(ASCIIDOC_MANUAL_FILES),)
+asciidoc-manual:
+else
+
+# Configuration.
+
+MAN_INSTALL_PATH ?= /usr/local/share/man
+MAN_SECTIONS ?= 3 7
+MAN_PROJECT ?= $(shell echo $(PROJECT) | sed 's/^./\U&\E/')
+MAN_VERSION ?= $(PROJECT_VERSION)
+
+# Plugin-specific targets.
+
+define asciidoc2man.erl
+try
+ [begin
+ io:format(" ADOC ~s~n", [F]),
+ ok = asciideck:to_manpage(asciideck:parse_file(F), #{
+ compress => gzip,
+ outdir => filename:dirname(F),
+ extra2 => "$(MAN_PROJECT) $(MAN_VERSION)",
+ extra3 => "$(MAN_PROJECT) Function Reference"
+ })
+ end || F <- [$(shell echo $(addprefix $(comma)\",$(addsuffix \",$1)) | sed 's/^.//')]],
+ halt(0)
+catch C:E ->
+ io:format("Exception ~p:~p~nStacktrace: ~p~n", [C, E, erlang:get_stacktrace()]),
+ halt(1)
+end.
+endef
+
+asciidoc-manual:: doc-deps
+
+asciidoc-manual:: $(ASCIIDOC_MANUAL_FILES)
+ $(gen_verbose) $(call erlang,$(call asciidoc2man.erl,$?))
+ $(verbose) $(foreach s,$(MAN_SECTIONS),mkdir -p doc/man$s/ && mv doc/src/manual/*.$s.gz doc/man$s/;)
+
+install-docs:: install-asciidoc
+
+install-asciidoc: asciidoc-manual
+ $(foreach s,$(MAN_SECTIONS),\
+ mkdir -p $(MAN_INSTALL_PATH)/man$s/ && \
+ install -g `id -g` -o `id -u` -m 0644 doc/man$s/*.gz $(MAN_INSTALL_PATH)/man$s/;)
+
+distclean-asciidoc-manual:
+ $(gen_verbose) rm -rf $(addprefix doc/man,$(MAN_SECTIONS))
+endif
+endif
+
+# Copyright (c) 2014-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: bootstrap bootstrap-lib bootstrap-rel new list-templates
+
+# Core targets.
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Bootstrap targets:" \
+ " bootstrap Generate a skeleton of an OTP application" \
+ " bootstrap-lib Generate a skeleton of an OTP library" \
+ " bootstrap-rel Generate the files needed to build a release" \
+ " new-app in=NAME Create a new local OTP application NAME" \
+ " new-lib in=NAME Create a new local OTP library NAME" \
+ " new t=TPL n=NAME Generate a module NAME based on the template TPL" \
+ " new t=T n=N in=APP Generate a module NAME based on the template TPL in APP" \
+ " list-templates List available templates"
+
+# Bootstrap templates.
+
+define bs_appsrc
+{application, $p, [
+ {description, ""},
+ {vsn, "0.1.0"},
+ {id, "git"},
+ {modules, []},
+ {registered, []},
+ {applications, [
+ kernel,
+ stdlib
+ ]},
+ {mod, {$p_app, []}},
+ {env, []}
+]}.
+endef
+
+define bs_appsrc_lib
+{application, $p, [
+ {description, ""},
+ {vsn, "0.1.0"},
+ {id, "git"},
+ {modules, []},
+ {registered, []},
+ {applications, [
+ kernel,
+ stdlib
+ ]}
+]}.
+endef
+
+# To prevent autocompletion issues with ZSH, we add "include erlang.mk"
+# separately during the actual bootstrap.
+define bs_Makefile
+PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.1.0
+$(if $(SP),
+# Whitespace to be used when creating files from templates.
+SP = $(SP)
+)
+endef
+
+define bs_apps_Makefile
+PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.1.0
+$(if $(SP),
+# Whitespace to be used when creating files from templates.
+SP = $(SP)
+)
+# Make sure we know where the applications are located.
+ROOT_DIR ?= $(call core_relpath,$(dir $(ERLANG_MK_FILENAME)),$(APPS_DIR)/app)
+APPS_DIR ?= ..
+DEPS_DIR ?= $(call core_relpath,$(DEPS_DIR),$(APPS_DIR)/app)
+
+include $$(ROOT_DIR)/erlang.mk
+endef
+
+define bs_app
+-module($p_app).
+-behaviour(application).
+
+-export([start/2]).
+-export([stop/1]).
+
+start(_Type, _Args) ->
+ $p_sup:start_link().
+
+stop(_State) ->
+ ok.
+endef
+
+define bs_relx_config
+{release, {$p_release, "1"}, [$p, sasl, runtime_tools]}.
+{extended_start_script, true}.
+{sys_config, "config/sys.config"}.
+{vm_args, "config/vm.args"}.
+endef
+
+define bs_sys_config
+[
+].
+endef
+
+define bs_vm_args
+-name $p@127.0.0.1
+-setcookie $p
+-heart
+endef
+
+# Normal templates.
+
+define tpl_supervisor
+-module($(n)).
+-behaviour(supervisor).
+
+-export([start_link/0]).
+-export([init/1]).
+
+start_link() ->
+ supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+init([]) ->
+ Procs = [],
+ {ok, {{one_for_one, 1, 5}, Procs}}.
+endef
+
+define tpl_gen_server
+-module($(n)).
+-behaviour(gen_server).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_server.
+-export([init/1]).
+-export([handle_call/3]).
+-export([handle_cast/2]).
+-export([handle_info/2]).
+-export([terminate/2]).
+-export([code_change/3]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_server:start_link(?MODULE, [], []).
+
+%% gen_server.
+
+init([]) ->
+ {ok, #state{}}.
+
+handle_call(_Request, _From, State) ->
+ {reply, ignored, State}.
+
+handle_cast(_Msg, State) ->
+ {noreply, State}.
+
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+endef
+
+define tpl_module
+-module($(n)).
+-export([]).
+endef
+
+define tpl_cowboy_http
+-module($(n)).
+-behaviour(cowboy_http_handler).
+
+-export([init/3]).
+-export([handle/2]).
+-export([terminate/3]).
+
+-record(state, {
+}).
+
+init(_, Req, _Opts) ->
+ {ok, Req, #state{}}.
+
+handle(Req, State=#state{}) ->
+ {ok, Req2} = cowboy_req:reply(200, Req),
+ {ok, Req2, State}.
+
+terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_gen_fsm
+-module($(n)).
+-behaviour(gen_fsm).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_fsm.
+-export([init/1]).
+-export([state_name/2]).
+-export([handle_event/3]).
+-export([state_name/3]).
+-export([handle_sync_event/4]).
+-export([handle_info/3]).
+-export([terminate/3]).
+-export([code_change/4]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_fsm:start_link(?MODULE, [], []).
+
+%% gen_fsm.
+
+init([]) ->
+ {ok, state_name, #state{}}.
+
+state_name(_Event, StateData) ->
+ {next_state, state_name, StateData}.
+
+handle_event(_Event, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+state_name(_Event, _From, StateData) ->
+ {reply, ignored, state_name, StateData}.
+
+handle_sync_event(_Event, _From, StateName, StateData) ->
+ {reply, ignored, StateName, StateData}.
+
+handle_info(_Info, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+terminate(_Reason, _StateName, _StateData) ->
+ ok.
+
+code_change(_OldVsn, StateName, StateData, _Extra) ->
+ {ok, StateName, StateData}.
+endef
+
+define tpl_gen_statem
+-module($(n)).
+-behaviour(gen_statem).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_statem.
+-export([callback_mode/0]).
+-export([init/1]).
+-export([state_name/3]).
+-export([handle_event/4]).
+-export([terminate/3]).
+-export([code_change/4]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_statem:start_link(?MODULE, [], []).
+
+%% gen_statem.
+
+callback_mode() ->
+ state_functions.
+
+init([]) ->
+ {ok, state_name, #state{}}.
+
+state_name(_EventType, _EventData, StateData) ->
+ {next_state, state_name, StateData}.
+
+handle_event(_EventType, _EventData, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+terminate(_Reason, _StateName, _StateData) ->
+ ok.
+
+code_change(_OldVsn, StateName, StateData, _Extra) ->
+ {ok, StateName, StateData}.
+endef
+
+define tpl_cowboy_loop
+-module($(n)).
+-behaviour(cowboy_loop_handler).
+
+-export([init/3]).
+-export([info/3]).
+-export([terminate/3]).
+
+-record(state, {
+}).
+
+init(_, Req, _Opts) ->
+ {loop, Req, #state{}, 5000, hibernate}.
+
+info(_Info, Req, State) ->
+ {loop, Req, State, hibernate}.
+
+terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_cowboy_rest
+-module($(n)).
+
+-export([init/3]).
+-export([content_types_provided/2]).
+-export([get_html/2]).
+
+init(_, _Req, _Opts) ->
+ {upgrade, protocol, cowboy_rest}.
+
+content_types_provided(Req, State) ->
+ {[{{<<"text">>, <<"html">>, '*'}, get_html}], Req, State}.
+
+get_html(Req, State) ->
+ {<<"<html><body>This is REST!</body></html>">>, Req, State}.
+endef
+
+define tpl_cowboy_ws
+-module($(n)).
+-behaviour(cowboy_websocket_handler).
+
+-export([init/3]).
+-export([websocket_init/3]).
+-export([websocket_handle/3]).
+-export([websocket_info/3]).
+-export([websocket_terminate/3]).
+
+-record(state, {
+}).
+
+init(_, _, _) ->
+ {upgrade, protocol, cowboy_websocket}.
+
+websocket_init(_, Req, _Opts) ->
+ Req2 = cowboy_req:compact(Req),
+ {ok, Req2, #state{}}.
+
+websocket_handle({text, Data}, Req, State) ->
+ {reply, {text, Data}, Req, State};
+websocket_handle({binary, Data}, Req, State) ->
+ {reply, {binary, Data}, Req, State};
+websocket_handle(_Frame, Req, State) ->
+ {ok, Req, State}.
+
+websocket_info(_Info, Req, State) ->
+ {ok, Req, State}.
+
+websocket_terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_ranch_protocol
+-module($(n)).
+-behaviour(ranch_protocol).
+
+-export([start_link/4]).
+-export([init/4]).
+
+-type opts() :: [].
+-export_type([opts/0]).
+
+-record(state, {
+ socket :: inet:socket(),
+ transport :: module()
+}).
+
+start_link(Ref, Socket, Transport, Opts) ->
+ Pid = spawn_link(?MODULE, init, [Ref, Socket, Transport, Opts]),
+ {ok, Pid}.
+
+-spec init(ranch:ref(), inet:socket(), module(), opts()) -> ok.
+init(Ref, Socket, Transport, _Opts) ->
+ ok = ranch:accept_ack(Ref),
+ loop(#state{socket=Socket, transport=Transport}).
+
+loop(State) ->
+ loop(State).
+endef
+
+# Plugin-specific targets.
+
+ifndef WS
+ifdef SP
+WS = $(subst a,,a $(wordlist 1,$(SP),a a a a a a a a a a a a a a a a a a a a))
+else
+WS = $(tab)
+endif
+endif
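+
+# For example, to generate files indented with 4 spaces instead of tabs
+# (invocation illustrative):
+#
+#   make -f erlang.mk bootstrap SP=4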
+
+bootstrap:
+ifneq ($(wildcard src/),)
+ $(error Error: src/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(eval n := $(PROJECT)_sup)
+ $(verbose) $(call core_render,bs_Makefile,Makefile)
+ $(verbose) echo "include erlang.mk" >> Makefile
+ $(verbose) mkdir src/
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc,src/$(PROJECT).app.src)
+endif
+ $(verbose) $(call core_render,bs_app,src/$(PROJECT)_app.erl)
+ $(verbose) $(call core_render,tpl_supervisor,src/$(PROJECT)_sup.erl)
+
+bootstrap-lib:
+ifneq ($(wildcard src/),)
+ $(error Error: src/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(verbose) $(call core_render,bs_Makefile,Makefile)
+ $(verbose) echo "include erlang.mk" >> Makefile
+ $(verbose) mkdir src/
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc_lib,src/$(PROJECT).app.src)
+endif
+
+bootstrap-rel:
+ifneq ($(wildcard relx.config),)
+ $(error Error: relx.config already exists)
+endif
+ifneq ($(wildcard config/),)
+ $(error Error: config/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(verbose) $(call core_render,bs_relx_config,relx.config)
+ $(verbose) mkdir config/
+ $(verbose) $(call core_render,bs_sys_config,config/sys.config)
+ $(verbose) $(call core_render,bs_vm_args,config/vm.args)
+
+new-app:
+ifndef in
+ $(error Usage: $(MAKE) new-app in=APP)
+endif
+ifneq ($(wildcard $(APPS_DIR)/$in),)
+ $(error Error: Application $in already exists)
+endif
+ $(eval p := $(in))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(eval n := $(in)_sup)
+ $(verbose) mkdir -p $(APPS_DIR)/$p/src/
+ $(verbose) $(call core_render,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile)
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc,$(APPS_DIR)/$p/src/$p.app.src)
+endif
+ $(verbose) $(call core_render,bs_app,$(APPS_DIR)/$p/src/$p_app.erl)
+ $(verbose) $(call core_render,tpl_supervisor,$(APPS_DIR)/$p/src/$p_sup.erl)
+
+new-lib:
+ifndef in
+ $(error Usage: $(MAKE) new-lib in=APP)
+endif
+ifneq ($(wildcard $(APPS_DIR)/$in),)
+ $(error Error: Application $in already exists)
+endif
+ $(eval p := $(in))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(verbose) mkdir -p $(APPS_DIR)/$p/src/
+ $(verbose) $(call core_render,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile)
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc_lib,$(APPS_DIR)/$p/src/$p.app.src)
+endif
+
+new:
+ifeq ($(wildcard src/)$(in),)
+ $(error Error: src/ directory does not exist)
+endif
+ifndef t
+ $(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP])
+endif
+ifndef n
+ $(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP])
+endif
+ifdef in
+ $(verbose) $(call core_render,tpl_$(t),$(APPS_DIR)/$(in)/src/$(n).erl)
+else
+ $(verbose) $(call core_render,tpl_$(t),src/$(n).erl)
+endif
+
+list-templates:
+ $(verbose) @echo Available templates:
+ $(verbose) printf " %s\n" $(sort $(patsubst tpl_%,%,$(filter tpl_%,$(.VARIABLES))))
+
+# Copyright (c) 2014-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: clean-c_src distclean-c_src-env
+
+# Configuration.
+
+C_SRC_DIR ?= $(CURDIR)/c_src
+C_SRC_ENV ?= $(C_SRC_DIR)/env.mk
+C_SRC_OUTPUT ?= $(CURDIR)/priv/$(PROJECT)
+C_SRC_TYPE ?= shared
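+
+# A NIF or driver keeps the default shared type; a standalone port
+# program would instead set, for instance (output name hypothetical):
+#
+#   C_SRC_TYPE = executable
+#   C_SRC_OUTPUT = $(CURDIR)/priv/my_port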
+
+# System type and C compiler/flags.
+
+ifeq ($(PLATFORM),msys2)
+ C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?= .exe
+ C_SRC_OUTPUT_SHARED_EXTENSION ?= .dll
+else
+ C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?=
+ C_SRC_OUTPUT_SHARED_EXTENSION ?= .so
+endif
+
+ifeq ($(C_SRC_TYPE),shared)
+ C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_SHARED_EXTENSION)
+else
+ C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_EXECUTABLE_EXTENSION)
+endif
+
+ifeq ($(PLATFORM),msys2)
+# We hardcode the compiler used on MSYS2. The default CC=cc does
+# not produce working code. The "gcc" MSYS2 package also doesn't.
+ CC = /mingw64/bin/gcc
+ export CC
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),darwin)
+ CC ?= cc
+ CFLAGS ?= -O3 -std=c99 -arch x86_64 -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -arch x86_64 -Wall
+ LDFLAGS ?= -arch x86_64 -flat_namespace -undefined suppress
+else ifeq ($(PLATFORM),freebsd)
+ CC ?= cc
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),linux)
+ CC ?= gcc
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+endif
+
+ifneq ($(PLATFORM),msys2)
+ CFLAGS += -fPIC
+ CXXFLAGS += -fPIC
+endif
+
+CFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
+CXXFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
+
+LDLIBS += -L"$(ERL_INTERFACE_LIB_DIR)" -lei
+
+# Verbosity.
+
+c_verbose_0 = @echo " C " $(filter-out $(notdir $(MAKEFILE_LIST) $(C_SRC_ENV)),$(^F));
+c_verbose = $(c_verbose_$(V))
+
+cpp_verbose_0 = @echo " CPP " $(filter-out $(notdir $(MAKEFILE_LIST) $(C_SRC_ENV)),$(^F));
+cpp_verbose = $(cpp_verbose_$(V))
+
+link_verbose_0 = @echo " LD " $(@F);
+link_verbose = $(link_verbose_$(V))
+
+# Targets.
+
+ifeq ($(wildcard $(C_SRC_DIR)),)
+else ifneq ($(wildcard $(C_SRC_DIR)/Makefile),)
+app:: app-c_src
+
+test-build:: app-c_src
+
+app-c_src:
+ $(MAKE) -C $(C_SRC_DIR)
+
+clean::
+ $(MAKE) -C $(C_SRC_DIR) clean
+
+else
+
+ifeq ($(SOURCES),)
+SOURCES := $(sort $(foreach pat,*.c *.C *.cc *.cpp,$(call core_find,$(C_SRC_DIR)/,$(pat))))
+endif
+OBJECTS = $(addsuffix .o, $(basename $(SOURCES)))
+
+COMPILE_C = $(c_verbose) $(CC) $(CFLAGS) $(CPPFLAGS) -c
+COMPILE_CPP = $(cpp_verbose) $(CXX) $(CXXFLAGS) $(CPPFLAGS) -c
+
+app:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
+
+test-build:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
+
+$(C_SRC_OUTPUT_FILE): $(OBJECTS)
+ $(verbose) mkdir -p $(dir $@)
+ $(link_verbose) $(CC) $(OBJECTS) \
+ $(LDFLAGS) $(if $(filter $(C_SRC_TYPE),shared),-shared) $(LDLIBS) \
+ -o $(C_SRC_OUTPUT_FILE)
+
+$(OBJECTS): $(MAKEFILE_LIST) $(C_SRC_ENV)
+
+%.o: %.c
+ $(COMPILE_C) $(OUTPUT_OPTION) $<
+
+%.o: %.cc
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+%.o: %.C
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+%.o: %.cpp
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+clean:: clean-c_src
+
+clean-c_src:
+ $(gen_verbose) rm -f $(C_SRC_OUTPUT_FILE) $(OBJECTS)
+
+endif
+
+ifneq ($(wildcard $(C_SRC_DIR)),)
+ERL_ERTS_DIR = $(shell $(ERL) -eval 'io:format("~s~n", [code:lib_dir(erts)]), halt().')
+
+$(C_SRC_ENV):
+ $(verbose) $(ERL) -eval "file:write_file(\"$(call core_native_path,$(C_SRC_ENV))\", \
+ io_lib:format( \
+ \"# Generated by Erlang.mk. Edit at your own risk!~n~n\" \
+ \"ERTS_INCLUDE_DIR ?= ~s/erts-~s/include/~n\" \
+ \"ERL_INTERFACE_INCLUDE_DIR ?= ~s~n\" \
+ \"ERL_INTERFACE_LIB_DIR ?= ~s~n\" \
+ \"ERTS_DIR ?= $(ERL_ERTS_DIR)~n\", \
+ [code:root_dir(), erlang:system_info(version), \
+ code:lib_dir(erl_interface, include), \
+ code:lib_dir(erl_interface, lib)])), \
+ halt()."
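+
+# The generated env.mk then contains values along these lines (paths
+# are illustrative):
+#
+#   ERTS_INCLUDE_DIR ?= /usr/lib/erlang/erts-11.1/include/
+#   ERL_INTERFACE_INCLUDE_DIR ?= /usr/lib/erlang/lib/erl_interface-4.0.1/include
+#   ERL_INTERFACE_LIB_DIR ?= /usr/lib/erlang/lib/erl_interface-4.0.1/lib
+#   ERTS_DIR ?= /usr/lib/erlang/erts-11.1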
+
+distclean:: distclean-c_src-env
+
+distclean-c_src-env:
+ $(gen_verbose) rm -f $(C_SRC_ENV)
+
+-include $(C_SRC_ENV)
+
+ifneq ($(ERL_ERTS_DIR),$(ERTS_DIR))
+$(shell rm -f $(C_SRC_ENV))
+endif
+endif
+
+# Templates.
+
+define bs_c_nif
+#include "erl_nif.h"
+
+static int loads = 0;
+
+static int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
+{
+ /* Initialize private data. */
+ *priv_data = NULL;
+
+ loads++;
+
+ return 0;
+}
+
+static int upgrade(ErlNifEnv* env, void** priv_data, void** old_priv_data, ERL_NIF_TERM load_info)
+{
+ /* Convert the private data to the new version. */
+ *priv_data = *old_priv_data;
+
+ loads++;
+
+ return 0;
+}
+
+static void unload(ErlNifEnv* env, void* priv_data)
+{
+ if (loads == 1) {
+ /* Destroy the private data. */
+ }
+
+ loads--;
+}
+
+static ERL_NIF_TERM hello(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ if (enif_is_atom(env, argv[0])) {
+ return enif_make_tuple2(env,
+ enif_make_atom(env, "hello"),
+ argv[0]);
+ }
+
+ return enif_make_tuple2(env,
+ enif_make_atom(env, "error"),
+ enif_make_atom(env, "badarg"));
+}
+
+static ErlNifFunc nif_funcs[] = {
+ {"hello", 1, hello}
+};
+
+ERL_NIF_INIT($n, nif_funcs, load, NULL, upgrade, unload)
+endef
+
+define bs_erl_nif
+-module($n).
+
+-export([hello/1]).
+
+-on_load(on_load/0).
+on_load() ->
+ PrivDir = case code:priv_dir(?MODULE) of
+ {error, _} ->
+ AppPath = filename:dirname(filename:dirname(code:which(?MODULE))),
+ filename:join(AppPath, "priv");
+ Path ->
+ Path
+ end,
+ erlang:load_nif(filename:join(PrivDir, atom_to_list(?MODULE)), 0).
+
+hello(_) ->
+ erlang:nif_error({not_loaded, ?MODULE}).
+endef
+
+new-nif:
+ifneq ($(wildcard $(C_SRC_DIR)/$n.c),)
+ $(error Error: $(C_SRC_DIR)/$n.c already exists)
+endif
+ifneq ($(wildcard src/$n.erl),)
+ $(error Error: src/$n.erl already exists)
+endif
+ifndef n
+ $(error Usage: $(MAKE) new-nif n=NAME [in=APP])
+endif
+ifdef in
+ $(verbose) $(MAKE) -C $(APPS_DIR)/$(in)/ new-nif n=$n in=
+else
+ $(verbose) mkdir -p $(C_SRC_DIR) src/
+ $(verbose) $(call core_render,bs_c_nif,$(C_SRC_DIR)/$n.c)
+ $(verbose) $(call core_render,bs_erl_nif,src/$n.erl)
+endif
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: ci ci-prepare ci-setup
+
+CI_OTP ?=
+CI_HIPE ?=
+CI_ERLLVM ?=
+
+ifeq ($(CI_VM),native)
+ERLC_OPTS += +native
+TEST_ERLC_OPTS += +native
+else ifeq ($(CI_VM),erllvm)
+ERLC_OPTS += +native +'{hipe, [to_llvm]}'
+TEST_ERLC_OPTS += +native +'{hipe, [to_llvm]}'
+endif
+
+ifeq ($(strip $(CI_OTP) $(CI_HIPE) $(CI_ERLLVM)),)
+ci::
+else
+
+ci:: $(addprefix ci-,$(CI_OTP) $(addsuffix -native,$(CI_HIPE)) $(addsuffix -erllvm,$(CI_ERLLVM)))
+
+ci-prepare: $(addprefix $(KERL_INSTALL_DIR)/,$(CI_OTP) $(addsuffix -native,$(CI_HIPE)))
+
+ci-setup::
+ $(verbose) :
+
+ci-extra::
+ $(verbose) :
+
+ci_verbose_0 = @echo " CI " $(1);
+ci_verbose = $(ci_verbose_$(V))
+
+define ci_target
+ci-$1: $(KERL_INSTALL_DIR)/$2
+ $(verbose) $(MAKE) --no-print-directory clean
+ $(ci_verbose) \
+ PATH="$(KERL_INSTALL_DIR)/$2/bin:$(PATH)" \
+ CI_OTP_RELEASE="$1" \
+ CT_OPTS="-label $1" \
+ CI_VM="$3" \
+ $(MAKE) ci-setup tests
+ $(verbose) $(MAKE) --no-print-directory ci-extra
+endef
+
+$(foreach otp,$(CI_OTP),$(eval $(call ci_target,$(otp),$(otp),otp)))
+$(foreach otp,$(CI_HIPE),$(eval $(call ci_target,$(otp)-native,$(otp)-native,native)))
+$(foreach otp,$(CI_ERLLVM),$(eval $(call ci_target,$(otp)-erllvm,$(otp)-native,erllvm)))
+
+$(foreach otp,$(filter-out $(ERLANG_OTP),$(CI_OTP)),$(eval $(call kerl_otp_target,$(otp))))
+$(foreach otp,$(filter-out $(ERLANG_HIPE),$(sort $(CI_HIPE) $(CI_ERLLVM))),$(eval $(call kerl_hipe_target,$(otp))))
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Continuous Integration targets:" \
+ " ci Run '$(MAKE) tests' on all configured Erlang versions." \
+ "" \
+ "The CI_OTP variable must be defined with the Erlang versions" \
+ "that must be tested. For example: CI_OTP = OTP-17.3.4 OTP-17.5.3"
+
+endif
+
+# Copyright (c) 2020, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifdef CONCUERROR_TESTS
+
+.PHONY: concuerror distclean-concuerror
+
+# Configuration
+
+CONCUERROR_LOGS_DIR ?= $(CURDIR)/logs
+CONCUERROR_OPTS ?=
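+
+# CONCUERROR_TESTS lists module:function pairs to check, for example
+# (names hypothetical):
+#
+#   CONCUERROR_TESTS = my_tests:race_free my_tests:ordered_shutdown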
+
+# Core targets.
+
+check:: concuerror
+
+ifndef KEEP_LOGS
+distclean:: distclean-concuerror
+endif
+
+# Plugin-specific targets.
+
+$(ERLANG_MK_TMP)/Concuerror/bin/concuerror: | $(ERLANG_MK_TMP)
+ $(verbose) git clone https://github.com/parapluu/Concuerror $(ERLANG_MK_TMP)/Concuerror
+ $(verbose) $(MAKE) -C $(ERLANG_MK_TMP)/Concuerror
+
+$(CONCUERROR_LOGS_DIR):
+ $(verbose) mkdir -p $(CONCUERROR_LOGS_DIR)
+
+define concuerror_html_report
+<!DOCTYPE html>
+<html lang="en">
+<head>
+<meta charset="utf-8">
+<title>Concuerror HTML report</title>
+</head>
+<body>
+<h1>Concuerror HTML report</h1>
+<p>Generated on $(concuerror_date)</p>
+<ul>
+$(foreach t,$(concuerror_targets),<li><a href="$(t).txt">$(t)</a></li>)
+</ul>
+</body>
+</html>
+endef
+
+concuerror: $(addprefix concuerror-,$(subst :,-,$(CONCUERROR_TESTS)))
+ $(eval concuerror_date := $(shell date))
+ $(eval concuerror_targets := $^)
+ $(verbose) $(call core_render,concuerror_html_report,$(CONCUERROR_LOGS_DIR)/concuerror.html)
+
+define concuerror_target
+.PHONY: concuerror-$1-$2
+
+concuerror-$1-$2: test-build | $(ERLANG_MK_TMP)/Concuerror/bin/concuerror $(CONCUERROR_LOGS_DIR)
+ $(ERLANG_MK_TMP)/Concuerror/bin/concuerror \
+ --pa $(CURDIR)/ebin --pa $(TEST_DIR) \
+ -o $(CONCUERROR_LOGS_DIR)/concuerror-$1-$2.txt \
+ $$(CONCUERROR_OPTS) -m $1 -t $2
+endef
+
+$(foreach test,$(CONCUERROR_TESTS),$(eval $(call concuerror_target,$(firstword $(subst :, ,$(test))),$(lastword $(subst :, ,$(test))))))
+
+distclean-concuerror:
+ $(gen_verbose) rm -rf $(CONCUERROR_LOGS_DIR)
+
+endif
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: ct apps-ct distclean-ct
+
+# Configuration.
+
+CT_OPTS ?=
+
+ifneq ($(wildcard $(TEST_DIR)),)
+ifndef CT_SUITES
+CT_SUITES := $(sort $(subst _SUITE.erl,,$(notdir $(call core_find,$(TEST_DIR)/,*_SUITE.erl))))
+endif
+endif
+CT_SUITES ?=
+CT_LOGS_DIR ?= $(CURDIR)/logs
+
+# Core targets.
+
+tests:: ct
+
+ifndef KEEP_LOGS
+distclean:: distclean-ct
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Common_test targets:" \
+ " ct Run all the common_test suites for this project" \
+ "" \
+		"Each of your common_test suites has an associated target." \
+		"A suite named http_SUITE can be run using the ct-http target."
+
+# Plugin-specific targets.
+
+CT_RUN = ct_run \
+ -no_auto_compile \
+ -noinput \
+ -pa $(CURDIR)/ebin $(TEST_DIR) \
+ -dir $(TEST_DIR) \
+ -logdir $(CT_LOGS_DIR)
+
+ifeq ($(CT_SUITES),)
+ct: $(if $(IS_APP)$(ROOT_DIR),,apps-ct)
+else
+# We do not run tests if we are in an apps/* directory that has no test directory.
+ifneq ($(IS_APP)$(wildcard $(TEST_DIR)),1)
+ct: test-build $(if $(IS_APP)$(ROOT_DIR),,apps-ct)
+ $(verbose) mkdir -p $(CT_LOGS_DIR)
+ $(gen_verbose) $(CT_RUN) -sname ct_$(PROJECT) -suite $(addsuffix _SUITE,$(CT_SUITES)) $(CT_OPTS)
+endif
+endif
+
+ifneq ($(ALL_APPS_DIRS),)
+define ct_app_target
+apps-ct-$1: test-build
+ $$(MAKE) -C $1 ct IS_APP=1
+endef
+
+$(foreach app,$(ALL_APPS_DIRS),$(eval $(call ct_app_target,$(app))))
+
+apps-ct: $(addprefix apps-ct-,$(ALL_APPS_DIRS))
+endif
+
+ifdef t
+ifeq (,$(findstring :,$t))
+CT_EXTRA = -group $t
+else
+t_words = $(subst :, ,$t)
+CT_EXTRA = -group $(firstword $(t_words)) -case $(lastword $(t_words))
+endif
+else
+ifdef c
+CT_EXTRA = -case $c
+else
+CT_EXTRA =
+endif
+endif
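+
+# For example, with a hypothetical http_SUITE containing a chunked group:
+#
+#   make ct-http t=chunked               # run one group
+#   make ct-http t=chunked:te_chunked    # run one case in that group
+#   make ct-http c=te_chunked            # run one case directly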
+
+define ct_suite_target
+ct-$(1): test-build
+ $(verbose) mkdir -p $(CT_LOGS_DIR)
+ $(gen_verbose_esc) $(CT_RUN) -sname ct_$(PROJECT) -suite $(addsuffix _SUITE,$(1)) $(CT_EXTRA) $(CT_OPTS)
+endef
+
+$(foreach test,$(CT_SUITES),$(eval $(call ct_suite_target,$(test))))
+
+distclean-ct:
+ $(gen_verbose) rm -rf $(CT_LOGS_DIR)
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: plt distclean-plt dialyze
+
+# Configuration.
+
+DIALYZER_PLT ?= $(CURDIR)/.$(PROJECT).plt
+export DIALYZER_PLT
+
+PLT_APPS ?=
+DIALYZER_DIRS ?= --src -r $(wildcard src) $(ALL_APPS_DIRS)
+DIALYZER_OPTS ?= -Werror_handling -Wrace_conditions -Wunmatched_returns # -Wunderspecs
+DIALYZER_PLT_OPTS ?=
+
+# Core targets.
+
+check:: dialyze
+
+distclean:: distclean-plt
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Dialyzer targets:" \
+ " plt Build a PLT file for this project" \
+ " dialyze Analyze the project using Dialyzer"
+
+# Plugin-specific targets.
+
+define filter_opts.erl
+ Opts = init:get_plain_arguments(),
+ {Filtered, _} = lists:foldl(fun
+ (O, {Os, true}) -> {[O|Os], false};
+ (O = "-D", {Os, _}) -> {[O|Os], true};
+ (O = [\\$$-, \\$$D, _ | _], {Os, _}) -> {[O|Os], false};
+ (O = "-I", {Os, _}) -> {[O|Os], true};
+ (O = [\\$$-, \\$$I, _ | _], {Os, _}) -> {[O|Os], false};
+ (O = "-pa", {Os, _}) -> {[O|Os], true};
+ (_, Acc) -> Acc
+ end, {[], false}, Opts),
+ io:format("~s~n", [string:join(lists:reverse(Filtered), " ")]),
+ halt().
+endef
+
+# DIALYZER_PLT is a variable understood directly by Dialyzer.
+#
+# We append the path to erts at the end of the PLT. This works
+# because the PLT file is in the external term format and the
+# function binary_to_term/1 ignores any trailing data.
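+#
+# The same behaviour can be observed in isolation (illustrative shell
+# session):
+#
+#   1> Bin = term_to_binary({plt, data}).
+#   2> binary_to_term(<<Bin/binary, "/usr/lib/erlang/lib/erts-11.1">>).
+#   {plt,data}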
+$(DIALYZER_PLT): deps app
+ $(eval DEPS_LOG := $(shell test -f $(ERLANG_MK_TMP)/deps.log && \
+ while read p; do test -d $$p/ebin && echo $$p/ebin; done <$(ERLANG_MK_TMP)/deps.log))
+ $(verbose) dialyzer --build_plt $(DIALYZER_PLT_OPTS) --apps \
+ erts kernel stdlib $(PLT_APPS) $(OTP_DEPS) $(LOCAL_DEPS) $(DEPS_LOG) || test $$? -eq 2
+ $(verbose) $(ERL) -eval 'io:format("~n~s~n", [code:lib_dir(erts)]), halt().' >> $@
+
+plt: $(DIALYZER_PLT)
+
+distclean-plt:
+ $(gen_verbose) rm -f $(DIALYZER_PLT)
+
+ifneq ($(wildcard $(DIALYZER_PLT)),)
+dialyze: $(if $(filter --src,$(DIALYZER_DIRS)),,deps app)
+ $(verbose) if ! tail -n1 $(DIALYZER_PLT) | \
+ grep -q "^`$(ERL) -eval 'io:format("~s", [code:lib_dir(erts)]), halt().'`$$"; then \
+ rm $(DIALYZER_PLT); \
+ $(MAKE) plt; \
+ fi
+else
+dialyze: $(DIALYZER_PLT)
+endif
+ $(verbose) dialyzer --no_native `$(ERL) \
+ -eval "$(subst $(newline),,$(call escape_dquotes,$(call filter_opts.erl)))" \
+ -extra $(ERLC_OPTS)` $(DIALYZER_DIRS) $(DIALYZER_OPTS) $(if $(wildcard ebin/),-pa ebin/)
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-edoc edoc
+
+# Configuration.
+
+EDOC_OPTS ?=
+EDOC_SRC_DIRS ?=
+EDOC_OUTPUT ?= doc
+
+define edoc.erl
+ SrcPaths = lists:foldl(fun(P, Acc) ->
+ filelib:wildcard(atom_to_list(P) ++ "/{src,c_src}") ++ Acc
+ end, [], [$(call comma_list,$(patsubst %,'%',$(call core_native_path,$(EDOC_SRC_DIRS))))]),
+ DefaultOpts = [{dir, "$(EDOC_OUTPUT)"}, {source_path, SrcPaths}, {subpackages, false}],
+ edoc:application($(1), ".", [$(2)] ++ DefaultOpts),
+ halt(0).
+endef
+
+# Core targets.
+
+ifneq ($(strip $(EDOC_SRC_DIRS)$(wildcard doc/overview.edoc)),)
+docs:: edoc
+endif
+
+distclean:: distclean-edoc
+
+# Plugin-specific targets.
+
+edoc: distclean-edoc doc-deps
+ $(gen_verbose) $(call erlang,$(call edoc.erl,$(PROJECT),$(EDOC_OPTS)))
+
+distclean-edoc:
+ $(gen_verbose) rm -f $(EDOC_OUTPUT)/*.css $(EDOC_OUTPUT)/*.html $(EDOC_OUTPUT)/*.png $(EDOC_OUTPUT)/edoc-info
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Configuration.
+
+DTL_FULL_PATH ?=
+DTL_PATH ?= templates/
+DTL_PREFIX ?=
+DTL_SUFFIX ?= _dtl
+DTL_OPTS ?=
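+
+# With the defaults above, a template at templates/user_profile.dtl
+# compiles to the module user_profile_dtl, rendered with e.g.
+# user_profile_dtl:render(Vars) (names illustrative).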
+
+# Verbosity.
+
+dtl_verbose_0 = @echo " DTL " $(filter %.dtl,$(?F));
+dtl_verbose = $(dtl_verbose_$(V))
+
+# Core targets.
+
+DTL_PATH := $(abspath $(DTL_PATH))
+DTL_FILES := $(sort $(call core_find,$(DTL_PATH),*.dtl))
+
+ifneq ($(DTL_FILES),)
+
+DTL_NAMES = $(addprefix $(DTL_PREFIX),$(addsuffix $(DTL_SUFFIX),$(DTL_FILES:$(DTL_PATH)/%.dtl=%)))
+DTL_MODULES = $(if $(DTL_FULL_PATH),$(subst /,_,$(DTL_NAMES)),$(notdir $(DTL_NAMES)))
+BEAM_FILES += $(addsuffix .beam,$(addprefix ebin/,$(DTL_MODULES)))
+
+ifneq ($(words $(DTL_FILES)),0)
+# Rebuild templates when the Makefile changes.
+$(ERLANG_MK_TMP)/last-makefile-change-erlydtl: $(MAKEFILE_LIST) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(DTL_FILES); \
+ fi
+ $(verbose) touch $@
+
+ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change-erlydtl
+endif
+
+define erlydtl_compile.erl
+ [begin
+ Module0 = case "$(strip $(DTL_FULL_PATH))" of
+ "" ->
+ filename:basename(F, ".dtl");
+ _ ->
+ "$(call core_native_path,$(DTL_PATH))/" ++ F2 = filename:rootname(F, ".dtl"),
+ re:replace(F2, "/", "_", [{return, list}, global])
+ end,
+ Module = list_to_atom("$(DTL_PREFIX)" ++ string:to_lower(Module0) ++ "$(DTL_SUFFIX)"),
+ case erlydtl:compile(F, Module, [$(DTL_OPTS)] ++ [{out_dir, "ebin/"}, return_errors]) of
+ ok -> ok;
+ {ok, _} -> ok
+ end
+ end || F <- string:tokens("$(1)", " ")],
+ halt().
+endef
+
+ebin/$(PROJECT).app:: $(DTL_FILES) | ebin/
+ $(if $(strip $?),\
+ $(dtl_verbose) $(call erlang,$(call erlydtl_compile.erl,$(call core_native_path,$?)),\
+ -pa ebin/))
+
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, Dave Cottlehuber <dch@skunkwerks.at>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-escript escript escript-zip
+
+# Configuration.
+
+ESCRIPT_NAME ?= $(PROJECT)
+ESCRIPT_FILE ?= $(ESCRIPT_NAME)
+
+ESCRIPT_SHEBANG ?= /usr/bin/env escript
+ESCRIPT_COMMENT ?= This is an -*- erlang -*- file
+ESCRIPT_EMU_ARGS ?= -escript main $(ESCRIPT_NAME)
+
+ESCRIPT_ZIP ?= 7z a -tzip -mx=9 -mtc=off $(if $(filter-out 0,$(V)),,> /dev/null)
+ESCRIPT_ZIP_FILE ?= $(ERLANG_MK_TMP)/escript.zip
+
+# Core targets.
+
+distclean:: distclean-escript
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Escript targets:" \
+		"  escript     Build an executable escript archive"
+
+# Plugin-specific targets.
+
+escript-zip:: FULL=1
+escript-zip:: deps app
+ $(verbose) mkdir -p $(dir $(ESCRIPT_ZIP))
+ $(verbose) rm -f $(ESCRIPT_ZIP_FILE)
+ $(gen_verbose) cd .. && $(ESCRIPT_ZIP) $(ESCRIPT_ZIP_FILE) $(PROJECT)/ebin/*
+ifneq ($(DEPS),)
+ $(verbose) cd $(DEPS_DIR) && $(ESCRIPT_ZIP) $(ESCRIPT_ZIP_FILE) \
+ $(subst $(DEPS_DIR)/,,$(addsuffix /*,$(wildcard \
+ $(addsuffix /ebin,$(shell cat $(ERLANG_MK_TMP)/deps.log)))))
+endif
+
+escript:: escript-zip
+ $(gen_verbose) printf "%s\n" \
+ "#!$(ESCRIPT_SHEBANG)" \
+ "%% $(ESCRIPT_COMMENT)" \
+ "%%! $(ESCRIPT_EMU_ARGS)" > $(ESCRIPT_FILE)
+ $(verbose) cat $(ESCRIPT_ZIP_FILE) >> $(ESCRIPT_FILE)
+ $(verbose) chmod +x $(ESCRIPT_FILE)
+
+distclean-escript:
+ $(gen_verbose) rm -f $(ESCRIPT_FILE)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, Enrique Fernandez <enrique.fernandez@erlang-solutions.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: eunit apps-eunit
+
+# Configuration
+
+EUNIT_OPTS ?=
+EUNIT_ERL_OPTS ?=
+
+# Core targets.
+
+tests:: eunit
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "EUnit targets:" \
+ " eunit Run all the EUnit tests for this project"
+
+# Plugin-specific targets.
+
+define eunit.erl
+ $(call cover.erl)
+ CoverSetup(),
+ case eunit:test($1, [$(EUNIT_OPTS)]) of
+ ok -> ok;
+ error -> halt(2)
+ end,
+ CoverExport("$(call core_native_path,$(COVER_DATA_DIR))/eunit.coverdata"),
+ halt()
+endef
+
+EUNIT_ERL_OPTS += -pa $(TEST_DIR) $(CURDIR)/ebin
+
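+# The t variable selects what to run: a bare module name runs that module's
+# EUnit tests, while module:function runs a single test function, e.g.
+# `make eunit t=my_module` or `make eunit t=my_module:my_test` (names here
+# are placeholders).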
+ifdef t
+ifeq (,$(findstring :,$(t)))
+eunit: test-build cover-data-dir
+ $(gen_verbose) $(call erlang,$(call eunit.erl,['$(t)']),$(EUNIT_ERL_OPTS))
+else
+eunit: test-build cover-data-dir
+ $(gen_verbose) $(call erlang,$(call eunit.erl,fun $(t)/0),$(EUNIT_ERL_OPTS))
+endif
+else
+EUNIT_EBIN_MODS = $(notdir $(basename $(ERL_FILES) $(BEAM_FILES)))
+EUNIT_TEST_MODS = $(notdir $(basename $(call core_find,$(TEST_DIR)/,*.erl)))
+
+EUNIT_MODS = $(foreach mod,$(EUNIT_EBIN_MODS) $(filter-out \
+ $(patsubst %,%_tests,$(EUNIT_EBIN_MODS)),$(EUNIT_TEST_MODS)),'$(mod)')
+
+eunit: test-build $(if $(IS_APP)$(ROOT_DIR),,apps-eunit) cover-data-dir
+ifneq ($(wildcard src/ $(TEST_DIR)),)
+ $(gen_verbose) $(call erlang,$(call eunit.erl,[$(call comma_list,$(EUNIT_MODS))]),$(EUNIT_ERL_OPTS))
+endif
+
+ifneq ($(ALL_APPS_DIRS),)
+apps-eunit: test-build
+ $(verbose) eunit_retcode=0 ; for app in $(ALL_APPS_DIRS); do $(MAKE) -C $$app eunit IS_APP=1; \
+ [ $$? -ne 0 ] && eunit_retcode=1 ; done ; \
+ exit $$eunit_retcode
+endif
+endif
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter proper,$(DEPS) $(TEST_DEPS)),proper)
+.PHONY: proper
+
+# Targets.
+
+tests:: proper
+
+define proper_check.erl
+ $(call cover.erl)
+ code:add_pathsa([
+ "$(call core_native_path,$(CURDIR)/ebin)",
+ "$(call core_native_path,$(DEPS_DIR)/*/ebin)",
+ "$(call core_native_path,$(TEST_DIR))"]),
+ Module = fun(M) ->
+ [true] =:= lists:usort([
+ case atom_to_list(F) of
+ "prop_" ++ _ ->
+ io:format("Testing ~p:~p/0~n", [M, F]),
+ proper:quickcheck(M:F(), nocolors);
+ _ ->
+ true
+ end
+ || {F, 0} <- M:module_info(exports)])
+ end,
+ try begin
+ CoverSetup(),
+ Res = case $(1) of
+ all -> [true] =:= lists:usort([Module(M) || M <- [$(call comma_list,$(3))]]);
+ module -> Module($(2));
+ function -> proper:quickcheck($(2), nocolors)
+ end,
+ CoverExport("$(COVER_DATA_DIR)/proper.coverdata"),
+ Res
+ end of
+ true -> halt(0);
+ _ -> halt(1)
+ catch error:undef ->
+ io:format("Undefined property or module?~n~p~n", [erlang:get_stacktrace()]),
+ halt(0)
+ end.
+endef
+
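+# The t variable follows the same convention as for eunit: a module name
+# checks every prop_* property that module exports, while module:property
+# checks a single property, e.g. `make proper t=my_mod:prop_foo` (a
+# placeholder name).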
+ifdef t
+ifeq (,$(findstring :,$(t)))
+proper: test-build cover-data-dir
+ $(verbose) $(call erlang,$(call proper_check.erl,module,$(t)))
+else
+proper: test-build cover-data-dir
+ $(verbose) echo Testing $(t)/0
+ $(verbose) $(call erlang,$(call proper_check.erl,function,$(t)()))
+endif
+else
+proper: test-build cover-data-dir
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(wildcard ebin/*.beam) $(call core_find,$(TEST_DIR)/,*.beam))))))
+ $(gen_verbose) $(call erlang,$(call proper_check.erl,all,undefined,$(MODULES)))
+endif
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Verbosity.
+
+proto_verbose_0 = @echo " PROTO " $(filter %.proto,$(?F));
+proto_verbose = $(proto_verbose_$(V))
+
+# Core targets.
+
+ifneq ($(wildcard src/),)
+ifneq ($(filter gpb protobuffs,$(BUILD_DEPS) $(DEPS)),)
+PROTO_FILES := $(filter %.proto,$(ALL_SRC_FILES))
+ERL_FILES += $(addprefix src/,$(patsubst %.proto,%_pb.erl,$(notdir $(PROTO_FILES))))
+
+ifeq ($(PROTO_FILES),)
+$(ERLANG_MK_TMP)/last-makefile-change-protobuffs:
+ $(verbose) :
+else
+# Rebuild proto files when the Makefile changes.
+# We exclude $(PROJECT).d to avoid a circular dependency.
+$(ERLANG_MK_TMP)/last-makefile-change-protobuffs: $(filter-out $(PROJECT).d,$(MAKEFILE_LIST)) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(PROTO_FILES); \
+ fi
+ $(verbose) touch $@
+
+$(PROJECT).d:: $(ERLANG_MK_TMP)/last-makefile-change-protobuffs
+endif
+
+ifeq ($(filter gpb,$(BUILD_DEPS) $(DEPS)),)
+define compile_proto.erl
+ [begin
+ protobuffs_compile:generate_source(F, [
+ {output_include_dir, "./include"},
+ {output_src_dir, "./src"}])
+ end || F <- string:tokens("$1", " ")],
+ halt().
+endef
+else
+define compile_proto.erl
+ [begin
+ gpb_compile:file(F, [
+ {include_as_lib, true},
+ {module_name_suffix, "_pb"},
+ {o_hrl, "./include"},
+ {o_erl, "./src"}])
+ end || F <- string:tokens("$1", " ")],
+ halt().
+endef
+endif
+
+ifneq ($(PROTO_FILES),)
+$(PROJECT).d:: $(PROTO_FILES)
+ $(verbose) mkdir -p ebin/ include/
+ $(if $(strip $?),$(proto_verbose) $(call erlang,$(call compile_proto.erl,$?)))
+endif
+endif
+endif
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: relx-rel relx-relup distclean-relx-rel run
+
+# Configuration.
+
+RELX ?= $(ERLANG_MK_TMP)/relx
+RELX_CONFIG ?= $(CURDIR)/relx.config
+
+RELX_URL ?= https://erlang.mk/res/relx-v3.27.0
+RELX_OPTS ?=
+RELX_OUTPUT_DIR ?= _rel
+RELX_REL_EXT ?=
+RELX_TAR ?= 1
+
+ifdef SFX
+ RELX_TAR = 1
+endif
+
+ifeq ($(firstword $(RELX_OPTS)),-o)
+ RELX_OUTPUT_DIR = $(word 2,$(RELX_OPTS))
+else
+ RELX_OPTS += -o $(RELX_OUTPUT_DIR)
+endif
+
+# Core targets.
+
+ifeq ($(IS_DEP),)
+ifneq ($(wildcard $(RELX_CONFIG)),)
+rel:: relx-rel
+
+relup:: relx-relup
+endif
+endif
+
+distclean:: distclean-relx-rel
+
+# Plugin-specific targets.
+
+$(RELX): | $(ERLANG_MK_TMP)
+ $(gen_verbose) $(call core_http_get,$(RELX),$(RELX_URL))
+ $(verbose) chmod +x $(RELX)
+
+relx-rel: $(RELX) rel-deps app
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) release
+ $(verbose) $(MAKE) relx-post-rel
+ifeq ($(RELX_TAR),1)
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) tar
+endif
+
+relx-relup: $(RELX) rel-deps app
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) release
+ $(MAKE) relx-post-rel
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) relup $(if $(filter 1,$(RELX_TAR)),tar)
+
+distclean-relx-rel:
+ $(gen_verbose) rm -rf $(RELX_OUTPUT_DIR)
+
+# Default hooks.
+relx-post-rel::
+ $(verbose) :
+
+# Run target.
+
+ifeq ($(wildcard $(RELX_CONFIG)),)
+run::
+else
+
+define get_relx_release.erl
+ {ok, Config} = file:consult("$(call core_native_path,$(RELX_CONFIG))"),
+ {release, {Name, Vsn0}, _} = lists:keyfind(release, 1, Config),
+ Vsn = case Vsn0 of
+ {cmd, Cmd} -> os:cmd(Cmd);
+ semver -> "";
+ {semver, _} -> "";
+ VsnStr -> Vsn0
+ end,
+ Extended = case lists:keyfind(extended_start_script, 1, Config) of
+ {_, true} -> "1";
+ _ -> ""
+ end,
+ io:format("~s ~s ~s", [Name, Vsn, Extended]),
+ halt(0).
+endef
+
+RELX_REL := $(shell $(call erlang,$(get_relx_release.erl)))
+RELX_REL_NAME := $(word 1,$(RELX_REL))
+RELX_REL_VSN := $(word 2,$(RELX_REL))
+RELX_REL_CMD := $(if $(word 3,$(RELX_REL)),console)
+
+ifeq ($(PLATFORM),msys2)
+RELX_REL_EXT := .cmd
+endif
+
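+# The run target launches the release start script; `console` is passed only
+# when relx.config enables extended_start_script.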
+run:: all
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) $(RELX_REL_CMD)
+
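+# When RELOAD is defined, `make rel` additionally pings the running node and
+# hot-loads any modified modules into it via c:lm/0.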
+ifdef RELOAD
+rel::
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) ping
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) \
+ eval "io:format(\"~p~n\", [c:lm()])"
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Relx targets:" \
+ " run Compile the project, build the release and run it"
+
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, M Robert Martin <rob@version2beta.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: shell
+
+# Configuration.
+
+SHELL_ERL ?= erl
+SHELL_PATHS ?= $(CURDIR)/ebin $(TEST_DIR)
+SHELL_OPTS ?=
+
+ALL_SHELL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(SHELL_DEPS))
+
+# Core targets
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Shell targets:" \
+		" shell Run an Erlang shell with SHELL_OPTS or reasonable defaults"
+
+# Plugin-specific targets.
+
+$(foreach dep,$(SHELL_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+build-shell-deps:
+else
+build-shell-deps: $(ALL_SHELL_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_SHELL_DEPS_DIRS) ; do \
+ if [ -z "$(strip $(FULL))" ] && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ else \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ fi \
+ done
+endif
+
+shell:: build-shell-deps
+ $(gen_verbose) $(SHELL_ERL) -pa $(SHELL_PATHS) $(SHELL_OPTS)
+
+# Copyright 2017, Stanislaw Klekot <dozzie@jarowit.net>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-sphinx sphinx
+
+# Configuration.
+
+SPHINX_BUILD ?= sphinx-build
+SPHINX_SOURCE ?= doc
+SPHINX_CONFDIR ?=
+SPHINX_FORMATS ?= html
+SPHINX_DOCTREES ?= $(ERLANG_MK_TMP)/sphinx.doctrees
+SPHINX_OPTS ?=
+
+#sphinx_html_opts =
+#sphinx_html_output = html
+#sphinx_man_opts =
+#sphinx_man_output = man
+#sphinx_latex_opts =
+#sphinx_latex_output = latex
+
+# Helpers.
+
+sphinx_build_0 = @echo " SPHINX" $1; $(SPHINX_BUILD) -N -q
+sphinx_build_1 = $(SPHINX_BUILD) -N
+sphinx_build_2 = set -x; $(SPHINX_BUILD)
+sphinx_build = $(sphinx_build_$(V))
+
+define sphinx.build
+$(call sphinx_build,$1) -b $1 -d $(SPHINX_DOCTREES) $(if $(SPHINX_CONFDIR),-c $(SPHINX_CONFDIR)) $(SPHINX_OPTS) $(sphinx_$1_opts) -- $(SPHINX_SOURCE) $(call sphinx.output,$1)
+
+endef
+
+define sphinx.output
+$(if $(sphinx_$1_output),$(sphinx_$1_output),$1)
+endef
+
+# Targets.
+
+ifneq ($(wildcard $(if $(SPHINX_CONFDIR),$(SPHINX_CONFDIR),$(SPHINX_SOURCE))/conf.py),)
+docs:: sphinx
+distclean:: distclean-sphinx
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Sphinx targets:" \
+ " sphinx Generate Sphinx documentation." \
+ "" \
+		"ReST sources and the 'conf.py' file are expected in the directory pointed" \
+		"to by SPHINX_SOURCE ('doc' by default). SPHINX_FORMATS lists the formats to" \
+		"build (only 'html' is generated by default); the target directory can be" \
+		'set via sphinx_$${format}_output, for example: sphinx_html_output = output/html' \
+		"Additional Sphinx options can be set in SPHINX_OPTS."
+
+# Plugin-specific targets.
+
+sphinx:
+ $(foreach F,$(SPHINX_FORMATS),$(call sphinx.build,$F))
+
+distclean-sphinx:
+ $(gen_verbose) rm -rf $(filter-out $(SPHINX_SOURCE),$(foreach F,$(SPHINX_FORMATS),$(call sphinx.output,$F)))
+
+# Copyright (c) 2017, Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: show-ERL_LIBS show-ERLC_OPTS show-TEST_ERLC_OPTS
+
+show-ERL_LIBS:
+ @echo $(ERL_LIBS)
+
+show-ERLC_OPTS:
+ @$(foreach opt,$(ERLC_OPTS) -pa ebin -I include,echo "$(opt)";)
+
+show-TEST_ERLC_OPTS:
+ @$(foreach opt,$(TEST_ERLC_OPTS) -pa ebin -I include,echo "$(opt)";)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter triq,$(DEPS) $(TEST_DEPS)),triq)
+.PHONY: triq
+
+# Targets.
+
+tests:: triq
+
+define triq_check.erl
+ $(call cover.erl)
+ code:add_pathsa([
+ "$(call core_native_path,$(CURDIR)/ebin)",
+ "$(call core_native_path,$(DEPS_DIR)/*/ebin)",
+ "$(call core_native_path,$(TEST_DIR))"]),
+ try begin
+ CoverSetup(),
+ Res = case $(1) of
+ all -> [true] =:= lists:usort([triq:check(M) || M <- [$(call comma_list,$(3))]]);
+ module -> triq:check($(2));
+ function -> triq:check($(2))
+ end,
+ CoverExport("$(COVER_DATA_DIR)/triq.coverdata"),
+ Res
+ end of
+ true -> halt(0);
+ _ -> halt(1)
+ catch error:undef ->
+ io:format("Undefined property or module?~n~p~n", [erlang:get_stacktrace()]),
+ halt(0)
+ end.
+endef
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+triq: test-build cover-data-dir
+ $(verbose) $(call erlang,$(call triq_check.erl,module,$(t)))
+else
+triq: test-build cover-data-dir
+ $(verbose) echo Testing $(t)/0
+ $(verbose) $(call erlang,$(call triq_check.erl,function,$(t)()))
+endif
+else
+triq: test-build cover-data-dir
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(wildcard ebin/*.beam) $(call core_find,$(TEST_DIR)/,*.beam))))))
+ $(gen_verbose) $(call erlang,$(call triq_check.erl,all,undefined,$(MODULES)))
+endif
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Erlang Solutions Ltd.
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: xref distclean-xref
+
+# Configuration.
+
+ifeq ($(XREF_CONFIG),)
+ XREFR_ARGS :=
+else
+ XREFR_ARGS := -c $(XREF_CONFIG)
+endif
+
+XREFR ?= $(CURDIR)/xrefr
+export XREFR
+
+XREFR_URL ?= https://github.com/inaka/xref_runner/releases/download/1.1.0/xrefr
+
+# Core targets.
+
+help::
+ $(verbose) printf '%s\n' '' \
+ 'Xref targets:' \
+ ' xref Run Xrefr using $$XREF_CONFIG as config file if defined'
+
+distclean:: distclean-xref
+
+# Plugin-specific targets.
+
+$(XREFR):
+ $(gen_verbose) $(call core_http_get,$(XREFR),$(XREFR_URL))
+ $(verbose) chmod +x $(XREFR)
+
+xref: deps app $(XREFR)
+ $(gen_verbose) $(XREFR) $(XREFR_ARGS)
+
+distclean-xref:
+ $(gen_verbose) rm -rf $(XREFR)
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Viktor Söderqvist <viktor@zuiderkwast.se>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+COVER_REPORT_DIR ?= cover
+COVER_DATA_DIR ?= $(COVER_REPORT_DIR)
+
+ifdef COVER
+COVER_APPS ?= $(notdir $(ALL_APPS_DIRS))
+COVER_DEPS ?=
+endif
+
+# Code coverage for Common Test.
+
+ifdef COVER
+ifdef CT_RUN
+ifneq ($(wildcard $(TEST_DIR)),)
+test-build:: $(TEST_DIR)/ct.cover.spec
+
+$(TEST_DIR)/ct.cover.spec: cover-data-dir
+ $(gen_verbose) printf "%s\n" \
+ "{incl_app, '$(PROJECT)', details}." \
+ "{incl_dirs, '$(PROJECT)', [\"$(call core_native_path,$(CURDIR)/ebin)\" \
+ $(foreach a,$(COVER_APPS),$(comma) \"$(call core_native_path,$(APPS_DIR)/$a/ebin)\") \
+ $(foreach d,$(COVER_DEPS),$(comma) \"$(call core_native_path,$(DEPS_DIR)/$d/ebin)\")]}." \
+ '{export,"$(call core_native_path,$(abspath $(COVER_DATA_DIR))/ct.coverdata)"}.' > $@
+
+CT_RUN += -cover $(TEST_DIR)/ct.cover.spec
+endif
+endif
+endif
+
+# Code coverage for other tools.
+
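+# cover.erl is spliced into the eunit, proper and triq runners above:
+# CoverSetup cover-compiles every configured ebin directory and CoverExport
+# writes the collected data to the given .coverdata file. Both are no-ops
+# when COVER is unset.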
+ifdef COVER
+define cover.erl
+ CoverSetup = fun() ->
+ Dirs = ["$(call core_native_path,$(CURDIR)/ebin)"
+ $(foreach a,$(COVER_APPS),$(comma) "$(call core_native_path,$(APPS_DIR)/$a/ebin)")
+ $(foreach d,$(COVER_DEPS),$(comma) "$(call core_native_path,$(DEPS_DIR)/$d/ebin)")],
+ [begin
+ case filelib:is_dir(Dir) of
+ false -> false;
+ true ->
+ case cover:compile_beam_directory(Dir) of
+ {error, _} -> halt(1);
+ _ -> true
+ end
+ end
+ end || Dir <- Dirs]
+ end,
+ CoverExport = fun(Filename) -> cover:export(Filename) end,
+endef
+else
+define cover.erl
+ CoverSetup = fun() -> ok end,
+ CoverExport = fun(_) -> ok end,
+endef
+endif
+
+# Core targets
+
+ifdef COVER
+ifneq ($(COVER_REPORT_DIR),)
+tests::
+ $(verbose) $(MAKE) --no-print-directory cover-report
+endif
+
+cover-data-dir: | $(COVER_DATA_DIR)
+
+$(COVER_DATA_DIR):
+ $(verbose) mkdir -p $(COVER_DATA_DIR)
+else
+cover-data-dir:
+endif
+
+clean:: coverdata-clean
+
+ifneq ($(COVER_REPORT_DIR),)
+distclean:: cover-report-clean
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Cover targets:" \
+		" cover-report Generate an HTML coverage report from previously collected" \
+ " cover data." \
+ " all.coverdata Merge all coverdata files into all.coverdata." \
+ "" \
+		"If COVER=1 is set, coverage data is generated by the eunit and ct targets." \
+		"The tests target additionally generates an HTML coverage report from the" \
+		"combined coverdata files from each of these testing tools. HTML reports can" \
+		"be disabled by setting COVER_REPORT_DIR to empty."
+
+# Plugin specific targets
+
+COVERDATA = $(filter-out $(COVER_DATA_DIR)/all.coverdata,$(wildcard $(COVER_DATA_DIR)/*.coverdata))
+
+.PHONY: coverdata-clean
+coverdata-clean:
+ $(gen_verbose) rm -f $(COVER_DATA_DIR)/*.coverdata $(TEST_DIR)/ct.cover.spec
+
+# Merge all coverdata files into one.
+define cover_export.erl
+ $(foreach f,$(COVERDATA),cover:import("$(f)") == ok orelse halt(1),)
+ cover:export("$(COVER_DATA_DIR)/$@"), halt(0).
+endef
+
+all.coverdata: $(COVERDATA) cover-data-dir
+ $(gen_verbose) $(call erlang,$(cover_export.erl))
+
+# These are only defined if COVER_REPORT_DIR is non-empty. Set COVER_REPORT_DIR to
+# empty if you want the coverdata files but not the HTML report.
+ifneq ($(COVER_REPORT_DIR),)
+
+.PHONY: cover-report-clean cover-report
+
+cover-report-clean:
+ $(gen_verbose) rm -rf $(COVER_REPORT_DIR)
+ifneq ($(COVER_REPORT_DIR),$(COVER_DATA_DIR))
+ $(if $(shell ls -A $(COVER_DATA_DIR)/),,$(verbose) rmdir $(COVER_DATA_DIR))
+endif
+
+ifeq ($(COVERDATA),)
+cover-report:
+else
+
+# Modules which include eunit.hrl always contain one line without coverage
+# because eunit defines test/0 which is never called. We compensate for this.
+EUNIT_HRL_MODS = $(subst $(space),$(comma),$(shell \
+ grep -H -e '^\s*-include.*include/eunit\.hrl"' src/*.erl \
+ | sed "s/^src\/\(.*\)\.erl:.*/'\1'/" | uniq))
+
+define cover_report.erl
+ $(foreach f,$(COVERDATA),cover:import("$(f)") == ok orelse halt(1),)
+ Ms = cover:imported_modules(),
+ [cover:analyse_to_file(M, "$(COVER_REPORT_DIR)/" ++ atom_to_list(M)
+ ++ ".COVER.html", [html]) || M <- Ms],
+ Report = [begin {ok, R} = cover:analyse(M, module), R end || M <- Ms],
+ EunitHrlMods = [$(EUNIT_HRL_MODS)],
+ Report1 = [{M, {Y, case lists:member(M, EunitHrlMods) of
+ true -> N - 1; false -> N end}} || {M, {Y, N}} <- Report],
+ TotalY = lists:sum([Y || {_, {Y, _}} <- Report1]),
+ TotalN = lists:sum([N || {_, {_, N}} <- Report1]),
+ Perc = fun(Y, N) -> case Y + N of 0 -> 100; S -> round(100 * Y / S) end end,
+ TotalPerc = Perc(TotalY, TotalN),
+ {ok, F} = file:open("$(COVER_REPORT_DIR)/index.html", [write]),
+ io:format(F, "<!DOCTYPE html><html>~n"
+ "<head><meta charset=\"UTF-8\">~n"
+ "<title>Coverage report</title></head>~n"
+ "<body>~n", []),
+ io:format(F, "<h1>Coverage</h1>~n<p>Total: ~p%</p>~n", [TotalPerc]),
+ io:format(F, "<table><tr><th>Module</th><th>Coverage</th></tr>~n", []),
+ [io:format(F, "<tr><td><a href=\"~p.COVER.html\">~p</a></td>"
+ "<td>~p%</td></tr>~n",
+ [M, M, Perc(Y, N)]) || {M, {Y, N}} <- Report1],
+ How = "$(subst $(space),$(comma)$(space),$(basename $(COVERDATA)))",
+ Date = "$(shell date -u "+%Y-%m-%dT%H:%M:%SZ")",
+ io:format(F, "</table>~n"
+ "<p>Generated using ~s and erlang.mk on ~s.</p>~n"
+ "</body></html>", [How, Date]),
+ halt().
+endef
+
+cover-report:
+ $(verbose) mkdir -p $(COVER_REPORT_DIR)
+ $(gen_verbose) $(call erlang,$(cover_report.erl))
+
+endif
+endif # ifneq ($(COVER_REPORT_DIR),)
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: sfx
+
+ifdef RELX_REL
+ifdef SFX
+
+# Configuration.
+
+SFX_ARCHIVE ?= $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/$(RELX_REL_NAME)-$(RELX_REL_VSN).tar.gz
+SFX_OUTPUT_FILE ?= $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME).run
+
+# Core targets.
+
+rel:: sfx
+
+# Plugin-specific targets.
+
+define sfx_stub
+#!/bin/sh
+
+TMPDIR=`mktemp -d`
+ARCHIVE=`awk '/^__ARCHIVE_BELOW__$$/ {print NR + 1; exit 0;}' $$0`
+FILENAME=$$(basename $$0)
+REL=$${FILENAME%.*}
+
+tail -n+$$ARCHIVE $$0 | tar -xzf - -C $$TMPDIR
+
+$$TMPDIR/bin/$$REL console
+RET=$$?
+
+rm -rf $$TMPDIR
+
+exit $$RET
+
+__ARCHIVE_BELOW__
+endef
+
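+# The stub above is rendered first, then the release tarball is appended to
+# it; executing the resulting file unpacks the archive into a temporary
+# directory and starts the release console.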
+sfx:
+ $(verbose) $(call core_render,sfx_stub,$(SFX_OUTPUT_FILE))
+ $(gen_verbose) cat $(SFX_ARCHIVE) >> $(SFX_OUTPUT_FILE)
+ $(verbose) chmod +x $(SFX_OUTPUT_FILE)
+
+endif
+endif
+
+# Copyright (c) 2013-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# External plugins.
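+#
+# A bare dependency name in DEP_PLUGINS loads that dependency's plugins.mk,
+# while a value containing a slash loads that exact file from the dependency,
+# e.g. (dependency names are placeholders):
+#   DEP_PLUGINS = cowboy
+#   DEP_PLUGINS = cowboy/plugins/extra.mk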
+
+DEP_PLUGINS ?=
+
+$(foreach p,$(DEP_PLUGINS),\
+ $(eval $(if $(findstring /,$p),\
+ $(call core_dep_plugin,$p,$(firstword $(subst /, ,$p))),\
+ $(call core_dep_plugin,$p/plugins.mk,$p))))
+
+help:: help-plugins
+
+help-plugins::
+ $(verbose) :
+
+# Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015-2016, Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Fetch dependencies recursively (without building them).
+
+.PHONY: fetch-deps fetch-doc-deps fetch-rel-deps fetch-test-deps \
+ fetch-shell-deps
+
+.PHONY: $(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+fetch-deps: $(ERLANG_MK_RECURSIVE_DEPS_LIST)
+fetch-doc-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST)
+fetch-rel-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST)
+fetch-test-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST)
+fetch-shell-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+ifneq ($(SKIP_DEPS),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST):
+ $(verbose) :> $@
+else
+# By default, we fetch "normal" dependencies. They are also included
+# regardless of which type of dependencies is requested.
+#
+# $(ALL_DEPS_DIRS) includes $(BUILD_DEPS).
+
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_DOC_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_REL_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_TEST_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_SHELL_DEPS_DIRS)
+
+# Allow fetch-deps to be used with $(DEP_TYPES) to fetch multiple types of
+# dependencies with a single target.
+ifneq ($(filter doc,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_DOC_DEPS_DIRS)
+endif
+ifneq ($(filter rel,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_REL_DEPS_DIRS)
+endif
+ifneq ($(filter test,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_TEST_DEPS_DIRS)
+endif
+ifneq ($(filter shell,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_SHELL_DEPS_DIRS)
+endif
+
+ERLANG_MK_RECURSIVE_TMP_LIST := $(abspath $(ERLANG_MK_TMP)/recursive-tmp-deps-$(shell echo $$PPID).log)
+
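+# A dependency is appended to the temporary list the first time it is seen,
+# so dependencies shared by several projects are only visited once while
+# recursing.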
+$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): | $(ERLANG_MK_TMP)
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_RECURSIVE_TMP_LIST)
+endif
+ $(verbose) touch $(ERLANG_MK_RECURSIVE_TMP_LIST)
+ $(verbose) set -e; for dep in $^ ; do \
+ if ! grep -qs ^$$dep$$ $(ERLANG_MK_RECURSIVE_TMP_LIST); then \
+ echo $$dep >> $(ERLANG_MK_RECURSIVE_TMP_LIST); \
+ if grep -qs -E "^[[:blank:]]*include[[:blank:]]+(erlang\.mk|.*/erlang\.mk|.*ERLANG_MK_FILENAME.*)$$" \
+ $$dep/GNUmakefile $$dep/makefile $$dep/Makefile; then \
+ $(MAKE) -C $$dep fetch-deps \
+ IS_DEP=1 \
+ ERLANG_MK_RECURSIVE_TMP_LIST=$(ERLANG_MK_RECURSIVE_TMP_LIST); \
+ fi \
+ fi \
+ done
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) sort < $(ERLANG_MK_RECURSIVE_TMP_LIST) | \
+ uniq > $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted
+ $(verbose) cmp -s $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted $@ \
+ || mv $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted $@
+ $(verbose) rm -f $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted
+ $(verbose) rm $(ERLANG_MK_RECURSIVE_TMP_LIST)
+endif
+endif # ifneq ($(SKIP_DEPS),)
+
+# List dependencies recursively.
+
+.PHONY: list-deps list-doc-deps list-rel-deps list-test-deps \
+ list-shell-deps
+
+list-deps: $(ERLANG_MK_RECURSIVE_DEPS_LIST)
+list-doc-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST)
+list-rel-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST)
+list-test-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST)
+list-shell-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+list-deps list-doc-deps list-rel-deps list-test-deps list-shell-deps:
+ $(verbose) cat $^
+
+# Query dependencies recursively.
+
+.PHONY: query-deps query-doc-deps query-rel-deps query-test-deps \
+ query-shell-deps
+
+QUERY ?= name fetch_method repo version
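+# Example (a sketch): `make query-deps QUERY="name version"` prints only the
+# name and version of every dependency, recursively.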
+
+define query_target
+$(1): $(2) clean-tmp-query.log
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(4)
+endif
+ $(verbose) $(foreach dep,$(3),\
+ echo $(PROJECT): $(foreach q,$(QUERY),$(call query_$(q),$(dep))) >> $(4) ;)
+ $(if $(filter-out query-deps,$(1)),,\
+ $(verbose) set -e; for dep in $(3) ; do \
+ if grep -qs ^$$$$dep$$$$ $(ERLANG_MK_TMP)/query.log; then \
+ :; \
+ else \
+ echo $$$$dep >> $(ERLANG_MK_TMP)/query.log; \
+ $(MAKE) -C $(DEPS_DIR)/$$$$dep $$@ QUERY="$(QUERY)" IS_DEP=1 || true; \
+ fi \
+ done)
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) touch $(4)
+ $(verbose) cat $(4)
+endif
+endef
+
+clean-tmp-query.log:
+ifeq ($(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_TMP)/query.log
+endif
+
+$(eval $(call query_target,query-deps,$(ERLANG_MK_RECURSIVE_DEPS_LIST),$(BUILD_DEPS) $(DEPS),$(ERLANG_MK_QUERY_DEPS_FILE)))
+$(eval $(call query_target,query-doc-deps,$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST),$(DOC_DEPS),$(ERLANG_MK_QUERY_DOC_DEPS_FILE)))
+$(eval $(call query_target,query-rel-deps,$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST),$(REL_DEPS),$(ERLANG_MK_QUERY_REL_DEPS_FILE)))
+$(eval $(call query_target,query-test-deps,$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST),$(TEST_DEPS),$(ERLANG_MK_QUERY_TEST_DEPS_FILE)))
+$(eval $(call query_target,query-shell-deps,$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST),$(SHELL_DEPS),$(ERLANG_MK_QUERY_SHELL_DEPS_FILE)))
diff --git a/deps/rabbitmq_peer_discovery_consul/examples/compose_consul_haproxy/README.md b/deps/rabbitmq_peer_discovery_consul/examples/compose_consul_haproxy/README.md
new file mode 100644
index 0000000000..2cdf7d2e80
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_consul/examples/compose_consul_haproxy/README.md
@@ -0,0 +1,31 @@
+A dynamic RabbitMQ cluster using:
+
+1. [Docker compose](https://docs.docker.com/compose/)
+
+2. [Consul](https://www.consul.io)
+
+3. [HAProxy](https://github.com/docker/dockercloud-haproxy)
+
+4. [rabbitmq-peer-discovery-consul plugin](https://github.com/rabbitmq/rabbitmq-peer-discovery-consul)
+
+---
+
+How to run:
+
+```
+docker-compose up
+```
+
+How to scale:
+
+```
+docker-compose up --scale rabbit=2 -d
+```
+
+
+---
+
+Check the running status:
+
+- Consul Management: http://localhost:8500/ui/
+- RabbitMQ Management: http://localhost:15672/#/
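+
+---
+
+To verify that the nodes joined the cluster, you can run `rabbitmqctl` inside
+one of the containers (a sketch; with scaled services, Compose picks the
+first container by default and container names vary across Compose versions):
+
+```
+docker-compose exec rabbit rabbitmqctl cluster_status
+```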
diff --git a/deps/rabbitmq_peer_discovery_consul/examples/compose_consul_haproxy/conf/enabled_plugins b/deps/rabbitmq_peer_discovery_consul/examples/compose_consul_haproxy/conf/enabled_plugins
new file mode 100644
index 0000000000..f863eb4644
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_consul/examples/compose_consul_haproxy/conf/enabled_plugins
@@ -0,0 +1 @@
+[rabbitmq_management,rabbitmq_peer_discovery_consul]. \ No newline at end of file
diff --git a/deps/rabbitmq_peer_discovery_consul/examples/compose_consul_haproxy/conf/rabbitmq.conf b/deps/rabbitmq_peer_discovery_consul/examples/compose_consul_haproxy/conf/rabbitmq.conf
new file mode 100644
index 0000000000..274eefcf66
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_consul/examples/compose_consul_haproxy/conf/rabbitmq.conf
@@ -0,0 +1,11 @@
+loopback_users.guest = false
+cluster_formation.peer_discovery_backend = rabbit_peer_discovery_consul
+cluster_formation.consul.host = consul
+# Set to false if automatic removal of unknown/absent nodes
+# is desired. This can be dangerous, see
+# * https://www.rabbitmq.com/cluster-formation.html#node-health-checks-and-cleanup
+# * https://groups.google.com/forum/#!msg/rabbitmq-users/wuOfzEywHXo/k8z_HWIkBgAJ
+cluster_formation.node_cleanup.only_log_warning = true
+cluster_formation.consul.svc_addr_auto = true
+cluster_partition_handling = autoheal
+vm_memory_high_watermark.relative = 0.8
diff --git a/deps/rabbitmq_peer_discovery_consul/examples/compose_consul_haproxy/docker-compose.yml b/deps/rabbitmq_peer_discovery_consul/examples/compose_consul_haproxy/docker-compose.yml
new file mode 100644
index 0000000000..4f8e0e8864
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_consul/examples/compose_consul_haproxy/docker-compose.yml
@@ -0,0 +1,44 @@
+version: "2"
+services:
+ rabbit:
+ environment:
+ - TCP_PORTS=15672, 5672
+ - RABBITMQ_ERLANG_COOKIE="mycookie"
+ networks:
+ - back
+ image: rabbitmq:3.7
+ expose:
+ - 15672
+ - 5672
+ - 5671
+ - 15671
+ tty: true
+ volumes:
+ - ./conf/:/etc/rabbitmq/
+ command: sh -c "sleep 10; rabbitmq-server;"
+ lb:
+ image: dockercloud/haproxy
+ environment:
+ - MODE=tcp
+ links:
+ - rabbit
+ volumes:
+ - /var/run/docker.sock:/var/run/docker.sock
+ ports:
+ - 15672:15672
+ - 5672:5672
+ networks:
+ - back
+ consul1:
+ image: "consul"
+ container_name: "consul"
+ hostname: "consul"
+ ports:
+ - "8400:8400"
+ - "8500:8500"
+ - "8600:53"
+ networks:
+ - back
+
+networks:
+ back:
diff --git a/deps/rabbitmq_peer_discovery_consul/include/rabbit_peer_discovery_consul.hrl b/deps/rabbitmq_peer_discovery_consul/include/rabbit_peer_discovery_consul.hrl
new file mode 100644
index 0000000000..3870e7c1e7
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_consul/include/rabbit_peer_discovery_consul.hrl
@@ -0,0 +1,111 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-define(BACKEND_CONFIG_KEY, peer_discovery_consul).
+
+-define(CONFIG_MAPPING,
+ #{
+ cluster_name => #peer_discovery_config_entry_meta{
+ type = string,
+ env_variable = "CLUSTER_NAME",
+ default_value = "default"
+ },
+ consul_acl_token => #peer_discovery_config_entry_meta{
+ type = string,
+ env_variable = "CONSUL_ACL_TOKEN",
+ default_value = "undefined"
+ },
+ consul_include_nodes_with_warnings => #peer_discovery_config_entry_meta{
+ type = atom,
+ env_variable = "CONSUL_INCLUDE_NODES_WITH_WARNINGS",
+ default_value = false
+ },
+ consul_scheme => #peer_discovery_config_entry_meta{
+ type = string,
+ env_variable = "CONSUL_SCHEME",
+ default_value = "http"
+ },
+ consul_host => #peer_discovery_config_entry_meta{
+ type = string,
+ env_variable = "CONSUL_HOST",
+ default_value = "localhost"
+ },
+ consul_port => #peer_discovery_config_entry_meta{
+ type = port,
+ env_variable = "CONSUL_PORT",
+ default_value = 8500
+ },
+ consul_domain => #peer_discovery_config_entry_meta{
+ type = string,
+ env_variable = "CONSUL_DOMAIN",
+ default_value = "consul"
+ },
+ consul_svc => #peer_discovery_config_entry_meta{
+ type = string,
+ env_variable = "CONSUL_SVC",
+ default_value = "rabbitmq"
+ },
+ consul_svc_addr => #peer_discovery_config_entry_meta{
+ type = string,
+ env_variable = "CONSUL_SVC_ADDR",
+ default_value = "undefined"
+ },
+ consul_svc_addr_auto => #peer_discovery_config_entry_meta{
+ type = atom,
+ env_variable = "CONSUL_SVC_ADDR_AUTO",
+ default_value = false
+ },
+ consul_svc_addr_nic => #peer_discovery_config_entry_meta{
+ type = string,
+ env_variable = "CONSUL_SVC_ADDR_NIC",
+ default_value = "undefined"
+ },
+ consul_svc_addr_nodename => #peer_discovery_config_entry_meta{
+ type = atom,
+ env_variable = "CONSUL_SVC_ADDR_NODENAME",
+ default_value = false
+ },
+ consul_svc_port => #peer_discovery_config_entry_meta{
+ type = integer,
+ env_variable = "CONSUL_SVC_PORT",
+ default_value = 5672
+ },
+ consul_svc_ttl => #peer_discovery_config_entry_meta{
+ type = integer,
+ env_variable = "CONSUL_SVC_TTL",
+ default_value = 30
+ },
+ consul_svc_tags => #peer_discovery_config_entry_meta{
+ type = list,
+ env_variable = "CONSUL_SVC_TAGS",
+ default_value = []
+ },
+ consul_svc_meta => #peer_discovery_config_entry_meta{
+ type = list,
+ default_value = []
+ },
+ consul_deregister_after => #peer_discovery_config_entry_meta{
+ type = integer,
+ env_variable = "CONSUL_DEREGISTER_AFTER",
+ default_value = ""
+ },
+ consul_use_longname => #peer_discovery_config_entry_meta{
+ type = atom,
+ env_variable = "CONSUL_USE_LONGNAME",
+ default_value = false
+ },
+ consul_lock_prefix => #peer_discovery_config_entry_meta{
+ type = string,
+ env_variable = "CONSUL_LOCK_PREFIX",
+ default_value = "rabbitmq"
+ },
+ lock_wait_time => #peer_discovery_config_entry_meta{
+ type = integer,
+ env_variable = "LOCK_WAIT_TIME",
+ default_value = 300
+ }
+ }).
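+
+%% Example (a sketch of how the entries above are typically consumed): with
+%% the consul_host entry, setting the CONSUL_HOST environment variable
+%% overrides the "localhost" default at node startup.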
diff --git a/deps/rabbitmq_peer_discovery_consul/priv/schema/rabbitmq_peer_discovery_consul.schema b/deps/rabbitmq_peer_discovery_consul/priv/schema/rabbitmq_peer_discovery_consul.schema
new file mode 100644
index 0000000000..3256d48159
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_consul/priv/schema/rabbitmq_peer_discovery_consul.schema
@@ -0,0 +1,324 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+%% host
+
+{mapping, "cluster_formation.consul.host", "rabbit.cluster_formation.peer_discovery_consul.consul_host", [
+ {datatype, string}
+]}.
+
+{translation, "rabbit.cluster_formation.peer_discovery_consul.consul_host",
+fun(Conf) ->
+ case cuttlefish:conf_get("cluster_formation.consul.host", Conf, undefined) of
+ undefined -> cuttlefish:unset();
+ Value -> Value
+ end
+end}.
+
+
+%% port
+
+{mapping, "cluster_formation.consul.port", "rabbit.cluster_formation.peer_discovery_consul.consul_port", [
+ {datatype, integer}, {validators, ["port"]}
+]}.
+
+{translation, "rabbit.cluster_formation.peer_discovery_consul.consul_port",
+fun(Conf) ->
+ case cuttlefish:conf_get("cluster_formation.consul.port", Conf, undefined) of
+ undefined -> cuttlefish:unset();
+ Value -> Value
+ end
+end}.
+
+
+%% scheme
+
+{mapping, "cluster_formation.consul.scheme", "rabbit.cluster_formation.peer_discovery_consul.consul_scheme", [
+ {datatype, string}
+]}.
+
+{translation, "rabbit.cluster_formation.peer_discovery_consul.consul_scheme",
+fun(Conf) ->
+ case cuttlefish:conf_get("cluster_formation.consul.scheme", Conf, "http") of
+ undefined -> cuttlefish:unset();
+ Value -> Value
+ end
+end}.
+
+
+%% ACL token
+
+{mapping, "cluster_formation.consul.acl_token", "rabbit.cluster_formation.peer_discovery_consul.consul_acl_token", [
+ {datatype, string}
+]}.
+
+{translation, "rabbit.cluster_formation.peer_discovery_consul.consul_acl_token",
+fun(Conf) ->
+ case cuttlefish:conf_get("cluster_formation.consul.acl_token", Conf, undefined) of
+ undefined -> cuttlefish:unset();
+ Value -> Value
+ end
+end}.
+
+
+%% cluster name
+
+{mapping, "cluster_formation.consul.cluster_name", "rabbit.cluster_formation.peer_discovery_consul.cluster_name", [
+ {datatype, string}
+]}.
+
+{translation, "rabbit.cluster_formation.peer_discovery_consul.cluster_name",
+fun(Conf) ->
+ case cuttlefish:conf_get("cluster_formation.consul.cluster_name", Conf, undefined) of
+ undefined -> cuttlefish:unset();
+ Value -> Value
+ end
+end}.
+
+
+%% service name
+
+{mapping, "cluster_formation.consul.svc", "rabbit.cluster_formation.peer_discovery_consul.consul_svc", [
+ {datatype, string}
+]}.
+
+{translation, "rabbit.cluster_formation.peer_discovery_consul.consul_svc",
+fun(Conf) ->
+ case cuttlefish:conf_get("cluster_formation.consul.svc", Conf, undefined) of
+ undefined -> cuttlefish:unset();
+ Value -> Value
+ end
+end}.
+
+
+%% service address
+
+{mapping, "cluster_formation.consul.svc_addr", "rabbit.cluster_formation.peer_discovery_consul.consul_svc_addr", [
+ {datatype, string}
+]}.
+
+{translation, "rabbit.cluster_formation.peer_discovery_consul.consul_svc_addr",
+fun(Conf) ->
+ case cuttlefish:conf_get("cluster_formation.consul.svc_addr", Conf, undefined) of
+ undefined -> cuttlefish:unset();
+ Value -> Value
+ end
+end}.
+
+
+%% compute service address automatically?
+
+{mapping, "cluster_formation.consul.svc_addr_auto", "rabbit.cluster_formation.peer_discovery_consul.consul_svc_addr_auto", [
+ {datatype, {enum, [true, false]}}
+]}.
+
+{translation, "rabbit.cluster_formation.peer_discovery_consul.consul_svc_addr_auto",
+fun(Conf) ->
+ case cuttlefish:conf_get("cluster_formation.consul.svc_addr_auto", Conf, undefined) of
+ undefined -> cuttlefish:unset();
+ Value -> Value
+ end
+end}.
+
+
+%% service address NIC
+
+{mapping, "cluster_formation.consul.svc_addr_nic", "rabbit.cluster_formation.peer_discovery_consul.consul_svc_addr_nic", [
+ {datatype, string}
+]}.
+
+{translation, "rabbit.cluster_formation.peer_discovery_consul.consul_svc_addr_nic",
+fun(Conf) ->
+ case cuttlefish:conf_get("cluster_formation.consul.svc_addr_nic", Conf, undefined) of
+ undefined -> cuttlefish:unset();
+ Value -> Value
+ end
+end}.
+
+
+%% use the (Erlang) node name when computing the service address?
+
+{mapping, "cluster_formation.consul.svc_addr_use_nodename", "rabbit.cluster_formation.peer_discovery_consul.consul_svc_addr_nodename", [
+ {datatype, {enum, [true, false]}}
+]}.
+
+{translation, "rabbit.cluster_formation.peer_discovery_consul.consul_svc_addr_nodename",
+fun(Conf) ->
+ case cuttlefish:conf_get("cluster_formation.consul.svc_addr_use_nodename", Conf, undefined) of
+ undefined -> cuttlefish:unset();
+ Value -> Value
+ end
+end}.
+
+
+%% (optionally) append a suffix to node names retrieved from Consul
+
+{mapping, "cluster_formation.consul.domain_suffix", "rabbit.cluster_formation.peer_discovery_consul.consul_domain", [
+ {datatype, string}
+]}.
+
+{translation, "rabbit.cluster_formation.peer_discovery_consul.consul_domain",
+fun(Conf) ->
+ case cuttlefish:conf_get("cluster_formation.consul.domain_suffix", Conf, undefined) of
+ undefined -> cuttlefish:unset();
+ Value -> Value
+ end
+end}.
+
+
+%% include nodes with warnings?
+
+{mapping, "cluster_formation.consul.include_nodes_with_warnings", "rabbit.cluster_formation.peer_discovery_consul.include_nodes_with_warnings", [
+ {datatype, {enum, [true, false]}}
+]}.
+
+{translation, "rabbit.cluster_formation.peer_discovery_consul.consul_include_nodes_with_warnings",
+fun(Conf) ->
+ case cuttlefish:conf_get("cluster_formation.consul.include_nodes_with_warnings", Conf, undefined) of
+ undefined -> cuttlefish:unset();
+ Value -> Value
+ end
+end}.
+
+
+%% service (RabbitMQ node) port
+
+{mapping, "cluster_formation.consul.svc_port", "rabbit.cluster_formation.peer_discovery_consul.consul_svc_port", [
+ {datatype, integer}, {validators, ["port"]}
+]}.
+
+{translation, "rabbit.cluster_formation.peer_discovery_consul.consul_svc_port",
+fun(Conf) ->
+ case cuttlefish:conf_get("cluster_formation.consul.svc_port", Conf, undefined) of
+ undefined -> cuttlefish:unset();
+ Value -> Value
+ end
+end}.
+
+
+
+%% service check TTL
+
+{mapping, "cluster_formation.consul.svc_ttl", "rabbit.cluster_formation.peer_discovery_consul.consul_svc_ttl", [
+ {datatype, integer}, {validators, ["non_negative_integer"]}
+]}.
+
+{translation, "rabbit.cluster_formation.peer_discovery_consul.consul_svc_ttl",
+fun(Conf) ->
+ case cuttlefish:conf_get("cluster_formation.consul.svc_ttl", Conf, undefined) of
+ undefined -> cuttlefish:unset();
+ Value -> Value
+ end
+end}.
+
+
+%% should a node in a warning state be deregistered by Consul after a period of time?
+
+{mapping, "cluster_formation.consul.deregister_after", "rabbit.cluster_formation.peer_discovery_consul.consul_deregister_after", [
+ {datatype, integer}, {validators, ["non_negative_integer"]}
+]}.
+
+{translation, "rabbit.cluster_formation.peer_discovery_consul.consul_deregister_after",
+fun(Conf) ->
+ case cuttlefish:conf_get("cluster_formation.consul.deregister_after", Conf, undefined) of
+ undefined -> cuttlefish:unset();
+ Value -> Value
+ end
+end}.
+
+
+%% use long node names?
+
+{mapping, "cluster_formation.consul.use_longname", "rabbit.cluster_formation.peer_discovery_consul.consul_use_longname", [
+ {datatype, {enum, [true, false]}}
+]}.
+
+{translation, "rabbit.cluster_formation.peer_discovery_consul.consul_use_longname",
+fun(Conf) ->
+ case cuttlefish:conf_get("cluster_formation.consul.use_longname", Conf, undefined) of
+ undefined -> cuttlefish:unset();
+ Value -> Value
+ end
+end}.
+
+%% service tags
+
+{mapping, "cluster_formation.consul.svc_tags", "rabbit.cluster_formation.peer_discovery_consul.consul_svc_tags", [
+ {datatype, {enum, [none]}}
+]}.
+
+{mapping, "cluster_formation.consul.svc_tags.$name", "rabbit.cluster_formation.peer_discovery_consul.consul_svc_tags", [
+ {datatype, string}
+]}.
+
+{translation, "rabbit.cluster_formation.peer_discovery_consul.consul_svc_tags",
+fun(Conf) ->
+ case cuttlefish:conf_get("cluster_formation.consul.svc_tags", Conf, undefined) of
+ none -> [];
+ _ ->
+ Pairs = cuttlefish_variable:filter_by_prefix("cluster_formation.consul.svc_tags", Conf),
+ [V || {_, V} <- Pairs]
+ end
+end}.
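+
+%% Example (a sketch of the list syntax handled above):
+%%   cluster_formation.consul.svc_tags.1 = arbitrary-tag
+%%   cluster_formation.consul.svc_tags.2 = with-mqtt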
+
+%% service metadata
+
+{mapping, "cluster_formation.consul.svc_meta", "rabbit.cluster_formation.peer_discovery_consul.consul_svc_meta", [
+ {datatype, {enum, [none]}}
+]}.
+
+{mapping, "cluster_formation.consul.svc_meta.$name", "rabbit.cluster_formation.peer_discovery_consul.consul_svc_meta", [
+ {datatype, string}
+]}.
+
+{translation, "rabbit.cluster_formation.peer_discovery_consul.consul_svc_meta",
+fun(Conf) ->
+ case cuttlefish:conf_get("cluster_formation.consul.svc_meta", Conf, undefined) of
+ none -> [];
+ _ ->
+ Pairs = cuttlefish_variable:filter_by_prefix("cluster_formation.consul.svc_meta", Conf),
+ [{list_to_binary(lists:last(Segments)), list_to_binary(V)} || {Segments, V} <- Pairs]
+ end
+end}.
+
+%% lock key prefix
+
+{mapping, "cluster_formation.consul.lock_prefix", "rabbit.cluster_formation.peer_discovery_consul.consul_lock_prefix", [
+ {datatype, string}
+]}.
+
+{translation, "rabbit.cluster_formation.peer_discovery_consul.consul_lock_prefix",
+fun(Conf) ->
+ case cuttlefish:conf_get("cluster_formation.consul.lock_prefix", Conf, undefined) of
+ undefined -> cuttlefish:unset();
+ Value -> Value
+ end
+end}.
+
+%% lock acquisition timeout
+
+{mapping, "cluster_formation.consul.lock_wait_time", "rabbit.cluster_formation.peer_discovery_consul.lock_wait_time", [
+ {datatype, integer}, {validators, ["non_negative_integer"]}
+]}.
+
+{mapping, "cluster_formation.consul.lock_timeout", "rabbit.cluster_formation.peer_discovery_consul.lock_wait_time", [
+ {datatype, integer}, {validators, ["non_negative_integer"]}
+]}.
+
+%% an alias for lock acquisition timeout to be consistent with the etcd backend
+
+{translation, "rabbit.cluster_formation.peer_discovery_consul.lock_wait_time",
+fun(Conf) ->
+ case cuttlefish:conf_get("cluster_formation.consul.lock_timeout", Conf, undefined) of
+ undefined ->
+ case cuttlefish:conf_get("cluster_formation.consul.lock_wait_time", Conf, undefined) of
+ undefined -> cuttlefish:unset();
+ Value -> Value
+ end;
+ Value -> Value
+ end
+end}.
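+
+%% Example (a sketch): either line below sets the same lock acquisition
+%% timeout, the second being the etcd-style alias handled above:
+%%   cluster_formation.consul.lock_wait_time = 300
+%%   cluster_formation.consul.lock_timeout = 300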
diff --git a/deps/rabbitmq_peer_discovery_consul/rabbitmq-components.mk b/deps/rabbitmq_peer_discovery_consul/rabbitmq-components.mk
new file mode 100644
index 0000000000..b2a3be8b35
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_consul/rabbitmq-components.mk
@@ -0,0 +1,359 @@
+ifeq ($(.DEFAULT_GOAL),)
+# Set the default goal to `all` because this file defines some targets
+# before the inclusion of erlang.mk, which would otherwise cause the wrong
+# target to become the default.
+.DEFAULT_GOAL = all
+endif
+
+# PROJECT_VERSION defaults to:
+# 1. the version exported by rabbitmq-server-release;
+# 2. the version stored in `git-revisions.txt`, if it exists;
+# 3. a version based on git-describe(1), if it is a Git clone;
+# 4. 0.0.0
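+#
+# The awk invocation below prints the field that follows the project
+# description: $(words $(PROJECT_DESCRIPTION) version) expands to the number
+# of words in the description plus one, i.e. the position of the version in
+# a "<description> <version>" line of git-revisions.txt.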
+
+PROJECT_VERSION := $(RABBITMQ_VERSION)
+
+ifeq ($(PROJECT_VERSION),)
+PROJECT_VERSION := $(shell \
+if test -f git-revisions.txt; then \
+ head -n1 git-revisions.txt | \
+ awk '{print $$$(words $(PROJECT_DESCRIPTION) version);}'; \
+else \
+ (git describe --dirty --abbrev=7 --tags --always --first-parent \
+ 2>/dev/null || echo rabbitmq_v0_0_0) | \
+ sed -e 's/^rabbitmq_v//' -e 's/^v//' -e 's/_/./g' -e 's/-/+/' \
+ -e 's/-/./g'; \
+fi)
+endif
+
+# --------------------------------------------------------------------
+# RabbitMQ components.
+# --------------------------------------------------------------------
+
+# For RabbitMQ repositories, we want to check out branches which match
+# the parent project. For instance, if the parent project is on a
+# release tag, dependencies must be on the same release tag. If the
+# parent project is on a topic branch, dependencies must be on the same
+# topic branch or fall back to `stable` or `master`, whichever was the
+# base of the topic branch.
+
+dep_amqp_client = git_rmq rabbitmq-erlang-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_amqp10_client = git_rmq rabbitmq-amqp1.0-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_amqp10_common = git_rmq rabbitmq-amqp1.0-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbit = git_rmq rabbitmq-server $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbit_common = git_rmq rabbitmq-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_amqp1_0 = git_rmq rabbitmq-amqp1.0 $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_amqp = git_rmq rabbitmq-auth-backend-amqp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_cache = git_rmq rabbitmq-auth-backend-cache $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_http = git_rmq rabbitmq-auth-backend-http $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_ldap = git_rmq rabbitmq-auth-backend-ldap $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_oauth2 = git_rmq rabbitmq-auth-backend-oauth2 $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_mechanism_ssl = git_rmq rabbitmq-auth-mechanism-ssl $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_aws = git_rmq rabbitmq-aws $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_boot_steps_visualiser = git_rmq rabbitmq-boot-steps-visualiser $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_cli = git_rmq rabbitmq-cli $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_codegen = git_rmq rabbitmq-codegen $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_consistent_hash_exchange = git_rmq rabbitmq-consistent-hash-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_ct_client_helpers = git_rmq rabbitmq-ct-client-helpers $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_ct_helpers = git_rmq rabbitmq-ct-helpers $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_delayed_message_exchange = git_rmq rabbitmq-delayed-message-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_dotnet_client = git_rmq rabbitmq-dotnet-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_event_exchange = git_rmq rabbitmq-event-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_federation = git_rmq rabbitmq-federation $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_federation_management = git_rmq rabbitmq-federation-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_java_client = git_rmq rabbitmq-java-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_client = git_rmq rabbitmq-jms-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_cts = git_rmq rabbitmq-jms-cts $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_topic_exchange = git_rmq rabbitmq-jms-topic-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_lvc_exchange = git_rmq rabbitmq-lvc-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management = git_rmq rabbitmq-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_agent = git_rmq rabbitmq-management-agent $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_exchange = git_rmq rabbitmq-management-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_themes = git_rmq rabbitmq-management-themes $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_message_timestamp = git_rmq rabbitmq-message-timestamp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_metronome = git_rmq rabbitmq-metronome $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_mqtt = git_rmq rabbitmq-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_objc_client = git_rmq rabbitmq-objc-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_aws = git_rmq rabbitmq-peer-discovery-aws $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_common = git_rmq rabbitmq-peer-discovery-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_consul = git_rmq rabbitmq-peer-discovery-consul $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_etcd = git_rmq rabbitmq-peer-discovery-etcd $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_k8s = git_rmq rabbitmq-peer-discovery-k8s $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_prometheus = git_rmq rabbitmq-prometheus $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_random_exchange = git_rmq rabbitmq-random-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_recent_history_exchange = git_rmq rabbitmq-recent-history-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_routing_node_stamp = git_rmq rabbitmq-routing-node-stamp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_rtopic_exchange = git_rmq rabbitmq-rtopic-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_server_release = git_rmq rabbitmq-server-release $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_sharding = git_rmq rabbitmq-sharding $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_shovel = git_rmq rabbitmq-shovel $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_shovel_management = git_rmq rabbitmq-shovel-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_stomp = git_rmq rabbitmq-stomp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_stream = git_rmq rabbitmq-stream $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_toke = git_rmq rabbitmq-toke $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_top = git_rmq rabbitmq-top $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_tracing = git_rmq rabbitmq-tracing $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_trust_store = git_rmq rabbitmq-trust-store $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_test = git_rmq rabbitmq-test $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_dispatch = git_rmq rabbitmq-web-dispatch $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_stomp = git_rmq rabbitmq-web-stomp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_stomp_examples = git_rmq rabbitmq-web-stomp-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt = git_rmq rabbitmq-web-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt_examples = git_rmq rabbitmq-web-mqtt-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_website = git_rmq rabbitmq-website $(current_rmq_ref) $(base_rmq_ref) live master
+dep_toke = git_rmq toke $(current_rmq_ref) $(base_rmq_ref) master
+
+dep_rabbitmq_public_umbrella = git_rmq rabbitmq-public-umbrella $(current_rmq_ref) $(base_rmq_ref) master
+
+# Third-party dependencies version pinning.
+#
+# We pin them in this file, which is copied into all projects, to ensure
+# they all use the same versions. This avoids conflicts and makes it
+# possible to work with rabbitmq-public-umbrella.
+
+dep_accept = hex 0.3.5
+dep_cowboy = hex 2.8.0
+dep_cowlib = hex 2.9.1
+dep_jsx = hex 2.11.0
+dep_lager = hex 3.8.0
+dep_prometheus = git https://github.com/deadtrickster/prometheus.erl.git master
+dep_ra = git https://github.com/rabbitmq/ra.git master
+dep_ranch = hex 1.7.1
+dep_recon = hex 2.5.1
+dep_observer_cli = hex 1.5.4
+dep_stdout_formatter = hex 0.2.4
+dep_sysmon_handler = hex 1.3.0
+
+RABBITMQ_COMPONENTS = amqp_client \
+ amqp10_common \
+ amqp10_client \
+ rabbit \
+ rabbit_common \
+ rabbitmq_amqp1_0 \
+ rabbitmq_auth_backend_amqp \
+ rabbitmq_auth_backend_cache \
+ rabbitmq_auth_backend_http \
+ rabbitmq_auth_backend_ldap \
+ rabbitmq_auth_backend_oauth2 \
+ rabbitmq_auth_mechanism_ssl \
+ rabbitmq_aws \
+ rabbitmq_boot_steps_visualiser \
+ rabbitmq_cli \
+ rabbitmq_codegen \
+ rabbitmq_consistent_hash_exchange \
+ rabbitmq_ct_client_helpers \
+ rabbitmq_ct_helpers \
+ rabbitmq_delayed_message_exchange \
+ rabbitmq_dotnet_client \
+ rabbitmq_event_exchange \
+ rabbitmq_federation \
+ rabbitmq_federation_management \
+ rabbitmq_java_client \
+ rabbitmq_jms_client \
+ rabbitmq_jms_cts \
+ rabbitmq_jms_topic_exchange \
+ rabbitmq_lvc_exchange \
+ rabbitmq_management \
+ rabbitmq_management_agent \
+ rabbitmq_management_exchange \
+ rabbitmq_management_themes \
+ rabbitmq_message_timestamp \
+ rabbitmq_metronome \
+ rabbitmq_mqtt \
+ rabbitmq_objc_client \
+ rabbitmq_peer_discovery_aws \
+ rabbitmq_peer_discovery_common \
+ rabbitmq_peer_discovery_consul \
+ rabbitmq_peer_discovery_etcd \
+ rabbitmq_peer_discovery_k8s \
+ rabbitmq_prometheus \
+ rabbitmq_random_exchange \
+ rabbitmq_recent_history_exchange \
+ rabbitmq_routing_node_stamp \
+ rabbitmq_rtopic_exchange \
+ rabbitmq_server_release \
+ rabbitmq_sharding \
+ rabbitmq_shovel \
+ rabbitmq_shovel_management \
+ rabbitmq_stomp \
+ rabbitmq_stream \
+ rabbitmq_toke \
+ rabbitmq_top \
+ rabbitmq_tracing \
+ rabbitmq_trust_store \
+ rabbitmq_web_dispatch \
+ rabbitmq_web_mqtt \
+ rabbitmq_web_mqtt_examples \
+ rabbitmq_web_stomp \
+ rabbitmq_web_stomp_examples \
+ rabbitmq_website
+
+# Erlang.mk does not rebuild dependencies by default, once they were
+# compiled once, except for those listed in the `$(FORCE_REBUILD)`
+# variable.
+#
+# We want all RabbitMQ components to always be rebuilt: this eases
+# the work on several components at the same time.
+
+FORCE_REBUILD = $(RABBITMQ_COMPONENTS)
+
+# Several components have a custom erlang.mk/build.config, mainly
+# to disable eunit. Therefore, we can't use the top-level project's
+# erlang.mk copy.
+NO_AUTOPATCH += $(RABBITMQ_COMPONENTS)
+
+ifeq ($(origin current_rmq_ref),undefined)
+ifneq ($(wildcard .git),)
+current_rmq_ref := $(shell (\
+ ref=$$(LANG=C git branch --list | awk '/^\* \(.*detached / {ref=$$0; sub(/.*detached [^ ]+ /, "", ref); sub(/\)$$/, "", ref); print ref; exit;} /^\* / {ref=$$0; sub(/^\* /, "", ref); print ref; exit}');\
+ if test "$$(git rev-parse --short HEAD)" != "$$ref"; then echo "$$ref"; fi))
+else
+current_rmq_ref := master
+endif
+endif
+export current_rmq_ref
+
+ifeq ($(origin base_rmq_ref),undefined)
+ifneq ($(wildcard .git),)
+possible_base_rmq_ref := master
+ifeq ($(possible_base_rmq_ref),$(current_rmq_ref))
+base_rmq_ref := $(current_rmq_ref)
+else
+base_rmq_ref := $(shell \
+ (git rev-parse --verify -q master >/dev/null && \
+ git rev-parse --verify -q $(possible_base_rmq_ref) >/dev/null && \
+ git merge-base --is-ancestor $$(git merge-base master HEAD) $(possible_base_rmq_ref) && \
+ echo $(possible_base_rmq_ref)) || \
+ echo master)
+endif
+else
+base_rmq_ref := master
+endif
+endif
+export base_rmq_ref
+
+# Repository URL selection.
+#
+# First, we infer other components' location from the current project
+# repository URL, if it's a Git repository:
+# - We take the "origin" remote URL as the base
+# - The current project name and repository name is replaced by the
+# target's properties:
+# eg. rabbitmq-common is replaced by rabbitmq-codegen
+# eg. rabbit_common is replaced by rabbitmq_codegen
+#
+# If cloning from this computed location fails, we fallback to RabbitMQ
+# upstream which is GitHub.
+
+# Macro to transform eg. "rabbit_common" to "rabbitmq-common".
+rmq_cmp_repo_name = $(word 2,$(dep_$(1)))
+
+# Upstream URL for the current project.
+RABBITMQ_COMPONENT_REPO_NAME := $(call rmq_cmp_repo_name,$(PROJECT))
+RABBITMQ_UPSTREAM_FETCH_URL ?= https://github.com/rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git
+RABBITMQ_UPSTREAM_PUSH_URL ?= git@github.com:rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git
+
+# Current URL for the current project. If this is not a Git clone,
+# default to the upstream Git repository.
+ifneq ($(wildcard .git),)
+git_origin_fetch_url := $(shell git config remote.origin.url)
+git_origin_push_url := $(shell git config remote.origin.pushurl || git config remote.origin.url)
+RABBITMQ_CURRENT_FETCH_URL ?= $(git_origin_fetch_url)
+RABBITMQ_CURRENT_PUSH_URL ?= $(git_origin_push_url)
+else
+RABBITMQ_CURRENT_FETCH_URL ?= $(RABBITMQ_UPSTREAM_FETCH_URL)
+RABBITMQ_CURRENT_PUSH_URL ?= $(RABBITMQ_UPSTREAM_PUSH_URL)
+endif
+
+# Macro to replace the following pattern:
+# 1. /foo.git -> /bar.git
+# 2. /foo -> /bar
+# 3. /foo/ -> /bar/
+subst_repo_name = $(patsubst %/$(1)/%,%/$(2)/%,$(patsubst %/$(1),%/$(2),$(patsubst %/$(1).git,%/$(2).git,$(3))))
+
+# Macro to replace both the project's name (eg. "rabbit_common") and
+# repository name (eg. "rabbitmq-common") by the target's equivalent.
+#
+# This macro is kept on one line because we don't want whitespace in
+# the returned value, as it's used in $(dep_fetch_git_rmq) in a shell
+# single-quoted string.
+dep_rmq_repo = $(if $(dep_$(2)),$(call subst_repo_name,$(PROJECT),$(2),$(call subst_repo_name,$(RABBITMQ_COMPONENT_REPO_NAME),$(call rmq_cmp_repo_name,$(2)),$(1))),$(pkg_$(1)_repo))
+
+dep_rmq_commits = $(if $(dep_$(1)), \
+ $(wordlist 3,$(words $(dep_$(1))),$(dep_$(1))), \
+ $(pkg_$(1)_commit))
+
+define dep_fetch_git_rmq
+ fetch_url1='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_FETCH_URL),$(1))'; \
+ fetch_url2='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_FETCH_URL),$(1))'; \
+ if test "$$$$fetch_url1" != '$(RABBITMQ_CURRENT_FETCH_URL)' && \
+ git clone -q -n -- "$$$$fetch_url1" $(DEPS_DIR)/$(call dep_name,$(1)); then \
+ fetch_url="$$$$fetch_url1"; \
+ push_url='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_PUSH_URL),$(1))'; \
+ elif git clone -q -n -- "$$$$fetch_url2" $(DEPS_DIR)/$(call dep_name,$(1)); then \
+ fetch_url="$$$$fetch_url2"; \
+ push_url='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_PUSH_URL),$(1))'; \
+ fi; \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && ( \
+ $(foreach ref,$(call dep_rmq_commits,$(1)), \
+ git checkout -q $(ref) >/dev/null 2>&1 || \
+ ) \
+ (echo "error: no valid pathspec among: $(call dep_rmq_commits,$(1))" \
+ 1>&2 && false) ) && \
+ (test "$$$$fetch_url" = "$$$$push_url" || \
+ git remote set-url --push origin "$$$$push_url")
+endef
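+
+# Illustrative only: a component is declared with the `git_rmq` fetch
+# method, a repository name and one or more candidate refs, e.g. this
+# hypothetical entry:
+#
+#   dep_rabbit_common = git_rmq rabbitmq-common $(current_rmq_ref) $(base_rmq_ref) master
+#
+# Word 2 is the repository name used by $(rmq_cmp_repo_name); words 3+
+# are the refs tried in order by $(dep_rmq_commits).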
+
+# --------------------------------------------------------------------
+# Component distribution.
+# --------------------------------------------------------------------
+
+list-dist-deps::
+ @:
+
+prepare-dist::
+ @:
+
+# --------------------------------------------------------------------
+# Umbrella-specific settings.
+# --------------------------------------------------------------------
+
+# If the top-level project is a RabbitMQ component, we override
+# $(DEPS_DIR) for this project to point to the top-level's one.
+#
+# We also verify that the guessed DEPS_DIR is actually named `deps`,
+# to rule out any situation where finding a `rabbitmq-components.mk`
+# in an upper directory is mere coincidence.
+
+possible_deps_dir_1 = $(abspath ..)
+possible_deps_dir_2 = $(abspath ../../..)
+
+ifeq ($(notdir $(possible_deps_dir_1)),deps)
+ifneq ($(wildcard $(possible_deps_dir_1)/../rabbitmq-components.mk),)
+deps_dir_overriden = 1
+DEPS_DIR ?= $(possible_deps_dir_1)
+DISABLE_DISTCLEAN = 1
+endif
+endif
+
+ifeq ($(deps_dir_overriden),)
+ifeq ($(notdir $(possible_deps_dir_2)),deps)
+ifneq ($(wildcard $(possible_deps_dir_2)/../rabbitmq-components.mk),)
+deps_dir_overriden = 1
+DEPS_DIR ?= $(possible_deps_dir_2)
+DISABLE_DISTCLEAN = 1
+endif
+endif
+endif
+
+ifneq ($(wildcard UMBRELLA.md),)
+DISABLE_DISTCLEAN = 1
+endif
+
+# We disable `make distclean` so $(DEPS_DIR) is not accidentally removed.
+
+ifeq ($(DISABLE_DISTCLEAN),1)
+ifneq ($(filter distclean distclean-deps,$(MAKECMDGOALS)),)
+SKIP_DEPS = 1
+endif
+endif
diff --git a/deps/rabbitmq_peer_discovery_consul/src/rabbit_peer_discovery_consul.erl b/deps/rabbitmq_peer_discovery_consul/src/rabbit_peer_discovery_consul.erl
new file mode 100644
index 0000000000..81125988cc
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_consul/src/rabbit_peer_discovery_consul.erl
@@ -0,0 +1,777 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% The Initial Developer of the Original Code is AWeber Communications.
+%% Copyright (c) 2015-2016 AWeber Communications
+%% Copyright (c) 2016-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_peer_discovery_consul).
+-behaviour(rabbit_peer_discovery_backend).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+-include_lib("rabbitmq_peer_discovery_common/include/rabbit_peer_discovery.hrl").
+-include("rabbit_peer_discovery_consul.hrl").
+
+-export([init/0, list_nodes/0, supports_registration/0, register/0, unregister/0,
+ post_registration/0, lock/1, unlock/1]).
+-export([send_health_check_pass/0]).
+-export([session_ttl_update_callback/1]).
+%% useful for debugging from the REPL with RABBITMQ_ALLOW_INPUT
+-export([service_id/0, service_address/0]).
+%% for tests
+-ifdef(TEST).
+-compile(export_all).
+-endif.
+
+-define(CONFIG_MODULE, rabbit_peer_discovery_config).
+-define(UTIL_MODULE, rabbit_peer_discovery_util).
+
+-define(CONSUL_CHECK_NOTES, "RabbitMQ Consul-based peer discovery plugin TTL check").
+
+%%
+%% API
+%%
+
+init() ->
+ rabbit_log:debug("Peer discovery Consul: initialising..."),
+ ok = application:ensure_started(inets),
+ %% we cannot start this plugin yet since it depends on the rabbit app,
+ %% which is in the process of being started by the time this function is called
+ application:load(rabbitmq_peer_discovery_common),
+ rabbit_peer_discovery_httpc:maybe_configure_proxy(),
+ rabbit_peer_discovery_httpc:maybe_configure_inet6().
+
+-spec list_nodes() -> {ok, {Nodes :: list(), NodeType :: rabbit_types:node_type()}} | {error, Reason :: string()}.
+
+list_nodes() ->
+ Fun0 = fun() -> {ok, {[], disc}} end,
+ Fun1 = fun() ->
+ rabbit_log:warning("Peer discovery backend is set to ~s "
+ "but final config does not contain rabbit.cluster_formation.peer_discovery_consul. "
+ "Cannot discover any nodes because Consul cluster details are not configured!",
+ [?MODULE]),
+ {ok, {[], disc}}
+ end,
+ Fun2 = fun(Proplist) ->
+ M = maps:from_list(Proplist),
+ case rabbit_peer_discovery_httpc:get(get_config_key(consul_scheme, M),
+ get_config_key(consul_host, M),
+ get_integer_config_key(consul_port, M),
+ rabbit_peer_discovery_httpc:build_path([v1, health, service, get_config_key(consul_svc, M)]),
+ list_nodes_query_args(),
+ maybe_add_acl([]),
+ []) of
+ {ok, Nodes} ->
+ IncludeWithWarnings = get_config_key(consul_include_nodes_with_warnings, M),
+ Result = extract_nodes(
+ filter_nodes(Nodes, IncludeWithWarnings)),
+ {ok, {Result, disc}};
+ {error, _} = Error ->
+ Error
+ end
+ end,
+ rabbit_peer_discovery_util:maybe_backend_configured(?BACKEND_CONFIG_KEY, Fun0, Fun1, Fun2).
+
+
+-spec supports_registration() -> boolean().
+
+supports_registration() ->
+ true.
+
+
+-spec register() -> ok | {error, Reason :: string()}.
+register() ->
+ M = ?CONFIG_MODULE:config_map(?BACKEND_CONFIG_KEY),
+ case registration_body() of
+ {ok, Body} ->
+ rabbit_log:debug("Consul registration body: ~s", [Body]),
+ case rabbit_peer_discovery_httpc:put(get_config_key(consul_scheme, M),
+ get_config_key(consul_host, M),
+ get_integer_config_key(consul_port, M),
+ rabbit_peer_discovery_httpc:build_path([v1, agent, service, register]),
+ [],
+ maybe_add_acl([]),
+ Body) of
+ {ok, _} -> ok;
+ Error -> Error
+ end;
+ Error -> Error
+ end.
+
+
+-spec unregister() -> ok | {error, Reason :: string()}.
+unregister() ->
+ M = ?CONFIG_MODULE:config_map(?BACKEND_CONFIG_KEY),
+ ID = service_id(),
+ rabbit_log:debug("Unregistering with Consul using service ID '~s'", [ID]),
+ case rabbit_peer_discovery_httpc:put(get_config_key(consul_scheme, M),
+ get_config_key(consul_host, M),
+ get_integer_config_key(consul_port, M),
+ rabbit_peer_discovery_httpc:build_path([v1, agent, service, deregister, ID]),
+ [],
+ maybe_add_acl([]),
+ []) of
+ {ok, Response} ->
+ rabbit_log:info("Consul's response to the unregistration attempt: ~p", [Response]),
+ ok;
+ Error ->
+ rabbit_log:info("Failed to unregister service with ID '~s` with Consul: ~p",
+ [ID, Error]),
+ Error
+ end.
+
+-spec post_registration() -> ok.
+
+post_registration() ->
+    %% Don't wait for one full interval: let Consul know the service is
+    %% healthy right after registration.
+    %% See rabbitmq/rabbitmq-peer-discovery-consul#8.
+ send_health_check_pass(),
+ ok.
+
+-spec lock(Node :: atom()) -> {ok, Data :: term()} | {error, Reason :: string()}.
+
+lock(Node) ->
+ M = ?CONFIG_MODULE:config_map(?BACKEND_CONFIG_KEY),
+ rabbit_log:debug("Effective Consul peer discovery configuration: ~p", [M]),
+ case create_session(Node, get_config_key(consul_svc_ttl, M)) of
+ {ok, SessionId} ->
+ TRef = start_session_ttl_updater(SessionId),
+ Now = erlang:system_time(seconds),
+ EndTime = Now + get_config_key(lock_wait_time, M),
+ lock(TRef, SessionId, Now, EndTime);
+ {error, Reason} ->
+ {error, lists:flatten(io_lib:format("Error while creating a session, reason: ~s",
+ [Reason]))}
+ end.
+
+-spec unlock({SessionId :: string(), TRef :: timer:tref()}) -> ok.
+
+unlock({SessionId, TRef}) ->
+ timer:cancel(TRef),
+ rabbit_log:debug("Stopped session renewal"),
+ case release_lock(SessionId) of
+ {ok, true} ->
+ ok;
+ {ok, false} ->
+ {error, lists:flatten(io_lib:format("Error while releasing the lock, session ~s may have been invalidated", [SessionId]))};
+ {error, _} = Err ->
+ Err
+ end.
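+
+%% A minimal usage sketch (illustrative; in practice the peer discovery
+%% subsystem invokes these callbacks, and cluster_join/0 below is a
+%% hypothetical placeholder):
+%%
+%%   case rabbit_peer_discovery_consul:lock(node()) of
+%%       {ok, LockData} ->
+%%           try cluster_join()
+%%           after rabbit_peer_discovery_consul:unlock(LockData)
+%%           end;
+%%       {error, Reason} ->
+%%           {error, Reason}
+%%   end.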
+
+%%
+%% Implementation
+%%
+
+-spec get_config_key(Key :: atom(), Map :: #{atom() => peer_discovery_config_value()})
+ -> peer_discovery_config_value().
+
+get_config_key(Key, Map) ->
+ ?CONFIG_MODULE:get(Key, ?CONFIG_MAPPING, Map).
+
+-spec get_integer_config_key(Key :: atom(), Map :: #{atom() => peer_discovery_config_value()})
+ -> integer().
+
+get_integer_config_key(Key, Map) ->
+ ?CONFIG_MODULE:get_integer(Key, ?CONFIG_MAPPING, Map).
+
+
+-spec filter_nodes(ConsulResult :: list(), AllowWarning :: boolean()) -> list().
+filter_nodes(Nodes, Warn) ->
+ case Warn of
+ true ->
+ lists:filter(fun(Node) ->
+ Checks = maps:get(<<"Checks">>, Node),
+ lists:all(fun(Check) ->
+ lists:member(maps:get(<<"Status">>, Check),
+ [<<"passing">>, <<"warning">>])
+ end,
+ Checks)
+ end,
+ Nodes);
+ false -> Nodes
+ end.
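+
+%% Illustrative sketch: given a hypothetical Consul health entry
+%%   #{<<"Checks">> => [#{<<"Status">> => <<"warning">>}]}
+%% filter_nodes([Entry], true) keeps the entry, since "warning" is an
+%% accepted status. With AllowWarning = false the list is returned
+%% unchanged: in that case filtering happens server-side via the
+%% `passing' query argument (see list_nodes_query_args/2).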
+
+-spec extract_nodes(ConsulResult :: list()) -> list().
+extract_nodes(Data) -> extract_nodes(Data, []).
+
+-spec extract_nodes(ConsulResult :: list(), Nodes :: list())
+ -> list().
+extract_nodes([], Nodes) -> Nodes;
+extract_nodes([H | T], Nodes) ->
+ Service = maps:get(<<"Service">>, H),
+ Value = maps:get(<<"Address">>, Service),
+ NodeName = case ?UTIL_MODULE:as_string(Value) of
+ "" ->
+ NodeData = maps:get(<<"Node">>, H),
+ Node = maps:get(<<"Node">>, NodeData),
+ maybe_add_domain(?UTIL_MODULE:node_name(Node));
+ Address ->
+ ?UTIL_MODULE:node_name(Address)
+ end,
+ extract_nodes(T, lists:merge(Nodes, [NodeName])).
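+
+%% Illustrative sketch, assuming node_name/1 prepends the default
+%% "rabbit@" prefix: for an entry whose service has an empty "Address",
+%%   #{<<"Service">> => #{<<"Address">> => <<>>},
+%%     <<"Node">>    => #{<<"Node">> => <<"rabbit1">>}}
+%% the Consul node name is used instead, yielding ['rabbit@rabbit1']
+%% (possibly domain-qualified by maybe_add_domain/1).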
+
+-spec maybe_add_acl(QArgs :: list()) -> list().
+maybe_add_acl(QArgs) ->
+ M = ?CONFIG_MODULE:config_map(?BACKEND_CONFIG_KEY),
+ case get_config_key(consul_acl_token, M) of
+ "undefined" -> QArgs;
+ ACL -> lists:append(QArgs, [{"X-Consul-Token", ACL}])
+ end.
+
+-spec list_nodes_query_args() -> list().
+list_nodes_query_args() ->
+ M = ?CONFIG_MODULE:config_map(?BACKEND_CONFIG_KEY),
+ list_nodes_query_args(get_config_key(cluster_name, M)).
+
+-spec list_nodes_query_args(ClusterName :: string()) -> list().
+list_nodes_query_args(Cluster) ->
+ ClusterTag = case Cluster of
+ "default" -> [];
+ _ -> [{tag, Cluster}]
+ end,
+ M = ?CONFIG_MODULE:config_map(?BACKEND_CONFIG_KEY),
+ list_nodes_query_args(ClusterTag, get_config_key(consul_include_nodes_with_warnings, M)).
+
+-spec list_nodes_query_args(Args :: list(), AllowWarn :: boolean()) -> list().
+list_nodes_query_args(Value, Warn) ->
+ case Warn of
+ true -> Value;
+ false -> [passing | Value]
+ end.
+
+-spec registration_body() -> {ok, Body :: binary()} | {error, atom()}.
+registration_body() ->
+ Payload = build_registration_body(),
+ registration_body(rabbit_json:try_encode(Payload)).
+
+-spec registration_body(Response :: {ok, Body :: string()} |
+ {error, Reason :: atom()})
+ -> {ok, Body :: binary()} | {error, Reason :: atom()}.
+registration_body({ok, Body}) ->
+ {ok, rabbit_data_coercion:to_binary(Body)};
+registration_body({error, Reason}) ->
+ rabbit_log:error("Error serializing the request body: ~p",
+ [Reason]),
+ {error, Reason}.
+
+
+-spec build_registration_body() -> list().
+build_registration_body() ->
+ Payload1 = registration_body_add_id(),
+ Payload2 = registration_body_add_name(Payload1),
+ Payload3 = registration_body_maybe_add_address(Payload2),
+ Payload4 = registration_body_add_port(Payload3),
+ Payload5 = registration_body_maybe_add_check(Payload4),
+ Payload6 = registration_body_maybe_add_tag(Payload5),
+ registration_body_maybe_add_meta(Payload6).
+
+-spec registration_body_add_id() -> list().
+registration_body_add_id() ->
+ [{'ID', rabbit_data_coercion:to_atom(service_id())}].
+
+-spec registration_body_add_name(Payload :: list()) -> list().
+registration_body_add_name(Payload) ->
+ M = ?CONFIG_MODULE:config_map(?BACKEND_CONFIG_KEY),
+ Name = rabbit_data_coercion:to_atom(get_config_key(consul_svc, M)),
+ lists:append(Payload, [{'Name', Name}]).
+
+-spec registration_body_maybe_add_address(Payload :: list())
+ -> list().
+registration_body_maybe_add_address(Payload) ->
+ registration_body_maybe_add_address(Payload, service_address()).
+
+-spec registration_body_maybe_add_address(Payload :: list(), string())
+ -> list().
+registration_body_maybe_add_address(Payload, "undefined") -> Payload;
+registration_body_maybe_add_address(Payload, Address) ->
+ lists:append(Payload, [{'Address', rabbit_data_coercion:to_atom(Address)}]).
+
+registration_body_maybe_add_check(Payload) ->
+ M = ?CONFIG_MODULE:config_map(?BACKEND_CONFIG_KEY),
+ TTL = get_config_key(consul_svc_ttl, M),
+ registration_body_maybe_add_check(Payload, TTL).
+
+-spec registration_body_maybe_add_check(Payload :: list(),
+ TTL :: integer() | undefined)
+ -> list().
+registration_body_maybe_add_check(Payload, undefined) ->
+ case registration_body_maybe_add_deregister([]) of
+ [{'DeregisterCriticalServiceAfter', _}]->
+ rabbit_log:warning("Can't use Consul's service deregistration feature without " ++
+ "using TTL. The parameter will be ignored"),
+ Payload;
+
+ _ -> Payload
+ end;
+registration_body_maybe_add_check(Payload, TTL) ->
+ CheckItems = [{'Notes', rabbit_data_coercion:to_atom(?CONSUL_CHECK_NOTES)},
+ {'TTL', rabbit_data_coercion:to_atom(service_ttl(TTL))},
+ {'Status', 'passing'}],
+ Check = [{'Check', registration_body_maybe_add_deregister(CheckItems)}],
+ lists:append(Payload, Check).
+
+-spec registration_body_add_port(Payload :: list()) -> list().
+registration_body_add_port(Payload) ->
+ M = ?CONFIG_MODULE:config_map(?BACKEND_CONFIG_KEY),
+ lists:append(Payload,
+ [{'Port', get_config_key(consul_svc_port, M)}]).
+
+registration_body_maybe_add_deregister(Payload) ->
+ M = ?CONFIG_MODULE:config_map(?BACKEND_CONFIG_KEY),
+ Deregister = get_config_key(consul_deregister_after, M),
+ registration_body_maybe_add_deregister(Payload, Deregister).
+
+-spec registration_body_maybe_add_deregister(Payload :: list(),
+ TTL :: integer() | undefined)
+ -> list().
+registration_body_maybe_add_deregister(Payload, undefined) -> Payload;
+registration_body_maybe_add_deregister(Payload, Deregister_After) ->
+ Deregister = {'DeregisterCriticalServiceAfter',
+ rabbit_data_coercion:to_atom(service_ttl(Deregister_After))},
+ Payload ++ [Deregister].
+
+-spec registration_body_maybe_add_tag(Payload :: list()) -> list().
+registration_body_maybe_add_tag(Payload) ->
+ M = ?CONFIG_MODULE:config_map(?BACKEND_CONFIG_KEY),
+ Value = get_config_key(cluster_name, M),
+ Tags = ?UTIL_MODULE:as_list(get_config_key(consul_svc_tags, M)),
+ registration_body_maybe_add_tag(Payload, Value, Tags).
+
+-spec registration_body_maybe_add_tag(Payload :: list(),
+ ClusterName :: string(),
+ Tags :: list())
+ -> list().
+registration_body_maybe_add_tag(Payload, "default", []) -> Payload;
+registration_body_maybe_add_tag(Payload, "default", Tags) ->
+ lists:append(Payload, [{'Tags', [rabbit_data_coercion:to_atom(X) || X <- Tags]}]);
+registration_body_maybe_add_tag(Payload, Cluster, []) ->
+ lists:append(Payload, [{'Tags', [rabbit_data_coercion:to_atom(Cluster)]}]);
+registration_body_maybe_add_tag(Payload, Cluster, Tags) ->
+ lists:append(Payload, [{'Tags', [rabbit_data_coercion:to_atom(Cluster)] ++ [rabbit_data_coercion:to_atom(X) || X <- Tags]}]).
+
+
+-spec registration_body_maybe_add_meta(Payload :: list()) -> list().
+registration_body_maybe_add_meta(Payload) ->
+ M = ?CONFIG_MODULE:config_map(?BACKEND_CONFIG_KEY),
+ ClusterName = get_config_key(cluster_name, M),
+ Meta = ?UTIL_MODULE:as_list(get_config_key(consul_svc_meta, M)),
+ registration_body_maybe_add_meta(Payload, ClusterName, Meta).
+
+-spec registration_body_maybe_add_meta(Payload :: list(),
+ ClusterName :: string(),
+ Meta :: list()) -> list().
+registration_body_maybe_add_meta(Payload, "default", []) ->
+ Payload;
+registration_body_maybe_add_meta(Payload, "default", Meta) ->
+ lists:append(Payload, [{<<"meta">>, Meta}]);
+registration_body_maybe_add_meta(Payload, _ClusterName, []) ->
+ Payload;
+registration_body_maybe_add_meta(Payload, ClusterName, Meta) ->
+ Merged = maps:to_list(maps:merge(#{<<"cluster">> => rabbit_data_coercion:to_binary(ClusterName)}, maps:from_list(Meta))),
+ lists:append(Payload, [{<<"meta">>, Merged}]).
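+
+%% Illustrative: with cluster_name "qa" and consul_svc_meta
+%% [{<<"owner">>, <<"team-a">>}] (a hypothetical value), the resulting
+%% entry would be
+%%   {<<"meta">>, [{<<"cluster">>, <<"qa">>}, {<<"owner">>, <<"team-a">>}]}
+%% with key order unspecified, as it comes from maps:to_list/1.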
+
+
+-spec validate_addr_parameters(false | true, false | true) -> false | true.
+validate_addr_parameters(false, true) ->
+ rabbit_log:warning("The parameter CONSUL_SVC_ADDR_NODENAME" ++
+ " can be used only if CONSUL_SVC_ADDR_AUTO is true." ++
+ " CONSUL_SVC_ADDR_NODENAME value will be ignored."),
+ false;
+validate_addr_parameters(_, _) ->
+ true.
+
+
+-spec service_address() -> string().
+service_address() ->
+ M = ?CONFIG_MODULE:config_map(?BACKEND_CONFIG_KEY),
+ validate_addr_parameters(get_config_key(consul_svc_addr_auto, M),
+ get_config_key(consul_svc_addr_nodename, M)),
+ service_address(get_config_key(consul_svc_addr, M),
+ get_config_key(consul_svc_addr_auto, M),
+ get_config_key(consul_svc_addr_nic, M),
+ get_config_key(consul_svc_addr_nodename, M)).
+
+
+-spec service_address(Static :: string(),
+ Auto :: boolean(),
+ AutoNIC :: string(),
+ FromNodename :: boolean()) -> string().
+service_address(_, true, "undefined", FromNodename) ->
+ rabbit_peer_discovery_util:node_hostname(FromNodename);
+service_address(Value, false, "undefined", _) ->
+ Value;
+service_address(_, true, NIC, _) ->
+ %% TODO: support IPv6
+ {ok, Addr} = rabbit_peer_discovery_util:nic_ipv4(NIC),
+ Addr;
+%% this combination makes no sense but this is what rabbitmq-autocluster
+%% and this plugin have been allowing for a couple of years, so we keep
+%% this clause around for backwards compatibility.
+%% See rabbitmq/rabbitmq-peer-discovery-consul#12 for details.
+service_address(_, false, NIC, _) ->
+ {ok, Addr} = rabbit_peer_discovery_util:nic_ipv4(NIC),
+ Addr.
+
+
+-spec service_id() -> string().
+service_id() ->
+ M = ?CONFIG_MODULE:config_map(?BACKEND_CONFIG_KEY),
+ service_id(get_config_key(consul_svc, M),
+ service_address()).
+
+-spec service_id(Name :: string(), Address :: string()) -> string().
+service_id(Service, "undefined") -> Service;
+service_id(Service, Address) ->
+ string:join([Service, Address], ":").
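+
+%% For example: service_id("rabbitmq", "undefined") yields "rabbitmq",
+%% while service_id("rabbitmq", "10.0.0.1") yields "rabbitmq:10.0.0.1".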
+
+-spec service_ttl(TTL :: integer()) -> string().
+service_ttl(Value) ->
+ rabbit_peer_discovery_util:as_string(Value) ++ "s".
+
+-spec maybe_add_domain(Domain :: atom()) -> atom().
+maybe_add_domain(Value) ->
+ M = ?CONFIG_MODULE:config_map(?BACKEND_CONFIG_KEY),
+ case get_config_key(consul_use_longname, M) of
+ true ->
+ rabbit_data_coercion:to_atom(string:join([atom_to_list(Value),
+ "node",
+ get_config_key(consul_domain, M)],
+ "."));
+ false -> Value
+ end.
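+
+%% Illustrative: assuming consul_use_longname is true and consul_domain
+%% is "example.local" (hypothetical values), the node name 'rabbit@rmq1'
+%% becomes 'rabbit@rmq1.node.example.local'.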
+
+
+%%--------------------------------------------------------------------
+%% @doc
+%% Let Consul know that this node is still around
+%% @end
+%%--------------------------------------------------------------------
+
+-spec send_health_check_pass() -> ok.
+
+send_health_check_pass() ->
+ Service = string:join(["service", service_id()], ":"),
+ M = ?CONFIG_MODULE:config_map(?BACKEND_CONFIG_KEY),
+ rabbit_log:debug("Running Consul health check"),
+ case rabbit_peer_discovery_httpc:put(get_config_key(consul_scheme, M),
+ get_config_key(consul_host, M),
+ get_integer_config_key(consul_port, M),
+ rabbit_peer_discovery_httpc:build_path([v1, agent, check, pass, Service]),
+ [],
+ maybe_add_acl([]),
+ []) of
+ {ok, []} -> ok;
+ {error, "429"} ->
+ %% Too Many Requests, see https://www.consul.io/docs/agent/checks.html
+ rabbit_log:warning("Consul responded to a health check with 429 Too Many Requests"),
+ ok;
+ {error, "500"} ->
+ rabbit_log:warning("Consul responded to a health check with a 500 status, will wait and try re-registering"),
+ maybe_re_register(wait_for_list_nodes()),
+ ok;
+ {error, Reason} ->
+ rabbit_log:error("Error running Consul health check: ~p",
+ [Reason]),
+ ok
+ end.
+
+maybe_re_register({error, Reason}) ->
+ rabbit_log:error("Internal error in Consul while updating health check. "
+ "Cannot obtain list of nodes registered in Consul either: ~p",
+ [Reason]);
+maybe_re_register({ok, {Members, _NodeType}}) ->
+ maybe_re_register(Members);
+maybe_re_register({ok, Members}) ->
+ maybe_re_register(Members);
+maybe_re_register(Members) ->
+ case lists:member(node(), Members) of
+ true ->
+ rabbit_log:error("Internal error in Consul while updating health check",
+ []);
+ false ->
+ rabbit_log:error("Internal error in Consul while updating health check, "
+ "node is not registered. Re-registering", []),
+ register()
+ end.
+
+wait_for_list_nodes() ->
+ wait_for_list_nodes(60).
+
+wait_for_list_nodes(N) ->
+ case {list_nodes(), N} of
+ {Reply, 0} ->
+ Reply;
+ {{ok, _} = Reply, _} ->
+ Reply;
+ {{error, _}, _} ->
+ timer:sleep(1000),
+ wait_for_list_nodes(N - 1)
+ end.
+
+%%--------------------------------------------------------------------
+%% @private
+%% @doc
+%% Create a session used to acquire the common lock key
+%% @end
+%%--------------------------------------------------------------------
+-spec create_session(string(), pos_integer()) -> {ok, string()} | {error, Reason::string()}.
+create_session(Name, TTL) ->
+ case consul_session_create([], maybe_add_acl([]),
+ [{'Name', Name},
+ {'TTL', rabbit_data_coercion:to_atom(service_ttl(TTL))}]) of
+ {ok, Response} ->
+ {ok, get_session_id(Response)};
+ {error, _} = Err ->
+ Err
+ end.
+
+%%--------------------------------------------------------------------
+%% @private
+%% @doc
+%% Create session
+%% @end
+%%--------------------------------------------------------------------
+-spec consul_session_create(Query, Headers, Body) -> {ok, string()} | {error, any()} when
+ Query :: list(),
+ Headers :: [{string(), string()}],
+ Body :: term().
+consul_session_create(Query, Headers, Body) ->
+ M = ?CONFIG_MODULE:config_map(?BACKEND_CONFIG_KEY),
+ case serialize_json_body(Body) of
+ {ok, Serialized} ->
+ rabbit_peer_discovery_httpc:put(get_config_key(consul_scheme, M),
+ get_config_key(consul_host, M),
+ get_integer_config_key(consul_port, M),
+ "v1/session/create",
+ Query,
+ Headers,
+ Serialized);
+ {error, _} = Err ->
+ Err
+ end.
+
+%%--------------------------------------------------------------------
+%% @private
+%% @doc
+%% Process the result of JSON encoding the request body payload,
+%% returning the body as a binary() value or the error returned by
+%% the JSON serialization library.
+%% @end
+%%--------------------------------------------------------------------
+-spec serialize_json_body(term()) -> {ok, Payload :: binary()} | {error, atom()}.
+serialize_json_body([]) -> {ok, []};
+serialize_json_body(Payload) ->
+ case rabbit_json:try_encode(Payload) of
+ {ok, Body} -> {ok, Body};
+ {error, Reason} -> {error, Reason}
+ end.
+
+%%--------------------------------------------------------------------
+%% @private
+%% @doc
+%% Extract session ID from Consul response
+%% @end
+%%--------------------------------------------------------------------
+-spec get_session_id(term()) -> string().
+get_session_id(#{<<"ID">> := ID}) -> binary:bin_to_list(ID).
+
+%%--------------------------------------------------------------------
+%% @private
+%% @doc
+%% Start periodically renewing an existing session's TTL
+%% @end
+%%--------------------------------------------------------------------
+-spec start_session_ttl_updater(string()) -> timer:tref().
+start_session_ttl_updater(SessionId) ->
+ M = ?CONFIG_MODULE:config_map(?BACKEND_CONFIG_KEY),
+ Interval = get_config_key(consul_svc_ttl, M),
+ rabbit_log:debug("Starting session renewal"),
+ {ok, TRef} = timer:apply_interval(Interval * 500, ?MODULE,
+ session_ttl_update_callback, [SessionId]),
+ TRef.
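+
+%% For example, a consul_svc_ttl of 30 (seconds) yields a renewal timer
+%% firing every 15000 ms, i.e. twice per TTL window, mirroring the 1/2
+%% factor used by rabbitmq_peer_discovery_consul_health_check_helper.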
+
+%%
+%% @doc
+%% Tries to acquire the lock. If the lock is held by someone else, waits
+%% until it is released or until too much time has passed.
+%% @end
+-spec lock(timer:tref(), string(), pos_integer(), pos_integer()) -> {ok, {string(), timer:tref()}} | {error, string()}.
+lock(TRef, _, Now, EndTime) when EndTime < Now ->
+ timer:cancel(TRef),
+ {error, "Acquiring lock taking too long, bailing out"};
+lock(TRef, SessionId, _, EndTime) ->
+ case acquire_lock(SessionId) of
+ {ok, true} ->
+ {ok, {SessionId, TRef}};
+ {ok, false} ->
+ case get_lock_status() of
+ {ok, {SessionHeld, ModifyIndex}} ->
+ Wait = max(EndTime - erlang:system_time(seconds), 0),
+ case wait_for_lock_release(SessionHeld, ModifyIndex, Wait) of
+ ok ->
+ lock(TRef, SessionId, erlang:system_time(seconds), EndTime);
+ {error, Reason} ->
+ timer:cancel(TRef),
+ {error, lists:flatten(io_lib:format("Error waiting for lock release, reason: ~s",[Reason]))}
+ end;
+ {error, Reason} ->
+ timer:cancel(TRef),
+ {error, lists:flatten(io_lib:format("Error obtaining lock status, reason: ~s", [Reason]))}
+ end;
+ {error, Reason} ->
+ timer:cancel(TRef),
+ {error, lists:flatten(io_lib:format("Error while acquiring lock, reason: ~s", [Reason]))}
+ end.
+
+%%--------------------------------------------------------------------
+%% @private
+%% @doc
+%% Acquire the startup lock key with the given session
+%% @end
+%%--------------------------------------------------------------------
+-spec acquire_lock(string()) -> {ok, any()} | {error, string()}.
+acquire_lock(SessionId) ->
+ consul_kv_write(startup_lock_path(), [{acquire, SessionId}], maybe_add_acl([]), []).
+
+%%--------------------------------------------------------------------
+%% @private
+%% @doc
+%% Release a previously acquired lock held by a given session
+%% @end
+%%--------------------------------------------------------------------
+-spec release_lock(string()) -> {ok, any()} | {error, string()}.
+release_lock(SessionId) ->
+ consul_kv_write(startup_lock_path(), [{release, SessionId}], maybe_add_acl([]), []).
+
+%%--------------------------------------------------------------------
+%% @private
+%% @doc
+%% Write KV store key value
+%% @end
+%%--------------------------------------------------------------------
+-spec consul_kv_write(Path, Query, Headers, Body) -> {ok, any()} | {error, string()} when
+ Path :: string(),
+ Query :: [{string(), string()}],
+ Headers :: [{string(), string()}],
+ Body :: term().
+consul_kv_write(Path, Query, Headers, Body) ->
+ M = ?CONFIG_MODULE:config_map(?BACKEND_CONFIG_KEY),
+ case serialize_json_body(Body) of
+ {ok, Serialized} ->
+ rabbit_peer_discovery_httpc:put(get_config_key(consul_scheme, M),
+ get_config_key(consul_host, M),
+ get_integer_config_key(consul_port, M),
+ "v1/kv/" ++ Path,
+ Query,
+ Headers,
+ Serialized);
+ {error, _} = Err ->
+ Err
+ end.
+
+%%--------------------------------------------------------------------
+%% @private
+%% @doc
+%% Read KV store key value
+%% @end
+%%--------------------------------------------------------------------
+-spec consul_kv_read(Path, Query, Headers) -> {ok, term()} | {error, string()} when
+ Path :: string(),
+ Query :: [{string(), string()}],
+ Headers :: [{string(), string()}].
+consul_kv_read(Path, Query, Headers) ->
+ M = ?CONFIG_MODULE:config_map(?BACKEND_CONFIG_KEY),
+ rabbit_peer_discovery_httpc:get(get_config_key(consul_scheme, M),
+ get_config_key(consul_host, M),
+ get_integer_config_key(consul_port, M),
+ "v1/kv/" ++ Path,
+ Query,
+ Headers,
+ []).
+
+%%--------------------------------------------------------------------
+%% @private
+%% @doc
+%% Get lock status
+%% XXX: probably makes sense to wrap output in a record to be
+%% more future-proof
+%% @end
+%%--------------------------------------------------------------------
+-spec get_lock_status() -> {ok, term()} | {error, string()}.
+get_lock_status() ->
+ case consul_kv_read(startup_lock_path(), [], maybe_add_acl([])) of
+ {ok, [KeyData | _]} ->
+ SessionHeld = maps:get(<<"Session">>, KeyData, undefined) =/= undefined,
+ ModifyIndex = maps:get(<<"ModifyIndex">>, KeyData),
+ {ok, {SessionHeld, ModifyIndex}};
+ {error, _} = Err ->
+ Err
+ end.
+
+%%--------------------------------------------------------------------
+%% @private
+%% @doc
+%% Returns consul path for startup lock
+%% @end
+%%--------------------------------------------------------------------
+-spec startup_lock_path() -> string().
+startup_lock_path() ->
+ base_path() ++ "/" ++ "startup_lock".
+
+%%--------------------------------------------------------------------
+%% @private
+%% @doc Return the base path for all Consul KV keys related to the
+%% current cluster.
+%% @end
+%%--------------------------------------------------------------------
+-spec base_path() -> string().
+base_path() ->
+ M = ?CONFIG_MODULE:config_map(?BACKEND_CONFIG_KEY),
+ Segments = [get_config_key(consul_lock_prefix, M), get_config_key(cluster_name, M)],
+ rabbit_peer_discovery_httpc:build_path(Segments).
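+
+%% Illustrative: assuming the default consul_lock_prefix of "rabbitmq"
+%% and the default cluster name "default", startup_lock_path/0 returns
+%% "rabbitmq/default/startup_lock".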
+
+%%--------------------------------------------------------------------
+%% @private
+%% @doc
+%% Wait for lock to be released if it has been acquired by another node
+%% @end
+%%--------------------------------------------------------------------
+-spec wait_for_lock_release(atom(), pos_integer(), pos_integer()) -> ok | {error, string()}.
+wait_for_lock_release(false, _, _) -> ok;
+wait_for_lock_release(_, Index, Wait) ->
+ case consul_kv_read(startup_lock_path(),
+ [{index, Index}, {wait, service_ttl(Wait)}],
+ maybe_add_acl([])) of
+ {ok, _} -> ok;
+ {error, _} = Err -> Err
+ end.
+
+%%--------------------------------------------------------------------
+%% @doc
+%% Renew an existing session
+%% @end
+%%--------------------------------------------------------------------
+-spec session_ttl_update_callback(string()) -> string().
+session_ttl_update_callback(SessionId) ->
+ _ = consul_session_renew(SessionId, [], maybe_add_acl([])),
+ SessionId.
+
+%%--------------------------------------------------------------------
+%% @private
+%% @doc
+%% Renew session TTL
+%% @end
+%%--------------------------------------------------------------------
+-spec consul_session_renew(string(), [{string(), string()}], [{string(), string()}]) -> {ok, term()} | {error, string()}.
+consul_session_renew(SessionId, Query, Headers) ->
+ M = ?CONFIG_MODULE:config_map(?BACKEND_CONFIG_KEY),
+ rabbit_peer_discovery_httpc:put(get_config_key(consul_scheme, M),
+ get_config_key(consul_host, M),
+ get_integer_config_key(consul_port, M),
+ rabbit_peer_discovery_httpc:build_path([v1, session, renew, rabbit_data_coercion:to_atom(SessionId)]),
+ Query,
+ Headers,
+ []).
diff --git a/deps/rabbitmq_peer_discovery_consul/src/rabbitmq_peer_discovery_consul.erl b/deps/rabbitmq_peer_discovery_consul/src/rabbitmq_peer_discovery_consul.erl
new file mode 100644
index 0000000000..a46ff0aa73
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_consul/src/rabbitmq_peer_discovery_consul.erl
@@ -0,0 +1,59 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbitmq_peer_discovery_consul).
+-behaviour(rabbit_peer_discovery_backend).
+
+-export([init/0, list_nodes/0, supports_registration/0, register/0, unregister/0,
+ post_registration/0, lock/1, unlock/1]).
+-export([send_health_check_pass/0]).
+-export([session_ttl_update_callback/1]).
+
+-define(DELEGATE, rabbit_peer_discovery_consul).
+
+%%
+%% API
+%%
+
+init() ->
+ ?DELEGATE:init().
+
+-spec list_nodes() -> {ok, {Nodes :: list(), NodeType :: rabbit_types:node_type()}} | {error, Reason :: string()}.
+list_nodes() ->
+ ?DELEGATE:list_nodes().
+
+-spec supports_registration() -> boolean().
+supports_registration() ->
+ ?DELEGATE:supports_registration().
+
+-spec register() -> ok | {error, Reason :: string()}.
+register() ->
+ ?DELEGATE:register().
+
+-spec unregister() -> ok | {error, Reason :: string()}.
+unregister() ->
+ ?DELEGATE:unregister().
+
+-spec post_registration() -> ok.
+post_registration() ->
+ ?DELEGATE:post_registration().
+
+-spec lock(Node :: atom()) -> {ok, Data :: term()} | {error, Reason :: string()}.
+lock(Node) ->
+ ?DELEGATE:lock(Node).
+
+-spec unlock({SessionId :: string(), TRef :: timer:tref()}) -> ok.
+unlock(Data) ->
+ ?DELEGATE:unlock(Data).
+
+-spec send_health_check_pass() -> ok.
+send_health_check_pass() ->
+ ?DELEGATE:send_health_check_pass().
+
+-spec session_ttl_update_callback(string()) -> string().
+session_ttl_update_callback(SessionId) ->
+ ?DELEGATE:session_ttl_update_callback(SessionId).
diff --git a/deps/rabbitmq_peer_discovery_consul/src/rabbitmq_peer_discovery_consul_app.erl b/deps/rabbitmq_peer_discovery_consul/src/rabbitmq_peer_discovery_consul_app.erl
new file mode 100644
index 0000000000..d2dbea94be
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_consul/src/rabbitmq_peer_discovery_consul_app.erl
@@ -0,0 +1,21 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbitmq_peer_discovery_consul_app).
+
+%%
+%% API
+%%
+
+-behaviour(application).
+-export([start/2, stop/1]).
+
+start(_Type, _StartArgs) ->
+ rabbitmq_peer_discovery_consul_sup:start_link().
+
+stop(_State) ->
+ ok.
diff --git a/deps/rabbitmq_peer_discovery_consul/src/rabbitmq_peer_discovery_consul_health_check_helper.erl b/deps/rabbitmq_peer_discovery_consul/src/rabbitmq_peer_discovery_consul_health_check_helper.erl
new file mode 100644
index 0000000000..f81787f4a9
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_consul/src/rabbitmq_peer_discovery_consul_health_check_helper.erl
@@ -0,0 +1,95 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+%% This gen_server starts a periodic timer on behalf of
+%% a short-lived process that kicks off peer discovery.
+%% This is so that the timer is not automatically canceled
+%% and cleaned up by the timer server when that short-lived
+%% process terminates.
+
+-module(rabbitmq_peer_discovery_consul_health_check_helper).
+
+-behaviour(gen_server).
+
+-include_lib("rabbitmq_peer_discovery_common/include/rabbit_peer_discovery.hrl").
+-include("rabbit_peer_discovery_consul.hrl").
+
+-export([start_link/0]).
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
+ code_change/3]).
+
+-record(state, {timer_ref}).
+
+
+%%
+%% API
+%%
+
+start_link() ->
+ gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
+
+init([]) ->
+ case rabbit_peer_discovery:backend() of
+ rabbit_peer_discovery_consul ->
+ set_up_periodic_health_check();
+ rabbitmq_peer_discovery_consul ->
+ set_up_periodic_health_check();
+ _ ->
+ {ok, #state{}}
+ end.
+
+handle_call(_Msg, _From, State) ->
+ {reply, not_understood, State}.
+
+handle_cast(_Msg, State) ->
+ {noreply, State}.
+
+handle_info(_Msg, State) ->
+ {noreply, State}.
+
+terminate(_Arg, #state{timer_ref = undefined}) ->
+ ok;
+
+terminate(_Arg, #state{timer_ref = TRef}) ->
+ timer:cancel(TRef),
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+%%
+%% Implementation
+%%
+
+set_up_periodic_health_check() ->
+    M = rabbit_peer_discovery_config:config_map(?BACKEND_CONFIG_KEY),
+ case rabbit_peer_discovery_config:get(consul_svc_ttl, ?CONFIG_MAPPING, M) of
+ undefined ->
+ {ok, #state{}};
+ %% in seconds
+ Interval ->
+          %% We cannot use timer:apply_interval/4 in rabbit_peer_discovery_consul
+          %% because that function runs in a short-lived process; when it
+          %% exits, the timer module automatically cancels the timer.
+          %%
+          %% Instead we delegate to a locally registered gen_server,
+          %% `rabbitmq_peer_discovery_consul_health_check_helper`.
+          %%
+          %% The registration step cannot call this gen_server either,
+          %% because plugins are not yet loaded when Mnesia is started.
+          %%
+          %% The interval is 1/2 of the configured TTL to avoid a race
+          %% condition between check TTL expiration and in-flight
+          %% notifications.
+
+ IntervalInMs = Interval * 500, % note this is 1/2
+ rabbit_log:info("Starting Consul health check notifier (effective interval: ~p milliseconds)", [IntervalInMs]),
+ {ok, TRef} = timer:apply_interval(IntervalInMs, rabbit_peer_discovery_consul,
+ send_health_check_pass, []),
+ {ok, #state{timer_ref = TRef}}
+ end.
diff --git a/deps/rabbitmq_peer_discovery_consul/src/rabbitmq_peer_discovery_consul_sup.erl b/deps/rabbitmq_peer_discovery_consul/src/rabbitmq_peer_discovery_consul_sup.erl
new file mode 100644
index 0000000000..8f232c4b26
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_consul/src/rabbitmq_peer_discovery_consul_sup.erl
@@ -0,0 +1,37 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+-module(rabbitmq_peer_discovery_consul_sup).
+
+-behaviour(supervisor).
+
+-export([init/1, start_link/0]).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+-include("rabbit_peer_discovery_consul.hrl").
+
+%%
+%% API
+%%
+
+init([]) ->
+ Flags = #{strategy => one_for_one, intensity => 1, period => 1},
+ Fun0 = fun() -> {ok, {Flags, []}} end,
+ Fun1 = fun() -> {ok, {Flags, []}} end,
+ Fun2 = fun(_) ->
+ Specs = [#{id => rabbitmq_peer_discovery_consul_health_check_helper,
+ start => {rabbitmq_peer_discovery_consul_health_check_helper, start_link, []},
+ restart => permanent,
+ shutdown => ?SUPERVISOR_WAIT,
+ type => worker,
+ modules => [rabbitmq_peer_discovery_consul_health_check_helper]
+ }],
+ {ok, {Flags, Specs}}
+ end,
+ rabbit_peer_discovery_util:maybe_backend_configured(?BACKEND_CONFIG_KEY, Fun0, Fun1, Fun2).
+
+start_link() ->
+ supervisor:start_link({local, ?MODULE}, ?MODULE, []).
diff --git a/deps/rabbitmq_peer_discovery_consul/test/config_schema_SUITE.erl b/deps/rabbitmq_peer_discovery_consul/test/config_schema_SUITE.erl
new file mode 100644
index 0000000000..dc497b1736
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_consul/test/config_schema_SUITE.erl
@@ -0,0 +1,55 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(config_schema_SUITE).
+
+-compile(export_all).
+
+all() ->
+ [
+ run_snippets
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ Config1 = rabbit_ct_helpers:run_setup_steps(Config),
+ rabbit_ct_config_schema:init_schemas(rabbitmq_peer_discovery_consul, Config1).
+
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, Testcase}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_testcase(Testcase, Config) ->
+ Config1 = rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()),
+ rabbit_ct_helpers:testcase_finished(Config1, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+run_snippets(Config) ->
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, run_snippets1, [Config]).
+
+run_snippets1(Config) ->
+ rabbit_ct_config_schema:run_snippets(Config).
+
diff --git a/deps/rabbitmq_peer_discovery_consul/test/config_schema_SUITE_data/rabbitmq_peer_discovery_consul.snippets b/deps/rabbitmq_peer_discovery_consul/test/config_schema_SUITE_data/rabbitmq_peer_discovery_consul.snippets
new file mode 100644
index 0000000000..de85793456
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_consul/test/config_schema_SUITE_data/rabbitmq_peer_discovery_consul.snippets
@@ -0,0 +1,290 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+[
+ {consul_discovery_mechanism_as_module,
+ "cluster_formation.peer_discovery_backend = rabbit_peer_discovery_consul
+ cluster_formation.consul.host = consul.eng.megacorp.local",
+ [{rabbit, [{cluster_formation, [
+ {peer_discovery_backend, rabbit_peer_discovery_consul},
+ {peer_discovery_consul, [
+ {consul_host, "consul.eng.megacorp.local"}
+ ]}
+ ]}]}],
+ [rabbitmq_peer_discovery_consul]},
+
+ {consul_host,
+ "cluster_formation.peer_discovery_backend = consul
+ cluster_formation.consul.host = consul.eng.megacorp.local",
+ [{rabbit, [{cluster_formation, [
+ {peer_discovery_backend, rabbit_peer_discovery_consul},
+ {peer_discovery_consul, [
+ {consul_host, "consul.eng.megacorp.local"}
+ ]}
+ ]}]}],
+ [rabbitmq_peer_discovery_consul]}
+
+ , {consul_port,
+ "cluster_formation.consul.port = 9700",
+ [{rabbit, [{cluster_formation, [
+ {peer_discovery_consul, [
+ {consul_port, 9700}
+ ]}
+ ]}]}],
+ [rabbitmq_peer_discovery_consul]}
+
+ , {consul_host_and_port,
+ "cluster_formation.consul.host = consul.eng.megacorp.local
+ cluster_formation.consul.port = 9800",
+ [{rabbit, [{cluster_formation, [
+ {peer_discovery_consul, [
+ {consul_host, "consul.eng.megacorp.local"},
+ {consul_port, 9800}
+ ]}
+ ]}]}],
+ [rabbitmq_peer_discovery_consul]}
+
+ , {consul_scheme,
+ "cluster_formation.consul.scheme = https",
+ [{rabbit, [{cluster_formation, [
+ {peer_discovery_consul, [
+ {consul_scheme, "https"}
+ ]}
+ ]}]}],
+ [rabbitmq_peer_discovery_consul]}
+
+
+ , {consul_acl_token,
+ "cluster_formation.consul.acl_token = abcdef",
+ [{rabbit, [{cluster_formation, [
+ {peer_discovery_consul, [
+ {consul_acl_token, "abcdef"}
+ ]}
+ ]}]}],
+ [rabbitmq_peer_discovery_consul]}
+
+
+
+ , {consul_cluster_name,
+ "cluster_formation.consul.cluster_name = abcdef",
+ [{rabbit, [{cluster_formation, [
+ {peer_discovery_consul, [
+ {cluster_name, "abcdef"}
+ ]}
+ ]}]}],
+ [rabbitmq_peer_discovery_consul]}
+
+
+
+ , {consul_domain_suffix,
+ "cluster_formation.consul.domain_suffix = rabbitmq.eng.local",
+ [{rabbit, [{cluster_formation, [
+ {peer_discovery_consul, [
+ {consul_domain, "rabbitmq.eng.local"}
+ ]}
+ ]}]}],
+ [rabbitmq_peer_discovery_consul]}
+
+
+
+ , {consul_svc,
+ "cluster_formation.consul.svc = abcdef",
+ [{rabbit, [{cluster_formation, [
+ {peer_discovery_consul, [
+ {consul_svc, "abcdef"}
+ ]}
+ ]}]}],
+ [rabbitmq_peer_discovery_consul]}
+
+
+
+ , {consul_svc_addr,
+ "cluster_formation.consul.svc_addr = abcdef",
+ [{rabbit, [{cluster_formation, [
+ {peer_discovery_consul, [
+ {consul_svc_addr, "abcdef"}
+ ]}
+ ]}]}],
+ [rabbitmq_peer_discovery_consul]}
+
+
+
+ , {consul_svc_addr_auto,
+ "cluster_formation.consul.svc_addr_auto = true",
+ [{rabbit, [{cluster_formation, [
+ {peer_discovery_consul, [
+ {consul_svc_addr_auto, true}
+ ]}
+ ]}]}],
+ [rabbitmq_peer_discovery_consul]}
+
+
+
+
+ , {consul_svc_addr_nic,
+ "cluster_formation.consul.svc_addr_nic = abcdef",
+ [{rabbit, [{cluster_formation, [
+ {peer_discovery_consul, [
+ {consul_svc_addr_nic, "abcdef"}
+ ]}
+ ]}]}],
+ [rabbitmq_peer_discovery_consul]}
+
+
+
+
+ , {consul_svc_addr_use_nodename,
+ "cluster_formation.consul.svc_addr_use_nodename = true",
+ [{rabbit, [{cluster_formation, [
+ {peer_discovery_consul, [
+ {consul_svc_addr_nodename, true}
+ ]}
+ ]}]}],
+ [rabbitmq_peer_discovery_consul]}
+
+
+
+ , {consul_svc_port,
+ "cluster_formation.consul.svc_port = 5673",
+ [{rabbit, [{cluster_formation, [
+ {peer_discovery_consul, [
+ {consul_svc_port, 5673}
+ ]}
+ ]}]}],
+ [rabbitmq_peer_discovery_consul]}
+
+
+ , {consul_svc_ttl,
+ "cluster_formation.consul.svc_ttl = 60",
+ [{rabbit, [{cluster_formation, [
+ {peer_discovery_consul, [
+ {consul_svc_ttl, 60}
+ ]}
+ ]}]}],
+ [rabbitmq_peer_discovery_consul]}
+
+
+ , {consul_deregister_after,
+ "cluster_formation.consul.deregister_after = 60",
+ [{rabbit, [{cluster_formation, [
+ {peer_discovery_consul, [
+ {consul_deregister_after, 60}
+ ]}
+ ]}]}],
+ [rabbitmq_peer_discovery_consul]}
+
+
+ , {consul_use_longname,
+ "cluster_formation.consul.use_longname = true",
+ [{rabbit, [{cluster_formation, [
+ {peer_discovery_consul, [
+ {consul_use_longname, true}
+ ]}
+ ]}]}],
+ [rabbitmq_peer_discovery_consul]}
+
+
+ , {lock_prefix,
+ "cluster_formation.consul.lock_prefix = prefix",
+ [{rabbit, [{cluster_formation, [
+ {peer_discovery_consul, [
+ {consul_lock_prefix, "prefix"}
+ ]}
+ ]}]}],
+ [rabbitmq_peer_discovery_consul]}
+
+
+ %% lock acquisition timeout
+
+ , {consul_lock_timeout,
+ "cluster_formation.consul.lock_timeout = 60",
+ [{rabbit, [{cluster_formation, [
+ {peer_discovery_consul, [
+ {lock_wait_time, 60}
+ ]}
+ ]}]}],
+ [rabbitmq_peer_discovery_consul]}
+
+ %% alias for consistency with etcd
+ , {consul_lock_wait_time,
+ "cluster_formation.consul.lock_wait_time = 60",
+ [{rabbit, [{cluster_formation, [
+ {peer_discovery_consul, [
+ {lock_wait_time, 60}
+ ]}
+ ]}]}],
+ [rabbitmq_peer_discovery_consul]}
+
+
+ , {consul_service_tags,
+ "cluster_formation.consul.svc_tags.1 = tag-a
+ cluster_formation.consul.svc_tags.2 = tag-b",
+ [{rabbit, [{cluster_formation, [
+ {peer_discovery_consul, [
+ {consul_svc_tags, ["tag-a", "tag-b"]}
+ ]}
+ ]}]}],
+ [rabbitmq_peer_discovery_consul]}
+
+ , {consul_service_tags_set_to_none,
+ "cluster_formation.consul.svc_tags = none",
+ [{rabbit, [{cluster_formation, [
+ {peer_discovery_consul, [
+ {consul_svc_tags, []}
+ ]}
+ ]}]}],
+ [rabbitmq_peer_discovery_consul]}
+
+ , {consul_service_tags_with_spaces,
+ "cluster_formation.consul.svc_tags.1 = tag with spaces
+ cluster_formation.consul.svc_tags.2 = another one",
+ [{rabbit, [{cluster_formation, [
+ {peer_discovery_consul, [
+ {consul_svc_tags, ["tag with spaces",
+ "another one"]}
+ ]}
+ ]}]}],
+ [rabbitmq_peer_discovery_consul]}
+
+
+ , {consul_service_meta,
+ "cluster_formation.consul.svc_meta.owner = an-owner
+ cluster_formation.consul.svc_meta.env = qa",
+ [{rabbit, [{cluster_formation, [
+ {peer_discovery_consul, [
+ {consul_svc_meta, [{<<"owner">>, <<"an-owner">>},
+ {<<"env">>, <<"qa">>}]}
+ ]}
+ ]}]}],
+ [rabbitmq_peer_discovery_consul]}
+
+ , {consul_service_meta_set_to_none,
+ "cluster_formation.consul.svc_meta = none",
+ [{rabbit, [{cluster_formation, [
+ {peer_discovery_consul, [
+ {consul_svc_meta, []}
+ ]}
+ ]}]}],
+ [rabbitmq_peer_discovery_consul]}
+
+ , {consul_service_meta_with_spaces,
+ "cluster_formation.consul.svc_meta.owner = an owner
+ cluster_formation.consul.svc_meta.env = qa",
+ [{rabbit, [{cluster_formation, [
+ {peer_discovery_consul, [
+ {consul_svc_meta, [{<<"owner">>, <<"an owner">>},
+ {<<"env">>, <<"qa">>}]}
+ ]}
+ ]}]}],
+ [rabbitmq_peer_discovery_consul]}
+
+
+ , {all_values_absent,
+ "",
+ [],
+ [rabbitmq_peer_discovery_consul]}
+
+].
diff --git a/deps/rabbitmq_peer_discovery_consul/test/rabbitmq_peer_discovery_consul_SUITE.erl b/deps/rabbitmq_peer_discovery_consul/test/rabbitmq_peer_discovery_consul_SUITE.erl
new file mode 100644
index 0000000000..ff925b3b34
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_consul/test/rabbitmq_peer_discovery_consul_SUITE.erl
@@ -0,0 +1,1135 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% The Initial Developer of the Original Code is AWeber Communications.
+%% Copyright (c) 2015-2016 AWeber Communications
+%% Copyright (c) 2016-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbitmq_peer_discovery_consul_SUITE).
+
+-compile(export_all).
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+
+all() ->
+ [
+ {group, registration_body_tests}
+ , {group, registration_tests}
+ , {group, unregistration_tests}
+ , {group, list_node_tests}
+ , {group, other_tests}
+ , {group, lock_tests}
+ ].
+
+groups() ->
+ [
+ {registration_body_tests, [], [
+ registration_body_simple_case,
+ registration_body_svc_addr_set_via_env_var,
+ registration_body_svc_ttl_set_via_env_var,
+ registration_body_svc_tags_set_via_env_var,
+ registration_body_deregister_after_set_via_env_var,
+ registration_body_ttl_and_deregister_after_both_unset_via_env_var,
+ registration_body_ttl_unset_and_deregister_after_set_via_env_var,
+ service_id_all_defaults_test,
+ service_id_with_unset_address_test,
+ service_ttl_default_test
+ ]}
+ , {registration_tests, [], [
+ registration_with_all_default_values_test,
+ registration_with_cluster_name_test,
+ registration_without_acl_token_test,
+ registration_with_acl_token_test,
+ registration_with_auto_addr_test,
+ registration_with_auto_addr_from_nodename_test,
+ registration_with_auto_addr_nic_test,
+ registration_with_auto_addr_nic_issue_12_test,
+ registration_generic_error_test
+ ]}
+ , {unregistration_tests, [], [
+ unregistration_with_all_defaults_test,
+ unregistration_without_acl_token_test,
+ unregistration_with_acl_token_test,
+ unregistration_with_generic_error_test
+ ]}
+ , {list_node_tests, [], [
+ list_nodes_default_values_test,
+ list_nodes_without_acl_token_test,
+ list_nodes_with_acl_token_test,
+ list_nodes_with_cluster_name_token_test,
+ list_nodes_with_cluster_name_and_acl_token_test,
+ list_nodes_return_value_basic_test,
+ list_nodes_return_value_basic_long_node_name_test,
+ list_nodes_return_value_long_node_name_and_custom_domain_test,
+ list_nodes_return_value_srv_address_test,
+ list_nodes_return_value_nodes_in_warning_state_included_test,
+ list_nodes_return_value_nodes_in_warning_state_filtered_out_test
+ ]}
+ , {other_tests, [], [
+ health_check_with_all_defaults_test,
+ health_check_without_acl_token_test,
+ health_check_with_acl_token_test,
+ health_check_error_handling_test
+ ]}
+ , {lock_tests, [], [
+ startup_lock_path_with_prefix_test,
+ startup_lock_path_default_value_test,
+ startup_lock_path_with_cluster_name_test,
+ create_session_with_token_test,
+ create_session_without_token_test,
+ get_lock_status_with_token_test,
+ get_lock_status_with_session_test,
+ get_lock_status_without_session_test,
+ wait_for_lock_release_with_session_with_token_test,
+ wait_for_lock_release_with_session_without_token_test,
+ wait_for_lock_release_without_session_test,
+ acquire_lock_with_token_test,
+ acquire_lock_not_acquired_test,
+ acquire_lock_successfully_acquired_test,
+ release_lock_with_token_test,
+ release_lock_not_released_test,
+ release_lock_successfully_released_test,
+ consul_kv_read_custom_values_test,
+ consul_kv_read_default_values_test,
+ consul_kv_write_custom_values_test,
+ consul_kv_write_default_values_test,
+ consul_session_renew_custom_values_test,
+ consul_session_renew_default_values_test,
+ consul_session_create_custom_values_test,
+ consul_session_create_default_values_test
+ ]}
+ ].
+
+init_per_group(_, Config) -> Config.
+end_per_group(_, Config) -> Config.
+
+reset() ->
+ meck:unload(),
+ application:unset_env(rabbit, cluster_formation),
+ [os:unsetenv(Var) || Var <- [
+ "CLUSTER_NAME",
+ "CONSUL_SCHEME",
+ "CONSUL_HOST",
+ "CONSUL_PORT",
+ "CONSUL_ACL_TOKEN",
+ "CONSUL_SVC",
+ "CONSUL_SVC_ADDR",
+ "CONSUL_SVC_ADDR_AUTO",
+ "CONSUL_SVC_ADDR_NIC",
+ "CONSUL_SVC_ADDR_NODENAME",
+ "CONSUL_SVC_PORT",
+ "CONSUL_SVC_TTL",
+ "CONSUL_SVC_TAGS",
+ "CONSUL_DOMAIN",
+ "CONSUL_DEREGISTER_AFTER",
+ "CONSUL_INCLUDE_NODES_WITH_WARNINGS",
+ "CONSUL_USE_LONGNAME",
+ "CONSUL_LOCK_PREFIX"
+ ]].
+
+init_per_testcase(_TC, Config) ->
+ reset(),
+ meck:new(rabbit_log, []),
+ meck:new(rabbit_peer_discovery_httpc, [passthrough]),
+ Config.
+
+end_per_testcase(_TC, Config) ->
+ reset(),
+ Config.
+
+
+%%%
+%%% Testcases
+%%%
+
+-define(CONSUL_CHECK_NOTES, 'RabbitMQ Consul-based peer discovery plugin TTL check').
+
+registration_body_simple_case(_Config) ->
+ Expectation = [{'ID',rabbitmq},
+ {'Name', rabbitmq},
+ {'Port', 5672},
+ {'Check',
+ [{'Notes', ?CONSUL_CHECK_NOTES},
+ {'TTL', '30s'},
+ {'Status', 'passing'}]}],
+ ?assertEqual(Expectation, rabbit_peer_discovery_consul:build_registration_body()).
+
+registration_body_svc_addr_set_via_env_var(_Config) ->
+ os:putenv("CONSUL_SVC_ADDR", "mercurio"),
+ Expectation = [{'ID', 'rabbitmq:mercurio'},
+ {'Name', rabbitmq},
+ {'Address', mercurio},
+ {'Port', 5672},
+ {'Check',
+ [{'Notes', ?CONSUL_CHECK_NOTES},
+ {'TTL', '30s'},
+ {'Status', 'passing'}]}],
+ ?assertEqual(Expectation, rabbit_peer_discovery_consul:build_registration_body()).
+
+registration_body_svc_ttl_set_via_env_var(_Config) ->
+ os:putenv("CONSUL_SVC_TTL", "257"),
+ Expectation = [{'ID', 'rabbitmq'},
+ {'Name', rabbitmq},
+ {'Port', 5672},
+ {'Check',
+ [{'Notes', ?CONSUL_CHECK_NOTES},
+ {'TTL', '257s'},
+ {'Status', 'passing'}]}],
+ ?assertEqual(Expectation, rabbit_peer_discovery_consul:build_registration_body()).
+
+registration_body_svc_tags_set_via_env_var(_Config) ->
+ os:putenv("CONSUL_SVC_TAGS", "urlprefix-:5672 proto=tcp, mq, mq server"),
+ Expectation = [{'ID', 'rabbitmq'},
+ {'Name', rabbitmq},
+ {'Port', 5672},
+ {'Check',
+ [{'Notes', ?CONSUL_CHECK_NOTES},
+ {'TTL', '30s'},
+ {'Status', 'passing'}]},
+ {'Tags',['urlprefix-:5672 proto=tcp',mq,'mq server']}],
+ ?assertEqual(Expectation, rabbit_peer_discovery_consul:build_registration_body()).
+
+registration_body_deregister_after_set_via_env_var(_Config) ->
+ os:putenv("CONSUL_DEREGISTER_AFTER", "520"),
+ Expectation = [{'ID', 'rabbitmq'},
+ {'Name', rabbitmq},
+ {'Port', 5672},
+ {'Check',
+ [{'Notes', ?CONSUL_CHECK_NOTES},
+ {'TTL','30s'},
+ {'Status', 'passing'},
+ {'DeregisterCriticalServiceAfter','520s'}]}],
+ ?assertEqual(Expectation, rabbit_peer_discovery_consul:build_registration_body()).
+
+registration_body_ttl_and_deregister_after_both_unset_via_env_var(_Config) ->
+ os:putenv("CONSUL_DEREGISTER_AFTER", ""),
+ os:putenv("CONSUL_SVC_TTL", ""),
+ Expectation = [{'ID', 'rabbitmq'},
+ {'Name', rabbitmq},
+ {'Port', 5672}],
+ ?assertEqual(Expectation, rabbit_peer_discovery_consul:build_registration_body()).
+
+%% "deregister after" won't be enabled if TTL isn't set
+registration_body_ttl_unset_and_deregister_after_set_via_env_var(_Config) ->
+ os:putenv("CONSUL_DEREGISTER_AFTER", "120"),
+ os:putenv("CONSUL_SVC_TTL", ""),
+ Expectation = [{'ID', 'rabbitmq'},
+ {'Name', rabbitmq},
+ {'Port', 5672}],
+ ?assertEqual(Expectation, rabbit_peer_discovery_consul:build_registration_body()).
+
+service_id_all_defaults_test(_Config) ->
+ ?assertEqual("rabbitmq", rabbit_peer_discovery_consul:service_id()).
+
+service_id_with_unset_address_test(_Config) ->
+ os:putenv("CONSUL_SVC", "rmq"),
+ os:putenv("CONSUL_SVC_ADDR", "mercurio.local"),
+ os:putenv("CONSUL_SVC_ADDR_AUTO", "false"),
+ os:putenv("CONSUL_SVC_ADDR_NODENAME", ""),
+ ?assertEqual("rmq:mercurio.local", rabbit_peer_discovery_consul:service_id()).
+
+service_ttl_default_test(_Config) ->
+ ?assertEqual("30s", rabbit_peer_discovery_consul:service_ttl(30)).
+
+list_nodes_default_values_test(_Config) ->
+ meck:expect(rabbit_peer_discovery_httpc, get,
+ fun(Scheme, Host, Port, Path, Args, Headers, HttpOpts) ->
+ ?assertEqual("http", Scheme),
+ ?assertEqual("localhost", Host),
+ ?assertEqual(8500, Port),
+ ?assertEqual("v1/health/service/rabbitmq", Path),
+ ?assertEqual([passing], Args),
+ ?assertEqual([], Headers),
+ ?assertEqual([], HttpOpts),
+ {error, "testing"}
+ end),
+ ?assertEqual({ok, {[], disc}}, rabbit_peer_discovery_consul:list_nodes()),
+ ?assert(meck:validate(rabbit_peer_discovery_httpc)).
+
+list_nodes_without_acl_token_test(_Config) ->
+ meck:expect(rabbit_peer_discovery_httpc, get,
+ fun(Scheme, Host, Port, Path, Args, Headers, HttpOpts) ->
+ ?assertEqual("https", Scheme),
+ ?assertEqual("consul.service.consul", Host),
+ ?assertEqual(8501, Port),
+ ?assertEqual("v1/health/service/rabbit", Path),
+ ?assertEqual([passing], Args),
+ ?assertEqual([], Headers),
+ ?assertEqual([], HttpOpts),
+ {error, "testing"}
+ end),
+ os:putenv("CONSUL_SCHEME", "https"),
+ os:putenv("CONSUL_HOST", "consul.service.consul"),
+ os:putenv("CONSUL_PORT", "8501"),
+ os:putenv("CONSUL_SVC", "rabbit"),
+ ?assertEqual({ok, {[], disc}}, rabbit_peer_discovery_consul:list_nodes()),
+ ?assert(meck:validate(rabbit_peer_discovery_httpc)).
+
+list_nodes_with_acl_token_test(_Config) ->
+ meck:expect(rabbit_peer_discovery_httpc, get,
+ fun(Scheme, Host, Port, Path, Args, Headers, HttpOpts) ->
+ ?assertEqual("http", Scheme),
+ ?assertEqual("consul.service.consul", Host),
+ ?assertEqual(8500, Port),
+ ?assertEqual("v1/health/service/rabbitmq", Path),
+ ?assertEqual([passing], Args),
+ ?assertEqual([{"X-Consul-Token", "token-value"}], Headers),
+ ?assertEqual([], HttpOpts),
+ {error, "testing"}
+ end),
+ os:putenv("CONSUL_HOST", "consul.service.consul"),
+ os:putenv("CONSUL_PORT", "8500"),
+ os:putenv("CONSUL_ACL_TOKEN", "token-value"),
+ ?assertEqual({ok, {[], disc}}, rabbit_peer_discovery_consul:list_nodes()),
+ ?assert(meck:validate(rabbit_peer_discovery_httpc)).
+
+list_nodes_with_cluster_name_token_test(_Config) ->
+ meck:expect(rabbit_peer_discovery_httpc, get,
+ fun(Scheme, Host, Port, Path, Args, Headers, HttpOpts) ->
+ ?assertEqual("http", Scheme),
+ ?assertEqual("consul.service.consul", Host),
+ ?assertEqual(8500, Port),
+ ?assertEqual("v1/health/service/rabbitmq", Path),
+ ?assertEqual([passing], Args),
+ ?assertEqual([{"X-Consul-Token", "token-value"}], Headers),
+ ?assertEqual([], HttpOpts),
+ {error, "testing"}
+ end),
+ os:putenv("CONSUL_HOST", "consul.service.consul"),
+ os:putenv("CONSUL_PORT", "8500"),
+ os:putenv("CONSUL_ACL_TOKEN", "token-value"),
+ ?assertEqual({ok, {[], disc}}, rabbit_peer_discovery_consul:list_nodes()),
+ ?assert(meck:validate(rabbit_peer_discovery_httpc)).
+
+list_nodes_with_cluster_name_and_acl_token_test(_Config) ->
+ meck:expect(rabbit_peer_discovery_httpc, get,
+ fun(Scheme, Host, Port, Path, Args, Headers, HttpOpts) ->
+ ?assertEqual("http", Scheme),
+ ?assertEqual("localhost", Host),
+ ?assertEqual(8500, Port),
+ ?assertEqual("v1/health/service/rabbitmq", Path),
+ ?assertEqual([passing, {tag, "qa"}], Args),
+ ?assertEqual([{"X-Consul-Token", "token-value"}], Headers),
+ ?assertEqual([], HttpOpts),
+ {error, "testing"}
+ end),
+ os:putenv("CLUSTER_NAME", "qa"),
+ os:putenv("CONSUL_ACL_TOKEN", "token-value"),
+ ?assertEqual({ok, {[], disc}}, rabbit_peer_discovery_consul:list_nodes()),
+ ?assert(meck:validate(rabbit_peer_discovery_httpc)).
+
+list_nodes_return_value_basic_test(_Config) ->
+ application:set_env(rabbit, cluster_formation,
+ [
+ {peer_discovery_backend, rabbit_peer_discovery_consul},
+ {randomized_startup_delay_range, {0, 1}},
+ {peer_discovery_consul, [
+ {consul_host, "localhost"},
+ {consul_port, 8500}
+ ]}
+ ]),
+ meck:expect(rabbit_peer_discovery_httpc, get,
+ fun(_, _, _, _, _, _, _) ->
+ Body = "[{\"Node\": {\"Node\": \"rabbit2.internal.domain\", \"Address\": \"10.20.16.160\"}, \"Checks\": [{\"Node\": \"rabbit2.internal.domain\", \"CheckID\": \"service:rabbitmq\", \"Name\": \"Service \'rabbitmq\' check\", \"ServiceName\": \"rabbitmq\", \"Notes\": \"Connect to the port internally every 30 seconds\", \"Status\": \"passing\", \"ServiceID\": \"rabbitmq\", \"Output\": \"\"}, {\"Node\": \"rabbit2.internal.domain\", \"CheckID\": \"serfHealth\", \"Name\": \"Serf Health Status\", \"ServiceName\": \"\", \"Notes\": \"\", \"Status\": \"passing\", \"ServiceID\": \"\", \"Output\": \"Agent alive and reachable\"}], \"Service\": {\"Address\": \"\", \"Port\": 5672, \"ID\": \"rabbitmq\", \"Service\": \"rabbitmq\", \"Tags\": [\"amqp\"]}}, {\"Node\": {\"Node\": \"rabbit1.internal.domain\", \"Address\": \"10.20.16.159\"}, \"Checks\": [{\"Node\": \"rabbit1.internal.domain\", \"CheckID\": \"service:rabbitmq\", \"Name\": \"Service \'rabbitmq\' check\", \"ServiceName\": \"rabbitmq\", \"Notes\": \"Connect to the port internally every 30 seconds\", \"Status\": \"passing\", \"ServiceID\": \"rabbitmq\", \"Output\": \"\"}, {\"Node\": \"rabbit1.internal.domain\", \"CheckID\": \"serfHealth\", \"Name\": \"Serf Health Status\", \"ServiceName\": \"\", \"Notes\": \"\", \"Status\": \"passing\", \"ServiceID\": \"\", \"Output\": \"Agent alive and reachable\"}], \"Service\": {\"Address\": \"\", \"Port\": 5672, \"ID\": \"rabbitmq\", \"Service\": \"rabbitmq\", \"Tags\": [\"amqp\"]}}]",
+ rabbit_json:try_decode(rabbit_data_coercion:to_binary(Body))
+ end),
+ ?assertEqual({ok, {['rabbit@rabbit1', 'rabbit@rabbit2'], disc}},
+ rabbit_peer_discovery_consul:list_nodes()),
+ ?assert(meck:validate(rabbit_peer_discovery_httpc)).
+
+list_nodes_return_value_basic_long_node_name_test(_Config) ->
+ application:set_env(rabbit, cluster_formation,
+ [
+ {peer_discovery_backend, rabbit_peer_discovery_consul},
+ {randomized_startup_delay_range, {0, 1}},
+ {peer_discovery_consul, [
+ {consul_host, "localhost"},
+ {consul_port, 8500},
+ {consul_use_longname, true}
+ ]}
+ ]),
+ meck:expect(rabbit_peer_discovery_httpc, get,
+ fun(_, _, _, _, _, _, _) ->
+ Body = "[{\"Node\": {\"Node\": \"rabbit2\", \"Address\": \"10.20.16.160\"}, \"Checks\": [{\"Node\": \"rabbit2\", \"CheckID\": \"service:rabbitmq\", \"Name\": \"Service \'rabbitmq\' check\", \"ServiceName\": \"rabbitmq\", \"Notes\": \"Connect to the port internally every 30 seconds\", \"Status\": \"passing\", \"ServiceID\": \"rabbitmq\", \"Output\": \"\"}, {\"Node\": \"rabbit2\", \"CheckID\": \"serfHealth\", \"Name\": \"Serf Health Status\", \"ServiceName\": \"\", \"Notes\": \"\", \"Status\": \"passing\", \"ServiceID\": \"\", \"Output\": \"Agent alive and reachable\"}], \"Service\": {\"Address\": \"\", \"Port\": 5672, \"ID\": \"rabbitmq\", \"Service\": \"rabbitmq\", \"Tags\": [\"amqp\"]}}, {\"Node\": {\"Node\": \"rabbit1\", \"Address\": \"10.20.16.159\"}, \"Checks\": [{\"Node\": \"rabbit1\", \"CheckID\": \"service:rabbitmq\", \"Name\": \"Service \'rabbitmq\' check\", \"ServiceName\": \"rabbitmq\", \"Notes\": \"Connect to the port internally every 30 seconds\", \"Status\": \"passing\", \"ServiceID\": \"rabbitmq\", \"Output\": \"\"}, {\"Node\": \"rabbit1\", \"CheckID\": \"serfHealth\", \"Name\": \"Serf Health Status\", \"ServiceName\": \"\", \"Notes\": \"\", \"Status\": \"passing\", \"ServiceID\": \"\", \"Output\": \"Agent alive and reachable\"}], \"Service\": {\"Address\": \"\", \"Port\": 5672, \"ID\": \"rabbitmq\", \"Service\": \"rabbitmq\", \"Tags\": [\"amqp\"]}}]",
+ rabbit_json:try_decode(rabbit_data_coercion:to_binary(Body))
+ end),
+ ?assertEqual({ok, {['rabbit@rabbit1.node.consul', 'rabbit@rabbit2.node.consul'], disc}},
+ rabbit_peer_discovery_consul:list_nodes()),
+ ?assert(meck:validate(rabbit_peer_discovery_httpc)).
+
+list_nodes_return_value_long_node_name_and_custom_domain_test(_Config) ->
+ application:set_env(rabbit, cluster_formation,
+ [
+ {peer_discovery_backend, rabbit_peer_discovery_consul},
+ {randomized_startup_delay_range, {0, 1}},
+ {peer_discovery_consul, [
+ {consul_host, "localhost"},
+ {consul_port, 8500},
+ {consul_use_longname, true},
+ {consul_domain, "internal"}
+ ]}
+ ]),
+ meck:expect(rabbit_peer_discovery_httpc, get,
+ fun(_, _, _, _, _, _, _) ->
+ Body = "[{\"Node\": {\"Node\": \"rabbit2\", \"Address\": \"10.20.16.160\"}, \"Checks\": [{\"Node\": \"rabbit2\", \"CheckID\": \"service:rabbitmq\", \"Name\": \"Service \'rabbitmq\' check\", \"ServiceName\": \"rabbitmq\", \"Notes\": \"Connect to the port internally every 30 seconds\", \"Status\": \"passing\", \"ServiceID\": \"rabbitmq\", \"Output\": \"\"}, {\"Node\": \"rabbit2\", \"CheckID\": \"serfHealth\", \"Name\": \"Serf Health Status\", \"ServiceName\": \"\", \"Notes\": \"\", \"Status\": \"passing\", \"ServiceID\": \"\", \"Output\": \"Agent alive and reachable\"}], \"Service\": {\"Address\": \"\", \"Port\": 5672, \"ID\": \"rabbitmq\", \"Service\": \"rabbitmq\", \"Tags\": [\"amqp\"]}}, {\"Node\": {\"Node\": \"rabbit1\", \"Address\": \"10.20.16.159\"}, \"Checks\": [{\"Node\": \"rabbit1\", \"CheckID\": \"service:rabbitmq\", \"Name\": \"Service \'rabbitmq\' check\", \"ServiceName\": \"rabbitmq\", \"Notes\": \"Connect to the port internally every 30 seconds\", \"Status\": \"passing\", \"ServiceID\": \"rabbitmq\", \"Output\": \"\"}, {\"Node\": \"rabbit1\", \"CheckID\": \"serfHealth\", \"Name\": \"Serf Health Status\", \"ServiceName\": \"\", \"Notes\": \"\", \"Status\": \"passing\", \"ServiceID\": \"\", \"Output\": \"Agent alive and reachable\"}], \"Service\": {\"Address\": \"\", \"Port\": 5672, \"ID\": \"rabbitmq\", \"Service\": \"rabbitmq\", \"Tags\": [\"amqp\"]}}]",
+ rabbit_json:try_decode(rabbit_data_coercion:to_binary(Body))
+ end),
+ ?assertEqual({ok, {['rabbit@rabbit1.node.internal', 'rabbit@rabbit2.node.internal'], disc}},
+ rabbit_peer_discovery_consul:list_nodes()),
+ ?assert(meck:validate(rabbit_peer_discovery_httpc)).
+
+list_nodes_return_value_srv_address_test(_Config) ->
+ application:set_env(rabbit, cluster_formation,
+ [
+ {peer_discovery_backend, rabbit_peer_discovery_consul},
+ {randomized_startup_delay_range, {0, 1}},
+ {peer_discovery_consul, [
+ {consul_host, "localhost"},
+ {consul_port, 8500}
+ ]}
+ ]),
+ meck:expect(rabbit_peer_discovery_httpc, get,
+ fun(_, _, _, _, _, _, _) ->
+ Body = "[{\"Node\": {\"Node\": \"rabbit2.internal.domain\", \"Address\": \"10.20.16.160\"}, \"Checks\": [{\"Node\": \"rabbit2.internal.domain\", \"CheckID\": \"service:rabbitmq\", \"Name\": \"Service \'rabbitmq\' check\", \"ServiceName\": \"rabbitmq\", \"Notes\": \"Connect to the port internally every 30 seconds\", \"Status\": \"passing\", \"ServiceID\": \"rabbitmq:172.172.16.4.50\", \"Output\": \"\"}, {\"Node\": \"rabbit2.internal.domain\", \"CheckID\": \"serfHealth\", \"Name\": \"Serf Health Status\", \"ServiceName\": \"\", \"Notes\": \"\", \"Status\": \"passing\", \"ServiceID\": \"\", \"Output\": \"Agent alive and reachable\"}], \"Service\": {\"Address\": \"172.16.4.51\", \"Port\": 5672, \"ID\": \"rabbitmq:172.16.4.51\", \"Service\": \"rabbitmq\", \"Tags\": [\"amqp\"]}}, {\"Node\": {\"Node\": \"rabbit1.internal.domain\", \"Address\": \"10.20.16.159\"}, \"Checks\": [{\"Node\": \"rabbit1.internal.domain\", \"CheckID\": \"service:rabbitmq\", \"Name\": \"Service \'rabbitmq\' check\", \"ServiceName\": \"rabbitmq\", \"Notes\": \"Connect to the port internally every 30 seconds\", \"Status\": \"passing\", \"ServiceID\": \"rabbitmq\", \"Output\": \"\"}, {\"Node\": \"rabbit1.internal.domain\", \"CheckID\": \"serfHealth\", \"Name\": \"Serf Health Status\", \"ServiceName\": \"\", \"Notes\": \"\", \"Status\": \"passing\", \"ServiceID\": \"\", \"Output\": \"Agent alive and reachable\"}], \"Service\": {\"Address\": \"172.172.16.51\", \"Port\": 5672, \"ID\": \"rabbitmq:172.172.16.51\", \"Service\": \"rabbitmq\", \"Tags\": [\"amqp\"]}}]",
+ rabbit_json:try_decode(rabbit_data_coercion:to_binary(Body))
+ end),
+ ?assertEqual({ok, {['rabbit@172.16.4.51', 'rabbit@172.172.16.51'], disc}},
+ rabbit_peer_discovery_consul:list_nodes()),
+ ?assert(meck:validate(rabbit_peer_discovery_httpc)).
+
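+%% Node name derivation as illustrated by the expectations in the four
+%% tests above (a summary of the test data, not of the implementation):
+%%
+%%   defaults (short names):   "rabbit2.internal.domain" -> 'rabbit@rabbit2'
+%%   consul_use_longname:      "rabbit2" -> 'rabbit@rabbit2.node.consul'
+%%   longname + consul_domain: "rabbit2" -> 'rabbit@rabbit2.node.internal'
+%%   service Address set:      "172.16.4.51" -> 'rabbit@172.16.4.51'
+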
+list_nodes_return_value_nodes_in_warning_state_included_test(_Config) ->
+ application:set_env(rabbit, cluster_formation,
+ [
+ {peer_discovery_backend, rabbit_peer_discovery_consul},
+ {randomized_startup_delay_range, {0, 1}},
+ {peer_discovery_consul, [
+ {consul_host, "localhost"},
+ {consul_port, 8500}
+ ]}
+ ]),
+ meck:expect(rabbit_peer_discovery_httpc, get,
+ fun(_, _, _, _, [], _, _) ->
+ rabbit_json:try_decode(list_of_nodes_with_warnings());
+ (_, _, _, _, [passing], _, _) ->
+ rabbit_json:try_decode(list_of_nodes_without_warnings())
+ end),
+ os:putenv("CONSUL_INCLUDE_NODES_WITH_WARNINGS", "true"),
+ ?assertEqual({ok, {['rabbit@172.16.4.51'], disc}},
+ rabbit_peer_discovery_consul:list_nodes()),
+ ?assert(meck:validate(rabbit_peer_discovery_httpc)).
+
+list_nodes_return_value_nodes_in_warning_state_filtered_out_test(_Config) ->
+ application:set_env(rabbit, cluster_formation,
+ [
+ {peer_discovery_backend, rabbit_peer_discovery_consul},
+ {randomized_startup_delay_range, {0, 1}},
+ {peer_discovery_consul, [
+ {consul_host, "localhost"},
+ {consul_port, 8500}
+ ]}
+ ]),
+ meck:expect(rabbit_peer_discovery_httpc, get,
+ fun(_, _, _, _, [], _, _) ->
+ rabbit_json:try_decode(list_of_nodes_with_warnings());
+ (_, _, _, _, [passing], _, _) ->
+ rabbit_json:try_decode(list_of_nodes_without_warnings())
+ end),
+ os:putenv("CONSUL_INCLUDE_NODES_WITH_WARNINGS", "false"),
+ ?assertEqual({ok, {['rabbit@172.16.4.51', 'rabbit@172.172.16.51'], disc}},
+ rabbit_peer_discovery_consul:list_nodes()),
+ ?assert(meck:validate(rabbit_peer_discovery_httpc)).
+
+registration_with_all_default_values_test(_Config) ->
+ meck:expect(rabbit_log, debug, fun(_Message) -> ok end),
+ meck:expect(rabbit_peer_discovery_httpc, put,
+ fun(Scheme, Host, Port, Path, Args, Headers, Body) ->
+ ?assertEqual("http", Scheme),
+ ?assertEqual("localhost", Host),
+ ?assertEqual(8500, Port),
+ ?assertEqual("v1/agent/service/register", Path),
+ ?assertEqual([], Args),
+ ?assertEqual([], Headers),
+ Expect = <<"{\"ID\":\"rabbitmq\",\"Name\":\"rabbitmq\",\"Port\":5672,\"Check\":{\"Notes\":\"RabbitMQ Consul-based peer discovery plugin TTL check\",\"TTL\":\"30s\",\"Status\":\"passing\"}}">>,
+ ?assertEqual(Expect, Body),
+ {ok, []}
+ end),
+ ?assertEqual(ok, rabbit_peer_discovery_consul:register()),
+ ?assert(meck:validate(rabbit_log)),
+ ?assert(meck:validate(rabbit_peer_discovery_httpc)).
+
+registration_with_cluster_name_test(_Config) ->
+ meck:expect(rabbit_peer_discovery_httpc, put,
+ fun(Scheme, Host, Port, Path, Args, Headers, Body) ->
+ ?assertEqual("http", Scheme),
+ ?assertEqual("localhost", Host),
+ ?assertEqual(8500, Port),
+ ?assertEqual("v1/agent/service/register", Path),
+ ?assertEqual([], Args),
+ ?assertEqual([], Headers),
+ Expect = <<"{\"ID\":\"rabbitmq\",\"Name\":\"rabbitmq\",\"Port\":5672,\"Check\":{\"Notes\":\"RabbitMQ Consul-based peer discovery plugin TTL check\",\"TTL\":\"30s\",\"Status\":\"passing\"},\"Tags\":[\"test-rabbit\"]}">>,
+ ?assertEqual(Expect, Body),
+ {ok, []}
+ end),
+ os:putenv("CLUSTER_NAME", "test-rabbit"),
+ ?assertEqual(ok, rabbit_peer_discovery_consul:register()),
+ ?assert(meck:validate(rabbit_peer_discovery_httpc)).
+
+registration_without_acl_token_test(_Config) ->
+ meck:expect(rabbit_peer_discovery_httpc, put,
+ fun(Scheme, Host, Port, Path, Args, Headers, Body) ->
+ ?assertEqual("https", Scheme),
+ ?assertEqual("consul.service.consul", Host),
+ ?assertEqual(8501, Port),
+ ?assertEqual("v1/agent/service/register", Path),
+ ?assertEqual([], Args),
+ ?assertEqual([], Headers),
+ Expect = <<"{\"ID\":\"rabbit:10.0.0.1\",\"Name\":\"rabbit\",\"Address\":\"10.0.0.1\",\"Port\":5671,\"Check\":{\"Notes\":\"RabbitMQ Consul-based peer discovery plugin TTL check\",\"TTL\":\"30s\",\"Status\":\"passing\"}}">>,
+ ?assertEqual(Expect, Body),
+ {ok, []}
+ end),
+ os:putenv("CONSUL_SCHEME", "https"),
+ os:putenv("CONSUL_HOST", "consul.service.consul"),
+ os:putenv("CONSUL_PORT", "8501"),
+ os:putenv("CONSUL_SVC", "rabbit"),
+ os:putenv("CONSUL_SVC_ADDR", "10.0.0.1"),
+ os:putenv("CONSUL_SVC_PORT", "5671"),
+ ?assertEqual(ok, rabbit_peer_discovery_consul:register()),
+ ?assert(meck:validate(rabbit_peer_discovery_httpc)).
+
+registration_with_acl_token_test(_Config) ->
+ meck:expect(rabbit_peer_discovery_httpc, put,
+ fun(Scheme, Host, Port, Path, Args, Headers, Body) ->
+ ?assertEqual("https", Scheme),
+ ?assertEqual("consul.service.consul", Host),
+ ?assertEqual(8501, Port),
+ ?assertEqual("v1/agent/service/register", Path),
+ ?assertEqual([], Args),
+ ?assertEqual([], Headers),
+ Expect = <<"{\"ID\":\"rabbit:10.0.0.1\",\"Name\":\"rabbit\",\"Address\":\"10.0.0.1\",\"Port\":5671,\"Check\":{\"Notes\":\"RabbitMQ Consul-based peer discovery plugin TTL check\",\"TTL\":\"30s\",\"Status\":\"passing\"}}">>,
+ ?assertEqual(Expect, Body),
+ {ok, []}
+ end),
+ os:putenv("CONSUL_SCHEME", "https"),
+ os:putenv("CONSUL_HOST", "consul.service.consul"),
+ os:putenv("CONSUL_PORT", "8501"),
+ os:putenv("CONSUL_SVC", "rabbit"),
+ os:putenv("CONSUL_SVC_ADDR", "10.0.0.1"),
+ os:putenv("CONSUL_SVC_PORT", "5671"),
+ ?assertEqual(ok, rabbit_peer_discovery_consul:register()),
+ ?assert(meck:validate(rabbit_peer_discovery_httpc)).
+
+registration_with_auto_addr_test(_Config) ->
+ meck:new(rabbit_peer_discovery_util, [passthrough]),
+ meck:expect(rabbit_peer_discovery_util, node_hostname, fun(true) -> "bob.consul.node";
+ (false) -> "bob" end),
+ meck:expect(rabbit_peer_discovery_httpc, put,
+ fun(Scheme, Host, Port, Path, Args, Headers, Body) ->
+ ?assertEqual("http", Scheme),
+ ?assertEqual("consul.service.consul", Host),
+ ?assertEqual(8500, Port),
+ ?assertEqual("v1/agent/service/register", Path),
+ ?assertEqual([], Args),
+ ?assertEqual([{"X-Consul-Token", "token-value"}], Headers),
+ Expect = <<"{\"ID\":\"rabbitmq:bob\",\"Name\":\"rabbitmq\",\"Address\":\"bob\",\"Port\":5672,\"Check\":{\"Notes\":\"RabbitMQ Consul-based peer discovery plugin TTL check\",\"TTL\":\"30s\",\"Status\":\"passing\"}}">>,
+ ?assertEqual(Expect, Body),
+ {ok, []}
+ end),
+ os:putenv("CONSUL_HOST", "consul.service.consul"),
+ os:putenv("CONSUL_PORT", "8500"),
+ os:putenv("CONSUL_ACL_TOKEN", "token-value"),
+ os:putenv("CONSUL_SVC_ADDR_AUTO", "true"),
+ ?assertEqual(ok, rabbit_peer_discovery_consul:register()),
+ ?assert(meck:validate(rabbit_peer_discovery_httpc)),
+ ?assert(meck:validate(rabbit_peer_discovery_util)).
+
+registration_with_auto_addr_from_nodename_test(_Config) ->
+ meck:new(rabbit_peer_discovery_util, [passthrough]),
+ meck:expect(rabbit_peer_discovery_util, node_hostname, fun(true) -> "bob.consul.node";
+ (false) -> "bob" end),
+ meck:expect(rabbit_peer_discovery_httpc, put,
+ fun(Scheme, Host, Port, Path, Args, Headers, Body) ->
+ ?assertEqual("http", Scheme),
+ ?assertEqual("consul.service.consul", Host),
+ ?assertEqual(8500, Port),
+ ?assertEqual("v1/agent/service/register", Path),
+ ?assertEqual([], Args),
+ ?assertEqual([{"X-Consul-Token", "token-value"}], Headers),
+ Expect = <<"{\"ID\":\"rabbitmq:bob.consul.node\",\"Name\":\"rabbitmq\",\"Address\":\"bob.consul.node\",\"Port\":5672,\"Check\":{\"Notes\":\"RabbitMQ Consul-based peer discovery plugin TTL check\",\"TTL\":\"30s\",\"Status\":\"passing\"}}">>,
+ ?assertEqual(Expect, Body),
+ {ok, []}
+ end),
+ os:putenv("CONSUL_HOST", "consul.service.consul"),
+ os:putenv("CONSUL_PORT", "8500"),
+ os:putenv("CONSUL_ACL_TOKEN", "token-value"),
+ os:putenv("CONSUL_SVC_ADDR_AUTO", "true"),
+ os:putenv("CONSUL_SVC_ADDR_NODENAME", "true"),
+ ?assertEqual(ok, rabbit_peer_discovery_consul:register()),
+ ?assert(meck:validate(rabbit_peer_discovery_httpc)),
+ ?assert(meck:validate(rabbit_peer_discovery_util)).
+
+registration_with_auto_addr_nic_test(_Config) ->
+ meck:new(rabbit_peer_discovery_util, [passthrough]),
+ meck:expect(rabbit_peer_discovery_util, nic_ipv4,
+ fun(NIC) ->
+ ?assertEqual("en0", NIC),
+ {ok, "172.16.4.50"}
+ end),
+ meck:expect(rabbit_peer_discovery_httpc, put,
+ fun(Scheme, Host, Port, Path, Args, Headers, Body) ->
+ ?assertEqual("http", Scheme),
+ ?assertEqual("consul.service.consul", Host),
+ ?assertEqual(8500, Port),
+ ?assertEqual("v1/agent/service/register", Path),
+ ?assertEqual([], Args),
+ ?assertEqual([{"X-Consul-Token", "token-value"}], Headers),
+ Expect = <<"{\"ID\":\"rabbitmq:172.16.4.50\",\"Name\":\"rabbitmq\",\"Address\":\"172.16.4.50\",\"Port\":5672,\"Check\":{\"Notes\":\"RabbitMQ Consul-based peer discovery plugin TTL check\",\"TTL\":\"30s\",\"Status\":\"passing\"}}">>,
+ ?assertEqual(Expect, Body),
+ {ok, []}
+ end),
+ os:putenv("CONSUL_HOST", "consul.service.consul"),
+ os:putenv("CONSUL_PORT", "8500"),
+ os:putenv("CONSUL_ACL_TOKEN", "token-value"),
+ os:putenv("CONSUL_SVC_ADDR_AUTO", "true"),
+ os:putenv("CONSUL_SVC_ADDR_NIC", "en0"),
+ ?assertEqual(ok, rabbit_peer_discovery_consul:register()),
+ ?assert(meck:validate(rabbit_peer_discovery_httpc)),
+ ?assert(meck:validate(rabbit_peer_discovery_util)).
+
+registration_with_auto_addr_nic_issue_12_test(_Config) ->
+ meck:new(rabbit_peer_discovery_util, [passthrough]),
+ meck:expect(rabbit_peer_discovery_util, nic_ipv4,
+ fun(NIC) ->
+ ?assertEqual("en0", NIC),
+ {ok, "172.16.4.50"}
+ end),
+ meck:expect(rabbit_peer_discovery_httpc, put,
+ fun(Scheme, Host, Port, Path, Args, Headers, Body) ->
+ ?assertEqual("http", Scheme),
+ ?assertEqual("consul.service.consul", Host),
+ ?assertEqual(8500, Port),
+ ?assertEqual("v1/agent/service/register", Path),
+ ?assertEqual([], Args),
+ ?assertEqual([{"X-Consul-Token", "token-value"}], Headers),
+ Expect = <<"{\"ID\":\"rabbitmq:172.16.4.50\",\"Name\":\"rabbitmq\",\"Address\":\"172.16.4.50\",\"Port\":5672,\"Check\":{\"Notes\":\"RabbitMQ Consul-based peer discovery plugin TTL check\",\"TTL\":\"30s\",\"Status\":\"passing\"}}">>,
+ ?assertEqual(Expect, Body),
+ {ok, []}
+ end),
+ os:putenv("CONSUL_HOST", "consul.service.consul"),
+ os:putenv("CONSUL_PORT", "8500"),
+ os:putenv("CONSUL_ACL_TOKEN", "token-value"),
+ os:putenv("CONSUL_SVC_ADDR_AUTO", "false"),
+ os:putenv("CONSUL_SVC_ADDR_NIC", "en0"),
+ ?assertEqual(ok, rabbit_peer_discovery_consul:register()),
+ ?assert(meck:validate(rabbit_peer_discovery_httpc)),
+ ?assert(meck:validate(rabbit_peer_discovery_util)).
+
+registration_generic_error_test(_Config) ->
+ meck:expect(rabbit_peer_discovery_httpc, put,
+ fun(_Scheme, _Host, _Port, _Path, _Args, _Headers, _Body) ->
+ {error, "testing"}
+ end),
+ ?assertEqual({error, "testing"}, rabbit_peer_discovery_consul:register()),
+ ?assert(meck:validate(rabbit_peer_discovery_httpc)).
+
+health_check_with_all_defaults_test(_Config) ->
+ meck:expect(rabbit_peer_discovery_httpc, put,
+ fun(Scheme, Host, Port, Path, Args, Headers, HttpOpts) ->
+ ?assertEqual("http", Scheme),
+ ?assertEqual("localhost", Host),
+ ?assertEqual(8500, Port),
+ ?assertEqual("v1/agent/check/pass/service:rabbitmq", Path),
+ ?assertEqual([], Args),
+ ?assertEqual([], Headers),
+ ?assertEqual([], HttpOpts),
+ {ok, []}
+ end),
+ ?assertEqual(ok, rabbit_peer_discovery_consul:send_health_check_pass()),
+ ?assert(meck:validate(rabbit_peer_discovery_httpc)).
+
+health_check_without_acl_token_test(_Config) ->
+ meck:expect(rabbit_peer_discovery_httpc, put,
+ fun(Scheme, Host, Port, Path, Args, Headers, HttpOpts) ->
+ ?assertEqual("https", Scheme),
+ ?assertEqual("consul.service.consul", Host),
+ ?assertEqual(8501, Port),
+ ?assertEqual("v1/agent/check/pass/service:rabbit", Path),
+ ?assertEqual([], Args),
+ ?assertEqual([], Headers),
+ ?assertEqual([], HttpOpts),
+ {ok, []}
+ end),
+ os:putenv("CONSUL_SCHEME", "https"),
+ os:putenv("CONSUL_HOST", "consul.service.consul"),
+ os:putenv("CONSUL_PORT", "8501"),
+ os:putenv("CONSUL_SVC", "rabbit"),
+ ?assertEqual(ok, rabbit_peer_discovery_consul:send_health_check_pass()),
+ ?assert(meck:validate(rabbit_peer_discovery_httpc)).
+
+health_check_with_acl_token_test(_Config) ->
+ meck:expect(rabbit_peer_discovery_httpc, put,
+ fun(Scheme, Host, Port, Path, Args, Headers, HttpOpts) ->
+ ?assertEqual("http", Scheme),
+ ?assertEqual("consul.service.consul", Host),
+ ?assertEqual(8500, Port),
+ ?assertEqual("v1/agent/check/pass/service:rabbitmq", Path),
+ ?assertEqual([], Args),
+ ?assertEqual([{"X-Consul-Token", "token-value"}], Headers),
+ ?assertEqual([], HttpOpts),
+ {ok, []}
+ end),
+ os:putenv("CONSUL_HOST", "consul.service.consul"),
+ os:putenv("CONSUL_PORT", "8500"),
+ os:putenv("CONSUL_ACL_TOKEN", "token-value"),
+ ?assertEqual(ok, rabbit_peer_discovery_consul:send_health_check_pass()),
+ ?assert(meck:validate(rabbit_peer_discovery_httpc)).
+
+health_check_error_handling_test(_Config) ->
+ meck:expect(rabbit_log, error, fun(_Message, _Args) ->
+ ok
+ end),
+ meck:expect(rabbit_peer_discovery_httpc, put,
+ fun(_Scheme, _Host, _Port, _Path, _Args, _Headers, _HttpOpts) ->
+ {error, "testing"}
+ end),
+ ?assertEqual(ok, rabbit_peer_discovery_consul:send_health_check_pass()),
+ ?assert(meck:validate(rabbit_peer_discovery_httpc)),
+ ?assert(meck:validate(rabbit_log)).
+
+unregistration_with_all_defaults_test(_Config) ->
+ meck:expect(rabbit_peer_discovery_httpc, put,
+ fun(Scheme, Host, Port, Path, Args, Headers, HttpOpts) ->
+ ?assertEqual("http", Scheme),
+ ?assertEqual("localhost", Host),
+ ?assertEqual(8500, Port),
+ ?assertEqual("v1/agent/service/deregister/rabbitmq", Path),
+ ?assertEqual([], Args),
+ ?assertEqual([], Headers),
+ ?assertEqual([], HttpOpts),
+ {ok, []}
+ end),
+ ?assertEqual(ok, rabbit_peer_discovery_consul:unregister()),
+ ?assert(meck:validate(rabbit_peer_discovery_httpc)).
+
+unregistration_without_acl_token_test(_Config) ->
+ meck:expect(rabbit_peer_discovery_httpc, put,
+ fun(Scheme, Host, Port, Path, Args, Headers, HttpOpts) ->
+ ?assertEqual("https", Scheme),
+ ?assertEqual("consul.service.consul", Host),
+ ?assertEqual(8501, Port),
+ ?assertEqual("v1/agent/service/deregister/rabbit:10.0.0.1", Path),
+ ?assertEqual([], Args),
+ ?assertEqual([], Headers),
+ ?assertEqual([], HttpOpts),
+ {ok, []}
+ end),
+ os:putenv("CONSUL_SCHEME", "https"),
+ os:putenv("CONSUL_HOST", "consul.service.consul"),
+ os:putenv("CONSUL_PORT", "8501"),
+ os:putenv("CONSUL_SVC", "rabbit"),
+ os:putenv("CONSUL_SVC_ADDR", "10.0.0.1"),
+ os:putenv("CONSUL_SVC_PORT", "5671"),
+ ?assertEqual(ok, rabbit_peer_discovery_consul:unregister()),
+ ?assert(meck:validate(rabbit_peer_discovery_httpc)).
+
+unregistration_with_acl_token_test(_Config) ->
+ meck:expect(rabbit_peer_discovery_httpc, put,
+ fun(Scheme, Host, Port, Path, Args, Headers, HttpOpts) ->
+ ?assertEqual("http", Scheme),
+ ?assertEqual("consul.service.consul", Host),
+ ?assertEqual(8500, Port),
+ ?assertEqual("v1/agent/service/deregister/rabbitmq", Path),
+ ?assertEqual([], Args),
+ ?assertEqual([{"X-Consul-Token", "token-value"}], Headers),
+ ?assertEqual([], HttpOpts),
+ {ok, []}
+ end),
+ os:putenv("CONSUL_HOST", "consul.service.consul"),
+ os:putenv("CONSUL_PORT", "8500"),
+ os:putenv("CONSUL_ACL_TOKEN", "token-value"),
+ ?assertEqual(ok, rabbit_peer_discovery_consul:unregister()),
+ ?assert(meck:validate(rabbit_peer_discovery_httpc)).
+
+unregistration_with_generic_error_test(_Config) ->
+ meck:expect(rabbit_peer_discovery_httpc, put,
+ fun(_Scheme, _Host, _Port, _Path, _Args, _Headers, _HttpOpts) ->
+ {error, "testing"}
+ end),
+ ?assertEqual({error, "testing"}, rabbit_peer_discovery_consul:unregister()),
+ ?assert(meck:validate(rabbit_peer_discovery_httpc)).
+
+startup_lock_path_default_value_test(_Config) ->
+ Expectation = "rabbitmq/default/startup_lock",
+ ?assertEqual(Expectation, rabbit_peer_discovery_consul:startup_lock_path()).
+
+startup_lock_path_with_prefix_test(_Config) ->
+ Expectation = "myprefix/default/startup_lock",
+ os:putenv("CONSUL_LOCK_PREFIX", "myprefix"),
+ ?assertEqual(Expectation, rabbit_peer_discovery_consul:startup_lock_path()).
+
+startup_lock_path_with_cluster_name_test(_Config) ->
+ os:putenv("CLUSTER_NAME", "mycluster"),
+ Expectation = "rabbitmq/mycluster/startup_lock",
+ ?assertEqual(Expectation, rabbit_peer_discovery_consul:startup_lock_path()).
+
+create_session_without_token_test(_Config) ->
+ meck:expect(rabbit_peer_discovery_httpc, put,
+ fun(Scheme, Host, Port, Path, Args, Headers, Body) ->
+ ?assertEqual("http", Scheme),
+ ?assertEqual("localhost", Host),
+ ?assertEqual(8500, Port),
+ ?assertEqual("v1/session/create", Path),
+ ?assertEqual([], Args),
+ ?assertEqual([], Headers),
+ Expect = <<"{\"Name\":\"node-name\",\"TTL\":\"30s\"}">>,
+ ?assertEqual(Expect, Body),
+ {ok, #{<<"ID">> => <<"session-id">>}}
+ end),
+ ?assertEqual({ok, "session-id"}, rabbit_peer_discovery_consul:create_session('node-name', 30)),
+ ?assert(meck:validate(rabbit_peer_discovery_httpc)).
+
+create_session_with_token_test(_Config) ->
+ meck:expect(rabbit_peer_discovery_httpc, put,
+ fun(Scheme, Host, Port, Path, Args, Headers, Body) ->
+ ?assertEqual("http", Scheme),
+ ?assertEqual("localhost", Host),
+ ?assertEqual(8500, Port),
+ ?assertEqual("v1/session/create", Path),
+ ?assertEqual([], Args),
+ ?assertEqual([{"X-Consul-Token", "token-value"}], Headers),
+ Expect = <<"{\"Name\":\"node-name\",\"TTL\":\"30s\"}">>,
+ ?assertEqual(Expect, Body),
+ {ok, #{<<"ID">> => <<"session-id">>}}
+ end),
+ os:putenv("CONSUL_ACL_TOKEN", "token-value"),
+ ?assertEqual({ok, "session-id"}, rabbit_peer_discovery_consul:create_session('node-name', 30)),
+ ?assert(meck:validate(rabbit_peer_discovery_httpc)).
+
+get_lock_status_without_session_test(_Config) ->
+ meck:expect(rabbit_peer_discovery_httpc, get,
+ fun(Scheme, Host, Port, Path, Args, Headers, HttpOpts) ->
+ ?assertEqual("http", Scheme),
+ ?assertEqual("localhost", Host),
+ ?assertEqual(8500, Port),
+ ?assertEqual("v1/kv/rabbitmq/default/startup_lock", Path),
+ ?assertEqual([], Args),
+ ?assertEqual([], Headers),
+ ?assertEqual([], HttpOpts),
+ {ok, [#{<<"LockIndex">> => 3,
+ <<"Key">> => <<"rabbitmq/default/startup_lock">>,
+ <<"Flags">> => 0,
+ <<"Value">> => <<"W3t9XQ==">>,
+ <<"Session">> => <<"session-id">>,
+ <<"CreateIndex">> => 8,
+ <<"ModifyIndex">> => 21}]}
+ end),
+ ?assertEqual({ok, {true, 21}}, rabbit_peer_discovery_consul:get_lock_status()),
+ ?assert(meck:validate(rabbit_peer_discovery_httpc)).
+
+get_lock_status_with_session_test(_Config) ->
+ meck:expect(rabbit_peer_discovery_httpc, get,
+ fun(Scheme, Host, Port, Path, Args, Headers, HttpOpts) ->
+ ?assertEqual("http", Scheme),
+ ?assertEqual("localhost", Host),
+ ?assertEqual(8500, Port),
+ ?assertEqual("v1/kv/rabbitmq/default/startup_lock", Path),
+ ?assertEqual([], Args),
+ ?assertEqual([], Headers),
+ ?assertEqual([], HttpOpts),
+ {ok, [#{<<"LockIndex">> => 3,
+ <<"Key">> => <<"rabbitmq/default/startup_lock">>,
+ <<"Flags">> => 0,
+ <<"Value">> => <<"W3t9XQ==">>,
+ <<"CreateIndex">> => 8,
+ <<"ModifyIndex">> => 21}]}
+ end),
+ ?assertEqual({ok, {false, 21}}, rabbit_peer_discovery_consul:get_lock_status()),
+ ?assert(meck:validate(rabbit_peer_discovery_httpc)).
+
+get_lock_status_with_token_test(_Config) ->
+ meck:expect(rabbit_peer_discovery_httpc, get,
+ fun(Scheme, Host, Port, Path, Args, Headers, HttpOpts) ->
+ ?assertEqual("http", Scheme),
+ ?assertEqual("localhost", Host),
+ ?assertEqual(8500, Port),
+ ?assertEqual("v1/kv/rabbitmq/default/startup_lock", Path),
+ ?assertEqual([], Args),
+ ?assertEqual([{"X-Consul-Token", "token-value"}], Headers),
+ ?assertEqual([], HttpOpts),
+ {ok, [#{<<"LockIndex">> => 3,
+ <<"Key">> => <<"rabbitmq/default/startup_lock">>,
+ <<"Flags">> => 0,
+ <<"Value">> => <<"W3t9XQ==">>,
+ <<"CreateIndex">> => 8,
+ <<"ModifyIndex">> => 21}]}
+ end),
+ os:putenv("CONSUL_ACL_TOKEN", "token-value"),
+ ?assertEqual({ok, {false, 21}}, rabbit_peer_discovery_consul:get_lock_status()),
+ ?assert(meck:validate(rabbit_peer_discovery_httpc)).
+
+wait_for_lock_release_with_session_without_token_test(_Config) ->
+ meck:expect(rabbit_peer_discovery_httpc, get,
+ fun(Scheme, Host, Port, Path, Args, Headers, HttpOpts) ->
+ ?assertEqual("http", Scheme),
+ ?assertEqual("localhost", Host),
+ ?assertEqual(8500, Port),
+ ?assertEqual("v1/kv/rabbitmq/default/startup_lock", Path),
+ ?assertEqual([{index, 42}, {wait, "300s"}], Args),
+ ?assertEqual([], Headers),
+ ?assertEqual([], HttpOpts),
+ {ok, []}
+ end),
+ ?assertEqual(ok, rabbit_peer_discovery_consul:wait_for_lock_release(true, 42, 300)),
+ ?assert(meck:validate(rabbit_peer_discovery_httpc)).
+
+wait_for_lock_release_with_session_with_token_test(_Config) ->
+ meck:expect(rabbit_peer_discovery_httpc, get,
+ fun(Scheme, Host, Port, Path, Args, Headers, HttpOpts) ->
+ ?assertEqual("http", Scheme),
+ ?assertEqual("localhost", Host),
+ ?assertEqual(8500, Port),
+ ?assertEqual("v1/kv/rabbitmq/default/startup_lock", Path),
+ ?assertEqual([{index, 42}, {wait, "300s"}], Args),
+ ?assertEqual([{"X-Consul-Token", "token-value"}], Headers),
+ ?assertEqual([], HttpOpts),
+ {ok, []}
+ end),
+ os:putenv("CONSUL_ACL_TOKEN", "token-value"),
+ ?assertEqual(ok, rabbit_peer_discovery_consul:wait_for_lock_release(true, 42, 300)),
+ ?assert(meck:validate(rabbit_peer_discovery_httpc)).
+
+wait_for_lock_release_without_session_test(_Config) ->
+ ?assertEqual(ok, rabbit_peer_discovery_consul:wait_for_lock_release(false, 0, 0)).
+
+acquire_lock_successfully_acquired_test(_Config) ->
+ meck:expect(rabbit_peer_discovery_httpc, put,
+ fun(Scheme, Host, Port, Path, Args, Headers, Body) ->
+ ?assertEqual("http", Scheme),
+ ?assertEqual("localhost", Host),
+ ?assertEqual(8500, Port),
+ ?assertEqual("v1/kv/rabbitmq/default/startup_lock", Path),
+ ?assertEqual([{acquire, session_id}], Args),
+ ?assertEqual([], Headers),
+ ?assertEqual([], Body),
+ {ok, true}
+ end),
+ ?assertEqual({ok, true}, rabbit_peer_discovery_consul:acquire_lock(session_id)),
+ ?assert(meck:validate(rabbit_peer_discovery_httpc)).
+
+acquire_lock_not_acquired_test(_Config) ->
+ meck:expect(rabbit_peer_discovery_httpc, put,
+ fun(Scheme, Host, Port, Path, Args, Headers, Body) ->
+ ?assertEqual("http", Scheme),
+ ?assertEqual("localhost", Host),
+ ?assertEqual(8500, Port),
+ ?assertEqual("v1/kv/rabbitmq/default/startup_lock", Path),
+ ?assertEqual([{acquire, session_id}], Args),
+ ?assertEqual([], Headers),
+ ?assertEqual([], Body),
+ {ok, false}
+ end),
+ ?assertEqual({ok, false}, rabbit_peer_discovery_consul:acquire_lock(session_id)),
+ ?assert(meck:validate(rabbit_peer_discovery_httpc)).
+
+acquire_lock_with_token_test(_Config) ->
+ meck:expect(rabbit_peer_discovery_httpc, put,
+ fun(Scheme, Host, Port, Path, Args, Headers, Body) ->
+ ?assertEqual("http", Scheme),
+ ?assertEqual("localhost", Host),
+ ?assertEqual(8500, Port),
+ ?assertEqual("v1/kv/rabbitmq/default/startup_lock", Path),
+ ?assertEqual([{acquire, session_id}], Args),
+ ?assertEqual([{"X-Consul-Token", "token-value"}], Headers),
+ ?assertEqual([], Body),
+ {ok, true}
+ end),
+ os:putenv("CONSUL_ACL_TOKEN", "token-value"),
+ ?assertEqual({ok, true}, rabbit_peer_discovery_consul:acquire_lock(session_id)),
+ ?assert(meck:validate(rabbit_peer_discovery_httpc)).
+
+release_lock_successfully_released_test(_Config) ->
+ meck:expect(rabbit_peer_discovery_httpc, put,
+ fun(Scheme, Host, Port, Path, Args, Headers, Body) ->
+ ?assertEqual("http", Scheme),
+ ?assertEqual("localhost", Host),
+ ?assertEqual(8500, Port),
+ ?assertEqual("v1/kv/rabbitmq/default/startup_lock", Path),
+ ?assertEqual([{release, session_id}], Args),
+ ?assertEqual([], Headers),
+ ?assertEqual([], Body),
+ {ok, true}
+ end),
+ ?assertEqual({ok, true}, rabbit_peer_discovery_consul:release_lock(session_id)),
+ ?assert(meck:validate(rabbit_peer_discovery_httpc)).
+
+release_lock_not_released_test(_Config) ->
+ meck:expect(rabbit_peer_discovery_httpc, put,
+ fun(Scheme, Host, Port, Path, Args, Headers, Body) ->
+ ?assertEqual("http", Scheme),
+ ?assertEqual("localhost", Host),
+ ?assertEqual(8500, Port),
+ ?assertEqual("v1/kv/rabbitmq/default/startup_lock", Path),
+ ?assertEqual([{release, session_id}], Args),
+ ?assertEqual([], Headers),
+ ?assertEqual([], Body),
+ {ok, false}
+ end),
+ ?assertEqual({ok, false}, rabbit_peer_discovery_consul:release_lock(session_id)),
+ ?assert(meck:validate(rabbit_peer_discovery_httpc)).
+
+release_lock_with_token_test(_Config) ->
+ meck:expect(rabbit_peer_discovery_httpc, put,
+ fun(Scheme, Host, Port, Path, Args, Headers, Body) ->
+ ?assertEqual("http", Scheme),
+ ?assertEqual("localhost", Host),
+ ?assertEqual(8500, Port),
+ ?assertEqual("v1/kv/rabbitmq/default/startup_lock", Path),
+ ?assertEqual([{release, session_id}], Args),
+ ?assertEqual([{"X-Consul-Token", "token-value"}], Headers),
+ ?assertEqual([], Body),
+ {ok, true}
+ end),
+ os:putenv("CONSUL_ACL_TOKEN", "token-value"),
+    ?assertEqual({ok, true}, rabbit_peer_discovery_consul:release_lock(session_id)),
+    ?assert(meck:validate(rabbit_peer_discovery_httpc)).
+
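+%% How the lock-related functions exercised above fit together during
+%% cluster formation -- a rough sketch inferred from these tests rather
+%% than a verbatim copy of the plugin's control flow:
+%%
+%%   {ok, SessionId} = rabbit_peer_discovery_consul:create_session(node(), 30),
+%%   case rabbit_peer_discovery_consul:acquire_lock(SessionId) of
+%%       {ok, true} ->
+%%           lock_held;
+%%       {ok, false} ->
+%%           {ok, {SessionHeld, ModifyIndex}} =
+%%               rabbit_peer_discovery_consul:get_lock_status(),
+%%           ok = rabbit_peer_discovery_consul:wait_for_lock_release(
+%%                  SessionHeld, ModifyIndex, 300),
+%%           retry_acquire
+%%   end,
+%%   %% ...and once startup is complete:
+%%   {ok, true} = rabbit_peer_discovery_consul:release_lock(SessionId).
+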
+consul_kv_read_default_values_test(_Config) ->
+ meck:expect(rabbit_peer_discovery_httpc, get,
+ fun(Scheme, Host, Port, Path, Args, Headers, HttpOpts) ->
+ ?assertEqual("http", Scheme),
+ ?assertEqual("localhost", Host),
+ ?assertEqual(8500, Port),
+ ?assertEqual("v1/kv/path/to/key", Path),
+ ?assertEqual([{acquire, session_id}], Args),
+ ?assertEqual([], Headers),
+ ?assertEqual([], HttpOpts),
+ {ok, []}
+ end),
+ ?assertEqual({ok, []}, rabbit_peer_discovery_consul:consul_kv_read("path/to/key", [{acquire, session_id}], rabbit_peer_discovery_consul:maybe_add_acl([]))),
+ ?assert(meck:validate(rabbit_peer_discovery_httpc)).
+
+consul_kv_read_custom_values_test(_Config) ->
+ meck:expect(rabbit_peer_discovery_httpc, get,
+ fun(Scheme, Host, Port, Path, Args, Headers, HttpOpts) ->
+ ?assertEqual("http", Scheme),
+ ?assertEqual("consul.node.consul", Host),
+ ?assertEqual(8501, Port),
+ ?assertEqual("v1/kv/path/to/key", Path),
+ ?assertEqual([{acquire, session_id}], Args),
+ ?assertEqual([], Headers),
+ ?assertEqual([], HttpOpts),
+ {ok, []}
+ end),
+ os:putenv("CONSUL_HOST", "consul.node.consul"),
+ os:putenv("CONSUL_PORT", "8501"),
+ ?assertEqual({ok, []}, rabbit_peer_discovery_consul:consul_kv_read("path/to/key", [{acquire, session_id}], rabbit_peer_discovery_consul:maybe_add_acl([]))),
+ ?assert(meck:validate(rabbit_peer_discovery_httpc)).
+
+consul_kv_write_default_values_test(_Config) ->
+ meck:expect(rabbit_peer_discovery_httpc, put,
+ fun(Scheme, Host, Port, Path, Args, Headers, Body) ->
+ ?assertEqual("http", Scheme),
+ ?assertEqual("localhost", Host),
+ ?assertEqual(8500, Port),
+ ?assertEqual("v1/kv/path/to/key", Path),
+ ?assertEqual([{acquire, session_id}], Args),
+ ?assertEqual([], Headers),
+ ?assertEqual([], Body),
+ {ok, []}
+ end),
+ ?assertEqual({ok, []}, rabbit_peer_discovery_consul:consul_kv_write("path/to/key", [{acquire, session_id}], rabbit_peer_discovery_consul:maybe_add_acl([]), [])),
+ ?assert(meck:validate(rabbit_peer_discovery_httpc)).
+
+consul_kv_write_custom_values_test(_Config) ->
+ meck:expect(rabbit_peer_discovery_httpc, put,
+ fun(Scheme, Host, Port, Path, Args, Headers, Body) ->
+ ?assertEqual("http", Scheme),
+ ?assertEqual("consul.node.consul", Host),
+ ?assertEqual(8501, Port),
+ ?assertEqual("v1/kv/path/to/key", Path),
+ ?assertEqual([{acquire, session_id}], Args),
+ ?assertEqual([], Headers),
+ ?assertEqual([], Body),
+ {ok, []}
+ end),
+ os:putenv("CONSUL_HOST", "consul.node.consul"),
+ os:putenv("CONSUL_PORT", "8501"),
+ ?assertEqual({ok, []}, rabbit_peer_discovery_consul:consul_kv_write("path/to/key", [{acquire, session_id}], rabbit_peer_discovery_consul:maybe_add_acl([]), [])),
+ ?assert(meck:validate(rabbit_peer_discovery_httpc)).
+
+consul_session_create_default_values_test(_Config) ->
+ meck:expect(rabbit_peer_discovery_httpc, put,
+ fun(Scheme, Host, Port, Path, Args, Headers, Body) ->
+ ?assertEqual("http", Scheme),
+ ?assertEqual("localhost", Host),
+ ?assertEqual(8500, Port),
+ ?assertEqual("v1/session/create", Path),
+ ?assertEqual([], Args),
+ ?assertEqual([], Headers),
+ ?assertEqual([], Body),
+ {ok, []}
+ end),
+ ?assertEqual({ok, []}, rabbit_peer_discovery_consul:consul_session_create([], rabbit_peer_discovery_consul:maybe_add_acl([]), [])),
+ ?assert(meck:validate(rabbit_peer_discovery_httpc)).
+
+consul_session_create_custom_values_test(_Config) ->
+ meck:expect(rabbit_peer_discovery_httpc, put,
+ fun(Scheme, Host, Port, Path, Args, Headers, Body) ->
+ ?assertEqual("http", Scheme),
+ ?assertEqual("consul.node.consul", Host),
+ ?assertEqual(8501, Port),
+ ?assertEqual("v1/session/create", Path),
+ ?assertEqual([], Args),
+ ?assertEqual([], Headers),
+ ?assertEqual([], Body),
+ {ok, []}
+ end),
+ os:putenv("CONSUL_HOST", "consul.node.consul"),
+ os:putenv("CONSUL_PORT", "8501"),
+ ?assertEqual({ok, []}, rabbit_peer_discovery_consul:consul_session_create([], rabbit_peer_discovery_consul:maybe_add_acl([]), [])),
+ ?assert(meck:validate(rabbit_peer_discovery_httpc)).
+
+consul_session_renew_default_values_test(_Config) ->
+ meck:expect(rabbit_peer_discovery_httpc, put,
+ fun(Scheme, Host, Port, Path, Args, Headers, Body) ->
+ ?assertEqual("http", Scheme),
+ ?assertEqual("localhost", Host),
+ ?assertEqual(8500, Port),
+ ?assertEqual("v1/session/renew/session_id", Path),
+ ?assertEqual([], Args),
+ ?assertEqual([], Headers),
+ ?assertEqual([], Body),
+ {ok, []}
+ end),
+ ?assertEqual({ok, []}, rabbit_peer_discovery_consul:consul_session_renew("session_id", [], rabbit_peer_discovery_consul:maybe_add_acl([]))),
+ ?assert(meck:validate(rabbit_peer_discovery_httpc)).
+
+consul_session_renew_custom_values_test(_Config) ->
+ meck:expect(rabbit_peer_discovery_httpc, put,
+ fun(Scheme, Host, Port, Path, Args, Headers, Body) ->
+ ?assertEqual("http", Scheme),
+ ?assertEqual("consul.node.consul", Host),
+ ?assertEqual(8501, Port),
+ ?assertEqual("v1/session/renew/session_id", Path),
+ ?assertEqual([], Args),
+ ?assertEqual([], Headers),
+ ?assertEqual([], Body),
+ {ok, []}
+ end),
+ os:putenv("CONSUL_HOST", "consul.node.consul"),
+ os:putenv("CONSUL_PORT", "8501"),
+ ?assertEqual({ok, []}, rabbit_peer_discovery_consul:consul_session_renew("session_id", [], rabbit_peer_discovery_consul:maybe_add_acl([]))),
+ ?assert(meck:validate(rabbit_peer_discovery_httpc)).
+
+%%%
+%%% Implementation
+%%%
+
+list_of_nodes_with_warnings() ->
+ <<"[{\"Node\": {\"Node\": \"rabbit2.internal.domain\", \"Address\": \"10.20.16.160\"}, \"Checks\": [{\"Node\": \"rabbit2.internal.domain\", \"CheckID\": \"service:rabbitmq\", \"Name\": \"Service \'rabbitmq\' check\", \"ServiceName\": \"rabbitmq\", \"Notes\": \"Connect to the port internally every 30 seconds\", \"Status\": \"warning\", \"ServiceID\": \"rabbitmq:172.172.16.4.50\", \"Output\": \"\"}, {\"Node\": \"rabbit2.internal.domain\", \"CheckID\": \"serfHealth\", \"Name\": \"Serf Health Status\", \"ServiceName\": \"\", \"Notes\": \"\", \"Status\": \"passing\", \"ServiceID\": \"\", \"Output\": \"Agent alive and reachable\"}], \"Service\": {\"Address\": \"172.16.4.51\", \"Port\": 5672, \"ID\": \"rabbitmq:172.16.4.51\", \"Service\": \"rabbitmq\", \"Tags\": [\"amqp\"]}}, {\"Node\": {\"Node\": \"rabbit1.internal.domain\", \"Address\": \"10.20.16.159\"}, \"Checks\": [{\"Node\": \"rabbit1.internal.domain\", \"CheckID\": \"service:rabbitmq\", \"Name\": \"Service \'rabbitmq\' check\", \"ServiceName\": \"rabbitmq\", \"Notes\": \"Connect to the port internally every 30 seconds\", \"Status\": \"critical\", \"ServiceID\": \"rabbitmq\", \"Output\": \"\"}, {\"Node\": \"rabbit1.internal.domain\", \"CheckID\": \"serfHealth\", \"Name\": \"Serf Health Status\", \"ServiceName\": \"\", \"Notes\": \"\", \"Status\": \"passing\", \"ServiceID\": \"\", \"Output\": \"Agent alive and reachable\"}], \"Service\": {\"Address\": \"172.172.16.51\", \"Port\": 5672, \"ID\": \"rabbitmq:172.172.16.51\", \"Service\": \"rabbitmq\", \"Tags\": [\"amqp\"]}}]">>.
+
+list_of_nodes_without_warnings() ->
+ <<"[{\"Node\": {\"Node\": \"rabbit2.internal.domain\", \"Address\": \"10.20.16.160\"}, \"Checks\": [{\"Node\": \"rabbit2.internal.domain\", \"CheckID\": \"service:rabbitmq\", \"Name\": \"Service \'rabbitmq\' check\", \"ServiceName\": \"rabbitmq\", \"Notes\": \"Connect to the port internally every 30 seconds\", \"Status\": \"passing\", \"ServiceID\": \"rabbitmq:172.172.16.4.50\", \"Output\": \"\"}, {\"Node\": \"rabbit2.internal.domain\", \"CheckID\": \"serfHealth\", \"Name\": \"Serf Health Status\", \"ServiceName\": \"\", \"Notes\": \"\", \"Status\": \"passing\", \"ServiceID\": \"\", \"Output\": \"Agent alive and reachable\"}], \"Service\": {\"Address\": \"172.16.4.51\", \"Port\": 5672, \"ID\": \"rabbitmq:172.16.4.51\", \"Service\": \"rabbitmq\", \"Tags\": [\"amqp\"]}}, {\"Node\": {\"Node\": \"rabbit1.internal.domain\", \"Address\": \"10.20.16.159\"}, \"Checks\": [{\"Node\": \"rabbit1.internal.domain\", \"CheckID\": \"service:rabbitmq\", \"Name\": \"Service \'rabbitmq\' check\", \"ServiceName\": \"rabbitmq\", \"Notes\": \"Connect to the port internally every 30 seconds\", \"Status\": \"passing\", \"ServiceID\": \"rabbitmq\", \"Output\": \"\"}, {\"Node\": \"rabbit1.internal.domain\", \"CheckID\": \"serfHealth\", \"Name\": \"Serf Health Status\", \"ServiceName\": \"\", \"Notes\": \"\", \"Status\": \"passing\", \"ServiceID\": \"\", \"Output\": \"Agent alive and reachable\"}], \"Service\": {\"Address\": \"172.172.16.51\", \"Port\": 5672, \"ID\": \"rabbitmq:172.172.16.51\", \"Service\": \"rabbitmq\", \"Tags\": [\"amqp\"]}}]">>.
diff --git a/deps/rabbitmq_peer_discovery_etcd/.gitignore b/deps/rabbitmq_peer_discovery_etcd/.gitignore
new file mode 100644
index 0000000000..018293a553
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_etcd/.gitignore
@@ -0,0 +1,26 @@
+*~
+.sw?
+.*.sw?
+*.beam
+*.coverdata
+tags
+/.erlang.mk/
+/cover/
+/deps/
+/doc/
+/ebin/
+/escript/
+/escript.lock
+/git-revisions.txt
+/logs/
+/plugins/
+/plugins.lock
+/rebar.config
+/rebar.lock
+/sbin/
+/sbin.lock
+/test/config_schema_SUITE_data/schema/
+/test/ct.cover.spec
+/xrefr
+
+/rabbitmq_peer_discovery_etcd.d
diff --git a/deps/rabbitmq_peer_discovery_etcd/.travis.yml b/deps/rabbitmq_peer_discovery_etcd/.travis.yml
new file mode 100644
index 0000000000..eff9ee34b6
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_etcd/.travis.yml
@@ -0,0 +1,62 @@
+# vim:sw=2:et:
+
+os: linux
+dist: bionic
+language: elixir
+notifications:
+ email:
+ recipients:
+ - alerts@rabbitmq.com
+ on_success: never
+ on_failure: always
+addons:
+ apt:
+ packages:
+ - awscli
+ - daemonize
+cache:
+ apt: true
+env:
+ global:
+ - secure: AZevrggrLdOzDo8TeLHTLFyCp7En4ksZRy6gBqxb31/KmeDXooLsSfuvr0FhVmLRzCWFIdcZQk2j9ZHqvCz6OFfw4ZtOUoZmjwMP9FpiLcBQprWeiQhBXrhdSPe3ZY/oRFsMXaVIyc9+Z0dR32zgoYHJcn3KR6vjmzPaigzygStTc/Om6Xoal4AQghtOAJJ27OsayBQJyldknL6tVkFzB9oCOYBpu5HoLPSf+jzG/bF1BIHCre2mU7FASFAyNAQ+BlnyBBv5CY8hKTPTNL9FApErg0gUmmjZPRepZOJjAzFVBK2qOIAcSieTvAYzP2GkedZppyYuJLJnSuala+tSYKaQ/o9XZlIdFkoAX7ta3qk55JHCIzhSf9IE5SOfIS9DTaIWiQKSO6bvONMnqpy+ttQuCVeaFqROzxav6LcD0tJnHAP0to1pYKtJ0imxDxjFrGg3yrV2KkEAAAfc1Suo+B9vNzmarj580rspC5yZXmiIPE3tB42XvfIaeuJ2Ag3KCR3UyLIctl/HFBz1xTEYCX5mcWN5OQ/7YSmmL8xGc88hDg3BO8TJiHdxpi+lRxWztwzG6PTZrcn4G2aaEBjBgLP5Vq65MX2TEG5PFsgraecaLYszuZSNtHFEAZ+ITGjhuOfgn4IpjrasxFwD8vlwxxaoHNHgrdYLsiSn73WoIA0=
+ - secure: Qqz6IymWGWwcne62cer87hsGD84ioz0PhG4hLjBObP+xfqVVHXMOENKUPFDEtb6+e/DHh3QyHXd88EyIRAgaiCJyzIed85rxsyUcnlc0qyeW+4G1shzG4V9H/d+/0hUUc7PFgquwcQ7YUWH3y7GZt+GBIthYc1nV8uiWOEEAu04OXCFxfuVZ9TPMrdKjlgYkjwCwfZ76tHxOh8gXWHmPFog5mWOwbwS/rQwCCpyCc0J6PK3EuMtL98b5GDFjSY4wQxvLCYt1Cv6kwCTZNoNr7ke4g+LmqksCZ9fCmdM99B6T0+iAGA+0OlKZRLUIv1ANoktWHiB/XYseyqfC+rfcb0uaS0+b8Yg52Ojej7qMvlNowrT6gmxblTC02CzBkdiJlM4QqThiHbUkdSR3tDSd2W/2IgABo8EQ8p74otrnUsAcAJIGtHAOoiUmzXTozATp0Jpp7z4vmAWkh5wqLFhR0CdzGv3jFZrgxVlL596MjmpNS14NhrQpU1YU319rnnq0qflK5Fv+U3KQBIOuFWXABlYEr6zxd7zNmHOXpV5PGPMnilY9j7uLP9BLrR30GdKeucN0r1xUoYXqjCx0msfZwW1bhC0WXiC3raBRWuRelG4wAAWSrckBqBwHQ8rsY0FWxvbw4jrf6yJf4zyQxmIdCUe+fyEfAmZUrYgdTVwd9nk=
+
+ # $base_rmq_ref is used by rabbitmq-components.mk to select the
+ # appropriate branch for dependencies.
+ - base_rmq_ref=master
+
+elixir:
+ - '1.9'
+otp_release:
+ - '21.3'
+ - '22.2'
+
+install:
+ # This project being an Erlang one (we just set language to Elixir
+ # to ensure it is installed), we don't want Travis to run mix(1)
+ # automatically as it will break.
+ skip
+
+script:
+ # $current_rmq_ref is also used by rabbitmq-components.mk to select
+ # the appropriate branch for dependencies.
+ - make check-rabbitmq-components.mk
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+ - make xref
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+ - make tests
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+
+after_failure:
+ - |
+ cd "$TRAVIS_BUILD_DIR"
+ if test -d logs && test "$AWS_ACCESS_KEY_ID" && test "$AWS_SECRET_ACCESS_KEY"; then
+ archive_name="$(basename "$TRAVIS_REPO_SLUG")-$TRAVIS_JOB_NUMBER"
+
+ tar -c --transform "s/^logs/${archive_name}/" -f - logs | \
+ xz > "${archive_name}.tar.xz"
+
+ aws s3 cp "${archive_name}.tar.xz" s3://server-release-pipeline/travis-ci-logs/ \
+ --region eu-west-1 \
+ --acl public-read
+ fi
diff --git a/deps/rabbitmq_peer_discovery_etcd/.travis.yml.patch b/deps/rabbitmq_peer_discovery_etcd/.travis.yml.patch
new file mode 100644
index 0000000000..e8fed3532e
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_etcd/.travis.yml.patch
@@ -0,0 +1,19 @@
+--- .travis.yml
++++ .travis.yml
+@@ -1,7 +1,7 @@
+ # vim:sw=2:et:
+
+ os: linux
+-dist: xenial
++dist: bionic
+ language: elixir
+ notifications:
+ email:
+@@ -13,6 +13,7 @@ addons:
+ apt:
+ packages:
+ - awscli
++ - daemonize
+ cache:
+ apt: true
+ env:
diff --git a/deps/rabbitmq_peer_discovery_etcd/CODE_OF_CONDUCT.md b/deps/rabbitmq_peer_discovery_etcd/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000000..08697906fd
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_etcd/CODE_OF_CONDUCT.md
@@ -0,0 +1,44 @@
+# Contributor Code of Conduct
+
+As contributors and maintainers of this project, and in the interest of fostering an open
+and welcoming community, we pledge to respect all people who contribute through reporting
+issues, posting feature requests, updating documentation, submitting pull requests or
+patches, and other activities.
+
+We are committed to making participation in this project a harassment-free experience for
+everyone, regardless of level of experience, gender, gender identity and expression,
+sexual orientation, disability, personal appearance, body size, race, ethnicity, age,
+religion, or nationality.
+
+Examples of unacceptable behavior by participants include:
+
+ * The use of sexualized language or imagery
+ * Personal attacks
+ * Trolling or insulting/derogatory comments
+ * Public or private harassment
+ * Publishing others' private information, such as physical or electronic addresses,
+ without explicit permission
+ * Other unethical or unprofessional conduct
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments,
+commits, code, wiki edits, issues, and other contributions that are not aligned to this
+Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors
+that they deem inappropriate, threatening, offensive, or harmful.
+
+By adopting this Code of Conduct, project maintainers commit themselves to fairly and
+consistently applying these principles to every aspect of managing this project. Project
+maintainers who do not follow or enforce the Code of Conduct may be permanently removed
+from the project team.
+
+This Code of Conduct applies both within project spaces and in public spaces when an
+individual is representing the project or its community.
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by
+contacting a project maintainer at [info@rabbitmq.com](mailto:info@rabbitmq.com). All complaints will
+be reviewed and investigated and will result in a response that is deemed necessary and
+appropriate to the circumstances. Maintainers are obligated to maintain confidentiality
+with regard to the reporter of an incident.
+
+This Code of Conduct is adapted from the
+[Contributor Covenant](https://contributor-covenant.org), version 1.3.0, available at
+[contributor-covenant.org/version/1/3/0/](https://contributor-covenant.org/version/1/3/0/)
diff --git a/deps/rabbitmq_peer_discovery_etcd/CONTRIBUTING.md b/deps/rabbitmq_peer_discovery_etcd/CONTRIBUTING.md
new file mode 100644
index 0000000000..5933280316
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_etcd/CONTRIBUTING.md
@@ -0,0 +1,118 @@
+Thank you for using RabbitMQ and for taking the time to contribute to the project.
+This document has two main parts:
+
+ * when and how to file GitHub issues for RabbitMQ projects
+ * how to submit pull requests
+
+They are intended to save you and RabbitMQ maintainers some time, so please
+take a moment to read through them.
+
+## Overview
+
+### GitHub issues
+
+The RabbitMQ team uses GitHub issues for _specific actionable items_ that
+engineers can work on. This assumes the following:
+
+* GitHub issues are not used for questions, investigations, root cause
+  analysis, discussions of potential issues, etc. (as defined by this team)
+* Enough information is provided by the reporter for maintainers to work with
+
+The team receives many questions through various venues every single
+day. Frequently, these questions do not include the necessary details
+the team needs to begin useful work. GitHub issues can very quickly
+turn into a something impossible to navigate and make sense
+of. Because of this, questions, investigations, root cause analysis,
+and discussions of potential features are all considered to be
+[mailing list][rmq-users] material. If you are unsure where to begin,
+the [RabbitMQ users mailing list][rmq-users] is the right place.
+
+Getting all the details necessary to reproduce an issue, reach a
+conclusion, or even form a hypothesis about what's happening can take a
+fair amount of time. Please help others help you by providing a way to
+reproduce the behavior you're observing, or at least sharing as much
+relevant information as possible on the [RabbitMQ users mailing
+list][rmq-users].
+
+Please provide versions of the software used:
+
+ * RabbitMQ server
+ * Erlang
+ * Operating system version (and distribution, if applicable)
+ * All client libraries used
+ * RabbitMQ plugins (if applicable)
+
+The following information greatly helps in investigating and reproducing issues:
+
+ * RabbitMQ server logs
+ * A code example or terminal transcript that can be used to reproduce
+ * Full exception stack traces (a single line message is not enough!)
+ * `rabbitmqctl report` and `rabbitmqctl environment` output
+ * Other relevant details about the environment and workload, e.g. a traffic capture
+ * Feel free to edit out hostnames and other potentially sensitive information.
+
+To make collecting much of this and other environment information easier, use
+the [`rabbitmq-collect-env`][rmq-collect-env] script. It will produce an archive with
+server logs, operating system logs, output of certain diagnostics commands and so on.
+Please note that **no effort is made to scrub any information that may be sensitive**.
+
+### Pull Requests
+
+RabbitMQ projects use pull requests to discuss, collaborate on and accept code contributions.
+Pull requests are the primary place for discussing code changes.
+
+Here's the recommended workflow:
+
+ * [Fork the repository][github-fork] or repositories you plan on contributing to. If multiple
+ repositories are involved in addressing the same issue, please use the same branch name
+ in each repository
+ * Create a branch with a descriptive name in the relevant repositories
+ * Make your changes, run tests (usually with `make tests`), commit with a
+   [descriptive message][git-commit-msgs], and push to your fork (see the
+   sketch after this list)
+ * Submit pull requests with an explanation of what has been changed and **why**
+ * Submit a filled out and signed [Contributor Agreement][ca-agreement] if needed (see below)
+ * Be patient. We will get to your pull request eventually
+
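+In shell terms, that workflow looks roughly like this (a sketch only: it
+assumes your fork is configured as a git remote named `fork`, and the
+branch name and commit message are illustrative):
+
+    git checkout -b descriptive-branch-name
+    # ... make your changes ...
+    make tests
+    git commit -am "Explain what has been changed and why"
+    git push -u fork descriptive-branch-name
+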
+If what you are going to work on is a substantial change, please first
+ask the core team for their opinion on the [RabbitMQ users mailing list][rmq-users].
+
+## Running Tests
+
+See [RUNNING_TESTS.md](./RUNNING_TESTS.md) to learn how the test suites provision an `etcd`
+node and what the prerequisites are.
+
+To run a particular suite:
+
+ make ct-$suite_name
+
+for example, to run the `system` suite:
+
+ make ct-system
+
+Finally,
+
+ make tests
+
+will run all suites.
+
+## Code of Conduct
+
+See [CODE_OF_CONDUCT.md](./CODE_OF_CONDUCT.md).
+
+## Contributor Agreement
+
+If you want to contribute a non-trivial change, please submit a signed
+copy of our [Contributor Agreement][ca-agreement] around the time you
+submit your pull request. This will make it much easier (in some
+cases, possible) for the RabbitMQ team at Pivotal to merge your
+contribution.
+
+## Where to Ask Questions
+
+If something isn't clear, feel free to ask on our [mailing list][rmq-users].
+
+[rmq-collect-env]: https://github.com/rabbitmq/support-tools/blob/master/scripts/rabbitmq-collect-env
+[git-commit-msgs]: https://chris.beams.io/posts/git-commit/
+[rmq-users]: https://groups.google.com/forum/#!forum/rabbitmq-users
+[ca-agreement]: https://cla.pivotal.io/sign/rabbitmq
+[github-fork]: https://help.github.com/articles/fork-a-repo/
diff --git a/deps/rabbitmq_peer_discovery_etcd/LICENSE b/deps/rabbitmq_peer_discovery_etcd/LICENSE
new file mode 100644
index 0000000000..f2da65d175
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_etcd/LICENSE
@@ -0,0 +1,4 @@
+This package is licensed under the MPL 2.0. For the MPL 2.0, please see LICENSE-MPL-RabbitMQ.
+
+If you have any questions regarding licensing, please contact us at
+info@rabbitmq.com.
diff --git a/deps/rabbitmq_peer_discovery_etcd/LICENSE-MPL-RabbitMQ b/deps/rabbitmq_peer_discovery_etcd/LICENSE-MPL-RabbitMQ
new file mode 100644
index 0000000000..14e2f777f6
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_etcd/LICENSE-MPL-RabbitMQ
@@ -0,0 +1,373 @@
+Mozilla Public License Version 2.0
+==================================
+
+1. Definitions
+--------------
+
+1.1. "Contributor"
+ means each individual or legal entity that creates, contributes to
+ the creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+ means the combination of the Contributions of others (if any) used
+ by a Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+ means Source Code Form to which the initial Contributor has attached
+ the notice in Exhibit A, the Executable Form of such Source Code
+ Form, and Modifications of such Source Code Form, in each case
+ including portions thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ (a) that the initial Contributor has attached the notice described
+ in Exhibit B to the Covered Software; or
+
+ (b) that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the
+ terms of a Secondary License.
+
+1.6. "Executable Form"
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+ means a work that combines Covered Software with other material, in
+ a separate file or files, that is not Covered Software.
+
+1.8. "License"
+ means this document.
+
+1.9. "Licensable"
+ means having the right to grant, to the maximum extent possible,
+ whether at the time of the initial grant or subsequently, any and
+ all of the rights conveyed by this License.
+
+1.10. "Modifications"
+ means any of the following:
+
+ (a) any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered
+ Software; or
+
+ (b) any new file in Source Code Form that contains any Covered
+ Software.
+
+1.11. "Patent Claims" of a Contributor
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the
+ License, by the making, using, selling, offering for sale, having
+ made, import, or transfer of either its Contributions or its
+ Contributor Version.
+
+1.12. "Secondary License"
+ means either the GNU General Public License, Version 2.0, the GNU
+ Lesser General Public License, Version 2.1, the GNU Affero General
+ Public License, Version 3.0, or any later versions of those
+ licenses.
+
+1.13. "Source Code Form"
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that
+ controls, is controlled by, or is under common control with You. For
+ purposes of this definition, "control" means (a) the power, direct
+ or indirect, to cause the direction or management of such entity,
+ whether by contract or otherwise, or (b) ownership of more than
+ fifty percent (50%) of the outstanding shares or beneficial
+ ownership of such entity.
+
+2. License Grants and Conditions
+--------------------------------
+
+2.1. Grants
+
+Each Contributor hereby grants You a world-wide, royalty-free,
+non-exclusive license:
+
+(a) under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+(b) under Patent Claims of such Contributor to make, use, sell, offer
+ for sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+The licenses granted in Section 2.1 with respect to any Contribution
+become effective for each Contribution on the date the Contributor first
+distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+The licenses granted in this Section 2 are the only rights granted under
+this License. No additional rights or licenses will be implied from the
+distribution or licensing of Covered Software under this License.
+Notwithstanding Section 2.1(b) above, no patent license is granted by a
+Contributor:
+
+(a) for any code that a Contributor has removed from Covered Software;
+ or
+
+(b) for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+(c) under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+This License does not grant any rights in the trademarks, service marks,
+or logos of any Contributor (except as may be necessary to comply with
+the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+No Contributor makes additional grants as a result of Your choice to
+distribute the Covered Software under a subsequent version of this
+License (see Section 10.2) or under the terms of a Secondary License (if
+permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+Each Contributor represents that the Contributor believes its
+Contributions are its original creation(s) or it has sufficient rights
+to grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+This License is not intended to limit any rights You have under
+applicable copyright doctrines of fair use, fair dealing, or other
+equivalents.
+
+2.7. Conditions
+
+Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
+in Section 2.1.
+
+3. Responsibilities
+-------------------
+
+3.1. Distribution of Source Form
+
+All distribution of Covered Software in Source Code Form, including any
+Modifications that You create or to which You contribute, must be under
+the terms of this License. You must inform recipients that the Source
+Code Form of the Covered Software is governed by the terms of this
+License, and how they can obtain a copy of this License. You may not
+attempt to alter or restrict the recipients' rights in the Source Code
+Form.
+
+3.2. Distribution of Executable Form
+
+If You distribute Covered Software in Executable Form then:
+
+(a) such Covered Software must also be made available in Source Code
+ Form, as described in Section 3.1, and You must inform recipients of
+ the Executable Form how they can obtain a copy of such Source Code
+ Form by reasonable means in a timely manner, at a charge no more
+ than the cost of distribution to the recipient; and
+
+(b) You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter
+ the recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+You may create and distribute a Larger Work under terms of Your choice,
+provided that You also comply with the requirements of this License for
+the Covered Software. If the Larger Work is a combination of Covered
+Software with a work governed by one or more Secondary Licenses, and the
+Covered Software is not Incompatible With Secondary Licenses, this
+License permits You to additionally distribute such Covered Software
+under the terms of such Secondary License(s), so that the recipient of
+the Larger Work may, at their option, further distribute the Covered
+Software under the terms of either this License or such Secondary
+License(s).
+
+3.4. Notices
+
+You may not remove or alter the substance of any license notices
+(including copyright notices, patent notices, disclaimers of warranty,
+or limitations of liability) contained within the Source Code Form of
+the Covered Software, except that You may alter any license notices to
+the extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+You may choose to offer, and to charge a fee for, warranty, support,
+indemnity or liability obligations to one or more recipients of Covered
+Software. However, You may do so only on Your own behalf, and not on
+behalf of any Contributor. You must make it absolutely clear that any
+such warranty, support, indemnity, or liability obligation is offered by
+You alone, and You hereby agree to indemnify every Contributor for any
+liability incurred by such Contributor as a result of warranty, support,
+indemnity or liability terms You offer. You may include additional
+disclaimers of warranty and limitations of liability specific to any
+jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+---------------------------------------------------
+
+If it is impossible for You to comply with any of the terms of this
+License with respect to some or all of the Covered Software due to
+statute, judicial order, or regulation then You must: (a) comply with
+the terms of this License to the maximum extent possible; and (b)
+describe the limitations and the code they affect. Such description must
+be placed in a text file included with all distributions of the Covered
+Software under this License. Except to the extent prohibited by statute
+or regulation, such description must be sufficiently detailed for a
+recipient of ordinary skill to be able to understand it.
+
+5. Termination
+--------------
+
+5.1. The rights granted under this License will terminate automatically
+if You fail to comply with any of its terms. However, if You become
+compliant, then the rights granted under this License from a particular
+Contributor are reinstated (a) provisionally, unless and until such
+Contributor explicitly and finally terminates Your grants, and (b) on an
+ongoing basis, if such Contributor fails to notify You of the
+non-compliance by some reasonable means prior to 60 days after You have
+come back into compliance. Moreover, Your grants from a particular
+Contributor are reinstated on an ongoing basis if such Contributor
+notifies You of the non-compliance by some reasonable means, this is the
+first time You have received notice of non-compliance with this License
+from such Contributor, and You become compliant prior to 30 days after
+Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+infringement claim (excluding declaratory judgment actions,
+counter-claims, and cross-claims) alleging that a Contributor Version
+directly or indirectly infringes any patent, then the rights granted to
+You by any and all Contributors for the Covered Software under Section
+2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all
+end user license agreements (excluding distributors and resellers) which
+have been validly granted by You or Your distributors under this License
+prior to termination shall survive termination.
+
+************************************************************************
+* *
+* 6. Disclaimer of Warranty *
+* ------------------------- *
+* *
+* Covered Software is provided under this License on an "as is" *
+* basis, without warranty of any kind, either expressed, implied, or *
+* statutory, including, without limitation, warranties that the *
+* Covered Software is free of defects, merchantable, fit for a *
+* particular purpose or non-infringing. The entire risk as to the *
+* quality and performance of the Covered Software is with You. *
+* Should any Covered Software prove defective in any respect, You *
+* (not any Contributor) assume the cost of any necessary servicing, *
+* repair, or correction. This disclaimer of warranty constitutes an *
+* essential part of this License. No use of any Covered Software is *
+* authorized under this License except under this disclaimer. *
+* *
+************************************************************************
+
+************************************************************************
+* *
+* 7. Limitation of Liability *
+* -------------------------- *
+* *
+* Under no circumstances and under no legal theory, whether tort *
+* (including negligence), contract, or otherwise, shall any *
+* Contributor, or anyone who distributes Covered Software as *
+* permitted above, be liable to You for any direct, indirect, *
+* special, incidental, or consequential damages of any character *
+* including, without limitation, damages for lost profits, loss of *
+* goodwill, work stoppage, computer failure or malfunction, or any *
+* and all other commercial damages or losses, even if such party *
+* shall have been informed of the possibility of such damages. This *
+* limitation of liability shall not apply to liability for death or *
+* personal injury resulting from such party's negligence to the *
+* extent applicable law prohibits such limitation. Some *
+* jurisdictions do not allow the exclusion or limitation of *
+* incidental or consequential damages, so this exclusion and *
+* limitation may not apply to You. *
+* *
+************************************************************************
+
+8. Litigation
+-------------
+
+Any litigation relating to this License may be brought only in the
+courts of a jurisdiction where the defendant maintains its principal
+place of business and such litigation shall be governed by laws of that
+jurisdiction, without reference to its conflict-of-law provisions.
+Nothing in this Section shall prevent a party's ability to bring
+cross-claims or counter-claims.
+
+9. Miscellaneous
+----------------
+
+This License represents the complete agreement concerning the subject
+matter hereof. If any provision of this License is held to be
+unenforceable, such provision shall be reformed only to the extent
+necessary to make it enforceable. Any law or regulation which provides
+that the language of a contract shall be construed against the drafter
+shall not be used to construe this License against a Contributor.
+
+10. Versions of the License
+---------------------------
+
+10.1. New Versions
+
+Mozilla Foundation is the license steward. Except as provided in Section
+10.3, no one other than the license steward has the right to modify or
+publish new versions of this License. Each version will be given a
+distinguishing version number.
+
+10.2. Effect of New Versions
+
+You may distribute the Covered Software under the terms of the version
+of the License under which You originally received the Covered Software,
+or under the terms of any subsequent version published by the license
+steward.
+
+10.3. Modified Versions
+
+If you create software not governed by this License, and you want to
+create a new license for such software, you may create and use a
+modified version of this License if you rename the license and remove
+any references to the name of the license steward (except to note that
+such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+Licenses
+
+If You choose to distribute Source Code Form that is Incompatible With
+Secondary Licenses under the terms of this version of the License, the
+notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+-------------------------------------------
+
+ This Source Code Form is subject to the terms of the Mozilla Public
+ License, v. 2.0. If a copy of the MPL was not distributed with this
+ file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular
+file, then You may include the notice in a location (such as a LICENSE
+file in a relevant directory) where a recipient would be likely to look
+for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+---------------------------------------------------------
+
+ This Source Code Form is "Incompatible With Secondary Licenses", as
+ defined by the Mozilla Public License, v. 2.0.
diff --git a/deps/rabbitmq_peer_discovery_etcd/Makefile b/deps/rabbitmq_peer_discovery_etcd/Makefile
new file mode 100644
index 0000000000..5a1023c2bf
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_etcd/Makefile
@@ -0,0 +1,21 @@
+PROJECT = rabbitmq_peer_discovery_etcd
+PROJECT_DESCRIPTION = etcd-based RabbitMQ peer discovery backend
+PROJECT_MOD = rabbitmq_peer_discovery_etcd_app
+
+DEPS = rabbit_common rabbitmq_peer_discovery_common rabbit eetcd gun
+TEST_DEPS = rabbitmq_ct_helpers rabbitmq_ct_client_helpers ct_helper meck
+dep_ct_helper = git https://github.com/extend/ct_helper.git master
+dep_gun = hex 1.3.3
+dep_eetcd = hex 0.3.3
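+# Note: eetcd is the etcd v3 gRPC client used by this plugin, and gun is
+# its HTTP/2 transport; both are pinned to specific Hex versions above.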
+
+DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-plugin.mk
+DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk
+
+# FIXME: Use erlang.mk patched for RabbitMQ, while waiting for PRs to be
+# reviewed and merged.
+
+ERLANG_MK_REPO = https://github.com/rabbitmq/erlang.mk.git
+ERLANG_MK_COMMIT = rabbitmq-tmp
+
+include rabbitmq-components.mk
+include erlang.mk
diff --git a/deps/rabbitmq_peer_discovery_etcd/README.md b/deps/rabbitmq_peer_discovery_etcd/README.md
new file mode 100644
index 0000000000..4183557bdd
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_etcd/README.md
@@ -0,0 +1,57 @@
+# RabbitMQ Peer Discovery Etcd
+
+This is an etcd-based implementation of the RabbitMQ [peer discovery interface](http://www.rabbitmq.com/blog/2018/02/12/peer-discovery-subsystem-in-rabbitmq-3-7/)
+(new in 3.7.0, previously available in the [rabbitmq-autocluster plugin](https://github.com/rabbitmq/rabbitmq-autocluster)
+by Gavin Roy).
+
+This plugin only performs peer discovery and the most basic node health monitoring
+using [etcd](https://coreos.com/etcd/) as a data source.
+Please get familiar with [RabbitMQ clustering fundamentals](https://rabbitmq.com/clustering.html) before attempting
+to use it.
+
+While it may seem at times that this is a RabbitMQ cluster management solution,
+it is not. Cluster provisioning and most of Day 2 operations such as [proper monitoring](https://rabbitmq.com/monitoring.html)
+are not in scope for this plugin.
+
+
+## Supported RabbitMQ Versions
+
+This plugin requires RabbitMQ 3.7.0 or later.
+
+
+## Supported etcd Versions
+
+The plugin supports etcd 3.4 or later and uses the current stable v3 gRPC API.
+
+## Installation
+
+This plugin ships with [supported RabbitMQ versions](https://www.rabbitmq.com/versions.html).
+There is no need to install it separately.
+
+As with any [plugin](https://rabbitmq.com/plugins.html), it must be enabled before it
+can be used. For peer discovery plugins, this means they must be [enabled](https://rabbitmq.com/plugins.html#basics) or [preconfigured](https://rabbitmq.com/plugins.html#enabled-plugins-file)
+before first node boot:
+
+```
+rabbitmq-plugins --offline enable rabbitmq_peer_discovery_etcd
+```
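+
+For example, one way to verify that the plugin is enabled on a running node:
+
+```
+rabbitmq-plugins list | grep rabbitmq_peer_discovery_etcd
+```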
+
+
+## Documentation
+
+See [RabbitMQ Cluster Formation guide](https://www.rabbitmq.com/cluster-formation.html).
+
+
+## Contributing
+
+See [CONTRIBUTING.md](./CONTRIBUTING.md) and our [development process overview](http://www.rabbitmq.com/github.html).
+
+
+## License
+
+[Licensed under the MPL](LICENSE-MPL-RabbitMQ), same as RabbitMQ server.
+
+
+## Copyright
+
+(c) 2007-2020 VMware, Inc. or its affiliates.
diff --git a/deps/rabbitmq_peer_discovery_etcd/RUNNING_TESTS.md b/deps/rabbitmq_peer_discovery_etcd/RUNNING_TESTS.md
new file mode 100644
index 0000000000..9aafe85ca7
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_etcd/RUNNING_TESTS.md
@@ -0,0 +1,33 @@
+## Running Common Test Suites
+
+```shell
+gmake tests
+```
+
+When running tests via `gmake tests`, there is no need to run the
+`init-etcd.sh` script as the test suite will do it for you.
+
+
+## etcd Node Management
+
+The test suite of this plugin ships with a [script](./test/system_SUITE_data/init-etcd.sh)
+that starts an `etcd` node in the background.
+
+This script can also be used to start a node for experimenting with this
+plugin. To run it:
+
+```shell
+./test/system_SUITE_data/init-etcd.sh [etcd data dir] [etcd client port]
+```
+
+where `[etcd data dir]` is the desired `etcd` node data directory path and
+`[etcd client port]` is the port the node will listen on for client connections.
+
+The script depends on the [`daemonize`
+tool](https://software.clapper.org/daemonize/) to run the process in the
+background and create a pid file.
+
+To stop a node started this way, use the pid file created by `daemonize`:
+
+```shell
+kill -INT $(cat [etcd data dir]/etcd.pid)
+```
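+
+For example, assuming `/tmp/etcd` as the data directory and `2379` as the
+client port (both arbitrary choices for illustration):
+
+```shell
+./test/system_SUITE_data/init-etcd.sh /tmp/etcd 2379
+
+# later, stop the node using the pid file it created
+kill -INT $(cat /tmp/etcd/etcd.pid)
+```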
diff --git a/deps/rabbitmq_peer_discovery_etcd/erlang.mk b/deps/rabbitmq_peer_discovery_etcd/erlang.mk
new file mode 100644
index 0000000000..fce4be0b0a
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_etcd/erlang.mk
@@ -0,0 +1,7808 @@
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+.PHONY: all app apps deps search rel relup docs install-docs check tests clean distclean help erlang-mk
+
+ERLANG_MK_FILENAME := $(realpath $(lastword $(MAKEFILE_LIST)))
+export ERLANG_MK_FILENAME
+
+ERLANG_MK_VERSION = 2019.07.01-40-geb3e4b0
+ERLANG_MK_WITHOUT =
+
+# Make 3.81 and 3.82 are deprecated.
+
+ifeq ($(MAKELEVEL)$(MAKE_VERSION),03.81)
+$(warning Please upgrade to GNU Make 4 or later: https://erlang.mk/guide/installation.html)
+endif
+
+ifeq ($(MAKELEVEL)$(MAKE_VERSION),03.82)
+$(warning Please upgrade to GNU Make 4 or later: https://erlang.mk/guide/installation.html)
+endif
+
+# Core configuration.
+
+PROJECT ?= $(notdir $(CURDIR))
+PROJECT := $(strip $(PROJECT))
+
+PROJECT_VERSION ?= rolling
+PROJECT_MOD ?= $(PROJECT)_app
+PROJECT_ENV ?= []
+
+# Verbosity.
+
+V ?= 0
+
+verbose_0 = @
+verbose_2 = set -x;
+verbose = $(verbose_$(V))
+
+ifeq ($(V),3)
+SHELL := $(SHELL) -x
+endif
+
+gen_verbose_0 = @echo " GEN " $@;
+gen_verbose_2 = set -x;
+gen_verbose = $(gen_verbose_$(V))
+
+gen_verbose_esc_0 = @echo " GEN " $$@;
+gen_verbose_esc_2 = set -x;
+gen_verbose_esc = $(gen_verbose_esc_$(V))
+
+# Temporary files directory.
+
+ERLANG_MK_TMP ?= $(CURDIR)/.erlang.mk
+export ERLANG_MK_TMP
+
+# "erl" command.
+
+ERL = erl +A1 -noinput -boot no_dot_erlang
+
+# Platform detection.
+
+ifeq ($(PLATFORM),)
+UNAME_S := $(shell uname -s)
+
+ifeq ($(UNAME_S),Linux)
+PLATFORM = linux
+else ifeq ($(UNAME_S),Darwin)
+PLATFORM = darwin
+else ifeq ($(UNAME_S),SunOS)
+PLATFORM = solaris
+else ifeq ($(UNAME_S),GNU)
+PLATFORM = gnu
+else ifeq ($(UNAME_S),FreeBSD)
+PLATFORM = freebsd
+else ifeq ($(UNAME_S),NetBSD)
+PLATFORM = netbsd
+else ifeq ($(UNAME_S),OpenBSD)
+PLATFORM = openbsd
+else ifeq ($(UNAME_S),DragonFly)
+PLATFORM = dragonfly
+else ifeq ($(shell uname -o),Msys)
+PLATFORM = msys2
+else
+$(error Unable to detect platform. Please open a ticket with the output of uname -a.)
+endif
+
+export PLATFORM
+endif
+
+# Core targets.
+
+all:: deps app rel
+
+# Noop to avoid a Make warning when there's nothing to do.
+rel::
+ $(verbose) :
+
+relup:: deps app
+
+check:: tests
+
+clean:: clean-crashdump
+
+clean-crashdump:
+ifneq ($(wildcard erl_crash.dump),)
+ $(gen_verbose) rm -f erl_crash.dump
+endif
+
+distclean:: clean distclean-tmp
+
+$(ERLANG_MK_TMP):
+ $(verbose) mkdir -p $(ERLANG_MK_TMP)
+
+distclean-tmp:
+ $(gen_verbose) rm -rf $(ERLANG_MK_TMP)
+
+help::
+ $(verbose) printf "%s\n" \
+ "erlang.mk (version $(ERLANG_MK_VERSION)) is distributed under the terms of the ISC License." \
+ "Copyright (c) 2013-2016 Loïc Hoguin <essen@ninenines.eu>" \
+ "" \
+ "Usage: [V=1] $(MAKE) [target]..." \
+ "" \
+ "Core targets:" \
+ " all Run deps, app and rel targets in that order" \
+ " app Compile the project" \
+ " deps Fetch dependencies (if needed) and compile them" \
+ " fetch-deps Fetch dependencies recursively (if needed) without compiling them" \
+ " list-deps List dependencies recursively on stdout" \
+ " search q=... Search for a package in the built-in index" \
+ " rel Build a release for this project, if applicable" \
+ " docs Build the documentation for this project" \
+ " install-docs Install the man pages for this project" \
+ " check Compile and run all tests and analysis for this project" \
+ " tests Run the tests for this project" \
+ " clean Delete temporary and output files from most targets" \
+ " distclean Delete all temporary and output files" \
+ " help Display this help and exit" \
+ " erlang-mk Update erlang.mk to the latest version"
+
+# Core functions.
+
+empty :=
+space := $(empty) $(empty)
+tab := $(empty) $(empty)
+comma := ,
+
+define newline
+
+
+endef
+
+define comma_list
+$(subst $(space),$(comma),$(strip $(1)))
+endef
+
+define escape_dquotes
+$(subst ",\",$1)
+endef
+
+# Adding erlang.mk to make Erlang scripts that call init:get_plain_arguments() happy.
+define erlang
+$(ERL) $2 -pz $(ERLANG_MK_TMP)/rebar/ebin -eval "$(subst $(newline),,$(call escape_dquotes,$1))" -- erlang.mk
+endef
+
+ifeq ($(PLATFORM),msys2)
+core_native_path = $(shell cygpath -m $1)
+else
+core_native_path = $1
+endif
+
+core_http_get = curl -Lf$(if $(filter-out 0,$(V)),,s)o $(call core_native_path,$1) $2
+
+core_eq = $(and $(findstring $(1),$(2)),$(findstring $(2),$(1)))
+
+# We skip files that contain spaces because they end up causing issues.
+core_find = $(if $(wildcard $1),$(shell find $(1:%/=%) \( -type l -o -type f \) -name $(subst *,\*,$2) | grep -v " "))
+
+core_lc = $(subst A,a,$(subst B,b,$(subst C,c,$(subst D,d,$(subst E,e,$(subst F,f,$(subst G,g,$(subst H,h,$(subst I,i,$(subst J,j,$(subst K,k,$(subst L,l,$(subst M,m,$(subst N,n,$(subst O,o,$(subst P,p,$(subst Q,q,$(subst R,r,$(subst S,s,$(subst T,t,$(subst U,u,$(subst V,v,$(subst W,w,$(subst X,x,$(subst Y,y,$(subst Z,z,$(1)))))))))))))))))))))))))))
+
+core_ls = $(filter-out $(1),$(shell echo $(1)))
+
+# @todo Use a solution that does not require using perl.
+core_relpath = $(shell perl -e 'use File::Spec; print File::Spec->abs2rel(@ARGV) . "\n"' $1 $2)
+
+define core_render
+ printf -- '$(subst $(newline),\n,$(subst %,%%,$(subst ','\'',$(subst $(tab),$(WS),$(call $(1))))))\n' > $(2)
+endef
+
+# Automated update.
+
+ERLANG_MK_REPO ?= https://github.com/ninenines/erlang.mk
+ERLANG_MK_COMMIT ?=
+ERLANG_MK_BUILD_CONFIG ?= build.config
+ERLANG_MK_BUILD_DIR ?= .erlang.mk.build
+
+erlang-mk: WITHOUT ?= $(ERLANG_MK_WITHOUT)
+erlang-mk:
+ifdef ERLANG_MK_COMMIT
+ $(verbose) git clone $(ERLANG_MK_REPO) $(ERLANG_MK_BUILD_DIR)
+ $(verbose) cd $(ERLANG_MK_BUILD_DIR) && git checkout $(ERLANG_MK_COMMIT)
+else
+ $(verbose) git clone --depth 1 $(ERLANG_MK_REPO) $(ERLANG_MK_BUILD_DIR)
+endif
+ $(verbose) if [ -f $(ERLANG_MK_BUILD_CONFIG) ]; then cp $(ERLANG_MK_BUILD_CONFIG) $(ERLANG_MK_BUILD_DIR)/build.config; fi
+ $(gen_verbose) $(MAKE) --no-print-directory -C $(ERLANG_MK_BUILD_DIR) WITHOUT='$(strip $(WITHOUT))' UPGRADE=1
+ $(verbose) cp $(ERLANG_MK_BUILD_DIR)/erlang.mk ./erlang.mk
+ $(verbose) rm -rf $(ERLANG_MK_BUILD_DIR)
+ $(verbose) rm -rf $(ERLANG_MK_TMP)
+
+# The erlang.mk package index is bundled in the default erlang.mk build.
+# Search for the string "copyright" to skip to the rest of the code.
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-kerl
+
+KERL_INSTALL_DIR ?= $(HOME)/erlang
+
+ifeq ($(strip $(KERL)),)
+KERL := $(ERLANG_MK_TMP)/kerl/kerl
+endif
+
+KERL_DIR = $(ERLANG_MK_TMP)/kerl
+
+export KERL
+
+KERL_GIT ?= https://github.com/kerl/kerl
+KERL_COMMIT ?= master
+
+KERL_MAKEFLAGS ?=
+
+OTP_GIT ?= https://github.com/erlang/otp
+
+define kerl_otp_target
+$(KERL_INSTALL_DIR)/$(1): $(KERL)
+ $(verbose) if [ ! -d $$@ ]; then \
+ MAKEFLAGS="$(KERL_MAKEFLAGS)" $(KERL) build git $(OTP_GIT) $(1) $(1); \
+ $(KERL) install $(1) $(KERL_INSTALL_DIR)/$(1); \
+ fi
+endef
+
+define kerl_hipe_target
+$(KERL_INSTALL_DIR)/$1-native: $(KERL)
+ $(verbose) if [ ! -d $$@ ]; then \
+ KERL_CONFIGURE_OPTIONS=--enable-native-libs \
+ MAKEFLAGS="$(KERL_MAKEFLAGS)" $(KERL) build git $(OTP_GIT) $1 $1-native; \
+ $(KERL) install $1-native $(KERL_INSTALL_DIR)/$1-native; \
+ fi
+endef
+
+$(KERL): $(KERL_DIR)
+
+$(KERL_DIR): | $(ERLANG_MK_TMP)
+ $(gen_verbose) git clone --depth 1 $(KERL_GIT) $(ERLANG_MK_TMP)/kerl
+ $(verbose) cd $(ERLANG_MK_TMP)/kerl && git checkout $(KERL_COMMIT)
+ $(verbose) chmod +x $(KERL)
+
+distclean:: distclean-kerl
+
+distclean-kerl:
+ $(gen_verbose) rm -rf $(KERL_DIR)
+
+# Allow users to select which version of Erlang/OTP to use for a project.
+
+ifneq ($(strip $(LATEST_ERLANG_OTP)),)
+# In some environments it is necessary to filter out master.
+ERLANG_OTP := $(notdir $(lastword $(sort\
+ $(filter-out $(KERL_INSTALL_DIR)/master $(KERL_INSTALL_DIR)/OTP_R%,\
+ $(filter-out %-rc1 %-rc2 %-rc3,$(wildcard $(KERL_INSTALL_DIR)/*[^-native]))))))
+endif
+
+ERLANG_OTP ?=
+ERLANG_HIPE ?=
+
+# Use kerl to enforce a specific Erlang/OTP version for a project.
+ifneq ($(strip $(ERLANG_OTP)),)
+export PATH := $(KERL_INSTALL_DIR)/$(ERLANG_OTP)/bin:$(PATH)
+SHELL := env PATH=$(PATH) $(SHELL)
+$(eval $(call kerl_otp_target,$(ERLANG_OTP)))
+
+# Build Erlang/OTP only if it doesn't already exist.
+ifeq ($(wildcard $(KERL_INSTALL_DIR)/$(ERLANG_OTP))$(BUILD_ERLANG_OTP),)
+$(info Building Erlang/OTP $(ERLANG_OTP)... Please wait...)
+$(shell $(MAKE) $(KERL_INSTALL_DIR)/$(ERLANG_OTP) ERLANG_OTP=$(ERLANG_OTP) BUILD_ERLANG_OTP=1 >&2)
+endif
+
+else
+# Same for a HiPE enabled VM.
+ifneq ($(strip $(ERLANG_HIPE)),)
+export PATH := $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native/bin:$(PATH)
+SHELL := env PATH=$(PATH) $(SHELL)
+$(eval $(call kerl_hipe_target,$(ERLANG_HIPE)))
+
+# Build Erlang/OTP only if it doesn't already exist.
+ifeq ($(wildcard $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native)$(BUILD_ERLANG_OTP),)
+$(info Building HiPE-enabled Erlang/OTP $(ERLANG_HIPE)... Please wait...)
+$(shell $(MAKE) $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native ERLANG_HIPE=$(ERLANG_HIPE) BUILD_ERLANG_OTP=1 >&2)
+endif
+
+endif
+endif
+
+PACKAGES += aberth
+pkg_aberth_name = aberth
+pkg_aberth_description = Generic BERT-RPC server in Erlang
+pkg_aberth_homepage = https://github.com/a13x/aberth
+pkg_aberth_fetch = git
+pkg_aberth_repo = https://github.com/a13x/aberth
+pkg_aberth_commit = master
+
+PACKAGES += active
+pkg_active_name = active
+pkg_active_description = Active development for Erlang: rebuild and reload source/binary files while the VM is running
+pkg_active_homepage = https://github.com/proger/active
+pkg_active_fetch = git
+pkg_active_repo = https://github.com/proger/active
+pkg_active_commit = master
+
+PACKAGES += actordb_core
+pkg_actordb_core_name = actordb_core
+pkg_actordb_core_description = ActorDB main source
+pkg_actordb_core_homepage = http://www.actordb.com/
+pkg_actordb_core_fetch = git
+pkg_actordb_core_repo = https://github.com/biokoda/actordb_core
+pkg_actordb_core_commit = master
+
+PACKAGES += actordb_thrift
+pkg_actordb_thrift_name = actordb_thrift
+pkg_actordb_thrift_description = Thrift API for ActorDB
+pkg_actordb_thrift_homepage = http://www.actordb.com/
+pkg_actordb_thrift_fetch = git
+pkg_actordb_thrift_repo = https://github.com/biokoda/actordb_thrift
+pkg_actordb_thrift_commit = master
+
+PACKAGES += aleppo
+pkg_aleppo_name = aleppo
+pkg_aleppo_description = Alternative Erlang Pre-Processor
+pkg_aleppo_homepage = https://github.com/ErlyORM/aleppo
+pkg_aleppo_fetch = git
+pkg_aleppo_repo = https://github.com/ErlyORM/aleppo
+pkg_aleppo_commit = master
+
+PACKAGES += alog
+pkg_alog_name = alog
+pkg_alog_description = Simply the best logging framework for Erlang
+pkg_alog_homepage = https://github.com/siberian-fast-food/alogger
+pkg_alog_fetch = git
+pkg_alog_repo = https://github.com/siberian-fast-food/alogger
+pkg_alog_commit = master
+
+PACKAGES += amqp_client
+pkg_amqp_client_name = amqp_client
+pkg_amqp_client_description = RabbitMQ Erlang AMQP client
+pkg_amqp_client_homepage = https://www.rabbitmq.com/erlang-client-user-guide.html
+pkg_amqp_client_fetch = git
+pkg_amqp_client_repo = https://github.com/rabbitmq/rabbitmq-erlang-client.git
+pkg_amqp_client_commit = master
+
+PACKAGES += annotations
+pkg_annotations_name = annotations
+pkg_annotations_description = Simple code instrumentation utilities
+pkg_annotations_homepage = https://github.com/hyperthunk/annotations
+pkg_annotations_fetch = git
+pkg_annotations_repo = https://github.com/hyperthunk/annotations
+pkg_annotations_commit = master
+
+PACKAGES += antidote
+pkg_antidote_name = antidote
+pkg_antidote_description = Large-scale computation without synchronisation
+pkg_antidote_homepage = https://syncfree.lip6.fr/
+pkg_antidote_fetch = git
+pkg_antidote_repo = https://github.com/SyncFree/antidote
+pkg_antidote_commit = master
+
+PACKAGES += apns
+pkg_apns_name = apns
+pkg_apns_description = Apple Push Notification Server for Erlang
+pkg_apns_homepage = http://inaka.github.com/apns4erl
+pkg_apns_fetch = git
+pkg_apns_repo = https://github.com/inaka/apns4erl
+pkg_apns_commit = master
+
+PACKAGES += asciideck
+pkg_asciideck_name = asciideck
+pkg_asciideck_description = Asciidoc for Erlang.
+pkg_asciideck_homepage = https://ninenines.eu
+pkg_asciideck_fetch = git
+pkg_asciideck_repo = https://github.com/ninenines/asciideck
+pkg_asciideck_commit = master
+
+PACKAGES += azdht
+pkg_azdht_name = azdht
+pkg_azdht_description = Azureus Distributed Hash Table (DHT) in Erlang
+pkg_azdht_homepage = https://github.com/arcusfelis/azdht
+pkg_azdht_fetch = git
+pkg_azdht_repo = https://github.com/arcusfelis/azdht
+pkg_azdht_commit = master
+
+PACKAGES += backoff
+pkg_backoff_name = backoff
+pkg_backoff_description = Simple exponential backoffs in Erlang
+pkg_backoff_homepage = https://github.com/ferd/backoff
+pkg_backoff_fetch = git
+pkg_backoff_repo = https://github.com/ferd/backoff
+pkg_backoff_commit = master
+
+PACKAGES += barrel_tcp
+pkg_barrel_tcp_name = barrel_tcp
+pkg_barrel_tcp_description = barrel is a generic TCP acceptor pool with low latency in Erlang.
+pkg_barrel_tcp_homepage = https://github.com/benoitc-attic/barrel_tcp
+pkg_barrel_tcp_fetch = git
+pkg_barrel_tcp_repo = https://github.com/benoitc-attic/barrel_tcp
+pkg_barrel_tcp_commit = master
+
+PACKAGES += basho_bench
+pkg_basho_bench_name = basho_bench
+pkg_basho_bench_description = A load-generation and testing tool for basically whatever you can write a returning Erlang function for.
+pkg_basho_bench_homepage = https://github.com/basho/basho_bench
+pkg_basho_bench_fetch = git
+pkg_basho_bench_repo = https://github.com/basho/basho_bench
+pkg_basho_bench_commit = master
+
+PACKAGES += bcrypt
+pkg_bcrypt_name = bcrypt
+pkg_bcrypt_description = Bcrypt Erlang / C library
+pkg_bcrypt_homepage = https://github.com/erlangpack/bcrypt
+pkg_bcrypt_fetch = git
+pkg_bcrypt_repo = https://github.com/erlangpack/bcrypt.git
+pkg_bcrypt_commit = master
+
+PACKAGES += beam
+pkg_beam_name = beam
+pkg_beam_description = BEAM emulator written in Erlang
+pkg_beam_homepage = https://github.com/tonyrog/beam
+pkg_beam_fetch = git
+pkg_beam_repo = https://github.com/tonyrog/beam
+pkg_beam_commit = master
+
+PACKAGES += beanstalk
+pkg_beanstalk_name = beanstalk
+pkg_beanstalk_description = An Erlang client for beanstalkd
+pkg_beanstalk_homepage = https://github.com/tim/erlang-beanstalk
+pkg_beanstalk_fetch = git
+pkg_beanstalk_repo = https://github.com/tim/erlang-beanstalk
+pkg_beanstalk_commit = master
+
+PACKAGES += bear
+pkg_bear_name = bear
+pkg_bear_description = a set of statistics functions for erlang
+pkg_bear_homepage = https://github.com/boundary/bear
+pkg_bear_fetch = git
+pkg_bear_repo = https://github.com/boundary/bear
+pkg_bear_commit = master
+
+PACKAGES += bertconf
+pkg_bertconf_name = bertconf
+pkg_bertconf_description = Make ETS tables out of static BERT files that are auto-reloaded
+pkg_bertconf_homepage = https://github.com/ferd/bertconf
+pkg_bertconf_fetch = git
+pkg_bertconf_repo = https://github.com/ferd/bertconf
+pkg_bertconf_commit = master
+
+PACKAGES += bifrost
+pkg_bifrost_name = bifrost
+pkg_bifrost_description = Erlang FTP Server Framework
+pkg_bifrost_homepage = https://github.com/thorstadt/bifrost
+pkg_bifrost_fetch = git
+pkg_bifrost_repo = https://github.com/thorstadt/bifrost
+pkg_bifrost_commit = master
+
+PACKAGES += binpp
+pkg_binpp_name = binpp
+pkg_binpp_description = Erlang Binary Pretty Printer
+pkg_binpp_homepage = https://github.com/jtendo/binpp
+pkg_binpp_fetch = git
+pkg_binpp_repo = https://github.com/jtendo/binpp
+pkg_binpp_commit = master
+
+PACKAGES += bisect
+pkg_bisect_name = bisect
+pkg_bisect_description = Ordered fixed-size binary dictionary in Erlang
+pkg_bisect_homepage = https://github.com/knutin/bisect
+pkg_bisect_fetch = git
+pkg_bisect_repo = https://github.com/knutin/bisect
+pkg_bisect_commit = master
+
+PACKAGES += bitcask
+pkg_bitcask_name = bitcask
+pkg_bitcask_description = because you need another key/value storage engine
+pkg_bitcask_homepage = https://github.com/basho/bitcask
+pkg_bitcask_fetch = git
+pkg_bitcask_repo = https://github.com/basho/bitcask
+pkg_bitcask_commit = develop
+
+PACKAGES += bitstore
+pkg_bitstore_name = bitstore
+pkg_bitstore_description = A document based ontology development environment
+pkg_bitstore_homepage = https://github.com/bdionne/bitstore
+pkg_bitstore_fetch = git
+pkg_bitstore_repo = https://github.com/bdionne/bitstore
+pkg_bitstore_commit = master
+
+PACKAGES += bootstrap
+pkg_bootstrap_name = bootstrap
+pkg_bootstrap_description = A simple, yet powerful Erlang cluster bootstrapping application.
+pkg_bootstrap_homepage = https://github.com/schlagert/bootstrap
+pkg_bootstrap_fetch = git
+pkg_bootstrap_repo = https://github.com/schlagert/bootstrap
+pkg_bootstrap_commit = master
+
+PACKAGES += boss
+pkg_boss_name = boss
+pkg_boss_description = Erlang web MVC, now featuring Comet
+pkg_boss_homepage = https://github.com/ChicagoBoss/ChicagoBoss
+pkg_boss_fetch = git
+pkg_boss_repo = https://github.com/ChicagoBoss/ChicagoBoss
+pkg_boss_commit = master
+
+PACKAGES += boss_db
+pkg_boss_db_name = boss_db
+pkg_boss_db_description = BossDB: a sharded, caching, pooling, evented ORM for Erlang
+pkg_boss_db_homepage = https://github.com/ErlyORM/boss_db
+pkg_boss_db_fetch = git
+pkg_boss_db_repo = https://github.com/ErlyORM/boss_db
+pkg_boss_db_commit = master
+
+PACKAGES += brod
+pkg_brod_name = brod
+pkg_brod_description = Kafka client in Erlang
+pkg_brod_homepage = https://github.com/klarna/brod
+pkg_brod_fetch = git
+pkg_brod_repo = https://github.com/klarna/brod.git
+pkg_brod_commit = master
+
+PACKAGES += bson
+pkg_bson_name = bson
+pkg_bson_description = BSON documents in Erlang, see bsonspec.org
+pkg_bson_homepage = https://github.com/comtihon/bson-erlang
+pkg_bson_fetch = git
+pkg_bson_repo = https://github.com/comtihon/bson-erlang
+pkg_bson_commit = master
+
+PACKAGES += bullet
+pkg_bullet_name = bullet
+pkg_bullet_description = Simple, reliable, efficient streaming for Cowboy.
+pkg_bullet_homepage = http://ninenines.eu
+pkg_bullet_fetch = git
+pkg_bullet_repo = https://github.com/ninenines/bullet
+pkg_bullet_commit = master
+
+PACKAGES += cache
+pkg_cache_name = cache
+pkg_cache_description = Erlang in-memory cache
+pkg_cache_homepage = https://github.com/fogfish/cache
+pkg_cache_fetch = git
+pkg_cache_repo = https://github.com/fogfish/cache
+pkg_cache_commit = master
+
+PACKAGES += cake
+pkg_cake_name = cake
+pkg_cake_description = Really simple terminal colorization
+pkg_cake_homepage = https://github.com/darach/cake-erl
+pkg_cake_fetch = git
+pkg_cake_repo = https://github.com/darach/cake-erl
+pkg_cake_commit = master
+
+PACKAGES += carotene
+pkg_carotene_name = carotene
+pkg_carotene_description = Real-time server
+pkg_carotene_homepage = https://github.com/carotene/carotene
+pkg_carotene_fetch = git
+pkg_carotene_repo = https://github.com/carotene/carotene
+pkg_carotene_commit = master
+
+PACKAGES += cberl
+pkg_cberl_name = cberl
+pkg_cberl_description = NIF based Erlang bindings for Couchbase
+pkg_cberl_homepage = https://github.com/chitika/cberl
+pkg_cberl_fetch = git
+pkg_cberl_repo = https://github.com/chitika/cberl
+pkg_cberl_commit = master
+
+PACKAGES += cecho
+pkg_cecho_name = cecho
+pkg_cecho_description = An ncurses library for Erlang
+pkg_cecho_homepage = https://github.com/mazenharake/cecho
+pkg_cecho_fetch = git
+pkg_cecho_repo = https://github.com/mazenharake/cecho
+pkg_cecho_commit = master
+
+PACKAGES += cferl
+pkg_cferl_name = cferl
+pkg_cferl_description = Rackspace / Open Stack Cloud Files Erlang Client
+pkg_cferl_homepage = https://github.com/ddossot/cferl
+pkg_cferl_fetch = git
+pkg_cferl_repo = https://github.com/ddossot/cferl
+pkg_cferl_commit = master
+
+PACKAGES += chaos_monkey
+pkg_chaos_monkey_name = chaos_monkey
+pkg_chaos_monkey_description = This is The CHAOS MONKEY. It will kill your processes.
+pkg_chaos_monkey_homepage = https://github.com/dLuna/chaos_monkey
+pkg_chaos_monkey_fetch = git
+pkg_chaos_monkey_repo = https://github.com/dLuna/chaos_monkey
+pkg_chaos_monkey_commit = master
+
+PACKAGES += check_node
+pkg_check_node_name = check_node
+pkg_check_node_description = Nagios Scripts for monitoring Riak
+pkg_check_node_homepage = https://github.com/basho-labs/riak_nagios
+pkg_check_node_fetch = git
+pkg_check_node_repo = https://github.com/basho-labs/riak_nagios
+pkg_check_node_commit = master
+
+PACKAGES += chronos
+pkg_chronos_name = chronos
+pkg_chronos_description = Timer module for Erlang that makes it easy to abstract time out of the tests.
+pkg_chronos_homepage = https://github.com/lehoff/chronos
+pkg_chronos_fetch = git
+pkg_chronos_repo = https://github.com/lehoff/chronos
+pkg_chronos_commit = master
+
+PACKAGES += chumak
+pkg_chumak_name = chumak
+pkg_chumak_description = Pure Erlang implementation of ZeroMQ Message Transport Protocol.
+pkg_chumak_homepage = http://choven.ca
+pkg_chumak_fetch = git
+pkg_chumak_repo = https://github.com/chovencorp/chumak
+pkg_chumak_commit = master
+
+PACKAGES += cl
+pkg_cl_name = cl
+pkg_cl_description = OpenCL binding for Erlang
+pkg_cl_homepage = https://github.com/tonyrog/cl
+pkg_cl_fetch = git
+pkg_cl_repo = https://github.com/tonyrog/cl
+pkg_cl_commit = master
+
+PACKAGES += clique
+pkg_clique_name = clique
+pkg_clique_description = CLI Framework for Erlang
+pkg_clique_homepage = https://github.com/basho/clique
+pkg_clique_fetch = git
+pkg_clique_repo = https://github.com/basho/clique
+pkg_clique_commit = develop
+
+PACKAGES += cloudi_core
+pkg_cloudi_core_name = cloudi_core
+pkg_cloudi_core_description = CloudI internal service runtime
+pkg_cloudi_core_homepage = http://cloudi.org/
+pkg_cloudi_core_fetch = git
+pkg_cloudi_core_repo = https://github.com/CloudI/cloudi_core
+pkg_cloudi_core_commit = master
+
+PACKAGES += cloudi_service_api_requests
+pkg_cloudi_service_api_requests_name = cloudi_service_api_requests
+pkg_cloudi_service_api_requests_description = CloudI Service API requests (JSON-RPC/Erlang-term support)
+pkg_cloudi_service_api_requests_homepage = http://cloudi.org/
+pkg_cloudi_service_api_requests_fetch = git
+pkg_cloudi_service_api_requests_repo = https://github.com/CloudI/cloudi_service_api_requests
+pkg_cloudi_service_api_requests_commit = master
+
+PACKAGES += cloudi_service_db
+pkg_cloudi_service_db_name = cloudi_service_db
+pkg_cloudi_service_db_description = CloudI Database (in-memory/testing/generic)
+pkg_cloudi_service_db_homepage = http://cloudi.org/
+pkg_cloudi_service_db_fetch = git
+pkg_cloudi_service_db_repo = https://github.com/CloudI/cloudi_service_db
+pkg_cloudi_service_db_commit = master
+
+PACKAGES += cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_name = cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_description = Cassandra CloudI Service
+pkg_cloudi_service_db_cassandra_homepage = http://cloudi.org/
+pkg_cloudi_service_db_cassandra_fetch = git
+pkg_cloudi_service_db_cassandra_repo = https://github.com/CloudI/cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_commit = master
+
+PACKAGES += cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_name = cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_description = Cassandra CQL CloudI Service
+pkg_cloudi_service_db_cassandra_cql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_cassandra_cql_fetch = git
+pkg_cloudi_service_db_cassandra_cql_repo = https://github.com/CloudI/cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_commit = master
+
+PACKAGES += cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_name = cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_description = CouchDB CloudI Service
+pkg_cloudi_service_db_couchdb_homepage = http://cloudi.org/
+pkg_cloudi_service_db_couchdb_fetch = git
+pkg_cloudi_service_db_couchdb_repo = https://github.com/CloudI/cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_commit = master
+
+PACKAGES += cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_name = cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_description = elasticsearch CloudI Service
+pkg_cloudi_service_db_elasticsearch_homepage = http://cloudi.org/
+pkg_cloudi_service_db_elasticsearch_fetch = git
+pkg_cloudi_service_db_elasticsearch_repo = https://github.com/CloudI/cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_commit = master
+
+PACKAGES += cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_name = cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_description = memcached CloudI Service
+pkg_cloudi_service_db_memcached_homepage = http://cloudi.org/
+pkg_cloudi_service_db_memcached_fetch = git
+pkg_cloudi_service_db_memcached_repo = https://github.com/CloudI/cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_commit = master
+
+PACKAGES += cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_name = cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_description = MySQL CloudI Service
+pkg_cloudi_service_db_mysql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_mysql_fetch = git
+pkg_cloudi_service_db_mysql_repo = https://github.com/CloudI/cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_commit = master
+
+PACKAGES += cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_name = cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_description = PostgreSQL CloudI Service
+pkg_cloudi_service_db_pgsql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_pgsql_fetch = git
+pkg_cloudi_service_db_pgsql_repo = https://github.com/CloudI/cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_commit = master
+
+PACKAGES += cloudi_service_db_riak
+pkg_cloudi_service_db_riak_name = cloudi_service_db_riak
+pkg_cloudi_service_db_riak_description = Riak CloudI Service
+pkg_cloudi_service_db_riak_homepage = http://cloudi.org/
+pkg_cloudi_service_db_riak_fetch = git
+pkg_cloudi_service_db_riak_repo = https://github.com/CloudI/cloudi_service_db_riak
+pkg_cloudi_service_db_riak_commit = master
+
+PACKAGES += cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_name = cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_description = Tokyo Tyrant CloudI Service
+pkg_cloudi_service_db_tokyotyrant_homepage = http://cloudi.org/
+pkg_cloudi_service_db_tokyotyrant_fetch = git
+pkg_cloudi_service_db_tokyotyrant_repo = https://github.com/CloudI/cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_commit = master
+
+PACKAGES += cloudi_service_filesystem
+pkg_cloudi_service_filesystem_name = cloudi_service_filesystem
+pkg_cloudi_service_filesystem_description = Filesystem CloudI Service
+pkg_cloudi_service_filesystem_homepage = http://cloudi.org/
+pkg_cloudi_service_filesystem_fetch = git
+pkg_cloudi_service_filesystem_repo = https://github.com/CloudI/cloudi_service_filesystem
+pkg_cloudi_service_filesystem_commit = master
+
+PACKAGES += cloudi_service_http_client
+pkg_cloudi_service_http_client_name = cloudi_service_http_client
+pkg_cloudi_service_http_client_description = HTTP client CloudI Service
+pkg_cloudi_service_http_client_homepage = http://cloudi.org/
+pkg_cloudi_service_http_client_fetch = git
+pkg_cloudi_service_http_client_repo = https://github.com/CloudI/cloudi_service_http_client
+pkg_cloudi_service_http_client_commit = master
+
+PACKAGES += cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_name = cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_description = cowboy HTTP/HTTPS CloudI Service
+pkg_cloudi_service_http_cowboy_homepage = http://cloudi.org/
+pkg_cloudi_service_http_cowboy_fetch = git
+pkg_cloudi_service_http_cowboy_repo = https://github.com/CloudI/cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_commit = master
+
+PACKAGES += cloudi_service_http_elli
+pkg_cloudi_service_http_elli_name = cloudi_service_http_elli
+pkg_cloudi_service_http_elli_description = elli HTTP CloudI Service
+pkg_cloudi_service_http_elli_homepage = http://cloudi.org/
+pkg_cloudi_service_http_elli_fetch = git
+pkg_cloudi_service_http_elli_repo = https://github.com/CloudI/cloudi_service_http_elli
+pkg_cloudi_service_http_elli_commit = master
+
+PACKAGES += cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_name = cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_description = Map/Reduce CloudI Service
+pkg_cloudi_service_map_reduce_homepage = http://cloudi.org/
+pkg_cloudi_service_map_reduce_fetch = git
+pkg_cloudi_service_map_reduce_repo = https://github.com/CloudI/cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_commit = master
+
+PACKAGES += cloudi_service_oauth1
+pkg_cloudi_service_oauth1_name = cloudi_service_oauth1
+pkg_cloudi_service_oauth1_description = OAuth v1.0 CloudI Service
+pkg_cloudi_service_oauth1_homepage = http://cloudi.org/
+pkg_cloudi_service_oauth1_fetch = git
+pkg_cloudi_service_oauth1_repo = https://github.com/CloudI/cloudi_service_oauth1
+pkg_cloudi_service_oauth1_commit = master
+
+PACKAGES += cloudi_service_queue
+pkg_cloudi_service_queue_name = cloudi_service_queue
+pkg_cloudi_service_queue_description = Persistent Queue Service
+pkg_cloudi_service_queue_homepage = http://cloudi.org/
+pkg_cloudi_service_queue_fetch = git
+pkg_cloudi_service_queue_repo = https://github.com/CloudI/cloudi_service_queue
+pkg_cloudi_service_queue_commit = master
+
+PACKAGES += cloudi_service_quorum
+pkg_cloudi_service_quorum_name = cloudi_service_quorum
+pkg_cloudi_service_quorum_description = CloudI Quorum Service
+pkg_cloudi_service_quorum_homepage = http://cloudi.org/
+pkg_cloudi_service_quorum_fetch = git
+pkg_cloudi_service_quorum_repo = https://github.com/CloudI/cloudi_service_quorum
+pkg_cloudi_service_quorum_commit = master
+
+PACKAGES += cloudi_service_router
+pkg_cloudi_service_router_name = cloudi_service_router
+pkg_cloudi_service_router_description = CloudI Router Service
+pkg_cloudi_service_router_homepage = http://cloudi.org/
+pkg_cloudi_service_router_fetch = git
+pkg_cloudi_service_router_repo = https://github.com/CloudI/cloudi_service_router
+pkg_cloudi_service_router_commit = master
+
+PACKAGES += cloudi_service_tcp
+pkg_cloudi_service_tcp_name = cloudi_service_tcp
+pkg_cloudi_service_tcp_description = TCP CloudI Service
+pkg_cloudi_service_tcp_homepage = http://cloudi.org/
+pkg_cloudi_service_tcp_fetch = git
+pkg_cloudi_service_tcp_repo = https://github.com/CloudI/cloudi_service_tcp
+pkg_cloudi_service_tcp_commit = master
+
+PACKAGES += cloudi_service_timers
+pkg_cloudi_service_timers_name = cloudi_service_timers
+pkg_cloudi_service_timers_description = Timers CloudI Service
+pkg_cloudi_service_timers_homepage = http://cloudi.org/
+pkg_cloudi_service_timers_fetch = git
+pkg_cloudi_service_timers_repo = https://github.com/CloudI/cloudi_service_timers
+pkg_cloudi_service_timers_commit = master
+
+PACKAGES += cloudi_service_udp
+pkg_cloudi_service_udp_name = cloudi_service_udp
+pkg_cloudi_service_udp_description = UDP CloudI Service
+pkg_cloudi_service_udp_homepage = http://cloudi.org/
+pkg_cloudi_service_udp_fetch = git
+pkg_cloudi_service_udp_repo = https://github.com/CloudI/cloudi_service_udp
+pkg_cloudi_service_udp_commit = master
+
+PACKAGES += cloudi_service_validate
+pkg_cloudi_service_validate_name = cloudi_service_validate
+pkg_cloudi_service_validate_description = CloudI Validate Service
+pkg_cloudi_service_validate_homepage = http://cloudi.org/
+pkg_cloudi_service_validate_fetch = git
+pkg_cloudi_service_validate_repo = https://github.com/CloudI/cloudi_service_validate
+pkg_cloudi_service_validate_commit = master
+
+PACKAGES += cloudi_service_zeromq
+pkg_cloudi_service_zeromq_name = cloudi_service_zeromq
+pkg_cloudi_service_zeromq_description = ZeroMQ CloudI Service
+pkg_cloudi_service_zeromq_homepage = http://cloudi.org/
+pkg_cloudi_service_zeromq_fetch = git
+pkg_cloudi_service_zeromq_repo = https://github.com/CloudI/cloudi_service_zeromq
+pkg_cloudi_service_zeromq_commit = master
+
+PACKAGES += cluster_info
+pkg_cluster_info_name = cluster_info
+pkg_cluster_info_description = Fork of Hibari's nifty cluster_info OTP app
+pkg_cluster_info_homepage = https://github.com/basho/cluster_info
+pkg_cluster_info_fetch = git
+pkg_cluster_info_repo = https://github.com/basho/cluster_info
+pkg_cluster_info_commit = master
+
+PACKAGES += color
+pkg_color_name = color
+pkg_color_description = ANSI colors for your Erlang
+pkg_color_homepage = https://github.com/julianduque/erlang-color
+pkg_color_fetch = git
+pkg_color_repo = https://github.com/julianduque/erlang-color
+pkg_color_commit = master
+
+PACKAGES += confetti
+pkg_confetti_name = confetti
+pkg_confetti_description = Erlang configuration provider / application:get_env/2 on steroids
+pkg_confetti_homepage = https://github.com/jtendo/confetti
+pkg_confetti_fetch = git
+pkg_confetti_repo = https://github.com/jtendo/confetti
+pkg_confetti_commit = master
+
+PACKAGES += couchbeam
+pkg_couchbeam_name = couchbeam
+pkg_couchbeam_description = Apache CouchDB client in Erlang
+pkg_couchbeam_homepage = https://github.com/benoitc/couchbeam
+pkg_couchbeam_fetch = git
+pkg_couchbeam_repo = https://github.com/benoitc/couchbeam
+pkg_couchbeam_commit = master
+
+PACKAGES += covertool
+pkg_covertool_name = covertool
+pkg_covertool_description = Tool to convert Erlang cover data files into Cobertura XML reports
+pkg_covertool_homepage = https://github.com/idubrov/covertool
+pkg_covertool_fetch = git
+pkg_covertool_repo = https://github.com/idubrov/covertool
+pkg_covertool_commit = master
+
+PACKAGES += cowboy
+pkg_cowboy_name = cowboy
+pkg_cowboy_description = Small, fast and modular HTTP server.
+pkg_cowboy_homepage = http://ninenines.eu
+pkg_cowboy_fetch = git
+pkg_cowboy_repo = https://github.com/ninenines/cowboy
+pkg_cowboy_commit = 1.0.4
+
+PACKAGES += cowdb
+pkg_cowdb_name = cowdb
+pkg_cowdb_description = Pure Key/Value database library for Erlang Applications
+pkg_cowdb_homepage = https://github.com/refuge/cowdb
+pkg_cowdb_fetch = git
+pkg_cowdb_repo = https://github.com/refuge/cowdb
+pkg_cowdb_commit = master
+
+PACKAGES += cowlib
+pkg_cowlib_name = cowlib
+pkg_cowlib_description = Support library for manipulating Web protocols.
+pkg_cowlib_homepage = http://ninenines.eu
+pkg_cowlib_fetch = git
+pkg_cowlib_repo = https://github.com/ninenines/cowlib
+pkg_cowlib_commit = 1.0.2
+
+PACKAGES += cpg
+pkg_cpg_name = cpg
+pkg_cpg_description = CloudI Process Groups
+pkg_cpg_homepage = https://github.com/okeuday/cpg
+pkg_cpg_fetch = git
+pkg_cpg_repo = https://github.com/okeuday/cpg
+pkg_cpg_commit = master
+
+PACKAGES += cqerl
+pkg_cqerl_name = cqerl
+pkg_cqerl_description = Native Erlang CQL client for Cassandra
+pkg_cqerl_homepage = https://matehat.github.io/cqerl/
+pkg_cqerl_fetch = git
+pkg_cqerl_repo = https://github.com/matehat/cqerl
+pkg_cqerl_commit = master
+
+PACKAGES += cr
+pkg_cr_name = cr
+pkg_cr_description = Chain Replication
+pkg_cr_homepage = https://synrc.com/apps/cr/doc/cr.htm
+pkg_cr_fetch = git
+pkg_cr_repo = https://github.com/spawnproc/cr
+pkg_cr_commit = master
+
+PACKAGES += cuttlefish
+pkg_cuttlefish_name = cuttlefish
+pkg_cuttlefish_description = never lose your childlike sense of wonder baby cuttlefish, promise me?
+pkg_cuttlefish_homepage = https://github.com/basho/cuttlefish
+pkg_cuttlefish_fetch = git
+pkg_cuttlefish_repo = https://github.com/basho/cuttlefish
+pkg_cuttlefish_commit = master
+
+PACKAGES += damocles
+pkg_damocles_name = damocles
+pkg_damocles_description = Erlang library for generating adversarial network conditions for QAing distributed applications/systems on a single Linux box.
+pkg_damocles_homepage = https://github.com/lostcolony/damocles
+pkg_damocles_fetch = git
+pkg_damocles_repo = https://github.com/lostcolony/damocles
+pkg_damocles_commit = master
+
+PACKAGES += debbie
+pkg_debbie_name = debbie
+pkg_debbie_description = .DEB Built In Erlang
+pkg_debbie_homepage = https://github.com/crownedgrouse/debbie
+pkg_debbie_fetch = git
+pkg_debbie_repo = https://github.com/crownedgrouse/debbie
+pkg_debbie_commit = master
+
+PACKAGES += decimal
+pkg_decimal_name = decimal
+pkg_decimal_description = An Erlang decimal arithmetic library
+pkg_decimal_homepage = https://github.com/tim/erlang-decimal
+pkg_decimal_fetch = git
+pkg_decimal_repo = https://github.com/tim/erlang-decimal
+pkg_decimal_commit = master
+
+PACKAGES += detergent
+pkg_detergent_name = detergent
+pkg_detergent_description = An emulsifying Erlang SOAP library
+pkg_detergent_homepage = https://github.com/devinus/detergent
+pkg_detergent_fetch = git
+pkg_detergent_repo = https://github.com/devinus/detergent
+pkg_detergent_commit = master
+
+PACKAGES += detest
+pkg_detest_name = detest
+pkg_detest_description = Tool for running tests on a cluster of Erlang nodes
+pkg_detest_homepage = https://github.com/biokoda/detest
+pkg_detest_fetch = git
+pkg_detest_repo = https://github.com/biokoda/detest
+pkg_detest_commit = master
+
+PACKAGES += dh_date
+pkg_dh_date_name = dh_date
+pkg_dh_date_description = Date formatting / parsing library for Erlang
+pkg_dh_date_homepage = https://github.com/daleharvey/dh_date
+pkg_dh_date_fetch = git
+pkg_dh_date_repo = https://github.com/daleharvey/dh_date
+pkg_dh_date_commit = master
+
+PACKAGES += dirbusterl
+pkg_dirbusterl_name = dirbusterl
+pkg_dirbusterl_description = DirBuster successor in Erlang
+pkg_dirbusterl_homepage = https://github.com/silentsignal/DirBustErl
+pkg_dirbusterl_fetch = git
+pkg_dirbusterl_repo = https://github.com/silentsignal/DirBustErl
+pkg_dirbusterl_commit = master
+
+PACKAGES += dispcount
+pkg_dispcount_name = dispcount
+pkg_dispcount_description = Erlang task dispatcher based on ETS counters.
+pkg_dispcount_homepage = https://github.com/ferd/dispcount
+pkg_dispcount_fetch = git
+pkg_dispcount_repo = https://github.com/ferd/dispcount
+pkg_dispcount_commit = master
+
+PACKAGES += dlhttpc
+pkg_dlhttpc_name = dlhttpc
+pkg_dlhttpc_description = dispcount-based lhttpc fork for massive amounts of requests to limited endpoints
+pkg_dlhttpc_homepage = https://github.com/ferd/dlhttpc
+pkg_dlhttpc_fetch = git
+pkg_dlhttpc_repo = https://github.com/ferd/dlhttpc
+pkg_dlhttpc_commit = master
+
+PACKAGES += dns
+pkg_dns_name = dns
+pkg_dns_description = Erlang DNS library
+pkg_dns_homepage = https://github.com/aetrion/dns_erlang
+pkg_dns_fetch = git
+pkg_dns_repo = https://github.com/aetrion/dns_erlang
+pkg_dns_commit = master
+
+PACKAGES += dnssd
+pkg_dnssd_name = dnssd
+pkg_dnssd_description = Erlang interface to Apple's Bonjour DNS Service Discovery implementation
+pkg_dnssd_homepage = https://github.com/benoitc/dnssd_erlang
+pkg_dnssd_fetch = git
+pkg_dnssd_repo = https://github.com/benoitc/dnssd_erlang
+pkg_dnssd_commit = master
+
+PACKAGES += dynamic_compile
+pkg_dynamic_compile_name = dynamic_compile
+pkg_dynamic_compile_description = Compile and load Erlang modules from string input
+pkg_dynamic_compile_homepage = https://github.com/jkvor/dynamic_compile
+pkg_dynamic_compile_fetch = git
+pkg_dynamic_compile_repo = https://github.com/jkvor/dynamic_compile
+pkg_dynamic_compile_commit = master
+
+PACKAGES += e2
+pkg_e2_name = e2
+pkg_e2_description = Library to simplify writing correct OTP applications.
+pkg_e2_homepage = http://e2project.org
+pkg_e2_fetch = git
+pkg_e2_repo = https://github.com/gar1t/e2
+pkg_e2_commit = master
+
+PACKAGES += eamf
+pkg_eamf_name = eamf
+pkg_eamf_description = eAMF provides Action Message Format (AMF) support for Erlang
+pkg_eamf_homepage = https://github.com/mrinalwadhwa/eamf
+pkg_eamf_fetch = git
+pkg_eamf_repo = https://github.com/mrinalwadhwa/eamf
+pkg_eamf_commit = master
+
+PACKAGES += eavro
+pkg_eavro_name = eavro
+pkg_eavro_description = Apache Avro encoder/decoder
+pkg_eavro_homepage = https://github.com/SIfoxDevTeam/eavro
+pkg_eavro_fetch = git
+pkg_eavro_repo = https://github.com/SIfoxDevTeam/eavro
+pkg_eavro_commit = master
+
+PACKAGES += ecapnp
+pkg_ecapnp_name = ecapnp
+pkg_ecapnp_description = Cap'n Proto library for Erlang
+pkg_ecapnp_homepage = https://github.com/kaos/ecapnp
+pkg_ecapnp_fetch = git
+pkg_ecapnp_repo = https://github.com/kaos/ecapnp
+pkg_ecapnp_commit = master
+
+PACKAGES += econfig
+pkg_econfig_name = econfig
+pkg_econfig_description = simple Erlang config handler using INI files
+pkg_econfig_homepage = https://github.com/benoitc/econfig
+pkg_econfig_fetch = git
+pkg_econfig_repo = https://github.com/benoitc/econfig
+pkg_econfig_commit = master
+
+PACKAGES += edate
+pkg_edate_name = edate
+pkg_edate_description = Date manipulation library for Erlang
+pkg_edate_homepage = https://github.com/dweldon/edate
+pkg_edate_fetch = git
+pkg_edate_repo = https://github.com/dweldon/edate
+pkg_edate_commit = master
+
+PACKAGES += edgar
+pkg_edgar_name = edgar
+pkg_edgar_description = Erlang Does GNU AR
+pkg_edgar_homepage = https://github.com/crownedgrouse/edgar
+pkg_edgar_fetch = git
+pkg_edgar_repo = https://github.com/crownedgrouse/edgar
+pkg_edgar_commit = master
+
+PACKAGES += edis
+pkg_edis_name = edis
+pkg_edis_description = An Erlang implementation of Redis KV Store
+pkg_edis_homepage = http://inaka.github.com/edis/
+pkg_edis_fetch = git
+pkg_edis_repo = https://github.com/inaka/edis
+pkg_edis_commit = master
+
+PACKAGES += edns
+pkg_edns_name = edns
+pkg_edns_description = Erlang/OTP DNS server
+pkg_edns_homepage = https://github.com/hcvst/erlang-dns
+pkg_edns_fetch = git
+pkg_edns_repo = https://github.com/hcvst/erlang-dns
+pkg_edns_commit = master
+
+PACKAGES += edown
+pkg_edown_name = edown
+pkg_edown_description = EDoc extension for generating GitHub-flavored Markdown
+pkg_edown_homepage = https://github.com/uwiger/edown
+pkg_edown_fetch = git
+pkg_edown_repo = https://github.com/uwiger/edown
+pkg_edown_commit = master
+
+PACKAGES += eep
+pkg_eep_name = eep
+pkg_eep_description = The Erlang Easy Profiling (eep) application provides a way to analyze application performance and call hierarchy
+pkg_eep_homepage = https://github.com/virtan/eep
+pkg_eep_fetch = git
+pkg_eep_repo = https://github.com/virtan/eep
+pkg_eep_commit = master
+
+PACKAGES += eep_app
+pkg_eep_app_name = eep_app
+pkg_eep_app_description = Embedded Event Processing
+pkg_eep_app_homepage = https://github.com/darach/eep-erl
+pkg_eep_app_fetch = git
+pkg_eep_app_repo = https://github.com/darach/eep-erl
+pkg_eep_app_commit = master
+
+PACKAGES += efene
+pkg_efene_name = efene
+pkg_efene_description = Alternative syntax for the Erlang Programming Language focusing on simplicity, ease of use and programmer UX
+pkg_efene_homepage = https://github.com/efene/efene
+pkg_efene_fetch = git
+pkg_efene_repo = https://github.com/efene/efene
+pkg_efene_commit = master
+
+PACKAGES += egeoip
+pkg_egeoip_name = egeoip
+pkg_egeoip_description = Erlang IP Geolocation module, currently supporting the MaxMind GeoLite City Database.
+pkg_egeoip_homepage = https://github.com/mochi/egeoip
+pkg_egeoip_fetch = git
+pkg_egeoip_repo = https://github.com/mochi/egeoip
+pkg_egeoip_commit = master
+
+PACKAGES += ehsa
+pkg_ehsa_name = ehsa
+pkg_ehsa_description = Erlang HTTP server basic and digest authentication modules
+pkg_ehsa_homepage = https://bitbucket.org/a12n/ehsa
+pkg_ehsa_fetch = hg
+pkg_ehsa_repo = https://bitbucket.org/a12n/ehsa
+pkg_ehsa_commit = default
+
+PACKAGES += ej
+pkg_ej_name = ej
+pkg_ej_description = Helper module for working with Erlang terms representing JSON
+pkg_ej_homepage = https://github.com/seth/ej
+pkg_ej_fetch = git
+pkg_ej_repo = https://github.com/seth/ej
+pkg_ej_commit = master
+
+PACKAGES += ejabberd
+pkg_ejabberd_name = ejabberd
+pkg_ejabberd_description = Robust, ubiquitous and massively scalable Jabber / XMPP Instant Messaging platform
+pkg_ejabberd_homepage = https://github.com/processone/ejabberd
+pkg_ejabberd_fetch = git
+pkg_ejabberd_repo = https://github.com/processone/ejabberd
+pkg_ejabberd_commit = master
+
+PACKAGES += ejwt
+pkg_ejwt_name = ejwt
+pkg_ejwt_description = Erlang library for JSON Web Tokens
+pkg_ejwt_homepage = https://github.com/artefactop/ejwt
+pkg_ejwt_fetch = git
+pkg_ejwt_repo = https://github.com/artefactop/ejwt
+pkg_ejwt_commit = master
+
+PACKAGES += ekaf
+pkg_ekaf_name = ekaf
+pkg_ekaf_description = A minimal, high-performance Kafka client in Erlang.
+pkg_ekaf_homepage = https://github.com/helpshift/ekaf
+pkg_ekaf_fetch = git
+pkg_ekaf_repo = https://github.com/helpshift/ekaf
+pkg_ekaf_commit = master
+
+PACKAGES += elarm
+pkg_elarm_name = elarm
+pkg_elarm_description = Alarm Manager for Erlang.
+pkg_elarm_homepage = https://github.com/esl/elarm
+pkg_elarm_fetch = git
+pkg_elarm_repo = https://github.com/esl/elarm
+pkg_elarm_commit = master
+
+PACKAGES += eleveldb
+pkg_eleveldb_name = eleveldb
+pkg_eleveldb_description = Erlang LevelDB API
+pkg_eleveldb_homepage = https://github.com/basho/eleveldb
+pkg_eleveldb_fetch = git
+pkg_eleveldb_repo = https://github.com/basho/eleveldb
+pkg_eleveldb_commit = master
+
+PACKAGES += elixir
+pkg_elixir_name = elixir
+pkg_elixir_description = Elixir is a dynamic, functional language designed for building scalable and maintainable applications
+pkg_elixir_homepage = https://elixir-lang.org/
+pkg_elixir_fetch = git
+pkg_elixir_repo = https://github.com/elixir-lang/elixir
+pkg_elixir_commit = master
+
+PACKAGES += elli
+pkg_elli_name = elli
+pkg_elli_description = Simple, robust and performant Erlang web server
+pkg_elli_homepage = https://github.com/elli-lib/elli
+pkg_elli_fetch = git
+pkg_elli_repo = https://github.com/elli-lib/elli
+pkg_elli_commit = master
+
+PACKAGES += elvis
+pkg_elvis_name = elvis
+pkg_elvis_description = Erlang Style Reviewer
+pkg_elvis_homepage = https://github.com/inaka/elvis
+pkg_elvis_fetch = git
+pkg_elvis_repo = https://github.com/inaka/elvis
+pkg_elvis_commit = master
+
+PACKAGES += emagick
+pkg_emagick_name = emagick
+pkg_emagick_description = Wrapper for the GraphicsMagick/ImageMagick command line tool.
+pkg_emagick_homepage = https://github.com/kivra/emagick
+pkg_emagick_fetch = git
+pkg_emagick_repo = https://github.com/kivra/emagick
+pkg_emagick_commit = master
+
+PACKAGES += emysql
+pkg_emysql_name = emysql
+pkg_emysql_description = Stable, pure Erlang MySQL driver.
+pkg_emysql_homepage = https://github.com/Eonblast/Emysql
+pkg_emysql_fetch = git
+pkg_emysql_repo = https://github.com/Eonblast/Emysql
+pkg_emysql_commit = master
+
+PACKAGES += enm
+pkg_enm_name = enm
+pkg_enm_description = Erlang driver for nanomsg
+pkg_enm_homepage = https://github.com/basho/enm
+pkg_enm_fetch = git
+pkg_enm_repo = https://github.com/basho/enm
+pkg_enm_commit = master
+
+PACKAGES += entop
+pkg_entop_name = entop
+pkg_entop_description = A top-like tool for monitoring an Erlang node
+pkg_entop_homepage = https://github.com/mazenharake/entop
+pkg_entop_fetch = git
+pkg_entop_repo = https://github.com/mazenharake/entop
+pkg_entop_commit = master
+
+PACKAGES += epcap
+pkg_epcap_name = epcap
+pkg_epcap_description = Erlang packet capture interface using pcap
+pkg_epcap_homepage = https://github.com/msantos/epcap
+pkg_epcap_fetch = git
+pkg_epcap_repo = https://github.com/msantos/epcap
+pkg_epcap_commit = master
+
+PACKAGES += eper
+pkg_eper_name = eper
+pkg_eper_description = Erlang performance and debugging tools.
+pkg_eper_homepage = https://github.com/massemanet/eper
+pkg_eper_fetch = git
+pkg_eper_repo = https://github.com/massemanet/eper
+pkg_eper_commit = master
+
+PACKAGES += epgsql
+pkg_epgsql_name = epgsql
+pkg_epgsql_description = Erlang PostgreSQL client library.
+pkg_epgsql_homepage = https://github.com/epgsql/epgsql
+pkg_epgsql_fetch = git
+pkg_epgsql_repo = https://github.com/epgsql/epgsql
+pkg_epgsql_commit = master
+
+PACKAGES += episcina
+pkg_episcina_name = episcina
+pkg_episcina_description = A simple, non-intrusive resource pool for connections
+pkg_episcina_homepage = https://github.com/erlware/episcina
+pkg_episcina_fetch = git
+pkg_episcina_repo = https://github.com/erlware/episcina
+pkg_episcina_commit = master
+
+PACKAGES += eplot
+pkg_eplot_name = eplot
+pkg_eplot_description = A plot engine written in Erlang.
+pkg_eplot_homepage = https://github.com/psyeugenic/eplot
+pkg_eplot_fetch = git
+pkg_eplot_repo = https://github.com/psyeugenic/eplot
+pkg_eplot_commit = master
+
+PACKAGES += epocxy
+pkg_epocxy_name = epocxy
+pkg_epocxy_description = Erlang Patterns of Concurrency
+pkg_epocxy_homepage = https://github.com/duomark/epocxy
+pkg_epocxy_fetch = git
+pkg_epocxy_repo = https://github.com/duomark/epocxy
+pkg_epocxy_commit = master
+
+PACKAGES += epubnub
+pkg_epubnub_name = epubnub
+pkg_epubnub_description = Erlang PubNub API
+pkg_epubnub_homepage = https://github.com/tsloughter/epubnub
+pkg_epubnub_fetch = git
+pkg_epubnub_repo = https://github.com/tsloughter/epubnub
+pkg_epubnub_commit = master
+
+PACKAGES += eqm
+pkg_eqm_name = eqm
+pkg_eqm_description = Erlang pub sub with supply-demand channels
+pkg_eqm_homepage = https://github.com/loucash/eqm
+pkg_eqm_fetch = git
+pkg_eqm_repo = https://github.com/loucash/eqm
+pkg_eqm_commit = master
+
+PACKAGES += eredis
+pkg_eredis_name = eredis
+pkg_eredis_description = Erlang Redis client
+pkg_eredis_homepage = https://github.com/wooga/eredis
+pkg_eredis_fetch = git
+pkg_eredis_repo = https://github.com/wooga/eredis
+pkg_eredis_commit = master
+
+PACKAGES += eredis_pool
+pkg_eredis_pool_name = eredis_pool
+pkg_eredis_pool_description = eredis_pool is a pool of Redis clients, using eredis and poolboy.
+pkg_eredis_pool_homepage = https://github.com/hiroeorz/eredis_pool
+pkg_eredis_pool_fetch = git
+pkg_eredis_pool_repo = https://github.com/hiroeorz/eredis_pool
+pkg_eredis_pool_commit = master
+
+PACKAGES += erl_streams
+pkg_erl_streams_name = erl_streams
+pkg_erl_streams_description = Streams in Erlang
+pkg_erl_streams_homepage = https://github.com/epappas/erl_streams
+pkg_erl_streams_fetch = git
+pkg_erl_streams_repo = https://github.com/epappas/erl_streams
+pkg_erl_streams_commit = master
+
+PACKAGES += erlang_cep
+pkg_erlang_cep_name = erlang_cep
+pkg_erlang_cep_description = A basic CEP package written in Erlang
+pkg_erlang_cep_homepage = https://github.com/danmacklin/erlang_cep
+pkg_erlang_cep_fetch = git
+pkg_erlang_cep_repo = https://github.com/danmacklin/erlang_cep
+pkg_erlang_cep_commit = master
+
+PACKAGES += erlang_js
+pkg_erlang_js_name = erlang_js
+pkg_erlang_js_description = A linked-in driver for Erlang to Mozilla's SpiderMonkey JavaScript runtime.
+pkg_erlang_js_homepage = https://github.com/basho/erlang_js
+pkg_erlang_js_fetch = git
+pkg_erlang_js_repo = https://github.com/basho/erlang_js
+pkg_erlang_js_commit = master
+
+PACKAGES += erlang_localtime
+pkg_erlang_localtime_name = erlang_localtime
+pkg_erlang_localtime_description = Erlang library for conversion from one local time to another
+pkg_erlang_localtime_homepage = https://github.com/dmitryme/erlang_localtime
+pkg_erlang_localtime_fetch = git
+pkg_erlang_localtime_repo = https://github.com/dmitryme/erlang_localtime
+pkg_erlang_localtime_commit = master
+
+PACKAGES += erlang_smtp
+pkg_erlang_smtp_name = erlang_smtp
+pkg_erlang_smtp_description = Erlang SMTP and POP3 server code.
+pkg_erlang_smtp_homepage = https://github.com/tonyg/erlang-smtp
+pkg_erlang_smtp_fetch = git
+pkg_erlang_smtp_repo = https://github.com/tonyg/erlang-smtp
+pkg_erlang_smtp_commit = master
+
+PACKAGES += erlang_term
+pkg_erlang_term_name = erlang_term
+pkg_erlang_term_description = Erlang Term Info
+pkg_erlang_term_homepage = https://github.com/okeuday/erlang_term
+pkg_erlang_term_fetch = git
+pkg_erlang_term_repo = https://github.com/okeuday/erlang_term
+pkg_erlang_term_commit = master
+
+PACKAGES += erlastic_search
+pkg_erlastic_search_name = erlastic_search
+pkg_erlastic_search_description = An Erlang app for communicating with Elasticsearch's REST interface.
+pkg_erlastic_search_homepage = https://github.com/tsloughter/erlastic_search
+pkg_erlastic_search_fetch = git
+pkg_erlastic_search_repo = https://github.com/tsloughter/erlastic_search
+pkg_erlastic_search_commit = master
+
+PACKAGES += erlasticsearch
+pkg_erlasticsearch_name = erlasticsearch
+pkg_erlasticsearch_description = Erlang Thrift interface to Elasticsearch
+pkg_erlasticsearch_homepage = https://github.com/dieswaytoofast/erlasticsearch
+pkg_erlasticsearch_fetch = git
+pkg_erlasticsearch_repo = https://github.com/dieswaytoofast/erlasticsearch
+pkg_erlasticsearch_commit = master
+
+PACKAGES += erlbrake
+pkg_erlbrake_name = erlbrake
+pkg_erlbrake_description = Erlang Airbrake notification client
+pkg_erlbrake_homepage = https://github.com/kenpratt/erlbrake
+pkg_erlbrake_fetch = git
+pkg_erlbrake_repo = https://github.com/kenpratt/erlbrake
+pkg_erlbrake_commit = master
+
+PACKAGES += erlcloud
+pkg_erlcloud_name = erlcloud
+pkg_erlcloud_description = Cloud Computing library for erlang (Amazon EC2, S3, SQS, SimpleDB, Mechanical Turk, ELB)
+pkg_erlcloud_homepage = https://github.com/gleber/erlcloud
+pkg_erlcloud_fetch = git
+pkg_erlcloud_repo = https://github.com/gleber/erlcloud
+pkg_erlcloud_commit = master
+
+PACKAGES += erlcron
+pkg_erlcron_name = erlcron
+pkg_erlcron_description = Erlang cronish system
+pkg_erlcron_homepage = https://github.com/erlware/erlcron
+pkg_erlcron_fetch = git
+pkg_erlcron_repo = https://github.com/erlware/erlcron
+pkg_erlcron_commit = master
+
+PACKAGES += erldb
+pkg_erldb_name = erldb
+pkg_erldb_description = ORM (Object-relational mapping) application implemented in Erlang
+pkg_erldb_homepage = http://erldb.org
+pkg_erldb_fetch = git
+pkg_erldb_repo = https://github.com/erldb/erldb
+pkg_erldb_commit = master
+
+PACKAGES += erldis
+pkg_erldis_name = erldis
+pkg_erldis_description = Redis Erlang client library
+pkg_erldis_homepage = https://github.com/cstar/erldis
+pkg_erldis_fetch = git
+pkg_erldis_repo = https://github.com/cstar/erldis
+pkg_erldis_commit = master
+
+PACKAGES += erldns
+pkg_erldns_name = erldns
+pkg_erldns_description = DNS server, in Erlang.
+pkg_erldns_homepage = https://github.com/aetrion/erl-dns
+pkg_erldns_fetch = git
+pkg_erldns_repo = https://github.com/aetrion/erl-dns
+pkg_erldns_commit = master
+
+PACKAGES += erldocker
+pkg_erldocker_name = erldocker
+pkg_erldocker_description = Docker Remote API client for Erlang
+pkg_erldocker_homepage = https://github.com/proger/erldocker
+pkg_erldocker_fetch = git
+pkg_erldocker_repo = https://github.com/proger/erldocker
+pkg_erldocker_commit = master
+
+PACKAGES += erlfsmon
+pkg_erlfsmon_name = erlfsmon
+pkg_erlfsmon_description = Erlang filesystem event watcher for Linux and OSX
+pkg_erlfsmon_homepage = https://github.com/proger/erlfsmon
+pkg_erlfsmon_fetch = git
+pkg_erlfsmon_repo = https://github.com/proger/erlfsmon
+pkg_erlfsmon_commit = master
+
+PACKAGES += erlgit
+pkg_erlgit_name = erlgit
+pkg_erlgit_description = Erlang convenience wrapper around the git executable
+pkg_erlgit_homepage = https://github.com/gleber/erlgit
+pkg_erlgit_fetch = git
+pkg_erlgit_repo = https://github.com/gleber/erlgit
+pkg_erlgit_commit = master
+
+PACKAGES += erlguten
+pkg_erlguten_name = erlguten
+pkg_erlguten_description = ErlGuten is a system for high-quality typesetting, written purely in Erlang.
+pkg_erlguten_homepage = https://github.com/richcarl/erlguten
+pkg_erlguten_fetch = git
+pkg_erlguten_repo = https://github.com/richcarl/erlguten
+pkg_erlguten_commit = master
+
+PACKAGES += erlmc
+pkg_erlmc_name = erlmc
+pkg_erlmc_description = Erlang memcached binary protocol client
+pkg_erlmc_homepage = https://github.com/jkvor/erlmc
+pkg_erlmc_fetch = git
+pkg_erlmc_repo = https://github.com/jkvor/erlmc
+pkg_erlmc_commit = master
+
+PACKAGES += erlmongo
+pkg_erlmongo_name = erlmongo
+pkg_erlmongo_description = Record-based Erlang driver for MongoDB with GridFS support
+pkg_erlmongo_homepage = https://github.com/SergejJurecko/erlmongo
+pkg_erlmongo_fetch = git
+pkg_erlmongo_repo = https://github.com/SergejJurecko/erlmongo
+pkg_erlmongo_commit = master
+
+PACKAGES += erlog
+pkg_erlog_name = erlog
+pkg_erlog_description = Prolog interpreter in and for Erlang
+pkg_erlog_homepage = https://github.com/rvirding/erlog
+pkg_erlog_fetch = git
+pkg_erlog_repo = https://github.com/rvirding/erlog
+pkg_erlog_commit = master
+
+PACKAGES += erlpass
+pkg_erlpass_name = erlpass
+pkg_erlpass_description = A library to handle password hashing and changing in a safe manner, independent of any kind of storage whatsoever.
+pkg_erlpass_homepage = https://github.com/ferd/erlpass
+pkg_erlpass_fetch = git
+pkg_erlpass_repo = https://github.com/ferd/erlpass
+pkg_erlpass_commit = master
+
+PACKAGES += erlport
+pkg_erlport_name = erlport
+pkg_erlport_description = ErlPort - connect Erlang to other languages
+pkg_erlport_homepage = https://github.com/hdima/erlport
+pkg_erlport_fetch = git
+pkg_erlport_repo = https://github.com/hdima/erlport
+pkg_erlport_commit = master
+
+PACKAGES += erlsh
+pkg_erlsh_name = erlsh
+pkg_erlsh_description = Erlang shell tools
+pkg_erlsh_homepage = https://github.com/proger/erlsh
+pkg_erlsh_fetch = git
+pkg_erlsh_repo = https://github.com/proger/erlsh
+pkg_erlsh_commit = master
+
+PACKAGES += erlsha2
+pkg_erlsha2_name = erlsha2
+pkg_erlsha2_description = SHA-224, SHA-256, SHA-384, SHA-512 implemented in Erlang NIFs.
+pkg_erlsha2_homepage = https://github.com/vinoski/erlsha2
+pkg_erlsha2_fetch = git
+pkg_erlsha2_repo = https://github.com/vinoski/erlsha2
+pkg_erlsha2_commit = master
+
+PACKAGES += erlsom
+pkg_erlsom_name = erlsom
+pkg_erlsom_description = XML parser for Erlang
+pkg_erlsom_homepage = https://github.com/willemdj/erlsom
+pkg_erlsom_fetch = git
+pkg_erlsom_repo = https://github.com/willemdj/erlsom
+pkg_erlsom_commit = master
+
+PACKAGES += erlubi
+pkg_erlubi_name = erlubi
+pkg_erlubi_description = Ubigraph Erlang Client (and Process Visualizer)
+pkg_erlubi_homepage = https://github.com/krestenkrab/erlubi
+pkg_erlubi_fetch = git
+pkg_erlubi_repo = https://github.com/krestenkrab/erlubi
+pkg_erlubi_commit = master
+
+PACKAGES += erlvolt
+pkg_erlvolt_name = erlvolt
+pkg_erlvolt_description = VoltDB Erlang Client Driver
+pkg_erlvolt_homepage = https://github.com/VoltDB/voltdb-client-erlang
+pkg_erlvolt_fetch = git
+pkg_erlvolt_repo = https://github.com/VoltDB/voltdb-client-erlang
+pkg_erlvolt_commit = master
+
+PACKAGES += erlware_commons
+pkg_erlware_commons_name = erlware_commons
+pkg_erlware_commons_description = Erlware Commons is an Erlware project focused on all aspects of reusable Erlang components.
+pkg_erlware_commons_homepage = https://github.com/erlware/erlware_commons
+pkg_erlware_commons_fetch = git
+pkg_erlware_commons_repo = https://github.com/erlware/erlware_commons
+pkg_erlware_commons_commit = master
+
+PACKAGES += erlydtl
+pkg_erlydtl_name = erlydtl
+pkg_erlydtl_description = Django Template Language for Erlang.
+pkg_erlydtl_homepage = https://github.com/erlydtl/erlydtl
+pkg_erlydtl_fetch = git
+pkg_erlydtl_repo = https://github.com/erlydtl/erlydtl
+pkg_erlydtl_commit = master
+
+PACKAGES += errd
+pkg_errd_name = errd
+pkg_errd_description = Erlang RRDTool library
+pkg_errd_homepage = https://github.com/archaelus/errd
+pkg_errd_fetch = git
+pkg_errd_repo = https://github.com/archaelus/errd
+pkg_errd_commit = master
+
+PACKAGES += erserve
+pkg_erserve_name = erserve
+pkg_erserve_description = Erlang/Rserve communication interface
+pkg_erserve_homepage = https://github.com/del/erserve
+pkg_erserve_fetch = git
+pkg_erserve_repo = https://github.com/del/erserve
+pkg_erserve_commit = master
+
+PACKAGES += erwa
+pkg_erwa_name = erwa
+pkg_erwa_description = A WAMP router and client written in Erlang.
+pkg_erwa_homepage = https://github.com/bwegh/erwa
+pkg_erwa_fetch = git
+pkg_erwa_repo = https://github.com/bwegh/erwa
+pkg_erwa_commit = master
+
+PACKAGES += escalus
+pkg_escalus_name = escalus
+pkg_escalus_description = An XMPP client library in Erlang for conveniently testing XMPP servers
+pkg_escalus_homepage = https://github.com/esl/escalus
+pkg_escalus_fetch = git
+pkg_escalus_repo = https://github.com/esl/escalus
+pkg_escalus_commit = master
+
+PACKAGES += esh_mk
+pkg_esh_mk_name = esh_mk
+pkg_esh_mk_description = esh template engine plugin for erlang.mk
+pkg_esh_mk_homepage = https://github.com/crownedgrouse/esh.mk
+pkg_esh_mk_fetch = git
+pkg_esh_mk_repo = https://github.com/crownedgrouse/esh.mk.git
+pkg_esh_mk_commit = master
+
+PACKAGES += espec
+pkg_espec_name = espec
+pkg_espec_description = ESpec: Behaviour-driven development framework for Erlang
+pkg_espec_homepage = https://github.com/lucaspiller/espec
+pkg_espec_fetch = git
+pkg_espec_repo = https://github.com/lucaspiller/espec
+pkg_espec_commit = master
+
+PACKAGES += estatsd
+pkg_estatsd_name = estatsd
+pkg_estatsd_description = Erlang stats aggregation app that periodically flushes data to Graphite
+pkg_estatsd_homepage = https://github.com/RJ/estatsd
+pkg_estatsd_fetch = git
+pkg_estatsd_repo = https://github.com/RJ/estatsd
+pkg_estatsd_commit = master
+
+PACKAGES += etap
+pkg_etap_name = etap
+pkg_etap_description = etap is a simple Erlang testing library that provides TAP-compliant output.
+pkg_etap_homepage = https://github.com/ngerakines/etap
+pkg_etap_fetch = git
+pkg_etap_repo = https://github.com/ngerakines/etap
+pkg_etap_commit = master
+
+PACKAGES += etest
+pkg_etest_name = etest
+pkg_etest_description = A lightweight, convention-over-configuration test framework for Erlang
+pkg_etest_homepage = https://github.com/wooga/etest
+pkg_etest_fetch = git
+pkg_etest_repo = https://github.com/wooga/etest
+pkg_etest_commit = master
+
+PACKAGES += etest_http
+pkg_etest_http_name = etest_http
+pkg_etest_http_description = etest assertions around HTTP (client-side)
+pkg_etest_http_homepage = https://github.com/wooga/etest_http
+pkg_etest_http_fetch = git
+pkg_etest_http_repo = https://github.com/wooga/etest_http
+pkg_etest_http_commit = master
+
+PACKAGES += etoml
+pkg_etoml_name = etoml
+pkg_etoml_description = TOML language parser for Erlang
+pkg_etoml_homepage = https://github.com/kalta/etoml
+pkg_etoml_fetch = git
+pkg_etoml_repo = https://github.com/kalta/etoml
+pkg_etoml_commit = master
+
+PACKAGES += eunit
+pkg_eunit_name = eunit
+pkg_eunit_description = The EUnit lightweight unit testing framework for Erlang - this is the canonical development repository.
+pkg_eunit_homepage = https://github.com/richcarl/eunit
+pkg_eunit_fetch = git
+pkg_eunit_repo = https://github.com/richcarl/eunit
+pkg_eunit_commit = master
+
+PACKAGES += eunit_formatters
+pkg_eunit_formatters_name = eunit_formatters
+pkg_eunit_formatters_description = Because eunit's output sucks. Let's make it better.
+pkg_eunit_formatters_homepage = https://github.com/seancribbs/eunit_formatters
+pkg_eunit_formatters_fetch = git
+pkg_eunit_formatters_repo = https://github.com/seancribbs/eunit_formatters
+pkg_eunit_formatters_commit = master
+
+PACKAGES += euthanasia
+pkg_euthanasia_name = euthanasia
+pkg_euthanasia_description = Merciful killer for your Erlang processes
+pkg_euthanasia_homepage = https://github.com/doubleyou/euthanasia
+pkg_euthanasia_fetch = git
+pkg_euthanasia_repo = https://github.com/doubleyou/euthanasia
+pkg_euthanasia_commit = master
+
+PACKAGES += evum
+pkg_evum_name = evum
+pkg_evum_description = Spawn Linux VMs as Erlang processes in the Erlang VM
+pkg_evum_homepage = https://github.com/msantos/evum
+pkg_evum_fetch = git
+pkg_evum_repo = https://github.com/msantos/evum
+pkg_evum_commit = master
+
+PACKAGES += exec
+pkg_exec_name = erlexec
+pkg_exec_description = Execute and control OS processes from Erlang/OTP.
+pkg_exec_homepage = http://saleyn.github.com/erlexec
+pkg_exec_fetch = git
+pkg_exec_repo = https://github.com/saleyn/erlexec
+pkg_exec_commit = master
+
+PACKAGES += exml
+pkg_exml_name = exml
+pkg_exml_description = XML parsing library in Erlang
+pkg_exml_homepage = https://github.com/paulgray/exml
+pkg_exml_fetch = git
+pkg_exml_repo = https://github.com/paulgray/exml
+pkg_exml_commit = master
+
+PACKAGES += exometer
+pkg_exometer_name = exometer
+pkg_exometer_description = Basic measurement objects and probe behavior
+pkg_exometer_homepage = https://github.com/Feuerlabs/exometer
+pkg_exometer_fetch = git
+pkg_exometer_repo = https://github.com/Feuerlabs/exometer
+pkg_exometer_commit = master
+
+PACKAGES += exs1024
+pkg_exs1024_name = exs1024
+pkg_exs1024_description = Xorshift1024star pseudo random number generator for Erlang.
+pkg_exs1024_homepage = https://github.com/jj1bdx/exs1024
+pkg_exs1024_fetch = git
+pkg_exs1024_repo = https://github.com/jj1bdx/exs1024
+pkg_exs1024_commit = master
+
+PACKAGES += exs64
+pkg_exs64_name = exs64
+pkg_exs64_description = Xorshift64star pseudo random number generator for Erlang.
+pkg_exs64_homepage = https://github.com/jj1bdx/exs64
+pkg_exs64_fetch = git
+pkg_exs64_repo = https://github.com/jj1bdx/exs64
+pkg_exs64_commit = master
+
+PACKAGES += exsplus116
+pkg_exsplus116_name = exsplus116
+pkg_exsplus116_description = Xorshift116plus for Erlang
+pkg_exsplus116_homepage = https://github.com/jj1bdx/exsplus116
+pkg_exsplus116_fetch = git
+pkg_exsplus116_repo = https://github.com/jj1bdx/exsplus116
+pkg_exsplus116_commit = master
+
+PACKAGES += exsplus128
+pkg_exsplus128_name = exsplus128
+pkg_exsplus128_description = Xorshift128plus pseudo random number generator for Erlang.
+pkg_exsplus128_homepage = https://github.com/jj1bdx/exsplus128
+pkg_exsplus128_fetch = git
+pkg_exsplus128_repo = https://github.com/jj1bdx/exsplus128
+pkg_exsplus128_commit = master
+
+PACKAGES += ezmq
+pkg_ezmq_name = ezmq
+pkg_ezmq_description = ZeroMQ implemented in Erlang
+pkg_ezmq_homepage = https://github.com/RoadRunnr/ezmq
+pkg_ezmq_fetch = git
+pkg_ezmq_repo = https://github.com/RoadRunnr/ezmq
+pkg_ezmq_commit = master
+
+PACKAGES += ezmtp
+pkg_ezmtp_name = ezmtp
+pkg_ezmtp_description = ZMTP protocol in pure Erlang.
+pkg_ezmtp_homepage = https://github.com/a13x/ezmtp
+pkg_ezmtp_fetch = git
+pkg_ezmtp_repo = https://github.com/a13x/ezmtp
+pkg_ezmtp_commit = master
+
+PACKAGES += fast_disk_log
+pkg_fast_disk_log_name = fast_disk_log
+pkg_fast_disk_log_description = Pool-based asynchronous Erlang disk logger
+pkg_fast_disk_log_homepage = https://github.com/lpgauth/fast_disk_log
+pkg_fast_disk_log_fetch = git
+pkg_fast_disk_log_repo = https://github.com/lpgauth/fast_disk_log
+pkg_fast_disk_log_commit = master
+
+PACKAGES += feeder
+pkg_feeder_name = feeder
+pkg_feeder_description = Stream parse RSS and Atom formatted XML feeds.
+pkg_feeder_homepage = https://github.com/michaelnisi/feeder
+pkg_feeder_fetch = git
+pkg_feeder_repo = https://github.com/michaelnisi/feeder
+pkg_feeder_commit = master
+
+PACKAGES += find_crate
+pkg_find_crate_name = find_crate
+pkg_find_crate_description = Find Rust libs and exes in Erlang application priv directory
+pkg_find_crate_homepage = https://github.com/goertzenator/find_crate
+pkg_find_crate_fetch = git
+pkg_find_crate_repo = https://github.com/goertzenator/find_crate
+pkg_find_crate_commit = master
+
+PACKAGES += fix
+pkg_fix_name = fix
+pkg_fix_description = Implementation of the FIX protocol (http://fixprotocol.org/).
+pkg_fix_homepage = https://github.com/maxlapshin/fix
+pkg_fix_fetch = git
+pkg_fix_repo = https://github.com/maxlapshin/fix
+pkg_fix_commit = master
+
+PACKAGES += flower
+pkg_flower_name = flower
+pkg_flower_description = FlowER - an Erlang OpenFlow development platform
+pkg_flower_homepage = https://github.com/travelping/flower
+pkg_flower_fetch = git
+pkg_flower_repo = https://github.com/travelping/flower
+pkg_flower_commit = master
+
+PACKAGES += fn
+pkg_fn_name = fn
+pkg_fn_description = Function utilities for Erlang
+pkg_fn_homepage = https://github.com/reiddraper/fn
+pkg_fn_fetch = git
+pkg_fn_repo = https://github.com/reiddraper/fn
+pkg_fn_commit = master
+
+PACKAGES += folsom
+pkg_folsom_name = folsom
+pkg_folsom_description = Expose Erlang Events and Metrics
+pkg_folsom_homepage = https://github.com/boundary/folsom
+pkg_folsom_fetch = git
+pkg_folsom_repo = https://github.com/boundary/folsom
+pkg_folsom_commit = master
+
+PACKAGES += folsom_cowboy
+pkg_folsom_cowboy_name = folsom_cowboy
+pkg_folsom_cowboy_description = A Cowboy-based Folsom HTTP wrapper.
+pkg_folsom_cowboy_homepage = https://github.com/boundary/folsom_cowboy
+pkg_folsom_cowboy_fetch = git
+pkg_folsom_cowboy_repo = https://github.com/boundary/folsom_cowboy
+pkg_folsom_cowboy_commit = master
+
+PACKAGES += folsomite
+pkg_folsomite_name = folsomite
+pkg_folsomite_description = Blow up your Graphite / Riemann server with Folsom metrics
+pkg_folsomite_homepage = https://github.com/campanja/folsomite
+pkg_folsomite_fetch = git
+pkg_folsomite_repo = https://github.com/campanja/folsomite
+pkg_folsomite_commit = master
+
+PACKAGES += fs
+pkg_fs_name = fs
+pkg_fs_description = Erlang FileSystem Listener
+pkg_fs_homepage = https://github.com/synrc/fs
+pkg_fs_fetch = git
+pkg_fs_repo = https://github.com/synrc/fs
+pkg_fs_commit = master
+
+PACKAGES += fuse
+pkg_fuse_name = fuse
+pkg_fuse_description = A Circuit Breaker for Erlang
+pkg_fuse_homepage = https://github.com/jlouis/fuse
+pkg_fuse_fetch = git
+pkg_fuse_repo = https://github.com/jlouis/fuse
+pkg_fuse_commit = master
+
+PACKAGES += gcm
+pkg_gcm_name = gcm
+pkg_gcm_description = An Erlang application for Google Cloud Messaging
+pkg_gcm_homepage = https://github.com/pdincau/gcm-erlang
+pkg_gcm_fetch = git
+pkg_gcm_repo = https://github.com/pdincau/gcm-erlang
+pkg_gcm_commit = master
+
+PACKAGES += gcprof
+pkg_gcprof_name = gcprof
+pkg_gcprof_description = Garbage Collection profiler for Erlang
+pkg_gcprof_homepage = https://github.com/knutin/gcprof
+pkg_gcprof_fetch = git
+pkg_gcprof_repo = https://github.com/knutin/gcprof
+pkg_gcprof_commit = master
+
+PACKAGES += geas
+pkg_geas_name = geas
+pkg_geas_description = Guess Erlang Application Scattering
+pkg_geas_homepage = https://github.com/crownedgrouse/geas
+pkg_geas_fetch = git
+pkg_geas_repo = https://github.com/crownedgrouse/geas
+pkg_geas_commit = master
+
+PACKAGES += geef
+pkg_geef_name = geef
+pkg_geef_description = Git NEEEEF (Erlang NIF)
+pkg_geef_homepage = https://github.com/carlosmn/geef
+pkg_geef_fetch = git
+pkg_geef_repo = https://github.com/carlosmn/geef
+pkg_geef_commit = master
+
+PACKAGES += gen_coap
+pkg_gen_coap_name = gen_coap
+pkg_gen_coap_description = Generic Erlang CoAP Client/Server
+pkg_gen_coap_homepage = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_fetch = git
+pkg_gen_coap_repo = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_commit = master
+
+PACKAGES += gen_cycle
+pkg_gen_cycle_name = gen_cycle
+pkg_gen_cycle_description = Simple, generic OTP behaviour for recurring tasks
+pkg_gen_cycle_homepage = https://github.com/aerosol/gen_cycle
+pkg_gen_cycle_fetch = git
+pkg_gen_cycle_repo = https://github.com/aerosol/gen_cycle
+pkg_gen_cycle_commit = develop
+
+PACKAGES += gen_icmp
+pkg_gen_icmp_name = gen_icmp
+pkg_gen_icmp_description = Erlang interface to ICMP sockets
+pkg_gen_icmp_homepage = https://github.com/msantos/gen_icmp
+pkg_gen_icmp_fetch = git
+pkg_gen_icmp_repo = https://github.com/msantos/gen_icmp
+pkg_gen_icmp_commit = master
+
+PACKAGES += gen_leader
+pkg_gen_leader_name = gen_leader
+pkg_gen_leader_description = leader election behavior
+pkg_gen_leader_homepage = https://github.com/garret-smith/gen_leader_revival
+pkg_gen_leader_fetch = git
+pkg_gen_leader_repo = https://github.com/garret-smith/gen_leader_revival
+pkg_gen_leader_commit = master
+
+PACKAGES += gen_nb_server
+pkg_gen_nb_server_name = gen_nb_server
+pkg_gen_nb_server_description = OTP behavior for writing non-blocking servers
+pkg_gen_nb_server_homepage = https://github.com/kevsmith/gen_nb_server
+pkg_gen_nb_server_fetch = git
+pkg_gen_nb_server_repo = https://github.com/kevsmith/gen_nb_server
+pkg_gen_nb_server_commit = master
+
+PACKAGES += gen_paxos
+pkg_gen_paxos_name = gen_paxos
+pkg_gen_paxos_description = An Erlang/OTP-style implementation of the Paxos distributed consensus protocol
+pkg_gen_paxos_homepage = https://github.com/gburd/gen_paxos
+pkg_gen_paxos_fetch = git
+pkg_gen_paxos_repo = https://github.com/gburd/gen_paxos
+pkg_gen_paxos_commit = master
+
+PACKAGES += gen_rpc
+pkg_gen_rpc_name = gen_rpc
+pkg_gen_rpc_description = A scalable RPC library for Erlang-VM based languages
+pkg_gen_rpc_homepage = https://github.com/priestjim/gen_rpc
+pkg_gen_rpc_fetch = git
+pkg_gen_rpc_repo = https://github.com/priestjim/gen_rpc.git
+pkg_gen_rpc_commit = master
+
+PACKAGES += gen_smtp
+pkg_gen_smtp_name = gen_smtp
+pkg_gen_smtp_description = A generic Erlang SMTP server and client that can be extended via callback modules
+pkg_gen_smtp_homepage = https://github.com/Vagabond/gen_smtp
+pkg_gen_smtp_fetch = git
+pkg_gen_smtp_repo = https://github.com/Vagabond/gen_smtp
+pkg_gen_smtp_commit = master
+
+PACKAGES += gen_tracker
+pkg_gen_tracker_name = gen_tracker
+pkg_gen_tracker_description = Supervisor with ETS handling of children and their metadata
+pkg_gen_tracker_homepage = https://github.com/erlyvideo/gen_tracker
+pkg_gen_tracker_fetch = git
+pkg_gen_tracker_repo = https://github.com/erlyvideo/gen_tracker
+pkg_gen_tracker_commit = master
+
+PACKAGES += gen_unix
+pkg_gen_unix_name = gen_unix
+pkg_gen_unix_description = Erlang Unix socket interface
+pkg_gen_unix_homepage = https://github.com/msantos/gen_unix
+pkg_gen_unix_fetch = git
+pkg_gen_unix_repo = https://github.com/msantos/gen_unix
+pkg_gen_unix_commit = master
+
+PACKAGES += geode
+pkg_geode_name = geode
+pkg_geode_description = Geohash/proximity lookup in pure, uncut Erlang.
+pkg_geode_homepage = https://github.com/bradfordw/geode
+pkg_geode_fetch = git
+pkg_geode_repo = https://github.com/bradfordw/geode
+pkg_geode_commit = master
+
+PACKAGES += getopt
+pkg_getopt_name = getopt
+pkg_getopt_description = Module to parse command line arguments using the GNU getopt syntax
+pkg_getopt_homepage = https://github.com/jcomellas/getopt
+pkg_getopt_fetch = git
+pkg_getopt_repo = https://github.com/jcomellas/getopt
+pkg_getopt_commit = master
+
+PACKAGES += gettext
+pkg_gettext_name = gettext
+pkg_gettext_description = Erlang internationalization library.
+pkg_gettext_homepage = https://github.com/etnt/gettext
+pkg_gettext_fetch = git
+pkg_gettext_repo = https://github.com/etnt/gettext
+pkg_gettext_commit = master
+
+PACKAGES += giallo
+pkg_giallo_name = giallo
+pkg_giallo_description = Small and flexible web framework on top of Cowboy
+pkg_giallo_homepage = https://github.com/kivra/giallo
+pkg_giallo_fetch = git
+pkg_giallo_repo = https://github.com/kivra/giallo
+pkg_giallo_commit = master
+
+PACKAGES += gin
+pkg_gin_name = gin
+pkg_gin_description = Guard helpers for Erlang, implemented as a parse_transform
+pkg_gin_homepage = https://github.com/mad-cocktail/gin
+pkg_gin_fetch = git
+pkg_gin_repo = https://github.com/mad-cocktail/gin
+pkg_gin_commit = master
+
+PACKAGES += gitty
+pkg_gitty_name = gitty
+pkg_gitty_description = Git access in Erlang
+pkg_gitty_homepage = https://github.com/maxlapshin/gitty
+pkg_gitty_fetch = git
+pkg_gitty_repo = https://github.com/maxlapshin/gitty
+pkg_gitty_commit = master
+
+PACKAGES += gold_fever
+pkg_gold_fever_name = gold_fever
+pkg_gold_fever_description = A Treasure Hunt for Erlangers
+pkg_gold_fever_homepage = https://github.com/inaka/gold_fever
+pkg_gold_fever_fetch = git
+pkg_gold_fever_repo = https://github.com/inaka/gold_fever
+pkg_gold_fever_commit = master
+
+PACKAGES += gpb
+pkg_gpb_name = gpb
+pkg_gpb_description = A Google Protobuf implementation for Erlang
+pkg_gpb_homepage = https://github.com/tomas-abrahamsson/gpb
+pkg_gpb_fetch = git
+pkg_gpb_repo = https://github.com/tomas-abrahamsson/gpb
+pkg_gpb_commit = master
+
+PACKAGES += gproc
+pkg_gproc_name = gproc
+pkg_gproc_description = Extended process registry for Erlang
+pkg_gproc_homepage = https://github.com/uwiger/gproc
+pkg_gproc_fetch = git
+pkg_gproc_repo = https://github.com/uwiger/gproc
+pkg_gproc_commit = master
+
+PACKAGES += grapherl
+pkg_grapherl_name = grapherl
+pkg_grapherl_description = Create graphs of Erlang systems and programs
+pkg_grapherl_homepage = https://github.com/eproxus/grapherl
+pkg_grapherl_fetch = git
+pkg_grapherl_repo = https://github.com/eproxus/grapherl
+pkg_grapherl_commit = master
+
+PACKAGES += grpc
+pkg_grpc_name = grpc
+pkg_grpc_description = gRPC server in Erlang
+pkg_grpc_homepage = https://github.com/Bluehouse-Technology/grpc
+pkg_grpc_fetch = git
+pkg_grpc_repo = https://github.com/Bluehouse-Technology/grpc
+pkg_grpc_commit = master
+
+PACKAGES += grpc_client
+pkg_grpc_client_name = grpc_client
+pkg_grpc_client_description = gRPC client in Erlang
+pkg_grpc_client_homepage = https://github.com/Bluehouse-Technology/grpc_client
+pkg_grpc_client_fetch = git
+pkg_grpc_client_repo = https://github.com/Bluehouse-Technology/grpc_client
+pkg_grpc_client_commit = master
+
+PACKAGES += gun
+pkg_gun_name = gun
+pkg_gun_description = Asynchronous SPDY, HTTP and WebSocket client written in Erlang.
+pkg_gun_homepage = http://ninenines.eu
+pkg_gun_fetch = git
+pkg_gun_repo = https://github.com/ninenines/gun
+pkg_gun_commit = master
+
+PACKAGES += gut
+pkg_gut_name = gut
+pkg_gut_description = gut is a template printing (aka scaffolding) tool for Erlang, like rails generate or yeoman
+pkg_gut_homepage = https://github.com/unbalancedparentheses/gut
+pkg_gut_fetch = git
+pkg_gut_repo = https://github.com/unbalancedparentheses/gut
+pkg_gut_commit = master
+
+PACKAGES += hackney
+pkg_hackney_name = hackney
+pkg_hackney_description = simple HTTP client in Erlang
+pkg_hackney_homepage = https://github.com/benoitc/hackney
+pkg_hackney_fetch = git
+pkg_hackney_repo = https://github.com/benoitc/hackney
+pkg_hackney_commit = master
+
+PACKAGES += hamcrest
+pkg_hamcrest_name = hamcrest
+pkg_hamcrest_description = Erlang port of Hamcrest
+pkg_hamcrest_homepage = https://github.com/hyperthunk/hamcrest-erlang
+pkg_hamcrest_fetch = git
+pkg_hamcrest_repo = https://github.com/hyperthunk/hamcrest-erlang
+pkg_hamcrest_commit = master
+
+PACKAGES += hanoidb
+pkg_hanoidb_name = hanoidb
+pkg_hanoidb_description = Erlang LSM BTree Storage
+pkg_hanoidb_homepage = https://github.com/krestenkrab/hanoidb
+pkg_hanoidb_fetch = git
+pkg_hanoidb_repo = https://github.com/krestenkrab/hanoidb
+pkg_hanoidb_commit = master
+
+PACKAGES += hottub
+pkg_hottub_name = hottub
+pkg_hottub_description = Permanent Erlang Worker Pool
+pkg_hottub_homepage = https://github.com/bfrog/hottub
+pkg_hottub_fetch = git
+pkg_hottub_repo = https://github.com/bfrog/hottub
+pkg_hottub_commit = master
+
+PACKAGES += hpack
+pkg_hpack_name = hpack
+pkg_hpack_description = HPACK Implementation for Erlang
+pkg_hpack_homepage = https://github.com/joedevivo/hpack
+pkg_hpack_fetch = git
+pkg_hpack_repo = https://github.com/joedevivo/hpack
+pkg_hpack_commit = master
+
+PACKAGES += hyper
+pkg_hyper_name = hyper
+pkg_hyper_description = Erlang implementation of HyperLogLog
+pkg_hyper_homepage = https://github.com/GameAnalytics/hyper
+pkg_hyper_fetch = git
+pkg_hyper_repo = https://github.com/GameAnalytics/hyper
+pkg_hyper_commit = master
+
+PACKAGES += i18n
+pkg_i18n_name = i18n
+pkg_i18n_description = International components for unicode from Erlang (unicode, date, string, number, format, locale, localization, transliteration, icu4e)
+pkg_i18n_homepage = https://github.com/erlang-unicode/i18n
+pkg_i18n_fetch = git
+pkg_i18n_repo = https://github.com/erlang-unicode/i18n
+pkg_i18n_commit = master
+
+PACKAGES += ibrowse
+pkg_ibrowse_name = ibrowse
+pkg_ibrowse_description = Erlang HTTP client
+pkg_ibrowse_homepage = https://github.com/cmullaparthi/ibrowse
+pkg_ibrowse_fetch = git
+pkg_ibrowse_repo = https://github.com/cmullaparthi/ibrowse
+pkg_ibrowse_commit = master
+
+PACKAGES += idna
+pkg_idna_name = idna
+pkg_idna_description = Erlang IDNA lib
+pkg_idna_homepage = https://github.com/benoitc/erlang-idna
+pkg_idna_fetch = git
+pkg_idna_repo = https://github.com/benoitc/erlang-idna
+pkg_idna_commit = master
+
+PACKAGES += ierlang
+pkg_ierlang_name = ierlang
+pkg_ierlang_description = An Erlang language kernel for IPython.
+pkg_ierlang_homepage = https://github.com/robbielynch/ierlang
+pkg_ierlang_fetch = git
+pkg_ierlang_repo = https://github.com/robbielynch/ierlang
+pkg_ierlang_commit = master
+
+PACKAGES += iota
+pkg_iota_name = iota
+pkg_iota_description = iota (Inter-dependency Objective Testing Apparatus) - a tool to enforce clean separation of responsibilities in Erlang code
+pkg_iota_homepage = https://github.com/jpgneves/iota
+pkg_iota_fetch = git
+pkg_iota_repo = https://github.com/jpgneves/iota
+pkg_iota_commit = master
+
+PACKAGES += irc_lib
+pkg_irc_lib_name = irc_lib
+pkg_irc_lib_description = Erlang IRC client library
+pkg_irc_lib_homepage = https://github.com/OtpChatBot/irc_lib
+pkg_irc_lib_fetch = git
+pkg_irc_lib_repo = https://github.com/OtpChatBot/irc_lib
+pkg_irc_lib_commit = master
+
+PACKAGES += ircd
+pkg_ircd_name = ircd
+pkg_ircd_description = A pluggable IRC daemon application/library for Erlang.
+pkg_ircd_homepage = https://github.com/tonyg/erlang-ircd
+pkg_ircd_fetch = git
+pkg_ircd_repo = https://github.com/tonyg/erlang-ircd
+pkg_ircd_commit = master
+
+PACKAGES += iris
+pkg_iris_name = iris
+pkg_iris_description = Iris Erlang binding
+pkg_iris_homepage = https://github.com/project-iris/iris-erl
+pkg_iris_fetch = git
+pkg_iris_repo = https://github.com/project-iris/iris-erl
+pkg_iris_commit = master
+
+PACKAGES += iso8601
+pkg_iso8601_name = iso8601
+pkg_iso8601_description = Erlang ISO 8601 date formatter/parser
+pkg_iso8601_homepage = https://github.com/seansawyer/erlang_iso8601
+pkg_iso8601_fetch = git
+pkg_iso8601_repo = https://github.com/seansawyer/erlang_iso8601
+pkg_iso8601_commit = master
+
+PACKAGES += jamdb_sybase
+pkg_jamdb_sybase_name = jamdb_sybase
+pkg_jamdb_sybase_description = Erlang driver for SAP Sybase ASE
+pkg_jamdb_sybase_homepage = https://github.com/erlangbureau/jamdb_sybase
+pkg_jamdb_sybase_fetch = git
+pkg_jamdb_sybase_repo = https://github.com/erlangbureau/jamdb_sybase
+pkg_jamdb_sybase_commit = master
+
+PACKAGES += jerg
+pkg_jerg_name = jerg
+pkg_jerg_description = JSON Schema to Erlang Records Generator
+pkg_jerg_homepage = https://github.com/ddossot/jerg
+pkg_jerg_fetch = git
+pkg_jerg_repo = https://github.com/ddossot/jerg
+pkg_jerg_commit = master
+
+PACKAGES += jesse
+pkg_jesse_name = jesse
+pkg_jesse_description = jesse (JSON Schema Erlang) is an implementation of a JSON Schema validator for Erlang.
+pkg_jesse_homepage = https://github.com/for-GET/jesse
+pkg_jesse_fetch = git
+pkg_jesse_repo = https://github.com/for-GET/jesse
+pkg_jesse_commit = master
+
+PACKAGES += jiffy
+pkg_jiffy_name = jiffy
+pkg_jiffy_description = JSON NIFs for Erlang.
+pkg_jiffy_homepage = https://github.com/davisp/jiffy
+pkg_jiffy_fetch = git
+pkg_jiffy_repo = https://github.com/davisp/jiffy
+pkg_jiffy_commit = master
+
+PACKAGES += jiffy_v
+pkg_jiffy_v_name = jiffy_v
+pkg_jiffy_v_description = JSON validation utility
+pkg_jiffy_v_homepage = https://github.com/shizzard/jiffy-v
+pkg_jiffy_v_fetch = git
+pkg_jiffy_v_repo = https://github.com/shizzard/jiffy-v
+pkg_jiffy_v_commit = master
+
+PACKAGES += jobs
+pkg_jobs_name = jobs
+pkg_jobs_description = A job scheduler for load regulation
+pkg_jobs_homepage = https://github.com/esl/jobs
+pkg_jobs_fetch = git
+pkg_jobs_repo = https://github.com/esl/jobs
+pkg_jobs_commit = master
+
+PACKAGES += joxa
+pkg_joxa_name = joxa
+pkg_joxa_description = A Modern Lisp for the Erlang VM
+pkg_joxa_homepage = https://github.com/joxa/joxa
+pkg_joxa_fetch = git
+pkg_joxa_repo = https://github.com/joxa/joxa
+pkg_joxa_commit = master
+
+PACKAGES += json
+pkg_json_name = json
+pkg_json_description = A high-level JSON library for Erlang (17.0+)
+pkg_json_homepage = https://github.com/talentdeficit/json
+pkg_json_fetch = git
+pkg_json_repo = https://github.com/talentdeficit/json
+pkg_json_commit = master
+
+PACKAGES += json_rec
+pkg_json_rec_name = json_rec
+pkg_json_rec_description = JSON to Erlang record
+pkg_json_rec_homepage = https://github.com/justinkirby/json_rec
+pkg_json_rec_fetch = git
+pkg_json_rec_repo = https://github.com/justinkirby/json_rec
+pkg_json_rec_commit = master
+
+PACKAGES += jsone
+pkg_jsone_name = jsone
+pkg_jsone_description = An Erlang library for encoding and decoding JSON data.
+pkg_jsone_homepage = https://github.com/sile/jsone
+pkg_jsone_fetch = git
+pkg_jsone_repo = https://github.com/sile/jsone.git
+pkg_jsone_commit = master
+
+PACKAGES += jsonerl
+pkg_jsonerl_name = jsonerl
+pkg_jsonerl_description = Yet another, but slightly different, Erlang <-> JSON encoder/decoder
+pkg_jsonerl_homepage = https://github.com/lambder/jsonerl
+pkg_jsonerl_fetch = git
+pkg_jsonerl_repo = https://github.com/lambder/jsonerl
+pkg_jsonerl_commit = master
+
+PACKAGES += jsonpath
+pkg_jsonpath_name = jsonpath
+pkg_jsonpath_description = Fast Erlang JSON data retrieval and updates via JavaScript-like notation
+pkg_jsonpath_homepage = https://github.com/GeneStevens/jsonpath
+pkg_jsonpath_fetch = git
+pkg_jsonpath_repo = https://github.com/GeneStevens/jsonpath
+pkg_jsonpath_commit = master
+
+PACKAGES += jsonx
+pkg_jsonx_name = jsonx
+pkg_jsonx_description = JSONX is an Erlang library, written in C, for efficiently decoding and encoding JSON.
+pkg_jsonx_homepage = https://github.com/iskra/jsonx
+pkg_jsonx_fetch = git
+pkg_jsonx_repo = https://github.com/iskra/jsonx
+pkg_jsonx_commit = master
+
+PACKAGES += jsx
+pkg_jsx_name = jsx
+pkg_jsx_description = An Erlang application for consuming, producing and manipulating JSON.
+pkg_jsx_homepage = https://github.com/talentdeficit/jsx
+pkg_jsx_fetch = git
+pkg_jsx_repo = https://github.com/talentdeficit/jsx
+pkg_jsx_commit = master
+
+PACKAGES += kafka
+pkg_kafka_name = kafka
+pkg_kafka_description = Kafka consumer and producer in Erlang
+pkg_kafka_homepage = https://github.com/wooga/kafka-erlang
+pkg_kafka_fetch = git
+pkg_kafka_repo = https://github.com/wooga/kafka-erlang
+pkg_kafka_commit = master
+
+PACKAGES += kafka_protocol
+pkg_kafka_protocol_name = kafka_protocol
+pkg_kafka_protocol_description = Kafka protocol Erlang library
+pkg_kafka_protocol_homepage = https://github.com/klarna/kafka_protocol
+pkg_kafka_protocol_fetch = git
+pkg_kafka_protocol_repo = https://github.com/klarna/kafka_protocol.git
+pkg_kafka_protocol_commit = master
+
+PACKAGES += kai
+pkg_kai_name = kai
+pkg_kai_description = DHT storage by Takeshi Inoue
+pkg_kai_homepage = https://github.com/synrc/kai
+pkg_kai_fetch = git
+pkg_kai_repo = https://github.com/synrc/kai
+pkg_kai_commit = master
+
+PACKAGES += katja
+pkg_katja_name = katja
+pkg_katja_description = A simple Riemann client written in Erlang.
+pkg_katja_homepage = https://github.com/nifoc/katja
+pkg_katja_fetch = git
+pkg_katja_repo = https://github.com/nifoc/katja
+pkg_katja_commit = master
+
+PACKAGES += kdht
+pkg_kdht_name = kdht
+pkg_kdht_description = kdht is an Erlang DHT implementation
+pkg_kdht_homepage = https://github.com/kevinlynx/kdht
+pkg_kdht_fetch = git
+pkg_kdht_repo = https://github.com/kevinlynx/kdht
+pkg_kdht_commit = master
+
+PACKAGES += key2value
+pkg_key2value_name = key2value
+pkg_key2value_description = Erlang 2-way map
+pkg_key2value_homepage = https://github.com/okeuday/key2value
+pkg_key2value_fetch = git
+pkg_key2value_repo = https://github.com/okeuday/key2value
+pkg_key2value_commit = master
+
+PACKAGES += keys1value
+pkg_keys1value_name = keys1value
+pkg_keys1value_description = Erlang set associative map for key lists
+pkg_keys1value_homepage = https://github.com/okeuday/keys1value
+pkg_keys1value_fetch = git
+pkg_keys1value_repo = https://github.com/okeuday/keys1value
+pkg_keys1value_commit = master
+
+PACKAGES += kinetic
+pkg_kinetic_name = kinetic
+pkg_kinetic_description = Erlang Kinesis Client
+pkg_kinetic_homepage = https://github.com/AdRoll/kinetic
+pkg_kinetic_fetch = git
+pkg_kinetic_repo = https://github.com/AdRoll/kinetic
+pkg_kinetic_commit = master
+
+PACKAGES += kjell
+pkg_kjell_name = kjell
+pkg_kjell_description = Erlang Shell
+pkg_kjell_homepage = https://github.com/karlll/kjell
+pkg_kjell_fetch = git
+pkg_kjell_repo = https://github.com/karlll/kjell
+pkg_kjell_commit = master
+
+PACKAGES += kraken
+pkg_kraken_name = kraken
+pkg_kraken_description = Distributed Pubsub Server for Realtime Apps
+pkg_kraken_homepage = https://github.com/Asana/kraken
+pkg_kraken_fetch = git
+pkg_kraken_repo = https://github.com/Asana/kraken
+pkg_kraken_commit = master
+
+PACKAGES += kucumberl
+pkg_kucumberl_name = kucumberl
+pkg_kucumberl_description = A pure-Erlang, open-source implementation of Cucumber
+pkg_kucumberl_homepage = https://github.com/openshine/kucumberl
+pkg_kucumberl_fetch = git
+pkg_kucumberl_repo = https://github.com/openshine/kucumberl
+pkg_kucumberl_commit = master
+
+PACKAGES += kvc
+pkg_kvc_name = kvc
+pkg_kvc_description = KVC - Key Value Coding for Erlang data structures
+pkg_kvc_homepage = https://github.com/etrepum/kvc
+pkg_kvc_fetch = git
+pkg_kvc_repo = https://github.com/etrepum/kvc
+pkg_kvc_commit = master
+
+PACKAGES += kvlists
+pkg_kvlists_name = kvlists
+pkg_kvlists_description = Lists of key-value pairs (decoded JSON) in Erlang
+pkg_kvlists_homepage = https://github.com/jcomellas/kvlists
+pkg_kvlists_fetch = git
+pkg_kvlists_repo = https://github.com/jcomellas/kvlists
+pkg_kvlists_commit = master
+
+PACKAGES += kvs
+pkg_kvs_name = kvs
+pkg_kvs_description = Container and Iterator
+pkg_kvs_homepage = https://github.com/synrc/kvs
+pkg_kvs_fetch = git
+pkg_kvs_repo = https://github.com/synrc/kvs
+pkg_kvs_commit = master
+
+PACKAGES += lager
+pkg_lager_name = lager
+pkg_lager_description = A logging framework for Erlang/OTP.
+pkg_lager_homepage = https://github.com/erlang-lager/lager
+pkg_lager_fetch = git
+pkg_lager_repo = https://github.com/erlang-lager/lager
+pkg_lager_commit = master
+
+PACKAGES += lager_amqp_backend
+pkg_lager_amqp_backend_name = lager_amqp_backend
+pkg_lager_amqp_backend_description = AMQP RabbitMQ Lager backend
+pkg_lager_amqp_backend_homepage = https://github.com/jbrisbin/lager_amqp_backend
+pkg_lager_amqp_backend_fetch = git
+pkg_lager_amqp_backend_repo = https://github.com/jbrisbin/lager_amqp_backend
+pkg_lager_amqp_backend_commit = master
+
+PACKAGES += lager_syslog
+pkg_lager_syslog_name = lager_syslog
+pkg_lager_syslog_description = Syslog backend for lager
+pkg_lager_syslog_homepage = https://github.com/erlang-lager/lager_syslog
+pkg_lager_syslog_fetch = git
+pkg_lager_syslog_repo = https://github.com/erlang-lager/lager_syslog
+pkg_lager_syslog_commit = master
+
+PACKAGES += lambdapad
+pkg_lambdapad_name = lambdapad
+pkg_lambdapad_description = Static site generator using Erlang. Yes, Erlang.
+pkg_lambdapad_homepage = https://github.com/gar1t/lambdapad
+pkg_lambdapad_fetch = git
+pkg_lambdapad_repo = https://github.com/gar1t/lambdapad
+pkg_lambdapad_commit = master
+
+PACKAGES += lasp
+pkg_lasp_name = lasp
+pkg_lasp_description = A Language for Distributed, Eventually Consistent Computations
+pkg_lasp_homepage = http://lasp-lang.org/
+pkg_lasp_fetch = git
+pkg_lasp_repo = https://github.com/lasp-lang/lasp
+pkg_lasp_commit = master
+
+PACKAGES += lasse
+pkg_lasse_name = lasse
+pkg_lasse_description = SSE handler for Cowboy
+pkg_lasse_homepage = https://github.com/inaka/lasse
+pkg_lasse_fetch = git
+pkg_lasse_repo = https://github.com/inaka/lasse
+pkg_lasse_commit = master
+
+PACKAGES += ldap
+pkg_ldap_name = ldap
+pkg_ldap_description = LDAP server written in Erlang
+pkg_ldap_homepage = https://github.com/spawnproc/ldap
+pkg_ldap_fetch = git
+pkg_ldap_repo = https://github.com/spawnproc/ldap
+pkg_ldap_commit = master
+
+PACKAGES += lethink
+pkg_lethink_name = lethink
+pkg_lethink_description = Erlang driver for RethinkDB
+pkg_lethink_homepage = https://github.com/taybin/lethink
+pkg_lethink_fetch = git
+pkg_lethink_repo = https://github.com/taybin/lethink
+pkg_lethink_commit = master
+
+PACKAGES += lfe
+pkg_lfe_name = lfe
+pkg_lfe_description = Lisp Flavoured Erlang (LFE)
+pkg_lfe_homepage = https://github.com/rvirding/lfe
+pkg_lfe_fetch = git
+pkg_lfe_repo = https://github.com/rvirding/lfe
+pkg_lfe_commit = master
+
+PACKAGES += ling
+pkg_ling_name = ling
+pkg_ling_description = Erlang on Xen
+pkg_ling_homepage = https://github.com/cloudozer/ling
+pkg_ling_fetch = git
+pkg_ling_repo = https://github.com/cloudozer/ling
+pkg_ling_commit = master
+
+PACKAGES += live
+pkg_live_name = live
+pkg_live_description = Automated module and configuration reloader.
+pkg_live_homepage = http://ninenines.eu
+pkg_live_fetch = git
+pkg_live_repo = https://github.com/ninenines/live
+pkg_live_commit = master
+
+PACKAGES += lmq
+pkg_lmq_name = lmq
+pkg_lmq_description = Lightweight Message Queue
+pkg_lmq_homepage = https://github.com/iij/lmq
+pkg_lmq_fetch = git
+pkg_lmq_repo = https://github.com/iij/lmq
+pkg_lmq_commit = master
+
+PACKAGES += locker
+pkg_locker_name = locker
+pkg_locker_description = Atomic distributed 'check and set' for short-lived keys
+pkg_locker_homepage = https://github.com/wooga/locker
+pkg_locker_fetch = git
+pkg_locker_repo = https://github.com/wooga/locker
+pkg_locker_commit = master
+
+PACKAGES += locks
+pkg_locks_name = locks
+pkg_locks_description = A scalable, deadlock-resolving resource locker
+pkg_locks_homepage = https://github.com/uwiger/locks
+pkg_locks_fetch = git
+pkg_locks_repo = https://github.com/uwiger/locks
+pkg_locks_commit = master
+
+PACKAGES += log4erl
+pkg_log4erl_name = log4erl
+pkg_log4erl_description = A logger for Erlang in the spirit of Log4J.
+pkg_log4erl_homepage = https://github.com/ahmednawras/log4erl
+pkg_log4erl_fetch = git
+pkg_log4erl_repo = https://github.com/ahmednawras/log4erl
+pkg_log4erl_commit = master
+
+PACKAGES += lol
+pkg_lol_name = lol
+pkg_lol_description = Lisp on erLang, and programming is fun again
+pkg_lol_homepage = https://github.com/b0oh/lol
+pkg_lol_fetch = git
+pkg_lol_repo = https://github.com/b0oh/lol
+pkg_lol_commit = master
+
+PACKAGES += lucid
+pkg_lucid_name = lucid
+pkg_lucid_description = HTTP/2 server written in Erlang
+pkg_lucid_homepage = https://github.com/tatsuhiro-t/lucid
+pkg_lucid_fetch = git
+pkg_lucid_repo = https://github.com/tatsuhiro-t/lucid
+pkg_lucid_commit = master
+
+PACKAGES += luerl
+pkg_luerl_name = luerl
+pkg_luerl_description = Lua in Erlang
+pkg_luerl_homepage = https://github.com/rvirding/luerl
+pkg_luerl_fetch = git
+pkg_luerl_repo = https://github.com/rvirding/luerl
+pkg_luerl_commit = develop
+
+PACKAGES += luwak
+pkg_luwak_name = luwak
+pkg_luwak_description = Large-object storage interface for Riak
+pkg_luwak_homepage = https://github.com/basho/luwak
+pkg_luwak_fetch = git
+pkg_luwak_repo = https://github.com/basho/luwak
+pkg_luwak_commit = master
+
+PACKAGES += lux
+pkg_lux_name = lux
+pkg_lux_description = Lux (LUcid eXpect scripting) simplifies test automation and provides an Expect-style execution of commands
+pkg_lux_homepage = https://github.com/hawk/lux
+pkg_lux_fetch = git
+pkg_lux_repo = https://github.com/hawk/lux
+pkg_lux_commit = master
+
+PACKAGES += machi
+pkg_machi_name = machi
+pkg_machi_description = Machi file store
+pkg_machi_homepage = https://github.com/basho/machi
+pkg_machi_fetch = git
+pkg_machi_repo = https://github.com/basho/machi
+pkg_machi_commit = master
+
+PACKAGES += mad
+pkg_mad_name = mad
+pkg_mad_description = Small and Fast Rebar Replacement
+pkg_mad_homepage = https://github.com/synrc/mad
+pkg_mad_fetch = git
+pkg_mad_repo = https://github.com/synrc/mad
+pkg_mad_commit = master
+
+PACKAGES += marina
+pkg_marina_name = marina
+pkg_marina_description = Non-blocking Erlang Cassandra CQL3 client
+pkg_marina_homepage = https://github.com/lpgauth/marina
+pkg_marina_fetch = git
+pkg_marina_repo = https://github.com/lpgauth/marina
+pkg_marina_commit = master
+
+PACKAGES += mavg
+pkg_mavg_name = mavg
+pkg_mavg_description = Erlang :: Exponential moving average library
+pkg_mavg_homepage = https://github.com/EchoTeam/mavg
+pkg_mavg_fetch = git
+pkg_mavg_repo = https://github.com/EchoTeam/mavg
+pkg_mavg_commit = master
+
+PACKAGES += mc_erl
+pkg_mc_erl_name = mc_erl
+pkg_mc_erl_description = mc-erl is a server for Minecraft 1.4.7 written in Erlang.
+pkg_mc_erl_homepage = https://github.com/clonejo/mc-erl
+pkg_mc_erl_fetch = git
+pkg_mc_erl_repo = https://github.com/clonejo/mc-erl
+pkg_mc_erl_commit = master
+
+PACKAGES += mcd
+pkg_mcd_name = mcd
+pkg_mcd_description = Fast memcached protocol client in pure Erlang
+pkg_mcd_homepage = https://github.com/EchoTeam/mcd
+pkg_mcd_fetch = git
+pkg_mcd_repo = https://github.com/EchoTeam/mcd
+pkg_mcd_commit = master
+
+PACKAGES += mcerlang
+pkg_mcerlang_name = mcerlang
+pkg_mcerlang_description = The McErlang model checker for Erlang
+pkg_mcerlang_homepage = https://github.com/fredlund/McErlang
+pkg_mcerlang_fetch = git
+pkg_mcerlang_repo = https://github.com/fredlund/McErlang
+pkg_mcerlang_commit = master
+
+PACKAGES += meck
+pkg_meck_name = meck
+pkg_meck_description = A mocking library for Erlang
+pkg_meck_homepage = https://github.com/eproxus/meck
+pkg_meck_fetch = git
+pkg_meck_repo = https://github.com/eproxus/meck
+pkg_meck_commit = master
+
+PACKAGES += mekao
+pkg_mekao_name = mekao
+pkg_mekao_description = SQL constructor
+pkg_mekao_homepage = https://github.com/ddosia/mekao
+pkg_mekao_fetch = git
+pkg_mekao_repo = https://github.com/ddosia/mekao
+pkg_mekao_commit = master
+
+PACKAGES += memo
+pkg_memo_name = memo
+pkg_memo_description = Erlang memoization server
+pkg_memo_homepage = https://github.com/tuncer/memo
+pkg_memo_fetch = git
+pkg_memo_repo = https://github.com/tuncer/memo
+pkg_memo_commit = master
+
+PACKAGES += merge_index
+pkg_merge_index_name = merge_index
+pkg_merge_index_description = MergeIndex is an Erlang library for storing ordered sets on disk. It is very similar to an SSTable (in Google's Bigtable) or an HFile (in Hadoop).
+pkg_merge_index_homepage = https://github.com/basho/merge_index
+pkg_merge_index_fetch = git
+pkg_merge_index_repo = https://github.com/basho/merge_index
+pkg_merge_index_commit = master
+
+PACKAGES += merl
+pkg_merl_name = merl
+pkg_merl_description = Metaprogramming in Erlang
+pkg_merl_homepage = https://github.com/richcarl/merl
+pkg_merl_fetch = git
+pkg_merl_repo = https://github.com/richcarl/merl
+pkg_merl_commit = master
+
+PACKAGES += mimerl
+pkg_mimerl_name = mimerl
+pkg_mimerl_description = library to handle mimetypes
+pkg_mimerl_homepage = https://github.com/benoitc/mimerl
+pkg_mimerl_fetch = git
+pkg_mimerl_repo = https://github.com/benoitc/mimerl
+pkg_mimerl_commit = master
+
+PACKAGES += mimetypes
+pkg_mimetypes_name = mimetypes
+pkg_mimetypes_description = Erlang MIME types library
+pkg_mimetypes_homepage = https://github.com/spawngrid/mimetypes
+pkg_mimetypes_fetch = git
+pkg_mimetypes_repo = https://github.com/spawngrid/mimetypes
+pkg_mimetypes_commit = master
+
+PACKAGES += mixer
+pkg_mixer_name = mixer
+pkg_mixer_description = Mix in functions from other modules
+pkg_mixer_homepage = https://github.com/chef/mixer
+pkg_mixer_fetch = git
+pkg_mixer_repo = https://github.com/chef/mixer
+pkg_mixer_commit = master
+
+PACKAGES += mochiweb
+pkg_mochiweb_name = mochiweb
+pkg_mochiweb_description = MochiWeb is an Erlang library for building lightweight HTTP servers.
+pkg_mochiweb_homepage = https://github.com/mochi/mochiweb
+pkg_mochiweb_fetch = git
+pkg_mochiweb_repo = https://github.com/mochi/mochiweb
+pkg_mochiweb_commit = master
+
+PACKAGES += mochiweb_xpath
+pkg_mochiweb_xpath_name = mochiweb_xpath
+pkg_mochiweb_xpath_description = XPath support for mochiweb's html parser
+pkg_mochiweb_xpath_homepage = https://github.com/retnuh/mochiweb_xpath
+pkg_mochiweb_xpath_fetch = git
+pkg_mochiweb_xpath_repo = https://github.com/retnuh/mochiweb_xpath
+pkg_mochiweb_xpath_commit = master
+
+PACKAGES += mockgyver
+pkg_mockgyver_name = mockgyver
+pkg_mockgyver_description = A mocking library for Erlang
+pkg_mockgyver_homepage = https://github.com/klajo/mockgyver
+pkg_mockgyver_fetch = git
+pkg_mockgyver_repo = https://github.com/klajo/mockgyver
+pkg_mockgyver_commit = master
+
+PACKAGES += modlib
+pkg_modlib_name = modlib
+pkg_modlib_description = Web framework based on Erlang's inets httpd
+pkg_modlib_homepage = https://github.com/gar1t/modlib
+pkg_modlib_fetch = git
+pkg_modlib_repo = https://github.com/gar1t/modlib
+pkg_modlib_commit = master
+
+PACKAGES += mongodb
+pkg_mongodb_name = mongodb
+pkg_mongodb_description = MongoDB driver for Erlang
+pkg_mongodb_homepage = https://github.com/comtihon/mongodb-erlang
+pkg_mongodb_fetch = git
+pkg_mongodb_repo = https://github.com/comtihon/mongodb-erlang
+pkg_mongodb_commit = master
+
+PACKAGES += mongooseim
+pkg_mongooseim_name = mongooseim
+pkg_mongooseim_description = Jabber/XMPP server with a focus on performance and scalability, by Erlang Solutions
+pkg_mongooseim_homepage = https://www.erlang-solutions.com/products/mongooseim-massively-scalable-ejabberd-platform
+pkg_mongooseim_fetch = git
+pkg_mongooseim_repo = https://github.com/esl/MongooseIM
+pkg_mongooseim_commit = master
+
+PACKAGES += moyo
+pkg_moyo_name = moyo
+pkg_moyo_description = Erlang utility functions library
+pkg_moyo_homepage = https://github.com/dwango/moyo
+pkg_moyo_fetch = git
+pkg_moyo_repo = https://github.com/dwango/moyo
+pkg_moyo_commit = master
+
+PACKAGES += msgpack
+pkg_msgpack_name = msgpack
+pkg_msgpack_description = MessagePack (de)serializer implementation for Erlang
+pkg_msgpack_homepage = https://github.com/msgpack/msgpack-erlang
+pkg_msgpack_fetch = git
+pkg_msgpack_repo = https://github.com/msgpack/msgpack-erlang
+pkg_msgpack_commit = master
+
+PACKAGES += mu2
+pkg_mu2_name = mu2
+pkg_mu2_description = Erlang mutation testing tool
+pkg_mu2_homepage = https://github.com/ramsay-t/mu2
+pkg_mu2_fetch = git
+pkg_mu2_repo = https://github.com/ramsay-t/mu2
+pkg_mu2_commit = master
+
+PACKAGES += mustache
+pkg_mustache_name = mustache
+pkg_mustache_description = Mustache template engine for Erlang.
+pkg_mustache_homepage = https://github.com/mojombo/mustache.erl
+pkg_mustache_fetch = git
+pkg_mustache_repo = https://github.com/mojombo/mustache.erl
+pkg_mustache_commit = master
+
+PACKAGES += myproto
+pkg_myproto_name = myproto
+pkg_myproto_description = MySQL Server Protocol in Erlang
+pkg_myproto_homepage = https://github.com/altenwald/myproto
+pkg_myproto_fetch = git
+pkg_myproto_repo = https://github.com/altenwald/myproto
+pkg_myproto_commit = master
+
+PACKAGES += mysql
+pkg_mysql_name = mysql
+pkg_mysql_description = MySQL client library for Erlang/OTP
+pkg_mysql_homepage = https://github.com/mysql-otp/mysql-otp
+pkg_mysql_fetch = git
+pkg_mysql_repo = https://github.com/mysql-otp/mysql-otp
+pkg_mysql_commit = 1.5.1
+
+PACKAGES += n2o
+pkg_n2o_name = n2o
+pkg_n2o_description = WebSocket Application Server
+pkg_n2o_homepage = https://github.com/5HT/n2o
+pkg_n2o_fetch = git
+pkg_n2o_repo = https://github.com/5HT/n2o
+pkg_n2o_commit = master
+
+PACKAGES += nat_upnp
+pkg_nat_upnp_name = nat_upnp
+pkg_nat_upnp_description = Erlang library to map your internal port to an external one using UPnP IGD
+pkg_nat_upnp_homepage = https://github.com/benoitc/nat_upnp
+pkg_nat_upnp_fetch = git
+pkg_nat_upnp_repo = https://github.com/benoitc/nat_upnp
+pkg_nat_upnp_commit = master
+
+PACKAGES += neo4j
+pkg_neo4j_name = neo4j
+pkg_neo4j_description = Erlang client library for Neo4J.
+pkg_neo4j_homepage = https://github.com/dmitriid/neo4j-erlang
+pkg_neo4j_fetch = git
+pkg_neo4j_repo = https://github.com/dmitriid/neo4j-erlang
+pkg_neo4j_commit = master
+
+PACKAGES += neotoma
+pkg_neotoma_name = neotoma
+pkg_neotoma_description = Erlang library and packrat parser-generator for parsing expression grammars.
+pkg_neotoma_homepage = https://github.com/seancribbs/neotoma
+pkg_neotoma_fetch = git
+pkg_neotoma_repo = https://github.com/seancribbs/neotoma
+pkg_neotoma_commit = master
+
+PACKAGES += newrelic
+pkg_newrelic_name = newrelic
+pkg_newrelic_description = Erlang library for sending metrics to New Relic
+pkg_newrelic_homepage = https://github.com/wooga/newrelic-erlang
+pkg_newrelic_fetch = git
+pkg_newrelic_repo = https://github.com/wooga/newrelic-erlang
+pkg_newrelic_commit = master
+
+PACKAGES += nifty
+pkg_nifty_name = nifty
+pkg_nifty_description = Erlang NIF wrapper generator
+pkg_nifty_homepage = https://github.com/parapluu/nifty
+pkg_nifty_fetch = git
+pkg_nifty_repo = https://github.com/parapluu/nifty
+pkg_nifty_commit = master
+
+PACKAGES += nitrogen_core
+pkg_nitrogen_core_name = nitrogen_core
+pkg_nitrogen_core_description = The core Nitrogen library.
+pkg_nitrogen_core_homepage = http://nitrogenproject.com/
+pkg_nitrogen_core_fetch = git
+pkg_nitrogen_core_repo = https://github.com/nitrogen/nitrogen_core
+pkg_nitrogen_core_commit = master
+
+PACKAGES += nkbase
+pkg_nkbase_name = nkbase
+pkg_nkbase_description = NkBASE distributed database
+pkg_nkbase_homepage = https://github.com/Nekso/nkbase
+pkg_nkbase_fetch = git
+pkg_nkbase_repo = https://github.com/Nekso/nkbase
+pkg_nkbase_commit = develop
+
+PACKAGES += nkdocker
+pkg_nkdocker_name = nkdocker
+pkg_nkdocker_description = Erlang Docker client
+pkg_nkdocker_homepage = https://github.com/Nekso/nkdocker
+pkg_nkdocker_fetch = git
+pkg_nkdocker_repo = https://github.com/Nekso/nkdocker
+pkg_nkdocker_commit = master
+
+PACKAGES += nkpacket
+pkg_nkpacket_name = nkpacket
+pkg_nkpacket_description = Generic Erlang transport layer
+pkg_nkpacket_homepage = https://github.com/Nekso/nkpacket
+pkg_nkpacket_fetch = git
+pkg_nkpacket_repo = https://github.com/Nekso/nkpacket
+pkg_nkpacket_commit = master
+
+PACKAGES += nksip
+pkg_nksip_name = nksip
+pkg_nksip_description = Erlang SIP application server
+pkg_nksip_homepage = https://github.com/kalta/nksip
+pkg_nksip_fetch = git
+pkg_nksip_repo = https://github.com/kalta/nksip
+pkg_nksip_commit = master
+
+PACKAGES += nodefinder
+pkg_nodefinder_name = nodefinder
+pkg_nodefinder_description = automatic node discovery via UDP multicast
+pkg_nodefinder_homepage = https://github.com/erlanger/nodefinder
+pkg_nodefinder_fetch = git
+pkg_nodefinder_repo = https://github.com/okeuday/nodefinder
+pkg_nodefinder_commit = master
+
+PACKAGES += nprocreg
+pkg_nprocreg_name = nprocreg
+pkg_nprocreg_description = Minimal Distributed Erlang Process Registry
+pkg_nprocreg_homepage = http://nitrogenproject.com/
+pkg_nprocreg_fetch = git
+pkg_nprocreg_repo = https://github.com/nitrogen/nprocreg
+pkg_nprocreg_commit = master
+
+PACKAGES += oauth
+pkg_oauth_name = oauth
+pkg_oauth_description = An Erlang OAuth 1.0 implementation
+pkg_oauth_homepage = https://github.com/tim/erlang-oauth
+pkg_oauth_fetch = git
+pkg_oauth_repo = https://github.com/tim/erlang-oauth
+pkg_oauth_commit = master
+
+PACKAGES += oauth2
+pkg_oauth2_name = oauth2
+pkg_oauth2_description = Erlang OAuth2 implementation
+pkg_oauth2_homepage = https://github.com/kivra/oauth2
+pkg_oauth2_fetch = git
+pkg_oauth2_repo = https://github.com/kivra/oauth2
+pkg_oauth2_commit = master
+
+PACKAGES += observer_cli
+pkg_observer_cli_name = observer_cli
+pkg_observer_cli_description = Visualize Erlang/Elixir Nodes On The Command Line
+pkg_observer_cli_homepage = http://zhongwencool.github.io/observer_cli
+pkg_observer_cli_fetch = git
+pkg_observer_cli_repo = https://github.com/zhongwencool/observer_cli
+pkg_observer_cli_commit = master
+
+PACKAGES += octopus
+pkg_octopus_name = octopus
+pkg_octopus_description = Small and flexible pool manager written in Erlang
+pkg_octopus_homepage = https://github.com/erlangbureau/octopus
+pkg_octopus_fetch = git
+pkg_octopus_repo = https://github.com/erlangbureau/octopus
+pkg_octopus_commit = master
+
+PACKAGES += of_protocol
+pkg_of_protocol_name = of_protocol
+pkg_of_protocol_description = OpenFlow Protocol Library for Erlang
+pkg_of_protocol_homepage = https://github.com/FlowForwarding/of_protocol
+pkg_of_protocol_fetch = git
+pkg_of_protocol_repo = https://github.com/FlowForwarding/of_protocol
+pkg_of_protocol_commit = master
+
+PACKAGES += opencouch
+pkg_opencouch_name = couch
+pkg_opencouch_description = An embeddable document-oriented database compatible with Apache CouchDB
+pkg_opencouch_homepage = https://github.com/benoitc/opencouch
+pkg_opencouch_fetch = git
+pkg_opencouch_repo = https://github.com/benoitc/opencouch
+pkg_opencouch_commit = master
+
+PACKAGES += openflow
+pkg_openflow_name = openflow
+pkg_openflow_description = An OpenFlow controller written in pure Erlang
+pkg_openflow_homepage = https://github.com/renatoaguiar/erlang-openflow
+pkg_openflow_fetch = git
+pkg_openflow_repo = https://github.com/renatoaguiar/erlang-openflow
+pkg_openflow_commit = master
+
+PACKAGES += openid
+pkg_openid_name = openid
+pkg_openid_description = Erlang OpenID
+pkg_openid_homepage = https://github.com/brendonh/erl_openid
+pkg_openid_fetch = git
+pkg_openid_repo = https://github.com/brendonh/erl_openid
+pkg_openid_commit = master
+
+PACKAGES += openpoker
+pkg_openpoker_name = openpoker
+pkg_openpoker_description = Genesis Texas hold'em Game Server
+pkg_openpoker_homepage = https://github.com/hpyhacking/openpoker
+pkg_openpoker_fetch = git
+pkg_openpoker_repo = https://github.com/hpyhacking/openpoker
+pkg_openpoker_commit = master
+
+PACKAGES += otpbp
+pkg_otpbp_name = otpbp
+pkg_otpbp_description = Parse transformer for using new OTP functions in old Erlang/OTP releases (R15, R16, 17, 18, 19)
+pkg_otpbp_homepage = https://github.com/Ledest/otpbp
+pkg_otpbp_fetch = git
+pkg_otpbp_repo = https://github.com/Ledest/otpbp
+pkg_otpbp_commit = master
+
+PACKAGES += pal
+pkg_pal_name = pal
+pkg_pal_description = Pragmatic Authentication Library
+pkg_pal_homepage = https://github.com/manifest/pal
+pkg_pal_fetch = git
+pkg_pal_repo = https://github.com/manifest/pal
+pkg_pal_commit = master
+
+PACKAGES += parse_trans
+pkg_parse_trans_name = parse_trans
+pkg_parse_trans_description = Parse transform utilities for Erlang
+pkg_parse_trans_homepage = https://github.com/uwiger/parse_trans
+pkg_parse_trans_fetch = git
+pkg_parse_trans_repo = https://github.com/uwiger/parse_trans
+pkg_parse_trans_commit = master
+
+PACKAGES += parsexml
+pkg_parsexml_name = parsexml
+pkg_parsexml_description = Simple DOM XML parser with a convenient and very simple API
+pkg_parsexml_homepage = https://github.com/maxlapshin/parsexml
+pkg_parsexml_fetch = git
+pkg_parsexml_repo = https://github.com/maxlapshin/parsexml
+pkg_parsexml_commit = master
+
+PACKAGES += partisan
+pkg_partisan_name = partisan
+pkg_partisan_description = High-performance, high-scalability distributed computing with Erlang and Elixir.
+pkg_partisan_homepage = http://partisan.cloud
+pkg_partisan_fetch = git
+pkg_partisan_repo = https://github.com/lasp-lang/partisan
+pkg_partisan_commit = master
+
+PACKAGES += pegjs
+pkg_pegjs_name = pegjs
+pkg_pegjs_description = An implementation of PEG.js grammar for Erlang.
+pkg_pegjs_homepage = https://github.com/dmitriid/pegjs
+pkg_pegjs_fetch = git
+pkg_pegjs_repo = https://github.com/dmitriid/pegjs
+pkg_pegjs_commit = master
+
+PACKAGES += percept2
+pkg_percept2_name = percept2
+pkg_percept2_description = Concurrent profiling tool for Erlang
+pkg_percept2_homepage = https://github.com/huiqing/percept2
+pkg_percept2_fetch = git
+pkg_percept2_repo = https://github.com/huiqing/percept2
+pkg_percept2_commit = master
+
+PACKAGES += pgo
+pkg_pgo_name = pgo
+pkg_pgo_description = Erlang Postgres client and connection pool
+pkg_pgo_homepage = https://github.com/erleans/pgo
+pkg_pgo_fetch = git
+pkg_pgo_repo = https://github.com/erleans/pgo.git
+pkg_pgo_commit = master
+
+PACKAGES += pgsql
+pkg_pgsql_name = pgsql
+pkg_pgsql_description = Erlang PostgreSQL driver
+pkg_pgsql_homepage = https://github.com/semiocast/pgsql
+pkg_pgsql_fetch = git
+pkg_pgsql_repo = https://github.com/semiocast/pgsql
+pkg_pgsql_commit = master
+
+PACKAGES += pkgx
+pkg_pkgx_name = pkgx
+pkg_pkgx_description = Build .deb packages from Erlang releases
+pkg_pkgx_homepage = https://github.com/arjan/pkgx
+pkg_pkgx_fetch = git
+pkg_pkgx_repo = https://github.com/arjan/pkgx
+pkg_pkgx_commit = master
+
+PACKAGES += pkt
+pkg_pkt_name = pkt
+pkg_pkt_description = Erlang network protocol library
+pkg_pkt_homepage = https://github.com/msantos/pkt
+pkg_pkt_fetch = git
+pkg_pkt_repo = https://github.com/msantos/pkt
+pkg_pkt_commit = master
+
+PACKAGES += plain_fsm
+pkg_plain_fsm_name = plain_fsm
+pkg_plain_fsm_description = A behaviour/support library for writing plain Erlang FSMs.
+pkg_plain_fsm_homepage = https://github.com/uwiger/plain_fsm
+pkg_plain_fsm_fetch = git
+pkg_plain_fsm_repo = https://github.com/uwiger/plain_fsm
+pkg_plain_fsm_commit = master
+
+PACKAGES += plumtree
+pkg_plumtree_name = plumtree
+pkg_plumtree_description = Epidemic Broadcast Trees
+pkg_plumtree_homepage = https://github.com/helium/plumtree
+pkg_plumtree_fetch = git
+pkg_plumtree_repo = https://github.com/helium/plumtree
+pkg_plumtree_commit = master
+
+PACKAGES += pmod_transform
+pkg_pmod_transform_name = pmod_transform
+pkg_pmod_transform_description = Parse transform for parameterized modules
+pkg_pmod_transform_homepage = https://github.com/erlang/pmod_transform
+pkg_pmod_transform_fetch = git
+pkg_pmod_transform_repo = https://github.com/erlang/pmod_transform
+pkg_pmod_transform_commit = master
+
+PACKAGES += pobox
+pkg_pobox_name = pobox
+pkg_pobox_description = External buffer processes to protect against mailbox overflow in Erlang
+pkg_pobox_homepage = https://github.com/ferd/pobox
+pkg_pobox_fetch = git
+pkg_pobox_repo = https://github.com/ferd/pobox
+pkg_pobox_commit = master
+
+PACKAGES += ponos
+pkg_ponos_name = ponos
+pkg_ponos_description = ponos is a simple yet powerful load generator written in Erlang
+pkg_ponos_homepage = https://github.com/klarna/ponos
+pkg_ponos_fetch = git
+pkg_ponos_repo = https://github.com/klarna/ponos
+pkg_ponos_commit = master
+
+PACKAGES += poolboy
+pkg_poolboy_name = poolboy
+pkg_poolboy_description = A hunky Erlang worker pool factory
+pkg_poolboy_homepage = https://github.com/devinus/poolboy
+pkg_poolboy_fetch = git
+pkg_poolboy_repo = https://github.com/devinus/poolboy
+pkg_poolboy_commit = master
+
+PACKAGES += pooler
+pkg_pooler_name = pooler
+pkg_pooler_description = An OTP Process Pool Application
+pkg_pooler_homepage = https://github.com/seth/pooler
+pkg_pooler_fetch = git
+pkg_pooler_repo = https://github.com/seth/pooler
+pkg_pooler_commit = master
+
+PACKAGES += pqueue
+pkg_pqueue_name = pqueue
+pkg_pqueue_description = Erlang Priority Queues
+pkg_pqueue_homepage = https://github.com/okeuday/pqueue
+pkg_pqueue_fetch = git
+pkg_pqueue_repo = https://github.com/okeuday/pqueue
+pkg_pqueue_commit = master
+
+PACKAGES += procket
+pkg_procket_name = procket
+pkg_procket_description = Erlang interface to low level socket operations
+pkg_procket_homepage = http://blog.listincomprehension.com/search/label/procket
+pkg_procket_fetch = git
+pkg_procket_repo = https://github.com/msantos/procket
+pkg_procket_commit = master
+
+PACKAGES += prometheus
+pkg_prometheus_name = prometheus
+pkg_prometheus_description = Prometheus.io client in Erlang
+pkg_prometheus_homepage = https://github.com/deadtrickster/prometheus.erl
+pkg_prometheus_fetch = git
+pkg_prometheus_repo = https://github.com/deadtrickster/prometheus.erl
+pkg_prometheus_commit = master
+
+PACKAGES += prop
+pkg_prop_name = prop
+pkg_prop_description = An Erlang code scaffolding and generator system.
+pkg_prop_homepage = https://github.com/nuex/prop
+pkg_prop_fetch = git
+pkg_prop_repo = https://github.com/nuex/prop
+pkg_prop_commit = master
+
+PACKAGES += proper
+pkg_proper_name = proper
+pkg_proper_description = PropEr: a QuickCheck-inspired property-based testing tool for Erlang.
+pkg_proper_homepage = http://proper.softlab.ntua.gr
+pkg_proper_fetch = git
+pkg_proper_repo = https://github.com/manopapad/proper
+pkg_proper_commit = master
+
+PACKAGES += props
+pkg_props_name = props
+pkg_props_description = Property structure library
+pkg_props_homepage = https://github.com/greyarea/props
+pkg_props_fetch = git
+pkg_props_repo = https://github.com/greyarea/props
+pkg_props_commit = master
+
+PACKAGES += protobuffs
+pkg_protobuffs_name = protobuffs
+pkg_protobuffs_description = An implementation of Google's Protocol Buffers for Erlang, based on ngerakines/erlang_protobuffs.
+pkg_protobuffs_homepage = https://github.com/basho/erlang_protobuffs
+pkg_protobuffs_fetch = git
+pkg_protobuffs_repo = https://github.com/basho/erlang_protobuffs
+pkg_protobuffs_commit = master
+
+PACKAGES += psycho
+pkg_psycho_name = psycho
+pkg_psycho_description = HTTP server that provides a WSGI-like interface for applications and middleware.
+pkg_psycho_homepage = https://github.com/gar1t/psycho
+pkg_psycho_fetch = git
+pkg_psycho_repo = https://github.com/gar1t/psycho
+pkg_psycho_commit = master
+
+PACKAGES += purity
+pkg_purity_name = purity
+pkg_purity_description = A side-effect analyzer for Erlang
+pkg_purity_homepage = https://github.com/mpitid/purity
+pkg_purity_fetch = git
+pkg_purity_repo = https://github.com/mpitid/purity
+pkg_purity_commit = master
+
+PACKAGES += push_service
+pkg_push_service_name = push_service
+pkg_push_service_description = Push service
+pkg_push_service_homepage = https://github.com/hairyhum/push_service
+pkg_push_service_fetch = git
+pkg_push_service_repo = https://github.com/hairyhum/push_service
+pkg_push_service_commit = master
+
+PACKAGES += qdate
+pkg_qdate_name = qdate
+pkg_qdate_description = Date, time, and timezone parsing, formatting, and conversion for Erlang.
+pkg_qdate_homepage = https://github.com/choptastic/qdate
+pkg_qdate_fetch = git
+pkg_qdate_repo = https://github.com/choptastic/qdate
+pkg_qdate_commit = master
+
+PACKAGES += qrcode
+pkg_qrcode_name = qrcode
+pkg_qrcode_description = QR Code encoder in Erlang
+pkg_qrcode_homepage = https://github.com/komone/qrcode
+pkg_qrcode_fetch = git
+pkg_qrcode_repo = https://github.com/komone/qrcode
+pkg_qrcode_commit = master
+
+PACKAGES += quest
+pkg_quest_name = quest
+pkg_quest_description = Learn Erlang through this set of challenges. An interactive system for getting to know Erlang.
+pkg_quest_homepage = https://github.com/eriksoe/ErlangQuest
+pkg_quest_fetch = git
+pkg_quest_repo = https://github.com/eriksoe/ErlangQuest
+pkg_quest_commit = master
+
+PACKAGES += quickrand
+pkg_quickrand_name = quickrand
+pkg_quickrand_description = Quick Erlang Random Number Generation
+pkg_quickrand_homepage = https://github.com/okeuday/quickrand
+pkg_quickrand_fetch = git
+pkg_quickrand_repo = https://github.com/okeuday/quickrand
+pkg_quickrand_commit = master
+
+PACKAGES += rabbit
+pkg_rabbit_name = rabbit
+pkg_rabbit_description = RabbitMQ Server
+pkg_rabbit_homepage = https://www.rabbitmq.com/
+pkg_rabbit_fetch = git
+pkg_rabbit_repo = https://github.com/rabbitmq/rabbitmq-server.git
+pkg_rabbit_commit = master
+
+PACKAGES += rabbit_exchange_type_riak
+pkg_rabbit_exchange_type_riak_name = rabbit_exchange_type_riak
+pkg_rabbit_exchange_type_riak_description = Custom RabbitMQ exchange type for sticking messages in Riak
+pkg_rabbit_exchange_type_riak_homepage = https://github.com/jbrisbin/riak-exchange
+pkg_rabbit_exchange_type_riak_fetch = git
+pkg_rabbit_exchange_type_riak_repo = https://github.com/jbrisbin/riak-exchange
+pkg_rabbit_exchange_type_riak_commit = master
+
+PACKAGES += rack
+pkg_rack_name = rack
+pkg_rack_description = Rack handler for Erlang
+pkg_rack_homepage = https://github.com/erlyvideo/rack
+pkg_rack_fetch = git
+pkg_rack_repo = https://github.com/erlyvideo/rack
+pkg_rack_commit = master
+
+PACKAGES += radierl
+pkg_radierl_name = radierl
+pkg_radierl_description = RADIUS protocol stack implemented in Erlang.
+pkg_radierl_homepage = https://github.com/vances/radierl
+pkg_radierl_fetch = git
+pkg_radierl_repo = https://github.com/vances/radierl
+pkg_radierl_commit = master
+
+PACKAGES += rafter
+pkg_rafter_name = rafter
+pkg_rafter_description = An Erlang library application which implements the Raft consensus protocol
+pkg_rafter_homepage = https://github.com/andrewjstone/rafter
+pkg_rafter_fetch = git
+pkg_rafter_repo = https://github.com/andrewjstone/rafter
+pkg_rafter_commit = master
+
+PACKAGES += ranch
+pkg_ranch_name = ranch
+pkg_ranch_description = Socket acceptor pool for TCP protocols.
+pkg_ranch_homepage = http://ninenines.eu
+pkg_ranch_fetch = git
+pkg_ranch_repo = https://github.com/ninenines/ranch
+pkg_ranch_commit = 1.2.1
+
+PACKAGES += rbeacon
+pkg_rbeacon_name = rbeacon
+pkg_rbeacon_description = LAN discovery and presence in Erlang.
+pkg_rbeacon_homepage = https://github.com/refuge/rbeacon
+pkg_rbeacon_fetch = git
+pkg_rbeacon_repo = https://github.com/refuge/rbeacon
+pkg_rbeacon_commit = master
+
+PACKAGES += rebar
+pkg_rebar_name = rebar
+pkg_rebar_description = Erlang build tool that makes it easy to compile and test Erlang applications, port drivers and releases.
+pkg_rebar_homepage = http://www.rebar3.org
+pkg_rebar_fetch = git
+pkg_rebar_repo = https://github.com/rebar/rebar3
+pkg_rebar_commit = master
+
+PACKAGES += rebus
+pkg_rebus_name = rebus
+pkg_rebus_description = A stupid simple, internal, pub/sub event bus written in and for Erlang.
+pkg_rebus_homepage = https://github.com/olle/rebus
+pkg_rebus_fetch = git
+pkg_rebus_repo = https://github.com/olle/rebus
+pkg_rebus_commit = master
+
+PACKAGES += rec2json
+pkg_rec2json_name = rec2json
+pkg_rec2json_description = Compile Erlang record definitions into modules to convert them to/from JSON easily.
+pkg_rec2json_homepage = https://github.com/lordnull/rec2json
+pkg_rec2json_fetch = git
+pkg_rec2json_repo = https://github.com/lordnull/rec2json
+pkg_rec2json_commit = master
+
+PACKAGES += recon
+pkg_recon_name = recon
+pkg_recon_description = Collection of functions and scripts to debug Erlang in production.
+pkg_recon_homepage = https://github.com/ferd/recon
+pkg_recon_fetch = git
+pkg_recon_repo = https://github.com/ferd/recon
+pkg_recon_commit = master
+
+PACKAGES += record_info
+pkg_record_info_name = record_info
+pkg_record_info_description = Convert between records and proplists
+pkg_record_info_homepage = https://github.com/bipthelin/erlang-record_info
+pkg_record_info_fetch = git
+pkg_record_info_repo = https://github.com/bipthelin/erlang-record_info
+pkg_record_info_commit = master
+
+PACKAGES += redgrid
+pkg_redgrid_name = redgrid
+pkg_redgrid_description = automatic Erlang node discovery via Redis
+pkg_redgrid_homepage = https://github.com/jkvor/redgrid
+pkg_redgrid_fetch = git
+pkg_redgrid_repo = https://github.com/jkvor/redgrid
+pkg_redgrid_commit = master
+
+PACKAGES += redo
+pkg_redo_name = redo
+pkg_redo_description = pipelined Erlang Redis client
+pkg_redo_homepage = https://github.com/jkvor/redo
+pkg_redo_fetch = git
+pkg_redo_repo = https://github.com/jkvor/redo
+pkg_redo_commit = master
+
+PACKAGES += reload_mk
+pkg_reload_mk_name = reload_mk
+pkg_reload_mk_description = Live reload plugin for erlang.mk.
+pkg_reload_mk_homepage = https://github.com/bullno1/reload.mk
+pkg_reload_mk_fetch = git
+pkg_reload_mk_repo = https://github.com/bullno1/reload.mk
+pkg_reload_mk_commit = master
+
+PACKAGES += reltool_util
+pkg_reltool_util_name = reltool_util
+pkg_reltool_util_description = Erlang reltool utility application
+pkg_reltool_util_homepage = https://github.com/okeuday/reltool_util
+pkg_reltool_util_fetch = git
+pkg_reltool_util_repo = https://github.com/okeuday/reltool_util
+pkg_reltool_util_commit = master
+
+PACKAGES += relx
+pkg_relx_name = relx
+pkg_relx_description = Sane, simple release creation for Erlang
+pkg_relx_homepage = https://github.com/erlware/relx
+pkg_relx_fetch = git
+pkg_relx_repo = https://github.com/erlware/relx
+pkg_relx_commit = master
+
+PACKAGES += resource_discovery
+pkg_resource_discovery_name = resource_discovery
+pkg_resource_discovery_description = An application used to dynamically discover resources present in an Erlang node cluster.
+pkg_resource_discovery_homepage = http://erlware.org/
+pkg_resource_discovery_fetch = git
+pkg_resource_discovery_repo = https://github.com/erlware/resource_discovery
+pkg_resource_discovery_commit = master
+
+PACKAGES += restc
+pkg_restc_name = restc
+pkg_restc_description = Erlang REST client
+pkg_restc_homepage = https://github.com/kivra/restclient
+pkg_restc_fetch = git
+pkg_restc_repo = https://github.com/kivra/restclient
+pkg_restc_commit = master
+
+PACKAGES += rfc4627_jsonrpc
+pkg_rfc4627_jsonrpc_name = rfc4627_jsonrpc
+pkg_rfc4627_jsonrpc_description = Erlang RFC4627 (JSON) codec and JSON-RPC server implementation.
+pkg_rfc4627_jsonrpc_homepage = https://github.com/tonyg/erlang-rfc4627
+pkg_rfc4627_jsonrpc_fetch = git
+pkg_rfc4627_jsonrpc_repo = https://github.com/tonyg/erlang-rfc4627
+pkg_rfc4627_jsonrpc_commit = master
+
+PACKAGES += riak_control
+pkg_riak_control_name = riak_control
+pkg_riak_control_description = Webmachine-based administration interface for Riak.
+pkg_riak_control_homepage = https://github.com/basho/riak_control
+pkg_riak_control_fetch = git
+pkg_riak_control_repo = https://github.com/basho/riak_control
+pkg_riak_control_commit = master
+
+PACKAGES += riak_core
+pkg_riak_core_name = riak_core
+pkg_riak_core_description = Distributed systems infrastructure used by Riak.
+pkg_riak_core_homepage = https://github.com/basho/riak_core
+pkg_riak_core_fetch = git
+pkg_riak_core_repo = https://github.com/basho/riak_core
+pkg_riak_core_commit = master
+
+PACKAGES += riak_dt
+pkg_riak_dt_name = riak_dt
+pkg_riak_dt_description = Convergent replicated datatypes in Erlang
+pkg_riak_dt_homepage = https://github.com/basho/riak_dt
+pkg_riak_dt_fetch = git
+pkg_riak_dt_repo = https://github.com/basho/riak_dt
+pkg_riak_dt_commit = master
+
+PACKAGES += riak_ensemble
+pkg_riak_ensemble_name = riak_ensemble
+pkg_riak_ensemble_description = Multi-Paxos framework in Erlang
+pkg_riak_ensemble_homepage = https://github.com/basho/riak_ensemble
+pkg_riak_ensemble_fetch = git
+pkg_riak_ensemble_repo = https://github.com/basho/riak_ensemble
+pkg_riak_ensemble_commit = master
+
+PACKAGES += riak_kv
+pkg_riak_kv_name = riak_kv
+pkg_riak_kv_description = Riak Key/Value Store
+pkg_riak_kv_homepage = https://github.com/basho/riak_kv
+pkg_riak_kv_fetch = git
+pkg_riak_kv_repo = https://github.com/basho/riak_kv
+pkg_riak_kv_commit = master
+
+PACKAGES += riak_pg
+pkg_riak_pg_name = riak_pg
+pkg_riak_pg_description = Distributed process groups with riak_core.
+pkg_riak_pg_homepage = https://github.com/cmeiklejohn/riak_pg
+pkg_riak_pg_fetch = git
+pkg_riak_pg_repo = https://github.com/cmeiklejohn/riak_pg
+pkg_riak_pg_commit = master
+
+PACKAGES += riak_pipe
+pkg_riak_pipe_name = riak_pipe
+pkg_riak_pipe_description = Riak Pipelines
+pkg_riak_pipe_homepage = https://github.com/basho/riak_pipe
+pkg_riak_pipe_fetch = git
+pkg_riak_pipe_repo = https://github.com/basho/riak_pipe
+pkg_riak_pipe_commit = master
+
+PACKAGES += riak_sysmon
+pkg_riak_sysmon_name = riak_sysmon
+pkg_riak_sysmon_description = Simple OTP app for managing Erlang VM system_monitor event messages
+pkg_riak_sysmon_homepage = https://github.com/basho/riak_sysmon
+pkg_riak_sysmon_fetch = git
+pkg_riak_sysmon_repo = https://github.com/basho/riak_sysmon
+pkg_riak_sysmon_commit = master
+
+PACKAGES += riak_test
+pkg_riak_test_name = riak_test
+pkg_riak_test_description = I'm in your cluster, testing your riaks
+pkg_riak_test_homepage = https://github.com/basho/riak_test
+pkg_riak_test_fetch = git
+pkg_riak_test_repo = https://github.com/basho/riak_test
+pkg_riak_test_commit = master
+
+PACKAGES += riakc
+pkg_riakc_name = riakc
+pkg_riakc_description = Erlang clients for Riak.
+pkg_riakc_homepage = https://github.com/basho/riak-erlang-client
+pkg_riakc_fetch = git
+pkg_riakc_repo = https://github.com/basho/riak-erlang-client
+pkg_riakc_commit = master
+
+PACKAGES += riakhttpc
+pkg_riakhttpc_name = riakhttpc
+pkg_riakhttpc_description = Riak Erlang client using the HTTP interface
+pkg_riakhttpc_homepage = https://github.com/basho/riak-erlang-http-client
+pkg_riakhttpc_fetch = git
+pkg_riakhttpc_repo = https://github.com/basho/riak-erlang-http-client
+pkg_riakhttpc_commit = master
+
+PACKAGES += riaknostic
+pkg_riaknostic_name = riaknostic
+pkg_riaknostic_description = A diagnostic tool for Riak installations, to find common errors asap
+pkg_riaknostic_homepage = https://github.com/basho/riaknostic
+pkg_riaknostic_fetch = git
+pkg_riaknostic_repo = https://github.com/basho/riaknostic
+pkg_riaknostic_commit = master
+
+PACKAGES += riakpool
+pkg_riakpool_name = riakpool
+pkg_riakpool_description = Erlang Riak client pool
+pkg_riakpool_homepage = https://github.com/dweldon/riakpool
+pkg_riakpool_fetch = git
+pkg_riakpool_repo = https://github.com/dweldon/riakpool
+pkg_riakpool_commit = master
+
+PACKAGES += rivus_cep
+pkg_rivus_cep_name = rivus_cep
+pkg_rivus_cep_description = Complex event processing in Erlang
+pkg_rivus_cep_homepage = https://github.com/vascokk/rivus_cep
+pkg_rivus_cep_fetch = git
+pkg_rivus_cep_repo = https://github.com/vascokk/rivus_cep
+pkg_rivus_cep_commit = master
+
+PACKAGES += rlimit
+pkg_rlimit_name = rlimit
+pkg_rlimit_description = Magnus Klaar's rate limiter code from etorrent
+pkg_rlimit_homepage = https://github.com/jlouis/rlimit
+pkg_rlimit_fetch = git
+pkg_rlimit_repo = https://github.com/jlouis/rlimit
+pkg_rlimit_commit = master
+
+PACKAGES += rust_mk
+pkg_rust_mk_name = rust_mk
+pkg_rust_mk_description = Build Rust crates in an Erlang application
+pkg_rust_mk_homepage = https://github.com/goertzenator/rust.mk
+pkg_rust_mk_fetch = git
+pkg_rust_mk_repo = https://github.com/goertzenator/rust.mk
+pkg_rust_mk_commit = master
+
+PACKAGES += safetyvalve
+pkg_safetyvalve_name = safetyvalve
+pkg_safetyvalve_description = A safety valve for your Erlang node
+pkg_safetyvalve_homepage = https://github.com/jlouis/safetyvalve
+pkg_safetyvalve_fetch = git
+pkg_safetyvalve_repo = https://github.com/jlouis/safetyvalve
+pkg_safetyvalve_commit = master
+
+PACKAGES += seestar
+pkg_seestar_name = seestar
+pkg_seestar_description = The Erlang client for the Cassandra 1.2+ binary protocol
+pkg_seestar_homepage = https://github.com/iamaleksey/seestar
+pkg_seestar_fetch = git
+pkg_seestar_repo = https://github.com/iamaleksey/seestar
+pkg_seestar_commit = master
+
+PACKAGES += service
+pkg_service_name = service
+pkg_service_description = A minimal Erlang behavior for creating CloudI internal services
+pkg_service_homepage = http://cloudi.org/
+pkg_service_fetch = git
+pkg_service_repo = https://github.com/CloudI/service
+pkg_service_commit = master
+
+PACKAGES += setup
+pkg_setup_name = setup
+pkg_setup_description = Generic setup utility for Erlang-based systems
+pkg_setup_homepage = https://github.com/uwiger/setup
+pkg_setup_fetch = git
+pkg_setup_repo = https://github.com/uwiger/setup
+pkg_setup_commit = master
+
+PACKAGES += sext
+pkg_sext_name = sext
+pkg_sext_description = Sortable Erlang Term Serialization
+pkg_sext_homepage = https://github.com/uwiger/sext
+pkg_sext_fetch = git
+pkg_sext_repo = https://github.com/uwiger/sext
+pkg_sext_commit = master
+
+PACKAGES += sfmt
+pkg_sfmt_name = sfmt
+pkg_sfmt_description = SFMT pseudo random number generator for Erlang.
+pkg_sfmt_homepage = https://github.com/jj1bdx/sfmt-erlang
+pkg_sfmt_fetch = git
+pkg_sfmt_repo = https://github.com/jj1bdx/sfmt-erlang
+pkg_sfmt_commit = master
+
+PACKAGES += sgte
+pkg_sgte_name = sgte
+pkg_sgte_description = A simple Erlang Template Engine
+pkg_sgte_homepage = https://github.com/filippo/sgte
+pkg_sgte_fetch = git
+pkg_sgte_repo = https://github.com/filippo/sgte
+pkg_sgte_commit = master
+
+PACKAGES += sheriff
+pkg_sheriff_name = sheriff
+pkg_sheriff_description = Parse transform for type-based validation.
+pkg_sheriff_homepage = http://ninenines.eu
+pkg_sheriff_fetch = git
+pkg_sheriff_repo = https://github.com/extend/sheriff
+pkg_sheriff_commit = master
+
+PACKAGES += shotgun
+pkg_shotgun_name = shotgun
+pkg_shotgun_description = better than just a gun
+pkg_shotgun_homepage = https://github.com/inaka/shotgun
+pkg_shotgun_fetch = git
+pkg_shotgun_repo = https://github.com/inaka/shotgun
+pkg_shotgun_commit = master
+
+PACKAGES += sidejob
+pkg_sidejob_name = sidejob
+pkg_sidejob_description = Parallel worker and capacity limiting library for Erlang
+pkg_sidejob_homepage = https://github.com/basho/sidejob
+pkg_sidejob_fetch = git
+pkg_sidejob_repo = https://github.com/basho/sidejob
+pkg_sidejob_commit = master
+
+PACKAGES += sieve
+pkg_sieve_name = sieve
+pkg_sieve_description = sieve is a simple TCP routing proxy (layer 7) in Erlang
+pkg_sieve_homepage = https://github.com/benoitc/sieve
+pkg_sieve_fetch = git
+pkg_sieve_repo = https://github.com/benoitc/sieve
+pkg_sieve_commit = master
+
+PACKAGES += sighandler
+pkg_sighandler_name = sighandler
+pkg_sighandler_description = Handle UNIX signals in Erlang
+pkg_sighandler_homepage = https://github.com/jkingsbery/sighandler
+pkg_sighandler_fetch = git
+pkg_sighandler_repo = https://github.com/jkingsbery/sighandler
+pkg_sighandler_commit = master
+
+PACKAGES += simhash
+pkg_simhash_name = simhash
+pkg_simhash_description = Simhashing for Erlang -- hashing algorithm to find near-duplicates in binary data.
+pkg_simhash_homepage = https://github.com/ferd/simhash
+pkg_simhash_fetch = git
+pkg_simhash_repo = https://github.com/ferd/simhash
+pkg_simhash_commit = master
+
+PACKAGES += simple_bridge
+pkg_simple_bridge_name = simple_bridge
+pkg_simple_bridge_description = A simple, standardized interface library to Erlang HTTP Servers.
+pkg_simple_bridge_homepage = https://github.com/nitrogen/simple_bridge
+pkg_simple_bridge_fetch = git
+pkg_simple_bridge_repo = https://github.com/nitrogen/simple_bridge
+pkg_simple_bridge_commit = master
+
+PACKAGES += simple_oauth2
+pkg_simple_oauth2_name = simple_oauth2
+pkg_simple_oauth2_description = Simple Erlang OAuth2 client module for any HTTP server framework (Google, Facebook, Yandex, Vkontakte are preconfigured)
+pkg_simple_oauth2_homepage = https://github.com/virtan/simple_oauth2
+pkg_simple_oauth2_fetch = git
+pkg_simple_oauth2_repo = https://github.com/virtan/simple_oauth2
+pkg_simple_oauth2_commit = master
+
+PACKAGES += skel
+pkg_skel_name = skel
+pkg_skel_description = A Streaming Process-based Skeleton Library for Erlang
+pkg_skel_homepage = https://github.com/ParaPhrase/skel
+pkg_skel_fetch = git
+pkg_skel_repo = https://github.com/ParaPhrase/skel
+pkg_skel_commit = master
+
+PACKAGES += slack
+pkg_slack_name = slack
+pkg_slack_description = Minimal Slack notification OTP library.
+pkg_slack_homepage = https://github.com/DonBranson/slack
+pkg_slack_fetch = git
+pkg_slack_repo = https://github.com/DonBranson/slack.git
+pkg_slack_commit = master
+
+PACKAGES += smother
+pkg_smother_name = smother
+pkg_smother_description = Extended code coverage metrics for Erlang.
+pkg_smother_homepage = https://ramsay-t.github.io/Smother/
+pkg_smother_fetch = git
+pkg_smother_repo = https://github.com/ramsay-t/Smother
+pkg_smother_commit = master
+
+PACKAGES += snappyer
+pkg_snappyer_name = snappyer
+pkg_snappyer_description = Snappy as a NIF for Erlang
+pkg_snappyer_homepage = https://github.com/zmstone/snappyer
+pkg_snappyer_fetch = git
+pkg_snappyer_repo = https://github.com/zmstone/snappyer.git
+pkg_snappyer_commit = master
+
+PACKAGES += social
+pkg_social_name = social
+pkg_social_description = Cowboy handler for social login via OAuth2 providers
+pkg_social_homepage = https://github.com/dvv/social
+pkg_social_fetch = git
+pkg_social_repo = https://github.com/dvv/social
+pkg_social_commit = master
+
+PACKAGES += spapi_router
+pkg_spapi_router_name = spapi_router
+pkg_spapi_router_description = Partially-connected Erlang clustering
+pkg_spapi_router_homepage = https://github.com/spilgames/spapi-router
+pkg_spapi_router_fetch = git
+pkg_spapi_router_repo = https://github.com/spilgames/spapi-router
+pkg_spapi_router_commit = master
+
+PACKAGES += sqerl
+pkg_sqerl_name = sqerl
+pkg_sqerl_description = An Erlang-flavoured SQL DSL
+pkg_sqerl_homepage = https://github.com/hairyhum/sqerl
+pkg_sqerl_fetch = git
+pkg_sqerl_repo = https://github.com/hairyhum/sqerl
+pkg_sqerl_commit = master
+
+PACKAGES += srly
+pkg_srly_name = srly
+pkg_srly_description = Native Erlang Unix serial interface
+pkg_srly_homepage = https://github.com/msantos/srly
+pkg_srly_fetch = git
+pkg_srly_repo = https://github.com/msantos/srly
+pkg_srly_commit = master
+
+PACKAGES += sshrpc
+pkg_sshrpc_name = sshrpc
+pkg_sshrpc_description = Erlang SSH RPC module (experimental)
+pkg_sshrpc_homepage = https://github.com/jj1bdx/sshrpc
+pkg_sshrpc_fetch = git
+pkg_sshrpc_repo = https://github.com/jj1bdx/sshrpc
+pkg_sshrpc_commit = master
+
+PACKAGES += stable
+pkg_stable_name = stable
+pkg_stable_description = Library of assorted helpers for the Cowboy web server.
+pkg_stable_homepage = https://github.com/dvv/stable
+pkg_stable_fetch = git
+pkg_stable_repo = https://github.com/dvv/stable
+pkg_stable_commit = master
+
+PACKAGES += statebox
+pkg_statebox_name = statebox
+pkg_statebox_description = Erlang state monad with merge/conflict-resolution capabilities. Useful for Riak.
+pkg_statebox_homepage = https://github.com/mochi/statebox
+pkg_statebox_fetch = git
+pkg_statebox_repo = https://github.com/mochi/statebox
+pkg_statebox_commit = master
+
+PACKAGES += statebox_riak
+pkg_statebox_riak_name = statebox_riak
+pkg_statebox_riak_description = Convenience library that makes it easier to use statebox with riak, extracted from best practices in our production code at Mochi Media.
+pkg_statebox_riak_homepage = https://github.com/mochi/statebox_riak
+pkg_statebox_riak_fetch = git
+pkg_statebox_riak_repo = https://github.com/mochi/statebox_riak
+pkg_statebox_riak_commit = master
+
+PACKAGES += statman
+pkg_statman_name = statman
+pkg_statman_description = Efficiently collect massive volumes of metrics inside the Erlang VM
+pkg_statman_homepage = https://github.com/knutin/statman
+pkg_statman_fetch = git
+pkg_statman_repo = https://github.com/knutin/statman
+pkg_statman_commit = master
+
+PACKAGES += statsderl
+pkg_statsderl_name = statsderl
+pkg_statsderl_description = StatsD client (Erlang)
+pkg_statsderl_homepage = https://github.com/lpgauth/statsderl
+pkg_statsderl_fetch = git
+pkg_statsderl_repo = https://github.com/lpgauth/statsderl
+pkg_statsderl_commit = master
+
+PACKAGES += stdinout_pool
+pkg_stdinout_pool_name = stdinout_pool
+pkg_stdinout_pool_description = stdinout_pool : stuff goes in, stuff goes out. there's never any miscommunication.
+pkg_stdinout_pool_homepage = https://github.com/mattsta/erlang-stdinout-pool
+pkg_stdinout_pool_fetch = git
+pkg_stdinout_pool_repo = https://github.com/mattsta/erlang-stdinout-pool
+pkg_stdinout_pool_commit = master
+
+PACKAGES += stockdb
+pkg_stockdb_name = stockdb
+pkg_stockdb_description = Database for storing stock exchange quotes in Erlang
+pkg_stockdb_homepage = https://github.com/maxlapshin/stockdb
+pkg_stockdb_fetch = git
+pkg_stockdb_repo = https://github.com/maxlapshin/stockdb
+pkg_stockdb_commit = master
+
+PACKAGES += stripe
+pkg_stripe_name = stripe
+pkg_stripe_description = Erlang interface to the stripe.com API
+pkg_stripe_homepage = https://github.com/mattsta/stripe-erlang
+pkg_stripe_fetch = git
+pkg_stripe_repo = https://github.com/mattsta/stripe-erlang
+pkg_stripe_commit = v1
+
+PACKAGES += subproc
+pkg_subproc_name = subproc
+pkg_subproc_description = UNIX subprocess manager with {active,once|false} modes
+pkg_subproc_homepage = http://dozzie.jarowit.net/trac/wiki/subproc
+pkg_subproc_fetch = git
+pkg_subproc_repo = https://github.com/dozzie/subproc
+pkg_subproc_commit = v0.1.0
+
+PACKAGES += supervisor3
+pkg_supervisor3_name = supervisor3
+pkg_supervisor3_description = OTP supervisor with additional strategies
+pkg_supervisor3_homepage = https://github.com/klarna/supervisor3
+pkg_supervisor3_fetch = git
+pkg_supervisor3_repo = https://github.com/klarna/supervisor3.git
+pkg_supervisor3_commit = master
+
+PACKAGES += surrogate
+pkg_surrogate_name = surrogate
+pkg_surrogate_description = Proxy server written in erlang. Supports reverse proxy load balancing and forward proxy with http (including CONNECT), socks4, socks5, and transparent proxy modes.
+pkg_surrogate_homepage = https://github.com/skruger/Surrogate
+pkg_surrogate_fetch = git
+pkg_surrogate_repo = https://github.com/skruger/Surrogate
+pkg_surrogate_commit = master
+
+PACKAGES += swab
+pkg_swab_name = swab
+pkg_swab_description = General purpose buffer handling module
+pkg_swab_homepage = https://github.com/crownedgrouse/swab
+pkg_swab_fetch = git
+pkg_swab_repo = https://github.com/crownedgrouse/swab
+pkg_swab_commit = master
+
+PACKAGES += swarm
+pkg_swarm_name = swarm
+pkg_swarm_description = Fast and simple acceptor pool for Erlang
+pkg_swarm_homepage = https://github.com/jeremey/swarm
+pkg_swarm_fetch = git
+pkg_swarm_repo = https://github.com/jeremey/swarm
+pkg_swarm_commit = master
+
+PACKAGES += switchboard
+pkg_switchboard_name = switchboard
+pkg_switchboard_description = A framework for processing email using worker plugins.
+pkg_switchboard_homepage = https://github.com/thusfresh/switchboard
+pkg_switchboard_fetch = git
+pkg_switchboard_repo = https://github.com/thusfresh/switchboard
+pkg_switchboard_commit = master
+
+PACKAGES += syn
+pkg_syn_name = syn
+pkg_syn_description = A global Process Registry and Process Group manager for Erlang.
+pkg_syn_homepage = https://github.com/ostinelli/syn
+pkg_syn_fetch = git
+pkg_syn_repo = https://github.com/ostinelli/syn
+pkg_syn_commit = master
+
+PACKAGES += sync
+pkg_sync_name = sync
+pkg_sync_description = On-the-fly recompiling and reloading in Erlang.
+pkg_sync_homepage = https://github.com/rustyio/sync
+pkg_sync_fetch = git
+pkg_sync_repo = https://github.com/rustyio/sync
+pkg_sync_commit = master
+
+PACKAGES += syntaxerl
+pkg_syntaxerl_name = syntaxerl
+pkg_syntaxerl_description = Syntax checker for Erlang
+pkg_syntaxerl_homepage = https://github.com/ten0s/syntaxerl
+pkg_syntaxerl_fetch = git
+pkg_syntaxerl_repo = https://github.com/ten0s/syntaxerl
+pkg_syntaxerl_commit = master
+
+PACKAGES += syslog
+pkg_syslog_name = syslog
+pkg_syslog_description = Erlang port driver for interacting with syslog via syslog(3)
+pkg_syslog_homepage = https://github.com/Vagabond/erlang-syslog
+pkg_syslog_fetch = git
+pkg_syslog_repo = https://github.com/Vagabond/erlang-syslog
+pkg_syslog_commit = master
+
+PACKAGES += taskforce
+pkg_taskforce_name = taskforce
+pkg_taskforce_description = Erlang worker pools for controlled parallelisation of arbitrary tasks.
+pkg_taskforce_homepage = https://github.com/g-andrade/taskforce
+pkg_taskforce_fetch = git
+pkg_taskforce_repo = https://github.com/g-andrade/taskforce
+pkg_taskforce_commit = master
+
+PACKAGES += tddreloader
+pkg_tddreloader_name = tddreloader
+pkg_tddreloader_description = Shell utility for recompiling, reloading, and testing code as it changes
+pkg_tddreloader_homepage = https://github.com/version2beta/tddreloader
+pkg_tddreloader_fetch = git
+pkg_tddreloader_repo = https://github.com/version2beta/tddreloader
+pkg_tddreloader_commit = master
+
+PACKAGES += tempo
+pkg_tempo_name = tempo
+pkg_tempo_description = NIF-based date and time parsing and formatting for Erlang.
+pkg_tempo_homepage = https://github.com/selectel/tempo
+pkg_tempo_fetch = git
+pkg_tempo_repo = https://github.com/selectel/tempo
+pkg_tempo_commit = master
+
+PACKAGES += ticktick
+pkg_ticktick_name = ticktick
+pkg_ticktick_description = Ticktick is an ID generator for message services.
+pkg_ticktick_homepage = https://github.com/ericliang/ticktick
+pkg_ticktick_fetch = git
+pkg_ticktick_repo = https://github.com/ericliang/ticktick
+pkg_ticktick_commit = master
+
+PACKAGES += tinymq
+pkg_tinymq_name = tinymq
+pkg_tinymq_description = TinyMQ - a diminutive, in-memory message queue
+pkg_tinymq_homepage = https://github.com/ChicagoBoss/tinymq
+pkg_tinymq_fetch = git
+pkg_tinymq_repo = https://github.com/ChicagoBoss/tinymq
+pkg_tinymq_commit = master
+
+PACKAGES += tinymt
+pkg_tinymt_name = tinymt
+pkg_tinymt_description = TinyMT pseudo random number generator for Erlang.
+pkg_tinymt_homepage = https://github.com/jj1bdx/tinymt-erlang
+pkg_tinymt_fetch = git
+pkg_tinymt_repo = https://github.com/jj1bdx/tinymt-erlang
+pkg_tinymt_commit = master
+
+PACKAGES += tirerl
+pkg_tirerl_name = tirerl
+pkg_tirerl_description = Erlang interface to Elasticsearch
+pkg_tirerl_homepage = https://github.com/inaka/tirerl
+pkg_tirerl_fetch = git
+pkg_tirerl_repo = https://github.com/inaka/tirerl
+pkg_tirerl_commit = master
+
+PACKAGES += toml
+pkg_toml_name = toml
+pkg_toml_description = TOML (0.4.0) config parser
+pkg_toml_homepage = http://dozzie.jarowit.net/trac/wiki/TOML
+pkg_toml_fetch = git
+pkg_toml_repo = https://github.com/dozzie/toml
+pkg_toml_commit = v0.2.0
+
+PACKAGES += traffic_tools
+pkg_traffic_tools_name = traffic_tools
+pkg_traffic_tools_description = Simple traffic limiting library
+pkg_traffic_tools_homepage = https://github.com/systra/traffic_tools
+pkg_traffic_tools_fetch = git
+pkg_traffic_tools_repo = https://github.com/systra/traffic_tools
+pkg_traffic_tools_commit = master
+
+PACKAGES += trails
+pkg_trails_name = trails
+pkg_trails_description = A couple of improvements over Cowboy Routes
+pkg_trails_homepage = http://inaka.github.io/cowboy-trails/
+pkg_trails_fetch = git
+pkg_trails_repo = https://github.com/inaka/cowboy-trails
+pkg_trails_commit = master
+
+PACKAGES += trane
+pkg_trane_name = trane
+pkg_trane_description = SAX-style parser for broken HTML, written in Erlang
+pkg_trane_homepage = https://github.com/massemanet/trane
+pkg_trane_fetch = git
+pkg_trane_repo = https://github.com/massemanet/trane
+pkg_trane_commit = master
+
+PACKAGES += transit
+pkg_transit_name = transit
+pkg_transit_description = transit format for erlang
+pkg_transit_homepage = https://github.com/isaiah/transit-erlang
+pkg_transit_fetch = git
+pkg_transit_repo = https://github.com/isaiah/transit-erlang
+pkg_transit_commit = master
+
+PACKAGES += trie
+pkg_trie_name = trie
+pkg_trie_description = Erlang Trie Implementation
+pkg_trie_homepage = https://github.com/okeuday/trie
+pkg_trie_fetch = git
+pkg_trie_repo = https://github.com/okeuday/trie
+pkg_trie_commit = master
+
+PACKAGES += triq
+pkg_triq_name = triq
+pkg_triq_description = Trifork QuickCheck
+pkg_triq_homepage = https://triq.gitlab.io
+pkg_triq_fetch = git
+pkg_triq_repo = https://gitlab.com/triq/triq.git
+pkg_triq_commit = master
+
+PACKAGES += tunctl
+pkg_tunctl_name = tunctl
+pkg_tunctl_description = Erlang TUN/TAP interface
+pkg_tunctl_homepage = https://github.com/msantos/tunctl
+pkg_tunctl_fetch = git
+pkg_tunctl_repo = https://github.com/msantos/tunctl
+pkg_tunctl_commit = master
+
+PACKAGES += twerl
+pkg_twerl_name = twerl
+pkg_twerl_description = Erlang client for the Twitter Streaming API
+pkg_twerl_homepage = https://github.com/lucaspiller/twerl
+pkg_twerl_fetch = git
+pkg_twerl_repo = https://github.com/lucaspiller/twerl
+pkg_twerl_commit = oauth
+
+PACKAGES += twitter_erlang
+pkg_twitter_erlang_name = twitter_erlang
+pkg_twitter_erlang_description = An Erlang Twitter client
+pkg_twitter_erlang_homepage = https://github.com/ngerakines/erlang_twitter
+pkg_twitter_erlang_fetch = git
+pkg_twitter_erlang_repo = https://github.com/ngerakines/erlang_twitter
+pkg_twitter_erlang_commit = master
+
+PACKAGES += ucol_nif
+pkg_ucol_nif_name = ucol_nif
+pkg_ucol_nif_description = ICU-based collation module for Erlang
+pkg_ucol_nif_homepage = https://github.com/refuge/ucol_nif
+pkg_ucol_nif_fetch = git
+pkg_ucol_nif_repo = https://github.com/refuge/ucol_nif
+pkg_ucol_nif_commit = master
+
+PACKAGES += unicorn
+pkg_unicorn_name = unicorn
+pkg_unicorn_description = Generic configuration server
+pkg_unicorn_homepage = https://github.com/shizzard/unicorn
+pkg_unicorn_fetch = git
+pkg_unicorn_repo = https://github.com/shizzard/unicorn
+pkg_unicorn_commit = master
+
+PACKAGES += unsplit
+pkg_unsplit_name = unsplit
+pkg_unsplit_description = Resolves conflicts in Mnesia after network splits
+pkg_unsplit_homepage = https://github.com/uwiger/unsplit
+pkg_unsplit_fetch = git
+pkg_unsplit_repo = https://github.com/uwiger/unsplit
+pkg_unsplit_commit = master
+
+PACKAGES += uuid
+pkg_uuid_name = uuid
+pkg_uuid_description = Erlang UUID Implementation
+pkg_uuid_homepage = https://github.com/okeuday/uuid
+pkg_uuid_fetch = git
+pkg_uuid_repo = https://github.com/okeuday/uuid
+pkg_uuid_commit = master
+
+PACKAGES += ux
+pkg_ux_name = ux
+pkg_ux_description = Unicode eXtension for Erlang (Strings, Collation)
+pkg_ux_homepage = https://github.com/erlang-unicode/ux
+pkg_ux_fetch = git
+pkg_ux_repo = https://github.com/erlang-unicode/ux
+pkg_ux_commit = master
+
+PACKAGES += vert
+pkg_vert_name = vert
+pkg_vert_description = Erlang binding to libvirt virtualization API
+pkg_vert_homepage = https://github.com/msantos/erlang-libvirt
+pkg_vert_fetch = git
+pkg_vert_repo = https://github.com/msantos/erlang-libvirt
+pkg_vert_commit = master
+
+PACKAGES += verx
+pkg_verx_name = verx
+pkg_verx_description = Erlang implementation of the libvirtd remote protocol
+pkg_verx_homepage = https://github.com/msantos/verx
+pkg_verx_fetch = git
+pkg_verx_repo = https://github.com/msantos/verx
+pkg_verx_commit = master
+
+PACKAGES += vmq_acl
+pkg_vmq_acl_name = vmq_acl
+pkg_vmq_acl_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_acl_homepage = https://verne.mq/
+pkg_vmq_acl_fetch = git
+pkg_vmq_acl_repo = https://github.com/erlio/vmq_acl
+pkg_vmq_acl_commit = master
+
+PACKAGES += vmq_bridge
+pkg_vmq_bridge_name = vmq_bridge
+pkg_vmq_bridge_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_bridge_homepage = https://verne.mq/
+pkg_vmq_bridge_fetch = git
+pkg_vmq_bridge_repo = https://github.com/erlio/vmq_bridge
+pkg_vmq_bridge_commit = master
+
+PACKAGES += vmq_graphite
+pkg_vmq_graphite_name = vmq_graphite
+pkg_vmq_graphite_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_graphite_homepage = https://verne.mq/
+pkg_vmq_graphite_fetch = git
+pkg_vmq_graphite_repo = https://github.com/erlio/vmq_graphite
+pkg_vmq_graphite_commit = master
+
+PACKAGES += vmq_passwd
+pkg_vmq_passwd_name = vmq_passwd
+pkg_vmq_passwd_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_passwd_homepage = https://verne.mq/
+pkg_vmq_passwd_fetch = git
+pkg_vmq_passwd_repo = https://github.com/erlio/vmq_passwd
+pkg_vmq_passwd_commit = master
+
+PACKAGES += vmq_server
+pkg_vmq_server_name = vmq_server
+pkg_vmq_server_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_server_homepage = https://verne.mq/
+pkg_vmq_server_fetch = git
+pkg_vmq_server_repo = https://github.com/erlio/vmq_server
+pkg_vmq_server_commit = master
+
+PACKAGES += vmq_snmp
+pkg_vmq_snmp_name = vmq_snmp
+pkg_vmq_snmp_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_snmp_homepage = https://verne.mq/
+pkg_vmq_snmp_fetch = git
+pkg_vmq_snmp_repo = https://github.com/erlio/vmq_snmp
+pkg_vmq_snmp_commit = master
+
+PACKAGES += vmq_systree
+pkg_vmq_systree_name = vmq_systree
+pkg_vmq_systree_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_systree_homepage = https://verne.mq/
+pkg_vmq_systree_fetch = git
+pkg_vmq_systree_repo = https://github.com/erlio/vmq_systree
+pkg_vmq_systree_commit = master
+
+PACKAGES += vmstats
+pkg_vmstats_name = vmstats
+pkg_vmstats_description = Tiny Erlang app that works in conjunction with statsderl to report Erlang VM metrics to Graphite
+pkg_vmstats_homepage = https://github.com/ferd/vmstats
+pkg_vmstats_fetch = git
+pkg_vmstats_repo = https://github.com/ferd/vmstats
+pkg_vmstats_commit = master
+
+PACKAGES += walrus
+pkg_walrus_name = walrus
+pkg_walrus_description = Walrus - Mustache-like Templating
+pkg_walrus_homepage = https://github.com/devinus/walrus
+pkg_walrus_fetch = git
+pkg_walrus_repo = https://github.com/devinus/walrus
+pkg_walrus_commit = master
+
+PACKAGES += webmachine
+pkg_webmachine_name = webmachine
+pkg_webmachine_description = A REST-based system for building web applications.
+pkg_webmachine_homepage = https://github.com/basho/webmachine
+pkg_webmachine_fetch = git
+pkg_webmachine_repo = https://github.com/basho/webmachine
+pkg_webmachine_commit = master
+
+PACKAGES += websocket_client
+pkg_websocket_client_name = websocket_client
+pkg_websocket_client_description = Erlang websocket client (ws and wss supported)
+pkg_websocket_client_homepage = https://github.com/jeremyong/websocket_client
+pkg_websocket_client_fetch = git
+pkg_websocket_client_repo = https://github.com/jeremyong/websocket_client
+pkg_websocket_client_commit = master
+
+PACKAGES += worker_pool
+pkg_worker_pool_name = worker_pool
+pkg_worker_pool_description = A simple Erlang worker pool
+pkg_worker_pool_homepage = https://github.com/inaka/worker_pool
+pkg_worker_pool_fetch = git
+pkg_worker_pool_repo = https://github.com/inaka/worker_pool
+pkg_worker_pool_commit = master
+
+PACKAGES += wrangler
+pkg_wrangler_name = wrangler
+pkg_wrangler_description = Import of the Wrangler svn repository.
+pkg_wrangler_homepage = http://www.cs.kent.ac.uk/projects/wrangler/Home.html
+pkg_wrangler_fetch = git
+pkg_wrangler_repo = https://github.com/RefactoringTools/wrangler
+pkg_wrangler_commit = master
+
+PACKAGES += wsock
+pkg_wsock_name = wsock
+pkg_wsock_description = Erlang library to build WebSocket clients and servers
+pkg_wsock_homepage = https://github.com/madtrick/wsock
+pkg_wsock_fetch = git
+pkg_wsock_repo = https://github.com/madtrick/wsock
+pkg_wsock_commit = master
+
+PACKAGES += xhttpc
+pkg_xhttpc_name = xhttpc
+pkg_xhttpc_description = Extensible HTTP Client for Erlang
+pkg_xhttpc_homepage = https://github.com/seriyps/xhttpc
+pkg_xhttpc_fetch = git
+pkg_xhttpc_repo = https://github.com/seriyps/xhttpc
+pkg_xhttpc_commit = master
+
+PACKAGES += xref_runner
+pkg_xref_runner_name = xref_runner
+pkg_xref_runner_description = Erlang Xref Runner (inspired by rebar xref)
+pkg_xref_runner_homepage = https://github.com/inaka/xref_runner
+pkg_xref_runner_fetch = git
+pkg_xref_runner_repo = https://github.com/inaka/xref_runner
+pkg_xref_runner_commit = master
+
+PACKAGES += yamerl
+pkg_yamerl_name = yamerl
+pkg_yamerl_description = YAML 1.2 parser in pure Erlang
+pkg_yamerl_homepage = https://github.com/yakaz/yamerl
+pkg_yamerl_fetch = git
+pkg_yamerl_repo = https://github.com/yakaz/yamerl
+pkg_yamerl_commit = master
+
+PACKAGES += yamler
+pkg_yamler_name = yamler
+pkg_yamler_description = libyaml-based YAML loader for Erlang
+pkg_yamler_homepage = https://github.com/goertzenator/yamler
+pkg_yamler_fetch = git
+pkg_yamler_repo = https://github.com/goertzenator/yamler
+pkg_yamler_commit = master
+
+PACKAGES += yaws
+pkg_yaws_name = yaws
+pkg_yaws_description = Yaws webserver
+pkg_yaws_homepage = http://yaws.hyber.org
+pkg_yaws_fetch = git
+pkg_yaws_repo = https://github.com/klacke/yaws
+pkg_yaws_commit = master
+
+PACKAGES += zab_engine
+pkg_zab_engine_name = zab_engine
+pkg_zab_engine_description = Zab protocol implemented in Erlang
+pkg_zab_engine_homepage = https://github.com/xinmingyao/zab_engine
+pkg_zab_engine_fetch = git
+pkg_zab_engine_repo = https://github.com/xinmingyao/zab_engine
+pkg_zab_engine_commit = master
+
+PACKAGES += zabbix_sender
+pkg_zabbix_sender_name = zabbix_sender
+pkg_zabbix_sender_description = Zabbix trapper for sending data to Zabbix in pure Erlang
+pkg_zabbix_sender_homepage = https://github.com/stalkermn/zabbix_sender
+pkg_zabbix_sender_fetch = git
+pkg_zabbix_sender_repo = https://github.com/stalkermn/zabbix_sender.git
+pkg_zabbix_sender_commit = master
+
+PACKAGES += zeta
+pkg_zeta_name = zeta
+pkg_zeta_description = HTTP access log parser in Erlang
+pkg_zeta_homepage = https://github.com/s1n4/zeta
+pkg_zeta_fetch = git
+pkg_zeta_repo = https://github.com/s1n4/zeta
+pkg_zeta_commit = master
+
+PACKAGES += zippers
+pkg_zippers_name = zippers
+pkg_zippers_description = A library for functional zipper data structures in Erlang
+pkg_zippers_homepage = https://github.com/ferd/zippers
+pkg_zippers_fetch = git
+pkg_zippers_repo = https://github.com/ferd/zippers
+pkg_zippers_commit = master
+
+PACKAGES += zlists
+pkg_zlists_name = zlists
+pkg_zlists_description = Erlang lazy lists library.
+pkg_zlists_homepage = https://github.com/vjache/erlang-zlists
+pkg_zlists_fetch = git
+pkg_zlists_repo = https://github.com/vjache/erlang-zlists
+pkg_zlists_commit = master
+
+PACKAGES += zraft_lib
+pkg_zraft_lib_name = zraft_lib
+pkg_zraft_lib_description = Erlang Raft consensus protocol implementation
+pkg_zraft_lib_homepage = https://github.com/dreyk/zraft_lib
+pkg_zraft_lib_fetch = git
+pkg_zraft_lib_repo = https://github.com/dreyk/zraft_lib
+pkg_zraft_lib_commit = master
+
+PACKAGES += zucchini
+pkg_zucchini_name = zucchini
+pkg_zucchini_description = An Erlang INI parser
+pkg_zucchini_homepage = https://github.com/devinus/zucchini
+pkg_zucchini_fetch = git
+pkg_zucchini_repo = https://github.com/devinus/zucchini
+pkg_zucchini_commit = master
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: search
+
+define pkg_print
+ $(verbose) printf "%s\n" \
+ $(if $(call core_eq,$(1),$(pkg_$(1)_name)),,"Pkg name: $(1)") \
+ "App name: $(pkg_$(1)_name)" \
+ "Description: $(pkg_$(1)_description)" \
+ "Home page: $(pkg_$(1)_homepage)" \
+ "Fetch with: $(pkg_$(1)_fetch)" \
+ "Repository: $(pkg_$(1)_repo)" \
+ "Commit: $(pkg_$(1)_commit)" \
+ ""
+
+endef
+
+search:
+ifdef q
+ $(foreach p,$(PACKAGES), \
+ $(if $(findstring $(call core_lc,$(q)),$(call core_lc,$(pkg_$(p)_name) $(pkg_$(p)_description))), \
+ $(call pkg_print,$(p))))
+else
+ $(foreach p,$(PACKAGES),$(call pkg_print,$(p)))
+endif
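+
+# Illustrative usage of the search target above: q filters the package
+# index by a case-insensitive match on name and description, for example
+#   make search q=websocket
+# while omitting q prints every known package.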
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-deps clean-tmp-deps.log
+
+# Configuration.
+
+ifdef OTP_DEPS
+$(warning The variable OTP_DEPS is deprecated in favor of LOCAL_DEPS.)
+endif
+
+IGNORE_DEPS ?=
+export IGNORE_DEPS
+
+APPS_DIR ?= $(CURDIR)/apps
+export APPS_DIR
+
+DEPS_DIR ?= $(CURDIR)/deps
+export DEPS_DIR
+
+REBAR_DEPS_DIR = $(DEPS_DIR)
+export REBAR_DEPS_DIR
+
+REBAR_GIT ?= https://github.com/rebar/rebar
+REBAR_COMMIT ?= 576e12171ab8d69b048b827b92aa65d067deea01
+
+# External "early" plugins (see core/plugins.mk for regular plugins).
+# They both use the core_dep_plugin macro.
+
+define core_dep_plugin
+ifeq ($(2),$(PROJECT))
+-include $$(patsubst $(PROJECT)/%,%,$(1))
+else
+-include $(DEPS_DIR)/$(1)
+
+$(DEPS_DIR)/$(1): $(DEPS_DIR)/$(2) ;
+endif
+endef
+
+DEP_EARLY_PLUGINS ?=
+
+$(foreach p,$(DEP_EARLY_PLUGINS),\
+ $(eval $(if $(findstring /,$p),\
+ $(call core_dep_plugin,$p,$(firstword $(subst /, ,$p))),\
+ $(call core_dep_plugin,$p/early-plugins.mk,$p))))
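+
+# Illustrative examples (my_dep is a hypothetical dependency name): a bare
+# name loads $(DEPS_DIR)/my_dep/early-plugins.mk, while an entry containing
+# a slash is used as an explicit file path relative to $(DEPS_DIR), with the
+# first path component taken as the dependency name:
+#   DEP_EARLY_PLUGINS = my_dep
+#   DEP_EARLY_PLUGINS = my_dep/mk/early-plugins.mk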
+
+# Query functions.
+
+query_fetch_method = $(if $(dep_$(1)),$(call _qfm_dep,$(word 1,$(dep_$(1)))),$(call _qfm_pkg,$(1)))
+_qfm_dep = $(if $(dep_fetch_$(1)),$(1),$(if $(IS_DEP),legacy,fail))
+_qfm_pkg = $(if $(pkg_$(1)_fetch),$(pkg_$(1)_fetch),fail)
+
+query_name = $(if $(dep_$(1)),$(1),$(if $(pkg_$(1)_name),$(pkg_$(1)_name),$(1)))
+
+query_repo = $(call _qr,$(1),$(call query_fetch_method,$(1)))
+_qr = $(if $(query_repo_$(2)),$(call query_repo_$(2),$(1)),$(call dep_repo,$(1)))
+
+query_repo_default = $(if $(dep_$(1)),$(word 2,$(dep_$(1))),$(pkg_$(1)_repo))
+query_repo_git = $(patsubst git://github.com/%,https://github.com/%,$(call query_repo_default,$(1)))
+query_repo_git-subfolder = $(call query_repo_git,$(1))
+query_repo_git-submodule = -
+query_repo_hg = $(call query_repo_default,$(1))
+query_repo_svn = $(call query_repo_default,$(1))
+query_repo_cp = $(call query_repo_default,$(1))
+query_repo_ln = $(call query_repo_default,$(1))
+query_repo_hex = https://hex.pm/packages/$(if $(word 3,$(dep_$(1))),$(word 3,$(dep_$(1))),$(1))
+query_repo_fail = -
+query_repo_legacy = -
+
+query_version = $(call _qv,$(1),$(call query_fetch_method,$(1)))
+_qv = $(if $(query_version_$(2)),$(call query_version_$(2),$(1)),$(call dep_commit,$(1)))
+
+query_version_default = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(word 3,$(dep_$(1))),$(pkg_$(1)_commit)))
+query_version_git = $(call query_version_default,$(1))
+query_version_git-subfolder = $(call query_version_git,$(1))
+query_version_git-submodule = -
+query_version_hg = $(call query_version_default,$(1))
+query_version_svn = -
+query_version_cp = -
+query_version_ln = -
+query_version_hex = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(word 2,$(dep_$(1))),$(pkg_$(1)_commit)))
+query_version_fail = -
+query_version_legacy = -
+
+query_extra = $(call _qe,$(1),$(call query_fetch_method,$(1)))
+_qe = $(if $(query_extra_$(2)),$(call query_extra_$(2),$(1)),-)
+
+query_extra_git = -
+query_extra_git-subfolder = $(if $(dep_$(1)),subfolder=$(word 4,$(dep_$(1))),-)
+query_extra_git-submodule = -
+query_extra_hg = -
+query_extra_svn = -
+query_extra_cp = -
+query_extra_ln = -
+query_extra_hex = $(if $(dep_$(1)),package-name=$(word 3,$(dep_$(1))),-)
+query_extra_fail = -
+query_extra_legacy = -
+
+query_absolute_path = $(addprefix $(DEPS_DIR)/,$(call query_name,$(1)))
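+
+# Illustrative example: a dependency declared as
+#   dep_cowlib = git https://github.com/ninenines/cowlib 2.11.0
+# yields git from query_fetch_method, the repository URL from query_repo,
+# the commit/tag (2.11.0) from query_version and $(DEPS_DIR)/cowlib from
+# query_absolute_path.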
+
+# Deprecated legacy query functions.
+dep_fetch = $(call query_fetch_method,$(1))
+dep_name = $(call query_name,$(1))
+dep_repo = $(call query_repo_git,$(1))
+dep_commit = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(if $(filter hex,$(word 1,$(dep_$(1)))),$(word 2,$(dep_$(1))),$(word 3,$(dep_$(1)))),$(pkg_$(1)_commit)))
+
+LOCAL_DEPS_DIRS = $(foreach a,$(LOCAL_DEPS),$(if $(wildcard $(APPS_DIR)/$(a)),$(APPS_DIR)/$(a)))
+ALL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(foreach dep,$(filter-out $(IGNORE_DEPS),$(BUILD_DEPS) $(DEPS)),$(call dep_name,$(dep))))
+
+# When an app is built directly we don't want to include it here,
+# otherwise it would be treated both as an app and as the top-level project.
+ALL_APPS_DIRS = $(if $(wildcard $(APPS_DIR)/),$(filter-out $(APPS_DIR),$(shell find $(APPS_DIR) -maxdepth 1 -type d)))
+ifdef ROOT_DIR
+ifndef IS_APP
+ALL_APPS_DIRS := $(filter-out $(APPS_DIR)/$(notdir $(CURDIR)),$(ALL_APPS_DIRS))
+endif
+endif
+
+ifeq ($(filter $(APPS_DIR) $(DEPS_DIR),$(subst :, ,$(ERL_LIBS))),)
+ifeq ($(ERL_LIBS),)
+ ERL_LIBS = $(APPS_DIR):$(DEPS_DIR)
+else
+ ERL_LIBS := $(ERL_LIBS):$(APPS_DIR):$(DEPS_DIR)
+endif
+endif
+export ERL_LIBS
+
+export NO_AUTOPATCH
+
+# Verbosity.
+
+dep_verbose_0 = @echo " DEP $1 ($(call dep_commit,$1))";
+dep_verbose_2 = set -x;
+dep_verbose = $(dep_verbose_$(V))
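+
+# As with the other *_verbose helpers, V selects the verbosity level
+# (illustrative): V=0 (default) prints terse "DEP" lines, V=1 echoes the
+# raw shell commands, and V=2 additionally enables shell tracing:
+#   make V=2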
+
+# Optimization: don't recompile deps unless truly necessary.
+
+ifndef IS_DEP
+ifneq ($(MAKELEVEL),0)
+$(shell rm -f ebin/dep_built)
+endif
+endif
+
+# Core targets.
+
+ALL_APPS_DIRS_TO_BUILD = $(if $(LOCAL_DEPS_DIRS)$(IS_APP),$(LOCAL_DEPS_DIRS),$(ALL_APPS_DIRS))
+
+apps:: $(ALL_APPS_DIRS) clean-tmp-deps.log | $(ERLANG_MK_TMP)
+# Create ebin directory for all apps to make sure Erlang recognizes them
+# as proper OTP applications when using -include_lib. This is a temporary
+# fix; a proper fix would be to compile apps/* in the right order.
+ifndef IS_APP
+ifneq ($(ALL_APPS_DIRS),)
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ mkdir -p $$dep/ebin; \
+ done
+endif
+endif
+# At the toplevel: if LOCAL_DEPS is defined with at least one local app, only
+# compile that list of apps. Otherwise, compile everything.
+# Within an app: compile all LOCAL_DEPS that are (uncompiled) local apps.
+ifneq ($(ALL_APPS_DIRS_TO_BUILD),)
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS_TO_BUILD); do \
+ if grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/apps.log; then \
+ :; \
+ else \
+ echo $$dep >> $(ERLANG_MK_TMP)/apps.log; \
+ $(MAKE) -C $$dep $(if $(IS_TEST),test-build-app) IS_APP=1; \
+ fi \
+ done
+endif
+
+clean-tmp-deps.log:
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_TMP)/apps.log $(ERLANG_MK_TMP)/deps.log
+endif
+
+# Erlang.mk does not rebuild dependencies after they have been compiled
+# once. A developer working on the top-level project and some of its
+# dependencies at the same time may want to change this behavior.
+# There are two solutions (see the examples below):
+# 1. Set `FULL=1` so that all dependencies are visited and
+# recursively recompiled if necessary.
+# 2. Set `FORCE_REBUILD=` to the specific list of dependencies that
+# should be recompiled (instead of the whole set).
+
+FORCE_REBUILD ?=
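+
+# Illustrative invocations of the two solutions described above:
+#   make FULL=1                  # visit and recompile all dependencies
+#   make FORCE_REBUILD="cowlib"  # recompile only the listed dependencies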
+
+ifeq ($(origin FULL),undefined)
+ifneq ($(strip $(force_rebuild_dep)$(FORCE_REBUILD)),)
+define force_rebuild_dep
+echo "$(FORCE_REBUILD)" | grep -qw "$$(basename "$1")"
+endef
+endif
+endif
+
+ifneq ($(SKIP_DEPS),)
+deps::
+else
+deps:: $(ALL_DEPS_DIRS) apps clean-tmp-deps.log | $(ERLANG_MK_TMP)
+ifneq ($(ALL_DEPS_DIRS),)
+ $(verbose) set -e; for dep in $(ALL_DEPS_DIRS); do \
+ if grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/deps.log; then \
+ :; \
+ else \
+ echo $$dep >> $(ERLANG_MK_TMP)/deps.log; \
+ if [ -z "$(strip $(FULL))" ] $(if $(force_rebuild_dep),&& ! ($(call force_rebuild_dep,$$dep)),) && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ elif [ -f $$dep/GNUmakefile ] || [ -f $$dep/makefile ] || [ -f $$dep/Makefile ]; then \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ else \
+ echo "Error: No Makefile to build dependency $$dep." >&2; \
+ exit 2; \
+ fi \
+ fi \
+ done
+endif
+endif
+
+# Deps related targets.
+
+# @todo rename GNUmakefile and makefile into Makefile first, if they exist
+# While the Makefile could also be named GNUmakefile or makefile,
+# in practice only Makefile has been needed so far.
+define dep_autopatch
+ if [ -f $(DEPS_DIR)/$(1)/erlang.mk ]; then \
+ rm -rf $(DEPS_DIR)/$1/ebin/; \
+ $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+ $(call dep_autopatch_erlang_mk,$(1)); \
+ elif [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+ if [ -f $(DEPS_DIR)/$1/rebar.lock ]; then \
+ $(call dep_autopatch2,$1); \
+ elif [ 0 != `grep -c "include ../\w*\.mk" $(DEPS_DIR)/$(1)/Makefile` ]; then \
+ $(call dep_autopatch2,$(1)); \
+ elif [ 0 != `grep -ci "^[^#].*rebar" $(DEPS_DIR)/$(1)/Makefile` ]; then \
+ $(call dep_autopatch2,$(1)); \
+ elif [ -n "`find $(DEPS_DIR)/$(1)/ -type f -name \*.mk -not -name erlang.mk -exec grep -i "^[^#].*rebar" '{}' \;`" ]; then \
+ $(call dep_autopatch2,$(1)); \
+ fi \
+ else \
+ if [ ! -d $(DEPS_DIR)/$(1)/src/ ]; then \
+ $(call dep_autopatch_noop,$(1)); \
+ else \
+ $(call dep_autopatch2,$(1)); \
+ fi \
+ fi
+endef
+
+define dep_autopatch2
+ ! test -f $(DEPS_DIR)/$1/ebin/$1.app || \
+ mv -n $(DEPS_DIR)/$1/ebin/$1.app $(DEPS_DIR)/$1/src/$1.app.src; \
+ rm -f $(DEPS_DIR)/$1/ebin/$1.app; \
+ if [ -f $(DEPS_DIR)/$1/src/$1.app.src.script ]; then \
+ $(call erlang,$(call dep_autopatch_appsrc_script.erl,$(1))); \
+ fi; \
+ $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+ if [ -f $(DEPS_DIR)/$(1)/rebar -o -f $(DEPS_DIR)/$(1)/rebar.config -o -f $(DEPS_DIR)/$(1)/rebar.config.script -o -f $(DEPS_DIR)/$1/rebar.lock ]; then \
+ $(call dep_autopatch_fetch_rebar); \
+ $(call dep_autopatch_rebar,$(1)); \
+ else \
+ $(call dep_autopatch_gen,$(1)); \
+ fi
+endef
+
+define dep_autopatch_noop
+ printf "noop:\n" > $(DEPS_DIR)/$(1)/Makefile
+endef
+
+# Replace "include erlang.mk" with a line that will load the parent Erlang.mk
+# if given. Do it for all 3 possible Makefile file names.
+ifeq ($(NO_AUTOPATCH_ERLANG_MK),)
+define dep_autopatch_erlang_mk
+ for f in Makefile makefile GNUmakefile; do \
+ if [ -f $(DEPS_DIR)/$1/$$f ]; then \
+ sed -i.bak s/'include *erlang.mk'/'include $$(if $$(ERLANG_MK_FILENAME),$$(ERLANG_MK_FILENAME),erlang.mk)'/ $(DEPS_DIR)/$1/$$f; \
+ fi \
+ done
+endef
+else
+define dep_autopatch_erlang_mk
+ :
+endef
+endif
+
+define dep_autopatch_gen
+ printf "%s\n" \
+ "ERLC_OPTS = +debug_info" \
+ "include ../../erlang.mk" > $(DEPS_DIR)/$(1)/Makefile
+endef
+
+# We use flock/lockf when available to avoid concurrency issues.
+define dep_autopatch_fetch_rebar
+ if command -v flock >/dev/null; then \
+ flock $(ERLANG_MK_TMP)/rebar.lock sh -c "$(call dep_autopatch_fetch_rebar2)"; \
+ elif command -v lockf >/dev/null; then \
+ lockf $(ERLANG_MK_TMP)/rebar.lock sh -c "$(call dep_autopatch_fetch_rebar2)"; \
+ else \
+ $(call dep_autopatch_fetch_rebar2); \
+ fi
+endef
+
+define dep_autopatch_fetch_rebar2
+ if [ ! -d $(ERLANG_MK_TMP)/rebar ]; then \
+ git clone -q -n -- $(REBAR_GIT) $(ERLANG_MK_TMP)/rebar; \
+ cd $(ERLANG_MK_TMP)/rebar; \
+ git checkout -q $(REBAR_COMMIT); \
+ ./bootstrap; \
+ cd -; \
+ fi
+endef
+
+define dep_autopatch_rebar
+ if [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+ mv $(DEPS_DIR)/$(1)/Makefile $(DEPS_DIR)/$(1)/Makefile.orig.mk; \
+ fi; \
+ $(call erlang,$(call dep_autopatch_rebar.erl,$(1))); \
+ rm -f $(DEPS_DIR)/$(1)/ebin/$(1).app
+endef
+
+define dep_autopatch_rebar.erl
+ application:load(rebar),
+ application:set_env(rebar, log_level, debug),
+ rmemo:start(),
+ Conf1 = case file:consult("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config)") of
+ {ok, Conf0} -> Conf0;
+ _ -> []
+ end,
+ {Conf, OsEnv} = fun() ->
+ case filelib:is_file("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)") of
+ false -> {Conf1, []};
+ true ->
+ Bindings0 = erl_eval:new_bindings(),
+ Bindings1 = erl_eval:add_binding('CONFIG', Conf1, Bindings0),
+ Bindings = erl_eval:add_binding('SCRIPT', "$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings1),
+ Before = os:getenv(),
+ {ok, Conf2} = file:script("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings),
+ {Conf2, lists:foldl(fun(E, Acc) -> lists:delete(E, Acc) end, os:getenv(), Before)}
+ end
+ end(),
+ Write = fun (Text) ->
+ file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/Makefile)", Text, [append])
+ end,
+ Escape = fun (Text) ->
+ re:replace(Text, "\\\\$$", "\$$$$", [global, {return, list}])
+ end,
+ Write("IGNORE_DEPS += edown eper eunit_formatters meck node_package "
+ "rebar_lock_deps_plugin rebar_vsn_plugin reltool_util\n"),
+ Write("C_SRC_DIR = /path/do/not/exist\n"),
+ Write("C_SRC_TYPE = rebar\n"),
+ Write("DRV_CFLAGS = -fPIC\nexport DRV_CFLAGS\n"),
+ Write(["ERLANG_ARCH = ", rebar_utils:wordsize(), "\nexport ERLANG_ARCH\n"]),
+ ToList = fun
+ (V) when is_atom(V) -> atom_to_list(V);
+ (V) when is_list(V) -> "'\\"" ++ V ++ "\\"'"
+ end,
+ fun() ->
+ Write("ERLC_OPTS = +debug_info\nexport ERLC_OPTS\n"),
+ case lists:keyfind(erl_opts, 1, Conf) of
+ false -> ok;
+ {_, ErlOpts} ->
+ lists:foreach(fun
+ ({d, D}) ->
+ Write("ERLC_OPTS += -D" ++ ToList(D) ++ "=1\n");
+ ({d, DKey, DVal}) ->
+ Write("ERLC_OPTS += -D" ++ ToList(DKey) ++ "=" ++ ToList(DVal) ++ "\n");
+ ({i, I}) ->
+ Write(["ERLC_OPTS += -I ", I, "\n"]);
+ ({platform_define, Regex, D}) ->
+ case rebar_utils:is_arch(Regex) of
+ true -> Write("ERLC_OPTS += -D" ++ ToList(D) ++ "=1\n");
+ false -> ok
+ end;
+ ({parse_transform, PT}) ->
+ Write("ERLC_OPTS += +'{parse_transform, " ++ ToList(PT) ++ "}'\n");
+ (_) -> ok
+ end, ErlOpts)
+ end,
+ Write("\n")
+ end(),
+ GetHexVsn = fun(N, NP) ->
+ case file:consult("$(call core_native_path,$(DEPS_DIR)/$1/rebar.lock)") of
+ {ok, Lock} ->
+ io:format("~p~n", [Lock]),
+ case lists:keyfind("1.1.0", 1, Lock) of
+ {_, LockPkgs} ->
+ io:format("~p~n", [LockPkgs]),
+ case lists:keyfind(atom_to_binary(N, latin1), 1, LockPkgs) of
+ {_, {pkg, _, Vsn}, _} ->
+ io:format("~p~n", [Vsn]),
+ {N, {hex, NP, binary_to_list(Vsn)}};
+ _ ->
+ false
+ end;
+ _ ->
+ false
+ end;
+ _ ->
+ false
+ end
+ end,
+ SemVsn = fun
+ ("~>" ++ S0) ->
+ S = case S0 of
+ " " ++ S1 -> S1;
+ _ -> S0
+ end,
+ case length([ok || $$. <- S]) of
+ 0 -> S ++ ".0.0";
+ 1 -> S ++ ".0";
+ _ -> S
+ end;
+ (S) -> S
+ end,
+ fun() ->
+ File = case lists:keyfind(deps, 1, Conf) of
+ false -> [];
+ {_, Deps} ->
+ [begin case case Dep of
+ N when is_atom(N) -> GetHexVsn(N, N);
+ {N, S} when is_atom(N), is_list(S) -> {N, {hex, N, SemVsn(S)}};
+ {N, {pkg, NP}} when is_atom(N) -> GetHexVsn(N, NP);
+ {N, S, {pkg, NP}} -> {N, {hex, NP, S}};
+ {N, S} when is_tuple(S) -> {N, S};
+ {N, _, S} -> {N, S};
+ {N, _, S, _} -> {N, S};
+ _ -> false
+ end of
+ false -> ok;
+ {Name, Source} ->
+ {Method, Repo, Commit} = case Source of
+ {hex, NPV, V} -> {hex, V, NPV};
+ {git, R} -> {git, R, master};
+ {M, R, {branch, C}} -> {M, R, C};
+ {M, R, {ref, C}} -> {M, R, C};
+ {M, R, {tag, C}} -> {M, R, C};
+ {M, R, C} -> {M, R, C}
+ end,
+ Write(io_lib:format("DEPS += ~s\ndep_~s = ~s ~s ~s~n", [Name, Name, Method, Repo, Commit]))
+ end end || Dep <- Deps]
+ end
+ end(),
+ fun() ->
+ case lists:keyfind(erl_first_files, 1, Conf) of
+ false -> ok;
+ {_, Files} ->
+ Names = [[" ", case lists:reverse(F) of
+ "lre." ++ Elif -> lists:reverse(Elif);
+ "lrx." ++ Elif -> lists:reverse(Elif);
+ "lry." ++ Elif -> lists:reverse(Elif);
+ Elif -> lists:reverse(Elif)
+ end] || "src/" ++ F <- Files],
+ Write(io_lib:format("COMPILE_FIRST +=~s\n", [Names]))
+ end
+ end(),
+ Write("\n\nrebar_dep: preprocess pre-deps deps pre-app app\n"),
+ Write("\npreprocess::\n"),
+ Write("\npre-deps::\n"),
+ Write("\npre-app::\n"),
+ PatchHook = fun(Cmd) ->
+ Cmd2 = re:replace(Cmd, "^([g]?make)(.*)( -C.*)", "\\\\1\\\\3\\\\2", [{return, list}]),
+ case Cmd2 of
+ "make -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1);
+ "gmake -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1);
+ "make " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1);
+ "gmake " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1);
+ _ -> Escape(Cmd)
+ end
+ end,
+ fun() ->
+ case lists:keyfind(pre_hooks, 1, Conf) of
+ false -> ok;
+ {_, Hooks} ->
+ [case H of
+ {'get-deps', Cmd} ->
+ Write("\npre-deps::\n\t" ++ PatchHook(Cmd) ++ "\n");
+ {compile, Cmd} ->
+ Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n");
+ {Regex, compile, Cmd} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n");
+ false -> ok
+ end;
+ _ -> ok
+ end || H <- Hooks]
+ end
+ end(),
+ ShellToMk = fun(V0) ->
+ V1 = re:replace(V0, "[$$][(]", "$$\(shell ", [global]),
+ V = re:replace(V1, "([$$])(?![(])(\\\\w*)", "\\\\1(\\\\2)", [global]),
+ re:replace(V, "-Werror\\\\b", "", [{return, list}, global])
+ end,
+ PortSpecs = fun() ->
+ case lists:keyfind(port_specs, 1, Conf) of
+ false ->
+ case filelib:is_dir("$(call core_native_path,$(DEPS_DIR)/$1/c_src)") of
+ false -> [];
+ true ->
+ [{"priv/" ++ proplists:get_value(so_name, Conf, "$(1)_drv.so"),
+ proplists:get_value(port_sources, Conf, ["c_src/*.c"]), []}]
+ end;
+ {_, Specs} ->
+ lists:flatten([case S of
+ {Output, Input} -> {ShellToMk(Output), Input, []};
+ {Regex, Output, Input} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {ShellToMk(Output), Input, []};
+ false -> []
+ end;
+ {Regex, Output, Input, [{env, Env}]} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {ShellToMk(Output), Input, Env};
+ false -> []
+ end
+ end || S <- Specs])
+ end
+ end(),
+ PortSpecWrite = fun (Text) ->
+ file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/c_src/Makefile.erlang.mk)", Text, [append])
+ end,
+ case PortSpecs of
+ [] -> ok;
+ _ ->
+ Write("\npre-app::\n\t@$$\(MAKE) --no-print-directory -f c_src/Makefile.erlang.mk\n"),
+ PortSpecWrite(io_lib:format("ERL_CFLAGS ?= -finline-functions -Wall -fPIC -I \\"~s/erts-~s/include\\" -I \\"~s\\"\n",
+ [code:root_dir(), erlang:system_info(version), code:lib_dir(erl_interface, include)])),
+ PortSpecWrite(io_lib:format("ERL_LDFLAGS ?= -L \\"~s\\" -lei\n",
+ [code:lib_dir(erl_interface, lib)])),
+ [PortSpecWrite(["\n", E, "\n"]) || E <- OsEnv],
+ FilterEnv = fun(Env) ->
+ lists:flatten([case E of
+ {_, _} -> E;
+ {Regex, K, V} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {K, V};
+ false -> []
+ end
+ end || E <- Env])
+ end,
+ MergeEnv = fun(Env) ->
+ lists:foldl(fun ({K, V}, Acc) ->
+ case lists:keyfind(K, 1, Acc) of
+ false -> [{K, rebar_utils:expand_env_variable(V, K, "")}|Acc];
+ {_, V0} -> [{K, rebar_utils:expand_env_variable(V, K, V0)}|Acc]
+ end
+ end, [], Env)
+ end,
+ PortEnv = case lists:keyfind(port_env, 1, Conf) of
+ false -> [];
+ {_, PortEnv0} -> FilterEnv(PortEnv0)
+ end,
+ PortSpec = fun ({Output, Input0, Env}) ->
+ filelib:ensure_dir("$(call core_native_path,$(DEPS_DIR)/$1/)" ++ Output),
+ Input = [[" ", I] || I <- Input0],
+ PortSpecWrite([
+ [["\n", K, " = ", ShellToMk(V)] || {K, V} <- lists:reverse(MergeEnv(PortEnv))],
+ case $(PLATFORM) of
+ darwin -> "\n\nLDFLAGS += -flat_namespace -undefined suppress";
+ _ -> ""
+ end,
+ "\n\nall:: ", Output, "\n\t@:\n\n",
+ "%.o: %.c\n\t$$\(CC) -c -o $$\@ $$\< $$\(CFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.C\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.cc\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.cpp\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ [[Output, ": ", K, " += ", ShellToMk(V), "\n"] || {K, V} <- lists:reverse(MergeEnv(FilterEnv(Env)))],
+ Output, ": $$\(foreach ext,.c .C .cc .cpp,",
+ "$$\(patsubst %$$\(ext),%.o,$$\(filter %$$\(ext),$$\(wildcard", Input, "))))\n",
+ "\t$$\(CC) -o $$\@ $$\? $$\(LDFLAGS) $$\(ERL_LDFLAGS) $$\(DRV_LDFLAGS) $$\(EXE_LDFLAGS)",
+ case {filename:extension(Output), $(PLATFORM)} of
+ {[], _} -> "\n";
+ {_, darwin} -> "\n";
+ _ -> " -shared\n"
+ end])
+ end,
+ [PortSpec(S) || S <- PortSpecs]
+ end,
+ fun() ->
+ case lists:keyfind(plugins, 1, Conf) of
+ false -> ok;
+ {_, Plugins0} ->
+ Plugins = [P || P <- Plugins0, is_tuple(P)],
+ case lists:keyfind('lfe-compile', 1, Plugins) of
+ false -> ok;
+ _ -> Write("\nBUILD_DEPS = lfe lfe.mk\ndep_lfe.mk = git https://github.com/ninenines/lfe.mk master\nDEP_PLUGINS = lfe.mk\n")
+ end
+ end
+ end(),
+ Write("\ninclude $$\(if $$\(ERLANG_MK_FILENAME),$$\(ERLANG_MK_FILENAME),erlang.mk)"),
+ RunPlugin = fun(Plugin, Step) ->
+ case erlang:function_exported(Plugin, Step, 2) of
+ false -> ok;
+ true ->
+ c:cd("$(call core_native_path,$(DEPS_DIR)/$1/)"),
+ Ret = Plugin:Step({config, "", Conf, dict:new(), dict:new(), dict:new(),
+ dict:store(base_dir, "", dict:new())}, undefined),
+ io:format("rebar plugin ~p step ~p ret ~p~n", [Plugin, Step, Ret])
+ end
+ end,
+ fun() ->
+ case lists:keyfind(plugins, 1, Conf) of
+ false -> ok;
+ {_, Plugins0} ->
+ Plugins = [P || P <- Plugins0, is_atom(P)],
+ [begin
+ case lists:keyfind(deps, 1, Conf) of
+ false -> ok;
+ {_, Deps} ->
+ case lists:keyfind(P, 1, Deps) of
+ false -> ok;
+ _ ->
+ Path = "$(call core_native_path,$(DEPS_DIR)/)" ++ atom_to_list(P),
+ io:format("~s", [os:cmd("$(MAKE) -C $(call core_native_path,$(DEPS_DIR)/$1) " ++ Path)]),
+ io:format("~s", [os:cmd("$(MAKE) -C " ++ Path ++ " IS_DEP=1")]),
+ code:add_patha(Path ++ "/ebin")
+ end
+ end
+ end || P <- Plugins],
+ [case code:load_file(P) of
+ {module, P} -> ok;
+ _ ->
+ case lists:keyfind(plugin_dir, 1, Conf) of
+ false -> ok;
+ {_, PluginsDir} ->
+ ErlFile = "$(call core_native_path,$(DEPS_DIR)/$1/)" ++ PluginsDir ++ "/" ++ atom_to_list(P) ++ ".erl",
+ {ok, P, Bin} = compile:file(ErlFile, [binary]),
+ {module, P} = code:load_binary(P, ErlFile, Bin)
+ end
+ end || P <- Plugins],
+ [RunPlugin(P, preprocess) || P <- Plugins],
+ [RunPlugin(P, pre_compile) || P <- Plugins],
+ [RunPlugin(P, compile) || P <- Plugins]
+ end
+ end(),
+ halt()
+endef
+
+define dep_autopatch_appsrc_script.erl
+ AppSrc = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+ AppSrcScript = AppSrc ++ ".script",
+ {ok, Conf0} = file:consult(AppSrc),
+ Bindings0 = erl_eval:new_bindings(),
+ Bindings1 = erl_eval:add_binding('CONFIG', Conf0, Bindings0),
+ Bindings = erl_eval:add_binding('SCRIPT', AppSrcScript, Bindings1),
+ Conf = case file:script(AppSrcScript, Bindings) of
+ {ok, [C]} -> C;
+ {ok, C} -> C
+ end,
+ ok = file:write_file(AppSrc, io_lib:format("~p.~n", [Conf])),
+ halt()
+endef
+
+define dep_autopatch_appsrc.erl
+ AppSrcOut = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+ AppSrcIn = case filelib:is_regular(AppSrcOut) of false -> "$(call core_native_path,$(DEPS_DIR)/$1/ebin/$1.app)"; true -> AppSrcOut end,
+ case filelib:is_regular(AppSrcIn) of
+ false -> ok;
+ true ->
+ {ok, [{application, $(1), L0}]} = file:consult(AppSrcIn),
+ L1 = lists:keystore(modules, 1, L0, {modules, []}),
+ L2 = case lists:keyfind(vsn, 1, L1) of
+ {_, git} -> lists:keyreplace(vsn, 1, L1, {vsn, lists:droplast(os:cmd("git -C $(DEPS_DIR)/$1 describe --dirty --tags --always"))});
+ {_, {cmd, _}} -> lists:keyreplace(vsn, 1, L1, {vsn, "cmd"});
+ _ -> L1
+ end,
+ L3 = case lists:keyfind(registered, 1, L2) of false -> [{registered, []}|L2]; _ -> L2 end,
+ ok = file:write_file(AppSrcOut, io_lib:format("~p.~n", [{application, $(1), L3}])),
+ case AppSrcOut of AppSrcIn -> ok; _ -> ok = file:delete(AppSrcIn) end
+ end,
+ halt()
+endef
+
+define dep_fetch_git
+ git clone -q -n -- $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && git checkout -q $(call dep_commit,$(1));
+endef
+
+define dep_fetch_git-subfolder
+ mkdir -p $(ERLANG_MK_TMP)/git-subfolder; \
+ git clone -q -n -- $(call dep_repo,$1) \
+ $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1); \
+ cd $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1) \
+ && git checkout -q $(call dep_commit,$1); \
+ ln -s $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1)/$(word 4,$(dep_$(1))) \
+ $(DEPS_DIR)/$(call dep_name,$1);
+endef
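+
+# Illustrative git-subfolder declaration (my_dep and the monorepo URL are
+# hypothetical); the fourth word selects the subfolder that gets linked
+# into $(DEPS_DIR):
+#   dep_my_dep = git-subfolder https://github.com/user/monorepo master apps/my_dep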
+
+define dep_fetch_git-submodule
+ git submodule update --init -- $(DEPS_DIR)/$1;
+endef
+
+define dep_fetch_hg
+ hg clone -q -U $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && hg update -q $(call dep_commit,$(1));
+endef
+
+define dep_fetch_svn
+ svn checkout -q $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+define dep_fetch_cp
+ cp -R $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+define dep_fetch_ln
+ ln -s $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+# Hex dependencies only specify a package version, so there is no need to look them up in the Erlang.mk package index.
+define dep_fetch_hex
+ mkdir -p $(ERLANG_MK_TMP)/hex $(DEPS_DIR)/$1; \
+ $(call core_http_get,$(ERLANG_MK_TMP)/hex/$1.tar,\
+ https://repo.hex.pm/tarballs/$(if $(word 3,$(dep_$1)),$(word 3,$(dep_$1)),$1)-$(strip $(word 2,$(dep_$1))).tar); \
+ tar -xOf $(ERLANG_MK_TMP)/hex/$1.tar contents.tar.gz | tar -C $(DEPS_DIR)/$1 -xzf -;
+endef
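+
+# Illustrative hex declarations: word 2 is the version, and an optional
+# word 3 names the hex package when it differs from the app name
+# (versions and the my_app/actual_pkg names are hypothetical):
+#   dep_jsx = hex 2.9.0
+#   dep_my_app = hex 1.0.0 actual_pkg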
+
+define dep_fetch_fail
+ echo "Error: Unknown or invalid dependency: $(1)." >&2; \
+ exit 78;
+endef
+
+# Kept for compatibility with older Erlang.mk configurations.
+define dep_fetch_legacy
+ $(warning WARNING: '$(1)' dependency configuration uses deprecated format.) \
+ git clone -q -n -- $(word 1,$(dep_$(1))) $(DEPS_DIR)/$(1); \
+ cd $(DEPS_DIR)/$(1) && git checkout -q $(if $(word 2,$(dep_$(1))),$(word 2,$(dep_$(1))),master);
+endef
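+
+# Illustrative legacy declaration handled above (my_dep is hypothetical):
+# word 1 is the repository and the optional word 2 the commit, defaulting
+# to master:
+#   dep_my_dep = https://github.com/user/my_dep some-tag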
+
+define dep_target
+$(DEPS_DIR)/$(call dep_name,$1): | $(ERLANG_MK_TMP)
+ $(eval DEP_NAME := $(call dep_name,$1))
+ $(eval DEP_STR := $(if $(filter $1,$(DEP_NAME)),$1,"$1 ($(DEP_NAME))"))
+ $(verbose) if test -d $(APPS_DIR)/$(DEP_NAME); then \
+ echo "Error: Dependency" $(DEP_STR) "conflicts with application found in $(APPS_DIR)/$(DEP_NAME)." >&2; \
+ exit 17; \
+ fi
+ $(verbose) mkdir -p $(DEPS_DIR)
+ $(dep_verbose) $(call dep_fetch_$(strip $(call dep_fetch,$(1))),$(1))
+ $(verbose) if [ -f $(DEPS_DIR)/$(1)/configure.ac -o -f $(DEPS_DIR)/$(1)/configure.in ] \
+ && [ ! -f $(DEPS_DIR)/$(1)/configure ]; then \
+ echo " AUTO " $(DEP_STR); \
+ cd $(DEPS_DIR)/$(1) && autoreconf -Wall -vif -I m4; \
+ fi
+ - $(verbose) if [ -f $(DEPS_DIR)/$(DEP_NAME)/configure ]; then \
+ echo " CONF " $(DEP_STR); \
+ cd $(DEPS_DIR)/$(DEP_NAME) && ./configure; \
+ fi
+ifeq ($(filter $(1),$(NO_AUTOPATCH)),)
+ $(verbose) $$(MAKE) --no-print-directory autopatch-$(DEP_NAME)
+endif
+
+.PHONY: autopatch-$(call dep_name,$1)
+
+autopatch-$(call dep_name,$1)::
+ $(verbose) if [ "$(1)" = "amqp_client" -a "$(RABBITMQ_CLIENT_PATCH)" ]; then \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \
+ echo " PATCH Downloading rabbitmq-codegen"; \
+ git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \
+ fi; \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-server ]; then \
+ echo " PATCH Downloading rabbitmq-server"; \
+ git clone https://github.com/rabbitmq/rabbitmq-server.git $(DEPS_DIR)/rabbitmq-server; \
+ fi; \
+ ln -s $(DEPS_DIR)/amqp_client/deps/rabbit_common-0.0.0 $(DEPS_DIR)/rabbit_common; \
+ elif [ "$(1)" = "rabbit" -a "$(RABBITMQ_SERVER_PATCH)" ]; then \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \
+ echo " PATCH Downloading rabbitmq-codegen"; \
+ git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \
+ fi \
+ elif [ "$1" = "elixir" -a "$(ELIXIR_PATCH)" ]; then \
+ ln -s lib/elixir/ebin $(DEPS_DIR)/elixir/; \
+ else \
+ $$(call dep_autopatch,$(call dep_name,$1)) \
+ fi
+endef
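+
+# Illustrative: autopatching can be skipped for a given dependency by
+# listing it in NO_AUTOPATCH (my_dep is a hypothetical name):
+#   NO_AUTOPATCH = my_dep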
+
+$(foreach dep,$(BUILD_DEPS) $(DEPS),$(eval $(call dep_target,$(dep))))
+
+ifndef IS_APP
+clean:: clean-apps
+
+clean-apps:
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ $(MAKE) -C $$dep clean IS_APP=1; \
+ done
+
+distclean:: distclean-apps
+
+distclean-apps:
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ $(MAKE) -C $$dep distclean IS_APP=1; \
+ done
+endif
+
+ifndef SKIP_DEPS
+distclean:: distclean-deps
+
+distclean-deps:
+ $(gen_verbose) rm -rf $(DEPS_DIR)
+endif
+
+# Forward-declare variables used in core/deps-tools.mk. This is required
+# in case plugins use them.
+
+ERLANG_MK_RECURSIVE_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-deps-list.log
+ERLANG_MK_RECURSIVE_DOC_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-doc-deps-list.log
+ERLANG_MK_RECURSIVE_REL_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-rel-deps-list.log
+ERLANG_MK_RECURSIVE_TEST_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-test-deps-list.log
+ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-shell-deps-list.log
+
+ERLANG_MK_QUERY_DEPS_FILE = $(ERLANG_MK_TMP)/query-deps.log
+ERLANG_MK_QUERY_DOC_DEPS_FILE = $(ERLANG_MK_TMP)/query-doc-deps.log
+ERLANG_MK_QUERY_REL_DEPS_FILE = $(ERLANG_MK_TMP)/query-rel-deps.log
+ERLANG_MK_QUERY_TEST_DEPS_FILE = $(ERLANG_MK_TMP)/query-test-deps.log
+ERLANG_MK_QUERY_SHELL_DEPS_FILE = $(ERLANG_MK_TMP)/query-shell-deps.log
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: clean-app
+
+# Configuration.
+
+ERLC_OPTS ?= -Werror +debug_info +warn_export_vars +warn_shadow_vars \
+ +warn_obsolete_guard # +bin_opt_info +warn_export_all +warn_missing_spec
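+
+# ERLC_OPTS is only a default (?=) and can be replaced from the project
+# Makefile, for example (illustrative):
+#   ERLC_OPTS = +debug_info +warn_missing_spec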
+COMPILE_FIRST ?=
+COMPILE_FIRST_PATHS = $(addprefix src/,$(addsuffix .erl,$(COMPILE_FIRST)))
+ERLC_EXCLUDE ?=
+ERLC_EXCLUDE_PATHS = $(addprefix src/,$(addsuffix .erl,$(ERLC_EXCLUDE)))
+
+ERLC_ASN1_OPTS ?=
+
+ERLC_MIB_OPTS ?=
+COMPILE_MIB_FIRST ?=
+COMPILE_MIB_FIRST_PATHS = $(addprefix mibs/,$(addsuffix .mib,$(COMPILE_MIB_FIRST)))
+
+# Verbosity.
+
+app_verbose_0 = @echo " APP " $(PROJECT);
+app_verbose_2 = set -x;
+app_verbose = $(app_verbose_$(V))
+
+appsrc_verbose_0 = @echo " APP " $(PROJECT).app.src;
+appsrc_verbose_2 = set -x;
+appsrc_verbose = $(appsrc_verbose_$(V))
+
+makedep_verbose_0 = @echo " DEPEND" $(PROJECT).d;
+makedep_verbose_2 = set -x;
+makedep_verbose = $(makedep_verbose_$(V))
+
+erlc_verbose_0 = @echo " ERLC " $(filter-out $(patsubst %,%.erl,$(ERLC_EXCLUDE)),\
+ $(filter %.erl %.core,$(?F)));
+erlc_verbose_2 = set -x;
+erlc_verbose = $(erlc_verbose_$(V))
+
+xyrl_verbose_0 = @echo " XYRL " $(filter %.xrl %.yrl,$(?F));
+xyrl_verbose_2 = set -x;
+xyrl_verbose = $(xyrl_verbose_$(V))
+
+asn1_verbose_0 = @echo " ASN1 " $(filter %.asn1,$(?F));
+asn1_verbose_2 = set -x;
+asn1_verbose = $(asn1_verbose_$(V))
+
+mib_verbose_0 = @echo " MIB " $(filter %.bin %.mib,$(?F));
+mib_verbose_2 = set -x;
+mib_verbose = $(mib_verbose_$(V))
+
+ifneq ($(wildcard src/),)
+
+# Targets.
+
+app:: $(if $(wildcard ebin/test),clean) deps
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d
+ $(verbose) $(MAKE) --no-print-directory app-build
+
+ifeq ($(wildcard src/$(PROJECT_MOD).erl),)
+define app_file
+{application, '$(PROJECT)', [
+ {description, "$(PROJECT_DESCRIPTION)"},
+ {vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP),
+ {id$(comma)$(space)"$(1)"}$(comma))
+ {modules, [$(call comma_list,$(2))]},
+ {registered, []},
+ {applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(foreach dep,$(DEPS),$(call dep_name,$(dep))))]},
+ {env, $(subst \,\\,$(PROJECT_ENV))}$(if $(findstring {,$(PROJECT_APP_EXTRA_KEYS)),$(comma)$(newline)$(tab)$(subst \,\\,$(PROJECT_APP_EXTRA_KEYS)),)
+]}.
+endef
+else
+define app_file
+{application, '$(PROJECT)', [
+ {description, "$(PROJECT_DESCRIPTION)"},
+ {vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP),
+ {id$(comma)$(space)"$(1)"}$(comma))
+ {modules, [$(call comma_list,$(2))]},
+ {registered, [$(call comma_list,$(PROJECT)_sup $(PROJECT_REGISTERED))]},
+ {applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(foreach dep,$(DEPS),$(call dep_name,$(dep))))]},
+ {mod, {$(PROJECT_MOD), []}},
+ {env, $(subst \,\\,$(PROJECT_ENV))}$(if $(findstring {,$(PROJECT_APP_EXTRA_KEYS)),$(comma)$(newline)$(tab)$(subst \,\\,$(PROJECT_APP_EXTRA_KEYS)),)
+]}.
+endef
+endif
+
+app-build: ebin/$(PROJECT).app
+ $(verbose) :
+
+# Source files.
+
+ALL_SRC_FILES := $(sort $(call core_find,src/,*))
+
+ERL_FILES := $(filter %.erl,$(ALL_SRC_FILES))
+CORE_FILES := $(filter %.core,$(ALL_SRC_FILES))
+
+# ASN.1 files.
+
+ifneq ($(wildcard asn1/),)
+ASN1_FILES = $(sort $(call core_find,asn1/,*.asn1))
+ERL_FILES += $(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES))))
+
+define compile_asn1
+ $(verbose) mkdir -p include/
+ $(asn1_verbose) erlc -v -I include/ -o asn1/ +noobj $(ERLC_ASN1_OPTS) $(1)
+ $(verbose) mv asn1/*.erl src/
+ -$(verbose) mv asn1/*.hrl include/
+ $(verbose) mv asn1/*.asn1db include/
+endef
+
+$(PROJECT).d:: $(ASN1_FILES)
+ $(if $(strip $?),$(call compile_asn1,$?))
+endif
+
+# SNMP MIB files.
+
+ifneq ($(wildcard mibs/),)
+MIB_FILES = $(sort $(call core_find,mibs/,*.mib))
+
+$(PROJECT).d:: $(COMPILE_MIB_FIRST_PATHS) $(MIB_FILES)
+ $(verbose) mkdir -p include/ priv/mibs/
+ $(mib_verbose) erlc -v $(ERLC_MIB_OPTS) -o priv/mibs/ -I priv/mibs/ $?
+ $(mib_verbose) erlc -o include/ -- $(addprefix priv/mibs/,$(patsubst %.mib,%.bin,$(notdir $?)))
+endif
+
+# Leex and Yecc files.
+
+XRL_FILES := $(filter %.xrl,$(ALL_SRC_FILES))
+XRL_ERL_FILES = $(addprefix src/,$(patsubst %.xrl,%.erl,$(notdir $(XRL_FILES))))
+ERL_FILES += $(XRL_ERL_FILES)
+
+YRL_FILES := $(filter %.yrl,$(ALL_SRC_FILES))
+YRL_ERL_FILES = $(addprefix src/,$(patsubst %.yrl,%.erl,$(notdir $(YRL_FILES))))
+ERL_FILES += $(YRL_ERL_FILES)
+
+$(PROJECT).d:: $(XRL_FILES) $(YRL_FILES)
+ $(if $(strip $?),$(xyrl_verbose) erlc -v -o src/ $(YRL_ERLC_OPTS) $?)
+
+# Erlang and Core Erlang files.
+
+define makedep.erl
+ E = ets:new(makedep, [bag]),
+ G = digraph:new([acyclic]),
+ ErlFiles = lists:usort(string:tokens("$(ERL_FILES)", " ")),
+ DepsDir = "$(call core_native_path,$(DEPS_DIR))",
+ AppsDir = "$(call core_native_path,$(APPS_DIR))",
+ DepsDirsSrc = "$(if $(wildcard $(DEPS_DIR)/*/src), $(call core_native_path,$(wildcard $(DEPS_DIR)/*/src)))",
+ DepsDirsInc = "$(if $(wildcard $(DEPS_DIR)/*/include), $(call core_native_path,$(wildcard $(DEPS_DIR)/*/include)))",
+ AppsDirsSrc = "$(if $(wildcard $(APPS_DIR)/*/src), $(call core_native_path,$(wildcard $(APPS_DIR)/*/src)))",
+ AppsDirsInc = "$(if $(wildcard $(APPS_DIR)/*/include), $(call core_native_path,$(wildcard $(APPS_DIR)/*/include)))",
+ DepsDirs = lists:usort(string:tokens(DepsDirsSrc++DepsDirsInc, " ")),
+ AppsDirs = lists:usort(string:tokens(AppsDirsSrc++AppsDirsInc, " ")),
+ Modules = [{list_to_atom(filename:basename(F, ".erl")), F} || F <- ErlFiles],
+ Add = fun (Mod, Dep) ->
+ case lists:keyfind(Dep, 1, Modules) of
+ false -> ok;
+ {_, DepFile} ->
+ {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+ ets:insert(E, {ModFile, DepFile}),
+ digraph:add_vertex(G, Mod),
+ digraph:add_vertex(G, Dep),
+ digraph:add_edge(G, Mod, Dep)
+ end
+ end,
+ AddHd = fun (F, Mod, DepFile) ->
+ case file:open(DepFile, [read]) of
+ {error, enoent} ->
+ ok;
+ {ok, Fd} ->
+ {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+ case ets:match(E, {ModFile, DepFile}) of
+ [] ->
+ ets:insert(E, {ModFile, DepFile}),
+						F(F, Fd, Mod, 0);
+ _ -> ok
+ end
+ end
+ end,
+ SearchHrl = fun
+ F(_Hrl, []) -> {error,enoent};
+ F(Hrl, [Dir|Dirs]) ->
+ HrlF = filename:join([Dir,Hrl]),
+ case filelib:is_file(HrlF) of
+ true ->
+ {ok, HrlF};
+ false -> F(Hrl,Dirs)
+ end
+ end,
+ Attr = fun
+ (_F, Mod, behavior, Dep) ->
+ Add(Mod, Dep);
+ (_F, Mod, behaviour, Dep) ->
+ Add(Mod, Dep);
+ (_F, Mod, compile, {parse_transform, Dep}) ->
+ Add(Mod, Dep);
+ (_F, Mod, compile, Opts) when is_list(Opts) ->
+ case proplists:get_value(parse_transform, Opts) of
+ undefined -> ok;
+ Dep -> Add(Mod, Dep)
+ end;
+ (F, Mod, include, Hrl) ->
+ case SearchHrl(Hrl, ["src", "include",AppsDir,DepsDir]++AppsDirs++DepsDirs) of
+ {ok, FoundHrl} -> AddHd(F, Mod, FoundHrl);
+ {error, _} -> false
+ end;
+ (F, Mod, include_lib, Hrl) ->
+ case SearchHrl(Hrl, ["src", "include",AppsDir,DepsDir]++AppsDirs++DepsDirs) of
+ {ok, FoundHrl} -> AddHd(F, Mod, FoundHrl);
+ {error, _} -> false
+ end;
+ (F, Mod, import, {Imp, _}) ->
+ IsFile =
+ case lists:keyfind(Imp, 1, Modules) of
+ false -> false;
+ {_, FilePath} -> filelib:is_file(FilePath)
+ end,
+ case IsFile of
+ false -> ok;
+ true -> Add(Mod, Imp)
+ end;
+ (_, _, _, _) -> ok
+ end,
+ MakeDepend = fun
+ (F, Fd, Mod, StartLocation) ->
+ {ok, Filename} = file:pid2name(Fd),
+ case io:parse_erl_form(Fd, undefined, StartLocation) of
+ {ok, AbsData, EndLocation} ->
+ case AbsData of
+ {attribute, _, Key, Value} ->
+ Attr(F, Mod, Key, Value),
+ F(F, Fd, Mod, EndLocation);
+ _ -> F(F, Fd, Mod, EndLocation)
+ end;
+				{eof, _} -> file:close(Fd);
+				{error, _ErrorDescription} ->
+					file:close(Fd);
+ {error, ErrorInfo, ErrorLocation} ->
+ F(F, Fd, Mod, ErrorLocation)
+ end,
+ ok
+ end,
+ [begin
+ Mod = list_to_atom(filename:basename(F, ".erl")),
+ case file:open(F, [read]) of
+			{ok, Fd} -> MakeDepend(MakeDepend, Fd, Mod, 0);
+ {error, enoent} -> ok
+ end
+ end || F <- ErlFiles],
+ Depend = sofs:to_external(sofs:relation_to_family(sofs:relation(ets:tab2list(E)))),
+ CompileFirst = [X || X <- lists:reverse(digraph_utils:topsort(G)), [] =/= digraph:in_neighbours(G, X)],
+ TargetPath = fun(Target) ->
+ case lists:keyfind(Target, 1, Modules) of
+ false -> "";
+ {_, DepFile} ->
+ DirSubname = tl(string:tokens(filename:dirname(DepFile), "/")),
+ string:join(DirSubname ++ [atom_to_list(Target)], "/")
+ end
+ end,
+ Output0 = [
+ "# Generated by Erlang.mk. Edit at your own risk!\n\n",
+ [[F, "::", [[" ", D] || D <- Deps], "; @touch \$$@\n"] || {F, Deps} <- Depend],
+ "\nCOMPILE_FIRST +=", [[" ", TargetPath(CF)] || CF <- CompileFirst], "\n"
+ ],
+ Output = case "é" of
+ [233] -> unicode:characters_to_binary(Output0);
+ _ -> Output0
+ end,
+ ok = file:write_file("$(1)", Output),
+ halt()
+endef
+
+ifeq ($(if $(NO_MAKEDEP),$(wildcard $(PROJECT).d),),)
+$(PROJECT).d:: $(ERL_FILES) $(call core_find,include/,*.hrl) $(MAKEFILE_LIST)
+ $(makedep_verbose) $(call erlang,$(call makedep.erl,$@))
+endif
+
+ifeq ($(IS_APP)$(IS_DEP),)
+ifneq ($(words $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES)),0)
+# Rebuild everything when the Makefile changes.
+$(ERLANG_MK_TMP)/last-makefile-change: $(MAKEFILE_LIST) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES); \
+ touch -c $(PROJECT).d; \
+ fi
+ $(verbose) touch $@
+
+$(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES):: $(ERLANG_MK_TMP)/last-makefile-change
+ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change
+endif
+endif
+
+$(PROJECT).d::
+ $(verbose) :
+
+include $(wildcard $(PROJECT).d)
+
+ebin/$(PROJECT).app:: ebin/
+
+ebin/:
+ $(verbose) mkdir -p ebin/
+
+define compile_erl
+ $(erlc_verbose) erlc -v $(if $(IS_DEP),$(filter-out -Werror,$(ERLC_OPTS)),$(ERLC_OPTS)) -o ebin/ \
+ -pa ebin/ -I include/ $(filter-out $(ERLC_EXCLUDE_PATHS),$(COMPILE_FIRST_PATHS) $(1))
+endef
+
+define validate_app_file
+ case file:consult("ebin/$(PROJECT).app") of
+ {ok, _} -> halt();
+ _ -> halt(1)
+ end
+endef
+
+ebin/$(PROJECT).app:: $(ERL_FILES) $(CORE_FILES) $(wildcard src/$(PROJECT).app.src)
+ $(eval FILES_TO_COMPILE := $(filter-out src/$(PROJECT).app.src,$?))
+ $(if $(strip $(FILES_TO_COMPILE)),$(call compile_erl,$(FILES_TO_COMPILE)))
+# Older git versions do not have the --first-parent flag. Do without in that case.
+ $(eval GITDESCRIBE := $(shell git describe --dirty --abbrev=7 --tags --always --first-parent 2>/dev/null \
+ || git describe --dirty --abbrev=7 --tags --always 2>/dev/null || true))
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(filter-out $(ERLC_EXCLUDE_PATHS),$(ERL_FILES) $(CORE_FILES) $(BEAM_FILES)))))))
+ifeq ($(wildcard src/$(PROJECT).app.src),)
+ $(app_verbose) printf '$(subst %,%%,$(subst $(newline),\n,$(subst ','\'',$(call app_file,$(GITDESCRIBE),$(MODULES)))))' \
+ > ebin/$(PROJECT).app
+ $(verbose) if ! $(call erlang,$(call validate_app_file)); then \
+ echo "The .app file produced is invalid. Please verify the value of PROJECT_ENV." >&2; \
+ exit 1; \
+ fi
+else
+ $(verbose) if [ -z "$$(grep -e '^[^%]*{\s*modules\s*,' src/$(PROJECT).app.src)" ]; then \
+ echo "Empty modules entry not found in $(PROJECT).app.src. Please consult the erlang.mk documentation for instructions." >&2; \
+ exit 1; \
+ fi
+ $(appsrc_verbose) cat src/$(PROJECT).app.src \
+ | sed "s/{[[:space:]]*modules[[:space:]]*,[[:space:]]*\[\]}/{modules, \[$(call comma_list,$(MODULES))\]}/" \
+ | sed "s/{id,[[:space:]]*\"git\"}/{id, \"$(subst /,\/,$(GITDESCRIBE))\"}/" \
+ > ebin/$(PROJECT).app
+endif
+ifneq ($(wildcard src/$(PROJECT).appup),)
+ $(verbose) cp src/$(PROJECT).appup ebin/
+endif
+
+clean:: clean-app
+
+clean-app:
+ $(gen_verbose) rm -rf $(PROJECT).d ebin/ priv/mibs/ $(XRL_ERL_FILES) $(YRL_ERL_FILES) \
+ $(addprefix include/,$(patsubst %.mib,%.hrl,$(notdir $(MIB_FILES)))) \
+ $(addprefix include/,$(patsubst %.asn1,%.hrl,$(notdir $(ASN1_FILES)))) \
+ $(addprefix include/,$(patsubst %.asn1,%.asn1db,$(notdir $(ASN1_FILES)))) \
+ $(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES))))
+
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Viktor Söderqvist <viktor@zuiderkwast.se>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: docs-deps
+
+# Configuration.
+
+ALL_DOC_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(DOC_DEPS))
+
+# Targets.
+
+$(foreach dep,$(DOC_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+doc-deps:
+else
+doc-deps: $(ALL_DOC_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_DOC_DEPS_DIRS) ; do $(MAKE) -C $$dep IS_DEP=1; done
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: rel-deps
+
+# Configuration.
+
+ALL_REL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(REL_DEPS))
+
+# Targets.
+
+$(foreach dep,$(REL_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+rel-deps:
+else
+rel-deps: $(ALL_REL_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_REL_DEPS_DIRS) ; do $(MAKE) -C $$dep; done
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: test-deps test-dir test-build clean-test-dir
+
+# Configuration.
+
+TEST_DIR ?= $(CURDIR)/test
+
+ALL_TEST_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(TEST_DEPS))
+
+TEST_ERLC_OPTS ?= +debug_info +warn_export_vars +warn_shadow_vars +warn_obsolete_guard
+TEST_ERLC_OPTS += -DTEST=1
+
+# Targets.
+
+$(foreach dep,$(TEST_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+test-deps:
+else
+test-deps: $(ALL_TEST_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_TEST_DEPS_DIRS) ; do \
+ if [ -z "$(strip $(FULL))" ] && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ else \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ fi \
+ done
+endif
+
+ifneq ($(wildcard $(TEST_DIR)),)
+test-dir: $(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build
+ @:
+
+test_erlc_verbose_0 = @echo " ERLC " $(filter-out $(patsubst %,%.erl,$(ERLC_EXCLUDE)),\
+ $(filter %.erl %.core,$(notdir $(FILES_TO_COMPILE))));
+test_erlc_verbose_2 = set -x;
+test_erlc_verbose = $(test_erlc_verbose_$(V))
+
+define compile_test_erl
+ $(test_erlc_verbose) erlc -v $(TEST_ERLC_OPTS) -o $(TEST_DIR) \
+ -pa ebin/ -I include/ $(1)
+endef
+
+ERL_TEST_FILES = $(call core_find,$(TEST_DIR)/,*.erl)
+$(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build: $(ERL_TEST_FILES) $(MAKEFILE_LIST)
+ $(eval FILES_TO_COMPILE := $(if $(filter $(MAKEFILE_LIST),$?),$(filter $(ERL_TEST_FILES),$^),$?))
+ $(if $(strip $(FILES_TO_COMPILE)),$(call compile_test_erl,$(FILES_TO_COMPILE)) && touch $@)
+endif
+
+test-build:: IS_TEST=1
+test-build:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build:: $(if $(wildcard src),$(if $(wildcard ebin/test),,clean)) $(if $(IS_APP),,deps test-deps)
+# We already compiled everything when IS_APP=1.
+ifndef IS_APP
+ifneq ($(wildcard src),)
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(verbose) $(MAKE) --no-print-directory app-build ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(gen_verbose) touch ebin/test
+endif
+ifneq ($(wildcard $(TEST_DIR)),)
+ $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+endif
+endif
+
+# Roughly the same as test-build, but when IS_APP=1.
+# We only care about compiling the current application.
+ifdef IS_APP
+test-build-app:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build-app:: deps test-deps
+ifneq ($(wildcard src),)
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(verbose) $(MAKE) --no-print-directory app-build ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(gen_verbose) touch ebin/test
+endif
+ifneq ($(wildcard $(TEST_DIR)),)
+ $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+endif
+endif
+
+clean:: clean-test-dir
+
+clean-test-dir:
+ifneq ($(wildcard $(TEST_DIR)/*.beam),)
+ $(gen_verbose) rm -f $(TEST_DIR)/*.beam $(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: rebar.config
+
+# We strip out -Werror because we don't want to fail due to
+# warnings when used as a dependency.
+
+compat_prepare_erlc_opts = $(shell echo "$1" | sed 's/, */,/g')
+
+define compat_convert_erlc_opts
+$(if $(filter-out -Werror,$1),\
+ $(if $(findstring +,$1),\
+ $(shell echo $1 | cut -b 2-)))
+endef
+
+define compat_erlc_opts_to_list
+[$(call comma_list,$(foreach o,$(call compat_prepare_erlc_opts,$1),$(call compat_convert_erlc_opts,$o)))]
+endef
+
+define compat_rebar_config
+{deps, [
+$(call comma_list,$(foreach d,$(DEPS),\
+ $(if $(filter hex,$(call dep_fetch,$d)),\
+ {$(call dep_name,$d)$(comma)"$(call dep_repo,$d)"},\
+ {$(call dep_name,$d)$(comma)".*"$(comma){git,"$(call dep_repo,$d)"$(comma)"$(call dep_commit,$d)"}})))
+]}.
+{erl_opts, $(call compat_erlc_opts_to_list,$(ERLC_OPTS))}.
+endef
+
+rebar.config:
+ $(gen_verbose) $(call core_render,compat_rebar_config,rebar.config)
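+
+# Illustrative usage: running `make rebar.config` renders the template
+# above, emitting a {deps, [...]} list built from DEPS and an
+# {erl_opts, [...]} entry converted from ERLC_OPTS with -Werror stripped.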
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter asciideck,$(DEPS) $(DOC_DEPS)),asciideck)
+
+.PHONY: asciidoc asciidoc-guide asciidoc-manual install-asciidoc distclean-asciidoc-guide distclean-asciidoc-manual
+
+# Core targets.
+
+docs:: asciidoc
+
+distclean:: distclean-asciidoc-guide distclean-asciidoc-manual
+
+# Plugin-specific targets.
+
+asciidoc: asciidoc-guide asciidoc-manual
+
+# User guide.
+
+ifeq ($(wildcard doc/src/guide/book.asciidoc),)
+asciidoc-guide:
+else
+asciidoc-guide: distclean-asciidoc-guide doc-deps
+ a2x -v -f pdf doc/src/guide/book.asciidoc && mv doc/src/guide/book.pdf doc/guide.pdf
+ a2x -v -f chunked doc/src/guide/book.asciidoc && mv doc/src/guide/book.chunked/ doc/html/
+
+distclean-asciidoc-guide:
+ $(gen_verbose) rm -rf doc/html/ doc/guide.pdf
+endif
+
+# Man pages.
+
+ASCIIDOC_MANUAL_FILES := $(wildcard doc/src/manual/*.asciidoc)
+
+ifeq ($(ASCIIDOC_MANUAL_FILES),)
+asciidoc-manual:
+else
+
+# Configuration.
+
+MAN_INSTALL_PATH ?= /usr/local/share/man
+MAN_SECTIONS ?= 3 7
+MAN_PROJECT ?= $(shell echo $(PROJECT) | sed 's/^./\U&\E/')
+MAN_VERSION ?= $(PROJECT_VERSION)
+
+# Plugin-specific targets.
+
+define asciidoc2man.erl
+try
+ [begin
+ io:format(" ADOC ~s~n", [F]),
+ ok = asciideck:to_manpage(asciideck:parse_file(F), #{
+ compress => gzip,
+ outdir => filename:dirname(F),
+ extra2 => "$(MAN_PROJECT) $(MAN_VERSION)",
+ extra3 => "$(MAN_PROJECT) Function Reference"
+ })
+ end || F <- [$(shell echo $(addprefix $(comma)\",$(addsuffix \",$1)) | sed 's/^.//')]],
+ halt(0)
+catch C:E ->
+ io:format("Exception ~p:~p~nStacktrace: ~p~n", [C, E, erlang:get_stacktrace()]),
+ halt(1)
+end.
+endef
+
+asciidoc-manual:: doc-deps
+
+asciidoc-manual:: $(ASCIIDOC_MANUAL_FILES)
+ $(gen_verbose) $(call erlang,$(call asciidoc2man.erl,$?))
+ $(verbose) $(foreach s,$(MAN_SECTIONS),mkdir -p doc/man$s/ && mv doc/src/manual/*.$s.gz doc/man$s/;)
+
+install-docs:: install-asciidoc
+
+install-asciidoc: asciidoc-manual
+ $(foreach s,$(MAN_SECTIONS),\
+ mkdir -p $(MAN_INSTALL_PATH)/man$s/ && \
+ install -g `id -g` -o `id -u` -m 0644 doc/man$s/*.gz $(MAN_INSTALL_PATH)/man$s/;)
+
+distclean-asciidoc-manual:
+ $(gen_verbose) rm -rf $(addprefix doc/man,$(MAN_SECTIONS))
+endif
+endif
+
+# Copyright (c) 2014-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: bootstrap bootstrap-lib bootstrap-rel new list-templates
+
+# Core targets.
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Bootstrap targets:" \
+ " bootstrap Generate a skeleton of an OTP application" \
+ " bootstrap-lib Generate a skeleton of an OTP library" \
+ " bootstrap-rel Generate the files needed to build a release" \
+ " new-app in=NAME Create a new local OTP application NAME" \
+ " new-lib in=NAME Create a new local OTP library NAME" \
+ " new t=TPL n=NAME Generate a module NAME based on the template TPL" \
+ " new t=T n=N in=APP Generate a module NAME based on the template TPL in APP" \
+ " list-templates List available templates"
+
+# Bootstrap templates.
+
+define bs_appsrc
+{application, $p, [
+ {description, ""},
+ {vsn, "0.1.0"},
+ {id, "git"},
+ {modules, []},
+ {registered, []},
+ {applications, [
+ kernel,
+ stdlib
+ ]},
+ {mod, {$p_app, []}},
+ {env, []}
+]}.
+endef
+
+define bs_appsrc_lib
+{application, $p, [
+ {description, ""},
+ {vsn, "0.1.0"},
+ {id, "git"},
+ {modules, []},
+ {registered, []},
+ {applications, [
+ kernel,
+ stdlib
+ ]}
+]}.
+endef
+
+# To prevent autocompletion issues with ZSH, we add "include erlang.mk"
+# separately during the actual bootstrap.
+define bs_Makefile
+PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.1.0
+$(if $(SP),
+# Whitespace to be used when creating files from templates.
+SP = $(SP)
+)
+endef
+
+define bs_apps_Makefile
+PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.1.0
+$(if $(SP),
+# Whitespace to be used when creating files from templates.
+SP = $(SP)
+)
+# Make sure we know where the applications are located.
+ROOT_DIR ?= $(call core_relpath,$(dir $(ERLANG_MK_FILENAME)),$(APPS_DIR)/app)
+APPS_DIR ?= ..
+DEPS_DIR ?= $(call core_relpath,$(DEPS_DIR),$(APPS_DIR)/app)
+
+include $$(ROOT_DIR)/erlang.mk
+endef
+
+define bs_app
+-module($p_app).
+-behaviour(application).
+
+-export([start/2]).
+-export([stop/1]).
+
+start(_Type, _Args) ->
+ $p_sup:start_link().
+
+stop(_State) ->
+ ok.
+endef
+
+define bs_relx_config
+{release, {$p_release, "1"}, [$p, sasl, runtime_tools]}.
+{extended_start_script, true}.
+{sys_config, "config/sys.config"}.
+{vm_args, "config/vm.args"}.
+endef
+
+define bs_sys_config
+[
+].
+endef
+
+define bs_vm_args
+-name $p@127.0.0.1
+-setcookie $p
+-heart
+endef
+
+# Normal templates.
+
+define tpl_supervisor
+-module($(n)).
+-behaviour(supervisor).
+
+-export([start_link/0]).
+-export([init/1]).
+
+start_link() ->
+ supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+init([]) ->
+ Procs = [],
+ {ok, {{one_for_one, 1, 5}, Procs}}.
+endef
+
+define tpl_gen_server
+-module($(n)).
+-behaviour(gen_server).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_server.
+-export([init/1]).
+-export([handle_call/3]).
+-export([handle_cast/2]).
+-export([handle_info/2]).
+-export([terminate/2]).
+-export([code_change/3]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_server:start_link(?MODULE, [], []).
+
+%% gen_server.
+
+init([]) ->
+ {ok, #state{}}.
+
+handle_call(_Request, _From, State) ->
+ {reply, ignored, State}.
+
+handle_cast(_Msg, State) ->
+ {noreply, State}.
+
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+endef
+
+define tpl_module
+-module($(n)).
+-export([]).
+endef
+
+define tpl_cowboy_http
+-module($(n)).
+-behaviour(cowboy_http_handler).
+
+-export([init/3]).
+-export([handle/2]).
+-export([terminate/3]).
+
+-record(state, {
+}).
+
+init(_, Req, _Opts) ->
+ {ok, Req, #state{}}.
+
+handle(Req, State=#state{}) ->
+ {ok, Req2} = cowboy_req:reply(200, Req),
+ {ok, Req2, State}.
+
+terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_gen_fsm
+-module($(n)).
+-behaviour(gen_fsm).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_fsm.
+-export([init/1]).
+-export([state_name/2]).
+-export([handle_event/3]).
+-export([state_name/3]).
+-export([handle_sync_event/4]).
+-export([handle_info/3]).
+-export([terminate/3]).
+-export([code_change/4]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_fsm:start_link(?MODULE, [], []).
+
+%% gen_fsm.
+
+init([]) ->
+ {ok, state_name, #state{}}.
+
+state_name(_Event, StateData) ->
+ {next_state, state_name, StateData}.
+
+handle_event(_Event, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+state_name(_Event, _From, StateData) ->
+ {reply, ignored, state_name, StateData}.
+
+handle_sync_event(_Event, _From, StateName, StateData) ->
+ {reply, ignored, StateName, StateData}.
+
+handle_info(_Info, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+terminate(_Reason, _StateName, _StateData) ->
+ ok.
+
+code_change(_OldVsn, StateName, StateData, _Extra) ->
+ {ok, StateName, StateData}.
+endef
+
+define tpl_gen_statem
+-module($(n)).
+-behaviour(gen_statem).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_statem.
+-export([callback_mode/0]).
+-export([init/1]).
+-export([state_name/3]).
+-export([handle_event/4]).
+-export([terminate/3]).
+-export([code_change/4]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_statem:start_link(?MODULE, [], []).
+
+%% gen_statem.
+
+callback_mode() ->
+ state_functions.
+
+init([]) ->
+ {ok, state_name, #state{}}.
+
+state_name(_EventType, _EventData, StateData) ->
+ {next_state, state_name, StateData}.
+
+handle_event(_EventType, _EventData, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+terminate(_Reason, _StateName, _StateData) ->
+ ok.
+
+code_change(_OldVsn, StateName, StateData, _Extra) ->
+ {ok, StateName, StateData}.
+endef
+
+define tpl_cowboy_loop
+-module($(n)).
+-behaviour(cowboy_loop_handler).
+
+-export([init/3]).
+-export([info/3]).
+-export([terminate/3]).
+
+-record(state, {
+}).
+
+init(_, Req, _Opts) ->
+ {loop, Req, #state{}, 5000, hibernate}.
+
+info(_Info, Req, State) ->
+ {loop, Req, State, hibernate}.
+
+terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_cowboy_rest
+-module($(n)).
+
+-export([init/3]).
+-export([content_types_provided/2]).
+-export([get_html/2]).
+
+init(_, _Req, _Opts) ->
+ {upgrade, protocol, cowboy_rest}.
+
+content_types_provided(Req, State) ->
+ {[{{<<"text">>, <<"html">>, '*'}, get_html}], Req, State}.
+
+get_html(Req, State) ->
+ {<<"<html><body>This is REST!</body></html>">>, Req, State}.
+endef
+
+define tpl_cowboy_ws
+-module($(n)).
+-behaviour(cowboy_websocket_handler).
+
+-export([init/3]).
+-export([websocket_init/3]).
+-export([websocket_handle/3]).
+-export([websocket_info/3]).
+-export([websocket_terminate/3]).
+
+-record(state, {
+}).
+
+init(_, _, _) ->
+ {upgrade, protocol, cowboy_websocket}.
+
+websocket_init(_, Req, _Opts) ->
+ Req2 = cowboy_req:compact(Req),
+ {ok, Req2, #state{}}.
+
+websocket_handle({text, Data}, Req, State) ->
+ {reply, {text, Data}, Req, State};
+websocket_handle({binary, Data}, Req, State) ->
+ {reply, {binary, Data}, Req, State};
+websocket_handle(_Frame, Req, State) ->
+ {ok, Req, State}.
+
+websocket_info(_Info, Req, State) ->
+ {ok, Req, State}.
+
+websocket_terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_ranch_protocol
+-module($(n)).
+-behaviour(ranch_protocol).
+
+-export([start_link/4]).
+-export([init/4]).
+
+-type opts() :: [].
+-export_type([opts/0]).
+
+-record(state, {
+ socket :: inet:socket(),
+ transport :: module()
+}).
+
+start_link(Ref, Socket, Transport, Opts) ->
+ Pid = spawn_link(?MODULE, init, [Ref, Socket, Transport, Opts]),
+ {ok, Pid}.
+
+-spec init(ranch:ref(), inet:socket(), module(), opts()) -> ok.
+init(Ref, Socket, Transport, _Opts) ->
+ ok = ranch:accept_ack(Ref),
+ loop(#state{socket=Socket, transport=Transport}).
+
+loop(State) ->
+ loop(State).
+endef
+
+# Plugin-specific targets.
+
+ifndef WS
+ifdef SP
+WS = $(subst a,,a $(wordlist 1,$(SP),a a a a a a a a a a a a a a a a a a a a))
+else
+WS = $(tab)
+endif
+endif
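+
+# For example, running "make bootstrap SP=4" would generate the template
+# files indented with 4 spaces instead of the default tab.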
+
+bootstrap:
+ifneq ($(wildcard src/),)
+ $(error Error: src/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(eval n := $(PROJECT)_sup)
+ $(verbose) $(call core_render,bs_Makefile,Makefile)
+ $(verbose) echo "include erlang.mk" >> Makefile
+ $(verbose) mkdir src/
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc,src/$(PROJECT).app.src)
+endif
+ $(verbose) $(call core_render,bs_app,src/$(PROJECT)_app.erl)
+ $(verbose) $(call core_render,tpl_supervisor,src/$(PROJECT)_sup.erl)
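+
+# A typical first use, assuming erlang.mk was downloaded into an empty
+# project directory, might be:
+#
+#   $ make -f erlang.mk bootstrap
+#
+# which creates a Makefile including erlang.mk, src/NAME_app.erl and
+# src/NAME_sup.erl, where NAME is PROJECT (defaulting to the directory name).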
+
+bootstrap-lib:
+ifneq ($(wildcard src/),)
+ $(error Error: src/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(verbose) $(call core_render,bs_Makefile,Makefile)
+ $(verbose) echo "include erlang.mk" >> Makefile
+ $(verbose) mkdir src/
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc_lib,src/$(PROJECT).app.src)
+endif
+
+bootstrap-rel:
+ifneq ($(wildcard relx.config),)
+ $(error Error: relx.config already exists)
+endif
+ifneq ($(wildcard config/),)
+ $(error Error: config/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(verbose) $(call core_render,bs_relx_config,relx.config)
+ $(verbose) mkdir config/
+ $(verbose) $(call core_render,bs_sys_config,config/sys.config)
+ $(verbose) $(call core_render,bs_vm_args,config/vm.args)
+
+new-app:
+ifndef in
+ $(error Usage: $(MAKE) new-app in=APP)
+endif
+ifneq ($(wildcard $(APPS_DIR)/$in),)
+ $(error Error: Application $in already exists)
+endif
+ $(eval p := $(in))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(eval n := $(in)_sup)
+ $(verbose) mkdir -p $(APPS_DIR)/$p/src/
+ $(verbose) $(call core_render,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile)
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc,$(APPS_DIR)/$p/src/$p.app.src)
+endif
+ $(verbose) $(call core_render,bs_app,$(APPS_DIR)/$p/src/$p_app.erl)
+ $(verbose) $(call core_render,tpl_supervisor,$(APPS_DIR)/$p/src/$p_sup.erl)
+
+new-lib:
+ifndef in
+ $(error Usage: $(MAKE) new-lib in=APP)
+endif
+ifneq ($(wildcard $(APPS_DIR)/$in),)
+ $(error Error: Application $in already exists)
+endif
+ $(eval p := $(in))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(verbose) mkdir -p $(APPS_DIR)/$p/src/
+ $(verbose) $(call core_render,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile)
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc_lib,$(APPS_DIR)/$p/src/$p.app.src)
+endif
+
+new:
+ifeq ($(wildcard src/)$(in),)
+ $(error Error: src/ directory does not exist)
+endif
+ifndef t
+ $(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP])
+endif
+ifndef n
+ $(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP])
+endif
+ifdef in
+ $(verbose) $(call core_render,tpl_$(t),$(APPS_DIR)/$(in)/src/$(n).erl)
+else
+ $(verbose) $(call core_render,tpl_$(t),src/$(n).erl)
+endif
+
+list-templates:
+	$(verbose) echo Available templates:
+ $(verbose) printf " %s\n" $(sort $(patsubst tpl_%,%,$(filter tpl_%,$(.VARIABLES))))
+
+# Copyright (c) 2014-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: clean-c_src distclean-c_src-env
+
+# Configuration.
+
+C_SRC_DIR ?= $(CURDIR)/c_src
+C_SRC_ENV ?= $(C_SRC_DIR)/env.mk
+C_SRC_OUTPUT ?= $(CURDIR)/priv/$(PROJECT)
+C_SRC_TYPE ?= shared
+
+# System type and C compiler/flags.
+
+ifeq ($(PLATFORM),msys2)
+ C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?= .exe
+ C_SRC_OUTPUT_SHARED_EXTENSION ?= .dll
+else
+ C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?=
+ C_SRC_OUTPUT_SHARED_EXTENSION ?= .so
+endif
+
+ifeq ($(C_SRC_TYPE),shared)
+ C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_SHARED_EXTENSION)
+else
+ C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_EXECUTABLE_EXTENSION)
+endif
+
+ifeq ($(PLATFORM),msys2)
+# We hardcode the compiler used on MSYS2. The default CC=cc does not
+# produce working code, and neither does the "gcc" MSYS2 package.
+ CC = /mingw64/bin/gcc
+ export CC
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),darwin)
+ CC ?= cc
+ CFLAGS ?= -O3 -std=c99 -arch x86_64 -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -arch x86_64 -Wall
+ LDFLAGS ?= -arch x86_64 -flat_namespace -undefined suppress
+else ifeq ($(PLATFORM),freebsd)
+ CC ?= cc
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),linux)
+ CC ?= gcc
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+endif
+
+ifneq ($(PLATFORM),msys2)
+ CFLAGS += -fPIC
+ CXXFLAGS += -fPIC
+endif
+
+CFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
+CXXFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
+
+LDLIBS += -L"$(ERL_INTERFACE_LIB_DIR)" -lei
+
+# Verbosity.
+
+c_verbose_0 = @echo " C " $(filter-out $(notdir $(MAKEFILE_LIST) $(C_SRC_ENV)),$(^F));
+c_verbose = $(c_verbose_$(V))
+
+cpp_verbose_0 = @echo " CPP " $(filter-out $(notdir $(MAKEFILE_LIST) $(C_SRC_ENV)),$(^F));
+cpp_verbose = $(cpp_verbose_$(V))
+
+link_verbose_0 = @echo " LD " $(@F);
+link_verbose = $(link_verbose_$(V))
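+
+# With the default V=0, a C compile is thus echoed as a terse " C    foo.c"
+# line; setting V=1 leaves these variables empty so the full command is shown.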
+
+# Targets.
+
+ifeq ($(wildcard $(C_SRC_DIR)),)
+else ifneq ($(wildcard $(C_SRC_DIR)/Makefile),)
+app:: app-c_src
+
+test-build:: app-c_src
+
+app-c_src:
+ $(MAKE) -C $(C_SRC_DIR)
+
+clean::
+ $(MAKE) -C $(C_SRC_DIR) clean
+
+else
+
+ifeq ($(SOURCES),)
+SOURCES := $(sort $(foreach pat,*.c *.C *.cc *.cpp,$(call core_find,$(C_SRC_DIR)/,$(pat))))
+endif
+OBJECTS = $(addsuffix .o, $(basename $(SOURCES)))
+
+COMPILE_C = $(c_verbose) $(CC) $(CFLAGS) $(CPPFLAGS) -c
+COMPILE_CPP = $(cpp_verbose) $(CXX) $(CXXFLAGS) $(CPPFLAGS) -c
+
+app:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
+
+test-build:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
+
+$(C_SRC_OUTPUT_FILE): $(OBJECTS)
+ $(verbose) mkdir -p $(dir $@)
+ $(link_verbose) $(CC) $(OBJECTS) \
+ $(LDFLAGS) $(if $(filter $(C_SRC_TYPE),shared),-shared) $(LDLIBS) \
+ -o $(C_SRC_OUTPUT_FILE)
+
+$(OBJECTS): $(MAKEFILE_LIST) $(C_SRC_ENV)
+
+%.o: %.c
+ $(COMPILE_C) $(OUTPUT_OPTION) $<
+
+%.o: %.cc
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+%.o: %.C
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+%.o: %.cpp
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+clean:: clean-c_src
+
+clean-c_src:
+ $(gen_verbose) rm -f $(C_SRC_OUTPUT_FILE) $(OBJECTS)
+
+endif
+
+ifneq ($(wildcard $(C_SRC_DIR)),)
+ERL_ERTS_DIR = $(shell $(ERL) -eval 'io:format("~s~n", [code:lib_dir(erts)]), halt().')
+
+$(C_SRC_ENV):
+ $(verbose) $(ERL) -eval "file:write_file(\"$(call core_native_path,$(C_SRC_ENV))\", \
+ io_lib:format( \
+ \"# Generated by Erlang.mk. Edit at your own risk!~n~n\" \
+ \"ERTS_INCLUDE_DIR ?= ~s/erts-~s/include/~n\" \
+ \"ERL_INTERFACE_INCLUDE_DIR ?= ~s~n\" \
+ \"ERL_INTERFACE_LIB_DIR ?= ~s~n\" \
+ \"ERTS_DIR ?= $(ERL_ERTS_DIR)~n\", \
+ [code:root_dir(), erlang:system_info(version), \
+ code:lib_dir(erl_interface, include), \
+ code:lib_dir(erl_interface, lib)])), \
+ halt()."
+
+distclean:: distclean-c_src-env
+
+distclean-c_src-env:
+ $(gen_verbose) rm -f $(C_SRC_ENV)
+
+-include $(C_SRC_ENV)
+
+ifneq ($(ERL_ERTS_DIR),$(ERTS_DIR))
+$(shell rm -f $(C_SRC_ENV))
+endif
+endif
+
+# Templates.
+
+define bs_c_nif
+#include "erl_nif.h"
+
+static int loads = 0;
+
+static int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
+{
+ /* Initialize private data. */
+ *priv_data = NULL;
+
+ loads++;
+
+ return 0;
+}
+
+static int upgrade(ErlNifEnv* env, void** priv_data, void** old_priv_data, ERL_NIF_TERM load_info)
+{
+ /* Convert the private data to the new version. */
+ *priv_data = *old_priv_data;
+
+ loads++;
+
+ return 0;
+}
+
+static void unload(ErlNifEnv* env, void* priv_data)
+{
+ if (loads == 1) {
+ /* Destroy the private data. */
+ }
+
+ loads--;
+}
+
+static ERL_NIF_TERM hello(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ if (enif_is_atom(env, argv[0])) {
+ return enif_make_tuple2(env,
+ enif_make_atom(env, "hello"),
+ argv[0]);
+ }
+
+ return enif_make_tuple2(env,
+ enif_make_atom(env, "error"),
+ enif_make_atom(env, "badarg"));
+}
+
+static ErlNifFunc nif_funcs[] = {
+ {"hello", 1, hello}
+};
+
+ERL_NIF_INIT($n, nif_funcs, load, NULL, upgrade, unload)
+endef
+
+define bs_erl_nif
+-module($n).
+
+-export([hello/1]).
+
+-on_load(on_load/0).
+on_load() ->
+ PrivDir = case code:priv_dir(?MODULE) of
+ {error, _} ->
+ AppPath = filename:dirname(filename:dirname(code:which(?MODULE))),
+ filename:join(AppPath, "priv");
+ Path ->
+ Path
+ end,
+ erlang:load_nif(filename:join(PrivDir, atom_to_list(?MODULE)), 0).
+
+hello(_) ->
+ erlang:nif_error({not_loaded, ?MODULE}).
+endef
+
+new-nif:
+ifneq ($(wildcard $(C_SRC_DIR)/$n.c),)
+ $(error Error: $(C_SRC_DIR)/$n.c already exists)
+endif
+ifneq ($(wildcard src/$n.erl),)
+ $(error Error: src/$n.erl already exists)
+endif
+ifndef n
+ $(error Usage: $(MAKE) new-nif n=NAME [in=APP])
+endif
+ifdef in
+ $(verbose) $(MAKE) -C $(APPS_DIR)/$(in)/ new-nif n=$n in=
+else
+ $(verbose) mkdir -p $(C_SRC_DIR) src/
+ $(verbose) $(call core_render,bs_c_nif,$(C_SRC_DIR)/$n.c)
+ $(verbose) $(call core_render,bs_erl_nif,src/$n.erl)
+endif
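+
+# As a quick sanity check, "make new-nif n=my_nif" followed by "make" should
+# yield a module where my_nif:hello(world) returns {hello,world} and any
+# non-atom argument, e.g. my_nif:hello(42), returns {error,badarg}
+# (my_nif is a placeholder name).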
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: ci ci-prepare ci-setup
+
+CI_OTP ?=
+CI_HIPE ?=
+CI_ERLLVM ?=
+
+ifeq ($(CI_VM),native)
+ERLC_OPTS += +native
+TEST_ERLC_OPTS += +native
+else ifeq ($(CI_VM),erllvm)
+ERLC_OPTS += +native +'{hipe, [to_llvm]}'
+TEST_ERLC_OPTS += +native +'{hipe, [to_llvm]}'
+endif
+
+ifeq ($(strip $(CI_OTP) $(CI_HIPE) $(CI_ERLLVM)),)
+ci::
+else
+
+ci:: $(addprefix ci-,$(CI_OTP) $(addsuffix -native,$(CI_HIPE)) $(addsuffix -erllvm,$(CI_ERLLVM)))
+
+ci-prepare: $(addprefix $(KERL_INSTALL_DIR)/,$(CI_OTP) $(addsuffix -native,$(CI_HIPE)))
+
+ci-setup::
+ $(verbose) :
+
+ci-extra::
+ $(verbose) :
+
+ci_verbose_0 = @echo " CI " $(1);
+ci_verbose = $(ci_verbose_$(V))
+
+define ci_target
+ci-$1: $(KERL_INSTALL_DIR)/$2
+ $(verbose) $(MAKE) --no-print-directory clean
+ $(ci_verbose) \
+ PATH="$(KERL_INSTALL_DIR)/$2/bin:$(PATH)" \
+ CI_OTP_RELEASE="$1" \
+ CT_OPTS="-label $1" \
+ CI_VM="$3" \
+ $(MAKE) ci-setup tests
+ $(verbose) $(MAKE) --no-print-directory ci-extra
+endef
+
+$(foreach otp,$(CI_OTP),$(eval $(call ci_target,$(otp),$(otp),otp)))
+$(foreach otp,$(CI_HIPE),$(eval $(call ci_target,$(otp)-native,$(otp)-native,native)))
+$(foreach otp,$(CI_ERLLVM),$(eval $(call ci_target,$(otp)-erllvm,$(otp)-native,erllvm)))
+
+$(foreach otp,$(filter-out $(ERLANG_OTP),$(CI_OTP)),$(eval $(call kerl_otp_target,$(otp))))
+$(foreach otp,$(filter-out $(ERLANG_HIPE),$(sort $(CI_HIPE) $(CI_ERLLVM))),$(eval $(call kerl_hipe_target,$(otp))))
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Continuous Integration targets:" \
+ " ci Run '$(MAKE) tests' on all configured Erlang versions." \
+ "" \
+ "The CI_OTP variable must be defined with the Erlang versions" \
+ "that must be tested. For example: CI_OTP = OTP-17.3.4 OTP-17.5.3"
+
+endif
+
+# Copyright (c) 2020, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifdef CONCUERROR_TESTS
+
+.PHONY: concuerror distclean-concuerror
+
+# Configuration
+
+CONCUERROR_LOGS_DIR ?= $(CURDIR)/logs
+CONCUERROR_OPTS ?=
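+
+# Tests are listed as module:function pairs, for instance:
+# CONCUERROR_TESTS = my_module:my_test (both names being placeholders).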
+
+# Core targets.
+
+check:: concuerror
+
+ifndef KEEP_LOGS
+distclean:: distclean-concuerror
+endif
+
+# Plugin-specific targets.
+
+$(ERLANG_MK_TMP)/Concuerror/bin/concuerror: | $(ERLANG_MK_TMP)
+ $(verbose) git clone https://github.com/parapluu/Concuerror $(ERLANG_MK_TMP)/Concuerror
+ $(verbose) $(MAKE) -C $(ERLANG_MK_TMP)/Concuerror
+
+$(CONCUERROR_LOGS_DIR):
+ $(verbose) mkdir -p $(CONCUERROR_LOGS_DIR)
+
+define concuerror_html_report
+<!DOCTYPE html>
+<html lang="en">
+<head>
+<meta charset="utf-8">
+<title>Concuerror HTML report</title>
+</head>
+<body>
+<h1>Concuerror HTML report</h1>
+<p>Generated on $(concuerror_date)</p>
+<ul>
+$(foreach t,$(concuerror_targets),<li><a href="$(t).txt">$(t)</a></li>)
+</ul>
+</body>
+</html>
+endef
+
+concuerror: $(addprefix concuerror-,$(subst :,-,$(CONCUERROR_TESTS)))
+ $(eval concuerror_date := $(shell date))
+ $(eval concuerror_targets := $^)
+ $(verbose) $(call core_render,concuerror_html_report,$(CONCUERROR_LOGS_DIR)/concuerror.html)
+
+define concuerror_target
+.PHONY: concuerror-$1-$2
+
+concuerror-$1-$2: test-build | $(ERLANG_MK_TMP)/Concuerror/bin/concuerror $(CONCUERROR_LOGS_DIR)
+ $(ERLANG_MK_TMP)/Concuerror/bin/concuerror \
+ --pa $(CURDIR)/ebin --pa $(TEST_DIR) \
+ -o $(CONCUERROR_LOGS_DIR)/concuerror-$1-$2.txt \
+ $$(CONCUERROR_OPTS) -m $1 -t $2
+endef
+
+$(foreach test,$(CONCUERROR_TESTS),$(eval $(call concuerror_target,$(firstword $(subst :, ,$(test))),$(lastword $(subst :, ,$(test))))))
+
+distclean-concuerror:
+ $(gen_verbose) rm -rf $(CONCUERROR_LOGS_DIR)
+
+endif
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: ct apps-ct distclean-ct
+
+# Configuration.
+
+CT_OPTS ?=
+
+ifneq ($(wildcard $(TEST_DIR)),)
+ifndef CT_SUITES
+CT_SUITES := $(sort $(subst _SUITE.erl,,$(notdir $(call core_find,$(TEST_DIR)/,*_SUITE.erl))))
+endif
+endif
+CT_SUITES ?=
+CT_LOGS_DIR ?= $(CURDIR)/logs
+
+# Core targets.
+
+tests:: ct
+
+ifndef KEEP_LOGS
+distclean:: distclean-ct
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Common_test targets:" \
+ " ct Run all the common_test suites for this project" \
+ "" \
+ "All your common_test suites have their associated targets." \
+ "A suite named http_SUITE can be ran using the ct-http target."
+
+# Plugin-specific targets.
+
+CT_RUN = ct_run \
+ -no_auto_compile \
+ -noinput \
+ -pa $(CURDIR)/ebin $(TEST_DIR) \
+ -dir $(TEST_DIR) \
+ -logdir $(CT_LOGS_DIR)
+
+ifeq ($(CT_SUITES),)
+ct: $(if $(IS_APP)$(ROOT_DIR),,apps-ct)
+else
+# We do not run tests if we are inside an apps/* application with no test directory.
+ifneq ($(IS_APP)$(wildcard $(TEST_DIR)),1)
+ct: test-build $(if $(IS_APP)$(ROOT_DIR),,apps-ct)
+ $(verbose) mkdir -p $(CT_LOGS_DIR)
+ $(gen_verbose) $(CT_RUN) -sname ct_$(PROJECT) -suite $(addsuffix _SUITE,$(CT_SUITES)) $(CT_OPTS)
+endif
+endif
+
+ifneq ($(ALL_APPS_DIRS),)
+define ct_app_target
+apps-ct-$1: test-build
+ $$(MAKE) -C $1 ct IS_APP=1
+endef
+
+$(foreach app,$(ALL_APPS_DIRS),$(eval $(call ct_app_target,$(app))))
+
+apps-ct: $(addprefix apps-ct-,$(ALL_APPS_DIRS))
+endif
+
+ifdef t
+ifeq (,$(findstring :,$t))
+CT_EXTRA = -group $t
+else
+t_words = $(subst :, ,$t)
+CT_EXTRA = -group $(firstword $(t_words)) -case $(lastword $(t_words))
+endif
+else
+ifdef c
+CT_EXTRA = -case $c
+else
+CT_EXTRA =
+endif
+endif
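+
+# For example, "make ct-http t=my_group" runs a single group of http_SUITE,
+# and "make ct-http t=my_group:my_case" narrows the run to one test case
+# (my_group and my_case stand in for names defined by the suite).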
+
+define ct_suite_target
+ct-$(1): test-build
+ $(verbose) mkdir -p $(CT_LOGS_DIR)
+ $(gen_verbose_esc) $(CT_RUN) -sname ct_$(PROJECT) -suite $(addsuffix _SUITE,$(1)) $(CT_EXTRA) $(CT_OPTS)
+endef
+
+$(foreach test,$(CT_SUITES),$(eval $(call ct_suite_target,$(test))))
+
+distclean-ct:
+ $(gen_verbose) rm -rf $(CT_LOGS_DIR)
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: plt distclean-plt dialyze
+
+# Configuration.
+
+DIALYZER_PLT ?= $(CURDIR)/.$(PROJECT).plt
+export DIALYZER_PLT
+
+PLT_APPS ?=
+DIALYZER_DIRS ?= --src -r $(wildcard src) $(ALL_APPS_DIRS)
+DIALYZER_OPTS ?= -Werror_handling -Wrace_conditions -Wunmatched_returns # -Wunderspecs
+DIALYZER_PLT_OPTS ?=
+
+# Core targets.
+
+check:: dialyze
+
+distclean:: distclean-plt
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Dialyzer targets:" \
+ " plt Build a PLT file for this project" \
+ " dialyze Analyze the project using Dialyzer"
+
+# Plugin-specific targets.
+
+define filter_opts.erl
+ Opts = init:get_plain_arguments(),
+ {Filtered, _} = lists:foldl(fun
+ (O, {Os, true}) -> {[O|Os], false};
+ (O = "-D", {Os, _}) -> {[O|Os], true};
+ (O = [\\$$-, \\$$D, _ | _], {Os, _}) -> {[O|Os], false};
+ (O = "-I", {Os, _}) -> {[O|Os], true};
+ (O = [\\$$-, \\$$I, _ | _], {Os, _}) -> {[O|Os], false};
+ (O = "-pa", {Os, _}) -> {[O|Os], true};
+ (_, Acc) -> Acc
+ end, {[], false}, Opts),
+ io:format("~s~n", [string:join(lists:reverse(Filtered), " ")]),
+ halt().
+endef
+
+# DIALYZER_PLT is a variable understood directly by Dialyzer.
+#
+# We append the path to erts at the end of the PLT. This works
+# because the PLT file is in the external term format and the
+# function binary_to_term/1 ignores any trailing data.
+$(DIALYZER_PLT): deps app
+ $(eval DEPS_LOG := $(shell test -f $(ERLANG_MK_TMP)/deps.log && \
+ while read p; do test -d $$p/ebin && echo $$p/ebin; done <$(ERLANG_MK_TMP)/deps.log))
+ $(verbose) dialyzer --build_plt $(DIALYZER_PLT_OPTS) --apps \
+ erts kernel stdlib $(PLT_APPS) $(OTP_DEPS) $(LOCAL_DEPS) $(DEPS_LOG) || test $$? -eq 2
+ $(verbose) $(ERL) -eval 'io:format("~n~s~n", [code:lib_dir(erts)]), halt().' >> $@
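+
+# As a consequence, "tail -n1 .$(PROJECT).plt" prints the local erts lib
+# directory (for example /usr/lib/erlang/lib/erts-11.1, depending on the
+# install); the dialyze target below relies on this to detect OTP switches.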
+
+plt: $(DIALYZER_PLT)
+
+distclean-plt:
+ $(gen_verbose) rm -f $(DIALYZER_PLT)
+
+ifneq ($(wildcard $(DIALYZER_PLT)),)
+dialyze: $(if $(filter --src,$(DIALYZER_DIRS)),,deps app)
+ $(verbose) if ! tail -n1 $(DIALYZER_PLT) | \
+ grep -q "^`$(ERL) -eval 'io:format("~s", [code:lib_dir(erts)]), halt().'`$$"; then \
+ rm $(DIALYZER_PLT); \
+ $(MAKE) plt; \
+ fi
+else
+dialyze: $(DIALYZER_PLT)
+endif
+ $(verbose) dialyzer --no_native `$(ERL) \
+ -eval "$(subst $(newline),,$(call escape_dquotes,$(call filter_opts.erl)))" \
+ -extra $(ERLC_OPTS)` $(DIALYZER_DIRS) $(DIALYZER_OPTS) $(if $(wildcard ebin/),-pa ebin/)
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-edoc edoc
+
+# Configuration.
+
+EDOC_OPTS ?=
+EDOC_SRC_DIRS ?=
+EDOC_OUTPUT ?= doc
+
+define edoc.erl
+ SrcPaths = lists:foldl(fun(P, Acc) ->
+ filelib:wildcard(atom_to_list(P) ++ "/{src,c_src}") ++ Acc
+ end, [], [$(call comma_list,$(patsubst %,'%',$(call core_native_path,$(EDOC_SRC_DIRS))))]),
+ DefaultOpts = [{dir, "$(EDOC_OUTPUT)"}, {source_path, SrcPaths}, {subpackages, false}],
+ edoc:application($(1), ".", [$(2)] ++ DefaultOpts),
+ halt(0).
+endef
+
+# Core targets.
+
+ifneq ($(strip $(EDOC_SRC_DIRS)$(wildcard doc/overview.edoc)),)
+docs:: edoc
+endif
+
+distclean:: distclean-edoc
+
+# Plugin-specific targets.
+
+edoc: distclean-edoc doc-deps
+ $(gen_verbose) $(call erlang,$(call edoc.erl,$(PROJECT),$(EDOC_OPTS)))
+
+distclean-edoc:
+ $(gen_verbose) rm -f $(EDOC_OUTPUT)/*.css $(EDOC_OUTPUT)/*.html $(EDOC_OUTPUT)/*.png $(EDOC_OUTPUT)/edoc-info
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Configuration.
+
+DTL_FULL_PATH ?=
+DTL_PATH ?= templates/
+DTL_PREFIX ?=
+DTL_SUFFIX ?= _dtl
+DTL_OPTS ?=
+
+# Verbosity.
+
+dtl_verbose_0 = @echo " DTL " $(filter %.dtl,$(?F));
+dtl_verbose = $(dtl_verbose_$(V))
+
+# Core targets.
+
+DTL_PATH := $(abspath $(DTL_PATH))
+DTL_FILES := $(sort $(call core_find,$(DTL_PATH),*.dtl))
+
+ifneq ($(DTL_FILES),)
+
+DTL_NAMES = $(addprefix $(DTL_PREFIX),$(addsuffix $(DTL_SUFFIX),$(DTL_FILES:$(DTL_PATH)/%.dtl=%)))
+DTL_MODULES = $(if $(DTL_FULL_PATH),$(subst /,_,$(DTL_NAMES)),$(notdir $(DTL_NAMES)))
+BEAM_FILES += $(addsuffix .beam,$(addprefix ebin/,$(DTL_MODULES)))
+
+ifneq ($(words $(DTL_FILES)),0)
+# Rebuild templates when the Makefile changes.
+$(ERLANG_MK_TMP)/last-makefile-change-erlydtl: $(MAKEFILE_LIST) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(DTL_FILES); \
+ fi
+ $(verbose) touch $@
+
+ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change-erlydtl
+endif
+
+define erlydtl_compile.erl
+ [begin
+ Module0 = case "$(strip $(DTL_FULL_PATH))" of
+ "" ->
+ filename:basename(F, ".dtl");
+ _ ->
+ "$(call core_native_path,$(DTL_PATH))/" ++ F2 = filename:rootname(F, ".dtl"),
+ re:replace(F2, "/", "_", [{return, list}, global])
+ end,
+ Module = list_to_atom("$(DTL_PREFIX)" ++ string:to_lower(Module0) ++ "$(DTL_SUFFIX)"),
+ case erlydtl:compile(F, Module, [$(DTL_OPTS)] ++ [{out_dir, "ebin/"}, return_errors]) of
+ ok -> ok;
+ {ok, _} -> ok
+ end
+ end || F <- string:tokens("$(1)", " ")],
+ halt().
+endef
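+
+# For instance, templates/foo.dtl compiles by default to a module named
+# foo_dtl (ebin/foo_dtl.beam); with DTL_FULL_PATH set, a nested
+# templates/users/row.dtl becomes users_row_dtl instead.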
+
+ebin/$(PROJECT).app:: $(DTL_FILES) | ebin/
+ $(if $(strip $?),\
+ $(dtl_verbose) $(call erlang,$(call erlydtl_compile.erl,$(call core_native_path,$?)),\
+ -pa ebin/))
+
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, Dave Cottlehuber <dch@skunkwerks.at>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-escript escript escript-zip
+
+# Configuration.
+
+ESCRIPT_NAME ?= $(PROJECT)
+ESCRIPT_FILE ?= $(ESCRIPT_NAME)
+
+ESCRIPT_SHEBANG ?= /usr/bin/env escript
+ESCRIPT_COMMENT ?= This is an -*- erlang -*- file
+ESCRIPT_EMU_ARGS ?= -escript main $(ESCRIPT_NAME)
+
+ESCRIPT_ZIP ?= 7z a -tzip -mx=9 -mtc=off $(if $(filter-out 0,$(V)),,> /dev/null)
+ESCRIPT_ZIP_FILE ?= $(ERLANG_MK_TMP)/escript.zip
+
+# Core targets.
+
+distclean:: distclean-escript
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Escript targets:" \
+ " escript Build an executable escript archive" \
+
+# Plugin-specific targets.
+
+escript-zip:: FULL=1
+escript-zip:: deps app
+ $(verbose) mkdir -p $(dir $(ESCRIPT_ZIP))
+ $(verbose) rm -f $(ESCRIPT_ZIP_FILE)
+ $(gen_verbose) cd .. && $(ESCRIPT_ZIP) $(ESCRIPT_ZIP_FILE) $(PROJECT)/ebin/*
+ifneq ($(DEPS),)
+ $(verbose) cd $(DEPS_DIR) && $(ESCRIPT_ZIP) $(ESCRIPT_ZIP_FILE) \
+ $(subst $(DEPS_DIR)/,,$(addsuffix /*,$(wildcard \
+ $(addsuffix /ebin,$(shell cat $(ERLANG_MK_TMP)/deps.log)))))
+endif
+
+escript:: escript-zip
+ $(gen_verbose) printf "%s\n" \
+ "#!$(ESCRIPT_SHEBANG)" \
+ "%% $(ESCRIPT_COMMENT)" \
+ "%%! $(ESCRIPT_EMU_ARGS)" > $(ESCRIPT_FILE)
+ $(verbose) cat $(ESCRIPT_ZIP_FILE) >> $(ESCRIPT_FILE)
+ $(verbose) chmod +x $(ESCRIPT_FILE)
+
+distclean-escript:
+ $(gen_verbose) rm -f $(ESCRIPT_FILE)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, Enrique Fernandez <enrique.fernandez@erlang-solutions.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: eunit apps-eunit
+
+# Configuration
+
+EUNIT_OPTS ?=
+EUNIT_ERL_OPTS ?=
+
+# Core targets.
+
+tests:: eunit
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "EUnit targets:" \
+ " eunit Run all the EUnit tests for this project"
+
+# Plugin-specific targets.
+
+define eunit.erl
+ $(call cover.erl)
+ CoverSetup(),
+ case eunit:test($1, [$(EUNIT_OPTS)]) of
+ ok -> ok;
+ error -> halt(2)
+ end,
+ CoverExport("$(call core_native_path,$(COVER_DATA_DIR))/eunit.coverdata"),
+ halt()
+endef
+
+EUNIT_ERL_OPTS += -pa $(TEST_DIR) $(CURDIR)/ebin
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+eunit: test-build cover-data-dir
+ $(gen_verbose) $(call erlang,$(call eunit.erl,['$(t)']),$(EUNIT_ERL_OPTS))
+else
+eunit: test-build cover-data-dir
+ $(gen_verbose) $(call erlang,$(call eunit.erl,fun $(t)/0),$(EUNIT_ERL_OPTS))
+endif
+else
+EUNIT_EBIN_MODS = $(notdir $(basename $(ERL_FILES) $(BEAM_FILES)))
+EUNIT_TEST_MODS = $(notdir $(basename $(call core_find,$(TEST_DIR)/,*.erl)))
+
+EUNIT_MODS = $(foreach mod,$(EUNIT_EBIN_MODS) $(filter-out \
+ $(patsubst %,%_tests,$(EUNIT_EBIN_MODS)),$(EUNIT_TEST_MODS)),'$(mod)')
+
+eunit: test-build $(if $(IS_APP)$(ROOT_DIR),,apps-eunit) cover-data-dir
+ifneq ($(wildcard src/ $(TEST_DIR)),)
+ $(gen_verbose) $(call erlang,$(call eunit.erl,[$(call comma_list,$(EUNIT_MODS))]),$(EUNIT_ERL_OPTS))
+endif
+
+ifneq ($(ALL_APPS_DIRS),)
+apps-eunit: test-build
+ $(verbose) eunit_retcode=0 ; for app in $(ALL_APPS_DIRS); do $(MAKE) -C $$app eunit IS_APP=1; \
+ [ $$? -ne 0 ] && eunit_retcode=1 ; done ; \
+ exit $$eunit_retcode
+endif
+endif
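+
+# For example, "make eunit t=my_mod" runs the EUnit tests of that one module,
+# while "make eunit t=my_mod:my_test" runs the single zero-arity test
+# my_mod:my_test/0 (both names are placeholders).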
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter proper,$(DEPS) $(TEST_DEPS)),proper)
+.PHONY: proper
+
+# Targets.
+
+tests:: proper
+
+define proper_check.erl
+ $(call cover.erl)
+ code:add_pathsa([
+ "$(call core_native_path,$(CURDIR)/ebin)",
+ "$(call core_native_path,$(DEPS_DIR)/*/ebin)",
+ "$(call core_native_path,$(TEST_DIR))"]),
+ Module = fun(M) ->
+ [true] =:= lists:usort([
+ case atom_to_list(F) of
+ "prop_" ++ _ ->
+ io:format("Testing ~p:~p/0~n", [M, F]),
+ proper:quickcheck(M:F(), nocolors);
+ _ ->
+ true
+ end
+ || {F, 0} <- M:module_info(exports)])
+ end,
+ try begin
+ CoverSetup(),
+ Res = case $(1) of
+ all -> [true] =:= lists:usort([Module(M) || M <- [$(call comma_list,$(3))]]);
+ module -> Module($(2));
+ function -> proper:quickcheck($(2), nocolors)
+ end,
+ CoverExport("$(COVER_DATA_DIR)/proper.coverdata"),
+ Res
+ end of
+ true -> halt(0);
+ _ -> halt(1)
+ catch error:undef ->
+ io:format("Undefined property or module?~n~p~n", [erlang:get_stacktrace()]),
+ halt(0)
+ end.
+endef
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+proper: test-build cover-data-dir
+ $(verbose) $(call erlang,$(call proper_check.erl,module,$(t)))
+else
+proper: test-build cover-data-dir
+ $(verbose) echo Testing $(t)/0
+ $(verbose) $(call erlang,$(call proper_check.erl,function,$(t)()))
+endif
+else
+proper: test-build cover-data-dir
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(wildcard ebin/*.beam) $(call core_find,$(TEST_DIR)/,*.beam))))))
+ $(gen_verbose) $(call erlang,$(call proper_check.erl,all,undefined,$(MODULES)))
+endif
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Verbosity.
+
+proto_verbose_0 = @echo " PROTO " $(filter %.proto,$(?F));
+proto_verbose = $(proto_verbose_$(V))
+
+# Core targets.
+
+ifneq ($(wildcard src/),)
+ifneq ($(filter gpb protobuffs,$(BUILD_DEPS) $(DEPS)),)
+PROTO_FILES := $(filter %.proto,$(ALL_SRC_FILES))
+ERL_FILES += $(addprefix src/,$(patsubst %.proto,%_pb.erl,$(notdir $(PROTO_FILES))))
+
+ifeq ($(PROTO_FILES),)
+$(ERLANG_MK_TMP)/last-makefile-change-protobuffs:
+ $(verbose) :
+else
+# Rebuild proto files when the Makefile changes.
+# We exclude $(PROJECT).d to avoid a circular dependency.
+$(ERLANG_MK_TMP)/last-makefile-change-protobuffs: $(filter-out $(PROJECT).d,$(MAKEFILE_LIST)) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(PROTO_FILES); \
+ fi
+ $(verbose) touch $@
+
+$(PROJECT).d:: $(ERLANG_MK_TMP)/last-makefile-change-protobuffs
+endif
+
+ifeq ($(filter gpb,$(BUILD_DEPS) $(DEPS)),)
+define compile_proto.erl
+ [begin
+ protobuffs_compile:generate_source(F, [
+ {output_include_dir, "./include"},
+ {output_src_dir, "./src"}])
+ end || F <- string:tokens("$1", " ")],
+ halt().
+endef
+else
+define compile_proto.erl
+ [begin
+ gpb_compile:file(F, [
+ {include_as_lib, true},
+ {module_name_suffix, "_pb"},
+ {o_hrl, "./include"},
+ {o_erl, "./src"}])
+ end || F <- string:tokens("$1", " ")],
+ halt().
+endef
+endif
+
+ifneq ($(PROTO_FILES),)
+$(PROJECT).d:: $(PROTO_FILES)
+ $(verbose) mkdir -p ebin/ include/
+ $(if $(strip $?),$(proto_verbose) $(call erlang,$(call compile_proto.erl,$?)))
+endif
+endif
+endif
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: relx-rel relx-relup distclean-relx-rel run
+
+# Configuration.
+
+RELX ?= $(ERLANG_MK_TMP)/relx
+RELX_CONFIG ?= $(CURDIR)/relx.config
+
+RELX_URL ?= https://erlang.mk/res/relx-v3.27.0
+RELX_OPTS ?=
+RELX_OUTPUT_DIR ?= _rel
+RELX_REL_EXT ?=
+RELX_TAR ?= 1
+
+ifdef SFX
+ RELX_TAR = 1
+endif
+
+ifeq ($(firstword $(RELX_OPTS)),-o)
+ RELX_OUTPUT_DIR = $(word 2,$(RELX_OPTS))
+else
+ RELX_OPTS += -o $(RELX_OUTPUT_DIR)
+endif
+
+# Core targets.
+
+ifeq ($(IS_DEP),)
+ifneq ($(wildcard $(RELX_CONFIG)),)
+rel:: relx-rel
+
+relup:: relx-relup
+endif
+endif
+
+distclean:: distclean-relx-rel
+
+# Plugin-specific targets.
+
+$(RELX): | $(ERLANG_MK_TMP)
+ $(gen_verbose) $(call core_http_get,$(RELX),$(RELX_URL))
+ $(verbose) chmod +x $(RELX)
+
+relx-rel: $(RELX) rel-deps app
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) release
+ $(verbose) $(MAKE) relx-post-rel
+ifeq ($(RELX_TAR),1)
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) tar
+endif
+
+relx-relup: $(RELX) rel-deps app
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) release
+ $(MAKE) relx-post-rel
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) relup $(if $(filter 1,$(RELX_TAR)),tar)
+
+distclean-relx-rel:
+ $(gen_verbose) rm -rf $(RELX_OUTPUT_DIR)
+
+# Default hooks.
+relx-post-rel::
+ $(verbose) :
+
+# Run target.
+
+ifeq ($(wildcard $(RELX_CONFIG)),)
+run::
+else
+
+define get_relx_release.erl
+ {ok, Config} = file:consult("$(call core_native_path,$(RELX_CONFIG))"),
+ {release, {Name, Vsn0}, _} = lists:keyfind(release, 1, Config),
+ Vsn = case Vsn0 of
+ {cmd, Cmd} -> os:cmd(Cmd);
+ semver -> "";
+ {semver, _} -> "";
+ VsnStr -> Vsn0
+ end,
+ Extended = case lists:keyfind(extended_start_script, 1, Config) of
+ {_, true} -> "1";
+ _ -> ""
+ end,
+ io:format("~s ~s ~s", [Name, Vsn, Extended]),
+ halt(0).
+endef
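+
+# Given a relx.config like the one generated by bootstrap-rel, for example
+# {release, {my_app_release, "1"}, [...]} with extended_start_script enabled,
+# this would print "my_app_release 1 1" to populate the variables below.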
+
+RELX_REL := $(shell $(call erlang,$(get_relx_release.erl)))
+RELX_REL_NAME := $(word 1,$(RELX_REL))
+RELX_REL_VSN := $(word 2,$(RELX_REL))
+RELX_REL_CMD := $(if $(word 3,$(RELX_REL)),console)
+
+ifeq ($(PLATFORM),msys2)
+RELX_REL_EXT := .cmd
+endif
+
+run:: all
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) $(RELX_REL_CMD)
+
+ifdef RELOAD
+rel::
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) ping
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) \
+ eval "io:format(\"~p~n\", [c:lm()])"
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Relx targets:" \
+ " run Compile the project, build the release and run it"
+
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, M Robert Martin <rob@version2beta.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: shell
+
+# Configuration.
+
+SHELL_ERL ?= erl
+SHELL_PATHS ?= $(CURDIR)/ebin $(TEST_DIR)
+SHELL_OPTS ?=
+
+ALL_SHELL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(SHELL_DEPS))
+
+# Core targets
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Shell targets:" \
+ " shell Run an erlang shell with SHELL_OPTS or reasonable default"
+
+# Plugin-specific targets.
+
+$(foreach dep,$(SHELL_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+build-shell-deps:
+else
+build-shell-deps: $(ALL_SHELL_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_SHELL_DEPS_DIRS) ; do \
+ if [ -z "$(strip $(FULL))" ] && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ else \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ fi \
+ done
+endif
+
+shell:: build-shell-deps
+ $(gen_verbose) $(SHELL_ERL) -pa $(SHELL_PATHS) $(SHELL_OPTS)
+
+# Copyright 2017, Stanislaw Klekot <dozzie@jarowit.net>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-sphinx sphinx
+
+# Configuration.
+
+SPHINX_BUILD ?= sphinx-build
+SPHINX_SOURCE ?= doc
+SPHINX_CONFDIR ?=
+SPHINX_FORMATS ?= html
+SPHINX_DOCTREES ?= $(ERLANG_MK_TMP)/sphinx.doctrees
+SPHINX_OPTS ?=
+
+#sphinx_html_opts =
+#sphinx_html_output = html
+#sphinx_man_opts =
+#sphinx_man_output = man
+#sphinx_latex_opts =
+#sphinx_latex_output = latex
+
+# Helpers.
+
+sphinx_build_0 = @echo " SPHINX" $1; $(SPHINX_BUILD) -N -q
+sphinx_build_1 = $(SPHINX_BUILD) -N
+sphinx_build_2 = set -x; $(SPHINX_BUILD)
+sphinx_build = $(sphinx_build_$(V))
+
+define sphinx.build
+$(call sphinx_build,$1) -b $1 -d $(SPHINX_DOCTREES) $(if $(SPHINX_CONFDIR),-c $(SPHINX_CONFDIR)) $(SPHINX_OPTS) $(sphinx_$1_opts) -- $(SPHINX_SOURCE) $(call sphinx.output,$1)
+
+endef
+
+define sphinx.output
+$(if $(sphinx_$1_output),$(sphinx_$1_output),$1)
+endef
+
+# Targets.
+
+ifneq ($(wildcard $(if $(SPHINX_CONFDIR),$(SPHINX_CONFDIR),$(SPHINX_SOURCE))/conf.py),)
+docs:: sphinx
+distclean:: distclean-sphinx
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Sphinx targets:" \
+ " sphinx Generate Sphinx documentation." \
+ "" \
+ "ReST sources and 'conf.py' file are expected in directory pointed by" \
+ "SPHINX_SOURCE ('doc' by default). SPHINX_FORMATS lists formats to build (only" \
+ "'html' format is generated by default); target directory can be specified by" \
+ 'setting sphinx_$${format}_output, for example: sphinx_html_output = output/html' \
+ "Additional Sphinx options can be set in SPHINX_OPTS."
+
+# Plugin-specific targets.
+
+sphinx:
+ $(foreach F,$(SPHINX_FORMATS),$(call sphinx.build,$F))
+
+distclean-sphinx:
+ $(gen_verbose) rm -rf $(filter-out $(SPHINX_SOURCE),$(foreach F,$(SPHINX_FORMATS),$(call sphinx.output,$F)))
+
+# Copyright (c) 2017, Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: show-ERL_LIBS show-ERLC_OPTS show-TEST_ERLC_OPTS
+
+show-ERL_LIBS:
+ @echo $(ERL_LIBS)
+
+show-ERLC_OPTS:
+ @$(foreach opt,$(ERLC_OPTS) -pa ebin -I include,echo "$(opt)";)
+
+show-TEST_ERLC_OPTS:
+ @$(foreach opt,$(TEST_ERLC_OPTS) -pa ebin -I include,echo "$(opt)";)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter triq,$(DEPS) $(TEST_DEPS)),triq)
+.PHONY: triq
+
+# Targets.
+
+tests:: triq
+
+define triq_check.erl
+ $(call cover.erl)
+ code:add_pathsa([
+ "$(call core_native_path,$(CURDIR)/ebin)",
+ "$(call core_native_path,$(DEPS_DIR)/*/ebin)",
+ "$(call core_native_path,$(TEST_DIR))"]),
+ try begin
+ CoverSetup(),
+ Res = case $(1) of
+ all -> [true] =:= lists:usort([triq:check(M) || M <- [$(call comma_list,$(3))]]);
+ module -> triq:check($(2));
+ function -> triq:check($(2))
+ end,
+ CoverExport("$(COVER_DATA_DIR)/triq.coverdata"),
+ Res
+ end of
+ true -> halt(0);
+ _ -> halt(1)
+ catch error:undef ->
+ io:format("Undefined property or module?~n~p~n", [erlang:get_stacktrace()]),
+ halt(0)
+ end.
+endef
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+triq: test-build cover-data-dir
+ $(verbose) $(call erlang,$(call triq_check.erl,module,$(t)))
+else
+triq: test-build cover-data-dir
+ $(verbose) echo Testing $(t)/0
+ $(verbose) $(call erlang,$(call triq_check.erl,function,$(t)()))
+endif
+else
+triq: test-build cover-data-dir
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(wildcard ebin/*.beam) $(call core_find,$(TEST_DIR)/,*.beam))))))
+ $(gen_verbose) $(call erlang,$(call triq_check.erl,all,undefined,$(MODULES)))
+endif
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Erlang Solutions Ltd.
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: xref distclean-xref
+
+# Configuration.
+
+ifeq ($(XREF_CONFIG),)
+ XREFR_ARGS :=
+else
+ XREFR_ARGS := -c $(XREF_CONFIG)
+endif
+
+XREFR ?= $(CURDIR)/xrefr
+export XREFR
+
+XREFR_URL ?= https://github.com/inaka/xref_runner/releases/download/1.1.0/xrefr
+
+# Core targets.
+
+help::
+ $(verbose) printf '%s\n' '' \
+ 'Xref targets:' \
+ ' xref Run Xrefr using $$XREF_CONFIG as config file if defined'
+
+distclean:: distclean-xref
+
+# Plugin-specific targets.
+
+$(XREFR):
+ $(gen_verbose) $(call core_http_get,$(XREFR),$(XREFR_URL))
+ $(verbose) chmod +x $(XREFR)
+
+xref: deps app $(XREFR)
+ $(gen_verbose) $(XREFR) $(XREFR_ARGS)
+
+distclean-xref:
+ $(gen_verbose) rm -rf $(XREFR)
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Viktor Söderqvist <viktor@zuiderkwast.se>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+COVER_REPORT_DIR ?= cover
+COVER_DATA_DIR ?= $(COVER_REPORT_DIR)
+
+ifdef COVER
+COVER_APPS ?= $(notdir $(ALL_APPS_DIRS))
+COVER_DEPS ?=
+endif
+
+# Code coverage for Common Test.
+
+ifdef COVER
+ifdef CT_RUN
+ifneq ($(wildcard $(TEST_DIR)),)
+test-build:: $(TEST_DIR)/ct.cover.spec
+
+$(TEST_DIR)/ct.cover.spec: cover-data-dir
+ $(gen_verbose) printf "%s\n" \
+ "{incl_app, '$(PROJECT)', details}." \
+ "{incl_dirs, '$(PROJECT)', [\"$(call core_native_path,$(CURDIR)/ebin)\" \
+ $(foreach a,$(COVER_APPS),$(comma) \"$(call core_native_path,$(APPS_DIR)/$a/ebin)\") \
+ $(foreach d,$(COVER_DEPS),$(comma) \"$(call core_native_path,$(DEPS_DIR)/$d/ebin)\")]}." \
+ '{export,"$(call core_native_path,$(abspath $(COVER_DATA_DIR))/ct.coverdata)"}.' > $@
+
+CT_RUN += -cover $(TEST_DIR)/ct.cover.spec
+endif
+endif
+endif
+
+# Code coverage for other tools.
+
+ifdef COVER
+define cover.erl
+ CoverSetup = fun() ->
+ Dirs = ["$(call core_native_path,$(CURDIR)/ebin)"
+ $(foreach a,$(COVER_APPS),$(comma) "$(call core_native_path,$(APPS_DIR)/$a/ebin)")
+ $(foreach d,$(COVER_DEPS),$(comma) "$(call core_native_path,$(DEPS_DIR)/$d/ebin)")],
+ [begin
+ case filelib:is_dir(Dir) of
+ false -> false;
+ true ->
+ case cover:compile_beam_directory(Dir) of
+ {error, _} -> halt(1);
+ _ -> true
+ end
+ end
+ end || Dir <- Dirs]
+ end,
+ CoverExport = fun(Filename) -> cover:export(Filename) end,
+endef
+else
+define cover.erl
+ CoverSetup = fun() -> ok end,
+ CoverExport = fun(_) -> ok end,
+endef
+endif
+
+# Core targets
+
+ifdef COVER
+ifneq ($(COVER_REPORT_DIR),)
+tests::
+ $(verbose) $(MAKE) --no-print-directory cover-report
+endif
+
+cover-data-dir: | $(COVER_DATA_DIR)
+
+$(COVER_DATA_DIR):
+ $(verbose) mkdir -p $(COVER_DATA_DIR)
+else
+cover-data-dir:
+endif
+
+clean:: coverdata-clean
+
+ifneq ($(COVER_REPORT_DIR),)
+distclean:: cover-report-clean
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Cover targets:" \
+ " cover-report Generate a HTML coverage report from previously collected" \
+ " cover data." \
+ " all.coverdata Merge all coverdata files into all.coverdata." \
+ "" \
+ "If COVER=1 is set, coverage data is generated by the targets eunit and ct. The" \
+ "target tests additionally generates a HTML coverage report from the combined" \
+ "coverdata files from each of these testing tools. HTML reports can be disabled" \
+ "by setting COVER_REPORT_DIR to empty."
+
+# Plugin-specific targets.
+
+COVERDATA = $(filter-out $(COVER_DATA_DIR)/all.coverdata,$(wildcard $(COVER_DATA_DIR)/*.coverdata))
+
+.PHONY: coverdata-clean
+coverdata-clean:
+ $(gen_verbose) rm -f $(COVER_DATA_DIR)/*.coverdata $(TEST_DIR)/ct.cover.spec
+
+# Merge all coverdata files into one.
+define cover_export.erl
+ $(foreach f,$(COVERDATA),cover:import("$(f)") == ok orelse halt(1),)
+ cover:export("$(COVER_DATA_DIR)/$@"), halt(0).
+endef
+
+all.coverdata: $(COVERDATA) cover-data-dir
+ $(gen_verbose) $(call erlang,$(cover_export.erl))
+
+# These are only defined if COVER_REPORT_DIR is non-empty. Set COVER_REPORT_DIR to
+# empty if you want the coverdata files but not the HTML report.
+ifneq ($(COVER_REPORT_DIR),)
+
+.PHONY: cover-report-clean cover-report
+
+cover-report-clean:
+ $(gen_verbose) rm -rf $(COVER_REPORT_DIR)
+ifneq ($(COVER_REPORT_DIR),$(COVER_DATA_DIR))
+ $(if $(shell ls -A $(COVER_DATA_DIR)/),,$(verbose) rmdir $(COVER_DATA_DIR))
+endif
+
+ifeq ($(COVERDATA),)
+cover-report:
+else
+
+# Modules which include eunit.hrl always contain one line without coverage
+# because eunit defines test/0 which is never called. We compensate for this.
+EUNIT_HRL_MODS = $(subst $(space),$(comma),$(shell \
+ grep -H -e '^\s*-include.*include/eunit\.hrl"' src/*.erl \
+ | sed "s/^src\/\(.*\)\.erl:.*/'\1'/" | uniq))
+
+define cover_report.erl
+ $(foreach f,$(COVERDATA),cover:import("$(f)") == ok orelse halt(1),)
+ Ms = cover:imported_modules(),
+ [cover:analyse_to_file(M, "$(COVER_REPORT_DIR)/" ++ atom_to_list(M)
+ ++ ".COVER.html", [html]) || M <- Ms],
+ Report = [begin {ok, R} = cover:analyse(M, module), R end || M <- Ms],
+ EunitHrlMods = [$(EUNIT_HRL_MODS)],
+ Report1 = [{M, {Y, case lists:member(M, EunitHrlMods) of
+ true -> N - 1; false -> N end}} || {M, {Y, N}} <- Report],
+ TotalY = lists:sum([Y || {_, {Y, _}} <- Report1]),
+ TotalN = lists:sum([N || {_, {_, N}} <- Report1]),
+ Perc = fun(Y, N) -> case Y + N of 0 -> 100; S -> round(100 * Y / S) end end,
+ TotalPerc = Perc(TotalY, TotalN),
+ {ok, F} = file:open("$(COVER_REPORT_DIR)/index.html", [write]),
+ io:format(F, "<!DOCTYPE html><html>~n"
+ "<head><meta charset=\"UTF-8\">~n"
+ "<title>Coverage report</title></head>~n"
+ "<body>~n", []),
+ io:format(F, "<h1>Coverage</h1>~n<p>Total: ~p%</p>~n", [TotalPerc]),
+ io:format(F, "<table><tr><th>Module</th><th>Coverage</th></tr>~n", []),
+ [io:format(F, "<tr><td><a href=\"~p.COVER.html\">~p</a></td>"
+ "<td>~p%</td></tr>~n",
+ [M, M, Perc(Y, N)]) || {M, {Y, N}} <- Report1],
+ How = "$(subst $(space),$(comma)$(space),$(basename $(COVERDATA)))",
+ Date = "$(shell date -u "+%Y-%m-%dT%H:%M:%SZ")",
+ io:format(F, "</table>~n"
+ "<p>Generated using ~s and erlang.mk on ~s.</p>~n"
+ "</body></html>", [How, Date]),
+ halt().
+endef
+
+cover-report:
+ $(verbose) mkdir -p $(COVER_REPORT_DIR)
+ $(gen_verbose) $(call erlang,$(cover_report.erl))
+
+endif
+endif # ifneq ($(COVER_REPORT_DIR),)
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: sfx
+
+ifdef RELX_REL
+ifdef SFX
+
+# Configuration.
+
+SFX_ARCHIVE ?= $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/$(RELX_REL_NAME)-$(RELX_REL_VSN).tar.gz
+SFX_OUTPUT_FILE ?= $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME).run
+
+# Core targets.
+
+rel:: sfx
+
+# Plugin-specific targets.
+
+define sfx_stub
+#!/bin/sh
+
+TMPDIR=`mktemp -d`
+ARCHIVE=`awk '/^__ARCHIVE_BELOW__$$/ {print NR + 1; exit 0;}' $$0`
+FILENAME=$$(basename $$0)
+REL=$${FILENAME%.*}
+
+tail -n+$$ARCHIVE $$0 | tar -xzf - -C $$TMPDIR
+
+$$TMPDIR/bin/$$REL console
+RET=$$?
+
+rm -rf $$TMPDIR
+
+exit $$RET
+
+__ARCHIVE_BELOW__
+endef
+
+sfx:
+ $(verbose) $(call core_render,sfx_stub,$(SFX_OUTPUT_FILE))
+ $(gen_verbose) cat $(SFX_ARCHIVE) >> $(SFX_OUTPUT_FILE)
+ $(verbose) chmod +x $(SFX_OUTPUT_FILE)
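+
+# The result is a single self-contained file, e.g. _rel/my_app_release.run,
+# that unpacks the release into a temporary directory, starts it in console
+# mode and removes the directory again on exit.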
+
+endif
+endif
+
+# Copyright (c) 2013-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# External plugins.
+
+DEP_PLUGINS ?=
+
+$(foreach p,$(DEP_PLUGINS),\
+ $(eval $(if $(findstring /,$p),\
+ $(call core_dep_plugin,$p,$(firstword $(subst /, ,$p))),\
+ $(call core_dep_plugin,$p/plugins.mk,$p))))
+
+help:: help-plugins
+
+help-plugins::
+ $(verbose) :
+
+# Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015-2016, Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Fetch dependencies recursively (without building them).
+
+.PHONY: fetch-deps fetch-doc-deps fetch-rel-deps fetch-test-deps \
+ fetch-shell-deps
+
+.PHONY: $(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+fetch-deps: $(ERLANG_MK_RECURSIVE_DEPS_LIST)
+fetch-doc-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST)
+fetch-rel-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST)
+fetch-test-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST)
+fetch-shell-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+ifneq ($(SKIP_DEPS),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST):
+ $(verbose) :> $@
+else
+# By default, we fetch "normal" dependencies. They are always included,
+# no matter which type of dependencies is requested.
+#
+# $(ALL_DEPS_DIRS) includes $(BUILD_DEPS).
+
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_DOC_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_REL_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_TEST_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_SHELL_DEPS_DIRS)
+
+# Allow fetch-deps to be combined with $(DEP_TYPES) to fetch multiple
+# types of dependencies with a single target.
+ifneq ($(filter doc,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_DOC_DEPS_DIRS)
+endif
+ifneq ($(filter rel,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_REL_DEPS_DIRS)
+endif
+ifneq ($(filter test,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_TEST_DEPS_DIRS)
+endif
+ifneq ($(filter shell,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_SHELL_DEPS_DIRS)
+endif
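+
+# For example, "make fetch-deps DEP_TYPES='doc test'" would fetch doc and
+# test dependencies in addition to the normal ones.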
+
+ERLANG_MK_RECURSIVE_TMP_LIST := $(abspath $(ERLANG_MK_TMP)/recursive-tmp-deps-$(shell echo $$PPID).log)
+
+$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): | $(ERLANG_MK_TMP)
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_RECURSIVE_TMP_LIST)
+endif
+ $(verbose) touch $(ERLANG_MK_RECURSIVE_TMP_LIST)
+ $(verbose) set -e; for dep in $^ ; do \
+ if ! grep -qs ^$$dep$$ $(ERLANG_MK_RECURSIVE_TMP_LIST); then \
+ echo $$dep >> $(ERLANG_MK_RECURSIVE_TMP_LIST); \
+ if grep -qs -E "^[[:blank:]]*include[[:blank:]]+(erlang\.mk|.*/erlang\.mk|.*ERLANG_MK_FILENAME.*)$$" \
+ $$dep/GNUmakefile $$dep/makefile $$dep/Makefile; then \
+ $(MAKE) -C $$dep fetch-deps \
+ IS_DEP=1 \
+ ERLANG_MK_RECURSIVE_TMP_LIST=$(ERLANG_MK_RECURSIVE_TMP_LIST); \
+ fi \
+ fi \
+ done
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) sort < $(ERLANG_MK_RECURSIVE_TMP_LIST) | \
+ uniq > $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted
+ $(verbose) cmp -s $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted $@ \
+ || mv $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted $@
+ $(verbose) rm -f $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted
+ $(verbose) rm $(ERLANG_MK_RECURSIVE_TMP_LIST)
+endif
+endif # ifneq ($(SKIP_DEPS),)
+
+# List dependencies recursively.
+
+.PHONY: list-deps list-doc-deps list-rel-deps list-test-deps \
+ list-shell-deps
+
+list-deps: $(ERLANG_MK_RECURSIVE_DEPS_LIST)
+list-doc-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST)
+list-rel-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST)
+list-test-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST)
+list-shell-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+list-deps list-doc-deps list-rel-deps list-test-deps list-shell-deps:
+ $(verbose) cat $^
+
+# Query dependencies recursively.
+
+.PHONY: query-deps query-doc-deps query-rel-deps query-test-deps \
+ query-shell-deps
+
+QUERY ?= name fetch_method repo version
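+
+# For example, "make query-deps QUERY='name repo'" would print each
+# dependency's name and repository, one per line, prefixed by the project
+# that declares it.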
+
+define query_target
+$(1): $(2) clean-tmp-query.log
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(4)
+endif
+ $(verbose) $(foreach dep,$(3),\
+ echo $(PROJECT): $(foreach q,$(QUERY),$(call query_$(q),$(dep))) >> $(4) ;)
+ $(if $(filter-out query-deps,$(1)),,\
+ $(verbose) set -e; for dep in $(3) ; do \
+ if grep -qs ^$$$$dep$$$$ $(ERLANG_MK_TMP)/query.log; then \
+ :; \
+ else \
+ echo $$$$dep >> $(ERLANG_MK_TMP)/query.log; \
+ $(MAKE) -C $(DEPS_DIR)/$$$$dep $$@ QUERY="$(QUERY)" IS_DEP=1 || true; \
+ fi \
+ done)
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) touch $(4)
+ $(verbose) cat $(4)
+endif
+endef
+
+clean-tmp-query.log:
+ifeq ($(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_TMP)/query.log
+endif
+
+$(eval $(call query_target,query-deps,$(ERLANG_MK_RECURSIVE_DEPS_LIST),$(BUILD_DEPS) $(DEPS),$(ERLANG_MK_QUERY_DEPS_FILE)))
+$(eval $(call query_target,query-doc-deps,$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST),$(DOC_DEPS),$(ERLANG_MK_QUERY_DOC_DEPS_FILE)))
+$(eval $(call query_target,query-rel-deps,$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST),$(REL_DEPS),$(ERLANG_MK_QUERY_REL_DEPS_FILE)))
+$(eval $(call query_target,query-test-deps,$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST),$(TEST_DEPS),$(ERLANG_MK_QUERY_TEST_DEPS_FILE)))
+$(eval $(call query_target,query-shell-deps,$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST),$(SHELL_DEPS),$(ERLANG_MK_QUERY_SHELL_DEPS_FILE)))
diff --git a/deps/rabbitmq_peer_discovery_etcd/include/rabbit_peer_discovery_etcd.hrl b/deps/rabbitmq_peer_discovery_etcd/include/rabbit_peer_discovery_etcd.hrl
new file mode 100644
index 0000000000..a4764f3e80
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_etcd/include/rabbit_peer_discovery_etcd.hrl
@@ -0,0 +1,26 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-define(BACKEND_CONFIG_KEY, peer_discovery_etcd).
+
+-record(statem_data, {
+ endpoints,
+ tls_options,
+ connection_name,
+ connection_pid,
+ connection_monitor,
+ key_prefix,
+ cluster_name,
+ node_key_lease_id,
+ node_key_ttl_in_seconds,
+ %% the pid of the process returned by eetcd_lease:keep_alive/2
+ %% which refreshes this node's key lease
+ node_lease_keepalive_pid,
+ lock_ttl_in_seconds,
+ username,
+ obfuscated_password
+}).
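+
+%% For illustration only (not part of the original header): a populated
+%% record might look like the following, where the endpoint is hypothetical
+%% and the other values match the v3 client's defaults:
+%%
+%%   #statem_data{endpoints = ["one.etcd.example.local:2379"],
+%%                key_prefix = <<"rabbitmq">>,
+%%                cluster_name = <<"default">>,
+%%                node_key_ttl_in_seconds = 61,
+%%                lock_ttl_in_seconds = 70}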
diff --git a/deps/rabbitmq_peer_discovery_etcd/priv/schema/rabbitmq_peer_discovery_etcd.schema b/deps/rabbitmq_peer_discovery_etcd/priv/schema/rabbitmq_peer_discovery_etcd.schema
new file mode 100644
index 0000000000..3a2c3f9399
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_etcd/priv/schema/rabbitmq_peer_discovery_etcd.schema
@@ -0,0 +1,279 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+%% Endpoints
+
+{mapping, "cluster_formation.etcd.endpoints", "rabbit.cluster_formation.peer_discovery_etcd.endpoints", [
+ {datatype, {enum, [none]}}
+]}.
+
+{mapping, "cluster_formation.etcd.endpoints.$index", "rabbit.cluster_formation.peer_discovery_etcd.endpoints", [
+ {datatype, [string, ip]}
+]}.
+
+{translation, "rabbit.cluster_formation.peer_discovery_etcd.endpoints",
+fun(Conf) ->
+ case cuttlefish:conf_get("cluster_formation.etcd.endpoints", Conf, undefined) of
+ none -> [];
+ _ ->
+ Endpoints = cuttlefish_variable:filter_by_prefix("cluster_formation.etcd.endpoints", Conf),
+ [V || {_, V} <- Endpoints]
+ end
+end}.
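+
+%% For illustration only (not part of the original schema): endpoints are
+%% typically listed by index in rabbitmq.conf (hostnames are hypothetical)
+%% and collected into a list by the translation above:
+%%
+%%   cluster_formation.etcd.endpoints.1 = one.etcd.eng.example.local:2379
+%%   cluster_formation.etcd.endpoints.2 = two.etcd.eng.example.local:2379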
+
+%% Legacy: etcd host
+
+{mapping, "cluster_formation.etcd.host", "rabbit.cluster_formation.peer_discovery_etcd.etcd_host", [
+ {datatype, string}
+]}.
+
+{translation, "rabbit.cluster_formation.peer_discovery_etcd.etcd_host",
+fun(Conf) ->
+ case cuttlefish:conf_get("cluster_formation.etcd.host", Conf, undefined) of
+ undefined -> cuttlefish:unset();
+ Value -> Value
+ end
+end}.
+
+%% Legacy: etcd port
+
+{mapping, "cluster_formation.etcd.port", "rabbit.cluster_formation.peer_discovery_etcd.etcd_port", [
+ {datatype, integer},
+ {validators, ["non_negative_integer"]}
+]}.
+
+{translation, "rabbit.cluster_formation.peer_discovery_etcd.etcd_port",
+fun(Conf) ->
+ case cuttlefish:conf_get("cluster_formation.etcd.port", Conf, undefined) of
+ undefined -> cuttlefish:unset();
+ Value -> Value
+ end
+end}.
+
+%% Legacy: etcd scheme. The key remains for backwards compatibility; it will not be used.
+
+{mapping, "cluster_formation.etcd.scheme", "rabbit.cluster_formation.peer_discovery_etcd.etcd_scheme", [
+ {datatype, string}
+]}.
+
+{translation, "rabbit.cluster_formation.peer_discovery_etcd.etcd_scheme",
+fun(Conf) ->
+ case cuttlefish:conf_get("cluster_formation.etcd.scheme", Conf, undefined) of
+ undefined -> cuttlefish:unset();
+ Value -> Value
+ end
+end}.
+
+%% key prefix appended after /rabbitmq/{discovery,locks} (a mandatory prefix we enforce as of #22)
+
+{mapping, "cluster_formation.etcd.key_prefix", "rabbit.cluster_formation.peer_discovery_etcd.etcd_prefix", [
+ {datatype, string}
+]}.
+
+{translation, "rabbit.cluster_formation.peer_discovery_etcd.etcd_prefix",
+fun(Conf) ->
+ case cuttlefish:conf_get("cluster_formation.etcd.key_prefix", Conf, undefined) of
+ undefined -> cuttlefish:unset();
+ Value -> Value
+ end
+end}.
+
+%% cluster name
+
+{mapping, "cluster_formation.etcd.cluster_name", "rabbit.cluster_formation.peer_discovery_etcd.cluster_name", [
+ {datatype, string}
+]}.
+
+{translation, "rabbit.cluster_formation.peer_discovery_etcd.cluster_name",
+fun(Conf) ->
+ case cuttlefish:conf_get("cluster_formation.etcd.cluster_name", Conf, undefined) of
+ undefined -> cuttlefish:unset();
+ Value -> Value
+ end
+end}.
+
+%% node key ttl
+
+{mapping, "cluster_formation.etcd.node_ttl", "rabbit.cluster_formation.peer_discovery_etcd.etcd_node_ttl", [
+ {datatype, integer},
+ {validators, ["non_negative_integer"]}
+]}.
+
+{translation, "rabbit.cluster_formation.peer_discovery_etcd.etcd_node_ttl",
+fun(Conf) ->
+ case cuttlefish:conf_get("cluster_formation.etcd.node_ttl", Conf, undefined) of
+ undefined -> cuttlefish:unset();
+ Value -> Value
+ end
+end}.
+
+%% lock acquisition timeout
+
+{mapping, "cluster_formation.etcd.lock_wait_time", "rabbit.cluster_formation.peer_discovery_etcd.lock_wait_time", [
+ {datatype, integer},
+ {validators, ["non_negative_integer"]}
+]}.
+
+{mapping, "cluster_formation.etcd.lock_timeout", "rabbit.cluster_formation.peer_discovery_etcd.lock_wait_time", [
+ {datatype, integer},
+ {validators, ["non_negative_integer"]}
+]}.
+
+%% an alias for lock acquisition timeout to be consistent with the etcd backend
+
+{translation, "rabbit.cluster_formation.peer_discovery_etcd.lock_wait_time",
+fun(Conf) ->
+ case cuttlefish:conf_get("cluster_formation.etcd.lock_timeout", Conf, undefined) of
+ undefined ->
+ case cuttlefish:conf_get("cluster_formation.etcd.lock_wait_time", Conf, undefined) of
+ undefined -> cuttlefish:unset();
+ Value -> Value
+ end;
+ Value -> Value
+ end
+end}.
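+
+%% For illustration only (not part of the original schema): either key below
+%% sets the same value, and lock_timeout takes precedence when both are set:
+%%
+%%   cluster_formation.etcd.lock_timeout = 300
+%%   cluster_formation.etcd.lock_wait_time = 300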
+
+%% authentication
+
+{mapping, "cluster_formation.etcd.username", "rabbit.cluster_formation.peer_discovery_etcd.etcd_username", [
+ {datatype, string}
+]}.
+
+{translation, "rabbit.cluster_formation.peer_discovery_etcd.etcd_username",
+fun(Conf) ->
+ case cuttlefish:conf_get("cluster_formation.etcd.username", Conf, undefined) of
+ undefined -> cuttlefish:unset();
+ Value -> Value
+ end
+end}.
+
+{mapping, "cluster_formation.etcd.password", "rabbit.cluster_formation.peer_discovery_etcd.etcd_password", [
+ {datatype, string}
+]}.
+
+{translation, "rabbit.cluster_formation.peer_discovery_etcd.etcd_password",
+fun(Conf) ->
+ case cuttlefish:conf_get("cluster_formation.etcd.password", Conf, undefined) of
+ undefined -> cuttlefish:unset();
+ Value -> Value
+ end
+end}.
+
+%%
+%% TLS client options
+%%
+
+{mapping, "cluster_formation.etcd.ssl_options", "rabbit.cluster_formation.peer_discovery_etcd.ssl_options", [
+ {datatype, {enum, [none]}}
+]}.
+
+{translation, "rabbit.cluster_formation.peer_discovery_etcd.ssl_options",
+fun(Conf) ->
+ case cuttlefish:conf_get("cluster_formation.etcd.ssl_options", Conf, undefined) of
+ none -> [];
+ _ -> cuttlefish:invalid("Invalid cluster_formation.etcd.ssl_options")
+ end
+end}.
+
+{mapping, "cluster_formation.etcd.ssl_options.verify", "rabbit.cluster_formation.peer_discovery_etcd.ssl_options.verify", [
+ {datatype, {enum, [verify_peer, verify_none]}}]}.
+
+{mapping, "cluster_formation.etcd.ssl_options.fail_if_no_peer_cert", "rabbit.cluster_formation.peer_discovery_etcd.ssl_options.fail_if_no_peer_cert", [
+ {datatype, {enum, [true, false]}}]}.
+
+{mapping, "cluster_formation.etcd.ssl_options.cacertfile", "rabbit.cluster_formation.peer_discovery_etcd.ssl_options.cacertfile",
+ [{datatype, string}, {validators, ["file_accessible"]}]}.
+
+{mapping, "cluster_formation.etcd.ssl_options.certfile", "rabbit.cluster_formation.peer_discovery_etcd.ssl_options.certfile",
+ [{datatype, string}, {validators, ["file_accessible"]}]}.
+
+{mapping, "cluster_formation.etcd.ssl_options.cacerts.$name", "rabbit.cluster_formation.peer_discovery_etcd.ssl_options.cacerts",
+ [{datatype, string}]}.
+
+{translation, "rabbit.cluster_formation.peer_discovery_etcd.ssl_options.cacerts",
+fun(Conf) ->
+ Settings = cuttlefish_variable:filter_by_prefix("cluster_formation.etcd.ssl_options.cacerts", Conf),
+ [ list_to_binary(V) || {_, V} <- Settings ]
+end}.
+
+{mapping, "cluster_formation.etcd.ssl_options.cert", "rabbit.cluster_formation.peer_discovery_etcd.ssl_options.cert",
+ [{datatype, string}]}.
+
+{translation, "rabbit.cluster_formation.peer_discovery_etcd.ssl_options.cert",
+fun(Conf) ->
+ list_to_binary(cuttlefish:conf_get("cluster_formation.etcd.ssl_options.cert", Conf))
+end}.
+
+{mapping, "cluster_formation.etcd.ssl_options.crl_check", "rabbit.cluster_formation.peer_discovery_etcd.ssl_options.crl_check",
+ [{datatype, [{enum, [true, false, peer, best_effort]}]}]}.
+
+{mapping, "cluster_formation.etcd.ssl_options.depth", "rabbit.cluster_formation.peer_discovery_etcd.ssl_options.depth",
+ [{datatype, integer}, {validators, ["byte"]}]}.
+
+{mapping, "cluster_formation.etcd.ssl_options.dh", "rabbit.cluster_formation.peer_discovery_etcd.ssl_options.dh",
+ [{datatype, string}]}.
+
+{translation, "rabbit.cluster_formation.peer_discovery_etcd.ssl_options.dh",
+fun(Conf) ->
+ list_to_binary(cuttlefish:conf_get("cluster_formation.etcd.ssl_options.dh", Conf))
+end}.
+
+{mapping, "cluster_formation.etcd.ssl_options.dhfile", "rabbit.cluster_formation.peer_discovery_etcd.ssl_options.dhfile",
+ [{datatype, string}, {validators, ["file_accessible"]}]}.
+
+{mapping, "cluster_formation.etcd.ssl_options.key.RSAPrivateKey", "rabbit.cluster_formation.peer_discovery_etcd.ssl_options.key",
+ [{datatype, string}]}.
+
+{mapping, "cluster_formation.etcd.ssl_options.key.DSAPrivateKey", "rabbit.cluster_formation.peer_discovery_etcd.ssl_options.key",
+ [{datatype, string}]}.
+
+{mapping, "cluster_formation.etcd.ssl_options.key.PrivateKeyInfo", "rabbit.cluster_formation.peer_discovery_etcd.ssl_options.key",
+ [{datatype, string}]}.
+
+{translation, "rabbit.cluster_formation.peer_discovery_etcd.ssl_options.key",
+fun(Conf) ->
+ case cuttlefish_variable:filter_by_prefix("cluster_formation.etcd.ssl_options.key", Conf) of
+ [{[_,_,Key], Val}|_] -> {list_to_atom(Key), list_to_binary(Val)};
+ _ -> undefined
+ end
+end}.
+
+{mapping, "cluster_formation.etcd.ssl_options.keyfile", "rabbit.cluster_formation.peer_discovery_etcd.ssl_options.keyfile",
+ [{datatype, string}, {validators, ["file_accessible"]}]}.
+
+{mapping, "cluster_formation.etcd.ssl_options.log_alert", "rabbit.cluster_formation.peer_discovery_etcd.ssl_options.log_alert",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "cluster_formation.etcd.ssl_options.password", "rabbit.cluster_formation.peer_discovery_etcd.ssl_options.password",
+ [{datatype, string}]}.
+
+{mapping, "cluster_formation.etcd.ssl_options.psk_identity", "rabbit.cluster_formation.peer_discovery_etcd.ssl_options.psk_identity",
+ [{datatype, string}]}.
+
+{mapping, "cluster_formation.etcd.ssl_options.reuse_sessions", "rabbit.cluster_formation.peer_discovery_etcd.ssl_options.reuse_sessions",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "cluster_formation.etcd.ssl_options.secure_renegotiate", "rabbit.cluster_formation.peer_discovery_etcd.ssl_options.secure_renegotiate",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "cluster_formation.etcd.ssl_options.versions.$version", "rabbit.cluster_formation.peer_discovery_etcd.ssl_options.versions",
+ [{datatype, atom}]}.
+
+{translation, "rabbit.cluster_formation.peer_discovery_etcd.ssl_options.versions",
+fun(Conf) ->
+ Settings = cuttlefish_variable:filter_by_prefix("cluster_formation.etcd.ssl_options.versions", Conf),
+ [V || {_, V} <- Settings]
+end}.
+
+{mapping, "cluster_formation.etcd.ssl_options.ciphers.$cipher", "rabbit.cluster_formation.peer_discovery_etcd.ssl_options.ciphers",
+ [{datatype, string}]}.
+
+{translation, "rabbit.cluster_formation.peer_discovery_etcd.ssl_options.ciphers",
+fun(Conf) ->
+ Settings = cuttlefish_variable:filter_by_prefix("cluster_formation.etcd.ssl_options.ciphers", Conf),
+ lists:reverse([V || {_, V} <- Settings])
+end}.
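+
+%% For illustration only (not part of the original schema): cipher suites
+%% are listed by index and collected into a list of strings, e.g.
+%%
+%%   cluster_formation.etcd.ssl_options.ciphers.1 = ECDHE-ECDSA-AES256-GCM-SHA384
+%%   cluster_formation.etcd.ssl_options.ciphers.2 = ECDHE-RSA-AES256-GCM-SHA384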
diff --git a/deps/rabbitmq_peer_discovery_etcd/rabbitmq-components.mk b/deps/rabbitmq_peer_discovery_etcd/rabbitmq-components.mk
new file mode 100644
index 0000000000..b2a3be8b35
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_etcd/rabbitmq-components.mk
@@ -0,0 +1,359 @@
+ifeq ($(.DEFAULT_GOAL),)
+# Define the default goal as `all` because this file defines some targets
+# before the inclusion of erlang.mk, which would otherwise cause the wrong
+# target to become the default.
+.DEFAULT_GOAL = all
+endif
+
+# PROJECT_VERSION defaults to:
+# 1. the version exported by rabbitmq-server-release;
+# 2. the version stored in `git-revisions.txt`, if it exists;
+# 3. a version based on git-describe(1), if it is a Git clone;
+# 4. 0.0.0
+
+PROJECT_VERSION := $(RABBITMQ_VERSION)
+
+ifeq ($(PROJECT_VERSION),)
+PROJECT_VERSION := $(shell \
+if test -f git-revisions.txt; then \
+ head -n1 git-revisions.txt | \
+ awk '{print $$$(words $(PROJECT_DESCRIPTION) version);}'; \
+else \
+ (git describe --dirty --abbrev=7 --tags --always --first-parent \
+ 2>/dev/null || echo rabbitmq_v0_0_0) | \
+ sed -e 's/^rabbitmq_v//' -e 's/^v//' -e 's/_/./g' -e 's/-/+/' \
+ -e 's/-/./g'; \
+fi)
+endif
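+
+# For illustration only (not part of the original file): a git-describe
+# output such as "rabbitmq_v3_8_9-10-gabc1234" would be rewritten by the
+# sed expressions above to "3.8.9+10.gabc1234".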
+
+# --------------------------------------------------------------------
+# RabbitMQ components.
+# --------------------------------------------------------------------
+
+# For RabbitMQ repositories, we want to check out branches which match
+# the parent project. For instance, if the parent project is on a
+# release tag, dependencies must be on the same release tag. If the
+# parent project is on a topic branch, dependencies must be on the same
+# topic branch or fall back to `stable` or `master`, whichever was the
+# base of the topic branch.
+
+dep_amqp_client = git_rmq rabbitmq-erlang-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_amqp10_client = git_rmq rabbitmq-amqp1.0-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_amqp10_common = git_rmq rabbitmq-amqp1.0-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbit = git_rmq rabbitmq-server $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbit_common = git_rmq rabbitmq-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_amqp1_0 = git_rmq rabbitmq-amqp1.0 $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_amqp = git_rmq rabbitmq-auth-backend-amqp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_cache = git_rmq rabbitmq-auth-backend-cache $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_http = git_rmq rabbitmq-auth-backend-http $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_ldap = git_rmq rabbitmq-auth-backend-ldap $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_oauth2 = git_rmq rabbitmq-auth-backend-oauth2 $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_mechanism_ssl = git_rmq rabbitmq-auth-mechanism-ssl $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_aws = git_rmq rabbitmq-aws $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_boot_steps_visualiser = git_rmq rabbitmq-boot-steps-visualiser $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_cli = git_rmq rabbitmq-cli $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_codegen = git_rmq rabbitmq-codegen $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_consistent_hash_exchange = git_rmq rabbitmq-consistent-hash-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_ct_client_helpers = git_rmq rabbitmq-ct-client-helpers $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_ct_helpers = git_rmq rabbitmq-ct-helpers $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_delayed_message_exchange = git_rmq rabbitmq-delayed-message-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_dotnet_client = git_rmq rabbitmq-dotnet-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_event_exchange = git_rmq rabbitmq-event-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_federation = git_rmq rabbitmq-federation $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_federation_management = git_rmq rabbitmq-federation-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_java_client = git_rmq rabbitmq-java-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_client = git_rmq rabbitmq-jms-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_cts = git_rmq rabbitmq-jms-cts $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_topic_exchange = git_rmq rabbitmq-jms-topic-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_lvc_exchange = git_rmq rabbitmq-lvc-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management = git_rmq rabbitmq-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_agent = git_rmq rabbitmq-management-agent $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_exchange = git_rmq rabbitmq-management-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_themes = git_rmq rabbitmq-management-themes $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_message_timestamp = git_rmq rabbitmq-message-timestamp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_metronome = git_rmq rabbitmq-metronome $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_mqtt = git_rmq rabbitmq-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_objc_client = git_rmq rabbitmq-objc-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_aws = git_rmq rabbitmq-peer-discovery-aws $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_common = git_rmq rabbitmq-peer-discovery-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_consul = git_rmq rabbitmq-peer-discovery-consul $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_etcd = git_rmq rabbitmq-peer-discovery-etcd $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_k8s = git_rmq rabbitmq-peer-discovery-k8s $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_prometheus = git_rmq rabbitmq-prometheus $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_random_exchange = git_rmq rabbitmq-random-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_recent_history_exchange = git_rmq rabbitmq-recent-history-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_routing_node_stamp = git_rmq rabbitmq-routing-node-stamp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_rtopic_exchange = git_rmq rabbitmq-rtopic-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_server_release = git_rmq rabbitmq-server-release $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_sharding = git_rmq rabbitmq-sharding $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_shovel = git_rmq rabbitmq-shovel $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_shovel_management = git_rmq rabbitmq-shovel-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_stomp = git_rmq rabbitmq-stomp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_stream = git_rmq rabbitmq-stream $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_toke = git_rmq rabbitmq-toke $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_top = git_rmq rabbitmq-top $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_tracing = git_rmq rabbitmq-tracing $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_trust_store = git_rmq rabbitmq-trust-store $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_test = git_rmq rabbitmq-test $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_dispatch = git_rmq rabbitmq-web-dispatch $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_stomp = git_rmq rabbitmq-web-stomp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_stomp_examples = git_rmq rabbitmq-web-stomp-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt = git_rmq rabbitmq-web-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt_examples = git_rmq rabbitmq-web-mqtt-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_website = git_rmq rabbitmq-website $(current_rmq_ref) $(base_rmq_ref) live master
+dep_toke = git_rmq toke $(current_rmq_ref) $(base_rmq_ref) master
+
+dep_rabbitmq_public_umbrella = git_rmq rabbitmq-public-umbrella $(current_rmq_ref) $(base_rmq_ref) master
+
+# Third-party dependency version pinning.
+#
+# We do this in this file, which is copied into all projects, to ensure
+# they all use the same versions. This avoids conflicts and makes it
+# possible to work with rabbitmq-public-umbrella.
+
+dep_accept = hex 0.3.5
+dep_cowboy = hex 2.8.0
+dep_cowlib = hex 2.9.1
+dep_jsx = hex 2.11.0
+dep_lager = hex 3.8.0
+dep_prometheus = git https://github.com/deadtrickster/prometheus.erl.git master
+dep_ra = git https://github.com/rabbitmq/ra.git master
+dep_ranch = hex 1.7.1
+dep_recon = hex 2.5.1
+dep_observer_cli = hex 1.5.4
+dep_stdout_formatter = hex 0.2.4
+dep_sysmon_handler = hex 1.3.0
+
+RABBITMQ_COMPONENTS = amqp_client \
+ amqp10_common \
+ amqp10_client \
+ rabbit \
+ rabbit_common \
+ rabbitmq_amqp1_0 \
+ rabbitmq_auth_backend_amqp \
+ rabbitmq_auth_backend_cache \
+ rabbitmq_auth_backend_http \
+ rabbitmq_auth_backend_ldap \
+ rabbitmq_auth_backend_oauth2 \
+ rabbitmq_auth_mechanism_ssl \
+ rabbitmq_aws \
+ rabbitmq_boot_steps_visualiser \
+ rabbitmq_cli \
+ rabbitmq_codegen \
+ rabbitmq_consistent_hash_exchange \
+ rabbitmq_ct_client_helpers \
+ rabbitmq_ct_helpers \
+ rabbitmq_delayed_message_exchange \
+ rabbitmq_dotnet_client \
+ rabbitmq_event_exchange \
+ rabbitmq_federation \
+ rabbitmq_federation_management \
+ rabbitmq_java_client \
+ rabbitmq_jms_client \
+ rabbitmq_jms_cts \
+ rabbitmq_jms_topic_exchange \
+ rabbitmq_lvc_exchange \
+ rabbitmq_management \
+ rabbitmq_management_agent \
+ rabbitmq_management_exchange \
+ rabbitmq_management_themes \
+ rabbitmq_message_timestamp \
+ rabbitmq_metronome \
+ rabbitmq_mqtt \
+ rabbitmq_objc_client \
+ rabbitmq_peer_discovery_aws \
+ rabbitmq_peer_discovery_common \
+ rabbitmq_peer_discovery_consul \
+ rabbitmq_peer_discovery_etcd \
+ rabbitmq_peer_discovery_k8s \
+ rabbitmq_prometheus \
+ rabbitmq_random_exchange \
+ rabbitmq_recent_history_exchange \
+ rabbitmq_routing_node_stamp \
+ rabbitmq_rtopic_exchange \
+ rabbitmq_server_release \
+ rabbitmq_sharding \
+ rabbitmq_shovel \
+ rabbitmq_shovel_management \
+ rabbitmq_stomp \
+ rabbitmq_stream \
+ rabbitmq_toke \
+ rabbitmq_top \
+ rabbitmq_tracing \
+ rabbitmq_trust_store \
+ rabbitmq_web_dispatch \
+ rabbitmq_web_mqtt \
+ rabbitmq_web_mqtt_examples \
+ rabbitmq_web_stomp \
+ rabbitmq_web_stomp_examples \
+ rabbitmq_website
+
+# Erlang.mk does not rebuild dependencies once they have been compiled,
+# except for those listed in the `$(FORCE_REBUILD)` variable.
+#
+# We want all RabbitMQ components to always be rebuilt: this eases
+# working on several components at the same time.
+
+FORCE_REBUILD = $(RABBITMQ_COMPONENTS)
+
+# Several components have a custom erlang.mk/build.config, mainly
+# to disable eunit. Therefore, we can't use the top-level project's
+# erlang.mk copy.
+NO_AUTOPATCH += $(RABBITMQ_COMPONENTS)
+
+ifeq ($(origin current_rmq_ref),undefined)
+ifneq ($(wildcard .git),)
+current_rmq_ref := $(shell (\
+ ref=$$(LANG=C git branch --list | awk '/^\* \(.*detached / {ref=$$0; sub(/.*detached [^ ]+ /, "", ref); sub(/\)$$/, "", ref); print ref; exit;} /^\* / {ref=$$0; sub(/^\* /, "", ref); print ref; exit}');\
+ if test "$$(git rev-parse --short HEAD)" != "$$ref"; then echo "$$ref"; fi))
+else
+current_rmq_ref := master
+endif
+endif
+export current_rmq_ref
+
+ifeq ($(origin base_rmq_ref),undefined)
+ifneq ($(wildcard .git),)
+possible_base_rmq_ref := master
+ifeq ($(possible_base_rmq_ref),$(current_rmq_ref))
+base_rmq_ref := $(current_rmq_ref)
+else
+base_rmq_ref := $(shell \
+ (git rev-parse --verify -q master >/dev/null && \
+ git rev-parse --verify -q $(possible_base_rmq_ref) >/dev/null && \
+ git merge-base --is-ancestor $$(git merge-base master HEAD) $(possible_base_rmq_ref) && \
+ echo $(possible_base_rmq_ref)) || \
+ echo master)
+endif
+else
+base_rmq_ref := master
+endif
+endif
+export base_rmq_ref
+
+# Repository URL selection.
+#
+# First, we infer other components' locations from the current project
+# repository URL, if it's a Git repository:
+# - We take the "origin" remote URL as the base
+# - The current project name and repository name are replaced by the
+#   target's properties:
+#   eg. rabbitmq-common is replaced by rabbitmq-codegen
+#   eg. rabbit_common is replaced by rabbitmq_codegen
+#
+# If cloning from this computed location fails, we fall back to the
+# RabbitMQ upstream on GitHub.
+
+# Macro to transform eg. "rabbit_common" to "rabbitmq-common".
+rmq_cmp_repo_name = $(word 2,$(dep_$(1)))
+
+# Upstream URL for the current project.
+RABBITMQ_COMPONENT_REPO_NAME := $(call rmq_cmp_repo_name,$(PROJECT))
+RABBITMQ_UPSTREAM_FETCH_URL ?= https://github.com/rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git
+RABBITMQ_UPSTREAM_PUSH_URL ?= git@github.com:rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git
+
+# Current URL for the current project. If this is not a Git clone,
+# default to the upstream Git repository.
+ifneq ($(wildcard .git),)
+git_origin_fetch_url := $(shell git config remote.origin.url)
+git_origin_push_url := $(shell git config remote.origin.pushurl || git config remote.origin.url)
+RABBITMQ_CURRENT_FETCH_URL ?= $(git_origin_fetch_url)
+RABBITMQ_CURRENT_PUSH_URL ?= $(git_origin_push_url)
+else
+RABBITMQ_CURRENT_FETCH_URL ?= $(RABBITMQ_UPSTREAM_FETCH_URL)
+RABBITMQ_CURRENT_PUSH_URL ?= $(RABBITMQ_UPSTREAM_PUSH_URL)
+endif
+
+# Macro to replace the following patterns:
+# 1. /foo.git -> /bar.git
+# 2. /foo -> /bar
+# 3. /foo/ -> /bar/
+subst_repo_name = $(patsubst %/$(1)/%,%/$(2)/%,$(patsubst %/$(1),%/$(2),$(patsubst %/$(1).git,%/$(2).git,$(3))))
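+
+# For illustration only (not part of the original file):
+#   $(call subst_repo_name,rabbitmq-common,rabbitmq-codegen,https://github.com/rabbitmq/rabbitmq-common.git)
+# expands to "https://github.com/rabbitmq/rabbitmq-codegen.git".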
+
+# Macro to replace both the project's name (eg. "rabbit_common") and
+# repository name (eg. "rabbitmq-common") by the target's equivalent.
+#
+# This macro is kept on one line because we don't want whitespace in
+# the returned value, as it's used in $(dep_fetch_git_rmq) in a shell
+# single-quoted string.
+dep_rmq_repo = $(if $(dep_$(2)),$(call subst_repo_name,$(PROJECT),$(2),$(call subst_repo_name,$(RABBITMQ_COMPONENT_REPO_NAME),$(call rmq_cmp_repo_name,$(2)),$(1))),$(pkg_$(1)_repo))
+
+dep_rmq_commits = $(if $(dep_$(1)), \
+ $(wordlist 3,$(words $(dep_$(1))),$(dep_$(1))), \
+ $(pkg_$(1)_commit))
+
+define dep_fetch_git_rmq
+ fetch_url1='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_FETCH_URL),$(1))'; \
+ fetch_url2='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_FETCH_URL),$(1))'; \
+ if test "$$$$fetch_url1" != '$(RABBITMQ_CURRENT_FETCH_URL)' && \
+ git clone -q -n -- "$$$$fetch_url1" $(DEPS_DIR)/$(call dep_name,$(1)); then \
+ fetch_url="$$$$fetch_url1"; \
+ push_url='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_PUSH_URL),$(1))'; \
+ elif git clone -q -n -- "$$$$fetch_url2" $(DEPS_DIR)/$(call dep_name,$(1)); then \
+ fetch_url="$$$$fetch_url2"; \
+ push_url='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_PUSH_URL),$(1))'; \
+ fi; \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && ( \
+ $(foreach ref,$(call dep_rmq_commits,$(1)), \
+ git checkout -q $(ref) >/dev/null 2>&1 || \
+ ) \
+ (echo "error: no valid pathspec among: $(call dep_rmq_commits,$(1))" \
+ 1>&2 && false) ) && \
+ (test "$$$$fetch_url" = "$$$$push_url" || \
+ git remote set-url --push origin "$$$$push_url")
+endef
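+
+# For illustration only (not part of the original file): for a dependency
+# declared as
+#   dep_rabbit_common = git_rmq rabbitmq-common $(current_rmq_ref) $(base_rmq_ref) master
+# the checkout loop above tries $(current_rmq_ref) first, then
+# $(base_rmq_ref), then master, and fails only if none of these refs exists.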
+
+# --------------------------------------------------------------------
+# Component distribution.
+# --------------------------------------------------------------------
+
+list-dist-deps::
+ @:
+
+prepare-dist::
+ @:
+
+# --------------------------------------------------------------------
+# Umbrella-specific settings.
+# --------------------------------------------------------------------
+
+# If the top-level project is a RabbitMQ component, we override
+# $(DEPS_DIR) for this project to point to the top-level's one.
+#
+# We also verify that the guessed DEPS_DIR is actually named `deps`,
+# to rule out any situation where it is a coincidence that we found a
+# `rabbitmq-components.mk` in an upper directory.
+
+possible_deps_dir_1 = $(abspath ..)
+possible_deps_dir_2 = $(abspath ../../..)
+
+ifeq ($(notdir $(possible_deps_dir_1)),deps)
+ifneq ($(wildcard $(possible_deps_dir_1)/../rabbitmq-components.mk),)
+deps_dir_overriden = 1
+DEPS_DIR ?= $(possible_deps_dir_1)
+DISABLE_DISTCLEAN = 1
+endif
+endif
+
+ifeq ($(deps_dir_overriden),)
+ifeq ($(notdir $(possible_deps_dir_2)),deps)
+ifneq ($(wildcard $(possible_deps_dir_2)/../rabbitmq-components.mk),)
+deps_dir_overriden = 1
+DEPS_DIR ?= $(possible_deps_dir_2)
+DISABLE_DISTCLEAN = 1
+endif
+endif
+endif
+
+ifneq ($(wildcard UMBRELLA.md),)
+DISABLE_DISTCLEAN = 1
+endif
+
+# We disable `make distclean` so $(DEPS_DIR) is not accidentally removed.
+
+ifeq ($(DISABLE_DISTCLEAN),1)
+ifneq ($(filter distclean distclean-deps,$(MAKECMDGOALS)),)
+SKIP_DEPS = 1
+endif
+endif
diff --git a/deps/rabbitmq_peer_discovery_etcd/src/rabbit_peer_discovery_etcd.erl b/deps/rabbitmq_peer_discovery_etcd/src/rabbit_peer_discovery_etcd.erl
new file mode 100644
index 0000000000..0aec413edd
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_etcd/src/rabbit_peer_discovery_etcd.erl
@@ -0,0 +1,108 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% The Initial Developer of the Original Code is AWeber Communications.
+%% Copyright (c) 2015-2016 AWeber Communications
+%% Copyright (c) 2016-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_peer_discovery_etcd).
+-behaviour(rabbit_peer_discovery_backend).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+-include_lib("rabbitmq_peer_discovery_common/include/rabbit_peer_discovery.hrl").
+-include("rabbit_peer_discovery_etcd.hrl").
+
+-export([init/0, list_nodes/0, supports_registration/0, register/0, unregister/0,
+ post_registration/0, lock/1, unlock/1]).
+
+-define(ETCD_CLIENT, rabbitmq_peer_discovery_etcd_v3_client).
+
+%%
+%% API
+%%
+
+init() ->
+ %% We cannot start this plugin yet since it depends on the rabbit app,
+    %% which is in the process of being started by the time this function is called.
+ application:load(rabbitmq_peer_discovery_common),
+ application:load(rabbitmq_peer_discovery_etcd),
+
+ %% Here we start the client very early on, before plugins have initialized.
+ %% We need to do it conditionally, however.
+ NoOp = fun() -> ok end,
+ Run = fun(_) ->
+ rabbit_log:debug("Peer discovery etcd: initialising..."),
+ application:ensure_all_started(eetcd),
+ Formation = application:get_env(rabbit, cluster_formation, []),
+ Opts = maps:from_list(proplists:get_value(peer_discovery_etcd, Formation, [])),
+ {ok, Pid} = rabbitmq_peer_discovery_etcd_v3_client:start_link(Opts),
+                  %% unlink so that the client's lifecycle does not affect that of RabbitMQ core
+ unlink(Pid),
+ rabbit_log:debug("etcd peer discovery: v3 client pid: ~p", [whereis(rabbitmq_peer_discovery_etcd_v3_client)])
+ end,
+ rabbit_peer_discovery_util:maybe_backend_configured(?BACKEND_CONFIG_KEY, NoOp, NoOp, Run),
+
+ ok.
+
+
+-spec list_nodes() -> {ok, {Nodes :: list(), NodeType :: rabbit_types:node_type()}} | {error, Reason :: string()}.
+
+list_nodes() ->
+ Fun0 = fun() -> {ok, {[], disc}} end,
+ Fun1 = fun() ->
+ rabbit_log:warning("Peer discovery backend is set to ~s "
+ "but final config does not contain "
+ "rabbit.cluster_formation.peer_discovery_etcd. "
+ "Cannot discover any nodes because etcd cluster details are not configured!",
+ [?MODULE]),
+ {ok, {[], disc}}
+ end,
+ Fun2 = fun(_Proplist) ->
+ %% error logging will be done by the client
+ Nodes = rabbitmq_peer_discovery_etcd_v3_client:list_nodes(),
+ {ok, {Nodes, disc}}
+ end,
+ rabbit_peer_discovery_util:maybe_backend_configured(?BACKEND_CONFIG_KEY, Fun0, Fun1, Fun2).
+
+
+-spec supports_registration() -> boolean().
+
+supports_registration() ->
+ true.
+
+
+-spec register() -> ok | {error, string()}.
+
+register() ->
+ Result = ?ETCD_CLIENT:register(),
+ rabbit_log:info("Registered node with etcd"),
+ Result.
+
+
+-spec unregister() -> ok | {error, string()}.
+unregister() ->
+    %% This backend unregisters on plugin (etcd v3 client) deactivation:
+    %% by the time this function would run, the plugin and thus the client
+    %% it provides are already gone. MK.
+ ok.
+
+-spec post_registration() -> ok | {error, Reason :: string()}.
+
+post_registration() ->
+ ok.
+
+-spec lock(Node :: atom()) -> {ok, Data :: term()} | {error, Reason :: string()}.
+
+lock(Node) when is_atom(Node) ->
+ case rabbitmq_peer_discovery_etcd_v3_client:lock(Node) of
+ {ok, GeneratedKey} -> {ok, GeneratedKey};
+ {error, _} = Error -> Error
+ end.
+
+
+-spec unlock(Data :: term()) -> ok.
+
+unlock(GeneratedKey) ->
+ rabbitmq_peer_discovery_etcd_v3_client:unlock(GeneratedKey).
diff --git a/deps/rabbitmq_peer_discovery_etcd/src/rabbitmq_peer_discovery_etcd.erl b/deps/rabbitmq_peer_discovery_etcd/src/rabbitmq_peer_discovery_etcd.erl
new file mode 100644
index 0000000000..d9778b0afd
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_etcd/src/rabbitmq_peer_discovery_etcd.erl
@@ -0,0 +1,53 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% The Initial Developer of the Original Code is AWeber Communications.
+%% Copyright (c) 2015-2016 AWeber Communications
+%% Copyright (c) 2016-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbitmq_peer_discovery_etcd).
+-behaviour(rabbit_peer_discovery_backend).
+
+-export([init/0, list_nodes/0, supports_registration/0, register/0, unregister/0,
+ post_registration/0, lock/1, unlock/1]).
+
+-define(DELEGATE, rabbit_peer_discovery_etcd).
+
+%%
+%% API
+%%
+
+init() ->
+ ?DELEGATE:init().
+
+-spec list_nodes() -> {ok, {Nodes :: list(), NodeType :: rabbit_types:node_type()}} |
+ {error, Reason :: string()}.
+list_nodes() ->
+ ?DELEGATE:list_nodes().
+
+-spec supports_registration() -> boolean().
+supports_registration() ->
+ ?DELEGATE:supports_registration().
+
+
+-spec register() -> ok.
+register() ->
+ ?DELEGATE:register().
+
+-spec unregister() -> ok.
+unregister() ->
+ ?DELEGATE:unregister().
+
+-spec post_registration() -> ok | {error, Reason :: string()}.
+post_registration() ->
+ ?DELEGATE:post_registration().
+
+-spec lock(Node :: atom()) -> not_supported.
+lock(Node) ->
+ ?DELEGATE:lock(Node).
+
+-spec unlock(Data :: term()) -> ok.
+unlock(Data) ->
+ ?DELEGATE:unlock(Data).
diff --git a/deps/rabbitmq_peer_discovery_etcd/src/rabbitmq_peer_discovery_etcd_app.erl b/deps/rabbitmq_peer_discovery_etcd/src/rabbitmq_peer_discovery_etcd_app.erl
new file mode 100644
index 0000000000..e43a820385
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_etcd/src/rabbitmq_peer_discovery_etcd_app.erl
@@ -0,0 +1,29 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbitmq_peer_discovery_etcd_app).
+
+%%
+%% API
+%%
+
+-behaviour(application).
+-export([start/2, stop/1, prep_stop/1]).
+
+start(_Type, _StartArgs) ->
+    %% Parts of this tree had been started earlier; see rabbit_peer_discovery_etcd:init/0. MK.
+ rabbitmq_peer_discovery_etcd_sup:start_link().
+
+prep_stop(_State) ->
+ try
+ rabbitmq_peer_discovery_etcd_v3_client:unregister()
+ catch
+ _:_ -> ok
+ end.
+
+stop(_State) ->
+ ok.
diff --git a/deps/rabbitmq_peer_discovery_etcd/src/rabbitmq_peer_discovery_etcd_sup.erl b/deps/rabbitmq_peer_discovery_etcd/src/rabbitmq_peer_discovery_etcd_sup.erl
new file mode 100644
index 0000000000..6d220f8092
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_etcd/src/rabbitmq_peer_discovery_etcd_sup.erl
@@ -0,0 +1,50 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbitmq_peer_discovery_etcd_sup).
+
+-behaviour(supervisor).
+
+-export([init/1, start_link/0]).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+-include("rabbit_peer_discovery_etcd.hrl").
+
+%%
+%% API
+%%
+
+init([]) ->
+ Flags = #{strategy => one_for_one, intensity => 10, period => 1},
+ Fun0 = fun() -> {ok, {Flags, []}} end,
+ Fun1 = fun() -> {ok, {Flags, []}} end,
+ Fun2 = fun(_) ->
+ %% we stop the previously started client and "re-attach" it. MK.
+ rabbitmq_peer_discovery_etcd_v3_client:stop(),
+ Formation = application:get_env(rabbit, cluster_formation, []),
+ Opts = maps:from_list(proplists:get_value(peer_discovery_etcd, Formation, [])),
+ EtcdClientFSM = #{
+ id => rabbitmq_peer_discovery_etcd_v3_client,
+ start => {rabbitmq_peer_discovery_etcd_v3_client, start_link, [Opts]},
+ restart => permanent,
+ shutdown => ?SUPERVISOR_WAIT,
+ type => worker,
+ modules => [rabbitmq_peer_discovery_etcd_v3_client]
+ },
+ Specs = [
+ EtcdClientFSM
+ ],
+ {ok, {Flags, Specs}}
+ end,
+ rabbit_peer_discovery_util:maybe_backend_configured(?BACKEND_CONFIG_KEY, Fun0, Fun1, Fun2).
+
+start_link() ->
+ case supervisor:start_link({local, ?MODULE}, ?MODULE, []) of
+ {ok, Pid} -> {ok, Pid};
+ {error, {already_started, Pid}} -> {ok, Pid};
+ {error, _} = Err -> Err
+ end.
diff --git a/deps/rabbitmq_peer_discovery_etcd/src/rabbitmq_peer_discovery_etcd_v3_client.erl b/deps/rabbitmq_peer_discovery_etcd/src/rabbitmq_peer_discovery_etcd_v3_client.erl
new file mode 100644
index 0000000000..864767c25e
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_etcd/src/rabbitmq_peer_discovery_etcd_v3_client.erl
@@ -0,0 +1,441 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbitmq_peer_discovery_etcd_v3_client).
+
+%% API
+-export([]).
+
+
+-behaviour(gen_statem).
+
+-export([start_link/1, start/1, stop/0]).
+-export([init/1, callback_mode/0, terminate/3]).
+-export([register/1, register/0, unregister/1, unregister/0, list_nodes/0, list_nodes/1]).
+-export([lock/0, lock/1, lock/2, unlock/0, unlock/1, unlock/2]).
+-export([recover/3, connected/3, disconnected/3]).
+
+%% for tests
+-export([extract_node/1, filter_node/1, registration_value/1, node_key_base/1, node_key/1, lock_key_base/1]).
+
+-import(rabbit_data_coercion, [to_binary/1, to_list/1]).
+
+-compile(nowarn_unused_function).
+
+-include("rabbit_peer_discovery_etcd.hrl").
+
+%%
+%% API
+%%
+
+-define(ETCD_CONN_NAME, ?MODULE).
+%% 60s by default matches the default heartbeat timeout.
+%% We add 1s on top of that for state machine bookkeeping.
+-define(DEFAULT_NODE_KEY_LEASE_TTL, 61).
+%% don't allow node lease key TTL to be lower than this
+%% as overly low values can cause annoying timeouts in etcd client operations
+-define(MINIMUM_NODE_KEY_LEASE_TTL, 15).
+%% default randomized delay range is 5s to 60s, so this value
+%% produces a comparable delay
+-define(DEFAULT_LOCK_WAIT_TTL, 70).
+%% don't allow lock lease TTL to be lower than this
+%% as overly low values can cause annoying timeouts in etcd client operations
+-define(MINIMUM_LOCK_WAIT_TTL, 30).
+
+-define(CALL_TIMEOUT, 15000).
+
+start(Conf) ->
+ gen_statem:start({local, ?MODULE}, ?MODULE, Conf, []).
+
+start_link(Conf) ->
+ gen_statem:start_link({local, ?MODULE}, ?MODULE, Conf, []).
+
+stop() ->
+ gen_statem:stop(?MODULE).
+
+init(Args) ->
+ ok = application:ensure_started(eetcd),
+ Settings = normalize_settings(Args),
+ Endpoints = maps:get(endpoints, Settings),
+ Username = maps:get(etcd_username, Settings, undefined),
+ Password = maps:get(etcd_password, Settings, undefined),
+ TLSOpts = maps:get(ssl_options, Settings, []),
+ Actions = [{next_event, internal, start}],
+ {ok, recover, #statem_data{
+ endpoints = Endpoints,
+ tls_options = TLSOpts,
+ username = Username,
+ obfuscated_password = obfuscate(Password),
+ key_prefix = maps:get(etcd_prefix, Settings, <<"rabbitmq">>),
+ node_key_ttl_in_seconds = erlang:max(
+ ?MINIMUM_NODE_KEY_LEASE_TTL,
+ maps:get(etcd_node_ttl, Settings, ?DEFAULT_NODE_KEY_LEASE_TTL)
+ ),
+ cluster_name = maps:get(cluster_name, Settings, <<"default">>),
+ lock_ttl_in_seconds = erlang:max(
+ ?MINIMUM_LOCK_WAIT_TTL,
+ maps:get(lock_wait_time, Settings, ?DEFAULT_LOCK_WAIT_TTL)
+ )
+ }, Actions}.
+
+callback_mode() -> [state_functions, state_enter].
+
+terminate(Reason, State, Data) ->
+ rabbit_log:debug("etcd v3 API client will terminate in state ~p, reason: ~p",
+ [State, Reason]),
+ disconnect(?ETCD_CONN_NAME, Data),
+ rabbit_log:debug("etcd v3 API client has disconnected"),
+ rabbit_log:debug("etcd v3 API client: total number of connections to etcd is ~p", [length(eetcd_conn_sup:info())]),
+ ok.
+
+register() ->
+ register(?MODULE).
+
+register(ServerRef) ->
+ gen_statem:call(ServerRef, register, ?CALL_TIMEOUT).
+
+unregister() ->
+ ?MODULE:unregister(?MODULE).
+
+unregister(ServerRef) ->
+ gen_statem:call(ServerRef, unregister, ?CALL_TIMEOUT).
+
+list_nodes() ->
+ list_nodes(?MODULE).
+
+list_nodes(ServerRef) ->
+ gen_statem:call(ServerRef, list_keys, ?CALL_TIMEOUT).
+
+lock() ->
+ lock(?MODULE, node()).
+
+lock(Node) ->
+ lock(?MODULE, Node).
+
+lock(ServerRef, Node) ->
+ gen_statem:call(ServerRef, {lock, Node}, ?CALL_TIMEOUT).
+
+unlock() ->
+ unlock(?MODULE, node()).
+
+unlock(LockKey) ->
+ unlock(?MODULE, LockKey).
+
+unlock(ServerRef, LockKey) ->
+ gen_statem:call(ServerRef, {unlock, LockKey}, ?CALL_TIMEOUT).
+
+%%
+%% States
+%%
+
+recover(enter, _PrevState, #statem_data{endpoints = Endpoints}) ->
+ rabbit_log:debug("etcd v3 API client has entered recovery state, endpoints: ~s",
+ [string:join(Endpoints, ",")]),
+ keep_state_and_data;
+recover(internal, start, Data = #statem_data{endpoints = Endpoints, connection_monitor = Ref}) ->
+ rabbit_log:debug("etcd v3 API client will attempt to connect, endpoints: ~s",
+ [string:join(Endpoints, ",")]),
+ maybe_demonitor(Ref),
+ {Transport, TransportOpts} = pick_transport(Data),
+ case Transport of
+ tcp -> rabbit_log:info("etcd v3 API client is configured to connect over plain TCP, without using TLS");
+ tls -> rabbit_log:info("etcd v3 API client is configured to use TLS")
+ end,
+ ConnName = ?ETCD_CONN_NAME,
+ case connect(ConnName, Endpoints, Transport, TransportOpts, Data) of
+ {ok, Pid} ->
+ rabbit_log:debug("etcd v3 API client connection: ~p", [Pid]),
+ rabbit_log:debug("etcd v3 API client: total number of connections to etcd is ~p", [length(eetcd_conn_sup:info())]),
+ {next_state, connected, Data#statem_data{
+ connection_name = ConnName,
+ connection_pid = Pid,
+ connection_monitor = monitor(process, Pid)
+ }};
+ {error, Errors} ->
+ [rabbit_log:error("etcd peer discovery: failed to connect to endpoint ~p: ~p", [Endpoint, Err]) || {Endpoint, Err} <- Errors],
+ ensure_disconnected(?ETCD_CONN_NAME, Data),
+ Actions = [{state_timeout, reconnection_interval(), recover}],
+ {keep_state, reset_statem_data(Data), Actions}
+ end;
+recover(state_timeout, _PrevState, Data) ->
+ rabbit_log:debug("etcd peer discovery: connection entered a reconnection delay state"),
+ ensure_disconnected(?ETCD_CONN_NAME, Data),
+ {next_state, recover, reset_statem_data(Data)};
+recover({call, From}, Req, _Data) ->
+ rabbit_log:error("etcd v3 API: client received a call ~p while not connected, will do nothing", [Req]),
+ gen_statem:reply(From, {error, not_connected}),
+ keep_state_and_data.
+
+
+connected(enter, _PrevState, Data) ->
+ rabbit_log:info("etcd peer discovery: successfully connected to etcd"),
+
+ {keep_state, acquire_node_key_lease_grant(Data)};
+connected(info, {'DOWN', ConnRef, process, ConnPid, Reason}, Data = #statem_data{
+ connection_pid = ConnPid,
+ connection_monitor = ConnRef
+ }) ->
+ rabbit_log:debug("etcd peer discovery: connection to etcd ~p is down: ~p", [ConnPid, Reason]),
+ maybe_demonitor(ConnRef),
+ {next_state, recover, reset_statem_data(Data)};
+connected({call, From}, {lock, _Node}, Data = #statem_data{connection_name = Conn, lock_ttl_in_seconds = TTL}) ->
+ case eetcd_lease:grant(eetcd_kv:new(Conn), TTL) of
+ {ok, #{'ID' := LeaseID}} ->
+ Key = lock_key_base(Data),
+ rabbit_log:debug("etcd peer discovery: granted a lease ~p for registration lock ~s with TTL = ~p", [LeaseID, Key, TTL]),
+ case eetcd_lock:lock(lock_context(Conn, Data), Key, LeaseID) of
+ {ok, #{key := GeneratedKey}} ->
+ rabbit_log:debug("etcd peer discovery: successfully acquired a lock, lock owner key: ~s", [GeneratedKey]),
+ reply_and_retain_state(From, {ok, GeneratedKey});
+ {error, _} = Error ->
+ rabbit_log:debug("etcd peer discovery: failed to acquire a lock using key ~s: ~p", [Key, Error]),
+ reply_and_retain_state(From, Error)
+ end;
+ {error, _} = Error ->
+ rabbit_log:debug("etcd peer discovery: failed to get a lease for registration lock: ~p", [Error]),
+ reply_and_retain_state(From, Error)
+ end;
+connected({call, From}, {unlock, GeneratedKey}, Data = #statem_data{connection_name = Conn}) ->
+ Ctx = unlock_context(Conn, Data),
+ case eetcd_lock:unlock(Ctx, GeneratedKey) of
+ {ok, _} ->
+ rabbit_log:debug("etcd peer discovery: successfully released lock, lock owner key: ~s", [GeneratedKey]),
+ reply_and_retain_state(From, ok);
+ {error, _} = Error ->
+ rabbit_log:debug("etcd peer discovery: failed to release registration lock, lock owner key: ~s, error ~p",
+ [GeneratedKey, Error]),
+ reply_and_retain_state(From, Error)
+ end;
+connected({call, From}, register, Data = #statem_data{connection_name = Conn}) ->
+ Ctx = registration_context(Conn, Data),
+ Key = node_key(Data),
+ eetcd_kv:put(Ctx, Key, registration_value(Data)),
+ rabbit_log:debug("etcd peer discovery: put key ~p, done with registration", [Key]),
+ gen_statem:reply(From, ok),
+ keep_state_and_data;
+connected({call, From}, unregister, Data = #statem_data{connection_name = Conn}) ->
+ unregister(Conn, Data),
+ gen_statem:reply(From, ok),
+ {keep_state, Data#statem_data{
+ node_key_lease_id = undefined
+ }};
+connected({call, From}, list_keys, Data = #statem_data{connection_name = Conn}) ->
+ Prefix = node_key_base(Data),
+ C1 = eetcd_kv:new(Conn),
+ C2 = eetcd_kv:with_prefix(eetcd_kv:with_key(C1, Prefix)),
+ rabbit_log:debug("etcd peer discovery: will use prefix ~s to query for node keys", [Prefix]),
+ {ok, #{kvs := Result}} = eetcd_kv:get(C2),
+ rabbit_log:debug("etcd peer discovery returned keys: ~p", [Result]),
+ Values = [maps:get(value, M) || M <- Result],
+ case Values of
+ Xs when is_list(Xs) ->
+ rabbit_log:debug("etcd peer discovery: listing node keys returned ~b results", [length(Xs)]),
+ ParsedNodes = lists:map(fun extract_node/1, Xs),
+ {Successes, Failures} = lists:partition(fun filter_node/1, ParsedNodes),
+ JoinedString = lists:join(",", [rabbit_data_coercion:to_list(Node) || Node <- lists:usort(Successes)]),
+            rabbit_log:debug("etcd peer discovery: successfully extracted nodes: ~s", [JoinedString]),
+ lists:foreach(fun(Val) ->
+ rabbit_log:error("etcd peer discovery: failed to extract node name from etcd value ~p", [Val])
+ end, Failures),
+ gen_statem:reply(From, lists:usort(Successes)),
+ keep_state_and_data;
+ Other ->
+ rabbit_log:debug("etcd peer discovery: listing node keys returned ~p", [Other]),
+ gen_statem:reply(From, []),
+ keep_state_and_data
+ end.
+
+disconnected(enter, _PrevState, _Data) ->
+ rabbit_log:info("etcd peer discovery: successfully disconnected from etcd"),
+ keep_state_and_data.
+
+
+%%
+%% Implementation
+%%
+
+acquire_node_key_lease_grant(Data = #statem_data{connection_name = Name, node_key_ttl_in_seconds = TTL}) ->
+ %% acquire a lease for TTL
+ {ok, #{'ID' := LeaseID}} = eetcd_lease:grant(Name, TTL),
+ {ok, KeepalivePid} = eetcd_lease:keep_alive(Name, LeaseID),
+ rabbit_log:debug("etcd peer discovery: acquired a lease ~p for node key ~s with TTL = ~p", [LeaseID, node_key(Data), TTL]),
+ Data#statem_data{
+ node_key_lease_id = LeaseID,
+ node_lease_keepalive_pid = KeepalivePid
+ }.
+
+registration_context(ConnName, #statem_data{node_key_lease_id = LeaseID}) ->
+ Ctx1 = eetcd_kv:new(ConnName),
+ eetcd_kv:with_lease(Ctx1, LeaseID).
+
+unregistration_context(ConnName, _Data) ->
+ eetcd_kv:new(ConnName).
+
+lock_context(ConnName, #statem_data{lock_ttl_in_seconds = LeaseTTL}) ->
+    %% LeaseTTL is in seconds; eetcd_lock:with_timeout/2 expects milliseconds
+ eetcd_lock:with_timeout(eetcd_lock:new(ConnName), LeaseTTL * 1000).
+
+unlock_context(ConnName, #statem_data{lock_ttl_in_seconds = Timeout}) ->
+    %% caps the timeout using the lock TTL value; that makes more
+    %% sense than picking an arbitrary number. MK.
+ eetcd_lock:with_timeout(eetcd_lock:new(ConnName), Timeout * 1000).
+
+node_key_base(#statem_data{cluster_name = ClusterName, key_prefix = Prefix}) ->
+ to_binary(rabbit_misc:format("/rabbitmq/discovery/~s/clusters/~s/nodes", [Prefix, ClusterName])).
+
+node_key(Data) ->
+ to_binary(rabbit_misc:format("~s/~s", [node_key_base(Data), node()])).
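+
+%% For illustration only (not part of the original file): with the default
+%% prefix and cluster name, node_key/1 on a hypothetical node rabbit@host1
+%% yields <<"/rabbitmq/discovery/rabbitmq/clusters/default/nodes/rabbit@host1">>.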
+
+lock_key_base(#statem_data{key_prefix = Prefix, cluster_name = ClusterName}) ->
+ Key = rabbit_misc:format("/rabbitmq/locks/~s/clusters/~s/registration",
+ [Prefix, ClusterName]),
+ to_binary(Key).
+
+%% This value is not used and merely
+%% provides additional context to the operator.
+registration_value(#statem_data{node_key_lease_id = LeaseID, node_key_ttl_in_seconds = TTL}) ->
+ to_binary(rabbit_json:encode(#{
+ <<"node">> => to_binary(node()),
+ <<"lease_id">> => LeaseID,
+ <<"ttl">> => TTL
+ })).
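+
+%% For illustration only (not part of the original file): the stored value
+%% is JSON along the lines of (the lease ID here is hypothetical)
+%%   {"node":"rabbit@host1","lease_id":100,"ttl":61}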
+
+-spec extract_node(binary()) -> atom() | {error, any()}.
+
+extract_node(Payload) ->
+ case rabbit_json:decode(Payload) of
+ {error, Error} -> {error, Error};
+ Map ->
+ case maps:get(<<"node">>, Map, undefined) of
+ undefined -> undefined;
+ Node -> rabbit_data_coercion:to_atom(Node)
+ end
+ end.
+
+filter_node(undefined) -> false;
+filter_node({error, _}) -> false;
+filter_node(_Other) -> true.
+
+
+error_is_already_started({_Endpoint, already_started}) ->
+ true;
+error_is_already_started({_Endpoint, _}) ->
+ false.
+
+connect(Name, Endpoints, Transport, TransportOpts, Data) ->
+ case eetcd_conn:lookup(Name) of
+ {ok, Pid} when is_pid(Pid) ->
+ {ok, Pid};
+ {error, eetcd_conn_unavailable} ->
+ do_connect(Name, Endpoints, Transport, TransportOpts, Data)
+ end.
+
+do_connect(Name, Endpoints, Transport, TransportOpts, Data = #statem_data{username = Username}) ->
+ case Username of
+ undefined -> rabbit_log:info("etcd peer discovery: will connect to etcd without authentication (no credentials configured)");
+ _ -> rabbit_log:info("etcd peer discovery: will connect to etcd as user '~s'", [Username])
+ end,
+ case eetcd:open(Name, Endpoints, connection_options(Data), Transport, TransportOpts) of
+ {ok, Pid} -> {ok, Pid};
+ {error, Errors0} ->
+ Errors = case is_list(Errors0) of
+ true -> Errors0;
+ false -> [Errors0]
+ end,
+ rabbit_log:debug("etcd peer discovery: connection errors: ~p",
+ [Errors]),
+ rabbit_log:debug("etcd peer discovery: are all connection errors benign?: ~p",
+ [lists:all(fun error_is_already_started/1, Errors)]),
+            %% If all errors are already_started we can ignore them:
+            %% eetcd registers connections under a name, so opening an
+            %% already registered connection reports already_started.
+ case lists:all(fun error_is_already_started/1, Errors) of
+ true ->
+ eetcd_conn:lookup(Name);
+ false ->
+ {error, Errors}
+ end
+ end.
+
+connection_options(#statem_data{username = Username, obfuscated_password = Password}) ->
+ SharedOpts = [{mode, random}],
+ case {Username, Password} of
+ {undefined, _} -> SharedOpts;
+ {_, undefined} -> SharedOpts;
+ {UVal, PVal} ->
+ [{name, UVal}, {password, to_list(deobfuscate(PVal))}] ++ SharedOpts
+ end.
+
+
+obfuscate(undefined) -> undefined;
+obfuscate(Password) ->
+ credentials_obfuscation:encrypt(to_binary(Password)).
+
+deobfuscate(undefined) -> undefined;
+deobfuscate(Password) ->
+ credentials_obfuscation:decrypt(to_binary(Password)).
+
+disconnect(ConnName, #statem_data{connection_monitor = Ref}) ->
+ maybe_demonitor(Ref),
+ do_disconnect(ConnName).
+
+unregister(Conn, Data = #statem_data{node_key_lease_id = LeaseID, node_lease_keepalive_pid = KAPid}) ->
+ Ctx = unregistration_context(Conn, Data),
+ Key = node_key(Data),
+ eetcd_kv:delete(Ctx, Key),
+ rabbit_log:debug("etcd peer discovery: deleted key ~s, done with unregistration", [Key]),
+ eetcd_lease:revoke(Ctx, LeaseID),
+ exit(KAPid, normal),
+ rabbit_log:debug("etcd peer discovery: revoked a lease ~p for node key ~s", [LeaseID, Key]),
+ ok.
+
+reply_and_retain_state(From, Value) ->
+ gen_statem:reply(From, Value),
+ keep_state_and_data.
+
+maybe_demonitor(undefined) ->
+ true;
+maybe_demonitor(Ref) when is_reference(Ref) ->
+ erlang:demonitor(Ref).
+
+reset_statem_data(Data0 = #statem_data{endpoints = Es, connection_monitor = Ref}) when Es =/= undefined ->
+ maybe_demonitor(Ref),
+ Data0#statem_data{
+ connection_pid = undefined,
+ connection_monitor = undefined
+ }.
+
+ensure_disconnected(Name, #statem_data{connection_monitor = Ref}) ->
+ maybe_demonitor(Ref),
+ do_disconnect(Name).
+
+do_disconnect(Name) ->
+ try
+ eetcd:close(Name)
+ catch _:_ ->
+ ok
+ end.
+
+reconnection_interval() ->
+ 3000.
+
+normalize_settings(Map) when is_map(Map) ->
+ Endpoints = maps:get(endpoints, Map, []),
+ LegacyEndpoints = case maps:get(etcd_host, Map, undefined) of
+ undefined -> [];
+ Hostname ->
+ Port = maps:get(etcd_port, Map, 2379),
+ [rabbit_misc:format("~s:~p", [Hostname, Port])]
+ end,
+
+ AllEndpoints = Endpoints ++ LegacyEndpoints,
+ maps:merge(maps:without([etcd_prefix, etcd_node_ttl, lock_wait_time], Map),
+ #{endpoints => AllEndpoints}).
+
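+%% For illustration only (not part of the original file): given
+%%   #{endpoints => ["one.etcd:2379"], etcd_host => "two.etcd", etcd_port => 2379}
+%% normalize_settings/1 returns a map whose endpoints list is
+%%   ["one.etcd:2379", "two.etcd:2379"].
+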
+pick_transport(#statem_data{tls_options = []}) ->
+ {tcp, []};
+pick_transport(#statem_data{tls_options = Opts}) ->
+ {tls, Opts}.
diff --git a/deps/rabbitmq_peer_discovery_etcd/test/config_schema_SUITE.erl b/deps/rabbitmq_peer_discovery_etcd/test/config_schema_SUITE.erl
new file mode 100644
index 0000000000..0d9e7ceeab
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_etcd/test/config_schema_SUITE.erl
@@ -0,0 +1,54 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+-module(config_schema_SUITE).
+
+-compile(export_all).
+
+all() ->
+ [
+ run_snippets
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ Config1 = rabbit_ct_helpers:run_setup_steps(Config),
+ rabbit_ct_config_schema:init_schemas(rabbitmq_peer_discovery_etcd, Config1).
+
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, Testcase}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_testcase(Testcase, Config) ->
+ Config1 = rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()),
+ rabbit_ct_helpers:testcase_finished(Config1, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+run_snippets(Config) ->
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, run_snippets1, [Config]).
+
+run_snippets1(Config) ->
+ rabbit_ct_config_schema:run_snippets(Config).
+
diff --git a/deps/rabbitmq_peer_discovery_etcd/test/config_schema_SUITE_data/rabbitmq_peer_discovery_etcd.snippets b/deps/rabbitmq_peer_discovery_etcd/test/config_schema_SUITE_data/rabbitmq_peer_discovery_etcd.snippets
new file mode 100644
index 0000000000..2d41aa0dbe
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_etcd/test/config_schema_SUITE_data/rabbitmq_peer_discovery_etcd.snippets
@@ -0,0 +1,137 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+[
+ %% discovery mechanism
+ {etcd_discovery_mechanism_as_module,
+ "cluster_formation.peer_discovery_backend = rabbit_peer_discovery_etcd
+ cluster_formation.etcd.host = etcd.eng.megacorp.local", [
+ {rabbit, [
+ {cluster_formation, [
+ {peer_discovery_backend, rabbit_peer_discovery_etcd},
+ {peer_discovery_etcd, [
+ {etcd_host, "etcd.eng.megacorp.local"}
+ ]}
+ ]}
+ ]}
+ ], [rabbitmq_peer_discovery_etcd]
+ },
+
+ {etcd_discovery_mechanism_shortcut,
+ "cluster_formation.peer_discovery_backend = etcd
+ cluster_formation.etcd.host = etcd.eng.megacorp.local", [
+ {rabbit, [
+ {cluster_formation, [
+ {peer_discovery_backend, rabbit_peer_discovery_etcd},
+ {peer_discovery_etcd, [
+ {etcd_host, "etcd.eng.megacorp.local"}
+ ]}
+ ]}
+ ]}
+ ], [rabbitmq_peer_discovery_etcd]
+ },
+
+ %% etcd hostname
+ {etcd_host, "cluster_formation.etcd.host = etcd.eng.megacorp.local", [
+ {rabbit, [
+ {cluster_formation, [
+ {peer_discovery_etcd, [
+ {etcd_host, "etcd.eng.megacorp.local"}
+ ]}
+ ]}
+ ]}
+ ], [rabbitmq_peer_discovery_etcd]
+ },
+ %% etcd port
+ {etcd_port, "cluster_formation.etcd.port = 7979", [
+ {rabbit, [
+ {cluster_formation, [
+ {peer_discovery_etcd, [
+ {etcd_port, 7979}
+ ]}
+ ]}
+ ]}
+ ], [rabbitmq_peer_discovery_etcd]
+ },
+ %% credentials
+ {etcd_username, "cluster_formation.etcd.username = rabbitmq", [
+ {rabbit, [
+ {cluster_formation, [
+ {peer_discovery_etcd, [
+ {etcd_username, "rabbitmq"}
+ ]}
+ ]}
+ ]}
+ ], [rabbitmq_peer_discovery_etcd]
+ },
+ {etcd_password, "cluster_formation.etcd.password = rabbitmq", [
+ {rabbit, [
+ {cluster_formation, [
+ {peer_discovery_etcd, [
+ {etcd_password, "rabbitmq"}
+ ]}
+ ]}
+ ]}
+ ], [rabbitmq_peer_discovery_etcd]
+ },
+ %% key prefix
+ {etcd_key_prefix, "cluster_formation.etcd.key_prefix = staging", [
+ {rabbit, [
+ {cluster_formation, [
+ {peer_discovery_etcd, [
+ {etcd_prefix, "staging"}
+ ]}
+ ]}
+ ]}
+ ], [rabbitmq_peer_discovery_etcd]
+ },
+ %% cluster name
+ {etcd_cluster_name, "cluster_formation.etcd.cluster_name = staging", [
+ {rabbit, [
+ {cluster_formation, [
+ {peer_discovery_etcd, [
+ {cluster_name, "staging"}
+ ]}
+ ]}
+ ]}
+ ], [rabbitmq_peer_discovery_etcd]
+ },
+ %% node key TTL
+ {etcd_node_ttl, "cluster_formation.etcd.node_ttl = 60", [
+ {rabbit, [
+ {cluster_formation, [
+ {peer_discovery_etcd, [
+ {etcd_node_ttl, 60}
+ ]}
+ ]}
+ ]}
+ ], [rabbitmq_peer_discovery_etcd]
+ },
+ %% lock acquisition timeout
+ {etcd_lock_wait_time, "cluster_formation.etcd.lock_timeout = 400", [
+ {rabbit, [
+ {cluster_formation, [
+ {peer_discovery_etcd, [
+ {lock_wait_time, 400}
+ ]}
+ ]}
+ ]}
+ ], [rabbitmq_peer_discovery_etcd]
+ },
+
+ %% alias for consistency with etcd
+ {etcd_lock_wait_time, "cluster_formation.etcd.lock_wait_time = 400", [
+ {rabbit, [
+ {cluster_formation, [
+ {peer_discovery_etcd, [
+ {lock_wait_time, 400}
+ ]}
+ ]}
+ ]}
+ ], [rabbitmq_peer_discovery_etcd]
+ }
+].
diff --git a/deps/rabbitmq_peer_discovery_etcd/test/system_SUITE.erl b/deps/rabbitmq_peer_discovery_etcd/test/system_SUITE.erl
new file mode 100644
index 0000000000..14533148b0
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_etcd/test/system_SUITE.erl
@@ -0,0 +1,141 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% The Initial Developer of the Original Code is AWeber Communications.
+%% Copyright (c) 2015-2016 AWeber Communications
+%% Copyright (c) 2016-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(system_SUITE).
+
+-compile(export_all).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-include("rabbit_peer_discovery_etcd.hrl").
+
+-import(rabbit_data_coercion, [to_binary/1, to_integer/1]).
+
+
+all() ->
+ [
+ {group, v3_client}
+ ].
+
+groups() ->
+ [
+ {v3_client, [], [
+ etcd_connection_sanity_check_test,
+ init_opens_a_connection_test,
+ registration_with_locking_test
+ ]}
+ ].
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config, [fun init_etcd/1]).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config, [fun stop_etcd/1]).
+
+init_etcd(Config) ->
+ DataDir = ?config(data_dir, Config),
+ PrivDir = ?config(priv_dir, Config),
+ TcpPort = 25389,
+ EtcdDir = filename:join([PrivDir, "etcd"]),
+ InitEtcd = filename:join([DataDir, "init-etcd.sh"]),
+ Cmd = [InitEtcd, EtcdDir, {"~b", [TcpPort]}],
+ case rabbit_ct_helpers:exec(Cmd) of
+ {ok, Stdout} ->
+ case re:run(Stdout, "^ETCD_PID=([0-9]+)$", [{capture, all_but_first, list}, multiline]) of
+ {match, [EtcdPid]} ->
+ ct:pal(?LOW_IMPORTANCE, "etcd PID: ~s~netcd is listening on: ~b", [EtcdPid, TcpPort]),
+ rabbit_ct_helpers:set_config(Config, [{etcd_pid, EtcdPid},
+ {etcd_endpoints, [rabbit_misc:format("localhost:~p", [TcpPort])]},
+ {etcd_port, TcpPort}]);
+ nomatch ->
+ ct:pal(?HI_IMPORTANCE, "init-etcd.sh output did not match what's expected: ~p", [Stdout])
+ end;
+ {error, Code, Reason} ->
+ ct:pal(?HI_IMPORTANCE, "init-etcd.sh exited with code ~p: ~p", [Code, Reason]),
+ _ = rabbit_ct_helpers:exec(["pkill", "-INT", "etcd"]),
+ {skip, "Failed to initialize etcd"}
+ end.
+
+stop_etcd(Config) ->
+ EtcdPid = ?config(etcd_pid, Config),
+ Cmd = ["kill", "-INT", EtcdPid],
+ _ = rabbit_ct_helpers:exec(Cmd),
+ Config.
+
+
+%%
+%% Test cases
+%%
+
+etcd_connection_sanity_check_test(Config) ->
+ application:ensure_all_started(eetcd),
+ Endpoints = ?config(etcd_endpoints, Config),
+ ?assertMatch({ok, _Pid}, eetcd:open(test, Endpoints)),
+
+ Condition1 = fun() ->
+ 1 =:= length(eetcd_conn_sup:info())
+ end,
+ try
+ rabbit_ct_helpers:await_condition(Condition1, 60000)
+ after
+ eetcd:close(test)
+ end,
+ Condition2 = fun() ->
+ 0 =:= length(eetcd_conn_sup:info())
+ end,
+ rabbit_ct_helpers:await_condition(Condition2, 60000).
+
+init_opens_a_connection_test(Config) ->
+ Endpoints = ?config(etcd_endpoints, Config),
+ {ok, Pid} = start_client(Endpoints),
+ Condition = fun() ->
+ 1 =:= length(eetcd_conn_sup:info())
+ end,
+ try
+ rabbit_ct_helpers:await_condition(Condition, 90000)
+ after
+ gen_statem:stop(Pid)
+ end,
+ ?assertEqual(0, length(eetcd_conn_sup:info())).
+
+
+registration_with_locking_test(Config) ->
+ Endpoints = ?config(etcd_endpoints, Config),
+ {ok, Pid} = start_client(Endpoints),
+ Condition1 = fun() ->
+ 1 =:= length(eetcd_conn_sup:info())
+ end,
+ rabbit_ct_helpers:await_condition(Condition1, 90000),
+
+ {ok, LockOwnerKey} = rabbitmq_peer_discovery_etcd_v3_client:lock(Pid, node()),
+ rabbitmq_peer_discovery_etcd_v3_client:register(Pid),
+ ?assertEqual(ok, rabbitmq_peer_discovery_etcd_v3_client:unlock(Pid, LockOwnerKey)),
+
+ Condition2 = fun() ->
+ [node()] =:= rabbitmq_peer_discovery_etcd_v3_client:list_nodes(Pid)
+ end,
+ try
+ rabbit_ct_helpers:await_condition(Condition2, 45000)
+ after
+ gen_statem:stop(Pid)
+ end.
+
+%%
+%% Helpers
+%%
+
+start_client(Endpoints) ->
+ case rabbitmq_peer_discovery_etcd_v3_client:start(#{endpoints => Endpoints}) of
+ {ok, Pid} ->
+ {ok, Pid};
+ {error, {already_started, Pid}} ->
+ {ok, Pid}
+ end.
diff --git a/deps/rabbitmq_peer_discovery_etcd/test/system_SUITE_data/init-etcd.sh b/deps/rabbitmq_peer_discovery_etcd/test/system_SUITE_data/init-etcd.sh
new file mode 100755
index 0000000000..6323657d4b
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_etcd/test/system_SUITE_data/init-etcd.sh
@@ -0,0 +1,81 @@
+#!/bin/sh
+# vim:sw=4:et:
+
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at https://mozilla.org/MPL/2.0/.
+#
+# Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+#
+
+set -ex
+
+case "$(uname -s)" in
+ Linux)
+ package_os="linux"
+ tmpdir=/tmp
+ archive="tar.gz"
+ ;;
+ Darwin)
+ package_os="darwin"
+ tmpdir=$TMPDIR
+ archive="zip"
+ ;;
+ *)
+ exit 1
+ ;;
+esac
+
+etcd_data_dir=${1:-"$tmpdir/etcd/data"}
+pidfile="$etcd_data_dir/etcd.pid"
+tcp_port=${2:-2379}
+
+ETCD_VER=v3.4.6
+
+GITHUB_URL=https://github.com/etcd-io/etcd/releases/download
+DOWNLOAD_URL=${GITHUB_URL}
+
+rm -rf "${tmpdir}/etcd-${ETCD_VER}"
+
+if ! [ -f "${tmpdir}/etcd-${ETCD_VER}-$package_os-amd64.$archive" ]; then
+ curl -L "${DOWNLOAD_URL}/${ETCD_VER}/etcd-${ETCD_VER}-$package_os-amd64.$archive" -o "${tmpdir}/etcd-${ETCD_VER}-$package_os-amd64.$archive"
+fi
+
+case "$(uname -s)" in
+ Linux)
+ mkdir -p "${tmpdir}/etcd-${ETCD_VER}-$package_os-amd64/"
+        tar xzvf "${tmpdir}/etcd-${ETCD_VER}-$package_os-amd64.$archive" -C "${tmpdir}/etcd-${ETCD_VER}-$package_os-amd64/" --strip-components=1
+ ;;
+ Darwin)
+ unzip -q -o -d "$tmpdir" "${tmpdir}/etcd-${ETCD_VER}-$package_os-amd64.$archive"
+ ;;
+ *)
+ exit 1
+ ;;
+esac
+
+mv "${tmpdir}/etcd-${ETCD_VER}-$package_os-amd64/" "${tmpdir}/etcd-${ETCD_VER}/"
+
+rm -rf "$etcd_data_dir"
+mkdir -p "$etcd_data_dir"
+
+# daemonize(1) is installed under this path on Debian
+PATH=$PATH:/usr/sbin
+
+daemonize -p "$pidfile" -l "${etcd_data_dir}/daemonize_lock" -- "$tmpdir/etcd-${ETCD_VER}/etcd" \
+ --data-dir "$etcd_data_dir" --name peer-discovery-0 --initial-advertise-peer-urls http://127.0.0.1:2380 \
+ --listen-peer-urls http://127.0.0.1:2380 \
+ --listen-client-urls "http://127.0.0.1:${tcp_port}" \
+ --advertise-client-urls "http://127.0.0.1:${tcp_port}" \
+ --initial-cluster-token rabbitmq-peer-discovery-etcd \
+ --initial-cluster peer-discovery-0=http://127.0.0.1:2380 \
+ --initial-cluster-state new
+
+
+# Wait for etcd to accept writes. Note: {1..30} is a bashism that does not
+# expand under /bin/sh, so use seq(1) instead.
+for _ in $(seq 1 30); do
+    "$tmpdir/etcd-${ETCD_VER}/etcdctl" put rabbitmq-ct rabbitmq-ct --dial-timeout=1s && break
+    sleep 1
+done
+
+echo ETCD_PID=$(cat "$pidfile")
+
diff --git a/deps/rabbitmq_peer_discovery_etcd/test/unit_SUITE.erl b/deps/rabbitmq_peer_discovery_etcd/test/unit_SUITE.erl
new file mode 100644
index 0000000000..10df977f1f
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_etcd/test/unit_SUITE.erl
@@ -0,0 +1,102 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% The Initial Developer of the Original Code is AWeber Communications.
+%% Copyright (c) 2015-2016 AWeber Communications
+%% Copyright (c) 2016-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(unit_SUITE).
+
+-compile(export_all).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-include("rabbit_peer_discovery_etcd.hrl").
+
+-import(rabbit_data_coercion, [to_binary/1]).
+
+
+all() ->
+ [
+ {group, unit}
+ ].
+
+groups() ->
+ [
+ {unit, [], [
+ registration_value_test,
+ extract_nodes_case1_test,
+ filter_nodes_test,
+ node_key_base_test,
+ node_key_test,
+ lock_key_base_test
+ ]}
+ ].
+
+
+%%
+%% Test cases
+%%
+
+registration_value_test(_Config) ->
+ LeaseID = 8488283859587364900,
+ TTL = 61,
+ Input = #statem_data{
+ node_key_lease_id = LeaseID,
+ node_key_ttl_in_seconds = TTL
+ },
+ Expected = registration_value_of(LeaseID, TTL),
+ ?assertEqual(Expected, rabbitmq_peer_discovery_etcd_v3_client:registration_value(Input)).
+
+
+extract_nodes_case1_test(_Config) ->
+ Input = registration_value_of(8488283859587364900, 61),
+ Expected = node(),
+
+ ?assertEqual(Expected, rabbitmq_peer_discovery_etcd_v3_client:extract_node(Input)),
+
+ ?assertEqual(undefined, rabbitmq_peer_discovery_etcd_v3_client:extract_node(<<"{}">>)).
+
+filter_nodes_test(_Config) ->
+ Input = [node(), undefined, undefined, {error, reason1}, {error, {another, reason}}],
+ Expected = [node()],
+
+ ?assertEqual(Expected, lists:filter(fun rabbitmq_peer_discovery_etcd_v3_client:filter_node/1, Input)).
+
+node_key_base_test(_Config) ->
+ Expected = <<"/rabbitmq/discovery/prefffix/clusters/cluster-a/nodes">>,
+ Input = #statem_data{
+ cluster_name = "cluster-a",
+ key_prefix = "prefffix"
+ },
+ ?assertEqual(Expected, rabbitmq_peer_discovery_etcd_v3_client:node_key_base(Input)).
+
+node_key_test(_Config) ->
+ Expected = to_binary(rabbit_misc:format("/rabbitmq/discovery/prefffix/clusters/cluster-a/nodes/~s", [node()])),
+ Input = #statem_data{
+ cluster_name = "cluster-a",
+ key_prefix = "prefffix"
+ },
+ ?assertEqual(Expected, rabbitmq_peer_discovery_etcd_v3_client:node_key(Input)).
+
+lock_key_base_test(_Config) ->
+ Expected = <<"/rabbitmq/locks/prefffix/clusters/cluster-b/registration">>,
+ Input = #statem_data{
+ cluster_name = "cluster-b",
+ key_prefix = "prefffix"
+ },
+ ?assertEqual(Expected, rabbitmq_peer_discovery_etcd_v3_client:lock_key_base(Input)).
+
+%%
+%% Helpers
+%%
+
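+%% Builds the JSON document the etcd v3 client stores as a node's
+%% registration value (compared against the client's output in
+%% registration_value_test/1 above).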
+registration_value_of(LeaseID, TTL) ->
+ to_binary(rabbit_json:encode(#{
+ <<"node">> => to_binary(node()),
+ <<"lease_id">> => LeaseID,
+ <<"ttl">> => TTL
+ })).
diff --git a/deps/rabbitmq_peer_discovery_k8s/.gitignore b/deps/rabbitmq_peer_discovery_k8s/.gitignore
new file mode 100644
index 0000000000..08362ef45f
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_k8s/.gitignore
@@ -0,0 +1,25 @@
+*~
+.sw?
+.*.sw?
+*.beam
+*.coverdata
+/.erlang.mk/
+/cover/
+/deps/
+/doc/
+/ebin/
+/escript/
+/escript.lock
+/git-revisions.txt
+/logs/
+/plugins/
+/plugins.lock
+/rebar.config
+/rebar.lock
+/sbin/
+/sbin.lock
+/test/config_schema_SUITE_data/schema/
+/test/ct.cover.spec
+/xrefr
+
+/rabbitmq_peer_discovery_k8s.d
diff --git a/deps/rabbitmq_peer_discovery_k8s/.travis.yml b/deps/rabbitmq_peer_discovery_k8s/.travis.yml
new file mode 100644
index 0000000000..c30fce9eb7
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_k8s/.travis.yml
@@ -0,0 +1,61 @@
+# vim:sw=2:et:
+
+os: linux
+dist: xenial
+language: elixir
+notifications:
+ email:
+ recipients:
+ - alerts@rabbitmq.com
+ on_success: never
+ on_failure: always
+addons:
+ apt:
+ packages:
+ - awscli
+cache:
+ apt: true
+env:
+ global:
+ - secure: aeHv/XU16cTsiZznWmAr5qi0mYDihao/wpNJ5QTdef+dSXmr66AKn+7HoRKO+K7itt1GhOBpnfEGZagocPPbPyYX/u1FGsCeE3nRvg24aNCdxNnZ+uPa8Vs6kPQokNzYMSvUH5y75zdnBH2zwEihro92fSG3aTzK7Gdr0XC5xoYiSRMaffbPLMaxay/JuSmO/qOCKiMIZnnuzEBHV0RIlKOCubTkVB7kVW0o66auh9FpYqP7JMIrKHV2aKFNExEvFuhag732T5hhH/bfzq+NbV/BS9BWws9BDqM2LBvjAgaLgqZ1VTjmkYbtqjIq6dQz3ESc8ir5nuyVyuAQeKIe57XyB5v5sbpPADqfChmXI5rn0CIt3iMlPzzUX5KuCC640lela6wp6xYfXk5TGsBgrIRZ+x9iOnU/M5qUsVGfxAnoI/FLMQ386k10G3ZzIgEcHrz/wwracMY2boJdgMEXugIJA6sPIH3WNCa6QWWOAGP2U736ApuXjwiDED/3WxfN6o4gcWEZwWYqOZQIFVZSu/LAo3Tby5x/bubkKySznqssDhv5rcdHN0+uOATFW2+1xJ2cPyeP5tJnTvBbEeWs5InWkeSZ84iB95Un6ETxxeHOpssT0ygyXtCmXWVAcA+uBwF2hcMZlA00dvoYfMvEPBFeVs9YLMcdZvkGTp5iaoQ=
+ - secure: Bqvu0TyVQN/22yPX4F5kboh8Y8XQ31ptAD+yf7Rt9e9rjkl72I8hjmXxx8fBsmgZUIsqiuzUsEtKJ41TVtke2gDUacqWpp6cl9swgIwTqqjX8XI2jwcnn810d00/xX0Fy9k8hi5GIOpArjDz+oXDYiCebTsQi2v6LQ7taDvfhFRF09EBtBb+0A4M1pGT4haUe0RmcN6GtNTVtQKx/SQZFMT4rWTv0NmlZODGH/Bj7w65c71DD7ok37B3LJTlI7BkkSvwokrbCx/kPlKcDICP0LswqEIsHOkPEILTBG4PJR1W+IhaKfFvPqEsrerIcq+k08neZhyKqBzYqJV+Ri4wus8AqzKlZPNPALnJFqhuII3u6j/HSZsceFFOpZo7pq1m5FwT6HXJv3NjAHj8IeJOverJEL/iSf/VXEvRZrmykFgNEVyP9DOhdcLYM5t99VYzFItJMg59sXbyIxG0RzZjSxMMyswi1IK/SCWg5owacyO32+ptD+N6C6pEEaECltMA4hnC/yay3CUlK/KPRjzDyyQFA7dAtJofFlVSl/s59x5yz1cUKXhfcQEOi28dVqL5aNX9RvpLBkwQ22+ZzMEm20JvdoxrDiYs5okQ3b2SC5mGJPprEVbd6/bXbKIy4fXlZqcu39PtF9VVEEaOY5NJgElWHqwL2/Bvi/fOeFW6kYA=
+
+ # $base_rmq_ref is used by rabbitmq-components.mk to select the
+ # appropriate branch for dependencies.
+ - base_rmq_ref=master
+
+elixir:
+ - '1.9'
+otp_release:
+ - '21.3'
+ - '22.2'
+
+install:
+ # This project being an Erlang one (we just set language to Elixir
+ # to ensure it is installed), we don't want Travis to run mix(1)
+ # automatically as it will break.
+ skip
+
+script:
+ # $current_rmq_ref is also used by rabbitmq-components.mk to select
+ # the appropriate branch for dependencies.
+ - make check-rabbitmq-components.mk
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+ - make xref
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+ - make tests
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+
+after_failure:
+ - |
+ cd "$TRAVIS_BUILD_DIR"
+ if test -d logs && test "$AWS_ACCESS_KEY_ID" && test "$AWS_SECRET_ACCESS_KEY"; then
+ archive_name="$(basename "$TRAVIS_REPO_SLUG")-$TRAVIS_JOB_NUMBER"
+
+ tar -c --transform "s/^logs/${archive_name}/" -f - logs | \
+ xz > "${archive_name}.tar.xz"
+
+ aws s3 cp "${archive_name}.tar.xz" s3://server-release-pipeline/travis-ci-logs/ \
+ --region eu-west-1 \
+ --acl public-read
+ fi
diff --git a/deps/rabbitmq_peer_discovery_k8s/CODE_OF_CONDUCT.md b/deps/rabbitmq_peer_discovery_k8s/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000000..08697906fd
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_k8s/CODE_OF_CONDUCT.md
@@ -0,0 +1,44 @@
+# Contributor Code of Conduct
+
+As contributors and maintainers of this project, and in the interest of fostering an open
+and welcoming community, we pledge to respect all people who contribute through reporting
+issues, posting feature requests, updating documentation, submitting pull requests or
+patches, and other activities.
+
+We are committed to making participation in this project a harassment-free experience for
+everyone, regardless of level of experience, gender, gender identity and expression,
+sexual orientation, disability, personal appearance, body size, race, ethnicity, age,
+religion, or nationality.
+
+Examples of unacceptable behavior by participants include:
+
+ * The use of sexualized language or imagery
+ * Personal attacks
+ * Trolling or insulting/derogatory comments
+ * Public or private harassment
+ * Publishing others' private information, such as physical or electronic addresses,
+ without explicit permission
+ * Other unethical or unprofessional conduct
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments,
+commits, code, wiki edits, issues, and other contributions that are not aligned to this
+Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors
+that they deem inappropriate, threatening, offensive, or harmful.
+
+By adopting this Code of Conduct, project maintainers commit themselves to fairly and
+consistently applying these principles to every aspect of managing this project. Project
+maintainers who do not follow or enforce the Code of Conduct may be permanently removed
+from the project team.
+
+This Code of Conduct applies both within project spaces and in public spaces when an
+individual is representing the project or its community.
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by
+contacting a project maintainer at [info@rabbitmq.com](mailto:info@rabbitmq.com). All complaints will
+be reviewed and investigated and will result in a response that is deemed necessary and
+appropriate to the circumstances. Maintainers are obligated to maintain confidentiality
+with regard to the reporter of an incident.
+
+This Code of Conduct is adapted from the
+[Contributor Covenant](https://contributor-covenant.org), version 1.3.0, available at
+[contributor-covenant.org/version/1/3/0/](https://contributor-covenant.org/version/1/3/0/)
diff --git a/deps/rabbitmq_peer_discovery_k8s/CONTRIBUTING.md b/deps/rabbitmq_peer_discovery_k8s/CONTRIBUTING.md
new file mode 100644
index 0000000000..42af1f7517
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_k8s/CONTRIBUTING.md
@@ -0,0 +1,123 @@
+Thank you for using RabbitMQ and for taking the time to contribute to the project.
+This document has two main parts:
+
+ * when and how to file GitHub issues for RabbitMQ projects
+ * how to submit pull requests
+
+They are intended to save you and the RabbitMQ maintainers some time, so
+please take a moment to read through them.
+
+## Overview
+
+### GitHub issues
+
+The RabbitMQ team uses GitHub issues for _specific actionable items_ that
+engineers can work on. This assumes the following:
+
+* GitHub issues are not used for questions, investigations, root cause
+  analysis, discussions of potential issues, etc. (as defined by this team)
+* Enough information is provided by the reporter for maintainers to work with
+
+The team receives many questions through various venues every single
+day. Frequently, these questions do not include the necessary details
+the team needs to begin useful work. GitHub issues can very quickly
+turn into something impossible to navigate and make sense
+of. Because of this, questions, investigations, root cause analysis,
+and discussions of potential features are all considered to be
+[mailing list][rmq-users] material. If you are unsure where to begin,
+the [RabbitMQ users mailing list][rmq-users] is the right place.
+
+Getting all the details necessary to reproduce an issue, make a
+conclusion or even form a hypothesis about what's happening can take a
+fair amount of time. Please help others help you by providing a way to
+reproduce the behavior you're observing, or at least sharing as much
+relevant information as possible on the [RabbitMQ users mailing
+list][rmq-users].
+
+Please provide versions of the software used:
+
+ * RabbitMQ server
+ * Erlang
+ * Operating system version (and distribution, if applicable)
+ * All client libraries used
+ * RabbitMQ plugins (if applicable)
+
+The following information greatly helps in investigating and reproducing issues:
+
+ * RabbitMQ server logs
+ * A code example or terminal transcript that can be used to reproduce
+ * Full exception stack traces (a single line message is not enough!)
+ * `rabbitmqctl report` and `rabbitmqctl environment` output
+ * Other relevant details about the environment and workload, e.g. a traffic capture
+ * Feel free to edit out hostnames and other potentially sensitive information.
+
+To make collecting much of this and other environment information easier, use
+the [`rabbitmq-collect-env`][rmq-collect-env] script. It will produce an archive with
+server logs, operating system logs, output of certain diagnostics commands and so on.
+Please note that **no effort is made to scrub any information that may be sensitive**.
+
+### Pull Requests
+
+RabbitMQ projects use pull requests to discuss, collaborate on and accept code contributions.
+Pull requests are the primary place for discussing code changes.
+
+Here's the recommended workflow (see the shell sketch after this list):
+
+ * [Fork the repository][github-fork] or repositories you plan on contributing to. If multiple
+ repositories are involved in addressing the same issue, please use the same branch name
+ in each repository
+ * Create a branch with a descriptive name in the relevant repositories
+ * Make your changes, run tests (usually with `make tests`), commit with a
+ [descriptive message][git-commit-msgs], push to your fork
+ * Submit pull requests with an explanation of what has been changed and **why**
+ * Submit a filled out and signed [Contributor Agreement][ca-agreement] if needed (see below)
+ * Be patient. We will get to your pull request eventually
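+
+For illustration, the steps above might look like this in a shell (the fork
+URL and branch name below are placeholders):
+
+    git clone https://github.com/$your_github_user/rabbitmq-peer-discovery-k8s.git
+    cd rabbitmq-peer-discovery-k8s
+    git checkout -b descriptive-branch-name
+    # ...edit, then run the tests...
+    make tests
+    git commit -a -m "Explain what changed and why"
+    git push origin descriptive-branch-name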
+
+If what you are going to work on is a substantial change, please first
+ask the core team for their opinion on the [RabbitMQ users mailing list][rmq-users].
+
+## Running Tests
+
+To run a "fast suite" (a subset of tests):
+
+ make ct-fast
+
+To run a "slow suite" (a subset of tests that take much longer to run):
+
+ make ct-slow
+
+To run a particular suite:
+
+ make ct-$suite_name
+
+for example, to run the `backing_queue` suite:
+
+ make ct-backing_queue
+
+Finally,
+
+ make tests
+
+will run all suites.
+
+## Code of Conduct
+
+See [CODE_OF_CONDUCT.md](./CODE_OF_CONDUCT.md).
+
+## Contributor Agreement
+
+If you want to contribute a non-trivial change, please submit a signed
+copy of our [Contributor Agreement][ca-agreement] around the time you
+submit your pull request. This will make it much easier (in some
+cases, possible) for the RabbitMQ team at Pivotal to merge your
+contribution.
+
+## Where to Ask Questions
+
+If something isn't clear, feel free to ask on our [mailing list][rmq-users].
+
+[rmq-collect-env]: https://github.com/rabbitmq/support-tools/blob/master/scripts/rabbitmq-collect-env
+[git-commit-msgs]: https://chris.beams.io/posts/git-commit/
+[rmq-users]: https://groups.google.com/forum/#!forum/rabbitmq-users
+[ca-agreement]: https://cla.pivotal.io/sign/rabbitmq
+[github-fork]: https://help.github.com/articles/fork-a-repo/
diff --git a/deps/rabbitmq_peer_discovery_k8s/LICENSE b/deps/rabbitmq_peer_discovery_k8s/LICENSE
new file mode 100644
index 0000000000..f2da65d175
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_k8s/LICENSE
@@ -0,0 +1,4 @@
+This package is licensed under the MPL 2.0. For the MPL 2.0, please see LICENSE-MPL-RabbitMQ.
+
+If you have any questions regarding licensing, please contact us at
+info@rabbitmq.com.
diff --git a/deps/rabbitmq_peer_discovery_k8s/LICENSE-MPL-RabbitMQ b/deps/rabbitmq_peer_discovery_k8s/LICENSE-MPL-RabbitMQ
new file mode 100644
index 0000000000..14e2f777f6
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_k8s/LICENSE-MPL-RabbitMQ
@@ -0,0 +1,373 @@
+Mozilla Public License Version 2.0
+==================================
+
+1. Definitions
+--------------
+
+1.1. "Contributor"
+ means each individual or legal entity that creates, contributes to
+ the creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+ means the combination of the Contributions of others (if any) used
+ by a Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+ means Source Code Form to which the initial Contributor has attached
+ the notice in Exhibit A, the Executable Form of such Source Code
+ Form, and Modifications of such Source Code Form, in each case
+ including portions thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ (a) that the initial Contributor has attached the notice described
+ in Exhibit B to the Covered Software; or
+
+ (b) that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the
+ terms of a Secondary License.
+
+1.6. "Executable Form"
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+ means a work that combines Covered Software with other material, in
+ a separate file or files, that is not Covered Software.
+
+1.8. "License"
+ means this document.
+
+1.9. "Licensable"
+ means having the right to grant, to the maximum extent possible,
+ whether at the time of the initial grant or subsequently, any and
+ all of the rights conveyed by this License.
+
+1.10. "Modifications"
+ means any of the following:
+
+ (a) any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered
+ Software; or
+
+ (b) any new file in Source Code Form that contains any Covered
+ Software.
+
+1.11. "Patent Claims" of a Contributor
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the
+ License, by the making, using, selling, offering for sale, having
+ made, import, or transfer of either its Contributions or its
+ Contributor Version.
+
+1.12. "Secondary License"
+ means either the GNU General Public License, Version 2.0, the GNU
+ Lesser General Public License, Version 2.1, the GNU Affero General
+ Public License, Version 3.0, or any later versions of those
+ licenses.
+
+1.13. "Source Code Form"
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that
+ controls, is controlled by, or is under common control with You. For
+ purposes of this definition, "control" means (a) the power, direct
+ or indirect, to cause the direction or management of such entity,
+ whether by contract or otherwise, or (b) ownership of more than
+ fifty percent (50%) of the outstanding shares or beneficial
+ ownership of such entity.
+
+2. License Grants and Conditions
+--------------------------------
+
+2.1. Grants
+
+Each Contributor hereby grants You a world-wide, royalty-free,
+non-exclusive license:
+
+(a) under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+(b) under Patent Claims of such Contributor to make, use, sell, offer
+ for sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+The licenses granted in Section 2.1 with respect to any Contribution
+become effective for each Contribution on the date the Contributor first
+distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+The licenses granted in this Section 2 are the only rights granted under
+this License. No additional rights or licenses will be implied from the
+distribution or licensing of Covered Software under this License.
+Notwithstanding Section 2.1(b) above, no patent license is granted by a
+Contributor:
+
+(a) for any code that a Contributor has removed from Covered Software;
+ or
+
+(b) for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+(c) under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+This License does not grant any rights in the trademarks, service marks,
+or logos of any Contributor (except as may be necessary to comply with
+the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+No Contributor makes additional grants as a result of Your choice to
+distribute the Covered Software under a subsequent version of this
+License (see Section 10.2) or under the terms of a Secondary License (if
+permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+Each Contributor represents that the Contributor believes its
+Contributions are its original creation(s) or it has sufficient rights
+to grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+This License is not intended to limit any rights You have under
+applicable copyright doctrines of fair use, fair dealing, or other
+equivalents.
+
+2.7. Conditions
+
+Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
+in Section 2.1.
+
+3. Responsibilities
+-------------------
+
+3.1. Distribution of Source Form
+
+All distribution of Covered Software in Source Code Form, including any
+Modifications that You create or to which You contribute, must be under
+the terms of this License. You must inform recipients that the Source
+Code Form of the Covered Software is governed by the terms of this
+License, and how they can obtain a copy of this License. You may not
+attempt to alter or restrict the recipients' rights in the Source Code
+Form.
+
+3.2. Distribution of Executable Form
+
+If You distribute Covered Software in Executable Form then:
+
+(a) such Covered Software must also be made available in Source Code
+ Form, as described in Section 3.1, and You must inform recipients of
+ the Executable Form how they can obtain a copy of such Source Code
+ Form by reasonable means in a timely manner, at a charge no more
+ than the cost of distribution to the recipient; and
+
+(b) You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter
+ the recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+You may create and distribute a Larger Work under terms of Your choice,
+provided that You also comply with the requirements of this License for
+the Covered Software. If the Larger Work is a combination of Covered
+Software with a work governed by one or more Secondary Licenses, and the
+Covered Software is not Incompatible With Secondary Licenses, this
+License permits You to additionally distribute such Covered Software
+under the terms of such Secondary License(s), so that the recipient of
+the Larger Work may, at their option, further distribute the Covered
+Software under the terms of either this License or such Secondary
+License(s).
+
+3.4. Notices
+
+You may not remove or alter the substance of any license notices
+(including copyright notices, patent notices, disclaimers of warranty,
+or limitations of liability) contained within the Source Code Form of
+the Covered Software, except that You may alter any license notices to
+the extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+You may choose to offer, and to charge a fee for, warranty, support,
+indemnity or liability obligations to one or more recipients of Covered
+Software. However, You may do so only on Your own behalf, and not on
+behalf of any Contributor. You must make it absolutely clear that any
+such warranty, support, indemnity, or liability obligation is offered by
+You alone, and You hereby agree to indemnify every Contributor for any
+liability incurred by such Contributor as a result of warranty, support,
+indemnity or liability terms You offer. You may include additional
+disclaimers of warranty and limitations of liability specific to any
+jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+---------------------------------------------------
+
+If it is impossible for You to comply with any of the terms of this
+License with respect to some or all of the Covered Software due to
+statute, judicial order, or regulation then You must: (a) comply with
+the terms of this License to the maximum extent possible; and (b)
+describe the limitations and the code they affect. Such description must
+be placed in a text file included with all distributions of the Covered
+Software under this License. Except to the extent prohibited by statute
+or regulation, such description must be sufficiently detailed for a
+recipient of ordinary skill to be able to understand it.
+
+5. Termination
+--------------
+
+5.1. The rights granted under this License will terminate automatically
+if You fail to comply with any of its terms. However, if You become
+compliant, then the rights granted under this License from a particular
+Contributor are reinstated (a) provisionally, unless and until such
+Contributor explicitly and finally terminates Your grants, and (b) on an
+ongoing basis, if such Contributor fails to notify You of the
+non-compliance by some reasonable means prior to 60 days after You have
+come back into compliance. Moreover, Your grants from a particular
+Contributor are reinstated on an ongoing basis if such Contributor
+notifies You of the non-compliance by some reasonable means, this is the
+first time You have received notice of non-compliance with this License
+from such Contributor, and You become compliant prior to 30 days after
+Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+infringement claim (excluding declaratory judgment actions,
+counter-claims, and cross-claims) alleging that a Contributor Version
+directly or indirectly infringes any patent, then the rights granted to
+You by any and all Contributors for the Covered Software under Section
+2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all
+end user license agreements (excluding distributors and resellers) which
+have been validly granted by You or Your distributors under this License
+prior to termination shall survive termination.
+
+************************************************************************
+* *
+* 6. Disclaimer of Warranty *
+* ------------------------- *
+* *
+* Covered Software is provided under this License on an "as is" *
+* basis, without warranty of any kind, either expressed, implied, or *
+* statutory, including, without limitation, warranties that the *
+* Covered Software is free of defects, merchantable, fit for a *
+* particular purpose or non-infringing. The entire risk as to the *
+* quality and performance of the Covered Software is with You. *
+* Should any Covered Software prove defective in any respect, You *
+* (not any Contributor) assume the cost of any necessary servicing, *
+* repair, or correction. This disclaimer of warranty constitutes an *
+* essential part of this License. No use of any Covered Software is *
+* authorized under this License except under this disclaimer. *
+* *
+************************************************************************
+
+************************************************************************
+* *
+* 7. Limitation of Liability *
+* -------------------------- *
+* *
+* Under no circumstances and under no legal theory, whether tort *
+* (including negligence), contract, or otherwise, shall any *
+* Contributor, or anyone who distributes Covered Software as *
+* permitted above, be liable to You for any direct, indirect, *
+* special, incidental, or consequential damages of any character *
+* including, without limitation, damages for lost profits, loss of *
+* goodwill, work stoppage, computer failure or malfunction, or any *
+* and all other commercial damages or losses, even if such party *
+* shall have been informed of the possibility of such damages. This *
+* limitation of liability shall not apply to liability for death or *
+* personal injury resulting from such party's negligence to the *
+* extent applicable law prohibits such limitation. Some *
+* jurisdictions do not allow the exclusion or limitation of *
+* incidental or consequential damages, so this exclusion and *
+* limitation may not apply to You. *
+* *
+************************************************************************
+
+8. Litigation
+-------------
+
+Any litigation relating to this License may be brought only in the
+courts of a jurisdiction where the defendant maintains its principal
+place of business and such litigation shall be governed by laws of that
+jurisdiction, without reference to its conflict-of-law provisions.
+Nothing in this Section shall prevent a party's ability to bring
+cross-claims or counter-claims.
+
+9. Miscellaneous
+----------------
+
+This License represents the complete agreement concerning the subject
+matter hereof. If any provision of this License is held to be
+unenforceable, such provision shall be reformed only to the extent
+necessary to make it enforceable. Any law or regulation which provides
+that the language of a contract shall be construed against the drafter
+shall not be used to construe this License against a Contributor.
+
+10. Versions of the License
+---------------------------
+
+10.1. New Versions
+
+Mozilla Foundation is the license steward. Except as provided in Section
+10.3, no one other than the license steward has the right to modify or
+publish new versions of this License. Each version will be given a
+distinguishing version number.
+
+10.2. Effect of New Versions
+
+You may distribute the Covered Software under the terms of the version
+of the License under which You originally received the Covered Software,
+or under the terms of any subsequent version published by the license
+steward.
+
+10.3. Modified Versions
+
+If you create software not governed by this License, and you want to
+create a new license for such software, you may create and use a
+modified version of this License if you rename the license and remove
+any references to the name of the license steward (except to note that
+such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+Licenses
+
+If You choose to distribute Source Code Form that is Incompatible With
+Secondary Licenses under the terms of this version of the License, the
+notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+-------------------------------------------
+
+ This Source Code Form is subject to the terms of the Mozilla Public
+ License, v. 2.0. If a copy of the MPL was not distributed with this
+ file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular
+file, then You may include the notice in a location (such as a LICENSE
+file in a relevant directory) where a recipient would be likely to look
+for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+---------------------------------------------------------
+
+ This Source Code Form is "Incompatible With Secondary Licenses", as
+ defined by the Mozilla Public License, v. 2.0.
diff --git a/deps/rabbitmq_peer_discovery_k8s/Makefile b/deps/rabbitmq_peer_discovery_k8s/Makefile
new file mode 100644
index 0000000000..12c3ade92a
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_k8s/Makefile
@@ -0,0 +1,19 @@
+PROJECT = rabbitmq_peer_discovery_k8s
+PROJECT_DESCRIPTION = Kubernetes-based RabbitMQ peer discovery backend
+PROJECT_MOD = rabbitmq_peer_discovery_k8s_app
+
+DEPS = rabbit_common rabbitmq_peer_discovery_common rabbit
+TEST_DEPS = rabbitmq_ct_helpers rabbitmq_ct_client_helpers ct_helper meck
+dep_ct_helper = git https://github.com/extend/ct_helper.git master
+
+DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-plugin.mk
+DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk
+
+# FIXME: Use erlang.mk patched for RabbitMQ, while waiting for PRs to be
+# reviewed and merged.
+
+ERLANG_MK_REPO = https://github.com/rabbitmq/erlang.mk.git
+ERLANG_MK_COMMIT = rabbitmq-tmp
+
+include rabbitmq-components.mk
+include erlang.mk
diff --git a/deps/rabbitmq_peer_discovery_k8s/README.md b/deps/rabbitmq_peer_discovery_k8s/README.md
new file mode 100644
index 0000000000..faaad4591d
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_k8s/README.md
@@ -0,0 +1,61 @@
+# RabbitMQ Peer Discovery Kubernetes
+
+## Overview
+
+This is an implementation of the RabbitMQ [peer discovery interface](https://www.rabbitmq.com/blog/2018/02/12/peer-discovery-subsystem-in-rabbitmq-3-7/)
+for Kubernetes.
+
+This plugin only performs peer discovery, using the Kubernetes API as the source of data on running cluster pods.
+Please get familiar with [RabbitMQ clustering fundamentals](https://rabbitmq.com/clustering.html) before attempting
+to use it.
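+
+For illustration, a minimal `rabbitmq.conf` using this backend could look like
+this (the host value below is an example; see the Cluster Formation guide
+linked under Documentation for all supported settings):
+
+```
+cluster_formation.peer_discovery_backend = rabbit_peer_discovery_k8s
+cluster_formation.k8s.host = kubernetes.default.svc.cluster.local
+```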
+
+Cluster provisioning and most of Day 2 operations such as [proper monitoring](https://rabbitmq.com/monitoring.html)
+are not in scope for this plugin.
+
+For a more comprehensive open source RabbitMQ on Kubernetes deployment solution,
+see the [RabbitMQ Cluster Operator for Kubernetes](https://www.rabbitmq.com/kubernetes/operator/operator-overview.html).
+The Operator is developed [on GitHub](https://github.com/rabbitmq/cluster-operator/) and contains its
+own [set of examples](https://github.com/rabbitmq/cluster-operator/tree/master/docs/examples).
+
+
+## Supported RabbitMQ Versions
+
+This plugin ships with RabbitMQ 3.7.0 or later.
+
+
+## Installation
+
+This plugin ships with [supported RabbitMQ versions](https://www.rabbitmq.com/versions.html).
+There is no need to install it separately.
+
+As with any [plugin](https://rabbitmq.com/plugins.html), it must be enabled before it
+can be used. For peer discovery plugins, this means they must be [enabled](https://rabbitmq.com/plugins.html#basics) or [preconfigured](https://rabbitmq.com/plugins.html#enabled-plugins-file)
+before the first node boot:
+
+```
+rabbitmq-plugins --offline enable rabbitmq_peer_discovery_k8s
+```
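+
+To verify that the plugin is enabled, one can list explicitly enabled plugins
+on a running node:
+
+```
+rabbitmq-plugins list -e rabbitmq_peer_discovery_k8s
+```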
+
+## Documentation
+
+See [RabbitMQ Cluster Formation guide](https://www.rabbitmq.com/cluster-formation.html) for an overview
+of the peer discovery subsystem, general and Kubernetes-specific configurable values and troubleshooting tips.
+
+Example deployments that use this plugin can be found in the [RabbitMQ on Kubernetes examples repository](https://github.com/rabbitmq/diy-kubernetes-examples).
+Note that they are just that: examples. They won't be optimal for every use case, nor do they cover many important
+production system concerns such as monitoring, persistent volume settings, access control, sizing, and so on.
+
+
+## Contributing
+
+See [CONTRIBUTING.md](./CONTRIBUTING.md) and our [development process overview](https://www.rabbitmq.com/github.html).
+
+
+## License
+
+[Licensed under the MPL](LICENSE-MPL-RabbitMQ), same as RabbitMQ server.
+
+
+## Copyright
+
+(c) 2007-2020 VMware, Inc. or its affiliates.
diff --git a/deps/rabbitmq_peer_discovery_k8s/erlang.mk b/deps/rabbitmq_peer_discovery_k8s/erlang.mk
new file mode 100644
index 0000000000..fce4be0b0a
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_k8s/erlang.mk
@@ -0,0 +1,7808 @@
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+.PHONY: all app apps deps search rel relup docs install-docs check tests clean distclean help erlang-mk
+
+ERLANG_MK_FILENAME := $(realpath $(lastword $(MAKEFILE_LIST)))
+export ERLANG_MK_FILENAME
+
+ERLANG_MK_VERSION = 2019.07.01-40-geb3e4b0
+ERLANG_MK_WITHOUT =
+
+# Make 3.81 and 3.82 are deprecated.
+
+ifeq ($(MAKELEVEL)$(MAKE_VERSION),03.81)
+$(warning Please upgrade to GNU Make 4 or later: https://erlang.mk/guide/installation.html)
+endif
+
+ifeq ($(MAKELEVEL)$(MAKE_VERSION),03.82)
+$(warning Please upgrade to GNU Make 4 or later: https://erlang.mk/guide/installation.html)
+endif
+
+# Core configuration.
+
+PROJECT ?= $(notdir $(CURDIR))
+PROJECT := $(strip $(PROJECT))
+
+PROJECT_VERSION ?= rolling
+PROJECT_MOD ?= $(PROJECT)_app
+PROJECT_ENV ?= []
+
+# Verbosity.
+
+V ?= 0
+
+verbose_0 = @
+verbose_2 = set -x;
+verbose = $(verbose_$(V))
+
+ifeq ($(V),3)
+SHELL := $(SHELL) -x
+endif
+
+gen_verbose_0 = @echo " GEN " $@;
+gen_verbose_2 = set -x;
+gen_verbose = $(gen_verbose_$(V))
+
+gen_verbose_esc_0 = @echo " GEN " $$@;
+gen_verbose_esc_2 = set -x;
+gen_verbose_esc = $(gen_verbose_esc_$(V))
+
+# Temporary files directory.
+
+ERLANG_MK_TMP ?= $(CURDIR)/.erlang.mk
+export ERLANG_MK_TMP
+
+# "erl" command.
+
+ERL = erl +A1 -noinput -boot no_dot_erlang
+
+# Platform detection.
+
+ifeq ($(PLATFORM),)
+UNAME_S := $(shell uname -s)
+
+ifeq ($(UNAME_S),Linux)
+PLATFORM = linux
+else ifeq ($(UNAME_S),Darwin)
+PLATFORM = darwin
+else ifeq ($(UNAME_S),SunOS)
+PLATFORM = solaris
+else ifeq ($(UNAME_S),GNU)
+PLATFORM = gnu
+else ifeq ($(UNAME_S),FreeBSD)
+PLATFORM = freebsd
+else ifeq ($(UNAME_S),NetBSD)
+PLATFORM = netbsd
+else ifeq ($(UNAME_S),OpenBSD)
+PLATFORM = openbsd
+else ifeq ($(UNAME_S),DragonFly)
+PLATFORM = dragonfly
+else ifeq ($(shell uname -o),Msys)
+PLATFORM = msys2
+else
+$(error Unable to detect platform. Please open a ticket with the output of uname -a.)
+endif
+
+export PLATFORM
+endif
+
+# Core targets.
+
+all:: deps app rel
+
+# Noop to avoid a Make warning when there's nothing to do.
+rel::
+ $(verbose) :
+
+relup:: deps app
+
+check:: tests
+
+clean:: clean-crashdump
+
+clean-crashdump:
+ifneq ($(wildcard erl_crash.dump),)
+ $(gen_verbose) rm -f erl_crash.dump
+endif
+
+distclean:: clean distclean-tmp
+
+$(ERLANG_MK_TMP):
+ $(verbose) mkdir -p $(ERLANG_MK_TMP)
+
+distclean-tmp:
+ $(gen_verbose) rm -rf $(ERLANG_MK_TMP)
+
+help::
+ $(verbose) printf "%s\n" \
+ "erlang.mk (version $(ERLANG_MK_VERSION)) is distributed under the terms of the ISC License." \
+ "Copyright (c) 2013-2016 Loïc Hoguin <essen@ninenines.eu>" \
+ "" \
+ "Usage: [V=1] $(MAKE) [target]..." \
+ "" \
+ "Core targets:" \
+ " all Run deps, app and rel targets in that order" \
+ " app Compile the project" \
+ " deps Fetch dependencies (if needed) and compile them" \
+ " fetch-deps Fetch dependencies recursively (if needed) without compiling them" \
+ " list-deps List dependencies recursively on stdout" \
+ " search q=... Search for a package in the built-in index" \
+ " rel Build a release for this project, if applicable" \
+ " docs Build the documentation for this project" \
+ " install-docs Install the man pages for this project" \
+ " check Compile and run all tests and analysis for this project" \
+ " tests Run the tests for this project" \
+ " clean Delete temporary and output files from most targets" \
+ " distclean Delete all temporary and output files" \
+ " help Display this help and exit" \
+ " erlang-mk Update erlang.mk to the latest version"
+
+# Core functions.
+
+empty :=
+space := $(empty) $(empty)
+tab := $(empty) $(empty)
+comma := ,
+
+define newline
+
+
+endef
+
+define comma_list
+$(subst $(space),$(comma),$(strip $(1)))
+endef
+
+define escape_dquotes
+$(subst ",\",$1)
+endef
+
+# Adding erlang.mk to make Erlang scripts who call init:get_plain_arguments() happy.
+define erlang
+$(ERL) $2 -pz $(ERLANG_MK_TMP)/rebar/ebin -eval "$(subst $(newline),,$(call escape_dquotes,$1))" -- erlang.mk
+endef
+
+ifeq ($(PLATFORM),msys2)
+core_native_path = $(shell cygpath -m $1)
+else
+core_native_path = $1
+endif
+
+core_http_get = curl -Lf$(if $(filter-out 0,$(V)),,s)o $(call core_native_path,$1) $2
+
+core_eq = $(and $(findstring $(1),$(2)),$(findstring $(2),$(1)))
+
+# We skip files that contain spaces because they end up causing issues.
+core_find = $(if $(wildcard $1),$(shell find $(1:%/=%) \( -type l -o -type f \) -name $(subst *,\*,$2) | grep -v " "))
+
+core_lc = $(subst A,a,$(subst B,b,$(subst C,c,$(subst D,d,$(subst E,e,$(subst F,f,$(subst G,g,$(subst H,h,$(subst I,i,$(subst J,j,$(subst K,k,$(subst L,l,$(subst M,m,$(subst N,n,$(subst O,o,$(subst P,p,$(subst Q,q,$(subst R,r,$(subst S,s,$(subst T,t,$(subst U,u,$(subst V,v,$(subst W,w,$(subst X,x,$(subst Y,y,$(subst Z,z,$(1)))))))))))))))))))))))))))
+
+core_ls = $(filter-out $(1),$(shell echo $(1)))
+
+# @todo Use a solution that does not require using perl.
+core_relpath = $(shell perl -e 'use File::Spec; print File::Spec->abs2rel(@ARGV) . "\n"' $1 $2)
+
+define core_render
+ printf -- '$(subst $(newline),\n,$(subst %,%%,$(subst ','\'',$(subst $(tab),$(WS),$(call $(1))))))\n' > $(2)
+endef
+
+# Automated update.
+
+ERLANG_MK_REPO ?= https://github.com/ninenines/erlang.mk
+ERLANG_MK_COMMIT ?=
+ERLANG_MK_BUILD_CONFIG ?= build.config
+ERLANG_MK_BUILD_DIR ?= .erlang.mk.build
+
+erlang-mk: WITHOUT ?= $(ERLANG_MK_WITHOUT)
+erlang-mk:
+ifdef ERLANG_MK_COMMIT
+ $(verbose) git clone $(ERLANG_MK_REPO) $(ERLANG_MK_BUILD_DIR)
+ $(verbose) cd $(ERLANG_MK_BUILD_DIR) && git checkout $(ERLANG_MK_COMMIT)
+else
+ $(verbose) git clone --depth 1 $(ERLANG_MK_REPO) $(ERLANG_MK_BUILD_DIR)
+endif
+ $(verbose) if [ -f $(ERLANG_MK_BUILD_CONFIG) ]; then cp $(ERLANG_MK_BUILD_CONFIG) $(ERLANG_MK_BUILD_DIR)/build.config; fi
+ $(gen_verbose) $(MAKE) --no-print-directory -C $(ERLANG_MK_BUILD_DIR) WITHOUT='$(strip $(WITHOUT))' UPGRADE=1
+ $(verbose) cp $(ERLANG_MK_BUILD_DIR)/erlang.mk ./erlang.mk
+ $(verbose) rm -rf $(ERLANG_MK_BUILD_DIR)
+ $(verbose) rm -rf $(ERLANG_MK_TMP)
+
+# The erlang.mk package index is bundled in the default erlang.mk build.
+# Search for the string "copyright" to skip to the rest of the code.
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-kerl
+
+KERL_INSTALL_DIR ?= $(HOME)/erlang
+
+ifeq ($(strip $(KERL)),)
+KERL := $(ERLANG_MK_TMP)/kerl/kerl
+endif
+
+KERL_DIR = $(ERLANG_MK_TMP)/kerl
+
+export KERL
+
+KERL_GIT ?= https://github.com/kerl/kerl
+KERL_COMMIT ?= master
+
+KERL_MAKEFLAGS ?=
+
+OTP_GIT ?= https://github.com/erlang/otp
+
+define kerl_otp_target
+$(KERL_INSTALL_DIR)/$(1): $(KERL)
+ $(verbose) if [ ! -d $$@ ]; then \
+ MAKEFLAGS="$(KERL_MAKEFLAGS)" $(KERL) build git $(OTP_GIT) $(1) $(1); \
+ $(KERL) install $(1) $(KERL_INSTALL_DIR)/$(1); \
+ fi
+endef
+
+define kerl_hipe_target
+$(KERL_INSTALL_DIR)/$1-native: $(KERL)
+ $(verbose) if [ ! -d $$@ ]; then \
+ KERL_CONFIGURE_OPTIONS=--enable-native-libs \
+ MAKEFLAGS="$(KERL_MAKEFLAGS)" $(KERL) build git $(OTP_GIT) $1 $1-native; \
+ $(KERL) install $1-native $(KERL_INSTALL_DIR)/$1-native; \
+ fi
+endef
+
+$(KERL): $(KERL_DIR)
+
+$(KERL_DIR): | $(ERLANG_MK_TMP)
+ $(gen_verbose) git clone --depth 1 $(KERL_GIT) $(ERLANG_MK_TMP)/kerl
+ $(verbose) cd $(ERLANG_MK_TMP)/kerl && git checkout $(KERL_COMMIT)
+ $(verbose) chmod +x $(KERL)
+
+distclean:: distclean-kerl
+
+distclean-kerl:
+ $(gen_verbose) rm -rf $(KERL_DIR)
+
+# Allow users to select which version of Erlang/OTP to use for a project.
+
+ifneq ($(strip $(LATEST_ERLANG_OTP)),)
+# In some environments it is necessary to filter out master.
+ERLANG_OTP := $(notdir $(lastword $(sort\
+ $(filter-out $(KERL_INSTALL_DIR)/master $(KERL_INSTALL_DIR)/OTP_R%,\
+ $(filter-out %-rc1 %-rc2 %-rc3,$(wildcard $(KERL_INSTALL_DIR)/*[^-native]))))))
+endif
+
+ERLANG_OTP ?=
+ERLANG_HIPE ?=
+
+# Use kerl to enforce a specific Erlang/OTP version for a project.
+ifneq ($(strip $(ERLANG_OTP)),)
+export PATH := $(KERL_INSTALL_DIR)/$(ERLANG_OTP)/bin:$(PATH)
+SHELL := env PATH=$(PATH) $(SHELL)
+$(eval $(call kerl_otp_target,$(ERLANG_OTP)))
+
+# Build Erlang/OTP only if it doesn't already exist.
+ifeq ($(wildcard $(KERL_INSTALL_DIR)/$(ERLANG_OTP))$(BUILD_ERLANG_OTP),)
+$(info Building Erlang/OTP $(ERLANG_OTP)... Please wait...)
+$(shell $(MAKE) $(KERL_INSTALL_DIR)/$(ERLANG_OTP) ERLANG_OTP=$(ERLANG_OTP) BUILD_ERLANG_OTP=1 >&2)
+endif
+
+else
+# Same for a HiPE enabled VM.
+ifneq ($(strip $(ERLANG_HIPE)),)
+export PATH := $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native/bin:$(PATH)
+SHELL := env PATH=$(PATH) $(SHELL)
+$(eval $(call kerl_hipe_target,$(ERLANG_HIPE)))
+
+# Build Erlang/OTP only if it doesn't already exist.
+ifeq ($(wildcard $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native)$(BUILD_ERLANG_OTP),)
+$(info Building HiPE-enabled Erlang/OTP $(ERLANG_OTP)... Please wait...)
+$(shell $(MAKE) $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native ERLANG_HIPE=$(ERLANG_HIPE) BUILD_ERLANG_OTP=1 >&2)
+endif
+
+endif
+endif
+
+PACKAGES += aberth
+pkg_aberth_name = aberth
+pkg_aberth_description = Generic BERT-RPC server in Erlang
+pkg_aberth_homepage = https://github.com/a13x/aberth
+pkg_aberth_fetch = git
+pkg_aberth_repo = https://github.com/a13x/aberth
+pkg_aberth_commit = master
+
+PACKAGES += active
+pkg_active_name = active
+pkg_active_description = Active development for Erlang: rebuild and reload source/binary files while the VM is running
+pkg_active_homepage = https://github.com/proger/active
+pkg_active_fetch = git
+pkg_active_repo = https://github.com/proger/active
+pkg_active_commit = master
+
+PACKAGES += actordb_core
+pkg_actordb_core_name = actordb_core
+pkg_actordb_core_description = ActorDB main source
+pkg_actordb_core_homepage = http://www.actordb.com/
+pkg_actordb_core_fetch = git
+pkg_actordb_core_repo = https://github.com/biokoda/actordb_core
+pkg_actordb_core_commit = master
+
+PACKAGES += actordb_thrift
+pkg_actordb_thrift_name = actordb_thrift
+pkg_actordb_thrift_description = Thrift API for ActorDB
+pkg_actordb_thrift_homepage = http://www.actordb.com/
+pkg_actordb_thrift_fetch = git
+pkg_actordb_thrift_repo = https://github.com/biokoda/actordb_thrift
+pkg_actordb_thrift_commit = master
+
+PACKAGES += aleppo
+pkg_aleppo_name = aleppo
+pkg_aleppo_description = Alternative Erlang Pre-Processor
+pkg_aleppo_homepage = https://github.com/ErlyORM/aleppo
+pkg_aleppo_fetch = git
+pkg_aleppo_repo = https://github.com/ErlyORM/aleppo
+pkg_aleppo_commit = master
+
+PACKAGES += alog
+pkg_alog_name = alog
+pkg_alog_description = Simply the best logging framework for Erlang
+pkg_alog_homepage = https://github.com/siberian-fast-food/alogger
+pkg_alog_fetch = git
+pkg_alog_repo = https://github.com/siberian-fast-food/alogger
+pkg_alog_commit = master
+
+PACKAGES += amqp_client
+pkg_amqp_client_name = amqp_client
+pkg_amqp_client_description = RabbitMQ Erlang AMQP client
+pkg_amqp_client_homepage = https://www.rabbitmq.com/erlang-client-user-guide.html
+pkg_amqp_client_fetch = git
+pkg_amqp_client_repo = https://github.com/rabbitmq/rabbitmq-erlang-client.git
+pkg_amqp_client_commit = master
+
+PACKAGES += annotations
+pkg_annotations_name = annotations
+pkg_annotations_description = Simple code instrumentation utilities
+pkg_annotations_homepage = https://github.com/hyperthunk/annotations
+pkg_annotations_fetch = git
+pkg_annotations_repo = https://github.com/hyperthunk/annotations
+pkg_annotations_commit = master
+
+PACKAGES += antidote
+pkg_antidote_name = antidote
+pkg_antidote_description = Large-scale computation without synchronisation
+pkg_antidote_homepage = https://syncfree.lip6.fr/
+pkg_antidote_fetch = git
+pkg_antidote_repo = https://github.com/SyncFree/antidote
+pkg_antidote_commit = master
+
+PACKAGES += apns
+pkg_apns_name = apns
+pkg_apns_description = Apple Push Notification Server for Erlang
+pkg_apns_homepage = http://inaka.github.com/apns4erl
+pkg_apns_fetch = git
+pkg_apns_repo = https://github.com/inaka/apns4erl
+pkg_apns_commit = master
+
+PACKAGES += asciideck
+pkg_asciideck_name = asciideck
+pkg_asciideck_description = Asciidoc for Erlang.
+pkg_asciideck_homepage = https://ninenines.eu
+pkg_asciideck_fetch = git
+pkg_asciideck_repo = https://github.com/ninenines/asciideck
+pkg_asciideck_commit = master
+
+PACKAGES += azdht
+pkg_azdht_name = azdht
+pkg_azdht_description = Azureus Distributed Hash Table (DHT) in Erlang
+pkg_azdht_homepage = https://github.com/arcusfelis/azdht
+pkg_azdht_fetch = git
+pkg_azdht_repo = https://github.com/arcusfelis/azdht
+pkg_azdht_commit = master
+
+PACKAGES += backoff
+pkg_backoff_name = backoff
+pkg_backoff_description = Simple exponential backoffs in Erlang
+pkg_backoff_homepage = https://github.com/ferd/backoff
+pkg_backoff_fetch = git
+pkg_backoff_repo = https://github.com/ferd/backoff
+pkg_backoff_commit = master
+
+PACKAGES += barrel_tcp
+pkg_barrel_tcp_name = barrel_tcp
+pkg_barrel_tcp_description = barrel is a generic TCP acceptor pool with low latency in Erlang.
+pkg_barrel_tcp_homepage = https://github.com/benoitc-attic/barrel_tcp
+pkg_barrel_tcp_fetch = git
+pkg_barrel_tcp_repo = https://github.com/benoitc-attic/barrel_tcp
+pkg_barrel_tcp_commit = master
+
+PACKAGES += basho_bench
+pkg_basho_bench_name = basho_bench
+pkg_basho_bench_description = A load-generation and testing tool for basically whatever you can write a returning Erlang function for.
+pkg_basho_bench_homepage = https://github.com/basho/basho_bench
+pkg_basho_bench_fetch = git
+pkg_basho_bench_repo = https://github.com/basho/basho_bench
+pkg_basho_bench_commit = master
+
+PACKAGES += bcrypt
+pkg_bcrypt_name = bcrypt
+pkg_bcrypt_description = Bcrypt Erlang / C library
+pkg_bcrypt_homepage = https://github.com/erlangpack/bcrypt
+pkg_bcrypt_fetch = git
+pkg_bcrypt_repo = https://github.com/erlangpack/bcrypt.git
+pkg_bcrypt_commit = master
+
+PACKAGES += beam
+pkg_beam_name = beam
+pkg_beam_description = BEAM emulator written in Erlang
+pkg_beam_homepage = https://github.com/tonyrog/beam
+pkg_beam_fetch = git
+pkg_beam_repo = https://github.com/tonyrog/beam
+pkg_beam_commit = master
+
+PACKAGES += beanstalk
+pkg_beanstalk_name = beanstalk
+pkg_beanstalk_description = An Erlang client for beanstalkd
+pkg_beanstalk_homepage = https://github.com/tim/erlang-beanstalk
+pkg_beanstalk_fetch = git
+pkg_beanstalk_repo = https://github.com/tim/erlang-beanstalk
+pkg_beanstalk_commit = master
+
+PACKAGES += bear
+pkg_bear_name = bear
+pkg_bear_description = a set of statistics functions for erlang
+pkg_bear_homepage = https://github.com/boundary/bear
+pkg_bear_fetch = git
+pkg_bear_repo = https://github.com/boundary/bear
+pkg_bear_commit = master
+
+PACKAGES += bertconf
+pkg_bertconf_name = bertconf
+pkg_bertconf_description = Make ETS tables out of static BERT files that are auto-reloaded
+pkg_bertconf_homepage = https://github.com/ferd/bertconf
+pkg_bertconf_fetch = git
+pkg_bertconf_repo = https://github.com/ferd/bertconf
+pkg_bertconf_commit = master
+
+PACKAGES += bifrost
+pkg_bifrost_name = bifrost
+pkg_bifrost_description = Erlang FTP Server Framework
+pkg_bifrost_homepage = https://github.com/thorstadt/bifrost
+pkg_bifrost_fetch = git
+pkg_bifrost_repo = https://github.com/thorstadt/bifrost
+pkg_bifrost_commit = master
+
+PACKAGES += binpp
+pkg_binpp_name = binpp
+pkg_binpp_description = Erlang Binary Pretty Printer
+pkg_binpp_homepage = https://github.com/jtendo/binpp
+pkg_binpp_fetch = git
+pkg_binpp_repo = https://github.com/jtendo/binpp
+pkg_binpp_commit = master
+
+PACKAGES += bisect
+pkg_bisect_name = bisect
+pkg_bisect_description = Ordered fixed-size binary dictionary in Erlang
+pkg_bisect_homepage = https://github.com/knutin/bisect
+pkg_bisect_fetch = git
+pkg_bisect_repo = https://github.com/knutin/bisect
+pkg_bisect_commit = master
+
+PACKAGES += bitcask
+pkg_bitcask_name = bitcask
+pkg_bitcask_description = because you need another key/value storage engine
+pkg_bitcask_homepage = https://github.com/basho/bitcask
+pkg_bitcask_fetch = git
+pkg_bitcask_repo = https://github.com/basho/bitcask
+pkg_bitcask_commit = develop
+
+PACKAGES += bitstore
+pkg_bitstore_name = bitstore
+pkg_bitstore_description = A document based ontology development environment
+pkg_bitstore_homepage = https://github.com/bdionne/bitstore
+pkg_bitstore_fetch = git
+pkg_bitstore_repo = https://github.com/bdionne/bitstore
+pkg_bitstore_commit = master
+
+PACKAGES += bootstrap
+pkg_bootstrap_name = bootstrap
+pkg_bootstrap_description = A simple, yet powerful Erlang cluster bootstrapping application.
+pkg_bootstrap_homepage = https://github.com/schlagert/bootstrap
+pkg_bootstrap_fetch = git
+pkg_bootstrap_repo = https://github.com/schlagert/bootstrap
+pkg_bootstrap_commit = master
+
+PACKAGES += boss
+pkg_boss_name = boss
+pkg_boss_description = Erlang web MVC, now featuring Comet
+pkg_boss_homepage = https://github.com/ChicagoBoss/ChicagoBoss
+pkg_boss_fetch = git
+pkg_boss_repo = https://github.com/ChicagoBoss/ChicagoBoss
+pkg_boss_commit = master
+
+PACKAGES += boss_db
+pkg_boss_db_name = boss_db
+pkg_boss_db_description = BossDB: a sharded, caching, pooling, evented ORM for Erlang
+pkg_boss_db_homepage = https://github.com/ErlyORM/boss_db
+pkg_boss_db_fetch = git
+pkg_boss_db_repo = https://github.com/ErlyORM/boss_db
+pkg_boss_db_commit = master
+
+PACKAGES += brod
+pkg_brod_name = brod
+pkg_brod_description = Kafka client in Erlang
+pkg_brod_homepage = https://github.com/klarna/brod
+pkg_brod_fetch = git
+pkg_brod_repo = https://github.com/klarna/brod.git
+pkg_brod_commit = master
+
+PACKAGES += bson
+pkg_bson_name = bson
+pkg_bson_description = BSON documents in Erlang, see bsonspec.org
+pkg_bson_homepage = https://github.com/comtihon/bson-erlang
+pkg_bson_fetch = git
+pkg_bson_repo = https://github.com/comtihon/bson-erlang
+pkg_bson_commit = master
+
+PACKAGES += bullet
+pkg_bullet_name = bullet
+pkg_bullet_description = Simple, reliable, efficient streaming for Cowboy.
+pkg_bullet_homepage = http://ninenines.eu
+pkg_bullet_fetch = git
+pkg_bullet_repo = https://github.com/ninenines/bullet
+pkg_bullet_commit = master
+
+PACKAGES += cache
+pkg_cache_name = cache
+pkg_cache_description = Erlang in-memory cache
+pkg_cache_homepage = https://github.com/fogfish/cache
+pkg_cache_fetch = git
+pkg_cache_repo = https://github.com/fogfish/cache
+pkg_cache_commit = master
+
+PACKAGES += cake
+pkg_cake_name = cake
+pkg_cake_description = Really simple terminal colorization
+pkg_cake_homepage = https://github.com/darach/cake-erl
+pkg_cake_fetch = git
+pkg_cake_repo = https://github.com/darach/cake-erl
+pkg_cake_commit = master
+
+PACKAGES += carotene
+pkg_carotene_name = carotene
+pkg_carotene_description = Real-time server
+pkg_carotene_homepage = https://github.com/carotene/carotene
+pkg_carotene_fetch = git
+pkg_carotene_repo = https://github.com/carotene/carotene
+pkg_carotene_commit = master
+
+PACKAGES += cberl
+pkg_cberl_name = cberl
+pkg_cberl_description = NIF based Erlang bindings for Couchbase
+pkg_cberl_homepage = https://github.com/chitika/cberl
+pkg_cberl_fetch = git
+pkg_cberl_repo = https://github.com/chitika/cberl
+pkg_cberl_commit = master
+
+PACKAGES += cecho
+pkg_cecho_name = cecho
+pkg_cecho_description = An ncurses library for Erlang
+pkg_cecho_homepage = https://github.com/mazenharake/cecho
+pkg_cecho_fetch = git
+pkg_cecho_repo = https://github.com/mazenharake/cecho
+pkg_cecho_commit = master
+
+PACKAGES += cferl
+pkg_cferl_name = cferl
+pkg_cferl_description = Rackspace / Open Stack Cloud Files Erlang Client
+pkg_cferl_homepage = https://github.com/ddossot/cferl
+pkg_cferl_fetch = git
+pkg_cferl_repo = https://github.com/ddossot/cferl
+pkg_cferl_commit = master
+
+PACKAGES += chaos_monkey
+pkg_chaos_monkey_name = chaos_monkey
+pkg_chaos_monkey_description = This is The CHAOS MONKEY. It will kill your processes.
+pkg_chaos_monkey_homepage = https://github.com/dLuna/chaos_monkey
+pkg_chaos_monkey_fetch = git
+pkg_chaos_monkey_repo = https://github.com/dLuna/chaos_monkey
+pkg_chaos_monkey_commit = master
+
+PACKAGES += check_node
+pkg_check_node_name = check_node
+pkg_check_node_description = Nagios Scripts for monitoring Riak
+pkg_check_node_homepage = https://github.com/basho-labs/riak_nagios
+pkg_check_node_fetch = git
+pkg_check_node_repo = https://github.com/basho-labs/riak_nagios
+pkg_check_node_commit = master
+
+PACKAGES += chronos
+pkg_chronos_name = chronos
+pkg_chronos_description = Timer module for Erlang that makes it easy to abstract time out of the tests.
+pkg_chronos_homepage = https://github.com/lehoff/chronos
+pkg_chronos_fetch = git
+pkg_chronos_repo = https://github.com/lehoff/chronos
+pkg_chronos_commit = master
+
+PACKAGES += chumak
+pkg_chumak_name = chumak
+pkg_chumak_description = Pure Erlang implementation of ZeroMQ Message Transport Protocol.
+pkg_chumak_homepage = http://choven.ca
+pkg_chumak_fetch = git
+pkg_chumak_repo = https://github.com/chovencorp/chumak
+pkg_chumak_commit = master
+
+PACKAGES += cl
+pkg_cl_name = cl
+pkg_cl_description = OpenCL binding for Erlang
+pkg_cl_homepage = https://github.com/tonyrog/cl
+pkg_cl_fetch = git
+pkg_cl_repo = https://github.com/tonyrog/cl
+pkg_cl_commit = master
+
+PACKAGES += clique
+pkg_clique_name = clique
+pkg_clique_description = CLI Framework for Erlang
+pkg_clique_homepage = https://github.com/basho/clique
+pkg_clique_fetch = git
+pkg_clique_repo = https://github.com/basho/clique
+pkg_clique_commit = develop
+
+PACKAGES += cloudi_core
+pkg_cloudi_core_name = cloudi_core
+pkg_cloudi_core_description = CloudI internal service runtime
+pkg_cloudi_core_homepage = http://cloudi.org/
+pkg_cloudi_core_fetch = git
+pkg_cloudi_core_repo = https://github.com/CloudI/cloudi_core
+pkg_cloudi_core_commit = master
+
+PACKAGES += cloudi_service_api_requests
+pkg_cloudi_service_api_requests_name = cloudi_service_api_requests
+pkg_cloudi_service_api_requests_description = CloudI Service API requests (JSON-RPC/Erlang-term support)
+pkg_cloudi_service_api_requests_homepage = http://cloudi.org/
+pkg_cloudi_service_api_requests_fetch = git
+pkg_cloudi_service_api_requests_repo = https://github.com/CloudI/cloudi_service_api_requests
+pkg_cloudi_service_api_requests_commit = master
+
+PACKAGES += cloudi_service_db
+pkg_cloudi_service_db_name = cloudi_service_db
+pkg_cloudi_service_db_description = CloudI Database (in-memory/testing/generic)
+pkg_cloudi_service_db_homepage = http://cloudi.org/
+pkg_cloudi_service_db_fetch = git
+pkg_cloudi_service_db_repo = https://github.com/CloudI/cloudi_service_db
+pkg_cloudi_service_db_commit = master
+
+PACKAGES += cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_name = cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_description = Cassandra CloudI Service
+pkg_cloudi_service_db_cassandra_homepage = http://cloudi.org/
+pkg_cloudi_service_db_cassandra_fetch = git
+pkg_cloudi_service_db_cassandra_repo = https://github.com/CloudI/cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_commit = master
+
+PACKAGES += cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_name = cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_description = Cassandra CQL CloudI Service
+pkg_cloudi_service_db_cassandra_cql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_cassandra_cql_fetch = git
+pkg_cloudi_service_db_cassandra_cql_repo = https://github.com/CloudI/cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_commit = master
+
+PACKAGES += cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_name = cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_description = CouchDB CloudI Service
+pkg_cloudi_service_db_couchdb_homepage = http://cloudi.org/
+pkg_cloudi_service_db_couchdb_fetch = git
+pkg_cloudi_service_db_couchdb_repo = https://github.com/CloudI/cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_commit = master
+
+PACKAGES += cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_name = cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_description = elasticsearch CloudI Service
+pkg_cloudi_service_db_elasticsearch_homepage = http://cloudi.org/
+pkg_cloudi_service_db_elasticsearch_fetch = git
+pkg_cloudi_service_db_elasticsearch_repo = https://github.com/CloudI/cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_commit = master
+
+PACKAGES += cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_name = cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_description = memcached CloudI Service
+pkg_cloudi_service_db_memcached_homepage = http://cloudi.org/
+pkg_cloudi_service_db_memcached_fetch = git
+pkg_cloudi_service_db_memcached_repo = https://github.com/CloudI/cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_commit = master
+
+PACKAGES += cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_name = cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_description = MySQL CloudI Service
+pkg_cloudi_service_db_mysql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_mysql_fetch = git
+pkg_cloudi_service_db_mysql_repo = https://github.com/CloudI/cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_commit = master
+
+PACKAGES += cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_name = cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_description = PostgreSQL CloudI Service
+pkg_cloudi_service_db_pgsql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_pgsql_fetch = git
+pkg_cloudi_service_db_pgsql_repo = https://github.com/CloudI/cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_commit = master
+
+PACKAGES += cloudi_service_db_riak
+pkg_cloudi_service_db_riak_name = cloudi_service_db_riak
+pkg_cloudi_service_db_riak_description = Riak CloudI Service
+pkg_cloudi_service_db_riak_homepage = http://cloudi.org/
+pkg_cloudi_service_db_riak_fetch = git
+pkg_cloudi_service_db_riak_repo = https://github.com/CloudI/cloudi_service_db_riak
+pkg_cloudi_service_db_riak_commit = master
+
+PACKAGES += cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_name = cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_description = Tokyo Tyrant CloudI Service
+pkg_cloudi_service_db_tokyotyrant_homepage = http://cloudi.org/
+pkg_cloudi_service_db_tokyotyrant_fetch = git
+pkg_cloudi_service_db_tokyotyrant_repo = https://github.com/CloudI/cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_commit = master
+
+PACKAGES += cloudi_service_filesystem
+pkg_cloudi_service_filesystem_name = cloudi_service_filesystem
+pkg_cloudi_service_filesystem_description = Filesystem CloudI Service
+pkg_cloudi_service_filesystem_homepage = http://cloudi.org/
+pkg_cloudi_service_filesystem_fetch = git
+pkg_cloudi_service_filesystem_repo = https://github.com/CloudI/cloudi_service_filesystem
+pkg_cloudi_service_filesystem_commit = master
+
+PACKAGES += cloudi_service_http_client
+pkg_cloudi_service_http_client_name = cloudi_service_http_client
+pkg_cloudi_service_http_client_description = HTTP client CloudI Service
+pkg_cloudi_service_http_client_homepage = http://cloudi.org/
+pkg_cloudi_service_http_client_fetch = git
+pkg_cloudi_service_http_client_repo = https://github.com/CloudI/cloudi_service_http_client
+pkg_cloudi_service_http_client_commit = master
+
+PACKAGES += cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_name = cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_description = cowboy HTTP/HTTPS CloudI Service
+pkg_cloudi_service_http_cowboy_homepage = http://cloudi.org/
+pkg_cloudi_service_http_cowboy_fetch = git
+pkg_cloudi_service_http_cowboy_repo = https://github.com/CloudI/cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_commit = master
+
+PACKAGES += cloudi_service_http_elli
+pkg_cloudi_service_http_elli_name = cloudi_service_http_elli
+pkg_cloudi_service_http_elli_description = elli HTTP CloudI Service
+pkg_cloudi_service_http_elli_homepage = http://cloudi.org/
+pkg_cloudi_service_http_elli_fetch = git
+pkg_cloudi_service_http_elli_repo = https://github.com/CloudI/cloudi_service_http_elli
+pkg_cloudi_service_http_elli_commit = master
+
+PACKAGES += cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_name = cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_description = Map/Reduce CloudI Service
+pkg_cloudi_service_map_reduce_homepage = http://cloudi.org/
+pkg_cloudi_service_map_reduce_fetch = git
+pkg_cloudi_service_map_reduce_repo = https://github.com/CloudI/cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_commit = master
+
+PACKAGES += cloudi_service_oauth1
+pkg_cloudi_service_oauth1_name = cloudi_service_oauth1
+pkg_cloudi_service_oauth1_description = OAuth v1.0 CloudI Service
+pkg_cloudi_service_oauth1_homepage = http://cloudi.org/
+pkg_cloudi_service_oauth1_fetch = git
+pkg_cloudi_service_oauth1_repo = https://github.com/CloudI/cloudi_service_oauth1
+pkg_cloudi_service_oauth1_commit = master
+
+PACKAGES += cloudi_service_queue
+pkg_cloudi_service_queue_name = cloudi_service_queue
+pkg_cloudi_service_queue_description = Persistent Queue Service
+pkg_cloudi_service_queue_homepage = http://cloudi.org/
+pkg_cloudi_service_queue_fetch = git
+pkg_cloudi_service_queue_repo = https://github.com/CloudI/cloudi_service_queue
+pkg_cloudi_service_queue_commit = master
+
+PACKAGES += cloudi_service_quorum
+pkg_cloudi_service_quorum_name = cloudi_service_quorum
+pkg_cloudi_service_quorum_description = CloudI Quorum Service
+pkg_cloudi_service_quorum_homepage = http://cloudi.org/
+pkg_cloudi_service_quorum_fetch = git
+pkg_cloudi_service_quorum_repo = https://github.com/CloudI/cloudi_service_quorum
+pkg_cloudi_service_quorum_commit = master
+
+PACKAGES += cloudi_service_router
+pkg_cloudi_service_router_name = cloudi_service_router
+pkg_cloudi_service_router_description = CloudI Router Service
+pkg_cloudi_service_router_homepage = http://cloudi.org/
+pkg_cloudi_service_router_fetch = git
+pkg_cloudi_service_router_repo = https://github.com/CloudI/cloudi_service_router
+pkg_cloudi_service_router_commit = master
+
+PACKAGES += cloudi_service_tcp
+pkg_cloudi_service_tcp_name = cloudi_service_tcp
+pkg_cloudi_service_tcp_description = TCP CloudI Service
+pkg_cloudi_service_tcp_homepage = http://cloudi.org/
+pkg_cloudi_service_tcp_fetch = git
+pkg_cloudi_service_tcp_repo = https://github.com/CloudI/cloudi_service_tcp
+pkg_cloudi_service_tcp_commit = master
+
+PACKAGES += cloudi_service_timers
+pkg_cloudi_service_timers_name = cloudi_service_timers
+pkg_cloudi_service_timers_description = Timers CloudI Service
+pkg_cloudi_service_timers_homepage = http://cloudi.org/
+pkg_cloudi_service_timers_fetch = git
+pkg_cloudi_service_timers_repo = https://github.com/CloudI/cloudi_service_timers
+pkg_cloudi_service_timers_commit = master
+
+PACKAGES += cloudi_service_udp
+pkg_cloudi_service_udp_name = cloudi_service_udp
+pkg_cloudi_service_udp_description = UDP CloudI Service
+pkg_cloudi_service_udp_homepage = http://cloudi.org/
+pkg_cloudi_service_udp_fetch = git
+pkg_cloudi_service_udp_repo = https://github.com/CloudI/cloudi_service_udp
+pkg_cloudi_service_udp_commit = master
+
+PACKAGES += cloudi_service_validate
+pkg_cloudi_service_validate_name = cloudi_service_validate
+pkg_cloudi_service_validate_description = CloudI Validate Service
+pkg_cloudi_service_validate_homepage = http://cloudi.org/
+pkg_cloudi_service_validate_fetch = git
+pkg_cloudi_service_validate_repo = https://github.com/CloudI/cloudi_service_validate
+pkg_cloudi_service_validate_commit = master
+
+PACKAGES += cloudi_service_zeromq
+pkg_cloudi_service_zeromq_name = cloudi_service_zeromq
+pkg_cloudi_service_zeromq_description = ZeroMQ CloudI Service
+pkg_cloudi_service_zeromq_homepage = http://cloudi.org/
+pkg_cloudi_service_zeromq_fetch = git
+pkg_cloudi_service_zeromq_repo = https://github.com/CloudI/cloudi_service_zeromq
+pkg_cloudi_service_zeromq_commit = master
+
+PACKAGES += cluster_info
+pkg_cluster_info_name = cluster_info
+pkg_cluster_info_description = Fork of Hibari's nifty cluster_info OTP app
+pkg_cluster_info_homepage = https://github.com/basho/cluster_info
+pkg_cluster_info_fetch = git
+pkg_cluster_info_repo = https://github.com/basho/cluster_info
+pkg_cluster_info_commit = master
+
+PACKAGES += color
+pkg_color_name = color
+pkg_color_description = ANSI colors for your Erlang
+pkg_color_homepage = https://github.com/julianduque/erlang-color
+pkg_color_fetch = git
+pkg_color_repo = https://github.com/julianduque/erlang-color
+pkg_color_commit = master
+
+PACKAGES += confetti
+pkg_confetti_name = confetti
+pkg_confetti_description = Erlang configuration provider / application:get_env/2 on steroids
+pkg_confetti_homepage = https://github.com/jtendo/confetti
+pkg_confetti_fetch = git
+pkg_confetti_repo = https://github.com/jtendo/confetti
+pkg_confetti_commit = master
+
+PACKAGES += couchbeam
+pkg_couchbeam_name = couchbeam
+pkg_couchbeam_description = Apache CouchDB client in Erlang
+pkg_couchbeam_homepage = https://github.com/benoitc/couchbeam
+pkg_couchbeam_fetch = git
+pkg_couchbeam_repo = https://github.com/benoitc/couchbeam
+pkg_couchbeam_commit = master
+
+PACKAGES += covertool
+pkg_covertool_name = covertool
+pkg_covertool_description = Tool to convert Erlang cover data files into Cobertura XML reports
+pkg_covertool_homepage = https://github.com/idubrov/covertool
+pkg_covertool_fetch = git
+pkg_covertool_repo = https://github.com/idubrov/covertool
+pkg_covertool_commit = master
+
+PACKAGES += cowboy
+pkg_cowboy_name = cowboy
+pkg_cowboy_description = Small, fast and modular HTTP server.
+pkg_cowboy_homepage = http://ninenines.eu
+pkg_cowboy_fetch = git
+pkg_cowboy_repo = https://github.com/ninenines/cowboy
+pkg_cowboy_commit = 1.0.4
+
+PACKAGES += cowdb
+pkg_cowdb_name = cowdb
+pkg_cowdb_description = Pure Key/Value database library for Erlang Applications
+pkg_cowdb_homepage = https://github.com/refuge/cowdb
+pkg_cowdb_fetch = git
+pkg_cowdb_repo = https://github.com/refuge/cowdb
+pkg_cowdb_commit = master
+
+PACKAGES += cowlib
+pkg_cowlib_name = cowlib
+pkg_cowlib_description = Support library for manipulating Web protocols.
+pkg_cowlib_homepage = http://ninenines.eu
+pkg_cowlib_fetch = git
+pkg_cowlib_repo = https://github.com/ninenines/cowlib
+pkg_cowlib_commit = 1.0.2
+
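+# Override sketch (illustrative): version-pinned packages such as cowboy and
+# cowlib above can also be repointed wholesale with a full dep_* fetch spec:
+#
+#     DEPS = cowboy
+#     dep_cowboy = git https://github.com/ninenines/cowboy 1.0.4
+#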
+PACKAGES += cpg
+pkg_cpg_name = cpg
+pkg_cpg_description = CloudI Process Groups
+pkg_cpg_homepage = https://github.com/okeuday/cpg
+pkg_cpg_fetch = git
+pkg_cpg_repo = https://github.com/okeuday/cpg
+pkg_cpg_commit = master
+
+PACKAGES += cqerl
+pkg_cqerl_name = cqerl
+pkg_cqerl_description = Native Erlang CQL client for Cassandra
+pkg_cqerl_homepage = https://matehat.github.io/cqerl/
+pkg_cqerl_fetch = git
+pkg_cqerl_repo = https://github.com/matehat/cqerl
+pkg_cqerl_commit = master
+
+PACKAGES += cr
+pkg_cr_name = cr
+pkg_cr_description = Chain Replication
+pkg_cr_homepage = https://synrc.com/apps/cr/doc/cr.htm
+pkg_cr_fetch = git
+pkg_cr_repo = https://github.com/spawnproc/cr
+pkg_cr_commit = master
+
+PACKAGES += cuttlefish
+pkg_cuttlefish_name = cuttlefish
+pkg_cuttlefish_description = never lose your childlike sense of wonder baby cuttlefish, promise me?
+pkg_cuttlefish_homepage = https://github.com/basho/cuttlefish
+pkg_cuttlefish_fetch = git
+pkg_cuttlefish_repo = https://github.com/basho/cuttlefish
+pkg_cuttlefish_commit = master
+
+PACKAGES += damocles
+pkg_damocles_name = damocles
+pkg_damocles_description = Erlang library for generating adversarial network conditions for QAing distributed applications/systems on a single Linux box.
+pkg_damocles_homepage = https://github.com/lostcolony/damocles
+pkg_damocles_fetch = git
+pkg_damocles_repo = https://github.com/lostcolony/damocles
+pkg_damocles_commit = master
+
+PACKAGES += debbie
+pkg_debbie_name = debbie
+pkg_debbie_description = .DEB Built In Erlang
+pkg_debbie_homepage = https://github.com/crownedgrouse/debbie
+pkg_debbie_fetch = git
+pkg_debbie_repo = https://github.com/crownedgrouse/debbie
+pkg_debbie_commit = master
+
+PACKAGES += decimal
+pkg_decimal_name = decimal
+pkg_decimal_description = An Erlang decimal arithmetic library
+pkg_decimal_homepage = https://github.com/tim/erlang-decimal
+pkg_decimal_fetch = git
+pkg_decimal_repo = https://github.com/tim/erlang-decimal
+pkg_decimal_commit = master
+
+PACKAGES += detergent
+pkg_detergent_name = detergent
+pkg_detergent_description = An emulsifying Erlang SOAP library
+pkg_detergent_homepage = https://github.com/devinus/detergent
+pkg_detergent_fetch = git
+pkg_detergent_repo = https://github.com/devinus/detergent
+pkg_detergent_commit = master
+
+PACKAGES += detest
+pkg_detest_name = detest
+pkg_detest_description = Tool for running tests on a cluster of erlang nodes
+pkg_detest_homepage = https://github.com/biokoda/detest
+pkg_detest_fetch = git
+pkg_detest_repo = https://github.com/biokoda/detest
+pkg_detest_commit = master
+
+PACKAGES += dh_date
+pkg_dh_date_name = dh_date
+pkg_dh_date_description = Date formatting / parsing library for erlang
+pkg_dh_date_homepage = https://github.com/daleharvey/dh_date
+pkg_dh_date_fetch = git
+pkg_dh_date_repo = https://github.com/daleharvey/dh_date
+pkg_dh_date_commit = master
+
+PACKAGES += dirbusterl
+pkg_dirbusterl_name = dirbusterl
+pkg_dirbusterl_description = DirBuster successor in Erlang
+pkg_dirbusterl_homepage = https://github.com/silentsignal/DirBustErl
+pkg_dirbusterl_fetch = git
+pkg_dirbusterl_repo = https://github.com/silentsignal/DirBustErl
+pkg_dirbusterl_commit = master
+
+PACKAGES += dispcount
+pkg_dispcount_name = dispcount
+pkg_dispcount_description = Erlang task dispatcher based on ETS counters.
+pkg_dispcount_homepage = https://github.com/ferd/dispcount
+pkg_dispcount_fetch = git
+pkg_dispcount_repo = https://github.com/ferd/dispcount
+pkg_dispcount_commit = master
+
+PACKAGES += dlhttpc
+pkg_dlhttpc_name = dlhttpc
+pkg_dlhttpc_description = dispcount-based lhttpc fork for massive amounts of requests to limited endpoints
+pkg_dlhttpc_homepage = https://github.com/ferd/dlhttpc
+pkg_dlhttpc_fetch = git
+pkg_dlhttpc_repo = https://github.com/ferd/dlhttpc
+pkg_dlhttpc_commit = master
+
+PACKAGES += dns
+pkg_dns_name = dns
+pkg_dns_description = Erlang DNS library
+pkg_dns_homepage = https://github.com/aetrion/dns_erlang
+pkg_dns_fetch = git
+pkg_dns_repo = https://github.com/aetrion/dns_erlang
+pkg_dns_commit = master
+
+PACKAGES += dnssd
+pkg_dnssd_name = dnssd
+pkg_dnssd_description = Erlang interface to Apple's Bonjour DNS Service Discovery implementation
+pkg_dnssd_homepage = https://github.com/benoitc/dnssd_erlang
+pkg_dnssd_fetch = git
+pkg_dnssd_repo = https://github.com/benoitc/dnssd_erlang
+pkg_dnssd_commit = master
+
+PACKAGES += dynamic_compile
+pkg_dynamic_compile_name = dynamic_compile
+pkg_dynamic_compile_description = compile and load erlang modules from string input
+pkg_dynamic_compile_homepage = https://github.com/jkvor/dynamic_compile
+pkg_dynamic_compile_fetch = git
+pkg_dynamic_compile_repo = https://github.com/jkvor/dynamic_compile
+pkg_dynamic_compile_commit = master
+
+PACKAGES += e2
+pkg_e2_name = e2
+pkg_e2_description = Library to simplify writing correct OTP applications.
+pkg_e2_homepage = http://e2project.org
+pkg_e2_fetch = git
+pkg_e2_repo = https://github.com/gar1t/e2
+pkg_e2_commit = master
+
+PACKAGES += eamf
+pkg_eamf_name = eamf
+pkg_eamf_description = eAMF provides Action Message Format (AMF) support for Erlang
+pkg_eamf_homepage = https://github.com/mrinalwadhwa/eamf
+pkg_eamf_fetch = git
+pkg_eamf_repo = https://github.com/mrinalwadhwa/eamf
+pkg_eamf_commit = master
+
+PACKAGES += eavro
+pkg_eavro_name = eavro
+pkg_eavro_description = Apache Avro encoder/decoder
+pkg_eavro_homepage = https://github.com/SIfoxDevTeam/eavro
+pkg_eavro_fetch = git
+pkg_eavro_repo = https://github.com/SIfoxDevTeam/eavro
+pkg_eavro_commit = master
+
+PACKAGES += ecapnp
+pkg_ecapnp_name = ecapnp
+pkg_ecapnp_description = Cap'n Proto library for Erlang
+pkg_ecapnp_homepage = https://github.com/kaos/ecapnp
+pkg_ecapnp_fetch = git
+pkg_ecapnp_repo = https://github.com/kaos/ecapnp
+pkg_ecapnp_commit = master
+
+PACKAGES += econfig
+pkg_econfig_name = econfig
+pkg_econfig_description = simple Erlang config handler using INI files
+pkg_econfig_homepage = https://github.com/benoitc/econfig
+pkg_econfig_fetch = git
+pkg_econfig_repo = https://github.com/benoitc/econfig
+pkg_econfig_commit = master
+
+PACKAGES += edate
+pkg_edate_name = edate
+pkg_edate_description = date manipulation library for erlang
+pkg_edate_homepage = https://github.com/dweldon/edate
+pkg_edate_fetch = git
+pkg_edate_repo = https://github.com/dweldon/edate
+pkg_edate_commit = master
+
+PACKAGES += edgar
+pkg_edgar_name = edgar
+pkg_edgar_description = Erlang Does GNU AR
+pkg_edgar_homepage = https://github.com/crownedgrouse/edgar
+pkg_edgar_fetch = git
+pkg_edgar_repo = https://github.com/crownedgrouse/edgar
+pkg_edgar_commit = master
+
+PACKAGES += edis
+pkg_edis_name = edis
+pkg_edis_description = An Erlang implementation of Redis KV Store
+pkg_edis_homepage = http://inaka.github.com/edis/
+pkg_edis_fetch = git
+pkg_edis_repo = https://github.com/inaka/edis
+pkg_edis_commit = master
+
+PACKAGES += edns
+pkg_edns_name = edns
+pkg_edns_description = Erlang/OTP DNS server
+pkg_edns_homepage = https://github.com/hcvst/erlang-dns
+pkg_edns_fetch = git
+pkg_edns_repo = https://github.com/hcvst/erlang-dns
+pkg_edns_commit = master
+
+PACKAGES += edown
+pkg_edown_name = edown
+pkg_edown_description = EDoc extension for generating Github-flavored Markdown
+pkg_edown_homepage = https://github.com/uwiger/edown
+pkg_edown_fetch = git
+pkg_edown_repo = https://github.com/uwiger/edown
+pkg_edown_commit = master
+
+PACKAGES += eep
+pkg_eep_name = eep
+pkg_eep_description = Erlang Easy Profiling (eep) application provides a way to analyze application performance and call hierarchy
+pkg_eep_homepage = https://github.com/virtan/eep
+pkg_eep_fetch = git
+pkg_eep_repo = https://github.com/virtan/eep
+pkg_eep_commit = master
+
+PACKAGES += eep_app
+pkg_eep_app_name = eep_app
+pkg_eep_app_description = Embedded Event Processing
+pkg_eep_app_homepage = https://github.com/darach/eep-erl
+pkg_eep_app_fetch = git
+pkg_eep_app_repo = https://github.com/darach/eep-erl
+pkg_eep_app_commit = master
+
+PACKAGES += efene
+pkg_efene_name = efene
+pkg_efene_description = Alternative syntax for the Erlang Programming Language focusing on simplicity, ease of use and programmer UX
+pkg_efene_homepage = https://github.com/efene/efene
+pkg_efene_fetch = git
+pkg_efene_repo = https://github.com/efene/efene
+pkg_efene_commit = master
+
+PACKAGES += egeoip
+pkg_egeoip_name = egeoip
+pkg_egeoip_description = Erlang IP Geolocation module, currently supporting the MaxMind GeoLite City Database.
+pkg_egeoip_homepage = https://github.com/mochi/egeoip
+pkg_egeoip_fetch = git
+pkg_egeoip_repo = https://github.com/mochi/egeoip
+pkg_egeoip_commit = master
+
+PACKAGES += ehsa
+pkg_ehsa_name = ehsa
+pkg_ehsa_description = Erlang HTTP server basic and digest authentication modules
+pkg_ehsa_homepage = https://bitbucket.org/a12n/ehsa
+pkg_ehsa_fetch = hg
+pkg_ehsa_repo = https://bitbucket.org/a12n/ehsa
+pkg_ehsa_commit = default
+
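+# Fetch-method note (illustrative): most entries use git, but other methods
+# such as hg (see ehsa above) are supported; an override keeps the same shape:
+#
+#     DEPS = ehsa
+#     dep_ehsa = hg https://bitbucket.org/a12n/ehsa default
+#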
+PACKAGES += ej
+pkg_ej_name = ej
+pkg_ej_description = Helper module for working with Erlang terms representing JSON
+pkg_ej_homepage = https://github.com/seth/ej
+pkg_ej_fetch = git
+pkg_ej_repo = https://github.com/seth/ej
+pkg_ej_commit = master
+
+PACKAGES += ejabberd
+pkg_ejabberd_name = ejabberd
+pkg_ejabberd_description = Robust, ubiquitous and massively scalable Jabber / XMPP Instant Messaging platform
+pkg_ejabberd_homepage = https://github.com/processone/ejabberd
+pkg_ejabberd_fetch = git
+pkg_ejabberd_repo = https://github.com/processone/ejabberd
+pkg_ejabberd_commit = master
+
+PACKAGES += ejwt
+pkg_ejwt_name = ejwt
+pkg_ejwt_description = erlang library for JSON Web Token
+pkg_ejwt_homepage = https://github.com/artefactop/ejwt
+pkg_ejwt_fetch = git
+pkg_ejwt_repo = https://github.com/artefactop/ejwt
+pkg_ejwt_commit = master
+
+PACKAGES += ekaf
+pkg_ekaf_name = ekaf
+pkg_ekaf_description = A minimal, high-performance Kafka client in Erlang.
+pkg_ekaf_homepage = https://github.com/helpshift/ekaf
+pkg_ekaf_fetch = git
+pkg_ekaf_repo = https://github.com/helpshift/ekaf
+pkg_ekaf_commit = master
+
+PACKAGES += elarm
+pkg_elarm_name = elarm
+pkg_elarm_description = Alarm Manager for Erlang.
+pkg_elarm_homepage = https://github.com/esl/elarm
+pkg_elarm_fetch = git
+pkg_elarm_repo = https://github.com/esl/elarm
+pkg_elarm_commit = master
+
+PACKAGES += eleveldb
+pkg_eleveldb_name = eleveldb
+pkg_eleveldb_description = Erlang LevelDB API
+pkg_eleveldb_homepage = https://github.com/basho/eleveldb
+pkg_eleveldb_fetch = git
+pkg_eleveldb_repo = https://github.com/basho/eleveldb
+pkg_eleveldb_commit = master
+
+PACKAGES += elixir
+pkg_elixir_name = elixir
+pkg_elixir_description = Elixir is a dynamic, functional language designed for building scalable and maintainable applications
+pkg_elixir_homepage = https://elixir-lang.org/
+pkg_elixir_fetch = git
+pkg_elixir_repo = https://github.com/elixir-lang/elixir
+pkg_elixir_commit = master
+
+PACKAGES += elli
+pkg_elli_name = elli
+pkg_elli_description = Simple, robust and performant Erlang web server
+pkg_elli_homepage = https://github.com/elli-lib/elli
+pkg_elli_fetch = git
+pkg_elli_repo = https://github.com/elli-lib/elli
+pkg_elli_commit = master
+
+PACKAGES += elvis
+pkg_elvis_name = elvis
+pkg_elvis_description = Erlang Style Reviewer
+pkg_elvis_homepage = https://github.com/inaka/elvis
+pkg_elvis_fetch = git
+pkg_elvis_repo = https://github.com/inaka/elvis
+pkg_elvis_commit = master
+
+PACKAGES += emagick
+pkg_emagick_name = emagick
+pkg_emagick_description = Wrapper for Graphics/ImageMagick command line tool.
+pkg_emagick_homepage = https://github.com/kivra/emagick
+pkg_emagick_fetch = git
+pkg_emagick_repo = https://github.com/kivra/emagick
+pkg_emagick_commit = master
+
+PACKAGES += emysql
+pkg_emysql_name = emysql
+pkg_emysql_description = Stable, pure Erlang MySQL driver.
+pkg_emysql_homepage = https://github.com/Eonblast/Emysql
+pkg_emysql_fetch = git
+pkg_emysql_repo = https://github.com/Eonblast/Emysql
+pkg_emysql_commit = master
+
+PACKAGES += enm
+pkg_enm_name = enm
+pkg_enm_description = Erlang driver for nanomsg
+pkg_enm_homepage = https://github.com/basho/enm
+pkg_enm_fetch = git
+pkg_enm_repo = https://github.com/basho/enm
+pkg_enm_commit = master
+
+PACKAGES += entop
+pkg_entop_name = entop
+pkg_entop_description = A top-like tool for monitoring an Erlang node
+pkg_entop_homepage = https://github.com/mazenharake/entop
+pkg_entop_fetch = git
+pkg_entop_repo = https://github.com/mazenharake/entop
+pkg_entop_commit = master
+
+PACKAGES += epcap
+pkg_epcap_name = epcap
+pkg_epcap_description = Erlang packet capture interface using pcap
+pkg_epcap_homepage = https://github.com/msantos/epcap
+pkg_epcap_fetch = git
+pkg_epcap_repo = https://github.com/msantos/epcap
+pkg_epcap_commit = master
+
+PACKAGES += eper
+pkg_eper_name = eper
+pkg_eper_description = Erlang performance and debugging tools.
+pkg_eper_homepage = https://github.com/massemanet/eper
+pkg_eper_fetch = git
+pkg_eper_repo = https://github.com/massemanet/eper
+pkg_eper_commit = master
+
+PACKAGES += epgsql
+pkg_epgsql_name = epgsql
+pkg_epgsql_description = Erlang PostgreSQL client library.
+pkg_epgsql_homepage = https://github.com/epgsql/epgsql
+pkg_epgsql_fetch = git
+pkg_epgsql_repo = https://github.com/epgsql/epgsql
+pkg_epgsql_commit = master
+
+PACKAGES += episcina
+pkg_episcina_name = episcina
+pkg_episcina_description = A simple, non-intrusive resource pool for connections
+pkg_episcina_homepage = https://github.com/erlware/episcina
+pkg_episcina_fetch = git
+pkg_episcina_repo = https://github.com/erlware/episcina
+pkg_episcina_commit = master
+
+PACKAGES += eplot
+pkg_eplot_name = eplot
+pkg_eplot_description = A plot engine written in erlang.
+pkg_eplot_homepage = https://github.com/psyeugenic/eplot
+pkg_eplot_fetch = git
+pkg_eplot_repo = https://github.com/psyeugenic/eplot
+pkg_eplot_commit = master
+
+PACKAGES += epocxy
+pkg_epocxy_name = epocxy
+pkg_epocxy_description = Erlang Patterns of Concurrency
+pkg_epocxy_homepage = https://github.com/duomark/epocxy
+pkg_epocxy_fetch = git
+pkg_epocxy_repo = https://github.com/duomark/epocxy
+pkg_epocxy_commit = master
+
+PACKAGES += epubnub
+pkg_epubnub_name = epubnub
+pkg_epubnub_description = Erlang PubNub API
+pkg_epubnub_homepage = https://github.com/tsloughter/epubnub
+pkg_epubnub_fetch = git
+pkg_epubnub_repo = https://github.com/tsloughter/epubnub
+pkg_epubnub_commit = master
+
+PACKAGES += eqm
+pkg_eqm_name = eqm
+pkg_eqm_description = Erlang pub sub with supply-demand channels
+pkg_eqm_homepage = https://github.com/loucash/eqm
+pkg_eqm_fetch = git
+pkg_eqm_repo = https://github.com/loucash/eqm
+pkg_eqm_commit = master
+
+PACKAGES += eredis
+pkg_eredis_name = eredis
+pkg_eredis_description = Erlang Redis client
+pkg_eredis_homepage = https://github.com/wooga/eredis
+pkg_eredis_fetch = git
+pkg_eredis_repo = https://github.com/wooga/eredis
+pkg_eredis_commit = master
+
+PACKAGES += eredis_pool
+pkg_eredis_pool_name = eredis_pool
+pkg_eredis_pool_description = eredis_pool is a pool of Redis clients, using eredis and poolboy.
+pkg_eredis_pool_homepage = https://github.com/hiroeorz/eredis_pool
+pkg_eredis_pool_fetch = git
+pkg_eredis_pool_repo = https://github.com/hiroeorz/eredis_pool
+pkg_eredis_pool_commit = master
+
+PACKAGES += erl_streams
+pkg_erl_streams_name = erl_streams
+pkg_erl_streams_description = Streams in Erlang
+pkg_erl_streams_homepage = https://github.com/epappas/erl_streams
+pkg_erl_streams_fetch = git
+pkg_erl_streams_repo = https://github.com/epappas/erl_streams
+pkg_erl_streams_commit = master
+
+PACKAGES += erlang_cep
+pkg_erlang_cep_name = erlang_cep
+pkg_erlang_cep_description = A basic CEP package written in erlang
+pkg_erlang_cep_homepage = https://github.com/danmacklin/erlang_cep
+pkg_erlang_cep_fetch = git
+pkg_erlang_cep_repo = https://github.com/danmacklin/erlang_cep
+pkg_erlang_cep_commit = master
+
+PACKAGES += erlang_js
+pkg_erlang_js_name = erlang_js
+pkg_erlang_js_description = A linked-in driver for Erlang to Mozilla's Spidermonkey Javascript runtime.
+pkg_erlang_js_homepage = https://github.com/basho/erlang_js
+pkg_erlang_js_fetch = git
+pkg_erlang_js_repo = https://github.com/basho/erlang_js
+pkg_erlang_js_commit = master
+
+PACKAGES += erlang_localtime
+pkg_erlang_localtime_name = erlang_localtime
+pkg_erlang_localtime_description = Erlang library for conversion from one local time to another
+pkg_erlang_localtime_homepage = https://github.com/dmitryme/erlang_localtime
+pkg_erlang_localtime_fetch = git
+pkg_erlang_localtime_repo = https://github.com/dmitryme/erlang_localtime
+pkg_erlang_localtime_commit = master
+
+PACKAGES += erlang_smtp
+pkg_erlang_smtp_name = erlang_smtp
+pkg_erlang_smtp_description = Erlang SMTP and POP3 server code.
+pkg_erlang_smtp_homepage = https://github.com/tonyg/erlang-smtp
+pkg_erlang_smtp_fetch = git
+pkg_erlang_smtp_repo = https://github.com/tonyg/erlang-smtp
+pkg_erlang_smtp_commit = master
+
+PACKAGES += erlang_term
+pkg_erlang_term_name = erlang_term
+pkg_erlang_term_description = Erlang Term Info
+pkg_erlang_term_homepage = https://github.com/okeuday/erlang_term
+pkg_erlang_term_fetch = git
+pkg_erlang_term_repo = https://github.com/okeuday/erlang_term
+pkg_erlang_term_commit = master
+
+PACKAGES += erlastic_search
+pkg_erlastic_search_name = erlastic_search
+pkg_erlastic_search_description = An Erlang app for communicating with Elasticsearch's REST interface.
+pkg_erlastic_search_homepage = https://github.com/tsloughter/erlastic_search
+pkg_erlastic_search_fetch = git
+pkg_erlastic_search_repo = https://github.com/tsloughter/erlastic_search
+pkg_erlastic_search_commit = master
+
+PACKAGES += erlasticsearch
+pkg_erlasticsearch_name = erlasticsearch
+pkg_erlasticsearch_description = Erlang thrift interface to elastic_search
+pkg_erlasticsearch_homepage = https://github.com/dieswaytoofast/erlasticsearch
+pkg_erlasticsearch_fetch = git
+pkg_erlasticsearch_repo = https://github.com/dieswaytoofast/erlasticsearch
+pkg_erlasticsearch_commit = master
+
+PACKAGES += erlbrake
+pkg_erlbrake_name = erlbrake
+pkg_erlbrake_description = Erlang Airbrake notification client
+pkg_erlbrake_homepage = https://github.com/kenpratt/erlbrake
+pkg_erlbrake_fetch = git
+pkg_erlbrake_repo = https://github.com/kenpratt/erlbrake
+pkg_erlbrake_commit = master
+
+PACKAGES += erlcloud
+pkg_erlcloud_name = erlcloud
+pkg_erlcloud_description = Cloud Computing library for erlang (Amazon EC2, S3, SQS, SimpleDB, Mechanical Turk, ELB)
+pkg_erlcloud_homepage = https://github.com/gleber/erlcloud
+pkg_erlcloud_fetch = git
+pkg_erlcloud_repo = https://github.com/gleber/erlcloud
+pkg_erlcloud_commit = master
+
+PACKAGES += erlcron
+pkg_erlcron_name = erlcron
+pkg_erlcron_description = Erlang cronish system
+pkg_erlcron_homepage = https://github.com/erlware/erlcron
+pkg_erlcron_fetch = git
+pkg_erlcron_repo = https://github.com/erlware/erlcron
+pkg_erlcron_commit = master
+
+PACKAGES += erldb
+pkg_erldb_name = erldb
+pkg_erldb_description = ORM (Object-relational mapping) application implemented in Erlang
+pkg_erldb_homepage = http://erldb.org
+pkg_erldb_fetch = git
+pkg_erldb_repo = https://github.com/erldb/erldb
+pkg_erldb_commit = master
+
+PACKAGES += erldis
+pkg_erldis_name = erldis
+pkg_erldis_description = redis erlang client library
+pkg_erldis_homepage = https://github.com/cstar/erldis
+pkg_erldis_fetch = git
+pkg_erldis_repo = https://github.com/cstar/erldis
+pkg_erldis_commit = master
+
+PACKAGES += erldns
+pkg_erldns_name = erldns
+pkg_erldns_description = DNS server, in erlang.
+pkg_erldns_homepage = https://github.com/aetrion/erl-dns
+pkg_erldns_fetch = git
+pkg_erldns_repo = https://github.com/aetrion/erl-dns
+pkg_erldns_commit = master
+
+PACKAGES += erldocker
+pkg_erldocker_name = erldocker
+pkg_erldocker_description = Docker Remote API client for Erlang
+pkg_erldocker_homepage = https://github.com/proger/erldocker
+pkg_erldocker_fetch = git
+pkg_erldocker_repo = https://github.com/proger/erldocker
+pkg_erldocker_commit = master
+
+PACKAGES += erlfsmon
+pkg_erlfsmon_name = erlfsmon
+pkg_erlfsmon_description = Erlang filesystem event watcher for Linux and OSX
+pkg_erlfsmon_homepage = https://github.com/proger/erlfsmon
+pkg_erlfsmon_fetch = git
+pkg_erlfsmon_repo = https://github.com/proger/erlfsmon
+pkg_erlfsmon_commit = master
+
+PACKAGES += erlgit
+pkg_erlgit_name = erlgit
+pkg_erlgit_description = Erlang convenience wrapper around git executable
+pkg_erlgit_homepage = https://github.com/gleber/erlgit
+pkg_erlgit_fetch = git
+pkg_erlgit_repo = https://github.com/gleber/erlgit
+pkg_erlgit_commit = master
+
+PACKAGES += erlguten
+pkg_erlguten_name = erlguten
+pkg_erlguten_description = ErlGuten is a system for high-quality typesetting, written purely in Erlang.
+pkg_erlguten_homepage = https://github.com/richcarl/erlguten
+pkg_erlguten_fetch = git
+pkg_erlguten_repo = https://github.com/richcarl/erlguten
+pkg_erlguten_commit = master
+
+PACKAGES += erlmc
+pkg_erlmc_name = erlmc
+pkg_erlmc_description = Erlang memcached binary protocol client
+pkg_erlmc_homepage = https://github.com/jkvor/erlmc
+pkg_erlmc_fetch = git
+pkg_erlmc_repo = https://github.com/jkvor/erlmc
+pkg_erlmc_commit = master
+
+PACKAGES += erlmongo
+pkg_erlmongo_name = erlmongo
+pkg_erlmongo_description = Record based Erlang driver for MongoDB with gridfs support
+pkg_erlmongo_homepage = https://github.com/SergejJurecko/erlmongo
+pkg_erlmongo_fetch = git
+pkg_erlmongo_repo = https://github.com/SergejJurecko/erlmongo
+pkg_erlmongo_commit = master
+
+PACKAGES += erlog
+pkg_erlog_name = erlog
+pkg_erlog_description = Prolog interpreter in and for Erlang
+pkg_erlog_homepage = https://github.com/rvirding/erlog
+pkg_erlog_fetch = git
+pkg_erlog_repo = https://github.com/rvirding/erlog
+pkg_erlog_commit = master
+
+PACKAGES += erlpass
+pkg_erlpass_name = erlpass
+pkg_erlpass_description = A library to handle password hashing and changing in a safe manner, independent from any kind of storage whatsoever.
+pkg_erlpass_homepage = https://github.com/ferd/erlpass
+pkg_erlpass_fetch = git
+pkg_erlpass_repo = https://github.com/ferd/erlpass
+pkg_erlpass_commit = master
+
+PACKAGES += erlport
+pkg_erlport_name = erlport
+pkg_erlport_description = ErlPort - connect Erlang to other languages
+pkg_erlport_homepage = https://github.com/hdima/erlport
+pkg_erlport_fetch = git
+pkg_erlport_repo = https://github.com/hdima/erlport
+pkg_erlport_commit = master
+
+PACKAGES += erlsh
+pkg_erlsh_name = erlsh
+pkg_erlsh_description = Erlang shell tools
+pkg_erlsh_homepage = https://github.com/proger/erlsh
+pkg_erlsh_fetch = git
+pkg_erlsh_repo = https://github.com/proger/erlsh
+pkg_erlsh_commit = master
+
+PACKAGES += erlsha2
+pkg_erlsha2_name = erlsha2
+pkg_erlsha2_description = SHA-224, SHA-256, SHA-384, SHA-512 implemented in Erlang NIFs.
+pkg_erlsha2_homepage = https://github.com/vinoski/erlsha2
+pkg_erlsha2_fetch = git
+pkg_erlsha2_repo = https://github.com/vinoski/erlsha2
+pkg_erlsha2_commit = master
+
+PACKAGES += erlsom
+pkg_erlsom_name = erlsom
+pkg_erlsom_description = XML parser for Erlang
+pkg_erlsom_homepage = https://github.com/willemdj/erlsom
+pkg_erlsom_fetch = git
+pkg_erlsom_repo = https://github.com/willemdj/erlsom
+pkg_erlsom_commit = master
+
+PACKAGES += erlubi
+pkg_erlubi_name = erlubi
+pkg_erlubi_description = Ubigraph Erlang Client (and Process Visualizer)
+pkg_erlubi_homepage = https://github.com/krestenkrab/erlubi
+pkg_erlubi_fetch = git
+pkg_erlubi_repo = https://github.com/krestenkrab/erlubi
+pkg_erlubi_commit = master
+
+PACKAGES += erlvolt
+pkg_erlvolt_name = erlvolt
+pkg_erlvolt_description = VoltDB Erlang Client Driver
+pkg_erlvolt_homepage = https://github.com/VoltDB/voltdb-client-erlang
+pkg_erlvolt_fetch = git
+pkg_erlvolt_repo = https://github.com/VoltDB/voltdb-client-erlang
+pkg_erlvolt_commit = master
+
+PACKAGES += erlware_commons
+pkg_erlware_commons_name = erlware_commons
+pkg_erlware_commons_description = Erlware Commons is an Erlware project focused on all aspects of reusable Erlang components.
+pkg_erlware_commons_homepage = https://github.com/erlware/erlware_commons
+pkg_erlware_commons_fetch = git
+pkg_erlware_commons_repo = https://github.com/erlware/erlware_commons
+pkg_erlware_commons_commit = master
+
+PACKAGES += erlydtl
+pkg_erlydtl_name = erlydtl
+pkg_erlydtl_description = Django Template Language for Erlang.
+pkg_erlydtl_homepage = https://github.com/erlydtl/erlydtl
+pkg_erlydtl_fetch = git
+pkg_erlydtl_repo = https://github.com/erlydtl/erlydtl
+pkg_erlydtl_commit = master
+
+PACKAGES += errd
+pkg_errd_name = errd
+pkg_errd_description = Erlang RRDTool library
+pkg_errd_homepage = https://github.com/archaelus/errd
+pkg_errd_fetch = git
+pkg_errd_repo = https://github.com/archaelus/errd
+pkg_errd_commit = master
+
+PACKAGES += erserve
+pkg_erserve_name = erserve
+pkg_erserve_description = Erlang/Rserve communication interface
+pkg_erserve_homepage = https://github.com/del/erserve
+pkg_erserve_fetch = git
+pkg_erserve_repo = https://github.com/del/erserve
+pkg_erserve_commit = master
+
+PACKAGES += erwa
+pkg_erwa_name = erwa
+pkg_erwa_description = A WAMP router and client written in Erlang.
+pkg_erwa_homepage = https://github.com/bwegh/erwa
+pkg_erwa_fetch = git
+pkg_erwa_repo = https://github.com/bwegh/erwa
+pkg_erwa_commit = master
+
+PACKAGES += escalus
+pkg_escalus_name = escalus
+pkg_escalus_description = An XMPP client library in Erlang for conveniently testing XMPP servers
+pkg_escalus_homepage = https://github.com/esl/escalus
+pkg_escalus_fetch = git
+pkg_escalus_repo = https://github.com/esl/escalus
+pkg_escalus_commit = master
+
+PACKAGES += esh_mk
+pkg_esh_mk_name = esh_mk
+pkg_esh_mk_description = esh template engine plugin for erlang.mk
+pkg_esh_mk_homepage = https://github.com/crownedgrouse/esh.mk
+pkg_esh_mk_fetch = git
+pkg_esh_mk_repo = https://github.com/crownedgrouse/esh.mk.git
+pkg_esh_mk_commit = master
+
+PACKAGES += espec
+pkg_espec_name = espec
+pkg_espec_description = ESpec: Behaviour driven development framework for Erlang
+pkg_espec_homepage = https://github.com/lucaspiller/espec
+pkg_espec_fetch = git
+pkg_espec_repo = https://github.com/lucaspiller/espec
+pkg_espec_commit = master
+
+PACKAGES += estatsd
+pkg_estatsd_name = estatsd
+pkg_estatsd_description = Erlang stats aggregation app that periodically flushes data to graphite
+pkg_estatsd_homepage = https://github.com/RJ/estatsd
+pkg_estatsd_fetch = git
+pkg_estatsd_repo = https://github.com/RJ/estatsd
+pkg_estatsd_commit = master
+
+PACKAGES += etap
+pkg_etap_name = etap
+pkg_etap_description = etap is a simple erlang testing library that provides TAP compliant output.
+pkg_etap_homepage = https://github.com/ngerakines/etap
+pkg_etap_fetch = git
+pkg_etap_repo = https://github.com/ngerakines/etap
+pkg_etap_commit = master
+
+PACKAGES += etest
+pkg_etest_name = etest
+pkg_etest_description = A lightweight, convention over configuration test framework for Erlang
+pkg_etest_homepage = https://github.com/wooga/etest
+pkg_etest_fetch = git
+pkg_etest_repo = https://github.com/wooga/etest
+pkg_etest_commit = master
+
+PACKAGES += etest_http
+pkg_etest_http_name = etest_http
+pkg_etest_http_description = etest Assertions around HTTP (client-side)
+pkg_etest_http_homepage = https://github.com/wooga/etest_http
+pkg_etest_http_fetch = git
+pkg_etest_http_repo = https://github.com/wooga/etest_http
+pkg_etest_http_commit = master
+
+PACKAGES += etoml
+pkg_etoml_name = etoml
+pkg_etoml_description = TOML language erlang parser
+pkg_etoml_homepage = https://github.com/kalta/etoml
+pkg_etoml_fetch = git
+pkg_etoml_repo = https://github.com/kalta/etoml
+pkg_etoml_commit = master
+
+PACKAGES += eunit
+pkg_eunit_name = eunit
+pkg_eunit_description = The EUnit lightweight unit testing framework for Erlang - this is the canonical development repository.
+pkg_eunit_homepage = https://github.com/richcarl/eunit
+pkg_eunit_fetch = git
+pkg_eunit_repo = https://github.com/richcarl/eunit
+pkg_eunit_commit = master
+
+PACKAGES += eunit_formatters
+pkg_eunit_formatters_name = eunit_formatters
+pkg_eunit_formatters_description = Because eunit's output sucks. Let's make it better.
+pkg_eunit_formatters_homepage = https://github.com/seancribbs/eunit_formatters
+pkg_eunit_formatters_fetch = git
+pkg_eunit_formatters_repo = https://github.com/seancribbs/eunit_formatters
+pkg_eunit_formatters_commit = master
+
+PACKAGES += euthanasia
+pkg_euthanasia_name = euthanasia
+pkg_euthanasia_description = Merciful killer for your Erlang processes
+pkg_euthanasia_homepage = https://github.com/doubleyou/euthanasia
+pkg_euthanasia_fetch = git
+pkg_euthanasia_repo = https://github.com/doubleyou/euthanasia
+pkg_euthanasia_commit = master
+
+PACKAGES += evum
+pkg_evum_name = evum
+pkg_evum_description = Spawn Linux VMs as Erlang processes in the Erlang VM
+pkg_evum_homepage = https://github.com/msantos/evum
+pkg_evum_fetch = git
+pkg_evum_repo = https://github.com/msantos/evum
+pkg_evum_commit = master
+
+PACKAGES += exec
+pkg_exec_name = erlexec
+pkg_exec_description = Execute and control OS processes from Erlang/OTP.
+pkg_exec_homepage = http://saleyn.github.com/erlexec
+pkg_exec_fetch = git
+pkg_exec_repo = https://github.com/saleyn/erlexec
+pkg_exec_commit = master
+
+PACKAGES += exml
+pkg_exml_name = exml
+pkg_exml_description = XML parsing library in Erlang
+pkg_exml_homepage = https://github.com/paulgray/exml
+pkg_exml_fetch = git
+pkg_exml_repo = https://github.com/paulgray/exml
+pkg_exml_commit = master
+
+PACKAGES += exometer
+pkg_exometer_name = exometer
+pkg_exometer_description = Basic measurement objects and probe behavior
+pkg_exometer_homepage = https://github.com/Feuerlabs/exometer
+pkg_exometer_fetch = git
+pkg_exometer_repo = https://github.com/Feuerlabs/exometer
+pkg_exometer_commit = master
+
+PACKAGES += exs1024
+pkg_exs1024_name = exs1024
+pkg_exs1024_description = Xorshift1024star pseudo random number generator for Erlang.
+pkg_exs1024_homepage = https://github.com/jj1bdx/exs1024
+pkg_exs1024_fetch = git
+pkg_exs1024_repo = https://github.com/jj1bdx/exs1024
+pkg_exs1024_commit = master
+
+PACKAGES += exs64
+pkg_exs64_name = exs64
+pkg_exs64_description = Xorshift64star pseudo random number generator for Erlang.
+pkg_exs64_homepage = https://github.com/jj1bdx/exs64
+pkg_exs64_fetch = git
+pkg_exs64_repo = https://github.com/jj1bdx/exs64
+pkg_exs64_commit = master
+
+PACKAGES += exsplus116
+pkg_exsplus116_name = exsplus116
+pkg_exsplus116_description = Xorshift116plus for Erlang
+pkg_exsplus116_homepage = https://github.com/jj1bdx/exsplus116
+pkg_exsplus116_fetch = git
+pkg_exsplus116_repo = https://github.com/jj1bdx/exsplus116
+pkg_exsplus116_commit = master
+
+PACKAGES += exsplus128
+pkg_exsplus128_name = exsplus128
+pkg_exsplus128_description = Xorshift128plus pseudo random number generator for Erlang.
+pkg_exsplus128_homepage = https://github.com/jj1bdx/exsplus128
+pkg_exsplus128_fetch = git
+pkg_exsplus128_repo = https://github.com/jj1bdx/exsplus128
+pkg_exsplus128_commit = master
+
+PACKAGES += ezmq
+pkg_ezmq_name = ezmq
+pkg_ezmq_description = zMQ implemented in Erlang
+pkg_ezmq_homepage = https://github.com/RoadRunnr/ezmq
+pkg_ezmq_fetch = git
+pkg_ezmq_repo = https://github.com/RoadRunnr/ezmq
+pkg_ezmq_commit = master
+
+PACKAGES += ezmtp
+pkg_ezmtp_name = ezmtp
+pkg_ezmtp_description = ZMTP protocol in pure Erlang.
+pkg_ezmtp_homepage = https://github.com/a13x/ezmtp
+pkg_ezmtp_fetch = git
+pkg_ezmtp_repo = https://github.com/a13x/ezmtp
+pkg_ezmtp_commit = master
+
+PACKAGES += fast_disk_log
+pkg_fast_disk_log_name = fast_disk_log
+pkg_fast_disk_log_description = Pool-based asynchronous Erlang disk logger
+pkg_fast_disk_log_homepage = https://github.com/lpgauth/fast_disk_log
+pkg_fast_disk_log_fetch = git
+pkg_fast_disk_log_repo = https://github.com/lpgauth/fast_disk_log
+pkg_fast_disk_log_commit = master
+
+PACKAGES += feeder
+pkg_feeder_name = feeder
+pkg_feeder_description = Stream parse RSS and Atom formatted XML feeds.
+pkg_feeder_homepage = https://github.com/michaelnisi/feeder
+pkg_feeder_fetch = git
+pkg_feeder_repo = https://github.com/michaelnisi/feeder
+pkg_feeder_commit = master
+
+PACKAGES += find_crate
+pkg_find_crate_name = find_crate
+pkg_find_crate_description = Find Rust libs and exes in Erlang application priv directory
+pkg_find_crate_homepage = https://github.com/goertzenator/find_crate
+pkg_find_crate_fetch = git
+pkg_find_crate_repo = https://github.com/goertzenator/find_crate
+pkg_find_crate_commit = master
+
+PACKAGES += fix
+pkg_fix_name = fix
+pkg_fix_description = http://fixprotocol.org/ implementation.
+pkg_fix_homepage = https://github.com/maxlapshin/fix
+pkg_fix_fetch = git
+pkg_fix_repo = https://github.com/maxlapshin/fix
+pkg_fix_commit = master
+
+PACKAGES += flower
+pkg_flower_name = flower
+pkg_flower_description = FlowER - an Erlang OpenFlow development platform
+pkg_flower_homepage = https://github.com/travelping/flower
+pkg_flower_fetch = git
+pkg_flower_repo = https://github.com/travelping/flower
+pkg_flower_commit = master
+
+PACKAGES += fn
+pkg_fn_name = fn
+pkg_fn_description = Function utilities for Erlang
+pkg_fn_homepage = https://github.com/reiddraper/fn
+pkg_fn_fetch = git
+pkg_fn_repo = https://github.com/reiddraper/fn
+pkg_fn_commit = master
+
+PACKAGES += folsom
+pkg_folsom_name = folsom
+pkg_folsom_description = Expose Erlang Events and Metrics
+pkg_folsom_homepage = https://github.com/boundary/folsom
+pkg_folsom_fetch = git
+pkg_folsom_repo = https://github.com/boundary/folsom
+pkg_folsom_commit = master
+
+PACKAGES += folsom_cowboy
+pkg_folsom_cowboy_name = folsom_cowboy
+pkg_folsom_cowboy_description = A Cowboy based Folsom HTTP Wrapper.
+pkg_folsom_cowboy_homepage = https://github.com/boundary/folsom_cowboy
+pkg_folsom_cowboy_fetch = git
+pkg_folsom_cowboy_repo = https://github.com/boundary/folsom_cowboy
+pkg_folsom_cowboy_commit = master
+
+PACKAGES += folsomite
+pkg_folsomite_name = folsomite
+pkg_folsomite_description = blow up your graphite / riemann server with folsom metrics
+pkg_folsomite_homepage = https://github.com/campanja/folsomite
+pkg_folsomite_fetch = git
+pkg_folsomite_repo = https://github.com/campanja/folsomite
+pkg_folsomite_commit = master
+
+PACKAGES += fs
+pkg_fs_name = fs
+pkg_fs_description = Erlang FileSystem Listener
+pkg_fs_homepage = https://github.com/synrc/fs
+pkg_fs_fetch = git
+pkg_fs_repo = https://github.com/synrc/fs
+pkg_fs_commit = master
+
+PACKAGES += fuse
+pkg_fuse_name = fuse
+pkg_fuse_description = A Circuit Breaker for Erlang
+pkg_fuse_homepage = https://github.com/jlouis/fuse
+pkg_fuse_fetch = git
+pkg_fuse_repo = https://github.com/jlouis/fuse
+pkg_fuse_commit = master
+
+PACKAGES += gcm
+pkg_gcm_name = gcm
+pkg_gcm_description = An Erlang application for Google Cloud Messaging
+pkg_gcm_homepage = https://github.com/pdincau/gcm-erlang
+pkg_gcm_fetch = git
+pkg_gcm_repo = https://github.com/pdincau/gcm-erlang
+pkg_gcm_commit = master
+
+PACKAGES += gcprof
+pkg_gcprof_name = gcprof
+pkg_gcprof_description = Garbage Collection profiler for Erlang
+pkg_gcprof_homepage = https://github.com/knutin/gcprof
+pkg_gcprof_fetch = git
+pkg_gcprof_repo = https://github.com/knutin/gcprof
+pkg_gcprof_commit = master
+
+PACKAGES += geas
+pkg_geas_name = geas
+pkg_geas_description = Guess Erlang Application Scattering
+pkg_geas_homepage = https://github.com/crownedgrouse/geas
+pkg_geas_fetch = git
+pkg_geas_repo = https://github.com/crownedgrouse/geas
+pkg_geas_commit = master
+
+PACKAGES += geef
+pkg_geef_name = geef
+pkg_geef_description = Git NEEEEF (Erlang NIF)
+pkg_geef_homepage = https://github.com/carlosmn/geef
+pkg_geef_fetch = git
+pkg_geef_repo = https://github.com/carlosmn/geef
+pkg_geef_commit = master
+
+PACKAGES += gen_coap
+pkg_gen_coap_name = gen_coap
+pkg_gen_coap_description = Generic Erlang CoAP Client/Server
+pkg_gen_coap_homepage = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_fetch = git
+pkg_gen_coap_repo = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_commit = master
+
+PACKAGES += gen_cycle
+pkg_gen_cycle_name = gen_cycle
+pkg_gen_cycle_description = Simple, generic OTP behaviour for recurring tasks
+pkg_gen_cycle_homepage = https://github.com/aerosol/gen_cycle
+pkg_gen_cycle_fetch = git
+pkg_gen_cycle_repo = https://github.com/aerosol/gen_cycle
+pkg_gen_cycle_commit = develop
+
+PACKAGES += gen_icmp
+pkg_gen_icmp_name = gen_icmp
+pkg_gen_icmp_description = Erlang interface to ICMP sockets
+pkg_gen_icmp_homepage = https://github.com/msantos/gen_icmp
+pkg_gen_icmp_fetch = git
+pkg_gen_icmp_repo = https://github.com/msantos/gen_icmp
+pkg_gen_icmp_commit = master
+
+PACKAGES += gen_leader
+pkg_gen_leader_name = gen_leader
+pkg_gen_leader_description = leader election behavior
+pkg_gen_leader_homepage = https://github.com/garret-smith/gen_leader_revival
+pkg_gen_leader_fetch = git
+pkg_gen_leader_repo = https://github.com/garret-smith/gen_leader_revival
+pkg_gen_leader_commit = master
+
+PACKAGES += gen_nb_server
+pkg_gen_nb_server_name = gen_nb_server
+pkg_gen_nb_server_description = OTP behavior for writing non-blocking servers
+pkg_gen_nb_server_homepage = https://github.com/kevsmith/gen_nb_server
+pkg_gen_nb_server_fetch = git
+pkg_gen_nb_server_repo = https://github.com/kevsmith/gen_nb_server
+pkg_gen_nb_server_commit = master
+
+PACKAGES += gen_paxos
+pkg_gen_paxos_name = gen_paxos
+pkg_gen_paxos_description = An Erlang/OTP-style implementation of the PAXOS distributed consensus protocol
+pkg_gen_paxos_homepage = https://github.com/gburd/gen_paxos
+pkg_gen_paxos_fetch = git
+pkg_gen_paxos_repo = https://github.com/gburd/gen_paxos
+pkg_gen_paxos_commit = master
+
+PACKAGES += gen_rpc
+pkg_gen_rpc_name = gen_rpc
+pkg_gen_rpc_description = A scalable RPC library for Erlang-VM based languages
+pkg_gen_rpc_homepage = https://github.com/priestjim/gen_rpc.git
+pkg_gen_rpc_fetch = git
+pkg_gen_rpc_repo = https://github.com/priestjim/gen_rpc.git
+pkg_gen_rpc_commit = master
+
+PACKAGES += gen_smtp
+pkg_gen_smtp_name = gen_smtp
+pkg_gen_smtp_description = A generic Erlang SMTP server and client that can be extended via callback modules
+pkg_gen_smtp_homepage = https://github.com/Vagabond/gen_smtp
+pkg_gen_smtp_fetch = git
+pkg_gen_smtp_repo = https://github.com/Vagabond/gen_smtp
+pkg_gen_smtp_commit = master
+
+PACKAGES += gen_tracker
+pkg_gen_tracker_name = gen_tracker
+pkg_gen_tracker_description = supervisor with ETS handling of children and their metadata
+pkg_gen_tracker_homepage = https://github.com/erlyvideo/gen_tracker
+pkg_gen_tracker_fetch = git
+pkg_gen_tracker_repo = https://github.com/erlyvideo/gen_tracker
+pkg_gen_tracker_commit = master
+
+PACKAGES += gen_unix
+pkg_gen_unix_name = gen_unix
+pkg_gen_unix_description = Erlang Unix socket interface
+pkg_gen_unix_homepage = https://github.com/msantos/gen_unix
+pkg_gen_unix_fetch = git
+pkg_gen_unix_repo = https://github.com/msantos/gen_unix
+pkg_gen_unix_commit = master
+
+PACKAGES += geode
+pkg_geode_name = geode
+pkg_geode_description = geohash/proximity lookup in pure, uncut erlang.
+pkg_geode_homepage = https://github.com/bradfordw/geode
+pkg_geode_fetch = git
+pkg_geode_repo = https://github.com/bradfordw/geode
+pkg_geode_commit = master
+
+PACKAGES += getopt
+pkg_getopt_name = getopt
+pkg_getopt_description = Module to parse command line arguments using the GNU getopt syntax
+pkg_getopt_homepage = https://github.com/jcomellas/getopt
+pkg_getopt_fetch = git
+pkg_getopt_repo = https://github.com/jcomellas/getopt
+pkg_getopt_commit = master
+
+PACKAGES += gettext
+pkg_gettext_name = gettext
+pkg_gettext_description = Erlang internationalization library.
+pkg_gettext_homepage = https://github.com/etnt/gettext
+pkg_gettext_fetch = git
+pkg_gettext_repo = https://github.com/etnt/gettext
+pkg_gettext_commit = master
+
+PACKAGES += giallo
+pkg_giallo_name = giallo
+pkg_giallo_description = Small and flexible web framework on top of Cowboy
+pkg_giallo_homepage = https://github.com/kivra/giallo
+pkg_giallo_fetch = git
+pkg_giallo_repo = https://github.com/kivra/giallo
+pkg_giallo_commit = master
+
+PACKAGES += gin
+pkg_gin_name = gin
+pkg_gin_description = Guard helpers for Erlang, implemented as a parse_transform
+pkg_gin_homepage = https://github.com/mad-cocktail/gin
+pkg_gin_fetch = git
+pkg_gin_repo = https://github.com/mad-cocktail/gin
+pkg_gin_commit = master
+
+PACKAGES += gitty
+pkg_gitty_name = gitty
+pkg_gitty_description = Git access in erlang
+pkg_gitty_homepage = https://github.com/maxlapshin/gitty
+pkg_gitty_fetch = git
+pkg_gitty_repo = https://github.com/maxlapshin/gitty
+pkg_gitty_commit = master
+
+PACKAGES += gold_fever
+pkg_gold_fever_name = gold_fever
+pkg_gold_fever_description = A Treasure Hunt for Erlangers
+pkg_gold_fever_homepage = https://github.com/inaka/gold_fever
+pkg_gold_fever_fetch = git
+pkg_gold_fever_repo = https://github.com/inaka/gold_fever
+pkg_gold_fever_commit = master
+
+PACKAGES += gpb
+pkg_gpb_name = gpb
+pkg_gpb_description = A Google Protobuf implementation for Erlang
+pkg_gpb_homepage = https://github.com/tomas-abrahamsson/gpb
+pkg_gpb_fetch = git
+pkg_gpb_repo = https://github.com/tomas-abrahamsson/gpb
+pkg_gpb_commit = master
+
+PACKAGES += gproc
+pkg_gproc_name = gproc
+pkg_gproc_description = Extended process registry for Erlang
+pkg_gproc_homepage = https://github.com/uwiger/gproc
+pkg_gproc_fetch = git
+pkg_gproc_repo = https://github.com/uwiger/gproc
+pkg_gproc_commit = master
+
+PACKAGES += grapherl
+pkg_grapherl_name = grapherl
+pkg_grapherl_description = Create graphs of Erlang systems and programs
+pkg_grapherl_homepage = https://github.com/eproxus/grapherl
+pkg_grapherl_fetch = git
+pkg_grapherl_repo = https://github.com/eproxus/grapherl
+pkg_grapherl_commit = master
+
+PACKAGES += grpc
+pkg_grpc_name = grpc
+pkg_grpc_description = gRPC server in Erlang
+pkg_grpc_homepage = https://github.com/Bluehouse-Technology/grpc
+pkg_grpc_fetch = git
+pkg_grpc_repo = https://github.com/Bluehouse-Technology/grpc
+pkg_grpc_commit = master
+
+PACKAGES += grpc_client
+pkg_grpc_client_name = grpc_client
+pkg_grpc_client_description = gRPC client in Erlang
+pkg_grpc_client_homepage = https://github.com/Bluehouse-Technology/grpc_client
+pkg_grpc_client_fetch = git
+pkg_grpc_client_repo = https://github.com/Bluehouse-Technology/grpc_client
+pkg_grpc_client_commit = master
+
+PACKAGES += gun
+pkg_gun_name = gun
+pkg_gun_description = Asynchronous SPDY, HTTP and Websocket client written in Erlang.
+pkg_gun_homepage = http://ninenines.eu
+pkg_gun_fetch = git
+pkg_gun_repo = https://github.com/ninenines/gun
+pkg_gun_commit = master
+
+PACKAGES += gut
+pkg_gut_name = gut
+pkg_gut_description = gut is a template-printing (aka scaffolding) tool for Erlang, like rails generate or yeoman
+pkg_gut_homepage = https://github.com/unbalancedparentheses/gut
+pkg_gut_fetch = git
+pkg_gut_repo = https://github.com/unbalancedparentheses/gut
+pkg_gut_commit = master
+
+PACKAGES += hackney
+pkg_hackney_name = hackney
+pkg_hackney_description = simple HTTP client in Erlang
+pkg_hackney_homepage = https://github.com/benoitc/hackney
+pkg_hackney_fetch = git
+pkg_hackney_repo = https://github.com/benoitc/hackney
+pkg_hackney_commit = master
+
+PACKAGES += hamcrest
+pkg_hamcrest_name = hamcrest
+pkg_hamcrest_description = Erlang port of Hamcrest
+pkg_hamcrest_homepage = https://github.com/hyperthunk/hamcrest-erlang
+pkg_hamcrest_fetch = git
+pkg_hamcrest_repo = https://github.com/hyperthunk/hamcrest-erlang
+pkg_hamcrest_commit = master
+
+PACKAGES += hanoidb
+pkg_hanoidb_name = hanoidb
+pkg_hanoidb_description = Erlang LSM BTree Storage
+pkg_hanoidb_homepage = https://github.com/krestenkrab/hanoidb
+pkg_hanoidb_fetch = git
+pkg_hanoidb_repo = https://github.com/krestenkrab/hanoidb
+pkg_hanoidb_commit = master
+
+PACKAGES += hottub
+pkg_hottub_name = hottub
+pkg_hottub_description = Permanent Erlang Worker Pool
+pkg_hottub_homepage = https://github.com/bfrog/hottub
+pkg_hottub_fetch = git
+pkg_hottub_repo = https://github.com/bfrog/hottub
+pkg_hottub_commit = master
+
+PACKAGES += hpack
+pkg_hpack_name = hpack
+pkg_hpack_description = HPACK Implementation for Erlang
+pkg_hpack_homepage = https://github.com/joedevivo/hpack
+pkg_hpack_fetch = git
+pkg_hpack_repo = https://github.com/joedevivo/hpack
+pkg_hpack_commit = master
+
+PACKAGES += hyper
+pkg_hyper_name = hyper
+pkg_hyper_description = Erlang implementation of HyperLogLog
+pkg_hyper_homepage = https://github.com/GameAnalytics/hyper
+pkg_hyper_fetch = git
+pkg_hyper_repo = https://github.com/GameAnalytics/hyper
+pkg_hyper_commit = master
+
+PACKAGES += i18n
+pkg_i18n_name = i18n
+pkg_i18n_description = International components for unicode from Erlang (unicode, date, string, number, format, locale, localization, transliteration, icu4e)
+pkg_i18n_homepage = https://github.com/erlang-unicode/i18n
+pkg_i18n_fetch = git
+pkg_i18n_repo = https://github.com/erlang-unicode/i18n
+pkg_i18n_commit = master
+
+PACKAGES += ibrowse
+pkg_ibrowse_name = ibrowse
+pkg_ibrowse_description = Erlang HTTP client
+pkg_ibrowse_homepage = https://github.com/cmullaparthi/ibrowse
+pkg_ibrowse_fetch = git
+pkg_ibrowse_repo = https://github.com/cmullaparthi/ibrowse
+pkg_ibrowse_commit = master
+
+PACKAGES += idna
+pkg_idna_name = idna
+pkg_idna_description = Erlang IDNA lib
+pkg_idna_homepage = https://github.com/benoitc/erlang-idna
+pkg_idna_fetch = git
+pkg_idna_repo = https://github.com/benoitc/erlang-idna
+pkg_idna_commit = master
+
+PACKAGES += ierlang
+pkg_ierlang_name = ierlang
+pkg_ierlang_description = An Erlang language kernel for IPython.
+pkg_ierlang_homepage = https://github.com/robbielynch/ierlang
+pkg_ierlang_fetch = git
+pkg_ierlang_repo = https://github.com/robbielynch/ierlang
+pkg_ierlang_commit = master
+
+PACKAGES += iota
+pkg_iota_name = iota
+pkg_iota_description = iota (Inter-dependency Objective Testing Apparatus) - a tool to enforce clean separation of responsibilities in Erlang code
+pkg_iota_homepage = https://github.com/jpgneves/iota
+pkg_iota_fetch = git
+pkg_iota_repo = https://github.com/jpgneves/iota
+pkg_iota_commit = master
+
+PACKAGES += irc_lib
+pkg_irc_lib_name = irc_lib
+pkg_irc_lib_description = Erlang IRC client library
+pkg_irc_lib_homepage = https://github.com/OtpChatBot/irc_lib
+pkg_irc_lib_fetch = git
+pkg_irc_lib_repo = https://github.com/OtpChatBot/irc_lib
+pkg_irc_lib_commit = master
+
+PACKAGES += ircd
+pkg_ircd_name = ircd
+pkg_ircd_description = A pluggable IRC daemon application/library for Erlang.
+pkg_ircd_homepage = https://github.com/tonyg/erlang-ircd
+pkg_ircd_fetch = git
+pkg_ircd_repo = https://github.com/tonyg/erlang-ircd
+pkg_ircd_commit = master
+
+PACKAGES += iris
+pkg_iris_name = iris
+pkg_iris_description = Iris Erlang binding
+pkg_iris_homepage = https://github.com/project-iris/iris-erl
+pkg_iris_fetch = git
+pkg_iris_repo = https://github.com/project-iris/iris-erl
+pkg_iris_commit = master
+
+PACKAGES += iso8601
+pkg_iso8601_name = iso8601
+pkg_iso8601_description = Erlang ISO 8601 date formatter/parser
+pkg_iso8601_homepage = https://github.com/seansawyer/erlang_iso8601
+pkg_iso8601_fetch = git
+pkg_iso8601_repo = https://github.com/seansawyer/erlang_iso8601
+pkg_iso8601_commit = master
+
+PACKAGES += jamdb_sybase
+pkg_jamdb_sybase_name = jamdb_sybase
+pkg_jamdb_sybase_description = Erlang driver for SAP Sybase ASE
+pkg_jamdb_sybase_homepage = https://github.com/erlangbureau/jamdb_sybase
+pkg_jamdb_sybase_fetch = git
+pkg_jamdb_sybase_repo = https://github.com/erlangbureau/jamdb_sybase
+pkg_jamdb_sybase_commit = master
+
+PACKAGES += jerg
+pkg_jerg_name = jerg
+pkg_jerg_description = JSON Schema to Erlang Records Generator
+pkg_jerg_homepage = https://github.com/ddossot/jerg
+pkg_jerg_fetch = git
+pkg_jerg_repo = https://github.com/ddossot/jerg
+pkg_jerg_commit = master
+
+PACKAGES += jesse
+pkg_jesse_name = jesse
+pkg_jesse_description = jesse (JSon Schema Erlang) is an implementation of a json schema validator for Erlang.
+pkg_jesse_homepage = https://github.com/for-GET/jesse
+pkg_jesse_fetch = git
+pkg_jesse_repo = https://github.com/for-GET/jesse
+pkg_jesse_commit = master
+
+PACKAGES += jiffy
+pkg_jiffy_name = jiffy
+pkg_jiffy_description = JSON NIFs for Erlang.
+pkg_jiffy_homepage = https://github.com/davisp/jiffy
+pkg_jiffy_fetch = git
+pkg_jiffy_repo = https://github.com/davisp/jiffy
+pkg_jiffy_commit = master
+
+PACKAGES += jiffy_v
+pkg_jiffy_v_name = jiffy_v
+pkg_jiffy_v_description = JSON validation utility
+pkg_jiffy_v_homepage = https://github.com/shizzard/jiffy-v
+pkg_jiffy_v_fetch = git
+pkg_jiffy_v_repo = https://github.com/shizzard/jiffy-v
+pkg_jiffy_v_commit = master
+
+PACKAGES += jobs
+pkg_jobs_name = jobs
+pkg_jobs_description = a Job scheduler for load regulation
+pkg_jobs_homepage = https://github.com/esl/jobs
+pkg_jobs_fetch = git
+pkg_jobs_repo = https://github.com/esl/jobs
+pkg_jobs_commit = master
+
+PACKAGES += joxa
+pkg_joxa_name = joxa
+pkg_joxa_description = A Modern Lisp for the Erlang VM
+pkg_joxa_homepage = https://github.com/joxa/joxa
+pkg_joxa_fetch = git
+pkg_joxa_repo = https://github.com/joxa/joxa
+pkg_joxa_commit = master
+
+PACKAGES += json
+pkg_json_name = json
+pkg_json_description = a high level json library for erlang (17.0+)
+pkg_json_homepage = https://github.com/talentdeficit/json
+pkg_json_fetch = git
+pkg_json_repo = https://github.com/talentdeficit/json
+pkg_json_commit = master
+
+PACKAGES += json_rec
+pkg_json_rec_name = json_rec
+pkg_json_rec_description = JSON to erlang record
+pkg_json_rec_homepage = https://github.com/justinkirby/json_rec
+pkg_json_rec_fetch = git
+pkg_json_rec_repo = https://github.com/justinkirby/json_rec
+pkg_json_rec_commit = master
+
+PACKAGES += jsone
+pkg_jsone_name = jsone
+pkg_jsone_description = An Erlang library for encoding and decoding JSON data.
+pkg_jsone_homepage = https://github.com/sile/jsone.git
+pkg_jsone_fetch = git
+pkg_jsone_repo = https://github.com/sile/jsone.git
+pkg_jsone_commit = master
+
+PACKAGES += jsonerl
+pkg_jsonerl_name = jsonerl
+pkg_jsonerl_description = yet another but slightly different erlang <-> json encoder/decoder
+pkg_jsonerl_homepage = https://github.com/lambder/jsonerl
+pkg_jsonerl_fetch = git
+pkg_jsonerl_repo = https://github.com/lambder/jsonerl
+pkg_jsonerl_commit = master
+
+PACKAGES += jsonpath
+pkg_jsonpath_name = jsonpath
+pkg_jsonpath_description = Fast Erlang JSON data retrieval and updates via javascript-like notation
+pkg_jsonpath_homepage = https://github.com/GeneStevens/jsonpath
+pkg_jsonpath_fetch = git
+pkg_jsonpath_repo = https://github.com/GeneStevens/jsonpath
+pkg_jsonpath_commit = master
+
+PACKAGES += jsonx
+pkg_jsonx_name = jsonx
+pkg_jsonx_description = JSONX is an Erlang library for efficiently decoding and encoding JSON, written in C.
+pkg_jsonx_homepage = https://github.com/iskra/jsonx
+pkg_jsonx_fetch = git
+pkg_jsonx_repo = https://github.com/iskra/jsonx
+pkg_jsonx_commit = master
+
+PACKAGES += jsx
+pkg_jsx_name = jsx
+pkg_jsx_description = An Erlang application for consuming, producing and manipulating JSON.
+pkg_jsx_homepage = https://github.com/talentdeficit/jsx
+pkg_jsx_fetch = git
+pkg_jsx_repo = https://github.com/talentdeficit/jsx
+pkg_jsx_commit = master
+
+PACKAGES += kafka
+pkg_kafka_name = kafka
+pkg_kafka_description = Kafka consumer and producer in Erlang
+pkg_kafka_homepage = https://github.com/wooga/kafka-erlang
+pkg_kafka_fetch = git
+pkg_kafka_repo = https://github.com/wooga/kafka-erlang
+pkg_kafka_commit = master
+
+PACKAGES += kafka_protocol
+pkg_kafka_protocol_name = kafka_protocol
+pkg_kafka_protocol_description = Kafka protocol Erlang library
+pkg_kafka_protocol_homepage = https://github.com/klarna/kafka_protocol
+pkg_kafka_protocol_fetch = git
+pkg_kafka_protocol_repo = https://github.com/klarna/kafka_protocol.git
+pkg_kafka_protocol_commit = master
+
+PACKAGES += kai
+pkg_kai_name = kai
+pkg_kai_description = DHT storage by Takeshi Inoue
+pkg_kai_homepage = https://github.com/synrc/kai
+pkg_kai_fetch = git
+pkg_kai_repo = https://github.com/synrc/kai
+pkg_kai_commit = master
+
+PACKAGES += katja
+pkg_katja_name = katja
+pkg_katja_description = A simple Riemann client written in Erlang.
+pkg_katja_homepage = https://github.com/nifoc/katja
+pkg_katja_fetch = git
+pkg_katja_repo = https://github.com/nifoc/katja
+pkg_katja_commit = master
+
+PACKAGES += kdht
+pkg_kdht_name = kdht
+pkg_kdht_description = kdht is an erlang DHT implementation
+pkg_kdht_homepage = https://github.com/kevinlynx/kdht
+pkg_kdht_fetch = git
+pkg_kdht_repo = https://github.com/kevinlynx/kdht
+pkg_kdht_commit = master
+
+PACKAGES += key2value
+pkg_key2value_name = key2value
+pkg_key2value_description = Erlang 2-way map
+pkg_key2value_homepage = https://github.com/okeuday/key2value
+pkg_key2value_fetch = git
+pkg_key2value_repo = https://github.com/okeuday/key2value
+pkg_key2value_commit = master
+
+PACKAGES += keys1value
+pkg_keys1value_name = keys1value
+pkg_keys1value_description = Erlang set associative map for key lists
+pkg_keys1value_homepage = https://github.com/okeuday/keys1value
+pkg_keys1value_fetch = git
+pkg_keys1value_repo = https://github.com/okeuday/keys1value
+pkg_keys1value_commit = master
+
+PACKAGES += kinetic
+pkg_kinetic_name = kinetic
+pkg_kinetic_description = Erlang Kinesis Client
+pkg_kinetic_homepage = https://github.com/AdRoll/kinetic
+pkg_kinetic_fetch = git
+pkg_kinetic_repo = https://github.com/AdRoll/kinetic
+pkg_kinetic_commit = master
+
+PACKAGES += kjell
+pkg_kjell_name = kjell
+pkg_kjell_description = Erlang Shell
+pkg_kjell_homepage = https://github.com/karlll/kjell
+pkg_kjell_fetch = git
+pkg_kjell_repo = https://github.com/karlll/kjell
+pkg_kjell_commit = master
+
+PACKAGES += kraken
+pkg_kraken_name = kraken
+pkg_kraken_description = Distributed Pubsub Server for Realtime Apps
+pkg_kraken_homepage = https://github.com/Asana/kraken
+pkg_kraken_fetch = git
+pkg_kraken_repo = https://github.com/Asana/kraken
+pkg_kraken_commit = master
+
+PACKAGES += kucumberl
+pkg_kucumberl_name = kucumberl
+pkg_kucumberl_description = A pure-Erlang, open-source implementation of Cucumber
+pkg_kucumberl_homepage = https://github.com/openshine/kucumberl
+pkg_kucumberl_fetch = git
+pkg_kucumberl_repo = https://github.com/openshine/kucumberl
+pkg_kucumberl_commit = master
+
+PACKAGES += kvc
+pkg_kvc_name = kvc
+pkg_kvc_description = KVC - Key Value Coding for Erlang data structures
+pkg_kvc_homepage = https://github.com/etrepum/kvc
+pkg_kvc_fetch = git
+pkg_kvc_repo = https://github.com/etrepum/kvc
+pkg_kvc_commit = master
+
+PACKAGES += kvlists
+pkg_kvlists_name = kvlists
+pkg_kvlists_description = Lists of key-value pairs (decoded JSON) in Erlang
+pkg_kvlists_homepage = https://github.com/jcomellas/kvlists
+pkg_kvlists_fetch = git
+pkg_kvlists_repo = https://github.com/jcomellas/kvlists
+pkg_kvlists_commit = master
+
+PACKAGES += kvs
+pkg_kvs_name = kvs
+pkg_kvs_description = Container and Iterator
+pkg_kvs_homepage = https://github.com/synrc/kvs
+pkg_kvs_fetch = git
+pkg_kvs_repo = https://github.com/synrc/kvs
+pkg_kvs_commit = master
+
+PACKAGES += lager
+pkg_lager_name = lager
+pkg_lager_description = A logging framework for Erlang/OTP.
+pkg_lager_homepage = https://github.com/erlang-lager/lager
+pkg_lager_fetch = git
+pkg_lager_repo = https://github.com/erlang-lager/lager
+pkg_lager_commit = master
+
+PACKAGES += lager_amqp_backend
+pkg_lager_amqp_backend_name = lager_amqp_backend
+pkg_lager_amqp_backend_description = AMQP RabbitMQ Lager backend
+pkg_lager_amqp_backend_homepage = https://github.com/jbrisbin/lager_amqp_backend
+pkg_lager_amqp_backend_fetch = git
+pkg_lager_amqp_backend_repo = https://github.com/jbrisbin/lager_amqp_backend
+pkg_lager_amqp_backend_commit = master
+
+PACKAGES += lager_syslog
+pkg_lager_syslog_name = lager_syslog
+pkg_lager_syslog_description = Syslog backend for lager
+pkg_lager_syslog_homepage = https://github.com/erlang-lager/lager_syslog
+pkg_lager_syslog_fetch = git
+pkg_lager_syslog_repo = https://github.com/erlang-lager/lager_syslog
+pkg_lager_syslog_commit = master
+
+PACKAGES += lambdapad
+pkg_lambdapad_name = lambdapad
+pkg_lambdapad_description = Static site generator using Erlang. Yes, Erlang.
+pkg_lambdapad_homepage = https://github.com/gar1t/lambdapad
+pkg_lambdapad_fetch = git
+pkg_lambdapad_repo = https://github.com/gar1t/lambdapad
+pkg_lambdapad_commit = master
+
+PACKAGES += lasp
+pkg_lasp_name = lasp
+pkg_lasp_description = A Language for Distributed, Eventually Consistent Computations
+pkg_lasp_homepage = http://lasp-lang.org/
+pkg_lasp_fetch = git
+pkg_lasp_repo = https://github.com/lasp-lang/lasp
+pkg_lasp_commit = master
+
+PACKAGES += lasse
+pkg_lasse_name = lasse
+pkg_lasse_description = SSE handler for Cowboy
+pkg_lasse_homepage = https://github.com/inaka/lasse
+pkg_lasse_fetch = git
+pkg_lasse_repo = https://github.com/inaka/lasse
+pkg_lasse_commit = master
+
+PACKAGES += ldap
+pkg_ldap_name = ldap
+pkg_ldap_description = LDAP server written in Erlang
+pkg_ldap_homepage = https://github.com/spawnproc/ldap
+pkg_ldap_fetch = git
+pkg_ldap_repo = https://github.com/spawnproc/ldap
+pkg_ldap_commit = master
+
+PACKAGES += lethink
+pkg_lethink_name = lethink
+pkg_lethink_description = erlang driver for rethinkdb
+pkg_lethink_homepage = https://github.com/taybin/lethink
+pkg_lethink_fetch = git
+pkg_lethink_repo = https://github.com/taybin/lethink
+pkg_lethink_commit = master
+
+PACKAGES += lfe
+pkg_lfe_name = lfe
+pkg_lfe_description = Lisp Flavoured Erlang (LFE)
+pkg_lfe_homepage = https://github.com/rvirding/lfe
+pkg_lfe_fetch = git
+pkg_lfe_repo = https://github.com/rvirding/lfe
+pkg_lfe_commit = master
+
+PACKAGES += ling
+pkg_ling_name = ling
+pkg_ling_description = Erlang on Xen
+pkg_ling_homepage = https://github.com/cloudozer/ling
+pkg_ling_fetch = git
+pkg_ling_repo = https://github.com/cloudozer/ling
+pkg_ling_commit = master
+
+PACKAGES += live
+pkg_live_name = live
+pkg_live_description = Automated module and configuration reloader.
+pkg_live_homepage = http://ninenines.eu
+pkg_live_fetch = git
+pkg_live_repo = https://github.com/ninenines/live
+pkg_live_commit = master
+
+PACKAGES += lmq
+pkg_lmq_name = lmq
+pkg_lmq_description = Lightweight Message Queue
+pkg_lmq_homepage = https://github.com/iij/lmq
+pkg_lmq_fetch = git
+pkg_lmq_repo = https://github.com/iij/lmq
+pkg_lmq_commit = master
+
+PACKAGES += locker
+pkg_locker_name = locker
+pkg_locker_description = Atomic distributed 'check and set' for short-lived keys
+pkg_locker_homepage = https://github.com/wooga/locker
+pkg_locker_fetch = git
+pkg_locker_repo = https://github.com/wooga/locker
+pkg_locker_commit = master
+
+PACKAGES += locks
+pkg_locks_name = locks
+pkg_locks_description = A scalable, deadlock-resolving resource locker
+pkg_locks_homepage = https://github.com/uwiger/locks
+pkg_locks_fetch = git
+pkg_locks_repo = https://github.com/uwiger/locks
+pkg_locks_commit = master
+
+PACKAGES += log4erl
+pkg_log4erl_name = log4erl
+pkg_log4erl_description = A logger for erlang in the spirit of Log4J.
+pkg_log4erl_homepage = https://github.com/ahmednawras/log4erl
+pkg_log4erl_fetch = git
+pkg_log4erl_repo = https://github.com/ahmednawras/log4erl
+pkg_log4erl_commit = master
+
+PACKAGES += lol
+pkg_lol_name = lol
+pkg_lol_description = Lisp on erLang, and programming is fun again
+pkg_lol_homepage = https://github.com/b0oh/lol
+pkg_lol_fetch = git
+pkg_lol_repo = https://github.com/b0oh/lol
+pkg_lol_commit = master
+
+PACKAGES += lucid
+pkg_lucid_name = lucid
+pkg_lucid_description = HTTP/2 server written in Erlang
+pkg_lucid_homepage = https://github.com/tatsuhiro-t/lucid
+pkg_lucid_fetch = git
+pkg_lucid_repo = https://github.com/tatsuhiro-t/lucid
+pkg_lucid_commit = master
+
+PACKAGES += luerl
+pkg_luerl_name = luerl
+pkg_luerl_description = Lua in Erlang
+pkg_luerl_homepage = https://github.com/rvirding/luerl
+pkg_luerl_fetch = git
+pkg_luerl_repo = https://github.com/rvirding/luerl
+pkg_luerl_commit = develop
+
+PACKAGES += luwak
+pkg_luwak_name = luwak
+pkg_luwak_description = Large-object storage interface for Riak
+pkg_luwak_homepage = https://github.com/basho/luwak
+pkg_luwak_fetch = git
+pkg_luwak_repo = https://github.com/basho/luwak
+pkg_luwak_commit = master
+
+PACKAGES += lux
+pkg_lux_name = lux
+pkg_lux_description = Lux (LUcid eXpect scripting) simplifies test automation and provides an Expect-style execution of commands
+pkg_lux_homepage = https://github.com/hawk/lux
+pkg_lux_fetch = git
+pkg_lux_repo = https://github.com/hawk/lux
+pkg_lux_commit = master
+
+PACKAGES += machi
+pkg_machi_name = machi
+pkg_machi_description = Machi file store
+pkg_machi_homepage = https://github.com/basho/machi
+pkg_machi_fetch = git
+pkg_machi_repo = https://github.com/basho/machi
+pkg_machi_commit = master
+
+PACKAGES += mad
+pkg_mad_name = mad
+pkg_mad_description = Small and Fast Rebar Replacement
+pkg_mad_homepage = https://github.com/synrc/mad
+pkg_mad_fetch = git
+pkg_mad_repo = https://github.com/synrc/mad
+pkg_mad_commit = master
+
+PACKAGES += marina
+pkg_marina_name = marina
+pkg_marina_description = Non-blocking Erlang Cassandra CQL3 client
+pkg_marina_homepage = https://github.com/lpgauth/marina
+pkg_marina_fetch = git
+pkg_marina_repo = https://github.com/lpgauth/marina
+pkg_marina_commit = master
+
+PACKAGES += mavg
+pkg_mavg_name = mavg
+pkg_mavg_description = Erlang :: Exponential moving average library
+pkg_mavg_homepage = https://github.com/EchoTeam/mavg
+pkg_mavg_fetch = git
+pkg_mavg_repo = https://github.com/EchoTeam/mavg
+pkg_mavg_commit = master
+
+PACKAGES += mc_erl
+pkg_mc_erl_name = mc_erl
+pkg_mc_erl_description = mc-erl is a server for Minecraft 1.4.7 written in Erlang.
+pkg_mc_erl_homepage = https://github.com/clonejo/mc-erl
+pkg_mc_erl_fetch = git
+pkg_mc_erl_repo = https://github.com/clonejo/mc-erl
+pkg_mc_erl_commit = master
+
+PACKAGES += mcd
+pkg_mcd_name = mcd
+pkg_mcd_description = Fast memcached protocol client in pure Erlang
+pkg_mcd_homepage = https://github.com/EchoTeam/mcd
+pkg_mcd_fetch = git
+pkg_mcd_repo = https://github.com/EchoTeam/mcd
+pkg_mcd_commit = master
+
+PACKAGES += mcerlang
+pkg_mcerlang_name = mcerlang
+pkg_mcerlang_description = The McErlang model checker for Erlang
+pkg_mcerlang_homepage = https://github.com/fredlund/McErlang
+pkg_mcerlang_fetch = git
+pkg_mcerlang_repo = https://github.com/fredlund/McErlang
+pkg_mcerlang_commit = master
+
+PACKAGES += meck
+pkg_meck_name = meck
+pkg_meck_description = A mocking library for Erlang
+pkg_meck_homepage = https://github.com/eproxus/meck
+pkg_meck_fetch = git
+pkg_meck_repo = https://github.com/eproxus/meck
+pkg_meck_commit = master
+
+PACKAGES += mekao
+pkg_mekao_name = mekao
+pkg_mekao_description = SQL constructor
+pkg_mekao_homepage = https://github.com/ddosia/mekao
+pkg_mekao_fetch = git
+pkg_mekao_repo = https://github.com/ddosia/mekao
+pkg_mekao_commit = master
+
+PACKAGES += memo
+pkg_memo_name = memo
+pkg_memo_description = Erlang memoization server
+pkg_memo_homepage = https://github.com/tuncer/memo
+pkg_memo_fetch = git
+pkg_memo_repo = https://github.com/tuncer/memo
+pkg_memo_commit = master
+
+PACKAGES += merge_index
+pkg_merge_index_name = merge_index
+pkg_merge_index_description = MergeIndex is an Erlang library for storing ordered sets on disk. It is very similar to an SSTable (in Google's Bigtable) or an HFile (in Hadoop).
+pkg_merge_index_homepage = https://github.com/basho/merge_index
+pkg_merge_index_fetch = git
+pkg_merge_index_repo = https://github.com/basho/merge_index
+pkg_merge_index_commit = master
+
+PACKAGES += merl
+pkg_merl_name = merl
+pkg_merl_description = Metaprogramming in Erlang
+pkg_merl_homepage = https://github.com/richcarl/merl
+pkg_merl_fetch = git
+pkg_merl_repo = https://github.com/richcarl/merl
+pkg_merl_commit = master
+
+PACKAGES += mimerl
+pkg_mimerl_name = mimerl
+pkg_mimerl_description = library to handle mimetypes
+pkg_mimerl_homepage = https://github.com/benoitc/mimerl
+pkg_mimerl_fetch = git
+pkg_mimerl_repo = https://github.com/benoitc/mimerl
+pkg_mimerl_commit = master
+
+PACKAGES += mimetypes
+pkg_mimetypes_name = mimetypes
+pkg_mimetypes_description = Erlang MIME types library
+pkg_mimetypes_homepage = https://github.com/spawngrid/mimetypes
+pkg_mimetypes_fetch = git
+pkg_mimetypes_repo = https://github.com/spawngrid/mimetypes
+pkg_mimetypes_commit = master
+
+PACKAGES += mixer
+pkg_mixer_name = mixer
+pkg_mixer_description = Mix in functions from other modules
+pkg_mixer_homepage = https://github.com/chef/mixer
+pkg_mixer_fetch = git
+pkg_mixer_repo = https://github.com/chef/mixer
+pkg_mixer_commit = master
+
+PACKAGES += mochiweb
+pkg_mochiweb_name = mochiweb
+pkg_mochiweb_description = MochiWeb is an Erlang library for building lightweight HTTP servers.
+pkg_mochiweb_homepage = https://github.com/mochi/mochiweb
+pkg_mochiweb_fetch = git
+pkg_mochiweb_repo = https://github.com/mochi/mochiweb
+pkg_mochiweb_commit = master
+
+PACKAGES += mochiweb_xpath
+pkg_mochiweb_xpath_name = mochiweb_xpath
+pkg_mochiweb_xpath_description = XPath support for mochiweb's html parser
+pkg_mochiweb_xpath_homepage = https://github.com/retnuh/mochiweb_xpath
+pkg_mochiweb_xpath_fetch = git
+pkg_mochiweb_xpath_repo = https://github.com/retnuh/mochiweb_xpath
+pkg_mochiweb_xpath_commit = master
+
+PACKAGES += mockgyver
+pkg_mockgyver_name = mockgyver
+pkg_mockgyver_description = A mocking library for Erlang
+pkg_mockgyver_homepage = https://github.com/klajo/mockgyver
+pkg_mockgyver_fetch = git
+pkg_mockgyver_repo = https://github.com/klajo/mockgyver
+pkg_mockgyver_commit = master
+
+PACKAGES += modlib
+pkg_modlib_name = modlib
+pkg_modlib_description = Web framework based on Erlang's inets httpd
+pkg_modlib_homepage = https://github.com/gar1t/modlib
+pkg_modlib_fetch = git
+pkg_modlib_repo = https://github.com/gar1t/modlib
+pkg_modlib_commit = master
+
+PACKAGES += mongodb
+pkg_mongodb_name = mongodb
+pkg_mongodb_description = MongoDB driver for Erlang
+pkg_mongodb_homepage = https://github.com/comtihon/mongodb-erlang
+pkg_mongodb_fetch = git
+pkg_mongodb_repo = https://github.com/comtihon/mongodb-erlang
+pkg_mongodb_commit = master
+
+PACKAGES += mongooseim
+pkg_mongooseim_name = mongooseim
+pkg_mongooseim_description = Jabber / XMPP server with focus on performance and scalability, by Erlang Solutions
+pkg_mongooseim_homepage = https://www.erlang-solutions.com/products/mongooseim-massively-scalable-ejabberd-platform
+pkg_mongooseim_fetch = git
+pkg_mongooseim_repo = https://github.com/esl/MongooseIM
+pkg_mongooseim_commit = master
+
+PACKAGES += moyo
+pkg_moyo_name = moyo
+pkg_moyo_description = Erlang utility functions library
+pkg_moyo_homepage = https://github.com/dwango/moyo
+pkg_moyo_fetch = git
+pkg_moyo_repo = https://github.com/dwango/moyo
+pkg_moyo_commit = master
+
+PACKAGES += msgpack
+pkg_msgpack_name = msgpack
+pkg_msgpack_description = MessagePack (de)serializer implementation for Erlang
+pkg_msgpack_homepage = https://github.com/msgpack/msgpack-erlang
+pkg_msgpack_fetch = git
+pkg_msgpack_repo = https://github.com/msgpack/msgpack-erlang
+pkg_msgpack_commit = master
+
+PACKAGES += mu2
+pkg_mu2_name = mu2
+pkg_mu2_description = Erlang mutation testing tool
+pkg_mu2_homepage = https://github.com/ramsay-t/mu2
+pkg_mu2_fetch = git
+pkg_mu2_repo = https://github.com/ramsay-t/mu2
+pkg_mu2_commit = master
+
+PACKAGES += mustache
+pkg_mustache_name = mustache
+pkg_mustache_description = Mustache template engine for Erlang.
+pkg_mustache_homepage = https://github.com/mojombo/mustache.erl
+pkg_mustache_fetch = git
+pkg_mustache_repo = https://github.com/mojombo/mustache.erl
+pkg_mustache_commit = master
+
+PACKAGES += myproto
+pkg_myproto_name = myproto
+pkg_myproto_description = MySQL Server Protocol in Erlang
+pkg_myproto_homepage = https://github.com/altenwald/myproto
+pkg_myproto_fetch = git
+pkg_myproto_repo = https://github.com/altenwald/myproto
+pkg_myproto_commit = master
+
+PACKAGES += mysql
+pkg_mysql_name = mysql
+pkg_mysql_description = MySQL client library for Erlang/OTP
+pkg_mysql_homepage = https://github.com/mysql-otp/mysql-otp
+pkg_mysql_fetch = git
+pkg_mysql_repo = https://github.com/mysql-otp/mysql-otp
+pkg_mysql_commit = 1.5.1
+
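Most entries in this index track master, but a few pin a release tag instead: mysql above is pinned to 1.5.1 (and ranch further down to 1.2.1). A project can override an indexed ref without editing erlang.mk itself; a hedged sketch, reusing the tag pinned above:

    # Project Makefile overriding the indexed ref for mysql.
    DEPS = mysql

    # dep_<name>_commit overrides only the commit/tag from the index.
    dep_mysql_commit = 1.5.1

    # dep_<name> = <fetch> <repo> <ref> would replace the whole entry:
    # dep_mysql = git https://github.com/mysql-otp/mysql-otp 1.5.1

    include erlang.mk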
+PACKAGES += n2o
+pkg_n2o_name = n2o
+pkg_n2o_description = WebSocket Application Server
+pkg_n2o_homepage = https://github.com/5HT/n2o
+pkg_n2o_fetch = git
+pkg_n2o_repo = https://github.com/5HT/n2o
+pkg_n2o_commit = master
+
+PACKAGES += nat_upnp
+pkg_nat_upnp_name = nat_upnp
+pkg_nat_upnp_description = Erlang library to map an internal port to an external one using UPnP IGD
+pkg_nat_upnp_homepage = https://github.com/benoitc/nat_upnp
+pkg_nat_upnp_fetch = git
+pkg_nat_upnp_repo = https://github.com/benoitc/nat_upnp
+pkg_nat_upnp_commit = master
+
+PACKAGES += neo4j
+pkg_neo4j_name = neo4j
+pkg_neo4j_description = Erlang client library for Neo4J.
+pkg_neo4j_homepage = https://github.com/dmitriid/neo4j-erlang
+pkg_neo4j_fetch = git
+pkg_neo4j_repo = https://github.com/dmitriid/neo4j-erlang
+pkg_neo4j_commit = master
+
+PACKAGES += neotoma
+pkg_neotoma_name = neotoma
+pkg_neotoma_description = Erlang library and packrat parser-generator for parsing expression grammars.
+pkg_neotoma_homepage = https://github.com/seancribbs/neotoma
+pkg_neotoma_fetch = git
+pkg_neotoma_repo = https://github.com/seancribbs/neotoma
+pkg_neotoma_commit = master
+
+PACKAGES += newrelic
+pkg_newrelic_name = newrelic
+pkg_newrelic_description = Erlang library for sending metrics to New Relic
+pkg_newrelic_homepage = https://github.com/wooga/newrelic-erlang
+pkg_newrelic_fetch = git
+pkg_newrelic_repo = https://github.com/wooga/newrelic-erlang
+pkg_newrelic_commit = master
+
+PACKAGES += nifty
+pkg_nifty_name = nifty
+pkg_nifty_description = Erlang NIF wrapper generator
+pkg_nifty_homepage = https://github.com/parapluu/nifty
+pkg_nifty_fetch = git
+pkg_nifty_repo = https://github.com/parapluu/nifty
+pkg_nifty_commit = master
+
+PACKAGES += nitrogen_core
+pkg_nitrogen_core_name = nitrogen_core
+pkg_nitrogen_core_description = The core Nitrogen library.
+pkg_nitrogen_core_homepage = http://nitrogenproject.com/
+pkg_nitrogen_core_fetch = git
+pkg_nitrogen_core_repo = https://github.com/nitrogen/nitrogen_core
+pkg_nitrogen_core_commit = master
+
+PACKAGES += nkbase
+pkg_nkbase_name = nkbase
+pkg_nkbase_description = NkBASE distributed database
+pkg_nkbase_homepage = https://github.com/Nekso/nkbase
+pkg_nkbase_fetch = git
+pkg_nkbase_repo = https://github.com/Nekso/nkbase
+pkg_nkbase_commit = develop
+
+PACKAGES += nkdocker
+pkg_nkdocker_name = nkdocker
+pkg_nkdocker_description = Erlang Docker client
+pkg_nkdocker_homepage = https://github.com/Nekso/nkdocker
+pkg_nkdocker_fetch = git
+pkg_nkdocker_repo = https://github.com/Nekso/nkdocker
+pkg_nkdocker_commit = master
+
+PACKAGES += nkpacket
+pkg_nkpacket_name = nkpacket
+pkg_nkpacket_description = Generic Erlang transport layer
+pkg_nkpacket_homepage = https://github.com/Nekso/nkpacket
+pkg_nkpacket_fetch = git
+pkg_nkpacket_repo = https://github.com/Nekso/nkpacket
+pkg_nkpacket_commit = master
+
+PACKAGES += nksip
+pkg_nksip_name = nksip
+pkg_nksip_description = Erlang SIP application server
+pkg_nksip_homepage = https://github.com/kalta/nksip
+pkg_nksip_fetch = git
+pkg_nksip_repo = https://github.com/kalta/nksip
+pkg_nksip_commit = master
+
+PACKAGES += nodefinder
+pkg_nodefinder_name = nodefinder
+pkg_nodefinder_description = automatic node discovery via UDP multicast
+pkg_nodefinder_homepage = https://github.com/erlanger/nodefinder
+pkg_nodefinder_fetch = git
+pkg_nodefinder_repo = https://github.com/okeuday/nodefinder
+pkg_nodefinder_commit = master
+
+PACKAGES += nprocreg
+pkg_nprocreg_name = nprocreg
+pkg_nprocreg_description = Minimal Distributed Erlang Process Registry
+pkg_nprocreg_homepage = http://nitrogenproject.com/
+pkg_nprocreg_fetch = git
+pkg_nprocreg_repo = https://github.com/nitrogen/nprocreg
+pkg_nprocreg_commit = master
+
+PACKAGES += oauth
+pkg_oauth_name = oauth
+pkg_oauth_description = An Erlang OAuth 1.0 implementation
+pkg_oauth_homepage = https://github.com/tim/erlang-oauth
+pkg_oauth_fetch = git
+pkg_oauth_repo = https://github.com/tim/erlang-oauth
+pkg_oauth_commit = master
+
+PACKAGES += oauth2
+pkg_oauth2_name = oauth2
+pkg_oauth2_description = Erlang OAuth2 implementation
+pkg_oauth2_homepage = https://github.com/kivra/oauth2
+pkg_oauth2_fetch = git
+pkg_oauth2_repo = https://github.com/kivra/oauth2
+pkg_oauth2_commit = master
+
+PACKAGES += observer_cli
+pkg_observer_cli_name = observer_cli
+pkg_observer_cli_description = Visualize Erlang/Elixir Nodes On The Command Line
+pkg_observer_cli_homepage = http://zhongwencool.github.io/observer_cli
+pkg_observer_cli_fetch = git
+pkg_observer_cli_repo = https://github.com/zhongwencool/observer_cli
+pkg_observer_cli_commit = master
+
+PACKAGES += octopus
+pkg_octopus_name = octopus
+pkg_octopus_description = Small and flexible pool manager written in Erlang
+pkg_octopus_homepage = https://github.com/erlangbureau/octopus
+pkg_octopus_fetch = git
+pkg_octopus_repo = https://github.com/erlangbureau/octopus
+pkg_octopus_commit = master
+
+PACKAGES += of_protocol
+pkg_of_protocol_name = of_protocol
+pkg_of_protocol_description = OpenFlow Protocol Library for Erlang
+pkg_of_protocol_homepage = https://github.com/FlowForwarding/of_protocol
+pkg_of_protocol_fetch = git
+pkg_of_protocol_repo = https://github.com/FlowForwarding/of_protocol
+pkg_of_protocol_commit = master
+
+PACKAGES += opencouch
+pkg_opencouch_name = couch
+pkg_opencouch_description = An embeddable document-oriented database compatible with Apache CouchDB
+pkg_opencouch_homepage = https://github.com/benoitc/opencouch
+pkg_opencouch_fetch = git
+pkg_opencouch_repo = https://github.com/benoitc/opencouch
+pkg_opencouch_commit = master
+
+PACKAGES += openflow
+pkg_openflow_name = openflow
+pkg_openflow_description = An OpenFlow controller written in pure erlang
+pkg_openflow_homepage = https://github.com/renatoaguiar/erlang-openflow
+pkg_openflow_fetch = git
+pkg_openflow_repo = https://github.com/renatoaguiar/erlang-openflow
+pkg_openflow_commit = master
+
+PACKAGES += openid
+pkg_openid_name = openid
+pkg_openid_description = Erlang OpenID
+pkg_openid_homepage = https://github.com/brendonh/erl_openid
+pkg_openid_fetch = git
+pkg_openid_repo = https://github.com/brendonh/erl_openid
+pkg_openid_commit = master
+
+PACKAGES += openpoker
+pkg_openpoker_name = openpoker
+pkg_openpoker_description = Genesis Texas hold'em Game Server
+pkg_openpoker_homepage = https://github.com/hpyhacking/openpoker
+pkg_openpoker_fetch = git
+pkg_openpoker_repo = https://github.com/hpyhacking/openpoker
+pkg_openpoker_commit = master
+
+PACKAGES += otpbp
+pkg_otpbp_name = otpbp
+pkg_otpbp_description = Parse transform for using new OTP functions in old Erlang/OTP releases (R15, R16, 17, 18, 19)
+pkg_otpbp_homepage = https://github.com/Ledest/otpbp
+pkg_otpbp_fetch = git
+pkg_otpbp_repo = https://github.com/Ledest/otpbp
+pkg_otpbp_commit = master
+
+PACKAGES += pal
+pkg_pal_name = pal
+pkg_pal_description = Pragmatic Authentication Library
+pkg_pal_homepage = https://github.com/manifest/pal
+pkg_pal_fetch = git
+pkg_pal_repo = https://github.com/manifest/pal
+pkg_pal_commit = master
+
+PACKAGES += parse_trans
+pkg_parse_trans_name = parse_trans
+pkg_parse_trans_description = Parse transform utilities for Erlang
+pkg_parse_trans_homepage = https://github.com/uwiger/parse_trans
+pkg_parse_trans_fetch = git
+pkg_parse_trans_repo = https://github.com/uwiger/parse_trans
+pkg_parse_trans_commit = master
+
+PACKAGES += parsexml
+pkg_parsexml_name = parsexml
+pkg_parsexml_description = Simple DOM XML parser with a convenient and very simple API
+pkg_parsexml_homepage = https://github.com/maxlapshin/parsexml
+pkg_parsexml_fetch = git
+pkg_parsexml_repo = https://github.com/maxlapshin/parsexml
+pkg_parsexml_commit = master
+
+PACKAGES += partisan
+pkg_partisan_name = partisan
+pkg_partisan_description = High-performance, high-scalability distributed computing with Erlang and Elixir.
+pkg_partisan_homepage = http://partisan.cloud
+pkg_partisan_fetch = git
+pkg_partisan_repo = https://github.com/lasp-lang/partisan
+pkg_partisan_commit = master
+
+PACKAGES += pegjs
+pkg_pegjs_name = pegjs
+pkg_pegjs_description = An implementation of PEG.js grammar for Erlang.
+pkg_pegjs_homepage = https://github.com/dmitriid/pegjs
+pkg_pegjs_fetch = git
+pkg_pegjs_repo = https://github.com/dmitriid/pegjs
+pkg_pegjs_commit = master
+
+PACKAGES += percept2
+pkg_percept2_name = percept2
+pkg_percept2_description = Concurrent profiling tool for Erlang
+pkg_percept2_homepage = https://github.com/huiqing/percept2
+pkg_percept2_fetch = git
+pkg_percept2_repo = https://github.com/huiqing/percept2
+pkg_percept2_commit = master
+
+PACKAGES += pgo
+pkg_pgo_name = pgo
+pkg_pgo_description = Erlang Postgres client and connection pool
+pkg_pgo_homepage = https://github.com/erleans/pgo.git
+pkg_pgo_fetch = git
+pkg_pgo_repo = https://github.com/erleans/pgo.git
+pkg_pgo_commit = master
+
+PACKAGES += pgsql
+pkg_pgsql_name = pgsql
+pkg_pgsql_description = Erlang PostgreSQL driver
+pkg_pgsql_homepage = https://github.com/semiocast/pgsql
+pkg_pgsql_fetch = git
+pkg_pgsql_repo = https://github.com/semiocast/pgsql
+pkg_pgsql_commit = master
+
+PACKAGES += pkgx
+pkg_pkgx_name = pkgx
+pkg_pkgx_description = Build .deb packages from Erlang releases
+pkg_pkgx_homepage = https://github.com/arjan/pkgx
+pkg_pkgx_fetch = git
+pkg_pkgx_repo = https://github.com/arjan/pkgx
+pkg_pkgx_commit = master
+
+PACKAGES += pkt
+pkg_pkt_name = pkt
+pkg_pkt_description = Erlang network protocol library
+pkg_pkt_homepage = https://github.com/msantos/pkt
+pkg_pkt_fetch = git
+pkg_pkt_repo = https://github.com/msantos/pkt
+pkg_pkt_commit = master
+
+PACKAGES += plain_fsm
+pkg_plain_fsm_name = plain_fsm
+pkg_plain_fsm_description = A behaviour/support library for writing plain Erlang FSMs.
+pkg_plain_fsm_homepage = https://github.com/uwiger/plain_fsm
+pkg_plain_fsm_fetch = git
+pkg_plain_fsm_repo = https://github.com/uwiger/plain_fsm
+pkg_plain_fsm_commit = master
+
+PACKAGES += plumtree
+pkg_plumtree_name = plumtree
+pkg_plumtree_description = Epidemic Broadcast Trees
+pkg_plumtree_homepage = https://github.com/helium/plumtree
+pkg_plumtree_fetch = git
+pkg_plumtree_repo = https://github.com/helium/plumtree
+pkg_plumtree_commit = master
+
+PACKAGES += pmod_transform
+pkg_pmod_transform_name = pmod_transform
+pkg_pmod_transform_description = Parse transform for parameterized modules
+pkg_pmod_transform_homepage = https://github.com/erlang/pmod_transform
+pkg_pmod_transform_fetch = git
+pkg_pmod_transform_repo = https://github.com/erlang/pmod_transform
+pkg_pmod_transform_commit = master
+
+PACKAGES += pobox
+pkg_pobox_name = pobox
+pkg_pobox_description = External buffer processes to protect against mailbox overflow in Erlang
+pkg_pobox_homepage = https://github.com/ferd/pobox
+pkg_pobox_fetch = git
+pkg_pobox_repo = https://github.com/ferd/pobox
+pkg_pobox_commit = master
+
+PACKAGES += ponos
+pkg_ponos_name = ponos
+pkg_ponos_description = ponos is a simple yet powerful load generator written in erlang
+pkg_ponos_homepage = https://github.com/klarna/ponos
+pkg_ponos_fetch = git
+pkg_ponos_repo = https://github.com/klarna/ponos
+pkg_ponos_commit = master
+
+PACKAGES += poolboy
+pkg_poolboy_name = poolboy
+pkg_poolboy_description = A hunky Erlang worker pool factory
+pkg_poolboy_homepage = https://github.com/devinus/poolboy
+pkg_poolboy_fetch = git
+pkg_poolboy_repo = https://github.com/devinus/poolboy
+pkg_poolboy_commit = master
+
+PACKAGES += pooler
+pkg_pooler_name = pooler
+pkg_pooler_description = An OTP Process Pool Application
+pkg_pooler_homepage = https://github.com/seth/pooler
+pkg_pooler_fetch = git
+pkg_pooler_repo = https://github.com/seth/pooler
+pkg_pooler_commit = master
+
+PACKAGES += pqueue
+pkg_pqueue_name = pqueue
+pkg_pqueue_description = Erlang Priority Queues
+pkg_pqueue_homepage = https://github.com/okeuday/pqueue
+pkg_pqueue_fetch = git
+pkg_pqueue_repo = https://github.com/okeuday/pqueue
+pkg_pqueue_commit = master
+
+PACKAGES += procket
+pkg_procket_name = procket
+pkg_procket_description = Erlang interface to low level socket operations
+pkg_procket_homepage = http://blog.listincomprehension.com/search/label/procket
+pkg_procket_fetch = git
+pkg_procket_repo = https://github.com/msantos/procket
+pkg_procket_commit = master
+
+PACKAGES += prometheus
+pkg_prometheus_name = prometheus
+pkg_prometheus_description = Prometheus.io client in Erlang
+pkg_prometheus_homepage = https://github.com/deadtrickster/prometheus.erl
+pkg_prometheus_fetch = git
+pkg_prometheus_repo = https://github.com/deadtrickster/prometheus.erl
+pkg_prometheus_commit = master
+
+PACKAGES += prop
+pkg_prop_name = prop
+pkg_prop_description = An Erlang code scaffolding and generator system.
+pkg_prop_homepage = https://github.com/nuex/prop
+pkg_prop_fetch = git
+pkg_prop_repo = https://github.com/nuex/prop
+pkg_prop_commit = master
+
+PACKAGES += proper
+pkg_proper_name = proper
+pkg_proper_description = PropEr: a QuickCheck-inspired property-based testing tool for Erlang.
+pkg_proper_homepage = http://proper.softlab.ntua.gr
+pkg_proper_fetch = git
+pkg_proper_repo = https://github.com/manopapad/proper
+pkg_proper_commit = master
+
+PACKAGES += props
+pkg_props_name = props
+pkg_props_description = Property structure library
+pkg_props_homepage = https://github.com/greyarea/props
+pkg_props_fetch = git
+pkg_props_repo = https://github.com/greyarea/props
+pkg_props_commit = master
+
+PACKAGES += protobuffs
+pkg_protobuffs_name = protobuffs
+pkg_protobuffs_description = An implementation of Google's Protocol Buffers for Erlang, based on ngerakines/erlang_protobuffs.
+pkg_protobuffs_homepage = https://github.com/basho/erlang_protobuffs
+pkg_protobuffs_fetch = git
+pkg_protobuffs_repo = https://github.com/basho/erlang_protobuffs
+pkg_protobuffs_commit = master
+
+PACKAGES += psycho
+pkg_psycho_name = psycho
+pkg_psycho_description = HTTP server that provides a WSGI-like interface for applications and middleware.
+pkg_psycho_homepage = https://github.com/gar1t/psycho
+pkg_psycho_fetch = git
+pkg_psycho_repo = https://github.com/gar1t/psycho
+pkg_psycho_commit = master
+
+PACKAGES += purity
+pkg_purity_name = purity
+pkg_purity_description = A side-effect analyzer for Erlang
+pkg_purity_homepage = https://github.com/mpitid/purity
+pkg_purity_fetch = git
+pkg_purity_repo = https://github.com/mpitid/purity
+pkg_purity_commit = master
+
+PACKAGES += push_service
+pkg_push_service_name = push_service
+pkg_push_service_description = Push service
+pkg_push_service_homepage = https://github.com/hairyhum/push_service
+pkg_push_service_fetch = git
+pkg_push_service_repo = https://github.com/hairyhum/push_service
+pkg_push_service_commit = master
+
+PACKAGES += qdate
+pkg_qdate_name = qdate
+pkg_qdate_description = Date, time, and timezone parsing, formatting, and conversion for Erlang.
+pkg_qdate_homepage = https://github.com/choptastic/qdate
+pkg_qdate_fetch = git
+pkg_qdate_repo = https://github.com/choptastic/qdate
+pkg_qdate_commit = master
+
+PACKAGES += qrcode
+pkg_qrcode_name = qrcode
+pkg_qrcode_description = QR Code encoder in Erlang
+pkg_qrcode_homepage = https://github.com/komone/qrcode
+pkg_qrcode_fetch = git
+pkg_qrcode_repo = https://github.com/komone/qrcode
+pkg_qrcode_commit = master
+
+PACKAGES += quest
+pkg_quest_name = quest
+pkg_quest_description = Learn Erlang through this set of challenges. An interactive system for getting to know Erlang.
+pkg_quest_homepage = https://github.com/eriksoe/ErlangQuest
+pkg_quest_fetch = git
+pkg_quest_repo = https://github.com/eriksoe/ErlangQuest
+pkg_quest_commit = master
+
+PACKAGES += quickrand
+pkg_quickrand_name = quickrand
+pkg_quickrand_description = Quick Erlang Random Number Generation
+pkg_quickrand_homepage = https://github.com/okeuday/quickrand
+pkg_quickrand_fetch = git
+pkg_quickrand_repo = https://github.com/okeuday/quickrand
+pkg_quickrand_commit = master
+
+PACKAGES += rabbit
+pkg_rabbit_name = rabbit
+pkg_rabbit_description = RabbitMQ Server
+pkg_rabbit_homepage = https://www.rabbitmq.com/
+pkg_rabbit_fetch = git
+pkg_rabbit_repo = https://github.com/rabbitmq/rabbitmq-server.git
+pkg_rabbit_commit = master
+
+PACKAGES += rabbit_exchange_type_riak
+pkg_rabbit_exchange_type_riak_name = rabbit_exchange_type_riak
+pkg_rabbit_exchange_type_riak_description = Custom RabbitMQ exchange type for sticking messages in Riak
+pkg_rabbit_exchange_type_riak_homepage = https://github.com/jbrisbin/riak-exchange
+pkg_rabbit_exchange_type_riak_fetch = git
+pkg_rabbit_exchange_type_riak_repo = https://github.com/jbrisbin/riak-exchange
+pkg_rabbit_exchange_type_riak_commit = master
+
+PACKAGES += rack
+pkg_rack_name = rack
+pkg_rack_description = Rack handler for erlang
+pkg_rack_homepage = https://github.com/erlyvideo/rack
+pkg_rack_fetch = git
+pkg_rack_repo = https://github.com/erlyvideo/rack
+pkg_rack_commit = master
+
+PACKAGES += radierl
+pkg_radierl_name = radierl
+pkg_radierl_description = RADIUS protocol stack implemented in Erlang.
+pkg_radierl_homepage = https://github.com/vances/radierl
+pkg_radierl_fetch = git
+pkg_radierl_repo = https://github.com/vances/radierl
+pkg_radierl_commit = master
+
+PACKAGES += rafter
+pkg_rafter_name = rafter
+pkg_rafter_description = An Erlang library application which implements the Raft consensus protocol
+pkg_rafter_homepage = https://github.com/andrewjstone/rafter
+pkg_rafter_fetch = git
+pkg_rafter_repo = https://github.com/andrewjstone/rafter
+pkg_rafter_commit = master
+
+PACKAGES += ranch
+pkg_ranch_name = ranch
+pkg_ranch_description = Socket acceptor pool for TCP protocols.
+pkg_ranch_homepage = http://ninenines.eu
+pkg_ranch_fetch = git
+pkg_ranch_repo = https://github.com/ninenines/ranch
+pkg_ranch_commit = 1.2.1
+
+PACKAGES += rbeacon
+pkg_rbeacon_name = rbeacon
+pkg_rbeacon_description = LAN discovery and presence in Erlang.
+pkg_rbeacon_homepage = https://github.com/refuge/rbeacon
+pkg_rbeacon_fetch = git
+pkg_rbeacon_repo = https://github.com/refuge/rbeacon
+pkg_rbeacon_commit = master
+
+PACKAGES += rebar
+pkg_rebar_name = rebar
+pkg_rebar_description = Erlang build tool that makes it easy to compile and test Erlang applications, port drivers and releases.
+pkg_rebar_homepage = http://www.rebar3.org
+pkg_rebar_fetch = git
+pkg_rebar_repo = https://github.com/rebar/rebar3
+pkg_rebar_commit = master
+
+PACKAGES += rebus
+pkg_rebus_name = rebus
+pkg_rebus_description = A stupid simple, internal, pub/sub event bus written in- and for Erlang.
+pkg_rebus_homepage = https://github.com/olle/rebus
+pkg_rebus_fetch = git
+pkg_rebus_repo = https://github.com/olle/rebus
+pkg_rebus_commit = master
+
+PACKAGES += rec2json
+pkg_rec2json_name = rec2json
+pkg_rec2json_description = Compile erlang record definitions into modules to convert them to/from json easily.
+pkg_rec2json_homepage = https://github.com/lordnull/rec2json
+pkg_rec2json_fetch = git
+pkg_rec2json_repo = https://github.com/lordnull/rec2json
+pkg_rec2json_commit = master
+
+PACKAGES += recon
+pkg_recon_name = recon
+pkg_recon_description = Collection of functions and scripts to debug Erlang in production.
+pkg_recon_homepage = https://github.com/ferd/recon
+pkg_recon_fetch = git
+pkg_recon_repo = https://github.com/ferd/recon
+pkg_recon_commit = master
+
+PACKAGES += record_info
+pkg_record_info_name = record_info
+pkg_record_info_description = Convert between record and proplist
+pkg_record_info_homepage = https://github.com/bipthelin/erlang-record_info
+pkg_record_info_fetch = git
+pkg_record_info_repo = https://github.com/bipthelin/erlang-record_info
+pkg_record_info_commit = master
+
+PACKAGES += redgrid
+pkg_redgrid_name = redgrid
+pkg_redgrid_description = automatic Erlang node discovery via redis
+pkg_redgrid_homepage = https://github.com/jkvor/redgrid
+pkg_redgrid_fetch = git
+pkg_redgrid_repo = https://github.com/jkvor/redgrid
+pkg_redgrid_commit = master
+
+PACKAGES += redo
+pkg_redo_name = redo
+pkg_redo_description = pipelined erlang redis client
+pkg_redo_homepage = https://github.com/jkvor/redo
+pkg_redo_fetch = git
+pkg_redo_repo = https://github.com/jkvor/redo
+pkg_redo_commit = master
+
+PACKAGES += reload_mk
+pkg_reload_mk_name = reload_mk
+pkg_reload_mk_description = Live reload plugin for erlang.mk.
+pkg_reload_mk_homepage = https://github.com/bullno1/reload.mk
+pkg_reload_mk_fetch = git
+pkg_reload_mk_repo = https://github.com/bullno1/reload.mk
+pkg_reload_mk_commit = master
+
+PACKAGES += reltool_util
+pkg_reltool_util_name = reltool_util
+pkg_reltool_util_description = Erlang reltool utility functionality application
+pkg_reltool_util_homepage = https://github.com/okeuday/reltool_util
+pkg_reltool_util_fetch = git
+pkg_reltool_util_repo = https://github.com/okeuday/reltool_util
+pkg_reltool_util_commit = master
+
+PACKAGES += relx
+pkg_relx_name = relx
+pkg_relx_description = Sane, simple release creation for Erlang
+pkg_relx_homepage = https://github.com/erlware/relx
+pkg_relx_fetch = git
+pkg_relx_repo = https://github.com/erlware/relx
+pkg_relx_commit = master
+
+PACKAGES += resource_discovery
+pkg_resource_discovery_name = resource_discovery
+pkg_resource_discovery_description = An application used to dynamically discover resources present in an Erlang node cluster.
+pkg_resource_discovery_homepage = http://erlware.org/
+pkg_resource_discovery_fetch = git
+pkg_resource_discovery_repo = https://github.com/erlware/resource_discovery
+pkg_resource_discovery_commit = master
+
+PACKAGES += restc
+pkg_restc_name = restc
+pkg_restc_description = Erlang REST client
+pkg_restc_homepage = https://github.com/kivra/restclient
+pkg_restc_fetch = git
+pkg_restc_repo = https://github.com/kivra/restclient
+pkg_restc_commit = master
+
+PACKAGES += rfc4627_jsonrpc
+pkg_rfc4627_jsonrpc_name = rfc4627_jsonrpc
+pkg_rfc4627_jsonrpc_description = Erlang RFC4627 (JSON) codec and JSON-RPC server implementation.
+pkg_rfc4627_jsonrpc_homepage = https://github.com/tonyg/erlang-rfc4627
+pkg_rfc4627_jsonrpc_fetch = git
+pkg_rfc4627_jsonrpc_repo = https://github.com/tonyg/erlang-rfc4627
+pkg_rfc4627_jsonrpc_commit = master
+
+PACKAGES += riak_control
+pkg_riak_control_name = riak_control
+pkg_riak_control_description = Webmachine-based administration interface for Riak.
+pkg_riak_control_homepage = https://github.com/basho/riak_control
+pkg_riak_control_fetch = git
+pkg_riak_control_repo = https://github.com/basho/riak_control
+pkg_riak_control_commit = master
+
+PACKAGES += riak_core
+pkg_riak_core_name = riak_core
+pkg_riak_core_description = Distributed systems infrastructure used by Riak.
+pkg_riak_core_homepage = https://github.com/basho/riak_core
+pkg_riak_core_fetch = git
+pkg_riak_core_repo = https://github.com/basho/riak_core
+pkg_riak_core_commit = master
+
+PACKAGES += riak_dt
+pkg_riak_dt_name = riak_dt
+pkg_riak_dt_description = Convergent replicated datatypes in Erlang
+pkg_riak_dt_homepage = https://github.com/basho/riak_dt
+pkg_riak_dt_fetch = git
+pkg_riak_dt_repo = https://github.com/basho/riak_dt
+pkg_riak_dt_commit = master
+
+PACKAGES += riak_ensemble
+pkg_riak_ensemble_name = riak_ensemble
+pkg_riak_ensemble_description = Multi-Paxos framework in Erlang
+pkg_riak_ensemble_homepage = https://github.com/basho/riak_ensemble
+pkg_riak_ensemble_fetch = git
+pkg_riak_ensemble_repo = https://github.com/basho/riak_ensemble
+pkg_riak_ensemble_commit = master
+
+PACKAGES += riak_kv
+pkg_riak_kv_name = riak_kv
+pkg_riak_kv_description = Riak Key/Value Store
+pkg_riak_kv_homepage = https://github.com/basho/riak_kv
+pkg_riak_kv_fetch = git
+pkg_riak_kv_repo = https://github.com/basho/riak_kv
+pkg_riak_kv_commit = master
+
+PACKAGES += riak_pg
+pkg_riak_pg_name = riak_pg
+pkg_riak_pg_description = Distributed process groups with riak_core.
+pkg_riak_pg_homepage = https://github.com/cmeiklejohn/riak_pg
+pkg_riak_pg_fetch = git
+pkg_riak_pg_repo = https://github.com/cmeiklejohn/riak_pg
+pkg_riak_pg_commit = master
+
+PACKAGES += riak_pipe
+pkg_riak_pipe_name = riak_pipe
+pkg_riak_pipe_description = Riak Pipelines
+pkg_riak_pipe_homepage = https://github.com/basho/riak_pipe
+pkg_riak_pipe_fetch = git
+pkg_riak_pipe_repo = https://github.com/basho/riak_pipe
+pkg_riak_pipe_commit = master
+
+PACKAGES += riak_sysmon
+pkg_riak_sysmon_name = riak_sysmon
+pkg_riak_sysmon_description = Simple OTP app for managing Erlang VM system_monitor event messages
+pkg_riak_sysmon_homepage = https://github.com/basho/riak_sysmon
+pkg_riak_sysmon_fetch = git
+pkg_riak_sysmon_repo = https://github.com/basho/riak_sysmon
+pkg_riak_sysmon_commit = master
+
+PACKAGES += riak_test
+pkg_riak_test_name = riak_test
+pkg_riak_test_description = I'm in your cluster, testing your riaks
+pkg_riak_test_homepage = https://github.com/basho/riak_test
+pkg_riak_test_fetch = git
+pkg_riak_test_repo = https://github.com/basho/riak_test
+pkg_riak_test_commit = master
+
+PACKAGES += riakc
+pkg_riakc_name = riakc
+pkg_riakc_description = Erlang clients for Riak.
+pkg_riakc_homepage = https://github.com/basho/riak-erlang-client
+pkg_riakc_fetch = git
+pkg_riakc_repo = https://github.com/basho/riak-erlang-client
+pkg_riakc_commit = master
+
+PACKAGES += riakhttpc
+pkg_riakhttpc_name = riakhttpc
+pkg_riakhttpc_description = Riak Erlang client using the HTTP interface
+pkg_riakhttpc_homepage = https://github.com/basho/riak-erlang-http-client
+pkg_riakhttpc_fetch = git
+pkg_riakhttpc_repo = https://github.com/basho/riak-erlang-http-client
+pkg_riakhttpc_commit = master
+
+PACKAGES += riaknostic
+pkg_riaknostic_name = riaknostic
+pkg_riaknostic_description = A diagnostic tool for Riak installations, to find common errors asap
+pkg_riaknostic_homepage = https://github.com/basho/riaknostic
+pkg_riaknostic_fetch = git
+pkg_riaknostic_repo = https://github.com/basho/riaknostic
+pkg_riaknostic_commit = master
+
+PACKAGES += riakpool
+pkg_riakpool_name = riakpool
+pkg_riakpool_description = erlang riak client pool
+pkg_riakpool_homepage = https://github.com/dweldon/riakpool
+pkg_riakpool_fetch = git
+pkg_riakpool_repo = https://github.com/dweldon/riakpool
+pkg_riakpool_commit = master
+
+PACKAGES += rivus_cep
+pkg_rivus_cep_name = rivus_cep
+pkg_rivus_cep_description = Complex event processing in Erlang
+pkg_rivus_cep_homepage = https://github.com/vascokk/rivus_cep
+pkg_rivus_cep_fetch = git
+pkg_rivus_cep_repo = https://github.com/vascokk/rivus_cep
+pkg_rivus_cep_commit = master
+
+PACKAGES += rlimit
+pkg_rlimit_name = rlimit
+pkg_rlimit_description = Magnus Klaar's rate limiter code from etorrent
+pkg_rlimit_homepage = https://github.com/jlouis/rlimit
+pkg_rlimit_fetch = git
+pkg_rlimit_repo = https://github.com/jlouis/rlimit
+pkg_rlimit_commit = master
+
+PACKAGES += rust_mk
+pkg_rust_mk_name = rust_mk
+pkg_rust_mk_description = Build Rust crates in an Erlang application
+pkg_rust_mk_homepage = https://github.com/goertzenator/rust.mk
+pkg_rust_mk_fetch = git
+pkg_rust_mk_repo = https://github.com/goertzenator/rust.mk
+pkg_rust_mk_commit = master
+
+PACKAGES += safetyvalve
+pkg_safetyvalve_name = safetyvalve
+pkg_safetyvalve_description = A safety valve for your erlang node
+pkg_safetyvalve_homepage = https://github.com/jlouis/safetyvalve
+pkg_safetyvalve_fetch = git
+pkg_safetyvalve_repo = https://github.com/jlouis/safetyvalve
+pkg_safetyvalve_commit = master
+
+PACKAGES += seestar
+pkg_seestar_name = seestar
+pkg_seestar_description = The Erlang client for Cassandra 1.2+ binary protocol
+pkg_seestar_homepage = https://github.com/iamaleksey/seestar
+pkg_seestar_fetch = git
+pkg_seestar_repo = https://github.com/iamaleksey/seestar
+pkg_seestar_commit = master
+
+PACKAGES += service
+pkg_service_name = service
+pkg_service_description = A minimal Erlang behavior for creating CloudI internal services
+pkg_service_homepage = http://cloudi.org/
+pkg_service_fetch = git
+pkg_service_repo = https://github.com/CloudI/service
+pkg_service_commit = master
+
+PACKAGES += setup
+pkg_setup_name = setup
+pkg_setup_description = Generic setup utility for Erlang-based systems
+pkg_setup_homepage = https://github.com/uwiger/setup
+pkg_setup_fetch = git
+pkg_setup_repo = https://github.com/uwiger/setup
+pkg_setup_commit = master
+
+PACKAGES += sext
+pkg_sext_name = sext
+pkg_sext_description = Sortable Erlang Term Serialization
+pkg_sext_homepage = https://github.com/uwiger/sext
+pkg_sext_fetch = git
+pkg_sext_repo = https://github.com/uwiger/sext
+pkg_sext_commit = master
+
+PACKAGES += sfmt
+pkg_sfmt_name = sfmt
+pkg_sfmt_description = SFMT pseudo random number generator for Erlang.
+pkg_sfmt_homepage = https://github.com/jj1bdx/sfmt-erlang
+pkg_sfmt_fetch = git
+pkg_sfmt_repo = https://github.com/jj1bdx/sfmt-erlang
+pkg_sfmt_commit = master
+
+PACKAGES += sgte
+pkg_sgte_name = sgte
+pkg_sgte_description = A simple Erlang Template Engine
+pkg_sgte_homepage = https://github.com/filippo/sgte
+pkg_sgte_fetch = git
+pkg_sgte_repo = https://github.com/filippo/sgte
+pkg_sgte_commit = master
+
+PACKAGES += sheriff
+pkg_sheriff_name = sheriff
+pkg_sheriff_description = Parse transform for type based validation.
+pkg_sheriff_homepage = http://ninenines.eu
+pkg_sheriff_fetch = git
+pkg_sheriff_repo = https://github.com/extend/sheriff
+pkg_sheriff_commit = master
+
+PACKAGES += shotgun
+pkg_shotgun_name = shotgun
+pkg_shotgun_description = better than just a gun
+pkg_shotgun_homepage = https://github.com/inaka/shotgun
+pkg_shotgun_fetch = git
+pkg_shotgun_repo = https://github.com/inaka/shotgun
+pkg_shotgun_commit = master
+
+PACKAGES += sidejob
+pkg_sidejob_name = sidejob
+pkg_sidejob_description = Parallel worker and capacity limiting library for Erlang
+pkg_sidejob_homepage = https://github.com/basho/sidejob
+pkg_sidejob_fetch = git
+pkg_sidejob_repo = https://github.com/basho/sidejob
+pkg_sidejob_commit = master
+
+PACKAGES += sieve
+pkg_sieve_name = sieve
+pkg_sieve_description = sieve is a simple TCP routing proxy (layer 7) in erlang
+pkg_sieve_homepage = https://github.com/benoitc/sieve
+pkg_sieve_fetch = git
+pkg_sieve_repo = https://github.com/benoitc/sieve
+pkg_sieve_commit = master
+
+PACKAGES += sighandler
+pkg_sighandler_name = sighandler
+pkg_sighandler_description = Handle UNIX signals in Erlang
+pkg_sighandler_homepage = https://github.com/jkingsbery/sighandler
+pkg_sighandler_fetch = git
+pkg_sighandler_repo = https://github.com/jkingsbery/sighandler
+pkg_sighandler_commit = master
+
+PACKAGES += simhash
+pkg_simhash_name = simhash
+pkg_simhash_description = Simhashing for Erlang -- hashing algorithm to find near-duplicates in binary data.
+pkg_simhash_homepage = https://github.com/ferd/simhash
+pkg_simhash_fetch = git
+pkg_simhash_repo = https://github.com/ferd/simhash
+pkg_simhash_commit = master
+
+PACKAGES += simple_bridge
+pkg_simple_bridge_name = simple_bridge
+pkg_simple_bridge_description = A simple, standardized interface library to Erlang HTTP Servers.
+pkg_simple_bridge_homepage = https://github.com/nitrogen/simple_bridge
+pkg_simple_bridge_fetch = git
+pkg_simple_bridge_repo = https://github.com/nitrogen/simple_bridge
+pkg_simple_bridge_commit = master
+
+PACKAGES += simple_oauth2
+pkg_simple_oauth2_name = simple_oauth2
+pkg_simple_oauth2_description = Simple erlang OAuth2 client module for any http server framework (Google, Facebook, Yandex, Vkontakte are preconfigured)
+pkg_simple_oauth2_homepage = https://github.com/virtan/simple_oauth2
+pkg_simple_oauth2_fetch = git
+pkg_simple_oauth2_repo = https://github.com/virtan/simple_oauth2
+pkg_simple_oauth2_commit = master
+
+PACKAGES += skel
+pkg_skel_name = skel
+pkg_skel_description = A Streaming Process-based Skeleton Library for Erlang
+pkg_skel_homepage = https://github.com/ParaPhrase/skel
+pkg_skel_fetch = git
+pkg_skel_repo = https://github.com/ParaPhrase/skel
+pkg_skel_commit = master
+
+PACKAGES += slack
+pkg_slack_name = slack
+pkg_slack_description = Minimal slack notification OTP library.
+pkg_slack_homepage = https://github.com/DonBranson/slack
+pkg_slack_fetch = git
+pkg_slack_repo = https://github.com/DonBranson/slack.git
+pkg_slack_commit = master
+
+PACKAGES += smother
+pkg_smother_name = smother
+pkg_smother_description = Extended code coverage metrics for Erlang.
+pkg_smother_homepage = https://ramsay-t.github.io/Smother/
+pkg_smother_fetch = git
+pkg_smother_repo = https://github.com/ramsay-t/Smother
+pkg_smother_commit = master
+
+PACKAGES += snappyer
+pkg_snappyer_name = snappyer
+pkg_snappyer_description = Snappy as nif for Erlang
+pkg_snappyer_homepage = https://github.com/zmstone/snappyer
+pkg_snappyer_fetch = git
+pkg_snappyer_repo = https://github.com/zmstone/snappyer.git
+pkg_snappyer_commit = master
+
+PACKAGES += social
+pkg_social_name = social
+pkg_social_description = Cowboy handler for social login via OAuth2 providers
+pkg_social_homepage = https://github.com/dvv/social
+pkg_social_fetch = git
+pkg_social_repo = https://github.com/dvv/social
+pkg_social_commit = master
+
+PACKAGES += spapi_router
+pkg_spapi_router_name = spapi_router
+pkg_spapi_router_description = Partially-connected Erlang clustering
+pkg_spapi_router_homepage = https://github.com/spilgames/spapi-router
+pkg_spapi_router_fetch = git
+pkg_spapi_router_repo = https://github.com/spilgames/spapi-router
+pkg_spapi_router_commit = master
+
+PACKAGES += sqerl
+pkg_sqerl_name = sqerl
+pkg_sqerl_description = An Erlang-flavoured SQL DSL
+pkg_sqerl_homepage = https://github.com/hairyhum/sqerl
+pkg_sqerl_fetch = git
+pkg_sqerl_repo = https://github.com/hairyhum/sqerl
+pkg_sqerl_commit = master
+
+PACKAGES += srly
+pkg_srly_name = srly
+pkg_srly_description = Native Erlang Unix serial interface
+pkg_srly_homepage = https://github.com/msantos/srly
+pkg_srly_fetch = git
+pkg_srly_repo = https://github.com/msantos/srly
+pkg_srly_commit = master
+
+PACKAGES += sshrpc
+pkg_sshrpc_name = sshrpc
+pkg_sshrpc_description = Erlang SSH RPC module (experimental)
+pkg_sshrpc_homepage = https://github.com/jj1bdx/sshrpc
+pkg_sshrpc_fetch = git
+pkg_sshrpc_repo = https://github.com/jj1bdx/sshrpc
+pkg_sshrpc_commit = master
+
+PACKAGES += stable
+pkg_stable_name = stable
+pkg_stable_description = Library of assorted helpers for Cowboy web server.
+pkg_stable_homepage = https://github.com/dvv/stable
+pkg_stable_fetch = git
+pkg_stable_repo = https://github.com/dvv/stable
+pkg_stable_commit = master
+
+PACKAGES += statebox
+pkg_statebox_name = statebox
+pkg_statebox_description = Erlang state monad with merge/conflict-resolution capabilities. Useful for Riak.
+pkg_statebox_homepage = https://github.com/mochi/statebox
+pkg_statebox_fetch = git
+pkg_statebox_repo = https://github.com/mochi/statebox
+pkg_statebox_commit = master
+
+PACKAGES += statebox_riak
+pkg_statebox_riak_name = statebox_riak
+pkg_statebox_riak_description = Convenience library that makes it easier to use statebox with riak, extracted from best practices in our production code at Mochi Media.
+pkg_statebox_riak_homepage = https://github.com/mochi/statebox_riak
+pkg_statebox_riak_fetch = git
+pkg_statebox_riak_repo = https://github.com/mochi/statebox_riak
+pkg_statebox_riak_commit = master
+
+PACKAGES += statman
+pkg_statman_name = statman
+pkg_statman_description = Efficiently collect massive volumes of metrics inside the Erlang VM
+pkg_statman_homepage = https://github.com/knutin/statman
+pkg_statman_fetch = git
+pkg_statman_repo = https://github.com/knutin/statman
+pkg_statman_commit = master
+
+PACKAGES += statsderl
+pkg_statsderl_name = statsderl
+pkg_statsderl_description = StatsD client (erlang)
+pkg_statsderl_homepage = https://github.com/lpgauth/statsderl
+pkg_statsderl_fetch = git
+pkg_statsderl_repo = https://github.com/lpgauth/statsderl
+pkg_statsderl_commit = master
+
+PACKAGES += stdinout_pool
+pkg_stdinout_pool_name = stdinout_pool
+pkg_stdinout_pool_description = stdinout_pool : stuff goes in, stuff goes out. there's never any miscommunication.
+pkg_stdinout_pool_homepage = https://github.com/mattsta/erlang-stdinout-pool
+pkg_stdinout_pool_fetch = git
+pkg_stdinout_pool_repo = https://github.com/mattsta/erlang-stdinout-pool
+pkg_stdinout_pool_commit = master
+
+PACKAGES += stockdb
+pkg_stockdb_name = stockdb
+pkg_stockdb_description = Database for storing Stock Exchange quotes in erlang
+pkg_stockdb_homepage = https://github.com/maxlapshin/stockdb
+pkg_stockdb_fetch = git
+pkg_stockdb_repo = https://github.com/maxlapshin/stockdb
+pkg_stockdb_commit = master
+
+PACKAGES += stripe
+pkg_stripe_name = stripe
+pkg_stripe_description = Erlang interface to the stripe.com API
+pkg_stripe_homepage = https://github.com/mattsta/stripe-erlang
+pkg_stripe_fetch = git
+pkg_stripe_repo = https://github.com/mattsta/stripe-erlang
+pkg_stripe_commit = v1
+
+PACKAGES += subproc
+pkg_subproc_name = subproc
+pkg_subproc_description = unix subprocess manager with {active,once|false} modes
+pkg_subproc_homepage = http://dozzie.jarowit.net/trac/wiki/subproc
+pkg_subproc_fetch = git
+pkg_subproc_repo = https://github.com/dozzie/subproc
+pkg_subproc_commit = v0.1.0
+
+PACKAGES += supervisor3
+pkg_supervisor3_name = supervisor3
+pkg_supervisor3_description = OTP supervisor with additional strategies
+pkg_supervisor3_homepage = https://github.com/klarna/supervisor3
+pkg_supervisor3_fetch = git
+pkg_supervisor3_repo = https://github.com/klarna/supervisor3.git
+pkg_supervisor3_commit = master
+
+PACKAGES += surrogate
+pkg_surrogate_name = surrogate
+pkg_surrogate_description = Proxy server written in erlang. Supports reverse proxy load balancing and forward proxy with http (including CONNECT), socks4, socks5, and transparent proxy modes.
+pkg_surrogate_homepage = https://github.com/skruger/Surrogate
+pkg_surrogate_fetch = git
+pkg_surrogate_repo = https://github.com/skruger/Surrogate
+pkg_surrogate_commit = master
+
+PACKAGES += swab
+pkg_swab_name = swab
+pkg_swab_description = General purpose buffer handling module
+pkg_swab_homepage = https://github.com/crownedgrouse/swab
+pkg_swab_fetch = git
+pkg_swab_repo = https://github.com/crownedgrouse/swab
+pkg_swab_commit = master
+
+PACKAGES += swarm
+pkg_swarm_name = swarm
+pkg_swarm_description = Fast and simple acceptor pool for Erlang
+pkg_swarm_homepage = https://github.com/jeremey/swarm
+pkg_swarm_fetch = git
+pkg_swarm_repo = https://github.com/jeremey/swarm
+pkg_swarm_commit = master
+
+PACKAGES += switchboard
+pkg_switchboard_name = switchboard
+pkg_switchboard_description = A framework for processing email using worker plugins.
+pkg_switchboard_homepage = https://github.com/thusfresh/switchboard
+pkg_switchboard_fetch = git
+pkg_switchboard_repo = https://github.com/thusfresh/switchboard
+pkg_switchboard_commit = master
+
+PACKAGES += syn
+pkg_syn_name = syn
+pkg_syn_description = A global Process Registry and Process Group manager for Erlang.
+pkg_syn_homepage = https://github.com/ostinelli/syn
+pkg_syn_fetch = git
+pkg_syn_repo = https://github.com/ostinelli/syn
+pkg_syn_commit = master
+
+PACKAGES += sync
+pkg_sync_name = sync
+pkg_sync_description = On-the-fly recompiling and reloading in Erlang.
+pkg_sync_homepage = https://github.com/rustyio/sync
+pkg_sync_fetch = git
+pkg_sync_repo = https://github.com/rustyio/sync
+pkg_sync_commit = master
+
+PACKAGES += syntaxerl
+pkg_syntaxerl_name = syntaxerl
+pkg_syntaxerl_description = Syntax checker for Erlang
+pkg_syntaxerl_homepage = https://github.com/ten0s/syntaxerl
+pkg_syntaxerl_fetch = git
+pkg_syntaxerl_repo = https://github.com/ten0s/syntaxerl
+pkg_syntaxerl_commit = master
+
+PACKAGES += syslog
+pkg_syslog_name = syslog
+pkg_syslog_description = Erlang port driver for interacting with syslog via syslog(3)
+pkg_syslog_homepage = https://github.com/Vagabond/erlang-syslog
+pkg_syslog_fetch = git
+pkg_syslog_repo = https://github.com/Vagabond/erlang-syslog
+pkg_syslog_commit = master
+
+PACKAGES += taskforce
+pkg_taskforce_name = taskforce
+pkg_taskforce_description = Erlang worker pools for controlled parallelisation of arbitrary tasks.
+pkg_taskforce_homepage = https://github.com/g-andrade/taskforce
+pkg_taskforce_fetch = git
+pkg_taskforce_repo = https://github.com/g-andrade/taskforce
+pkg_taskforce_commit = master
+
+PACKAGES += tddreloader
+pkg_tddreloader_name = tddreloader
+pkg_tddreloader_description = Shell utility for recompiling, reloading, and testing code as it changes
+pkg_tddreloader_homepage = https://github.com/version2beta/tddreloader
+pkg_tddreloader_fetch = git
+pkg_tddreloader_repo = https://github.com/version2beta/tddreloader
+pkg_tddreloader_commit = master
+
+PACKAGES += tempo
+pkg_tempo_name = tempo
+pkg_tempo_description = NIF-based date and time parsing and formatting for Erlang.
+pkg_tempo_homepage = https://github.com/selectel/tempo
+pkg_tempo_fetch = git
+pkg_tempo_repo = https://github.com/selectel/tempo
+pkg_tempo_commit = master
+
+PACKAGES += ticktick
+pkg_ticktick_name = ticktick
+pkg_ticktick_description = Ticktick is an ID generator for messaging services.
+pkg_ticktick_homepage = https://github.com/ericliang/ticktick
+pkg_ticktick_fetch = git
+pkg_ticktick_repo = https://github.com/ericliang/ticktick
+pkg_ticktick_commit = master
+
+PACKAGES += tinymq
+pkg_tinymq_name = tinymq
+pkg_tinymq_description = TinyMQ - a diminutive, in-memory message queue
+pkg_tinymq_homepage = https://github.com/ChicagoBoss/tinymq
+pkg_tinymq_fetch = git
+pkg_tinymq_repo = https://github.com/ChicagoBoss/tinymq
+pkg_tinymq_commit = master
+
+PACKAGES += tinymt
+pkg_tinymt_name = tinymt
+pkg_tinymt_description = TinyMT pseudo random number generator for Erlang.
+pkg_tinymt_homepage = https://github.com/jj1bdx/tinymt-erlang
+pkg_tinymt_fetch = git
+pkg_tinymt_repo = https://github.com/jj1bdx/tinymt-erlang
+pkg_tinymt_commit = master
+
+PACKAGES += tirerl
+pkg_tirerl_name = tirerl
+pkg_tirerl_description = Erlang interface to Elastic Search
+pkg_tirerl_homepage = https://github.com/inaka/tirerl
+pkg_tirerl_fetch = git
+pkg_tirerl_repo = https://github.com/inaka/tirerl
+pkg_tirerl_commit = master
+
+PACKAGES += toml
+pkg_toml_name = toml
+pkg_toml_description = TOML (0.4.0) config parser
+pkg_toml_homepage = http://dozzie.jarowit.net/trac/wiki/TOML
+pkg_toml_fetch = git
+pkg_toml_repo = https://github.com/dozzie/toml
+pkg_toml_commit = v0.2.0
+
+PACKAGES += traffic_tools
+pkg_traffic_tools_name = traffic_tools
+pkg_traffic_tools_description = Simple traffic limiting library
+pkg_traffic_tools_homepage = https://github.com/systra/traffic_tools
+pkg_traffic_tools_fetch = git
+pkg_traffic_tools_repo = https://github.com/systra/traffic_tools
+pkg_traffic_tools_commit = master
+
+PACKAGES += trails
+pkg_trails_name = trails
+pkg_trails_description = A couple of improvements over Cowboy Routes
+pkg_trails_homepage = http://inaka.github.io/cowboy-trails/
+pkg_trails_fetch = git
+pkg_trails_repo = https://github.com/inaka/cowboy-trails
+pkg_trails_commit = master
+
+PACKAGES += trane
+pkg_trane_name = trane
+pkg_trane_description = SAX-style parser for possibly broken HTML, in Erlang
+pkg_trane_homepage = https://github.com/massemanet/trane
+pkg_trane_fetch = git
+pkg_trane_repo = https://github.com/massemanet/trane
+pkg_trane_commit = master
+
+PACKAGES += transit
+pkg_transit_name = transit
+pkg_transit_description = transit format for erlang
+pkg_transit_homepage = https://github.com/isaiah/transit-erlang
+pkg_transit_fetch = git
+pkg_transit_repo = https://github.com/isaiah/transit-erlang
+pkg_transit_commit = master
+
+PACKAGES += trie
+pkg_trie_name = trie
+pkg_trie_description = Erlang Trie Implementation
+pkg_trie_homepage = https://github.com/okeuday/trie
+pkg_trie_fetch = git
+pkg_trie_repo = https://github.com/okeuday/trie
+pkg_trie_commit = master
+
+PACKAGES += triq
+pkg_triq_name = triq
+pkg_triq_description = Trifork QuickCheck
+pkg_triq_homepage = https://triq.gitlab.io
+pkg_triq_fetch = git
+pkg_triq_repo = https://gitlab.com/triq/triq.git
+pkg_triq_commit = master
+
+PACKAGES += tunctl
+pkg_tunctl_name = tunctl
+pkg_tunctl_description = Erlang TUN/TAP interface
+pkg_tunctl_homepage = https://github.com/msantos/tunctl
+pkg_tunctl_fetch = git
+pkg_tunctl_repo = https://github.com/msantos/tunctl
+pkg_tunctl_commit = master
+
+PACKAGES += twerl
+pkg_twerl_name = twerl
+pkg_twerl_description = Erlang client for the Twitter Streaming API
+pkg_twerl_homepage = https://github.com/lucaspiller/twerl
+pkg_twerl_fetch = git
+pkg_twerl_repo = https://github.com/lucaspiller/twerl
+pkg_twerl_commit = oauth
+
+PACKAGES += twitter_erlang
+pkg_twitter_erlang_name = twitter_erlang
+pkg_twitter_erlang_description = An Erlang twitter client
+pkg_twitter_erlang_homepage = https://github.com/ngerakines/erlang_twitter
+pkg_twitter_erlang_fetch = git
+pkg_twitter_erlang_repo = https://github.com/ngerakines/erlang_twitter
+pkg_twitter_erlang_commit = master
+
+PACKAGES += ucol_nif
+pkg_ucol_nif_name = ucol_nif
+pkg_ucol_nif_description = ICU based collation Erlang module
+pkg_ucol_nif_homepage = https://github.com/refuge/ucol_nif
+pkg_ucol_nif_fetch = git
+pkg_ucol_nif_repo = https://github.com/refuge/ucol_nif
+pkg_ucol_nif_commit = master
+
+PACKAGES += unicorn
+pkg_unicorn_name = unicorn
+pkg_unicorn_description = Generic configuration server
+pkg_unicorn_homepage = https://github.com/shizzard/unicorn
+pkg_unicorn_fetch = git
+pkg_unicorn_repo = https://github.com/shizzard/unicorn
+pkg_unicorn_commit = master
+
+PACKAGES += unsplit
+pkg_unsplit_name = unsplit
+pkg_unsplit_description = Resolves conflicts in Mnesia after network splits
+pkg_unsplit_homepage = https://github.com/uwiger/unsplit
+pkg_unsplit_fetch = git
+pkg_unsplit_repo = https://github.com/uwiger/unsplit
+pkg_unsplit_commit = master
+
+PACKAGES += uuid
+pkg_uuid_name = uuid
+pkg_uuid_description = Erlang UUID Implementation
+pkg_uuid_homepage = https://github.com/okeuday/uuid
+pkg_uuid_fetch = git
+pkg_uuid_repo = https://github.com/okeuday/uuid
+pkg_uuid_commit = master
+
+PACKAGES += ux
+pkg_ux_name = ux
+pkg_ux_description = Unicode eXtension for Erlang (Strings, Collation)
+pkg_ux_homepage = https://github.com/erlang-unicode/ux
+pkg_ux_fetch = git
+pkg_ux_repo = https://github.com/erlang-unicode/ux
+pkg_ux_commit = master
+
+PACKAGES += vert
+pkg_vert_name = vert
+pkg_vert_description = Erlang binding to libvirt virtualization API
+pkg_vert_homepage = https://github.com/msantos/erlang-libvirt
+pkg_vert_fetch = git
+pkg_vert_repo = https://github.com/msantos/erlang-libvirt
+pkg_vert_commit = master
+
+PACKAGES += verx
+pkg_verx_name = verx
+pkg_verx_description = Erlang implementation of the libvirtd remote protocol
+pkg_verx_homepage = https://github.com/msantos/verx
+pkg_verx_fetch = git
+pkg_verx_repo = https://github.com/msantos/verx
+pkg_verx_commit = master
+
+PACKAGES += vmq_acl
+pkg_vmq_acl_name = vmq_acl
+pkg_vmq_acl_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_acl_homepage = https://verne.mq/
+pkg_vmq_acl_fetch = git
+pkg_vmq_acl_repo = https://github.com/erlio/vmq_acl
+pkg_vmq_acl_commit = master
+
+PACKAGES += vmq_bridge
+pkg_vmq_bridge_name = vmq_bridge
+pkg_vmq_bridge_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_bridge_homepage = https://verne.mq/
+pkg_vmq_bridge_fetch = git
+pkg_vmq_bridge_repo = https://github.com/erlio/vmq_bridge
+pkg_vmq_bridge_commit = master
+
+PACKAGES += vmq_graphite
+pkg_vmq_graphite_name = vmq_graphite
+pkg_vmq_graphite_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_graphite_homepage = https://verne.mq/
+pkg_vmq_graphite_fetch = git
+pkg_vmq_graphite_repo = https://github.com/erlio/vmq_graphite
+pkg_vmq_graphite_commit = master
+
+PACKAGES += vmq_passwd
+pkg_vmq_passwd_name = vmq_passwd
+pkg_vmq_passwd_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_passwd_homepage = https://verne.mq/
+pkg_vmq_passwd_fetch = git
+pkg_vmq_passwd_repo = https://github.com/erlio/vmq_passwd
+pkg_vmq_passwd_commit = master
+
+PACKAGES += vmq_server
+pkg_vmq_server_name = vmq_server
+pkg_vmq_server_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_server_homepage = https://verne.mq/
+pkg_vmq_server_fetch = git
+pkg_vmq_server_repo = https://github.com/erlio/vmq_server
+pkg_vmq_server_commit = master
+
+PACKAGES += vmq_snmp
+pkg_vmq_snmp_name = vmq_snmp
+pkg_vmq_snmp_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_snmp_homepage = https://verne.mq/
+pkg_vmq_snmp_fetch = git
+pkg_vmq_snmp_repo = https://github.com/erlio/vmq_snmp
+pkg_vmq_snmp_commit = master
+
+PACKAGES += vmq_systree
+pkg_vmq_systree_name = vmq_systree
+pkg_vmq_systree_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_systree_homepage = https://verne.mq/
+pkg_vmq_systree_fetch = git
+pkg_vmq_systree_repo = https://github.com/erlio/vmq_systree
+pkg_vmq_systree_commit = master
+
+PACKAGES += vmstats
+pkg_vmstats_name = vmstats
+pkg_vmstats_description = tiny Erlang app that works in conjunction with statsderl in order to generate information on the Erlang VM for graphite logs.
+pkg_vmstats_homepage = https://github.com/ferd/vmstats
+pkg_vmstats_fetch = git
+pkg_vmstats_repo = https://github.com/ferd/vmstats
+pkg_vmstats_commit = master
+
+PACKAGES += walrus
+pkg_walrus_name = walrus
+pkg_walrus_description = Walrus - Mustache-like Templating
+pkg_walrus_homepage = https://github.com/devinus/walrus
+pkg_walrus_fetch = git
+pkg_walrus_repo = https://github.com/devinus/walrus
+pkg_walrus_commit = master
+
+PACKAGES += webmachine
+pkg_webmachine_name = webmachine
+pkg_webmachine_description = A REST-based system for building web applications.
+pkg_webmachine_homepage = https://github.com/basho/webmachine
+pkg_webmachine_fetch = git
+pkg_webmachine_repo = https://github.com/basho/webmachine
+pkg_webmachine_commit = master
+
+PACKAGES += websocket_client
+pkg_websocket_client_name = websocket_client
+pkg_websocket_client_description = Erlang websocket client (ws and wss supported)
+pkg_websocket_client_homepage = https://github.com/jeremyong/websocket_client
+pkg_websocket_client_fetch = git
+pkg_websocket_client_repo = https://github.com/jeremyong/websocket_client
+pkg_websocket_client_commit = master
+
+PACKAGES += worker_pool
+pkg_worker_pool_name = worker_pool
+pkg_worker_pool_description = a simple erlang worker pool
+pkg_worker_pool_homepage = https://github.com/inaka/worker_pool
+pkg_worker_pool_fetch = git
+pkg_worker_pool_repo = https://github.com/inaka/worker_pool
+pkg_worker_pool_commit = master
+
+PACKAGES += wrangler
+pkg_wrangler_name = wrangler
+pkg_wrangler_description = Import of the Wrangler svn repository.
+pkg_wrangler_homepage = http://www.cs.kent.ac.uk/projects/wrangler/Home.html
+pkg_wrangler_fetch = git
+pkg_wrangler_repo = https://github.com/RefactoringTools/wrangler
+pkg_wrangler_commit = master
+
+PACKAGES += wsock
+pkg_wsock_name = wsock
+pkg_wsock_description = Erlang library to build WebSocket clients and servers
+pkg_wsock_homepage = https://github.com/madtrick/wsock
+pkg_wsock_fetch = git
+pkg_wsock_repo = https://github.com/madtrick/wsock
+pkg_wsock_commit = master
+
+PACKAGES += xhttpc
+pkg_xhttpc_name = xhttpc
+pkg_xhttpc_description = Extensible HTTP Client for Erlang
+pkg_xhttpc_homepage = https://github.com/seriyps/xhttpc
+pkg_xhttpc_fetch = git
+pkg_xhttpc_repo = https://github.com/seriyps/xhttpc
+pkg_xhttpc_commit = master
+
+PACKAGES += xref_runner
+pkg_xref_runner_name = xref_runner
+pkg_xref_runner_description = Erlang Xref Runner (inspired by rebar xref)
+pkg_xref_runner_homepage = https://github.com/inaka/xref_runner
+pkg_xref_runner_fetch = git
+pkg_xref_runner_repo = https://github.com/inaka/xref_runner
+pkg_xref_runner_commit = master
+
+PACKAGES += yamerl
+pkg_yamerl_name = yamerl
+pkg_yamerl_description = YAML 1.2 parser in pure Erlang
+pkg_yamerl_homepage = https://github.com/yakaz/yamerl
+pkg_yamerl_fetch = git
+pkg_yamerl_repo = https://github.com/yakaz/yamerl
+pkg_yamerl_commit = master
+
+PACKAGES += yamler
+pkg_yamler_name = yamler
+pkg_yamler_description = libyaml-based yaml loader for Erlang
+pkg_yamler_homepage = https://github.com/goertzenator/yamler
+pkg_yamler_fetch = git
+pkg_yamler_repo = https://github.com/goertzenator/yamler
+pkg_yamler_commit = master
+
+PACKAGES += yaws
+pkg_yaws_name = yaws
+pkg_yaws_description = Yaws webserver
+pkg_yaws_homepage = http://yaws.hyber.org
+pkg_yaws_fetch = git
+pkg_yaws_repo = https://github.com/klacke/yaws
+pkg_yaws_commit = master
+
+PACKAGES += zab_engine
+pkg_zab_engine_name = zab_engine
+pkg_zab_engine_description = Zab protocol implementation in Erlang
+pkg_zab_engine_homepage = https://github.com/xinmingyao/zab_engine
+pkg_zab_engine_fetch = git
+pkg_zab_engine_repo = https://github.com/xinmingyao/zab_engine
+pkg_zab_engine_commit = master
+
+PACKAGES += zabbix_sender
+pkg_zabbix_sender_name = zabbix_sender
+pkg_zabbix_sender_description = Zabbix trapper for sending data to Zabbix in pure Erlang
+pkg_zabbix_sender_homepage = https://github.com/stalkermn/zabbix_sender
+pkg_zabbix_sender_fetch = git
+pkg_zabbix_sender_repo = https://github.com/stalkermn/zabbix_sender.git
+pkg_zabbix_sender_commit = master
+
+PACKAGES += zeta
+pkg_zeta_name = zeta
+pkg_zeta_description = HTTP access log parser in Erlang
+pkg_zeta_homepage = https://github.com/s1n4/zeta
+pkg_zeta_fetch = git
+pkg_zeta_repo = https://github.com/s1n4/zeta
+pkg_zeta_commit = master
+
+PACKAGES += zippers
+pkg_zippers_name = zippers
+pkg_zippers_description = A library for functional zipper data structures in Erlang.
+pkg_zippers_homepage = https://github.com/ferd/zippers
+pkg_zippers_fetch = git
+pkg_zippers_repo = https://github.com/ferd/zippers
+pkg_zippers_commit = master
+
+PACKAGES += zlists
+pkg_zlists_name = zlists
+pkg_zlists_description = Erlang lazy lists library.
+pkg_zlists_homepage = https://github.com/vjache/erlang-zlists
+pkg_zlists_fetch = git
+pkg_zlists_repo = https://github.com/vjache/erlang-zlists
+pkg_zlists_commit = master
+
+PACKAGES += zraft_lib
+pkg_zraft_lib_name = zraft_lib
+pkg_zraft_lib_description = Erlang raft consensus protocol implementation
+pkg_zraft_lib_homepage = https://github.com/dreyk/zraft_lib
+pkg_zraft_lib_fetch = git
+pkg_zraft_lib_repo = https://github.com/dreyk/zraft_lib
+pkg_zraft_lib_commit = master
+
+PACKAGES += zucchini
+pkg_zucchini_name = zucchini
+pkg_zucchini_description = An Erlang INI parser
+pkg_zucchini_homepage = https://github.com/devinus/zucchini
+pkg_zucchini_fetch = git
+pkg_zucchini_repo = https://github.com/devinus/zucchini
+pkg_zucchini_commit = master
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: search
+
+define pkg_print
+ $(verbose) printf "%s\n" \
+ $(if $(call core_eq,$(1),$(pkg_$(1)_name)),,"Pkg name: $(1)") \
+ "App name: $(pkg_$(1)_name)" \
+ "Description: $(pkg_$(1)_description)" \
+ "Home page: $(pkg_$(1)_homepage)" \
+ "Fetch with: $(pkg_$(1)_fetch)" \
+ "Repository: $(pkg_$(1)_repo)" \
+ "Commit: $(pkg_$(1)_commit)" \
+ ""
+
+endef
+
+search:
+ifdef q
+ $(foreach p,$(PACKAGES), \
+ $(if $(findstring $(call core_lc,$(q)),$(call core_lc,$(pkg_$(p)_name) $(pkg_$(p)_description))), \
+ $(call pkg_print,$(p))))
+else
+ $(foreach p,$(PACKAGES),$(call pkg_print,$(p)))
+endif
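+
+# Example (illustrative query string): the search target above prints every
+# package via pkg_print, or only those whose name or description matches q,
+# case-insensitively:
+#   $ make search
+#   $ make search q=riak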
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-deps clean-tmp-deps.log
+
+# Configuration.
+
+ifdef OTP_DEPS
+$(warning The variable OTP_DEPS is deprecated in favor of LOCAL_DEPS.)
+endif
+
+IGNORE_DEPS ?=
+export IGNORE_DEPS
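+
+# Example (hypothetical dependency names): deps listed here are filtered out
+# of ALL_DEPS_DIRS below and are therefore never fetched or built:
+#   IGNORE_DEPS += edown eper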
+
+APPS_DIR ?= $(CURDIR)/apps
+export APPS_DIR
+
+DEPS_DIR ?= $(CURDIR)/deps
+export DEPS_DIR
+
+REBAR_DEPS_DIR = $(DEPS_DIR)
+export REBAR_DEPS_DIR
+
+REBAR_GIT ?= https://github.com/rebar/rebar
+REBAR_COMMIT ?= 576e12171ab8d69b048b827b92aa65d067deea01
+
+# External "early" plugins (see core/plugins.mk for regular plugins).
+# Both kinds of plugins are loaded through the core_dep_plugin macro.
+
+define core_dep_plugin
+ifeq ($(2),$(PROJECT))
+-include $$(patsubst $(PROJECT)/%,%,$(1))
+else
+-include $(DEPS_DIR)/$(1)
+
+$(DEPS_DIR)/$(1): $(DEPS_DIR)/$(2) ;
+endif
+endef
+
+DEP_EARLY_PLUGINS ?=
+
+$(foreach p,$(DEP_EARLY_PLUGINS),\
+ $(eval $(if $(findstring /,$p),\
+ $(call core_dep_plugin,$p,$(firstword $(subst /, ,$p))),\
+ $(call core_dep_plugin,$p/early-plugins.mk,$p))))
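+
+# Example (illustrative names): a bare dependency name loads that dep's
+# early-plugins.mk, while a path loads the given file from the dep named by
+# its first component:
+#   DEP_EARLY_PLUGINS = rabbit_common
+#   DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-plugin.mk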
+
+# Query functions.
+
+query_fetch_method = $(if $(dep_$(1)),$(call _qfm_dep,$(word 1,$(dep_$(1)))),$(call _qfm_pkg,$(1)))
+_qfm_dep = $(if $(dep_fetch_$(1)),$(1),$(if $(IS_DEP),legacy,fail))
+_qfm_pkg = $(if $(pkg_$(1)_fetch),$(pkg_$(1)_fetch),fail)
+
+query_name = $(if $(dep_$(1)),$(1),$(if $(pkg_$(1)_name),$(pkg_$(1)_name),$(1)))
+
+query_repo = $(call _qr,$(1),$(call query_fetch_method,$(1)))
+_qr = $(if $(query_repo_$(2)),$(call query_repo_$(2),$(1)),$(call dep_repo,$(1)))
+
+query_repo_default = $(if $(dep_$(1)),$(word 2,$(dep_$(1))),$(pkg_$(1)_repo))
+query_repo_git = $(patsubst git://github.com/%,https://github.com/%,$(call query_repo_default,$(1)))
+query_repo_git-subfolder = $(call query_repo_git,$(1))
+query_repo_git-submodule = -
+query_repo_hg = $(call query_repo_default,$(1))
+query_repo_svn = $(call query_repo_default,$(1))
+query_repo_cp = $(call query_repo_default,$(1))
+query_repo_ln = $(call query_repo_default,$(1))
+query_repo_hex = https://hex.pm/packages/$(if $(word 3,$(dep_$(1))),$(word 3,$(dep_$(1))),$(1))
+query_repo_fail = -
+query_repo_legacy = -
+
+query_version = $(call _qv,$(1),$(call query_fetch_method,$(1)))
+_qv = $(if $(query_version_$(2)),$(call query_version_$(2),$(1)),$(call dep_commit,$(1)))
+
+query_version_default = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(word 3,$(dep_$(1))),$(pkg_$(1)_commit)))
+query_version_git = $(call query_version_default,$(1))
+query_version_git-subfolder = $(call query_version_git,$(1))
+query_version_git-submodule = -
+query_version_hg = $(call query_version_default,$(1))
+query_version_svn = -
+query_version_cp = -
+query_version_ln = -
+query_version_hex = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(word 2,$(dep_$(1))),$(pkg_$(1)_commit)))
+query_version_fail = -
+query_version_legacy = -
+
+query_extra = $(call _qe,$(1),$(call query_fetch_method,$(1)))
+_qe = $(if $(query_extra_$(2)),$(call query_extra_$(2),$(1)),-)
+
+query_extra_git = -
+query_extra_git-subfolder = $(if $(dep_$(1)),subfolder=$(word 4,$(dep_$(1))),-)
+query_extra_git-submodule = -
+query_extra_hg = -
+query_extra_svn = -
+query_extra_cp = -
+query_extra_ln = -
+query_extra_hex = $(if $(dep_$(1)),package-name=$(word 3,$(dep_$(1))),-)
+query_extra_fail = -
+query_extra_legacy = -
+
+query_absolute_path = $(addprefix $(DEPS_DIR)/,$(call query_name,$(1)))
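+
+# Example (hypothetical dependency declaration): given
+#   dep_cowlib = git https://github.com/ninenines/cowlib 2.9.1
+# the query functions above resolve to:
+#   $(call query_fetch_method,cowlib) -> git
+#   $(call query_repo,cowlib)         -> https://github.com/ninenines/cowlib
+#   $(call query_version,cowlib)      -> 2.9.1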
+
+# Deprecated legacy query functions.
+dep_fetch = $(call query_fetch_method,$(1))
+dep_name = $(call query_name,$(1))
+dep_repo = $(call query_repo_git,$(1))
+dep_commit = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(if $(filter hex,$(word 1,$(dep_$(1)))),$(word 2,$(dep_$(1))),$(word 3,$(dep_$(1)))),$(pkg_$(1)_commit)))
+
+LOCAL_DEPS_DIRS = $(foreach a,$(LOCAL_DEPS),$(if $(wildcard $(APPS_DIR)/$(a)),$(APPS_DIR)/$(a)))
+ALL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(foreach dep,$(filter-out $(IGNORE_DEPS),$(BUILD_DEPS) $(DEPS)),$(call dep_name,$(dep))))
+
+# When we are calling an app directly we don't want to include it here,
+# otherwise it'll be treated both as an app and as a top-level project.
+ALL_APPS_DIRS = $(if $(wildcard $(APPS_DIR)/),$(filter-out $(APPS_DIR),$(shell find $(APPS_DIR) -maxdepth 1 -type d)))
+ifdef ROOT_DIR
+ifndef IS_APP
+ALL_APPS_DIRS := $(filter-out $(APPS_DIR)/$(notdir $(CURDIR)),$(ALL_APPS_DIRS))
+endif
+endif
+
+ifeq ($(filter $(APPS_DIR) $(DEPS_DIR),$(subst :, ,$(ERL_LIBS))),)
+ifeq ($(ERL_LIBS),)
+ ERL_LIBS = $(APPS_DIR):$(DEPS_DIR)
+else
+ ERL_LIBS := $(ERL_LIBS):$(APPS_DIR):$(DEPS_DIR)
+endif
+endif
+export ERL_LIBS
+
+export NO_AUTOPATCH
+
+# Verbosity.
+
+dep_verbose_0 = @echo " DEP $1 ($(call dep_commit,$1))";
+dep_verbose_2 = set -x;
+dep_verbose = $(dep_verbose_$(V))
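+
+# Example: `make V=2` echoes the full fetch commands (via set -x) instead of
+# the terse " DEP <name> (<commit>)" lines printed at the default verbosity.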
+
+# Optimization: don't recompile deps unless truly necessary.
+
+ifndef IS_DEP
+ifneq ($(MAKELEVEL),0)
+$(shell rm -f ebin/dep_built)
+endif
+endif
+
+# Core targets.
+
+ALL_APPS_DIRS_TO_BUILD = $(if $(LOCAL_DEPS_DIRS)$(IS_APP),$(LOCAL_DEPS_DIRS),$(ALL_APPS_DIRS))
+
+apps:: $(ALL_APPS_DIRS) clean-tmp-deps.log | $(ERLANG_MK_TMP)
+# Create an ebin directory for all apps to make sure Erlang recognizes them
+# as proper OTP applications when using -include_lib. This is a temporary
+# fix; a proper fix would be to compile apps/* in the right order.
+ifndef IS_APP
+ifneq ($(ALL_APPS_DIRS),)
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ mkdir -p $$dep/ebin; \
+ done
+endif
+endif
+# At the toplevel: if LOCAL_DEPS is defined with at least one local app, only
+# compile that list of apps. Otherwise, compile everything.
+# Within an app: compile all LOCAL_DEPS that are (uncompiled) local apps.
+ifneq ($(ALL_APPS_DIRS_TO_BUILD),)
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS_TO_BUILD); do \
+ if grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/apps.log; then \
+ :; \
+ else \
+ echo $$dep >> $(ERLANG_MK_TMP)/apps.log; \
+ $(MAKE) -C $$dep $(if $(IS_TEST),test-build-app) IS_APP=1; \
+ fi \
+ done
+endif
+
+clean-tmp-deps.log:
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_TMP)/apps.log $(ERLANG_MK_TMP)/deps.log
+endif
+
+# Erlang.mk does not rebuild dependencies after they have been compiled
+# once. Developers working on the top-level project and on some of its
+# dependencies at the same time may want to change this behavior. There
+# are two solutions (see the example below):
+# 1. Set `FULL=1` so that all dependencies are visited and
+#    recursively recompiled if necessary.
+# 2. Set `FORCE_REBUILD=` to the specific list of dependencies that
+#    should be recompiled (instead of the whole set).
+
+FORCE_REBUILD ?=
+
+ifeq ($(origin FULL),undefined)
+ifneq ($(strip $(force_rebuild_dep)$(FORCE_REBUILD)),)
+define force_rebuild_dep
+echo "$(FORCE_REBUILD)" | grep -qw "$$(basename "$1")"
+endef
+endif
+endif
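+
+# Example (hypothetical dependency names): recompile just two deps on the
+# next run, leaving the rest untouched:
+#   $ make FORCE_REBUILD="cowlib ranch"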
+
+ifneq ($(SKIP_DEPS),)
+deps::
+else
+deps:: $(ALL_DEPS_DIRS) apps clean-tmp-deps.log | $(ERLANG_MK_TMP)
+ifneq ($(ALL_DEPS_DIRS),)
+ $(verbose) set -e; for dep in $(ALL_DEPS_DIRS); do \
+ if grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/deps.log; then \
+ :; \
+ else \
+ echo $$dep >> $(ERLANG_MK_TMP)/deps.log; \
+ if [ -z "$(strip $(FULL))" ] $(if $(force_rebuild_dep),&& ! ($(call force_rebuild_dep,$$dep)),) && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ elif [ -f $$dep/GNUmakefile ] || [ -f $$dep/makefile ] || [ -f $$dep/Makefile ]; then \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ else \
+ echo "Error: No Makefile to build dependency $$dep." >&2; \
+ exit 2; \
+ fi \
+ fi \
+ done
+endif
+endif
+
+# Deps related targets.
+
+# @todo rename GNUmakefile and makefile into Makefile first, if they exist
+# While the Makefile could also be named GNUmakefile or makefile,
+# in practice only Makefile has been needed so far.
+define dep_autopatch
+ if [ -f $(DEPS_DIR)/$(1)/erlang.mk ]; then \
+ rm -rf $(DEPS_DIR)/$1/ebin/; \
+ $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+ $(call dep_autopatch_erlang_mk,$(1)); \
+ elif [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+ if [ -f $(DEPS_DIR)/$1/rebar.lock ]; then \
+ $(call dep_autopatch2,$1); \
+ elif [ 0 != `grep -c "include ../\w*\.mk" $(DEPS_DIR)/$(1)/Makefile` ]; then \
+ $(call dep_autopatch2,$(1)); \
+ elif [ 0 != `grep -ci "^[^#].*rebar" $(DEPS_DIR)/$(1)/Makefile` ]; then \
+ $(call dep_autopatch2,$(1)); \
+ elif [ -n "`find $(DEPS_DIR)/$(1)/ -type f -name \*.mk -not -name erlang.mk -exec grep -i "^[^#].*rebar" '{}' \;`" ]; then \
+ $(call dep_autopatch2,$(1)); \
+ fi \
+ else \
+ if [ ! -d $(DEPS_DIR)/$(1)/src/ ]; then \
+ $(call dep_autopatch_noop,$(1)); \
+ else \
+ $(call dep_autopatch2,$(1)); \
+ fi \
+ fi
+endef
+
+define dep_autopatch2
+ ! test -f $(DEPS_DIR)/$1/ebin/$1.app || \
+ mv -n $(DEPS_DIR)/$1/ebin/$1.app $(DEPS_DIR)/$1/src/$1.app.src; \
+ rm -f $(DEPS_DIR)/$1/ebin/$1.app; \
+ if [ -f $(DEPS_DIR)/$1/src/$1.app.src.script ]; then \
+ $(call erlang,$(call dep_autopatch_appsrc_script.erl,$(1))); \
+ fi; \
+ $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+ if [ -f $(DEPS_DIR)/$(1)/rebar -o -f $(DEPS_DIR)/$(1)/rebar.config -o -f $(DEPS_DIR)/$(1)/rebar.config.script -o -f $(DEPS_DIR)/$1/rebar.lock ]; then \
+ $(call dep_autopatch_fetch_rebar); \
+ $(call dep_autopatch_rebar,$(1)); \
+ else \
+ $(call dep_autopatch_gen,$(1)); \
+ fi
+endef
+
+define dep_autopatch_noop
+ printf "noop:\n" > $(DEPS_DIR)/$(1)/Makefile
+endef
+
+# Replace "include erlang.mk" with a line that will load the parent Erlang.mk
+# if given. Do it for all 3 possible Makefile file names.
+ifeq ($(NO_AUTOPATCH_ERLANG_MK),)
+define dep_autopatch_erlang_mk
+ for f in Makefile makefile GNUmakefile; do \
+ if [ -f $(DEPS_DIR)/$1/$$f ]; then \
+ sed -i.bak s/'include *erlang.mk'/'include $$(if $$(ERLANG_MK_FILENAME),$$(ERLANG_MK_FILENAME),erlang.mk)'/ $(DEPS_DIR)/$1/$$f; \
+ fi \
+ done
+endef
+else
+define dep_autopatch_erlang_mk
+ :
+endef
+endif
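+
+# Example: leave the deps' own "include erlang.mk" lines untouched:
+#   $ make NO_AUTOPATCH_ERLANG_MK=1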
+
+define dep_autopatch_gen
+ printf "%s\n" \
+ "ERLC_OPTS = +debug_info" \
+ "include ../../erlang.mk" > $(DEPS_DIR)/$(1)/Makefile
+endef
+
+# We use flock/lockf when available to avoid concurrency issues.
+define dep_autopatch_fetch_rebar
+ if command -v flock >/dev/null; then \
+ flock $(ERLANG_MK_TMP)/rebar.lock sh -c "$(call dep_autopatch_fetch_rebar2)"; \
+ elif command -v lockf >/dev/null; then \
+ lockf $(ERLANG_MK_TMP)/rebar.lock sh -c "$(call dep_autopatch_fetch_rebar2)"; \
+ else \
+ $(call dep_autopatch_fetch_rebar2); \
+ fi
+endef
+
+define dep_autopatch_fetch_rebar2
+ if [ ! -d $(ERLANG_MK_TMP)/rebar ]; then \
+ git clone -q -n -- $(REBAR_GIT) $(ERLANG_MK_TMP)/rebar; \
+ cd $(ERLANG_MK_TMP)/rebar; \
+ git checkout -q $(REBAR_COMMIT); \
+ ./bootstrap; \
+ cd -; \
+ fi
+endef
+
+define dep_autopatch_rebar
+ if [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+ mv $(DEPS_DIR)/$(1)/Makefile $(DEPS_DIR)/$(1)/Makefile.orig.mk; \
+ fi; \
+ $(call erlang,$(call dep_autopatch_rebar.erl,$(1))); \
+ rm -f $(DEPS_DIR)/$(1)/ebin/$(1).app
+endef
+
+define dep_autopatch_rebar.erl
+ application:load(rebar),
+ application:set_env(rebar, log_level, debug),
+ rmemo:start(),
+ Conf1 = case file:consult("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config)") of
+ {ok, Conf0} -> Conf0;
+ _ -> []
+ end,
+ {Conf, OsEnv} = fun() ->
+ case filelib:is_file("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)") of
+ false -> {Conf1, []};
+ true ->
+ Bindings0 = erl_eval:new_bindings(),
+ Bindings1 = erl_eval:add_binding('CONFIG', Conf1, Bindings0),
+ Bindings = erl_eval:add_binding('SCRIPT', "$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings1),
+ Before = os:getenv(),
+ {ok, Conf2} = file:script("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings),
+ {Conf2, lists:foldl(fun(E, Acc) -> lists:delete(E, Acc) end, os:getenv(), Before)}
+ end
+ end(),
+ Write = fun (Text) ->
+ file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/Makefile)", Text, [append])
+ end,
+ Escape = fun (Text) ->
+ re:replace(Text, "\\\\$$", "\$$$$", [global, {return, list}])
+ end,
+ Write("IGNORE_DEPS += edown eper eunit_formatters meck node_package "
+ "rebar_lock_deps_plugin rebar_vsn_plugin reltool_util\n"),
+ Write("C_SRC_DIR = /path/do/not/exist\n"),
+ Write("C_SRC_TYPE = rebar\n"),
+ Write("DRV_CFLAGS = -fPIC\nexport DRV_CFLAGS\n"),
+ Write(["ERLANG_ARCH = ", rebar_utils:wordsize(), "\nexport ERLANG_ARCH\n"]),
+ ToList = fun
+ (V) when is_atom(V) -> atom_to_list(V);
+ (V) when is_list(V) -> "'\\"" ++ V ++ "\\"'"
+ end,
+ fun() ->
+ Write("ERLC_OPTS = +debug_info\nexport ERLC_OPTS\n"),
+ case lists:keyfind(erl_opts, 1, Conf) of
+ false -> ok;
+ {_, ErlOpts} ->
+ lists:foreach(fun
+ ({d, D}) ->
+ Write("ERLC_OPTS += -D" ++ ToList(D) ++ "=1\n");
+ ({d, DKey, DVal}) ->
+ Write("ERLC_OPTS += -D" ++ ToList(DKey) ++ "=" ++ ToList(DVal) ++ "\n");
+ ({i, I}) ->
+ Write(["ERLC_OPTS += -I ", I, "\n"]);
+ ({platform_define, Regex, D}) ->
+ case rebar_utils:is_arch(Regex) of
+ true -> Write("ERLC_OPTS += -D" ++ ToList(D) ++ "=1\n");
+ false -> ok
+ end;
+ ({parse_transform, PT}) ->
+ Write("ERLC_OPTS += +'{parse_transform, " ++ ToList(PT) ++ "}'\n");
+ (_) -> ok
+ end, ErlOpts)
+ end,
+ Write("\n")
+ end(),
+ GetHexVsn = fun(N, NP) ->
+ case file:consult("$(call core_native_path,$(DEPS_DIR)/$1/rebar.lock)") of
+ {ok, Lock} ->
+ io:format("~p~n", [Lock]),
+ case lists:keyfind("1.1.0", 1, Lock) of
+ {_, LockPkgs} ->
+ io:format("~p~n", [LockPkgs]),
+ case lists:keyfind(atom_to_binary(N, latin1), 1, LockPkgs) of
+ {_, {pkg, _, Vsn}, _} ->
+ io:format("~p~n", [Vsn]),
+ {N, {hex, NP, binary_to_list(Vsn)}};
+ _ ->
+ false
+ end;
+ _ ->
+ false
+ end;
+ _ ->
+ false
+ end
+ end,
+ SemVsn = fun
+ ("~>" ++ S0) ->
+ S = case S0 of
+ " " ++ S1 -> S1;
+ _ -> S0
+ end,
+ case length([ok || $$. <- S]) of
+ 0 -> S ++ ".0.0";
+ 1 -> S ++ ".0";
+ _ -> S
+ end;
+ (S) -> S
+ end,
+ fun() ->
+ File = case lists:keyfind(deps, 1, Conf) of
+ false -> [];
+ {_, Deps} ->
+ [begin case case Dep of
+ N when is_atom(N) -> GetHexVsn(N, N);
+ {N, S} when is_atom(N), is_list(S) -> {N, {hex, N, SemVsn(S)}};
+ {N, {pkg, NP}} when is_atom(N) -> GetHexVsn(N, NP);
+ {N, S, {pkg, NP}} -> {N, {hex, NP, S}};
+ {N, S} when is_tuple(S) -> {N, S};
+ {N, _, S} -> {N, S};
+ {N, _, S, _} -> {N, S};
+ _ -> false
+ end of
+ false -> ok;
+ {Name, Source} ->
+ {Method, Repo, Commit} = case Source of
+ {hex, NPV, V} -> {hex, V, NPV};
+ {git, R} -> {git, R, master};
+ {M, R, {branch, C}} -> {M, R, C};
+ {M, R, {ref, C}} -> {M, R, C};
+ {M, R, {tag, C}} -> {M, R, C};
+ {M, R, C} -> {M, R, C}
+ end,
+ Write(io_lib:format("DEPS += ~s\ndep_~s = ~s ~s ~s~n", [Name, Name, Method, Repo, Commit]))
+ end end || Dep <- Deps]
+ end
+ end(),
+ fun() ->
+ case lists:keyfind(erl_first_files, 1, Conf) of
+ false -> ok;
+ {_, Files} ->
+ Names = [[" ", case lists:reverse(F) of
+ "lre." ++ Elif -> lists:reverse(Elif);
+ "lrx." ++ Elif -> lists:reverse(Elif);
+ "lry." ++ Elif -> lists:reverse(Elif);
+ Elif -> lists:reverse(Elif)
+ end] || "src/" ++ F <- Files],
+ Write(io_lib:format("COMPILE_FIRST +=~s\n", [Names]))
+ end
+ end(),
+ Write("\n\nrebar_dep: preprocess pre-deps deps pre-app app\n"),
+ Write("\npreprocess::\n"),
+ Write("\npre-deps::\n"),
+ Write("\npre-app::\n"),
+ PatchHook = fun(Cmd) ->
+ Cmd2 = re:replace(Cmd, "^([g]?make)(.*)( -C.*)", "\\\\1\\\\3\\\\2", [{return, list}]),
+ case Cmd2 of
+ "make -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1);
+ "gmake -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1);
+ "make " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1);
+ "gmake " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1);
+ _ -> Escape(Cmd)
+ end
+ end,
+ fun() ->
+ case lists:keyfind(pre_hooks, 1, Conf) of
+ false -> ok;
+ {_, Hooks} ->
+ [case H of
+ {'get-deps', Cmd} ->
+ Write("\npre-deps::\n\t" ++ PatchHook(Cmd) ++ "\n");
+ {compile, Cmd} ->
+ Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n");
+ {Regex, compile, Cmd} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n");
+ false -> ok
+ end;
+ _ -> ok
+ end || H <- Hooks]
+ end
+ end(),
+ ShellToMk = fun(V0) ->
+ V1 = re:replace(V0, "[$$][(]", "$$\(shell ", [global]),
+ V = re:replace(V1, "([$$])(?![(])(\\\\w*)", "\\\\1(\\\\2)", [global]),
+ re:replace(V, "-Werror\\\\b", "", [{return, list}, global])
+ end,
+ PortSpecs = fun() ->
+ case lists:keyfind(port_specs, 1, Conf) of
+ false ->
+ case filelib:is_dir("$(call core_native_path,$(DEPS_DIR)/$1/c_src)") of
+ false -> [];
+ true ->
+ [{"priv/" ++ proplists:get_value(so_name, Conf, "$(1)_drv.so"),
+ proplists:get_value(port_sources, Conf, ["c_src/*.c"]), []}]
+ end;
+ {_, Specs} ->
+ lists:flatten([case S of
+ {Output, Input} -> {ShellToMk(Output), Input, []};
+ {Regex, Output, Input} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {ShellToMk(Output), Input, []};
+ false -> []
+ end;
+ {Regex, Output, Input, [{env, Env}]} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {ShellToMk(Output), Input, Env};
+ false -> []
+ end
+ end || S <- Specs])
+ end
+ end(),
+ PortSpecWrite = fun (Text) ->
+ file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/c_src/Makefile.erlang.mk)", Text, [append])
+ end,
+ case PortSpecs of
+ [] -> ok;
+ _ ->
+ Write("\npre-app::\n\t@$$\(MAKE) --no-print-directory -f c_src/Makefile.erlang.mk\n"),
+ PortSpecWrite(io_lib:format("ERL_CFLAGS ?= -finline-functions -Wall -fPIC -I \\"~s/erts-~s/include\\" -I \\"~s\\"\n",
+ [code:root_dir(), erlang:system_info(version), code:lib_dir(erl_interface, include)])),
+ PortSpecWrite(io_lib:format("ERL_LDFLAGS ?= -L \\"~s\\" -lei\n",
+ [code:lib_dir(erl_interface, lib)])),
+ [PortSpecWrite(["\n", E, "\n"]) || E <- OsEnv],
+ FilterEnv = fun(Env) ->
+ lists:flatten([case E of
+ {_, _} -> E;
+ {Regex, K, V} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {K, V};
+ false -> []
+ end
+ end || E <- Env])
+ end,
+ MergeEnv = fun(Env) ->
+ lists:foldl(fun ({K, V}, Acc) ->
+ case lists:keyfind(K, 1, Acc) of
+ false -> [{K, rebar_utils:expand_env_variable(V, K, "")}|Acc];
+ {_, V0} -> [{K, rebar_utils:expand_env_variable(V, K, V0)}|Acc]
+ end
+ end, [], Env)
+ end,
+ PortEnv = case lists:keyfind(port_env, 1, Conf) of
+ false -> [];
+ {_, PortEnv0} -> FilterEnv(PortEnv0)
+ end,
+ PortSpec = fun ({Output, Input0, Env}) ->
+ filelib:ensure_dir("$(call core_native_path,$(DEPS_DIR)/$1/)" ++ Output),
+ Input = [[" ", I] || I <- Input0],
+ PortSpecWrite([
+ [["\n", K, " = ", ShellToMk(V)] || {K, V} <- lists:reverse(MergeEnv(PortEnv))],
+ case $(PLATFORM) of
+ darwin -> "\n\nLDFLAGS += -flat_namespace -undefined suppress";
+ _ -> ""
+ end,
+ "\n\nall:: ", Output, "\n\t@:\n\n",
+ "%.o: %.c\n\t$$\(CC) -c -o $$\@ $$\< $$\(CFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.C\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.cc\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.cpp\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ [[Output, ": ", K, " += ", ShellToMk(V), "\n"] || {K, V} <- lists:reverse(MergeEnv(FilterEnv(Env)))],
+ Output, ": $$\(foreach ext,.c .C .cc .cpp,",
+ "$$\(patsubst %$$\(ext),%.o,$$\(filter %$$\(ext),$$\(wildcard", Input, "))))\n",
+ "\t$$\(CC) -o $$\@ $$\? $$\(LDFLAGS) $$\(ERL_LDFLAGS) $$\(DRV_LDFLAGS) $$\(EXE_LDFLAGS)",
+ case {filename:extension(Output), $(PLATFORM)} of
+ {[], _} -> "\n";
+ {_, darwin} -> "\n";
+ _ -> " -shared\n"
+ end])
+ end,
+ [PortSpec(S) || S <- PortSpecs]
+ end,
+ fun() ->
+ case lists:keyfind(plugins, 1, Conf) of
+ false -> ok;
+ {_, Plugins0} ->
+ Plugins = [P || P <- Plugins0, is_tuple(P)],
+ case lists:keyfind('lfe-compile', 1, Plugins) of
+ false -> ok;
+ _ -> Write("\nBUILD_DEPS = lfe lfe.mk\ndep_lfe.mk = git https://github.com/ninenines/lfe.mk master\nDEP_PLUGINS = lfe.mk\n")
+ end
+ end
+ end(),
+ Write("\ninclude $$\(if $$\(ERLANG_MK_FILENAME),$$\(ERLANG_MK_FILENAME),erlang.mk)"),
+ RunPlugin = fun(Plugin, Step) ->
+ case erlang:function_exported(Plugin, Step, 2) of
+ false -> ok;
+ true ->
+ c:cd("$(call core_native_path,$(DEPS_DIR)/$1/)"),
+ Ret = Plugin:Step({config, "", Conf, dict:new(), dict:new(), dict:new(),
+ dict:store(base_dir, "", dict:new())}, undefined),
+ io:format("rebar plugin ~p step ~p ret ~p~n", [Plugin, Step, Ret])
+ end
+ end,
+ fun() ->
+ case lists:keyfind(plugins, 1, Conf) of
+ false -> ok;
+ {_, Plugins0} ->
+ Plugins = [P || P <- Plugins0, is_atom(P)],
+ [begin
+ case lists:keyfind(deps, 1, Conf) of
+ false -> ok;
+ {_, Deps} ->
+ case lists:keyfind(P, 1, Deps) of
+ false -> ok;
+ _ ->
+ Path = "$(call core_native_path,$(DEPS_DIR)/)" ++ atom_to_list(P),
+ io:format("~s", [os:cmd("$(MAKE) -C $(call core_native_path,$(DEPS_DIR)/$1) " ++ Path)]),
+ io:format("~s", [os:cmd("$(MAKE) -C " ++ Path ++ " IS_DEP=1")]),
+ code:add_patha(Path ++ "/ebin")
+ end
+ end
+ end || P <- Plugins],
+ [case code:load_file(P) of
+ {module, P} -> ok;
+ _ ->
+ case lists:keyfind(plugin_dir, 1, Conf) of
+ false -> ok;
+ {_, PluginsDir} ->
+ ErlFile = "$(call core_native_path,$(DEPS_DIR)/$1/)" ++ PluginsDir ++ "/" ++ atom_to_list(P) ++ ".erl",
+ {ok, P, Bin} = compile:file(ErlFile, [binary]),
+ {module, P} = code:load_binary(P, ErlFile, Bin)
+ end
+ end || P <- Plugins],
+ [RunPlugin(P, preprocess) || P <- Plugins],
+ [RunPlugin(P, pre_compile) || P <- Plugins],
+ [RunPlugin(P, compile) || P <- Plugins]
+ end
+ end(),
+ halt()
+endef
+
+define dep_autopatch_appsrc_script.erl
+ AppSrc = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+ AppSrcScript = AppSrc ++ ".script",
+ {ok, Conf0} = file:consult(AppSrc),
+ Bindings0 = erl_eval:new_bindings(),
+ Bindings1 = erl_eval:add_binding('CONFIG', Conf0, Bindings0),
+ Bindings = erl_eval:add_binding('SCRIPT', AppSrcScript, Bindings1),
+ Conf = case file:script(AppSrcScript, Bindings) of
+ {ok, [C]} -> C;
+ {ok, C} -> C
+ end,
+ ok = file:write_file(AppSrc, io_lib:format("~p.~n", [Conf])),
+ halt()
+endef
+
+define dep_autopatch_appsrc.erl
+ AppSrcOut = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+ AppSrcIn = case filelib:is_regular(AppSrcOut) of false -> "$(call core_native_path,$(DEPS_DIR)/$1/ebin/$1.app)"; true -> AppSrcOut end,
+ case filelib:is_regular(AppSrcIn) of
+ false -> ok;
+ true ->
+ {ok, [{application, $(1), L0}]} = file:consult(AppSrcIn),
+ L1 = lists:keystore(modules, 1, L0, {modules, []}),
+ L2 = case lists:keyfind(vsn, 1, L1) of
+ {_, git} -> lists:keyreplace(vsn, 1, L1, {vsn, lists:droplast(os:cmd("git -C $(DEPS_DIR)/$1 describe --dirty --tags --always"))});
+ {_, {cmd, _}} -> lists:keyreplace(vsn, 1, L1, {vsn, "cmd"});
+ _ -> L1
+ end,
+ L3 = case lists:keyfind(registered, 1, L2) of false -> [{registered, []}|L2]; _ -> L2 end,
+ ok = file:write_file(AppSrcOut, io_lib:format("~p.~n", [{application, $(1), L3}])),
+ case AppSrcOut of AppSrcIn -> ok; _ -> ok = file:delete(AppSrcIn) end
+ end,
+ halt()
+endef
+
+define dep_fetch_git
+ git clone -q -n -- $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && git checkout -q $(call dep_commit,$(1));
+endef
+
+define dep_fetch_git-subfolder
+ mkdir -p $(ERLANG_MK_TMP)/git-subfolder; \
+ git clone -q -n -- $(call dep_repo,$1) \
+ $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1); \
+ cd $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1) \
+ && git checkout -q $(call dep_commit,$1); \
+ ln -s $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1)/$(word 4,$(dep_$(1))) \
+ $(DEPS_DIR)/$(call dep_name,$1);
+endef
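+
+# Example (hypothetical monorepo layout): the 4th word of the dep is the
+# subfolder that ends up symlinked into DEPS_DIR:
+#   dep_myapp = git-subfolder https://github.com/example/monorepo master apps/myapp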
+
+define dep_fetch_git-submodule
+ git submodule update --init -- $(DEPS_DIR)/$1;
+endef
+
+define dep_fetch_hg
+ hg clone -q -U $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && hg update -q $(call dep_commit,$(1));
+endef
+
+define dep_fetch_svn
+ svn checkout -q $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+define dep_fetch_cp
+ cp -R $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+define dep_fetch_ln
+ ln -s $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+# Hex only has a package version. No need to look in the Erlang.mk packages.
+define dep_fetch_hex
+ mkdir -p $(ERLANG_MK_TMP)/hex $(DEPS_DIR)/$1; \
+ $(call core_http_get,$(ERLANG_MK_TMP)/hex/$1.tar,\
+ https://repo.hex.pm/tarballs/$(if $(word 3,$(dep_$1)),$(word 3,$(dep_$1)),$1)-$(strip $(word 2,$(dep_$1))).tar); \
+ tar -xOf $(ERLANG_MK_TMP)/hex/$1.tar contents.tar.gz | tar -C $(DEPS_DIR)/$1 -xzf -;
+endef
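+
+# Example (illustrative version): word 2 is the Hex version; an optional
+# word 3 gives the Hex package name when it differs from the app name:
+#   DEPS += cowboy
+#   dep_cowboy = hex 2.8.0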
+
+define dep_fetch_fail
+ echo "Error: Unknown or invalid dependency: $(1)." >&2; \
+ exit 78;
+endef
+
+# Kept for compatibility purposes with older Erlang.mk configuration.
+define dep_fetch_legacy
+ $(warning WARNING: '$(1)' dependency configuration uses deprecated format.) \
+ git clone -q -n -- $(word 1,$(dep_$(1))) $(DEPS_DIR)/$(1); \
+ cd $(DEPS_DIR)/$(1) && git checkout -q $(if $(word 2,$(dep_$(1))),$(word 2,$(dep_$(1))),master);
+endef
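+
+# Example of the deprecated format handled above (illustrative):
+#   dep_cowlib = https://github.com/ninenines/cowlib 2.11.0
+# i.e. the repository URL first and an optional commit second; the commit
+# defaults to master when omitted.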
+
+define dep_target
+$(DEPS_DIR)/$(call dep_name,$1): | $(ERLANG_MK_TMP)
+ $(eval DEP_NAME := $(call dep_name,$1))
+ $(eval DEP_STR := $(if $(filter $1,$(DEP_NAME)),$1,"$1 ($(DEP_NAME))"))
+ $(verbose) if test -d $(APPS_DIR)/$(DEP_NAME); then \
+ echo "Error: Dependency" $(DEP_STR) "conflicts with application found in $(APPS_DIR)/$(DEP_NAME)." >&2; \
+ exit 17; \
+ fi
+ $(verbose) mkdir -p $(DEPS_DIR)
+ $(dep_verbose) $(call dep_fetch_$(strip $(call dep_fetch,$(1))),$(1))
+ $(verbose) if [ -f $(DEPS_DIR)/$(1)/configure.ac -o -f $(DEPS_DIR)/$(1)/configure.in ] \
+ && [ ! -f $(DEPS_DIR)/$(1)/configure ]; then \
+ echo " AUTO " $(DEP_STR); \
+ cd $(DEPS_DIR)/$(1) && autoreconf -Wall -vif -I m4; \
+ fi
+ - $(verbose) if [ -f $(DEPS_DIR)/$(DEP_NAME)/configure ]; then \
+ echo " CONF " $(DEP_STR); \
+ cd $(DEPS_DIR)/$(DEP_NAME) && ./configure; \
+ fi
+ifeq ($(filter $(1),$(NO_AUTOPATCH)),)
+ $(verbose) $$(MAKE) --no-print-directory autopatch-$(DEP_NAME)
+endif
+
+.PHONY: autopatch-$(call dep_name,$1)
+
+autopatch-$(call dep_name,$1)::
+ $(verbose) if [ "$(1)" = "amqp_client" -a "$(RABBITMQ_CLIENT_PATCH)" ]; then \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \
+ echo " PATCH Downloading rabbitmq-codegen"; \
+ git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \
+ fi; \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-server ]; then \
+ echo " PATCH Downloading rabbitmq-server"; \
+ git clone https://github.com/rabbitmq/rabbitmq-server.git $(DEPS_DIR)/rabbitmq-server; \
+ fi; \
+ ln -s $(DEPS_DIR)/amqp_client/deps/rabbit_common-0.0.0 $(DEPS_DIR)/rabbit_common; \
+ elif [ "$(1)" = "rabbit" -a "$(RABBITMQ_SERVER_PATCH)" ]; then \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \
+ echo " PATCH Downloading rabbitmq-codegen"; \
+ git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \
+		fi; \
+ elif [ "$1" = "elixir" -a "$(ELIXIR_PATCH)" ]; then \
+ ln -s lib/elixir/ebin $(DEPS_DIR)/elixir/; \
+ else \
+ $$(call dep_autopatch,$(call dep_name,$1)) \
+ fi
+endef
+
+$(foreach dep,$(BUILD_DEPS) $(DEPS),$(eval $(call dep_target,$(dep))))
+
+ifndef IS_APP
+clean:: clean-apps
+
+clean-apps:
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ $(MAKE) -C $$dep clean IS_APP=1; \
+ done
+
+distclean:: distclean-apps
+
+distclean-apps:
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ $(MAKE) -C $$dep distclean IS_APP=1; \
+ done
+endif
+
+ifndef SKIP_DEPS
+distclean:: distclean-deps
+
+distclean-deps:
+ $(gen_verbose) rm -rf $(DEPS_DIR)
+endif
+
+# Forward-declare variables used in core/deps-tools.mk. This is required
+# in case plugins use them.
+
+ERLANG_MK_RECURSIVE_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-deps-list.log
+ERLANG_MK_RECURSIVE_DOC_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-doc-deps-list.log
+ERLANG_MK_RECURSIVE_REL_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-rel-deps-list.log
+ERLANG_MK_RECURSIVE_TEST_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-test-deps-list.log
+ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-shell-deps-list.log
+
+ERLANG_MK_QUERY_DEPS_FILE = $(ERLANG_MK_TMP)/query-deps.log
+ERLANG_MK_QUERY_DOC_DEPS_FILE = $(ERLANG_MK_TMP)/query-doc-deps.log
+ERLANG_MK_QUERY_REL_DEPS_FILE = $(ERLANG_MK_TMP)/query-rel-deps.log
+ERLANG_MK_QUERY_TEST_DEPS_FILE = $(ERLANG_MK_TMP)/query-test-deps.log
+ERLANG_MK_QUERY_SHELL_DEPS_FILE = $(ERLANG_MK_TMP)/query-shell-deps.log
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: clean-app
+
+# Configuration.
+
+ERLC_OPTS ?= -Werror +debug_info +warn_export_vars +warn_shadow_vars \
+ +warn_obsolete_guard # +bin_opt_info +warn_export_all +warn_missing_spec
+COMPILE_FIRST ?=
+COMPILE_FIRST_PATHS = $(addprefix src/,$(addsuffix .erl,$(COMPILE_FIRST)))
+ERLC_EXCLUDE ?=
+ERLC_EXCLUDE_PATHS = $(addprefix src/,$(addsuffix .erl,$(ERLC_EXCLUDE)))
+
+ERLC_ASN1_OPTS ?=
+
+ERLC_MIB_OPTS ?=
+COMPILE_MIB_FIRST ?=
+COMPILE_MIB_FIRST_PATHS = $(addprefix mibs/,$(addsuffix .mib,$(COMPILE_MIB_FIRST)))
+
+# Verbosity.
+
+app_verbose_0 = @echo " APP " $(PROJECT);
+app_verbose_2 = set -x;
+app_verbose = $(app_verbose_$(V))
+
+appsrc_verbose_0 = @echo " APP " $(PROJECT).app.src;
+appsrc_verbose_2 = set -x;
+appsrc_verbose = $(appsrc_verbose_$(V))
+
+makedep_verbose_0 = @echo " DEPEND" $(PROJECT).d;
+makedep_verbose_2 = set -x;
+makedep_verbose = $(makedep_verbose_$(V))
+
+erlc_verbose_0 = @echo " ERLC " $(filter-out $(patsubst %,%.erl,$(ERLC_EXCLUDE)),\
+ $(filter %.erl %.core,$(?F)));
+erlc_verbose_2 = set -x;
+erlc_verbose = $(erlc_verbose_$(V))
+
+xyrl_verbose_0 = @echo " XYRL " $(filter %.xrl %.yrl,$(?F));
+xyrl_verbose_2 = set -x;
+xyrl_verbose = $(xyrl_verbose_$(V))
+
+asn1_verbose_0 = @echo " ASN1 " $(filter %.asn1,$(?F));
+asn1_verbose_2 = set -x;
+asn1_verbose = $(asn1_verbose_$(V))
+
+mib_verbose_0 = @echo " MIB " $(filter %.bin %.mib,$(?F));
+mib_verbose_2 = set -x;
+mib_verbose = $(mib_verbose_$(V))
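+
+# For example (illustrative): with the default V=0, compiling foo.erl prints
+# just " ERLC   foo.erl"; running `make V=1` echoes the full commands instead,
+# and `make V=2` additionally traces each recipe with `set -x`.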
+
+ifneq ($(wildcard src/),)
+
+# Targets.
+
+app:: $(if $(wildcard ebin/test),clean) deps
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d
+ $(verbose) $(MAKE) --no-print-directory app-build
+
+ifeq ($(wildcard src/$(PROJECT_MOD).erl),)
+define app_file
+{application, '$(PROJECT)', [
+ {description, "$(PROJECT_DESCRIPTION)"},
+ {vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP),
+ {id$(comma)$(space)"$(1)"}$(comma))
+ {modules, [$(call comma_list,$(2))]},
+ {registered, []},
+ {applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(foreach dep,$(DEPS),$(call dep_name,$(dep))))]},
+ {env, $(subst \,\\,$(PROJECT_ENV))}$(if $(findstring {,$(PROJECT_APP_EXTRA_KEYS)),$(comma)$(newline)$(tab)$(subst \,\\,$(PROJECT_APP_EXTRA_KEYS)),)
+]}.
+endef
+else
+define app_file
+{application, '$(PROJECT)', [
+ {description, "$(PROJECT_DESCRIPTION)"},
+ {vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP),
+ {id$(comma)$(space)"$(1)"}$(comma))
+ {modules, [$(call comma_list,$(2))]},
+ {registered, [$(call comma_list,$(PROJECT)_sup $(PROJECT_REGISTERED))]},
+ {applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(foreach dep,$(DEPS),$(call dep_name,$(dep))))]},
+ {mod, {$(PROJECT_MOD), []}},
+ {env, $(subst \,\\,$(PROJECT_ENV))}$(if $(findstring {,$(PROJECT_APP_EXTRA_KEYS)),$(comma)$(newline)$(tab)$(subst \,\\,$(PROJECT_APP_EXTRA_KEYS)),)
+]}.
+endef
+endif
+
+app-build: ebin/$(PROJECT).app
+ $(verbose) :
+
+# Source files.
+
+ALL_SRC_FILES := $(sort $(call core_find,src/,*))
+
+ERL_FILES := $(filter %.erl,$(ALL_SRC_FILES))
+CORE_FILES := $(filter %.core,$(ALL_SRC_FILES))
+
+# ASN.1 files.
+
+ifneq ($(wildcard asn1/),)
+ASN1_FILES = $(sort $(call core_find,asn1/,*.asn1))
+ERL_FILES += $(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES))))
+
+define compile_asn1
+ $(verbose) mkdir -p include/
+ $(asn1_verbose) erlc -v -I include/ -o asn1/ +noobj $(ERLC_ASN1_OPTS) $(1)
+ $(verbose) mv asn1/*.erl src/
+ -$(verbose) mv asn1/*.hrl include/
+ $(verbose) mv asn1/*.asn1db include/
+endef
+
+$(PROJECT).d:: $(ASN1_FILES)
+ $(if $(strip $?),$(call compile_asn1,$?))
+endif
+
+# SNMP MIB files.
+
+ifneq ($(wildcard mibs/),)
+MIB_FILES = $(sort $(call core_find,mibs/,*.mib))
+
+$(PROJECT).d:: $(COMPILE_MIB_FIRST_PATHS) $(MIB_FILES)
+ $(verbose) mkdir -p include/ priv/mibs/
+ $(mib_verbose) erlc -v $(ERLC_MIB_OPTS) -o priv/mibs/ -I priv/mibs/ $?
+ $(mib_verbose) erlc -o include/ -- $(addprefix priv/mibs/,$(patsubst %.mib,%.bin,$(notdir $?)))
+endif
+
+# Leex and Yecc files.
+
+XRL_FILES := $(filter %.xrl,$(ALL_SRC_FILES))
+XRL_ERL_FILES = $(addprefix src/,$(patsubst %.xrl,%.erl,$(notdir $(XRL_FILES))))
+ERL_FILES += $(XRL_ERL_FILES)
+
+YRL_FILES := $(filter %.yrl,$(ALL_SRC_FILES))
+YRL_ERL_FILES = $(addprefix src/,$(patsubst %.yrl,%.erl,$(notdir $(YRL_FILES))))
+ERL_FILES += $(YRL_ERL_FILES)
+
+$(PROJECT).d:: $(XRL_FILES) $(YRL_FILES)
+ $(if $(strip $?),$(xyrl_verbose) erlc -v -o src/ $(YRL_ERLC_OPTS) $?)
+
+# Erlang and Core Erlang files.
+
+define makedep.erl
+ E = ets:new(makedep, [bag]),
+ G = digraph:new([acyclic]),
+ ErlFiles = lists:usort(string:tokens("$(ERL_FILES)", " ")),
+ DepsDir = "$(call core_native_path,$(DEPS_DIR))",
+ AppsDir = "$(call core_native_path,$(APPS_DIR))",
+ DepsDirsSrc = "$(if $(wildcard $(DEPS_DIR)/*/src), $(call core_native_path,$(wildcard $(DEPS_DIR)/*/src)))",
+ DepsDirsInc = "$(if $(wildcard $(DEPS_DIR)/*/include), $(call core_native_path,$(wildcard $(DEPS_DIR)/*/include)))",
+ AppsDirsSrc = "$(if $(wildcard $(APPS_DIR)/*/src), $(call core_native_path,$(wildcard $(APPS_DIR)/*/src)))",
+ AppsDirsInc = "$(if $(wildcard $(APPS_DIR)/*/include), $(call core_native_path,$(wildcard $(APPS_DIR)/*/include)))",
+ DepsDirs = lists:usort(string:tokens(DepsDirsSrc++DepsDirsInc, " ")),
+ AppsDirs = lists:usort(string:tokens(AppsDirsSrc++AppsDirsInc, " ")),
+ Modules = [{list_to_atom(filename:basename(F, ".erl")), F} || F <- ErlFiles],
+ Add = fun (Mod, Dep) ->
+ case lists:keyfind(Dep, 1, Modules) of
+ false -> ok;
+ {_, DepFile} ->
+ {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+ ets:insert(E, {ModFile, DepFile}),
+ digraph:add_vertex(G, Mod),
+ digraph:add_vertex(G, Dep),
+ digraph:add_edge(G, Mod, Dep)
+ end
+ end,
+ AddHd = fun (F, Mod, DepFile) ->
+ case file:open(DepFile, [read]) of
+ {error, enoent} ->
+ ok;
+ {ok, Fd} ->
+ {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+ case ets:match(E, {ModFile, DepFile}) of
+ [] ->
+ ets:insert(E, {ModFile, DepFile}),
+ F(F, Fd, Mod,0);
+ _ -> ok
+ end
+ end
+ end,
+ SearchHrl = fun
+ F(_Hrl, []) -> {error,enoent};
+ F(Hrl, [Dir|Dirs]) ->
+ HrlF = filename:join([Dir,Hrl]),
+ case filelib:is_file(HrlF) of
+ true ->
+ {ok, HrlF};
+ false -> F(Hrl,Dirs)
+ end
+ end,
+ Attr = fun
+ (_F, Mod, behavior, Dep) ->
+ Add(Mod, Dep);
+ (_F, Mod, behaviour, Dep) ->
+ Add(Mod, Dep);
+ (_F, Mod, compile, {parse_transform, Dep}) ->
+ Add(Mod, Dep);
+ (_F, Mod, compile, Opts) when is_list(Opts) ->
+ case proplists:get_value(parse_transform, Opts) of
+ undefined -> ok;
+ Dep -> Add(Mod, Dep)
+ end;
+ (F, Mod, include, Hrl) ->
+ case SearchHrl(Hrl, ["src", "include",AppsDir,DepsDir]++AppsDirs++DepsDirs) of
+ {ok, FoundHrl} -> AddHd(F, Mod, FoundHrl);
+ {error, _} -> false
+ end;
+ (F, Mod, include_lib, Hrl) ->
+ case SearchHrl(Hrl, ["src", "include",AppsDir,DepsDir]++AppsDirs++DepsDirs) of
+ {ok, FoundHrl} -> AddHd(F, Mod, FoundHrl);
+ {error, _} -> false
+ end;
+ (F, Mod, import, {Imp, _}) ->
+ IsFile =
+ case lists:keyfind(Imp, 1, Modules) of
+ false -> false;
+ {_, FilePath} -> filelib:is_file(FilePath)
+ end,
+ case IsFile of
+ false -> ok;
+ true -> Add(Mod, Imp)
+ end;
+ (_, _, _, _) -> ok
+ end,
+ MakeDepend = fun
+ (F, Fd, Mod, StartLocation) ->
+ {ok, Filename} = file:pid2name(Fd),
+ case io:parse_erl_form(Fd, undefined, StartLocation) of
+ {ok, AbsData, EndLocation} ->
+ case AbsData of
+ {attribute, _, Key, Value} ->
+ Attr(F, Mod, Key, Value),
+ F(F, Fd, Mod, EndLocation);
+ _ -> F(F, Fd, Mod, EndLocation)
+ end;
+ {eof, _ } -> file:close(Fd);
+ {error, ErrorDescription } ->
+ file:close(Fd);
+ {error, ErrorInfo, ErrorLocation} ->
+ F(F, Fd, Mod, ErrorLocation)
+ end,
+ ok
+ end,
+ [begin
+ Mod = list_to_atom(filename:basename(F, ".erl")),
+ case file:open(F, [read]) of
+ {ok, Fd} -> MakeDepend(MakeDepend, Fd, Mod,0);
+ {error, enoent} -> ok
+ end
+ end || F <- ErlFiles],
+ Depend = sofs:to_external(sofs:relation_to_family(sofs:relation(ets:tab2list(E)))),
+ CompileFirst = [X || X <- lists:reverse(digraph_utils:topsort(G)), [] =/= digraph:in_neighbours(G, X)],
+ TargetPath = fun(Target) ->
+ case lists:keyfind(Target, 1, Modules) of
+ false -> "";
+ {_, DepFile} ->
+ DirSubname = tl(string:tokens(filename:dirname(DepFile), "/")),
+ string:join(DirSubname ++ [atom_to_list(Target)], "/")
+ end
+ end,
+ Output0 = [
+ "# Generated by Erlang.mk. Edit at your own risk!\n\n",
+ [[F, "::", [[" ", D] || D <- Deps], "; @touch \$$@\n"] || {F, Deps} <- Depend],
+ "\nCOMPILE_FIRST +=", [[" ", TargetPath(CF)] || CF <- CompileFirst], "\n"
+ ],
+ Output = case "é" of
+ [233] -> unicode:characters_to_binary(Output0);
+ _ -> Output0
+ end,
+ ok = file:write_file("$(1)", Output),
+ halt()
+endef
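+
+# The generated $(PROJECT).d file looks roughly like this (illustrative):
+#   src/my_mod.erl:: src/my_behaviour.erl include/my.hrl; @touch $@
+#   COMPILE_FIRST += my_behaviour
+# i.e. one dependency line per module, plus the list of modules (behaviours,
+# parse transforms) that must be compiled before everything else.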
+
+ifeq ($(if $(NO_MAKEDEP),$(wildcard $(PROJECT).d),),)
+$(PROJECT).d:: $(ERL_FILES) $(call core_find,include/,*.hrl) $(MAKEFILE_LIST)
+ $(makedep_verbose) $(call erlang,$(call makedep.erl,$@))
+endif
+
+ifeq ($(IS_APP)$(IS_DEP),)
+ifneq ($(words $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES)),0)
+# Rebuild everything when the Makefile changes.
+$(ERLANG_MK_TMP)/last-makefile-change: $(MAKEFILE_LIST) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES); \
+ touch -c $(PROJECT).d; \
+ fi
+ $(verbose) touch $@
+
+$(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES):: $(ERLANG_MK_TMP)/last-makefile-change
+ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change
+endif
+endif
+
+$(PROJECT).d::
+ $(verbose) :
+
+include $(wildcard $(PROJECT).d)
+
+ebin/$(PROJECT).app:: ebin/
+
+ebin/:
+ $(verbose) mkdir -p ebin/
+
+define compile_erl
+ $(erlc_verbose) erlc -v $(if $(IS_DEP),$(filter-out -Werror,$(ERLC_OPTS)),$(ERLC_OPTS)) -o ebin/ \
+ -pa ebin/ -I include/ $(filter-out $(ERLC_EXCLUDE_PATHS),$(COMPILE_FIRST_PATHS) $(1))
+endef
+
+define validate_app_file
+ case file:consult("ebin/$(PROJECT).app") of
+ {ok, _} -> halt();
+ _ -> halt(1)
+ end
+endef
+
+ebin/$(PROJECT).app:: $(ERL_FILES) $(CORE_FILES) $(wildcard src/$(PROJECT).app.src)
+ $(eval FILES_TO_COMPILE := $(filter-out src/$(PROJECT).app.src,$?))
+ $(if $(strip $(FILES_TO_COMPILE)),$(call compile_erl,$(FILES_TO_COMPILE)))
+# Older git versions do not have the --first-parent flag. Do without in that case.
+ $(eval GITDESCRIBE := $(shell git describe --dirty --abbrev=7 --tags --always --first-parent 2>/dev/null \
+ || git describe --dirty --abbrev=7 --tags --always 2>/dev/null || true))
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(filter-out $(ERLC_EXCLUDE_PATHS),$(ERL_FILES) $(CORE_FILES) $(BEAM_FILES)))))))
+ifeq ($(wildcard src/$(PROJECT).app.src),)
+ $(app_verbose) printf '$(subst %,%%,$(subst $(newline),\n,$(subst ','\'',$(call app_file,$(GITDESCRIBE),$(MODULES)))))' \
+ > ebin/$(PROJECT).app
+ $(verbose) if ! $(call erlang,$(call validate_app_file)); then \
+ echo "The .app file produced is invalid. Please verify the value of PROJECT_ENV." >&2; \
+ exit 1; \
+ fi
+else
+ $(verbose) if [ -z "$$(grep -e '^[^%]*{\s*modules\s*,' src/$(PROJECT).app.src)" ]; then \
+		echo "Empty {modules, []} entry not found in src/$(PROJECT).app.src. Please consult the erlang.mk documentation for instructions." >&2; \
+ exit 1; \
+ fi
+ $(appsrc_verbose) cat src/$(PROJECT).app.src \
+ | sed "s/{[[:space:]]*modules[[:space:]]*,[[:space:]]*\[\]}/{modules, \[$(call comma_list,$(MODULES))\]}/" \
+ | sed "s/{id,[[:space:]]*\"git\"}/{id, \"$(subst /,\/,$(GITDESCRIBE))\"}/" \
+ > ebin/$(PROJECT).app
+endif
+ifneq ($(wildcard src/$(PROJECT).appup),)
+ $(verbose) cp src/$(PROJECT).appup ebin/
+endif
+
+clean:: clean-app
+
+clean-app:
+ $(gen_verbose) rm -rf $(PROJECT).d ebin/ priv/mibs/ $(XRL_ERL_FILES) $(YRL_ERL_FILES) \
+ $(addprefix include/,$(patsubst %.mib,%.hrl,$(notdir $(MIB_FILES)))) \
+ $(addprefix include/,$(patsubst %.asn1,%.hrl,$(notdir $(ASN1_FILES)))) \
+ $(addprefix include/,$(patsubst %.asn1,%.asn1db,$(notdir $(ASN1_FILES)))) \
+ $(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES))))
+
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Viktor Söderqvist <viktor@zuiderkwast.se>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: docs-deps
+
+# Configuration.
+
+ALL_DOC_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(DOC_DEPS))
+
+# Targets.
+
+$(foreach dep,$(DOC_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+doc-deps:
+else
+doc-deps: $(ALL_DOC_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_DOC_DEPS_DIRS) ; do $(MAKE) -C $$dep IS_DEP=1; done
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: rel-deps
+
+# Configuration.
+
+ALL_REL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(REL_DEPS))
+
+# Targets.
+
+$(foreach dep,$(REL_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+rel-deps:
+else
+rel-deps: $(ALL_REL_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_REL_DEPS_DIRS) ; do $(MAKE) -C $$dep; done
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: test-deps test-dir test-build clean-test-dir
+
+# Configuration.
+
+TEST_DIR ?= $(CURDIR)/test
+
+ALL_TEST_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(TEST_DEPS))
+
+TEST_ERLC_OPTS ?= +debug_info +warn_export_vars +warn_shadow_vars +warn_obsolete_guard
+TEST_ERLC_OPTS += -DTEST=1
+
+# Targets.
+
+$(foreach dep,$(TEST_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+test-deps:
+else
+test-deps: $(ALL_TEST_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_TEST_DEPS_DIRS) ; do \
+ if [ -z "$(strip $(FULL))" ] && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ else \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ fi \
+ done
+endif
+
+ifneq ($(wildcard $(TEST_DIR)),)
+test-dir: $(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build
+ @:
+
+test_erlc_verbose_0 = @echo " ERLC " $(filter-out $(patsubst %,%.erl,$(ERLC_EXCLUDE)),\
+ $(filter %.erl %.core,$(notdir $(FILES_TO_COMPILE))));
+test_erlc_verbose_2 = set -x;
+test_erlc_verbose = $(test_erlc_verbose_$(V))
+
+define compile_test_erl
+ $(test_erlc_verbose) erlc -v $(TEST_ERLC_OPTS) -o $(TEST_DIR) \
+ -pa ebin/ -I include/ $(1)
+endef
+
+ERL_TEST_FILES = $(call core_find,$(TEST_DIR)/,*.erl)
+$(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build: $(ERL_TEST_FILES) $(MAKEFILE_LIST)
+ $(eval FILES_TO_COMPILE := $(if $(filter $(MAKEFILE_LIST),$?),$(filter $(ERL_TEST_FILES),$^),$?))
+ $(if $(strip $(FILES_TO_COMPILE)),$(call compile_test_erl,$(FILES_TO_COMPILE)) && touch $@)
+endif
+
+test-build:: IS_TEST=1
+test-build:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build:: $(if $(wildcard src),$(if $(wildcard ebin/test),,clean)) $(if $(IS_APP),,deps test-deps)
+# We already compiled everything when IS_APP=1.
+ifndef IS_APP
+ifneq ($(wildcard src),)
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(verbose) $(MAKE) --no-print-directory app-build ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(gen_verbose) touch ebin/test
+endif
+ifneq ($(wildcard $(TEST_DIR)),)
+ $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+endif
+endif
+
+# Roughly the same as test-build, but when IS_APP=1.
+# We only care about compiling the current application.
+ifdef IS_APP
+test-build-app:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build-app:: deps test-deps
+ifneq ($(wildcard src),)
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(verbose) $(MAKE) --no-print-directory app-build ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(gen_verbose) touch ebin/test
+endif
+ifneq ($(wildcard $(TEST_DIR)),)
+ $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+endif
+endif
+
+clean:: clean-test-dir
+
+clean-test-dir:
+ifneq ($(wildcard $(TEST_DIR)/*.beam),)
+ $(gen_verbose) rm -f $(TEST_DIR)/*.beam $(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: rebar.config
+
+# We strip out -Werror because we don't want to fail due to
+# warnings when used as a dependency.
+
+compat_prepare_erlc_opts = $(shell echo "$1" | sed 's/, */,/g')
+
+define compat_convert_erlc_opts
+$(if $(filter-out -Werror,$1),\
+ $(if $(findstring +,$1),\
+ $(shell echo $1 | cut -b 2-)))
+endef
+
+define compat_erlc_opts_to_list
+[$(call comma_list,$(foreach o,$(call compat_prepare_erlc_opts,$1),$(call compat_convert_erlc_opts,$o)))]
+endef
+
+define compat_rebar_config
+{deps, [
+$(call comma_list,$(foreach d,$(DEPS),\
+ $(if $(filter hex,$(call dep_fetch,$d)),\
+ {$(call dep_name,$d)$(comma)"$(call dep_repo,$d)"},\
+ {$(call dep_name,$d)$(comma)".*"$(comma){git,"$(call dep_repo,$d)"$(comma)"$(call dep_commit,$d)"}})))
+]}.
+{erl_opts, $(call compat_erlc_opts_to_list,$(ERLC_OPTS))}.
+endef
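+
+# The rendered rebar.config looks roughly like this (illustrative, for one
+# hypothetical git dependency and the default ERLC_OPTS):
+#   {deps, [{cowboy,".*",{git,"https://github.com/ninenines/cowboy","2.9.0"}}]}.
+#   {erl_opts, [debug_info,warn_export_vars,warn_shadow_vars,warn_obsolete_guard]}.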
+
+rebar.config:
+ $(gen_verbose) $(call core_render,compat_rebar_config,rebar.config)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter asciideck,$(DEPS) $(DOC_DEPS)),asciideck)
+
+.PHONY: asciidoc asciidoc-guide asciidoc-manual install-asciidoc distclean-asciidoc-guide distclean-asciidoc-manual
+
+# Core targets.
+
+docs:: asciidoc
+
+distclean:: distclean-asciidoc-guide distclean-asciidoc-manual
+
+# Plugin-specific targets.
+
+asciidoc: asciidoc-guide asciidoc-manual
+
+# User guide.
+
+ifeq ($(wildcard doc/src/guide/book.asciidoc),)
+asciidoc-guide:
+else
+asciidoc-guide: distclean-asciidoc-guide doc-deps
+ a2x -v -f pdf doc/src/guide/book.asciidoc && mv doc/src/guide/book.pdf doc/guide.pdf
+ a2x -v -f chunked doc/src/guide/book.asciidoc && mv doc/src/guide/book.chunked/ doc/html/
+
+distclean-asciidoc-guide:
+ $(gen_verbose) rm -rf doc/html/ doc/guide.pdf
+endif
+
+# Man pages.
+
+ASCIIDOC_MANUAL_FILES := $(wildcard doc/src/manual/*.asciidoc)
+
+ifeq ($(ASCIIDOC_MANUAL_FILES),)
+asciidoc-manual:
+else
+
+# Configuration.
+
+MAN_INSTALL_PATH ?= /usr/local/share/man
+MAN_SECTIONS ?= 3 7
+MAN_PROJECT ?= $(shell echo $(PROJECT) | sed 's/^./\U&\E/')
+MAN_VERSION ?= $(PROJECT_VERSION)
+
+# Plugin-specific targets.
+
+define asciidoc2man.erl
+try
+ [begin
+ io:format(" ADOC ~s~n", [F]),
+ ok = asciideck:to_manpage(asciideck:parse_file(F), #{
+ compress => gzip,
+ outdir => filename:dirname(F),
+ extra2 => "$(MAN_PROJECT) $(MAN_VERSION)",
+ extra3 => "$(MAN_PROJECT) Function Reference"
+ })
+ end || F <- [$(shell echo $(addprefix $(comma)\",$(addsuffix \",$1)) | sed 's/^.//')]],
+ halt(0)
+catch C:E ->
+ io:format("Exception ~p:~p~nStacktrace: ~p~n", [C, E, erlang:get_stacktrace()]),
+ halt(1)
+end.
+endef
+
+asciidoc-manual:: doc-deps
+
+asciidoc-manual:: $(ASCIIDOC_MANUAL_FILES)
+ $(gen_verbose) $(call erlang,$(call asciidoc2man.erl,$?))
+ $(verbose) $(foreach s,$(MAN_SECTIONS),mkdir -p doc/man$s/ && mv doc/src/manual/*.$s.gz doc/man$s/;)
+
+install-docs:: install-asciidoc
+
+install-asciidoc: asciidoc-manual
+ $(foreach s,$(MAN_SECTIONS),\
+ mkdir -p $(MAN_INSTALL_PATH)/man$s/ && \
+ install -g `id -g` -o `id -u` -m 0644 doc/man$s/*.gz $(MAN_INSTALL_PATH)/man$s/;)
+
+distclean-asciidoc-manual:
+ $(gen_verbose) rm -rf $(addprefix doc/man,$(MAN_SECTIONS))
+endif
+endif
+
+# Copyright (c) 2014-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: bootstrap bootstrap-lib bootstrap-rel new list-templates
+
+# Core targets.
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Bootstrap targets:" \
+ " bootstrap Generate a skeleton of an OTP application" \
+ " bootstrap-lib Generate a skeleton of an OTP library" \
+ " bootstrap-rel Generate the files needed to build a release" \
+ " new-app in=NAME Create a new local OTP application NAME" \
+ " new-lib in=NAME Create a new local OTP library NAME" \
+ " new t=TPL n=NAME Generate a module NAME based on the template TPL" \
+		"  new t=T n=N in=APP  Generate a module N based on the template T in APP" \
+ " list-templates List available templates"
+
+# Bootstrap templates.
+
+define bs_appsrc
+{application, $p, [
+ {description, ""},
+ {vsn, "0.1.0"},
+ {id, "git"},
+ {modules, []},
+ {registered, []},
+ {applications, [
+ kernel,
+ stdlib
+ ]},
+ {mod, {$p_app, []}},
+ {env, []}
+]}.
+endef
+
+define bs_appsrc_lib
+{application, $p, [
+ {description, ""},
+ {vsn, "0.1.0"},
+ {id, "git"},
+ {modules, []},
+ {registered, []},
+ {applications, [
+ kernel,
+ stdlib
+ ]}
+]}.
+endef
+
+# To prevent autocompletion issues with ZSH, we add "include erlang.mk"
+# separately during the actual bootstrap.
+define bs_Makefile
+PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.1.0
+$(if $(SP),
+# Whitespace to be used when creating files from templates.
+SP = $(SP)
+)
+endef
+
+define bs_apps_Makefile
+PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.1.0
+$(if $(SP),
+# Whitespace to be used when creating files from templates.
+SP = $(SP)
+)
+# Make sure we know where the applications are located.
+ROOT_DIR ?= $(call core_relpath,$(dir $(ERLANG_MK_FILENAME)),$(APPS_DIR)/app)
+APPS_DIR ?= ..
+DEPS_DIR ?= $(call core_relpath,$(DEPS_DIR),$(APPS_DIR)/app)
+
+include $$(ROOT_DIR)/erlang.mk
+endef
+
+define bs_app
+-module($p_app).
+-behaviour(application).
+
+-export([start/2]).
+-export([stop/1]).
+
+start(_Type, _Args) ->
+ $p_sup:start_link().
+
+stop(_State) ->
+ ok.
+endef
+
+define bs_relx_config
+{release, {$p_release, "1"}, [$p, sasl, runtime_tools]}.
+{extended_start_script, true}.
+{sys_config, "config/sys.config"}.
+{vm_args, "config/vm.args"}.
+endef
+
+define bs_sys_config
+[
+].
+endef
+
+define bs_vm_args
+-name $p@127.0.0.1
+-setcookie $p
+-heart
+endef
+
+# Normal templates.
+
+define tpl_supervisor
+-module($(n)).
+-behaviour(supervisor).
+
+-export([start_link/0]).
+-export([init/1]).
+
+start_link() ->
+ supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+init([]) ->
+ Procs = [],
+ {ok, {{one_for_one, 1, 5}, Procs}}.
+endef
+
+define tpl_gen_server
+-module($(n)).
+-behaviour(gen_server).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_server.
+-export([init/1]).
+-export([handle_call/3]).
+-export([handle_cast/2]).
+-export([handle_info/2]).
+-export([terminate/2]).
+-export([code_change/3]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_server:start_link(?MODULE, [], []).
+
+%% gen_server.
+
+init([]) ->
+ {ok, #state{}}.
+
+handle_call(_Request, _From, State) ->
+ {reply, ignored, State}.
+
+handle_cast(_Msg, State) ->
+ {noreply, State}.
+
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+endef
+
+define tpl_module
+-module($(n)).
+-export([]).
+endef
+
+define tpl_cowboy_http
+-module($(n)).
+-behaviour(cowboy_http_handler).
+
+-export([init/3]).
+-export([handle/2]).
+-export([terminate/3]).
+
+-record(state, {
+}).
+
+init(_, Req, _Opts) ->
+ {ok, Req, #state{}}.
+
+handle(Req, State=#state{}) ->
+ {ok, Req2} = cowboy_req:reply(200, Req),
+ {ok, Req2, State}.
+
+terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_gen_fsm
+-module($(n)).
+-behaviour(gen_fsm).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_fsm.
+-export([init/1]).
+-export([state_name/2]).
+-export([handle_event/3]).
+-export([state_name/3]).
+-export([handle_sync_event/4]).
+-export([handle_info/3]).
+-export([terminate/3]).
+-export([code_change/4]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_fsm:start_link(?MODULE, [], []).
+
+%% gen_fsm.
+
+init([]) ->
+ {ok, state_name, #state{}}.
+
+state_name(_Event, StateData) ->
+ {next_state, state_name, StateData}.
+
+handle_event(_Event, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+state_name(_Event, _From, StateData) ->
+ {reply, ignored, state_name, StateData}.
+
+handle_sync_event(_Event, _From, StateName, StateData) ->
+ {reply, ignored, StateName, StateData}.
+
+handle_info(_Info, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+terminate(_Reason, _StateName, _StateData) ->
+ ok.
+
+code_change(_OldVsn, StateName, StateData, _Extra) ->
+ {ok, StateName, StateData}.
+endef
+
+define tpl_gen_statem
+-module($(n)).
+-behaviour(gen_statem).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_statem.
+-export([callback_mode/0]).
+-export([init/1]).
+-export([state_name/3]).
+-export([handle_event/4]).
+-export([terminate/3]).
+-export([code_change/4]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_statem:start_link(?MODULE, [], []).
+
+%% gen_statem.
+
+callback_mode() ->
+ state_functions.
+
+init([]) ->
+ {ok, state_name, #state{}}.
+
+state_name(_EventType, _EventData, StateData) ->
+ {next_state, state_name, StateData}.
+
+handle_event(_EventType, _EventData, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+terminate(_Reason, _StateName, _StateData) ->
+ ok.
+
+code_change(_OldVsn, StateName, StateData, _Extra) ->
+ {ok, StateName, StateData}.
+endef
+
+define tpl_cowboy_loop
+-module($(n)).
+-behaviour(cowboy_loop_handler).
+
+-export([init/3]).
+-export([info/3]).
+-export([terminate/3]).
+
+-record(state, {
+}).
+
+init(_, Req, _Opts) ->
+ {loop, Req, #state{}, 5000, hibernate}.
+
+info(_Info, Req, State) ->
+ {loop, Req, State, hibernate}.
+
+terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_cowboy_rest
+-module($(n)).
+
+-export([init/3]).
+-export([content_types_provided/2]).
+-export([get_html/2]).
+
+init(_, _Req, _Opts) ->
+ {upgrade, protocol, cowboy_rest}.
+
+content_types_provided(Req, State) ->
+ {[{{<<"text">>, <<"html">>, '*'}, get_html}], Req, State}.
+
+get_html(Req, State) ->
+ {<<"<html><body>This is REST!</body></html>">>, Req, State}.
+endef
+
+define tpl_cowboy_ws
+-module($(n)).
+-behaviour(cowboy_websocket_handler).
+
+-export([init/3]).
+-export([websocket_init/3]).
+-export([websocket_handle/3]).
+-export([websocket_info/3]).
+-export([websocket_terminate/3]).
+
+-record(state, {
+}).
+
+init(_, _, _) ->
+ {upgrade, protocol, cowboy_websocket}.
+
+websocket_init(_, Req, _Opts) ->
+ Req2 = cowboy_req:compact(Req),
+ {ok, Req2, #state{}}.
+
+websocket_handle({text, Data}, Req, State) ->
+ {reply, {text, Data}, Req, State};
+websocket_handle({binary, Data}, Req, State) ->
+ {reply, {binary, Data}, Req, State};
+websocket_handle(_Frame, Req, State) ->
+ {ok, Req, State}.
+
+websocket_info(_Info, Req, State) ->
+ {ok, Req, State}.
+
+websocket_terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_ranch_protocol
+-module($(n)).
+-behaviour(ranch_protocol).
+
+-export([start_link/4]).
+-export([init/4]).
+
+-type opts() :: [].
+-export_type([opts/0]).
+
+-record(state, {
+ socket :: inet:socket(),
+ transport :: module()
+}).
+
+start_link(Ref, Socket, Transport, Opts) ->
+ Pid = spawn_link(?MODULE, init, [Ref, Socket, Transport, Opts]),
+ {ok, Pid}.
+
+-spec init(ranch:ref(), inet:socket(), module(), opts()) -> ok.
+init(Ref, Socket, Transport, _Opts) ->
+ ok = ranch:accept_ack(Ref),
+ loop(#state{socket=Socket, transport=Transport}).
+
+loop(State) ->
+ loop(State).
+endef
+
+# Plugin-specific targets.
+
+ifndef WS
+ifdef SP
+WS = $(subst a,,a $(wordlist 1,$(SP),a a a a a a a a a a a a a a a a a a a a))
+else
+WS = $(tab)
+endif
+endif
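+
+# Example (illustrative): `make bootstrap SP=4` renders the generated files
+# with 4-space indentation and records SP in the new Makefile; without SP,
+# templates keep their tab indentation.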
+
+bootstrap:
+ifneq ($(wildcard src/),)
+ $(error Error: src/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(eval n := $(PROJECT)_sup)
+ $(verbose) $(call core_render,bs_Makefile,Makefile)
+ $(verbose) echo "include erlang.mk" >> Makefile
+ $(verbose) mkdir src/
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc,src/$(PROJECT).app.src)
+endif
+ $(verbose) $(call core_render,bs_app,src/$(PROJECT)_app.erl)
+ $(verbose) $(call core_render,tpl_supervisor,src/$(PROJECT)_sup.erl)
+
+bootstrap-lib:
+ifneq ($(wildcard src/),)
+ $(error Error: src/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(verbose) $(call core_render,bs_Makefile,Makefile)
+ $(verbose) echo "include erlang.mk" >> Makefile
+ $(verbose) mkdir src/
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc_lib,src/$(PROJECT).app.src)
+endif
+
+bootstrap-rel:
+ifneq ($(wildcard relx.config),)
+ $(error Error: relx.config already exists)
+endif
+ifneq ($(wildcard config/),)
+ $(error Error: config/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(verbose) $(call core_render,bs_relx_config,relx.config)
+ $(verbose) mkdir config/
+ $(verbose) $(call core_render,bs_sys_config,config/sys.config)
+ $(verbose) $(call core_render,bs_vm_args,config/vm.args)
+
+new-app:
+ifndef in
+ $(error Usage: $(MAKE) new-app in=APP)
+endif
+ifneq ($(wildcard $(APPS_DIR)/$in),)
+ $(error Error: Application $in already exists)
+endif
+ $(eval p := $(in))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(eval n := $(in)_sup)
+ $(verbose) mkdir -p $(APPS_DIR)/$p/src/
+ $(verbose) $(call core_render,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile)
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc,$(APPS_DIR)/$p/src/$p.app.src)
+endif
+ $(verbose) $(call core_render,bs_app,$(APPS_DIR)/$p/src/$p_app.erl)
+ $(verbose) $(call core_render,tpl_supervisor,$(APPS_DIR)/$p/src/$p_sup.erl)
+
+new-lib:
+ifndef in
+ $(error Usage: $(MAKE) new-lib in=APP)
+endif
+ifneq ($(wildcard $(APPS_DIR)/$in),)
+ $(error Error: Application $in already exists)
+endif
+ $(eval p := $(in))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(verbose) mkdir -p $(APPS_DIR)/$p/src/
+ $(verbose) $(call core_render,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile)
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc_lib,$(APPS_DIR)/$p/src/$p.app.src)
+endif
+
+new:
+ifeq ($(wildcard src/)$(in),)
+ $(error Error: src/ directory does not exist)
+endif
+ifndef t
+ $(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP])
+endif
+ifndef n
+ $(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP])
+endif
+ifdef in
+ $(verbose) $(call core_render,tpl_$(t),$(APPS_DIR)/$(in)/src/$(n).erl)
+else
+ $(verbose) $(call core_render,tpl_$(t),src/$(n).erl)
+endif
+
+list-templates:
+	$(verbose) echo Available templates:
+ $(verbose) printf " %s\n" $(sort $(patsubst tpl_%,%,$(filter tpl_%,$(.VARIABLES))))
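+
+# Example usage (illustrative, with placeholder names): `make new t=gen_server
+# n=my_server` renders the gen_server template to src/my_server.erl; adding
+# in=my_app targets the my_app application under $(APPS_DIR) instead.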
+
+# Copyright (c) 2014-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: clean-c_src distclean-c_src-env
+
+# Configuration.
+
+C_SRC_DIR ?= $(CURDIR)/c_src
+C_SRC_ENV ?= $(C_SRC_DIR)/env.mk
+C_SRC_OUTPUT ?= $(CURDIR)/priv/$(PROJECT)
+C_SRC_TYPE ?= shared
+
+# System type and C compiler/flags.
+
+ifeq ($(PLATFORM),msys2)
+ C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?= .exe
+ C_SRC_OUTPUT_SHARED_EXTENSION ?= .dll
+else
+ C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?=
+ C_SRC_OUTPUT_SHARED_EXTENSION ?= .so
+endif
+
+ifeq ($(C_SRC_TYPE),shared)
+ C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_SHARED_EXTENSION)
+else
+ C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_EXECUTABLE_EXTENSION)
+endif
+
+ifeq ($(PLATFORM),msys2)
+# We hardcode the compiler used on MSYS2. The default CC=cc does
+# not produce working code. The "gcc" MSYS2 package also doesn't.
+ CC = /mingw64/bin/gcc
+ export CC
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),darwin)
+ CC ?= cc
+ CFLAGS ?= -O3 -std=c99 -arch x86_64 -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -arch x86_64 -Wall
+ LDFLAGS ?= -arch x86_64 -flat_namespace -undefined suppress
+else ifeq ($(PLATFORM),freebsd)
+ CC ?= cc
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),linux)
+ CC ?= gcc
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+endif
+
+ifneq ($(PLATFORM),msys2)
+ CFLAGS += -fPIC
+ CXXFLAGS += -fPIC
+endif
+
+CFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
+CXXFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
+
+LDLIBS += -L"$(ERL_INTERFACE_LIB_DIR)" -lei
+
+# Verbosity.
+
+c_verbose_0 = @echo " C " $(filter-out $(notdir $(MAKEFILE_LIST) $(C_SRC_ENV)),$(^F));
+c_verbose = $(c_verbose_$(V))
+
+cpp_verbose_0 = @echo " CPP " $(filter-out $(notdir $(MAKEFILE_LIST) $(C_SRC_ENV)),$(^F));
+cpp_verbose = $(cpp_verbose_$(V))
+
+link_verbose_0 = @echo " LD " $(@F);
+link_verbose = $(link_verbose_$(V))
+
+# Targets.
+
+ifeq ($(wildcard $(C_SRC_DIR)),)
+else ifneq ($(wildcard $(C_SRC_DIR)/Makefile),)
+app:: app-c_src
+
+test-build:: app-c_src
+
+app-c_src:
+ $(MAKE) -C $(C_SRC_DIR)
+
+clean::
+ $(MAKE) -C $(C_SRC_DIR) clean
+
+else
+
+ifeq ($(SOURCES),)
+SOURCES := $(sort $(foreach pat,*.c *.C *.cc *.cpp,$(call core_find,$(C_SRC_DIR)/,$(pat))))
+endif
+OBJECTS = $(addsuffix .o, $(basename $(SOURCES)))
+
+COMPILE_C = $(c_verbose) $(CC) $(CFLAGS) $(CPPFLAGS) -c
+COMPILE_CPP = $(cpp_verbose) $(CXX) $(CXXFLAGS) $(CPPFLAGS) -c
+
+app:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
+
+test-build:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
+
+$(C_SRC_OUTPUT_FILE): $(OBJECTS)
+ $(verbose) mkdir -p $(dir $@)
+ $(link_verbose) $(CC) $(OBJECTS) \
+ $(LDFLAGS) $(if $(filter $(C_SRC_TYPE),shared),-shared) $(LDLIBS) \
+ -o $(C_SRC_OUTPUT_FILE)
+
+$(OBJECTS): $(MAKEFILE_LIST) $(C_SRC_ENV)
+
+%.o: %.c
+ $(COMPILE_C) $(OUTPUT_OPTION) $<
+
+%.o: %.cc
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+%.o: %.C
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+%.o: %.cpp
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+clean:: clean-c_src
+
+clean-c_src:
+ $(gen_verbose) rm -f $(C_SRC_OUTPUT_FILE) $(OBJECTS)
+
+endif
+
+ifneq ($(wildcard $(C_SRC_DIR)),)
+ERL_ERTS_DIR = $(shell $(ERL) -eval 'io:format("~s~n", [code:lib_dir(erts)]), halt().')
+
+$(C_SRC_ENV):
+ $(verbose) $(ERL) -eval "file:write_file(\"$(call core_native_path,$(C_SRC_ENV))\", \
+ io_lib:format( \
+ \"# Generated by Erlang.mk. Edit at your own risk!~n~n\" \
+ \"ERTS_INCLUDE_DIR ?= ~s/erts-~s/include/~n\" \
+ \"ERL_INTERFACE_INCLUDE_DIR ?= ~s~n\" \
+ \"ERL_INTERFACE_LIB_DIR ?= ~s~n\" \
+ \"ERTS_DIR ?= $(ERL_ERTS_DIR)~n\", \
+ [code:root_dir(), erlang:system_info(version), \
+ code:lib_dir(erl_interface, include), \
+ code:lib_dir(erl_interface, lib)])), \
+ halt()."
+
+distclean:: distclean-c_src-env
+
+distclean-c_src-env:
+ $(gen_verbose) rm -f $(C_SRC_ENV)
+
+-include $(C_SRC_ENV)
+
+ifneq ($(ERL_ERTS_DIR),$(ERTS_DIR))
+$(shell rm -f $(C_SRC_ENV))
+endif
+endif
+
+# Templates.
+
+define bs_c_nif
+#include "erl_nif.h"
+
+static int loads = 0;
+
+static int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
+{
+ /* Initialize private data. */
+ *priv_data = NULL;
+
+ loads++;
+
+ return 0;
+}
+
+static int upgrade(ErlNifEnv* env, void** priv_data, void** old_priv_data, ERL_NIF_TERM load_info)
+{
+ /* Convert the private data to the new version. */
+ *priv_data = *old_priv_data;
+
+ loads++;
+
+ return 0;
+}
+
+static void unload(ErlNifEnv* env, void* priv_data)
+{
+ if (loads == 1) {
+ /* Destroy the private data. */
+ }
+
+ loads--;
+}
+
+static ERL_NIF_TERM hello(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ if (enif_is_atom(env, argv[0])) {
+ return enif_make_tuple2(env,
+ enif_make_atom(env, "hello"),
+ argv[0]);
+ }
+
+ return enif_make_tuple2(env,
+ enif_make_atom(env, "error"),
+ enif_make_atom(env, "badarg"));
+}
+
+static ErlNifFunc nif_funcs[] = {
+ {"hello", 1, hello}
+};
+
+ERL_NIF_INIT($n, nif_funcs, load, NULL, upgrade, unload)
+endef
+
+define bs_erl_nif
+-module($n).
+
+-export([hello/1]).
+
+-on_load(on_load/0).
+on_load() ->
+ PrivDir = case code:priv_dir(?MODULE) of
+ {error, _} ->
+ AppPath = filename:dirname(filename:dirname(code:which(?MODULE))),
+ filename:join(AppPath, "priv");
+ Path ->
+ Path
+ end,
+ erlang:load_nif(filename:join(PrivDir, atom_to_list(?MODULE)), 0).
+
+hello(_) ->
+ erlang:nif_error({not_loaded, ?MODULE}).
+endef
+
+new-nif:
+ifneq ($(wildcard $(C_SRC_DIR)/$n.c),)
+ $(error Error: $(C_SRC_DIR)/$n.c already exists)
+endif
+ifneq ($(wildcard src/$n.erl),)
+ $(error Error: src/$n.erl already exists)
+endif
+ifndef n
+ $(error Usage: $(MAKE) new-nif n=NAME [in=APP])
+endif
+ifdef in
+ $(verbose) $(MAKE) -C $(APPS_DIR)/$(in)/ new-nif n=$n in=
+else
+ $(verbose) mkdir -p $(C_SRC_DIR) src/
+ $(verbose) $(call core_render,bs_c_nif,$(C_SRC_DIR)/$n.c)
+ $(verbose) $(call core_render,bs_erl_nif,src/$n.erl)
+endif
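+
+# Example usage (illustrative): `make new-nif n=my_nif` renders the C stub
+# above to $(C_SRC_DIR)/my_nif.c and its Erlang counterpart to src/my_nif.erl.
+# (my_nif is a placeholder name.)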
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: ci ci-prepare ci-setup
+
+CI_OTP ?=
+CI_HIPE ?=
+CI_ERLLVM ?=
+
+ifeq ($(CI_VM),native)
+ERLC_OPTS += +native
+TEST_ERLC_OPTS += +native
+else ifeq ($(CI_VM),erllvm)
+ERLC_OPTS += +native +'{hipe, [to_llvm]}'
+TEST_ERLC_OPTS += +native +'{hipe, [to_llvm]}'
+endif
+
+ifeq ($(strip $(CI_OTP) $(CI_HIPE) $(CI_ERLLVM)),)
+ci::
+else
+
+ci:: $(addprefix ci-,$(CI_OTP) $(addsuffix -native,$(CI_HIPE)) $(addsuffix -erllvm,$(CI_ERLLVM)))
+
+ci-prepare: $(addprefix $(KERL_INSTALL_DIR)/,$(CI_OTP) $(addsuffix -native,$(CI_HIPE)))
+
+ci-setup::
+ $(verbose) :
+
+ci-extra::
+ $(verbose) :
+
+ci_verbose_0 = @echo " CI " $(1);
+ci_verbose = $(ci_verbose_$(V))
+
+define ci_target
+ci-$1: $(KERL_INSTALL_DIR)/$2
+ $(verbose) $(MAKE) --no-print-directory clean
+ $(ci_verbose) \
+ PATH="$(KERL_INSTALL_DIR)/$2/bin:$(PATH)" \
+ CI_OTP_RELEASE="$1" \
+ CT_OPTS="-label $1" \
+ CI_VM="$3" \
+ $(MAKE) ci-setup tests
+ $(verbose) $(MAKE) --no-print-directory ci-extra
+endef
+
+$(foreach otp,$(CI_OTP),$(eval $(call ci_target,$(otp),$(otp),otp)))
+$(foreach otp,$(CI_HIPE),$(eval $(call ci_target,$(otp)-native,$(otp)-native,native)))
+$(foreach otp,$(CI_ERLLVM),$(eval $(call ci_target,$(otp)-erllvm,$(otp)-native,erllvm)))
+
+$(foreach otp,$(filter-out $(ERLANG_OTP),$(CI_OTP)),$(eval $(call kerl_otp_target,$(otp))))
+$(foreach otp,$(filter-out $(ERLANG_HIPE),$(sort $(CI_HIPE) $(CI_ERLLVM))),$(eval $(call kerl_hipe_target,$(otp))))
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Continuous Integration targets:" \
+ " ci Run '$(MAKE) tests' on all configured Erlang versions." \
+ "" \
+ "The CI_OTP variable must be defined with the Erlang versions" \
+ "that must be tested. For example: CI_OTP = OTP-17.3.4 OTP-17.5.3"
+
+endif
+
+# Copyright (c) 2020, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifdef CONCUERROR_TESTS
+
+.PHONY: concuerror distclean-concuerror
+
+# Configuration
+
+CONCUERROR_LOGS_DIR ?= $(CURDIR)/logs
+CONCUERROR_OPTS ?=
+
+# Core targets.
+
+check:: concuerror
+
+ifndef KEEP_LOGS
+distclean:: distclean-concuerror
+endif
+
+# Plugin-specific targets.
+
+$(ERLANG_MK_TMP)/Concuerror/bin/concuerror: | $(ERLANG_MK_TMP)
+ $(verbose) git clone https://github.com/parapluu/Concuerror $(ERLANG_MK_TMP)/Concuerror
+ $(verbose) $(MAKE) -C $(ERLANG_MK_TMP)/Concuerror
+
+$(CONCUERROR_LOGS_DIR):
+ $(verbose) mkdir -p $(CONCUERROR_LOGS_DIR)
+
+define concuerror_html_report
+<!DOCTYPE html>
+<html lang="en">
+<head>
+<meta charset="utf-8">
+<title>Concuerror HTML report</title>
+</head>
+<body>
+<h1>Concuerror HTML report</h1>
+<p>Generated on $(concuerror_date)</p>
+<ul>
+$(foreach t,$(concuerror_targets),<li><a href="$(t).txt">$(t)</a></li>)
+</ul>
+</body>
+</html>
+endef
+
+concuerror: $(addprefix concuerror-,$(subst :,-,$(CONCUERROR_TESTS)))
+ $(eval concuerror_date := $(shell date))
+ $(eval concuerror_targets := $^)
+ $(verbose) $(call core_render,concuerror_html_report,$(CONCUERROR_LOGS_DIR)/concuerror.html)
+
+define concuerror_target
+.PHONY: concuerror-$1-$2
+
+concuerror-$1-$2: test-build | $(ERLANG_MK_TMP)/Concuerror/bin/concuerror $(CONCUERROR_LOGS_DIR)
+ $(ERLANG_MK_TMP)/Concuerror/bin/concuerror \
+ --pa $(CURDIR)/ebin --pa $(TEST_DIR) \
+ -o $(CONCUERROR_LOGS_DIR)/concuerror-$1-$2.txt \
+ $$(CONCUERROR_OPTS) -m $1 -t $2
+endef
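+
+# Example (illustrative, with placeholder names): CONCUERROR_TESTS = my_mod:my_test
+# creates a concuerror-my_mod-my_test target whose output goes to
+# $(CONCUERROR_LOGS_DIR)/concuerror-my_mod-my_test.txt and is linked from the
+# HTML report above.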
+
+$(foreach test,$(CONCUERROR_TESTS),$(eval $(call concuerror_target,$(firstword $(subst :, ,$(test))),$(lastword $(subst :, ,$(test))))))
+
+distclean-concuerror:
+ $(gen_verbose) rm -rf $(CONCUERROR_LOGS_DIR)
+
+endif
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: ct apps-ct distclean-ct
+
+# Configuration.
+
+CT_OPTS ?=
+
+ifneq ($(wildcard $(TEST_DIR)),)
+ifndef CT_SUITES
+CT_SUITES := $(sort $(subst _SUITE.erl,,$(notdir $(call core_find,$(TEST_DIR)/,*_SUITE.erl))))
+endif
+endif
+CT_SUITES ?=
+CT_LOGS_DIR ?= $(CURDIR)/logs
+
+# Core targets.
+
+tests:: ct
+
+ifndef KEEP_LOGS
+distclean:: distclean-ct
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Common_test targets:" \
+ " ct Run all the common_test suites for this project" \
+ "" \
+		"Each common_test suite has an associated target:" \
+		"a suite named http_SUITE can be run using the ct-http target."
+
+# Plugin-specific targets.
+
+CT_RUN = ct_run \
+ -no_auto_compile \
+ -noinput \
+ -pa $(CURDIR)/ebin $(TEST_DIR) \
+ -dir $(TEST_DIR) \
+ -logdir $(CT_LOGS_DIR)
+
+ifeq ($(CT_SUITES),)
+ct: $(if $(IS_APP)$(ROOT_DIR),,apps-ct)
+else
+# We do not run tests if we are in an apps/* with no test directory.
+ifneq ($(IS_APP)$(wildcard $(TEST_DIR)),1)
+ct: test-build $(if $(IS_APP)$(ROOT_DIR),,apps-ct)
+ $(verbose) mkdir -p $(CT_LOGS_DIR)
+ $(gen_verbose) $(CT_RUN) -sname ct_$(PROJECT) -suite $(addsuffix _SUITE,$(CT_SUITES)) $(CT_OPTS)
+endif
+endif
+
+ifneq ($(ALL_APPS_DIRS),)
+define ct_app_target
+apps-ct-$1: test-build
+ $$(MAKE) -C $1 ct IS_APP=1
+endef
+
+$(foreach app,$(ALL_APPS_DIRS),$(eval $(call ct_app_target,$(app))))
+
+apps-ct: $(addprefix apps-ct-,$(ALL_APPS_DIRS))
+endif
+
+ifdef t
+ifeq (,$(findstring :,$t))
+CT_EXTRA = -group $t
+else
+t_words = $(subst :, ,$t)
+CT_EXTRA = -group $(firstword $(t_words)) -case $(lastword $(t_words))
+endif
+else
+ifdef c
+CT_EXTRA = -case $c
+else
+CT_EXTRA =
+endif
+endif
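+
+# Example usage (illustrative, with placeholder names): `make ct-http t=my_group`
+# runs only that group of http_SUITE; `make ct-http t=my_group:my_case` narrows
+# it to a single case, and `make ct-http c=my_case` selects a case directly.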
+
+define ct_suite_target
+ct-$(1): test-build
+ $(verbose) mkdir -p $(CT_LOGS_DIR)
+ $(gen_verbose_esc) $(CT_RUN) -sname ct_$(PROJECT) -suite $(addsuffix _SUITE,$(1)) $(CT_EXTRA) $(CT_OPTS)
+endef
+
+$(foreach test,$(CT_SUITES),$(eval $(call ct_suite_target,$(test))))
+
+distclean-ct:
+ $(gen_verbose) rm -rf $(CT_LOGS_DIR)
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: plt distclean-plt dialyze
+
+# Configuration.
+
+DIALYZER_PLT ?= $(CURDIR)/.$(PROJECT).plt
+export DIALYZER_PLT
+
+PLT_APPS ?=
+DIALYZER_DIRS ?= --src -r $(wildcard src) $(ALL_APPS_DIRS)
+DIALYZER_OPTS ?= -Werror_handling -Wrace_conditions -Wunmatched_returns # -Wunderspecs
+DIALYZER_PLT_OPTS ?=
+
+# Core targets.
+
+check:: dialyze
+
+distclean:: distclean-plt
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Dialyzer targets:" \
+ " plt Build a PLT file for this project" \
+ " dialyze Analyze the project using Dialyzer"
+
+# Plugin-specific targets.
+
+define filter_opts.erl
+ Opts = init:get_plain_arguments(),
+ {Filtered, _} = lists:foldl(fun
+ (O, {Os, true}) -> {[O|Os], false};
+ (O = "-D", {Os, _}) -> {[O|Os], true};
+ (O = [\\$$-, \\$$D, _ | _], {Os, _}) -> {[O|Os], false};
+ (O = "-I", {Os, _}) -> {[O|Os], true};
+ (O = [\\$$-, \\$$I, _ | _], {Os, _}) -> {[O|Os], false};
+ (O = "-pa", {Os, _}) -> {[O|Os], true};
+ (_, Acc) -> Acc
+ end, {[], false}, Opts),
+ io:format("~s~n", [string:join(lists:reverse(Filtered), " ")]),
+ halt().
+endef
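+
+# For instance (illustrative), if ERLC_OPTS is "+debug_info -DTEST=1 -I include",
+# the script above keeps only the options Dialyzer understands and prints
+# "-DTEST=1 -I include"; +debug_info is dropped.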
+
+# DIALYZER_PLT is a variable understood directly by Dialyzer.
+#
+# We append the path to erts at the end of the PLT. This works
+# because the PLT file is in the external term format and the
+# function binary_to_term/1 ignores any trailing data.
+$(DIALYZER_PLT): deps app
+ $(eval DEPS_LOG := $(shell test -f $(ERLANG_MK_TMP)/deps.log && \
+ while read p; do test -d $$p/ebin && echo $$p/ebin; done <$(ERLANG_MK_TMP)/deps.log))
+ $(verbose) dialyzer --build_plt $(DIALYZER_PLT_OPTS) --apps \
+ erts kernel stdlib $(PLT_APPS) $(OTP_DEPS) $(LOCAL_DEPS) $(DEPS_LOG) || test $$? -eq 2
+ $(verbose) $(ERL) -eval 'io:format("~n~s~n", [code:lib_dir(erts)]), halt().' >> $@
+
+plt: $(DIALYZER_PLT)
+
+distclean-plt:
+ $(gen_verbose) rm -f $(DIALYZER_PLT)
+
+ifneq ($(wildcard $(DIALYZER_PLT)),)
+dialyze: $(if $(filter --src,$(DIALYZER_DIRS)),,deps app)
+ $(verbose) if ! tail -n1 $(DIALYZER_PLT) | \
+ grep -q "^`$(ERL) -eval 'io:format("~s", [code:lib_dir(erts)]), halt().'`$$"; then \
+ rm $(DIALYZER_PLT); \
+ $(MAKE) plt; \
+ fi
+else
+dialyze: $(DIALYZER_PLT)
+endif
+ $(verbose) dialyzer --no_native `$(ERL) \
+ -eval "$(subst $(newline),,$(call escape_dquotes,$(call filter_opts.erl)))" \
+ -extra $(ERLC_OPTS)` $(DIALYZER_DIRS) $(DIALYZER_OPTS) $(if $(wildcard ebin/),-pa ebin/)
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-edoc edoc
+
+# Configuration.
+
+EDOC_OPTS ?=
+EDOC_SRC_DIRS ?=
+EDOC_OUTPUT ?= doc
+
+define edoc.erl
+ SrcPaths = lists:foldl(fun(P, Acc) ->
+ filelib:wildcard(atom_to_list(P) ++ "/{src,c_src}") ++ Acc
+ end, [], [$(call comma_list,$(patsubst %,'%',$(call core_native_path,$(EDOC_SRC_DIRS))))]),
+ DefaultOpts = [{dir, "$(EDOC_OUTPUT)"}, {source_path, SrcPaths}, {subpackages, false}],
+ edoc:application($(1), ".", [$(2)] ++ DefaultOpts),
+ halt(0).
+endef
+
+# Core targets.
+
+ifneq ($(strip $(EDOC_SRC_DIRS)$(wildcard doc/overview.edoc)),)
+docs:: edoc
+endif
+
+distclean:: distclean-edoc
+
+# Plugin-specific targets.
+
+edoc: distclean-edoc doc-deps
+ $(gen_verbose) $(call erlang,$(call edoc.erl,$(PROJECT),$(EDOC_OPTS)))
+
+distclean-edoc:
+ $(gen_verbose) rm -f $(EDOC_OUTPUT)/*.css $(EDOC_OUTPUT)/*.html $(EDOC_OUTPUT)/*.png $(EDOC_OUTPUT)/edoc-info
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Configuration.
+
+DTL_FULL_PATH ?=
+DTL_PATH ?= templates/
+DTL_PREFIX ?=
+DTL_SUFFIX ?= _dtl
+DTL_OPTS ?=
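+
+# Example (illustrative): templates/my_page.dtl compiles to the module
+# my_page_dtl (ebin/my_page_dtl.beam). With DTL_FULL_PATH set, a nested
+# template such as templates/a/b.dtl becomes a_b_dtl rather than b_dtl.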
+
+# Verbosity.
+
+dtl_verbose_0 = @echo " DTL " $(filter %.dtl,$(?F));
+dtl_verbose = $(dtl_verbose_$(V))
+
+# Core targets.
+
+DTL_PATH := $(abspath $(DTL_PATH))
+DTL_FILES := $(sort $(call core_find,$(DTL_PATH),*.dtl))
+
+ifneq ($(DTL_FILES),)
+
+DTL_NAMES = $(addprefix $(DTL_PREFIX),$(addsuffix $(DTL_SUFFIX),$(DTL_FILES:$(DTL_PATH)/%.dtl=%)))
+DTL_MODULES = $(if $(DTL_FULL_PATH),$(subst /,_,$(DTL_NAMES)),$(notdir $(DTL_NAMES)))
+BEAM_FILES += $(addsuffix .beam,$(addprefix ebin/,$(DTL_MODULES)))
+
+ifneq ($(words $(DTL_FILES)),0)
+# Rebuild templates when the Makefile changes.
+$(ERLANG_MK_TMP)/last-makefile-change-erlydtl: $(MAKEFILE_LIST) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(DTL_FILES); \
+ fi
+ $(verbose) touch $@
+
+ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change-erlydtl
+endif
+
+define erlydtl_compile.erl
+ [begin
+ Module0 = case "$(strip $(DTL_FULL_PATH))" of
+ "" ->
+ filename:basename(F, ".dtl");
+ _ ->
+ "$(call core_native_path,$(DTL_PATH))/" ++ F2 = filename:rootname(F, ".dtl"),
+ re:replace(F2, "/", "_", [{return, list}, global])
+ end,
+ Module = list_to_atom("$(DTL_PREFIX)" ++ string:to_lower(Module0) ++ "$(DTL_SUFFIX)"),
+ case erlydtl:compile(F, Module, [$(DTL_OPTS)] ++ [{out_dir, "ebin/"}, return_errors]) of
+ ok -> ok;
+ {ok, _} -> ok
+ end
+ end || F <- string:tokens("$(1)", " ")],
+ halt().
+endef
+
+ebin/$(PROJECT).app:: $(DTL_FILES) | ebin/
+ $(if $(strip $?),\
+ $(dtl_verbose) $(call erlang,$(call erlydtl_compile.erl,$(call core_native_path,$?)),\
+ -pa ebin/))
+
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, Dave Cottlehuber <dch@skunkwerks.at>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-escript escript escript-zip
+
+# Configuration.
+
+ESCRIPT_NAME ?= $(PROJECT)
+ESCRIPT_FILE ?= $(ESCRIPT_NAME)
+
+ESCRIPT_SHEBANG ?= /usr/bin/env escript
+ESCRIPT_COMMENT ?= This is an -*- erlang -*- file
+ESCRIPT_EMU_ARGS ?= -escript main $(ESCRIPT_NAME)
+
+ESCRIPT_ZIP ?= 7z a -tzip -mx=9 -mtc=off $(if $(filter-out 0,$(V)),,> /dev/null)
+ESCRIPT_ZIP_FILE ?= $(ERLANG_MK_TMP)/escript.zip
+
+# Core targets.
+
+distclean:: distclean-escript
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Escript targets:" \
+ " escript Build an executable escript archive" \
+
+# Plugin-specific targets.
+
+escript-zip:: FULL=1
+escript-zip:: deps app
+	$(verbose) mkdir -p $(dir $(ESCRIPT_ZIP_FILE))
+ $(verbose) rm -f $(ESCRIPT_ZIP_FILE)
+ $(gen_verbose) cd .. && $(ESCRIPT_ZIP) $(ESCRIPT_ZIP_FILE) $(PROJECT)/ebin/*
+ifneq ($(DEPS),)
+ $(verbose) cd $(DEPS_DIR) && $(ESCRIPT_ZIP) $(ESCRIPT_ZIP_FILE) \
+ $(subst $(DEPS_DIR)/,,$(addsuffix /*,$(wildcard \
+ $(addsuffix /ebin,$(shell cat $(ERLANG_MK_TMP)/deps.log)))))
+endif
+
+escript:: escript-zip
+ $(gen_verbose) printf "%s\n" \
+ "#!$(ESCRIPT_SHEBANG)" \
+ "%% $(ESCRIPT_COMMENT)" \
+ "%%! $(ESCRIPT_EMU_ARGS)" > $(ESCRIPT_FILE)
+ $(verbose) cat $(ESCRIPT_ZIP_FILE) >> $(ESCRIPT_FILE)
+ $(verbose) chmod +x $(ESCRIPT_FILE)
+
+distclean-escript:
+ $(gen_verbose) rm -f $(ESCRIPT_FILE)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, Enrique Fernandez <enrique.fernandez@erlang-solutions.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: eunit apps-eunit
+
+# Configuration
+
+EUNIT_OPTS ?=
+EUNIT_ERL_OPTS ?=
+
+# Core targets.
+
+tests:: eunit
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "EUnit targets:" \
+ " eunit Run all the EUnit tests for this project"
+
+# Plugin-specific targets.
+
+define eunit.erl
+ $(call cover.erl)
+ CoverSetup(),
+ case eunit:test($1, [$(EUNIT_OPTS)]) of
+ ok -> ok;
+ error -> halt(2)
+ end,
+ CoverExport("$(call core_native_path,$(COVER_DATA_DIR))/eunit.coverdata"),
+ halt()
+endef
+
+EUNIT_ERL_OPTS += -pa $(TEST_DIR) $(CURDIR)/ebin
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+eunit: test-build cover-data-dir
+ $(gen_verbose) $(call erlang,$(call eunit.erl,['$(t)']),$(EUNIT_ERL_OPTS))
+else
+eunit: test-build cover-data-dir
+ $(gen_verbose) $(call erlang,$(call eunit.erl,fun $(t)/0),$(EUNIT_ERL_OPTS))
+endif
+else
+EUNIT_EBIN_MODS = $(notdir $(basename $(ERL_FILES) $(BEAM_FILES)))
+EUNIT_TEST_MODS = $(notdir $(basename $(call core_find,$(TEST_DIR)/,*.erl)))
+
+EUNIT_MODS = $(foreach mod,$(EUNIT_EBIN_MODS) $(filter-out \
+ $(patsubst %,%_tests,$(EUNIT_EBIN_MODS)),$(EUNIT_TEST_MODS)),'$(mod)')
+
+eunit: test-build $(if $(IS_APP)$(ROOT_DIR),,apps-eunit) cover-data-dir
+ifneq ($(wildcard src/ $(TEST_DIR)),)
+ $(gen_verbose) $(call erlang,$(call eunit.erl,[$(call comma_list,$(EUNIT_MODS))]),$(EUNIT_ERL_OPTS))
+endif
+
+ifneq ($(ALL_APPS_DIRS),)
+apps-eunit: test-build
+ $(verbose) eunit_retcode=0 ; for app in $(ALL_APPS_DIRS); do $(MAKE) -C $$app eunit IS_APP=1; \
+ [ $$? -ne 0 ] && eunit_retcode=1 ; done ; \
+ exit $$eunit_retcode
+endif
+endif
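+
+# Example usage (illustrative, with placeholder names): `make eunit t=my_mod`
+# runs the EUnit tests of that single module, while `make eunit t=my_mod:my_test_`
+# runs only the zero-arity test function my_mod:my_test_/0.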
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter proper,$(DEPS) $(TEST_DEPS)),proper)
+.PHONY: proper
+
+# Targets.
+
+tests:: proper
+
+define proper_check.erl
+ $(call cover.erl)
+ code:add_pathsa([
+ "$(call core_native_path,$(CURDIR)/ebin)",
+ "$(call core_native_path,$(DEPS_DIR)/*/ebin)",
+ "$(call core_native_path,$(TEST_DIR))"]),
+ Module = fun(M) ->
+ [true] =:= lists:usort([
+ case atom_to_list(F) of
+ "prop_" ++ _ ->
+ io:format("Testing ~p:~p/0~n", [M, F]),
+ proper:quickcheck(M:F(), nocolors);
+ _ ->
+ true
+ end
+ || {F, 0} <- M:module_info(exports)])
+ end,
+ try begin
+ CoverSetup(),
+ Res = case $(1) of
+ all -> [true] =:= lists:usort([Module(M) || M <- [$(call comma_list,$(3))]]);
+ module -> Module($(2));
+ function -> proper:quickcheck($(2), nocolors)
+ end,
+ CoverExport("$(COVER_DATA_DIR)/proper.coverdata"),
+ Res
+ end of
+ true -> halt(0);
+ _ -> halt(1)
+ catch error:undef ->
+ io:format("Undefined property or module?~n~p~n", [erlang:get_stacktrace()]),
+ halt(0)
+ end.
+endef
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+proper: test-build cover-data-dir
+ $(verbose) $(call erlang,$(call proper_check.erl,module,$(t)))
+else
+proper: test-build cover-data-dir
+ $(verbose) echo Testing $(t)/0
+ $(verbose) $(call erlang,$(call proper_check.erl,function,$(t)()))
+endif
+else
+proper: test-build cover-data-dir
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(wildcard ebin/*.beam) $(call core_find,$(TEST_DIR)/,*.beam))))))
+ $(gen_verbose) $(call erlang,$(call proper_check.erl,all,undefined,$(MODULES)))
+endif
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Verbosity.
+
+proto_verbose_0 = @echo " PROTO " $(filter %.proto,$(?F));
+proto_verbose = $(proto_verbose_$(V))
+
+# Core targets.
+
+ifneq ($(wildcard src/),)
+ifneq ($(filter gpb protobuffs,$(BUILD_DEPS) $(DEPS)),)
+PROTO_FILES := $(filter %.proto,$(ALL_SRC_FILES))
+ERL_FILES += $(addprefix src/,$(patsubst %.proto,%_pb.erl,$(notdir $(PROTO_FILES))))
+
+ifeq ($(PROTO_FILES),)
+$(ERLANG_MK_TMP)/last-makefile-change-protobuffs:
+ $(verbose) :
+else
+# Rebuild proto files when the Makefile changes.
+# We exclude $(PROJECT).d to avoid a circular dependency.
+$(ERLANG_MK_TMP)/last-makefile-change-protobuffs: $(filter-out $(PROJECT).d,$(MAKEFILE_LIST)) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(PROTO_FILES); \
+ fi
+ $(verbose) touch $@
+
+$(PROJECT).d:: $(ERLANG_MK_TMP)/last-makefile-change-protobuffs
+endif
+
+ifeq ($(filter gpb,$(BUILD_DEPS) $(DEPS)),)
+define compile_proto.erl
+ [begin
+ protobuffs_compile:generate_source(F, [
+ {output_include_dir, "./include"},
+ {output_src_dir, "./src"}])
+ end || F <- string:tokens("$1", " ")],
+ halt().
+endef
+else
+define compile_proto.erl
+ [begin
+ gpb_compile:file(F, [
+ {include_as_lib, true},
+ {module_name_suffix, "_pb"},
+ {o_hrl, "./include"},
+ {o_erl, "./src"}])
+ end || F <- string:tokens("$1", " ")],
+ halt().
+endef
+endif
+
+ifneq ($(PROTO_FILES),)
+$(PROJECT).d:: $(PROTO_FILES)
+ $(verbose) mkdir -p ebin/ include/
+ $(if $(strip $?),$(proto_verbose) $(call erlang,$(call compile_proto.erl,$?)))
+endif
+endif
+endif
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: relx-rel relx-relup distclean-relx-rel run
+
+# Configuration.
+
+RELX ?= $(ERLANG_MK_TMP)/relx
+RELX_CONFIG ?= $(CURDIR)/relx.config
+
+RELX_URL ?= https://erlang.mk/res/relx-v3.27.0
+RELX_OPTS ?=
+RELX_OUTPUT_DIR ?= _rel
+RELX_REL_EXT ?=
+RELX_TAR ?= 1
+
+ifdef SFX
+ RELX_TAR = 1
+endif
+
+ifeq ($(firstword $(RELX_OPTS)),-o)
+ RELX_OUTPUT_DIR = $(word 2,$(RELX_OPTS))
+else
+ RELX_OPTS += -o $(RELX_OUTPUT_DIR)
+endif
+
+# Core targets.
+
+ifeq ($(IS_DEP),)
+ifneq ($(wildcard $(RELX_CONFIG)),)
+rel:: relx-rel
+
+relup:: relx-relup
+endif
+endif
+
+distclean:: distclean-relx-rel
+
+# Plugin-specific targets.
+
+$(RELX): | $(ERLANG_MK_TMP)
+ $(gen_verbose) $(call core_http_get,$(RELX),$(RELX_URL))
+ $(verbose) chmod +x $(RELX)
+
+relx-rel: $(RELX) rel-deps app
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) release
+ $(verbose) $(MAKE) relx-post-rel
+ifeq ($(RELX_TAR),1)
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) tar
+endif
+
+relx-relup: $(RELX) rel-deps app
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) release
+ $(MAKE) relx-post-rel
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) relup $(if $(filter 1,$(RELX_TAR)),tar)
+
+distclean-relx-rel:
+ $(gen_verbose) rm -rf $(RELX_OUTPUT_DIR)
+
+# Default hooks.
+relx-post-rel::
+ $(verbose) :
+
+# Run target.
+
+ifeq ($(wildcard $(RELX_CONFIG)),)
+run::
+else
+
+define get_relx_release.erl
+ {ok, Config} = file:consult("$(call core_native_path,$(RELX_CONFIG))"),
+ {release, {Name, Vsn0}, _} = lists:keyfind(release, 1, Config),
+ Vsn = case Vsn0 of
+ {cmd, Cmd} -> os:cmd(Cmd);
+ semver -> "";
+ {semver, _} -> "";
+ VsnStr -> Vsn0
+ end,
+ Extended = case lists:keyfind(extended_start_script, 1, Config) of
+ {_, true} -> "1";
+ _ -> ""
+ end,
+ io:format("~s ~s ~s", [Name, Vsn, Extended]),
+ halt(0).
+endef
+
+RELX_REL := $(shell $(call erlang,$(get_relx_release.erl)))
+RELX_REL_NAME := $(word 1,$(RELX_REL))
+RELX_REL_VSN := $(word 2,$(RELX_REL))
+RELX_REL_CMD := $(if $(word 3,$(RELX_REL)),console)
+
+ifeq ($(PLATFORM),msys2)
+RELX_REL_EXT := .cmd
+endif
+
+run:: all
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) $(RELX_REL_CMD)
+
+ifdef RELOAD
+rel::
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) ping
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) \
+ eval "io:format(\"~p~n\", [c:lm()])"
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Relx targets:" \
+ " run Compile the project, build the release and run it"
+
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, M Robert Martin <rob@version2beta.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: shell
+
+# Configuration.
+
+SHELL_ERL ?= erl
+SHELL_PATHS ?= $(CURDIR)/ebin $(TEST_DIR)
+SHELL_OPTS ?=
+
+ALL_SHELL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(SHELL_DEPS))
+
+# Core targets
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Shell targets:" \
+ " shell Run an erlang shell with SHELL_OPTS or reasonable default"
+
+# Plugin-specific targets.
+
+$(foreach dep,$(SHELL_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+build-shell-deps:
+else
+build-shell-deps: $(ALL_SHELL_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_SHELL_DEPS_DIRS) ; do \
+ if [ -z "$(strip $(FULL))" ] && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ else \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ fi \
+ done
+endif
+
+shell:: build-shell-deps
+ $(gen_verbose) $(SHELL_ERL) -pa $(SHELL_PATHS) $(SHELL_OPTS)
+
+# Copyright 2017, Stanislaw Klekot <dozzie@jarowit.net>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-sphinx sphinx
+
+# Configuration.
+
+SPHINX_BUILD ?= sphinx-build
+SPHINX_SOURCE ?= doc
+SPHINX_CONFDIR ?=
+SPHINX_FORMATS ?= html
+SPHINX_DOCTREES ?= $(ERLANG_MK_TMP)/sphinx.doctrees
+SPHINX_OPTS ?=
+
+#sphinx_html_opts =
+#sphinx_html_output = html
+#sphinx_man_opts =
+#sphinx_man_output = man
+#sphinx_latex_opts =
+#sphinx_latex_output = latex
+
+# Helpers.
+
+sphinx_build_0 = @echo " SPHINX" $1; $(SPHINX_BUILD) -N -q
+sphinx_build_1 = $(SPHINX_BUILD) -N
+sphinx_build_2 = set -x; $(SPHINX_BUILD)
+sphinx_build = $(sphinx_build_$(V))
+
+define sphinx.build
+$(call sphinx_build,$1) -b $1 -d $(SPHINX_DOCTREES) $(if $(SPHINX_CONFDIR),-c $(SPHINX_CONFDIR)) $(SPHINX_OPTS) $(sphinx_$1_opts) -- $(SPHINX_SOURCE) $(call sphinx.output,$1)
+
+endef
+
+define sphinx.output
+$(if $(sphinx_$1_output),$(sphinx_$1_output),$1)
+endef
+
+# Targets.
+
+ifneq ($(wildcard $(if $(SPHINX_CONFDIR),$(SPHINX_CONFDIR),$(SPHINX_SOURCE))/conf.py),)
+docs:: sphinx
+distclean:: distclean-sphinx
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Sphinx targets:" \
+ " sphinx Generate Sphinx documentation." \
+ "" \
+ "ReST sources and 'conf.py' file are expected in directory pointed by" \
+ "SPHINX_SOURCE ('doc' by default). SPHINX_FORMATS lists formats to build (only" \
+ "'html' format is generated by default); target directory can be specified by" \
+ 'setting sphinx_$${format}_output, for example: sphinx_html_output = output/html' \
+ "Additional Sphinx options can be set in SPHINX_OPTS."
+
+# Plugin-specific targets.
+
+sphinx:
+ $(foreach F,$(SPHINX_FORMATS),$(call sphinx.build,$F))
+
+distclean-sphinx:
+ $(gen_verbose) rm -rf $(filter-out $(SPHINX_SOURCE),$(foreach F,$(SPHINX_FORMATS),$(call sphinx.output,$F)))
+
+# Copyright (c) 2017, Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: show-ERL_LIBS show-ERLC_OPTS show-TEST_ERLC_OPTS
+
+show-ERL_LIBS:
+ @echo $(ERL_LIBS)
+
+show-ERLC_OPTS:
+ @$(foreach opt,$(ERLC_OPTS) -pa ebin -I include,echo "$(opt)";)
+
+show-TEST_ERLC_OPTS:
+ @$(foreach opt,$(TEST_ERLC_OPTS) -pa ebin -I include,echo "$(opt)";)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter triq,$(DEPS) $(TEST_DEPS)),triq)
+.PHONY: triq
+
+# Targets.
+
+tests:: triq
+
+define triq_check.erl
+ $(call cover.erl)
+ code:add_pathsa([
+ "$(call core_native_path,$(CURDIR)/ebin)",
+ "$(call core_native_path,$(DEPS_DIR)/*/ebin)",
+ "$(call core_native_path,$(TEST_DIR))"]),
+ try begin
+ CoverSetup(),
+ Res = case $(1) of
+ all -> [true] =:= lists:usort([triq:check(M) || M <- [$(call comma_list,$(3))]]);
+ module -> triq:check($(2));
+ function -> triq:check($(2))
+ end,
+ CoverExport("$(COVER_DATA_DIR)/triq.coverdata"),
+ Res
+ end of
+ true -> halt(0);
+ _ -> halt(1)
+ catch error:undef ->
+ io:format("Undefined property or module?~n~p~n", [erlang:get_stacktrace()]),
+ halt(0)
+ end.
+endef
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+triq: test-build cover-data-dir
+ $(verbose) $(call erlang,$(call triq_check.erl,module,$(t)))
+else
+triq: test-build cover-data-dir
+ $(verbose) echo Testing $(t)/0
+ $(verbose) $(call erlang,$(call triq_check.erl,function,$(t)()))
+endif
+else
+triq: test-build cover-data-dir
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(wildcard ebin/*.beam) $(call core_find,$(TEST_DIR)/,*.beam))))))
+ $(gen_verbose) $(call erlang,$(call triq_check.erl,all,undefined,$(MODULES)))
+endif
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Erlang Solutions Ltd.
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: xref distclean-xref
+
+# Configuration.
+
+ifeq ($(XREF_CONFIG),)
+ XREFR_ARGS :=
+else
+ XREFR_ARGS := -c $(XREF_CONFIG)
+endif
+
+XREFR ?= $(CURDIR)/xrefr
+export XREFR
+
+XREFR_URL ?= https://github.com/inaka/xref_runner/releases/download/1.1.0/xrefr
+
+# Core targets.
+
+help::
+ $(verbose) printf '%s\n' '' \
+ 'Xref targets:' \
+ ' xref Run Xrefr using $$XREF_CONFIG as the config file, if defined'
+
+distclean:: distclean-xref
+
+# Plugin-specific targets.
+
+$(XREFR):
+ $(gen_verbose) $(call core_http_get,$(XREFR),$(XREFR_URL))
+ $(verbose) chmod +x $(XREFR)
+
+xref: deps app $(XREFR)
+ $(gen_verbose) $(XREFR) $(XREFR_ARGS)
+
+distclean-xref:
+ $(gen_verbose) rm -rf $(XREFR)
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Viktor Söderqvist <viktor@zuiderkwast.se>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+COVER_REPORT_DIR ?= cover
+COVER_DATA_DIR ?= $(COVER_REPORT_DIR)
+
+ifdef COVER
+COVER_APPS ?= $(notdir $(ALL_APPS_DIRS))
+COVER_DEPS ?=
+endif
+
+# Code coverage for Common Test.
+
+ifdef COVER
+ifdef CT_RUN
+ifneq ($(wildcard $(TEST_DIR)),)
+test-build:: $(TEST_DIR)/ct.cover.spec
+
+$(TEST_DIR)/ct.cover.spec: cover-data-dir
+ $(gen_verbose) printf "%s\n" \
+ "{incl_app, '$(PROJECT)', details}." \
+ "{incl_dirs, '$(PROJECT)', [\"$(call core_native_path,$(CURDIR)/ebin)\" \
+ $(foreach a,$(COVER_APPS),$(comma) \"$(call core_native_path,$(APPS_DIR)/$a/ebin)\") \
+ $(foreach d,$(COVER_DEPS),$(comma) \"$(call core_native_path,$(DEPS_DIR)/$d/ebin)\")]}." \
+ '{export,"$(call core_native_path,$(abspath $(COVER_DATA_DIR))/ct.coverdata)"}.' > $@
+
+CT_RUN += -cover $(TEST_DIR)/ct.cover.spec
+endif
+endif
+endif
+
+# Code coverage for other tools.
+
+ifdef COVER
+define cover.erl
+ CoverSetup = fun() ->
+ Dirs = ["$(call core_native_path,$(CURDIR)/ebin)"
+ $(foreach a,$(COVER_APPS),$(comma) "$(call core_native_path,$(APPS_DIR)/$a/ebin)")
+ $(foreach d,$(COVER_DEPS),$(comma) "$(call core_native_path,$(DEPS_DIR)/$d/ebin)")],
+ [begin
+ case filelib:is_dir(Dir) of
+ false -> false;
+ true ->
+ case cover:compile_beam_directory(Dir) of
+ {error, _} -> halt(1);
+ _ -> true
+ end
+ end
+ end || Dir <- Dirs]
+ end,
+ CoverExport = fun(Filename) -> cover:export(Filename) end,
+endef
+else
+define cover.erl
+ CoverSetup = fun() -> ok end,
+ CoverExport = fun(_) -> ok end,
+endef
+endif
+
+# Core targets
+
+ifdef COVER
+ifneq ($(COVER_REPORT_DIR),)
+tests::
+ $(verbose) $(MAKE) --no-print-directory cover-report
+endif
+
+cover-data-dir: | $(COVER_DATA_DIR)
+
+$(COVER_DATA_DIR):
+ $(verbose) mkdir -p $(COVER_DATA_DIR)
+else
+cover-data-dir:
+endif
+
+clean:: coverdata-clean
+
+ifneq ($(COVER_REPORT_DIR),)
+distclean:: cover-report-clean
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Cover targets:" \
+ " cover-report Generate a HTML coverage report from previously collected" \
+ " cover data." \
+ " all.coverdata Merge all coverdata files into all.coverdata." \
+ "" \
+ "If COVER=1 is set, coverage data is generated by the targets eunit and ct. The" \
+ "target tests additionally generates a HTML coverage report from the combined" \
+ "coverdata files from each of these testing tools. HTML reports can be disabled" \
+ "by setting COVER_REPORT_DIR to empty."
+
+# Plugin specific targets
+
+COVERDATA = $(filter-out $(COVER_DATA_DIR)/all.coverdata,$(wildcard $(COVER_DATA_DIR)/*.coverdata))
+
+.PHONY: coverdata-clean
+coverdata-clean:
+ $(gen_verbose) rm -f $(COVER_DATA_DIR)/*.coverdata $(TEST_DIR)/ct.cover.spec
+
+# Merge all coverdata files into one.
+define cover_export.erl
+ $(foreach f,$(COVERDATA),cover:import("$(f)") == ok orelse halt(1),)
+ cover:export("$(COVER_DATA_DIR)/$@"), halt(0).
+endef
+
+all.coverdata: $(COVERDATA) cover-data-dir
+ $(gen_verbose) $(call erlang,$(cover_export.erl))
+
+# These are only defined if COVER_REPORT_DIR is non-empty. Set COVER_REPORT_DIR to
+# empty if you want the coverdata files but not the HTML report.
+ifneq ($(COVER_REPORT_DIR),)
+
+.PHONY: cover-report-clean cover-report
+
+cover-report-clean:
+ $(gen_verbose) rm -rf $(COVER_REPORT_DIR)
+ifneq ($(COVER_REPORT_DIR),$(COVER_DATA_DIR))
+ $(if $(shell ls -A $(COVER_DATA_DIR)/),,$(verbose) rmdir $(COVER_DATA_DIR))
+endif
+
+ifeq ($(COVERDATA),)
+cover-report:
+else
+
+# Modules which include eunit.hrl always contain one line without coverage
+# because eunit defines test/0 which is never called. We compensate for this.
+EUNIT_HRL_MODS = $(subst $(space),$(comma),$(shell \
+ grep -H -e '^\s*-include.*include/eunit\.hrl"' src/*.erl \
+ | sed "s/^src\/\(.*\)\.erl:.*/'\1'/" | uniq))
+
+define cover_report.erl
+ $(foreach f,$(COVERDATA),cover:import("$(f)") == ok orelse halt(1),)
+ Ms = cover:imported_modules(),
+ [cover:analyse_to_file(M, "$(COVER_REPORT_DIR)/" ++ atom_to_list(M)
+ ++ ".COVER.html", [html]) || M <- Ms],
+ Report = [begin {ok, R} = cover:analyse(M, module), R end || M <- Ms],
+ EunitHrlMods = [$(EUNIT_HRL_MODS)],
+ Report1 = [{M, {Y, case lists:member(M, EunitHrlMods) of
+ true -> N - 1; false -> N end}} || {M, {Y, N}} <- Report],
+ TotalY = lists:sum([Y || {_, {Y, _}} <- Report1]),
+ TotalN = lists:sum([N || {_, {_, N}} <- Report1]),
+ Perc = fun(Y, N) -> case Y + N of 0 -> 100; S -> round(100 * Y / S) end end,
+ TotalPerc = Perc(TotalY, TotalN),
+ {ok, F} = file:open("$(COVER_REPORT_DIR)/index.html", [write]),
+ io:format(F, "<!DOCTYPE html><html>~n"
+ "<head><meta charset=\"UTF-8\">~n"
+ "<title>Coverage report</title></head>~n"
+ "<body>~n", []),
+ io:format(F, "<h1>Coverage</h1>~n<p>Total: ~p%</p>~n", [TotalPerc]),
+ io:format(F, "<table><tr><th>Module</th><th>Coverage</th></tr>~n", []),
+ [io:format(F, "<tr><td><a href=\"~p.COVER.html\">~p</a></td>"
+ "<td>~p%</td></tr>~n",
+ [M, M, Perc(Y, N)]) || {M, {Y, N}} <- Report1],
+ How = "$(subst $(space),$(comma)$(space),$(basename $(COVERDATA)))",
+ Date = "$(shell date -u "+%Y-%m-%dT%H:%M:%SZ")",
+ io:format(F, "</table>~n"
+ "<p>Generated using ~s and erlang.mk on ~s.</p>~n"
+ "</body></html>", [How, Date]),
+ halt().
+endef
+
+cover-report:
+ $(verbose) mkdir -p $(COVER_REPORT_DIR)
+ $(gen_verbose) $(call erlang,$(cover_report.erl))
+
+endif
+endif # ifneq ($(COVER_REPORT_DIR),)
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: sfx
+
+ifdef RELX_REL
+ifdef SFX
+
+# Configuration.
+
+SFX_ARCHIVE ?= $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/$(RELX_REL_NAME)-$(RELX_REL_VSN).tar.gz
+SFX_OUTPUT_FILE ?= $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME).run
+
+# Core targets.
+
+rel:: sfx
+
+# Plugin-specific targets.
+
+define sfx_stub
+#!/bin/sh
+
+TMPDIR=`mktemp -d`
+ARCHIVE=`awk '/^__ARCHIVE_BELOW__$$/ {print NR + 1; exit 0;}' $$0`
+FILENAME=$$(basename $$0)
+REL=$${FILENAME%.*}
+
+tail -n+$$ARCHIVE $$0 | tar -xzf - -C $$TMPDIR
+
+$$TMPDIR/bin/$$REL console
+RET=$$?
+
+rm -rf $$TMPDIR
+
+exit $$RET
+
+__ARCHIVE_BELOW__
+endef
+
+sfx:
+ $(verbose) $(call core_render,sfx_stub,$(SFX_OUTPUT_FILE))
+ $(gen_verbose) cat $(SFX_ARCHIVE) >> $(SFX_OUTPUT_FILE)
+ $(verbose) chmod +x $(SFX_OUTPUT_FILE)
+
+endif
+endif
+
+# Copyright (c) 2013-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# External plugins.
+
+DEP_PLUGINS ?=
+
+$(foreach p,$(DEP_PLUGINS),\
+ $(eval $(if $(findstring /,$p),\
+ $(call core_dep_plugin,$p,$(firstword $(subst /, ,$p))),\
+ $(call core_dep_plugin,$p/plugins.mk,$p))))
+
+help:: help-plugins
+
+help-plugins::
+ $(verbose) :
+
+# Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015-2016, Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Fetch dependencies recursively (without building them).
+
+.PHONY: fetch-deps fetch-doc-deps fetch-rel-deps fetch-test-deps \
+ fetch-shell-deps
+
+.PHONY: $(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+fetch-deps: $(ERLANG_MK_RECURSIVE_DEPS_LIST)
+fetch-doc-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST)
+fetch-rel-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST)
+fetch-test-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST)
+fetch-shell-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+ifneq ($(SKIP_DEPS),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST):
+ $(verbose) :> $@
+else
+# By default, we fetch "normal" dependencies. They are also included
+# regardless of which type of dependencies is requested.
+#
+# $(ALL_DEPS_DIRS) includes $(BUILD_DEPS).
+
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_DOC_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_REL_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_TEST_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_SHELL_DEPS_DIRS)
+
+# Allow fetch-deps to be used together with $(DEP_TYPES) to fetch multiple
+# types of dependencies with a single target.
+ifneq ($(filter doc,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_DOC_DEPS_DIRS)
+endif
+ifneq ($(filter rel,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_REL_DEPS_DIRS)
+endif
+ifneq ($(filter test,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_TEST_DEPS_DIRS)
+endif
+ifneq ($(filter shell,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_SHELL_DEPS_DIRS)
+endif
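+
+# Example invocation (illustrative): "make fetch-deps DEP_TYPES='doc test'"
+# fetches the doc and test dependencies in addition to the normal ones.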
+
+ERLANG_MK_RECURSIVE_TMP_LIST := $(abspath $(ERLANG_MK_TMP)/recursive-tmp-deps-$(shell echo $$PPID).log)
+
+$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): | $(ERLANG_MK_TMP)
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_RECURSIVE_TMP_LIST)
+endif
+ $(verbose) touch $(ERLANG_MK_RECURSIVE_TMP_LIST)
+ $(verbose) set -e; for dep in $^ ; do \
+ if ! grep -qs ^$$dep$$ $(ERLANG_MK_RECURSIVE_TMP_LIST); then \
+ echo $$dep >> $(ERLANG_MK_RECURSIVE_TMP_LIST); \
+ if grep -qs -E "^[[:blank:]]*include[[:blank:]]+(erlang\.mk|.*/erlang\.mk|.*ERLANG_MK_FILENAME.*)$$" \
+ $$dep/GNUmakefile $$dep/makefile $$dep/Makefile; then \
+ $(MAKE) -C $$dep fetch-deps \
+ IS_DEP=1 \
+ ERLANG_MK_RECURSIVE_TMP_LIST=$(ERLANG_MK_RECURSIVE_TMP_LIST); \
+ fi \
+ fi \
+ done
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) sort < $(ERLANG_MK_RECURSIVE_TMP_LIST) | \
+ uniq > $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted
+ $(verbose) cmp -s $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted $@ \
+ || mv $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted $@
+ $(verbose) rm -f $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted
+ $(verbose) rm $(ERLANG_MK_RECURSIVE_TMP_LIST)
+endif
+endif # ifneq ($(SKIP_DEPS),)
+
+# List dependencies recursively.
+
+.PHONY: list-deps list-doc-deps list-rel-deps list-test-deps \
+ list-shell-deps
+
+list-deps: $(ERLANG_MK_RECURSIVE_DEPS_LIST)
+list-doc-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST)
+list-rel-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST)
+list-test-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST)
+list-shell-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+list-deps list-doc-deps list-rel-deps list-test-deps list-shell-deps:
+ $(verbose) cat $^
+
+# Query dependencies recursively.
+
+.PHONY: query-deps query-doc-deps query-rel-deps query-test-deps \
+ query-shell-deps
+
+QUERY ?= name fetch_method repo version
+
+define query_target
+$(1): $(2) clean-tmp-query.log
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(4)
+endif
+ $(verbose) $(foreach dep,$(3),\
+ echo $(PROJECT): $(foreach q,$(QUERY),$(call query_$(q),$(dep))) >> $(4) ;)
+ $(if $(filter-out query-deps,$(1)),,\
+ $(verbose) set -e; for dep in $(3) ; do \
+ if grep -qs ^$$$$dep$$$$ $(ERLANG_MK_TMP)/query.log; then \
+ :; \
+ else \
+ echo $$$$dep >> $(ERLANG_MK_TMP)/query.log; \
+ $(MAKE) -C $(DEPS_DIR)/$$$$dep $$@ QUERY="$(QUERY)" IS_DEP=1 || true; \
+ fi \
+ done)
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) touch $(4)
+ $(verbose) cat $(4)
+endif
+endef
+
+clean-tmp-query.log:
+ifeq ($(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_TMP)/query.log
+endif
+
+$(eval $(call query_target,query-deps,$(ERLANG_MK_RECURSIVE_DEPS_LIST),$(BUILD_DEPS) $(DEPS),$(ERLANG_MK_QUERY_DEPS_FILE)))
+$(eval $(call query_target,query-doc-deps,$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST),$(DOC_DEPS),$(ERLANG_MK_QUERY_DOC_DEPS_FILE)))
+$(eval $(call query_target,query-rel-deps,$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST),$(REL_DEPS),$(ERLANG_MK_QUERY_REL_DEPS_FILE)))
+$(eval $(call query_target,query-test-deps,$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST),$(TEST_DEPS),$(ERLANG_MK_QUERY_TEST_DEPS_FILE)))
+$(eval $(call query_target,query-shell-deps,$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST),$(SHELL_DEPS),$(ERLANG_MK_QUERY_SHELL_DEPS_FILE)))
diff --git a/deps/rabbitmq_peer_discovery_k8s/examples/README.md b/deps/rabbitmq_peer_discovery_k8s/examples/README.md
new file mode 100644
index 0000000000..43e795aa17
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_k8s/examples/README.md
@@ -0,0 +1,3 @@
+# Deploy RabbitMQ on Kubernetes with the Kubernetes Peer Discovery Plugin
+
+These examples have graduated and moved to [a separate repository](https://github.com/rabbitmq/diy-kubernetes-examples). \ No newline at end of file
diff --git a/deps/rabbitmq_peer_discovery_k8s/examples/gke/README.md b/deps/rabbitmq_peer_discovery_k8s/examples/gke/README.md
new file mode 100644
index 0000000000..8523780755
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_k8s/examples/gke/README.md
@@ -0,0 +1,3 @@
+# Deploy RabbitMQ on Kubernetes with the Kubernetes Peer Discovery Plugin to GKE
+
+This example has graduated and moved to [a separate repository](https://github.com/rabbitmq/diy-kubernetes-examples/tree/master/gke). \ No newline at end of file
diff --git a/deps/rabbitmq_peer_discovery_k8s/examples/kind/README.md b/deps/rabbitmq_peer_discovery_k8s/examples/kind/README.md
new file mode 100644
index 0000000000..1045547a99
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_k8s/examples/kind/README.md
@@ -0,0 +1,3 @@
+# Deploy RabbitMQ on Kubernetes with the Kubernetes Peer Discovery Plugin to Kind
+
+This example has graduated and moved to [a separate repository](https://github.com/rabbitmq/diy-kubernetes-examples/tree/master/kind).
diff --git a/deps/rabbitmq_peer_discovery_k8s/examples/minikube/README.md b/deps/rabbitmq_peer_discovery_k8s/examples/minikube/README.md
new file mode 100644
index 0000000000..57dbc67227
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_k8s/examples/minikube/README.md
@@ -0,0 +1,3 @@
+# Deploy RabbitMQ on Kubernetes with the Kubernetes Peer Discovery Plugin to Minikube
+
+This example has graduated and moved to [a separate repository](https://github.com/rabbitmq/diy-kubernetes-examples/tree/master/minikube). \ No newline at end of file
diff --git a/deps/rabbitmq_peer_discovery_k8s/include/rabbit_peer_discovery_k8s.hrl b/deps/rabbitmq_peer_discovery_k8s/include/rabbit_peer_discovery_k8s.hrl
new file mode 100644
index 0000000000..9665b67023
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_k8s/include/rabbit_peer_discovery_k8s.hrl
@@ -0,0 +1,63 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-define(CONFIG_MODULE, rabbit_peer_discovery_config).
+-define(UTIL_MODULE, rabbit_peer_discovery_util).
+-define(HTTPC_MODULE, rabbit_peer_discovery_httpc).
+
+-define(BACKEND_CONFIG_KEY, peer_discovery_k8s).
+
+-define(K8S_EVENT_SOURCE_DESCRIPTION, "rabbitmq_peer_discovery").
+
+-define(CONFIG_MAPPING,
+ #{
+ k8s_scheme => #peer_discovery_config_entry_meta{
+ type = string,
+ env_variable = "K8S_SCHEME",
+ default_value = "https"
+ },
+ k8s_host => #peer_discovery_config_entry_meta{
+ type = string,
+ env_variable = "K8S_HOST",
+ default_value = "kubernetes.default.svc.cluster.local"
+ },
+ k8s_port => #peer_discovery_config_entry_meta{
+ type = integer,
+ env_variable = "K8S_PORT",
+ default_value = 443
+ },
+ k8s_token_path => #peer_discovery_config_entry_meta{
+ type = string,
+ env_variable = "K8S_TOKEN_PATH",
+ default_value = "/var/run/secrets/kubernetes.io/serviceaccount/token"
+ },
+ k8s_cert_path => #peer_discovery_config_entry_meta{
+ type = string,
+ env_variable = "K8S_CERT_PATH",
+ default_value = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
+ },
+ k8s_namespace_path => #peer_discovery_config_entry_meta{
+ type = string,
+ env_variable = "K8S_NAMESPACE_PATH",
+ default_value = "/var/run/secrets/kubernetes.io/serviceaccount/namespace"
+ },
+ k8s_service_name => #peer_discovery_config_entry_meta{
+ type = string,
+ env_variable = "K8S_SERVICE_NAME",
+ default_value = "rabbitmq"
+ },
+ k8s_address_type => #peer_discovery_config_entry_meta{
+ type = string,
+ env_variable = "K8S_ADDRESS_TYPE",
+ default_value = "ip"
+ },
+ k8s_hostname_suffix => #peer_discovery_config_entry_meta{
+ type = string,
+ env_variable = "K8S_HOSTNAME_SUFFIX",
+ default_value = ""
+ }
+ }).
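+
+%% Each entry above also names an environment variable that the config
+%% module can read as an override of the default value; e.g. (illustrative)
+%% exporting K8S_SERVICE_NAME=my-rabbit replaces the default "rabbitmq"
+%% service name.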
diff --git a/deps/rabbitmq_peer_discovery_k8s/priv/schema/rabbitmq_peer_discovery_k8s.schema b/deps/rabbitmq_peer_discovery_k8s/priv/schema/rabbitmq_peer_discovery_k8s.schema
new file mode 100644
index 0000000000..6c21b6ccea
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_k8s/priv/schema/rabbitmq_peer_discovery_k8s.schema
@@ -0,0 +1,133 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+%% Kubernetes host
+
+{mapping, "cluster_formation.k8s.host", "rabbit.cluster_formation.peer_discovery_k8s.k8s_host", [
+ {datatype, string}
+]}.
+
+{translation, "rabbit.cluster_formation.peer_discovery_k8s.k8s_host",
+fun(Conf) ->
+ case cuttlefish:conf_get("cluster_formation.k8s.host", Conf, undefined) of
+ undefined -> cuttlefish:unset();
+ Value -> Value
+ end
+end}.
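+
+%% Example rabbitmq.conf snippet using the mapping above (illustrative
+%% values):
+%%
+%%   cluster_formation.peer_discovery_backend = rabbit_peer_discovery_k8s
+%%   cluster_formation.k8s.host = kubernetes.default.svc.cluster.local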
+
+%% Kubernetes port
+
+{mapping, "cluster_formation.k8s.port", "rabbit.cluster_formation.peer_discovery_k8s.k8s_port", [
+ {datatype, integer},
+ {validators, ["non_negative_integer"]}
+]}.
+
+{translation, "rabbit.cluster_formation.peer_discovery_k8s.k8s_port",
+fun(Conf) ->
+ case cuttlefish:conf_get("cluster_formation.k8s.port", Conf, undefined) of
+ undefined -> cuttlefish:unset();
+ Value -> Value
+ end
+end}.
+
+%% Kubernetes URI scheme
+
+{mapping, "cluster_formation.k8s.scheme", "rabbit.cluster_formation.peer_discovery_k8s.k8s_scheme", [
+ {datatype, string}
+]}.
+
+{translation, "rabbit.cluster_formation.peer_discovery_k8s.k8s_scheme",
+fun(Conf) ->
+ case cuttlefish:conf_get("cluster_formation.k8s.scheme", Conf, undefined) of
+ undefined -> cuttlefish:unset();
+ Value -> Value
+ end
+end}.
+
+%% (ACL) Token path
+
+{mapping, "cluster_formation.k8s.token_path", "rabbit.cluster_formation.peer_discovery_k8s.k8s_token_path", [
+ {datatype, string}
+]}.
+
+{translation, "rabbit.cluster_formation.peer_discovery_k8s.k8s_token_path",
+fun(Conf) ->
+ case cuttlefish:conf_get("cluster_formation.k8s.token_path", Conf, undefined) of
+ undefined -> cuttlefish:unset();
+ Value -> Value
+ end
+end}.
+
+%% Certificate path
+
+{mapping, "cluster_formation.k8s.cert_path", "rabbit.cluster_formation.peer_discovery_k8s.k8s_cert_path", [
+ {datatype, string}
+]}.
+
+{translation, "rabbit.cluster_formation.peer_discovery_k8s.k8s_cert_path",
+fun(Conf) ->
+ case cuttlefish:conf_get("cluster_formation.k8s.cert_path", Conf, undefined) of
+ undefined -> cuttlefish:unset();
+ Value -> Value
+ end
+end}.
+
+%% Namespace path
+
+{mapping, "cluster_formation.k8s.namespace_path", "rabbit.cluster_formation.peer_discovery_k8s.k8s_namespace_path", [
+ {datatype, string}
+]}.
+
+{translation, "rabbit.cluster_formation.peer_discovery_k8s.k8s_namespace_path",
+fun(Conf) ->
+ case cuttlefish:conf_get("cluster_formation.k8s.namespace_path", Conf, undefined) of
+ undefined -> cuttlefish:unset();
+ Value -> Value
+ end
+end}.
+
+%% Service name
+
+{mapping, "cluster_formation.k8s.service_name", "rabbit.cluster_formation.peer_discovery_k8s.k8s_service_name", [
+ {datatype, string}
+]}.
+
+{translation, "rabbit.cluster_formation.peer_discovery_k8s.k8s_service_name",
+fun(Conf) ->
+ case cuttlefish:conf_get("cluster_formation.k8s.service_name", Conf, undefined) of
+ undefined -> cuttlefish:unset();
+ Value -> Value
+ end
+end}.
+
+%% Address type
+
+{mapping, "cluster_formation.k8s.address_type", "rabbit.cluster_formation.peer_discovery_k8s.k8s_address_type", [
+ {datatype, {enum, [hostname, ip]}}
+]}.
+
+{translation, "rabbit.cluster_formation.peer_discovery_k8s.k8s_address_type",
+fun(Conf) ->
+ case cuttlefish:conf_get("cluster_formation.k8s.address_type", Conf, undefined) of
+ undefined -> cuttlefish:unset();
+ Value -> Value
+ end
+end}.
+
+%% Hostname suffix
+
+{mapping, "cluster_formation.k8s.hostname_suffix", "rabbit.cluster_formation.peer_discovery_k8s.k8s_hostname_suffix", [
+ {datatype, string}
+]}.
+
+{translation, "rabbit.cluster_formation.peer_discovery_k8s.k8s_hostname_suffix",
+fun(Conf) ->
+ case cuttlefish:conf_get("cluster_formation.k8s.hostname_suffix", Conf, undefined) of
+ undefined -> cuttlefish:unset();
+ Value -> Value
+ end
+end}.
diff --git a/deps/rabbitmq_peer_discovery_k8s/rabbitmq-components.mk b/deps/rabbitmq_peer_discovery_k8s/rabbitmq-components.mk
new file mode 100644
index 0000000000..b2a3be8b35
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_k8s/rabbitmq-components.mk
@@ -0,0 +1,359 @@
+ifeq ($(.DEFAULT_GOAL),)
+# Set the default goal to `all` because this file defines some targets
+# before erlang.mk is included, which would otherwise make the wrong
+# target the default.
+.DEFAULT_GOAL = all
+endif
+
+# PROJECT_VERSION defaults to:
+# 1. the version exported by rabbitmq-server-release;
+# 2. the version stored in `git-revisions.txt`, if it exists;
+# 3. a version based on git-describe(1), if it is a Git clone;
+# 4. 0.0.0
+
+PROJECT_VERSION := $(RABBITMQ_VERSION)
+
+ifeq ($(PROJECT_VERSION),)
+PROJECT_VERSION := $(shell \
+if test -f git-revisions.txt; then \
+ head -n1 git-revisions.txt | \
+ awk '{print $$$(words $(PROJECT_DESCRIPTION) version);}'; \
+else \
+ (git describe --dirty --abbrev=7 --tags --always --first-parent \
+ 2>/dev/null || echo rabbitmq_v0_0_0) | \
+ sed -e 's/^rabbitmq_v//' -e 's/^v//' -e 's/_/./g' -e 's/-/+/' \
+ -e 's/-/./g'; \
+fi)
+endif
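+
+# Example of the sed pipeline above (illustrative): a git-describe output
+# of "v3.8.9-10-gabcdef1" yields a PROJECT_VERSION of "3.8.9+10.gabcdef1".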
+
+# --------------------------------------------------------------------
+# RabbitMQ components.
+# --------------------------------------------------------------------
+
+# For RabbitMQ repositories, we want to check out branches which match
+# the parent project. For instance, if the parent project is on a
+# release tag, dependencies must be on the same release tag. If the
+# parent project is on a topic branch, dependencies must be on the same
+# topic branch, or fall back to `stable` or `master`, whichever was the
+# base of the topic branch.
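+#
+# e.g. (illustrative) on a topic branch named "my-feature", dep_rabbit
+# below tries the "my-feature" ref first, then the base branch, then master.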
+
+dep_amqp_client = git_rmq rabbitmq-erlang-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_amqp10_client = git_rmq rabbitmq-amqp1.0-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_amqp10_common = git_rmq rabbitmq-amqp1.0-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbit = git_rmq rabbitmq-server $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbit_common = git_rmq rabbitmq-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_amqp1_0 = git_rmq rabbitmq-amqp1.0 $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_amqp = git_rmq rabbitmq-auth-backend-amqp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_cache = git_rmq rabbitmq-auth-backend-cache $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_http = git_rmq rabbitmq-auth-backend-http $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_ldap = git_rmq rabbitmq-auth-backend-ldap $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_oauth2 = git_rmq rabbitmq-auth-backend-oauth2 $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_mechanism_ssl = git_rmq rabbitmq-auth-mechanism-ssl $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_aws = git_rmq rabbitmq-aws $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_boot_steps_visualiser = git_rmq rabbitmq-boot-steps-visualiser $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_cli = git_rmq rabbitmq-cli $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_codegen = git_rmq rabbitmq-codegen $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_consistent_hash_exchange = git_rmq rabbitmq-consistent-hash-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_ct_client_helpers = git_rmq rabbitmq-ct-client-helpers $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_ct_helpers = git_rmq rabbitmq-ct-helpers $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_delayed_message_exchange = git_rmq rabbitmq-delayed-message-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_dotnet_client = git_rmq rabbitmq-dotnet-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_event_exchange = git_rmq rabbitmq-event-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_federation = git_rmq rabbitmq-federation $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_federation_management = git_rmq rabbitmq-federation-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_java_client = git_rmq rabbitmq-java-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_client = git_rmq rabbitmq-jms-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_cts = git_rmq rabbitmq-jms-cts $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_topic_exchange = git_rmq rabbitmq-jms-topic-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_lvc_exchange = git_rmq rabbitmq-lvc-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management = git_rmq rabbitmq-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_agent = git_rmq rabbitmq-management-agent $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_exchange = git_rmq rabbitmq-management-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_themes = git_rmq rabbitmq-management-themes $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_message_timestamp = git_rmq rabbitmq-message-timestamp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_metronome = git_rmq rabbitmq-metronome $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_mqtt = git_rmq rabbitmq-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_objc_client = git_rmq rabbitmq-objc-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_aws = git_rmq rabbitmq-peer-discovery-aws $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_common = git_rmq rabbitmq-peer-discovery-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_consul = git_rmq rabbitmq-peer-discovery-consul $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_etcd = git_rmq rabbitmq-peer-discovery-etcd $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_k8s = git_rmq rabbitmq-peer-discovery-k8s $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_prometheus = git_rmq rabbitmq-prometheus $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_random_exchange = git_rmq rabbitmq-random-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_recent_history_exchange = git_rmq rabbitmq-recent-history-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_routing_node_stamp = git_rmq rabbitmq-routing-node-stamp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_rtopic_exchange = git_rmq rabbitmq-rtopic-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_server_release = git_rmq rabbitmq-server-release $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_sharding = git_rmq rabbitmq-sharding $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_shovel = git_rmq rabbitmq-shovel $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_shovel_management = git_rmq rabbitmq-shovel-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_stomp = git_rmq rabbitmq-stomp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_stream = git_rmq rabbitmq-stream $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_toke = git_rmq rabbitmq-toke $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_top = git_rmq rabbitmq-top $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_tracing = git_rmq rabbitmq-tracing $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_trust_store = git_rmq rabbitmq-trust-store $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_test = git_rmq rabbitmq-test $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_dispatch = git_rmq rabbitmq-web-dispatch $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_stomp = git_rmq rabbitmq-web-stomp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_stomp_examples = git_rmq rabbitmq-web-stomp-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt = git_rmq rabbitmq-web-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt_examples = git_rmq rabbitmq-web-mqtt-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_website = git_rmq rabbitmq-website $(current_rmq_ref) $(base_rmq_ref) live master
+dep_toke = git_rmq toke $(current_rmq_ref) $(base_rmq_ref) master
+
+dep_rabbitmq_public_umbrella = git_rmq rabbitmq-public-umbrella $(current_rmq_ref) $(base_rmq_ref) master
+
+# Third-party dependencies version pinning.
+#
+# We do this in this file, which is copied into all projects, to ensure
+# they all use the same versions. This avoids conflicts and makes it
+# possible to work with rabbitmq-public-umbrella.
+
+dep_accept = hex 0.3.5
+dep_cowboy = hex 2.8.0
+dep_cowlib = hex 2.9.1
+dep_jsx = hex 2.11.0
+dep_lager = hex 3.8.0
+dep_prometheus = git https://github.com/deadtrickster/prometheus.erl.git master
+dep_ra = git https://github.com/rabbitmq/ra.git master
+dep_ranch = hex 1.7.1
+dep_recon = hex 2.5.1
+dep_observer_cli = hex 1.5.4
+dep_stdout_formatter = hex 0.2.4
+dep_sysmon_handler = hex 1.3.0
+
+RABBITMQ_COMPONENTS = amqp_client \
+ amqp10_common \
+ amqp10_client \
+ rabbit \
+ rabbit_common \
+ rabbitmq_amqp1_0 \
+ rabbitmq_auth_backend_amqp \
+ rabbitmq_auth_backend_cache \
+ rabbitmq_auth_backend_http \
+ rabbitmq_auth_backend_ldap \
+ rabbitmq_auth_backend_oauth2 \
+ rabbitmq_auth_mechanism_ssl \
+ rabbitmq_aws \
+ rabbitmq_boot_steps_visualiser \
+ rabbitmq_cli \
+ rabbitmq_codegen \
+ rabbitmq_consistent_hash_exchange \
+ rabbitmq_ct_client_helpers \
+ rabbitmq_ct_helpers \
+ rabbitmq_delayed_message_exchange \
+ rabbitmq_dotnet_client \
+ rabbitmq_event_exchange \
+ rabbitmq_federation \
+ rabbitmq_federation_management \
+ rabbitmq_java_client \
+ rabbitmq_jms_client \
+ rabbitmq_jms_cts \
+ rabbitmq_jms_topic_exchange \
+ rabbitmq_lvc_exchange \
+ rabbitmq_management \
+ rabbitmq_management_agent \
+ rabbitmq_management_exchange \
+ rabbitmq_management_themes \
+ rabbitmq_message_timestamp \
+ rabbitmq_metronome \
+ rabbitmq_mqtt \
+ rabbitmq_objc_client \
+ rabbitmq_peer_discovery_aws \
+ rabbitmq_peer_discovery_common \
+ rabbitmq_peer_discovery_consul \
+ rabbitmq_peer_discovery_etcd \
+ rabbitmq_peer_discovery_k8s \
+ rabbitmq_prometheus \
+ rabbitmq_random_exchange \
+ rabbitmq_recent_history_exchange \
+ rabbitmq_routing_node_stamp \
+ rabbitmq_rtopic_exchange \
+ rabbitmq_server_release \
+ rabbitmq_sharding \
+ rabbitmq_shovel \
+ rabbitmq_shovel_management \
+ rabbitmq_stomp \
+ rabbitmq_stream \
+ rabbitmq_toke \
+ rabbitmq_top \
+ rabbitmq_tracing \
+ rabbitmq_trust_store \
+ rabbitmq_web_dispatch \
+ rabbitmq_web_mqtt \
+ rabbitmq_web_mqtt_examples \
+ rabbitmq_web_stomp \
+ rabbitmq_web_stomp_examples \
+ rabbitmq_website
+
+# Erlang.mk does not rebuild dependencies after they have been compiled
+# once, except for those listed in the `$(FORCE_REBUILD)` variable.
+#
+# We want all RabbitMQ components to always be rebuilt: this eases
+# working on several components at the same time.
+
+FORCE_REBUILD = $(RABBITMQ_COMPONENTS)
+
+# Several components have a custom erlang.mk/build.config, mainly
+# to disable eunit. Therefore, we can't use the top-level project's
+# erlang.mk copy.
+NO_AUTOPATCH += $(RABBITMQ_COMPONENTS)
+
+ifeq ($(origin current_rmq_ref),undefined)
+ifneq ($(wildcard .git),)
+current_rmq_ref := $(shell (\
+ ref=$$(LANG=C git branch --list | awk '/^\* \(.*detached / {ref=$$0; sub(/.*detached [^ ]+ /, "", ref); sub(/\)$$/, "", ref); print ref; exit;} /^\* / {ref=$$0; sub(/^\* /, "", ref); print ref; exit}');\
+ if test "$$(git rev-parse --short HEAD)" != "$$ref"; then echo "$$ref"; fi))
+else
+current_rmq_ref := master
+endif
+endif
+export current_rmq_ref
+
+ifeq ($(origin base_rmq_ref),undefined)
+ifneq ($(wildcard .git),)
+possible_base_rmq_ref := master
+ifeq ($(possible_base_rmq_ref),$(current_rmq_ref))
+base_rmq_ref := $(current_rmq_ref)
+else
+base_rmq_ref := $(shell \
+ (git rev-parse --verify -q master >/dev/null && \
+ git rev-parse --verify -q $(possible_base_rmq_ref) >/dev/null && \
+ git merge-base --is-ancestor $$(git merge-base master HEAD) $(possible_base_rmq_ref) && \
+ echo $(possible_base_rmq_ref)) || \
+ echo master)
+endif
+else
+base_rmq_ref := master
+endif
+endif
+export base_rmq_ref
+
+# Repository URL selection.
+#
+# First, we infer the other components' location from the current
+# project's repository URL, if it's a Git repository:
+# - We take the "origin" remote URL as the base
+# - The current project name and repository name are replaced by the
+# target's properties:
+# eg. rabbitmq-common is replaced by rabbitmq-codegen
+# eg. rabbit_common is replaced by rabbitmq_codegen
+#
+# If cloning from this computed location fails, we fall back to the
+# RabbitMQ upstream, which is on GitHub.
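+#
+# Example (illustrative): if origin is https://github.com/me/rabbitmq-common.git,
+# the computed fetch URL for the rabbitmq_codegen dependency becomes
+# https://github.com/me/rabbitmq-codegen.git.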
+
+# Macro to transform eg. "rabbit_common" to "rabbitmq-common".
+rmq_cmp_repo_name = $(word 2,$(dep_$(1)))
+
+# Upstream URL for the current project.
+RABBITMQ_COMPONENT_REPO_NAME := $(call rmq_cmp_repo_name,$(PROJECT))
+RABBITMQ_UPSTREAM_FETCH_URL ?= https://github.com/rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git
+RABBITMQ_UPSTREAM_PUSH_URL ?= git@github.com:rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git
+
+# Current URL for the current project. If this is not a Git clone,
+# default to the upstream Git repository.
+ifneq ($(wildcard .git),)
+git_origin_fetch_url := $(shell git config remote.origin.url)
+git_origin_push_url := $(shell git config remote.origin.pushurl || git config remote.origin.url)
+RABBITMQ_CURRENT_FETCH_URL ?= $(git_origin_fetch_url)
+RABBITMQ_CURRENT_PUSH_URL ?= $(git_origin_push_url)
+else
+RABBITMQ_CURRENT_FETCH_URL ?= $(RABBITMQ_UPSTREAM_FETCH_URL)
+RABBITMQ_CURRENT_PUSH_URL ?= $(RABBITMQ_UPSTREAM_PUSH_URL)
+endif
+
+# Macro to replace the following pattern:
+# 1. /foo.git -> /bar.git
+# 2. /foo -> /bar
+# 3. /foo/ -> /bar/
+subst_repo_name = $(patsubst %/$(1)/%,%/$(2)/%,$(patsubst %/$(1),%/$(2),$(patsubst %/$(1).git,%/$(2).git,$(3))))
+
+# Macro to replace both the project's name (eg. "rabbit_common") and
+# repository name (eg. "rabbitmq-common") by the target's equivalent.
+#
+# This macro is kept on one line because we don't want whitespace in
+# the returned value, as it's used in $(dep_fetch_git_rmq) in a shell
+# single-quoted string.
+dep_rmq_repo = $(if $(dep_$(2)),$(call subst_repo_name,$(PROJECT),$(2),$(call subst_repo_name,$(RABBITMQ_COMPONENT_REPO_NAME),$(call rmq_cmp_repo_name,$(2)),$(1))),$(pkg_$(1)_repo))
+
+dep_rmq_commits = $(if $(dep_$(1)), \
+ $(wordlist 3,$(words $(dep_$(1))),$(dep_$(1))), \
+ $(pkg_$(1)_commit))
+
+define dep_fetch_git_rmq
+ fetch_url1='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_FETCH_URL),$(1))'; \
+ fetch_url2='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_FETCH_URL),$(1))'; \
+ if test "$$$$fetch_url1" != '$(RABBITMQ_CURRENT_FETCH_URL)' && \
+ git clone -q -n -- "$$$$fetch_url1" $(DEPS_DIR)/$(call dep_name,$(1)); then \
+ fetch_url="$$$$fetch_url1"; \
+ push_url='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_PUSH_URL),$(1))'; \
+ elif git clone -q -n -- "$$$$fetch_url2" $(DEPS_DIR)/$(call dep_name,$(1)); then \
+ fetch_url="$$$$fetch_url2"; \
+ push_url='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_PUSH_URL),$(1))'; \
+ fi; \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && ( \
+ $(foreach ref,$(call dep_rmq_commits,$(1)), \
+ git checkout -q $(ref) >/dev/null 2>&1 || \
+ ) \
+ (echo "error: no valid pathspec among: $(call dep_rmq_commits,$(1))" \
+ 1>&2 && false) ) && \
+ (test "$$$$fetch_url" = "$$$$push_url" || \
+ git remote set-url --push origin "$$$$push_url")
+endef
+
+# --------------------------------------------------------------------
+# Component distribution.
+# --------------------------------------------------------------------
+
+list-dist-deps::
+ @:
+
+prepare-dist::
+ @:
+
+# --------------------------------------------------------------------
+# Umbrella-specific settings.
+# --------------------------------------------------------------------
+
+# If the top-level project is a RabbitMQ component, we override
+# $(DEPS_DIR) for this project to point to the top-level's one.
+#
+# We also verify that the guessed DEPS_DIR is actually named `deps`, to
+# rule out any situation where finding a `rabbitmq-components.mk` in an
+# upper directory is merely a coincidence.
+
+possible_deps_dir_1 = $(abspath ..)
+possible_deps_dir_2 = $(abspath ../../..)
+
+ifeq ($(notdir $(possible_deps_dir_1)),deps)
+ifneq ($(wildcard $(possible_deps_dir_1)/../rabbitmq-components.mk),)
+deps_dir_overriden = 1
+DEPS_DIR ?= $(possible_deps_dir_1)
+DISABLE_DISTCLEAN = 1
+endif
+endif
+
+ifeq ($(deps_dir_overriden),)
+ifeq ($(notdir $(possible_deps_dir_2)),deps)
+ifneq ($(wildcard $(possible_deps_dir_2)/../rabbitmq-components.mk),)
+deps_dir_overriden = 1
+DEPS_DIR ?= $(possible_deps_dir_2)
+DISABLE_DISTCLEAN = 1
+endif
+endif
+endif
+
+ifneq ($(wildcard UMBRELLA.md),)
+DISABLE_DISTCLEAN = 1
+endif
+
+# We disable `make distclean` so $(DEPS_DIR) is not accidentally removed.
+
+ifeq ($(DISABLE_DISTCLEAN),1)
+ifneq ($(filter distclean distclean-deps,$(MAKECMDGOALS)),)
+SKIP_DEPS = 1
+endif
+endif
diff --git a/deps/rabbitmq_peer_discovery_k8s/src/rabbit_peer_discovery_k8s.erl b/deps/rabbitmq_peer_discovery_k8s/src/rabbit_peer_discovery_k8s.erl
new file mode 100644
index 0000000000..3990cf8b11
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_k8s/src/rabbit_peer_discovery_k8s.erl
@@ -0,0 +1,231 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% The Initial Developer of the Original Code is AWeber Communications.
+%% Copyright (c) 2015-2016 AWeber Communications
+%% Copyright (c) 2016-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_peer_discovery_k8s).
+-behaviour(rabbit_peer_discovery_backend).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+-include_lib("rabbitmq_peer_discovery_common/include/rabbit_peer_discovery.hrl").
+-include("rabbit_peer_discovery_k8s.hrl").
+
+-export([init/0, list_nodes/0, supports_registration/0, register/0, unregister/0,
+ post_registration/0, lock/1, unlock/1, randomized_startup_delay_range/0,
+ send_event/3, generate_v1_event/7]).
+
+-ifdef(TEST).
+-compile(export_all).
+-endif.
+
+%%
+%% API
+%%
+
+init() ->
+ rabbit_log:debug("Peer discovery Kubernetes: initialising..."),
+ ok = application:ensure_started(inets),
+ %% we cannot start this plugin yet since it depends on the rabbit app,
+ %% which is in the process of being started by the time this function is called
+ application:load(rabbitmq_peer_discovery_common),
+ rabbit_peer_discovery_httpc:maybe_configure_proxy(),
+ rabbit_peer_discovery_httpc:maybe_configure_inet6().
+
+-spec list_nodes() -> {ok, {Nodes :: list(), NodeType :: rabbit_types:node_type()}} | {error, Reason :: string()}.
+
+list_nodes() ->
+ case make_request() of
+ {ok, Response} ->
+ Addresses = extract_node_list(Response),
+ {ok, {lists:map(fun node_name/1, Addresses), disc}};
+ {error, Reason} ->
+ Details = io_lib:format("Failed to fetch a list of nodes from Kubernetes API: ~s", [Reason]),
+ rabbit_log:error(Details),
+ send_event("Warning", "Failed", Details),
+ {error, Reason}
+ end.
+
+-spec supports_registration() -> boolean().
+
+supports_registration() ->
+ %% see rabbitmq-peer-discovery-aws#17,
+ %% rabbitmq-peer-discovery-k8s#23
+ true.
+
+
+-spec register() -> ok.
+register() ->
+ ok.
+
+-spec unregister() -> ok.
+unregister() ->
+ ok.
+
+-spec post_registration() -> ok | {error, Reason :: string()}.
+post_registration() ->
+ Details = io_lib:format("Node ~s is registered", [node()]),
+ send_event("Normal", "Created", Details).
+
+-spec lock(Node :: atom()) -> not_supported.
+
+lock(_Node) ->
+ not_supported.
+
+-spec unlock(Data :: term()) -> ok.
+
+unlock(_Data) ->
+ ok.
+
+-spec randomized_startup_delay_range() -> {integer(), integer()}.
+
+randomized_startup_delay_range() ->
+ %% Pods in a stateful set are initialized one by one,
+ %% so RSD is not really necessary for this plugin.
+ %% See https://www.rabbitmq.com/cluster-formation.html#peer-discovery-k8s for details.
+ {0, 2}.
+
+%%
+%% Implementation
+%%
+
+-spec get_config_key(Key :: atom(), Map :: #{atom() => peer_discovery_config_value()})
+ -> peer_discovery_config_value().
+
+get_config_key(Key, Map) ->
+ ?CONFIG_MODULE:get(Key, ?CONFIG_MAPPING, Map).
+
+%% @doc Perform an HTTP GET request to K8s
+%% @end
+%%
+-spec make_request() -> {ok, map() | list() | term()} | {error, term()}.
+
+make_request() ->
+ M = ?CONFIG_MODULE:config_map(?BACKEND_CONFIG_KEY),
+ {ok, Token} = file:read_file(get_config_key(k8s_token_path, M)),
+ Token1 = binary:replace(Token, <<"\n">>, <<>>),
+ ?HTTPC_MODULE:get(
+ get_config_key(k8s_scheme, M),
+ get_config_key(k8s_host, M),
+ get_config_key(k8s_port, M),
+ base_path(endpoints,get_config_key(k8s_service_name, M)),
+ [],
+ [{"Authorization", "Bearer " ++ binary_to_list(Token1)}],
+ [{ssl, [{cacertfile, get_config_key(k8s_cert_path, M)}]}]).
+
+%% @spec node_name(k8s_endpoint) -> list()
+%% @doc Return a full rabbit node name, appending the hostname suffix
+%% @end
+%%
+node_name(Address) ->
+ M = ?CONFIG_MODULE:config_map(?BACKEND_CONFIG_KEY),
+ ?UTIL_MODULE:node_name(
+ ?UTIL_MODULE:as_string(Address) ++ get_config_key(k8s_hostname_suffix, M)).
+
+
+%% @spec maybe_ready_address(k8s_subsets()) -> list()
+%% @doc Return a list of ready nodes.
+%% The subset may also contain "notReadyAddresses"
+%% @end
+%%
+-spec maybe_ready_address(map()) -> list().
+
+maybe_ready_address(Subset) ->
+ case maps:get(<<"notReadyAddresses">>, Subset, undefined) of
+ undefined -> ok;
+ NotReadyAddresses ->
+ Formatted = string:join([binary_to_list(get_address(X)) || X <- NotReadyAddresses], ", "),
+ rabbit_log:info("k8s endpoint listing returned nodes not yet ready: ~s", [Formatted])
+ end,
+ maps:get(<<"addresses">>, Subset, []).
+
+%% @doc Return a list of nodes
+%% see https://kubernetes.io/docs/api-reference/v1/definitions/#_v1_endpoints
+%% @end
+%%
+-spec extract_node_list(map()) -> list().
+
+extract_node_list(Response) ->
+ IpLists = [[get_address(Address)
+ || Address <- maybe_ready_address(Subset)] || Subset <- maps:get(<<"subsets">>, Response, [])],
+ sets:to_list(sets:union(lists:map(fun sets:from_list/1, IpLists))).
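+
+%% Example (illustrative): with k8s_address_type "ip" and a response of
+%% #{<<"subsets">> => [#{<<"addresses">> => [#{<<"ip">> => <<"10.1.2.3">>}]}]},
+%% this returns [<<"10.1.2.3">>].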
+
+
+%% @doc Return a list of path segments that are the base path for k8s key actions
+%% @end
+%%
+-spec base_path(events | endpoints, term()) -> string().
+base_path(Type, Args) ->
+ M = ?CONFIG_MODULE:config_map(?BACKEND_CONFIG_KEY),
+ {ok, Namespace} = file:read_file(get_config_key(k8s_namespace_path, M)),
+ NameSpace1 = binary:replace(Namespace, <<"\n">>, <<>>),
+ rabbit_peer_discovery_httpc:build_path([api, v1, namespaces, NameSpace1, Type, Args]).
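+
+%% e.g. (illustrative) base_path(endpoints, "rabbitmq") builds a path along
+%% the lines of "api/v1/namespaces/my-namespace/endpoints/rabbitmq".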
+
+get_address(Address) ->
+ M = ?CONFIG_MODULE:config_map(?BACKEND_CONFIG_KEY),
+ maps:get(list_to_binary(get_config_key(k8s_address_type, M)), Address).
+
+
+generate_v1_event(Namespace, Type, Reason, Message) ->
+ {ok, HostName} = inet:gethostname(),
+ Name =
+ io_lib:format(HostName ++ ".~B",[os:system_time(millisecond)]),
+ TimeInSeconds = calendar:system_time_to_rfc3339(erlang:system_time(second)),
+ generate_v1_event(Namespace, Name, Type, Reason, Message, TimeInSeconds, HostName).
+
+
+generate_v1_event(Namespace, Name, Type, Reason, Message, Timestamp, HostName) ->
+ #{
+ metadata => #{
+ namespace => rabbit_data_coercion:to_binary(Namespace),
+ name => rabbit_data_coercion:to_binary(Name)
+ },
+ type => rabbit_data_coercion:to_binary(Type),
+ reason => rabbit_data_coercion:to_binary(Reason),
+ message => rabbit_data_coercion:to_binary(Message),
+ count => 1,
+ lastTimestamp => rabbit_data_coercion:to_binary(Timestamp),
+ involvedObject => #{
+ apiVersion => <<"v1">>,
+ kind => <<"RabbitMQ">>,
+ name => rabbit_data_coercion:to_binary("pod/" ++ HostName),
+ namespace => rabbit_data_coercion:to_binary(Namespace)
+ },
+ source => #{
+ component => rabbit_data_coercion:to_binary(HostName ++ "/" ++
+ ?K8S_EVENT_SOURCE_DESCRIPTION),
+ host => rabbit_data_coercion:to_binary(HostName)
+ }
+ }.
+
+
+%% @doc Perform an HTTP POST request to K8s to send a k8s v1.Event
+%% @end
+%%
+-spec send_event(term(),term(), term()) -> {ok, term()} | {error, term()}.
+send_event(Type, Reason, Message) ->
+ M = ?CONFIG_MODULE:config_map(?BACKEND_CONFIG_KEY),
+ {ok, Token} = file:read_file(get_config_key(k8s_token_path, M)),
+ Token1 = binary:replace(Token, <<"\n">>, <<>>),
+ {ok, NameSpace} = file:read_file(
+ get_config_key(k8s_namespace_path, M)),
+ NameSpace1 = binary:replace(NameSpace, <<"\n">>, <<>>),
+
+ V1Event = generate_v1_event(NameSpace1, Type, Reason, Message),
+
+ Body = rabbit_data_coercion:to_list(rabbit_json:encode(V1Event)),
+ ?HTTPC_MODULE:post(
+ get_config_key(k8s_scheme, M),
+ get_config_key(k8s_host, M),
+ get_config_key(k8s_port, M),
+ base_path(events,""),
+ [],
+ [{"Authorization", "Bearer " ++ rabbit_data_coercion:to_list(Token1)}],
+ [{ssl, [{cacertfile, get_config_key(k8s_cert_path, M)}]}],
+ Body
+ ). \ No newline at end of file
diff --git a/deps/rabbitmq_peer_discovery_k8s/src/rabbitmq_peer_discovery_k8s.erl b/deps/rabbitmq_peer_discovery_k8s/src/rabbitmq_peer_discovery_k8s.erl
new file mode 100644
index 0000000000..a0d455ef4e
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_k8s/src/rabbitmq_peer_discovery_k8s.erl
@@ -0,0 +1,68 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+%% This module exists as an alias for rabbit_peer_discovery_k8s.
+%% Some users assume that the discovery module name is the same as the plugin
+%% name. This module fills the naming gap between the module and plugin names.
+-module(rabbitmq_peer_discovery_k8s).
+-behaviour(rabbit_peer_discovery_backend).
+
+-export([init/0, list_nodes/0, supports_registration/0, register/0, unregister/0,
+ post_registration/0, lock/1, unlock/1, randomized_startup_delay_range/0,
+ send_event/3, generate_v1_event/7]).
+
+-define(DELEGATE, rabbit_peer_discovery_k8s).
+
+%%
+%% API
+%%
+
+init() ->
+ ?DELEGATE:init().
+
+-spec list_nodes() -> {ok, {Nodes :: list(), NodeType :: rabbit_types:node_type()}} |
+ {error, Reason :: string()}.
+list_nodes() ->
+ ?DELEGATE:list_nodes().
+
+-spec supports_registration() -> boolean().
+supports_registration() ->
+ ?DELEGATE:supports_registration().
+
+-spec register() -> ok.
+register() ->
+ ?DELEGATE:register().
+
+-spec unregister() -> ok.
+unregister() ->
+ ?DELEGATE:unregister().
+
+-spec post_registration() -> ok | {error, Reason :: string()}.
+post_registration() ->
+ ?DELEGATE:post_registration().
+
+-spec lock(Node :: atom()) -> not_supported.
+lock(Node) ->
+ ?DELEGATE:lock(Node).
+
+-spec unlock(Data :: term()) -> ok.
+unlock(Data) ->
+ ?DELEGATE:unlock(Data).
+
+-spec randomized_startup_delay_range() -> {integer(), integer()}.
+randomized_startup_delay_range() ->
+ ?DELEGATE:randomized_startup_delay_range().
+
+generate_v1_event(Namespace, Name, Type, Reason, Message, Timestamp, HostName) ->
+ ?DELEGATE:generate_v1_event(Namespace, Name, Type, Reason, Message, Timestamp, HostName).
+
+%% @doc Perform an HTTP POST request to the Kubernetes API to send a
+%% k8s v1 Event
+%% @end
+%%
+-spec send_event(term(), term(), term()) -> {ok, term()} | {error, term()}.
+send_event(Type, Reason, Message) ->
+ ?DELEGATE:send_event(Type, Reason, Message).
diff --git a/deps/rabbitmq_peer_discovery_k8s/src/rabbitmq_peer_discovery_k8s_app.erl b/deps/rabbitmq_peer_discovery_k8s/src/rabbitmq_peer_discovery_k8s_app.erl
new file mode 100644
index 0000000000..0a77e1305e
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_k8s/src/rabbitmq_peer_discovery_k8s_app.erl
@@ -0,0 +1,21 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbitmq_peer_discovery_k8s_app).
+
+%%
+%% API
+%%
+
+-behaviour(application).
+-export([start/2, stop/1]).
+
+start(_Type, _StartArgs) ->
+ rabbitmq_peer_discovery_k8s_sup:start_link().
+
+stop(_State) ->
+ ok.
diff --git a/deps/rabbitmq_peer_discovery_k8s/src/rabbitmq_peer_discovery_k8s_node_monitor.erl b/deps/rabbitmq_peer_discovery_k8s/src/rabbitmq_peer_discovery_k8s_node_monitor.erl
new file mode 100644
index 0000000000..60bf73a23d
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_k8s/src/rabbitmq_peer_discovery_k8s_node_monitor.erl
@@ -0,0 +1,49 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+%% This gen_server receives node monitoring events from net_kernel
+%% and forwards them to the Kubernetes API.
+
+-module(rabbitmq_peer_discovery_k8s_node_monitor).
+
+-behaviour(gen_server).
+
+-export([start_link/0]).
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
+ code_change/3]).
+
+%%
+%% API
+%%
+
+start_link() ->
+ gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
+
+init([]) ->
+ _ = net_kernel:monitor_nodes(true, []),
+ {ok, #{}}.
+
+handle_call(_Msg, _From, State) ->
+ {reply, not_understood, State}.
+
+handle_cast(_Msg, State) ->
+ {noreply, State}.
+
+handle_info({nodeup, Node}, State) ->
+ Details = io_lib:format("Node ~s is up ", [Node]),
+ rabbit_peer_discovery_k8s:send_event("Normal", "NodeUp", Details),
+ {noreply, State};
+handle_info({nodedown, Node}, State) ->
+ Details = io_lib:format("Node ~s is down or disconnected ", [Node]),
+ rabbit_peer_discovery_k8s:send_event("Warning", "NodeDown", Details),
+ {noreply, State}.
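+
+%% Since these are posted as Kubernetes v1 Events in the pod's namespace,
+%% they should be visible with something like
+%% kubectl get events --namespace <ns> (an inference from the API used;
+%% the events are created via send_event/3 above).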
+
+terminate(_Arg, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
diff --git a/deps/rabbitmq_peer_discovery_k8s/src/rabbitmq_peer_discovery_k8s_sup.erl b/deps/rabbitmq_peer_discovery_k8s/src/rabbitmq_peer_discovery_k8s_sup.erl
new file mode 100644
index 0000000000..f7a0b1c094
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_k8s/src/rabbitmq_peer_discovery_k8s_sup.erl
@@ -0,0 +1,38 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbitmq_peer_discovery_k8s_sup).
+
+-behaviour(supervisor).
+
+-export([init/1, start_link/0]).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+-include("rabbit_peer_discovery_k8s.hrl").
+
+%%
+%% API
+%%
+
+init([]) ->
+ Flags = #{strategy => one_for_one, intensity => 1, period => 1},
+ Fun0 = fun() -> {ok, {Flags, []}} end,
+ Fun1 = fun() -> {ok, {Flags, []}} end,
+ Fun2 = fun(_) ->
+ Specs = [#{id => rabbitmq_peer_discovery_k8s_node_monitor,
+ start => {rabbitmq_peer_discovery_k8s_node_monitor, start_link, []},
+ restart => permanent,
+ shutdown => ?SUPERVISOR_WAIT,
+ type => worker,
+ modules => [rabbitmq_peer_discovery_k8s_node_monitor]
+ }],
+ {ok, {Flags, Specs}}
+ end,
+ rabbit_peer_discovery_util:maybe_backend_configured(?BACKEND_CONFIG_KEY, Fun0, Fun1, Fun2).
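+
+%% On the three funs above (an inference from the helper's name and arguments,
+%% not confirmed in this diff): maybe_backend_configured/4 appears to call
+%% Fun0 when cluster formation is not configured at all, Fun1 when it is
+%% configured with a different backend, and Fun2 with this backend's
+%% configuration; only in the last case is the node monitor child started.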
+
+start_link() ->
+ supervisor:start_link({local, ?MODULE}, ?MODULE, []).
diff --git a/deps/rabbitmq_peer_discovery_k8s/test/config_schema_SUITE.erl b/deps/rabbitmq_peer_discovery_k8s/test/config_schema_SUITE.erl
new file mode 100644
index 0000000000..6fa29b19c6
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_k8s/test/config_schema_SUITE.erl
@@ -0,0 +1,55 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(config_schema_SUITE).
+
+-compile(export_all).
+
+all() ->
+ [
+ run_snippets
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ Config1 = rabbit_ct_helpers:run_setup_steps(Config),
+ rabbit_ct_config_schema:init_schemas(rabbitmq_peer_discovery_k8s, Config1).
+
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, Testcase}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_testcase(Testcase, Config) ->
+ Config1 = rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()),
+ rabbit_ct_helpers:testcase_finished(Config1, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+run_snippets(Config) ->
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, run_snippets1, [Config]).
+
+run_snippets1(Config) ->
+ rabbit_ct_config_schema:run_snippets(Config).
+
diff --git a/deps/rabbitmq_peer_discovery_k8s/test/config_schema_SUITE_data/rabbitmq_peer_discovery_k8s.snippets b/deps/rabbitmq_peer_discovery_k8s/test/config_schema_SUITE_data/rabbitmq_peer_discovery_k8s.snippets
new file mode 100644
index 0000000000..64ad3f0d9f
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_k8s/test/config_schema_SUITE_data/rabbitmq_peer_discovery_k8s.snippets
@@ -0,0 +1,136 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
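+%% Each snippet below is a 4-tuple: a snippet name, the rabbitmq.conf-style
+%% input, the expected translated advanced-config terms, and the plugins to
+%% enable (a reading inferred from the entries themselves; the exact contract
+%% is defined by rabbit_ct_config_schema:run_snippets/1).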
+[
+ {k8s_discovery_mechanism_as_module,
+ "cluster_formation.peer_discovery_backend = rabbit_peer_discovery_k8s
+ cluster_formation.k8s.host = k8s.eng.megacorp.local", [
+ {rabbit, [
+ {cluster_formation, [
+ {peer_discovery_backend, rabbit_peer_discovery_k8s},
+ {peer_discovery_k8s, [
+ {k8s_host, "k8s.eng.megacorp.local"}
+ ]}
+ ]}
+ ]}
+ ], [rabbitmq_peer_discovery_k8s]
+ },
+
+ {k8s_discovery_mechanism_as_alias1,
+ "cluster_formation.peer_discovery_backend = k8s
+ cluster_formation.k8s.host = k8s.eng.megacorp.local", [
+ {rabbit, [
+ {cluster_formation, [
+ {peer_discovery_backend, rabbit_peer_discovery_k8s},
+ {peer_discovery_k8s, [
+ {k8s_host, "k8s.eng.megacorp.local"}
+ ]}
+ ]}
+ ]}
+ ], [rabbitmq_peer_discovery_k8s]
+ },
+
+ {k8s_discovery_mechanism_as_alias2,
+ "cluster_formation.peer_discovery_backend = kubernetes
+ cluster_formation.k8s.host = k8s.eng.megacorp.local", [
+ {rabbit, [
+ {cluster_formation, [
+ {peer_discovery_backend, rabbit_peer_discovery_k8s},
+ {peer_discovery_k8s, [
+ {k8s_host, "k8s.eng.megacorp.local"}
+ ]}
+ ]}
+ ]}
+ ], [rabbitmq_peer_discovery_k8s]
+ },
+
+ {k8s_host, "cluster_formation.k8s.host = k8s.eng.megacorp.local", [
+ {rabbit, [
+ {cluster_formation, [
+ {peer_discovery_k8s, [
+ {k8s_host, "k8s.eng.megacorp.local"}
+ ]}
+ ]}
+ ]}
+ ], [rabbitmq_peer_discovery_k8s]
+ }
+
+, {k8s_port,
+ "cluster_formation.k8s.port = 9700",
+ [{rabbit, [{cluster_formation, [
+ {peer_discovery_k8s, [
+ {k8s_port, 9700}
+ ]}
+ ]}]}],
+ [rabbitmq_peer_discovery_k8s]}
+
+, {k8s_host_and_port,
+ "cluster_formation.k8s.host = k8s.eng.megacorp.local
+ cluster_formation.k8s.port = 443",
+ [{rabbit, [{cluster_formation, [
+ {peer_discovery_k8s, [
+ {k8s_host, "k8s.eng.megacorp.local"},
+ {k8s_port, 443}
+ ]}
+ ]}]}],
+ [rabbitmq_peer_discovery_k8s]}
+
+
+, {k8s_scheme,
+ "cluster_formation.k8s.scheme = https",
+ [{rabbit, [{cluster_formation, [
+ {peer_discovery_k8s, [
+ {k8s_scheme, "https"}
+ ]}
+ ]}]}],
+ [rabbitmq_peer_discovery_k8s]}
+
+
+, {k8s_service_name, "cluster_formation.k8s.service_name = kubernetes-service", [
+ {rabbit, [
+ {cluster_formation, [
+ {peer_discovery_k8s, [
+ {k8s_service_name, "kubernetes-service"}
+ ]}
+ ]}
+ ]}
+ ], [rabbitmq_peer_discovery_k8s]
+ }
+
+, {k8s_token_path, "cluster_formation.k8s.token_path = /a/b/c", [
+ {rabbit, [
+ {cluster_formation, [
+ {peer_discovery_k8s, [
+ {k8s_token_path, "/a/b/c"}
+ ]}
+ ]}
+ ]}
+ ], [rabbitmq_peer_discovery_k8s]
+ }
+
+, {k8s_token_path, "cluster_formation.k8s.cert_path = /a/b/c", [
+ {rabbit, [
+ {cluster_formation, [
+ {peer_discovery_k8s, [
+ {k8s_cert_path, "/a/b/c"}
+ ]}
+ ]}
+ ]}
+ ], [rabbitmq_peer_discovery_k8s]
+ }
+
+, {k8s_token_path, "cluster_formation.k8s.namespace_path = /a/b/c", [
+ {rabbit, [
+ {cluster_formation, [
+ {peer_discovery_k8s, [
+ {k8s_namespace_path, "/a/b/c"}
+ ]}
+ ]}
+ ]}
+ ], [rabbitmq_peer_discovery_k8s]
+ }
+].
diff --git a/deps/rabbitmq_peer_discovery_k8s/test/rabbitmq_peer_discovery_k8s_SUITE.erl b/deps/rabbitmq_peer_discovery_k8s/test/rabbitmq_peer_discovery_k8s_SUITE.erl
new file mode 100644
index 0000000000..546f48d0f2
--- /dev/null
+++ b/deps/rabbitmq_peer_discovery_k8s/test/rabbitmq_peer_discovery_k8s_SUITE.erl
@@ -0,0 +1,132 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% The Initial Developer of the Original Code is AWeber Communications.
+%% Copyright (c) 2015-2016 AWeber Communications
+%% Copyright (c) 2016-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbitmq_peer_discovery_k8s_SUITE).
+
+-compile(export_all).
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+
+all() ->
+ [
+ {group, unit}
+ ].
+
+groups() ->
+ [
+ {unit, [], [
+ extract_node_list_long_test,
+ extract_node_list_short_test,
+ extract_node_list_hostname_short_test,
+ extract_node_list_real_test,
+ extract_node_list_with_not_ready_addresses_test,
+ node_name_empty_test,
+ node_name_suffix_test,
+ registration_support,
+ event_v1_test
+ ]}].
+
+init_per_testcase(T, Config) when T == node_name_empty_test;
+ T == node_name_suffix_test ->
+ meck:new(net_kernel, [passthrough, unstick]),
+ meck:expect(net_kernel, longnames, fun() -> true end),
+ Config;
+init_per_testcase(_, Config) ->
+ Config.
+
+end_per_testcase(_, _Config) ->
+ meck:unload(),
+ application:unset_env(rabbit, cluster_formation),
+ [os:unsetenv(Var) || Var <- ["K8S_HOSTNAME_SUFFIX",
+ "K8S_ADDRESS_TYPE"]].
+
+%%%
+%%% Testcases
+%%%
+
+registration_support(_Config) ->
+ ?assertEqual(true, rabbit_peer_discovery_k8s:supports_registration()).
+
+extract_node_list_long_test(_Config) ->
+ {ok, Response} =
+ rabbit_json:try_decode(
+ rabbit_data_coercion:to_binary(
+ "{\"name\": \"mysvc\",\n\"subsets\": [\n{\n\"addresses\": [{\"ip\": \"10.10.1.1\"}, {\"ip\": \"10.10.2.2\"}],\n\"ports\": [{\"name\": \"a\", \"port\": 8675}, {\"name\": \"b\", \"port\": 309}]\n},\n{\n\"addresses\": [{\"ip\": \"10.10.3.3\"}],\n\"ports\": [{\"name\": \"a\", \"port\": 93},{\"name\": \"b\", \"port\": 76}]\n}]}")),
+ Expectation = [<<"10.10.1.1">>, <<"10.10.2.2">>, <<"10.10.3.3">>],
+ ?assertEqual(Expectation, rabbit_peer_discovery_k8s:extract_node_list(Response)).
+
+extract_node_list_short_test(_Config) ->
+ {ok, Response} =
+ rabbit_json:try_decode(
+ rabbit_data_coercion:to_binary(
+ "{\"name\": \"mysvc\",\n\"subsets\": [\n{\n\"addresses\": [{\"ip\": \"10.10.1.1\"}, {\"ip\": \"10.10.2.2\"}],\n\"ports\": [{\"name\": \"a\", \"port\": 8675}, {\"name\": \"b\", \"port\": 309}]\n}]}")),
+ Expectation = [<<"10.10.1.1">>, <<"10.10.2.2">>],
+ ?assertEqual(Expectation, rabbit_peer_discovery_k8s:extract_node_list(Response)).
+
+extract_node_list_hostname_short_test(_Config) ->
+ os:putenv("K8S_ADDRESS_TYPE", "hostname"),
+ {ok, Response} =
+ rabbit_json:try_decode(
+ rabbit_data_coercion:to_binary(
+ "{\"name\": \"mysvc\",\n\"subsets\": [\n{\n\"addresses\": [{\"ip\": \"10.10.1.1\", \"hostname\": \"rabbitmq-1\"}, {\"ip\": \"10.10.2.2\", \"hostname\": \"rabbitmq-2\"}],\n\"ports\": [{\"name\": \"a\", \"port\": 8675}, {\"name\": \"b\", \"port\": 309}]\n}]}")),
+ Expectation = [<<"rabbitmq-1">>, <<"rabbitmq-2">>],
+ ?assertEqual(Expectation, rabbit_peer_discovery_k8s:extract_node_list(Response)).
+
+extract_node_list_real_test(_Config) ->
+ {ok, Response} =
+ rabbit_json:try_decode(
+ rabbit_data_coercion:to_binary(
+ "{\"kind\":\"Endpoints\",\"apiVersion\":\"v1\",\"metadata\":{\"name\":\"galera\",\"namespace\":\"default\",\"selfLink\":\"/api/v1/namespaces/default/endpoints/galera\",\"uid\":\"646f8305-3491-11e6-8c20-ecf4bbd91e6c\",\"resourceVersion\":\"17373568\",\"creationTimestamp\":\"2016-06-17T13:42:54Z\",\"labels\":{\"app\":\"mysqla\"}},\"subsets\":[{\"addresses\":[{\"ip\":\"10.1.29.8\",\"targetRef\":{\"kind\":\"Pod\",\"namespace\":\"default\",\"name\":\"mariadb-tco7k\",\"uid\":\"fb59cc71-558c-11e6-86e9-ecf4bbd91e6c\",\"resourceVersion\":\"13034802\"}},{\"ip\":\"10.1.47.2\",\"targetRef\":{\"kind\":\"Pod\",\"namespace\":\"default\",\"name\":\"mariadb-izgp8\",\"uid\":\"fb484ab3-558c-11e6-86e9-ecf4bbd91e6c\",\"resourceVersion\":\"13035747\"}},{\"ip\":\"10.1.47.3\",\"targetRef\":{\"kind\":\"Pod\",\"namespace\":\"default\",\"name\":\"mariadb-init-ffrsz\",\"uid\":\"fb12e1d3-558c-11e6-86e9-ecf4bbd91e6c\",\"resourceVersion\":\"13032722\"}},{\"ip\":\"10.1.94.2\",\"targetRef\":{\"kind\":\"Pod\",\"namespace\":\"default\",\"name\":\"mariadb-zcc0o\",\"uid\":\"fb31ce6e-558c-11e6-86e9-ecf4bbd91e6c\",\"resourceVersion\":\"13034771\"}}],\"ports\":[{\"name\":\"mysql\",\"port\":3306,\"protocol\":\"TCP\"}]}]}")),
+ Expectation = [<<"10.1.94.2">>, <<"10.1.47.3">>, <<"10.1.47.2">>,
+ <<"10.1.29.8">>],
+ ?assertEqual(Expectation, rabbit_peer_discovery_k8s:extract_node_list(Response)).
+
+extract_node_list_with_not_ready_addresses_test(_Config) ->
+ {ok, Response} =
+ rabbit_json:try_decode(
+ rabbit_data_coercion:to_binary(
+ "{\"kind\":\"Endpoints\",\"apiVersion\":\"v1\",\"metadata\":{\"name\":\"rabbitmq\",\"namespace\":\"test-rabbitmq\",\"selfLink\":\"\/api\/v1\/namespaces\/test-rabbitmq\/endpoints\/rabbitmq\",\"uid\":\"4ff733b8-3ad2-11e7-a40d-080027cbdcae\",\"resourceVersion\":\"170098\",\"creationTimestamp\":\"2017-05-17T07:27:41Z\",\"labels\":{\"app\":\"rabbitmq\",\"type\":\"LoadBalancer\"}},\"subsets\":[{\"notReadyAddresses\":[{\"ip\":\"172.17.0.2\",\"hostname\":\"rabbitmq-0\",\"nodeName\":\"minikube\",\"targetRef\":{\"kind\":\"Pod\",\"namespace\":\"test-rabbitmq\",\"name\":\"rabbitmq-0\",\"uid\":\"e980fe5a-3afd-11e7-a40d-080027cbdcae\",\"resourceVersion\":\"170044\"}},{\"ip\":\"172.17.0.4\",\"hostname\":\"rabbitmq-1\",\"nodeName\":\"minikube\",\"targetRef\":{\"kind\":\"Pod\",\"namespace\":\"test-rabbitmq\",\"name\":\"rabbitmq-1\",\"uid\":\"f6285603-3afd-11e7-a40d-080027cbdcae\",\"resourceVersion\":\"170071\"}},{\"ip\":\"172.17.0.5\",\"hostname\":\"rabbitmq-2\",\"nodeName\":\"minikube\",\"targetRef\":{\"kind\":\"Pod\",\"namespace\":\"test-rabbitmq\",\"name\":\"rabbitmq-2\",\"uid\":\"fd5a86dc-3afd-11e7-a40d-080027cbdcae\",\"resourceVersion\":\"170096\"}}],\"ports\":[{\"name\":\"amqp\",\"port\":5672,\"protocol\":\"TCP\"},{\"name\":\"http\",\"port\":15672,\"protocol\":\"TCP\"}]}]}")),
+ Expectation = [],
+ ?assertEqual(Expectation, rabbit_peer_discovery_k8s:extract_node_list(Response)).
+
+node_name_empty_test(_Config) ->
+ Expectation = 'rabbit@rabbitmq-0',
+ ?assertEqual(Expectation, rabbit_peer_discovery_k8s:node_name(<<"rabbitmq-0">>)).
+
+node_name_suffix_test(_Config) ->
+ os:putenv("K8S_HOSTNAME_SUFFIX", ".rabbitmq.default.svc.cluster.local"),
+ Expectation = 'rabbit@rabbitmq-0.rabbitmq.default.svc.cluster.local',
+ ?assertEqual(Expectation, rabbit_peer_discovery_k8s:node_name(<<"rabbitmq-0">>)).
+
+event_v1_test(_Config) ->
+ Expectation = #{
+ count => 1,
+ type => <<"Normal">>,
+ lastTimestamp => <<"2019-12-06T15:10:23+00:00">>,
+ reason => <<"Reason">>,
+ message => <<"MyMessage">>,
+ metadata => #{
+ name => <<"test">>,
+ namespace => <<"namespace">>
+ },
+ involvedObject => #{
+ apiVersion => <<"v1">>,
+ kind => <<"RabbitMQ">>,
+ name => <<"pod/MyHostName">>,
+ namespace => <<"namespace">>
+ },
+ source => #{
+ component => <<"MyHostName/rabbitmq_peer_discovery">>,
+ host => <<"MyHostName">>
+ }
+ },
+ ?assertEqual(Expectation,
+ rabbit_peer_discovery_k8s:generate_v1_event(<<"namespace">>, "test",
+ "Normal", "Reason", "MyMessage", "2019-12-06T15:10:23+00:00", "MyHostName")).
diff --git a/deps/rabbitmq_prometheus/.autocomplete b/deps/rabbitmq_prometheus/.autocomplete
new file mode 100755
index 0000000000..afd8ef8a8b
--- /dev/null
+++ b/deps/rabbitmq_prometheus/.autocomplete
@@ -0,0 +1,3 @@
+#!/usr/bin/env bash
+
+eval "$(make autocomplete)"
diff --git a/deps/rabbitmq_prometheus/.dockerignore b/deps/rabbitmq_prometheus/.dockerignore
new file mode 100644
index 0000000000..00ebe93b6d
--- /dev/null
+++ b/deps/rabbitmq_prometheus/.dockerignore
@@ -0,0 +1,8 @@
+.erlang.mk
+.git
+ebin
+logs
+prometheus
+src
+test
+tmp
diff --git a/deps/rabbitmq_prometheus/.gitignore b/deps/rabbitmq_prometheus/.gitignore
new file mode 100644
index 0000000000..c00cbc643a
--- /dev/null
+++ b/deps/rabbitmq_prometheus/.gitignore
@@ -0,0 +1,24 @@
+.sw?
+.*.sw?
+*.beam
+*~
+\#*
+.#*
+*.d
+/.erlang.mk/
+/cover/
+/deps/
+/doc/
+/ebin/
+/escript/
+/escript.lock
+/logs/
+/plugins/
+/plugins.lock
+/sbin/
+/sbin.lock
+
+erl_crash.dump
+prometheus/data
+
+test/config_schema_SUITE_data/schema/
diff --git a/deps/rabbitmq_prometheus/CODE_OF_CONDUCT.md b/deps/rabbitmq_prometheus/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000000..08697906fd
--- /dev/null
+++ b/deps/rabbitmq_prometheus/CODE_OF_CONDUCT.md
@@ -0,0 +1,44 @@
+# Contributor Code of Conduct
+
+As contributors and maintainers of this project, and in the interest of fostering an open
+and welcoming community, we pledge to respect all people who contribute through reporting
+issues, posting feature requests, updating documentation, submitting pull requests or
+patches, and other activities.
+
+We are committed to making participation in this project a harassment-free experience for
+everyone, regardless of level of experience, gender, gender identity and expression,
+sexual orientation, disability, personal appearance, body size, race, ethnicity, age,
+religion, or nationality.
+
+Examples of unacceptable behavior by participants include:
+
+ * The use of sexualized language or imagery
+ * Personal attacks
+ * Trolling or insulting/derogatory comments
+ * Public or private harassment
+ * Publishing others' private information, such as physical or electronic addresses,
+ without explicit permission
+ * Other unethical or unprofessional conduct
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments,
+commits, code, wiki edits, issues, and other contributions that are not aligned to this
+Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors
+that they deem inappropriate, threatening, offensive, or harmful.
+
+By adopting this Code of Conduct, project maintainers commit themselves to fairly and
+consistently applying these principles to every aspect of managing this project. Project
+maintainers who do not follow or enforce the Code of Conduct may be permanently removed
+from the project team.
+
+This Code of Conduct applies both within project spaces and in public spaces when an
+individual is representing the project or its community.
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by
+contacting a project maintainer at [info@rabbitmq.com](mailto:info@rabbitmq.com). All complaints will
+be reviewed and investigated and will result in a response that is deemed necessary and
+appropriate to the circumstances. Maintainers are obligated to maintain confidentiality
+with regard to the reporter of an incident.
+
+This Code of Conduct is adapted from the
+[Contributor Covenant](https://contributor-covenant.org), version 1.3.0, available at
+[contributor-covenant.org/version/1/3/0/](https://contributor-covenant.org/version/1/3/0/)
diff --git a/deps/rabbitmq_prometheus/CONTRIBUTING.md b/deps/rabbitmq_prometheus/CONTRIBUTING.md
new file mode 100644
index 0000000000..592e7ced57
--- /dev/null
+++ b/deps/rabbitmq_prometheus/CONTRIBUTING.md
@@ -0,0 +1,99 @@
+Thank you for using RabbitMQ and for taking the time to contribute to the project.
+This document has two main parts:
+
+ * when and how to file GitHub issues for RabbitMQ projects
+ * how to submit pull requests
+
+They are intended to save you and RabbitMQ maintainers some time, so please
+take a moment to read through them.
+
+## Overview
+
+### GitHub issues
+
+The RabbitMQ team uses GitHub issues for _specific actionable items_ that
+engineers can work on. This assumes the following:
+
+* GitHub issues are not used for questions, investigations, root cause
+ analysis, discussions of potential issues, etc. (as defined by this team)
+* Enough information is provided by the reporter for maintainers to work with
+
+The team receives many questions through various venues every single
+day. Frequently, these questions do not include the necessary details
+the team needs to begin useful work. GitHub issues can very quickly
+turn into a something impossible to navigate and make sense
+of. Because of this, questions, investigations, root cause analysis,
+and discussions of potential features are all considered to be
+[mailing list][rmq-users] material. If you are unsure where to begin,
+the [RabbitMQ users mailing list][rmq-users] is the right place.
+
+Getting all the details necessary to reproduce an issue, make a
+conclusion or even form a hypothesis about what's happening can take a
+fair amount of time. Please help others help you by providing a way to
+reproduce the behavior you're observing, or at least sharing as much
+relevant information as possible on the [RabbitMQ users mailing
+list][rmq-users].
+
+Please provide versions of the software used:
+
+ * RabbitMQ server
+ * Erlang
+ * Operating system version (and distribution, if applicable)
+ * All client libraries used
+ * RabbitMQ plugins (if applicable)
+
+The following information greatly helps in investigating and reproducing issues:
+
+ * RabbitMQ server logs
+ * A code example or terminal transcript that can be used to reproduce
+ * Full exception stack traces (a single line message is not enough!)
+ * `rabbitmqctl report` and `rabbitmqctl environment` output
+ * Other relevant details about the environment and workload, e.g. a traffic capture
+ * Feel free to edit out hostnames and other potentially sensitive information.
+
+To make collecting much of this and other environment information easier, use
+the [`rabbitmq-collect-env`][rmq-collect-env] script. It will produce an archive with
+server logs, operating system logs, output of certain diagnostics commands and so on.
+Please note that **no effort is made to scrub any information that may be sensitive**.
+
+### Pull Requests
+
+RabbitMQ projects use pull requests to discuss, collaborate on and accept code contributions.
+Pull requests are the primary place for discussing code changes.
+
+Here's the recommended workflow:
+
+ * [Fork the repository][github-fork] or repositories you plan on contributing to. If multiple
+ repositories are involved in addressing the same issue, please use the same branch name
+ in each repository
+ * Create a branch with a descriptive name in the relevant repositories
+ * Make your changes, run tests (usually with `make tests`), commit with a
+ [descriptive message][git-commit-msgs], push to your fork
+ * Submit pull requests with an explanation of what has been changed and **why**
+ * Submit a filled out and signed [Contributor Agreement][ca-agreement] if needed (see below)
+ * Be patient. We will get to your pull request eventually
+
+If what you are going to work on is a substantial change, please first
+ask the core team for their opinion on the [RabbitMQ users mailing list][rmq-users].
+
+## Code of Conduct
+
+See [CODE_OF_CONDUCT.md](./CODE_OF_CONDUCT.md).
+
+## Contributor Agreement
+
+If you want to contribute a non-trivial change, please submit a signed
+copy of our [Contributor Agreement][ca-agreement] around the time you
+submit your pull request. This will make it much easier (in some
+cases, possible) for the RabbitMQ team at Pivotal to merge your
+contribution.
+
+## Where to Ask Questions
+
+If something isn't clear, feel free to ask on our [mailing list][rmq-users].
+
+[rmq-collect-env]: https://github.com/rabbitmq/support-tools/blob/master/scripts/rabbitmq-collect-env
+[git-commit-msgs]: https://chris.beams.io/posts/git-commit/
+[rmq-users]: https://groups.google.com/forum/#!forum/rabbitmq-users
+[ca-agreement]: https://cla.pivotal.io/sign/rabbitmq
+[github-fork]: https://help.github.com/articles/fork-a-repo/
diff --git a/deps/rabbitmq_prometheus/Dockerfile b/deps/rabbitmq_prometheus/Dockerfile
new file mode 100644
index 0000000000..3452115475
--- /dev/null
+++ b/deps/rabbitmq_prometheus/Dockerfile
@@ -0,0 +1,315 @@
+# The official Canonical Ubuntu Bionic image is ideal from a security perspective,
+# especially for the enterprises that we, the RabbitMQ team, have to deal with
+FROM ubuntu:18.04
+
+RUN set -eux; \
+ apt-get update; \
+ apt-get install -y --no-install-recommends \
+# grab gosu for easy step-down from root
+ gosu \
+ ; \
+ rm -rf /var/lib/apt/lists/*; \
+# verify that the "gosu" binary works
+ gosu nobody true
+
+# Default to a PGP keyserver that pgp-happy-eyeballs recognizes, but allow for substitutions locally
+ARG PGP_KEYSERVER=ha.pool.sks-keyservers.net
+# If you are building this image locally and are getting `gpg: keyserver receive failed: No data` errors,
+# run the build with a different PGP_KEYSERVER, e.g. docker build --tag rabbitmq:3.7 --build-arg PGP_KEYSERVER=pgpkeys.eu 3.7/ubuntu
+# For context, see https://github.com/docker-library/official-images/issues/4252
+
+# Using the latest OpenSSL LTS release, with support until September 2023 - https://www.openssl.org/source/
+ENV OPENSSL_VERSION 1.1.1g
+ENV OPENSSL_SOURCE_SHA256="ddb04774f1e32f0c49751e21b67216ac87852ceb056b75209af2443400636d46"
+# https://www.openssl.org/community/omc.html
+ENV OPENSSL_PGP_KEY_IDS="0x8657ABB260F056B1E5190839D9C4D26D0E604491 0x5B2545DAB21995F4088CEFAA36CEE4DEB00CFE33 0xED230BEC4D4F2518B9D7DF41F0DB4D21C1D35231 0xC1F33DD8CE1D4CC613AF14DA9195C48241FBF7DD 0x7953AC1FBC3DC8B3B292393ED5E9E43F7DF9EE8C 0xE5E52560DD91C556DDBDA5D02064C53641C25E5D"
+
+# Use the latest stable Erlang/OTP release - make find-latest-otp - https://github.com/erlang/otp/tags
+ARG OTP_VERSION
+ENV OTP_VERSION ${OTP_VERSION}
+# TODO add PGP checking when the feature will be added to Erlang/OTP's build system
+# http://erlang.org/pipermail/erlang-questions/2019-January/097067.html
+ARG OTP_SHA256
+ENV OTP_SOURCE_SHA256=${OTP_SHA256}
+
+# Install dependencies required to build Erlang/OTP from source
+# http://erlang.org/doc/installation_guide/INSTALL.html
+# autoconf: Required to configure Erlang/OTP before compiling
+# dpkg-dev: Required to set up host & build type when compiling Erlang/OTP
+# gnupg: Required to verify OpenSSL artefacts
+# libncurses5-dev: Required for Erlang/OTP new shell & observer_cli - https://github.com/zhongwencool/observer_cli
+RUN set -eux; \
+ \
+ savedAptMark="$(apt-mark showmanual)"; \
+ apt-get update; \
+ apt-get install --yes --no-install-recommends \
+ autoconf \
+ ca-certificates \
+ dpkg-dev \
+ gcc \
+ gnupg \
+ libncurses5-dev \
+ make \
+ wget \
+ ; \
+ rm -rf /var/lib/apt/lists/*; \
+ \
+ OPENSSL_SOURCE_URL="https://www.openssl.org/source/openssl-$OPENSSL_VERSION.tar.gz"; \
+ OPENSSL_PATH="/usr/local/src/openssl-$OPENSSL_VERSION"; \
+ OPENSSL_CONFIG_DIR=/usr/local/etc/ssl; \
+ \
+# Required by the crypto & ssl Erlang/OTP applications
+ wget --progress dot:giga --output-document "$OPENSSL_PATH.tar.gz.asc" "$OPENSSL_SOURCE_URL.asc"; \
+ wget --progress dot:giga --output-document "$OPENSSL_PATH.tar.gz" "$OPENSSL_SOURCE_URL"; \
+ export GNUPGHOME="$(mktemp -d)"; \
+ for key in $OPENSSL_PGP_KEY_IDS; do \
+ gpg --batch --keyserver "$PGP_KEYSERVER" --recv-keys "$key" || true; \
+ done; \
+ gpg --batch --verify "$OPENSSL_PATH.tar.gz.asc" "$OPENSSL_PATH.tar.gz"; \
+ gpgconf --kill all; \
+ rm -rf "$GNUPGHOME"; \
+ echo "$OPENSSL_SOURCE_SHA256 *$OPENSSL_PATH.tar.gz" | sha256sum --check --strict -; \
+ mkdir -p "$OPENSSL_PATH"; \
+ tar --extract --file "$OPENSSL_PATH.tar.gz" --directory "$OPENSSL_PATH" --strip-components 1; \
+ \
+# Configure OpenSSL for compilation
+ cd "$OPENSSL_PATH"; \
+# OpenSSL's "config" script uses a lot of "uname"-based target detection...
+ MACHINE="$(dpkg-architecture --query DEB_BUILD_GNU_CPU)" \
+ RELEASE="4.x.y-z" \
+ SYSTEM='Linux' \
+ BUILD='???' \
+ ./config \
+ --openssldir="$OPENSSL_CONFIG_DIR" \
+# add -rpath to avoid conflicts between our OpenSSL's "libssl.so" and the libssl package by making sure /usr/local/lib is searched first (but only for Erlang/OpenSSL to avoid issues with other tools using libssl; https://github.com/docker-library/rabbitmq/issues/364)
+ -Wl,-rpath=/usr/local/lib \
+ ; \
+# Compile, install OpenSSL, verify that the command-line works & development headers are present
+ make -j "$(getconf _NPROCESSORS_ONLN)"; \
+ make install_sw install_ssldirs; \
+ cd ..; \
+ rm -rf "$OPENSSL_PATH"*; \
+ ldconfig; \
+# use Debian's CA certificates
+ rmdir "$OPENSSL_CONFIG_DIR/certs" "$OPENSSL_CONFIG_DIR/private"; \
+ ln -sf /etc/ssl/certs /etc/ssl/private "$OPENSSL_CONFIG_DIR"; \
+# smoke test
+ openssl version; \
+ \
+ OTP_SOURCE_URL="https://github.com/erlang/otp/archive/OTP-$OTP_VERSION.tar.gz"; \
+ OTP_PATH="/usr/local/src/otp-$OTP_VERSION"; \
+ \
+# Download, verify & extract OTP_SOURCE
+ mkdir -p "$OTP_PATH"; \
+ wget --progress dot:giga --output-document "$OTP_PATH.tar.gz" "$OTP_SOURCE_URL"; \
+ echo "$OTP_SOURCE_SHA256 *$OTP_PATH.tar.gz" | sha256sum --check --strict -; \
+ tar --extract --file "$OTP_PATH.tar.gz" --directory "$OTP_PATH" --strip-components 1; \
+ \
+# Configure Erlang/OTP for compilation, disable unused features & applications
+# http://erlang.org/doc/applications.html
+# ERL_TOP is required for Erlang/OTP makefiles to find the absolute path for the installation
+ cd "$OTP_PATH"; \
+ export ERL_TOP="$OTP_PATH"; \
+ ./otp_build autoconf; \
+ CFLAGS="$(dpkg-buildflags --get CFLAGS)"; export CFLAGS; \
+# add -rpath to avoid conflicts between our OpenSSL's "libssl.so" and the libssl package by making sure /usr/local/lib is searched first (but only for Erlang/OpenSSL to avoid issues with other tools using libssl; https://github.com/docker-library/rabbitmq/issues/364)
+ export CFLAGS="$CFLAGS -Wl,-rpath=/usr/local/lib"; \
+ hostArch="$(dpkg-architecture --query DEB_HOST_GNU_TYPE)"; \
+ buildArch="$(dpkg-architecture --query DEB_BUILD_GNU_TYPE)"; \
+ dpkgArch="$(dpkg --print-architecture)"; dpkgArch="${dpkgArch##*-}"; \
+ ./configure \
+ --host="$hostArch" \
+ --build="$buildArch" \
+ --disable-dynamic-ssl-lib \
+ --disable-hipe \
+ --disable-sctp \
+ --disable-silent-rules \
+ --enable-clock-gettime \
+ --enable-hybrid-heap \
+ --enable-kernel-poll \
+ --enable-shared-zlib \
+ --enable-smp-support \
+ --enable-threads \
+ --with-microstate-accounting=extra \
+ --without-common_test \
+ --without-debugger \
+ --without-dialyzer \
+ --without-diameter \
+ --without-edoc \
+ --without-erl_docgen \
+ --without-erl_interface \
+ --without-et \
+ --without-eunit \
+ --without-ftp \
+ --without-hipe \
+ --without-jinterface \
+ --without-megaco \
+ --without-observer \
+ --without-odbc \
+ --without-reltool \
+ --without-ssh \
+ --without-tftp \
+ --without-wx \
+ ; \
+# Compile & install Erlang/OTP
+ make -j "$(getconf _NPROCESSORS_ONLN)" GEN_OPT_FLGS="-O2 -fno-strict-aliasing"; \
+ make install; \
+ cd ..; \
+ rm -rf \
+ "$OTP_PATH"* \
+ /usr/local/lib/erlang/lib/*/examples \
+ /usr/local/lib/erlang/lib/*/src \
+ ; \
+ \
+# reset apt-mark's "manual" list so that "purge --auto-remove" will remove all build dependencies
+ apt-mark auto '.*' > /dev/null; \
+ [ -z "$savedAptMark" ] || apt-mark manual $savedAptMark; \
+ find /usr/local -type f -executable -exec ldd '{}' ';' \
+ | awk '/=>/ { print $(NF-1) }' \
+ | sort -u \
+ | xargs -r dpkg-query --search \
+ | cut -d: -f1 \
+ | sort -u \
+ | xargs -r apt-mark manual \
+ ; \
+ apt-get purge -y --auto-remove -o APT::AutoRemove::RecommendsImportant=false; \
+ \
+# Check that OpenSSL still works after purging build dependencies
+ openssl version; \
+# Check that Erlang/OTP crypto & ssl were compiled against OpenSSL correctly
+ erl -noshell -eval 'io:format("~p~n~n~p~n~n", [crypto:supports(), ssl:versions()]), init:stop().'
+
+ENV RABBITMQ_DATA_DIR=/var/lib/rabbitmq
+# Create rabbitmq system user & group, fix permissions & allow root user to connect to the RabbitMQ Erlang VM
+RUN set -eux; \
+ groupadd --gid 999 --system rabbitmq; \
+ useradd --uid 999 --system --home-dir "$RABBITMQ_DATA_DIR" --gid rabbitmq rabbitmq; \
+ mkdir -p "$RABBITMQ_DATA_DIR" /etc/rabbitmq /tmp/rabbitmq-ssl /var/log/rabbitmq; \
+ chown -fR rabbitmq:rabbitmq "$RABBITMQ_DATA_DIR" /etc/rabbitmq /tmp/rabbitmq-ssl /var/log/rabbitmq; \
+ chmod 777 "$RABBITMQ_DATA_DIR" /etc/rabbitmq /tmp/rabbitmq-ssl /var/log/rabbitmq; \
+ ln -sf "$RABBITMQ_DATA_DIR/.erlang.cookie" /root/.erlang.cookie
+
+# Use the latest alpha RabbitMQ 3.9 release - https://dl.bintray.com/rabbitmq/all-dev/rabbitmq-server/
+ARG RABBITMQ_VERSION
+ENV RABBITMQ_VERSION=${RABBITMQ_VERSION}
+ARG RABBITMQ_BUILD_NUMBER
+ENV RABBITMQ_BUILD_NUMBER=${RABBITMQ_BUILD_NUMBER}
+# https://www.rabbitmq.com/signatures.html#importing-gpg
+ENV RABBITMQ_PGP_KEY_ID="0x0A9AF2115F4687BD29803A206B73A36E6026DFCA"
+ENV RABBITMQ_HOME=/opt/rabbitmq
+
+# Add RabbitMQ to PATH, send all logs to TTY
+ENV PATH=$RABBITMQ_HOME/sbin:$PATH \
+ RABBITMQ_LOGS=- RABBITMQ_SASL_LOGS=-
+
+# Install RabbitMQ
+RUN set -eux; \
+ \
+ savedAptMark="$(apt-mark showmanual)"; \
+ apt-get update; \
+ apt-get install --yes --no-install-recommends \
+ ca-certificates \
+ gnupg \
+ wget \
+ xz-utils \
+ ; \
+ rm -rf /var/lib/apt/lists/*; \
+ \
+ RABBITMQ_SOURCE_URL="https://s3-eu-west-1.amazonaws.com/server-release-pipeline/v3.9.x/unverified-packages/rabbitmq-server-${RABBITMQ_VERSION}-build-${RABBITMQ_BUILD_NUMBER}-generic-unix-latest-toolchain.tar"; \
+ RABBITMQ_PATH="/usr/local/src/rabbitmq-$RABBITMQ_VERSION"; \
+ \
+ # wget --progress dot:giga --output-document "$RABBITMQ_PATH.tar.xz.asc" "$RABBITMQ_SOURCE_URL.asc"; \
+ wget --progress dot:giga --output-document "$RABBITMQ_PATH.tar" "$RABBITMQ_SOURCE_URL"; \
+ tar --extract --file "$RABBITMQ_PATH.tar"; \
+ \
+ # export GNUPGHOME="$(mktemp -d)"; \
+ # gpg --batch --keyserver "$PGP_KEYSERVER" --recv-keys "$RABBITMQ_PGP_KEY_ID"; \
+ # gpg --batch --verify "$RABBITMQ_PATH.tar.xz.asc" "$RABBITMQ_PATH.tar.xz"; \
+ # gpgconf --kill all; \
+ # rm -rf "$GNUPGHOME"; \
+ \
+ mkdir -p "$RABBITMQ_HOME"; \
+ tar --extract --file "rabbitmq-server-generic-unix-latest-toolchain-${RABBITMQ_VERSION}.tar.xz" --directory "$RABBITMQ_HOME" --strip-components 1; \
+ rm -rf "$RABBITMQ_PATH"* rabbitmq-server-generic-unix*; \
+# Do not default SYS_PREFIX to RABBITMQ_HOME, leave it empty
+ grep -qE '^SYS_PREFIX=\$\{RABBITMQ_HOME\}$' "$RABBITMQ_HOME/sbin/rabbitmq-defaults"; \
+ sed -i 's/^SYS_PREFIX=.*$/SYS_PREFIX=/' "$RABBITMQ_HOME/sbin/rabbitmq-defaults"; \
+ grep -qE '^SYS_PREFIX=$' "$RABBITMQ_HOME/sbin/rabbitmq-defaults"; \
+ chown -R rabbitmq:rabbitmq "$RABBITMQ_HOME"; \
+ \
+ apt-mark auto '.*' > /dev/null; \
+ apt-mark manual $savedAptMark; \
+ apt-get purge -y --auto-remove -o APT::AutoRemove::RecommendsImportant=false; \
+ \
+# verify assumption of no stale cookies
+ [ ! -e "$RABBITMQ_DATA_DIR/.erlang.cookie" ]; \
+# Ensure RabbitMQ was installed correctly by running a few commands that do not depend on a running server, as the rabbitmq user
+# If they all succeed, it's safe to assume that things have been set up correctly
+ gosu rabbitmq rabbitmqctl help; \
+ gosu rabbitmq rabbitmqctl list_ciphers; \
+ gosu rabbitmq rabbitmq-plugins list; \
+# no stale cookies
+ rm "$RABBITMQ_DATA_DIR/.erlang.cookie"
+
+# Added for backwards compatibility - users can simply COPY custom plugins to /plugins
+RUN ln -sf /opt/rabbitmq/plugins /plugins
+
+# set home so that any `--user` knows where to put the erlang cookie
+ENV HOME $RABBITMQ_DATA_DIR
+# Hint that the data dir (a.k.a. the home dir) should be a separate volume
+VOLUME $RABBITMQ_DATA_DIR
+
+# warning: the VM is running with native name encoding of latin1 which may cause Elixir to malfunction as it expects utf8. Please ensure your locale is set to UTF-8 (which can be verified by running "locale" in your shell)
+# Set all environment variables that control language preferences; behaviour differs between tools - https://www.gnu.org/software/gettext/manual/html_node/The-LANGUAGE-variable.html#The-LANGUAGE-variable
+# https://docs.docker.com/samples/library/ubuntu/#locales
+ENV LANG=C.UTF-8 LANGUAGE=C.UTF-8 LC_ALL=C.UTF-8
+
+COPY docker/docker-entrypoint.sh /usr/local/bin/
+ENTRYPOINT ["docker-entrypoint.sh"]
+
+EXPOSE 4369 5671 5672 25672
+CMD ["rabbitmq-server"]
+
+# rabbitmq_management
+RUN rabbitmq-plugins enable --offline rabbitmq_management && \
+ rabbitmq-plugins is_enabled rabbitmq_management --offline
+# extract "rabbitmqadmin" from inside the "rabbitmq_management-X.Y.Z.ez" plugin zipfile
+# see https://github.com/docker-library/rabbitmq/issues/207
+RUN set -eux; \
+ erl -noinput -eval ' \
+ { ok, AdminBin } = zip:foldl(fun(FileInArchive, GetInfo, GetBin, Acc) -> \
+ case Acc of \
+ "" -> \
+ case lists:suffix("/rabbitmqadmin", FileInArchive) of \
+ true -> GetBin(); \
+ false -> Acc \
+ end; \
+ _ -> Acc \
+ end \
+ end, "", init:get_plain_arguments()), \
+ io:format("~s", [ AdminBin ]), \
+ init:stop(). \
+ ' -- /plugins/rabbitmq_management-*.ez > /usr/local/bin/rabbitmqadmin; \
+ [ -s /usr/local/bin/rabbitmqadmin ]; \
+ chmod +x /usr/local/bin/rabbitmqadmin; \
+ apt-get update; apt-get install -y --no-install-recommends python3; rm -rf /var/lib/apt/lists/*; \
+ rabbitmqadmin --version
+EXPOSE 15671 15672
+
+# rabbitmq_top
+RUN rabbitmq-plugins enable --offline rabbitmq_top && \
+ rabbitmq-plugins is_enabled rabbitmq_top --offline
+
+# rabbitmq_prometheus
+RUN rm /plugins/prometheus*.ez
+COPY plugins/prometheus*.ez /plugins/
+RUN rm /plugins/rabbitmq_prometheus*.ez
+COPY plugins/rabbitmq_prometheus*.ez /plugins/
+
+ARG RABBITMQ_PROMETHEUS_VERSION
+RUN chmod --recursive --verbose a+r /plugins/*.ez && \
+ chown --recursive --verbose rabbitmq:rabbitmq /plugins && \
+ rabbitmq-plugins enable --offline rabbitmq_prometheus && \
+ rabbitmq-plugins is_enabled rabbitmq_prometheus --offline && \
+ rabbitmq-plugins list | grep "rabbitmq_prometheus.*${RABBITMQ_PROMETHEUS_VERSION}"
+EXPOSE 15692
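+
+# Building this image requires the OTP_VERSION, OTP_SHA256, RABBITMQ_VERSION,
+# RABBITMQ_BUILD_NUMBER and RABBITMQ_PROMETHEUS_VERSION build arguments
+# declared above, e.g. docker build --build-arg OTP_VERSION=... (a sketch
+# invocation); the Makefile below sets matching values and is presumably the
+# intended entry point.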
diff --git a/deps/rabbitmq_prometheus/LICENSE b/deps/rabbitmq_prometheus/LICENSE
new file mode 100644
index 0000000000..f2da65d175
--- /dev/null
+++ b/deps/rabbitmq_prometheus/LICENSE
@@ -0,0 +1,4 @@
+This package is licensed under the MPL 2.0. For the MPL 2.0, please see LICENSE-MPL-RabbitMQ.
+
+If you have any questions regarding licensing, please contact us at
+info@rabbitmq.com.
diff --git a/deps/rabbitmq_prometheus/LICENSE-MPL-RabbitMQ b/deps/rabbitmq_prometheus/LICENSE-MPL-RabbitMQ
new file mode 100644
index 0000000000..14e2f777f6
--- /dev/null
+++ b/deps/rabbitmq_prometheus/LICENSE-MPL-RabbitMQ
@@ -0,0 +1,373 @@
+Mozilla Public License Version 2.0
+==================================
+
+1. Definitions
+--------------
+
+1.1. "Contributor"
+ means each individual or legal entity that creates, contributes to
+ the creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+ means the combination of the Contributions of others (if any) used
+ by a Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+ means Source Code Form to which the initial Contributor has attached
+ the notice in Exhibit A, the Executable Form of such Source Code
+ Form, and Modifications of such Source Code Form, in each case
+ including portions thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ (a) that the initial Contributor has attached the notice described
+ in Exhibit B to the Covered Software; or
+
+ (b) that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the
+ terms of a Secondary License.
+
+1.6. "Executable Form"
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+ means a work that combines Covered Software with other material, in
+ a separate file or files, that is not Covered Software.
+
+1.8. "License"
+ means this document.
+
+1.9. "Licensable"
+ means having the right to grant, to the maximum extent possible,
+ whether at the time of the initial grant or subsequently, any and
+ all of the rights conveyed by this License.
+
+1.10. "Modifications"
+ means any of the following:
+
+ (a) any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered
+ Software; or
+
+ (b) any new file in Source Code Form that contains any Covered
+ Software.
+
+1.11. "Patent Claims" of a Contributor
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the
+ License, by the making, using, selling, offering for sale, having
+ made, import, or transfer of either its Contributions or its
+ Contributor Version.
+
+1.12. "Secondary License"
+ means either the GNU General Public License, Version 2.0, the GNU
+ Lesser General Public License, Version 2.1, the GNU Affero General
+ Public License, Version 3.0, or any later versions of those
+ licenses.
+
+1.13. "Source Code Form"
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that
+ controls, is controlled by, or is under common control with You. For
+ purposes of this definition, "control" means (a) the power, direct
+ or indirect, to cause the direction or management of such entity,
+ whether by contract or otherwise, or (b) ownership of more than
+ fifty percent (50%) of the outstanding shares or beneficial
+ ownership of such entity.
+
+2. License Grants and Conditions
+--------------------------------
+
+2.1. Grants
+
+Each Contributor hereby grants You a world-wide, royalty-free,
+non-exclusive license:
+
+(a) under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+(b) under Patent Claims of such Contributor to make, use, sell, offer
+ for sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+The licenses granted in Section 2.1 with respect to any Contribution
+become effective for each Contribution on the date the Contributor first
+distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+The licenses granted in this Section 2 are the only rights granted under
+this License. No additional rights or licenses will be implied from the
+distribution or licensing of Covered Software under this License.
+Notwithstanding Section 2.1(b) above, no patent license is granted by a
+Contributor:
+
+(a) for any code that a Contributor has removed from Covered Software;
+ or
+
+(b) for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+(c) under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+This License does not grant any rights in the trademarks, service marks,
+or logos of any Contributor (except as may be necessary to comply with
+the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+No Contributor makes additional grants as a result of Your choice to
+distribute the Covered Software under a subsequent version of this
+License (see Section 10.2) or under the terms of a Secondary License (if
+permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+Each Contributor represents that the Contributor believes its
+Contributions are its original creation(s) or it has sufficient rights
+to grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+This License is not intended to limit any rights You have under
+applicable copyright doctrines of fair use, fair dealing, or other
+equivalents.
+
+2.7. Conditions
+
+Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
+in Section 2.1.
+
+3. Responsibilities
+-------------------
+
+3.1. Distribution of Source Form
+
+All distribution of Covered Software in Source Code Form, including any
+Modifications that You create or to which You contribute, must be under
+the terms of this License. You must inform recipients that the Source
+Code Form of the Covered Software is governed by the terms of this
+License, and how they can obtain a copy of this License. You may not
+attempt to alter or restrict the recipients' rights in the Source Code
+Form.
+
+3.2. Distribution of Executable Form
+
+If You distribute Covered Software in Executable Form then:
+
+(a) such Covered Software must also be made available in Source Code
+ Form, as described in Section 3.1, and You must inform recipients of
+ the Executable Form how they can obtain a copy of such Source Code
+ Form by reasonable means in a timely manner, at a charge no more
+ than the cost of distribution to the recipient; and
+
+(b) You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter
+ the recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+You may create and distribute a Larger Work under terms of Your choice,
+provided that You also comply with the requirements of this License for
+the Covered Software. If the Larger Work is a combination of Covered
+Software with a work governed by one or more Secondary Licenses, and the
+Covered Software is not Incompatible With Secondary Licenses, this
+License permits You to additionally distribute such Covered Software
+under the terms of such Secondary License(s), so that the recipient of
+the Larger Work may, at their option, further distribute the Covered
+Software under the terms of either this License or such Secondary
+License(s).
+
+3.4. Notices
+
+You may not remove or alter the substance of any license notices
+(including copyright notices, patent notices, disclaimers of warranty,
+or limitations of liability) contained within the Source Code Form of
+the Covered Software, except that You may alter any license notices to
+the extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+You may choose to offer, and to charge a fee for, warranty, support,
+indemnity or liability obligations to one or more recipients of Covered
+Software. However, You may do so only on Your own behalf, and not on
+behalf of any Contributor. You must make it absolutely clear that any
+such warranty, support, indemnity, or liability obligation is offered by
+You alone, and You hereby agree to indemnify every Contributor for any
+liability incurred by such Contributor as a result of warranty, support,
+indemnity or liability terms You offer. You may include additional
+disclaimers of warranty and limitations of liability specific to any
+jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+---------------------------------------------------
+
+If it is impossible for You to comply with any of the terms of this
+License with respect to some or all of the Covered Software due to
+statute, judicial order, or regulation then You must: (a) comply with
+the terms of this License to the maximum extent possible; and (b)
+describe the limitations and the code they affect. Such description must
+be placed in a text file included with all distributions of the Covered
+Software under this License. Except to the extent prohibited by statute
+or regulation, such description must be sufficiently detailed for a
+recipient of ordinary skill to be able to understand it.
+
+5. Termination
+--------------
+
+5.1. The rights granted under this License will terminate automatically
+if You fail to comply with any of its terms. However, if You become
+compliant, then the rights granted under this License from a particular
+Contributor are reinstated (a) provisionally, unless and until such
+Contributor explicitly and finally terminates Your grants, and (b) on an
+ongoing basis, if such Contributor fails to notify You of the
+non-compliance by some reasonable means prior to 60 days after You have
+come back into compliance. Moreover, Your grants from a particular
+Contributor are reinstated on an ongoing basis if such Contributor
+notifies You of the non-compliance by some reasonable means, this is the
+first time You have received notice of non-compliance with this License
+from such Contributor, and You become compliant prior to 30 days after
+Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+infringement claim (excluding declaratory judgment actions,
+counter-claims, and cross-claims) alleging that a Contributor Version
+directly or indirectly infringes any patent, then the rights granted to
+You by any and all Contributors for the Covered Software under Section
+2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all
+end user license agreements (excluding distributors and resellers) which
+have been validly granted by You or Your distributors under this License
+prior to termination shall survive termination.
+
+************************************************************************
+* *
+* 6. Disclaimer of Warranty *
+* ------------------------- *
+* *
+* Covered Software is provided under this License on an "as is" *
+* basis, without warranty of any kind, either expressed, implied, or *
+* statutory, including, without limitation, warranties that the *
+* Covered Software is free of defects, merchantable, fit for a *
+* particular purpose or non-infringing. The entire risk as to the *
+* quality and performance of the Covered Software is with You. *
+* Should any Covered Software prove defective in any respect, You *
+* (not any Contributor) assume the cost of any necessary servicing, *
+* repair, or correction. This disclaimer of warranty constitutes an *
+* essential part of this License. No use of any Covered Software is *
+* authorized under this License except under this disclaimer. *
+* *
+************************************************************************
+
+************************************************************************
+* *
+* 7. Limitation of Liability *
+* -------------------------- *
+* *
+* Under no circumstances and under no legal theory, whether tort *
+* (including negligence), contract, or otherwise, shall any *
+* Contributor, or anyone who distributes Covered Software as *
+* permitted above, be liable to You for any direct, indirect, *
+* special, incidental, or consequential damages of any character *
+* including, without limitation, damages for lost profits, loss of *
+* goodwill, work stoppage, computer failure or malfunction, or any *
+* and all other commercial damages or losses, even if such party *
+* shall have been informed of the possibility of such damages. This *
+* limitation of liability shall not apply to liability for death or *
+* personal injury resulting from such party's negligence to the *
+* extent applicable law prohibits such limitation. Some *
+* jurisdictions do not allow the exclusion or limitation of *
+* incidental or consequential damages, so this exclusion and *
+* limitation may not apply to You. *
+* *
+************************************************************************
+
+8. Litigation
+-------------
+
+Any litigation relating to this License may be brought only in the
+courts of a jurisdiction where the defendant maintains its principal
+place of business and such litigation shall be governed by laws of that
+jurisdiction, without reference to its conflict-of-law provisions.
+Nothing in this Section shall prevent a party's ability to bring
+cross-claims or counter-claims.
+
+9. Miscellaneous
+----------------
+
+This License represents the complete agreement concerning the subject
+matter hereof. If any provision of this License is held to be
+unenforceable, such provision shall be reformed only to the extent
+necessary to make it enforceable. Any law or regulation which provides
+that the language of a contract shall be construed against the drafter
+shall not be used to construe this License against a Contributor.
+
+10. Versions of the License
+---------------------------
+
+10.1. New Versions
+
+Mozilla Foundation is the license steward. Except as provided in Section
+10.3, no one other than the license steward has the right to modify or
+publish new versions of this License. Each version will be given a
+distinguishing version number.
+
+10.2. Effect of New Versions
+
+You may distribute the Covered Software under the terms of the version
+of the License under which You originally received the Covered Software,
+or under the terms of any subsequent version published by the license
+steward.
+
+10.3. Modified Versions
+
+If you create software not governed by this License, and you want to
+create a new license for such software, you may create and use a
+modified version of this License if you rename the license and remove
+any references to the name of the license steward (except to note that
+such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+Licenses
+
+If You choose to distribute Source Code Form that is Incompatible With
+Secondary Licenses under the terms of this version of the License, the
+notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+-------------------------------------------
+
+ This Source Code Form is subject to the terms of the Mozilla Public
+ License, v. 2.0. If a copy of the MPL was not distributed with this
+ file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular
+file, then You may include the notice in a location (such as a LICENSE
+file in a relevant directory) where a recipient would be likely to look
+for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+---------------------------------------------------------
+
+ This Source Code Form is "Incompatible With Secondary Licenses", as
+ defined by the Mozilla Public License, v. 2.0.
diff --git a/deps/rabbitmq_prometheus/Makefile b/deps/rabbitmq_prometheus/Makefile
new file mode 100644
index 0000000000..178429c7ae
--- /dev/null
+++ b/deps/rabbitmq_prometheus/Makefile
@@ -0,0 +1,245 @@
+TODAY := $(shell date -u +'%Y.%m.%d')
+# Use the latest alpha RabbitMQ 3.9 release - https://ci.rabbitmq.com/teams/main/pipelines/server-release:v3.9.x/jobs/build-test-package-generic-unix-latest-toolchain
+BASED_ON_RABBITMQ_VERSION := 3.9.0-alpha.349
+DOCKER_IMAGE_NAME := pivotalrabbitmq/rabbitmq-prometheus
+DOCKER_IMAGE_VERSION := $(BASED_ON_RABBITMQ_VERSION)-$(TODAY)
+# RABBITMQ_VERSION is used in rabbitmq-components.mk to set PROJECT_VERSION
+RABBITMQ_VERSION ?= $(DOCKER_IMAGE_VERSION)
+# This is taken from the CI job above
+RABBITMQ_BUILD_NUMBER := 375
+# make find-latest-otp
+OTP_VERSION := 23.0.2
+OTP_SHA256 := 6bab92d1a1b20cc319cd845c23db3611cc99f8c99a610d117578262e3c108af3
+
+define PROJECT_ENV
+[
+ {return_per_object_metrics, false}
+]
+endef
+
+PROJECT := rabbitmq_prometheus
+PROJECT_MOD := rabbit_prometheus_app
+DEPS = rabbit rabbitmq_management_agent prometheus rabbitmq_web_dispatch
+# Deps that are not applications
+# rabbitmq_management is added so that we build a custom version, for the Docker image
+BUILD_DEPS = accept amqp_client rabbit_common rabbitmq_management
+TEST_DEPS = rabbitmq_ct_helpers rabbitmq_ct_client_helpers eunit_formatters
+
+EUNIT_OPTS = no_tty, {report, {eunit_progress, [colored, profile]}}
+
+DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-plugin.mk
+DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk
+
+# FIXME: Use erlang.mk patched for RabbitMQ, while waiting for PRs to be
+# reviewed and merged.
+
+ERLANG_MK_REPO = https://github.com/rabbitmq/erlang.mk.git
+ERLANG_MK_COMMIT = rabbitmq-tmp
+
+ifneq ($(DISABLE_METRICS_COLLECTOR),)
+BUILD_DEPS = accept amqp_client rabbit_common
+RABBITMQ_CONFIG_FILE = $(CURDIR)/rabbitmq-disable-metrics-collector.conf
+export RABBITMQ_CONFIG_FILE
+endif
+
+include rabbitmq-components.mk
+include erlang.mk
+
+define MAKE_TARGETS
+ awk -F: '/^[^\.%\t][a-zA-Z\._\-]*:+.*$$/ { printf "%s\n", $$1 }' $(MAKEFILE_LIST)
+endef
+define BASH_AUTOCOMPLETE
+ complete -W \"$$($(MAKE_TARGETS) | sort | uniq)\" make gmake m
+endef
+.PHONY: autocomplete
+autocomplete: ## ac | Configure shell for autocompletion - eval "$(gmake autocomplete)"
+ @echo "$(BASH_AUTOCOMPLETE)"
+.PHONY: ac
+ac: autocomplete
+# Continuous Feedback for the ac target - run in a separate pane while iterating on it
+.PHONY: CFac
+CFac:
+ @watch -c $(MAKE) ac
+
+.PHONY: clean-docker
+clean-docker: ## cd | Clean all Docker containers & volumes
+ @docker system prune -f && \
+ docker volume prune -f
+.PHONY: cd
+cd: clean-docker
+
+.PHONY: readme
+readme: ## pre | Preview README & live reload on edit
+ @docker run --interactive --tty --rm --name changelog_md \
+ --volume $(CURDIR):/data \
+ --volume $(HOME)/.grip:/.grip \
+ --expose 5000 --publish 5000:5000 \
+ mbentley/grip --context=. 0.0.0.0:5000
+.PHONY: pre
+pre: readme
+
+define CTOP_CONTAINER
+docker pull quay.io/vektorlab/ctop:latest && \
+docker run --rm --interactive --tty \
+ --cpus 0.5 --memory 128M \
+ --volume /var/run/docker.sock:/var/run/docker.sock \
+ --name ctop_$(USER) \
+ quay.io/vektorlab/ctop:latest
+endef
+.PHONY: ctop
+ctop: ## c | Interact with all containers via a top-like utility
+ @$(CTOP_CONTAINER)
+.PHONY: c
+c: ctop
+
+.PHONY: dockerhub-login
+dockerhub-login: ## dl | Login to Docker Hub as pivotalrabbitmq
+ @echo "$$(lpass show --password 7672183166535202820)" | \
+ docker login --username pivotalrabbitmq --password-stdin
+.PHONY: dl
+dl: dockerhub-login
+
+.PHONY: docker-image
+docker-image: docker-image-build docker-image-push ## di | Build & push Docker image to Docker Hub
+.PHONY: di
+di: docker-image
+
+.PHONY: docker-image-build
+docker-image-build: ## dib | Build Docker image locally - make tests
+ @docker build --pull \
+ --build-arg PGP_KEYSERVER=pgpkeys.uk \
+ --build-arg OTP_VERSION=$(OTP_VERSION) \
+ --build-arg OTP_SHA256=$(OTP_SHA256) \
+ --build-arg RABBITMQ_VERSION=$(BASED_ON_RABBITMQ_VERSION) \
+ --build-arg RABBITMQ_BUILD_NUMBER=$(RABBITMQ_BUILD_NUMBER) \
+ --build-arg RABBITMQ_PROMETHEUS_VERSION=$(RABBITMQ_VERSION) \
+ --tag $(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_VERSION) \
+ --tag $(DOCKER_IMAGE_NAME):latest .
+.PHONY: dib
+dib: docker-image-build
+
+.PHONY: docker-image-bump
+docker-image-bump: ## diu | Bump Docker image version across all docker-compose-* files
+ @sed -i '' \
+ -e 's|$(DOCKER_IMAGE_NAME):.*|$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_VERSION)|g' \
+ -e 's|pivotalrabbitmq/perf-test:.*|pivotalrabbitmq/perf-test:2.11.0-ubuntu|g' \
+ docker/docker-compose-{overview,dist-tls,qq}.yml
+.PHONY: diu
+diu: docker-image-bump
+
+.PHONY: docker-image-push
+docker-image-push: ## dip | Push local Docker image to Docker Hub
+ @docker push $(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_VERSION) && \
+ docker push $(DOCKER_IMAGE_NAME):latest
+.PHONY: dip
+dip: docker-image-push
+
+.PHONY: docker-image-run
+docker-image-run: ## dir | Run container with local Docker image
+ @docker run --interactive --tty \
+ --publish=5672:5672 \
+ --publish=15672:15672 \
+ --publish=15692:15692 \
+ $(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_VERSION)
+.PHONY: dir
+dir: docker-image-run
+
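+# Every docker/docker-compose-*.yml file doubles as a phony Make target:
+# invoking one runs `docker-compose up --detach` on it and follows its logs,
+# while `make down` overrides RUN so the same targets stop the stacks instead.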
+RUN ?= up --detach && docker-compose --file $(@F) logs --follow
+DOCKER_COMPOSE_FILES := $(wildcard docker/docker-compose-*.yml)
+.PHONY: $(DOCKER_COMPOSE_FILES)
+$(DOCKER_COMPOSE_FILES):
+ @cd docker && \
+ docker-compose --file $(@F) $(RUN) ; \
+ true
+.PHONY: down
+down: RUN = down
+down: $(DOCKER_COMPOSE_FILES) ## d | Stop all containers
+.PHONY: d
+d: down
+
+JQ := /usr/local/bin/jq
+$(JQ):
+ @brew install jq
+
+OTP_CURRENT_STABLE_MAJOR := 23
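+# Filters the GitHub tag refs down to OTP-$(OTP_CURRENT_STABLE_MAJOR).x
+# releases; `tail -n 1` assumes the API returns refs in ascending order, so
+# the last match is the newest patch release.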
+define LATEST_STABLE_OTP_VERSION
+curl --silent --fail https://api.github.com/repos/erlang/otp/git/refs/tags | \
+ $(JQ) -r '.[].ref | sub("refs/tags/OTP.{1}";"") | match("^$(OTP_CURRENT_STABLE_MAJOR)[0-9.]+$$") | .string' | \
+ tail -n 1
+endef
+.PHONY: find-latest-otp
+find-latest-otp: $(JQ) ## flo | Find latest OTP version archive + sha256
+ @printf "Version: " && \
+ export VERSION="$$($(LATEST_STABLE_OTP_VERSION))" && \
+ echo "$$VERSION" && \
+ printf "Checksum: " && \
+ wget --continue --quiet --output-document="/tmp/OTP-$$VERSION.tar.gz" "https://github.com/erlang/otp/archive/OTP-$$VERSION.tar.gz" && \
+ shasum -a 256 "/tmp/OTP-$$VERSION.tar.gz"
+.PHONY: flo
+flo: find-latest-otp
+
+# Defined as explicit, individual targets so that autocompletion works
+define DOCKER_COMPOSE_UP
+cd docker && \
+docker-compose --file docker-compose-$(@F).yml up --detach
+endef
+.PHONY: metrics
+metrics: ## m | Run all metrics containers: Grafana, Prometheus & friends
+ @$(DOCKER_COMPOSE_UP)
+.PHONY: m
+m: metrics
+.PHONY: overview
+overview: ## o | Make RabbitMQ Overview panels come alive
+ @$(DOCKER_COMPOSE_UP)
+.PHONY: o
+o: overview
+.PHONY: dist-tls
+dist-tls: ## dt | Make Erlang-Distribution panels come alive - HIGH LOAD
+ @$(DOCKER_COMPOSE_UP)
+.PHONY: dt
+dt: dist-tls
+.PHONY: qq
+qq: ## q | Make RabbitMQ-Quorum-Queues-Raft panels come alive - HIGH LOAD
+ @$(DOCKER_COMPOSE_UP)
+.PHONY: q
+q: qq
+
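+# Self-documenting help: prints every target annotated with a "## alias | description" comment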
+.PHONY: h
+h:
+ @awk -F"[:#]" '/^[^\.][a-zA-Z\._\-]+:+.+##.+$$/ { printf "\033[36m%-24s\033[0m %s\n", $$1, $$4 }' $(MAKEFILE_LIST) \
+ | sort
+# Continuous Feedback for the h target - run in a separate pane while iterating on it
+.PHONY: CFh
+CFh:
+ @watch -c $(MAKE) h
+
+# Defined as explicit, individual targets so that autocompletion works
+DASHBOARDS_TO_PATH := $(CURDIR)/docker/grafana/dashboards
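+# Merges a dashboard JSON export with __inputs.json and rewrites every panel
+# and template datasource to the ${DS_PROMETHEUS} variable, making the
+# dashboard portable across Grafana instances.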
+define GENERATE_DASHBOARD
+cd $(DASHBOARDS_TO_PATH) \
+&& $(JQ) --slurp add $(@F) ../__inputs.json \
+| $(JQ) '.templating.list[].datasource = "$${DS_PROMETHEUS}"' \
+| $(JQ) '.panels[].datasource = "$${DS_PROMETHEUS}"'
+endef
+.PHONY: Erlang-Distribution.json
+Erlang-Distribution.json: $(JQ)
+ @$(GENERATE_DASHBOARD)
+.PHONY: Erlang-Memory-Allocators.json
+Erlang-Memory-Allocators.json: $(JQ)
+ @$(GENERATE_DASHBOARD)
+.PHONY: Erlang-Distributions-Compare.json
+Erlang-Distributions-Compare.json: $(JQ)
+ @$(GENERATE_DASHBOARD)
+.PHONY: RabbitMQ-Overview.json
+RabbitMQ-Overview.json: $(JQ)
+ @$(GENERATE_DASHBOARD)
+.PHONY: RabbitMQ-PerfTest.json
+RabbitMQ-PerfTest.json: $(JQ)
+ @$(GENERATE_DASHBOARD)
+.PHONY: RabbitMQ-Quorum-Queues-Raft.json
+RabbitMQ-Quorum-Queues-Raft.json: $(JQ)
+ @$(GENERATE_DASHBOARD)
+.PHONY: rabbitmq-exporter_vs_rabbitmq-prometheus.json
+rabbitmq-exporter_vs_rabbitmq-prometheus.json: $(JQ)
+ @$(GENERATE_DASHBOARD)
+
diff --git a/deps/rabbitmq_prometheus/README.md b/deps/rabbitmq_prometheus/README.md
new file mode 100644
index 0000000000..5aeb625ca3
--- /dev/null
+++ b/deps/rabbitmq_prometheus/README.md
@@ -0,0 +1,104 @@
+[![Build](https://img.shields.io/github/workflow/status/rabbitmq/rabbitmq-prometheus/Test)](https://github.com/rabbitmq/rabbitmq-prometheus/actions?query=workflow%3ATest)
+[![Grafana Dashboards](https://img.shields.io/badge/Grafana-6%20dashboards-blue)](https://grafana.com/orgs/rabbitmq)
+
+# Prometheus Exporter of Core RabbitMQ Metrics
+
+## Getting Started
+
+This is a Prometheus exporter of core RabbitMQ metrics, developed by the RabbitMQ core team.
+It is largely a "clean room" design that reuses some prior work from community-built Prometheus exporters.
+
+## Project Maturity
+
+This plugin is new as of RabbitMQ `3.8.0`.
+
+## Documentation
+
+See [Monitoring RabbitMQ with Prometheus and Grafana](https://www.rabbitmq.com/prometheus.html).
+
+
+## Installation
+
+This plugin is included in RabbitMQ 3.8.x releases. Like all [plugins](https://www.rabbitmq.com/plugins.html), it has to be
+[enabled](https://www.rabbitmq.com/plugins.html#ways-to-enable-plugins) before it can be used:
+
+To enable it with [rabbitmq-plugins](http://www.rabbitmq.com/man/rabbitmq-plugins.1.man.html):
+
+``` shell
+rabbitmq-plugins enable rabbitmq_prometheus
+```
+
+## Usage
+
+See the [documentation guide](https://www.rabbitmq.com/prometheus.html).
+
+The default port used by the plugin is `15692` and the endpoint path is `/metrics`.
+To try it with `curl`:
+
+```shell
+curl -v -H "Accept:text/plain" "http://localhost:15692/metrics"
+```
+
+In most environments, no configuration is necessary.
+
+See the entire list of [metrics](metrics.md) exposed via the default port.
+
+
+## Configuration
+
+This exporter supports the following options via a set of `prometheus.*` configuration keys:
+
+ * `prometheus.return_per_object_metrics` returns [individual (per object) metrics that are not aggregated](https://www.rabbitmq.com/prometheus.html#metric-aggregation) (default is `false`).
+ * `prometheus.path` defines a scrape endpoint (default is `"/metrics"`).
+ * `prometheus.tcp.*` controls HTTP listener settings that match [those used by the RabbitMQ HTTP API](https://www.rabbitmq.com/management.html#configuration).
+ * `prometheus.ssl.*` controls TLS (HTTPS) listener settings that match [those used by the RabbitMQ HTTP API](https://www.rabbitmq.com/management.html#single-listener-https).
+
+Sample configuration snippet:
+
+```ini
+# these values are defaults
+prometheus.return_per_object_metrics = false
+prometheus.path = /metrics
+prometheus.tcp.port = 15692
+```
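+
+The TLS (HTTPS) listener uses its own set of keys. A minimal sketch, assuming
+the `prometheus.ssl.*` keys mirror the management plugin's TLS options, with
+placeholder certificate paths:
+
+```ini
+prometheus.ssl.port = 15691
+prometheus.ssl.cacertfile = /path/to/ca_certificate.pem
+prometheus.ssl.certfile = /path/to/server_certificate.pem
+prometheus.ssl.keyfile = /path/to/server_key.pem
+```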
+
+When metrics are returned per object, a node with 80k queues has been measured to take 58 seconds to return 1.9 million metrics in a 98MB response payload.
+To avoid putting unnecessary pressure on your metrics system, metrics are aggregated by default.
+
+When debugging, it may be useful to return metrics per object (unaggregated).
+This can be enabled on the fly, without restarting or reconfiguring RabbitMQ, using the following command:
+
+```shell
+rabbitmqctl eval 'application:set_env(rabbitmq_prometheus, return_per_object_metrics, true).'
+```
+
+To go back to aggregated metrics on the fly, run the following command:
+
+```shell
+rabbitmqctl eval 'application:set_env(rabbitmq_prometheus, return_per_object_metrics, false).'
+```
+
+
+## Contributing
+
+See [CONTRIBUTING.md](https://github.com/rabbitmq/rabbitmq-prometheus/blob/master/CONTRIBUTING.md).
+
+
+## Makefile
+
+This project uses [erlang.mk](https://erlang.mk/); running `make help` will display erlang.mk's help.
+
+To see all custom targets that have been documented, run `make h`.
+
+For Bash shell autocompletion, run `eval "$(make autocomplete)"`, then type `make a<TAB>` to see all Make targets starting with the letter `a`, e.g.:
+
+```sh
+$ make a<TAB>
+ac all.coverdata app-build apps apps-eunit asciidoc-guide autocomplete
+all app app-c_src apps-ct asciidoc asciidoc-manual
+```
+
+
+## Copyright
+
+(c) 2007-2020 VMware, Inc. or its affiliates.
diff --git a/deps/rabbitmq_prometheus/docker/docker-compose-dist-tls.yml b/deps/rabbitmq_prometheus/docker/docker-compose-dist-tls.yml
new file mode 100644
index 0000000000..241de1c919
--- /dev/null
+++ b/deps/rabbitmq_prometheus/docker/docker-compose-dist-tls.yml
@@ -0,0 +1,86 @@
+# https://docs.docker.com/compose/compose-file/
+version: "3.6"
+
+# https://docs.docker.com/compose/compose-file/#networks
+networks:
+ rabbitmq-prometheus:
+
+# https://docs.docker.com/compose/compose-file/#volumes
+volumes:
+ rabbitmq-prometheus_prometheus:
+ rabbitmq-prometheus_grafana:
+
+services:
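+  # `&rabbitmq` is a YAML anchor; rmq1-dist-tls and rmq2-dist-tls below merge
+  # it via `<< :` and override only the hostname and published ports.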
+ rmq0-dist-tls: &rabbitmq
+ # https://hub.docker.com/r/pivotalrabbitmq/rabbitmq-prometheus/tags
+ image: pivotalrabbitmq/rabbitmq-prometheus:3.9.0-alpha.349-2020.06.18
+ networks:
+ - "rabbitmq-prometheus"
+ ports:
+ - "5676:5672"
+ - "15676:15672"
+ - "15696:15692"
+ # https://unix.stackexchange.com/questions/71940/killing-tcp-connection-in-linux
+ # https://en.wikipedia.org/wiki/Tcpkill
+ # https://www.digitalocean.com/community/tutorials/iptables-essentials-common-firewall-rules-and-commands#block-an-ip-address
+ cap_add:
+ - ALL
+ hostname: rmq0-dist-tls
+ environment:
+ RABBITMQ_ERLANG_COOKIE: rabbitmq-prometheus
+ # Uncomment the following line if you want debug logs & colour
+ # RABBITMQ_LOG: debug,+color
+ volumes:
+ # This does not work that well on Windows
+ # https://github.com/rabbitmq/rabbitmq-prometheus/commit/c4b04ea9bae877ff7d22a7085475965016933d91#commitcomment-40660523
+ # - ./erlang.cookie:/var/lib/rabbitmq/.erlang.cookie
+ - ./rabbitmq-dist-tls.conf:/etc/rabbitmq/rabbitmq.conf:ro
+ - ./rabbitmq-env.conf:/etc/rabbitmq/rabbitmq-env.conf:ro
+ - ./rabbitmq-ssl_dist.config:/etc/rabbitmq/ssl_dist.config:ro
+ - ./rabbitmq-dist-tls-definitions.json:/etc/rabbitmq/rabbitmq-definitions.json:ro
+ - ./rabbitmq-ssl:/etc/rabbitmq/ssl:ro
+ # we want to simulate hitting thresholds
+ ulimits:
+ nofile:
+ soft: "2000"
+ hard: "2000"
+ rmq1-dist-tls:
+ << : *rabbitmq
+ hostname: rmq1-dist-tls
+ ports:
+ - "5677:5672"
+ - "15677:15672"
+ - "15697:15692"
+ rmq2-dist-tls:
+ << : *rabbitmq
+ hostname: rmq2-dist-tls
+ ports:
+ - "5678:5672"
+ - "15678:15672"
+ - "15698:15692"
+ stress-dist-tls:
+ # https://hub.docker.com/r/pivotalrabbitmq/perf-test/tags
+ image: &perf-test-image pivotalrabbitmq/perf-test:2.11.0-ubuntu
+ networks:
+ - "rabbitmq-prometheus"
+ environment:
+ URI: "amqp://guest:guest@rmq0-dist-tls:5672/%2f"
+ QUEUE_PATTERN: "ha3-stress_dist-%d"
+ QUEUE_PATTERN_FROM: 1
+ QUEUE_PATTERN_TO: 10
+ PRODUCERS: 10
+ CONSUMERS: 10
+ ROUTING_KEY: max1
+ SIZE: 512000
+ QOS: 100
+ AUTOACK: "false"
+ VARIABLE_RATE: "1:30,20:30,40:30"
+ SERVERS_STARTUP_TIMEOUT: &startup_timeout 30
+ METRICS_PROMETHEUS: "true"
+ rabbitmq-exporter:
+ # https://hub.docker.com/r/kbudde/rabbitmq-exporter/tags
+ image: kbudde/rabbitmq-exporter:v0.29.0
+ networks:
+ - "rabbitmq-prometheus"
+ environment:
+ RABBIT_URL: "http://rmq0-dist-tls:15672"
diff --git a/deps/rabbitmq_prometheus/docker/docker-compose-metrics.yml b/deps/rabbitmq_prometheus/docker/docker-compose-metrics.yml
new file mode 100644
index 0000000000..c977821f8d
--- /dev/null
+++ b/deps/rabbitmq_prometheus/docker/docker-compose-metrics.yml
@@ -0,0 +1,73 @@
+# https://docs.docker.com/compose/compose-file/
+version: "3.6"
+
+# https://docs.docker.com/compose/compose-file/#networks
+networks:
+ rabbitmq-prometheus:
+
+# https://docs.docker.com/compose/compose-file/#volumes
+volumes:
+ rabbitmq-prometheus_prometheus:
+ rabbitmq-prometheus_grafana:
+
+services:
+ grafana:
+ # https://hub.docker.com/r/grafana/grafana/tags
+ image: grafana/grafana:7.3.2
+ ports:
+ - "3000:3000"
+ networks:
+ - "rabbitmq-prometheus"
+ volumes:
+ - rabbitmq-prometheus_grafana:/var/lib/grafana
+ - ./grafana/dashboards.yml:/etc/grafana/provisioning/dashboards/rabbitmq.yaml
+ - ./grafana/datasources.yml:/etc/grafana/provisioning/datasources/prometheus.yaml
+ - ./grafana/dashboards:/dashboards
+ environment:
+ # https://grafana.com/plugins/flant-statusmap-panel
+ # https://grafana.com/plugins/grafana-piechart-panel
+ # https://grafana.com/plugins/grafana-polystat-panel
+ # https://grafana.com/plugins/jdbranham-diagram-panel
+ # https://grafana.com/plugins/michaeldmoore-multistat-panel
+ # https://grafana.com/plugins/vonage-status-panel
+ # https://grafana.com/plugins/yesoreyeram-boomtable-panel
+ GF_INSTALL_PLUGINS: "flant-statusmap-panel,grafana-piechart-panel"
+ prometheus:
+ # https://hub.docker.com/r/prom/prometheus/tags
+ image: prom/prometheus:v2.22.1
+ networks:
+ - "rabbitmq-prometheus"
+ ports:
+ - "9090:9090"
+ volumes:
+ - rabbitmq-prometheus_prometheus:/prometheus
+ - ./prometheus.yml:/etc/prometheus/prometheus.yml
+ node-exporter:
+ command:
+ - '--path.procfs=/host/proc'
+ - '--path.rootfs=/rootfs'
+ - '--path.sysfs=/host/sys'
+ - '--collector.filesystem.ignored-mount-points=^/(sys|proc|dev|host|etc)($$|/)'
+ expose:
+ - 9100
+ # https://hub.docker.com/r/prom/node-exporter/tags
+ image: prom/node-exporter:v1.0.1
+ networks:
+ - "rabbitmq-prometheus"
+ volumes:
+ - /proc:/host/proc:ro
+ - /sys:/host/sys:ro
+ - /:/rootfs:ro
+ cadvisor:
+ expose:
+ - 8080
+ # https://hub.docker.com/r/google/cadvisor/tags
+ image: google/cadvisor:v0.33.0
+ networks:
+ - "rabbitmq-prometheus"
+ volumes:
+ - /:/rootfs:ro
+ - /var/run:/var/run:rw
+ - /sys:/sys:ro
+ - /var/lib/docker/:/var/lib/docker:ro
+      #- /cgroup:/cgroup:ro # works on Linux only, not on macOS
diff --git a/deps/rabbitmq_prometheus/docker/docker-compose-overview.yml b/deps/rabbitmq_prometheus/docker/docker-compose-overview.yml
new file mode 100644
index 0000000000..92f039b30a
--- /dev/null
+++ b/deps/rabbitmq_prometheus/docker/docker-compose-overview.yml
@@ -0,0 +1,187 @@
+# https://docs.docker.com/compose/compose-file/
+version: "3.6"
+
+# https://docs.docker.com/compose/compose-file/#networks
+networks:
+ rabbitmq-prometheus:
+
+# https://docs.docker.com/compose/compose-file/#volumes
+volumes:
+ rabbitmq-prometheus_prometheus:
+ rabbitmq-prometheus_grafana:
+
+services:
+ rmq0: &rabbitmq
+ # https://hub.docker.com/r/pivotalrabbitmq/rabbitmq-prometheus/tags
+ image: pivotalrabbitmq/rabbitmq-prometheus:3.9.0-alpha.349-2020.06.18
+ networks:
+ - "rabbitmq-prometheus"
+ ports:
+ - "5673:5672"
+ - "15673:15672"
+ - "15693:15692"
+ # https://unix.stackexchange.com/questions/71940/killing-tcp-connection-in-linux
+ # https://en.wikipedia.org/wiki/Tcpkill
+ # https://www.digitalocean.com/community/tutorials/iptables-essentials-common-firewall-rules-and-commands#block-an-ip-address
+ cap_add:
+ - ALL
+ hostname: rmq0
+ environment:
+ RABBITMQ_ERLANG_COOKIE: rabbitmq-prometheus
+ # Uncomment the following line if you want debug logs & colour
+ # RABBITMQ_LOG: debug,+color
+ volumes:
+ # This does not work that well on Windows
+ # https://github.com/rabbitmq/rabbitmq-prometheus/commit/c4b04ea9bae877ff7d22a7085475965016933d91#commitcomment-40660523
+ # - ./erlang.cookie:/var/lib/rabbitmq/.erlang.cookie
+ - ./rabbitmq-overview.conf:/etc/rabbitmq/rabbitmq.conf:ro
+ - ./rabbitmq-overview-definitions.json:/etc/rabbitmq/rabbitmq-definitions.json:ro
+ # we want to simulate hitting thresholds
+ ulimits:
+ nofile:
+ soft: "2000"
+ hard: "2000"
+ rmq1:
+ << : *rabbitmq
+ hostname: rmq1
+ ports:
+ - "5674:5672"
+ - "15674:15672"
+ - "15694:15692"
+ rmq2:
+ << : *rabbitmq
+ hostname: rmq2
+ ports:
+ - "5675:5672"
+ - "15675:15672"
+ - "15695:15692"
+
+ basic-get:
+ # https://hub.docker.com/r/pivotalrabbitmq/perf-test/tags
+ image: &perf-test-image pivotalrabbitmq/perf-test:2.11.0-ubuntu
+ networks:
+ - "rabbitmq-prometheus"
+ environment:
+ URI: "amqp://guest:guest@rmq0:5672/%2f"
+ QUEUE: basic-get
+ ROUTING_KEY: basic-get
+ VARIABLE_RATE: "1:1,0:30"
+ POLLING: "true"
+ POLLING_INTERVAL: 5000
+ AUTOACK: "false"
+ SERVERS_STARTUP_TIMEOUT: &startup_timeout 60
+ METRICS_PROMETHEUS: "true"
+ basic-get-auto:
+ image: *perf-test-image
+ networks:
+ - "rabbitmq-prometheus"
+ environment:
+ URI: "amqp://guest:guest@rmq0:5672/%2f"
+ QUEUE: basic-get
+ ROUTING_KEY: basic-get
+ PRODUCERS: 0
+ POLLING: "true"
+ POLLING_INTERVAL: 5000
+ AUTOACK: "true"
+ SERVERS_STARTUP_TIMEOUT: *startup_timeout
+ METRICS_PROMETHEUS: "true"
+ greedy-consumer:
+ image: *perf-test-image
+ networks:
+ - "rabbitmq-prometheus"
+ environment:
+ URI: "amqp://guest:guest@rmq0:5672/%2f"
+ QUEUE: greedy-consumer
+ ROUTING_KEY: greedy-consumer
+ VARIABLE_RATE: "100:20,0:20"
+ CONSUMER_RATE: 50
+ QOS: 2000
+ AUTOACK: "false"
+ SERVERS_STARTUP_TIMEOUT: *startup_timeout
+ METRICS_PROMETHEUS: "true"
+ publisher-confirms:
+ image: *perf-test-image
+ networks:
+ - "rabbitmq-prometheus"
+ environment:
+ URI: "amqp://guest:guest@rmq1:5672/%2f"
+ QUEUE: publisher-confirms
+ ROUTING_KEY: publisher-confirms
+ AUTOACK: "true"
+ VARIABLE_RATE: "12:30,25:30,50:30,100:30"
+ CONFIRM: 1
+ CONFIRM_TIMEOUT: 1
+ SERVERS_STARTUP_TIMEOUT: *startup_timeout
+ METRICS_PROMETHEUS: "true"
+ slow-consumer-persistent:
+ image: *perf-test-image
+ networks:
+ - "rabbitmq-prometheus"
+ environment:
+ URI: "amqp://guest:guest@rmq1:5672/%2f"
+ QUEUE: ha3-slow-consumer-persistent
+ ROUTING_KEY: slow-consumer-persistent
+ QUEUE_ARGS: x-max-length=10000
+ FLAG: persistent
+ AUTO_DELETE: "false"
+ SIZE: 51200
+ VARIABLE_RATE: "100:20,0:20"
+ CONSUMER_RATE: 50
+ QOS: 50
+ AUTOACK: "false"
+ SERVERS_STARTUP_TIMEOUT: *startup_timeout
+ METRICS_PROMETHEUS: "true"
+ nack:
+ image: *perf-test-image
+ networks:
+ - "rabbitmq-prometheus"
+ entrypoint: []
+ command: /bin/bash -c "while true; do bin/runjava com.rabbitmq.perf.PerfTest; sleep 10; done"
+ environment:
+ TIME: 60
+ URI: "amqp://guest:guest@rmq2:5672/%2f"
+ VARIABLE_RATE: "1:10,0:20"
+ QUEUE: nack
+ QUEUE_ARGS: x-max-length=100
+ ROUTING_KEY: nack
+ AUTOACK: "false"
+ NACK: "true"
+ QOS: 5
+ CONSUMER_LATENCY: 3000000
+ SERVERS_STARTUP_TIMEOUT: *startup_timeout
+ METRICS_PROMETHEUS: "true"
+ unroutable-return:
+ image: *perf-test-image
+ networks:
+ - "rabbitmq-prometheus"
+ environment:
+ URI: "amqp://guest:guest@rmq2:5672/%2f"
+ VARIABLE_RATE: "2:30,4:30,10:30"
+ VARIABLE_SIZE: "100:30,200:30"
+ CONSUMERS: 0
+ FLAG: mandatory
+ SERVERS_STARTUP_TIMEOUT: *startup_timeout
+ METRICS_PROMETHEUS: "true"
+ unroutable-drop:
+ image: *perf-test-image
+ networks:
+ - "rabbitmq-prometheus"
+ environment:
+ URI: "amqp://guest:guest@rmq2:5672/%2f"
+ VARIABLE_RATE: "5:30,10:30,20:30"
+ VARIABLE_SIZE: "100:30,200:30"
+ CONSUMERS: 0
+ SERVERS_STARTUP_TIMEOUT: *startup_timeout
+ METRICS_PROMETHEUS: "true"
+ # many-queues:
+ # image: *perf-test-image
+ # networks:
+ # - "rabbitmq-prometheus"
+ # environment:
+ # URI: "amqp://guest:guest@rmq1:5672/%2f"
+ # QUEUE_PATTERN: "mq%d"
+ # QUEUE_PATTERN_FROM: 1
+ # QUEUE_PATTERN_TO: 1000
+ # RATE: 1
+ # SERVERS_STARTUP_TIMEOUT: *startup_timeout
+ # METRICS_PROMETHEUS: "true"
diff --git a/deps/rabbitmq_prometheus/docker/docker-compose-qq.yml b/deps/rabbitmq_prometheus/docker/docker-compose-qq.yml
new file mode 100644
index 0000000000..846fd1ba64
--- /dev/null
+++ b/deps/rabbitmq_prometheus/docker/docker-compose-qq.yml
@@ -0,0 +1,72 @@
+# https://docs.docker.com/compose/compose-file/
+version: "3.6"
+
+# https://docs.docker.com/compose/compose-file/#networks
+networks:
+ rabbitmq-prometheus:
+
+# https://docs.docker.com/compose/compose-file/#volumes
+volumes:
+ rabbitmq-prometheus_prometheus:
+ rabbitmq-prometheus_grafana:
+
+services:
+ rmq0-qq: &rabbitmq
+ # https://hub.docker.com/r/pivotalrabbitmq/rabbitmq-prometheus/tags
+ image: pivotalrabbitmq/rabbitmq-prometheus:3.9.0-alpha.349-2020.06.18
+ networks:
+ - "rabbitmq-prometheus"
+ ports:
+ - "5679:5672"
+ - "15679:15672"
+ - "15699:15692"
+ # https://unix.stackexchange.com/questions/71940/killing-tcp-connection-in-linux
+ # https://en.wikipedia.org/wiki/Tcpkill
+ # https://www.digitalocean.com/community/tutorials/iptables-essentials-common-firewall-rules-and-commands#block-an-ip-address
+ cap_add:
+ - ALL
+ hostname: rmq0-qq
+ environment:
+ RABBITMQ_ERLANG_COOKIE: rabbitmq-prometheus
+ # Uncomment the following line if you want debug logs & colour
+ # RABBITMQ_LOG: debug,+color
+ volumes:
+ # This does not work that well on Windows
+ # https://github.com/rabbitmq/rabbitmq-prometheus/commit/c4b04ea9bae877ff7d22a7085475965016933d91#commitcomment-40660523
+ # - ./erlang.cookie:/var/lib/rabbitmq/.erlang.cookie
+ - ./rabbitmq-qq.conf:/etc/rabbitmq/rabbitmq.conf:ro
+ - ./rabbitmq-qq-env.conf:/etc/rabbitmq/rabbitmq-env.conf:ro
+ - ./rabbitmq-qq-definitions.json:/etc/rabbitmq/rabbitmq-definitions.json:ro
+ rmq1-qq:
+ << : *rabbitmq
+ hostname: rmq1-qq
+ ports:
+ - "5680:5672"
+ - "15680:15672"
+ - "15700:15692"
+ rmq2-qq:
+ << : *rabbitmq
+ hostname: rmq2-qq
+ ports:
+ - "5681:5672"
+ - "15681:15672"
+ - "15701:15692"
+ qq-moderate-load:
+ image: &perf-test-image pivotalrabbitmq/perf-test:2.11.0-ubuntu
+ networks:
+ - "rabbitmq-prometheus"
+ environment:
+ URIS: "amqp://guest:guest@rmq0-qq:5672/%2f,amqp://guest:guest@rmq1-qq:5672/%2f,amqp://guest:guest@rmq2-qq:5672/%2f"
+ CONFIRM: 50
+ QUEUE_PATTERN: "qq%d"
+ QUEUE_PATTERN_FROM: 1
+ QUEUE_PATTERN_TO: 10
+ PRODUCERS: 10
+ CONSUMERS: 10
+ QUEUE_ARGS: x-queue-type=quorum,x-max-length=1000
+ FLAG: persistent
+ AUTO_DELETE: "false"
+ RATE: 50
+ AUTOACK: "false"
+ SERVERS_STARTUP_TIMEOUT: &startup_timeout 30
+ METRICS_PROMETHEUS: "true"
diff --git a/deps/rabbitmq_prometheus/docker/docker-entrypoint.sh b/deps/rabbitmq_prometheus/docker/docker-entrypoint.sh
new file mode 100755
index 0000000000..08cd3bcf12
--- /dev/null
+++ b/deps/rabbitmq_prometheus/docker/docker-entrypoint.sh
@@ -0,0 +1,404 @@
+#!/bin/bash
+set -eu
+
+# usage: file_env VAR [DEFAULT]
+# ie: file_env 'XYZ_DB_PASSWORD' 'example'
+# (will allow for "$XYZ_DB_PASSWORD_FILE" to fill in the value of
+# "$XYZ_DB_PASSWORD" from a file, especially for Docker's secrets feature)
+file_env() {
+ local var="$1"
+ local fileVar="${var}_FILE"
+ local def="${2:-}"
+ if [ "${!var:-}" ] && [ "${!fileVar:-}" ]; then
+ echo >&2 "error: both $var and $fileVar are set (but are exclusive)"
+ exit 1
+ fi
+ local val="$def"
+ if [ "${!var:-}" ]; then
+ val="${!var}"
+ elif [ "${!fileVar:-}" ]; then
+ val="$(< "${!fileVar}")"
+ fi
+ export "$var"="$val"
+ unset "$fileVar"
+}
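+# For example (hypothetical secret path): starting the container with
+# RABBITMQ_DEFAULT_PASS_FILE=/run/secrets/rabbitmq-pass makes the loop further
+# below export RABBITMQ_DEFAULT_PASS with that file's contents.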
+
+# backwards compatibility for old environment variables
+: "${RABBITMQ_SSL_CERTFILE:=${RABBITMQ_SSL_CERT_FILE:-}}"
+: "${RABBITMQ_SSL_KEYFILE:=${RABBITMQ_SSL_KEY_FILE:-}}"
+: "${RABBITMQ_SSL_CACERTFILE:=${RABBITMQ_SSL_CA_FILE:-}}"
+
+# "management" SSL config should default to using the same certs
+: "${RABBITMQ_MANAGEMENT_SSL_CACERTFILE:=$RABBITMQ_SSL_CACERTFILE}"
+: "${RABBITMQ_MANAGEMENT_SSL_CERTFILE:=$RABBITMQ_SSL_CERTFILE}"
+: "${RABBITMQ_MANAGEMENT_SSL_KEYFILE:=$RABBITMQ_SSL_KEYFILE}"
+
+# Allowed env vars that will be read from mounted files (i.e. Docker Secrets):
+fileEnvKeys=(
+ default_user
+ default_pass
+)
+
+# https://www.rabbitmq.com/configure.html
+sslConfigKeys=(
+ cacertfile
+ certfile
+ depth
+ fail_if_no_peer_cert
+ keyfile
+ verify
+)
+managementConfigKeys=(
+ "${sslConfigKeys[@]/#/ssl_}"
+)
+rabbitConfigKeys=(
+ default_pass
+ default_user
+ default_vhost
+ hipe_compile
+ vm_memory_high_watermark
+)
+fileConfigKeys=(
+ management_ssl_cacertfile
+ management_ssl_certfile
+ management_ssl_keyfile
+ ssl_cacertfile
+ ssl_certfile
+ ssl_keyfile
+)
+allConfigKeys=(
+ "${managementConfigKeys[@]/#/management_}"
+ "${rabbitConfigKeys[@]}"
+ "${sslConfigKeys[@]/#/ssl_}"
+)
+
+declare -A configDefaults=(
+ [management_ssl_fail_if_no_peer_cert]='false'
+ [management_ssl_verify]='verify_none'
+
+ [ssl_fail_if_no_peer_cert]='true'
+ [ssl_verify]='verify_peer'
+)
+
+# allow the container to be started with `--user`
+if [[ "$1" == rabbitmq* ]] && [ "$(id -u)" = '0' ]; then
+ # this needs to happen late enough that we have the SSL config
+ # https://github.com/docker-library/rabbitmq/issues/283
+ for conf in "${allConfigKeys[@]}"; do
+ var="RABBITMQ_${conf^^}"
+ val="${!var:-}"
+ [ -n "$val" ] || continue
+ case "$conf" in
+ *_ssl_*file | ssl_*file )
+ if [ -f "$val" ] && ! gosu rabbitmq test -r "$val"; then
+ newFile="/tmp/rabbitmq-ssl/$conf.pem"
+ echo >&2
+ echo >&2 "WARNING: '$val' ($var) is not readable by rabbitmq ($(id rabbitmq)); copying to '$newFile'"
+ echo >&2
+ cat "$val" > "$newFile"
+ chown rabbitmq "$newFile"
+ chmod 0400 "$newFile"
+ eval 'export '$var'="$newFile"'
+ fi
+ ;;
+ esac
+ done
+
+ if [ "$1" = 'rabbitmq-server' ]; then
+ find /var/lib/rabbitmq \! -user rabbitmq -exec chown rabbitmq '{}' +
+ fi
+
+ exec gosu rabbitmq "$BASH_SOURCE" "$@"
+fi
+
+haveConfig=
+haveSslConfig=
+haveManagementSslConfig=
+for fileEnvKey in "${fileEnvKeys[@]}"; do file_env "RABBITMQ_${fileEnvKey^^}"; done
+for conf in "${allConfigKeys[@]}"; do
+ var="RABBITMQ_${conf^^}"
+ val="${!var:-}"
+ if [ "$val" ]; then
+ if [ "${configDefaults[$conf]:-}" ] && [ "${configDefaults[$conf]}" = "$val" ]; then
+ # if the value set is the same as the default, treat it as if it isn't set
+ continue
+ fi
+ haveConfig=1
+ case "$conf" in
+ ssl_*) haveSslConfig=1 ;;
+ management_ssl_*) haveManagementSslConfig=1 ;;
+ esac
+ fi
+done
+if [ "$haveSslConfig" ]; then
+ missing=()
+ for sslConf in cacertfile certfile keyfile; do
+ var="RABBITMQ_SSL_${sslConf^^}"
+ val="${!var}"
+ if [ -z "$val" ]; then
+ missing+=( "$var" )
+ fi
+ done
+ if [ "${#missing[@]}" -gt 0 ]; then
+ {
+ echo
+ echo 'error: SSL requested, but missing required configuration'
+ for miss in "${missing[@]}"; do
+ echo " - $miss"
+ done
+ echo
+ } >&2
+ exit 1
+ fi
+fi
+missingFiles=()
+for conf in "${fileConfigKeys[@]}"; do
+ var="RABBITMQ_${conf^^}"
+ val="${!var}"
+ if [ "$val" ] && [ ! -f "$val" ]; then
+ missingFiles+=( "$val ($var)" )
+ fi
+done
+if [ "${#missingFiles[@]}" -gt 0 ]; then
+ {
+ echo
+ echo 'error: files specified, but missing'
+ for miss in "${missingFiles[@]}"; do
+ echo " - $miss"
+ done
+ echo
+ } >&2
+ exit 1
+fi
+
+# set defaults for missing values (but only after we're done with all our checking so we don't throw any of that off)
+for conf in "${!configDefaults[@]}"; do
+ default="${configDefaults[$conf]}"
+ var="RABBITMQ_${conf^^}"
+ [ -z "${!var:-}" ] || continue
+ eval "export $var=\"\$default\""
+done
+
+# if long and short hostnames are not the same, use long hostnames
+if [ "$(hostname)" != "$(hostname -s)" ]; then
+ : "${RABBITMQ_USE_LONGNAME:=true}"
+fi
+
+if [ "${RABBITMQ_ERLANG_COOKIE:-}" ]; then
+ cookieFile='/var/lib/rabbitmq/.erlang.cookie'
+ if [ -e "$cookieFile" ]; then
+ if [ "$(cat "$cookieFile" 2>/dev/null)" != "$RABBITMQ_ERLANG_COOKIE" ]; then
+ echo >&2
+ echo >&2 "warning: $cookieFile contents do not match RABBITMQ_ERLANG_COOKIE"
+ echo >&2
+ fi
+ else
+ echo "$RABBITMQ_ERLANG_COOKIE" > "$cookieFile"
+ fi
+ chmod 600 "$cookieFile"
+fi
+
+configBase="${RABBITMQ_CONFIG_FILE:-/etc/rabbitmq/rabbitmq}"
+oldConfigFile="$configBase.config"
+newConfigFile="$configBase.conf"
+
+shouldWriteConfig="$haveConfig"
+if [ -n "$shouldWriteConfig" ] && [ -f "$oldConfigFile" ]; then
+ {
+ echo "error: Docker configuration environment variables specified, but old-style (Erlang syntax) configuration file '$oldConfigFile' exists"
+ echo " Suggested fixes: (choose one)"
+ echo " - remove '$oldConfigFile'"
+ echo " - remove any Docker-specific 'RABBITMQ_...' environment variables"
+ echo " - convert '$oldConfigFile' to the newer sysctl format ('$newConfigFile'); see https://www.rabbitmq.com/configure.html#config-file"
+ } >&2
+ exit 1
+fi
+if [ -z "$shouldWriteConfig" ] && [ ! -f "$oldConfigFile" ] && [ ! -f "$newConfigFile" ]; then
+ # no config files, we should write one
+ shouldWriteConfig=1
+fi
+
+# http://stackoverflow.com/a/2705678/433558
+sed_escape_lhs() {
+ echo "$@" | sed -e 's/[]\/$*.^|[]/\\&/g'
+}
+sed_escape_rhs() {
+ echo "$@" | sed -e 's/[\/&]/\\&/g'
+}
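+# Idempotent upsert into the sysctl-format config file: replaces an existing
+# "key = value" line in place, or appends one if the key is not present yet.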
+rabbit_set_config() {
+ local key="$1"; shift
+ local val="$1"; shift
+
+ [ -e "$newConfigFile" ] || touch "$newConfigFile"
+
+ local sedKey="$(sed_escape_lhs "$key")"
+ local sedVal="$(sed_escape_rhs "$val")"
+ sed -ri \
+ "s/^[[:space:]]*(${sedKey}[[:space:]]*=[[:space:]]*)\S.*\$/\1${sedVal}/" \
+ "$newConfigFile"
+ if ! grep -qE "^${sedKey}[[:space:]]*=" "$newConfigFile"; then
+ echo "$key = $val" >> "$newConfigFile"
+ fi
+}
+rabbit_comment_config() {
+ local key="$1"; shift
+
+ [ -e "$newConfigFile" ] || touch "$newConfigFile"
+
+ local sedKey="$(sed_escape_lhs "$key")"
+ sed -ri \
+ "s/^[[:space:]]*#?[[:space:]]*(${sedKey}[[:space:]]*=[[:space:]]*\S.*)\$/# \1/" \
+ "$newConfigFile"
+}
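+# Maps RABBITMQ_[PREFIX_]<conf> environment variables onto config keys
+# (nested under ssl_options. or management.listener.ssl_opts. as appropriate),
+# normalising boolean-ish values; an empty or unset variable gets its key
+# commented out instead.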
+rabbit_env_config() {
+ local prefix="$1"; shift
+
+ local conf
+ for conf; do
+ local var="rabbitmq${prefix:+_$prefix}_$conf"
+ var="${var^^}"
+
+ local key="$conf"
+ case "$prefix" in
+ ssl) key="ssl_options.$key" ;;
+ management_ssl) key="management.listener.ssl_opts.$key" ;;
+ esac
+
+ local val="${!var:-}"
+ local rawVal="$val"
+ case "$conf" in
+ fail_if_no_peer_cert|hipe_compile)
+ case "${val,,}" in
+ false|no|0|'') rawVal='false' ;;
+ true|yes|1|*) rawVal='true' ;;
+ esac
+ ;;
+
+ vm_memory_high_watermark) continue ;; # handled separately
+ esac
+
+ if [ -n "$rawVal" ]; then
+ rabbit_set_config "$key" "$rawVal"
+ else
+ rabbit_comment_config "$key"
+ fi
+ done
+}
+
+if [ "$1" = 'rabbitmq-server' ] && [ "$shouldWriteConfig" ]; then
+ rabbit_set_config 'loopback_users.guest' 'false'
+
+ # determine whether to set "vm_memory_high_watermark" (based on cgroups)
+ memTotalKb=
+ if [ -r /proc/meminfo ]; then
+ memTotalKb="$(awk -F ':? +' '$1 == "MemTotal" { print $2; exit }' /proc/meminfo)"
+ fi
+ memLimitB=
+ if [ -r /sys/fs/cgroup/memory/memory.limit_in_bytes ]; then
+ # "18446744073709551615" is a valid value for "memory.limit_in_bytes", which is too big for Bash math to handle
+ # "$(( 18446744073709551615 / 1024 ))" = 0; "$(( 18446744073709551615 * 40 / 100 ))" = 0
+ memLimitB="$(awk -v totKb="$memTotalKb" '{
+ limB = $0;
+ limKb = limB / 1024;
+ if (!totKb || limKb < totKb) {
+ printf "%.0f\n", limB;
+ }
+ }' /sys/fs/cgroup/memory/memory.limit_in_bytes)"
+ fi
+ if [ -n "$memLimitB" ]; then
+ # if we have a cgroup memory limit, let's inform RabbitMQ of what it is (so it can calculate vm_memory_high_watermark properly)
+ # https://github.com/rabbitmq/rabbitmq-server/pull/1234
+ rabbit_set_config 'total_memory_available_override_value' "$memLimitB"
+ fi
+ # https://www.rabbitmq.com/memory.html#memsup-usage
+ if [ "${RABBITMQ_VM_MEMORY_HIGH_WATERMARK:-}" ]; then
+ # https://github.com/docker-library/rabbitmq/pull/105#issuecomment-242165822
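+		# Accepts a relative value ("0.4" or "40%") or an absolute one, either in
+		# bytes ("1073741824") or with a unit suffix ("256MiB"), and emits the
+		# matching vm_memory_high_watermark.relative/.absolute config line.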
+ vmMemoryHighWatermark="$(
+ echo "$RABBITMQ_VM_MEMORY_HIGH_WATERMARK" | awk '
+ /^[0-9]*[.][0-9]+$|^[0-9]+([.][0-9]+)?%$/ {
+ perc = $0;
+ if (perc ~ /%$/) {
+ gsub(/%$/, "", perc);
+ perc = perc / 100;
+ }
+ if (perc > 1.0 || perc < 0.0) {
+ printf "error: invalid percentage for vm_memory_high_watermark: %s (must be >= 0%%, <= 100%%)\n", $0 > "/dev/stderr";
+ exit 1;
+ }
+ printf "vm_memory_high_watermark.relative %0.03f\n", perc;
+ next;
+ }
+ /^[0-9]+$/ {
+ printf "vm_memory_high_watermark.absolute %s\n", $0;
+ next;
+ }
+ /^[0-9]+([.][0-9]+)?[a-zA-Z]+$/ {
+ printf "vm_memory_high_watermark.absolute %s\n", $0;
+ next;
+ }
+ {
+ printf "error: unexpected input for vm_memory_high_watermark: %s\n", $0;
+ exit 1;
+ }
+ '
+ )"
+ if [ "$vmMemoryHighWatermark" ]; then
+ vmMemoryHighWatermarkKey="${vmMemoryHighWatermark%% *}"
+ vmMemoryHighWatermarkVal="${vmMemoryHighWatermark#$vmMemoryHighWatermarkKey }"
+ rabbit_set_config "$vmMemoryHighWatermarkKey" "$vmMemoryHighWatermarkVal"
+ case "$vmMemoryHighWatermarkKey" in
+ # make sure we only set one or the other
+ 'vm_memory_high_watermark.absolute') rabbit_comment_config 'vm_memory_high_watermark.relative' ;;
+ 'vm_memory_high_watermark.relative') rabbit_comment_config 'vm_memory_high_watermark.absolute' ;;
+ esac
+ fi
+ fi
+
+ if [ "$haveSslConfig" ]; then
+ rabbit_set_config 'listeners.ssl.default' 5671
+ rabbit_env_config 'ssl' "${sslConfigKeys[@]}"
+ else
+ rabbit_set_config 'listeners.tcp.default' 5672
+ fi
+
+ rabbit_env_config '' "${rabbitConfigKeys[@]}"
+
+ # if management plugin is installed, generate config for it
+ # https://www.rabbitmq.com/management.html#configuration
+ if [ "$(rabbitmq-plugins list -q -m -e 'rabbitmq_management$')" ]; then
+ if [ "$haveManagementSslConfig" ]; then
+ rabbit_set_config 'management.listener.port' 15671
+ rabbit_set_config 'management.listener.ssl' 'true'
+ rabbit_env_config 'management_ssl' "${sslConfigKeys[@]}"
+ else
+ rabbit_set_config 'management.listener.port' 15672
+ rabbit_set_config 'management.listener.ssl' 'false'
+ fi
+
+ # if definitions file exists, then load it
+ # https://www.rabbitmq.com/management.html#load-definitions
+ managementDefinitionsFile='/etc/rabbitmq/definitions.json'
+ if [ -f "$managementDefinitionsFile" ]; then
+ # see also https://github.com/docker-library/rabbitmq/pull/112#issuecomment-271485550
+ rabbit_set_config 'management.load_definitions' "$managementDefinitionsFile"
+ fi
+ fi
+fi
+
+combinedSsl='/tmp/rabbitmq-ssl/combined.pem'
+if [ "$haveSslConfig" ] && [[ "$1" == rabbitmq* ]] && [ ! -f "$combinedSsl" ]; then
+ # Create combined cert
+ cat "$RABBITMQ_SSL_CERTFILE" "$RABBITMQ_SSL_KEYFILE" > "$combinedSsl"
+ chmod 0400 "$combinedSsl"
+fi
+if [ "$haveSslConfig" ] && [ -f "$combinedSsl" ]; then
+	# More env vars to make clustering happy
+ # we don't handle clustering in this script, but these args should ensure
+ # clustered SSL-enabled members will talk nicely
+ export ERL_SSL_PATH="$(erl -eval 'io:format("~p", [code:lib_dir(ssl, ebin)]),halt().' -noshell)"
+ sslErlArgs="-pa $ERL_SSL_PATH -proto_dist inet_tls -ssl_dist_opt server_certfile $combinedSsl -ssl_dist_opt server_secure_renegotiate true client_secure_renegotiate true"
+ export RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS="${RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS:-} $sslErlArgs"
+ export RABBITMQ_CTL_ERL_ARGS="${RABBITMQ_CTL_ERL_ARGS:-} $sslErlArgs"
+fi
+
+exec "$@"
diff --git a/deps/rabbitmq_prometheus/docker/erlang.cookie b/deps/rabbitmq_prometheus/docker/erlang.cookie
new file mode 100644
index 0000000000..6245e63c18
--- /dev/null
+++ b/deps/rabbitmq_prometheus/docker/erlang.cookie
@@ -0,0 +1 @@
+rabbitmq-prometheus
diff --git a/deps/rabbitmq_prometheus/docker/grafana/__inputs.json b/deps/rabbitmq_prometheus/docker/grafana/__inputs.json
new file mode 100644
index 0000000000..6a82614abd
--- /dev/null
+++ b/deps/rabbitmq_prometheus/docker/grafana/__inputs.json
@@ -0,0 +1,12 @@
+{
+ "__inputs": [
+ {
+ "name": "DS_PROMETHEUS",
+ "label": "prometheus",
+ "description": "",
+ "type": "datasource",
+ "pluginId": "prometheus",
+ "pluginName": "Prometheus"
+ }
+ ]
+}
diff --git a/deps/rabbitmq_prometheus/docker/grafana/dashboards.yml b/deps/rabbitmq_prometheus/docker/grafana/dashboards.yml
new file mode 100644
index 0000000000..4b99d5443e
--- /dev/null
+++ b/deps/rabbitmq_prometheus/docker/grafana/dashboards.yml
@@ -0,0 +1,10 @@
+apiVersion: 1
+
+providers:
+ - name: 'rabbitmq'
+ orgId: 1
+ folder: ''
+ type: file
+ disableDeletion: true
+ options:
+ path: /dashboards
diff --git a/deps/rabbitmq_prometheus/docker/grafana/dashboards/Erlang-Distribution.json b/deps/rabbitmq_prometheus/docker/grafana/dashboards/Erlang-Distribution.json
new file mode 100644
index 0000000000..96550fd9e3
--- /dev/null
+++ b/deps/rabbitmq_prometheus/docker/grafana/dashboards/Erlang-Distribution.json
@@ -0,0 +1,2332 @@
+{
+ "__requires": [
+ {
+ "type": "grafana",
+ "id": "grafana",
+ "name": "Grafana",
+ "version": "6.0.0"
+ },
+ {
+ "type": "datasource",
+ "id": "prometheus",
+ "name": "prometheus",
+ "version": "1.0.0"
+ },
+ {
+ "type": "panel",
+ "id": "graph",
+ "name": "Graph",
+ "version": ""
+ },
+ {
+ "type": "panel",
+ "id": "singlestat",
+ "name": "Singlestat",
+ "version": ""
+ },
+ {
+ "type": "panel",
+ "id": "heatmap",
+ "name": "Heatmap",
+ "version": ""
+ },
+ {
+ "type": "panel",
+ "id": "flant-statusmap-panel",
+ "name": "Statusmap",
+ "version": "0.1.1"
+ }
+ ],
+ "annotations": {
+ "list": [
+ {
+ "$$hashKey": "object:13",
+ "builtIn": 1,
+ "datasource": "-- Grafana --",
+ "enable": true,
+ "hide": true,
+ "iconColor": "rgba(0, 211, 255, 1)",
+ "name": "Annotations & Alerts",
+ "type": "dashboard"
+ }
+ ]
+ },
+ "description": "Erlang Distribution links, inet socket, port driver, dist process + tls_connection & tls_sender",
+ "editable": true,
+ "gnetId": null,
+ "graphTooltip": 1,
+ "id": null,
+ "iteration": 1587996382757,
+ "links": [],
+ "panels": [
+ {
+ "cacheTimeout": null,
+ "colorBackground": true,
+ "colorValue": false,
+ "colors": [
+ "#C4162A",
+ "#1F60C4",
+ "#37872D"
+ ],
+ "datasource": null,
+ "format": "none",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "id": 25,
+ "interval": "",
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "sparkline": {
+ "fillColor": "rgba(255, 255, 255, 0)",
+ "full": false,
+ "lineColor": "rgb(255, 255, 255)",
+ "show": true
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "expr": "count(erlang_vm_dist_node_state * on(instance) group_left(rabbitmq_cluster) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\"}) OR vector(0)",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 1,
+ "refId": "A"
+ }
+ ],
+ "thresholds": "2,6",
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "All distribution links",
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": true,
+ "colorValue": false,
+ "colors": [
+ "#C4162A",
+ "#1F60C4",
+ "#37872D"
+ ],
+ "datasource": null,
+ "description": "When a connection between a node and peer is established, the distribution link is considered to be `up`",
+ "format": "none",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "gridPos": {
+ "h": 3,
+ "w": 6,
+ "x": 6,
+ "y": 0
+ },
+ "id": 27,
+ "interval": "",
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "sparkline": {
+ "fillColor": "rgba(255, 255, 255, 0)",
+ "full": false,
+ "lineColor": "rgb(255, 255, 255)",
+ "show": true
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "expr": "count(erlang_vm_dist_node_state * on(instance) group_left(rabbitmq_cluster) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\"} == 3) OR vector(0)",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 1,
+ "refId": "A"
+ }
+ ],
+ "thresholds": "2,6",
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Established distribution links",
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": true,
+ "colorValue": false,
+ "colors": [
+ "#37872D",
+ "#1F60C4",
+ "#C4162A"
+ ],
+ "datasource": null,
+ "description": "When a new connection is exchanging information between the node and the peer the distribution link is considered to be `pending`",
+ "format": "none",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "gridPos": {
+ "h": 3,
+ "w": 6,
+ "x": 12,
+ "y": 0
+ },
+ "id": 26,
+ "interval": "",
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "sparkline": {
+ "fillColor": "rgba(255, 255, 255, 0)",
+ "full": false,
+ "lineColor": "rgb(255, 255, 255)",
+ "show": true
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "expr": "count(erlang_vm_dist_node_state * on(instance) group_left(rabbitmq_cluster) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\"} == 1) OR vector(0)",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 1,
+ "refId": "A"
+ }
+ ],
+ "thresholds": "1,6",
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Connecting distribution links",
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": true,
+ "colorValue": false,
+ "colors": [
+ "#37872D",
+ "#1F60C4",
+ "#C4162A"
+ ],
+ "datasource": null,
+ "description": "When a new connection is established and there is an existing connection between the node and the peer, this connection needs to wait for the initial connection to go down before it can become active. The distribution link is considered `up_pending`",
+ "format": "none",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "gridPos": {
+ "h": 3,
+ "w": 6,
+ "x": 18,
+ "y": 0
+ },
+ "id": 28,
+ "interval": "",
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "sparkline": {
+ "fillColor": "rgba(255, 255, 255, 0)",
+ "full": false,
+ "lineColor": "rgb(255, 255, 255)",
+ "show": true
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "expr": "count(erlang_vm_dist_node_state * on(instance) group_left(rabbitmq_cluster) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\"} == 2) OR vector(0)",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 1,
+ "refId": "A"
+ }
+ ],
+ "thresholds": "1,6",
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Waiting distribution links",
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "collapsed": false,
+ "datasource": null,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 3
+ },
+ "id": 74,
+ "panels": [],
+ "title": "distribution links",
+ "type": "row"
+ },
+ {
+ "cards": {
+ "cardHSpacing": 2,
+ "cardMinWidth": 5,
+ "cardRound": null,
+ "cardVSpacing": 2
+ },
+ "color": {
+ "cardColor": "#b4ff00",
+ "colorScale": "sqrt",
+ "colorScheme": "interpolateGreens",
+ "defaultColor": "#757575",
+ "exponent": 0.5,
+ "min": null,
+ "mode": "discrete",
+ "thresholds": [
+ {
+ "color": "#37872D",
+ "tooltip": "established",
+ "value": "3"
+ },
+ {
+ "color": "#FA6400",
+ "tooltip": "connecting",
+ "value": "1"
+ },
+ {
+ "color": "#FADE2A",
+ "tooltip": "waiting",
+ "value": "2"
+ }
+ ]
+ },
+ "data": {
+ "decimals": null,
+ "unitFormat": "short"
+ },
+ "datasource": null,
+ "description": "",
+ "gridPos": {
+ "h": 5,
+ "w": 12,
+ "x": 0,
+ "y": 4
+ },
+ "highlightCards": true,
+ "id": 19,
+ "legend": {
+ "show": true
+ },
+ "links": [],
+ "nullPointMode": "as empty",
+ "targets": [
+ {
+ "aggregation": "Last",
+ "decimals": 2,
+ "displayAliasType": "Warning / Critical",
+ "displayType": "Regular",
+ "displayValueWithAlias": "Never",
+ "expr": "erlang_vm_dist_node_state * on(instance) group_left(rabbitmq_node, rabbitmq_cluster) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\"}",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "legendFormat": "{{rabbitmq_node}} -> {{peer}}",
+ "refId": "A",
+ "units": "none",
+ "valueHandler": "Number Threshold"
+ }
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "State of distribution links",
+ "tooltip": {
+ "show": true
+ },
+ "type": "flant-statusmap-panel",
+ "useMax": true,
+ "xAxis": {
+ "labelFormat": "%a %m/%d",
+ "minBucketWidthToShowWeekends": 4,
+ "show": true,
+ "showCrosshair": true,
+ "showWeekends": true
+ },
+ "yAxis": {
+ "show": true,
+ "showCrosshair": false
+ },
+ "yAxisSort": "metrics"
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": null,
+      "description": "Data currently buffered in the output queue of the distribution link.\n\nAny values above the 64MB threshold hint at an overloaded distribution. This applies to the default `zdbbl` value in RabbitMQ, which is 128MB.\n\n- [RabbitMQ Runtime Tuning - zdbbl](https://www.rabbitmq.com/runtime.html#distribution-buffer)\n- [erl +zdbbl](http://erlang.org/doc/man/erl.html#+zdbbl)",
+ "fill": 0,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 5,
+ "w": 12,
+ "x": 12,
+ "y": 4
+ },
+ "hiddenSeries": false,
+ "id": 62,
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": true,
+ "max": true,
+ "min": true,
+ "show": false,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "$$hashKey": "object:130",
+ "alias": "/^rabbit@[\\w.-]+0/",
+ "color": "#56A64B"
+ },
+ {
+ "$$hashKey": "object:131",
+ "alias": "/^rabbit@[\\w.-]+1/",
+ "color": "#F2CC0C"
+ },
+ {
+ "$$hashKey": "object:132",
+ "alias": "/^rabbit@[\\w.-]+2/",
+ "color": "#3274D9"
+ },
+ {
+ "$$hashKey": "object:133",
+ "alias": "/^rabbit@[\\w.-]+3/",
+ "color": "#A352CC"
+ },
+ {
+ "$$hashKey": "object:134",
+ "alias": "/^rabbit@[\\w.-]+4/",
+ "color": "#FF780A"
+ },
+ {
+ "$$hashKey": "object:135",
+ "alias": "/^rabbit@[\\w.-]+5/",
+ "color": "#96D98D"
+ },
+ {
+ "$$hashKey": "object:136",
+ "alias": "/^rabbit@[\\w.-]+6/",
+ "color": "#FFEE52"
+ },
+ {
+ "$$hashKey": "object:137",
+ "alias": "/^rabbit@[\\w.-]+7/",
+ "color": "#8AB8FF"
+ },
+ {
+ "$$hashKey": "object:138",
+ "alias": "/^rabbit@[\\w.-]+8/",
+ "color": "#CA95E5"
+ },
+ {
+ "$$hashKey": "object:139",
+ "alias": "/^rabbit@[\\w.-]+9/",
+ "color": "#FFB357"
+ }
+ ],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "erlang_vm_dist_node_queue_size_bytes * on(instance) group_left(rabbitmq_node, rabbitmq_cluster) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\"}",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 1,
+ "legendFormat": "{{rabbitmq_node}} -> {{peer}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [
+ {
+ "$$hashKey": "object:234",
+ "colorMode": "critical",
+ "fill": true,
+ "line": true,
+ "op": "gt",
+ "value": 65536000,
+ "yaxis": "left"
+ }
+ ],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Data buffered in the distribution links queue",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "$$hashKey": "object:206",
+ "decimals": 0,
+ "format": "decbytes",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": "0",
+ "show": true
+ },
+ {
+ "$$hashKey": "object:207",
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "collapsed": false,
+ "datasource": null,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 9
+ },
+ "id": 9,
+ "panels": [],
+ "title": "inet socket",
+ "type": "row"
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": null,
+ "description": "",
+ "fill": 0,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 5,
+ "w": 12,
+ "x": 0,
+ "y": 10
+ },
+ "hiddenSeries": false,
+ "id": 3,
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": true,
+ "max": true,
+ "min": true,
+ "show": false,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "/^rabbit@[\\w.-]+0/",
+ "color": "#56A64B"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+1/",
+ "color": "#F2CC0C"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+2/",
+ "color": "#3274D9"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+3/",
+ "color": "#A352CC"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+4/",
+ "color": "#FF780A"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+5/",
+ "color": "#96D98D"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+6/",
+ "color": "#FFEE52"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+7/",
+ "color": "#8AB8FF"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+8/",
+ "color": "#CA95E5"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+9/",
+ "color": "#FFB357"
+ }
+ ],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "rate(erlang_vm_dist_send_bytes[60s]) * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\"}",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "legendFormat": "{{rabbitmq_node}} -> {{peer}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Data sent to peer node / s",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "decimals": 1,
+ "format": "Bps",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": "0",
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": null,
+ "description": "",
+ "fill": 0,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 5,
+ "w": 12,
+ "x": 12,
+ "y": 10
+ },
+ "hiddenSeries": false,
+ "id": 2,
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": true,
+ "max": true,
+ "min": true,
+ "show": false,
+ "sort": "max",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "$$hashKey": "object:276",
+ "alias": "/^rabbit@[\\w.-]+0/",
+ "color": "#56A64B"
+ },
+ {
+ "$$hashKey": "object:277",
+ "alias": "/^rabbit@[\\w.-]+1/",
+ "color": "#F2CC0C"
+ },
+ {
+ "$$hashKey": "object:278",
+ "alias": "/^rabbit@[\\w.-]+2/",
+ "color": "#3274D9"
+ },
+ {
+ "$$hashKey": "object:279",
+ "alias": "/^rabbit@[\\w.-]+3/",
+ "color": "#A352CC"
+ },
+ {
+ "$$hashKey": "object:280",
+ "alias": "/^rabbit@[\\w.-]+4/",
+ "color": "#FF780A"
+ },
+ {
+ "$$hashKey": "object:281",
+ "alias": "/^rabbit@[\\w.-]+5/",
+ "color": "#96D98D"
+ },
+ {
+ "$$hashKey": "object:282",
+ "alias": "/^rabbit@[\\w.-]+6/",
+ "color": "#FFEE52"
+ },
+ {
+ "$$hashKey": "object:283",
+ "alias": "/^rabbit@[\\w.-]+7/",
+ "color": "#8AB8FF"
+ },
+ {
+ "$$hashKey": "object:284",
+ "alias": "/^rabbit@[\\w.-]+8/",
+ "color": "#CA95E5"
+ },
+ {
+ "$$hashKey": "object:285",
+ "alias": "/^rabbit@[\\w.-]+9/",
+ "color": "#FFB357"
+ }
+ ],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "rate(erlang_vm_dist_recv_bytes[60s]) * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\"}",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "legendFormat": "{{rabbitmq_node}} <- {{peer}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Data received from peer node / s",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "$$hashKey": "object:352",
+ "decimals": 1,
+ "format": "Bps",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": "0",
+ "show": true
+ },
+ {
+ "$$hashKey": "object:353",
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": null,
+ "description": "Number of inet packets sent to the distribution link port.\n\nIf too few messages are sent and data sits in the port driver buffer, increasing the `inet_dist_connect_options` as well as `inet_dist_listen_options` buffer values will result in more stable throughput.",
+ "fill": 0,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 5,
+ "w": 12,
+ "x": 0,
+ "y": 15
+ },
+ "hiddenSeries": false,
+ "id": 4,
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": true,
+ "max": true,
+ "min": true,
+ "show": false,
+ "sort": "max",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "/^rabbit@[\\w.-]+0/",
+ "color": "#56A64B"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+1/",
+ "color": "#F2CC0C"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+2/",
+ "color": "#3274D9"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+3/",
+ "color": "#A352CC"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+4/",
+ "color": "#FF780A"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+5/",
+ "color": "#96D98D"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+6/",
+ "color": "#FFEE52"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+7/",
+ "color": "#8AB8FF"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+8/",
+ "color": "#CA95E5"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+9/",
+ "color": "#FFB357"
+ }
+ ],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "rate(erlang_vm_dist_send_cnt[60s]) * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\"}",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "legendFormat": "{{rabbitmq_node}} -> {{peer}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Distribution messages sent to peer node / s",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "decimals": 0,
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": "0",
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": null,
+ "description": "Number of inet packets received from the distribution link port.\n\nIf too many messages are received, increasing the `inet_dist_connect_options` as well as `inet_dist_listen_options` buffer values will result in more stable throughput.",
+ "fill": 0,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 5,
+ "w": 12,
+ "x": 12,
+ "y": 15
+ },
+ "hiddenSeries": false,
+ "id": 5,
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": true,
+ "max": true,
+ "min": true,
+ "show": false,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "/^rabbit@[\\w.-]+0/",
+ "color": "#56A64B"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+1/",
+ "color": "#F2CC0C"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+2/",
+ "color": "#3274D9"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+3/",
+ "color": "#A352CC"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+4/",
+ "color": "#FF780A"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+5/",
+ "color": "#96D98D"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+6/",
+ "color": "#FFEE52"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+7/",
+ "color": "#8AB8FF"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+8/",
+ "color": "#CA95E5"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+9/",
+ "color": "#FFB357"
+ }
+ ],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "rate(erlang_vm_dist_recv_cnt[60s]) * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\"}",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "legendFormat": "{{rabbitmq_node}} <- {{peer}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Distribution messages received from peer node / s",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "decimals": 0,
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": "0",
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": null,
+ "description": "Corresponds to the average size of the argument passed to `gen_tcp:send/2` or equivalent.\n\nTypically corresponds to TCP window size.\n\nIf TLS is used for inter-node communication, the `inet` packet size will be varied so that the system as a whole is both secure and performant.\n\n`inet` packets larger than the TCP window will be split into TCP packets by the system kernel.\n\n`inet` packets smaller than the TCP window _may_ be joined into TCP packets by the system kernel.\n\n* [`inet` packet](http://erlang.org/doc/man/inet.html#packet)",
+ "fill": 0,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 5,
+ "w": 12,
+ "x": 0,
+ "y": 20
+ },
+ "hiddenSeries": false,
+ "id": 39,
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": true,
+ "max": true,
+ "min": true,
+ "show": false,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "/^rabbit@[\\w.-]+0/",
+ "color": "#56A64B"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+1/",
+ "color": "#F2CC0C"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+2/",
+ "color": "#3274D9"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+3/",
+ "color": "#A352CC"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+4/",
+ "color": "#FF780A"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+5/",
+ "color": "#96D98D"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+6/",
+ "color": "#FFEE52"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+7/",
+ "color": "#8AB8FF"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+8/",
+ "color": "#CA95E5"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+9/",
+ "color": "#FFB357"
+ }
+ ],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "(rate(erlang_vm_dist_send_bytes[60s]) * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\"}) / \n(rate(erlang_vm_dist_send_cnt[60s]) * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\"})",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "legendFormat": "{{rabbitmq_node}} -> {{peer}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Average inet packet size sent to peer node",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "decimals": 0,
+ "format": "decbytes",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": "0",
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": null,
+ "description": "Corresponds to the average size of the argument received from `gen_tcp:recv/2` or equivalent.\n\nTypically corresponds to TCP window size.\n\nIf TLS is used for inter-node communication, the `inet` packet size will be varied so that the system as a whole is both secure and performant.\n\n* [`inet` packet](http://erlang.org/doc/man/inet.html#packet)",
+ "fill": 0,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 5,
+ "w": 12,
+ "x": 12,
+ "y": 20
+ },
+ "hiddenSeries": false,
+ "id": 50,
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": true,
+ "max": true,
+ "min": true,
+ "show": false,
+ "sort": "max",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "/^rabbit@[\\w.-]+0/",
+ "color": "#56A64B"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+1/",
+ "color": "#F2CC0C"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+2/",
+ "color": "#3274D9"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+3/",
+ "color": "#A352CC"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+4/",
+ "color": "#FF780A"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+5/",
+ "color": "#96D98D"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+6/",
+ "color": "#FFEE52"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+7/",
+ "color": "#8AB8FF"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+8/",
+ "color": "#CA95E5"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+9/",
+ "color": "#FFB357"
+ }
+ ],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "(rate(erlang_vm_dist_recv_bytes[60s]) * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\"}) / \n(rate(erlang_vm_dist_recv_cnt[60s]) * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\"})",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "legendFormat": "{{rabbitmq_node}} <- {{peer}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Average inet packet size received from peer node",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "decimals": 0,
+ "format": "Bps",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": "0",
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "collapsed": false,
+ "datasource": null,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 25
+ },
+ "id": 11,
+ "panels": [],
+ "title": "port driver",
+ "type": "row"
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": null,
+ "description": "The total number of bytes allocated for this port by the runtime system. The port itself can have allocated memory that is not included.",
+ "fill": 0,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 5,
+ "w": 12,
+ "x": 0,
+ "y": 26
+ },
+ "hiddenSeries": false,
+ "id": 12,
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": true,
+ "max": true,
+ "min": true,
+ "show": false,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "/^rabbit@[\\w.-]+0/",
+ "color": "#56A64B"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+1/",
+ "color": "#F2CC0C"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+2/",
+ "color": "#3274D9"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+3/",
+ "color": "#A352CC"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+4/",
+ "color": "#FF780A"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+5/",
+ "color": "#96D98D"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+6/",
+ "color": "#FFEE52"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+7/",
+ "color": "#8AB8FF"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+8/",
+ "color": "#CA95E5"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+9/",
+ "color": "#FFB357"
+ }
+ ],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "erlang_vm_dist_port_memory_bytes * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\"}",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "legendFormat": "{{rabbitmq_node}} -> {{peer}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Memory used by the port driver",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "decimals": 0,
+ "format": "decbytes",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": "0",
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": null,
+ "description": "The total number of bytes queued by the port using the ERTS driver queue implementation\n\nAny values above a few KBs hint to an overloaded distribution",
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 5,
+ "w": 12,
+ "x": 12,
+ "y": 26
+ },
+ "hiddenSeries": false,
+ "id": 7,
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": true,
+ "max": true,
+ "min": true,
+ "show": false,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "/rabbit/",
+ "color": "#C4162A"
+ }
+ ],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "erlang_vm_dist_port_queue_size_bytes * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\"}",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "legendFormat": "{{rabbitmq_node}} -> {{peer}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [
+ {
+ "colorMode": "critical",
+ "fill": true,
+ "line": true,
+ "op": "gt",
+ "value": 0,
+ "yaxis": "left"
+ }
+ ],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Data in the port driver buffer",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "decimals": 0,
+ "format": "decbytes",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": "0",
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "collapsed": false,
+ "datasource": null,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 31
+ },
+ "id": 14,
+ "panels": [],
+ "repeat": "erlang_vm_dist_proc_type",
+ "title": "$erlang_vm_dist_proc_type process",
+ "type": "row"
+ },
+ {
+ "cacheTimeout": null,
+ "cards": {
+ "cardHSpacing": 2,
+ "cardMinWidth": 5,
+ "cardRound": null,
+ "cardVSpacing": 2
+ },
+ "color": {
+ "cardColor": "#b4ff00",
+ "colorScale": "sqrt",
+ "colorScheme": "interpolateGnYlRd",
+ "defaultColor": "#757575",
+ "exponent": 0.5,
+ "mode": "discrete",
+ "thresholds": [
+ {
+ "color": "#37872D",
+ "tooltip": "waiting",
+ "value": "6"
+ },
+ {
+ "color": "#96D98D",
+ "tooltip": "running",
+ "value": "5"
+ },
+ {
+ "color": "#1F60C4",
+ "tooltip": "garbage_collecting",
+ "value": "4"
+ },
+ {
+ "color": "#FADE2A",
+ "tooltip": "runnable",
+ "value": "3"
+ },
+ {
+ "color": "#FA6400",
+ "tooltip": "suspended",
+ "value": "2"
+ },
+ {
+ "color": "#C4162A",
+ "tooltip": "exiting",
+ "value": "1"
+ }
+ ]
+ },
+ "data": {
+ "decimals": null,
+ "unitFormat": "short"
+ },
+ "datasource": null,
+ "description": "",
+ "gridPos": {
+ "h": 5,
+ "w": 12,
+ "x": 0,
+ "y": 32
+ },
+ "highlightCards": true,
+ "id": 18,
+ "legend": {
+ "show": true
+ },
+ "links": [],
+ "nullPointMode": "as empty",
+ "targets": [
+ {
+ "aggregation": "Last",
+ "decimals": 2,
+ "displayAliasType": "Warning / Critical",
+ "displayType": "Regular",
+ "displayValueWithAlias": "Never",
+ "expr": "erlang_vm_dist_proc_status{type=\"$erlang_vm_dist_proc_type\"} * on(instance) group_left(rabbitmq_node, rabbitmq_cluster) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\"} ",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "legendFormat": "{{rabbitmq_node}} -> {{peer}}",
+ "refId": "A",
+ "units": "none",
+ "valueHandler": "Number Threshold"
+ }
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Process state",
+ "tooltip": {
+ "show": true
+ },
+ "type": "flant-statusmap-panel",
+ "useMax": true,
+ "xAxis": {
+ "labelFormat": "%a %m/%d",
+ "minBucketWidthToShowWeekends": 4,
+ "show": true,
+ "showCrosshair": true,
+ "showWeekends": true
+ },
+ "yAxis": {
+ "show": true,
+ "showCrosshair": false
+ },
+ "yAxisSort": "metrics"
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": null,
+ "description": "The number of messages currently in the message queue of the process\n\nAny values above 0 hint to an overloaded distribution",
+ "fill": 0,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 5,
+ "w": 12,
+ "x": 12,
+ "y": 32
+ },
+ "hiddenSeries": false,
+ "id": 16,
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": true,
+ "max": true,
+ "min": true,
+ "show": false,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "/rabbit/",
+ "color": "#C4162A"
+ }
+ ],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "erlang_vm_dist_proc_message_queue_len{type=\"$erlang_vm_dist_proc_type\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\"}",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "legendFormat": "{{rabbitmq_node}} -> {{peer}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [
+ {
+ "colorMode": "critical",
+ "fill": true,
+ "line": true,
+ "op": "gt",
+ "value": 0,
+ "yaxis": "left"
+ }
+ ],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Messages in the process queue",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "decimals": 0,
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": "0",
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": null,
+ "description": "The size in bytes of process memory. This includes call stack, heap, and internal structures.",
+ "fill": 0,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 5,
+ "w": 12,
+ "x": 0,
+ "y": 37
+ },
+ "hiddenSeries": false,
+ "id": 15,
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": true,
+ "max": true,
+ "min": true,
+ "show": false,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "/^rabbit@[\\w.-]+0/",
+ "color": "#56A64B"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+1/",
+ "color": "#F2CC0C"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+2/",
+ "color": "#3274D9"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+3/",
+ "color": "#A352CC"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+4/",
+ "color": "#FF780A"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+5/",
+ "color": "#96D98D"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+6/",
+ "color": "#FFEE52"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+7/",
+ "color": "#8AB8FF"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+8/",
+ "color": "#CA95E5"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+9/",
+ "color": "#FFB357"
+ }
+ ],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "erlang_vm_dist_proc_memory_bytes{type=\"$erlang_vm_dist_proc_type\"} * on(instance) group_left(rabbitmq_node, rabbitmq_cluster) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\"}",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "legendFormat": "{{rabbitmq_node}} -> {{peer}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Memory used by the process",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "decimals": 0,
+ "format": "decbytes",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": "0",
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": null,
+ "description": "The number of reductions executed by the process",
+ "fill": 0,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 5,
+ "w": 12,
+ "x": 12,
+ "y": 37
+ },
+ "hiddenSeries": false,
+ "id": 17,
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": true,
+ "max": true,
+ "min": true,
+ "show": false,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "/^rabbit@[\\w.-]+0/",
+ "color": "#56A64B"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+1/",
+ "color": "#F2CC0C"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+2/",
+ "color": "#3274D9"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+3/",
+ "color": "#A352CC"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+4/",
+ "color": "#FF780A"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+5/",
+ "color": "#96D98D"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+6/",
+ "color": "#FFEE52"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+7/",
+ "color": "#8AB8FF"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+8/",
+ "color": "#CA95E5"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+9/",
+ "color": "#FFB357"
+ }
+ ],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "rate(erlang_vm_dist_proc_reductions{type=\"$erlang_vm_dist_proc_type\"}[60s]) * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\"}",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "legendFormat": "{{rabbitmq_node}} -> {{peer}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Process reductions / s",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "decimals": 0,
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": "0",
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ }
+ ],
+ "refresh": "15s",
+ "schemaVersion": 22,
+ "style": "dark",
+ "tags": [
+ "rabbitmq-prometheus"
+ ],
+ "templating": {
+ "list": [
+ {
+ "allValue": null,
+ "current": {
+ "text": "rabbitmq-qq",
+ "value": "rabbitmq-qq"
+ },
+ "datasource": null,
+ "definition": "label_values(rabbitmq_identity_info, rabbitmq_cluster)",
+ "hide": 0,
+ "includeAll": false,
+ "index": -1,
+ "label": "RabbitMQ Cluster",
+ "multi": false,
+ "name": "rabbitmq_cluster",
+ "options": [],
+ "query": "label_values(rabbitmq_identity_info, rabbitmq_cluster)",
+ "refresh": 2,
+ "regex": "",
+ "skipUrlSync": false,
+ "sort": 1,
+ "tagValuesQuery": "",
+ "tags": [],
+ "tagsQuery": "",
+ "type": "query",
+ "useTags": false
+ },
+ {
+ "allValue": null,
+ "current": {
+ "text": "All",
+ "value": [
+ "$__all"
+ ]
+ },
+ "datasource": null,
+ "definition": "label_values(erlang_vm_dist_proc_status, type)",
+ "hide": 0,
+ "includeAll": true,
+ "index": -1,
+ "label": "Process type",
+ "multi": true,
+ "name": "erlang_vm_dist_proc_type",
+ "options": [],
+ "query": "label_values(erlang_vm_dist_proc_status, type)",
+ "refresh": 2,
+ "regex": "",
+ "skipUrlSync": false,
+ "sort": 1,
+ "tagValuesQuery": "",
+ "tags": [],
+ "tagsQuery": "",
+ "type": "query",
+ "useTags": false
+ }
+ ]
+ },
+ "time": {
+ "from": "now-15m",
+ "to": "now"
+ },
+ "timepicker": {
+ "refresh_intervals": [
+ "15s",
+ "30s",
+ "1m",
+ "5m",
+ "10m"
+ ],
+ "time_options": [
+ "5m",
+ "15m",
+ "1h",
+ "6h",
+ "12h",
+ "24h",
+ "2d",
+ "7d",
+ "30d"
+ ]
+ },
+ "timezone": "",
+ "title": "Erlang-Distribution",
+ "uid": "d-SFCCmZz",
+ "variables": {
+ "list": []
+ },
+ "version": 4
+}
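
Several panel descriptions in the dashboard above point at the `inet_dist_connect_options` and `inet_dist_listen_options` kernel parameters as the knobs for distribution link buffers. A minimal sketch of what such tuning could look like in a RabbitMQ `advanced.config` follows; the 128 KiB `buffer` value is an illustrative assumption, not a recommendation made by this changeset:

    %% advanced.config: a sketch of raising the inet driver buffer on
    %% Erlang distribution sockets. {buffer, Bytes} sizes the user-space
    %% inet buffer; 131072 below is a placeholder to adapt to the packet
    %% sizes observed on the "Average inet packet size" panels.
    [
      {kernel, [
        %% applies to distribution connections this node initiates
        {inet_dist_connect_options, [{buffer, 131072}]},
        %% applies to distribution connections this node accepts
        {inet_dist_listen_options,  [{buffer, 131072}]}
      ]}
    ].

Both options should be set to the same value, since either side of a distribution link may be the connecting or the accepting node.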
diff --git a/deps/rabbitmq_prometheus/docker/grafana/dashboards/Erlang-Distributions-Compare.json b/deps/rabbitmq_prometheus/docker/grafana/dashboards/Erlang-Distributions-Compare.json
new file mode 100644
index 0000000000..fd4cb651d0
--- /dev/null
+++ b/deps/rabbitmq_prometheus/docker/grafana/dashboards/Erlang-Distributions-Compare.json
@@ -0,0 +1,1807 @@
+{
+ "__requires": [
+ {
+ "type": "grafana",
+ "id": "grafana",
+ "name": "Grafana",
+ "version": "6.0.0"
+ },
+ {
+ "type": "datasource",
+ "id": "prometheus",
+ "name": "prometheus",
+ "version": "1.0.0"
+ },
+ {
+ "type": "table",
+ "id": "table",
+ "name": "Table",
+ "version": ""
+ },
+ {
+ "type": "panel",
+ "id": "graph",
+ "name": "Graph",
+ "version": ""
+ },
+ {
+ "type": "panel",
+ "id": "heatmap",
+ "name": "Heatmap",
+ "version": ""
+ }
+ ],
+ "annotations": {
+ "list": [
+ {
+ "builtIn": 1,
+ "datasource": "-- Grafana --",
+ "enable": true,
+ "hide": true,
+ "iconColor": "rgba(0, 211, 255, 1)",
+ "name": "Annotations & Alerts",
+ "type": "dashboard"
+ }
+ ]
+ },
+ "editable": true,
+ "gnetId": null,
+ "graphTooltip": 1,
+ "iteration": 1571066778520,
+ "links": [],
+ "panels": [
+ {
+ "collapsed": false,
+ "datasource": null,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 0
+ },
+ "id": 67,
+ "panels": [],
+ "title": "rabbitmq-prometheus",
+ "type": "row"
+ },
+ {
+ "columns": [
+ {
+ "text": "Min",
+ "value": "min"
+ },
+ {
+ "text": "Max",
+ "value": "max"
+ },
+ {
+ "text": "Avg",
+ "value": "avg"
+ },
+ {
+ "text": "Current",
+ "value": "current"
+ }
+ ],
+ "datasource": null,
+ "fontSize": "100%",
+ "gridPos": {
+ "h": 7,
+ "w": 9,
+ "x": 0,
+ "y": 1
+ },
+ "id": 56,
+ "options": {},
+ "pageSize": null,
+ "pluginVersion": "6.4.1",
+ "scroll": true,
+ "showHeader": true,
+ "sort": {
+ "col": 4,
+ "desc": true
+ },
+ "styles": [
+ {
+ "alias": "Node -> Peer",
+ "colorMode": null,
+ "colors": [
+ "rgba(245, 54, 54, 0.9)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(50, 172, 45, 0.97)"
+ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 0,
+ "mappingType": 1,
+ "pattern": "Metric",
+ "thresholds": [],
+ "type": "string",
+ "unit": "short"
+ },
+ {
+ "alias": "",
+ "colorMode": null,
+ "colors": [
+ "rgba(245, 54, 54, 0.9)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(50, 172, 45, 0.97)"
+ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 1,
+ "mappingType": 1,
+ "pattern": "/.*/",
+ "thresholds": [],
+ "type": "number",
+ "unit": "Bps"
+ }
+ ],
+ "targets": [
+ {
+ "expr": "rate(erlang_vm_dist_send_bytes[60s]) * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=~\"$rabbitmq_cluster\"}",
+ "legendFormat": "{{rabbitmq_node}} -> {{peer}}",
+ "refId": "A"
+ }
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Erlang Distribution outgoing traffic / s",
+ "transform": "timeseries_aggregations",
+ "type": "table"
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": null,
+ "description": "Erlang Distribution traffic, node network traffic and CPU + PerfTest message throughput and latency",
+ "fill": 0,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 7,
+ "w": 15,
+ "x": 9,
+ "y": 1
+ },
+ "id": 3,
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": true,
+ "max": true,
+ "min": true,
+ "rightSide": false,
+ "show": false,
+ "sort": "max",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "/^rabbit@[\\w.-]+0/",
+ "color": "#56A64B"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+1/",
+ "color": "#F2CC0C"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+2/",
+ "color": "#3274D9"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+3/",
+ "color": "#A352CC"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+4/",
+ "color": "#FF780A"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+5/",
+ "color": "#96D98D"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+6/",
+ "color": "#FFEE52"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+7/",
+ "color": "#8AB8FF"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+8/",
+ "color": "#CA95E5"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+9/",
+ "color": "#FFB357"
+ }
+ ],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "rate(erlang_vm_dist_send_bytes[60s]) * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=~\"$rabbitmq_cluster\"}",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "legendFormat": "{{rabbitmq_node}} -> {{peer}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Erlang Distribution outgoing traffic / s",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "transparent": true,
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "decimals": 0,
+ "format": "Bps",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": "0",
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "collapsed": false,
+ "datasource": null,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 8
+ },
+ "id": 65,
+ "panels": [],
+ "title": "node-exporter_cadvisor",
+ "type": "row"
+ },
+ {
+ "columns": [
+ {
+ "text": "Min",
+ "value": "min"
+ },
+ {
+ "text": "Max",
+ "value": "max"
+ },
+ {
+ "text": "Avg",
+ "value": "avg"
+ },
+ {
+ "text": "Current",
+ "value": "current"
+ }
+ ],
+ "datasource": null,
+ "fontSize": "100%",
+ "gridPos": {
+ "h": 7,
+ "w": 9,
+ "x": 0,
+ "y": 9
+ },
+ "id": 61,
+ "options": {},
+ "pageSize": null,
+ "pluginVersion": "6.4.1",
+ "scroll": true,
+ "showHeader": true,
+ "sort": {
+ "col": 4,
+ "desc": true
+ },
+ "styles": [
+ {
+ "alias": "Host",
+ "colorMode": null,
+ "colors": [
+ "rgba(245, 54, 54, 0.9)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(50, 172, 45, 0.97)"
+ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 0,
+ "mappingType": 1,
+ "pattern": "Metric",
+ "thresholds": [],
+ "type": "string",
+ "unit": "short"
+ },
+ {
+ "alias": "",
+ "colorMode": null,
+ "colors": [
+ "rgba(245, 54, 54, 0.9)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(50, 172, 45, 0.97)"
+ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 1,
+ "mappingType": 1,
+ "pattern": "/.*/",
+ "thresholds": [],
+ "type": "number",
+ "unit": "Bps"
+ }
+ ],
+ "targets": [
+ {
+ "expr": "sum by(instance) (rate(node_network_receive_bytes_total{instance=~\"$host\"}[5m]))",
+ "legendFormat": "{{instance}}",
+ "refId": "A"
+ },
+ {
+ "expr": "sum by(name) (rate(container_network_receive_bytes_total{name=~\"$container\"}[1m]))",
+ "legendFormat": "{{name}}",
+ "refId": "B"
+ }
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Network incoming traffic / s",
+ "transform": "timeseries_aggregations",
+ "type": "table"
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": null,
+ "decimals": null,
+ "editable": true,
+ "error": false,
+ "fill": 0,
+ "fillGradient": 0,
+ "grid": {},
+ "gridPos": {
+ "h": 7,
+ "w": 15,
+ "x": 9,
+ "y": 9
+ },
+ "height": "",
+ "id": 58,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": false,
+ "hideEmpty": true,
+ "hideZero": true,
+ "max": true,
+ "min": true,
+ "rightSide": true,
+ "show": false,
+ "sort": "max",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "maxPerRow": 3,
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "repeatDirection": "h",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "calculatedInterval": "2s",
+ "datasourceErrors": {},
+ "errors": {},
+ "expr": "sum by(instance) (rate(node_network_receive_bytes_total{instance=~\"$host\"}[5m]))",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 1,
+ "legendFormat": "{{instance}}",
+ "metric": "",
+ "refId": "A",
+ "step": 20
+ },
+ {
+ "expr": "sum by(name) (rate(container_network_receive_bytes_total{name=~\"$container\"}[1m]))",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "legendFormat": "{{name}}",
+ "refId": "B"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Network incoming traffic / s",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "transparent": true,
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "decimals": 1,
+ "format": "Bps",
+ "label": "",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": false
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "columns": [
+ {
+ "text": "Min",
+ "value": "min"
+ },
+ {
+ "text": "Max",
+ "value": "max"
+ },
+ {
+ "text": "Avg",
+ "value": "avg"
+ },
+ {
+ "text": "Current",
+ "value": "current"
+ }
+ ],
+ "datasource": null,
+ "fontSize": "100%",
+ "gridPos": {
+ "h": 7,
+ "w": 9,
+ "x": 0,
+ "y": 16
+ },
+ "id": 60,
+ "options": {},
+ "pageSize": null,
+ "pluginVersion": "6.4.1",
+ "scroll": true,
+ "showHeader": true,
+ "sort": {
+ "col": 4,
+ "desc": true
+ },
+ "styles": [
+ {
+ "alias": "Host",
+ "colorMode": null,
+ "colors": [
+ "rgba(245, 54, 54, 0.9)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(50, 172, 45, 0.97)"
+ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 0,
+ "mappingType": 1,
+ "pattern": "Metric",
+ "thresholds": [],
+ "type": "string",
+ "unit": "short"
+ },
+ {
+ "alias": "",
+ "colorMode": null,
+ "colors": [
+ "rgba(245, 54, 54, 0.9)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(50, 172, 45, 0.97)"
+ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 1,
+ "mappingType": 1,
+ "pattern": "/.*/",
+ "thresholds": [],
+ "type": "number",
+ "unit": "Bps"
+ }
+ ],
+ "targets": [
+ {
+ "expr": "sum by(instance) (rate(node_network_transmit_bytes_total{instance=~\"$host\"}[5m]))",
+ "legendFormat": "{{instance}}",
+ "refId": "A"
+ },
+ {
+ "expr": "sum by(name) (rate(container_network_transmit_bytes_total{name=~\"$container\"}[1m]))",
+ "legendFormat": "{{name}}",
+ "refId": "B"
+ }
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Network outgoing traffic / s",
+ "transform": "timeseries_aggregations",
+ "type": "table"
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": null,
+ "decimals": null,
+ "editable": true,
+ "error": false,
+ "fill": 0,
+ "fillGradient": 0,
+ "grid": {},
+ "gridPos": {
+ "h": 7,
+ "w": 15,
+ "x": 9,
+ "y": 16
+ },
+ "height": "",
+ "id": 57,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": false,
+ "hideEmpty": true,
+ "hideZero": true,
+ "max": true,
+ "min": true,
+ "rightSide": true,
+ "show": false,
+ "sort": "max",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "maxPerRow": 3,
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "repeatDirection": "h",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "calculatedInterval": "2s",
+ "datasourceErrors": {},
+ "errors": {},
+ "expr": "sum by(instance) (rate(node_network_transmit_bytes_total{instance=~\"$host\"}[5m]))",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 1,
+ "legendFormat": "{{instance}}",
+ "metric": "",
+ "refId": "A",
+ "step": 20
+ },
+ {
+ "expr": "sum by(name) (rate(container_network_transmit_bytes_total{name=~\"$container\"}[1m]))",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "legendFormat": "{{name}}",
+ "refId": "B"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Network outgoing traffic / s",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "transparent": true,
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "decimals": 1,
+ "format": "Bps",
+ "label": "",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": false
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "columns": [
+ {
+ "text": "Min",
+ "value": "min"
+ },
+ {
+ "text": "Max",
+ "value": "max"
+ },
+ {
+ "text": "Avg",
+ "value": "avg"
+ },
+ {
+ "text": "Current",
+ "value": "current"
+ }
+ ],
+ "datasource": null,
+ "fontSize": "100%",
+ "gridPos": {
+ "h": 7,
+ "w": 9,
+ "x": 0,
+ "y": 23
+ },
+ "id": 59,
+ "options": {},
+ "pageSize": null,
+ "pluginVersion": "6.4.1",
+ "scroll": true,
+ "showHeader": true,
+ "sort": {
+ "col": 4,
+ "desc": true
+ },
+ "styles": [
+ {
+ "alias": "Host",
+ "colorMode": null,
+ "colors": [
+ "rgba(245, 54, 54, 0.9)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(50, 172, 45, 0.97)"
+ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 0,
+ "mappingType": 1,
+ "pattern": "Metric",
+ "thresholds": [],
+ "type": "string",
+ "unit": "short"
+ },
+ {
+ "alias": "",
+ "colorMode": null,
+ "colors": [
+ "rgba(245, 54, 54, 0.9)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(50, 172, 45, 0.97)"
+ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 0,
+ "mappingType": 1,
+ "pattern": "/.*/",
+ "thresholds": [],
+ "type": "number",
+ "unit": "percent"
+ }
+ ],
+ "targets": [
+ {
+ "expr": "(max by (instance) (irate(node_cpu_seconds_total{job=\"node\", mode=~\"user|system|iowait|softirq\", instance=~\"$host\"}[5m])) * 100)",
+ "legendFormat": "{{instance}}",
+ "refId": "A"
+ },
+ {
+ "expr": "sum by(name) (irate(container_cpu_usage_seconds_total{name=~\"$container\"}[1m])) * 100",
+ "legendFormat": "{{name}}",
+ "refId": "B"
+ }
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "CPU",
+ "transform": "timeseries_aggregations",
+ "type": "table"
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": null,
+ "decimals": null,
+ "editable": true,
+ "error": false,
+ "fill": 0,
+ "fillGradient": 0,
+ "grid": {},
+ "gridPos": {
+ "h": 7,
+ "w": 15,
+ "x": 9,
+ "y": 23
+ },
+ "height": "",
+ "id": 28,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": false,
+ "hideEmpty": true,
+ "hideZero": true,
+ "max": true,
+ "min": true,
+ "rightSide": true,
+ "show": false,
+ "sort": "max",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "maxPerRow": 3,
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "repeat": null,
+ "repeatDirection": "h",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "calculatedInterval": "2s",
+ "datasourceErrors": {},
+ "errors": {},
+ "expr": "(max by (instance) (irate(node_cpu_seconds_total{job=\"node\", mode=~\"user|system|iowait|softirq\", instance=~\"$host\"}[5m])) * 100)",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 1,
+ "legendFormat": "{{instance}}",
+ "metric": "",
+ "refId": "A",
+ "step": 20
+ },
+ {
+ "expr": "sum by(name) (irate(container_cpu_usage_seconds_total{name=~\"$container\"}[1m])) * 100",
+ "format": "time_series",
+ "hide": false,
+ "intervalFactor": 1,
+ "legendFormat": "{{name}}",
+ "refId": "B"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "CPU",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "transparent": true,
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "percent",
+ "label": "",
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "collapsed": false,
+ "datasource": null,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 30
+ },
+ "id": 63,
+ "panels": [],
+ "title": "rabbitmq-perf-test",
+ "type": "row"
+ },
+ {
+ "columns": [
+ {
+ "text": "Min",
+ "value": "min"
+ },
+ {
+ "text": "Max",
+ "value": "max"
+ },
+ {
+ "text": "Avg",
+ "value": "avg"
+ },
+ {
+ "text": "Current",
+ "value": "current"
+ }
+ ],
+ "datasource": null,
+ "fontSize": "100%",
+ "gridPos": {
+ "h": 7,
+ "w": 9,
+ "x": 0,
+ "y": 31
+ },
+ "id": 49,
+ "options": {},
+ "pageSize": null,
+ "pluginVersion": "6.4.1",
+ "scroll": true,
+ "showHeader": true,
+ "sort": {
+ "col": 4,
+ "desc": true
+ },
+ "styles": [
+ {
+ "alias": "Instance",
+ "colorMode": null,
+ "colors": [
+ "rgba(245, 54, 54, 0.9)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(50, 172, 45, 0.97)"
+ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 0,
+ "mappingType": 1,
+ "pattern": "Metric",
+ "thresholds": [],
+ "type": "string",
+ "unit": "short"
+ },
+ {
+ "alias": "",
+ "colorMode": null,
+ "colors": [
+ "rgba(245, 54, 54, 0.9)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(50, 172, 45, 0.97)"
+ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 1,
+ "mappingType": 1,
+ "pattern": "/.*/",
+ "thresholds": [],
+ "type": "number",
+ "unit": "short"
+ }
+ ],
+ "targets": [
+ {
+ "expr": "perftest_published{instance=~\"$instance\"}",
+ "legendFormat": "{{instance}}",
+ "refId": "A"
+ }
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Messages published / s",
+ "transform": "timeseries_aggregations",
+ "type": "table"
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": null,
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 7,
+ "w": 15,
+ "x": 9,
+ "y": 31
+ },
+ "id": 51,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": true,
+ "min": true,
+ "rightSide": false,
+ "show": false,
+ "sort": "avg",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "perftest_published{instance=~\"$instance\"}",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "legendFormat": "{{instance}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Messages published / s",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "transparent": true,
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "decimals": null,
+ "format": "short",
+ "label": "",
+ "logBase": 1,
+ "max": null,
+ "min": "0",
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "columns": [
+ {
+ "text": "Min",
+ "value": "min"
+ },
+ {
+ "text": "Max",
+ "value": "max"
+ },
+ {
+ "text": "Avg",
+ "value": "avg"
+ },
+ {
+ "text": "Current",
+ "value": "current"
+ }
+ ],
+ "datasource": null,
+ "fontSize": "100%",
+ "gridPos": {
+ "h": 7,
+ "w": 9,
+ "x": 0,
+ "y": 38
+ },
+ "id": 55,
+ "options": {},
+ "pageSize": null,
+ "pluginVersion": "6.4.1",
+ "scroll": true,
+ "showHeader": true,
+ "sort": {
+ "col": 4,
+ "desc": true
+ },
+ "styles": [
+ {
+ "alias": "Instance",
+ "colorMode": null,
+ "colors": [
+ "rgba(245, 54, 54, 0.9)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(50, 172, 45, 0.97)"
+ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 0,
+ "mappingType": 1,
+ "pattern": "Metric",
+ "thresholds": [],
+ "type": "string",
+ "unit": "short"
+ },
+ {
+ "alias": "",
+ "colorMode": null,
+ "colors": [
+ "rgba(245, 54, 54, 0.9)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(50, 172, 45, 0.97)"
+ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 0,
+ "mappingType": 1,
+ "pattern": "/.*/",
+ "thresholds": [],
+ "type": "number",
+ "unit": "short"
+ }
+ ],
+ "targets": [
+ {
+ "expr": "perftest_consumed{instance=~\"$instance\"}",
+ "legendFormat": "{{instance}}",
+ "refId": "A"
+ }
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Messages consumed / s",
+ "transform": "timeseries_aggregations",
+ "type": "table"
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": null,
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 7,
+ "w": 15,
+ "x": 9,
+ "y": 38
+ },
+ "id": 53,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": true,
+ "min": true,
+ "rightSide": false,
+ "show": false,
+ "sort": "avg",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "perftest_consumed{instance=~\"$instance\"}",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "legendFormat": "{{instance}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Messages consumed / s",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "transparent": true,
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "decimals": null,
+ "format": "short",
+ "label": "",
+ "logBase": 1,
+ "max": null,
+ "min": "0",
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "columns": [
+ {
+ "text": "Min",
+ "value": "min"
+ },
+ {
+ "text": "Max",
+ "value": "max"
+ },
+ {
+ "text": "Avg",
+ "value": "avg"
+ },
+ {
+ "text": "Current",
+ "value": "current"
+ }
+ ],
+ "datasource": null,
+ "fontSize": "100%",
+ "gridPos": {
+ "h": 7,
+ "w": 9,
+ "x": 0,
+ "y": 45
+ },
+ "id": 47,
+ "options": {},
+ "pageSize": null,
+ "pluginVersion": "6.4.1",
+ "scroll": true,
+ "showHeader": true,
+ "sort": {
+ "col": 4,
+ "desc": true
+ },
+ "styles": [
+ {
+ "alias": "Instance",
+ "colorMode": null,
+ "colors": [
+ "rgba(245, 54, 54, 0.9)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(50, 172, 45, 0.97)"
+ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 0,
+ "mappingType": 1,
+ "pattern": "Metric",
+ "thresholds": [],
+ "type": "string",
+ "unit": "short"
+ },
+ {
+ "alias": "",
+ "colorMode": null,
+ "colors": [
+ "rgba(245, 54, 54, 0.9)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(50, 172, 45, 0.97)"
+ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 1,
+ "mappingType": 1,
+ "pattern": "/.*/",
+ "thresholds": [],
+ "type": "number",
+ "unit": "s"
+ }
+ ],
+ "targets": [
+ {
+ "expr": "perftest_latency_seconds{quantile=\"$percentile\", instance=~\"$instance\"}",
+ "legendFormat": "{{instance}}",
+ "refId": "A"
+ }
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "End-to-end message latency",
+ "transform": "timeseries_aggregations",
+ "type": "table"
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": null,
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 7,
+ "w": 15,
+ "x": 9,
+ "y": 45
+ },
+ "id": 45,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": true,
+ "min": true,
+ "rightSide": false,
+ "show": false,
+ "sort": "avg",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "perftest_latency_seconds{quantile=\"$percentile\", instance=~\"$instance\"}",
+ "format": "time_series",
+ "instant": false,
+ "interval": "1s",
+ "intervalFactor": 1,
+ "legendFormat": "{{instance}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "End-to-end message latency",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "transparent": true,
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "s",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": "0",
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": true,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": null,
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 8,
+ "w": 9,
+ "x": 0,
+ "y": 52
+ },
+ "id": 43,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": true,
+ "min": true,
+ "rightSide": false,
+ "show": false,
+ "sort": "max",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": false,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "perftest_latency_seconds{quantile=\"$percentile\", instance=~\"$instance\"}",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "legendFormat": "{{instance}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "End-to-end message latency distribution",
+ "tooltip": {
+ "shared": false,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "transparent": true,
+ "type": "graph",
+ "xaxis": {
+ "buckets": 20,
+ "mode": "histogram",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "none",
+ "label": "",
+ "logBase": 1,
+ "max": null,
+ "min": "0",
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "cards": {
+ "cardPadding": null,
+ "cardRound": null
+ },
+ "color": {
+ "cardColor": "rgb(255, 255, 255)",
+ "colorScale": "sqrt",
+ "colorScheme": "interpolateBlues",
+ "exponent": 0.4,
+ "max": null,
+ "min": null,
+ "mode": "opacity"
+ },
+ "dataFormat": "timeseries",
+ "datasource": null,
+ "gridPos": {
+ "h": 8,
+ "w": 15,
+ "x": 9,
+ "y": 52
+ },
+ "heatmap": {},
+ "hideZeroBuckets": true,
+ "highlightCards": true,
+ "id": 41,
+ "legend": {
+ "show": true
+ },
+ "links": [],
+ "options": {},
+ "reverseYBuckets": false,
+ "targets": [
+ {
+ "expr": "perftest_latency_seconds{quantile=\"$percentile\", instance=~\"$instance\"}",
+ "format": "heatmap",
+ "intervalFactor": 1,
+ "refId": "A"
+ }
+ ],
+ "title": "End-to-end message latency distribution",
+ "tooltip": {
+ "show": true,
+ "showHistogram": true
+ },
+ "transparent": true,
+ "type": "heatmap",
+ "xAxis": {
+ "show": true
+ },
+ "xBucketNumber": null,
+ "xBucketSize": null,
+ "yAxis": {
+ "decimals": null,
+ "format": "s",
+ "logBase": 1,
+ "max": null,
+ "min": "0",
+ "show": true,
+ "splitFactor": null
+ },
+ "yBucketBound": "auto",
+ "yBucketNumber": null,
+ "yBucketSize": null
+ }
+ ],
+ "refresh": "15s",
+ "schemaVersion": 20,
+ "style": "dark",
+ "tags": [
+ "cadvisor",
+ "node-exporter",
+ "rabbitmq-perf-test",
+ "rabbitmq-prometheus"
+ ],
+ "templating": {
+ "list": [
+ {
+ "allValue": null,
+ "current": {
+ "text": "All",
+ "value": [
+ "$__all"
+ ]
+ },
+ "datasource": null,
+ "definition": "label_values(rabbitmq_identity_info, rabbitmq_cluster)",
+ "hide": 0,
+ "includeAll": true,
+ "label": "RabbitMQ Cluster",
+ "multi": true,
+ "name": "rabbitmq_cluster",
+ "options": [],
+ "query": "label_values(rabbitmq_identity_info, rabbitmq_cluster)",
+ "refresh": 2,
+ "regex": "",
+ "skipUrlSync": false,
+ "sort": 1,
+ "tagValuesQuery": "",
+ "tags": [],
+ "tagsQuery": "",
+ "type": "query",
+ "useTags": false
+ },
+ {
+ "allValue": null,
+ "current": {
+ "text": "All",
+ "value": [
+ "$__all"
+ ]
+ },
+ "datasource": null,
+ "definition": "label_values(perftest_published, instance)",
+ "hide": 0,
+ "includeAll": true,
+ "label": "PerfTest Instance",
+ "multi": true,
+ "name": "instance",
+ "options": [],
+ "query": "label_values(perftest_published, instance)",
+ "refresh": 2,
+ "regex": "",
+ "skipUrlSync": false,
+ "sort": 1,
+ "tagValuesQuery": "",
+ "tags": [],
+ "tagsQuery": "",
+ "type": "query",
+ "useTags": false
+ },
+ {
+ "allValue": null,
+ "current": {
+ "text": "0.99",
+ "value": "0.99"
+ },
+ "datasource": null,
+ "definition": "label_values(perftest_latency_seconds, quantile)",
+ "hide": 0,
+ "includeAll": false,
+ "label": "Percentile",
+ "multi": false,
+ "name": "percentile",
+ "options": [],
+ "query": "label_values(perftest_latency_seconds, quantile)",
+ "refresh": 2,
+ "regex": "",
+ "skipUrlSync": false,
+ "sort": 4,
+ "tagValuesQuery": "",
+ "tags": [],
+ "tagsQuery": "",
+ "type": "query",
+ "useTags": false
+ },
+ {
+ "allValue": null,
+ "current": {
+ "text": "All",
+ "value": [
+ "$__all"
+ ]
+ },
+ "datasource": null,
+ "definition": "label_values(node_network_info, instance)",
+ "hide": 0,
+ "includeAll": true,
+ "label": "Host",
+ "multi": true,
+ "name": "host",
+ "options": [],
+ "query": "label_values(node_network_info, instance)",
+ "refresh": 2,
+ "regex": "",
+ "skipUrlSync": false,
+ "sort": 1,
+ "tagValuesQuery": "",
+ "tags": [],
+ "tagsQuery": "",
+ "type": "query",
+ "useTags": false
+ },
+ {
+ "allValue": null,
+ "current": {
+ "text": "All",
+ "value": [
+ "$__all"
+ ]
+ },
+ "datasource": null,
+ "definition": "label_values(container_network_receive_bytes_total, name)",
+ "hide": 0,
+ "includeAll": true,
+ "label": "or Container",
+ "multi": true,
+ "name": "container",
+ "options": [],
+ "query": "label_values(container_network_receive_bytes_total, name)",
+ "refresh": 2,
+ "regex": "",
+ "skipUrlSync": false,
+ "sort": 1,
+ "tagValuesQuery": "",
+ "tags": [],
+ "tagsQuery": "",
+ "type": "query",
+ "useTags": false
+ }
+ ]
+ },
+ "time": {
+ "from": "now-15m",
+ "to": "now"
+ },
+ "timepicker": {
+ "refresh_intervals": [
+ "15s",
+ "30s",
+ "1m",
+ "5m",
+ "10m"
+ ],
+ "time_options": [
+ "5m",
+ "15m",
+ "1h",
+ "6h",
+ "12h",
+ "24h",
+ "2d",
+ "7d",
+ "30d"
+ ]
+ },
+ "timezone": "",
+ "title": "Erlang-Distributions-Compare",
+ "uid": "C0jeDstZk",
+ "version": 1
+}
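Editor's note: the Erlang-Distributions-Compare dashboard above is driven entirely by the `perftest_latency_seconds` metric, with its `$percentile` and `$instance` template variables populated via `label_values(...)` queries. Before importing the dashboard it can be useful to confirm that Prometheus is actually scraping that metric. Below is a minimal sketch using the standard Prometheus HTTP API (`GET /api/v1/query`); the server address `http://localhost:9090` and the `requests` dependency are illustrative assumptions, not part of this patch.

# Sanity-check that Prometheus exposes the metrics the dashboard queries.
# Assumes a Prometheus server at http://localhost:9090 and the `requests`
# package; both are assumptions for illustration only.
import requests

PROM = "http://localhost:9090"

def instant_query(expr):
    # Prometheus HTTP API: GET /api/v1/query?query=<expr>
    resp = requests.get(f"{PROM}/api/v1/query", params={"query": expr})
    resp.raise_for_status()
    body = resp.json()
    assert body["status"] == "success", body
    return body["data"]["result"]

# The latency panels select one quantile per PerfTest instance.
for s in instant_query('perftest_latency_seconds{quantile="0.99"}'):
    print(s["metric"].get("instance"), s["value"][1])

# The $percentile variable is populated from the same metric, via
# label_values(perftest_latency_seconds, quantile); list what it would see:
quantiles = {s["metric"]["quantile"] for s in instant_query("perftest_latency_seconds")}
print("available quantiles:", sorted(quantiles))

If both queries return series, the dashboard's panels and template variables will populate once it is imported.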
diff --git a/deps/rabbitmq_prometheus/docker/grafana/dashboards/Erlang-Memory-Allocators.json b/deps/rabbitmq_prometheus/docker/grafana/dashboards/Erlang-Memory-Allocators.json
new file mode 100644
index 0000000000..ffaa8d2b2c
--- /dev/null
+++ b/deps/rabbitmq_prometheus/docker/grafana/dashboards/Erlang-Memory-Allocators.json
@@ -0,0 +1,2355 @@
+{
+ "__requires": [
+ {
+ "type": "grafana",
+ "id": "grafana",
+ "name": "Grafana",
+ "version": "6.0.0"
+ },
+ {
+ "type": "datasource",
+ "id": "prometheus",
+ "name": "prometheus",
+ "version": "1.0.0"
+ },
+ {
+ "type": "panel",
+ "id": "singlestat",
+ "name": "Singlestat",
+ "version": ""
+ },
+ {
+ "type": "table",
+ "id": "table",
+ "name": "Table",
+ "version": ""
+ },
+ {
+ "type": "panel",
+ "id": "graph",
+ "name": "Graph",
+ "version": ""
+ }
+ ],
+ "annotations": {
+ "list": [
+ {
+ "builtIn": 1,
+ "datasource": "-- Grafana --",
+ "enable": true,
+ "hide": true,
+ "iconColor": "rgba(0, 211, 255, 1)",
+ "name": "Annotations & Alerts",
+ "type": "dashboard"
+ }
+ ]
+ },
+ "description": "Erlang VM memory utilisation from erts_alloc perspective",
+ "editable": true,
+ "gnetId": null,
+ "graphTooltip": 1,
+ "id": null,
+ "iteration": 1582549977400,
+ "links": [],
+ "panels": [
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": true,
+ "colors": [
+ "#E02F44",
+ "#3274D9",
+ "#56A64B"
+ ],
+ "datasource": null,
+ "decimals": 1,
+ "format": "percent",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": true,
+ "thresholdMarkers": true
+ },
+ "gridPos": {
+ "h": 3,
+ "w": 4,
+ "x": 0,
+ "y": 0
+ },
+ "id": 50,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "options": {},
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "repeatDirection": "v",
+ "sparkline": {
+ "fillColor": "rgba(255, 255, 255, 0)",
+ "full": false,
+ "lineColor": "rgb(255, 255, 255)",
+ "show": true
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "expr": "sum (erlang_vm_allocators{usage=\"blocks_size\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", rabbitmq_node=\"$rabbitmq_node\"})\n/\nsum (erlang_vm_allocators{usage=\"carriers_size\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", rabbitmq_node=\"$rabbitmq_node\"})\n* 100",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "refId": "A"
+ }
+ ],
+ "thresholds": "25,50",
+ "title": "Allocated Used",
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": true,
+ "colors": [
+ "#56A64B",
+ "#3274D9",
+ "#E02F44"
+ ],
+ "datasource": null,
+ "decimals": 1,
+ "format": "percent",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": true,
+ "thresholdMarkers": true
+ },
+ "gridPos": {
+ "h": 3,
+ "w": 4,
+ "x": 4,
+ "y": 0
+ },
+ "id": 51,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "options": {},
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "repeatDirection": "v",
+ "sparkline": {
+ "fillColor": "rgba(255, 255, 255, 0)",
+ "full": false,
+ "lineColor": "rgb(255, 255, 255)",
+ "show": true
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "expr": "(\n sum (erlang_vm_allocators{usage=\"carriers_size\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", rabbitmq_node=\"$rabbitmq_node\"})\n -\n sum (erlang_vm_allocators{usage=\"blocks_size\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", rabbitmq_node=\"$rabbitmq_node\"})\n) / sum (erlang_vm_allocators{usage=\"carriers_size\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", rabbitmq_node=\"$rabbitmq_node\"})\n* 100",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "refId": "A"
+ }
+ ],
+ "thresholds": "50,75",
+ "title": "Allocated Unused",
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": false,
+ "colors": [
+ "#C4162A",
+ "#1F60C4",
+ "#37872D"
+ ],
+ "datasource": null,
+ "decimals": 1,
+ "description": "Total Size of Allocated Blocks\n\nMemory that is actively used by Erlang data.\n\nThe smallest unit of memory that an allocator can work with is called a `block`. When you call an allocator to allocate a certain amount of memory what you get back is a block. It is also blocks that you give as an argument to the allocator when you want to deallocate memory.\n\nThe allocator does not allocate blocks from the operating system directly though. Instead the allocator allocates a carrier from the operating system, either through `sys_alloc` or through `mseg_alloc`, which in turn uses `malloc` or `mmap`. If `sys_alloc` is used the carrier is placed on the C-heap and if `mseg_alloc` is used the carrier is placed in a segment.\n\nSmall blocks are placed in a multiblock carrier. A multiblock carrier can as the name suggests contain many blocks. Larger blocks are placed in a singleblock carrier, which as the name implies on contains one block.\n\nWhat’s considered a small and a large block is determined by the parameter singleblock carrier threshold (`sbct`).\n\n* [erts_alloc](http://erlang.org/doc/man/erts_alloc.html)\n* [The Memory Subsystem](https://github.com/happi/theBeamBook/blob/master/chapters/memory.asciidoc)",
+ "format": "decbytes",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": true,
+ "thresholdMarkers": true
+ },
+ "gridPos": {
+ "h": 3,
+ "w": 4,
+ "x": 8,
+ "y": 0
+ },
+ "id": 215,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "options": {},
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "repeatDirection": "v",
+ "sparkline": {
+ "fillColor": "rgba(255, 255, 255, 0)",
+ "full": false,
+ "lineColor": "rgb(255, 255, 255)",
+ "show": true
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "expr": "sum (erlang_vm_allocators{usage=\"blocks_size\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", rabbitmq_node=\"$rabbitmq_node\"})",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "refId": "A"
+ }
+ ],
+ "thresholds": "",
+ "title": "Allocated Used",
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": false,
+ "colors": [
+ "#C4162A",
+ "#1F60C4",
+ "#37872D"
+ ],
+ "datasource": null,
+ "decimals": 1,
+ "description": "Allocated Carriers - Allocated Blocks\n\n* [erts_alloc](http://erlang.org/doc/man/erts_alloc.html)\n* [The Memory Subsystem](https://github.com/happi/theBeamBook/blob/master/chapters/memory.asciidoc)",
+ "format": "decbytes",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": true,
+ "thresholdMarkers": true
+ },
+ "gridPos": {
+ "h": 3,
+ "w": 4,
+ "x": 12,
+ "y": 0
+ },
+ "id": 216,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "options": {},
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "repeatDirection": "v",
+ "sparkline": {
+ "fillColor": "rgba(255, 255, 255, 0)",
+ "full": false,
+ "lineColor": "rgb(255, 255, 255)",
+ "show": true
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "expr": "sum (erlang_vm_allocators{usage=\"carriers_size\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", rabbitmq_node=\"$rabbitmq_node\"})\n-\nsum (erlang_vm_allocators{usage=\"blocks_size\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", rabbitmq_node=\"$rabbitmq_node\"})",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "refId": "A"
+ }
+ ],
+ "thresholds": "",
+ "title": "Allocated Unused",
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": false,
+ "colors": [
+ "#C4162A",
+ "#1F60C4",
+ "#37872D"
+ ],
+ "datasource": null,
+ "decimals": 1,
+ "description": "Total Size of Allocated Carriers\n\nMemory that is reserved by the Erlang VM, which fits one of the following descriptions:\n\n* actively used memory\n* memory given by the OS, yet-to-be-used\n* deallocated memory kept around before it is destroyed\n* carrier gaps in multi-block carriers (a.k.a. memory fragmentation)\n\nThe total size of allocated carriers can be either larger or smaller than the Erlang VM system process RSS memory.\n\n* [erts_alloc](http://erlang.org/doc/man/erts_alloc.html)\n* [The Memory Subsystem](https://github.com/happi/theBeamBook/blob/master/chapters/memory.asciidoc)",
+ "format": "decbytes",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": true,
+ "thresholdMarkers": true
+ },
+ "gridPos": {
+ "h": 3,
+ "w": 4,
+ "x": 16,
+ "y": 0
+ },
+ "id": 188,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "options": {},
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "repeatDirection": "v",
+ "sparkline": {
+ "fillColor": "rgba(255, 255, 255, 0)",
+ "full": false,
+ "lineColor": "rgb(255, 255, 255)",
+ "show": true
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "expr": "sum (erlang_vm_allocators{usage=\"carriers_size\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", rabbitmq_node=\"$rabbitmq_node\"})",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "refId": "A"
+ }
+ ],
+ "thresholds": "",
+ "title": "Allocated Total",
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": false,
+ "colors": [
+ "#C4162A",
+ "#1F60C4",
+ "#37872D"
+ ],
+ "datasource": null,
+ "decimals": 1,
+ "description": "Erlang VM Resident Set Size (RSS)\n\nAs reported by the OS",
+ "format": "decbytes",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": true,
+ "thresholdMarkers": true
+ },
+ "gridPos": {
+ "h": 3,
+ "w": 4,
+ "x": 20,
+ "y": 0
+ },
+ "id": 214,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "options": {},
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "repeatDirection": "v",
+ "sparkline": {
+ "fillColor": "rgba(255, 255, 255, 0)",
+ "full": false,
+ "lineColor": "rgb(255, 255, 255)",
+ "show": true
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "expr": "sum (rabbitmq_process_resident_memory_bytes * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", rabbitmq_node=\"$rabbitmq_node\"})",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "refId": "A"
+ }
+ ],
+ "thresholds": "",
+ "title": "Resident Set Size",
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "columns": [
+ {
+ "text": "Min",
+ "value": "min"
+ },
+ {
+ "text": "Max",
+ "value": "max"
+ },
+ {
+ "text": "Avg",
+ "value": "avg"
+ },
+ {
+ "text": "Current",
+ "value": "current"
+ }
+ ],
+ "datasource": null,
+ "fontSize": "100%",
+ "gridPos": {
+ "h": 7,
+ "w": 8,
+ "x": 0,
+ "y": 3
+ },
+ "id": 59,
+ "options": {},
+ "pageSize": null,
+ "pluginVersion": "6.4.1",
+ "scroll": true,
+ "showHeader": true,
+ "sort": {
+ "col": 3,
+ "desc": true
+ },
+ "styles": [
+ {
+ "alias": "",
+ "align": "auto",
+ "colorMode": null,
+ "colors": [
+ "rgba(245, 54, 54, 0.9)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(50, 172, 45, 0.97)"
+ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 0,
+ "mappingType": 1,
+ "pattern": "Metric",
+ "thresholds": [],
+ "type": "string",
+ "unit": "short"
+ },
+ {
+ "alias": "",
+ "align": "auto",
+ "colorMode": null,
+ "colors": [
+ "rgba(245, 54, 54, 0.9)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(50, 172, 45, 0.97)"
+ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 1,
+ "mappingType": 1,
+ "pattern": "/.*/",
+ "thresholds": [],
+ "type": "number",
+ "unit": "decbytes"
+ }
+ ],
+ "targets": [
+ {
+ "expr": "rabbitmq_process_resident_memory_bytes * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", rabbitmq_node=\"$rabbitmq_node\"}",
+ "legendFormat": "Resident Set Size",
+ "refId": "A"
+ },
+ {
+ "expr": "sum (erlang_vm_allocators{usage=\"blocks_size\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", rabbitmq_node=\"$rabbitmq_node\"})",
+ "legendFormat": "Allocated Used",
+ "refId": "B"
+ },
+ {
+ "expr": "sum (erlang_vm_allocators{usage=\"carriers_size\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", rabbitmq_node=\"$rabbitmq_node\"})\n-\nsum (erlang_vm_allocators{usage=\"blocks_size\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", rabbitmq_node=\"$rabbitmq_node\"})",
+ "legendFormat": "Allocated Unused",
+ "refId": "C"
+ }
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "",
+ "transform": "timeseries_aggregations",
+ "type": "table"
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "cacheTimeout": null,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": null,
+ "fill": 0,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 7,
+ "w": 16,
+ "x": 8,
+ "y": 3
+ },
+ "hiddenSeries": false,
+ "id": 61,
+ "legend": {
+ "alignAsTable": false,
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "rightSide": false,
+ "show": true,
+ "sort": "current",
+ "sortDesc": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "repeatDirection": "v",
+ "seriesOverrides": [
+ {
+ "alias": "Allocated Used",
+ "color": "#37872D"
+ },
+ {
+ "alias": "Allocated Unused",
+ "color": "#FADE2A"
+ },
+ {
+ "alias": "Resident Set Size",
+ "color": "#C4162A",
+ "stack": false
+ }
+ ],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "rabbitmq_process_resident_memory_bytes * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", rabbitmq_node=\"$rabbitmq_node\"}",
+ "format": "time_series",
+ "instant": false,
+ "intervalFactor": 1,
+ "legendFormat": "Resident Set Size",
+ "refId": "A"
+ },
+ {
+ "expr": "sum (erlang_vm_allocators{usage=\"blocks_size\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", rabbitmq_node=\"$rabbitmq_node\"})",
+ "legendFormat": "Allocated Used",
+ "refId": "B"
+ },
+ {
+ "expr": "sum (erlang_vm_allocators{usage=\"carriers_size\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", rabbitmq_node=\"$rabbitmq_node\"})\n-\nsum (erlang_vm_allocators{usage=\"blocks_size\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", rabbitmq_node=\"$rabbitmq_node\"})",
+ "legendFormat": "Allocated Unused",
+ "refId": "C"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "decimals": 1,
+ "format": "decbytes",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": "0",
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "collapsed": false,
+ "datasource": null,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 10
+ },
+ "id": 226,
+ "panels": [],
+ "title": "Allocated by Allocator Type",
+ "type": "row"
+ },
+ {
+ "columns": [
+ {
+ "text": "Min",
+ "value": "min"
+ },
+ {
+ "text": "Max",
+ "value": "max"
+ },
+ {
+ "text": "Avg",
+ "value": "avg"
+ },
+ {
+ "text": "Current",
+ "value": "current"
+ }
+ ],
+ "datasource": null,
+ "description": "",
+ "fontSize": "100%",
+ "gridPos": {
+ "h": 10,
+ "w": 8,
+ "x": 0,
+ "y": 11
+ },
+ "id": 55,
+ "options": {},
+ "pageSize": null,
+ "pluginVersion": "6.4.1",
+ "scroll": true,
+ "showHeader": true,
+ "sort": {
+ "col": 3,
+ "desc": true
+ },
+ "styles": [
+ {
+ "alias": "Memory Allocator",
+ "align": "auto",
+ "colorMode": null,
+ "colors": [
+ "rgba(245, 54, 54, 0.9)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(50, 172, 45, 0.97)"
+ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 0,
+ "mappingType": 1,
+ "pattern": "Metric",
+ "thresholds": [],
+ "type": "string",
+ "unit": "short"
+ },
+ {
+ "alias": "",
+ "align": "auto",
+ "colorMode": null,
+ "colors": [
+ "rgba(245, 54, 54, 0.9)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(50, 172, 45, 0.97)"
+ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 1,
+ "mappingType": 1,
+ "pattern": "/.*/",
+ "thresholds": [],
+ "type": "number",
+ "unit": "decbytes"
+ }
+ ],
+ "targets": [
+ {
+ "expr": "sum by(alloc) (erlang_vm_allocators{usage=\"carriers_size\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", rabbitmq_node=\"$rabbitmq_node\"})",
+ "legendFormat": "{{alloc}}",
+ "refId": "A"
+ }
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "",
+ "transform": "timeseries_aggregations",
+ "type": "table"
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "cacheTimeout": null,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": null,
+ "fill": 0,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 10,
+ "w": 16,
+ "x": 8,
+ "y": 11
+ },
+ "hiddenSeries": false,
+ "id": 53,
+ "legend": {
+ "alignAsTable": false,
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "rightSide": false,
+ "show": true,
+ "sort": "current",
+ "sortDesc": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "repeatDirection": "v",
+ "seriesOverrides": [
+ {
+ "alias": "binary_alloc"
+ }
+ ],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum by(alloc) (erlang_vm_allocators{usage=\"carriers_size\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", rabbitmq_node=\"$rabbitmq_node\"})",
+ "format": "time_series",
+ "instant": false,
+ "intervalFactor": 1,
+ "legendFormat": "{{alloc}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "decimals": 1,
+ "format": "decbytes",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": "0",
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "collapsed": false,
+ "datasource": null,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 21
+ },
+ "id": 63,
+ "panels": [],
+ "repeat": "memory_allocator",
+ "title": "$memory_allocator",
+ "type": "row"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": true,
+ "colors": [
+ "#E02F44",
+ "#3274D9",
+ "#56A64B"
+ ],
+ "datasource": null,
+ "decimals": 1,
+ "format": "percent",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "gridPos": {
+ "h": 3,
+ "w": 4,
+ "x": 0,
+ "y": 22
+ },
+ "id": 20,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "options": {},
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "sparkline": {
+ "fillColor": "rgba(255, 255, 255, 0)",
+ "full": false,
+ "lineColor": "rgb(255, 255, 255)",
+ "show": true
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "expr": "sum (erlang_vm_allocators{alloc=~\"$memory_allocator\", usage=\"blocks_size\", kind=\"mbcs\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", rabbitmq_node=\"$rabbitmq_node\"})\n/\nsum (erlang_vm_allocators{alloc=~\"$memory_allocator\", usage=\"carriers_size\", kind=\"mbcs\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", rabbitmq_node=\"$rabbitmq_node\"})\n* 100",
+ "format": "time_series",
+ "instant": false,
+ "intervalFactor": 1,
+ "refId": "A"
+ }
+ ],
+ "thresholds": "50,75",
+ "title": "Multiblock - Utilization",
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": false,
+ "colors": [
+ "#C4162A",
+ "#1F60C4",
+ "#37872D"
+ ],
+ "datasource": null,
+ "decimals": 1,
+ "format": "decbytes",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "gridPos": {
+ "h": 3,
+ "w": 2,
+ "x": 4,
+ "y": 22
+ },
+ "id": 234,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "options": {},
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "sparkline": {
+ "fillColor": "rgba(255, 255, 255, 0)",
+ "full": false,
+ "lineColor": "rgb(255, 255, 255)",
+ "show": true
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "expr": "sum (erlang_vm_allocators{kind=\"mbcs\", alloc=\"$memory_allocator\", usage=\"carriers_size\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", rabbitmq_node=\"$rabbitmq_node\"})\n/\nsum (erlang_vm_allocators{kind=\"mbcs\", alloc=\"$memory_allocator\", usage=\"carriers\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", rabbitmq_node=\"$rabbitmq_node\"})",
+ "format": "time_series",
+ "instant": false,
+ "intervalFactor": 1,
+ "refId": "A"
+ }
+ ],
+ "thresholds": "",
+ "title": "MB-C",
+ "type": "singlestat",
+ "valueFontSize": "50%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": false,
+ "colors": [
+ "#C4162A",
+ "#1F60C4",
+ "#37872D"
+ ],
+ "datasource": null,
+ "decimals": 1,
+ "format": "decbytes",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "gridPos": {
+ "h": 3,
+ "w": 2,
+ "x": 6,
+ "y": 22
+ },
+ "id": 236,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "options": {},
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "sparkline": {
+ "fillColor": "rgba(255, 255, 255, 0)",
+ "full": false,
+ "lineColor": "rgb(255, 255, 255)",
+ "show": true
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "expr": "sum (erlang_vm_allocators{kind=\"mbcs\", alloc=\"$memory_allocator\", usage=\"blocks_size\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", rabbitmq_node=\"$rabbitmq_node\"})\n/\nsum (erlang_vm_allocators{kind=\"mbcs\", alloc=\"$memory_allocator\", usage=\"blocks\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", rabbitmq_node=\"$rabbitmq_node\"})",
+ "format": "time_series",
+ "instant": false,
+ "intervalFactor": 1,
+ "refId": "A"
+ }
+ ],
+ "thresholds": "",
+ "title": "MB-B",
+ "type": "singlestat",
+ "valueFontSize": "50%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": true,
+ "colors": [
+ "#E02F44",
+ "#3274D9",
+ "#56A64B"
+ ],
+ "datasource": null,
+ "decimals": 1,
+ "format": "percent",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "gridPos": {
+ "h": 3,
+ "w": 4,
+ "x": 8,
+ "y": 22
+ },
+ "id": 223,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "options": {},
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "sparkline": {
+ "fillColor": "rgba(255, 255, 255, 0)",
+ "full": false,
+ "lineColor": "rgb(255, 255, 255)",
+ "show": true
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "expr": "sum (erlang_vm_allocators{alloc=~\"$memory_allocator\", usage=\"blocks_size\", kind=\"mbcs_pool\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", rabbitmq_node=\"$rabbitmq_node\"})\n/\nsum (erlang_vm_allocators{alloc=~\"$memory_allocator\", usage=\"carriers_size\", kind=\"mbcs_pool\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", rabbitmq_node=\"$rabbitmq_node\"})\n* 100",
+ "format": "time_series",
+ "instant": false,
+ "intervalFactor": 1,
+ "refId": "A"
+ }
+ ],
+ "thresholds": "50,75",
+ "title": "Multiblock Pool - Utilization",
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": false,
+ "colors": [
+ "#C4162A",
+ "#1F60C4",
+ "#37872D"
+ ],
+ "datasource": null,
+ "decimals": 1,
+ "format": "decbytes",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "gridPos": {
+ "h": 3,
+ "w": 2,
+ "x": 12,
+ "y": 22
+ },
+ "id": 238,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "options": {},
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "sparkline": {
+ "fillColor": "rgba(255, 255, 255, 0)",
+ "full": false,
+ "lineColor": "rgb(255, 255, 255)",
+ "show": true
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "expr": "sum (erlang_vm_allocators{kind=\"mbcs_pool\", alloc=\"$memory_allocator\", usage=\"carriers_size\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", rabbitmq_node=\"$rabbitmq_node\"})\n/\nsum (erlang_vm_allocators{kind=\"mbcs_pool\", alloc=\"$memory_allocator\", usage=\"carriers\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", rabbitmq_node=\"$rabbitmq_node\"})",
+ "format": "time_series",
+ "instant": false,
+ "intervalFactor": 1,
+ "refId": "A"
+ }
+ ],
+ "thresholds": "",
+ "title": "MBP-C",
+ "type": "singlestat",
+ "valueFontSize": "50%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": false,
+ "colors": [
+ "#C4162A",
+ "#1F60C4",
+ "#37872D"
+ ],
+ "datasource": null,
+ "decimals": 1,
+ "format": "decbytes",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "gridPos": {
+ "h": 3,
+ "w": 2,
+ "x": 14,
+ "y": 22
+ },
+ "id": 241,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "options": {},
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "sparkline": {
+ "fillColor": "rgba(255, 255, 255, 0)",
+ "full": false,
+ "lineColor": "rgb(255, 255, 255)",
+ "show": true
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "expr": "sum (erlang_vm_allocators{kind=\"mbcs_pool\", alloc=\"$memory_allocator\", usage=\"blocks_size\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", rabbitmq_node=\"$rabbitmq_node\"})\n/\nsum (erlang_vm_allocators{kind=\"mbcs_pool\", alloc=\"$memory_allocator\", usage=\"blocks\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", rabbitmq_node=\"$rabbitmq_node\"})",
+ "format": "time_series",
+ "instant": false,
+ "intervalFactor": 1,
+ "refId": "A"
+ }
+ ],
+ "thresholds": "",
+ "title": "MBP-B",
+ "type": "singlestat",
+ "valueFontSize": "50%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": true,
+ "colors": [
+ "#E02F44",
+ "#3274D9",
+ "#56A64B"
+ ],
+ "datasource": null,
+ "decimals": 1,
+ "format": "percent",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "gridPos": {
+ "h": 3,
+ "w": 4,
+ "x": 16,
+ "y": 22
+ },
+ "id": 231,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "options": {},
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "sparkline": {
+ "fillColor": "rgba(255, 255, 255, 0)",
+ "full": false,
+ "lineColor": "rgb(255, 255, 255)",
+ "show": true
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "expr": "sum (erlang_vm_allocators{alloc=~\"$memory_allocator\", usage=\"blocks_size\", kind=\"sbcs\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", rabbitmq_node=\"$rabbitmq_node\"})\n/\nsum (erlang_vm_allocators{alloc=~\"$memory_allocator\", usage=\"carriers_size\", kind=\"sbcs\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", rabbitmq_node=\"$rabbitmq_node\"})\n* 100",
+ "format": "time_series",
+ "instant": false,
+ "intervalFactor": 1,
+ "refId": "A"
+ }
+ ],
+ "thresholds": "50,75",
+ "title": "Singleblock - Utilization",
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": false,
+ "colors": [
+ "#C4162A",
+ "#1F60C4",
+ "#37872D"
+ ],
+ "datasource": null,
+ "decimals": 1,
+ "format": "decbytes",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "gridPos": {
+ "h": 3,
+ "w": 2,
+ "x": 20,
+ "y": 22
+ },
+ "id": 242,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "options": {},
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "sparkline": {
+ "fillColor": "rgba(255, 255, 255, 0)",
+ "full": false,
+ "lineColor": "rgb(255, 255, 255)",
+ "show": true
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "expr": "sum (erlang_vm_allocators{kind=\"sbcs\", alloc=\"$memory_allocator\", usage=\"carriers_size\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", rabbitmq_node=\"$rabbitmq_node\"})\n/\nsum (erlang_vm_allocators{kind=\"sbcs\", alloc=\"$memory_allocator\", usage=\"carriers\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", rabbitmq_node=\"$rabbitmq_node\"})",
+ "format": "time_series",
+ "instant": false,
+ "intervalFactor": 1,
+ "refId": "A"
+ }
+ ],
+ "thresholds": "",
+ "title": "SB-C",
+ "type": "singlestat",
+ "valueFontSize": "50%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": false,
+ "colors": [
+ "#C4162A",
+ "#1F60C4",
+ "#37872D"
+ ],
+ "datasource": null,
+ "decimals": 1,
+ "format": "decbytes",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "gridPos": {
+ "h": 3,
+ "w": 2,
+ "x": 22,
+ "y": 22
+ },
+ "id": 243,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "options": {},
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "sparkline": {
+ "fillColor": "rgba(255, 255, 255, 0)",
+ "full": false,
+ "lineColor": "rgb(255, 255, 255)",
+ "show": true
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "expr": "sum (erlang_vm_allocators{kind=\"sbcs\", alloc=\"$memory_allocator\", usage=\"blocks_size\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", rabbitmq_node=\"$rabbitmq_node\"})\n/\nsum (erlang_vm_allocators{kind=\"sbcs\", alloc=\"$memory_allocator\", usage=\"blocks\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", rabbitmq_node=\"$rabbitmq_node\"})",
+ "format": "time_series",
+ "instant": false,
+ "intervalFactor": 1,
+ "refId": "A"
+ }
+ ],
+ "thresholds": "",
+ "title": "SB-B",
+ "type": "singlestat",
+ "valueFontSize": "50%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "columns": [
+ {
+ "text": "Min",
+ "value": "min"
+ },
+ {
+ "text": "Max",
+ "value": "max"
+ },
+ {
+ "text": "Avg",
+ "value": "avg"
+ },
+ {
+ "text": "Current",
+ "value": "current"
+ }
+ ],
+ "datasource": null,
+ "description": "",
+ "fontSize": "100%",
+ "gridPos": {
+ "h": 7,
+ "w": 8,
+ "x": 0,
+ "y": 25
+ },
+ "id": 227,
+ "options": {},
+ "pageSize": null,
+ "pluginVersion": "6.4.1",
+ "scroll": true,
+ "showHeader": true,
+ "sort": {
+ "col": 0,
+ "desc": false
+ },
+ "styles": [
+ {
+ "alias": "",
+ "align": "auto",
+ "colorMode": null,
+ "colors": [
+ "rgba(245, 54, 54, 0.9)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(50, 172, 45, 0.97)"
+ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 0,
+ "mappingType": 1,
+ "pattern": "Metric",
+ "thresholds": [],
+ "type": "string",
+ "unit": "short"
+ },
+ {
+ "alias": "",
+ "align": "auto",
+ "colorMode": null,
+ "colors": [
+ "rgba(245, 54, 54, 0.9)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(50, 172, 45, 0.97)"
+ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 1,
+ "mappingType": 1,
+ "pattern": "/.*/",
+ "thresholds": [],
+ "type": "number",
+ "unit": "decbytes"
+ }
+ ],
+ "targets": [
+ {
+ "expr": "sum (erlang_vm_allocators{alloc=~\"$memory_allocator\", usage=\"blocks_size\", kind=\"mbcs\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", rabbitmq_node=\"$rabbitmq_node\"})",
+ "legendFormat": "Multiblock - Used",
+ "refId": "A"
+ },
+ {
+ "expr": "sum (erlang_vm_allocators{alloc=~\"$memory_allocator\", usage=\"carriers_size\", kind=\"mbcs\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", rabbitmq_node=\"$rabbitmq_node\"})\n-\nsum (erlang_vm_allocators{alloc=~\"$memory_allocator\", usage=\"blocks_size\", kind=\"mbcs\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", rabbitmq_node=\"$rabbitmq_node\"})",
+ "legendFormat": "Multiblock - Unused",
+ "refId": "B"
+ },
+ {
+ "expr": "sum (erlang_vm_allocators{alloc=~\"$memory_allocator\", usage=\"blocks_size\", kind=\"mbcs_pool\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", rabbitmq_node=\"$rabbitmq_node\"})",
+ "legendFormat": "Multiblock Pool - Used",
+ "refId": "C"
+ },
+ {
+ "expr": "sum (erlang_vm_allocators{alloc=~\"$memory_allocator\", usage=\"carriers_size\", kind=\"mbcs_pool\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", rabbitmq_node=\"$rabbitmq_node\"})\n-\nsum (erlang_vm_allocators{alloc=~\"$memory_allocator\", usage=\"blocks_size\", kind=\"mbcs_pool\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", rabbitmq_node=\"$rabbitmq_node\"})",
+ "legendFormat": "Multiblock Pool - Unused",
+ "refId": "D"
+ },
+ {
+ "expr": "sum (erlang_vm_allocators{alloc=~\"$memory_allocator\", usage=\"blocks_size\", kind=\"sbcs\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", rabbitmq_node=\"$rabbitmq_node\"})",
+ "legendFormat": "Singleblock - Used",
+ "refId": "E"
+ },
+ {
+ "expr": "sum (erlang_vm_allocators{alloc=~\"$memory_allocator\", usage=\"carriers_size\", kind=\"sbcs\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", rabbitmq_node=\"$rabbitmq_node\"})\n-\nsum (erlang_vm_allocators{alloc=~\"$memory_allocator\", usage=\"blocks_size\", kind=\"sbcs\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", rabbitmq_node=\"$rabbitmq_node\"})",
+ "legendFormat": "Singleblock - Unused",
+ "refId": "F"
+ }
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "$memory_allocator - Usage",
+ "transform": "timeseries_aggregations",
+ "type": "table"
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": null,
+ "fill": 0,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 7,
+ "w": 16,
+ "x": 8,
+ "y": 25
+ },
+ "hiddenSeries": false,
+ "id": 244,
+ "legend": {
+ "alignAsTable": false,
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "rightSide": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 0.5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "Multiblock - Used",
+ "color": "#1F60C4"
+ },
+ {
+ "alias": "Multiblock - Unused",
+ "color": "#8AB8FF"
+ },
+ {
+ "alias": "Multiblock Pool - Used",
+ "color": "#8F3BB8"
+ },
+ {
+ "alias": "Multiblock Pool - Unused",
+ "color": "#CA95E5"
+ },
+ {
+ "alias": "Singleblock - Used",
+ "color": "#FA6400"
+ },
+ {
+ "alias": "Singleblock - Unused",
+ "color": "#FFB357"
+ }
+ ],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum (erlang_vm_allocators{alloc=~\"$memory_allocator\", usage=\"blocks_size\", kind=\"mbcs\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", rabbitmq_node=\"$rabbitmq_node\"})",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "legendFormat": "Multiblock - Used",
+ "refId": "A"
+ },
+ {
+ "expr": "sum (erlang_vm_allocators{alloc=~\"$memory_allocator\", usage=\"carriers_size\", kind=\"mbcs\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", rabbitmq_node=\"$rabbitmq_node\"})\n-\nsum (erlang_vm_allocators{alloc=~\"$memory_allocator\", usage=\"blocks_size\", kind=\"mbcs\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", rabbitmq_node=\"$rabbitmq_node\"})",
+ "legendFormat": "Multiblock - Unused",
+ "refId": "B"
+ },
+ {
+ "expr": "sum (erlang_vm_allocators{alloc=~\"$memory_allocator\", usage=\"blocks_size\", kind=\"mbcs_pool\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", rabbitmq_node=\"$rabbitmq_node\"})",
+ "legendFormat": "Multiblock Pool - Used",
+ "refId": "C"
+ },
+ {
+ "expr": "sum (erlang_vm_allocators{alloc=~\"$memory_allocator\", usage=\"carriers_size\", kind=\"mbcs_pool\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", rabbitmq_node=\"$rabbitmq_node\"})\n-\nsum (erlang_vm_allocators{alloc=~\"$memory_allocator\", usage=\"blocks_size\", kind=\"mbcs_pool\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", rabbitmq_node=\"$rabbitmq_node\"})",
+ "legendFormat": "Multiblock Pool - Unused",
+ "refId": "D"
+ },
+ {
+ "expr": "sum (erlang_vm_allocators{alloc=~\"$memory_allocator\", usage=\"blocks_size\", kind=\"sbcs\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", rabbitmq_node=\"$rabbitmq_node\"})",
+ "legendFormat": "Singleblock - Used",
+ "refId": "E"
+ },
+ {
+ "expr": "sum (erlang_vm_allocators{alloc=~\"$memory_allocator\", usage=\"carriers_size\", kind=\"sbcs\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", rabbitmq_node=\"$rabbitmq_node\"})\n-\nsum (erlang_vm_allocators{alloc=~\"$memory_allocator\", usage=\"blocks_size\", kind=\"sbcs\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", rabbitmq_node=\"$rabbitmq_node\"})",
+ "legendFormat": "Singleblock - Unused",
+ "refId": "F"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "$memory_allocator - Usage",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "decimals": 1,
+ "format": "decbytes",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": "0",
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "columns": [
+ {
+ "text": "Min",
+ "value": "min"
+ },
+ {
+ "text": "Max",
+ "value": "max"
+ },
+ {
+ "text": "Avg",
+ "value": "avg"
+ },
+ {
+ "text": "Current",
+ "value": "current"
+ }
+ ],
+ "datasource": null,
+ "description": "",
+ "fontSize": "100%",
+ "gridPos": {
+ "h": 7,
+ "w": 8,
+ "x": 0,
+ "y": 32
+ },
+ "id": 232,
+ "options": {},
+ "pageSize": null,
+ "pluginVersion": "6.4.1",
+ "scroll": true,
+ "showHeader": true,
+ "sort": {
+ "col": 0,
+ "desc": false
+ },
+ "styles": [
+ {
+ "alias": "",
+ "align": "auto",
+ "colorMode": null,
+ "colors": [
+ "rgba(245, 54, 54, 0.9)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(50, 172, 45, 0.97)"
+ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 0,
+ "mappingType": 1,
+ "pattern": "Metric",
+ "thresholds": [],
+ "type": "string",
+ "unit": "short"
+ },
+ {
+ "alias": "",
+ "align": "auto",
+ "colorMode": null,
+ "colors": [
+ "rgba(245, 54, 54, 0.9)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(50, 172, 45, 0.97)"
+ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 1,
+ "mappingType": 1,
+ "pattern": "/.*/",
+ "thresholds": [],
+ "type": "number",
+ "unit": "decbytes"
+ }
+ ],
+ "targets": [
+ {
+ "expr": "sum (erlang_vm_allocators{kind=\"mbcs\", alloc=\"$memory_allocator\", usage=\"carriers_size\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", rabbitmq_node=\"$rabbitmq_node\"})\n/\nsum (erlang_vm_allocators{kind=\"mbcs\", alloc=\"$memory_allocator\", usage=\"carriers\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", rabbitmq_node=\"$rabbitmq_node\"})",
+ "legendFormat": "Multiblock - Carrier",
+ "refId": "A"
+ },
+ {
+ "expr": "sum (erlang_vm_allocators{kind=\"mbcs\", alloc=\"$memory_allocator\", usage=\"blocks_size\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", rabbitmq_node=\"$rabbitmq_node\"})\n/\nsum (erlang_vm_allocators{kind=\"mbcs\", alloc=\"$memory_allocator\", usage=\"blocks\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", rabbitmq_node=\"$rabbitmq_node\"})",
+ "legendFormat": "Multiblock - Block",
+ "refId": "B"
+ },
+ {
+ "expr": "sum (erlang_vm_allocators{kind=\"mbcs_pool\", alloc=\"$memory_allocator\", usage=\"carriers_size\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", rabbitmq_node=\"$rabbitmq_node\"})\n/\nsum (erlang_vm_allocators{kind=\"mbcs_pool\", alloc=\"$memory_allocator\", usage=\"carriers\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", rabbitmq_node=\"$rabbitmq_node\"})",
+ "legendFormat": "Multiblock Pool - Carrier",
+ "refId": "C"
+ },
+ {
+ "expr": "sum (erlang_vm_allocators{kind=\"mbcs_pool\", alloc=\"$memory_allocator\", usage=\"blocks_size\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", rabbitmq_node=\"$rabbitmq_node\"})\n/\nsum (erlang_vm_allocators{kind=\"mbcs_pool\", alloc=\"$memory_allocator\", usage=\"blocks\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", rabbitmq_node=\"$rabbitmq_node\"})",
+ "legendFormat": "Multiblock Pool - Block",
+ "refId": "D"
+ },
+ {
+ "expr": "sum (erlang_vm_allocators{kind=\"sbcs\", alloc=\"$memory_allocator\", usage=\"carriers_size\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", rabbitmq_node=\"$rabbitmq_node\"})\n/\nsum (erlang_vm_allocators{kind=\"sbcs\", alloc=\"$memory_allocator\", usage=\"carriers\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", rabbitmq_node=\"$rabbitmq_node\"})",
+ "legendFormat": "Singleblock - Carrier",
+ "refId": "E"
+ },
+ {
+ "expr": "sum (erlang_vm_allocators{kind=\"sbcs\", alloc=\"$memory_allocator\", usage=\"blocks_size\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", rabbitmq_node=\"$rabbitmq_node\"})\n/\nsum (erlang_vm_allocators{kind=\"sbcs\", alloc=\"$memory_allocator\", usage=\"blocks\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", rabbitmq_node=\"$rabbitmq_node\"})",
+ "legendFormat": "Singleblock - Block",
+ "refId": "F"
+ }
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "$memory_allocator - Average Size",
+ "transform": "timeseries_aggregations",
+ "type": "table"
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": null,
+ "fill": 0,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 7,
+ "w": 16,
+ "x": 8,
+ "y": 32
+ },
+ "hiddenSeries": false,
+ "id": 235,
+ "legend": {
+ "alignAsTable": false,
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "rightSide": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 0.5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "Multiblock - Block",
+ "color": "#1F60C4"
+ },
+ {
+ "alias": "Multiblock - Carrier",
+ "color": "#8AB8FF"
+ },
+ {
+ "alias": "Multiblock Pool - Block",
+ "color": "#8F3BB8"
+ },
+ {
+ "alias": "Multiblock Pool - Carrier",
+ "color": "#CA95E5"
+ },
+ {
+ "alias": "Singleblock - Block",
+ "color": "#FA6400"
+ },
+ {
+ "alias": "Singleblock - Carrier",
+ "color": "#FFB357"
+ }
+ ],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum (erlang_vm_allocators{kind=\"mbcs\", alloc=\"$memory_allocator\", usage=\"blocks_size\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", rabbitmq_node=\"$rabbitmq_node\"})\n/\nsum (erlang_vm_allocators{kind=\"mbcs\", alloc=\"$memory_allocator\", usage=\"blocks\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", rabbitmq_node=\"$rabbitmq_node\"})",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "legendFormat": "Multiblock - Block",
+ "refId": "A"
+ },
+ {
+ "expr": "sum (erlang_vm_allocators{kind=\"mbcs\", alloc=\"$memory_allocator\", usage=\"carriers_size\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", rabbitmq_node=\"$rabbitmq_node\"})\n/\nsum (erlang_vm_allocators{kind=\"mbcs\", alloc=\"$memory_allocator\", usage=\"carriers\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", rabbitmq_node=\"$rabbitmq_node\"})",
+ "legendFormat": "Multiblock - Carrier",
+ "refId": "B"
+ },
+ {
+ "expr": "sum (erlang_vm_allocators{kind=\"mbcs_pool\", alloc=\"$memory_allocator\", usage=\"blocks_size\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", rabbitmq_node=\"$rabbitmq_node\"})\n/\nsum (erlang_vm_allocators{kind=\"mbcs_pool\", alloc=\"$memory_allocator\", usage=\"blocks\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", rabbitmq_node=\"$rabbitmq_node\"})",
+ "legendFormat": "Multiblock Pool - Block",
+ "refId": "C"
+ },
+ {
+ "expr": "sum (erlang_vm_allocators{kind=\"mbcs_pool\", alloc=\"$memory_allocator\", usage=\"carriers_size\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", rabbitmq_node=\"$rabbitmq_node\"})\n/\nsum (erlang_vm_allocators{kind=\"mbcs_pool\", alloc=\"$memory_allocator\", usage=\"carriers\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", rabbitmq_node=\"$rabbitmq_node\"})",
+ "legendFormat": "Multiblock Pool - Carrier",
+ "refId": "D"
+ },
+ {
+ "expr": "sum (erlang_vm_allocators{kind=\"sbcs\", alloc=\"$memory_allocator\", usage=\"blocks_size\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", rabbitmq_node=\"$rabbitmq_node\"})\n/\nsum (erlang_vm_allocators{kind=\"sbcs\", alloc=\"$memory_allocator\", usage=\"blocks\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", rabbitmq_node=\"$rabbitmq_node\"})",
+ "legendFormat": "Singleblock - Block",
+ "refId": "E"
+ },
+ {
+ "expr": "sum (erlang_vm_allocators{kind=\"sbcs\", alloc=\"$memory_allocator\", usage=\"carriers_size\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", rabbitmq_node=\"$rabbitmq_node\"})\n/\nsum (erlang_vm_allocators{kind=\"sbcs\", alloc=\"$memory_allocator\", usage=\"carriers\"} * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\", rabbitmq_node=\"$rabbitmq_node\"})",
+ "legendFormat": "Singleblock - Carrier",
+ "refId": "F"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "$memory_allocator - Average Size",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "decimals": 1,
+ "format": "decbytes",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": "0",
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ }
+ ],
+ "refresh": "15s",
+ "schemaVersion": 22,
+ "style": "dark",
+ "tags": [
+ "rabbitmq-prometheus"
+ ],
+ "templating": {
+ "list": [
+ {
+ "allValue": null,
+ "current": {},
+ "datasource": null,
+ "definition": "label_values(rabbitmq_identity_info, rabbitmq_cluster)",
+ "hide": 0,
+ "includeAll": false,
+ "label": "RabbitMQ Cluster",
+ "multi": false,
+ "name": "rabbitmq_cluster",
+ "options": [],
+ "query": "label_values(rabbitmq_identity_info, rabbitmq_cluster)",
+ "refresh": 2,
+ "regex": "",
+ "skipUrlSync": false,
+ "sort": 1,
+ "tagValuesQuery": "",
+ "tags": [],
+ "tagsQuery": "",
+ "type": "query",
+ "useTags": false
+ },
+ {
+ "allValue": null,
+ "current": {},
+ "datasource": null,
+ "definition": "label_values(rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\"}, rabbitmq_node)",
+ "hide": 0,
+ "includeAll": false,
+ "label": "RabbitMQ Node",
+ "multi": false,
+ "name": "rabbitmq_node",
+ "options": [],
+ "query": "label_values(rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\"}, rabbitmq_node)",
+ "refresh": 2,
+ "regex": "",
+ "skipUrlSync": false,
+ "sort": 1,
+ "tagValuesQuery": "",
+ "tags": [],
+ "tagsQuery": "",
+ "type": "query",
+ "useTags": false
+ },
+ {
+ "allValue": null,
+ "current": {},
+ "datasource": null,
+ "definition": "label_values(erlang_vm_allocators, alloc)",
+ "hide": 0,
+ "includeAll": true,
+ "label": "Erlang Memory Allocator",
+ "multi": true,
+ "name": "memory_allocator",
+ "options": [],
+ "query": "label_values(erlang_vm_allocators, alloc)",
+ "refresh": 1,
+ "regex": "",
+ "skipUrlSync": false,
+ "sort": 1,
+ "tagValuesQuery": "",
+ "tags": [],
+ "tagsQuery": "",
+ "type": "query",
+ "useTags": false
+ }
+ ]
+ },
+ "time": {
+ "from": "now-15m",
+ "to": "now"
+ },
+ "timepicker": {
+ "refresh_intervals": [
+ "15s",
+ "30s",
+ "1m",
+ "5m",
+ "10m"
+ ],
+ "time_options": [
+ "5m",
+ "15m",
+ "1h",
+ "6h",
+ "12h",
+ "24h",
+ "2d",
+ "7d",
+ "30d"
+ ]
+ },
+ "timezone": "",
+ "title": "Erlang-Memory-Allocators",
+ "uid": "o_rtdpWik",
+ "version": 1
+}
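All of the panels in the dashboard above build on one PromQL pattern: an erlang_vm_allocators (or rabbitmq_*) selector is multiplied "on(instance)" with "group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{...}" so that every series carries the cluster and node labels the template variables filter on. The following is a minimal sketch, not part of this commit, of evaluating one of these expressions outside Grafana; it assumes a Prometheus server at http://localhost:9090 scraping rabbitmq_prometheus, and the cluster, node, and allocator values are hypothetical stand-ins for the $rabbitmq_cluster, $rabbitmq_node, and $memory_allocator template variables.

import requests

PROM_URL = "http://localhost:9090/api/v1/query"  # assumption: local Prometheus
cluster = "rabbit@host-0"   # hypothetical $rabbitmq_cluster value
node = "rabbit@host-0"      # hypothetical $rabbitmq_node value
alloc = "binary_alloc"      # hypothetical $memory_allocator value

identity = (f'rabbitmq_identity_info{{rabbitmq_cluster="{cluster}", '
            f'rabbitmq_node="{node}"}}')

def allocator_sum(kind: str, usage: str) -> str:
    # One erlang_vm_allocators selector joined to rabbitmq_identity_info,
    # mirroring the "* on(instance) group_left(...)" pattern the panel
    # expressions above use to attach cluster and node labels.
    return (f'sum(erlang_vm_allocators{{kind="{kind}", alloc="{alloc}", '
            f'usage="{usage}"}} * on(instance) '
            f'group_left(rabbitmq_cluster, rabbitmq_node) {identity})')

# Average multiblock block size, i.e. blocks_size / blocks for kind="mbcs",
# the same ratio plotted as "Multiblock - Block" in the panels above.
query = f'{allocator_sum("mbcs", "blocks_size")} / {allocator_sum("mbcs", "blocks")}'

resp = requests.get(PROM_URL, params={"query": query}, timeout=5)
resp.raise_for_status()
for sample in resp.json()["data"]["result"]:
    print(sample["metric"], sample["value"])

The same helper covers the "sbcs" and "mbcs_pool" variants by changing the kind argument; the carrier averages swap usage="blocks_size"/"blocks" for "carriers_size"/"carriers".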
diff --git a/deps/rabbitmq_prometheus/docker/grafana/dashboards/RabbitMQ-Overview.json b/deps/rabbitmq_prometheus/docker/grafana/dashboards/RabbitMQ-Overview.json
new file mode 100644
index 0000000000..efbed6ef98
--- /dev/null
+++ b/deps/rabbitmq_prometheus/docker/grafana/dashboards/RabbitMQ-Overview.json
@@ -0,0 +1,5708 @@
+{
+ "__requires": [
+ {
+ "type": "grafana",
+ "id": "grafana",
+ "name": "Grafana",
+ "version": "7.0.0"
+ },
+ {
+ "type": "datasource",
+ "id": "prometheus",
+ "name": "prometheus",
+ "version": "2.0.0"
+ },
+ {
+ "type": "table",
+ "id": "table",
+ "name": "Table",
+ "version": ""
+ },
+ {
+ "type": "panel",
+ "id": "graph",
+ "name": "Graph",
+ "version": ""
+ },
+ {
+ "type": "panel",
+ "id": "singlestat",
+ "name": "Singlestat",
+ "version": ""
+ }
+ ],
+ "annotations": {
+ "list": [
+ {
+ "builtIn": 1,
+ "datasource": "-- Grafana --",
+ "enable": true,
+ "hide": true,
+ "iconColor": "rgba(0, 211, 255, 1)",
+ "name": "Annotations & Alerts",
+ "type": "dashboard"
+ }
+ ]
+ },
+ "description": "A new RabbitMQ Management Overview",
+ "editable": true,
+ "gnetId": null,
+ "graphTooltip": 1,
+ "iteration": 1605271611134,
+ "links": [
+ {
+ "icon": "doc",
+ "tags": [],
+ "targetBlank": true,
+ "title": "Monitoring with Prometheus & Grafana",
+ "tooltip": "",
+ "type": "link",
+ "url": "https://www.rabbitmq.com/prometheus.html"
+ }
+ ],
+ "panels": [
+ {
+ "cacheTimeout": null,
+ "colorBackground": true,
+ "colorPrefix": false,
+ "colorValue": false,
+ "colors": [
+ "#37872D",
+ "#1F60C4",
+ "#C4162A"
+ ],
+ "datasource": null,
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "format": "short",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "id": 64,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "pluginVersion": "6.1.3",
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "sparkline": {
+ "fillColor": "rgba(255, 255, 255, 0)",
+ "full": false,
+ "lineColor": "rgb(255, 255, 255)",
+ "show": true
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "expr": "sum(rabbitmq_queue_messages_ready * on(instance) group_left(rabbitmq_cluster) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\"})",
+ "format": "time_series",
+ "hide": false,
+ "instant": false,
+ "intervalFactor": 1,
+ "legendFormat": "",
+ "refId": "A"
+ }
+ ],
+ "thresholds": "10000,100000",
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Ready messages",
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": true,
+ "colorValue": false,
+ "colors": [
+ "#C4162A",
+ "#1F60C4",
+ "#37872D"
+ ],
+ "datasource": null,
+ "decimals": null,
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "format": "short",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "gridPos": {
+ "h": 3,
+ "w": 6,
+ "x": 6,
+ "y": 0
+ },
+ "id": 62,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "pluginVersion": "6.1.3",
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "sparkline": {
+ "fillColor": "rgba(255, 255, 255, 0)",
+ "full": false,
+ "lineColor": "rgb(255, 255, 255)",
+ "show": true
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "expr": "sum(rate(rabbitmq_channel_messages_published_total[60s]) * on(instance) group_left(rabbitmq_cluster) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\"})",
+ "format": "time_series",
+ "instant": false,
+ "intervalFactor": 1,
+ "legendFormat": "",
+ "refId": "A"
+ }
+ ],
+ "thresholds": "-1,50",
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Incoming messages / s",
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": true,
+ "colorValue": false,
+ "colors": [
+ "#C4162A",
+ "#1F60C4",
+ "#37872D"
+ ],
+ "datasource": null,
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "format": "none",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "gridPos": {
+ "h": 3,
+ "w": 4,
+ "x": 12,
+ "y": 0
+ },
+ "id": 66,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "pluginVersion": "6.1.3",
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "sparkline": {
+ "fillColor": "rgba(255, 255, 255, 0)",
+ "full": false,
+ "lineColor": "rgb(255, 255, 255)",
+ "show": true
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "expr": "sum(rabbitmq_channels * on(instance) group_left(rabbitmq_cluster) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\"}) - sum(rabbitmq_channel_consumers * on(instance) group_left(rabbitmq_cluster) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\"})",
+ "format": "time_series",
+ "instant": false,
+ "intervalFactor": 1,
+ "legendFormat": "",
+ "refId": "A"
+ }
+ ],
+ "thresholds": "0,10",
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Publishers",
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": true,
+ "colorValue": false,
+ "colors": [
+ "#C4162A",
+ "#1F60C4",
+ "#37872D"
+ ],
+ "datasource": null,
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "format": "none",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "gridPos": {
+ "h": 3,
+ "w": 4,
+ "x": 16,
+ "y": 0
+ },
+ "id": 37,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "pluginVersion": "6.1.3",
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "sparkline": {
+ "fillColor": "rgba(255, 255, 255, 0)",
+ "full": false,
+ "lineColor": "rgb(255, 255, 255)",
+ "show": true
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "expr": "sum(rabbitmq_connections * on(instance) group_left(rabbitmq_cluster) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\"})",
+ "format": "time_series",
+ "instant": false,
+ "intervalFactor": 1,
+ "legendFormat": "",
+ "refId": "A"
+ }
+ ],
+ "thresholds": "0,10",
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Connections",
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": true,
+ "colorValue": false,
+ "colors": [
+ "#C4162A",
+ "#1F60C4",
+ "#37872D"
+ ],
+ "datasource": null,
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "format": "none",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "gridPos": {
+ "h": 3,
+ "w": 4,
+ "x": 20,
+ "y": 0
+ },
+ "id": 40,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "pluginVersion": "6.1.3",
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "sparkline": {
+ "fillColor": "rgba(255, 255, 255, 0)",
+ "full": false,
+ "lineColor": "rgb(255, 255, 255)",
+ "show": true
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "expr": "sum(rabbitmq_queues * on(instance) group_left(rabbitmq_cluster) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\"})",
+ "format": "time_series",
+ "instant": false,
+ "intervalFactor": 1,
+ "legendFormat": "",
+ "refId": "A"
+ }
+ ],
+ "thresholds": "0,10",
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Queues",
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": true,
+ "colorValue": false,
+ "colors": [
+ "#37872D",
+ "#1F60C4",
+ "#C4162A"
+ ],
+ "datasource": null,
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "format": "short",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "gridPos": {
+ "h": 3,
+ "w": 6,
+ "x": 0,
+ "y": 3
+ },
+ "id": 65,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "pluginVersion": "6.1.3",
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "sparkline": {
+ "fillColor": "rgba(255, 255, 255, 0)",
+ "full": false,
+ "lineColor": "rgb(255, 255, 255)",
+ "show": true
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "expr": "sum(rabbitmq_queue_messages_unacked * on(instance) group_left(rabbitmq_cluster) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\"})",
+ "format": "time_series",
+ "hide": false,
+ "instant": false,
+ "intervalFactor": 1,
+ "legendFormat": "",
+ "refId": "A"
+ }
+ ],
+ "thresholds": "100,500",
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Unacknowledged messages",
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": true,
+ "colorValue": false,
+ "colors": [
+ "#C4162A",
+ "#1F60C4",
+ "#37872D"
+ ],
+ "datasource": null,
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "format": "short",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "gridPos": {
+ "h": 3,
+ "w": 6,
+ "x": 6,
+ "y": 3
+ },
+ "id": 63,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "pluginVersion": "6.1.3",
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "sparkline": {
+ "fillColor": "rgba(255, 255, 255, 0)",
+ "full": false,
+ "lineColor": "rgb(255, 255, 255)",
+ "show": true
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "expr": "sum(rate(rabbitmq_channel_messages_redelivered_total[60s]) * on(instance) group_left(rabbitmq_cluster) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\"}) +\nsum(rate(rabbitmq_channel_messages_delivered_total[60s]) * on(instance) group_left(rabbitmq_cluster) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\"}) +\nsum(rate(rabbitmq_channel_messages_delivered_ack_total[60s]) * on(instance) group_left(rabbitmq_cluster) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\"}) +\nsum(rate(rabbitmq_channel_get_total[60s]) * on(instance) group_left(rabbitmq_cluster) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\"}) +\nsum(rate(rabbitmq_channel_get_ack_total[60s]) * on(instance) group_left(rabbitmq_cluster) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\"})",
+ "format": "time_series",
+ "hide": false,
+ "instant": false,
+ "intervalFactor": 1,
+ "legendFormat": "",
+ "refId": "A"
+ }
+ ],
+ "thresholds": "-1,50",
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Outgoing messages / s",
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": true,
+ "colorValue": false,
+ "colors": [
+ "#C4162A",
+ "#1F60C4",
+ "#37872D"
+ ],
+ "datasource": null,
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "format": "none",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "gridPos": {
+ "h": 3,
+ "w": 4,
+ "x": 12,
+ "y": 3
+ },
+ "id": 41,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "pluginVersion": "6.1.3",
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "sparkline": {
+ "fillColor": "rgba(255, 255, 255, 0)",
+ "full": false,
+ "lineColor": "rgb(255, 255, 255)",
+ "show": true
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "expr": "sum(rabbitmq_channel_consumers * on(instance) group_left(rabbitmq_cluster) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\"})",
+ "format": "time_series",
+ "instant": false,
+ "intervalFactor": 1,
+ "legendFormat": "",
+ "refId": "A"
+ }
+ ],
+ "thresholds": "0,10",
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Consumers",
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": true,
+ "colorValue": false,
+ "colors": [
+ "#C4162A",
+ "#1F60C4",
+ "#37872D"
+ ],
+ "datasource": null,
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "format": "none",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "gridPos": {
+ "h": 3,
+ "w": 4,
+ "x": 16,
+ "y": 3
+ },
+ "id": 38,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "pluginVersion": "6.1.3",
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "sparkline": {
+ "fillColor": "rgba(255, 255, 255, 0)",
+ "full": false,
+ "lineColor": "rgb(255, 255, 255)",
+ "show": true
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "expr": "sum(rabbitmq_channels * on(instance) group_left(rabbitmq_cluster) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\"})",
+ "format": "time_series",
+ "instant": false,
+ "intervalFactor": 1,
+ "legendFormat": "",
+ "refId": "A"
+ }
+ ],
+ "thresholds": "0,10",
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Channels",
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": true,
+ "colorValue": false,
+ "colors": [
+ "#1F60C4",
+ "#37872D",
+ "#C4162A"
+ ],
+ "datasource": null,
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "format": "none",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "gridPos": {
+ "h": 3,
+ "w": 4,
+ "x": 20,
+ "y": 3
+ },
+ "id": 67,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "pluginVersion": "6.1.3",
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "sparkline": {
+ "fillColor": "rgba(255, 255, 255, 0)",
+ "full": false,
+ "lineColor": "rgb(255, 255, 255)",
+ "show": true
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "expr": "sum(rabbitmq_build_info * on(instance) group_left(rabbitmq_cluster) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\"})",
+ "format": "time_series",
+ "instant": false,
+ "intervalFactor": 1,
+ "legendFormat": "",
+ "refId": "A"
+ }
+ ],
+ "thresholds": "3,8",
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Nodes",
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "collapsed": false,
+ "datasource": null,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 6
+ },
+ "id": 4,
+ "panels": [],
+ "title": "NODES",
+ "type": "row"
+ },
+ {
+ "columns": [],
+ "datasource": null,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fontSize": "100%",
+ "gridPos": {
+ "h": 4,
+ "w": 24,
+ "x": 0,
+ "y": 7
+ },
+ "id": 69,
+ "links": [],
+ "pageSize": null,
+ "scroll": true,
+ "showHeader": true,
+ "sort": {
+ "col": 8,
+ "desc": false
+ },
+ "styles": [
+ {
+ "alias": "Erlang/OTP",
+ "align": "auto",
+ "colorMode": null,
+ "colors": [
+ "rgba(50, 172, 45, 0.97)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(245, 54, 54, 0.9)"
+ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": null,
+ "link": false,
+ "mappingType": 1,
+ "pattern": "erlang_version",
+ "thresholds": [
+ ""
+ ],
+ "type": "string",
+ "unit": "none"
+ },
+ {
+ "alias": "RabbitMQ",
+ "align": "auto",
+ "colorMode": null,
+ "colors": [
+ "rgba(245, 54, 54, 0.9)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(50, 172, 45, 0.97)"
+ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 2,
+ "link": false,
+ "mappingType": 1,
+ "pattern": "rabbitmq_version",
+ "thresholds": [
+ ""
+ ],
+ "type": "string",
+ "unit": "none"
+ },
+ {
+ "alias": "Host",
+ "align": "auto",
+ "colorMode": null,
+ "colors": [
+ "rgba(245, 54, 54, 0.9)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(50, 172, 45, 0.97)"
+ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 2,
+ "link": false,
+ "mappingType": 1,
+ "pattern": "instance",
+ "preserveFormat": false,
+ "thresholds": [],
+ "type": "string",
+ "unit": "short",
+ "valueMaps": []
+ },
+ {
+ "alias": "Node name",
+ "align": "auto",
+ "colorMode": null,
+ "colors": [
+ "rgba(245, 54, 54, 0.9)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(50, 172, 45, 0.97)"
+ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 2,
+ "link": false,
+ "mappingType": 1,
+ "pattern": "rabbitmq_node",
+ "thresholds": [
+ ""
+ ],
+ "type": "string",
+ "unit": "short"
+ },
+ {
+ "alias": "",
+ "align": "auto",
+ "colorMode": null,
+ "colors": [
+ "rgba(245, 54, 54, 0.9)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(50, 172, 45, 0.97)"
+ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 2,
+ "mappingType": 1,
+ "pattern": "Time",
+ "thresholds": [],
+ "type": "hidden",
+ "unit": "short"
+ },
+ {
+ "alias": "",
+ "align": "auto",
+ "colorMode": null,
+ "colors": [
+ "rgba(245, 54, 54, 0.9)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(50, 172, 45, 0.97)"
+ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 2,
+ "mappingType": 1,
+ "pattern": "Value",
+ "thresholds": [],
+ "type": "hidden",
+ "unit": "short"
+ },
+ {
+ "alias": "",
+ "align": "auto",
+ "colorMode": null,
+ "colors": [
+ "rgba(245, 54, 54, 0.9)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(50, 172, 45, 0.97)"
+ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 2,
+ "mappingType": 1,
+ "pattern": "job",
+ "thresholds": [],
+ "type": "hidden",
+ "unit": "short"
+ },
+ {
+ "alias": "Cluster",
+ "align": "auto",
+ "colorMode": null,
+ "colors": [
+ "rgba(245, 54, 54, 0.9)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(50, 172, 45, 0.97)"
+ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 2,
+ "mappingType": 1,
+ "pattern": "rabbitmq_cluster",
+ "thresholds": [],
+ "type": "hidden",
+ "unit": "short",
+ "valueMaps": []
+ },
+ {
+ "alias": "prometheus.erl",
+ "align": "auto",
+ "colorMode": null,
+ "colors": [
+ "rgba(245, 54, 54, 0.9)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(50, 172, 45, 0.97)"
+ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 2,
+ "mappingType": 1,
+ "pattern": "prometheus_client_version",
+ "thresholds": [],
+ "type": "string",
+ "unit": "short"
+ },
+ {
+ "alias": "rabbitmq_prometheus",
+ "align": "auto",
+ "colorMode": null,
+ "colors": [
+ "rgba(245, 54, 54, 0.9)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(50, 172, 45, 0.97)"
+ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 2,
+ "mappingType": 1,
+ "pattern": "prometheus_plugin_version",
+ "thresholds": [],
+ "type": "string",
+ "unit": "short"
+ }
+ ],
+ "targets": [
+ {
+ "expr": "rabbitmq_build_info * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\"}",
+ "format": "table",
+ "instant": true,
+ "intervalFactor": 1,
+ "refId": "A"
+ }
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "",
+ "transform": "table",
+ "type": "table-old"
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "cacheTimeout": null,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": null,
+ "description": "If the value is zero or less, the memory alarm will be triggered and all publishing connections across all cluster nodes will be blocked.\n\nThis value can temporarily go negative because the memory alarm is triggered with a slight delay.\n\nThe kernel's view of the amount of memory used by the node can differ from what the node itself can observe. This means that this value can be negative for a sustained period of time.\n\nBy default nodes use resident set size (RSS) to compute how much memory they use. This strategy can be changed (see the guides below).\n\n* [Alarms](https://www.rabbitmq.com/alarms.html)\n* [Memory Alarms](https://www.rabbitmq.com/memory.html)\n* [Reasoning About Memory Use](https://www.rabbitmq.com/memory-use.html)\n* [Blocked Connection Notifications](https://www.rabbitmq.com/connection-blocked.html)",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
+ "fill": 0,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 11
+ },
+ "hiddenSeries": false,
+ "id": 7,
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": true,
+ "max": true,
+ "min": true,
+ "show": false,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "options": {
+ "alertThreshold": true
+ },
+ "percentage": false,
+ "pluginVersion": "7.3.2",
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "/^rabbit@[\\w.-]+0/",
+ "color": "#56A64B"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+1/",
+ "color": "#F2CC0C"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+2/",
+ "color": "#3274D9"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+3/",
+ "color": "#A352CC"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+4/",
+ "color": "#FF780A"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+5/",
+ "color": "#96D98D"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+6/",
+ "color": "#FFEE52"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+7/",
+ "color": "#8AB8FF"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+8/",
+ "color": "#CA95E5"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+9/",
+ "color": "#FFB357"
+ }
+ ],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "(rabbitmq_resident_memory_limit_bytes * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\"}) -\n(rabbitmq_process_resident_memory_bytes * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\"})",
+ "format": "time_series",
+ "instant": false,
+ "intervalFactor": 1,
+ "legendFormat": "{{rabbitmq_node}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [
+ {
+ "colorMode": "warning",
+ "fill": true,
+ "line": true,
+ "op": "lt",
+ "value": 536870912,
+ "yaxis": "left"
+ },
+ {
+ "colorMode": "critical",
+ "fill": true,
+ "line": true,
+ "op": "lt",
+ "value": 0,
+ "yaxis": "left"
+ }
+ ],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Memory available before publishers blocked",
+ "tooltip": {
+ "shared": true,
+ "sort": 1,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "decimals": 1,
+ "format": "bytes",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "cacheTimeout": null,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": null,
+ "description": "This metric is reported for the partition where the RabbitMQ data directory is stored.\n\nIf the value is zero or less, the disk alarm will be triggered and all publishing connections across all cluster nodes will be blocked.\n\nThis value can temporarily go negative because the free disk space alarm is triggered with a slight delay.\n\n* [Alarms](https://www.rabbitmq.com/alarms.html)\n* [Disk Space Alarms](https://www.rabbitmq.com/disk-alarms.html)\n* [Disk Space](https://www.rabbitmq.com/production-checklist.html#resource-limits-disk-space)\n* [Persistence Configuration](https://www.rabbitmq.com/persistence-conf.html)\n* [Blocked Connection Notifications](https://www.rabbitmq.com/connection-blocked.html)",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
+ "fill": 0,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 8,
+ "w": 8,
+ "x": 12,
+ "y": 11
+ },
+ "hiddenSeries": false,
+ "id": 8,
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": true,
+ "max": true,
+ "min": true,
+ "show": false,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "options": {
+ "alertThreshold": true
+ },
+ "percentage": false,
+ "pluginVersion": "7.3.2",
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "/^rabbit@[\\w.-]+0/",
+ "color": "#56A64B"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+1/",
+ "color": "#F2CC0C"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+2/",
+ "color": "#3274D9"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+3/",
+ "color": "#A352CC"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+4/",
+ "color": "#FF780A"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+5/",
+ "color": "#96D98D"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+6/",
+ "color": "#FFEE52"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+7/",
+ "color": "#8AB8FF"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+8/",
+ "color": "#CA95E5"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+9/",
+ "color": "#FFB357"
+ }
+ ],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "rabbitmq_disk_space_available_bytes * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\"}",
+ "format": "time_series",
+ "instant": false,
+ "intervalFactor": 1,
+ "legendFormat": "{{rabbitmq_node}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [
+ {
+ "colorMode": "critical",
+ "fill": true,
+ "line": true,
+ "op": "lt",
+ "value": 1073741824,
+ "yaxis": "left"
+ },
+ {
+ "colorMode": "warning",
+ "fill": true,
+ "line": true,
+ "op": "lt",
+ "value": 5368709120,
+ "yaxis": "left"
+ }
+ ],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Disk space available before publishers blocked",
+ "tooltip": {
+ "shared": true,
+ "sort": 1,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "decimals": 1,
+ "format": "bytes",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "cacheTimeout": null,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": null,
+ "description": "When this value reaches zero, new connections will not be accepted and disk write operations may fail.\n\nClient libraries, peer nodes and CLI tools will not be able to connect when the node runs out of available file descriptors.\n\n* [Open File Handles Limit](https://www.rabbitmq.com/production-checklist.html#resource-limits-file-handle-limit)",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
+ "fill": 0,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 4,
+ "w": 4,
+ "x": 20,
+ "y": 11
+ },
+ "hiddenSeries": false,
+ "id": 2,
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": true,
+ "max": true,
+ "min": true,
+ "rightSide": false,
+ "show": false,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "options": {
+ "alertThreshold": true
+ },
+ "percentage": false,
+ "pluginVersion": "7.3.2",
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "/^rabbit@[\\w.-]+0/",
+ "color": "#56A64B"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+1/",
+ "color": "#F2CC0C"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+2/",
+ "color": "#3274D9"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+3/",
+ "color": "#A352CC"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+4/",
+ "color": "#FF780A"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+5/",
+ "color": "#96D98D"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+6/",
+ "color": "#FFEE52"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+7/",
+ "color": "#8AB8FF"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+8/",
+ "color": "#CA95E5"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+9/",
+ "color": "#FFB357"
+ }
+ ],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "(rabbitmq_process_max_fds * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\"}) -\n(rabbitmq_process_open_fds * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\"})",
+ "format": "time_series",
+ "instant": false,
+ "intervalFactor": 1,
+ "legendFormat": "{{rabbitmq_node}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [
+ {
+ "colorMode": "critical",
+ "fill": true,
+ "line": true,
+ "op": "lt",
+ "value": 500,
+ "yaxis": "left"
+ },
+ {
+ "colorMode": "warning",
+ "fill": true,
+ "line": true,
+ "op": "lt",
+ "value": 1000,
+ "yaxis": "left"
+ }
+ ],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "File descriptors available",
+ "tooltip": {
+ "shared": true,
+ "sort": 1,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "decimals": -1,
+ "format": "none",
+ "label": "",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "cacheTimeout": null,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": null,
+ "description": "When this value reaches zero, new connections will not be accepted.\n\nClient libraries, peer nodes and CLI tools will not be able to connect when the node runs out of available file descriptors.\n\n* [Networking and RabbitMQ](https://www.rabbitmq.com/networking.html)",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
+ "fill": 0,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 4,
+ "w": 4,
+ "x": 20,
+ "y": 15
+ },
+ "hiddenSeries": false,
+ "id": 5,
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": true,
+ "max": true,
+ "min": true,
+ "show": false,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "options": {
+ "alertThreshold": true
+ },
+ "percentage": false,
+ "pluginVersion": "7.3.2",
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "/^rabbit@[\\w.-]+0/",
+ "color": "#56A64B"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+1/",
+ "color": "#F2CC0C"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+2/",
+ "color": "#3274D9"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+3/",
+ "color": "#A352CC"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+4/",
+ "color": "#FF780A"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+5/",
+ "color": "#96D98D"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+6/",
+ "color": "#FFEE52"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+7/",
+ "color": "#8AB8FF"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+8/",
+ "color": "#CA95E5"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+9/",
+ "color": "#FFB357"
+ }
+ ],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "(rabbitmq_process_max_tcp_sockets * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\"}) -\n(rabbitmq_process_open_tcp_sockets * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\"})",
+ "format": "time_series",
+ "instant": false,
+ "intervalFactor": 1,
+ "legendFormat": "{{rabbitmq_node}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [
+ {
+ "colorMode": "critical",
+ "fill": true,
+ "line": true,
+ "op": "lt",
+ "value": 500,
+ "yaxis": "left"
+ },
+ {
+ "colorMode": "warning",
+ "fill": true,
+ "line": true,
+ "op": "lt",
+ "value": 1000,
+ "yaxis": "left"
+ }
+ ],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "TCP sockets available",
+ "tooltip": {
+ "shared": true,
+ "sort": 1,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "decimals": -1,
+ "format": "none",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "collapsed": false,
+ "datasource": null,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 19
+ },
+ "id": 27,
+ "panels": [],
+ "title": "QUEUED MESSAGES",
+ "type": "row"
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "cacheTimeout": null,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": null,
+ "description": "Total number of ready messages ready to be delivered to consumers.\n\nAim to keep this value as low as possible. RabbitMQ behaves best when messages are flowing through it. It's OK for publishers to occasionally outpace consumers, but the expectation is that consumers will eventually process all ready messages.\n\nIf this metric keeps increasing, your system will eventually run out of memory and/or disk space. Consider using TTL or Queue Length Limit to prevent unbounded message growth.\n\n* [Queues](https://www.rabbitmq.com/queues.html)\n* [Consumers](https://www.rabbitmq.com/consumers.html)\n* [Queue Length Limit](https://www.rabbitmq.com/maxlength.html)\n* [Time-To-Live and Expiration](https://www.rabbitmq.com/ttl.html)",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
+ "fill": 10,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 5,
+ "w": 12,
+ "x": 0,
+ "y": 20
+ },
+ "hiddenSeries": false,
+ "id": 9,
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": true,
+ "max": true,
+ "min": true,
+ "rightSide": false,
+ "show": false,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null as zero",
+ "options": {
+ "alertThreshold": true
+ },
+ "percentage": false,
+ "pluginVersion": "7.3.2",
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "/^rabbit@[\\w.-]+0/",
+ "color": "#56A64B"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+1/",
+ "color": "#F2CC0C"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+2/",
+ "color": "#3274D9"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+3/",
+ "color": "#A352CC"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+4/",
+ "color": "#FF780A"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+5/",
+ "color": "#96D98D"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+6/",
+ "color": "#FFEE52"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+7/",
+ "color": "#8AB8FF"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+8/",
+ "color": "#CA95E5"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+9/",
+ "color": "#FFB357"
+ }
+ ],
+ "spaceLength": 10,
+ "stack": true,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(rabbitmq_queue_messages_ready * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\"}) by(rabbitmq_node)",
+ "format": "time_series",
+ "instant": false,
+ "intervalFactor": 1,
+ "legendFormat": "{{rabbitmq_node}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Messages ready to be delivered to consumers",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "decimals": 0,
+ "format": "short",
+ "label": "",
+ "logBase": 1,
+ "max": null,
+ "min": "0",
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "cacheTimeout": null,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": null,
+ "description": "The total number of messages that are either in-flight to consumers, currently being processed by consumers or simply waiting for the consumer acknowledgements to be processed by the queue. Until the queue processes the message acknowledgement, the message will remain unacknowledged.\n\n* [Queues](https://www.rabbitmq.com/queues.html)\n* [Confirms and Acknowledgements](https://www.rabbitmq.com/confirms.html)\n* [Consumer Prefetch](https://www.rabbitmq.com/consumer-prefetch.html)",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
+ "fill": 10,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 5,
+ "w": 12,
+ "x": 12,
+ "y": 20
+ },
+ "hiddenSeries": false,
+ "id": 19,
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": true,
+ "max": true,
+ "min": true,
+ "rightSide": false,
+ "show": false,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null as zero",
+ "options": {
+ "alertThreshold": true
+ },
+ "percentage": false,
+ "pluginVersion": "7.3.2",
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "/^rabbit@[\\w.-]+0/",
+ "color": "#56A64B"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+1/",
+ "color": "#F2CC0C"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+2/",
+ "color": "#3274D9"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+3/",
+ "color": "#A352CC"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+4/",
+ "color": "#FF780A"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+5/",
+ "color": "#96D98D"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+6/",
+ "color": "#FFEE52"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+7/",
+ "color": "#8AB8FF"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+8/",
+ "color": "#CA95E5"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+9/",
+ "color": "#FFB357"
+ }
+ ],
+ "spaceLength": 10,
+ "stack": true,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(rabbitmq_queue_messages_unacked * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\"}) by(rabbitmq_node)",
+ "format": "time_series",
+ "instant": false,
+ "intervalFactor": 1,
+ "legendFormat": "{{rabbitmq_node}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Messages pending consumer acknowledgement",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "decimals": 0,
+ "format": "short",
+ "label": "",
+ "logBase": 1,
+ "max": null,
+ "min": "0",
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "collapsed": false,
+ "datasource": null,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 25
+ },
+ "id": 11,
+ "panels": [],
+ "title": "INCOMING MESSAGES",
+ "type": "row"
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "cacheTimeout": null,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": null,
+ "description": "The incoming message rate before any routing rules are applied.\n\nIf this value is lower than the number of messages published to queues, it may indicate that some messages are delivered to more than one queue.\n\nIf this value is higher than the number of messages published to queues, messages cannot be routed and will either be dropped or returned to publishers.\n\n* [Publishers](https://www.rabbitmq.com/publishers.html)",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
+ "fill": 10,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 5,
+ "w": 12,
+ "x": 0,
+ "y": 26
+ },
+ "hiddenSeries": false,
+ "id": 13,
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": true,
+ "max": true,
+ "min": true,
+ "rightSide": false,
+ "show": false,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null as zero",
+ "options": {
+ "alertThreshold": true
+ },
+ "percentage": false,
+ "pluginVersion": "7.3.2",
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "/^rabbit@[\\w.-]+0/",
+ "color": "#56A64B"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+1/",
+ "color": "#F2CC0C"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+2/",
+ "color": "#3274D9"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+3/",
+ "color": "#A352CC"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+4/",
+ "color": "#FF780A"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+5/",
+ "color": "#96D98D"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+6/",
+ "color": "#FFEE52"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+7/",
+ "color": "#8AB8FF"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+8/",
+ "color": "#CA95E5"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+9/",
+ "color": "#FFB357"
+ }
+ ],
+ "spaceLength": 10,
+ "stack": true,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(rate(rabbitmq_channel_messages_published_total[60s]) * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\"}) by(rabbitmq_node)",
+ "format": "time_series",
+ "instant": false,
+ "intervalFactor": 1,
+ "legendFormat": "{{rabbitmq_node}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Messages published / s",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "decimals": 0,
+ "format": "short",
+ "label": "",
+ "logBase": 1,
+ "max": null,
+ "min": "0",
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "cacheTimeout": null,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": null,
+ "description": "The rate of messages confirmed by the broker to publishers. Publishers must opt-in to receive message confirmations.\n\nIf this metric is consistently at zero it may suggest that publisher confirms are not used by clients. The safety of published messages is likely to be at risk.\n\n* [Publisher Confirms](https://www.rabbitmq.com/confirms.html#publisher-confirms)\n* [Publisher Confirms and Data Safety](https://www.rabbitmq.com/publishers.html#data-safety)\n* [When Will Published Messages Be Confirmed by the Broker?](https://www.rabbitmq.com/confirms.html#when-publishes-are-confirmed)",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
+ "fill": 10,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 5,
+ "w": 12,
+ "x": 12,
+ "y": 26
+ },
+ "hiddenSeries": false,
+ "id": 18,
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": true,
+ "max": true,
+ "min": true,
+ "rightSide": false,
+ "show": false,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null as zero",
+ "options": {
+ "alertThreshold": true
+ },
+ "percentage": false,
+ "pluginVersion": "7.3.2",
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "/^rabbit@[\\w.-]+0/",
+ "color": "#56A64B"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+1/",
+ "color": "#F2CC0C"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+2/",
+ "color": "#3274D9"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+3/",
+ "color": "#A352CC"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+4/",
+ "color": "#FF780A"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+5/",
+ "color": "#96D98D"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+6/",
+ "color": "#FFEE52"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+7/",
+ "color": "#8AB8FF"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+8/",
+ "color": "#CA95E5"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+9/",
+ "color": "#FFB357"
+ }
+ ],
+ "spaceLength": 10,
+ "stack": true,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(rate(rabbitmq_channel_messages_confirmed_total[60s]) * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\"}) by(rabbitmq_node)",
+ "format": "time_series",
+ "instant": false,
+ "intervalFactor": 1,
+ "legendFormat": "{{rabbitmq_node}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Messages confirmed to publishers / s",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "decimals": 0,
+ "format": "short",
+ "label": "",
+ "logBase": 1,
+ "max": null,
+ "min": "0",
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "cacheTimeout": null,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": null,
+ "description": "The rate of messages received from publishers and successfully routed to the master queue replicas.\n\n* [Queues](https://www.rabbitmq.com/queues.html)\n* [Publishers](https://www.rabbitmq.com/publishers.html)",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
+ "fill": 10,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 5,
+ "w": 12,
+ "x": 0,
+ "y": 31
+ },
+ "hiddenSeries": false,
+ "id": 61,
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": true,
+ "max": true,
+ "min": true,
+ "rightSide": false,
+ "show": false,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null as zero",
+ "options": {
+ "alertThreshold": true
+ },
+ "percentage": false,
+ "pluginVersion": "7.3.2",
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "/^rabbit@[\\w.-]+0/",
+ "color": "#56A64B"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+1/",
+ "color": "#F2CC0C"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+2/",
+ "color": "#3274D9"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+3/",
+ "color": "#A352CC"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+4/",
+ "color": "#FF780A"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+5/",
+ "color": "#96D98D"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+6/",
+ "color": "#FFEE52"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+7/",
+ "color": "#8AB8FF"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+8/",
+ "color": "#CA95E5"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+9/",
+ "color": "#FFB357"
+ }
+ ],
+ "spaceLength": 10,
+ "stack": true,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(rate(rabbitmq_queue_messages_published_total[60s]) * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\"}) by(rabbitmq_node)",
+ "format": "time_series",
+ "instant": false,
+ "intervalFactor": 1,
+ "legendFormat": "{{rabbitmq_node}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Messages routed to queues / s",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "decimals": 0,
+ "format": "short",
+ "label": "",
+ "logBase": 1,
+ "max": null,
+ "min": "0",
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "cacheTimeout": null,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": null,
+ "description": "The rate of messages received from publishers that have publisher confirms enabled and the broker has not confirmed yet.\n\n* [Publishers](https://www.rabbitmq.com/publishers.html)\n* [Confirms and Acknowledgements](https://www.rabbitmq.com/confirms.html)\n* [When Will Published Messages Be Confirmed by the Broker?](https://www.rabbitmq.com/confirms.html#when-publishes-are-confirmed)",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
+ "fill": 10,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 5,
+ "w": 12,
+ "x": 12,
+ "y": 31
+ },
+ "hiddenSeries": false,
+ "id": 12,
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": true,
+ "max": true,
+ "min": true,
+ "rightSide": false,
+ "show": false,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "options": {
+ "alertThreshold": true
+ },
+ "percentage": false,
+ "pluginVersion": "7.3.2",
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "/^rabbit@[\\w.-]+0/",
+ "color": "#56A64B"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+1/",
+ "color": "#F2CC0C"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+2/",
+ "color": "#3274D9"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+3/",
+ "color": "#A352CC"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+4/",
+ "color": "#FF780A"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+5/",
+ "color": "#96D98D"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+6/",
+ "color": "#FFEE52"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+7/",
+ "color": "#8AB8FF"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+8/",
+ "color": "#CA95E5"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+9/",
+ "color": "#FFB357"
+ }
+ ],
+ "spaceLength": 10,
+ "stack": true,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(rate(rabbitmq_channel_messages_unconfirmed[60s]) * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\"}) by(rabbitmq_node)",
+ "format": "time_series",
+ "instant": false,
+ "intervalFactor": 1,
+ "legendFormat": "{{rabbitmq_node}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Messages unconfirmed to publishers / s",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "decimals": 0,
+ "format": "short",
+ "label": "",
+ "logBase": 1,
+ "max": null,
+ "min": "0",
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "cacheTimeout": null,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": null,
+ "description": "The rate of messages that cannot be routed and are dropped. \n\nAny value above zero means message loss and likely suggests a routing problem on the publisher end.\n\n* [Unroutable Message Handling](https://www.rabbitmq.com/publishers.html#unroutable)",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
+ "fill": 10,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 5,
+ "w": 12,
+ "x": 0,
+ "y": 36
+ },
+ "hiddenSeries": false,
+ "id": 34,
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": true,
+ "max": true,
+ "min": true,
+ "rightSide": false,
+ "show": false,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pluginVersion": "7.3.2",
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "/rabbit/",
+ "color": "#C4162A"
+ }
+ ],
+ "spaceLength": 10,
+ "stack": true,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(rate(rabbitmq_channel_messages_unroutable_dropped_total[60s]) * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\"}) by(rabbitmq_node)",
+ "format": "time_series",
+ "instant": false,
+ "intervalFactor": 1,
+ "legendFormat": "{{rabbitmq_node}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [
+ {
+ "colorMode": "critical",
+ "fill": true,
+ "line": true,
+ "op": "gt",
+ "value": 0,
+ "yaxis": "left"
+ }
+ ],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Unroutable messages dropped / s",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "decimals": null,
+ "format": "short",
+ "label": "",
+ "logBase": 1,
+ "max": null,
+ "min": "0",
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "cacheTimeout": null,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": null,
+ "description": "The rate of messages that cannot be routed and are returned back to publishers.\n\nSustained values above zero may indicate a routing problem on the publisher end.\n\n* [Unroutable Message Handling](https://www.rabbitmq.com/publishers.html#unroutable)\n* [When Will Published Messages Be Confirmed by the Broker?](https://www.rabbitmq.com/confirms.html#when-publishes-are-confirmed)",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
+ "fill": 10,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 5,
+ "w": 12,
+ "x": 12,
+ "y": 36
+ },
+ "hiddenSeries": false,
+ "id": 16,
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": true,
+ "max": true,
+ "min": true,
+ "rightSide": false,
+ "show": false,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pluginVersion": "7.3.2",
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "/rabbit/",
+ "color": "#C4162A"
+ }
+ ],
+ "spaceLength": 10,
+ "stack": true,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(rate(rabbitmq_channel_messages_unroutable_returned_total[60s]) * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\"}) by(rabbitmq_node)",
+ "format": "time_series",
+ "instant": false,
+ "intervalFactor": 1,
+ "legendFormat": "{{rabbitmq_node}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [
+ {
+ "colorMode": "critical",
+ "fill": true,
+ "line": true,
+ "op": "gt",
+ "value": 0,
+ "yaxis": "left"
+ }
+ ],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Unroutable messages returned to publishers / s",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "decimals": null,
+ "format": "short",
+ "label": "",
+ "logBase": 1,
+ "max": null,
+ "min": "0",
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "collapsed": false,
+ "datasource": null,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 41
+ },
+ "id": 29,
+ "panels": [],
+ "title": "OUTGOING MESSAGES",
+ "type": "row"
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "cacheTimeout": null,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": null,
+ "description": "The rate of messages delivered to consumers. It includes messages that have been redelivered.\n\nThis metric does not include messages that have been fetched by consumers using `basic.get` (consumed by polling).\n\n* [Consumers](https://www.rabbitmq.com/consumers.html)",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
+ "fill": 10,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 5,
+ "w": 12,
+ "x": 0,
+ "y": 42
+ },
+ "id": 14,
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": true,
+ "max": true,
+ "min": true,
+ "rightSide": false,
+ "show": false,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pluginVersion": "7.3.2",
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "/^rabbit@[\\w.-]+0/",
+ "color": "#56A64B"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+1/",
+ "color": "#F2CC0C"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+2/",
+ "color": "#3274D9"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+3/",
+ "color": "#A352CC"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+4/",
+ "color": "#FF780A"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+5/",
+ "color": "#96D98D"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+6/",
+ "color": "#FFEE52"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+7/",
+ "color": "#8AB8FF"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+8/",
+ "color": "#CA95E5"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+9/",
+ "color": "#FFB357"
+ }
+ ],
+ "spaceLength": 10,
+ "stack": true,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(\n (rate(rabbitmq_channel_messages_delivered_total[60s]) * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\"}) +\n (rate(rabbitmq_channel_messages_delivered_ack_total[60s]) * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\"})\n) by(rabbitmq_node)",
+ "format": "time_series",
+ "instant": false,
+ "intervalFactor": 1,
+ "legendFormat": "{{rabbitmq_node}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Messages delivered / s",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "decimals": 0,
+ "format": "short",
+ "label": "",
+ "logBase": 1,
+ "max": null,
+ "min": "0",
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "cacheTimeout": null,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": null,
+ "description": "The rate of messages that have been redelivered to consumers. It includes messages that have been requeued automatically and redelivered due to channel exceptions or connection closures.\n\nHaving some redeliveries is expected, but if this metric is consistently non-zero, it is worth investigating why.\n\n* [Negative Acknowledgement and Requeuing of Deliveries](https://www.rabbitmq.com/confirms.html#consumer-nacks-requeue)\n* [Consumers](https://www.rabbitmq.com/consumers.html)",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
+ "fill": 10,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 5,
+ "w": 12,
+ "x": 12,
+ "y": 42
+ },
+ "id": 15,
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": true,
+ "max": true,
+ "min": true,
+ "rightSide": false,
+ "show": false,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pluginVersion": "7.3.2",
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "/^rabbit@[\\w.-]+0/",
+ "color": "#56A64B"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+1/",
+ "color": "#F2CC0C"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+2/",
+ "color": "#3274D9"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+3/",
+ "color": "#A352CC"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+4/",
+ "color": "#FF780A"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+5/",
+ "color": "#96D98D"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+6/",
+ "color": "#FFEE52"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+7/",
+ "color": "#8AB8FF"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+8/",
+ "color": "#CA95E5"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+9/",
+ "color": "#FFB357"
+ }
+ ],
+ "spaceLength": 10,
+ "stack": true,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(rate(rabbitmq_channel_messages_redelivered_total[60s]) * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\"}) by(rabbitmq_node)",
+ "format": "time_series",
+ "instant": false,
+ "intervalFactor": 1,
+ "legendFormat": "{{rabbitmq_node}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [
+ {
+ "colorMode": "warning",
+ "fill": true,
+ "line": true,
+ "op": "gt",
+ "value": 20,
+ "yaxis": "left"
+ },
+ {
+ "colorMode": "critical",
+ "fill": true,
+ "line": true,
+ "op": "gt",
+ "value": 100,
+ "yaxis": "left"
+ }
+ ],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Messages redelivered / s",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "decimals": null,
+ "format": "short",
+ "label": "",
+ "logBase": 1,
+ "max": null,
+ "min": "0",
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "cacheTimeout": null,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": null,
+ "description": "The rate of message deliveries to consumers that use manual acknowledgement mode.\n\nWhen this mode is used, RabbitMQ waits for consumers to acknowledge messages before more messages can be delivered.\n\nThis is the safest way of consuming messages.\n\n* [Consumer Acknowledgements](https://www.rabbitmq.com/confirms.html)\n* [Consumer Prefetch](https://www.rabbitmq.com/consumer-prefetch.html)\n* [Consumer Acknowledgement Modes, Prefetch and Throughput](https://www.rabbitmq.com/confirms.html#channel-qos-prefetch-throughput)\n* [Consumers](https://www.rabbitmq.com/consumers.html)",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
+ "fill": 10,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 5,
+ "w": 12,
+ "x": 0,
+ "y": 47
+ },
+ "id": 20,
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": true,
+ "max": true,
+ "min": true,
+ "rightSide": false,
+ "show": false,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pluginVersion": "7.3.2",
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "/^rabbit@[\\w.-]+0/",
+ "color": "#56A64B"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+1/",
+ "color": "#F2CC0C"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+2/",
+ "color": "#3274D9"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+3/",
+ "color": "#A352CC"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+4/",
+ "color": "#FF780A"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+5/",
+ "color": "#96D98D"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+6/",
+ "color": "#FFEE52"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+7/",
+ "color": "#8AB8FF"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+8/",
+ "color": "#CA95E5"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+9/",
+ "color": "#FFB357"
+ }
+ ],
+ "spaceLength": 10,
+ "stack": true,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(rate(rabbitmq_channel_messages_delivered_ack_total[60s]) * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\"}) by(rabbitmq_node)",
+ "format": "time_series",
+ "instant": false,
+ "intervalFactor": 1,
+ "legendFormat": "{{rabbitmq_node}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Messages delivered with manual ack / s",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "decimals": 0,
+ "format": "short",
+ "label": "",
+ "logBase": 1,
+ "max": null,
+ "min": "0",
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "cacheTimeout": null,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": null,
+ "description": "The rate of message deliveries to consumers that use automatic acknowledgement mode.\n\nWhen this mode is used, RabbitMQ does not wait for consumers to acknowledge message deliveries.\n\nThis mode is fire-and-forget and does not offer any delivery safety guarantees. It tends to provide higher throughput and it may lead to consumer overload and higher consumer memory usage.\n\n* [Consumer Acknowledgement Modes, Prefetch and Throughput](https://www.rabbitmq.com/confirms.html#channel-qos-prefetch-throughput)\n* [Consumers](https://www.rabbitmq.com/consumers.html)",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
+ "fill": 10,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 5,
+ "w": 12,
+ "x": 12,
+ "y": 47
+ },
+ "id": 21,
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": true,
+ "max": true,
+ "min": true,
+ "rightSide": false,
+ "show": false,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pluginVersion": "7.3.2",
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "/^rabbit@[\\w.-]+0/",
+ "color": "#56A64B"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+1/",
+ "color": "#F2CC0C"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+2/",
+ "color": "#3274D9"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+3/",
+ "color": "#A352CC"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+4/",
+ "color": "#FF780A"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+5/",
+ "color": "#96D98D"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+6/",
+ "color": "#FFEE52"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+7/",
+ "color": "#8AB8FF"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+8/",
+ "color": "#CA95E5"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+9/",
+ "color": "#FFB357"
+ }
+ ],
+ "spaceLength": 10,
+ "stack": true,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(rate(rabbitmq_channel_messages_delivered_total[60s]) * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\"}) by(rabbitmq_node)",
+ "format": "time_series",
+ "instant": false,
+ "intervalFactor": 1,
+ "legendFormat": "{{rabbitmq_node}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Messages delivered auto ack / s",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "decimals": 0,
+ "format": "short",
+ "label": "",
+ "logBase": 1,
+ "max": null,
+ "min": "0",
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "cacheTimeout": null,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": null,
+ "description": "The rate of message acknowledgements coming from consumers that use manual acknowledgement mode.\n\n* [Consumer Acknowledgements](https://www.rabbitmq.com/confirms.html)\n* [Consumer Prefetch](https://www.rabbitmq.com/consumer-prefetch.html)\n* [Consumer Acknowledgement Modes, Prefetch and Throughput](https://www.rabbitmq.com/confirms.html#channel-qos-prefetch-throughput)\n* [Consumers](https://www.rabbitmq.com/consumers.html)",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
+ "fill": 10,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 5,
+ "w": 12,
+ "x": 0,
+ "y": 52
+ },
+ "id": 22,
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": true,
+ "max": true,
+ "min": true,
+ "rightSide": false,
+ "show": false,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pluginVersion": "7.3.2",
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "/^rabbit@[\\w.-]+0/",
+ "color": "#56A64B"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+1/",
+ "color": "#F2CC0C"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+2/",
+ "color": "#3274D9"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+3/",
+ "color": "#A352CC"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+4/",
+ "color": "#FF780A"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+5/",
+ "color": "#96D98D"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+6/",
+ "color": "#FFEE52"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+7/",
+ "color": "#8AB8FF"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+8/",
+ "color": "#CA95E5"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+9/",
+ "color": "#FFB357"
+ }
+ ],
+ "spaceLength": 10,
+ "stack": true,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(rate(rabbitmq_channel_messages_acked_total[60s]) * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\"}) by(rabbitmq_node)",
+ "format": "time_series",
+ "instant": false,
+ "intervalFactor": 1,
+ "legendFormat": "{{rabbitmq_node}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Messages acknowledged / s",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "decimals": 0,
+ "format": "short",
+ "label": "",
+ "logBase": 1,
+ "max": null,
+ "min": "0",
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "cacheTimeout": null,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": null,
+ "description": "The rate of messages delivered to polling consumers that use automatic acknowledgement mode.\n\nThe use of polling consumers is highly inefficient and therefore strongly discouraged.\n\n* [Fetching individual messages](https://www.rabbitmq.com/consumers.html#fetching)\n* [Consumers](https://www.rabbitmq.com/consumers.html)",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
+ "fill": 10,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 5,
+ "w": 12,
+ "x": 12,
+ "y": 52
+ },
+ "id": 24,
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": true,
+ "max": true,
+ "min": true,
+ "rightSide": false,
+ "show": false,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pluginVersion": "7.3.2",
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "/rabbit/",
+ "color": "#C4162A"
+ }
+ ],
+ "spaceLength": 10,
+ "stack": true,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(rate(rabbitmq_channel_get_total[60s]) * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\"}) by(rabbitmq_node)",
+ "format": "time_series",
+ "instant": false,
+ "intervalFactor": 1,
+ "legendFormat": "{{rabbitmq_node}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [
+ {
+ "colorMode": "critical",
+ "fill": true,
+ "line": true,
+ "op": "gt",
+ "value": 0,
+ "yaxis": "left"
+ }
+ ],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Polling operations with auto ack / s",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "decimals": null,
+ "format": "short",
+ "label": "",
+ "logBase": 1,
+ "max": null,
+ "min": "0",
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "cacheTimeout": null,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": null,
+ "description": "The rate of polling consumer operations that yield no result.\n\nAny value above zero means that RabbitMQ resources are wasted by polling consumers.\n\nCompare this metric to the other polling consumer metrics to see the inefficiency rate.\n\nThe use of polling consumers is highly inefficient and therefore strongly discouraged.\n\n* [Fetching individual messages](https://www.rabbitmq.com/consumers.html#fetching)\n* [Consumers](https://www.rabbitmq.com/consumers.html)",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
+ "fill": 10,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 5,
+ "w": 12,
+ "x": 0,
+ "y": 57
+ },
+ "id": 25,
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": true,
+ "hideEmpty": false,
+ "max": true,
+ "min": true,
+ "rightSide": false,
+ "show": false,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pluginVersion": "7.3.2",
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "/rabbit/",
+ "color": "#C4162A"
+ }
+ ],
+ "spaceLength": 10,
+ "stack": true,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(rate(rabbitmq_channel_get_empty_total[60s]) * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\"}) by(rabbitmq_node)",
+ "format": "time_series",
+ "instant": false,
+ "intervalFactor": 1,
+ "legendFormat": "{{rabbitmq_node}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [
+ {
+ "colorMode": "critical",
+ "fill": true,
+ "line": true,
+ "op": "gt",
+ "value": 0,
+ "yaxis": "left"
+ }
+ ],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Polling operations that yield no result / s",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "decimals": null,
+ "format": "short",
+ "label": "",
+ "logBase": 1,
+ "max": null,
+ "min": "0",
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "cacheTimeout": null,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": null,
+ "description": "The rate of messages delivered to polling consumers that use manual acknowledgement mode.\n\nThe use of polling consumers is highly inefficient and therefore strongly discouraged.\n\n* [Fetching individual messages](https://www.rabbitmq.com/consumers.html#fetching)\n* [Consumers](https://www.rabbitmq.com/consumers.html)",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
+ "fill": 10,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 5,
+ "w": 12,
+ "x": 12,
+ "y": 57
+ },
+ "id": 23,
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": true,
+ "max": true,
+ "min": true,
+ "rightSide": false,
+ "show": false,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pluginVersion": "7.3.2",
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "/rabbit/",
+ "color": "#C4162A"
+ }
+ ],
+ "spaceLength": 10,
+ "stack": true,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(rate(rabbitmq_channel_get_ack_total[60s]) * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\"}) by(rabbitmq_node)",
+ "format": "time_series",
+ "instant": false,
+ "intervalFactor": 1,
+ "legendFormat": "{{rabbitmq_node}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [
+ {
+ "colorMode": "critical",
+ "fill": true,
+ "line": true,
+ "op": "gt",
+ "value": 0,
+ "yaxis": "left"
+ }
+ ],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Polling operations with manual ack / s",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "decimals": null,
+ "format": "short",
+ "label": "",
+ "logBase": 1,
+ "max": null,
+ "min": "0",
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "collapsed": false,
+ "datasource": null,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 62
+ },
+ "id": 53,
+ "panels": [],
+ "title": "QUEUES",
+ "type": "row"
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "cacheTimeout": null,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": null,
+ "description": "Total number of queue masters per node. \n\nThis metric makes it easy to see sub-optimal queue distribution in a cluster.\n\n* [Queue Masters, Data Locality](https://www.rabbitmq.com/ha.html#master-migration-data-locality)\n* [Queues](https://www.rabbitmq.com/queues.html)",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
+ "fill": 10,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 5,
+ "w": 12,
+ "x": 0,
+ "y": 63
+ },
+ "id": 57,
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": true,
+ "max": true,
+ "min": true,
+ "rightSide": false,
+ "show": false,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pluginVersion": "7.3.2",
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "/^rabbit@[\\w.-]+0/",
+ "color": "#56A64B"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+1/",
+ "color": "#F2CC0C"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+2/",
+ "color": "#3274D9"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+3/",
+ "color": "#A352CC"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+4/",
+ "color": "#FF780A"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+5/",
+ "color": "#96D98D"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+6/",
+ "color": "#FFEE52"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+7/",
+ "color": "#8AB8FF"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+8/",
+ "color": "#CA95E5"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+9/",
+ "color": "#FFB357"
+ }
+ ],
+ "spaceLength": 10,
+ "stack": true,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "rabbitmq_queues * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\"}",
+ "format": "time_series",
+ "instant": false,
+ "intervalFactor": 1,
+ "legendFormat": "{{rabbitmq_node}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Total queues",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "decimals": -1,
+ "format": "short",
+ "label": "",
+ "logBase": 1,
+ "max": null,
+ "min": "0",
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "cacheTimeout": null,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": null,
+ "description": "The rate of queue declarations performed by clients.\n\nLow sustained values above zero are to be expected. High rates may be indicative of queue churn or high rates of connection recovery. Confirm connection recovery rates by using the _Connections opened_ metric.\n\n* [Queues](https://www.rabbitmq.com/queues.html)",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
+ "fill": 10,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 5,
+ "w": 4,
+ "x": 12,
+ "y": 63
+ },
+ "id": 58,
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": true,
+ "max": true,
+ "min": true,
+ "rightSide": false,
+ "show": false,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pluginVersion": "7.3.2",
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "/^rabbit@[\\w.-]+0/",
+ "color": "#56A64B"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+1/",
+ "color": "#F2CC0C"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+2/",
+ "color": "#3274D9"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+3/",
+ "color": "#A352CC"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+4/",
+ "color": "#FF780A"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+5/",
+ "color": "#96D98D"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+6/",
+ "color": "#FFEE52"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+7/",
+ "color": "#8AB8FF"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+8/",
+ "color": "#CA95E5"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+9/",
+ "color": "#FFB357"
+ }
+ ],
+ "spaceLength": 10,
+ "stack": true,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(rate(rabbitmq_queues_declared_total[60s]) * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\"}) by(rabbitmq_node)",
+ "format": "time_series",
+ "instant": false,
+ "intervalFactor": 1,
+ "legendFormat": "{{rabbitmq_node}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [
+ {
+ "colorMode": "warning",
+ "fill": true,
+ "line": true,
+ "op": "gt",
+ "value": 2,
+ "yaxis": "left"
+ },
+ {
+ "colorMode": "critical",
+ "fill": true,
+ "line": true,
+ "op": "gt",
+ "value": 10,
+ "yaxis": "left"
+ }
+ ],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Queues declared / s",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "decimals": null,
+ "format": "short",
+ "label": "",
+ "logBase": 1,
+ "max": null,
+ "min": "0",
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "cacheTimeout": null,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": null,
+ "description": "The rate of new queues created (as opposed to redeclarations).\n\nLow sustained values above zero are to be expected. High rates may be indicative of queue churn or high rates of connection recovery. Confirm connection recovery rates by using the _Connections opened_ metric.\n\n* [Queues](https://www.rabbitmq.com/queues.html)",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
+ "fill": 10,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 5,
+ "w": 4,
+ "x": 16,
+ "y": 63
+ },
+ "id": 60,
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": true,
+ "max": true,
+ "min": true,
+ "rightSide": false,
+ "show": false,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pluginVersion": "7.3.2",
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "/^rabbit@[\\w.-]+0/",
+ "color": "#56A64B"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+1/",
+ "color": "#F2CC0C"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+2/",
+ "color": "#3274D9"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+3/",
+ "color": "#A352CC"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+4/",
+ "color": "#FF780A"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+5/",
+ "color": "#96D98D"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+6/",
+ "color": "#FFEE52"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+7/",
+ "color": "#8AB8FF"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+8/",
+ "color": "#CA95E5"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+9/",
+ "color": "#FFB357"
+ }
+ ],
+ "spaceLength": 10,
+ "stack": true,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(rate(rabbitmq_queues_created_total[60s]) * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\"}) by(rabbitmq_node)",
+ "format": "time_series",
+ "instant": false,
+ "intervalFactor": 1,
+ "legendFormat": "{{rabbitmq_node}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [
+ {
+ "colorMode": "warning",
+ "fill": true,
+ "line": true,
+ "op": "gt",
+ "value": 2,
+ "yaxis": "left"
+ },
+ {
+ "colorMode": "critical",
+ "fill": true,
+ "line": true,
+ "op": "gt",
+ "value": 10,
+ "yaxis": "left"
+ }
+ ],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Queues created / s",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "decimals": null,
+ "format": "short",
+ "label": "",
+ "logBase": 1,
+ "max": null,
+ "min": "0",
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "cacheTimeout": null,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": null,
+ "description": "The rate of queues deleted.\n\nLow sustained values above zero are to be expected. High rates may be indicative of queue churn or high rates of connection recovery. Confirm connection recovery rates by using the _Connections opened_ metric.\n\n* [Queues](https://www.rabbitmq.com/queues.html)",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
+ "fill": 10,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 5,
+ "w": 4,
+ "x": 20,
+ "y": 63
+ },
+ "id": 59,
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": true,
+ "max": true,
+ "min": true,
+ "rightSide": false,
+ "show": false,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pluginVersion": "7.3.2",
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "/^rabbit@[\\w.-]+0/",
+ "color": "#56A64B"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+1/",
+ "color": "#F2CC0C"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+2/",
+ "color": "#3274D9"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+3/",
+ "color": "#A352CC"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+4/",
+ "color": "#FF780A"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+5/",
+ "color": "#96D98D"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+6/",
+ "color": "#FFEE52"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+7/",
+ "color": "#8AB8FF"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+8/",
+ "color": "#CA95E5"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+9/",
+ "color": "#FFB357"
+ }
+ ],
+ "spaceLength": 10,
+ "stack": true,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(rate(rabbitmq_queues_deleted_total[60s]) * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\"}) by(rabbitmq_node)",
+ "format": "time_series",
+ "instant": false,
+ "intervalFactor": 1,
+ "legendFormat": "{{rabbitmq_node}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [
+ {
+ "colorMode": "warning",
+ "fill": true,
+ "line": true,
+ "op": "gt",
+ "value": 2,
+ "yaxis": "left"
+ },
+ {
+ "colorMode": "critical",
+ "fill": true,
+ "line": true,
+ "op": "gt",
+ "value": 10,
+ "yaxis": "left"
+ }
+ ],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Queues deleted / s",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "decimals": null,
+ "format": "short",
+ "label": "",
+ "logBase": 1,
+ "max": null,
+ "min": "0",
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "collapsed": false,
+ "datasource": null,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 68
+ },
+ "id": 51,
+ "panels": [],
+ "title": "CHANNELS",
+ "type": "row"
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "cacheTimeout": null,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": null,
+ "description": "Total number of channels on all currently opened connections.\n\nIf this metric grows monotonically it is highly likely a channel leak in one of the applications. Confirm channel leaks by using the _Channels opened_ and _Channels closed_ metrics.\n\n* [Channel Leak](https://www.rabbitmq.com/channels.html#channel-leaks)\n* [Channels](https://www.rabbitmq.com/channels.html)",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
+ "fill": 10,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 5,
+ "w": 12,
+ "x": 0,
+ "y": 69
+ },
+ "id": 54,
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": true,
+ "max": true,
+ "min": true,
+ "rightSide": false,
+ "show": false,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pluginVersion": "7.3.2",
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "/^rabbit@[\\w.-]+0/",
+ "color": "#56A64B"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+1/",
+ "color": "#F2CC0C"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+2/",
+ "color": "#3274D9"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+3/",
+ "color": "#A352CC"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+4/",
+ "color": "#FF780A"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+5/",
+ "color": "#96D98D"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+6/",
+ "color": "#FFEE52"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+7/",
+ "color": "#8AB8FF"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+8/",
+ "color": "#CA95E5"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+9/",
+ "color": "#FFB357"
+ }
+ ],
+ "spaceLength": 10,
+ "stack": true,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "rabbitmq_channels * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\"}",
+ "format": "time_series",
+ "instant": false,
+ "intervalFactor": 1,
+ "legendFormat": "{{rabbitmq_node}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Total channels",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "decimals": -1,
+ "format": "short",
+ "label": "",
+ "logBase": 1,
+ "max": null,
+ "min": "0",
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "cacheTimeout": null,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": null,
+ "description": "The rate of new channels opened by applications across all connections. Channels are expected to be long-lived.\n\nLow sustained values above zero are to be expected. High rates may be indicative of channel churn or mass connection recovery. Confirm connection recovery rates by using the _Connections opened_ metric.\n\n* [High Channel Churn](https://www.rabbitmq.com/channels.html#high-channel-churn)\n* [Channels](https://www.rabbitmq.com/channels.html)",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
+ "fill": 10,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 5,
+ "w": 6,
+ "x": 12,
+ "y": 69
+ },
+ "id": 55,
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": true,
+ "max": true,
+ "min": true,
+ "rightSide": false,
+ "show": false,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pluginVersion": "7.3.2",
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "/^rabbit@[\\w.-]+0/",
+ "color": "#56A64B"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+1/",
+ "color": "#F2CC0C"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+2/",
+ "color": "#3274D9"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+3/",
+ "color": "#A352CC"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+4/",
+ "color": "#FF780A"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+5/",
+ "color": "#96D98D"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+6/",
+ "color": "#FFEE52"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+7/",
+ "color": "#8AB8FF"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+8/",
+ "color": "#CA95E5"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+9/",
+ "color": "#FFB357"
+ }
+ ],
+ "spaceLength": 10,
+ "stack": true,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(rate(rabbitmq_channels_opened_total[60s]) * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\"}) by(rabbitmq_node)",
+ "format": "time_series",
+ "instant": false,
+ "intervalFactor": 1,
+ "legendFormat": "{{rabbitmq_node}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [
+ {
+ "colorMode": "warning",
+ "fill": true,
+ "line": true,
+ "op": "gt",
+ "value": 2,
+ "yaxis": "left"
+ },
+ {
+ "colorMode": "critical",
+ "fill": true,
+ "line": true,
+ "op": "gt",
+ "value": 10,
+ "yaxis": "left"
+ }
+ ],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Channels opened / s",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "decimals": null,
+ "format": "short",
+ "label": "",
+ "logBase": 1,
+ "max": null,
+ "min": "0",
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "cacheTimeout": null,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": null,
+ "description": "The rate of channels closed by applications across all connections. Channels are expected to be long-lived.\n\nLow sustained values above zero are to be expected. High rates may be indicative of channel churn or mass connection recovery. Confirm connection recovery rates by using the _Connections opened_ metric.\n\n* [High Channel Churn](https://www.rabbitmq.com/channels.html#high-channel-churn)\n* [Channels](https://www.rabbitmq.com/channels.html)",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
+ "fill": 10,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 5,
+ "w": 6,
+ "x": 18,
+ "y": 69
+ },
+ "id": 56,
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": true,
+ "max": true,
+ "min": true,
+ "rightSide": false,
+ "show": false,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pluginVersion": "7.3.2",
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "/^rabbit@[\\w.-]+0/",
+ "color": "#56A64B"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+1/",
+ "color": "#F2CC0C"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+2/",
+ "color": "#3274D9"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+3/",
+ "color": "#A352CC"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+4/",
+ "color": "#FF780A"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+5/",
+ "color": "#96D98D"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+6/",
+ "color": "#FFEE52"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+7/",
+ "color": "#8AB8FF"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+8/",
+ "color": "#CA95E5"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+9/",
+ "color": "#FFB357"
+ }
+ ],
+ "spaceLength": 10,
+ "stack": true,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(rate(rabbitmq_channels_closed_total[60s]) * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\"}) by(rabbitmq_node)",
+ "format": "time_series",
+ "instant": false,
+ "intervalFactor": 1,
+ "legendFormat": "{{rabbitmq_node}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [
+ {
+ "colorMode": "warning",
+ "fill": true,
+ "line": true,
+ "op": "gt",
+ "value": 2,
+ "yaxis": "left"
+ },
+ {
+ "colorMode": "critical",
+ "fill": true,
+ "line": true,
+ "op": "gt",
+ "value": 10,
+ "yaxis": "left"
+ }
+ ],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Channels closed / s",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "decimals": null,
+ "format": "short",
+ "label": "",
+ "logBase": 1,
+ "max": null,
+ "min": "0",
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "collapsed": false,
+ "datasource": null,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 74
+ },
+ "id": 46,
+ "panels": [],
+ "title": "CONNECTIONS",
+ "type": "row"
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "cacheTimeout": null,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": null,
+ "description": "Total number of client connections.\n\nIf this metric grows monotonically it is highly likely a connection leak in one of the applications. Confirm connection leaks by using the _Connections opened_ and _Connections closed_ metrics.\n\n* [Connection Leak](https://www.rabbitmq.com/connections.html#monitoring)\n* [Connections](https://www.rabbitmq.com/connections.html)",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
+ "fill": 10,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 5,
+ "w": 12,
+ "x": 0,
+ "y": 75
+ },
+ "id": 47,
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": true,
+ "max": true,
+ "min": true,
+ "rightSide": false,
+ "show": false,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pluginVersion": "7.3.2",
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "/^rabbit@[\\w.-]+0/",
+ "color": "#56A64B"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+1/",
+ "color": "#F2CC0C"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+2/",
+ "color": "#3274D9"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+3/",
+ "color": "#A352CC"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+4/",
+ "color": "#FF780A"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+5/",
+ "color": "#96D98D"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+6/",
+ "color": "#FFEE52"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+7/",
+ "color": "#8AB8FF"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+8/",
+ "color": "#CA95E5"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+9/",
+ "color": "#FFB357"
+ }
+ ],
+ "spaceLength": 10,
+ "stack": true,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "rabbitmq_connections * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\"}",
+ "format": "time_series",
+ "instant": false,
+ "intervalFactor": 1,
+ "legendFormat": "{{rabbitmq_node}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Total connections",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "decimals": -1,
+ "format": "short",
+ "label": "",
+ "logBase": 1,
+ "max": null,
+ "min": "0",
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "cacheTimeout": null,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": null,
+ "description": "The rate of new connections opened by clients. Connections are expected to be long-lived.\n\nLow sustained values above zero are to be expected. High rates may be indicative of connection churn or mass connection recovery.\n\n* [Connection Leak](https://www.rabbitmq.com/connections.html#monitoring)\n* [Connections](https://www.rabbitmq.com/connections.html)",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
+ "fill": 10,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 5,
+ "w": 6,
+ "x": 12,
+ "y": 75
+ },
+ "id": 48,
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": true,
+ "max": true,
+ "min": true,
+ "rightSide": false,
+ "show": false,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pluginVersion": "7.3.2",
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "/^rabbit@[\\w.-]+0/",
+ "color": "#56A64B"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+1/",
+ "color": "#F2CC0C"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+2/",
+ "color": "#3274D9"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+3/",
+ "color": "#A352CC"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+4/",
+ "color": "#FF780A"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+5/",
+ "color": "#96D98D"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+6/",
+ "color": "#FFEE52"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+7/",
+ "color": "#8AB8FF"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+8/",
+ "color": "#CA95E5"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+9/",
+ "color": "#FFB357"
+ }
+ ],
+ "spaceLength": 10,
+ "stack": true,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(rate(rabbitmq_connections_opened_total[60s]) * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\"}) by(rabbitmq_node)",
+ "format": "time_series",
+ "instant": false,
+ "interval": "",
+ "intervalFactor": 1,
+ "legendFormat": "{{rabbitmq_node}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [
+ {
+ "colorMode": "warning",
+ "fill": true,
+ "line": true,
+ "op": "gt",
+ "value": 2,
+ "yaxis": "left"
+ },
+ {
+ "colorMode": "critical",
+ "fill": true,
+ "line": true,
+ "op": "gt",
+ "value": 10,
+ "yaxis": "left"
+ }
+ ],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Connections opened / s",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "decimals": null,
+ "format": "short",
+ "label": "",
+ "logBase": 1,
+ "max": null,
+ "min": "0",
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "cacheTimeout": null,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": null,
+ "description": "The rate of connections closed. Connections are expected to be long-lived.\n\nLow sustained values above zero are to be expected. High rates may be indicative of connection churn or mass connection recovery.\n\n* [Connections](https://www.rabbitmq.com/connections.html)",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
+ "fill": 10,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 5,
+ "w": 6,
+ "x": 18,
+ "y": 75
+ },
+ "id": 49,
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": true,
+ "max": true,
+ "min": true,
+ "rightSide": false,
+ "show": false,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pluginVersion": "7.3.2",
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "/^rabbit@[\\w.-]+0/",
+ "color": "#56A64B"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+1/",
+ "color": "#F2CC0C"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+2/",
+ "color": "#3274D9"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+3/",
+ "color": "#A352CC"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+4/",
+ "color": "#FF780A"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+5/",
+ "color": "#96D98D"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+6/",
+ "color": "#FFEE52"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+7/",
+ "color": "#8AB8FF"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+8/",
+ "color": "#CA95E5"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+9/",
+ "color": "#FFB357"
+ }
+ ],
+ "spaceLength": 10,
+ "stack": true,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(rate(rabbitmq_connections_closed_total[60s]) * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\"}) by(rabbitmq_node)",
+ "format": "time_series",
+ "instant": false,
+ "intervalFactor": 1,
+ "legendFormat": "{{rabbitmq_node}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [
+ {
+ "colorMode": "warning",
+ "fill": true,
+ "line": true,
+ "op": "gt",
+ "value": 2,
+ "yaxis": "left"
+ },
+ {
+ "colorMode": "critical",
+ "fill": true,
+ "line": true,
+ "op": "gt",
+ "value": 10,
+ "yaxis": "left"
+ }
+ ],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Connections closed / s",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "decimals": null,
+ "format": "short",
+ "label": "",
+ "logBase": 1,
+ "max": null,
+ "min": "0",
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ }
+ ],
+ "refresh": "15s",
+ "schemaVersion": 26,
+ "style": "dark",
+ "tags": [
+ "rabbitmq-prometheus"
+ ],
+ "templating": {
+ "list": [
+ {
+ "allValue": null,
+ "current": {},
+ "datasource": null,
+ "definition": "label_values(rabbitmq_identity_info, rabbitmq_cluster)",
+ "error": null,
+ "hide": 0,
+ "includeAll": false,
+ "label": "RabbitMQ Cluster",
+ "multi": false,
+ "name": "rabbitmq_cluster",
+ "options": [],
+ "query": "label_values(rabbitmq_identity_info, rabbitmq_cluster)",
+ "refresh": 2,
+ "regex": "",
+ "skipUrlSync": false,
+ "sort": 1,
+ "tagValuesQuery": "",
+ "tags": [],
+ "tagsQuery": "",
+ "type": "query",
+ "useTags": false
+ }
+ ]
+ },
+ "time": {
+ "from": "now-15m",
+ "to": "now"
+ },
+ "timepicker": {
+ "refresh_intervals": [
+ "15s",
+ "30s",
+ "1m",
+ "5m",
+ "10m"
+ ],
+ "time_options": [
+ "5m",
+ "15m",
+ "1h",
+ "6h",
+ "12h",
+ "24h",
+ "2d",
+ "7d",
+ "30d"
+ ]
+ },
+ "timezone": "",
+ "title": "RabbitMQ-Overview",
+ "uid": "Kn5xm-gZk",
+ "version": 7
+}
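Throughout the Overview dashboard above, every panel query joins a node-local metric onto `rabbitmq_identity_info` with `* on(instance) group_left(rabbitmq_cluster, rabbitmq_node)`; that join is what makes the `$rabbitmq_cluster` template variable and the per-node legends work. Below is a minimal sketch of the same pattern expressed as a Prometheus recording rule; the rule name is illustrative, the metric names come from the dashboard itself.

```yaml
# Sketch of the identity-join pattern the dashboard queries rely on:
# node-local series gain rabbitmq_cluster / rabbitmq_node labels from
# the rabbitmq_identity_info info-metric. The rule name is hypothetical.
groups:
  - name: rabbitmq-overview
    rules:
      - record: rabbitmq:connections:by_node
        expr: >
          rabbitmq_connections
          * on(instance) group_left(rabbitmq_cluster, rabbitmq_node)
          rabbitmq_identity_info
```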
diff --git a/deps/rabbitmq_prometheus/docker/grafana/dashboards/RabbitMQ-PerfTest.json b/deps/rabbitmq_prometheus/docker/grafana/dashboards/RabbitMQ-PerfTest.json
new file mode 100644
index 0000000000..374616a3d9
--- /dev/null
+++ b/deps/rabbitmq_prometheus/docker/grafana/dashboards/RabbitMQ-PerfTest.json
@@ -0,0 +1,1739 @@
+{
+ "__requires": [
+ {
+ "type": "grafana",
+ "id": "grafana",
+ "name": "Grafana",
+ "version": "6.0.0"
+ },
+ {
+ "type": "datasource",
+ "id": "prometheus",
+ "name": "prometheus",
+ "version": "1.0.0"
+ },
+ {
+ "type": "table",
+ "id": "table",
+ "name": "Table",
+ "version": ""
+ },
+ {
+ "type": "panel",
+ "id": "graph",
+ "name": "Graph",
+ "version": ""
+ },
+ {
+ "type": "panel",
+ "id": "heatmap",
+ "name": "Heatmap",
+ "version": ""
+ }
+ ],
+ "annotations": {
+ "list": [
+ {
+ "builtIn": 1,
+ "datasource": "-- Grafana --",
+ "enable": true,
+ "hide": true,
+ "iconColor": "rgba(0, 211, 255, 1)",
+ "name": "Annotations & Alerts",
+ "type": "dashboard"
+ }
+ ]
+ },
+ "editable": true,
+ "gnetId": null,
+ "graphTooltip": 1,
+ "iteration": 1570184644782,
+ "links": [],
+ "panels": [
+ {
+ "collapsed": false,
+ "datasource": null,
+ "description": "RabbitMQ message latency & throughput across all PerfTest instances",
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 0
+ },
+ "id": 22,
+ "panels": [],
+ "title": "LATENCY",
+ "type": "row"
+ },
+ {
+ "columns": [
+ {
+ "text": "Min",
+ "value": "min"
+ },
+ {
+ "text": "Max",
+ "value": "max"
+ },
+ {
+ "text": "Avg",
+ "value": "avg"
+ },
+ {
+ "text": "Current",
+ "value": "current"
+ }
+ ],
+ "datasource": null,
+ "fontSize": "100%",
+ "gridPos": {
+ "h": 7,
+ "w": 6,
+ "x": 0,
+ "y": 1
+ },
+ "id": 33,
+ "options": {},
+ "pageSize": 5,
+ "pluginVersion": "6.4.1",
+ "showHeader": true,
+ "sort": {
+ "col": 4,
+ "desc": true
+ },
+ "styles": [
+ {
+ "alias": "Instance",
+ "colorMode": null,
+ "colors": [
+ "rgba(245, 54, 54, 0.9)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(50, 172, 45, 0.97)"
+ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 0,
+ "mappingType": 1,
+ "pattern": "Metric",
+ "thresholds": [],
+ "type": "string",
+ "unit": "short"
+ },
+ {
+ "alias": "",
+ "colorMode": null,
+ "colors": [
+ "rgba(245, 54, 54, 0.9)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(50, 172, 45, 0.97)"
+ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 1,
+ "mappingType": 1,
+ "pattern": "/.*/",
+ "thresholds": [],
+ "type": "number",
+ "unit": "s"
+ }
+ ],
+ "targets": [
+ {
+ "expr": "perftest_latency_seconds{quantile=\"$percentile\"}",
+ "legendFormat": "{{instance}}",
+ "refId": "A"
+ }
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "End-to-end message latency",
+ "transform": "timeseries_aggregations",
+ "type": "table"
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": null,
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 7,
+ "w": 6,
+ "x": 6,
+ "y": 1
+ },
+ "id": 2,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": true,
+ "min": true,
+ "rightSide": false,
+ "show": false,
+ "sort": "avg",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "perftest_latency_seconds{quantile=\"$percentile\",instance=~\"$instance\"}",
+ "format": "time_series",
+ "instant": false,
+ "interval": "1s",
+ "intervalFactor": 1,
+ "legendFormat": "{{instance}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "End-to-end message latency",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "transparent": true,
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "s",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": "0",
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "cards": {
+ "cardPadding": null,
+ "cardRound": null
+ },
+ "color": {
+ "cardColor": "rgb(255, 255, 255)",
+ "colorScale": "sqrt",
+ "colorScheme": "interpolateSpectral",
+ "exponent": 0.4,
+ "max": null,
+ "min": null,
+ "mode": "opacity"
+ },
+ "dataFormat": "timeseries",
+ "datasource": null,
+ "gridPos": {
+ "h": 7,
+ "w": 6,
+ "x": 12,
+ "y": 1
+ },
+ "heatmap": {},
+ "hideZeroBuckets": false,
+ "highlightCards": true,
+ "id": 16,
+ "legend": {
+ "show": true
+ },
+ "links": [],
+ "options": {},
+ "reverseYBuckets": false,
+ "targets": [
+ {
+ "expr": "perftest_latency_seconds{quantile=\"$percentile\",instance=~\"$instance\"} > 0",
+ "format": "heatmap",
+ "intervalFactor": 1,
+ "refId": "A"
+ }
+ ],
+ "title": "End-to-end message latency distribution",
+ "tooltip": {
+ "show": true,
+ "showHistogram": true
+ },
+ "transparent": true,
+ "type": "heatmap",
+ "xAxis": {
+ "show": true
+ },
+ "xBucketNumber": null,
+ "xBucketSize": null,
+ "yAxis": {
+ "decimals": null,
+ "format": "s",
+ "logBase": 1,
+ "max": null,
+ "min": "0",
+ "show": true,
+ "splitFactor": null
+ },
+ "yBucketBound": "auto",
+ "yBucketNumber": null,
+ "yBucketSize": null
+ },
+ {
+ "aliasColors": {},
+ "bars": true,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": null,
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 7,
+ "w": 6,
+ "x": 18,
+ "y": 1
+ },
+ "id": 17,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": true,
+ "min": true,
+ "rightSide": false,
+ "show": false,
+ "sort": "max",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": false,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "perftest_latency_seconds{quantile=\"$percentile\",instance=~\"$instance\"} > 0",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "legendFormat": "{{instance}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "End-to-end message latency distribution",
+ "tooltip": {
+ "shared": false,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "transparent": true,
+ "type": "graph",
+ "xaxis": {
+ "buckets": 20,
+ "mode": "histogram",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "none",
+ "label": "",
+ "logBase": 1,
+ "max": null,
+ "min": "0",
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "columns": [
+ {
+ "text": "Min",
+ "value": "min"
+ },
+ {
+ "text": "Max",
+ "value": "max"
+ },
+ {
+ "text": "Avg",
+ "value": "avg"
+ },
+ {
+ "text": "Current",
+ "value": "current"
+ }
+ ],
+ "datasource": null,
+ "fontSize": "100%",
+ "gridPos": {
+ "h": 7,
+ "w": 6,
+ "x": 0,
+ "y": 8
+ },
+ "id": 34,
+ "options": {},
+ "pageSize": 5,
+ "pluginVersion": "6.4.1",
+ "showHeader": true,
+ "sort": {
+ "col": 4,
+ "desc": true
+ },
+ "styles": [
+ {
+ "alias": "Instance",
+ "colorMode": null,
+ "colors": [
+ "rgba(245, 54, 54, 0.9)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(50, 172, 45, 0.97)"
+ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 0,
+ "mappingType": 1,
+ "pattern": "Metric",
+ "thresholds": [],
+ "type": "string",
+ "unit": "short"
+ },
+ {
+ "alias": "",
+ "colorMode": null,
+ "colors": [
+ "rgba(245, 54, 54, 0.9)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(50, 172, 45, 0.97)"
+ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 1,
+ "mappingType": 1,
+ "pattern": "/.*/",
+ "thresholds": [],
+ "type": "number",
+ "unit": "s"
+ }
+ ],
+ "targets": [
+ {
+ "expr": "perftest_confirm_latency_seconds{quantile=\"$percentile\"}",
+ "legendFormat": "{{instance}}",
+ "refId": "A"
+ }
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Publish confirm latency",
+ "transform": "timeseries_aggregations",
+ "type": "table"
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": null,
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 7,
+ "w": 6,
+ "x": 6,
+ "y": 8
+ },
+ "id": 37,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": true,
+ "min": true,
+ "rightSide": false,
+ "show": false,
+ "sort": "avg",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "perftest_confirm_latency_seconds{quantile=\"$percentile\",instance=~\"$instance\"}",
+ "format": "time_series",
+ "instant": false,
+ "interval": "1s",
+ "intervalFactor": 1,
+ "legendFormat": "{{instance}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Publish confirm latency",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "transparent": true,
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "s",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": "0",
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "cards": {
+ "cardPadding": null,
+ "cardRound": null
+ },
+ "color": {
+ "cardColor": "rgb(255, 255, 255)",
+ "colorScale": "sqrt",
+ "colorScheme": "interpolateSpectral",
+ "exponent": 0.4,
+ "max": null,
+ "min": null,
+ "mode": "opacity"
+ },
+ "dataFormat": "timeseries",
+ "datasource": null,
+ "gridPos": {
+ "h": 7,
+ "w": 6,
+ "x": 12,
+ "y": 8
+ },
+ "heatmap": {},
+ "hideZeroBuckets": false,
+ "highlightCards": true,
+ "id": 38,
+ "legend": {
+ "show": true
+ },
+ "links": [],
+ "options": {},
+ "reverseYBuckets": false,
+ "targets": [
+ {
+ "expr": "perftest_confirm_latency_seconds{quantile=\"$percentile\",instance=~\"$instance\"} > 0",
+ "format": "heatmap",
+ "intervalFactor": 1,
+ "refId": "A"
+ }
+ ],
+ "title": "Publish confirm latency distribution",
+ "tooltip": {
+ "show": true,
+ "showHistogram": true
+ },
+ "transparent": true,
+ "type": "heatmap",
+ "xAxis": {
+ "show": true
+ },
+ "xBucketNumber": null,
+ "xBucketSize": null,
+ "yAxis": {
+ "decimals": null,
+ "format": "s",
+ "logBase": 1,
+ "max": null,
+ "min": "0",
+ "show": true,
+ "splitFactor": null
+ },
+ "yBucketBound": "auto",
+ "yBucketNumber": null,
+ "yBucketSize": null
+ },
+ {
+ "aliasColors": {},
+ "bars": true,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": null,
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 7,
+ "w": 6,
+ "x": 18,
+ "y": 8
+ },
+ "id": 39,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": true,
+ "min": true,
+ "rightSide": false,
+ "show": false,
+ "sort": "max",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": false,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "perftest_confirm_latency_seconds{quantile=\"$percentile\",instance=~\"$instance\"} > 0",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "legendFormat": "{{instance}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Publish confirm latency distribution",
+ "tooltip": {
+ "shared": false,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "transparent": true,
+ "type": "graph",
+ "xaxis": {
+ "buckets": 20,
+ "mode": "histogram",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "none",
+ "label": "",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "collapsed": false,
+ "datasource": null,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 15
+ },
+ "id": 24,
+ "panels": [],
+ "title": "THROUGHPUT",
+ "type": "row"
+ },
+ {
+ "columns": [
+ {
+ "text": "Min",
+ "value": "min"
+ },
+ {
+ "text": "Max",
+ "value": "max"
+ },
+ {
+ "text": "Avg",
+ "value": "avg"
+ },
+ {
+ "text": "Current",
+ "value": "current"
+ }
+ ],
+ "datasource": null,
+ "fontSize": "100%",
+ "gridPos": {
+ "h": 7,
+ "w": 6,
+ "x": 0,
+ "y": 16
+ },
+ "id": 27,
+ "options": {},
+ "pageSize": 5,
+ "pluginVersion": "6.4.1",
+ "showHeader": true,
+ "sort": {
+ "col": 4,
+ "desc": true
+ },
+ "styles": [
+ {
+ "alias": "Instance",
+ "colorMode": null,
+ "colors": [
+ "rgba(245, 54, 54, 0.9)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(50, 172, 45, 0.97)"
+ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 0,
+ "mappingType": 1,
+ "pattern": "Metric",
+ "thresholds": [],
+ "type": "string",
+ "unit": "short"
+ },
+ {
+ "alias": "",
+ "colorMode": null,
+ "colors": [
+ "rgba(245, 54, 54, 0.9)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(50, 172, 45, 0.97)"
+ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 0,
+ "mappingType": 1,
+ "pattern": "/.*/",
+ "thresholds": [],
+ "type": "number",
+ "unit": "short"
+ }
+ ],
+ "targets": [
+ {
+ "expr": "perftest_published",
+ "legendFormat": "{{instance}}",
+ "refId": "A"
+ }
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Messages published / s",
+ "transform": "timeseries_aggregations",
+ "type": "table"
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": null,
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 7,
+ "w": 6,
+ "x": 6,
+ "y": 16
+ },
+ "id": 19,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": true,
+ "min": true,
+ "rightSide": false,
+ "show": false,
+ "sort": "avg",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "perftest_published{instance=~\"$instance\"}",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "legendFormat": "{{instance}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Messages published / s",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "transparent": true,
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "decimals": null,
+ "format": "short",
+ "label": "",
+ "logBase": 1,
+ "max": null,
+ "min": "0",
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": null,
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 7,
+ "w": 6,
+ "x": 12,
+ "y": 16
+ },
+ "id": 20,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": true,
+ "min": true,
+ "rightSide": false,
+ "show": false,
+ "sort": "avg",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "perftest_consumed{instance=~\"$instance\"}",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "legendFormat": "{{instance}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Messages consumed / s",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "transparent": true,
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "decimals": null,
+ "format": "short",
+ "label": "",
+ "logBase": 1,
+ "max": null,
+ "min": "0",
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "columns": [
+ {
+ "text": "Min",
+ "value": "min"
+ },
+ {
+ "text": "Max",
+ "value": "max"
+ },
+ {
+ "text": "Avg",
+ "value": "avg"
+ },
+ {
+ "text": "Current",
+ "value": "current"
+ }
+ ],
+ "datasource": null,
+ "fontSize": "100%",
+ "gridPos": {
+ "h": 7,
+ "w": 6,
+ "x": 18,
+ "y": 16
+ },
+ "id": 28,
+ "options": {},
+ "pageSize": 5,
+ "pluginVersion": "6.4.1",
+ "showHeader": true,
+ "sort": {
+ "col": 4,
+ "desc": true
+ },
+ "styles": [
+ {
+ "alias": "Instance",
+ "colorMode": null,
+ "colors": [
+ "rgba(245, 54, 54, 0.9)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(50, 172, 45, 0.97)"
+ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 0,
+ "mappingType": 1,
+ "pattern": "Metric",
+ "thresholds": [],
+ "type": "string",
+ "unit": "short"
+ },
+ {
+ "alias": "",
+ "colorMode": null,
+ "colors": [
+ "rgba(245, 54, 54, 0.9)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(50, 172, 45, 0.97)"
+ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 0,
+ "mappingType": 1,
+ "pattern": "/.*/",
+ "thresholds": [],
+ "type": "number",
+ "unit": "short"
+ }
+ ],
+ "targets": [
+ {
+ "expr": "perftest_consumed",
+ "legendFormat": "{{instance}}",
+ "refId": "A"
+ }
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Messages consumed / s",
+ "transform": "timeseries_aggregations",
+ "type": "table"
+ },
+ {
+ "columns": [
+ {
+ "text": "Min",
+ "value": "min"
+ },
+ {
+ "text": "Max",
+ "value": "max"
+ },
+ {
+ "text": "Avg",
+ "value": "avg"
+ },
+ {
+ "text": "Current",
+ "value": "current"
+ }
+ ],
+ "datasource": null,
+ "fontSize": "100%",
+ "gridPos": {
+ "h": 7,
+ "w": 6,
+ "x": 0,
+ "y": 23
+ },
+ "id": 30,
+ "options": {},
+ "pageSize": 5,
+ "pluginVersion": "6.4.1",
+ "showHeader": true,
+ "sort": {
+ "col": 4,
+ "desc": true
+ },
+ "styles": [
+ {
+ "alias": "Instance",
+ "colorMode": null,
+ "colors": [
+ "rgba(245, 54, 54, 0.9)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(50, 172, 45, 0.97)"
+ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 0,
+ "mappingType": 1,
+ "pattern": "Metric",
+ "thresholds": [],
+ "type": "string",
+ "unit": "short"
+ },
+ {
+ "alias": "",
+ "colorMode": null,
+ "colors": [
+ "rgba(245, 54, 54, 0.9)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(50, 172, 45, 0.97)"
+ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 0,
+ "mappingType": 1,
+ "pattern": "/.*/",
+ "thresholds": [],
+ "type": "number",
+ "unit": "short"
+ }
+ ],
+ "targets": [
+ {
+ "expr": "perftest_confirmed",
+ "legendFormat": "{{instance}}",
+ "refId": "A"
+ }
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Messages confirmed / s",
+ "transform": "timeseries_aggregations",
+ "type": "table"
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": null,
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 7,
+ "w": 6,
+ "x": 6,
+ "y": 23
+ },
+ "id": 29,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": true,
+ "min": true,
+ "rightSide": false,
+ "show": false,
+ "sort": "avg",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "perftest_confirmed{instance=~\"$instance\"}",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "legendFormat": "{{instance}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Messages confirmed / s",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "transparent": true,
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "decimals": null,
+ "format": "short",
+ "label": "",
+ "logBase": 1,
+ "max": null,
+ "min": "0",
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": null,
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 7,
+ "w": 6,
+ "x": 12,
+ "y": 23
+ },
+ "id": 40,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": true,
+ "min": true,
+ "rightSide": false,
+ "show": false,
+ "sort": "avg",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "perftest_nacked{instance=~\"$instance\"}",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "legendFormat": "{{instance}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Messages rejected / s",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "transparent": true,
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "decimals": null,
+ "format": "short",
+ "label": "",
+ "logBase": 1,
+ "max": null,
+ "min": "0",
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "columns": [
+ {
+ "text": "Min",
+ "value": "min"
+ },
+ {
+ "text": "Max",
+ "value": "max"
+ },
+ {
+ "text": "Avg",
+ "value": "avg"
+ },
+ {
+ "text": "Current",
+ "value": "current"
+ }
+ ],
+ "datasource": null,
+ "fontSize": "100%",
+ "gridPos": {
+ "h": 7,
+ "w": 6,
+ "x": 18,
+ "y": 23
+ },
+ "id": 41,
+ "options": {},
+ "pageSize": 5,
+ "pluginVersion": "6.4.1",
+ "showHeader": true,
+ "sort": {
+ "col": 4,
+ "desc": true
+ },
+ "styles": [
+ {
+ "alias": "Instance",
+ "colorMode": null,
+ "colors": [
+ "rgba(245, 54, 54, 0.9)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(50, 172, 45, 0.97)"
+ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 0,
+ "mappingType": 1,
+ "pattern": "Metric",
+ "thresholds": [],
+ "type": "string",
+ "unit": "short"
+ },
+ {
+ "alias": "",
+ "colorMode": null,
+ "colors": [
+ "rgba(245, 54, 54, 0.9)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(50, 172, 45, 0.97)"
+ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 0,
+ "mappingType": 1,
+ "pattern": "/.*/",
+ "thresholds": [],
+ "type": "number",
+ "unit": "short"
+ }
+ ],
+ "targets": [
+ {
+ "expr": "perftest_nacked",
+ "legendFormat": "{{instance}}",
+ "refId": "A"
+ }
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Messages rejected / s",
+ "transform": "timeseries_aggregations",
+ "type": "table"
+ },
+ {
+ "columns": [
+ {
+ "text": "Min",
+ "value": "min"
+ },
+ {
+ "text": "Max",
+ "value": "max"
+ },
+ {
+ "text": "Avg",
+ "value": "avg"
+ },
+ {
+ "text": "Current",
+ "value": "current"
+ }
+ ],
+ "datasource": null,
+ "fontSize": "100%",
+ "gridPos": {
+ "h": 7,
+ "w": 6,
+ "x": 0,
+ "y": 30
+ },
+ "id": 32,
+ "options": {},
+ "pageSize": 5,
+ "pluginVersion": "6.4.1",
+ "showHeader": true,
+ "sort": {
+ "col": 4,
+ "desc": true
+ },
+ "styles": [
+ {
+ "alias": "Instance",
+ "colorMode": null,
+ "colors": [
+ "rgba(245, 54, 54, 0.9)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(50, 172, 45, 0.97)"
+ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 0,
+ "mappingType": 1,
+ "pattern": "Metric",
+ "thresholds": [],
+ "type": "string",
+ "unit": "short"
+ },
+ {
+ "alias": "",
+ "colorMode": null,
+ "colors": [
+ "rgba(245, 54, 54, 0.9)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(50, 172, 45, 0.97)"
+ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 0,
+ "mappingType": 1,
+ "pattern": "/.*/",
+ "thresholds": [],
+ "type": "number",
+ "unit": "short"
+ }
+ ],
+ "targets": [
+ {
+ "expr": "perftest_returned",
+ "legendFormat": "{{instance}}",
+ "refId": "A"
+ }
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Messages returned / s",
+ "transform": "timeseries_aggregations",
+ "type": "table"
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": null,
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 7,
+ "w": 6,
+ "x": 6,
+ "y": 30
+ },
+ "id": 31,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": true,
+ "min": true,
+ "rightSide": false,
+ "show": false,
+ "sort": "avg",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "perftest_returned{instance=~\"$instance\"}",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "legendFormat": "{{instance}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Messages returned / s",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "transparent": true,
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "decimals": null,
+ "format": "short",
+ "label": "",
+ "logBase": 1,
+ "max": null,
+ "min": "0",
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ }
+ ],
+ "refresh": "15s",
+ "schemaVersion": 20,
+ "style": "dark",
+ "tags": [
+ "rabbitmq-perf-test"
+ ],
+ "templating": {
+ "list": [
+ {
+ "allValue": null,
+ "current": {
+ "text": "All",
+ "value": [
+ "$__all"
+ ]
+ },
+ "datasource": null,
+ "definition": "label_values(perftest_published, instance)",
+ "hide": 0,
+ "includeAll": true,
+ "label": "PerfTest Instance",
+ "multi": true,
+ "name": "instance",
+ "options": [],
+ "query": "label_values(perftest_published, instance)",
+ "refresh": 2,
+ "regex": "",
+ "skipUrlSync": false,
+ "sort": 1,
+ "tagValuesQuery": "",
+ "tags": [],
+ "tagsQuery": "",
+ "type": "query",
+ "useTags": false
+ },
+ {
+ "allValue": null,
+ "current": {
+ "text": "0.99",
+ "value": "0.99"
+ },
+ "datasource": null,
+ "definition": "label_values(perftest_latency_seconds, quantile)",
+ "hide": 0,
+ "includeAll": false,
+ "label": "Percentile",
+ "multi": false,
+ "name": "percentile",
+ "options": [],
+ "query": "label_values(perftest_latency_seconds, quantile)",
+ "refresh": 2,
+ "regex": "",
+ "skipUrlSync": false,
+ "sort": 4,
+ "tagValuesQuery": "",
+ "tags": [],
+ "tagsQuery": "",
+ "type": "query",
+ "useTags": false
+ }
+ ]
+ },
+ "time": {
+ "from": "now-15m",
+ "to": "now"
+ },
+ "timepicker": {
+ "refresh_intervals": [
+ "15s",
+ "30s",
+ "1m",
+ "5m",
+ "10m"
+ ],
+ "time_options": [
+ "5m",
+ "15m",
+ "1h",
+ "6h",
+ "12h",
+ "24h",
+ "2d",
+ "7d",
+ "30d"
+ ]
+ },
+ "timezone": "",
+ "title": "RabbitMQ-PerfTest",
+ "uid": "pK9UatSiz",
+ "version": 1
+}
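The PerfTest dashboard above reads `perftest_published`, `perftest_consumed`, `perftest_confirmed`, `perftest_nacked`, `perftest_returned` and `perftest_latency_seconds`, and derives its `$instance` and `$percentile` variables from their labels. A minimal scrape-job sketch that would feed it follows; the target address is an assumption about how the PerfTest Prometheus endpoint is exposed in a given deployment.

```yaml
# Hypothetical scrape job for the perftest_* metrics this dashboard
# visualises; replace the target with wherever the PerfTest Prometheus
# endpoint is actually exposed.
scrape_configs:
  - job_name: perftest
    static_configs:
      - targets:
          - 'perftest:8080'   # placeholder host:port
```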
diff --git a/deps/rabbitmq_prometheus/docker/grafana/dashboards/RabbitMQ-Quorum-Queues-Raft.json b/deps/rabbitmq_prometheus/docker/grafana/dashboards/RabbitMQ-Quorum-Queues-Raft.json
new file mode 100644
index 0000000000..08413a7f73
--- /dev/null
+++ b/deps/rabbitmq_prometheus/docker/grafana/dashboards/RabbitMQ-Quorum-Queues-Raft.json
@@ -0,0 +1,755 @@
+{
+ "__requires": [
+ {
+ "type": "grafana",
+ "id": "grafana",
+ "name": "Grafana",
+ "version": "6.0.0"
+ },
+ {
+ "type": "panel",
+ "id": "graph",
+ "name": "Graph",
+ "version": ""
+ },
+ {
+ "type": "panel",
+ "id": "heatmap",
+ "name": "Heatmap",
+ "version": ""
+ },
+ {
+ "type": "datasource",
+ "id": "prometheus",
+ "name": "Prometheus",
+ "version": "1.0.0"
+ }
+ ],
+ "annotations": {
+ "list": [
+ {
+ "builtIn": 1,
+ "datasource": "-- Grafana --",
+ "enable": true,
+ "hide": true,
+ "iconColor": "rgba(0, 211, 255, 1)",
+ "name": "Annotations & Alerts",
+ "type": "dashboard"
+ }
+ ]
+ },
+ "description": "Raft state for all Quorum Queues running in a RabbitMQ cluster",
+ "editable": true,
+ "gnetId": null,
+ "graphTooltip": 1,
+ "id": null,
+ "iteration": 1581011566961,
+ "links": [
+ {
+ "icon": "doc",
+ "tags": [],
+ "targetBlank": true,
+ "title": "Quorum Queues Documentation",
+ "tooltip": "",
+ "type": "link",
+ "url": "https://www.rabbitmq.com/quorum-queues.html"
+ }
+ ],
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "cacheTimeout": null,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": null,
+ "description": "##### Rate of Raft log operations committed\n\nThis includes all queue operations, including publishes & consumer acknowledgements.\n\nThis tracks the progress of the Raft commit index on all members, including followers.\n\nIf a RabbitMQ node does not run a Raft member, it will not report any entries committed.",
+ "fill": 0,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 9,
+ "w": 12,
+ "x": 0,
+ "y": 0
+ },
+ "hiddenSeries": false,
+ "id": 64,
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": true,
+ "max": true,
+ "min": false,
+ "show": true,
+ "sort": "current",
+ "sortDesc": true,
+ "total": true,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "/^rabbit@[\\w.-]+0/",
+ "color": "#56A64B"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+1/",
+ "color": "#F2CC0C"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+2/",
+ "color": "#3274D9"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+3/",
+ "color": "#A352CC"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+4/",
+ "color": "#FF780A"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+5/",
+ "color": "#96D98D"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+6/",
+ "color": "#FFEE52"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+7/",
+ "color": "#8AB8FF"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+8/",
+ "color": "#CA95E5"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+9/",
+ "color": "#FFB357"
+ }
+ ],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(rate(rabbitmq_raft_log_commit_index[60s]) * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\"}) by(rabbitmq_node)",
+ "format": "time_series",
+ "instant": false,
+ "intervalFactor": 1,
+ "legendFormat": "{{rabbitmq_node}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Log entries committed / s",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "decimals": null,
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": "0",
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "cacheTimeout": null,
+ "cards": {
+ "cardPadding": null,
+ "cardRound": null
+ },
+ "color": {
+ "cardColor": "rgb(255, 255, 255)",
+ "colorScale": "sqrt",
+ "colorScheme": "interpolateCool",
+ "exponent": 0.4,
+ "mode": "opacity"
+ },
+ "dataFormat": "timeseries",
+ "datasource": null,
+ "description": "##### Time for a log entry to be committed\n\nThis is an indicator of Raft operational overhead. Values will increase with increased load as the system trades latency for throughput.\n\nThis metric samples the time it takes for a log entry to be written to a Raft log and that entry being committed.\n\nBecause quorum queues fsync all operations to disk before committing them, they are not suitable for low-latency workloads.",
+ "gridPos": {
+ "h": 9,
+ "w": 12,
+ "x": 12,
+ "y": 0
+ },
+ "heatmap": {},
+ "hideZeroBuckets": false,
+ "highlightCards": true,
+ "id": 65,
+ "legend": {
+ "show": true
+ },
+ "links": [],
+ "options": {},
+ "reverseYBuckets": false,
+ "targets": [
+ {
+ "expr": "rabbitmq_raft_entry_commit_latency_seconds * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\"}",
+ "format": "time_series",
+ "instant": false,
+ "intervalFactor": 1,
+ "legendFormat": "",
+ "refId": "A"
+ }
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Log entry commit latency",
+ "tooltip": {
+ "show": true,
+ "showHistogram": true
+ },
+ "type": "heatmap",
+ "xAxis": {
+ "show": true
+ },
+ "xBucketNumber": null,
+ "xBucketSize": null,
+ "yAxis": {
+ "decimals": null,
+ "format": "s",
+ "logBase": 1,
+ "max": null,
+ "min": "0",
+ "show": true,
+ "splitFactor": null
+ },
+ "yBucketBound": "lower",
+ "yBucketNumber": null,
+ "yBucketSize": null
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "cacheTimeout": null,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": null,
+ "description": "##### Pending Raft log entries\n\nTracks the number of Raft log entries that have been written but not yet committed.\n\nHigh & growing values may be indicative of a quorum of members not being available so that a queue can make progress.",
+ "fill": 0,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 9,
+ "w": 12,
+ "x": 0,
+ "y": 9
+ },
+ "hiddenSeries": false,
+ "id": 62,
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": true,
+ "hideEmpty": false,
+ "hideZero": true,
+ "max": true,
+ "min": false,
+ "rightSide": false,
+ "show": true,
+ "sort": "total",
+ "sortDesc": true,
+ "total": true,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "/^rabbit@[\\w.-]+0/",
+ "color": "#56A64B"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+1/",
+ "color": "#F2CC0C"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+2/",
+ "color": "#3274D9"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+3/",
+ "color": "#A352CC"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+4/",
+ "color": "#FF780A"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+5/",
+ "color": "#96D98D"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+6/",
+ "color": "#FFEE52"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+7/",
+ "color": "#8AB8FF"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+8/",
+ "color": "#CA95E5"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+9/",
+ "color": "#FFB357"
+ }
+ ],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(\n (rabbitmq_raft_log_last_written_index * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\"}) -\n (rabbitmq_raft_log_commit_index * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\"})\n) by(rabbitmq_node)",
+ "format": "time_series",
+ "instant": false,
+ "intervalFactor": 1,
+ "legendFormat": "{{rabbitmq_node}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Uncommitted log entries",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "decimals": null,
+ "format": "short",
+ "label": "",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "cacheTimeout": null,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": null,
+ "description": "##### Rate of Raft leader elections\n\nTracks the increments of the Raft term.\n\nSustained non-zero rates are indicative of network and/or availability issues, or queue churn. The other reason may be quorum queue declarations.\n\nValues above 0 are normal, some leader elections are expected. Sustained high values may be of concern.",
+ "fill": 0,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 9,
+ "w": 12,
+ "x": 12,
+ "y": 9
+ },
+ "hiddenSeries": false,
+ "id": 63,
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": true,
+ "max": true,
+ "min": false,
+ "rightSide": false,
+ "show": true,
+ "sort": "total",
+ "sortDesc": true,
+ "total": true,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "/^rabbit@[\\w.-]+0/",
+ "color": "#56A64B"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+1/",
+ "color": "#F2CC0C"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+2/",
+ "color": "#3274D9"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+3/",
+ "color": "#A352CC"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+4/",
+ "color": "#FF780A"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+5/",
+ "color": "#96D98D"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+6/",
+ "color": "#FFEE52"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+7/",
+ "color": "#8AB8FF"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+8/",
+ "color": "#CA95E5"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+9/",
+ "color": "#FFB357"
+ }
+ ],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(rate(rabbitmq_raft_term_total[60s]) * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\"}) by(rabbitmq_node)",
+ "format": "time_series",
+ "instant": false,
+ "intervalFactor": 1,
+ "legendFormat": "{{rabbitmq_node}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [
+ {
+ "colorMode": "warning",
+ "fill": true,
+ "line": true,
+ "op": "gt",
+ "value": 3,
+ "yaxis": "left"
+ }
+ ],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Leader elections / s",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "decimals": null,
+ "format": "short",
+ "label": "",
+ "logBase": 1,
+ "max": null,
+ "min": "0",
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "cacheTimeout": null,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": null,
+ "description": "##### Number of entries in the Raft log\n\nTracks the number of Raft log entries since the last snapshot.\n\nLarge values can either be indicative of large quorum queue backlogs or availability problems. If the uncommitted entries metric is large as well, there is a genuine availability problem in the system.",
+ "fill": 0,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 13,
+ "w": 24,
+ "x": 0,
+ "y": 18
+ },
+ "hiddenSeries": false,
+ "id": 18,
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": true,
+ "max": true,
+ "min": false,
+ "rightSide": false,
+ "show": true,
+ "sort": "current",
+ "sortDesc": true,
+ "total": true,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null as zero",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "/^rabbit@[\\w.-]+0/",
+ "color": "#56A64B"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+1/",
+ "color": "#F2CC0C"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+2/",
+ "color": "#3274D9"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+3/",
+ "color": "#A352CC"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+4/",
+ "color": "#FF780A"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+5/",
+ "color": "#96D98D"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+6/",
+ "color": "#FFEE52"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+7/",
+ "color": "#8AB8FF"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+8/",
+ "color": "#CA95E5"
+ },
+ {
+ "alias": "/^rabbit@[\\w.-]+9/",
+ "color": "#FFB357"
+ }
+ ],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(\n (rabbitmq_raft_log_last_written_index * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\"}) - \n (rabbitmq_raft_log_snapshot_index * on(instance) group_left(rabbitmq_cluster, rabbitmq_node) rabbitmq_identity_info{rabbitmq_cluster=\"$rabbitmq_cluster\"})\n) by(queue, rabbitmq_node) > 5000",
+ "hide": false,
+ "legendFormat": "{{rabbitmq_node}} {{queue}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Raft members with >5k entries in the log",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "decimals": null,
+ "format": "short",
+ "label": "",
+ "logBase": 1,
+ "max": null,
+ "min": "0",
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ }
+ ],
+ "refresh": "15s",
+ "schemaVersion": 21,
+ "style": "dark",
+ "tags": [
+ "rabbitmq-prometheus"
+ ],
+ "templating": {
+ "list": [
+ {
+ "allValue": null,
+ "current": {
+ "text": "",
+ "value": ""
+ },
+ "datasource": null,
+ "definition": "label_values(rabbitmq_identity_info, rabbitmq_cluster)",
+ "hide": 0,
+ "includeAll": false,
+ "label": "RabbitMQ Cluster",
+ "multi": false,
+ "name": "rabbitmq_cluster",
+ "options": [],
+ "query": "label_values(rabbitmq_identity_info, rabbitmq_cluster)",
+ "refresh": 2,
+ "regex": "",
+ "skipUrlSync": false,
+ "sort": 0,
+ "tagValuesQuery": "",
+ "tags": [],
+ "tagsQuery": "",
+ "type": "query",
+ "useTags": false
+ }
+ ]
+ },
+ "time": {
+ "from": "now-15m",
+ "to": "now"
+ },
+ "timepicker": {
+ "refresh_intervals": [
+ "15s",
+ "30s",
+ "1m",
+ "5m",
+ "10m"
+ ],
+ "time_options": [
+ "5m",
+ "15m",
+ "1h",
+ "6h",
+ "12h",
+ "24h",
+ "2d",
+ "7d",
+ "30d"
+ ]
+ },
+ "timezone": "",
+ "title": "RabbitMQ-Quorum-Queues-Raft",
+ "uid": "f1Mee9nZz",
+ "version": 1
+}
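The "Leader elections / s" panel above draws a warning threshold at 3 elections per second. The same condition can be expressed as a Prometheus alerting rule; the sketch below uses the panel's own query, while the alert name and `for` duration are assumptions.

```yaml
# Alerting-rule sketch mirroring the dashboard's warning threshold of
# 3 leader elections / s; the alert name and `for` duration are assumptions.
groups:
  - name: rabbitmq-raft
    rules:
      - alert: RabbitMQRaftLeaderElectionChurn
        expr: >
          sum(rate(rabbitmq_raft_term_total[60s])
          * on(instance) group_left(rabbitmq_cluster, rabbitmq_node)
          rabbitmq_identity_info) by (rabbitmq_node) > 3
        for: 10m
        labels:
          severity: warning
```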
diff --git a/deps/rabbitmq_prometheus/docker/grafana/dashboards/rabbitmq-exporter_vs_rabbitmq-prometheus.json b/deps/rabbitmq_prometheus/docker/grafana/dashboards/rabbitmq-exporter_vs_rabbitmq-prometheus.json
new file mode 100644
index 0000000000..1d84e11968
--- /dev/null
+++ b/deps/rabbitmq_prometheus/docker/grafana/dashboards/rabbitmq-exporter_vs_rabbitmq-prometheus.json
@@ -0,0 +1,375 @@
+{
+ "__requires": [
+ {
+ "type": "grafana",
+ "id": "grafana",
+ "name": "Grafana",
+ "version": "6.0.0"
+ },
+ {
+ "type": "datasource",
+ "id": "prometheus",
+ "name": "prometheus",
+ "version": "1.0.0"
+ },
+ {
+ "type": "panel",
+ "id": "graph",
+ "name": "Graph",
+ "version": ""
+ }
+ ],
+ "annotations": {
+ "list": [
+ {
+ "builtIn": 1,
+ "datasource": "-- Grafana --",
+ "enable": true,
+ "hide": true,
+ "iconColor": "rgba(0, 211, 255, 1)",
+ "name": "Annotations & Alerts",
+ "type": "dashboard"
+ }
+ ]
+ },
+ "description": "rabbitmq-exporter vs rabbitmq-prometheus",
+ "editable": true,
+ "gnetId": null,
+ "graphTooltip": 1,
+ "links": [],
+ "panels": [
+ {
+ "columns": [
+ {
+ "text": "Min",
+ "value": "min"
+ },
+ {
+ "text": "Max",
+ "value": "max"
+ },
+ {
+ "text": "Avg",
+ "value": "avg"
+ },
+ {
+ "text": "Current",
+ "value": "current"
+ }
+ ],
+ "datasource": null,
+ "fontSize": "100%",
+ "gridPos": {
+ "h": 10,
+ "w": 8,
+ "x": 0,
+ "y": 0
+ },
+ "id": 5,
+ "options": {},
+ "pageSize": null,
+ "pluginVersion": "6.4.1",
+ "scroll": true,
+ "showHeader": true,
+ "sort": {
+ "col": 3,
+ "desc": true
+ },
+ "styles": [
+ {
+ "alias": "",
+ "colorMode": null,
+ "colors": [
+ "rgba(245, 54, 54, 0.9)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(50, 172, 45, 0.97)"
+ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 0,
+ "mappingType": 1,
+ "pattern": "Metric",
+ "thresholds": [],
+ "type": "string",
+ "unit": "short"
+ },
+ {
+ "alias": "",
+ "colorMode": null,
+ "colors": [
+ "rgba(245, 54, 54, 0.9)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(50, 172, 45, 0.97)"
+ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 1,
+ "mappingType": 1,
+ "pattern": "/.*/",
+ "thresholds": [],
+ "type": "number",
+ "unit": "s"
+ }
+ ],
+ "targets": [
+ {
+ "expr": "scrape_duration_seconds{job=\"rabbitmq-exporter\"}",
+ "legendFormat": "{{job}}",
+ "refId": "A"
+ },
+ {
+ "expr": "scrape_duration_seconds{job=\"rabbitmq-server\", instance=~\".*dist-tls.*\"}",
+ "legendFormat": "{{instance}}",
+ "refId": "B"
+ },
+ {
+ "expr": "scrape_duration_seconds{job=\"rabbitmq-prometheus\"}",
+ "legendFormat": "{{deployment}} {{instance}}",
+ "refId": "C"
+ }
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Prometheus target scrape duration",
+ "transform": "timeseries_aggregations",
+ "type": "table"
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": null,
+ "description": "",
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 10,
+ "w": 16,
+ "x": 8,
+ "y": 0
+ },
+ "id": 3,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "scrape_duration_seconds{job=\"rabbitmq-exporter\"}",
+ "legendFormat": "{{job}}",
+ "refId": "A"
+ },
+ {
+ "expr": "scrape_duration_seconds{job=\"rabbitmq-server\", instance=~\".*dist-tls.*\"}",
+ "legendFormat": "{{instance}}",
+ "refId": "B"
+ },
+ {
+ "expr": "scrape_duration_seconds{job=\"rabbitmq-prometheus\"}",
+ "legendFormat": "{{deployment}} {{instance}}",
+ "refId": "C"
+ }
+ ],
+ "thresholds": [
+ {
+ "colorMode": "warning",
+ "fill": true,
+ "line": true,
+ "op": "gt",
+ "value": 10,
+ "yaxis": "left"
+ },
+ {
+ "colorMode": "critical",
+ "fill": true,
+ "line": true,
+ "op": "gt",
+ "value": 59,
+ "yaxis": "left"
+ }
+ ],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Prometheus target scrape duration",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "s",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": "0",
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": null,
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 10,
+ "w": 24,
+ "x": 0,
+ "y": 10
+ },
+ "id": 2,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "http_request_duration_microseconds{quantile=\"0.99\", instance=\"rabbitmq-exporter:9090\"}",
+ "format": "heatmap",
+ "legendFormat": "{{quantile}}th",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "rabbitmq-exporter - RabbitMQ HTTP API request duration",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "µs",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": "0",
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ }
+ ],
+ "refresh": "15s",
+ "schemaVersion": 20,
+ "style": "dark",
+ "tags": [
+ "rabbitmq-exporter",
+ "rabbitmq-prometheus"
+ ],
+ "templating": {
+ "list": []
+ },
+ "time": {
+ "from": "now-15m",
+ "to": "now"
+ },
+ "timepicker": {
+ "refresh_intervals": [
+ "15s",
+ "30s",
+ "1m",
+ "5m",
+ "10m"
+ ],
+ "time_options": [
+ "5m",
+ "15m",
+ "1h",
+ "6h",
+ "12h",
+ "24h",
+ "2d",
+ "7d",
+ "30d"
+ ]
+ },
+ "timezone": "",
+ "title": "rabbitmq-exporter_vs_rabbitmq-prometheus",
+ "uid": "hNmaJ2AZk",
+ "version": 1
+}
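This comparison dashboard filters `scrape_duration_seconds` by the `job` labels `rabbitmq-exporter`, `rabbitmq-server` and `rabbitmq-prometheus`. A scrape-config sketch that would produce those labels is shown below; every target address is a placeholder in the container-name style used by `datasources.yml` later in this diff.

```yaml
# Scrape-job sketch producing the three job labels this dashboard
# compares; all target addresses are placeholders.
scrape_configs:
  - job_name: rabbitmq-exporter          # standalone exporter sidecar
    static_configs:
      - targets: ['rabbitmq-exporter:9090']
  - job_name: rabbitmq-server            # instances matched via ".*dist-tls.*"
    static_configs:
      - targets: ['rabbitmq-dist-tls:15692']
  - job_name: rabbitmq-prometheus
    static_configs:
      - targets: ['rabbitmq:15692']      # rabbitmq_prometheus default port
```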
diff --git a/deps/rabbitmq_prometheus/docker/grafana/datasources.yml b/deps/rabbitmq_prometheus/docker/grafana/datasources.yml
new file mode 100644
index 0000000000..916e507146
--- /dev/null
+++ b/deps/rabbitmq_prometheus/docker/grafana/datasources.yml
@@ -0,0 +1,44 @@
+apiVersion: 1
+
+datasources:
+ # <string, required> name of the datasource. Required
+ - name: prometheus
+ # <string, required> datasource type. Required
+ type: prometheus
+ # <string, required> access mode. direct or proxy. Required
+ access: proxy
+ # <int> org id. will default to orgId 1 if not specified
+ orgId: 1
+ # <string> url
+ url: http://prometheus:9090
+ # <string> database password, if used
+ # password:
+ # <string> database user, if used
+ # user:
+ # <string> database name, if used
+ # database:
+ # <bool> enable/disable basic auth
+ # basicAuth:
+ # <string> basic auth username
+ # basicAuthUser:
+ # <string> basic auth password
+ # basicAuthPassword:
+ # <bool> enable/disable with credentials headers
+ # withCredentials:
+ # <bool> mark as default datasource. Max one per org
+ isDefault: true
+ # <map> fields that will be converted to json and stored in json_data
+ # jsonData:
+ # graphiteVersion: "1.1"
+ # tlsAuth: true
+ # tlsAuthWithCACert: true
+ # httpHeaderName1: "Authorization"
+ # <string> json object of data that will be encrypted.
+ # secureJsonData:
+ # tlsCACert: "..."
+ # tlsClientCert: "..."
+ # tlsClientKey: "..."
+ # httpHeaderValue1: "Bearer xf5yhfkpsnmgo"
+ version: 1
+ # <bool> allow users to edit datasources from the UI.
+ editable: false
diff --git a/deps/rabbitmq_prometheus/docker/grafana/publish/erlang-distribution-11352.md b/deps/rabbitmq_prometheus/docker/grafana/publish/erlang-distribution-11352.md
new file mode 100644
index 0000000000..814f5715ff
--- /dev/null
+++ b/deps/rabbitmq_prometheus/docker/grafana/publish/erlang-distribution-11352.md
@@ -0,0 +1,48 @@
+# Erlang-Distribution
+
+Erlang Distribution links, inet socket, port driver, dist process + tls_connection & tls_sender
+
+## Categories
+
+* RabbitMQ
+
+## README
+
+Understand the behaviour of Erlang clustering via Erlang Distribution links, inet socket, port driver & dist process.
+
+If the Erlang Distribution is using TLS, the state of tls_connection & tls_sender processes will be shown as well.
+
+Metrics displayed:
+
+* Distribution link
+ * State: established / connecting / waiting
+ * Data buffered
+ * Data sent to peer node / s
+ * Data received from peer node / s
+ * Messages sent to peer node / s
+ * Messages received from peer node / s
+ * Average inet packet size sent to peer node
+ * Average inet packet size received from peer node
+
+* Port driver
+ * Memory used
+ * Data buffered
+
+* Dist process
+ * State: waiting / running / garbage_collecting / runnable / suspended / exiting
+ * Queued messages
+ * Memory used
+ * Process reductions / s
+
+The last set of metrics is repeated for the `tls_connection` and `tls_sender` processes if the Erlang Distribution is using TLS.
+
+Filter by:
+
+* RabbitMQ Cluster
+* Process type
+
+Depends on the `rabbitmq-prometheus` plugin, built-in since [RabbitMQ v3.8.0](https://github.com/rabbitmq/rabbitmq-server/releases/tag/v3.8.0)
+
+Learn more about [RabbitMQ built-in Prometheus support](https://www.rabbitmq.com/prometheus.html)
+
+To get it working locally with RabbitMQ in 3 simple steps, follow this [Quick Start guide](https://www.rabbitmq.com/prometheus.html#quick-start)
diff --git a/deps/rabbitmq_prometheus/docker/grafana/publish/erlang-distribution-2019-12-04-original.png b/deps/rabbitmq_prometheus/docker/grafana/publish/erlang-distribution-2019-12-04-original.png
new file mode 100644
index 0000000000..4261cca134
--- /dev/null
+++ b/deps/rabbitmq_prometheus/docker/grafana/publish/erlang-distribution-2019-12-04-original.png
Binary files differ
diff --git a/deps/rabbitmq_prometheus/docker/grafana/publish/erlang-distribution-2019-12-04.jpg b/deps/rabbitmq_prometheus/docker/grafana/publish/erlang-distribution-2019-12-04.jpg
new file mode 100644
index 0000000000..6556393ede
--- /dev/null
+++ b/deps/rabbitmq_prometheus/docker/grafana/publish/erlang-distribution-2019-12-04.jpg
Binary files differ
diff --git a/deps/rabbitmq_prometheus/docker/grafana/publish/erlang-distribution-info-2019-12-04-original.png b/deps/rabbitmq_prometheus/docker/grafana/publish/erlang-distribution-info-2019-12-04-original.png
new file mode 100644
index 0000000000..db2ae6a593
--- /dev/null
+++ b/deps/rabbitmq_prometheus/docker/grafana/publish/erlang-distribution-info-2019-12-04-original.png
Binary files differ
diff --git a/deps/rabbitmq_prometheus/docker/grafana/publish/erlang-distribution-info-2019-12-04.jpg b/deps/rabbitmq_prometheus/docker/grafana/publish/erlang-distribution-info-2019-12-04.jpg
new file mode 100644
index 0000000000..c73271ed94
--- /dev/null
+++ b/deps/rabbitmq_prometheus/docker/grafana/publish/erlang-distribution-info-2019-12-04.jpg
Binary files differ
diff --git a/deps/rabbitmq_prometheus/docker/grafana/publish/erlang-distribution-logo-2019-12-04.png b/deps/rabbitmq_prometheus/docker/grafana/publish/erlang-distribution-logo-2019-12-04.png
new file mode 100644
index 0000000000..040fbfe525
--- /dev/null
+++ b/deps/rabbitmq_prometheus/docker/grafana/publish/erlang-distribution-logo-2019-12-04.png
Binary files differ
diff --git a/deps/rabbitmq_prometheus/docker/grafana/publish/erlang-distribution-tls-sender-2019-12-04-original.png b/deps/rabbitmq_prometheus/docker/grafana/publish/erlang-distribution-tls-sender-2019-12-04-original.png
new file mode 100644
index 0000000000..53a7bd6f7d
--- /dev/null
+++ b/deps/rabbitmq_prometheus/docker/grafana/publish/erlang-distribution-tls-sender-2019-12-04-original.png
Binary files differ
diff --git a/deps/rabbitmq_prometheus/docker/grafana/publish/erlang-distribution-tls-sender-2019-12-04.jpg b/deps/rabbitmq_prometheus/docker/grafana/publish/erlang-distribution-tls-sender-2019-12-04.jpg
new file mode 100644
index 0000000000..1cf8a1d7b4
--- /dev/null
+++ b/deps/rabbitmq_prometheus/docker/grafana/publish/erlang-distribution-tls-sender-2019-12-04.jpg
Binary files differ
diff --git a/deps/rabbitmq_prometheus/docker/grafana/publish/erlang-distributions-compare-10988.md b/deps/rabbitmq_prometheus/docker/grafana/publish/erlang-distributions-compare-10988.md
new file mode 100644
index 0000000000..07d01840e9
--- /dev/null
+++ b/deps/rabbitmq_prometheus/docker/grafana/publish/erlang-distributions-compare-10988.md
@@ -0,0 +1,32 @@
+# Erlang-Distributions-Compare
+
+Erlang Distribution traffic, node network traffic and CPU + PerfTest message throughput and latency
+
+## Categories
+
+* RabbitMQ
+
+## README
+
+Compare the effects of running Erlang Distribution with different compression algorithms (deflate, lz4, zstd, etc.)
+
+Metrics displayed:
+
+* Erlang Distribution outgoing link traffic / s
+* Network incoming & outgoing traffic / s
+* CPU utilisation
+* Messages published & consumed / s
+* End-to-end message latency
+
+Filter by:
+
+* RabbitMQ Cluster
+* PerfTest Instance & message latency percentile
+* Host when using [node_exporter](https://github.com/prometheus/node_exporter)
+* Container when using [cadvisor](https://github.com/google/cadvisor)
+
+Depends on the `rabbitmq-prometheus` plugin, built-in since [RabbitMQ v3.8.0](https://github.com/rabbitmq/rabbitmq-server/releases/tag/v3.8.0)
+
+Learn more about [RabbitMQ built-in Prometheus support](https://www.rabbitmq.com/prometheus.html)
+
+To get it working locally with RabbitMQ in 3 simple steps, follow this [Quick Start guide](https://www.rabbitmq.com/prometheus.html#quick-start)
diff --git a/deps/rabbitmq_prometheus/docker/grafana/publish/erlang-distributions-compare-cluster-2019-10-14.png b/deps/rabbitmq_prometheus/docker/grafana/publish/erlang-distributions-compare-cluster-2019-10-14.png
new file mode 100644
index 0000000000..8872896e47
--- /dev/null
+++ b/deps/rabbitmq_prometheus/docker/grafana/publish/erlang-distributions-compare-cluster-2019-10-14.png
Binary files differ
diff --git a/deps/rabbitmq_prometheus/docker/grafana/publish/erlang-distributions-compare-container-2019-10-14.png b/deps/rabbitmq_prometheus/docker/grafana/publish/erlang-distributions-compare-container-2019-10-14.png
new file mode 100644
index 0000000000..dc2e19ad59
--- /dev/null
+++ b/deps/rabbitmq_prometheus/docker/grafana/publish/erlang-distributions-compare-container-2019-10-14.png
Binary files differ
diff --git a/deps/rabbitmq_prometheus/docker/grafana/publish/erlang-distributions-compare-instance-2019-10-14.png b/deps/rabbitmq_prometheus/docker/grafana/publish/erlang-distributions-compare-instance-2019-10-14.png
new file mode 100644
index 0000000000..801c0524b8
--- /dev/null
+++ b/deps/rabbitmq_prometheus/docker/grafana/publish/erlang-distributions-compare-instance-2019-10-14.png
Binary files differ
diff --git a/deps/rabbitmq_prometheus/docker/grafana/publish/erlang-distributions-compare-logo-2019-10-14.png b/deps/rabbitmq_prometheus/docker/grafana/publish/erlang-distributions-compare-logo-2019-10-14.png
new file mode 100644
index 0000000000..f64d2afcdf
--- /dev/null
+++ b/deps/rabbitmq_prometheus/docker/grafana/publish/erlang-distributions-compare-logo-2019-10-14.png
Binary files differ
diff --git a/deps/rabbitmq_prometheus/docker/grafana/publish/erlang-distributions-compare-overview-2019-10-14.png b/deps/rabbitmq_prometheus/docker/grafana/publish/erlang-distributions-compare-overview-2019-10-14.png
new file mode 100644
index 0000000000..baa0482edc
--- /dev/null
+++ b/deps/rabbitmq_prometheus/docker/grafana/publish/erlang-distributions-compare-overview-2019-10-14.png
Binary files differ
diff --git a/deps/rabbitmq_prometheus/docker/grafana/publish/erlang-memory-allocators-11350.md b/deps/rabbitmq_prometheus/docker/grafana/publish/erlang-memory-allocators-11350.md
new file mode 100644
index 0000000000..ed083d9be3
--- /dev/null
+++ b/deps/rabbitmq_prometheus/docker/grafana/publish/erlang-memory-allocators-11350.md
@@ -0,0 +1,65 @@
+# Erlang-Memory-Allocators
+
+Erlang VM memory utilisation from erts_alloc perspective
+
+## Categories
+
+* RabbitMQ
+
+## README
+
+Understand Erlang VM memory breakdown across all allocators & schedulers.
+
+Metrics displayed:
+
+* Resident Set Size - as captured by `rabbitmq_process_resident_memory_bytes`
+
+* Allocated
+ * Total
+ * Used
+ * Unused
+
+* Allocated by Allocator Type (Min / Max / Avg / Current)
+ * binary_alloc
+ * driver_alloc
+ * eheap_alloc
+ * ets_alloc
+ * exec_alloc
+ * fix_alloc
+ * literal_alloc
+ * ll_alloc
+ * sl_alloc
+ * std_alloc
+ * temp_alloc
+
+For each allocator type:
+
+* Multiblock
+ * Used
+ * Block
+ * Carrier
+ * Unused
+
+* Multiblock Pool
+ * Used
+ * Block
+ * Carrier
+ * Unused
+
+* Singleblock
+ * Used
+ * Block
+ * Carrier
+ * Unused
+
+Filter by:
+
+* RabbitMQ Cluster
+* RabbitMQ Node
+* Erlang Memory Allocator (Multi-value + All)
+
+Depends on the `rabbitmq-prometheus` plugin, built-in since [RabbitMQ v3.8.0](https://github.com/rabbitmq/rabbitmq-server/releases/tag/v3.8.0)
+
+Learn more about [RabbitMQ built-in Prometheus support](https://www.rabbitmq.com/prometheus.html)
+
+To get it working locally with RabbitMQ in 3 simple steps, follow this [Quick Start guide](https://www.rabbitmq.com/prometheus.html#quick-start)
diff --git a/deps/rabbitmq_prometheus/docker/grafana/publish/erlang-memory-allocators-2019-12-03-original.png b/deps/rabbitmq_prometheus/docker/grafana/publish/erlang-memory-allocators-2019-12-03-original.png
new file mode 100644
index 0000000000..45cfd675bb
--- /dev/null
+++ b/deps/rabbitmq_prometheus/docker/grafana/publish/erlang-memory-allocators-2019-12-03-original.png
Binary files differ
diff --git a/deps/rabbitmq_prometheus/docker/grafana/publish/erlang-memory-allocators-2019-12-03.jpg b/deps/rabbitmq_prometheus/docker/grafana/publish/erlang-memory-allocators-2019-12-03.jpg
new file mode 100644
index 0000000000..ed7529a332
--- /dev/null
+++ b/deps/rabbitmq_prometheus/docker/grafana/publish/erlang-memory-allocators-2019-12-03.jpg
Binary files differ
diff --git a/deps/rabbitmq_prometheus/docker/grafana/publish/erlang-memory-allocators-binary-2019-12-03-original.png b/deps/rabbitmq_prometheus/docker/grafana/publish/erlang-memory-allocators-binary-2019-12-03-original.png
new file mode 100644
index 0000000000..1154893937
--- /dev/null
+++ b/deps/rabbitmq_prometheus/docker/grafana/publish/erlang-memory-allocators-binary-2019-12-03-original.png
Binary files differ
diff --git a/deps/rabbitmq_prometheus/docker/grafana/publish/erlang-memory-allocators-binary-2019-12-03.jpg b/deps/rabbitmq_prometheus/docker/grafana/publish/erlang-memory-allocators-binary-2019-12-03.jpg
new file mode 100644
index 0000000000..b25e63f1d2
--- /dev/null
+++ b/deps/rabbitmq_prometheus/docker/grafana/publish/erlang-memory-allocators-binary-2019-12-03.jpg
Binary files differ
diff --git a/deps/rabbitmq_prometheus/docker/grafana/publish/erlang-memory-allocators-info-2019-12-03-original.png b/deps/rabbitmq_prometheus/docker/grafana/publish/erlang-memory-allocators-info-2019-12-03-original.png
new file mode 100644
index 0000000000..688353abf8
--- /dev/null
+++ b/deps/rabbitmq_prometheus/docker/grafana/publish/erlang-memory-allocators-info-2019-12-03-original.png
Binary files differ
diff --git a/deps/rabbitmq_prometheus/docker/grafana/publish/erlang-memory-allocators-info-2019-12-03.jpg b/deps/rabbitmq_prometheus/docker/grafana/publish/erlang-memory-allocators-info-2019-12-03.jpg
new file mode 100644
index 0000000000..3144ef3852
--- /dev/null
+++ b/deps/rabbitmq_prometheus/docker/grafana/publish/erlang-memory-allocators-info-2019-12-03.jpg
Binary files differ
diff --git a/deps/rabbitmq_prometheus/docker/grafana/publish/erlang-memory-allocators-logo-2019-12-03.jpg b/deps/rabbitmq_prometheus/docker/grafana/publish/erlang-memory-allocators-logo-2019-12-03.jpg
new file mode 100644
index 0000000000..c327c70559
--- /dev/null
+++ b/deps/rabbitmq_prometheus/docker/grafana/publish/erlang-memory-allocators-logo-2019-12-03.jpg
Binary files differ
diff --git a/deps/rabbitmq_prometheus/docker/grafana/publish/rabbitmq-logo.png b/deps/rabbitmq_prometheus/docker/grafana/publish/rabbitmq-logo.png
new file mode 100644
index 0000000000..ca01bbc1f1
--- /dev/null
+++ b/deps/rabbitmq_prometheus/docker/grafana/publish/rabbitmq-logo.png
Binary files differ
diff --git a/deps/rabbitmq_prometheus/docker/grafana/publish/rabbitmq-overview-10991.md b/deps/rabbitmq_prometheus/docker/grafana/publish/rabbitmq-overview-10991.md
new file mode 100644
index 0000000000..aec15a34ad
--- /dev/null
+++ b/deps/rabbitmq_prometheus/docker/grafana/publish/rabbitmq-overview-10991.md
@@ -0,0 +1,40 @@
+# RabbitMQ-Overview
+
+An alternative to RabbitMQ Management Overview
+
+## Categories
+
+* RabbitMQ
+
+## README
+
+Understand the state of any RabbitMQ cluster at a glance. Includes all metrics displayed on the RabbitMQ Management Overview page.
+
+This dashboard includes detailed explanations for all metrics displayed, with links to relevant official docs and guides.
+
+All metrics are node-specific, making it trivial to visualise cluster imbalances (a.k.a. cluster hotspots).
+
+Some graph panels include sensible default thresholds.
+
+Metrics displayed:
+
+* Node identity, including RabbitMQ & Erlang/OTP version
+* Node memory & disk available before publishers are blocked (alarm triggers)
+* Node file descriptors & TCP sockets available
+* Ready & pending messages
+* Incoming message rates: published / routed to queues / confirmed / unconfirmed / returned / dropped
+* Outgoing message rates: delivered with auto or manual acks / acknowledged / redelivered
+* Polling operations with auto or manual acks, as well as empty ops
+* Queues, including declaration & deletion rates
+* Channels, including open & close rates
+* Connections, including open & close rates
+
+Filter by:
+
+* RabbitMQ Cluster
+
+Requires `rabbitmq-prometheus` to be enabled, a built-in plugin since [RabbitMQ v3.8.0](https://github.com/rabbitmq/rabbitmq-server/releases/tag/v3.8.0)
+
+Learn more about [RabbitMQ built-in Prometheus support](https://www.rabbitmq.com/prometheus.html)
+
+To get it working locally with RabbitMQ in 3 simple steps, follow this [Quick Start guide](https://www.rabbitmq.com/prometheus.html#quick-start)
diff --git a/deps/rabbitmq_prometheus/docker/grafana/publish/rabbitmq-overview-2019-10-21.png b/deps/rabbitmq_prometheus/docker/grafana/publish/rabbitmq-overview-2019-10-21.png
new file mode 100644
index 0000000000..0533d0fe5e
--- /dev/null
+++ b/deps/rabbitmq_prometheus/docker/grafana/publish/rabbitmq-overview-2019-10-21.png
Binary files differ
diff --git a/deps/rabbitmq_prometheus/docker/grafana/publish/rabbitmq-overview-collapsed-2019-10-21.png b/deps/rabbitmq_prometheus/docker/grafana/publish/rabbitmq-overview-collapsed-2019-10-21.png
new file mode 100644
index 0000000000..4f7c19d640
--- /dev/null
+++ b/deps/rabbitmq_prometheus/docker/grafana/publish/rabbitmq-overview-collapsed-2019-10-21.png
Binary files differ
diff --git a/deps/rabbitmq_prometheus/docker/grafana/publish/rabbitmq-overview-info-2019-10-21.png b/deps/rabbitmq_prometheus/docker/grafana/publish/rabbitmq-overview-info-2019-10-21.png
new file mode 100644
index 0000000000..554e115897
--- /dev/null
+++ b/deps/rabbitmq_prometheus/docker/grafana/publish/rabbitmq-overview-info-2019-10-21.png
Binary files differ
diff --git a/deps/rabbitmq_prometheus/docker/grafana/publish/rabbitmq-overview-logo-2019-10-21-original.png b/deps/rabbitmq_prometheus/docker/grafana/publish/rabbitmq-overview-logo-2019-10-21-original.png
new file mode 100644
index 0000000000..71ffd1d25b
--- /dev/null
+++ b/deps/rabbitmq_prometheus/docker/grafana/publish/rabbitmq-overview-logo-2019-10-21-original.png
Binary files differ
diff --git a/deps/rabbitmq_prometheus/docker/grafana/publish/rabbitmq-overview-logo-2019-10-21.png b/deps/rabbitmq_prometheus/docker/grafana/publish/rabbitmq-overview-logo-2019-10-21.png
new file mode 100644
index 0000000000..cfb452ceb1
--- /dev/null
+++ b/deps/rabbitmq_prometheus/docker/grafana/publish/rabbitmq-overview-logo-2019-10-21.png
Binary files differ
diff --git a/deps/rabbitmq_prometheus/docker/grafana/publish/rabbitmq-perf-test-instance-2019-10-03.png b/deps/rabbitmq_prometheus/docker/grafana/publish/rabbitmq-perf-test-instance-2019-10-03.png
new file mode 100644
index 0000000000..314b28d92d
--- /dev/null
+++ b/deps/rabbitmq_prometheus/docker/grafana/publish/rabbitmq-perf-test-instance-2019-10-03.png
Binary files differ
diff --git a/deps/rabbitmq_prometheus/docker/grafana/publish/rabbitmq-perf-test-logo-2019-10-04-original.png b/deps/rabbitmq_prometheus/docker/grafana/publish/rabbitmq-perf-test-logo-2019-10-04-original.png
new file mode 100644
index 0000000000..2765ae5a4c
--- /dev/null
+++ b/deps/rabbitmq_prometheus/docker/grafana/publish/rabbitmq-perf-test-logo-2019-10-04-original.png
Binary files differ
diff --git a/deps/rabbitmq_prometheus/docker/grafana/publish/rabbitmq-perf-test-logo-2019-10-04.png b/deps/rabbitmq_prometheus/docker/grafana/publish/rabbitmq-perf-test-logo-2019-10-04.png
new file mode 100644
index 0000000000..7801409dc9
--- /dev/null
+++ b/deps/rabbitmq_prometheus/docker/grafana/publish/rabbitmq-perf-test-logo-2019-10-04.png
Binary files differ
diff --git a/deps/rabbitmq_prometheus/docker/grafana/publish/rabbitmq-perf-test-overview-2019-10-03.png b/deps/rabbitmq_prometheus/docker/grafana/publish/rabbitmq-perf-test-overview-2019-10-03.png
new file mode 100644
index 0000000000..369035c610
--- /dev/null
+++ b/deps/rabbitmq_prometheus/docker/grafana/publish/rabbitmq-perf-test-overview-2019-10-03.png
Binary files differ
diff --git a/deps/rabbitmq_prometheus/docker/grafana/publish/rabbitmq-perf-test-percentile-2019-10-03.png b/deps/rabbitmq_prometheus/docker/grafana/publish/rabbitmq-perf-test-percentile-2019-10-03.png
new file mode 100644
index 0000000000..a56d0e96e2
--- /dev/null
+++ b/deps/rabbitmq_prometheus/docker/grafana/publish/rabbitmq-perf-test-percentile-2019-10-03.png
Binary files differ
diff --git a/deps/rabbitmq_prometheus/docker/grafana/publish/rabbitmq-perftest-6566.md b/deps/rabbitmq_prometheus/docker/grafana/publish/rabbitmq-perftest-6566.md
new file mode 100644
index 0000000000..21b12ef64c
--- /dev/null
+++ b/deps/rabbitmq_prometheus/docker/grafana/publish/rabbitmq-perftest-6566.md
@@ -0,0 +1,23 @@
+# RabbitMQ-PerfTest
+
+RabbitMQ message latency & throughput across all PerfTest instances
+
+## Categories
+
+* RabbitMQ
+
+## README
+
+[PerfTest](https://github.com/rabbitmq/rabbitmq-perf-test) is a RabbitMQ performance testing tool. Verified to work with PerfTest v2.2.0 - v2.9.0.
+
+Visualise the following metrics from all PerfTest instances:
+
+* end-to-end message latency
+* publish confirm latency
+* messages published / s
+* messages confirmed / s
+* messages returned / s
+* messages consumed / s
+* messages rejected / s
+
+To get it working locally with RabbitMQ in 3 simple steps, follow this [Quick Start guide](https://www.rabbitmq.com/prometheus.html#quick-start)
diff --git a/deps/rabbitmq_prometheus/docker/grafana/publish/rabbitmq-quorum-queues-raft-11340.md b/deps/rabbitmq_prometheus/docker/grafana/publish/rabbitmq-quorum-queues-raft-11340.md
new file mode 100644
index 0000000000..8dac14364a
--- /dev/null
+++ b/deps/rabbitmq_prometheus/docker/grafana/publish/rabbitmq-quorum-queues-raft-11340.md
@@ -0,0 +1,29 @@
+# RabbitMQ-Quorum-Queues-Raft
+
+Raft state for all Quorum Queues running in a RabbitMQ cluster
+
+## Categories
+
+* RabbitMQ
+
+## README
+
+Helps you understand the state of all Raft members running Quorum Queues in a RabbitMQ 3.8.x cluster.
+
+Metrics displayed:
+
+* Log entries committed / s
+* Log entry commit latency
+* Uncommitted log entries
+* Leader elections / s
+* Raft members with >5k entries in the log
+
+Filter by:
+
+* RabbitMQ Cluster
+
+Depends on the `rabbitmq-prometheus` plugin, built-in since [RabbitMQ v3.8.0](https://github.com/rabbitmq/rabbitmq-server/releases/tag/v3.8.0)
+
+Learn more about [RabbitMQ built-in Prometheus support](https://www.rabbitmq.com/prometheus.html)
+
+To get it working locally with RabbitMQ in 3 simple steps, follow this [Quick Start guide](https://www.rabbitmq.com/prometheus.html#quick-start)
diff --git a/deps/rabbitmq_prometheus/docker/grafana/publish/rabbitmq-quorum-queues-raft-2019-12-03-original.png b/deps/rabbitmq_prometheus/docker/grafana/publish/rabbitmq-quorum-queues-raft-2019-12-03-original.png
new file mode 100644
index 0000000000..22e194bc7e
--- /dev/null
+++ b/deps/rabbitmq_prometheus/docker/grafana/publish/rabbitmq-quorum-queues-raft-2019-12-03-original.png
Binary files differ
diff --git a/deps/rabbitmq_prometheus/docker/grafana/publish/rabbitmq-quorum-queues-raft-2019-12-03.jpg b/deps/rabbitmq_prometheus/docker/grafana/publish/rabbitmq-quorum-queues-raft-2019-12-03.jpg
new file mode 100644
index 0000000000..b47ad2b218
--- /dev/null
+++ b/deps/rabbitmq_prometheus/docker/grafana/publish/rabbitmq-quorum-queues-raft-2019-12-03.jpg
Binary files differ
diff --git a/deps/rabbitmq_prometheus/docker/grafana/publish/rabbitmq-quorum-queues-raft-info-2019-12-03.jpg b/deps/rabbitmq_prometheus/docker/grafana/publish/rabbitmq-quorum-queues-raft-info-2019-12-03.jpg
new file mode 100644
index 0000000000..a4d3ec5344
--- /dev/null
+++ b/deps/rabbitmq_prometheus/docker/grafana/publish/rabbitmq-quorum-queues-raft-info-2019-12-03.jpg
Binary files differ
diff --git a/deps/rabbitmq_prometheus/docker/grafana/publish/rabbitmq-quorum-queues-raft-info-2019-12-030-original.png b/deps/rabbitmq_prometheus/docker/grafana/publish/rabbitmq-quorum-queues-raft-info-2019-12-030-original.png
new file mode 100644
index 0000000000..04c83c2f58
--- /dev/null
+++ b/deps/rabbitmq_prometheus/docker/grafana/publish/rabbitmq-quorum-queues-raft-info-2019-12-030-original.png
Binary files differ
diff --git a/deps/rabbitmq_prometheus/docker/grafana/publish/rabbitmq-quorum-queues-raft-logo-2019-12-03.png b/deps/rabbitmq_prometheus/docker/grafana/publish/rabbitmq-quorum-queues-raft-logo-2019-12-03.png
new file mode 100644
index 0000000000..bb2acac002
--- /dev/null
+++ b/deps/rabbitmq_prometheus/docker/grafana/publish/rabbitmq-quorum-queues-raft-logo-2019-12-03.png
Binary files differ
diff --git a/deps/rabbitmq_prometheus/docker/grafana/publish/rabbitmq-quorum-queues-raft-node-2019-12-03-original.png b/deps/rabbitmq_prometheus/docker/grafana/publish/rabbitmq-quorum-queues-raft-node-2019-12-03-original.png
new file mode 100644
index 0000000000..b140387839
--- /dev/null
+++ b/deps/rabbitmq_prometheus/docker/grafana/publish/rabbitmq-quorum-queues-raft-node-2019-12-03-original.png
Binary files differ
diff --git a/deps/rabbitmq_prometheus/docker/grafana/publish/rabbitmq-quorum-queues-raft-node-2019-12-03.jpg b/deps/rabbitmq_prometheus/docker/grafana/publish/rabbitmq-quorum-queues-raft-node-2019-12-03.jpg
new file mode 100644
index 0000000000..61cf8845e4
--- /dev/null
+++ b/deps/rabbitmq_prometheus/docker/grafana/publish/rabbitmq-quorum-queues-raft-node-2019-12-03.jpg
Binary files differ
diff --git a/deps/rabbitmq_prometheus/docker/prometheus.yml b/deps/rabbitmq_prometheus/docker/prometheus.yml
new file mode 100644
index 0000000000..cec45c7d04
--- /dev/null
+++ b/deps/rabbitmq_prometheus/docker/prometheus.yml
@@ -0,0 +1,71 @@
+# https://prometheus.io/docs/prometheus/latest/configuration/configuration/
+global:
+ # This is higher than RabbitMQ's collect_statistics_interval,
+ # but still close enough to capture metrics that were refreshed within this interval
+ # This value determines the range that we use with rate():
+ # https://www.robustperception.io/what-range-should-i-use-with-rate
+ scrape_interval: 15s # Default is every 1 minute.
+ # scrape_timeout: 10s # Default is 10 seconds.
+ # evaluation_interval: 60s # Default is every 1 minute.
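+ # A hedged illustration of the rate() range this interval supports: with a 15s
+ # scrape_interval, a range of at least 4x (60s) is a safe choice, e.g.
+ #   rate(rabbitmq_connections_opened_total[60s])
+ # (metric name shown for illustration only)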
+
+# Alertmanager configuration
+alerting:
+ alertmanagers:
+ - static_configs:
+ - targets:
+ # - 'alertmanager:9093'
+
+# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
+rule_files:
+ # - "first_rules.yml"
+ # - "second_rules.yml"
+
+scrape_configs:
+ # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
+ - job_name: 'prometheus'
+ static_configs:
+ - targets: ['localhost:9090']
+ - job_name: 'docker'
+ static_configs:
+ - targets: ['docker.for.mac.localhost:9323']
+ - job_name: 'node-exporter'
+ static_configs:
+ - targets: ['node-exporter:9100']
+ - job_name: 'cadvisor'
+ static_configs:
+ - targets: ['cadvisor:8080']
+ - job_name: 'rabbitmq-server'
+ static_configs:
+ - targets:
+ - 'rmq0:15692'
+ - 'rmq1:15692'
+ - 'rmq2:15692'
+ - 'rmq0-dist-tls:15692'
+ - 'rmq1-dist-tls:15692'
+ - 'rmq2-dist-tls:15692'
+ - 'rmq0-qq:15692'
+ - 'rmq1-qq:15692'
+ - 'rmq2-qq:15692'
+ - job_name: 'rabbitmq-perf-test'
+ static_configs:
+ - targets:
+ # docker-compose-overview.yml
+ - 'basic-get:8080'
+ - 'basic-get-auto:8080'
+ - 'greedy-consumer:8080'
+ - 'publisher-confirms:8080'
+ - 'slow-consumer-persistent:8080'
+ - 'nack:8080'
+ - 'unroutable-return:8080'
+ - 'unroutable-drop:8080'
+ # docker-compose-dist-tls.yml
+ - 'stress-dist-tls:8080'
+ # docker-compose-qq.yml
+ - 'qq-moderate-load:8080'
+ - job_name: 'rabbitmq-exporter'
+ scrape_interval: 60s
+ scrape_timeout: 59s
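+ # The 59s timeout (assumed to be deliberate, just under the 60s interval) mirrors
+ # the 59s scrape-duration threshold in the rabbitmq-exporter_vs_rabbitmq-prometheus
+ # dashboard above, so slow scrapes become visible before they are dropped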
+ static_configs:
+ - targets:
+ # docker-compose-dist-tls.yml
+ - 'rabbitmq-exporter:9090'
diff --git a/deps/rabbitmq_prometheus/docker/rabbitmq-dist-tls-definitions.json b/deps/rabbitmq_prometheus/docker/rabbitmq-dist-tls-definitions.json
new file mode 100644
index 0000000000..cda3418f4b
--- /dev/null
+++ b/deps/rabbitmq_prometheus/docker/rabbitmq-dist-tls-definitions.json
@@ -0,0 +1,49 @@
+{
+ "global_parameters": [
+ {"name": "cluster_name", "value": "rabbitmq-dist-tls"}
+ ],
+ "permissions": [
+ {
+ "configure": ".*",
+ "read": ".*",
+ "user": "guest",
+ "vhost": "/",
+ "write": ".*"
+ }
+ ],
+ "policies": [
+ {
+ "apply-to": "queues",
+ "definition": {"ha-mode": "exactly", "ha-params": 1},
+ "name": "ha1",
+ "pattern": "ha1.*",
+ "priority": 0,
+ "vhost": "/"
+ },
+ {
+ "apply-to": "queues",
+ "definition": {"ha-mode": "exactly", "ha-params": 2},
+ "name": "ha2",
+ "pattern": "ha2.*",
+ "priority": 0,
+ "vhost": "/"
+ },
+ {
+ "apply-to": "queues",
+ "definition": {"ha-mode": "exactly", "ha-params": 3},
+ "name": "ha3",
+ "pattern": "ha3.*",
+ "priority": 0,
+ "vhost": "/"
+ }
+ ],
+ "users": [
+ {
+ "hashing_algorithm": "rabbit_password_hashing_sha256",
+ "name": "guest",
+ "password_hash": "hENva+fxJ7gnmaBK/WhwNHOYbvB53/QjNcqhtF4KqF7p21+x",
+ "tags": "administrator"
+ }
+ ],
+ "vhosts": [{"name": "/"}]
+}
diff --git a/deps/rabbitmq_prometheus/docker/rabbitmq-dist-tls.conf b/deps/rabbitmq_prometheus/docker/rabbitmq-dist-tls.conf
new file mode 100644
index 0000000000..ecc7de7633
--- /dev/null
+++ b/deps/rabbitmq_prometheus/docker/rabbitmq-dist-tls.conf
@@ -0,0 +1,25 @@
+# https://github.com/rabbitmq/rabbitmq-server/blob/master/docs/rabbitmq.conf.example
+loopback_users.guest = false
+listeners.tcp.default = 5672
+management.listener.port = 15672
+management.listener.ssl = false
+
+vm_memory_high_watermark.absolute = 4GiB
+vm_memory_high_watermark_paging_ratio = 0.9
+disk_free_limit.absolute = 2048MiB
+
+cluster_name = rabbitmq-dist-tls
+
+cluster_formation.peer_discovery_backend = rabbit_peer_discovery_classic_config
+cluster_formation.classic_config.nodes.1 = rabbit@rmq0-dist-tls
+cluster_formation.classic_config.nodes.2 = rabbit@rmq1-dist-tls
+cluster_formation.classic_config.nodes.3 = rabbit@rmq2-dist-tls
+
+management.load_definitions = /etc/rabbitmq/rabbitmq-definitions.json
+
+# background_gc_enabled = true
+
+# Increase the 5s default so that we stay below Prometheus' scrape interval,
+# but still refresh in time for each Prometheus scrape.
+# This is linked to the Prometheus scrape interval & the range used with rate()
+collect_statistics_interval = 10000
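+# (the value is in milliseconds: 10000 ms = 10s, below the 15s scrape_interval in prometheus.yml)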
diff --git a/deps/rabbitmq_prometheus/docker/rabbitmq-env.conf b/deps/rabbitmq_prometheus/docker/rabbitmq-env.conf
new file mode 100644
index 0000000000..d21503cf15
--- /dev/null
+++ b/deps/rabbitmq_prometheus/docker/rabbitmq-env.conf
@@ -0,0 +1,4 @@
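+# A short gloss of the flags below (paths & values are the ones used in this directory):
+# - INET_TLS switches Erlang Distribution to TLS, configured by ssl_dist.config
+# - INET_DIST_BUFFER raises the distribution socket buffers to 1048576 bytes (1 MiB)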
+export INET_TLS="-proto_dist inet_tls -ssl_dist_optfile /etc/rabbitmq/ssl_dist.config"
+export INET_DIST_BUFFER="-kernel inet_dist_connect_options [{buffer,1048576}] -kernel inet_dist_listen_options [{buffer,1048576}]"
+export SERVER_ADDITIONAL_ERL_ARGS="$INET_TLS $INET_DIST_BUFFER"
+export CTL_ERL_ARGS="$INET_TLS $INET_DIST_BUFFER"
diff --git a/deps/rabbitmq_prometheus/docker/rabbitmq-overview-definitions.json b/deps/rabbitmq_prometheus/docker/rabbitmq-overview-definitions.json
new file mode 100644
index 0000000000..0d58b235be
--- /dev/null
+++ b/deps/rabbitmq_prometheus/docker/rabbitmq-overview-definitions.json
@@ -0,0 +1,49 @@
+{
+ "global_parameters": [
+ {"name": "cluster_name", "value": "rabbitmq-overview"}
+ ],
+ "permissions": [
+ {
+ "configure": ".*",
+ "read": ".*",
+ "user": "guest",
+ "vhost": "/",
+ "write": ".*"
+ }
+ ],
+ "policies": [
+ {
+ "apply-to": "queues",
+ "definition": {"ha-mode": "exactly", "ha-params": 1},
+ "name": "ha1",
+ "pattern": "ha1.*",
+ "priority": 0,
+ "vhost": "/"
+ },
+ {
+ "apply-to": "queues",
+ "definition": {"ha-mode": "exactly", "ha-params": 2},
+ "name": "ha2",
+ "pattern": "ha2.*",
+ "priority": 0,
+ "vhost": "/"
+ },
+ {
+ "apply-to": "queues",
+ "definition": {"ha-mode": "exactly", "ha-params": 3},
+ "name": "ha3",
+ "pattern": "ha3.*",
+ "priority": 0,
+ "vhost": "/"
+ }
+ ],
+ "users": [
+ {
+ "hashing_algorithm": "rabbit_password_hashing_sha256",
+ "name": "guest",
+ "password_hash": "hENva+fxJ7gnmaBK/WhwNHOYbvB53/QjNcqhtF4KqF7p21+x",
+ "tags": "administrator"
+ }
+ ],
+ "vhosts": [{"name": "/"}]
+}
diff --git a/deps/rabbitmq_prometheus/docker/rabbitmq-overview.conf b/deps/rabbitmq_prometheus/docker/rabbitmq-overview.conf
new file mode 100644
index 0000000000..bc157213d3
--- /dev/null
+++ b/deps/rabbitmq_prometheus/docker/rabbitmq-overview.conf
@@ -0,0 +1,32 @@
+# https://github.com/rabbitmq/rabbitmq-server/blob/master/docs/rabbitmq.conf.example
+loopback_users.guest = false
+listeners.tcp.default = 5672
+management.listener.port = 15672
+management.listener.ssl = false
+
+vm_memory_high_watermark.absolute = 768MiB
+vm_memory_high_watermark_paging_ratio = 0.2
+
+cluster_name = rabbitmq-overview
+
+cluster_formation.peer_discovery_backend = rabbit_peer_discovery_classic_config
+cluster_formation.classic_config.nodes.1 = rabbit@rmq0
+cluster_formation.classic_config.nodes.2 = rabbit@rmq1
+cluster_formation.classic_config.nodes.3 = rabbit@rmq2
+
+management.load_definitions = /etc/rabbitmq/rabbitmq-definitions.json
+
+# background_gc_enabled = true
+
+# Increase the 5s default so that we stay below Prometheus' scrape interval,
+# but still refresh in time for each Prometheus scrape.
+# This is linked to the Prometheus scrape interval & the range used with rate()
+collect_statistics_interval = 10000
+
+# Run RabbitMQ Management in Management-only mode, no stats
+# https://github.com/rabbitmq/rabbitmq-management/pull/707
+# management.disable_stats = true
+
+# Return per-object metrics (unaggregated)
+# https://github.com/rabbitmq/rabbitmq-prometheus/pull/28
+# prometheus.return_per_object_metrics = true
diff --git a/deps/rabbitmq_prometheus/docker/rabbitmq-qq-definitions.json b/deps/rabbitmq_prometheus/docker/rabbitmq-qq-definitions.json
new file mode 100644
index 0000000000..13347ed36e
--- /dev/null
+++ b/deps/rabbitmq_prometheus/docker/rabbitmq-qq-definitions.json
@@ -0,0 +1,23 @@
+{
+ "global_parameters": [
+ {"name": "cluster_name", "value": "rabbitmq-qq"}
+ ],
+ "permissions": [
+ {
+ "configure": ".*",
+ "read": ".*",
+ "user": "guest",
+ "vhost": "/",
+ "write": ".*"
+ }
+ ],
+ "users": [
+ {
+ "hashing_algorithm": "rabbit_password_hashing_sha256",
+ "name": "guest",
+ "password_hash": "hENva+fxJ7gnmaBK/WhwNHOYbvB53/QjNcqhtF4KqF7p21+x",
+ "tags": "administrator"
+ }
+ ],
+ "vhosts": [{"name": "/"}]
+}
diff --git a/deps/rabbitmq_prometheus/docker/rabbitmq-qq-env.conf b/deps/rabbitmq_prometheus/docker/rabbitmq-qq-env.conf
new file mode 100644
index 0000000000..2a641d2c6c
--- /dev/null
+++ b/deps/rabbitmq_prometheus/docker/rabbitmq-qq-env.conf
@@ -0,0 +1,2 @@
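+# 536870912 bytes = 512 MiB, the Raft WAL size that rabbitmq-qq.conf sizes
+# node memory against (3x headroom)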
+export RA="-ra wal_max_size_bytes 536870912"
+export SERVER_START_ARGS="$RA"
diff --git a/deps/rabbitmq_prometheus/docker/rabbitmq-qq.conf b/deps/rabbitmq_prometheus/docker/rabbitmq-qq.conf
new file mode 100644
index 0000000000..ac50706dab
--- /dev/null
+++ b/deps/rabbitmq_prometheus/docker/rabbitmq-qq.conf
@@ -0,0 +1,32 @@
+# https://github.com/rabbitmq/rabbitmq-server/blob/master/docs/rabbitmq.conf.example
+loopback_users.guest = false
+listeners.tcp.default = 5672
+management.listener.port = 15672
+management.listener.ssl = false
+
+# Raft WAL defaults to 512MB
+# We want the node to have more memory available than 512MB, ideally 3x
+vm_memory_high_watermark.absolute = 1536MB
+
+cluster_name = rabbitmq-qq
+
+cluster_formation.peer_discovery_backend = rabbit_peer_discovery_classic_config
+cluster_formation.classic_config.nodes.1 = rabbit@rmq0-qq
+cluster_formation.classic_config.nodes.2 = rabbit@rmq1-qq
+cluster_formation.classic_config.nodes.3 = rabbit@rmq2-qq
+
+management.load_definitions = /etc/rabbitmq/rabbitmq-definitions.json
+
+# background_gc_enabled = true
+
+# Increase the 5s default so that we stay below Prometheus' scrape interval,
+# but still refresh in time for each Prometheus scrape.
+# This is linked to the Prometheus scrape interval & the range used with rate()
+collect_statistics_interval = 10000
+
+# Enable debugging
+# log.console.level = debug
+
+# Return per-object metrics (unaggregated)
+# https://github.com/rabbitmq/rabbitmq-prometheus/pull/28
+prometheus.return_per_object_metrics = true
diff --git a/deps/rabbitmq_prometheus/docker/rabbitmq-ssl/ca_certificate.pem b/deps/rabbitmq_prometheus/docker/rabbitmq-ssl/ca_certificate.pem
new file mode 100644
index 0000000000..717d4c6b6b
--- /dev/null
+++ b/deps/rabbitmq_prometheus/docker/rabbitmq-ssl/ca_certificate.pem
@@ -0,0 +1,20 @@
+-----BEGIN CERTIFICATE-----
+MIIDRTCCAi2gAwIBAgIJAJ6tbccLOBogMA0GCSqGSIb3DQEBCwUAMDExIDAeBgNV
+BAMMF1RMU0dlblNlbGZTaWduZWR0Um9vdENBMQ0wCwYDVQQHDAQkJCQkMB4XDTE5
+MDUyODExMzk1NFoXDTI5MDUyNTExMzk1NFowMTEgMB4GA1UEAwwXVExTR2VuU2Vs
+ZlNpZ25lZHRSb290Q0ExDTALBgNVBAcMBCQkJCQwggEiMA0GCSqGSIb3DQEBAQUA
+A4IBDwAwggEKAoIBAQDdG6L5G8lamSkbVB5fvwX+2HA5JOylrk7AxUnWRJJqtg3q
+5QghMmVeBNYuO9h9spfZ4kMmfje4YyDHjgBzfymiu1nDWgQSJnWWCRV+RnT1OHsr
+Qm+8Y4pMASb4dSobU9q7aXrwuVaSbuNFqyALay+A1ZY/RmxEF9PsmS2e08OPnGxt
+lZc/h69udrx5E17WMN8KK3A8A6zAKDp5xVDOBURHJoujQgLrSQLZHrSr/GhWTT6Y
+/aO7wJz5h9AK3O31COtbSoEVpv6z1rzTJe7TVmXmGJE40+SAP9/k0vu7TnLgBAVJ
+38TlpTXqkxwSqdcqQOgRYl6MWS3JRVJCr3uVLqxpAgMBAAGjYDBeMAsGA1UdDwQE
+AwIBBjAdBgNVHQ4EFgQU5uLt6Ju+zd4VLtzZIm7H92x7SN8wHwYDVR0jBBgwFoAU
+5uLt6Ju+zd4VLtzZIm7H92x7SN8wDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0B
+AQsFAAOCAQEACvTdgRUZbkfzuCuhEgrCqxQtiyMTBJt1yPQ3s4j+T6ZpW/iz6D4r
+7TwqP46gx8OL9wmQpgPskl8y+dtsG8YyMsoM4jL2jlSK0q+zZ2d2EGZUCbArZee9
+m64wa2Z/ZRT59tRQFn7gEc0WcwvG9V4mPhuNwwSaYQcebDwJHybp8nmUu/0+JEVS
+cm6Yyzqmy5z95voobya15sV/IrtsOg4ufPUhjDoBOd6pvOkxSMBG6P/ZPE904HdY
+38m8KdsfyDeHWG5yGhtRdEnqJlaQ6a/MoR13MwaYhb3cevctTo+hYaBY1fM7MEEe
+FbuyVxWhwm2JdgI+e0ychay/LzNOxr5nrg==
+-----END CERTIFICATE-----
diff --git a/deps/rabbitmq_prometheus/docker/rabbitmq-ssl/ca_key.pem b/deps/rabbitmq_prometheus/docker/rabbitmq-ssl/ca_key.pem
new file mode 100644
index 0000000000..7c0a77ae32
--- /dev/null
+++ b/deps/rabbitmq_prometheus/docker/rabbitmq-ssl/ca_key.pem
@@ -0,0 +1,28 @@
+-----BEGIN PRIVATE KEY-----
+MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDdG6L5G8lamSkb
+VB5fvwX+2HA5JOylrk7AxUnWRJJqtg3q5QghMmVeBNYuO9h9spfZ4kMmfje4YyDH
+jgBzfymiu1nDWgQSJnWWCRV+RnT1OHsrQm+8Y4pMASb4dSobU9q7aXrwuVaSbuNF
+qyALay+A1ZY/RmxEF9PsmS2e08OPnGxtlZc/h69udrx5E17WMN8KK3A8A6zAKDp5
+xVDOBURHJoujQgLrSQLZHrSr/GhWTT6Y/aO7wJz5h9AK3O31COtbSoEVpv6z1rzT
+Je7TVmXmGJE40+SAP9/k0vu7TnLgBAVJ38TlpTXqkxwSqdcqQOgRYl6MWS3JRVJC
+r3uVLqxpAgMBAAECggEARciPKygF6L4mJyXpYDad4czVvnNU7QWSfWzG3QVPYLaM
+tfILGs8ZBw9nubfRn/pZyr6Kiv+S/nLqyZCbu9gKf/25WtpyJnhU7EF5sxzxlON6
+nDCfq0mPQGWsswXngDB6/GNSFd+o3xZ4TGix+Gif2X2PqfDEAsmybR37NAGJ2yzh
+BBs0mzvWqxXXVkaJCe8M2P6VCaqOIEl0FRb/tOcP1kguQYOMh2/dPDOWQZApEZXA
+OejQPveaywTc1ZmqOtWIIHHMPlNd+oayaUIsKGIz/D+KSsyjCwiV9sZ1BOye6bEd
+Uw4C1COgVAEVC+4qYD/QZ3vy6smAvLTGWKRX61YMAQKBgQD5TojClmTOxZ1ihTtG
+oD6PKmpKJz1fi3pt6X2FNW0RRefZfqx5m1DGFRuzEwREag65s79lTEUP3Y/D03Tz
+7dfMMMiRdXLBC4fqq0b8Dlyhrylp0wgbJlKe003frM4UseZVN2JsrpaDfhzsmOsG
+m6nFUx+Dimys8T3FvcB4B842SQKBgQDjC0tMr0UDhpV/OtBUmnHB/J/Bh5YI8zxs
+fSH2UlwKC8omad8UCw0e9jOXcMoQ/Va0n9JXfRlOzBGxr/lUt+8xA3jq0nVWrzsB
+rtHDEAxgYN9Ua4CpbGW91WnVd67oBzQHDb7E9H1jdv06KBGcciV1ubPAPW0HbPoY
+QnZPJzhFIQKBgQDxoVNdveQgoFnFJjpoXmGj4sk368ZJN2dG5jn5BOocBrQLEgJa
+rtVf1w+MxY6qPFzfUb7ckEqTcPupIebFkS1Jw7JxgPLi9akbIjxfig/vyVSLM7U0
+kzUaqNReSR9H4ZDL4S0tBx3sfivMiP2p3u9xx/nQRTbFe9+SKzJDY9VSuQKBgQCr
+f/yBEGOxJhgG4wY64e7L+WczaJeMay9ZQ6xDxDKB5CXunkpDeZWPramK20fHTZYR
+GihorbZQLxWugp5zv/yNqXHM7uxzbinEO6l3DshKhOItgobNc3Pm0S4AcUdNPSOv
+sFJJs+SDHX5qT5n98rsZEg0VSVDyKv8/E/UKFTYMAQKBgBa66F3msuNsOdmn23JT
+CtByn0Rp+QjhxK03BlBy6WHsM2JCJW6V2TxvHkC0lIXx6QfEiZ7HTNFk9OXzO++i
+GPQq9sKwZOICY3fu3rnEZIOFLTa1eqzLh9f2PftxnZG0SME9Mgmp4SYLLN7oV0UY
+lEpI1sA6tlO+xGNwY7+aBaG0
+-----END PRIVATE KEY-----
diff --git a/deps/rabbitmq_prometheus/docker/rabbitmq-ssl/client_certificate.pem b/deps/rabbitmq_prometheus/docker/rabbitmq-ssl/client_certificate.pem
new file mode 100644
index 0000000000..3beb7918af
--- /dev/null
+++ b/deps/rabbitmq_prometheus/docker/rabbitmq-ssl/client_certificate.pem
@@ -0,0 +1,19 @@
+-----BEGIN CERTIFICATE-----
+MIIDIzCCAgugAwIBAgIBAjANBgkqhkiG9w0BAQsFADAxMSAwHgYDVQQDDBdUTFNH
+ZW5TZWxmU2lnbmVkdFJvb3RDQTENMAsGA1UEBwwEJCQkJDAeFw0xOTA1MjgxMTM5
+NTVaFw0yOTA1MjUxMTM5NTVaMCIxDzANBgNVBAMMBmZvY2tlcjEPMA0GA1UECgwG
+Y2xpZW50MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAygvU5TZhn127
+pF4vU+GGUJ23g/oZcUStAmZJOm3f5WnUIsRCW8y+jA9G12Oav4Ntf90ctedWUAFk
+C0dpWSw+951StA7TnBzZYUCnzhXf4IOstG86LdZZPlJMUlRB7S2C2p493WFtygnG
++6YSL38wC3O1EFImm8XJQ5mAX1EcN8YVWsKxeyIHn1T0Ql6HQ9Ij01k6CxWaRo8M
+mzn80aMwCg06VsZB6WcYZKxS3Pc52xfYg0Mes31gO9TFHQORjvsRKRRW0aDIEFMq
+7s2n/OkdKV5KMye8j0Yy86lyzJGgVtddidO0yfPS1KoFrS/pQDDwkAy2RG3O36pl
+fib/zvYjAQIDAQABo1UwUzAJBgNVHRMEAjAAMAsGA1UdDwQEAwIFoDATBgNVHSUE
+DDAKBggrBgEFBQcDAjAkBgNVHREEHTAbggZmb2NrZXKCBmZvY2tlcoIJbG9jYWxo
+b3N0MA0GCSqGSIb3DQEBCwUAA4IBAQCq19cxFMhe2zhF8A8mccishK2QFH5fq5Kb
+BRFFm5YoK/6YQU0JKyVC5jTLm1KvMdHjaT1s73znJTydOmy42CjAoLE8yIxZRZ6C
+Txc+JP+dmBNQFLaldiiAxU58VbmM45ulbukrUmyA4V0M5REOxSdqfMOAltKIBHvO
+PrRT2xDg2JIiTdKQngRO7rhPZw8Lwb6IhXju6SyEyRtNW5hkyghMYFdKrL/SmBB7
+3gOaX/hqNvW3d8bzD7CrLxp04c4Ve7D3IrLYXzW1bgV4isLa5I0NPx91MTi26e1Z
+1WpHt2VQIXO/z80ms4QTdNLJyfC48VB9jOvjm+ffjdGGq3ShZ8Wx
+-----END CERTIFICATE-----
diff --git a/deps/rabbitmq_prometheus/docker/rabbitmq-ssl/client_key.p12 b/deps/rabbitmq_prometheus/docker/rabbitmq-ssl/client_key.p12
new file mode 100644
index 0000000000..2aff648b27
--- /dev/null
+++ b/deps/rabbitmq_prometheus/docker/rabbitmq-ssl/client_key.p12
Binary files differ
diff --git a/deps/rabbitmq_prometheus/docker/rabbitmq-ssl/client_key.pem b/deps/rabbitmq_prometheus/docker/rabbitmq-ssl/client_key.pem
new file mode 100644
index 0000000000..679f0fcb9e
--- /dev/null
+++ b/deps/rabbitmq_prometheus/docker/rabbitmq-ssl/client_key.pem
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpAIBAAKCAQEAygvU5TZhn127pF4vU+GGUJ23g/oZcUStAmZJOm3f5WnUIsRC
+W8y+jA9G12Oav4Ntf90ctedWUAFkC0dpWSw+951StA7TnBzZYUCnzhXf4IOstG86
+LdZZPlJMUlRB7S2C2p493WFtygnG+6YSL38wC3O1EFImm8XJQ5mAX1EcN8YVWsKx
+eyIHn1T0Ql6HQ9Ij01k6CxWaRo8Mmzn80aMwCg06VsZB6WcYZKxS3Pc52xfYg0Me
+s31gO9TFHQORjvsRKRRW0aDIEFMq7s2n/OkdKV5KMye8j0Yy86lyzJGgVtddidO0
+yfPS1KoFrS/pQDDwkAy2RG3O36plfib/zvYjAQIDAQABAoIBAQCnEgGJR96RmUCA
+OiGIiwQ1xTTqvPQs5UzqnwoOdqydV1VDKmZ04mjuB/PcR1C+7D6vbHdBM3n7ziqb
+6QED68JlnoG0wk3Y2UInWP9pOk1VqYw154OEQ6qoDgVuTp31MZzyicc+OEJAvrT7
+geenvtRLo/Ik/MlQJcy3SUcNilR7CKgwZVKZbLrMXEj9Oy4A9hjePA4EPuAqFL+6
+g1FreIL/bOs/hLt10+WVRjTfxV+J5LpLxyOCEsCHiGcA5YgT3gytR5KcoR7hUi4n
+8PAmwfg2PYKqe4abkPguyMFxP+q39AAd1T26zt9dqI31TcLcmrubJbsJoU/hk+dO
+PnAXehXBAoGBAO6nvO4FGf34F1Pq58hXsadN39Jyf7Xg/dRh/qHilTYCATIilxIZ
+1TqmOcbKmQdT4p3uutU3HKVyN734D4duDK9rM76NIJxUJOAylEhRYGSHxKVdxj6v
+nsKuQb73EOX5mxfVXkO+w3bSByYtsS0Zq27v9oGE2N13ETg8BbgP7NCjAoGBANi6
++HJdxbs7DD7LY36ejytTHNVEBc9XowLKL5VdgpvRP+g/v6ZZ0Hh+rNlwzGabvxKY
+J1E6Noq+YoiTBMbQtWO6m2/PjlN68pVwayUOibVrAIvHGqpIc7jVAjF7QAxCVXqa
+vDVVM2kG6DQV7PjezfOnCLkY8DPVHik+BXk5qOQLAoGACgoqa4Txa/q02IXoTEhM
+I/KluM+ySa92XinlMWA70LLS5A4Kdut5SedWROOe/mzr+qLhXzjbTqZ4a9h0psjd
+Zb/HxHthZNIxXvEhxc0OlgwUHLTOMzLmJeB4yZTRNCuE98K1HXp39JavUE7ZxkDv
+MlKL9zTkfqSUp932i6C34KsCgYBK7glrcOQl4TpQk3zoj+Urs2ZqjUP89W2UT1a4
+K0Y2nNedcjzORIsAvbsAyjDA4OPkh9pACQJGYaArW7jxC8NKoX+P3pB0aDNqWx2Q
+CECwCjh8GvArIzVvF6XUeaohEuX+p3Sec8B8dTjemR3+xPkrXfp0FB0nrmzpEVpP
+GRI9FQKBgQCeBTw2VrLaAwLNlYeP9TfGwHmID7a8PFE4Atqgm0w7hoF2ABp1tVs0
+U6i2T8ue3fuBPWGh8BJX4EPP0DBbmsSLti+kTKTaDwsW0nZ+yuos3LgP3BOx8fnJ
+4xEZM+nh0qEwRny+nT0u0QJ3jwDOMFRPIVj7Vrf6tWJ0sNi8WUpAbg==
+-----END RSA PRIVATE KEY-----
diff --git a/deps/rabbitmq_prometheus/docker/rabbitmq-ssl/server_certificate.pem b/deps/rabbitmq_prometheus/docker/rabbitmq-ssl/server_certificate.pem
new file mode 100644
index 0000000000..3831468616
--- /dev/null
+++ b/deps/rabbitmq_prometheus/docker/rabbitmq-ssl/server_certificate.pem
@@ -0,0 +1,21 @@
+-----BEGIN CERTIFICATE-----
+MIIDZTCCAk2gAwIBAgIBATANBgkqhkiG9w0BAQsFADAxMSAwHgYDVQQDDBdUTFNH
+ZW5TZWxmU2lnbmVkdFJvb3RDQTENMAsGA1UEBwwEJCQkJDAeFw0xOTA1MjgxMTM5
+NTRaFw0yOTA1MjUxMTM5NTRaMCIxDzANBgNVBAMMBmZvY2tlcjEPMA0GA1UECgwG
+c2VydmVyMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA2gs+lVgbg0Xg
+Hd9Bj9sR1qqsBh9YzbccoN15ZLelYo0JCMGUq1jqrzOhjlffPA7iz6hon9hJVjps
+MJTdIjutDKtP2y4BSN5P/tJ04cATGdDaOKAQztRY/gDVhUl2ZL8tZcfH0yuJ91AZ
+pj7xZqGN12UDsLuiplevkBST5NBclks6lXK/ZyaUwHTXiB8my5BsMk8k97n8UfBo
+x22jGT20LffzoEzA2Z85qUgyBVGmMG7C/uotvyqykjm3MP+Qdd/JaajnDCPxdQst
+0olJwa2XIf9oslYFBo+R0wGVKMoa3fxWMlwh+43VOi1wsN9x6AdhmZvVMSCJ3MnB
+52PgIkHU6wIDAQABo4GWMIGTMAkGA1UdEwQCMAAwCwYDVR0PBAQDAgWgMBMGA1Ud
+JQQMMAoGCCsGAQUFBwMBMCQGA1UdEQQdMBuCBmZvY2tlcoIGZm9ja2Vygglsb2Nh
+bGhvc3QwHQYDVR0OBBYEFNQf86rQEpzfJ1cpl9Y/P0H5S4RvMB8GA1UdIwQYMBaA
+FObi7eibvs3eFS7c2SJux/dse0jfMA0GCSqGSIb3DQEBCwUAA4IBAQBaGzNihsml
+vJTAewEmHopgK6CTPoXqk4x/ufRQx6AswgaroXACGCCyKqvZ6X4Xr89PF0TZF4Lu
+q6sm9HYgVGZRUy2wlTjOCpgg/ObtfeWlfF89bcWn2RZKgca+N4CmA/t+3px7TY29
+3uYXRHU6wWKUVtSHLYU3VeVv0LDwZ9vslm3S2FVziZmjU7y85Ikm9ZweknU/+Su0
+aeo+um3v+/JUbnLJqwkNVg648NVmTlVi0NFVnsaC2R4tLTjh4MJrI/A1CL3hyqYd
+08uSAsn/6uZZgyAri9qpjdqou8wTPvlqwHdPfkMve4K9EnPox3ys3nP1Cv15Ldmi
+zUZ6LFLwhqB8
+-----END CERTIFICATE-----
diff --git a/deps/rabbitmq_prometheus/docker/rabbitmq-ssl/server_key.p12 b/deps/rabbitmq_prometheus/docker/rabbitmq-ssl/server_key.p12
new file mode 100644
index 0000000000..17be1b8a15
--- /dev/null
+++ b/deps/rabbitmq_prometheus/docker/rabbitmq-ssl/server_key.p12
Binary files differ
diff --git a/deps/rabbitmq_prometheus/docker/rabbitmq-ssl/server_key.pem b/deps/rabbitmq_prometheus/docker/rabbitmq-ssl/server_key.pem
new file mode 100644
index 0000000000..4092278dbc
--- /dev/null
+++ b/deps/rabbitmq_prometheus/docker/rabbitmq-ssl/server_key.pem
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpAIBAAKCAQEA2gs+lVgbg0XgHd9Bj9sR1qqsBh9YzbccoN15ZLelYo0JCMGU
+q1jqrzOhjlffPA7iz6hon9hJVjpsMJTdIjutDKtP2y4BSN5P/tJ04cATGdDaOKAQ
+ztRY/gDVhUl2ZL8tZcfH0yuJ91AZpj7xZqGN12UDsLuiplevkBST5NBclks6lXK/
+ZyaUwHTXiB8my5BsMk8k97n8UfBox22jGT20LffzoEzA2Z85qUgyBVGmMG7C/uot
+vyqykjm3MP+Qdd/JaajnDCPxdQst0olJwa2XIf9oslYFBo+R0wGVKMoa3fxWMlwh
++43VOi1wsN9x6AdhmZvVMSCJ3MnB52PgIkHU6wIDAQABAoIBAHUFlegHKnTwupCt
+sn0ngNJqNPrwNfPqM6YuVylz2oIi2VdmtYikpweyuSExDfEouBfk9YncRbfOkSQd
+sPTffEeRZcFnVm8qSVqkzvAd6uJvEb9UQNEQbFaeUfo/7Bez9wRqDVfLRTeDONDd
+dSe0yvBwGWPvwCJwaxvzv1ANd7N83I7JWajZrBhIWdgu5Tq2E5m8beFxBVvVOdNX
+s7hBdqqPf2SMQqDIw5/NIUuKc4dqBsnikEq9oSD9ueghDxRuNEYfsqk+31L6DpOr
+CGIRekyFn+J0HDc2WyNG60GadSrUUhrV3XxT8+bRGYahkVAK9OazeD1/phHA/87O
+scWGY8kCgYEA+aPORKSye5xgreEOYYQ7Xv1KqUadxj9X6MkUbFzDnLE3rrGbdyuy
+L5L23kdIU5bsgQtSfLSAWvAT/PxmUvoKHQKL9TOljCnUYaHJo5E75AA4+XG2Tc8V
+w93TFq8XBcGFzGsUC2HYSxRGTUmCV9LIRk8P7mpaUgcLQ2nX4tk1oR0CgYEA35ld
+UtJxCNDmJBpGmRZtUo5jPgYOWtJnbp23wjQaFYlbG759pZyFSLwY+HA2F03iFRaz
+VzBPNErs10EWovvFToKcWIdLA5GsL3nRhF2+z43BtxxR2PMGwtE7zzl0uxkl2fDM
+G3JFpgS03VDK/1wgxWFpiKG6fvLIuStoTpFit6cCgYEAv59ve914a84ToCT7Jvub
+n0SKhPmBrmV9E2EWXj+un56hAWWv0oAmHMKQ4PqSmCdnj36jaLodNLDIhw8vdE4e
+aTpFOIjWcIrDTdYks3uOH2vUUBVrPLrTGDA0EluALpjoHYusLTiEEM2VUUo11Tgl
+J2MU9YuJnSFvEeYzIxxcQDkCgYAGGa0OIWesEeb1fTIH4zDAW9ohSxEBMVRZFDx7
+M+iyRbkSnLPY3NhWD6uuu4D+ScWtRZ1G965xgsnLZZ80eyH67l1NnQNS6ZqFARHS
+Rqx6BSgP0tn4zUiV8Bdto+enYxxBQeyPoyJ7UQ2OLDIdnWnXeLzkWWU5KW6IfsKt
+whGraQKBgQDDq0wYR7jqPuiM9ptB7Jc6odZrpBJftIyfKzw3CSeeMbPZujvR6gl3
+32uTsZjlWNCy9TRwvVdbnzFJx5NTZmbUS3jupk3L/Je+TURGoxs2NGfSc9ldfQmA
+FEtUyhEHWPkGJTjhE1sRh5LSV3aj0wHd1Qd40sOX16wDLZT8AFz09g==
+-----END RSA PRIVATE KEY-----
diff --git a/deps/rabbitmq_prometheus/docker/rabbitmq-ssl_dist.config b/deps/rabbitmq_prometheus/docker/rabbitmq-ssl_dist.config
new file mode 100644
index 0000000000..44aa87df5a
--- /dev/null
+++ b/deps/rabbitmq_prometheus/docker/rabbitmq-ssl_dist.config
@@ -0,0 +1,10 @@
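+%% Referenced via -ssl_dist_optfile /etc/rabbitmq/ssl_dist.config in rabbitmq-env.conf;
+%% the cert/key paths below are assumed to be mounted there by the docker setup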
+[
+ {server, [
+ {certfile, "/etc/rabbitmq/ssl/server_certificate.pem"},
+ {keyfile, "/etc/rabbitmq/ssl/server_key.pem"},
+ {secure_renegotiate, true}
+ ]},
+ {client, [
+ {secure_renegotiate, true}
+ ]}
+].
diff --git a/deps/rabbitmq_prometheus/erlang.mk b/deps/rabbitmq_prometheus/erlang.mk
new file mode 100644
index 0000000000..4bfafd97de
--- /dev/null
+++ b/deps/rabbitmq_prometheus/erlang.mk
@@ -0,0 +1,7686 @@
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+.PHONY: all app apps deps search rel relup docs install-docs check tests clean distclean help erlang-mk
+
+ERLANG_MK_FILENAME := $(realpath $(lastword $(MAKEFILE_LIST)))
+export ERLANG_MK_FILENAME
+
+ERLANG_MK_VERSION = 2019.07.01-40-geb3e4b0
+ERLANG_MK_WITHOUT = plugins/proper plugins/protobuffs
+
+# Make 3.81 and 3.82 are deprecated.
+
+ifeq ($(MAKELEVEL)$(MAKE_VERSION),03.81)
+$(warning Please upgrade to GNU Make 4 or later: https://erlang.mk/guide/installation.html)
+endif
+
+ifeq ($(MAKELEVEL)$(MAKE_VERSION),03.82)
+$(warning Please upgrade to GNU Make 4 or later: https://erlang.mk/guide/installation.html)
+endif
+
+# Core configuration.
+
+PROJECT ?= $(notdir $(CURDIR))
+PROJECT := $(strip $(PROJECT))
+
+PROJECT_VERSION ?= rolling
+PROJECT_MOD ?= $(PROJECT)_app
+PROJECT_ENV ?= []
+
+# Verbosity.
+
+V ?= 0
+
+verbose_0 = @
+verbose_2 = set -x;
+verbose = $(verbose_$(V))
+
+ifeq ($(V),3)
+SHELL := $(SHELL) -x
+endif
+
+gen_verbose_0 = @echo " GEN " $@;
+gen_verbose_2 = set -x;
+gen_verbose = $(gen_verbose_$(V))
+
+gen_verbose_esc_0 = @echo " GEN " $$@;
+gen_verbose_esc_2 = set -x;
+gen_verbose_esc = $(gen_verbose_esc_$(V))
+
+# Temporary files directory.
+
+ERLANG_MK_TMP ?= $(CURDIR)/.erlang.mk
+export ERLANG_MK_TMP
+
+# "erl" command.
+
+ERL = erl +A1 -noinput -boot no_dot_erlang
+
+# Platform detection.
+
+ifeq ($(PLATFORM),)
+UNAME_S := $(shell uname -s)
+
+ifeq ($(UNAME_S),Linux)
+PLATFORM = linux
+else ifeq ($(UNAME_S),Darwin)
+PLATFORM = darwin
+else ifeq ($(UNAME_S),SunOS)
+PLATFORM = solaris
+else ifeq ($(UNAME_S),GNU)
+PLATFORM = gnu
+else ifeq ($(UNAME_S),FreeBSD)
+PLATFORM = freebsd
+else ifeq ($(UNAME_S),NetBSD)
+PLATFORM = netbsd
+else ifeq ($(UNAME_S),OpenBSD)
+PLATFORM = openbsd
+else ifeq ($(UNAME_S),DragonFly)
+PLATFORM = dragonfly
+else ifeq ($(shell uname -o),Msys)
+PLATFORM = msys2
+else
+$(error Unable to detect platform. Please open a ticket with the output of uname -a.)
+endif
+
+export PLATFORM
+endif
+
+# Core targets.
+
+all:: deps app rel
+
+# Noop to avoid a Make warning when there's nothing to do.
+rel::
+ $(verbose) :
+
+relup:: deps app
+
+check:: tests
+
+clean:: clean-crashdump
+
+clean-crashdump:
+ifneq ($(wildcard erl_crash.dump),)
+ $(gen_verbose) rm -f erl_crash.dump
+endif
+
+distclean:: clean distclean-tmp
+
+$(ERLANG_MK_TMP):
+ $(verbose) mkdir -p $(ERLANG_MK_TMP)
+
+distclean-tmp:
+ $(gen_verbose) rm -rf $(ERLANG_MK_TMP)
+
+help::
+ $(verbose) printf "%s\n" \
+ "erlang.mk (version $(ERLANG_MK_VERSION)) is distributed under the terms of the ISC License." \
+ "Copyright (c) 2013-2016 Loïc Hoguin <essen@ninenines.eu>" \
+ "" \
+ "Usage: [V=1] $(MAKE) [target]..." \
+ "" \
+ "Core targets:" \
+ " all Run deps, app and rel targets in that order" \
+ " app Compile the project" \
+ " deps Fetch dependencies (if needed) and compile them" \
+ " fetch-deps Fetch dependencies recursively (if needed) without compiling them" \
+ " list-deps List dependencies recursively on stdout" \
+ " search q=... Search for a package in the built-in index" \
+ " rel Build a release for this project, if applicable" \
+ " docs Build the documentation for this project" \
+ " install-docs Install the man pages for this project" \
+ " check Compile and run all tests and analysis for this project" \
+ " tests Run the tests for this project" \
+ " clean Delete temporary and output files from most targets" \
+ " distclean Delete all temporary and output files" \
+ " help Display this help and exit" \
+ " erlang-mk Update erlang.mk to the latest version"
+
+# Core functions.
+
+empty :=
+space := $(empty) $(empty)
+tab := $(empty)	$(empty)
+comma := ,
+
+define newline
+
+
+endef
+
+define comma_list
+$(subst $(space),$(comma),$(strip $(1)))
+endef
+
+define escape_dquotes
+$(subst ",\",$1)
+endef
+
+# Adding erlang.mk to make Erlang scripts that call init:get_plain_arguments() happy.
+define erlang
+$(ERL) $2 -pz $(ERLANG_MK_TMP)/rebar/ebin -eval "$(subst $(newline),,$(call escape_dquotes,$1))" -- erlang.mk
+endef
+
+ifeq ($(PLATFORM),msys2)
+core_native_path = $(shell cygpath -m $1)
+else
+core_native_path = $1
+endif
+
+core_http_get = curl -Lf$(if $(filter-out 0,$(V)),,s)o $(call core_native_path,$1) $2
+
+core_eq = $(and $(findstring $(1),$(2)),$(findstring $(2),$(1)))
+
+# We skip files that contain spaces because they end up causing issues.
+core_find = $(if $(wildcard $1),$(shell find $(1:%/=%) \( -type l -o -type f \) -name $(subst *,\*,$2) | grep -v " "))
+
+core_lc = $(subst A,a,$(subst B,b,$(subst C,c,$(subst D,d,$(subst E,e,$(subst F,f,$(subst G,g,$(subst H,h,$(subst I,i,$(subst J,j,$(subst K,k,$(subst L,l,$(subst M,m,$(subst N,n,$(subst O,o,$(subst P,p,$(subst Q,q,$(subst R,r,$(subst S,s,$(subst T,t,$(subst U,u,$(subst V,v,$(subst W,w,$(subst X,x,$(subst Y,y,$(subst Z,z,$(1)))))))))))))))))))))))))))
+
+core_ls = $(filter-out $(1),$(shell echo $(1)))
+
+# @todo Use a solution that does not require using perl.
+core_relpath = $(shell perl -e 'use File::Spec; print File::Spec->abs2rel(@ARGV) . "\n"' $1 $2)
+
+define core_render
+ printf -- '$(subst $(newline),\n,$(subst %,%%,$(subst ','\'',$(subst $(tab),$(WS),$(call $(1))))))\n' > $(2)
+endef
+
+# Automated update.
+
+ERLANG_MK_REPO ?= https://github.com/ninenines/erlang.mk
+ERLANG_MK_COMMIT ?=
+ERLANG_MK_BUILD_CONFIG ?= build.config
+ERLANG_MK_BUILD_DIR ?= .erlang.mk.build
+
+erlang-mk: WITHOUT ?= $(ERLANG_MK_WITHOUT)
+erlang-mk:
+ifdef ERLANG_MK_COMMIT
+ $(verbose) git clone $(ERLANG_MK_REPO) $(ERLANG_MK_BUILD_DIR)
+ $(verbose) cd $(ERLANG_MK_BUILD_DIR) && git checkout $(ERLANG_MK_COMMIT)
+else
+ $(verbose) git clone --depth 1 $(ERLANG_MK_REPO) $(ERLANG_MK_BUILD_DIR)
+endif
+ $(verbose) if [ -f $(ERLANG_MK_BUILD_CONFIG) ]; then cp $(ERLANG_MK_BUILD_CONFIG) $(ERLANG_MK_BUILD_DIR)/build.config; fi
+ $(gen_verbose) $(MAKE) --no-print-directory -C $(ERLANG_MK_BUILD_DIR) WITHOUT='$(strip $(WITHOUT))' UPGRADE=1
+ $(verbose) cp $(ERLANG_MK_BUILD_DIR)/erlang.mk ./erlang.mk
+ $(verbose) rm -rf $(ERLANG_MK_BUILD_DIR)
+ $(verbose) rm -rf $(ERLANG_MK_TMP)
+
+# The erlang.mk package index is bundled in the default erlang.mk build.
+# Search for the string "copyright" to skip to the rest of the code.
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-kerl
+
+KERL_INSTALL_DIR ?= $(HOME)/erlang
+
+ifeq ($(strip $(KERL)),)
+KERL := $(ERLANG_MK_TMP)/kerl/kerl
+endif
+
+KERL_DIR = $(ERLANG_MK_TMP)/kerl
+
+export KERL
+
+KERL_GIT ?= https://github.com/kerl/kerl
+KERL_COMMIT ?= master
+
+KERL_MAKEFLAGS ?=
+
+OTP_GIT ?= https://github.com/erlang/otp
+
+define kerl_otp_target
+$(KERL_INSTALL_DIR)/$(1): $(KERL)
+ $(verbose) if [ ! -d $$@ ]; then \
+ MAKEFLAGS="$(KERL_MAKEFLAGS)" $(KERL) build git $(OTP_GIT) $(1) $(1); \
+ $(KERL) install $(1) $(KERL_INSTALL_DIR)/$(1); \
+ fi
+endef
+
+define kerl_hipe_target
+$(KERL_INSTALL_DIR)/$1-native: $(KERL)
+ $(verbose) if [ ! -d $$@ ]; then \
+ KERL_CONFIGURE_OPTIONS=--enable-native-libs \
+ MAKEFLAGS="$(KERL_MAKEFLAGS)" $(KERL) build git $(OTP_GIT) $1 $1-native; \
+ $(KERL) install $1-native $(KERL_INSTALL_DIR)/$1-native; \
+ fi
+endef
+
+$(KERL): $(KERL_DIR)
+
+$(KERL_DIR): | $(ERLANG_MK_TMP)
+ $(gen_verbose) git clone --depth 1 $(KERL_GIT) $(ERLANG_MK_TMP)/kerl
+ $(verbose) cd $(ERLANG_MK_TMP)/kerl && git checkout $(KERL_COMMIT)
+ $(verbose) chmod +x $(KERL)
+
+distclean:: distclean-kerl
+
+distclean-kerl:
+ $(gen_verbose) rm -rf $(KERL_DIR)
+
+# Allow users to select which version of Erlang/OTP to use for a project.
+
+ifneq ($(strip $(LATEST_ERLANG_OTP)),)
+# In some environments it is necessary to filter out master.
+ERLANG_OTP := $(notdir $(lastword $(sort\
+ $(filter-out $(KERL_INSTALL_DIR)/master $(KERL_INSTALL_DIR)/OTP_R%,\
+ $(filter-out %-rc1 %-rc2 %-rc3,$(wildcard $(KERL_INSTALL_DIR)/*[^-native]))))))
+endif
+
+ERLANG_OTP ?=
+ERLANG_HIPE ?=
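+# Example (version string assumed): pin a project to a kerl-built release with
+#   make ERLANG_OTP=OTP-22.0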
+
+# Use kerl to enforce a specific Erlang/OTP version for a project.
+ifneq ($(strip $(ERLANG_OTP)),)
+export PATH := $(KERL_INSTALL_DIR)/$(ERLANG_OTP)/bin:$(PATH)
+SHELL := env PATH=$(PATH) $(SHELL)
+$(eval $(call kerl_otp_target,$(ERLANG_OTP)))
+
+# Build Erlang/OTP only if it doesn't already exist.
+ifeq ($(wildcard $(KERL_INSTALL_DIR)/$(ERLANG_OTP))$(BUILD_ERLANG_OTP),)
+$(info Building Erlang/OTP $(ERLANG_OTP)... Please wait...)
+$(shell $(MAKE) $(KERL_INSTALL_DIR)/$(ERLANG_OTP) ERLANG_OTP=$(ERLANG_OTP) BUILD_ERLANG_OTP=1 >&2)
+endif
+
+else
+# Same for a HiPE enabled VM.
+ifneq ($(strip $(ERLANG_HIPE)),)
+export PATH := $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native/bin:$(PATH)
+SHELL := env PATH=$(PATH) $(SHELL)
+$(eval $(call kerl_hipe_target,$(ERLANG_HIPE)))
+
+# Build Erlang/OTP only if it doesn't already exist.
+ifeq ($(wildcard $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native)$(BUILD_ERLANG_OTP),)
+$(info Building HiPE-enabled Erlang/OTP $(ERLANG_HIPE)... Please wait...)
+$(shell $(MAKE) $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native ERLANG_HIPE=$(ERLANG_HIPE) BUILD_ERLANG_OTP=1 >&2)
+endif
+
+endif
+endif
+
+PACKAGES += aberth
+pkg_aberth_name = aberth
+pkg_aberth_description = Generic BERT-RPC server in Erlang
+pkg_aberth_homepage = https://github.com/a13x/aberth
+pkg_aberth_fetch = git
+pkg_aberth_repo = https://github.com/a13x/aberth
+pkg_aberth_commit = master
+
+PACKAGES += active
+pkg_active_name = active
+pkg_active_description = Active development for Erlang: rebuild and reload source/binary files while the VM is running
+pkg_active_homepage = https://github.com/proger/active
+pkg_active_fetch = git
+pkg_active_repo = https://github.com/proger/active
+pkg_active_commit = master
+
+PACKAGES += actordb_core
+pkg_actordb_core_name = actordb_core
+pkg_actordb_core_description = ActorDB main source
+pkg_actordb_core_homepage = http://www.actordb.com/
+pkg_actordb_core_fetch = git
+pkg_actordb_core_repo = https://github.com/biokoda/actordb_core
+pkg_actordb_core_commit = master
+
+PACKAGES += actordb_thrift
+pkg_actordb_thrift_name = actordb_thrift
+pkg_actordb_thrift_description = Thrift API for ActorDB
+pkg_actordb_thrift_homepage = http://www.actordb.com/
+pkg_actordb_thrift_fetch = git
+pkg_actordb_thrift_repo = https://github.com/biokoda/actordb_thrift
+pkg_actordb_thrift_commit = master
+
+PACKAGES += aleppo
+pkg_aleppo_name = aleppo
+pkg_aleppo_description = Alternative Erlang Pre-Processor
+pkg_aleppo_homepage = https://github.com/ErlyORM/aleppo
+pkg_aleppo_fetch = git
+pkg_aleppo_repo = https://github.com/ErlyORM/aleppo
+pkg_aleppo_commit = master
+
+PACKAGES += alog
+pkg_alog_name = alog
+pkg_alog_description = Simply the best logging framework for Erlang
+pkg_alog_homepage = https://github.com/siberian-fast-food/alogger
+pkg_alog_fetch = git
+pkg_alog_repo = https://github.com/siberian-fast-food/alogger
+pkg_alog_commit = master
+
+PACKAGES += amqp_client
+pkg_amqp_client_name = amqp_client
+pkg_amqp_client_description = RabbitMQ Erlang AMQP client
+pkg_amqp_client_homepage = https://www.rabbitmq.com/erlang-client-user-guide.html
+pkg_amqp_client_fetch = git
+pkg_amqp_client_repo = https://github.com/rabbitmq/rabbitmq-erlang-client.git
+pkg_amqp_client_commit = master
+
+PACKAGES += annotations
+pkg_annotations_name = annotations
+pkg_annotations_description = Simple code instrumentation utilities
+pkg_annotations_homepage = https://github.com/hyperthunk/annotations
+pkg_annotations_fetch = git
+pkg_annotations_repo = https://github.com/hyperthunk/annotations
+pkg_annotations_commit = master
+
+PACKAGES += antidote
+pkg_antidote_name = antidote
+pkg_antidote_description = Large-scale computation without synchronisation
+pkg_antidote_homepage = https://syncfree.lip6.fr/
+pkg_antidote_fetch = git
+pkg_antidote_repo = https://github.com/SyncFree/antidote
+pkg_antidote_commit = master
+
+PACKAGES += apns
+pkg_apns_name = apns
+pkg_apns_description = Apple Push Notification Server for Erlang
+pkg_apns_homepage = http://inaka.github.com/apns4erl
+pkg_apns_fetch = git
+pkg_apns_repo = https://github.com/inaka/apns4erl
+pkg_apns_commit = master
+
+PACKAGES += asciideck
+pkg_asciideck_name = asciideck
+pkg_asciideck_description = Asciidoc for Erlang.
+pkg_asciideck_homepage = https://ninenines.eu
+pkg_asciideck_fetch = git
+pkg_asciideck_repo = https://github.com/ninenines/asciideck
+pkg_asciideck_commit = master
+
+PACKAGES += azdht
+pkg_azdht_name = azdht
+pkg_azdht_description = Azureus Distributed Hash Table (DHT) in Erlang
+pkg_azdht_homepage = https://github.com/arcusfelis/azdht
+pkg_azdht_fetch = git
+pkg_azdht_repo = https://github.com/arcusfelis/azdht
+pkg_azdht_commit = master
+
+PACKAGES += backoff
+pkg_backoff_name = backoff
+pkg_backoff_description = Simple exponential backoffs in Erlang
+pkg_backoff_homepage = https://github.com/ferd/backoff
+pkg_backoff_fetch = git
+pkg_backoff_repo = https://github.com/ferd/backoff
+pkg_backoff_commit = master
+
+PACKAGES += barrel_tcp
+pkg_barrel_tcp_name = barrel_tcp
+pkg_barrel_tcp_description = barrel is a generic TCP acceptor pool with low latency in Erlang.
+pkg_barrel_tcp_homepage = https://github.com/benoitc-attic/barrel_tcp
+pkg_barrel_tcp_fetch = git
+pkg_barrel_tcp_repo = https://github.com/benoitc-attic/barrel_tcp
+pkg_barrel_tcp_commit = master
+
+PACKAGES += basho_bench
+pkg_basho_bench_name = basho_bench
+pkg_basho_bench_description = A load-generation and testing tool for basically whatever you can write a returning Erlang function for.
+pkg_basho_bench_homepage = https://github.com/basho/basho_bench
+pkg_basho_bench_fetch = git
+pkg_basho_bench_repo = https://github.com/basho/basho_bench
+pkg_basho_bench_commit = master
+
+PACKAGES += bcrypt
+pkg_bcrypt_name = bcrypt
+pkg_bcrypt_description = Bcrypt Erlang / C library
+pkg_bcrypt_homepage = https://github.com/erlangpack/bcrypt
+pkg_bcrypt_fetch = git
+pkg_bcrypt_repo = https://github.com/erlangpack/bcrypt.git
+pkg_bcrypt_commit = master
+
+PACKAGES += beam
+pkg_beam_name = beam
+pkg_beam_description = BEAM emulator written in Erlang
+pkg_beam_homepage = https://github.com/tonyrog/beam
+pkg_beam_fetch = git
+pkg_beam_repo = https://github.com/tonyrog/beam
+pkg_beam_commit = master
+
+PACKAGES += beanstalk
+pkg_beanstalk_name = beanstalk
+pkg_beanstalk_description = An Erlang client for beanstalkd
+pkg_beanstalk_homepage = https://github.com/tim/erlang-beanstalk
+pkg_beanstalk_fetch = git
+pkg_beanstalk_repo = https://github.com/tim/erlang-beanstalk
+pkg_beanstalk_commit = master
+
+PACKAGES += bear
+pkg_bear_name = bear
+pkg_bear_description = a set of statistics functions for erlang
+pkg_bear_homepage = https://github.com/boundary/bear
+pkg_bear_fetch = git
+pkg_bear_repo = https://github.com/boundary/bear
+pkg_bear_commit = master
+
+PACKAGES += bertconf
+pkg_bertconf_name = bertconf
+pkg_bertconf_description = Make ETS tables out of static BERT files that are auto-reloaded
+pkg_bertconf_homepage = https://github.com/ferd/bertconf
+pkg_bertconf_fetch = git
+pkg_bertconf_repo = https://github.com/ferd/bertconf
+pkg_bertconf_commit = master
+
+PACKAGES += bifrost
+pkg_bifrost_name = bifrost
+pkg_bifrost_description = Erlang FTP Server Framework
+pkg_bifrost_homepage = https://github.com/thorstadt/bifrost
+pkg_bifrost_fetch = git
+pkg_bifrost_repo = https://github.com/thorstadt/bifrost
+pkg_bifrost_commit = master
+
+PACKAGES += binpp
+pkg_binpp_name = binpp
+pkg_binpp_description = Erlang Binary Pretty Printer
+pkg_binpp_homepage = https://github.com/jtendo/binpp
+pkg_binpp_fetch = git
+pkg_binpp_repo = https://github.com/jtendo/binpp
+pkg_binpp_commit = master
+
+PACKAGES += bisect
+pkg_bisect_name = bisect
+pkg_bisect_description = Ordered fixed-size binary dictionary in Erlang
+pkg_bisect_homepage = https://github.com/knutin/bisect
+pkg_bisect_fetch = git
+pkg_bisect_repo = https://github.com/knutin/bisect
+pkg_bisect_commit = master
+
+PACKAGES += bitcask
+pkg_bitcask_name = bitcask
+pkg_bitcask_description = because you need another key/value storage engine
+pkg_bitcask_homepage = https://github.com/basho/bitcask
+pkg_bitcask_fetch = git
+pkg_bitcask_repo = https://github.com/basho/bitcask
+pkg_bitcask_commit = develop
+
+PACKAGES += bitstore
+pkg_bitstore_name = bitstore
+pkg_bitstore_description = A document-based ontology development environment
+pkg_bitstore_homepage = https://github.com/bdionne/bitstore
+pkg_bitstore_fetch = git
+pkg_bitstore_repo = https://github.com/bdionne/bitstore
+pkg_bitstore_commit = master
+
+PACKAGES += bootstrap
+pkg_bootstrap_name = bootstrap
+pkg_bootstrap_description = A simple, yet powerful Erlang cluster bootstrapping application.
+pkg_bootstrap_homepage = https://github.com/schlagert/bootstrap
+pkg_bootstrap_fetch = git
+pkg_bootstrap_repo = https://github.com/schlagert/bootstrap
+pkg_bootstrap_commit = master
+
+PACKAGES += boss
+pkg_boss_name = boss
+pkg_boss_description = Erlang web MVC, now featuring Comet
+pkg_boss_homepage = https://github.com/ChicagoBoss/ChicagoBoss
+pkg_boss_fetch = git
+pkg_boss_repo = https://github.com/ChicagoBoss/ChicagoBoss
+pkg_boss_commit = master
+
+PACKAGES += boss_db
+pkg_boss_db_name = boss_db
+pkg_boss_db_description = BossDB: a sharded, caching, pooling, evented ORM for Erlang
+pkg_boss_db_homepage = https://github.com/ErlyORM/boss_db
+pkg_boss_db_fetch = git
+pkg_boss_db_repo = https://github.com/ErlyORM/boss_db
+pkg_boss_db_commit = master
+
+PACKAGES += brod
+pkg_brod_name = brod
+pkg_brod_description = Kafka client in Erlang
+pkg_brod_homepage = https://github.com/klarna/brod
+pkg_brod_fetch = git
+pkg_brod_repo = https://github.com/klarna/brod.git
+pkg_brod_commit = master
+
+PACKAGES += bson
+pkg_bson_name = bson
+pkg_bson_description = BSON documents in Erlang, see bsonspec.org
+pkg_bson_homepage = https://github.com/comtihon/bson-erlang
+pkg_bson_fetch = git
+pkg_bson_repo = https://github.com/comtihon/bson-erlang
+pkg_bson_commit = master
+
+PACKAGES += bullet
+pkg_bullet_name = bullet
+pkg_bullet_description = Simple, reliable, efficient streaming for Cowboy.
+pkg_bullet_homepage = http://ninenines.eu
+pkg_bullet_fetch = git
+pkg_bullet_repo = https://github.com/ninenines/bullet
+pkg_bullet_commit = master
+
+PACKAGES += cache
+pkg_cache_name = cache
+pkg_cache_description = Erlang in-memory cache
+pkg_cache_homepage = https://github.com/fogfish/cache
+pkg_cache_fetch = git
+pkg_cache_repo = https://github.com/fogfish/cache
+pkg_cache_commit = master
+
+PACKAGES += cake
+pkg_cake_name = cake
+pkg_cake_description = Really simple terminal colorization
+pkg_cake_homepage = https://github.com/darach/cake-erl
+pkg_cake_fetch = git
+pkg_cake_repo = https://github.com/darach/cake-erl
+pkg_cake_commit = master
+
+PACKAGES += carotene
+pkg_carotene_name = carotene
+pkg_carotene_description = Real-time server
+pkg_carotene_homepage = https://github.com/carotene/carotene
+pkg_carotene_fetch = git
+pkg_carotene_repo = https://github.com/carotene/carotene
+pkg_carotene_commit = master
+
+PACKAGES += cberl
+pkg_cberl_name = cberl
+pkg_cberl_description = NIF based Erlang bindings for Couchbase
+pkg_cberl_homepage = https://github.com/chitika/cberl
+pkg_cberl_fetch = git
+pkg_cberl_repo = https://github.com/chitika/cberl
+pkg_cberl_commit = master
+
+PACKAGES += cecho
+pkg_cecho_name = cecho
+pkg_cecho_description = An ncurses library for Erlang
+pkg_cecho_homepage = https://github.com/mazenharake/cecho
+pkg_cecho_fetch = git
+pkg_cecho_repo = https://github.com/mazenharake/cecho
+pkg_cecho_commit = master
+
+PACKAGES += cferl
+pkg_cferl_name = cferl
+pkg_cferl_description = Rackspace / Open Stack Cloud Files Erlang Client
+pkg_cferl_homepage = https://github.com/ddossot/cferl
+pkg_cferl_fetch = git
+pkg_cferl_repo = https://github.com/ddossot/cferl
+pkg_cferl_commit = master
+
+PACKAGES += chaos_monkey
+pkg_chaos_monkey_name = chaos_monkey
+pkg_chaos_monkey_description = This is The CHAOS MONKEY. It will kill your processes.
+pkg_chaos_monkey_homepage = https://github.com/dLuna/chaos_monkey
+pkg_chaos_monkey_fetch = git
+pkg_chaos_monkey_repo = https://github.com/dLuna/chaos_monkey
+pkg_chaos_monkey_commit = master
+
+PACKAGES += check_node
+pkg_check_node_name = check_node
+pkg_check_node_description = Nagios Scripts for monitoring Riak
+pkg_check_node_homepage = https://github.com/basho-labs/riak_nagios
+pkg_check_node_fetch = git
+pkg_check_node_repo = https://github.com/basho-labs/riak_nagios
+pkg_check_node_commit = master
+
+PACKAGES += chronos
+pkg_chronos_name = chronos
+pkg_chronos_description = Timer module for Erlang that makes it easy to abstract time out of the tests.
+pkg_chronos_homepage = https://github.com/lehoff/chronos
+pkg_chronos_fetch = git
+pkg_chronos_repo = https://github.com/lehoff/chronos
+pkg_chronos_commit = master
+
+PACKAGES += chumak
+pkg_chumak_name = chumak
+pkg_chumak_description = Pure Erlang implementation of ZeroMQ Message Transport Protocol.
+pkg_chumak_homepage = http://choven.ca
+pkg_chumak_fetch = git
+pkg_chumak_repo = https://github.com/chovencorp/chumak
+pkg_chumak_commit = master
+
+PACKAGES += cl
+pkg_cl_name = cl
+pkg_cl_description = OpenCL binding for Erlang
+pkg_cl_homepage = https://github.com/tonyrog/cl
+pkg_cl_fetch = git
+pkg_cl_repo = https://github.com/tonyrog/cl
+pkg_cl_commit = master
+
+PACKAGES += clique
+pkg_clique_name = clique
+pkg_clique_description = CLI Framework for Erlang
+pkg_clique_homepage = https://github.com/basho/clique
+pkg_clique_fetch = git
+pkg_clique_repo = https://github.com/basho/clique
+pkg_clique_commit = develop
+
+PACKAGES += cloudi_core
+pkg_cloudi_core_name = cloudi_core
+pkg_cloudi_core_description = CloudI internal service runtime
+pkg_cloudi_core_homepage = http://cloudi.org/
+pkg_cloudi_core_fetch = git
+pkg_cloudi_core_repo = https://github.com/CloudI/cloudi_core
+pkg_cloudi_core_commit = master
+
+PACKAGES += cloudi_service_api_requests
+pkg_cloudi_service_api_requests_name = cloudi_service_api_requests
+pkg_cloudi_service_api_requests_description = CloudI Service API requests (JSON-RPC/Erlang-term support)
+pkg_cloudi_service_api_requests_homepage = http://cloudi.org/
+pkg_cloudi_service_api_requests_fetch = git
+pkg_cloudi_service_api_requests_repo = https://github.com/CloudI/cloudi_service_api_requests
+pkg_cloudi_service_api_requests_commit = master
+
+PACKAGES += cloudi_service_db
+pkg_cloudi_service_db_name = cloudi_service_db
+pkg_cloudi_service_db_description = CloudI Database (in-memory/testing/generic)
+pkg_cloudi_service_db_homepage = http://cloudi.org/
+pkg_cloudi_service_db_fetch = git
+pkg_cloudi_service_db_repo = https://github.com/CloudI/cloudi_service_db
+pkg_cloudi_service_db_commit = master
+
+PACKAGES += cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_name = cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_description = Cassandra CloudI Service
+pkg_cloudi_service_db_cassandra_homepage = http://cloudi.org/
+pkg_cloudi_service_db_cassandra_fetch = git
+pkg_cloudi_service_db_cassandra_repo = https://github.com/CloudI/cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_commit = master
+
+PACKAGES += cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_name = cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_description = Cassandra CQL CloudI Service
+pkg_cloudi_service_db_cassandra_cql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_cassandra_cql_fetch = git
+pkg_cloudi_service_db_cassandra_cql_repo = https://github.com/CloudI/cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_commit = master
+
+PACKAGES += cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_name = cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_description = CouchDB CloudI Service
+pkg_cloudi_service_db_couchdb_homepage = http://cloudi.org/
+pkg_cloudi_service_db_couchdb_fetch = git
+pkg_cloudi_service_db_couchdb_repo = https://github.com/CloudI/cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_commit = master
+
+PACKAGES += cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_name = cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_description = elasticsearch CloudI Service
+pkg_cloudi_service_db_elasticsearch_homepage = http://cloudi.org/
+pkg_cloudi_service_db_elasticsearch_fetch = git
+pkg_cloudi_service_db_elasticsearch_repo = https://github.com/CloudI/cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_commit = master
+
+PACKAGES += cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_name = cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_description = memcached CloudI Service
+pkg_cloudi_service_db_memcached_homepage = http://cloudi.org/
+pkg_cloudi_service_db_memcached_fetch = git
+pkg_cloudi_service_db_memcached_repo = https://github.com/CloudI/cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_commit = master
+
+PACKAGES += cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_name = cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_description = MySQL CloudI Service
+pkg_cloudi_service_db_mysql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_mysql_fetch = git
+pkg_cloudi_service_db_mysql_repo = https://github.com/CloudI/cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_commit = master
+
+PACKAGES += cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_name = cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_description = PostgreSQL CloudI Service
+pkg_cloudi_service_db_pgsql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_pgsql_fetch = git
+pkg_cloudi_service_db_pgsql_repo = https://github.com/CloudI/cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_commit = master
+
+PACKAGES += cloudi_service_db_riak
+pkg_cloudi_service_db_riak_name = cloudi_service_db_riak
+pkg_cloudi_service_db_riak_description = Riak CloudI Service
+pkg_cloudi_service_db_riak_homepage = http://cloudi.org/
+pkg_cloudi_service_db_riak_fetch = git
+pkg_cloudi_service_db_riak_repo = https://github.com/CloudI/cloudi_service_db_riak
+pkg_cloudi_service_db_riak_commit = master
+
+PACKAGES += cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_name = cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_description = Tokyo Tyrant CloudI Service
+pkg_cloudi_service_db_tokyotyrant_homepage = http://cloudi.org/
+pkg_cloudi_service_db_tokyotyrant_fetch = git
+pkg_cloudi_service_db_tokyotyrant_repo = https://github.com/CloudI/cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_commit = master
+
+PACKAGES += cloudi_service_filesystem
+pkg_cloudi_service_filesystem_name = cloudi_service_filesystem
+pkg_cloudi_service_filesystem_description = Filesystem CloudI Service
+pkg_cloudi_service_filesystem_homepage = http://cloudi.org/
+pkg_cloudi_service_filesystem_fetch = git
+pkg_cloudi_service_filesystem_repo = https://github.com/CloudI/cloudi_service_filesystem
+pkg_cloudi_service_filesystem_commit = master
+
+PACKAGES += cloudi_service_http_client
+pkg_cloudi_service_http_client_name = cloudi_service_http_client
+pkg_cloudi_service_http_client_description = HTTP client CloudI Service
+pkg_cloudi_service_http_client_homepage = http://cloudi.org/
+pkg_cloudi_service_http_client_fetch = git
+pkg_cloudi_service_http_client_repo = https://github.com/CloudI/cloudi_service_http_client
+pkg_cloudi_service_http_client_commit = master
+
+PACKAGES += cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_name = cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_description = cowboy HTTP/HTTPS CloudI Service
+pkg_cloudi_service_http_cowboy_homepage = http://cloudi.org/
+pkg_cloudi_service_http_cowboy_fetch = git
+pkg_cloudi_service_http_cowboy_repo = https://github.com/CloudI/cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_commit = master
+
+PACKAGES += cloudi_service_http_elli
+pkg_cloudi_service_http_elli_name = cloudi_service_http_elli
+pkg_cloudi_service_http_elli_description = elli HTTP CloudI Service
+pkg_cloudi_service_http_elli_homepage = http://cloudi.org/
+pkg_cloudi_service_http_elli_fetch = git
+pkg_cloudi_service_http_elli_repo = https://github.com/CloudI/cloudi_service_http_elli
+pkg_cloudi_service_http_elli_commit = master
+
+PACKAGES += cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_name = cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_description = Map/Reduce CloudI Service
+pkg_cloudi_service_map_reduce_homepage = http://cloudi.org/
+pkg_cloudi_service_map_reduce_fetch = git
+pkg_cloudi_service_map_reduce_repo = https://github.com/CloudI/cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_commit = master
+
+PACKAGES += cloudi_service_oauth1
+pkg_cloudi_service_oauth1_name = cloudi_service_oauth1
+pkg_cloudi_service_oauth1_description = OAuth v1.0 CloudI Service
+pkg_cloudi_service_oauth1_homepage = http://cloudi.org/
+pkg_cloudi_service_oauth1_fetch = git
+pkg_cloudi_service_oauth1_repo = https://github.com/CloudI/cloudi_service_oauth1
+pkg_cloudi_service_oauth1_commit = master
+
+PACKAGES += cloudi_service_queue
+pkg_cloudi_service_queue_name = cloudi_service_queue
+pkg_cloudi_service_queue_description = Persistent Queue Service
+pkg_cloudi_service_queue_homepage = http://cloudi.org/
+pkg_cloudi_service_queue_fetch = git
+pkg_cloudi_service_queue_repo = https://github.com/CloudI/cloudi_service_queue
+pkg_cloudi_service_queue_commit = master
+
+PACKAGES += cloudi_service_quorum
+pkg_cloudi_service_quorum_name = cloudi_service_quorum
+pkg_cloudi_service_quorum_description = CloudI Quorum Service
+pkg_cloudi_service_quorum_homepage = http://cloudi.org/
+pkg_cloudi_service_quorum_fetch = git
+pkg_cloudi_service_quorum_repo = https://github.com/CloudI/cloudi_service_quorum
+pkg_cloudi_service_quorum_commit = master
+
+PACKAGES += cloudi_service_router
+pkg_cloudi_service_router_name = cloudi_service_router
+pkg_cloudi_service_router_description = CloudI Router Service
+pkg_cloudi_service_router_homepage = http://cloudi.org/
+pkg_cloudi_service_router_fetch = git
+pkg_cloudi_service_router_repo = https://github.com/CloudI/cloudi_service_router
+pkg_cloudi_service_router_commit = master
+
+PACKAGES += cloudi_service_tcp
+pkg_cloudi_service_tcp_name = cloudi_service_tcp
+pkg_cloudi_service_tcp_description = TCP CloudI Service
+pkg_cloudi_service_tcp_homepage = http://cloudi.org/
+pkg_cloudi_service_tcp_fetch = git
+pkg_cloudi_service_tcp_repo = https://github.com/CloudI/cloudi_service_tcp
+pkg_cloudi_service_tcp_commit = master
+
+PACKAGES += cloudi_service_timers
+pkg_cloudi_service_timers_name = cloudi_service_timers
+pkg_cloudi_service_timers_description = Timers CloudI Service
+pkg_cloudi_service_timers_homepage = http://cloudi.org/
+pkg_cloudi_service_timers_fetch = git
+pkg_cloudi_service_timers_repo = https://github.com/CloudI/cloudi_service_timers
+pkg_cloudi_service_timers_commit = master
+
+PACKAGES += cloudi_service_udp
+pkg_cloudi_service_udp_name = cloudi_service_udp
+pkg_cloudi_service_udp_description = UDP CloudI Service
+pkg_cloudi_service_udp_homepage = http://cloudi.org/
+pkg_cloudi_service_udp_fetch = git
+pkg_cloudi_service_udp_repo = https://github.com/CloudI/cloudi_service_udp
+pkg_cloudi_service_udp_commit = master
+
+PACKAGES += cloudi_service_validate
+pkg_cloudi_service_validate_name = cloudi_service_validate
+pkg_cloudi_service_validate_description = CloudI Validate Service
+pkg_cloudi_service_validate_homepage = http://cloudi.org/
+pkg_cloudi_service_validate_fetch = git
+pkg_cloudi_service_validate_repo = https://github.com/CloudI/cloudi_service_validate
+pkg_cloudi_service_validate_commit = master
+
+PACKAGES += cloudi_service_zeromq
+pkg_cloudi_service_zeromq_name = cloudi_service_zeromq
+pkg_cloudi_service_zeromq_description = ZeroMQ CloudI Service
+pkg_cloudi_service_zeromq_homepage = http://cloudi.org/
+pkg_cloudi_service_zeromq_fetch = git
+pkg_cloudi_service_zeromq_repo = https://github.com/CloudI/cloudi_service_zeromq
+pkg_cloudi_service_zeromq_commit = master
+
+PACKAGES += cluster_info
+pkg_cluster_info_name = cluster_info
+pkg_cluster_info_description = Fork of Hibari's nifty cluster_info OTP app
+pkg_cluster_info_homepage = https://github.com/basho/cluster_info
+pkg_cluster_info_fetch = git
+pkg_cluster_info_repo = https://github.com/basho/cluster_info
+pkg_cluster_info_commit = master
+
+PACKAGES += color
+pkg_color_name = color
+pkg_color_description = ANSI colors for your Erlang
+pkg_color_homepage = https://github.com/julianduque/erlang-color
+pkg_color_fetch = git
+pkg_color_repo = https://github.com/julianduque/erlang-color
+pkg_color_commit = master
+
+PACKAGES += confetti
+pkg_confetti_name = confetti
+pkg_confetti_description = Erlang configuration provider / application:get_env/2 on steroids
+pkg_confetti_homepage = https://github.com/jtendo/confetti
+pkg_confetti_fetch = git
+pkg_confetti_repo = https://github.com/jtendo/confetti
+pkg_confetti_commit = master
+
+PACKAGES += couchbeam
+pkg_couchbeam_name = couchbeam
+pkg_couchbeam_description = Apache CouchDB client in Erlang
+pkg_couchbeam_homepage = https://github.com/benoitc/couchbeam
+pkg_couchbeam_fetch = git
+pkg_couchbeam_repo = https://github.com/benoitc/couchbeam
+pkg_couchbeam_commit = master
+
+PACKAGES += covertool
+pkg_covertool_name = covertool
+pkg_covertool_description = Tool to convert Erlang cover data files into Cobertura XML reports
+pkg_covertool_homepage = https://github.com/idubrov/covertool
+pkg_covertool_fetch = git
+pkg_covertool_repo = https://github.com/idubrov/covertool
+pkg_covertool_commit = master
+
+PACKAGES += cowboy
+pkg_cowboy_name = cowboy
+pkg_cowboy_description = Small, fast and modular HTTP server.
+pkg_cowboy_homepage = http://ninenines.eu
+pkg_cowboy_fetch = git
+pkg_cowboy_repo = https://github.com/ninenines/cowboy
+pkg_cowboy_commit = 1.0.4
+
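+# For illustration only: cowboy is pinned to 1.0.4 in this index. A project
+# that needs a different version can override the commit (or the whole
+# dep_cowboy specification) in its own Makefile; the tag below is
+# hypothetical:
+#
+#     DEPS = cowboy
+#     dep_cowboy_commit = 2.9.0
+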
+PACKAGES += cowdb
+pkg_cowdb_name = cowdb
+pkg_cowdb_description = Pure Key/Value database library for Erlang Applications
+pkg_cowdb_homepage = https://github.com/refuge/cowdb
+pkg_cowdb_fetch = git
+pkg_cowdb_repo = https://github.com/refuge/cowdb
+pkg_cowdb_commit = master
+
+PACKAGES += cowlib
+pkg_cowlib_name = cowlib
+pkg_cowlib_description = Support library for manipulating Web protocols.
+pkg_cowlib_homepage = http://ninenines.eu
+pkg_cowlib_fetch = git
+pkg_cowlib_repo = https://github.com/ninenines/cowlib
+pkg_cowlib_commit = 1.0.2
+
+PACKAGES += cpg
+pkg_cpg_name = cpg
+pkg_cpg_description = CloudI Process Groups
+pkg_cpg_homepage = https://github.com/okeuday/cpg
+pkg_cpg_fetch = git
+pkg_cpg_repo = https://github.com/okeuday/cpg
+pkg_cpg_commit = master
+
+PACKAGES += cqerl
+pkg_cqerl_name = cqerl
+pkg_cqerl_description = Native Erlang CQL client for Cassandra
+pkg_cqerl_homepage = https://matehat.github.io/cqerl/
+pkg_cqerl_fetch = git
+pkg_cqerl_repo = https://github.com/matehat/cqerl
+pkg_cqerl_commit = master
+
+PACKAGES += cr
+pkg_cr_name = cr
+pkg_cr_description = Chain Replication
+pkg_cr_homepage = https://synrc.com/apps/cr/doc/cr.htm
+pkg_cr_fetch = git
+pkg_cr_repo = https://github.com/spawnproc/cr
+pkg_cr_commit = master
+
+PACKAGES += cuttlefish
+pkg_cuttlefish_name = cuttlefish
+pkg_cuttlefish_description = never lose your childlike sense of wonder baby cuttlefish, promise me?
+pkg_cuttlefish_homepage = https://github.com/basho/cuttlefish
+pkg_cuttlefish_fetch = git
+pkg_cuttlefish_repo = https://github.com/basho/cuttlefish
+pkg_cuttlefish_commit = master
+
+PACKAGES += damocles
+pkg_damocles_name = damocles
+pkg_damocles_description = Erlang library for generating adversarial network conditions for QAing distributed applications/systems on a single Linux box.
+pkg_damocles_homepage = https://github.com/lostcolony/damocles
+pkg_damocles_fetch = git
+pkg_damocles_repo = https://github.com/lostcolony/damocles
+pkg_damocles_commit = master
+
+PACKAGES += debbie
+pkg_debbie_name = debbie
+pkg_debbie_description = .DEB Built In Erlang
+pkg_debbie_homepage = https://github.com/crownedgrouse/debbie
+pkg_debbie_fetch = git
+pkg_debbie_repo = https://github.com/crownedgrouse/debbie
+pkg_debbie_commit = master
+
+PACKAGES += decimal
+pkg_decimal_name = decimal
+pkg_decimal_description = An Erlang decimal arithmetic library
+pkg_decimal_homepage = https://github.com/tim/erlang-decimal
+pkg_decimal_fetch = git
+pkg_decimal_repo = https://github.com/tim/erlang-decimal
+pkg_decimal_commit = master
+
+PACKAGES += detergent
+pkg_detergent_name = detergent
+pkg_detergent_description = An emulsifying Erlang SOAP library
+pkg_detergent_homepage = https://github.com/devinus/detergent
+pkg_detergent_fetch = git
+pkg_detergent_repo = https://github.com/devinus/detergent
+pkg_detergent_commit = master
+
+PACKAGES += detest
+pkg_detest_name = detest
+pkg_detest_description = Tool for running tests on a cluster of erlang nodes
+pkg_detest_homepage = https://github.com/biokoda/detest
+pkg_detest_fetch = git
+pkg_detest_repo = https://github.com/biokoda/detest
+pkg_detest_commit = master
+
+PACKAGES += dh_date
+pkg_dh_date_name = dh_date
+pkg_dh_date_description = Date formatting / parsing library for erlang
+pkg_dh_date_homepage = https://github.com/daleharvey/dh_date
+pkg_dh_date_fetch = git
+pkg_dh_date_repo = https://github.com/daleharvey/dh_date
+pkg_dh_date_commit = master
+
+PACKAGES += dirbusterl
+pkg_dirbusterl_name = dirbusterl
+pkg_dirbusterl_description = DirBuster successor in Erlang
+pkg_dirbusterl_homepage = https://github.com/silentsignal/DirBustErl
+pkg_dirbusterl_fetch = git
+pkg_dirbusterl_repo = https://github.com/silentsignal/DirBustErl
+pkg_dirbusterl_commit = master
+
+PACKAGES += dispcount
+pkg_dispcount_name = dispcount
+pkg_dispcount_description = Erlang task dispatcher based on ETS counters.
+pkg_dispcount_homepage = https://github.com/ferd/dispcount
+pkg_dispcount_fetch = git
+pkg_dispcount_repo = https://github.com/ferd/dispcount
+pkg_dispcount_commit = master
+
+PACKAGES += dlhttpc
+pkg_dlhttpc_name = dlhttpc
+pkg_dlhttpc_description = dispcount-based lhttpc fork for massive amounts of requests to limited endpoints
+pkg_dlhttpc_homepage = https://github.com/ferd/dlhttpc
+pkg_dlhttpc_fetch = git
+pkg_dlhttpc_repo = https://github.com/ferd/dlhttpc
+pkg_dlhttpc_commit = master
+
+PACKAGES += dns
+pkg_dns_name = dns
+pkg_dns_description = Erlang DNS library
+pkg_dns_homepage = https://github.com/aetrion/dns_erlang
+pkg_dns_fetch = git
+pkg_dns_repo = https://github.com/aetrion/dns_erlang
+pkg_dns_commit = master
+
+PACKAGES += dnssd
+pkg_dnssd_name = dnssd
+pkg_dnssd_description = Erlang interface to Apple's Bonjour DNS Service Discovery implementation
+pkg_dnssd_homepage = https://github.com/benoitc/dnssd_erlang
+pkg_dnssd_fetch = git
+pkg_dnssd_repo = https://github.com/benoitc/dnssd_erlang
+pkg_dnssd_commit = master
+
+PACKAGES += dynamic_compile
+pkg_dynamic_compile_name = dynamic_compile
+pkg_dynamic_compile_description = compile and load erlang modules from string input
+pkg_dynamic_compile_homepage = https://github.com/jkvor/dynamic_compile
+pkg_dynamic_compile_fetch = git
+pkg_dynamic_compile_repo = https://github.com/jkvor/dynamic_compile
+pkg_dynamic_compile_commit = master
+
+PACKAGES += e2
+pkg_e2_name = e2
+pkg_e2_description = Library to simplify writing correct OTP applications.
+pkg_e2_homepage = http://e2project.org
+pkg_e2_fetch = git
+pkg_e2_repo = https://github.com/gar1t/e2
+pkg_e2_commit = master
+
+PACKAGES += eamf
+pkg_eamf_name = eamf
+pkg_eamf_description = eAMF provides Action Message Format (AMF) support for Erlang
+pkg_eamf_homepage = https://github.com/mrinalwadhwa/eamf
+pkg_eamf_fetch = git
+pkg_eamf_repo = https://github.com/mrinalwadhwa/eamf
+pkg_eamf_commit = master
+
+PACKAGES += eavro
+pkg_eavro_name = eavro
+pkg_eavro_description = Apache Avro encoder/decoder
+pkg_eavro_homepage = https://github.com/SIfoxDevTeam/eavro
+pkg_eavro_fetch = git
+pkg_eavro_repo = https://github.com/SIfoxDevTeam/eavro
+pkg_eavro_commit = master
+
+PACKAGES += ecapnp
+pkg_ecapnp_name = ecapnp
+pkg_ecapnp_description = Cap'n Proto library for Erlang
+pkg_ecapnp_homepage = https://github.com/kaos/ecapnp
+pkg_ecapnp_fetch = git
+pkg_ecapnp_repo = https://github.com/kaos/ecapnp
+pkg_ecapnp_commit = master
+
+PACKAGES += econfig
+pkg_econfig_name = econfig
+pkg_econfig_description = simple Erlang config handler using INI files
+pkg_econfig_homepage = https://github.com/benoitc/econfig
+pkg_econfig_fetch = git
+pkg_econfig_repo = https://github.com/benoitc/econfig
+pkg_econfig_commit = master
+
+PACKAGES += edate
+pkg_edate_name = edate
+pkg_edate_description = date manipulation library for erlang
+pkg_edate_homepage = https://github.com/dweldon/edate
+pkg_edate_fetch = git
+pkg_edate_repo = https://github.com/dweldon/edate
+pkg_edate_commit = master
+
+PACKAGES += edgar
+pkg_edgar_name = edgar
+pkg_edgar_description = Erlang Does GNU AR
+pkg_edgar_homepage = https://github.com/crownedgrouse/edgar
+pkg_edgar_fetch = git
+pkg_edgar_repo = https://github.com/crownedgrouse/edgar
+pkg_edgar_commit = master
+
+PACKAGES += edis
+pkg_edis_name = edis
+pkg_edis_description = An Erlang implementation of Redis KV Store
+pkg_edis_homepage = http://inaka.github.com/edis/
+pkg_edis_fetch = git
+pkg_edis_repo = https://github.com/inaka/edis
+pkg_edis_commit = master
+
+PACKAGES += edns
+pkg_edns_name = edns
+pkg_edns_description = Erlang/OTP DNS server
+pkg_edns_homepage = https://github.com/hcvst/erlang-dns
+pkg_edns_fetch = git
+pkg_edns_repo = https://github.com/hcvst/erlang-dns
+pkg_edns_commit = master
+
+PACKAGES += edown
+pkg_edown_name = edown
+pkg_edown_description = EDoc extension for generating Github-flavored Markdown
+pkg_edown_homepage = https://github.com/uwiger/edown
+pkg_edown_fetch = git
+pkg_edown_repo = https://github.com/uwiger/edown
+pkg_edown_commit = master
+
+PACKAGES += eep
+pkg_eep_name = eep
+pkg_eep_description = Erlang Easy Profiling (eep) application provides a way to analyze application performance and call hierarchy
+pkg_eep_homepage = https://github.com/virtan/eep
+pkg_eep_fetch = git
+pkg_eep_repo = https://github.com/virtan/eep
+pkg_eep_commit = master
+
+PACKAGES += eep_app
+pkg_eep_app_name = eep_app
+pkg_eep_app_description = Embedded Event Processing
+pkg_eep_app_homepage = https://github.com/darach/eep-erl
+pkg_eep_app_fetch = git
+pkg_eep_app_repo = https://github.com/darach/eep-erl
+pkg_eep_app_commit = master
+
+PACKAGES += efene
+pkg_efene_name = efene
+pkg_efene_description = Alternative syntax for the Erlang Programming Language focusing on simplicity, ease of use and programmer UX
+pkg_efene_homepage = https://github.com/efene/efene
+pkg_efene_fetch = git
+pkg_efene_repo = https://github.com/efene/efene
+pkg_efene_commit = master
+
+PACKAGES += egeoip
+pkg_egeoip_name = egeoip
+pkg_egeoip_description = Erlang IP Geolocation module, currently supporting the MaxMind GeoLite City Database.
+pkg_egeoip_homepage = https://github.com/mochi/egeoip
+pkg_egeoip_fetch = git
+pkg_egeoip_repo = https://github.com/mochi/egeoip
+pkg_egeoip_commit = master
+
+PACKAGES += ehsa
+pkg_ehsa_name = ehsa
+pkg_ehsa_description = Erlang HTTP server basic and digest authentication modules
+pkg_ehsa_homepage = https://bitbucket.org/a12n/ehsa
+pkg_ehsa_fetch = hg
+pkg_ehsa_repo = https://bitbucket.org/a12n/ehsa
+pkg_ehsa_commit = default
+
+PACKAGES += ej
+pkg_ej_name = ej
+pkg_ej_description = Helper module for working with Erlang terms representing JSON
+pkg_ej_homepage = https://github.com/seth/ej
+pkg_ej_fetch = git
+pkg_ej_repo = https://github.com/seth/ej
+pkg_ej_commit = master
+
+PACKAGES += ejabberd
+pkg_ejabberd_name = ejabberd
+pkg_ejabberd_description = Robust, ubiquitous and massively scalable Jabber / XMPP Instant Messaging platform
+pkg_ejabberd_homepage = https://github.com/processone/ejabberd
+pkg_ejabberd_fetch = git
+pkg_ejabberd_repo = https://github.com/processone/ejabberd
+pkg_ejabberd_commit = master
+
+PACKAGES += ejwt
+pkg_ejwt_name = ejwt
+pkg_ejwt_description = erlang library for JSON Web Token
+pkg_ejwt_homepage = https://github.com/artefactop/ejwt
+pkg_ejwt_fetch = git
+pkg_ejwt_repo = https://github.com/artefactop/ejwt
+pkg_ejwt_commit = master
+
+PACKAGES += ekaf
+pkg_ekaf_name = ekaf
+pkg_ekaf_description = A minimal, high-performance Kafka client in Erlang.
+pkg_ekaf_homepage = https://github.com/helpshift/ekaf
+pkg_ekaf_fetch = git
+pkg_ekaf_repo = https://github.com/helpshift/ekaf
+pkg_ekaf_commit = master
+
+PACKAGES += elarm
+pkg_elarm_name = elarm
+pkg_elarm_description = Alarm Manager for Erlang.
+pkg_elarm_homepage = https://github.com/esl/elarm
+pkg_elarm_fetch = git
+pkg_elarm_repo = https://github.com/esl/elarm
+pkg_elarm_commit = master
+
+PACKAGES += eleveldb
+pkg_eleveldb_name = eleveldb
+pkg_eleveldb_description = Erlang LevelDB API
+pkg_eleveldb_homepage = https://github.com/basho/eleveldb
+pkg_eleveldb_fetch = git
+pkg_eleveldb_repo = https://github.com/basho/eleveldb
+pkg_eleveldb_commit = master
+
+PACKAGES += elixir
+pkg_elixir_name = elixir
+pkg_elixir_description = Elixir is a dynamic, functional language designed for building scalable and maintainable applications
+pkg_elixir_homepage = https://elixir-lang.org/
+pkg_elixir_fetch = git
+pkg_elixir_repo = https://github.com/elixir-lang/elixir
+pkg_elixir_commit = master
+
+PACKAGES += elli
+pkg_elli_name = elli
+pkg_elli_description = Simple, robust and performant Erlang web server
+pkg_elli_homepage = https://github.com/elli-lib/elli
+pkg_elli_fetch = git
+pkg_elli_repo = https://github.com/elli-lib/elli
+pkg_elli_commit = master
+
+PACKAGES += elvis
+pkg_elvis_name = elvis
+pkg_elvis_description = Erlang Style Reviewer
+pkg_elvis_homepage = https://github.com/inaka/elvis
+pkg_elvis_fetch = git
+pkg_elvis_repo = https://github.com/inaka/elvis
+pkg_elvis_commit = master
+
+PACKAGES += emagick
+pkg_emagick_name = emagick
+pkg_emagick_description = Wrapper for Graphics/ImageMagick command line tool.
+pkg_emagick_homepage = https://github.com/kivra/emagick
+pkg_emagick_fetch = git
+pkg_emagick_repo = https://github.com/kivra/emagick
+pkg_emagick_commit = master
+
+PACKAGES += emysql
+pkg_emysql_name = emysql
+pkg_emysql_description = Stable, pure Erlang MySQL driver.
+pkg_emysql_homepage = https://github.com/Eonblast/Emysql
+pkg_emysql_fetch = git
+pkg_emysql_repo = https://github.com/Eonblast/Emysql
+pkg_emysql_commit = master
+
+PACKAGES += enm
+pkg_enm_name = enm
+pkg_enm_description = Erlang driver for nanomsg
+pkg_enm_homepage = https://github.com/basho/enm
+pkg_enm_fetch = git
+pkg_enm_repo = https://github.com/basho/enm
+pkg_enm_commit = master
+
+PACKAGES += entop
+pkg_entop_name = entop
+pkg_entop_description = A top-like tool for monitoring an Erlang node
+pkg_entop_homepage = https://github.com/mazenharake/entop
+pkg_entop_fetch = git
+pkg_entop_repo = https://github.com/mazenharake/entop
+pkg_entop_commit = master
+
+PACKAGES += epcap
+pkg_epcap_name = epcap
+pkg_epcap_description = Erlang packet capture interface using pcap
+pkg_epcap_homepage = https://github.com/msantos/epcap
+pkg_epcap_fetch = git
+pkg_epcap_repo = https://github.com/msantos/epcap
+pkg_epcap_commit = master
+
+PACKAGES += eper
+pkg_eper_name = eper
+pkg_eper_description = Erlang performance and debugging tools.
+pkg_eper_homepage = https://github.com/massemanet/eper
+pkg_eper_fetch = git
+pkg_eper_repo = https://github.com/massemanet/eper
+pkg_eper_commit = master
+
+PACKAGES += epgsql
+pkg_epgsql_name = epgsql
+pkg_epgsql_description = Erlang PostgreSQL client library.
+pkg_epgsql_homepage = https://github.com/epgsql/epgsql
+pkg_epgsql_fetch = git
+pkg_epgsql_repo = https://github.com/epgsql/epgsql
+pkg_epgsql_commit = master
+
+PACKAGES += episcina
+pkg_episcina_name = episcina
+pkg_episcina_description = A simple, non-intrusive resource pool for connections
+pkg_episcina_homepage = https://github.com/erlware/episcina
+pkg_episcina_fetch = git
+pkg_episcina_repo = https://github.com/erlware/episcina
+pkg_episcina_commit = master
+
+PACKAGES += eplot
+pkg_eplot_name = eplot
+pkg_eplot_description = A plot engine written in erlang.
+pkg_eplot_homepage = https://github.com/psyeugenic/eplot
+pkg_eplot_fetch = git
+pkg_eplot_repo = https://github.com/psyeugenic/eplot
+pkg_eplot_commit = master
+
+PACKAGES += epocxy
+pkg_epocxy_name = epocxy
+pkg_epocxy_description = Erlang Patterns of Concurrency
+pkg_epocxy_homepage = https://github.com/duomark/epocxy
+pkg_epocxy_fetch = git
+pkg_epocxy_repo = https://github.com/duomark/epocxy
+pkg_epocxy_commit = master
+
+PACKAGES += epubnub
+pkg_epubnub_name = epubnub
+pkg_epubnub_description = Erlang PubNub API
+pkg_epubnub_homepage = https://github.com/tsloughter/epubnub
+pkg_epubnub_fetch = git
+pkg_epubnub_repo = https://github.com/tsloughter/epubnub
+pkg_epubnub_commit = master
+
+PACKAGES += eqm
+pkg_eqm_name = eqm
+pkg_eqm_description = Erlang pub sub with supply-demand channels
+pkg_eqm_homepage = https://github.com/loucash/eqm
+pkg_eqm_fetch = git
+pkg_eqm_repo = https://github.com/loucash/eqm
+pkg_eqm_commit = master
+
+PACKAGES += eredis
+pkg_eredis_name = eredis
+pkg_eredis_description = Erlang Redis client
+pkg_eredis_homepage = https://github.com/wooga/eredis
+pkg_eredis_fetch = git
+pkg_eredis_repo = https://github.com/wooga/eredis
+pkg_eredis_commit = master
+
+PACKAGES += eredis_pool
+pkg_eredis_pool_name = eredis_pool
+pkg_eredis_pool_description = eredis_pool is a pool of Redis clients, using eredis and poolboy.
+pkg_eredis_pool_homepage = https://github.com/hiroeorz/eredis_pool
+pkg_eredis_pool_fetch = git
+pkg_eredis_pool_repo = https://github.com/hiroeorz/eredis_pool
+pkg_eredis_pool_commit = master
+
+PACKAGES += erl_streams
+pkg_erl_streams_name = erl_streams
+pkg_erl_streams_description = Streams in Erlang
+pkg_erl_streams_homepage = https://github.com/epappas/erl_streams
+pkg_erl_streams_fetch = git
+pkg_erl_streams_repo = https://github.com/epappas/erl_streams
+pkg_erl_streams_commit = master
+
+PACKAGES += erlang_cep
+pkg_erlang_cep_name = erlang_cep
+pkg_erlang_cep_description = A basic CEP package written in erlang
+pkg_erlang_cep_homepage = https://github.com/danmacklin/erlang_cep
+pkg_erlang_cep_fetch = git
+pkg_erlang_cep_repo = https://github.com/danmacklin/erlang_cep
+pkg_erlang_cep_commit = master
+
+PACKAGES += erlang_js
+pkg_erlang_js_name = erlang_js
+pkg_erlang_js_description = A linked-in driver for Erlang to Mozilla's SpiderMonkey JavaScript runtime.
+pkg_erlang_js_homepage = https://github.com/basho/erlang_js
+pkg_erlang_js_fetch = git
+pkg_erlang_js_repo = https://github.com/basho/erlang_js
+pkg_erlang_js_commit = master
+
+PACKAGES += erlang_localtime
+pkg_erlang_localtime_name = erlang_localtime
+pkg_erlang_localtime_description = Erlang library for conversion from one local time to another
+pkg_erlang_localtime_homepage = https://github.com/dmitryme/erlang_localtime
+pkg_erlang_localtime_fetch = git
+pkg_erlang_localtime_repo = https://github.com/dmitryme/erlang_localtime
+pkg_erlang_localtime_commit = master
+
+PACKAGES += erlang_smtp
+pkg_erlang_smtp_name = erlang_smtp
+pkg_erlang_smtp_description = Erlang SMTP and POP3 server code.
+pkg_erlang_smtp_homepage = https://github.com/tonyg/erlang-smtp
+pkg_erlang_smtp_fetch = git
+pkg_erlang_smtp_repo = https://github.com/tonyg/erlang-smtp
+pkg_erlang_smtp_commit = master
+
+PACKAGES += erlang_term
+pkg_erlang_term_name = erlang_term
+pkg_erlang_term_description = Erlang Term Info
+pkg_erlang_term_homepage = https://github.com/okeuday/erlang_term
+pkg_erlang_term_fetch = git
+pkg_erlang_term_repo = https://github.com/okeuday/erlang_term
+pkg_erlang_term_commit = master
+
+PACKAGES += erlastic_search
+pkg_erlastic_search_name = erlastic_search
+pkg_erlastic_search_description = An Erlang app for communicating with Elasticsearch's REST interface.
+pkg_erlastic_search_homepage = https://github.com/tsloughter/erlastic_search
+pkg_erlastic_search_fetch = git
+pkg_erlastic_search_repo = https://github.com/tsloughter/erlastic_search
+pkg_erlastic_search_commit = master
+
+PACKAGES += erlasticsearch
+pkg_erlasticsearch_name = erlasticsearch
+pkg_erlasticsearch_description = Erlang thrift interface to elastic_search
+pkg_erlasticsearch_homepage = https://github.com/dieswaytoofast/erlasticsearch
+pkg_erlasticsearch_fetch = git
+pkg_erlasticsearch_repo = https://github.com/dieswaytoofast/erlasticsearch
+pkg_erlasticsearch_commit = master
+
+PACKAGES += erlbrake
+pkg_erlbrake_name = erlbrake
+pkg_erlbrake_description = Erlang Airbrake notification client
+pkg_erlbrake_homepage = https://github.com/kenpratt/erlbrake
+pkg_erlbrake_fetch = git
+pkg_erlbrake_repo = https://github.com/kenpratt/erlbrake
+pkg_erlbrake_commit = master
+
+PACKAGES += erlcloud
+pkg_erlcloud_name = erlcloud
+pkg_erlcloud_description = Cloud Computing library for erlang (Amazon EC2, S3, SQS, SimpleDB, Mechanical Turk, ELB)
+pkg_erlcloud_homepage = https://github.com/gleber/erlcloud
+pkg_erlcloud_fetch = git
+pkg_erlcloud_repo = https://github.com/gleber/erlcloud
+pkg_erlcloud_commit = master
+
+PACKAGES += erlcron
+pkg_erlcron_name = erlcron
+pkg_erlcron_description = Erlang cronish system
+pkg_erlcron_homepage = https://github.com/erlware/erlcron
+pkg_erlcron_fetch = git
+pkg_erlcron_repo = https://github.com/erlware/erlcron
+pkg_erlcron_commit = master
+
+PACKAGES += erldb
+pkg_erldb_name = erldb
+pkg_erldb_description = ORM (Object-relational mapping) application implemented in Erlang
+pkg_erldb_homepage = http://erldb.org
+pkg_erldb_fetch = git
+pkg_erldb_repo = https://github.com/erldb/erldb
+pkg_erldb_commit = master
+
+PACKAGES += erldis
+pkg_erldis_name = erldis
+pkg_erldis_description = redis erlang client library
+pkg_erldis_homepage = https://github.com/cstar/erldis
+pkg_erldis_fetch = git
+pkg_erldis_repo = https://github.com/cstar/erldis
+pkg_erldis_commit = master
+
+PACKAGES += erldns
+pkg_erldns_name = erldns
+pkg_erldns_description = DNS server, in erlang.
+pkg_erldns_homepage = https://github.com/aetrion/erl-dns
+pkg_erldns_fetch = git
+pkg_erldns_repo = https://github.com/aetrion/erl-dns
+pkg_erldns_commit = master
+
+PACKAGES += erldocker
+pkg_erldocker_name = erldocker
+pkg_erldocker_description = Docker Remote API client for Erlang
+pkg_erldocker_homepage = https://github.com/proger/erldocker
+pkg_erldocker_fetch = git
+pkg_erldocker_repo = https://github.com/proger/erldocker
+pkg_erldocker_commit = master
+
+PACKAGES += erlfsmon
+pkg_erlfsmon_name = erlfsmon
+pkg_erlfsmon_description = Erlang filesystem event watcher for Linux and OSX
+pkg_erlfsmon_homepage = https://github.com/proger/erlfsmon
+pkg_erlfsmon_fetch = git
+pkg_erlfsmon_repo = https://github.com/proger/erlfsmon
+pkg_erlfsmon_commit = master
+
+PACKAGES += erlgit
+pkg_erlgit_name = erlgit
+pkg_erlgit_description = Erlang convenience wrapper around git executable
+pkg_erlgit_homepage = https://github.com/gleber/erlgit
+pkg_erlgit_fetch = git
+pkg_erlgit_repo = https://github.com/gleber/erlgit
+pkg_erlgit_commit = master
+
+PACKAGES += erlguten
+pkg_erlguten_name = erlguten
+pkg_erlguten_description = ErlGuten is a system for high-quality typesetting, written purely in Erlang.
+pkg_erlguten_homepage = https://github.com/richcarl/erlguten
+pkg_erlguten_fetch = git
+pkg_erlguten_repo = https://github.com/richcarl/erlguten
+pkg_erlguten_commit = master
+
+PACKAGES += erlmc
+pkg_erlmc_name = erlmc
+pkg_erlmc_description = Erlang memcached binary protocol client
+pkg_erlmc_homepage = https://github.com/jkvor/erlmc
+pkg_erlmc_fetch = git
+pkg_erlmc_repo = https://github.com/jkvor/erlmc
+pkg_erlmc_commit = master
+
+PACKAGES += erlmongo
+pkg_erlmongo_name = erlmongo
+pkg_erlmongo_description = Record-based Erlang driver for MongoDB with GridFS support
+pkg_erlmongo_homepage = https://github.com/SergejJurecko/erlmongo
+pkg_erlmongo_fetch = git
+pkg_erlmongo_repo = https://github.com/SergejJurecko/erlmongo
+pkg_erlmongo_commit = master
+
+PACKAGES += erlog
+pkg_erlog_name = erlog
+pkg_erlog_description = Prolog interpreter in and for Erlang
+pkg_erlog_homepage = https://github.com/rvirding/erlog
+pkg_erlog_fetch = git
+pkg_erlog_repo = https://github.com/rvirding/erlog
+pkg_erlog_commit = master
+
+PACKAGES += erlpass
+pkg_erlpass_name = erlpass
+pkg_erlpass_description = A library to handle password hashing and changing in a safe manner, independent from any kind of storage whatsoever.
+pkg_erlpass_homepage = https://github.com/ferd/erlpass
+pkg_erlpass_fetch = git
+pkg_erlpass_repo = https://github.com/ferd/erlpass
+pkg_erlpass_commit = master
+
+PACKAGES += erlport
+pkg_erlport_name = erlport
+pkg_erlport_description = ErlPort - connect Erlang to other languages
+pkg_erlport_homepage = https://github.com/hdima/erlport
+pkg_erlport_fetch = git
+pkg_erlport_repo = https://github.com/hdima/erlport
+pkg_erlport_commit = master
+
+PACKAGES += erlsh
+pkg_erlsh_name = erlsh
+pkg_erlsh_description = Erlang shell tools
+pkg_erlsh_homepage = https://github.com/proger/erlsh
+pkg_erlsh_fetch = git
+pkg_erlsh_repo = https://github.com/proger/erlsh
+pkg_erlsh_commit = master
+
+PACKAGES += erlsha2
+pkg_erlsha2_name = erlsha2
+pkg_erlsha2_description = SHA-224, SHA-256, SHA-384, SHA-512 implemented in Erlang NIFs.
+pkg_erlsha2_homepage = https://github.com/vinoski/erlsha2
+pkg_erlsha2_fetch = git
+pkg_erlsha2_repo = https://github.com/vinoski/erlsha2
+pkg_erlsha2_commit = master
+
+PACKAGES += erlsom
+pkg_erlsom_name = erlsom
+pkg_erlsom_description = XML parser for Erlang
+pkg_erlsom_homepage = https://github.com/willemdj/erlsom
+pkg_erlsom_fetch = git
+pkg_erlsom_repo = https://github.com/willemdj/erlsom
+pkg_erlsom_commit = master
+
+PACKAGES += erlubi
+pkg_erlubi_name = erlubi
+pkg_erlubi_description = Ubigraph Erlang Client (and Process Visualizer)
+pkg_erlubi_homepage = https://github.com/krestenkrab/erlubi
+pkg_erlubi_fetch = git
+pkg_erlubi_repo = https://github.com/krestenkrab/erlubi
+pkg_erlubi_commit = master
+
+PACKAGES += erlvolt
+pkg_erlvolt_name = erlvolt
+pkg_erlvolt_description = VoltDB Erlang Client Driver
+pkg_erlvolt_homepage = https://github.com/VoltDB/voltdb-client-erlang
+pkg_erlvolt_fetch = git
+pkg_erlvolt_repo = https://github.com/VoltDB/voltdb-client-erlang
+pkg_erlvolt_commit = master
+
+PACKAGES += erlware_commons
+pkg_erlware_commons_name = erlware_commons
+pkg_erlware_commons_description = Erlware Commons is an Erlware project focused on all aspects of reusable Erlang components.
+pkg_erlware_commons_homepage = https://github.com/erlware/erlware_commons
+pkg_erlware_commons_fetch = git
+pkg_erlware_commons_repo = https://github.com/erlware/erlware_commons
+pkg_erlware_commons_commit = master
+
+PACKAGES += erlydtl
+pkg_erlydtl_name = erlydtl
+pkg_erlydtl_description = Django Template Language for Erlang.
+pkg_erlydtl_homepage = https://github.com/erlydtl/erlydtl
+pkg_erlydtl_fetch = git
+pkg_erlydtl_repo = https://github.com/erlydtl/erlydtl
+pkg_erlydtl_commit = master
+
+PACKAGES += errd
+pkg_errd_name = errd
+pkg_errd_description = Erlang RRDTool library
+pkg_errd_homepage = https://github.com/archaelus/errd
+pkg_errd_fetch = git
+pkg_errd_repo = https://github.com/archaelus/errd
+pkg_errd_commit = master
+
+PACKAGES += erserve
+pkg_erserve_name = erserve
+pkg_erserve_description = Erlang/Rserve communication interface
+pkg_erserve_homepage = https://github.com/del/erserve
+pkg_erserve_fetch = git
+pkg_erserve_repo = https://github.com/del/erserve
+pkg_erserve_commit = master
+
+PACKAGES += erwa
+pkg_erwa_name = erwa
+pkg_erwa_description = A WAMP router and client written in Erlang.
+pkg_erwa_homepage = https://github.com/bwegh/erwa
+pkg_erwa_fetch = git
+pkg_erwa_repo = https://github.com/bwegh/erwa
+pkg_erwa_commit = master
+
+PACKAGES += escalus
+pkg_escalus_name = escalus
+pkg_escalus_description = An XMPP client library in Erlang for conveniently testing XMPP servers
+pkg_escalus_homepage = https://github.com/esl/escalus
+pkg_escalus_fetch = git
+pkg_escalus_repo = https://github.com/esl/escalus
+pkg_escalus_commit = master
+
+PACKAGES += esh_mk
+pkg_esh_mk_name = esh_mk
+pkg_esh_mk_description = esh template engine plugin for erlang.mk
+pkg_esh_mk_homepage = https://github.com/crownedgrouse/esh.mk
+pkg_esh_mk_fetch = git
+pkg_esh_mk_repo = https://github.com/crownedgrouse/esh.mk.git
+pkg_esh_mk_commit = master
+
+PACKAGES += espec
+pkg_espec_name = espec
+pkg_espec_description = ESpec: Behaviour driven development framework for Erlang
+pkg_espec_homepage = https://github.com/lucaspiller/espec
+pkg_espec_fetch = git
+pkg_espec_repo = https://github.com/lucaspiller/espec
+pkg_espec_commit = master
+
+PACKAGES += estatsd
+pkg_estatsd_name = estatsd
+pkg_estatsd_description = Erlang stats aggregation app that periodically flushes data to graphite
+pkg_estatsd_homepage = https://github.com/RJ/estatsd
+pkg_estatsd_fetch = git
+pkg_estatsd_repo = https://github.com/RJ/estatsd
+pkg_estatsd_commit = master
+
+PACKAGES += etap
+pkg_etap_name = etap
+pkg_etap_description = etap is a simple Erlang testing library that provides TAP-compliant output.
+pkg_etap_homepage = https://github.com/ngerakines/etap
+pkg_etap_fetch = git
+pkg_etap_repo = https://github.com/ngerakines/etap
+pkg_etap_commit = master
+
+PACKAGES += etest
+pkg_etest_name = etest
+pkg_etest_description = A lightweight, convention over configuration test framework for Erlang
+pkg_etest_homepage = https://github.com/wooga/etest
+pkg_etest_fetch = git
+pkg_etest_repo = https://github.com/wooga/etest
+pkg_etest_commit = master
+
+PACKAGES += etest_http
+pkg_etest_http_name = etest_http
+pkg_etest_http_description = etest Assertions around HTTP (client-side)
+pkg_etest_http_homepage = https://github.com/wooga/etest_http
+pkg_etest_http_fetch = git
+pkg_etest_http_repo = https://github.com/wooga/etest_http
+pkg_etest_http_commit = master
+
+PACKAGES += etoml
+pkg_etoml_name = etoml
+pkg_etoml_description = Erlang parser for the TOML language
+pkg_etoml_homepage = https://github.com/kalta/etoml
+pkg_etoml_fetch = git
+pkg_etoml_repo = https://github.com/kalta/etoml
+pkg_etoml_commit = master
+
+PACKAGES += eunit
+pkg_eunit_name = eunit
+pkg_eunit_description = The EUnit lightweight unit testing framework for Erlang - this is the canonical development repository.
+pkg_eunit_homepage = https://github.com/richcarl/eunit
+pkg_eunit_fetch = git
+pkg_eunit_repo = https://github.com/richcarl/eunit
+pkg_eunit_commit = master
+
+PACKAGES += eunit_formatters
+pkg_eunit_formatters_name = eunit_formatters
+pkg_eunit_formatters_description = Because eunit's output sucks. Let's make it better.
+pkg_eunit_formatters_homepage = https://github.com/seancribbs/eunit_formatters
+pkg_eunit_formatters_fetch = git
+pkg_eunit_formatters_repo = https://github.com/seancribbs/eunit_formatters
+pkg_eunit_formatters_commit = master
+
+PACKAGES += euthanasia
+pkg_euthanasia_name = euthanasia
+pkg_euthanasia_description = Merciful killer for your Erlang processes
+pkg_euthanasia_homepage = https://github.com/doubleyou/euthanasia
+pkg_euthanasia_fetch = git
+pkg_euthanasia_repo = https://github.com/doubleyou/euthanasia
+pkg_euthanasia_commit = master
+
+PACKAGES += evum
+pkg_evum_name = evum
+pkg_evum_description = Spawn Linux VMs as Erlang processes in the Erlang VM
+pkg_evum_homepage = https://github.com/msantos/evum
+pkg_evum_fetch = git
+pkg_evum_repo = https://github.com/msantos/evum
+pkg_evum_commit = master
+
+PACKAGES += exec
+pkg_exec_name = erlexec
+pkg_exec_description = Execute and control OS processes from Erlang/OTP.
+pkg_exec_homepage = http://saleyn.github.com/erlexec
+pkg_exec_fetch = git
+pkg_exec_repo = https://github.com/saleyn/erlexec
+pkg_exec_commit = master
+
+PACKAGES += exml
+pkg_exml_name = exml
+pkg_exml_description = XML parsing library in Erlang
+pkg_exml_homepage = https://github.com/paulgray/exml
+pkg_exml_fetch = git
+pkg_exml_repo = https://github.com/paulgray/exml
+pkg_exml_commit = master
+
+PACKAGES += exometer
+pkg_exometer_name = exometer
+pkg_exometer_description = Basic measurement objects and probe behavior
+pkg_exometer_homepage = https://github.com/Feuerlabs/exometer
+pkg_exometer_fetch = git
+pkg_exometer_repo = https://github.com/Feuerlabs/exometer
+pkg_exometer_commit = master
+
+PACKAGES += exs1024
+pkg_exs1024_name = exs1024
+pkg_exs1024_description = Xorshift1024star pseudo random number generator for Erlang.
+pkg_exs1024_homepage = https://github.com/jj1bdx/exs1024
+pkg_exs1024_fetch = git
+pkg_exs1024_repo = https://github.com/jj1bdx/exs1024
+pkg_exs1024_commit = master
+
+PACKAGES += exs64
+pkg_exs64_name = exs64
+pkg_exs64_description = Xorshift64star pseudo random number generator for Erlang.
+pkg_exs64_homepage = https://github.com/jj1bdx/exs64
+pkg_exs64_fetch = git
+pkg_exs64_repo = https://github.com/jj1bdx/exs64
+pkg_exs64_commit = master
+
+PACKAGES += exsplus116
+pkg_exsplus116_name = exsplus116
+pkg_exsplus116_description = Xorshift116plus for Erlang
+pkg_exsplus116_homepage = https://github.com/jj1bdx/exsplus116
+pkg_exsplus116_fetch = git
+pkg_exsplus116_repo = https://github.com/jj1bdx/exsplus116
+pkg_exsplus116_commit = master
+
+PACKAGES += exsplus128
+pkg_exsplus128_name = exsplus128
+pkg_exsplus128_description = Xorshift128plus pseudo random number generator for Erlang.
+pkg_exsplus128_homepage = https://github.com/jj1bdx/exsplus128
+pkg_exsplus128_fetch = git
+pkg_exsplus128_repo = https://github.com/jj1bdx/exsplus128
+pkg_exsplus128_commit = master
+
+PACKAGES += ezmq
+pkg_ezmq_name = ezmq
+pkg_ezmq_description = zMQ implemented in Erlang
+pkg_ezmq_homepage = https://github.com/RoadRunnr/ezmq
+pkg_ezmq_fetch = git
+pkg_ezmq_repo = https://github.com/RoadRunnr/ezmq
+pkg_ezmq_commit = master
+
+PACKAGES += ezmtp
+pkg_ezmtp_name = ezmtp
+pkg_ezmtp_description = ZMTP protocol in pure Erlang.
+pkg_ezmtp_homepage = https://github.com/a13x/ezmtp
+pkg_ezmtp_fetch = git
+pkg_ezmtp_repo = https://github.com/a13x/ezmtp
+pkg_ezmtp_commit = master
+
+PACKAGES += fast_disk_log
+pkg_fast_disk_log_name = fast_disk_log
+pkg_fast_disk_log_description = Pool-based asynchronous Erlang disk logger
+pkg_fast_disk_log_homepage = https://github.com/lpgauth/fast_disk_log
+pkg_fast_disk_log_fetch = git
+pkg_fast_disk_log_repo = https://github.com/lpgauth/fast_disk_log
+pkg_fast_disk_log_commit = master
+
+PACKAGES += feeder
+pkg_feeder_name = feeder
+pkg_feeder_description = Stream parse RSS and Atom formatted XML feeds.
+pkg_feeder_homepage = https://github.com/michaelnisi/feeder
+pkg_feeder_fetch = git
+pkg_feeder_repo = https://github.com/michaelnisi/feeder
+pkg_feeder_commit = master
+
+PACKAGES += find_crate
+pkg_find_crate_name = find_crate
+pkg_find_crate_description = Find Rust libs and exes in Erlang application priv directory
+pkg_find_crate_homepage = https://github.com/goertzenator/find_crate
+pkg_find_crate_fetch = git
+pkg_find_crate_repo = https://github.com/goertzenator/find_crate
+pkg_find_crate_commit = master
+
+PACKAGES += fix
+pkg_fix_name = fix
+pkg_fix_description = http://fixprotocol.org/ implementation.
+pkg_fix_homepage = https://github.com/maxlapshin/fix
+pkg_fix_fetch = git
+pkg_fix_repo = https://github.com/maxlapshin/fix
+pkg_fix_commit = master
+
+PACKAGES += flower
+pkg_flower_name = flower
+pkg_flower_description = FlowER - an Erlang OpenFlow development platform
+pkg_flower_homepage = https://github.com/travelping/flower
+pkg_flower_fetch = git
+pkg_flower_repo = https://github.com/travelping/flower
+pkg_flower_commit = master
+
+PACKAGES += fn
+pkg_fn_name = fn
+pkg_fn_description = Function utilities for Erlang
+pkg_fn_homepage = https://github.com/reiddraper/fn
+pkg_fn_fetch = git
+pkg_fn_repo = https://github.com/reiddraper/fn
+pkg_fn_commit = master
+
+PACKAGES += folsom
+pkg_folsom_name = folsom
+pkg_folsom_description = Expose Erlang Events and Metrics
+pkg_folsom_homepage = https://github.com/boundary/folsom
+pkg_folsom_fetch = git
+pkg_folsom_repo = https://github.com/boundary/folsom
+pkg_folsom_commit = master
+
+PACKAGES += folsom_cowboy
+pkg_folsom_cowboy_name = folsom_cowboy
+pkg_folsom_cowboy_description = A Cowboy based Folsom HTTP Wrapper.
+pkg_folsom_cowboy_homepage = https://github.com/boundary/folsom_cowboy
+pkg_folsom_cowboy_fetch = git
+pkg_folsom_cowboy_repo = https://github.com/boundary/folsom_cowboy
+pkg_folsom_cowboy_commit = master
+
+PACKAGES += folsomite
+pkg_folsomite_name = folsomite
+pkg_folsomite_description = blow up your graphite / riemann server with folsom metrics
+pkg_folsomite_homepage = https://github.com/campanja/folsomite
+pkg_folsomite_fetch = git
+pkg_folsomite_repo = https://github.com/campanja/folsomite
+pkg_folsomite_commit = master
+
+PACKAGES += fs
+pkg_fs_name = fs
+pkg_fs_description = Erlang FileSystem Listener
+pkg_fs_homepage = https://github.com/synrc/fs
+pkg_fs_fetch = git
+pkg_fs_repo = https://github.com/synrc/fs
+pkg_fs_commit = master
+
+PACKAGES += fuse
+pkg_fuse_name = fuse
+pkg_fuse_description = A Circuit Breaker for Erlang
+pkg_fuse_homepage = https://github.com/jlouis/fuse
+pkg_fuse_fetch = git
+pkg_fuse_repo = https://github.com/jlouis/fuse
+pkg_fuse_commit = master
+
+PACKAGES += gcm
+pkg_gcm_name = gcm
+pkg_gcm_description = An Erlang application for Google Cloud Messaging
+pkg_gcm_homepage = https://github.com/pdincau/gcm-erlang
+pkg_gcm_fetch = git
+pkg_gcm_repo = https://github.com/pdincau/gcm-erlang
+pkg_gcm_commit = master
+
+PACKAGES += gcprof
+pkg_gcprof_name = gcprof
+pkg_gcprof_description = Garbage Collection profiler for Erlang
+pkg_gcprof_homepage = https://github.com/knutin/gcprof
+pkg_gcprof_fetch = git
+pkg_gcprof_repo = https://github.com/knutin/gcprof
+pkg_gcprof_commit = master
+
+PACKAGES += geas
+pkg_geas_name = geas
+pkg_geas_description = Guess Erlang Application Scattering
+pkg_geas_homepage = https://github.com/crownedgrouse/geas
+pkg_geas_fetch = git
+pkg_geas_repo = https://github.com/crownedgrouse/geas
+pkg_geas_commit = master
+
+PACKAGES += geef
+pkg_geef_name = geef
+pkg_geef_description = Git NEEEEF (Erlang NIF)
+pkg_geef_homepage = https://github.com/carlosmn/geef
+pkg_geef_fetch = git
+pkg_geef_repo = https://github.com/carlosmn/geef
+pkg_geef_commit = master
+
+PACKAGES += gen_coap
+pkg_gen_coap_name = gen_coap
+pkg_gen_coap_description = Generic Erlang CoAP Client/Server
+pkg_gen_coap_homepage = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_fetch = git
+pkg_gen_coap_repo = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_commit = master
+
+PACKAGES += gen_cycle
+pkg_gen_cycle_name = gen_cycle
+pkg_gen_cycle_description = Simple, generic OTP behaviour for recurring tasks
+pkg_gen_cycle_homepage = https://github.com/aerosol/gen_cycle
+pkg_gen_cycle_fetch = git
+pkg_gen_cycle_repo = https://github.com/aerosol/gen_cycle
+pkg_gen_cycle_commit = develop
+
+PACKAGES += gen_icmp
+pkg_gen_icmp_name = gen_icmp
+pkg_gen_icmp_description = Erlang interface to ICMP sockets
+pkg_gen_icmp_homepage = https://github.com/msantos/gen_icmp
+pkg_gen_icmp_fetch = git
+pkg_gen_icmp_repo = https://github.com/msantos/gen_icmp
+pkg_gen_icmp_commit = master
+
+PACKAGES += gen_leader
+pkg_gen_leader_name = gen_leader
+pkg_gen_leader_description = leader election behavior
+pkg_gen_leader_homepage = https://github.com/garret-smith/gen_leader_revival
+pkg_gen_leader_fetch = git
+pkg_gen_leader_repo = https://github.com/garret-smith/gen_leader_revival
+pkg_gen_leader_commit = master
+
+PACKAGES += gen_nb_server
+pkg_gen_nb_server_name = gen_nb_server
+pkg_gen_nb_server_description = OTP behavior for writing non-blocking servers
+pkg_gen_nb_server_homepage = https://github.com/kevsmith/gen_nb_server
+pkg_gen_nb_server_fetch = git
+pkg_gen_nb_server_repo = https://github.com/kevsmith/gen_nb_server
+pkg_gen_nb_server_commit = master
+
+PACKAGES += gen_paxos
+pkg_gen_paxos_name = gen_paxos
+pkg_gen_paxos_description = An Erlang/OTP-style implementation of the PAXOS distributed consensus protocol
+pkg_gen_paxos_homepage = https://github.com/gburd/gen_paxos
+pkg_gen_paxos_fetch = git
+pkg_gen_paxos_repo = https://github.com/gburd/gen_paxos
+pkg_gen_paxos_commit = master
+
+PACKAGES += gen_rpc
+pkg_gen_rpc_name = gen_rpc
+pkg_gen_rpc_description = A scalable RPC library for Erlang-VM based languages
+pkg_gen_rpc_homepage = https://github.com/priestjim/gen_rpc.git
+pkg_gen_rpc_fetch = git
+pkg_gen_rpc_repo = https://github.com/priestjim/gen_rpc.git
+pkg_gen_rpc_commit = master
+
+PACKAGES += gen_smtp
+pkg_gen_smtp_name = gen_smtp
+pkg_gen_smtp_description = A generic Erlang SMTP server and client that can be extended via callback modules
+pkg_gen_smtp_homepage = https://github.com/Vagabond/gen_smtp
+pkg_gen_smtp_fetch = git
+pkg_gen_smtp_repo = https://github.com/Vagabond/gen_smtp
+pkg_gen_smtp_commit = master
+
+PACKAGES += gen_tracker
+pkg_gen_tracker_name = gen_tracker
+pkg_gen_tracker_description = supervisor with ets handling of children and their metadata
+pkg_gen_tracker_homepage = https://github.com/erlyvideo/gen_tracker
+pkg_gen_tracker_fetch = git
+pkg_gen_tracker_repo = https://github.com/erlyvideo/gen_tracker
+pkg_gen_tracker_commit = master
+
+PACKAGES += gen_unix
+pkg_gen_unix_name = gen_unix
+pkg_gen_unix_description = Erlang Unix socket interface
+pkg_gen_unix_homepage = https://github.com/msantos/gen_unix
+pkg_gen_unix_fetch = git
+pkg_gen_unix_repo = https://github.com/msantos/gen_unix
+pkg_gen_unix_commit = master
+
+PACKAGES += geode
+pkg_geode_name = geode
+pkg_geode_description = geohash/proximity lookup in pure, uncut erlang.
+pkg_geode_homepage = https://github.com/bradfordw/geode
+pkg_geode_fetch = git
+pkg_geode_repo = https://github.com/bradfordw/geode
+pkg_geode_commit = master
+
+PACKAGES += getopt
+pkg_getopt_name = getopt
+pkg_getopt_description = Module to parse command line arguments using the GNU getopt syntax
+pkg_getopt_homepage = https://github.com/jcomellas/getopt
+pkg_getopt_fetch = git
+pkg_getopt_repo = https://github.com/jcomellas/getopt
+pkg_getopt_commit = master
+
+PACKAGES += gettext
+pkg_gettext_name = gettext
+pkg_gettext_description = Erlang internationalization library.
+pkg_gettext_homepage = https://github.com/etnt/gettext
+pkg_gettext_fetch = git
+pkg_gettext_repo = https://github.com/etnt/gettext
+pkg_gettext_commit = master
+
+PACKAGES += giallo
+pkg_giallo_name = giallo
+pkg_giallo_description = Small and flexible web framework on top of Cowboy
+pkg_giallo_homepage = https://github.com/kivra/giallo
+pkg_giallo_fetch = git
+pkg_giallo_repo = https://github.com/kivra/giallo
+pkg_giallo_commit = master
+
+PACKAGES += gin
+pkg_gin_name = gin
+pkg_gin_description = Guard helper macros for Erlang, implemented as a parse_transform
+pkg_gin_homepage = https://github.com/mad-cocktail/gin
+pkg_gin_fetch = git
+pkg_gin_repo = https://github.com/mad-cocktail/gin
+pkg_gin_commit = master
+
+PACKAGES += gitty
+pkg_gitty_name = gitty
+pkg_gitty_description = Git access in erlang
+pkg_gitty_homepage = https://github.com/maxlapshin/gitty
+pkg_gitty_fetch = git
+pkg_gitty_repo = https://github.com/maxlapshin/gitty
+pkg_gitty_commit = master
+
+PACKAGES += gold_fever
+pkg_gold_fever_name = gold_fever
+pkg_gold_fever_description = A Treasure Hunt for Erlangers
+pkg_gold_fever_homepage = https://github.com/inaka/gold_fever
+pkg_gold_fever_fetch = git
+pkg_gold_fever_repo = https://github.com/inaka/gold_fever
+pkg_gold_fever_commit = master
+
+PACKAGES += gpb
+pkg_gpb_name = gpb
+pkg_gpb_description = A Google Protobuf implementation for Erlang
+pkg_gpb_homepage = https://github.com/tomas-abrahamsson/gpb
+pkg_gpb_fetch = git
+pkg_gpb_repo = https://github.com/tomas-abrahamsson/gpb
+pkg_gpb_commit = master
+
+PACKAGES += gproc
+pkg_gproc_name = gproc
+pkg_gproc_description = Extended process registry for Erlang
+pkg_gproc_homepage = https://github.com/uwiger/gproc
+pkg_gproc_fetch = git
+pkg_gproc_repo = https://github.com/uwiger/gproc
+pkg_gproc_commit = master
+
+PACKAGES += grapherl
+pkg_grapherl_name = grapherl
+pkg_grapherl_description = Create graphs of Erlang systems and programs
+pkg_grapherl_homepage = https://github.com/eproxus/grapherl
+pkg_grapherl_fetch = git
+pkg_grapherl_repo = https://github.com/eproxus/grapherl
+pkg_grapherl_commit = master
+
+PACKAGES += grpc
+pkg_grpc_name = grpc
+pkg_grpc_description = gRPC server in Erlang
+pkg_grpc_homepage = https://github.com/Bluehouse-Technology/grpc
+pkg_grpc_fetch = git
+pkg_grpc_repo = https://github.com/Bluehouse-Technology/grpc
+pkg_grpc_commit = master
+
+PACKAGES += grpc_client
+pkg_grpc_client_name = grpc_client
+pkg_grpc_client_description = gRPC client in Erlang
+pkg_grpc_client_homepage = https://github.com/Bluehouse-Technology/grpc_client
+pkg_grpc_client_fetch = git
+pkg_grpc_client_repo = https://github.com/Bluehouse-Technology/grpc_client
+pkg_grpc_client_commit = master
+
+PACKAGES += gun
+pkg_gun_name = gun
+pkg_gun_description = Asynchronous SPDY, HTTP and Websocket client written in Erlang.
+pkg_gun_homepage = http://ninenines.eu
+pkg_gun_fetch = git
+pkg_gun_repo = https://github.com/ninenines/gun
+pkg_gun_commit = master
+
+PACKAGES += gut
+pkg_gut_name = gut
+pkg_gut_description = gut is a template printing, aka scaffolding, tool for Erlang. Like rails generate or yeoman
+pkg_gut_homepage = https://github.com/unbalancedparentheses/gut
+pkg_gut_fetch = git
+pkg_gut_repo = https://github.com/unbalancedparentheses/gut
+pkg_gut_commit = master
+
+PACKAGES += hackney
+pkg_hackney_name = hackney
+pkg_hackney_description = simple HTTP client in Erlang
+pkg_hackney_homepage = https://github.com/benoitc/hackney
+pkg_hackney_fetch = git
+pkg_hackney_repo = https://github.com/benoitc/hackney
+pkg_hackney_commit = master
+
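+# A minimal usage sketch (assuming standard erlang.mk conventions): a project
+# Makefile names an index entry in DEPS, and erlang.mk falls back to the
+# matching pkg_*_fetch/pkg_*_repo/pkg_*_commit values defined above. The
+# project name "my_app" is hypothetical.
+#
+#     PROJECT = my_app
+#     DEPS = hackney
+#     include erlang.mk
+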
+PACKAGES += hamcrest
+pkg_hamcrest_name = hamcrest
+pkg_hamcrest_description = Erlang port of Hamcrest
+pkg_hamcrest_homepage = https://github.com/hyperthunk/hamcrest-erlang
+pkg_hamcrest_fetch = git
+pkg_hamcrest_repo = https://github.com/hyperthunk/hamcrest-erlang
+pkg_hamcrest_commit = master
+
+PACKAGES += hanoidb
+pkg_hanoidb_name = hanoidb
+pkg_hanoidb_description = Erlang LSM BTree Storage
+pkg_hanoidb_homepage = https://github.com/krestenkrab/hanoidb
+pkg_hanoidb_fetch = git
+pkg_hanoidb_repo = https://github.com/krestenkrab/hanoidb
+pkg_hanoidb_commit = master
+
+PACKAGES += hottub
+pkg_hottub_name = hottub
+pkg_hottub_description = Permanent Erlang Worker Pool
+pkg_hottub_homepage = https://github.com/bfrog/hottub
+pkg_hottub_fetch = git
+pkg_hottub_repo = https://github.com/bfrog/hottub
+pkg_hottub_commit = master
+
+PACKAGES += hpack
+pkg_hpack_name = hpack
+pkg_hpack_description = HPACK Implementation for Erlang
+pkg_hpack_homepage = https://github.com/joedevivo/hpack
+pkg_hpack_fetch = git
+pkg_hpack_repo = https://github.com/joedevivo/hpack
+pkg_hpack_commit = master
+
+PACKAGES += hyper
+pkg_hyper_name = hyper
+pkg_hyper_description = Erlang implementation of HyperLogLog
+pkg_hyper_homepage = https://github.com/GameAnalytics/hyper
+pkg_hyper_fetch = git
+pkg_hyper_repo = https://github.com/GameAnalytics/hyper
+pkg_hyper_commit = master
+
+PACKAGES += i18n
+pkg_i18n_name = i18n
+pkg_i18n_description = International components for unicode from Erlang (unicode, date, string, number, format, locale, localization, transliteration, icu4e)
+pkg_i18n_homepage = https://github.com/erlang-unicode/i18n
+pkg_i18n_fetch = git
+pkg_i18n_repo = https://github.com/erlang-unicode/i18n
+pkg_i18n_commit = master
+
+PACKAGES += ibrowse
+pkg_ibrowse_name = ibrowse
+pkg_ibrowse_description = Erlang HTTP client
+pkg_ibrowse_homepage = https://github.com/cmullaparthi/ibrowse
+pkg_ibrowse_fetch = git
+pkg_ibrowse_repo = https://github.com/cmullaparthi/ibrowse
+pkg_ibrowse_commit = master
+
+PACKAGES += idna
+pkg_idna_name = idna
+pkg_idna_description = Erlang IDNA lib
+pkg_idna_homepage = https://github.com/benoitc/erlang-idna
+pkg_idna_fetch = git
+pkg_idna_repo = https://github.com/benoitc/erlang-idna
+pkg_idna_commit = master
+
+PACKAGES += ierlang
+pkg_ierlang_name = ierlang
+pkg_ierlang_description = An Erlang language kernel for IPython.
+pkg_ierlang_homepage = https://github.com/robbielynch/ierlang
+pkg_ierlang_fetch = git
+pkg_ierlang_repo = https://github.com/robbielynch/ierlang
+pkg_ierlang_commit = master
+
+PACKAGES += iota
+pkg_iota_name = iota
+pkg_iota_description = iota (Inter-dependency Objective Testing Apparatus) - a tool to enforce clean separation of responsibilities in Erlang code
+pkg_iota_homepage = https://github.com/jpgneves/iota
+pkg_iota_fetch = git
+pkg_iota_repo = https://github.com/jpgneves/iota
+pkg_iota_commit = master
+
+PACKAGES += irc_lib
+pkg_irc_lib_name = irc_lib
+pkg_irc_lib_description = Erlang IRC client library
+pkg_irc_lib_homepage = https://github.com/OtpChatBot/irc_lib
+pkg_irc_lib_fetch = git
+pkg_irc_lib_repo = https://github.com/OtpChatBot/irc_lib
+pkg_irc_lib_commit = master
+
+PACKAGES += ircd
+pkg_ircd_name = ircd
+pkg_ircd_description = A pluggable IRC daemon application/library for Erlang.
+pkg_ircd_homepage = https://github.com/tonyg/erlang-ircd
+pkg_ircd_fetch = git
+pkg_ircd_repo = https://github.com/tonyg/erlang-ircd
+pkg_ircd_commit = master
+
+PACKAGES += iris
+pkg_iris_name = iris
+pkg_iris_description = Iris Erlang binding
+pkg_iris_homepage = https://github.com/project-iris/iris-erl
+pkg_iris_fetch = git
+pkg_iris_repo = https://github.com/project-iris/iris-erl
+pkg_iris_commit = master
+
+PACKAGES += iso8601
+pkg_iso8601_name = iso8601
+pkg_iso8601_description = Erlang ISO 8601 date formatter/parser
+pkg_iso8601_homepage = https://github.com/seansawyer/erlang_iso8601
+pkg_iso8601_fetch = git
+pkg_iso8601_repo = https://github.com/seansawyer/erlang_iso8601
+pkg_iso8601_commit = master
+
+PACKAGES += jamdb_sybase
+pkg_jamdb_sybase_name = jamdb_sybase
+pkg_jamdb_sybase_description = Erlang driver for SAP Sybase ASE
+pkg_jamdb_sybase_homepage = https://github.com/erlangbureau/jamdb_sybase
+pkg_jamdb_sybase_fetch = git
+pkg_jamdb_sybase_repo = https://github.com/erlangbureau/jamdb_sybase
+pkg_jamdb_sybase_commit = master
+
+PACKAGES += jerg
+pkg_jerg_name = jerg
+pkg_jerg_description = JSON Schema to Erlang Records Generator
+pkg_jerg_homepage = https://github.com/ddossot/jerg
+pkg_jerg_fetch = git
+pkg_jerg_repo = https://github.com/ddossot/jerg
+pkg_jerg_commit = master
+
+PACKAGES += jesse
+pkg_jesse_name = jesse
+pkg_jesse_description = jesse (JSON Schema Erlang) is an implementation of a JSON Schema validator for Erlang.
+pkg_jesse_homepage = https://github.com/for-GET/jesse
+pkg_jesse_fetch = git
+pkg_jesse_repo = https://github.com/for-GET/jesse
+pkg_jesse_commit = master
+
+PACKAGES += jiffy
+pkg_jiffy_name = jiffy
+pkg_jiffy_description = JSON NIFs for Erlang.
+pkg_jiffy_homepage = https://github.com/davisp/jiffy
+pkg_jiffy_fetch = git
+pkg_jiffy_repo = https://github.com/davisp/jiffy
+pkg_jiffy_commit = master
+
+PACKAGES += jiffy_v
+pkg_jiffy_v_name = jiffy_v
+pkg_jiffy_v_description = JSON validation utility
+pkg_jiffy_v_homepage = https://github.com/shizzard/jiffy-v
+pkg_jiffy_v_fetch = git
+pkg_jiffy_v_repo = https://github.com/shizzard/jiffy-v
+pkg_jiffy_v_commit = master
+
+PACKAGES += jobs
+pkg_jobs_name = jobs
+pkg_jobs_description = a Job scheduler for load regulation
+pkg_jobs_homepage = https://github.com/esl/jobs
+pkg_jobs_fetch = git
+pkg_jobs_repo = https://github.com/esl/jobs
+pkg_jobs_commit = master
+
+PACKAGES += joxa
+pkg_joxa_name = joxa
+pkg_joxa_description = A Modern Lisp for the Erlang VM
+pkg_joxa_homepage = https://github.com/joxa/joxa
+pkg_joxa_fetch = git
+pkg_joxa_repo = https://github.com/joxa/joxa
+pkg_joxa_commit = master
+
+PACKAGES += json
+pkg_json_name = json
+pkg_json_description = a high level json library for erlang (17.0+)
+pkg_json_homepage = https://github.com/talentdeficit/json
+pkg_json_fetch = git
+pkg_json_repo = https://github.com/talentdeficit/json
+pkg_json_commit = master
+
+PACKAGES += json_rec
+pkg_json_rec_name = json_rec
+pkg_json_rec_description = JSON to erlang record
+pkg_json_rec_homepage = https://github.com/justinkirby/json_rec
+pkg_json_rec_fetch = git
+pkg_json_rec_repo = https://github.com/justinkirby/json_rec
+pkg_json_rec_commit = master
+
+PACKAGES += jsone
+pkg_jsone_name = jsone
+pkg_jsone_description = An Erlang library for encoding and decoding JSON data.
+pkg_jsone_homepage = https://github.com/sile/jsone.git
+pkg_jsone_fetch = git
+pkg_jsone_repo = https://github.com/sile/jsone.git
+pkg_jsone_commit = master
+
+PACKAGES += jsonerl
+pkg_jsonerl_name = jsonerl
+pkg_jsonerl_description = yet another but slightly different erlang <-> json encoder/decoder
+pkg_jsonerl_homepage = https://github.com/lambder/jsonerl
+pkg_jsonerl_fetch = git
+pkg_jsonerl_repo = https://github.com/lambder/jsonerl
+pkg_jsonerl_commit = master
+
+PACKAGES += jsonpath
+pkg_jsonpath_name = jsonpath
+pkg_jsonpath_description = Fast Erlang JSON data retrieval and updates via javascript-like notation
+pkg_jsonpath_homepage = https://github.com/GeneStevens/jsonpath
+pkg_jsonpath_fetch = git
+pkg_jsonpath_repo = https://github.com/GeneStevens/jsonpath
+pkg_jsonpath_commit = master
+
+PACKAGES += jsonx
+pkg_jsonx_name = jsonx
+pkg_jsonx_description = JSONX is an Erlang library for efficiently decoding and encoding JSON, written in C.
+pkg_jsonx_homepage = https://github.com/iskra/jsonx
+pkg_jsonx_fetch = git
+pkg_jsonx_repo = https://github.com/iskra/jsonx
+pkg_jsonx_commit = master
+
+PACKAGES += jsx
+pkg_jsx_name = jsx
+pkg_jsx_description = An Erlang application for consuming, producing and manipulating JSON.
+pkg_jsx_homepage = https://github.com/talentdeficit/jsx
+pkg_jsx_fetch = git
+pkg_jsx_repo = https://github.com/talentdeficit/jsx
+pkg_jsx_commit = master
+
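+# A sketch of pinning an index package to a specific ref, assuming the
+# documented dep_NAME_commit override; the tag shown is hypothetical:
+#
+#     DEPS = jsx
+#     dep_jsx_commit = v2.8.0
+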
+PACKAGES += kafka
+pkg_kafka_name = kafka
+pkg_kafka_description = Kafka consumer and producer in Erlang
+pkg_kafka_homepage = https://github.com/wooga/kafka-erlang
+pkg_kafka_fetch = git
+pkg_kafka_repo = https://github.com/wooga/kafka-erlang
+pkg_kafka_commit = master
+
+PACKAGES += kafka_protocol
+pkg_kafka_protocol_name = kafka_protocol
+pkg_kafka_protocol_description = Kafka protocol Erlang library
+pkg_kafka_protocol_homepage = https://github.com/klarna/kafka_protocol
+pkg_kafka_protocol_fetch = git
+pkg_kafka_protocol_repo = https://github.com/klarna/kafka_protocol.git
+pkg_kafka_protocol_commit = master
+
+PACKAGES += kai
+pkg_kai_name = kai
+pkg_kai_description = DHT storage by Takeshi Inoue
+pkg_kai_homepage = https://github.com/synrc/kai
+pkg_kai_fetch = git
+pkg_kai_repo = https://github.com/synrc/kai
+pkg_kai_commit = master
+
+PACKAGES += katja
+pkg_katja_name = katja
+pkg_katja_description = A simple Riemann client written in Erlang.
+pkg_katja_homepage = https://github.com/nifoc/katja
+pkg_katja_fetch = git
+pkg_katja_repo = https://github.com/nifoc/katja
+pkg_katja_commit = master
+
+PACKAGES += kdht
+pkg_kdht_name = kdht
+pkg_kdht_description = kdht is an erlang DHT implementation
+pkg_kdht_homepage = https://github.com/kevinlynx/kdht
+pkg_kdht_fetch = git
+pkg_kdht_repo = https://github.com/kevinlynx/kdht
+pkg_kdht_commit = master
+
+PACKAGES += key2value
+pkg_key2value_name = key2value
+pkg_key2value_description = Erlang 2-way map
+pkg_key2value_homepage = https://github.com/okeuday/key2value
+pkg_key2value_fetch = git
+pkg_key2value_repo = https://github.com/okeuday/key2value
+pkg_key2value_commit = master
+
+PACKAGES += keys1value
+pkg_keys1value_name = keys1value
+pkg_keys1value_description = Erlang set associative map for key lists
+pkg_keys1value_homepage = https://github.com/okeuday/keys1value
+pkg_keys1value_fetch = git
+pkg_keys1value_repo = https://github.com/okeuday/keys1value
+pkg_keys1value_commit = master
+
+PACKAGES += kinetic
+pkg_kinetic_name = kinetic
+pkg_kinetic_description = Erlang Kinesis Client
+pkg_kinetic_homepage = https://github.com/AdRoll/kinetic
+pkg_kinetic_fetch = git
+pkg_kinetic_repo = https://github.com/AdRoll/kinetic
+pkg_kinetic_commit = master
+
+PACKAGES += kjell
+pkg_kjell_name = kjell
+pkg_kjell_description = Erlang Shell
+pkg_kjell_homepage = https://github.com/karlll/kjell
+pkg_kjell_fetch = git
+pkg_kjell_repo = https://github.com/karlll/kjell
+pkg_kjell_commit = master
+
+PACKAGES += kraken
+pkg_kraken_name = kraken
+pkg_kraken_description = Distributed Pubsub Server for Realtime Apps
+pkg_kraken_homepage = https://github.com/Asana/kraken
+pkg_kraken_fetch = git
+pkg_kraken_repo = https://github.com/Asana/kraken
+pkg_kraken_commit = master
+
+PACKAGES += kucumberl
+pkg_kucumberl_name = kucumberl
+pkg_kucumberl_description = A pure-Erlang, open-source implementation of Cucumber
+pkg_kucumberl_homepage = https://github.com/openshine/kucumberl
+pkg_kucumberl_fetch = git
+pkg_kucumberl_repo = https://github.com/openshine/kucumberl
+pkg_kucumberl_commit = master
+
+PACKAGES += kvc
+pkg_kvc_name = kvc
+pkg_kvc_description = KVC - Key Value Coding for Erlang data structures
+pkg_kvc_homepage = https://github.com/etrepum/kvc
+pkg_kvc_fetch = git
+pkg_kvc_repo = https://github.com/etrepum/kvc
+pkg_kvc_commit = master
+
+PACKAGES += kvlists
+pkg_kvlists_name = kvlists
+pkg_kvlists_description = Lists of key-value pairs (decoded JSON) in Erlang
+pkg_kvlists_homepage = https://github.com/jcomellas/kvlists
+pkg_kvlists_fetch = git
+pkg_kvlists_repo = https://github.com/jcomellas/kvlists
+pkg_kvlists_commit = master
+
+PACKAGES += kvs
+pkg_kvs_name = kvs
+pkg_kvs_description = Container and Iterator
+pkg_kvs_homepage = https://github.com/synrc/kvs
+pkg_kvs_fetch = git
+pkg_kvs_repo = https://github.com/synrc/kvs
+pkg_kvs_commit = master
+
+PACKAGES += lager
+pkg_lager_name = lager
+pkg_lager_description = A logging framework for Erlang/OTP.
+pkg_lager_homepage = https://github.com/erlang-lager/lager
+pkg_lager_fetch = git
+pkg_lager_repo = https://github.com/erlang-lager/lager
+pkg_lager_commit = master
+
+PACKAGES += lager_amqp_backend
+pkg_lager_amqp_backend_name = lager_amqp_backend
+pkg_lager_amqp_backend_description = AMQP RabbitMQ Lager backend
+pkg_lager_amqp_backend_homepage = https://github.com/jbrisbin/lager_amqp_backend
+pkg_lager_amqp_backend_fetch = git
+pkg_lager_amqp_backend_repo = https://github.com/jbrisbin/lager_amqp_backend
+pkg_lager_amqp_backend_commit = master
+
+PACKAGES += lager_syslog
+pkg_lager_syslog_name = lager_syslog
+pkg_lager_syslog_description = Syslog backend for lager
+pkg_lager_syslog_homepage = https://github.com/erlang-lager/lager_syslog
+pkg_lager_syslog_fetch = git
+pkg_lager_syslog_repo = https://github.com/erlang-lager/lager_syslog
+pkg_lager_syslog_commit = master
+
+PACKAGES += lambdapad
+pkg_lambdapad_name = lambdapad
+pkg_lambdapad_description = Static site generator using Erlang. Yes, Erlang.
+pkg_lambdapad_homepage = https://github.com/gar1t/lambdapad
+pkg_lambdapad_fetch = git
+pkg_lambdapad_repo = https://github.com/gar1t/lambdapad
+pkg_lambdapad_commit = master
+
+PACKAGES += lasp
+pkg_lasp_name = lasp
+pkg_lasp_description = A Language for Distributed, Eventually Consistent Computations
+pkg_lasp_homepage = http://lasp-lang.org/
+pkg_lasp_fetch = git
+pkg_lasp_repo = https://github.com/lasp-lang/lasp
+pkg_lasp_commit = master
+
+PACKAGES += lasse
+pkg_lasse_name = lasse
+pkg_lasse_description = SSE handler for Cowboy
+pkg_lasse_homepage = https://github.com/inaka/lasse
+pkg_lasse_fetch = git
+pkg_lasse_repo = https://github.com/inaka/lasse
+pkg_lasse_commit = master
+
+PACKAGES += ldap
+pkg_ldap_name = ldap
+pkg_ldap_description = LDAP server written in Erlang
+pkg_ldap_homepage = https://github.com/spawnproc/ldap
+pkg_ldap_fetch = git
+pkg_ldap_repo = https://github.com/spawnproc/ldap
+pkg_ldap_commit = master
+
+PACKAGES += lethink
+pkg_lethink_name = lethink
+pkg_lethink_description = erlang driver for rethinkdb
+pkg_lethink_homepage = https://github.com/taybin/lethink
+pkg_lethink_fetch = git
+pkg_lethink_repo = https://github.com/taybin/lethink
+pkg_lethink_commit = master
+
+PACKAGES += lfe
+pkg_lfe_name = lfe
+pkg_lfe_description = Lisp Flavoured Erlang (LFE)
+pkg_lfe_homepage = https://github.com/rvirding/lfe
+pkg_lfe_fetch = git
+pkg_lfe_repo = https://github.com/rvirding/lfe
+pkg_lfe_commit = master
+
+PACKAGES += ling
+pkg_ling_name = ling
+pkg_ling_description = Erlang on Xen
+pkg_ling_homepage = https://github.com/cloudozer/ling
+pkg_ling_fetch = git
+pkg_ling_repo = https://github.com/cloudozer/ling
+pkg_ling_commit = master
+
+PACKAGES += live
+pkg_live_name = live
+pkg_live_description = Automated module and configuration reloader.
+pkg_live_homepage = http://ninenines.eu
+pkg_live_fetch = git
+pkg_live_repo = https://github.com/ninenines/live
+pkg_live_commit = master
+
+PACKAGES += lmq
+pkg_lmq_name = lmq
+pkg_lmq_description = Lightweight Message Queue
+pkg_lmq_homepage = https://github.com/iij/lmq
+pkg_lmq_fetch = git
+pkg_lmq_repo = https://github.com/iij/lmq
+pkg_lmq_commit = master
+
+PACKAGES += locker
+pkg_locker_name = locker
+pkg_locker_description = Atomic distributed 'check and set' for short-lived keys
+pkg_locker_homepage = https://github.com/wooga/locker
+pkg_locker_fetch = git
+pkg_locker_repo = https://github.com/wooga/locker
+pkg_locker_commit = master
+
+PACKAGES += locks
+pkg_locks_name = locks
+pkg_locks_description = A scalable, deadlock-resolving resource locker
+pkg_locks_homepage = https://github.com/uwiger/locks
+pkg_locks_fetch = git
+pkg_locks_repo = https://github.com/uwiger/locks
+pkg_locks_commit = master
+
+PACKAGES += log4erl
+pkg_log4erl_name = log4erl
+pkg_log4erl_description = A logger for erlang in the spirit of Log4J.
+pkg_log4erl_homepage = https://github.com/ahmednawras/log4erl
+pkg_log4erl_fetch = git
+pkg_log4erl_repo = https://github.com/ahmednawras/log4erl
+pkg_log4erl_commit = master
+
+PACKAGES += lol
+pkg_lol_name = lol
+pkg_lol_description = Lisp on erLang, and programming is fun again
+pkg_lol_homepage = https://github.com/b0oh/lol
+pkg_lol_fetch = git
+pkg_lol_repo = https://github.com/b0oh/lol
+pkg_lol_commit = master
+
+PACKAGES += lucid
+pkg_lucid_name = lucid
+pkg_lucid_description = HTTP/2 server written in Erlang
+pkg_lucid_homepage = https://github.com/tatsuhiro-t/lucid
+pkg_lucid_fetch = git
+pkg_lucid_repo = https://github.com/tatsuhiro-t/lucid
+pkg_lucid_commit = master
+
+PACKAGES += luerl
+pkg_luerl_name = luerl
+pkg_luerl_description = Lua in Erlang
+pkg_luerl_homepage = https://github.com/rvirding/luerl
+pkg_luerl_fetch = git
+pkg_luerl_repo = https://github.com/rvirding/luerl
+pkg_luerl_commit = develop
+
+PACKAGES += luwak
+pkg_luwak_name = luwak
+pkg_luwak_description = Large-object storage interface for Riak
+pkg_luwak_homepage = https://github.com/basho/luwak
+pkg_luwak_fetch = git
+pkg_luwak_repo = https://github.com/basho/luwak
+pkg_luwak_commit = master
+
+PACKAGES += lux
+pkg_lux_name = lux
+pkg_lux_description = Lux (LUcid eXpect scripting) simplifies test automation and provides an Expect-style execution of commands
+pkg_lux_homepage = https://github.com/hawk/lux
+pkg_lux_fetch = git
+pkg_lux_repo = https://github.com/hawk/lux
+pkg_lux_commit = master
+
+PACKAGES += machi
+pkg_machi_name = machi
+pkg_machi_description = Machi file store
+pkg_machi_homepage = https://github.com/basho/machi
+pkg_machi_fetch = git
+pkg_machi_repo = https://github.com/basho/machi
+pkg_machi_commit = master
+
+PACKAGES += mad
+pkg_mad_name = mad
+pkg_mad_description = Small and Fast Rebar Replacement
+pkg_mad_homepage = https://github.com/synrc/mad
+pkg_mad_fetch = git
+pkg_mad_repo = https://github.com/synrc/mad
+pkg_mad_commit = master
+
+PACKAGES += marina
+pkg_marina_name = marina
+pkg_marina_description = Non-blocking Erlang Cassandra CQL3 client
+pkg_marina_homepage = https://github.com/lpgauth/marina
+pkg_marina_fetch = git
+pkg_marina_repo = https://github.com/lpgauth/marina
+pkg_marina_commit = master
+
+PACKAGES += mavg
+pkg_mavg_name = mavg
+pkg_mavg_description = Erlang :: Exponential moving average library
+pkg_mavg_homepage = https://github.com/EchoTeam/mavg
+pkg_mavg_fetch = git
+pkg_mavg_repo = https://github.com/EchoTeam/mavg
+pkg_mavg_commit = master
+
+PACKAGES += mc_erl
+pkg_mc_erl_name = mc_erl
+pkg_mc_erl_description = mc-erl is a server for Minecraft 1.4.7 written in Erlang.
+pkg_mc_erl_homepage = https://github.com/clonejo/mc-erl
+pkg_mc_erl_fetch = git
+pkg_mc_erl_repo = https://github.com/clonejo/mc-erl
+pkg_mc_erl_commit = master
+
+PACKAGES += mcd
+pkg_mcd_name = mcd
+pkg_mcd_description = Fast memcached protocol client in pure Erlang
+pkg_mcd_homepage = https://github.com/EchoTeam/mcd
+pkg_mcd_fetch = git
+pkg_mcd_repo = https://github.com/EchoTeam/mcd
+pkg_mcd_commit = master
+
+PACKAGES += mcerlang
+pkg_mcerlang_name = mcerlang
+pkg_mcerlang_description = The McErlang model checker for Erlang
+pkg_mcerlang_homepage = https://github.com/fredlund/McErlang
+pkg_mcerlang_fetch = git
+pkg_mcerlang_repo = https://github.com/fredlund/McErlang
+pkg_mcerlang_commit = master
+
+PACKAGES += meck
+pkg_meck_name = meck
+pkg_meck_description = A mocking library for Erlang
+pkg_meck_homepage = https://github.com/eproxus/meck
+pkg_meck_fetch = git
+pkg_meck_repo = https://github.com/eproxus/meck
+pkg_meck_commit = master
+
+PACKAGES += mekao
+pkg_mekao_name = mekao
+pkg_mekao_description = SQL constructor
+pkg_mekao_homepage = https://github.com/ddosia/mekao
+pkg_mekao_fetch = git
+pkg_mekao_repo = https://github.com/ddosia/mekao
+pkg_mekao_commit = master
+
+PACKAGES += memo
+pkg_memo_name = memo
+pkg_memo_description = Erlang memoization server
+pkg_memo_homepage = https://github.com/tuncer/memo
+pkg_memo_fetch = git
+pkg_memo_repo = https://github.com/tuncer/memo
+pkg_memo_commit = master
+
+PACKAGES += merge_index
+pkg_merge_index_name = merge_index
+pkg_merge_index_description = MergeIndex is an Erlang library for storing ordered sets on disk. It is very similar to an SSTable (in Google's Bigtable) or an HFile (in Hadoop).
+pkg_merge_index_homepage = https://github.com/basho/merge_index
+pkg_merge_index_fetch = git
+pkg_merge_index_repo = https://github.com/basho/merge_index
+pkg_merge_index_commit = master
+
+PACKAGES += merl
+pkg_merl_name = merl
+pkg_merl_description = Metaprogramming in Erlang
+pkg_merl_homepage = https://github.com/richcarl/merl
+pkg_merl_fetch = git
+pkg_merl_repo = https://github.com/richcarl/merl
+pkg_merl_commit = master
+
+PACKAGES += mimerl
+pkg_mimerl_name = mimerl
+pkg_mimerl_description = library to handle mimetypes
+pkg_mimerl_homepage = https://github.com/benoitc/mimerl
+pkg_mimerl_fetch = git
+pkg_mimerl_repo = https://github.com/benoitc/mimerl
+pkg_mimerl_commit = master
+
+PACKAGES += mimetypes
+pkg_mimetypes_name = mimetypes
+pkg_mimetypes_description = Erlang MIME types library
+pkg_mimetypes_homepage = https://github.com/spawngrid/mimetypes
+pkg_mimetypes_fetch = git
+pkg_mimetypes_repo = https://github.com/spawngrid/mimetypes
+pkg_mimetypes_commit = master
+
+PACKAGES += mixer
+pkg_mixer_name = mixer
+pkg_mixer_description = Mix in functions from other modules
+pkg_mixer_homepage = https://github.com/chef/mixer
+pkg_mixer_fetch = git
+pkg_mixer_repo = https://github.com/chef/mixer
+pkg_mixer_commit = master
+
+PACKAGES += mochiweb
+pkg_mochiweb_name = mochiweb
+pkg_mochiweb_description = MochiWeb is an Erlang library for building lightweight HTTP servers.
+pkg_mochiweb_homepage = https://github.com/mochi/mochiweb
+pkg_mochiweb_fetch = git
+pkg_mochiweb_repo = https://github.com/mochi/mochiweb
+pkg_mochiweb_commit = master
+
+PACKAGES += mochiweb_xpath
+pkg_mochiweb_xpath_name = mochiweb_xpath
+pkg_mochiweb_xpath_description = XPath support for mochiweb's html parser
+pkg_mochiweb_xpath_homepage = https://github.com/retnuh/mochiweb_xpath
+pkg_mochiweb_xpath_fetch = git
+pkg_mochiweb_xpath_repo = https://github.com/retnuh/mochiweb_xpath
+pkg_mochiweb_xpath_commit = master
+
+PACKAGES += mockgyver
+pkg_mockgyver_name = mockgyver
+pkg_mockgyver_description = A mocking library for Erlang
+pkg_mockgyver_homepage = https://github.com/klajo/mockgyver
+pkg_mockgyver_fetch = git
+pkg_mockgyver_repo = https://github.com/klajo/mockgyver
+pkg_mockgyver_commit = master
+
+PACKAGES += modlib
+pkg_modlib_name = modlib
+pkg_modlib_description = Web framework based on Erlang's inets httpd
+pkg_modlib_homepage = https://github.com/gar1t/modlib
+pkg_modlib_fetch = git
+pkg_modlib_repo = https://github.com/gar1t/modlib
+pkg_modlib_commit = master
+
+PACKAGES += mongodb
+pkg_mongodb_name = mongodb
+pkg_mongodb_description = MongoDB driver for Erlang
+pkg_mongodb_homepage = https://github.com/comtihon/mongodb-erlang
+pkg_mongodb_fetch = git
+pkg_mongodb_repo = https://github.com/comtihon/mongodb-erlang
+pkg_mongodb_commit = master
+
+PACKAGES += mongooseim
+pkg_mongooseim_name = mongooseim
+pkg_mongooseim_description = Jabber / XMPP server with focus on performance and scalability, by Erlang Solutions
+pkg_mongooseim_homepage = https://www.erlang-solutions.com/products/mongooseim-massively-scalable-ejabberd-platform
+pkg_mongooseim_fetch = git
+pkg_mongooseim_repo = https://github.com/esl/MongooseIM
+pkg_mongooseim_commit = master
+
+PACKAGES += moyo
+pkg_moyo_name = moyo
+pkg_moyo_description = Erlang utility functions library
+pkg_moyo_homepage = https://github.com/dwango/moyo
+pkg_moyo_fetch = git
+pkg_moyo_repo = https://github.com/dwango/moyo
+pkg_moyo_commit = master
+
+PACKAGES += msgpack
+pkg_msgpack_name = msgpack
+pkg_msgpack_description = MessagePack (de)serializer implementation for Erlang
+pkg_msgpack_homepage = https://github.com/msgpack/msgpack-erlang
+pkg_msgpack_fetch = git
+pkg_msgpack_repo = https://github.com/msgpack/msgpack-erlang
+pkg_msgpack_commit = master
+
+PACKAGES += mu2
+pkg_mu2_name = mu2
+pkg_mu2_description = Erlang mutation testing tool
+pkg_mu2_homepage = https://github.com/ramsay-t/mu2
+pkg_mu2_fetch = git
+pkg_mu2_repo = https://github.com/ramsay-t/mu2
+pkg_mu2_commit = master
+
+PACKAGES += mustache
+pkg_mustache_name = mustache
+pkg_mustache_description = Mustache template engine for Erlang.
+pkg_mustache_homepage = https://github.com/mojombo/mustache.erl
+pkg_mustache_fetch = git
+pkg_mustache_repo = https://github.com/mojombo/mustache.erl
+pkg_mustache_commit = master
+
+PACKAGES += myproto
+pkg_myproto_name = myproto
+pkg_myproto_description = MySQL Server Protocol in Erlang
+pkg_myproto_homepage = https://github.com/altenwald/myproto
+pkg_myproto_fetch = git
+pkg_myproto_repo = https://github.com/altenwald/myproto
+pkg_myproto_commit = master
+
+PACKAGES += mysql
+pkg_mysql_name = mysql
+pkg_mysql_description = MySQL client library for Erlang/OTP
+pkg_mysql_homepage = https://github.com/mysql-otp/mysql-otp
+pkg_mysql_fetch = git
+pkg_mysql_repo = https://github.com/mysql-otp/mysql-otp
+pkg_mysql_commit = 1.5.1
+
+PACKAGES += n2o
+pkg_n2o_name = n2o
+pkg_n2o_description = WebSocket Application Server
+pkg_n2o_homepage = https://github.com/5HT/n2o
+pkg_n2o_fetch = git
+pkg_n2o_repo = https://github.com/5HT/n2o
+pkg_n2o_commit = master
+
+PACKAGES += nat_upnp
+pkg_nat_upnp_name = nat_upnp
+pkg_nat_upnp_description = Erlang library to map your internal port to an external one using UPnP IGD
+pkg_nat_upnp_homepage = https://github.com/benoitc/nat_upnp
+pkg_nat_upnp_fetch = git
+pkg_nat_upnp_repo = https://github.com/benoitc/nat_upnp
+pkg_nat_upnp_commit = master
+
+PACKAGES += neo4j
+pkg_neo4j_name = neo4j
+pkg_neo4j_description = Erlang client library for Neo4J.
+pkg_neo4j_homepage = https://github.com/dmitriid/neo4j-erlang
+pkg_neo4j_fetch = git
+pkg_neo4j_repo = https://github.com/dmitriid/neo4j-erlang
+pkg_neo4j_commit = master
+
+PACKAGES += neotoma
+pkg_neotoma_name = neotoma
+pkg_neotoma_description = Erlang library and packrat parser-generator for parsing expression grammars.
+pkg_neotoma_homepage = https://github.com/seancribbs/neotoma
+pkg_neotoma_fetch = git
+pkg_neotoma_repo = https://github.com/seancribbs/neotoma
+pkg_neotoma_commit = master
+
+PACKAGES += newrelic
+pkg_newrelic_name = newrelic
+pkg_newrelic_description = Erlang library for sending metrics to New Relic
+pkg_newrelic_homepage = https://github.com/wooga/newrelic-erlang
+pkg_newrelic_fetch = git
+pkg_newrelic_repo = https://github.com/wooga/newrelic-erlang
+pkg_newrelic_commit = master
+
+PACKAGES += nifty
+pkg_nifty_name = nifty
+pkg_nifty_description = Erlang NIF wrapper generator
+pkg_nifty_homepage = https://github.com/parapluu/nifty
+pkg_nifty_fetch = git
+pkg_nifty_repo = https://github.com/parapluu/nifty
+pkg_nifty_commit = master
+
+PACKAGES += nitrogen_core
+pkg_nitrogen_core_name = nitrogen_core
+pkg_nitrogen_core_description = The core Nitrogen library.
+pkg_nitrogen_core_homepage = http://nitrogenproject.com/
+pkg_nitrogen_core_fetch = git
+pkg_nitrogen_core_repo = https://github.com/nitrogen/nitrogen_core
+pkg_nitrogen_core_commit = master
+
+PACKAGES += nkbase
+pkg_nkbase_name = nkbase
+pkg_nkbase_description = NkBASE distributed database
+pkg_nkbase_homepage = https://github.com/Nekso/nkbase
+pkg_nkbase_fetch = git
+pkg_nkbase_repo = https://github.com/Nekso/nkbase
+pkg_nkbase_commit = develop
+
+PACKAGES += nkdocker
+pkg_nkdocker_name = nkdocker
+pkg_nkdocker_description = Erlang Docker client
+pkg_nkdocker_homepage = https://github.com/Nekso/nkdocker
+pkg_nkdocker_fetch = git
+pkg_nkdocker_repo = https://github.com/Nekso/nkdocker
+pkg_nkdocker_commit = master
+
+PACKAGES += nkpacket
+pkg_nkpacket_name = nkpacket
+pkg_nkpacket_description = Generic Erlang transport layer
+pkg_nkpacket_homepage = https://github.com/Nekso/nkpacket
+pkg_nkpacket_fetch = git
+pkg_nkpacket_repo = https://github.com/Nekso/nkpacket
+pkg_nkpacket_commit = master
+
+PACKAGES += nksip
+pkg_nksip_name = nksip
+pkg_nksip_description = Erlang SIP application server
+pkg_nksip_homepage = https://github.com/kalta/nksip
+pkg_nksip_fetch = git
+pkg_nksip_repo = https://github.com/kalta/nksip
+pkg_nksip_commit = master
+
+PACKAGES += nodefinder
+pkg_nodefinder_name = nodefinder
+pkg_nodefinder_description = automatic node discovery via UDP multicast
+pkg_nodefinder_homepage = https://github.com/erlanger/nodefinder
+pkg_nodefinder_fetch = git
+pkg_nodefinder_repo = https://github.com/okeuday/nodefinder
+pkg_nodefinder_commit = master
+
+PACKAGES += nprocreg
+pkg_nprocreg_name = nprocreg
+pkg_nprocreg_description = Minimal Distributed Erlang Process Registry
+pkg_nprocreg_homepage = http://nitrogenproject.com/
+pkg_nprocreg_fetch = git
+pkg_nprocreg_repo = https://github.com/nitrogen/nprocreg
+pkg_nprocreg_commit = master
+
+PACKAGES += oauth
+pkg_oauth_name = oauth
+pkg_oauth_description = An Erlang OAuth 1.0 implementation
+pkg_oauth_homepage = https://github.com/tim/erlang-oauth
+pkg_oauth_fetch = git
+pkg_oauth_repo = https://github.com/tim/erlang-oauth
+pkg_oauth_commit = master
+
+PACKAGES += oauth2
+pkg_oauth2_name = oauth2
+pkg_oauth2_description = Erlang OAuth2 implementation
+pkg_oauth2_homepage = https://github.com/kivra/oauth2
+pkg_oauth2_fetch = git
+pkg_oauth2_repo = https://github.com/kivra/oauth2
+pkg_oauth2_commit = master
+
+PACKAGES += observer_cli
+pkg_observer_cli_name = observer_cli
+pkg_observer_cli_description = Visualize Erlang/Elixir Nodes On The Command Line
+pkg_observer_cli_homepage = http://zhongwencool.github.io/observer_cli
+pkg_observer_cli_fetch = git
+pkg_observer_cli_repo = https://github.com/zhongwencool/observer_cli
+pkg_observer_cli_commit = master
+
+PACKAGES += octopus
+pkg_octopus_name = octopus
+pkg_octopus_description = Small and flexible pool manager written in Erlang
+pkg_octopus_homepage = https://github.com/erlangbureau/octopus
+pkg_octopus_fetch = git
+pkg_octopus_repo = https://github.com/erlangbureau/octopus
+pkg_octopus_commit = master
+
+PACKAGES += of_protocol
+pkg_of_protocol_name = of_protocol
+pkg_of_protocol_description = OpenFlow Protocol Library for Erlang
+pkg_of_protocol_homepage = https://github.com/FlowForwarding/of_protocol
+pkg_of_protocol_fetch = git
+pkg_of_protocol_repo = https://github.com/FlowForwarding/of_protocol
+pkg_of_protocol_commit = master
+
+PACKAGES += opencouch
+pkg_opencouch_name = couch
+pkg_opencouch_description = An embeddable document-oriented database compatible with Apache CouchDB
+pkg_opencouch_homepage = https://github.com/benoitc/opencouch
+pkg_opencouch_fetch = git
+pkg_opencouch_repo = https://github.com/benoitc/opencouch
+pkg_opencouch_commit = master
+
+PACKAGES += openflow
+pkg_openflow_name = openflow
+pkg_openflow_description = An OpenFlow controller written in pure erlang
+pkg_openflow_homepage = https://github.com/renatoaguiar/erlang-openflow
+pkg_openflow_fetch = git
+pkg_openflow_repo = https://github.com/renatoaguiar/erlang-openflow
+pkg_openflow_commit = master
+
+PACKAGES += openid
+pkg_openid_name = openid
+pkg_openid_description = Erlang OpenID
+pkg_openid_homepage = https://github.com/brendonh/erl_openid
+pkg_openid_fetch = git
+pkg_openid_repo = https://github.com/brendonh/erl_openid
+pkg_openid_commit = master
+
+PACKAGES += openpoker
+pkg_openpoker_name = openpoker
+pkg_openpoker_description = Genesis Texas hold'em Game Server
+pkg_openpoker_homepage = https://github.com/hpyhacking/openpoker
+pkg_openpoker_fetch = git
+pkg_openpoker_repo = https://github.com/hpyhacking/openpoker
+pkg_openpoker_commit = master
+
+PACKAGES += otpbp
+pkg_otpbp_name = otpbp
+pkg_otpbp_description = Parse transform for using new OTP functions in old Erlang/OTP releases (R15, R16, 17, 18, 19)
+pkg_otpbp_homepage = https://github.com/Ledest/otpbp
+pkg_otpbp_fetch = git
+pkg_otpbp_repo = https://github.com/Ledest/otpbp
+pkg_otpbp_commit = master
+
+PACKAGES += pal
+pkg_pal_name = pal
+pkg_pal_description = Pragmatic Authentication Library
+pkg_pal_homepage = https://github.com/manifest/pal
+pkg_pal_fetch = git
+pkg_pal_repo = https://github.com/manifest/pal
+pkg_pal_commit = master
+
+PACKAGES += parse_trans
+pkg_parse_trans_name = parse_trans
+pkg_parse_trans_description = Parse transform utilities for Erlang
+pkg_parse_trans_homepage = https://github.com/uwiger/parse_trans
+pkg_parse_trans_fetch = git
+pkg_parse_trans_repo = https://github.com/uwiger/parse_trans
+pkg_parse_trans_commit = master
+
+PACKAGES += parsexml
+pkg_parsexml_name = parsexml
+pkg_parsexml_description = Simple DOM XML parser with convenient and very simple API
+pkg_parsexml_homepage = https://github.com/maxlapshin/parsexml
+pkg_parsexml_fetch = git
+pkg_parsexml_repo = https://github.com/maxlapshin/parsexml
+pkg_parsexml_commit = master
+
+PACKAGES += partisan
+pkg_partisan_name = partisan
+pkg_partisan_description = High-performance, high-scalability distributed computing with Erlang and Elixir.
+pkg_partisan_homepage = http://partisan.cloud
+pkg_partisan_fetch = git
+pkg_partisan_repo = https://github.com/lasp-lang/partisan
+pkg_partisan_commit = master
+
+PACKAGES += pegjs
+pkg_pegjs_name = pegjs
+pkg_pegjs_description = An implementation of PEG.js grammar for Erlang.
+pkg_pegjs_homepage = https://github.com/dmitriid/pegjs
+pkg_pegjs_fetch = git
+pkg_pegjs_repo = https://github.com/dmitriid/pegjs
+pkg_pegjs_commit = master
+
+PACKAGES += percept2
+pkg_percept2_name = percept2
+pkg_percept2_description = Concurrent profiling tool for Erlang
+pkg_percept2_homepage = https://github.com/huiqing/percept2
+pkg_percept2_fetch = git
+pkg_percept2_repo = https://github.com/huiqing/percept2
+pkg_percept2_commit = master
+
+PACKAGES += pgo
+pkg_pgo_name = pgo
+pkg_pgo_description = Erlang Postgres client and connection pool
+pkg_pgo_homepage = https://github.com/erleans/pgo.git
+pkg_pgo_fetch = git
+pkg_pgo_repo = https://github.com/erleans/pgo.git
+pkg_pgo_commit = master
+
+PACKAGES += pgsql
+pkg_pgsql_name = pgsql
+pkg_pgsql_description = Erlang PostgreSQL driver
+pkg_pgsql_homepage = https://github.com/semiocast/pgsql
+pkg_pgsql_fetch = git
+pkg_pgsql_repo = https://github.com/semiocast/pgsql
+pkg_pgsql_commit = master
+
+PACKAGES += pkgx
+pkg_pkgx_name = pkgx
+pkg_pkgx_description = Build .deb packages from Erlang releases
+pkg_pkgx_homepage = https://github.com/arjan/pkgx
+pkg_pkgx_fetch = git
+pkg_pkgx_repo = https://github.com/arjan/pkgx
+pkg_pkgx_commit = master
+
+PACKAGES += pkt
+pkg_pkt_name = pkt
+pkg_pkt_description = Erlang network protocol library
+pkg_pkt_homepage = https://github.com/msantos/pkt
+pkg_pkt_fetch = git
+pkg_pkt_repo = https://github.com/msantos/pkt
+pkg_pkt_commit = master
+
+PACKAGES += plain_fsm
+pkg_plain_fsm_name = plain_fsm
+pkg_plain_fsm_description = A behaviour/support library for writing plain Erlang FSMs.
+pkg_plain_fsm_homepage = https://github.com/uwiger/plain_fsm
+pkg_plain_fsm_fetch = git
+pkg_plain_fsm_repo = https://github.com/uwiger/plain_fsm
+pkg_plain_fsm_commit = master
+
+PACKAGES += plumtree
+pkg_plumtree_name = plumtree
+pkg_plumtree_description = Epidemic Broadcast Trees
+pkg_plumtree_homepage = https://github.com/helium/plumtree
+pkg_plumtree_fetch = git
+pkg_plumtree_repo = https://github.com/helium/plumtree
+pkg_plumtree_commit = master
+
+PACKAGES += pmod_transform
+pkg_pmod_transform_name = pmod_transform
+pkg_pmod_transform_description = Parse transform for parameterized modules
+pkg_pmod_transform_homepage = https://github.com/erlang/pmod_transform
+pkg_pmod_transform_fetch = git
+pkg_pmod_transform_repo = https://github.com/erlang/pmod_transform
+pkg_pmod_transform_commit = master
+
+PACKAGES += pobox
+pkg_pobox_name = pobox
+pkg_pobox_description = External buffer processes to protect against mailbox overflow in Erlang
+pkg_pobox_homepage = https://github.com/ferd/pobox
+pkg_pobox_fetch = git
+pkg_pobox_repo = https://github.com/ferd/pobox
+pkg_pobox_commit = master
+
+PACKAGES += ponos
+pkg_ponos_name = ponos
+pkg_ponos_description = ponos is a simple yet powerful load generator written in erlang
+pkg_ponos_homepage = https://github.com/klarna/ponos
+pkg_ponos_fetch = git
+pkg_ponos_repo = https://github.com/klarna/ponos
+pkg_ponos_commit = master
+
+PACKAGES += poolboy
+pkg_poolboy_name = poolboy
+pkg_poolboy_description = A hunky Erlang worker pool factory
+pkg_poolboy_homepage = https://github.com/devinus/poolboy
+pkg_poolboy_fetch = git
+pkg_poolboy_repo = https://github.com/devinus/poolboy
+pkg_poolboy_commit = master
+
+PACKAGES += pooler
+pkg_pooler_name = pooler
+pkg_pooler_description = An OTP Process Pool Application
+pkg_pooler_homepage = https://github.com/seth/pooler
+pkg_pooler_fetch = git
+pkg_pooler_repo = https://github.com/seth/pooler
+pkg_pooler_commit = master
+
+PACKAGES += pqueue
+pkg_pqueue_name = pqueue
+pkg_pqueue_description = Erlang Priority Queues
+pkg_pqueue_homepage = https://github.com/okeuday/pqueue
+pkg_pqueue_fetch = git
+pkg_pqueue_repo = https://github.com/okeuday/pqueue
+pkg_pqueue_commit = master
+
+PACKAGES += procket
+pkg_procket_name = procket
+pkg_procket_description = Erlang interface to low level socket operations
+pkg_procket_homepage = http://blog.listincomprehension.com/search/label/procket
+pkg_procket_fetch = git
+pkg_procket_repo = https://github.com/msantos/procket
+pkg_procket_commit = master
+
+PACKAGES += prometheus
+pkg_prometheus_name = prometheus
+pkg_prometheus_description = Prometheus.io client in Erlang
+pkg_prometheus_homepage = https://github.com/deadtrickster/prometheus.erl
+pkg_prometheus_fetch = git
+pkg_prometheus_repo = https://github.com/deadtrickster/prometheus.erl
+pkg_prometheus_commit = master
+
+PACKAGES += prop
+pkg_prop_name = prop
+pkg_prop_description = An Erlang code scaffolding and generator system.
+pkg_prop_homepage = https://github.com/nuex/prop
+pkg_prop_fetch = git
+pkg_prop_repo = https://github.com/nuex/prop
+pkg_prop_commit = master
+
+PACKAGES += proper
+pkg_proper_name = proper
+pkg_proper_description = PropEr: a QuickCheck-inspired property-based testing tool for Erlang.
+pkg_proper_homepage = http://proper.softlab.ntua.gr
+pkg_proper_fetch = git
+pkg_proper_repo = https://github.com/manopapad/proper
+pkg_proper_commit = master
+
+PACKAGES += props
+pkg_props_name = props
+pkg_props_description = Property structure library
+pkg_props_homepage = https://github.com/greyarea/props
+pkg_props_fetch = git
+pkg_props_repo = https://github.com/greyarea/props
+pkg_props_commit = master
+
+PACKAGES += protobuffs
+pkg_protobuffs_name = protobuffs
+pkg_protobuffs_description = An implementation of Google's Protocol Buffers for Erlang, based on ngerakines/erlang_protobuffs.
+pkg_protobuffs_homepage = https://github.com/basho/erlang_protobuffs
+pkg_protobuffs_fetch = git
+pkg_protobuffs_repo = https://github.com/basho/erlang_protobuffs
+pkg_protobuffs_commit = master
+
+PACKAGES += psycho
+pkg_psycho_name = psycho
+pkg_psycho_description = HTTP server that provides a WSGI-like interface for applications and middleware.
+pkg_psycho_homepage = https://github.com/gar1t/psycho
+pkg_psycho_fetch = git
+pkg_psycho_repo = https://github.com/gar1t/psycho
+pkg_psycho_commit = master
+
+PACKAGES += purity
+pkg_purity_name = purity
+pkg_purity_description = A side-effect analyzer for Erlang
+pkg_purity_homepage = https://github.com/mpitid/purity
+pkg_purity_fetch = git
+pkg_purity_repo = https://github.com/mpitid/purity
+pkg_purity_commit = master
+
+PACKAGES += push_service
+pkg_push_service_name = push_service
+pkg_push_service_description = Push service
+pkg_push_service_homepage = https://github.com/hairyhum/push_service
+pkg_push_service_fetch = git
+pkg_push_service_repo = https://github.com/hairyhum/push_service
+pkg_push_service_commit = master
+
+PACKAGES += qdate
+pkg_qdate_name = qdate
+pkg_qdate_description = Date, time, and timezone parsing, formatting, and conversion for Erlang.
+pkg_qdate_homepage = https://github.com/choptastic/qdate
+pkg_qdate_fetch = git
+pkg_qdate_repo = https://github.com/choptastic/qdate
+pkg_qdate_commit = master
+
+PACKAGES += qrcode
+pkg_qrcode_name = qrcode
+pkg_qrcode_description = QR Code encoder in Erlang
+pkg_qrcode_homepage = https://github.com/komone/qrcode
+pkg_qrcode_fetch = git
+pkg_qrcode_repo = https://github.com/komone/qrcode
+pkg_qrcode_commit = master
+
+PACKAGES += quest
+pkg_quest_name = quest
+pkg_quest_description = Learn Erlang through this set of challenges. An interactive system for getting to know Erlang.
+pkg_quest_homepage = https://github.com/eriksoe/ErlangQuest
+pkg_quest_fetch = git
+pkg_quest_repo = https://github.com/eriksoe/ErlangQuest
+pkg_quest_commit = master
+
+PACKAGES += quickrand
+pkg_quickrand_name = quickrand
+pkg_quickrand_description = Quick Erlang Random Number Generation
+pkg_quickrand_homepage = https://github.com/okeuday/quickrand
+pkg_quickrand_fetch = git
+pkg_quickrand_repo = https://github.com/okeuday/quickrand
+pkg_quickrand_commit = master
+
+PACKAGES += rabbit
+pkg_rabbit_name = rabbit
+pkg_rabbit_description = RabbitMQ Server
+pkg_rabbit_homepage = https://www.rabbitmq.com/
+pkg_rabbit_fetch = git
+pkg_rabbit_repo = https://github.com/rabbitmq/rabbitmq-server.git
+pkg_rabbit_commit = master
+
+PACKAGES += rabbit_exchange_type_riak
+pkg_rabbit_exchange_type_riak_name = rabbit_exchange_type_riak
+pkg_rabbit_exchange_type_riak_description = Custom RabbitMQ exchange type for sticking messages in Riak
+pkg_rabbit_exchange_type_riak_homepage = https://github.com/jbrisbin/riak-exchange
+pkg_rabbit_exchange_type_riak_fetch = git
+pkg_rabbit_exchange_type_riak_repo = https://github.com/jbrisbin/riak-exchange
+pkg_rabbit_exchange_type_riak_commit = master
+
+PACKAGES += rack
+pkg_rack_name = rack
+pkg_rack_description = Rack handler for erlang
+pkg_rack_homepage = https://github.com/erlyvideo/rack
+pkg_rack_fetch = git
+pkg_rack_repo = https://github.com/erlyvideo/rack
+pkg_rack_commit = master
+
+PACKAGES += radierl
+pkg_radierl_name = radierl
+pkg_radierl_description = RADIUS protocol stack implemented in Erlang.
+pkg_radierl_homepage = https://github.com/vances/radierl
+pkg_radierl_fetch = git
+pkg_radierl_repo = https://github.com/vances/radierl
+pkg_radierl_commit = master
+
+PACKAGES += rafter
+pkg_rafter_name = rafter
+pkg_rafter_description = An Erlang library application which implements the Raft consensus protocol
+pkg_rafter_homepage = https://github.com/andrewjstone/rafter
+pkg_rafter_fetch = git
+pkg_rafter_repo = https://github.com/andrewjstone/rafter
+pkg_rafter_commit = master
+
+PACKAGES += ranch
+pkg_ranch_name = ranch
+pkg_ranch_description = Socket acceptor pool for TCP protocols.
+pkg_ranch_homepage = http://ninenines.eu
+pkg_ranch_fetch = git
+pkg_ranch_repo = https://github.com/ninenines/ranch
+pkg_ranch_commit = 1.2.1
+
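+# Most entries above track "master"; a few, such as ranch (1.2.1) and
+# mysql (1.5.1), pin a release. A sketch of overriding an entry outright,
+# assuming the documented dep_NAME = <fetch-method> <repo> <ref> form; the
+# fork URL and branch are hypothetical:
+#
+#     DEPS = ranch
+#     dep_ranch = git https://github.com/example/ranch my-branch
+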
+PACKAGES += rbeacon
+pkg_rbeacon_name = rbeacon
+pkg_rbeacon_description = LAN discovery and presence in Erlang.
+pkg_rbeacon_homepage = https://github.com/refuge/rbeacon
+pkg_rbeacon_fetch = git
+pkg_rbeacon_repo = https://github.com/refuge/rbeacon
+pkg_rbeacon_commit = master
+
+PACKAGES += rebar
+pkg_rebar_name = rebar
+pkg_rebar_description = Erlang build tool that makes it easy to compile and test Erlang applications, port drivers and releases.
+pkg_rebar_homepage = http://www.rebar3.org
+pkg_rebar_fetch = git
+pkg_rebar_repo = https://github.com/rebar/rebar3
+pkg_rebar_commit = master
+
+PACKAGES += rebus
+pkg_rebus_name = rebus
+pkg_rebus_description = A stupid simple, internal, pub/sub event bus written in- and for Erlang.
+pkg_rebus_homepage = https://github.com/olle/rebus
+pkg_rebus_fetch = git
+pkg_rebus_repo = https://github.com/olle/rebus
+pkg_rebus_commit = master
+
+PACKAGES += rec2json
+pkg_rec2json_name = rec2json
+pkg_rec2json_description = Compile erlang record definitions into modules to convert them to/from json easily.
+pkg_rec2json_homepage = https://github.com/lordnull/rec2json
+pkg_rec2json_fetch = git
+pkg_rec2json_repo = https://github.com/lordnull/rec2json
+pkg_rec2json_commit = master
+
+PACKAGES += recon
+pkg_recon_name = recon
+pkg_recon_description = Collection of functions and scripts to debug Erlang in production.
+pkg_recon_homepage = https://github.com/ferd/recon
+pkg_recon_fetch = git
+pkg_recon_repo = https://github.com/ferd/recon
+pkg_recon_commit = master
+
+PACKAGES += record_info
+pkg_record_info_name = record_info
+pkg_record_info_description = Convert between record and proplist
+pkg_record_info_homepage = https://github.com/bipthelin/erlang-record_info
+pkg_record_info_fetch = git
+pkg_record_info_repo = https://github.com/bipthelin/erlang-record_info
+pkg_record_info_commit = master
+
+PACKAGES += redgrid
+pkg_redgrid_name = redgrid
+pkg_redgrid_description = automatic Erlang node discovery via redis
+pkg_redgrid_homepage = https://github.com/jkvor/redgrid
+pkg_redgrid_fetch = git
+pkg_redgrid_repo = https://github.com/jkvor/redgrid
+pkg_redgrid_commit = master
+
+PACKAGES += redo
+pkg_redo_name = redo
+pkg_redo_description = pipelined erlang redis client
+pkg_redo_homepage = https://github.com/jkvor/redo
+pkg_redo_fetch = git
+pkg_redo_repo = https://github.com/jkvor/redo
+pkg_redo_commit = master
+
+PACKAGES += reload_mk
+pkg_reload_mk_name = reload_mk
+pkg_reload_mk_description = Live reload plugin for erlang.mk.
+pkg_reload_mk_homepage = https://github.com/bullno1/reload.mk
+pkg_reload_mk_fetch = git
+pkg_reload_mk_repo = https://github.com/bullno1/reload.mk
+pkg_reload_mk_commit = master
+
+PACKAGES += reltool_util
+pkg_reltool_util_name = reltool_util
+pkg_reltool_util_description = Erlang reltool utility functionality application
+pkg_reltool_util_homepage = https://github.com/okeuday/reltool_util
+pkg_reltool_util_fetch = git
+pkg_reltool_util_repo = https://github.com/okeuday/reltool_util
+pkg_reltool_util_commit = master
+
+PACKAGES += relx
+pkg_relx_name = relx
+pkg_relx_description = Sane, simple release creation for Erlang
+pkg_relx_homepage = https://github.com/erlware/relx
+pkg_relx_fetch = git
+pkg_relx_repo = https://github.com/erlware/relx
+pkg_relx_commit = master
+
+PACKAGES += resource_discovery
+pkg_resource_discovery_name = resource_discovery
+pkg_resource_discovery_description = An application used to dynamically discover resources present in an Erlang node cluster.
+pkg_resource_discovery_homepage = http://erlware.org/
+pkg_resource_discovery_fetch = git
+pkg_resource_discovery_repo = https://github.com/erlware/resource_discovery
+pkg_resource_discovery_commit = master
+
+PACKAGES += restc
+pkg_restc_name = restc
+pkg_restc_description = Erlang Rest Client
+pkg_restc_homepage = https://github.com/kivra/restclient
+pkg_restc_fetch = git
+pkg_restc_repo = https://github.com/kivra/restclient
+pkg_restc_commit = master
+
+PACKAGES += rfc4627_jsonrpc
+pkg_rfc4627_jsonrpc_name = rfc4627_jsonrpc
+pkg_rfc4627_jsonrpc_description = Erlang RFC4627 (JSON) codec and JSON-RPC server implementation.
+pkg_rfc4627_jsonrpc_homepage = https://github.com/tonyg/erlang-rfc4627
+pkg_rfc4627_jsonrpc_fetch = git
+pkg_rfc4627_jsonrpc_repo = https://github.com/tonyg/erlang-rfc4627
+pkg_rfc4627_jsonrpc_commit = master
+
+PACKAGES += riak_control
+pkg_riak_control_name = riak_control
+pkg_riak_control_description = Webmachine-based administration interface for Riak.
+pkg_riak_control_homepage = https://github.com/basho/riak_control
+pkg_riak_control_fetch = git
+pkg_riak_control_repo = https://github.com/basho/riak_control
+pkg_riak_control_commit = master
+
+PACKAGES += riak_core
+pkg_riak_core_name = riak_core
+pkg_riak_core_description = Distributed systems infrastructure used by Riak.
+pkg_riak_core_homepage = https://github.com/basho/riak_core
+pkg_riak_core_fetch = git
+pkg_riak_core_repo = https://github.com/basho/riak_core
+pkg_riak_core_commit = master
+
+PACKAGES += riak_dt
+pkg_riak_dt_name = riak_dt
+pkg_riak_dt_description = Convergent replicated datatypes in Erlang
+pkg_riak_dt_homepage = https://github.com/basho/riak_dt
+pkg_riak_dt_fetch = git
+pkg_riak_dt_repo = https://github.com/basho/riak_dt
+pkg_riak_dt_commit = master
+
+PACKAGES += riak_ensemble
+pkg_riak_ensemble_name = riak_ensemble
+pkg_riak_ensemble_description = Multi-Paxos framework in Erlang
+pkg_riak_ensemble_homepage = https://github.com/basho/riak_ensemble
+pkg_riak_ensemble_fetch = git
+pkg_riak_ensemble_repo = https://github.com/basho/riak_ensemble
+pkg_riak_ensemble_commit = master
+
+PACKAGES += riak_kv
+pkg_riak_kv_name = riak_kv
+pkg_riak_kv_description = Riak Key/Value Store
+pkg_riak_kv_homepage = https://github.com/basho/riak_kv
+pkg_riak_kv_fetch = git
+pkg_riak_kv_repo = https://github.com/basho/riak_kv
+pkg_riak_kv_commit = master
+
+PACKAGES += riak_pg
+pkg_riak_pg_name = riak_pg
+pkg_riak_pg_description = Distributed process groups with riak_core.
+pkg_riak_pg_homepage = https://github.com/cmeiklejohn/riak_pg
+pkg_riak_pg_fetch = git
+pkg_riak_pg_repo = https://github.com/cmeiklejohn/riak_pg
+pkg_riak_pg_commit = master
+
+PACKAGES += riak_pipe
+pkg_riak_pipe_name = riak_pipe
+pkg_riak_pipe_description = Riak Pipelines
+pkg_riak_pipe_homepage = https://github.com/basho/riak_pipe
+pkg_riak_pipe_fetch = git
+pkg_riak_pipe_repo = https://github.com/basho/riak_pipe
+pkg_riak_pipe_commit = master
+
+PACKAGES += riak_sysmon
+pkg_riak_sysmon_name = riak_sysmon
+pkg_riak_sysmon_description = Simple OTP app for managing Erlang VM system_monitor event messages
+pkg_riak_sysmon_homepage = https://github.com/basho/riak_sysmon
+pkg_riak_sysmon_fetch = git
+pkg_riak_sysmon_repo = https://github.com/basho/riak_sysmon
+pkg_riak_sysmon_commit = master
+
+PACKAGES += riak_test
+pkg_riak_test_name = riak_test
+pkg_riak_test_description = I'm in your cluster, testing your riaks
+pkg_riak_test_homepage = https://github.com/basho/riak_test
+pkg_riak_test_fetch = git
+pkg_riak_test_repo = https://github.com/basho/riak_test
+pkg_riak_test_commit = master
+
+PACKAGES += riakc
+pkg_riakc_name = riakc
+pkg_riakc_description = Erlang clients for Riak.
+pkg_riakc_homepage = https://github.com/basho/riak-erlang-client
+pkg_riakc_fetch = git
+pkg_riakc_repo = https://github.com/basho/riak-erlang-client
+pkg_riakc_commit = master
+
+PACKAGES += riakhttpc
+pkg_riakhttpc_name = riakhttpc
+pkg_riakhttpc_description = Riak Erlang client using the HTTP interface
+pkg_riakhttpc_homepage = https://github.com/basho/riak-erlang-http-client
+pkg_riakhttpc_fetch = git
+pkg_riakhttpc_repo = https://github.com/basho/riak-erlang-http-client
+pkg_riakhttpc_commit = master
+
+PACKAGES += riaknostic
+pkg_riaknostic_name = riaknostic
+pkg_riaknostic_description = A diagnostic tool for Riak installations, to find common errors asap
+pkg_riaknostic_homepage = https://github.com/basho/riaknostic
+pkg_riaknostic_fetch = git
+pkg_riaknostic_repo = https://github.com/basho/riaknostic
+pkg_riaknostic_commit = master
+
+PACKAGES += riakpool
+pkg_riakpool_name = riakpool
+pkg_riakpool_description = erlang riak client pool
+pkg_riakpool_homepage = https://github.com/dweldon/riakpool
+pkg_riakpool_fetch = git
+pkg_riakpool_repo = https://github.com/dweldon/riakpool
+pkg_riakpool_commit = master
+
+PACKAGES += rivus_cep
+pkg_rivus_cep_name = rivus_cep
+pkg_rivus_cep_description = Complex event processing in Erlang
+pkg_rivus_cep_homepage = https://github.com/vascokk/rivus_cep
+pkg_rivus_cep_fetch = git
+pkg_rivus_cep_repo = https://github.com/vascokk/rivus_cep
+pkg_rivus_cep_commit = master
+
+PACKAGES += rlimit
+pkg_rlimit_name = rlimit
+pkg_rlimit_description = Magnus Klaar's rate limiter code from etorrent
+pkg_rlimit_homepage = https://github.com/jlouis/rlimit
+pkg_rlimit_fetch = git
+pkg_rlimit_repo = https://github.com/jlouis/rlimit
+pkg_rlimit_commit = master
+
+PACKAGES += rust_mk
+pkg_rust_mk_name = rust_mk
+pkg_rust_mk_description = Build Rust crates in an Erlang application
+pkg_rust_mk_homepage = https://github.com/goertzenator/rust.mk
+pkg_rust_mk_fetch = git
+pkg_rust_mk_repo = https://github.com/goertzenator/rust.mk
+pkg_rust_mk_commit = master
+
+PACKAGES += safetyvalve
+pkg_safetyvalve_name = safetyvalve
+pkg_safetyvalve_description = A safety valve for your erlang node
+pkg_safetyvalve_homepage = https://github.com/jlouis/safetyvalve
+pkg_safetyvalve_fetch = git
+pkg_safetyvalve_repo = https://github.com/jlouis/safetyvalve
+pkg_safetyvalve_commit = master
+
+PACKAGES += seestar
+pkg_seestar_name = seestar
+pkg_seestar_description = The Erlang client for Cassandra 1.2+ binary protocol
+pkg_seestar_homepage = https://github.com/iamaleksey/seestar
+pkg_seestar_fetch = git
+pkg_seestar_repo = https://github.com/iamaleksey/seestar
+pkg_seestar_commit = master
+
+PACKAGES += service
+pkg_service_name = service
+pkg_service_description = A minimal Erlang behavior for creating CloudI internal services
+pkg_service_homepage = http://cloudi.org/
+pkg_service_fetch = git
+pkg_service_repo = https://github.com/CloudI/service
+pkg_service_commit = master
+
+PACKAGES += setup
+pkg_setup_name = setup
+pkg_setup_description = Generic setup utility for Erlang-based systems
+pkg_setup_homepage = https://github.com/uwiger/setup
+pkg_setup_fetch = git
+pkg_setup_repo = https://github.com/uwiger/setup
+pkg_setup_commit = master
+
+PACKAGES += sext
+pkg_sext_name = sext
+pkg_sext_description = Sortable Erlang Term Serialization
+pkg_sext_homepage = https://github.com/uwiger/sext
+pkg_sext_fetch = git
+pkg_sext_repo = https://github.com/uwiger/sext
+pkg_sext_commit = master
+
+PACKAGES += sfmt
+pkg_sfmt_name = sfmt
+pkg_sfmt_description = SFMT pseudo random number generator for Erlang.
+pkg_sfmt_homepage = https://github.com/jj1bdx/sfmt-erlang
+pkg_sfmt_fetch = git
+pkg_sfmt_repo = https://github.com/jj1bdx/sfmt-erlang
+pkg_sfmt_commit = master
+
+PACKAGES += sgte
+pkg_sgte_name = sgte
+pkg_sgte_description = A simple Erlang Template Engine
+pkg_sgte_homepage = https://github.com/filippo/sgte
+pkg_sgte_fetch = git
+pkg_sgte_repo = https://github.com/filippo/sgte
+pkg_sgte_commit = master
+
+PACKAGES += sheriff
+pkg_sheriff_name = sheriff
+pkg_sheriff_description = Parse transform for type based validation.
+pkg_sheriff_homepage = http://ninenines.eu
+pkg_sheriff_fetch = git
+pkg_sheriff_repo = https://github.com/extend/sheriff
+pkg_sheriff_commit = master
+
+PACKAGES += shotgun
+pkg_shotgun_name = shotgun
+pkg_shotgun_description = better than just a gun
+pkg_shotgun_homepage = https://github.com/inaka/shotgun
+pkg_shotgun_fetch = git
+pkg_shotgun_repo = https://github.com/inaka/shotgun
+pkg_shotgun_commit = master
+
+PACKAGES += sidejob
+pkg_sidejob_name = sidejob
+pkg_sidejob_description = Parallel worker and capacity limiting library for Erlang
+pkg_sidejob_homepage = https://github.com/basho/sidejob
+pkg_sidejob_fetch = git
+pkg_sidejob_repo = https://github.com/basho/sidejob
+pkg_sidejob_commit = master
+
+PACKAGES += sieve
+pkg_sieve_name = sieve
+pkg_sieve_description = sieve is a simple TCP routing proxy (layer 7) in erlang
+pkg_sieve_homepage = https://github.com/benoitc/sieve
+pkg_sieve_fetch = git
+pkg_sieve_repo = https://github.com/benoitc/sieve
+pkg_sieve_commit = master
+
+PACKAGES += sighandler
+pkg_sighandler_name = sighandler
+pkg_sighandler_description = Handle UNIX signals in Erlang
+pkg_sighandler_homepage = https://github.com/jkingsbery/sighandler
+pkg_sighandler_fetch = git
+pkg_sighandler_repo = https://github.com/jkingsbery/sighandler
+pkg_sighandler_commit = master
+
+PACKAGES += simhash
+pkg_simhash_name = simhash
+pkg_simhash_description = Simhashing for Erlang -- hashing algorithm to find near-duplicates in binary data.
+pkg_simhash_homepage = https://github.com/ferd/simhash
+pkg_simhash_fetch = git
+pkg_simhash_repo = https://github.com/ferd/simhash
+pkg_simhash_commit = master
+
+PACKAGES += simple_bridge
+pkg_simple_bridge_name = simple_bridge
+pkg_simple_bridge_description = A simple, standardized interface library to Erlang HTTP Servers.
+pkg_simple_bridge_homepage = https://github.com/nitrogen/simple_bridge
+pkg_simple_bridge_fetch = git
+pkg_simple_bridge_repo = https://github.com/nitrogen/simple_bridge
+pkg_simple_bridge_commit = master
+
+PACKAGES += simple_oauth2
+pkg_simple_oauth2_name = simple_oauth2
+pkg_simple_oauth2_description = Simple erlang OAuth2 client module for any http server framework (Google, Facebook, Yandex, Vkontakte are preconfigured)
+pkg_simple_oauth2_homepage = https://github.com/virtan/simple_oauth2
+pkg_simple_oauth2_fetch = git
+pkg_simple_oauth2_repo = https://github.com/virtan/simple_oauth2
+pkg_simple_oauth2_commit = master
+
+PACKAGES += skel
+pkg_skel_name = skel
+pkg_skel_description = A Streaming Process-based Skeleton Library for Erlang
+pkg_skel_homepage = https://github.com/ParaPhrase/skel
+pkg_skel_fetch = git
+pkg_skel_repo = https://github.com/ParaPhrase/skel
+pkg_skel_commit = master
+
+PACKAGES += slack
+pkg_slack_name = slack
+pkg_slack_description = Minimal slack notification OTP library.
+pkg_slack_homepage = https://github.com/DonBranson/slack
+pkg_slack_fetch = git
+pkg_slack_repo = https://github.com/DonBranson/slack.git
+pkg_slack_commit = master
+
+PACKAGES += smother
+pkg_smother_name = smother
+pkg_smother_description = Extended code coverage metrics for Erlang.
+pkg_smother_homepage = https://ramsay-t.github.io/Smother/
+pkg_smother_fetch = git
+pkg_smother_repo = https://github.com/ramsay-t/Smother
+pkg_smother_commit = master
+
+PACKAGES += snappyer
+pkg_snappyer_name = snappyer
+pkg_snappyer_description = Snappy as nif for Erlang
+pkg_snappyer_homepage = https://github.com/zmstone/snappyer
+pkg_snappyer_fetch = git
+pkg_snappyer_repo = https://github.com/zmstone/snappyer.git
+pkg_snappyer_commit = master
+
+PACKAGES += social
+pkg_social_name = social
+pkg_social_description = Cowboy handler for social login via OAuth2 providers
+pkg_social_homepage = https://github.com/dvv/social
+pkg_social_fetch = git
+pkg_social_repo = https://github.com/dvv/social
+pkg_social_commit = master
+
+PACKAGES += spapi_router
+pkg_spapi_router_name = spapi_router
+pkg_spapi_router_description = Partially-connected Erlang clustering
+pkg_spapi_router_homepage = https://github.com/spilgames/spapi-router
+pkg_spapi_router_fetch = git
+pkg_spapi_router_repo = https://github.com/spilgames/spapi-router
+pkg_spapi_router_commit = master
+
+PACKAGES += sqerl
+pkg_sqerl_name = sqerl
+pkg_sqerl_description = An Erlang-flavoured SQL DSL
+pkg_sqerl_homepage = https://github.com/hairyhum/sqerl
+pkg_sqerl_fetch = git
+pkg_sqerl_repo = https://github.com/hairyhum/sqerl
+pkg_sqerl_commit = master
+
+PACKAGES += srly
+pkg_srly_name = srly
+pkg_srly_description = Native Erlang Unix serial interface
+pkg_srly_homepage = https://github.com/msantos/srly
+pkg_srly_fetch = git
+pkg_srly_repo = https://github.com/msantos/srly
+pkg_srly_commit = master
+
+PACKAGES += sshrpc
+pkg_sshrpc_name = sshrpc
+pkg_sshrpc_description = Erlang SSH RPC module (experimental)
+pkg_sshrpc_homepage = https://github.com/jj1bdx/sshrpc
+pkg_sshrpc_fetch = git
+pkg_sshrpc_repo = https://github.com/jj1bdx/sshrpc
+pkg_sshrpc_commit = master
+
+PACKAGES += stable
+pkg_stable_name = stable
+pkg_stable_description = Library of assorted helpers for Cowboy web server.
+pkg_stable_homepage = https://github.com/dvv/stable
+pkg_stable_fetch = git
+pkg_stable_repo = https://github.com/dvv/stable
+pkg_stable_commit = master
+
+PACKAGES += statebox
+pkg_statebox_name = statebox
+pkg_statebox_description = Erlang state monad with merge/conflict-resolution capabilities. Useful for Riak.
+pkg_statebox_homepage = https://github.com/mochi/statebox
+pkg_statebox_fetch = git
+pkg_statebox_repo = https://github.com/mochi/statebox
+pkg_statebox_commit = master
+
+PACKAGES += statebox_riak
+pkg_statebox_riak_name = statebox_riak
+pkg_statebox_riak_description = Convenience library that makes it easier to use statebox with riak, extracted from best practices in our production code at Mochi Media.
+pkg_statebox_riak_homepage = https://github.com/mochi/statebox_riak
+pkg_statebox_riak_fetch = git
+pkg_statebox_riak_repo = https://github.com/mochi/statebox_riak
+pkg_statebox_riak_commit = master
+
+PACKAGES += statman
+pkg_statman_name = statman
+pkg_statman_description = Efficiently collect massive volumes of metrics inside the Erlang VM
+pkg_statman_homepage = https://github.com/knutin/statman
+pkg_statman_fetch = git
+pkg_statman_repo = https://github.com/knutin/statman
+pkg_statman_commit = master
+
+PACKAGES += statsderl
+pkg_statsderl_name = statsderl
+pkg_statsderl_description = StatsD client (erlang)
+pkg_statsderl_homepage = https://github.com/lpgauth/statsderl
+pkg_statsderl_fetch = git
+pkg_statsderl_repo = https://github.com/lpgauth/statsderl
+pkg_statsderl_commit = master
+
+PACKAGES += stdinout_pool
+pkg_stdinout_pool_name = stdinout_pool
+pkg_stdinout_pool_description = stdinout_pool : stuff goes in, stuff goes out. there's never any miscommunication.
+pkg_stdinout_pool_homepage = https://github.com/mattsta/erlang-stdinout-pool
+pkg_stdinout_pool_fetch = git
+pkg_stdinout_pool_repo = https://github.com/mattsta/erlang-stdinout-pool
+pkg_stdinout_pool_commit = master
+
+PACKAGES += stockdb
+pkg_stockdb_name = stockdb
+pkg_stockdb_description = Database for storing Stock Exchange quotes in erlang
+pkg_stockdb_homepage = https://github.com/maxlapshin/stockdb
+pkg_stockdb_fetch = git
+pkg_stockdb_repo = https://github.com/maxlapshin/stockdb
+pkg_stockdb_commit = master
+
+PACKAGES += stripe
+pkg_stripe_name = stripe
+pkg_stripe_description = Erlang interface to the stripe.com API
+pkg_stripe_homepage = https://github.com/mattsta/stripe-erlang
+pkg_stripe_fetch = git
+pkg_stripe_repo = https://github.com/mattsta/stripe-erlang
+pkg_stripe_commit = v1
+
+PACKAGES += subproc
+pkg_subproc_name = subproc
+pkg_subproc_description = unix subprocess manager with {active,once|false} modes
+pkg_subproc_homepage = http://dozzie.jarowit.net/trac/wiki/subproc
+pkg_subproc_fetch = git
+pkg_subproc_repo = https://github.com/dozzie/subproc
+pkg_subproc_commit = v0.1.0
+
+PACKAGES += supervisor3
+pkg_supervisor3_name = supervisor3
+pkg_supervisor3_description = OTP supervisor with additional strategies
+pkg_supervisor3_homepage = https://github.com/klarna/supervisor3
+pkg_supervisor3_fetch = git
+pkg_supervisor3_repo = https://github.com/klarna/supervisor3.git
+pkg_supervisor3_commit = master
+
+PACKAGES += surrogate
+pkg_surrogate_name = surrogate
+pkg_surrogate_description = Proxy server written in Erlang. Supports reverse proxy load balancing and forward proxy with HTTP (including CONNECT), SOCKS4, SOCKS5, and transparent proxy modes.
+pkg_surrogate_homepage = https://github.com/skruger/Surrogate
+pkg_surrogate_fetch = git
+pkg_surrogate_repo = https://github.com/skruger/Surrogate
+pkg_surrogate_commit = master
+
+PACKAGES += swab
+pkg_swab_name = swab
+pkg_swab_description = General purpose buffer handling module
+pkg_swab_homepage = https://github.com/crownedgrouse/swab
+pkg_swab_fetch = git
+pkg_swab_repo = https://github.com/crownedgrouse/swab
+pkg_swab_commit = master
+
+PACKAGES += swarm
+pkg_swarm_name = swarm
+pkg_swarm_description = Fast and simple acceptor pool for Erlang
+pkg_swarm_homepage = https://github.com/jeremey/swarm
+pkg_swarm_fetch = git
+pkg_swarm_repo = https://github.com/jeremey/swarm
+pkg_swarm_commit = master
+
+PACKAGES += switchboard
+pkg_switchboard_name = switchboard
+pkg_switchboard_description = A framework for processing email using worker plugins.
+pkg_switchboard_homepage = https://github.com/thusfresh/switchboard
+pkg_switchboard_fetch = git
+pkg_switchboard_repo = https://github.com/thusfresh/switchboard
+pkg_switchboard_commit = master
+
+PACKAGES += syn
+pkg_syn_name = syn
+pkg_syn_description = A global Process Registry and Process Group manager for Erlang.
+pkg_syn_homepage = https://github.com/ostinelli/syn
+pkg_syn_fetch = git
+pkg_syn_repo = https://github.com/ostinelli/syn
+pkg_syn_commit = master
+
+PACKAGES += sync
+pkg_sync_name = sync
+pkg_sync_description = On-the-fly recompiling and reloading in Erlang.
+pkg_sync_homepage = https://github.com/rustyio/sync
+pkg_sync_fetch = git
+pkg_sync_repo = https://github.com/rustyio/sync
+pkg_sync_commit = master
+
+PACKAGES += syntaxerl
+pkg_syntaxerl_name = syntaxerl
+pkg_syntaxerl_description = Syntax checker for Erlang
+pkg_syntaxerl_homepage = https://github.com/ten0s/syntaxerl
+pkg_syntaxerl_fetch = git
+pkg_syntaxerl_repo = https://github.com/ten0s/syntaxerl
+pkg_syntaxerl_commit = master
+
+PACKAGES += syslog
+pkg_syslog_name = syslog
+pkg_syslog_description = Erlang port driver for interacting with syslog via syslog(3)
+pkg_syslog_homepage = https://github.com/Vagabond/erlang-syslog
+pkg_syslog_fetch = git
+pkg_syslog_repo = https://github.com/Vagabond/erlang-syslog
+pkg_syslog_commit = master
+
+PACKAGES += taskforce
+pkg_taskforce_name = taskforce
+pkg_taskforce_description = Erlang worker pools for controlled parallelisation of arbitrary tasks.
+pkg_taskforce_homepage = https://github.com/g-andrade/taskforce
+pkg_taskforce_fetch = git
+pkg_taskforce_repo = https://github.com/g-andrade/taskforce
+pkg_taskforce_commit = master
+
+PACKAGES += tddreloader
+pkg_tddreloader_name = tddreloader
+pkg_tddreloader_description = Shell utility for recompiling, reloading, and testing code as it changes
+pkg_tddreloader_homepage = https://github.com/version2beta/tddreloader
+pkg_tddreloader_fetch = git
+pkg_tddreloader_repo = https://github.com/version2beta/tddreloader
+pkg_tddreloader_commit = master
+
+PACKAGES += tempo
+pkg_tempo_name = tempo
+pkg_tempo_description = NIF-based date and time parsing and formatting for Erlang.
+pkg_tempo_homepage = https://github.com/selectel/tempo
+pkg_tempo_fetch = git
+pkg_tempo_repo = https://github.com/selectel/tempo
+pkg_tempo_commit = master
+
+PACKAGES += ticktick
+pkg_ticktick_name = ticktick
+pkg_ticktick_description = Ticktick is an ID generator for message services.
+pkg_ticktick_homepage = https://github.com/ericliang/ticktick
+pkg_ticktick_fetch = git
+pkg_ticktick_repo = https://github.com/ericliang/ticktick
+pkg_ticktick_commit = master
+
+PACKAGES += tinymq
+pkg_tinymq_name = tinymq
+pkg_tinymq_description = TinyMQ - a diminutive, in-memory message queue
+pkg_tinymq_homepage = https://github.com/ChicagoBoss/tinymq
+pkg_tinymq_fetch = git
+pkg_tinymq_repo = https://github.com/ChicagoBoss/tinymq
+pkg_tinymq_commit = master
+
+PACKAGES += tinymt
+pkg_tinymt_name = tinymt
+pkg_tinymt_description = TinyMT pseudo random number generator for Erlang.
+pkg_tinymt_homepage = https://github.com/jj1bdx/tinymt-erlang
+pkg_tinymt_fetch = git
+pkg_tinymt_repo = https://github.com/jj1bdx/tinymt-erlang
+pkg_tinymt_commit = master
+
+PACKAGES += tirerl
+pkg_tirerl_name = tirerl
+pkg_tirerl_description = Erlang interface to Elastic Search
+pkg_tirerl_homepage = https://github.com/inaka/tirerl
+pkg_tirerl_fetch = git
+pkg_tirerl_repo = https://github.com/inaka/tirerl
+pkg_tirerl_commit = master
+
+PACKAGES += toml
+pkg_toml_name = toml
+pkg_toml_description = TOML (0.4.0) config parser
+pkg_toml_homepage = http://dozzie.jarowit.net/trac/wiki/TOML
+pkg_toml_fetch = git
+pkg_toml_repo = https://github.com/dozzie/toml
+pkg_toml_commit = v0.2.0
+
+PACKAGES += traffic_tools
+pkg_traffic_tools_name = traffic_tools
+pkg_traffic_tools_description = Simple traffic limiting library
+pkg_traffic_tools_homepage = https://github.com/systra/traffic_tools
+pkg_traffic_tools_fetch = git
+pkg_traffic_tools_repo = https://github.com/systra/traffic_tools
+pkg_traffic_tools_commit = master
+
+PACKAGES += trails
+pkg_trails_name = trails
+pkg_trails_description = A couple of improvements over Cowboy Routes
+pkg_trails_homepage = http://inaka.github.io/cowboy-trails/
+pkg_trails_fetch = git
+pkg_trails_repo = https://github.com/inaka/cowboy-trails
+pkg_trails_commit = master
+
+PACKAGES += trane
+pkg_trane_name = trane
+pkg_trane_description = SAX-style parser for broken HTML, in Erlang
+pkg_trane_homepage = https://github.com/massemanet/trane
+pkg_trane_fetch = git
+pkg_trane_repo = https://github.com/massemanet/trane
+pkg_trane_commit = master
+
+PACKAGES += transit
+pkg_transit_name = transit
+pkg_transit_description = transit format for erlang
+pkg_transit_homepage = https://github.com/isaiah/transit-erlang
+pkg_transit_fetch = git
+pkg_transit_repo = https://github.com/isaiah/transit-erlang
+pkg_transit_commit = master
+
+PACKAGES += trie
+pkg_trie_name = trie
+pkg_trie_description = Erlang Trie Implementation
+pkg_trie_homepage = https://github.com/okeuday/trie
+pkg_trie_fetch = git
+pkg_trie_repo = https://github.com/okeuday/trie
+pkg_trie_commit = master
+
+PACKAGES += triq
+pkg_triq_name = triq
+pkg_triq_description = Trifork QuickCheck
+pkg_triq_homepage = https://triq.gitlab.io
+pkg_triq_fetch = git
+pkg_triq_repo = https://gitlab.com/triq/triq.git
+pkg_triq_commit = master
+
+PACKAGES += tunctl
+pkg_tunctl_name = tunctl
+pkg_tunctl_description = Erlang TUN/TAP interface
+pkg_tunctl_homepage = https://github.com/msantos/tunctl
+pkg_tunctl_fetch = git
+pkg_tunctl_repo = https://github.com/msantos/tunctl
+pkg_tunctl_commit = master
+
+PACKAGES += twerl
+pkg_twerl_name = twerl
+pkg_twerl_description = Erlang client for the Twitter Streaming API
+pkg_twerl_homepage = https://github.com/lucaspiller/twerl
+pkg_twerl_fetch = git
+pkg_twerl_repo = https://github.com/lucaspiller/twerl
+pkg_twerl_commit = oauth
+
+PACKAGES += twitter_erlang
+pkg_twitter_erlang_name = twitter_erlang
+pkg_twitter_erlang_description = An Erlang twitter client
+pkg_twitter_erlang_homepage = https://github.com/ngerakines/erlang_twitter
+pkg_twitter_erlang_fetch = git
+pkg_twitter_erlang_repo = https://github.com/ngerakines/erlang_twitter
+pkg_twitter_erlang_commit = master
+
+PACKAGES += ucol_nif
+pkg_ucol_nif_name = ucol_nif
+pkg_ucol_nif_description = ICU based collation Erlang module
+pkg_ucol_nif_homepage = https://github.com/refuge/ucol_nif
+pkg_ucol_nif_fetch = git
+pkg_ucol_nif_repo = https://github.com/refuge/ucol_nif
+pkg_ucol_nif_commit = master
+
+PACKAGES += unicorn
+pkg_unicorn_name = unicorn
+pkg_unicorn_description = Generic configuration server
+pkg_unicorn_homepage = https://github.com/shizzard/unicorn
+pkg_unicorn_fetch = git
+pkg_unicorn_repo = https://github.com/shizzard/unicorn
+pkg_unicorn_commit = master
+
+PACKAGES += unsplit
+pkg_unsplit_name = unsplit
+pkg_unsplit_description = Resolves conflicts in Mnesia after network splits
+pkg_unsplit_homepage = https://github.com/uwiger/unsplit
+pkg_unsplit_fetch = git
+pkg_unsplit_repo = https://github.com/uwiger/unsplit
+pkg_unsplit_commit = master
+
+PACKAGES += uuid
+pkg_uuid_name = uuid
+pkg_uuid_description = Erlang UUID Implementation
+pkg_uuid_homepage = https://github.com/okeuday/uuid
+pkg_uuid_fetch = git
+pkg_uuid_repo = https://github.com/okeuday/uuid
+pkg_uuid_commit = master
+
+PACKAGES += ux
+pkg_ux_name = ux
+pkg_ux_description = Unicode eXtension for Erlang (Strings, Collation)
+pkg_ux_homepage = https://github.com/erlang-unicode/ux
+pkg_ux_fetch = git
+pkg_ux_repo = https://github.com/erlang-unicode/ux
+pkg_ux_commit = master
+
+PACKAGES += vert
+pkg_vert_name = vert
+pkg_vert_description = Erlang binding to libvirt virtualization API
+pkg_vert_homepage = https://github.com/msantos/erlang-libvirt
+pkg_vert_fetch = git
+pkg_vert_repo = https://github.com/msantos/erlang-libvirt
+pkg_vert_commit = master
+
+PACKAGES += verx
+pkg_verx_name = verx
+pkg_verx_description = Erlang implementation of the libvirtd remote protocol
+pkg_verx_homepage = https://github.com/msantos/verx
+pkg_verx_fetch = git
+pkg_verx_repo = https://github.com/msantos/verx
+pkg_verx_commit = master
+
+PACKAGES += vmq_acl
+pkg_vmq_acl_name = vmq_acl
+pkg_vmq_acl_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_acl_homepage = https://verne.mq/
+pkg_vmq_acl_fetch = git
+pkg_vmq_acl_repo = https://github.com/erlio/vmq_acl
+pkg_vmq_acl_commit = master
+
+PACKAGES += vmq_bridge
+pkg_vmq_bridge_name = vmq_bridge
+pkg_vmq_bridge_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_bridge_homepage = https://verne.mq/
+pkg_vmq_bridge_fetch = git
+pkg_vmq_bridge_repo = https://github.com/erlio/vmq_bridge
+pkg_vmq_bridge_commit = master
+
+PACKAGES += vmq_graphite
+pkg_vmq_graphite_name = vmq_graphite
+pkg_vmq_graphite_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_graphite_homepage = https://verne.mq/
+pkg_vmq_graphite_fetch = git
+pkg_vmq_graphite_repo = https://github.com/erlio/vmq_graphite
+pkg_vmq_graphite_commit = master
+
+PACKAGES += vmq_passwd
+pkg_vmq_passwd_name = vmq_passwd
+pkg_vmq_passwd_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_passwd_homepage = https://verne.mq/
+pkg_vmq_passwd_fetch = git
+pkg_vmq_passwd_repo = https://github.com/erlio/vmq_passwd
+pkg_vmq_passwd_commit = master
+
+PACKAGES += vmq_server
+pkg_vmq_server_name = vmq_server
+pkg_vmq_server_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_server_homepage = https://verne.mq/
+pkg_vmq_server_fetch = git
+pkg_vmq_server_repo = https://github.com/erlio/vmq_server
+pkg_vmq_server_commit = master
+
+PACKAGES += vmq_snmp
+pkg_vmq_snmp_name = vmq_snmp
+pkg_vmq_snmp_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_snmp_homepage = https://verne.mq/
+pkg_vmq_snmp_fetch = git
+pkg_vmq_snmp_repo = https://github.com/erlio/vmq_snmp
+pkg_vmq_snmp_commit = master
+
+PACKAGES += vmq_systree
+pkg_vmq_systree_name = vmq_systree
+pkg_vmq_systree_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_systree_homepage = https://verne.mq/
+pkg_vmq_systree_fetch = git
+pkg_vmq_systree_repo = https://github.com/erlio/vmq_systree
+pkg_vmq_systree_commit = master
+
+PACKAGES += vmstats
+pkg_vmstats_name = vmstats
+pkg_vmstats_description = Tiny Erlang app that works with statsderl to generate Erlang VM information for graphite logs.
+pkg_vmstats_homepage = https://github.com/ferd/vmstats
+pkg_vmstats_fetch = git
+pkg_vmstats_repo = https://github.com/ferd/vmstats
+pkg_vmstats_commit = master
+
+PACKAGES += walrus
+pkg_walrus_name = walrus
+pkg_walrus_description = Walrus - Mustache-like Templating
+pkg_walrus_homepage = https://github.com/devinus/walrus
+pkg_walrus_fetch = git
+pkg_walrus_repo = https://github.com/devinus/walrus
+pkg_walrus_commit = master
+
+PACKAGES += webmachine
+pkg_webmachine_name = webmachine
+pkg_webmachine_description = A REST-based system for building web applications.
+pkg_webmachine_homepage = https://github.com/basho/webmachine
+pkg_webmachine_fetch = git
+pkg_webmachine_repo = https://github.com/basho/webmachine
+pkg_webmachine_commit = master
+
+PACKAGES += websocket_client
+pkg_websocket_client_name = websocket_client
+pkg_websocket_client_description = Erlang websocket client (ws and wss supported)
+pkg_websocket_client_homepage = https://github.com/jeremyong/websocket_client
+pkg_websocket_client_fetch = git
+pkg_websocket_client_repo = https://github.com/jeremyong/websocket_client
+pkg_websocket_client_commit = master
+
+PACKAGES += worker_pool
+pkg_worker_pool_name = worker_pool
+pkg_worker_pool_description = a simple erlang worker pool
+pkg_worker_pool_homepage = https://github.com/inaka/worker_pool
+pkg_worker_pool_fetch = git
+pkg_worker_pool_repo = https://github.com/inaka/worker_pool
+pkg_worker_pool_commit = master
+
+PACKAGES += wrangler
+pkg_wrangler_name = wrangler
+pkg_wrangler_description = Import of the Wrangler svn repository.
+pkg_wrangler_homepage = http://www.cs.kent.ac.uk/projects/wrangler/Home.html
+pkg_wrangler_fetch = git
+pkg_wrangler_repo = https://github.com/RefactoringTools/wrangler
+pkg_wrangler_commit = master
+
+PACKAGES += wsock
+pkg_wsock_name = wsock
+pkg_wsock_description = Erlang library to build WebSocket clients and servers
+pkg_wsock_homepage = https://github.com/madtrick/wsock
+pkg_wsock_fetch = git
+pkg_wsock_repo = https://github.com/madtrick/wsock
+pkg_wsock_commit = master
+
+PACKAGES += xhttpc
+pkg_xhttpc_name = xhttpc
+pkg_xhttpc_description = Extensible HTTP Client for Erlang
+pkg_xhttpc_homepage = https://github.com/seriyps/xhttpc
+pkg_xhttpc_fetch = git
+pkg_xhttpc_repo = https://github.com/seriyps/xhttpc
+pkg_xhttpc_commit = master
+
+PACKAGES += xref_runner
+pkg_xref_runner_name = xref_runner
+pkg_xref_runner_description = Erlang Xref Runner (inspired in rebar xref)
+pkg_xref_runner_homepage = https://github.com/inaka/xref_runner
+pkg_xref_runner_fetch = git
+pkg_xref_runner_repo = https://github.com/inaka/xref_runner
+pkg_xref_runner_commit = master
+
+PACKAGES += yamerl
+pkg_yamerl_name = yamerl
+pkg_yamerl_description = YAML 1.2 parser in pure Erlang
+pkg_yamerl_homepage = https://github.com/yakaz/yamerl
+pkg_yamerl_fetch = git
+pkg_yamerl_repo = https://github.com/yakaz/yamerl
+pkg_yamerl_commit = master
+
+PACKAGES += yamler
+pkg_yamler_name = yamler
+pkg_yamler_description = libyaml-based yaml loader for Erlang
+pkg_yamler_homepage = https://github.com/goertzenator/yamler
+pkg_yamler_fetch = git
+pkg_yamler_repo = https://github.com/goertzenator/yamler
+pkg_yamler_commit = master
+
+PACKAGES += yaws
+pkg_yaws_name = yaws
+pkg_yaws_description = Yaws webserver
+pkg_yaws_homepage = http://yaws.hyber.org
+pkg_yaws_fetch = git
+pkg_yaws_repo = https://github.com/klacke/yaws
+pkg_yaws_commit = master
+
+PACKAGES += zab_engine
+pkg_zab_engine_name = zab_engine
+pkg_zab_engine_description = Zab protocol implementation in Erlang
+pkg_zab_engine_homepage = https://github.com/xinmingyao/zab_engine
+pkg_zab_engine_fetch = git
+pkg_zab_engine_repo = https://github.com/xinmingyao/zab_engine
+pkg_zab_engine_commit = master
+
+PACKAGES += zabbix_sender
+pkg_zabbix_sender_name = zabbix_sender
+pkg_zabbix_sender_description = Zabbix trapper for sending data to Zabbix in pure Erlang
+pkg_zabbix_sender_homepage = https://github.com/stalkermn/zabbix_sender
+pkg_zabbix_sender_fetch = git
+pkg_zabbix_sender_repo = https://github.com/stalkermn/zabbix_sender.git
+pkg_zabbix_sender_commit = master
+
+PACKAGES += zeta
+pkg_zeta_name = zeta
+pkg_zeta_description = HTTP access log parser in Erlang
+pkg_zeta_homepage = https://github.com/s1n4/zeta
+pkg_zeta_fetch = git
+pkg_zeta_repo = https://github.com/s1n4/zeta
+pkg_zeta_commit = master
+
+PACKAGES += zippers
+pkg_zippers_name = zippers
+pkg_zippers_description = A library for functional zipper data structures in Erlang.
+pkg_zippers_homepage = https://github.com/ferd/zippers
+pkg_zippers_fetch = git
+pkg_zippers_repo = https://github.com/ferd/zippers
+pkg_zippers_commit = master
+
+PACKAGES += zlists
+pkg_zlists_name = zlists
+pkg_zlists_description = Erlang lazy lists library.
+pkg_zlists_homepage = https://github.com/vjache/erlang-zlists
+pkg_zlists_fetch = git
+pkg_zlists_repo = https://github.com/vjache/erlang-zlists
+pkg_zlists_commit = master
+
+PACKAGES += zraft_lib
+pkg_zraft_lib_name = zraft_lib
+pkg_zraft_lib_description = Erlang raft consensus protocol implementation
+pkg_zraft_lib_homepage = https://github.com/dreyk/zraft_lib
+pkg_zraft_lib_fetch = git
+pkg_zraft_lib_repo = https://github.com/dreyk/zraft_lib
+pkg_zraft_lib_commit = master
+
+PACKAGES += zucchini
+pkg_zucchini_name = zucchini
+pkg_zucchini_description = An Erlang INI parser
+pkg_zucchini_homepage = https://github.com/devinus/zucchini
+pkg_zucchini_fetch = git
+pkg_zucchini_repo = https://github.com/devinus/zucchini
+pkg_zucchini_commit = master
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: search
+
+define pkg_print
+ $(verbose) printf "%s\n" \
+ $(if $(call core_eq,$(1),$(pkg_$(1)_name)),,"Pkg name: $(1)") \
+ "App name: $(pkg_$(1)_name)" \
+ "Description: $(pkg_$(1)_description)" \
+ "Home page: $(pkg_$(1)_homepage)" \
+ "Fetch with: $(pkg_$(1)_fetch)" \
+ "Repository: $(pkg_$(1)_repo)" \
+ "Commit: $(pkg_$(1)_commit)" \
+ ""
+
+endef
+
+search:
+ifdef q
+ $(foreach p,$(PACKAGES), \
+ $(if $(findstring $(call core_lc,$(q)),$(call core_lc,$(pkg_$(p)_name) $(pkg_$(p)_description))), \
+ $(call pkg_print,$(p))))
+else
+ $(foreach p,$(PACKAGES),$(call pkg_print,$(p)))
+endif
+
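+# Usage examples for the search target above:
+#   make search q=riak   # list packages matching "riak" (case-insensitive,
+#                        # matched against package name and description)
+#   make search          # list every package known to Erlang.mk
+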
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-deps clean-tmp-deps.log
+
+# Configuration.
+
+ifdef OTP_DEPS
+$(warning The variable OTP_DEPS is deprecated in favor of LOCAL_DEPS.)
+endif
+
+IGNORE_DEPS ?=
+export IGNORE_DEPS
+
+APPS_DIR ?= $(CURDIR)/apps
+export APPS_DIR
+
+DEPS_DIR ?= $(CURDIR)/deps
+export DEPS_DIR
+
+REBAR_DEPS_DIR = $(DEPS_DIR)
+export REBAR_DEPS_DIR
+
+REBAR_GIT ?= https://github.com/rebar/rebar
+REBAR_COMMIT ?= 576e12171ab8d69b048b827b92aa65d067deea01
+
+# External "early" plugins (see core/plugins.mk for regular plugins).
+# They both use the core_dep_plugin macro.
+
+define core_dep_plugin
+ifeq ($(2),$(PROJECT))
+-include $$(patsubst $(PROJECT)/%,%,$(1))
+else
+-include $(DEPS_DIR)/$(1)
+
+$(DEPS_DIR)/$(1): $(DEPS_DIR)/$(2) ;
+endif
+endef
+
+DEP_EARLY_PLUGINS ?=
+
+$(foreach p,$(DEP_EARLY_PLUGINS),\
+ $(eval $(if $(findstring /,$p),\
+ $(call core_dep_plugin,$p,$(firstword $(subst /, ,$p))),\
+ $(call core_dep_plugin,$p/early-plugins.mk,$p))))
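+
+# For example (hypothetical dependency name), a project Makefile could set:
+#   DEP_EARLY_PLUGINS = my_dep              # loads $(DEPS_DIR)/my_dep/early-plugins.mk
+#   DEP_EARLY_PLUGINS = my_dep/mk/early.mk  # loads that specific file from the dep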
+
+# Query functions.
+
+query_fetch_method = $(if $(dep_$(1)),$(call _qfm_dep,$(word 1,$(dep_$(1)))),$(call _qfm_pkg,$(1)))
+_qfm_dep = $(if $(dep_fetch_$(1)),$(1),$(if $(IS_DEP),legacy,fail))
+_qfm_pkg = $(if $(pkg_$(1)_fetch),$(pkg_$(1)_fetch),fail)
+
+query_name = $(if $(dep_$(1)),$(1),$(if $(pkg_$(1)_name),$(pkg_$(1)_name),$(1)))
+
+query_repo = $(call _qr,$(1),$(call query_fetch_method,$(1)))
+_qr = $(if $(query_repo_$(2)),$(call query_repo_$(2),$(1)),$(call dep_repo,$(1)))
+
+query_repo_default = $(if $(dep_$(1)),$(word 2,$(dep_$(1))),$(pkg_$(1)_repo))
+query_repo_git = $(patsubst git://github.com/%,https://github.com/%,$(call query_repo_default,$(1)))
+query_repo_git-subfolder = $(call query_repo_git,$(1))
+query_repo_git-submodule = -
+query_repo_hg = $(call query_repo_default,$(1))
+query_repo_svn = $(call query_repo_default,$(1))
+query_repo_cp = $(call query_repo_default,$(1))
+query_repo_ln = $(call query_repo_default,$(1))
+query_repo_hex = https://hex.pm/packages/$(if $(word 3,$(dep_$(1))),$(word 3,$(dep_$(1))),$(1))
+query_repo_fail = -
+query_repo_legacy = -
+
+query_version = $(call _qv,$(1),$(call query_fetch_method,$(1)))
+_qv = $(if $(query_version_$(2)),$(call query_version_$(2),$(1)),$(call dep_commit,$(1)))
+
+query_version_default = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(word 3,$(dep_$(1))),$(pkg_$(1)_commit)))
+query_version_git = $(call query_version_default,$(1))
+query_version_git-subfolder = $(call query_version_git,$(1))
+query_version_git-submodule = -
+query_version_hg = $(call query_version_default,$(1))
+query_version_svn = -
+query_version_cp = -
+query_version_ln = -
+query_version_hex = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(word 2,$(dep_$(1))),$(pkg_$(1)_commit)))
+query_version_fail = -
+query_version_legacy = -
+
+query_extra = $(call _qe,$(1),$(call query_fetch_method,$(1)))
+_qe = $(if $(query_extra_$(2)),$(call query_extra_$(2),$(1)),-)
+
+query_extra_git = -
+query_extra_git-subfolder = $(if $(dep_$(1)),subfolder=$(word 4,$(dep_$(1))),-)
+query_extra_git-submodule = -
+query_extra_hg = -
+query_extra_svn = -
+query_extra_cp = -
+query_extra_ln = -
+query_extra_hex = $(if $(dep_$(1)),package-name=$(word 3,$(dep_$(1))),-)
+query_extra_fail = -
+query_extra_legacy = -
+
+query_absolute_path = $(addprefix $(DEPS_DIR)/,$(call query_name,$(1)))
+
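+# Worked example (hypothetical dep entry): given
+#   dep_cowlib = git https://github.com/ninenines/cowlib 2.0.0
+# the query functions resolve as follows:
+#   $(call query_fetch_method,cowlib) -> git
+#   $(call query_repo,cowlib)         -> https://github.com/ninenines/cowlib
+#   $(call query_version,cowlib)      -> 2.0.0
+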
+# Deprecated legacy query functions.
+dep_fetch = $(call query_fetch_method,$(1))
+dep_name = $(call query_name,$(1))
+dep_repo = $(call query_repo_git,$(1))
+dep_commit = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(if $(filter hex,$(word 1,$(dep_$(1)))),$(word 2,$(dep_$(1))),$(word 3,$(dep_$(1)))),$(pkg_$(1)_commit)))
+
+LOCAL_DEPS_DIRS = $(foreach a,$(LOCAL_DEPS),$(if $(wildcard $(APPS_DIR)/$(a)),$(APPS_DIR)/$(a)))
+ALL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(foreach dep,$(filter-out $(IGNORE_DEPS),$(BUILD_DEPS) $(DEPS)),$(call dep_name,$(dep))))
+
+# When we are calling an app directly we don't want to include it here
+# otherwise it'll be treated both as an apps and a top-level project.
+ALL_APPS_DIRS = $(if $(wildcard $(APPS_DIR)/),$(filter-out $(APPS_DIR),$(shell find $(APPS_DIR) -maxdepth 1 -type d)))
+ifdef ROOT_DIR
+ifndef IS_APP
+ALL_APPS_DIRS := $(filter-out $(APPS_DIR)/$(notdir $(CURDIR)),$(ALL_APPS_DIRS))
+endif
+endif
+
+ifeq ($(filter $(APPS_DIR) $(DEPS_DIR),$(subst :, ,$(ERL_LIBS))),)
+ifeq ($(ERL_LIBS),)
+ ERL_LIBS = $(APPS_DIR):$(DEPS_DIR)
+else
+ ERL_LIBS := $(ERL_LIBS):$(APPS_DIR):$(DEPS_DIR)
+endif
+endif
+export ERL_LIBS
+
+export NO_AUTOPATCH
+
+# Verbosity.
+
+dep_verbose_0 = @echo " DEP $1 ($(call dep_commit,$1))";
+dep_verbose_2 = set -x;
+dep_verbose = $(dep_verbose_$(V))
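+
+# For example, `make V=2` traces each dependency fetch with `set -x`, while
+# the default verbosity (V=0) prints terse " DEP <name> (<commit>)" lines.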
+
+# Optimization: don't recompile deps unless truly necessary.
+
+ifndef IS_DEP
+ifneq ($(MAKELEVEL),0)
+$(shell rm -f ebin/dep_built)
+endif
+endif
+
+# Core targets.
+
+ALL_APPS_DIRS_TO_BUILD = $(if $(LOCAL_DEPS_DIRS)$(IS_APP),$(LOCAL_DEPS_DIRS),$(ALL_APPS_DIRS))
+
+apps:: $(ALL_APPS_DIRS) clean-tmp-deps.log | $(ERLANG_MK_TMP)
+# Create ebin directory for all apps to make sure Erlang recognizes them
+# as proper OTP applications when using -include_lib. This is a temporary
+# fix; the proper fix would be to compile apps/* in the right order.
+ifndef IS_APP
+ifneq ($(ALL_APPS_DIRS),)
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ mkdir -p $$dep/ebin; \
+ done
+endif
+endif
+# At the toplevel: if LOCAL_DEPS is defined with at least one local app, only
+# compile that list of apps. Otherwise, compile everything.
+# Within an app: compile all LOCAL_DEPS that are (uncompiled) local apps.
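+# For example (hypothetical app names): setting LOCAL_DEPS = app_a app_b at
+# the toplevel compiles only apps/app_a and apps/app_b.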
+ifneq ($(ALL_APPS_DIRS_TO_BUILD),)
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS_TO_BUILD); do \
+ if grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/apps.log; then \
+ :; \
+ else \
+ echo $$dep >> $(ERLANG_MK_TMP)/apps.log; \
+ $(MAKE) -C $$dep $(if $(IS_TEST),test-build-app) IS_APP=1; \
+ fi \
+ done
+endif
+
+clean-tmp-deps.log:
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_TMP)/apps.log $(ERLANG_MK_TMP)/deps.log
+endif
+
+# Erlang.mk does not rebuild dependencies once they have been compiled.
+# A developer working on the top-level project and on some of its
+# dependencies at the same time may want to change this behavior.
+# There are two solutions:
+# 1. Set `FULL=1` so that all dependencies are visited and
+# recursively recompiled if necessary.
+# 2. Set `FORCE_REBUILD=` to the specific list of dependencies that
+# should be recompiled (instead of the whole set).
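+#
+# For example (dependency names are illustrative):
+#   make FULL=1                        # visit and recompile all deps as needed
+#   make FORCE_REBUILD="cowlib ranch"  # force-rebuild only these deps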
+
+FORCE_REBUILD ?=
+
+ifeq ($(origin FULL),undefined)
+ifneq ($(strip $(force_rebuild_dep)$(FORCE_REBUILD)),)
+define force_rebuild_dep
+echo "$(FORCE_REBUILD)" | grep -qw "$$(basename "$1")"
+endef
+endif
+endif
+
+ifneq ($(SKIP_DEPS),)
+deps::
+else
+deps:: $(ALL_DEPS_DIRS) apps clean-tmp-deps.log | $(ERLANG_MK_TMP)
+ifneq ($(ALL_DEPS_DIRS),)
+ $(verbose) set -e; for dep in $(ALL_DEPS_DIRS); do \
+ if grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/deps.log; then \
+ :; \
+ else \
+ echo $$dep >> $(ERLANG_MK_TMP)/deps.log; \
+ if [ -z "$(strip $(FULL))" ] $(if $(force_rebuild_dep),&& ! ($(call force_rebuild_dep,$$dep)),) && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ elif [ -f $$dep/GNUmakefile ] || [ -f $$dep/makefile ] || [ -f $$dep/Makefile ]; then \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ else \
+ echo "Error: No Makefile to build dependency $$dep." >&2; \
+ exit 2; \
+ fi \
+ fi \
+ done
+endif
+endif
+
+# Deps related targets.
+
+# @todo rename GNUmakefile and makefile into Makefile first, if they exist
+# While the file could also be named GNUmakefile or makefile,
+# in practice only Makefile has been needed so far.
+define dep_autopatch
+ if [ -f $(DEPS_DIR)/$(1)/erlang.mk ]; then \
+ rm -rf $(DEPS_DIR)/$1/ebin/; \
+ $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+ $(call dep_autopatch_erlang_mk,$(1)); \
+ elif [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+ if [ -f $(DEPS_DIR)/$1/rebar.lock ]; then \
+ $(call dep_autopatch2,$1); \
+ elif [ 0 != `grep -c "include ../\w*\.mk" $(DEPS_DIR)/$(1)/Makefile` ]; then \
+ $(call dep_autopatch2,$(1)); \
+ elif [ 0 != `grep -ci "^[^#].*rebar" $(DEPS_DIR)/$(1)/Makefile` ]; then \
+ $(call dep_autopatch2,$(1)); \
+ elif [ -n "`find $(DEPS_DIR)/$(1)/ -type f -name \*.mk -not -name erlang.mk -exec grep -i "^[^#].*rebar" '{}' \;`" ]; then \
+ $(call dep_autopatch2,$(1)); \
+ fi \
+ else \
+ if [ ! -d $(DEPS_DIR)/$(1)/src/ ]; then \
+ $(call dep_autopatch_noop,$(1)); \
+ else \
+ $(call dep_autopatch2,$(1)); \
+ fi \
+ fi
+endef
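+
+# In short: deps that ship their own erlang.mk are patched to reuse the
+# parent Erlang.mk; deps whose Makefile references rebar (directly, via
+# rebar.lock, or via included .mk files) are re-autopatched; deps with no
+# Makefile get one generated (or a noop Makefile when there is no src/).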
+
+define dep_autopatch2
+ ! test -f $(DEPS_DIR)/$1/ebin/$1.app || \
+ mv -n $(DEPS_DIR)/$1/ebin/$1.app $(DEPS_DIR)/$1/src/$1.app.src; \
+ rm -f $(DEPS_DIR)/$1/ebin/$1.app; \
+ if [ -f $(DEPS_DIR)/$1/src/$1.app.src.script ]; then \
+ $(call erlang,$(call dep_autopatch_appsrc_script.erl,$(1))); \
+ fi; \
+ $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+ if [ -f $(DEPS_DIR)/$(1)/rebar -o -f $(DEPS_DIR)/$(1)/rebar.config -o -f $(DEPS_DIR)/$(1)/rebar.config.script -o -f $(DEPS_DIR)/$1/rebar.lock ]; then \
+ $(call dep_autopatch_fetch_rebar); \
+ $(call dep_autopatch_rebar,$(1)); \
+ else \
+ $(call dep_autopatch_gen,$(1)); \
+ fi
+endef
+
+define dep_autopatch_noop
+ printf "noop:\n" > $(DEPS_DIR)/$(1)/Makefile
+endef
+
+# Replace "include erlang.mk" with a line that will load the parent Erlang.mk
+# if given. Do it for all 3 possible Makefile file names.
+ifeq ($(NO_AUTOPATCH_ERLANG_MK),)
+define dep_autopatch_erlang_mk
+ for f in Makefile makefile GNUmakefile; do \
+ if [ -f $(DEPS_DIR)/$1/$$f ]; then \
+ sed -i.bak s/'include *erlang.mk'/'include $$(if $$(ERLANG_MK_FILENAME),$$(ERLANG_MK_FILENAME),erlang.mk)'/ $(DEPS_DIR)/$1/$$f; \
+ fi \
+ done
+endef
+else
+define dep_autopatch_erlang_mk
+ :
+endef
+endif
+
+define dep_autopatch_gen
+ printf "%s\n" \
+ "ERLC_OPTS = +debug_info" \
+ "include ../../erlang.mk" > $(DEPS_DIR)/$(1)/Makefile
+endef
+
+# We use flock/lockf when available to avoid concurrency issues.
+define dep_autopatch_fetch_rebar
+ if command -v flock >/dev/null; then \
+ flock $(ERLANG_MK_TMP)/rebar.lock sh -c "$(call dep_autopatch_fetch_rebar2)"; \
+ elif command -v lockf >/dev/null; then \
+ lockf $(ERLANG_MK_TMP)/rebar.lock sh -c "$(call dep_autopatch_fetch_rebar2)"; \
+ else \
+ $(call dep_autopatch_fetch_rebar2); \
+ fi
+endef
+
+define dep_autopatch_fetch_rebar2
+ if [ ! -d $(ERLANG_MK_TMP)/rebar ]; then \
+ git clone -q -n -- $(REBAR_GIT) $(ERLANG_MK_TMP)/rebar; \
+ cd $(ERLANG_MK_TMP)/rebar; \
+ git checkout -q $(REBAR_COMMIT); \
+ ./bootstrap; \
+ cd -; \
+ fi
+endef
+
+define dep_autopatch_rebar
+ if [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+ mv $(DEPS_DIR)/$(1)/Makefile $(DEPS_DIR)/$(1)/Makefile.orig.mk; \
+ fi; \
+ $(call erlang,$(call dep_autopatch_rebar.erl,$(1))); \
+ rm -f $(DEPS_DIR)/$(1)/ebin/$(1).app
+endef
+
+define dep_autopatch_rebar.erl
+ application:load(rebar),
+ application:set_env(rebar, log_level, debug),
+ rmemo:start(),
+ Conf1 = case file:consult("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config)") of
+ {ok, Conf0} -> Conf0;
+ _ -> []
+ end,
+ {Conf, OsEnv} = fun() ->
+ case filelib:is_file("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)") of
+ false -> {Conf1, []};
+ true ->
+ Bindings0 = erl_eval:new_bindings(),
+ Bindings1 = erl_eval:add_binding('CONFIG', Conf1, Bindings0),
+ Bindings = erl_eval:add_binding('SCRIPT', "$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings1),
+ Before = os:getenv(),
+ {ok, Conf2} = file:script("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings),
+ {Conf2, lists:foldl(fun(E, Acc) -> lists:delete(E, Acc) end, os:getenv(), Before)}
+ end
+ end(),
+ Write = fun (Text) ->
+ file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/Makefile)", Text, [append])
+ end,
+ Escape = fun (Text) ->
+ re:replace(Text, "\\\\$$", "\$$$$", [global, {return, list}])
+ end,
+ Write("IGNORE_DEPS += edown eper eunit_formatters meck node_package "
+ "rebar_lock_deps_plugin rebar_vsn_plugin reltool_util\n"),
+ Write("C_SRC_DIR = /path/do/not/exist\n"),
+ Write("C_SRC_TYPE = rebar\n"),
+ Write("DRV_CFLAGS = -fPIC\nexport DRV_CFLAGS\n"),
+ Write(["ERLANG_ARCH = ", rebar_utils:wordsize(), "\nexport ERLANG_ARCH\n"]),
+ ToList = fun
+ (V) when is_atom(V) -> atom_to_list(V);
+ (V) when is_list(V) -> "'\\"" ++ V ++ "\\"'"
+ end,
+ fun() ->
+ Write("ERLC_OPTS = +debug_info\nexport ERLC_OPTS\n"),
+ case lists:keyfind(erl_opts, 1, Conf) of
+ false -> ok;
+ {_, ErlOpts} ->
+ lists:foreach(fun
+ ({d, D}) ->
+ Write("ERLC_OPTS += -D" ++ ToList(D) ++ "=1\n");
+ ({d, DKey, DVal}) ->
+ Write("ERLC_OPTS += -D" ++ ToList(DKey) ++ "=" ++ ToList(DVal) ++ "\n");
+ ({i, I}) ->
+ Write(["ERLC_OPTS += -I ", I, "\n"]);
+ ({platform_define, Regex, D}) ->
+ case rebar_utils:is_arch(Regex) of
+ true -> Write("ERLC_OPTS += -D" ++ ToList(D) ++ "=1\n");
+ false -> ok
+ end;
+ ({parse_transform, PT}) ->
+ Write("ERLC_OPTS += +'{parse_transform, " ++ ToList(PT) ++ "}'\n");
+ (_) -> ok
+ end, ErlOpts)
+ end,
+ Write("\n")
+ end(),
+ GetHexVsn = fun(N, NP) ->
+ case file:consult("$(call core_native_path,$(DEPS_DIR)/$1/rebar.lock)") of
+ {ok, Lock} ->
+ io:format("~p~n", [Lock]),
+ case lists:keyfind("1.1.0", 1, Lock) of
+ {_, LockPkgs} ->
+ io:format("~p~n", [LockPkgs]),
+ case lists:keyfind(atom_to_binary(N, latin1), 1, LockPkgs) of
+ {_, {pkg, _, Vsn}, _} ->
+ io:format("~p~n", [Vsn]),
+ {N, {hex, NP, binary_to_list(Vsn)}};
+ _ ->
+ false
+ end;
+ _ ->
+ false
+ end;
+ _ ->
+ false
+ end
+ end,
+ SemVsn = fun
+ ("~>" ++ S0) ->
+ S = case S0 of
+ " " ++ S1 -> S1;
+ _ -> S0
+ end,
+ case length([ok || $$. <- S]) of
+ 0 -> S ++ ".0.0";
+ 1 -> S ++ ".0";
+ _ -> S
+ end;
+ (S) -> S
+ end,
+ fun() ->
+ File = case lists:keyfind(deps, 1, Conf) of
+ false -> [];
+ {_, Deps} ->
+ [begin case case Dep of
+ N when is_atom(N) -> GetHexVsn(N, N);
+ {N, S} when is_atom(N), is_list(S) -> {N, {hex, N, SemVsn(S)}};
+ {N, {pkg, NP}} when is_atom(N) -> GetHexVsn(N, NP);
+ {N, S, {pkg, NP}} -> {N, {hex, NP, S}};
+ {N, S} when is_tuple(S) -> {N, S};
+ {N, _, S} -> {N, S};
+ {N, _, S, _} -> {N, S};
+ _ -> false
+ end of
+ false -> ok;
+ {Name, Source} ->
+ {Method, Repo, Commit} = case Source of
+ {hex, NPV, V} -> {hex, V, NPV};
+ {git, R} -> {git, R, master};
+ {M, R, {branch, C}} -> {M, R, C};
+ {M, R, {ref, C}} -> {M, R, C};
+ {M, R, {tag, C}} -> {M, R, C};
+ {M, R, C} -> {M, R, C}
+ end,
+ Write(io_lib:format("DEPS += ~s\ndep_~s = ~s ~s ~s~n", [Name, Name, Method, Repo, Commit]))
+ end end || Dep <- Deps]
+ end
+ end(),
+ fun() ->
+ case lists:keyfind(erl_first_files, 1, Conf) of
+ false -> ok;
+ {_, Files} ->
+ Names = [[" ", case lists:reverse(F) of
+ "lre." ++ Elif -> lists:reverse(Elif);
+ "lrx." ++ Elif -> lists:reverse(Elif);
+ "lry." ++ Elif -> lists:reverse(Elif);
+ Elif -> lists:reverse(Elif)
+ end] || "src/" ++ F <- Files],
+ Write(io_lib:format("COMPILE_FIRST +=~s\n", [Names]))
+ end
+ end(),
+ Write("\n\nrebar_dep: preprocess pre-deps deps pre-app app\n"),
+ Write("\npreprocess::\n"),
+ Write("\npre-deps::\n"),
+ Write("\npre-app::\n"),
+ PatchHook = fun(Cmd) ->
+ Cmd2 = re:replace(Cmd, "^([g]?make)(.*)( -C.*)", "\\\\1\\\\3\\\\2", [{return, list}]),
+ case Cmd2 of
+ "make -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1);
+ "gmake -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1);
+ "make " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1);
+ "gmake " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1);
+ _ -> Escape(Cmd)
+ end
+ end,
+ fun() ->
+ case lists:keyfind(pre_hooks, 1, Conf) of
+ false -> ok;
+ {_, Hooks} ->
+ [case H of
+ {'get-deps', Cmd} ->
+ Write("\npre-deps::\n\t" ++ PatchHook(Cmd) ++ "\n");
+ {compile, Cmd} ->
+ Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n");
+ {Regex, compile, Cmd} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n");
+ false -> ok
+ end;
+ _ -> ok
+ end || H <- Hooks]
+ end
+ end(),
+ ShellToMk = fun(V0) ->
+ V1 = re:replace(V0, "[$$][(]", "$$\(shell ", [global]),
+ V = re:replace(V1, "([$$])(?![(])(\\\\w*)", "\\\\1(\\\\2)", [global]),
+ re:replace(V, "-Werror\\\\b", "", [{return, list}, global])
+ end,
+ PortSpecs = fun() ->
+ case lists:keyfind(port_specs, 1, Conf) of
+ false ->
+ case filelib:is_dir("$(call core_native_path,$(DEPS_DIR)/$1/c_src)") of
+ false -> [];
+ true ->
+ [{"priv/" ++ proplists:get_value(so_name, Conf, "$(1)_drv.so"),
+ proplists:get_value(port_sources, Conf, ["c_src/*.c"]), []}]
+ end;
+ {_, Specs} ->
+ lists:flatten([case S of
+ {Output, Input} -> {ShellToMk(Output), Input, []};
+ {Regex, Output, Input} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {ShellToMk(Output), Input, []};
+ false -> []
+ end;
+ {Regex, Output, Input, [{env, Env}]} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {ShellToMk(Output), Input, Env};
+ false -> []
+ end
+ end || S <- Specs])
+ end
+ end(),
+ PortSpecWrite = fun (Text) ->
+ file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/c_src/Makefile.erlang.mk)", Text, [append])
+ end,
+ case PortSpecs of
+ [] -> ok;
+ _ ->
+ Write("\npre-app::\n\t@$$\(MAKE) --no-print-directory -f c_src/Makefile.erlang.mk\n"),
+ PortSpecWrite(io_lib:format("ERL_CFLAGS ?= -finline-functions -Wall -fPIC -I \\"~s/erts-~s/include\\" -I \\"~s\\"\n",
+ [code:root_dir(), erlang:system_info(version), code:lib_dir(erl_interface, include)])),
+ PortSpecWrite(io_lib:format("ERL_LDFLAGS ?= -L \\"~s\\" -lei\n",
+ [code:lib_dir(erl_interface, lib)])),
+ [PortSpecWrite(["\n", E, "\n"]) || E <- OsEnv],
+ FilterEnv = fun(Env) ->
+ lists:flatten([case E of
+ {_, _} -> E;
+ {Regex, K, V} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {K, V};
+ false -> []
+ end
+ end || E <- Env])
+ end,
+ MergeEnv = fun(Env) ->
+ lists:foldl(fun ({K, V}, Acc) ->
+ case lists:keyfind(K, 1, Acc) of
+ false -> [{K, rebar_utils:expand_env_variable(V, K, "")}|Acc];
+ {_, V0} -> [{K, rebar_utils:expand_env_variable(V, K, V0)}|Acc]
+ end
+ end, [], Env)
+ end,
+ PortEnv = case lists:keyfind(port_env, 1, Conf) of
+ false -> [];
+ {_, PortEnv0} -> FilterEnv(PortEnv0)
+ end,
+ PortSpec = fun ({Output, Input0, Env}) ->
+ filelib:ensure_dir("$(call core_native_path,$(DEPS_DIR)/$1/)" ++ Output),
+ Input = [[" ", I] || I <- Input0],
+ PortSpecWrite([
+ [["\n", K, " = ", ShellToMk(V)] || {K, V} <- lists:reverse(MergeEnv(PortEnv))],
+ case $(PLATFORM) of
+ darwin -> "\n\nLDFLAGS += -flat_namespace -undefined suppress";
+ _ -> ""
+ end,
+ "\n\nall:: ", Output, "\n\t@:\n\n",
+ "%.o: %.c\n\t$$\(CC) -c -o $$\@ $$\< $$\(CFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.C\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.cc\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.cpp\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ [[Output, ": ", K, " += ", ShellToMk(V), "\n"] || {K, V} <- lists:reverse(MergeEnv(FilterEnv(Env)))],
+ Output, ": $$\(foreach ext,.c .C .cc .cpp,",
+ "$$\(patsubst %$$\(ext),%.o,$$\(filter %$$\(ext),$$\(wildcard", Input, "))))\n",
+ "\t$$\(CC) -o $$\@ $$\? $$\(LDFLAGS) $$\(ERL_LDFLAGS) $$\(DRV_LDFLAGS) $$\(EXE_LDFLAGS)",
+ case {filename:extension(Output), $(PLATFORM)} of
+ {[], _} -> "\n";
+ {_, darwin} -> "\n";
+ _ -> " -shared\n"
+ end])
+ end,
+ [PortSpec(S) || S <- PortSpecs]
+ end,
+ fun() ->
+ case lists:keyfind(plugins, 1, Conf) of
+ false -> ok;
+ {_, Plugins0} ->
+ Plugins = [P || P <- Plugins0, is_tuple(P)],
+ case lists:keyfind('lfe-compile', 1, Plugins) of
+ false -> ok;
+ _ -> Write("\nBUILD_DEPS = lfe lfe.mk\ndep_lfe.mk = git https://github.com/ninenines/lfe.mk master\nDEP_PLUGINS = lfe.mk\n")
+ end
+ end
+ end(),
+ Write("\ninclude $$\(if $$\(ERLANG_MK_FILENAME),$$\(ERLANG_MK_FILENAME),erlang.mk)"),
+ RunPlugin = fun(Plugin, Step) ->
+ case erlang:function_exported(Plugin, Step, 2) of
+ false -> ok;
+ true ->
+ c:cd("$(call core_native_path,$(DEPS_DIR)/$1/)"),
+ Ret = Plugin:Step({config, "", Conf, dict:new(), dict:new(), dict:new(),
+ dict:store(base_dir, "", dict:new())}, undefined),
+ io:format("rebar plugin ~p step ~p ret ~p~n", [Plugin, Step, Ret])
+ end
+ end,
+ fun() ->
+ case lists:keyfind(plugins, 1, Conf) of
+ false -> ok;
+ {_, Plugins0} ->
+ Plugins = [P || P <- Plugins0, is_atom(P)],
+ [begin
+ case lists:keyfind(deps, 1, Conf) of
+ false -> ok;
+ {_, Deps} ->
+ case lists:keyfind(P, 1, Deps) of
+ false -> ok;
+ _ ->
+ Path = "$(call core_native_path,$(DEPS_DIR)/)" ++ atom_to_list(P),
+ io:format("~s", [os:cmd("$(MAKE) -C $(call core_native_path,$(DEPS_DIR)/$1) " ++ Path)]),
+ io:format("~s", [os:cmd("$(MAKE) -C " ++ Path ++ " IS_DEP=1")]),
+ code:add_patha(Path ++ "/ebin")
+ end
+ end
+ end || P <- Plugins],
+ [case code:load_file(P) of
+ {module, P} -> ok;
+ _ ->
+ case lists:keyfind(plugin_dir, 1, Conf) of
+ false -> ok;
+ {_, PluginsDir} ->
+ ErlFile = "$(call core_native_path,$(DEPS_DIR)/$1/)" ++ PluginsDir ++ "/" ++ atom_to_list(P) ++ ".erl",
+ {ok, P, Bin} = compile:file(ErlFile, [binary]),
+ {module, P} = code:load_binary(P, ErlFile, Bin)
+ end
+ end || P <- Plugins],
+ [RunPlugin(P, preprocess) || P <- Plugins],
+ [RunPlugin(P, pre_compile) || P <- Plugins],
+ [RunPlugin(P, compile) || P <- Plugins]
+ end
+ end(),
+ halt()
+endef
+
+define dep_autopatch_appsrc_script.erl
+ AppSrc = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+ AppSrcScript = AppSrc ++ ".script",
+ {ok, Conf0} = file:consult(AppSrc),
+ Bindings0 = erl_eval:new_bindings(),
+ Bindings1 = erl_eval:add_binding('CONFIG', Conf0, Bindings0),
+ Bindings = erl_eval:add_binding('SCRIPT', AppSrcScript, Bindings1),
+ Conf = case file:script(AppSrcScript, Bindings) of
+ {ok, [C]} -> C;
+ {ok, C} -> C
+ end,
+ ok = file:write_file(AppSrc, io_lib:format("~p.~n", [Conf])),
+ halt()
+endef
+
+define dep_autopatch_appsrc.erl
+ AppSrcOut = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+ AppSrcIn = case filelib:is_regular(AppSrcOut) of false -> "$(call core_native_path,$(DEPS_DIR)/$1/ebin/$1.app)"; true -> AppSrcOut end,
+ case filelib:is_regular(AppSrcIn) of
+ false -> ok;
+ true ->
+ {ok, [{application, $(1), L0}]} = file:consult(AppSrcIn),
+ L1 = lists:keystore(modules, 1, L0, {modules, []}),
+ L2 = case lists:keyfind(vsn, 1, L1) of
+ {_, git} -> lists:keyreplace(vsn, 1, L1, {vsn, lists:droplast(os:cmd("git -C $(DEPS_DIR)/$1 describe --dirty --tags --always"))});
+ {_, {cmd, _}} -> lists:keyreplace(vsn, 1, L1, {vsn, "cmd"});
+ _ -> L1
+ end,
+ L3 = case lists:keyfind(registered, 1, L2) of false -> [{registered, []}|L2]; _ -> L2 end,
+ ok = file:write_file(AppSrcOut, io_lib:format("~p.~n", [{application, $(1), L3}])),
+ case AppSrcOut of AppSrcIn -> ok; _ -> ok = file:delete(AppSrcIn) end
+ end,
+ halt()
+endef
+
+define dep_fetch_git
+ git clone -q -n -- $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && git checkout -q $(call dep_commit,$(1));
+endef
+
+define dep_fetch_git-subfolder
+ mkdir -p $(ERLANG_MK_TMP)/git-subfolder; \
+ git clone -q -n -- $(call dep_repo,$1) \
+ $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1); \
+ cd $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1) \
+ && git checkout -q $(call dep_commit,$1); \
+ ln -s $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1)/$(word 4,$(dep_$(1))) \
+ $(DEPS_DIR)/$(call dep_name,$1);
+endef
+
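+# For example, to depend on an application that lives in a subfolder of a
+# larger repository (name, URL and path below are illustrative):
+#
+#     dep_my_dep = git-subfolder https://github.com/user/monorepo master apps/my_dep
+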
+define dep_fetch_git-submodule
+ git submodule update --init -- $(DEPS_DIR)/$1;
+endef
+
+define dep_fetch_hg
+ hg clone -q -U $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && hg update -q $(call dep_commit,$(1));
+endef
+
+define dep_fetch_svn
+ svn checkout -q $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+define dep_fetch_cp
+ cp -R $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+define dep_fetch_ln
+ ln -s $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+# Hex only has a package version. No need to look in the Erlang.mk packages.
+define dep_fetch_hex
+ mkdir -p $(ERLANG_MK_TMP)/hex $(DEPS_DIR)/$1; \
+ $(call core_http_get,$(ERLANG_MK_TMP)/hex/$1.tar,\
+ https://repo.hex.pm/tarballs/$(if $(word 3,$(dep_$1)),$(word 3,$(dep_$1)),$1)-$(strip $(word 2,$(dep_$1))).tar); \
+ tar -xOf $(ERLANG_MK_TMP)/hex/$1.tar contents.tar.gz | tar -C $(DEPS_DIR)/$1 -xzf -;
+endef
+
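+# For example (package name and version below are illustrative):
+#
+#     DEPS = jsx
+#     dep_jsx = hex 2.9.0
+#
+# An optional third word names the Hex package when it differs from the
+# Erlang application name.
+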
+define dep_fetch_fail
+ echo "Error: Unknown or invalid dependency: $(1)." >&2; \
+ exit 78;
+endef
+
+# Kept for compatibility purposes with older Erlang.mk configuration.
+define dep_fetch_legacy
+ $(warning WARNING: '$(1)' dependency configuration uses deprecated format.) \
+ git clone -q -n -- $(word 1,$(dep_$(1))) $(DEPS_DIR)/$(1); \
+ cd $(DEPS_DIR)/$(1) && git checkout -q $(if $(word 2,$(dep_$(1))),$(word 2,$(dep_$(1))),master);
+endef
+
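+# The legacy format is the repository URL optionally followed by a ref,
+# defaulting to master. For example (name and URL are illustrative):
+#
+#     dep_my_dep = https://github.com/user/my_dep 1.0.0
+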
+define dep_target
+$(DEPS_DIR)/$(call dep_name,$1): | $(ERLANG_MK_TMP)
+ $(eval DEP_NAME := $(call dep_name,$1))
+ $(eval DEP_STR := $(if $(filter $1,$(DEP_NAME)),$1,"$1 ($(DEP_NAME))"))
+ $(verbose) if test -d $(APPS_DIR)/$(DEP_NAME); then \
+ echo "Error: Dependency" $(DEP_STR) "conflicts with application found in $(APPS_DIR)/$(DEP_NAME)." >&2; \
+ exit 17; \
+ fi
+ $(verbose) mkdir -p $(DEPS_DIR)
+ $(dep_verbose) $(call dep_fetch_$(strip $(call dep_fetch,$(1))),$(1))
+ $(verbose) if [ -f $(DEPS_DIR)/$(1)/configure.ac -o -f $(DEPS_DIR)/$(1)/configure.in ] \
+ && [ ! -f $(DEPS_DIR)/$(1)/configure ]; then \
+ echo " AUTO " $(DEP_STR); \
+ cd $(DEPS_DIR)/$(1) && autoreconf -Wall -vif -I m4; \
+ fi
+ - $(verbose) if [ -f $(DEPS_DIR)/$(DEP_NAME)/configure ]; then \
+ echo " CONF " $(DEP_STR); \
+ cd $(DEPS_DIR)/$(DEP_NAME) && ./configure; \
+ fi
+ifeq ($(filter $(1),$(NO_AUTOPATCH)),)
+ $(verbose) $$(MAKE) --no-print-directory autopatch-$(DEP_NAME)
+endif
+
+.PHONY: autopatch-$(call dep_name,$1)
+
+autopatch-$(call dep_name,$1)::
+ $(verbose) if [ "$(1)" = "amqp_client" -a "$(RABBITMQ_CLIENT_PATCH)" ]; then \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \
+ echo " PATCH Downloading rabbitmq-codegen"; \
+ git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \
+ fi; \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-server ]; then \
+ echo " PATCH Downloading rabbitmq-server"; \
+ git clone https://github.com/rabbitmq/rabbitmq-server.git $(DEPS_DIR)/rabbitmq-server; \
+ fi; \
+ ln -s $(DEPS_DIR)/amqp_client/deps/rabbit_common-0.0.0 $(DEPS_DIR)/rabbit_common; \
+ elif [ "$(1)" = "rabbit" -a "$(RABBITMQ_SERVER_PATCH)" ]; then \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \
+ echo " PATCH Downloading rabbitmq-codegen"; \
+ git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \
+ fi \
+ elif [ "$1" = "elixir" -a "$(ELIXIR_PATCH)" ]; then \
+ ln -s lib/elixir/ebin $(DEPS_DIR)/elixir/; \
+ else \
+ $$(call dep_autopatch,$(call dep_name,$1)) \
+ fi
+endef
+
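+# Autopatching can be disabled for individual dependencies by listing
+# them in NO_AUTOPATCH. For example (name is illustrative):
+#
+#     NO_AUTOPATCH = my_dep
+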
+$(foreach dep,$(BUILD_DEPS) $(DEPS),$(eval $(call dep_target,$(dep))))
+
+ifndef IS_APP
+clean:: clean-apps
+
+clean-apps:
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ $(MAKE) -C $$dep clean IS_APP=1; \
+ done
+
+distclean:: distclean-apps
+
+distclean-apps:
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ $(MAKE) -C $$dep distclean IS_APP=1; \
+ done
+endif
+
+ifndef SKIP_DEPS
+distclean:: distclean-deps
+
+distclean-deps:
+ $(gen_verbose) rm -rf $(DEPS_DIR)
+endif
+
+# Forward-declare variables used in core/deps-tools.mk. This is required
+# in case plugins use them.
+
+ERLANG_MK_RECURSIVE_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-deps-list.log
+ERLANG_MK_RECURSIVE_DOC_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-doc-deps-list.log
+ERLANG_MK_RECURSIVE_REL_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-rel-deps-list.log
+ERLANG_MK_RECURSIVE_TEST_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-test-deps-list.log
+ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-shell-deps-list.log
+
+ERLANG_MK_QUERY_DEPS_FILE = $(ERLANG_MK_TMP)/query-deps.log
+ERLANG_MK_QUERY_DOC_DEPS_FILE = $(ERLANG_MK_TMP)/query-doc-deps.log
+ERLANG_MK_QUERY_REL_DEPS_FILE = $(ERLANG_MK_TMP)/query-rel-deps.log
+ERLANG_MK_QUERY_TEST_DEPS_FILE = $(ERLANG_MK_TMP)/query-test-deps.log
+ERLANG_MK_QUERY_SHELL_DEPS_FILE = $(ERLANG_MK_TMP)/query-shell-deps.log
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: clean-app
+
+# Configuration.
+
+ERLC_OPTS ?= -Werror +debug_info +warn_export_vars +warn_shadow_vars \
+ +warn_obsolete_guard # +bin_opt_info +warn_export_all +warn_missing_spec
+COMPILE_FIRST ?=
+COMPILE_FIRST_PATHS = $(addprefix src/,$(addsuffix .erl,$(COMPILE_FIRST)))
+ERLC_EXCLUDE ?=
+ERLC_EXCLUDE_PATHS = $(addprefix src/,$(addsuffix .erl,$(ERLC_EXCLUDE)))
+
+ERLC_ASN1_OPTS ?=
+
+ERLC_MIB_OPTS ?=
+COMPILE_MIB_FIRST ?=
+COMPILE_MIB_FIRST_PATHS = $(addprefix mibs/,$(addsuffix .mib,$(COMPILE_MIB_FIRST)))
+
+# Verbosity.
+
+app_verbose_0 = @echo " APP " $(PROJECT);
+app_verbose_2 = set -x;
+app_verbose = $(app_verbose_$(V))
+
+appsrc_verbose_0 = @echo " APP " $(PROJECT).app.src;
+appsrc_verbose_2 = set -x;
+appsrc_verbose = $(appsrc_verbose_$(V))
+
+makedep_verbose_0 = @echo " DEPEND" $(PROJECT).d;
+makedep_verbose_2 = set -x;
+makedep_verbose = $(makedep_verbose_$(V))
+
+erlc_verbose_0 = @echo " ERLC " $(filter-out $(patsubst %,%.erl,$(ERLC_EXCLUDE)),\
+ $(filter %.erl %.core,$(?F)));
+erlc_verbose_2 = set -x;
+erlc_verbose = $(erlc_verbose_$(V))
+
+xyrl_verbose_0 = @echo " XYRL " $(filter %.xrl %.yrl,$(?F));
+xyrl_verbose_2 = set -x;
+xyrl_verbose = $(xyrl_verbose_$(V))
+
+asn1_verbose_0 = @echo " ASN1 " $(filter %.asn1,$(?F));
+asn1_verbose_2 = set -x;
+asn1_verbose = $(asn1_verbose_$(V))
+
+mib_verbose_0 = @echo " MIB " $(filter %.bin %.mib,$(?F));
+mib_verbose_2 = set -x;
+mib_verbose = $(mib_verbose_$(V))
+
+ifneq ($(wildcard src/),)
+
+# Targets.
+
+app:: $(if $(wildcard ebin/test),clean) deps
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d
+ $(verbose) $(MAKE) --no-print-directory app-build
+
+ifeq ($(wildcard src/$(PROJECT_MOD).erl),)
+define app_file
+{application, '$(PROJECT)', [
+ {description, "$(PROJECT_DESCRIPTION)"},
+ {vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP),
+ {id$(comma)$(space)"$(1)"}$(comma))
+ {modules, [$(call comma_list,$(2))]},
+ {registered, []},
+ {applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(foreach dep,$(DEPS),$(call dep_name,$(dep))))]},
+ {env, $(subst \,\\,$(PROJECT_ENV))}$(if $(findstring {,$(PROJECT_APP_EXTRA_KEYS)),$(comma)$(newline)$(tab)$(subst \,\\,$(PROJECT_APP_EXTRA_KEYS)),)
+]}.
+endef
+else
+define app_file
+{application, '$(PROJECT)', [
+ {description, "$(PROJECT_DESCRIPTION)"},
+ {vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP),
+ {id$(comma)$(space)"$(1)"}$(comma))
+ {modules, [$(call comma_list,$(2))]},
+ {registered, [$(call comma_list,$(PROJECT)_sup $(PROJECT_REGISTERED))]},
+ {applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(foreach dep,$(DEPS),$(call dep_name,$(dep))))]},
+ {mod, {$(PROJECT_MOD), []}},
+ {env, $(subst \,\\,$(PROJECT_ENV))}$(if $(findstring {,$(PROJECT_APP_EXTRA_KEYS)),$(comma)$(newline)$(tab)$(subst \,\\,$(PROJECT_APP_EXTRA_KEYS)),)
+]}.
+endef
+endif
+
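+# The .app file above is rendered from the PROJECT_* variables set in the
+# project Makefile. For example (values are illustrative):
+#
+#     PROJECT_DESCRIPTION = Example application
+#     PROJECT_VERSION = 1.0.0
+#     PROJECT_ENV = [{listen_port, 8080}]
+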
+app-build: ebin/$(PROJECT).app
+ $(verbose) :
+
+# Source files.
+
+ALL_SRC_FILES := $(sort $(call core_find,src/,*))
+
+ERL_FILES := $(filter %.erl,$(ALL_SRC_FILES))
+CORE_FILES := $(filter %.core,$(ALL_SRC_FILES))
+
+# ASN.1 files.
+
+ifneq ($(wildcard asn1/),)
+ASN1_FILES = $(sort $(call core_find,asn1/,*.asn1))
+ERL_FILES += $(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES))))
+
+define compile_asn1
+ $(verbose) mkdir -p include/
+ $(asn1_verbose) erlc -v -I include/ -o asn1/ +noobj $(ERLC_ASN1_OPTS) $(1)
+ $(verbose) mv asn1/*.erl src/
+ -$(verbose) mv asn1/*.hrl include/
+ $(verbose) mv asn1/*.asn1db include/
+endef
+
+$(PROJECT).d:: $(ASN1_FILES)
+ $(if $(strip $?),$(call compile_asn1,$?))
+endif
+
+# SNMP MIB files.
+
+ifneq ($(wildcard mibs/),)
+MIB_FILES = $(sort $(call core_find,mibs/,*.mib))
+
+$(PROJECT).d:: $(COMPILE_MIB_FIRST_PATHS) $(MIB_FILES)
+ $(verbose) mkdir -p include/ priv/mibs/
+ $(mib_verbose) erlc -v $(ERLC_MIB_OPTS) -o priv/mibs/ -I priv/mibs/ $?
+ $(mib_verbose) erlc -o include/ -- $(addprefix priv/mibs/,$(patsubst %.mib,%.bin,$(notdir $?)))
+endif
+
+# Leex and Yecc files.
+
+XRL_FILES := $(filter %.xrl,$(ALL_SRC_FILES))
+XRL_ERL_FILES = $(addprefix src/,$(patsubst %.xrl,%.erl,$(notdir $(XRL_FILES))))
+ERL_FILES += $(XRL_ERL_FILES)
+
+YRL_FILES := $(filter %.yrl,$(ALL_SRC_FILES))
+YRL_ERL_FILES = $(addprefix src/,$(patsubst %.yrl,%.erl,$(notdir $(YRL_FILES))))
+ERL_FILES += $(YRL_ERL_FILES)
+
+$(PROJECT).d:: $(XRL_FILES) $(YRL_FILES)
+ $(if $(strip $?),$(xyrl_verbose) erlc -v -o src/ $(YRL_ERLC_OPTS) $?)
+
+# Erlang and Core Erlang files.
+
+define makedep.erl
+ E = ets:new(makedep, [bag]),
+ G = digraph:new([acyclic]),
+ ErlFiles = lists:usort(string:tokens("$(ERL_FILES)", " ")),
+ DepsDir = "$(call core_native_path,$(DEPS_DIR))",
+ AppsDir = "$(call core_native_path,$(APPS_DIR))",
+ DepsDirsSrc = "$(if $(wildcard $(DEPS_DIR)/*/src), $(call core_native_path,$(wildcard $(DEPS_DIR)/*/src)))",
+ DepsDirsInc = "$(if $(wildcard $(DEPS_DIR)/*/include), $(call core_native_path,$(wildcard $(DEPS_DIR)/*/include)))",
+ AppsDirsSrc = "$(if $(wildcard $(APPS_DIR)/*/src), $(call core_native_path,$(wildcard $(APPS_DIR)/*/src)))",
+ AppsDirsInc = "$(if $(wildcard $(APPS_DIR)/*/include), $(call core_native_path,$(wildcard $(APPS_DIR)/*/include)))",
+ DepsDirs = lists:usort(string:tokens(DepsDirsSrc++DepsDirsInc, " ")),
+ AppsDirs = lists:usort(string:tokens(AppsDirsSrc++AppsDirsInc, " ")),
+ Modules = [{list_to_atom(filename:basename(F, ".erl")), F} || F <- ErlFiles],
+ Add = fun (Mod, Dep) ->
+ case lists:keyfind(Dep, 1, Modules) of
+ false -> ok;
+ {_, DepFile} ->
+ {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+ ets:insert(E, {ModFile, DepFile}),
+ digraph:add_vertex(G, Mod),
+ digraph:add_vertex(G, Dep),
+ digraph:add_edge(G, Mod, Dep)
+ end
+ end,
+ AddHd = fun (F, Mod, DepFile) ->
+ case file:open(DepFile, [read]) of
+ {error, enoent} ->
+ ok;
+ {ok, Fd} ->
+ {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+ case ets:match(E, {ModFile, DepFile}) of
+ [] ->
+ ets:insert(E, {ModFile, DepFile}),
+ F(F, Fd, Mod, 0);
+ _ -> ok
+ end
+ end
+ end,
+ SearchHrl = fun
+ F(_Hrl, []) -> {error, enoent};
+ F(Hrl, [Dir|Dirs]) ->
+ HrlF = filename:join([Dir, Hrl]),
+ case filelib:is_file(HrlF) of
+ true ->
+ {ok, HrlF};
+ false -> F(Hrl, Dirs)
+ end
+ end,
+ Attr = fun
+ (_F, Mod, behavior, Dep) ->
+ Add(Mod, Dep);
+ (_F, Mod, behaviour, Dep) ->
+ Add(Mod, Dep);
+ (_F, Mod, compile, {parse_transform, Dep}) ->
+ Add(Mod, Dep);
+ (_F, Mod, compile, Opts) when is_list(Opts) ->
+ case proplists:get_value(parse_transform, Opts) of
+ undefined -> ok;
+ Dep -> Add(Mod, Dep)
+ end;
+ (F, Mod, include, Hrl) ->
+ case SearchHrl(Hrl, ["src", "include",AppsDir,DepsDir]++AppsDirs++DepsDirs) of
+ {ok, FoundHrl} -> AddHd(F, Mod, FoundHrl);
+ {error, _} -> false
+ end;
+ (F, Mod, include_lib, Hrl) ->
+ case SearchHrl(Hrl, ["src", "include",AppsDir,DepsDir]++AppsDirs++DepsDirs) of
+ {ok, FoundHrl} -> AddHd(F, Mod, FoundHrl);
+ {error, _} -> false
+ end;
+ (F, Mod, import, {Imp, _}) ->
+ IsFile =
+ case lists:keyfind(Imp, 1, Modules) of
+ false -> false;
+ {_, FilePath} -> filelib:is_file(FilePath)
+ end,
+ case IsFile of
+ false -> ok;
+ true -> Add(Mod, Imp)
+ end;
+ (_, _, _, _) -> ok
+ end,
+ MakeDepend = fun
+ (F, Fd, Mod, StartLocation) ->
+ {ok, _Filename} = file:pid2name(Fd),
+ case io:parse_erl_form(Fd, undefined, StartLocation) of
+ {ok, AbsData, EndLocation} ->
+ case AbsData of
+ {attribute, _, Key, Value} ->
+ Attr(F, Mod, Key, Value),
+ F(F, Fd, Mod, EndLocation);
+ _ -> F(F, Fd, Mod, EndLocation)
+ end;
+ {eof, _} -> file:close(Fd);
+ {error, _ErrorDescription} ->
+ file:close(Fd);
+ {error, _ErrorInfo, ErrorLocation} ->
+ F(F, Fd, Mod, ErrorLocation)
+ end,
+ ok
+ end,
+ [begin
+ Mod = list_to_atom(filename:basename(F, ".erl")),
+ case file:open(F, [read]) of
+ {ok, Fd} -> MakeDepend(MakeDepend, Fd, Mod, 0);
+ {error, enoent} -> ok
+ end
+ end || F <- ErlFiles],
+ Depend = sofs:to_external(sofs:relation_to_family(sofs:relation(ets:tab2list(E)))),
+ CompileFirst = [X || X <- lists:reverse(digraph_utils:topsort(G)), [] =/= digraph:in_neighbours(G, X)],
+ TargetPath = fun(Target) ->
+ case lists:keyfind(Target, 1, Modules) of
+ false -> "";
+ {_, DepFile} ->
+ DirSubname = tl(string:tokens(filename:dirname(DepFile), "/")),
+ string:join(DirSubname ++ [atom_to_list(Target)], "/")
+ end
+ end,
+ Output0 = [
+ "# Generated by Erlang.mk. Edit at your own risk!\n\n",
+ [[F, "::", [[" ", D] || D <- Deps], "; @touch \$$@\n"] || {F, Deps} <- Depend],
+ "\nCOMPILE_FIRST +=", [[" ", TargetPath(CF)] || CF <- CompileFirst], "\n"
+ ],
+ Output = case "é" of
+ [233] -> unicode:characters_to_binary(Output0);
+ _ -> Output0
+ end,
+ ok = file:write_file("$(1)", Output),
+ halt()
+endef
+
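+# The generated $(PROJECT).d contains one rule per dependent module plus
+# the computed compile order, roughly as follows (module names are
+# illustrative):
+#
+#     src/my_worker.erl:: src/my_behaviour.erl; @touch $@
+#     COMPILE_FIRST += my_behaviour
+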
+ifeq ($(if $(NO_MAKEDEP),$(wildcard $(PROJECT).d),),)
+$(PROJECT).d:: $(ERL_FILES) $(call core_find,include/,*.hrl) $(MAKEFILE_LIST)
+ $(makedep_verbose) $(call erlang,$(call makedep.erl,$@))
+endif
+
+ifeq ($(IS_APP)$(IS_DEP),)
+ifneq ($(words $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES)),0)
+# Rebuild everything when the Makefile changes.
+$(ERLANG_MK_TMP)/last-makefile-change: $(MAKEFILE_LIST) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES); \
+ touch -c $(PROJECT).d; \
+ fi
+ $(verbose) touch $@
+
+$(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES):: $(ERLANG_MK_TMP)/last-makefile-change
+ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change
+endif
+endif
+
+$(PROJECT).d::
+ $(verbose) :
+
+include $(wildcard $(PROJECT).d)
+
+ebin/$(PROJECT).app:: ebin/
+
+ebin/:
+ $(verbose) mkdir -p ebin/
+
+define compile_erl
+ $(erlc_verbose) erlc -v $(if $(IS_DEP),$(filter-out -Werror,$(ERLC_OPTS)),$(ERLC_OPTS)) -o ebin/ \
+ -pa ebin/ -I include/ $(filter-out $(ERLC_EXCLUDE_PATHS),$(COMPILE_FIRST_PATHS) $(1))
+endef
+
+define validate_app_file
+ case file:consult("ebin/$(PROJECT).app") of
+ {ok, _} -> halt();
+ _ -> halt(1)
+ end
+endef
+
+ebin/$(PROJECT).app:: $(ERL_FILES) $(CORE_FILES) $(wildcard src/$(PROJECT).app.src)
+ $(eval FILES_TO_COMPILE := $(filter-out src/$(PROJECT).app.src,$?))
+ $(if $(strip $(FILES_TO_COMPILE)),$(call compile_erl,$(FILES_TO_COMPILE)))
+# Older git versions do not have the --first-parent flag. Do without in that case.
+ $(eval GITDESCRIBE := $(shell git describe --dirty --abbrev=7 --tags --always --first-parent 2>/dev/null \
+ || git describe --dirty --abbrev=7 --tags --always 2>/dev/null || true))
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(filter-out $(ERLC_EXCLUDE_PATHS),$(ERL_FILES) $(CORE_FILES) $(BEAM_FILES)))))))
+ifeq ($(wildcard src/$(PROJECT).app.src),)
+ $(app_verbose) printf '$(subst %,%%,$(subst $(newline),\n,$(subst ','\'',$(call app_file,$(GITDESCRIBE),$(MODULES)))))' \
+ > ebin/$(PROJECT).app
+ $(verbose) if ! $(call erlang,$(call validate_app_file)); then \
+ echo "The .app file produced is invalid. Please verify the value of PROJECT_ENV." >&2; \
+ exit 1; \
+ fi
+else
+ $(verbose) if [ -z "$$(grep -e '^[^%]*{\s*modules\s*,' src/$(PROJECT).app.src)" ]; then \
+ echo "Empty modules entry not found in $(PROJECT).app.src. Please consult the erlang.mk documentation for instructions." >&2; \
+ exit 1; \
+ fi
+ $(appsrc_verbose) cat src/$(PROJECT).app.src \
+ | sed "s/{[[:space:]]*modules[[:space:]]*,[[:space:]]*\[\]}/{modules, \[$(call comma_list,$(MODULES))\]}/" \
+ | sed "s/{id,[[:space:]]*\"git\"}/{id, \"$(subst /,\/,$(GITDESCRIBE))\"}/" \
+ > ebin/$(PROJECT).app
+endif
+ifneq ($(wildcard src/$(PROJECT).appup),)
+ $(verbose) cp src/$(PROJECT).appup ebin/
+endif
+
+clean:: clean-app
+
+clean-app:
+ $(gen_verbose) rm -rf $(PROJECT).d ebin/ priv/mibs/ $(XRL_ERL_FILES) $(YRL_ERL_FILES) \
+ $(addprefix include/,$(patsubst %.mib,%.hrl,$(notdir $(MIB_FILES)))) \
+ $(addprefix include/,$(patsubst %.asn1,%.hrl,$(notdir $(ASN1_FILES)))) \
+ $(addprefix include/,$(patsubst %.asn1,%.asn1db,$(notdir $(ASN1_FILES)))) \
+ $(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES))))
+
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Viktor Söderqvist <viktor@zuiderkwast.se>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: docs-deps
+
+# Configuration.
+
+ALL_DOC_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(DOC_DEPS))
+
+# Targets.
+
+$(foreach dep,$(DOC_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+doc-deps:
+else
+doc-deps: $(ALL_DOC_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_DOC_DEPS_DIRS) ; do $(MAKE) -C $$dep IS_DEP=1; done
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: rel-deps
+
+# Configuration.
+
+ALL_REL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(REL_DEPS))
+
+# Targets.
+
+$(foreach dep,$(REL_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+rel-deps:
+else
+rel-deps: $(ALL_REL_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_REL_DEPS_DIRS) ; do $(MAKE) -C $$dep; done
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: test-deps test-dir test-build clean-test-dir
+
+# Configuration.
+
+TEST_DIR ?= $(CURDIR)/test
+
+ALL_TEST_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(TEST_DEPS))
+
+TEST_ERLC_OPTS ?= +debug_info +warn_export_vars +warn_shadow_vars +warn_obsolete_guard
+TEST_ERLC_OPTS += -DTEST=1
+
+# Targets.
+
+$(foreach dep,$(TEST_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+test-deps:
+else
+test-deps: $(ALL_TEST_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_TEST_DEPS_DIRS) ; do \
+ if [ -z "$(strip $(FULL))" ] && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ else \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ fi \
+ done
+endif
+
+ifneq ($(wildcard $(TEST_DIR)),)
+test-dir: $(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build
+ @:
+
+test_erlc_verbose_0 = @echo " ERLC " $(filter-out $(patsubst %,%.erl,$(ERLC_EXCLUDE)),\
+ $(filter %.erl %.core,$(notdir $(FILES_TO_COMPILE))));
+test_erlc_verbose_2 = set -x;
+test_erlc_verbose = $(test_erlc_verbose_$(V))
+
+define compile_test_erl
+ $(test_erlc_verbose) erlc -v $(TEST_ERLC_OPTS) -o $(TEST_DIR) \
+ -pa ebin/ -I include/ $(1)
+endef
+
+ERL_TEST_FILES = $(call core_find,$(TEST_DIR)/,*.erl)
+$(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build: $(ERL_TEST_FILES) $(MAKEFILE_LIST)
+ $(eval FILES_TO_COMPILE := $(if $(filter $(MAKEFILE_LIST),$?),$(filter $(ERL_TEST_FILES),$^),$?))
+ $(if $(strip $(FILES_TO_COMPILE)),$(call compile_test_erl,$(FILES_TO_COMPILE)) && touch $@)
+endif
+
+test-build:: IS_TEST=1
+test-build:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build:: $(if $(wildcard src),$(if $(wildcard ebin/test),,clean)) $(if $(IS_APP),,deps test-deps)
+# We already compiled everything when IS_APP=1.
+ifndef IS_APP
+ifneq ($(wildcard src),)
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(verbose) $(MAKE) --no-print-directory app-build ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(gen_verbose) touch ebin/test
+endif
+ifneq ($(wildcard $(TEST_DIR)),)
+ $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+endif
+endif
+
+# Roughly the same as test-build, but when IS_APP=1.
+# We only care about compiling the current application.
+ifdef IS_APP
+test-build-app:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build-app:: deps test-deps
+ifneq ($(wildcard src),)
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(verbose) $(MAKE) --no-print-directory app-build ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(gen_verbose) touch ebin/test
+endif
+ifneq ($(wildcard $(TEST_DIR)),)
+ $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+endif
+endif
+
+clean:: clean-test-dir
+
+clean-test-dir:
+ifneq ($(wildcard $(TEST_DIR)/*.beam),)
+ $(gen_verbose) rm -f $(TEST_DIR)/*.beam $(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: rebar.config
+
+# We strip out -Werror because we don't want to fail due to
+# warnings when used as a dependency.
+
+compat_prepare_erlc_opts = $(shell echo "$1" | sed 's/, */,/g')
+
+define compat_convert_erlc_opts
+$(if $(filter-out -Werror,$1),\
+ $(if $(findstring +,$1),\
+ $(shell echo $1 | cut -b 2-)))
+endef
+
+define compat_erlc_opts_to_list
+[$(call comma_list,$(foreach o,$(call compat_prepare_erlc_opts,$1),$(call compat_convert_erlc_opts,$o)))]
+endef
+
+define compat_rebar_config
+{deps, [
+$(call comma_list,$(foreach d,$(DEPS),\
+ $(if $(filter hex,$(call dep_fetch,$d)),\
+ {$(call dep_name,$d)$(comma)"$(call dep_repo,$d)"},\
+ {$(call dep_name,$d)$(comma)".*"$(comma){git,"$(call dep_repo,$d)"$(comma)"$(call dep_commit,$d)"}})))
+]}.
+{erl_opts, $(call compat_erlc_opts_to_list,$(ERLC_OPTS))}.
+endef
+
+rebar.config:
+ $(gen_verbose) $(call core_render,compat_rebar_config,rebar.config)
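+
+# The generated rebar.config looks roughly like this (dependency name,
+# URL and options are illustrative):
+#
+#     {deps, [
+#         {cowlib,".*",{git,"https://github.com/ninenines/cowlib","2.9.1"}}
+#     ]}.
+#     {erl_opts, [debug_info,warn_export_vars]}.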
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter asciideck,$(DEPS) $(DOC_DEPS)),asciideck)
+
+.PHONY: asciidoc asciidoc-guide asciidoc-manual install-asciidoc distclean-asciidoc-guide distclean-asciidoc-manual
+
+# Core targets.
+
+docs:: asciidoc
+
+distclean:: distclean-asciidoc-guide distclean-asciidoc-manual
+
+# Plugin-specific targets.
+
+asciidoc: asciidoc-guide asciidoc-manual
+
+# User guide.
+
+ifeq ($(wildcard doc/src/guide/book.asciidoc),)
+asciidoc-guide:
+else
+asciidoc-guide: distclean-asciidoc-guide doc-deps
+ a2x -v -f pdf doc/src/guide/book.asciidoc && mv doc/src/guide/book.pdf doc/guide.pdf
+ a2x -v -f chunked doc/src/guide/book.asciidoc && mv doc/src/guide/book.chunked/ doc/html/
+
+distclean-asciidoc-guide:
+ $(gen_verbose) rm -rf doc/html/ doc/guide.pdf
+endif
+
+# Man pages.
+
+ASCIIDOC_MANUAL_FILES := $(wildcard doc/src/manual/*.asciidoc)
+
+ifeq ($(ASCIIDOC_MANUAL_FILES),)
+asciidoc-manual:
+else
+
+# Configuration.
+
+MAN_INSTALL_PATH ?= /usr/local/share/man
+MAN_SECTIONS ?= 3 7
+MAN_PROJECT ?= $(shell echo $(PROJECT) | sed 's/^./\U&\E/')
+MAN_VERSION ?= $(PROJECT_VERSION)
+
+# Plugin-specific targets.
+
+define asciidoc2man.erl
+try
+ [begin
+ io:format(" ADOC ~s~n", [F]),
+ ok = asciideck:to_manpage(asciideck:parse_file(F), #{
+ compress => gzip,
+ outdir => filename:dirname(F),
+ extra2 => "$(MAN_PROJECT) $(MAN_VERSION)",
+ extra3 => "$(MAN_PROJECT) Function Reference"
+ })
+ end || F <- [$(shell echo $(addprefix $(comma)\",$(addsuffix \",$1)) | sed 's/^.//')]],
+ halt(0)
+catch C:E ->
+ io:format("Exception ~p:~p~nStacktrace: ~p~n", [C, E, erlang:get_stacktrace()]),
+ halt(1)
+end.
+endef
+
+asciidoc-manual:: doc-deps
+
+asciidoc-manual:: $(ASCIIDOC_MANUAL_FILES)
+ $(gen_verbose) $(call erlang,$(call asciidoc2man.erl,$?))
+ $(verbose) $(foreach s,$(MAN_SECTIONS),mkdir -p doc/man$s/ && mv doc/src/manual/*.$s.gz doc/man$s/;)
+
+install-docs:: install-asciidoc
+
+install-asciidoc: asciidoc-manual
+ $(foreach s,$(MAN_SECTIONS),\
+ mkdir -p $(MAN_INSTALL_PATH)/man$s/ && \
+ install -g `id -g` -o `id -u` -m 0644 doc/man$s/*.gz $(MAN_INSTALL_PATH)/man$s/;)
+
+distclean-asciidoc-manual:
+ $(gen_verbose) rm -rf $(addprefix doc/man,$(MAN_SECTIONS))
+endif
+endif
+
+# Copyright (c) 2014-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: bootstrap bootstrap-lib bootstrap-rel new list-templates
+
+# Core targets.
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Bootstrap targets:" \
+ " bootstrap Generate a skeleton of an OTP application" \
+ " bootstrap-lib Generate a skeleton of an OTP library" \
+ " bootstrap-rel Generate the files needed to build a release" \
+ " new-app in=NAME Create a new local OTP application NAME" \
+ " new-lib in=NAME Create a new local OTP library NAME" \
+ " new t=TPL n=NAME Generate a module NAME based on the template TPL" \
+ " new t=T n=N in=APP Generate a module NAME based on the template TPL in APP" \
+ " list-templates List available templates"
+
+# Bootstrap templates.
+
+define bs_appsrc
+{application, $p, [
+ {description, ""},
+ {vsn, "0.1.0"},
+ {id, "git"},
+ {modules, []},
+ {registered, []},
+ {applications, [
+ kernel,
+ stdlib
+ ]},
+ {mod, {$p_app, []}},
+ {env, []}
+]}.
+endef
+
+define bs_appsrc_lib
+{application, $p, [
+ {description, ""},
+ {vsn, "0.1.0"},
+ {id, "git"},
+ {modules, []},
+ {registered, []},
+ {applications, [
+ kernel,
+ stdlib
+ ]}
+]}.
+endef
+
+# To prevent autocompletion issues with ZSH, we add "include erlang.mk"
+# separately during the actual bootstrap.
+define bs_Makefile
+PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.1.0
+$(if $(SP),
+# Whitespace to be used when creating files from templates.
+SP = $(SP)
+)
+endef
+
+define bs_apps_Makefile
+PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.1.0
+$(if $(SP),
+# Whitespace to be used when creating files from templates.
+SP = $(SP)
+)
+# Make sure we know where the applications are located.
+ROOT_DIR ?= $(call core_relpath,$(dir $(ERLANG_MK_FILENAME)),$(APPS_DIR)/app)
+APPS_DIR ?= ..
+DEPS_DIR ?= $(call core_relpath,$(DEPS_DIR),$(APPS_DIR)/app)
+
+include $$(ROOT_DIR)/erlang.mk
+endef
+
+define bs_app
+-module($p_app).
+-behaviour(application).
+
+-export([start/2]).
+-export([stop/1]).
+
+start(_Type, _Args) ->
+ $p_sup:start_link().
+
+stop(_State) ->
+ ok.
+endef
+
+define bs_relx_config
+{release, {$p_release, "1"}, [$p, sasl, runtime_tools]}.
+{extended_start_script, true}.
+{sys_config, "config/sys.config"}.
+{vm_args, "config/vm.args"}.
+endef
+
+define bs_sys_config
+[
+].
+endef
+
+define bs_vm_args
+-name $p@127.0.0.1
+-setcookie $p
+-heart
+endef
+
+# Normal templates.
+
+define tpl_supervisor
+-module($(n)).
+-behaviour(supervisor).
+
+-export([start_link/0]).
+-export([init/1]).
+
+start_link() ->
+ supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+init([]) ->
+ Procs = [],
+ {ok, {{one_for_one, 1, 5}, Procs}}.
+endef
+
+define tpl_gen_server
+-module($(n)).
+-behaviour(gen_server).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_server.
+-export([init/1]).
+-export([handle_call/3]).
+-export([handle_cast/2]).
+-export([handle_info/2]).
+-export([terminate/2]).
+-export([code_change/3]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_server:start_link(?MODULE, [], []).
+
+%% gen_server.
+
+init([]) ->
+ {ok, #state{}}.
+
+handle_call(_Request, _From, State) ->
+ {reply, ignored, State}.
+
+handle_cast(_Msg, State) ->
+ {noreply, State}.
+
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+endef
+
+define tpl_module
+-module($(n)).
+-export([]).
+endef
+
+define tpl_cowboy_http
+-module($(n)).
+-behaviour(cowboy_http_handler).
+
+-export([init/3]).
+-export([handle/2]).
+-export([terminate/3]).
+
+-record(state, {
+}).
+
+init(_, Req, _Opts) ->
+ {ok, Req, #state{}}.
+
+handle(Req, State=#state{}) ->
+ {ok, Req2} = cowboy_req:reply(200, Req),
+ {ok, Req2, State}.
+
+terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_gen_fsm
+-module($(n)).
+-behaviour(gen_fsm).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_fsm.
+-export([init/1]).
+-export([state_name/2]).
+-export([handle_event/3]).
+-export([state_name/3]).
+-export([handle_sync_event/4]).
+-export([handle_info/3]).
+-export([terminate/3]).
+-export([code_change/4]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_fsm:start_link(?MODULE, [], []).
+
+%% gen_fsm.
+
+init([]) ->
+ {ok, state_name, #state{}}.
+
+state_name(_Event, StateData) ->
+ {next_state, state_name, StateData}.
+
+handle_event(_Event, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+state_name(_Event, _From, StateData) ->
+ {reply, ignored, state_name, StateData}.
+
+handle_sync_event(_Event, _From, StateName, StateData) ->
+ {reply, ignored, StateName, StateData}.
+
+handle_info(_Info, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+terminate(_Reason, _StateName, _StateData) ->
+ ok.
+
+code_change(_OldVsn, StateName, StateData, _Extra) ->
+ {ok, StateName, StateData}.
+endef
+
+define tpl_gen_statem
+-module($(n)).
+-behaviour(gen_statem).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_statem.
+-export([callback_mode/0]).
+-export([init/1]).
+-export([state_name/3]).
+-export([handle_event/4]).
+-export([terminate/3]).
+-export([code_change/4]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_statem:start_link(?MODULE, [], []).
+
+%% gen_statem.
+
+callback_mode() ->
+ state_functions.
+
+init([]) ->
+ {ok, state_name, #state{}}.
+
+state_name(_EventType, _EventData, StateData) ->
+ {next_state, state_name, StateData}.
+
+handle_event(_EventType, _EventData, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+terminate(_Reason, _StateName, _StateData) ->
+ ok.
+
+code_change(_OldVsn, StateName, StateData, _Extra) ->
+ {ok, StateName, StateData}.
+endef
+
+define tpl_cowboy_loop
+-module($(n)).
+-behaviour(cowboy_loop_handler).
+
+-export([init/3]).
+-export([info/3]).
+-export([terminate/3]).
+
+-record(state, {
+}).
+
+init(_, Req, _Opts) ->
+ {loop, Req, #state{}, 5000, hibernate}.
+
+info(_Info, Req, State) ->
+ {loop, Req, State, hibernate}.
+
+terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_cowboy_rest
+-module($(n)).
+
+-export([init/3]).
+-export([content_types_provided/2]).
+-export([get_html/2]).
+
+init(_, _Req, _Opts) ->
+ {upgrade, protocol, cowboy_rest}.
+
+content_types_provided(Req, State) ->
+ {[{{<<"text">>, <<"html">>, '*'}, get_html}], Req, State}.
+
+get_html(Req, State) ->
+ {<<"<html><body>This is REST!</body></html>">>, Req, State}.
+endef
+
+define tpl_cowboy_ws
+-module($(n)).
+-behaviour(cowboy_websocket_handler).
+
+-export([init/3]).
+-export([websocket_init/3]).
+-export([websocket_handle/3]).
+-export([websocket_info/3]).
+-export([websocket_terminate/3]).
+
+-record(state, {
+}).
+
+init(_, _, _) ->
+ {upgrade, protocol, cowboy_websocket}.
+
+websocket_init(_, Req, _Opts) ->
+ Req2 = cowboy_req:compact(Req),
+ {ok, Req2, #state{}}.
+
+websocket_handle({text, Data}, Req, State) ->
+ {reply, {text, Data}, Req, State};
+websocket_handle({binary, Data}, Req, State) ->
+ {reply, {binary, Data}, Req, State};
+websocket_handle(_Frame, Req, State) ->
+ {ok, Req, State}.
+
+websocket_info(_Info, Req, State) ->
+ {ok, Req, State}.
+
+websocket_terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_ranch_protocol
+-module($(n)).
+-behaviour(ranch_protocol).
+
+-export([start_link/4]).
+-export([init/4]).
+
+-type opts() :: [].
+-export_type([opts/0]).
+
+-record(state, {
+ socket :: inet:socket(),
+ transport :: module()
+}).
+
+start_link(Ref, Socket, Transport, Opts) ->
+ Pid = spawn_link(?MODULE, init, [Ref, Socket, Transport, Opts]),
+ {ok, Pid}.
+
+-spec init(ranch:ref(), inet:socket(), module(), opts()) -> ok.
+init(Ref, Socket, Transport, _Opts) ->
+ ok = ranch:accept_ack(Ref),
+ loop(#state{socket=Socket, transport=Transport}).
+
+loop(State) ->
+ loop(State).
+endef
+
+# Plugin-specific targets.
+
+ifndef WS
+ifdef SP
+WS = $(subst a,,a $(wordlist 1,$(SP),a a a a a a a a a a a a a a a a a a a a))
+else
+WS = $(tab)
+endif
+endif
+
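+# SP controls the whitespace used in files created from templates: when it
+# is set, indentation uses that many spaces instead of a tab. For example:
+#
+#     make -f erlang.mk bootstrap SP=4
+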
+bootstrap:
+ifneq ($(wildcard src/),)
+ $(error Error: src/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(eval n := $(PROJECT)_sup)
+ $(verbose) $(call core_render,bs_Makefile,Makefile)
+ $(verbose) echo "include erlang.mk" >> Makefile
+ $(verbose) mkdir src/
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc,src/$(PROJECT).app.src)
+endif
+ $(verbose) $(call core_render,bs_app,src/$(PROJECT)_app.erl)
+ $(verbose) $(call core_render,tpl_supervisor,src/$(PROJECT)_sup.erl)
+
+bootstrap-lib:
+ifneq ($(wildcard src/),)
+ $(error Error: src/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(verbose) $(call core_render,bs_Makefile,Makefile)
+ $(verbose) echo "include erlang.mk" >> Makefile
+ $(verbose) mkdir src/
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc_lib,src/$(PROJECT).app.src)
+endif
+
+bootstrap-rel:
+ifneq ($(wildcard relx.config),)
+ $(error Error: relx.config already exists)
+endif
+ifneq ($(wildcard config/),)
+ $(error Error: config/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(verbose) $(call core_render,bs_relx_config,relx.config)
+ $(verbose) mkdir config/
+ $(verbose) $(call core_render,bs_sys_config,config/sys.config)
+ $(verbose) $(call core_render,bs_vm_args,config/vm.args)
+
+new-app:
+ifndef in
+ $(error Usage: $(MAKE) new-app in=APP)
+endif
+ifneq ($(wildcard $(APPS_DIR)/$in),)
+ $(error Error: Application $in already exists)
+endif
+ $(eval p := $(in))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(eval n := $(in)_sup)
+ $(verbose) mkdir -p $(APPS_DIR)/$p/src/
+ $(verbose) $(call core_render,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile)
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc,$(APPS_DIR)/$p/src/$p.app.src)
+endif
+ $(verbose) $(call core_render,bs_app,$(APPS_DIR)/$p/src/$p_app.erl)
+ $(verbose) $(call core_render,tpl_supervisor,$(APPS_DIR)/$p/src/$p_sup.erl)
+
+new-lib:
+ifndef in
+ $(error Usage: $(MAKE) new-lib in=APP)
+endif
+ifneq ($(wildcard $(APPS_DIR)/$in),)
+ $(error Error: Application $in already exists)
+endif
+ $(eval p := $(in))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(verbose) mkdir -p $(APPS_DIR)/$p/src/
+ $(verbose) $(call core_render,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile)
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc_lib,$(APPS_DIR)/$p/src/$p.app.src)
+endif
+
+new:
+ifeq ($(wildcard src/)$(in),)
+ $(error Error: src/ directory does not exist)
+endif
+ifndef t
+ $(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP])
+endif
+ifndef n
+ $(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP])
+endif
+ifdef in
+ $(verbose) $(call core_render,tpl_$(t),$(APPS_DIR)/$(in)/src/$(n).erl)
+else
+ $(verbose) $(call core_render,tpl_$(t),src/$(n).erl)
+endif
+
+list-templates:
+ $(verbose) echo Available templates:
+ $(verbose) printf " %s\n" $(sort $(patsubst tpl_%,%,$(filter tpl_%,$(.VARIABLES))))
+
+# Copyright (c) 2014-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: clean-c_src distclean-c_src-env
+
+# Configuration.
+
+C_SRC_DIR ?= $(CURDIR)/c_src
+C_SRC_ENV ?= $(C_SRC_DIR)/env.mk
+C_SRC_OUTPUT ?= $(CURDIR)/priv/$(PROJECT)
+C_SRC_TYPE ?= shared
+
+# System type and C compiler/flags.
+
+ifeq ($(PLATFORM),msys2)
+ C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?= .exe
+ C_SRC_OUTPUT_SHARED_EXTENSION ?= .dll
+else
+ C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?=
+ C_SRC_OUTPUT_SHARED_EXTENSION ?= .so
+endif
+
+ifeq ($(C_SRC_TYPE),shared)
+ C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_SHARED_EXTENSION)
+else
+ C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_EXECUTABLE_EXTENSION)
+endif
+
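+# C_SRC_TYPE selects what gets built from c_src/: "shared" (the default,
+# suitable for NIFs and port drivers) or any other value, conventionally
+# "executable", for a standalone port program. For example:
+#
+#     C_SRC_TYPE = executable
+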
+ifeq ($(PLATFORM),msys2)
+# We hardcode the compiler used on MSYS2. The default CC=cc does
+# not produce working code. Neither does the "gcc" MSYS2 package.
+ CC = /mingw64/bin/gcc
+ export CC
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),darwin)
+ CC ?= cc
+ CFLAGS ?= -O3 -std=c99 -arch x86_64 -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -arch x86_64 -Wall
+ LDFLAGS ?= -arch x86_64 -flat_namespace -undefined suppress
+else ifeq ($(PLATFORM),freebsd)
+ CC ?= cc
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),linux)
+ CC ?= gcc
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+endif
+
+ifneq ($(PLATFORM),msys2)
+ CFLAGS += -fPIC
+ CXXFLAGS += -fPIC
+endif
+
+CFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
+CXXFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
+
+LDLIBS += -L"$(ERL_INTERFACE_LIB_DIR)" -lei
+
+# Verbosity.
+
+c_verbose_0 = @echo " C " $(filter-out $(notdir $(MAKEFILE_LIST) $(C_SRC_ENV)),$(^F));
+c_verbose = $(c_verbose_$(V))
+
+cpp_verbose_0 = @echo " CPP " $(filter-out $(notdir $(MAKEFILE_LIST) $(C_SRC_ENV)),$(^F));
+cpp_verbose = $(cpp_verbose_$(V))
+
+link_verbose_0 = @echo " LD " $(@F);
+link_verbose = $(link_verbose_$(V))
+
+# Targets.
+
+ifeq ($(wildcard $(C_SRC_DIR)),)
+else ifneq ($(wildcard $(C_SRC_DIR)/Makefile),)
+app:: app-c_src
+
+test-build:: app-c_src
+
+app-c_src:
+ $(MAKE) -C $(C_SRC_DIR)
+
+clean::
+ $(MAKE) -C $(C_SRC_DIR) clean
+
+else
+
+ifeq ($(SOURCES),)
+SOURCES := $(sort $(foreach pat,*.c *.C *.cc *.cpp,$(call core_find,$(C_SRC_DIR)/,$(pat))))
+endif
+OBJECTS = $(addsuffix .o, $(basename $(SOURCES)))
+
+COMPILE_C = $(c_verbose) $(CC) $(CFLAGS) $(CPPFLAGS) -c
+COMPILE_CPP = $(cpp_verbose) $(CXX) $(CXXFLAGS) $(CPPFLAGS) -c
+
+app:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
+
+test-build:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
+
+$(C_SRC_OUTPUT_FILE): $(OBJECTS)
+ $(verbose) mkdir -p $(dir $@)
+ $(link_verbose) $(CC) $(OBJECTS) \
+ $(LDFLAGS) $(if $(filter $(C_SRC_TYPE),shared),-shared) $(LDLIBS) \
+ -o $(C_SRC_OUTPUT_FILE)
+
+$(OBJECTS): $(MAKEFILE_LIST) $(C_SRC_ENV)
+
+%.o: %.c
+ $(COMPILE_C) $(OUTPUT_OPTION) $<
+
+%.o: %.cc
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+%.o: %.C
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+%.o: %.cpp
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+clean:: clean-c_src
+
+clean-c_src:
+ $(gen_verbose) rm -f $(C_SRC_OUTPUT_FILE) $(OBJECTS)
+
+endif
+
+ifneq ($(wildcard $(C_SRC_DIR)),)
+ERL_ERTS_DIR = $(shell $(ERL) -eval 'io:format("~s~n", [code:lib_dir(erts)]), halt().')
+
+$(C_SRC_ENV):
+ $(verbose) $(ERL) -eval "file:write_file(\"$(call core_native_path,$(C_SRC_ENV))\", \
+ io_lib:format( \
+ \"# Generated by Erlang.mk. Edit at your own risk!~n~n\" \
+ \"ERTS_INCLUDE_DIR ?= ~s/erts-~s/include/~n\" \
+ \"ERL_INTERFACE_INCLUDE_DIR ?= ~s~n\" \
+ \"ERL_INTERFACE_LIB_DIR ?= ~s~n\" \
+ \"ERTS_DIR ?= $(ERL_ERTS_DIR)~n\", \
+ [code:root_dir(), erlang:system_info(version), \
+ code:lib_dir(erl_interface, include), \
+ code:lib_dir(erl_interface, lib)])), \
+ halt()."
+
+distclean:: distclean-c_src-env
+
+distclean-c_src-env:
+ $(gen_verbose) rm -f $(C_SRC_ENV)
+
+-include $(C_SRC_ENV)
+
+ifneq ($(ERL_ERTS_DIR),$(ERTS_DIR))
+$(shell rm -f $(C_SRC_ENV))
+endif
+endif
+
+# Templates.
+
+define bs_c_nif
+#include "erl_nif.h"
+
+static int loads = 0;
+
+static int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
+{
+ /* Initialize private data. */
+ *priv_data = NULL;
+
+ loads++;
+
+ return 0;
+}
+
+static int upgrade(ErlNifEnv* env, void** priv_data, void** old_priv_data, ERL_NIF_TERM load_info)
+{
+ /* Convert the private data to the new version. */
+ *priv_data = *old_priv_data;
+
+ loads++;
+
+ return 0;
+}
+
+static void unload(ErlNifEnv* env, void* priv_data)
+{
+ if (loads == 1) {
+ /* Destroy the private data. */
+ }
+
+ loads--;
+}
+
+static ERL_NIF_TERM hello(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ if (enif_is_atom(env, argv[0])) {
+ return enif_make_tuple2(env,
+ enif_make_atom(env, "hello"),
+ argv[0]);
+ }
+
+ return enif_make_tuple2(env,
+ enif_make_atom(env, "error"),
+ enif_make_atom(env, "badarg"));
+}
+
+static ErlNifFunc nif_funcs[] = {
+ {"hello", 1, hello}
+};
+
+ERL_NIF_INIT($n, nif_funcs, load, NULL, upgrade, unload)
+endef
+
+define bs_erl_nif
+-module($n).
+
+-export([hello/1]).
+
+-on_load(on_load/0).
+on_load() ->
+ PrivDir = case code:priv_dir(?MODULE) of
+ {error, _} ->
+ AppPath = filename:dirname(filename:dirname(code:which(?MODULE))),
+ filename:join(AppPath, "priv");
+ Path ->
+ Path
+ end,
+ erlang:load_nif(filename:join(PrivDir, atom_to_list(?MODULE)), 0).
+
+hello(_) ->
+ erlang:nif_error({not_loaded, ?MODULE}).
+endef
+
+new-nif:
+ifneq ($(wildcard $(C_SRC_DIR)/$n.c),)
+ $(error Error: $(C_SRC_DIR)/$n.c already exists)
+endif
+ifneq ($(wildcard src/$n.erl),)
+ $(error Error: src/$n.erl already exists)
+endif
+ifndef n
+ $(error Usage: $(MAKE) new-nif n=NAME [in=APP])
+endif
+ifdef in
+ $(verbose) $(MAKE) -C $(APPS_DIR)/$(in)/ new-nif n=$n in=
+else
+ $(verbose) mkdir -p $(C_SRC_DIR) src/
+ $(verbose) $(call core_render,bs_c_nif,$(C_SRC_DIR)/$n.c)
+ $(verbose) $(call core_render,bs_erl_nif,src/$n.erl)
+endif
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: ci ci-prepare ci-setup
+
+CI_OTP ?=
+CI_HIPE ?=
+CI_ERLLVM ?=
+
+ifeq ($(CI_VM),native)
+ERLC_OPTS += +native
+TEST_ERLC_OPTS += +native
+else ifeq ($(CI_VM),erllvm)
+ERLC_OPTS += +native +'{hipe, [to_llvm]}'
+TEST_ERLC_OPTS += +native +'{hipe, [to_llvm]}'
+endif
+
+ifeq ($(strip $(CI_OTP) $(CI_HIPE) $(CI_ERLLVM)),)
+ci::
+else
+
+ci:: $(addprefix ci-,$(CI_OTP) $(addsuffix -native,$(CI_HIPE)) $(addsuffix -erllvm,$(CI_ERLLVM)))
+
+ci-prepare: $(addprefix $(KERL_INSTALL_DIR)/,$(CI_OTP) $(addsuffix -native,$(CI_HIPE)))
+
+ci-setup::
+ $(verbose) :
+
+ci-extra::
+ $(verbose) :
+
+ci_verbose_0 = @echo " CI " $(1);
+ci_verbose = $(ci_verbose_$(V))
+
+define ci_target
+ci-$1: $(KERL_INSTALL_DIR)/$2
+ $(verbose) $(MAKE) --no-print-directory clean
+ $(ci_verbose) \
+ PATH="$(KERL_INSTALL_DIR)/$2/bin:$(PATH)" \
+ CI_OTP_RELEASE="$1" \
+ CT_OPTS="-label $1" \
+ CI_VM="$3" \
+ $(MAKE) ci-setup tests
+ $(verbose) $(MAKE) --no-print-directory ci-extra
+endef
+
+$(foreach otp,$(CI_OTP),$(eval $(call ci_target,$(otp),$(otp),otp)))
+$(foreach otp,$(CI_HIPE),$(eval $(call ci_target,$(otp)-native,$(otp)-native,native)))
+$(foreach otp,$(CI_ERLLVM),$(eval $(call ci_target,$(otp)-erllvm,$(otp)-native,erllvm)))
+
+$(foreach otp,$(filter-out $(ERLANG_OTP),$(CI_OTP)),$(eval $(call kerl_otp_target,$(otp))))
+$(foreach otp,$(filter-out $(ERLANG_HIPE),$(sort $(CI_HIPE) $(CI_ERLLVM))),$(eval $(call kerl_hipe_target,$(otp))))
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Continuous Integration targets:" \
+ " ci Run '$(MAKE) tests' on all configured Erlang versions." \
+ "" \
+ "The CI_OTP variable must be defined with the Erlang versions" \
+ "that must be tested. For example: CI_OTP = OTP-17.3.4 OTP-17.5.3"
+
+endif
+
+# Copyright (c) 2020, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifdef CONCUERROR_TESTS
+
+.PHONY: concuerror distclean-concuerror
+
+# Configuration
+
+CONCUERROR_LOGS_DIR ?= $(CURDIR)/logs
+CONCUERROR_OPTS ?=
+
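+# Tests are declared as module:function pairs. For example (names are
+# illustrative):
+#
+#     CONCUERROR_TESTS = my_mod:my_test other_mod:other_test
+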
+# Core targets.
+
+check:: concuerror
+
+ifndef KEEP_LOGS
+distclean:: distclean-concuerror
+endif
+
+# Plugin-specific targets.
+
+$(ERLANG_MK_TMP)/Concuerror/bin/concuerror: | $(ERLANG_MK_TMP)
+ $(verbose) git clone https://github.com/parapluu/Concuerror $(ERLANG_MK_TMP)/Concuerror
+ $(verbose) $(MAKE) -C $(ERLANG_MK_TMP)/Concuerror
+
+$(CONCUERROR_LOGS_DIR):
+ $(verbose) mkdir -p $(CONCUERROR_LOGS_DIR)
+
+define concuerror_html_report
+<!DOCTYPE html>
+<html lang="en">
+<head>
+<meta charset="utf-8">
+<title>Concuerror HTML report</title>
+</head>
+<body>
+<h1>Concuerror HTML report</h1>
+<p>Generated on $(concuerror_date)</p>
+<ul>
+$(foreach t,$(concuerror_targets),<li><a href="$(t).txt">$(t)</a></li>)
+</ul>
+</body>
+</html>
+endef
+
+concuerror: $(addprefix concuerror-,$(subst :,-,$(CONCUERROR_TESTS)))
+ $(eval concuerror_date := $(shell date))
+ $(eval concuerror_targets := $^)
+ $(verbose) $(call core_render,concuerror_html_report,$(CONCUERROR_LOGS_DIR)/concuerror.html)
+
+define concuerror_target
+.PHONY: concuerror-$1-$2
+
+concuerror-$1-$2: test-build | $(ERLANG_MK_TMP)/Concuerror/bin/concuerror $(CONCUERROR_LOGS_DIR)
+ $(ERLANG_MK_TMP)/Concuerror/bin/concuerror \
+ --pa $(CURDIR)/ebin --pa $(TEST_DIR) \
+ -o $(CONCUERROR_LOGS_DIR)/concuerror-$1-$2.txt \
+ $$(CONCUERROR_OPTS) -m $1 -t $2
+endef
+
+$(foreach test,$(CONCUERROR_TESTS),$(eval $(call concuerror_target,$(firstword $(subst :, ,$(test))),$(lastword $(subst :, ,$(test))))))
+
+distclean-concuerror:
+ $(gen_verbose) rm -rf $(CONCUERROR_LOGS_DIR)
+
+endif
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: ct apps-ct distclean-ct
+
+# Configuration.
+
+CT_OPTS ?=
+
+ifneq ($(wildcard $(TEST_DIR)),)
+ifndef CT_SUITES
+CT_SUITES := $(sort $(subst _SUITE.erl,,$(notdir $(call core_find,$(TEST_DIR)/,*_SUITE.erl))))
+endif
+endif
+CT_SUITES ?=
+CT_LOGS_DIR ?= $(CURDIR)/logs
+
+# Core targets.
+
+tests:: ct
+
+ifndef KEEP_LOGS
+distclean:: distclean-ct
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Common_test targets:" \
+ " ct Run all the common_test suites for this project" \
+ "" \
+ "All your common_test suites have their associated targets." \
+ "A suite named http_SUITE can be ran using the ct-http target."
+
+# Plugin-specific targets.
+
+CT_RUN = ct_run \
+ -no_auto_compile \
+ -noinput \
+ -pa $(CURDIR)/ebin $(TEST_DIR) \
+ -dir $(TEST_DIR) \
+ -logdir $(CT_LOGS_DIR)
+
+ifeq ($(CT_SUITES),)
+ct: $(if $(IS_APP)$(ROOT_DIR),,apps-ct)
+else
+# We do not run tests if we are in an apps/* directory that has no test directory.
+ifneq ($(IS_APP)$(wildcard $(TEST_DIR)),1)
+ct: test-build $(if $(IS_APP)$(ROOT_DIR),,apps-ct)
+ $(verbose) mkdir -p $(CT_LOGS_DIR)
+ $(gen_verbose) $(CT_RUN) -sname ct_$(PROJECT) -suite $(addsuffix _SUITE,$(CT_SUITES)) $(CT_OPTS)
+endif
+endif
+
+ifneq ($(ALL_APPS_DIRS),)
+define ct_app_target
+apps-ct-$1: test-build
+ $$(MAKE) -C $1 ct IS_APP=1
+endef
+
+$(foreach app,$(ALL_APPS_DIRS),$(eval $(call ct_app_target,$(app))))
+
+apps-ct: $(addprefix apps-ct-,$(ALL_APPS_DIRS))
+endif
+
+ifdef t
+ifeq (,$(findstring :,$t))
+CT_EXTRA = -group $t
+else
+t_words = $(subst :, ,$t)
+CT_EXTRA = -group $(firstword $(t_words)) -case $(lastword $(t_words))
+endif
+else
+ifdef c
+CT_EXTRA = -case $c
+else
+CT_EXTRA =
+endif
+endif
+
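+# For example, with a suite named http_SUITE (group and case names are
+# illustrative):
+#
+#     make ct-http t=my_group            # run one group
+#     make ct-http t=my_group:my_case    # run one case within a group
+#     make ct-http c=my_case             # run one case directly
+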
+define ct_suite_target
+ct-$(1): test-build
+ $(verbose) mkdir -p $(CT_LOGS_DIR)
+ $(gen_verbose_esc) $(CT_RUN) -sname ct_$(PROJECT) -suite $(addsuffix _SUITE,$(1)) $(CT_EXTRA) $(CT_OPTS)
+endef
+
+$(foreach test,$(CT_SUITES),$(eval $(call ct_suite_target,$(test))))
+
+distclean-ct:
+ $(gen_verbose) rm -rf $(CT_LOGS_DIR)
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: plt distclean-plt dialyze
+
+# Configuration.
+
+DIALYZER_PLT ?= $(CURDIR)/.$(PROJECT).plt
+export DIALYZER_PLT
+
+PLT_APPS ?=
+DIALYZER_DIRS ?= --src -r $(wildcard src) $(ALL_APPS_DIRS)
+DIALYZER_OPTS ?= -Werror_handling -Wrace_conditions -Wunmatched_returns # -Wunderspecs
+DIALYZER_PLT_OPTS ?=
+
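+# For example, to include additional applications in the PLT:
+#
+#     PLT_APPS = crypto public_key ssl
+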
+# Core targets.
+
+check:: dialyze
+
+distclean:: distclean-plt
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Dialyzer targets:" \
+ " plt Build a PLT file for this project" \
+ " dialyze Analyze the project using Dialyzer"
+
+# Plugin-specific targets.
+
+define filter_opts.erl
+ Opts = init:get_plain_arguments(),
+ {Filtered, _} = lists:foldl(fun
+ (O, {Os, true}) -> {[O|Os], false};
+ (O = "-D", {Os, _}) -> {[O|Os], true};
+ (O = [\\$$-, \\$$D, _ | _], {Os, _}) -> {[O|Os], false};
+ (O = "-I", {Os, _}) -> {[O|Os], true};
+ (O = [\\$$-, \\$$I, _ | _], {Os, _}) -> {[O|Os], false};
+ (O = "-pa", {Os, _}) -> {[O|Os], true};
+ (_, Acc) -> Acc
+ end, {[], false}, Opts),
+ io:format("~s~n", [string:join(lists:reverse(Filtered), " ")]),
+ halt().
+endef
+
+# DIALYZER_PLT is a variable understood directly by Dialyzer.
+#
+# We append the path to erts at the end of the PLT. This works
+# because the PLT file is in the external term format and the
+# function binary_to_term/1 ignores any trailing data.
+$(DIALYZER_PLT): deps app
+ $(eval DEPS_LOG := $(shell test -f $(ERLANG_MK_TMP)/deps.log && \
+ while read p; do test -d $$p/ebin && echo $$p/ebin; done <$(ERLANG_MK_TMP)/deps.log))
+ $(verbose) dialyzer --build_plt $(DIALYZER_PLT_OPTS) --apps \
+ erts kernel stdlib $(PLT_APPS) $(OTP_DEPS) $(LOCAL_DEPS) $(DEPS_LOG) || test $$? -eq 2
+ $(verbose) $(ERL) -eval 'io:format("~n~s~n", [code:lib_dir(erts)]), halt().' >> $@
+
+plt: $(DIALYZER_PLT)
+
+distclean-plt:
+ $(gen_verbose) rm -f $(DIALYZER_PLT)
+
+ifneq ($(wildcard $(DIALYZER_PLT)),)
+dialyze: $(if $(filter --src,$(DIALYZER_DIRS)),,deps app)
+ $(verbose) if ! tail -n1 $(DIALYZER_PLT) | \
+ grep -q "^`$(ERL) -eval 'io:format("~s", [code:lib_dir(erts)]), halt().'`$$"; then \
+ rm $(DIALYZER_PLT); \
+ $(MAKE) plt; \
+ fi
+else
+dialyze: $(DIALYZER_PLT)
+endif
+ $(verbose) dialyzer --no_native `$(ERL) \
+ -eval "$(subst $(newline),,$(call escape_dquotes,$(call filter_opts.erl)))" \
+ -extra $(ERLC_OPTS)` $(DIALYZER_DIRS) $(DIALYZER_OPTS) $(if $(wildcard ebin/),-pa ebin/)
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-edoc edoc
+
+# Configuration.
+
+EDOC_OPTS ?=
+EDOC_SRC_DIRS ?=
+EDOC_OUTPUT ?= doc
+
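+# EDOC_OPTS takes comma-separated EDoc options. For example:
+#
+#     EDOC_OPTS = {preprocess, true}, {todo, true}
+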
+define edoc.erl
+ SrcPaths = lists:foldl(fun(P, Acc) ->
+ filelib:wildcard(atom_to_list(P) ++ "/{src,c_src}") ++ Acc
+ end, [], [$(call comma_list,$(patsubst %,'%',$(call core_native_path,$(EDOC_SRC_DIRS))))]),
+ DefaultOpts = [{dir, "$(EDOC_OUTPUT)"}, {source_path, SrcPaths}, {subpackages, false}],
+ edoc:application($(1), ".", [$(2)] ++ DefaultOpts),
+ halt(0).
+endef
+
+# Core targets.
+
+ifneq ($(strip $(EDOC_SRC_DIRS)$(wildcard doc/overview.edoc)),)
+docs:: edoc
+endif
+
+distclean:: distclean-edoc
+
+# Plugin-specific targets.
+
+edoc: distclean-edoc doc-deps
+ $(gen_verbose) $(call erlang,$(call edoc.erl,$(PROJECT),$(EDOC_OPTS)))
+
+distclean-edoc:
+ $(gen_verbose) rm -f $(EDOC_OUTPUT)/*.css $(EDOC_OUTPUT)/*.html $(EDOC_OUTPUT)/*.png $(EDOC_OUTPUT)/edoc-info
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Configuration.
+
+DTL_FULL_PATH ?=
+DTL_PATH ?= templates/
+DTL_PREFIX ?=
+DTL_SUFFIX ?= _dtl
+DTL_OPTS ?=
+
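+# For example, to compile templates from priv/templates/ into modules
+# named my_app_<name>_dtl (the prefix is illustrative):
+#
+#     DTL_PATH = priv/templates/
+#     DTL_PREFIX = my_app_
+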
+# Verbosity.
+
+dtl_verbose_0 = @echo " DTL " $(filter %.dtl,$(?F));
+dtl_verbose = $(dtl_verbose_$(V))
+
+# Core targets.
+
+DTL_PATH := $(abspath $(DTL_PATH))
+DTL_FILES := $(sort $(call core_find,$(DTL_PATH),*.dtl))
+
+ifneq ($(DTL_FILES),)
+
+DTL_NAMES = $(addprefix $(DTL_PREFIX),$(addsuffix $(DTL_SUFFIX),$(DTL_FILES:$(DTL_PATH)/%.dtl=%)))
+DTL_MODULES = $(if $(DTL_FULL_PATH),$(subst /,_,$(DTL_NAMES)),$(notdir $(DTL_NAMES)))
+BEAM_FILES += $(addsuffix .beam,$(addprefix ebin/,$(DTL_MODULES)))
+
+ifneq ($(words $(DTL_FILES)),0)
+# Rebuild templates when the Makefile changes.
+$(ERLANG_MK_TMP)/last-makefile-change-erlydtl: $(MAKEFILE_LIST) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(DTL_FILES); \
+ fi
+ $(verbose) touch $@
+
+ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change-erlydtl
+endif
+
+define erlydtl_compile.erl
+ [begin
+ Module0 = case "$(strip $(DTL_FULL_PATH))" of
+ "" ->
+ filename:basename(F, ".dtl");
+ _ ->
+ "$(call core_native_path,$(DTL_PATH))/" ++ F2 = filename:rootname(F, ".dtl"),
+ re:replace(F2, "/", "_", [{return, list}, global])
+ end,
+ Module = list_to_atom("$(DTL_PREFIX)" ++ string:to_lower(Module0) ++ "$(DTL_SUFFIX)"),
+ case erlydtl:compile(F, Module, [$(DTL_OPTS)] ++ [{out_dir, "ebin/"}, return_errors]) of
+ ok -> ok;
+ {ok, _} -> ok
+ end
+ end || F <- string:tokens("$(1)", " ")],
+ halt().
+endef
+
+ebin/$(PROJECT).app:: $(DTL_FILES) | ebin/
+ $(if $(strip $?),\
+ $(dtl_verbose) $(call erlang,$(call erlydtl_compile.erl,$(call core_native_path,$?)),\
+ -pa ebin/))
+
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, Dave Cottlehuber <dch@skunkwerks.at>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-escript escript escript-zip
+
+# Configuration.
+
+ESCRIPT_NAME ?= $(PROJECT)
+ESCRIPT_FILE ?= $(ESCRIPT_NAME)
+
+ESCRIPT_SHEBANG ?= /usr/bin/env escript
+ESCRIPT_COMMENT ?= This is an -*- erlang -*- file
+ESCRIPT_EMU_ARGS ?= -escript main $(ESCRIPT_NAME)
+
+ESCRIPT_ZIP ?= 7z a -tzip -mx=9 -mtc=off $(if $(filter-out 0,$(V)),,> /dev/null)
+ESCRIPT_ZIP_FILE ?= $(ERLANG_MK_TMP)/escript.zip
+
+# Core targets.
+
+distclean:: distclean-escript
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Escript targets:" \
+ " escript Build an executable escript archive" \
+
+# Plugin-specific targets.
+
+escript-zip:: FULL=1
+escript-zip:: deps app
+ $(verbose) mkdir -p $(dir $(ESCRIPT_ZIP))
+ $(verbose) rm -f $(ESCRIPT_ZIP_FILE)
+ $(gen_verbose) cd .. && $(ESCRIPT_ZIP) $(ESCRIPT_ZIP_FILE) $(PROJECT)/ebin/*
+ifneq ($(DEPS),)
+ $(verbose) cd $(DEPS_DIR) && $(ESCRIPT_ZIP) $(ESCRIPT_ZIP_FILE) \
+ $(subst $(DEPS_DIR)/,,$(addsuffix /*,$(wildcard \
+ $(addsuffix /ebin,$(shell cat $(ERLANG_MK_TMP)/deps.log)))))
+endif
+
+escript:: escript-zip
+ $(gen_verbose) printf "%s\n" \
+ "#!$(ESCRIPT_SHEBANG)" \
+ "%% $(ESCRIPT_COMMENT)" \
+ "%%! $(ESCRIPT_EMU_ARGS)" > $(ESCRIPT_FILE)
+ $(verbose) cat $(ESCRIPT_ZIP_FILE) >> $(ESCRIPT_FILE)
+ $(verbose) chmod +x $(ESCRIPT_FILE)
+
+distclean-escript:
+ $(gen_verbose) rm -f $(ESCRIPT_FILE)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, Enrique Fernandez <enrique.fernandez@erlang-solutions.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: eunit apps-eunit
+
+# Configuration
+
+EUNIT_OPTS ?=
+EUNIT_ERL_OPTS ?=
+
+# Core targets.
+
+tests:: eunit
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "EUnit targets:" \
+ " eunit Run all the EUnit tests for this project"
+
+# Plugin-specific targets.
+
+define eunit.erl
+ $(call cover.erl)
+ CoverSetup(),
+ case eunit:test($1, [$(EUNIT_OPTS)]) of
+ ok -> ok;
+ error -> halt(2)
+ end,
+ CoverExport("$(call core_native_path,$(COVER_DATA_DIR))/eunit.coverdata"),
+ halt()
+endef
+
+EUNIT_ERL_OPTS += -pa $(TEST_DIR) $(CURDIR)/ebin
+
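+# The t variable narrows what runs: a bare module name runs that module's
+# EUnit tests, while module:function runs that single zero-arity test
+# function. For example (names are illustrative):
+#
+#     make eunit t=my_module
+#     make eunit t=my_module:my_test
+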
+ifdef t
+ifeq (,$(findstring :,$(t)))
+eunit: test-build cover-data-dir
+ $(gen_verbose) $(call erlang,$(call eunit.erl,['$(t)']),$(EUNIT_ERL_OPTS))
+else
+eunit: test-build cover-data-dir
+ $(gen_verbose) $(call erlang,$(call eunit.erl,fun $(t)/0),$(EUNIT_ERL_OPTS))
+endif
+else
+EUNIT_EBIN_MODS = $(notdir $(basename $(ERL_FILES) $(BEAM_FILES)))
+EUNIT_TEST_MODS = $(notdir $(basename $(call core_find,$(TEST_DIR)/,*.erl)))
+
+EUNIT_MODS = $(foreach mod,$(EUNIT_EBIN_MODS) $(filter-out \
+ $(patsubst %,%_tests,$(EUNIT_EBIN_MODS)),$(EUNIT_TEST_MODS)),'$(mod)')
+
+eunit: test-build $(if $(IS_APP)$(ROOT_DIR),,apps-eunit) cover-data-dir
+ifneq ($(wildcard src/ $(TEST_DIR)),)
+ $(gen_verbose) $(call erlang,$(call eunit.erl,[$(call comma_list,$(EUNIT_MODS))]),$(EUNIT_ERL_OPTS))
+endif
+
+ifneq ($(ALL_APPS_DIRS),)
+apps-eunit: test-build
+ $(verbose) eunit_retcode=0 ; for app in $(ALL_APPS_DIRS); do $(MAKE) -C $$app eunit IS_APP=1; \
+ [ $$? -ne 0 ] && eunit_retcode=1 ; done ; \
+ exit $$eunit_retcode
+endif
+endif
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: relx-rel relx-relup distclean-relx-rel run
+
+# Configuration.
+
+RELX ?= $(ERLANG_MK_TMP)/relx
+RELX_CONFIG ?= $(CURDIR)/relx.config
+
+RELX_URL ?= https://erlang.mk/res/relx-v3.27.0
+RELX_OPTS ?=
+RELX_OUTPUT_DIR ?= _rel
+RELX_REL_EXT ?=
+RELX_TAR ?= 1
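+
+# The run and sfx targets below parse RELX_CONFIG; a minimal relx.config
+# sketch (hypothetical release and application names):
+#
+# {release, {my_rel, "1.0.0"}, [my_app]}.
+# {extended_start_script, true}.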
+
+ifdef SFX
+ RELX_TAR = 1
+endif
+
+ifeq ($(firstword $(RELX_OPTS)),-o)
+ RELX_OUTPUT_DIR = $(word 2,$(RELX_OPTS))
+else
+ RELX_OPTS += -o $(RELX_OUTPUT_DIR)
+endif
+
+# Core targets.
+
+ifeq ($(IS_DEP),)
+ifneq ($(wildcard $(RELX_CONFIG)),)
+rel:: relx-rel
+
+relup:: relx-relup
+endif
+endif
+
+distclean:: distclean-relx-rel
+
+# Plugin-specific targets.
+
+$(RELX): | $(ERLANG_MK_TMP)
+ $(gen_verbose) $(call core_http_get,$(RELX),$(RELX_URL))
+ $(verbose) chmod +x $(RELX)
+
+relx-rel: $(RELX) rel-deps app
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) release
+ $(verbose) $(MAKE) relx-post-rel
+ifeq ($(RELX_TAR),1)
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) tar
+endif
+
+relx-relup: $(RELX) rel-deps app
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) release
+ $(MAKE) relx-post-rel
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) relup $(if $(filter 1,$(RELX_TAR)),tar)
+
+distclean-relx-rel:
+ $(gen_verbose) rm -rf $(RELX_OUTPUT_DIR)
+
+# Default hooks.
+relx-post-rel::
+ $(verbose) :
+
+# Run target.
+
+ifeq ($(wildcard $(RELX_CONFIG)),)
+run::
+else
+
+define get_relx_release.erl
+ {ok, Config} = file:consult("$(call core_native_path,$(RELX_CONFIG))"),
+ {release, {Name, Vsn0}, _} = lists:keyfind(release, 1, Config),
+ Vsn = case Vsn0 of
+ {cmd, Cmd} -> os:cmd(Cmd);
+ semver -> "";
+ {semver, _} -> "";
+		_ -> Vsn0
+ end,
+ Extended = case lists:keyfind(extended_start_script, 1, Config) of
+ {_, true} -> "1";
+ _ -> ""
+ end,
+ io:format("~s ~s ~s", [Name, Vsn, Extended]),
+ halt(0).
+endef
+
+RELX_REL := $(shell $(call erlang,$(get_relx_release.erl)))
+RELX_REL_NAME := $(word 1,$(RELX_REL))
+RELX_REL_VSN := $(word 2,$(RELX_REL))
+RELX_REL_CMD := $(if $(word 3,$(RELX_REL)),console)
+
+ifeq ($(PLATFORM),msys2)
+RELX_REL_EXT := .cmd
+endif
+
+run:: all
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) $(RELX_REL_CMD)
+
+ifdef RELOAD
+rel::
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) ping
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) \
+ eval "io:format(\"~p~n\", [c:lm()])"
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Relx targets:" \
+ " run Compile the project, build the release and run it"
+
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, M Robert Martin <rob@version2beta.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: shell
+
+# Configuration.
+
+SHELL_ERL ?= erl
+SHELL_PATHS ?= $(CURDIR)/ebin $(TEST_DIR)
+SHELL_OPTS ?=
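+
+# Typical invocation (hypothetical node and application names):
+#   make shell SHELL_OPTS="-sname dev -s my_app"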
+
+ALL_SHELL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(SHELL_DEPS))
+
+# Core targets
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Shell targets:" \
+	" shell Run an Erlang shell with SHELL_OPTS or a reasonable default"
+
+# Plugin-specific targets.
+
+$(foreach dep,$(SHELL_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+build-shell-deps:
+else
+build-shell-deps: $(ALL_SHELL_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_SHELL_DEPS_DIRS) ; do \
+ if [ -z "$(strip $(FULL))" ] && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ else \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ fi \
+ done
+endif
+
+shell:: build-shell-deps
+ $(gen_verbose) $(SHELL_ERL) -pa $(SHELL_PATHS) $(SHELL_OPTS)
+
+# Copyright 2017, Stanislaw Klekot <dozzie@jarowit.net>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-sphinx sphinx
+
+# Configuration.
+
+SPHINX_BUILD ?= sphinx-build
+SPHINX_SOURCE ?= doc
+SPHINX_CONFDIR ?=
+SPHINX_FORMATS ?= html
+SPHINX_DOCTREES ?= $(ERLANG_MK_TMP)/sphinx.doctrees
+SPHINX_OPTS ?=
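+
+# For example, to build HTML and man pages into custom directories
+# (hypothetical paths):
+#
+# SPHINX_FORMATS = html man
+# sphinx_html_output = output/html
+# sphinx_man_output = output/man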
+
+#sphinx_html_opts =
+#sphinx_html_output = html
+#sphinx_man_opts =
+#sphinx_man_output = man
+#sphinx_latex_opts =
+#sphinx_latex_output = latex
+
+# Helpers.
+
+sphinx_build_0 = @echo " SPHINX" $1; $(SPHINX_BUILD) -N -q
+sphinx_build_1 = $(SPHINX_BUILD) -N
+sphinx_build_2 = set -x; $(SPHINX_BUILD)
+sphinx_build = $(sphinx_build_$(V))
+
+define sphinx.build
+$(call sphinx_build,$1) -b $1 -d $(SPHINX_DOCTREES) $(if $(SPHINX_CONFDIR),-c $(SPHINX_CONFDIR)) $(SPHINX_OPTS) $(sphinx_$1_opts) -- $(SPHINX_SOURCE) $(call sphinx.output,$1)
+
+endef
+
+define sphinx.output
+$(if $(sphinx_$1_output),$(sphinx_$1_output),$1)
+endef
+
+# Targets.
+
+ifneq ($(wildcard $(if $(SPHINX_CONFDIR),$(SPHINX_CONFDIR),$(SPHINX_SOURCE))/conf.py),)
+docs:: sphinx
+distclean:: distclean-sphinx
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Sphinx targets:" \
+ " sphinx Generate Sphinx documentation." \
+ "" \
+	"ReST sources and the 'conf.py' file are expected in the directory pointed to" \
+	"by SPHINX_SOURCE ('doc' by default). SPHINX_FORMATS lists the formats to build" \
+	"(only the 'html' format is generated by default); the target directory can be specified by" \
+ 'setting sphinx_$${format}_output, for example: sphinx_html_output = output/html' \
+ "Additional Sphinx options can be set in SPHINX_OPTS."
+
+# Plugin-specific targets.
+
+sphinx:
+ $(foreach F,$(SPHINX_FORMATS),$(call sphinx.build,$F))
+
+distclean-sphinx:
+ $(gen_verbose) rm -rf $(filter-out $(SPHINX_SOURCE),$(foreach F,$(SPHINX_FORMATS),$(call sphinx.output,$F)))
+
+# Copyright (c) 2017, Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: show-ERL_LIBS show-ERLC_OPTS show-TEST_ERLC_OPTS
+
+show-ERL_LIBS:
+ @echo $(ERL_LIBS)
+
+show-ERLC_OPTS:
+ @$(foreach opt,$(ERLC_OPTS) -pa ebin -I include,echo "$(opt)";)
+
+show-TEST_ERLC_OPTS:
+ @$(foreach opt,$(TEST_ERLC_OPTS) -pa ebin -I include,echo "$(opt)";)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter triq,$(DEPS) $(TEST_DEPS)),triq)
+.PHONY: triq
+
+# Targets.
+
+tests:: triq
+
+define triq_check.erl
+ $(call cover.erl)
+ code:add_pathsa([
+ "$(call core_native_path,$(CURDIR)/ebin)",
+ "$(call core_native_path,$(DEPS_DIR)/*/ebin)",
+ "$(call core_native_path,$(TEST_DIR))"]),
+ try begin
+ CoverSetup(),
+ Res = case $(1) of
+ all -> [true] =:= lists:usort([triq:check(M) || M <- [$(call comma_list,$(3))]]);
+ module -> triq:check($(2));
+ function -> triq:check($(2))
+ end,
+ CoverExport("$(COVER_DATA_DIR)/triq.coverdata"),
+ Res
+ end of
+ true -> halt(0);
+ _ -> halt(1)
+ catch error:undef ->
+ io:format("Undefined property or module?~n~p~n", [erlang:get_stacktrace()]),
+ halt(0)
+ end.
+endef
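+
+# As with eunit, the t variable narrows the run (hypothetical names):
+#   make triq t=my_module           # check all properties in the module
+#   make triq t=my_module:prop_foo  # check one property, called as my_module:prop_foo()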
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+triq: test-build cover-data-dir
+ $(verbose) $(call erlang,$(call triq_check.erl,module,$(t)))
+else
+triq: test-build cover-data-dir
+ $(verbose) echo Testing $(t)/0
+ $(verbose) $(call erlang,$(call triq_check.erl,function,$(t)()))
+endif
+else
+triq: test-build cover-data-dir
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(wildcard ebin/*.beam) $(call core_find,$(TEST_DIR)/,*.beam))))))
+ $(gen_verbose) $(call erlang,$(call triq_check.erl,all,undefined,$(MODULES)))
+endif
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Erlang Solutions Ltd.
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: xref distclean-xref
+
+# Configuration.
+
+ifeq ($(XREF_CONFIG),)
+ XREFR_ARGS :=
+else
+ XREFR_ARGS := -c $(XREF_CONFIG)
+endif
+
+XREFR ?= $(CURDIR)/xrefr
+export XREFR
+
+XREFR_URL ?= https://github.com/inaka/xref_runner/releases/download/1.1.0/xrefr
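+
+# Typical invocations (xref.config is a hypothetical file name):
+#   make xref
+#   make xref XREF_CONFIG=xref.config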
+
+# Core targets.
+
+help::
+ $(verbose) printf '%s\n' '' \
+ 'Xref targets:' \
+ ' xref Run Xrefr using $$XREF_CONFIG as config file if defined'
+
+distclean:: distclean-xref
+
+# Plugin-specific targets.
+
+$(XREFR):
+ $(gen_verbose) $(call core_http_get,$(XREFR),$(XREFR_URL))
+ $(verbose) chmod +x $(XREFR)
+
+xref: deps app $(XREFR)
+ $(gen_verbose) $(XREFR) $(XREFR_ARGS)
+
+distclean-xref:
+ $(gen_verbose) rm -rf $(XREFR)
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Viktor Söderqvist <viktor@zuiderkwast.se>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+COVER_REPORT_DIR ?= cover
+COVER_DATA_DIR ?= $(COVER_REPORT_DIR)
+
+ifdef COVER
+COVER_APPS ?= $(notdir $(ALL_APPS_DIRS))
+COVER_DEPS ?=
+endif
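+
+# Typical usage (cowlib as a hypothetical dependency name): run the test
+# suites while collecting coverage for the project and one dependency:
+#   make tests COVER=1 COVER_DEPS=cowlib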
+
+# Code coverage for Common Test.
+
+ifdef COVER
+ifdef CT_RUN
+ifneq ($(wildcard $(TEST_DIR)),)
+test-build:: $(TEST_DIR)/ct.cover.spec
+
+$(TEST_DIR)/ct.cover.spec: cover-data-dir
+ $(gen_verbose) printf "%s\n" \
+ "{incl_app, '$(PROJECT)', details}." \
+ "{incl_dirs, '$(PROJECT)', [\"$(call core_native_path,$(CURDIR)/ebin)\" \
+ $(foreach a,$(COVER_APPS),$(comma) \"$(call core_native_path,$(APPS_DIR)/$a/ebin)\") \
+ $(foreach d,$(COVER_DEPS),$(comma) \"$(call core_native_path,$(DEPS_DIR)/$d/ebin)\")]}." \
+ '{export,"$(call core_native_path,$(abspath $(COVER_DATA_DIR))/ct.coverdata)"}.' > $@
+
+CT_RUN += -cover $(TEST_DIR)/ct.cover.spec
+endif
+endif
+endif
+
+# Code coverage for other tools.
+
+ifdef COVER
+define cover.erl
+ CoverSetup = fun() ->
+ Dirs = ["$(call core_native_path,$(CURDIR)/ebin)"
+ $(foreach a,$(COVER_APPS),$(comma) "$(call core_native_path,$(APPS_DIR)/$a/ebin)")
+ $(foreach d,$(COVER_DEPS),$(comma) "$(call core_native_path,$(DEPS_DIR)/$d/ebin)")],
+ [begin
+ case filelib:is_dir(Dir) of
+ false -> false;
+ true ->
+ case cover:compile_beam_directory(Dir) of
+ {error, _} -> halt(1);
+ _ -> true
+ end
+ end
+ end || Dir <- Dirs]
+ end,
+ CoverExport = fun(Filename) -> cover:export(Filename) end,
+endef
+else
+define cover.erl
+ CoverSetup = fun() -> ok end,
+ CoverExport = fun(_) -> ok end,
+endef
+endif
+
+# Core targets
+
+ifdef COVER
+ifneq ($(COVER_REPORT_DIR),)
+tests::
+ $(verbose) $(MAKE) --no-print-directory cover-report
+endif
+
+cover-data-dir: | $(COVER_DATA_DIR)
+
+$(COVER_DATA_DIR):
+ $(verbose) mkdir -p $(COVER_DATA_DIR)
+else
+cover-data-dir:
+endif
+
+clean:: coverdata-clean
+
+ifneq ($(COVER_REPORT_DIR),)
+distclean:: cover-report-clean
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Cover targets:" \
+ " cover-report Generate a HTML coverage report from previously collected" \
+ " cover data." \
+ " all.coverdata Merge all coverdata files into all.coverdata." \
+ "" \
+ "If COVER=1 is set, coverage data is generated by the targets eunit and ct. The" \
+ "target tests additionally generates a HTML coverage report from the combined" \
+ "coverdata files from each of these testing tools. HTML reports can be disabled" \
+ "by setting COVER_REPORT_DIR to empty."
+
+# Plugin specific targets
+
+COVERDATA = $(filter-out $(COVER_DATA_DIR)/all.coverdata,$(wildcard $(COVER_DATA_DIR)/*.coverdata))
+
+.PHONY: coverdata-clean
+coverdata-clean:
+ $(gen_verbose) rm -f $(COVER_DATA_DIR)/*.coverdata $(TEST_DIR)/ct.cover.spec
+
+# Merge all coverdata files into one.
+define cover_export.erl
+ $(foreach f,$(COVERDATA),cover:import("$(f)") == ok orelse halt(1),)
+ cover:export("$(COVER_DATA_DIR)/$@"), halt(0).
+endef
+
+all.coverdata: $(COVERDATA) cover-data-dir
+ $(gen_verbose) $(call erlang,$(cover_export.erl))
+
+# These are only defined if COVER_REPORT_DIR is non-empty. Set COVER_REPORT_DIR to
+# empty if you want the coverdata files but not the HTML report.
+ifneq ($(COVER_REPORT_DIR),)
+
+.PHONY: cover-report-clean cover-report
+
+cover-report-clean:
+ $(gen_verbose) rm -rf $(COVER_REPORT_DIR)
+ifneq ($(COVER_REPORT_DIR),$(COVER_DATA_DIR))
+ $(if $(shell ls -A $(COVER_DATA_DIR)/),,$(verbose) rmdir $(COVER_DATA_DIR))
+endif
+
+ifeq ($(COVERDATA),)
+cover-report:
+else
+
+# Modules which include eunit.hrl always contain one line without coverage
+# because eunit defines test/0 which is never called. We compensate for this.
+EUNIT_HRL_MODS = $(subst $(space),$(comma),$(shell \
+ grep -H -e '^\s*-include.*include/eunit\.hrl"' src/*.erl \
+ | sed "s/^src\/\(.*\)\.erl:.*/'\1'/" | uniq))
+
+define cover_report.erl
+ $(foreach f,$(COVERDATA),cover:import("$(f)") == ok orelse halt(1),)
+ Ms = cover:imported_modules(),
+ [cover:analyse_to_file(M, "$(COVER_REPORT_DIR)/" ++ atom_to_list(M)
+ ++ ".COVER.html", [html]) || M <- Ms],
+ Report = [begin {ok, R} = cover:analyse(M, module), R end || M <- Ms],
+ EunitHrlMods = [$(EUNIT_HRL_MODS)],
+ Report1 = [{M, {Y, case lists:member(M, EunitHrlMods) of
+ true -> N - 1; false -> N end}} || {M, {Y, N}} <- Report],
+ TotalY = lists:sum([Y || {_, {Y, _}} <- Report1]),
+ TotalN = lists:sum([N || {_, {_, N}} <- Report1]),
+ Perc = fun(Y, N) -> case Y + N of 0 -> 100; S -> round(100 * Y / S) end end,
+ TotalPerc = Perc(TotalY, TotalN),
+ {ok, F} = file:open("$(COVER_REPORT_DIR)/index.html", [write]),
+ io:format(F, "<!DOCTYPE html><html>~n"
+ "<head><meta charset=\"UTF-8\">~n"
+ "<title>Coverage report</title></head>~n"
+ "<body>~n", []),
+ io:format(F, "<h1>Coverage</h1>~n<p>Total: ~p%</p>~n", [TotalPerc]),
+ io:format(F, "<table><tr><th>Module</th><th>Coverage</th></tr>~n", []),
+ [io:format(F, "<tr><td><a href=\"~p.COVER.html\">~p</a></td>"
+ "<td>~p%</td></tr>~n",
+ [M, M, Perc(Y, N)]) || {M, {Y, N}} <- Report1],
+ How = "$(subst $(space),$(comma)$(space),$(basename $(COVERDATA)))",
+ Date = "$(shell date -u "+%Y-%m-%dT%H:%M:%SZ")",
+ io:format(F, "</table>~n"
+ "<p>Generated using ~s and erlang.mk on ~s.</p>~n"
+ "</body></html>", [How, Date]),
+ halt().
+endef
+
+cover-report:
+ $(verbose) mkdir -p $(COVER_REPORT_DIR)
+ $(gen_verbose) $(call erlang,$(cover_report.erl))
+
+endif
+endif # ifneq ($(COVER_REPORT_DIR),)
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: sfx
+
+ifdef RELX_REL
+ifdef SFX
+
+# Configuration.
+
+SFX_ARCHIVE ?= $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/$(RELX_REL_NAME)-$(RELX_REL_VSN).tar.gz
+SFX_OUTPUT_FILE ?= $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME).run
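+
+# Typical usage: build the release and wrap its tarball into a single
+# self-extracting .run file:
+#   make rel SFX=1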
+
+# Core targets.
+
+rel:: sfx
+
+# Plugin-specific targets.
+
+define sfx_stub
+#!/bin/sh
+
+TMPDIR=`mktemp -d`
+ARCHIVE=`awk '/^__ARCHIVE_BELOW__$$/ {print NR + 1; exit 0;}' $$0`
+FILENAME=$$(basename $$0)
+REL=$${FILENAME%.*}
+
+tail -n+$$ARCHIVE $$0 | tar -xzf - -C $$TMPDIR
+
+$$TMPDIR/bin/$$REL console
+RET=$$?
+
+rm -rf $$TMPDIR
+
+exit $$RET
+
+__ARCHIVE_BELOW__
+endef
+
+sfx:
+ $(verbose) $(call core_render,sfx_stub,$(SFX_OUTPUT_FILE))
+ $(gen_verbose) cat $(SFX_ARCHIVE) >> $(SFX_OUTPUT_FILE)
+ $(verbose) chmod +x $(SFX_OUTPUT_FILE)
+
+endif
+endif
+
+# Copyright (c) 2013-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# External plugins.
+
+DEP_PLUGINS ?=
+
+$(foreach p,$(DEP_PLUGINS),\
+ $(eval $(if $(findstring /,$p),\
+ $(call core_dep_plugin,$p,$(firstword $(subst /, ,$p))),\
+ $(call core_dep_plugin,$p/plugins.mk,$p))))
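+
+# For example, DEP_PLUGINS = cowboy loads cowboy/plugins.mk, while an entry
+# containing a slash names an explicit file inside the dependency, e.g.
+# DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk (hypothetical paths).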
+
+help:: help-plugins
+
+help-plugins::
+ $(verbose) :
+
+# Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015-2016, Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Fetch dependencies recursively (without building them).
+
+.PHONY: fetch-deps fetch-doc-deps fetch-rel-deps fetch-test-deps \
+ fetch-shell-deps
+
+.PHONY: $(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+fetch-deps: $(ERLANG_MK_RECURSIVE_DEPS_LIST)
+fetch-doc-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST)
+fetch-rel-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST)
+fetch-test-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST)
+fetch-shell-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+ifneq ($(SKIP_DEPS),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST):
+ $(verbose) :> $@
+else
+# By default, we fetch "normal" dependencies. They are also included
+# regardless of which type of dependencies is requested.
+#
+# $(ALL_DEPS_DIRS) includes $(BUILD_DEPS).
+
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_DOC_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_REL_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_TEST_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_SHELL_DEPS_DIRS)
+
+# Allow fetch-deps to be used with $(DEP_TYPES) to fetch multiple types
+# of dependencies with a single target.
+ifneq ($(filter doc,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_DOC_DEPS_DIRS)
+endif
+ifneq ($(filter rel,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_REL_DEPS_DIRS)
+endif
+ifneq ($(filter test,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_TEST_DEPS_DIRS)
+endif
+ifneq ($(filter shell,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_SHELL_DEPS_DIRS)
+endif
+
+ERLANG_MK_RECURSIVE_TMP_LIST := $(abspath $(ERLANG_MK_TMP)/recursive-tmp-deps-$(shell echo $$PPID).log)
+
+$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): | $(ERLANG_MK_TMP)
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_RECURSIVE_TMP_LIST)
+endif
+ $(verbose) touch $(ERLANG_MK_RECURSIVE_TMP_LIST)
+ $(verbose) set -e; for dep in $^ ; do \
+ if ! grep -qs ^$$dep$$ $(ERLANG_MK_RECURSIVE_TMP_LIST); then \
+ echo $$dep >> $(ERLANG_MK_RECURSIVE_TMP_LIST); \
+ if grep -qs -E "^[[:blank:]]*include[[:blank:]]+(erlang\.mk|.*/erlang\.mk|.*ERLANG_MK_FILENAME.*)$$" \
+ $$dep/GNUmakefile $$dep/makefile $$dep/Makefile; then \
+ $(MAKE) -C $$dep fetch-deps \
+ IS_DEP=1 \
+ ERLANG_MK_RECURSIVE_TMP_LIST=$(ERLANG_MK_RECURSIVE_TMP_LIST); \
+ fi \
+ fi \
+ done
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) sort < $(ERLANG_MK_RECURSIVE_TMP_LIST) | \
+ uniq > $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted
+ $(verbose) cmp -s $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted $@ \
+ || mv $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted $@
+ $(verbose) rm -f $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted
+ $(verbose) rm $(ERLANG_MK_RECURSIVE_TMP_LIST)
+endif
+endif # ifneq ($(SKIP_DEPS),)
+
+# List dependencies recursively.
+
+.PHONY: list-deps list-doc-deps list-rel-deps list-test-deps \
+ list-shell-deps
+
+list-deps: $(ERLANG_MK_RECURSIVE_DEPS_LIST)
+list-doc-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST)
+list-rel-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST)
+list-test-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST)
+list-shell-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+list-deps list-doc-deps list-rel-deps list-test-deps list-shell-deps:
+ $(verbose) cat $^
+
+# Query dependencies recursively.
+
+.PHONY: query-deps query-doc-deps query-rel-deps query-test-deps \
+ query-shell-deps
+
+QUERY ?= name fetch_method repo version
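+
+# For example, to list only each dependency's name and repository:
+#   make query-deps QUERY="name repo"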
+
+define query_target
+$(1): $(2) clean-tmp-query.log
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(4)
+endif
+ $(verbose) $(foreach dep,$(3),\
+ echo $(PROJECT): $(foreach q,$(QUERY),$(call query_$(q),$(dep))) >> $(4) ;)
+ $(if $(filter-out query-deps,$(1)),,\
+ $(verbose) set -e; for dep in $(3) ; do \
+ if grep -qs ^$$$$dep$$$$ $(ERLANG_MK_TMP)/query.log; then \
+ :; \
+ else \
+ echo $$$$dep >> $(ERLANG_MK_TMP)/query.log; \
+ $(MAKE) -C $(DEPS_DIR)/$$$$dep $$@ QUERY="$(QUERY)" IS_DEP=1 || true; \
+ fi \
+ done)
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) touch $(4)
+ $(verbose) cat $(4)
+endif
+endef
+
+clean-tmp-query.log:
+ifeq ($(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_TMP)/query.log
+endif
+
+$(eval $(call query_target,query-deps,$(ERLANG_MK_RECURSIVE_DEPS_LIST),$(BUILD_DEPS) $(DEPS),$(ERLANG_MK_QUERY_DEPS_FILE)))
+$(eval $(call query_target,query-doc-deps,$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST),$(DOC_DEPS),$(ERLANG_MK_QUERY_DOC_DEPS_FILE)))
+$(eval $(call query_target,query-rel-deps,$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST),$(REL_DEPS),$(ERLANG_MK_QUERY_REL_DEPS_FILE)))
+$(eval $(call query_target,query-test-deps,$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST),$(TEST_DEPS),$(ERLANG_MK_QUERY_TEST_DEPS_FILE)))
+$(eval $(call query_target,query-shell-deps,$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST),$(SHELL_DEPS),$(ERLANG_MK_QUERY_SHELL_DEPS_FILE)))
diff --git a/deps/rabbitmq_prometheus/metrics.md b/deps/rabbitmq_prometheus/metrics.md
new file mode 100644
index 0000000000..ff76d014c3
--- /dev/null
+++ b/deps/rabbitmq_prometheus/metrics.md
@@ -0,0 +1,260 @@
+# Metrics
+
+<!-- TOC depthFrom:2 depthTo:6 withLinks:1 updateOnSave:1 orderedList:0 -->
+
+- [RabbitMQ](#rabbitmq)
+ - [Global](#global)
+ - [Overview](#overview)
+ - [Connections](#connections)
+ - [Channels](#channels)
+ - [Queues](#queues)
+ - [Erlang via RabbitMQ](#erlang-via-rabbitmq)
+ - [Disk IO](#disk-io)
+ - [Raft](#raft)
+- [Telemetry](#telemetry)
+- [Erlang](#erlang)
+ - [Mnesia](#mnesia)
+ - [VM](#vm)
+
+<!-- /TOC -->
+
+## RabbitMQ
+
+### Global
+
+| Metric | Description |
+| --- | --- |
+| rabbitmq_consumer_prefetch | Limit of unacknowledged messages for each consumer |
+| rabbitmq_channel_prefetch | Total limit of unacknowledged messages for all consumers on a channel |
+
+### Overview
+
+| Metric | Description |
+| --- | --- |
+| rabbitmq_connections_opened_total | Total number of connections opened |
+| rabbitmq_connections_closed_total | Total number of connections closed or terminated |
+| rabbitmq_channels_opened_total | Total number of channels opened |
+| rabbitmq_channels_closed_total | Total number of channels closed |
+| rabbitmq_queues_declared_total | Total number of queues declared |
+| rabbitmq_queues_created_total | Total number of queues created |
+| rabbitmq_queues_deleted_total | Total number of queues deleted |
+| rabbitmq_process_open_fds | Open file descriptors |
+| rabbitmq_process_open_tcp_sockets | Open TCP sockets |
+| rabbitmq_process_resident_memory_bytes | Memory used in bytes |
+| rabbitmq_disk_space_available_bytes | Disk space available in bytes |
+| rabbitmq_process_max_fds | Open file descriptors limit |
+| rabbitmq_process_max_tcp_sockets | Open TCP sockets limit |
+| rabbitmq_resident_memory_limit_bytes | Memory high watermark in bytes |
+| rabbitmq_disk_space_available_limit_bytes | Free disk space low watermark in bytes |
+| rabbitmq_connections | Connections currently open |
+| rabbitmq_channels | Channels currently open |
+| rabbitmq_consumers | Consumers currently connected |
+| rabbitmq_queues | Queues available |
+| rabbitmq_build_info | RabbitMQ & Erlang/OTP version info |
+| rabbitmq_identity_info | RabbitMQ node & cluster identity info |
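+
+Counters like these are most useful when converted to rates. For example, a
+hypothetical Prometheus query for net connection churn over five minutes:
+
+    rate(rabbitmq_connections_opened_total[5m]) - rate(rabbitmq_connections_closed_total[5m])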
+
+### Connections
+
+| Metric | Description |
+| --- | --- |
+| rabbitmq_connection_incoming_bytes_total | Total number of bytes received on a connection |
+| rabbitmq_connection_outgoing_bytes_total | Total number of bytes sent on a connection |
+| rabbitmq_connection_process_reductions_total | Total number of connection process reductions |
+| rabbitmq_connection_incoming_packets_total | Total number of packets received on a connection |
+| rabbitmq_connection_outgoing_packets_total | Total number of packets sent on a connection |
+| rabbitmq_connection_pending_packets | Number of packets waiting to be sent on a connection |
+| rabbitmq_connection_channels | Channels on a connection |
+
+### Channels
+
+| Metric | Description |
+| --- | --- |
+| rabbitmq_channel_consumers | Consumers on a channel |
+| rabbitmq_channel_messages_unacked | Delivered but not yet acknowledged messages |
+| rabbitmq_channel_messages_unconfirmed | Published but not yet confirmed messages |
+| rabbitmq_channel_messages_uncommitted | Messages received in a transaction but not yet committed |
+| rabbitmq_channel_acks_uncommitted | Message acknowledgements in a transaction not yet committed |
+| rabbitmq_channel_messages_published_total | Total number of messages published into an exchange on a channel |
+| rabbitmq_channel_messages_confirmed_total | Total number of messages published into an exchange and confirmed on the channel |
+| rabbitmq_channel_messages_unroutable_returned_total | Total number of messages published as mandatory into an exchange and returned to the publisher as unroutable |
+| rabbitmq_channel_messages_unroutable_dropped_total | Total number of messages published as non-mandatory into an exchange and dropped as unroutable |
+| rabbitmq_channel_process_reductions_total | Total number of channel process reductions |
+| rabbitmq_channel_get_ack_total | Total number of messages fetched with basic.get in manual acknowledgement mode |
+| rabbitmq_channel_get_total | Total number of messages fetched with basic.get in automatic acknowledgement mode |
+| rabbitmq_channel_messages_delivered_ack_total | Total number of messages delivered to consumers in manual acknowledgement mode |
+| rabbitmq_channel_messages_delivered_total | Total number of messages delivered to consumers in automatic acknowledgement mode |
+| rabbitmq_channel_messages_redelivered_total | Total number of messages redelivered to consumers |
+| rabbitmq_channel_messages_acked_total | Total number of messages acknowledged by consumers |
+| rabbitmq_channel_get_empty_total | Total number of times basic.get operations fetched no message |
+
+
+### Queues
+
+| Metric | Description |
+| --- | --- |
+| rabbitmq_queue_messages_published_total | Total number of messages published to queues |
+| rabbitmq_queue_messages_ready | Messages ready to be delivered to consumers |
+| rabbitmq_queue_messages_unacked | Messages delivered to consumers but not yet acknowledged |
+| rabbitmq_queue_messages | Sum of ready and unacknowledged messages - total queue depth |
+| rabbitmq_queue_process_reductions_total | Total number of queue process reductions |
+| rabbitmq_queue_consumers | Consumers on a queue |
+| rabbitmq_queue_consumer_utilisation | Consumer utilisation |
+| rabbitmq_queue_process_memory_bytes | Memory in bytes used by the Erlang queue process |
+| rabbitmq_queue_messages_ram | Ready and unacknowledged messages stored in memory |
+| rabbitmq_queue_messages_ram_bytes | Size of ready and unacknowledged messages stored in memory |
+| rabbitmq_queue_messages_ready_ram | Ready messages stored in memory |
+| rabbitmq_queue_messages_unacked_ram | Unacknowledged messages stored in memory |
+| rabbitmq_queue_messages_persistent | Persistent messages |
+| rabbitmq_queue_messages_persistent_bytes | Size in bytes of persistent messages |
+| rabbitmq_queue_messages_bytes | Size in bytes of ready and unacknowledged messages |
+| rabbitmq_queue_messages_ready_bytes | Size in bytes of ready messages |
+| rabbitmq_queue_messages_unacked_bytes | Size in bytes of all unacknowledged messages |
+| rabbitmq_queue_messages_paged_out | Messages paged out to disk |
+| rabbitmq_queue_messages_paged_out_bytes | Size in bytes of messages paged out to disk |
+| rabbitmq_queue_disk_reads_total | Total number of times the queue read messages from disk |
+| rabbitmq_queue_disk_writes_total | Total number of times the queue wrote messages to disk |
+
+
+
+### Erlang via RabbitMQ
+
+| Metric | Description |
+| --- | --- |
+| rabbitmq_erlang_processes_used | Erlang processes used |
+| rabbitmq_erlang_gc_runs_total | Total number of Erlang garbage collector runs |
+| rabbitmq_erlang_gc_reclaimed_bytes_total | Total number of bytes of memory reclaimed by Erlang garbage collector |
+| rabbitmq_erlang_scheduler_context_switches_total | Total number of Erlang scheduler context switches |
+| rabbitmq_erlang_processes_limit | Erlang processes limit |
+| rabbitmq_erlang_scheduler_run_queue | Erlang scheduler run queue |
+| rabbitmq_erlang_net_ticktime_seconds | Inter-node heartbeat interval in seconds |
+| rabbitmq_erlang_uptime_seconds | Node uptime |
+
+
+### Disk IO
+
+| Metric | Description |
+| --- | --- |
+| rabbitmq_io_read_ops_total | Total number of I/O read operations |
+| rabbitmq_io_read_bytes_total | Total number of I/O bytes read |
+| rabbitmq_io_write_ops_total | Total number of I/O write operations |
+| rabbitmq_io_write_bytes_total | Total number of I/O bytes written |
+| rabbitmq_io_sync_ops_total | Total number of I/O sync operations |
+| rabbitmq_io_seek_ops_total | Total number of I/O seek operations |
+| rabbitmq_io_open_attempt_ops_total | Total number of file open attempts |
+| rabbitmq_io_reopen_ops_total | Total number of times files have been reopened |
+| rabbitmq_schema_db_ram_tx_total | Total number of Schema DB memory transactions |
+| rabbitmq_schema_db_disk_tx_total | Total number of Schema DB disk transactions |
+| rabbitmq_msg_store_read_total | Total number of Message Store read operations |
+| rabbitmq_msg_store_write_total | Total number of Message Store write operations |
+| rabbitmq_queue_index_read_ops_total | Total number of Queue Index read operations |
+| rabbitmq_queue_index_write_ops_total | Total number of Queue Index write operations |
+| rabbitmq_queue_index_journal_write_ops_total | Total number of Queue Index Journal write operations |
+| rabbitmq_io_read_time_seconds_total | Total I/O read time |
+| rabbitmq_io_write_time_seconds_total | Total I/O write time |
+| rabbitmq_io_sync_time_seconds_total | Total I/O sync time |
+| rabbitmq_io_seek_time_seconds_total | Total I/O seek time |
+| rabbitmq_io_open_attempt_time_seconds_total | Total file open attempts time |
+
+### Raft
+
+| Metric | Description |
+| --- | --- |
+| rabbitmq_raft_term_total | Current Raft term number |
+| rabbitmq_raft_log_snapshot_index | Raft log snapshot index |
+| rabbitmq_raft_log_last_applied_index | Raft log last applied index |
+| rabbitmq_raft_log_commit_index | Raft log commit index |
+| rabbitmq_raft_log_last_written_index | Raft log last written index |
+| rabbitmq_raft_entry_commit_latency_seconds | Time taken for an entry to be committed |
+
+## Telemetry
+
+| Metric | Description |
+| --- | --- |
+| telemetry_scrape_encoded_size_bytes | Scrape size, encoded |
+| telemetry_scrape_size_bytes | Scrape size, not encoded |
+| telemetry_scrape_duration_seconds | Scrape duration |
+
+## Erlang
+
+### Mnesia
+
+| Metric | Description |
+| --- | --- |
+| erlang_mnesia_held_locks | Number of held locks |
+| erlang_mnesia_lock_queue | Number of transactions waiting for a lock |
+| erlang_mnesia_transaction_participants | Number of participant transactions |
+| erlang_mnesia_transaction_coordinators | Number of coordinator transactions |
+| erlang_mnesia_failed_transactions | Number of failed (i.e. aborted) transactions |
+| erlang_mnesia_committed_transactions | Number of committed transactions |
+| erlang_mnesia_logged_transactions | Number of transactions logged |
+| erlang_mnesia_restarted_transactions | Total number of transaction restarts |
+
+
+### VM
+
+| Metric | Description |
+| --- | --- |
+| erlang_vm_dist_recv_bytes | Number of bytes received by the socket. |
+| erlang_vm_dist_recv_cnt | Number of packets received by the socket. |
+| erlang_vm_dist_recv_max_bytes | Size of the largest packet, in bytes, received by the socket. |
+| erlang_vm_dist_recv_avg_bytes | Average size of packets, in bytes, received by the socket. |
+| erlang_vm_dist_recv_dvi_bytes | Average packet size deviation, in bytes, received by the socket. |
+| erlang_vm_dist_send_bytes | Number of bytes sent from the socket. |
+| erlang_vm_dist_send_cnt | Number of packets sent from the socket. |
+| erlang_vm_dist_send_max_bytes | Size of the largest packet, in bytes, sent from the socket. |
+| erlang_vm_dist_send_avg_bytes | Average size of packets, in bytes, sent from the socket. |
+| erlang_vm_dist_send_pend_bytes | Number of bytes waiting to be sent by the socket. |
+| erlang_vm_dist_port_input_bytes | The total number of bytes read from the port. |
+| erlang_vm_dist_port_output_bytes | The total number of bytes written to the port. |
+| erlang_vm_dist_port_memory_bytes | The total number of bytes allocated for this port by the runtime system. The port itself can have allocated memory that is not included. |
+| erlang_vm_dist_port_queue_size_bytes | The total number of bytes queued by the port using the ERTS driver queue implementation. |
+| erlang_vm_dist_proc_memory_bytes | The size in bytes of the process. This includes call stack, heap, and internal structures. |
+| erlang_vm_dist_proc_heap_size_words | The size in words of the youngest heap generation of the process. This generation includes the process stack. This information is highly implementation-dependent, and can change if the implementation changes. |
+| erlang_vm_dist_proc_min_heap_size_words | The minimum heap size for the process. |
+| erlang_vm_dist_proc_min_bin_vheap_size_words | The minimum binary virtual heap size for the process. |
+| erlang_vm_dist_proc_stack_size_words | The stack size, in words, of the process. |
+| erlang_vm_dist_proc_total_heap_size_words | The total size, in words, of all heap fragments of the process. This includes the process stack and any unreceived messages that are considered to be part of the heap. |
+| erlang_vm_dist_proc_message_queue_len | The number of messages currently in the message queue of the process. |
+| erlang_vm_dist_proc_reductions | The number of reductions executed by the process. |
+| erlang_vm_dist_proc_status | The current status of the distribution process. The status is represented as a numerical value where `exiting=1`, `suspended=2`, `runnable=3`, `garbage_collecting=4`, `running=5` and `waiting=6`. |
+| erlang_vm_dist_node_state | The current state of the distribution link. The state is represented as a numerical value where `pending=1`, `up_pending=2` and `up=3`. |
+| erlang_vm_dist_node_queue_size_bytes | The number of bytes in the output distribution queue. This queue sits between the Erlang code and the port driver. |
+| erlang_vm_memory_atom_bytes_total | The total amount of memory currently allocated for atoms. This memory is part of the memory presented as system memory. |
+| erlang_vm_memory_bytes_total | The total amount of memory currently allocated. This is the same as the sum of the memory size for processes and system. |
+| erlang_vm_memory_dets_tables | Erlang VM DETS Tables count. |
+| erlang_vm_memory_ets_tables | Erlang VM ETS Tables count. |
+| erlang_vm_memory_processes_bytes_total | The total amount of memory currently allocated for the Erlang processes. |
+| erlang_vm_memory_system_bytes_total | The total amount of memory currently allocated for the emulator that is not directly related to any Erlang process. Memory presented as processes is not included in this memory. |
+| erlang_vm_statistics_bytes_output_total | Total number of bytes output to ports. |
+| erlang_vm_statistics_bytes_received_total | Total number of bytes received through ports. |
+| erlang_vm_statistics_context_switches | Total number of context switches since the system started. |
+| erlang_vm_statistics_dirty_cpu_run_queue_length | Length of the dirty CPU run-queue. |
+| erlang_vm_statistics_dirty_io_run_queue_length | Length of the dirty IO run-queue. |
+| erlang_vm_statistics_garbage_collection_number_of_gcs | Garbage collection: number of GCs. |
+| erlang_vm_statistics_garbage_collection_bytes_reclaimed | Garbage collection: bytes reclaimed. |
+| erlang_vm_statistics_garbage_collection_words_reclaimed | Garbage collection: words reclaimed. |
+| erlang_vm_statistics_reductions_total | Total reductions. |
+| erlang_vm_statistics_run_queues_length_total | Length of normal run-queues. |
+| erlang_vm_statistics_wallclock_time_milliseconds | Information about wall clock. Same as erlang_vm_statistics_runtime_milliseconds except that real time is measured. |
+| erlang_vm_statistics_runtime_milliseconds | The sum of the runtime for all threads in the Erlang runtime system. Can be greater than wall clock time. |
+| erlang_vm_dirty_cpu_schedulers | The number of dirty CPU scheduler threads used by the emulator. |
+| erlang_vm_dirty_cpu_schedulers_online | The number of dirty CPU scheduler threads online. |
+| erlang_vm_dirty_io_schedulers | The number of dirty I/O scheduler threads used by the emulator. |
+| erlang_vm_ets_limit | The maximum number of ETS tables allowed. |
+| erlang_vm_logical_processors | The detected number of logical processors configured in the system. |
+| erlang_vm_logical_processors_available | The detected number of logical processors available to the Erlang runtime system. |
+| erlang_vm_logical_processors_online | The detected number of logical processors online on the system. |
+| erlang_vm_port_count | The number of ports currently existing at the local node. |
+| erlang_vm_port_limit | The maximum number of simultaneously existing ports at the local node. |
+| erlang_vm_process_count | The number of processes currently existing at the local node. |
+| erlang_vm_process_limit | The maximum number of simultaneously existing processes at the local node. |
+| erlang_vm_schedulers | The number of scheduler threads used by the emulator. |
+| erlang_vm_schedulers_online | The number of schedulers online. |
+| erlang_vm_smp_support | 1 if the emulator has been compiled with SMP support, otherwise 0. |
+| erlang_vm_threads | 1 if the emulator has been compiled with thread support, otherwise 0. |
+| erlang_vm_thread_pool_size | The number of async threads in the async thread pool used for asynchronous driver calls. |
+| erlang_vm_time_correction | 1 if time correction is enabled, otherwise 0. |
+| erlang_vm_atom_count | The number of atoms currently existing at the local node. |
+| erlang_vm_atom_limit | The maximum number of simultaneously existing atoms at the local node. |
+| erlang_vm_allocators | Allocated (carriers_size) and used (blocks_size) memory for the different allocators in the VM. See erts_alloc(3). |
diff --git a/deps/rabbitmq_prometheus/priv/schema/rabbitmq_prometheus.schema b/deps/rabbitmq_prometheus/priv/schema/rabbitmq_prometheus.schema
new file mode 100644
index 0000000000..bdef14782b
--- /dev/null
+++ b/deps/rabbitmq_prometheus/priv/schema/rabbitmq_prometheus.schema
@@ -0,0 +1,127 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+%% ----------------------------------------------------------------------------
+%% RabbitMQ Prometheus Plugin
+%%
+%% See https://rabbitmq.com/prometheus.html for details
+%% ----------------------------------------------------------------------------
+
+%% Option to return metrics per-object, unaggregated
+{mapping, "prometheus.return_per_object_metrics", "rabbitmq_prometheus.return_per_object_metrics",
+ [{datatype, {enum, [true, false]}}]}.
+
+%% Endpoint path
+{mapping, "prometheus.path", "rabbitmq_prometheus.path",
+ [{datatype, string}]}.
+
+%% HTTP (TCP) listener options ========================================================
+
+%% HTTP listener consistent with the management plugin, Web STOMP and Web MQTT.
+%%
+%% {tcp_config, [{port, 15692},
+%% {ip, "127.0.0.1"}]}
+
+{mapping, "prometheus.tcp.port", "rabbitmq_prometheus.tcp_config.port",
+ [{datatype, integer}]}.
+{mapping, "prometheus.tcp.ip", "rabbitmq_prometheus.tcp_config.ip",
+ [{datatype, string},
+ {validators, ["is_ip"]}]}.
+
+{mapping, "prometheus.tcp.compress", "rabbitmq_prometheus.tcp_config.cowboy_opts.compress",
+ [{datatype, {enum, [true, false]}}]}.
+{mapping, "prometheus.tcp.idle_timeout", "rabbitmq_prometheus.tcp_config.cowboy_opts.idle_timeout",
+ [{datatype, integer}, {validators, ["non_negative_integer"]}]}.
+{mapping, "prometheus.tcp.inactivity_timeout", "rabbitmq_prometheus.tcp_config.cowboy_opts.inactivity_timeout",
+ [{datatype, integer}, {validators, ["non_negative_integer"]}]}.
+{mapping, "prometheus.tcp.request_timeout", "rabbitmq_prometheus.tcp_config.cowboy_opts.request_timeout",
+ [{datatype, integer}, {validators, ["non_negative_integer"]}]}.
+{mapping, "prometheus.tcp.shutdown_timeout", "rabbitmq_prometheus.tcp_config.cowboy_opts.shutdown_timeout",
+ [{datatype, integer}, {validators, ["non_negative_integer"]}]}.
+{mapping, "prometheus.tcp.max_keepalive", "rabbitmq_prometheus.tcp_config.cowboy_opts.max_keepalive",
+ [{datatype, integer}, {validators, ["non_negative_integer"]}]}.
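+
+%% A minimal rabbitmq.conf sketch using the mappings above
+%% (hypothetical values):
+%%
+%% prometheus.path = /metrics
+%% prometheus.return_per_object_metrics = true
+%% prometheus.tcp.port = 15692
+%% prometheus.tcp.ip = 127.0.0.1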
+
+
+%% HTTPS (TLS) listener options ========================================================
+
+%% HTTPS listener consistent with the management plugin, Web STOMP and Web MQTT.
+%%
+%% {ssl_config, [{port, 15691},
+%% {ip, "127.0.0.1"},
+%% {cacertfile, "/path/to/cacert.pem"},
+%% {certfile, "/path/to/cert.pem"},
+%% {keyfile, "/path/to/key.pem"}]}
+
+{mapping, "prometheus.ssl.port", "rabbitmq_prometheus.ssl_config.port",
+ [{datatype, integer}]}.
+{mapping, "prometheus.ssl.backlog", "rabbitmq_prometheus.ssl_config.backlog",
+ [{datatype, integer}]}.
+{mapping, "prometheus.ssl.ip", "rabbitmq_prometheus.ssl_config.ip",
+ [{datatype, string}, {validators, ["is_ip"]}]}.
+{mapping, "prometheus.ssl.certfile", "rabbitmq_prometheus.ssl_config.certfile",
+ [{datatype, string}, {validators, ["file_accessible"]}]}.
+{mapping, "prometheus.ssl.keyfile", "rabbitmq_prometheus.ssl_config.keyfile",
+ [{datatype, string}, {validators, ["file_accessible"]}]}.
+{mapping, "prometheus.ssl.cacertfile", "rabbitmq_prometheus.ssl_config.cacertfile",
+ [{datatype, string}, {validators, ["file_accessible"]}]}.
+{mapping, "prometheus.ssl.password", "rabbitmq_prometheus.ssl_config.password",
+ [{datatype, string}]}.
+
+{mapping, "prometheus.ssl.verify", "rabbitmq_prometheus.ssl_config.verify", [
+ {datatype, {enum, [verify_peer, verify_none]}}]}.
+
+{mapping, "prometheus.ssl.fail_if_no_peer_cert", "rabbitmq_prometheus.ssl_config.fail_if_no_peer_cert", [
+ {datatype, {enum, [true, false]}}]}.
+
+{mapping, "prometheus.ssl.honor_cipher_order", "rabbitmq_prometheus.ssl_config.honor_cipher_order",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "prometheus.ssl.honor_ecc_order", "rabbitmq_prometheus.ssl_config.honor_ecc_order",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "prometheus.ssl.reuse_sessions", "rabbitmq_prometheus.ssl_config.reuse_sessions",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "prometheus.ssl.secure_renegotiate", "rabbitmq_prometheus.ssl_config.secure_renegotiate",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "prometheus.ssl.client_renegotiation", "rabbitmq_prometheus.ssl_config.client_renegotiation",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "prometheus.ssl.depth", "rabbitmq_prometheus.ssl_config.depth",
+ [{datatype, integer}, {validators, ["byte"]}]}.
+
+{mapping, "prometheus.ssl.versions.$version", "rabbitmq_prometheus.ssl_config.versions",
+ [{datatype, atom}]}.
+
+{translation, "rabbitmq_prometheus.ssl_config.versions",
+fun(Conf) ->
+ Settings = cuttlefish_variable:filter_by_prefix("prometheus.ssl.versions", Conf),
+ [V || {_, V} <- Settings]
+end}.
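+
+%% For example, to restrict the TLS listener to TLS 1.2 only
+%% (hypothetical setting):
+%%
+%% prometheus.ssl.versions.1 = tlsv1.2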
+
+{mapping, "prometheus.ssl.ciphers.$cipher", "rabbitmq_prometheus.ssl_config.ciphers",
+ [{datatype, string}]}.
+
+{translation, "rabbitmq_prometheus.ssl_config.ciphers",
+fun(Conf) ->
+ Settings = cuttlefish_variable:filter_by_prefix("prometheus.ssl.ciphers", Conf),
+ lists:reverse([V || {_, V} <- Settings])
+end}.
+
+{mapping, "prometheus.ssl.compress", "rabbitmq_prometheus.ssl_config.cowboy_opts.compress",
+ [{datatype, {enum, [true, false]}}]}.
+{mapping, "prometheus.ssl.idle_timeout", "rabbitmq_prometheus.ssl_config.cowboy_opts.idle_timeout",
+ [{datatype, integer}, {validators, ["non_negative_integer"]}]}.
+{mapping, "prometheus.ssl.inactivity_timeout", "rabbitmq_prometheus.ssl_config.cowboy_opts.inactivity_timeout",
+ [{datatype, integer}, {validators, ["non_negative_integer"]}]}.
+{mapping, "prometheus.ssl.request_timeout", "rabbitmq_prometheus.ssl_config.cowboy_opts.request_timeout",
+ [{datatype, integer}, {validators, ["non_negative_integer"]}]}.
+{mapping, "prometheus.ssl.shutdown_timeout", "rabbitmq_prometheus.ssl_config.cowboy_opts.shutdown_timeout",
+ [{datatype, integer}, {validators, ["non_negative_integer"]}]}.
+{mapping, "prometheus.ssl.max_keepalive", "rabbitmq_prometheus.ssl_config.cowboy_opts.max_keepalive",
+ [{datatype, integer}, {validators, ["non_negative_integer"]}]}.
diff --git a/deps/rabbitmq_prometheus/rabbitmq-components.mk b/deps/rabbitmq_prometheus/rabbitmq-components.mk
new file mode 100644
index 0000000000..b2a3be8b35
--- /dev/null
+++ b/deps/rabbitmq_prometheus/rabbitmq-components.mk
@@ -0,0 +1,359 @@
+ifeq ($(.DEFAULT_GOAL),)
+# Set the default goal to `all` because this file defines some targets
+# before erlang.mk is included, which would otherwise cause the wrong
+# target to become the default.
+.DEFAULT_GOAL = all
+endif
+
+# PROJECT_VERSION defaults to:
+# 1. the version exported by rabbitmq-server-release;
+# 2. the version stored in `git-revisions.txt`, if it exists;
+# 3. a version based on git-describe(1), if it is a Git clone;
+# 4. 0.0.0
+
+PROJECT_VERSION := $(RABBITMQ_VERSION)
+
+ifeq ($(PROJECT_VERSION),)
+PROJECT_VERSION := $(shell \
+if test -f git-revisions.txt; then \
+ head -n1 git-revisions.txt | \
+ awk '{print $$$(words $(PROJECT_DESCRIPTION) version);}'; \
+else \
+ (git describe --dirty --abbrev=7 --tags --always --first-parent \
+ 2>/dev/null || echo rabbitmq_v0_0_0) | \
+ sed -e 's/^rabbitmq_v//' -e 's/^v//' -e 's/_/./g' -e 's/-/+/' \
+ -e 's/-/./g'; \
+fi)
+endif
+
+# --------------------------------------------------------------------
+# RabbitMQ components.
+# --------------------------------------------------------------------
+
+# For RabbitMQ repositories, we want to checkout branches which match
+# the parent project. For instance, if the parent project is on a
+# release tag, dependencies must be on the same release tag. If the
+# parent project is on a topic branch, dependencies must be on the same
+# topic branch or fall back to `stable` or `master`, whichever was the
+# base of the topic branch.
+
+dep_amqp_client = git_rmq rabbitmq-erlang-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_amqp10_client = git_rmq rabbitmq-amqp1.0-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_amqp10_common = git_rmq rabbitmq-amqp1.0-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbit = git_rmq rabbitmq-server $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbit_common = git_rmq rabbitmq-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_amqp1_0 = git_rmq rabbitmq-amqp1.0 $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_amqp = git_rmq rabbitmq-auth-backend-amqp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_cache = git_rmq rabbitmq-auth-backend-cache $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_http = git_rmq rabbitmq-auth-backend-http $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_ldap = git_rmq rabbitmq-auth-backend-ldap $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_oauth2 = git_rmq rabbitmq-auth-backend-oauth2 $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_mechanism_ssl = git_rmq rabbitmq-auth-mechanism-ssl $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_aws = git_rmq rabbitmq-aws $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_boot_steps_visualiser = git_rmq rabbitmq-boot-steps-visualiser $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_cli = git_rmq rabbitmq-cli $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_codegen = git_rmq rabbitmq-codegen $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_consistent_hash_exchange = git_rmq rabbitmq-consistent-hash-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_ct_client_helpers = git_rmq rabbitmq-ct-client-helpers $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_ct_helpers = git_rmq rabbitmq-ct-helpers $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_delayed_message_exchange = git_rmq rabbitmq-delayed-message-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_dotnet_client = git_rmq rabbitmq-dotnet-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_event_exchange = git_rmq rabbitmq-event-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_federation = git_rmq rabbitmq-federation $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_federation_management = git_rmq rabbitmq-federation-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_java_client = git_rmq rabbitmq-java-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_client = git_rmq rabbitmq-jms-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_cts = git_rmq rabbitmq-jms-cts $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_topic_exchange = git_rmq rabbitmq-jms-topic-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_lvc_exchange = git_rmq rabbitmq-lvc-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management = git_rmq rabbitmq-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_agent = git_rmq rabbitmq-management-agent $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_exchange = git_rmq rabbitmq-management-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_themes = git_rmq rabbitmq-management-themes $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_message_timestamp = git_rmq rabbitmq-message-timestamp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_metronome = git_rmq rabbitmq-metronome $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_mqtt = git_rmq rabbitmq-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_objc_client = git_rmq rabbitmq-objc-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_aws = git_rmq rabbitmq-peer-discovery-aws $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_common = git_rmq rabbitmq-peer-discovery-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_consul = git_rmq rabbitmq-peer-discovery-consul $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_etcd = git_rmq rabbitmq-peer-discovery-etcd $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_k8s = git_rmq rabbitmq-peer-discovery-k8s $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_prometheus = git_rmq rabbitmq-prometheus $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_random_exchange = git_rmq rabbitmq-random-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_recent_history_exchange = git_rmq rabbitmq-recent-history-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_routing_node_stamp = git_rmq rabbitmq-routing-node-stamp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_rtopic_exchange = git_rmq rabbitmq-rtopic-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_server_release = git_rmq rabbitmq-server-release $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_sharding = git_rmq rabbitmq-sharding $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_shovel = git_rmq rabbitmq-shovel $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_shovel_management = git_rmq rabbitmq-shovel-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_stomp = git_rmq rabbitmq-stomp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_stream = git_rmq rabbitmq-stream $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_toke = git_rmq rabbitmq-toke $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_top = git_rmq rabbitmq-top $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_tracing = git_rmq rabbitmq-tracing $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_trust_store = git_rmq rabbitmq-trust-store $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_test = git_rmq rabbitmq-test $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_dispatch = git_rmq rabbitmq-web-dispatch $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_stomp = git_rmq rabbitmq-web-stomp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_stomp_examples = git_rmq rabbitmq-web-stomp-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt = git_rmq rabbitmq-web-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt_examples = git_rmq rabbitmq-web-mqtt-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_website = git_rmq rabbitmq-website $(current_rmq_ref) $(base_rmq_ref) live master
+dep_toke = git_rmq toke $(current_rmq_ref) $(base_rmq_ref) master
+
+dep_rabbitmq_public_umbrella = git_rmq rabbitmq-public-umbrella $(current_rmq_ref) $(base_rmq_ref) master
+
+# Third-party dependencies version pinning.
+#
+# We do that in this file, which is copied into all projects, to ensure
+# all projects use the same versions. It avoids conflicts and makes it
+# possible to work with rabbitmq-public-umbrella.
+
+dep_accept = hex 0.3.5
+dep_cowboy = hex 2.8.0
+dep_cowlib = hex 2.9.1
+dep_jsx = hex 2.11.0
+dep_lager = hex 3.8.0
+dep_prometheus = git https://github.com/deadtrickster/prometheus.erl.git master
+dep_ra = git https://github.com/rabbitmq/ra.git master
+dep_ranch = hex 1.7.1
+dep_recon = hex 2.5.1
+dep_observer_cli = hex 1.5.4
+dep_stdout_formatter = hex 0.2.4
+dep_sysmon_handler = hex 1.3.0
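+
+# A project may override any of these pins by redefining the variable after
+# this file is included (and before erlang.mk), e.g. (hypothetical version):
+# dep_cowboy = hex 2.8.1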
+
+RABBITMQ_COMPONENTS = amqp_client \
+ amqp10_common \
+ amqp10_client \
+ rabbit \
+ rabbit_common \
+ rabbitmq_amqp1_0 \
+ rabbitmq_auth_backend_amqp \
+ rabbitmq_auth_backend_cache \
+ rabbitmq_auth_backend_http \
+ rabbitmq_auth_backend_ldap \
+ rabbitmq_auth_backend_oauth2 \
+ rabbitmq_auth_mechanism_ssl \
+ rabbitmq_aws \
+ rabbitmq_boot_steps_visualiser \
+ rabbitmq_cli \
+ rabbitmq_codegen \
+ rabbitmq_consistent_hash_exchange \
+ rabbitmq_ct_client_helpers \
+ rabbitmq_ct_helpers \
+ rabbitmq_delayed_message_exchange \
+ rabbitmq_dotnet_client \
+ rabbitmq_event_exchange \
+ rabbitmq_federation \
+ rabbitmq_federation_management \
+ rabbitmq_java_client \
+ rabbitmq_jms_client \
+ rabbitmq_jms_cts \
+ rabbitmq_jms_topic_exchange \
+ rabbitmq_lvc_exchange \
+ rabbitmq_management \
+ rabbitmq_management_agent \
+ rabbitmq_management_exchange \
+ rabbitmq_management_themes \
+ rabbitmq_message_timestamp \
+ rabbitmq_metronome \
+ rabbitmq_mqtt \
+ rabbitmq_objc_client \
+ rabbitmq_peer_discovery_aws \
+ rabbitmq_peer_discovery_common \
+ rabbitmq_peer_discovery_consul \
+ rabbitmq_peer_discovery_etcd \
+ rabbitmq_peer_discovery_k8s \
+ rabbitmq_prometheus \
+ rabbitmq_random_exchange \
+ rabbitmq_recent_history_exchange \
+ rabbitmq_routing_node_stamp \
+ rabbitmq_rtopic_exchange \
+ rabbitmq_server_release \
+ rabbitmq_sharding \
+ rabbitmq_shovel \
+ rabbitmq_shovel_management \
+ rabbitmq_stomp \
+ rabbitmq_stream \
+ rabbitmq_toke \
+ rabbitmq_top \
+ rabbitmq_tracing \
+ rabbitmq_trust_store \
+ rabbitmq_web_dispatch \
+ rabbitmq_web_mqtt \
+ rabbitmq_web_mqtt_examples \
+ rabbitmq_web_stomp \
+ rabbitmq_web_stomp_examples \
+ rabbitmq_website
+
+# By default, Erlang.mk does not rebuild dependencies once they have
+# been compiled, except for those listed in the `$(FORCE_REBUILD)`
+# variable.
+#
+# We want all RabbitMQ components to always be rebuilt: this eases
+# the work on several components at the same time.
+
+FORCE_REBUILD = $(RABBITMQ_COMPONENTS)
+
+# Several components have a custom erlang.mk/build.config, mainly
+# to disable eunit. Therefore, we can't use the top-level project's
+# erlang.mk copy.
+NO_AUTOPATCH += $(RABBITMQ_COMPONENTS)
+
+ifeq ($(origin current_rmq_ref),undefined)
+ifneq ($(wildcard .git),)
+current_rmq_ref := $(shell (\
+ ref=$$(LANG=C git branch --list | awk '/^\* \(.*detached / {ref=$$0; sub(/.*detached [^ ]+ /, "", ref); sub(/\)$$/, "", ref); print ref; exit;} /^\* / {ref=$$0; sub(/^\* /, "", ref); print ref; exit}');\
+ if test "$$(git rev-parse --short HEAD)" != "$$ref"; then echo "$$ref"; fi))
+else
+current_rmq_ref := master
+endif
+endif
+export current_rmq_ref
+
+ifeq ($(origin base_rmq_ref),undefined)
+ifneq ($(wildcard .git),)
+possible_base_rmq_ref := master
+ifeq ($(possible_base_rmq_ref),$(current_rmq_ref))
+base_rmq_ref := $(current_rmq_ref)
+else
+base_rmq_ref := $(shell \
+ (git rev-parse --verify -q master >/dev/null && \
+ git rev-parse --verify -q $(possible_base_rmq_ref) >/dev/null && \
+ git merge-base --is-ancestor $$(git merge-base master HEAD) $(possible_base_rmq_ref) && \
+ echo $(possible_base_rmq_ref)) || \
+ echo master)
+endif
+else
+base_rmq_ref := master
+endif
+endif
+export base_rmq_ref
+
+# Repository URL selection.
+#
+# First, we infer other components' location from the current project
+# repository URL, if it's a Git repository:
+# - We take the "origin" remote URL as the base
+# - The current project name and repository name are replaced by the
+#   target's properties:
+#   e.g. rabbitmq-common is replaced by rabbitmq-codegen
+#   e.g. rabbit_common is replaced by rabbitmq_codegen
+#
+# If cloning from this computed location fails, we fall back to the
+# RabbitMQ upstream on GitHub.
+
+# Macro to transform e.g. "rabbit_common" into "rabbitmq-common".
+rmq_cmp_repo_name = $(word 2,$(dep_$(1)))
+
+# Upstream URL for the current project.
+RABBITMQ_COMPONENT_REPO_NAME := $(call rmq_cmp_repo_name,$(PROJECT))
+RABBITMQ_UPSTREAM_FETCH_URL ?= https://github.com/rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git
+RABBITMQ_UPSTREAM_PUSH_URL ?= git@github.com:rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git
+
+# Current URL for the current project. If this is not a Git clone,
+# default to the upstream Git repository.
+ifneq ($(wildcard .git),)
+git_origin_fetch_url := $(shell git config remote.origin.url)
+git_origin_push_url := $(shell git config remote.origin.pushurl || git config remote.origin.url)
+RABBITMQ_CURRENT_FETCH_URL ?= $(git_origin_fetch_url)
+RABBITMQ_CURRENT_PUSH_URL ?= $(git_origin_push_url)
+else
+RABBITMQ_CURRENT_FETCH_URL ?= $(RABBITMQ_UPSTREAM_FETCH_URL)
+RABBITMQ_CURRENT_PUSH_URL ?= $(RABBITMQ_UPSTREAM_PUSH_URL)
+endif
+
+# Macro to replace the following patterns:
+# 1. /foo.git -> /bar.git
+# 2. /foo -> /bar
+# 3. /foo/ -> /bar/
+subst_repo_name = $(patsubst %/$(1)/%,%/$(2)/%,$(patsubst %/$(1),%/$(2),$(patsubst %/$(1).git,%/$(2).git,$(3))))
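+# e.g. (editor's illustration)
+#   $(call subst_repo_name,rabbitmq-common,rabbitmq-codegen,https://github.com/rabbitmq/rabbitmq-common.git)
+# yields https://github.com/rabbitmq/rabbitmq-codegen.git.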
+
+# Macro to replace both the project's name (e.g. "rabbit_common") and
+# repository name (e.g. "rabbitmq-common") with the target's equivalent.
+#
+# This macro is kept on one line because we don't want whitespace in
+# the returned value, as it's used in $(dep_fetch_git_rmq) in a shell
+# single-quoted string.
+dep_rmq_repo = $(if $(dep_$(2)),$(call subst_repo_name,$(PROJECT),$(2),$(call subst_repo_name,$(RABBITMQ_COMPONENT_REPO_NAME),$(call rmq_cmp_repo_name,$(2)),$(1))),$(pkg_$(1)_repo))
+
+dep_rmq_commits = $(if $(dep_$(1)), \
+ $(wordlist 3,$(words $(dep_$(1))),$(dep_$(1))), \
+ $(pkg_$(1)_commit))
+
+define dep_fetch_git_rmq
+ fetch_url1='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_FETCH_URL),$(1))'; \
+ fetch_url2='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_FETCH_URL),$(1))'; \
+ if test "$$$$fetch_url1" != '$(RABBITMQ_CURRENT_FETCH_URL)' && \
+ git clone -q -n -- "$$$$fetch_url1" $(DEPS_DIR)/$(call dep_name,$(1)); then \
+ fetch_url="$$$$fetch_url1"; \
+ push_url='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_PUSH_URL),$(1))'; \
+ elif git clone -q -n -- "$$$$fetch_url2" $(DEPS_DIR)/$(call dep_name,$(1)); then \
+ fetch_url="$$$$fetch_url2"; \
+ push_url='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_PUSH_URL),$(1))'; \
+ fi; \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && ( \
+ $(foreach ref,$(call dep_rmq_commits,$(1)), \
+ git checkout -q $(ref) >/dev/null 2>&1 || \
+ ) \
+ (echo "error: no valid pathspec among: $(call dep_rmq_commits,$(1))" \
+ 1>&2 && false) ) && \
+ (test "$$$$fetch_url" = "$$$$push_url" || \
+ git remote set-url --push origin "$$$$push_url")
+endef
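+
+# Example (editor's illustration): a dependency declared above as
+#   dep_rabbitmq_web_dispatch = git_rmq rabbitmq-web-dispatch $(current_rmq_ref) $(base_rmq_ref) master
+# makes $(call dep_rmq_commits,rabbitmq_web_dispatch) return those three
+# refs; the checkout loop in $(dep_fetch_git_rmq) tries each one in order
+# and falls back to the next when a ref does not exist in the clone.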
+
+# --------------------------------------------------------------------
+# Component distribution.
+# --------------------------------------------------------------------
+
+list-dist-deps::
+ @:
+
+prepare-dist::
+ @:
+
+# --------------------------------------------------------------------
+# Umbrella-specific settings.
+# --------------------------------------------------------------------
+
+# If the top-level project is a RabbitMQ component, we override
+# $(DEPS_DIR) for this project to point to the top-level's one.
+#
+# We also verify that the guessed DEPS_DIR is actually named `deps`, to
+# rule out any situation where finding a `rabbitmq-components.mk` in an
+# upper directory was merely a coincidence.
+
+possible_deps_dir_1 = $(abspath ..)
+possible_deps_dir_2 = $(abspath ../../..)
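+# Example layouts (editor's illustration), with the current project at:
+#   umbrella/deps/rabbit           -> $(abspath ..)       = umbrella/deps
+#   umbrella/deps/rabbit/deps/foo  -> $(abspath ../../..) = umbrella/deps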
+
+ifeq ($(notdir $(possible_deps_dir_1)),deps)
+ifneq ($(wildcard $(possible_deps_dir_1)/../rabbitmq-components.mk),)
+deps_dir_overriden = 1
+DEPS_DIR ?= $(possible_deps_dir_1)
+DISABLE_DISTCLEAN = 1
+endif
+endif
+
+ifeq ($(deps_dir_overriden),)
+ifeq ($(notdir $(possible_deps_dir_2)),deps)
+ifneq ($(wildcard $(possible_deps_dir_2)/../rabbitmq-components.mk),)
+deps_dir_overriden = 1
+DEPS_DIR ?= $(possible_deps_dir_2)
+DISABLE_DISTCLEAN = 1
+endif
+endif
+endif
+
+ifneq ($(wildcard UMBRELLA.md),)
+DISABLE_DISTCLEAN = 1
+endif
+
+# We disable `make distclean` so $(DEPS_DIR) is not accidentally removed.
+
+ifeq ($(DISABLE_DISTCLEAN),1)
+ifneq ($(filter distclean distclean-deps,$(MAKECMDGOALS)),)
+SKIP_DEPS = 1
+endif
+endif
diff --git a/deps/rabbitmq_prometheus/rabbitmq-disable-metrics-collector.conf b/deps/rabbitmq_prometheus/rabbitmq-disable-metrics-collector.conf
new file mode 100644
index 0000000000..c71af1a7d9
--- /dev/null
+++ b/deps/rabbitmq_prometheus/rabbitmq-disable-metrics-collector.conf
@@ -0,0 +1 @@
+management_agent.disable_metrics_collector = true
diff --git a/deps/rabbitmq_prometheus/src/collectors/prometheus_rabbitmq_core_metrics_collector.erl b/deps/rabbitmq_prometheus/src/collectors/prometheus_rabbitmq_core_metrics_collector.erl
new file mode 100644
index 0000000000..255260627a
--- /dev/null
+++ b/deps/rabbitmq_prometheus/src/collectors/prometheus_rabbitmq_core_metrics_collector.erl
@@ -0,0 +1,531 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+-module(prometheus_rabbitmq_core_metrics_collector).
+-export([register/0,
+ deregister_cleanup/1,
+ collect_mf/2,
+ collect_metrics/2]).
+
+-import(prometheus_model_helpers, [create_mf/4,
+ create_mf/5,
+ gauge_metric/2,
+ counter_metric/2,
+ untyped_metric/2]).
+
+-include_lib("prometheus/include/prometheus.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-behaviour(prometheus_collector).
+
+%% All metrics are reported from RabbitMQ's perspective and are cached for
+%% up to 5 seconds by default (configurable), so we prepend rabbitmq_ to
+%% all metrics emitted by this collector. Some metrics describe Erlang
+%% (erlang_), Mnesia (schema_db_) or the system (io_), as observed by
+%% RabbitMQ.
+-define(METRIC_NAME_PREFIX, "rabbitmq_").
+
+%% ==The source of these metrics can be found in the rabbit_core_metrics module==
+%% The relevant files are:
+%% * rabbit_common/src/rabbit_core_metrics.erl
+%% * rabbit_common/include/rabbit_core_metrics.hrl
+%%
+%% ==How to determine if a metric should be of type GAUGE or COUNTER?==
+%%
+%% * GAUGE if you care about its value rather than its rate of change
+%%   - the value can decrease as well as increase
+%% * COUNTER if you care about the rate of change
+%%   - the value can only increase
+%%
+%% To put it differently, if the metric is used with rate(), it's a COUNTER, otherwise it's a GAUGE.
+%%
+%% More info: https://prometheus.io/docs/practices/instrumentation/#counter-vs-gauge-summary-vs-histogram
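+%%
+%% For example (editor's sketch in PromQL, not part of this module):
+%%   rate(rabbitmq_channel_messages_published_total[60s])  %% COUNTER, used with rate()
+%%   rabbitmq_queue_messages_ready                         %% GAUGE, read as-is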
+
+% Some metrics need to be converted, mostly those that represent time.
+% It is a Prometheus best practice to use specific base units: https://prometheus.io/docs/practices/naming/#base-units
+% Extra context: https://github.com/prometheus/docs/pull/1414#issuecomment-522337895
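+%
+% For example, io_read_time below carries a ?MICROSECOND factor: a raw
+% value of 3000000 microseconds is emitted as 3 (seconds) for
+% rabbitmq_io_read_time_seconds_total (editor's illustration).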
+
+-define(MILLISECOND, 1000).
+-define(MICROSECOND, 1000000).
+
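+%% Each ?METRICS_RAW entry below is either
+%%   {Index, ConversionFactor, Name, Type, Help}
+%% or
+%%   {Index, ConversionFactor, Name, Type, Help, Key}.
+%% Index selects an element of each ETS row; ConversionFactor, when not
+%% undefined, divides the raw value into Prometheus base units; Key, when
+%% present, looks the value up in the proplist stored at that element.
+%% See mf/3 below. (Editor's summary of the existing format.)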
+-define(METRICS_RAW, [
+ {channel_metrics, [
+ {2, undefined, channel_consumers, gauge, "Consumers on a channel", consumer_count},
+ {2, undefined, channel_messages_unacked, gauge, "Delivered but not yet acknowledged messages", messages_unacknowledged},
+ {2, undefined, channel_messages_unconfirmed, gauge, "Published but not yet confirmed messages", messages_unconfirmed},
+ {2, undefined, channel_messages_uncommitted, gauge, "Messages received in a transaction but not yet committed", messages_uncommitted},
+ {2, undefined, channel_acks_uncommitted, gauge, "Message acknowledgements in a transaction not yet committed", acks_uncommitted},
+ {2, undefined, consumer_prefetch, gauge, "Limit of unacknowledged messages for each consumer", prefetch_count},
+ {2, undefined, channel_prefetch, gauge, "Total limit of unacknowledged messages for all consumers on a channel", global_prefetch_count}
+ ]},
+
+ {channel_exchange_metrics, [
+ {2, undefined, channel_messages_published_total, counter, "Total number of messages published into an exchange on a channel"},
+ {3, undefined, channel_messages_confirmed_total, counter, "Total number of messages published into an exchange and confirmed on the channel"},
+ {4, undefined, channel_messages_unroutable_returned_total, counter, "Total number of messages published as mandatory into an exchange and returned to the publisher as unroutable"},
+ {5, undefined, channel_messages_unroutable_dropped_total, counter, "Total number of messages published as non-mandatory into an exchange and dropped as unroutable"}
+ ]},
+
+ {channel_process_metrics, [
+ {2, undefined, channel_process_reductions_total, counter, "Total number of channel process reductions"}
+ ]},
+
+ {channel_queue_metrics, [
+ {2, undefined, channel_get_ack_total, counter, "Total number of messages fetched with basic.get in manual acknowledgement mode"},
+ {3, undefined, channel_get_total, counter, "Total number of messages fetched with basic.get in automatic acknowledgement mode"},
+ {4, undefined, channel_messages_delivered_ack_total, counter, "Total number of messages delivered to consumers in manual acknowledgement mode"},
+ {5, undefined, channel_messages_delivered_total, counter, "Total number of messages delivered to consumers in automatic acknowledgement mode"},
+ {6, undefined, channel_messages_redelivered_total, counter, "Total number of messages redelivered to consumers"},
+ {7, undefined, channel_messages_acked_total, counter, "Total number of messages acknowledged by consumers"},
+ {8, undefined, channel_get_empty_total, counter, "Total number of times basic.get operations fetched no message"}
+ ]},
+
+ {connection_churn_metrics, [
+ {2, undefined, connections_opened_total, counter, "Total number of connections opened"},
+ {3, undefined, connections_closed_total, counter, "Total number of connections closed or terminated"},
+ {4, undefined, channels_opened_total, counter, "Total number of channels opened"},
+ {5, undefined, channels_closed_total, counter, "Total number of channels closed"},
+ {6, undefined, queues_declared_total, counter, "Total number of queues declared"},
+ {7, undefined, queues_created_total, counter, "Total number of queues created"},
+ {8, undefined, queues_deleted_total, counter, "Total number of queues deleted"}
+ ]},
+
+ {connection_coarse_metrics, [
+ {2, undefined, connection_incoming_bytes_total, counter, "Total number of bytes received on a connection"},
+ {3, undefined, connection_outgoing_bytes_total, counter, "Total number of bytes sent on a connection"},
+ {4, undefined, connection_process_reductions_total, counter, "Total number of connection process reductions"}
+ ]},
+
+ {connection_metrics, [
+ {2, undefined, connection_incoming_packets_total, counter, "Total number of packets received on a connection", recv_cnt},
+ {2, undefined, connection_outgoing_packets_total, counter, "Total number of packets sent on a connection", send_cnt},
+ {2, undefined, connection_pending_packets, gauge, "Number of packets waiting to be sent on a connection", send_pend},
+ {2, undefined, connection_channels, gauge, "Channels on a connection", channels}
+ ]},
+
+ {channel_queue_exchange_metrics, [
+ {2, undefined, queue_messages_published_total, counter, "Total number of messages published to queues"}
+ ]},
+
+ {node_coarse_metrics, [
+ {2, undefined, process_open_fds, gauge, "Open file descriptors", fd_used},
+ {2, undefined, process_open_tcp_sockets, gauge, "Open TCP sockets", sockets_used},
+ {2, undefined, process_resident_memory_bytes, gauge, "Memory used in bytes", mem_used},
+ {2, undefined, disk_space_available_bytes, gauge, "Disk space available in bytes", disk_free},
+ {2, undefined, erlang_processes_used, gauge, "Erlang processes used", proc_used},
+ {2, undefined, erlang_gc_runs_total, counter, "Total number of Erlang garbage collector runs", gc_num},
+ {2, undefined, erlang_gc_reclaimed_bytes_total, counter, "Total number of bytes of memory reclaimed by Erlang garbage collector", gc_bytes_reclaimed},
+ {2, undefined, erlang_scheduler_context_switches_total, counter, "Total number of Erlang scheduler context switches", context_switches}
+ ]},
+
+ {node_metrics, [
+ {2, undefined, process_max_fds, gauge, "Open file descriptors limit", fd_total},
+ {2, undefined, process_max_tcp_sockets, gauge, "Open TCP sockets limit", sockets_total},
+ {2, undefined, resident_memory_limit_bytes, gauge, "Memory high watermark in bytes", mem_limit},
+ {2, undefined, disk_space_available_limit_bytes, gauge, "Free disk space low watermark in bytes", disk_free_limit},
+ {2, undefined, erlang_processes_limit, gauge, "Erlang processes limit", proc_total},
+ {2, undefined, erlang_scheduler_run_queue, gauge, "Erlang scheduler run queue", run_queue},
+ {2, undefined, erlang_net_ticktime_seconds, gauge, "Inter-node heartbeat interval", net_ticktime},
+ {2, ?MILLISECOND, erlang_uptime_seconds, gauge, "Node uptime", uptime}
+ ]},
+
+ {node_persister_metrics, [
+ {2, undefined, io_read_ops_total, counter, "Total number of I/O read operations", io_read_count},
+ {2, undefined, io_read_bytes_total, counter, "Total number of I/O bytes read", io_read_bytes},
+ {2, undefined, io_write_ops_total, counter, "Total number of I/O write operations", io_write_count},
+ {2, undefined, io_write_bytes_total, counter, "Total number of I/O bytes written", io_write_bytes},
+ {2, undefined, io_sync_ops_total, counter, "Total number of I/O sync operations", io_sync_count},
+ {2, undefined, io_seek_ops_total, counter, "Total number of I/O seek operations", io_seek_count},
+ {2, undefined, io_open_attempt_ops_total, counter, "Total number of file open attempts", io_file_handle_open_attempt_count},
+ {2, undefined, io_reopen_ops_total, counter, "Total number of times files have been reopened", io_reopen_count},
+ {2, undefined, schema_db_ram_tx_total, counter, "Total number of Schema DB memory transactions", mnesia_ram_tx_count},
+ {2, undefined, schema_db_disk_tx_total, counter, "Total number of Schema DB disk transactions", mnesia_disk_tx_count},
+ {2, undefined, msg_store_read_total, counter, "Total number of Message Store read operations", msg_store_read_count},
+ {2, undefined, msg_store_write_total, counter, "Total number of Message Store write operations", msg_store_write_count},
+ {2, undefined, queue_index_read_ops_total, counter, "Total number of Queue Index read operations", queue_index_read_count},
+ {2, undefined, queue_index_write_ops_total, counter, "Total number of Queue Index write operations", queue_index_write_count},
+ {2, undefined, queue_index_journal_write_ops_total, counter, "Total number of Queue Index Journal write operations", queue_index_journal_write_count},
+ {2, ?MICROSECOND, io_read_time_seconds_total, counter, "Total I/O read time", io_read_time},
+ {2, ?MICROSECOND, io_write_time_seconds_total, counter, "Total I/O write time", io_write_time},
+ {2, ?MICROSECOND, io_sync_time_seconds_total, counter, "Total I/O sync time", io_sync_time},
+ {2, ?MICROSECOND, io_seek_time_seconds_total, counter, "Total I/O seek time", io_seek_time},
+ {2, ?MICROSECOND, io_open_attempt_time_seconds_total, counter, "Total file open attempts time", io_file_handle_open_attempt_time}
+ ]},
+
+ {ra_metrics, [
+ {2, undefined, raft_term_total, counter, "Current Raft term number"},
+ {3, undefined, raft_log_snapshot_index, gauge, "Raft log snapshot index"},
+ {4, undefined, raft_log_last_applied_index, gauge, "Raft log last applied index"},
+ {5, undefined, raft_log_commit_index, gauge, "Raft log commit index"},
+ {6, undefined, raft_log_last_written_index, gauge, "Raft log last written index"},
+ {7, ?MILLISECOND, raft_entry_commit_latency_seconds, gauge, "Time taken for a log entry to be committed"}
+ ]},
+
+ {queue_coarse_metrics, [
+ {2, undefined, queue_messages_ready, gauge, "Messages ready to be delivered to consumers"},
+ {3, undefined, queue_messages_unacked, gauge, "Messages delivered to consumers but not yet acknowledged"},
+ {4, undefined, queue_messages, gauge, "Sum of ready and unacknowledged messages - total queue depth"},
+ {5, undefined, queue_process_reductions_total, counter, "Total number of queue process reductions"}
+ ]},
+
+ {queue_metrics, [
+ {2, undefined, queue_consumers, gauge, "Consumers on a queue", consumers},
+ {2, undefined, queue_consumer_utilisation, gauge, "Consumer utilisation", consumer_utilisation},
+ {2, undefined, queue_process_memory_bytes, gauge, "Memory in bytes used by the Erlang queue process", memory},
+ {2, undefined, queue_messages_ram, gauge, "Ready and unacknowledged messages stored in memory", messages_ram},
+ {2, undefined, queue_messages_ram_bytes, gauge, "Size of ready and unacknowledged messages stored in memory", message_bytes_ram},
+ {2, undefined, queue_messages_ready_ram, gauge, "Ready messages stored in memory", messages_ready_ram},
+ {2, undefined, queue_messages_unacked_ram, gauge, "Unacknowledged messages stored in memory", messages_unacknowledged_ram},
+ {2, undefined, queue_messages_persistent, gauge, "Persistent messages", messages_persistent},
+ {2, undefined, queue_messages_persistent_bytes, gauge, "Size in bytes of persistent messages", message_bytes_persistent},
+ {2, undefined, queue_messages_bytes, gauge, "Size in bytes of ready and unacknowledged messages", message_bytes},
+ {2, undefined, queue_messages_ready_bytes, gauge, "Size in bytes of ready messages", message_bytes_ready},
+ {2, undefined, queue_messages_unacked_bytes, gauge, "Size in bytes of all unacknowledged messages", message_bytes_unacknowledged},
+ {2, undefined, queue_messages_paged_out, gauge, "Messages paged out to disk", messages_paged_out},
+ {2, undefined, queue_messages_paged_out_bytes, gauge, "Size in bytes of messages paged out to disk", message_bytes_paged_out},
+        {2, undefined, queue_disk_reads_total, counter, "Total number of times a queue read messages from disk", disk_reads},
+        {2, undefined, queue_disk_writes_total, counter, "Total number of times a queue wrote messages to disk", disk_writes}
+ ]},
+
+ {auth_attempt_metrics, [
+        {2, undefined, auth_attempts_total, counter, "Total number of authentication attempts on a node"},
+        {3, undefined, auth_attempts_succeeded_total, counter, "Total number of successful authentication attempts on a node"},
+        {4, undefined, auth_attempts_failed_total, counter, "Total number of failed authentication attempts on a node"}
+ ]},
+
+ {auth_attempt_detailed_metrics, [
+        {2, undefined, auth_attempts_total, counter, "Total number of authentication attempts on a node"},
+        {3, undefined, auth_attempts_succeeded_total, counter, "Total number of successful authentication attempts on a node"},
+        {4, undefined, auth_attempts_failed_total, counter, "Total number of failed authentication attempts on a node"}
+ ]}
+
+]).
+
+-define(TOTALS, [
+ %% ordering differs from metrics above, refer to list comprehension
+ {connection_created, connections, gauge, "Connections currently open"},
+ {channel_created, channels, gauge, "Channels currently open"},
+ {consumer_created, consumers, gauge, "Consumers currently connected"},
+ {queue_metrics, queues, gauge, "Queues available"}
+]).
+
+%%====================================================================
+%% Collector API
+%%====================================================================
+
+register() ->
+ ok = prometheus_registry:register_collector(?MODULE).
+
+deregister_cleanup(_) -> ok.
+
+collect_mf(_Registry, Callback) ->
+ {ok, PerObjectMetrics} = application:get_env(rabbitmq_prometheus, return_per_object_metrics),
+ [begin
+ Data = get_data(Table, PerObjectMetrics),
+ mf(Callback, Contents, Data)
+ end || {Table, Contents} <- ?METRICS_RAW, needs_processing(PerObjectMetrics, Table)],
+ [begin
+ Size = ets:info(Table, size),
+ mf_totals(Callback, Name, Type, Help, Size)
+ end || {Table, Name, Type, Help} <- ?TOTALS],
+ add_metric_family(build_info(), Callback),
+ add_metric_family(identity_info(), Callback),
+ ok.
+
+needs_processing(false, auth_attempt_detailed_metrics) ->
+    %% When per-object metrics are disabled, the detailed authentication
+    %% attempt metrics would create duplicates. Totals are already carried
+    %% by `auth_attempt_metrics`.
+ false;
+needs_processing(_, _) ->
+ true.
+
+build_info() ->
+ ProductInfo = rabbit:product_info(),
+ #{product_base_version := BaseVersion} = ProductInfo,
+ {ok, PrometheusPluginVersion} = application:get_key(rabbitmq_prometheus, vsn),
+ {ok, PrometheusClientVersion} = application:get_key(prometheus, vsn),
+ Properties0 = [
+ {rabbitmq_version, BaseVersion},
+ {prometheus_plugin_version, PrometheusPluginVersion},
+ {prometheus_client_version, PrometheusClientVersion},
+ {erlang_version, rabbit_misc:otp_release()}
+ ],
+ Properties1 = case ProductInfo of
+ #{product_version := ProductVersion} ->
+ [{product_version, ProductVersion} | Properties0];
+ _ ->
+ Properties0
+ end,
+ Properties = case ProductInfo of
+ #{product_name := ProductName} ->
+ [{product_name, ProductName} | Properties1];
+ _ ->
+ Properties1
+ end,
+ {
+ build_info,
+ untyped,
+ "RabbitMQ & Erlang/OTP version info",
+ [{
+ Properties,
+ 1
+ }]
+ }.
+
+identity_info() ->
+ {
+ identity_info,
+ untyped,
+ "RabbitMQ node & cluster identity info",
+ [{
+ [
+ {rabbitmq_node, node()},
+ {rabbitmq_cluster, rabbit_nodes:cluster_name()}
+ ],
+ 1
+ }]
+ }.
+
+add_metric_family({Name, Type, Help, Metrics}, Callback) ->
+ Callback(create_mf(?METRIC_NAME(Name), Help, Type, Metrics)).
+
+mf(Callback, Contents, Data) ->
+ [begin
+ Fun = case Conversion of
+ undefined ->
+ fun(D) -> element(Index, D) end;
+ BaseUnitConversionFactor ->
+ fun(D) -> element(Index, D) / BaseUnitConversionFactor end
+ end,
+ Callback(
+ create_mf(
+ ?METRIC_NAME(Name),
+ Help,
+ catch_boolean(Type),
+ ?MODULE,
+ {Type, Fun, Data}
+ )
+ )
+ end || {Index, Conversion, Name, Type, Help} <- Contents],
+ [begin
+ Fun = case Conversion of
+ undefined ->
+ fun(D) -> proplists:get_value(Key, element(Index, D)) end;
+ BaseUnitConversionFactor ->
+ fun(D) -> proplists:get_value(Key, element(Index, D)) / BaseUnitConversionFactor end
+ end,
+ Callback(
+ create_mf(
+ ?METRIC_NAME(Name),
+ Help,
+ catch_boolean(Type),
+ ?MODULE,
+ {Type, Fun, Data}
+ )
+ )
+ end || {Index, Conversion, Name, Type, Help, Key} <- Contents].
+
+mf_totals(Callback, Name, Type, Help, Size) ->
+ Callback(
+ create_mf(
+ ?METRIC_NAME(Name),
+ Help,
+ catch_boolean(Type),
+ Size
+ )
+ ).
+
+collect_metrics(_, {Type, Fun, Items}) ->
+ [metric(Type, labels(Item), Fun(Item)) || Item <- Items].
+
+labels(Item) ->
+ label(element(1, Item)).
+
+label(#resource{virtual_host = VHost, kind = exchange, name = Name}) ->
+ [{vhost, VHost}, {exchange, Name}];
+label(#resource{virtual_host = VHost, kind = queue, name = Name}) ->
+ [{vhost, VHost}, {queue, Name}];
+label({P, {#resource{virtual_host = QVHost, kind = queue, name = QName},
+ #resource{virtual_host = EVHost, kind = exchange, name = EName}}}) when is_pid(P) ->
+ %% channel_queue_exchange_metrics {channel_id, {queue_id, exchange_id}}
+ [{channel, P}, {queue_vhost, QVHost}, {queue, QName},
+ {exchange_vhost, EVHost}, {exchange, EName}];
+label({RemoteAddress, Username, Protocol}) when is_binary(RemoteAddress), is_binary(Username),
+ is_atom(Protocol) ->
+ lists:filter(fun({_, V}) ->
+ V =/= <<>>
+ end, [{remote_address, RemoteAddress}, {username, Username},
+ {protocol, atom_to_binary(Protocol, utf8)}]);
+label({I1, I2}) ->
+ label(I1) ++ label(I2);
+label(P) when is_pid(P) ->
+ [{channel, P}];
+label(A) when is_atom(A) ->
+ case is_protocol(A) of
+ true -> [{protocol, atom_to_binary(A, utf8)}];
+ false -> []
+ end.
+
+is_protocol(P) ->
+ lists:member(P, [amqp091, amqp10, mqtt, http]).
+
+metric(counter, Labels, Value) ->
+ emit_counter_metric_if_defined(Labels, Value);
+metric(gauge, Labels, Value) ->
+ emit_gauge_metric_if_defined(Labels, Value);
+metric(untyped, Labels, Value) ->
+ untyped_metric(Labels, Value);
+metric(boolean, Labels, Value0) ->
+ Value = case Value0 of
+ true -> 1;
+ false -> 0;
+ undefined -> undefined
+ end,
+ untyped_metric(Labels, Value).
+
+%%====================================================================
+%% Private Parts
+%%====================================================================
+catch_boolean(boolean) ->
+ untyped;
+catch_boolean(T) ->
+ T.
+
+emit_counter_metric_if_defined(Labels, Value) ->
+ case Value of
+ undefined -> undefined;
+ '' ->
+ counter_metric(Labels, undefined);
+ Value ->
+ counter_metric(Labels, Value)
+ end.
+
+emit_gauge_metric_if_defined(Labels, Value) ->
+ case Value of
+ undefined -> undefined;
+ '' ->
+ gauge_metric(Labels, undefined);
+ Value ->
+ gauge_metric(Labels, Value)
+ end.
+
+get_data(connection_metrics = Table, false) ->
+ {Table, A1, A2, A3, A4} = ets:foldl(fun({_, Props}, {T, A1, A2, A3, A4}) ->
+ {T,
+ sum(proplists:get_value(recv_cnt, Props), A1),
+ sum(proplists:get_value(send_cnt, Props), A2),
+ sum(proplists:get_value(send_pend, Props), A3),
+ sum(proplists:get_value(channels, Props), A4)}
+ end, empty(Table), Table),
+ [{Table, [{recv_cnt, A1}, {send_cnt, A2}, {send_pend, A3}, {channels, A4}]}];
+get_data(channel_metrics = Table, false) ->
+ {Table, A1, A2, A3, A4, A5, A6, A7} =
+ ets:foldl(fun({_, Props}, {T, A1, A2, A3, A4, A5, A6, A7}) ->
+ {T,
+ sum(proplists:get_value(consumer_count, Props), A1),
+ sum(proplists:get_value(messages_unacknowledged, Props), A2),
+ sum(proplists:get_value(messages_unconfirmed, Props), A3),
+ sum(proplists:get_value(messages_uncommitted, Props), A4),
+ sum(proplists:get_value(acks_uncommitted, Props), A5),
+ sum(proplists:get_value(prefetch_count, Props), A6),
+ sum(proplists:get_value(global_prefetch_count, Props), A7)}
+ end, empty(Table), Table),
+ [{Table, [{consumer_count, A1}, {messages_unacknowledged, A2}, {messages_unconfirmed, A3},
+ {messages_uncommitted, A4}, {acks_uncommitted, A5}, {prefetch_count, A6},
+ {global_prefetch_count, A7}]}];
+get_data(queue_metrics = Table, false) ->
+ {Table, A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, A15, A16} =
+ ets:foldl(fun({_, Props, _}, {T, A1, A2, A3, A4, A5, A6, A7, A8, A9, A10,
+ A11, A12, A13, A14, A15, A16}) ->
+ {T,
+ sum(proplists:get_value(consumers, Props), A1),
+ sum(proplists:get_value(consumer_utilisation, Props), A2),
+ sum(proplists:get_value(memory, Props), A3),
+ sum(proplists:get_value(messages_ram, Props), A4),
+ sum(proplists:get_value(message_bytes_ram, Props), A5),
+ sum(proplists:get_value(messages_ready_ram, Props), A6),
+ sum(proplists:get_value(messages_unacknowledged_ram, Props), A7),
+ sum(proplists:get_value(messages_persistent, Props), A8),
+ sum(proplists:get_value(message_bytes_persistent, Props), A9),
+ sum(proplists:get_value(message_bytes, Props), A10),
+ sum(proplists:get_value(message_bytes_ready, Props), A11),
+ sum(proplists:get_value(message_bytes_unacknowledged, Props), A12),
+ sum(proplists:get_value(messages_paged_out, Props), A13),
+ sum(proplists:get_value(message_bytes_paged_out, Props), A14),
+ sum(proplists:get_value(disk_reads, Props), A15),
+ sum(proplists:get_value(disk_writes, Props), A16)
+ }
+ end, empty(Table), Table),
+ [{Table, [{consumers, A1}, {consumer_utilisation, A2}, {memory, A3}, {messages_ram, A4},
+ {message_bytes_ram, A5}, {messages_ready_ram, A6},
+ {messages_unacknowledged_ram, A7}, {messages_persistent, A8},
+              {message_bytes_persistent, A9}, {message_bytes, A10},
+ {message_bytes_ready, A11}, {message_bytes_unacknowledged, A12},
+ {messages_paged_out, A13}, {message_bytes_paged_out, A14},
+ {disk_reads, A15}, {disk_writes, A16}]}];
+get_data(Table, false) when Table == channel_exchange_metrics;
+ Table == queue_coarse_metrics;
+ Table == channel_queue_metrics;
+ Table == connection_coarse_metrics;
+ Table == channel_queue_exchange_metrics;
+ Table == ra_metrics;
+ Table == channel_process_metrics ->
+ Result = ets:foldl(fun({_, V1}, {T, A1}) ->
+ {T, V1 + A1};
+ ({_, V1, _}, {T, A1}) ->
+ {T, V1 + A1};
+ ({_, V1, V2, V3}, {T, A1, A2, A3}) ->
+ {T, V1 + A1, V2 + A2, V3 + A3};
+ ({_, V1, V2, V3, _}, {T, A1, A2, A3}) ->
+ {T, V1 + A1, V2 + A2, V3 + A3};
+ ({_, V1, V2, V3, V4, _}, {T, A1, A2, A3, A4}) ->
+ {T, V1 + A1, V2 + A2, V3 + A3, V4 + A4};
+ ({_, V1, V2, V3, V4}, {T, A1, A2, A3, A4}) ->
+ {T, V1 + A1, V2 + A2, V3 + A3, V4 + A4};
+ ({_, V1, V2, V3, V4, V5, V6}, {T, A1, A2, A3, A4, A5, A6}) ->
+ %% ra_metrics: raft_entry_commit_latency_seconds needs to be an average
+ {T, V1 + A1, V2 + A2, V3 + A3, V4 + A4, V5 + A5, accumulate_count_and_sum(V6, A6)};
+ ({_, V1, V2, V3, V4, V5, V6, V7, _}, {T, A1, A2, A3, A4, A5, A6, A7}) ->
+ {T, V1 + A1, V2 + A2, V3 + A3, V4 + A4, V5 + A5, V6 + A6, V7 + A7}
+ end, empty(Table), Table),
+ case Table of
+ %% raft_entry_commit_latency_seconds needs to be an average
+ ra_metrics ->
+ {Count, Sum} = element(7, Result),
+ [setelement(7, Result, division(Sum, Count))];
+ _ ->
+ [Result]
+ end;
+get_data(Table, _) ->
+ ets:tab2list(Table).
+
+division(0, 0) ->
+ 0;
+division(A, B) ->
+ A / B.
+
+accumulate_count_and_sum(Value, {Count, Sum}) ->
+ {Count + 1, Sum + Value}.
+
+empty(T) when T == channel_queue_exchange_metrics; T == channel_process_metrics ->
+ {T, 0};
+empty(T) when T == connection_coarse_metrics; T == auth_attempt_metrics; T == auth_attempt_detailed_metrics ->
+ {T, 0, 0, 0};
+empty(T) when T == channel_exchange_metrics; T == queue_coarse_metrics; T == connection_metrics ->
+ {T, 0, 0, 0, 0};
+empty(T) when T == ra_metrics ->
+ {T, 0, 0, 0, 0, 0, {0, 0}};
+empty(T) when T == channel_queue_metrics; T == channel_metrics ->
+ {T, 0, 0, 0, 0, 0, 0, 0};
+empty(queue_metrics = T) ->
+ {T, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}.
+
+sum(undefined, B) ->
+ B;
+sum('', B) ->
+ B;
+sum(A, B) ->
+ A + B.
diff --git a/deps/rabbitmq_prometheus/src/rabbit_prometheus_app.erl b/deps/rabbitmq_prometheus/src/rabbit_prometheus_app.erl
new file mode 100644
index 0000000000..4aa2934b7e
--- /dev/null
+++ b/deps/rabbitmq_prometheus/src/rabbit_prometheus_app.erl
@@ -0,0 +1,134 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_prometheus_app).
+
+-behaviour(application).
+-export([start/2, stop/1]).
+
+-behaviour(supervisor).
+-export([init/1]).
+
+-define(TCP_CONTEXT, rabbitmq_prometheus_tcp).
+-define(TLS_CONTEXT, rabbitmq_prometheus_tls).
+-define(DEFAULT_PORT, 15692).
+-define(DEFAULT_TLS_PORT, 15691).
+
+start(_Type, _StartArgs) ->
+    %% The TCP listener uses prometheus.tcp.*.
+    %% The TLS listener uses prometheus.ssl.*.
+ start_configured_listener(),
+    supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+stop(_State) ->
+ unregister_all_contexts(),
+ ok.
+
+init(_) ->
+ {ok, {{one_for_one, 3, 10}, []}}.
+
+-spec start_configured_listener() -> ok.
+start_configured_listener() ->
+ Listeners0 = case {has_configured_tcp_listener(),
+ has_configured_tls_listener()} of
+ {false, false} ->
+ %% nothing is configured
+ [get_tcp_listener()];
+ {false, true} ->
+ [get_tls_listener()];
+ {true, false} ->
+ [get_tcp_listener()];
+ {true, true} ->
+ [get_tcp_listener(),
+ get_tls_listener()]
+ end,
+ Listeners1 = maybe_disable_sendfile(Listeners0),
+ [start_listener(Listener) || Listener <- Listeners1].
+
+maybe_disable_sendfile(Listeners) ->
+ DisableSendfile = #{sendfile => false},
+ F = fun(L0) ->
+ CowboyOptsL0 = proplists:get_value(cowboy_opts, L0, []),
+ CowboyOptsM0 = maps:from_list(CowboyOptsL0),
+ CowboyOptsM1 = maps:merge(DisableSendfile, CowboyOptsM0),
+ CowboyOptsL1 = maps:to_list(CowboyOptsM1),
+ L1 = lists:keydelete(cowboy_opts, 1, L0),
+ [{cowboy_opts, CowboyOptsL1}|L1]
+ end,
+ lists:map(F, Listeners).
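+%% e.g. (editor's illustration) a listener of
+%%   [{port, 15692}, {cowboy_opts, [{compress, true}]}]
+%% becomes
+%%   [{cowboy_opts, [{compress, true}, {sendfile, false}]}, {port, 15692}]
+%% (proplist order may vary); an explicit sendfile option supplied by the
+%% user wins, since maps:merge/2 favours the second map.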
+
+has_configured_tcp_listener() ->
+ has_configured_listener(tcp_config).
+
+has_configured_tls_listener() ->
+ has_configured_listener(ssl_config).
+
+has_configured_listener(Key) ->
+ case application:get_env(rabbitmq_prometheus, Key, undefined) of
+ undefined -> false;
+ _ -> true
+ end.
+
+get_tls_listener() ->
+ {ok, Listener0} = application:get_env(rabbitmq_prometheus, ssl_config),
+ case proplists:get_value(cowboy_opts, Listener0) of
+ undefined ->
+ [{ssl, true}, {ssl_opts, Listener0}];
+ CowboyOpts ->
+ Listener1 = lists:keydelete(cowboy_opts, 1, Listener0),
+ [{ssl, true}, {ssl_opts, Listener1}, {cowboy_opts, CowboyOpts}]
+ end.
+
+get_tcp_listener() ->
+ application:get_env(rabbitmq_prometheus, tcp_config, []).
+
+start_listener(Listener0) ->
+ {Type, ContextName, Protocol} = case is_tls(Listener0) of
+ true -> {tls, ?TLS_CONTEXT, 'https/prometheus'};
+ false -> {tcp, ?TCP_CONTEXT, 'http/prometheus'}
+ end,
+ {ok, Listener1} = ensure_port_and_protocol(Type, Protocol, Listener0),
+ {ok, _} = register_context(ContextName, Listener1),
+ log_startup(Type, Listener1).
+
+register_context(ContextName, Listener) ->
+ Dispatcher = rabbit_prometheus_dispatcher:build_dispatcher(),
+ rabbit_web_dispatch:register_context_handler(
+ ContextName, Listener, "",
+ Dispatcher, "RabbitMQ Prometheus").
+
+unregister_all_contexts() ->
+ rabbit_web_dispatch:unregister_context(?TCP_CONTEXT),
+ rabbit_web_dispatch:unregister_context(?TLS_CONTEXT).
+
+ensure_port_and_protocol(tls, Protocol, Listener) ->
+ do_ensure_port_and_protocol(?DEFAULT_TLS_PORT, Protocol, Listener);
+ensure_port_and_protocol(tcp, Protocol, Listener) ->
+ do_ensure_port_and_protocol(?DEFAULT_PORT, Protocol, Listener).
+
+do_ensure_port_and_protocol(Port, Protocol, Listener) ->
+    %% Include the default port if it's not provided in the config,
+    %% as Cowboy won't start if the port is missing.
+ M0 = maps:from_list(Listener),
+ M1 = maps:merge(#{port => Port, protocol => Protocol}, M0),
+ {ok, maps:to_list(M1)}.
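+%% e.g. (editor's illustration)
+%%   do_ensure_port_and_protocol(15692, 'http/prometheus', [{ip, "0.0.0.0"}])
+%% returns {ok, L} where L contains {ip, "0.0.0.0"}, {port, 15692} and
+%% {protocol, 'http/prometheus'}; a port already present in the listener
+%% wins, since maps:merge/2 favours the user-supplied map.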
+
+log_startup(tcp, Listener) ->
+ rabbit_log:info("Prometheus metrics: HTTP (non-TLS) listener started on port ~w", [port(Listener)]);
+log_startup(tls, Listener) ->
+ rabbit_log:info("Prometheus metrics: HTTPS listener started on port ~w", [port(Listener)]).
+
+port(Listener) ->
+ proplists:get_value(port, Listener, ?DEFAULT_PORT).
+
+is_tls(Listener) ->
+ case proplists:get_value(ssl, Listener) of
+ undefined -> false;
+ false -> false;
+ _ -> true
+ end.
diff --git a/deps/rabbitmq_prometheus/src/rabbit_prometheus_dispatcher.erl b/deps/rabbitmq_prometheus/src/rabbit_prometheus_dispatcher.erl
new file mode 100644
index 0000000000..910123c005
--- /dev/null
+++ b/deps/rabbitmq_prometheus/src/rabbit_prometheus_dispatcher.erl
@@ -0,0 +1,24 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_prometheus_dispatcher).
+
+-export([build_dispatcher/0]).
+
+-define(DEFAULT_PATH, "/metrics").
+
+build_dispatcher() ->
+ {ok, _} = application:ensure_all_started(prometheus),
+ prometheus_registry:register_collectors([prometheus_rabbitmq_core_metrics_collector]),
+ rabbit_prometheus_handler:setup(),
+ cowboy_router:compile([{'_', dispatcher()}]).
+
+dispatcher() ->
+ [{path() ++ "/[:registry]", rabbit_prometheus_handler, []}].
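+
+%% With the default "/metrics" path the route above reads "/metrics/[:registry]",
+%% matching both GET /metrics and GET /metrics/<registry>; the :registry
+%% segment is optional in Cowboy's route syntax (editor's note).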
+
+path() ->
+ application:get_env(rabbitmq_prometheus, path, ?DEFAULT_PATH).
diff --git a/deps/rabbitmq_prometheus/src/rabbit_prometheus_handler.erl b/deps/rabbitmq_prometheus/src/rabbit_prometheus_handler.erl
new file mode 100644
index 0000000000..bb7a98150d
--- /dev/null
+++ b/deps/rabbitmq_prometheus/src/rabbit_prometheus_handler.erl
@@ -0,0 +1,149 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+-module(rabbit_prometheus_handler).
+
+-export([init/2]).
+-export([generate_response/2, content_types_provided/2, is_authorized/2]).
+-export([setup/0]).
+
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-define(SCRAPE_DURATION, telemetry_scrape_duration_seconds).
+-define(SCRAPE_SIZE, telemetry_scrape_size_bytes).
+-define(SCRAPE_ENCODED_SIZE, telemetry_scrape_encoded_size_bytes).
+
+%% ===================================================================
+%% Cowboy Handler Callbacks
+%% ===================================================================
+
+init(Req, _State) ->
+ {cowboy_rest, Req, #{}}.
+
+content_types_provided(ReqData, Context) ->
+    %% Since Prometheus 2.0, Protobuf is no longer supported.
+ {[{{<<"text">>, <<"plain">>, '*'}, generate_response}], ReqData, Context}.
+
+is_authorized(ReqData, Context) ->
+ {true, ReqData, Context}.
+
+setup() ->
+ TelemetryRegistry = telemetry_registry(),
+
+ ScrapeDuration = [{name, ?SCRAPE_DURATION},
+ {help, "Scrape duration"},
+ {labels, ["registry", "content_type"]},
+ {registry, TelemetryRegistry}],
+ ScrapeSize = [{name, ?SCRAPE_SIZE},
+ {help, "Scrape size, not encoded"},
+ {labels, ["registry", "content_type"]},
+ {registry, TelemetryRegistry}],
+ ScrapeEncodedSize = [{name, ?SCRAPE_ENCODED_SIZE},
+ {help, "Scrape size, encoded"},
+ {labels, ["registry", "content_type", "encoding"]},
+ {registry, TelemetryRegistry}],
+
+ prometheus_summary:declare(ScrapeDuration),
+ prometheus_summary:declare(ScrapeSize),
+ prometheus_summary:declare(ScrapeEncodedSize).
+
+%% ===================================================================
+%% Private functions
+%% ===================================================================
+
+generate_response(ReqData, Context) ->
+ Method = cowboy_req:method(ReqData),
+ Response = gen_response(Method, ReqData),
+ {stop, Response, Context}.
+
+gen_response(<<"GET">>, Request) ->
+ Registry0 = cowboy_req:binding(registry, Request, <<"default">>),
+ case prometheus_registry:exists(Registry0) of
+ false ->
+ cowboy_req:reply(404, #{}, <<"Unknown Registry">>, Request);
+ Registry ->
+ gen_metrics_response(Registry, Request)
+ end;
+gen_response(_, Request) ->
+ Request.
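+%% e.g. GET /metrics resolves the binding to <<"default">>, GET
+%% /metrics/unknown yields the 404 above, and non-GET requests are
+%% returned unmodified (editor's note).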
+
+gen_metrics_response(Registry, Request) ->
+ {Code, RespHeaders, Body} = reply(Registry, Request),
+
+ Headers = to_cowboy_headers(RespHeaders),
+ cowboy_req:reply(Code, maps:from_list(Headers), Body, Request).
+
+to_cowboy_headers(RespHeaders) ->
+ lists:map(fun to_cowboy_headers_/1, RespHeaders).
+
+to_cowboy_headers_({Name, Value}) ->
+ {to_cowboy_name(Name), Value}.
+
+to_cowboy_name(Name) ->
+ binary:replace(atom_to_binary(Name, utf8), <<"_">>, <<"-">>).
+
+reply(Registry, Request) ->
+ case validate_registry(Registry, registry()) of
+ {true, RealRegistry} ->
+ format_metrics(Request, RealRegistry);
+ {registry_conflict, _ReqR, _ConfR} ->
+ {409, [], <<>>};
+ {registry_not_found, _ReqR} ->
+ {404, [], <<>>};
+ false ->
+ false
+ end.
+
+format_metrics(Request, Registry) ->
+ AcceptEncoding = cowboy_req:header(<<"accept-encoding">>, Request, undefined),
+ ContentType = prometheus_text_format:content_type(),
+ Scrape = render_format(ContentType, Registry),
+ Encoding = accept_encoding_header:negotiate(AcceptEncoding, [<<"identity">>,
+ <<"gzip">>]),
+ encode_format(ContentType, binary_to_list(Encoding), Scrape, Registry).
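+%% e.g. a scrape request carrying "Accept-Encoding: gzip" is answered with
+%% a gzip-compressed body and a matching content-encoding header; anything
+%% else falls back to identity encoding (editor's note).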
+
+render_format(ContentType, Registry) ->
+ TelemetryRegistry = telemetry_registry(),
+
+ Scrape = prometheus_summary:observe_duration(
+ TelemetryRegistry,
+ ?SCRAPE_DURATION,
+ [Registry, ContentType],
+ fun () -> prometheus_text_format:format(Registry) end),
+ prometheus_summary:observe(TelemetryRegistry,
+ ?SCRAPE_SIZE,
+ [Registry, ContentType],
+ iolist_size(Scrape)),
+ Scrape.
+
+validate_registry(undefined, auto) ->
+ {true, default};
+validate_registry(Registry, auto) ->
+ {true, Registry};
+validate_registry(Registry, Registry) ->
+ {true, Registry};
+validate_registry(Asked, Conf) ->
+ {registry_conflict, Asked, Conf}.
+
+telemetry_registry() ->
+ application:get_env(rabbitmq_prometheus, telemetry_registry, default).
+
+registry() ->
+ application:get_env(rabbitmq_prometheus, registry, auto).
+
+encode_format(ContentType, Encoding, Scrape, Registry) ->
+ Encoded = encode_format_(Encoding, Scrape),
+ prometheus_summary:observe(telemetry_registry(),
+ ?SCRAPE_ENCODED_SIZE,
+ [Registry, ContentType, Encoding],
+ iolist_size(Encoded)),
+ {200, [{content_type, binary_to_list(ContentType)},
+ {content_encoding, Encoding}], Encoded}.
+
+encode_format_("gzip", Scrape) ->
+ zlib:gzip(Scrape);
+encode_format_("identity", Scrape) ->
+ Scrape.
diff --git a/deps/rabbitmq_prometheus/test/config_schema_SUITE.erl b/deps/rabbitmq_prometheus/test/config_schema_SUITE.erl
new file mode 100644
index 0000000000..97719d9246
--- /dev/null
+++ b/deps/rabbitmq_prometheus/test/config_schema_SUITE.erl
@@ -0,0 +1,54 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(config_schema_SUITE).
+
+-compile(export_all).
+
+all() ->
+ [
+ run_snippets
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ Config1 = rabbit_ct_helpers:run_setup_steps(Config),
+ rabbit_ct_config_schema:init_schemas(rabbitmq_prometheus, Config1).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, Testcase}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_testcase(Testcase, Config) ->
+ Config1 = rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()),
+ rabbit_ct_helpers:testcase_finished(Config1, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+run_snippets(Config) ->
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, run_snippets1, [Config]).
+
+run_snippets1(Config) ->
+ rabbit_ct_config_schema:run_snippets(Config).
diff --git a/deps/rabbitmq_prometheus/test/config_schema_SUITE_data/certs/cacert.pem b/deps/rabbitmq_prometheus/test/config_schema_SUITE_data/certs/cacert.pem
new file mode 100644
index 0000000000..eaf6b67806
--- /dev/null
+++ b/deps/rabbitmq_prometheus/test/config_schema_SUITE_data/certs/cacert.pem
@@ -0,0 +1 @@
+I'm not a certificate
diff --git a/deps/rabbitmq_prometheus/test/config_schema_SUITE_data/certs/cert.pem b/deps/rabbitmq_prometheus/test/config_schema_SUITE_data/certs/cert.pem
new file mode 100644
index 0000000000..eaf6b67806
--- /dev/null
+++ b/deps/rabbitmq_prometheus/test/config_schema_SUITE_data/certs/cert.pem
@@ -0,0 +1 @@
+I'm not a certificate
diff --git a/deps/rabbitmq_prometheus/test/config_schema_SUITE_data/certs/key.pem b/deps/rabbitmq_prometheus/test/config_schema_SUITE_data/certs/key.pem
new file mode 100644
index 0000000000..eaf6b67806
--- /dev/null
+++ b/deps/rabbitmq_prometheus/test/config_schema_SUITE_data/certs/key.pem
@@ -0,0 +1 @@
+I'm not a certificate
diff --git a/deps/rabbitmq_prometheus/test/config_schema_SUITE_data/rabbitmq_prometheus.snippets b/deps/rabbitmq_prometheus/test/config_schema_SUITE_data/rabbitmq_prometheus.snippets
new file mode 100644
index 0000000000..90b1b4c181
--- /dev/null
+++ b/deps/rabbitmq_prometheus/test/config_schema_SUITE_data/rabbitmq_prometheus.snippets
@@ -0,0 +1,280 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+[
+ %%
+ %% Path
+ %%
+
+ {endpoint_path,
+ "prometheus.path = /metriczzz",
+ [{rabbitmq_prometheus,[
+ {path, "/metriczzz"}
+ ]}],
+ [rabbitmq_prometheus]},
+
+ %%
+ %% TCP listener
+ %%
+
+ {tcp_listener_port_only,
+ "prometheus.tcp.port = 15692",
+ [{rabbitmq_prometheus,[
+ {tcp_config,[
+ {port,15692}
+ ]}
+ ]}],
+ [rabbitmq_prometheus]},
+
+ {tcp_listener_interface_port,
+ "prometheus.tcp.ip = 192.168.1.2
+ prometheus.tcp.port = 15692",
+ [{rabbitmq_prometheus,[
+ {tcp_config,[
+ {ip, "192.168.1.2"},
+ {port,15692}
+ ]}
+ ]}],
+ [rabbitmq_prometheus]},
+
+ {tcp_listener_server_opts_compress,
+ "prometheus.tcp.compress = true",
+ [
+ {rabbitmq_prometheus, [
+ {tcp_config, [{cowboy_opts, [{compress, true}]}]}
+ ]}
+ ], [rabbitmq_prometheus]
+ },
+
+ {tcp_listener_server_opts_compress_and_idle_timeout,
+ "prometheus.tcp.compress = true
+ prometheus.tcp.idle_timeout = 123",
+ [
+ {rabbitmq_prometheus, [
+ {tcp_config, [{cowboy_opts, [{compress, true},
+ {idle_timeout, 123}]}]}
+ ]}
+ ], [rabbitmq_prometheus]
+ },
+
+ {tcp_listener_server_opts_compress_and_multiple_timeouts,
+ "prometheus.tcp.compress = true
+ prometheus.tcp.idle_timeout = 123
+ prometheus.tcp.inactivity_timeout = 456
+ prometheus.tcp.request_timeout = 789",
+ [
+ {rabbitmq_prometheus, [
+ {tcp_config, [{cowboy_opts, [{compress, true},
+ {idle_timeout, 123},
+ {inactivity_timeout, 456},
+ {request_timeout, 789}]}]}
+ ]}
+ ], [rabbitmq_prometheus]
+ },
+
+ {tcp_listener_server_opts_multiple_timeouts_only,
+ "prometheus.tcp.idle_timeout = 123
+ prometheus.tcp.inactivity_timeout = 456
+ prometheus.tcp.request_timeout = 789",
+ [
+ {rabbitmq_prometheus, [
+ {tcp_config, [{cowboy_opts, [{idle_timeout, 123},
+ {inactivity_timeout, 456},
+ {request_timeout, 789}]}]}
+ ]}
+ ], [rabbitmq_prometheus]
+ },
+
+ {tcp_listener_server_opts_shutdown_timeout,
+ "prometheus.tcp.shutdown_timeout = 7000",
+ [
+ {rabbitmq_prometheus, [
+ {tcp_config, [{cowboy_opts, [{shutdown_timeout, 7000}]}]}
+ ]}
+ ], [rabbitmq_prometheus]
+ },
+
+ {tcp_listener_server_opts_max_keepalive,
+ "prometheus.tcp.max_keepalive = 120",
+ [
+ {rabbitmq_prometheus, [
+ {tcp_config, [{cowboy_opts, [{max_keepalive, 120}]}]}
+ ]}
+ ], [rabbitmq_prometheus]
+ },
+
+
+ %%
+ %% TLS listener
+ %%
+
+ {tls_listener_port_only,
+ "prometheus.ssl.port = 15691",
+ [{rabbitmq_prometheus,[
+ {ssl_config,[
+ {port,15691}
+ ]}
+ ]}],
+ [rabbitmq_prometheus]},
+
+ {tls_listener_interface_port,
+ "prometheus.ssl.ip = 192.168.1.2
+ prometheus.ssl.port = 15691",
+ [{rabbitmq_prometheus,[
+ {ssl_config,[
+ {ip, "192.168.1.2"},
+ {port,15691}
+ ]}
+ ]}],
+ [rabbitmq_prometheus]},
+
+ {tls_listener,
+ "prometheus.ssl.ip = 192.168.1.2
+ prometheus.ssl.port = 15691
+ prometheus.ssl.cacertfile = test/config_schema_SUITE_data/certs/cacert.pem
+ prometheus.ssl.certfile = test/config_schema_SUITE_data/certs/cert.pem
+ prometheus.ssl.keyfile = test/config_schema_SUITE_data/certs/key.pem
+ prometheus.ssl.verify = verify_none
+ prometheus.ssl.fail_if_no_peer_cert = false",
+ [{rabbitmq_prometheus,[
+ {ssl_config,[
+ {ip, "192.168.1.2"},
+ {port,15691},
+ {cacertfile,"test/config_schema_SUITE_data/certs/cacert.pem"},
+ {certfile,"test/config_schema_SUITE_data/certs/cert.pem"},
+ {keyfile,"test/config_schema_SUITE_data/certs/key.pem"},
+ {verify, verify_none},
+ {fail_if_no_peer_cert, false}
+ ]}
+ ]}],
+ [rabbitmq_prometheus]},
+
+ {tls_listener_cipher_suites,
+ "prometheus.ssl.ip = 192.168.1.2
+ prometheus.ssl.port = 15691
+ prometheus.ssl.cacertfile = test/config_schema_SUITE_data/certs/cacert.pem
+ prometheus.ssl.certfile = test/config_schema_SUITE_data/certs/cert.pem
+ prometheus.ssl.keyfile = test/config_schema_SUITE_data/certs/key.pem
+
+ prometheus.ssl.honor_cipher_order = true
+ prometheus.ssl.honor_ecc_order = true
+ prometheus.ssl.client_renegotiation = false
+ prometheus.ssl.secure_renegotiate = true
+
+ prometheus.ssl.verify = verify_peer
+ prometheus.ssl.fail_if_no_peer_cert = false
+
+ prometheus.ssl.versions.1 = tlsv1.2
+ prometheus.ssl.versions.2 = tlsv1.1
+
+ prometheus.ssl.ciphers.1 = ECDHE-ECDSA-AES256-GCM-SHA384
+ prometheus.ssl.ciphers.2 = ECDHE-RSA-AES256-GCM-SHA384
+ prometheus.ssl.ciphers.3 = ECDHE-ECDSA-AES256-SHA384
+ prometheus.ssl.ciphers.4 = ECDHE-RSA-AES256-SHA384
+ prometheus.ssl.ciphers.5 = ECDH-ECDSA-AES256-GCM-SHA384
+ prometheus.ssl.ciphers.6 = ECDH-RSA-AES256-GCM-SHA384
+ prometheus.ssl.ciphers.7 = ECDH-ECDSA-AES256-SHA384
+ prometheus.ssl.ciphers.8 = ECDH-RSA-AES256-SHA384
+ prometheus.ssl.ciphers.9 = DHE-RSA-AES256-GCM-SHA384",
+ [{rabbitmq_prometheus,[
+ {ssl_config,[
+ {ip, "192.168.1.2"},
+ {port,15691},
+ {cacertfile,"test/config_schema_SUITE_data/certs/cacert.pem"},
+ {certfile,"test/config_schema_SUITE_data/certs/cert.pem"},
+ {keyfile,"test/config_schema_SUITE_data/certs/key.pem"},
+
+ {verify, verify_peer},
+ {fail_if_no_peer_cert, false},
+
+ {honor_cipher_order, true},
+ {honor_ecc_order, true},
+ {client_renegotiation, false},
+ {secure_renegotiate, true},
+
+ {versions,['tlsv1.2','tlsv1.1']},
+ {ciphers, [
+ "ECDHE-ECDSA-AES256-GCM-SHA384",
+ "ECDHE-RSA-AES256-GCM-SHA384",
+ "ECDHE-ECDSA-AES256-SHA384",
+ "ECDHE-RSA-AES256-SHA384",
+ "ECDH-ECDSA-AES256-GCM-SHA384",
+ "ECDH-RSA-AES256-GCM-SHA384",
+ "ECDH-ECDSA-AES256-SHA384",
+ "ECDH-RSA-AES256-SHA384",
+ "DHE-RSA-AES256-GCM-SHA384"
+ ]}
+ ]}
+ ]}],
+ [rabbitmq_prometheus]},
+
+ {tls_listener_server_opts_compress,
+ "prometheus.ssl.compress = true",
+ [
+ {rabbitmq_prometheus, [
+ {ssl_config, [{cowboy_opts, [{compress, true}]}]}
+ ]}
+ ], [rabbitmq_prometheus]
+ },
+
+ {tls_listener_server_opts_compress_and_idle_timeout,
+ "prometheus.ssl.compress = true
+ prometheus.ssl.idle_timeout = 123",
+ [
+ {rabbitmq_prometheus, [
+ {ssl_config, [{cowboy_opts, [{compress, true},
+ {idle_timeout, 123}]}]}
+ ]}
+ ], [rabbitmq_prometheus]
+ },
+
+ {tls_listener_server_opts_compress_and_multiple_timeouts,
+ "prometheus.ssl.compress = true
+ prometheus.ssl.idle_timeout = 123
+ prometheus.ssl.inactivity_timeout = 456
+ prometheus.ssl.request_timeout = 789",
+ [
+ {rabbitmq_prometheus, [
+ {ssl_config, [{cowboy_opts, [{compress, true},
+ {idle_timeout, 123},
+ {inactivity_timeout, 456},
+ {request_timeout, 789}]}]}
+ ]}
+ ], [rabbitmq_prometheus]
+ },
+
+ {tls_listener_server_opts_multiple_timeouts_only,
+ "prometheus.ssl.idle_timeout = 123
+ prometheus.ssl.inactivity_timeout = 456
+ prometheus.ssl.request_timeout = 789",
+ [
+ {rabbitmq_prometheus, [
+ {ssl_config, [{cowboy_opts, [{idle_timeout, 123},
+ {inactivity_timeout, 456},
+ {request_timeout, 789}]}]}
+ ]}
+ ], [rabbitmq_prometheus]
+ },
+
+ {tls_listener_server_opts_shutdown_timeout,
+ "prometheus.ssl.shutdown_timeout = 7000",
+ [
+ {rabbitmq_prometheus, [
+ {ssl_config, [{cowboy_opts, [{shutdown_timeout, 7000}]}]}
+ ]}
+ ], [rabbitmq_prometheus]
+ },
+
+ {tls_listener_server_opts_max_keepalive,
+ "prometheus.ssl.max_keepalive = 120",
+ [
+ {rabbitmq_prometheus, [
+ {ssl_config, [{cowboy_opts, [{max_keepalive, 120}]}]}
+ ]}
+ ], [rabbitmq_prometheus]
+ }
+].
diff --git a/deps/rabbitmq_prometheus/test/config_schema_SUITE_data/schema/rabbitmq_management.schema b/deps/rabbitmq_prometheus/test/config_schema_SUITE_data/schema/rabbitmq_management.schema
new file mode 100644
index 0000000000..e05da0a001
--- /dev/null
+++ b/deps/rabbitmq_prometheus/test/config_schema_SUITE_data/schema/rabbitmq_management.schema
@@ -0,0 +1,436 @@
+%% ----------------------------------------------------------------------------
+%% RabbitMQ Management Plugin
+%%
+%% See https://www.rabbitmq.com/management.html for details
+%% ----------------------------------------------------------------------------
+
+%% Load definitions from a JSON file or directory of files. See
+%% https://www.rabbitmq.com/management.html#load-definitions
+%%
+%% {load_definitions, "/path/to/schema.json"},
+%% {load_definitions, "/path/to/schemas"},
+{mapping, "management.load_definitions", "rabbitmq_management.load_definitions",
+ [{datatype, string},
+ {validators, ["file_accessible"]}]}.
+
+%% Log all requests to the management HTTP API to a file.
+%%
+%% {http_log_dir, "/path/to/access.log"},
+
+{mapping, "management.http_log_dir", "rabbitmq_management.http_log_dir",
+ [{datatype, string}]}.
+
+%% HTTP (TCP) listener options ========================================================
+
+%% HTTP listener consistent with Web STOMP and Web MQTT.
+%%
+%% {tcp_config, [{port, 15672},
+%% {ip, "127.0.0.1"}]}
+
+{mapping, "management.tcp.port", "rabbitmq_management.tcp_config.port",
+ [{datatype, integer}]}.
+{mapping, "management.tcp.ip", "rabbitmq_management.tcp_config.ip",
+ [{datatype, string},
+ {validators, ["is_ip"]}]}.
+
+{mapping, "management.tcp.compress", "rabbitmq_management.tcp_config.cowboy_opts.compress",
+ [{datatype, {enum, [true, false]}}]}.
+{mapping, "management.tcp.idle_timeout", "rabbitmq_management.tcp_config.cowboy_opts.idle_timeout",
+ [{datatype, integer}, {validators, ["non_negative_integer"]}]}.
+{mapping, "management.tcp.inactivity_timeout", "rabbitmq_management.tcp_config.cowboy_opts.inactivity_timeout",
+ [{datatype, integer}, {validators, ["non_negative_integer"]}]}.
+{mapping, "management.tcp.request_timeout", "rabbitmq_management.tcp_config.cowboy_opts.request_timeout",
+ [{datatype, integer}, {validators, ["non_negative_integer"]}]}.
+{mapping, "management.tcp.shutdown_timeout", "rabbitmq_management.tcp_config.cowboy_opts.shutdown_timeout",
+ [{datatype, integer}, {validators, ["non_negative_integer"]}]}.
+{mapping, "management.tcp.max_keepalive", "rabbitmq_management.tcp_config.cowboy_opts.max_keepalive",
+ [{datatype, integer}, {validators, ["non_negative_integer"]}]}.
+
+
+%% HTTPS (TLS) listener options ========================================================
+
+%% HTTPS listener consistent with Web STOMP and Web MQTT.
+%%
+%% {ssl_config, [{port, 15671},
+%% {ip, "127.0.0.1"},
+%% {cacertfile, "/path/to/cacert.pem"},
+%% {certfile, "/path/to/cert.pem"},
+%% {keyfile, "/path/to/key.pem"}]}
+
+{mapping, "management.ssl.port", "rabbitmq_management.ssl_config.port",
+ [{datatype, integer}]}.
+{mapping, "management.ssl.backlog", "rabbitmq_management.ssl_config.backlog",
+ [{datatype, integer}]}.
+{mapping, "management.ssl.ip", "rabbitmq_management.ssl_config.ip",
+ [{datatype, string}, {validators, ["is_ip"]}]}.
+{mapping, "management.ssl.certfile", "rabbitmq_management.ssl_config.certfile",
+ [{datatype, string}, {validators, ["file_accessible"]}]}.
+{mapping, "management.ssl.keyfile", "rabbitmq_management.ssl_config.keyfile",
+ [{datatype, string}, {validators, ["file_accessible"]}]}.
+{mapping, "management.ssl.cacertfile", "rabbitmq_management.ssl_config.cacertfile",
+ [{datatype, string}, {validators, ["file_accessible"]}]}.
+{mapping, "management.ssl.password", "rabbitmq_management.ssl_config.password",
+ [{datatype, string}]}.
+
+{mapping, "management.ssl.verify", "rabbitmq_management.ssl_config.verify", [
+ {datatype, {enum, [verify_peer, verify_none]}}]}.
+
+{mapping, "management.ssl.fail_if_no_peer_cert", "rabbitmq_management.ssl_config.fail_if_no_peer_cert", [
+ {datatype, {enum, [true, false]}}]}.
+
+{mapping, "management.ssl.honor_cipher_order", "rabbitmq_management.ssl_config.honor_cipher_order",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "management.ssl.honor_ecc_order", "rabbitmq_management.ssl_config.honor_ecc_order",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "management.ssl.reuse_sessions", "rabbitmq_management.ssl_config.reuse_sessions",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "management.ssl.secure_renegotiate", "rabbitmq_management.ssl_config.secure_renegotiate",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "management.ssl.client_renegotiation", "rabbitmq_management.ssl_config.client_renegotiation",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "management.ssl.depth", "rabbitmq_management.ssl_config.depth",
+ [{datatype, integer}, {validators, ["byte"]}]}.
+
+{mapping, "management.ssl.versions.$version", "rabbitmq_management.ssl_config.versions",
+ [{datatype, atom}]}.
+
+{translation, "rabbitmq_management.ssl_config.versions",
+fun(Conf) ->
+ Settings = cuttlefish_variable:filter_by_prefix("management.ssl.versions", Conf),
+ [V || {_, V} <- Settings]
+end}.
+
+{mapping, "management.ssl.ciphers.$cipher", "rabbitmq_management.ssl_config.ciphers",
+ [{datatype, string}]}.
+
+{translation, "rabbitmq_management.ssl_config.ciphers",
+fun(Conf) ->
+ Settings = cuttlefish_variable:filter_by_prefix("management.ssl.ciphers", Conf),
+ lists:reverse([V || {_, V} <- Settings])
+end}.
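+
+%% Versions and ciphers are list-style settings: each entry uses a numbered
+%% key, and the translations above collect the values into a list. An
+%% illustrative sketch (the exact version and cipher names are assumptions
+%% that depend on the Erlang/OTP release in use):
+%%
+%% management.ssl.versions.1 = tlsv1.2
+%% management.ssl.versions.2 = tlsv1.1
+%% management.ssl.ciphers.1  = ECDHE-ECDSA-AES256-GCM-SHA384
+%% management.ssl.ciphers.2  = ECDHE-RSA-AES256-GCM-SHA384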
+
+{mapping, "management.ssl.compress", "rabbitmq_management.ssl_config.cowboy_opts.compress",
+ [{datatype, {enum, [true, false]}}]}.
+{mapping, "management.ssl.idle_timeout", "rabbitmq_management.ssl_config.cowboy_opts.idle_timeout",
+ [{datatype, integer}, {validators, ["non_negative_integer"]}]}.
+{mapping, "management.ssl.inactivity_timeout", "rabbitmq_management.ssl_config.cowboy_opts.inactivity_timeout",
+ [{datatype, integer}, {validators, ["non_negative_integer"]}]}.
+{mapping, "management.ssl.request_timeout", "rabbitmq_management.ssl_config.cowboy_opts.request_timeout",
+ [{datatype, integer}, {validators, ["non_negative_integer"]}]}.
+{mapping, "management.ssl.shutdown_timeout", "rabbitmq_management.ssl_config.cowboy_opts.shutdown_timeout",
+ [{datatype, integer}, {validators, ["non_negative_integer"]}]}.
+{mapping, "management.ssl.max_keepalive", "rabbitmq_management.ssl_config.cowboy_opts.max_keepalive",
+ [{datatype, integer}, {validators, ["non_negative_integer"]}]}.
+
+
+
+%% Legacy listener options ========================================================
+
+%% Legacy (pre-3.7.9) TCP listener format.
+%%
+%% {listener, [{port, 12345},
+%% {ip, "127.0.0.1"},
+%% {ssl, true},
+%% {ssl_opts, [{cacertfile, "/path/to/cacert.pem"},
+%% {certfile, "/path/to/cert.pem"},
+%% {keyfile, "/path/to/key.pem"}]}]},
+
+{mapping, "management.listener.port", "rabbitmq_management.listener.port",
+ [{datatype, integer}]}.
+
+{mapping, "management.listener.ip", "rabbitmq_management.listener.ip",
+ [{datatype, string},
+ {validators, ["is_ip"]}]}.
+
+{mapping, "management.listener.ssl", "rabbitmq_management.listener.ssl",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "management.listener.server.compress", "rabbitmq_management.listener.cowboy_opts.compress",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "management.listener.server.idle_timeout", "rabbitmq_management.listener.cowboy_opts.idle_timeout",
+ [{datatype, integer}, {validators, ["non_negative_integer"]}]}.
+
+{mapping, "management.listener.server.inactivity_timeout", "rabbitmq_management.listener.cowboy_opts.inactivity_timeout",
+ [{datatype, integer}, {validators, ["non_negative_integer"]}]}.
+
+{mapping, "management.listener.server.request_timeout", "rabbitmq_management.listener.cowboy_opts.request_timeout",
+ [{datatype, integer}, {validators, ["non_negative_integer"]}]}.
+
+{mapping, "management.listener.server.shutdown_timeout", "rabbitmq_management.listener.cowboy_opts.shutdown_timeout",
+ [{datatype, integer}, {validators, ["non_negative_integer"]}]}.
+
+{mapping, "management.listener.server.max_keepalive", "rabbitmq_management.listener.cowboy_opts.max_keepalive",
+ [{datatype, integer}, {validators, ["non_negative_integer"]}]}.
+
+%% Legacy HTTPS listener options ========================================================
+
+{mapping, "management.listener.ssl_opts", "rabbitmq_management.listener.ssl_opts", [
+ {datatype, {enum, [none]}}
+]}.
+
+{translation, "rabbitmq_management.listener.ssl_opts",
+fun(Conf) ->
+ case cuttlefish:conf_get("management.listener.ssl_opts", Conf, undefined) of
+ none -> [];
+ _ -> cuttlefish:invalid("Invalid management.listener.ssl_opts")
+ end
+end}.
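+
+%% The only accepted literal value is 'none', which the translation above
+%% turns into an empty option list. Illustrative sketch:
+%%
+%% management.listener.ssl_opts = none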
+
+{mapping, "management.listener.ssl_opts.verify", "rabbitmq_management.listener.ssl_opts.verify", [
+ {datatype, {enum, [verify_peer, verify_none]}}]}.
+
+{mapping, "management.listener.ssl_opts.fail_if_no_peer_cert", "rabbitmq_management.listener.ssl_opts.fail_if_no_peer_cert", [
+ {datatype, {enum, [true, false]}}]}.
+
+{mapping, "management.listener.ssl_opts.cacertfile", "rabbitmq_management.listener.ssl_opts.cacertfile",
+ [{datatype, string}, {validators, ["file_accessible"]}]}.
+
+{mapping, "management.listener.ssl_opts.certfile", "rabbitmq_management.listener.ssl_opts.certfile",
+ [{datatype, string}, {validators, ["file_accessible"]}]}.
+
+{mapping, "management.listener.ssl_opts.cacerts.$name", "rabbitmq_management.listener.ssl_opts.cacerts",
+ [{datatype, string}]}.
+
+{translation, "rabbitmq_management.listener.ssl_opts.cacerts",
+fun(Conf) ->
+ Settings = cuttlefish_variable:filter_by_prefix("management.listener.ssl_opts.cacerts", Conf),
+ [ list_to_binary(V) || {_, V} <- Settings ]
+end}.
+
+{mapping, "management.listener.ssl_opts.honor_cipher_order", "rabbitmq_management.listener.ssl_opts.honor_cipher_order",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "management.listener.ssl_opts.honor_ecc_order", "rabbitmq_management.listener.ssl_opts.honor_ecc_order",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "management.listener.ssl_opts.reuse_sessions", "rabbitmq_management.listener.ssl_opts.reuse_sessions",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "management.listener.ssl_opts.secure_renegotiate", "rabbitmq_management.listener.ssl_opts.secure_renegotiate",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "management.listener.ssl_opts.client_renegotiation", "rabbitmq_management.listener.ssl_opts.client_renegotiation",
+ [{datatype, {enum, [true, false]}}]}.
+
+
+{mapping, "management.listener.ssl_opts.versions.$version", "rabbitmq_management.listener.ssl_opts.versions",
+ [{datatype, atom}]}.
+
+{translation, "rabbitmq_management.listener.ssl_opts.versions",
+fun(Conf) ->
+ Settings = cuttlefish_variable:filter_by_prefix("management.listener.ssl_opts.versions", Conf),
+ [ V || {_, V} <- Settings ]
+end}.
+
+
+{mapping, "management.listener.ssl_opts.cert", "rabbitmq_management.listener.ssl_opts.cert",
+ [{datatype, string}]}.
+
+{translation, "rabbitmq_management.listener.ssl_opts.cert",
+fun(Conf) ->
+ list_to_binary(cuttlefish:conf_get("management.listener.ssl_opts.cert", Conf))
+end}.
+
+{mapping, "management.listener.ssl_opts.crl_check", "rabbitmq_management.listener.ssl_opts.crl_check",
+ [{datatype, [{enum, [true, false, peer, best_effort]}]}]}.
+
+{mapping, "management.listener.ssl_opts.depth", "rabbitmq_management.listener.ssl_opts.depth",
+ [{datatype, integer}, {validators, ["byte"]}]}.
+
+{mapping, "management.listener.ssl_opts.dh", "rabbitmq_management.listener.ssl_opts.dh",
+ [{datatype, string}]}.
+
+{translation, "rabbitmq_management.listener.ssl_opts.dh",
+fun(Conf) ->
+ list_to_binary(cuttlefish:conf_get("management.listener.ssl_opts.dh", Conf))
+end}.
+
+{mapping, "management.listener.ssl_opts.dhfile", "rabbitmq_management.listener.ssl_opts.dhfile",
+ [{datatype, string}, {validators, ["file_accessible"]}]}.
+
+{mapping, "management.listener.ssl_opts.key.RSAPrivateKey", "rabbitmq_management.listener.ssl_opts.key",
+ [{datatype, string}]}.
+
+{mapping, "management.listener.ssl_opts.key.DSAPrivateKey", "rabbitmq_management.listener.ssl_opts.key",
+ [{datatype, string}]}.
+
+{mapping, "management.listener.ssl_opts.key.PrivateKeyInfo", "rabbitmq_management.listener.ssl_opts.key",
+ [{datatype, string}]}.
+
+{translation, "rabbitmq_management.listener.ssl_opts.key",
+fun(Conf) ->
+ case cuttlefish_variable:filter_by_prefix("management.listener.ssl_opts.key", Conf) of
+        [{[_,_,_,_,Key], Val}|_] -> {list_to_atom(Key), list_to_binary(Val)};
+ _ -> undefined
+ end
+end}.
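+
+%% The key type is taken from the last segment of the setting name and the
+%% value is converted to a binary. Illustrative sketch (the key payload is a
+%% hypothetical placeholder):
+%%
+%% management.listener.ssl_opts.key.RSAPrivateKey = <base64-encoded key>
+%%
+%% which would translate to {'RSAPrivateKey', <<"...">>}.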
+
+{mapping, "management.listener.ssl_opts.keyfile", "rabbitmq_management.listener.ssl_opts.keyfile",
+ [{datatype, string}, {validators, ["file_accessible"]}]}.
+
+{mapping, "management.listener.ssl_opts.log_alert", "rabbitmq_management.listener.ssl_opts.log_alert",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "management.listener.ssl_opts.password", "rabbitmq_management.listener.ssl_opts.password",
+ [{datatype, string}]}.
+
+{mapping, "management.listener.ssl_opts.psk_identity", "rabbitmq_management.listener.ssl_opts.psk_identity",
+ [{datatype, string}]}.
+
+
+%% A custom path prefix for all HTTP request handlers.
+%%
+%% {path_prefix, "/a/prefix"},
+
+{mapping, "management.path_prefix", "rabbitmq_management.path_prefix",
+ [{datatype, string}]}.
+
+%% Login session timeout in minutes
+
+{mapping, "management.login_session_timeout", "rabbitmq_management.login_session_timeout", [
+ {datatype, integer}, {validators, ["non_negative_integer"]}
+]}.
+
+%% CORS
+
+{mapping, "management.cors.allow_origins", "rabbitmq_management.cors_allow_origins", [
+ {datatype, {enum, [none]}}
+]}.
+
+{mapping, "management.cors.allow_origins.$name", "rabbitmq_management.cors_allow_origins", [
+ {datatype, string}
+]}.
+
+{translation, "rabbitmq_management.cors_allow_origins",
+fun(Conf) ->
+ case cuttlefish:conf_get("management.cors.allow_origins", Conf, undefined) of
+ none -> [];
+ _ ->
+ Settings = cuttlefish_variable:filter_by_prefix("management.cors.allow_origins", Conf),
+ [V || {_, V} <- Settings]
+ end
+end}.
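+
+%% CORS origins are either disabled outright or listed with numbered keys.
+%% An illustrative sketch (the origins are placeholder values):
+%%
+%% management.cors.allow_origins.1 = https://origin1.org
+%% management.cors.allow_origins.2 = https://origin2.org
+%%
+%% or, to allow no origins at all:
+%%
+%% management.cors.allow_origins = none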
+
+
+{mapping, "management.cors.max_age", "rabbitmq_management.cors_max_age", [
+ {datatype, integer}, {validators, ["non_negative_integer"]}
+]}.
+
+{translation, "rabbitmq_management.cors_max_age",
+fun(Conf) ->
+ case cuttlefish:conf_get("management.cors.max_age", Conf, undefined) of
+ undefined -> cuttlefish:unset();
+ Value -> Value
+ end
+end}.
+
+
+%% CSP (https://developer.mozilla.org/en-US/docs/Web/HTTP/CSP)
+
+{mapping, "management.csp.policy", "rabbitmq_management.content_security_policy", [
+ {datatype, string}
+]}.
+
+{translation, "rabbitmq_management.content_security_policy",
+fun(Conf) ->
+ case cuttlefish:conf_get("management.csp.policy", Conf, undefined) of
+ undefined -> cuttlefish:unset();
+ Value -> Value
+ end
+end}.
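+
+%% Illustrative sketch (the policy string is an example value, not a
+%% recommendation):
+%%
+%% management.csp.policy = default-src 'self'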
+
+
+%% HSTS (https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Strict-Transport-Security)
+
+{mapping, "management.hsts.policy", "rabbitmq_management.strict_transport_security", [
+ {datatype, string}
+]}.
+
+{translation, "rabbitmq_management.strict_transport_security",
+fun(Conf) ->
+ case cuttlefish:conf_get("management.hsts.policy", Conf, undefined) of
+ undefined -> cuttlefish:unset();
+ Value -> Value
+ end
+end}.
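+
+%% Illustrative sketch (the header value is an example, not a
+%% recommendation):
+%%
+%% management.hsts.policy = max-age=31536000; includeSubDomains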
+
+%% OAuth 2/SSO access only
+
+{mapping, "management.disable_basic_auth", "rabbitmq_management.disable_basic_auth",
+ [{datatype, {enum, [true, false]}}]}.
+
+%% Management only
+
+{mapping, "management.disable_stats", "rabbitmq_management.disable_management_stats", [
+ {datatype, {enum, [true, false]}}
+]}.
+
+{mapping, "management.enable_queue_totals", "rabbitmq_management.enable_queue_totals", [
+ {datatype, {enum, [true, false]}}]}.
+
+%% ===========================================================================
+%% Authorization
+
+{mapping, "management.enable_uaa", "rabbitmq_management.enable_uaa",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "management.uaa_client_id", "rabbitmq_management.uaa_client_id",
+ [{datatype, string}]}.
+
+{mapping, "management.uaa_location", "rabbitmq_management.uaa_location",
+ [{datatype, string}]}.
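+
+%% Illustrative sketch (the client id and location are placeholder values):
+%%
+%% management.enable_uaa    = true
+%% management.uaa_client_id = rabbit_user_client
+%% management.uaa_location  = https://uaa.example.local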
+
+%% ===========================================================================
+
+
+%% One of 'basic', 'detailed' or 'none'. See
+%% https://www.rabbitmq.com/management.html#fine-stats for more details.
+%% {rates_mode, basic},
+{mapping, "management.rates_mode", "rabbitmq_management.rates_mode",
+ [{datatype, {enum, [basic, detailed, none]}}]}.
+
+%% Configure how long aggregated data (such as message rates and queue
+%% lengths) is retained. Please read the plugin's documentation in
+%% https://www.rabbitmq.com/management.html#configuration for more
+%% details.
+%%
+%% {sample_retention_policies,
+%% [{global, [{60, 5}, {3600, 60}, {86400, 1200}]},
+%% {basic, [{60, 5}, {3600, 60}]},
+%% {detailed, [{10, 5}]}]}
+
+{mapping, "management.sample_retention_policies.$section.$interval",
+ "rabbitmq_management.sample_retention_policies",
+ [{datatype, integer}]}.
+
+{translation, "rabbitmq_management.sample_retention_policies",
+fun(Conf) ->
+ Global = cuttlefish_variable:filter_by_prefix("management.sample_retention_policies.global", Conf),
+ Basic = cuttlefish_variable:filter_by_prefix("management.sample_retention_policies.basic", Conf),
+ Detailed = cuttlefish_variable:filter_by_prefix("management.sample_retention_policies.detailed", Conf),
+ TranslateKey = fun("minute") -> 60;
+ ("hour") -> 3600;
+ ("day") -> 86400;
+ (Other) -> list_to_integer(Other)
+ end,
+ TranslatePolicy = fun(Section) ->
+ [ {TranslateKey(Key), Val} || {[_,_,_,Key], Val} <- Section ]
+ end,
+ [{global, TranslatePolicy(Global)},
+ {basic, TranslatePolicy(Basic)},
+ {detailed, TranslatePolicy(Detailed)}]
+end}.
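+
+%% The $interval key is either "minute", "hour" or "day" (translated to 60,
+%% 3600 and 86400 seconds respectively) or a literal number of seconds.
+%% An illustrative sketch matching the commented example above:
+%%
+%% management.sample_retention_policies.global.minute = 5
+%% management.sample_retention_policies.global.hour   = 60
+%% management.sample_retention_policies.global.day    = 1200
+%% management.sample_retention_policies.basic.minute  = 5
+%% management.sample_retention_policies.basic.hour    = 60
+%% management.sample_retention_policies.detailed.10   = 5
+%%
+%% which translates to:
+%%
+%% [{global,   [{60, 5}, {3600, 60}, {86400, 1200}]},
+%%  {basic,    [{60, 5}, {3600, 60}]},
+%%  {detailed, [{10, 5}]}]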
+
+
+{validator, "is_dir", "is not directory",
+fun(File) ->
+ ReadFile = file:list_dir(File),
+ element(1, ReadFile) == ok
+end}.
diff --git a/deps/rabbitmq_prometheus/test/config_schema_SUITE_data/schema/rabbitmq_management_agent.schema b/deps/rabbitmq_prometheus/test/config_schema_SUITE_data/schema/rabbitmq_management_agent.schema
new file mode 100644
index 0000000000..fa8a76725a
--- /dev/null
+++ b/deps/rabbitmq_prometheus/test/config_schema_SUITE_data/schema/rabbitmq_management_agent.schema
@@ -0,0 +1,4 @@
+%% Agent collectors won't start if metrics collection is disabled; only external stats will be emitted.
+%% The management application will also refuse to start if metrics collection is disabled.
+{mapping, "management_agent.disable_metrics_collector", "rabbitmq_management_agent.disable_metrics_collector",
+ [{datatype, {enum, [true, false]}}]}.
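+
+%% Illustrative new-style (rabbitmq.conf) sketch:
+%%
+%% management_agent.disable_metrics_collector = false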
diff --git a/deps/rabbitmq_prometheus/test/config_schema_SUITE_data/schema/rabbitmq_prometheus.schema b/deps/rabbitmq_prometheus/test/config_schema_SUITE_data/schema/rabbitmq_prometheus.schema
new file mode 100644
index 0000000000..4f1028a2a6
--- /dev/null
+++ b/deps/rabbitmq_prometheus/test/config_schema_SUITE_data/schema/rabbitmq_prometheus.schema
@@ -0,0 +1,116 @@
+%% ----------------------------------------------------------------------------
+%% RabbitMQ Prometheus Plugin
+%%
+%% See https://rabbitmq.com/prometheus.html for details
+%% ----------------------------------------------------------------------------
+
+%% Endpoint path
+{mapping, "prometheus.path", "rabbitmq_prometheus.path",
+ [{datatype, string}]}.
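+
+%% Illustrative sketch; /metrics is the path the plugin's test suite
+%% assumes as the default:
+%%
+%% prometheus.path = /metrics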
+
+%% HTTP (TCP) listener options ========================================================
+
+%% HTTP listener consistent with the management plugin, Web STOMP and Web MQTT.
+%%
+%% {tcp_config, [{port, 15692},
+%% {ip, "127.0.0.1"}]}
+
+{mapping, "prometheus.tcp.port", "rabbitmq_prometheus.tcp_config.port",
+ [{datatype, integer}]}.
+{mapping, "prometheus.tcp.ip", "rabbitmq_prometheus.tcp_config.ip",
+ [{datatype, string},
+ {validators, ["is_ip"]}]}.
+
+{mapping, "prometheus.tcp.compress", "rabbitmq_prometheus.tcp_config.cowboy_opts.compress",
+ [{datatype, {enum, [true, false]}}]}.
+{mapping, "prometheus.tcp.idle_timeout", "rabbitmq_prometheus.tcp_config.cowboy_opts.idle_timeout",
+ [{datatype, integer}, {validators, ["non_negative_integer"]}]}.
+{mapping, "prometheus.tcp.inactivity_timeout", "rabbitmq_prometheus.tcp_config.cowboy_opts.inactivity_timeout",
+ [{datatype, integer}, {validators, ["non_negative_integer"]}]}.
+{mapping, "prometheus.tcp.request_timeout", "rabbitmq_prometheus.tcp_config.cowboy_opts.request_timeout",
+ [{datatype, integer}, {validators, ["non_negative_integer"]}]}.
+{mapping, "prometheus.tcp.shutdown_timeout", "rabbitmq_prometheus.tcp_config.cowboy_opts.shutdown_timeout",
+ [{datatype, integer}, {validators, ["non_negative_integer"]}]}.
+{mapping, "prometheus.tcp.max_keepalive", "rabbitmq_prometheus.tcp_config.cowboy_opts.max_keepalive",
+ [{datatype, integer}, {validators, ["non_negative_integer"]}]}.
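+
+%% An illustrative new-style (rabbitmq.conf) sketch; port 15692 mirrors the
+%% example above, and the timeout value is an assumption, not a default:
+%%
+%% prometheus.tcp.port         = 15692
+%% prometheus.tcp.ip           = 127.0.0.1
+%% prometheus.tcp.idle_timeout = 120000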
+
+
+%% HTTPS (TLS) listener options ========================================================
+
+%% HTTPS listener consistent with the management plugin, Web STOMP and Web MQTT.
+%%
+%% {ssl_config, [{port, 15691},
+%% {ip, "127.0.0.1"},
+%% {cacertfile, "/path/to/cacert.pem"},
+%% {certfile, "/path/to/cert.pem"},
+%% {keyfile, "/path/to/key.pem"}]}
+
+{mapping, "prometheus.ssl.port", "rabbitmq_prometheus.ssl_config.port",
+ [{datatype, integer}]}.
+{mapping, "prometheus.ssl.backlog", "rabbitmq_prometheus.ssl_config.backlog",
+ [{datatype, integer}]}.
+{mapping, "prometheus.ssl.ip", "rabbitmq_prometheus.ssl_config.ip",
+ [{datatype, string}, {validators, ["is_ip"]}]}.
+{mapping, "prometheus.ssl.certfile", "rabbitmq_prometheus.ssl_config.certfile",
+ [{datatype, string}, {validators, ["file_accessible"]}]}.
+{mapping, "prometheus.ssl.keyfile", "rabbitmq_prometheus.ssl_config.keyfile",
+ [{datatype, string}, {validators, ["file_accessible"]}]}.
+{mapping, "prometheus.ssl.cacertfile", "rabbitmq_prometheus.ssl_config.cacertfile",
+ [{datatype, string}, {validators, ["file_accessible"]}]}.
+{mapping, "prometheus.ssl.password", "rabbitmq_prometheus.ssl_config.password",
+ [{datatype, string}]}.
+
+{mapping, "prometheus.ssl.verify", "rabbitmq_prometheus.ssl_config.verify", [
+ {datatype, {enum, [verify_peer, verify_none]}}]}.
+
+{mapping, "prometheus.ssl.fail_if_no_peer_cert", "rabbitmq_prometheus.ssl_config.fail_if_no_peer_cert", [
+ {datatype, {enum, [true, false]}}]}.
+
+{mapping, "prometheus.ssl.honor_cipher_order", "rabbitmq_prometheus.ssl_config.honor_cipher_order",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "prometheus.ssl.honor_ecc_order", "rabbitmq_prometheus.ssl_config.honor_ecc_order",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "prometheus.ssl.reuse_sessions", "rabbitmq_prometheus.ssl_config.reuse_sessions",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "prometheus.ssl.secure_renegotiate", "rabbitmq_prometheus.ssl_config.secure_renegotiate",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "prometheus.ssl.client_renegotiation", "rabbitmq_prometheus.ssl_config.client_renegotiation",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "prometheus.ssl.depth", "rabbitmq_prometheus.ssl_config.depth",
+ [{datatype, integer}, {validators, ["byte"]}]}.
+
+{mapping, "prometheus.ssl.versions.$version", "rabbitmq_prometheus.ssl_config.versions",
+ [{datatype, atom}]}.
+
+{translation, "rabbitmq_prometheus.ssl_config.versions",
+fun(Conf) ->
+ Settings = cuttlefish_variable:filter_by_prefix("prometheus.ssl.versions", Conf),
+ [V || {_, V} <- Settings]
+end}.
+
+{mapping, "prometheus.ssl.ciphers.$cipher", "rabbitmq_prometheus.ssl_config.ciphers",
+ [{datatype, string}]}.
+
+{translation, "rabbitmq_prometheus.ssl_config.ciphers",
+fun(Conf) ->
+ Settings = cuttlefish_variable:filter_by_prefix("prometheus.ssl.ciphers", Conf),
+ lists:reverse([V || {_, V} <- Settings])
+end}.
+
+{mapping, "prometheus.ssl.compress", "rabbitmq_prometheus.ssl_config.cowboy_opts.compress",
+ [{datatype, {enum, [true, false]}}]}.
+{mapping, "prometheus.ssl.idle_timeout", "rabbitmq_prometheus.ssl_config.cowboy_opts.idle_timeout",
+ [{datatype, integer}, {validators, ["non_negative_integer"]}]}.
+{mapping, "prometheus.ssl.inactivity_timeout", "rabbitmq_prometheus.ssl_config.cowboy_opts.inactivity_timeout",
+ [{datatype, integer}, {validators, ["non_negative_integer"]}]}.
+{mapping, "prometheus.ssl.request_timeout", "rabbitmq_prometheus.ssl_config.cowboy_opts.request_timeout",
+ [{datatype, integer}, {validators, ["non_negative_integer"]}]}.
+{mapping, "prometheus.ssl.shutdown_timeout", "rabbitmq_prometheus.ssl_config.cowboy_opts.shutdown_timeout",
+ [{datatype, integer}, {validators, ["non_negative_integer"]}]}.
+{mapping, "prometheus.ssl.max_keepalive", "rabbitmq_prometheus.ssl_config.cowboy_opts.max_keepalive",
+ [{datatype, integer}, {validators, ["non_negative_integer"]}]}.
diff --git a/deps/rabbitmq_prometheus/test/rabbit_prometheus_http_SUITE.erl b/deps/rabbitmq_prometheus/test/rabbit_prometheus_http_SUITE.erl
new file mode 100644
index 0000000000..9ef4a43efa
--- /dev/null
+++ b/deps/rabbitmq_prometheus/test/rabbit_prometheus_http_SUITE.erl
@@ -0,0 +1,282 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_prometheus_http_SUITE).
+
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("rabbitmq_ct_helpers/include/rabbit_mgmt_test.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, default_config},
+ {group, config_path},
+ {group, config_port},
+ {group, aggregated_metrics},
+ {group, per_object_metrics},
+ {group, commercial}
+ ].
+
+groups() ->
+ [
+ {default_config, [], generic_tests()},
+ {config_path, [], generic_tests()},
+ {config_port, [], generic_tests()},
+ {aggregated_metrics, [], [
+ aggregated_metrics_test,
+ specific_erlang_metrics_present_test
+ ]},
+ {per_object_metrics, [], [
+ per_object_metrics_test,
+ specific_erlang_metrics_present_test
+ ]},
+ {commercial, [], [
+ build_info_product_test
+ ]}
+ ].
+
+generic_tests() ->
+ [
+ get_test,
+ content_type_test,
+ encoding_test,
+ gzip_encoding_test,
+ build_info_test,
+ identity_info_test
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+init_per_group(default_config, Config) ->
+ init_per_group(default_config, Config, []);
+init_per_group(config_path, Config0) ->
+ PathConfig = {rabbitmq_prometheus, [{path, "/bunnieshop"}]},
+ Config1 = rabbit_ct_helpers:merge_app_env(Config0, PathConfig),
+ init_per_group(config_path, Config1, [{prometheus_path, "/bunnieshop"}]);
+init_per_group(config_port, Config0) ->
+ PathConfig = {rabbitmq_prometheus, [{tcp_config, [{port, 15772}]}]},
+ Config1 = rabbit_ct_helpers:merge_app_env(Config0, PathConfig),
+ init_per_group(config_port, Config1, [{prometheus_port, 15772}]);
+init_per_group(per_object_metrics, Config0) ->
+ PathConfig = {rabbitmq_prometheus, [{return_per_object_metrics, true}]},
+ Config1 = rabbit_ct_helpers:merge_app_env(Config0, PathConfig),
+ init_per_group(aggregated_metrics, Config1);
+init_per_group(aggregated_metrics, Config0) ->
+ Config1 = rabbit_ct_helpers:merge_app_env(
+ Config0,
+ [{rabbit, [{collect_statistics, coarse}, {collect_statistics_interval, 100}]}]
+ ),
+ Config2 = init_per_group(aggregated_metrics, Config1, []),
+ ok = rabbit_ct_broker_helpers:enable_feature_flag(Config2, quorum_queue),
+
+ A = rabbit_ct_broker_helpers:get_node_config(Config2, 0, nodename),
+ Ch = rabbit_ct_client_helpers:open_channel(Config2, A),
+
+ Q = <<"prometheus_test_queue">>,
+ amqp_channel:call(Ch,
+ #'queue.declare'{queue = Q,
+ durable = true,
+ arguments = [{<<"x-queue-type">>, longstr, <<"quorum">>}]
+ }),
+ amqp_channel:cast(Ch,
+ #'basic.publish'{routing_key = Q},
+ #amqp_msg{payload = <<"msg">>}),
+ timer:sleep(150),
+ {#'basic.get_ok'{}, #amqp_msg{}} = amqp_channel:call(Ch, #'basic.get'{queue = Q}),
+    %% We want to check consumer metrics, so we need at least one consumer bound,
+    %% but we don't care what it does (if anything) as long as the runner process
+    %% does not have to handle the consumer's messages.
+ ConsumerPid = sleeping_consumer(),
+ #'basic.consume_ok'{consumer_tag = CTag} =
+ amqp_channel:subscribe(Ch, #'basic.consume'{queue = Q}, ConsumerPid),
+ timer:sleep(10000),
+
+ Config2 ++ [{channel_pid, Ch}, {queue_name, Q}, {consumer_tag, CTag}, {consumer_pid, ConsumerPid}];
+init_per_group(commercial, Config0) ->
+ ProductConfig = {rabbit, [{product_name, "WolfMQ"}, {product_version, "2020"}]},
+ Config1 = rabbit_ct_helpers:merge_app_env(Config0, ProductConfig),
+ init_per_group(commercial, Config1, []).
+
+init_per_group(Group, Config0, Extra) ->
+ rabbit_ct_helpers:log_environment(),
+ inets:start(),
+ NodeConf = [{rmq_nodename_suffix, Group}] ++ Extra,
+ Config1 = rabbit_ct_helpers:set_config(Config0, NodeConf),
+ rabbit_ct_helpers:run_setup_steps(Config1, rabbit_ct_broker_helpers:setup_steps()
+ ++ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_group(aggregated_metrics, Config) ->
+ Ch = ?config(channel_pid, Config),
+ CTag = ?config(consumer_tag, Config),
+ amqp_channel:call(Ch, #'basic.cancel'{consumer_tag = CTag}),
+ ConsumerPid = ?config(consumer_pid, Config),
+ ConsumerPid ! stop,
+ amqp_channel:call(Ch, #'queue.delete'{queue = ?config(queue_name, Config)}),
+ rabbit_ct_client_helpers:close_channel(Ch),
+ end_per_group_(Config);
+end_per_group(_, Config) ->
+ end_per_group_(Config).
+
+end_per_group_(Config) ->
+ inets:stop(),
+ rabbit_ct_helpers:run_teardown_steps(Config, rabbit_ct_client_helpers:teardown_steps()
+ ++ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+%% a no-op consumer
+sleeping_consumer_loop() ->
+ receive
+ stop -> ok;
+ #'basic.consume_ok'{} -> sleeping_consumer_loop();
+ #'basic.cancel'{} -> sleeping_consumer_loop();
+ {#'basic.deliver'{}, _Payload} -> sleeping_consumer_loop()
+ end.
+
+sleeping_consumer() ->
+ spawn(fun() ->
+ sleeping_consumer_loop()
+ end).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+get_test(Config) ->
+ {_Headers, Body} = http_get_with_pal(Config, [], 200),
+ %% Check that the body looks like a valid response
+ ?assertEqual(match, re:run(Body, "TYPE", [{capture, none}])),
+ Port = proplists:get_value(prometheus_port, Config, 15692),
+ URI = lists:flatten(io_lib:format("http://localhost:~p/metricsooops", [Port])),
+ {ok, {{_, CodeAct, _}, _, _}} = httpc:request(get, {URI, []}, ?HTTPC_OPTS, []),
+ ?assertMatch(404, CodeAct).
+
+content_type_test(Config) ->
+ {Headers, Body} = http_get_with_pal(Config, [{"accept", "text/plain"}], 200),
+ ?assertEqual(match, re:run(proplists:get_value("content-type", Headers),
+ "text/plain", [{capture, none}])),
+ %% Check that the body looks like a valid response
+ ?assertEqual(match, re:run(Body, "^# TYPE", [{capture, none}, multiline])),
+
+ http_get_with_pal(Config, [{"accept", "text/plain, text/html"}], 200),
+ http_get_with_pal(Config, [{"accept", "*/*"}], 200),
+ http_get_with_pal(Config, [{"accept", "text/xdvi"}], 406),
+ http_get_with_pal(Config, [{"accept", "application/vnd.google.protobuf"}], 406).
+
+encoding_test(Config) ->
+ {Headers, Body} = http_get(Config, [{"accept-encoding", "deflate"}], 200),
+ ?assertMatch("identity", proplists:get_value("content-encoding", Headers)),
+ ?assertEqual(match, re:run(Body, "^# TYPE", [{capture, none}, multiline])).
+
+gzip_encoding_test(Config) ->
+ {Headers, Body} = http_get(Config, [{"accept-encoding", "gzip"}], 200),
+ ?assertMatch("gzip", proplists:get_value("content-encoding", Headers)),
+    %% If the body is not gzip-compressed, zlib:gunzip/1 will crash
+ ?assertEqual(match, re:run(zlib:gunzip(Body), "^# TYPE", [{capture, none}, multiline])).
+
+aggregated_metrics_test(Config) ->
+ {_Headers, Body} = http_get_with_pal(Config, [], 200),
+ ?assertEqual(match, re:run(Body, "^# TYPE", [{capture, none}, multiline])),
+ ?assertEqual(match, re:run(Body, "^# HELP", [{capture, none}, multiline])),
+ ?assertEqual(nomatch, re:run(Body, ?config(queue_name, Config), [{capture, none}])),
+ %% Check the first metric value from each ETS table owned by rabbitmq_metrics
+ ?assertEqual(match, re:run(Body, "^rabbitmq_channel_consumers ", [{capture, none}, multiline])),
+ ?assertEqual(match, re:run(Body, "^rabbitmq_channel_messages_published_total ", [{capture, none}, multiline])),
+ ?assertEqual(match, re:run(Body, "^rabbitmq_channel_process_reductions_total ", [{capture, none}, multiline])),
+ ?assertEqual(match, re:run(Body, "^rabbitmq_channel_get_ack_total ", [{capture, none}, multiline])),
+ ?assertEqual(match, re:run(Body, "^rabbitmq_connections_opened_total ", [{capture, none}, multiline])),
+ ?assertEqual(match, re:run(Body, "^rabbitmq_connection_incoming_bytes_total ", [{capture, none}, multiline])),
+ ?assertEqual(match, re:run(Body, "^rabbitmq_connection_incoming_packets_total ", [{capture, none}, multiline])),
+ ?assertEqual(match, re:run(Body, "^rabbitmq_queue_messages_published_total ", [{capture, none}, multiline])),
+ ?assertEqual(match, re:run(Body, "^rabbitmq_process_open_fds ", [{capture, none}, multiline])),
+ ?assertEqual(match, re:run(Body, "^rabbitmq_process_max_fds ", [{capture, none}, multiline])),
+ ?assertEqual(match, re:run(Body, "^rabbitmq_io_read_ops_total ", [{capture, none}, multiline])),
+ ?assertEqual(match, re:run(Body, "^rabbitmq_raft_term_total ", [{capture, none}, multiline])),
+ ?assertEqual(match, re:run(Body, "^rabbitmq_queue_messages_ready ", [{capture, none}, multiline])),
+ ?assertEqual(match, re:run(Body, "^rabbitmq_queue_consumers ", [{capture, none}, multiline])),
+ %% Check the first metric value in each ETS table that requires converting
+ ?assertEqual(match, re:run(Body, "^rabbitmq_erlang_uptime_seconds ", [{capture, none}, multiline])),
+ ?assertEqual(match, re:run(Body, "^rabbitmq_io_read_time_seconds_total ", [{capture, none}, multiline])),
+ %% Check the first TOTALS metric value
+ ?assertEqual(match, re:run(Body, "^rabbitmq_connections ", [{capture, none}, multiline])),
+ %% Check raft_entry_commit_latency_seconds because we are aggregating it
+ ?assertEqual(match, re:run(Body, "^rabbitmq_raft_entry_commit_latency_seconds ", [{capture, none}, multiline])).
+
+per_object_metrics_test(Config) ->
+ {_Headers, Body} = http_get_with_pal(Config, [], 200),
+ ?assertEqual(match, re:run(Body, "^# TYPE", [{capture, none}, multiline])),
+ ?assertEqual(match, re:run(Body, "^# HELP", [{capture, none}, multiline])),
+ ?assertEqual(match, re:run(Body, ?config(queue_name, Config), [{capture, none}])),
+ %% Check the first metric value from each ETS table owned by rabbitmq_metrics
+ ?assertEqual(match, re:run(Body, "^rabbitmq_channel_consumers{", [{capture, none}, multiline])),
+ ?assertEqual(match, re:run(Body, "^rabbitmq_channel_messages_published_total{", [{capture, none}, multiline])),
+ ?assertEqual(match, re:run(Body, "^rabbitmq_channel_process_reductions_total{", [{capture, none}, multiline])),
+ ?assertEqual(match, re:run(Body, "^rabbitmq_channel_get_ack_total{", [{capture, none}, multiline])),
+ ?assertEqual(match, re:run(Body, "^rabbitmq_connections_opened_total ", [{capture, none}, multiline])),
+ ?assertEqual(match, re:run(Body, "^rabbitmq_connection_incoming_bytes_total{", [{capture, none}, multiline])),
+ ?assertEqual(match, re:run(Body, "^rabbitmq_connection_incoming_packets_total{", [{capture, none}, multiline])),
+ ?assertEqual(match, re:run(Body, "^rabbitmq_queue_messages_published_total{", [{capture, none}, multiline])),
+ ?assertEqual(match, re:run(Body, "^rabbitmq_process_open_fds ", [{capture, none}, multiline])),
+ ?assertEqual(match, re:run(Body, "^rabbitmq_process_max_fds ", [{capture, none}, multiline])),
+ ?assertEqual(match, re:run(Body, "^rabbitmq_io_read_ops_total ", [{capture, none}, multiline])),
+ ?assertEqual(match, re:run(Body, "^rabbitmq_raft_term_total{", [{capture, none}, multiline])),
+ ?assertEqual(match, re:run(Body, "^rabbitmq_queue_messages_ready{", [{capture, none}, multiline])),
+ ?assertEqual(match, re:run(Body, "^rabbitmq_queue_consumers{", [{capture, none}, multiline])),
+ %% Check the first metric value in each ETS table that requires converting
+ ?assertEqual(match, re:run(Body, "^rabbitmq_erlang_uptime_seconds ", [{capture, none}, multiline])),
+ ?assertEqual(match, re:run(Body, "^rabbitmq_io_read_time_seconds_total ", [{capture, none}, multiline])),
+ ?assertEqual(match, re:run(Body, "^rabbitmq_raft_entry_commit_latency_seconds{", [{capture, none}, multiline])),
+ %% Check the first TOTALS metric value
+ ?assertEqual(match, re:run(Body, "^rabbitmq_connections ", [{capture, none}, multiline])).
+
+build_info_test(Config) ->
+ {_Headers, Body} = http_get_with_pal(Config, [], 200),
+ ?assertEqual(match, re:run(Body, "^rabbitmq_build_info{", [{capture, none}, multiline])),
+ ?assertEqual(match, re:run(Body, "rabbitmq_version=\"", [{capture, none}])),
+ ?assertEqual(match, re:run(Body, "prometheus_plugin_version=\"", [{capture, none}])),
+ ?assertEqual(match, re:run(Body, "prometheus_client_version=\"", [{capture, none}])),
+ ?assertEqual(match, re:run(Body, "erlang_version=\"", [{capture, none}])).
+
+build_info_product_test(Config) ->
+ {_Headers, Body} = http_get_with_pal(Config, [], 200),
+ ?assertEqual(match, re:run(Body, "product_name=\"WolfMQ\"", [{capture, none}])),
+ ?assertEqual(match, re:run(Body, "product_version=\"2020\"", [{capture, none}])),
+ %% Check that RabbitMQ version is still displayed
+ ?assertEqual(match, re:run(Body, "rabbitmq_version=\"", [{capture, none}])).
+
+identity_info_test(Config) ->
+ {_Headers, Body} = http_get_with_pal(Config, [], 200),
+ ?assertEqual(match, re:run(Body, "^rabbitmq_identity_info{", [{capture, none}, multiline])),
+ ?assertEqual(match, re:run(Body, "rabbitmq_node=", [{capture, none}])),
+ ?assertEqual(match, re:run(Body, "rabbitmq_cluster=", [{capture, none}])).
+
+specific_erlang_metrics_present_test(Config) ->
+ {_Headers, Body} = http_get_with_pal(Config, [], 200),
+ ?assertEqual(match, re:run(Body, "^erlang_vm_dist_node_queue_size_bytes{", [{capture, none}, multiline])).
+
+http_get(Config, ReqHeaders, CodeExp) ->
+ Path = proplists:get_value(prometheus_path, Config, "/metrics"),
+ Port = proplists:get_value(prometheus_port, Config, 15692),
+ URI = lists:flatten(io_lib:format("http://localhost:~p~s", [Port, Path])),
+ {ok, {{_HTTP, CodeAct, _}, Headers, Body}} =
+ httpc:request(get, {URI, ReqHeaders}, ?HTTPC_OPTS, []),
+ ?assertMatch(CodeExp, CodeAct),
+ {Headers, Body}.
+
+http_get_with_pal(Config, ReqHeaders, CodeExp) ->
+ {Headers, Body} = http_get(Config, ReqHeaders, CodeExp),
+    %% Print and log the response body - it makes it easier to find out why a match failed
+ ct:pal(Body),
+ {Headers, Body}.
diff --git a/deps/rabbitmq_random_exchange/.gitignore b/deps/rabbitmq_random_exchange/.gitignore
new file mode 100644
index 0000000000..1dde0fdd27
--- /dev/null
+++ b/deps/rabbitmq_random_exchange/.gitignore
@@ -0,0 +1,19 @@
+.DS_Store
+erl_crash.dump
+.sw?
+.*.sw?
+*.beam
+/.erlang.mk/
+/cover/
+/deps/
+/doc/
+/ebin/
+/escript/
+/escript.lock
+/logs/
+/plugins/
+/plugins.lock
+/sbin/
+/sbin.lock
+
+/rabbitmq_random_exchange.d
diff --git a/deps/rabbitmq_random_exchange/.travis.yml b/deps/rabbitmq_random_exchange/.travis.yml
new file mode 100644
index 0000000000..e62cb25046
--- /dev/null
+++ b/deps/rabbitmq_random_exchange/.travis.yml
@@ -0,0 +1,61 @@
+# vim:sw=2:et:
+
+os: linux
+dist: xenial
+language: elixir
+notifications:
+ email:
+ recipients:
+ - alerts@rabbitmq.com
+ on_success: never
+ on_failure: always
+addons:
+ apt:
+ packages:
+ - awscli
+cache:
+ apt: true
+env:
+ global:
+ - secure: K6zc08NT0Ns4uRkSswv68ZRT8DWc6o5vetZeTyxCRv/YvR0DVM2ypQJDTF3NeRdRY4/nzOghmlUVDWxsjSeQr20kdq0QKq4tvYW2ImovWj9D6mEkuysYhvkbgP1G6HT9wKlj6RPtaXjOsYYdUeg8RFkRoJXxtV1MqBCd1TkxVFh8v5X5JHpy+UcE7G45OlBsK0yoCGJzxdJoucSrtkiNRvEtzICEA2A1I/4PS4VCA0CIoFYpqIK1BwNI/0tfDWd3DIAaOT9rDyfDo65jYJv0/1Wpg5+C9znZ7qpbt8VJfN6YGa1SKxFcZP80a72WWIeiYWvGh/48/JrTGSey4fHIUkppl3q5gAHrgZrIU4TlFTqIDJlSLARF7OpRAsxVabx5TmzBpMSXrEsJ3CafWxHFy4QfzEN+2YvAmlENxbKrGbmLhy4CCN4ExuTGbPRMFupU3JsxVZh06ePuaSTXTgNazI5dIjbwZ484hRXQtf5bk7q/vf8DFPv//CvxEhLil3WVKvUYj7dXDlSznPHqvrhgxZAAoskdr06fYVjYjofEVXXGAh8Mkkwg2RIg4JXR6lJTxHZHz9KTR2rxXTT5wTvuiS0NLj11Qt8wdlyhJlBIroKRBcvtLTIxNHE6R6ijQmx3lcp8tuakh/qSPJNeX1mtKpzv7o/8eeR0sPWUFL48Wn0=
+ - secure: jnbAqLkgUAacsns/vThwsT3BLXc44zkNS79worps5tvchFxOQnK0meVrSU8jtEdG/M+VnaZNKZMukkTKbKwGeFT46wfkbb2gQ+07m7f7rUahc4lleUOL4/Xt/rZBwPsv5pq817rzz4f9EBqjQ2TkdjD0zkcBAxIbujLCyJQg2IavzCqXzTjdfVC9M4/LzQt4Hm5mQwfNGZ7vWzgkRHxHNb3vkQgPXWDinluiC1Hoh6Q+dWoHf22GmwJKtvSD1trn+rjvJv6SO4ZuoY7jUnZzXXrqiy1031X18IwIlonkz6XdGZDyLppTdaUrnoOjBhvMUhnMnWwassTz1/UsvQOe9WLAeHCdKKjfnwuAsql4G+D0WKzp0s0O52gNSo6fajkCPshnWzrTFn7VZJtRWgvuyb2qmvUyFp9KtjucobLks/fcMA9UtPBq8AZ9SARpbbwrT+4LJtrgovXwJVQwwTyS5wSFl70GjymsLZM/LqCu6Rnpytw3j9OJTSSXcO89HStm7jCGPuiMBsKmA7fNhx5y/O85SrjJoE4hjz+lIwsHWju9C5AH08+kMXs6uuf9cahoiD+mo1yVOxAjDTtvMYucbO35rGn79Jhn35E6YqdyKchqiU+S7LRcGSN/mn+XtvnCSHNqindrY0c3q/+mmFXYAxPqi3k4Rm8Vys1Tr1yb7PU=
+
+ # $base_rmq_ref is used by rabbitmq-components.mk to select the
+ # appropriate branch for dependencies.
+ - base_rmq_ref=master
+
+elixir:
+ - '1.9'
+otp_release:
+ - '21.3'
+ - '22.2'
+
+install:
+ # This project being an Erlang one (we just set language to Elixir
+ # to ensure it is installed), we don't want Travis to run mix(1)
+ # automatically as it will break.
+ skip
+
+script:
+ # $current_rmq_ref is also used by rabbitmq-components.mk to select
+ # the appropriate branch for dependencies.
+ - make check-rabbitmq-components.mk
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+ - make xref
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+ - make tests
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+
+after_failure:
+ - |
+ cd "$TRAVIS_BUILD_DIR"
+ if test -d logs && test "$AWS_ACCESS_KEY_ID" && test "$AWS_SECRET_ACCESS_KEY"; then
+ archive_name="$(basename "$TRAVIS_REPO_SLUG")-$TRAVIS_JOB_NUMBER"
+
+ tar -c --transform "s/^logs/${archive_name}/" -f - logs | \
+ xz > "${archive_name}.tar.xz"
+
+ aws s3 cp "${archive_name}.tar.xz" s3://server-release-pipeline/travis-ci-logs/ \
+ --region eu-west-1 \
+ --acl public-read
+ fi
diff --git a/deps/rabbitmq_random_exchange/CODE_OF_CONDUCT.md b/deps/rabbitmq_random_exchange/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000000..08697906fd
--- /dev/null
+++ b/deps/rabbitmq_random_exchange/CODE_OF_CONDUCT.md
@@ -0,0 +1,44 @@
+# Contributor Code of Conduct
+
+As contributors and maintainers of this project, and in the interest of fostering an open
+and welcoming community, we pledge to respect all people who contribute through reporting
+issues, posting feature requests, updating documentation, submitting pull requests or
+patches, and other activities.
+
+We are committed to making participation in this project a harassment-free experience for
+everyone, regardless of level of experience, gender, gender identity and expression,
+sexual orientation, disability, personal appearance, body size, race, ethnicity, age,
+religion, or nationality.
+
+Examples of unacceptable behavior by participants include:
+
+ * The use of sexualized language or imagery
+ * Personal attacks
+ * Trolling or insulting/derogatory comments
+ * Public or private harassment
+  * Publishing others' private information, such as physical or electronic addresses,
+ without explicit permission
+ * Other unethical or unprofessional conduct
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments,
+commits, code, wiki edits, issues, and other contributions that are not aligned to this
+Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors
+that they deem inappropriate, threatening, offensive, or harmful.
+
+By adopting this Code of Conduct, project maintainers commit themselves to fairly and
+consistently applying these principles to every aspect of managing this project. Project
+maintainers who do not follow or enforce the Code of Conduct may be permanently removed
+from the project team.
+
+This Code of Conduct applies both within project spaces and in public spaces when an
+individual is representing the project or its community.
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by
+contacting a project maintainer at [info@rabbitmq.com](mailto:info@rabbitmq.com). All complaints will
+be reviewed and investigated and will result in a response that is deemed necessary and
+appropriate to the circumstances. Maintainers are obligated to maintain confidentiality
+with regard to the reporter of an incident.
+
+This Code of Conduct is adapted from the
+[Contributor Covenant](https://contributor-covenant.org), version 1.3.0, available at
+[contributor-covenant.org/version/1/3/0/](https://contributor-covenant.org/version/1/3/0/)
diff --git a/deps/rabbitmq_random_exchange/CONTRIBUTING.md b/deps/rabbitmq_random_exchange/CONTRIBUTING.md
new file mode 100644
index 0000000000..23a92fef9c
--- /dev/null
+++ b/deps/rabbitmq_random_exchange/CONTRIBUTING.md
@@ -0,0 +1,38 @@
+## Overview
+
+RabbitMQ projects use pull requests to discuss, collaborate on and accept code contributions.
+Pull requests are the primary place for discussing code changes.
+
+## How to Contribute
+
+The process is fairly standard:
+
+ * Fork the repository or repositories you plan on contributing to
+ * Clone [RabbitMQ umbrella repository](https://github.com/rabbitmq/rabbitmq-public-umbrella)
+ * `cd umbrella`, `make co`
+ * Create a branch with a descriptive name in the relevant repositories
+ * Make your changes, run tests, commit with a [descriptive message](https://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html), push to your fork
+ * Submit pull requests with an explanation what has been changed and **why**
+ * Submit a filled out and signed [Contributor Agreement](https://github.com/rabbitmq/ca#how-to-submit) if needed (see below)
+ * Be patient. We will get to your pull request eventually
+
+If what you are going to work on is a substantial change, please first ask the core team
+for their opinion on the [RabbitMQ mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
+
+
+## Code of Conduct
+
+See [CODE_OF_CONDUCT.md](./CODE_OF_CONDUCT.md).
+
+
+## Contributor Agreement
+
+If you want to contribute a non-trivial change, please submit a signed copy of our
+[Contributor Agreement](https://github.com/rabbitmq/ca#how-to-submit) around the time
+you submit your pull request. This will make it much easier (in some cases, possible)
+for the RabbitMQ team at Pivotal to merge your contribution.
+
+
+## Where to Ask Questions
+
+If something isn't clear, feel free to ask on our [mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
diff --git a/deps/rabbitmq_random_exchange/LICENSE b/deps/rabbitmq_random_exchange/LICENSE
new file mode 100644
index 0000000000..41d3b1a29d
--- /dev/null
+++ b/deps/rabbitmq_random_exchange/LICENSE
@@ -0,0 +1,12 @@
+This package, the RabbitMQ Random Exchange, is dual-licensed under
+the Apache License v2 and the Mozilla Public License v2.0.
+
+For the Apache License, please see the file LICENSE-APACHE2.
+
+For the Mozilla Public License, please see the file LICENSE-MPL-RabbitMQ.
+
+For attribution of copyright and other details of provenance, please
+refer to the source code.
+
+If you have any questions regarding licensing, please contact us at
+info@rabbitmq.com.
diff --git a/deps/rabbitmq_random_exchange/LICENSE-APACHE2 b/deps/rabbitmq_random_exchange/LICENSE-APACHE2
new file mode 100644
index 0000000000..5e69524394
--- /dev/null
+++ b/deps/rabbitmq_random_exchange/LICENSE-APACHE2
@@ -0,0 +1,204 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2007-2013 VMware, Inc.
+ Copyright 2011 Jon Brisbin.
+ Copyright 2013-2020 VMware, Inc. or its affiliates.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/deps/rabbitmq_random_exchange/LICENSE-MPL-RabbitMQ b/deps/rabbitmq_random_exchange/LICENSE-MPL-RabbitMQ
new file mode 100644
index 0000000000..14e2f777f6
--- /dev/null
+++ b/deps/rabbitmq_random_exchange/LICENSE-MPL-RabbitMQ
@@ -0,0 +1,373 @@
+Mozilla Public License Version 2.0
+==================================
+
+1. Definitions
+--------------
+
+1.1. "Contributor"
+ means each individual or legal entity that creates, contributes to
+ the creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+ means the combination of the Contributions of others (if any) used
+ by a Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+ means Source Code Form to which the initial Contributor has attached
+ the notice in Exhibit A, the Executable Form of such Source Code
+ Form, and Modifications of such Source Code Form, in each case
+ including portions thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ (a) that the initial Contributor has attached the notice described
+ in Exhibit B to the Covered Software; or
+
+ (b) that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the
+ terms of a Secondary License.
+
+1.6. "Executable Form"
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+ means a work that combines Covered Software with other material, in
+ a separate file or files, that is not Covered Software.
+
+1.8. "License"
+ means this document.
+
+1.9. "Licensable"
+ means having the right to grant, to the maximum extent possible,
+ whether at the time of the initial grant or subsequently, any and
+ all of the rights conveyed by this License.
+
+1.10. "Modifications"
+ means any of the following:
+
+ (a) any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered
+ Software; or
+
+ (b) any new file in Source Code Form that contains any Covered
+ Software.
+
+1.11. "Patent Claims" of a Contributor
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the
+ License, by the making, using, selling, offering for sale, having
+ made, import, or transfer of either its Contributions or its
+ Contributor Version.
+
+1.12. "Secondary License"
+ means either the GNU General Public License, Version 2.0, the GNU
+ Lesser General Public License, Version 2.1, the GNU Affero General
+ Public License, Version 3.0, or any later versions of those
+ licenses.
+
+1.13. "Source Code Form"
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that
+ controls, is controlled by, or is under common control with You. For
+ purposes of this definition, "control" means (a) the power, direct
+ or indirect, to cause the direction or management of such entity,
+ whether by contract or otherwise, or (b) ownership of more than
+ fifty percent (50%) of the outstanding shares or beneficial
+ ownership of such entity.
+
+2. License Grants and Conditions
+--------------------------------
+
+2.1. Grants
+
+Each Contributor hereby grants You a world-wide, royalty-free,
+non-exclusive license:
+
+(a) under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+(b) under Patent Claims of such Contributor to make, use, sell, offer
+ for sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+The licenses granted in Section 2.1 with respect to any Contribution
+become effective for each Contribution on the date the Contributor first
+distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+The licenses granted in this Section 2 are the only rights granted under
+this License. No additional rights or licenses will be implied from the
+distribution or licensing of Covered Software under this License.
+Notwithstanding Section 2.1(b) above, no patent license is granted by a
+Contributor:
+
+(a) for any code that a Contributor has removed from Covered Software;
+ or
+
+(b) for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+(c) under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+This License does not grant any rights in the trademarks, service marks,
+or logos of any Contributor (except as may be necessary to comply with
+the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+No Contributor makes additional grants as a result of Your choice to
+distribute the Covered Software under a subsequent version of this
+License (see Section 10.2) or under the terms of a Secondary License (if
+permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+Each Contributor represents that the Contributor believes its
+Contributions are its original creation(s) or it has sufficient rights
+to grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+This License is not intended to limit any rights You have under
+applicable copyright doctrines of fair use, fair dealing, or other
+equivalents.
+
+2.7. Conditions
+
+Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
+in Section 2.1.
+
+3. Responsibilities
+-------------------
+
+3.1. Distribution of Source Form
+
+All distribution of Covered Software in Source Code Form, including any
+Modifications that You create or to which You contribute, must be under
+the terms of this License. You must inform recipients that the Source
+Code Form of the Covered Software is governed by the terms of this
+License, and how they can obtain a copy of this License. You may not
+attempt to alter or restrict the recipients' rights in the Source Code
+Form.
+
+3.2. Distribution of Executable Form
+
+If You distribute Covered Software in Executable Form then:
+
+(a) such Covered Software must also be made available in Source Code
+ Form, as described in Section 3.1, and You must inform recipients of
+ the Executable Form how they can obtain a copy of such Source Code
+ Form by reasonable means in a timely manner, at a charge no more
+ than the cost of distribution to the recipient; and
+
+(b) You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter
+ the recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+You may create and distribute a Larger Work under terms of Your choice,
+provided that You also comply with the requirements of this License for
+the Covered Software. If the Larger Work is a combination of Covered
+Software with a work governed by one or more Secondary Licenses, and the
+Covered Software is not Incompatible With Secondary Licenses, this
+License permits You to additionally distribute such Covered Software
+under the terms of such Secondary License(s), so that the recipient of
+the Larger Work may, at their option, further distribute the Covered
+Software under the terms of either this License or such Secondary
+License(s).
+
+3.4. Notices
+
+You may not remove or alter the substance of any license notices
+(including copyright notices, patent notices, disclaimers of warranty,
+or limitations of liability) contained within the Source Code Form of
+the Covered Software, except that You may alter any license notices to
+the extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+You may choose to offer, and to charge a fee for, warranty, support,
+indemnity or liability obligations to one or more recipients of Covered
+Software. However, You may do so only on Your own behalf, and not on
+behalf of any Contributor. You must make it absolutely clear that any
+such warranty, support, indemnity, or liability obligation is offered by
+You alone, and You hereby agree to indemnify every Contributor for any
+liability incurred by such Contributor as a result of warranty, support,
+indemnity or liability terms You offer. You may include additional
+disclaimers of warranty and limitations of liability specific to any
+jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+---------------------------------------------------
+
+If it is impossible for You to comply with any of the terms of this
+License with respect to some or all of the Covered Software due to
+statute, judicial order, or regulation then You must: (a) comply with
+the terms of this License to the maximum extent possible; and (b)
+describe the limitations and the code they affect. Such description must
+be placed in a text file included with all distributions of the Covered
+Software under this License. Except to the extent prohibited by statute
+or regulation, such description must be sufficiently detailed for a
+recipient of ordinary skill to be able to understand it.
+
+5. Termination
+--------------
+
+5.1. The rights granted under this License will terminate automatically
+if You fail to comply with any of its terms. However, if You become
+compliant, then the rights granted under this License from a particular
+Contributor are reinstated (a) provisionally, unless and until such
+Contributor explicitly and finally terminates Your grants, and (b) on an
+ongoing basis, if such Contributor fails to notify You of the
+non-compliance by some reasonable means prior to 60 days after You have
+come back into compliance. Moreover, Your grants from a particular
+Contributor are reinstated on an ongoing basis if such Contributor
+notifies You of the non-compliance by some reasonable means, this is the
+first time You have received notice of non-compliance with this License
+from such Contributor, and You become compliant prior to 30 days after
+Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+infringement claim (excluding declaratory judgment actions,
+counter-claims, and cross-claims) alleging that a Contributor Version
+directly or indirectly infringes any patent, then the rights granted to
+You by any and all Contributors for the Covered Software under Section
+2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all
+end user license agreements (excluding distributors and resellers) which
+have been validly granted by You or Your distributors under this License
+prior to termination shall survive termination.
+
+************************************************************************
+* *
+* 6. Disclaimer of Warranty *
+* ------------------------- *
+* *
+* Covered Software is provided under this License on an "as is" *
+* basis, without warranty of any kind, either expressed, implied, or *
+* statutory, including, without limitation, warranties that the *
+* Covered Software is free of defects, merchantable, fit for a *
+* particular purpose or non-infringing. The entire risk as to the *
+* quality and performance of the Covered Software is with You. *
+* Should any Covered Software prove defective in any respect, You *
+* (not any Contributor) assume the cost of any necessary servicing, *
+* repair, or correction. This disclaimer of warranty constitutes an *
+* essential part of this License. No use of any Covered Software is *
+* authorized under this License except under this disclaimer. *
+* *
+************************************************************************
+
+************************************************************************
+* *
+* 7. Limitation of Liability *
+* -------------------------- *
+* *
+* Under no circumstances and under no legal theory, whether tort *
+* (including negligence), contract, or otherwise, shall any *
+* Contributor, or anyone who distributes Covered Software as *
+* permitted above, be liable to You for any direct, indirect, *
+* special, incidental, or consequential damages of any character *
+* including, without limitation, damages for lost profits, loss of *
+* goodwill, work stoppage, computer failure or malfunction, or any *
+* and all other commercial damages or losses, even if such party *
+* shall have been informed of the possibility of such damages. This *
+* limitation of liability shall not apply to liability for death or *
+* personal injury resulting from such party's negligence to the *
+* extent applicable law prohibits such limitation. Some *
+* jurisdictions do not allow the exclusion or limitation of *
+* incidental or consequential damages, so this exclusion and *
+* limitation may not apply to You. *
+* *
+************************************************************************
+
+8. Litigation
+-------------
+
+Any litigation relating to this License may be brought only in the
+courts of a jurisdiction where the defendant maintains its principal
+place of business and such litigation shall be governed by laws of that
+jurisdiction, without reference to its conflict-of-law provisions.
+Nothing in this Section shall prevent a party's ability to bring
+cross-claims or counter-claims.
+
+9. Miscellaneous
+----------------
+
+This License represents the complete agreement concerning the subject
+matter hereof. If any provision of this License is held to be
+unenforceable, such provision shall be reformed only to the extent
+necessary to make it enforceable. Any law or regulation which provides
+that the language of a contract shall be construed against the drafter
+shall not be used to construe this License against a Contributor.
+
+10. Versions of the License
+---------------------------
+
+10.1. New Versions
+
+Mozilla Foundation is the license steward. Except as provided in Section
+10.3, no one other than the license steward has the right to modify or
+publish new versions of this License. Each version will be given a
+distinguishing version number.
+
+10.2. Effect of New Versions
+
+You may distribute the Covered Software under the terms of the version
+of the License under which You originally received the Covered Software,
+or under the terms of any subsequent version published by the license
+steward.
+
+10.3. Modified Versions
+
+If you create software not governed by this License, and you want to
+create a new license for such software, you may create and use a
+modified version of this License if you rename the license and remove
+any references to the name of the license steward (except to note that
+such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+Licenses
+
+If You choose to distribute Source Code Form that is Incompatible With
+Secondary Licenses under the terms of this version of the License, the
+notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+-------------------------------------------
+
+ This Source Code Form is subject to the terms of the Mozilla Public
+ License, v. 2.0. If a copy of the MPL was not distributed with this
+ file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular
+file, then You may include the notice in a location (such as a LICENSE
+file in a relevant directory) where a recipient would be likely to look
+for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+---------------------------------------------------------
+
+ This Source Code Form is "Incompatible With Secondary Licenses", as
+ defined by the Mozilla Public License, v. 2.0.
diff --git a/deps/rabbitmq_random_exchange/Makefile b/deps/rabbitmq_random_exchange/Makefile
new file mode 100644
index 0000000000..3502c5656a
--- /dev/null
+++ b/deps/rabbitmq_random_exchange/Makefile
@@ -0,0 +1,17 @@
+PROJECT = rabbitmq_random_exchange
+PROJECT_DESCRIPTION = RabbitMQ Random Exchange
+
+DEPS = rabbit_common rabbit
+TEST_DEPS = rabbitmq_ct_helpers rabbitmq_ct_client_helpers amqp_client
+
+DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-plugin.mk
+DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk
+
+# FIXME: Use erlang.mk patched for RabbitMQ, while waiting for PRs to be
+# reviewed and merged.
+
+ERLANG_MK_REPO = https://github.com/rabbitmq/erlang.mk.git
+ERLANG_MK_COMMIT = rabbitmq-tmp
+
+include rabbitmq-components.mk
+include erlang.mk
diff --git a/deps/rabbitmq_random_exchange/README.md b/deps/rabbitmq_random_exchange/README.md
new file mode 100644
index 0000000000..3f6f2662de
--- /dev/null
+++ b/deps/rabbitmq_random_exchange/README.md
@@ -0,0 +1,89 @@
+# RabbitMQ Random Exchange Type
+
+This exchange type is for load-balancing among consumers. It behaves like
+a direct exchange, except that instead of every queue bound with a matching
+routing key receiving a copy of each message, the exchange picks a single
+bound queue at random to route to.
+
+There is no weighting, so "balancing" may be a bit of a misnomer. It uses
+Erlang's crypto:rand_uniform/2 function, if you're interested.
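+
+To make that behavior concrete, the routing decision amounts to picking one
+element, uniformly at random, from the list of queues bound to the exchange.
+Below is a minimal, self-contained Erlang sketch of the idea — not the
+plugin's actual source, and it uses the stdlib `rand` module rather than
+`crypto`; the module and function names are illustrative only:
+
+```erlang
+-module(random_pick).
+-export([pick/1]).
+
+%% Return one element of Queues chosen uniformly at random,
+%% or 'none' when nothing is bound.
+pick([]) ->
+    none;
+pick(Queues) ->
+    %% rand:uniform(N) returns an integer in 1..N; lists:nth/2 is 1-indexed.
+    lists:nth(rand:uniform(length(Queues)), Queues).
+```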
+
+## Installation
+
+Install the corresponding .ez files from our
+[GitHub releases](https://github.com/rabbitmq/rabbitmq-random-exchange/releases) or [Community Plugins page](https://www.rabbitmq.com/community-plugins.html).
+
+Then run the following command:
+
+```bash
+rabbitmq-plugins enable rabbitmq_random_exchange
+```
+
+## Building from Source
+
+Please see [RabbitMQ Plugin Development guide](https://www.rabbitmq.com/plugin-development.html).
+
+To build the plugin:
+
+ git clone git://github.com/rabbitmq/rabbitmq-random-exchange.git
+ cd rabbitmq-random-exchange
+ make
+
+Then copy all the `*.ez` files inside the `plugins` folder to the [RabbitMQ plugins directory](https://www.rabbitmq.com/relocate.html)
+and enable the plugin:
+
+ [sudo] rabbitmq-plugins enable rabbitmq_random_exchange
+
+
+## Usage
+
+To create a _random_ exchange, just declare an exchange with the type `"x-random"`.
+
+```java
+channel.exchangeDeclare("logs", "x-random");
+```
+
+and bind several queues to it. Routing keys are ignored by modern releases
+of this plugin: the binding, and therefore its target queue (or exchange), is
+picked entirely at random.
+
+### Extended Example
+
+This example uses [Bunny](http://rubybunny.info) and demonstrates
+how the plugin is meant to be used with three queues:
+
+``` ruby
+#!/usr/bin/env ruby
+
+require 'bundler'
+Bundler.setup(:default)
+require 'bunny'
+
+c = Bunny.new; c.start
+ch = c.create_channel
+
+rx = ch.exchange("x.rnd", durable: true, type: "x-random")
+
+q1 = ch.queue("r1").bind(rx)
+q2 = ch.queue("r2").bind(rx)
+q3 = ch.queue("r3").bind(rx)
+
+ch.confirm_select
+
+100.times do
+ rx.publish(rand.to_s, routing_key: rand.to_s)
+end
+
+ch.wait_for_confirms
+
+# the consumer part is left out: see the
+# management UI, or the consumer sketch below
+
+puts "Done"
+```
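+
+For completeness, here is a hypothetical consumer counterpart written with
+the Erlang `amqp_client` library found elsewhere in this repository. The
+module name, queue name, and default connection parameters are assumptions
+chosen to match the Ruby example above; treat it as a sketch, not part of
+the plugin:
+
+```erlang
+-module(random_consumer).
+-export([consume_one/1]).
+
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+%% Consume and print a single message from Queue, e.g. <<"r1">>.
+consume_one(Queue) ->
+    {ok, Conn} = amqp_connection:start(#amqp_params_network{}),
+    {ok, Ch} = amqp_connection:open_channel(Conn),
+    amqp_channel:subscribe(Ch, #'basic.consume'{queue = Queue,
+                                                no_ack = true}, self()),
+    %% The client sends us a basic.consume_ok before any deliveries.
+    receive #'basic.consume_ok'{} -> ok end,
+    receive
+        {#'basic.deliver'{}, #amqp_msg{payload = Payload}} ->
+            io:format("received: ~s~n", [Payload])
+    end,
+    amqp_connection:close(Conn),
+    ok.
+```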
+
+
+## License
+
+See [LICENSE](./LICENSE).
diff --git a/deps/rabbitmq_random_exchange/erlang.mk b/deps/rabbitmq_random_exchange/erlang.mk
new file mode 100644
index 0000000000..fce4be0b0a
--- /dev/null
+++ b/deps/rabbitmq_random_exchange/erlang.mk
@@ -0,0 +1,7808 @@
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+.PHONY: all app apps deps search rel relup docs install-docs check tests clean distclean help erlang-mk
+
+ERLANG_MK_FILENAME := $(realpath $(lastword $(MAKEFILE_LIST)))
+export ERLANG_MK_FILENAME
+
+ERLANG_MK_VERSION = 2019.07.01-40-geb3e4b0
+ERLANG_MK_WITHOUT =
+
+# Make 3.81 and 3.82 are deprecated.
+
+ifeq ($(MAKELEVEL)$(MAKE_VERSION),03.81)
+$(warning Please upgrade to GNU Make 4 or later: https://erlang.mk/guide/installation.html)
+endif
+
+ifeq ($(MAKELEVEL)$(MAKE_VERSION),03.82)
+$(warning Please upgrade to GNU Make 4 or later: https://erlang.mk/guide/installation.html)
+endif
+
+# Core configuration.
+
+PROJECT ?= $(notdir $(CURDIR))
+PROJECT := $(strip $(PROJECT))
+
+PROJECT_VERSION ?= rolling
+PROJECT_MOD ?= $(PROJECT)_app
+PROJECT_ENV ?= []
+
+# Verbosity.
+
+V ?= 0
+
+verbose_0 = @
+verbose_2 = set -x;
+verbose = $(verbose_$(V))
+
+ifeq ($(V),3)
+SHELL := $(SHELL) -x
+endif
+
+gen_verbose_0 = @echo " GEN " $@;
+gen_verbose_2 = set -x;
+gen_verbose = $(gen_verbose_$(V))
+
+gen_verbose_esc_0 = @echo " GEN " $$@;
+gen_verbose_esc_2 = set -x;
+gen_verbose_esc = $(gen_verbose_esc_$(V))
+
+# Temporary files directory.
+
+ERLANG_MK_TMP ?= $(CURDIR)/.erlang.mk
+export ERLANG_MK_TMP
+
+# "erl" command.
+
+ERL = erl +A1 -noinput -boot no_dot_erlang
+
+# Platform detection.
+
+ifeq ($(PLATFORM),)
+UNAME_S := $(shell uname -s)
+
+ifeq ($(UNAME_S),Linux)
+PLATFORM = linux
+else ifeq ($(UNAME_S),Darwin)
+PLATFORM = darwin
+else ifeq ($(UNAME_S),SunOS)
+PLATFORM = solaris
+else ifeq ($(UNAME_S),GNU)
+PLATFORM = gnu
+else ifeq ($(UNAME_S),FreeBSD)
+PLATFORM = freebsd
+else ifeq ($(UNAME_S),NetBSD)
+PLATFORM = netbsd
+else ifeq ($(UNAME_S),OpenBSD)
+PLATFORM = openbsd
+else ifeq ($(UNAME_S),DragonFly)
+PLATFORM = dragonfly
+else ifeq ($(shell uname -o),Msys)
+PLATFORM = msys2
+else
+$(error Unable to detect platform. Please open a ticket with the output of uname -a.)
+endif
+
+export PLATFORM
+endif
+
+# Core targets.
+
+all:: deps app rel
+
+# Noop to avoid a Make warning when there's nothing to do.
+rel::
+ $(verbose) :
+
+relup:: deps app
+
+check:: tests
+
+clean:: clean-crashdump
+
+clean-crashdump:
+ifneq ($(wildcard erl_crash.dump),)
+ $(gen_verbose) rm -f erl_crash.dump
+endif
+
+distclean:: clean distclean-tmp
+
+$(ERLANG_MK_TMP):
+ $(verbose) mkdir -p $(ERLANG_MK_TMP)
+
+distclean-tmp:
+ $(gen_verbose) rm -rf $(ERLANG_MK_TMP)
+
+help::
+ $(verbose) printf "%s\n" \
+ "erlang.mk (version $(ERLANG_MK_VERSION)) is distributed under the terms of the ISC License." \
+ "Copyright (c) 2013-2016 Loïc Hoguin <essen@ninenines.eu>" \
+ "" \
+ "Usage: [V=1] $(MAKE) [target]..." \
+ "" \
+ "Core targets:" \
+ " all Run deps, app and rel targets in that order" \
+ " app Compile the project" \
+ " deps Fetch dependencies (if needed) and compile them" \
+ " fetch-deps Fetch dependencies recursively (if needed) without compiling them" \
+ " list-deps List dependencies recursively on stdout" \
+ " search q=... Search for a package in the built-in index" \
+ " rel Build a release for this project, if applicable" \
+ " docs Build the documentation for this project" \
+ " install-docs Install the man pages for this project" \
+ " check Compile and run all tests and analysis for this project" \
+ " tests Run the tests for this project" \
+ " clean Delete temporary and output files from most targets" \
+ " distclean Delete all temporary and output files" \
+ " help Display this help and exit" \
+ " erlang-mk Update erlang.mk to the latest version"
+
+# Core functions.
+
+empty :=
+space := $(empty) $(empty)
+tab := $(empty) $(empty)
+comma := ,
+
+define newline
+
+
+endef
+
+define comma_list
+$(subst $(space),$(comma),$(strip $(1)))
+endef
+
+define escape_dquotes
+$(subst ",\",$1)
+endef
+
+# Adding erlang.mk to make Erlang scripts that call init:get_plain_arguments() happy.
+define erlang
+$(ERL) $2 -pz $(ERLANG_MK_TMP)/rebar/ebin -eval "$(subst $(newline),,$(call escape_dquotes,$1))" -- erlang.mk
+endef
+
+ifeq ($(PLATFORM),msys2)
+core_native_path = $(shell cygpath -m $1)
+else
+core_native_path = $1
+endif
+
+core_http_get = curl -Lf$(if $(filter-out 0,$(V)),,s)o $(call core_native_path,$1) $2
+
+core_eq = $(and $(findstring $(1),$(2)),$(findstring $(2),$(1)))
+
+# We skip files that contain spaces because they end up causing issues.
+core_find = $(if $(wildcard $1),$(shell find $(1:%/=%) \( -type l -o -type f \) -name $(subst *,\*,$2) | grep -v " "))
+
+core_lc = $(subst A,a,$(subst B,b,$(subst C,c,$(subst D,d,$(subst E,e,$(subst F,f,$(subst G,g,$(subst H,h,$(subst I,i,$(subst J,j,$(subst K,k,$(subst L,l,$(subst M,m,$(subst N,n,$(subst O,o,$(subst P,p,$(subst Q,q,$(subst R,r,$(subst S,s,$(subst T,t,$(subst U,u,$(subst V,v,$(subst W,w,$(subst X,x,$(subst Y,y,$(subst Z,z,$(1)))))))))))))))))))))))))))
+
+core_ls = $(filter-out $(1),$(shell echo $(1)))
+
+# @todo Use a solution that does not require using perl.
+core_relpath = $(shell perl -e 'use File::Spec; print File::Spec->abs2rel(@ARGV) . "\n"' $1 $2)
+
+define core_render
+ printf -- '$(subst $(newline),\n,$(subst %,%%,$(subst ','\'',$(subst $(tab),$(WS),$(call $(1))))))\n' > $(2)
+endef
+
+# Automated update.
+
+ERLANG_MK_REPO ?= https://github.com/ninenines/erlang.mk
+ERLANG_MK_COMMIT ?=
+ERLANG_MK_BUILD_CONFIG ?= build.config
+ERLANG_MK_BUILD_DIR ?= .erlang.mk.build
+
+erlang-mk: WITHOUT ?= $(ERLANG_MK_WITHOUT)
+erlang-mk:
+ifdef ERLANG_MK_COMMIT
+ $(verbose) git clone $(ERLANG_MK_REPO) $(ERLANG_MK_BUILD_DIR)
+ $(verbose) cd $(ERLANG_MK_BUILD_DIR) && git checkout $(ERLANG_MK_COMMIT)
+else
+ $(verbose) git clone --depth 1 $(ERLANG_MK_REPO) $(ERLANG_MK_BUILD_DIR)
+endif
+ $(verbose) if [ -f $(ERLANG_MK_BUILD_CONFIG) ]; then cp $(ERLANG_MK_BUILD_CONFIG) $(ERLANG_MK_BUILD_DIR)/build.config; fi
+ $(gen_verbose) $(MAKE) --no-print-directory -C $(ERLANG_MK_BUILD_DIR) WITHOUT='$(strip $(WITHOUT))' UPGRADE=1
+ $(verbose) cp $(ERLANG_MK_BUILD_DIR)/erlang.mk ./erlang.mk
+ $(verbose) rm -rf $(ERLANG_MK_BUILD_DIR)
+ $(verbose) rm -rf $(ERLANG_MK_TMP)
+
+# The erlang.mk package index is bundled in the default erlang.mk build.
+# Search for the string "copyright" to skip to the rest of the code.
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-kerl
+
+KERL_INSTALL_DIR ?= $(HOME)/erlang
+
+ifeq ($(strip $(KERL)),)
+KERL := $(ERLANG_MK_TMP)/kerl/kerl
+endif
+
+KERL_DIR = $(ERLANG_MK_TMP)/kerl
+
+export KERL
+
+KERL_GIT ?= https://github.com/kerl/kerl
+KERL_COMMIT ?= master
+
+KERL_MAKEFLAGS ?=
+
+OTP_GIT ?= https://github.com/erlang/otp
+
+define kerl_otp_target
+$(KERL_INSTALL_DIR)/$(1): $(KERL)
+ $(verbose) if [ ! -d $$@ ]; then \
+ MAKEFLAGS="$(KERL_MAKEFLAGS)" $(KERL) build git $(OTP_GIT) $(1) $(1); \
+ $(KERL) install $(1) $(KERL_INSTALL_DIR)/$(1); \
+ fi
+endef
+
+define kerl_hipe_target
+$(KERL_INSTALL_DIR)/$1-native: $(KERL)
+ $(verbose) if [ ! -d $$@ ]; then \
+ KERL_CONFIGURE_OPTIONS=--enable-native-libs \
+ MAKEFLAGS="$(KERL_MAKEFLAGS)" $(KERL) build git $(OTP_GIT) $1 $1-native; \
+ $(KERL) install $1-native $(KERL_INSTALL_DIR)/$1-native; \
+ fi
+endef
+
+$(KERL): $(KERL_DIR)
+
+$(KERL_DIR): | $(ERLANG_MK_TMP)
+ $(gen_verbose) git clone --depth 1 $(KERL_GIT) $(ERLANG_MK_TMP)/kerl
+ $(verbose) cd $(ERLANG_MK_TMP)/kerl && git checkout $(KERL_COMMIT)
+ $(verbose) chmod +x $(KERL)
+
+distclean:: distclean-kerl
+
+distclean-kerl:
+ $(gen_verbose) rm -rf $(KERL_DIR)
+
+# Allow users to select which version of Erlang/OTP to use for a project.
+
+ifneq ($(strip $(LATEST_ERLANG_OTP)),)
+# In some environments it is necessary to filter out master.
+ERLANG_OTP := $(notdir $(lastword $(sort\
+ $(filter-out $(KERL_INSTALL_DIR)/master $(KERL_INSTALL_DIR)/OTP_R%,\
+ $(filter-out %-rc1 %-rc2 %-rc3,$(wildcard $(KERL_INSTALL_DIR)/*[^-native]))))))
+endif
+
+ERLANG_OTP ?=
+ERLANG_HIPE ?=
+
+# Use kerl to enforce a specific Erlang/OTP version for a project.
+ifneq ($(strip $(ERLANG_OTP)),)
+export PATH := $(KERL_INSTALL_DIR)/$(ERLANG_OTP)/bin:$(PATH)
+SHELL := env PATH=$(PATH) $(SHELL)
+$(eval $(call kerl_otp_target,$(ERLANG_OTP)))
+
+# Build Erlang/OTP only if it doesn't already exist.
+ifeq ($(wildcard $(KERL_INSTALL_DIR)/$(ERLANG_OTP))$(BUILD_ERLANG_OTP),)
+$(info Building Erlang/OTP $(ERLANG_OTP)... Please wait...)
+$(shell $(MAKE) $(KERL_INSTALL_DIR)/$(ERLANG_OTP) ERLANG_OTP=$(ERLANG_OTP) BUILD_ERLANG_OTP=1 >&2)
+endif
+
+else
+# Same for a HiPE enabled VM.
+ifneq ($(strip $(ERLANG_HIPE)),)
+export PATH := $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native/bin:$(PATH)
+SHELL := env PATH=$(PATH) $(SHELL)
+$(eval $(call kerl_hipe_target,$(ERLANG_HIPE)))
+
+# Build Erlang/OTP only if it doesn't already exist.
+ifeq ($(wildcard $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native)$(BUILD_ERLANG_OTP),)
+$(info Building HiPE-enabled Erlang/OTP $(ERLANG_HIPE)... Please wait...)
+$(shell $(MAKE) $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native ERLANG_HIPE=$(ERLANG_HIPE) BUILD_ERLANG_OTP=1 >&2)
+endif
+
+endif
+endif
+
+PACKAGES += aberth
+pkg_aberth_name = aberth
+pkg_aberth_description = Generic BERT-RPC server in Erlang
+pkg_aberth_homepage = https://github.com/a13x/aberth
+pkg_aberth_fetch = git
+pkg_aberth_repo = https://github.com/a13x/aberth
+pkg_aberth_commit = master
+
+PACKAGES += active
+pkg_active_name = active
+pkg_active_description = Active development for Erlang: rebuild and reload source/binary files while the VM is running
+pkg_active_homepage = https://github.com/proger/active
+pkg_active_fetch = git
+pkg_active_repo = https://github.com/proger/active
+pkg_active_commit = master
+
+PACKAGES += actordb_core
+pkg_actordb_core_name = actordb_core
+pkg_actordb_core_description = ActorDB main source
+pkg_actordb_core_homepage = http://www.actordb.com/
+pkg_actordb_core_fetch = git
+pkg_actordb_core_repo = https://github.com/biokoda/actordb_core
+pkg_actordb_core_commit = master
+
+PACKAGES += actordb_thrift
+pkg_actordb_thrift_name = actordb_thrift
+pkg_actordb_thrift_description = Thrift API for ActorDB
+pkg_actordb_thrift_homepage = http://www.actordb.com/
+pkg_actordb_thrift_fetch = git
+pkg_actordb_thrift_repo = https://github.com/biokoda/actordb_thrift
+pkg_actordb_thrift_commit = master
+
+PACKAGES += aleppo
+pkg_aleppo_name = aleppo
+pkg_aleppo_description = Alternative Erlang Pre-Processor
+pkg_aleppo_homepage = https://github.com/ErlyORM/aleppo
+pkg_aleppo_fetch = git
+pkg_aleppo_repo = https://github.com/ErlyORM/aleppo
+pkg_aleppo_commit = master
+
+PACKAGES += alog
+pkg_alog_name = alog
+pkg_alog_description = Simply the best logging framework for Erlang
+pkg_alog_homepage = https://github.com/siberian-fast-food/alogger
+pkg_alog_fetch = git
+pkg_alog_repo = https://github.com/siberian-fast-food/alogger
+pkg_alog_commit = master
+
+PACKAGES += amqp_client
+pkg_amqp_client_name = amqp_client
+pkg_amqp_client_description = RabbitMQ Erlang AMQP client
+pkg_amqp_client_homepage = https://www.rabbitmq.com/erlang-client-user-guide.html
+pkg_amqp_client_fetch = git
+pkg_amqp_client_repo = https://github.com/rabbitmq/rabbitmq-erlang-client.git
+pkg_amqp_client_commit = master
+
+PACKAGES += annotations
+pkg_annotations_name = annotations
+pkg_annotations_description = Simple code instrumentation utilities
+pkg_annotations_homepage = https://github.com/hyperthunk/annotations
+pkg_annotations_fetch = git
+pkg_annotations_repo = https://github.com/hyperthunk/annotations
+pkg_annotations_commit = master
+
+PACKAGES += antidote
+pkg_antidote_name = antidote
+pkg_antidote_description = Large-scale computation without synchronisation
+pkg_antidote_homepage = https://syncfree.lip6.fr/
+pkg_antidote_fetch = git
+pkg_antidote_repo = https://github.com/SyncFree/antidote
+pkg_antidote_commit = master
+
+PACKAGES += apns
+pkg_apns_name = apns
+pkg_apns_description = Apple Push Notification Server for Erlang
+pkg_apns_homepage = http://inaka.github.com/apns4erl
+pkg_apns_fetch = git
+pkg_apns_repo = https://github.com/inaka/apns4erl
+pkg_apns_commit = master
+
+PACKAGES += asciideck
+pkg_asciideck_name = asciideck
+pkg_asciideck_description = Asciidoc for Erlang.
+pkg_asciideck_homepage = https://ninenines.eu
+pkg_asciideck_fetch = git
+pkg_asciideck_repo = https://github.com/ninenines/asciideck
+pkg_asciideck_commit = master
+
+PACKAGES += azdht
+pkg_azdht_name = azdht
+pkg_azdht_description = Azureus Distributed Hash Table (DHT) in Erlang
+pkg_azdht_homepage = https://github.com/arcusfelis/azdht
+pkg_azdht_fetch = git
+pkg_azdht_repo = https://github.com/arcusfelis/azdht
+pkg_azdht_commit = master
+
+PACKAGES += backoff
+pkg_backoff_name = backoff
+pkg_backoff_description = Simple exponential backoffs in Erlang
+pkg_backoff_homepage = https://github.com/ferd/backoff
+pkg_backoff_fetch = git
+pkg_backoff_repo = https://github.com/ferd/backoff
+pkg_backoff_commit = master
+
+PACKAGES += barrel_tcp
+pkg_barrel_tcp_name = barrel_tcp
+pkg_barrel_tcp_description = barrel is a generic TCP acceptor pool with low latency in Erlang.
+pkg_barrel_tcp_homepage = https://github.com/benoitc-attic/barrel_tcp
+pkg_barrel_tcp_fetch = git
+pkg_barrel_tcp_repo = https://github.com/benoitc-attic/barrel_tcp
+pkg_barrel_tcp_commit = master
+
+PACKAGES += basho_bench
+pkg_basho_bench_name = basho_bench
+pkg_basho_bench_description = A load-generation and testing tool for basically anything for which you can write a returning Erlang function.
+pkg_basho_bench_homepage = https://github.com/basho/basho_bench
+pkg_basho_bench_fetch = git
+pkg_basho_bench_repo = https://github.com/basho/basho_bench
+pkg_basho_bench_commit = master
+
+PACKAGES += bcrypt
+pkg_bcrypt_name = bcrypt
+pkg_bcrypt_description = Bcrypt Erlang / C library
+pkg_bcrypt_homepage = https://github.com/erlangpack/bcrypt
+pkg_bcrypt_fetch = git
+pkg_bcrypt_repo = https://github.com/erlangpack/bcrypt.git
+pkg_bcrypt_commit = master
+
+PACKAGES += beam
+pkg_beam_name = beam
+pkg_beam_description = BEAM emulator written in Erlang
+pkg_beam_homepage = https://github.com/tonyrog/beam
+pkg_beam_fetch = git
+pkg_beam_repo = https://github.com/tonyrog/beam
+pkg_beam_commit = master
+
+PACKAGES += beanstalk
+pkg_beanstalk_name = beanstalk
+pkg_beanstalk_description = An Erlang client for beanstalkd
+pkg_beanstalk_homepage = https://github.com/tim/erlang-beanstalk
+pkg_beanstalk_fetch = git
+pkg_beanstalk_repo = https://github.com/tim/erlang-beanstalk
+pkg_beanstalk_commit = master
+
+PACKAGES += bear
+pkg_bear_name = bear
+pkg_bear_description = a set of statistics functions for erlang
+pkg_bear_homepage = https://github.com/boundary/bear
+pkg_bear_fetch = git
+pkg_bear_repo = https://github.com/boundary/bear
+pkg_bear_commit = master
+
+PACKAGES += bertconf
+pkg_bertconf_name = bertconf
+pkg_bertconf_description = Make ETS tables out of static BERT files that are auto-reloaded
+pkg_bertconf_homepage = https://github.com/ferd/bertconf
+pkg_bertconf_fetch = git
+pkg_bertconf_repo = https://github.com/ferd/bertconf
+pkg_bertconf_commit = master
+
+PACKAGES += bifrost
+pkg_bifrost_name = bifrost
+pkg_bifrost_description = Erlang FTP Server Framework
+pkg_bifrost_homepage = https://github.com/thorstadt/bifrost
+pkg_bifrost_fetch = git
+pkg_bifrost_repo = https://github.com/thorstadt/bifrost
+pkg_bifrost_commit = master
+
+PACKAGES += binpp
+pkg_binpp_name = binpp
+pkg_binpp_description = Erlang Binary Pretty Printer
+pkg_binpp_homepage = https://github.com/jtendo/binpp
+pkg_binpp_fetch = git
+pkg_binpp_repo = https://github.com/jtendo/binpp
+pkg_binpp_commit = master
+
+PACKAGES += bisect
+pkg_bisect_name = bisect
+pkg_bisect_description = Ordered fixed-size binary dictionary in Erlang
+pkg_bisect_homepage = https://github.com/knutin/bisect
+pkg_bisect_fetch = git
+pkg_bisect_repo = https://github.com/knutin/bisect
+pkg_bisect_commit = master
+
+PACKAGES += bitcask
+pkg_bitcask_name = bitcask
+pkg_bitcask_description = because you need another key/value storage engine
+pkg_bitcask_homepage = https://github.com/basho/bitcask
+pkg_bitcask_fetch = git
+pkg_bitcask_repo = https://github.com/basho/bitcask
+pkg_bitcask_commit = develop
+
+PACKAGES += bitstore
+pkg_bitstore_name = bitstore
+pkg_bitstore_description = A document based ontology development environment
+pkg_bitstore_homepage = https://github.com/bdionne/bitstore
+pkg_bitstore_fetch = git
+pkg_bitstore_repo = https://github.com/bdionne/bitstore
+pkg_bitstore_commit = master
+
+PACKAGES += bootstrap
+pkg_bootstrap_name = bootstrap
+pkg_bootstrap_description = A simple, yet powerful Erlang cluster bootstrapping application.
+pkg_bootstrap_homepage = https://github.com/schlagert/bootstrap
+pkg_bootstrap_fetch = git
+pkg_bootstrap_repo = https://github.com/schlagert/bootstrap
+pkg_bootstrap_commit = master
+
+PACKAGES += boss
+pkg_boss_name = boss
+pkg_boss_description = Erlang web MVC, now featuring Comet
+pkg_boss_homepage = https://github.com/ChicagoBoss/ChicagoBoss
+pkg_boss_fetch = git
+pkg_boss_repo = https://github.com/ChicagoBoss/ChicagoBoss
+pkg_boss_commit = master
+
+PACKAGES += boss_db
+pkg_boss_db_name = boss_db
+pkg_boss_db_description = BossDB: a sharded, caching, pooling, evented ORM for Erlang
+pkg_boss_db_homepage = https://github.com/ErlyORM/boss_db
+pkg_boss_db_fetch = git
+pkg_boss_db_repo = https://github.com/ErlyORM/boss_db
+pkg_boss_db_commit = master
+
+PACKAGES += brod
+pkg_brod_name = brod
+pkg_brod_description = Kafka client in Erlang
+pkg_brod_homepage = https://github.com/klarna/brod
+pkg_brod_fetch = git
+pkg_brod_repo = https://github.com/klarna/brod.git
+pkg_brod_commit = master
+
+PACKAGES += bson
+pkg_bson_name = bson
+pkg_bson_description = BSON documents in Erlang, see bsonspec.org
+pkg_bson_homepage = https://github.com/comtihon/bson-erlang
+pkg_bson_fetch = git
+pkg_bson_repo = https://github.com/comtihon/bson-erlang
+pkg_bson_commit = master
+
+PACKAGES += bullet
+pkg_bullet_name = bullet
+pkg_bullet_description = Simple, reliable, efficient streaming for Cowboy.
+pkg_bullet_homepage = http://ninenines.eu
+pkg_bullet_fetch = git
+pkg_bullet_repo = https://github.com/ninenines/bullet
+pkg_bullet_commit = master
+
+PACKAGES += cache
+pkg_cache_name = cache
+pkg_cache_description = Erlang in-memory cache
+pkg_cache_homepage = https://github.com/fogfish/cache
+pkg_cache_fetch = git
+pkg_cache_repo = https://github.com/fogfish/cache
+pkg_cache_commit = master
+
+PACKAGES += cake
+pkg_cake_name = cake
+pkg_cake_description = Really simple terminal colorization
+pkg_cake_homepage = https://github.com/darach/cake-erl
+pkg_cake_fetch = git
+pkg_cake_repo = https://github.com/darach/cake-erl
+pkg_cake_commit = master
+
+PACKAGES += carotene
+pkg_carotene_name = carotene
+pkg_carotene_description = Real-time server
+pkg_carotene_homepage = https://github.com/carotene/carotene
+pkg_carotene_fetch = git
+pkg_carotene_repo = https://github.com/carotene/carotene
+pkg_carotene_commit = master
+
+PACKAGES += cberl
+pkg_cberl_name = cberl
+pkg_cberl_description = NIF based Erlang bindings for Couchbase
+pkg_cberl_homepage = https://github.com/chitika/cberl
+pkg_cberl_fetch = git
+pkg_cberl_repo = https://github.com/chitika/cberl
+pkg_cberl_commit = master
+
+PACKAGES += cecho
+pkg_cecho_name = cecho
+pkg_cecho_description = An ncurses library for Erlang
+pkg_cecho_homepage = https://github.com/mazenharake/cecho
+pkg_cecho_fetch = git
+pkg_cecho_repo = https://github.com/mazenharake/cecho
+pkg_cecho_commit = master
+
+PACKAGES += cferl
+pkg_cferl_name = cferl
+pkg_cferl_description = Rackspace / Open Stack Cloud Files Erlang Client
+pkg_cferl_homepage = https://github.com/ddossot/cferl
+pkg_cferl_fetch = git
+pkg_cferl_repo = https://github.com/ddossot/cferl
+pkg_cferl_commit = master
+
+PACKAGES += chaos_monkey
+pkg_chaos_monkey_name = chaos_monkey
+pkg_chaos_monkey_description = This is The CHAOS MONKEY. It will kill your processes.
+pkg_chaos_monkey_homepage = https://github.com/dLuna/chaos_monkey
+pkg_chaos_monkey_fetch = git
+pkg_chaos_monkey_repo = https://github.com/dLuna/chaos_monkey
+pkg_chaos_monkey_commit = master
+
+PACKAGES += check_node
+pkg_check_node_name = check_node
+pkg_check_node_description = Nagios Scripts for monitoring Riak
+pkg_check_node_homepage = https://github.com/basho-labs/riak_nagios
+pkg_check_node_fetch = git
+pkg_check_node_repo = https://github.com/basho-labs/riak_nagios
+pkg_check_node_commit = master
+
+PACKAGES += chronos
+pkg_chronos_name = chronos
+pkg_chronos_description = Timer module for Erlang that makes it easy to abstract time out of the tests.
+pkg_chronos_homepage = https://github.com/lehoff/chronos
+pkg_chronos_fetch = git
+pkg_chronos_repo = https://github.com/lehoff/chronos
+pkg_chronos_commit = master
+
+PACKAGES += chumak
+pkg_chumak_name = chumak
+pkg_chumak_description = Pure Erlang implementation of ZeroMQ Message Transport Protocol.
+pkg_chumak_homepage = http://choven.ca
+pkg_chumak_fetch = git
+pkg_chumak_repo = https://github.com/chovencorp/chumak
+pkg_chumak_commit = master
+
+PACKAGES += cl
+pkg_cl_name = cl
+pkg_cl_description = OpenCL binding for Erlang
+pkg_cl_homepage = https://github.com/tonyrog/cl
+pkg_cl_fetch = git
+pkg_cl_repo = https://github.com/tonyrog/cl
+pkg_cl_commit = master
+
+PACKAGES += clique
+pkg_clique_name = clique
+pkg_clique_description = CLI Framework for Erlang
+pkg_clique_homepage = https://github.com/basho/clique
+pkg_clique_fetch = git
+pkg_clique_repo = https://github.com/basho/clique
+pkg_clique_commit = develop
+
+PACKAGES += cloudi_core
+pkg_cloudi_core_name = cloudi_core
+pkg_cloudi_core_description = CloudI internal service runtime
+pkg_cloudi_core_homepage = http://cloudi.org/
+pkg_cloudi_core_fetch = git
+pkg_cloudi_core_repo = https://github.com/CloudI/cloudi_core
+pkg_cloudi_core_commit = master
+
+PACKAGES += cloudi_service_api_requests
+pkg_cloudi_service_api_requests_name = cloudi_service_api_requests
+pkg_cloudi_service_api_requests_description = CloudI Service API requests (JSON-RPC/Erlang-term support)
+pkg_cloudi_service_api_requests_homepage = http://cloudi.org/
+pkg_cloudi_service_api_requests_fetch = git
+pkg_cloudi_service_api_requests_repo = https://github.com/CloudI/cloudi_service_api_requests
+pkg_cloudi_service_api_requests_commit = master
+
+PACKAGES += cloudi_service_db
+pkg_cloudi_service_db_name = cloudi_service_db
+pkg_cloudi_service_db_description = CloudI Database (in-memory/testing/generic)
+pkg_cloudi_service_db_homepage = http://cloudi.org/
+pkg_cloudi_service_db_fetch = git
+pkg_cloudi_service_db_repo = https://github.com/CloudI/cloudi_service_db
+pkg_cloudi_service_db_commit = master
+
+PACKAGES += cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_name = cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_description = Cassandra CloudI Service
+pkg_cloudi_service_db_cassandra_homepage = http://cloudi.org/
+pkg_cloudi_service_db_cassandra_fetch = git
+pkg_cloudi_service_db_cassandra_repo = https://github.com/CloudI/cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_commit = master
+
+PACKAGES += cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_name = cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_description = Cassandra CQL CloudI Service
+pkg_cloudi_service_db_cassandra_cql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_cassandra_cql_fetch = git
+pkg_cloudi_service_db_cassandra_cql_repo = https://github.com/CloudI/cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_commit = master
+
+PACKAGES += cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_name = cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_description = CouchDB CloudI Service
+pkg_cloudi_service_db_couchdb_homepage = http://cloudi.org/
+pkg_cloudi_service_db_couchdb_fetch = git
+pkg_cloudi_service_db_couchdb_repo = https://github.com/CloudI/cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_commit = master
+
+PACKAGES += cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_name = cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_description = elasticsearch CloudI Service
+pkg_cloudi_service_db_elasticsearch_homepage = http://cloudi.org/
+pkg_cloudi_service_db_elasticsearch_fetch = git
+pkg_cloudi_service_db_elasticsearch_repo = https://github.com/CloudI/cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_commit = master
+
+PACKAGES += cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_name = cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_description = memcached CloudI Service
+pkg_cloudi_service_db_memcached_homepage = http://cloudi.org/
+pkg_cloudi_service_db_memcached_fetch = git
+pkg_cloudi_service_db_memcached_repo = https://github.com/CloudI/cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_commit = master
+
+PACKAGES += cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_name = cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_description = MySQL CloudI Service
+pkg_cloudi_service_db_mysql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_mysql_fetch = git
+pkg_cloudi_service_db_mysql_repo = https://github.com/CloudI/cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_commit = master
+
+PACKAGES += cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_name = cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_description = PostgreSQL CloudI Service
+pkg_cloudi_service_db_pgsql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_pgsql_fetch = git
+pkg_cloudi_service_db_pgsql_repo = https://github.com/CloudI/cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_commit = master
+
+PACKAGES += cloudi_service_db_riak
+pkg_cloudi_service_db_riak_name = cloudi_service_db_riak
+pkg_cloudi_service_db_riak_description = Riak CloudI Service
+pkg_cloudi_service_db_riak_homepage = http://cloudi.org/
+pkg_cloudi_service_db_riak_fetch = git
+pkg_cloudi_service_db_riak_repo = https://github.com/CloudI/cloudi_service_db_riak
+pkg_cloudi_service_db_riak_commit = master
+
+PACKAGES += cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_name = cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_description = Tokyo Tyrant CloudI Service
+pkg_cloudi_service_db_tokyotyrant_homepage = http://cloudi.org/
+pkg_cloudi_service_db_tokyotyrant_fetch = git
+pkg_cloudi_service_db_tokyotyrant_repo = https://github.com/CloudI/cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_commit = master
+
+PACKAGES += cloudi_service_filesystem
+pkg_cloudi_service_filesystem_name = cloudi_service_filesystem
+pkg_cloudi_service_filesystem_description = Filesystem CloudI Service
+pkg_cloudi_service_filesystem_homepage = http://cloudi.org/
+pkg_cloudi_service_filesystem_fetch = git
+pkg_cloudi_service_filesystem_repo = https://github.com/CloudI/cloudi_service_filesystem
+pkg_cloudi_service_filesystem_commit = master
+
+PACKAGES += cloudi_service_http_client
+pkg_cloudi_service_http_client_name = cloudi_service_http_client
+pkg_cloudi_service_http_client_description = HTTP client CloudI Service
+pkg_cloudi_service_http_client_homepage = http://cloudi.org/
+pkg_cloudi_service_http_client_fetch = git
+pkg_cloudi_service_http_client_repo = https://github.com/CloudI/cloudi_service_http_client
+pkg_cloudi_service_http_client_commit = master
+
+PACKAGES += cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_name = cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_description = cowboy HTTP/HTTPS CloudI Service
+pkg_cloudi_service_http_cowboy_homepage = http://cloudi.org/
+pkg_cloudi_service_http_cowboy_fetch = git
+pkg_cloudi_service_http_cowboy_repo = https://github.com/CloudI/cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_commit = master
+
+PACKAGES += cloudi_service_http_elli
+pkg_cloudi_service_http_elli_name = cloudi_service_http_elli
+pkg_cloudi_service_http_elli_description = elli HTTP CloudI Service
+pkg_cloudi_service_http_elli_homepage = http://cloudi.org/
+pkg_cloudi_service_http_elli_fetch = git
+pkg_cloudi_service_http_elli_repo = https://github.com/CloudI/cloudi_service_http_elli
+pkg_cloudi_service_http_elli_commit = master
+
+PACKAGES += cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_name = cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_description = Map/Reduce CloudI Service
+pkg_cloudi_service_map_reduce_homepage = http://cloudi.org/
+pkg_cloudi_service_map_reduce_fetch = git
+pkg_cloudi_service_map_reduce_repo = https://github.com/CloudI/cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_commit = master
+
+PACKAGES += cloudi_service_oauth1
+pkg_cloudi_service_oauth1_name = cloudi_service_oauth1
+pkg_cloudi_service_oauth1_description = OAuth v1.0 CloudI Service
+pkg_cloudi_service_oauth1_homepage = http://cloudi.org/
+pkg_cloudi_service_oauth1_fetch = git
+pkg_cloudi_service_oauth1_repo = https://github.com/CloudI/cloudi_service_oauth1
+pkg_cloudi_service_oauth1_commit = master
+
+PACKAGES += cloudi_service_queue
+pkg_cloudi_service_queue_name = cloudi_service_queue
+pkg_cloudi_service_queue_description = Persistent Queue Service
+pkg_cloudi_service_queue_homepage = http://cloudi.org/
+pkg_cloudi_service_queue_fetch = git
+pkg_cloudi_service_queue_repo = https://github.com/CloudI/cloudi_service_queue
+pkg_cloudi_service_queue_commit = master
+
+PACKAGES += cloudi_service_quorum
+pkg_cloudi_service_quorum_name = cloudi_service_quorum
+pkg_cloudi_service_quorum_description = CloudI Quorum Service
+pkg_cloudi_service_quorum_homepage = http://cloudi.org/
+pkg_cloudi_service_quorum_fetch = git
+pkg_cloudi_service_quorum_repo = https://github.com/CloudI/cloudi_service_quorum
+pkg_cloudi_service_quorum_commit = master
+
+PACKAGES += cloudi_service_router
+pkg_cloudi_service_router_name = cloudi_service_router
+pkg_cloudi_service_router_description = CloudI Router Service
+pkg_cloudi_service_router_homepage = http://cloudi.org/
+pkg_cloudi_service_router_fetch = git
+pkg_cloudi_service_router_repo = https://github.com/CloudI/cloudi_service_router
+pkg_cloudi_service_router_commit = master
+
+PACKAGES += cloudi_service_tcp
+pkg_cloudi_service_tcp_name = cloudi_service_tcp
+pkg_cloudi_service_tcp_description = TCP CloudI Service
+pkg_cloudi_service_tcp_homepage = http://cloudi.org/
+pkg_cloudi_service_tcp_fetch = git
+pkg_cloudi_service_tcp_repo = https://github.com/CloudI/cloudi_service_tcp
+pkg_cloudi_service_tcp_commit = master
+
+PACKAGES += cloudi_service_timers
+pkg_cloudi_service_timers_name = cloudi_service_timers
+pkg_cloudi_service_timers_description = Timers CloudI Service
+pkg_cloudi_service_timers_homepage = http://cloudi.org/
+pkg_cloudi_service_timers_fetch = git
+pkg_cloudi_service_timers_repo = https://github.com/CloudI/cloudi_service_timers
+pkg_cloudi_service_timers_commit = master
+
+PACKAGES += cloudi_service_udp
+pkg_cloudi_service_udp_name = cloudi_service_udp
+pkg_cloudi_service_udp_description = UDP CloudI Service
+pkg_cloudi_service_udp_homepage = http://cloudi.org/
+pkg_cloudi_service_udp_fetch = git
+pkg_cloudi_service_udp_repo = https://github.com/CloudI/cloudi_service_udp
+pkg_cloudi_service_udp_commit = master
+
+PACKAGES += cloudi_service_validate
+pkg_cloudi_service_validate_name = cloudi_service_validate
+pkg_cloudi_service_validate_description = CloudI Validate Service
+pkg_cloudi_service_validate_homepage = http://cloudi.org/
+pkg_cloudi_service_validate_fetch = git
+pkg_cloudi_service_validate_repo = https://github.com/CloudI/cloudi_service_validate
+pkg_cloudi_service_validate_commit = master
+
+PACKAGES += cloudi_service_zeromq
+pkg_cloudi_service_zeromq_name = cloudi_service_zeromq
+pkg_cloudi_service_zeromq_description = ZeroMQ CloudI Service
+pkg_cloudi_service_zeromq_homepage = http://cloudi.org/
+pkg_cloudi_service_zeromq_fetch = git
+pkg_cloudi_service_zeromq_repo = https://github.com/CloudI/cloudi_service_zeromq
+pkg_cloudi_service_zeromq_commit = master
+
+PACKAGES += cluster_info
+pkg_cluster_info_name = cluster_info
+pkg_cluster_info_description = Fork of Hibari's nifty cluster_info OTP app
+pkg_cluster_info_homepage = https://github.com/basho/cluster_info
+pkg_cluster_info_fetch = git
+pkg_cluster_info_repo = https://github.com/basho/cluster_info
+pkg_cluster_info_commit = master
+
+PACKAGES += color
+pkg_color_name = color
+pkg_color_description = ANSI colors for your Erlang
+pkg_color_homepage = https://github.com/julianduque/erlang-color
+pkg_color_fetch = git
+pkg_color_repo = https://github.com/julianduque/erlang-color
+pkg_color_commit = master
+
+PACKAGES += confetti
+pkg_confetti_name = confetti
+pkg_confetti_description = Erlang configuration provider / application:get_env/2 on steroids
+pkg_confetti_homepage = https://github.com/jtendo/confetti
+pkg_confetti_fetch = git
+pkg_confetti_repo = https://github.com/jtendo/confetti
+pkg_confetti_commit = master
+
+PACKAGES += couchbeam
+pkg_couchbeam_name = couchbeam
+pkg_couchbeam_description = Apache CouchDB client in Erlang
+pkg_couchbeam_homepage = https://github.com/benoitc/couchbeam
+pkg_couchbeam_fetch = git
+pkg_couchbeam_repo = https://github.com/benoitc/couchbeam
+pkg_couchbeam_commit = master
+
+PACKAGES += covertool
+pkg_covertool_name = covertool
+pkg_covertool_description = Tool to convert Erlang cover data files into Cobertura XML reports
+pkg_covertool_homepage = https://github.com/idubrov/covertool
+pkg_covertool_fetch = git
+pkg_covertool_repo = https://github.com/idubrov/covertool
+pkg_covertool_commit = master
+
+PACKAGES += cowboy
+pkg_cowboy_name = cowboy
+pkg_cowboy_description = Small, fast and modular HTTP server.
+pkg_cowboy_homepage = http://ninenines.eu
+pkg_cowboy_fetch = git
+pkg_cowboy_repo = https://github.com/ninenines/cowboy
+pkg_cowboy_commit = 1.0.4
+
+PACKAGES += cowdb
+pkg_cowdb_name = cowdb
+pkg_cowdb_description = Pure Key/Value database library for Erlang Applications
+pkg_cowdb_homepage = https://github.com/refuge/cowdb
+pkg_cowdb_fetch = git
+pkg_cowdb_repo = https://github.com/refuge/cowdb
+pkg_cowdb_commit = master
+
+PACKAGES += cowlib
+pkg_cowlib_name = cowlib
+pkg_cowlib_description = Support library for manipulating Web protocols.
+pkg_cowlib_homepage = http://ninenines.eu
+pkg_cowlib_fetch = git
+pkg_cowlib_repo = https://github.com/ninenines/cowlib
+pkg_cowlib_commit = 1.0.2
+
+PACKAGES += cpg
+pkg_cpg_name = cpg
+pkg_cpg_description = CloudI Process Groups
+pkg_cpg_homepage = https://github.com/okeuday/cpg
+pkg_cpg_fetch = git
+pkg_cpg_repo = https://github.com/okeuday/cpg
+pkg_cpg_commit = master
+
+PACKAGES += cqerl
+pkg_cqerl_name = cqerl
+pkg_cqerl_description = Native Erlang CQL client for Cassandra
+pkg_cqerl_homepage = https://matehat.github.io/cqerl/
+pkg_cqerl_fetch = git
+pkg_cqerl_repo = https://github.com/matehat/cqerl
+pkg_cqerl_commit = master
+
+PACKAGES += cr
+pkg_cr_name = cr
+pkg_cr_description = Chain Replication
+pkg_cr_homepage = https://synrc.com/apps/cr/doc/cr.htm
+pkg_cr_fetch = git
+pkg_cr_repo = https://github.com/spawnproc/cr
+pkg_cr_commit = master
+
+PACKAGES += cuttlefish
+pkg_cuttlefish_name = cuttlefish
+pkg_cuttlefish_description = never lose your childlike sense of wonder baby cuttlefish, promise me?
+pkg_cuttlefish_homepage = https://github.com/basho/cuttlefish
+pkg_cuttlefish_fetch = git
+pkg_cuttlefish_repo = https://github.com/basho/cuttlefish
+pkg_cuttlefish_commit = master
+
+PACKAGES += damocles
+pkg_damocles_name = damocles
+pkg_damocles_description = Erlang library for generating adversarial network conditions for QAing distributed applications/systems on a single Linux box.
+pkg_damocles_homepage = https://github.com/lostcolony/damocles
+pkg_damocles_fetch = git
+pkg_damocles_repo = https://github.com/lostcolony/damocles
+pkg_damocles_commit = master
+
+PACKAGES += debbie
+pkg_debbie_name = debbie
+pkg_debbie_description = .DEB Built In Erlang
+pkg_debbie_homepage = https://github.com/crownedgrouse/debbie
+pkg_debbie_fetch = git
+pkg_debbie_repo = https://github.com/crownedgrouse/debbie
+pkg_debbie_commit = master
+
+PACKAGES += decimal
+pkg_decimal_name = decimal
+pkg_decimal_description = An Erlang decimal arithmetic library
+pkg_decimal_homepage = https://github.com/tim/erlang-decimal
+pkg_decimal_fetch = git
+pkg_decimal_repo = https://github.com/tim/erlang-decimal
+pkg_decimal_commit = master
+
+PACKAGES += detergent
+pkg_detergent_name = detergent
+pkg_detergent_description = An emulsifying Erlang SOAP library
+pkg_detergent_homepage = https://github.com/devinus/detergent
+pkg_detergent_fetch = git
+pkg_detergent_repo = https://github.com/devinus/detergent
+pkg_detergent_commit = master
+
+PACKAGES += detest
+pkg_detest_name = detest
+pkg_detest_description = Tool for running tests on a cluster of erlang nodes
+pkg_detest_homepage = https://github.com/biokoda/detest
+pkg_detest_fetch = git
+pkg_detest_repo = https://github.com/biokoda/detest
+pkg_detest_commit = master
+
+PACKAGES += dh_date
+pkg_dh_date_name = dh_date
+pkg_dh_date_description = Date formatting / parsing library for erlang
+pkg_dh_date_homepage = https://github.com/daleharvey/dh_date
+pkg_dh_date_fetch = git
+pkg_dh_date_repo = https://github.com/daleharvey/dh_date
+pkg_dh_date_commit = master
+
+PACKAGES += dirbusterl
+pkg_dirbusterl_name = dirbusterl
+pkg_dirbusterl_description = DirBuster successor in Erlang
+pkg_dirbusterl_homepage = https://github.com/silentsignal/DirBustErl
+pkg_dirbusterl_fetch = git
+pkg_dirbusterl_repo = https://github.com/silentsignal/DirBustErl
+pkg_dirbusterl_commit = master
+
+PACKAGES += dispcount
+pkg_dispcount_name = dispcount
+pkg_dispcount_description = Erlang task dispatcher based on ETS counters.
+pkg_dispcount_homepage = https://github.com/ferd/dispcount
+pkg_dispcount_fetch = git
+pkg_dispcount_repo = https://github.com/ferd/dispcount
+pkg_dispcount_commit = master
+
+PACKAGES += dlhttpc
+pkg_dlhttpc_name = dlhttpc
+pkg_dlhttpc_description = dispcount-based lhttpc fork for massive amounts of requests to limited endpoints
+pkg_dlhttpc_homepage = https://github.com/ferd/dlhttpc
+pkg_dlhttpc_fetch = git
+pkg_dlhttpc_repo = https://github.com/ferd/dlhttpc
+pkg_dlhttpc_commit = master
+
+PACKAGES += dns
+pkg_dns_name = dns
+pkg_dns_description = Erlang DNS library
+pkg_dns_homepage = https://github.com/aetrion/dns_erlang
+pkg_dns_fetch = git
+pkg_dns_repo = https://github.com/aetrion/dns_erlang
+pkg_dns_commit = master
+
+PACKAGES += dnssd
+pkg_dnssd_name = dnssd
+pkg_dnssd_description = Erlang interface to Apple's Bonjour DNS Service Discovery implementation
+pkg_dnssd_homepage = https://github.com/benoitc/dnssd_erlang
+pkg_dnssd_fetch = git
+pkg_dnssd_repo = https://github.com/benoitc/dnssd_erlang
+pkg_dnssd_commit = master
+
+PACKAGES += dynamic_compile
+pkg_dynamic_compile_name = dynamic_compile
+pkg_dynamic_compile_description = compile and load erlang modules from string input
+pkg_dynamic_compile_homepage = https://github.com/jkvor/dynamic_compile
+pkg_dynamic_compile_fetch = git
+pkg_dynamic_compile_repo = https://github.com/jkvor/dynamic_compile
+pkg_dynamic_compile_commit = master
+
+PACKAGES += e2
+pkg_e2_name = e2
+pkg_e2_description = Library to simplify writing correct OTP applications.
+pkg_e2_homepage = http://e2project.org
+pkg_e2_fetch = git
+pkg_e2_repo = https://github.com/gar1t/e2
+pkg_e2_commit = master
+
+PACKAGES += eamf
+pkg_eamf_name = eamf
+pkg_eamf_description = eAMF provides Action Message Format (AMF) support for Erlang
+pkg_eamf_homepage = https://github.com/mrinalwadhwa/eamf
+pkg_eamf_fetch = git
+pkg_eamf_repo = https://github.com/mrinalwadhwa/eamf
+pkg_eamf_commit = master
+
+PACKAGES += eavro
+pkg_eavro_name = eavro
+pkg_eavro_description = Apache Avro encoder/decoder
+pkg_eavro_homepage = https://github.com/SIfoxDevTeam/eavro
+pkg_eavro_fetch = git
+pkg_eavro_repo = https://github.com/SIfoxDevTeam/eavro
+pkg_eavro_commit = master
+
+PACKAGES += ecapnp
+pkg_ecapnp_name = ecapnp
+pkg_ecapnp_description = Cap'n Proto library for Erlang
+pkg_ecapnp_homepage = https://github.com/kaos/ecapnp
+pkg_ecapnp_fetch = git
+pkg_ecapnp_repo = https://github.com/kaos/ecapnp
+pkg_ecapnp_commit = master
+
+PACKAGES += econfig
+pkg_econfig_name = econfig
+pkg_econfig_description = simple Erlang config handler using INI files
+pkg_econfig_homepage = https://github.com/benoitc/econfig
+pkg_econfig_fetch = git
+pkg_econfig_repo = https://github.com/benoitc/econfig
+pkg_econfig_commit = master
+
+PACKAGES += edate
+pkg_edate_name = edate
+pkg_edate_description = date manipulation library for erlang
+pkg_edate_homepage = https://github.com/dweldon/edate
+pkg_edate_fetch = git
+pkg_edate_repo = https://github.com/dweldon/edate
+pkg_edate_commit = master
+
+PACKAGES += edgar
+pkg_edgar_name = edgar
+pkg_edgar_description = Erlang Does GNU AR
+pkg_edgar_homepage = https://github.com/crownedgrouse/edgar
+pkg_edgar_fetch = git
+pkg_edgar_repo = https://github.com/crownedgrouse/edgar
+pkg_edgar_commit = master
+
+PACKAGES += edis
+pkg_edis_name = edis
+pkg_edis_description = An Erlang implementation of Redis KV Store
+pkg_edis_homepage = http://inaka.github.com/edis/
+pkg_edis_fetch = git
+pkg_edis_repo = https://github.com/inaka/edis
+pkg_edis_commit = master
+
+PACKAGES += edns
+pkg_edns_name = edns
+pkg_edns_description = Erlang/OTP DNS server
+pkg_edns_homepage = https://github.com/hcvst/erlang-dns
+pkg_edns_fetch = git
+pkg_edns_repo = https://github.com/hcvst/erlang-dns
+pkg_edns_commit = master
+
+PACKAGES += edown
+pkg_edown_name = edown
+pkg_edown_description = EDoc extension for generating Github-flavored Markdown
+pkg_edown_homepage = https://github.com/uwiger/edown
+pkg_edown_fetch = git
+pkg_edown_repo = https://github.com/uwiger/edown
+pkg_edown_commit = master
+
+PACKAGES += eep
+pkg_eep_name = eep
+pkg_eep_description = Erlang Easy Profiling (eep) application provides a way to analyze application performance and call hierarchy
+pkg_eep_homepage = https://github.com/virtan/eep
+pkg_eep_fetch = git
+pkg_eep_repo = https://github.com/virtan/eep
+pkg_eep_commit = master
+
+PACKAGES += eep_app
+pkg_eep_app_name = eep_app
+pkg_eep_app_description = Embedded Event Processing
+pkg_eep_app_homepage = https://github.com/darach/eep-erl
+pkg_eep_app_fetch = git
+pkg_eep_app_repo = https://github.com/darach/eep-erl
+pkg_eep_app_commit = master
+
+PACKAGES += efene
+pkg_efene_name = efene
+pkg_efene_description = Alternative syntax for the Erlang Programming Language focusing on simplicity, ease of use and programmer UX
+pkg_efene_homepage = https://github.com/efene/efene
+pkg_efene_fetch = git
+pkg_efene_repo = https://github.com/efene/efene
+pkg_efene_commit = master
+
+PACKAGES += egeoip
+pkg_egeoip_name = egeoip
+pkg_egeoip_description = Erlang IP Geolocation module, currently supporting the MaxMind GeoLite City Database.
+pkg_egeoip_homepage = https://github.com/mochi/egeoip
+pkg_egeoip_fetch = git
+pkg_egeoip_repo = https://github.com/mochi/egeoip
+pkg_egeoip_commit = master
+
+PACKAGES += ehsa
+pkg_ehsa_name = ehsa
+pkg_ehsa_description = Erlang HTTP server basic and digest authentication modules
+pkg_ehsa_homepage = https://bitbucket.org/a12n/ehsa
+pkg_ehsa_fetch = hg
+pkg_ehsa_repo = https://bitbucket.org/a12n/ehsa
+pkg_ehsa_commit = default
+
+PACKAGES += ej
+pkg_ej_name = ej
+pkg_ej_description = Helper module for working with Erlang terms representing JSON
+pkg_ej_homepage = https://github.com/seth/ej
+pkg_ej_fetch = git
+pkg_ej_repo = https://github.com/seth/ej
+pkg_ej_commit = master
+
+PACKAGES += ejabberd
+pkg_ejabberd_name = ejabberd
+pkg_ejabberd_description = Robust, ubiquitous and massively scalable Jabber / XMPP Instant Messaging platform
+pkg_ejabberd_homepage = https://github.com/processone/ejabberd
+pkg_ejabberd_fetch = git
+pkg_ejabberd_repo = https://github.com/processone/ejabberd
+pkg_ejabberd_commit = master
+
+PACKAGES += ejwt
+pkg_ejwt_name = ejwt
+pkg_ejwt_description = erlang library for JSON Web Token
+pkg_ejwt_homepage = https://github.com/artefactop/ejwt
+pkg_ejwt_fetch = git
+pkg_ejwt_repo = https://github.com/artefactop/ejwt
+pkg_ejwt_commit = master
+
+PACKAGES += ekaf
+pkg_ekaf_name = ekaf
+pkg_ekaf_description = A minimal, high-performance Kafka client in Erlang.
+pkg_ekaf_homepage = https://github.com/helpshift/ekaf
+pkg_ekaf_fetch = git
+pkg_ekaf_repo = https://github.com/helpshift/ekaf
+pkg_ekaf_commit = master
+
+PACKAGES += elarm
+pkg_elarm_name = elarm
+pkg_elarm_description = Alarm Manager for Erlang.
+pkg_elarm_homepage = https://github.com/esl/elarm
+pkg_elarm_fetch = git
+pkg_elarm_repo = https://github.com/esl/elarm
+pkg_elarm_commit = master
+
+PACKAGES += eleveldb
+pkg_eleveldb_name = eleveldb
+pkg_eleveldb_description = Erlang LevelDB API
+pkg_eleveldb_homepage = https://github.com/basho/eleveldb
+pkg_eleveldb_fetch = git
+pkg_eleveldb_repo = https://github.com/basho/eleveldb
+pkg_eleveldb_commit = master
+
+PACKAGES += elixir
+pkg_elixir_name = elixir
+pkg_elixir_description = Elixir is a dynamic, functional language designed for building scalable and maintainable applications
+pkg_elixir_homepage = https://elixir-lang.org/
+pkg_elixir_fetch = git
+pkg_elixir_repo = https://github.com/elixir-lang/elixir
+pkg_elixir_commit = master
+
+PACKAGES += elli
+pkg_elli_name = elli
+pkg_elli_description = Simple, robust and performant Erlang web server
+pkg_elli_homepage = https://github.com/elli-lib/elli
+pkg_elli_fetch = git
+pkg_elli_repo = https://github.com/elli-lib/elli
+pkg_elli_commit = master
+
+PACKAGES += elvis
+pkg_elvis_name = elvis
+pkg_elvis_description = Erlang Style Reviewer
+pkg_elvis_homepage = https://github.com/inaka/elvis
+pkg_elvis_fetch = git
+pkg_elvis_repo = https://github.com/inaka/elvis
+pkg_elvis_commit = master
+
+PACKAGES += emagick
+pkg_emagick_name = emagick
+pkg_emagick_description = Wrapper for Graphics/ImageMagick command line tool.
+pkg_emagick_homepage = https://github.com/kivra/emagick
+pkg_emagick_fetch = git
+pkg_emagick_repo = https://github.com/kivra/emagick
+pkg_emagick_commit = master
+
+PACKAGES += emysql
+pkg_emysql_name = emysql
+pkg_emysql_description = Stable, pure Erlang MySQL driver.
+pkg_emysql_homepage = https://github.com/Eonblast/Emysql
+pkg_emysql_fetch = git
+pkg_emysql_repo = https://github.com/Eonblast/Emysql
+pkg_emysql_commit = master
+
+PACKAGES += enm
+pkg_enm_name = enm
+pkg_enm_description = Erlang driver for nanomsg
+pkg_enm_homepage = https://github.com/basho/enm
+pkg_enm_fetch = git
+pkg_enm_repo = https://github.com/basho/enm
+pkg_enm_commit = master
+
+PACKAGES += entop
+pkg_entop_name = entop
+pkg_entop_description = A top-like tool for monitoring an Erlang node
+pkg_entop_homepage = https://github.com/mazenharake/entop
+pkg_entop_fetch = git
+pkg_entop_repo = https://github.com/mazenharake/entop
+pkg_entop_commit = master
+
+PACKAGES += epcap
+pkg_epcap_name = epcap
+pkg_epcap_description = Erlang packet capture interface using pcap
+pkg_epcap_homepage = https://github.com/msantos/epcap
+pkg_epcap_fetch = git
+pkg_epcap_repo = https://github.com/msantos/epcap
+pkg_epcap_commit = master
+
+PACKAGES += eper
+pkg_eper_name = eper
+pkg_eper_description = Erlang performance and debugging tools.
+pkg_eper_homepage = https://github.com/massemanet/eper
+pkg_eper_fetch = git
+pkg_eper_repo = https://github.com/massemanet/eper
+pkg_eper_commit = master
+
+PACKAGES += epgsql
+pkg_epgsql_name = epgsql
+pkg_epgsql_description = Erlang PostgreSQL client library.
+pkg_epgsql_homepage = https://github.com/epgsql/epgsql
+pkg_epgsql_fetch = git
+pkg_epgsql_repo = https://github.com/epgsql/epgsql
+pkg_epgsql_commit = master
+
+PACKAGES += episcina
+pkg_episcina_name = episcina
+pkg_episcina_description = A simple non-intrusive resource pool for connections
+pkg_episcina_homepage = https://github.com/erlware/episcina
+pkg_episcina_fetch = git
+pkg_episcina_repo = https://github.com/erlware/episcina
+pkg_episcina_commit = master
+
+PACKAGES += eplot
+pkg_eplot_name = eplot
+pkg_eplot_description = A plot engine written in erlang.
+pkg_eplot_homepage = https://github.com/psyeugenic/eplot
+pkg_eplot_fetch = git
+pkg_eplot_repo = https://github.com/psyeugenic/eplot
+pkg_eplot_commit = master
+
+PACKAGES += epocxy
+pkg_epocxy_name = epocxy
+pkg_epocxy_description = Erlang Patterns of Concurrency
+pkg_epocxy_homepage = https://github.com/duomark/epocxy
+pkg_epocxy_fetch = git
+pkg_epocxy_repo = https://github.com/duomark/epocxy
+pkg_epocxy_commit = master
+
+PACKAGES += epubnub
+pkg_epubnub_name = epubnub
+pkg_epubnub_description = Erlang PubNub API
+pkg_epubnub_homepage = https://github.com/tsloughter/epubnub
+pkg_epubnub_fetch = git
+pkg_epubnub_repo = https://github.com/tsloughter/epubnub
+pkg_epubnub_commit = master
+
+PACKAGES += eqm
+pkg_eqm_name = eqm
+pkg_eqm_description = Erlang pub sub with supply-demand channels
+pkg_eqm_homepage = https://github.com/loucash/eqm
+pkg_eqm_fetch = git
+pkg_eqm_repo = https://github.com/loucash/eqm
+pkg_eqm_commit = master
+
+PACKAGES += eredis
+pkg_eredis_name = eredis
+pkg_eredis_description = Erlang Redis client
+pkg_eredis_homepage = https://github.com/wooga/eredis
+pkg_eredis_fetch = git
+pkg_eredis_repo = https://github.com/wooga/eredis
+pkg_eredis_commit = master
+
+PACKAGES += eredis_pool
+pkg_eredis_pool_name = eredis_pool
+pkg_eredis_pool_description = eredis_pool is a pool of Redis clients, using eredis and poolboy.
+pkg_eredis_pool_homepage = https://github.com/hiroeorz/eredis_pool
+pkg_eredis_pool_fetch = git
+pkg_eredis_pool_repo = https://github.com/hiroeorz/eredis_pool
+pkg_eredis_pool_commit = master
+
+PACKAGES += erl_streams
+pkg_erl_streams_name = erl_streams
+pkg_erl_streams_description = Streams in Erlang
+pkg_erl_streams_homepage = https://github.com/epappas/erl_streams
+pkg_erl_streams_fetch = git
+pkg_erl_streams_repo = https://github.com/epappas/erl_streams
+pkg_erl_streams_commit = master
+
+PACKAGES += erlang_cep
+pkg_erlang_cep_name = erlang_cep
+pkg_erlang_cep_description = A basic CEP package written in erlang
+pkg_erlang_cep_homepage = https://github.com/danmacklin/erlang_cep
+pkg_erlang_cep_fetch = git
+pkg_erlang_cep_repo = https://github.com/danmacklin/erlang_cep
+pkg_erlang_cep_commit = master
+
+PACKAGES += erlang_js
+pkg_erlang_js_name = erlang_js
+pkg_erlang_js_description = A linked-in driver for Erlang to Mozilla's Spidermonkey Javascript runtime.
+pkg_erlang_js_homepage = https://github.com/basho/erlang_js
+pkg_erlang_js_fetch = git
+pkg_erlang_js_repo = https://github.com/basho/erlang_js
+pkg_erlang_js_commit = master
+
+PACKAGES += erlang_localtime
+pkg_erlang_localtime_name = erlang_localtime
+pkg_erlang_localtime_description = Erlang library for conversion from one local time to another
+pkg_erlang_localtime_homepage = https://github.com/dmitryme/erlang_localtime
+pkg_erlang_localtime_fetch = git
+pkg_erlang_localtime_repo = https://github.com/dmitryme/erlang_localtime
+pkg_erlang_localtime_commit = master
+
+PACKAGES += erlang_smtp
+pkg_erlang_smtp_name = erlang_smtp
+pkg_erlang_smtp_description = Erlang SMTP and POP3 server code.
+pkg_erlang_smtp_homepage = https://github.com/tonyg/erlang-smtp
+pkg_erlang_smtp_fetch = git
+pkg_erlang_smtp_repo = https://github.com/tonyg/erlang-smtp
+pkg_erlang_smtp_commit = master
+
+PACKAGES += erlang_term
+pkg_erlang_term_name = erlang_term
+pkg_erlang_term_description = Erlang Term Info
+pkg_erlang_term_homepage = https://github.com/okeuday/erlang_term
+pkg_erlang_term_fetch = git
+pkg_erlang_term_repo = https://github.com/okeuday/erlang_term
+pkg_erlang_term_commit = master
+
+PACKAGES += erlastic_search
+pkg_erlastic_search_name = erlastic_search
+pkg_erlastic_search_description = An Erlang app for communicating with Elasticsearch's REST interface.
+pkg_erlastic_search_homepage = https://github.com/tsloughter/erlastic_search
+pkg_erlastic_search_fetch = git
+pkg_erlastic_search_repo = https://github.com/tsloughter/erlastic_search
+pkg_erlastic_search_commit = master
+
+PACKAGES += erlasticsearch
+pkg_erlasticsearch_name = erlasticsearch
+pkg_erlasticsearch_description = Erlang thrift interface to elastic_search
+pkg_erlasticsearch_homepage = https://github.com/dieswaytoofast/erlasticsearch
+pkg_erlasticsearch_fetch = git
+pkg_erlasticsearch_repo = https://github.com/dieswaytoofast/erlasticsearch
+pkg_erlasticsearch_commit = master
+
+PACKAGES += erlbrake
+pkg_erlbrake_name = erlbrake
+pkg_erlbrake_description = Erlang Airbrake notification client
+pkg_erlbrake_homepage = https://github.com/kenpratt/erlbrake
+pkg_erlbrake_fetch = git
+pkg_erlbrake_repo = https://github.com/kenpratt/erlbrake
+pkg_erlbrake_commit = master
+
+PACKAGES += erlcloud
+pkg_erlcloud_name = erlcloud
+pkg_erlcloud_description = Cloud Computing library for erlang (Amazon EC2, S3, SQS, SimpleDB, Mechanical Turk, ELB)
+pkg_erlcloud_homepage = https://github.com/gleber/erlcloud
+pkg_erlcloud_fetch = git
+pkg_erlcloud_repo = https://github.com/gleber/erlcloud
+pkg_erlcloud_commit = master
+
+PACKAGES += erlcron
+pkg_erlcron_name = erlcron
+pkg_erlcron_description = Erlang cronish system
+pkg_erlcron_homepage = https://github.com/erlware/erlcron
+pkg_erlcron_fetch = git
+pkg_erlcron_repo = https://github.com/erlware/erlcron
+pkg_erlcron_commit = master
+
+PACKAGES += erldb
+pkg_erldb_name = erldb
+pkg_erldb_description = ORM (Object-relational mapping) application implemented in Erlang
+pkg_erldb_homepage = http://erldb.org
+pkg_erldb_fetch = git
+pkg_erldb_repo = https://github.com/erldb/erldb
+pkg_erldb_commit = master
+
+PACKAGES += erldis
+pkg_erldis_name = erldis
+pkg_erldis_description = redis erlang client library
+pkg_erldis_homepage = https://github.com/cstar/erldis
+pkg_erldis_fetch = git
+pkg_erldis_repo = https://github.com/cstar/erldis
+pkg_erldis_commit = master
+
+PACKAGES += erldns
+pkg_erldns_name = erldns
+pkg_erldns_description = DNS server, in erlang.
+pkg_erldns_homepage = https://github.com/aetrion/erl-dns
+pkg_erldns_fetch = git
+pkg_erldns_repo = https://github.com/aetrion/erl-dns
+pkg_erldns_commit = master
+
+PACKAGES += erldocker
+pkg_erldocker_name = erldocker
+pkg_erldocker_description = Docker Remote API client for Erlang
+pkg_erldocker_homepage = https://github.com/proger/erldocker
+pkg_erldocker_fetch = git
+pkg_erldocker_repo = https://github.com/proger/erldocker
+pkg_erldocker_commit = master
+
+PACKAGES += erlfsmon
+pkg_erlfsmon_name = erlfsmon
+pkg_erlfsmon_description = Erlang filesystem event watcher for Linux and OSX
+pkg_erlfsmon_homepage = https://github.com/proger/erlfsmon
+pkg_erlfsmon_fetch = git
+pkg_erlfsmon_repo = https://github.com/proger/erlfsmon
+pkg_erlfsmon_commit = master
+
+PACKAGES += erlgit
+pkg_erlgit_name = erlgit
+pkg_erlgit_description = Erlang convenience wrapper around git executable
+pkg_erlgit_homepage = https://github.com/gleber/erlgit
+pkg_erlgit_fetch = git
+pkg_erlgit_repo = https://github.com/gleber/erlgit
+pkg_erlgit_commit = master
+
+PACKAGES += erlguten
+pkg_erlguten_name = erlguten
+pkg_erlguten_description = ErlGuten is a system for high-quality typesetting, written purely in Erlang.
+pkg_erlguten_homepage = https://github.com/richcarl/erlguten
+pkg_erlguten_fetch = git
+pkg_erlguten_repo = https://github.com/richcarl/erlguten
+pkg_erlguten_commit = master
+
+PACKAGES += erlmc
+pkg_erlmc_name = erlmc
+pkg_erlmc_description = Erlang memcached binary protocol client
+pkg_erlmc_homepage = https://github.com/jkvor/erlmc
+pkg_erlmc_fetch = git
+pkg_erlmc_repo = https://github.com/jkvor/erlmc
+pkg_erlmc_commit = master
+
+PACKAGES += erlmongo
+pkg_erlmongo_name = erlmongo
+pkg_erlmongo_description = Record-based Erlang driver for MongoDB with GridFS support
+pkg_erlmongo_homepage = https://github.com/SergejJurecko/erlmongo
+pkg_erlmongo_fetch = git
+pkg_erlmongo_repo = https://github.com/SergejJurecko/erlmongo
+pkg_erlmongo_commit = master
+
+PACKAGES += erlog
+pkg_erlog_name = erlog
+pkg_erlog_description = Prolog interpreter in and for Erlang
+pkg_erlog_homepage = https://github.com/rvirding/erlog
+pkg_erlog_fetch = git
+pkg_erlog_repo = https://github.com/rvirding/erlog
+pkg_erlog_commit = master
+
+PACKAGES += erlpass
+pkg_erlpass_name = erlpass
+pkg_erlpass_description = A library to handle password hashing and changing in a safe manner, independent from any kind of storage whatsoever.
+pkg_erlpass_homepage = https://github.com/ferd/erlpass
+pkg_erlpass_fetch = git
+pkg_erlpass_repo = https://github.com/ferd/erlpass
+pkg_erlpass_commit = master
+
+PACKAGES += erlport
+pkg_erlport_name = erlport
+pkg_erlport_description = ErlPort - connect Erlang to other languages
+pkg_erlport_homepage = https://github.com/hdima/erlport
+pkg_erlport_fetch = git
+pkg_erlport_repo = https://github.com/hdima/erlport
+pkg_erlport_commit = master
+
+PACKAGES += erlsh
+pkg_erlsh_name = erlsh
+pkg_erlsh_description = Erlang shell tools
+pkg_erlsh_homepage = https://github.com/proger/erlsh
+pkg_erlsh_fetch = git
+pkg_erlsh_repo = https://github.com/proger/erlsh
+pkg_erlsh_commit = master
+
+PACKAGES += erlsha2
+pkg_erlsha2_name = erlsha2
+pkg_erlsha2_description = SHA-224, SHA-256, SHA-384, SHA-512 implemented in Erlang NIFs.
+pkg_erlsha2_homepage = https://github.com/vinoski/erlsha2
+pkg_erlsha2_fetch = git
+pkg_erlsha2_repo = https://github.com/vinoski/erlsha2
+pkg_erlsha2_commit = master
+
+PACKAGES += erlsom
+pkg_erlsom_name = erlsom
+pkg_erlsom_description = XML parser for Erlang
+pkg_erlsom_homepage = https://github.com/willemdj/erlsom
+pkg_erlsom_fetch = git
+pkg_erlsom_repo = https://github.com/willemdj/erlsom
+pkg_erlsom_commit = master
+
+PACKAGES += erlubi
+pkg_erlubi_name = erlubi
+pkg_erlubi_description = Ubigraph Erlang Client (and Process Visualizer)
+pkg_erlubi_homepage = https://github.com/krestenkrab/erlubi
+pkg_erlubi_fetch = git
+pkg_erlubi_repo = https://github.com/krestenkrab/erlubi
+pkg_erlubi_commit = master
+
+PACKAGES += erlvolt
+pkg_erlvolt_name = erlvolt
+pkg_erlvolt_description = VoltDB Erlang Client Driver
+pkg_erlvolt_homepage = https://github.com/VoltDB/voltdb-client-erlang
+pkg_erlvolt_fetch = git
+pkg_erlvolt_repo = https://github.com/VoltDB/voltdb-client-erlang
+pkg_erlvolt_commit = master
+
+PACKAGES += erlware_commons
+pkg_erlware_commons_name = erlware_commons
+pkg_erlware_commons_description = Erlware Commons is an Erlware project focused on all aspects of reusable Erlang components.
+pkg_erlware_commons_homepage = https://github.com/erlware/erlware_commons
+pkg_erlware_commons_fetch = git
+pkg_erlware_commons_repo = https://github.com/erlware/erlware_commons
+pkg_erlware_commons_commit = master
+
+PACKAGES += erlydtl
+pkg_erlydtl_name = erlydtl
+pkg_erlydtl_description = Django Template Language for Erlang.
+pkg_erlydtl_homepage = https://github.com/erlydtl/erlydtl
+pkg_erlydtl_fetch = git
+pkg_erlydtl_repo = https://github.com/erlydtl/erlydtl
+pkg_erlydtl_commit = master
+
+PACKAGES += errd
+pkg_errd_name = errd
+pkg_errd_description = Erlang RRDTool library
+pkg_errd_homepage = https://github.com/archaelus/errd
+pkg_errd_fetch = git
+pkg_errd_repo = https://github.com/archaelus/errd
+pkg_errd_commit = master
+
+PACKAGES += erserve
+pkg_erserve_name = erserve
+pkg_erserve_description = Erlang/Rserve communication interface
+pkg_erserve_homepage = https://github.com/del/erserve
+pkg_erserve_fetch = git
+pkg_erserve_repo = https://github.com/del/erserve
+pkg_erserve_commit = master
+
+PACKAGES += erwa
+pkg_erwa_name = erwa
+pkg_erwa_description = A WAMP router and client written in Erlang.
+pkg_erwa_homepage = https://github.com/bwegh/erwa
+pkg_erwa_fetch = git
+pkg_erwa_repo = https://github.com/bwegh/erwa
+pkg_erwa_commit = master
+
+PACKAGES += escalus
+pkg_escalus_name = escalus
+pkg_escalus_description = An XMPP client library in Erlang for conveniently testing XMPP servers
+pkg_escalus_homepage = https://github.com/esl/escalus
+pkg_escalus_fetch = git
+pkg_escalus_repo = https://github.com/esl/escalus
+pkg_escalus_commit = master
+
+PACKAGES += esh_mk
+pkg_esh_mk_name = esh_mk
+pkg_esh_mk_description = esh template engine plugin for erlang.mk
+pkg_esh_mk_homepage = https://github.com/crownedgrouse/esh.mk
+pkg_esh_mk_fetch = git
+pkg_esh_mk_repo = https://github.com/crownedgrouse/esh.mk.git
+pkg_esh_mk_commit = master
+
+PACKAGES += espec
+pkg_espec_name = espec
+pkg_espec_description = ESpec: Behaviour driven development framework for Erlang
+pkg_espec_homepage = https://github.com/lucaspiller/espec
+pkg_espec_fetch = git
+pkg_espec_repo = https://github.com/lucaspiller/espec
+pkg_espec_commit = master
+
+PACKAGES += estatsd
+pkg_estatsd_name = estatsd
+pkg_estatsd_description = Erlang stats aggregation app that periodically flushes data to graphite
+pkg_estatsd_homepage = https://github.com/RJ/estatsd
+pkg_estatsd_fetch = git
+pkg_estatsd_repo = https://github.com/RJ/estatsd
+pkg_estatsd_commit = master
+
+PACKAGES += etap
+pkg_etap_name = etap
+pkg_etap_description = etap is a simple erlang testing library that provides TAP compliant output.
+pkg_etap_homepage = https://github.com/ngerakines/etap
+pkg_etap_fetch = git
+pkg_etap_repo = https://github.com/ngerakines/etap
+pkg_etap_commit = master
+
+PACKAGES += etest
+pkg_etest_name = etest
+pkg_etest_description = A lightweight, convention over configuration test framework for Erlang
+pkg_etest_homepage = https://github.com/wooga/etest
+pkg_etest_fetch = git
+pkg_etest_repo = https://github.com/wooga/etest
+pkg_etest_commit = master
+
+PACKAGES += etest_http
+pkg_etest_http_name = etest_http
+pkg_etest_http_description = etest Assertions around HTTP (client-side)
+pkg_etest_http_homepage = https://github.com/wooga/etest_http
+pkg_etest_http_fetch = git
+pkg_etest_http_repo = https://github.com/wooga/etest_http
+pkg_etest_http_commit = master
+
+PACKAGES += etoml
+pkg_etoml_name = etoml
+pkg_etoml_description = TOML language erlang parser
+pkg_etoml_homepage = https://github.com/kalta/etoml
+pkg_etoml_fetch = git
+pkg_etoml_repo = https://github.com/kalta/etoml
+pkg_etoml_commit = master
+
+PACKAGES += eunit
+pkg_eunit_name = eunit
+pkg_eunit_description = The EUnit lightweight unit testing framework for Erlang - this is the canonical development repository.
+pkg_eunit_homepage = https://github.com/richcarl/eunit
+pkg_eunit_fetch = git
+pkg_eunit_repo = https://github.com/richcarl/eunit
+pkg_eunit_commit = master
+
+PACKAGES += eunit_formatters
+pkg_eunit_formatters_name = eunit_formatters
+pkg_eunit_formatters_description = Because eunit's output sucks. Let's make it better.
+pkg_eunit_formatters_homepage = https://github.com/seancribbs/eunit_formatters
+pkg_eunit_formatters_fetch = git
+pkg_eunit_formatters_repo = https://github.com/seancribbs/eunit_formatters
+pkg_eunit_formatters_commit = master
+
+PACKAGES += euthanasia
+pkg_euthanasia_name = euthanasia
+pkg_euthanasia_description = Merciful killer for your Erlang processes
+pkg_euthanasia_homepage = https://github.com/doubleyou/euthanasia
+pkg_euthanasia_fetch = git
+pkg_euthanasia_repo = https://github.com/doubleyou/euthanasia
+pkg_euthanasia_commit = master
+
+PACKAGES += evum
+pkg_evum_name = evum
+pkg_evum_description = Spawn Linux VMs as Erlang processes in the Erlang VM
+pkg_evum_homepage = https://github.com/msantos/evum
+pkg_evum_fetch = git
+pkg_evum_repo = https://github.com/msantos/evum
+pkg_evum_commit = master
+
+PACKAGES += exec
+pkg_exec_name = erlexec
+pkg_exec_description = Execute and control OS processes from Erlang/OTP.
+pkg_exec_homepage = http://saleyn.github.com/erlexec
+pkg_exec_fetch = git
+pkg_exec_repo = https://github.com/saleyn/erlexec
+pkg_exec_commit = master
+
+PACKAGES += exml
+pkg_exml_name = exml
+pkg_exml_description = XML parsing library in Erlang
+pkg_exml_homepage = https://github.com/paulgray/exml
+pkg_exml_fetch = git
+pkg_exml_repo = https://github.com/paulgray/exml
+pkg_exml_commit = master
+
+PACKAGES += exometer
+pkg_exometer_name = exometer
+pkg_exometer_description = Basic measurement objects and probe behavior
+pkg_exometer_homepage = https://github.com/Feuerlabs/exometer
+pkg_exometer_fetch = git
+pkg_exometer_repo = https://github.com/Feuerlabs/exometer
+pkg_exometer_commit = master
+
+PACKAGES += exs1024
+pkg_exs1024_name = exs1024
+pkg_exs1024_description = Xorshift1024star pseudo random number generator for Erlang.
+pkg_exs1024_homepage = https://github.com/jj1bdx/exs1024
+pkg_exs1024_fetch = git
+pkg_exs1024_repo = https://github.com/jj1bdx/exs1024
+pkg_exs1024_commit = master
+
+PACKAGES += exs64
+pkg_exs64_name = exs64
+pkg_exs64_description = Xorshift64star pseudo random number generator for Erlang.
+pkg_exs64_homepage = https://github.com/jj1bdx/exs64
+pkg_exs64_fetch = git
+pkg_exs64_repo = https://github.com/jj1bdx/exs64
+pkg_exs64_commit = master
+
+PACKAGES += exsplus116
+pkg_exsplus116_name = exsplus116
+pkg_exsplus116_description = Xorshift116plus for Erlang
+pkg_exsplus116_homepage = https://github.com/jj1bdx/exsplus116
+pkg_exsplus116_fetch = git
+pkg_exsplus116_repo = https://github.com/jj1bdx/exsplus116
+pkg_exsplus116_commit = master
+
+PACKAGES += exsplus128
+pkg_exsplus128_name = exsplus128
+pkg_exsplus128_description = Xorshift128plus pseudo random number generator for Erlang.
+pkg_exsplus128_homepage = https://github.com/jj1bdx/exsplus128
+pkg_exsplus128_fetch = git
+pkg_exsplus128_repo = https://github.com/jj1bdx/exsplus128
+pkg_exsplus128_commit = master
+
+PACKAGES += ezmq
+pkg_ezmq_name = ezmq
+pkg_ezmq_description = zMQ implemented in Erlang
+pkg_ezmq_homepage = https://github.com/RoadRunnr/ezmq
+pkg_ezmq_fetch = git
+pkg_ezmq_repo = https://github.com/RoadRunnr/ezmq
+pkg_ezmq_commit = master
+
+PACKAGES += ezmtp
+pkg_ezmtp_name = ezmtp
+pkg_ezmtp_description = ZMTP protocol in pure Erlang.
+pkg_ezmtp_homepage = https://github.com/a13x/ezmtp
+pkg_ezmtp_fetch = git
+pkg_ezmtp_repo = https://github.com/a13x/ezmtp
+pkg_ezmtp_commit = master
+
+PACKAGES += fast_disk_log
+pkg_fast_disk_log_name = fast_disk_log
+pkg_fast_disk_log_description = Pool-based asynchronous Erlang disk logger
+pkg_fast_disk_log_homepage = https://github.com/lpgauth/fast_disk_log
+pkg_fast_disk_log_fetch = git
+pkg_fast_disk_log_repo = https://github.com/lpgauth/fast_disk_log
+pkg_fast_disk_log_commit = master
+
+PACKAGES += feeder
+pkg_feeder_name = feeder
+pkg_feeder_description = Stream parse RSS and Atom formatted XML feeds.
+pkg_feeder_homepage = https://github.com/michaelnisi/feeder
+pkg_feeder_fetch = git
+pkg_feeder_repo = https://github.com/michaelnisi/feeder
+pkg_feeder_commit = master
+
+PACKAGES += find_crate
+pkg_find_crate_name = find_crate
+pkg_find_crate_description = Find Rust libs and exes in Erlang application priv directory
+pkg_find_crate_homepage = https://github.com/goertzenator/find_crate
+pkg_find_crate_fetch = git
+pkg_find_crate_repo = https://github.com/goertzenator/find_crate
+pkg_find_crate_commit = master
+
+PACKAGES += fix
+pkg_fix_name = fix
+pkg_fix_description = http://fixprotocol.org/ implementation.
+pkg_fix_homepage = https://github.com/maxlapshin/fix
+pkg_fix_fetch = git
+pkg_fix_repo = https://github.com/maxlapshin/fix
+pkg_fix_commit = master
+
+PACKAGES += flower
+pkg_flower_name = flower
+pkg_flower_description = FlowER - an Erlang OpenFlow development platform
+pkg_flower_homepage = https://github.com/travelping/flower
+pkg_flower_fetch = git
+pkg_flower_repo = https://github.com/travelping/flower
+pkg_flower_commit = master
+
+PACKAGES += fn
+pkg_fn_name = fn
+pkg_fn_description = Function utilities for Erlang
+pkg_fn_homepage = https://github.com/reiddraper/fn
+pkg_fn_fetch = git
+pkg_fn_repo = https://github.com/reiddraper/fn
+pkg_fn_commit = master
+
+PACKAGES += folsom
+pkg_folsom_name = folsom
+pkg_folsom_description = Expose Erlang Events and Metrics
+pkg_folsom_homepage = https://github.com/boundary/folsom
+pkg_folsom_fetch = git
+pkg_folsom_repo = https://github.com/boundary/folsom
+pkg_folsom_commit = master
+
+PACKAGES += folsom_cowboy
+pkg_folsom_cowboy_name = folsom_cowboy
+pkg_folsom_cowboy_description = A Cowboy based Folsom HTTP Wrapper.
+pkg_folsom_cowboy_homepage = https://github.com/boundary/folsom_cowboy
+pkg_folsom_cowboy_fetch = git
+pkg_folsom_cowboy_repo = https://github.com/boundary/folsom_cowboy
+pkg_folsom_cowboy_commit = master
+
+PACKAGES += folsomite
+pkg_folsomite_name = folsomite
+pkg_folsomite_description = blow up your graphite / riemann server with folsom metrics
+pkg_folsomite_homepage = https://github.com/campanja/folsomite
+pkg_folsomite_fetch = git
+pkg_folsomite_repo = https://github.com/campanja/folsomite
+pkg_folsomite_commit = master
+
+PACKAGES += fs
+pkg_fs_name = fs
+pkg_fs_description = Erlang FileSystem Listener
+pkg_fs_homepage = https://github.com/synrc/fs
+pkg_fs_fetch = git
+pkg_fs_repo = https://github.com/synrc/fs
+pkg_fs_commit = master
+
+PACKAGES += fuse
+pkg_fuse_name = fuse
+pkg_fuse_description = A Circuit Breaker for Erlang
+pkg_fuse_homepage = https://github.com/jlouis/fuse
+pkg_fuse_fetch = git
+pkg_fuse_repo = https://github.com/jlouis/fuse
+pkg_fuse_commit = master
+
+PACKAGES += gcm
+pkg_gcm_name = gcm
+pkg_gcm_description = An Erlang application for Google Cloud Messaging
+pkg_gcm_homepage = https://github.com/pdincau/gcm-erlang
+pkg_gcm_fetch = git
+pkg_gcm_repo = https://github.com/pdincau/gcm-erlang
+pkg_gcm_commit = master
+
+PACKAGES += gcprof
+pkg_gcprof_name = gcprof
+pkg_gcprof_description = Garbage Collection profiler for Erlang
+pkg_gcprof_homepage = https://github.com/knutin/gcprof
+pkg_gcprof_fetch = git
+pkg_gcprof_repo = https://github.com/knutin/gcprof
+pkg_gcprof_commit = master
+
+PACKAGES += geas
+pkg_geas_name = geas
+pkg_geas_description = Guess Erlang Application Scattering
+pkg_geas_homepage = https://github.com/crownedgrouse/geas
+pkg_geas_fetch = git
+pkg_geas_repo = https://github.com/crownedgrouse/geas
+pkg_geas_commit = master
+
+PACKAGES += geef
+pkg_geef_name = geef
+pkg_geef_description = Git NEEEEF (Erlang NIF)
+pkg_geef_homepage = https://github.com/carlosmn/geef
+pkg_geef_fetch = git
+pkg_geef_repo = https://github.com/carlosmn/geef
+pkg_geef_commit = master
+
+PACKAGES += gen_coap
+pkg_gen_coap_name = gen_coap
+pkg_gen_coap_description = Generic Erlang CoAP Client/Server
+pkg_gen_coap_homepage = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_fetch = git
+pkg_gen_coap_repo = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_commit = master
+
+PACKAGES += gen_cycle
+pkg_gen_cycle_name = gen_cycle
+pkg_gen_cycle_description = Simple, generic OTP behaviour for recurring tasks
+pkg_gen_cycle_homepage = https://github.com/aerosol/gen_cycle
+pkg_gen_cycle_fetch = git
+pkg_gen_cycle_repo = https://github.com/aerosol/gen_cycle
+pkg_gen_cycle_commit = develop
+
+PACKAGES += gen_icmp
+pkg_gen_icmp_name = gen_icmp
+pkg_gen_icmp_description = Erlang interface to ICMP sockets
+pkg_gen_icmp_homepage = https://github.com/msantos/gen_icmp
+pkg_gen_icmp_fetch = git
+pkg_gen_icmp_repo = https://github.com/msantos/gen_icmp
+pkg_gen_icmp_commit = master
+
+PACKAGES += gen_leader
+pkg_gen_leader_name = gen_leader
+pkg_gen_leader_description = leader election behavior
+pkg_gen_leader_homepage = https://github.com/garret-smith/gen_leader_revival
+pkg_gen_leader_fetch = git
+pkg_gen_leader_repo = https://github.com/garret-smith/gen_leader_revival
+pkg_gen_leader_commit = master
+
+PACKAGES += gen_nb_server
+pkg_gen_nb_server_name = gen_nb_server
+pkg_gen_nb_server_description = OTP behavior for writing non-blocking servers
+pkg_gen_nb_server_homepage = https://github.com/kevsmith/gen_nb_server
+pkg_gen_nb_server_fetch = git
+pkg_gen_nb_server_repo = https://github.com/kevsmith/gen_nb_server
+pkg_gen_nb_server_commit = master
+
+PACKAGES += gen_paxos
+pkg_gen_paxos_name = gen_paxos
+pkg_gen_paxos_description = An Erlang/OTP-style implementation of the PAXOS distributed consensus protocol
+pkg_gen_paxos_homepage = https://github.com/gburd/gen_paxos
+pkg_gen_paxos_fetch = git
+pkg_gen_paxos_repo = https://github.com/gburd/gen_paxos
+pkg_gen_paxos_commit = master
+
+PACKAGES += gen_rpc
+pkg_gen_rpc_name = gen_rpc
+pkg_gen_rpc_description = A scalable RPC library for Erlang-VM based languages
+pkg_gen_rpc_homepage = https://github.com/priestjim/gen_rpc.git
+pkg_gen_rpc_fetch = git
+pkg_gen_rpc_repo = https://github.com/priestjim/gen_rpc.git
+pkg_gen_rpc_commit = master
+
+PACKAGES += gen_smtp
+pkg_gen_smtp_name = gen_smtp
+pkg_gen_smtp_description = A generic Erlang SMTP server and client that can be extended via callback modules
+pkg_gen_smtp_homepage = https://github.com/Vagabond/gen_smtp
+pkg_gen_smtp_fetch = git
+pkg_gen_smtp_repo = https://github.com/Vagabond/gen_smtp
+pkg_gen_smtp_commit = master
+
+PACKAGES += gen_tracker
+pkg_gen_tracker_name = gen_tracker
+pkg_gen_tracker_description = supervisor with ets handling of children and their metadata
+pkg_gen_tracker_homepage = https://github.com/erlyvideo/gen_tracker
+pkg_gen_tracker_fetch = git
+pkg_gen_tracker_repo = https://github.com/erlyvideo/gen_tracker
+pkg_gen_tracker_commit = master
+
+PACKAGES += gen_unix
+pkg_gen_unix_name = gen_unix
+pkg_gen_unix_description = Erlang Unix socket interface
+pkg_gen_unix_homepage = https://github.com/msantos/gen_unix
+pkg_gen_unix_fetch = git
+pkg_gen_unix_repo = https://github.com/msantos/gen_unix
+pkg_gen_unix_commit = master
+
+PACKAGES += geode
+pkg_geode_name = geode
+pkg_geode_description = geohash/proximity lookup in pure, uncut erlang.
+pkg_geode_homepage = https://github.com/bradfordw/geode
+pkg_geode_fetch = git
+pkg_geode_repo = https://github.com/bradfordw/geode
+pkg_geode_commit = master
+
+PACKAGES += getopt
+pkg_getopt_name = getopt
+pkg_getopt_description = Module to parse command line arguments using the GNU getopt syntax
+pkg_getopt_homepage = https://github.com/jcomellas/getopt
+pkg_getopt_fetch = git
+pkg_getopt_repo = https://github.com/jcomellas/getopt
+pkg_getopt_commit = master
+
+PACKAGES += gettext
+pkg_gettext_name = gettext
+pkg_gettext_description = Erlang internationalization library.
+pkg_gettext_homepage = https://github.com/etnt/gettext
+pkg_gettext_fetch = git
+pkg_gettext_repo = https://github.com/etnt/gettext
+pkg_gettext_commit = master
+
+PACKAGES += giallo
+pkg_giallo_name = giallo
+pkg_giallo_description = Small and flexible web framework on top of Cowboy
+pkg_giallo_homepage = https://github.com/kivra/giallo
+pkg_giallo_fetch = git
+pkg_giallo_repo = https://github.com/kivra/giallo
+pkg_giallo_commit = master
+
+PACKAGES += gin
+pkg_gin_name = gin
+pkg_gin_description = The guards and for Erlang parse_transform
+pkg_gin_homepage = https://github.com/mad-cocktail/gin
+pkg_gin_fetch = git
+pkg_gin_repo = https://github.com/mad-cocktail/gin
+pkg_gin_commit = master
+
+PACKAGES += gitty
+pkg_gitty_name = gitty
+pkg_gitty_description = Git access in erlang
+pkg_gitty_homepage = https://github.com/maxlapshin/gitty
+pkg_gitty_fetch = git
+pkg_gitty_repo = https://github.com/maxlapshin/gitty
+pkg_gitty_commit = master
+
+PACKAGES += gold_fever
+pkg_gold_fever_name = gold_fever
+pkg_gold_fever_description = A Treasure Hunt for Erlangers
+pkg_gold_fever_homepage = https://github.com/inaka/gold_fever
+pkg_gold_fever_fetch = git
+pkg_gold_fever_repo = https://github.com/inaka/gold_fever
+pkg_gold_fever_commit = master
+
+PACKAGES += gpb
+pkg_gpb_name = gpb
+pkg_gpb_description = A Google Protobuf implementation for Erlang
+pkg_gpb_homepage = https://github.com/tomas-abrahamsson/gpb
+pkg_gpb_fetch = git
+pkg_gpb_repo = https://github.com/tomas-abrahamsson/gpb
+pkg_gpb_commit = master
+
+PACKAGES += gproc
+pkg_gproc_name = gproc
+pkg_gproc_description = Extended process registry for Erlang
+pkg_gproc_homepage = https://github.com/uwiger/gproc
+pkg_gproc_fetch = git
+pkg_gproc_repo = https://github.com/uwiger/gproc
+pkg_gproc_commit = master
+
+PACKAGES += grapherl
+pkg_grapherl_name = grapherl
+pkg_grapherl_description = Create graphs of Erlang systems and programs
+pkg_grapherl_homepage = https://github.com/eproxus/grapherl
+pkg_grapherl_fetch = git
+pkg_grapherl_repo = https://github.com/eproxus/grapherl
+pkg_grapherl_commit = master
+
+PACKAGES += grpc
+pkg_grpc_name = grpc
+pkg_grpc_description = gRPC server in Erlang
+pkg_grpc_homepage = https://github.com/Bluehouse-Technology/grpc
+pkg_grpc_fetch = git
+pkg_grpc_repo = https://github.com/Bluehouse-Technology/grpc
+pkg_grpc_commit = master
+
+PACKAGES += grpc_client
+pkg_grpc_client_name = grpc_client
+pkg_grpc_client_description = gRPC client in Erlang
+pkg_grpc_client_homepage = https://github.com/Bluehouse-Technology/grpc_client
+pkg_grpc_client_fetch = git
+pkg_grpc_client_repo = https://github.com/Bluehouse-Technology/grpc_client
+pkg_grpc_client_commit = master
+
+PACKAGES += gun
+pkg_gun_name = gun
+pkg_gun_description = Asynchronous SPDY, HTTP and Websocket client written in Erlang.
+pkg_gun_homepage = http://ninenines.eu
+pkg_gun_fetch = git
+pkg_gun_repo = https://github.com/ninenines/gun
+pkg_gun_commit = master
+
+PACKAGES += gut
+pkg_gut_name = gut
+pkg_gut_description = gut is a template printing (aka scaffolding) tool for Erlang, like rails generate or yeoman
+pkg_gut_homepage = https://github.com/unbalancedparentheses/gut
+pkg_gut_fetch = git
+pkg_gut_repo = https://github.com/unbalancedparentheses/gut
+pkg_gut_commit = master
+
+PACKAGES += hackney
+pkg_hackney_name = hackney
+pkg_hackney_description = simple HTTP client in Erlang
+pkg_hackney_homepage = https://github.com/benoitc/hackney
+pkg_hackney_fetch = git
+pkg_hackney_repo = https://github.com/benoitc/hackney
+pkg_hackney_commit = master
+
+PACKAGES += hamcrest
+pkg_hamcrest_name = hamcrest
+pkg_hamcrest_description = Erlang port of Hamcrest
+pkg_hamcrest_homepage = https://github.com/hyperthunk/hamcrest-erlang
+pkg_hamcrest_fetch = git
+pkg_hamcrest_repo = https://github.com/hyperthunk/hamcrest-erlang
+pkg_hamcrest_commit = master
+
+PACKAGES += hanoidb
+pkg_hanoidb_name = hanoidb
+pkg_hanoidb_description = Erlang LSM BTree Storage
+pkg_hanoidb_homepage = https://github.com/krestenkrab/hanoidb
+pkg_hanoidb_fetch = git
+pkg_hanoidb_repo = https://github.com/krestenkrab/hanoidb
+pkg_hanoidb_commit = master
+
+PACKAGES += hottub
+pkg_hottub_name = hottub
+pkg_hottub_description = Permanent Erlang Worker Pool
+pkg_hottub_homepage = https://github.com/bfrog/hottub
+pkg_hottub_fetch = git
+pkg_hottub_repo = https://github.com/bfrog/hottub
+pkg_hottub_commit = master
+
+PACKAGES += hpack
+pkg_hpack_name = hpack
+pkg_hpack_description = HPACK Implementation for Erlang
+pkg_hpack_homepage = https://github.com/joedevivo/hpack
+pkg_hpack_fetch = git
+pkg_hpack_repo = https://github.com/joedevivo/hpack
+pkg_hpack_commit = master
+
+PACKAGES += hyper
+pkg_hyper_name = hyper
+pkg_hyper_description = Erlang implementation of HyperLogLog
+pkg_hyper_homepage = https://github.com/GameAnalytics/hyper
+pkg_hyper_fetch = git
+pkg_hyper_repo = https://github.com/GameAnalytics/hyper
+pkg_hyper_commit = master
+
+PACKAGES += i18n
+pkg_i18n_name = i18n
+pkg_i18n_description = International components for unicode from Erlang (unicode, date, string, number, format, locale, localization, transliteration, icu4e)
+pkg_i18n_homepage = https://github.com/erlang-unicode/i18n
+pkg_i18n_fetch = git
+pkg_i18n_repo = https://github.com/erlang-unicode/i18n
+pkg_i18n_commit = master
+
+PACKAGES += ibrowse
+pkg_ibrowse_name = ibrowse
+pkg_ibrowse_description = Erlang HTTP client
+pkg_ibrowse_homepage = https://github.com/cmullaparthi/ibrowse
+pkg_ibrowse_fetch = git
+pkg_ibrowse_repo = https://github.com/cmullaparthi/ibrowse
+pkg_ibrowse_commit = master
+
+PACKAGES += idna
+pkg_idna_name = idna
+pkg_idna_description = Erlang IDNA lib
+pkg_idna_homepage = https://github.com/benoitc/erlang-idna
+pkg_idna_fetch = git
+pkg_idna_repo = https://github.com/benoitc/erlang-idna
+pkg_idna_commit = master
+
+PACKAGES += ierlang
+pkg_ierlang_name = ierlang
+pkg_ierlang_description = An Erlang language kernel for IPython.
+pkg_ierlang_homepage = https://github.com/robbielynch/ierlang
+pkg_ierlang_fetch = git
+pkg_ierlang_repo = https://github.com/robbielynch/ierlang
+pkg_ierlang_commit = master
+
+PACKAGES += iota
+pkg_iota_name = iota
+pkg_iota_description = iota (Inter-dependency Objective Testing Apparatus) - a tool to enforce clean separation of responsibilities in Erlang code
+pkg_iota_homepage = https://github.com/jpgneves/iota
+pkg_iota_fetch = git
+pkg_iota_repo = https://github.com/jpgneves/iota
+pkg_iota_commit = master
+
+PACKAGES += irc_lib
+pkg_irc_lib_name = irc_lib
+pkg_irc_lib_description = Erlang irc client library
+pkg_irc_lib_homepage = https://github.com/OtpChatBot/irc_lib
+pkg_irc_lib_fetch = git
+pkg_irc_lib_repo = https://github.com/OtpChatBot/irc_lib
+pkg_irc_lib_commit = master
+
+PACKAGES += ircd
+pkg_ircd_name = ircd
+pkg_ircd_description = A pluggable IRC daemon application/library for Erlang.
+pkg_ircd_homepage = https://github.com/tonyg/erlang-ircd
+pkg_ircd_fetch = git
+pkg_ircd_repo = https://github.com/tonyg/erlang-ircd
+pkg_ircd_commit = master
+
+PACKAGES += iris
+pkg_iris_name = iris
+pkg_iris_description = Iris Erlang binding
+pkg_iris_homepage = https://github.com/project-iris/iris-erl
+pkg_iris_fetch = git
+pkg_iris_repo = https://github.com/project-iris/iris-erl
+pkg_iris_commit = master
+
+PACKAGES += iso8601
+pkg_iso8601_name = iso8601
+pkg_iso8601_description = Erlang ISO 8601 date formatter/parser
+pkg_iso8601_homepage = https://github.com/seansawyer/erlang_iso8601
+pkg_iso8601_fetch = git
+pkg_iso8601_repo = https://github.com/seansawyer/erlang_iso8601
+pkg_iso8601_commit = master
+
+PACKAGES += jamdb_sybase
+pkg_jamdb_sybase_name = jamdb_sybase
+pkg_jamdb_sybase_description = Erlang driver for SAP Sybase ASE
+pkg_jamdb_sybase_homepage = https://github.com/erlangbureau/jamdb_sybase
+pkg_jamdb_sybase_fetch = git
+pkg_jamdb_sybase_repo = https://github.com/erlangbureau/jamdb_sybase
+pkg_jamdb_sybase_commit = master
+
+PACKAGES += jerg
+pkg_jerg_name = jerg
+pkg_jerg_description = JSON Schema to Erlang Records Generator
+pkg_jerg_homepage = https://github.com/ddossot/jerg
+pkg_jerg_fetch = git
+pkg_jerg_repo = https://github.com/ddossot/jerg
+pkg_jerg_commit = master
+
+PACKAGES += jesse
+pkg_jesse_name = jesse
+pkg_jesse_description = jesse (JSon Schema Erlang) is an implementation of a json schema validator for Erlang.
+pkg_jesse_homepage = https://github.com/for-GET/jesse
+pkg_jesse_fetch = git
+pkg_jesse_repo = https://github.com/for-GET/jesse
+pkg_jesse_commit = master
+
+PACKAGES += jiffy
+pkg_jiffy_name = jiffy
+pkg_jiffy_description = JSON NIFs for Erlang.
+pkg_jiffy_homepage = https://github.com/davisp/jiffy
+pkg_jiffy_fetch = git
+pkg_jiffy_repo = https://github.com/davisp/jiffy
+pkg_jiffy_commit = master
+
+PACKAGES += jiffy_v
+pkg_jiffy_v_name = jiffy_v
+pkg_jiffy_v_description = JSON validation utility
+pkg_jiffy_v_homepage = https://github.com/shizzard/jiffy-v
+pkg_jiffy_v_fetch = git
+pkg_jiffy_v_repo = https://github.com/shizzard/jiffy-v
+pkg_jiffy_v_commit = master
+
+PACKAGES += jobs
+pkg_jobs_name = jobs
+pkg_jobs_description = a Job scheduler for load regulation
+pkg_jobs_homepage = https://github.com/esl/jobs
+pkg_jobs_fetch = git
+pkg_jobs_repo = https://github.com/esl/jobs
+pkg_jobs_commit = master
+
+PACKAGES += joxa
+pkg_joxa_name = joxa
+pkg_joxa_description = A Modern Lisp for the Erlang VM
+pkg_joxa_homepage = https://github.com/joxa/joxa
+pkg_joxa_fetch = git
+pkg_joxa_repo = https://github.com/joxa/joxa
+pkg_joxa_commit = master
+
+PACKAGES += json
+pkg_json_name = json
+pkg_json_description = a high level json library for erlang (17.0+)
+pkg_json_homepage = https://github.com/talentdeficit/json
+pkg_json_fetch = git
+pkg_json_repo = https://github.com/talentdeficit/json
+pkg_json_commit = master
+
+PACKAGES += json_rec
+pkg_json_rec_name = json_rec
+pkg_json_rec_description = JSON to erlang record
+pkg_json_rec_homepage = https://github.com/justinkirby/json_rec
+pkg_json_rec_fetch = git
+pkg_json_rec_repo = https://github.com/justinkirby/json_rec
+pkg_json_rec_commit = master
+
+PACKAGES += jsone
+pkg_jsone_name = jsone
+pkg_jsone_description = An Erlang library for encoding and decoding JSON data.
+pkg_jsone_homepage = https://github.com/sile/jsone.git
+pkg_jsone_fetch = git
+pkg_jsone_repo = https://github.com/sile/jsone.git
+pkg_jsone_commit = master
+
+PACKAGES += jsonerl
+pkg_jsonerl_name = jsonerl
+pkg_jsonerl_description = yet another but slightly different erlang <-> json encoder/decoder
+pkg_jsonerl_homepage = https://github.com/lambder/jsonerl
+pkg_jsonerl_fetch = git
+pkg_jsonerl_repo = https://github.com/lambder/jsonerl
+pkg_jsonerl_commit = master
+
+PACKAGES += jsonpath
+pkg_jsonpath_name = jsonpath
+pkg_jsonpath_description = Fast Erlang JSON data retrieval and updates via javascript-like notation
+pkg_jsonpath_homepage = https://github.com/GeneStevens/jsonpath
+pkg_jsonpath_fetch = git
+pkg_jsonpath_repo = https://github.com/GeneStevens/jsonpath
+pkg_jsonpath_commit = master
+
+PACKAGES += jsonx
+pkg_jsonx_name = jsonx
+pkg_jsonx_description = JSONX is an Erlang library for efficiently decoding and encoding JSON, written in C.
+pkg_jsonx_homepage = https://github.com/iskra/jsonx
+pkg_jsonx_fetch = git
+pkg_jsonx_repo = https://github.com/iskra/jsonx
+pkg_jsonx_commit = master
+
+PACKAGES += jsx
+pkg_jsx_name = jsx
+pkg_jsx_description = An Erlang application for consuming, producing and manipulating JSON.
+pkg_jsx_homepage = https://github.com/talentdeficit/jsx
+pkg_jsx_fetch = git
+pkg_jsx_repo = https://github.com/talentdeficit/jsx
+pkg_jsx_commit = master
+
+PACKAGES += kafka
+pkg_kafka_name = kafka
+pkg_kafka_description = Kafka consumer and producer in Erlang
+pkg_kafka_homepage = https://github.com/wooga/kafka-erlang
+pkg_kafka_fetch = git
+pkg_kafka_repo = https://github.com/wooga/kafka-erlang
+pkg_kafka_commit = master
+
+PACKAGES += kafka_protocol
+pkg_kafka_protocol_name = kafka_protocol
+pkg_kafka_protocol_description = Kafka protocol Erlang library
+pkg_kafka_protocol_homepage = https://github.com/klarna/kafka_protocol
+pkg_kafka_protocol_fetch = git
+pkg_kafka_protocol_repo = https://github.com/klarna/kafka_protocol.git
+pkg_kafka_protocol_commit = master
+
+PACKAGES += kai
+pkg_kai_name = kai
+pkg_kai_description = DHT storage by Takeshi Inoue
+pkg_kai_homepage = https://github.com/synrc/kai
+pkg_kai_fetch = git
+pkg_kai_repo = https://github.com/synrc/kai
+pkg_kai_commit = master
+
+PACKAGES += katja
+pkg_katja_name = katja
+pkg_katja_description = A simple Riemann client written in Erlang.
+pkg_katja_homepage = https://github.com/nifoc/katja
+pkg_katja_fetch = git
+pkg_katja_repo = https://github.com/nifoc/katja
+pkg_katja_commit = master
+
+PACKAGES += kdht
+pkg_kdht_name = kdht
+pkg_kdht_description = kdht is an erlang DHT implementation
+pkg_kdht_homepage = https://github.com/kevinlynx/kdht
+pkg_kdht_fetch = git
+pkg_kdht_repo = https://github.com/kevinlynx/kdht
+pkg_kdht_commit = master
+
+PACKAGES += key2value
+pkg_key2value_name = key2value
+pkg_key2value_description = Erlang 2-way map
+pkg_key2value_homepage = https://github.com/okeuday/key2value
+pkg_key2value_fetch = git
+pkg_key2value_repo = https://github.com/okeuday/key2value
+pkg_key2value_commit = master
+
+PACKAGES += keys1value
+pkg_keys1value_name = keys1value
+pkg_keys1value_description = Erlang set associative map for key lists
+pkg_keys1value_homepage = https://github.com/okeuday/keys1value
+pkg_keys1value_fetch = git
+pkg_keys1value_repo = https://github.com/okeuday/keys1value
+pkg_keys1value_commit = master
+
+PACKAGES += kinetic
+pkg_kinetic_name = kinetic
+pkg_kinetic_description = Erlang Kinesis Client
+pkg_kinetic_homepage = https://github.com/AdRoll/kinetic
+pkg_kinetic_fetch = git
+pkg_kinetic_repo = https://github.com/AdRoll/kinetic
+pkg_kinetic_commit = master
+
+PACKAGES += kjell
+pkg_kjell_name = kjell
+pkg_kjell_description = Erlang Shell
+pkg_kjell_homepage = https://github.com/karlll/kjell
+pkg_kjell_fetch = git
+pkg_kjell_repo = https://github.com/karlll/kjell
+pkg_kjell_commit = master
+
+PACKAGES += kraken
+pkg_kraken_name = kraken
+pkg_kraken_description = Distributed Pubsub Server for Realtime Apps
+pkg_kraken_homepage = https://github.com/Asana/kraken
+pkg_kraken_fetch = git
+pkg_kraken_repo = https://github.com/Asana/kraken
+pkg_kraken_commit = master
+
+PACKAGES += kucumberl
+pkg_kucumberl_name = kucumberl
+pkg_kucumberl_description = A pure-Erlang, open-source implementation of Cucumber
+pkg_kucumberl_homepage = https://github.com/openshine/kucumberl
+pkg_kucumberl_fetch = git
+pkg_kucumberl_repo = https://github.com/openshine/kucumberl
+pkg_kucumberl_commit = master
+
+PACKAGES += kvc
+pkg_kvc_name = kvc
+pkg_kvc_description = KVC - Key Value Coding for Erlang data structures
+pkg_kvc_homepage = https://github.com/etrepum/kvc
+pkg_kvc_fetch = git
+pkg_kvc_repo = https://github.com/etrepum/kvc
+pkg_kvc_commit = master
+
+PACKAGES += kvlists
+pkg_kvlists_name = kvlists
+pkg_kvlists_description = Lists of key-value pairs (decoded JSON) in Erlang
+pkg_kvlists_homepage = https://github.com/jcomellas/kvlists
+pkg_kvlists_fetch = git
+pkg_kvlists_repo = https://github.com/jcomellas/kvlists
+pkg_kvlists_commit = master
+
+PACKAGES += kvs
+pkg_kvs_name = kvs
+pkg_kvs_description = Container and Iterator
+pkg_kvs_homepage = https://github.com/synrc/kvs
+pkg_kvs_fetch = git
+pkg_kvs_repo = https://github.com/synrc/kvs
+pkg_kvs_commit = master
+
+PACKAGES += lager
+pkg_lager_name = lager
+pkg_lager_description = A logging framework for Erlang/OTP.
+pkg_lager_homepage = https://github.com/erlang-lager/lager
+pkg_lager_fetch = git
+pkg_lager_repo = https://github.com/erlang-lager/lager
+pkg_lager_commit = master
+
+PACKAGES += lager_amqp_backend
+pkg_lager_amqp_backend_name = lager_amqp_backend
+pkg_lager_amqp_backend_description = AMQP RabbitMQ Lager backend
+pkg_lager_amqp_backend_homepage = https://github.com/jbrisbin/lager_amqp_backend
+pkg_lager_amqp_backend_fetch = git
+pkg_lager_amqp_backend_repo = https://github.com/jbrisbin/lager_amqp_backend
+pkg_lager_amqp_backend_commit = master
+
+PACKAGES += lager_syslog
+pkg_lager_syslog_name = lager_syslog
+pkg_lager_syslog_description = Syslog backend for lager
+pkg_lager_syslog_homepage = https://github.com/erlang-lager/lager_syslog
+pkg_lager_syslog_fetch = git
+pkg_lager_syslog_repo = https://github.com/erlang-lager/lager_syslog
+pkg_lager_syslog_commit = master
+
+PACKAGES += lambdapad
+pkg_lambdapad_name = lambdapad
+pkg_lambdapad_description = Static site generator using Erlang. Yes, Erlang.
+pkg_lambdapad_homepage = https://github.com/gar1t/lambdapad
+pkg_lambdapad_fetch = git
+pkg_lambdapad_repo = https://github.com/gar1t/lambdapad
+pkg_lambdapad_commit = master
+
+PACKAGES += lasp
+pkg_lasp_name = lasp
+pkg_lasp_description = A Language for Distributed, Eventually Consistent Computations
+pkg_lasp_homepage = http://lasp-lang.org/
+pkg_lasp_fetch = git
+pkg_lasp_repo = https://github.com/lasp-lang/lasp
+pkg_lasp_commit = master
+
+PACKAGES += lasse
+pkg_lasse_name = lasse
+pkg_lasse_description = SSE handler for Cowboy
+pkg_lasse_homepage = https://github.com/inaka/lasse
+pkg_lasse_fetch = git
+pkg_lasse_repo = https://github.com/inaka/lasse
+pkg_lasse_commit = master
+
+PACKAGES += ldap
+pkg_ldap_name = ldap
+pkg_ldap_description = LDAP server written in Erlang
+pkg_ldap_homepage = https://github.com/spawnproc/ldap
+pkg_ldap_fetch = git
+pkg_ldap_repo = https://github.com/spawnproc/ldap
+pkg_ldap_commit = master
+
+PACKAGES += lethink
+pkg_lethink_name = lethink
+pkg_lethink_description = erlang driver for rethinkdb
+pkg_lethink_homepage = https://github.com/taybin/lethink
+pkg_lethink_fetch = git
+pkg_lethink_repo = https://github.com/taybin/lethink
+pkg_lethink_commit = master
+
+PACKAGES += lfe
+pkg_lfe_name = lfe
+pkg_lfe_description = Lisp Flavoured Erlang (LFE)
+pkg_lfe_homepage = https://github.com/rvirding/lfe
+pkg_lfe_fetch = git
+pkg_lfe_repo = https://github.com/rvirding/lfe
+pkg_lfe_commit = master
+
+PACKAGES += ling
+pkg_ling_name = ling
+pkg_ling_description = Erlang on Xen
+pkg_ling_homepage = https://github.com/cloudozer/ling
+pkg_ling_fetch = git
+pkg_ling_repo = https://github.com/cloudozer/ling
+pkg_ling_commit = master
+
+PACKAGES += live
+pkg_live_name = live
+pkg_live_description = Automated module and configuration reloader.
+pkg_live_homepage = http://ninenines.eu
+pkg_live_fetch = git
+pkg_live_repo = https://github.com/ninenines/live
+pkg_live_commit = master
+
+PACKAGES += lmq
+pkg_lmq_name = lmq
+pkg_lmq_description = Lightweight Message Queue
+pkg_lmq_homepage = https://github.com/iij/lmq
+pkg_lmq_fetch = git
+pkg_lmq_repo = https://github.com/iij/lmq
+pkg_lmq_commit = master
+
+PACKAGES += locker
+pkg_locker_name = locker
+pkg_locker_description = Atomic distributed 'check and set' for short-lived keys
+pkg_locker_homepage = https://github.com/wooga/locker
+pkg_locker_fetch = git
+pkg_locker_repo = https://github.com/wooga/locker
+pkg_locker_commit = master
+
+PACKAGES += locks
+pkg_locks_name = locks
+pkg_locks_description = A scalable, deadlock-resolving resource locker
+pkg_locks_homepage = https://github.com/uwiger/locks
+pkg_locks_fetch = git
+pkg_locks_repo = https://github.com/uwiger/locks
+pkg_locks_commit = master
+
+PACKAGES += log4erl
+pkg_log4erl_name = log4erl
+pkg_log4erl_description = A logger for erlang in the spirit of Log4J.
+pkg_log4erl_homepage = https://github.com/ahmednawras/log4erl
+pkg_log4erl_fetch = git
+pkg_log4erl_repo = https://github.com/ahmednawras/log4erl
+pkg_log4erl_commit = master
+
+PACKAGES += lol
+pkg_lol_name = lol
+pkg_lol_description = Lisp on erLang, and programming is fun again
+pkg_lol_homepage = https://github.com/b0oh/lol
+pkg_lol_fetch = git
+pkg_lol_repo = https://github.com/b0oh/lol
+pkg_lol_commit = master
+
+PACKAGES += lucid
+pkg_lucid_name = lucid
+pkg_lucid_description = HTTP/2 server written in Erlang
+pkg_lucid_homepage = https://github.com/tatsuhiro-t/lucid
+pkg_lucid_fetch = git
+pkg_lucid_repo = https://github.com/tatsuhiro-t/lucid
+pkg_lucid_commit = master
+
+PACKAGES += luerl
+pkg_luerl_name = luerl
+pkg_luerl_description = Lua in Erlang
+pkg_luerl_homepage = https://github.com/rvirding/luerl
+pkg_luerl_fetch = git
+pkg_luerl_repo = https://github.com/rvirding/luerl
+pkg_luerl_commit = develop
+
+PACKAGES += luwak
+pkg_luwak_name = luwak
+pkg_luwak_description = Large-object storage interface for Riak
+pkg_luwak_homepage = https://github.com/basho/luwak
+pkg_luwak_fetch = git
+pkg_luwak_repo = https://github.com/basho/luwak
+pkg_luwak_commit = master
+
+PACKAGES += lux
+pkg_lux_name = lux
+pkg_lux_description = Lux (LUcid eXpect scripting) simplifies test automation and provides an Expect-style execution of commands
+pkg_lux_homepage = https://github.com/hawk/lux
+pkg_lux_fetch = git
+pkg_lux_repo = https://github.com/hawk/lux
+pkg_lux_commit = master
+
+PACKAGES += machi
+pkg_machi_name = machi
+pkg_machi_description = Machi file store
+pkg_machi_homepage = https://github.com/basho/machi
+pkg_machi_fetch = git
+pkg_machi_repo = https://github.com/basho/machi
+pkg_machi_commit = master
+
+PACKAGES += mad
+pkg_mad_name = mad
+pkg_mad_description = Small and Fast Rebar Replacement
+pkg_mad_homepage = https://github.com/synrc/mad
+pkg_mad_fetch = git
+pkg_mad_repo = https://github.com/synrc/mad
+pkg_mad_commit = master
+
+PACKAGES += marina
+pkg_marina_name = marina
+pkg_marina_description = Non-blocking Erlang Cassandra CQL3 client
+pkg_marina_homepage = https://github.com/lpgauth/marina
+pkg_marina_fetch = git
+pkg_marina_repo = https://github.com/lpgauth/marina
+pkg_marina_commit = master
+
+PACKAGES += mavg
+pkg_mavg_name = mavg
+pkg_mavg_description = Erlang :: Exponential moving average library
+pkg_mavg_homepage = https://github.com/EchoTeam/mavg
+pkg_mavg_fetch = git
+pkg_mavg_repo = https://github.com/EchoTeam/mavg
+pkg_mavg_commit = master
+
+PACKAGES += mc_erl
+pkg_mc_erl_name = mc_erl
+pkg_mc_erl_description = mc-erl is a server for Minecraft 1.4.7 written in Erlang.
+pkg_mc_erl_homepage = https://github.com/clonejo/mc-erl
+pkg_mc_erl_fetch = git
+pkg_mc_erl_repo = https://github.com/clonejo/mc-erl
+pkg_mc_erl_commit = master
+
+PACKAGES += mcd
+pkg_mcd_name = mcd
+pkg_mcd_description = Fast memcached protocol client in pure Erlang
+pkg_mcd_homepage = https://github.com/EchoTeam/mcd
+pkg_mcd_fetch = git
+pkg_mcd_repo = https://github.com/EchoTeam/mcd
+pkg_mcd_commit = master
+
+PACKAGES += mcerlang
+pkg_mcerlang_name = mcerlang
+pkg_mcerlang_description = The McErlang model checker for Erlang
+pkg_mcerlang_homepage = https://github.com/fredlund/McErlang
+pkg_mcerlang_fetch = git
+pkg_mcerlang_repo = https://github.com/fredlund/McErlang
+pkg_mcerlang_commit = master
+
+PACKAGES += meck
+pkg_meck_name = meck
+pkg_meck_description = A mocking library for Erlang
+pkg_meck_homepage = https://github.com/eproxus/meck
+pkg_meck_fetch = git
+pkg_meck_repo = https://github.com/eproxus/meck
+pkg_meck_commit = master
+
+PACKAGES += mekao
+pkg_mekao_name = mekao
+pkg_mekao_description = SQL constructor
+pkg_mekao_homepage = https://github.com/ddosia/mekao
+pkg_mekao_fetch = git
+pkg_mekao_repo = https://github.com/ddosia/mekao
+pkg_mekao_commit = master
+
+PACKAGES += memo
+pkg_memo_name = memo
+pkg_memo_description = Erlang memoization server
+pkg_memo_homepage = https://github.com/tuncer/memo
+pkg_memo_fetch = git
+pkg_memo_repo = https://github.com/tuncer/memo
+pkg_memo_commit = master
+
+PACKAGES += merge_index
+pkg_merge_index_name = merge_index
+pkg_merge_index_description = MergeIndex is an Erlang library for storing ordered sets on disk. It is very similar to an SSTable (in Google's Bigtable) or an HFile (in Hadoop).
+pkg_merge_index_homepage = https://github.com/basho/merge_index
+pkg_merge_index_fetch = git
+pkg_merge_index_repo = https://github.com/basho/merge_index
+pkg_merge_index_commit = master
+
+PACKAGES += merl
+pkg_merl_name = merl
+pkg_merl_description = Metaprogramming in Erlang
+pkg_merl_homepage = https://github.com/richcarl/merl
+pkg_merl_fetch = git
+pkg_merl_repo = https://github.com/richcarl/merl
+pkg_merl_commit = master
+
+PACKAGES += mimerl
+pkg_mimerl_name = mimerl
+pkg_mimerl_description = Library to handle MIME types
+pkg_mimerl_homepage = https://github.com/benoitc/mimerl
+pkg_mimerl_fetch = git
+pkg_mimerl_repo = https://github.com/benoitc/mimerl
+pkg_mimerl_commit = master
+
+PACKAGES += mimetypes
+pkg_mimetypes_name = mimetypes
+pkg_mimetypes_description = Erlang MIME types library
+pkg_mimetypes_homepage = https://github.com/spawngrid/mimetypes
+pkg_mimetypes_fetch = git
+pkg_mimetypes_repo = https://github.com/spawngrid/mimetypes
+pkg_mimetypes_commit = master
+
+PACKAGES += mixer
+pkg_mixer_name = mixer
+pkg_mixer_description = Mix in functions from other modules
+pkg_mixer_homepage = https://github.com/chef/mixer
+pkg_mixer_fetch = git
+pkg_mixer_repo = https://github.com/chef/mixer
+pkg_mixer_commit = master
+
+PACKAGES += mochiweb
+pkg_mochiweb_name = mochiweb
+pkg_mochiweb_description = MochiWeb is an Erlang library for building lightweight HTTP servers.
+pkg_mochiweb_homepage = https://github.com/mochi/mochiweb
+pkg_mochiweb_fetch = git
+pkg_mochiweb_repo = https://github.com/mochi/mochiweb
+pkg_mochiweb_commit = master
+
+PACKAGES += mochiweb_xpath
+pkg_mochiweb_xpath_name = mochiweb_xpath
+pkg_mochiweb_xpath_description = XPath support for mochiweb's HTML parser
+pkg_mochiweb_xpath_homepage = https://github.com/retnuh/mochiweb_xpath
+pkg_mochiweb_xpath_fetch = git
+pkg_mochiweb_xpath_repo = https://github.com/retnuh/mochiweb_xpath
+pkg_mochiweb_xpath_commit = master
+
+PACKAGES += mockgyver
+pkg_mockgyver_name = mockgyver
+pkg_mockgyver_description = A mocking library for Erlang
+pkg_mockgyver_homepage = https://github.com/klajo/mockgyver
+pkg_mockgyver_fetch = git
+pkg_mockgyver_repo = https://github.com/klajo/mockgyver
+pkg_mockgyver_commit = master
+
+PACKAGES += modlib
+pkg_modlib_name = modlib
+pkg_modlib_description = Web framework based on Erlang's inets httpd
+pkg_modlib_homepage = https://github.com/gar1t/modlib
+pkg_modlib_fetch = git
+pkg_modlib_repo = https://github.com/gar1t/modlib
+pkg_modlib_commit = master
+
+PACKAGES += mongodb
+pkg_mongodb_name = mongodb
+pkg_mongodb_description = MongoDB driver for Erlang
+pkg_mongodb_homepage = https://github.com/comtihon/mongodb-erlang
+pkg_mongodb_fetch = git
+pkg_mongodb_repo = https://github.com/comtihon/mongodb-erlang
+pkg_mongodb_commit = master
+
+PACKAGES += mongooseim
+pkg_mongooseim_name = mongooseim
+pkg_mongooseim_description = Jabber / XMPP server with focus on performance and scalability, by Erlang Solutions
+pkg_mongooseim_homepage = https://www.erlang-solutions.com/products/mongooseim-massively-scalable-ejabberd-platform
+pkg_mongooseim_fetch = git
+pkg_mongooseim_repo = https://github.com/esl/MongooseIM
+pkg_mongooseim_commit = master
+
+PACKAGES += moyo
+pkg_moyo_name = moyo
+pkg_moyo_description = Erlang utility functions library
+pkg_moyo_homepage = https://github.com/dwango/moyo
+pkg_moyo_fetch = git
+pkg_moyo_repo = https://github.com/dwango/moyo
+pkg_moyo_commit = master
+
+PACKAGES += msgpack
+pkg_msgpack_name = msgpack
+pkg_msgpack_description = MessagePack (de)serializer implementation for Erlang
+pkg_msgpack_homepage = https://github.com/msgpack/msgpack-erlang
+pkg_msgpack_fetch = git
+pkg_msgpack_repo = https://github.com/msgpack/msgpack-erlang
+pkg_msgpack_commit = master
+
+PACKAGES += mu2
+pkg_mu2_name = mu2
+pkg_mu2_description = Erlang mutation testing tool
+pkg_mu2_homepage = https://github.com/ramsay-t/mu2
+pkg_mu2_fetch = git
+pkg_mu2_repo = https://github.com/ramsay-t/mu2
+pkg_mu2_commit = master
+
+PACKAGES += mustache
+pkg_mustache_name = mustache
+pkg_mustache_description = Mustache template engine for Erlang.
+pkg_mustache_homepage = https://github.com/mojombo/mustache.erl
+pkg_mustache_fetch = git
+pkg_mustache_repo = https://github.com/mojombo/mustache.erl
+pkg_mustache_commit = master
+
+PACKAGES += myproto
+pkg_myproto_name = myproto
+pkg_myproto_description = MySQL Server Protocol in Erlang
+pkg_myproto_homepage = https://github.com/altenwald/myproto
+pkg_myproto_fetch = git
+pkg_myproto_repo = https://github.com/altenwald/myproto
+pkg_myproto_commit = master
+
+PACKAGES += mysql
+pkg_mysql_name = mysql
+pkg_mysql_description = MySQL client library for Erlang/OTP
+pkg_mysql_homepage = https://github.com/mysql-otp/mysql-otp
+pkg_mysql_fetch = git
+pkg_mysql_repo = https://github.com/mysql-otp/mysql-otp
+pkg_mysql_commit = 1.5.1
+
+PACKAGES += n2o
+pkg_n2o_name = n2o
+pkg_n2o_description = WebSocket Application Server
+pkg_n2o_homepage = https://github.com/5HT/n2o
+pkg_n2o_fetch = git
+pkg_n2o_repo = https://github.com/5HT/n2o
+pkg_n2o_commit = master
+
+PACKAGES += nat_upnp
+pkg_nat_upnp_name = nat_upnp
+pkg_nat_upnp_description = Erlang library to map your internal port to an external one using UPnP IGD
+pkg_nat_upnp_homepage = https://github.com/benoitc/nat_upnp
+pkg_nat_upnp_fetch = git
+pkg_nat_upnp_repo = https://github.com/benoitc/nat_upnp
+pkg_nat_upnp_commit = master
+
+PACKAGES += neo4j
+pkg_neo4j_name = neo4j
+pkg_neo4j_description = Erlang client library for Neo4J.
+pkg_neo4j_homepage = https://github.com/dmitriid/neo4j-erlang
+pkg_neo4j_fetch = git
+pkg_neo4j_repo = https://github.com/dmitriid/neo4j-erlang
+pkg_neo4j_commit = master
+
+PACKAGES += neotoma
+pkg_neotoma_name = neotoma
+pkg_neotoma_description = Erlang library and packrat parser-generator for parsing expression grammars.
+pkg_neotoma_homepage = https://github.com/seancribbs/neotoma
+pkg_neotoma_fetch = git
+pkg_neotoma_repo = https://github.com/seancribbs/neotoma
+pkg_neotoma_commit = master
+
+PACKAGES += newrelic
+pkg_newrelic_name = newrelic
+pkg_newrelic_description = Erlang library for sending metrics to New Relic
+pkg_newrelic_homepage = https://github.com/wooga/newrelic-erlang
+pkg_newrelic_fetch = git
+pkg_newrelic_repo = https://github.com/wooga/newrelic-erlang
+pkg_newrelic_commit = master
+
+PACKAGES += nifty
+pkg_nifty_name = nifty
+pkg_nifty_description = Erlang NIF wrapper generator
+pkg_nifty_homepage = https://github.com/parapluu/nifty
+pkg_nifty_fetch = git
+pkg_nifty_repo = https://github.com/parapluu/nifty
+pkg_nifty_commit = master
+
+PACKAGES += nitrogen_core
+pkg_nitrogen_core_name = nitrogen_core
+pkg_nitrogen_core_description = The core Nitrogen library.
+pkg_nitrogen_core_homepage = http://nitrogenproject.com/
+pkg_nitrogen_core_fetch = git
+pkg_nitrogen_core_repo = https://github.com/nitrogen/nitrogen_core
+pkg_nitrogen_core_commit = master
+
+PACKAGES += nkbase
+pkg_nkbase_name = nkbase
+pkg_nkbase_description = NkBASE distributed database
+pkg_nkbase_homepage = https://github.com/Nekso/nkbase
+pkg_nkbase_fetch = git
+pkg_nkbase_repo = https://github.com/Nekso/nkbase
+pkg_nkbase_commit = develop
+
+PACKAGES += nkdocker
+pkg_nkdocker_name = nkdocker
+pkg_nkdocker_description = Erlang Docker client
+pkg_nkdocker_homepage = https://github.com/Nekso/nkdocker
+pkg_nkdocker_fetch = git
+pkg_nkdocker_repo = https://github.com/Nekso/nkdocker
+pkg_nkdocker_commit = master
+
+PACKAGES += nkpacket
+pkg_nkpacket_name = nkpacket
+pkg_nkpacket_description = Generic Erlang transport layer
+pkg_nkpacket_homepage = https://github.com/Nekso/nkpacket
+pkg_nkpacket_fetch = git
+pkg_nkpacket_repo = https://github.com/Nekso/nkpacket
+pkg_nkpacket_commit = master
+
+PACKAGES += nksip
+pkg_nksip_name = nksip
+pkg_nksip_description = Erlang SIP application server
+pkg_nksip_homepage = https://github.com/kalta/nksip
+pkg_nksip_fetch = git
+pkg_nksip_repo = https://github.com/kalta/nksip
+pkg_nksip_commit = master
+
+PACKAGES += nodefinder
+pkg_nodefinder_name = nodefinder
+pkg_nodefinder_description = automatic node discovery via UDP multicast
+pkg_nodefinder_homepage = https://github.com/erlanger/nodefinder
+pkg_nodefinder_fetch = git
+pkg_nodefinder_repo = https://github.com/okeuday/nodefinder
+pkg_nodefinder_commit = master
+
+PACKAGES += nprocreg
+pkg_nprocreg_name = nprocreg
+pkg_nprocreg_description = Minimal Distributed Erlang Process Registry
+pkg_nprocreg_homepage = http://nitrogenproject.com/
+pkg_nprocreg_fetch = git
+pkg_nprocreg_repo = https://github.com/nitrogen/nprocreg
+pkg_nprocreg_commit = master
+
+PACKAGES += oauth
+pkg_oauth_name = oauth
+pkg_oauth_description = An Erlang OAuth 1.0 implementation
+pkg_oauth_homepage = https://github.com/tim/erlang-oauth
+pkg_oauth_fetch = git
+pkg_oauth_repo = https://github.com/tim/erlang-oauth
+pkg_oauth_commit = master
+
+PACKAGES += oauth2
+pkg_oauth2_name = oauth2
+pkg_oauth2_description = Erlang OAuth2 implementation
+pkg_oauth2_homepage = https://github.com/kivra/oauth2
+pkg_oauth2_fetch = git
+pkg_oauth2_repo = https://github.com/kivra/oauth2
+pkg_oauth2_commit = master
+
+PACKAGES += observer_cli
+pkg_observer_cli_name = observer_cli
+pkg_observer_cli_description = Visualize Erlang/Elixir Nodes On The Command Line
+pkg_observer_cli_homepage = http://zhongwencool.github.io/observer_cli
+pkg_observer_cli_fetch = git
+pkg_observer_cli_repo = https://github.com/zhongwencool/observer_cli
+pkg_observer_cli_commit = master
+
+PACKAGES += octopus
+pkg_octopus_name = octopus
+pkg_octopus_description = Small and flexible pool manager written in Erlang
+pkg_octopus_homepage = https://github.com/erlangbureau/octopus
+pkg_octopus_fetch = git
+pkg_octopus_repo = https://github.com/erlangbureau/octopus
+pkg_octopus_commit = master
+
+PACKAGES += of_protocol
+pkg_of_protocol_name = of_protocol
+pkg_of_protocol_description = OpenFlow Protocol Library for Erlang
+pkg_of_protocol_homepage = https://github.com/FlowForwarding/of_protocol
+pkg_of_protocol_fetch = git
+pkg_of_protocol_repo = https://github.com/FlowForwarding/of_protocol
+pkg_of_protocol_commit = master
+
+PACKAGES += opencouch
+pkg_opencouch_name = couch
+pkg_opencouch_description = An embeddable document-oriented database compatible with Apache CouchDB
+pkg_opencouch_homepage = https://github.com/benoitc/opencouch
+pkg_opencouch_fetch = git
+pkg_opencouch_repo = https://github.com/benoitc/opencouch
+pkg_opencouch_commit = master
+
+PACKAGES += openflow
+pkg_openflow_name = openflow
+pkg_openflow_description = An OpenFlow controller written in pure Erlang
+pkg_openflow_homepage = https://github.com/renatoaguiar/erlang-openflow
+pkg_openflow_fetch = git
+pkg_openflow_repo = https://github.com/renatoaguiar/erlang-openflow
+pkg_openflow_commit = master
+
+PACKAGES += openid
+pkg_openid_name = openid
+pkg_openid_description = Erlang OpenID
+pkg_openid_homepage = https://github.com/brendonh/erl_openid
+pkg_openid_fetch = git
+pkg_openid_repo = https://github.com/brendonh/erl_openid
+pkg_openid_commit = master
+
+PACKAGES += openpoker
+pkg_openpoker_name = openpoker
+pkg_openpoker_description = Genesis Texas hold'em Game Server
+pkg_openpoker_homepage = https://github.com/hpyhacking/openpoker
+pkg_openpoker_fetch = git
+pkg_openpoker_repo = https://github.com/hpyhacking/openpoker
+pkg_openpoker_commit = master
+
+PACKAGES += otpbp
+pkg_otpbp_name = otpbp
+pkg_otpbp_description = Parse transform for using new OTP functions in old Erlang/OTP releases (R15, R16, 17, 18, 19)
+pkg_otpbp_homepage = https://github.com/Ledest/otpbp
+pkg_otpbp_fetch = git
+pkg_otpbp_repo = https://github.com/Ledest/otpbp
+pkg_otpbp_commit = master
+
+PACKAGES += pal
+pkg_pal_name = pal
+pkg_pal_description = Pragmatic Authentication Library
+pkg_pal_homepage = https://github.com/manifest/pal
+pkg_pal_fetch = git
+pkg_pal_repo = https://github.com/manifest/pal
+pkg_pal_commit = master
+
+PACKAGES += parse_trans
+pkg_parse_trans_name = parse_trans
+pkg_parse_trans_description = Parse transform utilities for Erlang
+pkg_parse_trans_homepage = https://github.com/uwiger/parse_trans
+pkg_parse_trans_fetch = git
+pkg_parse_trans_repo = https://github.com/uwiger/parse_trans
+pkg_parse_trans_commit = master
+
+PACKAGES += parsexml
+pkg_parsexml_name = parsexml
+pkg_parsexml_description = Simple DOM XML parser with a convenient and very simple API
+pkg_parsexml_homepage = https://github.com/maxlapshin/parsexml
+pkg_parsexml_fetch = git
+pkg_parsexml_repo = https://github.com/maxlapshin/parsexml
+pkg_parsexml_commit = master
+
+PACKAGES += partisan
+pkg_partisan_name = partisan
+pkg_partisan_description = High-performance, high-scalability distributed computing with Erlang and Elixir.
+pkg_partisan_homepage = http://partisan.cloud
+pkg_partisan_fetch = git
+pkg_partisan_repo = https://github.com/lasp-lang/partisan
+pkg_partisan_commit = master
+
+PACKAGES += pegjs
+pkg_pegjs_name = pegjs
+pkg_pegjs_description = An implementation of PEG.js grammar for Erlang.
+pkg_pegjs_homepage = https://github.com/dmitriid/pegjs
+pkg_pegjs_fetch = git
+pkg_pegjs_repo = https://github.com/dmitriid/pegjs
+pkg_pegjs_commit = master
+
+PACKAGES += percept2
+pkg_percept2_name = percept2
+pkg_percept2_description = Concurrent profiling tool for Erlang
+pkg_percept2_homepage = https://github.com/huiqing/percept2
+pkg_percept2_fetch = git
+pkg_percept2_repo = https://github.com/huiqing/percept2
+pkg_percept2_commit = master
+
+PACKAGES += pgo
+pkg_pgo_name = pgo
+pkg_pgo_description = Erlang Postgres client and connection pool
+pkg_pgo_homepage = https://github.com/erleans/pgo.git
+pkg_pgo_fetch = git
+pkg_pgo_repo = https://github.com/erleans/pgo.git
+pkg_pgo_commit = master
+
+PACKAGES += pgsql
+pkg_pgsql_name = pgsql
+pkg_pgsql_description = Erlang PostgreSQL driver
+pkg_pgsql_homepage = https://github.com/semiocast/pgsql
+pkg_pgsql_fetch = git
+pkg_pgsql_repo = https://github.com/semiocast/pgsql
+pkg_pgsql_commit = master
+
+PACKAGES += pkgx
+pkg_pkgx_name = pkgx
+pkg_pkgx_description = Build .deb packages from Erlang releases
+pkg_pkgx_homepage = https://github.com/arjan/pkgx
+pkg_pkgx_fetch = git
+pkg_pkgx_repo = https://github.com/arjan/pkgx
+pkg_pkgx_commit = master
+
+PACKAGES += pkt
+pkg_pkt_name = pkt
+pkg_pkt_description = Erlang network protocol library
+pkg_pkt_homepage = https://github.com/msantos/pkt
+pkg_pkt_fetch = git
+pkg_pkt_repo = https://github.com/msantos/pkt
+pkg_pkt_commit = master
+
+PACKAGES += plain_fsm
+pkg_plain_fsm_name = plain_fsm
+pkg_plain_fsm_description = A behaviour/support library for writing plain Erlang FSMs.
+pkg_plain_fsm_homepage = https://github.com/uwiger/plain_fsm
+pkg_plain_fsm_fetch = git
+pkg_plain_fsm_repo = https://github.com/uwiger/plain_fsm
+pkg_plain_fsm_commit = master
+
+PACKAGES += plumtree
+pkg_plumtree_name = plumtree
+pkg_plumtree_description = Epidemic Broadcast Trees
+pkg_plumtree_homepage = https://github.com/helium/plumtree
+pkg_plumtree_fetch = git
+pkg_plumtree_repo = https://github.com/helium/plumtree
+pkg_plumtree_commit = master
+
+PACKAGES += pmod_transform
+pkg_pmod_transform_name = pmod_transform
+pkg_pmod_transform_description = Parse transform for parameterized modules
+pkg_pmod_transform_homepage = https://github.com/erlang/pmod_transform
+pkg_pmod_transform_fetch = git
+pkg_pmod_transform_repo = https://github.com/erlang/pmod_transform
+pkg_pmod_transform_commit = master
+
+PACKAGES += pobox
+pkg_pobox_name = pobox
+pkg_pobox_description = External buffer processes to protect against mailbox overflow in Erlang
+pkg_pobox_homepage = https://github.com/ferd/pobox
+pkg_pobox_fetch = git
+pkg_pobox_repo = https://github.com/ferd/pobox
+pkg_pobox_commit = master
+
+PACKAGES += ponos
+pkg_ponos_name = ponos
+pkg_ponos_description = ponos is a simple yet powerful load generator written in Erlang
+pkg_ponos_homepage = https://github.com/klarna/ponos
+pkg_ponos_fetch = git
+pkg_ponos_repo = https://github.com/klarna/ponos
+pkg_ponos_commit = master
+
+PACKAGES += poolboy
+pkg_poolboy_name = poolboy
+pkg_poolboy_description = A hunky Erlang worker pool factory
+pkg_poolboy_homepage = https://github.com/devinus/poolboy
+pkg_poolboy_fetch = git
+pkg_poolboy_repo = https://github.com/devinus/poolboy
+pkg_poolboy_commit = master
+
+PACKAGES += pooler
+pkg_pooler_name = pooler
+pkg_pooler_description = An OTP Process Pool Application
+pkg_pooler_homepage = https://github.com/seth/pooler
+pkg_pooler_fetch = git
+pkg_pooler_repo = https://github.com/seth/pooler
+pkg_pooler_commit = master
+
+PACKAGES += pqueue
+pkg_pqueue_name = pqueue
+pkg_pqueue_description = Erlang Priority Queues
+pkg_pqueue_homepage = https://github.com/okeuday/pqueue
+pkg_pqueue_fetch = git
+pkg_pqueue_repo = https://github.com/okeuday/pqueue
+pkg_pqueue_commit = master
+
+PACKAGES += procket
+pkg_procket_name = procket
+pkg_procket_description = Erlang interface to low level socket operations
+pkg_procket_homepage = http://blog.listincomprehension.com/search/label/procket
+pkg_procket_fetch = git
+pkg_procket_repo = https://github.com/msantos/procket
+pkg_procket_commit = master
+
+PACKAGES += prometheus
+pkg_prometheus_name = prometheus
+pkg_prometheus_description = Prometheus.io client in Erlang
+pkg_prometheus_homepage = https://github.com/deadtrickster/prometheus.erl
+pkg_prometheus_fetch = git
+pkg_prometheus_repo = https://github.com/deadtrickster/prometheus.erl
+pkg_prometheus_commit = master
+
+PACKAGES += prop
+pkg_prop_name = prop
+pkg_prop_description = An Erlang code scaffolding and generator system.
+pkg_prop_homepage = https://github.com/nuex/prop
+pkg_prop_fetch = git
+pkg_prop_repo = https://github.com/nuex/prop
+pkg_prop_commit = master
+
+PACKAGES += proper
+pkg_proper_name = proper
+pkg_proper_description = PropEr: a QuickCheck-inspired property-based testing tool for Erlang.
+pkg_proper_homepage = http://proper.softlab.ntua.gr
+pkg_proper_fetch = git
+pkg_proper_repo = https://github.com/manopapad/proper
+pkg_proper_commit = master
+
+PACKAGES += props
+pkg_props_name = props
+pkg_props_description = Property structure library
+pkg_props_homepage = https://github.com/greyarea/props
+pkg_props_fetch = git
+pkg_props_repo = https://github.com/greyarea/props
+pkg_props_commit = master
+
+PACKAGES += protobuffs
+pkg_protobuffs_name = protobuffs
+pkg_protobuffs_description = An implementation of Google's Protocol Buffers for Erlang, based on ngerakines/erlang_protobuffs.
+pkg_protobuffs_homepage = https://github.com/basho/erlang_protobuffs
+pkg_protobuffs_fetch = git
+pkg_protobuffs_repo = https://github.com/basho/erlang_protobuffs
+pkg_protobuffs_commit = master
+
+PACKAGES += psycho
+pkg_psycho_name = psycho
+pkg_psycho_description = HTTP server that provides a WSGI-like interface for applications and middleware.
+pkg_psycho_homepage = https://github.com/gar1t/psycho
+pkg_psycho_fetch = git
+pkg_psycho_repo = https://github.com/gar1t/psycho
+pkg_psycho_commit = master
+
+PACKAGES += purity
+pkg_purity_name = purity
+pkg_purity_description = A side-effect analyzer for Erlang
+pkg_purity_homepage = https://github.com/mpitid/purity
+pkg_purity_fetch = git
+pkg_purity_repo = https://github.com/mpitid/purity
+pkg_purity_commit = master
+
+PACKAGES += push_service
+pkg_push_service_name = push_service
+pkg_push_service_description = Push service
+pkg_push_service_homepage = https://github.com/hairyhum/push_service
+pkg_push_service_fetch = git
+pkg_push_service_repo = https://github.com/hairyhum/push_service
+pkg_push_service_commit = master
+
+PACKAGES += qdate
+pkg_qdate_name = qdate
+pkg_qdate_description = Date, time, and timezone parsing, formatting, and conversion for Erlang.
+pkg_qdate_homepage = https://github.com/choptastic/qdate
+pkg_qdate_fetch = git
+pkg_qdate_repo = https://github.com/choptastic/qdate
+pkg_qdate_commit = master
+
+PACKAGES += qrcode
+pkg_qrcode_name = qrcode
+pkg_qrcode_description = QR Code encoder in Erlang
+pkg_qrcode_homepage = https://github.com/komone/qrcode
+pkg_qrcode_fetch = git
+pkg_qrcode_repo = https://github.com/komone/qrcode
+pkg_qrcode_commit = master
+
+PACKAGES += quest
+pkg_quest_name = quest
+pkg_quest_description = Learn Erlang through this set of challenges. An interactive system for getting to know Erlang.
+pkg_quest_homepage = https://github.com/eriksoe/ErlangQuest
+pkg_quest_fetch = git
+pkg_quest_repo = https://github.com/eriksoe/ErlangQuest
+pkg_quest_commit = master
+
+PACKAGES += quickrand
+pkg_quickrand_name = quickrand
+pkg_quickrand_description = Quick Erlang Random Number Generation
+pkg_quickrand_homepage = https://github.com/okeuday/quickrand
+pkg_quickrand_fetch = git
+pkg_quickrand_repo = https://github.com/okeuday/quickrand
+pkg_quickrand_commit = master
+
+PACKAGES += rabbit
+pkg_rabbit_name = rabbit
+pkg_rabbit_description = RabbitMQ Server
+pkg_rabbit_homepage = https://www.rabbitmq.com/
+pkg_rabbit_fetch = git
+pkg_rabbit_repo = https://github.com/rabbitmq/rabbitmq-server.git
+pkg_rabbit_commit = master
+
+PACKAGES += rabbit_exchange_type_riak
+pkg_rabbit_exchange_type_riak_name = rabbit_exchange_type_riak
+pkg_rabbit_exchange_type_riak_description = Custom RabbitMQ exchange type for sticking messages in Riak
+pkg_rabbit_exchange_type_riak_homepage = https://github.com/jbrisbin/riak-exchange
+pkg_rabbit_exchange_type_riak_fetch = git
+pkg_rabbit_exchange_type_riak_repo = https://github.com/jbrisbin/riak-exchange
+pkg_rabbit_exchange_type_riak_commit = master
+
+PACKAGES += rack
+pkg_rack_name = rack
+pkg_rack_description = Rack handler for Erlang
+pkg_rack_homepage = https://github.com/erlyvideo/rack
+pkg_rack_fetch = git
+pkg_rack_repo = https://github.com/erlyvideo/rack
+pkg_rack_commit = master
+
+PACKAGES += radierl
+pkg_radierl_name = radierl
+pkg_radierl_description = RADIUS protocol stack implemented in Erlang.
+pkg_radierl_homepage = https://github.com/vances/radierl
+pkg_radierl_fetch = git
+pkg_radierl_repo = https://github.com/vances/radierl
+pkg_radierl_commit = master
+
+PACKAGES += rafter
+pkg_rafter_name = rafter
+pkg_rafter_description = An Erlang library application which implements the Raft consensus protocol
+pkg_rafter_homepage = https://github.com/andrewjstone/rafter
+pkg_rafter_fetch = git
+pkg_rafter_repo = https://github.com/andrewjstone/rafter
+pkg_rafter_commit = master
+
+PACKAGES += ranch
+pkg_ranch_name = ranch
+pkg_ranch_description = Socket acceptor pool for TCP protocols.
+pkg_ranch_homepage = http://ninenines.eu
+pkg_ranch_fetch = git
+pkg_ranch_repo = https://github.com/ninenines/ranch
+pkg_ranch_commit = 1.2.1
+
+PACKAGES += rbeacon
+pkg_rbeacon_name = rbeacon
+pkg_rbeacon_description = LAN discovery and presence in Erlang.
+pkg_rbeacon_homepage = https://github.com/refuge/rbeacon
+pkg_rbeacon_fetch = git
+pkg_rbeacon_repo = https://github.com/refuge/rbeacon
+pkg_rbeacon_commit = master
+
+PACKAGES += rebar
+pkg_rebar_name = rebar
+pkg_rebar_description = Erlang build tool that makes it easy to compile and test Erlang applications, port drivers and releases.
+pkg_rebar_homepage = http://www.rebar3.org
+pkg_rebar_fetch = git
+pkg_rebar_repo = https://github.com/rebar/rebar3
+pkg_rebar_commit = master
+
+PACKAGES += rebus
+pkg_rebus_name = rebus
+pkg_rebus_description = A stupid simple, internal, pub/sub event bus written in and for Erlang.
+pkg_rebus_homepage = https://github.com/olle/rebus
+pkg_rebus_fetch = git
+pkg_rebus_repo = https://github.com/olle/rebus
+pkg_rebus_commit = master
+
+PACKAGES += rec2json
+pkg_rec2json_name = rec2json
+pkg_rec2json_description = Compile Erlang record definitions into modules to convert them to/from JSON easily.
+pkg_rec2json_homepage = https://github.com/lordnull/rec2json
+pkg_rec2json_fetch = git
+pkg_rec2json_repo = https://github.com/lordnull/rec2json
+pkg_rec2json_commit = master
+
+PACKAGES += recon
+pkg_recon_name = recon
+pkg_recon_description = Collection of functions and scripts to debug Erlang in production.
+pkg_recon_homepage = https://github.com/ferd/recon
+pkg_recon_fetch = git
+pkg_recon_repo = https://github.com/ferd/recon
+pkg_recon_commit = master
+
+PACKAGES += record_info
+pkg_record_info_name = record_info
+pkg_record_info_description = Convert between record and proplist
+pkg_record_info_homepage = https://github.com/bipthelin/erlang-record_info
+pkg_record_info_fetch = git
+pkg_record_info_repo = https://github.com/bipthelin/erlang-record_info
+pkg_record_info_commit = master
+
+PACKAGES += redgrid
+pkg_redgrid_name = redgrid
+pkg_redgrid_description = automatic Erlang node discovery via Redis
+pkg_redgrid_homepage = https://github.com/jkvor/redgrid
+pkg_redgrid_fetch = git
+pkg_redgrid_repo = https://github.com/jkvor/redgrid
+pkg_redgrid_commit = master
+
+PACKAGES += redo
+pkg_redo_name = redo
+pkg_redo_description = pipelined Erlang Redis client
+pkg_redo_homepage = https://github.com/jkvor/redo
+pkg_redo_fetch = git
+pkg_redo_repo = https://github.com/jkvor/redo
+pkg_redo_commit = master
+
+PACKAGES += reload_mk
+pkg_reload_mk_name = reload_mk
+pkg_reload_mk_description = Live reload plugin for erlang.mk.
+pkg_reload_mk_homepage = https://github.com/bullno1/reload.mk
+pkg_reload_mk_fetch = git
+pkg_reload_mk_repo = https://github.com/bullno1/reload.mk
+pkg_reload_mk_commit = master
+
+PACKAGES += reltool_util
+pkg_reltool_util_name = reltool_util
+pkg_reltool_util_description = Erlang reltool utility application
+pkg_reltool_util_homepage = https://github.com/okeuday/reltool_util
+pkg_reltool_util_fetch = git
+pkg_reltool_util_repo = https://github.com/okeuday/reltool_util
+pkg_reltool_util_commit = master
+
+PACKAGES += relx
+pkg_relx_name = relx
+pkg_relx_description = Sane, simple release creation for Erlang
+pkg_relx_homepage = https://github.com/erlware/relx
+pkg_relx_fetch = git
+pkg_relx_repo = https://github.com/erlware/relx
+pkg_relx_commit = master
+
+PACKAGES += resource_discovery
+pkg_resource_discovery_name = resource_discovery
+pkg_resource_discovery_description = An application used to dynamically discover resources present in an Erlang node cluster.
+pkg_resource_discovery_homepage = http://erlware.org/
+pkg_resource_discovery_fetch = git
+pkg_resource_discovery_repo = https://github.com/erlware/resource_discovery
+pkg_resource_discovery_commit = master
+
+PACKAGES += restc
+pkg_restc_name = restc
+pkg_restc_description = Erlang REST client
+pkg_restc_homepage = https://github.com/kivra/restclient
+pkg_restc_fetch = git
+pkg_restc_repo = https://github.com/kivra/restclient
+pkg_restc_commit = master
+
+PACKAGES += rfc4627_jsonrpc
+pkg_rfc4627_jsonrpc_name = rfc4627_jsonrpc
+pkg_rfc4627_jsonrpc_description = Erlang RFC4627 (JSON) codec and JSON-RPC server implementation.
+pkg_rfc4627_jsonrpc_homepage = https://github.com/tonyg/erlang-rfc4627
+pkg_rfc4627_jsonrpc_fetch = git
+pkg_rfc4627_jsonrpc_repo = https://github.com/tonyg/erlang-rfc4627
+pkg_rfc4627_jsonrpc_commit = master
+
+PACKAGES += riak_control
+pkg_riak_control_name = riak_control
+pkg_riak_control_description = Webmachine-based administration interface for Riak.
+pkg_riak_control_homepage = https://github.com/basho/riak_control
+pkg_riak_control_fetch = git
+pkg_riak_control_repo = https://github.com/basho/riak_control
+pkg_riak_control_commit = master
+
+PACKAGES += riak_core
+pkg_riak_core_name = riak_core
+pkg_riak_core_description = Distributed systems infrastructure used by Riak.
+pkg_riak_core_homepage = https://github.com/basho/riak_core
+pkg_riak_core_fetch = git
+pkg_riak_core_repo = https://github.com/basho/riak_core
+pkg_riak_core_commit = master
+
+PACKAGES += riak_dt
+pkg_riak_dt_name = riak_dt
+pkg_riak_dt_description = Convergent replicated datatypes in Erlang
+pkg_riak_dt_homepage = https://github.com/basho/riak_dt
+pkg_riak_dt_fetch = git
+pkg_riak_dt_repo = https://github.com/basho/riak_dt
+pkg_riak_dt_commit = master
+
+PACKAGES += riak_ensemble
+pkg_riak_ensemble_name = riak_ensemble
+pkg_riak_ensemble_description = Multi-Paxos framework in Erlang
+pkg_riak_ensemble_homepage = https://github.com/basho/riak_ensemble
+pkg_riak_ensemble_fetch = git
+pkg_riak_ensemble_repo = https://github.com/basho/riak_ensemble
+pkg_riak_ensemble_commit = master
+
+PACKAGES += riak_kv
+pkg_riak_kv_name = riak_kv
+pkg_riak_kv_description = Riak Key/Value Store
+pkg_riak_kv_homepage = https://github.com/basho/riak_kv
+pkg_riak_kv_fetch = git
+pkg_riak_kv_repo = https://github.com/basho/riak_kv
+pkg_riak_kv_commit = master
+
+PACKAGES += riak_pg
+pkg_riak_pg_name = riak_pg
+pkg_riak_pg_description = Distributed process groups with riak_core.
+pkg_riak_pg_homepage = https://github.com/cmeiklejohn/riak_pg
+pkg_riak_pg_fetch = git
+pkg_riak_pg_repo = https://github.com/cmeiklejohn/riak_pg
+pkg_riak_pg_commit = master
+
+PACKAGES += riak_pipe
+pkg_riak_pipe_name = riak_pipe
+pkg_riak_pipe_description = Riak Pipelines
+pkg_riak_pipe_homepage = https://github.com/basho/riak_pipe
+pkg_riak_pipe_fetch = git
+pkg_riak_pipe_repo = https://github.com/basho/riak_pipe
+pkg_riak_pipe_commit = master
+
+PACKAGES += riak_sysmon
+pkg_riak_sysmon_name = riak_sysmon
+pkg_riak_sysmon_description = Simple OTP app for managing Erlang VM system_monitor event messages
+pkg_riak_sysmon_homepage = https://github.com/basho/riak_sysmon
+pkg_riak_sysmon_fetch = git
+pkg_riak_sysmon_repo = https://github.com/basho/riak_sysmon
+pkg_riak_sysmon_commit = master
+
+PACKAGES += riak_test
+pkg_riak_test_name = riak_test
+pkg_riak_test_description = I'm in your cluster, testing your riaks
+pkg_riak_test_homepage = https://github.com/basho/riak_test
+pkg_riak_test_fetch = git
+pkg_riak_test_repo = https://github.com/basho/riak_test
+pkg_riak_test_commit = master
+
+PACKAGES += riakc
+pkg_riakc_name = riakc
+pkg_riakc_description = Erlang clients for Riak.
+pkg_riakc_homepage = https://github.com/basho/riak-erlang-client
+pkg_riakc_fetch = git
+pkg_riakc_repo = https://github.com/basho/riak-erlang-client
+pkg_riakc_commit = master
+
+PACKAGES += riakhttpc
+pkg_riakhttpc_name = riakhttpc
+pkg_riakhttpc_description = Riak Erlang client using the HTTP interface
+pkg_riakhttpc_homepage = https://github.com/basho/riak-erlang-http-client
+pkg_riakhttpc_fetch = git
+pkg_riakhttpc_repo = https://github.com/basho/riak-erlang-http-client
+pkg_riakhttpc_commit = master
+
+PACKAGES += riaknostic
+pkg_riaknostic_name = riaknostic
+pkg_riaknostic_description = A diagnostic tool for Riak installations, to find common errors ASAP
+pkg_riaknostic_homepage = https://github.com/basho/riaknostic
+pkg_riaknostic_fetch = git
+pkg_riaknostic_repo = https://github.com/basho/riaknostic
+pkg_riaknostic_commit = master
+
+PACKAGES += riakpool
+pkg_riakpool_name = riakpool
+pkg_riakpool_description = Erlang Riak client pool
+pkg_riakpool_homepage = https://github.com/dweldon/riakpool
+pkg_riakpool_fetch = git
+pkg_riakpool_repo = https://github.com/dweldon/riakpool
+pkg_riakpool_commit = master
+
+PACKAGES += rivus_cep
+pkg_rivus_cep_name = rivus_cep
+pkg_rivus_cep_description = Complex event processing in Erlang
+pkg_rivus_cep_homepage = https://github.com/vascokk/rivus_cep
+pkg_rivus_cep_fetch = git
+pkg_rivus_cep_repo = https://github.com/vascokk/rivus_cep
+pkg_rivus_cep_commit = master
+
+PACKAGES += rlimit
+pkg_rlimit_name = rlimit
+pkg_rlimit_description = Magnus Klaar's rate limiter code from etorrent
+pkg_rlimit_homepage = https://github.com/jlouis/rlimit
+pkg_rlimit_fetch = git
+pkg_rlimit_repo = https://github.com/jlouis/rlimit
+pkg_rlimit_commit = master
+
+PACKAGES += rust_mk
+pkg_rust_mk_name = rust_mk
+pkg_rust_mk_description = Build Rust crates in an Erlang application
+pkg_rust_mk_homepage = https://github.com/goertzenator/rust.mk
+pkg_rust_mk_fetch = git
+pkg_rust_mk_repo = https://github.com/goertzenator/rust.mk
+pkg_rust_mk_commit = master
+
+PACKAGES += safetyvalve
+pkg_safetyvalve_name = safetyvalve
+pkg_safetyvalve_description = A safety valve for your Erlang node
+pkg_safetyvalve_homepage = https://github.com/jlouis/safetyvalve
+pkg_safetyvalve_fetch = git
+pkg_safetyvalve_repo = https://github.com/jlouis/safetyvalve
+pkg_safetyvalve_commit = master
+
+PACKAGES += seestar
+pkg_seestar_name = seestar
+pkg_seestar_description = The Erlang client for Cassandra 1.2+ binary protocol
+pkg_seestar_homepage = https://github.com/iamaleksey/seestar
+pkg_seestar_fetch = git
+pkg_seestar_repo = https://github.com/iamaleksey/seestar
+pkg_seestar_commit = master
+
+PACKAGES += service
+pkg_service_name = service
+pkg_service_description = A minimal Erlang behavior for creating CloudI internal services
+pkg_service_homepage = http://cloudi.org/
+pkg_service_fetch = git
+pkg_service_repo = https://github.com/CloudI/service
+pkg_service_commit = master
+
+PACKAGES += setup
+pkg_setup_name = setup
+pkg_setup_description = Generic setup utility for Erlang-based systems
+pkg_setup_homepage = https://github.com/uwiger/setup
+pkg_setup_fetch = git
+pkg_setup_repo = https://github.com/uwiger/setup
+pkg_setup_commit = master
+
+PACKAGES += sext
+pkg_sext_name = sext
+pkg_sext_description = Sortable Erlang Term Serialization
+pkg_sext_homepage = https://github.com/uwiger/sext
+pkg_sext_fetch = git
+pkg_sext_repo = https://github.com/uwiger/sext
+pkg_sext_commit = master
+
+PACKAGES += sfmt
+pkg_sfmt_name = sfmt
+pkg_sfmt_description = SFMT pseudo random number generator for Erlang.
+pkg_sfmt_homepage = https://github.com/jj1bdx/sfmt-erlang
+pkg_sfmt_fetch = git
+pkg_sfmt_repo = https://github.com/jj1bdx/sfmt-erlang
+pkg_sfmt_commit = master
+
+PACKAGES += sgte
+pkg_sgte_name = sgte
+pkg_sgte_description = A simple Erlang Template Engine
+pkg_sgte_homepage = https://github.com/filippo/sgte
+pkg_sgte_fetch = git
+pkg_sgte_repo = https://github.com/filippo/sgte
+pkg_sgte_commit = master
+
+PACKAGES += sheriff
+pkg_sheriff_name = sheriff
+pkg_sheriff_description = Parse transform for type based validation.
+pkg_sheriff_homepage = http://ninenines.eu
+pkg_sheriff_fetch = git
+pkg_sheriff_repo = https://github.com/extend/sheriff
+pkg_sheriff_commit = master
+
+PACKAGES += shotgun
+pkg_shotgun_name = shotgun
+pkg_shotgun_description = better than just a gun
+pkg_shotgun_homepage = https://github.com/inaka/shotgun
+pkg_shotgun_fetch = git
+pkg_shotgun_repo = https://github.com/inaka/shotgun
+pkg_shotgun_commit = master
+
+PACKAGES += sidejob
+pkg_sidejob_name = sidejob
+pkg_sidejob_description = Parallel worker and capacity limiting library for Erlang
+pkg_sidejob_homepage = https://github.com/basho/sidejob
+pkg_sidejob_fetch = git
+pkg_sidejob_repo = https://github.com/basho/sidejob
+pkg_sidejob_commit = master
+
+PACKAGES += sieve
+pkg_sieve_name = sieve
+pkg_sieve_description = sieve is a simple TCP routing proxy (layer 7) in Erlang
+pkg_sieve_homepage = https://github.com/benoitc/sieve
+pkg_sieve_fetch = git
+pkg_sieve_repo = https://github.com/benoitc/sieve
+pkg_sieve_commit = master
+
+PACKAGES += sighandler
+pkg_sighandler_name = sighandler
+pkg_sighandler_description = Handle UNIX signals in Erlang
+pkg_sighandler_homepage = https://github.com/jkingsbery/sighandler
+pkg_sighandler_fetch = git
+pkg_sighandler_repo = https://github.com/jkingsbery/sighandler
+pkg_sighandler_commit = master
+
+PACKAGES += simhash
+pkg_simhash_name = simhash
+pkg_simhash_description = Simhashing for Erlang -- hashing algorithm to find near-duplicates in binary data.
+pkg_simhash_homepage = https://github.com/ferd/simhash
+pkg_simhash_fetch = git
+pkg_simhash_repo = https://github.com/ferd/simhash
+pkg_simhash_commit = master
+
+PACKAGES += simple_bridge
+pkg_simple_bridge_name = simple_bridge
+pkg_simple_bridge_description = A simple, standardized interface library to Erlang HTTP Servers.
+pkg_simple_bridge_homepage = https://github.com/nitrogen/simple_bridge
+pkg_simple_bridge_fetch = git
+pkg_simple_bridge_repo = https://github.com/nitrogen/simple_bridge
+pkg_simple_bridge_commit = master
+
+PACKAGES += simple_oauth2
+pkg_simple_oauth2_name = simple_oauth2
+pkg_simple_oauth2_description = Simple erlang OAuth2 client module for any http server framework (Google, Facebook, Yandex, Vkontakte are preconfigured)
+pkg_simple_oauth2_homepage = https://github.com/virtan/simple_oauth2
+pkg_simple_oauth2_fetch = git
+pkg_simple_oauth2_repo = https://github.com/virtan/simple_oauth2
+pkg_simple_oauth2_commit = master
+
+PACKAGES += skel
+pkg_skel_name = skel
+pkg_skel_description = A Streaming Process-based Skeleton Library for Erlang
+pkg_skel_homepage = https://github.com/ParaPhrase/skel
+pkg_skel_fetch = git
+pkg_skel_repo = https://github.com/ParaPhrase/skel
+pkg_skel_commit = master
+
+PACKAGES += slack
+pkg_slack_name = slack
+pkg_slack_description = Minimal Slack notification OTP library.
+pkg_slack_homepage = https://github.com/DonBranson/slack
+pkg_slack_fetch = git
+pkg_slack_repo = https://github.com/DonBranson/slack.git
+pkg_slack_commit = master
+
+PACKAGES += smother
+pkg_smother_name = smother
+pkg_smother_description = Extended code coverage metrics for Erlang.
+pkg_smother_homepage = https://ramsay-t.github.io/Smother/
+pkg_smother_fetch = git
+pkg_smother_repo = https://github.com/ramsay-t/Smother
+pkg_smother_commit = master
+
+PACKAGES += snappyer
+pkg_snappyer_name = snappyer
+pkg_snappyer_description = Snappy as a NIF for Erlang
+pkg_snappyer_homepage = https://github.com/zmstone/snappyer
+pkg_snappyer_fetch = git
+pkg_snappyer_repo = https://github.com/zmstone/snappyer.git
+pkg_snappyer_commit = master
+
+PACKAGES += social
+pkg_social_name = social
+pkg_social_description = Cowboy handler for social login via OAuth2 providers
+pkg_social_homepage = https://github.com/dvv/social
+pkg_social_fetch = git
+pkg_social_repo = https://github.com/dvv/social
+pkg_social_commit = master
+
+PACKAGES += spapi_router
+pkg_spapi_router_name = spapi_router
+pkg_spapi_router_description = Partially-connected Erlang clustering
+pkg_spapi_router_homepage = https://github.com/spilgames/spapi-router
+pkg_spapi_router_fetch = git
+pkg_spapi_router_repo = https://github.com/spilgames/spapi-router
+pkg_spapi_router_commit = master
+
+PACKAGES += sqerl
+pkg_sqerl_name = sqerl
+pkg_sqerl_description = An Erlang-flavoured SQL DSL
+pkg_sqerl_homepage = https://github.com/hairyhum/sqerl
+pkg_sqerl_fetch = git
+pkg_sqerl_repo = https://github.com/hairyhum/sqerl
+pkg_sqerl_commit = master
+
+PACKAGES += srly
+pkg_srly_name = srly
+pkg_srly_description = Native Erlang Unix serial interface
+pkg_srly_homepage = https://github.com/msantos/srly
+pkg_srly_fetch = git
+pkg_srly_repo = https://github.com/msantos/srly
+pkg_srly_commit = master
+
+PACKAGES += sshrpc
+pkg_sshrpc_name = sshrpc
+pkg_sshrpc_description = Erlang SSH RPC module (experimental)
+pkg_sshrpc_homepage = https://github.com/jj1bdx/sshrpc
+pkg_sshrpc_fetch = git
+pkg_sshrpc_repo = https://github.com/jj1bdx/sshrpc
+pkg_sshrpc_commit = master
+
+PACKAGES += stable
+pkg_stable_name = stable
+pkg_stable_description = Library of assorted helpers for Cowboy web server.
+pkg_stable_homepage = https://github.com/dvv/stable
+pkg_stable_fetch = git
+pkg_stable_repo = https://github.com/dvv/stable
+pkg_stable_commit = master
+
+PACKAGES += statebox
+pkg_statebox_name = statebox
+pkg_statebox_description = Erlang state monad with merge/conflict-resolution capabilities. Useful for Riak.
+pkg_statebox_homepage = https://github.com/mochi/statebox
+pkg_statebox_fetch = git
+pkg_statebox_repo = https://github.com/mochi/statebox
+pkg_statebox_commit = master
+
+PACKAGES += statebox_riak
+pkg_statebox_riak_name = statebox_riak
+pkg_statebox_riak_description = Convenience library that makes it easier to use statebox with riak, extracted from best practices in our production code at Mochi Media.
+pkg_statebox_riak_homepage = https://github.com/mochi/statebox_riak
+pkg_statebox_riak_fetch = git
+pkg_statebox_riak_repo = https://github.com/mochi/statebox_riak
+pkg_statebox_riak_commit = master
+
+PACKAGES += statman
+pkg_statman_name = statman
+pkg_statman_description = Efficiently collect massive volumes of metrics inside the Erlang VM
+pkg_statman_homepage = https://github.com/knutin/statman
+pkg_statman_fetch = git
+pkg_statman_repo = https://github.com/knutin/statman
+pkg_statman_commit = master
+
+PACKAGES += statsderl
+pkg_statsderl_name = statsderl
+pkg_statsderl_description = StatsD client (Erlang)
+pkg_statsderl_homepage = https://github.com/lpgauth/statsderl
+pkg_statsderl_fetch = git
+pkg_statsderl_repo = https://github.com/lpgauth/statsderl
+pkg_statsderl_commit = master
+
+PACKAGES += stdinout_pool
+pkg_stdinout_pool_name = stdinout_pool
+pkg_stdinout_pool_description = stdinout_pool : stuff goes in, stuff goes out. there's never any miscommunication.
+pkg_stdinout_pool_homepage = https://github.com/mattsta/erlang-stdinout-pool
+pkg_stdinout_pool_fetch = git
+pkg_stdinout_pool_repo = https://github.com/mattsta/erlang-stdinout-pool
+pkg_stdinout_pool_commit = master
+
+PACKAGES += stockdb
+pkg_stockdb_name = stockdb
+pkg_stockdb_description = Database for storing stock exchange quotes in Erlang
+pkg_stockdb_homepage = https://github.com/maxlapshin/stockdb
+pkg_stockdb_fetch = git
+pkg_stockdb_repo = https://github.com/maxlapshin/stockdb
+pkg_stockdb_commit = master
+
+PACKAGES += stripe
+pkg_stripe_name = stripe
+pkg_stripe_description = Erlang interface to the stripe.com API
+pkg_stripe_homepage = https://github.com/mattsta/stripe-erlang
+pkg_stripe_fetch = git
+pkg_stripe_repo = https://github.com/mattsta/stripe-erlang
+pkg_stripe_commit = v1
+
+PACKAGES += subproc
+pkg_subproc_name = subproc
+pkg_subproc_description = unix subprocess manager with {active,once|false} modes
+pkg_subproc_homepage = http://dozzie.jarowit.net/trac/wiki/subproc
+pkg_subproc_fetch = git
+pkg_subproc_repo = https://github.com/dozzie/subproc
+pkg_subproc_commit = v0.1.0
+
+PACKAGES += supervisor3
+pkg_supervisor3_name = supervisor3
+pkg_supervisor3_description = OTP supervisor with additional strategies
+pkg_supervisor3_homepage = https://github.com/klarna/supervisor3
+pkg_supervisor3_fetch = git
+pkg_supervisor3_repo = https://github.com/klarna/supervisor3.git
+pkg_supervisor3_commit = master
+
+PACKAGES += surrogate
+pkg_surrogate_name = surrogate
+pkg_surrogate_description = Proxy server written in Erlang. Supports reverse proxy load balancing and forward proxy with HTTP (including CONNECT), SOCKS4, SOCKS5, and transparent proxy modes.
+pkg_surrogate_homepage = https://github.com/skruger/Surrogate
+pkg_surrogate_fetch = git
+pkg_surrogate_repo = https://github.com/skruger/Surrogate
+pkg_surrogate_commit = master
+
+PACKAGES += swab
+pkg_swab_name = swab
+pkg_swab_description = General purpose buffer handling module
+pkg_swab_homepage = https://github.com/crownedgrouse/swab
+pkg_swab_fetch = git
+pkg_swab_repo = https://github.com/crownedgrouse/swab
+pkg_swab_commit = master
+
+PACKAGES += swarm
+pkg_swarm_name = swarm
+pkg_swarm_description = Fast and simple acceptor pool for Erlang
+pkg_swarm_homepage = https://github.com/jeremey/swarm
+pkg_swarm_fetch = git
+pkg_swarm_repo = https://github.com/jeremey/swarm
+pkg_swarm_commit = master
+
+PACKAGES += switchboard
+pkg_switchboard_name = switchboard
+pkg_switchboard_description = A framework for processing email using worker plugins.
+pkg_switchboard_homepage = https://github.com/thusfresh/switchboard
+pkg_switchboard_fetch = git
+pkg_switchboard_repo = https://github.com/thusfresh/switchboard
+pkg_switchboard_commit = master
+
+PACKAGES += syn
+pkg_syn_name = syn
+pkg_syn_description = A global Process Registry and Process Group manager for Erlang.
+pkg_syn_homepage = https://github.com/ostinelli/syn
+pkg_syn_fetch = git
+pkg_syn_repo = https://github.com/ostinelli/syn
+pkg_syn_commit = master
+
+PACKAGES += sync
+pkg_sync_name = sync
+pkg_sync_description = On-the-fly recompiling and reloading in Erlang.
+pkg_sync_homepage = https://github.com/rustyio/sync
+pkg_sync_fetch = git
+pkg_sync_repo = https://github.com/rustyio/sync
+pkg_sync_commit = master
+
+PACKAGES += syntaxerl
+pkg_syntaxerl_name = syntaxerl
+pkg_syntaxerl_description = Syntax checker for Erlang
+pkg_syntaxerl_homepage = https://github.com/ten0s/syntaxerl
+pkg_syntaxerl_fetch = git
+pkg_syntaxerl_repo = https://github.com/ten0s/syntaxerl
+pkg_syntaxerl_commit = master
+
+PACKAGES += syslog
+pkg_syslog_name = syslog
+pkg_syslog_description = Erlang port driver for interacting with syslog via syslog(3)
+pkg_syslog_homepage = https://github.com/Vagabond/erlang-syslog
+pkg_syslog_fetch = git
+pkg_syslog_repo = https://github.com/Vagabond/erlang-syslog
+pkg_syslog_commit = master
+
+PACKAGES += taskforce
+pkg_taskforce_name = taskforce
+pkg_taskforce_description = Erlang worker pools for controlled parallelisation of arbitrary tasks.
+pkg_taskforce_homepage = https://github.com/g-andrade/taskforce
+pkg_taskforce_fetch = git
+pkg_taskforce_repo = https://github.com/g-andrade/taskforce
+pkg_taskforce_commit = master
+
+PACKAGES += tddreloader
+pkg_tddreloader_name = tddreloader
+pkg_tddreloader_description = Shell utility for recompiling, reloading, and testing code as it changes
+pkg_tddreloader_homepage = https://github.com/version2beta/tddreloader
+pkg_tddreloader_fetch = git
+pkg_tddreloader_repo = https://github.com/version2beta/tddreloader
+pkg_tddreloader_commit = master
+
+PACKAGES += tempo
+pkg_tempo_name = tempo
+pkg_tempo_description = NIF-based date and time parsing and formatting for Erlang.
+pkg_tempo_homepage = https://github.com/selectel/tempo
+pkg_tempo_fetch = git
+pkg_tempo_repo = https://github.com/selectel/tempo
+pkg_tempo_commit = master
+
+PACKAGES += ticktick
+pkg_ticktick_name = ticktick
+pkg_ticktick_description = Ticktick is an ID generator for message services.
+pkg_ticktick_homepage = https://github.com/ericliang/ticktick
+pkg_ticktick_fetch = git
+pkg_ticktick_repo = https://github.com/ericliang/ticktick
+pkg_ticktick_commit = master
+
+PACKAGES += tinymq
+pkg_tinymq_name = tinymq
+pkg_tinymq_description = TinyMQ - a diminutive, in-memory message queue
+pkg_tinymq_homepage = https://github.com/ChicagoBoss/tinymq
+pkg_tinymq_fetch = git
+pkg_tinymq_repo = https://github.com/ChicagoBoss/tinymq
+pkg_tinymq_commit = master
+
+PACKAGES += tinymt
+pkg_tinymt_name = tinymt
+pkg_tinymt_description = TinyMT pseudo random number generator for Erlang.
+pkg_tinymt_homepage = https://github.com/jj1bdx/tinymt-erlang
+pkg_tinymt_fetch = git
+pkg_tinymt_repo = https://github.com/jj1bdx/tinymt-erlang
+pkg_tinymt_commit = master
+
+PACKAGES += tirerl
+pkg_tirerl_name = tirerl
+pkg_tirerl_description = Erlang interface to Elasticsearch
+pkg_tirerl_homepage = https://github.com/inaka/tirerl
+pkg_tirerl_fetch = git
+pkg_tirerl_repo = https://github.com/inaka/tirerl
+pkg_tirerl_commit = master
+
+PACKAGES += toml
+pkg_toml_name = toml
+pkg_toml_description = TOML (0.4.0) config parser
+pkg_toml_homepage = http://dozzie.jarowit.net/trac/wiki/TOML
+pkg_toml_fetch = git
+pkg_toml_repo = https://github.com/dozzie/toml
+pkg_toml_commit = v0.2.0
+
+PACKAGES += traffic_tools
+pkg_traffic_tools_name = traffic_tools
+pkg_traffic_tools_description = Simple traffic limiting library
+pkg_traffic_tools_homepage = https://github.com/systra/traffic_tools
+pkg_traffic_tools_fetch = git
+pkg_traffic_tools_repo = https://github.com/systra/traffic_tools
+pkg_traffic_tools_commit = master
+
+PACKAGES += trails
+pkg_trails_name = trails
+pkg_trails_description = A couple of improvements over Cowboy Routes
+pkg_trails_homepage = http://inaka.github.io/cowboy-trails/
+pkg_trails_fetch = git
+pkg_trails_repo = https://github.com/inaka/cowboy-trails
+pkg_trails_commit = master
+
+PACKAGES += trane
+pkg_trane_name = trane
+pkg_trane_description = SAX-style parser for broken HTML, in Erlang
+pkg_trane_homepage = https://github.com/massemanet/trane
+pkg_trane_fetch = git
+pkg_trane_repo = https://github.com/massemanet/trane
+pkg_trane_commit = master
+
+PACKAGES += transit
+pkg_transit_name = transit
+pkg_transit_description = Transit format for Erlang
+pkg_transit_homepage = https://github.com/isaiah/transit-erlang
+pkg_transit_fetch = git
+pkg_transit_repo = https://github.com/isaiah/transit-erlang
+pkg_transit_commit = master
+
+PACKAGES += trie
+pkg_trie_name = trie
+pkg_trie_description = Erlang Trie Implementation
+pkg_trie_homepage = https://github.com/okeuday/trie
+pkg_trie_fetch = git
+pkg_trie_repo = https://github.com/okeuday/trie
+pkg_trie_commit = master
+
+PACKAGES += triq
+pkg_triq_name = triq
+pkg_triq_description = Trifork QuickCheck
+pkg_triq_homepage = https://triq.gitlab.io
+pkg_triq_fetch = git
+pkg_triq_repo = https://gitlab.com/triq/triq.git
+pkg_triq_commit = master
+
+PACKAGES += tunctl
+pkg_tunctl_name = tunctl
+pkg_tunctl_description = Erlang TUN/TAP interface
+pkg_tunctl_homepage = https://github.com/msantos/tunctl
+pkg_tunctl_fetch = git
+pkg_tunctl_repo = https://github.com/msantos/tunctl
+pkg_tunctl_commit = master
+
+PACKAGES += twerl
+pkg_twerl_name = twerl
+pkg_twerl_description = Erlang client for the Twitter Streaming API
+pkg_twerl_homepage = https://github.com/lucaspiller/twerl
+pkg_twerl_fetch = git
+pkg_twerl_repo = https://github.com/lucaspiller/twerl
+pkg_twerl_commit = oauth
+
+PACKAGES += twitter_erlang
+pkg_twitter_erlang_name = twitter_erlang
+pkg_twitter_erlang_description = An Erlang Twitter client
+pkg_twitter_erlang_homepage = https://github.com/ngerakines/erlang_twitter
+pkg_twitter_erlang_fetch = git
+pkg_twitter_erlang_repo = https://github.com/ngerakines/erlang_twitter
+pkg_twitter_erlang_commit = master
+
+PACKAGES += ucol_nif
+pkg_ucol_nif_name = ucol_nif
+pkg_ucol_nif_description = ICU-based collation Erlang module
+pkg_ucol_nif_homepage = https://github.com/refuge/ucol_nif
+pkg_ucol_nif_fetch = git
+pkg_ucol_nif_repo = https://github.com/refuge/ucol_nif
+pkg_ucol_nif_commit = master
+
+PACKAGES += unicorn
+pkg_unicorn_name = unicorn
+pkg_unicorn_description = Generic configuration server
+pkg_unicorn_homepage = https://github.com/shizzard/unicorn
+pkg_unicorn_fetch = git
+pkg_unicorn_repo = https://github.com/shizzard/unicorn
+pkg_unicorn_commit = master
+
+PACKAGES += unsplit
+pkg_unsplit_name = unsplit
+pkg_unsplit_description = Resolves conflicts in Mnesia after network splits
+pkg_unsplit_homepage = https://github.com/uwiger/unsplit
+pkg_unsplit_fetch = git
+pkg_unsplit_repo = https://github.com/uwiger/unsplit
+pkg_unsplit_commit = master
+
+PACKAGES += uuid
+pkg_uuid_name = uuid
+pkg_uuid_description = Erlang UUID Implementation
+pkg_uuid_homepage = https://github.com/okeuday/uuid
+pkg_uuid_fetch = git
+pkg_uuid_repo = https://github.com/okeuday/uuid
+pkg_uuid_commit = master
+
+PACKAGES += ux
+pkg_ux_name = ux
+pkg_ux_description = Unicode eXtension for Erlang (Strings, Collation)
+pkg_ux_homepage = https://github.com/erlang-unicode/ux
+pkg_ux_fetch = git
+pkg_ux_repo = https://github.com/erlang-unicode/ux
+pkg_ux_commit = master
+
+PACKAGES += vert
+pkg_vert_name = vert
+pkg_vert_description = Erlang binding to the libvirt virtualization API
+pkg_vert_homepage = https://github.com/msantos/erlang-libvirt
+pkg_vert_fetch = git
+pkg_vert_repo = https://github.com/msantos/erlang-libvirt
+pkg_vert_commit = master
+
+PACKAGES += verx
+pkg_verx_name = verx
+pkg_verx_description = Erlang implementation of the libvirtd remote protocol
+pkg_verx_homepage = https://github.com/msantos/verx
+pkg_verx_fetch = git
+pkg_verx_repo = https://github.com/msantos/verx
+pkg_verx_commit = master
+
+PACKAGES += vmq_acl
+pkg_vmq_acl_name = vmq_acl
+pkg_vmq_acl_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_acl_homepage = https://verne.mq/
+pkg_vmq_acl_fetch = git
+pkg_vmq_acl_repo = https://github.com/erlio/vmq_acl
+pkg_vmq_acl_commit = master
+
+PACKAGES += vmq_bridge
+pkg_vmq_bridge_name = vmq_bridge
+pkg_vmq_bridge_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_bridge_homepage = https://verne.mq/
+pkg_vmq_bridge_fetch = git
+pkg_vmq_bridge_repo = https://github.com/erlio/vmq_bridge
+pkg_vmq_bridge_commit = master
+
+PACKAGES += vmq_graphite
+pkg_vmq_graphite_name = vmq_graphite
+pkg_vmq_graphite_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_graphite_homepage = https://verne.mq/
+pkg_vmq_graphite_fetch = git
+pkg_vmq_graphite_repo = https://github.com/erlio/vmq_graphite
+pkg_vmq_graphite_commit = master
+
+PACKAGES += vmq_passwd
+pkg_vmq_passwd_name = vmq_passwd
+pkg_vmq_passwd_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_passwd_homepage = https://verne.mq/
+pkg_vmq_passwd_fetch = git
+pkg_vmq_passwd_repo = https://github.com/erlio/vmq_passwd
+pkg_vmq_passwd_commit = master
+
+PACKAGES += vmq_server
+pkg_vmq_server_name = vmq_server
+pkg_vmq_server_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_server_homepage = https://verne.mq/
+pkg_vmq_server_fetch = git
+pkg_vmq_server_repo = https://github.com/erlio/vmq_server
+pkg_vmq_server_commit = master
+
+PACKAGES += vmq_snmp
+pkg_vmq_snmp_name = vmq_snmp
+pkg_vmq_snmp_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_snmp_homepage = https://verne.mq/
+pkg_vmq_snmp_fetch = git
+pkg_vmq_snmp_repo = https://github.com/erlio/vmq_snmp
+pkg_vmq_snmp_commit = master
+
+PACKAGES += vmq_systree
+pkg_vmq_systree_name = vmq_systree
+pkg_vmq_systree_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_systree_homepage = https://verne.mq/
+pkg_vmq_systree_fetch = git
+pkg_vmq_systree_repo = https://github.com/erlio/vmq_systree
+pkg_vmq_systree_commit = master
+
+PACKAGES += vmstats
+pkg_vmstats_name = vmstats
+pkg_vmstats_description = tiny Erlang app that works in conjunction with statsderl to generate Erlang VM information for Graphite logs.
+pkg_vmstats_homepage = https://github.com/ferd/vmstats
+pkg_vmstats_fetch = git
+pkg_vmstats_repo = https://github.com/ferd/vmstats
+pkg_vmstats_commit = master
+
+PACKAGES += walrus
+pkg_walrus_name = walrus
+pkg_walrus_description = Walrus - Mustache-like Templating
+pkg_walrus_homepage = https://github.com/devinus/walrus
+pkg_walrus_fetch = git
+pkg_walrus_repo = https://github.com/devinus/walrus
+pkg_walrus_commit = master
+
+PACKAGES += webmachine
+pkg_webmachine_name = webmachine
+pkg_webmachine_description = A REST-based system for building web applications.
+pkg_webmachine_homepage = https://github.com/basho/webmachine
+pkg_webmachine_fetch = git
+pkg_webmachine_repo = https://github.com/basho/webmachine
+pkg_webmachine_commit = master
+
+PACKAGES += websocket_client
+pkg_websocket_client_name = websocket_client
+pkg_websocket_client_description = Erlang websocket client (ws and wss supported)
+pkg_websocket_client_homepage = https://github.com/jeremyong/websocket_client
+pkg_websocket_client_fetch = git
+pkg_websocket_client_repo = https://github.com/jeremyong/websocket_client
+pkg_websocket_client_commit = master
+
+PACKAGES += worker_pool
+pkg_worker_pool_name = worker_pool
+pkg_worker_pool_description = A simple Erlang worker pool
+pkg_worker_pool_homepage = https://github.com/inaka/worker_pool
+pkg_worker_pool_fetch = git
+pkg_worker_pool_repo = https://github.com/inaka/worker_pool
+pkg_worker_pool_commit = master
+
+PACKAGES += wrangler
+pkg_wrangler_name = wrangler
+pkg_wrangler_description = Import of the Wrangler svn repository.
+pkg_wrangler_homepage = http://www.cs.kent.ac.uk/projects/wrangler/Home.html
+pkg_wrangler_fetch = git
+pkg_wrangler_repo = https://github.com/RefactoringTools/wrangler
+pkg_wrangler_commit = master
+
+PACKAGES += wsock
+pkg_wsock_name = wsock
+pkg_wsock_description = Erlang library to build WebSocket clients and servers
+pkg_wsock_homepage = https://github.com/madtrick/wsock
+pkg_wsock_fetch = git
+pkg_wsock_repo = https://github.com/madtrick/wsock
+pkg_wsock_commit = master
+
+PACKAGES += xhttpc
+pkg_xhttpc_name = xhttpc
+pkg_xhttpc_description = Extensible HTTP Client for Erlang
+pkg_xhttpc_homepage = https://github.com/seriyps/xhttpc
+pkg_xhttpc_fetch = git
+pkg_xhttpc_repo = https://github.com/seriyps/xhttpc
+pkg_xhttpc_commit = master
+
+PACKAGES += xref_runner
+pkg_xref_runner_name = xref_runner
+pkg_xref_runner_description = Erlang Xref Runner (inspired by rebar xref)
+pkg_xref_runner_homepage = https://github.com/inaka/xref_runner
+pkg_xref_runner_fetch = git
+pkg_xref_runner_repo = https://github.com/inaka/xref_runner
+pkg_xref_runner_commit = master
+
+PACKAGES += yamerl
+pkg_yamerl_name = yamerl
+pkg_yamerl_description = YAML 1.2 parser in pure Erlang
+pkg_yamerl_homepage = https://github.com/yakaz/yamerl
+pkg_yamerl_fetch = git
+pkg_yamerl_repo = https://github.com/yakaz/yamerl
+pkg_yamerl_commit = master
+
+PACKAGES += yamler
+pkg_yamler_name = yamler
+pkg_yamler_description = libyaml-based YAML loader for Erlang
+pkg_yamler_homepage = https://github.com/goertzenator/yamler
+pkg_yamler_fetch = git
+pkg_yamler_repo = https://github.com/goertzenator/yamler
+pkg_yamler_commit = master
+
+PACKAGES += yaws
+pkg_yaws_name = yaws
+pkg_yaws_description = Yaws webserver
+pkg_yaws_homepage = http://yaws.hyber.org
+pkg_yaws_fetch = git
+pkg_yaws_repo = https://github.com/klacke/yaws
+pkg_yaws_commit = master
+
+PACKAGES += zab_engine
+pkg_zab_engine_name = zab_engine
+pkg_zab_engine_description = ZAB protocol implementation in Erlang
+pkg_zab_engine_homepage = https://github.com/xinmingyao/zab_engine
+pkg_zab_engine_fetch = git
+pkg_zab_engine_repo = https://github.com/xinmingyao/zab_engine
+pkg_zab_engine_commit = master
+
+PACKAGES += zabbix_sender
+pkg_zabbix_sender_name = zabbix_sender
+pkg_zabbix_sender_description = Zabbix trapper for sending data to Zabbix in pure Erlang
+pkg_zabbix_sender_homepage = https://github.com/stalkermn/zabbix_sender
+pkg_zabbix_sender_fetch = git
+pkg_zabbix_sender_repo = https://github.com/stalkermn/zabbix_sender.git
+pkg_zabbix_sender_commit = master
+
+PACKAGES += zeta
+pkg_zeta_name = zeta
+pkg_zeta_description = HTTP access log parser in Erlang
+pkg_zeta_homepage = https://github.com/s1n4/zeta
+pkg_zeta_fetch = git
+pkg_zeta_repo = https://github.com/s1n4/zeta
+pkg_zeta_commit = master
+
+PACKAGES += zippers
+pkg_zippers_name = zippers
+pkg_zippers_description = A library for functional zipper data structures in Erlang.
+pkg_zippers_homepage = https://github.com/ferd/zippers
+pkg_zippers_fetch = git
+pkg_zippers_repo = https://github.com/ferd/zippers
+pkg_zippers_commit = master
+
+PACKAGES += zlists
+pkg_zlists_name = zlists
+pkg_zlists_description = Erlang lazy lists library.
+pkg_zlists_homepage = https://github.com/vjache/erlang-zlists
+pkg_zlists_fetch = git
+pkg_zlists_repo = https://github.com/vjache/erlang-zlists
+pkg_zlists_commit = master
+
+PACKAGES += zraft_lib
+pkg_zraft_lib_name = zraft_lib
+pkg_zraft_lib_description = Erlang Raft consensus protocol implementation
+pkg_zraft_lib_homepage = https://github.com/dreyk/zraft_lib
+pkg_zraft_lib_fetch = git
+pkg_zraft_lib_repo = https://github.com/dreyk/zraft_lib
+pkg_zraft_lib_commit = master
+
+PACKAGES += zucchini
+pkg_zucchini_name = zucchini
+pkg_zucchini_description = An Erlang INI parser
+pkg_zucchini_homepage = https://github.com/devinus/zucchini
+pkg_zucchini_fetch = git
+pkg_zucchini_repo = https://github.com/devinus/zucchini
+pkg_zucchini_commit = master
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: search
+
+define pkg_print
+ $(verbose) printf "%s\n" \
+ $(if $(call core_eq,$(1),$(pkg_$(1)_name)),,"Pkg name: $(1)") \
+ "App name: $(pkg_$(1)_name)" \
+ "Description: $(pkg_$(1)_description)" \
+ "Home page: $(pkg_$(1)_homepage)" \
+ "Fetch with: $(pkg_$(1)_fetch)" \
+ "Repository: $(pkg_$(1)_repo)" \
+ "Commit: $(pkg_$(1)_commit)" \
+ ""
+
+endef
+
+search:
+ifdef q
+ $(foreach p,$(PACKAGES), \
+ $(if $(findstring $(call core_lc,$(q)),$(call core_lc,$(pkg_$(p)_name) $(pkg_$(p)_description))), \
+ $(call pkg_print,$(p))))
+else
+ $(foreach p,$(PACKAGES),$(call pkg_print,$(p)))
+endif
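+
+# Example (an illustrative invocation, not part of the index): the search
+# target above matches `q` case-insensitively against package names and
+# descriptions, so
+#
+#   $ make search q=yaml
+#
+# prints the pkg_print block for matches such as yamerl and yamler, while
+# running `make search` without `q` lists every package in the index.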
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-deps clean-tmp-deps.log
+
+# Configuration.
+
+ifdef OTP_DEPS
+$(warning The variable OTP_DEPS is deprecated in favor of LOCAL_DEPS.)
+endif
+
+IGNORE_DEPS ?=
+export IGNORE_DEPS
+
+APPS_DIR ?= $(CURDIR)/apps
+export APPS_DIR
+
+DEPS_DIR ?= $(CURDIR)/deps
+export DEPS_DIR
+
+REBAR_DEPS_DIR = $(DEPS_DIR)
+export REBAR_DEPS_DIR
+
+REBAR_GIT ?= https://github.com/rebar/rebar
+REBAR_COMMIT ?= 576e12171ab8d69b048b827b92aa65d067deea01
+
+# External "early" plugins (see core/plugins.mk for regular plugins).
+# They both use the core_dep_plugin macro.
+
+define core_dep_plugin
+ifeq ($(2),$(PROJECT))
+-include $$(patsubst $(PROJECT)/%,%,$(1))
+else
+-include $(DEPS_DIR)/$(1)
+
+$(DEPS_DIR)/$(1): $(DEPS_DIR)/$(2) ;
+endif
+endef
+
+DEP_EARLY_PLUGINS ?=
+
+$(foreach p,$(DEP_EARLY_PLUGINS),\
+ $(eval $(if $(findstring /,$p),\
+ $(call core_dep_plugin,$p,$(firstword $(subst /, ,$p))),\
+ $(call core_dep_plugin,$p/early-plugins.mk,$p))))
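+
+# Example (a hedged sketch; `mydep` is a hypothetical dependency): setting
+#
+#   DEP_EARLY_PLUGINS = mydep
+#
+# in a project Makefile includes $(DEPS_DIR)/mydep/early-plugins.mk, while an
+# entry containing a slash, such as mydep/mk/early.mk, includes that exact
+# file from $(DEPS_DIR) instead.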
+
+# Query functions.
+
+query_fetch_method = $(if $(dep_$(1)),$(call _qfm_dep,$(word 1,$(dep_$(1)))),$(call _qfm_pkg,$(1)))
+_qfm_dep = $(if $(dep_fetch_$(1)),$(1),$(if $(IS_DEP),legacy,fail))
+_qfm_pkg = $(if $(pkg_$(1)_fetch),$(pkg_$(1)_fetch),fail)
+
+query_name = $(if $(dep_$(1)),$(1),$(if $(pkg_$(1)_name),$(pkg_$(1)_name),$(1)))
+
+query_repo = $(call _qr,$(1),$(call query_fetch_method,$(1)))
+_qr = $(if $(query_repo_$(2)),$(call query_repo_$(2),$(1)),$(call dep_repo,$(1)))
+
+query_repo_default = $(if $(dep_$(1)),$(word 2,$(dep_$(1))),$(pkg_$(1)_repo))
+query_repo_git = $(patsubst git://github.com/%,https://github.com/%,$(call query_repo_default,$(1)))
+query_repo_git-subfolder = $(call query_repo_git,$(1))
+query_repo_git-submodule = -
+query_repo_hg = $(call query_repo_default,$(1))
+query_repo_svn = $(call query_repo_default,$(1))
+query_repo_cp = $(call query_repo_default,$(1))
+query_repo_ln = $(call query_repo_default,$(1))
+query_repo_hex = https://hex.pm/packages/$(if $(word 3,$(dep_$(1))),$(word 3,$(dep_$(1))),$(1))
+query_repo_fail = -
+query_repo_legacy = -
+
+query_version = $(call _qv,$(1),$(call query_fetch_method,$(1)))
+_qv = $(if $(query_version_$(2)),$(call query_version_$(2),$(1)),$(call dep_commit,$(1)))
+
+query_version_default = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(word 3,$(dep_$(1))),$(pkg_$(1)_commit)))
+query_version_git = $(call query_version_default,$(1))
+query_version_git-subfolder = $(call query_version_git,$(1))
+query_version_git-submodule = -
+query_version_hg = $(call query_version_default,$(1))
+query_version_svn = -
+query_version_cp = -
+query_version_ln = -
+query_version_hex = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(word 2,$(dep_$(1))),$(pkg_$(1)_commit)))
+query_version_fail = -
+query_version_legacy = -
+
+query_extra = $(call _qe,$(1),$(call query_fetch_method,$(1)))
+_qe = $(if $(query_extra_$(2)),$(call query_extra_$(2),$(1)),-)
+
+query_extra_git = -
+query_extra_git-subfolder = $(if $(dep_$(1)),subfolder=$(word 4,$(dep_$(1))),-)
+query_extra_git-submodule = -
+query_extra_hg = -
+query_extra_svn = -
+query_extra_cp = -
+query_extra_ln = -
+query_extra_hex = $(if $(dep_$(1)),package-name=$(word 3,$(dep_$(1))),-)
+query_extra_fail = -
+query_extra_legacy = -
+
+query_absolute_path = $(addprefix $(DEPS_DIR)/,$(call query_name,$(1)))
+
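+# Example (an illustrative query; `cowboy` stands in for any declared
+# dependency): given `dep_cowboy = git https://github.com/ninenines/cowboy 2.9.0`,
+# $(call query_fetch_method,cowboy) expands to `git`,
+# $(call query_repo,cowboy) to the repository URL, and
+# $(call query_version,cowboy) to `2.9.0`.
+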
+# Deprecated legacy query functions.
+dep_fetch = $(call query_fetch_method,$(1))
+dep_name = $(call query_name,$(1))
+dep_repo = $(call query_repo_git,$(1))
+dep_commit = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(if $(filter hex,$(word 1,$(dep_$(1)))),$(word 2,$(dep_$(1))),$(word 3,$(dep_$(1)))),$(pkg_$(1)_commit)))
+
+LOCAL_DEPS_DIRS = $(foreach a,$(LOCAL_DEPS),$(if $(wildcard $(APPS_DIR)/$(a)),$(APPS_DIR)/$(a)))
+ALL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(foreach dep,$(filter-out $(IGNORE_DEPS),$(BUILD_DEPS) $(DEPS)),$(call dep_name,$(dep))))
+
+# When we are calling an app directly we don't want to include it here;
+# otherwise it would be treated both as an app and as the top-level project.
+ALL_APPS_DIRS = $(if $(wildcard $(APPS_DIR)/),$(filter-out $(APPS_DIR),$(shell find $(APPS_DIR) -maxdepth 1 -type d)))
+ifdef ROOT_DIR
+ifndef IS_APP
+ALL_APPS_DIRS := $(filter-out $(APPS_DIR)/$(notdir $(CURDIR)),$(ALL_APPS_DIRS))
+endif
+endif
+
+ifeq ($(filter $(APPS_DIR) $(DEPS_DIR),$(subst :, ,$(ERL_LIBS))),)
+ifeq ($(ERL_LIBS),)
+ ERL_LIBS = $(APPS_DIR):$(DEPS_DIR)
+else
+ ERL_LIBS := $(ERL_LIBS):$(APPS_DIR):$(DEPS_DIR)
+endif
+endif
+export ERL_LIBS
+
+export NO_AUTOPATCH
+
+# Verbosity.
+
+dep_verbose_0 = @echo " DEP $1 ($(call dep_commit,$1))";
+dep_verbose_2 = set -x;
+dep_verbose = $(dep_verbose_$(V))
+
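+# For example, `make V=2` traces each dependency command with `set -x`,
+# while the default V=0 prints only the short " DEP " labels.
+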
+# Optimization: don't recompile deps unless truly necessary.
+
+ifndef IS_DEP
+ifneq ($(MAKELEVEL),0)
+$(shell rm -f ebin/dep_built)
+endif
+endif
+
+# Core targets.
+
+ALL_APPS_DIRS_TO_BUILD = $(if $(LOCAL_DEPS_DIRS)$(IS_APP),$(LOCAL_DEPS_DIRS),$(ALL_APPS_DIRS))
+
+apps:: $(ALL_APPS_DIRS) clean-tmp-deps.log | $(ERLANG_MK_TMP)
+# Create an ebin directory for all apps to make sure Erlang recognizes them
+# as proper OTP applications when using -include_lib. This is a temporary
+# fix; a proper fix would be to compile apps/* in the right order.
+ifndef IS_APP
+ifneq ($(ALL_APPS_DIRS),)
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ mkdir -p $$dep/ebin; \
+ done
+endif
+endif
+# At the top level: if LOCAL_DEPS is defined with at least one local app, only
+# compile that list of apps. Otherwise, compile everything.
+# Within an app: compile all LOCAL_DEPS that are (uncompiled) local apps.
+ifneq ($(ALL_APPS_DIRS_TO_BUILD),)
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS_TO_BUILD); do \
+ if grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/apps.log; then \
+ :; \
+ else \
+ echo $$dep >> $(ERLANG_MK_TMP)/apps.log; \
+ $(MAKE) -C $$dep $(if $(IS_TEST),test-build-app) IS_APP=1; \
+ fi \
+ done
+endif
+
+clean-tmp-deps.log:
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_TMP)/apps.log $(ERLANG_MK_TMP)/deps.log
+endif
+
+# Erlang.mk does not rebuild dependencies after they have been compiled
+# once. If a developer is working on the top-level project and on some
+# dependencies at the same time, they may want to change this behavior.
+# There are two solutions:
+# 1. Set `FULL=1` so that all dependencies are visited and
+#    recursively recompiled if necessary.
+# 2. Set `FORCE_REBUILD=` to the specific list of dependencies that
+#    should be recompiled (instead of the whole set).
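+#
+# For example (an illustrative invocation; the dependency names are
+# hypothetical): `make FULL=1` rebuilds everything, while
+# `make FORCE_REBUILD="cowboy ranch"` forces only those two deps.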
+
+FORCE_REBUILD ?=
+
+ifeq ($(origin FULL),undefined)
+ifneq ($(strip $(force_rebuild_dep)$(FORCE_REBUILD)),)
+define force_rebuild_dep
+echo "$(FORCE_REBUILD)" | grep -qw "$$(basename "$1")"
+endef
+endif
+endif
+
+ifneq ($(SKIP_DEPS),)
+deps::
+else
+deps:: $(ALL_DEPS_DIRS) apps clean-tmp-deps.log | $(ERLANG_MK_TMP)
+ifneq ($(ALL_DEPS_DIRS),)
+ $(verbose) set -e; for dep in $(ALL_DEPS_DIRS); do \
+ if grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/deps.log; then \
+ :; \
+ else \
+ echo $$dep >> $(ERLANG_MK_TMP)/deps.log; \
+ if [ -z "$(strip $(FULL))" ] $(if $(force_rebuild_dep),&& ! ($(call force_rebuild_dep,$$dep)),) && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ elif [ -f $$dep/GNUmakefile ] || [ -f $$dep/makefile ] || [ -f $$dep/Makefile ]; then \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ else \
+ echo "Error: No Makefile to build dependency $$dep." >&2; \
+ exit 2; \
+ fi \
+ fi \
+ done
+endif
+endif
+
+# Deps related targets.
+
+# @todo rename GNUmakefile and makefile into Makefile first, if they exist
+# While the Makefile could also be named GNUmakefile or makefile,
+# in practice only Makefile has been needed so far.
+define dep_autopatch
+ if [ -f $(DEPS_DIR)/$(1)/erlang.mk ]; then \
+ rm -rf $(DEPS_DIR)/$1/ebin/; \
+ $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+ $(call dep_autopatch_erlang_mk,$(1)); \
+ elif [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+ if [ -f $(DEPS_DIR)/$1/rebar.lock ]; then \
+ $(call dep_autopatch2,$1); \
+ elif [ 0 != `grep -c "include ../\w*\.mk" $(DEPS_DIR)/$(1)/Makefile` ]; then \
+ $(call dep_autopatch2,$(1)); \
+ elif [ 0 != `grep -ci "^[^#].*rebar" $(DEPS_DIR)/$(1)/Makefile` ]; then \
+ $(call dep_autopatch2,$(1)); \
+ elif [ -n "`find $(DEPS_DIR)/$(1)/ -type f -name \*.mk -not -name erlang.mk -exec grep -i "^[^#].*rebar" '{}' \;`" ]; then \
+ $(call dep_autopatch2,$(1)); \
+ fi \
+ else \
+ if [ ! -d $(DEPS_DIR)/$(1)/src/ ]; then \
+ $(call dep_autopatch_noop,$(1)); \
+ else \
+ $(call dep_autopatch2,$(1)); \
+ fi \
+ fi
+endef
+
+define dep_autopatch2
+ ! test -f $(DEPS_DIR)/$1/ebin/$1.app || \
+ mv -n $(DEPS_DIR)/$1/ebin/$1.app $(DEPS_DIR)/$1/src/$1.app.src; \
+ rm -f $(DEPS_DIR)/$1/ebin/$1.app; \
+ if [ -f $(DEPS_DIR)/$1/src/$1.app.src.script ]; then \
+ $(call erlang,$(call dep_autopatch_appsrc_script.erl,$(1))); \
+ fi; \
+ $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+ if [ -f $(DEPS_DIR)/$(1)/rebar -o -f $(DEPS_DIR)/$(1)/rebar.config -o -f $(DEPS_DIR)/$(1)/rebar.config.script -o -f $(DEPS_DIR)/$1/rebar.lock ]; then \
+ $(call dep_autopatch_fetch_rebar); \
+ $(call dep_autopatch_rebar,$(1)); \
+ else \
+ $(call dep_autopatch_gen,$(1)); \
+ fi
+endef
+
+define dep_autopatch_noop
+ printf "noop:\n" > $(DEPS_DIR)/$(1)/Makefile
+endef
+
+# Replace "include erlang.mk" with a line that will load the parent Erlang.mk
+# if given. Do it for all 3 possible Makefile file names.
+ifeq ($(NO_AUTOPATCH_ERLANG_MK),)
+define dep_autopatch_erlang_mk
+ for f in Makefile makefile GNUmakefile; do \
+ if [ -f $(DEPS_DIR)/$1/$$f ]; then \
+ sed -i.bak s/'include *erlang.mk'/'include $$(if $$(ERLANG_MK_FILENAME),$$(ERLANG_MK_FILENAME),erlang.mk)'/ $(DEPS_DIR)/$1/$$f; \
+ fi \
+ done
+endef
+else
+define dep_autopatch_erlang_mk
+ :
+endef
+endif
+
+define dep_autopatch_gen
+ printf "%s\n" \
+ "ERLC_OPTS = +debug_info" \
+ "include ../../erlang.mk" > $(DEPS_DIR)/$(1)/Makefile
+endef
+
+# We use flock/lockf when available to avoid concurrency issues.
+define dep_autopatch_fetch_rebar
+ if command -v flock >/dev/null; then \
+ flock $(ERLANG_MK_TMP)/rebar.lock sh -c "$(call dep_autopatch_fetch_rebar2)"; \
+ elif command -v lockf >/dev/null; then \
+ lockf $(ERLANG_MK_TMP)/rebar.lock sh -c "$(call dep_autopatch_fetch_rebar2)"; \
+ else \
+ $(call dep_autopatch_fetch_rebar2); \
+ fi
+endef
+
+define dep_autopatch_fetch_rebar2
+ if [ ! -d $(ERLANG_MK_TMP)/rebar ]; then \
+ git clone -q -n -- $(REBAR_GIT) $(ERLANG_MK_TMP)/rebar; \
+ cd $(ERLANG_MK_TMP)/rebar; \
+ git checkout -q $(REBAR_COMMIT); \
+ ./bootstrap; \
+ cd -; \
+ fi
+endef
+
+define dep_autopatch_rebar
+ if [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+ mv $(DEPS_DIR)/$(1)/Makefile $(DEPS_DIR)/$(1)/Makefile.orig.mk; \
+ fi; \
+ $(call erlang,$(call dep_autopatch_rebar.erl,$(1))); \
+ rm -f $(DEPS_DIR)/$(1)/ebin/$(1).app
+endef
+
+define dep_autopatch_rebar.erl
+ application:load(rebar),
+ application:set_env(rebar, log_level, debug),
+ rmemo:start(),
+ Conf1 = case file:consult("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config)") of
+ {ok, Conf0} -> Conf0;
+ _ -> []
+ end,
+ {Conf, OsEnv} = fun() ->
+ case filelib:is_file("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)") of
+ false -> {Conf1, []};
+ true ->
+ Bindings0 = erl_eval:new_bindings(),
+ Bindings1 = erl_eval:add_binding('CONFIG', Conf1, Bindings0),
+ Bindings = erl_eval:add_binding('SCRIPT', "$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings1),
+ Before = os:getenv(),
+ {ok, Conf2} = file:script("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings),
+ {Conf2, lists:foldl(fun(E, Acc) -> lists:delete(E, Acc) end, os:getenv(), Before)}
+ end
+ end(),
+ Write = fun (Text) ->
+ file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/Makefile)", Text, [append])
+ end,
+ Escape = fun (Text) ->
+ re:replace(Text, "\\\\$$", "\$$$$", [global, {return, list}])
+ end,
+ Write("IGNORE_DEPS += edown eper eunit_formatters meck node_package "
+ "rebar_lock_deps_plugin rebar_vsn_plugin reltool_util\n"),
+ Write("C_SRC_DIR = /path/do/not/exist\n"),
+ Write("C_SRC_TYPE = rebar\n"),
+ Write("DRV_CFLAGS = -fPIC\nexport DRV_CFLAGS\n"),
+ Write(["ERLANG_ARCH = ", rebar_utils:wordsize(), "\nexport ERLANG_ARCH\n"]),
+ ToList = fun
+ (V) when is_atom(V) -> atom_to_list(V);
+ (V) when is_list(V) -> "'\\"" ++ V ++ "\\"'"
+ end,
+ fun() ->
+ Write("ERLC_OPTS = +debug_info\nexport ERLC_OPTS\n"),
+ case lists:keyfind(erl_opts, 1, Conf) of
+ false -> ok;
+ {_, ErlOpts} ->
+ lists:foreach(fun
+ ({d, D}) ->
+ Write("ERLC_OPTS += -D" ++ ToList(D) ++ "=1\n");
+ ({d, DKey, DVal}) ->
+ Write("ERLC_OPTS += -D" ++ ToList(DKey) ++ "=" ++ ToList(DVal) ++ "\n");
+ ({i, I}) ->
+ Write(["ERLC_OPTS += -I ", I, "\n"]);
+ ({platform_define, Regex, D}) ->
+ case rebar_utils:is_arch(Regex) of
+ true -> Write("ERLC_OPTS += -D" ++ ToList(D) ++ "=1\n");
+ false -> ok
+ end;
+ ({parse_transform, PT}) ->
+ Write("ERLC_OPTS += +'{parse_transform, " ++ ToList(PT) ++ "}'\n");
+ (_) -> ok
+ end, ErlOpts)
+ end,
+ Write("\n")
+ end(),
+ GetHexVsn = fun(N, NP) ->
+ case file:consult("$(call core_native_path,$(DEPS_DIR)/$1/rebar.lock)") of
+ {ok, Lock} ->
+ io:format("~p~n", [Lock]),
+ case lists:keyfind("1.1.0", 1, Lock) of
+ {_, LockPkgs} ->
+ io:format("~p~n", [LockPkgs]),
+ case lists:keyfind(atom_to_binary(N, latin1), 1, LockPkgs) of
+ {_, {pkg, _, Vsn}, _} ->
+ io:format("~p~n", [Vsn]),
+ {N, {hex, NP, binary_to_list(Vsn)}};
+ _ ->
+ false
+ end;
+ _ ->
+ false
+ end;
+ _ ->
+ false
+ end
+ end,
+ SemVsn = fun
+ ("~>" ++ S0) ->
+ S = case S0 of
+ " " ++ S1 -> S1;
+ _ -> S0
+ end,
+ case length([ok || $$. <- S]) of
+ 0 -> S ++ ".0.0";
+ 1 -> S ++ ".0";
+ _ -> S
+ end;
+ (S) -> S
+ end,
+ fun() ->
+ File = case lists:keyfind(deps, 1, Conf) of
+ false -> [];
+ {_, Deps} ->
+ [begin case case Dep of
+ N when is_atom(N) -> GetHexVsn(N, N);
+ {N, S} when is_atom(N), is_list(S) -> {N, {hex, N, SemVsn(S)}};
+ {N, {pkg, NP}} when is_atom(N) -> GetHexVsn(N, NP);
+ {N, S, {pkg, NP}} -> {N, {hex, NP, S}};
+ {N, S} when is_tuple(S) -> {N, S};
+ {N, _, S} -> {N, S};
+ {N, _, S, _} -> {N, S};
+ _ -> false
+ end of
+ false -> ok;
+ {Name, Source} ->
+ {Method, Repo, Commit} = case Source of
+ {hex, NPV, V} -> {hex, V, NPV};
+ {git, R} -> {git, R, master};
+ {M, R, {branch, C}} -> {M, R, C};
+ {M, R, {ref, C}} -> {M, R, C};
+ {M, R, {tag, C}} -> {M, R, C};
+ {M, R, C} -> {M, R, C}
+ end,
+ Write(io_lib:format("DEPS += ~s\ndep_~s = ~s ~s ~s~n", [Name, Name, Method, Repo, Commit]))
+ end end || Dep <- Deps]
+ end
+ end(),
+ fun() ->
+ case lists:keyfind(erl_first_files, 1, Conf) of
+ false -> ok;
+ {_, Files} ->
+ Names = [[" ", case lists:reverse(F) of
+ "lre." ++ Elif -> lists:reverse(Elif);
+ "lrx." ++ Elif -> lists:reverse(Elif);
+ "lry." ++ Elif -> lists:reverse(Elif);
+ Elif -> lists:reverse(Elif)
+ end] || "src/" ++ F <- Files],
+ Write(io_lib:format("COMPILE_FIRST +=~s\n", [Names]))
+ end
+ end(),
+ Write("\n\nrebar_dep: preprocess pre-deps deps pre-app app\n"),
+ Write("\npreprocess::\n"),
+ Write("\npre-deps::\n"),
+ Write("\npre-app::\n"),
+ PatchHook = fun(Cmd) ->
+ Cmd2 = re:replace(Cmd, "^([g]?make)(.*)( -C.*)", "\\\\1\\\\3\\\\2", [{return, list}]),
+ case Cmd2 of
+ "make -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1);
+ "gmake -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1);
+ "make " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1);
+ "gmake " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1);
+ _ -> Escape(Cmd)
+ end
+ end,
+ fun() ->
+ case lists:keyfind(pre_hooks, 1, Conf) of
+ false -> ok;
+ {_, Hooks} ->
+ [case H of
+ {'get-deps', Cmd} ->
+ Write("\npre-deps::\n\t" ++ PatchHook(Cmd) ++ "\n");
+ {compile, Cmd} ->
+ Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n");
+ {Regex, compile, Cmd} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n");
+ false -> ok
+ end;
+ _ -> ok
+ end || H <- Hooks]
+ end
+ end(),
+ ShellToMk = fun(V0) ->
+ V1 = re:replace(V0, "[$$][(]", "$$\(shell ", [global]),
+ V = re:replace(V1, "([$$])(?![(])(\\\\w*)", "\\\\1(\\\\2)", [global]),
+ re:replace(V, "-Werror\\\\b", "", [{return, list}, global])
+ end,
+ PortSpecs = fun() ->
+ case lists:keyfind(port_specs, 1, Conf) of
+ false ->
+ case filelib:is_dir("$(call core_native_path,$(DEPS_DIR)/$1/c_src)") of
+ false -> [];
+ true ->
+ [{"priv/" ++ proplists:get_value(so_name, Conf, "$(1)_drv.so"),
+ proplists:get_value(port_sources, Conf, ["c_src/*.c"]), []}]
+ end;
+ {_, Specs} ->
+ lists:flatten([case S of
+ {Output, Input} -> {ShellToMk(Output), Input, []};
+ {Regex, Output, Input} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {ShellToMk(Output), Input, []};
+ false -> []
+ end;
+ {Regex, Output, Input, [{env, Env}]} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {ShellToMk(Output), Input, Env};
+ false -> []
+ end
+ end || S <- Specs])
+ end
+ end(),
+ PortSpecWrite = fun (Text) ->
+ file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/c_src/Makefile.erlang.mk)", Text, [append])
+ end,
+ case PortSpecs of
+ [] -> ok;
+ _ ->
+ Write("\npre-app::\n\t@$$\(MAKE) --no-print-directory -f c_src/Makefile.erlang.mk\n"),
+ PortSpecWrite(io_lib:format("ERL_CFLAGS ?= -finline-functions -Wall -fPIC -I \\"~s/erts-~s/include\\" -I \\"~s\\"\n",
+ [code:root_dir(), erlang:system_info(version), code:lib_dir(erl_interface, include)])),
+ PortSpecWrite(io_lib:format("ERL_LDFLAGS ?= -L \\"~s\\" -lei\n",
+ [code:lib_dir(erl_interface, lib)])),
+ [PortSpecWrite(["\n", E, "\n"]) || E <- OsEnv],
+ FilterEnv = fun(Env) ->
+ lists:flatten([case E of
+ {_, _} -> E;
+ {Regex, K, V} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {K, V};
+ false -> []
+ end
+ end || E <- Env])
+ end,
+ MergeEnv = fun(Env) ->
+ lists:foldl(fun ({K, V}, Acc) ->
+ case lists:keyfind(K, 1, Acc) of
+ false -> [{K, rebar_utils:expand_env_variable(V, K, "")}|Acc];
+ {_, V0} -> [{K, rebar_utils:expand_env_variable(V, K, V0)}|Acc]
+ end
+ end, [], Env)
+ end,
+ PortEnv = case lists:keyfind(port_env, 1, Conf) of
+ false -> [];
+ {_, PortEnv0} -> FilterEnv(PortEnv0)
+ end,
+ PortSpec = fun ({Output, Input0, Env}) ->
+ filelib:ensure_dir("$(call core_native_path,$(DEPS_DIR)/$1/)" ++ Output),
+ Input = [[" ", I] || I <- Input0],
+ PortSpecWrite([
+ [["\n", K, " = ", ShellToMk(V)] || {K, V} <- lists:reverse(MergeEnv(PortEnv))],
+ case $(PLATFORM) of
+ darwin -> "\n\nLDFLAGS += -flat_namespace -undefined suppress";
+ _ -> ""
+ end,
+ "\n\nall:: ", Output, "\n\t@:\n\n",
+ "%.o: %.c\n\t$$\(CC) -c -o $$\@ $$\< $$\(CFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.C\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.cc\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.cpp\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ [[Output, ": ", K, " += ", ShellToMk(V), "\n"] || {K, V} <- lists:reverse(MergeEnv(FilterEnv(Env)))],
+ Output, ": $$\(foreach ext,.c .C .cc .cpp,",
+ "$$\(patsubst %$$\(ext),%.o,$$\(filter %$$\(ext),$$\(wildcard", Input, "))))\n",
+ "\t$$\(CC) -o $$\@ $$\? $$\(LDFLAGS) $$\(ERL_LDFLAGS) $$\(DRV_LDFLAGS) $$\(EXE_LDFLAGS)",
+ case {filename:extension(Output), $(PLATFORM)} of
+ {[], _} -> "\n";
+ {_, darwin} -> "\n";
+ _ -> " -shared\n"
+ end])
+ end,
+ [PortSpec(S) || S <- PortSpecs]
+ end,
+ fun() ->
+ case lists:keyfind(plugins, 1, Conf) of
+ false -> ok;
+ {_, Plugins0} ->
+ Plugins = [P || P <- Plugins0, is_tuple(P)],
+ case lists:keyfind('lfe-compile', 1, Plugins) of
+ false -> ok;
+ _ -> Write("\nBUILD_DEPS = lfe lfe.mk\ndep_lfe.mk = git https://github.com/ninenines/lfe.mk master\nDEP_PLUGINS = lfe.mk\n")
+ end
+ end
+ end(),
+ Write("\ninclude $$\(if $$\(ERLANG_MK_FILENAME),$$\(ERLANG_MK_FILENAME),erlang.mk)"),
+ RunPlugin = fun(Plugin, Step) ->
+ case erlang:function_exported(Plugin, Step, 2) of
+ false -> ok;
+ true ->
+ c:cd("$(call core_native_path,$(DEPS_DIR)/$1/)"),
+ Ret = Plugin:Step({config, "", Conf, dict:new(), dict:new(), dict:new(),
+ dict:store(base_dir, "", dict:new())}, undefined),
+ io:format("rebar plugin ~p step ~p ret ~p~n", [Plugin, Step, Ret])
+ end
+ end,
+ fun() ->
+ case lists:keyfind(plugins, 1, Conf) of
+ false -> ok;
+ {_, Plugins0} ->
+ Plugins = [P || P <- Plugins0, is_atom(P)],
+ [begin
+ case lists:keyfind(deps, 1, Conf) of
+ false -> ok;
+ {_, Deps} ->
+ case lists:keyfind(P, 1, Deps) of
+ false -> ok;
+ _ ->
+ Path = "$(call core_native_path,$(DEPS_DIR)/)" ++ atom_to_list(P),
+ io:format("~s", [os:cmd("$(MAKE) -C $(call core_native_path,$(DEPS_DIR)/$1) " ++ Path)]),
+ io:format("~s", [os:cmd("$(MAKE) -C " ++ Path ++ " IS_DEP=1")]),
+ code:add_patha(Path ++ "/ebin")
+ end
+ end
+ end || P <- Plugins],
+ [case code:load_file(P) of
+ {module, P} -> ok;
+ _ ->
+ case lists:keyfind(plugin_dir, 1, Conf) of
+ false -> ok;
+ {_, PluginsDir} ->
+ ErlFile = "$(call core_native_path,$(DEPS_DIR)/$1/)" ++ PluginsDir ++ "/" ++ atom_to_list(P) ++ ".erl",
+ {ok, P, Bin} = compile:file(ErlFile, [binary]),
+ {module, P} = code:load_binary(P, ErlFile, Bin)
+ end
+ end || P <- Plugins],
+ [RunPlugin(P, preprocess) || P <- Plugins],
+ [RunPlugin(P, pre_compile) || P <- Plugins],
+ [RunPlugin(P, compile) || P <- Plugins]
+ end
+ end(),
+ halt()
+endef
+
+define dep_autopatch_appsrc_script.erl
+ AppSrc = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+ AppSrcScript = AppSrc ++ ".script",
+ {ok, Conf0} = file:consult(AppSrc),
+ Bindings0 = erl_eval:new_bindings(),
+ Bindings1 = erl_eval:add_binding('CONFIG', Conf0, Bindings0),
+ Bindings = erl_eval:add_binding('SCRIPT', AppSrcScript, Bindings1),
+ Conf = case file:script(AppSrcScript, Bindings) of
+ {ok, [C]} -> C;
+ {ok, C} -> C
+ end,
+ ok = file:write_file(AppSrc, io_lib:format("~p.~n", [Conf])),
+ halt()
+endef
+
+define dep_autopatch_appsrc.erl
+ AppSrcOut = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+ AppSrcIn = case filelib:is_regular(AppSrcOut) of false -> "$(call core_native_path,$(DEPS_DIR)/$1/ebin/$1.app)"; true -> AppSrcOut end,
+ case filelib:is_regular(AppSrcIn) of
+ false -> ok;
+ true ->
+ {ok, [{application, $(1), L0}]} = file:consult(AppSrcIn),
+ L1 = lists:keystore(modules, 1, L0, {modules, []}),
+ L2 = case lists:keyfind(vsn, 1, L1) of
+ {_, git} -> lists:keyreplace(vsn, 1, L1, {vsn, lists:droplast(os:cmd("git -C $(DEPS_DIR)/$1 describe --dirty --tags --always"))});
+ {_, {cmd, _}} -> lists:keyreplace(vsn, 1, L1, {vsn, "cmd"});
+ _ -> L1
+ end,
+ L3 = case lists:keyfind(registered, 1, L2) of false -> [{registered, []}|L2]; _ -> L2 end,
+ ok = file:write_file(AppSrcOut, io_lib:format("~p.~n", [{application, $(1), L3}])),
+ case AppSrcOut of AppSrcIn -> ok; _ -> ok = file:delete(AppSrcIn) end
+ end,
+ halt()
+endef
+
+define dep_fetch_git
+ git clone -q -n -- $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && git checkout -q $(call dep_commit,$(1));
+endef
+
+define dep_fetch_git-subfolder
+ mkdir -p $(ERLANG_MK_TMP)/git-subfolder; \
+ git clone -q -n -- $(call dep_repo,$1) \
+ $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1); \
+ cd $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1) \
+ && git checkout -q $(call dep_commit,$1); \
+ ln -s $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1)/$(word 4,$(dep_$(1))) \
+ $(DEPS_DIR)/$(call dep_name,$1);
+endef
+
+define dep_fetch_git-submodule
+ git submodule update --init -- $(DEPS_DIR)/$1;
+endef
+
+define dep_fetch_hg
+ hg clone -q -U $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && hg update -q $(call dep_commit,$(1));
+endef
+
+define dep_fetch_svn
+ svn checkout -q $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+define dep_fetch_cp
+ cp -R $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+define dep_fetch_ln
+ ln -s $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+# Hex only has a package version. No need to look in the Erlang.mk packages.
+define dep_fetch_hex
+ mkdir -p $(ERLANG_MK_TMP)/hex $(DEPS_DIR)/$1; \
+ $(call core_http_get,$(ERLANG_MK_TMP)/hex/$1.tar,\
+ https://repo.hex.pm/tarballs/$(if $(word 3,$(dep_$1)),$(word 3,$(dep_$1)),$1)-$(strip $(word 2,$(dep_$1))).tar); \
+ tar -xOf $(ERLANG_MK_TMP)/hex/$1.tar contents.tar.gz | tar -C $(DEPS_DIR)/$1 -xzf -;
+endef
+
+define dep_fetch_fail
+ echo "Error: Unknown or invalid dependency: $(1)." >&2; \
+ exit 78;
+endef
+
+# Kept for compatibility with older Erlang.mk configurations.
+define dep_fetch_legacy
+ $(warning WARNING: '$(1)' dependency configuration uses deprecated format.) \
+ git clone -q -n -- $(word 1,$(dep_$(1))) $(DEPS_DIR)/$(1); \
+ cd $(DEPS_DIR)/$(1) && git checkout -q $(if $(word 2,$(dep_$(1))),$(word 2,$(dep_$(1))),master);
+endef
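+
+# Example declarations using the fetch methods above (an illustrative
+# sketch; the names and versions are hypothetical):
+#
+#   DEPS = cowboy jsx
+#   dep_cowboy = git https://github.com/ninenines/cowboy master
+#   dep_jsx = hex 2.9.0
+#
+# The git method clones the repository and checks out the given commit;
+# the hex method downloads the matching tarball from repo.hex.pm.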
+
+define dep_target
+$(DEPS_DIR)/$(call dep_name,$1): | $(ERLANG_MK_TMP)
+ $(eval DEP_NAME := $(call dep_name,$1))
+ $(eval DEP_STR := $(if $(filter $1,$(DEP_NAME)),$1,"$1 ($(DEP_NAME))"))
+ $(verbose) if test -d $(APPS_DIR)/$(DEP_NAME); then \
+ echo "Error: Dependency" $(DEP_STR) "conflicts with application found in $(APPS_DIR)/$(DEP_NAME)." >&2; \
+ exit 17; \
+ fi
+ $(verbose) mkdir -p $(DEPS_DIR)
+ $(dep_verbose) $(call dep_fetch_$(strip $(call dep_fetch,$(1))),$(1))
+ $(verbose) if [ -f $(DEPS_DIR)/$(1)/configure.ac -o -f $(DEPS_DIR)/$(1)/configure.in ] \
+ && [ ! -f $(DEPS_DIR)/$(1)/configure ]; then \
+ echo " AUTO " $(DEP_STR); \
+ cd $(DEPS_DIR)/$(1) && autoreconf -Wall -vif -I m4; \
+ fi
+ - $(verbose) if [ -f $(DEPS_DIR)/$(DEP_NAME)/configure ]; then \
+ echo " CONF " $(DEP_STR); \
+ cd $(DEPS_DIR)/$(DEP_NAME) && ./configure; \
+ fi
+ifeq ($(filter $(1),$(NO_AUTOPATCH)),)
+ $(verbose) $$(MAKE) --no-print-directory autopatch-$(DEP_NAME)
+endif
+
+.PHONY: autopatch-$(call dep_name,$1)
+
+autopatch-$(call dep_name,$1)::
+ $(verbose) if [ "$(1)" = "amqp_client" -a "$(RABBITMQ_CLIENT_PATCH)" ]; then \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \
+ echo " PATCH Downloading rabbitmq-codegen"; \
+ git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \
+ fi; \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-server ]; then \
+ echo " PATCH Downloading rabbitmq-server"; \
+ git clone https://github.com/rabbitmq/rabbitmq-server.git $(DEPS_DIR)/rabbitmq-server; \
+ fi; \
+ ln -s $(DEPS_DIR)/amqp_client/deps/rabbit_common-0.0.0 $(DEPS_DIR)/rabbit_common; \
+ elif [ "$(1)" = "rabbit" -a "$(RABBITMQ_SERVER_PATCH)" ]; then \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \
+ echo " PATCH Downloading rabbitmq-codegen"; \
+ git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \
+ fi \
+ elif [ "$1" = "elixir" -a "$(ELIXIR_PATCH)" ]; then \
+ ln -s lib/elixir/ebin $(DEPS_DIR)/elixir/; \
+ else \
+ $$(call dep_autopatch,$(call dep_name,$1)) \
+ fi
+endef
+
+$(foreach dep,$(BUILD_DEPS) $(DEPS),$(eval $(call dep_target,$(dep))))
+
+ifndef IS_APP
+clean:: clean-apps
+
+clean-apps:
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ $(MAKE) -C $$dep clean IS_APP=1; \
+ done
+
+distclean:: distclean-apps
+
+distclean-apps:
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ $(MAKE) -C $$dep distclean IS_APP=1; \
+ done
+endif
+
+ifndef SKIP_DEPS
+distclean:: distclean-deps
+
+distclean-deps:
+ $(gen_verbose) rm -rf $(DEPS_DIR)
+endif
+
+# Forward-declare variables used in core/deps-tools.mk. This is required
+# in case plugins use them.
+
+ERLANG_MK_RECURSIVE_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-deps-list.log
+ERLANG_MK_RECURSIVE_DOC_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-doc-deps-list.log
+ERLANG_MK_RECURSIVE_REL_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-rel-deps-list.log
+ERLANG_MK_RECURSIVE_TEST_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-test-deps-list.log
+ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-shell-deps-list.log
+
+ERLANG_MK_QUERY_DEPS_FILE = $(ERLANG_MK_TMP)/query-deps.log
+ERLANG_MK_QUERY_DOC_DEPS_FILE = $(ERLANG_MK_TMP)/query-doc-deps.log
+ERLANG_MK_QUERY_REL_DEPS_FILE = $(ERLANG_MK_TMP)/query-rel-deps.log
+ERLANG_MK_QUERY_TEST_DEPS_FILE = $(ERLANG_MK_TMP)/query-test-deps.log
+ERLANG_MK_QUERY_SHELL_DEPS_FILE = $(ERLANG_MK_TMP)/query-shell-deps.log
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: clean-app
+
+# Configuration.
+
+ERLC_OPTS ?= -Werror +debug_info +warn_export_vars +warn_shadow_vars \
+ +warn_obsolete_guard # +bin_opt_info +warn_export_all +warn_missing_spec
+COMPILE_FIRST ?=
+COMPILE_FIRST_PATHS = $(addprefix src/,$(addsuffix .erl,$(COMPILE_FIRST)))
+ERLC_EXCLUDE ?=
+ERLC_EXCLUDE_PATHS = $(addprefix src/,$(addsuffix .erl,$(ERLC_EXCLUDE)))
+
+ERLC_ASN1_OPTS ?=
+
+ERLC_MIB_OPTS ?=
+COMPILE_MIB_FIRST ?=
+COMPILE_MIB_FIRST_PATHS = $(addprefix mibs/,$(addsuffix .mib,$(COMPILE_MIB_FIRST)))
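+
+# Example overrides for a project Makefile (a sketch; `my_behaviour` is a
+# hypothetical module):
+#
+#   ERLC_OPTS += +warn_missing_spec
+#   COMPILE_FIRST = my_behaviour
+#
+# COMPILE_FIRST expands to src/my_behaviour.erl via COMPILE_FIRST_PATHS and
+# is compiled before the remaining sources.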
+
+# Verbosity.
+
+app_verbose_0 = @echo " APP " $(PROJECT);
+app_verbose_2 = set -x;
+app_verbose = $(app_verbose_$(V))
+
+appsrc_verbose_0 = @echo " APP " $(PROJECT).app.src;
+appsrc_verbose_2 = set -x;
+appsrc_verbose = $(appsrc_verbose_$(V))
+
+makedep_verbose_0 = @echo " DEPEND" $(PROJECT).d;
+makedep_verbose_2 = set -x;
+makedep_verbose = $(makedep_verbose_$(V))
+
+erlc_verbose_0 = @echo " ERLC " $(filter-out $(patsubst %,%.erl,$(ERLC_EXCLUDE)),\
+ $(filter %.erl %.core,$(?F)));
+erlc_verbose_2 = set -x;
+erlc_verbose = $(erlc_verbose_$(V))
+
+xyrl_verbose_0 = @echo " XYRL " $(filter %.xrl %.yrl,$(?F));
+xyrl_verbose_2 = set -x;
+xyrl_verbose = $(xyrl_verbose_$(V))
+
+asn1_verbose_0 = @echo " ASN1 " $(filter %.asn1,$(?F));
+asn1_verbose_2 = set -x;
+asn1_verbose = $(asn1_verbose_$(V))
+
+mib_verbose_0 = @echo " MIB " $(filter %.bin %.mib,$(?F));
+mib_verbose_2 = set -x;
+mib_verbose = $(mib_verbose_$(V))
+
+ifneq ($(wildcard src/),)
+
+# Targets.
+
+app:: $(if $(wildcard ebin/test),clean) deps
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d
+ $(verbose) $(MAKE) --no-print-directory app-build
+
+ifeq ($(wildcard src/$(PROJECT_MOD).erl),)
+define app_file
+{application, '$(PROJECT)', [
+ {description, "$(PROJECT_DESCRIPTION)"},
+ {vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP),
+ {id$(comma)$(space)"$(1)"}$(comma))
+ {modules, [$(call comma_list,$(2))]},
+ {registered, []},
+ {applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(foreach dep,$(DEPS),$(call dep_name,$(dep))))]},
+ {env, $(subst \,\\,$(PROJECT_ENV))}$(if $(findstring {,$(PROJECT_APP_EXTRA_KEYS)),$(comma)$(newline)$(tab)$(subst \,\\,$(PROJECT_APP_EXTRA_KEYS)),)
+]}.
+endef
+else
+define app_file
+{application, '$(PROJECT)', [
+ {description, "$(PROJECT_DESCRIPTION)"},
+ {vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP),
+ {id$(comma)$(space)"$(1)"}$(comma))
+ {modules, [$(call comma_list,$(2))]},
+ {registered, [$(call comma_list,$(PROJECT)_sup $(PROJECT_REGISTERED))]},
+ {applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(foreach dep,$(DEPS),$(call dep_name,$(dep))))]},
+ {mod, {$(PROJECT_MOD), []}},
+ {env, $(subst \,\\,$(PROJECT_ENV))}$(if $(findstring {,$(PROJECT_APP_EXTRA_KEYS)),$(comma)$(newline)$(tab)$(subst \,\\,$(PROJECT_APP_EXTRA_KEYS)),)
+]}.
+endef
+endif
+
+app-build: ebin/$(PROJECT).app
+ $(verbose) :
+
+# Source files.
+
+ALL_SRC_FILES := $(sort $(call core_find,src/,*))
+
+ERL_FILES := $(filter %.erl,$(ALL_SRC_FILES))
+CORE_FILES := $(filter %.core,$(ALL_SRC_FILES))
+
+# ASN.1 files.
+
+ifneq ($(wildcard asn1/),)
+ASN1_FILES = $(sort $(call core_find,asn1/,*.asn1))
+ERL_FILES += $(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES))))
+
+define compile_asn1
+ $(verbose) mkdir -p include/
+ $(asn1_verbose) erlc -v -I include/ -o asn1/ +noobj $(ERLC_ASN1_OPTS) $(1)
+ $(verbose) mv asn1/*.erl src/
+ -$(verbose) mv asn1/*.hrl include/
+ $(verbose) mv asn1/*.asn1db include/
+endef
+
+$(PROJECT).d:: $(ASN1_FILES)
+ $(if $(strip $?),$(call compile_asn1,$?))
+endif
+
+# SNMP MIB files.
+
+ifneq ($(wildcard mibs/),)
+MIB_FILES = $(sort $(call core_find,mibs/,*.mib))
+
+$(PROJECT).d:: $(COMPILE_MIB_FIRST_PATHS) $(MIB_FILES)
+ $(verbose) mkdir -p include/ priv/mibs/
+ $(mib_verbose) erlc -v $(ERLC_MIB_OPTS) -o priv/mibs/ -I priv/mibs/ $?
+ $(mib_verbose) erlc -o include/ -- $(addprefix priv/mibs/,$(patsubst %.mib,%.bin,$(notdir $?)))
+endif
+
+# Leex and Yecc files.
+
+XRL_FILES := $(filter %.xrl,$(ALL_SRC_FILES))
+XRL_ERL_FILES = $(addprefix src/,$(patsubst %.xrl,%.erl,$(notdir $(XRL_FILES))))
+ERL_FILES += $(XRL_ERL_FILES)
+
+YRL_FILES := $(filter %.yrl,$(ALL_SRC_FILES))
+YRL_ERL_FILES = $(addprefix src/,$(patsubst %.yrl,%.erl,$(notdir $(YRL_FILES))))
+ERL_FILES += $(YRL_ERL_FILES)
+
+$(PROJECT).d:: $(XRL_FILES) $(YRL_FILES)
+ $(if $(strip $?),$(xyrl_verbose) erlc -v -o src/ $(YRL_ERLC_OPTS) $?)
+
+# Erlang and Core Erlang files.
+
+define makedep.erl
+ E = ets:new(makedep, [bag]),
+ G = digraph:new([acyclic]),
+ ErlFiles = lists:usort(string:tokens("$(ERL_FILES)", " ")),
+ DepsDir = "$(call core_native_path,$(DEPS_DIR))",
+ AppsDir = "$(call core_native_path,$(APPS_DIR))",
+ DepsDirsSrc = "$(if $(wildcard $(DEPS_DIR)/*/src), $(call core_native_path,$(wildcard $(DEPS_DIR)/*/src)))",
+ DepsDirsInc = "$(if $(wildcard $(DEPS_DIR)/*/include), $(call core_native_path,$(wildcard $(DEPS_DIR)/*/include)))",
+ AppsDirsSrc = "$(if $(wildcard $(APPS_DIR)/*/src), $(call core_native_path,$(wildcard $(APPS_DIR)/*/src)))",
+ AppsDirsInc = "$(if $(wildcard $(APPS_DIR)/*/include), $(call core_native_path,$(wildcard $(APPS_DIR)/*/include)))",
+ DepsDirs = lists:usort(string:tokens(DepsDirsSrc++DepsDirsInc, " ")),
+ AppsDirs = lists:usort(string:tokens(AppsDirsSrc++AppsDirsInc, " ")),
+ Modules = [{list_to_atom(filename:basename(F, ".erl")), F} || F <- ErlFiles],
+ Add = fun (Mod, Dep) ->
+ case lists:keyfind(Dep, 1, Modules) of
+ false -> ok;
+ {_, DepFile} ->
+ {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+ ets:insert(E, {ModFile, DepFile}),
+ digraph:add_vertex(G, Mod),
+ digraph:add_vertex(G, Dep),
+ digraph:add_edge(G, Mod, Dep)
+ end
+ end,
+ AddHd = fun (F, Mod, DepFile) ->
+ case file:open(DepFile, [read]) of
+ {error, enoent} ->
+ ok;
+ {ok, Fd} ->
+ {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+ case ets:match(E, {ModFile, DepFile}) of
+ [] ->
+ ets:insert(E, {ModFile, DepFile}),
+ F(F, Fd, Mod,0);
+ _ -> ok
+ end
+ end
+ end,
+ SearchHrl = fun
+ F(_Hrl, []) -> {error,enoent};
+ F(Hrl, [Dir|Dirs]) ->
+ HrlF = filename:join([Dir,Hrl]),
+ case filelib:is_file(HrlF) of
+ true ->
+ {ok, HrlF};
+ false -> F(Hrl,Dirs)
+ end
+ end,
+ Attr = fun
+ (_F, Mod, behavior, Dep) ->
+ Add(Mod, Dep);
+ (_F, Mod, behaviour, Dep) ->
+ Add(Mod, Dep);
+ (_F, Mod, compile, {parse_transform, Dep}) ->
+ Add(Mod, Dep);
+ (_F, Mod, compile, Opts) when is_list(Opts) ->
+ case proplists:get_value(parse_transform, Opts) of
+ undefined -> ok;
+ Dep -> Add(Mod, Dep)
+ end;
+ (F, Mod, include, Hrl) ->
+ case SearchHrl(Hrl, ["src", "include",AppsDir,DepsDir]++AppsDirs++DepsDirs) of
+ {ok, FoundHrl} -> AddHd(F, Mod, FoundHrl);
+ {error, _} -> false
+ end;
+ (F, Mod, include_lib, Hrl) ->
+ case SearchHrl(Hrl, ["src", "include",AppsDir,DepsDir]++AppsDirs++DepsDirs) of
+ {ok, FoundHrl} -> AddHd(F, Mod, FoundHrl);
+ {error, _} -> false
+ end;
+ (F, Mod, import, {Imp, _}) ->
+ IsFile =
+ case lists:keyfind(Imp, 1, Modules) of
+ false -> false;
+ {_, FilePath} -> filelib:is_file(FilePath)
+ end,
+ case IsFile of
+ false -> ok;
+ true -> Add(Mod, Imp)
+ end;
+ (_, _, _, _) -> ok
+ end,
+ MakeDepend = fun
+ (F, Fd, Mod, StartLocation) ->
+ {ok, Filename} = file:pid2name(Fd),
+ case io:parse_erl_form(Fd, undefined, StartLocation) of
+ {ok, AbsData, EndLocation} ->
+ case AbsData of
+ {attribute, _, Key, Value} ->
+ Attr(F, Mod, Key, Value),
+ F(F, Fd, Mod, EndLocation);
+ _ -> F(F, Fd, Mod, EndLocation)
+ end;
+ {eof, _ } -> file:close(Fd);
+ {error, ErrorDescription } ->
+ file:close(Fd);
+ {error, ErrorInfo, ErrorLocation} ->
+ F(F, Fd, Mod, ErrorLocation)
+ end,
+ ok
+ end,
+ [begin
+ Mod = list_to_atom(filename:basename(F, ".erl")),
+ case file:open(F, [read]) of
+ {ok, Fd} -> MakeDepend(MakeDepend, Fd, Mod,0);
+ {error, enoent} -> ok
+ end
+ end || F <- ErlFiles],
+ Depend = sofs:to_external(sofs:relation_to_family(sofs:relation(ets:tab2list(E)))),
+ CompileFirst = [X || X <- lists:reverse(digraph_utils:topsort(G)), [] =/= digraph:in_neighbours(G, X)],
+ TargetPath = fun(Target) ->
+ case lists:keyfind(Target, 1, Modules) of
+ false -> "";
+ {_, DepFile} ->
+ DirSubname = tl(string:tokens(filename:dirname(DepFile), "/")),
+ string:join(DirSubname ++ [atom_to_list(Target)], "/")
+ end
+ end,
+ Output0 = [
+ "# Generated by Erlang.mk. Edit at your own risk!\n\n",
+ [[F, "::", [[" ", D] || D <- Deps], "; @touch \$$@\n"] || {F, Deps} <- Depend],
+ "\nCOMPILE_FIRST +=", [[" ", TargetPath(CF)] || CF <- CompileFirst], "\n"
+ ],
+ Output = case "é" of
+ [233] -> unicode:characters_to_binary(Output0);
+ _ -> Output0
+ end,
+ ok = file:write_file("$(1)", Output),
+ halt()
+endef
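+
+# Example of the generated $(PROJECT).d file (an illustrative sketch; the
+# file names are hypothetical):
+#
+#   src/my_mod.erl:: include/my_hdr.hrl; @touch $@
+#   COMPILE_FIRST += my_behaviour
+#
+# Each module is made to depend on the headers, behaviours and parse
+# transforms it uses, and behaviours are scheduled to compile first.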
+
+ifeq ($(if $(NO_MAKEDEP),$(wildcard $(PROJECT).d),),)
+$(PROJECT).d:: $(ERL_FILES) $(call core_find,include/,*.hrl) $(MAKEFILE_LIST)
+ $(makedep_verbose) $(call erlang,$(call makedep.erl,$@))
+endif
+
+ifeq ($(IS_APP)$(IS_DEP),)
+ifneq ($(words $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES)),0)
+# Rebuild everything when the Makefile changes.
+$(ERLANG_MK_TMP)/last-makefile-change: $(MAKEFILE_LIST) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES); \
+ touch -c $(PROJECT).d; \
+ fi
+ $(verbose) touch $@
+
+$(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES):: $(ERLANG_MK_TMP)/last-makefile-change
+ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change
+endif
+endif
+
+$(PROJECT).d::
+ $(verbose) :
+
+include $(wildcard $(PROJECT).d)
+
+ebin/$(PROJECT).app:: ebin/
+
+ebin/:
+ $(verbose) mkdir -p ebin/
+
+define compile_erl
+ $(erlc_verbose) erlc -v $(if $(IS_DEP),$(filter-out -Werror,$(ERLC_OPTS)),$(ERLC_OPTS)) -o ebin/ \
+ -pa ebin/ -I include/ $(filter-out $(ERLC_EXCLUDE_PATHS),$(COMPILE_FIRST_PATHS) $(1))
+endef
+
+define validate_app_file
+ case file:consult("ebin/$(PROJECT).app") of
+ {ok, _} -> halt();
+ _ -> halt(1)
+ end
+endef
+
+ebin/$(PROJECT).app:: $(ERL_FILES) $(CORE_FILES) $(wildcard src/$(PROJECT).app.src)
+ $(eval FILES_TO_COMPILE := $(filter-out src/$(PROJECT).app.src,$?))
+ $(if $(strip $(FILES_TO_COMPILE)),$(call compile_erl,$(FILES_TO_COMPILE)))
+# Older git versions do not have the --first-parent flag. Do without in that case.
+ $(eval GITDESCRIBE := $(shell git describe --dirty --abbrev=7 --tags --always --first-parent 2>/dev/null \
+ || git describe --dirty --abbrev=7 --tags --always 2>/dev/null || true))
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(filter-out $(ERLC_EXCLUDE_PATHS),$(ERL_FILES) $(CORE_FILES) $(BEAM_FILES)))))))
+ifeq ($(wildcard src/$(PROJECT).app.src),)
+ $(app_verbose) printf '$(subst %,%%,$(subst $(newline),\n,$(subst ','\'',$(call app_file,$(GITDESCRIBE),$(MODULES)))))' \
+ > ebin/$(PROJECT).app
+ $(verbose) if ! $(call erlang,$(call validate_app_file)); then \
+ echo "The .app file produced is invalid. Please verify the value of PROJECT_ENV." >&2; \
+ exit 1; \
+ fi
+else
+ $(verbose) if [ -z "$$(grep -e '^[^%]*{\s*modules\s*,' src/$(PROJECT).app.src)" ]; then \
+ echo "Empty modules entry not found in $(PROJECT).app.src. Please consult the erlang.mk documentation for instructions." >&2; \
+ exit 1; \
+ fi
+ $(appsrc_verbose) cat src/$(PROJECT).app.src \
+ | sed "s/{[[:space:]]*modules[[:space:]]*,[[:space:]]*\[\]}/{modules, \[$(call comma_list,$(MODULES))\]}/" \
+ | sed "s/{id,[[:space:]]*\"git\"}/{id, \"$(subst /,\/,$(GITDESCRIBE))\"}/" \
+ > ebin/$(PROJECT).app
+endif
+ifneq ($(wildcard src/$(PROJECT).appup),)
+ $(verbose) cp src/$(PROJECT).appup ebin/
+endif
+
+clean:: clean-app
+
+clean-app:
+ $(gen_verbose) rm -rf $(PROJECT).d ebin/ priv/mibs/ $(XRL_ERL_FILES) $(YRL_ERL_FILES) \
+ $(addprefix include/,$(patsubst %.mib,%.hrl,$(notdir $(MIB_FILES)))) \
+ $(addprefix include/,$(patsubst %.asn1,%.hrl,$(notdir $(ASN1_FILES)))) \
+ $(addprefix include/,$(patsubst %.asn1,%.asn1db,$(notdir $(ASN1_FILES)))) \
+ $(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES))))
+
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Viktor Söderqvist <viktor@zuiderkwast.se>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: docs-deps
+
+# Configuration.
+
+ALL_DOC_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(DOC_DEPS))
+
+# Targets.
+
+$(foreach dep,$(DOC_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+doc-deps:
+else
+doc-deps: $(ALL_DOC_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_DOC_DEPS_DIRS) ; do $(MAKE) -C $$dep IS_DEP=1; done
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: rel-deps
+
+# Configuration.
+
+ALL_REL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(REL_DEPS))
+
+# Targets.
+
+$(foreach dep,$(REL_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+rel-deps:
+else
+rel-deps: $(ALL_REL_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_REL_DEPS_DIRS) ; do $(MAKE) -C $$dep; done
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: test-deps test-dir test-build clean-test-dir
+
+# Configuration.
+
+TEST_DIR ?= $(CURDIR)/test
+
+ALL_TEST_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(TEST_DEPS))
+
+TEST_ERLC_OPTS ?= +debug_info +warn_export_vars +warn_shadow_vars +warn_obsolete_guard
+TEST_ERLC_OPTS += -DTEST=1
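+
+# TEST_ERLC_OPTS applies to test-build and test-dir compilation; a project
+# can append to it in its Makefile, for example:
+#
+#   TEST_ERLC_OPTS += +nowarn_export_all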
+
+# Targets.
+
+$(foreach dep,$(TEST_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+test-deps:
+else
+test-deps: $(ALL_TEST_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_TEST_DEPS_DIRS) ; do \
+ if [ -z "$(strip $(FULL))" ] && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ else \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ fi \
+ done
+endif
+
+ifneq ($(wildcard $(TEST_DIR)),)
+test-dir: $(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build
+ @:
+
+test_erlc_verbose_0 = @echo " ERLC " $(filter-out $(patsubst %,%.erl,$(ERLC_EXCLUDE)),\
+ $(filter %.erl %.core,$(notdir $(FILES_TO_COMPILE))));
+test_erlc_verbose_2 = set -x;
+test_erlc_verbose = $(test_erlc_verbose_$(V))
+
+define compile_test_erl
+ $(test_erlc_verbose) erlc -v $(TEST_ERLC_OPTS) -o $(TEST_DIR) \
+ -pa ebin/ -I include/ $(1)
+endef
+
+ERL_TEST_FILES = $(call core_find,$(TEST_DIR)/,*.erl)
+$(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build: $(ERL_TEST_FILES) $(MAKEFILE_LIST)
+ $(eval FILES_TO_COMPILE := $(if $(filter $(MAKEFILE_LIST),$?),$(filter $(ERL_TEST_FILES),$^),$?))
+ $(if $(strip $(FILES_TO_COMPILE)),$(call compile_test_erl,$(FILES_TO_COMPILE)) && touch $@)
+endif
+
+test-build:: IS_TEST=1
+test-build:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build:: $(if $(wildcard src),$(if $(wildcard ebin/test),,clean)) $(if $(IS_APP),,deps test-deps)
+# We already compiled everything when IS_APP=1.
+ifndef IS_APP
+ifneq ($(wildcard src),)
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(verbose) $(MAKE) --no-print-directory app-build ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(gen_verbose) touch ebin/test
+endif
+ifneq ($(wildcard $(TEST_DIR)),)
+ $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+endif
+endif
+
+# Roughly the same as test-build, but when IS_APP=1.
+# We only care about compiling the current application.
+ifdef IS_APP
+test-build-app:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build-app:: deps test-deps
+ifneq ($(wildcard src),)
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(verbose) $(MAKE) --no-print-directory app-build ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(gen_verbose) touch ebin/test
+endif
+ifneq ($(wildcard $(TEST_DIR)),)
+ $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+endif
+endif
+
+clean:: clean-test-dir
+
+clean-test-dir:
+ifneq ($(wildcard $(TEST_DIR)/*.beam),)
+ $(gen_verbose) rm -f $(TEST_DIR)/*.beam $(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: rebar.config
+
+# We strip out -Werror because we don't want to fail due to
+# warnings when used as a dependency.
+
+compat_prepare_erlc_opts = $(shell echo "$1" | sed 's/, */,/g')
+
+define compat_convert_erlc_opts
+$(if $(filter-out -Werror,$1),\
+ $(if $(findstring +,$1),\
+ $(shell echo $1 | cut -b 2-)))
+endef
+
+define compat_erlc_opts_to_list
+[$(call comma_list,$(foreach o,$(call compat_prepare_erlc_opts,$1),$(call compat_convert_erlc_opts,$o)))]
+endef
+
+define compat_rebar_config
+{deps, [
+$(call comma_list,$(foreach d,$(DEPS),\
+ $(if $(filter hex,$(call dep_fetch,$d)),\
+ {$(call dep_name,$d)$(comma)"$(call dep_repo,$d)"},\
+ {$(call dep_name,$d)$(comma)".*"$(comma){git,"$(call dep_repo,$d)"$(comma)"$(call dep_commit,$d)"}})))
+]}.
+{erl_opts, $(call compat_erlc_opts_to_list,$(ERLC_OPTS))}.
+endef
+
+rebar.config:
+ $(gen_verbose) $(call core_render,compat_rebar_config,rebar.config)
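+
+# Example output (illustrative; actual entries depend on DEPS and ERLC_OPTS):
+# with one git dependency, `make rebar.config` renders something like
+#
+#   {deps, [
+#   {cowboy,".*",{git,"https://github.com/ninenines/cowboy","master"}}
+#   ]}.
+#   {erl_opts, [debug_info]}.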
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter asciideck,$(DEPS) $(DOC_DEPS)),asciideck)
+
+.PHONY: asciidoc asciidoc-guide asciidoc-manual install-asciidoc distclean-asciidoc-guide distclean-asciidoc-manual
+
+# Core targets.
+
+docs:: asciidoc
+
+distclean:: distclean-asciidoc-guide distclean-asciidoc-manual
+
+# Plugin-specific targets.
+
+asciidoc: asciidoc-guide asciidoc-manual
+
+# User guide.
+
+ifeq ($(wildcard doc/src/guide/book.asciidoc),)
+asciidoc-guide:
+else
+asciidoc-guide: distclean-asciidoc-guide doc-deps
+ a2x -v -f pdf doc/src/guide/book.asciidoc && mv doc/src/guide/book.pdf doc/guide.pdf
+ a2x -v -f chunked doc/src/guide/book.asciidoc && mv doc/src/guide/book.chunked/ doc/html/
+
+distclean-asciidoc-guide:
+ $(gen_verbose) rm -rf doc/html/ doc/guide.pdf
+endif
+
+# Man pages.
+
+ASCIIDOC_MANUAL_FILES := $(wildcard doc/src/manual/*.asciidoc)
+
+ifeq ($(ASCIIDOC_MANUAL_FILES),)
+asciidoc-manual:
+else
+
+# Configuration.
+
+MAN_INSTALL_PATH ?= /usr/local/share/man
+MAN_SECTIONS ?= 3 7
+MAN_PROJECT ?= $(shell echo $(PROJECT) | sed 's/^./\U&\E/')
+MAN_VERSION ?= $(PROJECT_VERSION)
+
+# Plugin-specific targets.
+
+define asciidoc2man.erl
+try
+ [begin
+ io:format(" ADOC ~s~n", [F]),
+ ok = asciideck:to_manpage(asciideck:parse_file(F), #{
+ compress => gzip,
+ outdir => filename:dirname(F),
+ extra2 => "$(MAN_PROJECT) $(MAN_VERSION)",
+ extra3 => "$(MAN_PROJECT) Function Reference"
+ })
+ end || F <- [$(shell echo $(addprefix $(comma)\",$(addsuffix \",$1)) | sed 's/^.//')]],
+ halt(0)
+catch C:E ->
+ io:format("Exception ~p:~p~nStacktrace: ~p~n", [C, E, erlang:get_stacktrace()]),
+ halt(1)
+end.
+endef
+
+asciidoc-manual:: doc-deps
+
+asciidoc-manual:: $(ASCIIDOC_MANUAL_FILES)
+ $(gen_verbose) $(call erlang,$(call asciidoc2man.erl,$?))
+ $(verbose) $(foreach s,$(MAN_SECTIONS),mkdir -p doc/man$s/ && mv doc/src/manual/*.$s.gz doc/man$s/;)
+
+install-docs:: install-asciidoc
+
+install-asciidoc: asciidoc-manual
+ $(foreach s,$(MAN_SECTIONS),\
+ mkdir -p $(MAN_INSTALL_PATH)/man$s/ && \
+ install -g `id -g` -o `id -u` -m 0644 doc/man$s/*.gz $(MAN_INSTALL_PATH)/man$s/;)
+
+distclean-asciidoc-manual:
+ $(gen_verbose) rm -rf $(addprefix doc/man,$(MAN_SECTIONS))
+endif
+endif
+
+# Copyright (c) 2014-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: bootstrap bootstrap-lib bootstrap-rel new list-templates
+
+# Core targets.
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Bootstrap targets:" \
+ " bootstrap Generate a skeleton of an OTP application" \
+ " bootstrap-lib Generate a skeleton of an OTP library" \
+ " bootstrap-rel Generate the files needed to build a release" \
+ " new-app in=NAME Create a new local OTP application NAME" \
+ " new-lib in=NAME Create a new local OTP library NAME" \
+ " new t=TPL n=NAME Generate a module NAME based on the template TPL" \
+ " new t=T n=N in=APP Generate a module NAME based on the template TPL in APP" \
+ " list-templates List available templates"
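+
+# For example (illustrative names): `make new t=gen_server n=my_server in=my_app`
+# renders the tpl_gen_server template below into the src/ directory of the
+# my_app application under $(APPS_DIR).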
+
+# Bootstrap templates.
+
+define bs_appsrc
+{application, $p, [
+ {description, ""},
+ {vsn, "0.1.0"},
+ {id, "git"},
+ {modules, []},
+ {registered, []},
+ {applications, [
+ kernel,
+ stdlib
+ ]},
+ {mod, {$p_app, []}},
+ {env, []}
+]}.
+endef
+
+define bs_appsrc_lib
+{application, $p, [
+ {description, ""},
+ {vsn, "0.1.0"},
+ {id, "git"},
+ {modules, []},
+ {registered, []},
+ {applications, [
+ kernel,
+ stdlib
+ ]}
+]}.
+endef
+
+# To prevent autocompletion issues with ZSH, we add "include erlang.mk"
+# separately during the actual bootstrap.
+define bs_Makefile
+PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.1.0
+$(if $(SP),
+# Whitespace to be used when creating files from templates.
+SP = $(SP)
+)
+endef
+
+define bs_apps_Makefile
+PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.1.0
+$(if $(SP),
+# Whitespace to be used when creating files from templates.
+SP = $(SP)
+)
+# Make sure we know where the applications are located.
+ROOT_DIR ?= $(call core_relpath,$(dir $(ERLANG_MK_FILENAME)),$(APPS_DIR)/app)
+APPS_DIR ?= ..
+DEPS_DIR ?= $(call core_relpath,$(DEPS_DIR),$(APPS_DIR)/app)
+
+include $$(ROOT_DIR)/erlang.mk
+endef
+
+define bs_app
+-module($p_app).
+-behaviour(application).
+
+-export([start/2]).
+-export([stop/1]).
+
+start(_Type, _Args) ->
+ $p_sup:start_link().
+
+stop(_State) ->
+ ok.
+endef
+
+define bs_relx_config
+{release, {$p_release, "1"}, [$p, sasl, runtime_tools]}.
+{extended_start_script, true}.
+{sys_config, "config/sys.config"}.
+{vm_args, "config/vm.args"}.
+endef
+
+define bs_sys_config
+[
+].
+endef
+
+define bs_vm_args
+-name $p@127.0.0.1
+-setcookie $p
+-heart
+endef
+
+# Normal templates.
+
+define tpl_supervisor
+-module($(n)).
+-behaviour(supervisor).
+
+-export([start_link/0]).
+-export([init/1]).
+
+start_link() ->
+ supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+init([]) ->
+ Procs = [],
+ {ok, {{one_for_one, 1, 5}, Procs}}.
+endef
+
+define tpl_gen_server
+-module($(n)).
+-behaviour(gen_server).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_server.
+-export([init/1]).
+-export([handle_call/3]).
+-export([handle_cast/2]).
+-export([handle_info/2]).
+-export([terminate/2]).
+-export([code_change/3]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_server:start_link(?MODULE, [], []).
+
+%% gen_server.
+
+init([]) ->
+ {ok, #state{}}.
+
+handle_call(_Request, _From, State) ->
+ {reply, ignored, State}.
+
+handle_cast(_Msg, State) ->
+ {noreply, State}.
+
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+endef
+
+define tpl_module
+-module($(n)).
+-export([]).
+endef
+
+define tpl_cowboy_http
+-module($(n)).
+-behaviour(cowboy_http_handler).
+
+-export([init/3]).
+-export([handle/2]).
+-export([terminate/3]).
+
+-record(state, {
+}).
+
+init(_, Req, _Opts) ->
+ {ok, Req, #state{}}.
+
+handle(Req, State=#state{}) ->
+ {ok, Req2} = cowboy_req:reply(200, Req),
+ {ok, Req2, State}.
+
+terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_gen_fsm
+-module($(n)).
+-behaviour(gen_fsm).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_fsm.
+-export([init/1]).
+-export([state_name/2]).
+-export([handle_event/3]).
+-export([state_name/3]).
+-export([handle_sync_event/4]).
+-export([handle_info/3]).
+-export([terminate/3]).
+-export([code_change/4]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_fsm:start_link(?MODULE, [], []).
+
+%% gen_fsm.
+
+init([]) ->
+ {ok, state_name, #state{}}.
+
+state_name(_Event, StateData) ->
+ {next_state, state_name, StateData}.
+
+handle_event(_Event, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+state_name(_Event, _From, StateData) ->
+ {reply, ignored, state_name, StateData}.
+
+handle_sync_event(_Event, _From, StateName, StateData) ->
+ {reply, ignored, StateName, StateData}.
+
+handle_info(_Info, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+terminate(_Reason, _StateName, _StateData) ->
+ ok.
+
+code_change(_OldVsn, StateName, StateData, _Extra) ->
+ {ok, StateName, StateData}.
+endef
+
+define tpl_gen_statem
+-module($(n)).
+-behaviour(gen_statem).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_statem.
+-export([callback_mode/0]).
+-export([init/1]).
+-export([state_name/3]).
+-export([handle_event/4]).
+-export([terminate/3]).
+-export([code_change/4]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_statem:start_link(?MODULE, [], []).
+
+%% gen_statem.
+
+callback_mode() ->
+ state_functions.
+
+init([]) ->
+ {ok, state_name, #state{}}.
+
+state_name(_EventType, _EventData, StateData) ->
+ {next_state, state_name, StateData}.
+
+handle_event(_EventType, _EventData, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+terminate(_Reason, _StateName, _StateData) ->
+ ok.
+
+code_change(_OldVsn, StateName, StateData, _Extra) ->
+ {ok, StateName, StateData}.
+endef
+
+define tpl_cowboy_loop
+-module($(n)).
+-behaviour(cowboy_loop_handler).
+
+-export([init/3]).
+-export([info/3]).
+-export([terminate/3]).
+
+-record(state, {
+}).
+
+init(_, Req, _Opts) ->
+ {loop, Req, #state{}, 5000, hibernate}.
+
+info(_Info, Req, State) ->
+ {loop, Req, State, hibernate}.
+
+terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_cowboy_rest
+-module($(n)).
+
+-export([init/3]).
+-export([content_types_provided/2]).
+-export([get_html/2]).
+
+init(_, _Req, _Opts) ->
+ {upgrade, protocol, cowboy_rest}.
+
+content_types_provided(Req, State) ->
+ {[{{<<"text">>, <<"html">>, '*'}, get_html}], Req, State}.
+
+get_html(Req, State) ->
+ {<<"<html><body>This is REST!</body></html>">>, Req, State}.
+endef
+
+define tpl_cowboy_ws
+-module($(n)).
+-behaviour(cowboy_websocket_handler).
+
+-export([init/3]).
+-export([websocket_init/3]).
+-export([websocket_handle/3]).
+-export([websocket_info/3]).
+-export([websocket_terminate/3]).
+
+-record(state, {
+}).
+
+init(_, _, _) ->
+ {upgrade, protocol, cowboy_websocket}.
+
+websocket_init(_, Req, _Opts) ->
+ Req2 = cowboy_req:compact(Req),
+ {ok, Req2, #state{}}.
+
+websocket_handle({text, Data}, Req, State) ->
+ {reply, {text, Data}, Req, State};
+websocket_handle({binary, Data}, Req, State) ->
+ {reply, {binary, Data}, Req, State};
+websocket_handle(_Frame, Req, State) ->
+ {ok, Req, State}.
+
+websocket_info(_Info, Req, State) ->
+ {ok, Req, State}.
+
+websocket_terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_ranch_protocol
+-module($(n)).
+-behaviour(ranch_protocol).
+
+-export([start_link/4]).
+-export([init/4]).
+
+-type opts() :: [].
+-export_type([opts/0]).
+
+-record(state, {
+ socket :: inet:socket(),
+ transport :: module()
+}).
+
+start_link(Ref, Socket, Transport, Opts) ->
+ Pid = spawn_link(?MODULE, init, [Ref, Socket, Transport, Opts]),
+ {ok, Pid}.
+
+-spec init(ranch:ref(), inet:socket(), module(), opts()) -> ok.
+init(Ref, Socket, Transport, _Opts) ->
+ ok = ranch:accept_ack(Ref),
+ loop(#state{socket=Socket, transport=Transport}).
+
+loop(State) ->
+ loop(State).
+endef
+
+# Plugin-specific targets.
+
+ifndef WS
+ifdef SP
+WS = $(subst a,,a $(wordlist 1,$(SP),a a a a a a a a a a a a a a a a a a a a))
+else
+WS = $(tab)
+endif
+endif
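+# Note: when SP is set, WS above becomes a string of exactly $(SP) spaces:
+# $(SP)+1 "a" characters are joined with spaces and the "a"s are then
+# deleted. For example, SP=2 turns "a a a" into two spaces.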
+
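+# Typical usage (illustrative): from an empty directory containing only
+# erlang.mk, run "make -f erlang.mk bootstrap" to generate a Makefile,
+# src/$(PROJECT)_app.erl and src/$(PROJECT)_sup.erl.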
+bootstrap:
+ifneq ($(wildcard src/),)
+ $(error Error: src/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(eval n := $(PROJECT)_sup)
+ $(verbose) $(call core_render,bs_Makefile,Makefile)
+ $(verbose) echo "include erlang.mk" >> Makefile
+ $(verbose) mkdir src/
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc,src/$(PROJECT).app.src)
+endif
+ $(verbose) $(call core_render,bs_app,src/$(PROJECT)_app.erl)
+ $(verbose) $(call core_render,tpl_supervisor,src/$(PROJECT)_sup.erl)
+
+bootstrap-lib:
+ifneq ($(wildcard src/),)
+ $(error Error: src/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(verbose) $(call core_render,bs_Makefile,Makefile)
+ $(verbose) echo "include erlang.mk" >> Makefile
+ $(verbose) mkdir src/
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc_lib,src/$(PROJECT).app.src)
+endif
+
+bootstrap-rel:
+ifneq ($(wildcard relx.config),)
+ $(error Error: relx.config already exists)
+endif
+ifneq ($(wildcard config/),)
+ $(error Error: config/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(verbose) $(call core_render,bs_relx_config,relx.config)
+ $(verbose) mkdir config/
+ $(verbose) $(call core_render,bs_sys_config,config/sys.config)
+ $(verbose) $(call core_render,bs_vm_args,config/vm.args)
+
+new-app:
+ifndef in
+ $(error Usage: $(MAKE) new-app in=APP)
+endif
+ifneq ($(wildcard $(APPS_DIR)/$in),)
+ $(error Error: Application $in already exists)
+endif
+ $(eval p := $(in))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(eval n := $(in)_sup)
+ $(verbose) mkdir -p $(APPS_DIR)/$p/src/
+ $(verbose) $(call core_render,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile)
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc,$(APPS_DIR)/$p/src/$p.app.src)
+endif
+ $(verbose) $(call core_render,bs_app,$(APPS_DIR)/$p/src/$p_app.erl)
+ $(verbose) $(call core_render,tpl_supervisor,$(APPS_DIR)/$p/src/$p_sup.erl)
+
+new-lib:
+ifndef in
+ $(error Usage: $(MAKE) new-lib in=APP)
+endif
+ifneq ($(wildcard $(APPS_DIR)/$in),)
+ $(error Error: Application $in already exists)
+endif
+ $(eval p := $(in))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(verbose) mkdir -p $(APPS_DIR)/$p/src/
+ $(verbose) $(call core_render,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile)
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc_lib,$(APPS_DIR)/$p/src/$p.app.src)
+endif
+
+new:
+ifeq ($(wildcard src/)$(in),)
+ $(error Error: src/ directory does not exist)
+endif
+ifndef t
+ $(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP])
+endif
+ifndef n
+ $(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP])
+endif
+ifdef in
+ $(verbose) $(call core_render,tpl_$(t),$(APPS_DIR)/$(in)/src/$(n).erl)
+else
+ $(verbose) $(call core_render,tpl_$(t),src/$(n).erl)
+endif
+
+list-templates:
+ $(verbose) echo Available templates:
+ $(verbose) printf " %s\n" $(sort $(patsubst tpl_%,%,$(filter tpl_%,$(.VARIABLES))))
+
+# Copyright (c) 2014-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: clean-c_src distclean-c_src-env
+
+# Configuration.
+
+C_SRC_DIR ?= $(CURDIR)/c_src
+C_SRC_ENV ?= $(C_SRC_DIR)/env.mk
+C_SRC_OUTPUT ?= $(CURDIR)/priv/$(PROJECT)
+C_SRC_TYPE ?= shared
+
+# System type and C compiler/flags.
+
+ifeq ($(PLATFORM),msys2)
+ C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?= .exe
+ C_SRC_OUTPUT_SHARED_EXTENSION ?= .dll
+else
+ C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?=
+ C_SRC_OUTPUT_SHARED_EXTENSION ?= .so
+endif
+
+ifeq ($(C_SRC_TYPE),shared)
+ C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_SHARED_EXTENSION)
+else
+ C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_EXECUTABLE_EXTENSION)
+endif
+
+ifeq ($(PLATFORM),msys2)
+# We hardcode the compiler used on MSYS2. The default CC=cc does not
+# produce working code, and neither does the "gcc" MSYS2 package.
+ CC = /mingw64/bin/gcc
+ export CC
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),darwin)
+ CC ?= cc
+ CFLAGS ?= -O3 -std=c99 -arch x86_64 -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -arch x86_64 -Wall
+ LDFLAGS ?= -arch x86_64 -flat_namespace -undefined suppress
+else ifeq ($(PLATFORM),freebsd)
+ CC ?= cc
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),linux)
+ CC ?= gcc
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+endif
+
+ifneq ($(PLATFORM),msys2)
+ CFLAGS += -fPIC
+ CXXFLAGS += -fPIC
+endif
+
+CFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
+CXXFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
+
+LDLIBS += -L"$(ERL_INTERFACE_LIB_DIR)" -lei
+
+# Verbosity.
+
+c_verbose_0 = @echo " C " $(filter-out $(notdir $(MAKEFILE_LIST) $(C_SRC_ENV)),$(^F));
+c_verbose = $(c_verbose_$(V))
+
+cpp_verbose_0 = @echo " CPP " $(filter-out $(notdir $(MAKEFILE_LIST) $(C_SRC_ENV)),$(^F));
+cpp_verbose = $(cpp_verbose_$(V))
+
+link_verbose_0 = @echo " LD " $(@F);
+link_verbose = $(link_verbose_$(V))
+
+# Targets.
+
+ifeq ($(wildcard $(C_SRC_DIR)),)
+else ifneq ($(wildcard $(C_SRC_DIR)/Makefile),)
+app:: app-c_src
+
+test-build:: app-c_src
+
+app-c_src:
+ $(MAKE) -C $(C_SRC_DIR)
+
+clean::
+ $(MAKE) -C $(C_SRC_DIR) clean
+
+else
+
+ifeq ($(SOURCES),)
+SOURCES := $(sort $(foreach pat,*.c *.C *.cc *.cpp,$(call core_find,$(C_SRC_DIR)/,$(pat))))
+endif
+OBJECTS = $(addsuffix .o, $(basename $(SOURCES)))
+
+COMPILE_C = $(c_verbose) $(CC) $(CFLAGS) $(CPPFLAGS) -c
+COMPILE_CPP = $(cpp_verbose) $(CXX) $(CXXFLAGS) $(CPPFLAGS) -c
+
+app:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
+
+test-build:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
+
+$(C_SRC_OUTPUT_FILE): $(OBJECTS)
+ $(verbose) mkdir -p $(dir $@)
+ $(link_verbose) $(CC) $(OBJECTS) \
+ $(LDFLAGS) $(if $(filter $(C_SRC_TYPE),shared),-shared) $(LDLIBS) \
+ -o $(C_SRC_OUTPUT_FILE)
+
+$(OBJECTS): $(MAKEFILE_LIST) $(C_SRC_ENV)
+
+%.o: %.c
+ $(COMPILE_C) $(OUTPUT_OPTION) $<
+
+%.o: %.cc
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+%.o: %.C
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+%.o: %.cpp
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+clean:: clean-c_src
+
+clean-c_src:
+ $(gen_verbose) rm -f $(C_SRC_OUTPUT_FILE) $(OBJECTS)
+
+endif
+
+ifneq ($(wildcard $(C_SRC_DIR)),)
+ERL_ERTS_DIR = $(shell $(ERL) -eval 'io:format("~s~n", [code:lib_dir(erts)]), halt().')
+
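+# The generated env.mk looks roughly like this (paths are illustrative):
+#   ERTS_INCLUDE_DIR ?= /usr/lib/erlang/erts-11.1/include/
+#   ERL_INTERFACE_INCLUDE_DIR ?= /usr/lib/erlang/lib/erl_interface-4.0/include
+#   ERL_INTERFACE_LIB_DIR ?= /usr/lib/erlang/lib/erl_interface-4.0/lib
+#   ERTS_DIR ?= /usr/lib/erlang/lib/erts-11.1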
+$(C_SRC_ENV):
+ $(verbose) $(ERL) -eval "file:write_file(\"$(call core_native_path,$(C_SRC_ENV))\", \
+ io_lib:format( \
+ \"# Generated by Erlang.mk. Edit at your own risk!~n~n\" \
+ \"ERTS_INCLUDE_DIR ?= ~s/erts-~s/include/~n\" \
+ \"ERL_INTERFACE_INCLUDE_DIR ?= ~s~n\" \
+ \"ERL_INTERFACE_LIB_DIR ?= ~s~n\" \
+ \"ERTS_DIR ?= $(ERL_ERTS_DIR)~n\", \
+ [code:root_dir(), erlang:system_info(version), \
+ code:lib_dir(erl_interface, include), \
+ code:lib_dir(erl_interface, lib)])), \
+ halt()."
+
+distclean:: distclean-c_src-env
+
+distclean-c_src-env:
+ $(gen_verbose) rm -f $(C_SRC_ENV)
+
+-include $(C_SRC_ENV)
+
+ifneq ($(ERL_ERTS_DIR),$(ERTS_DIR))
+$(shell rm -f $(C_SRC_ENV))
+endif
+endif
+
+# Templates.
+
+define bs_c_nif
+#include "erl_nif.h"
+
+static int loads = 0;
+
+static int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
+{
+ /* Initialize private data. */
+ *priv_data = NULL;
+
+ loads++;
+
+ return 0;
+}
+
+static int upgrade(ErlNifEnv* env, void** priv_data, void** old_priv_data, ERL_NIF_TERM load_info)
+{
+ /* Convert the private data to the new version. */
+ *priv_data = *old_priv_data;
+
+ loads++;
+
+ return 0;
+}
+
+static void unload(ErlNifEnv* env, void* priv_data)
+{
+ if (loads == 1) {
+ /* Destroy the private data. */
+ }
+
+ loads--;
+}
+
+static ERL_NIF_TERM hello(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ if (enif_is_atom(env, argv[0])) {
+ return enif_make_tuple2(env,
+ enif_make_atom(env, "hello"),
+ argv[0]);
+ }
+
+ return enif_make_tuple2(env,
+ enif_make_atom(env, "error"),
+ enif_make_atom(env, "badarg"));
+}
+
+static ErlNifFunc nif_funcs[] = {
+ {"hello", 1, hello}
+};
+
+ERL_NIF_INIT($n, nif_funcs, load, NULL, upgrade, unload)
+endef
+
+define bs_erl_nif
+-module($n).
+
+-export([hello/1]).
+
+-on_load(on_load/0).
+on_load() ->
+ PrivDir = case code:priv_dir(?MODULE) of
+ {error, _} ->
+ AppPath = filename:dirname(filename:dirname(code:which(?MODULE))),
+ filename:join(AppPath, "priv");
+ Path ->
+ Path
+ end,
+ erlang:load_nif(filename:join(PrivDir, atom_to_list(?MODULE)), 0).
+
+hello(_) ->
+ erlang:nif_error({not_loaded, ?MODULE}).
+endef
+
+new-nif:
+ifndef n
+ $(error Usage: $(MAKE) new-nif n=NAME [in=APP])
+endif
+ifneq ($(wildcard $(C_SRC_DIR)/$n.c),)
+ $(error Error: $(C_SRC_DIR)/$n.c already exists)
+endif
+ifneq ($(wildcard src/$n.erl),)
+ $(error Error: src/$n.erl already exists)
+endif
+ifdef in
+ $(verbose) $(MAKE) -C $(APPS_DIR)/$(in)/ new-nif n=$n in=
+else
+ $(verbose) mkdir -p $(C_SRC_DIR) src/
+ $(verbose) $(call core_render,bs_c_nif,$(C_SRC_DIR)/$n.c)
+ $(verbose) $(call core_render,bs_erl_nif,src/$n.erl)
+endif
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: ci ci-prepare ci-setup
+
+CI_OTP ?=
+CI_HIPE ?=
+CI_ERLLVM ?=
+
+ifeq ($(CI_VM),native)
+ERLC_OPTS += +native
+TEST_ERLC_OPTS += +native
+else ifeq ($(CI_VM),erllvm)
+ERLC_OPTS += +native +'{hipe, [to_llvm]}'
+TEST_ERLC_OPTS += +native +'{hipe, [to_llvm]}'
+endif
+
+ifeq ($(strip $(CI_OTP) $(CI_HIPE) $(CI_ERLLVM)),)
+ci::
+else
+
+ci:: $(addprefix ci-,$(CI_OTP) $(addsuffix -native,$(CI_HIPE)) $(addsuffix -erllvm,$(CI_ERLLVM)))
+
+ci-prepare: $(addprefix $(KERL_INSTALL_DIR)/,$(CI_OTP) $(addsuffix -native,$(CI_HIPE)))
+
+ci-setup::
+ $(verbose) :
+
+ci-extra::
+ $(verbose) :
+
+ci_verbose_0 = @echo " CI " $(1);
+ci_verbose = $(ci_verbose_$(V))
+
+define ci_target
+ci-$1: $(KERL_INSTALL_DIR)/$2
+ $(verbose) $(MAKE) --no-print-directory clean
+ $(ci_verbose) \
+ PATH="$(KERL_INSTALL_DIR)/$2/bin:$(PATH)" \
+ CI_OTP_RELEASE="$1" \
+ CT_OPTS="-label $1" \
+ CI_VM="$3" \
+ $(MAKE) ci-setup tests
+ $(verbose) $(MAKE) --no-print-directory ci-extra
+endef
+
+$(foreach otp,$(CI_OTP),$(eval $(call ci_target,$(otp),$(otp),otp)))
+$(foreach otp,$(CI_HIPE),$(eval $(call ci_target,$(otp)-native,$(otp)-native,native)))
+$(foreach otp,$(CI_ERLLVM),$(eval $(call ci_target,$(otp)-erllvm,$(otp)-native,erllvm)))
+
+$(foreach otp,$(filter-out $(ERLANG_OTP),$(CI_OTP)),$(eval $(call kerl_otp_target,$(otp))))
+$(foreach otp,$(filter-out $(ERLANG_HIPE),$(sort $(CI_HIPE) $(CI_ERLLVM))),$(eval $(call kerl_hipe_target,$(otp))))
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Continuous Integration targets:" \
+ " ci Run '$(MAKE) tests' on all configured Erlang versions." \
+ "" \
+ "The CI_OTP variable must be defined with the Erlang versions" \
+ "that must be tested. For example: CI_OTP = OTP-17.3.4 OTP-17.5.3"
+
+endif
+
+# Copyright (c) 2020, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifdef CONCUERROR_TESTS
+
+.PHONY: concuerror distclean-concuerror
+
+# Configuration
+
+CONCUERROR_LOGS_DIR ?= $(CURDIR)/logs
+CONCUERROR_OPTS ?=
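+# CONCUERROR_TESTS is a list of module:function pairs, for example
+# (hypothetical names):
+#   CONCUERROR_TESTS = my_registry:concurrent_register my_registry:concurrent_stop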
+
+# Core targets.
+
+check:: concuerror
+
+ifndef KEEP_LOGS
+distclean:: distclean-concuerror
+endif
+
+# Plugin-specific targets.
+
+$(ERLANG_MK_TMP)/Concuerror/bin/concuerror: | $(ERLANG_MK_TMP)
+ $(verbose) git clone https://github.com/parapluu/Concuerror $(ERLANG_MK_TMP)/Concuerror
+ $(verbose) $(MAKE) -C $(ERLANG_MK_TMP)/Concuerror
+
+$(CONCUERROR_LOGS_DIR):
+ $(verbose) mkdir -p $(CONCUERROR_LOGS_DIR)
+
+define concuerror_html_report
+<!DOCTYPE html>
+<html lang="en">
+<head>
+<meta charset="utf-8">
+<title>Concuerror HTML report</title>
+</head>
+<body>
+<h1>Concuerror HTML report</h1>
+<p>Generated on $(concuerror_date)</p>
+<ul>
+$(foreach t,$(concuerror_targets),<li><a href="$(t).txt">$(t)</a></li>)
+</ul>
+</body>
+</html>
+endef
+
+concuerror: $(addprefix concuerror-,$(subst :,-,$(CONCUERROR_TESTS)))
+ $(eval concuerror_date := $(shell date))
+ $(eval concuerror_targets := $^)
+ $(verbose) $(call core_render,concuerror_html_report,$(CONCUERROR_LOGS_DIR)/concuerror.html)
+
+define concuerror_target
+.PHONY: concuerror-$1-$2
+
+concuerror-$1-$2: test-build | $(ERLANG_MK_TMP)/Concuerror/bin/concuerror $(CONCUERROR_LOGS_DIR)
+ $(ERLANG_MK_TMP)/Concuerror/bin/concuerror \
+ --pa $(CURDIR)/ebin --pa $(TEST_DIR) \
+ -o $(CONCUERROR_LOGS_DIR)/concuerror-$1-$2.txt \
+ $$(CONCUERROR_OPTS) -m $1 -t $2
+endef
+
+$(foreach test,$(CONCUERROR_TESTS),$(eval $(call concuerror_target,$(firstword $(subst :, ,$(test))),$(lastword $(subst :, ,$(test))))))
+
+distclean-concuerror:
+ $(gen_verbose) rm -rf $(CONCUERROR_LOGS_DIR)
+
+endif
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: ct apps-ct distclean-ct
+
+# Configuration.
+
+CT_OPTS ?=
+
+ifneq ($(wildcard $(TEST_DIR)),)
+ifndef CT_SUITES
+CT_SUITES := $(sort $(subst _SUITE.erl,,$(notdir $(call core_find,$(TEST_DIR)/,*_SUITE.erl))))
+endif
+endif
+CT_SUITES ?=
+CT_LOGS_DIR ?= $(CURDIR)/logs
+
+# Core targets.
+
+tests:: ct
+
+ifndef KEEP_LOGS
+distclean:: distclean-ct
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Common_test targets:" \
+ " ct Run all the common_test suites for this project" \
+ "" \
+ "All your common_test suites have their associated targets." \
+ "A suite named http_SUITE can be ran using the ct-http target."
+
+# Plugin-specific targets.
+
+CT_RUN = ct_run \
+ -no_auto_compile \
+ -noinput \
+ -pa $(CURDIR)/ebin $(TEST_DIR) \
+ -dir $(TEST_DIR) \
+ -logdir $(CT_LOGS_DIR)
+
+ifeq ($(CT_SUITES),)
+ct: $(if $(IS_APP)$(ROOT_DIR),,apps-ct)
+else
+# We do not run tests if we are in an apps/* with no test directory.
+ifneq ($(IS_APP)$(wildcard $(TEST_DIR)),1)
+ct: test-build $(if $(IS_APP)$(ROOT_DIR),,apps-ct)
+ $(verbose) mkdir -p $(CT_LOGS_DIR)
+ $(gen_verbose) $(CT_RUN) -sname ct_$(PROJECT) -suite $(addsuffix _SUITE,$(CT_SUITES)) $(CT_OPTS)
+endif
+endif
+
+ifneq ($(ALL_APPS_DIRS),)
+define ct_app_target
+apps-ct-$1: test-build
+ $$(MAKE) -C $1 ct IS_APP=1
+endef
+
+$(foreach app,$(ALL_APPS_DIRS),$(eval $(call ct_app_target,$(app))))
+
+apps-ct: $(addprefix apps-ct-,$(ALL_APPS_DIRS))
+endif
+
+ifdef t
+ifeq (,$(findstring :,$t))
+CT_EXTRA = -group $t
+else
+t_words = $(subst :, ,$t)
+CT_EXTRA = -group $(firstword $(t_words)) -case $(lastword $(t_words))
+endif
+else
+ifdef c
+CT_EXTRA = -case $c
+else
+CT_EXTRA =
+endif
+endif
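+# For example, with a suite named http_SUITE (group/case names hypothetical):
+#   make ct-http t=admin            # run one group
+#   make ct-http t=admin:login_ok   # run one case within a group
+#   make ct-http c=login_ok         # run one case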
+
+define ct_suite_target
+ct-$(1): test-build
+ $(verbose) mkdir -p $(CT_LOGS_DIR)
+ $(gen_verbose_esc) $(CT_RUN) -sname ct_$(PROJECT) -suite $(addsuffix _SUITE,$(1)) $(CT_EXTRA) $(CT_OPTS)
+endef
+
+$(foreach test,$(CT_SUITES),$(eval $(call ct_suite_target,$(test))))
+
+distclean-ct:
+ $(gen_verbose) rm -rf $(CT_LOGS_DIR)
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: plt distclean-plt dialyze
+
+# Configuration.
+
+DIALYZER_PLT ?= $(CURDIR)/.$(PROJECT).plt
+export DIALYZER_PLT
+
+PLT_APPS ?=
+DIALYZER_DIRS ?= --src -r $(wildcard src) $(ALL_APPS_DIRS)
+DIALYZER_OPTS ?= -Werror_handling -Wrace_conditions -Wunmatched_returns # -Wunderspecs
+DIALYZER_PLT_OPTS ?=
+
+# Core targets.
+
+check:: dialyze
+
+distclean:: distclean-plt
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Dialyzer targets:" \
+ " plt Build a PLT file for this project" \
+ " dialyze Analyze the project using Dialyzer"
+
+# Plugin-specific targets.
+
+define filter_opts.erl
+ Opts = init:get_plain_arguments(),
+ {Filtered, _} = lists:foldl(fun
+ (O, {Os, true}) -> {[O|Os], false};
+ (O = "-D", {Os, _}) -> {[O|Os], true};
+ (O = [\\$$-, \\$$D, _ | _], {Os, _}) -> {[O|Os], false};
+ (O = "-I", {Os, _}) -> {[O|Os], true};
+ (O = [\\$$-, \\$$I, _ | _], {Os, _}) -> {[O|Os], false};
+ (O = "-pa", {Os, _}) -> {[O|Os], true};
+ (_, Acc) -> Acc
+ end, {[], false}, Opts),
+ io:format("~s~n", [string:join(lists:reverse(Filtered), " ")]),
+ halt().
+endef
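+# For example (illustrative), if the plain arguments are
+# "-DDEBUG -I include -o ebin", filter_opts.erl prints "-DDEBUG -I include":
+# only -D, -I and -pa options (with their values) are forwarded to Dialyzer.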
+
+# DIALYZER_PLT is a variable understood directly by Dialyzer.
+#
+# We append the path to erts at the end of the PLT. This works
+# because the PLT file is in the external term format and the
+# function binary_to_term/1 ignores any trailing data.
+$(DIALYZER_PLT): deps app
+ $(eval DEPS_LOG := $(shell test -f $(ERLANG_MK_TMP)/deps.log && \
+ while read p; do test -d $$p/ebin && echo $$p/ebin; done <$(ERLANG_MK_TMP)/deps.log))
+ $(verbose) dialyzer --build_plt $(DIALYZER_PLT_OPTS) --apps \
+ erts kernel stdlib $(PLT_APPS) $(OTP_DEPS) $(LOCAL_DEPS) $(DEPS_LOG) || test $$? -eq 2
+ $(verbose) $(ERL) -eval 'io:format("~n~s~n", [code:lib_dir(erts)]), halt().' >> $@
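+# Illustration of the trailing-data property (hypothetical shell session):
+#   1> B = term_to_binary({plt, contents}).
+#   2> binary_to_term(<<B/binary, "/usr/lib/erlang/lib/erts-11.1\n">>).
+#   {plt,contents}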
+
+plt: $(DIALYZER_PLT)
+
+distclean-plt:
+ $(gen_verbose) rm -f $(DIALYZER_PLT)
+
+ifneq ($(wildcard $(DIALYZER_PLT)),)
+dialyze: $(if $(filter --src,$(DIALYZER_DIRS)),,deps app)
+ $(verbose) if ! tail -n1 $(DIALYZER_PLT) | \
+ grep -q "^`$(ERL) -eval 'io:format("~s", [code:lib_dir(erts)]), halt().'`$$"; then \
+ rm $(DIALYZER_PLT); \
+ $(MAKE) plt; \
+ fi
+else
+dialyze: $(DIALYZER_PLT)
+endif
+ $(verbose) dialyzer --no_native `$(ERL) \
+ -eval "$(subst $(newline),,$(call escape_dquotes,$(call filter_opts.erl)))" \
+ -extra $(ERLC_OPTS)` $(DIALYZER_DIRS) $(DIALYZER_OPTS) $(if $(wildcard ebin/),-pa ebin/)
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-edoc edoc
+
+# Configuration.
+
+EDOC_OPTS ?=
+EDOC_SRC_DIRS ?=
+EDOC_OUTPUT ?= doc
+
+define edoc.erl
+ SrcPaths = lists:foldl(fun(P, Acc) ->
+ filelib:wildcard(atom_to_list(P) ++ "/{src,c_src}") ++ Acc
+ end, [], [$(call comma_list,$(patsubst %,'%',$(call core_native_path,$(EDOC_SRC_DIRS))))]),
+ DefaultOpts = [{dir, "$(EDOC_OUTPUT)"}, {source_path, SrcPaths}, {subpackages, false}],
+ edoc:application($(1), ".", [$(2)] ++ DefaultOpts),
+ halt(0).
+endef
+
+# Core targets.
+
+ifneq ($(strip $(EDOC_SRC_DIRS)$(wildcard doc/overview.edoc)),)
+docs:: edoc
+endif
+
+distclean:: distclean-edoc
+
+# Plugin-specific targets.
+
+edoc: distclean-edoc doc-deps
+ $(gen_verbose) $(call erlang,$(call edoc.erl,$(PROJECT),$(EDOC_OPTS)))
+
+distclean-edoc:
+ $(gen_verbose) rm -f $(EDOC_OUTPUT)/*.css $(EDOC_OUTPUT)/*.html $(EDOC_OUTPUT)/*.png $(EDOC_OUTPUT)/edoc-info
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Configuration.
+
+DTL_FULL_PATH ?=
+DTL_PATH ?= templates/
+DTL_PREFIX ?=
+DTL_SUFFIX ?= _dtl
+DTL_OPTS ?=
+
+# Verbosity.
+
+dtl_verbose_0 = @echo " DTL " $(filter %.dtl,$(?F));
+dtl_verbose = $(dtl_verbose_$(V))
+
+# Core targets.
+
+DTL_PATH := $(abspath $(DTL_PATH))
+DTL_FILES := $(sort $(call core_find,$(DTL_PATH),*.dtl))
+
+ifneq ($(DTL_FILES),)
+
+DTL_NAMES = $(addprefix $(DTL_PREFIX),$(addsuffix $(DTL_SUFFIX),$(DTL_FILES:$(DTL_PATH)/%.dtl=%)))
+DTL_MODULES = $(if $(DTL_FULL_PATH),$(subst /,_,$(DTL_NAMES)),$(notdir $(DTL_NAMES)))
+BEAM_FILES += $(addsuffix .beam,$(addprefix ebin/,$(DTL_MODULES)))
+
+ifneq ($(words $(DTL_FILES)),0)
+# Rebuild templates when the Makefile changes.
+$(ERLANG_MK_TMP)/last-makefile-change-erlydtl: $(MAKEFILE_LIST) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(DTL_FILES); \
+ fi
+ $(verbose) touch $@
+
+ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change-erlydtl
+endif
+
+define erlydtl_compile.erl
+ [begin
+ Module0 = case "$(strip $(DTL_FULL_PATH))" of
+ "" ->
+ filename:basename(F, ".dtl");
+ _ ->
+ "$(call core_native_path,$(DTL_PATH))/" ++ F2 = filename:rootname(F, ".dtl"),
+ re:replace(F2, "/", "_", [{return, list}, global])
+ end,
+ Module = list_to_atom("$(DTL_PREFIX)" ++ string:to_lower(Module0) ++ "$(DTL_SUFFIX)"),
+ case erlydtl:compile(F, Module, [$(DTL_OPTS)] ++ [{out_dir, "ebin/"}, return_errors]) of
+ ok -> ok;
+ {ok, _} -> ok
+ end
+ end || F <- string:tokens("$(1)", " ")],
+ halt().
+endef
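+# Module naming examples (illustrative): with the defaults, templates/users.dtl
+# compiles to the module users_dtl; with DTL_FULL_PATH set,
+# templates/admin/users.dtl compiles to admin_users_dtl (slashes become
+# underscores).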
+
+ebin/$(PROJECT).app:: $(DTL_FILES) | ebin/
+ $(if $(strip $?),\
+ $(dtl_verbose) $(call erlang,$(call erlydtl_compile.erl,$(call core_native_path,$?)),\
+ -pa ebin/))
+
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, Dave Cottlehuber <dch@skunkwerks.at>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-escript escript escript-zip
+
+# Configuration.
+
+ESCRIPT_NAME ?= $(PROJECT)
+ESCRIPT_FILE ?= $(ESCRIPT_NAME)
+
+ESCRIPT_SHEBANG ?= /usr/bin/env escript
+ESCRIPT_COMMENT ?= This is an -*- erlang -*- file
+ESCRIPT_EMU_ARGS ?= -escript main $(ESCRIPT_NAME)
+
+ESCRIPT_ZIP ?= 7z a -tzip -mx=9 -mtc=off $(if $(filter-out 0,$(V)),,> /dev/null)
+ESCRIPT_ZIP_FILE ?= $(ERLANG_MK_TMP)/escript.zip
+
+# Core targets.
+
+distclean:: distclean-escript
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Escript targets:" \
+ " escript Build an executable escript archive" \
+
+# Plugin-specific targets.
+
+escript-zip:: FULL=1
+escript-zip:: deps app
+ $(verbose) mkdir -p $(dir $(ESCRIPT_ZIP_FILE))
+ $(verbose) rm -f $(ESCRIPT_ZIP_FILE)
+ $(gen_verbose) cd .. && $(ESCRIPT_ZIP) $(ESCRIPT_ZIP_FILE) $(PROJECT)/ebin/*
+ifneq ($(DEPS),)
+ $(verbose) cd $(DEPS_DIR) && $(ESCRIPT_ZIP) $(ESCRIPT_ZIP_FILE) \
+ $(subst $(DEPS_DIR)/,,$(addsuffix /*,$(wildcard \
+ $(addsuffix /ebin,$(shell cat $(ERLANG_MK_TMP)/deps.log)))))
+endif
+
+escript:: escript-zip
+ $(gen_verbose) printf "%s\n" \
+ "#!$(ESCRIPT_SHEBANG)" \
+ "%% $(ESCRIPT_COMMENT)" \
+ "%%! $(ESCRIPT_EMU_ARGS)" > $(ESCRIPT_FILE)
+ $(verbose) cat $(ESCRIPT_ZIP_FILE) >> $(ESCRIPT_FILE)
+ $(verbose) chmod +x $(ESCRIPT_FILE)
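+# The resulting file is the three header lines above followed by the raw zip
+# archive; escript reads the archive that follows the header and calls main/1
+# in the module named by ESCRIPT_EMU_ARGS.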
+
+distclean-escript:
+ $(gen_verbose) rm -f $(ESCRIPT_FILE)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, Enrique Fernandez <enrique.fernandez@erlang-solutions.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: eunit apps-eunit
+
+# Configuration
+
+EUNIT_OPTS ?=
+EUNIT_ERL_OPTS ?=
+
+# Core targets.
+
+tests:: eunit
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "EUnit targets:" \
+ " eunit Run all the EUnit tests for this project"
+
+# Plugin-specific targets.
+
+define eunit.erl
+ $(call cover.erl)
+ CoverSetup(),
+ case eunit:test($1, [$(EUNIT_OPTS)]) of
+ ok -> ok;
+ error -> halt(2)
+ end,
+ CoverExport("$(call core_native_path,$(COVER_DATA_DIR))/eunit.coverdata"),
+ halt()
+endef
+
+EUNIT_ERL_OPTS += -pa $(TEST_DIR) $(CURDIR)/ebin
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+eunit: test-build cover-data-dir
+ $(gen_verbose) $(call erlang,$(call eunit.erl,['$(t)']),$(EUNIT_ERL_OPTS))
+else
+eunit: test-build cover-data-dir
+ $(gen_verbose) $(call erlang,$(call eunit.erl,fun $(t)/0),$(EUNIT_ERL_OPTS))
+endif
+else
+EUNIT_EBIN_MODS = $(notdir $(basename $(ERL_FILES) $(BEAM_FILES)))
+EUNIT_TEST_MODS = $(notdir $(basename $(call core_find,$(TEST_DIR)/,*.erl)))
+
+EUNIT_MODS = $(foreach mod,$(EUNIT_EBIN_MODS) $(filter-out \
+ $(patsubst %,%_tests,$(EUNIT_EBIN_MODS)),$(EUNIT_TEST_MODS)),'$(mod)')
+
+eunit: test-build $(if $(IS_APP)$(ROOT_DIR),,apps-eunit) cover-data-dir
+ifneq ($(wildcard src/ $(TEST_DIR)),)
+ $(gen_verbose) $(call erlang,$(call eunit.erl,[$(call comma_list,$(EUNIT_MODS))]),$(EUNIT_ERL_OPTS))
+endif
+
+ifneq ($(ALL_APPS_DIRS),)
+apps-eunit: test-build
+ $(verbose) eunit_retcode=0 ; for app in $(ALL_APPS_DIRS); do $(MAKE) -C $$app eunit IS_APP=1; \
+ [ $$? -ne 0 ] && eunit_retcode=1 ; done ; \
+ exit $$eunit_retcode
+endif
+endif
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter proper,$(DEPS) $(TEST_DEPS)),proper)
+.PHONY: proper
+
+# Targets.
+
+tests:: proper
+
+define proper_check.erl
+ $(call cover.erl)
+ code:add_pathsa([
+ "$(call core_native_path,$(CURDIR)/ebin)",
+ "$(call core_native_path,$(DEPS_DIR)/*/ebin)",
+ "$(call core_native_path,$(TEST_DIR))"]),
+ Module = fun(M) ->
+ [true] =:= lists:usort([
+ case atom_to_list(F) of
+ "prop_" ++ _ ->
+ io:format("Testing ~p:~p/0~n", [M, F]),
+ proper:quickcheck(M:F(), nocolors);
+ _ ->
+ true
+ end
+ || {F, 0} <- M:module_info(exports)])
+ end,
+ try begin
+ CoverSetup(),
+ Res = case $(1) of
+ all -> [true] =:= lists:usort([Module(M) || M <- [$(call comma_list,$(3))]]);
+ module -> Module($(2));
+ function -> proper:quickcheck($(2), nocolors)
+ end,
+ CoverExport("$(COVER_DATA_DIR)/proper.coverdata"),
+ Res
+ end of
+ true -> halt(0);
+ _ -> halt(1)
+ catch error:undef ->
+ io:format("Undefined property or module?~n~p~n", [erlang:get_stacktrace()]),
+ halt(0)
+ end.
+endef
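+# The check above runs every exported prop_*/0 property. A property module
+# might look like this (hypothetical example):
+#   -module(prop_lists).
+#   -include_lib("proper/include/proper.hrl").
+#   -export([prop_reverse/0]).
+#   prop_reverse() ->
+#       ?FORALL(L, list(integer()), lists:reverse(lists:reverse(L)) =:= L).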
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+proper: test-build cover-data-dir
+ $(verbose) $(call erlang,$(call proper_check.erl,module,$(t)))
+else
+proper: test-build cover-data-dir
+ $(verbose) echo Testing $(t)/0
+ $(verbose) $(call erlang,$(call proper_check.erl,function,$(t)()))
+endif
+else
+proper: test-build cover-data-dir
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(wildcard ebin/*.beam) $(call core_find,$(TEST_DIR)/,*.beam))))))
+ $(gen_verbose) $(call erlang,$(call proper_check.erl,all,undefined,$(MODULES)))
+endif
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Verbosity.
+
+proto_verbose_0 = @echo " PROTO " $(filter %.proto,$(?F));
+proto_verbose = $(proto_verbose_$(V))
+
+# Core targets.
+
+ifneq ($(wildcard src/),)
+ifneq ($(filter gpb protobuffs,$(BUILD_DEPS) $(DEPS)),)
+PROTO_FILES := $(filter %.proto,$(ALL_SRC_FILES))
+ERL_FILES += $(addprefix src/,$(patsubst %.proto,%_pb.erl,$(notdir $(PROTO_FILES))))
+
+ifeq ($(PROTO_FILES),)
+$(ERLANG_MK_TMP)/last-makefile-change-protobuffs:
+ $(verbose) :
+else
+# Rebuild proto files when the Makefile changes.
+# We exclude $(PROJECT).d to avoid a circular dependency.
+$(ERLANG_MK_TMP)/last-makefile-change-protobuffs: $(filter-out $(PROJECT).d,$(MAKEFILE_LIST)) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(PROTO_FILES); \
+ fi
+ $(verbose) touch $@
+
+$(PROJECT).d:: $(ERLANG_MK_TMP)/last-makefile-change-protobuffs
+endif
+
+ifeq ($(filter gpb,$(BUILD_DEPS) $(DEPS)),)
+define compile_proto.erl
+ [begin
+ protobuffs_compile:generate_source(F, [
+ {output_include_dir, "./include"},
+ {output_src_dir, "./src"}])
+ end || F <- string:tokens("$1", " ")],
+ halt().
+endef
+else
+define compile_proto.erl
+ [begin
+ gpb_compile:file(F, [
+ {include_as_lib, true},
+ {module_name_suffix, "_pb"},
+ {o_hrl, "./include"},
+ {o_erl, "./src"}])
+ end || F <- string:tokens("$1", " ")],
+ halt().
+endef
+endif
+
+ifneq ($(PROTO_FILES),)
+$(PROJECT).d:: $(PROTO_FILES)
+ $(verbose) mkdir -p ebin/ include/
+ $(if $(strip $?),$(proto_verbose) $(call erlang,$(call compile_proto.erl,$?)))
+endif
+endif
+endif
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: relx-rel relx-relup distclean-relx-rel run
+
+# Configuration.
+
+RELX ?= $(ERLANG_MK_TMP)/relx
+RELX_CONFIG ?= $(CURDIR)/relx.config
+
+RELX_URL ?= https://erlang.mk/res/relx-v3.27.0
+RELX_OPTS ?=
+RELX_OUTPUT_DIR ?= _rel
+RELX_REL_EXT ?=
+RELX_TAR ?= 1
+
+ifdef SFX
+ RELX_TAR = 1
+endif
+
+ifeq ($(firstword $(RELX_OPTS)),-o)
+ RELX_OUTPUT_DIR = $(word 2,$(RELX_OPTS))
+else
+ RELX_OPTS += -o $(RELX_OUTPUT_DIR)
+endif
+
+# Core targets.
+
+ifeq ($(IS_DEP),)
+ifneq ($(wildcard $(RELX_CONFIG)),)
+rel:: relx-rel
+
+relup:: relx-relup
+endif
+endif
+
+distclean:: distclean-relx-rel
+
+# Plugin-specific targets.
+
+$(RELX): | $(ERLANG_MK_TMP)
+ $(gen_verbose) $(call core_http_get,$(RELX),$(RELX_URL))
+ $(verbose) chmod +x $(RELX)
+
+relx-rel: $(RELX) rel-deps app
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) release
+ $(verbose) $(MAKE) relx-post-rel
+ifeq ($(RELX_TAR),1)
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) tar
+endif
+
+relx-relup: $(RELX) rel-deps app
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) release
+ $(MAKE) relx-post-rel
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) relup $(if $(filter 1,$(RELX_TAR)),tar)
+
+distclean-relx-rel:
+ $(gen_verbose) rm -rf $(RELX_OUTPUT_DIR)
+
+# Default hooks.
+relx-post-rel::
+ $(verbose) :
+
+# Run target.
+
+ifeq ($(wildcard $(RELX_CONFIG)),)
+run::
+else
+
+define get_relx_release.erl
+ {ok, Config} = file:consult("$(call core_native_path,$(RELX_CONFIG))"),
+ {release, {Name, Vsn0}, _} = lists:keyfind(release, 1, Config),
+ Vsn = case Vsn0 of
+ {cmd, Cmd} -> os:cmd(Cmd);
+ semver -> "";
+ {semver, _} -> "";
+ _ -> Vsn0
+ end,
+ Extended = case lists:keyfind(extended_start_script, 1, Config) of
+ {_, true} -> "1";
+ _ -> ""
+ end,
+ io:format("~s ~s ~s", [Name, Vsn, Extended]),
+ halt(0).
+endef
+
+RELX_REL := $(shell $(call erlang,$(get_relx_release.erl)))
+RELX_REL_NAME := $(word 1,$(RELX_REL))
+RELX_REL_VSN := $(word 2,$(RELX_REL))
+RELX_REL_CMD := $(if $(word 3,$(RELX_REL)),console)
+
+ifeq ($(PLATFORM),msys2)
+RELX_REL_EXT := .cmd
+endif
+
+run:: all
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) $(RELX_REL_CMD)
+
+ifdef RELOAD
+rel::
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) ping
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) \
+ eval "io:format(\"~p~n\", [c:lm()])"
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Relx targets:" \
+ " run Compile the project, build the release and run it"
+
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, M Robert Martin <rob@version2beta.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: shell
+
+# Configuration.
+
+SHELL_ERL ?= erl
+SHELL_PATHS ?= $(CURDIR)/ebin $(TEST_DIR)
+SHELL_OPTS ?=
+
+ALL_SHELL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(SHELL_DEPS))
+
+# Core targets
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Shell targets:" \
+ " shell Run an erlang shell with SHELL_OPTS or reasonable default"
+
+# Plugin-specific targets.
+
+$(foreach dep,$(SHELL_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+build-shell-deps:
+else
+build-shell-deps: $(ALL_SHELL_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_SHELL_DEPS_DIRS) ; do \
+ if [ -z "$(strip $(FULL))" ] && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ else \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ fi \
+ done
+endif
+
+shell:: build-shell-deps
+ $(gen_verbose) $(SHELL_ERL) -pa $(SHELL_PATHS) $(SHELL_OPTS)
+
+# Copyright 2017, Stanislaw Klekot <dozzie@jarowit.net>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-sphinx sphinx
+
+# Configuration.
+
+SPHINX_BUILD ?= sphinx-build
+SPHINX_SOURCE ?= doc
+SPHINX_CONFDIR ?=
+SPHINX_FORMATS ?= html
+SPHINX_DOCTREES ?= $(ERLANG_MK_TMP)/sphinx.doctrees
+SPHINX_OPTS ?=
+
+#sphinx_html_opts =
+#sphinx_html_output = html
+#sphinx_man_opts =
+#sphinx_man_output = man
+#sphinx_latex_opts =
+#sphinx_latex_output = latex
+
+# Helpers.
+
+sphinx_build_0 = @echo " SPHINX" $1; $(SPHINX_BUILD) -N -q
+sphinx_build_1 = $(SPHINX_BUILD) -N
+sphinx_build_2 = set -x; $(SPHINX_BUILD)
+sphinx_build = $(sphinx_build_$(V))
+
+define sphinx.build
+$(call sphinx_build,$1) -b $1 -d $(SPHINX_DOCTREES) $(if $(SPHINX_CONFDIR),-c $(SPHINX_CONFDIR)) $(SPHINX_OPTS) $(sphinx_$1_opts) -- $(SPHINX_SOURCE) $(call sphinx.output,$1)
+
+endef
+
+define sphinx.output
+$(if $(sphinx_$1_output),$(sphinx_$1_output),$1)
+endef
+
+# Targets.
+
+ifneq ($(wildcard $(if $(SPHINX_CONFDIR),$(SPHINX_CONFDIR),$(SPHINX_SOURCE))/conf.py),)
+docs:: sphinx
+distclean:: distclean-sphinx
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Sphinx targets:" \
+ " sphinx Generate Sphinx documentation." \
+ "" \
+ "ReST sources and 'conf.py' file are expected in directory pointed by" \
+ "SPHINX_SOURCE ('doc' by default). SPHINX_FORMATS lists formats to build (only" \
+ "'html' format is generated by default); target directory can be specified by" \
+ 'setting sphinx_$${format}_output, for example: sphinx_html_output = output/html' \
+ "Additional Sphinx options can be set in SPHINX_OPTS."
+
+# Plugin-specific targets.
+
+sphinx:
+ $(foreach F,$(SPHINX_FORMATS),$(call sphinx.build,$F))
+
+distclean-sphinx:
+ $(gen_verbose) rm -rf $(filter-out $(SPHINX_SOURCE),$(foreach F,$(SPHINX_FORMATS),$(call sphinx.output,$F)))
+
+# Copyright (c) 2017, Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: show-ERL_LIBS show-ERLC_OPTS show-TEST_ERLC_OPTS
+
+show-ERL_LIBS:
+ @echo $(ERL_LIBS)
+
+show-ERLC_OPTS:
+ @$(foreach opt,$(ERLC_OPTS) -pa ebin -I include,echo "$(opt)";)
+
+show-TEST_ERLC_OPTS:
+ @$(foreach opt,$(TEST_ERLC_OPTS) -pa ebin -I include,echo "$(opt)";)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter triq,$(DEPS) $(TEST_DEPS)),triq)
+.PHONY: triq
+
+# Targets.
+
+tests:: triq
+
+define triq_check.erl
+ $(call cover.erl)
+ code:add_pathsa([
+ "$(call core_native_path,$(CURDIR)/ebin)",
+ "$(call core_native_path,$(DEPS_DIR)/*/ebin)",
+ "$(call core_native_path,$(TEST_DIR))"]),
+ try begin
+ CoverSetup(),
+ Res = case $(1) of
+ all -> [true] =:= lists:usort([triq:check(M) || M <- [$(call comma_list,$(3))]]);
+ module -> triq:check($(2));
+ function -> triq:check($(2))
+ end,
+ CoverExport("$(COVER_DATA_DIR)/triq.coverdata"),
+ Res
+ end of
+ true -> halt(0);
+ _ -> halt(1)
+ catch error:undef ->
+ io:format("Undefined property or module?~n~p~n", [erlang:get_stacktrace()]),
+ halt(0)
+ end.
+endef
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+triq: test-build cover-data-dir
+ $(verbose) $(call erlang,$(call triq_check.erl,module,$(t)))
+else
+triq: test-build cover-data-dir
+ $(verbose) echo Testing $(t)/0
+ $(verbose) $(call erlang,$(call triq_check.erl,function,$(t)()))
+endif
+else
+triq: test-build cover-data-dir
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(wildcard ebin/*.beam) $(call core_find,$(TEST_DIR)/,*.beam))))))
+ $(gen_verbose) $(call erlang,$(call triq_check.erl,all,undefined,$(MODULES)))
+endif
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Erlang Solutions Ltd.
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: xref distclean-xref
+
+# Configuration.
+
+ifeq ($(XREF_CONFIG),)
+ XREFR_ARGS :=
+else
+ XREFR_ARGS := -c $(XREF_CONFIG)
+endif
+
+XREFR ?= $(CURDIR)/xrefr
+export XREFR
+
+XREFR_URL ?= https://github.com/inaka/xref_runner/releases/download/1.1.0/xrefr
+
+# Core targets.
+
+help::
+ $(verbose) printf '%s\n' '' \
+ 'Xref targets:' \
+ ' xref Run Xrefr using $$XREF_CONFIG as config file if defined'
+
+distclean:: distclean-xref
+
+# Plugin-specific targets.
+
+$(XREFR):
+ $(gen_verbose) $(call core_http_get,$(XREFR),$(XREFR_URL))
+ $(verbose) chmod +x $(XREFR)
+
+xref: deps app $(XREFR)
+ $(gen_verbose) $(XREFR) $(XREFR_ARGS)
+
+distclean-xref:
+ $(gen_verbose) rm -rf $(XREFR)
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Viktor Söderqvist <viktor@zuiderkwast.se>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+COVER_REPORT_DIR ?= cover
+COVER_DATA_DIR ?= $(COVER_REPORT_DIR)
+
+ifdef COVER
+COVER_APPS ?= $(notdir $(ALL_APPS_DIRS))
+COVER_DEPS ?=
+endif
+
+# Code coverage for Common Test.
+
+ifdef COVER
+ifdef CT_RUN
+ifneq ($(wildcard $(TEST_DIR)),)
+test-build:: $(TEST_DIR)/ct.cover.spec
+
+$(TEST_DIR)/ct.cover.spec: cover-data-dir
+ $(gen_verbose) printf "%s\n" \
+ "{incl_app, '$(PROJECT)', details}." \
+ "{incl_dirs, '$(PROJECT)', [\"$(call core_native_path,$(CURDIR)/ebin)\" \
+ $(foreach a,$(COVER_APPS),$(comma) \"$(call core_native_path,$(APPS_DIR)/$a/ebin)\") \
+ $(foreach d,$(COVER_DEPS),$(comma) \"$(call core_native_path,$(DEPS_DIR)/$d/ebin)\")]}." \
+ '{export,"$(call core_native_path,$(abspath $(COVER_DATA_DIR))/ct.coverdata)"}.' > $@
+
+CT_RUN += -cover $(TEST_DIR)/ct.cover.spec
+endif
+endif
+endif
+
+# Code coverage for other tools.
+
+ifdef COVER
+define cover.erl
+ CoverSetup = fun() ->
+ Dirs = ["$(call core_native_path,$(CURDIR)/ebin)"
+ $(foreach a,$(COVER_APPS),$(comma) "$(call core_native_path,$(APPS_DIR)/$a/ebin)")
+ $(foreach d,$(COVER_DEPS),$(comma) "$(call core_native_path,$(DEPS_DIR)/$d/ebin)")],
+ [begin
+ case filelib:is_dir(Dir) of
+ false -> false;
+ true ->
+ case cover:compile_beam_directory(Dir) of
+ {error, _} -> halt(1);
+ _ -> true
+ end
+ end
+ end || Dir <- Dirs]
+ end,
+ CoverExport = fun(Filename) -> cover:export(Filename) end,
+endef
+else
+define cover.erl
+ CoverSetup = fun() -> ok end,
+ CoverExport = fun(_) -> ok end,
+endef
+endif
+
+# Core targets
+
+ifdef COVER
+ifneq ($(COVER_REPORT_DIR),)
+tests::
+ $(verbose) $(MAKE) --no-print-directory cover-report
+endif
+
+cover-data-dir: | $(COVER_DATA_DIR)
+
+$(COVER_DATA_DIR):
+ $(verbose) mkdir -p $(COVER_DATA_DIR)
+else
+cover-data-dir:
+endif
+
+clean:: coverdata-clean
+
+ifneq ($(COVER_REPORT_DIR),)
+distclean:: cover-report-clean
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Cover targets:" \
+ " cover-report Generate a HTML coverage report from previously collected" \
+ " cover data." \
+ " all.coverdata Merge all coverdata files into all.coverdata." \
+ "" \
+ "If COVER=1 is set, coverage data is generated by the targets eunit and ct. The" \
+ "target tests additionally generates a HTML coverage report from the combined" \
+ "coverdata files from each of these testing tools. HTML reports can be disabled" \
+ "by setting COVER_REPORT_DIR to empty."
+
+# Plugin specific targets
+
+COVERDATA = $(filter-out $(COVER_DATA_DIR)/all.coverdata,$(wildcard $(COVER_DATA_DIR)/*.coverdata))
+
+.PHONY: coverdata-clean
+coverdata-clean:
+ $(gen_verbose) rm -f $(COVER_DATA_DIR)/*.coverdata $(TEST_DIR)/ct.cover.spec
+
+# Merge all coverdata files into one.
+define cover_export.erl
+ $(foreach f,$(COVERDATA),cover:import("$(f)") == ok orelse halt(1),)
+ cover:export("$(COVER_DATA_DIR)/$@"), halt(0).
+endef
+
+all.coverdata: $(COVERDATA) cover-data-dir
+ $(gen_verbose) $(call erlang,$(cover_export.erl))
+
+# These are only defined if COVER_REPORT_DIR is non-empty. Set COVER_REPORT_DIR to
+# empty if you want the coverdata files but not the HTML report.
+ifneq ($(COVER_REPORT_DIR),)
+
+.PHONY: cover-report-clean cover-report
+
+cover-report-clean:
+ $(gen_verbose) rm -rf $(COVER_REPORT_DIR)
+ifneq ($(COVER_REPORT_DIR),$(COVER_DATA_DIR))
+ $(if $(shell ls -A $(COVER_DATA_DIR)/),,$(verbose) rmdir $(COVER_DATA_DIR))
+endif
+
+ifeq ($(COVERDATA),)
+cover-report:
+else
+
+# Modules which include eunit.hrl always contain one line without coverage
+# because eunit defines test/0 which is never called. We compensate for this.
+EUNIT_HRL_MODS = $(subst $(space),$(comma),$(shell \
+ grep -H -e '^\s*-include.*include/eunit\.hrl"' src/*.erl \
+ | sed "s/^src\/\(.*\)\.erl:.*/'\1'/" | uniq))
+
+define cover_report.erl
+ $(foreach f,$(COVERDATA),cover:import("$(f)") == ok orelse halt(1),)
+ Ms = cover:imported_modules(),
+ [cover:analyse_to_file(M, "$(COVER_REPORT_DIR)/" ++ atom_to_list(M)
+ ++ ".COVER.html", [html]) || M <- Ms],
+ Report = [begin {ok, R} = cover:analyse(M, module), R end || M <- Ms],
+ EunitHrlMods = [$(EUNIT_HRL_MODS)],
+ Report1 = [{M, {Y, case lists:member(M, EunitHrlMods) of
+ true -> N - 1; false -> N end}} || {M, {Y, N}} <- Report],
+ TotalY = lists:sum([Y || {_, {Y, _}} <- Report1]),
+ TotalN = lists:sum([N || {_, {_, N}} <- Report1]),
+ Perc = fun(Y, N) -> case Y + N of 0 -> 100; S -> round(100 * Y / S) end end,
+ TotalPerc = Perc(TotalY, TotalN),
+ {ok, F} = file:open("$(COVER_REPORT_DIR)/index.html", [write]),
+ io:format(F, "<!DOCTYPE html><html>~n"
+ "<head><meta charset=\"UTF-8\">~n"
+ "<title>Coverage report</title></head>~n"
+ "<body>~n", []),
+ io:format(F, "<h1>Coverage</h1>~n<p>Total: ~p%</p>~n", [TotalPerc]),
+ io:format(F, "<table><tr><th>Module</th><th>Coverage</th></tr>~n", []),
+ [io:format(F, "<tr><td><a href=\"~p.COVER.html\">~p</a></td>"
+ "<td>~p%</td></tr>~n",
+ [M, M, Perc(Y, N)]) || {M, {Y, N}} <- Report1],
+ How = "$(subst $(space),$(comma)$(space),$(basename $(COVERDATA)))",
+ Date = "$(shell date -u "+%Y-%m-%dT%H:%M:%SZ")",
+ io:format(F, "</table>~n"
+ "<p>Generated using ~s and erlang.mk on ~s.</p>~n"
+ "</body></html>", [How, Date]),
+ halt().
+endef
+
+cover-report:
+ $(verbose) mkdir -p $(COVER_REPORT_DIR)
+ $(gen_verbose) $(call erlang,$(cover_report.erl))
+
+endif
+endif # ifneq ($(COVER_REPORT_DIR),)
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: sfx
+
+ifdef RELX_REL
+ifdef SFX
+
+# Configuration.
+
+SFX_ARCHIVE ?= $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/$(RELX_REL_NAME)-$(RELX_REL_VSN).tar.gz
+SFX_OUTPUT_FILE ?= $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME).run
+
+# Core targets.
+
+rel:: sfx
+
+# Plugin-specific targets.
+
+define sfx_stub
+#!/bin/sh
+
+TMPDIR=`mktemp -d`
+ARCHIVE=`awk '/^__ARCHIVE_BELOW__$$/ {print NR + 1; exit 0;}' $$0`
+FILENAME=$$(basename $$0)
+REL=$${FILENAME%.*}
+
+tail -n+$$ARCHIVE $$0 | tar -xzf - -C $$TMPDIR
+
+$$TMPDIR/bin/$$REL console
+RET=$$?
+
+rm -rf $$TMPDIR
+
+exit $$RET
+
+__ARCHIVE_BELOW__
+endef
+
+sfx:
+ $(verbose) $(call core_render,sfx_stub,$(SFX_OUTPUT_FILE))
+ $(gen_verbose) cat $(SFX_ARCHIVE) >> $(SFX_OUTPUT_FILE)
+ $(verbose) chmod +x $(SFX_OUTPUT_FILE)
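+# The output is the shell stub above followed by the release tarball; it can
+# be executed directly, e.g. ./_rel/$(RELX_REL_NAME).run (illustrative path).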
+
+endif
+endif
+
+# Copyright (c) 2013-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# External plugins.
+
+DEP_PLUGINS ?=
+
+$(foreach p,$(DEP_PLUGINS),\
+ $(eval $(if $(findstring /,$p),\
+ $(call core_dep_plugin,$p,$(firstword $(subst /, ,$p))),\
+ $(call core_dep_plugin,$p/plugins.mk,$p))))
+
+help:: help-plugins
+
+help-plugins::
+ $(verbose) :
+
+# Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015-2016, Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Fetch dependencies recursively (without building them).
+
+.PHONY: fetch-deps fetch-doc-deps fetch-rel-deps fetch-test-deps \
+ fetch-shell-deps
+
+.PHONY: $(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+fetch-deps: $(ERLANG_MK_RECURSIVE_DEPS_LIST)
+fetch-doc-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST)
+fetch-rel-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST)
+fetch-test-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST)
+fetch-shell-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+ifneq ($(SKIP_DEPS),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST):
+ $(verbose) :> $@
+else
+# By default, we fetch "normal" dependencies. They are also included no
+# matter which type of dependencies is requested.
+#
+# $(ALL_DEPS_DIRS) includes $(BUILD_DEPS).
+
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_DOC_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_REL_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_TEST_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_SHELL_DEPS_DIRS)
+
+# Allow fetch-deps to be used with $(DEP_TYPES) to fetch multiple types
+# of dependencies with a single target.
+ifneq ($(filter doc,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_DOC_DEPS_DIRS)
+endif
+ifneq ($(filter rel,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_REL_DEPS_DIRS)
+endif
+ifneq ($(filter test,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_TEST_DEPS_DIRS)
+endif
+ifneq ($(filter shell,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_SHELL_DEPS_DIRS)
+endif
+
+ERLANG_MK_RECURSIVE_TMP_LIST := $(abspath $(ERLANG_MK_TMP)/recursive-tmp-deps-$(shell echo $$PPID).log)
+
+$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): | $(ERLANG_MK_TMP)
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_RECURSIVE_TMP_LIST)
+endif
+ $(verbose) touch $(ERLANG_MK_RECURSIVE_TMP_LIST)
+ $(verbose) set -e; for dep in $^ ; do \
+ if ! grep -qs ^$$dep$$ $(ERLANG_MK_RECURSIVE_TMP_LIST); then \
+ echo $$dep >> $(ERLANG_MK_RECURSIVE_TMP_LIST); \
+ if grep -qs -E "^[[:blank:]]*include[[:blank:]]+(erlang\.mk|.*/erlang\.mk|.*ERLANG_MK_FILENAME.*)$$" \
+ $$dep/GNUmakefile $$dep/makefile $$dep/Makefile; then \
+ $(MAKE) -C $$dep fetch-deps \
+ IS_DEP=1 \
+ ERLANG_MK_RECURSIVE_TMP_LIST=$(ERLANG_MK_RECURSIVE_TMP_LIST); \
+ fi \
+ fi \
+ done
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) sort < $(ERLANG_MK_RECURSIVE_TMP_LIST) | \
+ uniq > $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted
+ $(verbose) cmp -s $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted $@ \
+ || mv $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted $@
+ $(verbose) rm -f $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted
+ $(verbose) rm $(ERLANG_MK_RECURSIVE_TMP_LIST)
+endif
+endif # ifneq ($(SKIP_DEPS),)
+
+# List dependencies recursively.
+
+.PHONY: list-deps list-doc-deps list-rel-deps list-test-deps \
+ list-shell-deps
+
+list-deps: $(ERLANG_MK_RECURSIVE_DEPS_LIST)
+list-doc-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST)
+list-rel-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST)
+list-test-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST)
+list-shell-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+list-deps list-doc-deps list-rel-deps list-test-deps list-shell-deps:
+ $(verbose) cat $^
+
+# Query dependencies recursively.
+
+.PHONY: query-deps query-doc-deps query-rel-deps query-test-deps \
+ query-shell-deps
+
+QUERY ?= name fetch_method repo version
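+# Each query-* target prints one line per dependency. With the default QUERY,
+# the columns are: <project>: <name> <fetch method> <repository> <version>.
+# For example (illustrative): make query-deps QUERY="name repo" prints only
+# the name and repository columns.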
+
+define query_target
+$(1): $(2) clean-tmp-query.log
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(4)
+endif
+ $(verbose) $(foreach dep,$(3),\
+ echo $(PROJECT): $(foreach q,$(QUERY),$(call query_$(q),$(dep))) >> $(4) ;)
+ $(if $(filter-out query-deps,$(1)),,\
+ $(verbose) set -e; for dep in $(3) ; do \
+ if grep -qs ^$$$$dep$$$$ $(ERLANG_MK_TMP)/query.log; then \
+ :; \
+ else \
+ echo $$$$dep >> $(ERLANG_MK_TMP)/query.log; \
+ $(MAKE) -C $(DEPS_DIR)/$$$$dep $$@ QUERY="$(QUERY)" IS_DEP=1 || true; \
+ fi \
+ done)
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) touch $(4)
+ $(verbose) cat $(4)
+endif
+endef
+
+clean-tmp-query.log:
+ifeq ($(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_TMP)/query.log
+endif
+
+$(eval $(call query_target,query-deps,$(ERLANG_MK_RECURSIVE_DEPS_LIST),$(BUILD_DEPS) $(DEPS),$(ERLANG_MK_QUERY_DEPS_FILE)))
+$(eval $(call query_target,query-doc-deps,$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST),$(DOC_DEPS),$(ERLANG_MK_QUERY_DOC_DEPS_FILE)))
+$(eval $(call query_target,query-rel-deps,$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST),$(REL_DEPS),$(ERLANG_MK_QUERY_REL_DEPS_FILE)))
+$(eval $(call query_target,query-test-deps,$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST),$(TEST_DEPS),$(ERLANG_MK_QUERY_TEST_DEPS_FILE)))
+$(eval $(call query_target,query-shell-deps,$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST),$(SHELL_DEPS),$(ERLANG_MK_QUERY_SHELL_DEPS_FILE)))
diff --git a/deps/rabbitmq_random_exchange/include/.gitignore b/deps/rabbitmq_random_exchange/include/.gitignore
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/deps/rabbitmq_random_exchange/include/.gitignore
diff --git a/deps/rabbitmq_random_exchange/rabbitmq-components.mk b/deps/rabbitmq_random_exchange/rabbitmq-components.mk
new file mode 100644
index 0000000000..b2a3be8b35
--- /dev/null
+++ b/deps/rabbitmq_random_exchange/rabbitmq-components.mk
@@ -0,0 +1,359 @@
+ifeq ($(.DEFAULT_GOAL),)
+# Define default goal to `all` because this file defines some targets
+# before the inclusion of erlang.mk leading to the wrong target becoming
+# the default.
+.DEFAULT_GOAL = all
+endif
+
+# PROJECT_VERSION defaults to:
+# 1. the version exported by rabbitmq-server-release;
+# 2. the version stored in `git-revisions.txt`, if it exists;
+# 3. a version based on git-describe(1), if it is a Git clone;
+# 4. 0.0.0
+
+PROJECT_VERSION := $(RABBITMQ_VERSION)
+
+ifeq ($(PROJECT_VERSION),)
+PROJECT_VERSION := $(shell \
+if test -f git-revisions.txt; then \
+ head -n1 git-revisions.txt | \
+ awk '{print $$$(words $(PROJECT_DESCRIPTION) version);}'; \
+else \
+ (git describe --dirty --abbrev=7 --tags --always --first-parent \
+ 2>/dev/null || echo rabbitmq_v0_0_0) | \
+ sed -e 's/^rabbitmq_v//' -e 's/^v//' -e 's/_/./g' -e 's/-/+/' \
+ -e 's/-/./g'; \
+fi)
+endif
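+# Note: the awk expression above prints the field that follows the words of
+# PROJECT_DESCRIPTION on the first line of git-revisions.txt, which is
+# assumed to hold the version.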
+
+# --------------------------------------------------------------------
+# RabbitMQ components.
+# --------------------------------------------------------------------
+
+# For RabbitMQ repositories, we want to check out branches which match
+# the parent project. For instance, if the parent project is on a
+# release tag, dependencies must be on the same release tag. If the
+# parent project is on a topic branch, dependencies must be on the same
+# topic branch or fall back to `stable` or `master`, whichever was the
+# base of the topic branch.
+
+dep_amqp_client = git_rmq rabbitmq-erlang-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_amqp10_client = git_rmq rabbitmq-amqp1.0-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_amqp10_common = git_rmq rabbitmq-amqp1.0-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbit = git_rmq rabbitmq-server $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbit_common = git_rmq rabbitmq-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_amqp1_0 = git_rmq rabbitmq-amqp1.0 $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_amqp = git_rmq rabbitmq-auth-backend-amqp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_cache = git_rmq rabbitmq-auth-backend-cache $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_http = git_rmq rabbitmq-auth-backend-http $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_ldap = git_rmq rabbitmq-auth-backend-ldap $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_oauth2 = git_rmq rabbitmq-auth-backend-oauth2 $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_mechanism_ssl = git_rmq rabbitmq-auth-mechanism-ssl $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_aws = git_rmq rabbitmq-aws $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_boot_steps_visualiser = git_rmq rabbitmq-boot-steps-visualiser $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_cli = git_rmq rabbitmq-cli $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_codegen = git_rmq rabbitmq-codegen $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_consistent_hash_exchange = git_rmq rabbitmq-consistent-hash-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_ct_client_helpers = git_rmq rabbitmq-ct-client-helpers $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_ct_helpers = git_rmq rabbitmq-ct-helpers $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_delayed_message_exchange = git_rmq rabbitmq-delayed-message-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_dotnet_client = git_rmq rabbitmq-dotnet-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_event_exchange = git_rmq rabbitmq-event-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_federation = git_rmq rabbitmq-federation $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_federation_management = git_rmq rabbitmq-federation-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_java_client = git_rmq rabbitmq-java-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_client = git_rmq rabbitmq-jms-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_cts = git_rmq rabbitmq-jms-cts $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_topic_exchange = git_rmq rabbitmq-jms-topic-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_lvc_exchange = git_rmq rabbitmq-lvc-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management = git_rmq rabbitmq-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_agent = git_rmq rabbitmq-management-agent $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_exchange = git_rmq rabbitmq-management-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_themes = git_rmq rabbitmq-management-themes $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_message_timestamp = git_rmq rabbitmq-message-timestamp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_metronome = git_rmq rabbitmq-metronome $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_mqtt = git_rmq rabbitmq-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_objc_client = git_rmq rabbitmq-objc-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_aws = git_rmq rabbitmq-peer-discovery-aws $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_common = git_rmq rabbitmq-peer-discovery-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_consul = git_rmq rabbitmq-peer-discovery-consul $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_etcd = git_rmq rabbitmq-peer-discovery-etcd $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_k8s = git_rmq rabbitmq-peer-discovery-k8s $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_prometheus = git_rmq rabbitmq-prometheus $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_random_exchange = git_rmq rabbitmq-random-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_recent_history_exchange = git_rmq rabbitmq-recent-history-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_routing_node_stamp = git_rmq rabbitmq-routing-node-stamp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_rtopic_exchange = git_rmq rabbitmq-rtopic-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_server_release = git_rmq rabbitmq-server-release $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_sharding = git_rmq rabbitmq-sharding $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_shovel = git_rmq rabbitmq-shovel $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_shovel_management = git_rmq rabbitmq-shovel-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_stomp = git_rmq rabbitmq-stomp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_stream = git_rmq rabbitmq-stream $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_toke = git_rmq rabbitmq-toke $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_top = git_rmq rabbitmq-top $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_tracing = git_rmq rabbitmq-tracing $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_trust_store = git_rmq rabbitmq-trust-store $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_test = git_rmq rabbitmq-test $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_dispatch = git_rmq rabbitmq-web-dispatch $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_stomp = git_rmq rabbitmq-web-stomp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_stomp_examples = git_rmq rabbitmq-web-stomp-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt = git_rmq rabbitmq-web-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt_examples = git_rmq rabbitmq-web-mqtt-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_website = git_rmq rabbitmq-website $(current_rmq_ref) $(base_rmq_ref) live master
+dep_toke = git_rmq toke $(current_rmq_ref) $(base_rmq_ref) master
+
+dep_rabbitmq_public_umbrella = git_rmq rabbitmq-public-umbrella $(current_rmq_ref) $(base_rmq_ref) master
+
+# Third-party dependencies version pinning.
+#
+# We do this in this file, which is copied into all projects, to ensure
+# all projects use the same versions. It avoids conflicts and makes it
+# possible to work with rabbitmq-public-umbrella.
+
+dep_accept = hex 0.3.5
+dep_cowboy = hex 2.8.0
+dep_cowlib = hex 2.9.1
+dep_jsx = hex 2.11.0
+dep_lager = hex 3.8.0
+dep_prometheus = git https://github.com/deadtrickster/prometheus.erl.git master
+dep_ra = git https://github.com/rabbitmq/ra.git master
+dep_ranch = hex 1.7.1
+dep_recon = hex 2.5.1
+dep_observer_cli = hex 1.5.4
+dep_stdout_formatter = hex 0.2.4
+dep_sysmon_handler = hex 1.3.0
+
+RABBITMQ_COMPONENTS = amqp_client \
+ amqp10_common \
+ amqp10_client \
+ rabbit \
+ rabbit_common \
+ rabbitmq_amqp1_0 \
+ rabbitmq_auth_backend_amqp \
+ rabbitmq_auth_backend_cache \
+ rabbitmq_auth_backend_http \
+ rabbitmq_auth_backend_ldap \
+ rabbitmq_auth_backend_oauth2 \
+ rabbitmq_auth_mechanism_ssl \
+ rabbitmq_aws \
+ rabbitmq_boot_steps_visualiser \
+ rabbitmq_cli \
+ rabbitmq_codegen \
+ rabbitmq_consistent_hash_exchange \
+ rabbitmq_ct_client_helpers \
+ rabbitmq_ct_helpers \
+ rabbitmq_delayed_message_exchange \
+ rabbitmq_dotnet_client \
+ rabbitmq_event_exchange \
+ rabbitmq_federation \
+ rabbitmq_federation_management \
+ rabbitmq_java_client \
+ rabbitmq_jms_client \
+ rabbitmq_jms_cts \
+ rabbitmq_jms_topic_exchange \
+ rabbitmq_lvc_exchange \
+ rabbitmq_management \
+ rabbitmq_management_agent \
+ rabbitmq_management_exchange \
+ rabbitmq_management_themes \
+ rabbitmq_message_timestamp \
+ rabbitmq_metronome \
+ rabbitmq_mqtt \
+ rabbitmq_objc_client \
+ rabbitmq_peer_discovery_aws \
+ rabbitmq_peer_discovery_common \
+ rabbitmq_peer_discovery_consul \
+ rabbitmq_peer_discovery_etcd \
+ rabbitmq_peer_discovery_k8s \
+ rabbitmq_prometheus \
+ rabbitmq_random_exchange \
+ rabbitmq_recent_history_exchange \
+ rabbitmq_routing_node_stamp \
+ rabbitmq_rtopic_exchange \
+ rabbitmq_server_release \
+ rabbitmq_sharding \
+ rabbitmq_shovel \
+ rabbitmq_shovel_management \
+ rabbitmq_stomp \
+ rabbitmq_stream \
+ rabbitmq_toke \
+ rabbitmq_top \
+ rabbitmq_tracing \
+ rabbitmq_trust_store \
+ rabbitmq_web_dispatch \
+ rabbitmq_web_mqtt \
+ rabbitmq_web_mqtt_examples \
+ rabbitmq_web_stomp \
+ rabbitmq_web_stomp_examples \
+ rabbitmq_website
+
+# Erlang.mk does not rebuild dependencies by default once they have
+# been compiled, except for those listed in the `$(FORCE_REBUILD)`
+# variable.
+#
+# We want all RabbitMQ components to always be rebuilt: this eases
+# working on several components at the same time.
+
+FORCE_REBUILD = $(RABBITMQ_COMPONENTS)
+
+# Several components have a custom erlang.mk/build.config, mainly
+# to disable eunit. Therefore, we can't use the top-level project's
+# erlang.mk copy.
+NO_AUTOPATCH += $(RABBITMQ_COMPONENTS)
+
+ifeq ($(origin current_rmq_ref),undefined)
+ifneq ($(wildcard .git),)
+current_rmq_ref := $(shell (\
+ ref=$$(LANG=C git branch --list | awk '/^\* \(.*detached / {ref=$$0; sub(/.*detached [^ ]+ /, "", ref); sub(/\)$$/, "", ref); print ref; exit;} /^\* / {ref=$$0; sub(/^\* /, "", ref); print ref; exit}');\
+ if test "$$(git rev-parse --short HEAD)" != "$$ref"; then echo "$$ref"; fi))
+else
+current_rmq_ref := master
+endif
+endif
+export current_rmq_ref
+
+ifeq ($(origin base_rmq_ref),undefined)
+ifneq ($(wildcard .git),)
+possible_base_rmq_ref := master
+ifeq ($(possible_base_rmq_ref),$(current_rmq_ref))
+base_rmq_ref := $(current_rmq_ref)
+else
+base_rmq_ref := $(shell \
+ (git rev-parse --verify -q master >/dev/null && \
+ git rev-parse --verify -q $(possible_base_rmq_ref) >/dev/null && \
+ git merge-base --is-ancestor $$(git merge-base master HEAD) $(possible_base_rmq_ref) && \
+ echo $(possible_base_rmq_ref)) || \
+ echo master)
+endif
+else
+base_rmq_ref := master
+endif
+endif
+export base_rmq_ref
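+
+# For example (a sketch, not exhaustive): on a topic branch forked from
+# master, current_rmq_ref is the topic branch name and base_rmq_ref
+# resolves to master, so dependencies are tried at the topic branch
+# first and fall back to master.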
+
+# Repository URL selection.
+#
+# First, we infer other components' location from the current project
+# repository URL, if it's a Git repository:
+# - We take the "origin" remote URL as the base
+# - The current project name and repository name are replaced by the
+# target's properties:
+# eg. rabbitmq-common is replaced by rabbitmq-codegen
+# eg. rabbit_common is replaced by rabbitmq_codegen
+#
+# If cloning from this computed location fails, we fall back to the
+# RabbitMQ upstream, which is hosted on GitHub.
+
+# Macro to transform eg. "rabbit_common" to "rabbitmq-common".
+rmq_cmp_repo_name = $(word 2,$(dep_$(1)))
+
+# Upstream URL for the current project.
+RABBITMQ_COMPONENT_REPO_NAME := $(call rmq_cmp_repo_name,$(PROJECT))
+RABBITMQ_UPSTREAM_FETCH_URL ?= https://github.com/rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git
+RABBITMQ_UPSTREAM_PUSH_URL ?= git@github.com:rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git
+
+# Current URL for the current project. If this is not a Git clone,
+# default to the upstream Git repository.
+ifneq ($(wildcard .git),)
+git_origin_fetch_url := $(shell git config remote.origin.url)
+git_origin_push_url := $(shell git config remote.origin.pushurl || git config remote.origin.url)
+RABBITMQ_CURRENT_FETCH_URL ?= $(git_origin_fetch_url)
+RABBITMQ_CURRENT_PUSH_URL ?= $(git_origin_push_url)
+else
+RABBITMQ_CURRENT_FETCH_URL ?= $(RABBITMQ_UPSTREAM_FETCH_URL)
+RABBITMQ_CURRENT_PUSH_URL ?= $(RABBITMQ_UPSTREAM_PUSH_URL)
+endif
+
+# Macro to replace the following pattern:
+# 1. /foo.git -> /bar.git
+# 2. /foo -> /bar
+# 3. /foo/ -> /bar/
+subst_repo_name = $(patsubst %/$(1)/%,%/$(2)/%,$(patsubst %/$(1),%/$(2),$(patsubst %/$(1).git,%/$(2).git,$(3))))
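+# For example:
+#   $(call subst_repo_name,rabbitmq-common,rabbitmq-codegen,https://github.com/rabbitmq/rabbitmq-common.git)
+# yields "https://github.com/rabbitmq/rabbitmq-codegen.git".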
+
+# Macro to replace both the project's name (eg. "rabbit_common") and
+# repository name (eg. "rabbitmq-common") by the target's equivalent.
+#
+# This macro is kept on one line because we don't want whitespaces in
+# the returned value, as it's used in $(dep_fetch_git_rmq) in a shell
+# single-quoted string.
+dep_rmq_repo = $(if $(dep_$(2)),$(call subst_repo_name,$(PROJECT),$(2),$(call subst_repo_name,$(RABBITMQ_COMPONENT_REPO_NAME),$(call rmq_cmp_repo_name,$(2)),$(1))),$(pkg_$(1)_repo))
+
+dep_rmq_commits = $(if $(dep_$(1)), \
+ $(wordlist 3,$(words $(dep_$(1))),$(dep_$(1))), \
+ $(pkg_$(1)_commit))
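+# For example, with the dep_* lines above, $(call dep_rmq_commits,rabbit)
+# expands to "$(current_rmq_ref) $(base_rmq_ref) master", i.e. the refs
+# tried in that order when fetching rabbitmq-server.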
+
+define dep_fetch_git_rmq
+ fetch_url1='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_FETCH_URL),$(1))'; \
+ fetch_url2='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_FETCH_URL),$(1))'; \
+ if test "$$$$fetch_url1" != '$(RABBITMQ_CURRENT_FETCH_URL)' && \
+ git clone -q -n -- "$$$$fetch_url1" $(DEPS_DIR)/$(call dep_name,$(1)); then \
+ fetch_url="$$$$fetch_url1"; \
+ push_url='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_PUSH_URL),$(1))'; \
+ elif git clone -q -n -- "$$$$fetch_url2" $(DEPS_DIR)/$(call dep_name,$(1)); then \
+ fetch_url="$$$$fetch_url2"; \
+ push_url='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_PUSH_URL),$(1))'; \
+ fi; \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && ( \
+ $(foreach ref,$(call dep_rmq_commits,$(1)), \
+ git checkout -q $(ref) >/dev/null 2>&1 || \
+ ) \
+ (echo "error: no valid pathspec among: $(call dep_rmq_commits,$(1))" \
+ 1>&2 && false) ) && \
+ (test "$$$$fetch_url" = "$$$$push_url" || \
+ git remote set-url --push origin "$$$$push_url")
+endef
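+
+# In short, dep_fetch_git_rmq first tries to clone the dependency from a
+# URL derived from this repository's "origin" remote, then falls back to
+# the GitHub upstream, and finally checks out the first ref from
+# dep_rmq_commits that exists in the clone.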
+
+# --------------------------------------------------------------------
+# Component distribution.
+# --------------------------------------------------------------------
+
+list-dist-deps::
+ @:
+
+prepare-dist::
+ @:
+
+# --------------------------------------------------------------------
+# Umbrella-specific settings.
+# --------------------------------------------------------------------
+
+# If the top-level project is a RabbitMQ component, we override
+# $(DEPS_DIR) for this project to point to the top-level's one.
+#
+# We also verify that the guessed DEPS_DIR is actually named `deps`,
+# to rule out any situation where it is a coincidence that we found a
+# `rabbitmq-components.mk` in upper directories.
+
+possible_deps_dir_1 = $(abspath ..)
+possible_deps_dir_2 = $(abspath ../../..)
+
+ifeq ($(notdir $(possible_deps_dir_1)),deps)
+ifneq ($(wildcard $(possible_deps_dir_1)/../rabbitmq-components.mk),)
+deps_dir_overriden = 1
+DEPS_DIR ?= $(possible_deps_dir_1)
+DISABLE_DISTCLEAN = 1
+endif
+endif
+
+ifeq ($(deps_dir_overriden),)
+ifeq ($(notdir $(possible_deps_dir_2)),deps)
+ifneq ($(wildcard $(possible_deps_dir_2)/../rabbitmq-components.mk),)
+deps_dir_overriden = 1
+DEPS_DIR ?= $(possible_deps_dir_2)
+DISABLE_DISTCLEAN = 1
+endif
+endif
+endif
+
+ifneq ($(wildcard UMBRELLA.md),)
+DISABLE_DISTCLEAN = 1
+endif
+
+# We disable `make distclean` so $(DEPS_DIR) is not accidentally removed.
+
+ifeq ($(DISABLE_DISTCLEAN),1)
+ifneq ($(filter distclean distclean-deps,$(MAKECMDGOALS)),)
+SKIP_DEPS = 1
+endif
+endif
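+
+# As a consequence, running `make distclean` from a plugin checked out
+# under an umbrella's `deps` directory leaves the shared $(DEPS_DIR),
+# and thus sibling checkouts, intact.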
diff --git a/deps/rabbitmq_random_exchange/src/rabbit_exchange_type_random.erl b/deps/rabbitmq_random_exchange/src/rabbit_exchange_type_random.erl
new file mode 100644
index 0000000000..0c30664cd4
--- /dev/null
+++ b/deps/rabbitmq_random_exchange/src/rabbit_exchange_type_random.erl
@@ -0,0 +1,60 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_exchange_type_random).
+-behaviour(rabbit_exchange_type).
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-rabbit_boot_step({?MODULE, [
+ {description, "exchange type random"},
+ {mfa, {rabbit_registry, register, [exchange, <<"x-random">>, ?MODULE]}},
+ {requires, rabbit_registry},
+ {enables, kernel_ready}
+]}).
+
+-export([
+ add_binding/3,
+ assert_args_equivalence/2,
+ create/2,
+ delete/3,
+ policy_changed/2,
+ description/0,
+ recover/2,
+ remove_bindings/3,
+ validate_binding/2,
+ route/2,
+ serialise_events/0,
+ validate/1,
+ info/1,
+ info/2
+]).
+
+description() ->
+ [{name, <<"x-random">>}, {description, <<"Randomly picks a binding (queue) to route via (to).">>}].
+
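+%% Collect every binding for this exchange (routing key '_' matches all
+%% bindings); with more than one match, route to a single destination
+%% chosen uniformly at random.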
+route(_X=#exchange{name = Name}, _Delivery) ->
+ Matches = rabbit_router:match_routing_key(Name, ['_']),
+ case length(Matches) of
+ Len when Len < 2 -> Matches;
+ Len ->
+ Rand = rand:uniform(Len),
+ [lists:nth(Rand, Matches)]
+ end.
+
+info(_X) -> [].
+info(_X, _) -> [].
+serialise_events() -> false.
+validate(_X) -> ok.
+create(_Tx, _X) -> ok.
+recover(_X, _Bs) -> ok.
+delete(_Tx, _X, _Bs) -> ok.
+policy_changed(_X1, _X2) -> ok.
+add_binding(_Tx, _X, _B) -> ok.
+remove_bindings(_Tx, _X, _Bs) -> ok.
+validate_binding(_X, _B) -> ok.
+assert_args_equivalence(X, Args) ->
+ rabbit_exchange:assert_args_equivalence(X, Args).
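+
+%% Usage sketch (assumes the `amqp_client` library; connection parameters
+%% and the exchange name are illustrative):
+%%
+%%   {ok, Conn} = amqp_connection:start(#amqp_params_network{}),
+%%   {ok, Ch} = amqp_connection:open_channel(Conn),
+%%   #'exchange.declare_ok'{} =
+%%       amqp_channel:call(Ch, #'exchange.declare'{exchange = <<"rnd">>,
+%%                                                 type = <<"x-random">>}),
+%%   %% each queue bound to <<"rnd">> then receives a random subset of
+%%   %% the messages published to the exchange.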
diff --git a/deps/rabbitmq_random_exchange/test/.gitkeep b/deps/rabbitmq_random_exchange/test/.gitkeep
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/deps/rabbitmq_random_exchange/test/.gitkeep
diff --git a/deps/rabbitmq_recent_history_exchange/.gitignore b/deps/rabbitmq_recent_history_exchange/.gitignore
new file mode 100644
index 0000000000..ce64405dd2
--- /dev/null
+++ b/deps/rabbitmq_recent_history_exchange/.gitignore
@@ -0,0 +1,19 @@
+.DS_Store
+erl_crash.dump
+.sw?
+.*.sw?
+*.beam
+/.erlang.mk/
+/cover/
+/deps/
+/doc/
+/ebin/
+/escript/
+/escript.lock
+/logs/
+/plugins/
+/plugins.lock
+/sbin/
+/sbin.lock
+
+rabbitmq_recent_history_exchange.d
diff --git a/deps/rabbitmq_recent_history_exchange/.travis.yml b/deps/rabbitmq_recent_history_exchange/.travis.yml
new file mode 100644
index 0000000000..0525f47962
--- /dev/null
+++ b/deps/rabbitmq_recent_history_exchange/.travis.yml
@@ -0,0 +1,61 @@
+# vim:sw=2:et:
+
+os: linux
+dist: xenial
+language: elixir
+notifications:
+ email:
+ recipients:
+ - alerts@rabbitmq.com
+ on_success: never
+ on_failure: always
+addons:
+ apt:
+ packages:
+ - awscli
+cache:
+ apt: true
+env:
+ global:
+ - secure: c1blwwWvP1F9FYSbQUby5u5wFaiYo9yj6Zb3v0LQKVW2/VSybNavYg0MJTDKDe7a4Uz6egD9Vj1C0T7R9hFNSbmuqyTF1bn38OPmKs3judwU+Jmh/3Yf3cY4LoHQ84FI0yt7fhn5pe1n+tfhGDY5u/NX5l7Ygt5P/Q64vhBYmRk=
+ - secure: TMnfbPncejldrsxNFeIUWtIMdZz2kYSYgwCLjHUIeLeV4rUbioEM/C2E1BFLdmW2ep1jSXZ8UaWVXg5zKI99USSDibxaV4Kgy8YEIiVqPBEj+rzGTHQcsHYH+6XmjJElrAyJqxlTmfwFZ0skGkxCgB6Dbfvl5z5g0o0c9G1vlJI=
+
+ # $base_rmq_ref is used by rabbitmq-components.mk to select the
+ # appropriate branch for dependencies.
+ - base_rmq_ref=master
+
+elixir:
+ - '1.9'
+otp_release:
+ - '21.3'
+ - '22.2'
+
+install:
+ # This project being an Erlang one (we just set the language to Elixir
+ # to ensure Elixir is installed), we don't want Travis to run mix(1)
+ # automatically, as it would break.
+ skip
+
+script:
+ # $current_rmq_ref is also used by rabbitmq-components.mk to select
+ # the appropriate branch for dependencies.
+ - make check-rabbitmq-components.mk
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+ - make xref
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+ - make tests
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+
+after_failure:
+ - |
+ cd "$TRAVIS_BUILD_DIR"
+ if test -d logs && test "$AWS_ACCESS_KEY_ID" && test "$AWS_SECRET_ACCESS_KEY"; then
+ archive_name="$(basename "$TRAVIS_REPO_SLUG")-$TRAVIS_JOB_NUMBER"
+
+ tar -c --transform "s/^logs/${archive_name}/" -f - logs | \
+ xz > "${archive_name}.tar.xz"
+
+ aws s3 cp "${archive_name}.tar.xz" s3://server-release-pipeline/travis-ci-logs/ \
+ --region eu-west-1 \
+ --acl public-read
+ fi
diff --git a/deps/rabbitmq_recent_history_exchange/CODE_OF_CONDUCT.md b/deps/rabbitmq_recent_history_exchange/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000000..08697906fd
--- /dev/null
+++ b/deps/rabbitmq_recent_history_exchange/CODE_OF_CONDUCT.md
@@ -0,0 +1,44 @@
+# Contributor Code of Conduct
+
+As contributors and maintainers of this project, and in the interest of fostering an open
+and welcoming community, we pledge to respect all people who contribute through reporting
+issues, posting feature requests, updating documentation, submitting pull requests or
+patches, and other activities.
+
+We are committed to making participation in this project a harassment-free experience for
+everyone, regardless of level of experience, gender, gender identity and expression,
+sexual orientation, disability, personal appearance, body size, race, ethnicity, age,
+religion, or nationality.
+
+Examples of unacceptable behavior by participants include:
+
+ * The use of sexualized language or imagery
+ * Personal attacks
+ * Trolling or insulting/derogatory comments
+ * Public or private harassment
+ * Publishing others' private information, such as physical or electronic addresses,
+ without explicit permission
+ * Other unethical or unprofessional conduct
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments,
+commits, code, wiki edits, issues, and other contributions that are not aligned to this
+Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors
+that they deem inappropriate, threatening, offensive, or harmful.
+
+By adopting this Code of Conduct, project maintainers commit themselves to fairly and
+consistently applying these principles to every aspect of managing this project. Project
+maintainers who do not follow or enforce the Code of Conduct may be permanently removed
+from the project team.
+
+This Code of Conduct applies both within project spaces and in public spaces when an
+individual is representing the project or its community.
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by
+contacting a project maintainer at [info@rabbitmq.com](mailto:info@rabbitmq.com). All complaints will
+be reviewed and investigated and will result in a response that is deemed necessary and
+appropriate to the circumstances. Maintainers are obligated to maintain confidentiality
+with regard to the reporter of an incident.
+
+This Code of Conduct is adapted from the
+[Contributor Covenant](https://contributor-covenant.org), version 1.3.0, available at
+[contributor-covenant.org/version/1/3/0/](https://contributor-covenant.org/version/1/3/0/)
diff --git a/deps/rabbitmq_recent_history_exchange/CONTRIBUTING.md b/deps/rabbitmq_recent_history_exchange/CONTRIBUTING.md
new file mode 100644
index 0000000000..23a92fef9c
--- /dev/null
+++ b/deps/rabbitmq_recent_history_exchange/CONTRIBUTING.md
@@ -0,0 +1,38 @@
+## Overview
+
+RabbitMQ projects use pull requests to discuss, collaborate on, and accept code contributions.
+Pull requests are the primary place for discussing code changes.
+
+## How to Contribute
+
+The process is fairly standard:
+
+ * Fork the repository or repositories you plan on contributing to
+ * Clone [RabbitMQ umbrella repository](https://github.com/rabbitmq/rabbitmq-public-umbrella)
+ * `cd umbrella`, `make co`
+ * Create a branch with a descriptive name in the relevant repositories
+ * Make your changes, run tests, commit with a [descriptive message](https://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html), push to your fork
+ * Submit pull requests with an explanation of what has been changed and **why**
+ * Submit a filled out and signed [Contributor Agreement](https://github.com/rabbitmq/ca#how-to-submit) if needed (see below)
+ * Be patient. We will get to your pull request eventually
+
+If what you are going to work on is a substantial change, please first ask the core team
+for their opinion on the [RabbitMQ mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
+
+
+## Code of Conduct
+
+See [CODE_OF_CONDUCT.md](./CODE_OF_CONDUCT.md).
+
+
+## Contributor Agreement
+
+If you want to contribute a non-trivial change, please submit a signed copy of our
+[Contributor Agreement](https://github.com/rabbitmq/ca#how-to-submit) around the time
+you submit your pull request. This will make it much easier (in some cases, possible)
+for the RabbitMQ team at Pivotal to merge your contribution.
+
+
+## Where to Ask Questions
+
+If something isn't clear, feel free to ask on our [mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
diff --git a/deps/rabbitmq_recent_history_exchange/LICENSE b/deps/rabbitmq_recent_history_exchange/LICENSE
new file mode 100644
index 0000000000..f2da65d175
--- /dev/null
+++ b/deps/rabbitmq_recent_history_exchange/LICENSE
@@ -0,0 +1,4 @@
+This package is licensed under the MPL 2.0. For the MPL 2.0, please see LICENSE-MPL-RabbitMQ.
+
+If you have any questions regarding licensing, please contact us at
+info@rabbitmq.com.
diff --git a/deps/rabbitmq_recent_history_exchange/LICENSE-MPL-RabbitMQ b/deps/rabbitmq_recent_history_exchange/LICENSE-MPL-RabbitMQ
new file mode 100644
index 0000000000..14e2f777f6
--- /dev/null
+++ b/deps/rabbitmq_recent_history_exchange/LICENSE-MPL-RabbitMQ
@@ -0,0 +1,373 @@
+Mozilla Public License Version 2.0
+==================================
+
+1. Definitions
+--------------
+
+1.1. "Contributor"
+ means each individual or legal entity that creates, contributes to
+ the creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+ means the combination of the Contributions of others (if any) used
+ by a Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+ means Source Code Form to which the initial Contributor has attached
+ the notice in Exhibit A, the Executable Form of such Source Code
+ Form, and Modifications of such Source Code Form, in each case
+ including portions thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ (a) that the initial Contributor has attached the notice described
+ in Exhibit B to the Covered Software; or
+
+ (b) that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the
+ terms of a Secondary License.
+
+1.6. "Executable Form"
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+ means a work that combines Covered Software with other material, in
+ a separate file or files, that is not Covered Software.
+
+1.8. "License"
+ means this document.
+
+1.9. "Licensable"
+ means having the right to grant, to the maximum extent possible,
+ whether at the time of the initial grant or subsequently, any and
+ all of the rights conveyed by this License.
+
+1.10. "Modifications"
+ means any of the following:
+
+ (a) any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered
+ Software; or
+
+ (b) any new file in Source Code Form that contains any Covered
+ Software.
+
+1.11. "Patent Claims" of a Contributor
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the
+ License, by the making, using, selling, offering for sale, having
+ made, import, or transfer of either its Contributions or its
+ Contributor Version.
+
+1.12. "Secondary License"
+ means either the GNU General Public License, Version 2.0, the GNU
+ Lesser General Public License, Version 2.1, the GNU Affero General
+ Public License, Version 3.0, or any later versions of those
+ licenses.
+
+1.13. "Source Code Form"
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that
+ controls, is controlled by, or is under common control with You. For
+ purposes of this definition, "control" means (a) the power, direct
+ or indirect, to cause the direction or management of such entity,
+ whether by contract or otherwise, or (b) ownership of more than
+ fifty percent (50%) of the outstanding shares or beneficial
+ ownership of such entity.
+
+2. License Grants and Conditions
+--------------------------------
+
+2.1. Grants
+
+Each Contributor hereby grants You a world-wide, royalty-free,
+non-exclusive license:
+
+(a) under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+(b) under Patent Claims of such Contributor to make, use, sell, offer
+ for sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+The licenses granted in Section 2.1 with respect to any Contribution
+become effective for each Contribution on the date the Contributor first
+distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+The licenses granted in this Section 2 are the only rights granted under
+this License. No additional rights or licenses will be implied from the
+distribution or licensing of Covered Software under this License.
+Notwithstanding Section 2.1(b) above, no patent license is granted by a
+Contributor:
+
+(a) for any code that a Contributor has removed from Covered Software;
+ or
+
+(b) for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+(c) under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+This License does not grant any rights in the trademarks, service marks,
+or logos of any Contributor (except as may be necessary to comply with
+the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+No Contributor makes additional grants as a result of Your choice to
+distribute the Covered Software under a subsequent version of this
+License (see Section 10.2) or under the terms of a Secondary License (if
+permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+Each Contributor represents that the Contributor believes its
+Contributions are its original creation(s) or it has sufficient rights
+to grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+This License is not intended to limit any rights You have under
+applicable copyright doctrines of fair use, fair dealing, or other
+equivalents.
+
+2.7. Conditions
+
+Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
+in Section 2.1.
+
+3. Responsibilities
+-------------------
+
+3.1. Distribution of Source Form
+
+All distribution of Covered Software in Source Code Form, including any
+Modifications that You create or to which You contribute, must be under
+the terms of this License. You must inform recipients that the Source
+Code Form of the Covered Software is governed by the terms of this
+License, and how they can obtain a copy of this License. You may not
+attempt to alter or restrict the recipients' rights in the Source Code
+Form.
+
+3.2. Distribution of Executable Form
+
+If You distribute Covered Software in Executable Form then:
+
+(a) such Covered Software must also be made available in Source Code
+ Form, as described in Section 3.1, and You must inform recipients of
+ the Executable Form how they can obtain a copy of such Source Code
+ Form by reasonable means in a timely manner, at a charge no more
+ than the cost of distribution to the recipient; and
+
+(b) You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter
+ the recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+You may create and distribute a Larger Work under terms of Your choice,
+provided that You also comply with the requirements of this License for
+the Covered Software. If the Larger Work is a combination of Covered
+Software with a work governed by one or more Secondary Licenses, and the
+Covered Software is not Incompatible With Secondary Licenses, this
+License permits You to additionally distribute such Covered Software
+under the terms of such Secondary License(s), so that the recipient of
+the Larger Work may, at their option, further distribute the Covered
+Software under the terms of either this License or such Secondary
+License(s).
+
+3.4. Notices
+
+You may not remove or alter the substance of any license notices
+(including copyright notices, patent notices, disclaimers of warranty,
+or limitations of liability) contained within the Source Code Form of
+the Covered Software, except that You may alter any license notices to
+the extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+You may choose to offer, and to charge a fee for, warranty, support,
+indemnity or liability obligations to one or more recipients of Covered
+Software. However, You may do so only on Your own behalf, and not on
+behalf of any Contributor. You must make it absolutely clear that any
+such warranty, support, indemnity, or liability obligation is offered by
+You alone, and You hereby agree to indemnify every Contributor for any
+liability incurred by such Contributor as a result of warranty, support,
+indemnity or liability terms You offer. You may include additional
+disclaimers of warranty and limitations of liability specific to any
+jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+---------------------------------------------------
+
+If it is impossible for You to comply with any of the terms of this
+License with respect to some or all of the Covered Software due to
+statute, judicial order, or regulation then You must: (a) comply with
+the terms of this License to the maximum extent possible; and (b)
+describe the limitations and the code they affect. Such description must
+be placed in a text file included with all distributions of the Covered
+Software under this License. Except to the extent prohibited by statute
+or regulation, such description must be sufficiently detailed for a
+recipient of ordinary skill to be able to understand it.
+
+5. Termination
+--------------
+
+5.1. The rights granted under this License will terminate automatically
+if You fail to comply with any of its terms. However, if You become
+compliant, then the rights granted under this License from a particular
+Contributor are reinstated (a) provisionally, unless and until such
+Contributor explicitly and finally terminates Your grants, and (b) on an
+ongoing basis, if such Contributor fails to notify You of the
+non-compliance by some reasonable means prior to 60 days after You have
+come back into compliance. Moreover, Your grants from a particular
+Contributor are reinstated on an ongoing basis if such Contributor
+notifies You of the non-compliance by some reasonable means, this is the
+first time You have received notice of non-compliance with this License
+from such Contributor, and You become compliant prior to 30 days after
+Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+infringement claim (excluding declaratory judgment actions,
+counter-claims, and cross-claims) alleging that a Contributor Version
+directly or indirectly infringes any patent, then the rights granted to
+You by any and all Contributors for the Covered Software under Section
+2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all
+end user license agreements (excluding distributors and resellers) which
+have been validly granted by You or Your distributors under this License
+prior to termination shall survive termination.
+
+************************************************************************
+* *
+* 6. Disclaimer of Warranty *
+* ------------------------- *
+* *
+* Covered Software is provided under this License on an "as is" *
+* basis, without warranty of any kind, either expressed, implied, or *
+* statutory, including, without limitation, warranties that the *
+* Covered Software is free of defects, merchantable, fit for a *
+* particular purpose or non-infringing. The entire risk as to the *
+* quality and performance of the Covered Software is with You. *
+* Should any Covered Software prove defective in any respect, You *
+* (not any Contributor) assume the cost of any necessary servicing, *
+* repair, or correction. This disclaimer of warranty constitutes an *
+* essential part of this License. No use of any Covered Software is *
+* authorized under this License except under this disclaimer. *
+* *
+************************************************************************
+
+************************************************************************
+* *
+* 7. Limitation of Liability *
+* -------------------------- *
+* *
+* Under no circumstances and under no legal theory, whether tort *
+* (including negligence), contract, or otherwise, shall any *
+* Contributor, or anyone who distributes Covered Software as *
+* permitted above, be liable to You for any direct, indirect, *
+* special, incidental, or consequential damages of any character *
+* including, without limitation, damages for lost profits, loss of *
+* goodwill, work stoppage, computer failure or malfunction, or any *
+* and all other commercial damages or losses, even if such party *
+* shall have been informed of the possibility of such damages. This *
+* limitation of liability shall not apply to liability for death or *
+* personal injury resulting from such party's negligence to the *
+* extent applicable law prohibits such limitation. Some *
+* jurisdictions do not allow the exclusion or limitation of *
+* incidental or consequential damages, so this exclusion and *
+* limitation may not apply to You. *
+* *
+************************************************************************
+
+8. Litigation
+-------------
+
+Any litigation relating to this License may be brought only in the
+courts of a jurisdiction where the defendant maintains its principal
+place of business and such litigation shall be governed by laws of that
+jurisdiction, without reference to its conflict-of-law provisions.
+Nothing in this Section shall prevent a party's ability to bring
+cross-claims or counter-claims.
+
+9. Miscellaneous
+----------------
+
+This License represents the complete agreement concerning the subject
+matter hereof. If any provision of this License is held to be
+unenforceable, such provision shall be reformed only to the extent
+necessary to make it enforceable. Any law or regulation which provides
+that the language of a contract shall be construed against the drafter
+shall not be used to construe this License against a Contributor.
+
+10. Versions of the License
+---------------------------
+
+10.1. New Versions
+
+Mozilla Foundation is the license steward. Except as provided in Section
+10.3, no one other than the license steward has the right to modify or
+publish new versions of this License. Each version will be given a
+distinguishing version number.
+
+10.2. Effect of New Versions
+
+You may distribute the Covered Software under the terms of the version
+of the License under which You originally received the Covered Software,
+or under the terms of any subsequent version published by the license
+steward.
+
+10.3. Modified Versions
+
+If you create software not governed by this License, and you want to
+create a new license for such software, you may create and use a
+modified version of this License if you rename the license and remove
+any references to the name of the license steward (except to note that
+such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+Licenses
+
+If You choose to distribute Source Code Form that is Incompatible With
+Secondary Licenses under the terms of this version of the License, the
+notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+-------------------------------------------
+
+ This Source Code Form is subject to the terms of the Mozilla Public
+ License, v. 2.0. If a copy of the MPL was not distributed with this
+ file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular
+file, then You may include the notice in a location (such as a LICENSE
+file in a relevant directory) where a recipient would be likely to look
+for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+---------------------------------------------------------
+
+ This Source Code Form is "Incompatible With Secondary Licenses", as
+ defined by the Mozilla Public License, v. 2.0.
diff --git a/deps/rabbitmq_recent_history_exchange/Makefile b/deps/rabbitmq_recent_history_exchange/Makefile
new file mode 100644
index 0000000000..98cafb7442
--- /dev/null
+++ b/deps/rabbitmq_recent_history_exchange/Makefile
@@ -0,0 +1,21 @@
+PROJECT = rabbitmq_recent_history_exchange
+PROJECT_DESCRIPTION = RabbitMQ Recent History Exchange
+
+define PROJECT_APP_EXTRA_KEYS
+ {broker_version_requirements, []}
+endef
+
+DEPS = rabbit_common rabbit
+TEST_DEPS = rabbitmq_ct_helpers rabbitmq_ct_client_helpers amqp_client
+
+DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-plugin.mk
+DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk
+
+# FIXME: Use erlang.mk patched for RabbitMQ, while waiting for PRs to be
+# reviewed and merged.
+
+ERLANG_MK_REPO = https://github.com/rabbitmq/erlang.mk.git
+ERLANG_MK_COMMIT = rabbitmq-tmp
+
+include rabbitmq-components.mk
+include erlang.mk
diff --git a/deps/rabbitmq_recent_history_exchange/README.md b/deps/rabbitmq_recent_history_exchange/README.md
new file mode 100644
index 0000000000..c22391ad7a
--- /dev/null
+++ b/deps/rabbitmq_recent_history_exchange/README.md
@@ -0,0 +1,83 @@
+# RabbitMQ Recent History Cache
+
+Keeps track of the last 20 messages that passed through the exchange. Every time a queue is bound to the exchange, it delivers the last 20 messages to that queue. This is useful for implementing a very simple __Chat History__, where clients that join the conversation can get the latest messages.
+
+Exchange Type: `x-recent-history`
+
+## Installation ##
+
+### RabbitMQ 3.6.0 or later
+
+As of RabbitMQ `3.6.0`, this plugin is included in the RabbitMQ distribution.
+
+Enable it with the following command:
+
+```bash
+rabbitmq-plugins enable rabbitmq_recent_history_exchange
+```
+
+### With Earlier Versions
+
+Install the corresponding `.ez` files from our
+[Community Plugins archive](https://www.rabbitmq.com/community-plugins/).
+
+Then run the following command:
+
+```bash
+rabbitmq-plugins enable rabbitmq_recent_history_exchange
+```
+
+## Building from Source
+
+Please see the [RabbitMQ Plugin Development guide](https://www.rabbitmq.com/plugin-development.html).
+
+To build the plugin:
+
+    git clone https://github.com/rabbitmq/rabbitmq-recent-history-exchange.git
+ cd rabbitmq-recent-history-exchange
+ make
+
+Then copy all the `*.ez` files from the `plugins` folder to the [RabbitMQ plugins directory](https://www.rabbitmq.com/relocate.html)
+and enable the plugin:
+
+ [sudo] rabbitmq-plugins enable rabbitmq_recent_history_exchange
+
+## Usage ##
+
+### Creating an exchange ###
+
+To create a _recent history exchange_, just declare an exchange providing the type `"x-recent-history"`.
+
+```java
+channel.exchangeDeclare("logs", "x-recent-history");
+```
+
+### Providing a custom history length ###
+
+By default, this exchange stores the latest 20 messages sent through
+it. If you want a different cache length, pass an
+`"x-recent-history-length"` argument to `exchange.declare`.
+The argument must be an integer greater than or equal to zero.
+
+For example in Java:
+
+```java
+Map<String, Object> args = new HashMap<String, Object>();
+args.put("x-recent-history-length", 60);
+channel.exchangeDeclare("rh", "x-recent-history", false, false, args);
+```
+
+### Preventing some messages from being stored ###
+
+To prevent certain messages from being stored, add the header
+`"x-recent-history-no-store"` with the value `true` to the message,
+as shown in the sketch below.
+
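+For example, in Java (a sketch using the RabbitMQ Java client; the
+exchange name `rh` follows the declaration example above):
+
+```java
+Map<String, Object> headers = new HashMap<String, Object>();
+headers.put("x-recent-history-no-store", true);
+
+AMQP.BasicProperties props = new AMQP.BasicProperties.Builder()
+    .headers(headers)
+    .build();
+
+// Routed as usual, but never added to the exchange's cache.
+channel.basicPublish("rh", "", props, "not cached".getBytes());
+```
+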
+## Disabling the Plugin ##
+
+To disable this plugin, run `rabbitmq-plugins disable rabbitmq_recent_history_exchange`.
+When the plugin is disabled, it deletes all the cached messages.
+
+## License
+
+See LICENSE.
diff --git a/deps/rabbitmq_recent_history_exchange/erlang.mk b/deps/rabbitmq_recent_history_exchange/erlang.mk
new file mode 100644
index 0000000000..fce4be0b0a
--- /dev/null
+++ b/deps/rabbitmq_recent_history_exchange/erlang.mk
@@ -0,0 +1,7808 @@
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+.PHONY: all app apps deps search rel relup docs install-docs check tests clean distclean help erlang-mk
+
+ERLANG_MK_FILENAME := $(realpath $(lastword $(MAKEFILE_LIST)))
+export ERLANG_MK_FILENAME
+
+ERLANG_MK_VERSION = 2019.07.01-40-geb3e4b0
+ERLANG_MK_WITHOUT =
+
+# Make 3.81 and 3.82 are deprecated.
+
+ifeq ($(MAKELEVEL)$(MAKE_VERSION),03.81)
+$(warning Please upgrade to GNU Make 4 or later: https://erlang.mk/guide/installation.html)
+endif
+
+ifeq ($(MAKELEVEL)$(MAKE_VERSION),03.82)
+$(warning Please upgrade to GNU Make 4 or later: https://erlang.mk/guide/installation.html)
+endif
+
+# Core configuration.
+
+PROJECT ?= $(notdir $(CURDIR))
+PROJECT := $(strip $(PROJECT))
+
+PROJECT_VERSION ?= rolling
+PROJECT_MOD ?= $(PROJECT)_app
+PROJECT_ENV ?= []
+
+# Verbosity.
+
+V ?= 0
+
+verbose_0 = @
+verbose_2 = set -x;
+verbose = $(verbose_$(V))
+
+ifeq ($(V),3)
+SHELL := $(SHELL) -x
+endif
+
+gen_verbose_0 = @echo " GEN " $@;
+gen_verbose_2 = set -x;
+gen_verbose = $(gen_verbose_$(V))
+
+gen_verbose_esc_0 = @echo " GEN " $$@;
+gen_verbose_esc_2 = set -x;
+gen_verbose_esc = $(gen_verbose_esc_$(V))
+
+# Temporary files directory.
+
+ERLANG_MK_TMP ?= $(CURDIR)/.erlang.mk
+export ERLANG_MK_TMP
+
+# "erl" command.
+
+ERL = erl +A1 -noinput -boot no_dot_erlang
+
+# Platform detection.
+
+ifeq ($(PLATFORM),)
+UNAME_S := $(shell uname -s)
+
+ifeq ($(UNAME_S),Linux)
+PLATFORM = linux
+else ifeq ($(UNAME_S),Darwin)
+PLATFORM = darwin
+else ifeq ($(UNAME_S),SunOS)
+PLATFORM = solaris
+else ifeq ($(UNAME_S),GNU)
+PLATFORM = gnu
+else ifeq ($(UNAME_S),FreeBSD)
+PLATFORM = freebsd
+else ifeq ($(UNAME_S),NetBSD)
+PLATFORM = netbsd
+else ifeq ($(UNAME_S),OpenBSD)
+PLATFORM = openbsd
+else ifeq ($(UNAME_S),DragonFly)
+PLATFORM = dragonfly
+else ifeq ($(shell uname -o),Msys)
+PLATFORM = msys2
+else
+$(error Unable to detect platform. Please open a ticket with the output of uname -a.)
+endif
+
+export PLATFORM
+endif
+
+# Core targets.
+
+all:: deps app rel
+
+# Noop to avoid a Make warning when there's nothing to do.
+rel::
+ $(verbose) :
+
+relup:: deps app
+
+check:: tests
+
+clean:: clean-crashdump
+
+clean-crashdump:
+ifneq ($(wildcard erl_crash.dump),)
+ $(gen_verbose) rm -f erl_crash.dump
+endif
+
+distclean:: clean distclean-tmp
+
+$(ERLANG_MK_TMP):
+ $(verbose) mkdir -p $(ERLANG_MK_TMP)
+
+distclean-tmp:
+ $(gen_verbose) rm -rf $(ERLANG_MK_TMP)
+
+help::
+ $(verbose) printf "%s\n" \
+ "erlang.mk (version $(ERLANG_MK_VERSION)) is distributed under the terms of the ISC License." \
+ "Copyright (c) 2013-2016 Loïc Hoguin <essen@ninenines.eu>" \
+ "" \
+ "Usage: [V=1] $(MAKE) [target]..." \
+ "" \
+ "Core targets:" \
+ " all Run deps, app and rel targets in that order" \
+ " app Compile the project" \
+ " deps Fetch dependencies (if needed) and compile them" \
+ " fetch-deps Fetch dependencies recursively (if needed) without compiling them" \
+ " list-deps List dependencies recursively on stdout" \
+ " search q=... Search for a package in the built-in index" \
+ " rel Build a release for this project, if applicable" \
+ " docs Build the documentation for this project" \
+ " install-docs Install the man pages for this project" \
+ " check Compile and run all tests and analysis for this project" \
+ " tests Run the tests for this project" \
+ " clean Delete temporary and output files from most targets" \
+ " distclean Delete all temporary and output files" \
+ " help Display this help and exit" \
+ " erlang-mk Update erlang.mk to the latest version"
+
+# Core functions.
+
+empty :=
+space := $(empty) $(empty)
+tab := $(empty) $(empty)
+comma := ,
+
+define newline
+
+
+endef
+
+define comma_list
+$(subst $(space),$(comma),$(strip $(1)))
+endef
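+# For example, $(call comma_list,a b c) expands to "a,b,c".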
+
+define escape_dquotes
+$(subst ",\",$1)
+endef
+
+# Adding erlang.mk to make Erlang scripts that call init:get_plain_arguments() happy.
+define erlang
+$(ERL) $2 -pz $(ERLANG_MK_TMP)/rebar/ebin -eval "$(subst $(newline),,$(call escape_dquotes,$1))" -- erlang.mk
+endef
+
+ifeq ($(PLATFORM),msys2)
+core_native_path = $(shell cygpath -m $1)
+else
+core_native_path = $1
+endif
+
+core_http_get = curl -Lf$(if $(filter-out 0,$(V)),,s)o $(call core_native_path,$1) $2
+
+core_eq = $(and $(findstring $(1),$(2)),$(findstring $(2),$(1)))
+
+# We skip files that contain spaces because they end up causing issues.
+core_find = $(if $(wildcard $1),$(shell find $(1:%/=%) \( -type l -o -type f \) -name $(subst *,\*,$2) | grep -v " "))
+
+core_lc = $(subst A,a,$(subst B,b,$(subst C,c,$(subst D,d,$(subst E,e,$(subst F,f,$(subst G,g,$(subst H,h,$(subst I,i,$(subst J,j,$(subst K,k,$(subst L,l,$(subst M,m,$(subst N,n,$(subst O,o,$(subst P,p,$(subst Q,q,$(subst R,r,$(subst S,s,$(subst T,t,$(subst U,u,$(subst V,v,$(subst W,w,$(subst X,x,$(subst Y,y,$(subst Z,z,$(1)))))))))))))))))))))))))))
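+# For example, $(call core_lc,FooBAR) expands to "foobar".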
+
+core_ls = $(filter-out $(1),$(shell echo $(1)))
+
+# @todo Use a solution that does not require using perl.
+core_relpath = $(shell perl -e 'use File::Spec; print File::Spec->abs2rel(@ARGV) . "\n"' $1 $2)
+
+define core_render
+ printf -- '$(subst $(newline),\n,$(subst %,%%,$(subst ','\'',$(subst $(tab),$(WS),$(call $(1))))))\n' > $(2)
+endef
+
+# Automated update.
+
+ERLANG_MK_REPO ?= https://github.com/ninenines/erlang.mk
+ERLANG_MK_COMMIT ?=
+ERLANG_MK_BUILD_CONFIG ?= build.config
+ERLANG_MK_BUILD_DIR ?= .erlang.mk.build
+
+erlang-mk: WITHOUT ?= $(ERLANG_MK_WITHOUT)
+erlang-mk:
+ifdef ERLANG_MK_COMMIT
+ $(verbose) git clone $(ERLANG_MK_REPO) $(ERLANG_MK_BUILD_DIR)
+ $(verbose) cd $(ERLANG_MK_BUILD_DIR) && git checkout $(ERLANG_MK_COMMIT)
+else
+ $(verbose) git clone --depth 1 $(ERLANG_MK_REPO) $(ERLANG_MK_BUILD_DIR)
+endif
+ $(verbose) if [ -f $(ERLANG_MK_BUILD_CONFIG) ]; then cp $(ERLANG_MK_BUILD_CONFIG) $(ERLANG_MK_BUILD_DIR)/build.config; fi
+ $(gen_verbose) $(MAKE) --no-print-directory -C $(ERLANG_MK_BUILD_DIR) WITHOUT='$(strip $(WITHOUT))' UPGRADE=1
+ $(verbose) cp $(ERLANG_MK_BUILD_DIR)/erlang.mk ./erlang.mk
+ $(verbose) rm -rf $(ERLANG_MK_BUILD_DIR)
+ $(verbose) rm -rf $(ERLANG_MK_TMP)
+
+# The erlang.mk package index is bundled in the default erlang.mk build.
+# Search for the string "copyright" to skip to the rest of the code.
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-kerl
+
+KERL_INSTALL_DIR ?= $(HOME)/erlang
+
+ifeq ($(strip $(KERL)),)
+KERL := $(ERLANG_MK_TMP)/kerl/kerl
+endif
+
+KERL_DIR = $(ERLANG_MK_TMP)/kerl
+
+export KERL
+
+KERL_GIT ?= https://github.com/kerl/kerl
+KERL_COMMIT ?= master
+
+KERL_MAKEFLAGS ?=
+
+OTP_GIT ?= https://github.com/erlang/otp
+
+define kerl_otp_target
+$(KERL_INSTALL_DIR)/$(1): $(KERL)
+ $(verbose) if [ ! -d $$@ ]; then \
+ MAKEFLAGS="$(KERL_MAKEFLAGS)" $(KERL) build git $(OTP_GIT) $(1) $(1); \
+ $(KERL) install $(1) $(KERL_INSTALL_DIR)/$(1); \
+ fi
+endef
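+
+# For example (hypothetical version string), $(eval $(call kerl_otp_target,22.3))
+# defines a target that builds OTP 22.3 from $(OTP_GIT) and installs it
+# into $(KERL_INSTALL_DIR)/22.3; this is how $(ERLANG_OTP) is handled below.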
+
+define kerl_hipe_target
+$(KERL_INSTALL_DIR)/$1-native: $(KERL)
+ $(verbose) if [ ! -d $$@ ]; then \
+ KERL_CONFIGURE_OPTIONS=--enable-native-libs \
+ MAKEFLAGS="$(KERL_MAKEFLAGS)" $(KERL) build git $(OTP_GIT) $1 $1-native; \
+ $(KERL) install $1-native $(KERL_INSTALL_DIR)/$1-native; \
+ fi
+endef
+
+$(KERL): $(KERL_DIR)
+
+$(KERL_DIR): | $(ERLANG_MK_TMP)
+ $(gen_verbose) git clone --depth 1 $(KERL_GIT) $(ERLANG_MK_TMP)/kerl
+ $(verbose) cd $(ERLANG_MK_TMP)/kerl && git checkout $(KERL_COMMIT)
+ $(verbose) chmod +x $(KERL)
+
+distclean:: distclean-kerl
+
+distclean-kerl:
+ $(gen_verbose) rm -rf $(KERL_DIR)
+
+# Allow users to select which version of Erlang/OTP to use for a project.
+
+ifneq ($(strip $(LATEST_ERLANG_OTP)),)
+# In some environments it is necessary to filter out master.
+ERLANG_OTP := $(notdir $(lastword $(sort\
+ $(filter-out $(KERL_INSTALL_DIR)/master $(KERL_INSTALL_DIR)/OTP_R%,\
+ $(filter-out %-rc1 %-rc2 %-rc3,$(wildcard $(KERL_INSTALL_DIR)/*[^-native]))))))
+endif
+
+ERLANG_OTP ?=
+ERLANG_HIPE ?=
+
+# Use kerl to enforce a specific Erlang/OTP version for a project.
+ifneq ($(strip $(ERLANG_OTP)),)
+export PATH := $(KERL_INSTALL_DIR)/$(ERLANG_OTP)/bin:$(PATH)
+SHELL := env PATH=$(PATH) $(SHELL)
+$(eval $(call kerl_otp_target,$(ERLANG_OTP)))
+
+# Build Erlang/OTP only if it doesn't already exist.
+ifeq ($(wildcard $(KERL_INSTALL_DIR)/$(ERLANG_OTP))$(BUILD_ERLANG_OTP),)
+$(info Building Erlang/OTP $(ERLANG_OTP)... Please wait...)
+$(shell $(MAKE) $(KERL_INSTALL_DIR)/$(ERLANG_OTP) ERLANG_OTP=$(ERLANG_OTP) BUILD_ERLANG_OTP=1 >&2)
+endif
+
+else
+# Same for a HiPE enabled VM.
+ifneq ($(strip $(ERLANG_HIPE)),)
+export PATH := $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native/bin:$(PATH)
+SHELL := env PATH=$(PATH) $(SHELL)
+$(eval $(call kerl_hipe_target,$(ERLANG_HIPE)))
+
+# Build Erlang/OTP only if it doesn't already exist.
+ifeq ($(wildcard $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native)$(BUILD_ERLANG_OTP),)
+$(info Building HiPE-enabled Erlang/OTP $(ERLANG_HIPE)... Please wait...)
+$(shell $(MAKE) $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native ERLANG_HIPE=$(ERLANG_HIPE) BUILD_ERLANG_OTP=1 >&2)
+endif
+
+endif
+endif
+
+PACKAGES += aberth
+pkg_aberth_name = aberth
+pkg_aberth_description = Generic BERT-RPC server in Erlang
+pkg_aberth_homepage = https://github.com/a13x/aberth
+pkg_aberth_fetch = git
+pkg_aberth_repo = https://github.com/a13x/aberth
+pkg_aberth_commit = master
+
+PACKAGES += active
+pkg_active_name = active
+pkg_active_description = Active development for Erlang: rebuild and reload source/binary files while the VM is running
+pkg_active_homepage = https://github.com/proger/active
+pkg_active_fetch = git
+pkg_active_repo = https://github.com/proger/active
+pkg_active_commit = master
+
+PACKAGES += actordb_core
+pkg_actordb_core_name = actordb_core
+pkg_actordb_core_description = ActorDB main source
+pkg_actordb_core_homepage = http://www.actordb.com/
+pkg_actordb_core_fetch = git
+pkg_actordb_core_repo = https://github.com/biokoda/actordb_core
+pkg_actordb_core_commit = master
+
+PACKAGES += actordb_thrift
+pkg_actordb_thrift_name = actordb_thrift
+pkg_actordb_thrift_description = Thrift API for ActorDB
+pkg_actordb_thrift_homepage = http://www.actordb.com/
+pkg_actordb_thrift_fetch = git
+pkg_actordb_thrift_repo = https://github.com/biokoda/actordb_thrift
+pkg_actordb_thrift_commit = master
+
+PACKAGES += aleppo
+pkg_aleppo_name = aleppo
+pkg_aleppo_description = Alternative Erlang Pre-Processor
+pkg_aleppo_homepage = https://github.com/ErlyORM/aleppo
+pkg_aleppo_fetch = git
+pkg_aleppo_repo = https://github.com/ErlyORM/aleppo
+pkg_aleppo_commit = master
+
+PACKAGES += alog
+pkg_alog_name = alog
+pkg_alog_description = Simply the best logging framework for Erlang
+pkg_alog_homepage = https://github.com/siberian-fast-food/alogger
+pkg_alog_fetch = git
+pkg_alog_repo = https://github.com/siberian-fast-food/alogger
+pkg_alog_commit = master
+
+PACKAGES += amqp_client
+pkg_amqp_client_name = amqp_client
+pkg_amqp_client_description = RabbitMQ Erlang AMQP client
+pkg_amqp_client_homepage = https://www.rabbitmq.com/erlang-client-user-guide.html
+pkg_amqp_client_fetch = git
+pkg_amqp_client_repo = https://github.com/rabbitmq/rabbitmq-erlang-client.git
+pkg_amqp_client_commit = master
+
+PACKAGES += annotations
+pkg_annotations_name = annotations
+pkg_annotations_description = Simple code instrumentation utilities
+pkg_annotations_homepage = https://github.com/hyperthunk/annotations
+pkg_annotations_fetch = git
+pkg_annotations_repo = https://github.com/hyperthunk/annotations
+pkg_annotations_commit = master
+
+PACKAGES += antidote
+pkg_antidote_name = antidote
+pkg_antidote_description = Large-scale computation without synchronisation
+pkg_antidote_homepage = https://syncfree.lip6.fr/
+pkg_antidote_fetch = git
+pkg_antidote_repo = https://github.com/SyncFree/antidote
+pkg_antidote_commit = master
+
+PACKAGES += apns
+pkg_apns_name = apns
+pkg_apns_description = Apple Push Notification Server for Erlang
+pkg_apns_homepage = http://inaka.github.com/apns4erl
+pkg_apns_fetch = git
+pkg_apns_repo = https://github.com/inaka/apns4erl
+pkg_apns_commit = master
+
+PACKAGES += asciideck
+pkg_asciideck_name = asciideck
+pkg_asciideck_description = Asciidoc for Erlang.
+pkg_asciideck_homepage = https://ninenines.eu
+pkg_asciideck_fetch = git
+pkg_asciideck_repo = https://github.com/ninenines/asciideck
+pkg_asciideck_commit = master
+
+PACKAGES += azdht
+pkg_azdht_name = azdht
+pkg_azdht_description = Azureus Distributed Hash Table (DHT) in Erlang
+pkg_azdht_homepage = https://github.com/arcusfelis/azdht
+pkg_azdht_fetch = git
+pkg_azdht_repo = https://github.com/arcusfelis/azdht
+pkg_azdht_commit = master
+
+PACKAGES += backoff
+pkg_backoff_name = backoff
+pkg_backoff_description = Simple exponential backoffs in Erlang
+pkg_backoff_homepage = https://github.com/ferd/backoff
+pkg_backoff_fetch = git
+pkg_backoff_repo = https://github.com/ferd/backoff
+pkg_backoff_commit = master
+
+PACKAGES += barrel_tcp
+pkg_barrel_tcp_name = barrel_tcp
+pkg_barrel_tcp_description = barrel is a generic TCP acceptor pool with low latency in Erlang.
+pkg_barrel_tcp_homepage = https://github.com/benoitc-attic/barrel_tcp
+pkg_barrel_tcp_fetch = git
+pkg_barrel_tcp_repo = https://github.com/benoitc-attic/barrel_tcp
+pkg_barrel_tcp_commit = master
+
+PACKAGES += basho_bench
+pkg_basho_bench_name = basho_bench
+pkg_basho_bench_description = A load-generation and testing tool for basically whatever you can write a returning Erlang function for.
+pkg_basho_bench_homepage = https://github.com/basho/basho_bench
+pkg_basho_bench_fetch = git
+pkg_basho_bench_repo = https://github.com/basho/basho_bench
+pkg_basho_bench_commit = master
+
+PACKAGES += bcrypt
+pkg_bcrypt_name = bcrypt
+pkg_bcrypt_description = Bcrypt Erlang / C library
+pkg_bcrypt_homepage = https://github.com/erlangpack/bcrypt
+pkg_bcrypt_fetch = git
+pkg_bcrypt_repo = https://github.com/erlangpack/bcrypt.git
+pkg_bcrypt_commit = master
+
+PACKAGES += beam
+pkg_beam_name = beam
+pkg_beam_description = BEAM emulator written in Erlang
+pkg_beam_homepage = https://github.com/tonyrog/beam
+pkg_beam_fetch = git
+pkg_beam_repo = https://github.com/tonyrog/beam
+pkg_beam_commit = master
+
+PACKAGES += beanstalk
+pkg_beanstalk_name = beanstalk
+pkg_beanstalk_description = An Erlang client for beanstalkd
+pkg_beanstalk_homepage = https://github.com/tim/erlang-beanstalk
+pkg_beanstalk_fetch = git
+pkg_beanstalk_repo = https://github.com/tim/erlang-beanstalk
+pkg_beanstalk_commit = master
+
+PACKAGES += bear
+pkg_bear_name = bear
+pkg_bear_description = A set of statistics functions for Erlang
+pkg_bear_homepage = https://github.com/boundary/bear
+pkg_bear_fetch = git
+pkg_bear_repo = https://github.com/boundary/bear
+pkg_bear_commit = master
+
+PACKAGES += bertconf
+pkg_bertconf_name = bertconf
+pkg_bertconf_description = Make ETS tables out of static BERT files that are auto-reloaded
+pkg_bertconf_homepage = https://github.com/ferd/bertconf
+pkg_bertconf_fetch = git
+pkg_bertconf_repo = https://github.com/ferd/bertconf
+pkg_bertconf_commit = master
+
+PACKAGES += bifrost
+pkg_bifrost_name = bifrost
+pkg_bifrost_description = Erlang FTP Server Framework
+pkg_bifrost_homepage = https://github.com/thorstadt/bifrost
+pkg_bifrost_fetch = git
+pkg_bifrost_repo = https://github.com/thorstadt/bifrost
+pkg_bifrost_commit = master
+
+PACKAGES += binpp
+pkg_binpp_name = binpp
+pkg_binpp_description = Erlang Binary Pretty Printer
+pkg_binpp_homepage = https://github.com/jtendo/binpp
+pkg_binpp_fetch = git
+pkg_binpp_repo = https://github.com/jtendo/binpp
+pkg_binpp_commit = master
+
+PACKAGES += bisect
+pkg_bisect_name = bisect
+pkg_bisect_description = Ordered fixed-size binary dictionary in Erlang
+pkg_bisect_homepage = https://github.com/knutin/bisect
+pkg_bisect_fetch = git
+pkg_bisect_repo = https://github.com/knutin/bisect
+pkg_bisect_commit = master
+
+PACKAGES += bitcask
+pkg_bitcask_name = bitcask
+pkg_bitcask_description = because you need another key/value storage engine
+pkg_bitcask_homepage = https://github.com/basho/bitcask
+pkg_bitcask_fetch = git
+pkg_bitcask_repo = https://github.com/basho/bitcask
+pkg_bitcask_commit = develop
+
+PACKAGES += bitstore
+pkg_bitstore_name = bitstore
+pkg_bitstore_description = A document-based ontology development environment
+pkg_bitstore_homepage = https://github.com/bdionne/bitstore
+pkg_bitstore_fetch = git
+pkg_bitstore_repo = https://github.com/bdionne/bitstore
+pkg_bitstore_commit = master
+
+PACKAGES += bootstrap
+pkg_bootstrap_name = bootstrap
+pkg_bootstrap_description = A simple, yet powerful Erlang cluster bootstrapping application.
+pkg_bootstrap_homepage = https://github.com/schlagert/bootstrap
+pkg_bootstrap_fetch = git
+pkg_bootstrap_repo = https://github.com/schlagert/bootstrap
+pkg_bootstrap_commit = master
+
+PACKAGES += boss
+pkg_boss_name = boss
+pkg_boss_description = Erlang web MVC, now featuring Comet
+pkg_boss_homepage = https://github.com/ChicagoBoss/ChicagoBoss
+pkg_boss_fetch = git
+pkg_boss_repo = https://github.com/ChicagoBoss/ChicagoBoss
+pkg_boss_commit = master
+
+PACKAGES += boss_db
+pkg_boss_db_name = boss_db
+pkg_boss_db_description = BossDB: a sharded, caching, pooling, evented ORM for Erlang
+pkg_boss_db_homepage = https://github.com/ErlyORM/boss_db
+pkg_boss_db_fetch = git
+pkg_boss_db_repo = https://github.com/ErlyORM/boss_db
+pkg_boss_db_commit = master
+
+PACKAGES += brod
+pkg_brod_name = brod
+pkg_brod_description = Kafka client in Erlang
+pkg_brod_homepage = https://github.com/klarna/brod
+pkg_brod_fetch = git
+pkg_brod_repo = https://github.com/klarna/brod.git
+pkg_brod_commit = master
+
+PACKAGES += bson
+pkg_bson_name = bson
+pkg_bson_description = BSON documents in Erlang, see bsonspec.org
+pkg_bson_homepage = https://github.com/comtihon/bson-erlang
+pkg_bson_fetch = git
+pkg_bson_repo = https://github.com/comtihon/bson-erlang
+pkg_bson_commit = master
+
+PACKAGES += bullet
+pkg_bullet_name = bullet
+pkg_bullet_description = Simple, reliable, efficient streaming for Cowboy.
+pkg_bullet_homepage = http://ninenines.eu
+pkg_bullet_fetch = git
+pkg_bullet_repo = https://github.com/ninenines/bullet
+pkg_bullet_commit = master
+
+PACKAGES += cache
+pkg_cache_name = cache
+pkg_cache_description = Erlang in-memory cache
+pkg_cache_homepage = https://github.com/fogfish/cache
+pkg_cache_fetch = git
+pkg_cache_repo = https://github.com/fogfish/cache
+pkg_cache_commit = master
+
+PACKAGES += cake
+pkg_cake_name = cake
+pkg_cake_description = Really simple terminal colorization
+pkg_cake_homepage = https://github.com/darach/cake-erl
+pkg_cake_fetch = git
+pkg_cake_repo = https://github.com/darach/cake-erl
+pkg_cake_commit = master
+
+PACKAGES += carotene
+pkg_carotene_name = carotene
+pkg_carotene_description = Real-time server
+pkg_carotene_homepage = https://github.com/carotene/carotene
+pkg_carotene_fetch = git
+pkg_carotene_repo = https://github.com/carotene/carotene
+pkg_carotene_commit = master
+
+PACKAGES += cberl
+pkg_cberl_name = cberl
+pkg_cberl_description = NIF-based Erlang bindings for Couchbase
+pkg_cberl_homepage = https://github.com/chitika/cberl
+pkg_cberl_fetch = git
+pkg_cberl_repo = https://github.com/chitika/cberl
+pkg_cberl_commit = master
+
+PACKAGES += cecho
+pkg_cecho_name = cecho
+pkg_cecho_description = An ncurses library for Erlang
+pkg_cecho_homepage = https://github.com/mazenharake/cecho
+pkg_cecho_fetch = git
+pkg_cecho_repo = https://github.com/mazenharake/cecho
+pkg_cecho_commit = master
+
+PACKAGES += cferl
+pkg_cferl_name = cferl
+pkg_cferl_description = Rackspace / OpenStack Cloud Files Erlang Client
+pkg_cferl_homepage = https://github.com/ddossot/cferl
+pkg_cferl_fetch = git
+pkg_cferl_repo = https://github.com/ddossot/cferl
+pkg_cferl_commit = master
+
+PACKAGES += chaos_monkey
+pkg_chaos_monkey_name = chaos_monkey
+pkg_chaos_monkey_description = This is The CHAOS MONKEY. It will kill your processes.
+pkg_chaos_monkey_homepage = https://github.com/dLuna/chaos_monkey
+pkg_chaos_monkey_fetch = git
+pkg_chaos_monkey_repo = https://github.com/dLuna/chaos_monkey
+pkg_chaos_monkey_commit = master
+
+PACKAGES += check_node
+pkg_check_node_name = check_node
+pkg_check_node_description = Nagios Scripts for monitoring Riak
+pkg_check_node_homepage = https://github.com/basho-labs/riak_nagios
+pkg_check_node_fetch = git
+pkg_check_node_repo = https://github.com/basho-labs/riak_nagios
+pkg_check_node_commit = master
+
+PACKAGES += chronos
+pkg_chronos_name = chronos
+pkg_chronos_description = Timer module for Erlang that makes it easy to abstract time out of the tests.
+pkg_chronos_homepage = https://github.com/lehoff/chronos
+pkg_chronos_fetch = git
+pkg_chronos_repo = https://github.com/lehoff/chronos
+pkg_chronos_commit = master
+
+PACKAGES += chumak
+pkg_chumak_name = chumak
+pkg_chumak_description = Pure Erlang implementation of ZeroMQ Message Transport Protocol.
+pkg_chumak_homepage = http://choven.ca
+pkg_chumak_fetch = git
+pkg_chumak_repo = https://github.com/chovencorp/chumak
+pkg_chumak_commit = master
+
+PACKAGES += cl
+pkg_cl_name = cl
+pkg_cl_description = OpenCL binding for Erlang
+pkg_cl_homepage = https://github.com/tonyrog/cl
+pkg_cl_fetch = git
+pkg_cl_repo = https://github.com/tonyrog/cl
+pkg_cl_commit = master
+
+PACKAGES += clique
+pkg_clique_name = clique
+pkg_clique_description = CLI Framework for Erlang
+pkg_clique_homepage = https://github.com/basho/clique
+pkg_clique_fetch = git
+pkg_clique_repo = https://github.com/basho/clique
+pkg_clique_commit = develop
+
+PACKAGES += cloudi_core
+pkg_cloudi_core_name = cloudi_core
+pkg_cloudi_core_description = CloudI internal service runtime
+pkg_cloudi_core_homepage = http://cloudi.org/
+pkg_cloudi_core_fetch = git
+pkg_cloudi_core_repo = https://github.com/CloudI/cloudi_core
+pkg_cloudi_core_commit = master
+
+PACKAGES += cloudi_service_api_requests
+pkg_cloudi_service_api_requests_name = cloudi_service_api_requests
+pkg_cloudi_service_api_requests_description = CloudI Service API requests (JSON-RPC/Erlang-term support)
+pkg_cloudi_service_api_requests_homepage = http://cloudi.org/
+pkg_cloudi_service_api_requests_fetch = git
+pkg_cloudi_service_api_requests_repo = https://github.com/CloudI/cloudi_service_api_requests
+pkg_cloudi_service_api_requests_commit = master
+
+PACKAGES += cloudi_service_db
+pkg_cloudi_service_db_name = cloudi_service_db
+pkg_cloudi_service_db_description = CloudI Database (in-memory/testing/generic)
+pkg_cloudi_service_db_homepage = http://cloudi.org/
+pkg_cloudi_service_db_fetch = git
+pkg_cloudi_service_db_repo = https://github.com/CloudI/cloudi_service_db
+pkg_cloudi_service_db_commit = master
+
+PACKAGES += cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_name = cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_description = Cassandra CloudI Service
+pkg_cloudi_service_db_cassandra_homepage = http://cloudi.org/
+pkg_cloudi_service_db_cassandra_fetch = git
+pkg_cloudi_service_db_cassandra_repo = https://github.com/CloudI/cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_commit = master
+
+PACKAGES += cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_name = cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_description = Cassandra CQL CloudI Service
+pkg_cloudi_service_db_cassandra_cql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_cassandra_cql_fetch = git
+pkg_cloudi_service_db_cassandra_cql_repo = https://github.com/CloudI/cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_commit = master
+
+PACKAGES += cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_name = cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_description = CouchDB CloudI Service
+pkg_cloudi_service_db_couchdb_homepage = http://cloudi.org/
+pkg_cloudi_service_db_couchdb_fetch = git
+pkg_cloudi_service_db_couchdb_repo = https://github.com/CloudI/cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_commit = master
+
+PACKAGES += cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_name = cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_description = elasticsearch CloudI Service
+pkg_cloudi_service_db_elasticsearch_homepage = http://cloudi.org/
+pkg_cloudi_service_db_elasticsearch_fetch = git
+pkg_cloudi_service_db_elasticsearch_repo = https://github.com/CloudI/cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_commit = master
+
+PACKAGES += cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_name = cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_description = memcached CloudI Service
+pkg_cloudi_service_db_memcached_homepage = http://cloudi.org/
+pkg_cloudi_service_db_memcached_fetch = git
+pkg_cloudi_service_db_memcached_repo = https://github.com/CloudI/cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_commit = master
+
+PACKAGES += cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_name = cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_description = MySQL CloudI Service
+pkg_cloudi_service_db_mysql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_mysql_fetch = git
+pkg_cloudi_service_db_mysql_repo = https://github.com/CloudI/cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_commit = master
+
+PACKAGES += cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_name = cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_description = PostgreSQL CloudI Service
+pkg_cloudi_service_db_pgsql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_pgsql_fetch = git
+pkg_cloudi_service_db_pgsql_repo = https://github.com/CloudI/cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_commit = master
+
+PACKAGES += cloudi_service_db_riak
+pkg_cloudi_service_db_riak_name = cloudi_service_db_riak
+pkg_cloudi_service_db_riak_description = Riak CloudI Service
+pkg_cloudi_service_db_riak_homepage = http://cloudi.org/
+pkg_cloudi_service_db_riak_fetch = git
+pkg_cloudi_service_db_riak_repo = https://github.com/CloudI/cloudi_service_db_riak
+pkg_cloudi_service_db_riak_commit = master
+
+PACKAGES += cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_name = cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_description = Tokyo Tyrant CloudI Service
+pkg_cloudi_service_db_tokyotyrant_homepage = http://cloudi.org/
+pkg_cloudi_service_db_tokyotyrant_fetch = git
+pkg_cloudi_service_db_tokyotyrant_repo = https://github.com/CloudI/cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_commit = master
+
+PACKAGES += cloudi_service_filesystem
+pkg_cloudi_service_filesystem_name = cloudi_service_filesystem
+pkg_cloudi_service_filesystem_description = Filesystem CloudI Service
+pkg_cloudi_service_filesystem_homepage = http://cloudi.org/
+pkg_cloudi_service_filesystem_fetch = git
+pkg_cloudi_service_filesystem_repo = https://github.com/CloudI/cloudi_service_filesystem
+pkg_cloudi_service_filesystem_commit = master
+
+PACKAGES += cloudi_service_http_client
+pkg_cloudi_service_http_client_name = cloudi_service_http_client
+pkg_cloudi_service_http_client_description = HTTP client CloudI Service
+pkg_cloudi_service_http_client_homepage = http://cloudi.org/
+pkg_cloudi_service_http_client_fetch = git
+pkg_cloudi_service_http_client_repo = https://github.com/CloudI/cloudi_service_http_client
+pkg_cloudi_service_http_client_commit = master
+
+PACKAGES += cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_name = cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_description = cowboy HTTP/HTTPS CloudI Service
+pkg_cloudi_service_http_cowboy_homepage = http://cloudi.org/
+pkg_cloudi_service_http_cowboy_fetch = git
+pkg_cloudi_service_http_cowboy_repo = https://github.com/CloudI/cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_commit = master
+
+PACKAGES += cloudi_service_http_elli
+pkg_cloudi_service_http_elli_name = cloudi_service_http_elli
+pkg_cloudi_service_http_elli_description = elli HTTP CloudI Service
+pkg_cloudi_service_http_elli_homepage = http://cloudi.org/
+pkg_cloudi_service_http_elli_fetch = git
+pkg_cloudi_service_http_elli_repo = https://github.com/CloudI/cloudi_service_http_elli
+pkg_cloudi_service_http_elli_commit = master
+
+PACKAGES += cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_name = cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_description = Map/Reduce CloudI Service
+pkg_cloudi_service_map_reduce_homepage = http://cloudi.org/
+pkg_cloudi_service_map_reduce_fetch = git
+pkg_cloudi_service_map_reduce_repo = https://github.com/CloudI/cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_commit = master
+
+PACKAGES += cloudi_service_oauth1
+pkg_cloudi_service_oauth1_name = cloudi_service_oauth1
+pkg_cloudi_service_oauth1_description = OAuth v1.0 CloudI Service
+pkg_cloudi_service_oauth1_homepage = http://cloudi.org/
+pkg_cloudi_service_oauth1_fetch = git
+pkg_cloudi_service_oauth1_repo = https://github.com/CloudI/cloudi_service_oauth1
+pkg_cloudi_service_oauth1_commit = master
+
+PACKAGES += cloudi_service_queue
+pkg_cloudi_service_queue_name = cloudi_service_queue
+pkg_cloudi_service_queue_description = Persistent Queue Service
+pkg_cloudi_service_queue_homepage = http://cloudi.org/
+pkg_cloudi_service_queue_fetch = git
+pkg_cloudi_service_queue_repo = https://github.com/CloudI/cloudi_service_queue
+pkg_cloudi_service_queue_commit = master
+
+PACKAGES += cloudi_service_quorum
+pkg_cloudi_service_quorum_name = cloudi_service_quorum
+pkg_cloudi_service_quorum_description = CloudI Quorum Service
+pkg_cloudi_service_quorum_homepage = http://cloudi.org/
+pkg_cloudi_service_quorum_fetch = git
+pkg_cloudi_service_quorum_repo = https://github.com/CloudI/cloudi_service_quorum
+pkg_cloudi_service_quorum_commit = master
+
+PACKAGES += cloudi_service_router
+pkg_cloudi_service_router_name = cloudi_service_router
+pkg_cloudi_service_router_description = CloudI Router Service
+pkg_cloudi_service_router_homepage = http://cloudi.org/
+pkg_cloudi_service_router_fetch = git
+pkg_cloudi_service_router_repo = https://github.com/CloudI/cloudi_service_router
+pkg_cloudi_service_router_commit = master
+
+PACKAGES += cloudi_service_tcp
+pkg_cloudi_service_tcp_name = cloudi_service_tcp
+pkg_cloudi_service_tcp_description = TCP CloudI Service
+pkg_cloudi_service_tcp_homepage = http://cloudi.org/
+pkg_cloudi_service_tcp_fetch = git
+pkg_cloudi_service_tcp_repo = https://github.com/CloudI/cloudi_service_tcp
+pkg_cloudi_service_tcp_commit = master
+
+PACKAGES += cloudi_service_timers
+pkg_cloudi_service_timers_name = cloudi_service_timers
+pkg_cloudi_service_timers_description = Timers CloudI Service
+pkg_cloudi_service_timers_homepage = http://cloudi.org/
+pkg_cloudi_service_timers_fetch = git
+pkg_cloudi_service_timers_repo = https://github.com/CloudI/cloudi_service_timers
+pkg_cloudi_service_timers_commit = master
+
+PACKAGES += cloudi_service_udp
+pkg_cloudi_service_udp_name = cloudi_service_udp
+pkg_cloudi_service_udp_description = UDP CloudI Service
+pkg_cloudi_service_udp_homepage = http://cloudi.org/
+pkg_cloudi_service_udp_fetch = git
+pkg_cloudi_service_udp_repo = https://github.com/CloudI/cloudi_service_udp
+pkg_cloudi_service_udp_commit = master
+
+PACKAGES += cloudi_service_validate
+pkg_cloudi_service_validate_name = cloudi_service_validate
+pkg_cloudi_service_validate_description = CloudI Validate Service
+pkg_cloudi_service_validate_homepage = http://cloudi.org/
+pkg_cloudi_service_validate_fetch = git
+pkg_cloudi_service_validate_repo = https://github.com/CloudI/cloudi_service_validate
+pkg_cloudi_service_validate_commit = master
+
+PACKAGES += cloudi_service_zeromq
+pkg_cloudi_service_zeromq_name = cloudi_service_zeromq
+pkg_cloudi_service_zeromq_description = ZeroMQ CloudI Service
+pkg_cloudi_service_zeromq_homepage = http://cloudi.org/
+pkg_cloudi_service_zeromq_fetch = git
+pkg_cloudi_service_zeromq_repo = https://github.com/CloudI/cloudi_service_zeromq
+pkg_cloudi_service_zeromq_commit = master
+
+PACKAGES += cluster_info
+pkg_cluster_info_name = cluster_info
+pkg_cluster_info_description = Fork of Hibari's nifty cluster_info OTP app
+pkg_cluster_info_homepage = https://github.com/basho/cluster_info
+pkg_cluster_info_fetch = git
+pkg_cluster_info_repo = https://github.com/basho/cluster_info
+pkg_cluster_info_commit = master
+
+PACKAGES += color
+pkg_color_name = color
+pkg_color_description = ANSI colors for your Erlang
+pkg_color_homepage = https://github.com/julianduque/erlang-color
+pkg_color_fetch = git
+pkg_color_repo = https://github.com/julianduque/erlang-color
+pkg_color_commit = master
+
+PACKAGES += confetti
+pkg_confetti_name = confetti
+pkg_confetti_description = Erlang configuration provider / application:get_env/2 on steroids
+pkg_confetti_homepage = https://github.com/jtendo/confetti
+pkg_confetti_fetch = git
+pkg_confetti_repo = https://github.com/jtendo/confetti
+pkg_confetti_commit = master
+
+PACKAGES += couchbeam
+pkg_couchbeam_name = couchbeam
+pkg_couchbeam_description = Apache CouchDB client in Erlang
+pkg_couchbeam_homepage = https://github.com/benoitc/couchbeam
+pkg_couchbeam_fetch = git
+pkg_couchbeam_repo = https://github.com/benoitc/couchbeam
+pkg_couchbeam_commit = master
+
+PACKAGES += covertool
+pkg_covertool_name = covertool
+pkg_covertool_description = Tool to convert Erlang cover data files into Cobertura XML reports
+pkg_covertool_homepage = https://github.com/idubrov/covertool
+pkg_covertool_fetch = git
+pkg_covertool_repo = https://github.com/idubrov/covertool
+pkg_covertool_commit = master
+
+PACKAGES += cowboy
+pkg_cowboy_name = cowboy
+pkg_cowboy_description = Small, fast and modular HTTP server.
+pkg_cowboy_homepage = http://ninenines.eu
+pkg_cowboy_fetch = git
+pkg_cowboy_repo = https://github.com/ninenines/cowboy
+pkg_cowboy_commit = 1.0.4
+
+PACKAGES += cowdb
+pkg_cowdb_name = cowdb
+pkg_cowdb_description = Pure key/value database library for Erlang applications
+pkg_cowdb_homepage = https://github.com/refuge/cowdb
+pkg_cowdb_fetch = git
+pkg_cowdb_repo = https://github.com/refuge/cowdb
+pkg_cowdb_commit = master
+
+PACKAGES += cowlib
+pkg_cowlib_name = cowlib
+pkg_cowlib_description = Support library for manipulating Web protocols.
+pkg_cowlib_homepage = http://ninenines.eu
+pkg_cowlib_fetch = git
+pkg_cowlib_repo = https://github.com/ninenines/cowlib
+pkg_cowlib_commit = 1.0.2
+
+PACKAGES += cpg
+pkg_cpg_name = cpg
+pkg_cpg_description = CloudI Process Groups
+pkg_cpg_homepage = https://github.com/okeuday/cpg
+pkg_cpg_fetch = git
+pkg_cpg_repo = https://github.com/okeuday/cpg
+pkg_cpg_commit = master
+
+PACKAGES += cqerl
+pkg_cqerl_name = cqerl
+pkg_cqerl_description = Native Erlang CQL client for Cassandra
+pkg_cqerl_homepage = https://matehat.github.io/cqerl/
+pkg_cqerl_fetch = git
+pkg_cqerl_repo = https://github.com/matehat/cqerl
+pkg_cqerl_commit = master
+
+PACKAGES += cr
+pkg_cr_name = cr
+pkg_cr_description = Chain Replication
+pkg_cr_homepage = https://synrc.com/apps/cr/doc/cr.htm
+pkg_cr_fetch = git
+pkg_cr_repo = https://github.com/spawnproc/cr
+pkg_cr_commit = master
+
+PACKAGES += cuttlefish
+pkg_cuttlefish_name = cuttlefish
+pkg_cuttlefish_description = never lose your childlike sense of wonder baby cuttlefish, promise me?
+pkg_cuttlefish_homepage = https://github.com/basho/cuttlefish
+pkg_cuttlefish_fetch = git
+pkg_cuttlefish_repo = https://github.com/basho/cuttlefish
+pkg_cuttlefish_commit = master
+
+PACKAGES += damocles
+pkg_damocles_name = damocles
+pkg_damocles_description = Erlang library for generating adversarial network conditions for QAing distributed applications/systems on a single Linux box.
+pkg_damocles_homepage = https://github.com/lostcolony/damocles
+pkg_damocles_fetch = git
+pkg_damocles_repo = https://github.com/lostcolony/damocles
+pkg_damocles_commit = master
+
+PACKAGES += debbie
+pkg_debbie_name = debbie
+pkg_debbie_description = .DEB Built In Erlang
+pkg_debbie_homepage = https://github.com/crownedgrouse/debbie
+pkg_debbie_fetch = git
+pkg_debbie_repo = https://github.com/crownedgrouse/debbie
+pkg_debbie_commit = master
+
+PACKAGES += decimal
+pkg_decimal_name = decimal
+pkg_decimal_description = An Erlang decimal arithmetic library
+pkg_decimal_homepage = https://github.com/tim/erlang-decimal
+pkg_decimal_fetch = git
+pkg_decimal_repo = https://github.com/tim/erlang-decimal
+pkg_decimal_commit = master
+
+PACKAGES += detergent
+pkg_detergent_name = detergent
+pkg_detergent_description = An emulsifying Erlang SOAP library
+pkg_detergent_homepage = https://github.com/devinus/detergent
+pkg_detergent_fetch = git
+pkg_detergent_repo = https://github.com/devinus/detergent
+pkg_detergent_commit = master
+
+PACKAGES += detest
+pkg_detest_name = detest
+pkg_detest_description = Tool for running tests on a cluster of Erlang nodes
+pkg_detest_homepage = https://github.com/biokoda/detest
+pkg_detest_fetch = git
+pkg_detest_repo = https://github.com/biokoda/detest
+pkg_detest_commit = master
+
+PACKAGES += dh_date
+pkg_dh_date_name = dh_date
+pkg_dh_date_description = Date formatting / parsing library for Erlang
+pkg_dh_date_homepage = https://github.com/daleharvey/dh_date
+pkg_dh_date_fetch = git
+pkg_dh_date_repo = https://github.com/daleharvey/dh_date
+pkg_dh_date_commit = master
+
+PACKAGES += dirbusterl
+pkg_dirbusterl_name = dirbusterl
+pkg_dirbusterl_description = DirBuster successor in Erlang
+pkg_dirbusterl_homepage = https://github.com/silentsignal/DirBustErl
+pkg_dirbusterl_fetch = git
+pkg_dirbusterl_repo = https://github.com/silentsignal/DirBustErl
+pkg_dirbusterl_commit = master
+
+PACKAGES += dispcount
+pkg_dispcount_name = dispcount
+pkg_dispcount_description = Erlang task dispatcher based on ETS counters.
+pkg_dispcount_homepage = https://github.com/ferd/dispcount
+pkg_dispcount_fetch = git
+pkg_dispcount_repo = https://github.com/ferd/dispcount
+pkg_dispcount_commit = master
+
+PACKAGES += dlhttpc
+pkg_dlhttpc_name = dlhttpc
+pkg_dlhttpc_description = dispcount-based lhttpc fork for massive amounts of requests to limited endpoints
+pkg_dlhttpc_homepage = https://github.com/ferd/dlhttpc
+pkg_dlhttpc_fetch = git
+pkg_dlhttpc_repo = https://github.com/ferd/dlhttpc
+pkg_dlhttpc_commit = master
+
+PACKAGES += dns
+pkg_dns_name = dns
+pkg_dns_description = Erlang DNS library
+pkg_dns_homepage = https://github.com/aetrion/dns_erlang
+pkg_dns_fetch = git
+pkg_dns_repo = https://github.com/aetrion/dns_erlang
+pkg_dns_commit = master
+
+PACKAGES += dnssd
+pkg_dnssd_name = dnssd
+pkg_dnssd_description = Erlang interface to Apple's Bonjour DNS Service Discovery implementation
+pkg_dnssd_homepage = https://github.com/benoitc/dnssd_erlang
+pkg_dnssd_fetch = git
+pkg_dnssd_repo = https://github.com/benoitc/dnssd_erlang
+pkg_dnssd_commit = master
+
+PACKAGES += dynamic_compile
+pkg_dynamic_compile_name = dynamic_compile
+pkg_dynamic_compile_description = Compile and load Erlang modules from string input
+pkg_dynamic_compile_homepage = https://github.com/jkvor/dynamic_compile
+pkg_dynamic_compile_fetch = git
+pkg_dynamic_compile_repo = https://github.com/jkvor/dynamic_compile
+pkg_dynamic_compile_commit = master
+
+PACKAGES += e2
+pkg_e2_name = e2
+pkg_e2_description = Library to simplify writing correct OTP applications.
+pkg_e2_homepage = http://e2project.org
+pkg_e2_fetch = git
+pkg_e2_repo = https://github.com/gar1t/e2
+pkg_e2_commit = master
+
+PACKAGES += eamf
+pkg_eamf_name = eamf
+pkg_eamf_description = eAMF provides Action Message Format (AMF) support for Erlang
+pkg_eamf_homepage = https://github.com/mrinalwadhwa/eamf
+pkg_eamf_fetch = git
+pkg_eamf_repo = https://github.com/mrinalwadhwa/eamf
+pkg_eamf_commit = master
+
+PACKAGES += eavro
+pkg_eavro_name = eavro
+pkg_eavro_description = Apache Avro encoder/decoder
+pkg_eavro_homepage = https://github.com/SIfoxDevTeam/eavro
+pkg_eavro_fetch = git
+pkg_eavro_repo = https://github.com/SIfoxDevTeam/eavro
+pkg_eavro_commit = master
+
+PACKAGES += ecapnp
+pkg_ecapnp_name = ecapnp
+pkg_ecapnp_description = Cap'n Proto library for Erlang
+pkg_ecapnp_homepage = https://github.com/kaos/ecapnp
+pkg_ecapnp_fetch = git
+pkg_ecapnp_repo = https://github.com/kaos/ecapnp
+pkg_ecapnp_commit = master
+
+PACKAGES += econfig
+pkg_econfig_name = econfig
+pkg_econfig_description = simple Erlang config handler using INI files
+pkg_econfig_homepage = https://github.com/benoitc/econfig
+pkg_econfig_fetch = git
+pkg_econfig_repo = https://github.com/benoitc/econfig
+pkg_econfig_commit = master
+
+PACKAGES += edate
+pkg_edate_name = edate
+pkg_edate_description = Date manipulation library for Erlang
+pkg_edate_homepage = https://github.com/dweldon/edate
+pkg_edate_fetch = git
+pkg_edate_repo = https://github.com/dweldon/edate
+pkg_edate_commit = master
+
+PACKAGES += edgar
+pkg_edgar_name = edgar
+pkg_edgar_description = Erlang Does GNU AR
+pkg_edgar_homepage = https://github.com/crownedgrouse/edgar
+pkg_edgar_fetch = git
+pkg_edgar_repo = https://github.com/crownedgrouse/edgar
+pkg_edgar_commit = master
+
+PACKAGES += edis
+pkg_edis_name = edis
+pkg_edis_description = An Erlang implementation of Redis KV Store
+pkg_edis_homepage = http://inaka.github.com/edis/
+pkg_edis_fetch = git
+pkg_edis_repo = https://github.com/inaka/edis
+pkg_edis_commit = master
+
+PACKAGES += edns
+pkg_edns_name = edns
+pkg_edns_description = Erlang/OTP DNS server
+pkg_edns_homepage = https://github.com/hcvst/erlang-dns
+pkg_edns_fetch = git
+pkg_edns_repo = https://github.com/hcvst/erlang-dns
+pkg_edns_commit = master
+
+PACKAGES += edown
+pkg_edown_name = edown
+pkg_edown_description = EDoc extension for generating GitHub-flavored Markdown
+pkg_edown_homepage = https://github.com/uwiger/edown
+pkg_edown_fetch = git
+pkg_edown_repo = https://github.com/uwiger/edown
+pkg_edown_commit = master
+
+PACKAGES += eep
+pkg_eep_name = eep
+pkg_eep_description = Erlang Easy Profiling (eep) application provides a way to analyze application performance and call hierarchy
+pkg_eep_homepage = https://github.com/virtan/eep
+pkg_eep_fetch = git
+pkg_eep_repo = https://github.com/virtan/eep
+pkg_eep_commit = master
+
+PACKAGES += eep_app
+pkg_eep_app_name = eep_app
+pkg_eep_app_description = Embedded Event Processing
+pkg_eep_app_homepage = https://github.com/darach/eep-erl
+pkg_eep_app_fetch = git
+pkg_eep_app_repo = https://github.com/darach/eep-erl
+pkg_eep_app_commit = master
+
+PACKAGES += efene
+pkg_efene_name = efene
+pkg_efene_description = Alternative syntax for the Erlang Programming Language focusing on simplicity, ease of use and programmer UX
+pkg_efene_homepage = https://github.com/efene/efene
+pkg_efene_fetch = git
+pkg_efene_repo = https://github.com/efene/efene
+pkg_efene_commit = master
+
+PACKAGES += egeoip
+pkg_egeoip_name = egeoip
+pkg_egeoip_description = Erlang IP Geolocation module, currently supporting the MaxMind GeoLite City Database.
+pkg_egeoip_homepage = https://github.com/mochi/egeoip
+pkg_egeoip_fetch = git
+pkg_egeoip_repo = https://github.com/mochi/egeoip
+pkg_egeoip_commit = master
+
+PACKAGES += ehsa
+pkg_ehsa_name = ehsa
+pkg_ehsa_description = Erlang HTTP server basic and digest authentication modules
+pkg_ehsa_homepage = https://bitbucket.org/a12n/ehsa
+pkg_ehsa_fetch = hg
+pkg_ehsa_repo = https://bitbucket.org/a12n/ehsa
+pkg_ehsa_commit = default
+
+PACKAGES += ej
+pkg_ej_name = ej
+pkg_ej_description = Helper module for working with Erlang terms representing JSON
+pkg_ej_homepage = https://github.com/seth/ej
+pkg_ej_fetch = git
+pkg_ej_repo = https://github.com/seth/ej
+pkg_ej_commit = master
+
+PACKAGES += ejabberd
+pkg_ejabberd_name = ejabberd
+pkg_ejabberd_description = Robust, ubiquitous and massively scalable Jabber / XMPP Instant Messaging platform
+pkg_ejabberd_homepage = https://github.com/processone/ejabberd
+pkg_ejabberd_fetch = git
+pkg_ejabberd_repo = https://github.com/processone/ejabberd
+pkg_ejabberd_commit = master
+
+PACKAGES += ejwt
+pkg_ejwt_name = ejwt
+pkg_ejwt_description = Erlang library for JSON Web Tokens
+pkg_ejwt_homepage = https://github.com/artefactop/ejwt
+pkg_ejwt_fetch = git
+pkg_ejwt_repo = https://github.com/artefactop/ejwt
+pkg_ejwt_commit = master
+
+PACKAGES += ekaf
+pkg_ekaf_name = ekaf
+pkg_ekaf_description = A minimal, high-performance Kafka client in Erlang.
+pkg_ekaf_homepage = https://github.com/helpshift/ekaf
+pkg_ekaf_fetch = git
+pkg_ekaf_repo = https://github.com/helpshift/ekaf
+pkg_ekaf_commit = master
+
+PACKAGES += elarm
+pkg_elarm_name = elarm
+pkg_elarm_description = Alarm Manager for Erlang.
+pkg_elarm_homepage = https://github.com/esl/elarm
+pkg_elarm_fetch = git
+pkg_elarm_repo = https://github.com/esl/elarm
+pkg_elarm_commit = master
+
+PACKAGES += eleveldb
+pkg_eleveldb_name = eleveldb
+pkg_eleveldb_description = Erlang LevelDB API
+pkg_eleveldb_homepage = https://github.com/basho/eleveldb
+pkg_eleveldb_fetch = git
+pkg_eleveldb_repo = https://github.com/basho/eleveldb
+pkg_eleveldb_commit = master
+
+PACKAGES += elixir
+pkg_elixir_name = elixir
+pkg_elixir_description = Elixir is a dynamic, functional language designed for building scalable and maintainable applications
+pkg_elixir_homepage = https://elixir-lang.org/
+pkg_elixir_fetch = git
+pkg_elixir_repo = https://github.com/elixir-lang/elixir
+pkg_elixir_commit = master
+
+PACKAGES += elli
+pkg_elli_name = elli
+pkg_elli_description = Simple, robust and performant Erlang web server
+pkg_elli_homepage = https://github.com/elli-lib/elli
+pkg_elli_fetch = git
+pkg_elli_repo = https://github.com/elli-lib/elli
+pkg_elli_commit = master
+
+PACKAGES += elvis
+pkg_elvis_name = elvis
+pkg_elvis_description = Erlang Style Reviewer
+pkg_elvis_homepage = https://github.com/inaka/elvis
+pkg_elvis_fetch = git
+pkg_elvis_repo = https://github.com/inaka/elvis
+pkg_elvis_commit = master
+
+PACKAGES += emagick
+pkg_emagick_name = emagick
+pkg_emagick_description = Wrapper for the GraphicsMagick/ImageMagick command line tool.
+pkg_emagick_homepage = https://github.com/kivra/emagick
+pkg_emagick_fetch = git
+pkg_emagick_repo = https://github.com/kivra/emagick
+pkg_emagick_commit = master
+
+PACKAGES += emysql
+pkg_emysql_name = emysql
+pkg_emysql_description = Stable, pure Erlang MySQL driver.
+pkg_emysql_homepage = https://github.com/Eonblast/Emysql
+pkg_emysql_fetch = git
+pkg_emysql_repo = https://github.com/Eonblast/Emysql
+pkg_emysql_commit = master
+
+PACKAGES += enm
+pkg_enm_name = enm
+pkg_enm_description = Erlang driver for nanomsg
+pkg_enm_homepage = https://github.com/basho/enm
+pkg_enm_fetch = git
+pkg_enm_repo = https://github.com/basho/enm
+pkg_enm_commit = master
+
+PACKAGES += entop
+pkg_entop_name = entop
+pkg_entop_description = A top-like tool for monitoring an Erlang node
+pkg_entop_homepage = https://github.com/mazenharake/entop
+pkg_entop_fetch = git
+pkg_entop_repo = https://github.com/mazenharake/entop
+pkg_entop_commit = master
+
+PACKAGES += epcap
+pkg_epcap_name = epcap
+pkg_epcap_description = Erlang packet capture interface using pcap
+pkg_epcap_homepage = https://github.com/msantos/epcap
+pkg_epcap_fetch = git
+pkg_epcap_repo = https://github.com/msantos/epcap
+pkg_epcap_commit = master
+
+PACKAGES += eper
+pkg_eper_name = eper
+pkg_eper_description = Erlang performance and debugging tools.
+pkg_eper_homepage = https://github.com/massemanet/eper
+pkg_eper_fetch = git
+pkg_eper_repo = https://github.com/massemanet/eper
+pkg_eper_commit = master
+
+PACKAGES += epgsql
+pkg_epgsql_name = epgsql
+pkg_epgsql_description = Erlang PostgreSQL client library.
+pkg_epgsql_homepage = https://github.com/epgsql/epgsql
+pkg_epgsql_fetch = git
+pkg_epgsql_repo = https://github.com/epgsql/epgsql
+pkg_epgsql_commit = master
+
+PACKAGES += episcina
+pkg_episcina_name = episcina
+pkg_episcina_description = A simple, non-intrusive resource pool for connections
+pkg_episcina_homepage = https://github.com/erlware/episcina
+pkg_episcina_fetch = git
+pkg_episcina_repo = https://github.com/erlware/episcina
+pkg_episcina_commit = master
+
+PACKAGES += eplot
+pkg_eplot_name = eplot
+pkg_eplot_description = A plot engine written in Erlang.
+pkg_eplot_homepage = https://github.com/psyeugenic/eplot
+pkg_eplot_fetch = git
+pkg_eplot_repo = https://github.com/psyeugenic/eplot
+pkg_eplot_commit = master
+
+PACKAGES += epocxy
+pkg_epocxy_name = epocxy
+pkg_epocxy_description = Erlang Patterns of Concurrency
+pkg_epocxy_homepage = https://github.com/duomark/epocxy
+pkg_epocxy_fetch = git
+pkg_epocxy_repo = https://github.com/duomark/epocxy
+pkg_epocxy_commit = master
+
+PACKAGES += epubnub
+pkg_epubnub_name = epubnub
+pkg_epubnub_description = Erlang PubNub API
+pkg_epubnub_homepage = https://github.com/tsloughter/epubnub
+pkg_epubnub_fetch = git
+pkg_epubnub_repo = https://github.com/tsloughter/epubnub
+pkg_epubnub_commit = master
+
+PACKAGES += eqm
+pkg_eqm_name = eqm
+pkg_eqm_description = Erlang pub/sub with supply-demand channels
+pkg_eqm_homepage = https://github.com/loucash/eqm
+pkg_eqm_fetch = git
+pkg_eqm_repo = https://github.com/loucash/eqm
+pkg_eqm_commit = master
+
+PACKAGES += eredis
+pkg_eredis_name = eredis
+pkg_eredis_description = Erlang Redis client
+pkg_eredis_homepage = https://github.com/wooga/eredis
+pkg_eredis_fetch = git
+pkg_eredis_repo = https://github.com/wooga/eredis
+pkg_eredis_commit = master
+
+PACKAGES += eredis_pool
+pkg_eredis_pool_name = eredis_pool
+pkg_eredis_pool_description = eredis_pool is a pool of Redis clients, using eredis and poolboy.
+pkg_eredis_pool_homepage = https://github.com/hiroeorz/eredis_pool
+pkg_eredis_pool_fetch = git
+pkg_eredis_pool_repo = https://github.com/hiroeorz/eredis_pool
+pkg_eredis_pool_commit = master
+
+PACKAGES += erl_streams
+pkg_erl_streams_name = erl_streams
+pkg_erl_streams_description = Streams in Erlang
+pkg_erl_streams_homepage = https://github.com/epappas/erl_streams
+pkg_erl_streams_fetch = git
+pkg_erl_streams_repo = https://github.com/epappas/erl_streams
+pkg_erl_streams_commit = master
+
+PACKAGES += erlang_cep
+pkg_erlang_cep_name = erlang_cep
+pkg_erlang_cep_description = A basic CEP package written in Erlang
+pkg_erlang_cep_homepage = https://github.com/danmacklin/erlang_cep
+pkg_erlang_cep_fetch = git
+pkg_erlang_cep_repo = https://github.com/danmacklin/erlang_cep
+pkg_erlang_cep_commit = master
+
+PACKAGES += erlang_js
+pkg_erlang_js_name = erlang_js
+pkg_erlang_js_description = A linked-in driver for Erlang to Mozilla's SpiderMonkey JavaScript runtime.
+pkg_erlang_js_homepage = https://github.com/basho/erlang_js
+pkg_erlang_js_fetch = git
+pkg_erlang_js_repo = https://github.com/basho/erlang_js
+pkg_erlang_js_commit = master
+
+PACKAGES += erlang_localtime
+pkg_erlang_localtime_name = erlang_localtime
+pkg_erlang_localtime_description = Erlang library for conversion from one local time to another
+pkg_erlang_localtime_homepage = https://github.com/dmitryme/erlang_localtime
+pkg_erlang_localtime_fetch = git
+pkg_erlang_localtime_repo = https://github.com/dmitryme/erlang_localtime
+pkg_erlang_localtime_commit = master
+
+PACKAGES += erlang_smtp
+pkg_erlang_smtp_name = erlang_smtp
+pkg_erlang_smtp_description = Erlang SMTP and POP3 server code.
+pkg_erlang_smtp_homepage = https://github.com/tonyg/erlang-smtp
+pkg_erlang_smtp_fetch = git
+pkg_erlang_smtp_repo = https://github.com/tonyg/erlang-smtp
+pkg_erlang_smtp_commit = master
+
+PACKAGES += erlang_term
+pkg_erlang_term_name = erlang_term
+pkg_erlang_term_description = Erlang Term Info
+pkg_erlang_term_homepage = https://github.com/okeuday/erlang_term
+pkg_erlang_term_fetch = git
+pkg_erlang_term_repo = https://github.com/okeuday/erlang_term
+pkg_erlang_term_commit = master
+
+PACKAGES += erlastic_search
+pkg_erlastic_search_name = erlastic_search
+pkg_erlastic_search_description = An Erlang app for communicating with Elasticsearch's REST interface.
+pkg_erlastic_search_homepage = https://github.com/tsloughter/erlastic_search
+pkg_erlastic_search_fetch = git
+pkg_erlastic_search_repo = https://github.com/tsloughter/erlastic_search
+pkg_erlastic_search_commit = master
+
+PACKAGES += erlasticsearch
+pkg_erlasticsearch_name = erlasticsearch
+pkg_erlasticsearch_description = Erlang Thrift interface to Elasticsearch
+pkg_erlasticsearch_homepage = https://github.com/dieswaytoofast/erlasticsearch
+pkg_erlasticsearch_fetch = git
+pkg_erlasticsearch_repo = https://github.com/dieswaytoofast/erlasticsearch
+pkg_erlasticsearch_commit = master
+
+PACKAGES += erlbrake
+pkg_erlbrake_name = erlbrake
+pkg_erlbrake_description = Erlang Airbrake notification client
+pkg_erlbrake_homepage = https://github.com/kenpratt/erlbrake
+pkg_erlbrake_fetch = git
+pkg_erlbrake_repo = https://github.com/kenpratt/erlbrake
+pkg_erlbrake_commit = master
+
+PACKAGES += erlcloud
+pkg_erlcloud_name = erlcloud
+pkg_erlcloud_description = Cloud computing library for Erlang (Amazon EC2, S3, SQS, SimpleDB, Mechanical Turk, ELB)
+pkg_erlcloud_homepage = https://github.com/gleber/erlcloud
+pkg_erlcloud_fetch = git
+pkg_erlcloud_repo = https://github.com/gleber/erlcloud
+pkg_erlcloud_commit = master
+
+PACKAGES += erlcron
+pkg_erlcron_name = erlcron
+pkg_erlcron_description = Erlang cronish system
+pkg_erlcron_homepage = https://github.com/erlware/erlcron
+pkg_erlcron_fetch = git
+pkg_erlcron_repo = https://github.com/erlware/erlcron
+pkg_erlcron_commit = master
+
+PACKAGES += erldb
+pkg_erldb_name = erldb
+pkg_erldb_description = ORM (Object-relational mapping) application implemented in Erlang
+pkg_erldb_homepage = http://erldb.org
+pkg_erldb_fetch = git
+pkg_erldb_repo = https://github.com/erldb/erldb
+pkg_erldb_commit = master
+
+PACKAGES += erldis
+pkg_erldis_name = erldis
+pkg_erldis_description = Redis client library for Erlang
+pkg_erldis_homepage = https://github.com/cstar/erldis
+pkg_erldis_fetch = git
+pkg_erldis_repo = https://github.com/cstar/erldis
+pkg_erldis_commit = master
+
+PACKAGES += erldns
+pkg_erldns_name = erldns
+pkg_erldns_description = DNS server, in Erlang.
+pkg_erldns_homepage = https://github.com/aetrion/erl-dns
+pkg_erldns_fetch = git
+pkg_erldns_repo = https://github.com/aetrion/erl-dns
+pkg_erldns_commit = master
+
+PACKAGES += erldocker
+pkg_erldocker_name = erldocker
+pkg_erldocker_description = Docker Remote API client for Erlang
+pkg_erldocker_homepage = https://github.com/proger/erldocker
+pkg_erldocker_fetch = git
+pkg_erldocker_repo = https://github.com/proger/erldocker
+pkg_erldocker_commit = master
+
+PACKAGES += erlfsmon
+pkg_erlfsmon_name = erlfsmon
+pkg_erlfsmon_description = Erlang filesystem event watcher for Linux and OS X
+pkg_erlfsmon_homepage = https://github.com/proger/erlfsmon
+pkg_erlfsmon_fetch = git
+pkg_erlfsmon_repo = https://github.com/proger/erlfsmon
+pkg_erlfsmon_commit = master
+
+PACKAGES += erlgit
+pkg_erlgit_name = erlgit
+pkg_erlgit_description = Erlang convenience wrapper around git executable
+pkg_erlgit_homepage = https://github.com/gleber/erlgit
+pkg_erlgit_fetch = git
+pkg_erlgit_repo = https://github.com/gleber/erlgit
+pkg_erlgit_commit = master
+
+PACKAGES += erlguten
+pkg_erlguten_name = erlguten
+pkg_erlguten_description = ErlGuten is a system for high-quality typesetting, written purely in Erlang.
+pkg_erlguten_homepage = https://github.com/richcarl/erlguten
+pkg_erlguten_fetch = git
+pkg_erlguten_repo = https://github.com/richcarl/erlguten
+pkg_erlguten_commit = master
+
+PACKAGES += erlmc
+pkg_erlmc_name = erlmc
+pkg_erlmc_description = Erlang memcached binary protocol client
+pkg_erlmc_homepage = https://github.com/jkvor/erlmc
+pkg_erlmc_fetch = git
+pkg_erlmc_repo = https://github.com/jkvor/erlmc
+pkg_erlmc_commit = master
+
+PACKAGES += erlmongo
+pkg_erlmongo_name = erlmongo
+pkg_erlmongo_description = Record-based Erlang driver for MongoDB with GridFS support
+pkg_erlmongo_homepage = https://github.com/SergejJurecko/erlmongo
+pkg_erlmongo_fetch = git
+pkg_erlmongo_repo = https://github.com/SergejJurecko/erlmongo
+pkg_erlmongo_commit = master
+
+PACKAGES += erlog
+pkg_erlog_name = erlog
+pkg_erlog_description = Prolog interpreter in and for Erlang
+pkg_erlog_homepage = https://github.com/rvirding/erlog
+pkg_erlog_fetch = git
+pkg_erlog_repo = https://github.com/rvirding/erlog
+pkg_erlog_commit = master
+
+PACKAGES += erlpass
+pkg_erlpass_name = erlpass
+pkg_erlpass_description = A library to handle password hashing and changing in a safe manner, independent from any kind of storage whatsoever.
+pkg_erlpass_homepage = https://github.com/ferd/erlpass
+pkg_erlpass_fetch = git
+pkg_erlpass_repo = https://github.com/ferd/erlpass
+pkg_erlpass_commit = master
+
+PACKAGES += erlport
+pkg_erlport_name = erlport
+pkg_erlport_description = ErlPort - connect Erlang to other languages
+pkg_erlport_homepage = https://github.com/hdima/erlport
+pkg_erlport_fetch = git
+pkg_erlport_repo = https://github.com/hdima/erlport
+pkg_erlport_commit = master
+
+PACKAGES += erlsh
+pkg_erlsh_name = erlsh
+pkg_erlsh_description = Erlang shell tools
+pkg_erlsh_homepage = https://github.com/proger/erlsh
+pkg_erlsh_fetch = git
+pkg_erlsh_repo = https://github.com/proger/erlsh
+pkg_erlsh_commit = master
+
+PACKAGES += erlsha2
+pkg_erlsha2_name = erlsha2
+pkg_erlsha2_description = SHA-224, SHA-256, SHA-384, SHA-512 implemented in Erlang NIFs.
+pkg_erlsha2_homepage = https://github.com/vinoski/erlsha2
+pkg_erlsha2_fetch = git
+pkg_erlsha2_repo = https://github.com/vinoski/erlsha2
+pkg_erlsha2_commit = master
+
+PACKAGES += erlsom
+pkg_erlsom_name = erlsom
+pkg_erlsom_description = XML parser for Erlang
+pkg_erlsom_homepage = https://github.com/willemdj/erlsom
+pkg_erlsom_fetch = git
+pkg_erlsom_repo = https://github.com/willemdj/erlsom
+pkg_erlsom_commit = master
+
+PACKAGES += erlubi
+pkg_erlubi_name = erlubi
+pkg_erlubi_description = Ubigraph Erlang Client (and Process Visualizer)
+pkg_erlubi_homepage = https://github.com/krestenkrab/erlubi
+pkg_erlubi_fetch = git
+pkg_erlubi_repo = https://github.com/krestenkrab/erlubi
+pkg_erlubi_commit = master
+
+PACKAGES += erlvolt
+pkg_erlvolt_name = erlvolt
+pkg_erlvolt_description = VoltDB Erlang Client Driver
+pkg_erlvolt_homepage = https://github.com/VoltDB/voltdb-client-erlang
+pkg_erlvolt_fetch = git
+pkg_erlvolt_repo = https://github.com/VoltDB/voltdb-client-erlang
+pkg_erlvolt_commit = master
+
+PACKAGES += erlware_commons
+pkg_erlware_commons_name = erlware_commons
+pkg_erlware_commons_description = Erlware Commons is an Erlware project focused on all aspects of reusable Erlang components.
+pkg_erlware_commons_homepage = https://github.com/erlware/erlware_commons
+pkg_erlware_commons_fetch = git
+pkg_erlware_commons_repo = https://github.com/erlware/erlware_commons
+pkg_erlware_commons_commit = master
+
+PACKAGES += erlydtl
+pkg_erlydtl_name = erlydtl
+pkg_erlydtl_description = Django Template Language for Erlang.
+pkg_erlydtl_homepage = https://github.com/erlydtl/erlydtl
+pkg_erlydtl_fetch = git
+pkg_erlydtl_repo = https://github.com/erlydtl/erlydtl
+pkg_erlydtl_commit = master
+
+PACKAGES += errd
+pkg_errd_name = errd
+pkg_errd_description = Erlang RRDTool library
+pkg_errd_homepage = https://github.com/archaelus/errd
+pkg_errd_fetch = git
+pkg_errd_repo = https://github.com/archaelus/errd
+pkg_errd_commit = master
+
+PACKAGES += erserve
+pkg_erserve_name = erserve
+pkg_erserve_description = Erlang/Rserve communication interface
+pkg_erserve_homepage = https://github.com/del/erserve
+pkg_erserve_fetch = git
+pkg_erserve_repo = https://github.com/del/erserve
+pkg_erserve_commit = master
+
+PACKAGES += erwa
+pkg_erwa_name = erwa
+pkg_erwa_description = A WAMP router and client written in Erlang.
+pkg_erwa_homepage = https://github.com/bwegh/erwa
+pkg_erwa_fetch = git
+pkg_erwa_repo = https://github.com/bwegh/erwa
+pkg_erwa_commit = master
+
+PACKAGES += escalus
+pkg_escalus_name = escalus
+pkg_escalus_description = An XMPP client library in Erlang for conveniently testing XMPP servers
+pkg_escalus_homepage = https://github.com/esl/escalus
+pkg_escalus_fetch = git
+pkg_escalus_repo = https://github.com/esl/escalus
+pkg_escalus_commit = master
+
+PACKAGES += esh_mk
+pkg_esh_mk_name = esh_mk
+pkg_esh_mk_description = esh template engine plugin for erlang.mk
+pkg_esh_mk_homepage = https://github.com/crownedgrouse/esh.mk
+pkg_esh_mk_fetch = git
+pkg_esh_mk_repo = https://github.com/crownedgrouse/esh.mk.git
+pkg_esh_mk_commit = master
+
+PACKAGES += espec
+pkg_espec_name = espec
+pkg_espec_description = ESpec: Behaviour driven development framework for Erlang
+pkg_espec_homepage = https://github.com/lucaspiller/espec
+pkg_espec_fetch = git
+pkg_espec_repo = https://github.com/lucaspiller/espec
+pkg_espec_commit = master
+
+PACKAGES += estatsd
+pkg_estatsd_name = estatsd
+pkg_estatsd_description = Erlang stats aggregation app that periodically flushes data to Graphite
+pkg_estatsd_homepage = https://github.com/RJ/estatsd
+pkg_estatsd_fetch = git
+pkg_estatsd_repo = https://github.com/RJ/estatsd
+pkg_estatsd_commit = master
+
+PACKAGES += etap
+pkg_etap_name = etap
+pkg_etap_description = etap is a simple Erlang testing library that provides TAP-compliant output.
+pkg_etap_homepage = https://github.com/ngerakines/etap
+pkg_etap_fetch = git
+pkg_etap_repo = https://github.com/ngerakines/etap
+pkg_etap_commit = master
+
+PACKAGES += etest
+pkg_etest_name = etest
+pkg_etest_description = A lightweight, convention over configuration test framework for Erlang
+pkg_etest_homepage = https://github.com/wooga/etest
+pkg_etest_fetch = git
+pkg_etest_repo = https://github.com/wooga/etest
+pkg_etest_commit = master
+
+PACKAGES += etest_http
+pkg_etest_http_name = etest_http
+pkg_etest_http_description = etest Assertions around HTTP (client-side)
+pkg_etest_http_homepage = https://github.com/wooga/etest_http
+pkg_etest_http_fetch = git
+pkg_etest_http_repo = https://github.com/wooga/etest_http
+pkg_etest_http_commit = master
+
+PACKAGES += etoml
+pkg_etoml_name = etoml
+pkg_etoml_description = Erlang parser for the TOML language
+pkg_etoml_homepage = https://github.com/kalta/etoml
+pkg_etoml_fetch = git
+pkg_etoml_repo = https://github.com/kalta/etoml
+pkg_etoml_commit = master
+
+PACKAGES += eunit
+pkg_eunit_name = eunit
+pkg_eunit_description = The EUnit lightweight unit testing framework for Erlang - this is the canonical development repository.
+pkg_eunit_homepage = https://github.com/richcarl/eunit
+pkg_eunit_fetch = git
+pkg_eunit_repo = https://github.com/richcarl/eunit
+pkg_eunit_commit = master
+
+PACKAGES += eunit_formatters
+pkg_eunit_formatters_name = eunit_formatters
+pkg_eunit_formatters_description = Because eunit's output sucks. Let's make it better.
+pkg_eunit_formatters_homepage = https://github.com/seancribbs/eunit_formatters
+pkg_eunit_formatters_fetch = git
+pkg_eunit_formatters_repo = https://github.com/seancribbs/eunit_formatters
+pkg_eunit_formatters_commit = master
+
+PACKAGES += euthanasia
+pkg_euthanasia_name = euthanasia
+pkg_euthanasia_description = Merciful killer for your Erlang processes
+pkg_euthanasia_homepage = https://github.com/doubleyou/euthanasia
+pkg_euthanasia_fetch = git
+pkg_euthanasia_repo = https://github.com/doubleyou/euthanasia
+pkg_euthanasia_commit = master
+
+PACKAGES += evum
+pkg_evum_name = evum
+pkg_evum_description = Spawn Linux VMs as Erlang processes in the Erlang VM
+pkg_evum_homepage = https://github.com/msantos/evum
+pkg_evum_fetch = git
+pkg_evum_repo = https://github.com/msantos/evum
+pkg_evum_commit = master
+
+PACKAGES += exec
+pkg_exec_name = erlexec
+pkg_exec_description = Execute and control OS processes from Erlang/OTP.
+pkg_exec_homepage = http://saleyn.github.com/erlexec
+pkg_exec_fetch = git
+pkg_exec_repo = https://github.com/saleyn/erlexec
+pkg_exec_commit = master
+
+PACKAGES += exml
+pkg_exml_name = exml
+pkg_exml_description = XML parsing library in Erlang
+pkg_exml_homepage = https://github.com/paulgray/exml
+pkg_exml_fetch = git
+pkg_exml_repo = https://github.com/paulgray/exml
+pkg_exml_commit = master
+
+PACKAGES += exometer
+pkg_exometer_name = exometer
+pkg_exometer_description = Basic measurement objects and probe behavior
+pkg_exometer_homepage = https://github.com/Feuerlabs/exometer
+pkg_exometer_fetch = git
+pkg_exometer_repo = https://github.com/Feuerlabs/exometer
+pkg_exometer_commit = master
+
+PACKAGES += exs1024
+pkg_exs1024_name = exs1024
+pkg_exs1024_description = Xorshift1024star pseudo random number generator for Erlang.
+pkg_exs1024_homepage = https://github.com/jj1bdx/exs1024
+pkg_exs1024_fetch = git
+pkg_exs1024_repo = https://github.com/jj1bdx/exs1024
+pkg_exs1024_commit = master
+
+PACKAGES += exs64
+pkg_exs64_name = exs64
+pkg_exs64_description = Xorshift64star pseudo random number generator for Erlang.
+pkg_exs64_homepage = https://github.com/jj1bdx/exs64
+pkg_exs64_fetch = git
+pkg_exs64_repo = https://github.com/jj1bdx/exs64
+pkg_exs64_commit = master
+
+PACKAGES += exsplus116
+pkg_exsplus116_name = exsplus116
+pkg_exsplus116_description = Xorshift116plus for Erlang
+pkg_exsplus116_homepage = https://github.com/jj1bdx/exsplus116
+pkg_exsplus116_fetch = git
+pkg_exsplus116_repo = https://github.com/jj1bdx/exsplus116
+pkg_exsplus116_commit = master
+
+PACKAGES += exsplus128
+pkg_exsplus128_name = exsplus128
+pkg_exsplus128_description = Xorshift128plus pseudo random number generator for Erlang.
+pkg_exsplus128_homepage = https://github.com/jj1bdx/exsplus128
+pkg_exsplus128_fetch = git
+pkg_exsplus128_repo = https://github.com/jj1bdx/exsplus128
+pkg_exsplus128_commit = master
+
+PACKAGES += ezmq
+pkg_ezmq_name = ezmq
+pkg_ezmq_description = ZeroMQ implemented in Erlang
+pkg_ezmq_homepage = https://github.com/RoadRunnr/ezmq
+pkg_ezmq_fetch = git
+pkg_ezmq_repo = https://github.com/RoadRunnr/ezmq
+pkg_ezmq_commit = master
+
+PACKAGES += ezmtp
+pkg_ezmtp_name = ezmtp
+pkg_ezmtp_description = ZMTP protocol in pure Erlang.
+pkg_ezmtp_homepage = https://github.com/a13x/ezmtp
+pkg_ezmtp_fetch = git
+pkg_ezmtp_repo = https://github.com/a13x/ezmtp
+pkg_ezmtp_commit = master
+
+PACKAGES += fast_disk_log
+pkg_fast_disk_log_name = fast_disk_log
+pkg_fast_disk_log_description = Pool-based asynchronous Erlang disk logger
+pkg_fast_disk_log_homepage = https://github.com/lpgauth/fast_disk_log
+pkg_fast_disk_log_fetch = git
+pkg_fast_disk_log_repo = https://github.com/lpgauth/fast_disk_log
+pkg_fast_disk_log_commit = master
+
+PACKAGES += feeder
+pkg_feeder_name = feeder
+pkg_feeder_description = Stream parser for RSS and Atom formatted XML feeds.
+pkg_feeder_homepage = https://github.com/michaelnisi/feeder
+pkg_feeder_fetch = git
+pkg_feeder_repo = https://github.com/michaelnisi/feeder
+pkg_feeder_commit = master
+
+PACKAGES += find_crate
+pkg_find_crate_name = find_crate
+pkg_find_crate_description = Find Rust libs and exes in Erlang application priv directory
+pkg_find_crate_homepage = https://github.com/goertzenator/find_crate
+pkg_find_crate_fetch = git
+pkg_find_crate_repo = https://github.com/goertzenator/find_crate
+pkg_find_crate_commit = master
+
+PACKAGES += fix
+pkg_fix_name = fix
+pkg_fix_description = Implementation of the FIX protocol (http://fixprotocol.org/).
+pkg_fix_homepage = https://github.com/maxlapshin/fix
+pkg_fix_fetch = git
+pkg_fix_repo = https://github.com/maxlapshin/fix
+pkg_fix_commit = master
+
+PACKAGES += flower
+pkg_flower_name = flower
+pkg_flower_description = FlowER - an Erlang OpenFlow development platform
+pkg_flower_homepage = https://github.com/travelping/flower
+pkg_flower_fetch = git
+pkg_flower_repo = https://github.com/travelping/flower
+pkg_flower_commit = master
+
+PACKAGES += fn
+pkg_fn_name = fn
+pkg_fn_description = Function utilities for Erlang
+pkg_fn_homepage = https://github.com/reiddraper/fn
+pkg_fn_fetch = git
+pkg_fn_repo = https://github.com/reiddraper/fn
+pkg_fn_commit = master
+
+PACKAGES += folsom
+pkg_folsom_name = folsom
+pkg_folsom_description = Expose Erlang Events and Metrics
+pkg_folsom_homepage = https://github.com/boundary/folsom
+pkg_folsom_fetch = git
+pkg_folsom_repo = https://github.com/boundary/folsom
+pkg_folsom_commit = master
+
+PACKAGES += folsom_cowboy
+pkg_folsom_cowboy_name = folsom_cowboy
+pkg_folsom_cowboy_description = A Cowboy-based Folsom HTTP wrapper.
+pkg_folsom_cowboy_homepage = https://github.com/boundary/folsom_cowboy
+pkg_folsom_cowboy_fetch = git
+pkg_folsom_cowboy_repo = https://github.com/boundary/folsom_cowboy
+pkg_folsom_cowboy_commit = master
+
+PACKAGES += folsomite
+pkg_folsomite_name = folsomite
+pkg_folsomite_description = Blow up your Graphite / Riemann server with Folsom metrics
+pkg_folsomite_homepage = https://github.com/campanja/folsomite
+pkg_folsomite_fetch = git
+pkg_folsomite_repo = https://github.com/campanja/folsomite
+pkg_folsomite_commit = master
+
+PACKAGES += fs
+pkg_fs_name = fs
+pkg_fs_description = Erlang FileSystem Listener
+pkg_fs_homepage = https://github.com/synrc/fs
+pkg_fs_fetch = git
+pkg_fs_repo = https://github.com/synrc/fs
+pkg_fs_commit = master
+
+PACKAGES += fuse
+pkg_fuse_name = fuse
+pkg_fuse_description = A Circuit Breaker for Erlang
+pkg_fuse_homepage = https://github.com/jlouis/fuse
+pkg_fuse_fetch = git
+pkg_fuse_repo = https://github.com/jlouis/fuse
+pkg_fuse_commit = master
+
+PACKAGES += gcm
+pkg_gcm_name = gcm
+pkg_gcm_description = An Erlang application for Google Cloud Messaging
+pkg_gcm_homepage = https://github.com/pdincau/gcm-erlang
+pkg_gcm_fetch = git
+pkg_gcm_repo = https://github.com/pdincau/gcm-erlang
+pkg_gcm_commit = master
+
+PACKAGES += gcprof
+pkg_gcprof_name = gcprof
+pkg_gcprof_description = Garbage Collection profiler for Erlang
+pkg_gcprof_homepage = https://github.com/knutin/gcprof
+pkg_gcprof_fetch = git
+pkg_gcprof_repo = https://github.com/knutin/gcprof
+pkg_gcprof_commit = master
+
+PACKAGES += geas
+pkg_geas_name = geas
+pkg_geas_description = Guess Erlang Application Scattering
+pkg_geas_homepage = https://github.com/crownedgrouse/geas
+pkg_geas_fetch = git
+pkg_geas_repo = https://github.com/crownedgrouse/geas
+pkg_geas_commit = master
+
+PACKAGES += geef
+pkg_geef_name = geef
+pkg_geef_description = Git NEEEEF (Erlang NIF)
+pkg_geef_homepage = https://github.com/carlosmn/geef
+pkg_geef_fetch = git
+pkg_geef_repo = https://github.com/carlosmn/geef
+pkg_geef_commit = master
+
+PACKAGES += gen_coap
+pkg_gen_coap_name = gen_coap
+pkg_gen_coap_description = Generic Erlang CoAP Client/Server
+pkg_gen_coap_homepage = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_fetch = git
+pkg_gen_coap_repo = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_commit = master
+
+PACKAGES += gen_cycle
+pkg_gen_cycle_name = gen_cycle
+pkg_gen_cycle_description = Simple, generic OTP behaviour for recurring tasks
+pkg_gen_cycle_homepage = https://github.com/aerosol/gen_cycle
+pkg_gen_cycle_fetch = git
+pkg_gen_cycle_repo = https://github.com/aerosol/gen_cycle
+pkg_gen_cycle_commit = develop
+
+PACKAGES += gen_icmp
+pkg_gen_icmp_name = gen_icmp
+pkg_gen_icmp_description = Erlang interface to ICMP sockets
+pkg_gen_icmp_homepage = https://github.com/msantos/gen_icmp
+pkg_gen_icmp_fetch = git
+pkg_gen_icmp_repo = https://github.com/msantos/gen_icmp
+pkg_gen_icmp_commit = master
+
+PACKAGES += gen_leader
+pkg_gen_leader_name = gen_leader
+pkg_gen_leader_description = leader election behavior
+pkg_gen_leader_homepage = https://github.com/garret-smith/gen_leader_revival
+pkg_gen_leader_fetch = git
+pkg_gen_leader_repo = https://github.com/garret-smith/gen_leader_revival
+pkg_gen_leader_commit = master
+
+PACKAGES += gen_nb_server
+pkg_gen_nb_server_name = gen_nb_server
+pkg_gen_nb_server_description = OTP behavior for writing non-blocking servers
+pkg_gen_nb_server_homepage = https://github.com/kevsmith/gen_nb_server
+pkg_gen_nb_server_fetch = git
+pkg_gen_nb_server_repo = https://github.com/kevsmith/gen_nb_server
+pkg_gen_nb_server_commit = master
+
+PACKAGES += gen_paxos
+pkg_gen_paxos_name = gen_paxos
+pkg_gen_paxos_description = An Erlang/OTP-style implementation of the PAXOS distributed consensus protocol
+pkg_gen_paxos_homepage = https://github.com/gburd/gen_paxos
+pkg_gen_paxos_fetch = git
+pkg_gen_paxos_repo = https://github.com/gburd/gen_paxos
+pkg_gen_paxos_commit = master
+
+PACKAGES += gen_rpc
+pkg_gen_rpc_name = gen_rpc
+pkg_gen_rpc_description = A scalable RPC library for Erlang-VM based languages
+pkg_gen_rpc_homepage = https://github.com/priestjim/gen_rpc.git
+pkg_gen_rpc_fetch = git
+pkg_gen_rpc_repo = https://github.com/priestjim/gen_rpc.git
+pkg_gen_rpc_commit = master
+
+PACKAGES += gen_smtp
+pkg_gen_smtp_name = gen_smtp
+pkg_gen_smtp_description = A generic Erlang SMTP server and client that can be extended via callback modules
+pkg_gen_smtp_homepage = https://github.com/Vagabond/gen_smtp
+pkg_gen_smtp_fetch = git
+pkg_gen_smtp_repo = https://github.com/Vagabond/gen_smtp
+pkg_gen_smtp_commit = master
+
+PACKAGES += gen_tracker
+pkg_gen_tracker_name = gen_tracker
+pkg_gen_tracker_description = supervisor with ets handling of children and their metadata
+pkg_gen_tracker_homepage = https://github.com/erlyvideo/gen_tracker
+pkg_gen_tracker_fetch = git
+pkg_gen_tracker_repo = https://github.com/erlyvideo/gen_tracker
+pkg_gen_tracker_commit = master
+
+PACKAGES += gen_unix
+pkg_gen_unix_name = gen_unix
+pkg_gen_unix_description = Erlang Unix socket interface
+pkg_gen_unix_homepage = https://github.com/msantos/gen_unix
+pkg_gen_unix_fetch = git
+pkg_gen_unix_repo = https://github.com/msantos/gen_unix
+pkg_gen_unix_commit = master
+
+PACKAGES += geode
+pkg_geode_name = geode
+pkg_geode_description = geohash/proximity lookup in pure, uncut erlang.
+pkg_geode_homepage = https://github.com/bradfordw/geode
+pkg_geode_fetch = git
+pkg_geode_repo = https://github.com/bradfordw/geode
+pkg_geode_commit = master
+
+PACKAGES += getopt
+pkg_getopt_name = getopt
+pkg_getopt_description = Module to parse command line arguments using the GNU getopt syntax
+pkg_getopt_homepage = https://github.com/jcomellas/getopt
+pkg_getopt_fetch = git
+pkg_getopt_repo = https://github.com/jcomellas/getopt
+pkg_getopt_commit = master
+
+PACKAGES += gettext
+pkg_gettext_name = gettext
+pkg_gettext_description = Erlang internationalization library.
+pkg_gettext_homepage = https://github.com/etnt/gettext
+pkg_gettext_fetch = git
+pkg_gettext_repo = https://github.com/etnt/gettext
+pkg_gettext_commit = master
+
+PACKAGES += giallo
+pkg_giallo_name = giallo
+pkg_giallo_description = Small and flexible web framework on top of Cowboy
+pkg_giallo_homepage = https://github.com/kivra/giallo
+pkg_giallo_fetch = git
+pkg_giallo_repo = https://github.com/kivra/giallo
+pkg_giallo_commit = master
+
+PACKAGES += gin
+pkg_gin_name = gin
+pkg_gin_description = Parse transform providing in/2 guard expressions for Erlang
+pkg_gin_homepage = https://github.com/mad-cocktail/gin
+pkg_gin_fetch = git
+pkg_gin_repo = https://github.com/mad-cocktail/gin
+pkg_gin_commit = master
+
+PACKAGES += gitty
+pkg_gitty_name = gitty
+pkg_gitty_description = Git access in erlang
+pkg_gitty_homepage = https://github.com/maxlapshin/gitty
+pkg_gitty_fetch = git
+pkg_gitty_repo = https://github.com/maxlapshin/gitty
+pkg_gitty_commit = master
+
+PACKAGES += gold_fever
+pkg_gold_fever_name = gold_fever
+pkg_gold_fever_description = A Treasure Hunt for Erlangers
+pkg_gold_fever_homepage = https://github.com/inaka/gold_fever
+pkg_gold_fever_fetch = git
+pkg_gold_fever_repo = https://github.com/inaka/gold_fever
+pkg_gold_fever_commit = master
+
+PACKAGES += gpb
+pkg_gpb_name = gpb
+pkg_gpb_description = A Google Protobuf implementation for Erlang
+pkg_gpb_homepage = https://github.com/tomas-abrahamsson/gpb
+pkg_gpb_fetch = git
+pkg_gpb_repo = https://github.com/tomas-abrahamsson/gpb
+pkg_gpb_commit = master
+
+PACKAGES += gproc
+pkg_gproc_name = gproc
+pkg_gproc_description = Extended process registry for Erlang
+pkg_gproc_homepage = https://github.com/uwiger/gproc
+pkg_gproc_fetch = git
+pkg_gproc_repo = https://github.com/uwiger/gproc
+pkg_gproc_commit = master
+
+PACKAGES += grapherl
+pkg_grapherl_name = grapherl
+pkg_grapherl_description = Create graphs of Erlang systems and programs
+pkg_grapherl_homepage = https://github.com/eproxus/grapherl
+pkg_grapherl_fetch = git
+pkg_grapherl_repo = https://github.com/eproxus/grapherl
+pkg_grapherl_commit = master
+
+PACKAGES += grpc
+pkg_grpc_name = grpc
+pkg_grpc_description = gRPC server in Erlang
+pkg_grpc_homepage = https://github.com/Bluehouse-Technology/grpc
+pkg_grpc_fetch = git
+pkg_grpc_repo = https://github.com/Bluehouse-Technology/grpc
+pkg_grpc_commit = master
+
+PACKAGES += grpc_client
+pkg_grpc_client_name = grpc_client
+pkg_grpc_client_description = gRPC client in Erlang
+pkg_grpc_client_homepage = https://github.com/Bluehouse-Technology/grpc_client
+pkg_grpc_client_fetch = git
+pkg_grpc_client_repo = https://github.com/Bluehouse-Technology/grpc_client
+pkg_grpc_client_commit = master
+
+PACKAGES += gun
+pkg_gun_name = gun
+pkg_gun_description = Asynchronous SPDY, HTTP and Websocket client written in Erlang.
+pkg_gun_homepage = http://ninenines.eu
+pkg_gun_fetch = git
+pkg_gun_repo = https://github.com/ninenines/gun
+pkg_gun_commit = master
+
+PACKAGES += gut
+pkg_gut_name = gut
+pkg_gut_description = gut is a template printing, aka scaffolding, tool for Erlang. Like rails generate or yeoman
+pkg_gut_homepage = https://github.com/unbalancedparentheses/gut
+pkg_gut_fetch = git
+pkg_gut_repo = https://github.com/unbalancedparentheses/gut
+pkg_gut_commit = master
+
+PACKAGES += hackney
+pkg_hackney_name = hackney
+pkg_hackney_description = simple HTTP client in Erlang
+pkg_hackney_homepage = https://github.com/benoitc/hackney
+pkg_hackney_fetch = git
+pkg_hackney_repo = https://github.com/benoitc/hackney
+pkg_hackney_commit = master
+
+PACKAGES += hamcrest
+pkg_hamcrest_name = hamcrest
+pkg_hamcrest_description = Erlang port of Hamcrest
+pkg_hamcrest_homepage = https://github.com/hyperthunk/hamcrest-erlang
+pkg_hamcrest_fetch = git
+pkg_hamcrest_repo = https://github.com/hyperthunk/hamcrest-erlang
+pkg_hamcrest_commit = master
+
+PACKAGES += hanoidb
+pkg_hanoidb_name = hanoidb
+pkg_hanoidb_description = Erlang LSM BTree Storage
+pkg_hanoidb_homepage = https://github.com/krestenkrab/hanoidb
+pkg_hanoidb_fetch = git
+pkg_hanoidb_repo = https://github.com/krestenkrab/hanoidb
+pkg_hanoidb_commit = master
+
+PACKAGES += hottub
+pkg_hottub_name = hottub
+pkg_hottub_description = Permanent Erlang Worker Pool
+pkg_hottub_homepage = https://github.com/bfrog/hottub
+pkg_hottub_fetch = git
+pkg_hottub_repo = https://github.com/bfrog/hottub
+pkg_hottub_commit = master
+
+PACKAGES += hpack
+pkg_hpack_name = hpack
+pkg_hpack_description = HPACK Implementation for Erlang
+pkg_hpack_homepage = https://github.com/joedevivo/hpack
+pkg_hpack_fetch = git
+pkg_hpack_repo = https://github.com/joedevivo/hpack
+pkg_hpack_commit = master
+
+PACKAGES += hyper
+pkg_hyper_name = hyper
+pkg_hyper_description = Erlang implementation of HyperLogLog
+pkg_hyper_homepage = https://github.com/GameAnalytics/hyper
+pkg_hyper_fetch = git
+pkg_hyper_repo = https://github.com/GameAnalytics/hyper
+pkg_hyper_commit = master
+
+PACKAGES += i18n
+pkg_i18n_name = i18n
+pkg_i18n_description = International components for unicode from Erlang (unicode, date, string, number, format, locale, localization, transliteration, icu4e)
+pkg_i18n_homepage = https://github.com/erlang-unicode/i18n
+pkg_i18n_fetch = git
+pkg_i18n_repo = https://github.com/erlang-unicode/i18n
+pkg_i18n_commit = master
+
+PACKAGES += ibrowse
+pkg_ibrowse_name = ibrowse
+pkg_ibrowse_description = Erlang HTTP client
+pkg_ibrowse_homepage = https://github.com/cmullaparthi/ibrowse
+pkg_ibrowse_fetch = git
+pkg_ibrowse_repo = https://github.com/cmullaparthi/ibrowse
+pkg_ibrowse_commit = master
+
+PACKAGES += idna
+pkg_idna_name = idna
+pkg_idna_description = Erlang IDNA lib
+pkg_idna_homepage = https://github.com/benoitc/erlang-idna
+pkg_idna_fetch = git
+pkg_idna_repo = https://github.com/benoitc/erlang-idna
+pkg_idna_commit = master
+
+PACKAGES += ierlang
+pkg_ierlang_name = ierlang
+pkg_ierlang_description = An Erlang language kernel for IPython.
+pkg_ierlang_homepage = https://github.com/robbielynch/ierlang
+pkg_ierlang_fetch = git
+pkg_ierlang_repo = https://github.com/robbielynch/ierlang
+pkg_ierlang_commit = master
+
+PACKAGES += iota
+pkg_iota_name = iota
+pkg_iota_description = iota (Inter-dependency Objective Testing Apparatus) - a tool to enforce clean separation of responsibilities in Erlang code
+pkg_iota_homepage = https://github.com/jpgneves/iota
+pkg_iota_fetch = git
+pkg_iota_repo = https://github.com/jpgneves/iota
+pkg_iota_commit = master
+
+PACKAGES += irc_lib
+pkg_irc_lib_name = irc_lib
+pkg_irc_lib_description = Erlang irc client library
+pkg_irc_lib_homepage = https://github.com/OtpChatBot/irc_lib
+pkg_irc_lib_fetch = git
+pkg_irc_lib_repo = https://github.com/OtpChatBot/irc_lib
+pkg_irc_lib_commit = master
+
+PACKAGES += ircd
+pkg_ircd_name = ircd
+pkg_ircd_description = A pluggable IRC daemon application/library for Erlang.
+pkg_ircd_homepage = https://github.com/tonyg/erlang-ircd
+pkg_ircd_fetch = git
+pkg_ircd_repo = https://github.com/tonyg/erlang-ircd
+pkg_ircd_commit = master
+
+PACKAGES += iris
+pkg_iris_name = iris
+pkg_iris_description = Iris Erlang binding
+pkg_iris_homepage = https://github.com/project-iris/iris-erl
+pkg_iris_fetch = git
+pkg_iris_repo = https://github.com/project-iris/iris-erl
+pkg_iris_commit = master
+
+PACKAGES += iso8601
+pkg_iso8601_name = iso8601
+pkg_iso8601_description = Erlang ISO 8601 date formatter/parser
+pkg_iso8601_homepage = https://github.com/seansawyer/erlang_iso8601
+pkg_iso8601_fetch = git
+pkg_iso8601_repo = https://github.com/seansawyer/erlang_iso8601
+pkg_iso8601_commit = master
+
+PACKAGES += jamdb_sybase
+pkg_jamdb_sybase_name = jamdb_sybase
+pkg_jamdb_sybase_description = Erlang driver for SAP Sybase ASE
+pkg_jamdb_sybase_homepage = https://github.com/erlangbureau/jamdb_sybase
+pkg_jamdb_sybase_fetch = git
+pkg_jamdb_sybase_repo = https://github.com/erlangbureau/jamdb_sybase
+pkg_jamdb_sybase_commit = master
+
+PACKAGES += jerg
+pkg_jerg_name = jerg
+pkg_jerg_description = JSON Schema to Erlang Records Generator
+pkg_jerg_homepage = https://github.com/ddossot/jerg
+pkg_jerg_fetch = git
+pkg_jerg_repo = https://github.com/ddossot/jerg
+pkg_jerg_commit = master
+
+PACKAGES += jesse
+pkg_jesse_name = jesse
+pkg_jesse_description = jesse (JSon Schema Erlang) is an implementation of a json schema validator for Erlang.
+pkg_jesse_homepage = https://github.com/for-GET/jesse
+pkg_jesse_fetch = git
+pkg_jesse_repo = https://github.com/for-GET/jesse
+pkg_jesse_commit = master
+
+PACKAGES += jiffy
+pkg_jiffy_name = jiffy
+pkg_jiffy_description = JSON NIFs for Erlang.
+pkg_jiffy_homepage = https://github.com/davisp/jiffy
+pkg_jiffy_fetch = git
+pkg_jiffy_repo = https://github.com/davisp/jiffy
+pkg_jiffy_commit = master
+
+PACKAGES += jiffy_v
+pkg_jiffy_v_name = jiffy_v
+pkg_jiffy_v_description = JSON validation utility
+pkg_jiffy_v_homepage = https://github.com/shizzard/jiffy-v
+pkg_jiffy_v_fetch = git
+pkg_jiffy_v_repo = https://github.com/shizzard/jiffy-v
+pkg_jiffy_v_commit = master
+
+PACKAGES += jobs
+pkg_jobs_name = jobs
+pkg_jobs_description = a Job scheduler for load regulation
+pkg_jobs_homepage = https://github.com/esl/jobs
+pkg_jobs_fetch = git
+pkg_jobs_repo = https://github.com/esl/jobs
+pkg_jobs_commit = master
+
+PACKAGES += joxa
+pkg_joxa_name = joxa
+pkg_joxa_description = A Modern Lisp for the Erlang VM
+pkg_joxa_homepage = https://github.com/joxa/joxa
+pkg_joxa_fetch = git
+pkg_joxa_repo = https://github.com/joxa/joxa
+pkg_joxa_commit = master
+
+PACKAGES += json
+pkg_json_name = json
+pkg_json_description = a high level json library for erlang (17.0+)
+pkg_json_homepage = https://github.com/talentdeficit/json
+pkg_json_fetch = git
+pkg_json_repo = https://github.com/talentdeficit/json
+pkg_json_commit = master
+
+PACKAGES += json_rec
+pkg_json_rec_name = json_rec
+pkg_json_rec_description = JSON to erlang record
+pkg_json_rec_homepage = https://github.com/justinkirby/json_rec
+pkg_json_rec_fetch = git
+pkg_json_rec_repo = https://github.com/justinkirby/json_rec
+pkg_json_rec_commit = master
+
+PACKAGES += jsone
+pkg_jsone_name = jsone
+pkg_jsone_description = An Erlang library for encoding and decoding JSON data.
+pkg_jsone_homepage = https://github.com/sile/jsone.git
+pkg_jsone_fetch = git
+pkg_jsone_repo = https://github.com/sile/jsone.git
+pkg_jsone_commit = master
+
+PACKAGES += jsonerl
+pkg_jsonerl_name = jsonerl
+pkg_jsonerl_description = yet another but slightly different erlang <-> json encoder/decoder
+pkg_jsonerl_homepage = https://github.com/lambder/jsonerl
+pkg_jsonerl_fetch = git
+pkg_jsonerl_repo = https://github.com/lambder/jsonerl
+pkg_jsonerl_commit = master
+
+PACKAGES += jsonpath
+pkg_jsonpath_name = jsonpath
+pkg_jsonpath_description = Fast Erlang JSON data retrieval and updates via javascript-like notation
+pkg_jsonpath_homepage = https://github.com/GeneStevens/jsonpath
+pkg_jsonpath_fetch = git
+pkg_jsonpath_repo = https://github.com/GeneStevens/jsonpath
+pkg_jsonpath_commit = master
+
+PACKAGES += jsonx
+pkg_jsonx_name = jsonx
+pkg_jsonx_description = JSONX is an Erlang library, written in C, for efficiently decoding and encoding JSON.
+pkg_jsonx_homepage = https://github.com/iskra/jsonx
+pkg_jsonx_fetch = git
+pkg_jsonx_repo = https://github.com/iskra/jsonx
+pkg_jsonx_commit = master
+
+PACKAGES += jsx
+pkg_jsx_name = jsx
+pkg_jsx_description = An Erlang application for consuming, producing and manipulating JSON.
+pkg_jsx_homepage = https://github.com/talentdeficit/jsx
+pkg_jsx_fetch = git
+pkg_jsx_repo = https://github.com/talentdeficit/jsx
+pkg_jsx_commit = master
+
+PACKAGES += kafka
+pkg_kafka_name = kafka
+pkg_kafka_description = Kafka consumer and producer in Erlang
+pkg_kafka_homepage = https://github.com/wooga/kafka-erlang
+pkg_kafka_fetch = git
+pkg_kafka_repo = https://github.com/wooga/kafka-erlang
+pkg_kafka_commit = master
+
+PACKAGES += kafka_protocol
+pkg_kafka_protocol_name = kafka_protocol
+pkg_kafka_protocol_description = Kafka protocol Erlang library
+pkg_kafka_protocol_homepage = https://github.com/klarna/kafka_protocol
+pkg_kafka_protocol_fetch = git
+pkg_kafka_protocol_repo = https://github.com/klarna/kafka_protocol.git
+pkg_kafka_protocol_commit = master
+
+PACKAGES += kai
+pkg_kai_name = kai
+pkg_kai_description = DHT storage by Takeshi Inoue
+pkg_kai_homepage = https://github.com/synrc/kai
+pkg_kai_fetch = git
+pkg_kai_repo = https://github.com/synrc/kai
+pkg_kai_commit = master
+
+PACKAGES += katja
+pkg_katja_name = katja
+pkg_katja_description = A simple Riemann client written in Erlang.
+pkg_katja_homepage = https://github.com/nifoc/katja
+pkg_katja_fetch = git
+pkg_katja_repo = https://github.com/nifoc/katja
+pkg_katja_commit = master
+
+PACKAGES += kdht
+pkg_kdht_name = kdht
+pkg_kdht_description = kdht is an erlang DHT implementation
+pkg_kdht_homepage = https://github.com/kevinlynx/kdht
+pkg_kdht_fetch = git
+pkg_kdht_repo = https://github.com/kevinlynx/kdht
+pkg_kdht_commit = master
+
+PACKAGES += key2value
+pkg_key2value_name = key2value
+pkg_key2value_description = Erlang 2-way map
+pkg_key2value_homepage = https://github.com/okeuday/key2value
+pkg_key2value_fetch = git
+pkg_key2value_repo = https://github.com/okeuday/key2value
+pkg_key2value_commit = master
+
+PACKAGES += keys1value
+pkg_keys1value_name = keys1value
+pkg_keys1value_description = Erlang set associative map for key lists
+pkg_keys1value_homepage = https://github.com/okeuday/keys1value
+pkg_keys1value_fetch = git
+pkg_keys1value_repo = https://github.com/okeuday/keys1value
+pkg_keys1value_commit = master
+
+PACKAGES += kinetic
+pkg_kinetic_name = kinetic
+pkg_kinetic_description = Erlang Kinesis Client
+pkg_kinetic_homepage = https://github.com/AdRoll/kinetic
+pkg_kinetic_fetch = git
+pkg_kinetic_repo = https://github.com/AdRoll/kinetic
+pkg_kinetic_commit = master
+
+PACKAGES += kjell
+pkg_kjell_name = kjell
+pkg_kjell_description = Erlang Shell
+pkg_kjell_homepage = https://github.com/karlll/kjell
+pkg_kjell_fetch = git
+pkg_kjell_repo = https://github.com/karlll/kjell
+pkg_kjell_commit = master
+
+PACKAGES += kraken
+pkg_kraken_name = kraken
+pkg_kraken_description = Distributed Pubsub Server for Realtime Apps
+pkg_kraken_homepage = https://github.com/Asana/kraken
+pkg_kraken_fetch = git
+pkg_kraken_repo = https://github.com/Asana/kraken
+pkg_kraken_commit = master
+
+PACKAGES += kucumberl
+pkg_kucumberl_name = kucumberl
+pkg_kucumberl_description = A pure-Erlang, open-source implementation of Cucumber
+pkg_kucumberl_homepage = https://github.com/openshine/kucumberl
+pkg_kucumberl_fetch = git
+pkg_kucumberl_repo = https://github.com/openshine/kucumberl
+pkg_kucumberl_commit = master
+
+PACKAGES += kvc
+pkg_kvc_name = kvc
+pkg_kvc_description = KVC - Key Value Coding for Erlang data structures
+pkg_kvc_homepage = https://github.com/etrepum/kvc
+pkg_kvc_fetch = git
+pkg_kvc_repo = https://github.com/etrepum/kvc
+pkg_kvc_commit = master
+
+PACKAGES += kvlists
+pkg_kvlists_name = kvlists
+pkg_kvlists_description = Lists of key-value pairs (decoded JSON) in Erlang
+pkg_kvlists_homepage = https://github.com/jcomellas/kvlists
+pkg_kvlists_fetch = git
+pkg_kvlists_repo = https://github.com/jcomellas/kvlists
+pkg_kvlists_commit = master
+
+PACKAGES += kvs
+pkg_kvs_name = kvs
+pkg_kvs_description = Container and Iterator
+pkg_kvs_homepage = https://github.com/synrc/kvs
+pkg_kvs_fetch = git
+pkg_kvs_repo = https://github.com/synrc/kvs
+pkg_kvs_commit = master
+
+PACKAGES += lager
+pkg_lager_name = lager
+pkg_lager_description = A logging framework for Erlang/OTP.
+pkg_lager_homepage = https://github.com/erlang-lager/lager
+pkg_lager_fetch = git
+pkg_lager_repo = https://github.com/erlang-lager/lager
+pkg_lager_commit = master
+
+PACKAGES += lager_amqp_backend
+pkg_lager_amqp_backend_name = lager_amqp_backend
+pkg_lager_amqp_backend_description = AMQP RabbitMQ Lager backend
+pkg_lager_amqp_backend_homepage = https://github.com/jbrisbin/lager_amqp_backend
+pkg_lager_amqp_backend_fetch = git
+pkg_lager_amqp_backend_repo = https://github.com/jbrisbin/lager_amqp_backend
+pkg_lager_amqp_backend_commit = master
+
+PACKAGES += lager_syslog
+pkg_lager_syslog_name = lager_syslog
+pkg_lager_syslog_description = Syslog backend for lager
+pkg_lager_syslog_homepage = https://github.com/erlang-lager/lager_syslog
+pkg_lager_syslog_fetch = git
+pkg_lager_syslog_repo = https://github.com/erlang-lager/lager_syslog
+pkg_lager_syslog_commit = master
+
+PACKAGES += lambdapad
+pkg_lambdapad_name = lambdapad
+pkg_lambdapad_description = Static site generator using Erlang. Yes, Erlang.
+pkg_lambdapad_homepage = https://github.com/gar1t/lambdapad
+pkg_lambdapad_fetch = git
+pkg_lambdapad_repo = https://github.com/gar1t/lambdapad
+pkg_lambdapad_commit = master
+
+PACKAGES += lasp
+pkg_lasp_name = lasp
+pkg_lasp_description = A Language for Distributed, Eventually Consistent Computations
+pkg_lasp_homepage = http://lasp-lang.org/
+pkg_lasp_fetch = git
+pkg_lasp_repo = https://github.com/lasp-lang/lasp
+pkg_lasp_commit = master
+
+PACKAGES += lasse
+pkg_lasse_name = lasse
+pkg_lasse_description = SSE handler for Cowboy
+pkg_lasse_homepage = https://github.com/inaka/lasse
+pkg_lasse_fetch = git
+pkg_lasse_repo = https://github.com/inaka/lasse
+pkg_lasse_commit = master
+
+PACKAGES += ldap
+pkg_ldap_name = ldap
+pkg_ldap_description = LDAP server written in Erlang
+pkg_ldap_homepage = https://github.com/spawnproc/ldap
+pkg_ldap_fetch = git
+pkg_ldap_repo = https://github.com/spawnproc/ldap
+pkg_ldap_commit = master
+
+PACKAGES += lethink
+pkg_lethink_name = lethink
+pkg_lethink_description = erlang driver for rethinkdb
+pkg_lethink_homepage = https://github.com/taybin/lethink
+pkg_lethink_fetch = git
+pkg_lethink_repo = https://github.com/taybin/lethink
+pkg_lethink_commit = master
+
+PACKAGES += lfe
+pkg_lfe_name = lfe
+pkg_lfe_description = Lisp Flavoured Erlang (LFE)
+pkg_lfe_homepage = https://github.com/rvirding/lfe
+pkg_lfe_fetch = git
+pkg_lfe_repo = https://github.com/rvirding/lfe
+pkg_lfe_commit = master
+
+PACKAGES += ling
+pkg_ling_name = ling
+pkg_ling_description = Erlang on Xen
+pkg_ling_homepage = https://github.com/cloudozer/ling
+pkg_ling_fetch = git
+pkg_ling_repo = https://github.com/cloudozer/ling
+pkg_ling_commit = master
+
+PACKAGES += live
+pkg_live_name = live
+pkg_live_description = Automated module and configuration reloader.
+pkg_live_homepage = http://ninenines.eu
+pkg_live_fetch = git
+pkg_live_repo = https://github.com/ninenines/live
+pkg_live_commit = master
+
+PACKAGES += lmq
+pkg_lmq_name = lmq
+pkg_lmq_description = Lightweight Message Queue
+pkg_lmq_homepage = https://github.com/iij/lmq
+pkg_lmq_fetch = git
+pkg_lmq_repo = https://github.com/iij/lmq
+pkg_lmq_commit = master
+
+PACKAGES += locker
+pkg_locker_name = locker
+pkg_locker_description = Atomic distributed 'check and set' for short-lived keys
+pkg_locker_homepage = https://github.com/wooga/locker
+pkg_locker_fetch = git
+pkg_locker_repo = https://github.com/wooga/locker
+pkg_locker_commit = master
+
+PACKAGES += locks
+pkg_locks_name = locks
+pkg_locks_description = A scalable, deadlock-resolving resource locker
+pkg_locks_homepage = https://github.com/uwiger/locks
+pkg_locks_fetch = git
+pkg_locks_repo = https://github.com/uwiger/locks
+pkg_locks_commit = master
+
+PACKAGES += log4erl
+pkg_log4erl_name = log4erl
+pkg_log4erl_description = A logger for erlang in the spirit of Log4J.
+pkg_log4erl_homepage = https://github.com/ahmednawras/log4erl
+pkg_log4erl_fetch = git
+pkg_log4erl_repo = https://github.com/ahmednawras/log4erl
+pkg_log4erl_commit = master
+
+PACKAGES += lol
+pkg_lol_name = lol
+pkg_lol_description = Lisp on erLang, and programming is fun again
+pkg_lol_homepage = https://github.com/b0oh/lol
+pkg_lol_fetch = git
+pkg_lol_repo = https://github.com/b0oh/lol
+pkg_lol_commit = master
+
+PACKAGES += lucid
+pkg_lucid_name = lucid
+pkg_lucid_description = HTTP/2 server written in Erlang
+pkg_lucid_homepage = https://github.com/tatsuhiro-t/lucid
+pkg_lucid_fetch = git
+pkg_lucid_repo = https://github.com/tatsuhiro-t/lucid
+pkg_lucid_commit = master
+
+PACKAGES += luerl
+pkg_luerl_name = luerl
+pkg_luerl_description = Lua in Erlang
+pkg_luerl_homepage = https://github.com/rvirding/luerl
+pkg_luerl_fetch = git
+pkg_luerl_repo = https://github.com/rvirding/luerl
+pkg_luerl_commit = develop
+
+PACKAGES += luwak
+pkg_luwak_name = luwak
+pkg_luwak_description = Large-object storage interface for Riak
+pkg_luwak_homepage = https://github.com/basho/luwak
+pkg_luwak_fetch = git
+pkg_luwak_repo = https://github.com/basho/luwak
+pkg_luwak_commit = master
+
+PACKAGES += lux
+pkg_lux_name = lux
+pkg_lux_description = Lux (LUcid eXpect scripting) simplifies test automation and provides an Expect-style execution of commands
+pkg_lux_homepage = https://github.com/hawk/lux
+pkg_lux_fetch = git
+pkg_lux_repo = https://github.com/hawk/lux
+pkg_lux_commit = master
+
+PACKAGES += machi
+pkg_machi_name = machi
+pkg_machi_description = Machi file store
+pkg_machi_homepage = https://github.com/basho/machi
+pkg_machi_fetch = git
+pkg_machi_repo = https://github.com/basho/machi
+pkg_machi_commit = master
+
+PACKAGES += mad
+pkg_mad_name = mad
+pkg_mad_description = Small and Fast Rebar Replacement
+pkg_mad_homepage = https://github.com/synrc/mad
+pkg_mad_fetch = git
+pkg_mad_repo = https://github.com/synrc/mad
+pkg_mad_commit = master
+
+PACKAGES += marina
+pkg_marina_name = marina
+pkg_marina_description = Non-blocking Erlang Cassandra CQL3 client
+pkg_marina_homepage = https://github.com/lpgauth/marina
+pkg_marina_fetch = git
+pkg_marina_repo = https://github.com/lpgauth/marina
+pkg_marina_commit = master
+
+PACKAGES += mavg
+pkg_mavg_name = mavg
+pkg_mavg_description = Erlang :: Exponential moving average library
+pkg_mavg_homepage = https://github.com/EchoTeam/mavg
+pkg_mavg_fetch = git
+pkg_mavg_repo = https://github.com/EchoTeam/mavg
+pkg_mavg_commit = master
+
+PACKAGES += mc_erl
+pkg_mc_erl_name = mc_erl
+pkg_mc_erl_description = mc-erl is a server for Minecraft 1.4.7 written in Erlang.
+pkg_mc_erl_homepage = https://github.com/clonejo/mc-erl
+pkg_mc_erl_fetch = git
+pkg_mc_erl_repo = https://github.com/clonejo/mc-erl
+pkg_mc_erl_commit = master
+
+PACKAGES += mcd
+pkg_mcd_name = mcd
+pkg_mcd_description = Fast memcached protocol client in pure Erlang
+pkg_mcd_homepage = https://github.com/EchoTeam/mcd
+pkg_mcd_fetch = git
+pkg_mcd_repo = https://github.com/EchoTeam/mcd
+pkg_mcd_commit = master
+
+PACKAGES += mcerlang
+pkg_mcerlang_name = mcerlang
+pkg_mcerlang_description = The McErlang model checker for Erlang
+pkg_mcerlang_homepage = https://github.com/fredlund/McErlang
+pkg_mcerlang_fetch = git
+pkg_mcerlang_repo = https://github.com/fredlund/McErlang
+pkg_mcerlang_commit = master
+
+PACKAGES += meck
+pkg_meck_name = meck
+pkg_meck_description = A mocking library for Erlang
+pkg_meck_homepage = https://github.com/eproxus/meck
+pkg_meck_fetch = git
+pkg_meck_repo = https://github.com/eproxus/meck
+pkg_meck_commit = master
+
+PACKAGES += mekao
+pkg_mekao_name = mekao
+pkg_mekao_description = SQL constructor
+pkg_mekao_homepage = https://github.com/ddosia/mekao
+pkg_mekao_fetch = git
+pkg_mekao_repo = https://github.com/ddosia/mekao
+pkg_mekao_commit = master
+
+PACKAGES += memo
+pkg_memo_name = memo
+pkg_memo_description = Erlang memoization server
+pkg_memo_homepage = https://github.com/tuncer/memo
+pkg_memo_fetch = git
+pkg_memo_repo = https://github.com/tuncer/memo
+pkg_memo_commit = master
+
+PACKAGES += merge_index
+pkg_merge_index_name = merge_index
+pkg_merge_index_description = MergeIndex is an Erlang library for storing ordered sets on disk. It is very similar to an SSTable (in Google's Bigtable) or an HFile (in Hadoop).
+pkg_merge_index_homepage = https://github.com/basho/merge_index
+pkg_merge_index_fetch = git
+pkg_merge_index_repo = https://github.com/basho/merge_index
+pkg_merge_index_commit = master
+
+PACKAGES += merl
+pkg_merl_name = merl
+pkg_merl_description = Metaprogramming in Erlang
+pkg_merl_homepage = https://github.com/richcarl/merl
+pkg_merl_fetch = git
+pkg_merl_repo = https://github.com/richcarl/merl
+pkg_merl_commit = master
+
+PACKAGES += mimerl
+pkg_mimerl_name = mimerl
+pkg_mimerl_description = library to handle mimetypes
+pkg_mimerl_homepage = https://github.com/benoitc/mimerl
+pkg_mimerl_fetch = git
+pkg_mimerl_repo = https://github.com/benoitc/mimerl
+pkg_mimerl_commit = master
+
+PACKAGES += mimetypes
+pkg_mimetypes_name = mimetypes
+pkg_mimetypes_description = Erlang MIME types library
+pkg_mimetypes_homepage = https://github.com/spawngrid/mimetypes
+pkg_mimetypes_fetch = git
+pkg_mimetypes_repo = https://github.com/spawngrid/mimetypes
+pkg_mimetypes_commit = master
+
+PACKAGES += mixer
+pkg_mixer_name = mixer
+pkg_mixer_description = Mix in functions from other modules
+pkg_mixer_homepage = https://github.com/chef/mixer
+pkg_mixer_fetch = git
+pkg_mixer_repo = https://github.com/chef/mixer
+pkg_mixer_commit = master
+
+PACKAGES += mochiweb
+pkg_mochiweb_name = mochiweb
+pkg_mochiweb_description = MochiWeb is an Erlang library for building lightweight HTTP servers.
+pkg_mochiweb_homepage = https://github.com/mochi/mochiweb
+pkg_mochiweb_fetch = git
+pkg_mochiweb_repo = https://github.com/mochi/mochiweb
+pkg_mochiweb_commit = master
+
+PACKAGES += mochiweb_xpath
+pkg_mochiweb_xpath_name = mochiweb_xpath
+pkg_mochiweb_xpath_description = XPath support for mochiweb's html parser
+pkg_mochiweb_xpath_homepage = https://github.com/retnuh/mochiweb_xpath
+pkg_mochiweb_xpath_fetch = git
+pkg_mochiweb_xpath_repo = https://github.com/retnuh/mochiweb_xpath
+pkg_mochiweb_xpath_commit = master
+
+PACKAGES += mockgyver
+pkg_mockgyver_name = mockgyver
+pkg_mockgyver_description = A mocking library for Erlang
+pkg_mockgyver_homepage = https://github.com/klajo/mockgyver
+pkg_mockgyver_fetch = git
+pkg_mockgyver_repo = https://github.com/klajo/mockgyver
+pkg_mockgyver_commit = master
+
+PACKAGES += modlib
+pkg_modlib_name = modlib
+pkg_modlib_description = Web framework based on Erlang's inets httpd
+pkg_modlib_homepage = https://github.com/gar1t/modlib
+pkg_modlib_fetch = git
+pkg_modlib_repo = https://github.com/gar1t/modlib
+pkg_modlib_commit = master
+
+PACKAGES += mongodb
+pkg_mongodb_name = mongodb
+pkg_mongodb_description = MongoDB driver for Erlang
+pkg_mongodb_homepage = https://github.com/comtihon/mongodb-erlang
+pkg_mongodb_fetch = git
+pkg_mongodb_repo = https://github.com/comtihon/mongodb-erlang
+pkg_mongodb_commit = master
+
+PACKAGES += mongooseim
+pkg_mongooseim_name = mongooseim
+pkg_mongooseim_description = Jabber / XMPP server with focus on performance and scalability, by Erlang Solutions
+pkg_mongooseim_homepage = https://www.erlang-solutions.com/products/mongooseim-massively-scalable-ejabberd-platform
+pkg_mongooseim_fetch = git
+pkg_mongooseim_repo = https://github.com/esl/MongooseIM
+pkg_mongooseim_commit = master
+
+PACKAGES += moyo
+pkg_moyo_name = moyo
+pkg_moyo_description = Erlang utility functions library
+pkg_moyo_homepage = https://github.com/dwango/moyo
+pkg_moyo_fetch = git
+pkg_moyo_repo = https://github.com/dwango/moyo
+pkg_moyo_commit = master
+
+PACKAGES += msgpack
+pkg_msgpack_name = msgpack
+pkg_msgpack_description = MessagePack (de)serializer implementation for Erlang
+pkg_msgpack_homepage = https://github.com/msgpack/msgpack-erlang
+pkg_msgpack_fetch = git
+pkg_msgpack_repo = https://github.com/msgpack/msgpack-erlang
+pkg_msgpack_commit = master
+
+PACKAGES += mu2
+pkg_mu2_name = mu2
+pkg_mu2_description = Erlang mutation testing tool
+pkg_mu2_homepage = https://github.com/ramsay-t/mu2
+pkg_mu2_fetch = git
+pkg_mu2_repo = https://github.com/ramsay-t/mu2
+pkg_mu2_commit = master
+
+PACKAGES += mustache
+pkg_mustache_name = mustache
+pkg_mustache_description = Mustache template engine for Erlang.
+pkg_mustache_homepage = https://github.com/mojombo/mustache.erl
+pkg_mustache_fetch = git
+pkg_mustache_repo = https://github.com/mojombo/mustache.erl
+pkg_mustache_commit = master
+
+PACKAGES += myproto
+pkg_myproto_name = myproto
+pkg_myproto_description = MySQL Server Protocol in Erlang
+pkg_myproto_homepage = https://github.com/altenwald/myproto
+pkg_myproto_fetch = git
+pkg_myproto_repo = https://github.com/altenwald/myproto
+pkg_myproto_commit = master
+
+PACKAGES += mysql
+pkg_mysql_name = mysql
+pkg_mysql_description = MySQL client library for Erlang/OTP
+pkg_mysql_homepage = https://github.com/mysql-otp/mysql-otp
+pkg_mysql_fetch = git
+pkg_mysql_repo = https://github.com/mysql-otp/mysql-otp
+pkg_mysql_commit = 1.5.1
+
+PACKAGES += n2o
+pkg_n2o_name = n2o
+pkg_n2o_description = WebSocket Application Server
+pkg_n2o_homepage = https://github.com/5HT/n2o
+pkg_n2o_fetch = git
+pkg_n2o_repo = https://github.com/5HT/n2o
+pkg_n2o_commit = master
+
+PACKAGES += nat_upnp
+pkg_nat_upnp_name = nat_upnp
+pkg_nat_upnp_description = Erlang library to map an internal port to an external one using UPnP IGD
+pkg_nat_upnp_homepage = https://github.com/benoitc/nat_upnp
+pkg_nat_upnp_fetch = git
+pkg_nat_upnp_repo = https://github.com/benoitc/nat_upnp
+pkg_nat_upnp_commit = master
+
+PACKAGES += neo4j
+pkg_neo4j_name = neo4j
+pkg_neo4j_description = Erlang client library for Neo4J.
+pkg_neo4j_homepage = https://github.com/dmitriid/neo4j-erlang
+pkg_neo4j_fetch = git
+pkg_neo4j_repo = https://github.com/dmitriid/neo4j-erlang
+pkg_neo4j_commit = master
+
+PACKAGES += neotoma
+pkg_neotoma_name = neotoma
+pkg_neotoma_description = Erlang library and packrat parser-generator for parsing expression grammars.
+pkg_neotoma_homepage = https://github.com/seancribbs/neotoma
+pkg_neotoma_fetch = git
+pkg_neotoma_repo = https://github.com/seancribbs/neotoma
+pkg_neotoma_commit = master
+
+PACKAGES += newrelic
+pkg_newrelic_name = newrelic
+pkg_newrelic_description = Erlang library for sending metrics to New Relic
+pkg_newrelic_homepage = https://github.com/wooga/newrelic-erlang
+pkg_newrelic_fetch = git
+pkg_newrelic_repo = https://github.com/wooga/newrelic-erlang
+pkg_newrelic_commit = master
+
+PACKAGES += nifty
+pkg_nifty_name = nifty
+pkg_nifty_description = Erlang NIF wrapper generator
+pkg_nifty_homepage = https://github.com/parapluu/nifty
+pkg_nifty_fetch = git
+pkg_nifty_repo = https://github.com/parapluu/nifty
+pkg_nifty_commit = master
+
+PACKAGES += nitrogen_core
+pkg_nitrogen_core_name = nitrogen_core
+pkg_nitrogen_core_description = The core Nitrogen library.
+pkg_nitrogen_core_homepage = http://nitrogenproject.com/
+pkg_nitrogen_core_fetch = git
+pkg_nitrogen_core_repo = https://github.com/nitrogen/nitrogen_core
+pkg_nitrogen_core_commit = master
+
+PACKAGES += nkbase
+pkg_nkbase_name = nkbase
+pkg_nkbase_description = NkBASE distributed database
+pkg_nkbase_homepage = https://github.com/Nekso/nkbase
+pkg_nkbase_fetch = git
+pkg_nkbase_repo = https://github.com/Nekso/nkbase
+pkg_nkbase_commit = develop
+
+PACKAGES += nkdocker
+pkg_nkdocker_name = nkdocker
+pkg_nkdocker_description = Erlang Docker client
+pkg_nkdocker_homepage = https://github.com/Nekso/nkdocker
+pkg_nkdocker_fetch = git
+pkg_nkdocker_repo = https://github.com/Nekso/nkdocker
+pkg_nkdocker_commit = master
+
+PACKAGES += nkpacket
+pkg_nkpacket_name = nkpacket
+pkg_nkpacket_description = Generic Erlang transport layer
+pkg_nkpacket_homepage = https://github.com/Nekso/nkpacket
+pkg_nkpacket_fetch = git
+pkg_nkpacket_repo = https://github.com/Nekso/nkpacket
+pkg_nkpacket_commit = master
+
+PACKAGES += nksip
+pkg_nksip_name = nksip
+pkg_nksip_description = Erlang SIP application server
+pkg_nksip_homepage = https://github.com/kalta/nksip
+pkg_nksip_fetch = git
+pkg_nksip_repo = https://github.com/kalta/nksip
+pkg_nksip_commit = master
+
+PACKAGES += nodefinder
+pkg_nodefinder_name = nodefinder
+pkg_nodefinder_description = automatic node discovery via UDP multicast
+pkg_nodefinder_homepage = https://github.com/erlanger/nodefinder
+pkg_nodefinder_fetch = git
+pkg_nodefinder_repo = https://github.com/okeuday/nodefinder
+pkg_nodefinder_commit = master
+
+PACKAGES += nprocreg
+pkg_nprocreg_name = nprocreg
+pkg_nprocreg_description = Minimal Distributed Erlang Process Registry
+pkg_nprocreg_homepage = http://nitrogenproject.com/
+pkg_nprocreg_fetch = git
+pkg_nprocreg_repo = https://github.com/nitrogen/nprocreg
+pkg_nprocreg_commit = master
+
+PACKAGES += oauth
+pkg_oauth_name = oauth
+pkg_oauth_description = An Erlang OAuth 1.0 implementation
+pkg_oauth_homepage = https://github.com/tim/erlang-oauth
+pkg_oauth_fetch = git
+pkg_oauth_repo = https://github.com/tim/erlang-oauth
+pkg_oauth_commit = master
+
+PACKAGES += oauth2
+pkg_oauth2_name = oauth2
+pkg_oauth2_description = Erlang OAuth2 implementation
+pkg_oauth2_homepage = https://github.com/kivra/oauth2
+pkg_oauth2_fetch = git
+pkg_oauth2_repo = https://github.com/kivra/oauth2
+pkg_oauth2_commit = master
+
+PACKAGES += observer_cli
+pkg_observer_cli_name = observer_cli
+pkg_observer_cli_description = Visualize Erlang/Elixir Nodes On The Command Line
+pkg_observer_cli_homepage = http://zhongwencool.github.io/observer_cli
+pkg_observer_cli_fetch = git
+pkg_observer_cli_repo = https://github.com/zhongwencool/observer_cli
+pkg_observer_cli_commit = master
+
+PACKAGES += octopus
+pkg_octopus_name = octopus
+pkg_octopus_description = Small and flexible pool manager written in Erlang
+pkg_octopus_homepage = https://github.com/erlangbureau/octopus
+pkg_octopus_fetch = git
+pkg_octopus_repo = https://github.com/erlangbureau/octopus
+pkg_octopus_commit = master
+
+PACKAGES += of_protocol
+pkg_of_protocol_name = of_protocol
+pkg_of_protocol_description = OpenFlow Protocol Library for Erlang
+pkg_of_protocol_homepage = https://github.com/FlowForwarding/of_protocol
+pkg_of_protocol_fetch = git
+pkg_of_protocol_repo = https://github.com/FlowForwarding/of_protocol
+pkg_of_protocol_commit = master
+
+PACKAGES += opencouch
+pkg_opencouch_name = couch
+pkg_opencouch_description = An embeddable document-oriented database compatible with Apache CouchDB
+pkg_opencouch_homepage = https://github.com/benoitc/opencouch
+pkg_opencouch_fetch = git
+pkg_opencouch_repo = https://github.com/benoitc/opencouch
+pkg_opencouch_commit = master
+
+PACKAGES += openflow
+pkg_openflow_name = openflow
+pkg_openflow_description = An OpenFlow controller written in pure erlang
+pkg_openflow_homepage = https://github.com/renatoaguiar/erlang-openflow
+pkg_openflow_fetch = git
+pkg_openflow_repo = https://github.com/renatoaguiar/erlang-openflow
+pkg_openflow_commit = master
+
+PACKAGES += openid
+pkg_openid_name = openid
+pkg_openid_description = Erlang OpenID
+pkg_openid_homepage = https://github.com/brendonh/erl_openid
+pkg_openid_fetch = git
+pkg_openid_repo = https://github.com/brendonh/erl_openid
+pkg_openid_commit = master
+
+PACKAGES += openpoker
+pkg_openpoker_name = openpoker
+pkg_openpoker_description = Genesis Texas hold'em Game Server
+pkg_openpoker_homepage = https://github.com/hpyhacking/openpoker
+pkg_openpoker_fetch = git
+pkg_openpoker_repo = https://github.com/hpyhacking/openpoker
+pkg_openpoker_commit = master
+
+PACKAGES += otpbp
+pkg_otpbp_name = otpbp
+pkg_otpbp_description = Parse transformer for using new OTP functions in old Erlang/OTP releases (R15, R16, 17, 18, 19)
+pkg_otpbp_homepage = https://github.com/Ledest/otpbp
+pkg_otpbp_fetch = git
+pkg_otpbp_repo = https://github.com/Ledest/otpbp
+pkg_otpbp_commit = master
+
+PACKAGES += pal
+pkg_pal_name = pal
+pkg_pal_description = Pragmatic Authentication Library
+pkg_pal_homepage = https://github.com/manifest/pal
+pkg_pal_fetch = git
+pkg_pal_repo = https://github.com/manifest/pal
+pkg_pal_commit = master
+
+PACKAGES += parse_trans
+pkg_parse_trans_name = parse_trans
+pkg_parse_trans_description = Parse transform utilities for Erlang
+pkg_parse_trans_homepage = https://github.com/uwiger/parse_trans
+pkg_parse_trans_fetch = git
+pkg_parse_trans_repo = https://github.com/uwiger/parse_trans
+pkg_parse_trans_commit = master
+
+PACKAGES += parsexml
+pkg_parsexml_name = parsexml
+pkg_parsexml_description = Simple DOM XML parser with convenient and very simple API
+pkg_parsexml_homepage = https://github.com/maxlapshin/parsexml
+pkg_parsexml_fetch = git
+pkg_parsexml_repo = https://github.com/maxlapshin/parsexml
+pkg_parsexml_commit = master
+
+PACKAGES += partisan
+pkg_partisan_name = partisan
+pkg_partisan_description = High-performance, high-scalability distributed computing with Erlang and Elixir.
+pkg_partisan_homepage = http://partisan.cloud
+pkg_partisan_fetch = git
+pkg_partisan_repo = https://github.com/lasp-lang/partisan
+pkg_partisan_commit = master
+
+PACKAGES += pegjs
+pkg_pegjs_name = pegjs
+pkg_pegjs_description = An implementation of PEG.js grammar for Erlang.
+pkg_pegjs_homepage = https://github.com/dmitriid/pegjs
+pkg_pegjs_fetch = git
+pkg_pegjs_repo = https://github.com/dmitriid/pegjs
+pkg_pegjs_commit = master
+
+PACKAGES += percept2
+pkg_percept2_name = percept2
+pkg_percept2_description = Concurrent profiling tool for Erlang
+pkg_percept2_homepage = https://github.com/huiqing/percept2
+pkg_percept2_fetch = git
+pkg_percept2_repo = https://github.com/huiqing/percept2
+pkg_percept2_commit = master
+
+PACKAGES += pgo
+pkg_pgo_name = pgo
+pkg_pgo_description = Erlang Postgres client and connection pool
+pkg_pgo_homepage = https://github.com/erleans/pgo.git
+pkg_pgo_fetch = git
+pkg_pgo_repo = https://github.com/erleans/pgo.git
+pkg_pgo_commit = master
+
+PACKAGES += pgsql
+pkg_pgsql_name = pgsql
+pkg_pgsql_description = Erlang PostgreSQL driver
+pkg_pgsql_homepage = https://github.com/semiocast/pgsql
+pkg_pgsql_fetch = git
+pkg_pgsql_repo = https://github.com/semiocast/pgsql
+pkg_pgsql_commit = master
+
+PACKAGES += pkgx
+pkg_pkgx_name = pkgx
+pkg_pkgx_description = Build .deb packages from Erlang releases
+pkg_pkgx_homepage = https://github.com/arjan/pkgx
+pkg_pkgx_fetch = git
+pkg_pkgx_repo = https://github.com/arjan/pkgx
+pkg_pkgx_commit = master
+
+PACKAGES += pkt
+pkg_pkt_name = pkt
+pkg_pkt_description = Erlang network protocol library
+pkg_pkt_homepage = https://github.com/msantos/pkt
+pkg_pkt_fetch = git
+pkg_pkt_repo = https://github.com/msantos/pkt
+pkg_pkt_commit = master
+
+PACKAGES += plain_fsm
+pkg_plain_fsm_name = plain_fsm
+pkg_plain_fsm_description = A behaviour/support library for writing plain Erlang FSMs.
+pkg_plain_fsm_homepage = https://github.com/uwiger/plain_fsm
+pkg_plain_fsm_fetch = git
+pkg_plain_fsm_repo = https://github.com/uwiger/plain_fsm
+pkg_plain_fsm_commit = master
+
+PACKAGES += plumtree
+pkg_plumtree_name = plumtree
+pkg_plumtree_description = Epidemic Broadcast Trees
+pkg_plumtree_homepage = https://github.com/helium/plumtree
+pkg_plumtree_fetch = git
+pkg_plumtree_repo = https://github.com/helium/plumtree
+pkg_plumtree_commit = master
+
+PACKAGES += pmod_transform
+pkg_pmod_transform_name = pmod_transform
+pkg_pmod_transform_description = Parse transform for parameterized modules
+pkg_pmod_transform_homepage = https://github.com/erlang/pmod_transform
+pkg_pmod_transform_fetch = git
+pkg_pmod_transform_repo = https://github.com/erlang/pmod_transform
+pkg_pmod_transform_commit = master
+
+PACKAGES += pobox
+pkg_pobox_name = pobox
+pkg_pobox_description = External buffer processes to protect against mailbox overflow in Erlang
+pkg_pobox_homepage = https://github.com/ferd/pobox
+pkg_pobox_fetch = git
+pkg_pobox_repo = https://github.com/ferd/pobox
+pkg_pobox_commit = master
+
+PACKAGES += ponos
+pkg_ponos_name = ponos
+pkg_ponos_description = ponos is a simple yet powerful load generator written in erlang
+pkg_ponos_homepage = https://github.com/klarna/ponos
+pkg_ponos_fetch = git
+pkg_ponos_repo = https://github.com/klarna/ponos
+pkg_ponos_commit = master
+
+PACKAGES += poolboy
+pkg_poolboy_name = poolboy
+pkg_poolboy_description = A hunky Erlang worker pool factory
+pkg_poolboy_homepage = https://github.com/devinus/poolboy
+pkg_poolboy_fetch = git
+pkg_poolboy_repo = https://github.com/devinus/poolboy
+pkg_poolboy_commit = master
+
+PACKAGES += pooler
+pkg_pooler_name = pooler
+pkg_pooler_description = An OTP Process Pool Application
+pkg_pooler_homepage = https://github.com/seth/pooler
+pkg_pooler_fetch = git
+pkg_pooler_repo = https://github.com/seth/pooler
+pkg_pooler_commit = master
+
+PACKAGES += pqueue
+pkg_pqueue_name = pqueue
+pkg_pqueue_description = Erlang Priority Queues
+pkg_pqueue_homepage = https://github.com/okeuday/pqueue
+pkg_pqueue_fetch = git
+pkg_pqueue_repo = https://github.com/okeuday/pqueue
+pkg_pqueue_commit = master
+
+PACKAGES += procket
+pkg_procket_name = procket
+pkg_procket_description = Erlang interface to low level socket operations
+pkg_procket_homepage = http://blog.listincomprehension.com/search/label/procket
+pkg_procket_fetch = git
+pkg_procket_repo = https://github.com/msantos/procket
+pkg_procket_commit = master
+
+PACKAGES += prometheus
+pkg_prometheus_name = prometheus
+pkg_prometheus_description = Prometheus.io client in Erlang
+pkg_prometheus_homepage = https://github.com/deadtrickster/prometheus.erl
+pkg_prometheus_fetch = git
+pkg_prometheus_repo = https://github.com/deadtrickster/prometheus.erl
+pkg_prometheus_commit = master
+
+PACKAGES += prop
+pkg_prop_name = prop
+pkg_prop_description = An Erlang code scaffolding and generator system.
+pkg_prop_homepage = https://github.com/nuex/prop
+pkg_prop_fetch = git
+pkg_prop_repo = https://github.com/nuex/prop
+pkg_prop_commit = master
+
+PACKAGES += proper
+pkg_proper_name = proper
+pkg_proper_description = PropEr: a QuickCheck-inspired property-based testing tool for Erlang.
+pkg_proper_homepage = http://proper.softlab.ntua.gr
+pkg_proper_fetch = git
+pkg_proper_repo = https://github.com/manopapad/proper
+pkg_proper_commit = master
+
+PACKAGES += props
+pkg_props_name = props
+pkg_props_description = Property structure library
+pkg_props_homepage = https://github.com/greyarea/props
+pkg_props_fetch = git
+pkg_props_repo = https://github.com/greyarea/props
+pkg_props_commit = master
+
+PACKAGES += protobuffs
+pkg_protobuffs_name = protobuffs
+pkg_protobuffs_description = An implementation of Google's Protocol Buffers for Erlang, based on ngerakines/erlang_protobuffs.
+pkg_protobuffs_homepage = https://github.com/basho/erlang_protobuffs
+pkg_protobuffs_fetch = git
+pkg_protobuffs_repo = https://github.com/basho/erlang_protobuffs
+pkg_protobuffs_commit = master
+
+PACKAGES += psycho
+pkg_psycho_name = psycho
+pkg_psycho_description = HTTP server that provides a WSGI-like interface for applications and middleware.
+pkg_psycho_homepage = https://github.com/gar1t/psycho
+pkg_psycho_fetch = git
+pkg_psycho_repo = https://github.com/gar1t/psycho
+pkg_psycho_commit = master
+
+PACKAGES += purity
+pkg_purity_name = purity
+pkg_purity_description = A side-effect analyzer for Erlang
+pkg_purity_homepage = https://github.com/mpitid/purity
+pkg_purity_fetch = git
+pkg_purity_repo = https://github.com/mpitid/purity
+pkg_purity_commit = master
+
+PACKAGES += push_service
+pkg_push_service_name = push_service
+pkg_push_service_description = Push service
+pkg_push_service_homepage = https://github.com/hairyhum/push_service
+pkg_push_service_fetch = git
+pkg_push_service_repo = https://github.com/hairyhum/push_service
+pkg_push_service_commit = master
+
+PACKAGES += qdate
+pkg_qdate_name = qdate
+pkg_qdate_description = Date, time, and timezone parsing, formatting, and conversion for Erlang.
+pkg_qdate_homepage = https://github.com/choptastic/qdate
+pkg_qdate_fetch = git
+pkg_qdate_repo = https://github.com/choptastic/qdate
+pkg_qdate_commit = master
+
+PACKAGES += qrcode
+pkg_qrcode_name = qrcode
+pkg_qrcode_description = QR Code encoder in Erlang
+pkg_qrcode_homepage = https://github.com/komone/qrcode
+pkg_qrcode_fetch = git
+pkg_qrcode_repo = https://github.com/komone/qrcode
+pkg_qrcode_commit = master
+
+PACKAGES += quest
+pkg_quest_name = quest
+pkg_quest_description = Learn Erlang through this set of challenges. An interactive system for getting to know Erlang.
+pkg_quest_homepage = https://github.com/eriksoe/ErlangQuest
+pkg_quest_fetch = git
+pkg_quest_repo = https://github.com/eriksoe/ErlangQuest
+pkg_quest_commit = master
+
+PACKAGES += quickrand
+pkg_quickrand_name = quickrand
+pkg_quickrand_description = Quick Erlang Random Number Generation
+pkg_quickrand_homepage = https://github.com/okeuday/quickrand
+pkg_quickrand_fetch = git
+pkg_quickrand_repo = https://github.com/okeuday/quickrand
+pkg_quickrand_commit = master
+
+PACKAGES += rabbit
+pkg_rabbit_name = rabbit
+pkg_rabbit_description = RabbitMQ Server
+pkg_rabbit_homepage = https://www.rabbitmq.com/
+pkg_rabbit_fetch = git
+pkg_rabbit_repo = https://github.com/rabbitmq/rabbitmq-server.git
+pkg_rabbit_commit = master
+
+PACKAGES += rabbit_exchange_type_riak
+pkg_rabbit_exchange_type_riak_name = rabbit_exchange_type_riak
+pkg_rabbit_exchange_type_riak_description = Custom RabbitMQ exchange type for sticking messages in Riak
+pkg_rabbit_exchange_type_riak_homepage = https://github.com/jbrisbin/riak-exchange
+pkg_rabbit_exchange_type_riak_fetch = git
+pkg_rabbit_exchange_type_riak_repo = https://github.com/jbrisbin/riak-exchange
+pkg_rabbit_exchange_type_riak_commit = master
+
+PACKAGES += rack
+pkg_rack_name = rack
+pkg_rack_description = Rack handler for erlang
+pkg_rack_homepage = https://github.com/erlyvideo/rack
+pkg_rack_fetch = git
+pkg_rack_repo = https://github.com/erlyvideo/rack
+pkg_rack_commit = master
+
+PACKAGES += radierl
+pkg_radierl_name = radierl
+pkg_radierl_description = RADIUS protocol stack implemented in Erlang.
+pkg_radierl_homepage = https://github.com/vances/radierl
+pkg_radierl_fetch = git
+pkg_radierl_repo = https://github.com/vances/radierl
+pkg_radierl_commit = master
+
+PACKAGES += rafter
+pkg_rafter_name = rafter
+pkg_rafter_description = An Erlang library application which implements the Raft consensus protocol
+pkg_rafter_homepage = https://github.com/andrewjstone/rafter
+pkg_rafter_fetch = git
+pkg_rafter_repo = https://github.com/andrewjstone/rafter
+pkg_rafter_commit = master
+
+PACKAGES += ranch
+pkg_ranch_name = ranch
+pkg_ranch_description = Socket acceptor pool for TCP protocols.
+pkg_ranch_homepage = http://ninenines.eu
+pkg_ranch_fetch = git
+pkg_ranch_repo = https://github.com/ninenines/ranch
+pkg_ranch_commit = 1.2.1
+
+PACKAGES += rbeacon
+pkg_rbeacon_name = rbeacon
+pkg_rbeacon_description = LAN discovery and presence in Erlang.
+pkg_rbeacon_homepage = https://github.com/refuge/rbeacon
+pkg_rbeacon_fetch = git
+pkg_rbeacon_repo = https://github.com/refuge/rbeacon
+pkg_rbeacon_commit = master
+
+PACKAGES += rebar
+pkg_rebar_name = rebar
+pkg_rebar_description = Erlang build tool that makes it easy to compile and test Erlang applications, port drivers and releases.
+pkg_rebar_homepage = http://www.rebar3.org
+pkg_rebar_fetch = git
+pkg_rebar_repo = https://github.com/rebar/rebar3
+pkg_rebar_commit = master
+
+PACKAGES += rebus
+pkg_rebus_name = rebus
+pkg_rebus_description = A stupid-simple, internal pub/sub event bus written in and for Erlang.
+pkg_rebus_homepage = https://github.com/olle/rebus
+pkg_rebus_fetch = git
+pkg_rebus_repo = https://github.com/olle/rebus
+pkg_rebus_commit = master
+
+PACKAGES += rec2json
+pkg_rec2json_name = rec2json
+pkg_rec2json_description = Compile erlang record definitions into modules to convert them to/from json easily.
+pkg_rec2json_homepage = https://github.com/lordnull/rec2json
+pkg_rec2json_fetch = git
+pkg_rec2json_repo = https://github.com/lordnull/rec2json
+pkg_rec2json_commit = master
+
+PACKAGES += recon
+pkg_recon_name = recon
+pkg_recon_description = Collection of functions and scripts to debug Erlang in production.
+pkg_recon_homepage = https://github.com/ferd/recon
+pkg_recon_fetch = git
+pkg_recon_repo = https://github.com/ferd/recon
+pkg_recon_commit = master
+
+PACKAGES += record_info
+pkg_record_info_name = record_info
+pkg_record_info_description = Convert between record and proplist
+pkg_record_info_homepage = https://github.com/bipthelin/erlang-record_info
+pkg_record_info_fetch = git
+pkg_record_info_repo = https://github.com/bipthelin/erlang-record_info
+pkg_record_info_commit = master
+
+PACKAGES += redgrid
+pkg_redgrid_name = redgrid
+pkg_redgrid_description = automatic Erlang node discovery via redis
+pkg_redgrid_homepage = https://github.com/jkvor/redgrid
+pkg_redgrid_fetch = git
+pkg_redgrid_repo = https://github.com/jkvor/redgrid
+pkg_redgrid_commit = master
+
+PACKAGES += redo
+pkg_redo_name = redo
+pkg_redo_description = pipelined erlang redis client
+pkg_redo_homepage = https://github.com/jkvor/redo
+pkg_redo_fetch = git
+pkg_redo_repo = https://github.com/jkvor/redo
+pkg_redo_commit = master
+
+PACKAGES += reload_mk
+pkg_reload_mk_name = reload_mk
+pkg_reload_mk_description = Live reload plugin for erlang.mk.
+pkg_reload_mk_homepage = https://github.com/bullno1/reload.mk
+pkg_reload_mk_fetch = git
+pkg_reload_mk_repo = https://github.com/bullno1/reload.mk
+pkg_reload_mk_commit = master
+
+PACKAGES += reltool_util
+pkg_reltool_util_name = reltool_util
+pkg_reltool_util_description = Erlang reltool utility functionality application
+pkg_reltool_util_homepage = https://github.com/okeuday/reltool_util
+pkg_reltool_util_fetch = git
+pkg_reltool_util_repo = https://github.com/okeuday/reltool_util
+pkg_reltool_util_commit = master
+
+PACKAGES += relx
+pkg_relx_name = relx
+pkg_relx_description = Sane, simple release creation for Erlang
+pkg_relx_homepage = https://github.com/erlware/relx
+pkg_relx_fetch = git
+pkg_relx_repo = https://github.com/erlware/relx
+pkg_relx_commit = master
+
+PACKAGES += resource_discovery
+pkg_resource_discovery_name = resource_discovery
+pkg_resource_discovery_description = An application used to dynamically discover resources present in an Erlang node cluster.
+pkg_resource_discovery_homepage = http://erlware.org/
+pkg_resource_discovery_fetch = git
+pkg_resource_discovery_repo = https://github.com/erlware/resource_discovery
+pkg_resource_discovery_commit = master
+
+PACKAGES += restc
+pkg_restc_name = restc
+pkg_restc_description = Erlang Rest Client
+pkg_restc_homepage = https://github.com/kivra/restclient
+pkg_restc_fetch = git
+pkg_restc_repo = https://github.com/kivra/restclient
+pkg_restc_commit = master
+
+PACKAGES += rfc4627_jsonrpc
+pkg_rfc4627_jsonrpc_name = rfc4627_jsonrpc
+pkg_rfc4627_jsonrpc_description = Erlang RFC4627 (JSON) codec and JSON-RPC server implementation.
+pkg_rfc4627_jsonrpc_homepage = https://github.com/tonyg/erlang-rfc4627
+pkg_rfc4627_jsonrpc_fetch = git
+pkg_rfc4627_jsonrpc_repo = https://github.com/tonyg/erlang-rfc4627
+pkg_rfc4627_jsonrpc_commit = master
+
+PACKAGES += riak_control
+pkg_riak_control_name = riak_control
+pkg_riak_control_description = Webmachine-based administration interface for Riak.
+pkg_riak_control_homepage = https://github.com/basho/riak_control
+pkg_riak_control_fetch = git
+pkg_riak_control_repo = https://github.com/basho/riak_control
+pkg_riak_control_commit = master
+
+PACKAGES += riak_core
+pkg_riak_core_name = riak_core
+pkg_riak_core_description = Distributed systems infrastructure used by Riak.
+pkg_riak_core_homepage = https://github.com/basho/riak_core
+pkg_riak_core_fetch = git
+pkg_riak_core_repo = https://github.com/basho/riak_core
+pkg_riak_core_commit = master
+
+PACKAGES += riak_dt
+pkg_riak_dt_name = riak_dt
+pkg_riak_dt_description = Convergent replicated datatypes in Erlang
+pkg_riak_dt_homepage = https://github.com/basho/riak_dt
+pkg_riak_dt_fetch = git
+pkg_riak_dt_repo = https://github.com/basho/riak_dt
+pkg_riak_dt_commit = master
+
+PACKAGES += riak_ensemble
+pkg_riak_ensemble_name = riak_ensemble
+pkg_riak_ensemble_description = Multi-Paxos framework in Erlang
+pkg_riak_ensemble_homepage = https://github.com/basho/riak_ensemble
+pkg_riak_ensemble_fetch = git
+pkg_riak_ensemble_repo = https://github.com/basho/riak_ensemble
+pkg_riak_ensemble_commit = master
+
+PACKAGES += riak_kv
+pkg_riak_kv_name = riak_kv
+pkg_riak_kv_description = Riak Key/Value Store
+pkg_riak_kv_homepage = https://github.com/basho/riak_kv
+pkg_riak_kv_fetch = git
+pkg_riak_kv_repo = https://github.com/basho/riak_kv
+pkg_riak_kv_commit = master
+
+PACKAGES += riak_pg
+pkg_riak_pg_name = riak_pg
+pkg_riak_pg_description = Distributed process groups with riak_core.
+pkg_riak_pg_homepage = https://github.com/cmeiklejohn/riak_pg
+pkg_riak_pg_fetch = git
+pkg_riak_pg_repo = https://github.com/cmeiklejohn/riak_pg
+pkg_riak_pg_commit = master
+
+PACKAGES += riak_pipe
+pkg_riak_pipe_name = riak_pipe
+pkg_riak_pipe_description = Riak Pipelines
+pkg_riak_pipe_homepage = https://github.com/basho/riak_pipe
+pkg_riak_pipe_fetch = git
+pkg_riak_pipe_repo = https://github.com/basho/riak_pipe
+pkg_riak_pipe_commit = master
+
+PACKAGES += riak_sysmon
+pkg_riak_sysmon_name = riak_sysmon
+pkg_riak_sysmon_description = Simple OTP app for managing Erlang VM system_monitor event messages
+pkg_riak_sysmon_homepage = https://github.com/basho/riak_sysmon
+pkg_riak_sysmon_fetch = git
+pkg_riak_sysmon_repo = https://github.com/basho/riak_sysmon
+pkg_riak_sysmon_commit = master
+
+PACKAGES += riak_test
+pkg_riak_test_name = riak_test
+pkg_riak_test_description = I'm in your cluster, testing your riaks
+pkg_riak_test_homepage = https://github.com/basho/riak_test
+pkg_riak_test_fetch = git
+pkg_riak_test_repo = https://github.com/basho/riak_test
+pkg_riak_test_commit = master
+
+PACKAGES += riakc
+pkg_riakc_name = riakc
+pkg_riakc_description = Erlang clients for Riak.
+pkg_riakc_homepage = https://github.com/basho/riak-erlang-client
+pkg_riakc_fetch = git
+pkg_riakc_repo = https://github.com/basho/riak-erlang-client
+pkg_riakc_commit = master
+
+PACKAGES += riakhttpc
+pkg_riakhttpc_name = riakhttpc
+pkg_riakhttpc_description = Riak Erlang client using the HTTP interface
+pkg_riakhttpc_homepage = https://github.com/basho/riak-erlang-http-client
+pkg_riakhttpc_fetch = git
+pkg_riakhttpc_repo = https://github.com/basho/riak-erlang-http-client
+pkg_riakhttpc_commit = master
+
+PACKAGES += riaknostic
+pkg_riaknostic_name = riaknostic
+pkg_riaknostic_description = A diagnostic tool for Riak installations, to find common errors asap
+pkg_riaknostic_homepage = https://github.com/basho/riaknostic
+pkg_riaknostic_fetch = git
+pkg_riaknostic_repo = https://github.com/basho/riaknostic
+pkg_riaknostic_commit = master
+
+PACKAGES += riakpool
+pkg_riakpool_name = riakpool
+pkg_riakpool_description = erlang riak client pool
+pkg_riakpool_homepage = https://github.com/dweldon/riakpool
+pkg_riakpool_fetch = git
+pkg_riakpool_repo = https://github.com/dweldon/riakpool
+pkg_riakpool_commit = master
+
+PACKAGES += rivus_cep
+pkg_rivus_cep_name = rivus_cep
+pkg_rivus_cep_description = Complex event processing in Erlang
+pkg_rivus_cep_homepage = https://github.com/vascokk/rivus_cep
+pkg_rivus_cep_fetch = git
+pkg_rivus_cep_repo = https://github.com/vascokk/rivus_cep
+pkg_rivus_cep_commit = master
+
+PACKAGES += rlimit
+pkg_rlimit_name = rlimit
+pkg_rlimit_description = Magnus Klaar's rate limiter code from etorrent
+pkg_rlimit_homepage = https://github.com/jlouis/rlimit
+pkg_rlimit_fetch = git
+pkg_rlimit_repo = https://github.com/jlouis/rlimit
+pkg_rlimit_commit = master
+
+PACKAGES += rust_mk
+pkg_rust_mk_name = rust_mk
+pkg_rust_mk_description = Build Rust crates in an Erlang application
+pkg_rust_mk_homepage = https://github.com/goertzenator/rust.mk
+pkg_rust_mk_fetch = git
+pkg_rust_mk_repo = https://github.com/goertzenator/rust.mk
+pkg_rust_mk_commit = master
+
+PACKAGES += safetyvalve
+pkg_safetyvalve_name = safetyvalve
+pkg_safetyvalve_description = A safety valve for your erlang node
+pkg_safetyvalve_homepage = https://github.com/jlouis/safetyvalve
+pkg_safetyvalve_fetch = git
+pkg_safetyvalve_repo = https://github.com/jlouis/safetyvalve
+pkg_safetyvalve_commit = master
+
+PACKAGES += seestar
+pkg_seestar_name = seestar
+pkg_seestar_description = The Erlang client for Cassandra 1.2+ binary protocol
+pkg_seestar_homepage = https://github.com/iamaleksey/seestar
+pkg_seestar_fetch = git
+pkg_seestar_repo = https://github.com/iamaleksey/seestar
+pkg_seestar_commit = master
+
+PACKAGES += service
+pkg_service_name = service
+pkg_service_description = A minimal Erlang behavior for creating CloudI internal services
+pkg_service_homepage = http://cloudi.org/
+pkg_service_fetch = git
+pkg_service_repo = https://github.com/CloudI/service
+pkg_service_commit = master
+
+PACKAGES += setup
+pkg_setup_name = setup
+pkg_setup_description = Generic setup utility for Erlang-based systems
+pkg_setup_homepage = https://github.com/uwiger/setup
+pkg_setup_fetch = git
+pkg_setup_repo = https://github.com/uwiger/setup
+pkg_setup_commit = master
+
+PACKAGES += sext
+pkg_sext_name = sext
+pkg_sext_description = Sortable Erlang Term Serialization
+pkg_sext_homepage = https://github.com/uwiger/sext
+pkg_sext_fetch = git
+pkg_sext_repo = https://github.com/uwiger/sext
+pkg_sext_commit = master
+
+PACKAGES += sfmt
+pkg_sfmt_name = sfmt
+pkg_sfmt_description = SFMT pseudo random number generator for Erlang.
+pkg_sfmt_homepage = https://github.com/jj1bdx/sfmt-erlang
+pkg_sfmt_fetch = git
+pkg_sfmt_repo = https://github.com/jj1bdx/sfmt-erlang
+pkg_sfmt_commit = master
+
+PACKAGES += sgte
+pkg_sgte_name = sgte
+pkg_sgte_description = A simple Erlang Template Engine
+pkg_sgte_homepage = https://github.com/filippo/sgte
+pkg_sgte_fetch = git
+pkg_sgte_repo = https://github.com/filippo/sgte
+pkg_sgte_commit = master
+
+PACKAGES += sheriff
+pkg_sheriff_name = sheriff
+pkg_sheriff_description = Parse transform for type based validation.
+pkg_sheriff_homepage = http://ninenines.eu
+pkg_sheriff_fetch = git
+pkg_sheriff_repo = https://github.com/extend/sheriff
+pkg_sheriff_commit = master
+
+PACKAGES += shotgun
+pkg_shotgun_name = shotgun
+pkg_shotgun_description = better than just a gun
+pkg_shotgun_homepage = https://github.com/inaka/shotgun
+pkg_shotgun_fetch = git
+pkg_shotgun_repo = https://github.com/inaka/shotgun
+pkg_shotgun_commit = master
+
+PACKAGES += sidejob
+pkg_sidejob_name = sidejob
+pkg_sidejob_description = Parallel worker and capacity limiting library for Erlang
+pkg_sidejob_homepage = https://github.com/basho/sidejob
+pkg_sidejob_fetch = git
+pkg_sidejob_repo = https://github.com/basho/sidejob
+pkg_sidejob_commit = master
+
+PACKAGES += sieve
+pkg_sieve_name = sieve
+pkg_sieve_description = sieve is a simple TCP routing proxy (layer 7) in erlang
+pkg_sieve_homepage = https://github.com/benoitc/sieve
+pkg_sieve_fetch = git
+pkg_sieve_repo = https://github.com/benoitc/sieve
+pkg_sieve_commit = master
+
+PACKAGES += sighandler
+pkg_sighandler_name = sighandler
+pkg_sighandler_description = Handle UNIX signals in Erlang
+pkg_sighandler_homepage = https://github.com/jkingsbery/sighandler
+pkg_sighandler_fetch = git
+pkg_sighandler_repo = https://github.com/jkingsbery/sighandler
+pkg_sighandler_commit = master
+
+PACKAGES += simhash
+pkg_simhash_name = simhash
+pkg_simhash_description = Simhashing for Erlang -- hashing algorithm to find near-duplicates in binary data.
+pkg_simhash_homepage = https://github.com/ferd/simhash
+pkg_simhash_fetch = git
+pkg_simhash_repo = https://github.com/ferd/simhash
+pkg_simhash_commit = master
+
+PACKAGES += simple_bridge
+pkg_simple_bridge_name = simple_bridge
+pkg_simple_bridge_description = A simple, standardized interface library to Erlang HTTP Servers.
+pkg_simple_bridge_homepage = https://github.com/nitrogen/simple_bridge
+pkg_simple_bridge_fetch = git
+pkg_simple_bridge_repo = https://github.com/nitrogen/simple_bridge
+pkg_simple_bridge_commit = master
+
+PACKAGES += simple_oauth2
+pkg_simple_oauth2_name = simple_oauth2
+pkg_simple_oauth2_description = Simple erlang OAuth2 client module for any http server framework (Google, Facebook, Yandex, Vkontakte are preconfigured)
+pkg_simple_oauth2_homepage = https://github.com/virtan/simple_oauth2
+pkg_simple_oauth2_fetch = git
+pkg_simple_oauth2_repo = https://github.com/virtan/simple_oauth2
+pkg_simple_oauth2_commit = master
+
+PACKAGES += skel
+pkg_skel_name = skel
+pkg_skel_description = A Streaming Process-based Skeleton Library for Erlang
+pkg_skel_homepage = https://github.com/ParaPhrase/skel
+pkg_skel_fetch = git
+pkg_skel_repo = https://github.com/ParaPhrase/skel
+pkg_skel_commit = master
+
+PACKAGES += slack
+pkg_slack_name = slack
+pkg_slack_description = Minimal slack notification OTP library.
+pkg_slack_homepage = https://github.com/DonBranson/slack
+pkg_slack_fetch = git
+pkg_slack_repo = https://github.com/DonBranson/slack.git
+pkg_slack_commit = master
+
+PACKAGES += smother
+pkg_smother_name = smother
+pkg_smother_description = Extended code coverage metrics for Erlang.
+pkg_smother_homepage = https://ramsay-t.github.io/Smother/
+pkg_smother_fetch = git
+pkg_smother_repo = https://github.com/ramsay-t/Smother
+pkg_smother_commit = master
+
+PACKAGES += snappyer
+pkg_snappyer_name = snappyer
+pkg_snappyer_description = Snappy as nif for Erlang
+pkg_snappyer_homepage = https://github.com/zmstone/snappyer
+pkg_snappyer_fetch = git
+pkg_snappyer_repo = https://github.com/zmstone/snappyer.git
+pkg_snappyer_commit = master
+
+PACKAGES += social
+pkg_social_name = social
+pkg_social_description = Cowboy handler for social login via OAuth2 providers
+pkg_social_homepage = https://github.com/dvv/social
+pkg_social_fetch = git
+pkg_social_repo = https://github.com/dvv/social
+pkg_social_commit = master
+
+PACKAGES += spapi_router
+pkg_spapi_router_name = spapi_router
+pkg_spapi_router_description = Partially-connected Erlang clustering
+pkg_spapi_router_homepage = https://github.com/spilgames/spapi-router
+pkg_spapi_router_fetch = git
+pkg_spapi_router_repo = https://github.com/spilgames/spapi-router
+pkg_spapi_router_commit = master
+
+PACKAGES += sqerl
+pkg_sqerl_name = sqerl
+pkg_sqerl_description = An Erlang-flavoured SQL DSL
+pkg_sqerl_homepage = https://github.com/hairyhum/sqerl
+pkg_sqerl_fetch = git
+pkg_sqerl_repo = https://github.com/hairyhum/sqerl
+pkg_sqerl_commit = master
+
+PACKAGES += srly
+pkg_srly_name = srly
+pkg_srly_description = Native Erlang Unix serial interface
+pkg_srly_homepage = https://github.com/msantos/srly
+pkg_srly_fetch = git
+pkg_srly_repo = https://github.com/msantos/srly
+pkg_srly_commit = master
+
+PACKAGES += sshrpc
+pkg_sshrpc_name = sshrpc
+pkg_sshrpc_description = Erlang SSH RPC module (experimental)
+pkg_sshrpc_homepage = https://github.com/jj1bdx/sshrpc
+pkg_sshrpc_fetch = git
+pkg_sshrpc_repo = https://github.com/jj1bdx/sshrpc
+pkg_sshrpc_commit = master
+
+PACKAGES += stable
+pkg_stable_name = stable
+pkg_stable_description = Library of assorted helpers for Cowboy web server.
+pkg_stable_homepage = https://github.com/dvv/stable
+pkg_stable_fetch = git
+pkg_stable_repo = https://github.com/dvv/stable
+pkg_stable_commit = master
+
+PACKAGES += statebox
+pkg_statebox_name = statebox
+pkg_statebox_description = Erlang state monad with merge/conflict-resolution capabilities. Useful for Riak.
+pkg_statebox_homepage = https://github.com/mochi/statebox
+pkg_statebox_fetch = git
+pkg_statebox_repo = https://github.com/mochi/statebox
+pkg_statebox_commit = master
+
+PACKAGES += statebox_riak
+pkg_statebox_riak_name = statebox_riak
+pkg_statebox_riak_description = Convenience library that makes it easier to use statebox with riak, extracted from best practices in our production code at Mochi Media.
+pkg_statebox_riak_homepage = https://github.com/mochi/statebox_riak
+pkg_statebox_riak_fetch = git
+pkg_statebox_riak_repo = https://github.com/mochi/statebox_riak
+pkg_statebox_riak_commit = master
+
+PACKAGES += statman
+pkg_statman_name = statman
+pkg_statman_description = Efficiently collect massive volumes of metrics inside the Erlang VM
+pkg_statman_homepage = https://github.com/knutin/statman
+pkg_statman_fetch = git
+pkg_statman_repo = https://github.com/knutin/statman
+pkg_statman_commit = master
+
+PACKAGES += statsderl
+pkg_statsderl_name = statsderl
+pkg_statsderl_description = StatsD client (erlang)
+pkg_statsderl_homepage = https://github.com/lpgauth/statsderl
+pkg_statsderl_fetch = git
+pkg_statsderl_repo = https://github.com/lpgauth/statsderl
+pkg_statsderl_commit = master
+
+PACKAGES += stdinout_pool
+pkg_stdinout_pool_name = stdinout_pool
+pkg_stdinout_pool_description = stdinout_pool : stuff goes in, stuff goes out. there's never any miscommunication.
+pkg_stdinout_pool_homepage = https://github.com/mattsta/erlang-stdinout-pool
+pkg_stdinout_pool_fetch = git
+pkg_stdinout_pool_repo = https://github.com/mattsta/erlang-stdinout-pool
+pkg_stdinout_pool_commit = master
+
+PACKAGES += stockdb
+pkg_stockdb_name = stockdb
+pkg_stockdb_description = Database for storing Stock Exchange quotes in erlang
+pkg_stockdb_homepage = https://github.com/maxlapshin/stockdb
+pkg_stockdb_fetch = git
+pkg_stockdb_repo = https://github.com/maxlapshin/stockdb
+pkg_stockdb_commit = master
+
+PACKAGES += stripe
+pkg_stripe_name = stripe
+pkg_stripe_description = Erlang interface to the stripe.com API
+pkg_stripe_homepage = https://github.com/mattsta/stripe-erlang
+pkg_stripe_fetch = git
+pkg_stripe_repo = https://github.com/mattsta/stripe-erlang
+pkg_stripe_commit = v1
+
+PACKAGES += subproc
+pkg_subproc_name = subproc
+pkg_subproc_description = unix subprocess manager with {active,once|false} modes
+pkg_subproc_homepage = http://dozzie.jarowit.net/trac/wiki/subproc
+pkg_subproc_fetch = git
+pkg_subproc_repo = https://github.com/dozzie/subproc
+pkg_subproc_commit = v0.1.0
+
+PACKAGES += supervisor3
+pkg_supervisor3_name = supervisor3
+pkg_supervisor3_description = OTP supervisor with additional strategies
+pkg_supervisor3_homepage = https://github.com/klarna/supervisor3
+pkg_supervisor3_fetch = git
+pkg_supervisor3_repo = https://github.com/klarna/supervisor3.git
+pkg_supervisor3_commit = master
+
+PACKAGES += surrogate
+pkg_surrogate_name = surrogate
+pkg_surrogate_description = Proxy server written in erlang. Supports reverse proxy load balancing and forward proxy with http (including CONNECT), socks4, socks5, and transparent proxy modes.
+pkg_surrogate_homepage = https://github.com/skruger/Surrogate
+pkg_surrogate_fetch = git
+pkg_surrogate_repo = https://github.com/skruger/Surrogate
+pkg_surrogate_commit = master
+
+PACKAGES += swab
+pkg_swab_name = swab
+pkg_swab_description = General purpose buffer handling module
+pkg_swab_homepage = https://github.com/crownedgrouse/swab
+pkg_swab_fetch = git
+pkg_swab_repo = https://github.com/crownedgrouse/swab
+pkg_swab_commit = master
+
+PACKAGES += swarm
+pkg_swarm_name = swarm
+pkg_swarm_description = Fast and simple acceptor pool for Erlang
+pkg_swarm_homepage = https://github.com/jeremey/swarm
+pkg_swarm_fetch = git
+pkg_swarm_repo = https://github.com/jeremey/swarm
+pkg_swarm_commit = master
+
+PACKAGES += switchboard
+pkg_switchboard_name = switchboard
+pkg_switchboard_description = A framework for processing email using worker plugins.
+pkg_switchboard_homepage = https://github.com/thusfresh/switchboard
+pkg_switchboard_fetch = git
+pkg_switchboard_repo = https://github.com/thusfresh/switchboard
+pkg_switchboard_commit = master
+
+PACKAGES += syn
+pkg_syn_name = syn
+pkg_syn_description = A global Process Registry and Process Group manager for Erlang.
+pkg_syn_homepage = https://github.com/ostinelli/syn
+pkg_syn_fetch = git
+pkg_syn_repo = https://github.com/ostinelli/syn
+pkg_syn_commit = master
+
+PACKAGES += sync
+pkg_sync_name = sync
+pkg_sync_description = On-the-fly recompiling and reloading in Erlang.
+pkg_sync_homepage = https://github.com/rustyio/sync
+pkg_sync_fetch = git
+pkg_sync_repo = https://github.com/rustyio/sync
+pkg_sync_commit = master
+
+PACKAGES += syntaxerl
+pkg_syntaxerl_name = syntaxerl
+pkg_syntaxerl_description = Syntax checker for Erlang
+pkg_syntaxerl_homepage = https://github.com/ten0s/syntaxerl
+pkg_syntaxerl_fetch = git
+pkg_syntaxerl_repo = https://github.com/ten0s/syntaxerl
+pkg_syntaxerl_commit = master
+
+PACKAGES += syslog
+pkg_syslog_name = syslog
+pkg_syslog_description = Erlang port driver for interacting with syslog via syslog(3)
+pkg_syslog_homepage = https://github.com/Vagabond/erlang-syslog
+pkg_syslog_fetch = git
+pkg_syslog_repo = https://github.com/Vagabond/erlang-syslog
+pkg_syslog_commit = master
+
+PACKAGES += taskforce
+pkg_taskforce_name = taskforce
+pkg_taskforce_description = Erlang worker pools for controlled parallelisation of arbitrary tasks.
+pkg_taskforce_homepage = https://github.com/g-andrade/taskforce
+pkg_taskforce_fetch = git
+pkg_taskforce_repo = https://github.com/g-andrade/taskforce
+pkg_taskforce_commit = master
+
+PACKAGES += tddreloader
+pkg_tddreloader_name = tddreloader
+pkg_tddreloader_description = Shell utility for recompiling, reloading, and testing code as it changes
+pkg_tddreloader_homepage = https://github.com/version2beta/tddreloader
+pkg_tddreloader_fetch = git
+pkg_tddreloader_repo = https://github.com/version2beta/tddreloader
+pkg_tddreloader_commit = master
+
+PACKAGES += tempo
+pkg_tempo_name = tempo
+pkg_tempo_description = NIF-based date and time parsing and formatting for Erlang.
+pkg_tempo_homepage = https://github.com/selectel/tempo
+pkg_tempo_fetch = git
+pkg_tempo_repo = https://github.com/selectel/tempo
+pkg_tempo_commit = master
+
+PACKAGES += ticktick
+pkg_ticktick_name = ticktick
+pkg_ticktick_description = Ticktick is an id generator for message service.
+pkg_ticktick_homepage = https://github.com/ericliang/ticktick
+pkg_ticktick_fetch = git
+pkg_ticktick_repo = https://github.com/ericliang/ticktick
+pkg_ticktick_commit = master
+
+PACKAGES += tinymq
+pkg_tinymq_name = tinymq
+pkg_tinymq_description = TinyMQ - a diminutive, in-memory message queue
+pkg_tinymq_homepage = https://github.com/ChicagoBoss/tinymq
+pkg_tinymq_fetch = git
+pkg_tinymq_repo = https://github.com/ChicagoBoss/tinymq
+pkg_tinymq_commit = master
+
+PACKAGES += tinymt
+pkg_tinymt_name = tinymt
+pkg_tinymt_description = TinyMT pseudo random number generator for Erlang.
+pkg_tinymt_homepage = https://github.com/jj1bdx/tinymt-erlang
+pkg_tinymt_fetch = git
+pkg_tinymt_repo = https://github.com/jj1bdx/tinymt-erlang
+pkg_tinymt_commit = master
+
+PACKAGES += tirerl
+pkg_tirerl_name = tirerl
+pkg_tirerl_description = Erlang interface to Elastic Search
+pkg_tirerl_homepage = https://github.com/inaka/tirerl
+pkg_tirerl_fetch = git
+pkg_tirerl_repo = https://github.com/inaka/tirerl
+pkg_tirerl_commit = master
+
+PACKAGES += toml
+pkg_toml_name = toml
+pkg_toml_description = TOML (0.4.0) config parser
+pkg_toml_homepage = http://dozzie.jarowit.net/trac/wiki/TOML
+pkg_toml_fetch = git
+pkg_toml_repo = https://github.com/dozzie/toml
+pkg_toml_commit = v0.2.0
+
+PACKAGES += traffic_tools
+pkg_traffic_tools_name = traffic_tools
+pkg_traffic_tools_description = Simple traffic limiting library
+pkg_traffic_tools_homepage = https://github.com/systra/traffic_tools
+pkg_traffic_tools_fetch = git
+pkg_traffic_tools_repo = https://github.com/systra/traffic_tools
+pkg_traffic_tools_commit = master
+
+PACKAGES += trails
+pkg_trails_name = trails
+pkg_trails_description = A couple of improvements over Cowboy Routes
+pkg_trails_homepage = http://inaka.github.io/cowboy-trails/
+pkg_trails_fetch = git
+pkg_trails_repo = https://github.com/inaka/cowboy-trails
+pkg_trails_commit = master
+
+PACKAGES += trane
+pkg_trane_name = trane
+pkg_trane_description = SAX style broken HTML parser in Erlang
+pkg_trane_homepage = https://github.com/massemanet/trane
+pkg_trane_fetch = git
+pkg_trane_repo = https://github.com/massemanet/trane
+pkg_trane_commit = master
+
+PACKAGES += transit
+pkg_transit_name = transit
+pkg_transit_description = transit format for erlang
+pkg_transit_homepage = https://github.com/isaiah/transit-erlang
+pkg_transit_fetch = git
+pkg_transit_repo = https://github.com/isaiah/transit-erlang
+pkg_transit_commit = master
+
+PACKAGES += trie
+pkg_trie_name = trie
+pkg_trie_description = Erlang Trie Implementation
+pkg_trie_homepage = https://github.com/okeuday/trie
+pkg_trie_fetch = git
+pkg_trie_repo = https://github.com/okeuday/trie
+pkg_trie_commit = master
+
+PACKAGES += triq
+pkg_triq_name = triq
+pkg_triq_description = Trifork QuickCheck
+pkg_triq_homepage = https://triq.gitlab.io
+pkg_triq_fetch = git
+pkg_triq_repo = https://gitlab.com/triq/triq.git
+pkg_triq_commit = master
+
+PACKAGES += tunctl
+pkg_tunctl_name = tunctl
+pkg_tunctl_description = Erlang TUN/TAP interface
+pkg_tunctl_homepage = https://github.com/msantos/tunctl
+pkg_tunctl_fetch = git
+pkg_tunctl_repo = https://github.com/msantos/tunctl
+pkg_tunctl_commit = master
+
+PACKAGES += twerl
+pkg_twerl_name = twerl
+pkg_twerl_description = Erlang client for the Twitter Streaming API
+pkg_twerl_homepage = https://github.com/lucaspiller/twerl
+pkg_twerl_fetch = git
+pkg_twerl_repo = https://github.com/lucaspiller/twerl
+pkg_twerl_commit = oauth
+
+PACKAGES += twitter_erlang
+pkg_twitter_erlang_name = twitter_erlang
+pkg_twitter_erlang_description = An Erlang twitter client
+pkg_twitter_erlang_homepage = https://github.com/ngerakines/erlang_twitter
+pkg_twitter_erlang_fetch = git
+pkg_twitter_erlang_repo = https://github.com/ngerakines/erlang_twitter
+pkg_twitter_erlang_commit = master
+
+PACKAGES += ucol_nif
+pkg_ucol_nif_name = ucol_nif
+pkg_ucol_nif_description = ICU based collation Erlang module
+pkg_ucol_nif_homepage = https://github.com/refuge/ucol_nif
+pkg_ucol_nif_fetch = git
+pkg_ucol_nif_repo = https://github.com/refuge/ucol_nif
+pkg_ucol_nif_commit = master
+
+PACKAGES += unicorn
+pkg_unicorn_name = unicorn
+pkg_unicorn_description = Generic configuration server
+pkg_unicorn_homepage = https://github.com/shizzard/unicorn
+pkg_unicorn_fetch = git
+pkg_unicorn_repo = https://github.com/shizzard/unicorn
+pkg_unicorn_commit = master
+
+PACKAGES += unsplit
+pkg_unsplit_name = unsplit
+pkg_unsplit_description = Resolves conflicts in Mnesia after network splits
+pkg_unsplit_homepage = https://github.com/uwiger/unsplit
+pkg_unsplit_fetch = git
+pkg_unsplit_repo = https://github.com/uwiger/unsplit
+pkg_unsplit_commit = master
+
+PACKAGES += uuid
+pkg_uuid_name = uuid
+pkg_uuid_description = Erlang UUID Implementation
+pkg_uuid_homepage = https://github.com/okeuday/uuid
+pkg_uuid_fetch = git
+pkg_uuid_repo = https://github.com/okeuday/uuid
+pkg_uuid_commit = master
+
+PACKAGES += ux
+pkg_ux_name = ux
+pkg_ux_description = Unicode eXtension for Erlang (Strings, Collation)
+pkg_ux_homepage = https://github.com/erlang-unicode/ux
+pkg_ux_fetch = git
+pkg_ux_repo = https://github.com/erlang-unicode/ux
+pkg_ux_commit = master
+
+PACKAGES += vert
+pkg_vert_name = vert
+pkg_vert_description = Erlang binding to libvirt virtualization API
+pkg_vert_homepage = https://github.com/msantos/erlang-libvirt
+pkg_vert_fetch = git
+pkg_vert_repo = https://github.com/msantos/erlang-libvirt
+pkg_vert_commit = master
+
+PACKAGES += verx
+pkg_verx_name = verx
+pkg_verx_description = Erlang implementation of the libvirtd remote protocol
+pkg_verx_homepage = https://github.com/msantos/verx
+pkg_verx_fetch = git
+pkg_verx_repo = https://github.com/msantos/verx
+pkg_verx_commit = master
+
+PACKAGES += vmq_acl
+pkg_vmq_acl_name = vmq_acl
+pkg_vmq_acl_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_acl_homepage = https://verne.mq/
+pkg_vmq_acl_fetch = git
+pkg_vmq_acl_repo = https://github.com/erlio/vmq_acl
+pkg_vmq_acl_commit = master
+
+PACKAGES += vmq_bridge
+pkg_vmq_bridge_name = vmq_bridge
+pkg_vmq_bridge_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_bridge_homepage = https://verne.mq/
+pkg_vmq_bridge_fetch = git
+pkg_vmq_bridge_repo = https://github.com/erlio/vmq_bridge
+pkg_vmq_bridge_commit = master
+
+PACKAGES += vmq_graphite
+pkg_vmq_graphite_name = vmq_graphite
+pkg_vmq_graphite_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_graphite_homepage = https://verne.mq/
+pkg_vmq_graphite_fetch = git
+pkg_vmq_graphite_repo = https://github.com/erlio/vmq_graphite
+pkg_vmq_graphite_commit = master
+
+PACKAGES += vmq_passwd
+pkg_vmq_passwd_name = vmq_passwd
+pkg_vmq_passwd_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_passwd_homepage = https://verne.mq/
+pkg_vmq_passwd_fetch = git
+pkg_vmq_passwd_repo = https://github.com/erlio/vmq_passwd
+pkg_vmq_passwd_commit = master
+
+PACKAGES += vmq_server
+pkg_vmq_server_name = vmq_server
+pkg_vmq_server_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_server_homepage = https://verne.mq/
+pkg_vmq_server_fetch = git
+pkg_vmq_server_repo = https://github.com/erlio/vmq_server
+pkg_vmq_server_commit = master
+
+PACKAGES += vmq_snmp
+pkg_vmq_snmp_name = vmq_snmp
+pkg_vmq_snmp_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_snmp_homepage = https://verne.mq/
+pkg_vmq_snmp_fetch = git
+pkg_vmq_snmp_repo = https://github.com/erlio/vmq_snmp
+pkg_vmq_snmp_commit = master
+
+PACKAGES += vmq_systree
+pkg_vmq_systree_name = vmq_systree
+pkg_vmq_systree_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_systree_homepage = https://verne.mq/
+pkg_vmq_systree_fetch = git
+pkg_vmq_systree_repo = https://github.com/erlio/vmq_systree
+pkg_vmq_systree_commit = master
+
+PACKAGES += vmstats
+pkg_vmstats_name = vmstats
+pkg_vmstats_description = tiny Erlang app that works in conjunction with statsderl in order to generate information on the Erlang VM for graphite logs.
+pkg_vmstats_homepage = https://github.com/ferd/vmstats
+pkg_vmstats_fetch = git
+pkg_vmstats_repo = https://github.com/ferd/vmstats
+pkg_vmstats_commit = master
+
+PACKAGES += walrus
+pkg_walrus_name = walrus
+pkg_walrus_description = Walrus - Mustache-like Templating
+pkg_walrus_homepage = https://github.com/devinus/walrus
+pkg_walrus_fetch = git
+pkg_walrus_repo = https://github.com/devinus/walrus
+pkg_walrus_commit = master
+
+PACKAGES += webmachine
+pkg_webmachine_name = webmachine
+pkg_webmachine_description = A REST-based system for building web applications.
+pkg_webmachine_homepage = https://github.com/basho/webmachine
+pkg_webmachine_fetch = git
+pkg_webmachine_repo = https://github.com/basho/webmachine
+pkg_webmachine_commit = master
+
+PACKAGES += websocket_client
+pkg_websocket_client_name = websocket_client
+pkg_websocket_client_description = Erlang websocket client (ws and wss supported)
+pkg_websocket_client_homepage = https://github.com/jeremyong/websocket_client
+pkg_websocket_client_fetch = git
+pkg_websocket_client_repo = https://github.com/jeremyong/websocket_client
+pkg_websocket_client_commit = master
+
+PACKAGES += worker_pool
+pkg_worker_pool_name = worker_pool
+pkg_worker_pool_description = a simple erlang worker pool
+pkg_worker_pool_homepage = https://github.com/inaka/worker_pool
+pkg_worker_pool_fetch = git
+pkg_worker_pool_repo = https://github.com/inaka/worker_pool
+pkg_worker_pool_commit = master
+
+PACKAGES += wrangler
+pkg_wrangler_name = wrangler
+pkg_wrangler_description = Import of the Wrangler svn repository.
+pkg_wrangler_homepage = http://www.cs.kent.ac.uk/projects/wrangler/Home.html
+pkg_wrangler_fetch = git
+pkg_wrangler_repo = https://github.com/RefactoringTools/wrangler
+pkg_wrangler_commit = master
+
+PACKAGES += wsock
+pkg_wsock_name = wsock
+pkg_wsock_description = Erlang library to build WebSocket clients and servers
+pkg_wsock_homepage = https://github.com/madtrick/wsock
+pkg_wsock_fetch = git
+pkg_wsock_repo = https://github.com/madtrick/wsock
+pkg_wsock_commit = master
+
+PACKAGES += xhttpc
+pkg_xhttpc_name = xhttpc
+pkg_xhttpc_description = Extensible HTTP Client for Erlang
+pkg_xhttpc_homepage = https://github.com/seriyps/xhttpc
+pkg_xhttpc_fetch = git
+pkg_xhttpc_repo = https://github.com/seriyps/xhttpc
+pkg_xhttpc_commit = master
+
+PACKAGES += xref_runner
+pkg_xref_runner_name = xref_runner
+pkg_xref_runner_description = Erlang Xref Runner (inspired by rebar xref)
+pkg_xref_runner_homepage = https://github.com/inaka/xref_runner
+pkg_xref_runner_fetch = git
+pkg_xref_runner_repo = https://github.com/inaka/xref_runner
+pkg_xref_runner_commit = master
+
+PACKAGES += yamerl
+pkg_yamerl_name = yamerl
+pkg_yamerl_description = YAML 1.2 parser in pure Erlang
+pkg_yamerl_homepage = https://github.com/yakaz/yamerl
+pkg_yamerl_fetch = git
+pkg_yamerl_repo = https://github.com/yakaz/yamerl
+pkg_yamerl_commit = master
+
+PACKAGES += yamler
+pkg_yamler_name = yamler
+pkg_yamler_description = libyaml-based yaml loader for Erlang
+pkg_yamler_homepage = https://github.com/goertzenator/yamler
+pkg_yamler_fetch = git
+pkg_yamler_repo = https://github.com/goertzenator/yamler
+pkg_yamler_commit = master
+
+PACKAGES += yaws
+pkg_yaws_name = yaws
+pkg_yaws_description = Yaws webserver
+pkg_yaws_homepage = http://yaws.hyber.org
+pkg_yaws_fetch = git
+pkg_yaws_repo = https://github.com/klacke/yaws
+pkg_yaws_commit = master
+
+PACKAGES += zab_engine
+pkg_zab_engine_name = zab_engine
+pkg_zab_engine_description = Zab protocol implementation in Erlang
+pkg_zab_engine_homepage = https://github.com/xinmingyao/zab_engine
+pkg_zab_engine_fetch = git
+pkg_zab_engine_repo = https://github.com/xinmingyao/zab_engine
+pkg_zab_engine_commit = master
+
+PACKAGES += zabbix_sender
+pkg_zabbix_sender_name = zabbix_sender
+pkg_zabbix_sender_description = Zabbix trapper for sending data to Zabbix in pure Erlang
+pkg_zabbix_sender_homepage = https://github.com/stalkermn/zabbix_sender
+pkg_zabbix_sender_fetch = git
+pkg_zabbix_sender_repo = https://github.com/stalkermn/zabbix_sender.git
+pkg_zabbix_sender_commit = master
+
+PACKAGES += zeta
+pkg_zeta_name = zeta
+pkg_zeta_description = HTTP access log parser in Erlang
+pkg_zeta_homepage = https://github.com/s1n4/zeta
+pkg_zeta_fetch = git
+pkg_zeta_repo = https://github.com/s1n4/zeta
+pkg_zeta_commit = master
+
+PACKAGES += zippers
+pkg_zippers_name = zippers
+pkg_zippers_description = A library for functional zipper data structures in Erlang.
+pkg_zippers_homepage = https://github.com/ferd/zippers
+pkg_zippers_fetch = git
+pkg_zippers_repo = https://github.com/ferd/zippers
+pkg_zippers_commit = master
+
+PACKAGES += zlists
+pkg_zlists_name = zlists
+pkg_zlists_description = Erlang lazy lists library.
+pkg_zlists_homepage = https://github.com/vjache/erlang-zlists
+pkg_zlists_fetch = git
+pkg_zlists_repo = https://github.com/vjache/erlang-zlists
+pkg_zlists_commit = master
+
+PACKAGES += zraft_lib
+pkg_zraft_lib_name = zraft_lib
+pkg_zraft_lib_description = Erlang raft consensus protocol implementation
+pkg_zraft_lib_homepage = https://github.com/dreyk/zraft_lib
+pkg_zraft_lib_fetch = git
+pkg_zraft_lib_repo = https://github.com/dreyk/zraft_lib
+pkg_zraft_lib_commit = master
+
+PACKAGES += zucchini
+pkg_zucchini_name = zucchini
+pkg_zucchini_description = An Erlang INI parser
+pkg_zucchini_homepage = https://github.com/devinus/zucchini
+pkg_zucchini_fetch = git
+pkg_zucchini_repo = https://github.com/devinus/zucchini
+pkg_zucchini_commit = master
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: search
+
+define pkg_print
+ $(verbose) printf "%s\n" \
+ $(if $(call core_eq,$(1),$(pkg_$(1)_name)),,"Pkg name: $(1)") \
+ "App name: $(pkg_$(1)_name)" \
+ "Description: $(pkg_$(1)_description)" \
+ "Home page: $(pkg_$(1)_homepage)" \
+ "Fetch with: $(pkg_$(1)_fetch)" \
+ "Repository: $(pkg_$(1)_repo)" \
+ "Commit: $(pkg_$(1)_commit)" \
+ ""
+
+endef
+
+search:
+ifdef q
+ $(foreach p,$(PACKAGES), \
+ $(if $(findstring $(call core_lc,$(q)),$(call core_lc,$(pkg_$(p)_name) $(pkg_$(p)_description))), \
+ $(call pkg_print,$(p))))
+else
+ $(foreach p,$(PACKAGES),$(call pkg_print,$(p)))
+endif
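+
+# Usage sketch for the search target (the query string below is
+# hypothetical). With q set, only packages whose name or description
+# contains the string, case-insensitively, are printed; without q the
+# whole index is listed:
+#
+#   $ make search q=websocket
+#   $ make search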
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-deps clean-tmp-deps.log
+
+# Configuration.
+
+ifdef OTP_DEPS
+$(warning The variable OTP_DEPS is deprecated in favor of LOCAL_DEPS.)
+endif
+
+IGNORE_DEPS ?=
+export IGNORE_DEPS
+
+APPS_DIR ?= $(CURDIR)/apps
+export APPS_DIR
+
+DEPS_DIR ?= $(CURDIR)/deps
+export DEPS_DIR
+
+REBAR_DEPS_DIR = $(DEPS_DIR)
+export REBAR_DEPS_DIR
+
+REBAR_GIT ?= https://github.com/rebar/rebar
+REBAR_COMMIT ?= 576e12171ab8d69b048b827b92aa65d067deea01
+
+# External "early" plugins (see core/plugins.mk for regular plugins).
+# They both use the core_dep_plugin macro.
+
+define core_dep_plugin
+ifeq ($(2),$(PROJECT))
+-include $$(patsubst $(PROJECT)/%,%,$(1))
+else
+-include $(DEPS_DIR)/$(1)
+
+$(DEPS_DIR)/$(1): $(DEPS_DIR)/$(2) ;
+endif
+endef
+
+DEP_EARLY_PLUGINS ?=
+
+$(foreach p,$(DEP_EARLY_PLUGINS),\
+ $(eval $(if $(findstring /,$p),\
+ $(call core_dep_plugin,$p,$(firstword $(subst /, ,$p))),\
+ $(call core_dep_plugin,$p/early-plugins.mk,$p))))
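+
+# Configuration sketch (the plugin names are hypothetical): a bare
+# dependency name loads $(DEPS_DIR)/<dep>/early-plugins.mk, while an
+# entry containing a slash selects a specific file inside the
+# dependency named by its first path component:
+#
+#   DEP_EARLY_PLUGINS = my_dep other_dep/mk/early-plugins.mk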
+
+# Query functions.
+
+query_fetch_method = $(if $(dep_$(1)),$(call _qfm_dep,$(word 1,$(dep_$(1)))),$(call _qfm_pkg,$(1)))
+_qfm_dep = $(if $(dep_fetch_$(1)),$(1),$(if $(IS_DEP),legacy,fail))
+_qfm_pkg = $(if $(pkg_$(1)_fetch),$(pkg_$(1)_fetch),fail)
+
+query_name = $(if $(dep_$(1)),$(1),$(if $(pkg_$(1)_name),$(pkg_$(1)_name),$(1)))
+
+query_repo = $(call _qr,$(1),$(call query_fetch_method,$(1)))
+_qr = $(if $(query_repo_$(2)),$(call query_repo_$(2),$(1)),$(call dep_repo,$(1)))
+
+query_repo_default = $(if $(dep_$(1)),$(word 2,$(dep_$(1))),$(pkg_$(1)_repo))
+query_repo_git = $(patsubst git://github.com/%,https://github.com/%,$(call query_repo_default,$(1)))
+query_repo_git-subfolder = $(call query_repo_git,$(1))
+query_repo_git-submodule = -
+query_repo_hg = $(call query_repo_default,$(1))
+query_repo_svn = $(call query_repo_default,$(1))
+query_repo_cp = $(call query_repo_default,$(1))
+query_repo_ln = $(call query_repo_default,$(1))
+query_repo_hex = https://hex.pm/packages/$(if $(word 3,$(dep_$(1))),$(word 3,$(dep_$(1))),$(1))
+query_repo_fail = -
+query_repo_legacy = -
+
+query_version = $(call _qv,$(1),$(call query_fetch_method,$(1)))
+_qv = $(if $(query_version_$(2)),$(call query_version_$(2),$(1)),$(call dep_commit,$(1)))
+
+query_version_default = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(word 3,$(dep_$(1))),$(pkg_$(1)_commit)))
+query_version_git = $(call query_version_default,$(1))
+query_version_git-subfolder = $(call query_version_git,$(1))
+query_version_git-submodule = -
+query_version_hg = $(call query_version_default,$(1))
+query_version_svn = -
+query_version_cp = -
+query_version_ln = -
+query_version_hex = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(word 2,$(dep_$(1))),$(pkg_$(1)_commit)))
+query_version_fail = -
+query_version_legacy = -
+
+query_extra = $(call _qe,$(1),$(call query_fetch_method,$(1)))
+_qe = $(if $(query_extra_$(2)),$(call query_extra_$(2),$(1)),-)
+
+query_extra_git = -
+query_extra_git-subfolder = $(if $(dep_$(1)),subfolder=$(word 4,$(dep_$(1))),-)
+query_extra_git-submodule = -
+query_extra_hg = -
+query_extra_svn = -
+query_extra_cp = -
+query_extra_ln = -
+query_extra_hex = $(if $(dep_$(1)),package-name=$(word 3,$(dep_$(1))),-)
+query_extra_fail = -
+query_extra_legacy = -
+
+query_absolute_path = $(addprefix $(DEPS_DIR)/,$(call query_name,$(1)))
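+
+# Resolution sketch (the dependency spec is hypothetical): given
+#
+#   dep_cowboy = git https://github.com/ninenines/cowboy 2.9.0
+#
+# query_fetch_method yields git, query_repo the repository URL,
+# query_version 2.9.0, and query_absolute_path $(DEPS_DIR)/cowboy.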
+
+# Deprecated legacy query functions.
+dep_fetch = $(call query_fetch_method,$(1))
+dep_name = $(call query_name,$(1))
+dep_repo = $(call query_repo_git,$(1))
+dep_commit = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(if $(filter hex,$(word 1,$(dep_$(1)))),$(word 2,$(dep_$(1))),$(word 3,$(dep_$(1)))),$(pkg_$(1)_commit)))
+
+LOCAL_DEPS_DIRS = $(foreach a,$(LOCAL_DEPS),$(if $(wildcard $(APPS_DIR)/$(a)),$(APPS_DIR)/$(a)))
+ALL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(foreach dep,$(filter-out $(IGNORE_DEPS),$(BUILD_DEPS) $(DEPS)),$(call dep_name,$(dep))))
+
+# When we are calling an app directly we don't want to include it here,
+# otherwise it'll be treated both as an app and as the top-level project.
+ALL_APPS_DIRS = $(if $(wildcard $(APPS_DIR)/),$(filter-out $(APPS_DIR),$(shell find $(APPS_DIR) -maxdepth 1 -type d)))
+ifdef ROOT_DIR
+ifndef IS_APP
+ALL_APPS_DIRS := $(filter-out $(APPS_DIR)/$(notdir $(CURDIR)),$(ALL_APPS_DIRS))
+endif
+endif
+
+ifeq ($(filter $(APPS_DIR) $(DEPS_DIR),$(subst :, ,$(ERL_LIBS))),)
+ifeq ($(ERL_LIBS),)
+ ERL_LIBS = $(APPS_DIR):$(DEPS_DIR)
+else
+ ERL_LIBS := $(ERL_LIBS):$(APPS_DIR):$(DEPS_DIR)
+endif
+endif
+export ERL_LIBS
+
+export NO_AUTOPATCH
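+
+# Opt-out sketch (the dependency name is hypothetical): dependencies
+# listed in NO_AUTOPATCH are fetched and built but never autopatched:
+#
+#   NO_AUTOPATCH = cowlib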
+
+# Verbosity.
+
+dep_verbose_0 = @echo " DEP $1 ($(call dep_commit,$1))";
+dep_verbose_2 = set -x;
+dep_verbose = $(dep_verbose_$(V))
+
+# Optimization: don't recompile deps unless truly necessary.
+
+ifndef IS_DEP
+ifneq ($(MAKELEVEL),0)
+$(shell rm -f ebin/dep_built)
+endif
+endif
+
+# Core targets.
+
+ALL_APPS_DIRS_TO_BUILD = $(if $(LOCAL_DEPS_DIRS)$(IS_APP),$(LOCAL_DEPS_DIRS),$(ALL_APPS_DIRS))
+
+apps:: $(ALL_APPS_DIRS) clean-tmp-deps.log | $(ERLANG_MK_TMP)
+# Create the ebin directory for all apps to make sure Erlang recognizes
+# them as proper OTP applications when using -include_lib. This is a
+# temporary fix; a proper fix would be to compile apps/* in the right order.
+ifndef IS_APP
+ifneq ($(ALL_APPS_DIRS),)
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ mkdir -p $$dep/ebin; \
+ done
+endif
+endif
+# At the toplevel: if LOCAL_DEPS is defined with at least one local app, only
+# compile that list of apps. Otherwise, compile everything.
+# Within an app: compile all LOCAL_DEPS that are (uncompiled) local apps.
+ifneq ($(ALL_APPS_DIRS_TO_BUILD),)
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS_TO_BUILD); do \
+ if grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/apps.log; then \
+ :; \
+ else \
+ echo $$dep >> $(ERLANG_MK_TMP)/apps.log; \
+ $(MAKE) -C $$dep $(if $(IS_TEST),test-build-app) IS_APP=1; \
+ fi \
+ done
+endif
+
+clean-tmp-deps.log:
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_TMP)/apps.log $(ERLANG_MK_TMP)/deps.log
+endif
+
+# Erlang.mk does not rebuild dependencies once they have been compiled.
+# Developers working on the top-level project and some of its
+# dependencies at the same time may want to change this behavior.
+# There are two solutions (see the invocation sketch below):
+# 1. Set `FULL=1` so that all dependencies are visited and
+# recursively recompiled if necessary.
+# 2. Set `FORCE_REBUILD=` to the specific list of dependencies that
+# should be recompiled (instead of the whole set).
+
+FORCE_REBUILD ?=
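+
+# Invocation sketch (the dependency names are hypothetical):
+#
+#   $ make FULL=1                        # revisit all deps on every build
+#   $ make FORCE_REBUILD="cowlib ranch"  # rebuild only the listed deps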
+
+ifeq ($(origin FULL),undefined)
+ifneq ($(strip $(force_rebuild_dep)$(FORCE_REBUILD)),)
+define force_rebuild_dep
+echo "$(FORCE_REBUILD)" | grep -qw "$$(basename "$1")"
+endef
+endif
+endif
+
+ifneq ($(SKIP_DEPS),)
+deps::
+else
+deps:: $(ALL_DEPS_DIRS) apps clean-tmp-deps.log | $(ERLANG_MK_TMP)
+ifneq ($(ALL_DEPS_DIRS),)
+ $(verbose) set -e; for dep in $(ALL_DEPS_DIRS); do \
+ if grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/deps.log; then \
+ :; \
+ else \
+ echo $$dep >> $(ERLANG_MK_TMP)/deps.log; \
+ if [ -z "$(strip $(FULL))" ] $(if $(force_rebuild_dep),&& ! ($(call force_rebuild_dep,$$dep)),) && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ elif [ -f $$dep/GNUmakefile ] || [ -f $$dep/makefile ] || [ -f $$dep/Makefile ]; then \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ else \
+ echo "Error: No Makefile to build dependency $$dep." >&2; \
+ exit 2; \
+ fi \
+ fi \
+ done
+endif
+endif
+
+# Deps related targets.
+
+# @todo rename GNUmakefile and makefile into Makefile first, if they exist
+# While the Makefile could also be named GNUmakefile or makefile,
+# in practice only Makefile has been needed so far.
+define dep_autopatch
+ if [ -f $(DEPS_DIR)/$(1)/erlang.mk ]; then \
+ rm -rf $(DEPS_DIR)/$1/ebin/; \
+ $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+ $(call dep_autopatch_erlang_mk,$(1)); \
+ elif [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+ if [ -f $(DEPS_DIR)/$1/rebar.lock ]; then \
+ $(call dep_autopatch2,$1); \
+ elif [ 0 != `grep -c "include ../\w*\.mk" $(DEPS_DIR)/$(1)/Makefile` ]; then \
+ $(call dep_autopatch2,$(1)); \
+ elif [ 0 != `grep -ci "^[^#].*rebar" $(DEPS_DIR)/$(1)/Makefile` ]; then \
+ $(call dep_autopatch2,$(1)); \
+ elif [ -n "`find $(DEPS_DIR)/$(1)/ -type f -name \*.mk -not -name erlang.mk -exec grep -i "^[^#].*rebar" '{}' \;`" ]; then \
+ $(call dep_autopatch2,$(1)); \
+ fi \
+ else \
+ if [ ! -d $(DEPS_DIR)/$(1)/src/ ]; then \
+ $(call dep_autopatch_noop,$(1)); \
+ else \
+ $(call dep_autopatch2,$(1)); \
+ fi \
+ fi
+endef
+
+define dep_autopatch2
+ ! test -f $(DEPS_DIR)/$1/ebin/$1.app || \
+ mv -n $(DEPS_DIR)/$1/ebin/$1.app $(DEPS_DIR)/$1/src/$1.app.src; \
+ rm -f $(DEPS_DIR)/$1/ebin/$1.app; \
+ if [ -f $(DEPS_DIR)/$1/src/$1.app.src.script ]; then \
+ $(call erlang,$(call dep_autopatch_appsrc_script.erl,$(1))); \
+ fi; \
+ $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+ if [ -f $(DEPS_DIR)/$(1)/rebar -o -f $(DEPS_DIR)/$(1)/rebar.config -o -f $(DEPS_DIR)/$(1)/rebar.config.script -o -f $(DEPS_DIR)/$1/rebar.lock ]; then \
+ $(call dep_autopatch_fetch_rebar); \
+ $(call dep_autopatch_rebar,$(1)); \
+ else \
+ $(call dep_autopatch_gen,$(1)); \
+ fi
+endef
+
+define dep_autopatch_noop
+ printf "noop:\n" > $(DEPS_DIR)/$(1)/Makefile
+endef
+
+# Replace "include erlang.mk" with a line that will load the parent Erlang.mk
+# if given. Do it for all 3 possible Makefile file names.
+ifeq ($(NO_AUTOPATCH_ERLANG_MK),)
+define dep_autopatch_erlang_mk
+ for f in Makefile makefile GNUmakefile; do \
+ if [ -f $(DEPS_DIR)/$1/$$f ]; then \
+ sed -i.bak s/'include *erlang.mk'/'include $$(if $$(ERLANG_MK_FILENAME),$$(ERLANG_MK_FILENAME),erlang.mk)'/ $(DEPS_DIR)/$1/$$f; \
+ fi \
+ done
+endef
+else
+define dep_autopatch_erlang_mk
+ :
+endef
+endif
+
+define dep_autopatch_gen
+ printf "%s\n" \
+ "ERLC_OPTS = +debug_info" \
+ "include ../../erlang.mk" > $(DEPS_DIR)/$(1)/Makefile
+endef
+
+# We use flock/lockf when available to avoid concurrency issues.
+define dep_autopatch_fetch_rebar
+ if command -v flock >/dev/null; then \
+ flock $(ERLANG_MK_TMP)/rebar.lock sh -c "$(call dep_autopatch_fetch_rebar2)"; \
+ elif command -v lockf >/dev/null; then \
+ lockf $(ERLANG_MK_TMP)/rebar.lock sh -c "$(call dep_autopatch_fetch_rebar2)"; \
+ else \
+ $(call dep_autopatch_fetch_rebar2); \
+ fi
+endef
+
+define dep_autopatch_fetch_rebar2
+ if [ ! -d $(ERLANG_MK_TMP)/rebar ]; then \
+ git clone -q -n -- $(REBAR_GIT) $(ERLANG_MK_TMP)/rebar; \
+ cd $(ERLANG_MK_TMP)/rebar; \
+ git checkout -q $(REBAR_COMMIT); \
+ ./bootstrap; \
+ cd -; \
+ fi
+endef
+
+define dep_autopatch_rebar
+ if [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+ mv $(DEPS_DIR)/$(1)/Makefile $(DEPS_DIR)/$(1)/Makefile.orig.mk; \
+ fi; \
+ $(call erlang,$(call dep_autopatch_rebar.erl,$(1))); \
+ rm -f $(DEPS_DIR)/$(1)/ebin/$(1).app
+endef
+
+define dep_autopatch_rebar.erl
+ application:load(rebar),
+ application:set_env(rebar, log_level, debug),
+ rmemo:start(),
+ Conf1 = case file:consult("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config)") of
+ {ok, Conf0} -> Conf0;
+ _ -> []
+ end,
+ {Conf, OsEnv} = fun() ->
+ case filelib:is_file("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)") of
+ false -> {Conf1, []};
+ true ->
+ Bindings0 = erl_eval:new_bindings(),
+ Bindings1 = erl_eval:add_binding('CONFIG', Conf1, Bindings0),
+ Bindings = erl_eval:add_binding('SCRIPT', "$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings1),
+ Before = os:getenv(),
+ {ok, Conf2} = file:script("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings),
+ {Conf2, lists:foldl(fun(E, Acc) -> lists:delete(E, Acc) end, os:getenv(), Before)}
+ end
+ end(),
+ Write = fun (Text) ->
+ file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/Makefile)", Text, [append])
+ end,
+ Escape = fun (Text) ->
+ re:replace(Text, "\\\\$$", "\$$$$", [global, {return, list}])
+ end,
+ Write("IGNORE_DEPS += edown eper eunit_formatters meck node_package "
+ "rebar_lock_deps_plugin rebar_vsn_plugin reltool_util\n"),
+ Write("C_SRC_DIR = /path/do/not/exist\n"),
+ Write("C_SRC_TYPE = rebar\n"),
+ Write("DRV_CFLAGS = -fPIC\nexport DRV_CFLAGS\n"),
+ Write(["ERLANG_ARCH = ", rebar_utils:wordsize(), "\nexport ERLANG_ARCH\n"]),
+ ToList = fun
+ (V) when is_atom(V) -> atom_to_list(V);
+ (V) when is_list(V) -> "'\\"" ++ V ++ "\\"'"
+ end,
+ fun() ->
+ Write("ERLC_OPTS = +debug_info\nexport ERLC_OPTS\n"),
+ case lists:keyfind(erl_opts, 1, Conf) of
+ false -> ok;
+ {_, ErlOpts} ->
+ lists:foreach(fun
+ ({d, D}) ->
+ Write("ERLC_OPTS += -D" ++ ToList(D) ++ "=1\n");
+ ({d, DKey, DVal}) ->
+ Write("ERLC_OPTS += -D" ++ ToList(DKey) ++ "=" ++ ToList(DVal) ++ "\n");
+ ({i, I}) ->
+ Write(["ERLC_OPTS += -I ", I, "\n"]);
+ ({platform_define, Regex, D}) ->
+ case rebar_utils:is_arch(Regex) of
+ true -> Write("ERLC_OPTS += -D" ++ ToList(D) ++ "=1\n");
+ false -> ok
+ end;
+ ({parse_transform, PT}) ->
+ Write("ERLC_OPTS += +'{parse_transform, " ++ ToList(PT) ++ "}'\n");
+ (_) -> ok
+ end, ErlOpts)
+ end,
+ Write("\n")
+ end(),
+ GetHexVsn = fun(N, NP) ->
+ case file:consult("$(call core_native_path,$(DEPS_DIR)/$1/rebar.lock)") of
+ {ok, Lock} ->
+ io:format("~p~n", [Lock]),
+ case lists:keyfind("1.1.0", 1, Lock) of
+ {_, LockPkgs} ->
+ io:format("~p~n", [LockPkgs]),
+ case lists:keyfind(atom_to_binary(N, latin1), 1, LockPkgs) of
+ {_, {pkg, _, Vsn}, _} ->
+ io:format("~p~n", [Vsn]),
+ {N, {hex, NP, binary_to_list(Vsn)}};
+ _ ->
+ false
+ end;
+ _ ->
+ false
+ end;
+ _ ->
+ false
+ end
+ end,
+ SemVsn = fun
+ ("~>" ++ S0) ->
+ S = case S0 of
+ " " ++ S1 -> S1;
+ _ -> S0
+ end,
+ case length([ok || $$. <- S]) of
+ 0 -> S ++ ".0.0";
+ 1 -> S ++ ".0";
+ _ -> S
+ end;
+ (S) -> S
+ end,
+ fun() ->
+ File = case lists:keyfind(deps, 1, Conf) of
+ false -> [];
+ {_, Deps} ->
+ [begin case case Dep of
+ N when is_atom(N) -> GetHexVsn(N, N);
+ {N, S} when is_atom(N), is_list(S) -> {N, {hex, N, SemVsn(S)}};
+ {N, {pkg, NP}} when is_atom(N) -> GetHexVsn(N, NP);
+ {N, S, {pkg, NP}} -> {N, {hex, NP, S}};
+ {N, S} when is_tuple(S) -> {N, S};
+ {N, _, S} -> {N, S};
+ {N, _, S, _} -> {N, S};
+ _ -> false
+ end of
+ false -> ok;
+ {Name, Source} ->
+ {Method, Repo, Commit} = case Source of
+ {hex, NPV, V} -> {hex, V, NPV};
+ {git, R} -> {git, R, master};
+ {M, R, {branch, C}} -> {M, R, C};
+ {M, R, {ref, C}} -> {M, R, C};
+ {M, R, {tag, C}} -> {M, R, C};
+ {M, R, C} -> {M, R, C}
+ end,
+ Write(io_lib:format("DEPS += ~s\ndep_~s = ~s ~s ~s~n", [Name, Name, Method, Repo, Commit]))
+ end end || Dep <- Deps]
+ end
+ end(),
+ fun() ->
+ case lists:keyfind(erl_first_files, 1, Conf) of
+ false -> ok;
+ {_, Files} ->
+ Names = [[" ", case lists:reverse(F) of
+ "lre." ++ Elif -> lists:reverse(Elif);
+ "lrx." ++ Elif -> lists:reverse(Elif);
+ "lry." ++ Elif -> lists:reverse(Elif);
+ Elif -> lists:reverse(Elif)
+ end] || "src/" ++ F <- Files],
+ Write(io_lib:format("COMPILE_FIRST +=~s\n", [Names]))
+ end
+ end(),
+ Write("\n\nrebar_dep: preprocess pre-deps deps pre-app app\n"),
+ Write("\npreprocess::\n"),
+ Write("\npre-deps::\n"),
+ Write("\npre-app::\n"),
+ PatchHook = fun(Cmd) ->
+ Cmd2 = re:replace(Cmd, "^([g]?make)(.*)( -C.*)", "\\\\1\\\\3\\\\2", [{return, list}]),
+ case Cmd2 of
+ "make -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1);
+ "gmake -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1);
+ "make " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1);
+ "gmake " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1);
+ _ -> Escape(Cmd)
+ end
+ end,
+ fun() ->
+ case lists:keyfind(pre_hooks, 1, Conf) of
+ false -> ok;
+ {_, Hooks} ->
+ [case H of
+ {'get-deps', Cmd} ->
+ Write("\npre-deps::\n\t" ++ PatchHook(Cmd) ++ "\n");
+ {compile, Cmd} ->
+ Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n");
+ {Regex, compile, Cmd} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n");
+ false -> ok
+ end;
+ _ -> ok
+ end || H <- Hooks]
+ end
+ end(),
+ ShellToMk = fun(V0) ->
+ V1 = re:replace(V0, "[$$][(]", "$$\(shell ", [global]),
+ V = re:replace(V1, "([$$])(?![(])(\\\\w*)", "\\\\1(\\\\2)", [global]),
+ re:replace(V, "-Werror\\\\b", "", [{return, list}, global])
+ end,
+ PortSpecs = fun() ->
+ case lists:keyfind(port_specs, 1, Conf) of
+ false ->
+ case filelib:is_dir("$(call core_native_path,$(DEPS_DIR)/$1/c_src)") of
+ false -> [];
+ true ->
+ [{"priv/" ++ proplists:get_value(so_name, Conf, "$(1)_drv.so"),
+ proplists:get_value(port_sources, Conf, ["c_src/*.c"]), []}]
+ end;
+ {_, Specs} ->
+ lists:flatten([case S of
+ {Output, Input} -> {ShellToMk(Output), Input, []};
+ {Regex, Output, Input} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {ShellToMk(Output), Input, []};
+ false -> []
+ end;
+ {Regex, Output, Input, [{env, Env}]} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {ShellToMk(Output), Input, Env};
+ false -> []
+ end
+ end || S <- Specs])
+ end
+ end(),
+ PortSpecWrite = fun (Text) ->
+ file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/c_src/Makefile.erlang.mk)", Text, [append])
+ end,
+ case PortSpecs of
+ [] -> ok;
+ _ ->
+ Write("\npre-app::\n\t@$$\(MAKE) --no-print-directory -f c_src/Makefile.erlang.mk\n"),
+ PortSpecWrite(io_lib:format("ERL_CFLAGS ?= -finline-functions -Wall -fPIC -I \\"~s/erts-~s/include\\" -I \\"~s\\"\n",
+ [code:root_dir(), erlang:system_info(version), code:lib_dir(erl_interface, include)])),
+ PortSpecWrite(io_lib:format("ERL_LDFLAGS ?= -L \\"~s\\" -lei\n",
+ [code:lib_dir(erl_interface, lib)])),
+ [PortSpecWrite(["\n", E, "\n"]) || E <- OsEnv],
+ FilterEnv = fun(Env) ->
+ lists:flatten([case E of
+ {_, _} -> E;
+ {Regex, K, V} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {K, V};
+ false -> []
+ end
+ end || E <- Env])
+ end,
+ MergeEnv = fun(Env) ->
+ lists:foldl(fun ({K, V}, Acc) ->
+ case lists:keyfind(K, 1, Acc) of
+ false -> [{K, rebar_utils:expand_env_variable(V, K, "")}|Acc];
+ {_, V0} -> [{K, rebar_utils:expand_env_variable(V, K, V0)}|Acc]
+ end
+ end, [], Env)
+ end,
+ PortEnv = case lists:keyfind(port_env, 1, Conf) of
+ false -> [];
+ {_, PortEnv0} -> FilterEnv(PortEnv0)
+ end,
+ PortSpec = fun ({Output, Input0, Env}) ->
+ filelib:ensure_dir("$(call core_native_path,$(DEPS_DIR)/$1/)" ++ Output),
+ Input = [[" ", I] || I <- Input0],
+ PortSpecWrite([
+ [["\n", K, " = ", ShellToMk(V)] || {K, V} <- lists:reverse(MergeEnv(PortEnv))],
+ case $(PLATFORM) of
+ darwin -> "\n\nLDFLAGS += -flat_namespace -undefined suppress";
+ _ -> ""
+ end,
+ "\n\nall:: ", Output, "\n\t@:\n\n",
+ "%.o: %.c\n\t$$\(CC) -c -o $$\@ $$\< $$\(CFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.C\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.cc\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.cpp\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ [[Output, ": ", K, " += ", ShellToMk(V), "\n"] || {K, V} <- lists:reverse(MergeEnv(FilterEnv(Env)))],
+ Output, ": $$\(foreach ext,.c .C .cc .cpp,",
+ "$$\(patsubst %$$\(ext),%.o,$$\(filter %$$\(ext),$$\(wildcard", Input, "))))\n",
+ "\t$$\(CC) -o $$\@ $$\? $$\(LDFLAGS) $$\(ERL_LDFLAGS) $$\(DRV_LDFLAGS) $$\(EXE_LDFLAGS)",
+ case {filename:extension(Output), $(PLATFORM)} of
+ {[], _} -> "\n";
+ {_, darwin} -> "\n";
+ _ -> " -shared\n"
+ end])
+ end,
+ [PortSpec(S) || S <- PortSpecs]
+ end,
+ fun() ->
+ case lists:keyfind(plugins, 1, Conf) of
+ false -> ok;
+ {_, Plugins0} ->
+ Plugins = [P || P <- Plugins0, is_tuple(P)],
+ case lists:keyfind('lfe-compile', 1, Plugins) of
+ false -> ok;
+ _ -> Write("\nBUILD_DEPS = lfe lfe.mk\ndep_lfe.mk = git https://github.com/ninenines/lfe.mk master\nDEP_PLUGINS = lfe.mk\n")
+ end
+ end
+ end(),
+ Write("\ninclude $$\(if $$\(ERLANG_MK_FILENAME),$$\(ERLANG_MK_FILENAME),erlang.mk)"),
+ RunPlugin = fun(Plugin, Step) ->
+ case erlang:function_exported(Plugin, Step, 2) of
+ false -> ok;
+ true ->
+ c:cd("$(call core_native_path,$(DEPS_DIR)/$1/)"),
+ Ret = Plugin:Step({config, "", Conf, dict:new(), dict:new(), dict:new(),
+ dict:store(base_dir, "", dict:new())}, undefined),
+ io:format("rebar plugin ~p step ~p ret ~p~n", [Plugin, Step, Ret])
+ end
+ end,
+ fun() ->
+ case lists:keyfind(plugins, 1, Conf) of
+ false -> ok;
+ {_, Plugins0} ->
+ Plugins = [P || P <- Plugins0, is_atom(P)],
+ [begin
+ case lists:keyfind(deps, 1, Conf) of
+ false -> ok;
+ {_, Deps} ->
+ case lists:keyfind(P, 1, Deps) of
+ false -> ok;
+ _ ->
+ Path = "$(call core_native_path,$(DEPS_DIR)/)" ++ atom_to_list(P),
+ io:format("~s", [os:cmd("$(MAKE) -C $(call core_native_path,$(DEPS_DIR)/$1) " ++ Path)]),
+ io:format("~s", [os:cmd("$(MAKE) -C " ++ Path ++ " IS_DEP=1")]),
+ code:add_patha(Path ++ "/ebin")
+ end
+ end
+ end || P <- Plugins],
+ [case code:load_file(P) of
+ {module, P} -> ok;
+ _ ->
+ case lists:keyfind(plugin_dir, 1, Conf) of
+ false -> ok;
+ {_, PluginsDir} ->
+ ErlFile = "$(call core_native_path,$(DEPS_DIR)/$1/)" ++ PluginsDir ++ "/" ++ atom_to_list(P) ++ ".erl",
+ {ok, P, Bin} = compile:file(ErlFile, [binary]),
+ {module, P} = code:load_binary(P, ErlFile, Bin)
+ end
+ end || P <- Plugins],
+ [RunPlugin(P, preprocess) || P <- Plugins],
+ [RunPlugin(P, pre_compile) || P <- Plugins],
+ [RunPlugin(P, compile) || P <- Plugins]
+ end
+ end(),
+ halt()
+endef
+
+define dep_autopatch_appsrc_script.erl
+ AppSrc = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+ AppSrcScript = AppSrc ++ ".script",
+ {ok, Conf0} = file:consult(AppSrc),
+ Bindings0 = erl_eval:new_bindings(),
+ Bindings1 = erl_eval:add_binding('CONFIG', Conf0, Bindings0),
+ Bindings = erl_eval:add_binding('SCRIPT', AppSrcScript, Bindings1),
+ Conf = case file:script(AppSrcScript, Bindings) of
+ {ok, [C]} -> C;
+ {ok, C} -> C
+ end,
+ ok = file:write_file(AppSrc, io_lib:format("~p.~n", [Conf])),
+ halt()
+endef
+
+define dep_autopatch_appsrc.erl
+ AppSrcOut = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+ AppSrcIn = case filelib:is_regular(AppSrcOut) of false -> "$(call core_native_path,$(DEPS_DIR)/$1/ebin/$1.app)"; true -> AppSrcOut end,
+ case filelib:is_regular(AppSrcIn) of
+ false -> ok;
+ true ->
+ {ok, [{application, $(1), L0}]} = file:consult(AppSrcIn),
+ L1 = lists:keystore(modules, 1, L0, {modules, []}),
+ L2 = case lists:keyfind(vsn, 1, L1) of
+ {_, git} -> lists:keyreplace(vsn, 1, L1, {vsn, lists:droplast(os:cmd("git -C $(DEPS_DIR)/$1 describe --dirty --tags --always"))});
+ {_, {cmd, _}} -> lists:keyreplace(vsn, 1, L1, {vsn, "cmd"});
+ _ -> L1
+ end,
+ L3 = case lists:keyfind(registered, 1, L2) of false -> [{registered, []}|L2]; _ -> L2 end,
+ ok = file:write_file(AppSrcOut, io_lib:format("~p.~n", [{application, $(1), L3}])),
+ case AppSrcOut of AppSrcIn -> ok; _ -> ok = file:delete(AppSrcIn) end
+ end,
+ halt()
+endef
+
+define dep_fetch_git
+ git clone -q -n -- $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && git checkout -q $(call dep_commit,$(1));
+endef
+
+define dep_fetch_git-subfolder
+ mkdir -p $(ERLANG_MK_TMP)/git-subfolder; \
+ git clone -q -n -- $(call dep_repo,$1) \
+ $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1); \
+ cd $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1) \
+ && git checkout -q $(call dep_commit,$1); \
+ ln -s $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1)/$(word 4,$(dep_$(1))) \
+ $(DEPS_DIR)/$(call dep_name,$1);
+endef
+
+define dep_fetch_git-submodule
+ git submodule update --init -- $(DEPS_DIR)/$1;
+endef
+
+define dep_fetch_hg
+ hg clone -q -U $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && hg update -q $(call dep_commit,$(1));
+endef
+
+define dep_fetch_svn
+ svn checkout -q $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+define dep_fetch_cp
+ cp -R $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+define dep_fetch_ln
+ ln -s $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+# Hex only has a package version. No need to look in the Erlang.mk packages.
+define dep_fetch_hex
+ mkdir -p $(ERLANG_MK_TMP)/hex $(DEPS_DIR)/$1; \
+ $(call core_http_get,$(ERLANG_MK_TMP)/hex/$1.tar,\
+ https://repo.hex.pm/tarballs/$(if $(word 3,$(dep_$1)),$(word 3,$(dep_$1)),$1)-$(strip $(word 2,$(dep_$1))).tar); \
+ tar -xOf $(ERLANG_MK_TMP)/hex/$1.tar contents.tar.gz | tar -C $(DEPS_DIR)/$1 -xzf -;
+endef
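+
+# Specification sketch (package and version are hypothetical): word 2
+# of the dep line is the Hex version, and an optional word 3 names the
+# Hex package when it differs from the application name:
+#
+#   DEPS += jsx
+#   dep_jsx = hex 3.1.0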
+
+define dep_fetch_fail
+ echo "Error: Unknown or invalid dependency: $(1)." >&2; \
+ exit 78;
+endef
+
+# Kept for compatibility purposes with older Erlang.mk configuration.
+define dep_fetch_legacy
+ $(warning WARNING: '$(1)' dependency configuration uses deprecated format.) \
+ git clone -q -n -- $(word 1,$(dep_$(1))) $(DEPS_DIR)/$(1); \
+ cd $(DEPS_DIR)/$(1) && git checkout -q $(if $(word 2,$(dep_$(1))),$(word 2,$(dep_$(1))),master);
+endef
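+
+# Deprecated format sketch (repository and commit are hypothetical):
+# word 1 is the repository URL and an optional word 2 the commit,
+# defaulting to master:
+#
+#   dep_foo = https://github.com/example/foo 1a2b3c4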
+
+define dep_target
+$(DEPS_DIR)/$(call dep_name,$1): | $(ERLANG_MK_TMP)
+ $(eval DEP_NAME := $(call dep_name,$1))
+ $(eval DEP_STR := $(if $(filter $1,$(DEP_NAME)),$1,"$1 ($(DEP_NAME))"))
+ $(verbose) if test -d $(APPS_DIR)/$(DEP_NAME); then \
+ echo "Error: Dependency" $(DEP_STR) "conflicts with application found in $(APPS_DIR)/$(DEP_NAME)." >&2; \
+ exit 17; \
+ fi
+ $(verbose) mkdir -p $(DEPS_DIR)
+ $(dep_verbose) $(call dep_fetch_$(strip $(call dep_fetch,$(1))),$(1))
+ $(verbose) if [ -f $(DEPS_DIR)/$(1)/configure.ac -o -f $(DEPS_DIR)/$(1)/configure.in ] \
+ && [ ! -f $(DEPS_DIR)/$(1)/configure ]; then \
+ echo " AUTO " $(DEP_STR); \
+ cd $(DEPS_DIR)/$(1) && autoreconf -Wall -vif -I m4; \
+ fi
+ - $(verbose) if [ -f $(DEPS_DIR)/$(DEP_NAME)/configure ]; then \
+ echo " CONF " $(DEP_STR); \
+ cd $(DEPS_DIR)/$(DEP_NAME) && ./configure; \
+ fi
+ifeq ($(filter $(1),$(NO_AUTOPATCH)),)
+ $(verbose) $$(MAKE) --no-print-directory autopatch-$(DEP_NAME)
+endif
+
+.PHONY: autopatch-$(call dep_name,$1)
+
+autopatch-$(call dep_name,$1)::
+ $(verbose) if [ "$(1)" = "amqp_client" -a "$(RABBITMQ_CLIENT_PATCH)" ]; then \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \
+ echo " PATCH Downloading rabbitmq-codegen"; \
+ git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \
+ fi; \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-server ]; then \
+ echo " PATCH Downloading rabbitmq-server"; \
+ git clone https://github.com/rabbitmq/rabbitmq-server.git $(DEPS_DIR)/rabbitmq-server; \
+ fi; \
+ ln -s $(DEPS_DIR)/amqp_client/deps/rabbit_common-0.0.0 $(DEPS_DIR)/rabbit_common; \
+ elif [ "$(1)" = "rabbit" -a "$(RABBITMQ_SERVER_PATCH)" ]; then \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \
+ echo " PATCH Downloading rabbitmq-codegen"; \
+ git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \
+ fi \
+ elif [ "$1" = "elixir" -a "$(ELIXIR_PATCH)" ]; then \
+ ln -s lib/elixir/ebin $(DEPS_DIR)/elixir/; \
+ else \
+ $$(call dep_autopatch,$(call dep_name,$1)) \
+ fi
+endef
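+
+# Sketch (user Makefile): dependencies listed in NO_AUTOPATCH skip the
+# autopatch step generated above, e.g.:
+# NO_AUTOPATCH = cowlib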
+
+$(foreach dep,$(BUILD_DEPS) $(DEPS),$(eval $(call dep_target,$(dep))))
+
+ifndef IS_APP
+clean:: clean-apps
+
+clean-apps:
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ $(MAKE) -C $$dep clean IS_APP=1; \
+ done
+
+distclean:: distclean-apps
+
+distclean-apps:
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ $(MAKE) -C $$dep distclean IS_APP=1; \
+ done
+endif
+
+ifndef SKIP_DEPS
+distclean:: distclean-deps
+
+distclean-deps:
+ $(gen_verbose) rm -rf $(DEPS_DIR)
+endif
+
+# Forward-declare variables used in core/deps-tools.mk. This is required
+# in case plugins use them.
+
+ERLANG_MK_RECURSIVE_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-deps-list.log
+ERLANG_MK_RECURSIVE_DOC_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-doc-deps-list.log
+ERLANG_MK_RECURSIVE_REL_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-rel-deps-list.log
+ERLANG_MK_RECURSIVE_TEST_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-test-deps-list.log
+ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-shell-deps-list.log
+
+ERLANG_MK_QUERY_DEPS_FILE = $(ERLANG_MK_TMP)/query-deps.log
+ERLANG_MK_QUERY_DOC_DEPS_FILE = $(ERLANG_MK_TMP)/query-doc-deps.log
+ERLANG_MK_QUERY_REL_DEPS_FILE = $(ERLANG_MK_TMP)/query-rel-deps.log
+ERLANG_MK_QUERY_TEST_DEPS_FILE = $(ERLANG_MK_TMP)/query-test-deps.log
+ERLANG_MK_QUERY_SHELL_DEPS_FILE = $(ERLANG_MK_TMP)/query-shell-deps.log
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: clean-app
+
+# Configuration.
+
+ERLC_OPTS ?= -Werror +debug_info +warn_export_vars +warn_shadow_vars \
+ +warn_obsolete_guard # +bin_opt_info +warn_export_all +warn_missing_spec
+COMPILE_FIRST ?=
+COMPILE_FIRST_PATHS = $(addprefix src/,$(addsuffix .erl,$(COMPILE_FIRST)))
+ERLC_EXCLUDE ?=
+ERLC_EXCLUDE_PATHS = $(addprefix src/,$(addsuffix .erl,$(ERLC_EXCLUDE)))
+
+ERLC_ASN1_OPTS ?=
+
+ERLC_MIB_OPTS ?=
+COMPILE_MIB_FIRST ?=
+COMPILE_MIB_FIRST_PATHS = $(addprefix mibs/,$(addsuffix .mib,$(COMPILE_MIB_FIRST)))
+
+# Verbosity.
+
+app_verbose_0 = @echo " APP " $(PROJECT);
+app_verbose_2 = set -x;
+app_verbose = $(app_verbose_$(V))
+
+appsrc_verbose_0 = @echo " APP " $(PROJECT).app.src;
+appsrc_verbose_2 = set -x;
+appsrc_verbose = $(appsrc_verbose_$(V))
+
+makedep_verbose_0 = @echo " DEPEND" $(PROJECT).d;
+makedep_verbose_2 = set -x;
+makedep_verbose = $(makedep_verbose_$(V))
+
+erlc_verbose_0 = @echo " ERLC " $(filter-out $(patsubst %,%.erl,$(ERLC_EXCLUDE)),\
+ $(filter %.erl %.core,$(?F)));
+erlc_verbose_2 = set -x;
+erlc_verbose = $(erlc_verbose_$(V))
+
+xyrl_verbose_0 = @echo " XYRL " $(filter %.xrl %.yrl,$(?F));
+xyrl_verbose_2 = set -x;
+xyrl_verbose = $(xyrl_verbose_$(V))
+
+asn1_verbose_0 = @echo " ASN1 " $(filter %.asn1,$(?F));
+asn1_verbose_2 = set -x;
+asn1_verbose = $(asn1_verbose_$(V))
+
+mib_verbose_0 = @echo " MIB " $(filter %.bin %.mib,$(?F));
+mib_verbose_2 = set -x;
+mib_verbose = $(mib_verbose_$(V))
+
+ifneq ($(wildcard src/),)
+
+# Targets.
+
+app:: $(if $(wildcard ebin/test),clean) deps
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d
+ $(verbose) $(MAKE) --no-print-directory app-build
+
+ifeq ($(wildcard src/$(PROJECT_MOD).erl),)
+define app_file
+{application, '$(PROJECT)', [
+ {description, "$(PROJECT_DESCRIPTION)"},
+ {vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP),
+ {id$(comma)$(space)"$(1)"}$(comma))
+ {modules, [$(call comma_list,$(2))]},
+ {registered, []},
+ {applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(foreach dep,$(DEPS),$(call dep_name,$(dep))))]},
+ {env, $(subst \,\\,$(PROJECT_ENV))}$(if $(findstring {,$(PROJECT_APP_EXTRA_KEYS)),$(comma)$(newline)$(tab)$(subst \,\\,$(PROJECT_APP_EXTRA_KEYS)),)
+]}.
+endef
+else
+define app_file
+{application, '$(PROJECT)', [
+ {description, "$(PROJECT_DESCRIPTION)"},
+ {vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP),
+ {id$(comma)$(space)"$(1)"}$(comma))
+ {modules, [$(call comma_list,$(2))]},
+ {registered, [$(call comma_list,$(PROJECT)_sup $(PROJECT_REGISTERED))]},
+ {applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(foreach dep,$(DEPS),$(call dep_name,$(dep))))]},
+ {mod, {$(PROJECT_MOD), []}},
+ {env, $(subst \,\\,$(PROJECT_ENV))}$(if $(findstring {,$(PROJECT_APP_EXTRA_KEYS)),$(comma)$(newline)$(tab)$(subst \,\\,$(PROJECT_APP_EXTRA_KEYS)),)
+]}.
+endef
+endif
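+
+# Illustrative user Makefile values feeding the generated .app file above:
+# PROJECT_DESCRIPTION = My application
+# PROJECT_VERSION = 1.0.0
+# PROJECT_ENV = [{pool_size, 5}]
+# PROJECT_APP_EXTRA_KEYS = {licenses, ["ISC"]}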
+
+app-build: ebin/$(PROJECT).app
+ $(verbose) :
+
+# Source files.
+
+ALL_SRC_FILES := $(sort $(call core_find,src/,*))
+
+ERL_FILES := $(filter %.erl,$(ALL_SRC_FILES))
+CORE_FILES := $(filter %.core,$(ALL_SRC_FILES))
+
+# ASN.1 files.
+
+ifneq ($(wildcard asn1/),)
+ASN1_FILES = $(sort $(call core_find,asn1/,*.asn1))
+ERL_FILES += $(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES))))
+
+define compile_asn1
+ $(verbose) mkdir -p include/
+ $(asn1_verbose) erlc -v -I include/ -o asn1/ +noobj $(ERLC_ASN1_OPTS) $(1)
+ $(verbose) mv asn1/*.erl src/
+ -$(verbose) mv asn1/*.hrl include/
+ $(verbose) mv asn1/*.asn1db include/
+endef
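+
+# Example of the ASN.1 flow above (illustrative file names): asn1/MyProto.asn1
+# is compiled to src/MyProto.erl, include/MyProto.hrl and include/MyProto.asn1db,
+# after which src/MyProto.erl is built like any other Erlang module.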
+
+$(PROJECT).d:: $(ASN1_FILES)
+ $(if $(strip $?),$(call compile_asn1,$?))
+endif
+
+# SNMP MIB files.
+
+ifneq ($(wildcard mibs/),)
+MIB_FILES = $(sort $(call core_find,mibs/,*.mib))
+
+$(PROJECT).d:: $(COMPILE_MIB_FIRST_PATHS) $(MIB_FILES)
+ $(verbose) mkdir -p include/ priv/mibs/
+ $(mib_verbose) erlc -v $(ERLC_MIB_OPTS) -o priv/mibs/ -I priv/mibs/ $?
+ $(mib_verbose) erlc -o include/ -- $(addprefix priv/mibs/,$(patsubst %.mib,%.bin,$(notdir $?)))
+endif
+
+# Leex and Yecc files.
+
+XRL_FILES := $(filter %.xrl,$(ALL_SRC_FILES))
+XRL_ERL_FILES = $(addprefix src/,$(patsubst %.xrl,%.erl,$(notdir $(XRL_FILES))))
+ERL_FILES += $(XRL_ERL_FILES)
+
+YRL_FILES := $(filter %.yrl,$(ALL_SRC_FILES))
+YRL_ERL_FILES = $(addprefix src/,$(patsubst %.yrl,%.erl,$(notdir $(YRL_FILES))))
+ERL_FILES += $(YRL_ERL_FILES)
+
+$(PROJECT).d:: $(XRL_FILES) $(YRL_FILES)
+ $(if $(strip $?),$(xyrl_verbose) erlc -v -o src/ $(YRL_ERLC_OPTS) $?)
+
+# Erlang and Core Erlang files.
+
+define makedep.erl
+ E = ets:new(makedep, [bag]),
+ G = digraph:new([acyclic]),
+ ErlFiles = lists:usort(string:tokens("$(ERL_FILES)", " ")),
+ DepsDir = "$(call core_native_path,$(DEPS_DIR))",
+ AppsDir = "$(call core_native_path,$(APPS_DIR))",
+ DepsDirsSrc = "$(if $(wildcard $(DEPS_DIR)/*/src), $(call core_native_path,$(wildcard $(DEPS_DIR)/*/src)))",
+ DepsDirsInc = "$(if $(wildcard $(DEPS_DIR)/*/include), $(call core_native_path,$(wildcard $(DEPS_DIR)/*/include)))",
+ AppsDirsSrc = "$(if $(wildcard $(APPS_DIR)/*/src), $(call core_native_path,$(wildcard $(APPS_DIR)/*/src)))",
+ AppsDirsInc = "$(if $(wildcard $(APPS_DIR)/*/include), $(call core_native_path,$(wildcard $(APPS_DIR)/*/include)))",
+ DepsDirs = lists:usort(string:tokens(DepsDirsSrc++DepsDirsInc, " ")),
+ AppsDirs = lists:usort(string:tokens(AppsDirsSrc++AppsDirsInc, " ")),
+ Modules = [{list_to_atom(filename:basename(F, ".erl")), F} || F <- ErlFiles],
+ Add = fun (Mod, Dep) ->
+ case lists:keyfind(Dep, 1, Modules) of
+ false -> ok;
+ {_, DepFile} ->
+ {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+ ets:insert(E, {ModFile, DepFile}),
+ digraph:add_vertex(G, Mod),
+ digraph:add_vertex(G, Dep),
+ digraph:add_edge(G, Mod, Dep)
+ end
+ end,
+ AddHd = fun (F, Mod, DepFile) ->
+ case file:open(DepFile, [read]) of
+ {error, enoent} ->
+ ok;
+ {ok, Fd} ->
+ {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+ case ets:match(E, {ModFile, DepFile}) of
+ [] ->
+ ets:insert(E, {ModFile, DepFile}),
+ F(F, Fd, Mod,0);
+ _ -> ok
+ end
+ end
+ end,
+ SearchHrl = fun
+ F(_Hrl, []) -> {error,enoent};
+ F(Hrl, [Dir|Dirs]) ->
+ HrlF = filename:join([Dir,Hrl]),
+ case filelib:is_file(HrlF) of
+ true ->
+ {ok, HrlF};
+ false -> F(Hrl,Dirs)
+ end
+ end,
+ Attr = fun
+ (_F, Mod, behavior, Dep) ->
+ Add(Mod, Dep);
+ (_F, Mod, behaviour, Dep) ->
+ Add(Mod, Dep);
+ (_F, Mod, compile, {parse_transform, Dep}) ->
+ Add(Mod, Dep);
+ (_F, Mod, compile, Opts) when is_list(Opts) ->
+ case proplists:get_value(parse_transform, Opts) of
+ undefined -> ok;
+ Dep -> Add(Mod, Dep)
+ end;
+ (F, Mod, include, Hrl) ->
+ case SearchHrl(Hrl, ["src", "include",AppsDir,DepsDir]++AppsDirs++DepsDirs) of
+ {ok, FoundHrl} -> AddHd(F, Mod, FoundHrl);
+ {error, _} -> false
+ end;
+ (F, Mod, include_lib, Hrl) ->
+ case SearchHrl(Hrl, ["src", "include",AppsDir,DepsDir]++AppsDirs++DepsDirs) of
+ {ok, FoundHrl} -> AddHd(F, Mod, FoundHrl);
+ {error, _} -> false
+ end;
+ (F, Mod, import, {Imp, _}) ->
+ IsFile =
+ case lists:keyfind(Imp, 1, Modules) of
+ false -> false;
+ {_, FilePath} -> filelib:is_file(FilePath)
+ end,
+ case IsFile of
+ false -> ok;
+ true -> Add(Mod, Imp)
+ end;
+ (_, _, _, _) -> ok
+ end,
+ MakeDepend = fun
+ (F, Fd, Mod, StartLocation) ->
+ {ok, Filename} = file:pid2name(Fd),
+ case io:parse_erl_form(Fd, undefined, StartLocation) of
+ {ok, AbsData, EndLocation} ->
+ case AbsData of
+ {attribute, _, Key, Value} ->
+ Attr(F, Mod, Key, Value),
+ F(F, Fd, Mod, EndLocation);
+ _ -> F(F, Fd, Mod, EndLocation)
+ end;
+			{eof, _} -> file:close(Fd);
+			{error, _ErrorDescription} ->
+				file:close(Fd);
+ {error, ErrorInfo, ErrorLocation} ->
+ F(F, Fd, Mod, ErrorLocation)
+ end,
+ ok
+ end,
+ [begin
+ Mod = list_to_atom(filename:basename(F, ".erl")),
+ case file:open(F, [read]) of
+ {ok, Fd} -> MakeDepend(MakeDepend, Fd, Mod,0);
+ {error, enoent} -> ok
+ end
+ end || F <- ErlFiles],
+ Depend = sofs:to_external(sofs:relation_to_family(sofs:relation(ets:tab2list(E)))),
+ CompileFirst = [X || X <- lists:reverse(digraph_utils:topsort(G)), [] =/= digraph:in_neighbours(G, X)],
+ TargetPath = fun(Target) ->
+ case lists:keyfind(Target, 1, Modules) of
+ false -> "";
+ {_, DepFile} ->
+ DirSubname = tl(string:tokens(filename:dirname(DepFile), "/")),
+ string:join(DirSubname ++ [atom_to_list(Target)], "/")
+ end
+ end,
+ Output0 = [
+ "# Generated by Erlang.mk. Edit at your own risk!\n\n",
+ [[F, "::", [[" ", D] || D <- Deps], "; @touch \$$@\n"] || {F, Deps} <- Depend],
+ "\nCOMPILE_FIRST +=", [[" ", TargetPath(CF)] || CF <- CompileFirst], "\n"
+ ],
+ Output = case "é" of
+ [233] -> unicode:characters_to_binary(Output0);
+ _ -> Output0
+ end,
+ ok = file:write_file("$(1)", Output),
+ halt()
+endef
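+
+# The generated $(PROJECT).d contains make rules of this shape (illustrative):
+# src/my_worker.erl:: include/my.hrl src/my_behaviour.erl; @touch $@
+# COMPILE_FIRST += my_behaviour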
+
+ifeq ($(if $(NO_MAKEDEP),$(wildcard $(PROJECT).d),),)
+$(PROJECT).d:: $(ERL_FILES) $(call core_find,include/,*.hrl) $(MAKEFILE_LIST)
+ $(makedep_verbose) $(call erlang,$(call makedep.erl,$@))
+endif
+
+ifeq ($(IS_APP)$(IS_DEP),)
+ifneq ($(words $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES)),0)
+# Rebuild everything when the Makefile changes.
+$(ERLANG_MK_TMP)/last-makefile-change: $(MAKEFILE_LIST) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES); \
+ touch -c $(PROJECT).d; \
+ fi
+ $(verbose) touch $@
+
+$(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES):: $(ERLANG_MK_TMP)/last-makefile-change
+ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change
+endif
+endif
+
+$(PROJECT).d::
+ $(verbose) :
+
+include $(wildcard $(PROJECT).d)
+
+ebin/$(PROJECT).app:: ebin/
+
+ebin/:
+ $(verbose) mkdir -p ebin/
+
+define compile_erl
+ $(erlc_verbose) erlc -v $(if $(IS_DEP),$(filter-out -Werror,$(ERLC_OPTS)),$(ERLC_OPTS)) -o ebin/ \
+ -pa ebin/ -I include/ $(filter-out $(ERLC_EXCLUDE_PATHS),$(COMPILE_FIRST_PATHS) $(1))
+endef
+
+define validate_app_file
+ case file:consult("ebin/$(PROJECT).app") of
+ {ok, _} -> halt();
+ _ -> halt(1)
+ end
+endef
+
+ebin/$(PROJECT).app:: $(ERL_FILES) $(CORE_FILES) $(wildcard src/$(PROJECT).app.src)
+ $(eval FILES_TO_COMPILE := $(filter-out src/$(PROJECT).app.src,$?))
+ $(if $(strip $(FILES_TO_COMPILE)),$(call compile_erl,$(FILES_TO_COMPILE)))
+# Older git versions do not have the --first-parent flag. Do without in that case.
+ $(eval GITDESCRIBE := $(shell git describe --dirty --abbrev=7 --tags --always --first-parent 2>/dev/null \
+ || git describe --dirty --abbrev=7 --tags --always 2>/dev/null || true))
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(filter-out $(ERLC_EXCLUDE_PATHS),$(ERL_FILES) $(CORE_FILES) $(BEAM_FILES)))))))
+ifeq ($(wildcard src/$(PROJECT).app.src),)
+ $(app_verbose) printf '$(subst %,%%,$(subst $(newline),\n,$(subst ','\'',$(call app_file,$(GITDESCRIBE),$(MODULES)))))' \
+ > ebin/$(PROJECT).app
+ $(verbose) if ! $(call erlang,$(call validate_app_file)); then \
+ echo "The .app file produced is invalid. Please verify the value of PROJECT_ENV." >&2; \
+ exit 1; \
+ fi
+else
+ $(verbose) if [ -z "$$(grep -e '^[^%]*{\s*modules\s*,' src/$(PROJECT).app.src)" ]; then \
+		echo "No '{modules, []}' entry found in $(PROJECT).app.src; Erlang.mk fills it in at build time. Please consult the erlang.mk documentation for instructions." >&2; \
+ exit 1; \
+ fi
+ $(appsrc_verbose) cat src/$(PROJECT).app.src \
+ | sed "s/{[[:space:]]*modules[[:space:]]*,[[:space:]]*\[\]}/{modules, \[$(call comma_list,$(MODULES))\]}/" \
+ | sed "s/{id,[[:space:]]*\"git\"}/{id, \"$(subst /,\/,$(GITDESCRIBE))\"}/" \
+ > ebin/$(PROJECT).app
+endif
+ifneq ($(wildcard src/$(PROJECT).appup),)
+ $(verbose) cp src/$(PROJECT).appup ebin/
+endif
+
+clean:: clean-app
+
+clean-app:
+ $(gen_verbose) rm -rf $(PROJECT).d ebin/ priv/mibs/ $(XRL_ERL_FILES) $(YRL_ERL_FILES) \
+ $(addprefix include/,$(patsubst %.mib,%.hrl,$(notdir $(MIB_FILES)))) \
+ $(addprefix include/,$(patsubst %.asn1,%.hrl,$(notdir $(ASN1_FILES)))) \
+ $(addprefix include/,$(patsubst %.asn1,%.asn1db,$(notdir $(ASN1_FILES)))) \
+ $(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES))))
+
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Viktor Söderqvist <viktor@zuiderkwast.se>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: docs-deps
+
+# Configuration.
+
+ALL_DOC_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(DOC_DEPS))
+
+# Targets.
+
+$(foreach dep,$(DOC_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+doc-deps:
+else
+doc-deps: $(ALL_DOC_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_DOC_DEPS_DIRS) ; do $(MAKE) -C $$dep IS_DEP=1; done
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: rel-deps
+
+# Configuration.
+
+ALL_REL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(REL_DEPS))
+
+# Targets.
+
+$(foreach dep,$(REL_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+rel-deps:
+else
+rel-deps: $(ALL_REL_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_REL_DEPS_DIRS) ; do $(MAKE) -C $$dep; done
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: test-deps test-dir test-build clean-test-dir
+
+# Configuration.
+
+TEST_DIR ?= $(CURDIR)/test
+
+ALL_TEST_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(TEST_DEPS))
+
+TEST_ERLC_OPTS ?= +debug_info +warn_export_vars +warn_shadow_vars +warn_obsolete_guard
+TEST_ERLC_OPTS += -DTEST=1
+
+# Targets.
+
+$(foreach dep,$(TEST_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+test-deps:
+else
+test-deps: $(ALL_TEST_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_TEST_DEPS_DIRS) ; do \
+ if [ -z "$(strip $(FULL))" ] && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ else \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ fi \
+ done
+endif
+
+ifneq ($(wildcard $(TEST_DIR)),)
+test-dir: $(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build
+ @:
+
+test_erlc_verbose_0 = @echo " ERLC " $(filter-out $(patsubst %,%.erl,$(ERLC_EXCLUDE)),\
+ $(filter %.erl %.core,$(notdir $(FILES_TO_COMPILE))));
+test_erlc_verbose_2 = set -x;
+test_erlc_verbose = $(test_erlc_verbose_$(V))
+
+define compile_test_erl
+ $(test_erlc_verbose) erlc -v $(TEST_ERLC_OPTS) -o $(TEST_DIR) \
+ -pa ebin/ -I include/ $(1)
+endef
+
+ERL_TEST_FILES = $(call core_find,$(TEST_DIR)/,*.erl)
+$(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build: $(ERL_TEST_FILES) $(MAKEFILE_LIST)
+ $(eval FILES_TO_COMPILE := $(if $(filter $(MAKEFILE_LIST),$?),$(filter $(ERL_TEST_FILES),$^),$?))
+ $(if $(strip $(FILES_TO_COMPILE)),$(call compile_test_erl,$(FILES_TO_COMPILE)) && touch $@)
+endif
+
+test-build:: IS_TEST=1
+test-build:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build:: $(if $(wildcard src),$(if $(wildcard ebin/test),,clean)) $(if $(IS_APP),,deps test-deps)
+# We already compiled everything when IS_APP=1.
+ifndef IS_APP
+ifneq ($(wildcard src),)
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(verbose) $(MAKE) --no-print-directory app-build ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(gen_verbose) touch ebin/test
+endif
+ifneq ($(wildcard $(TEST_DIR)),)
+ $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+endif
+endif
+
+# Roughly the same as test-build, but when IS_APP=1.
+# We only care about compiling the current application.
+ifdef IS_APP
+test-build-app:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build-app:: deps test-deps
+ifneq ($(wildcard src),)
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(verbose) $(MAKE) --no-print-directory app-build ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(gen_verbose) touch ebin/test
+endif
+ifneq ($(wildcard $(TEST_DIR)),)
+ $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+endif
+endif
+
+clean:: clean-test-dir
+
+clean-test-dir:
+ifneq ($(wildcard $(TEST_DIR)/*.beam),)
+ $(gen_verbose) rm -f $(TEST_DIR)/*.beam $(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: rebar.config
+
+# We strip out -Werror because we don't want to fail due to
+# warnings when used as a dependency.
+
+compat_prepare_erlc_opts = $(shell echo "$1" | sed 's/, */,/g')
+
+define compat_convert_erlc_opts
+$(if $(filter-out -Werror,$1),\
+ $(if $(findstring +,$1),\
+ $(shell echo $1 | cut -b 2-)))
+endef
+
+define compat_erlc_opts_to_list
+[$(call comma_list,$(foreach o,$(call compat_prepare_erlc_opts,$1),$(call compat_convert_erlc_opts,$o)))]
+endef
+
+define compat_rebar_config
+{deps, [
+$(call comma_list,$(foreach d,$(DEPS),\
+ $(if $(filter hex,$(call dep_fetch,$d)),\
+ {$(call dep_name,$d)$(comma)"$(call dep_repo,$d)"},\
+ {$(call dep_name,$d)$(comma)".*"$(comma){git,"$(call dep_repo,$d)"$(comma)"$(call dep_commit,$d)"}})))
+]}.
+{erl_opts, $(call compat_erlc_opts_to_list,$(ERLC_OPTS))}.
+endef
+
+rebar.config:
+ $(gen_verbose) $(call core_render,compat_rebar_config,rebar.config)
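+
+# With the defaults above and, say, DEPS = cowlib fetched over git,
+# 'make rebar.config' renders something like (illustrative):
+# {deps, [{cowlib,".*",{git,"https://github.com/ninenines/cowlib","2.9.1"}}]}.
+# {erl_opts, [debug_info,warn_export_vars,warn_shadow_vars,warn_obsolete_guard]}.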
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter asciideck,$(DEPS) $(DOC_DEPS)),asciideck)
+
+.PHONY: asciidoc asciidoc-guide asciidoc-manual install-asciidoc distclean-asciidoc-guide distclean-asciidoc-manual
+
+# Core targets.
+
+docs:: asciidoc
+
+distclean:: distclean-asciidoc-guide distclean-asciidoc-manual
+
+# Plugin-specific targets.
+
+asciidoc: asciidoc-guide asciidoc-manual
+
+# User guide.
+
+ifeq ($(wildcard doc/src/guide/book.asciidoc),)
+asciidoc-guide:
+else
+asciidoc-guide: distclean-asciidoc-guide doc-deps
+ a2x -v -f pdf doc/src/guide/book.asciidoc && mv doc/src/guide/book.pdf doc/guide.pdf
+ a2x -v -f chunked doc/src/guide/book.asciidoc && mv doc/src/guide/book.chunked/ doc/html/
+
+distclean-asciidoc-guide:
+ $(gen_verbose) rm -rf doc/html/ doc/guide.pdf
+endif
+
+# Man pages.
+
+ASCIIDOC_MANUAL_FILES := $(wildcard doc/src/manual/*.asciidoc)
+
+ifeq ($(ASCIIDOC_MANUAL_FILES),)
+asciidoc-manual:
+else
+
+# Configuration.
+
+MAN_INSTALL_PATH ?= /usr/local/share/man
+MAN_SECTIONS ?= 3 7
+MAN_PROJECT ?= $(shell echo $(PROJECT) | sed 's/^./\U&\E/')
+MAN_VERSION ?= $(PROJECT_VERSION)
+
+# Plugin-specific targets.
+
+define asciidoc2man.erl
+try
+ [begin
+ io:format(" ADOC ~s~n", [F]),
+ ok = asciideck:to_manpage(asciideck:parse_file(F), #{
+ compress => gzip,
+ outdir => filename:dirname(F),
+ extra2 => "$(MAN_PROJECT) $(MAN_VERSION)",
+ extra3 => "$(MAN_PROJECT) Function Reference"
+ })
+ end || F <- [$(shell echo $(addprefix $(comma)\",$(addsuffix \",$1)) | sed 's/^.//')]],
+ halt(0)
+catch C:E:Stacktrace ->
+	io:format("Exception ~p:~p~nStacktrace: ~p~n", [C, E, Stacktrace]),
+	halt(1)
+end.
+endef
+
+asciidoc-manual:: doc-deps
+
+asciidoc-manual:: $(ASCIIDOC_MANUAL_FILES)
+ $(gen_verbose) $(call erlang,$(call asciidoc2man.erl,$?))
+ $(verbose) $(foreach s,$(MAN_SECTIONS),mkdir -p doc/man$s/ && mv doc/src/manual/*.$s.gz doc/man$s/;)
+
+install-docs:: install-asciidoc
+
+install-asciidoc: asciidoc-manual
+ $(foreach s,$(MAN_SECTIONS),\
+ mkdir -p $(MAN_INSTALL_PATH)/man$s/ && \
+ install -g `id -g` -o `id -u` -m 0644 doc/man$s/*.gz $(MAN_INSTALL_PATH)/man$s/;)
+
+distclean-asciidoc-manual:
+ $(gen_verbose) rm -rf $(addprefix doc/man,$(MAN_SECTIONS))
+endif
+endif
+
+# Copyright (c) 2014-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: bootstrap bootstrap-lib bootstrap-rel new list-templates
+
+# Core targets.
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Bootstrap targets:" \
+ " bootstrap Generate a skeleton of an OTP application" \
+ " bootstrap-lib Generate a skeleton of an OTP library" \
+ " bootstrap-rel Generate the files needed to build a release" \
+ " new-app in=NAME Create a new local OTP application NAME" \
+ " new-lib in=NAME Create a new local OTP library NAME" \
+ " new t=TPL n=NAME Generate a module NAME based on the template TPL" \
+ " new t=T n=N in=APP Generate a module NAME based on the template TPL in APP" \
+ " list-templates List available templates"
+
+# Bootstrap templates.
+
+define bs_appsrc
+{application, $p, [
+ {description, ""},
+ {vsn, "0.1.0"},
+ {id, "git"},
+ {modules, []},
+ {registered, []},
+ {applications, [
+ kernel,
+ stdlib
+ ]},
+ {mod, {$p_app, []}},
+ {env, []}
+]}.
+endef
+
+define bs_appsrc_lib
+{application, $p, [
+ {description, ""},
+ {vsn, "0.1.0"},
+ {id, "git"},
+ {modules, []},
+ {registered, []},
+ {applications, [
+ kernel,
+ stdlib
+ ]}
+]}.
+endef
+
+# To prevent autocompletion issues with ZSH, we add "include erlang.mk"
+# separately during the actual bootstrap.
+define bs_Makefile
+PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.1.0
+$(if $(SP),
+# Whitespace to be used when creating files from templates.
+SP = $(SP)
+)
+endef
+
+define bs_apps_Makefile
+PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.1.0
+$(if $(SP),
+# Whitespace to be used when creating files from templates.
+SP = $(SP)
+)
+# Make sure we know where the applications are located.
+ROOT_DIR ?= $(call core_relpath,$(dir $(ERLANG_MK_FILENAME)),$(APPS_DIR)/app)
+APPS_DIR ?= ..
+DEPS_DIR ?= $(call core_relpath,$(DEPS_DIR),$(APPS_DIR)/app)
+
+include $$(ROOT_DIR)/erlang.mk
+endef
+
+define bs_app
+-module($p_app).
+-behaviour(application).
+
+-export([start/2]).
+-export([stop/1]).
+
+start(_Type, _Args) ->
+ $p_sup:start_link().
+
+stop(_State) ->
+ ok.
+endef
+
+define bs_relx_config
+{release, {$p_release, "1"}, [$p, sasl, runtime_tools]}.
+{extended_start_script, true}.
+{sys_config, "config/sys.config"}.
+{vm_args, "config/vm.args"}.
+endef
+
+define bs_sys_config
+[
+].
+endef
+
+define bs_vm_args
+-name $p@127.0.0.1
+-setcookie $p
+-heart
+endef
+
+# Normal templates.
+
+define tpl_supervisor
+-module($(n)).
+-behaviour(supervisor).
+
+-export([start_link/0]).
+-export([init/1]).
+
+start_link() ->
+ supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+init([]) ->
+ Procs = [],
+ {ok, {{one_for_one, 1, 5}, Procs}}.
+endef
+
+define tpl_gen_server
+-module($(n)).
+-behaviour(gen_server).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_server.
+-export([init/1]).
+-export([handle_call/3]).
+-export([handle_cast/2]).
+-export([handle_info/2]).
+-export([terminate/2]).
+-export([code_change/3]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_server:start_link(?MODULE, [], []).
+
+%% gen_server.
+
+init([]) ->
+ {ok, #state{}}.
+
+handle_call(_Request, _From, State) ->
+ {reply, ignored, State}.
+
+handle_cast(_Msg, State) ->
+ {noreply, State}.
+
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+endef
+
+define tpl_module
+-module($(n)).
+-export([]).
+endef
+
+define tpl_cowboy_http
+-module($(n)).
+-behaviour(cowboy_http_handler).
+
+-export([init/3]).
+-export([handle/2]).
+-export([terminate/3]).
+
+-record(state, {
+}).
+
+init(_, Req, _Opts) ->
+ {ok, Req, #state{}}.
+
+handle(Req, State=#state{}) ->
+ {ok, Req2} = cowboy_req:reply(200, Req),
+ {ok, Req2, State}.
+
+terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_gen_fsm
+-module($(n)).
+-behaviour(gen_fsm).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_fsm.
+-export([init/1]).
+-export([state_name/2]).
+-export([handle_event/3]).
+-export([state_name/3]).
+-export([handle_sync_event/4]).
+-export([handle_info/3]).
+-export([terminate/3]).
+-export([code_change/4]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_fsm:start_link(?MODULE, [], []).
+
+%% gen_fsm.
+
+init([]) ->
+ {ok, state_name, #state{}}.
+
+state_name(_Event, StateData) ->
+ {next_state, state_name, StateData}.
+
+handle_event(_Event, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+state_name(_Event, _From, StateData) ->
+ {reply, ignored, state_name, StateData}.
+
+handle_sync_event(_Event, _From, StateName, StateData) ->
+ {reply, ignored, StateName, StateData}.
+
+handle_info(_Info, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+terminate(_Reason, _StateName, _StateData) ->
+ ok.
+
+code_change(_OldVsn, StateName, StateData, _Extra) ->
+ {ok, StateName, StateData}.
+endef
+
+define tpl_gen_statem
+-module($(n)).
+-behaviour(gen_statem).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_statem.
+-export([callback_mode/0]).
+-export([init/1]).
+-export([state_name/3]).
+-export([handle_event/4]).
+-export([terminate/3]).
+-export([code_change/4]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_statem:start_link(?MODULE, [], []).
+
+%% gen_statem.
+
+callback_mode() ->
+ state_functions.
+
+init([]) ->
+ {ok, state_name, #state{}}.
+
+state_name(_EventType, _EventData, StateData) ->
+ {next_state, state_name, StateData}.
+
+handle_event(_EventType, _EventData, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+terminate(_Reason, _StateName, _StateData) ->
+ ok.
+
+code_change(_OldVsn, StateName, StateData, _Extra) ->
+ {ok, StateName, StateData}.
+endef
+
+define tpl_cowboy_loop
+-module($(n)).
+-behaviour(cowboy_loop_handler).
+
+-export([init/3]).
+-export([info/3]).
+-export([terminate/3]).
+
+-record(state, {
+}).
+
+init(_, Req, _Opts) ->
+ {loop, Req, #state{}, 5000, hibernate}.
+
+info(_Info, Req, State) ->
+ {loop, Req, State, hibernate}.
+
+terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_cowboy_rest
+-module($(n)).
+
+-export([init/3]).
+-export([content_types_provided/2]).
+-export([get_html/2]).
+
+init(_, _Req, _Opts) ->
+ {upgrade, protocol, cowboy_rest}.
+
+content_types_provided(Req, State) ->
+ {[{{<<"text">>, <<"html">>, '*'}, get_html}], Req, State}.
+
+get_html(Req, State) ->
+ {<<"<html><body>This is REST!</body></html>">>, Req, State}.
+endef
+
+define tpl_cowboy_ws
+-module($(n)).
+-behaviour(cowboy_websocket_handler).
+
+-export([init/3]).
+-export([websocket_init/3]).
+-export([websocket_handle/3]).
+-export([websocket_info/3]).
+-export([websocket_terminate/3]).
+
+-record(state, {
+}).
+
+init(_, _, _) ->
+ {upgrade, protocol, cowboy_websocket}.
+
+websocket_init(_, Req, _Opts) ->
+ Req2 = cowboy_req:compact(Req),
+ {ok, Req2, #state{}}.
+
+websocket_handle({text, Data}, Req, State) ->
+ {reply, {text, Data}, Req, State};
+websocket_handle({binary, Data}, Req, State) ->
+ {reply, {binary, Data}, Req, State};
+websocket_handle(_Frame, Req, State) ->
+ {ok, Req, State}.
+
+websocket_info(_Info, Req, State) ->
+ {ok, Req, State}.
+
+websocket_terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_ranch_protocol
+-module($(n)).
+-behaviour(ranch_protocol).
+
+-export([start_link/4]).
+-export([init/4]).
+
+-type opts() :: [].
+-export_type([opts/0]).
+
+-record(state, {
+ socket :: inet:socket(),
+ transport :: module()
+}).
+
+start_link(Ref, Socket, Transport, Opts) ->
+ Pid = spawn_link(?MODULE, init, [Ref, Socket, Transport, Opts]),
+ {ok, Pid}.
+
+-spec init(ranch:ref(), inet:socket(), module(), opts()) -> ok.
+init(Ref, Socket, Transport, _Opts) ->
+ ok = ranch:accept_ack(Ref),
+ loop(#state{socket=Socket, transport=Transport}).
+
+loop(State) ->
+ loop(State).
+endef
+
+# Plugin-specific targets.
+
+ifndef WS
+ifdef SP
+WS = $(subst a,,a $(wordlist 1,$(SP),a a a a a a a a a a a a a a a a a a a a))
+else
+WS = $(tab)
+endif
+endif
+
+bootstrap:
+ifneq ($(wildcard src/),)
+ $(error Error: src/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(eval n := $(PROJECT)_sup)
+ $(verbose) $(call core_render,bs_Makefile,Makefile)
+ $(verbose) echo "include erlang.mk" >> Makefile
+ $(verbose) mkdir src/
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc,src/$(PROJECT).app.src)
+endif
+ $(verbose) $(call core_render,bs_app,src/$(PROJECT)_app.erl)
+ $(verbose) $(call core_render,tpl_supervisor,src/$(PROJECT)_sup.erl)
+
+bootstrap-lib:
+ifneq ($(wildcard src/),)
+ $(error Error: src/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(verbose) $(call core_render,bs_Makefile,Makefile)
+ $(verbose) echo "include erlang.mk" >> Makefile
+ $(verbose) mkdir src/
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc_lib,src/$(PROJECT).app.src)
+endif
+
+bootstrap-rel:
+ifneq ($(wildcard relx.config),)
+ $(error Error: relx.config already exists)
+endif
+ifneq ($(wildcard config/),)
+ $(error Error: config/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(verbose) $(call core_render,bs_relx_config,relx.config)
+ $(verbose) mkdir config/
+ $(verbose) $(call core_render,bs_sys_config,config/sys.config)
+ $(verbose) $(call core_render,bs_vm_args,config/vm.args)
+
+new-app:
+ifndef in
+ $(error Usage: $(MAKE) new-app in=APP)
+endif
+ifneq ($(wildcard $(APPS_DIR)/$in),)
+ $(error Error: Application $in already exists)
+endif
+ $(eval p := $(in))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(eval n := $(in)_sup)
+ $(verbose) mkdir -p $(APPS_DIR)/$p/src/
+ $(verbose) $(call core_render,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile)
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc,$(APPS_DIR)/$p/src/$p.app.src)
+endif
+ $(verbose) $(call core_render,bs_app,$(APPS_DIR)/$p/src/$p_app.erl)
+ $(verbose) $(call core_render,tpl_supervisor,$(APPS_DIR)/$p/src/$p_sup.erl)
+
+new-lib:
+ifndef in
+ $(error Usage: $(MAKE) new-lib in=APP)
+endif
+ifneq ($(wildcard $(APPS_DIR)/$in),)
+ $(error Error: Application $in already exists)
+endif
+ $(eval p := $(in))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(verbose) mkdir -p $(APPS_DIR)/$p/src/
+ $(verbose) $(call core_render,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile)
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc_lib,$(APPS_DIR)/$p/src/$p.app.src)
+endif
+
+new:
+ifeq ($(wildcard src/)$(in),)
+ $(error Error: src/ directory does not exist)
+endif
+ifndef t
+ $(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP])
+endif
+ifndef n
+ $(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP])
+endif
+ifdef in
+ $(verbose) $(call core_render,tpl_$(t),$(APPS_DIR)/$(in)/src/$(n).erl)
+else
+ $(verbose) $(call core_render,tpl_$(t),src/$(n).erl)
+endif
+
+list-templates:
+	$(verbose) echo Available templates:
+ $(verbose) printf " %s\n" $(sort $(patsubst tpl_%,%,$(filter tpl_%,$(.VARIABLES))))
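+
+# Typical template invocations (illustrative names):
+# make new t=gen_server n=my_server        # writes src/my_server.erl
+# make new t=supervisor n=my_sup in=myapp  # writes $(APPS_DIR)/myapp/src/my_sup.erl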
+
+# Copyright (c) 2014-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: clean-c_src distclean-c_src-env
+
+# Configuration.
+
+C_SRC_DIR ?= $(CURDIR)/c_src
+C_SRC_ENV ?= $(C_SRC_DIR)/env.mk
+C_SRC_OUTPUT ?= $(CURDIR)/priv/$(PROJECT)
+C_SRC_TYPE ?= shared
+
+# System type and C compiler/flags.
+
+ifeq ($(PLATFORM),msys2)
+ C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?= .exe
+ C_SRC_OUTPUT_SHARED_EXTENSION ?= .dll
+else
+ C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?=
+ C_SRC_OUTPUT_SHARED_EXTENSION ?= .so
+endif
+
+ifeq ($(C_SRC_TYPE),shared)
+ C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_SHARED_EXTENSION)
+else
+ C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_EXECUTABLE_EXTENSION)
+endif
+
+ifeq ($(PLATFORM),msys2)
+# We hardcode the compiler used on MSYS2. The default CC=cc does
+# not produce working code. The "gcc" MSYS2 package also doesn't.
+ CC = /mingw64/bin/gcc
+ export CC
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),darwin)
+ CC ?= cc
+ CFLAGS ?= -O3 -std=c99 -arch x86_64 -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -arch x86_64 -Wall
+ LDFLAGS ?= -arch x86_64 -flat_namespace -undefined suppress
+else ifeq ($(PLATFORM),freebsd)
+ CC ?= cc
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),linux)
+ CC ?= gcc
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+endif
+
+ifneq ($(PLATFORM),msys2)
+ CFLAGS += -fPIC
+ CXXFLAGS += -fPIC
+endif
+
+CFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
+CXXFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
+
+LDLIBS += -L"$(ERL_INTERFACE_LIB_DIR)" -lei
+
+# Verbosity.
+
+c_verbose_0 = @echo " C " $(filter-out $(notdir $(MAKEFILE_LIST) $(C_SRC_ENV)),$(^F));
+c_verbose = $(c_verbose_$(V))
+
+cpp_verbose_0 = @echo " CPP " $(filter-out $(notdir $(MAKEFILE_LIST) $(C_SRC_ENV)),$(^F));
+cpp_verbose = $(cpp_verbose_$(V))
+
+link_verbose_0 = @echo " LD " $(@F);
+link_verbose = $(link_verbose_$(V))
+
+# Targets.
+
+ifeq ($(wildcard $(C_SRC_DIR)),)
+else ifneq ($(wildcard $(C_SRC_DIR)/Makefile),)
+app:: app-c_src
+
+test-build:: app-c_src
+
+app-c_src:
+ $(MAKE) -C $(C_SRC_DIR)
+
+clean::
+ $(MAKE) -C $(C_SRC_DIR) clean
+
+else
+
+ifeq ($(SOURCES),)
+SOURCES := $(sort $(foreach pat,*.c *.C *.cc *.cpp,$(call core_find,$(C_SRC_DIR)/,$(pat))))
+endif
+OBJECTS = $(addsuffix .o, $(basename $(SOURCES)))
+
+COMPILE_C = $(c_verbose) $(CC) $(CFLAGS) $(CPPFLAGS) -c
+COMPILE_CPP = $(cpp_verbose) $(CXX) $(CXXFLAGS) $(CPPFLAGS) -c
+
+app:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
+
+test-build:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
+
+$(C_SRC_OUTPUT_FILE): $(OBJECTS)
+ $(verbose) mkdir -p $(dir $@)
+ $(link_verbose) $(CC) $(OBJECTS) \
+ $(LDFLAGS) $(if $(filter $(C_SRC_TYPE),shared),-shared) $(LDLIBS) \
+ -o $(C_SRC_OUTPUT_FILE)
+
+$(OBJECTS): $(MAKEFILE_LIST) $(C_SRC_ENV)
+
+%.o: %.c
+ $(COMPILE_C) $(OUTPUT_OPTION) $<
+
+%.o: %.cc
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+%.o: %.C
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+%.o: %.cpp
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+clean:: clean-c_src
+
+clean-c_src:
+ $(gen_verbose) rm -f $(C_SRC_OUTPUT_FILE) $(OBJECTS)
+
+endif
+
+ifneq ($(wildcard $(C_SRC_DIR)),)
+ERL_ERTS_DIR = $(shell $(ERL) -eval 'io:format("~s~n", [code:lib_dir(erts)]), halt().')
+
+$(C_SRC_ENV):
+ $(verbose) $(ERL) -eval "file:write_file(\"$(call core_native_path,$(C_SRC_ENV))\", \
+ io_lib:format( \
+ \"# Generated by Erlang.mk. Edit at your own risk!~n~n\" \
+ \"ERTS_INCLUDE_DIR ?= ~s/erts-~s/include/~n\" \
+ \"ERL_INTERFACE_INCLUDE_DIR ?= ~s~n\" \
+ \"ERL_INTERFACE_LIB_DIR ?= ~s~n\" \
+ \"ERTS_DIR ?= $(ERL_ERTS_DIR)~n\", \
+ [code:root_dir(), erlang:system_info(version), \
+ code:lib_dir(erl_interface, include), \
+ code:lib_dir(erl_interface, lib)])), \
+ halt()."
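+
+# The generated env.mk then contains lines of this shape (illustrative paths):
+# ERTS_INCLUDE_DIR ?= /usr/lib/erlang/erts-11.1/include/
+# ERL_INTERFACE_INCLUDE_DIR ?= /usr/lib/erlang/lib/erl_interface-4.0.1/include
+# ERL_INTERFACE_LIB_DIR ?= /usr/lib/erlang/lib/erl_interface-4.0.1/lib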
+
+distclean:: distclean-c_src-env
+
+distclean-c_src-env:
+ $(gen_verbose) rm -f $(C_SRC_ENV)
+
+-include $(C_SRC_ENV)
+
+ifneq ($(ERL_ERTS_DIR),$(ERTS_DIR))
+$(shell rm -f $(C_SRC_ENV))
+endif
+endif
+
+# Templates.
+
+define bs_c_nif
+#include "erl_nif.h"
+
+static int loads = 0;
+
+static int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
+{
+ /* Initialize private data. */
+ *priv_data = NULL;
+
+ loads++;
+
+ return 0;
+}
+
+static int upgrade(ErlNifEnv* env, void** priv_data, void** old_priv_data, ERL_NIF_TERM load_info)
+{
+ /* Convert the private data to the new version. */
+ *priv_data = *old_priv_data;
+
+ loads++;
+
+ return 0;
+}
+
+static void unload(ErlNifEnv* env, void* priv_data)
+{
+ if (loads == 1) {
+ /* Destroy the private data. */
+ }
+
+ loads--;
+}
+
+static ERL_NIF_TERM hello(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ if (enif_is_atom(env, argv[0])) {
+ return enif_make_tuple2(env,
+ enif_make_atom(env, "hello"),
+ argv[0]);
+ }
+
+ return enif_make_tuple2(env,
+ enif_make_atom(env, "error"),
+ enif_make_atom(env, "badarg"));
+}
+
+static ErlNifFunc nif_funcs[] = {
+ {"hello", 1, hello}
+};
+
+ERL_NIF_INIT($n, nif_funcs, load, NULL, upgrade, unload)
+endef
+
+define bs_erl_nif
+-module($n).
+
+-export([hello/1]).
+
+-on_load(on_load/0).
+on_load() ->
+ PrivDir = case code:priv_dir(?MODULE) of
+ {error, _} ->
+ AppPath = filename:dirname(filename:dirname(code:which(?MODULE))),
+ filename:join(AppPath, "priv");
+ Path ->
+ Path
+ end,
+ erlang:load_nif(filename:join(PrivDir, atom_to_list(?MODULE)), 0).
+
+hello(_) ->
+ erlang:nif_error({not_loaded, ?MODULE}).
+endef
+
+new-nif:
+ifneq ($(wildcard $(C_SRC_DIR)/$n.c),)
+ $(error Error: $(C_SRC_DIR)/$n.c already exists)
+endif
+ifneq ($(wildcard src/$n.erl),)
+ $(error Error: src/$n.erl already exists)
+endif
+ifndef n
+ $(error Usage: $(MAKE) new-nif n=NAME [in=APP])
+endif
+ifdef in
+ $(verbose) $(MAKE) -C $(APPS_DIR)/$(in)/ new-nif n=$n in=
+else
+ $(verbose) mkdir -p $(C_SRC_DIR) src/
+ $(verbose) $(call core_render,bs_c_nif,$(C_SRC_DIR)/$n.c)
+ $(verbose) $(call core_render,bs_erl_nif,src/$n.erl)
+endif
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: ci ci-prepare ci-setup
+
+CI_OTP ?=
+CI_HIPE ?=
+CI_ERLLVM ?=
+
+ifeq ($(CI_VM),native)
+ERLC_OPTS += +native
+TEST_ERLC_OPTS += +native
+else ifeq ($(CI_VM),erllvm)
+ERLC_OPTS += +native +'{hipe, [to_llvm]}'
+TEST_ERLC_OPTS += +native +'{hipe, [to_llvm]}'
+endif
+
+ifeq ($(strip $(CI_OTP) $(CI_HIPE) $(CI_ERLLVM)),)
+ci::
+else
+
+ci:: $(addprefix ci-,$(CI_OTP) $(addsuffix -native,$(CI_HIPE)) $(addsuffix -erllvm,$(CI_ERLLVM)))
+
+ci-prepare: $(addprefix $(KERL_INSTALL_DIR)/,$(CI_OTP) $(addsuffix -native,$(CI_HIPE)))
+
+ci-setup::
+ $(verbose) :
+
+ci-extra::
+ $(verbose) :
+
+ci_verbose_0 = @echo " CI " $(1);
+ci_verbose = $(ci_verbose_$(V))
+
+define ci_target
+ci-$1: $(KERL_INSTALL_DIR)/$2
+ $(verbose) $(MAKE) --no-print-directory clean
+ $(ci_verbose) \
+ PATH="$(KERL_INSTALL_DIR)/$2/bin:$(PATH)" \
+ CI_OTP_RELEASE="$1" \
+ CT_OPTS="-label $1" \
+ CI_VM="$3" \
+ $(MAKE) ci-setup tests
+ $(verbose) $(MAKE) --no-print-directory ci-extra
+endef
+
+$(foreach otp,$(CI_OTP),$(eval $(call ci_target,$(otp),$(otp),otp)))
+$(foreach otp,$(CI_HIPE),$(eval $(call ci_target,$(otp)-native,$(otp)-native,native)))
+$(foreach otp,$(CI_ERLLVM),$(eval $(call ci_target,$(otp)-erllvm,$(otp)-native,erllvm)))
+
+$(foreach otp,$(filter-out $(ERLANG_OTP),$(CI_OTP)),$(eval $(call kerl_otp_target,$(otp))))
+$(foreach otp,$(filter-out $(ERLANG_HIPE),$(sort $(CI_HIPE) $(CI_ERLLVM))),$(eval $(call kerl_hipe_target,$(otp))))
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Continuous Integration targets:" \
+ " ci Run '$(MAKE) tests' on all configured Erlang versions." \
+ "" \
+ "The CI_OTP variable must be defined with the Erlang versions" \
+ "that must be tested. For example: CI_OTP = OTP-17.3.4 OTP-17.5.3"
+
+endif
+
+# Copyright (c) 2020, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifdef CONCUERROR_TESTS
+
+.PHONY: concuerror distclean-concuerror
+
+# Configuration
+
+CONCUERROR_LOGS_DIR ?= $(CURDIR)/logs
+CONCUERROR_OPTS ?=
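+
+# CONCUERROR_TESTS lists module:function pairs (illustrative):
+# CONCUERROR_TESTS = my_mod:my_test other_mod:other_test
+# yielding targets such as concuerror-my_mod-my_test below.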
+
+# Core targets.
+
+check:: concuerror
+
+ifndef KEEP_LOGS
+distclean:: distclean-concuerror
+endif
+
+# Plugin-specific targets.
+
+$(ERLANG_MK_TMP)/Concuerror/bin/concuerror: | $(ERLANG_MK_TMP)
+ $(verbose) git clone https://github.com/parapluu/Concuerror $(ERLANG_MK_TMP)/Concuerror
+ $(verbose) $(MAKE) -C $(ERLANG_MK_TMP)/Concuerror
+
+$(CONCUERROR_LOGS_DIR):
+ $(verbose) mkdir -p $(CONCUERROR_LOGS_DIR)
+
+define concuerror_html_report
+<!DOCTYPE html>
+<html lang="en">
+<head>
+<meta charset="utf-8">
+<title>Concuerror HTML report</title>
+</head>
+<body>
+<h1>Concuerror HTML report</h1>
+<p>Generated on $(concuerror_date)</p>
+<ul>
+$(foreach t,$(concuerror_targets),<li><a href="$(t).txt">$(t)</a></li>)
+</ul>
+</body>
+</html>
+endef
+
+concuerror: $(addprefix concuerror-,$(subst :,-,$(CONCUERROR_TESTS)))
+ $(eval concuerror_date := $(shell date))
+ $(eval concuerror_targets := $^)
+ $(verbose) $(call core_render,concuerror_html_report,$(CONCUERROR_LOGS_DIR)/concuerror.html)
+
+define concuerror_target
+.PHONY: concuerror-$1-$2
+
+concuerror-$1-$2: test-build | $(ERLANG_MK_TMP)/Concuerror/bin/concuerror $(CONCUERROR_LOGS_DIR)
+ $(ERLANG_MK_TMP)/Concuerror/bin/concuerror \
+ --pa $(CURDIR)/ebin --pa $(TEST_DIR) \
+ -o $(CONCUERROR_LOGS_DIR)/concuerror-$1-$2.txt \
+ $$(CONCUERROR_OPTS) -m $1 -t $2
+endef
+
+$(foreach test,$(CONCUERROR_TESTS),$(eval $(call concuerror_target,$(firstword $(subst :, ,$(test))),$(lastword $(subst :, ,$(test))))))
+
+distclean-concuerror:
+ $(gen_verbose) rm -rf $(CONCUERROR_LOGS_DIR)
+
+endif
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: ct apps-ct distclean-ct
+
+# Configuration.
+
+CT_OPTS ?=
+
+ifneq ($(wildcard $(TEST_DIR)),)
+ifndef CT_SUITES
+CT_SUITES := $(sort $(subst _SUITE.erl,,$(notdir $(call core_find,$(TEST_DIR)/,*_SUITE.erl))))
+endif
+endif
+CT_SUITES ?=
+CT_LOGS_DIR ?= $(CURDIR)/logs
+
+# Core targets.
+
+tests:: ct
+
+ifndef KEEP_LOGS
+distclean:: distclean-ct
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Common_test targets:" \
+ " ct Run all the common_test suites for this project" \
+ "" \
+ "All your common_test suites have their associated targets." \
+		"A suite named http_SUITE can be run using the ct-http target."
+
+# Plugin-specific targets.
+
+CT_RUN = ct_run \
+ -no_auto_compile \
+ -noinput \
+ -pa $(CURDIR)/ebin $(TEST_DIR) \
+ -dir $(TEST_DIR) \
+ -logdir $(CT_LOGS_DIR)
+
+ifeq ($(CT_SUITES),)
+ct: $(if $(IS_APP)$(ROOT_DIR),,apps-ct)
+else
+# We do not run tests if we are in an apps/* with no test directory.
+ifneq ($(IS_APP)$(wildcard $(TEST_DIR)),1)
+ct: test-build $(if $(IS_APP)$(ROOT_DIR),,apps-ct)
+ $(verbose) mkdir -p $(CT_LOGS_DIR)
+ $(gen_verbose) $(CT_RUN) -sname ct_$(PROJECT) -suite $(addsuffix _SUITE,$(CT_SUITES)) $(CT_OPTS)
+endif
+endif
+
+ifneq ($(ALL_APPS_DIRS),)
+define ct_app_target
+apps-ct-$1: test-build
+ $$(MAKE) -C $1 ct IS_APP=1
+endef
+
+$(foreach app,$(ALL_APPS_DIRS),$(eval $(call ct_app_target,$(app))))
+
+apps-ct: $(addprefix apps-ct-,$(ALL_APPS_DIRS))
+endif
+
+ifdef t
+ifeq (,$(findstring :,$t))
+CT_EXTRA = -group $t
+else
+t_words = $(subst :, ,$t)
+CT_EXTRA = -group $(firstword $(t_words)) -case $(lastword $(t_words))
+endif
+else
+ifdef c
+CT_EXTRA = -case $c
+else
+CT_EXTRA =
+endif
+endif
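+
+# Illustrative: narrowing a suite run with t (group[:case]) or c (case):
+# make ct-http t=my_group            # adds -group my_group
+# make ct-http t=my_group:my_case    # adds -group my_group -case my_case
+# make ct-http c=my_case             # adds -case my_case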
+
+define ct_suite_target
+ct-$(1): test-build
+ $(verbose) mkdir -p $(CT_LOGS_DIR)
+ $(gen_verbose_esc) $(CT_RUN) -sname ct_$(PROJECT) -suite $(addsuffix _SUITE,$(1)) $(CT_EXTRA) $(CT_OPTS)
+endef
+
+$(foreach test,$(CT_SUITES),$(eval $(call ct_suite_target,$(test))))
+
+distclean-ct:
+ $(gen_verbose) rm -rf $(CT_LOGS_DIR)
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: plt distclean-plt dialyze
+
+# Configuration.
+
+DIALYZER_PLT ?= $(CURDIR)/.$(PROJECT).plt
+export DIALYZER_PLT
+
+PLT_APPS ?=
+DIALYZER_DIRS ?= --src -r $(wildcard src) $(ALL_APPS_DIRS)
+DIALYZER_OPTS ?= -Werror_handling -Wrace_conditions -Wunmatched_returns # -Wunderspecs
+DIALYZER_PLT_OPTS ?=
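+
+# Applications commonly added to the PLT (illustrative):
+# PLT_APPS = crypto public_key ssl inets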
+
+# Core targets.
+
+check:: dialyze
+
+distclean:: distclean-plt
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Dialyzer targets:" \
+ " plt Build a PLT file for this project" \
+ " dialyze Analyze the project using Dialyzer"
+
+# Plugin-specific targets.
+
+define filter_opts.erl
+ Opts = init:get_plain_arguments(),
+ {Filtered, _} = lists:foldl(fun
+ (O, {Os, true}) -> {[O|Os], false};
+ (O = "-D", {Os, _}) -> {[O|Os], true};
+ (O = [\\$$-, \\$$D, _ | _], {Os, _}) -> {[O|Os], false};
+ (O = "-I", {Os, _}) -> {[O|Os], true};
+ (O = [\\$$-, \\$$I, _ | _], {Os, _}) -> {[O|Os], false};
+ (O = "-pa", {Os, _}) -> {[O|Os], true};
+ (_, Acc) -> Acc
+ end, {[], false}, Opts),
+ io:format("~s~n", [string:join(lists:reverse(Filtered), " ")]),
+ halt().
+endef
+
+# DIALYZER_PLT is a variable understood directly by Dialyzer.
+#
+# We append the path to erts at the end of the PLT. This works
+# because the PLT file is in the external term format and the
+# function binary_to_term/1 ignores any trailing data.
+$(DIALYZER_PLT): deps app
+ $(eval DEPS_LOG := $(shell test -f $(ERLANG_MK_TMP)/deps.log && \
+ while read p; do test -d $$p/ebin && echo $$p/ebin; done <$(ERLANG_MK_TMP)/deps.log))
+ $(verbose) dialyzer --build_plt $(DIALYZER_PLT_OPTS) --apps \
+ erts kernel stdlib $(PLT_APPS) $(OTP_DEPS) $(LOCAL_DEPS) $(DEPS_LOG) || test $$? -eq 2
+ $(verbose) $(ERL) -eval 'io:format("~n~s~n", [code:lib_dir(erts)]), halt().' >> $@
+
+plt: $(DIALYZER_PLT)
+
+distclean-plt:
+ $(gen_verbose) rm -f $(DIALYZER_PLT)
+
+ifneq ($(wildcard $(DIALYZER_PLT)),)
+dialyze: $(if $(filter --src,$(DIALYZER_DIRS)),,deps app)
+ $(verbose) if ! tail -n1 $(DIALYZER_PLT) | \
+ grep -q "^`$(ERL) -eval 'io:format("~s", [code:lib_dir(erts)]), halt().'`$$"; then \
+ rm $(DIALYZER_PLT); \
+ $(MAKE) plt; \
+ fi
+else
+dialyze: $(DIALYZER_PLT)
+endif
+ $(verbose) dialyzer --no_native `$(ERL) \
+ -eval "$(subst $(newline),,$(call escape_dquotes,$(call filter_opts.erl)))" \
+ -extra $(ERLC_OPTS)` $(DIALYZER_DIRS) $(DIALYZER_OPTS) $(if $(wildcard ebin/),-pa ebin/)
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-edoc edoc
+
+# Configuration.
+
+EDOC_OPTS ?=
+EDOC_SRC_DIRS ?=
+EDOC_OUTPUT ?= doc
+
+define edoc.erl
+ SrcPaths = lists:foldl(fun(P, Acc) ->
+ filelib:wildcard(atom_to_list(P) ++ "/{src,c_src}") ++ Acc
+ end, [], [$(call comma_list,$(patsubst %,'%',$(call core_native_path,$(EDOC_SRC_DIRS))))]),
+ DefaultOpts = [{dir, "$(EDOC_OUTPUT)"}, {source_path, SrcPaths}, {subpackages, false}],
+ edoc:application($(1), ".", [$(2)] ++ DefaultOpts),
+ halt(0).
+endef
+
+# Core targets.
+
+ifneq ($(strip $(EDOC_SRC_DIRS)$(wildcard doc/overview.edoc)),)
+docs:: edoc
+endif
+
+distclean:: distclean-edoc
+
+# Plugin-specific targets.
+
+edoc: distclean-edoc doc-deps
+ $(gen_verbose) $(call erlang,$(call edoc.erl,$(PROJECT),$(EDOC_OPTS)))
+
+distclean-edoc:
+ $(gen_verbose) rm -f $(EDOC_OUTPUT)/*.css $(EDOC_OUTPUT)/*.html $(EDOC_OUTPUT)/*.png $(EDOC_OUTPUT)/edoc-info
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Configuration.
+
+DTL_FULL_PATH ?=
+DTL_PATH ?= templates/
+DTL_PREFIX ?=
+DTL_SUFFIX ?= _dtl
+DTL_OPTS ?=
+
+# Verbosity.
+
+dtl_verbose_0 = @echo " DTL " $(filter %.dtl,$(?F));
+dtl_verbose = $(dtl_verbose_$(V))
+
+# Core targets.
+
+DTL_PATH := $(abspath $(DTL_PATH))
+DTL_FILES := $(sort $(call core_find,$(DTL_PATH),*.dtl))
+
+ifneq ($(DTL_FILES),)
+
+DTL_NAMES = $(addprefix $(DTL_PREFIX),$(addsuffix $(DTL_SUFFIX),$(DTL_FILES:$(DTL_PATH)/%.dtl=%)))
+DTL_MODULES = $(if $(DTL_FULL_PATH),$(subst /,_,$(DTL_NAMES)),$(notdir $(DTL_NAMES)))
+BEAM_FILES += $(addsuffix .beam,$(addprefix ebin/,$(DTL_MODULES)))
+
+ifneq ($(words $(DTL_FILES)),0)
+# Rebuild templates when the Makefile changes.
+$(ERLANG_MK_TMP)/last-makefile-change-erlydtl: $(MAKEFILE_LIST) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(DTL_FILES); \
+ fi
+ $(verbose) touch $@
+
+ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change-erlydtl
+endif
+
+define erlydtl_compile.erl
+ [begin
+ Module0 = case "$(strip $(DTL_FULL_PATH))" of
+ "" ->
+ filename:basename(F, ".dtl");
+ _ ->
+ "$(call core_native_path,$(DTL_PATH))/" ++ F2 = filename:rootname(F, ".dtl"),
+ re:replace(F2, "/", "_", [{return, list}, global])
+ end,
+ Module = list_to_atom("$(DTL_PREFIX)" ++ string:to_lower(Module0) ++ "$(DTL_SUFFIX)"),
+ case erlydtl:compile(F, Module, [$(DTL_OPTS)] ++ [{out_dir, "ebin/"}, return_errors]) of
+ ok -> ok;
+ {ok, _} -> ok
+ end
+ end || F <- string:tokens("$(1)", " ")],
+ halt().
+endef
+
+ebin/$(PROJECT).app:: $(DTL_FILES) | ebin/
+ $(if $(strip $?),\
+ $(dtl_verbose) $(call erlang,$(call erlydtl_compile.erl,$(call core_native_path,$?)),\
+ -pa ebin/))
+
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, Dave Cottlehuber <dch@skunkwerks.at>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-escript escript escript-zip
+
+# Configuration.
+
+ESCRIPT_NAME ?= $(PROJECT)
+ESCRIPT_FILE ?= $(ESCRIPT_NAME)
+
+ESCRIPT_SHEBANG ?= /usr/bin/env escript
+ESCRIPT_COMMENT ?= This is an -*- erlang -*- file
+ESCRIPT_EMU_ARGS ?= -escript main $(ESCRIPT_NAME)
+
+ESCRIPT_ZIP ?= 7z a -tzip -mx=9 -mtc=off $(if $(filter-out 0,$(V)),,> /dev/null)
+ESCRIPT_ZIP_FILE ?= $(ERLANG_MK_TMP)/escript.zip
+
+# Core targets.
+
+distclean:: distclean-escript
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Escript targets:" \
+		"  escript     Build an executable escript archive"
+
+# Plugin-specific targets.
+
+escript-zip:: FULL=1
+escript-zip:: deps app
+	$(verbose) mkdir -p $(dir $(ESCRIPT_ZIP_FILE))
+ $(verbose) rm -f $(ESCRIPT_ZIP_FILE)
+ $(gen_verbose) cd .. && $(ESCRIPT_ZIP) $(ESCRIPT_ZIP_FILE) $(PROJECT)/ebin/*
+ifneq ($(DEPS),)
+ $(verbose) cd $(DEPS_DIR) && $(ESCRIPT_ZIP) $(ESCRIPT_ZIP_FILE) \
+ $(subst $(DEPS_DIR)/,,$(addsuffix /*,$(wildcard \
+ $(addsuffix /ebin,$(shell cat $(ERLANG_MK_TMP)/deps.log)))))
+endif
+
+escript:: escript-zip
+ $(gen_verbose) printf "%s\n" \
+ "#!$(ESCRIPT_SHEBANG)" \
+ "%% $(ESCRIPT_COMMENT)" \
+ "%%! $(ESCRIPT_EMU_ARGS)" > $(ESCRIPT_FILE)
+ $(verbose) cat $(ESCRIPT_ZIP_FILE) >> $(ESCRIPT_FILE)
+ $(verbose) chmod +x $(ESCRIPT_FILE)
+
+distclean-escript:
+ $(gen_verbose) rm -f $(ESCRIPT_FILE)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, Enrique Fernandez <enrique.fernandez@erlang-solutions.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: eunit apps-eunit
+
+# Configuration
+
+EUNIT_OPTS ?=
+EUNIT_ERL_OPTS ?=
+
+# Core targets.
+
+tests:: eunit
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "EUnit targets:" \
+ " eunit Run all the EUnit tests for this project"
+
+# Plugin-specific targets.
+
+define eunit.erl
+ $(call cover.erl)
+ CoverSetup(),
+ case eunit:test($1, [$(EUNIT_OPTS)]) of
+ ok -> ok;
+ error -> halt(2)
+ end,
+ CoverExport("$(call core_native_path,$(COVER_DATA_DIR))/eunit.coverdata"),
+ halt()
+endef
+
+EUNIT_ERL_OPTS += -pa $(TEST_DIR) $(CURDIR)/ebin
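+
+# Illustrative: t selects a module, or module:function for a single test below:
+# make eunit t=my_mod           # eunit:test([my_mod])
+# make eunit t=my_mod:my_test   # eunit:test(fun my_mod:my_test/0)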
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+eunit: test-build cover-data-dir
+ $(gen_verbose) $(call erlang,$(call eunit.erl,['$(t)']),$(EUNIT_ERL_OPTS))
+else
+eunit: test-build cover-data-dir
+ $(gen_verbose) $(call erlang,$(call eunit.erl,fun $(t)/0),$(EUNIT_ERL_OPTS))
+endif
+else
+EUNIT_EBIN_MODS = $(notdir $(basename $(ERL_FILES) $(BEAM_FILES)))
+EUNIT_TEST_MODS = $(notdir $(basename $(call core_find,$(TEST_DIR)/,*.erl)))
+
+EUNIT_MODS = $(foreach mod,$(EUNIT_EBIN_MODS) $(filter-out \
+ $(patsubst %,%_tests,$(EUNIT_EBIN_MODS)),$(EUNIT_TEST_MODS)),'$(mod)')
+
+eunit: test-build $(if $(IS_APP)$(ROOT_DIR),,apps-eunit) cover-data-dir
+ifneq ($(wildcard src/ $(TEST_DIR)),)
+ $(gen_verbose) $(call erlang,$(call eunit.erl,[$(call comma_list,$(EUNIT_MODS))]),$(EUNIT_ERL_OPTS))
+endif
+
+ifneq ($(ALL_APPS_DIRS),)
+apps-eunit: test-build
+ $(verbose) eunit_retcode=0 ; for app in $(ALL_APPS_DIRS); do $(MAKE) -C $$app eunit IS_APP=1; \
+ [ $$? -ne 0 ] && eunit_retcode=1 ; done ; \
+ exit $$eunit_retcode
+endif
+endif
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter proper,$(DEPS) $(TEST_DEPS)),proper)
+.PHONY: proper
+
+# Targets.
+
+tests:: proper
+
+define proper_check.erl
+ $(call cover.erl)
+ code:add_pathsa([
+ "$(call core_native_path,$(CURDIR)/ebin)",
+ "$(call core_native_path,$(DEPS_DIR)/*/ebin)",
+ "$(call core_native_path,$(TEST_DIR))"]),
+ Module = fun(M) ->
+ [true] =:= lists:usort([
+ case atom_to_list(F) of
+ "prop_" ++ _ ->
+ io:format("Testing ~p:~p/0~n", [M, F]),
+ proper:quickcheck(M:F(), nocolors);
+ _ ->
+ true
+ end
+ || {F, 0} <- M:module_info(exports)])
+ end,
+ try begin
+ CoverSetup(),
+ Res = case $(1) of
+ all -> [true] =:= lists:usort([Module(M) || M <- [$(call comma_list,$(3))]]);
+ module -> Module($(2));
+ function -> proper:quickcheck($(2), nocolors)
+ end,
+ CoverExport("$(COVER_DATA_DIR)/proper.coverdata"),
+ Res
+ end of
+ true -> halt(0);
+ _ -> halt(1)
+	catch error:undef:Stacktrace ->
+		io:format("Undefined property or module?~n~p~n", [Stacktrace]),
+		halt(0)
+ end.
+endef
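+
+# Illustrative invocations matching the three modes above:
+# make proper                       # all: every prop_* export in every module
+# make proper t=my_props            # module: all prop_* in my_props
+# make proper t=my_props:prop_foo   # function: proper:quickcheck(my_props:prop_foo())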
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+proper: test-build cover-data-dir
+ $(verbose) $(call erlang,$(call proper_check.erl,module,$(t)))
+else
+proper: test-build cover-data-dir
+ $(verbose) echo Testing $(t)/0
+ $(verbose) $(call erlang,$(call proper_check.erl,function,$(t)()))
+endif
+else
+proper: test-build cover-data-dir
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(wildcard ebin/*.beam) $(call core_find,$(TEST_DIR)/,*.beam))))))
+ $(gen_verbose) $(call erlang,$(call proper_check.erl,all,undefined,$(MODULES)))
+endif
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Verbosity.
+
+proto_verbose_0 = @echo " PROTO " $(filter %.proto,$(?F));
+proto_verbose = $(proto_verbose_$(V))
+
+# Core targets.
+
+ifneq ($(wildcard src/),)
+ifneq ($(filter gpb protobuffs,$(BUILD_DEPS) $(DEPS)),)
+PROTO_FILES := $(filter %.proto,$(ALL_SRC_FILES))
+ERL_FILES += $(addprefix src/,$(patsubst %.proto,%_pb.erl,$(notdir $(PROTO_FILES))))
+
+ifeq ($(PROTO_FILES),)
+$(ERLANG_MK_TMP)/last-makefile-change-protobuffs:
+ $(verbose) :
+else
+# Rebuild proto files when the Makefile changes.
+# We exclude $(PROJECT).d to avoid a circular dependency.
+$(ERLANG_MK_TMP)/last-makefile-change-protobuffs: $(filter-out $(PROJECT).d,$(MAKEFILE_LIST)) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(PROTO_FILES); \
+ fi
+ $(verbose) touch $@
+
+$(PROJECT).d:: $(ERLANG_MK_TMP)/last-makefile-change-protobuffs
+endif
+
+ifeq ($(filter gpb,$(BUILD_DEPS) $(DEPS)),)
+define compile_proto.erl
+ [begin
+ protobuffs_compile:generate_source(F, [
+ {output_include_dir, "./include"},
+ {output_src_dir, "./src"}])
+ end || F <- string:tokens("$1", " ")],
+ halt().
+endef
+else
+define compile_proto.erl
+ [begin
+ gpb_compile:file(F, [
+ {include_as_lib, true},
+ {module_name_suffix, "_pb"},
+ {o_hrl, "./include"},
+ {o_erl, "./src"}])
+ end || F <- string:tokens("$1", " ")],
+ halt().
+endef
+endif
+
+ifneq ($(PROTO_FILES),)
+$(PROJECT).d:: $(PROTO_FILES)
+ $(verbose) mkdir -p ebin/ include/
+ $(if $(strip $?),$(proto_verbose) $(call erlang,$(call compile_proto.erl,$?)))
+endif
+endif
+endif
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: relx-rel relx-relup distclean-relx-rel run
+
+# Configuration.
+
+RELX ?= $(ERLANG_MK_TMP)/relx
+RELX_CONFIG ?= $(CURDIR)/relx.config
+
+RELX_URL ?= https://erlang.mk/res/relx-v3.27.0
+RELX_OPTS ?=
+RELX_OUTPUT_DIR ?= _rel
+RELX_REL_EXT ?=
+RELX_TAR ?= 1
+
+ifdef SFX
+ RELX_TAR = 1
+endif
+
+ifeq ($(firstword $(RELX_OPTS)),-o)
+ RELX_OUTPUT_DIR = $(word 2,$(RELX_OPTS))
+else
+ RELX_OPTS += -o $(RELX_OUTPUT_DIR)
+endif
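+
+# Illustrative usage (the output directory name is arbitrary):
+#   make rel RELX_OUTPUT_DIR=my_rel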
+
+# Core targets.
+
+ifeq ($(IS_DEP),)
+ifneq ($(wildcard $(RELX_CONFIG)),)
+rel:: relx-rel
+
+relup:: relx-relup
+endif
+endif
+
+distclean:: distclean-relx-rel
+
+# Plugin-specific targets.
+
+$(RELX): | $(ERLANG_MK_TMP)
+ $(gen_verbose) $(call core_http_get,$(RELX),$(RELX_URL))
+ $(verbose) chmod +x $(RELX)
+
+relx-rel: $(RELX) rel-deps app
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) release
+ $(verbose) $(MAKE) relx-post-rel
+ifeq ($(RELX_TAR),1)
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) tar
+endif
+
+relx-relup: $(RELX) rel-deps app
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) release
+ $(MAKE) relx-post-rel
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) relup $(if $(filter 1,$(RELX_TAR)),tar)
+
+distclean-relx-rel:
+ $(gen_verbose) rm -rf $(RELX_OUTPUT_DIR)
+
+# Default hooks.
+relx-post-rel::
+ $(verbose) :
+
+# Run target.
+
+ifeq ($(wildcard $(RELX_CONFIG)),)
+run::
+else
+
+define get_relx_release.erl
+ {ok, Config} = file:consult("$(call core_native_path,$(RELX_CONFIG))"),
+ {release, {Name, Vsn0}, _} = lists:keyfind(release, 1, Config),
+ Vsn = case Vsn0 of
+ {cmd, Cmd} -> os:cmd(Cmd);
+ semver -> "";
+ {semver, _} -> "";
+ VsnStr -> Vsn0
+ end,
+ Extended = case lists:keyfind(extended_start_script, 1, Config) of
+ {_, true} -> "1";
+ _ -> ""
+ end,
+ io:format("~s ~s ~s", [Name, Vsn, Extended]),
+ halt(0).
+endef
+
+RELX_REL := $(shell $(call erlang,$(get_relx_release.erl)))
+RELX_REL_NAME := $(word 1,$(RELX_REL))
+RELX_REL_VSN := $(word 2,$(RELX_REL))
+RELX_REL_CMD := $(if $(word 3,$(RELX_REL)),console)
+
+ifeq ($(PLATFORM),msys2)
+RELX_REL_EXT := .cmd
+endif
+
+run:: all
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) $(RELX_REL_CMD)
+
+ifdef RELOAD
+rel::
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) ping
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) \
+ eval "io:format(\"~p~n\", [c:lm()])"
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Relx targets:" \
+ " run Compile the project, build the release and run it"
+
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, M Robert Martin <rob@version2beta.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: shell
+
+# Configuration.
+
+SHELL_ERL ?= erl
+SHELL_PATHS ?= $(CURDIR)/ebin $(TEST_DIR)
+SHELL_OPTS ?=
+
+ALL_SHELL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(SHELL_DEPS))
+
+# Core targets
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Shell targets:" \
+ " shell Run an erlang shell with SHELL_OPTS or reasonable default"
+
+# Plugin-specific targets.
+
+$(foreach dep,$(SHELL_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+build-shell-deps:
+else
+build-shell-deps: $(ALL_SHELL_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_SHELL_DEPS_DIRS) ; do \
+ if [ -z "$(strip $(FULL))" ] && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ else \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ fi \
+ done
+endif
+
+shell:: build-shell-deps
+ $(gen_verbose) $(SHELL_ERL) -pa $(SHELL_PATHS) $(SHELL_OPTS)
+
+# Copyright 2017, Stanislaw Klekot <dozzie@jarowit.net>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-sphinx sphinx
+
+# Configuration.
+
+SPHINX_BUILD ?= sphinx-build
+SPHINX_SOURCE ?= doc
+SPHINX_CONFDIR ?=
+SPHINX_FORMATS ?= html
+SPHINX_DOCTREES ?= $(ERLANG_MK_TMP)/sphinx.doctrees
+SPHINX_OPTS ?=
+
+#sphinx_html_opts =
+#sphinx_html_output = html
+#sphinx_man_opts =
+#sphinx_man_output = man
+#sphinx_latex_opts =
+#sphinx_latex_output = latex
+
+# Helpers.
+
+sphinx_build_0 = @echo " SPHINX" $1; $(SPHINX_BUILD) -N -q
+sphinx_build_1 = $(SPHINX_BUILD) -N
+sphinx_build_2 = set -x; $(SPHINX_BUILD)
+sphinx_build = $(sphinx_build_$(V))
+
+define sphinx.build
+$(call sphinx_build,$1) -b $1 -d $(SPHINX_DOCTREES) $(if $(SPHINX_CONFDIR),-c $(SPHINX_CONFDIR)) $(SPHINX_OPTS) $(sphinx_$1_opts) -- $(SPHINX_SOURCE) $(call sphinx.output,$1)
+
+endef
+
+define sphinx.output
+$(if $(sphinx_$1_output),$(sphinx_$1_output),$1)
+endef
+
+# Targets.
+
+ifneq ($(wildcard $(if $(SPHINX_CONFDIR),$(SPHINX_CONFDIR),$(SPHINX_SOURCE))/conf.py),)
+docs:: sphinx
+distclean:: distclean-sphinx
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Sphinx targets:" \
+ " sphinx Generate Sphinx documentation." \
+ "" \
+ "ReST sources and 'conf.py' file are expected in directory pointed by" \
+ "SPHINX_SOURCE ('doc' by default). SPHINX_FORMATS lists formats to build (only" \
+ "'html' format is generated by default); target directory can be specified by" \
+ 'setting sphinx_$${format}_output, for example: sphinx_html_output = output/html' \
+ "Additional Sphinx options can be set in SPHINX_OPTS."
+
+# Plugin-specific targets.
+
+sphinx:
+ $(foreach F,$(SPHINX_FORMATS),$(call sphinx.build,$F))
+
+distclean-sphinx:
+ $(gen_verbose) rm -rf $(filter-out $(SPHINX_SOURCE),$(foreach F,$(SPHINX_FORMATS),$(call sphinx.output,$F)))
+
+# Copyright (c) 2017, Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: show-ERL_LIBS show-ERLC_OPTS show-TEST_ERLC_OPTS
+
+show-ERL_LIBS:
+ @echo $(ERL_LIBS)
+
+show-ERLC_OPTS:
+ @$(foreach opt,$(ERLC_OPTS) -pa ebin -I include,echo "$(opt)";)
+
+show-TEST_ERLC_OPTS:
+ @$(foreach opt,$(TEST_ERLC_OPTS) -pa ebin -I include,echo "$(opt)";)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter triq,$(DEPS) $(TEST_DEPS)),triq)
+.PHONY: triq
+
+# Targets.
+
+tests:: triq
+
+define triq_check.erl
+ $(call cover.erl)
+ code:add_pathsa([
+ "$(call core_native_path,$(CURDIR)/ebin)",
+ "$(call core_native_path,$(DEPS_DIR)/*/ebin)",
+ "$(call core_native_path,$(TEST_DIR))"]),
+ try begin
+ CoverSetup(),
+ Res = case $(1) of
+ all -> [true] =:= lists:usort([triq:check(M) || M <- [$(call comma_list,$(3))]]);
+ module -> triq:check($(2));
+ function -> triq:check($(2))
+ end,
+ CoverExport("$(COVER_DATA_DIR)/triq.coverdata"),
+ Res
+ end of
+ true -> halt(0);
+ _ -> halt(1)
+ catch error:undef ->
+ io:format("Undefined property or module?~n~p~n", [erlang:get_stacktrace()]),
+ halt(0)
+ end.
+endef
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+triq: test-build cover-data-dir
+ $(verbose) $(call erlang,$(call triq_check.erl,module,$(t)))
+else
+triq: test-build cover-data-dir
+ $(verbose) echo Testing $(t)/0
+ $(verbose) $(call erlang,$(call triq_check.erl,function,$(t)()))
+endif
+else
+triq: test-build cover-data-dir
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(wildcard ebin/*.beam) $(call core_find,$(TEST_DIR)/,*.beam))))))
+ $(gen_verbose) $(call erlang,$(call triq_check.erl,all,undefined,$(MODULES)))
+endif
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Erlang Solutions Ltd.
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: xref distclean-xref
+
+# Configuration.
+
+ifeq ($(XREF_CONFIG),)
+ XREFR_ARGS :=
+else
+ XREFR_ARGS := -c $(XREF_CONFIG)
+endif
+
+XREFR ?= $(CURDIR)/xrefr
+export XREFR
+
+XREFR_URL ?= https://github.com/inaka/xref_runner/releases/download/1.1.0/xrefr
+
+# Core targets.
+
+help::
+ $(verbose) printf '%s\n' '' \
+ 'Xref targets:' \
+ ' xref Run Xrefr using $$XREF_CONFIG as config file if defined'
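+
+# Illustrative usage (xref.config here is a hypothetical xref_runner
+# configuration file):
+#   make xref XREF_CONFIG=xref.config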
+
+distclean:: distclean-xref
+
+# Plugin-specific targets.
+
+$(XREFR):
+ $(gen_verbose) $(call core_http_get,$(XREFR),$(XREFR_URL))
+ $(verbose) chmod +x $(XREFR)
+
+xref: deps app $(XREFR)
+ $(gen_verbose) $(XREFR) $(XREFR_ARGS)
+
+distclean-xref:
+ $(gen_verbose) rm -rf $(XREFR)
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Viktor Söderqvist <viktor@zuiderkwast.se>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+COVER_REPORT_DIR ?= cover
+COVER_DATA_DIR ?= $(COVER_REPORT_DIR)
+
+ifdef COVER
+COVER_APPS ?= $(notdir $(ALL_APPS_DIRS))
+COVER_DEPS ?=
+endif
+
+# Code coverage for Common Test.
+
+ifdef COVER
+ifdef CT_RUN
+ifneq ($(wildcard $(TEST_DIR)),)
+test-build:: $(TEST_DIR)/ct.cover.spec
+
+$(TEST_DIR)/ct.cover.spec: cover-data-dir
+ $(gen_verbose) printf "%s\n" \
+ "{incl_app, '$(PROJECT)', details}." \
+ "{incl_dirs, '$(PROJECT)', [\"$(call core_native_path,$(CURDIR)/ebin)\" \
+ $(foreach a,$(COVER_APPS),$(comma) \"$(call core_native_path,$(APPS_DIR)/$a/ebin)\") \
+ $(foreach d,$(COVER_DEPS),$(comma) \"$(call core_native_path,$(DEPS_DIR)/$d/ebin)\")]}." \
+ '{export,"$(call core_native_path,$(abspath $(COVER_DATA_DIR))/ct.coverdata)"}.' > $@
+
+CT_RUN += -cover $(TEST_DIR)/ct.cover.spec
+endif
+endif
+endif
+
+# Code coverage for other tools.
+
+ifdef COVER
+define cover.erl
+ CoverSetup = fun() ->
+ Dirs = ["$(call core_native_path,$(CURDIR)/ebin)"
+ $(foreach a,$(COVER_APPS),$(comma) "$(call core_native_path,$(APPS_DIR)/$a/ebin)")
+ $(foreach d,$(COVER_DEPS),$(comma) "$(call core_native_path,$(DEPS_DIR)/$d/ebin)")],
+ [begin
+ case filelib:is_dir(Dir) of
+ false -> false;
+ true ->
+ case cover:compile_beam_directory(Dir) of
+ {error, _} -> halt(1);
+ _ -> true
+ end
+ end
+ end || Dir <- Dirs]
+ end,
+ CoverExport = fun(Filename) -> cover:export(Filename) end,
+endef
+else
+define cover.erl
+ CoverSetup = fun() -> ok end,
+ CoverExport = fun(_) -> ok end,
+endef
+endif
+
+# Core targets
+
+ifdef COVER
+ifneq ($(COVER_REPORT_DIR),)
+tests::
+ $(verbose) $(MAKE) --no-print-directory cover-report
+endif
+
+cover-data-dir: | $(COVER_DATA_DIR)
+
+$(COVER_DATA_DIR):
+ $(verbose) mkdir -p $(COVER_DATA_DIR)
+else
+cover-data-dir:
+endif
+
+clean:: coverdata-clean
+
+ifneq ($(COVER_REPORT_DIR),)
+distclean:: cover-report-clean
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Cover targets:" \
+ " cover-report Generate a HTML coverage report from previously collected" \
+ " cover data." \
+ " all.coverdata Merge all coverdata files into all.coverdata." \
+ "" \
+ "If COVER=1 is set, coverage data is generated by the targets eunit and ct. The" \
+ "target tests additionally generates a HTML coverage report from the combined" \
+ "coverdata files from each of these testing tools. HTML reports can be disabled" \
+ "by setting COVER_REPORT_DIR to empty."
+
+# Plugin specific targets
+
+COVERDATA = $(filter-out $(COVER_DATA_DIR)/all.coverdata,$(wildcard $(COVER_DATA_DIR)/*.coverdata))
+
+.PHONY: coverdata-clean
+coverdata-clean:
+ $(gen_verbose) rm -f $(COVER_DATA_DIR)/*.coverdata $(TEST_DIR)/ct.cover.spec
+
+# Merge all coverdata files into one.
+define cover_export.erl
+ $(foreach f,$(COVERDATA),cover:import("$(f)") == ok orelse halt(1),)
+ cover:export("$(COVER_DATA_DIR)/$@"), halt(0).
+endef
+
+all.coverdata: $(COVERDATA) cover-data-dir
+ $(gen_verbose) $(call erlang,$(cover_export.erl))
+
+# These are only defined if COVER_REPORT_DIR is non-empty. Set COVER_REPORT_DIR to
+# empty if you want the coverdata files but not the HTML report.
+ifneq ($(COVER_REPORT_DIR),)
+
+.PHONY: cover-report-clean cover-report
+
+cover-report-clean:
+ $(gen_verbose) rm -rf $(COVER_REPORT_DIR)
+ifneq ($(COVER_REPORT_DIR),$(COVER_DATA_DIR))
+ $(if $(shell ls -A $(COVER_DATA_DIR)/),,$(verbose) rmdir $(COVER_DATA_DIR))
+endif
+
+ifeq ($(COVERDATA),)
+cover-report:
+else
+
+# Modules which include eunit.hrl always contain one line without coverage
+# because eunit defines test/0 which is never called. We compensate for this.
+EUNIT_HRL_MODS = $(subst $(space),$(comma),$(shell \
+ grep -H -e '^\s*-include.*include/eunit\.hrl"' src/*.erl \
+ | sed "s/^src\/\(.*\)\.erl:.*/'\1'/" | uniq))
+
+define cover_report.erl
+ $(foreach f,$(COVERDATA),cover:import("$(f)") == ok orelse halt(1),)
+ Ms = cover:imported_modules(),
+ [cover:analyse_to_file(M, "$(COVER_REPORT_DIR)/" ++ atom_to_list(M)
+ ++ ".COVER.html", [html]) || M <- Ms],
+ Report = [begin {ok, R} = cover:analyse(M, module), R end || M <- Ms],
+ EunitHrlMods = [$(EUNIT_HRL_MODS)],
+ Report1 = [{M, {Y, case lists:member(M, EunitHrlMods) of
+ true -> N - 1; false -> N end}} || {M, {Y, N}} <- Report],
+ TotalY = lists:sum([Y || {_, {Y, _}} <- Report1]),
+ TotalN = lists:sum([N || {_, {_, N}} <- Report1]),
+ Perc = fun(Y, N) -> case Y + N of 0 -> 100; S -> round(100 * Y / S) end end,
+ TotalPerc = Perc(TotalY, TotalN),
+ {ok, F} = file:open("$(COVER_REPORT_DIR)/index.html", [write]),
+ io:format(F, "<!DOCTYPE html><html>~n"
+ "<head><meta charset=\"UTF-8\">~n"
+ "<title>Coverage report</title></head>~n"
+ "<body>~n", []),
+ io:format(F, "<h1>Coverage</h1>~n<p>Total: ~p%</p>~n", [TotalPerc]),
+ io:format(F, "<table><tr><th>Module</th><th>Coverage</th></tr>~n", []),
+ [io:format(F, "<tr><td><a href=\"~p.COVER.html\">~p</a></td>"
+ "<td>~p%</td></tr>~n",
+ [M, M, Perc(Y, N)]) || {M, {Y, N}} <- Report1],
+ How = "$(subst $(space),$(comma)$(space),$(basename $(COVERDATA)))",
+ Date = "$(shell date -u "+%Y-%m-%dT%H:%M:%SZ")",
+ io:format(F, "</table>~n"
+ "<p>Generated using ~s and erlang.mk on ~s.</p>~n"
+ "</body></html>", [How, Date]),
+ halt().
+endef
+
+cover-report:
+ $(verbose) mkdir -p $(COVER_REPORT_DIR)
+ $(gen_verbose) $(call erlang,$(cover_report.erl))
+
+endif
+endif # ifneq ($(COVER_REPORT_DIR),)
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: sfx
+
+ifdef RELX_REL
+ifdef SFX
+
+# Configuration.
+
+SFX_ARCHIVE ?= $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/$(RELX_REL_NAME)-$(RELX_REL_VSN).tar.gz
+SFX_OUTPUT_FILE ?= $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME).run
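+
+# The resulting file is a self-extracting archive: the sfx_stub shell script
+# below, with the release tarball appended after the __ARCHIVE_BELOW__ marker.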
+
+# Core targets.
+
+rel:: sfx
+
+# Plugin-specific targets.
+
+define sfx_stub
+#!/bin/sh
+
+TMPDIR=`mktemp -d`
+ARCHIVE=`awk '/^__ARCHIVE_BELOW__$$/ {print NR + 1; exit 0;}' $$0`
+FILENAME=$$(basename $$0)
+REL=$${FILENAME%.*}
+
+tail -n+$$ARCHIVE $$0 | tar -xzf - -C $$TMPDIR
+
+$$TMPDIR/bin/$$REL console
+RET=$$?
+
+rm -rf $$TMPDIR
+
+exit $$RET
+
+__ARCHIVE_BELOW__
+endef
+
+sfx:
+ $(verbose) $(call core_render,sfx_stub,$(SFX_OUTPUT_FILE))
+ $(gen_verbose) cat $(SFX_ARCHIVE) >> $(SFX_OUTPUT_FILE)
+ $(verbose) chmod +x $(SFX_OUTPUT_FILE)
+
+endif
+endif
+
+# Copyright (c) 2013-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# External plugins.
+
+DEP_PLUGINS ?=
+
+$(foreach p,$(DEP_PLUGINS),\
+ $(eval $(if $(findstring /,$p),\
+ $(call core_dep_plugin,$p,$(firstword $(subst /, ,$p))),\
+ $(call core_dep_plugin,$p/plugins.mk,$p))))
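+
+# Illustrative usage (dependency names are hypothetical):
+#   DEP_PLUGINS = cowboy               # loads $(DEPS_DIR)/cowboy/plugins.mk
+#   DEP_PLUGINS = my_dep/mk/dist.mk    # loads that file from my_dep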
+
+help:: help-plugins
+
+help-plugins::
+ $(verbose) :
+
+# Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015-2016, Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Fetch dependencies recursively (without building them).
+
+.PHONY: fetch-deps fetch-doc-deps fetch-rel-deps fetch-test-deps \
+ fetch-shell-deps
+
+.PHONY: $(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+fetch-deps: $(ERLANG_MK_RECURSIVE_DEPS_LIST)
+fetch-doc-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST)
+fetch-rel-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST)
+fetch-test-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST)
+fetch-shell-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+ifneq ($(SKIP_DEPS),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST):
+ $(verbose) :> $@
+else
+# By default, we fetch "normal" dependencies. They are also included no
+# matter the type of requested dependencies.
+#
+# $(ALL_DEPS_DIRS) includes $(BUILD_DEPS).
+
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_DOC_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_REL_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_TEST_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_SHELL_DEPS_DIRS)
+
+# Allow using fetch-deps together with $(DEP_TYPES) to fetch multiple types
+# of dependencies with a single target.
+ifneq ($(filter doc,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_DOC_DEPS_DIRS)
+endif
+ifneq ($(filter rel,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_REL_DEPS_DIRS)
+endif
+ifneq ($(filter test,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_TEST_DEPS_DIRS)
+endif
+ifneq ($(filter shell,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_SHELL_DEPS_DIRS)
+endif
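+
+# Illustrative usage (a sketch):
+#   make fetch-deps DEP_TYPES='doc test'   # also fetch doc and test deps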
+
+ERLANG_MK_RECURSIVE_TMP_LIST := $(abspath $(ERLANG_MK_TMP)/recursive-tmp-deps-$(shell echo $$PPID).log)
+
+$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): | $(ERLANG_MK_TMP)
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_RECURSIVE_TMP_LIST)
+endif
+ $(verbose) touch $(ERLANG_MK_RECURSIVE_TMP_LIST)
+ $(verbose) set -e; for dep in $^ ; do \
+ if ! grep -qs ^$$dep$$ $(ERLANG_MK_RECURSIVE_TMP_LIST); then \
+ echo $$dep >> $(ERLANG_MK_RECURSIVE_TMP_LIST); \
+ if grep -qs -E "^[[:blank:]]*include[[:blank:]]+(erlang\.mk|.*/erlang\.mk|.*ERLANG_MK_FILENAME.*)$$" \
+ $$dep/GNUmakefile $$dep/makefile $$dep/Makefile; then \
+ $(MAKE) -C $$dep fetch-deps \
+ IS_DEP=1 \
+ ERLANG_MK_RECURSIVE_TMP_LIST=$(ERLANG_MK_RECURSIVE_TMP_LIST); \
+ fi \
+ fi \
+ done
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) sort < $(ERLANG_MK_RECURSIVE_TMP_LIST) | \
+ uniq > $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted
+ $(verbose) cmp -s $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted $@ \
+ || mv $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted $@
+ $(verbose) rm -f $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted
+ $(verbose) rm $(ERLANG_MK_RECURSIVE_TMP_LIST)
+endif
+endif # ifneq ($(SKIP_DEPS),)
+
+# List dependencies recursively.
+
+.PHONY: list-deps list-doc-deps list-rel-deps list-test-deps \
+ list-shell-deps
+
+list-deps: $(ERLANG_MK_RECURSIVE_DEPS_LIST)
+list-doc-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST)
+list-rel-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST)
+list-test-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST)
+list-shell-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+list-deps list-doc-deps list-rel-deps list-test-deps list-shell-deps:
+ $(verbose) cat $^
+
+# Query dependencies recursively.
+
+.PHONY: query-deps query-doc-deps query-rel-deps query-test-deps \
+ query-shell-deps
+
+QUERY ?= name fetch_method repo version
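+
+# Illustrative usage (a sketch):
+#   make query-deps QUERY='name version'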
+
+define query_target
+$(1): $(2) clean-tmp-query.log
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(4)
+endif
+ $(verbose) $(foreach dep,$(3),\
+ echo $(PROJECT): $(foreach q,$(QUERY),$(call query_$(q),$(dep))) >> $(4) ;)
+ $(if $(filter-out query-deps,$(1)),,\
+ $(verbose) set -e; for dep in $(3) ; do \
+ if grep -qs ^$$$$dep$$$$ $(ERLANG_MK_TMP)/query.log; then \
+ :; \
+ else \
+ echo $$$$dep >> $(ERLANG_MK_TMP)/query.log; \
+ $(MAKE) -C $(DEPS_DIR)/$$$$dep $$@ QUERY="$(QUERY)" IS_DEP=1 || true; \
+ fi \
+ done)
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) touch $(4)
+ $(verbose) cat $(4)
+endif
+endef
+
+clean-tmp-query.log:
+ifeq ($(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_TMP)/query.log
+endif
+
+$(eval $(call query_target,query-deps,$(ERLANG_MK_RECURSIVE_DEPS_LIST),$(BUILD_DEPS) $(DEPS),$(ERLANG_MK_QUERY_DEPS_FILE)))
+$(eval $(call query_target,query-doc-deps,$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST),$(DOC_DEPS),$(ERLANG_MK_QUERY_DOC_DEPS_FILE)))
+$(eval $(call query_target,query-rel-deps,$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST),$(REL_DEPS),$(ERLANG_MK_QUERY_REL_DEPS_FILE)))
+$(eval $(call query_target,query-test-deps,$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST),$(TEST_DEPS),$(ERLANG_MK_QUERY_TEST_DEPS_FILE)))
+$(eval $(call query_target,query-shell-deps,$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST),$(SHELL_DEPS),$(ERLANG_MK_QUERY_SHELL_DEPS_FILE)))
diff --git a/deps/rabbitmq_recent_history_exchange/etc/rabbit-hare.config b/deps/rabbitmq_recent_history_exchange/etc/rabbit-hare.config
new file mode 100644
index 0000000000..3574dfbc86
--- /dev/null
+++ b/deps/rabbitmq_recent_history_exchange/etc/rabbit-hare.config
@@ -0,0 +1,3 @@
+%% -*- erlang -*-
+%% Note - we still need this for rabbit_exchange_type_recent_history_test_util:plugin_dir/0 to work...
+[].
diff --git a/deps/rabbitmq_recent_history_exchange/etc/rabbit-test.config b/deps/rabbitmq_recent_history_exchange/etc/rabbit-test.config
new file mode 100644
index 0000000000..3574dfbc86
--- /dev/null
+++ b/deps/rabbitmq_recent_history_exchange/etc/rabbit-test.config
@@ -0,0 +1,3 @@
+%% -*- erlang -*-
+%% Note - we still need this for rabbit_exchange_type_recent_history_test_util:plugin_dir/0 to work...
+[].
diff --git a/deps/rabbitmq_recent_history_exchange/include/rabbit_recent_history.hrl b/deps/rabbitmq_recent_history_exchange/include/rabbit_recent_history.hrl
new file mode 100644
index 0000000000..f143955733
--- /dev/null
+++ b/deps/rabbitmq_recent_history_exchange/include/rabbit_recent_history.hrl
@@ -0,0 +1,10 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-define(KEEP_NB, 20).
+-define(RH_TABLE, rh_exchange_table).
+-record(cached, {key, content}).
diff --git a/deps/rabbitmq_recent_history_exchange/rabbitmq-components.mk b/deps/rabbitmq_recent_history_exchange/rabbitmq-components.mk
new file mode 100644
index 0000000000..b2a3be8b35
--- /dev/null
+++ b/deps/rabbitmq_recent_history_exchange/rabbitmq-components.mk
@@ -0,0 +1,359 @@
+ifeq ($(.DEFAULT_GOAL),)
+# Define default goal to `all` because this file defines some targets
+# before the inclusion of erlang.mk, leading to the wrong target becoming
+# the default.
+.DEFAULT_GOAL = all
+endif
+
+# PROJECT_VERSION defaults to:
+# 1. the version exported by rabbitmq-server-release;
+# 2. the version stored in `git-revisions.txt`, if it exists;
+# 3. a version based on git-describe(1), if it is a Git clone;
+# 4. 0.0.0
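+#
+# For case 2, the first line of git-revisions.txt is assumed to start with
+# the words of $(PROJECT_DESCRIPTION) followed by the version, which is why
+# the awk field index below is computed as $(words $(PROJECT_DESCRIPTION) version).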
+
+PROJECT_VERSION := $(RABBITMQ_VERSION)
+
+ifeq ($(PROJECT_VERSION),)
+PROJECT_VERSION := $(shell \
+if test -f git-revisions.txt; then \
+ head -n1 git-revisions.txt | \
+ awk '{print $$$(words $(PROJECT_DESCRIPTION) version);}'; \
+else \
+ (git describe --dirty --abbrev=7 --tags --always --first-parent \
+ 2>/dev/null || echo rabbitmq_v0_0_0) | \
+ sed -e 's/^rabbitmq_v//' -e 's/^v//' -e 's/_/./g' -e 's/-/+/' \
+ -e 's/-/./g'; \
+fi)
+endif
+
+# --------------------------------------------------------------------
+# RabbitMQ components.
+# --------------------------------------------------------------------
+
+# For RabbitMQ repositories, we want to checkout branches which match
+# the parent project. For instance, if the parent project is on a
+# release tag, dependencies must be on the same release tag. If the
+# parent project is on a topic branch, dependencies must be on the same
+# topic branch or fallback to `stable` or `master` whichever was the
+# base of the topic branch.
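+#
+# For example (illustrative only), the definition of dep_amqp_client below
+# makes the fetch try $(current_rmq_ref) first, then $(base_rmq_ref), then
+# master.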
+
+dep_amqp_client = git_rmq rabbitmq-erlang-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_amqp10_client = git_rmq rabbitmq-amqp1.0-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_amqp10_common = git_rmq rabbitmq-amqp1.0-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbit = git_rmq rabbitmq-server $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbit_common = git_rmq rabbitmq-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_amqp1_0 = git_rmq rabbitmq-amqp1.0 $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_amqp = git_rmq rabbitmq-auth-backend-amqp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_cache = git_rmq rabbitmq-auth-backend-cache $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_http = git_rmq rabbitmq-auth-backend-http $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_ldap = git_rmq rabbitmq-auth-backend-ldap $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_oauth2 = git_rmq rabbitmq-auth-backend-oauth2 $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_mechanism_ssl = git_rmq rabbitmq-auth-mechanism-ssl $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_aws = git_rmq rabbitmq-aws $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_boot_steps_visualiser = git_rmq rabbitmq-boot-steps-visualiser $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_cli = git_rmq rabbitmq-cli $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_codegen = git_rmq rabbitmq-codegen $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_consistent_hash_exchange = git_rmq rabbitmq-consistent-hash-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_ct_client_helpers = git_rmq rabbitmq-ct-client-helpers $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_ct_helpers = git_rmq rabbitmq-ct-helpers $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_delayed_message_exchange = git_rmq rabbitmq-delayed-message-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_dotnet_client = git_rmq rabbitmq-dotnet-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_event_exchange = git_rmq rabbitmq-event-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_federation = git_rmq rabbitmq-federation $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_federation_management = git_rmq rabbitmq-federation-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_java_client = git_rmq rabbitmq-java-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_client = git_rmq rabbitmq-jms-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_cts = git_rmq rabbitmq-jms-cts $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_topic_exchange = git_rmq rabbitmq-jms-topic-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_lvc_exchange = git_rmq rabbitmq-lvc-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management = git_rmq rabbitmq-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_agent = git_rmq rabbitmq-management-agent $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_exchange = git_rmq rabbitmq-management-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_themes = git_rmq rabbitmq-management-themes $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_message_timestamp = git_rmq rabbitmq-message-timestamp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_metronome = git_rmq rabbitmq-metronome $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_mqtt = git_rmq rabbitmq-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_objc_client = git_rmq rabbitmq-objc-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_aws = git_rmq rabbitmq-peer-discovery-aws $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_common = git_rmq rabbitmq-peer-discovery-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_consul = git_rmq rabbitmq-peer-discovery-consul $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_etcd = git_rmq rabbitmq-peer-discovery-etcd $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_k8s = git_rmq rabbitmq-peer-discovery-k8s $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_prometheus = git_rmq rabbitmq-prometheus $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_random_exchange = git_rmq rabbitmq-random-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_recent_history_exchange = git_rmq rabbitmq-recent-history-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_routing_node_stamp = git_rmq rabbitmq-routing-node-stamp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_rtopic_exchange = git_rmq rabbitmq-rtopic-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_server_release = git_rmq rabbitmq-server-release $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_sharding = git_rmq rabbitmq-sharding $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_shovel = git_rmq rabbitmq-shovel $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_shovel_management = git_rmq rabbitmq-shovel-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_stomp = git_rmq rabbitmq-stomp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_stream = git_rmq rabbitmq-stream $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_toke = git_rmq rabbitmq-toke $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_top = git_rmq rabbitmq-top $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_tracing = git_rmq rabbitmq-tracing $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_trust_store = git_rmq rabbitmq-trust-store $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_test = git_rmq rabbitmq-test $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_dispatch = git_rmq rabbitmq-web-dispatch $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_stomp = git_rmq rabbitmq-web-stomp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_stomp_examples = git_rmq rabbitmq-web-stomp-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt = git_rmq rabbitmq-web-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt_examples = git_rmq rabbitmq-web-mqtt-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_website = git_rmq rabbitmq-website $(current_rmq_ref) $(base_rmq_ref) live master
+dep_toke = git_rmq toke $(current_rmq_ref) $(base_rmq_ref) master
+
+dep_rabbitmq_public_umbrella = git_rmq rabbitmq-public-umbrella $(current_rmq_ref) $(base_rmq_ref) master
+
+# Third-party dependencies version pinning.
+#
+# We do that in this file, which is copied in all projects, to ensure
+# all projects use the same versions. It avoids conflicts and makes it
+# possible to work with rabbitmq-public-umbrella.
+
+dep_accept = hex 0.3.5
+dep_cowboy = hex 2.8.0
+dep_cowlib = hex 2.9.1
+dep_jsx = hex 2.11.0
+dep_lager = hex 3.8.0
+dep_prometheus = git https://github.com/deadtrickster/prometheus.erl.git master
+dep_ra = git https://github.com/rabbitmq/ra.git master
+dep_ranch = hex 1.7.1
+dep_recon = hex 2.5.1
+dep_observer_cli = hex 1.5.4
+dep_stdout_formatter = hex 0.2.4
+dep_sysmon_handler = hex 1.3.0
+
+RABBITMQ_COMPONENTS = amqp_client \
+ amqp10_common \
+ amqp10_client \
+ rabbit \
+ rabbit_common \
+ rabbitmq_amqp1_0 \
+ rabbitmq_auth_backend_amqp \
+ rabbitmq_auth_backend_cache \
+ rabbitmq_auth_backend_http \
+ rabbitmq_auth_backend_ldap \
+ rabbitmq_auth_backend_oauth2 \
+ rabbitmq_auth_mechanism_ssl \
+ rabbitmq_aws \
+ rabbitmq_boot_steps_visualiser \
+ rabbitmq_cli \
+ rabbitmq_codegen \
+ rabbitmq_consistent_hash_exchange \
+ rabbitmq_ct_client_helpers \
+ rabbitmq_ct_helpers \
+ rabbitmq_delayed_message_exchange \
+ rabbitmq_dotnet_client \
+ rabbitmq_event_exchange \
+ rabbitmq_federation \
+ rabbitmq_federation_management \
+ rabbitmq_java_client \
+ rabbitmq_jms_client \
+ rabbitmq_jms_cts \
+ rabbitmq_jms_topic_exchange \
+ rabbitmq_lvc_exchange \
+ rabbitmq_management \
+ rabbitmq_management_agent \
+ rabbitmq_management_exchange \
+ rabbitmq_management_themes \
+ rabbitmq_message_timestamp \
+ rabbitmq_metronome \
+ rabbitmq_mqtt \
+ rabbitmq_objc_client \
+ rabbitmq_peer_discovery_aws \
+ rabbitmq_peer_discovery_common \
+ rabbitmq_peer_discovery_consul \
+ rabbitmq_peer_discovery_etcd \
+ rabbitmq_peer_discovery_k8s \
+ rabbitmq_prometheus \
+ rabbitmq_random_exchange \
+ rabbitmq_recent_history_exchange \
+ rabbitmq_routing_node_stamp \
+ rabbitmq_rtopic_exchange \
+ rabbitmq_server_release \
+ rabbitmq_sharding \
+ rabbitmq_shovel \
+ rabbitmq_shovel_management \
+ rabbitmq_stomp \
+ rabbitmq_stream \
+ rabbitmq_toke \
+ rabbitmq_top \
+ rabbitmq_tracing \
+ rabbitmq_trust_store \
+ rabbitmq_web_dispatch \
+ rabbitmq_web_mqtt \
+ rabbitmq_web_mqtt_examples \
+ rabbitmq_web_stomp \
+ rabbitmq_web_stomp_examples \
+ rabbitmq_website
+
+# Erlang.mk does not rebuild dependencies once they have been compiled,
+# except for those listed in the `$(FORCE_REBUILD)` variable.
+#
+# We want all RabbitMQ components to always be rebuilt: this eases
+# the work on several components at the same time.
+
+FORCE_REBUILD = $(RABBITMQ_COMPONENTS)
+
+# Several components have a custom erlang.mk/build.config, mainly
+# to disable eunit. Therefore, we can't use the top-level project's
+# erlang.mk copy.
+NO_AUTOPATCH += $(RABBITMQ_COMPONENTS)
+
+ifeq ($(origin current_rmq_ref),undefined)
+ifneq ($(wildcard .git),)
+current_rmq_ref := $(shell (\
+ ref=$$(LANG=C git branch --list | awk '/^\* \(.*detached / {ref=$$0; sub(/.*detached [^ ]+ /, "", ref); sub(/\)$$/, "", ref); print ref; exit;} /^\* / {ref=$$0; sub(/^\* /, "", ref); print ref; exit}');\
+ if test "$$(git rev-parse --short HEAD)" != "$$ref"; then echo "$$ref"; fi))
+else
+current_rmq_ref := master
+endif
+endif
+export current_rmq_ref
+
+ifeq ($(origin base_rmq_ref),undefined)
+ifneq ($(wildcard .git),)
+possible_base_rmq_ref := master
+ifeq ($(possible_base_rmq_ref),$(current_rmq_ref))
+base_rmq_ref := $(current_rmq_ref)
+else
+base_rmq_ref := $(shell \
+ (git rev-parse --verify -q master >/dev/null && \
+ git rev-parse --verify -q $(possible_base_rmq_ref) >/dev/null && \
+ git merge-base --is-ancestor $$(git merge-base master HEAD) $(possible_base_rmq_ref) && \
+ echo $(possible_base_rmq_ref)) || \
+ echo master)
+endif
+else
+base_rmq_ref := master
+endif
+endif
+export base_rmq_ref
+
+# Repository URL selection.
+#
+# First, we infer other components' location from the current project
+# repository URL, if it's a Git repository:
+# - We take the "origin" remote URL as the base
+# - The current project name and repository name are replaced by the
+#   target's properties:
+#   e.g. rabbitmq-common is replaced by rabbitmq-codegen
+#   e.g. rabbit_common is replaced by rabbitmq_codegen
+#
+# If cloning from this computed location fails, we fall back to the RabbitMQ
+# upstream, which is GitHub.
+
+# Macro to transform e.g. "rabbit_common" into "rabbitmq-common".
+rmq_cmp_repo_name = $(word 2,$(dep_$(1)))
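+# e.g. $(call rmq_cmp_repo_name,rabbit_common) takes the second word of
+# $(dep_rabbit_common) above and yields "rabbitmq-common".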
+
+# Upstream URL for the current project.
+RABBITMQ_COMPONENT_REPO_NAME := $(call rmq_cmp_repo_name,$(PROJECT))
+RABBITMQ_UPSTREAM_FETCH_URL ?= https://github.com/rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git
+RABBITMQ_UPSTREAM_PUSH_URL ?= git@github.com:rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git
+
+# Current URL for the current project. If this is not a Git clone,
+# default to the upstream Git repository.
+ifneq ($(wildcard .git),)
+git_origin_fetch_url := $(shell git config remote.origin.url)
+git_origin_push_url := $(shell git config remote.origin.pushurl || git config remote.origin.url)
+RABBITMQ_CURRENT_FETCH_URL ?= $(git_origin_fetch_url)
+RABBITMQ_CURRENT_PUSH_URL ?= $(git_origin_push_url)
+else
+RABBITMQ_CURRENT_FETCH_URL ?= $(RABBITMQ_UPSTREAM_FETCH_URL)
+RABBITMQ_CURRENT_PUSH_URL ?= $(RABBITMQ_UPSTREAM_PUSH_URL)
+endif
+
+# Macro to replace the following pattern:
+# 1. /foo.git -> /bar.git
+# 2. /foo -> /bar
+# 3. /foo/ -> /bar/
+subst_repo_name = $(patsubst %/$(1)/%,%/$(2)/%,$(patsubst %/$(1),%/$(2),$(patsubst %/$(1).git,%/$(2).git,$(3))))
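+# e.g. $(call subst_repo_name,rabbitmq-common,rabbitmq-codegen,$(url)) rewrites
+# a .../rabbitmq-common.git URL into .../rabbitmq-codegen.git.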
+
+# Macro to replace both the project's name (e.g. "rabbit_common") and
+# repository name (e.g. "rabbitmq-common") with the target's equivalents.
+#
+# This macro is kept on one line because we don't want whitespaces in
+# the returned value, as it's used in $(dep_fetch_git_rmq) in a shell
+# single-quoted string.
+dep_rmq_repo = $(if $(dep_$(2)),$(call subst_repo_name,$(PROJECT),$(2),$(call subst_repo_name,$(RABBITMQ_COMPONENT_REPO_NAME),$(call rmq_cmp_repo_name,$(2)),$(1))),$(pkg_$(1)_repo))
+
+dep_rmq_commits = $(if $(dep_$(1)), \
+ $(wordlist 3,$(words $(dep_$(1))),$(dep_$(1))), \
+ $(pkg_$(1)_commit))
+
+define dep_fetch_git_rmq
+ fetch_url1='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_FETCH_URL),$(1))'; \
+ fetch_url2='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_FETCH_URL),$(1))'; \
+ if test "$$$$fetch_url1" != '$(RABBITMQ_CURRENT_FETCH_URL)' && \
+ git clone -q -n -- "$$$$fetch_url1" $(DEPS_DIR)/$(call dep_name,$(1)); then \
+ fetch_url="$$$$fetch_url1"; \
+ push_url='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_PUSH_URL),$(1))'; \
+ elif git clone -q -n -- "$$$$fetch_url2" $(DEPS_DIR)/$(call dep_name,$(1)); then \
+ fetch_url="$$$$fetch_url2"; \
+ push_url='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_PUSH_URL),$(1))'; \
+ fi; \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && ( \
+ $(foreach ref,$(call dep_rmq_commits,$(1)), \
+ git checkout -q $(ref) >/dev/null 2>&1 || \
+ ) \
+ (echo "error: no valid pathspec among: $(call dep_rmq_commits,$(1))" \
+ 1>&2 && false) ) && \
+ (test "$$$$fetch_url" = "$$$$push_url" || \
+ git remote set-url --push origin "$$$$push_url")
+endef
+
+# --------------------------------------------------------------------
+# Component distribution.
+# --------------------------------------------------------------------
+
+list-dist-deps::
+ @:
+
+prepare-dist::
+ @:
+
+# --------------------------------------------------------------------
+# Umbrella-specific settings.
+# --------------------------------------------------------------------
+
+# If the top-level project is a RabbitMQ component, we override
+# $(DEPS_DIR) for this project to point to the top-level's one.
+#
+# We also verify that the guessed DEPS_DIR is actually named `deps`,
+# to rule out any situation where it is a coincidence that we found a
+# `rabbitmq-components.mk` in upper directories.
+
+possible_deps_dir_1 = $(abspath ..)
+possible_deps_dir_2 = $(abspath ../../..)
+
+ifeq ($(notdir $(possible_deps_dir_1)),deps)
+ifneq ($(wildcard $(possible_deps_dir_1)/../rabbitmq-components.mk),)
+deps_dir_overriden = 1
+DEPS_DIR ?= $(possible_deps_dir_1)
+DISABLE_DISTCLEAN = 1
+endif
+endif
+
+ifeq ($(deps_dir_overriden),)
+ifeq ($(notdir $(possible_deps_dir_2)),deps)
+ifneq ($(wildcard $(possible_deps_dir_2)/../rabbitmq-components.mk),)
+deps_dir_overriden = 1
+DEPS_DIR ?= $(possible_deps_dir_2)
+DISABLE_DISTCLEAN = 1
+endif
+endif
+endif
+
+ifneq ($(wildcard UMBRELLA.md),)
+DISABLE_DISTCLEAN = 1
+endif
+
+# We disable `make distclean` so $(DEPS_DIR) is not accidentally removed.
+
+ifeq ($(DISABLE_DISTCLEAN),1)
+ifneq ($(filter distclean distclean-deps,$(MAKECMDGOALS)),)
+SKIP_DEPS = 1
+endif
+endif
diff --git a/deps/rabbitmq_recent_history_exchange/src/rabbit_exchange_type_recent_history.erl b/deps/rabbitmq_recent_history_exchange/src/rabbit_exchange_type_recent_history.erl
new file mode 100644
index 0000000000..7758f72184
--- /dev/null
+++ b/deps/rabbitmq_recent_history_exchange/src/rabbit_exchange_type_recent_history.erl
@@ -0,0 +1,206 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+-module(rabbit_exchange_type_recent_history).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+-include_lib("rabbit_common/include/rabbit_framing.hrl").
+-include("rabbit_recent_history.hrl").
+
+-behaviour(rabbit_exchange_type).
+
+-import(rabbit_misc, [table_lookup/2]).
+
+-export([description/0, serialise_events/0, route/2]).
+-export([validate/1, validate_binding/2, create/2, delete/3, add_binding/3,
+ remove_bindings/3, assert_args_equivalence/2, policy_changed/2]).
+-export([setup_schema/0, disable_plugin/0]).
+-export([info/1, info/2]).
+
+-rabbit_boot_step({?MODULE,
+ [{description, "exchange type x-recent-history"},
+ {mfa, {rabbit_registry, register,
+ [exchange, <<"x-recent-history">>, ?MODULE]}},
+ {cleanup, {?MODULE, disable_plugin, []}},
+ {requires, rabbit_registry},
+ {enables, kernel_ready}]}).
+
+-rabbit_boot_step({rabbit_exchange_type_recent_history_mnesia,
+ [{description, "recent history exchange type: mnesia"},
+ {mfa, {?MODULE, setup_schema, []}},
+ {requires, database},
+ {enables, external_infrastructure}]}).
+
+-define(INTEGER_ARG_TYPES, [byte, short, signedint, long]).
+
+info(_X) -> [].
+info(_X, _) -> [].
+
+description() ->
+ [{name, <<"x-recent-history">>},
+ {description, <<"Recent History Exchange.">>}].
+
+serialise_events() -> false.
+
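+%% Cache the message (unless the publisher asked not to) and route it to
+%% every binding of this exchange, regardless of routing key.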
+route(#exchange{name = XName,
+ arguments = Args},
+ #delivery{message = Message}) ->
+ Length = table_lookup(Args, <<"x-recent-history-length">>),
+ maybe_cache_msg(XName, Message, Length),
+ rabbit_router:match_routing_key(XName, ['_']).
+
+validate(#exchange{arguments = Args}) ->
+ case table_lookup(Args, <<"x-recent-history-length">>) of
+ undefined ->
+ ok;
+ {Type, Val} ->
+ case check_int_arg(Type) of
+ ok when Val > 0 ->
+ ok;
+ _ ->
+ rabbit_misc:protocol_error(precondition_failed,
+ "Invalid argument ~p, "
+ "'x-recent-history-length' "
+ "must be a positive integer",
+ [Val])
+ end
+ end.
+
+validate_binding(_X, _B) -> ok.
+create(_Tx, _X) -> ok.
+policy_changed(_X1, _X2) -> ok.
+
+delete(transaction, #exchange{ name = XName }, _Bs) ->
+ rabbit_misc:execute_mnesia_transaction(
+ fun() ->
+ mnesia:delete(?RH_TABLE, XName, write)
+ end),
+ ok;
+delete(none, _Exchange, _Bs) ->
+ ok.
+
+add_binding(transaction, #exchange{ name = XName },
+ #binding{ destination = #resource{kind = queue} = QName }) ->
+ case rabbit_amqqueue:lookup(QName) of
+ {error, not_found} ->
+ destination_not_found_error(QName);
+ {ok, Q} ->
+ Msgs = get_msgs_from_cache(XName),
+ deliver_messages([Q], Msgs)
+ end,
+ ok;
+add_binding(transaction, #exchange{ name = XName },
+ #binding{ destination = #resource{kind = exchange} = DestName }) ->
+ case rabbit_exchange:lookup(DestName) of
+ {error, not_found} ->
+ destination_not_found_error(DestName);
+ {ok, X} ->
+ Msgs = get_msgs_from_cache(XName),
+ [begin
+ Delivery = rabbit_basic:delivery(false, false, Msg, undefined),
+ Qs = rabbit_exchange:route(X, Delivery),
+ case rabbit_amqqueue:lookup(Qs) of
+ [] ->
+ destination_not_found_error(Qs);
+ QPids ->
+ deliver_messages(QPids, [Msg])
+ end
+ end || Msg <- Msgs]
+ end,
+ ok;
+add_binding(none, _Exchange, _Binding) ->
+ ok.
+
+remove_bindings(_Tx, _X, _Bs) -> ok.
+
+assert_args_equivalence(X, Args) ->
+ rabbit_exchange:assert_args_equivalence(X, Args).
+
+%%----------------------------------------------------------------------------
+
+setup_schema() ->
+ mnesia:create_table(?RH_TABLE,
+ [{attributes, record_info(fields, cached)},
+ {record_name, cached},
+ {type, set}]),
+ mnesia:add_table_copy(?RH_TABLE, node(), ram_copies),
+ mnesia:wait_for_tables([?RH_TABLE], 30000),
+ ok.
+
+disable_plugin() ->
+ rabbit_registry:unregister(exchange, <<"x-recent-history">>),
+ mnesia:delete_table(?RH_TABLE),
+ ok.
+
+%%----------------------------------------------------------------------------
+%% private
+maybe_cache_msg(XName,
+ #basic_message{content =
+ #content{properties =
+ #'P_basic'{headers = Headers}}}
+ = Message,
+ Length) ->
+ case Headers of
+ undefined ->
+ cache_msg(XName, Message, Length);
+ _ ->
+ Store = table_lookup(Headers, <<"x-recent-history-no-store">>),
+ case Store of
+ {bool, true} ->
+ ok;
+ _ ->
+ cache_msg(XName, Message, Length)
+ end
+ end.
+
+cache_msg(XName, Message, Length) ->
+ rabbit_misc:execute_mnesia_transaction(
+ fun () ->
+ Cached = get_msgs_from_cache(XName),
+ store_msg(XName, Cached, Message, Length)
+ end).
+
+get_msgs_from_cache(XName) ->
+ rabbit_misc:execute_mnesia_transaction(
+ fun () ->
+ case mnesia:read(?RH_TABLE, XName) of
+ [] ->
+ [];
+ [#cached{key = XName, content=Cached}] ->
+ Cached
+ end
+ end).
+
+store_msg(Key, Cached, Message, undefined) ->
+ store_msg0(Key, Cached, Message, ?KEEP_NB);
+store_msg(Key, Cached, Message, {_Type, Length}) ->
+ store_msg0(Key, Cached, Message, Length).
+
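+%% Prepend the new message and trim the cache so that at most Length
+%% messages are kept (newest first).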
+store_msg0(Key, Cached, Message, Length) ->
+ mnesia:write(?RH_TABLE,
+ #cached{key = Key,
+ content = [Message|lists:sublist(Cached, Length-1)]},
+ write).
+
+deliver_messages(Qs, Msgs) ->
+ lists:map(
+ fun (Msg) ->
+ Delivery = rabbit_basic:delivery(false, false, Msg, undefined),
+ rabbit_amqqueue:deliver(Qs, Delivery)
+ end, lists:reverse(Msgs)).
+
+destination_not_found_error(DestName) ->
+ rabbit_misc:protocol_error(
+ internal_error,
+ "could not find queue/exchange '~s'",
+ [DestName]).
+
+%% adapted from rabbit_amqqueue.erl
+check_int_arg(Type) ->
+ case lists:member(Type, ?INTEGER_ARG_TYPES) of
+ true -> ok;
+ false -> {error, {unacceptable_type, Type}}
+ end.
diff --git a/deps/rabbitmq_recent_history_exchange/test/system_SUITE.erl b/deps/rabbitmq_recent_history_exchange/test/system_SUITE.erl
new file mode 100644
index 0000000000..f14b4e549f
--- /dev/null
+++ b/deps/rabbitmq_recent_history_exchange/test/system_SUITE.erl
@@ -0,0 +1,280 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+-module(system_SUITE).
+
+-compile(export_all).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include("rabbit_recent_history.hrl").
+
+all() ->
+ [
+ {group, non_parallel_tests}
+ ].
+
+groups() ->
+ [
+ {non_parallel_tests, [], [
+ default_length_test,
+ length_argument_test,
+ wrong_argument_type_test,
+ no_store_test,
+ e2e_test,
+ multinode_test
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Test suite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ inets:start(),
+ rabbit_ct_helpers:log_environment(),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, ?MODULE},
+ {rmq_nodes_count, 2}
+ ]),
+ rabbit_ct_helpers:run_setup_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_group(_, Config) ->
+ Config.
+
+end_per_group(_, Config) ->
+ Config.
+
+init_per_testcase(Testcase, Config) ->
+ TestCaseName = rabbit_ct_helpers:config_to_testcase_name(Config, Testcase),
+ rabbit_ct_helpers:set_config(Config, {test_resource_name,
+ re:replace(TestCaseName, "/", "-", [global, {return, list}])}).
+
+end_per_testcase(_Testcase, Config) ->
+ Config.
+
+%% -------------------------------------------------------------------
+%% Test cases.
+%% -------------------------------------------------------------------
+
+default_length_test(Config) ->
+ Qs = qs(),
+ test0(Config, fun () ->
+ #'basic.publish'{exchange = make_exchange_name(Config, "0")}
+ end,
+ fun() ->
+ #amqp_msg{props = #'P_basic'{}, payload = <<>>}
+ end, [], Qs, 100, length(Qs) * ?KEEP_NB).
+
+length_argument_test(Config) ->
+ Qs = qs(),
+ test0(Config, fun () ->
+ #'basic.publish'{exchange = make_exchange_name(Config, "0")}
+ end,
+ fun() ->
+ #amqp_msg{props = #'P_basic'{}, payload = <<>>}
+ end, [{<<"x-recent-history-length">>, long, 30}], Qs, 100, length(Qs) * 30).
+
+wrong_argument_type_test(Config) ->
+ wrong_argument_type_test0(Config, -30),
+ wrong_argument_type_test0(Config, 0).
+
+
+no_store_test(Config) ->
+ Qs = qs(),
+ test0(Config, fun () ->
+ #'basic.publish'{exchange = make_exchange_name(Config, "0")}
+ end,
+ fun() ->
+ H = [{<<"x-recent-history-no-store">>, bool, true}],
+ #amqp_msg{props = #'P_basic'{headers = H}, payload = <<>>}
+ end, [], Qs, 100, 0).
+
+e2e_test(Config) ->
+ MsgCount = 10,
+
+ {Conn, Chan} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+
+ #'exchange.declare_ok'{} =
+ amqp_channel:call(Chan,
+ #'exchange.declare' {
+ exchange = make_exchange_name(Config, "1"),
+ type = <<"x-recent-history">>,
+ auto_delete = true
+ }),
+
+ #'exchange.declare_ok'{} =
+ amqp_channel:call(Chan,
+ #'exchange.declare' {
+ exchange = make_exchange_name(Config, "2"),
+ type = <<"direct">>,
+ auto_delete = true
+ }),
+
+ #'queue.declare_ok'{queue = Q} =
+ amqp_channel:call(Chan, #'queue.declare' {
+ queue = <<"q">>
+ }),
+
+ #'queue.bind_ok'{} =
+ amqp_channel:call(Chan, #'queue.bind' {
+ queue = Q,
+ exchange = make_exchange_name(Config, "2"),
+ routing_key = <<"">>
+ }),
+
+ #'tx.select_ok'{} = amqp_channel:call(Chan, #'tx.select'{}),
+ [amqp_channel:call(Chan,
+ #'basic.publish'{exchange = make_exchange_name(Config, "1")},
+ #amqp_msg{props = #'P_basic'{}, payload = <<>>}) ||
+ _ <- lists:duplicate(MsgCount, const)],
+ amqp_channel:call(Chan, #'tx.commit'{}),
+
+ amqp_channel:call(Chan,
+ #'exchange.bind' {
+ source = make_exchange_name(Config, "1"),
+ destination = make_exchange_name(Config, "2"),
+ routing_key = <<"">>
+ }),
+
+ #'queue.declare_ok'{message_count = Count, queue = Q} =
+ amqp_channel:call(Chan, #'queue.declare' {
+ passive = true,
+ queue = Q
+ }),
+
+ ?assertEqual(MsgCount, Count),
+
+ amqp_channel:call(Chan, #'exchange.delete' { exchange = make_exchange_name(Config, "1") }),
+ amqp_channel:call(Chan, #'exchange.delete' { exchange = make_exchange_name(Config, "2") }),
+ amqp_channel:call(Chan, #'queue.delete' { queue = Q }),
+
+ rabbit_ct_client_helpers:close_connection_and_channel(Conn, Chan),
+ ok.
+
+multinode_test(Config) ->
+ {Conn, Chan} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 1),
+
+ #'exchange.declare_ok'{} =
+ amqp_channel:call(Chan,
+ #'exchange.declare' {
+ exchange = make_exchange_name(Config, "1"),
+ type = <<"x-recent-history">>,
+ auto_delete = false
+ }),
+
+ #'queue.declare_ok'{queue = Q} =
+ amqp_channel:call(Chan, #'queue.declare' {
+ queue = <<"q">>
+ }),
+
+ #'queue.bind_ok'{} =
+ amqp_channel:call(Chan, #'queue.bind' {
+ queue = Q,
+ exchange = make_exchange_name(Config, "1"),
+ routing_key = <<"">>
+ }),
+
+ amqp_channel:call(Chan, #'queue.delete' { queue = Q }),
+ rabbit_ct_client_helpers:close_connection_and_channel(Conn, Chan),
+
+ rabbit_ct_broker_helpers:restart_broker(Config, 1),
+
+ {Conn2, Chan2} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+
+ #'queue.declare_ok'{queue = Q2} =
+ amqp_channel:call(Chan2, #'queue.declare' {
+ queue = <<"q2">>
+ }),
+
+ #'queue.bind_ok'{} =
+ amqp_channel:call(Chan2, #'queue.bind' {
+ queue = Q2,
+ exchange = make_exchange_name(Config, "1"),
+ routing_key = <<"">>
+ }),
+
+ amqp_channel:call(Chan2, #'exchange.delete' { exchange = make_exchange_name(Config, "2") }),
+ amqp_channel:call(Chan2, #'queue.delete' { queue = Q2 }),
+
+ rabbit_ct_client_helpers:close_connection_and_channel(Conn2, Chan2),
+ ok.
+
+test0(Config, MakeMethod, MakeMsg, DeclareArgs, Queues, MsgCount, ExpectedCount) ->
+ Chan = rabbit_ct_client_helpers:open_channel(Config),
+ #'exchange.declare_ok'{} =
+ amqp_channel:call(Chan,
+ #'exchange.declare' {
+ exchange = make_exchange_name(Config, "0"),
+ type = <<"x-recent-history">>,
+ auto_delete = true,
+ arguments = DeclareArgs
+ }),
+
+ #'tx.select_ok'{} = amqp_channel:call(Chan, #'tx.select'{}),
+ [amqp_channel:call(Chan,
+ MakeMethod(),
+ MakeMsg()) || _ <- lists:duplicate(MsgCount, const)],
+ amqp_channel:call(Chan, #'tx.commit'{}),
+
+ [#'queue.declare_ok'{} =
+ amqp_channel:call(Chan, #'queue.declare' {
+ queue = Q, exclusive = true }) || Q <- Queues],
+
+ [#'queue.bind_ok'{} =
+ amqp_channel:call(Chan, #'queue.bind' { queue = Q,
+ exchange = make_exchange_name(Config, "0"),
+ routing_key = <<"">>})
+ || Q <- Queues],
+
+ %% Wait a few seconds for all messages to be queued.
+ timer:sleep(3000),
+
+ Counts =
+ [begin
+ #'queue.declare_ok'{message_count = M} =
+ amqp_channel:call(Chan, #'queue.declare' {queue = Q,
+ exclusive = true }),
+ M
+ end || Q <- Queues],
+
+ ?assertEqual(ExpectedCount, lists:sum(Counts)),
+
+ amqp_channel:call(Chan, #'exchange.delete' { exchange = make_exchange_name(Config, "0") }),
+ [amqp_channel:call(Chan, #'queue.delete' { queue = Q }) || Q <- Queues],
+ rabbit_ct_client_helpers:close_channel(Chan),
+
+ ok.
+
+wrong_argument_type_test0(Config, Length) ->
+ Conn = rabbit_ct_client_helpers:open_unmanaged_connection(Config),
+ Chan = amqp_connection:open_channel(Conn),
+ DeclareArgs = [{<<"x-recent-history-length">>, long, Length}],
+ process_flag(trap_exit, true),
+ ?assertExit(_, amqp_channel:call(Chan,
+ #'exchange.declare' {
+ exchange = make_exchange_name(Config, "0"),
+ type = <<"x-recent-history">>,
+ auto_delete = true,
+ arguments = DeclareArgs
+ })),
+ amqp_connection:close(Conn),
+ ok.
+
+qs() ->
+ [<<"q0">>, <<"q1">>, <<"q2">>, <<"q3">>].
+
+make_exchange_name(Config, Suffix) ->
+ B = rabbit_ct_helpers:get_config(Config, test_resource_name),
+ erlang:list_to_binary("x-" ++ B ++ "-" ++ Suffix).
diff --git a/deps/rabbitmq_sharding/.gitignore b/deps/rabbitmq_sharding/.gitignore
new file mode 100644
index 0000000000..855de11b42
--- /dev/null
+++ b/deps/rabbitmq_sharding/.gitignore
@@ -0,0 +1,18 @@
+.sw?
+.*.sw?
+*.beam
+/.erlang.mk/
+/cover/
+/deps/
+/doc/
+/ebin/
+/escript/
+/escript.lock
+/logs/
+/plugins/
+/plugins.lock
+/sbin/
+/sbin.lock
+/xrefr
+
+/rabbitmq_sharding.d
diff --git a/deps/rabbitmq_sharding/.hgignore b/deps/rabbitmq_sharding/.hgignore
new file mode 100644
index 0000000000..eb32f05f13
--- /dev/null
+++ b/deps/rabbitmq_sharding/.hgignore
@@ -0,0 +1,7 @@
+~$
+\.beam$
+^build/
+^dist/
+^cover/
+^erl_crash.dump$
+^ebin/rabbitmq_sharding\.app$
diff --git a/deps/rabbitmq_sharding/.travis.yml b/deps/rabbitmq_sharding/.travis.yml
new file mode 100644
index 0000000000..0420390dba
--- /dev/null
+++ b/deps/rabbitmq_sharding/.travis.yml
@@ -0,0 +1,61 @@
+# vim:sw=2:et:
+
+os: linux
+dist: xenial
+language: elixir
+notifications:
+ email:
+ recipients:
+ - alerts@rabbitmq.com
+ on_success: never
+ on_failure: always
+addons:
+ apt:
+ packages:
+ - awscli
+cache:
+ apt: true
+env:
+ global:
+ - secure: uA/bYdGfaL4VabgEv2qXHvr+7Uel70wACE7OoJrVTk8s6w543xWKNGgCt4dsOdwVwuo/rbDNXkZ1xMN86HYx7EJS6IxX6C5xG4BLEI2rxAsryxdrALSzjsocbxN71CwZsHO/lpwDKe/wOci0CxhLbFsc0NJEEOTEGONwF+IziKs=
+ - secure: LX/VG+uGA02GpKCB3m+uqGrr7EKVAYBBxi/K3IPYY3EbJIzKRe+zlSZO46MOjgD9NTLqFPR8oM7toGFOmarWT1YJI/pDutErJlwInTxJ4R1vij/E2z1Dl8CSzKlbK/O7qjIB+MObdz+auPVczrW7oaHHKPRPNV/ZZ0n70TuIqAY=
+
+ # $base_rmq_ref is used by rabbitmq-components.mk to select the
+ # appropriate branch for dependencies.
+ - base_rmq_ref=master
+
+elixir:
+ - '1.9'
+otp_release:
+ - '21.3'
+ - '22.2'
+
+install:
+  # This is an Erlang project (we only set the language to Elixir to ensure
+  # Elixir is installed), so we don't want Travis to run mix(1) automatically,
+  # as it would break.
+ skip
+
+script:
+ # $current_rmq_ref is also used by rabbitmq-components.mk to select
+ # the appropriate branch for dependencies.
+ - make check-rabbitmq-components.mk
+     current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+ - make xref
+     current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+ - make tests
+     current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+
+after_failure:
+ - |
+ cd "$TRAVIS_BUILD_DIR"
+ if test -d logs && test "$AWS_ACCESS_KEY_ID" && test "$AWS_SECRET_ACCESS_KEY"; then
+ archive_name="$(basename "$TRAVIS_REPO_SLUG")-$TRAVIS_JOB_NUMBER"
+
+ tar -c --transform "s/^logs/${archive_name}/" -f - logs | \
+ xz > "${archive_name}.tar.xz"
+
+ aws s3 cp "${archive_name}.tar.xz" s3://server-release-pipeline/travis-ci-logs/ \
+ --region eu-west-1 \
+ --acl public-read
+ fi
diff --git a/deps/rabbitmq_sharding/CODE_OF_CONDUCT.md b/deps/rabbitmq_sharding/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000000..08697906fd
--- /dev/null
+++ b/deps/rabbitmq_sharding/CODE_OF_CONDUCT.md
@@ -0,0 +1,44 @@
+# Contributor Code of Conduct
+
+As contributors and maintainers of this project, and in the interest of fostering an open
+and welcoming community, we pledge to respect all people who contribute through reporting
+issues, posting feature requests, updating documentation, submitting pull requests or
+patches, and other activities.
+
+We are committed to making participation in this project a harassment-free experience for
+everyone, regardless of level of experience, gender, gender identity and expression,
+sexual orientation, disability, personal appearance, body size, race, ethnicity, age,
+religion, or nationality.
+
+Examples of unacceptable behavior by participants include:
+
+ * The use of sexualized language or imagery
+ * Personal attacks
+ * Trolling or insulting/derogatory comments
+ * Public or private harassment
+ * Publishing others' private information, such as physical or electronic addresses,
+ without explicit permission
+ * Other unethical or unprofessional conduct
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments,
+commits, code, wiki edits, issues, and other contributions that are not aligned to this
+Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors
+that they deem inappropriate, threatening, offensive, or harmful.
+
+By adopting this Code of Conduct, project maintainers commit themselves to fairly and
+consistently applying these principles to every aspect of managing this project. Project
+maintainers who do not follow or enforce the Code of Conduct may be permanently removed
+from the project team.
+
+This Code of Conduct applies both within project spaces and in public spaces when an
+individual is representing the project or its community.
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by
+contacting a project maintainer at [info@rabbitmq.com](mailto:info@rabbitmq.com). All complaints will
+be reviewed and investigated and will result in a response that is deemed necessary and
+appropriate to the circumstances. Maintainers are obligated to maintain confidentiality
+with regard to the reporter of an incident.
+
+This Code of Conduct is adapted from the
+[Contributor Covenant](https://contributor-covenant.org), version 1.3.0, available at
+[contributor-covenant.org/version/1/3/0/](https://contributor-covenant.org/version/1/3/0/)
diff --git a/deps/rabbitmq_sharding/CONTRIBUTING.md b/deps/rabbitmq_sharding/CONTRIBUTING.md
new file mode 100644
index 0000000000..23a92fef9c
--- /dev/null
+++ b/deps/rabbitmq_sharding/CONTRIBUTING.md
@@ -0,0 +1,38 @@
+## Overview
+
+RabbitMQ projects use pull requests to discuss, collaborate on and accept code contributions.
+Pull requests are the primary place for discussing code changes.
+
+## How to Contribute
+
+The process is fairly standard:
+
+ * Fork the repository or repositories you plan on contributing to
+ * Clone [RabbitMQ umbrella repository](https://github.com/rabbitmq/rabbitmq-public-umbrella)
+ * `cd umbrella`, `make co`
+ * Create a branch with a descriptive name in the relevant repositories
+ * Make your changes, run tests, commit with a [descriptive message](https://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html), push to your fork
+ * Submit pull requests with an explanation what has been changed and **why**
+ * Submit a filled out and signed [Contributor Agreement](https://github.com/rabbitmq/ca#how-to-submit) if needed (see below)
+ * Be patient. We will get to your pull request eventually
+
+If what you are going to work on is a substantial change, please first ask the core team
+for their opinion on the [RabbitMQ mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
+
+
+## Code of Conduct
+
+See [CODE_OF_CONDUCT.md](./CODE_OF_CONDUCT.md).
+
+
+## Contributor Agreement
+
+If you want to contribute a non-trivial change, please submit a signed copy of our
+[Contributor Agreement](https://github.com/rabbitmq/ca#how-to-submit) around the time
+you submit your pull request. This will make it much easier (in some cases, possible)
+for the RabbitMQ team at Pivotal to merge your contribution.
+
+
+## Where to Ask Questions
+
+If something isn't clear, feel free to ask on our [mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
diff --git a/deps/rabbitmq_sharding/LICENSE b/deps/rabbitmq_sharding/LICENSE
new file mode 100644
index 0000000000..f2da65d175
--- /dev/null
+++ b/deps/rabbitmq_sharding/LICENSE
@@ -0,0 +1,4 @@
+This package is licensed under the MPL 2.0. For the MPL 2.0, please see LICENSE-MPL-RabbitMQ.
+
+If you have any questions regarding licensing, please contact us at
+info@rabbitmq.com.
diff --git a/deps/rabbitmq_sharding/LICENSE-MPL-RabbitMQ b/deps/rabbitmq_sharding/LICENSE-MPL-RabbitMQ
new file mode 100644
index 0000000000..14e2f777f6
--- /dev/null
+++ b/deps/rabbitmq_sharding/LICENSE-MPL-RabbitMQ
@@ -0,0 +1,373 @@
+Mozilla Public License Version 2.0
+==================================
+
+1. Definitions
+--------------
+
+1.1. "Contributor"
+ means each individual or legal entity that creates, contributes to
+ the creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+ means the combination of the Contributions of others (if any) used
+ by a Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+ means Source Code Form to which the initial Contributor has attached
+ the notice in Exhibit A, the Executable Form of such Source Code
+ Form, and Modifications of such Source Code Form, in each case
+ including portions thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ (a) that the initial Contributor has attached the notice described
+ in Exhibit B to the Covered Software; or
+
+ (b) that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the
+ terms of a Secondary License.
+
+1.6. "Executable Form"
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+ means a work that combines Covered Software with other material, in
+ a separate file or files, that is not Covered Software.
+
+1.8. "License"
+ means this document.
+
+1.9. "Licensable"
+ means having the right to grant, to the maximum extent possible,
+ whether at the time of the initial grant or subsequently, any and
+ all of the rights conveyed by this License.
+
+1.10. "Modifications"
+ means any of the following:
+
+ (a) any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered
+ Software; or
+
+ (b) any new file in Source Code Form that contains any Covered
+ Software.
+
+1.11. "Patent Claims" of a Contributor
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the
+ License, by the making, using, selling, offering for sale, having
+ made, import, or transfer of either its Contributions or its
+ Contributor Version.
+
+1.12. "Secondary License"
+ means either the GNU General Public License, Version 2.0, the GNU
+ Lesser General Public License, Version 2.1, the GNU Affero General
+ Public License, Version 3.0, or any later versions of those
+ licenses.
+
+1.13. "Source Code Form"
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that
+ controls, is controlled by, or is under common control with You. For
+ purposes of this definition, "control" means (a) the power, direct
+ or indirect, to cause the direction or management of such entity,
+ whether by contract or otherwise, or (b) ownership of more than
+ fifty percent (50%) of the outstanding shares or beneficial
+ ownership of such entity.
+
+2. License Grants and Conditions
+--------------------------------
+
+2.1. Grants
+
+Each Contributor hereby grants You a world-wide, royalty-free,
+non-exclusive license:
+
+(a) under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+(b) under Patent Claims of such Contributor to make, use, sell, offer
+ for sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+The licenses granted in Section 2.1 with respect to any Contribution
+become effective for each Contribution on the date the Contributor first
+distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+The licenses granted in this Section 2 are the only rights granted under
+this License. No additional rights or licenses will be implied from the
+distribution or licensing of Covered Software under this License.
+Notwithstanding Section 2.1(b) above, no patent license is granted by a
+Contributor:
+
+(a) for any code that a Contributor has removed from Covered Software;
+ or
+
+(b) for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+(c) under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+This License does not grant any rights in the trademarks, service marks,
+or logos of any Contributor (except as may be necessary to comply with
+the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+No Contributor makes additional grants as a result of Your choice to
+distribute the Covered Software under a subsequent version of this
+License (see Section 10.2) or under the terms of a Secondary License (if
+permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+Each Contributor represents that the Contributor believes its
+Contributions are its original creation(s) or it has sufficient rights
+to grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+This License is not intended to limit any rights You have under
+applicable copyright doctrines of fair use, fair dealing, or other
+equivalents.
+
+2.7. Conditions
+
+Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
+in Section 2.1.
+
+3. Responsibilities
+-------------------
+
+3.1. Distribution of Source Form
+
+All distribution of Covered Software in Source Code Form, including any
+Modifications that You create or to which You contribute, must be under
+the terms of this License. You must inform recipients that the Source
+Code Form of the Covered Software is governed by the terms of this
+License, and how they can obtain a copy of this License. You may not
+attempt to alter or restrict the recipients' rights in the Source Code
+Form.
+
+3.2. Distribution of Executable Form
+
+If You distribute Covered Software in Executable Form then:
+
+(a) such Covered Software must also be made available in Source Code
+ Form, as described in Section 3.1, and You must inform recipients of
+ the Executable Form how they can obtain a copy of such Source Code
+ Form by reasonable means in a timely manner, at a charge no more
+ than the cost of distribution to the recipient; and
+
+(b) You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter
+ the recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+You may create and distribute a Larger Work under terms of Your choice,
+provided that You also comply with the requirements of this License for
+the Covered Software. If the Larger Work is a combination of Covered
+Software with a work governed by one or more Secondary Licenses, and the
+Covered Software is not Incompatible With Secondary Licenses, this
+License permits You to additionally distribute such Covered Software
+under the terms of such Secondary License(s), so that the recipient of
+the Larger Work may, at their option, further distribute the Covered
+Software under the terms of either this License or such Secondary
+License(s).
+
+3.4. Notices
+
+You may not remove or alter the substance of any license notices
+(including copyright notices, patent notices, disclaimers of warranty,
+or limitations of liability) contained within the Source Code Form of
+the Covered Software, except that You may alter any license notices to
+the extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+You may choose to offer, and to charge a fee for, warranty, support,
+indemnity or liability obligations to one or more recipients of Covered
+Software. However, You may do so only on Your own behalf, and not on
+behalf of any Contributor. You must make it absolutely clear that any
+such warranty, support, indemnity, or liability obligation is offered by
+You alone, and You hereby agree to indemnify every Contributor for any
+liability incurred by such Contributor as a result of warranty, support,
+indemnity or liability terms You offer. You may include additional
+disclaimers of warranty and limitations of liability specific to any
+jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+---------------------------------------------------
+
+If it is impossible for You to comply with any of the terms of this
+License with respect to some or all of the Covered Software due to
+statute, judicial order, or regulation then You must: (a) comply with
+the terms of this License to the maximum extent possible; and (b)
+describe the limitations and the code they affect. Such description must
+be placed in a text file included with all distributions of the Covered
+Software under this License. Except to the extent prohibited by statute
+or regulation, such description must be sufficiently detailed for a
+recipient of ordinary skill to be able to understand it.
+
+5. Termination
+--------------
+
+5.1. The rights granted under this License will terminate automatically
+if You fail to comply with any of its terms. However, if You become
+compliant, then the rights granted under this License from a particular
+Contributor are reinstated (a) provisionally, unless and until such
+Contributor explicitly and finally terminates Your grants, and (b) on an
+ongoing basis, if such Contributor fails to notify You of the
+non-compliance by some reasonable means prior to 60 days after You have
+come back into compliance. Moreover, Your grants from a particular
+Contributor are reinstated on an ongoing basis if such Contributor
+notifies You of the non-compliance by some reasonable means, this is the
+first time You have received notice of non-compliance with this License
+from such Contributor, and You become compliant prior to 30 days after
+Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+infringement claim (excluding declaratory judgment actions,
+counter-claims, and cross-claims) alleging that a Contributor Version
+directly or indirectly infringes any patent, then the rights granted to
+You by any and all Contributors for the Covered Software under Section
+2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all
+end user license agreements (excluding distributors and resellers) which
+have been validly granted by You or Your distributors under this License
+prior to termination shall survive termination.
+
+************************************************************************
+* *
+* 6. Disclaimer of Warranty *
+* ------------------------- *
+* *
+* Covered Software is provided under this License on an "as is" *
+* basis, without warranty of any kind, either expressed, implied, or *
+* statutory, including, without limitation, warranties that the *
+* Covered Software is free of defects, merchantable, fit for a *
+* particular purpose or non-infringing. The entire risk as to the *
+* quality and performance of the Covered Software is with You. *
+* Should any Covered Software prove defective in any respect, You *
+* (not any Contributor) assume the cost of any necessary servicing, *
+* repair, or correction. This disclaimer of warranty constitutes an *
+* essential part of this License. No use of any Covered Software is *
+* authorized under this License except under this disclaimer. *
+* *
+************************************************************************
+
+************************************************************************
+* *
+* 7. Limitation of Liability *
+* -------------------------- *
+* *
+* Under no circumstances and under no legal theory, whether tort *
+* (including negligence), contract, or otherwise, shall any *
+* Contributor, or anyone who distributes Covered Software as *
+* permitted above, be liable to You for any direct, indirect, *
+* special, incidental, or consequential damages of any character *
+* including, without limitation, damages for lost profits, loss of *
+* goodwill, work stoppage, computer failure or malfunction, or any *
+* and all other commercial damages or losses, even if such party *
+* shall have been informed of the possibility of such damages. This *
+* limitation of liability shall not apply to liability for death or *
+* personal injury resulting from such party's negligence to the *
+* extent applicable law prohibits such limitation. Some *
+* jurisdictions do not allow the exclusion or limitation of *
+* incidental or consequential damages, so this exclusion and *
+* limitation may not apply to You. *
+* *
+************************************************************************
+
+8. Litigation
+-------------
+
+Any litigation relating to this License may be brought only in the
+courts of a jurisdiction where the defendant maintains its principal
+place of business and such litigation shall be governed by laws of that
+jurisdiction, without reference to its conflict-of-law provisions.
+Nothing in this Section shall prevent a party's ability to bring
+cross-claims or counter-claims.
+
+9. Miscellaneous
+----------------
+
+This License represents the complete agreement concerning the subject
+matter hereof. If any provision of this License is held to be
+unenforceable, such provision shall be reformed only to the extent
+necessary to make it enforceable. Any law or regulation which provides
+that the language of a contract shall be construed against the drafter
+shall not be used to construe this License against a Contributor.
+
+10. Versions of the License
+---------------------------
+
+10.1. New Versions
+
+Mozilla Foundation is the license steward. Except as provided in Section
+10.3, no one other than the license steward has the right to modify or
+publish new versions of this License. Each version will be given a
+distinguishing version number.
+
+10.2. Effect of New Versions
+
+You may distribute the Covered Software under the terms of the version
+of the License under which You originally received the Covered Software,
+or under the terms of any subsequent version published by the license
+steward.
+
+10.3. Modified Versions
+
+If you create software not governed by this License, and you want to
+create a new license for such software, you may create and use a
+modified version of this License if you rename the license and remove
+any references to the name of the license steward (except to note that
+such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+Licenses
+
+If You choose to distribute Source Code Form that is Incompatible With
+Secondary Licenses under the terms of this version of the License, the
+notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+-------------------------------------------
+
+ This Source Code Form is subject to the terms of the Mozilla Public
+ License, v. 2.0. If a copy of the MPL was not distributed with this
+ file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular
+file, then You may include the notice in a location (such as a LICENSE
+file in a relevant directory) where a recipient would be likely to look
+for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+---------------------------------------------------------
+
+ This Source Code Form is "Incompatible With Secondary Licenses", as
+ defined by the Mozilla Public License, v. 2.0.
diff --git a/deps/rabbitmq_sharding/LICENSE-MPL2 b/deps/rabbitmq_sharding/LICENSE-MPL2
new file mode 100644
index 0000000000..ee6256cdb6
--- /dev/null
+++ b/deps/rabbitmq_sharding/LICENSE-MPL2
@@ -0,0 +1,373 @@
+Mozilla Public License Version 2.0
+==================================
+
+1. Definitions
+--------------
+
+1.1. "Contributor"
+ means each individual or legal entity that creates, contributes to
+ the creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+ means the combination of the Contributions of others (if any) used
+ by a Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+ means Source Code Form to which the initial Contributor has attached
+ the notice in Exhibit A, the Executable Form of such Source Code
+ Form, and Modifications of such Source Code Form, in each case
+ including portions thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ (a) that the initial Contributor has attached the notice described
+ in Exhibit B to the Covered Software; or
+
+ (b) that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the
+ terms of a Secondary License.
+
+1.6. "Executable Form"
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+ means a work that combines Covered Software with other material, in
+ a separate file or files, that is not Covered Software.
+
+1.8. "License"
+ means this document.
+
+1.9. "Licensable"
+ means having the right to grant, to the maximum extent possible,
+ whether at the time of the initial grant or subsequently, any and
+ all of the rights conveyed by this License.
+
+1.10. "Modifications"
+ means any of the following:
+
+ (a) any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered
+ Software; or
+
+ (b) any new file in Source Code Form that contains any Covered
+ Software.
+
+1.11. "Patent Claims" of a Contributor
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the
+ License, by the making, using, selling, offering for sale, having
+ made, import, or transfer of either its Contributions or its
+ Contributor Version.
+
+1.12. "Secondary License"
+ means either the GNU General Public License, Version 2.0, the GNU
+ Lesser General Public License, Version 2.1, the GNU Affero General
+ Public License, Version 3.0, or any later versions of those
+ licenses.
+
+1.13. "Source Code Form"
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that
+ controls, is controlled by, or is under common control with You. For
+ purposes of this definition, "control" means (a) the power, direct
+ or indirect, to cause the direction or management of such entity,
+ whether by contract or otherwise, or (b) ownership of more than
+ fifty percent (50%) of the outstanding shares or beneficial
+ ownership of such entity.
+
+2. License Grants and Conditions
+--------------------------------
+
+2.1. Grants
+
+Each Contributor hereby grants You a world-wide, royalty-free,
+non-exclusive license:
+
+(a) under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+(b) under Patent Claims of such Contributor to make, use, sell, offer
+ for sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+The licenses granted in Section 2.1 with respect to any Contribution
+become effective for each Contribution on the date the Contributor first
+distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+The licenses granted in this Section 2 are the only rights granted under
+this License. No additional rights or licenses will be implied from the
+distribution or licensing of Covered Software under this License.
+Notwithstanding Section 2.1(b) above, no patent license is granted by a
+Contributor:
+
+(a) for any code that a Contributor has removed from Covered Software;
+ or
+
+(b) for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+(c) under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+This License does not grant any rights in the trademarks, service marks,
+or logos of any Contributor (except as may be necessary to comply with
+the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+No Contributor makes additional grants as a result of Your choice to
+distribute the Covered Software under a subsequent version of this
+License (see Section 10.2) or under the terms of a Secondary License (if
+permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+Each Contributor represents that the Contributor believes its
+Contributions are its original creation(s) or it has sufficient rights
+to grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+This License is not intended to limit any rights You have under
+applicable copyright doctrines of fair use, fair dealing, or other
+equivalents.
+
+2.7. Conditions
+
+Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
+in Section 2.1.
+
+3. Responsibilities
+-------------------
+
+3.1. Distribution of Source Form
+
+All distribution of Covered Software in Source Code Form, including any
+Modifications that You create or to which You contribute, must be under
+the terms of this License. You must inform recipients that the Source
+Code Form of the Covered Software is governed by the terms of this
+License, and how they can obtain a copy of this License. You may not
+attempt to alter or restrict the recipients' rights in the Source Code
+Form.
+
+3.2. Distribution of Executable Form
+
+If You distribute Covered Software in Executable Form then:
+
+(a) such Covered Software must also be made available in Source Code
+ Form, as described in Section 3.1, and You must inform recipients of
+ the Executable Form how they can obtain a copy of such Source Code
+ Form by reasonable means in a timely manner, at a charge no more
+ than the cost of distribution to the recipient; and
+
+(b) You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter
+ the recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+You may create and distribute a Larger Work under terms of Your choice,
+provided that You also comply with the requirements of this License for
+the Covered Software. If the Larger Work is a combination of Covered
+Software with a work governed by one or more Secondary Licenses, and the
+Covered Software is not Incompatible With Secondary Licenses, this
+License permits You to additionally distribute such Covered Software
+under the terms of such Secondary License(s), so that the recipient of
+the Larger Work may, at their option, further distribute the Covered
+Software under the terms of either this License or such Secondary
+License(s).
+
+3.4. Notices
+
+You may not remove or alter the substance of any license notices
+(including copyright notices, patent notices, disclaimers of warranty,
+or limitations of liability) contained within the Source Code Form of
+the Covered Software, except that You may alter any license notices to
+the extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+You may choose to offer, and to charge a fee for, warranty, support,
+indemnity or liability obligations to one or more recipients of Covered
+Software. However, You may do so only on Your own behalf, and not on
+behalf of any Contributor. You must make it absolutely clear that any
+such warranty, support, indemnity, or liability obligation is offered by
+You alone, and You hereby agree to indemnify every Contributor for any
+liability incurred by such Contributor as a result of warranty, support,
+indemnity or liability terms You offer. You may include additional
+disclaimers of warranty and limitations of liability specific to any
+jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+---------------------------------------------------
+
+If it is impossible for You to comply with any of the terms of this
+License with respect to some or all of the Covered Software due to
+statute, judicial order, or regulation then You must: (a) comply with
+the terms of this License to the maximum extent possible; and (b)
+describe the limitations and the code they affect. Such description must
+be placed in a text file included with all distributions of the Covered
+Software under this License. Except to the extent prohibited by statute
+or regulation, such description must be sufficiently detailed for a
+recipient of ordinary skill to be able to understand it.
+
+5. Termination
+--------------
+
+5.1. The rights granted under this License will terminate automatically
+if You fail to comply with any of its terms. However, if You become
+compliant, then the rights granted under this License from a particular
+Contributor are reinstated (a) provisionally, unless and until such
+Contributor explicitly and finally terminates Your grants, and (b) on an
+ongoing basis, if such Contributor fails to notify You of the
+non-compliance by some reasonable means prior to 60 days after You have
+come back into compliance. Moreover, Your grants from a particular
+Contributor are reinstated on an ongoing basis if such Contributor
+notifies You of the non-compliance by some reasonable means, this is the
+first time You have received notice of non-compliance with this License
+from such Contributor, and You become compliant prior to 30 days after
+Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+infringement claim (excluding declaratory judgment actions,
+counter-claims, and cross-claims) alleging that a Contributor Version
+directly or indirectly infringes any patent, then the rights granted to
+You by any and all Contributors for the Covered Software under Section
+2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all
+end user license agreements (excluding distributors and resellers) which
+have been validly granted by You or Your distributors under this License
+prior to termination shall survive termination.
+
+************************************************************************
+* *
+* 6. Disclaimer of Warranty *
+* ------------------------- *
+* *
+* Covered Software is provided under this License on an "as is" *
+* basis, without warranty of any kind, either expressed, implied, or *
+* statutory, including, without limitation, warranties that the *
+* Covered Software is free of defects, merchantable, fit for a *
+* particular purpose or non-infringing. The entire risk as to the *
+* quality and performance of the Covered Software is with You. *
+* Should any Covered Software prove defective in any respect, You *
+* (not any Contributor) assume the cost of any necessary servicing, *
+* repair, or correction. This disclaimer of warranty constitutes an *
+* essential part of this License. No use of any Covered Software is *
+* authorized under this License except under this disclaimer. *
+* *
+************************************************************************
+
+************************************************************************
+* *
+* 7. Limitation of Liability *
+* -------------------------- *
+* *
+* Under no circumstances and under no legal theory, whether tort *
+* (including negligence), contract, or otherwise, shall any *
+* Contributor, or anyone who distributes Covered Software as *
+* permitted above, be liable to You for any direct, indirect, *
+* special, incidental, or consequential damages of any character *
+* including, without limitation, damages for lost profits, loss of *
+* goodwill, work stoppage, computer failure or malfunction, or any *
+* and all other commercial damages or losses, even if such party *
+* shall have been informed of the possibility of such damages. This *
+* limitation of liability shall not apply to liability for death or *
+* personal injury resulting from such party's negligence to the *
+* extent applicable law prohibits such limitation. Some *
+* jurisdictions do not allow the exclusion or limitation of *
+* incidental or consequential damages, so this exclusion and *
+* limitation may not apply to You. *
+* *
+************************************************************************
+
+8. Litigation
+-------------
+
+Any litigation relating to this License may be brought only in the
+courts of a jurisdiction where the defendant maintains its principal
+place of business and such litigation shall be governed by laws of that
+jurisdiction, without reference to its conflict-of-law provisions.
+Nothing in this Section shall prevent a party's ability to bring
+cross-claims or counter-claims.
+
+9. Miscellaneous
+----------------
+
+This License represents the complete agreement concerning the subject
+matter hereof. If any provision of this License is held to be
+unenforceable, such provision shall be reformed only to the extent
+necessary to make it enforceable. Any law or regulation which provides
+that the language of a contract shall be construed against the drafter
+shall not be used to construe this License against a Contributor.
+
+10. Versions of the License
+---------------------------
+
+10.1. New Versions
+
+Mozilla Foundation is the license steward. Except as provided in Section
+10.3, no one other than the license steward has the right to modify or
+publish new versions of this License. Each version will be given a
+distinguishing version number.
+
+10.2. Effect of New Versions
+
+You may distribute the Covered Software under the terms of the version
+of the License under which You originally received the Covered Software,
+or under the terms of any subsequent version published by the license
+steward.
+
+10.3. Modified Versions
+
+If you create software not governed by this License, and you want to
+create a new license for such software, you may create and use a
+modified version of this License if you rename the license and remove
+any references to the name of the license steward (except to note that
+such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+Licenses
+
+If You choose to distribute Source Code Form that is Incompatible With
+Secondary Licenses under the terms of this version of the License, the
+notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+-------------------------------------------
+
+ This Source Code Form is subject to the terms of the Mozilla Public
+ License, v. 2.0. If a copy of the MPL was not distributed with this
+ file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular
+file, then You may include the notice in a location (such as a LICENSE
+file in a relevant directory) where a recipient would be likely to look
+for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+---------------------------------------------------------
+
+ This Source Code Form is "Incompatible With Secondary Licenses", as
+ defined by the Mozilla Public License, v. 2.0.
diff --git a/deps/rabbitmq_sharding/Makefile b/deps/rabbitmq_sharding/Makefile
new file mode 100644
index 0000000000..5d77580517
--- /dev/null
+++ b/deps/rabbitmq_sharding/Makefile
@@ -0,0 +1,21 @@
+PROJECT = rabbitmq_sharding
+PROJECT_DESCRIPTION = RabbitMQ Sharding Plugin
+
+define PROJECT_APP_EXTRA_KEYS
+ {broker_version_requirements, []}
+endef
+
+DEPS = rabbit_common rabbit
+TEST_DEPS = rabbitmq_ct_helpers rabbitmq_ct_client_helpers amqp_client
+
+DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-plugin.mk
+DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk
+
+# FIXME: Use erlang.mk patched for RabbitMQ, while waiting for PRs to be
+# reviewed and merged.
+
+ERLANG_MK_REPO = https://github.com/rabbitmq/erlang.mk.git
+ERLANG_MK_COMMIT = rabbitmq-tmp
+
+include rabbitmq-components.mk
+include erlang.mk
diff --git a/deps/rabbitmq_sharding/README.extra.md b/deps/rabbitmq_sharding/README.extra.md
new file mode 100644
index 0000000000..1c657a9da5
--- /dev/null
+++ b/deps/rabbitmq_sharding/README.extra.md
@@ -0,0 +1,79 @@
+# Additional information #
+
+Here you can find some extra information about how the plugin works
+and the reasons for it.
+
+## Why do we need this plugin? ##
+
+RabbitMQ queues are bound to the node where they were first
+declared. This means that even if you create a cluster of RabbitMQ
+brokers, at some point all message traffic will go to the node where
+the queue lives. What this plugin does is give you a centralized
+place to send your messages, plus __load balancing__ across many
+nodes, by adding queues to the other nodes in the cluster.
+
+The advantage of this setup is that the queues from where your
+consumers will get messages will be local to the node where they are
+connected. On the other hand, the producers don't need to care about
+what's behind the exchange.
+
+All the plumbing to __automatically maintain__ the shard queues is
+done by the plugin. If you add more nodes to the cluster, then the
+plugin will __automatically create queues in those nodes__.
+
+If you remove nodes from the cluster then RabbitMQ will take care of
+taking their queues out of the list of bound queues. Message loss can
+happen if a race occurs between a node going away and your message
+arriving at the shard exchange. If you can't afford to lose a
+message then you can use
+[publisher confirms](https://www.rabbitmq.com/confirms.html) to prevent
+message loss.
+
+## Message Ordering ##
+
+Message order is maintained per sharded queue, but not globally. This
+means that once a message has entered a queue, ordering is preserved
+for that queue and the set of consumers attached to it.
+
+If you need global ordering then stick with
+[mirrored queues](https://www.rabbitmq.com/ha.html).
+
+## What strategy is used for picking the queue name ##
+
+When you issue a `basic.consume`, the plugin will choose the queue
+with the _least amount of consumers_. The queue will be local to the
+broker your client is connected to. Of course the local sharded queue
+will be part of the set of queues that belong to the chosen shard.
+
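+A minimal sketch of that selection rule, assuming consumer counts are
+available as `{QueueName, ConsumerCount}` pairs (this representation is
+illustrative, not the plugin's actual data structure):
+
+```erlang
+%% Pick the shard queue with the fewest consumers.
+%% QueueInfos :: [{QueueName :: binary(), ConsumerCount :: non_neg_integer()}]
+least_consumers(QueueInfos) ->
+    [{Queue, _Count} | _] = lists:keysort(2, QueueInfos),
+    Queue.
+```
+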
+## Intercepted Channel Behaviour ##
+
+This plugin works with the new `channel interceptors`. An interceptor
+basically allows a plugin to modify parts of an AMQP method. For
+example, in this plugin's case, whenever a user sends a `basic.consume`,
+the plugin will map the queue name sent by the user to one of the
+sharded queues.
+
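+A hedged sketch of what such an interceptor looks like, following the
+`rabbit_channel_interceptor` behaviour from `rabbit_common`; the module
+name and the `rewrite/1` helper are illustrative stand-ins for the
+plugin's real queue-name mapping, and the exact callback signatures
+should be treated as indicative rather than definitive:
+
+```erlang
+-module(example_sharding_interceptor).
+-behaviour(rabbit_channel_interceptor).
+
+-include_lib("rabbit_common/include/rabbit_framing.hrl").
+
+-export([description/0, intercept/3, applies_to/0, init/1]).
+
+init(_Ch) -> undefined.
+
+description() ->
+    [{description, <<"Example: rewrites basic.consume queue names">>}].
+
+%% Only intercept the methods we care about.
+applies_to() -> ['basic.consume'].
+
+intercept(#'basic.consume'{queue = QName} = Method, Content, _State) ->
+    %% Map the logical queue name to one of the shard queues.
+    {Method#'basic.consume'{queue = rewrite(QName)}, Content}.
+
+%% Illustrative stand-in: the real plugin picks the local shard
+%% queue with the fewest consumers.
+rewrite(QName) -> QName.
+```
+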
+A plugin can also decide that a certain AMQP method can't be performed
+on a queue that's managed by the plugin. For example, declaring a queue
+called `my_shard` doesn't make much sense when there's already a
+sharded queue by that name; in that case the plugin will return a
+channel error to the user.
+
+These are the AMQP methods intercepted by the plugin, and the
+respective behaviour:
+
+- `'basic.consume', QueueName`: The plugin will pick the sharded queue
+ with the least amount of consumers from the `QueueName` shard.
+- `'basic.get', QueueName`: The plugin will pick the sharded queue
+ with the least amount of consumers from the `QueueName` shard.
+- `'queue.declare', QueueName`: The plugin rewrites `QueueName` to be
+ the first queue in the shard, so `queue.declare_ok` returns the stats
+ for that queue.
+- `'queue.bind', QueueName`: since there isn't an actual `QueueName`
+ queue, this method returns a channel error.
+- `'queue.unbind', QueueName`: since there isn't an actual `QueueName`
+ queue, this method returns a channel error.
+- `'queue.purge', QueueName`: since there isn't an actual `QueueName`
+ queue, this method returns a channel error.
+- `'queue.delete', QueueName`: since there isn't an actual `QueueName`
+ queue, this method returns a channel error.
diff --git a/deps/rabbitmq_sharding/README.md b/deps/rabbitmq_sharding/README.md
new file mode 100644
index 0000000000..2be6b44231
--- /dev/null
+++ b/deps/rabbitmq_sharding/README.md
@@ -0,0 +1,220 @@
+# RabbitMQ Sharding Plugin
+
+This plugin introduces the concept of sharded queues for
+RabbitMQ. Sharding is performed by exchanges, that is, messages
+will be partitioned across "shard" queues by an exchange that has been
+defined as sharded. The partitioning will be done automatically for you:
+once you define an exchange as _sharded_, the supporting queues will
+be automatically created on every cluster node and messages will be
+sharded across them.
+
+## Project Maturity
+
+This plugin is reasonably mature and known to have production users.
+
+## Overview
+
+The following graphic depicts how the plugin works from the standpoint
+of a publisher and a consumer:
+
+![Sharding Overview](https://raw.githubusercontent.com/rabbitmq/rabbitmq-sharding/master/docs/sharded_queues.png)
+
+In the picture above, the producer publishes a series of
+messages, those messages get partitioned across different queues, and
+each consumer gets messages from one of those queues. Therefore, if
+there is a partition with 3 queues, it is assumed that there are at
+least 3 consumers to receive all the messages from those queues.
+
+Queues in RabbitMQ are [units of concurrency](https://www.rabbitmq.com/queues.html#runtime-characteristics)
+(and, if there are enough cores available, parallelism). This plugin makes
+it possible to have a single logical queue that is partitioned into
+multiple regular queues ("shards"). This trades off total ordering
+on the logical queue for gains in parallelism.
+
+Message distribution between shards (partitioning) is achieved
+with a custom exchange type that distributes messages by applying
+a hashing function to the routing key.
+
+
+## Messages Distribution Between Shards (Partitioning)
+
+The exchanges that ship by default with RabbitMQ work in an "all or
+nothing" fashion, i.e: if a routing key matches a set of queues bound
+to the exchange, then RabbitMQ will route the message to all the
+queues in that set. For this plugin to work it is necessary to
+route messages to an exchange that would partition messages, so they
+are routed to _at most_ one queue (a subset).
+
+The plugin provides a new exchange type, `"x-modulus-hash"`, that will use
+a hashing function to partition messages routed to a logical queue
+across a number of regular queues (shards).
+
+The `"x-modulus-hash"` exchange will hash the routing key used to
+publish the message and then apply `Hash mod N` to pick the
+queue to route the message to, where N is the number of queues
+bound to the exchange. **This exchange will completely ignore the
+binding key used to bind the queue to the exchange**.
+
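+As a sketch of the idea (not the plugin's actual module), the pick can
+be expressed with `erlang:phash2/2`, which already returns a value in
+the range `[0, N)`:
+
+```erlang
+%% Queues is the list of queues currently bound to the exchange.
+pick_queue(RoutingKey, Queues) when Queues =/= [] ->
+    N = length(Queues),
+    Index = erlang:phash2(RoutingKey, N),  %% Hash mod N
+    lists:nth(Index + 1, Queues).          %% lists:nth/2 is 1-based
+```
+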
+There are other exchanges with similar behaviour:
+the _Consistent Hash Exchange_ or the _Random Exchange_.
+Those were designed with regular queues in mind, not this plugin, so `"x-modulus-hash"`
+is highly recommended.
+
+If message partitioning is the only feature necessary and the automatic scaling
+of the number of shards (covered below) is not needed or desired, consider using
+[Consistent Hash Exchange](https://github.com/rabbitmq/rabbitmq-consistent-hash-exchange)
+instead of this plugin.
+
+
+## Auto-scaling
+
+One of the main properties of this plugin is that when a new node
+is added to the RabbitMQ cluster, the plugin will automatically create
+more shards on the new node. Say there is a shard with 4 queues on
+`node a` and `node b` just joined the cluster. The plugin will
+automatically create 4 queues on `node b` and "join" them to the shard
+partition. Messages already routed to the existing shards _will not_ be
+rebalanced, but newly arriving messages will be partitioned across the new queues.
+
+
+## Consuming From a Sharded [Pseudo-]Queue ##
+
+While the plugin creates a bunch of "shard" queues behind the scenes, the idea
+is that those queues act like one big logical queue that you consume
+messages from. Total ordering of messages between shards is not defined.
+
+An example should illustrate this better: let's say you declared the
+exchange _images_ to be a sharded exchange. Then RabbitMQ creates
+several "shard" queues behind the scenes:
+
+ * _shard: - nodename images 1_
+ * _shard: - nodename images 2_
+ * _shard: - nodename images 3_
+ * _shard: - nodename images 4_.
+
+To consume from a sharded queue, register a consumer on the `"images"` pseudo-queue
+using the `basic.consume` method. RabbitMQ will attach the consumer to a shard
+behind the scenes. Note that **consumers must not declare a queue with the same
+name as the sharded pseudo-queue prior to consuming**.
+
+TL;DR: if you have a shard called _images_, then you can directly
+consume from a queue called _images_.
+
+How does it work? The plugin will choose the queue from the shard with
+the _least amount of consumers_, provided the queue contents are local
+to the broker you are connected to.
+
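+A minimal consuming sketch using the Erlang AMQP client, assuming a
+shard called `images` already exists (connection parameters are omitted
+for brevity; note there is no `queue.declare` for the pseudo-queue):
+
+```erlang
+{ok, Conn} = amqp_connection:start(#amqp_params_network{}),
+{ok, Chan} = amqp_connection:open_channel(Conn),
+%% Subscribe to the pseudo-queue; the plugin attaches us to a shard.
+amqp_channel:subscribe(Chan, #'basic.consume'{queue = <<"images">>}, self()),
+receive #'basic.consume_ok'{} -> ok end,
+receive
+    {#'basic.deliver'{delivery_tag = Tag}, #amqp_msg{payload = Payload}} ->
+        %% ... process Payload, then acknowledge ...
+        amqp_channel:cast(Chan, #'basic.ack'{delivery_tag = Tag})
+end.
+```
+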
+**NOTE: there's a small race condition between RabbitMQ updating the
+queue's internal stats about consumers and when clients issue
+`basic.consume` commands.** The problem with this is that if your
+clients issue many `basic.consume` commands in quick succession,
+the plugin may assign the consumers to queues in an uneven way.
+
+
+## Load Distribution and Consumer Balancing
+
+As of RabbitMQ 3.8.1, the plugin is no longer affected by the queue master locator policy when using mirrored queues. Please read below if you use a previous version.
+
+This plugin can be affected by [queue master locator policy used](https://www.rabbitmq.com/ha.html) in
+the cluster as well as client connection load balancing strategy.
+
+"Minimum masters" is a queue master locator that is most in line with the goals of
+this plugin.
+
+For load balancers, the "least connections" strategy is more likely to produce an even distribution compared
+to round robin and other strategies.
+
+### How Evenly Will Messages Be Distributed?
+
+As with many data distribution approaches based on a hashing function,
+even distribution between shards depends on the distribution (variability) of inputs,
+that is, routing keys. In other words, the larger the set of routing keys is,
+the more even message distribution between shards will be. If all messages had
+the same routing key, they would all end up on the same shard.
+
+
+
+## Installing ##
+
+### RabbitMQ 3.6.0 or later
+
+As of RabbitMQ `3.6.0` this plugin is included into the RabbitMQ distribution.
+
+Like any other [RabbitMQ plugin](https://www.rabbitmq.com/plugins.html) it has to be enabled before it can be used:
+
+```bash
+rabbitmq-plugins enable rabbitmq_sharding
+```
+
+You will probably want to enable the Consistent Hash Exchange
+plugin as well.
+
+### With Earlier Versions
+
+Install the corresponding .ez files from our
+[Community Plugins archive](https://www.rabbitmq.com/community-plugins/).
+
+Then run the following command:
+
+```bash
+rabbitmq-plugins enable rabbitmq_sharding
+```
+
+You will probably want to enable the Consistent Hash Exchange
+plugin as well.
+
+## Usage ##
+
+Once the plugin is installed you can define an exchange as sharded by
+setting up a policy that matches the exchange name. For example, if we
+have an exchange called `shard.images`, we could define the following
+policy to shard it:
+
+```bash
+$CTL set_policy images-shard "^shard.images$" '{"shards-per-node": 2, "routing-key": "1234"}'
+```
+
+This will create `2` sharded queues per node in the cluster, and will
+bind those queues using the `"1234"` routing key.
+
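+To exercise the policy, the exchange can then be declared with the
+`"x-modulus-hash"` type and published to as usual. A hedged sketch
+using the Erlang AMQP client (`Chan` is an open channel; the routing
+key and payload are illustrative):
+
+```erlang
+#'exchange.declare_ok'{} =
+    amqp_channel:call(Chan, #'exchange.declare'{
+                               exchange = <<"shard.images">>,
+                               type     = <<"x-modulus-hash">>}),
+amqp_channel:cast(Chan,
+                  #'basic.publish'{exchange    = <<"shard.images">>,
+                                   routing_key = <<"img-20391">>},
+                  #amqp_msg{payload = <<"image payload">>}).
+```
+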
+### About the routing-key policy definition ###
+
+In the example above we use the routing key `1234` when defining the
+policy. This means that the underlying exchanges used for sharding
+will bind the sharded queues to the exchange using the `1234` routing
+key specified above. For a direct exchange, _only messages that are
+published with the routing key `1234` will be routed to the sharded
+queues_. If you decide to use a fanout exchange for sharding, then the
+`1234` routing key, while used during binding, will be ignored by the
+exchange. If you use the `"x-modulus-hash"` exchange, the routing key
+will be ignored as well. So the effect the `routing-key` policy
+definition has on message routing depends on the exchange type you use.
+
+The `routing-key` policy definition is optional.
+
+
+## Building from Source
+
+Get the RabbitMQ Public Umbrella ready as explained in the
+[RabbitMQ Plugin Development Guide](https://www.rabbitmq.com/plugin-development.html).
+
+Move to the umbrella folder and then run the following commands to
+fetch dependencies:
+
+```bash
+make up
+cd deps/rabbitmq-sharding
+make dist
+```
+
+## LICENSE ##
+
+See the LICENSE file.
+
+## Extra information ##
+
+Some information about how the plugin affects message ordering and
+some other details can be found in the file README.extra.md
diff --git a/deps/rabbitmq_sharding/docs/sharded_queues.png b/deps/rabbitmq_sharding/docs/sharded_queues.png
new file mode 100644
index 0000000000..6ab26e660a
--- /dev/null
+++ b/deps/rabbitmq_sharding/docs/sharded_queues.png
Binary files differ
diff --git a/deps/rabbitmq_sharding/erlang.mk b/deps/rabbitmq_sharding/erlang.mk
new file mode 100644
index 0000000000..fce4be0b0a
--- /dev/null
+++ b/deps/rabbitmq_sharding/erlang.mk
@@ -0,0 +1,7808 @@
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+.PHONY: all app apps deps search rel relup docs install-docs check tests clean distclean help erlang-mk
+
+ERLANG_MK_FILENAME := $(realpath $(lastword $(MAKEFILE_LIST)))
+export ERLANG_MK_FILENAME
+
+ERLANG_MK_VERSION = 2019.07.01-40-geb3e4b0
+ERLANG_MK_WITHOUT =
+
+# Make 3.81 and 3.82 are deprecated.
+
+ifeq ($(MAKELEVEL)$(MAKE_VERSION),03.81)
+$(warning Please upgrade to GNU Make 4 or later: https://erlang.mk/guide/installation.html)
+endif
+
+ifeq ($(MAKELEVEL)$(MAKE_VERSION),03.82)
+$(warning Please upgrade to GNU Make 4 or later: https://erlang.mk/guide/installation.html)
+endif
+
+# Core configuration.
+
+PROJECT ?= $(notdir $(CURDIR))
+PROJECT := $(strip $(PROJECT))
+
+PROJECT_VERSION ?= rolling
+PROJECT_MOD ?= $(PROJECT)_app
+PROJECT_ENV ?= []
+
+# Verbosity.
+
+V ?= 0
+
+verbose_0 = @
+verbose_2 = set -x;
+verbose = $(verbose_$(V))
+
+ifeq ($(V),3)
+SHELL := $(SHELL) -x
+endif
+
+gen_verbose_0 = @echo " GEN " $@;
+gen_verbose_2 = set -x;
+gen_verbose = $(gen_verbose_$(V))
+
+gen_verbose_esc_0 = @echo " GEN " $$@;
+gen_verbose_esc_2 = set -x;
+gen_verbose_esc = $(gen_verbose_esc_$(V))
+
+# Temporary files directory.
+
+ERLANG_MK_TMP ?= $(CURDIR)/.erlang.mk
+export ERLANG_MK_TMP
+
+# "erl" command.
+
+ERL = erl +A1 -noinput -boot no_dot_erlang
+
+# Platform detection.
+
+ifeq ($(PLATFORM),)
+UNAME_S := $(shell uname -s)
+
+ifeq ($(UNAME_S),Linux)
+PLATFORM = linux
+else ifeq ($(UNAME_S),Darwin)
+PLATFORM = darwin
+else ifeq ($(UNAME_S),SunOS)
+PLATFORM = solaris
+else ifeq ($(UNAME_S),GNU)
+PLATFORM = gnu
+else ifeq ($(UNAME_S),FreeBSD)
+PLATFORM = freebsd
+else ifeq ($(UNAME_S),NetBSD)
+PLATFORM = netbsd
+else ifeq ($(UNAME_S),OpenBSD)
+PLATFORM = openbsd
+else ifeq ($(UNAME_S),DragonFly)
+PLATFORM = dragonfly
+else ifeq ($(shell uname -o),Msys)
+PLATFORM = msys2
+else
+$(error Unable to detect platform. Please open a ticket with the output of uname -a.)
+endif
+
+export PLATFORM
+endif
+
+# Core targets.
+
+all:: deps app rel
+
+# Noop to avoid a Make warning when there's nothing to do.
+rel::
+ $(verbose) :
+
+relup:: deps app
+
+check:: tests
+
+clean:: clean-crashdump
+
+clean-crashdump:
+ifneq ($(wildcard erl_crash.dump),)
+ $(gen_verbose) rm -f erl_crash.dump
+endif
+
+distclean:: clean distclean-tmp
+
+$(ERLANG_MK_TMP):
+ $(verbose) mkdir -p $(ERLANG_MK_TMP)
+
+distclean-tmp:
+ $(gen_verbose) rm -rf $(ERLANG_MK_TMP)
+
+help::
+ $(verbose) printf "%s\n" \
+ "erlang.mk (version $(ERLANG_MK_VERSION)) is distributed under the terms of the ISC License." \
+ "Copyright (c) 2013-2016 Loïc Hoguin <essen@ninenines.eu>" \
+ "" \
+ "Usage: [V=1] $(MAKE) [target]..." \
+ "" \
+ "Core targets:" \
+ " all Run deps, app and rel targets in that order" \
+ " app Compile the project" \
+ " deps Fetch dependencies (if needed) and compile them" \
+ " fetch-deps Fetch dependencies recursively (if needed) without compiling them" \
+ " list-deps List dependencies recursively on stdout" \
+ " search q=... Search for a package in the built-in index" \
+ " rel Build a release for this project, if applicable" \
+ " docs Build the documentation for this project" \
+ " install-docs Install the man pages for this project" \
+ " check Compile and run all tests and analysis for this project" \
+ " tests Run the tests for this project" \
+ " clean Delete temporary and output files from most targets" \
+ " distclean Delete all temporary and output files" \
+ " help Display this help and exit" \
+ " erlang-mk Update erlang.mk to the latest version"
+
+# Core functions.
+
+empty :=
+space := $(empty) $(empty)
+tab := $(empty)	$(empty)
+comma := ,
+
+define newline
+
+
+endef
+
+define comma_list
+$(subst $(space),$(comma),$(strip $(1)))
+endef
+
+define escape_dquotes
+$(subst ",\",$1)
+endef
+
+# The trailing "erlang.mk" argument makes Erlang scripts that call init:get_plain_arguments() happy.
+define erlang
+$(ERL) $2 -pz $(ERLANG_MK_TMP)/rebar/ebin -eval "$(subst $(newline),,$(call escape_dquotes,$1))" -- erlang.mk
+endef
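+
+# Illustrative sketch: recipes evaluate Erlang expressions through the
+# macro above; literal commas must be written as $(comma), e.g.
+#
+#   hello: ; $(verbose) $(call erlang,io:format("hello~n")$(comma) halt(0))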
+
+ifeq ($(PLATFORM),msys2)
+core_native_path = $(shell cygpath -m $1)
+else
+core_native_path = $1
+endif
+
+core_http_get = curl -Lf$(if $(filter-out 0,$(V)),,s)o $(call core_native_path,$1) $2
+
+core_eq = $(and $(findstring $(1),$(2)),$(findstring $(2),$(1)))
+
+# We skip files that contain spaces because they end up causing issues.
+core_find = $(if $(wildcard $1),$(shell find $(1:%/=%) \( -type l -o -type f \) -name $(subst *,\*,$2) | grep -v " "))
+
+core_lc = $(subst A,a,$(subst B,b,$(subst C,c,$(subst D,d,$(subst E,e,$(subst F,f,$(subst G,g,$(subst H,h,$(subst I,i,$(subst J,j,$(subst K,k,$(subst L,l,$(subst M,m,$(subst N,n,$(subst O,o,$(subst P,p,$(subst Q,q,$(subst R,r,$(subst S,s,$(subst T,t,$(subst U,u,$(subst V,v,$(subst W,w,$(subst X,x,$(subst Y,y,$(subst Z,z,$(1)))))))))))))))))))))))))))
+
+core_ls = $(filter-out $(1),$(shell echo $(1)))
+
+# @todo Use a solution that does not require using perl.
+core_relpath = $(shell perl -e 'use File::Spec; print File::Spec->abs2rel(@ARGV) . "\n"' $1 $2)
+
+define core_render
+ printf -- '$(subst $(newline),\n,$(subst %,%%,$(subst ','\'',$(subst $(tab),$(WS),$(call $(1))))))\n' > $(2)
+endef
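+
+# Illustrative sketch of the helpers above:
+#   $(call comma_list,a b c)   expands to a,b,c
+#   $(call core_eq,foo,foo)    is non-empty only when both sides are equal
+#   $(call core_lc,ABC)        expands to abc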
+
+# Automated update.
+
+ERLANG_MK_REPO ?= https://github.com/ninenines/erlang.mk
+ERLANG_MK_COMMIT ?=
+ERLANG_MK_BUILD_CONFIG ?= build.config
+ERLANG_MK_BUILD_DIR ?= .erlang.mk.build
+
+erlang-mk: WITHOUT ?= $(ERLANG_MK_WITHOUT)
+erlang-mk:
+ifdef ERLANG_MK_COMMIT
+ $(verbose) git clone $(ERLANG_MK_REPO) $(ERLANG_MK_BUILD_DIR)
+ $(verbose) cd $(ERLANG_MK_BUILD_DIR) && git checkout $(ERLANG_MK_COMMIT)
+else
+ $(verbose) git clone --depth 1 $(ERLANG_MK_REPO) $(ERLANG_MK_BUILD_DIR)
+endif
+ $(verbose) if [ -f $(ERLANG_MK_BUILD_CONFIG) ]; then cp $(ERLANG_MK_BUILD_CONFIG) $(ERLANG_MK_BUILD_DIR)/build.config; fi
+ $(gen_verbose) $(MAKE) --no-print-directory -C $(ERLANG_MK_BUILD_DIR) WITHOUT='$(strip $(WITHOUT))' UPGRADE=1
+ $(verbose) cp $(ERLANG_MK_BUILD_DIR)/erlang.mk ./erlang.mk
+ $(verbose) rm -rf $(ERLANG_MK_BUILD_DIR)
+ $(verbose) rm -rf $(ERLANG_MK_TMP)
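+
+# Illustrative sketch: the target above replaces this file in place, e.g.
+#
+#   $ make erlang-mk                               # track latest master
+#   $ make erlang-mk ERLANG_MK_COMMIT=2019.07.01   # pin to a tag or commit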
+
+# The erlang.mk package index is bundled in the default erlang.mk build.
+# Search for the string "copyright" to skip to the rest of the code.
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-kerl
+
+KERL_INSTALL_DIR ?= $(HOME)/erlang
+
+ifeq ($(strip $(KERL)),)
+KERL := $(ERLANG_MK_TMP)/kerl/kerl
+endif
+
+KERL_DIR = $(ERLANG_MK_TMP)/kerl
+
+export KERL
+
+KERL_GIT ?= https://github.com/kerl/kerl
+KERL_COMMIT ?= master
+
+KERL_MAKEFLAGS ?=
+
+OTP_GIT ?= https://github.com/erlang/otp
+
+define kerl_otp_target
+$(KERL_INSTALL_DIR)/$(1): $(KERL)
+ $(verbose) if [ ! -d $$@ ]; then \
+ MAKEFLAGS="$(KERL_MAKEFLAGS)" $(KERL) build git $(OTP_GIT) $(1) $(1); \
+ $(KERL) install $(1) $(KERL_INSTALL_DIR)/$(1); \
+ fi
+endef
+
+define kerl_hipe_target
+$(KERL_INSTALL_DIR)/$1-native: $(KERL)
+ $(verbose) if [ ! -d $$@ ]; then \
+ KERL_CONFIGURE_OPTIONS=--enable-native-libs \
+ MAKEFLAGS="$(KERL_MAKEFLAGS)" $(KERL) build git $(OTP_GIT) $1 $1-native; \
+ $(KERL) install $1-native $(KERL_INSTALL_DIR)/$1-native; \
+ fi
+endef
+
+$(KERL): $(KERL_DIR)
+
+$(KERL_DIR): | $(ERLANG_MK_TMP)
+ $(gen_verbose) git clone --depth 1 $(KERL_GIT) $(ERLANG_MK_TMP)/kerl
+ $(verbose) cd $(ERLANG_MK_TMP)/kerl && git checkout $(KERL_COMMIT)
+ $(verbose) chmod +x $(KERL)
+
+distclean:: distclean-kerl
+
+distclean-kerl:
+ $(gen_verbose) rm -rf $(KERL_DIR)
+
+# Allow users to select which version of Erlang/OTP to use for a project.
+
+ifneq ($(strip $(LATEST_ERLANG_OTP)),)
+# In some environments it is necessary to filter out master.
+ERLANG_OTP := $(notdir $(lastword $(sort\
+ $(filter-out $(KERL_INSTALL_DIR)/master $(KERL_INSTALL_DIR)/OTP_R%,\
+ $(filter-out %-rc1 %-rc2 %-rc3,$(wildcard $(KERL_INSTALL_DIR)/*[^-native]))))))
+endif
+
+ERLANG_OTP ?=
+ERLANG_HIPE ?=
+
+# Use kerl to enforce a specific Erlang/OTP version for a project.
+ifneq ($(strip $(ERLANG_OTP)),)
+export PATH := $(KERL_INSTALL_DIR)/$(ERLANG_OTP)/bin:$(PATH)
+SHELL := env PATH=$(PATH) $(SHELL)
+$(eval $(call kerl_otp_target,$(ERLANG_OTP)))
+
+# Build Erlang/OTP only if it doesn't already exist.
+ifeq ($(wildcard $(KERL_INSTALL_DIR)/$(ERLANG_OTP))$(BUILD_ERLANG_OTP),)
+$(info Building Erlang/OTP $(ERLANG_OTP)... Please wait...)
+$(shell $(MAKE) $(KERL_INSTALL_DIR)/$(ERLANG_OTP) ERLANG_OTP=$(ERLANG_OTP) BUILD_ERLANG_OTP=1 >&2)
+endif
+
+else
+# Same for a HiPE enabled VM.
+ifneq ($(strip $(ERLANG_HIPE)),)
+export PATH := $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native/bin:$(PATH)
+SHELL := env PATH=$(PATH) $(SHELL)
+$(eval $(call kerl_hipe_target,$(ERLANG_HIPE)))
+
+# Build Erlang/OTP only if it doesn't already exist.
+ifeq ($(wildcard $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native)$(BUILD_ERLANG_OTP),)
+$(info Building HiPE-enabled Erlang/OTP $(ERLANG_HIPE)... Please wait...)
+$(shell $(MAKE) $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native ERLANG_HIPE=$(ERLANG_HIPE) BUILD_ERLANG_OTP=1 >&2)
+endif
+
+endif
+endif
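+
+# Illustrative sketch: a project pins its Erlang/OTP version through kerl
+# by setting, before including erlang.mk, a name that matches an OTP git
+# tag, e.g.
+#
+#   ERLANG_OTP = OTP-23.1
+#
+# Alternatively, LATEST_ERLANG_OTP=1 picks the newest non-rc version
+# already installed under $(KERL_INSTALL_DIR).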
+
+PACKAGES += aberth
+pkg_aberth_name = aberth
+pkg_aberth_description = Generic BERT-RPC server in Erlang
+pkg_aberth_homepage = https://github.com/a13x/aberth
+pkg_aberth_fetch = git
+pkg_aberth_repo = https://github.com/a13x/aberth
+pkg_aberth_commit = master
+
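+# Illustrative sketch: packages from this index are pulled in by naming
+# them in a project's DEPS before including erlang.mk, e.g.
+#
+#   DEPS = cowboy
+#   dep_cowboy_commit = 2.8.0    # optionally override the pinned ref
+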
+PACKAGES += active
+pkg_active_name = active
+pkg_active_description = Active development for Erlang: rebuild and reload source/binary files while the VM is running
+pkg_active_homepage = https://github.com/proger/active
+pkg_active_fetch = git
+pkg_active_repo = https://github.com/proger/active
+pkg_active_commit = master
+
+PACKAGES += actordb_core
+pkg_actordb_core_name = actordb_core
+pkg_actordb_core_description = ActorDB main source
+pkg_actordb_core_homepage = http://www.actordb.com/
+pkg_actordb_core_fetch = git
+pkg_actordb_core_repo = https://github.com/biokoda/actordb_core
+pkg_actordb_core_commit = master
+
+PACKAGES += actordb_thrift
+pkg_actordb_thrift_name = actordb_thrift
+pkg_actordb_thrift_description = Thrift API for ActorDB
+pkg_actordb_thrift_homepage = http://www.actordb.com/
+pkg_actordb_thrift_fetch = git
+pkg_actordb_thrift_repo = https://github.com/biokoda/actordb_thrift
+pkg_actordb_thrift_commit = master
+
+PACKAGES += aleppo
+pkg_aleppo_name = aleppo
+pkg_aleppo_description = Alternative Erlang Pre-Processor
+pkg_aleppo_homepage = https://github.com/ErlyORM/aleppo
+pkg_aleppo_fetch = git
+pkg_aleppo_repo = https://github.com/ErlyORM/aleppo
+pkg_aleppo_commit = master
+
+PACKAGES += alog
+pkg_alog_name = alog
+pkg_alog_description = Simply the best logging framework for Erlang
+pkg_alog_homepage = https://github.com/siberian-fast-food/alogger
+pkg_alog_fetch = git
+pkg_alog_repo = https://github.com/siberian-fast-food/alogger
+pkg_alog_commit = master
+
+PACKAGES += amqp_client
+pkg_amqp_client_name = amqp_client
+pkg_amqp_client_description = RabbitMQ Erlang AMQP client
+pkg_amqp_client_homepage = https://www.rabbitmq.com/erlang-client-user-guide.html
+pkg_amqp_client_fetch = git
+pkg_amqp_client_repo = https://github.com/rabbitmq/rabbitmq-erlang-client.git
+pkg_amqp_client_commit = master
+
+PACKAGES += annotations
+pkg_annotations_name = annotations
+pkg_annotations_description = Simple code instrumentation utilities
+pkg_annotations_homepage = https://github.com/hyperthunk/annotations
+pkg_annotations_fetch = git
+pkg_annotations_repo = https://github.com/hyperthunk/annotations
+pkg_annotations_commit = master
+
+PACKAGES += antidote
+pkg_antidote_name = antidote
+pkg_antidote_description = Large-scale computation without synchronisation
+pkg_antidote_homepage = https://syncfree.lip6.fr/
+pkg_antidote_fetch = git
+pkg_antidote_repo = https://github.com/SyncFree/antidote
+pkg_antidote_commit = master
+
+PACKAGES += apns
+pkg_apns_name = apns
+pkg_apns_description = Apple Push Notification Server for Erlang
+pkg_apns_homepage = http://inaka.github.com/apns4erl
+pkg_apns_fetch = git
+pkg_apns_repo = https://github.com/inaka/apns4erl
+pkg_apns_commit = master
+
+PACKAGES += asciideck
+pkg_asciideck_name = asciideck
+pkg_asciideck_description = Asciidoc for Erlang.
+pkg_asciideck_homepage = https://ninenines.eu
+pkg_asciideck_fetch = git
+pkg_asciideck_repo = https://github.com/ninenines/asciideck
+pkg_asciideck_commit = master
+
+PACKAGES += azdht
+pkg_azdht_name = azdht
+pkg_azdht_description = Azureus Distributed Hash Table (DHT) in Erlang
+pkg_azdht_homepage = https://github.com/arcusfelis/azdht
+pkg_azdht_fetch = git
+pkg_azdht_repo = https://github.com/arcusfelis/azdht
+pkg_azdht_commit = master
+
+PACKAGES += backoff
+pkg_backoff_name = backoff
+pkg_backoff_description = Simple exponential backoffs in Erlang
+pkg_backoff_homepage = https://github.com/ferd/backoff
+pkg_backoff_fetch = git
+pkg_backoff_repo = https://github.com/ferd/backoff
+pkg_backoff_commit = master
+
+PACKAGES += barrel_tcp
+pkg_barrel_tcp_name = barrel_tcp
+pkg_barrel_tcp_description = barrel is a generic TCP acceptor pool with low latency in Erlang.
+pkg_barrel_tcp_homepage = https://github.com/benoitc-attic/barrel_tcp
+pkg_barrel_tcp_fetch = git
+pkg_barrel_tcp_repo = https://github.com/benoitc-attic/barrel_tcp
+pkg_barrel_tcp_commit = master
+
+PACKAGES += basho_bench
+pkg_basho_bench_name = basho_bench
+pkg_basho_bench_description = A load-generation and testing tool for basically whatever you can write a returning Erlang function for.
+pkg_basho_bench_homepage = https://github.com/basho/basho_bench
+pkg_basho_bench_fetch = git
+pkg_basho_bench_repo = https://github.com/basho/basho_bench
+pkg_basho_bench_commit = master
+
+PACKAGES += bcrypt
+pkg_bcrypt_name = bcrypt
+pkg_bcrypt_description = Bcrypt Erlang / C library
+pkg_bcrypt_homepage = https://github.com/erlangpack/bcrypt
+pkg_bcrypt_fetch = git
+pkg_bcrypt_repo = https://github.com/erlangpack/bcrypt.git
+pkg_bcrypt_commit = master
+
+PACKAGES += beam
+pkg_beam_name = beam
+pkg_beam_description = BEAM emulator written in Erlang
+pkg_beam_homepage = https://github.com/tonyrog/beam
+pkg_beam_fetch = git
+pkg_beam_repo = https://github.com/tonyrog/beam
+pkg_beam_commit = master
+
+PACKAGES += beanstalk
+pkg_beanstalk_name = beanstalk
+pkg_beanstalk_description = An Erlang client for beanstalkd
+pkg_beanstalk_homepage = https://github.com/tim/erlang-beanstalk
+pkg_beanstalk_fetch = git
+pkg_beanstalk_repo = https://github.com/tim/erlang-beanstalk
+pkg_beanstalk_commit = master
+
+PACKAGES += bear
+pkg_bear_name = bear
+pkg_bear_description = a set of statistics functions for erlang
+pkg_bear_homepage = https://github.com/boundary/bear
+pkg_bear_fetch = git
+pkg_bear_repo = https://github.com/boundary/bear
+pkg_bear_commit = master
+
+PACKAGES += bertconf
+pkg_bertconf_name = bertconf
+pkg_bertconf_description = Make ETS tables out of static BERT files that are auto-reloaded
+pkg_bertconf_homepage = https://github.com/ferd/bertconf
+pkg_bertconf_fetch = git
+pkg_bertconf_repo = https://github.com/ferd/bertconf
+pkg_bertconf_commit = master
+
+PACKAGES += bifrost
+pkg_bifrost_name = bifrost
+pkg_bifrost_description = Erlang FTP Server Framework
+pkg_bifrost_homepage = https://github.com/thorstadt/bifrost
+pkg_bifrost_fetch = git
+pkg_bifrost_repo = https://github.com/thorstadt/bifrost
+pkg_bifrost_commit = master
+
+PACKAGES += binpp
+pkg_binpp_name = binpp
+pkg_binpp_description = Erlang Binary Pretty Printer
+pkg_binpp_homepage = https://github.com/jtendo/binpp
+pkg_binpp_fetch = git
+pkg_binpp_repo = https://github.com/jtendo/binpp
+pkg_binpp_commit = master
+
+PACKAGES += bisect
+pkg_bisect_name = bisect
+pkg_bisect_description = Ordered fixed-size binary dictionary in Erlang
+pkg_bisect_homepage = https://github.com/knutin/bisect
+pkg_bisect_fetch = git
+pkg_bisect_repo = https://github.com/knutin/bisect
+pkg_bisect_commit = master
+
+PACKAGES += bitcask
+pkg_bitcask_name = bitcask
+pkg_bitcask_description = because you need another key/value storage engine
+pkg_bitcask_homepage = https://github.com/basho/bitcask
+pkg_bitcask_fetch = git
+pkg_bitcask_repo = https://github.com/basho/bitcask
+pkg_bitcask_commit = develop
+
+PACKAGES += bitstore
+pkg_bitstore_name = bitstore
+pkg_bitstore_description = A document based ontology development environment
+pkg_bitstore_homepage = https://github.com/bdionne/bitstore
+pkg_bitstore_fetch = git
+pkg_bitstore_repo = https://github.com/bdionne/bitstore
+pkg_bitstore_commit = master
+
+PACKAGES += bootstrap
+pkg_bootstrap_name = bootstrap
+pkg_bootstrap_description = A simple, yet powerful Erlang cluster bootstrapping application.
+pkg_bootstrap_homepage = https://github.com/schlagert/bootstrap
+pkg_bootstrap_fetch = git
+pkg_bootstrap_repo = https://github.com/schlagert/bootstrap
+pkg_bootstrap_commit = master
+
+PACKAGES += boss
+pkg_boss_name = boss
+pkg_boss_description = Erlang web MVC, now featuring Comet
+pkg_boss_homepage = https://github.com/ChicagoBoss/ChicagoBoss
+pkg_boss_fetch = git
+pkg_boss_repo = https://github.com/ChicagoBoss/ChicagoBoss
+pkg_boss_commit = master
+
+PACKAGES += boss_db
+pkg_boss_db_name = boss_db
+pkg_boss_db_description = BossDB: a sharded, caching, pooling, evented ORM for Erlang
+pkg_boss_db_homepage = https://github.com/ErlyORM/boss_db
+pkg_boss_db_fetch = git
+pkg_boss_db_repo = https://github.com/ErlyORM/boss_db
+pkg_boss_db_commit = master
+
+PACKAGES += brod
+pkg_brod_name = brod
+pkg_brod_description = Kafka client in Erlang
+pkg_brod_homepage = https://github.com/klarna/brod
+pkg_brod_fetch = git
+pkg_brod_repo = https://github.com/klarna/brod.git
+pkg_brod_commit = master
+
+PACKAGES += bson
+pkg_bson_name = bson
+pkg_bson_description = BSON documents in Erlang, see bsonspec.org
+pkg_bson_homepage = https://github.com/comtihon/bson-erlang
+pkg_bson_fetch = git
+pkg_bson_repo = https://github.com/comtihon/bson-erlang
+pkg_bson_commit = master
+
+PACKAGES += bullet
+pkg_bullet_name = bullet
+pkg_bullet_description = Simple, reliable, efficient streaming for Cowboy.
+pkg_bullet_homepage = http://ninenines.eu
+pkg_bullet_fetch = git
+pkg_bullet_repo = https://github.com/ninenines/bullet
+pkg_bullet_commit = master
+
+PACKAGES += cache
+pkg_cache_name = cache
+pkg_cache_description = Erlang in-memory cache
+pkg_cache_homepage = https://github.com/fogfish/cache
+pkg_cache_fetch = git
+pkg_cache_repo = https://github.com/fogfish/cache
+pkg_cache_commit = master
+
+PACKAGES += cake
+pkg_cake_name = cake
+pkg_cake_description = Really simple terminal colorization
+pkg_cake_homepage = https://github.com/darach/cake-erl
+pkg_cake_fetch = git
+pkg_cake_repo = https://github.com/darach/cake-erl
+pkg_cake_commit = master
+
+PACKAGES += carotene
+pkg_carotene_name = carotene
+pkg_carotene_description = Real-time server
+pkg_carotene_homepage = https://github.com/carotene/carotene
+pkg_carotene_fetch = git
+pkg_carotene_repo = https://github.com/carotene/carotene
+pkg_carotene_commit = master
+
+PACKAGES += cberl
+pkg_cberl_name = cberl
+pkg_cberl_description = NIF based Erlang bindings for Couchbase
+pkg_cberl_homepage = https://github.com/chitika/cberl
+pkg_cberl_fetch = git
+pkg_cberl_repo = https://github.com/chitika/cberl
+pkg_cberl_commit = master
+
+PACKAGES += cecho
+pkg_cecho_name = cecho
+pkg_cecho_description = An ncurses library for Erlang
+pkg_cecho_homepage = https://github.com/mazenharake/cecho
+pkg_cecho_fetch = git
+pkg_cecho_repo = https://github.com/mazenharake/cecho
+pkg_cecho_commit = master
+
+PACKAGES += cferl
+pkg_cferl_name = cferl
+pkg_cferl_description = Rackspace / OpenStack Cloud Files Erlang Client
+pkg_cferl_homepage = https://github.com/ddossot/cferl
+pkg_cferl_fetch = git
+pkg_cferl_repo = https://github.com/ddossot/cferl
+pkg_cferl_commit = master
+
+PACKAGES += chaos_monkey
+pkg_chaos_monkey_name = chaos_monkey
+pkg_chaos_monkey_description = This is The CHAOS MONKEY. It will kill your processes.
+pkg_chaos_monkey_homepage = https://github.com/dLuna/chaos_monkey
+pkg_chaos_monkey_fetch = git
+pkg_chaos_monkey_repo = https://github.com/dLuna/chaos_monkey
+pkg_chaos_monkey_commit = master
+
+PACKAGES += check_node
+pkg_check_node_name = check_node
+pkg_check_node_description = Nagios Scripts for monitoring Riak
+pkg_check_node_homepage = https://github.com/basho-labs/riak_nagios
+pkg_check_node_fetch = git
+pkg_check_node_repo = https://github.com/basho-labs/riak_nagios
+pkg_check_node_commit = master
+
+PACKAGES += chronos
+pkg_chronos_name = chronos
+pkg_chronos_description = Timer module for Erlang that makes it easy to abstract time out of the tests.
+pkg_chronos_homepage = https://github.com/lehoff/chronos
+pkg_chronos_fetch = git
+pkg_chronos_repo = https://github.com/lehoff/chronos
+pkg_chronos_commit = master
+
+PACKAGES += chumak
+pkg_chumak_name = chumak
+pkg_chumak_description = Pure Erlang implementation of ZeroMQ Message Transport Protocol.
+pkg_chumak_homepage = http://choven.ca
+pkg_chumak_fetch = git
+pkg_chumak_repo = https://github.com/chovencorp/chumak
+pkg_chumak_commit = master
+
+PACKAGES += cl
+pkg_cl_name = cl
+pkg_cl_description = OpenCL binding for Erlang
+pkg_cl_homepage = https://github.com/tonyrog/cl
+pkg_cl_fetch = git
+pkg_cl_repo = https://github.com/tonyrog/cl
+pkg_cl_commit = master
+
+PACKAGES += clique
+pkg_clique_name = clique
+pkg_clique_description = CLI Framework for Erlang
+pkg_clique_homepage = https://github.com/basho/clique
+pkg_clique_fetch = git
+pkg_clique_repo = https://github.com/basho/clique
+pkg_clique_commit = develop
+
+PACKAGES += cloudi_core
+pkg_cloudi_core_name = cloudi_core
+pkg_cloudi_core_description = CloudI internal service runtime
+pkg_cloudi_core_homepage = http://cloudi.org/
+pkg_cloudi_core_fetch = git
+pkg_cloudi_core_repo = https://github.com/CloudI/cloudi_core
+pkg_cloudi_core_commit = master
+
+PACKAGES += cloudi_service_api_requests
+pkg_cloudi_service_api_requests_name = cloudi_service_api_requests
+pkg_cloudi_service_api_requests_description = CloudI Service API requests (JSON-RPC/Erlang-term support)
+pkg_cloudi_service_api_requests_homepage = http://cloudi.org/
+pkg_cloudi_service_api_requests_fetch = git
+pkg_cloudi_service_api_requests_repo = https://github.com/CloudI/cloudi_service_api_requests
+pkg_cloudi_service_api_requests_commit = master
+
+PACKAGES += cloudi_service_db
+pkg_cloudi_service_db_name = cloudi_service_db
+pkg_cloudi_service_db_description = CloudI Database (in-memory/testing/generic)
+pkg_cloudi_service_db_homepage = http://cloudi.org/
+pkg_cloudi_service_db_fetch = git
+pkg_cloudi_service_db_repo = https://github.com/CloudI/cloudi_service_db
+pkg_cloudi_service_db_commit = master
+
+PACKAGES += cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_name = cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_description = Cassandra CloudI Service
+pkg_cloudi_service_db_cassandra_homepage = http://cloudi.org/
+pkg_cloudi_service_db_cassandra_fetch = git
+pkg_cloudi_service_db_cassandra_repo = https://github.com/CloudI/cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_commit = master
+
+PACKAGES += cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_name = cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_description = Cassandra CQL CloudI Service
+pkg_cloudi_service_db_cassandra_cql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_cassandra_cql_fetch = git
+pkg_cloudi_service_db_cassandra_cql_repo = https://github.com/CloudI/cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_commit = master
+
+PACKAGES += cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_name = cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_description = CouchDB CloudI Service
+pkg_cloudi_service_db_couchdb_homepage = http://cloudi.org/
+pkg_cloudi_service_db_couchdb_fetch = git
+pkg_cloudi_service_db_couchdb_repo = https://github.com/CloudI/cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_commit = master
+
+PACKAGES += cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_name = cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_description = elasticsearch CloudI Service
+pkg_cloudi_service_db_elasticsearch_homepage = http://cloudi.org/
+pkg_cloudi_service_db_elasticsearch_fetch = git
+pkg_cloudi_service_db_elasticsearch_repo = https://github.com/CloudI/cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_commit = master
+
+PACKAGES += cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_name = cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_description = memcached CloudI Service
+pkg_cloudi_service_db_memcached_homepage = http://cloudi.org/
+pkg_cloudi_service_db_memcached_fetch = git
+pkg_cloudi_service_db_memcached_repo = https://github.com/CloudI/cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_commit = master
+
+PACKAGES += cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_name = cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_description = MySQL CloudI Service
+pkg_cloudi_service_db_mysql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_mysql_fetch = git
+pkg_cloudi_service_db_mysql_repo = https://github.com/CloudI/cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_commit = master
+
+PACKAGES += cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_name = cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_description = PostgreSQL CloudI Service
+pkg_cloudi_service_db_pgsql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_pgsql_fetch = git
+pkg_cloudi_service_db_pgsql_repo = https://github.com/CloudI/cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_commit = master
+
+PACKAGES += cloudi_service_db_riak
+pkg_cloudi_service_db_riak_name = cloudi_service_db_riak
+pkg_cloudi_service_db_riak_description = Riak CloudI Service
+pkg_cloudi_service_db_riak_homepage = http://cloudi.org/
+pkg_cloudi_service_db_riak_fetch = git
+pkg_cloudi_service_db_riak_repo = https://github.com/CloudI/cloudi_service_db_riak
+pkg_cloudi_service_db_riak_commit = master
+
+PACKAGES += cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_name = cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_description = Tokyo Tyrant CloudI Service
+pkg_cloudi_service_db_tokyotyrant_homepage = http://cloudi.org/
+pkg_cloudi_service_db_tokyotyrant_fetch = git
+pkg_cloudi_service_db_tokyotyrant_repo = https://github.com/CloudI/cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_commit = master
+
+PACKAGES += cloudi_service_filesystem
+pkg_cloudi_service_filesystem_name = cloudi_service_filesystem
+pkg_cloudi_service_filesystem_description = Filesystem CloudI Service
+pkg_cloudi_service_filesystem_homepage = http://cloudi.org/
+pkg_cloudi_service_filesystem_fetch = git
+pkg_cloudi_service_filesystem_repo = https://github.com/CloudI/cloudi_service_filesystem
+pkg_cloudi_service_filesystem_commit = master
+
+PACKAGES += cloudi_service_http_client
+pkg_cloudi_service_http_client_name = cloudi_service_http_client
+pkg_cloudi_service_http_client_description = HTTP client CloudI Service
+pkg_cloudi_service_http_client_homepage = http://cloudi.org/
+pkg_cloudi_service_http_client_fetch = git
+pkg_cloudi_service_http_client_repo = https://github.com/CloudI/cloudi_service_http_client
+pkg_cloudi_service_http_client_commit = master
+
+PACKAGES += cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_name = cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_description = cowboy HTTP/HTTPS CloudI Service
+pkg_cloudi_service_http_cowboy_homepage = http://cloudi.org/
+pkg_cloudi_service_http_cowboy_fetch = git
+pkg_cloudi_service_http_cowboy_repo = https://github.com/CloudI/cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_commit = master
+
+PACKAGES += cloudi_service_http_elli
+pkg_cloudi_service_http_elli_name = cloudi_service_http_elli
+pkg_cloudi_service_http_elli_description = elli HTTP CloudI Service
+pkg_cloudi_service_http_elli_homepage = http://cloudi.org/
+pkg_cloudi_service_http_elli_fetch = git
+pkg_cloudi_service_http_elli_repo = https://github.com/CloudI/cloudi_service_http_elli
+pkg_cloudi_service_http_elli_commit = master
+
+PACKAGES += cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_name = cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_description = Map/Reduce CloudI Service
+pkg_cloudi_service_map_reduce_homepage = http://cloudi.org/
+pkg_cloudi_service_map_reduce_fetch = git
+pkg_cloudi_service_map_reduce_repo = https://github.com/CloudI/cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_commit = master
+
+PACKAGES += cloudi_service_oauth1
+pkg_cloudi_service_oauth1_name = cloudi_service_oauth1
+pkg_cloudi_service_oauth1_description = OAuth v1.0 CloudI Service
+pkg_cloudi_service_oauth1_homepage = http://cloudi.org/
+pkg_cloudi_service_oauth1_fetch = git
+pkg_cloudi_service_oauth1_repo = https://github.com/CloudI/cloudi_service_oauth1
+pkg_cloudi_service_oauth1_commit = master
+
+PACKAGES += cloudi_service_queue
+pkg_cloudi_service_queue_name = cloudi_service_queue
+pkg_cloudi_service_queue_description = Persistent Queue Service
+pkg_cloudi_service_queue_homepage = http://cloudi.org/
+pkg_cloudi_service_queue_fetch = git
+pkg_cloudi_service_queue_repo = https://github.com/CloudI/cloudi_service_queue
+pkg_cloudi_service_queue_commit = master
+
+PACKAGES += cloudi_service_quorum
+pkg_cloudi_service_quorum_name = cloudi_service_quorum
+pkg_cloudi_service_quorum_description = CloudI Quorum Service
+pkg_cloudi_service_quorum_homepage = http://cloudi.org/
+pkg_cloudi_service_quorum_fetch = git
+pkg_cloudi_service_quorum_repo = https://github.com/CloudI/cloudi_service_quorum
+pkg_cloudi_service_quorum_commit = master
+
+PACKAGES += cloudi_service_router
+pkg_cloudi_service_router_name = cloudi_service_router
+pkg_cloudi_service_router_description = CloudI Router Service
+pkg_cloudi_service_router_homepage = http://cloudi.org/
+pkg_cloudi_service_router_fetch = git
+pkg_cloudi_service_router_repo = https://github.com/CloudI/cloudi_service_router
+pkg_cloudi_service_router_commit = master
+
+PACKAGES += cloudi_service_tcp
+pkg_cloudi_service_tcp_name = cloudi_service_tcp
+pkg_cloudi_service_tcp_description = TCP CloudI Service
+pkg_cloudi_service_tcp_homepage = http://cloudi.org/
+pkg_cloudi_service_tcp_fetch = git
+pkg_cloudi_service_tcp_repo = https://github.com/CloudI/cloudi_service_tcp
+pkg_cloudi_service_tcp_commit = master
+
+PACKAGES += cloudi_service_timers
+pkg_cloudi_service_timers_name = cloudi_service_timers
+pkg_cloudi_service_timers_description = Timers CloudI Service
+pkg_cloudi_service_timers_homepage = http://cloudi.org/
+pkg_cloudi_service_timers_fetch = git
+pkg_cloudi_service_timers_repo = https://github.com/CloudI/cloudi_service_timers
+pkg_cloudi_service_timers_commit = master
+
+PACKAGES += cloudi_service_udp
+pkg_cloudi_service_udp_name = cloudi_service_udp
+pkg_cloudi_service_udp_description = UDP CloudI Service
+pkg_cloudi_service_udp_homepage = http://cloudi.org/
+pkg_cloudi_service_udp_fetch = git
+pkg_cloudi_service_udp_repo = https://github.com/CloudI/cloudi_service_udp
+pkg_cloudi_service_udp_commit = master
+
+PACKAGES += cloudi_service_validate
+pkg_cloudi_service_validate_name = cloudi_service_validate
+pkg_cloudi_service_validate_description = CloudI Validate Service
+pkg_cloudi_service_validate_homepage = http://cloudi.org/
+pkg_cloudi_service_validate_fetch = git
+pkg_cloudi_service_validate_repo = https://github.com/CloudI/cloudi_service_validate
+pkg_cloudi_service_validate_commit = master
+
+PACKAGES += cloudi_service_zeromq
+pkg_cloudi_service_zeromq_name = cloudi_service_zeromq
+pkg_cloudi_service_zeromq_description = ZeroMQ CloudI Service
+pkg_cloudi_service_zeromq_homepage = http://cloudi.org/
+pkg_cloudi_service_zeromq_fetch = git
+pkg_cloudi_service_zeromq_repo = https://github.com/CloudI/cloudi_service_zeromq
+pkg_cloudi_service_zeromq_commit = master
+
+PACKAGES += cluster_info
+pkg_cluster_info_name = cluster_info
+pkg_cluster_info_description = Fork of Hibari's nifty cluster_info OTP app
+pkg_cluster_info_homepage = https://github.com/basho/cluster_info
+pkg_cluster_info_fetch = git
+pkg_cluster_info_repo = https://github.com/basho/cluster_info
+pkg_cluster_info_commit = master
+
+PACKAGES += color
+pkg_color_name = color
+pkg_color_description = ANSI colors for your Erlang
+pkg_color_homepage = https://github.com/julianduque/erlang-color
+pkg_color_fetch = git
+pkg_color_repo = https://github.com/julianduque/erlang-color
+pkg_color_commit = master
+
+PACKAGES += confetti
+pkg_confetti_name = confetti
+pkg_confetti_description = Erlang configuration provider / application:get_env/2 on steroids
+pkg_confetti_homepage = https://github.com/jtendo/confetti
+pkg_confetti_fetch = git
+pkg_confetti_repo = https://github.com/jtendo/confetti
+pkg_confetti_commit = master
+
+PACKAGES += couchbeam
+pkg_couchbeam_name = couchbeam
+pkg_couchbeam_description = Apache CouchDB client in Erlang
+pkg_couchbeam_homepage = https://github.com/benoitc/couchbeam
+pkg_couchbeam_fetch = git
+pkg_couchbeam_repo = https://github.com/benoitc/couchbeam
+pkg_couchbeam_commit = master
+
+PACKAGES += covertool
+pkg_covertool_name = covertool
+pkg_covertool_description = Tool to convert Erlang cover data files into Cobertura XML reports
+pkg_covertool_homepage = https://github.com/idubrov/covertool
+pkg_covertool_fetch = git
+pkg_covertool_repo = https://github.com/idubrov/covertool
+pkg_covertool_commit = master
+
+PACKAGES += cowboy
+pkg_cowboy_name = cowboy
+pkg_cowboy_description = Small, fast and modular HTTP server.
+pkg_cowboy_homepage = http://ninenines.eu
+pkg_cowboy_fetch = git
+pkg_cowboy_repo = https://github.com/ninenines/cowboy
+pkg_cowboy_commit = 1.0.4
+
+PACKAGES += cowdb
+pkg_cowdb_name = cowdb
+pkg_cowdb_description = Pure Key/Value database library for Erlang Applications
+pkg_cowdb_homepage = https://github.com/refuge/cowdb
+pkg_cowdb_fetch = git
+pkg_cowdb_repo = https://github.com/refuge/cowdb
+pkg_cowdb_commit = master
+
+PACKAGES += cowlib
+pkg_cowlib_name = cowlib
+pkg_cowlib_description = Support library for manipulating Web protocols.
+pkg_cowlib_homepage = http://ninenines.eu
+pkg_cowlib_fetch = git
+pkg_cowlib_repo = https://github.com/ninenines/cowlib
+pkg_cowlib_commit = 1.0.2
+
+PACKAGES += cpg
+pkg_cpg_name = cpg
+pkg_cpg_description = CloudI Process Groups
+pkg_cpg_homepage = https://github.com/okeuday/cpg
+pkg_cpg_fetch = git
+pkg_cpg_repo = https://github.com/okeuday/cpg
+pkg_cpg_commit = master
+
+PACKAGES += cqerl
+pkg_cqerl_name = cqerl
+pkg_cqerl_description = Native Erlang CQL client for Cassandra
+pkg_cqerl_homepage = https://matehat.github.io/cqerl/
+pkg_cqerl_fetch = git
+pkg_cqerl_repo = https://github.com/matehat/cqerl
+pkg_cqerl_commit = master
+
+PACKAGES += cr
+pkg_cr_name = cr
+pkg_cr_description = Chain Replication
+pkg_cr_homepage = https://synrc.com/apps/cr/doc/cr.htm
+pkg_cr_fetch = git
+pkg_cr_repo = https://github.com/spawnproc/cr
+pkg_cr_commit = master
+
+PACKAGES += cuttlefish
+pkg_cuttlefish_name = cuttlefish
+pkg_cuttlefish_description = never lose your childlike sense of wonder baby cuttlefish, promise me?
+pkg_cuttlefish_homepage = https://github.com/basho/cuttlefish
+pkg_cuttlefish_fetch = git
+pkg_cuttlefish_repo = https://github.com/basho/cuttlefish
+pkg_cuttlefish_commit = master
+
+PACKAGES += damocles
+pkg_damocles_name = damocles
+pkg_damocles_description = Erlang library for generating adversarial network conditions for QAing distributed applications/systems on a single Linux box.
+pkg_damocles_homepage = https://github.com/lostcolony/damocles
+pkg_damocles_fetch = git
+pkg_damocles_repo = https://github.com/lostcolony/damocles
+pkg_damocles_commit = master
+
+PACKAGES += debbie
+pkg_debbie_name = debbie
+pkg_debbie_description = .DEB Built In Erlang
+pkg_debbie_homepage = https://github.com/crownedgrouse/debbie
+pkg_debbie_fetch = git
+pkg_debbie_repo = https://github.com/crownedgrouse/debbie
+pkg_debbie_commit = master
+
+PACKAGES += decimal
+pkg_decimal_name = decimal
+pkg_decimal_description = An Erlang decimal arithmetic library
+pkg_decimal_homepage = https://github.com/tim/erlang-decimal
+pkg_decimal_fetch = git
+pkg_decimal_repo = https://github.com/tim/erlang-decimal
+pkg_decimal_commit = master
+
+PACKAGES += detergent
+pkg_detergent_name = detergent
+pkg_detergent_description = An emulsifying Erlang SOAP library
+pkg_detergent_homepage = https://github.com/devinus/detergent
+pkg_detergent_fetch = git
+pkg_detergent_repo = https://github.com/devinus/detergent
+pkg_detergent_commit = master
+
+PACKAGES += detest
+pkg_detest_name = detest
+pkg_detest_description = Tool for running tests on a cluster of erlang nodes
+pkg_detest_homepage = https://github.com/biokoda/detest
+pkg_detest_fetch = git
+pkg_detest_repo = https://github.com/biokoda/detest
+pkg_detest_commit = master
+
+PACKAGES += dh_date
+pkg_dh_date_name = dh_date
+pkg_dh_date_description = Date formatting / parsing library for erlang
+pkg_dh_date_homepage = https://github.com/daleharvey/dh_date
+pkg_dh_date_fetch = git
+pkg_dh_date_repo = https://github.com/daleharvey/dh_date
+pkg_dh_date_commit = master
+
+PACKAGES += dirbusterl
+pkg_dirbusterl_name = dirbusterl
+pkg_dirbusterl_description = DirBuster successor in Erlang
+pkg_dirbusterl_homepage = https://github.com/silentsignal/DirBustErl
+pkg_dirbusterl_fetch = git
+pkg_dirbusterl_repo = https://github.com/silentsignal/DirBustErl
+pkg_dirbusterl_commit = master
+
+PACKAGES += dispcount
+pkg_dispcount_name = dispcount
+pkg_dispcount_description = Erlang task dispatcher based on ETS counters.
+pkg_dispcount_homepage = https://github.com/ferd/dispcount
+pkg_dispcount_fetch = git
+pkg_dispcount_repo = https://github.com/ferd/dispcount
+pkg_dispcount_commit = master
+
+PACKAGES += dlhttpc
+pkg_dlhttpc_name = dlhttpc
+pkg_dlhttpc_description = dispcount-based lhttpc fork for massive amounts of requests to limited endpoints
+pkg_dlhttpc_homepage = https://github.com/ferd/dlhttpc
+pkg_dlhttpc_fetch = git
+pkg_dlhttpc_repo = https://github.com/ferd/dlhttpc
+pkg_dlhttpc_commit = master
+
+PACKAGES += dns
+pkg_dns_name = dns
+pkg_dns_description = Erlang DNS library
+pkg_dns_homepage = https://github.com/aetrion/dns_erlang
+pkg_dns_fetch = git
+pkg_dns_repo = https://github.com/aetrion/dns_erlang
+pkg_dns_commit = master
+
+PACKAGES += dnssd
+pkg_dnssd_name = dnssd
+pkg_dnssd_description = Erlang interface to Apple's Bonjour DNS Service Discovery implementation
+pkg_dnssd_homepage = https://github.com/benoitc/dnssd_erlang
+pkg_dnssd_fetch = git
+pkg_dnssd_repo = https://github.com/benoitc/dnssd_erlang
+pkg_dnssd_commit = master
+
+PACKAGES += dynamic_compile
+pkg_dynamic_compile_name = dynamic_compile
+pkg_dynamic_compile_description = compile and load erlang modules from string input
+pkg_dynamic_compile_homepage = https://github.com/jkvor/dynamic_compile
+pkg_dynamic_compile_fetch = git
+pkg_dynamic_compile_repo = https://github.com/jkvor/dynamic_compile
+pkg_dynamic_compile_commit = master
+
+PACKAGES += e2
+pkg_e2_name = e2
+pkg_e2_description = Library to simplify writing correct OTP applications.
+pkg_e2_homepage = http://e2project.org
+pkg_e2_fetch = git
+pkg_e2_repo = https://github.com/gar1t/e2
+pkg_e2_commit = master
+
+PACKAGES += eamf
+pkg_eamf_name = eamf
+pkg_eamf_description = eAMF provides Action Message Format (AMF) support for Erlang
+pkg_eamf_homepage = https://github.com/mrinalwadhwa/eamf
+pkg_eamf_fetch = git
+pkg_eamf_repo = https://github.com/mrinalwadhwa/eamf
+pkg_eamf_commit = master
+
+PACKAGES += eavro
+pkg_eavro_name = eavro
+pkg_eavro_description = Apache Avro encoder/decoder
+pkg_eavro_homepage = https://github.com/SIfoxDevTeam/eavro
+pkg_eavro_fetch = git
+pkg_eavro_repo = https://github.com/SIfoxDevTeam/eavro
+pkg_eavro_commit = master
+
+PACKAGES += ecapnp
+pkg_ecapnp_name = ecapnp
+pkg_ecapnp_description = Cap'n Proto library for Erlang
+pkg_ecapnp_homepage = https://github.com/kaos/ecapnp
+pkg_ecapnp_fetch = git
+pkg_ecapnp_repo = https://github.com/kaos/ecapnp
+pkg_ecapnp_commit = master
+
+PACKAGES += econfig
+pkg_econfig_name = econfig
+pkg_econfig_description = simple Erlang config handler using INI files
+pkg_econfig_homepage = https://github.com/benoitc/econfig
+pkg_econfig_fetch = git
+pkg_econfig_repo = https://github.com/benoitc/econfig
+pkg_econfig_commit = master
+
+PACKAGES += edate
+pkg_edate_name = edate
+pkg_edate_description = date manipulation library for erlang
+pkg_edate_homepage = https://github.com/dweldon/edate
+pkg_edate_fetch = git
+pkg_edate_repo = https://github.com/dweldon/edate
+pkg_edate_commit = master
+
+PACKAGES += edgar
+pkg_edgar_name = edgar
+pkg_edgar_description = Erlang Does GNU AR
+pkg_edgar_homepage = https://github.com/crownedgrouse/edgar
+pkg_edgar_fetch = git
+pkg_edgar_repo = https://github.com/crownedgrouse/edgar
+pkg_edgar_commit = master
+
+PACKAGES += edis
+pkg_edis_name = edis
+pkg_edis_description = An Erlang implementation of Redis KV Store
+pkg_edis_homepage = http://inaka.github.com/edis/
+pkg_edis_fetch = git
+pkg_edis_repo = https://github.com/inaka/edis
+pkg_edis_commit = master
+
+PACKAGES += edns
+pkg_edns_name = edns
+pkg_edns_description = Erlang/OTP DNS server
+pkg_edns_homepage = https://github.com/hcvst/erlang-dns
+pkg_edns_fetch = git
+pkg_edns_repo = https://github.com/hcvst/erlang-dns
+pkg_edns_commit = master
+
+PACKAGES += edown
+pkg_edown_name = edown
+pkg_edown_description = EDoc extension for generating Github-flavored Markdown
+pkg_edown_homepage = https://github.com/uwiger/edown
+pkg_edown_fetch = git
+pkg_edown_repo = https://github.com/uwiger/edown
+pkg_edown_commit = master
+
+PACKAGES += eep
+pkg_eep_name = eep
+pkg_eep_description = Erlang Easy Profiling (eep) application provides a way to analyze application performance and call hierarchy
+pkg_eep_homepage = https://github.com/virtan/eep
+pkg_eep_fetch = git
+pkg_eep_repo = https://github.com/virtan/eep
+pkg_eep_commit = master
+
+PACKAGES += eep_app
+pkg_eep_app_name = eep_app
+pkg_eep_app_description = Embedded Event Processing
+pkg_eep_app_homepage = https://github.com/darach/eep-erl
+pkg_eep_app_fetch = git
+pkg_eep_app_repo = https://github.com/darach/eep-erl
+pkg_eep_app_commit = master
+
+PACKAGES += efene
+pkg_efene_name = efene
+pkg_efene_description = Alternative syntax for the Erlang Programming Language focusing on simplicity, ease of use and programmer UX
+pkg_efene_homepage = https://github.com/efene/efene
+pkg_efene_fetch = git
+pkg_efene_repo = https://github.com/efene/efene
+pkg_efene_commit = master
+
+PACKAGES += egeoip
+pkg_egeoip_name = egeoip
+pkg_egeoip_description = Erlang IP Geolocation module, currently supporting the MaxMind GeoLite City Database.
+pkg_egeoip_homepage = https://github.com/mochi/egeoip
+pkg_egeoip_fetch = git
+pkg_egeoip_repo = https://github.com/mochi/egeoip
+pkg_egeoip_commit = master
+
+PACKAGES += ehsa
+pkg_ehsa_name = ehsa
+pkg_ehsa_description = Erlang HTTP server basic and digest authentication modules
+pkg_ehsa_homepage = https://bitbucket.org/a12n/ehsa
+pkg_ehsa_fetch = hg
+pkg_ehsa_repo = https://bitbucket.org/a12n/ehsa
+pkg_ehsa_commit = default
+
+PACKAGES += ej
+pkg_ej_name = ej
+pkg_ej_description = Helper module for working with Erlang terms representing JSON
+pkg_ej_homepage = https://github.com/seth/ej
+pkg_ej_fetch = git
+pkg_ej_repo = https://github.com/seth/ej
+pkg_ej_commit = master
+
+PACKAGES += ejabberd
+pkg_ejabberd_name = ejabberd
+pkg_ejabberd_description = Robust, ubiquitous and massively scalable Jabber / XMPP Instant Messaging platform
+pkg_ejabberd_homepage = https://github.com/processone/ejabberd
+pkg_ejabberd_fetch = git
+pkg_ejabberd_repo = https://github.com/processone/ejabberd
+pkg_ejabberd_commit = master
+
+PACKAGES += ejwt
+pkg_ejwt_name = ejwt
+pkg_ejwt_description = erlang library for JSON Web Token
+pkg_ejwt_homepage = https://github.com/artefactop/ejwt
+pkg_ejwt_fetch = git
+pkg_ejwt_repo = https://github.com/artefactop/ejwt
+pkg_ejwt_commit = master
+
+PACKAGES += ekaf
+pkg_ekaf_name = ekaf
+pkg_ekaf_description = A minimal, high-performance Kafka client in Erlang.
+pkg_ekaf_homepage = https://github.com/helpshift/ekaf
+pkg_ekaf_fetch = git
+pkg_ekaf_repo = https://github.com/helpshift/ekaf
+pkg_ekaf_commit = master
+
+PACKAGES += elarm
+pkg_elarm_name = elarm
+pkg_elarm_description = Alarm Manager for Erlang.
+pkg_elarm_homepage = https://github.com/esl/elarm
+pkg_elarm_fetch = git
+pkg_elarm_repo = https://github.com/esl/elarm
+pkg_elarm_commit = master
+
+PACKAGES += eleveldb
+pkg_eleveldb_name = eleveldb
+pkg_eleveldb_description = Erlang LevelDB API
+pkg_eleveldb_homepage = https://github.com/basho/eleveldb
+pkg_eleveldb_fetch = git
+pkg_eleveldb_repo = https://github.com/basho/eleveldb
+pkg_eleveldb_commit = master
+
+PACKAGES += elixir
+pkg_elixir_name = elixir
+pkg_elixir_description = Elixir is a dynamic, functional language designed for building scalable and maintainable applications
+pkg_elixir_homepage = https://elixir-lang.org/
+pkg_elixir_fetch = git
+pkg_elixir_repo = https://github.com/elixir-lang/elixir
+pkg_elixir_commit = master
+
+PACKAGES += elli
+pkg_elli_name = elli
+pkg_elli_description = Simple, robust and performant Erlang web server
+pkg_elli_homepage = https://github.com/elli-lib/elli
+pkg_elli_fetch = git
+pkg_elli_repo = https://github.com/elli-lib/elli
+pkg_elli_commit = master
+
+PACKAGES += elvis
+pkg_elvis_name = elvis
+pkg_elvis_description = Erlang Style Reviewer
+pkg_elvis_homepage = https://github.com/inaka/elvis
+pkg_elvis_fetch = git
+pkg_elvis_repo = https://github.com/inaka/elvis
+pkg_elvis_commit = master
+
+PACKAGES += emagick
+pkg_emagick_name = emagick
+pkg_emagick_description = Wrapper for Graphics/ImageMagick command line tool.
+pkg_emagick_homepage = https://github.com/kivra/emagick
+pkg_emagick_fetch = git
+pkg_emagick_repo = https://github.com/kivra/emagick
+pkg_emagick_commit = master
+
+PACKAGES += emysql
+pkg_emysql_name = emysql
+pkg_emysql_description = Stable, pure Erlang MySQL driver.
+pkg_emysql_homepage = https://github.com/Eonblast/Emysql
+pkg_emysql_fetch = git
+pkg_emysql_repo = https://github.com/Eonblast/Emysql
+pkg_emysql_commit = master
+
+PACKAGES += enm
+pkg_enm_name = enm
+pkg_enm_description = Erlang driver for nanomsg
+pkg_enm_homepage = https://github.com/basho/enm
+pkg_enm_fetch = git
+pkg_enm_repo = https://github.com/basho/enm
+pkg_enm_commit = master
+
+PACKAGES += entop
+pkg_entop_name = entop
+pkg_entop_description = A top-like tool for monitoring an Erlang node
+pkg_entop_homepage = https://github.com/mazenharake/entop
+pkg_entop_fetch = git
+pkg_entop_repo = https://github.com/mazenharake/entop
+pkg_entop_commit = master
+
+PACKAGES += epcap
+pkg_epcap_name = epcap
+pkg_epcap_description = Erlang packet capture interface using pcap
+pkg_epcap_homepage = https://github.com/msantos/epcap
+pkg_epcap_fetch = git
+pkg_epcap_repo = https://github.com/msantos/epcap
+pkg_epcap_commit = master
+
+PACKAGES += eper
+pkg_eper_name = eper
+pkg_eper_description = Erlang performance and debugging tools.
+pkg_eper_homepage = https://github.com/massemanet/eper
+pkg_eper_fetch = git
+pkg_eper_repo = https://github.com/massemanet/eper
+pkg_eper_commit = master
+
+PACKAGES += epgsql
+pkg_epgsql_name = epgsql
+pkg_epgsql_description = Erlang PostgreSQL client library.
+pkg_epgsql_homepage = https://github.com/epgsql/epgsql
+pkg_epgsql_fetch = git
+pkg_epgsql_repo = https://github.com/epgsql/epgsql
+pkg_epgsql_commit = master
+
+PACKAGES += episcina
+pkg_episcina_name = episcina
+pkg_episcina_description = A simple, non-intrusive resource pool for connections
+pkg_episcina_homepage = https://github.com/erlware/episcina
+pkg_episcina_fetch = git
+pkg_episcina_repo = https://github.com/erlware/episcina
+pkg_episcina_commit = master
+
+PACKAGES += eplot
+pkg_eplot_name = eplot
+pkg_eplot_description = A plot engine written in erlang.
+pkg_eplot_homepage = https://github.com/psyeugenic/eplot
+pkg_eplot_fetch = git
+pkg_eplot_repo = https://github.com/psyeugenic/eplot
+pkg_eplot_commit = master
+
+PACKAGES += epocxy
+pkg_epocxy_name = epocxy
+pkg_epocxy_description = Erlang Patterns of Concurrency
+pkg_epocxy_homepage = https://github.com/duomark/epocxy
+pkg_epocxy_fetch = git
+pkg_epocxy_repo = https://github.com/duomark/epocxy
+pkg_epocxy_commit = master
+
+PACKAGES += epubnub
+pkg_epubnub_name = epubnub
+pkg_epubnub_description = Erlang PubNub API
+pkg_epubnub_homepage = https://github.com/tsloughter/epubnub
+pkg_epubnub_fetch = git
+pkg_epubnub_repo = https://github.com/tsloughter/epubnub
+pkg_epubnub_commit = master
+
+PACKAGES += eqm
+pkg_eqm_name = eqm
+pkg_eqm_description = Erlang pub sub with supply-demand channels
+pkg_eqm_homepage = https://github.com/loucash/eqm
+pkg_eqm_fetch = git
+pkg_eqm_repo = https://github.com/loucash/eqm
+pkg_eqm_commit = master
+
+PACKAGES += eredis
+pkg_eredis_name = eredis
+pkg_eredis_description = Erlang Redis client
+pkg_eredis_homepage = https://github.com/wooga/eredis
+pkg_eredis_fetch = git
+pkg_eredis_repo = https://github.com/wooga/eredis
+pkg_eredis_commit = master
+
+PACKAGES += eredis_pool
+pkg_eredis_pool_name = eredis_pool
+pkg_eredis_pool_description = eredis_pool is a pool of Redis clients, using eredis and poolboy.
+pkg_eredis_pool_homepage = https://github.com/hiroeorz/eredis_pool
+pkg_eredis_pool_fetch = git
+pkg_eredis_pool_repo = https://github.com/hiroeorz/eredis_pool
+pkg_eredis_pool_commit = master
+
+PACKAGES += erl_streams
+pkg_erl_streams_name = erl_streams
+pkg_erl_streams_description = Streams in Erlang
+pkg_erl_streams_homepage = https://github.com/epappas/erl_streams
+pkg_erl_streams_fetch = git
+pkg_erl_streams_repo = https://github.com/epappas/erl_streams
+pkg_erl_streams_commit = master
+
+PACKAGES += erlang_cep
+pkg_erlang_cep_name = erlang_cep
+pkg_erlang_cep_description = A basic CEP package written in erlang
+pkg_erlang_cep_homepage = https://github.com/danmacklin/erlang_cep
+pkg_erlang_cep_fetch = git
+pkg_erlang_cep_repo = https://github.com/danmacklin/erlang_cep
+pkg_erlang_cep_commit = master
+
+PACKAGES += erlang_js
+pkg_erlang_js_name = erlang_js
+pkg_erlang_js_description = A linked-in driver for Erlang to Mozilla's SpiderMonkey JavaScript runtime.
+pkg_erlang_js_homepage = https://github.com/basho/erlang_js
+pkg_erlang_js_fetch = git
+pkg_erlang_js_repo = https://github.com/basho/erlang_js
+pkg_erlang_js_commit = master
+
+PACKAGES += erlang_localtime
+pkg_erlang_localtime_name = erlang_localtime
+pkg_erlang_localtime_description = Erlang library for conversion from one local time to another
+pkg_erlang_localtime_homepage = https://github.com/dmitryme/erlang_localtime
+pkg_erlang_localtime_fetch = git
+pkg_erlang_localtime_repo = https://github.com/dmitryme/erlang_localtime
+pkg_erlang_localtime_commit = master
+
+PACKAGES += erlang_smtp
+pkg_erlang_smtp_name = erlang_smtp
+pkg_erlang_smtp_description = Erlang SMTP and POP3 server code.
+pkg_erlang_smtp_homepage = https://github.com/tonyg/erlang-smtp
+pkg_erlang_smtp_fetch = git
+pkg_erlang_smtp_repo = https://github.com/tonyg/erlang-smtp
+pkg_erlang_smtp_commit = master
+
+PACKAGES += erlang_term
+pkg_erlang_term_name = erlang_term
+pkg_erlang_term_description = Erlang Term Info
+pkg_erlang_term_homepage = https://github.com/okeuday/erlang_term
+pkg_erlang_term_fetch = git
+pkg_erlang_term_repo = https://github.com/okeuday/erlang_term
+pkg_erlang_term_commit = master
+
+PACKAGES += erlastic_search
+pkg_erlastic_search_name = erlastic_search
+pkg_erlastic_search_description = An Erlang app for communicating with Elasticsearch's REST interface.
+pkg_erlastic_search_homepage = https://github.com/tsloughter/erlastic_search
+pkg_erlastic_search_fetch = git
+pkg_erlastic_search_repo = https://github.com/tsloughter/erlastic_search
+pkg_erlastic_search_commit = master
+
+PACKAGES += erlasticsearch
+pkg_erlasticsearch_name = erlasticsearch
+pkg_erlasticsearch_description = Erlang thrift interface to elastic_search
+pkg_erlasticsearch_homepage = https://github.com/dieswaytoofast/erlasticsearch
+pkg_erlasticsearch_fetch = git
+pkg_erlasticsearch_repo = https://github.com/dieswaytoofast/erlasticsearch
+pkg_erlasticsearch_commit = master
+
+PACKAGES += erlbrake
+pkg_erlbrake_name = erlbrake
+pkg_erlbrake_description = Erlang Airbrake notification client
+pkg_erlbrake_homepage = https://github.com/kenpratt/erlbrake
+pkg_erlbrake_fetch = git
+pkg_erlbrake_repo = https://github.com/kenpratt/erlbrake
+pkg_erlbrake_commit = master
+
+PACKAGES += erlcloud
+pkg_erlcloud_name = erlcloud
+pkg_erlcloud_description = Cloud Computing library for erlang (Amazon EC2, S3, SQS, SimpleDB, Mechanical Turk, ELB)
+pkg_erlcloud_homepage = https://github.com/gleber/erlcloud
+pkg_erlcloud_fetch = git
+pkg_erlcloud_repo = https://github.com/gleber/erlcloud
+pkg_erlcloud_commit = master
+
+PACKAGES += erlcron
+pkg_erlcron_name = erlcron
+pkg_erlcron_description = Erlang cronish system
+pkg_erlcron_homepage = https://github.com/erlware/erlcron
+pkg_erlcron_fetch = git
+pkg_erlcron_repo = https://github.com/erlware/erlcron
+pkg_erlcron_commit = master
+
+PACKAGES += erldb
+pkg_erldb_name = erldb
+pkg_erldb_description = ORM (Object-relational mapping) application implemented in Erlang
+pkg_erldb_homepage = http://erldb.org
+pkg_erldb_fetch = git
+pkg_erldb_repo = https://github.com/erldb/erldb
+pkg_erldb_commit = master
+
+PACKAGES += erldis
+pkg_erldis_name = erldis
+pkg_erldis_description = redis erlang client library
+pkg_erldis_homepage = https://github.com/cstar/erldis
+pkg_erldis_fetch = git
+pkg_erldis_repo = https://github.com/cstar/erldis
+pkg_erldis_commit = master
+
+PACKAGES += erldns
+pkg_erldns_name = erldns
+pkg_erldns_description = DNS server, in erlang.
+pkg_erldns_homepage = https://github.com/aetrion/erl-dns
+pkg_erldns_fetch = git
+pkg_erldns_repo = https://github.com/aetrion/erl-dns
+pkg_erldns_commit = master
+
+PACKAGES += erldocker
+pkg_erldocker_name = erldocker
+pkg_erldocker_description = Docker Remote API client for Erlang
+pkg_erldocker_homepage = https://github.com/proger/erldocker
+pkg_erldocker_fetch = git
+pkg_erldocker_repo = https://github.com/proger/erldocker
+pkg_erldocker_commit = master
+
+PACKAGES += erlfsmon
+pkg_erlfsmon_name = erlfsmon
+pkg_erlfsmon_description = Erlang filesystem event watcher for Linux and OSX
+pkg_erlfsmon_homepage = https://github.com/proger/erlfsmon
+pkg_erlfsmon_fetch = git
+pkg_erlfsmon_repo = https://github.com/proger/erlfsmon
+pkg_erlfsmon_commit = master
+
+PACKAGES += erlgit
+pkg_erlgit_name = erlgit
+pkg_erlgit_description = Erlang convenience wrapper around git executable
+pkg_erlgit_homepage = https://github.com/gleber/erlgit
+pkg_erlgit_fetch = git
+pkg_erlgit_repo = https://github.com/gleber/erlgit
+pkg_erlgit_commit = master
+
+PACKAGES += erlguten
+pkg_erlguten_name = erlguten
+pkg_erlguten_description = ErlGuten is a system for high-quality typesetting, written purely in Erlang.
+pkg_erlguten_homepage = https://github.com/richcarl/erlguten
+pkg_erlguten_fetch = git
+pkg_erlguten_repo = https://github.com/richcarl/erlguten
+pkg_erlguten_commit = master
+
+PACKAGES += erlmc
+pkg_erlmc_name = erlmc
+pkg_erlmc_description = Erlang memcached binary protocol client
+pkg_erlmc_homepage = https://github.com/jkvor/erlmc
+pkg_erlmc_fetch = git
+pkg_erlmc_repo = https://github.com/jkvor/erlmc
+pkg_erlmc_commit = master
+
+PACKAGES += erlmongo
+pkg_erlmongo_name = erlmongo
+pkg_erlmongo_description = Record based Erlang driver for MongoDB with gridfs support
+pkg_erlmongo_homepage = https://github.com/SergejJurecko/erlmongo
+pkg_erlmongo_fetch = git
+pkg_erlmongo_repo = https://github.com/SergejJurecko/erlmongo
+pkg_erlmongo_commit = master
+
+PACKAGES += erlog
+pkg_erlog_name = erlog
+pkg_erlog_description = Prolog interpreter in and for Erlang
+pkg_erlog_homepage = https://github.com/rvirding/erlog
+pkg_erlog_fetch = git
+pkg_erlog_repo = https://github.com/rvirding/erlog
+pkg_erlog_commit = master
+
+PACKAGES += erlpass
+pkg_erlpass_name = erlpass
+pkg_erlpass_description = A library to handle password hashing and changing in a safe manner, independent from any kind of storage whatsoever.
+pkg_erlpass_homepage = https://github.com/ferd/erlpass
+pkg_erlpass_fetch = git
+pkg_erlpass_repo = https://github.com/ferd/erlpass
+pkg_erlpass_commit = master
+
+PACKAGES += erlport
+pkg_erlport_name = erlport
+pkg_erlport_description = ErlPort - connect Erlang to other languages
+pkg_erlport_homepage = https://github.com/hdima/erlport
+pkg_erlport_fetch = git
+pkg_erlport_repo = https://github.com/hdima/erlport
+pkg_erlport_commit = master
+
+PACKAGES += erlsh
+pkg_erlsh_name = erlsh
+pkg_erlsh_description = Erlang shell tools
+pkg_erlsh_homepage = https://github.com/proger/erlsh
+pkg_erlsh_fetch = git
+pkg_erlsh_repo = https://github.com/proger/erlsh
+pkg_erlsh_commit = master
+
+PACKAGES += erlsha2
+pkg_erlsha2_name = erlsha2
+pkg_erlsha2_description = SHA-224, SHA-256, SHA-384, SHA-512 implemented in Erlang NIFs.
+pkg_erlsha2_homepage = https://github.com/vinoski/erlsha2
+pkg_erlsha2_fetch = git
+pkg_erlsha2_repo = https://github.com/vinoski/erlsha2
+pkg_erlsha2_commit = master
+
+PACKAGES += erlsom
+pkg_erlsom_name = erlsom
+pkg_erlsom_description = XML parser for Erlang
+pkg_erlsom_homepage = https://github.com/willemdj/erlsom
+pkg_erlsom_fetch = git
+pkg_erlsom_repo = https://github.com/willemdj/erlsom
+pkg_erlsom_commit = master
+
+PACKAGES += erlubi
+pkg_erlubi_name = erlubi
+pkg_erlubi_description = Ubigraph Erlang Client (and Process Visualizer)
+pkg_erlubi_homepage = https://github.com/krestenkrab/erlubi
+pkg_erlubi_fetch = git
+pkg_erlubi_repo = https://github.com/krestenkrab/erlubi
+pkg_erlubi_commit = master
+
+PACKAGES += erlvolt
+pkg_erlvolt_name = erlvolt
+pkg_erlvolt_description = VoltDB Erlang Client Driver
+pkg_erlvolt_homepage = https://github.com/VoltDB/voltdb-client-erlang
+pkg_erlvolt_fetch = git
+pkg_erlvolt_repo = https://github.com/VoltDB/voltdb-client-erlang
+pkg_erlvolt_commit = master
+
+PACKAGES += erlware_commons
+pkg_erlware_commons_name = erlware_commons
+pkg_erlware_commons_description = Erlware Commons is an Erlware project focused on all aspects of reusable Erlang components.
+pkg_erlware_commons_homepage = https://github.com/erlware/erlware_commons
+pkg_erlware_commons_fetch = git
+pkg_erlware_commons_repo = https://github.com/erlware/erlware_commons
+pkg_erlware_commons_commit = master
+
+PACKAGES += erlydtl
+pkg_erlydtl_name = erlydtl
+pkg_erlydtl_description = Django Template Language for Erlang.
+pkg_erlydtl_homepage = https://github.com/erlydtl/erlydtl
+pkg_erlydtl_fetch = git
+pkg_erlydtl_repo = https://github.com/erlydtl/erlydtl
+pkg_erlydtl_commit = master
+
+PACKAGES += errd
+pkg_errd_name = errd
+pkg_errd_description = Erlang RRDTool library
+pkg_errd_homepage = https://github.com/archaelus/errd
+pkg_errd_fetch = git
+pkg_errd_repo = https://github.com/archaelus/errd
+pkg_errd_commit = master
+
+PACKAGES += erserve
+pkg_erserve_name = erserve
+pkg_erserve_description = Erlang/Rserve communication interface
+pkg_erserve_homepage = https://github.com/del/erserve
+pkg_erserve_fetch = git
+pkg_erserve_repo = https://github.com/del/erserve
+pkg_erserve_commit = master
+
+PACKAGES += erwa
+pkg_erwa_name = erwa
+pkg_erwa_description = A WAMP router and client written in Erlang.
+pkg_erwa_homepage = https://github.com/bwegh/erwa
+pkg_erwa_fetch = git
+pkg_erwa_repo = https://github.com/bwegh/erwa
+pkg_erwa_commit = master
+
+PACKAGES += escalus
+pkg_escalus_name = escalus
+pkg_escalus_description = An XMPP client library in Erlang for conveniently testing XMPP servers
+pkg_escalus_homepage = https://github.com/esl/escalus
+pkg_escalus_fetch = git
+pkg_escalus_repo = https://github.com/esl/escalus
+pkg_escalus_commit = master
+
+PACKAGES += esh_mk
+pkg_esh_mk_name = esh_mk
+pkg_esh_mk_description = esh template engine plugin for erlang.mk
+pkg_esh_mk_homepage = https://github.com/crownedgrouse/esh.mk
+pkg_esh_mk_fetch = git
+pkg_esh_mk_repo = https://github.com/crownedgrouse/esh.mk.git
+pkg_esh_mk_commit = master
+
+PACKAGES += espec
+pkg_espec_name = espec
+pkg_espec_description = ESpec: Behaviour driven development framework for Erlang
+pkg_espec_homepage = https://github.com/lucaspiller/espec
+pkg_espec_fetch = git
+pkg_espec_repo = https://github.com/lucaspiller/espec
+pkg_espec_commit = master
+
+PACKAGES += estatsd
+pkg_estatsd_name = estatsd
+pkg_estatsd_description = Erlang stats aggregation app that periodically flushes data to graphite
+pkg_estatsd_homepage = https://github.com/RJ/estatsd
+pkg_estatsd_fetch = git
+pkg_estatsd_repo = https://github.com/RJ/estatsd
+pkg_estatsd_commit = master
+
+PACKAGES += etap
+pkg_etap_name = etap
+pkg_etap_description = etap is a simple erlang testing library that provides TAP compliant output.
+pkg_etap_homepage = https://github.com/ngerakines/etap
+pkg_etap_fetch = git
+pkg_etap_repo = https://github.com/ngerakines/etap
+pkg_etap_commit = master
+
+PACKAGES += etest
+pkg_etest_name = etest
+pkg_etest_description = A lightweight, convention over configuration test framework for Erlang
+pkg_etest_homepage = https://github.com/wooga/etest
+pkg_etest_fetch = git
+pkg_etest_repo = https://github.com/wooga/etest
+pkg_etest_commit = master
+
+PACKAGES += etest_http
+pkg_etest_http_name = etest_http
+pkg_etest_http_description = etest Assertions around HTTP (client-side)
+pkg_etest_http_homepage = https://github.com/wooga/etest_http
+pkg_etest_http_fetch = git
+pkg_etest_http_repo = https://github.com/wooga/etest_http
+pkg_etest_http_commit = master
+
+PACKAGES += etoml
+pkg_etoml_name = etoml
+pkg_etoml_description = TOML language erlang parser
+pkg_etoml_homepage = https://github.com/kalta/etoml
+pkg_etoml_fetch = git
+pkg_etoml_repo = https://github.com/kalta/etoml
+pkg_etoml_commit = master
+
+PACKAGES += eunit
+pkg_eunit_name = eunit
+pkg_eunit_description = The EUnit lightweight unit testing framework for Erlang - this is the canonical development repository.
+pkg_eunit_homepage = https://github.com/richcarl/eunit
+pkg_eunit_fetch = git
+pkg_eunit_repo = https://github.com/richcarl/eunit
+pkg_eunit_commit = master
+
+PACKAGES += eunit_formatters
+pkg_eunit_formatters_name = eunit_formatters
+pkg_eunit_formatters_description = Because eunit's output sucks. Let's make it better.
+pkg_eunit_formatters_homepage = https://github.com/seancribbs/eunit_formatters
+pkg_eunit_formatters_fetch = git
+pkg_eunit_formatters_repo = https://github.com/seancribbs/eunit_formatters
+pkg_eunit_formatters_commit = master
+
+PACKAGES += euthanasia
+pkg_euthanasia_name = euthanasia
+pkg_euthanasia_description = Merciful killer for your Erlang processes
+pkg_euthanasia_homepage = https://github.com/doubleyou/euthanasia
+pkg_euthanasia_fetch = git
+pkg_euthanasia_repo = https://github.com/doubleyou/euthanasia
+pkg_euthanasia_commit = master
+
+PACKAGES += evum
+pkg_evum_name = evum
+pkg_evum_description = Spawn Linux VMs as Erlang processes in the Erlang VM
+pkg_evum_homepage = https://github.com/msantos/evum
+pkg_evum_fetch = git
+pkg_evum_repo = https://github.com/msantos/evum
+pkg_evum_commit = master
+
+PACKAGES += exec
+pkg_exec_name = erlexec
+pkg_exec_description = Execute and control OS processes from Erlang/OTP.
+pkg_exec_homepage = http://saleyn.github.com/erlexec
+pkg_exec_fetch = git
+pkg_exec_repo = https://github.com/saleyn/erlexec
+pkg_exec_commit = master
+
+PACKAGES += exml
+pkg_exml_name = exml
+pkg_exml_description = XML parsing library in Erlang
+pkg_exml_homepage = https://github.com/paulgray/exml
+pkg_exml_fetch = git
+pkg_exml_repo = https://github.com/paulgray/exml
+pkg_exml_commit = master
+
+PACKAGES += exometer
+pkg_exometer_name = exometer
+pkg_exometer_description = Basic measurement objects and probe behavior
+pkg_exometer_homepage = https://github.com/Feuerlabs/exometer
+pkg_exometer_fetch = git
+pkg_exometer_repo = https://github.com/Feuerlabs/exometer
+pkg_exometer_commit = master
+
+PACKAGES += exs1024
+pkg_exs1024_name = exs1024
+pkg_exs1024_description = Xorshift1024star pseudo random number generator for Erlang.
+pkg_exs1024_homepage = https://github.com/jj1bdx/exs1024
+pkg_exs1024_fetch = git
+pkg_exs1024_repo = https://github.com/jj1bdx/exs1024
+pkg_exs1024_commit = master
+
+PACKAGES += exs64
+pkg_exs64_name = exs64
+pkg_exs64_description = Xorshift64star pseudo random number generator for Erlang.
+pkg_exs64_homepage = https://github.com/jj1bdx/exs64
+pkg_exs64_fetch = git
+pkg_exs64_repo = https://github.com/jj1bdx/exs64
+pkg_exs64_commit = master
+
+PACKAGES += exsplus116
+pkg_exsplus116_name = exsplus116
+pkg_exsplus116_description = Xorshift116plus for Erlang
+pkg_exsplus116_homepage = https://github.com/jj1bdx/exsplus116
+pkg_exsplus116_fetch = git
+pkg_exsplus116_repo = https://github.com/jj1bdx/exsplus116
+pkg_exsplus116_commit = master
+
+PACKAGES += exsplus128
+pkg_exsplus128_name = exsplus128
+pkg_exsplus128_description = Xorshift128plus pseudo random number generator for Erlang.
+pkg_exsplus128_homepage = https://github.com/jj1bdx/exsplus128
+pkg_exsplus128_fetch = git
+pkg_exsplus128_repo = https://github.com/jj1bdx/exsplus128
+pkg_exsplus128_commit = master
+
+PACKAGES += ezmq
+pkg_ezmq_name = ezmq
+pkg_ezmq_description = ZeroMQ implemented in Erlang
+pkg_ezmq_homepage = https://github.com/RoadRunnr/ezmq
+pkg_ezmq_fetch = git
+pkg_ezmq_repo = https://github.com/RoadRunnr/ezmq
+pkg_ezmq_commit = master
+
+PACKAGES += ezmtp
+pkg_ezmtp_name = ezmtp
+pkg_ezmtp_description = ZMTP protocol in pure Erlang.
+pkg_ezmtp_homepage = https://github.com/a13x/ezmtp
+pkg_ezmtp_fetch = git
+pkg_ezmtp_repo = https://github.com/a13x/ezmtp
+pkg_ezmtp_commit = master
+
+PACKAGES += fast_disk_log
+pkg_fast_disk_log_name = fast_disk_log
+pkg_fast_disk_log_description = Pool-based asynchronous Erlang disk logger
+pkg_fast_disk_log_homepage = https://github.com/lpgauth/fast_disk_log
+pkg_fast_disk_log_fetch = git
+pkg_fast_disk_log_repo = https://github.com/lpgauth/fast_disk_log
+pkg_fast_disk_log_commit = master
+
+PACKAGES += feeder
+pkg_feeder_name = feeder
+pkg_feeder_description = Stream parse RSS and Atom formatted XML feeds.
+pkg_feeder_homepage = https://github.com/michaelnisi/feeder
+pkg_feeder_fetch = git
+pkg_feeder_repo = https://github.com/michaelnisi/feeder
+pkg_feeder_commit = master
+
+PACKAGES += find_crate
+pkg_find_crate_name = find_crate
+pkg_find_crate_description = Find Rust libs and exes in Erlang application priv directory
+pkg_find_crate_homepage = https://github.com/goertzenator/find_crate
+pkg_find_crate_fetch = git
+pkg_find_crate_repo = https://github.com/goertzenator/find_crate
+pkg_find_crate_commit = master
+
+PACKAGES += fix
+pkg_fix_name = fix
+pkg_fix_description = http://fixprotocol.org/ implementation.
+pkg_fix_homepage = https://github.com/maxlapshin/fix
+pkg_fix_fetch = git
+pkg_fix_repo = https://github.com/maxlapshin/fix
+pkg_fix_commit = master
+
+PACKAGES += flower
+pkg_flower_name = flower
+pkg_flower_description = FlowER - an Erlang OpenFlow development platform
+pkg_flower_homepage = https://github.com/travelping/flower
+pkg_flower_fetch = git
+pkg_flower_repo = https://github.com/travelping/flower
+pkg_flower_commit = master
+
+PACKAGES += fn
+pkg_fn_name = fn
+pkg_fn_description = Function utilities for Erlang
+pkg_fn_homepage = https://github.com/reiddraper/fn
+pkg_fn_fetch = git
+pkg_fn_repo = https://github.com/reiddraper/fn
+pkg_fn_commit = master
+
+PACKAGES += folsom
+pkg_folsom_name = folsom
+pkg_folsom_description = Expose Erlang Events and Metrics
+pkg_folsom_homepage = https://github.com/boundary/folsom
+pkg_folsom_fetch = git
+pkg_folsom_repo = https://github.com/boundary/folsom
+pkg_folsom_commit = master
+
+PACKAGES += folsom_cowboy
+pkg_folsom_cowboy_name = folsom_cowboy
+pkg_folsom_cowboy_description = A Cowboy based Folsom HTTP Wrapper.
+pkg_folsom_cowboy_homepage = https://github.com/boundary/folsom_cowboy
+pkg_folsom_cowboy_fetch = git
+pkg_folsom_cowboy_repo = https://github.com/boundary/folsom_cowboy
+pkg_folsom_cowboy_commit = master
+
+PACKAGES += folsomite
+pkg_folsomite_name = folsomite
+pkg_folsomite_description = blow up your graphite / riemann server with folsom metrics
+pkg_folsomite_homepage = https://github.com/campanja/folsomite
+pkg_folsomite_fetch = git
+pkg_folsomite_repo = https://github.com/campanja/folsomite
+pkg_folsomite_commit = master
+
+PACKAGES += fs
+pkg_fs_name = fs
+pkg_fs_description = Erlang FileSystem Listener
+pkg_fs_homepage = https://github.com/synrc/fs
+pkg_fs_fetch = git
+pkg_fs_repo = https://github.com/synrc/fs
+pkg_fs_commit = master
+
+PACKAGES += fuse
+pkg_fuse_name = fuse
+pkg_fuse_description = A Circuit Breaker for Erlang
+pkg_fuse_homepage = https://github.com/jlouis/fuse
+pkg_fuse_fetch = git
+pkg_fuse_repo = https://github.com/jlouis/fuse
+pkg_fuse_commit = master
+
+PACKAGES += gcm
+pkg_gcm_name = gcm
+pkg_gcm_description = An Erlang application for Google Cloud Messaging
+pkg_gcm_homepage = https://github.com/pdincau/gcm-erlang
+pkg_gcm_fetch = git
+pkg_gcm_repo = https://github.com/pdincau/gcm-erlang
+pkg_gcm_commit = master
+
+PACKAGES += gcprof
+pkg_gcprof_name = gcprof
+pkg_gcprof_description = Garbage Collection profiler for Erlang
+pkg_gcprof_homepage = https://github.com/knutin/gcprof
+pkg_gcprof_fetch = git
+pkg_gcprof_repo = https://github.com/knutin/gcprof
+pkg_gcprof_commit = master
+
+PACKAGES += geas
+pkg_geas_name = geas
+pkg_geas_description = Guess Erlang Application Scattering
+pkg_geas_homepage = https://github.com/crownedgrouse/geas
+pkg_geas_fetch = git
+pkg_geas_repo = https://github.com/crownedgrouse/geas
+pkg_geas_commit = master
+
+PACKAGES += geef
+pkg_geef_name = geef
+pkg_geef_description = Git NEEEEF (Erlang NIF)
+pkg_geef_homepage = https://github.com/carlosmn/geef
+pkg_geef_fetch = git
+pkg_geef_repo = https://github.com/carlosmn/geef
+pkg_geef_commit = master
+
+PACKAGES += gen_coap
+pkg_gen_coap_name = gen_coap
+pkg_gen_coap_description = Generic Erlang CoAP Client/Server
+pkg_gen_coap_homepage = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_fetch = git
+pkg_gen_coap_repo = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_commit = master
+
+PACKAGES += gen_cycle
+pkg_gen_cycle_name = gen_cycle
+pkg_gen_cycle_description = Simple, generic OTP behaviour for recurring tasks
+pkg_gen_cycle_homepage = https://github.com/aerosol/gen_cycle
+pkg_gen_cycle_fetch = git
+pkg_gen_cycle_repo = https://github.com/aerosol/gen_cycle
+pkg_gen_cycle_commit = develop
+
+PACKAGES += gen_icmp
+pkg_gen_icmp_name = gen_icmp
+pkg_gen_icmp_description = Erlang interface to ICMP sockets
+pkg_gen_icmp_homepage = https://github.com/msantos/gen_icmp
+pkg_gen_icmp_fetch = git
+pkg_gen_icmp_repo = https://github.com/msantos/gen_icmp
+pkg_gen_icmp_commit = master
+
+PACKAGES += gen_leader
+pkg_gen_leader_name = gen_leader
+pkg_gen_leader_description = leader election behavior
+pkg_gen_leader_homepage = https://github.com/garret-smith/gen_leader_revival
+pkg_gen_leader_fetch = git
+pkg_gen_leader_repo = https://github.com/garret-smith/gen_leader_revival
+pkg_gen_leader_commit = master
+
+PACKAGES += gen_nb_server
+pkg_gen_nb_server_name = gen_nb_server
+pkg_gen_nb_server_description = OTP behavior for writing non-blocking servers
+pkg_gen_nb_server_homepage = https://github.com/kevsmith/gen_nb_server
+pkg_gen_nb_server_fetch = git
+pkg_gen_nb_server_repo = https://github.com/kevsmith/gen_nb_server
+pkg_gen_nb_server_commit = master
+
+PACKAGES += gen_paxos
+pkg_gen_paxos_name = gen_paxos
+pkg_gen_paxos_description = An Erlang/OTP-style implementation of the PAXOS distributed consensus protocol
+pkg_gen_paxos_homepage = https://github.com/gburd/gen_paxos
+pkg_gen_paxos_fetch = git
+pkg_gen_paxos_repo = https://github.com/gburd/gen_paxos
+pkg_gen_paxos_commit = master
+
+PACKAGES += gen_rpc
+pkg_gen_rpc_name = gen_rpc
+pkg_gen_rpc_description = A scalable RPC library for Erlang-VM based languages
+pkg_gen_rpc_homepage = https://github.com/priestjim/gen_rpc.git
+pkg_gen_rpc_fetch = git
+pkg_gen_rpc_repo = https://github.com/priestjim/gen_rpc.git
+pkg_gen_rpc_commit = master
+
+PACKAGES += gen_smtp
+pkg_gen_smtp_name = gen_smtp
+pkg_gen_smtp_description = A generic Erlang SMTP server and client that can be extended via callback modules
+pkg_gen_smtp_homepage = https://github.com/Vagabond/gen_smtp
+pkg_gen_smtp_fetch = git
+pkg_gen_smtp_repo = https://github.com/Vagabond/gen_smtp
+pkg_gen_smtp_commit = master
+
+PACKAGES += gen_tracker
+pkg_gen_tracker_name = gen_tracker
+pkg_gen_tracker_description = supervisor with ets handling of children and their metadata
+pkg_gen_tracker_homepage = https://github.com/erlyvideo/gen_tracker
+pkg_gen_tracker_fetch = git
+pkg_gen_tracker_repo = https://github.com/erlyvideo/gen_tracker
+pkg_gen_tracker_commit = master
+
+PACKAGES += gen_unix
+pkg_gen_unix_name = gen_unix
+pkg_gen_unix_description = Erlang Unix socket interface
+pkg_gen_unix_homepage = https://github.com/msantos/gen_unix
+pkg_gen_unix_fetch = git
+pkg_gen_unix_repo = https://github.com/msantos/gen_unix
+pkg_gen_unix_commit = master
+
+PACKAGES += geode
+pkg_geode_name = geode
+pkg_geode_description = geohash/proximity lookup in pure, uncut erlang.
+pkg_geode_homepage = https://github.com/bradfordw/geode
+pkg_geode_fetch = git
+pkg_geode_repo = https://github.com/bradfordw/geode
+pkg_geode_commit = master
+
+PACKAGES += getopt
+pkg_getopt_name = getopt
+pkg_getopt_description = Module to parse command line arguments using the GNU getopt syntax
+pkg_getopt_homepage = https://github.com/jcomellas/getopt
+pkg_getopt_fetch = git
+pkg_getopt_repo = https://github.com/jcomellas/getopt
+pkg_getopt_commit = master
+
+PACKAGES += gettext
+pkg_gettext_name = gettext
+pkg_gettext_description = Erlang internationalization library.
+pkg_gettext_homepage = https://github.com/etnt/gettext
+pkg_gettext_fetch = git
+pkg_gettext_repo = https://github.com/etnt/gettext
+pkg_gettext_commit = master
+
+PACKAGES += giallo
+pkg_giallo_name = giallo
+pkg_giallo_description = Small and flexible web framework on top of Cowboy
+pkg_giallo_homepage = https://github.com/kivra/giallo
+pkg_giallo_fetch = git
+pkg_giallo_repo = https://github.com/kivra/giallo
+pkg_giallo_commit = master
+
+PACKAGES += gin
+pkg_gin_name = gin
+pkg_gin_description = Guard expression helpers for Erlang, implemented as a parse_transform
+pkg_gin_homepage = https://github.com/mad-cocktail/gin
+pkg_gin_fetch = git
+pkg_gin_repo = https://github.com/mad-cocktail/gin
+pkg_gin_commit = master
+
+PACKAGES += gitty
+pkg_gitty_name = gitty
+pkg_gitty_description = Git access in erlang
+pkg_gitty_homepage = https://github.com/maxlapshin/gitty
+pkg_gitty_fetch = git
+pkg_gitty_repo = https://github.com/maxlapshin/gitty
+pkg_gitty_commit = master
+
+PACKAGES += gold_fever
+pkg_gold_fever_name = gold_fever
+pkg_gold_fever_description = A Treasure Hunt for Erlangers
+pkg_gold_fever_homepage = https://github.com/inaka/gold_fever
+pkg_gold_fever_fetch = git
+pkg_gold_fever_repo = https://github.com/inaka/gold_fever
+pkg_gold_fever_commit = master
+
+PACKAGES += gpb
+pkg_gpb_name = gpb
+pkg_gpb_description = A Google Protobuf implementation for Erlang
+pkg_gpb_homepage = https://github.com/tomas-abrahamsson/gpb
+pkg_gpb_fetch = git
+pkg_gpb_repo = https://github.com/tomas-abrahamsson/gpb
+pkg_gpb_commit = master
+
+PACKAGES += gproc
+pkg_gproc_name = gproc
+pkg_gproc_description = Extended process registry for Erlang
+pkg_gproc_homepage = https://github.com/uwiger/gproc
+pkg_gproc_fetch = git
+pkg_gproc_repo = https://github.com/uwiger/gproc
+pkg_gproc_commit = master
+
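+# Illustrative sketch, not taken from the upstream index: every entry here
+# follows the same six-variable scheme (name, description, homepage, fetch
+# method, repo, commit). Assuming a standard erlang.mk project layout, a
+# user Makefile selects a package by its index name and erlang.mk resolves
+# the matching pkg_*_fetch/pkg_*_repo/pkg_*_commit variables to clone and
+# build it, e.g.:
+#
+#   PROJECT = my_app   # hypothetical project name
+#   DEPS = gproc
+#   include erlang.mk
+#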
+PACKAGES += grapherl
+pkg_grapherl_name = grapherl
+pkg_grapherl_description = Create graphs of Erlang systems and programs
+pkg_grapherl_homepage = https://github.com/eproxus/grapherl
+pkg_grapherl_fetch = git
+pkg_grapherl_repo = https://github.com/eproxus/grapherl
+pkg_grapherl_commit = master
+
+PACKAGES += grpc
+pkg_grpc_name = grpc
+pkg_grpc_description = gRPC server in Erlang
+pkg_grpc_homepage = https://github.com/Bluehouse-Technology/grpc
+pkg_grpc_fetch = git
+pkg_grpc_repo = https://github.com/Bluehouse-Technology/grpc
+pkg_grpc_commit = master
+
+PACKAGES += grpc_client
+pkg_grpc_client_name = grpc_client
+pkg_grpc_client_description = gRPC client in Erlang
+pkg_grpc_client_homepage = https://github.com/Bluehouse-Technology/grpc_client
+pkg_grpc_client_fetch = git
+pkg_grpc_client_repo = https://github.com/Bluehouse-Technology/grpc_client
+pkg_grpc_client_commit = master
+
+PACKAGES += gun
+pkg_gun_name = gun
+pkg_gun_description = Asynchronous SPDY, HTTP and Websocket client written in Erlang.
+pkg_gun_homepage = http://ninenines.eu
+pkg_gun_fetch = git
+pkg_gun_repo = https://github.com/ninenines/gun
+pkg_gun_commit = master
+
+PACKAGES += gut
+pkg_gut_name = gut
+pkg_gut_description = gut is a template printing, aka scaffolding, tool for Erlang. Like rails generate or yeoman
+pkg_gut_homepage = https://github.com/unbalancedparentheses/gut
+pkg_gut_fetch = git
+pkg_gut_repo = https://github.com/unbalancedparentheses/gut
+pkg_gut_commit = master
+
+PACKAGES += hackney
+pkg_hackney_name = hackney
+pkg_hackney_description = simple HTTP client in Erlang
+pkg_hackney_homepage = https://github.com/benoitc/hackney
+pkg_hackney_fetch = git
+pkg_hackney_repo = https://github.com/benoitc/hackney
+pkg_hackney_commit = master
+
+PACKAGES += hamcrest
+pkg_hamcrest_name = hamcrest
+pkg_hamcrest_description = Erlang port of Hamcrest
+pkg_hamcrest_homepage = https://github.com/hyperthunk/hamcrest-erlang
+pkg_hamcrest_fetch = git
+pkg_hamcrest_repo = https://github.com/hyperthunk/hamcrest-erlang
+pkg_hamcrest_commit = master
+
+PACKAGES += hanoidb
+pkg_hanoidb_name = hanoidb
+pkg_hanoidb_description = Erlang LSM BTree Storage
+pkg_hanoidb_homepage = https://github.com/krestenkrab/hanoidb
+pkg_hanoidb_fetch = git
+pkg_hanoidb_repo = https://github.com/krestenkrab/hanoidb
+pkg_hanoidb_commit = master
+
+PACKAGES += hottub
+pkg_hottub_name = hottub
+pkg_hottub_description = Permanent Erlang Worker Pool
+pkg_hottub_homepage = https://github.com/bfrog/hottub
+pkg_hottub_fetch = git
+pkg_hottub_repo = https://github.com/bfrog/hottub
+pkg_hottub_commit = master
+
+PACKAGES += hpack
+pkg_hpack_name = hpack
+pkg_hpack_description = HPACK Implementation for Erlang
+pkg_hpack_homepage = https://github.com/joedevivo/hpack
+pkg_hpack_fetch = git
+pkg_hpack_repo = https://github.com/joedevivo/hpack
+pkg_hpack_commit = master
+
+PACKAGES += hyper
+pkg_hyper_name = hyper
+pkg_hyper_description = Erlang implementation of HyperLogLog
+pkg_hyper_homepage = https://github.com/GameAnalytics/hyper
+pkg_hyper_fetch = git
+pkg_hyper_repo = https://github.com/GameAnalytics/hyper
+pkg_hyper_commit = master
+
+PACKAGES += i18n
+pkg_i18n_name = i18n
+pkg_i18n_description = International components for unicode from Erlang (unicode, date, string, number, format, locale, localization, transliteration, icu4e)
+pkg_i18n_homepage = https://github.com/erlang-unicode/i18n
+pkg_i18n_fetch = git
+pkg_i18n_repo = https://github.com/erlang-unicode/i18n
+pkg_i18n_commit = master
+
+PACKAGES += ibrowse
+pkg_ibrowse_name = ibrowse
+pkg_ibrowse_description = Erlang HTTP client
+pkg_ibrowse_homepage = https://github.com/cmullaparthi/ibrowse
+pkg_ibrowse_fetch = git
+pkg_ibrowse_repo = https://github.com/cmullaparthi/ibrowse
+pkg_ibrowse_commit = master
+
+PACKAGES += idna
+pkg_idna_name = idna
+pkg_idna_description = Erlang IDNA lib
+pkg_idna_homepage = https://github.com/benoitc/erlang-idna
+pkg_idna_fetch = git
+pkg_idna_repo = https://github.com/benoitc/erlang-idna
+pkg_idna_commit = master
+
+PACKAGES += ierlang
+pkg_ierlang_name = ierlang
+pkg_ierlang_description = An Erlang language kernel for IPython.
+pkg_ierlang_homepage = https://github.com/robbielynch/ierlang
+pkg_ierlang_fetch = git
+pkg_ierlang_repo = https://github.com/robbielynch/ierlang
+pkg_ierlang_commit = master
+
+PACKAGES += iota
+pkg_iota_name = iota
+pkg_iota_description = iota (Inter-dependency Objective Testing Apparatus) - a tool to enforce clean separation of responsibilities in Erlang code
+pkg_iota_homepage = https://github.com/jpgneves/iota
+pkg_iota_fetch = git
+pkg_iota_repo = https://github.com/jpgneves/iota
+pkg_iota_commit = master
+
+PACKAGES += irc_lib
+pkg_irc_lib_name = irc_lib
+pkg_irc_lib_description = Erlang irc client library
+pkg_irc_lib_homepage = https://github.com/OtpChatBot/irc_lib
+pkg_irc_lib_fetch = git
+pkg_irc_lib_repo = https://github.com/OtpChatBot/irc_lib
+pkg_irc_lib_commit = master
+
+PACKAGES += ircd
+pkg_ircd_name = ircd
+pkg_ircd_description = A pluggable IRC daemon application/library for Erlang.
+pkg_ircd_homepage = https://github.com/tonyg/erlang-ircd
+pkg_ircd_fetch = git
+pkg_ircd_repo = https://github.com/tonyg/erlang-ircd
+pkg_ircd_commit = master
+
+PACKAGES += iris
+pkg_iris_name = iris
+pkg_iris_description = Iris Erlang binding
+pkg_iris_homepage = https://github.com/project-iris/iris-erl
+pkg_iris_fetch = git
+pkg_iris_repo = https://github.com/project-iris/iris-erl
+pkg_iris_commit = master
+
+PACKAGES += iso8601
+pkg_iso8601_name = iso8601
+pkg_iso8601_description = Erlang ISO 8601 date formatter/parser
+pkg_iso8601_homepage = https://github.com/seansawyer/erlang_iso8601
+pkg_iso8601_fetch = git
+pkg_iso8601_repo = https://github.com/seansawyer/erlang_iso8601
+pkg_iso8601_commit = master
+
+PACKAGES += jamdb_sybase
+pkg_jamdb_sybase_name = jamdb_sybase
+pkg_jamdb_sybase_description = Erlang driver for SAP Sybase ASE
+pkg_jamdb_sybase_homepage = https://github.com/erlangbureau/jamdb_sybase
+pkg_jamdb_sybase_fetch = git
+pkg_jamdb_sybase_repo = https://github.com/erlangbureau/jamdb_sybase
+pkg_jamdb_sybase_commit = master
+
+PACKAGES += jerg
+pkg_jerg_name = jerg
+pkg_jerg_description = JSON Schema to Erlang Records Generator
+pkg_jerg_homepage = https://github.com/ddossot/jerg
+pkg_jerg_fetch = git
+pkg_jerg_repo = https://github.com/ddossot/jerg
+pkg_jerg_commit = master
+
+PACKAGES += jesse
+pkg_jesse_name = jesse
+pkg_jesse_description = jesse (JSon Schema Erlang) is an implementation of a json schema validator for Erlang.
+pkg_jesse_homepage = https://github.com/for-GET/jesse
+pkg_jesse_fetch = git
+pkg_jesse_repo = https://github.com/for-GET/jesse
+pkg_jesse_commit = master
+
+PACKAGES += jiffy
+pkg_jiffy_name = jiffy
+pkg_jiffy_description = JSON NIFs for Erlang.
+pkg_jiffy_homepage = https://github.com/davisp/jiffy
+pkg_jiffy_fetch = git
+pkg_jiffy_repo = https://github.com/davisp/jiffy
+pkg_jiffy_commit = master
+
+PACKAGES += jiffy_v
+pkg_jiffy_v_name = jiffy_v
+pkg_jiffy_v_description = JSON validation utility
+pkg_jiffy_v_homepage = https://github.com/shizzard/jiffy-v
+pkg_jiffy_v_fetch = git
+pkg_jiffy_v_repo = https://github.com/shizzard/jiffy-v
+pkg_jiffy_v_commit = master
+
+PACKAGES += jobs
+pkg_jobs_name = jobs
+pkg_jobs_description = a Job scheduler for load regulation
+pkg_jobs_homepage = https://github.com/esl/jobs
+pkg_jobs_fetch = git
+pkg_jobs_repo = https://github.com/esl/jobs
+pkg_jobs_commit = master
+
+PACKAGES += joxa
+pkg_joxa_name = joxa
+pkg_joxa_description = A Modern Lisp for the Erlang VM
+pkg_joxa_homepage = https://github.com/joxa/joxa
+pkg_joxa_fetch = git
+pkg_joxa_repo = https://github.com/joxa/joxa
+pkg_joxa_commit = master
+
+PACKAGES += json
+pkg_json_name = json
+pkg_json_description = a high level json library for erlang (17.0+)
+pkg_json_homepage = https://github.com/talentdeficit/json
+pkg_json_fetch = git
+pkg_json_repo = https://github.com/talentdeficit/json
+pkg_json_commit = master
+
+PACKAGES += json_rec
+pkg_json_rec_name = json_rec
+pkg_json_rec_description = JSON to erlang record
+pkg_json_rec_homepage = https://github.com/justinkirby/json_rec
+pkg_json_rec_fetch = git
+pkg_json_rec_repo = https://github.com/justinkirby/json_rec
+pkg_json_rec_commit = master
+
+PACKAGES += jsone
+pkg_jsone_name = jsone
+pkg_jsone_description = An Erlang library for encoding and decoding JSON data.
+pkg_jsone_homepage = https://github.com/sile/jsone.git
+pkg_jsone_fetch = git
+pkg_jsone_repo = https://github.com/sile/jsone.git
+pkg_jsone_commit = master
+
+PACKAGES += jsonerl
+pkg_jsonerl_name = jsonerl
+pkg_jsonerl_description = yet another but slightly different erlang <-> json encoder/decoder
+pkg_jsonerl_homepage = https://github.com/lambder/jsonerl
+pkg_jsonerl_fetch = git
+pkg_jsonerl_repo = https://github.com/lambder/jsonerl
+pkg_jsonerl_commit = master
+
+PACKAGES += jsonpath
+pkg_jsonpath_name = jsonpath
+pkg_jsonpath_description = Fast Erlang JSON data retrieval and updates via javascript-like notation
+pkg_jsonpath_homepage = https://github.com/GeneStevens/jsonpath
+pkg_jsonpath_fetch = git
+pkg_jsonpath_repo = https://github.com/GeneStevens/jsonpath
+pkg_jsonpath_commit = master
+
+PACKAGES += jsonx
+pkg_jsonx_name = jsonx
+pkg_jsonx_description = JSONX is an Erlang library for efficiently decoding and encoding JSON, written in C.
+pkg_jsonx_homepage = https://github.com/iskra/jsonx
+pkg_jsonx_fetch = git
+pkg_jsonx_repo = https://github.com/iskra/jsonx
+pkg_jsonx_commit = master
+
+PACKAGES += jsx
+pkg_jsx_name = jsx
+pkg_jsx_description = An Erlang application for consuming, producing and manipulating JSON.
+pkg_jsx_homepage = https://github.com/talentdeficit/jsx
+pkg_jsx_fetch = git
+pkg_jsx_repo = https://github.com/talentdeficit/jsx
+pkg_jsx_commit = master
+
+PACKAGES += kafka
+pkg_kafka_name = kafka
+pkg_kafka_description = Kafka consumer and producer in Erlang
+pkg_kafka_homepage = https://github.com/wooga/kafka-erlang
+pkg_kafka_fetch = git
+pkg_kafka_repo = https://github.com/wooga/kafka-erlang
+pkg_kafka_commit = master
+
+PACKAGES += kafka_protocol
+pkg_kafka_protocol_name = kafka_protocol
+pkg_kafka_protocol_description = Kafka protocol Erlang library
+pkg_kafka_protocol_homepage = https://github.com/klarna/kafka_protocol
+pkg_kafka_protocol_fetch = git
+pkg_kafka_protocol_repo = https://github.com/klarna/kafka_protocol.git
+pkg_kafka_protocol_commit = master
+
+PACKAGES += kai
+pkg_kai_name = kai
+pkg_kai_description = DHT storage by Takeshi Inoue
+pkg_kai_homepage = https://github.com/synrc/kai
+pkg_kai_fetch = git
+pkg_kai_repo = https://github.com/synrc/kai
+pkg_kai_commit = master
+
+PACKAGES += katja
+pkg_katja_name = katja
+pkg_katja_description = A simple Riemann client written in Erlang.
+pkg_katja_homepage = https://github.com/nifoc/katja
+pkg_katja_fetch = git
+pkg_katja_repo = https://github.com/nifoc/katja
+pkg_katja_commit = master
+
+PACKAGES += kdht
+pkg_kdht_name = kdht
+pkg_kdht_description = kdht is an erlang DHT implementation
+pkg_kdht_homepage = https://github.com/kevinlynx/kdht
+pkg_kdht_fetch = git
+pkg_kdht_repo = https://github.com/kevinlynx/kdht
+pkg_kdht_commit = master
+
+PACKAGES += key2value
+pkg_key2value_name = key2value
+pkg_key2value_description = Erlang 2-way map
+pkg_key2value_homepage = https://github.com/okeuday/key2value
+pkg_key2value_fetch = git
+pkg_key2value_repo = https://github.com/okeuday/key2value
+pkg_key2value_commit = master
+
+PACKAGES += keys1value
+pkg_keys1value_name = keys1value
+pkg_keys1value_description = Erlang set associative map for key lists
+pkg_keys1value_homepage = https://github.com/okeuday/keys1value
+pkg_keys1value_fetch = git
+pkg_keys1value_repo = https://github.com/okeuday/keys1value
+pkg_keys1value_commit = master
+
+PACKAGES += kinetic
+pkg_kinetic_name = kinetic
+pkg_kinetic_description = Erlang Kinesis Client
+pkg_kinetic_homepage = https://github.com/AdRoll/kinetic
+pkg_kinetic_fetch = git
+pkg_kinetic_repo = https://github.com/AdRoll/kinetic
+pkg_kinetic_commit = master
+
+PACKAGES += kjell
+pkg_kjell_name = kjell
+pkg_kjell_description = Erlang Shell
+pkg_kjell_homepage = https://github.com/karlll/kjell
+pkg_kjell_fetch = git
+pkg_kjell_repo = https://github.com/karlll/kjell
+pkg_kjell_commit = master
+
+PACKAGES += kraken
+pkg_kraken_name = kraken
+pkg_kraken_description = Distributed Pubsub Server for Realtime Apps
+pkg_kraken_homepage = https://github.com/Asana/kraken
+pkg_kraken_fetch = git
+pkg_kraken_repo = https://github.com/Asana/kraken
+pkg_kraken_commit = master
+
+PACKAGES += kucumberl
+pkg_kucumberl_name = kucumberl
+pkg_kucumberl_description = A pure-Erlang, open-source implementation of Cucumber
+pkg_kucumberl_homepage = https://github.com/openshine/kucumberl
+pkg_kucumberl_fetch = git
+pkg_kucumberl_repo = https://github.com/openshine/kucumberl
+pkg_kucumberl_commit = master
+
+PACKAGES += kvc
+pkg_kvc_name = kvc
+pkg_kvc_description = KVC - Key Value Coding for Erlang data structures
+pkg_kvc_homepage = https://github.com/etrepum/kvc
+pkg_kvc_fetch = git
+pkg_kvc_repo = https://github.com/etrepum/kvc
+pkg_kvc_commit = master
+
+PACKAGES += kvlists
+pkg_kvlists_name = kvlists
+pkg_kvlists_description = Lists of key-value pairs (decoded JSON) in Erlang
+pkg_kvlists_homepage = https://github.com/jcomellas/kvlists
+pkg_kvlists_fetch = git
+pkg_kvlists_repo = https://github.com/jcomellas/kvlists
+pkg_kvlists_commit = master
+
+PACKAGES += kvs
+pkg_kvs_name = kvs
+pkg_kvs_description = Container and Iterator
+pkg_kvs_homepage = https://github.com/synrc/kvs
+pkg_kvs_fetch = git
+pkg_kvs_repo = https://github.com/synrc/kvs
+pkg_kvs_commit = master
+
+PACKAGES += lager
+pkg_lager_name = lager
+pkg_lager_description = A logging framework for Erlang/OTP.
+pkg_lager_homepage = https://github.com/erlang-lager/lager
+pkg_lager_fetch = git
+pkg_lager_repo = https://github.com/erlang-lager/lager
+pkg_lager_commit = master
+
+PACKAGES += lager_amqp_backend
+pkg_lager_amqp_backend_name = lager_amqp_backend
+pkg_lager_amqp_backend_description = AMQP RabbitMQ Lager backend
+pkg_lager_amqp_backend_homepage = https://github.com/jbrisbin/lager_amqp_backend
+pkg_lager_amqp_backend_fetch = git
+pkg_lager_amqp_backend_repo = https://github.com/jbrisbin/lager_amqp_backend
+pkg_lager_amqp_backend_commit = master
+
+PACKAGES += lager_syslog
+pkg_lager_syslog_name = lager_syslog
+pkg_lager_syslog_description = Syslog backend for lager
+pkg_lager_syslog_homepage = https://github.com/erlang-lager/lager_syslog
+pkg_lager_syslog_fetch = git
+pkg_lager_syslog_repo = https://github.com/erlang-lager/lager_syslog
+pkg_lager_syslog_commit = master
+
+PACKAGES += lambdapad
+pkg_lambdapad_name = lambdapad
+pkg_lambdapad_description = Static site generator using Erlang. Yes, Erlang.
+pkg_lambdapad_homepage = https://github.com/gar1t/lambdapad
+pkg_lambdapad_fetch = git
+pkg_lambdapad_repo = https://github.com/gar1t/lambdapad
+pkg_lambdapad_commit = master
+
+PACKAGES += lasp
+pkg_lasp_name = lasp
+pkg_lasp_description = A Language for Distributed, Eventually Consistent Computations
+pkg_lasp_homepage = http://lasp-lang.org/
+pkg_lasp_fetch = git
+pkg_lasp_repo = https://github.com/lasp-lang/lasp
+pkg_lasp_commit = master
+
+PACKAGES += lasse
+pkg_lasse_name = lasse
+pkg_lasse_description = SSE handler for Cowboy
+pkg_lasse_homepage = https://github.com/inaka/lasse
+pkg_lasse_fetch = git
+pkg_lasse_repo = https://github.com/inaka/lasse
+pkg_lasse_commit = master
+
+PACKAGES += ldap
+pkg_ldap_name = ldap
+pkg_ldap_description = LDAP server written in Erlang
+pkg_ldap_homepage = https://github.com/spawnproc/ldap
+pkg_ldap_fetch = git
+pkg_ldap_repo = https://github.com/spawnproc/ldap
+pkg_ldap_commit = master
+
+PACKAGES += lethink
+pkg_lethink_name = lethink
+pkg_lethink_description = erlang driver for rethinkdb
+pkg_lethink_homepage = https://github.com/taybin/lethink
+pkg_lethink_fetch = git
+pkg_lethink_repo = https://github.com/taybin/lethink
+pkg_lethink_commit = master
+
+PACKAGES += lfe
+pkg_lfe_name = lfe
+pkg_lfe_description = Lisp Flavoured Erlang (LFE)
+pkg_lfe_homepage = https://github.com/rvirding/lfe
+pkg_lfe_fetch = git
+pkg_lfe_repo = https://github.com/rvirding/lfe
+pkg_lfe_commit = master
+
+PACKAGES += ling
+pkg_ling_name = ling
+pkg_ling_description = Erlang on Xen
+pkg_ling_homepage = https://github.com/cloudozer/ling
+pkg_ling_fetch = git
+pkg_ling_repo = https://github.com/cloudozer/ling
+pkg_ling_commit = master
+
+PACKAGES += live
+pkg_live_name = live
+pkg_live_description = Automated module and configuration reloader.
+pkg_live_homepage = http://ninenines.eu
+pkg_live_fetch = git
+pkg_live_repo = https://github.com/ninenines/live
+pkg_live_commit = master
+
+PACKAGES += lmq
+pkg_lmq_name = lmq
+pkg_lmq_description = Lightweight Message Queue
+pkg_lmq_homepage = https://github.com/iij/lmq
+pkg_lmq_fetch = git
+pkg_lmq_repo = https://github.com/iij/lmq
+pkg_lmq_commit = master
+
+PACKAGES += locker
+pkg_locker_name = locker
+pkg_locker_description = Atomic distributed 'check and set' for short-lived keys
+pkg_locker_homepage = https://github.com/wooga/locker
+pkg_locker_fetch = git
+pkg_locker_repo = https://github.com/wooga/locker
+pkg_locker_commit = master
+
+PACKAGES += locks
+pkg_locks_name = locks
+pkg_locks_description = A scalable, deadlock-resolving resource locker
+pkg_locks_homepage = https://github.com/uwiger/locks
+pkg_locks_fetch = git
+pkg_locks_repo = https://github.com/uwiger/locks
+pkg_locks_commit = master
+
+PACKAGES += log4erl
+pkg_log4erl_name = log4erl
+pkg_log4erl_description = A logger for erlang in the spirit of Log4J.
+pkg_log4erl_homepage = https://github.com/ahmednawras/log4erl
+pkg_log4erl_fetch = git
+pkg_log4erl_repo = https://github.com/ahmednawras/log4erl
+pkg_log4erl_commit = master
+
+PACKAGES += lol
+pkg_lol_name = lol
+pkg_lol_description = Lisp on erLang, and programming is fun again
+pkg_lol_homepage = https://github.com/b0oh/lol
+pkg_lol_fetch = git
+pkg_lol_repo = https://github.com/b0oh/lol
+pkg_lol_commit = master
+
+PACKAGES += lucid
+pkg_lucid_name = lucid
+pkg_lucid_description = HTTP/2 server written in Erlang
+pkg_lucid_homepage = https://github.com/tatsuhiro-t/lucid
+pkg_lucid_fetch = git
+pkg_lucid_repo = https://github.com/tatsuhiro-t/lucid
+pkg_lucid_commit = master
+
+PACKAGES += luerl
+pkg_luerl_name = luerl
+pkg_luerl_description = Lua in Erlang
+pkg_luerl_homepage = https://github.com/rvirding/luerl
+pkg_luerl_fetch = git
+pkg_luerl_repo = https://github.com/rvirding/luerl
+pkg_luerl_commit = develop
+
+PACKAGES += luwak
+pkg_luwak_name = luwak
+pkg_luwak_description = Large-object storage interface for Riak
+pkg_luwak_homepage = https://github.com/basho/luwak
+pkg_luwak_fetch = git
+pkg_luwak_repo = https://github.com/basho/luwak
+pkg_luwak_commit = master
+
+PACKAGES += lux
+pkg_lux_name = lux
+pkg_lux_description = Lux (LUcid eXpect scripting) simplifies test automation and provides an Expect-style execution of commands
+pkg_lux_homepage = https://github.com/hawk/lux
+pkg_lux_fetch = git
+pkg_lux_repo = https://github.com/hawk/lux
+pkg_lux_commit = master
+
+PACKAGES += machi
+pkg_machi_name = machi
+pkg_machi_description = Machi file store
+pkg_machi_homepage = https://github.com/basho/machi
+pkg_machi_fetch = git
+pkg_machi_repo = https://github.com/basho/machi
+pkg_machi_commit = master
+
+PACKAGES += mad
+pkg_mad_name = mad
+pkg_mad_description = Small and Fast Rebar Replacement
+pkg_mad_homepage = https://github.com/synrc/mad
+pkg_mad_fetch = git
+pkg_mad_repo = https://github.com/synrc/mad
+pkg_mad_commit = master
+
+PACKAGES += marina
+pkg_marina_name = marina
+pkg_marina_description = Non-blocking Erlang Cassandra CQL3 client
+pkg_marina_homepage = https://github.com/lpgauth/marina
+pkg_marina_fetch = git
+pkg_marina_repo = https://github.com/lpgauth/marina
+pkg_marina_commit = master
+
+PACKAGES += mavg
+pkg_mavg_name = mavg
+pkg_mavg_description = Erlang :: Exponential moving average library
+pkg_mavg_homepage = https://github.com/EchoTeam/mavg
+pkg_mavg_fetch = git
+pkg_mavg_repo = https://github.com/EchoTeam/mavg
+pkg_mavg_commit = master
+
+PACKAGES += mc_erl
+pkg_mc_erl_name = mc_erl
+pkg_mc_erl_description = mc-erl is a server for Minecraft 1.4.7 written in Erlang.
+pkg_mc_erl_homepage = https://github.com/clonejo/mc-erl
+pkg_mc_erl_fetch = git
+pkg_mc_erl_repo = https://github.com/clonejo/mc-erl
+pkg_mc_erl_commit = master
+
+PACKAGES += mcd
+pkg_mcd_name = mcd
+pkg_mcd_description = Fast memcached protocol client in pure Erlang
+pkg_mcd_homepage = https://github.com/EchoTeam/mcd
+pkg_mcd_fetch = git
+pkg_mcd_repo = https://github.com/EchoTeam/mcd
+pkg_mcd_commit = master
+
+PACKAGES += mcerlang
+pkg_mcerlang_name = mcerlang
+pkg_mcerlang_description = The McErlang model checker for Erlang
+pkg_mcerlang_homepage = https://github.com/fredlund/McErlang
+pkg_mcerlang_fetch = git
+pkg_mcerlang_repo = https://github.com/fredlund/McErlang
+pkg_mcerlang_commit = master
+
+PACKAGES += meck
+pkg_meck_name = meck
+pkg_meck_description = A mocking library for Erlang
+pkg_meck_homepage = https://github.com/eproxus/meck
+pkg_meck_fetch = git
+pkg_meck_repo = https://github.com/eproxus/meck
+pkg_meck_commit = master
+
+PACKAGES += mekao
+pkg_mekao_name = mekao
+pkg_mekao_description = SQL constructor
+pkg_mekao_homepage = https://github.com/ddosia/mekao
+pkg_mekao_fetch = git
+pkg_mekao_repo = https://github.com/ddosia/mekao
+pkg_mekao_commit = master
+
+PACKAGES += memo
+pkg_memo_name = memo
+pkg_memo_description = Erlang memoization server
+pkg_memo_homepage = https://github.com/tuncer/memo
+pkg_memo_fetch = git
+pkg_memo_repo = https://github.com/tuncer/memo
+pkg_memo_commit = master
+
+PACKAGES += merge_index
+pkg_merge_index_name = merge_index
+pkg_merge_index_description = MergeIndex is an Erlang library for storing ordered sets on disk. It is very similar to an SSTable (in Google's Bigtable) or an HFile (in Hadoop).
+pkg_merge_index_homepage = https://github.com/basho/merge_index
+pkg_merge_index_fetch = git
+pkg_merge_index_repo = https://github.com/basho/merge_index
+pkg_merge_index_commit = master
+
+PACKAGES += merl
+pkg_merl_name = merl
+pkg_merl_description = Metaprogramming in Erlang
+pkg_merl_homepage = https://github.com/richcarl/merl
+pkg_merl_fetch = git
+pkg_merl_repo = https://github.com/richcarl/merl
+pkg_merl_commit = master
+
+PACKAGES += mimerl
+pkg_mimerl_name = mimerl
+pkg_mimerl_description = library to handle mimetypes
+pkg_mimerl_homepage = https://github.com/benoitc/mimerl
+pkg_mimerl_fetch = git
+pkg_mimerl_repo = https://github.com/benoitc/mimerl
+pkg_mimerl_commit = master
+
+PACKAGES += mimetypes
+pkg_mimetypes_name = mimetypes
+pkg_mimetypes_description = Erlang MIME types library
+pkg_mimetypes_homepage = https://github.com/spawngrid/mimetypes
+pkg_mimetypes_fetch = git
+pkg_mimetypes_repo = https://github.com/spawngrid/mimetypes
+pkg_mimetypes_commit = master
+
+PACKAGES += mixer
+pkg_mixer_name = mixer
+pkg_mixer_description = Mix in functions from other modules
+pkg_mixer_homepage = https://github.com/chef/mixer
+pkg_mixer_fetch = git
+pkg_mixer_repo = https://github.com/chef/mixer
+pkg_mixer_commit = master
+
+PACKAGES += mochiweb
+pkg_mochiweb_name = mochiweb
+pkg_mochiweb_description = MochiWeb is an Erlang library for building lightweight HTTP servers.
+pkg_mochiweb_homepage = https://github.com/mochi/mochiweb
+pkg_mochiweb_fetch = git
+pkg_mochiweb_repo = https://github.com/mochi/mochiweb
+pkg_mochiweb_commit = master
+
+PACKAGES += mochiweb_xpath
+pkg_mochiweb_xpath_name = mochiweb_xpath
+pkg_mochiweb_xpath_description = XPath support for mochiweb's html parser
+pkg_mochiweb_xpath_homepage = https://github.com/retnuh/mochiweb_xpath
+pkg_mochiweb_xpath_fetch = git
+pkg_mochiweb_xpath_repo = https://github.com/retnuh/mochiweb_xpath
+pkg_mochiweb_xpath_commit = master
+
+PACKAGES += mockgyver
+pkg_mockgyver_name = mockgyver
+pkg_mockgyver_description = A mocking library for Erlang
+pkg_mockgyver_homepage = https://github.com/klajo/mockgyver
+pkg_mockgyver_fetch = git
+pkg_mockgyver_repo = https://github.com/klajo/mockgyver
+pkg_mockgyver_commit = master
+
+PACKAGES += modlib
+pkg_modlib_name = modlib
+pkg_modlib_description = Web framework based on Erlang's inets httpd
+pkg_modlib_homepage = https://github.com/gar1t/modlib
+pkg_modlib_fetch = git
+pkg_modlib_repo = https://github.com/gar1t/modlib
+pkg_modlib_commit = master
+
+PACKAGES += mongodb
+pkg_mongodb_name = mongodb
+pkg_mongodb_description = MongoDB driver for Erlang
+pkg_mongodb_homepage = https://github.com/comtihon/mongodb-erlang
+pkg_mongodb_fetch = git
+pkg_mongodb_repo = https://github.com/comtihon/mongodb-erlang
+pkg_mongodb_commit = master
+
+PACKAGES += mongooseim
+pkg_mongooseim_name = mongooseim
+pkg_mongooseim_description = Jabber / XMPP server with focus on performance and scalability, by Erlang Solutions
+pkg_mongooseim_homepage = https://www.erlang-solutions.com/products/mongooseim-massively-scalable-ejabberd-platform
+pkg_mongooseim_fetch = git
+pkg_mongooseim_repo = https://github.com/esl/MongooseIM
+pkg_mongooseim_commit = master
+
+PACKAGES += moyo
+pkg_moyo_name = moyo
+pkg_moyo_description = Erlang utility functions library
+pkg_moyo_homepage = https://github.com/dwango/moyo
+pkg_moyo_fetch = git
+pkg_moyo_repo = https://github.com/dwango/moyo
+pkg_moyo_commit = master
+
+PACKAGES += msgpack
+pkg_msgpack_name = msgpack
+pkg_msgpack_description = MessagePack (de)serializer implementation for Erlang
+pkg_msgpack_homepage = https://github.com/msgpack/msgpack-erlang
+pkg_msgpack_fetch = git
+pkg_msgpack_repo = https://github.com/msgpack/msgpack-erlang
+pkg_msgpack_commit = master
+
+PACKAGES += mu2
+pkg_mu2_name = mu2
+pkg_mu2_description = Erlang mutation testing tool
+pkg_mu2_homepage = https://github.com/ramsay-t/mu2
+pkg_mu2_fetch = git
+pkg_mu2_repo = https://github.com/ramsay-t/mu2
+pkg_mu2_commit = master
+
+PACKAGES += mustache
+pkg_mustache_name = mustache
+pkg_mustache_description = Mustache template engine for Erlang.
+pkg_mustache_homepage = https://github.com/mojombo/mustache.erl
+pkg_mustache_fetch = git
+pkg_mustache_repo = https://github.com/mojombo/mustache.erl
+pkg_mustache_commit = master
+
+PACKAGES += myproto
+pkg_myproto_name = myproto
+pkg_myproto_description = MySQL Server Protocol in Erlang
+pkg_myproto_homepage = https://github.com/altenwald/myproto
+pkg_myproto_fetch = git
+pkg_myproto_repo = https://github.com/altenwald/myproto
+pkg_myproto_commit = master
+
+PACKAGES += mysql
+pkg_mysql_name = mysql
+pkg_mysql_description = MySQL client library for Erlang/OTP
+pkg_mysql_homepage = https://github.com/mysql-otp/mysql-otp
+pkg_mysql_fetch = git
+pkg_mysql_repo = https://github.com/mysql-otp/mysql-otp
+pkg_mysql_commit = 1.5.1
+
+PACKAGES += n2o
+pkg_n2o_name = n2o
+pkg_n2o_description = WebSocket Application Server
+pkg_n2o_homepage = https://github.com/5HT/n2o
+pkg_n2o_fetch = git
+pkg_n2o_repo = https://github.com/5HT/n2o
+pkg_n2o_commit = master
+
+PACKAGES += nat_upnp
+pkg_nat_upnp_name = nat_upnp
+pkg_nat_upnp_description = Erlang library to map your internal port to an external one using UPnP IGD
+pkg_nat_upnp_homepage = https://github.com/benoitc/nat_upnp
+pkg_nat_upnp_fetch = git
+pkg_nat_upnp_repo = https://github.com/benoitc/nat_upnp
+pkg_nat_upnp_commit = master
+
+PACKAGES += neo4j
+pkg_neo4j_name = neo4j
+pkg_neo4j_description = Erlang client library for Neo4J.
+pkg_neo4j_homepage = https://github.com/dmitriid/neo4j-erlang
+pkg_neo4j_fetch = git
+pkg_neo4j_repo = https://github.com/dmitriid/neo4j-erlang
+pkg_neo4j_commit = master
+
+PACKAGES += neotoma
+pkg_neotoma_name = neotoma
+pkg_neotoma_description = Erlang library and packrat parser-generator for parsing expression grammars.
+pkg_neotoma_homepage = https://github.com/seancribbs/neotoma
+pkg_neotoma_fetch = git
+pkg_neotoma_repo = https://github.com/seancribbs/neotoma
+pkg_neotoma_commit = master
+
+PACKAGES += newrelic
+pkg_newrelic_name = newrelic
+pkg_newrelic_description = Erlang library for sending metrics to New Relic
+pkg_newrelic_homepage = https://github.com/wooga/newrelic-erlang
+pkg_newrelic_fetch = git
+pkg_newrelic_repo = https://github.com/wooga/newrelic-erlang
+pkg_newrelic_commit = master
+
+PACKAGES += nifty
+pkg_nifty_name = nifty
+pkg_nifty_description = Erlang NIF wrapper generator
+pkg_nifty_homepage = https://github.com/parapluu/nifty
+pkg_nifty_fetch = git
+pkg_nifty_repo = https://github.com/parapluu/nifty
+pkg_nifty_commit = master
+
+PACKAGES += nitrogen_core
+pkg_nitrogen_core_name = nitrogen_core
+pkg_nitrogen_core_description = The core Nitrogen library.
+pkg_nitrogen_core_homepage = http://nitrogenproject.com/
+pkg_nitrogen_core_fetch = git
+pkg_nitrogen_core_repo = https://github.com/nitrogen/nitrogen_core
+pkg_nitrogen_core_commit = master
+
+PACKAGES += nkbase
+pkg_nkbase_name = nkbase
+pkg_nkbase_description = NkBASE distributed database
+pkg_nkbase_homepage = https://github.com/Nekso/nkbase
+pkg_nkbase_fetch = git
+pkg_nkbase_repo = https://github.com/Nekso/nkbase
+pkg_nkbase_commit = develop
+
+PACKAGES += nkdocker
+pkg_nkdocker_name = nkdocker
+pkg_nkdocker_description = Erlang Docker client
+pkg_nkdocker_homepage = https://github.com/Nekso/nkdocker
+pkg_nkdocker_fetch = git
+pkg_nkdocker_repo = https://github.com/Nekso/nkdocker
+pkg_nkdocker_commit = master
+
+PACKAGES += nkpacket
+pkg_nkpacket_name = nkpacket
+pkg_nkpacket_description = Generic Erlang transport layer
+pkg_nkpacket_homepage = https://github.com/Nekso/nkpacket
+pkg_nkpacket_fetch = git
+pkg_nkpacket_repo = https://github.com/Nekso/nkpacket
+pkg_nkpacket_commit = master
+
+PACKAGES += nksip
+pkg_nksip_name = nksip
+pkg_nksip_description = Erlang SIP application server
+pkg_nksip_homepage = https://github.com/kalta/nksip
+pkg_nksip_fetch = git
+pkg_nksip_repo = https://github.com/kalta/nksip
+pkg_nksip_commit = master
+
+PACKAGES += nodefinder
+pkg_nodefinder_name = nodefinder
+pkg_nodefinder_description = automatic node discovery via UDP multicast
+pkg_nodefinder_homepage = https://github.com/erlanger/nodefinder
+pkg_nodefinder_fetch = git
+pkg_nodefinder_repo = https://github.com/okeuday/nodefinder
+pkg_nodefinder_commit = master
+
+PACKAGES += nprocreg
+pkg_nprocreg_name = nprocreg
+pkg_nprocreg_description = Minimal Distributed Erlang Process Registry
+pkg_nprocreg_homepage = http://nitrogenproject.com/
+pkg_nprocreg_fetch = git
+pkg_nprocreg_repo = https://github.com/nitrogen/nprocreg
+pkg_nprocreg_commit = master
+
+PACKAGES += oauth
+pkg_oauth_name = oauth
+pkg_oauth_description = An Erlang OAuth 1.0 implementation
+pkg_oauth_homepage = https://github.com/tim/erlang-oauth
+pkg_oauth_fetch = git
+pkg_oauth_repo = https://github.com/tim/erlang-oauth
+pkg_oauth_commit = master
+
+PACKAGES += oauth2
+pkg_oauth2_name = oauth2
+pkg_oauth2_description = Erlang OAuth2 implementation
+pkg_oauth2_homepage = https://github.com/kivra/oauth2
+pkg_oauth2_fetch = git
+pkg_oauth2_repo = https://github.com/kivra/oauth2
+pkg_oauth2_commit = master
+
+PACKAGES += observer_cli
+pkg_observer_cli_name = observer_cli
+pkg_observer_cli_description = Visualize Erlang/Elixir Nodes On The Command Line
+pkg_observer_cli_homepage = http://zhongwencool.github.io/observer_cli
+pkg_observer_cli_fetch = git
+pkg_observer_cli_repo = https://github.com/zhongwencool/observer_cli
+pkg_observer_cli_commit = master
+
+PACKAGES += octopus
+pkg_octopus_name = octopus
+pkg_octopus_description = Small and flexible pool manager written in Erlang
+pkg_octopus_homepage = https://github.com/erlangbureau/octopus
+pkg_octopus_fetch = git
+pkg_octopus_repo = https://github.com/erlangbureau/octopus
+pkg_octopus_commit = master
+
+PACKAGES += of_protocol
+pkg_of_protocol_name = of_protocol
+pkg_of_protocol_description = OpenFlow Protocol Library for Erlang
+pkg_of_protocol_homepage = https://github.com/FlowForwarding/of_protocol
+pkg_of_protocol_fetch = git
+pkg_of_protocol_repo = https://github.com/FlowForwarding/of_protocol
+pkg_of_protocol_commit = master
+
+PACKAGES += opencouch
+pkg_opencouch_name = couch
+pkg_opencouch_description = An embeddable document-oriented database compatible with Apache CouchDB
+pkg_opencouch_homepage = https://github.com/benoitc/opencouch
+pkg_opencouch_fetch = git
+pkg_opencouch_repo = https://github.com/benoitc/opencouch
+pkg_opencouch_commit = master
+
+PACKAGES += openflow
+pkg_openflow_name = openflow
+pkg_openflow_description = An OpenFlow controller written in pure erlang
+pkg_openflow_homepage = https://github.com/renatoaguiar/erlang-openflow
+pkg_openflow_fetch = git
+pkg_openflow_repo = https://github.com/renatoaguiar/erlang-openflow
+pkg_openflow_commit = master
+
+PACKAGES += openid
+pkg_openid_name = openid
+pkg_openid_description = Erlang OpenID
+pkg_openid_homepage = https://github.com/brendonh/erl_openid
+pkg_openid_fetch = git
+pkg_openid_repo = https://github.com/brendonh/erl_openid
+pkg_openid_commit = master
+
+PACKAGES += openpoker
+pkg_openpoker_name = openpoker
+pkg_openpoker_description = Genesis Texas hold'em Game Server
+pkg_openpoker_homepage = https://github.com/hpyhacking/openpoker
+pkg_openpoker_fetch = git
+pkg_openpoker_repo = https://github.com/hpyhacking/openpoker
+pkg_openpoker_commit = master
+
+PACKAGES += otpbp
+pkg_otpbp_name = otpbp
+pkg_otpbp_description = Parse transformer for using new OTP functions in old Erlang/OTP releases (R15, R16, 17, 18, 19)
+pkg_otpbp_homepage = https://github.com/Ledest/otpbp
+pkg_otpbp_fetch = git
+pkg_otpbp_repo = https://github.com/Ledest/otpbp
+pkg_otpbp_commit = master
+
+PACKAGES += pal
+pkg_pal_name = pal
+pkg_pal_description = Pragmatic Authentication Library
+pkg_pal_homepage = https://github.com/manifest/pal
+pkg_pal_fetch = git
+pkg_pal_repo = https://github.com/manifest/pal
+pkg_pal_commit = master
+
+PACKAGES += parse_trans
+pkg_parse_trans_name = parse_trans
+pkg_parse_trans_description = Parse transform utilities for Erlang
+pkg_parse_trans_homepage = https://github.com/uwiger/parse_trans
+pkg_parse_trans_fetch = git
+pkg_parse_trans_repo = https://github.com/uwiger/parse_trans
+pkg_parse_trans_commit = master
+
+PACKAGES += parsexml
+pkg_parsexml_name = parsexml
+pkg_parsexml_description = Simple DOM XML parser with a convenient and very simple API
+pkg_parsexml_homepage = https://github.com/maxlapshin/parsexml
+pkg_parsexml_fetch = git
+pkg_parsexml_repo = https://github.com/maxlapshin/parsexml
+pkg_parsexml_commit = master
+
+PACKAGES += partisan
+pkg_partisan_name = partisan
+pkg_partisan_description = High-performance, high-scalability distributed computing with Erlang and Elixir.
+pkg_partisan_homepage = http://partisan.cloud
+pkg_partisan_fetch = git
+pkg_partisan_repo = https://github.com/lasp-lang/partisan
+pkg_partisan_commit = master
+
+PACKAGES += pegjs
+pkg_pegjs_name = pegjs
+pkg_pegjs_description = An implementation of PEG.js grammar for Erlang.
+pkg_pegjs_homepage = https://github.com/dmitriid/pegjs
+pkg_pegjs_fetch = git
+pkg_pegjs_repo = https://github.com/dmitriid/pegjs
+pkg_pegjs_commit = master
+
+PACKAGES += percept2
+pkg_percept2_name = percept2
+pkg_percept2_description = Concurrent profiling tool for Erlang
+pkg_percept2_homepage = https://github.com/huiqing/percept2
+pkg_percept2_fetch = git
+pkg_percept2_repo = https://github.com/huiqing/percept2
+pkg_percept2_commit = master
+
+PACKAGES += pgo
+pkg_pgo_name = pgo
+pkg_pgo_description = Erlang Postgres client and connection pool
+pkg_pgo_homepage = https://github.com/erleans/pgo.git
+pkg_pgo_fetch = git
+pkg_pgo_repo = https://github.com/erleans/pgo.git
+pkg_pgo_commit = master
+
+PACKAGES += pgsql
+pkg_pgsql_name = pgsql
+pkg_pgsql_description = Erlang PostgreSQL driver
+pkg_pgsql_homepage = https://github.com/semiocast/pgsql
+pkg_pgsql_fetch = git
+pkg_pgsql_repo = https://github.com/semiocast/pgsql
+pkg_pgsql_commit = master
+
+PACKAGES += pkgx
+pkg_pkgx_name = pkgx
+pkg_pkgx_description = Build .deb packages from Erlang releases
+pkg_pkgx_homepage = https://github.com/arjan/pkgx
+pkg_pkgx_fetch = git
+pkg_pkgx_repo = https://github.com/arjan/pkgx
+pkg_pkgx_commit = master
+
+PACKAGES += pkt
+pkg_pkt_name = pkt
+pkg_pkt_description = Erlang network protocol library
+pkg_pkt_homepage = https://github.com/msantos/pkt
+pkg_pkt_fetch = git
+pkg_pkt_repo = https://github.com/msantos/pkt
+pkg_pkt_commit = master
+
+PACKAGES += plain_fsm
+pkg_plain_fsm_name = plain_fsm
+pkg_plain_fsm_description = A behaviour/support library for writing plain Erlang FSMs.
+pkg_plain_fsm_homepage = https://github.com/uwiger/plain_fsm
+pkg_plain_fsm_fetch = git
+pkg_plain_fsm_repo = https://github.com/uwiger/plain_fsm
+pkg_plain_fsm_commit = master
+
+PACKAGES += plumtree
+pkg_plumtree_name = plumtree
+pkg_plumtree_description = Epidemic Broadcast Trees
+pkg_plumtree_homepage = https://github.com/helium/plumtree
+pkg_plumtree_fetch = git
+pkg_plumtree_repo = https://github.com/helium/plumtree
+pkg_plumtree_commit = master
+
+PACKAGES += pmod_transform
+pkg_pmod_transform_name = pmod_transform
+pkg_pmod_transform_description = Parse transform for parameterized modules
+pkg_pmod_transform_homepage = https://github.com/erlang/pmod_transform
+pkg_pmod_transform_fetch = git
+pkg_pmod_transform_repo = https://github.com/erlang/pmod_transform
+pkg_pmod_transform_commit = master
+
+PACKAGES += pobox
+pkg_pobox_name = pobox
+pkg_pobox_description = External buffer processes to protect against mailbox overflow in Erlang
+pkg_pobox_homepage = https://github.com/ferd/pobox
+pkg_pobox_fetch = git
+pkg_pobox_repo = https://github.com/ferd/pobox
+pkg_pobox_commit = master
+
+PACKAGES += ponos
+pkg_ponos_name = ponos
+pkg_ponos_description = ponos is a simple yet powerful load generator written in erlang
+pkg_ponos_homepage = https://github.com/klarna/ponos
+pkg_ponos_fetch = git
+pkg_ponos_repo = https://github.com/klarna/ponos
+pkg_ponos_commit = master
+
+PACKAGES += poolboy
+pkg_poolboy_name = poolboy
+pkg_poolboy_description = A hunky Erlang worker pool factory
+pkg_poolboy_homepage = https://github.com/devinus/poolboy
+pkg_poolboy_fetch = git
+pkg_poolboy_repo = https://github.com/devinus/poolboy
+pkg_poolboy_commit = master
+
+PACKAGES += pooler
+pkg_pooler_name = pooler
+pkg_pooler_description = An OTP Process Pool Application
+pkg_pooler_homepage = https://github.com/seth/pooler
+pkg_pooler_fetch = git
+pkg_pooler_repo = https://github.com/seth/pooler
+pkg_pooler_commit = master
+
+PACKAGES += pqueue
+pkg_pqueue_name = pqueue
+pkg_pqueue_description = Erlang Priority Queues
+pkg_pqueue_homepage = https://github.com/okeuday/pqueue
+pkg_pqueue_fetch = git
+pkg_pqueue_repo = https://github.com/okeuday/pqueue
+pkg_pqueue_commit = master
+
+PACKAGES += procket
+pkg_procket_name = procket
+pkg_procket_description = Erlang interface to low level socket operations
+pkg_procket_homepage = http://blog.listincomprehension.com/search/label/procket
+pkg_procket_fetch = git
+pkg_procket_repo = https://github.com/msantos/procket
+pkg_procket_commit = master
+
+PACKAGES += prometheus
+pkg_prometheus_name = prometheus
+pkg_prometheus_description = Prometheus.io client in Erlang
+pkg_prometheus_homepage = https://github.com/deadtrickster/prometheus.erl
+pkg_prometheus_fetch = git
+pkg_prometheus_repo = https://github.com/deadtrickster/prometheus.erl
+pkg_prometheus_commit = master
+
+PACKAGES += prop
+pkg_prop_name = prop
+pkg_prop_description = An Erlang code scaffolding and generator system.
+pkg_prop_homepage = https://github.com/nuex/prop
+pkg_prop_fetch = git
+pkg_prop_repo = https://github.com/nuex/prop
+pkg_prop_commit = master
+
+PACKAGES += proper
+pkg_proper_name = proper
+pkg_proper_description = PropEr: a QuickCheck-inspired property-based testing tool for Erlang.
+pkg_proper_homepage = http://proper.softlab.ntua.gr
+pkg_proper_fetch = git
+pkg_proper_repo = https://github.com/manopapad/proper
+pkg_proper_commit = master
+
+PACKAGES += props
+pkg_props_name = props
+pkg_props_description = Property structure library
+pkg_props_homepage = https://github.com/greyarea/props
+pkg_props_fetch = git
+pkg_props_repo = https://github.com/greyarea/props
+pkg_props_commit = master
+
+PACKAGES += protobuffs
+pkg_protobuffs_name = protobuffs
+pkg_protobuffs_description = An implementation of Google's Protocol Buffers for Erlang, based on ngerakines/erlang_protobuffs.
+pkg_protobuffs_homepage = https://github.com/basho/erlang_protobuffs
+pkg_protobuffs_fetch = git
+pkg_protobuffs_repo = https://github.com/basho/erlang_protobuffs
+pkg_protobuffs_commit = master
+
+PACKAGES += psycho
+pkg_psycho_name = psycho
+pkg_psycho_description = HTTP server that provides a WSGI-like interface for applications and middleware.
+pkg_psycho_homepage = https://github.com/gar1t/psycho
+pkg_psycho_fetch = git
+pkg_psycho_repo = https://github.com/gar1t/psycho
+pkg_psycho_commit = master
+
+PACKAGES += purity
+pkg_purity_name = purity
+pkg_purity_description = A side-effect analyzer for Erlang
+pkg_purity_homepage = https://github.com/mpitid/purity
+pkg_purity_fetch = git
+pkg_purity_repo = https://github.com/mpitid/purity
+pkg_purity_commit = master
+
+PACKAGES += push_service
+pkg_push_service_name = push_service
+pkg_push_service_description = Push service
+pkg_push_service_homepage = https://github.com/hairyhum/push_service
+pkg_push_service_fetch = git
+pkg_push_service_repo = https://github.com/hairyhum/push_service
+pkg_push_service_commit = master
+
+PACKAGES += qdate
+pkg_qdate_name = qdate
+pkg_qdate_description = Date, time, and timezone parsing, formatting, and conversion for Erlang.
+pkg_qdate_homepage = https://github.com/choptastic/qdate
+pkg_qdate_fetch = git
+pkg_qdate_repo = https://github.com/choptastic/qdate
+pkg_qdate_commit = master
+
+PACKAGES += qrcode
+pkg_qrcode_name = qrcode
+pkg_qrcode_description = QR Code encoder in Erlang
+pkg_qrcode_homepage = https://github.com/komone/qrcode
+pkg_qrcode_fetch = git
+pkg_qrcode_repo = https://github.com/komone/qrcode
+pkg_qrcode_commit = master
+
+PACKAGES += quest
+pkg_quest_name = quest
+pkg_quest_description = Learn Erlang through this set of challenges. An interactive system for getting to know Erlang.
+pkg_quest_homepage = https://github.com/eriksoe/ErlangQuest
+pkg_quest_fetch = git
+pkg_quest_repo = https://github.com/eriksoe/ErlangQuest
+pkg_quest_commit = master
+
+PACKAGES += quickrand
+pkg_quickrand_name = quickrand
+pkg_quickrand_description = Quick Erlang Random Number Generation
+pkg_quickrand_homepage = https://github.com/okeuday/quickrand
+pkg_quickrand_fetch = git
+pkg_quickrand_repo = https://github.com/okeuday/quickrand
+pkg_quickrand_commit = master
+
+PACKAGES += rabbit
+pkg_rabbit_name = rabbit
+pkg_rabbit_description = RabbitMQ Server
+pkg_rabbit_homepage = https://www.rabbitmq.com/
+pkg_rabbit_fetch = git
+pkg_rabbit_repo = https://github.com/rabbitmq/rabbitmq-server.git
+pkg_rabbit_commit = master
+
+PACKAGES += rabbit_exchange_type_riak
+pkg_rabbit_exchange_type_riak_name = rabbit_exchange_type_riak
+pkg_rabbit_exchange_type_riak_description = Custom RabbitMQ exchange type for sticking messages in Riak
+pkg_rabbit_exchange_type_riak_homepage = https://github.com/jbrisbin/riak-exchange
+pkg_rabbit_exchange_type_riak_fetch = git
+pkg_rabbit_exchange_type_riak_repo = https://github.com/jbrisbin/riak-exchange
+pkg_rabbit_exchange_type_riak_commit = master
+
+PACKAGES += rack
+pkg_rack_name = rack
+pkg_rack_description = Rack handler for erlang
+pkg_rack_homepage = https://github.com/erlyvideo/rack
+pkg_rack_fetch = git
+pkg_rack_repo = https://github.com/erlyvideo/rack
+pkg_rack_commit = master
+
+PACKAGES += radierl
+pkg_radierl_name = radierl
+pkg_radierl_description = RADIUS protocol stack implemented in Erlang.
+pkg_radierl_homepage = https://github.com/vances/radierl
+pkg_radierl_fetch = git
+pkg_radierl_repo = https://github.com/vances/radierl
+pkg_radierl_commit = master
+
+PACKAGES += rafter
+pkg_rafter_name = rafter
+pkg_rafter_description = An Erlang library application which implements the Raft consensus protocol
+pkg_rafter_homepage = https://github.com/andrewjstone/rafter
+pkg_rafter_fetch = git
+pkg_rafter_repo = https://github.com/andrewjstone/rafter
+pkg_rafter_commit = master
+
+PACKAGES += ranch
+pkg_ranch_name = ranch
+pkg_ranch_description = Socket acceptor pool for TCP protocols.
+pkg_ranch_homepage = http://ninenines.eu
+pkg_ranch_fetch = git
+pkg_ranch_repo = https://github.com/ninenines/ranch
+pkg_ranch_commit = 1.2.1
+
+PACKAGES += rbeacon
+pkg_rbeacon_name = rbeacon
+pkg_rbeacon_description = LAN discovery and presence in Erlang.
+pkg_rbeacon_homepage = https://github.com/refuge/rbeacon
+pkg_rbeacon_fetch = git
+pkg_rbeacon_repo = https://github.com/refuge/rbeacon
+pkg_rbeacon_commit = master
+
+PACKAGES += rebar
+pkg_rebar_name = rebar
+pkg_rebar_description = Erlang build tool that makes it easy to compile and test Erlang applications, port drivers and releases.
+pkg_rebar_homepage = http://www.rebar3.org
+pkg_rebar_fetch = git
+pkg_rebar_repo = https://github.com/rebar/rebar3
+pkg_rebar_commit = master
+
+PACKAGES += rebus
+pkg_rebus_name = rebus
+pkg_rebus_description = A stupid simple, internal, pub/sub event bus written in- and for Erlang.
+pkg_rebus_homepage = https://github.com/olle/rebus
+pkg_rebus_fetch = git
+pkg_rebus_repo = https://github.com/olle/rebus
+pkg_rebus_commit = master
+
+PACKAGES += rec2json
+pkg_rec2json_name = rec2json
+pkg_rec2json_description = Compile erlang record definitions into modules to convert them to/from json easily.
+pkg_rec2json_homepage = https://github.com/lordnull/rec2json
+pkg_rec2json_fetch = git
+pkg_rec2json_repo = https://github.com/lordnull/rec2json
+pkg_rec2json_commit = master
+
+PACKAGES += recon
+pkg_recon_name = recon
+pkg_recon_description = Collection of functions and scripts to debug Erlang in production.
+pkg_recon_homepage = https://github.com/ferd/recon
+pkg_recon_fetch = git
+pkg_recon_repo = https://github.com/ferd/recon
+pkg_recon_commit = master
+
+PACKAGES += record_info
+pkg_record_info_name = record_info
+pkg_record_info_description = Convert between record and proplist
+pkg_record_info_homepage = https://github.com/bipthelin/erlang-record_info
+pkg_record_info_fetch = git
+pkg_record_info_repo = https://github.com/bipthelin/erlang-record_info
+pkg_record_info_commit = master
+
+PACKAGES += redgrid
+pkg_redgrid_name = redgrid
+pkg_redgrid_description = automatic Erlang node discovery via redis
+pkg_redgrid_homepage = https://github.com/jkvor/redgrid
+pkg_redgrid_fetch = git
+pkg_redgrid_repo = https://github.com/jkvor/redgrid
+pkg_redgrid_commit = master
+
+PACKAGES += redo
+pkg_redo_name = redo
+pkg_redo_description = pipelined erlang redis client
+pkg_redo_homepage = https://github.com/jkvor/redo
+pkg_redo_fetch = git
+pkg_redo_repo = https://github.com/jkvor/redo
+pkg_redo_commit = master
+
+PACKAGES += reload_mk
+pkg_reload_mk_name = reload_mk
+pkg_reload_mk_description = Live reload plugin for erlang.mk.
+pkg_reload_mk_homepage = https://github.com/bullno1/reload.mk
+pkg_reload_mk_fetch = git
+pkg_reload_mk_repo = https://github.com/bullno1/reload.mk
+pkg_reload_mk_commit = master
+
+PACKAGES += reltool_util
+pkg_reltool_util_name = reltool_util
+pkg_reltool_util_description = Erlang reltool utility functionality application
+pkg_reltool_util_homepage = https://github.com/okeuday/reltool_util
+pkg_reltool_util_fetch = git
+pkg_reltool_util_repo = https://github.com/okeuday/reltool_util
+pkg_reltool_util_commit = master
+
+PACKAGES += relx
+pkg_relx_name = relx
+pkg_relx_description = Sane, simple release creation for Erlang
+pkg_relx_homepage = https://github.com/erlware/relx
+pkg_relx_fetch = git
+pkg_relx_repo = https://github.com/erlware/relx
+pkg_relx_commit = master
+
+PACKAGES += resource_discovery
+pkg_resource_discovery_name = resource_discovery
+pkg_resource_discovery_description = An application used to dynamically discover resources present in an Erlang node cluster.
+pkg_resource_discovery_homepage = http://erlware.org/
+pkg_resource_discovery_fetch = git
+pkg_resource_discovery_repo = https://github.com/erlware/resource_discovery
+pkg_resource_discovery_commit = master
+
+PACKAGES += restc
+pkg_restc_name = restc
+pkg_restc_description = Erlang Rest Client
+pkg_restc_homepage = https://github.com/kivra/restclient
+pkg_restc_fetch = git
+pkg_restc_repo = https://github.com/kivra/restclient
+pkg_restc_commit = master
+
+PACKAGES += rfc4627_jsonrpc
+pkg_rfc4627_jsonrpc_name = rfc4627_jsonrpc
+pkg_rfc4627_jsonrpc_description = Erlang RFC4627 (JSON) codec and JSON-RPC server implementation.
+pkg_rfc4627_jsonrpc_homepage = https://github.com/tonyg/erlang-rfc4627
+pkg_rfc4627_jsonrpc_fetch = git
+pkg_rfc4627_jsonrpc_repo = https://github.com/tonyg/erlang-rfc4627
+pkg_rfc4627_jsonrpc_commit = master
+
+PACKAGES += riak_control
+pkg_riak_control_name = riak_control
+pkg_riak_control_description = Webmachine-based administration interface for Riak.
+pkg_riak_control_homepage = https://github.com/basho/riak_control
+pkg_riak_control_fetch = git
+pkg_riak_control_repo = https://github.com/basho/riak_control
+pkg_riak_control_commit = master
+
+PACKAGES += riak_core
+pkg_riak_core_name = riak_core
+pkg_riak_core_description = Distributed systems infrastructure used by Riak.
+pkg_riak_core_homepage = https://github.com/basho/riak_core
+pkg_riak_core_fetch = git
+pkg_riak_core_repo = https://github.com/basho/riak_core
+pkg_riak_core_commit = master
+
+PACKAGES += riak_dt
+pkg_riak_dt_name = riak_dt
+pkg_riak_dt_description = Convergent replicated datatypes in Erlang
+pkg_riak_dt_homepage = https://github.com/basho/riak_dt
+pkg_riak_dt_fetch = git
+pkg_riak_dt_repo = https://github.com/basho/riak_dt
+pkg_riak_dt_commit = master
+
+PACKAGES += riak_ensemble
+pkg_riak_ensemble_name = riak_ensemble
+pkg_riak_ensemble_description = Multi-Paxos framework in Erlang
+pkg_riak_ensemble_homepage = https://github.com/basho/riak_ensemble
+pkg_riak_ensemble_fetch = git
+pkg_riak_ensemble_repo = https://github.com/basho/riak_ensemble
+pkg_riak_ensemble_commit = master
+
+PACKAGES += riak_kv
+pkg_riak_kv_name = riak_kv
+pkg_riak_kv_description = Riak Key/Value Store
+pkg_riak_kv_homepage = https://github.com/basho/riak_kv
+pkg_riak_kv_fetch = git
+pkg_riak_kv_repo = https://github.com/basho/riak_kv
+pkg_riak_kv_commit = master
+
+PACKAGES += riak_pg
+pkg_riak_pg_name = riak_pg
+pkg_riak_pg_description = Distributed process groups with riak_core.
+pkg_riak_pg_homepage = https://github.com/cmeiklejohn/riak_pg
+pkg_riak_pg_fetch = git
+pkg_riak_pg_repo = https://github.com/cmeiklejohn/riak_pg
+pkg_riak_pg_commit = master
+
+PACKAGES += riak_pipe
+pkg_riak_pipe_name = riak_pipe
+pkg_riak_pipe_description = Riak Pipelines
+pkg_riak_pipe_homepage = https://github.com/basho/riak_pipe
+pkg_riak_pipe_fetch = git
+pkg_riak_pipe_repo = https://github.com/basho/riak_pipe
+pkg_riak_pipe_commit = master
+
+PACKAGES += riak_sysmon
+pkg_riak_sysmon_name = riak_sysmon
+pkg_riak_sysmon_description = Simple OTP app for managing Erlang VM system_monitor event messages
+pkg_riak_sysmon_homepage = https://github.com/basho/riak_sysmon
+pkg_riak_sysmon_fetch = git
+pkg_riak_sysmon_repo = https://github.com/basho/riak_sysmon
+pkg_riak_sysmon_commit = master
+
+PACKAGES += riak_test
+pkg_riak_test_name = riak_test
+pkg_riak_test_description = I'm in your cluster, testing your riaks
+pkg_riak_test_homepage = https://github.com/basho/riak_test
+pkg_riak_test_fetch = git
+pkg_riak_test_repo = https://github.com/basho/riak_test
+pkg_riak_test_commit = master
+
+PACKAGES += riakc
+pkg_riakc_name = riakc
+pkg_riakc_description = Erlang clients for Riak.
+pkg_riakc_homepage = https://github.com/basho/riak-erlang-client
+pkg_riakc_fetch = git
+pkg_riakc_repo = https://github.com/basho/riak-erlang-client
+pkg_riakc_commit = master
+
+PACKAGES += riakhttpc
+pkg_riakhttpc_name = riakhttpc
+pkg_riakhttpc_description = Riak Erlang client using the HTTP interface
+pkg_riakhttpc_homepage = https://github.com/basho/riak-erlang-http-client
+pkg_riakhttpc_fetch = git
+pkg_riakhttpc_repo = https://github.com/basho/riak-erlang-http-client
+pkg_riakhttpc_commit = master
+
+PACKAGES += riaknostic
+pkg_riaknostic_name = riaknostic
+pkg_riaknostic_description = A diagnostic tool for Riak installations, to find common errors asap
+pkg_riaknostic_homepage = https://github.com/basho/riaknostic
+pkg_riaknostic_fetch = git
+pkg_riaknostic_repo = https://github.com/basho/riaknostic
+pkg_riaknostic_commit = master
+
+PACKAGES += riakpool
+pkg_riakpool_name = riakpool
+pkg_riakpool_description = erlang riak client pool
+pkg_riakpool_homepage = https://github.com/dweldon/riakpool
+pkg_riakpool_fetch = git
+pkg_riakpool_repo = https://github.com/dweldon/riakpool
+pkg_riakpool_commit = master
+
+PACKAGES += rivus_cep
+pkg_rivus_cep_name = rivus_cep
+pkg_rivus_cep_description = Complex event processing in Erlang
+pkg_rivus_cep_homepage = https://github.com/vascokk/rivus_cep
+pkg_rivus_cep_fetch = git
+pkg_rivus_cep_repo = https://github.com/vascokk/rivus_cep
+pkg_rivus_cep_commit = master
+
+PACKAGES += rlimit
+pkg_rlimit_name = rlimit
+pkg_rlimit_description = Magnus Klaar's rate limiter code from etorrent
+pkg_rlimit_homepage = https://github.com/jlouis/rlimit
+pkg_rlimit_fetch = git
+pkg_rlimit_repo = https://github.com/jlouis/rlimit
+pkg_rlimit_commit = master
+
+PACKAGES += rust_mk
+pkg_rust_mk_name = rust_mk
+pkg_rust_mk_description = Build Rust crates in an Erlang application
+pkg_rust_mk_homepage = https://github.com/goertzenator/rust.mk
+pkg_rust_mk_fetch = git
+pkg_rust_mk_repo = https://github.com/goertzenator/rust.mk
+pkg_rust_mk_commit = master
+
+PACKAGES += safetyvalve
+pkg_safetyvalve_name = safetyvalve
+pkg_safetyvalve_description = A safety valve for your erlang node
+pkg_safetyvalve_homepage = https://github.com/jlouis/safetyvalve
+pkg_safetyvalve_fetch = git
+pkg_safetyvalve_repo = https://github.com/jlouis/safetyvalve
+pkg_safetyvalve_commit = master
+
+PACKAGES += seestar
+pkg_seestar_name = seestar
+pkg_seestar_description = The Erlang client for Cassandra 1.2+ binary protocol
+pkg_seestar_homepage = https://github.com/iamaleksey/seestar
+pkg_seestar_fetch = git
+pkg_seestar_repo = https://github.com/iamaleksey/seestar
+pkg_seestar_commit = master
+
+PACKAGES += service
+pkg_service_name = service
+pkg_service_description = A minimal Erlang behavior for creating CloudI internal services
+pkg_service_homepage = http://cloudi.org/
+pkg_service_fetch = git
+pkg_service_repo = https://github.com/CloudI/service
+pkg_service_commit = master
+
+PACKAGES += setup
+pkg_setup_name = setup
+pkg_setup_description = Generic setup utility for Erlang-based systems
+pkg_setup_homepage = https://github.com/uwiger/setup
+pkg_setup_fetch = git
+pkg_setup_repo = https://github.com/uwiger/setup
+pkg_setup_commit = master
+
+PACKAGES += sext
+pkg_sext_name = sext
+pkg_sext_description = Sortable Erlang Term Serialization
+pkg_sext_homepage = https://github.com/uwiger/sext
+pkg_sext_fetch = git
+pkg_sext_repo = https://github.com/uwiger/sext
+pkg_sext_commit = master
+
+PACKAGES += sfmt
+pkg_sfmt_name = sfmt
+pkg_sfmt_description = SFMT pseudo random number generator for Erlang.
+pkg_sfmt_homepage = https://github.com/jj1bdx/sfmt-erlang
+pkg_sfmt_fetch = git
+pkg_sfmt_repo = https://github.com/jj1bdx/sfmt-erlang
+pkg_sfmt_commit = master
+
+PACKAGES += sgte
+pkg_sgte_name = sgte
+pkg_sgte_description = A simple Erlang Template Engine
+pkg_sgte_homepage = https://github.com/filippo/sgte
+pkg_sgte_fetch = git
+pkg_sgte_repo = https://github.com/filippo/sgte
+pkg_sgte_commit = master
+
+PACKAGES += sheriff
+pkg_sheriff_name = sheriff
+pkg_sheriff_description = Parse transform for type based validation.
+pkg_sheriff_homepage = http://ninenines.eu
+pkg_sheriff_fetch = git
+pkg_sheriff_repo = https://github.com/extend/sheriff
+pkg_sheriff_commit = master
+
+PACKAGES += shotgun
+pkg_shotgun_name = shotgun
+pkg_shotgun_description = better than just a gun
+pkg_shotgun_homepage = https://github.com/inaka/shotgun
+pkg_shotgun_fetch = git
+pkg_shotgun_repo = https://github.com/inaka/shotgun
+pkg_shotgun_commit = master
+
+PACKAGES += sidejob
+pkg_sidejob_name = sidejob
+pkg_sidejob_description = Parallel worker and capacity limiting library for Erlang
+pkg_sidejob_homepage = https://github.com/basho/sidejob
+pkg_sidejob_fetch = git
+pkg_sidejob_repo = https://github.com/basho/sidejob
+pkg_sidejob_commit = master
+
+PACKAGES += sieve
+pkg_sieve_name = sieve
+pkg_sieve_description = sieve is a simple TCP routing proxy (layer 7) in erlang
+pkg_sieve_homepage = https://github.com/benoitc/sieve
+pkg_sieve_fetch = git
+pkg_sieve_repo = https://github.com/benoitc/sieve
+pkg_sieve_commit = master
+
+PACKAGES += sighandler
+pkg_sighandler_name = sighandler
+pkg_sighandler_description = Handle UNIX signals in Erlang
+pkg_sighandler_homepage = https://github.com/jkingsbery/sighandler
+pkg_sighandler_fetch = git
+pkg_sighandler_repo = https://github.com/jkingsbery/sighandler
+pkg_sighandler_commit = master
+
+PACKAGES += simhash
+pkg_simhash_name = simhash
+pkg_simhash_description = Simhashing for Erlang -- hashing algorithm to find near-duplicates in binary data.
+pkg_simhash_homepage = https://github.com/ferd/simhash
+pkg_simhash_fetch = git
+pkg_simhash_repo = https://github.com/ferd/simhash
+pkg_simhash_commit = master
+
+PACKAGES += simple_bridge
+pkg_simple_bridge_name = simple_bridge
+pkg_simple_bridge_description = A simple, standardized interface library to Erlang HTTP Servers.
+pkg_simple_bridge_homepage = https://github.com/nitrogen/simple_bridge
+pkg_simple_bridge_fetch = git
+pkg_simple_bridge_repo = https://github.com/nitrogen/simple_bridge
+pkg_simple_bridge_commit = master
+
+PACKAGES += simple_oauth2
+pkg_simple_oauth2_name = simple_oauth2
+pkg_simple_oauth2_description = Simple erlang OAuth2 client module for any http server framework (Google, Facebook, Yandex, Vkontakte are preconfigured)
+pkg_simple_oauth2_homepage = https://github.com/virtan/simple_oauth2
+pkg_simple_oauth2_fetch = git
+pkg_simple_oauth2_repo = https://github.com/virtan/simple_oauth2
+pkg_simple_oauth2_commit = master
+
+PACKAGES += skel
+pkg_skel_name = skel
+pkg_skel_description = A Streaming Process-based Skeleton Library for Erlang
+pkg_skel_homepage = https://github.com/ParaPhrase/skel
+pkg_skel_fetch = git
+pkg_skel_repo = https://github.com/ParaPhrase/skel
+pkg_skel_commit = master
+
+PACKAGES += slack
+pkg_slack_name = slack
+pkg_slack_description = Minimal slack notification OTP library.
+pkg_slack_homepage = https://github.com/DonBranson/slack
+pkg_slack_fetch = git
+pkg_slack_repo = https://github.com/DonBranson/slack.git
+pkg_slack_commit = master
+
+PACKAGES += smother
+pkg_smother_name = smother
+pkg_smother_description = Extended code coverage metrics for Erlang.
+pkg_smother_homepage = https://ramsay-t.github.io/Smother/
+pkg_smother_fetch = git
+pkg_smother_repo = https://github.com/ramsay-t/Smother
+pkg_smother_commit = master
+
+PACKAGES += snappyer
+pkg_snappyer_name = snappyer
+pkg_snappyer_description = Snappy as nif for Erlang
+pkg_snappyer_homepage = https://github.com/zmstone/snappyer
+pkg_snappyer_fetch = git
+pkg_snappyer_repo = https://github.com/zmstone/snappyer.git
+pkg_snappyer_commit = master
+
+PACKAGES += social
+pkg_social_name = social
+pkg_social_description = Cowboy handler for social login via OAuth2 providers
+pkg_social_homepage = https://github.com/dvv/social
+pkg_social_fetch = git
+pkg_social_repo = https://github.com/dvv/social
+pkg_social_commit = master
+
+PACKAGES += spapi_router
+pkg_spapi_router_name = spapi_router
+pkg_spapi_router_description = Partially-connected Erlang clustering
+pkg_spapi_router_homepage = https://github.com/spilgames/spapi-router
+pkg_spapi_router_fetch = git
+pkg_spapi_router_repo = https://github.com/spilgames/spapi-router
+pkg_spapi_router_commit = master
+
+PACKAGES += sqerl
+pkg_sqerl_name = sqerl
+pkg_sqerl_description = An Erlang-flavoured SQL DSL
+pkg_sqerl_homepage = https://github.com/hairyhum/sqerl
+pkg_sqerl_fetch = git
+pkg_sqerl_repo = https://github.com/hairyhum/sqerl
+pkg_sqerl_commit = master
+
+PACKAGES += srly
+pkg_srly_name = srly
+pkg_srly_description = Native Erlang Unix serial interface
+pkg_srly_homepage = https://github.com/msantos/srly
+pkg_srly_fetch = git
+pkg_srly_repo = https://github.com/msantos/srly
+pkg_srly_commit = master
+
+PACKAGES += sshrpc
+pkg_sshrpc_name = sshrpc
+pkg_sshrpc_description = Erlang SSH RPC module (experimental)
+pkg_sshrpc_homepage = https://github.com/jj1bdx/sshrpc
+pkg_sshrpc_fetch = git
+pkg_sshrpc_repo = https://github.com/jj1bdx/sshrpc
+pkg_sshrpc_commit = master
+
+PACKAGES += stable
+pkg_stable_name = stable
+pkg_stable_description = Library of assorted helpers for Cowboy web server.
+pkg_stable_homepage = https://github.com/dvv/stable
+pkg_stable_fetch = git
+pkg_stable_repo = https://github.com/dvv/stable
+pkg_stable_commit = master
+
+PACKAGES += statebox
+pkg_statebox_name = statebox
+pkg_statebox_description = Erlang state monad with merge/conflict-resolution capabilities. Useful for Riak.
+pkg_statebox_homepage = https://github.com/mochi/statebox
+pkg_statebox_fetch = git
+pkg_statebox_repo = https://github.com/mochi/statebox
+pkg_statebox_commit = master
+
+PACKAGES += statebox_riak
+pkg_statebox_riak_name = statebox_riak
+pkg_statebox_riak_description = Convenience library that makes it easier to use statebox with riak, extracted from best practices in our production code at Mochi Media.
+pkg_statebox_riak_homepage = https://github.com/mochi/statebox_riak
+pkg_statebox_riak_fetch = git
+pkg_statebox_riak_repo = https://github.com/mochi/statebox_riak
+pkg_statebox_riak_commit = master
+
+PACKAGES += statman
+pkg_statman_name = statman
+pkg_statman_description = Efficiently collect massive volumes of metrics inside the Erlang VM
+pkg_statman_homepage = https://github.com/knutin/statman
+pkg_statman_fetch = git
+pkg_statman_repo = https://github.com/knutin/statman
+pkg_statman_commit = master
+
+PACKAGES += statsderl
+pkg_statsderl_name = statsderl
+pkg_statsderl_description = StatsD client (erlang)
+pkg_statsderl_homepage = https://github.com/lpgauth/statsderl
+pkg_statsderl_fetch = git
+pkg_statsderl_repo = https://github.com/lpgauth/statsderl
+pkg_statsderl_commit = master
+
+PACKAGES += stdinout_pool
+pkg_stdinout_pool_name = stdinout_pool
+pkg_stdinout_pool_description = stdinout_pool : stuff goes in, stuff goes out. there's never any miscommunication.
+pkg_stdinout_pool_homepage = https://github.com/mattsta/erlang-stdinout-pool
+pkg_stdinout_pool_fetch = git
+pkg_stdinout_pool_repo = https://github.com/mattsta/erlang-stdinout-pool
+pkg_stdinout_pool_commit = master
+
+PACKAGES += stockdb
+pkg_stockdb_name = stockdb
+pkg_stockdb_description = Database for storing Stock Exchange quotes in erlang
+pkg_stockdb_homepage = https://github.com/maxlapshin/stockdb
+pkg_stockdb_fetch = git
+pkg_stockdb_repo = https://github.com/maxlapshin/stockdb
+pkg_stockdb_commit = master
+
+PACKAGES += stripe
+pkg_stripe_name = stripe
+pkg_stripe_description = Erlang interface to the stripe.com API
+pkg_stripe_homepage = https://github.com/mattsta/stripe-erlang
+pkg_stripe_fetch = git
+pkg_stripe_repo = https://github.com/mattsta/stripe-erlang
+pkg_stripe_commit = v1
+
+PACKAGES += subproc
+pkg_subproc_name = subproc
+pkg_subproc_description = unix subprocess manager with {active,once|false} modes
+pkg_subproc_homepage = http://dozzie.jarowit.net/trac/wiki/subproc
+pkg_subproc_fetch = git
+pkg_subproc_repo = https://github.com/dozzie/subproc
+pkg_subproc_commit = v0.1.0
+
+PACKAGES += supervisor3
+pkg_supervisor3_name = supervisor3
+pkg_supervisor3_description = OTP supervisor with additional strategies
+pkg_supervisor3_homepage = https://github.com/klarna/supervisor3
+pkg_supervisor3_fetch = git
+pkg_supervisor3_repo = https://github.com/klarna/supervisor3.git
+pkg_supervisor3_commit = master
+
+PACKAGES += surrogate
+pkg_surrogate_name = surrogate
+pkg_surrogate_description = Proxy server written in erlang. Supports reverse proxy load balancing and forward proxy with http (including CONNECT), socks4, socks5, and transparent proxy modes.
+pkg_surrogate_homepage = https://github.com/skruger/Surrogate
+pkg_surrogate_fetch = git
+pkg_surrogate_repo = https://github.com/skruger/Surrogate
+pkg_surrogate_commit = master
+
+PACKAGES += swab
+pkg_swab_name = swab
+pkg_swab_description = General purpose buffer handling module
+pkg_swab_homepage = https://github.com/crownedgrouse/swab
+pkg_swab_fetch = git
+pkg_swab_repo = https://github.com/crownedgrouse/swab
+pkg_swab_commit = master
+
+PACKAGES += swarm
+pkg_swarm_name = swarm
+pkg_swarm_description = Fast and simple acceptor pool for Erlang
+pkg_swarm_homepage = https://github.com/jeremey/swarm
+pkg_swarm_fetch = git
+pkg_swarm_repo = https://github.com/jeremey/swarm
+pkg_swarm_commit = master
+
+PACKAGES += switchboard
+pkg_switchboard_name = switchboard
+pkg_switchboard_description = A framework for processing email using worker plugins.
+pkg_switchboard_homepage = https://github.com/thusfresh/switchboard
+pkg_switchboard_fetch = git
+pkg_switchboard_repo = https://github.com/thusfresh/switchboard
+pkg_switchboard_commit = master
+
+PACKAGES += syn
+pkg_syn_name = syn
+pkg_syn_description = A global Process Registry and Process Group manager for Erlang.
+pkg_syn_homepage = https://github.com/ostinelli/syn
+pkg_syn_fetch = git
+pkg_syn_repo = https://github.com/ostinelli/syn
+pkg_syn_commit = master
+
+PACKAGES += sync
+pkg_sync_name = sync
+pkg_sync_description = On-the-fly recompiling and reloading in Erlang.
+pkg_sync_homepage = https://github.com/rustyio/sync
+pkg_sync_fetch = git
+pkg_sync_repo = https://github.com/rustyio/sync
+pkg_sync_commit = master
+
+PACKAGES += syntaxerl
+pkg_syntaxerl_name = syntaxerl
+pkg_syntaxerl_description = Syntax checker for Erlang
+pkg_syntaxerl_homepage = https://github.com/ten0s/syntaxerl
+pkg_syntaxerl_fetch = git
+pkg_syntaxerl_repo = https://github.com/ten0s/syntaxerl
+pkg_syntaxerl_commit = master
+
+PACKAGES += syslog
+pkg_syslog_name = syslog
+pkg_syslog_description = Erlang port driver for interacting with syslog via syslog(3)
+pkg_syslog_homepage = https://github.com/Vagabond/erlang-syslog
+pkg_syslog_fetch = git
+pkg_syslog_repo = https://github.com/Vagabond/erlang-syslog
+pkg_syslog_commit = master
+
+PACKAGES += taskforce
+pkg_taskforce_name = taskforce
+pkg_taskforce_description = Erlang worker pools for controlled parallelisation of arbitrary tasks.
+pkg_taskforce_homepage = https://github.com/g-andrade/taskforce
+pkg_taskforce_fetch = git
+pkg_taskforce_repo = https://github.com/g-andrade/taskforce
+pkg_taskforce_commit = master
+
+PACKAGES += tddreloader
+pkg_tddreloader_name = tddreloader
+pkg_tddreloader_description = Shell utility for recompiling, reloading, and testing code as it changes
+pkg_tddreloader_homepage = https://github.com/version2beta/tddreloader
+pkg_tddreloader_fetch = git
+pkg_tddreloader_repo = https://github.com/version2beta/tddreloader
+pkg_tddreloader_commit = master
+
+PACKAGES += tempo
+pkg_tempo_name = tempo
+pkg_tempo_description = NIF-based date and time parsing and formatting for Erlang.
+pkg_tempo_homepage = https://github.com/selectel/tempo
+pkg_tempo_fetch = git
+pkg_tempo_repo = https://github.com/selectel/tempo
+pkg_tempo_commit = master
+
+PACKAGES += ticktick
+pkg_ticktick_name = ticktick
+pkg_ticktick_description = Ticktick is an ID generator for messaging services.
+pkg_ticktick_homepage = https://github.com/ericliang/ticktick
+pkg_ticktick_fetch = git
+pkg_ticktick_repo = https://github.com/ericliang/ticktick
+pkg_ticktick_commit = master
+
+PACKAGES += tinymq
+pkg_tinymq_name = tinymq
+pkg_tinymq_description = TinyMQ - a diminutive, in-memory message queue
+pkg_tinymq_homepage = https://github.com/ChicagoBoss/tinymq
+pkg_tinymq_fetch = git
+pkg_tinymq_repo = https://github.com/ChicagoBoss/tinymq
+pkg_tinymq_commit = master
+
+PACKAGES += tinymt
+pkg_tinymt_name = tinymt
+pkg_tinymt_description = TinyMT pseudo random number generator for Erlang.
+pkg_tinymt_homepage = https://github.com/jj1bdx/tinymt-erlang
+pkg_tinymt_fetch = git
+pkg_tinymt_repo = https://github.com/jj1bdx/tinymt-erlang
+pkg_tinymt_commit = master
+
+PACKAGES += tirerl
+pkg_tirerl_name = tirerl
+pkg_tirerl_description = Erlang interface to Elasticsearch
+pkg_tirerl_homepage = https://github.com/inaka/tirerl
+pkg_tirerl_fetch = git
+pkg_tirerl_repo = https://github.com/inaka/tirerl
+pkg_tirerl_commit = master
+
+PACKAGES += toml
+pkg_toml_name = toml
+pkg_toml_description = TOML (0.4.0) config parser
+pkg_toml_homepage = http://dozzie.jarowit.net/trac/wiki/TOML
+pkg_toml_fetch = git
+pkg_toml_repo = https://github.com/dozzie/toml
+pkg_toml_commit = v0.2.0
+
+PACKAGES += traffic_tools
+pkg_traffic_tools_name = traffic_tools
+pkg_traffic_tools_description = Simple traffic limiting library
+pkg_traffic_tools_homepage = https://github.com/systra/traffic_tools
+pkg_traffic_tools_fetch = git
+pkg_traffic_tools_repo = https://github.com/systra/traffic_tools
+pkg_traffic_tools_commit = master
+
+PACKAGES += trails
+pkg_trails_name = trails
+pkg_trails_description = A couple of improvements over Cowboy Routes
+pkg_trails_homepage = http://inaka.github.io/cowboy-trails/
+pkg_trails_fetch = git
+pkg_trails_repo = https://github.com/inaka/cowboy-trails
+pkg_trails_commit = master
+
+PACKAGES += trane
+pkg_trane_name = trane
+pkg_trane_description = SAX style broken HTML parser in Erlang
+pkg_trane_homepage = https://github.com/massemanet/trane
+pkg_trane_fetch = git
+pkg_trane_repo = https://github.com/massemanet/trane
+pkg_trane_commit = master
+
+PACKAGES += transit
+pkg_transit_name = transit
+pkg_transit_description = transit format for erlang
+pkg_transit_homepage = https://github.com/isaiah/transit-erlang
+pkg_transit_fetch = git
+pkg_transit_repo = https://github.com/isaiah/transit-erlang
+pkg_transit_commit = master
+
+PACKAGES += trie
+pkg_trie_name = trie
+pkg_trie_description = Erlang Trie Implementation
+pkg_trie_homepage = https://github.com/okeuday/trie
+pkg_trie_fetch = git
+pkg_trie_repo = https://github.com/okeuday/trie
+pkg_trie_commit = master
+
+PACKAGES += triq
+pkg_triq_name = triq
+pkg_triq_description = Trifork QuickCheck
+pkg_triq_homepage = https://triq.gitlab.io
+pkg_triq_fetch = git
+pkg_triq_repo = https://gitlab.com/triq/triq.git
+pkg_triq_commit = master
+
+PACKAGES += tunctl
+pkg_tunctl_name = tunctl
+pkg_tunctl_description = Erlang TUN/TAP interface
+pkg_tunctl_homepage = https://github.com/msantos/tunctl
+pkg_tunctl_fetch = git
+pkg_tunctl_repo = https://github.com/msantos/tunctl
+pkg_tunctl_commit = master
+
+PACKAGES += twerl
+pkg_twerl_name = twerl
+pkg_twerl_description = Erlang client for the Twitter Streaming API
+pkg_twerl_homepage = https://github.com/lucaspiller/twerl
+pkg_twerl_fetch = git
+pkg_twerl_repo = https://github.com/lucaspiller/twerl
+pkg_twerl_commit = oauth
+
+PACKAGES += twitter_erlang
+pkg_twitter_erlang_name = twitter_erlang
+pkg_twitter_erlang_description = An Erlang twitter client
+pkg_twitter_erlang_homepage = https://github.com/ngerakines/erlang_twitter
+pkg_twitter_erlang_fetch = git
+pkg_twitter_erlang_repo = https://github.com/ngerakines/erlang_twitter
+pkg_twitter_erlang_commit = master
+
+PACKAGES += ucol_nif
+pkg_ucol_nif_name = ucol_nif
+pkg_ucol_nif_description = ICU based collation Erlang module
+pkg_ucol_nif_homepage = https://github.com/refuge/ucol_nif
+pkg_ucol_nif_fetch = git
+pkg_ucol_nif_repo = https://github.com/refuge/ucol_nif
+pkg_ucol_nif_commit = master
+
+PACKAGES += unicorn
+pkg_unicorn_name = unicorn
+pkg_unicorn_description = Generic configuration server
+pkg_unicorn_homepage = https://github.com/shizzard/unicorn
+pkg_unicorn_fetch = git
+pkg_unicorn_repo = https://github.com/shizzard/unicorn
+pkg_unicorn_commit = master
+
+PACKAGES += unsplit
+pkg_unsplit_name = unsplit
+pkg_unsplit_description = Resolves conflicts in Mnesia after network splits
+pkg_unsplit_homepage = https://github.com/uwiger/unsplit
+pkg_unsplit_fetch = git
+pkg_unsplit_repo = https://github.com/uwiger/unsplit
+pkg_unsplit_commit = master
+
+PACKAGES += uuid
+pkg_uuid_name = uuid
+pkg_uuid_description = Erlang UUID Implementation
+pkg_uuid_homepage = https://github.com/okeuday/uuid
+pkg_uuid_fetch = git
+pkg_uuid_repo = https://github.com/okeuday/uuid
+pkg_uuid_commit = master
+
+PACKAGES += ux
+pkg_ux_name = ux
+pkg_ux_description = Unicode eXtention for Erlang (Strings, Collation)
+pkg_ux_homepage = https://github.com/erlang-unicode/ux
+pkg_ux_fetch = git
+pkg_ux_repo = https://github.com/erlang-unicode/ux
+pkg_ux_commit = master
+
+PACKAGES += vert
+pkg_vert_name = vert
+pkg_vert_description = Erlang binding to libvirt virtualization API
+pkg_vert_homepage = https://github.com/msantos/erlang-libvirt
+pkg_vert_fetch = git
+pkg_vert_repo = https://github.com/msantos/erlang-libvirt
+pkg_vert_commit = master
+
+PACKAGES += verx
+pkg_verx_name = verx
+pkg_verx_description = Erlang implementation of the libvirtd remote protocol
+pkg_verx_homepage = https://github.com/msantos/verx
+pkg_verx_fetch = git
+pkg_verx_repo = https://github.com/msantos/verx
+pkg_verx_commit = master
+
+PACKAGES += vmq_acl
+pkg_vmq_acl_name = vmq_acl
+pkg_vmq_acl_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_acl_homepage = https://verne.mq/
+pkg_vmq_acl_fetch = git
+pkg_vmq_acl_repo = https://github.com/erlio/vmq_acl
+pkg_vmq_acl_commit = master
+
+PACKAGES += vmq_bridge
+pkg_vmq_bridge_name = vmq_bridge
+pkg_vmq_bridge_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_bridge_homepage = https://verne.mq/
+pkg_vmq_bridge_fetch = git
+pkg_vmq_bridge_repo = https://github.com/erlio/vmq_bridge
+pkg_vmq_bridge_commit = master
+
+PACKAGES += vmq_graphite
+pkg_vmq_graphite_name = vmq_graphite
+pkg_vmq_graphite_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_graphite_homepage = https://verne.mq/
+pkg_vmq_graphite_fetch = git
+pkg_vmq_graphite_repo = https://github.com/erlio/vmq_graphite
+pkg_vmq_graphite_commit = master
+
+PACKAGES += vmq_passwd
+pkg_vmq_passwd_name = vmq_passwd
+pkg_vmq_passwd_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_passwd_homepage = https://verne.mq/
+pkg_vmq_passwd_fetch = git
+pkg_vmq_passwd_repo = https://github.com/erlio/vmq_passwd
+pkg_vmq_passwd_commit = master
+
+PACKAGES += vmq_server
+pkg_vmq_server_name = vmq_server
+pkg_vmq_server_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_server_homepage = https://verne.mq/
+pkg_vmq_server_fetch = git
+pkg_vmq_server_repo = https://github.com/erlio/vmq_server
+pkg_vmq_server_commit = master
+
+PACKAGES += vmq_snmp
+pkg_vmq_snmp_name = vmq_snmp
+pkg_vmq_snmp_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_snmp_homepage = https://verne.mq/
+pkg_vmq_snmp_fetch = git
+pkg_vmq_snmp_repo = https://github.com/erlio/vmq_snmp
+pkg_vmq_snmp_commit = master
+
+PACKAGES += vmq_systree
+pkg_vmq_systree_name = vmq_systree
+pkg_vmq_systree_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_systree_homepage = https://verne.mq/
+pkg_vmq_systree_fetch = git
+pkg_vmq_systree_repo = https://github.com/erlio/vmq_systree
+pkg_vmq_systree_commit = master
+
+PACKAGES += vmstats
+pkg_vmstats_name = vmstats
+pkg_vmstats_description = tiny Erlang app that works in conjunction with statsderl in order to generate information on the Erlang VM for graphite logs.
+pkg_vmstats_homepage = https://github.com/ferd/vmstats
+pkg_vmstats_fetch = git
+pkg_vmstats_repo = https://github.com/ferd/vmstats
+pkg_vmstats_commit = master
+
+PACKAGES += walrus
+pkg_walrus_name = walrus
+pkg_walrus_description = Walrus - Mustache-like Templating
+pkg_walrus_homepage = https://github.com/devinus/walrus
+pkg_walrus_fetch = git
+pkg_walrus_repo = https://github.com/devinus/walrus
+pkg_walrus_commit = master
+
+PACKAGES += webmachine
+pkg_webmachine_name = webmachine
+pkg_webmachine_description = A REST-based system for building web applications.
+pkg_webmachine_homepage = https://github.com/basho/webmachine
+pkg_webmachine_fetch = git
+pkg_webmachine_repo = https://github.com/basho/webmachine
+pkg_webmachine_commit = master
+
+PACKAGES += websocket_client
+pkg_websocket_client_name = websocket_client
+pkg_websocket_client_description = Erlang websocket client (ws and wss supported)
+pkg_websocket_client_homepage = https://github.com/jeremyong/websocket_client
+pkg_websocket_client_fetch = git
+pkg_websocket_client_repo = https://github.com/jeremyong/websocket_client
+pkg_websocket_client_commit = master
+
+PACKAGES += worker_pool
+pkg_worker_pool_name = worker_pool
+pkg_worker_pool_description = a simple erlang worker pool
+pkg_worker_pool_homepage = https://github.com/inaka/worker_pool
+pkg_worker_pool_fetch = git
+pkg_worker_pool_repo = https://github.com/inaka/worker_pool
+pkg_worker_pool_commit = master
+
+PACKAGES += wrangler
+pkg_wrangler_name = wrangler
+pkg_wrangler_description = Import of the Wrangler svn repository.
+pkg_wrangler_homepage = http://www.cs.kent.ac.uk/projects/wrangler/Home.html
+pkg_wrangler_fetch = git
+pkg_wrangler_repo = https://github.com/RefactoringTools/wrangler
+pkg_wrangler_commit = master
+
+PACKAGES += wsock
+pkg_wsock_name = wsock
+pkg_wsock_description = Erlang library to build WebSocket clients and servers
+pkg_wsock_homepage = https://github.com/madtrick/wsock
+pkg_wsock_fetch = git
+pkg_wsock_repo = https://github.com/madtrick/wsock
+pkg_wsock_commit = master
+
+PACKAGES += xhttpc
+pkg_xhttpc_name = xhttpc
+pkg_xhttpc_description = Extensible HTTP Client for Erlang
+pkg_xhttpc_homepage = https://github.com/seriyps/xhttpc
+pkg_xhttpc_fetch = git
+pkg_xhttpc_repo = https://github.com/seriyps/xhttpc
+pkg_xhttpc_commit = master
+
+PACKAGES += xref_runner
+pkg_xref_runner_name = xref_runner
+pkg_xref_runner_description = Erlang Xref Runner (inspired by rebar xref)
+pkg_xref_runner_homepage = https://github.com/inaka/xref_runner
+pkg_xref_runner_fetch = git
+pkg_xref_runner_repo = https://github.com/inaka/xref_runner
+pkg_xref_runner_commit = master
+
+PACKAGES += yamerl
+pkg_yamerl_name = yamerl
+pkg_yamerl_description = YAML 1.2 parser in pure Erlang
+pkg_yamerl_homepage = https://github.com/yakaz/yamerl
+pkg_yamerl_fetch = git
+pkg_yamerl_repo = https://github.com/yakaz/yamerl
+pkg_yamerl_commit = master
+
+PACKAGES += yamler
+pkg_yamler_name = yamler
+pkg_yamler_description = libyaml-based yaml loader for Erlang
+pkg_yamler_homepage = https://github.com/goertzenator/yamler
+pkg_yamler_fetch = git
+pkg_yamler_repo = https://github.com/goertzenator/yamler
+pkg_yamler_commit = master
+
+PACKAGES += yaws
+pkg_yaws_name = yaws
+pkg_yaws_description = Yaws webserver
+pkg_yaws_homepage = http://yaws.hyber.org
+pkg_yaws_fetch = git
+pkg_yaws_repo = https://github.com/klacke/yaws
+pkg_yaws_commit = master
+
+PACKAGES += zab_engine
+pkg_zab_engine_name = zab_engine
+pkg_zab_engine_description = ZAB protocol implemented in Erlang
+pkg_zab_engine_homepage = https://github.com/xinmingyao/zab_engine
+pkg_zab_engine_fetch = git
+pkg_zab_engine_repo = https://github.com/xinmingyao/zab_engine
+pkg_zab_engine_commit = master
+
+PACKAGES += zabbix_sender
+pkg_zabbix_sender_name = zabbix_sender
+pkg_zabbix_sender_description = Zabbix trapper for sending data to Zabbix in pure Erlang
+pkg_zabbix_sender_homepage = https://github.com/stalkermn/zabbix_sender
+pkg_zabbix_sender_fetch = git
+pkg_zabbix_sender_repo = https://github.com/stalkermn/zabbix_sender.git
+pkg_zabbix_sender_commit = master
+
+PACKAGES += zeta
+pkg_zeta_name = zeta
+pkg_zeta_description = HTTP access log parser in Erlang
+pkg_zeta_homepage = https://github.com/s1n4/zeta
+pkg_zeta_fetch = git
+pkg_zeta_repo = https://github.com/s1n4/zeta
+pkg_zeta_commit = master
+
+PACKAGES += zippers
+pkg_zippers_name = zippers
+pkg_zippers_description = A library for functional zipper data structures in Erlang.
+pkg_zippers_homepage = https://github.com/ferd/zippers
+pkg_zippers_fetch = git
+pkg_zippers_repo = https://github.com/ferd/zippers
+pkg_zippers_commit = master
+
+PACKAGES += zlists
+pkg_zlists_name = zlists
+pkg_zlists_description = Erlang lazy lists library.
+pkg_zlists_homepage = https://github.com/vjache/erlang-zlists
+pkg_zlists_fetch = git
+pkg_zlists_repo = https://github.com/vjache/erlang-zlists
+pkg_zlists_commit = master
+
+PACKAGES += zraft_lib
+pkg_zraft_lib_name = zraft_lib
+pkg_zraft_lib_description = Erlang raft consensus protocol implementation
+pkg_zraft_lib_homepage = https://github.com/dreyk/zraft_lib
+pkg_zraft_lib_fetch = git
+pkg_zraft_lib_repo = https://github.com/dreyk/zraft_lib
+pkg_zraft_lib_commit = master
+
+PACKAGES += zucchini
+pkg_zucchini_name = zucchini
+pkg_zucchini_description = An Erlang INI parser
+pkg_zucchini_homepage = https://github.com/devinus/zucchini
+pkg_zucchini_fetch = git
+pkg_zucchini_repo = https://github.com/devinus/zucchini
+pkg_zucchini_commit = master
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: search
+
+define pkg_print
+ $(verbose) printf "%s\n" \
+ $(if $(call core_eq,$(1),$(pkg_$(1)_name)),,"Pkg name: $(1)") \
+ "App name: $(pkg_$(1)_name)" \
+ "Description: $(pkg_$(1)_description)" \
+ "Home page: $(pkg_$(1)_homepage)" \
+ "Fetch with: $(pkg_$(1)_fetch)" \
+ "Repository: $(pkg_$(1)_repo)" \
+ "Commit: $(pkg_$(1)_commit)" \
+ ""
+
+endef
+
+search:
+ifdef q
+ $(foreach p,$(PACKAGES), \
+ $(if $(findstring $(call core_lc,$(q)),$(call core_lc,$(pkg_$(p)_name) $(pkg_$(p)_description))), \
+ $(call pkg_print,$(p))))
+else
+ $(foreach p,$(PACKAGES),$(call pkg_print,$(p)))
+endif
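+
+# Example (illustrative): list every package whose name or description
+# contains the given case-insensitive substring; without q, all packages
+# are printed:
+#   $ make search q=pool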
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-deps clean-tmp-deps.log
+
+# Configuration.
+
+ifdef OTP_DEPS
+$(warning The variable OTP_DEPS is deprecated in favor of LOCAL_DEPS.)
+endif
+
+IGNORE_DEPS ?=
+export IGNORE_DEPS
+
+APPS_DIR ?= $(CURDIR)/apps
+export APPS_DIR
+
+DEPS_DIR ?= $(CURDIR)/deps
+export DEPS_DIR
+
+REBAR_DEPS_DIR = $(DEPS_DIR)
+export REBAR_DEPS_DIR
+
+REBAR_GIT ?= https://github.com/rebar/rebar
+REBAR_COMMIT ?= 576e12171ab8d69b048b827b92aa65d067deea01
+
+# External "early" plugins (see core/plugins.mk for regular plugins).
+# Both early and regular plugins use the core_dep_plugin macro.
+
+define core_dep_plugin
+ifeq ($(2),$(PROJECT))
+-include $$(patsubst $(PROJECT)/%,%,$(1))
+else
+-include $(DEPS_DIR)/$(1)
+
+$(DEPS_DIR)/$(1): $(DEPS_DIR)/$(2) ;
+endif
+endef
+
+DEP_EARLY_PLUGINS ?=
+
+$(foreach p,$(DEP_EARLY_PLUGINS),\
+ $(eval $(if $(findstring /,$p),\
+ $(call core_dep_plugin,$p,$(firstword $(subst /, ,$p))),\
+ $(call core_dep_plugin,$p/early-plugins.mk,$p))))
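+
+# Sketch: either form below works in a user Makefile; a bare name loads
+# $(DEPS_DIR)/<name>/early-plugins.mk, while a path containing "/" loads
+# that exact file relative to $(DEPS_DIR) (names are illustrative):
+#   DEP_EARLY_PLUGINS = rabbit_common
+#   DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-run.mk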
+
+# Query functions.
+
+query_fetch_method = $(if $(dep_$(1)),$(call _qfm_dep,$(word 1,$(dep_$(1)))),$(call _qfm_pkg,$(1)))
+_qfm_dep = $(if $(dep_fetch_$(1)),$(1),$(if $(IS_DEP),legacy,fail))
+_qfm_pkg = $(if $(pkg_$(1)_fetch),$(pkg_$(1)_fetch),fail)
+
+query_name = $(if $(dep_$(1)),$(1),$(if $(pkg_$(1)_name),$(pkg_$(1)_name),$(1)))
+
+query_repo = $(call _qr,$(1),$(call query_fetch_method,$(1)))
+_qr = $(if $(query_repo_$(2)),$(call query_repo_$(2),$(1)),$(call dep_repo,$(1)))
+
+query_repo_default = $(if $(dep_$(1)),$(word 2,$(dep_$(1))),$(pkg_$(1)_repo))
+query_repo_git = $(patsubst git://github.com/%,https://github.com/%,$(call query_repo_default,$(1)))
+query_repo_git-subfolder = $(call query_repo_git,$(1))
+query_repo_git-submodule = -
+query_repo_hg = $(call query_repo_default,$(1))
+query_repo_svn = $(call query_repo_default,$(1))
+query_repo_cp = $(call query_repo_default,$(1))
+query_repo_ln = $(call query_repo_default,$(1))
+query_repo_hex = https://hex.pm/packages/$(if $(word 3,$(dep_$(1))),$(word 3,$(dep_$(1))),$(1))
+query_repo_fail = -
+query_repo_legacy = -
+
+query_version = $(call _qv,$(1),$(call query_fetch_method,$(1)))
+_qv = $(if $(query_version_$(2)),$(call query_version_$(2),$(1)),$(call dep_commit,$(1)))
+
+query_version_default = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(word 3,$(dep_$(1))),$(pkg_$(1)_commit)))
+query_version_git = $(call query_version_default,$(1))
+query_version_git-subfolder = $(call query_version_git,$(1))
+query_version_git-submodule = -
+query_version_hg = $(call query_version_default,$(1))
+query_version_svn = -
+query_version_cp = -
+query_version_ln = -
+query_version_hex = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(word 2,$(dep_$(1))),$(pkg_$(1)_commit)))
+query_version_fail = -
+query_version_legacy = -
+
+query_extra = $(call _qe,$(1),$(call query_fetch_method,$(1)))
+_qe = $(if $(query_extra_$(2)),$(call query_extra_$(2),$(1)),-)
+
+query_extra_git = -
+query_extra_git-subfolder = $(if $(dep_$(1)),subfolder=$(word 4,$(dep_$(1))),-)
+query_extra_git-submodule = -
+query_extra_hg = -
+query_extra_svn = -
+query_extra_cp = -
+query_extra_ln = -
+query_extra_hex = $(if $(dep_$(1)),package-name=$(word 3,$(dep_$(1))),-)
+query_extra_fail = -
+query_extra_legacy = -
+
+query_absolute_path = $(addprefix $(DEPS_DIR)/,$(call query_name,$(1)))
+
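+# Sketch of how the query functions resolve a dependency declared in a
+# user Makefile (cowboy and jsx are used purely for illustration):
+#   dep_cowboy = git https://github.com/ninenines/cowboy 2.9.0
+#   $(call query_fetch_method,cowboy) -> git
+#   $(call query_repo,cowboy)         -> https://github.com/ninenines/cowboy
+#   $(call query_version,cowboy)      -> 2.9.0
+#   dep_jsx = hex 3.1.0
+#   $(call query_repo,jsx)            -> https://hex.pm/packages/jsx
+#   $(call query_version,jsx)         -> 3.1.0
+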
+# Deprecated legacy query functions.
+dep_fetch = $(call query_fetch_method,$(1))
+dep_name = $(call query_name,$(1))
+dep_repo = $(call query_repo_git,$(1))
+dep_commit = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(if $(filter hex,$(word 1,$(dep_$(1)))),$(word 2,$(dep_$(1))),$(word 3,$(dep_$(1)))),$(pkg_$(1)_commit)))
+
+LOCAL_DEPS_DIRS = $(foreach a,$(LOCAL_DEPS),$(if $(wildcard $(APPS_DIR)/$(a)),$(APPS_DIR)/$(a)))
+ALL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(foreach dep,$(filter-out $(IGNORE_DEPS),$(BUILD_DEPS) $(DEPS)),$(call dep_name,$(dep))))
+
+# When we call an app directly we don't want to include it here;
+# otherwise it would be treated both as an app and as a top-level project.
+ALL_APPS_DIRS = $(if $(wildcard $(APPS_DIR)/),$(filter-out $(APPS_DIR),$(shell find $(APPS_DIR) -maxdepth 1 -type d)))
+ifdef ROOT_DIR
+ifndef IS_APP
+ALL_APPS_DIRS := $(filter-out $(APPS_DIR)/$(notdir $(CURDIR)),$(ALL_APPS_DIRS))
+endif
+endif
+
+ifeq ($(filter $(APPS_DIR) $(DEPS_DIR),$(subst :, ,$(ERL_LIBS))),)
+ifeq ($(ERL_LIBS),)
+ ERL_LIBS = $(APPS_DIR):$(DEPS_DIR)
+else
+ ERL_LIBS := $(ERL_LIBS):$(APPS_DIR):$(DEPS_DIR)
+endif
+endif
+export ERL_LIBS
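+# With the defaults above, ERL_LIBS resolves to "$(CURDIR)/apps:$(CURDIR)/deps";
+# an ERL_LIBS inherited from the environment has both directories appended.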
+
+export NO_AUTOPATCH
+
+# Verbosity.
+
+dep_verbose_0 = @echo " DEP $1 ($(call dep_commit,$1))";
+dep_verbose_2 = set -x;
+dep_verbose = $(dep_verbose_$(V))
+
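+# Sketch (illustrative): V selects how dependency builds are reported:
+#   $ make        # V=0 (default): print " DEP <name> (<commit>)" lines
+#   $ make V=2    # trace the underlying commands with set -x
+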
+# Optimization: don't recompile deps unless truly necessary.
+
+ifndef IS_DEP
+ifneq ($(MAKELEVEL),0)
+$(shell rm -f ebin/dep_built)
+endif
+endif
+
+# Core targets.
+
+ALL_APPS_DIRS_TO_BUILD = $(if $(LOCAL_DEPS_DIRS)$(IS_APP),$(LOCAL_DEPS_DIRS),$(ALL_APPS_DIRS))
+
+apps:: $(ALL_APPS_DIRS) clean-tmp-deps.log | $(ERLANG_MK_TMP)
+# Create the ebin directory for all apps to make sure Erlang recognizes
+# them as proper OTP applications when using -include_lib. This is a
+# temporary fix; a proper fix would be to compile apps/* in the right order.
+ifndef IS_APP
+ifneq ($(ALL_APPS_DIRS),)
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ mkdir -p $$dep/ebin; \
+ done
+endif
+endif
+# At the toplevel: if LOCAL_DEPS is defined with at least one local app, only
+# compile that list of apps. Otherwise, compile everything.
+# Within an app: compile all LOCAL_DEPS that are (uncompiled) local apps.
+ifneq ($(ALL_APPS_DIRS_TO_BUILD),)
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS_TO_BUILD); do \
+ if grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/apps.log; then \
+ :; \
+ else \
+ echo $$dep >> $(ERLANG_MK_TMP)/apps.log; \
+ $(MAKE) -C $$dep $(if $(IS_TEST),test-build-app) IS_APP=1; \
+ fi \
+ done
+endif
+
+clean-tmp-deps.log:
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_TMP)/apps.log $(ERLANG_MK_TMP)/deps.log
+endif
+
+# Erlang.mk does not rebuild dependencies after they have been compiled
+# once. A developer working on the top-level project and some
+# dependencies at the same time may want to change this behavior. There
+# are two solutions (see the illustrative invocations below):
+# 1. Set `FULL=1` so that all dependencies are visited and
+#    recursively recompiled if necessary.
+# 2. Set `FORCE_REBUILD=` to the specific list of dependencies that
+#    should be recompiled (instead of the whole set).
+
+FORCE_REBUILD ?=
+
+ifeq ($(origin FULL),undefined)
+ifneq ($(strip $(force_rebuild_dep)$(FORCE_REBUILD)),)
+define force_rebuild_dep
+echo "$(FORCE_REBUILD)" | grep -qw "$$(basename "$1")"
+endef
+endif
+endif
+
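+# Sketch (illustrative invocations of the options described above):
+#   $ make FULL=1                          # visit and rebuild all deps
+#   $ make FORCE_REBUILD="cowboy ranch"    # rebuild only the listed deps
+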
+ifneq ($(SKIP_DEPS),)
+deps::
+else
+deps:: $(ALL_DEPS_DIRS) apps clean-tmp-deps.log | $(ERLANG_MK_TMP)
+ifneq ($(ALL_DEPS_DIRS),)
+ $(verbose) set -e; for dep in $(ALL_DEPS_DIRS); do \
+ if grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/deps.log; then \
+ :; \
+ else \
+ echo $$dep >> $(ERLANG_MK_TMP)/deps.log; \
+ if [ -z "$(strip $(FULL))" ] $(if $(force_rebuild_dep),&& ! ($(call force_rebuild_dep,$$dep)),) && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ elif [ -f $$dep/GNUmakefile ] || [ -f $$dep/makefile ] || [ -f $$dep/Makefile ]; then \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ else \
+ echo "Error: No Makefile to build dependency $$dep." >&2; \
+ exit 2; \
+ fi \
+ fi \
+ done
+endif
+endif
+
+# Deps related targets.
+
+# @todo Rename GNUmakefile and makefile into Makefile first, if they exist.
+# While the makefile could also be named GNUmakefile or makefile,
+# in practice only Makefile has been needed so far.
+define dep_autopatch
+ if [ -f $(DEPS_DIR)/$(1)/erlang.mk ]; then \
+ rm -rf $(DEPS_DIR)/$1/ebin/; \
+ $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+ $(call dep_autopatch_erlang_mk,$(1)); \
+ elif [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+ if [ -f $(DEPS_DIR)/$1/rebar.lock ]; then \
+ $(call dep_autopatch2,$1); \
+ elif [ 0 != `grep -c "include ../\w*\.mk" $(DEPS_DIR)/$(1)/Makefile` ]; then \
+ $(call dep_autopatch2,$(1)); \
+ elif [ 0 != `grep -ci "^[^#].*rebar" $(DEPS_DIR)/$(1)/Makefile` ]; then \
+ $(call dep_autopatch2,$(1)); \
+ elif [ -n "`find $(DEPS_DIR)/$(1)/ -type f -name \*.mk -not -name erlang.mk -exec grep -i "^[^#].*rebar" '{}' \;`" ]; then \
+ $(call dep_autopatch2,$(1)); \
+ fi \
+ else \
+ if [ ! -d $(DEPS_DIR)/$(1)/src/ ]; then \
+ $(call dep_autopatch_noop,$(1)); \
+ else \
+ $(call dep_autopatch2,$(1)); \
+ fi \
+ fi
+endef
+
+define dep_autopatch2
+ ! test -f $(DEPS_DIR)/$1/ebin/$1.app || \
+ mv -n $(DEPS_DIR)/$1/ebin/$1.app $(DEPS_DIR)/$1/src/$1.app.src; \
+ rm -f $(DEPS_DIR)/$1/ebin/$1.app; \
+ if [ -f $(DEPS_DIR)/$1/src/$1.app.src.script ]; then \
+ $(call erlang,$(call dep_autopatch_appsrc_script.erl,$(1))); \
+ fi; \
+ $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+ if [ -f $(DEPS_DIR)/$(1)/rebar -o -f $(DEPS_DIR)/$(1)/rebar.config -o -f $(DEPS_DIR)/$(1)/rebar.config.script -o -f $(DEPS_DIR)/$1/rebar.lock ]; then \
+ $(call dep_autopatch_fetch_rebar); \
+ $(call dep_autopatch_rebar,$(1)); \
+ else \
+ $(call dep_autopatch_gen,$(1)); \
+ fi
+endef
+
+define dep_autopatch_noop
+ printf "noop:\n" > $(DEPS_DIR)/$(1)/Makefile
+endef
+
+# Replace "include erlang.mk" with a line that will load the parent Erlang.mk
+# if given. Do it for all 3 possible Makefile file names.
+ifeq ($(NO_AUTOPATCH_ERLANG_MK),)
+define dep_autopatch_erlang_mk
+ for f in Makefile makefile GNUmakefile; do \
+ if [ -f $(DEPS_DIR)/$1/$$f ]; then \
+ sed -i.bak s/'include *erlang.mk'/'include $$(if $$(ERLANG_MK_FILENAME),$$(ERLANG_MK_FILENAME),erlang.mk)'/ $(DEPS_DIR)/$1/$$f; \
+ fi \
+ done
+endef
+else
+define dep_autopatch_erlang_mk
+ :
+endef
+endif
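+
+# Sketch: after the sed patch above, a dependency's include line reads:
+#   include $(if $(ERLANG_MK_FILENAME),$(ERLANG_MK_FILENAME),erlang.mk)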
+
+define dep_autopatch_gen
+ printf "%s\n" \
+ "ERLC_OPTS = +debug_info" \
+ "include ../../erlang.mk" > $(DEPS_DIR)/$(1)/Makefile
+endef
+
+# We use flock/lockf when available to avoid concurrency issues.
+define dep_autopatch_fetch_rebar
+ if command -v flock >/dev/null; then \
+ flock $(ERLANG_MK_TMP)/rebar.lock sh -c "$(call dep_autopatch_fetch_rebar2)"; \
+ elif command -v lockf >/dev/null; then \
+ lockf $(ERLANG_MK_TMP)/rebar.lock sh -c "$(call dep_autopatch_fetch_rebar2)"; \
+ else \
+ $(call dep_autopatch_fetch_rebar2); \
+ fi
+endef
+
+define dep_autopatch_fetch_rebar2
+ if [ ! -d $(ERLANG_MK_TMP)/rebar ]; then \
+ git clone -q -n -- $(REBAR_GIT) $(ERLANG_MK_TMP)/rebar; \
+ cd $(ERLANG_MK_TMP)/rebar; \
+ git checkout -q $(REBAR_COMMIT); \
+ ./bootstrap; \
+ cd -; \
+ fi
+endef
+
+define dep_autopatch_rebar
+ if [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+ mv $(DEPS_DIR)/$(1)/Makefile $(DEPS_DIR)/$(1)/Makefile.orig.mk; \
+ fi; \
+ $(call erlang,$(call dep_autopatch_rebar.erl,$(1))); \
+ rm -f $(DEPS_DIR)/$(1)/ebin/$(1).app
+endef
+
+define dep_autopatch_rebar.erl
+ application:load(rebar),
+ application:set_env(rebar, log_level, debug),
+ rmemo:start(),
+ Conf1 = case file:consult("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config)") of
+ {ok, Conf0} -> Conf0;
+ _ -> []
+ end,
+ {Conf, OsEnv} = fun() ->
+ case filelib:is_file("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)") of
+ false -> {Conf1, []};
+ true ->
+ Bindings0 = erl_eval:new_bindings(),
+ Bindings1 = erl_eval:add_binding('CONFIG', Conf1, Bindings0),
+ Bindings = erl_eval:add_binding('SCRIPT', "$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings1),
+ Before = os:getenv(),
+ {ok, Conf2} = file:script("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings),
+ {Conf2, lists:foldl(fun(E, Acc) -> lists:delete(E, Acc) end, os:getenv(), Before)}
+ end
+ end(),
+ Write = fun (Text) ->
+ file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/Makefile)", Text, [append])
+ end,
+ Escape = fun (Text) ->
+ re:replace(Text, "\\\\$$", "\$$$$", [global, {return, list}])
+ end,
+ Write("IGNORE_DEPS += edown eper eunit_formatters meck node_package "
+ "rebar_lock_deps_plugin rebar_vsn_plugin reltool_util\n"),
+ Write("C_SRC_DIR = /path/do/not/exist\n"),
+ Write("C_SRC_TYPE = rebar\n"),
+ Write("DRV_CFLAGS = -fPIC\nexport DRV_CFLAGS\n"),
+ Write(["ERLANG_ARCH = ", rebar_utils:wordsize(), "\nexport ERLANG_ARCH\n"]),
+ ToList = fun
+ (V) when is_atom(V) -> atom_to_list(V);
+ (V) when is_list(V) -> "'\\"" ++ V ++ "\\"'"
+ end,
+ fun() ->
+ Write("ERLC_OPTS = +debug_info\nexport ERLC_OPTS\n"),
+ case lists:keyfind(erl_opts, 1, Conf) of
+ false -> ok;
+ {_, ErlOpts} ->
+ lists:foreach(fun
+ ({d, D}) ->
+ Write("ERLC_OPTS += -D" ++ ToList(D) ++ "=1\n");
+ ({d, DKey, DVal}) ->
+ Write("ERLC_OPTS += -D" ++ ToList(DKey) ++ "=" ++ ToList(DVal) ++ "\n");
+ ({i, I}) ->
+ Write(["ERLC_OPTS += -I ", I, "\n"]);
+ ({platform_define, Regex, D}) ->
+ case rebar_utils:is_arch(Regex) of
+ true -> Write("ERLC_OPTS += -D" ++ ToList(D) ++ "=1\n");
+ false -> ok
+ end;
+ ({parse_transform, PT}) ->
+ Write("ERLC_OPTS += +'{parse_transform, " ++ ToList(PT) ++ "}'\n");
+ (_) -> ok
+ end, ErlOpts)
+ end,
+ Write("\n")
+ end(),
+ GetHexVsn = fun(N, NP) ->
+ case file:consult("$(call core_native_path,$(DEPS_DIR)/$1/rebar.lock)") of
+ {ok, Lock} ->
+ io:format("~p~n", [Lock]),
+ case lists:keyfind("1.1.0", 1, Lock) of
+ {_, LockPkgs} ->
+ io:format("~p~n", [LockPkgs]),
+ case lists:keyfind(atom_to_binary(N, latin1), 1, LockPkgs) of
+ {_, {pkg, _, Vsn}, _} ->
+ io:format("~p~n", [Vsn]),
+ {N, {hex, NP, binary_to_list(Vsn)}};
+ _ ->
+ false
+ end;
+ _ ->
+ false
+ end;
+ _ ->
+ false
+ end
+ end,
+ SemVsn = fun
+ ("~>" ++ S0) ->
+ S = case S0 of
+ " " ++ S1 -> S1;
+ _ -> S0
+ end,
+ case length([ok || $$. <- S]) of
+ 0 -> S ++ ".0.0";
+ 1 -> S ++ ".0";
+ _ -> S
+ end;
+ (S) -> S
+ end,
+ fun() ->
+ File = case lists:keyfind(deps, 1, Conf) of
+ false -> [];
+ {_, Deps} ->
+ [begin case case Dep of
+ N when is_atom(N) -> GetHexVsn(N, N);
+ {N, S} when is_atom(N), is_list(S) -> {N, {hex, N, SemVsn(S)}};
+ {N, {pkg, NP}} when is_atom(N) -> GetHexVsn(N, NP);
+ {N, S, {pkg, NP}} -> {N, {hex, NP, S}};
+ {N, S} when is_tuple(S) -> {N, S};
+ {N, _, S} -> {N, S};
+ {N, _, S, _} -> {N, S};
+ _ -> false
+ end of
+ false -> ok;
+ {Name, Source} ->
+ {Method, Repo, Commit} = case Source of
+ {hex, NPV, V} -> {hex, V, NPV};
+ {git, R} -> {git, R, master};
+ {M, R, {branch, C}} -> {M, R, C};
+ {M, R, {ref, C}} -> {M, R, C};
+ {M, R, {tag, C}} -> {M, R, C};
+ {M, R, C} -> {M, R, C}
+ end,
+ Write(io_lib:format("DEPS += ~s\ndep_~s = ~s ~s ~s~n", [Name, Name, Method, Repo, Commit]))
+ end end || Dep <- Deps]
+ end
+ end(),
+ fun() ->
+ case lists:keyfind(erl_first_files, 1, Conf) of
+ false -> ok;
+ {_, Files} ->
+ Names = [[" ", case lists:reverse(F) of
+ "lre." ++ Elif -> lists:reverse(Elif);
+ "lrx." ++ Elif -> lists:reverse(Elif);
+ "lry." ++ Elif -> lists:reverse(Elif);
+ Elif -> lists:reverse(Elif)
+ end] || "src/" ++ F <- Files],
+ Write(io_lib:format("COMPILE_FIRST +=~s\n", [Names]))
+ end
+ end(),
+ Write("\n\nrebar_dep: preprocess pre-deps deps pre-app app\n"),
+ Write("\npreprocess::\n"),
+ Write("\npre-deps::\n"),
+ Write("\npre-app::\n"),
+ PatchHook = fun(Cmd) ->
+ Cmd2 = re:replace(Cmd, "^([g]?make)(.*)( -C.*)", "\\\\1\\\\3\\\\2", [{return, list}]),
+ case Cmd2 of
+ "make -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1);
+ "gmake -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1);
+ "make " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1);
+ "gmake " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1);
+ _ -> Escape(Cmd)
+ end
+ end,
+ fun() ->
+ case lists:keyfind(pre_hooks, 1, Conf) of
+ false -> ok;
+ {_, Hooks} ->
+ [case H of
+ {'get-deps', Cmd} ->
+ Write("\npre-deps::\n\t" ++ PatchHook(Cmd) ++ "\n");
+ {compile, Cmd} ->
+ Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n");
+ {Regex, compile, Cmd} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n");
+ false -> ok
+ end;
+ _ -> ok
+ end || H <- Hooks]
+ end
+ end(),
+ ShellToMk = fun(V0) ->
+ V1 = re:replace(V0, "[$$][(]", "$$\(shell ", [global]),
+ V = re:replace(V1, "([$$])(?![(])(\\\\w*)", "\\\\1(\\\\2)", [global]),
+ re:replace(V, "-Werror\\\\b", "", [{return, list}, global])
+ end,
+ PortSpecs = fun() ->
+ case lists:keyfind(port_specs, 1, Conf) of
+ false ->
+ case filelib:is_dir("$(call core_native_path,$(DEPS_DIR)/$1/c_src)") of
+ false -> [];
+ true ->
+ [{"priv/" ++ proplists:get_value(so_name, Conf, "$(1)_drv.so"),
+ proplists:get_value(port_sources, Conf, ["c_src/*.c"]), []}]
+ end;
+ {_, Specs} ->
+ lists:flatten([case S of
+ {Output, Input} -> {ShellToMk(Output), Input, []};
+ {Regex, Output, Input} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {ShellToMk(Output), Input, []};
+ false -> []
+ end;
+ {Regex, Output, Input, [{env, Env}]} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {ShellToMk(Output), Input, Env};
+ false -> []
+ end
+ end || S <- Specs])
+ end
+ end(),
+ PortSpecWrite = fun (Text) ->
+ file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/c_src/Makefile.erlang.mk)", Text, [append])
+ end,
+ case PortSpecs of
+ [] -> ok;
+ _ ->
+ Write("\npre-app::\n\t@$$\(MAKE) --no-print-directory -f c_src/Makefile.erlang.mk\n"),
+ PortSpecWrite(io_lib:format("ERL_CFLAGS ?= -finline-functions -Wall -fPIC -I \\"~s/erts-~s/include\\" -I \\"~s\\"\n",
+ [code:root_dir(), erlang:system_info(version), code:lib_dir(erl_interface, include)])),
+ PortSpecWrite(io_lib:format("ERL_LDFLAGS ?= -L \\"~s\\" -lei\n",
+ [code:lib_dir(erl_interface, lib)])),
+ [PortSpecWrite(["\n", E, "\n"]) || E <- OsEnv],
+ FilterEnv = fun(Env) ->
+ lists:flatten([case E of
+ {_, _} -> E;
+ {Regex, K, V} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {K, V};
+ false -> []
+ end
+ end || E <- Env])
+ end,
+ MergeEnv = fun(Env) ->
+ lists:foldl(fun ({K, V}, Acc) ->
+ case lists:keyfind(K, 1, Acc) of
+ false -> [{K, rebar_utils:expand_env_variable(V, K, "")}|Acc];
+ {_, V0} -> [{K, rebar_utils:expand_env_variable(V, K, V0)}|Acc]
+ end
+ end, [], Env)
+ end,
+ PortEnv = case lists:keyfind(port_env, 1, Conf) of
+ false -> [];
+ {_, PortEnv0} -> FilterEnv(PortEnv0)
+ end,
+ PortSpec = fun ({Output, Input0, Env}) ->
+ filelib:ensure_dir("$(call core_native_path,$(DEPS_DIR)/$1/)" ++ Output),
+ Input = [[" ", I] || I <- Input0],
+ PortSpecWrite([
+ [["\n", K, " = ", ShellToMk(V)] || {K, V} <- lists:reverse(MergeEnv(PortEnv))],
+ case $(PLATFORM) of
+ darwin -> "\n\nLDFLAGS += -flat_namespace -undefined suppress";
+ _ -> ""
+ end,
+ "\n\nall:: ", Output, "\n\t@:\n\n",
+ "%.o: %.c\n\t$$\(CC) -c -o $$\@ $$\< $$\(CFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.C\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.cc\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.cpp\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ [[Output, ": ", K, " += ", ShellToMk(V), "\n"] || {K, V} <- lists:reverse(MergeEnv(FilterEnv(Env)))],
+ Output, ": $$\(foreach ext,.c .C .cc .cpp,",
+ "$$\(patsubst %$$\(ext),%.o,$$\(filter %$$\(ext),$$\(wildcard", Input, "))))\n",
+ "\t$$\(CC) -o $$\@ $$\? $$\(LDFLAGS) $$\(ERL_LDFLAGS) $$\(DRV_LDFLAGS) $$\(EXE_LDFLAGS)",
+ case {filename:extension(Output), $(PLATFORM)} of
+ {[], _} -> "\n";
+ {_, darwin} -> "\n";
+ _ -> " -shared\n"
+ end])
+ end,
+ [PortSpec(S) || S <- PortSpecs]
+ end,
+ fun() ->
+ case lists:keyfind(plugins, 1, Conf) of
+ false -> ok;
+ {_, Plugins0} ->
+ Plugins = [P || P <- Plugins0, is_tuple(P)],
+ case lists:keyfind('lfe-compile', 1, Plugins) of
+ false -> ok;
+ _ -> Write("\nBUILD_DEPS = lfe lfe.mk\ndep_lfe.mk = git https://github.com/ninenines/lfe.mk master\nDEP_PLUGINS = lfe.mk\n")
+ end
+ end
+ end(),
+ Write("\ninclude $$\(if $$\(ERLANG_MK_FILENAME),$$\(ERLANG_MK_FILENAME),erlang.mk)"),
+ RunPlugin = fun(Plugin, Step) ->
+ case erlang:function_exported(Plugin, Step, 2) of
+ false -> ok;
+ true ->
+ c:cd("$(call core_native_path,$(DEPS_DIR)/$1/)"),
+ Ret = Plugin:Step({config, "", Conf, dict:new(), dict:new(), dict:new(),
+ dict:store(base_dir, "", dict:new())}, undefined),
+ io:format("rebar plugin ~p step ~p ret ~p~n", [Plugin, Step, Ret])
+ end
+ end,
+ fun() ->
+ case lists:keyfind(plugins, 1, Conf) of
+ false -> ok;
+ {_, Plugins0} ->
+ Plugins = [P || P <- Plugins0, is_atom(P)],
+ [begin
+ case lists:keyfind(deps, 1, Conf) of
+ false -> ok;
+ {_, Deps} ->
+ case lists:keyfind(P, 1, Deps) of
+ false -> ok;
+ _ ->
+ Path = "$(call core_native_path,$(DEPS_DIR)/)" ++ atom_to_list(P),
+ io:format("~s", [os:cmd("$(MAKE) -C $(call core_native_path,$(DEPS_DIR)/$1) " ++ Path)]),
+ io:format("~s", [os:cmd("$(MAKE) -C " ++ Path ++ " IS_DEP=1")]),
+ code:add_patha(Path ++ "/ebin")
+ end
+ end
+ end || P <- Plugins],
+ [case code:load_file(P) of
+ {module, P} -> ok;
+ _ ->
+ case lists:keyfind(plugin_dir, 1, Conf) of
+ false -> ok;
+ {_, PluginsDir} ->
+ ErlFile = "$(call core_native_path,$(DEPS_DIR)/$1/)" ++ PluginsDir ++ "/" ++ atom_to_list(P) ++ ".erl",
+ {ok, P, Bin} = compile:file(ErlFile, [binary]),
+ {module, P} = code:load_binary(P, ErlFile, Bin)
+ end
+ end || P <- Plugins],
+ [RunPlugin(P, preprocess) || P <- Plugins],
+ [RunPlugin(P, pre_compile) || P <- Plugins],
+ [RunPlugin(P, compile) || P <- Plugins]
+ end
+ end(),
+ halt()
+endef
+
+define dep_autopatch_appsrc_script.erl
+ AppSrc = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+ AppSrcScript = AppSrc ++ ".script",
+ {ok, Conf0} = file:consult(AppSrc),
+ Bindings0 = erl_eval:new_bindings(),
+ Bindings1 = erl_eval:add_binding('CONFIG', Conf0, Bindings0),
+ Bindings = erl_eval:add_binding('SCRIPT', AppSrcScript, Bindings1),
+ Conf = case file:script(AppSrcScript, Bindings) of
+ {ok, [C]} -> C;
+ {ok, C} -> C
+ end,
+ ok = file:write_file(AppSrc, io_lib:format("~p.~n", [Conf])),
+ halt()
+endef
+
+define dep_autopatch_appsrc.erl
+ AppSrcOut = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+ AppSrcIn = case filelib:is_regular(AppSrcOut) of false -> "$(call core_native_path,$(DEPS_DIR)/$1/ebin/$1.app)"; true -> AppSrcOut end,
+ case filelib:is_regular(AppSrcIn) of
+ false -> ok;
+ true ->
+ {ok, [{application, $(1), L0}]} = file:consult(AppSrcIn),
+ L1 = lists:keystore(modules, 1, L0, {modules, []}),
+ L2 = case lists:keyfind(vsn, 1, L1) of
+ {_, git} -> lists:keyreplace(vsn, 1, L1, {vsn, lists:droplast(os:cmd("git -C $(DEPS_DIR)/$1 describe --dirty --tags --always"))});
+ {_, {cmd, _}} -> lists:keyreplace(vsn, 1, L1, {vsn, "cmd"});
+ _ -> L1
+ end,
+ L3 = case lists:keyfind(registered, 1, L2) of false -> [{registered, []}|L2]; _ -> L2 end,
+ ok = file:write_file(AppSrcOut, io_lib:format("~p.~n", [{application, $(1), L3}])),
+ case AppSrcOut of AppSrcIn -> ok; _ -> ok = file:delete(AppSrcIn) end
+ end,
+ halt()
+endef
+
+define dep_fetch_git
+ git clone -q -n -- $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && git checkout -q $(call dep_commit,$(1));
+endef
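+
+# Illustrative usage (hypothetical dependency, not part of this change):
+#
+#   DEPS += cowlib
+#   dep_cowlib = git https://github.com/ninenines/cowlib master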
+
+define dep_fetch_git-subfolder
+ mkdir -p $(ERLANG_MK_TMP)/git-subfolder; \
+ git clone -q -n -- $(call dep_repo,$1) \
+ $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1); \
+ cd $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1) \
+ && git checkout -q $(call dep_commit,$1); \
+ ln -s $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1)/$(word 4,$(dep_$(1))) \
+ $(DEPS_DIR)/$(call dep_name,$1);
+endef
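+
+# Illustrative usage (hypothetical): the fourth word is the path of the
+# application inside the cloned repository.
+#
+#   DEPS += my_app
+#   dep_my_app = git-subfolder https://github.com/example/umbrella master apps/my_app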
+
+define dep_fetch_git-submodule
+ git submodule update --init -- $(DEPS_DIR)/$1;
+endef
+
+define dep_fetch_hg
+ hg clone -q -U $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && hg update -q $(call dep_commit,$(1));
+endef
+
+define dep_fetch_svn
+ svn checkout -q $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+define dep_fetch_cp
+ cp -R $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+define dep_fetch_ln
+ ln -s $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+# Hex only has a package version. No need to look in the Erlang.mk packages.
+define dep_fetch_hex
+ mkdir -p $(ERLANG_MK_TMP)/hex $(DEPS_DIR)/$1; \
+ $(call core_http_get,$(ERLANG_MK_TMP)/hex/$1.tar,\
+ https://repo.hex.pm/tarballs/$(if $(word 3,$(dep_$1)),$(word 3,$(dep_$1)),$1)-$(strip $(word 2,$(dep_$1))).tar); \
+ tar -xOf $(ERLANG_MK_TMP)/hex/$1.tar contents.tar.gz | tar -C $(DEPS_DIR)/$1 -xzf -;
+endef
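+
+# Illustrative usage (hypothetical): the second word is the Hex version;
+# an optional third word gives a package name that differs from the app name.
+#
+#   DEPS += jsx
+#   dep_jsx = hex 3.0.0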
+
+define dep_fetch_fail
+ echo "Error: Unknown or invalid dependency: $(1)." >&2; \
+ exit 78;
+endef
+
+# Kept for compatibility with older Erlang.mk configurations.
+define dep_fetch_legacy
+ $(warning WARNING: '$(1)' dependency configuration uses deprecated format.) \
+ git clone -q -n -- $(word 1,$(dep_$(1))) $(DEPS_DIR)/$(1); \
+ cd $(DEPS_DIR)/$(1) && git checkout -q $(if $(word 2,$(dep_$(1))),$(word 2,$(dep_$(1))),master);
+endef
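+
+# Illustrative (hypothetical): the deprecated format gives only a repository
+# and an optional commit, e.g. "dep_cowboy = https://github.com/ninenines/cowboy 1.0.0";
+# the commit defaults to master when omitted.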
+
+define dep_target
+$(DEPS_DIR)/$(call dep_name,$1): | $(ERLANG_MK_TMP)
+ $(eval DEP_NAME := $(call dep_name,$1))
+ $(eval DEP_STR := $(if $(filter $1,$(DEP_NAME)),$1,"$1 ($(DEP_NAME))"))
+ $(verbose) if test -d $(APPS_DIR)/$(DEP_NAME); then \
+ echo "Error: Dependency" $(DEP_STR) "conflicts with application found in $(APPS_DIR)/$(DEP_NAME)." >&2; \
+ exit 17; \
+ fi
+ $(verbose) mkdir -p $(DEPS_DIR)
+ $(dep_verbose) $(call dep_fetch_$(strip $(call dep_fetch,$(1))),$(1))
+ $(verbose) if [ -f $(DEPS_DIR)/$(1)/configure.ac -o -f $(DEPS_DIR)/$(1)/configure.in ] \
+ && [ ! -f $(DEPS_DIR)/$(1)/configure ]; then \
+ echo " AUTO " $(DEP_STR); \
+ cd $(DEPS_DIR)/$(1) && autoreconf -Wall -vif -I m4; \
+ fi
+ - $(verbose) if [ -f $(DEPS_DIR)/$(DEP_NAME)/configure ]; then \
+ echo " CONF " $(DEP_STR); \
+ cd $(DEPS_DIR)/$(DEP_NAME) && ./configure; \
+ fi
+ifeq ($(filter $(1),$(NO_AUTOPATCH)),)
+ $(verbose) $$(MAKE) --no-print-directory autopatch-$(DEP_NAME)
+endif
+
+.PHONY: autopatch-$(call dep_name,$1)
+
+autopatch-$(call dep_name,$1)::
+ $(verbose) if [ "$(1)" = "amqp_client" -a "$(RABBITMQ_CLIENT_PATCH)" ]; then \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \
+ echo " PATCH Downloading rabbitmq-codegen"; \
+ git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \
+ fi; \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-server ]; then \
+ echo " PATCH Downloading rabbitmq-server"; \
+ git clone https://github.com/rabbitmq/rabbitmq-server.git $(DEPS_DIR)/rabbitmq-server; \
+ fi; \
+ ln -s $(DEPS_DIR)/amqp_client/deps/rabbit_common-0.0.0 $(DEPS_DIR)/rabbit_common; \
+ elif [ "$(1)" = "rabbit" -a "$(RABBITMQ_SERVER_PATCH)" ]; then \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \
+ echo " PATCH Downloading rabbitmq-codegen"; \
+ git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \
+ fi; \
+ elif [ "$1" = "elixir" -a "$(ELIXIR_PATCH)" ]; then \
+ ln -s lib/elixir/ebin $(DEPS_DIR)/elixir/; \
+ else \
+ $$(call dep_autopatch,$(call dep_name,$1)) \
+ fi
+endef
+
+$(foreach dep,$(BUILD_DEPS) $(DEPS),$(eval $(call dep_target,$(dep))))
+
+ifndef IS_APP
+clean:: clean-apps
+
+clean-apps:
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ $(MAKE) -C $$dep clean IS_APP=1; \
+ done
+
+distclean:: distclean-apps
+
+distclean-apps:
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ $(MAKE) -C $$dep distclean IS_APP=1; \
+ done
+endif
+
+ifndef SKIP_DEPS
+distclean:: distclean-deps
+
+distclean-deps:
+ $(gen_verbose) rm -rf $(DEPS_DIR)
+endif
+
+# Forward-declare variables used in core/deps-tools.mk. This is required
+# in case plugins use them.
+
+ERLANG_MK_RECURSIVE_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-deps-list.log
+ERLANG_MK_RECURSIVE_DOC_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-doc-deps-list.log
+ERLANG_MK_RECURSIVE_REL_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-rel-deps-list.log
+ERLANG_MK_RECURSIVE_TEST_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-test-deps-list.log
+ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-shell-deps-list.log
+
+ERLANG_MK_QUERY_DEPS_FILE = $(ERLANG_MK_TMP)/query-deps.log
+ERLANG_MK_QUERY_DOC_DEPS_FILE = $(ERLANG_MK_TMP)/query-doc-deps.log
+ERLANG_MK_QUERY_REL_DEPS_FILE = $(ERLANG_MK_TMP)/query-rel-deps.log
+ERLANG_MK_QUERY_TEST_DEPS_FILE = $(ERLANG_MK_TMP)/query-test-deps.log
+ERLANG_MK_QUERY_SHELL_DEPS_FILE = $(ERLANG_MK_TMP)/query-shell-deps.log
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: clean-app
+
+# Configuration.
+
+ERLC_OPTS ?= -Werror +debug_info +warn_export_vars +warn_shadow_vars \
+ +warn_obsolete_guard # +bin_opt_info +warn_export_all +warn_missing_spec
+COMPILE_FIRST ?=
+COMPILE_FIRST_PATHS = $(addprefix src/,$(addsuffix .erl,$(COMPILE_FIRST)))
+ERLC_EXCLUDE ?=
+ERLC_EXCLUDE_PATHS = $(addprefix src/,$(addsuffix .erl,$(ERLC_EXCLUDE)))
+
+ERLC_ASN1_OPTS ?=
+
+ERLC_MIB_OPTS ?=
+COMPILE_MIB_FIRST ?=
+COMPILE_MIB_FIRST_PATHS = $(addprefix mibs/,$(addsuffix .mib,$(COMPILE_MIB_FIRST)))
+
+# Verbosity.
+
+app_verbose_0 = @echo " APP " $(PROJECT);
+app_verbose_2 = set -x;
+app_verbose = $(app_verbose_$(V))
+
+appsrc_verbose_0 = @echo " APP " $(PROJECT).app.src;
+appsrc_verbose_2 = set -x;
+appsrc_verbose = $(appsrc_verbose_$(V))
+
+makedep_verbose_0 = @echo " DEPEND" $(PROJECT).d;
+makedep_verbose_2 = set -x;
+makedep_verbose = $(makedep_verbose_$(V))
+
+erlc_verbose_0 = @echo " ERLC " $(filter-out $(patsubst %,%.erl,$(ERLC_EXCLUDE)),\
+ $(filter %.erl %.core,$(?F)));
+erlc_verbose_2 = set -x;
+erlc_verbose = $(erlc_verbose_$(V))
+
+xyrl_verbose_0 = @echo " XYRL " $(filter %.xrl %.yrl,$(?F));
+xyrl_verbose_2 = set -x;
+xyrl_verbose = $(xyrl_verbose_$(V))
+
+asn1_verbose_0 = @echo " ASN1 " $(filter %.asn1,$(?F));
+asn1_verbose_2 = set -x;
+asn1_verbose = $(asn1_verbose_$(V))
+
+mib_verbose_0 = @echo " MIB " $(filter %.bin %.mib,$(?F));
+mib_verbose_2 = set -x;
+mib_verbose = $(mib_verbose_$(V))
+
+ifneq ($(wildcard src/),)
+
+# Targets.
+
+app:: $(if $(wildcard ebin/test),clean) deps
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d
+ $(verbose) $(MAKE) --no-print-directory app-build
+
+ifeq ($(wildcard src/$(PROJECT_MOD).erl),)
+define app_file
+{application, '$(PROJECT)', [
+ {description, "$(PROJECT_DESCRIPTION)"},
+ {vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP),
+ {id$(comma)$(space)"$(1)"}$(comma))
+ {modules, [$(call comma_list,$(2))]},
+ {registered, []},
+ {applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(foreach dep,$(DEPS),$(call dep_name,$(dep))))]},
+ {env, $(subst \,\\,$(PROJECT_ENV))}$(if $(findstring {,$(PROJECT_APP_EXTRA_KEYS)),$(comma)$(newline)$(tab)$(subst \,\\,$(PROJECT_APP_EXTRA_KEYS)),)
+]}.
+endef
+else
+define app_file
+{application, '$(PROJECT)', [
+ {description, "$(PROJECT_DESCRIPTION)"},
+ {vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP),
+ {id$(comma)$(space)"$(1)"}$(comma))
+ {modules, [$(call comma_list,$(2))]},
+ {registered, [$(call comma_list,$(PROJECT)_sup $(PROJECT_REGISTERED))]},
+ {applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(foreach dep,$(DEPS),$(call dep_name,$(dep))))]},
+ {mod, {$(PROJECT_MOD), []}},
+ {env, $(subst \,\\,$(PROJECT_ENV))}$(if $(findstring {,$(PROJECT_APP_EXTRA_KEYS)),$(comma)$(newline)$(tab)$(subst \,\\,$(PROJECT_APP_EXTRA_KEYS)),)
+]}.
+endef
+endif
+
+app-build: ebin/$(PROJECT).app
+ $(verbose) :
+
+# Source files.
+
+ALL_SRC_FILES := $(sort $(call core_find,src/,*))
+
+ERL_FILES := $(filter %.erl,$(ALL_SRC_FILES))
+CORE_FILES := $(filter %.core,$(ALL_SRC_FILES))
+
+# ASN.1 files.
+
+ifneq ($(wildcard asn1/),)
+ASN1_FILES = $(sort $(call core_find,asn1/,*.asn1))
+ERL_FILES += $(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES))))
+
+define compile_asn1
+ $(verbose) mkdir -p include/
+ $(asn1_verbose) erlc -v -I include/ -o asn1/ +noobj $(ERLC_ASN1_OPTS) $(1)
+ $(verbose) mv asn1/*.erl src/
+ -$(verbose) mv asn1/*.hrl include/
+ $(verbose) mv asn1/*.asn1db include/
+endef
+
+$(PROJECT).d:: $(ASN1_FILES)
+ $(if $(strip $?),$(call compile_asn1,$?))
+endif
+
+# SNMP MIB files.
+
+ifneq ($(wildcard mibs/),)
+MIB_FILES = $(sort $(call core_find,mibs/,*.mib))
+
+$(PROJECT).d:: $(COMPILE_MIB_FIRST_PATHS) $(MIB_FILES)
+ $(verbose) mkdir -p include/ priv/mibs/
+ $(mib_verbose) erlc -v $(ERLC_MIB_OPTS) -o priv/mibs/ -I priv/mibs/ $?
+ $(mib_verbose) erlc -o include/ -- $(addprefix priv/mibs/,$(patsubst %.mib,%.bin,$(notdir $?)))
+endif
+
+# Leex and Yecc files.
+
+XRL_FILES := $(filter %.xrl,$(ALL_SRC_FILES))
+XRL_ERL_FILES = $(addprefix src/,$(patsubst %.xrl,%.erl,$(notdir $(XRL_FILES))))
+ERL_FILES += $(XRL_ERL_FILES)
+
+YRL_FILES := $(filter %.yrl,$(ALL_SRC_FILES))
+YRL_ERL_FILES = $(addprefix src/,$(patsubst %.yrl,%.erl,$(notdir $(YRL_FILES))))
+ERL_FILES += $(YRL_ERL_FILES)
+
+$(PROJECT).d:: $(XRL_FILES) $(YRL_FILES)
+ $(if $(strip $?),$(xyrl_verbose) erlc -v -o src/ $(YRL_ERLC_OPTS) $?)
+
+# Erlang and Core Erlang files.
+
+define makedep.erl
+ E = ets:new(makedep, [bag]),
+ G = digraph:new([acyclic]),
+ ErlFiles = lists:usort(string:tokens("$(ERL_FILES)", " ")),
+ DepsDir = "$(call core_native_path,$(DEPS_DIR))",
+ AppsDir = "$(call core_native_path,$(APPS_DIR))",
+ DepsDirsSrc = "$(if $(wildcard $(DEPS_DIR)/*/src), $(call core_native_path,$(wildcard $(DEPS_DIR)/*/src)))",
+ DepsDirsInc = "$(if $(wildcard $(DEPS_DIR)/*/include), $(call core_native_path,$(wildcard $(DEPS_DIR)/*/include)))",
+ AppsDirsSrc = "$(if $(wildcard $(APPS_DIR)/*/src), $(call core_native_path,$(wildcard $(APPS_DIR)/*/src)))",
+ AppsDirsInc = "$(if $(wildcard $(APPS_DIR)/*/include), $(call core_native_path,$(wildcard $(APPS_DIR)/*/include)))",
+ DepsDirs = lists:usort(string:tokens(DepsDirsSrc++DepsDirsInc, " ")),
+ AppsDirs = lists:usort(string:tokens(AppsDirsSrc++AppsDirsInc, " ")),
+ Modules = [{list_to_atom(filename:basename(F, ".erl")), F} || F <- ErlFiles],
+ Add = fun (Mod, Dep) ->
+ case lists:keyfind(Dep, 1, Modules) of
+ false -> ok;
+ {_, DepFile} ->
+ {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+ ets:insert(E, {ModFile, DepFile}),
+ digraph:add_vertex(G, Mod),
+ digraph:add_vertex(G, Dep),
+ digraph:add_edge(G, Mod, Dep)
+ end
+ end,
+ AddHd = fun (F, Mod, DepFile) ->
+ case file:open(DepFile, [read]) of
+ {error, enoent} ->
+ ok;
+ {ok, Fd} ->
+ {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+ case ets:match(E, {ModFile, DepFile}) of
+ [] ->
+ ets:insert(E, {ModFile, DepFile}),
+ F(F, Fd, Mod, 0);
+ _ -> ok
+ end
+ end
+ end,
+ SearchHrl = fun
+ F(_Hrl, []) -> {error,enoent};
+ F(Hrl, [Dir|Dirs]) ->
+ HrlF = filename:join([Dir,Hrl]),
+ case filelib:is_file(HrlF) of
+ true ->
+ {ok, HrlF};
+ false -> F(Hrl,Dirs)
+ end
+ end,
+ Attr = fun
+ (_F, Mod, behavior, Dep) ->
+ Add(Mod, Dep);
+ (_F, Mod, behaviour, Dep) ->
+ Add(Mod, Dep);
+ (_F, Mod, compile, {parse_transform, Dep}) ->
+ Add(Mod, Dep);
+ (_F, Mod, compile, Opts) when is_list(Opts) ->
+ case proplists:get_value(parse_transform, Opts) of
+ undefined -> ok;
+ Dep -> Add(Mod, Dep)
+ end;
+ (F, Mod, include, Hrl) ->
+ case SearchHrl(Hrl, ["src", "include",AppsDir,DepsDir]++AppsDirs++DepsDirs) of
+ {ok, FoundHrl} -> AddHd(F, Mod, FoundHrl);
+ {error, _} -> false
+ end;
+ (F, Mod, include_lib, Hrl) ->
+ case SearchHrl(Hrl, ["src", "include",AppsDir,DepsDir]++AppsDirs++DepsDirs) of
+ {ok, FoundHrl} -> AddHd(F, Mod, FoundHrl);
+ {error, _} -> false
+ end;
+ (F, Mod, import, {Imp, _}) ->
+ IsFile =
+ case lists:keyfind(Imp, 1, Modules) of
+ false -> false;
+ {_, FilePath} -> filelib:is_file(FilePath)
+ end,
+ case IsFile of
+ false -> ok;
+ true -> Add(Mod, Imp)
+ end;
+ (_, _, _, _) -> ok
+ end,
+ MakeDepend = fun
+ (F, Fd, Mod, StartLocation) ->
+ {ok, _Filename} = file:pid2name(Fd),
+ case io:parse_erl_form(Fd, undefined, StartLocation) of
+ {ok, AbsData, EndLocation} ->
+ case AbsData of
+ {attribute, _, Key, Value} ->
+ Attr(F, Mod, Key, Value),
+ F(F, Fd, Mod, EndLocation);
+ _ -> F(F, Fd, Mod, EndLocation)
+ end;
+ {eof, _} -> file:close(Fd);
+ {error, _ErrorDescription} ->
+ file:close(Fd);
+ {error, _ErrorInfo, ErrorLocation} ->
+ F(F, Fd, Mod, ErrorLocation)
+ end,
+ ok
+ end,
+ [begin
+ Mod = list_to_atom(filename:basename(F, ".erl")),
+ case file:open(F, [read]) of
+ {ok, Fd} -> MakeDepend(MakeDepend, Fd, Mod, 0);
+ {error, enoent} -> ok
+ end
+ end || F <- ErlFiles],
+ Depend = sofs:to_external(sofs:relation_to_family(sofs:relation(ets:tab2list(E)))),
+ CompileFirst = [X || X <- lists:reverse(digraph_utils:topsort(G)), [] =/= digraph:in_neighbours(G, X)],
+ TargetPath = fun(Target) ->
+ case lists:keyfind(Target, 1, Modules) of
+ false -> "";
+ {_, DepFile} ->
+ DirSubname = tl(string:tokens(filename:dirname(DepFile), "/")),
+ string:join(DirSubname ++ [atom_to_list(Target)], "/")
+ end
+ end,
+ Output0 = [
+ "# Generated by Erlang.mk. Edit at your own risk!\n\n",
+ [[F, "::", [[" ", D] || D <- Deps], "; @touch \$$@\n"] || {F, Deps} <- Depend],
+ "\nCOMPILE_FIRST +=", [[" ", TargetPath(CF)] || CF <- CompileFirst], "\n"
+ ],
+ Output = case "é" of
+ [233] -> unicode:characters_to_binary(Output0);
+ _ -> Output0
+ end,
+ ok = file:write_file("$(1)", Output),
+ halt()
+endef
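+
+# Illustrative output (hypothetical modules): the generated $(PROJECT).d
+# contains one rule per detected dependency plus a COMPILE_FIRST hint:
+#
+#   src/my_server.erl:: src/my_behaviour.erl; @touch $@
+#   COMPILE_FIRST += my_behaviour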
+
+ifeq ($(if $(NO_MAKEDEP),$(wildcard $(PROJECT).d),),)
+$(PROJECT).d:: $(ERL_FILES) $(call core_find,include/,*.hrl) $(MAKEFILE_LIST)
+ $(makedep_verbose) $(call erlang,$(call makedep.erl,$@))
+endif
+
+ifeq ($(IS_APP)$(IS_DEP),)
+ifneq ($(words $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES)),0)
+# Rebuild everything when the Makefile changes.
+$(ERLANG_MK_TMP)/last-makefile-change: $(MAKEFILE_LIST) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES); \
+ touch -c $(PROJECT).d; \
+ fi
+ $(verbose) touch $@
+
+$(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES):: $(ERLANG_MK_TMP)/last-makefile-change
+ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change
+endif
+endif
+
+$(PROJECT).d::
+ $(verbose) :
+
+include $(wildcard $(PROJECT).d)
+
+ebin/$(PROJECT).app:: ebin/
+
+ebin/:
+ $(verbose) mkdir -p ebin/
+
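+# Note: -Werror is filtered out of ERLC_OPTS when building as a dependency
+# (IS_DEP=1) so that warnings in third-party code do not fail the build.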
+define compile_erl
+ $(erlc_verbose) erlc -v $(if $(IS_DEP),$(filter-out -Werror,$(ERLC_OPTS)),$(ERLC_OPTS)) -o ebin/ \
+ -pa ebin/ -I include/ $(filter-out $(ERLC_EXCLUDE_PATHS),$(COMPILE_FIRST_PATHS) $(1))
+endef
+
+define validate_app_file
+ case file:consult("ebin/$(PROJECT).app") of
+ {ok, _} -> halt();
+ _ -> halt(1)
+ end
+endef
+
+ebin/$(PROJECT).app:: $(ERL_FILES) $(CORE_FILES) $(wildcard src/$(PROJECT).app.src)
+ $(eval FILES_TO_COMPILE := $(filter-out src/$(PROJECT).app.src,$?))
+ $(if $(strip $(FILES_TO_COMPILE)),$(call compile_erl,$(FILES_TO_COMPILE)))
+# Older git versions do not have the --first-parent flag. Do without in that case.
+ $(eval GITDESCRIBE := $(shell git describe --dirty --abbrev=7 --tags --always --first-parent 2>/dev/null \
+ || git describe --dirty --abbrev=7 --tags --always 2>/dev/null || true))
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(filter-out $(ERLC_EXCLUDE_PATHS),$(ERL_FILES) $(CORE_FILES) $(BEAM_FILES)))))))
+ifeq ($(wildcard src/$(PROJECT).app.src),)
+ $(app_verbose) printf '$(subst %,%%,$(subst $(newline),\n,$(subst ','\'',$(call app_file,$(GITDESCRIBE),$(MODULES)))))' \
+ > ebin/$(PROJECT).app
+ $(verbose) if ! $(call erlang,$(call validate_app_file)); then \
+ echo "The .app file produced is invalid. Please verify the value of PROJECT_ENV." >&2; \
+ exit 1; \
+ fi
+else
+ $(verbose) if [ -z "$$(grep -e '^[^%]*{\s*modules\s*,' src/$(PROJECT).app.src)" ]; then \
+ echo "Empty modules entry not found in $(PROJECT).app.src. Please consult the erlang.mk documentation for instructions." >&2; \
+ exit 1; \
+ fi
+ $(appsrc_verbose) cat src/$(PROJECT).app.src \
+ | sed "s/{[[:space:]]*modules[[:space:]]*,[[:space:]]*\[\]}/{modules, \[$(call comma_list,$(MODULES))\]}/" \
+ | sed "s/{id,[[:space:]]*\"git\"}/{id, \"$(subst /,\/,$(GITDESCRIBE))\"}/" \
+ > ebin/$(PROJECT).app
+endif
+ifneq ($(wildcard src/$(PROJECT).appup),)
+ $(verbose) cp src/$(PROJECT).appup ebin/
+endif
+
+clean:: clean-app
+
+clean-app:
+ $(gen_verbose) rm -rf $(PROJECT).d ebin/ priv/mibs/ $(XRL_ERL_FILES) $(YRL_ERL_FILES) \
+ $(addprefix include/,$(patsubst %.mib,%.hrl,$(notdir $(MIB_FILES)))) \
+ $(addprefix include/,$(patsubst %.asn1,%.hrl,$(notdir $(ASN1_FILES)))) \
+ $(addprefix include/,$(patsubst %.asn1,%.asn1db,$(notdir $(ASN1_FILES)))) \
+ $(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES))))
+
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Viktor Söderqvist <viktor@zuiderkwast.se>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: docs-deps
+
+# Configuration.
+
+ALL_DOC_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(DOC_DEPS))
+
+# Targets.
+
+$(foreach dep,$(DOC_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+doc-deps:
+else
+doc-deps: $(ALL_DOC_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_DOC_DEPS_DIRS) ; do $(MAKE) -C $$dep IS_DEP=1; done
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: rel-deps
+
+# Configuration.
+
+ALL_REL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(REL_DEPS))
+
+# Targets.
+
+$(foreach dep,$(REL_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+rel-deps:
+else
+rel-deps: $(ALL_REL_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_REL_DEPS_DIRS) ; do $(MAKE) -C $$dep; done
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: test-deps test-dir test-build clean-test-dir
+
+# Configuration.
+
+TEST_DIR ?= $(CURDIR)/test
+
+ALL_TEST_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(TEST_DEPS))
+
+TEST_ERLC_OPTS ?= +debug_info +warn_export_vars +warn_shadow_vars +warn_obsolete_guard
+TEST_ERLC_OPTS += -DTEST=1
+
+# Targets.
+
+$(foreach dep,$(TEST_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+test-deps:
+else
+test-deps: $(ALL_TEST_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_TEST_DEPS_DIRS) ; do \
+ if [ -z "$(strip $(FULL))" ] && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ else \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ fi \
+ done
+endif
+
+ifneq ($(wildcard $(TEST_DIR)),)
+test-dir: $(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build
+ @:
+
+test_erlc_verbose_0 = @echo " ERLC " $(filter-out $(patsubst %,%.erl,$(ERLC_EXCLUDE)),\
+ $(filter %.erl %.core,$(notdir $(FILES_TO_COMPILE))));
+test_erlc_verbose_2 = set -x;
+test_erlc_verbose = $(test_erlc_verbose_$(V))
+
+define compile_test_erl
+ $(test_erlc_verbose) erlc -v $(TEST_ERLC_OPTS) -o $(TEST_DIR) \
+ -pa ebin/ -I include/ $(1)
+endef
+
+ERL_TEST_FILES = $(call core_find,$(TEST_DIR)/,*.erl)
+$(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build: $(ERL_TEST_FILES) $(MAKEFILE_LIST)
+ $(eval FILES_TO_COMPILE := $(if $(filter $(MAKEFILE_LIST),$?),$(filter $(ERL_TEST_FILES),$^),$?))
+ $(if $(strip $(FILES_TO_COMPILE)),$(call compile_test_erl,$(FILES_TO_COMPILE)) && touch $@)
+endif
+
+test-build:: IS_TEST=1
+test-build:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build:: $(if $(wildcard src),$(if $(wildcard ebin/test),,clean)) $(if $(IS_APP),,deps test-deps)
+# We already compiled everything when IS_APP=1.
+ifndef IS_APP
+ifneq ($(wildcard src),)
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(verbose) $(MAKE) --no-print-directory app-build ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(gen_verbose) touch ebin/test
+endif
+ifneq ($(wildcard $(TEST_DIR)),)
+ $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+endif
+endif
+
+# Roughly the same as test-build, but when IS_APP=1.
+# We only care about compiling the current application.
+ifdef IS_APP
+test-build-app:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build-app:: deps test-deps
+ifneq ($(wildcard src),)
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(verbose) $(MAKE) --no-print-directory app-build ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(gen_verbose) touch ebin/test
+endif
+ifneq ($(wildcard $(TEST_DIR)),)
+ $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+endif
+endif
+
+clean:: clean-test-dir
+
+clean-test-dir:
+ifneq ($(wildcard $(TEST_DIR)/*.beam),)
+ $(gen_verbose) rm -f $(TEST_DIR)/*.beam $(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: rebar.config
+
+# We strip out -Werror because we don't want to fail due to
+# warnings when used as a dependency.
+
+compat_prepare_erlc_opts = $(shell echo "$1" | sed 's/, */,/g')
+
+define compat_convert_erlc_opts
+$(if $(filter-out -Werror,$1),\
+ $(if $(findstring +,$1),\
+ $(shell echo $1 | cut -b 2-)))
+endef
+
+define compat_erlc_opts_to_list
+[$(call comma_list,$(foreach o,$(call compat_prepare_erlc_opts,$1),$(call compat_convert_erlc_opts,$o)))]
+endef
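+
+# Illustrative conversion (hypothetical options): an ERLC_OPTS value of
+# "-Werror +debug_info +warn_export_vars" becomes the rebar erl_opts list
+# [debug_info, warn_export_vars]; -Werror is dropped as explained above.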
+
+define compat_rebar_config
+{deps, [
+$(call comma_list,$(foreach d,$(DEPS),\
+ $(if $(filter hex,$(call dep_fetch,$d)),\
+ {$(call dep_name,$d)$(comma)"$(call dep_repo,$d)"},\
+ {$(call dep_name,$d)$(comma)".*"$(comma){git,"$(call dep_repo,$d)"$(comma)"$(call dep_commit,$d)"}})))
+]}.
+{erl_opts, $(call compat_erlc_opts_to_list,$(ERLC_OPTS))}.
+endef
+
+rebar.config:
+ $(gen_verbose) $(call core_render,compat_rebar_config,rebar.config)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter asciideck,$(DEPS) $(DOC_DEPS)),asciideck)
+
+.PHONY: asciidoc asciidoc-guide asciidoc-manual install-asciidoc distclean-asciidoc-guide distclean-asciidoc-manual
+
+# Core targets.
+
+docs:: asciidoc
+
+distclean:: distclean-asciidoc-guide distclean-asciidoc-manual
+
+# Plugin-specific targets.
+
+asciidoc: asciidoc-guide asciidoc-manual
+
+# User guide.
+
+ifeq ($(wildcard doc/src/guide/book.asciidoc),)
+asciidoc-guide:
+else
+asciidoc-guide: distclean-asciidoc-guide doc-deps
+ a2x -v -f pdf doc/src/guide/book.asciidoc && mv doc/src/guide/book.pdf doc/guide.pdf
+ a2x -v -f chunked doc/src/guide/book.asciidoc && mv doc/src/guide/book.chunked/ doc/html/
+
+distclean-asciidoc-guide:
+ $(gen_verbose) rm -rf doc/html/ doc/guide.pdf
+endif
+
+# Man pages.
+
+ASCIIDOC_MANUAL_FILES := $(wildcard doc/src/manual/*.asciidoc)
+
+ifeq ($(ASCIIDOC_MANUAL_FILES),)
+asciidoc-manual:
+else
+
+# Configuration.
+
+MAN_INSTALL_PATH ?= /usr/local/share/man
+MAN_SECTIONS ?= 3 7
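+# \U...\E below is a GNU sed extension; it uppercases the first letter of $(PROJECT).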
+MAN_PROJECT ?= $(shell echo $(PROJECT) | sed 's/^./\U&\E/')
+MAN_VERSION ?= $(PROJECT_VERSION)
+
+# Plugin-specific targets.
+
+define asciidoc2man.erl
+try
+ [begin
+ io:format(" ADOC ~s~n", [F]),
+ ok = asciideck:to_manpage(asciideck:parse_file(F), #{
+ compress => gzip,
+ outdir => filename:dirname(F),
+ extra2 => "$(MAN_PROJECT) $(MAN_VERSION)",
+ extra3 => "$(MAN_PROJECT) Function Reference"
+ })
+ end || F <- [$(shell echo $(addprefix $(comma)\",$(addsuffix \",$1)) | sed 's/^.//')]],
+ halt(0)
+catch C:E ->
+ io:format("Exception ~p:~p~nStacktrace: ~p~n", [C, E, erlang:get_stacktrace()]),
+ halt(1)
+end.
+endef
+
+asciidoc-manual:: doc-deps
+
+asciidoc-manual:: $(ASCIIDOC_MANUAL_FILES)
+ $(gen_verbose) $(call erlang,$(call asciidoc2man.erl,$?))
+ $(verbose) $(foreach s,$(MAN_SECTIONS),mkdir -p doc/man$s/ && mv doc/src/manual/*.$s.gz doc/man$s/;)
+
+install-docs:: install-asciidoc
+
+install-asciidoc: asciidoc-manual
+ $(foreach s,$(MAN_SECTIONS),\
+ mkdir -p $(MAN_INSTALL_PATH)/man$s/ && \
+ install -g `id -g` -o `id -u` -m 0644 doc/man$s/*.gz $(MAN_INSTALL_PATH)/man$s/;)
+
+distclean-asciidoc-manual:
+ $(gen_verbose) rm -rf $(addprefix doc/man,$(MAN_SECTIONS))
+endif
+endif
+
+# Copyright (c) 2014-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: bootstrap bootstrap-lib bootstrap-rel new list-templates
+
+# Core targets.
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Bootstrap targets:" \
+ " bootstrap Generate a skeleton of an OTP application" \
+ " bootstrap-lib Generate a skeleton of an OTP library" \
+ " bootstrap-rel Generate the files needed to build a release" \
+ " new-app in=NAME Create a new local OTP application NAME" \
+ " new-lib in=NAME Create a new local OTP library NAME" \
+ " new t=TPL n=NAME Generate a module NAME based on the template TPL" \
+ " new t=T n=N in=APP Generate a module NAME based on the template TPL in APP" \
+ " list-templates List available templates"
+
+# Bootstrap templates.
+
+define bs_appsrc
+{application, $p, [
+ {description, ""},
+ {vsn, "0.1.0"},
+ {id, "git"},
+ {modules, []},
+ {registered, []},
+ {applications, [
+ kernel,
+ stdlib
+ ]},
+ {mod, {$p_app, []}},
+ {env, []}
+]}.
+endef
+
+define bs_appsrc_lib
+{application, $p, [
+ {description, ""},
+ {vsn, "0.1.0"},
+ {id, "git"},
+ {modules, []},
+ {registered, []},
+ {applications, [
+ kernel,
+ stdlib
+ ]}
+]}.
+endef
+
+# To prevent autocompletion issues with ZSH, we add "include erlang.mk"
+# separately during the actual bootstrap.
+define bs_Makefile
+PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.1.0
+$(if $(SP),
+# Whitespace to be used when creating files from templates.
+SP = $(SP)
+)
+endef
+
+define bs_apps_Makefile
+PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.1.0
+$(if $(SP),
+# Whitespace to be used when creating files from templates.
+SP = $(SP)
+)
+# Make sure we know where the applications are located.
+ROOT_DIR ?= $(call core_relpath,$(dir $(ERLANG_MK_FILENAME)),$(APPS_DIR)/app)
+APPS_DIR ?= ..
+DEPS_DIR ?= $(call core_relpath,$(DEPS_DIR),$(APPS_DIR)/app)
+
+include $$(ROOT_DIR)/erlang.mk
+endef
+
+define bs_app
+-module($p_app).
+-behaviour(application).
+
+-export([start/2]).
+-export([stop/1]).
+
+start(_Type, _Args) ->
+ $p_sup:start_link().
+
+stop(_State) ->
+ ok.
+endef
+
+define bs_relx_config
+{release, {$p_release, "1"}, [$p, sasl, runtime_tools]}.
+{extended_start_script, true}.
+{sys_config, "config/sys.config"}.
+{vm_args, "config/vm.args"}.
+endef
+
+define bs_sys_config
+[
+].
+endef
+
+define bs_vm_args
+-name $p@127.0.0.1
+-setcookie $p
+-heart
+endef
+
+# Normal templates.
+
+define tpl_supervisor
+-module($(n)).
+-behaviour(supervisor).
+
+-export([start_link/0]).
+-export([init/1]).
+
+start_link() ->
+ supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+init([]) ->
+ Procs = [],
+ {ok, {{one_for_one, 1, 5}, Procs}}.
+endef
+
+define tpl_gen_server
+-module($(n)).
+-behaviour(gen_server).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_server.
+-export([init/1]).
+-export([handle_call/3]).
+-export([handle_cast/2]).
+-export([handle_info/2]).
+-export([terminate/2]).
+-export([code_change/3]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_server:start_link(?MODULE, [], []).
+
+%% gen_server.
+
+init([]) ->
+ {ok, #state{}}.
+
+handle_call(_Request, _From, State) ->
+ {reply, ignored, State}.
+
+handle_cast(_Msg, State) ->
+ {noreply, State}.
+
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+endef
+
+define tpl_module
+-module($(n)).
+-export([]).
+endef
+
+define tpl_cowboy_http
+-module($(n)).
+-behaviour(cowboy_http_handler).
+
+-export([init/3]).
+-export([handle/2]).
+-export([terminate/3]).
+
+-record(state, {
+}).
+
+init(_, Req, _Opts) ->
+ {ok, Req, #state{}}.
+
+handle(Req, State=#state{}) ->
+ {ok, Req2} = cowboy_req:reply(200, Req),
+ {ok, Req2, State}.
+
+terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_gen_fsm
+-module($(n)).
+-behaviour(gen_fsm).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_fsm.
+-export([init/1]).
+-export([state_name/2]).
+-export([handle_event/3]).
+-export([state_name/3]).
+-export([handle_sync_event/4]).
+-export([handle_info/3]).
+-export([terminate/3]).
+-export([code_change/4]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_fsm:start_link(?MODULE, [], []).
+
+%% gen_fsm.
+
+init([]) ->
+ {ok, state_name, #state{}}.
+
+state_name(_Event, StateData) ->
+ {next_state, state_name, StateData}.
+
+handle_event(_Event, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+state_name(_Event, _From, StateData) ->
+ {reply, ignored, state_name, StateData}.
+
+handle_sync_event(_Event, _From, StateName, StateData) ->
+ {reply, ignored, StateName, StateData}.
+
+handle_info(_Info, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+terminate(_Reason, _StateName, _StateData) ->
+ ok.
+
+code_change(_OldVsn, StateName, StateData, _Extra) ->
+ {ok, StateName, StateData}.
+endef
+
+define tpl_gen_statem
+-module($(n)).
+-behaviour(gen_statem).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_statem.
+-export([callback_mode/0]).
+-export([init/1]).
+-export([state_name/3]).
+-export([handle_event/4]).
+-export([terminate/3]).
+-export([code_change/4]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_statem:start_link(?MODULE, [], []).
+
+%% gen_statem.
+
+callback_mode() ->
+ state_functions.
+
+init([]) ->
+ {ok, state_name, #state{}}.
+
+state_name(_EventType, _EventData, StateData) ->
+ {next_state, state_name, StateData}.
+
+handle_event(_EventType, _EventData, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+terminate(_Reason, _StateName, _StateData) ->
+ ok.
+
+code_change(_OldVsn, StateName, StateData, _Extra) ->
+ {ok, StateName, StateData}.
+endef
+
+define tpl_cowboy_loop
+-module($(n)).
+-behaviour(cowboy_loop_handler).
+
+-export([init/3]).
+-export([info/3]).
+-export([terminate/3]).
+
+-record(state, {
+}).
+
+init(_, Req, _Opts) ->
+ {loop, Req, #state{}, 5000, hibernate}.
+
+info(_Info, Req, State) ->
+ {loop, Req, State, hibernate}.
+
+terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_cowboy_rest
+-module($(n)).
+
+-export([init/3]).
+-export([content_types_provided/2]).
+-export([get_html/2]).
+
+init(_, _Req, _Opts) ->
+ {upgrade, protocol, cowboy_rest}.
+
+content_types_provided(Req, State) ->
+ {[{{<<"text">>, <<"html">>, '*'}, get_html}], Req, State}.
+
+get_html(Req, State) ->
+ {<<"<html><body>This is REST!</body></html>">>, Req, State}.
+endef
+
+define tpl_cowboy_ws
+-module($(n)).
+-behaviour(cowboy_websocket_handler).
+
+-export([init/3]).
+-export([websocket_init/3]).
+-export([websocket_handle/3]).
+-export([websocket_info/3]).
+-export([websocket_terminate/3]).
+
+-record(state, {
+}).
+
+init(_, _, _) ->
+ {upgrade, protocol, cowboy_websocket}.
+
+websocket_init(_, Req, _Opts) ->
+ Req2 = cowboy_req:compact(Req),
+ {ok, Req2, #state{}}.
+
+websocket_handle({text, Data}, Req, State) ->
+ {reply, {text, Data}, Req, State};
+websocket_handle({binary, Data}, Req, State) ->
+ {reply, {binary, Data}, Req, State};
+websocket_handle(_Frame, Req, State) ->
+ {ok, Req, State}.
+
+websocket_info(_Info, Req, State) ->
+ {ok, Req, State}.
+
+websocket_terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_ranch_protocol
+-module($(n)).
+-behaviour(ranch_protocol).
+
+-export([start_link/4]).
+-export([init/4]).
+
+-type opts() :: [].
+-export_type([opts/0]).
+
+-record(state, {
+ socket :: inet:socket(),
+ transport :: module()
+}).
+
+start_link(Ref, Socket, Transport, Opts) ->
+ Pid = spawn_link(?MODULE, init, [Ref, Socket, Transport, Opts]),
+ {ok, Pid}.
+
+-spec init(ranch:ref(), inet:socket(), module(), opts()) -> ok.
+init(Ref, Socket, Transport, _Opts) ->
+ ok = ranch:accept_ack(Ref),
+ loop(#state{socket=Socket, transport=Transport}).
+
+loop(State) ->
+ loop(State).
+endef
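+
+# Illustrative usage (hypothetical module name): "make new t=gen_server n=my_worker"
+# renders the tpl_gen_server template above into src/my_worker.erl.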
+
+# Plugin-specific targets.
+
+ifndef WS
+ifdef SP
+WS = $(subst a,,a $(wordlist 1,$(SP),a a a a a a a a a a a a a a a a a a a a))
+else
+WS = $(tab)
+endif
+endif
+
+bootstrap:
+ifneq ($(wildcard src/),)
+ $(error Error: src/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(eval n := $(PROJECT)_sup)
+ $(verbose) $(call core_render,bs_Makefile,Makefile)
+ $(verbose) echo "include erlang.mk" >> Makefile
+ $(verbose) mkdir src/
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc,src/$(PROJECT).app.src)
+endif
+ $(verbose) $(call core_render,bs_app,src/$(PROJECT)_app.erl)
+ $(verbose) $(call core_render,tpl_supervisor,src/$(PROJECT)_sup.erl)
+
+bootstrap-lib:
+ifneq ($(wildcard src/),)
+ $(error Error: src/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(verbose) $(call core_render,bs_Makefile,Makefile)
+ $(verbose) echo "include erlang.mk" >> Makefile
+ $(verbose) mkdir src/
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc_lib,src/$(PROJECT).app.src)
+endif
+
+bootstrap-rel:
+ifneq ($(wildcard relx.config),)
+ $(error Error: relx.config already exists)
+endif
+ifneq ($(wildcard config/),)
+ $(error Error: config/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(verbose) $(call core_render,bs_relx_config,relx.config)
+ $(verbose) mkdir config/
+ $(verbose) $(call core_render,bs_sys_config,config/sys.config)
+ $(verbose) $(call core_render,bs_vm_args,config/vm.args)
+
+new-app:
+ifndef in
+ $(error Usage: $(MAKE) new-app in=APP)
+endif
+ifneq ($(wildcard $(APPS_DIR)/$in),)
+ $(error Error: Application $in already exists)
+endif
+ $(eval p := $(in))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(eval n := $(in)_sup)
+ $(verbose) mkdir -p $(APPS_DIR)/$p/src/
+ $(verbose) $(call core_render,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile)
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc,$(APPS_DIR)/$p/src/$p.app.src)
+endif
+ $(verbose) $(call core_render,bs_app,$(APPS_DIR)/$p/src/$p_app.erl)
+ $(verbose) $(call core_render,tpl_supervisor,$(APPS_DIR)/$p/src/$p_sup.erl)
+
+new-lib:
+ifndef in
+ $(error Usage: $(MAKE) new-lib in=APP)
+endif
+ifneq ($(wildcard $(APPS_DIR)/$in),)
+ $(error Error: Application $in already exists)
+endif
+ $(eval p := $(in))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(verbose) mkdir -p $(APPS_DIR)/$p/src/
+ $(verbose) $(call core_render,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile)
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc_lib,$(APPS_DIR)/$p/src/$p.app.src)
+endif
+
+new:
+ifeq ($(wildcard src/)$(in),)
+ $(error Error: src/ directory does not exist)
+endif
+ifndef t
+ $(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP])
+endif
+ifndef n
+ $(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP])
+endif
+ifdef in
+ $(verbose) $(call core_render,tpl_$(t),$(APPS_DIR)/$(in)/src/$(n).erl)
+else
+ $(verbose) $(call core_render,tpl_$(t),src/$(n).erl)
+endif
+
+list-templates:
+ $(verbose) echo Available templates:
+ $(verbose) printf " %s\n" $(sort $(patsubst tpl_%,%,$(filter tpl_%,$(.VARIABLES))))
+
+# Copyright (c) 2014-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: clean-c_src distclean-c_src-env
+
+# Configuration.
+
+C_SRC_DIR ?= $(CURDIR)/c_src
+C_SRC_ENV ?= $(C_SRC_DIR)/env.mk
+C_SRC_OUTPUT ?= $(CURDIR)/priv/$(PROJECT)
+C_SRC_TYPE ?= shared
+
+# System type and C compiler/flags.
+
+ifeq ($(PLATFORM),msys2)
+ C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?= .exe
+ C_SRC_OUTPUT_SHARED_EXTENSION ?= .dll
+else
+ C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?=
+ C_SRC_OUTPUT_SHARED_EXTENSION ?= .so
+endif
+
+ifeq ($(C_SRC_TYPE),shared)
+ C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_SHARED_EXTENSION)
+else
+ C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_EXECUTABLE_EXTENSION)
+endif
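+
+# Illustrative (defaults on Linux): a project named "foo" with C sources
+# builds the shared object priv/foo.so; setting C_SRC_TYPE to "executable"
+# links a standalone binary priv/foo instead.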
+
+ifeq ($(PLATFORM),msys2)
+# We hardcode the compiler used on MSYS2. The default CC=cc does
+# not produce working code. The "gcc" MSYS2 package also doesn't.
+ CC = /mingw64/bin/gcc
+ export CC
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),darwin)
+ CC ?= cc
+ CFLAGS ?= -O3 -std=c99 -arch x86_64 -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -arch x86_64 -Wall
+ LDFLAGS ?= -arch x86_64 -flat_namespace -undefined suppress
+else ifeq ($(PLATFORM),freebsd)
+ CC ?= cc
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),linux)
+ CC ?= gcc
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+endif
+
+ifneq ($(PLATFORM),msys2)
+ CFLAGS += -fPIC
+ CXXFLAGS += -fPIC
+endif
+
+CFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
+CXXFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
+
+LDLIBS += -L"$(ERL_INTERFACE_LIB_DIR)" -lei
+
+# Verbosity.
+
+c_verbose_0 = @echo " C " $(filter-out $(notdir $(MAKEFILE_LIST) $(C_SRC_ENV)),$(^F));
+c_verbose = $(c_verbose_$(V))
+
+cpp_verbose_0 = @echo " CPP " $(filter-out $(notdir $(MAKEFILE_LIST) $(C_SRC_ENV)),$(^F));
+cpp_verbose = $(cpp_verbose_$(V))
+
+link_verbose_0 = @echo " LD " $(@F);
+link_verbose = $(link_verbose_$(V))
+
+# Targets.
+
+ifeq ($(wildcard $(C_SRC_DIR)),)
+else ifneq ($(wildcard $(C_SRC_DIR)/Makefile),)
+app:: app-c_src
+
+test-build:: app-c_src
+
+app-c_src:
+ $(MAKE) -C $(C_SRC_DIR)
+
+clean::
+ $(MAKE) -C $(C_SRC_DIR) clean
+
+else
+
+ifeq ($(SOURCES),)
+SOURCES := $(sort $(foreach pat,*.c *.C *.cc *.cpp,$(call core_find,$(C_SRC_DIR)/,$(pat))))
+endif
+OBJECTS = $(addsuffix .o, $(basename $(SOURCES)))
+
+COMPILE_C = $(c_verbose) $(CC) $(CFLAGS) $(CPPFLAGS) -c
+COMPILE_CPP = $(cpp_verbose) $(CXX) $(CXXFLAGS) $(CPPFLAGS) -c
+
+app:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
+
+test-build:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
+
+$(C_SRC_OUTPUT_FILE): $(OBJECTS)
+ $(verbose) mkdir -p $(dir $@)
+ $(link_verbose) $(CC) $(OBJECTS) \
+ $(LDFLAGS) $(if $(filter $(C_SRC_TYPE),shared),-shared) $(LDLIBS) \
+ -o $(C_SRC_OUTPUT_FILE)
+
+$(OBJECTS): $(MAKEFILE_LIST) $(C_SRC_ENV)
+
+%.o: %.c
+ $(COMPILE_C) $(OUTPUT_OPTION) $<
+
+%.o: %.cc
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+%.o: %.C
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+%.o: %.cpp
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+clean:: clean-c_src
+
+clean-c_src:
+ $(gen_verbose) rm -f $(C_SRC_OUTPUT_FILE) $(OBJECTS)
+
+endif
+
+ifneq ($(wildcard $(C_SRC_DIR)),)
+ERL_ERTS_DIR = $(shell $(ERL) -eval 'io:format("~s~n", [code:lib_dir(erts)]), halt().')
+
+$(C_SRC_ENV):
+ $(verbose) $(ERL) -eval "file:write_file(\"$(call core_native_path,$(C_SRC_ENV))\", \
+ io_lib:format( \
+ \"# Generated by Erlang.mk. Edit at your own risk!~n~n\" \
+ \"ERTS_INCLUDE_DIR ?= ~s/erts-~s/include/~n\" \
+ \"ERL_INTERFACE_INCLUDE_DIR ?= ~s~n\" \
+ \"ERL_INTERFACE_LIB_DIR ?= ~s~n\" \
+ \"ERTS_DIR ?= $(ERL_ERTS_DIR)~n\", \
+ [code:root_dir(), erlang:system_info(version), \
+ code:lib_dir(erl_interface, include), \
+ code:lib_dir(erl_interface, lib)])), \
+ halt()."
+
+distclean:: distclean-c_src-env
+
+distclean-c_src-env:
+ $(gen_verbose) rm -f $(C_SRC_ENV)
+
+-include $(C_SRC_ENV)
+
+ifneq ($(ERL_ERTS_DIR),$(ERTS_DIR))
+$(shell rm -f $(C_SRC_ENV))
+endif
+endif
+
+# Templates.
+
+define bs_c_nif
+#include "erl_nif.h"
+
+static int loads = 0;
+
+static int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
+{
+ /* Initialize private data. */
+ *priv_data = NULL;
+
+ loads++;
+
+ return 0;
+}
+
+static int upgrade(ErlNifEnv* env, void** priv_data, void** old_priv_data, ERL_NIF_TERM load_info)
+{
+ /* Convert the private data to the new version. */
+ *priv_data = *old_priv_data;
+
+ loads++;
+
+ return 0;
+}
+
+static void unload(ErlNifEnv* env, void* priv_data)
+{
+ if (loads == 1) {
+ /* Destroy the private data. */
+ }
+
+ loads--;
+}
+
+static ERL_NIF_TERM hello(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ if (enif_is_atom(env, argv[0])) {
+ return enif_make_tuple2(env,
+ enif_make_atom(env, "hello"),
+ argv[0]);
+ }
+
+ return enif_make_tuple2(env,
+ enif_make_atom(env, "error"),
+ enif_make_atom(env, "badarg"));
+}
+
+static ErlNifFunc nif_funcs[] = {
+ {"hello", 1, hello}
+};
+
+ERL_NIF_INIT($n, nif_funcs, load, NULL, upgrade, unload)
+endef
+
+define bs_erl_nif
+-module($n).
+
+-export([hello/1]).
+
+-on_load(on_load/0).
+on_load() ->
+ PrivDir = case code:priv_dir(?MODULE) of
+ {error, _} ->
+ AppPath = filename:dirname(filename:dirname(code:which(?MODULE))),
+ filename:join(AppPath, "priv");
+ Path ->
+ Path
+ end,
+ erlang:load_nif(filename:join(PrivDir, atom_to_list(?MODULE)), 0).
+
+hello(_) ->
+ erlang:nif_error({not_loaded, ?MODULE}).
+endef
+
+new-nif:
+ifneq ($(wildcard $(C_SRC_DIR)/$n.c),)
+ $(error Error: $(C_SRC_DIR)/$n.c already exists)
+endif
+ifneq ($(wildcard src/$n.erl),)
+ $(error Error: src/$n.erl already exists)
+endif
+ifndef n
+ $(error Usage: $(MAKE) new-nif n=NAME [in=APP])
+endif
+ifdef in
+ $(verbose) $(MAKE) -C $(APPS_DIR)/$(in)/ new-nif n=$n in=
+else
+ $(verbose) mkdir -p $(C_SRC_DIR) src/
+ $(verbose) $(call core_render,bs_c_nif,$(C_SRC_DIR)/$n.c)
+ $(verbose) $(call core_render,bs_erl_nif,src/$n.erl)
+endif
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: ci ci-prepare ci-setup
+
+CI_OTP ?=
+CI_HIPE ?=
+CI_ERLLVM ?=
+
+ifeq ($(CI_VM),native)
+ERLC_OPTS += +native
+TEST_ERLC_OPTS += +native
+else ifeq ($(CI_VM),erllvm)
+ERLC_OPTS += +native +'{hipe, [to_llvm]}'
+TEST_ERLC_OPTS += +native +'{hipe, [to_llvm]}'
+endif
+
+ifeq ($(strip $(CI_OTP) $(CI_HIPE) $(CI_ERLLVM)),)
+ci::
+else
+
+ci:: $(addprefix ci-,$(CI_OTP) $(addsuffix -native,$(CI_HIPE)) $(addsuffix -erllvm,$(CI_ERLLVM)))
+
+ci-prepare: $(addprefix $(KERL_INSTALL_DIR)/,$(CI_OTP) $(addsuffix -native,$(CI_HIPE)))
+
+ci-setup::
+ $(verbose) :
+
+ci-extra::
+ $(verbose) :
+
+ci_verbose_0 = @echo " CI " $(1);
+ci_verbose = $(ci_verbose_$(V))
+
+define ci_target
+ci-$1: $(KERL_INSTALL_DIR)/$2
+ $(verbose) $(MAKE) --no-print-directory clean
+ $(ci_verbose) \
+ PATH="$(KERL_INSTALL_DIR)/$2/bin:$(PATH)" \
+ CI_OTP_RELEASE="$1" \
+ CT_OPTS="-label $1" \
+ CI_VM="$3" \
+ $(MAKE) ci-setup tests
+ $(verbose) $(MAKE) --no-print-directory ci-extra
+endef
+
+$(foreach otp,$(CI_OTP),$(eval $(call ci_target,$(otp),$(otp),otp)))
+$(foreach otp,$(CI_HIPE),$(eval $(call ci_target,$(otp)-native,$(otp)-native,native)))
+$(foreach otp,$(CI_ERLLVM),$(eval $(call ci_target,$(otp)-erllvm,$(otp)-native,erllvm)))
+
+$(foreach otp,$(filter-out $(ERLANG_OTP),$(CI_OTP)),$(eval $(call kerl_otp_target,$(otp))))
+$(foreach otp,$(filter-out $(ERLANG_HIPE),$(sort $(CI_HIPE) $(CI_ERLLVM))),$(eval $(call kerl_hipe_target,$(otp))))
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Continuous Integration targets:" \
+ " ci Run '$(MAKE) tests' on all configured Erlang versions." \
+ "" \
+ "The CI_OTP variable must be defined with the Erlang versions" \
+ "that must be tested. For example: CI_OTP = OTP-17.3.4 OTP-17.5.3"
+
+endif
+
+# Copyright (c) 2020, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifdef CONCUERROR_TESTS
+
+.PHONY: concuerror distclean-concuerror
+
+# Configuration
+
+CONCUERROR_LOGS_DIR ?= $(CURDIR)/logs
+CONCUERROR_OPTS ?=
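+
+# Illustrative usage (hypothetical test): CONCUERROR_TESTS = my_module:my_test
+# creates the target concuerror-my_module-my_test, which runs
+# "concuerror -m my_module -t my_test" and writes its log to
+# $(CONCUERROR_LOGS_DIR)/concuerror-my_module-my_test.txt.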
+
+# Core targets.
+
+check:: concuerror
+
+ifndef KEEP_LOGS
+distclean:: distclean-concuerror
+endif
+
+# Plugin-specific targets.
+
+$(ERLANG_MK_TMP)/Concuerror/bin/concuerror: | $(ERLANG_MK_TMP)
+ $(verbose) git clone https://github.com/parapluu/Concuerror $(ERLANG_MK_TMP)/Concuerror
+ $(verbose) $(MAKE) -C $(ERLANG_MK_TMP)/Concuerror
+
+$(CONCUERROR_LOGS_DIR):
+ $(verbose) mkdir -p $(CONCUERROR_LOGS_DIR)
+
+define concuerror_html_report
+<!DOCTYPE html>
+<html lang="en">
+<head>
+<meta charset="utf-8">
+<title>Concuerror HTML report</title>
+</head>
+<body>
+<h1>Concuerror HTML report</h1>
+<p>Generated on $(concuerror_date)</p>
+<ul>
+$(foreach t,$(concuerror_targets),<li><a href="$(t).txt">$(t)</a></li>)
+</ul>
+</body>
+</html>
+endef
+
+concuerror: $(addprefix concuerror-,$(subst :,-,$(CONCUERROR_TESTS)))
+ $(eval concuerror_date := $(shell date))
+ $(eval concuerror_targets := $^)
+ $(verbose) $(call core_render,concuerror_html_report,$(CONCUERROR_LOGS_DIR)/concuerror.html)
+
+define concuerror_target
+.PHONY: concuerror-$1-$2
+
+concuerror-$1-$2: test-build | $(ERLANG_MK_TMP)/Concuerror/bin/concuerror $(CONCUERROR_LOGS_DIR)
+ $(ERLANG_MK_TMP)/Concuerror/bin/concuerror \
+ --pa $(CURDIR)/ebin --pa $(TEST_DIR) \
+ -o $(CONCUERROR_LOGS_DIR)/concuerror-$1-$2.txt \
+ $$(CONCUERROR_OPTS) -m $1 -t $2
+endef
+
+$(foreach test,$(CONCUERROR_TESTS),$(eval $(call concuerror_target,$(firstword $(subst :, ,$(test))),$(lastword $(subst :, ,$(test))))))
+
+distclean-concuerror:
+ $(gen_verbose) rm -rf $(CONCUERROR_LOGS_DIR)
+
+endif
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: ct apps-ct distclean-ct
+
+# Configuration.
+
+CT_OPTS ?=
+
+ifneq ($(wildcard $(TEST_DIR)),)
+ifndef CT_SUITES
+CT_SUITES := $(sort $(subst _SUITE.erl,,$(notdir $(call core_find,$(TEST_DIR)/,*_SUITE.erl))))
+endif
+endif
+CT_SUITES ?=
+CT_LOGS_DIR ?= $(CURDIR)/logs
+
+# Core targets.
+
+tests:: ct
+
+ifndef KEEP_LOGS
+distclean:: distclean-ct
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Common_test targets:" \
+ " ct Run all the common_test suites for this project" \
+ "" \
+ "All your common_test suites have their associated targets." \
+ "A suite named http_SUITE can be ran using the ct-http target."
+
+# Plugin-specific targets.
+
+CT_RUN = ct_run \
+ -no_auto_compile \
+ -noinput \
+ -pa $(CURDIR)/ebin $(TEST_DIR) \
+ -dir $(TEST_DIR) \
+ -logdir $(CT_LOGS_DIR)
+
+ifeq ($(CT_SUITES),)
+ct: $(if $(IS_APP)$(ROOT_DIR),,apps-ct)
+else
+# We do not run tests if we are in an apps/* application with no test directory.
+ifneq ($(IS_APP)$(wildcard $(TEST_DIR)),1)
+ct: test-build $(if $(IS_APP)$(ROOT_DIR),,apps-ct)
+ $(verbose) mkdir -p $(CT_LOGS_DIR)
+ $(gen_verbose) $(CT_RUN) -sname ct_$(PROJECT) -suite $(addsuffix _SUITE,$(CT_SUITES)) $(CT_OPTS)
+endif
+endif
+
+ifneq ($(ALL_APPS_DIRS),)
+define ct_app_target
+apps-ct-$1: test-build
+ $$(MAKE) -C $1 ct IS_APP=1
+endef
+
+$(foreach app,$(ALL_APPS_DIRS),$(eval $(call ct_app_target,$(app))))
+
+apps-ct: $(addprefix apps-ct-,$(ALL_APPS_DIRS))
+endif
+
+ifdef t
+ifeq (,$(findstring :,$t))
+CT_EXTRA = -group $t
+else
+t_words = $(subst :, ,$t)
+CT_EXTRA = -group $(firstword $(t_words)) -case $(lastword $(t_words))
+endif
+else
+ifdef c
+CT_EXTRA = -case $c
+else
+CT_EXTRA =
+endif
+endif
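+
+# Illustrative usage (hypothetical suite): "make ct-http t=admin" runs the
+# admin group of http_SUITE; "make ct-http t=admin:login" narrows it to the
+# login case within that group; "make ct-http c=login" selects the case directly.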
+
+define ct_suite_target
+ct-$(1): test-build
+ $(verbose) mkdir -p $(CT_LOGS_DIR)
+ $(gen_verbose_esc) $(CT_RUN) -sname ct_$(PROJECT) -suite $(addsuffix _SUITE,$(1)) $(CT_EXTRA) $(CT_OPTS)
+endef
+
+$(foreach test,$(CT_SUITES),$(eval $(call ct_suite_target,$(test))))
+
+distclean-ct:
+ $(gen_verbose) rm -rf $(CT_LOGS_DIR)
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: plt distclean-plt dialyze
+
+# Configuration.
+
+DIALYZER_PLT ?= $(CURDIR)/.$(PROJECT).plt
+export DIALYZER_PLT
+
+PLT_APPS ?=
+DIALYZER_DIRS ?= --src -r $(wildcard src) $(ALL_APPS_DIRS)
+DIALYZER_OPTS ?= -Werror_handling -Wrace_conditions -Wunmatched_returns # -Wunderspecs
+DIALYZER_PLT_OPTS ?=
+
+# Core targets.
+
+check:: dialyze
+
+distclean:: distclean-plt
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Dialyzer targets:" \
+ " plt Build a PLT file for this project" \
+ " dialyze Analyze the project using Dialyzer"
+
+# Plugin-specific targets.
+
+define filter_opts.erl
+ Opts = init:get_plain_arguments(),
+ {Filtered, _} = lists:foldl(fun
+ (O, {Os, true}) -> {[O|Os], false};
+ (O = "-D", {Os, _}) -> {[O|Os], true};
+ (O = [\\$$-, \\$$D, _ | _], {Os, _}) -> {[O|Os], false};
+ (O = "-I", {Os, _}) -> {[O|Os], true};
+ (O = [\\$$-, \\$$I, _ | _], {Os, _}) -> {[O|Os], false};
+ (O = "-pa", {Os, _}) -> {[O|Os], true};
+ (_, Acc) -> Acc
+ end, {[], false}, Opts),
+ io:format("~s~n", [string:join(lists:reverse(Filtered), " ")]),
+ halt().
+endef
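+
+# Illustrative: filter_opts.erl above keeps only the -D, -I and -pa flags
+# (and their arguments) from ERLC_OPTS when invoking Dialyzer, e.g.
+# "-DTEST=1 +debug_info -I include" is reduced to "-DTEST=1 -I include".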
+
+# DIALYZER_PLT is a variable understood directly by Dialyzer.
+#
+# We append the path to erts at the end of the PLT. This works
+# because the PLT file is in the external term format and the
+# function binary_to_term/1 ignores any trailing data.
+$(DIALYZER_PLT): deps app
+ $(eval DEPS_LOG := $(shell test -f $(ERLANG_MK_TMP)/deps.log && \
+ while read p; do test -d $$p/ebin && echo $$p/ebin; done <$(ERLANG_MK_TMP)/deps.log))
+ $(verbose) dialyzer --build_plt $(DIALYZER_PLT_OPTS) --apps \
+ erts kernel stdlib $(PLT_APPS) $(OTP_DEPS) $(LOCAL_DEPS) $(DEPS_LOG) || test $$? -eq 2
+ $(verbose) $(ERL) -eval 'io:format("~n~s~n", [code:lib_dir(erts)]), halt().' >> $@
+
+plt: $(DIALYZER_PLT)
+
+distclean-plt:
+ $(gen_verbose) rm -f $(DIALYZER_PLT)
+
+ifneq ($(wildcard $(DIALYZER_PLT)),)
+dialyze: $(if $(filter --src,$(DIALYZER_DIRS)),,deps app)
+ $(verbose) if ! tail -n1 $(DIALYZER_PLT) | \
+ grep -q "^`$(ERL) -eval 'io:format("~s", [code:lib_dir(erts)]), halt().'`$$"; then \
+ rm $(DIALYZER_PLT); \
+ $(MAKE) plt; \
+ fi
+else
+dialyze: $(DIALYZER_PLT)
+endif
+ $(verbose) dialyzer --no_native `$(ERL) \
+ -eval "$(subst $(newline),,$(call escape_dquotes,$(call filter_opts.erl)))" \
+ -extra $(ERLC_OPTS)` $(DIALYZER_DIRS) $(DIALYZER_OPTS) $(if $(wildcard ebin/),-pa ebin/)
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-edoc edoc
+
+# Configuration.
+
+EDOC_OPTS ?=
+EDOC_SRC_DIRS ?=
+EDOC_OUTPUT ?= doc
+
+define edoc.erl
+ SrcPaths = lists:foldl(fun(P, Acc) ->
+ filelib:wildcard(atom_to_list(P) ++ "/{src,c_src}") ++ Acc
+ end, [], [$(call comma_list,$(patsubst %,'%',$(call core_native_path,$(EDOC_SRC_DIRS))))]),
+ DefaultOpts = [{dir, "$(EDOC_OUTPUT)"}, {source_path, SrcPaths}, {subpackages, false}],
+ edoc:application($(1), ".", [$(2)] ++ DefaultOpts),
+ halt(0).
+endef
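+# Illustrative example (the app path is hypothetical): with
+# EDOC_SRC_DIRS = apps/myapp, the snippet above expands to the existing
+# apps/myapp/src and apps/myapp/c_src directories and passes them as
+# source_path to edoc, writing the generated docs to $(EDOC_OUTPUT).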
+
+# Core targets.
+
+ifneq ($(strip $(EDOC_SRC_DIRS)$(wildcard doc/overview.edoc)),)
+docs:: edoc
+endif
+
+distclean:: distclean-edoc
+
+# Plugin-specific targets.
+
+edoc: distclean-edoc doc-deps
+ $(gen_verbose) $(call erlang,$(call edoc.erl,$(PROJECT),$(EDOC_OPTS)))
+
+distclean-edoc:
+ $(gen_verbose) rm -f $(EDOC_OUTPUT)/*.css $(EDOC_OUTPUT)/*.html $(EDOC_OUTPUT)/*.png $(EDOC_OUTPUT)/edoc-info
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Configuration.
+
+DTL_FULL_PATH ?=
+DTL_PATH ?= templates/
+DTL_PREFIX ?=
+DTL_SUFFIX ?= _dtl
+DTL_OPTS ?=
+
+# Verbosity.
+
+dtl_verbose_0 = @echo " DTL " $(filter %.dtl,$(?F));
+dtl_verbose = $(dtl_verbose_$(V))
+
+# Core targets.
+
+DTL_PATH := $(abspath $(DTL_PATH))
+DTL_FILES := $(sort $(call core_find,$(DTL_PATH),*.dtl))
+
+ifneq ($(DTL_FILES),)
+
+DTL_NAMES = $(addprefix $(DTL_PREFIX),$(addsuffix $(DTL_SUFFIX),$(DTL_FILES:$(DTL_PATH)/%.dtl=%)))
+DTL_MODULES = $(if $(DTL_FULL_PATH),$(subst /,_,$(DTL_NAMES)),$(notdir $(DTL_NAMES)))
+BEAM_FILES += $(addsuffix .beam,$(addprefix ebin/,$(DTL_MODULES)))
+
+ifneq ($(words $(DTL_FILES)),0)
+# Rebuild templates when the Makefile changes.
+$(ERLANG_MK_TMP)/last-makefile-change-erlydtl: $(MAKEFILE_LIST) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(DTL_FILES); \
+ fi
+ $(verbose) touch $@
+
+ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change-erlydtl
+endif
+
+define erlydtl_compile.erl
+ [begin
+ Module0 = case "$(strip $(DTL_FULL_PATH))" of
+ "" ->
+ filename:basename(F, ".dtl");
+ _ ->
+ "$(call core_native_path,$(DTL_PATH))/" ++ F2 = filename:rootname(F, ".dtl"),
+ re:replace(F2, "/", "_", [{return, list}, global])
+ end,
+ Module = list_to_atom("$(DTL_PREFIX)" ++ string:to_lower(Module0) ++ "$(DTL_SUFFIX)"),
+ case erlydtl:compile(F, Module, [$(DTL_OPTS)] ++ [{out_dir, "ebin/"}, return_errors]) of
+ ok -> ok;
+ {ok, _} -> ok
+ end
+ end || F <- string:tokens("$(1)", " ")],
+ halt().
+endef
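+# Illustrative module naming (template paths are hypothetical): with the
+# defaults, templates/show.dtl compiles to the module show_dtl; with
+# DTL_FULL_PATH set, templates/users/show.dtl becomes users_show_dtl.
+# DTL_PREFIX and DTL_SUFFIX are wrapped around the lowercased name.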
+
+ebin/$(PROJECT).app:: $(DTL_FILES) | ebin/
+ $(if $(strip $?),\
+ $(dtl_verbose) $(call erlang,$(call erlydtl_compile.erl,$(call core_native_path,$?)),\
+ -pa ebin/))
+
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, Dave Cottlehuber <dch@skunkwerks.at>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-escript escript escript-zip
+
+# Configuration.
+
+ESCRIPT_NAME ?= $(PROJECT)
+ESCRIPT_FILE ?= $(ESCRIPT_NAME)
+
+ESCRIPT_SHEBANG ?= /usr/bin/env escript
+ESCRIPT_COMMENT ?= This is an -*- erlang -*- file
+ESCRIPT_EMU_ARGS ?= -escript main $(ESCRIPT_NAME)
+
+ESCRIPT_ZIP ?= 7z a -tzip -mx=9 -mtc=off $(if $(filter-out 0,$(V)),,> /dev/null)
+ESCRIPT_ZIP_FILE ?= $(ERLANG_MK_TMP)/escript.zip
+
+# Core targets.
+
+distclean:: distclean-escript
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Escript targets:" \
+ " escript Build an executable escript archive" \
+
+# Plugin-specific targets.
+
+escript-zip:: FULL=1
+escript-zip:: deps app
+ $(verbose) mkdir -p $(dir $(ESCRIPT_ZIP))
+ $(verbose) rm -f $(ESCRIPT_ZIP_FILE)
+ $(gen_verbose) cd .. && $(ESCRIPT_ZIP) $(ESCRIPT_ZIP_FILE) $(PROJECT)/ebin/*
+ifneq ($(DEPS),)
+ $(verbose) cd $(DEPS_DIR) && $(ESCRIPT_ZIP) $(ESCRIPT_ZIP_FILE) \
+ $(subst $(DEPS_DIR)/,,$(addsuffix /*,$(wildcard \
+ $(addsuffix /ebin,$(shell cat $(ERLANG_MK_TMP)/deps.log)))))
+endif
+
+escript:: escript-zip
+ $(gen_verbose) printf "%s\n" \
+ "#!$(ESCRIPT_SHEBANG)" \
+ "%% $(ESCRIPT_COMMENT)" \
+ "%%! $(ESCRIPT_EMU_ARGS)" > $(ESCRIPT_FILE)
+ $(verbose) cat $(ESCRIPT_ZIP_FILE) >> $(ESCRIPT_FILE)
+ $(verbose) chmod +x $(ESCRIPT_FILE)
+
+distclean-escript:
+ $(gen_verbose) rm -f $(ESCRIPT_FILE)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, Enrique Fernandez <enrique.fernandez@erlang-solutions.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: eunit apps-eunit
+
+# Configuration
+
+EUNIT_OPTS ?=
+EUNIT_ERL_OPTS ?=
+
+# Core targets.
+
+tests:: eunit
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "EUnit targets:" \
+ " eunit Run all the EUnit tests for this project"
+
+# Plugin-specific targets.
+
+define eunit.erl
+ $(call cover.erl)
+ CoverSetup(),
+ case eunit:test($1, [$(EUNIT_OPTS)]) of
+ ok -> ok;
+ error -> halt(2)
+ end,
+ CoverExport("$(call core_native_path,$(COVER_DATA_DIR))/eunit.coverdata"),
+ halt()
+endef
+
+EUNIT_ERL_OPTS += -pa $(TEST_DIR) $(CURDIR)/ebin
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+eunit: test-build cover-data-dir
+ $(gen_verbose) $(call erlang,$(call eunit.erl,['$(t)']),$(EUNIT_ERL_OPTS))
+else
+eunit: test-build cover-data-dir
+ $(gen_verbose) $(call erlang,$(call eunit.erl,fun $(t)/0),$(EUNIT_ERL_OPTS))
+endif
+else
+EUNIT_EBIN_MODS = $(notdir $(basename $(ERL_FILES) $(BEAM_FILES)))
+EUNIT_TEST_MODS = $(notdir $(basename $(call core_find,$(TEST_DIR)/,*.erl)))
+
+EUNIT_MODS = $(foreach mod,$(EUNIT_EBIN_MODS) $(filter-out \
+ $(patsubst %,%_tests,$(EUNIT_EBIN_MODS)),$(EUNIT_TEST_MODS)),'$(mod)')
+
+eunit: test-build $(if $(IS_APP)$(ROOT_DIR),,apps-eunit) cover-data-dir
+ifneq ($(wildcard src/ $(TEST_DIR)),)
+ $(gen_verbose) $(call erlang,$(call eunit.erl,[$(call comma_list,$(EUNIT_MODS))]),$(EUNIT_ERL_OPTS))
+endif
+
+ifneq ($(ALL_APPS_DIRS),)
+apps-eunit: test-build
+ $(verbose) eunit_retcode=0 ; for app in $(ALL_APPS_DIRS); do $(MAKE) -C $$app eunit IS_APP=1; \
+ [ $$? -ne 0 ] && eunit_retcode=1 ; done ; \
+ exit $$eunit_retcode
+endif
+endif
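+# Illustrative usage of the t variable above (module and test names are
+# hypothetical): `make eunit t=cow_http` runs only the EUnit tests of the
+# cow_http module, while `make eunit t=cow_http:parse_test` runs the single
+# test function cow_http:parse_test/0.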
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter proper,$(DEPS) $(TEST_DEPS)),proper)
+.PHONY: proper
+
+# Targets.
+
+tests:: proper
+
+define proper_check.erl
+ $(call cover.erl)
+ code:add_pathsa([
+ "$(call core_native_path,$(CURDIR)/ebin)",
+ "$(call core_native_path,$(DEPS_DIR)/*/ebin)",
+ "$(call core_native_path,$(TEST_DIR))"]),
+ Module = fun(M) ->
+ [true] =:= lists:usort([
+ case atom_to_list(F) of
+ "prop_" ++ _ ->
+ io:format("Testing ~p:~p/0~n", [M, F]),
+ proper:quickcheck(M:F(), nocolors);
+ _ ->
+ true
+ end
+ || {F, 0} <- M:module_info(exports)])
+ end,
+ try begin
+ CoverSetup(),
+ Res = case $(1) of
+ all -> [true] =:= lists:usort([Module(M) || M <- [$(call comma_list,$(3))]]);
+ module -> Module($(2));
+ function -> proper:quickcheck($(2), nocolors)
+ end,
+ CoverExport("$(COVER_DATA_DIR)/proper.coverdata"),
+ Res
+ end of
+ true -> halt(0);
+ _ -> halt(1)
+ catch error:undef ->
+ io:format("Undefined property or module?~n~p~n", [erlang:get_stacktrace()]),
+ halt(0)
+ end.
+endef
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+proper: test-build cover-data-dir
+ $(verbose) $(call erlang,$(call proper_check.erl,module,$(t)))
+else
+proper: test-build cover-data-dir
+ $(verbose) echo Testing $(t)/0
+ $(verbose) $(call erlang,$(call proper_check.erl,function,$(t)()))
+endif
+else
+proper: test-build cover-data-dir
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(wildcard ebin/*.beam) $(call core_find,$(TEST_DIR)/,*.beam))))))
+ $(gen_verbose) $(call erlang,$(call proper_check.erl,all,undefined,$(MODULES)))
+endif
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Verbosity.
+
+proto_verbose_0 = @echo " PROTO " $(filter %.proto,$(?F));
+proto_verbose = $(proto_verbose_$(V))
+
+# Core targets.
+
+ifneq ($(wildcard src/),)
+ifneq ($(filter gpb protobuffs,$(BUILD_DEPS) $(DEPS)),)
+PROTO_FILES := $(filter %.proto,$(ALL_SRC_FILES))
+ERL_FILES += $(addprefix src/,$(patsubst %.proto,%_pb.erl,$(notdir $(PROTO_FILES))))
+
+ifeq ($(PROTO_FILES),)
+$(ERLANG_MK_TMP)/last-makefile-change-protobuffs:
+ $(verbose) :
+else
+# Rebuild proto files when the Makefile changes.
+# We exclude $(PROJECT).d to avoid a circular dependency.
+$(ERLANG_MK_TMP)/last-makefile-change-protobuffs: $(filter-out $(PROJECT).d,$(MAKEFILE_LIST)) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(PROTO_FILES); \
+ fi
+ $(verbose) touch $@
+
+$(PROJECT).d:: $(ERLANG_MK_TMP)/last-makefile-change-protobuffs
+endif
+
+ifeq ($(filter gpb,$(BUILD_DEPS) $(DEPS)),)
+define compile_proto.erl
+ [begin
+ protobuffs_compile:generate_source(F, [
+ {output_include_dir, "./include"},
+ {output_src_dir, "./src"}])
+ end || F <- string:tokens("$1", " ")],
+ halt().
+endef
+else
+define compile_proto.erl
+ [begin
+ gpb_compile:file(F, [
+ {include_as_lib, true},
+ {module_name_suffix, "_pb"},
+ {o_hrl, "./include"},
+ {o_erl, "./src"}])
+ end || F <- string:tokens("$1", " ")],
+ halt().
+endef
+endif
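+# Illustrative output naming (the file name is hypothetical): src/thing.proto
+# compiles to src/thing_pb.erl plus a header under include/, matching the
+# "_pb" module_name_suffix configured for gpb above; protobuffs produces the
+# same src/ and include/ layout via its output options.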
+
+ifneq ($(PROTO_FILES),)
+$(PROJECT).d:: $(PROTO_FILES)
+ $(verbose) mkdir -p ebin/ include/
+ $(if $(strip $?),$(proto_verbose) $(call erlang,$(call compile_proto.erl,$?)))
+endif
+endif
+endif
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: relx-rel relx-relup distclean-relx-rel run
+
+# Configuration.
+
+RELX ?= $(ERLANG_MK_TMP)/relx
+RELX_CONFIG ?= $(CURDIR)/relx.config
+
+RELX_URL ?= https://erlang.mk/res/relx-v3.27.0
+RELX_OPTS ?=
+RELX_OUTPUT_DIR ?= _rel
+RELX_REL_EXT ?=
+RELX_TAR ?= 1
+
+ifdef SFX
+ RELX_TAR = 1
+endif
+
+ifeq ($(firstword $(RELX_OPTS)),-o)
+ RELX_OUTPUT_DIR = $(word 2,$(RELX_OPTS))
+else
+ RELX_OPTS += -o $(RELX_OUTPUT_DIR)
+endif
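+# Illustrative effect of the conditional above (the path is hypothetical):
+# setting RELX_OPTS = -o /tmp/myrel makes RELX_OUTPUT_DIR follow it, while
+# leaving RELX_OPTS unset keeps the default _rel directory and appends
+# -o _rel to the relx invocation.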
+
+# Core targets.
+
+ifeq ($(IS_DEP),)
+ifneq ($(wildcard $(RELX_CONFIG)),)
+rel:: relx-rel
+
+relup:: relx-relup
+endif
+endif
+
+distclean:: distclean-relx-rel
+
+# Plugin-specific targets.
+
+$(RELX): | $(ERLANG_MK_TMP)
+ $(gen_verbose) $(call core_http_get,$(RELX),$(RELX_URL))
+ $(verbose) chmod +x $(RELX)
+
+relx-rel: $(RELX) rel-deps app
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) release
+ $(verbose) $(MAKE) relx-post-rel
+ifeq ($(RELX_TAR),1)
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) tar
+endif
+
+relx-relup: $(RELX) rel-deps app
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) release
+ $(MAKE) relx-post-rel
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) relup $(if $(filter 1,$(RELX_TAR)),tar)
+
+distclean-relx-rel:
+ $(gen_verbose) rm -rf $(RELX_OUTPUT_DIR)
+
+# Default hooks.
+relx-post-rel::
+ $(verbose) :
+
+# Run target.
+
+ifeq ($(wildcard $(RELX_CONFIG)),)
+run::
+else
+
+define get_relx_release.erl
+ {ok, Config} = file:consult("$(call core_native_path,$(RELX_CONFIG))"),
+ {release, {Name, Vsn0}, _} = lists:keyfind(release, 1, Config),
+ Vsn = case Vsn0 of
+ {cmd, Cmd} -> os:cmd(Cmd);
+ semver -> "";
+ {semver, _} -> "";
+ VsnStr -> Vsn0
+ end,
+ Extended = case lists:keyfind(extended_start_script, 1, Config) of
+ {_, true} -> "1";
+ _ -> ""
+ end,
+ io:format("~s ~s ~s", [Name, Vsn, Extended]),
+ halt(0).
+endef
+
+RELX_REL := $(shell $(call erlang,$(get_relx_release.erl)))
+RELX_REL_NAME := $(word 1,$(RELX_REL))
+RELX_REL_VSN := $(word 2,$(RELX_REL))
+RELX_REL_CMD := $(if $(word 3,$(RELX_REL)),console)
+
+ifeq ($(PLATFORM),msys2)
+RELX_REL_EXT := .cmd
+endif
+
+run:: all
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) $(RELX_REL_CMD)
+
+ifdef RELOAD
+rel::
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) ping
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) \
+ eval "io:format(\"~p~n\", [c:lm()])"
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Relx targets:" \
+ " run Compile the project, build the release and run it"
+
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, M Robert Martin <rob@version2beta.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: shell
+
+# Configuration.
+
+SHELL_ERL ?= erl
+SHELL_PATHS ?= $(CURDIR)/ebin $(TEST_DIR)
+SHELL_OPTS ?=
+
+ALL_SHELL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(SHELL_DEPS))
+
+# Core targets
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Shell targets:" \
+ " shell Run an erlang shell with SHELL_OPTS or reasonable default"
+
+# Plugin-specific targets.
+
+$(foreach dep,$(SHELL_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+build-shell-deps:
+else
+build-shell-deps: $(ALL_SHELL_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_SHELL_DEPS_DIRS) ; do \
+ if [ -z "$(strip $(FULL))" ] && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ else \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ fi \
+ done
+endif
+
+shell:: build-shell-deps
+ $(gen_verbose) $(SHELL_ERL) -pa $(SHELL_PATHS) $(SHELL_OPTS)
+
+# Copyright 2017, Stanislaw Klekot <dozzie@jarowit.net>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-sphinx sphinx
+
+# Configuration.
+
+SPHINX_BUILD ?= sphinx-build
+SPHINX_SOURCE ?= doc
+SPHINX_CONFDIR ?=
+SPHINX_FORMATS ?= html
+SPHINX_DOCTREES ?= $(ERLANG_MK_TMP)/sphinx.doctrees
+SPHINX_OPTS ?=
+
+#sphinx_html_opts =
+#sphinx_html_output = html
+#sphinx_man_opts =
+#sphinx_man_output = man
+#sphinx_latex_opts =
+#sphinx_latex_output = latex
+
+# Helpers.
+
+sphinx_build_0 = @echo " SPHINX" $1; $(SPHINX_BUILD) -N -q
+sphinx_build_1 = $(SPHINX_BUILD) -N
+sphinx_build_2 = set -x; $(SPHINX_BUILD)
+sphinx_build = $(sphinx_build_$(V))
+
+define sphinx.build
+$(call sphinx_build,$1) -b $1 -d $(SPHINX_DOCTREES) $(if $(SPHINX_CONFDIR),-c $(SPHINX_CONFDIR)) $(SPHINX_OPTS) $(sphinx_$1_opts) -- $(SPHINX_SOURCE) $(call sphinx.output,$1)
+
+endef
+
+define sphinx.output
+$(if $(sphinx_$1_output),$(sphinx_$1_output),$1)
+endef
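+# Illustrative example (the output path is hypothetical): with
+# SPHINX_FORMATS = html man and only sphinx_man_output = output/man set,
+# the html build lands in ./html (the format name is the fallback) and
+# the man build in output/man.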
+
+# Targets.
+
+ifneq ($(wildcard $(if $(SPHINX_CONFDIR),$(SPHINX_CONFDIR),$(SPHINX_SOURCE))/conf.py),)
+docs:: sphinx
+distclean:: distclean-sphinx
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Sphinx targets:" \
+ " sphinx Generate Sphinx documentation." \
+ "" \
+ "ReST sources and 'conf.py' file are expected in directory pointed by" \
+ "SPHINX_SOURCE ('doc' by default). SPHINX_FORMATS lists formats to build (only" \
+ "'html' format is generated by default); target directory can be specified by" \
+ 'setting sphinx_$${format}_output, for example: sphinx_html_output = output/html' \
+ "Additional Sphinx options can be set in SPHINX_OPTS."
+
+# Plugin-specific targets.
+
+sphinx:
+ $(foreach F,$(SPHINX_FORMATS),$(call sphinx.build,$F))
+
+distclean-sphinx:
+ $(gen_verbose) rm -rf $(filter-out $(SPHINX_SOURCE),$(foreach F,$(SPHINX_FORMATS),$(call sphinx.output,$F)))
+
+# Copyright (c) 2017, Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: show-ERL_LIBS show-ERLC_OPTS show-TEST_ERLC_OPTS
+
+show-ERL_LIBS:
+ @echo $(ERL_LIBS)
+
+show-ERLC_OPTS:
+ @$(foreach opt,$(ERLC_OPTS) -pa ebin -I include,echo "$(opt)";)
+
+show-TEST_ERLC_OPTS:
+ @$(foreach opt,$(TEST_ERLC_OPTS) -pa ebin -I include,echo "$(opt)";)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter triq,$(DEPS) $(TEST_DEPS)),triq)
+.PHONY: triq
+
+# Targets.
+
+tests:: triq
+
+define triq_check.erl
+ $(call cover.erl)
+ code:add_pathsa([
+ "$(call core_native_path,$(CURDIR)/ebin)",
+ "$(call core_native_path,$(DEPS_DIR)/*/ebin)",
+ "$(call core_native_path,$(TEST_DIR))"]),
+ try begin
+ CoverSetup(),
+ Res = case $(1) of
+ all -> [true] =:= lists:usort([triq:check(M) || M <- [$(call comma_list,$(3))]]);
+ module -> triq:check($(2));
+ function -> triq:check($(2))
+ end,
+ CoverExport("$(COVER_DATA_DIR)/triq.coverdata"),
+ Res
+ end of
+ true -> halt(0);
+ _ -> halt(1)
+ catch error:undef ->
+ io:format("Undefined property or module?~n~p~n", [erlang:get_stacktrace()]),
+ halt(0)
+ end.
+endef
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+triq: test-build cover-data-dir
+ $(verbose) $(call erlang,$(call triq_check.erl,module,$(t)))
+else
+triq: test-build cover-data-dir
+ $(verbose) echo Testing $(t)/0
+ $(verbose) $(call erlang,$(call triq_check.erl,function,$(t)()))
+endif
+else
+triq: test-build cover-data-dir
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(wildcard ebin/*.beam) $(call core_find,$(TEST_DIR)/,*.beam))))))
+ $(gen_verbose) $(call erlang,$(call triq_check.erl,all,undefined,$(MODULES)))
+endif
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Erlang Solutions Ltd.
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: xref distclean-xref
+
+# Configuration.
+
+ifeq ($(XREF_CONFIG),)
+ XREFR_ARGS :=
+else
+ XREFR_ARGS := -c $(XREF_CONFIG)
+endif
+
+XREFR ?= $(CURDIR)/xrefr
+export XREFR
+
+XREFR_URL ?= https://github.com/inaka/xref_runner/releases/download/1.1.0/xrefr
+
+# Core targets.
+
+help::
+ $(verbose) printf '%s\n' '' \
+ 'Xref targets:' \
+ ' xref Run Xrefr using $$XREF_CONFIG as config file if defined'
+
+distclean:: distclean-xref
+
+# Plugin-specific targets.
+
+$(XREFR):
+ $(gen_verbose) $(call core_http_get,$(XREFR),$(XREFR_URL))
+ $(verbose) chmod +x $(XREFR)
+
+xref: deps app $(XREFR)
+ $(gen_verbose) $(XREFR) $(XREFR_ARGS)
+
+distclean-xref:
+ $(gen_verbose) rm -rf $(XREFR)
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Viktor Söderqvist <viktor@zuiderkwast.se>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+COVER_REPORT_DIR ?= cover
+COVER_DATA_DIR ?= $(COVER_REPORT_DIR)
+
+ifdef COVER
+COVER_APPS ?= $(notdir $(ALL_APPS_DIRS))
+COVER_DEPS ?=
+endif
+
+# Code coverage for Common Test.
+
+ifdef COVER
+ifdef CT_RUN
+ifneq ($(wildcard $(TEST_DIR)),)
+test-build:: $(TEST_DIR)/ct.cover.spec
+
+$(TEST_DIR)/ct.cover.spec: cover-data-dir
+ $(gen_verbose) printf "%s\n" \
+ "{incl_app, '$(PROJECT)', details}." \
+ "{incl_dirs, '$(PROJECT)', [\"$(call core_native_path,$(CURDIR)/ebin)\" \
+ $(foreach a,$(COVER_APPS),$(comma) \"$(call core_native_path,$(APPS_DIR)/$a/ebin)\") \
+ $(foreach d,$(COVER_DEPS),$(comma) \"$(call core_native_path,$(DEPS_DIR)/$d/ebin)\")]}." \
+ '{export,"$(call core_native_path,$(abspath $(COVER_DATA_DIR))/ct.coverdata)"}.' > $@
+
+CT_RUN += -cover $(TEST_DIR)/ct.cover.spec
+endif
+endif
+endif
+
+# Code coverage for other tools.
+
+ifdef COVER
+define cover.erl
+ CoverSetup = fun() ->
+ Dirs = ["$(call core_native_path,$(CURDIR)/ebin)"
+ $(foreach a,$(COVER_APPS),$(comma) "$(call core_native_path,$(APPS_DIR)/$a/ebin)")
+ $(foreach d,$(COVER_DEPS),$(comma) "$(call core_native_path,$(DEPS_DIR)/$d/ebin)")],
+ [begin
+ case filelib:is_dir(Dir) of
+ false -> false;
+ true ->
+ case cover:compile_beam_directory(Dir) of
+ {error, _} -> halt(1);
+ _ -> true
+ end
+ end
+ end || Dir <- Dirs]
+ end,
+ CoverExport = fun(Filename) -> cover:export(Filename) end,
+endef
+else
+define cover.erl
+ CoverSetup = fun() -> ok end,
+ CoverExport = fun(_) -> ok end,
+endef
+endif
+
+# Core targets
+
+ifdef COVER
+ifneq ($(COVER_REPORT_DIR),)
+tests::
+ $(verbose) $(MAKE) --no-print-directory cover-report
+endif
+
+cover-data-dir: | $(COVER_DATA_DIR)
+
+$(COVER_DATA_DIR):
+ $(verbose) mkdir -p $(COVER_DATA_DIR)
+else
+cover-data-dir:
+endif
+
+clean:: coverdata-clean
+
+ifneq ($(COVER_REPORT_DIR),)
+distclean:: cover-report-clean
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Cover targets:" \
+ " cover-report Generate a HTML coverage report from previously collected" \
+ " cover data." \
+ " all.coverdata Merge all coverdata files into all.coverdata." \
+ "" \
+ "If COVER=1 is set, coverage data is generated by the targets eunit and ct. The" \
+ "target tests additionally generates a HTML coverage report from the combined" \
+ "coverdata files from each of these testing tools. HTML reports can be disabled" \
+ "by setting COVER_REPORT_DIR to empty."
+
+# Plugin specific targets
+
+COVERDATA = $(filter-out $(COVER_DATA_DIR)/all.coverdata,$(wildcard $(COVER_DATA_DIR)/*.coverdata))
+
+.PHONY: coverdata-clean
+coverdata-clean:
+ $(gen_verbose) rm -f $(COVER_DATA_DIR)/*.coverdata $(TEST_DIR)/ct.cover.spec
+
+# Merge all coverdata files into one.
+define cover_export.erl
+ $(foreach f,$(COVERDATA),cover:import("$(f)") == ok orelse halt(1),)
+ cover:export("$(COVER_DATA_DIR)/$@"), halt(0).
+endef
+
+all.coverdata: $(COVERDATA) cover-data-dir
+ $(gen_verbose) $(call erlang,$(cover_export.erl))
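+# Illustrative usage (assuming earlier COVER=1 runs produced eunit.coverdata
+# and ct.coverdata in $(COVER_DATA_DIR)): `make all.coverdata` imports each
+# file and exports the merged result as $(COVER_DATA_DIR)/all.coverdata.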
+
+# These are only defined if COVER_REPORT_DIR is non-empty. Set COVER_REPORT_DIR to
+# empty if you want the coverdata files but not the HTML report.
+ifneq ($(COVER_REPORT_DIR),)
+
+.PHONY: cover-report-clean cover-report
+
+cover-report-clean:
+ $(gen_verbose) rm -rf $(COVER_REPORT_DIR)
+ifneq ($(COVER_REPORT_DIR),$(COVER_DATA_DIR))
+ $(if $(shell ls -A $(COVER_DATA_DIR)/),,$(verbose) rmdir $(COVER_DATA_DIR))
+endif
+
+ifeq ($(COVERDATA),)
+cover-report:
+else
+
+# Modules which include eunit.hrl always contain one line without coverage
+# because eunit defines test/0 which is never called. We compensate for this.
+EUNIT_HRL_MODS = $(subst $(space),$(comma),$(shell \
+ grep -H -e '^\s*-include.*include/eunit\.hrl"' src/*.erl \
+ | sed "s/^src\/\(.*\)\.erl:.*/'\1'/" | uniq))
+
+define cover_report.erl
+ $(foreach f,$(COVERDATA),cover:import("$(f)") == ok orelse halt(1),)
+ Ms = cover:imported_modules(),
+ [cover:analyse_to_file(M, "$(COVER_REPORT_DIR)/" ++ atom_to_list(M)
+ ++ ".COVER.html", [html]) || M <- Ms],
+ Report = [begin {ok, R} = cover:analyse(M, module), R end || M <- Ms],
+ EunitHrlMods = [$(EUNIT_HRL_MODS)],
+ Report1 = [{M, {Y, case lists:member(M, EunitHrlMods) of
+ true -> N - 1; false -> N end}} || {M, {Y, N}} <- Report],
+ TotalY = lists:sum([Y || {_, {Y, _}} <- Report1]),
+ TotalN = lists:sum([N || {_, {_, N}} <- Report1]),
+ Perc = fun(Y, N) -> case Y + N of 0 -> 100; S -> round(100 * Y / S) end end,
+ TotalPerc = Perc(TotalY, TotalN),
+ {ok, F} = file:open("$(COVER_REPORT_DIR)/index.html", [write]),
+ io:format(F, "<!DOCTYPE html><html>~n"
+ "<head><meta charset=\"UTF-8\">~n"
+ "<title>Coverage report</title></head>~n"
+ "<body>~n", []),
+ io:format(F, "<h1>Coverage</h1>~n<p>Total: ~p%</p>~n", [TotalPerc]),
+ io:format(F, "<table><tr><th>Module</th><th>Coverage</th></tr>~n", []),
+ [io:format(F, "<tr><td><a href=\"~p.COVER.html\">~p</a></td>"
+ "<td>~p%</td></tr>~n",
+ [M, M, Perc(Y, N)]) || {M, {Y, N}} <- Report1],
+ How = "$(subst $(space),$(comma)$(space),$(basename $(COVERDATA)))",
+ Date = "$(shell date -u "+%Y-%m-%dT%H:%M:%SZ")",
+ io:format(F, "</table>~n"
+ "<p>Generated using ~s and erlang.mk on ~s.</p>~n"
+ "</body></html>", [How, Date]),
+ halt().
+endef
+
+cover-report:
+ $(verbose) mkdir -p $(COVER_REPORT_DIR)
+ $(gen_verbose) $(call erlang,$(cover_report.erl))
+
+endif
+endif # ifneq ($(COVER_REPORT_DIR),)
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: sfx
+
+ifdef RELX_REL
+ifdef SFX
+
+# Configuration.
+
+SFX_ARCHIVE ?= $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/$(RELX_REL_NAME)-$(RELX_REL_VSN).tar.gz
+SFX_OUTPUT_FILE ?= $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME).run
+
+# Core targets.
+
+rel:: sfx
+
+# Plugin-specific targets.
+
+define sfx_stub
+#!/bin/sh
+
+TMPDIR=`mktemp -d`
+ARCHIVE=`awk '/^__ARCHIVE_BELOW__$$/ {print NR + 1; exit 0;}' $$0`
+FILENAME=$$(basename $$0)
+REL=$${FILENAME%.*}
+
+tail -n+$$ARCHIVE $$0 | tar -xzf - -C $$TMPDIR
+
+$$TMPDIR/bin/$$REL console
+RET=$$?
+
+rm -rf $$TMPDIR
+
+exit $$RET
+
+__ARCHIVE_BELOW__
+endef
+
+sfx:
+ $(verbose) $(call core_render,sfx_stub,$(SFX_OUTPUT_FILE))
+ $(gen_verbose) cat $(SFX_ARCHIVE) >> $(SFX_OUTPUT_FILE)
+ $(verbose) chmod +x $(SFX_OUTPUT_FILE)
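+# Illustrative usage (the release name is hypothetical): `make rel SFX=1`
+# builds the release and its tarball (SFX forces RELX_TAR=1), then produces
+# _rel/myrel.run, a self-extracting script that unpacks the tarball to a
+# temporary directory, starts bin/myrel console, and cleans up on exit.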
+
+endif
+endif
+
+# Copyright (c) 2013-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# External plugins.
+
+DEP_PLUGINS ?=
+
+$(foreach p,$(DEP_PLUGINS),\
+ $(eval $(if $(findstring /,$p),\
+ $(call core_dep_plugin,$p,$(firstword $(subst /, ,$p))),\
+ $(call core_dep_plugin,$p/plugins.mk,$p))))
+
+help:: help-plugins
+
+help-plugins::
+ $(verbose) :
+
+# Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015-2016, Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Fetch dependencies recursively (without building them).
+
+.PHONY: fetch-deps fetch-doc-deps fetch-rel-deps fetch-test-deps \
+ fetch-shell-deps
+
+.PHONY: $(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+fetch-deps: $(ERLANG_MK_RECURSIVE_DEPS_LIST)
+fetch-doc-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST)
+fetch-rel-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST)
+fetch-test-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST)
+fetch-shell-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+ifneq ($(SKIP_DEPS),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST):
+ $(verbose) :> $@
+else
+# By default, we fetch "normal" dependencies. They are also included no
+# matter the type of requested dependencies.
+#
+# $(ALL_DEPS_DIRS) includes $(BUILD_DEPS).
+
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_DOC_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_REL_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_TEST_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_SHELL_DEPS_DIRS)
+
+# Allow the use of fetch-deps together with $(DEP_TYPES) to fetch multiple
+# types of dependencies with a single target.
+ifneq ($(filter doc,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_DOC_DEPS_DIRS)
+endif
+ifneq ($(filter rel,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_REL_DEPS_DIRS)
+endif
+ifneq ($(filter test,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_TEST_DEPS_DIRS)
+endif
+ifneq ($(filter shell,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_SHELL_DEPS_DIRS)
+endif
+
+ERLANG_MK_RECURSIVE_TMP_LIST := $(abspath $(ERLANG_MK_TMP)/recursive-tmp-deps-$(shell echo $$PPID).log)
+
+$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): | $(ERLANG_MK_TMP)
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_RECURSIVE_TMP_LIST)
+endif
+ $(verbose) touch $(ERLANG_MK_RECURSIVE_TMP_LIST)
+ $(verbose) set -e; for dep in $^ ; do \
+ if ! grep -qs ^$$dep$$ $(ERLANG_MK_RECURSIVE_TMP_LIST); then \
+ echo $$dep >> $(ERLANG_MK_RECURSIVE_TMP_LIST); \
+ if grep -qs -E "^[[:blank:]]*include[[:blank:]]+(erlang\.mk|.*/erlang\.mk|.*ERLANG_MK_FILENAME.*)$$" \
+ $$dep/GNUmakefile $$dep/makefile $$dep/Makefile; then \
+ $(MAKE) -C $$dep fetch-deps \
+ IS_DEP=1 \
+ ERLANG_MK_RECURSIVE_TMP_LIST=$(ERLANG_MK_RECURSIVE_TMP_LIST); \
+ fi \
+ fi \
+ done
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) sort < $(ERLANG_MK_RECURSIVE_TMP_LIST) | \
+ uniq > $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted
+ $(verbose) cmp -s $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted $@ \
+ || mv $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted $@
+ $(verbose) rm -f $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted
+ $(verbose) rm $(ERLANG_MK_RECURSIVE_TMP_LIST)
+endif
+endif # ifneq ($(SKIP_DEPS),)
+
+# List dependencies recursively.
+
+.PHONY: list-deps list-doc-deps list-rel-deps list-test-deps \
+ list-shell-deps
+
+list-deps: $(ERLANG_MK_RECURSIVE_DEPS_LIST)
+list-doc-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST)
+list-rel-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST)
+list-test-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST)
+list-shell-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+list-deps list-doc-deps list-rel-deps list-test-deps list-shell-deps:
+ $(verbose) cat $^
+
+# Query dependencies recursively.
+
+.PHONY: query-deps query-doc-deps query-rel-deps query-test-deps \
+ query-shell-deps
+
+QUERY ?= name fetch_method repo version
+
+define query_target
+$(1): $(2) clean-tmp-query.log
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(4)
+endif
+ $(verbose) $(foreach dep,$(3),\
+ echo $(PROJECT): $(foreach q,$(QUERY),$(call query_$(q),$(dep))) >> $(4) ;)
+ $(if $(filter-out query-deps,$(1)),,\
+ $(verbose) set -e; for dep in $(3) ; do \
+ if grep -qs ^$$$$dep$$$$ $(ERLANG_MK_TMP)/query.log; then \
+ :; \
+ else \
+ echo $$$$dep >> $(ERLANG_MK_TMP)/query.log; \
+ $(MAKE) -C $(DEPS_DIR)/$$$$dep $$@ QUERY="$(QUERY)" IS_DEP=1 || true; \
+ fi \
+ done)
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) touch $(4)
+ $(verbose) cat $(4)
+endif
+endef
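+# Illustrative usage: `make query-deps` prints one line per dependency with
+# the fields listed in QUERY; e.g. `make query-deps QUERY="name repo"`
+# narrows the output to each dependency's name and repository.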
+
+clean-tmp-query.log:
+ifeq ($(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_TMP)/query.log
+endif
+
+$(eval $(call query_target,query-deps,$(ERLANG_MK_RECURSIVE_DEPS_LIST),$(BUILD_DEPS) $(DEPS),$(ERLANG_MK_QUERY_DEPS_FILE)))
+$(eval $(call query_target,query-doc-deps,$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST),$(DOC_DEPS),$(ERLANG_MK_QUERY_DOC_DEPS_FILE)))
+$(eval $(call query_target,query-rel-deps,$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST),$(REL_DEPS),$(ERLANG_MK_QUERY_REL_DEPS_FILE)))
+$(eval $(call query_target,query-test-deps,$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST),$(TEST_DEPS),$(ERLANG_MK_QUERY_TEST_DEPS_FILE)))
+$(eval $(call query_target,query-shell-deps,$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST),$(SHELL_DEPS),$(ERLANG_MK_QUERY_SHELL_DEPS_FILE)))
diff --git a/deps/rabbitmq_sharding/etc/rabbit-hare.config b/deps/rabbitmq_sharding/etc/rabbit-hare.config
new file mode 100644
index 0000000000..3ec308d9af
--- /dev/null
+++ b/deps/rabbitmq_sharding/etc/rabbit-hare.config
@@ -0,0 +1,3 @@
+%% -*- erlang -*-
+%% Note - we still need this for rabbit_sharding_test_util:plugin_dir/0 to work...
+[].
diff --git a/deps/rabbitmq_sharding/etc/rabbit-test.config b/deps/rabbitmq_sharding/etc/rabbit-test.config
new file mode 100644
index 0000000000..3ec308d9af
--- /dev/null
+++ b/deps/rabbitmq_sharding/etc/rabbit-test.config
@@ -0,0 +1,3 @@
+%% -*- erlang -*-
+%% Note - we still need this for rabbit_sharding_test_util:plugin_dir/0 to work...
+[].
diff --git a/deps/rabbitmq_sharding/etc/rkey.sh b/deps/rabbitmq_sharding/etc/rkey.sh
new file mode 100755
index 0000000000..bc72ef19b3
--- /dev/null
+++ b/deps/rabbitmq_sharding/etc/rkey.sh
@@ -0,0 +1,8 @@
+#!/bin/sh
+CTL=$1
+
+curl -i -u guest:guest -H "content-type:application/json" \
+ -XPUT -d'{"type":"x-consistent-hash","durable":true}' \
+ http://localhost:15672/api/exchanges/%2f/rkey.ex
+
+$CTL set_policy rkey-shard "^rkey\." '{"shards-per-node": 2, "routing-key": "1234"}'
diff --git a/deps/rabbitmq_sharding/rabbitmq-components.mk b/deps/rabbitmq_sharding/rabbitmq-components.mk
new file mode 100644
index 0000000000..b2a3be8b35
--- /dev/null
+++ b/deps/rabbitmq_sharding/rabbitmq-components.mk
@@ -0,0 +1,359 @@
+ifeq ($(.DEFAULT_GOAL),)
+# Define default goal to `all` because this file defines some targets
+# before the inclusion of erlang.mk leading to the wrong target becoming
+# the default.
+.DEFAULT_GOAL = all
+endif
+
+# PROJECT_VERSION defaults to:
+# 1. the version exported by rabbitmq-server-release;
+# 2. the version stored in `git-revisions.txt`, if it exists;
+# 3. a version based on git-describe(1), if it is a Git clone;
+# 4. 0.0.0
+
+PROJECT_VERSION := $(RABBITMQ_VERSION)
+
+ifeq ($(PROJECT_VERSION),)
+PROJECT_VERSION := $(shell \
+if test -f git-revisions.txt; then \
+ head -n1 git-revisions.txt | \
+ awk '{print $$$(words $(PROJECT_DESCRIPTION) version);}'; \
+else \
+ (git describe --dirty --abbrev=7 --tags --always --first-parent \
+ 2>/dev/null || echo rabbitmq_v0_0_0) | \
+ sed -e 's/^rabbitmq_v//' -e 's/^v//' -e 's/_/./g' -e 's/-/+/' \
+ -e 's/-/./g'; \
+fi)
+endif
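+# Illustrative example (the tag is hypothetical): a git-describe output of
+# v3.8.9-12-gabc1234 becomes 3.8.9+12.gabc1234 after the sed rewrites above
+# (leading v stripped, first dash turned into +, remaining dashes into dots).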
+
+# --------------------------------------------------------------------
+# RabbitMQ components.
+# --------------------------------------------------------------------
+
+# For RabbitMQ repositories, we want to check out branches which match
+# the parent project. For instance, if the parent project is on a
+# release tag, dependencies must be on the same release tag. If the
+# parent project is on a topic branch, dependencies must be on the same
+# topic branch, or fall back to `stable` or `master`, whichever was the
+# base of the topic branch.
+
+dep_amqp_client = git_rmq rabbitmq-erlang-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_amqp10_client = git_rmq rabbitmq-amqp1.0-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_amqp10_common = git_rmq rabbitmq-amqp1.0-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbit = git_rmq rabbitmq-server $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbit_common = git_rmq rabbitmq-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_amqp1_0 = git_rmq rabbitmq-amqp1.0 $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_amqp = git_rmq rabbitmq-auth-backend-amqp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_cache = git_rmq rabbitmq-auth-backend-cache $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_http = git_rmq rabbitmq-auth-backend-http $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_ldap = git_rmq rabbitmq-auth-backend-ldap $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_oauth2 = git_rmq rabbitmq-auth-backend-oauth2 $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_mechanism_ssl = git_rmq rabbitmq-auth-mechanism-ssl $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_aws = git_rmq rabbitmq-aws $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_boot_steps_visualiser = git_rmq rabbitmq-boot-steps-visualiser $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_cli = git_rmq rabbitmq-cli $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_codegen = git_rmq rabbitmq-codegen $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_consistent_hash_exchange = git_rmq rabbitmq-consistent-hash-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_ct_client_helpers = git_rmq rabbitmq-ct-client-helpers $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_ct_helpers = git_rmq rabbitmq-ct-helpers $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_delayed_message_exchange = git_rmq rabbitmq-delayed-message-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_dotnet_client = git_rmq rabbitmq-dotnet-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_event_exchange = git_rmq rabbitmq-event-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_federation = git_rmq rabbitmq-federation $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_federation_management = git_rmq rabbitmq-federation-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_java_client = git_rmq rabbitmq-java-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_client = git_rmq rabbitmq-jms-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_cts = git_rmq rabbitmq-jms-cts $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_topic_exchange = git_rmq rabbitmq-jms-topic-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_lvc_exchange = git_rmq rabbitmq-lvc-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management = git_rmq rabbitmq-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_agent = git_rmq rabbitmq-management-agent $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_exchange = git_rmq rabbitmq-management-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_themes = git_rmq rabbitmq-management-themes $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_message_timestamp = git_rmq rabbitmq-message-timestamp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_metronome = git_rmq rabbitmq-metronome $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_mqtt = git_rmq rabbitmq-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_objc_client = git_rmq rabbitmq-objc-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_aws = git_rmq rabbitmq-peer-discovery-aws $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_common = git_rmq rabbitmq-peer-discovery-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_consul = git_rmq rabbitmq-peer-discovery-consul $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_etcd = git_rmq rabbitmq-peer-discovery-etcd $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_k8s = git_rmq rabbitmq-peer-discovery-k8s $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_prometheus = git_rmq rabbitmq-prometheus $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_random_exchange = git_rmq rabbitmq-random-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_recent_history_exchange = git_rmq rabbitmq-recent-history-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_routing_node_stamp = git_rmq rabbitmq-routing-node-stamp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_rtopic_exchange = git_rmq rabbitmq-rtopic-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_server_release = git_rmq rabbitmq-server-release $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_sharding = git_rmq rabbitmq-sharding $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_shovel = git_rmq rabbitmq-shovel $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_shovel_management = git_rmq rabbitmq-shovel-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_stomp = git_rmq rabbitmq-stomp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_stream = git_rmq rabbitmq-stream $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_toke = git_rmq rabbitmq-toke $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_top = git_rmq rabbitmq-top $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_tracing = git_rmq rabbitmq-tracing $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_trust_store = git_rmq rabbitmq-trust-store $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_test = git_rmq rabbitmq-test $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_dispatch = git_rmq rabbitmq-web-dispatch $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_stomp = git_rmq rabbitmq-web-stomp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_stomp_examples = git_rmq rabbitmq-web-stomp-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt = git_rmq rabbitmq-web-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt_examples = git_rmq rabbitmq-web-mqtt-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_website = git_rmq rabbitmq-website $(current_rmq_ref) $(base_rmq_ref) live master
+dep_toke = git_rmq toke $(current_rmq_ref) $(base_rmq_ref) master
+
+dep_rabbitmq_public_umbrella = git_rmq rabbitmq-public-umbrella $(current_rmq_ref) $(base_rmq_ref) master
+
+# Third-party dependencies version pinning.
+#
+# We do that in this file, which is copied in all projects, to ensure
+# all projects use the same versions. It avoids conflicts and makes it
+# possible to work with rabbitmq-public-umbrella.
+
+dep_accept = hex 0.3.5
+dep_cowboy = hex 2.8.0
+dep_cowlib = hex 2.9.1
+dep_jsx = hex 2.11.0
+dep_lager = hex 3.8.0
+dep_prometheus = git https://github.com/deadtrickster/prometheus.erl.git master
+dep_ra = git https://github.com/rabbitmq/ra.git master
+dep_ranch = hex 1.7.1
+dep_recon = hex 2.5.1
+dep_observer_cli = hex 1.5.4
+dep_stdout_formatter = hex 0.2.4
+dep_sysmon_handler = hex 1.3.0
+
+RABBITMQ_COMPONENTS = amqp_client \
+ amqp10_common \
+ amqp10_client \
+ rabbit \
+ rabbit_common \
+ rabbitmq_amqp1_0 \
+ rabbitmq_auth_backend_amqp \
+ rabbitmq_auth_backend_cache \
+ rabbitmq_auth_backend_http \
+ rabbitmq_auth_backend_ldap \
+ rabbitmq_auth_backend_oauth2 \
+ rabbitmq_auth_mechanism_ssl \
+ rabbitmq_aws \
+ rabbitmq_boot_steps_visualiser \
+ rabbitmq_cli \
+ rabbitmq_codegen \
+ rabbitmq_consistent_hash_exchange \
+ rabbitmq_ct_client_helpers \
+ rabbitmq_ct_helpers \
+ rabbitmq_delayed_message_exchange \
+ rabbitmq_dotnet_client \
+ rabbitmq_event_exchange \
+ rabbitmq_federation \
+ rabbitmq_federation_management \
+ rabbitmq_java_client \
+ rabbitmq_jms_client \
+ rabbitmq_jms_cts \
+ rabbitmq_jms_topic_exchange \
+ rabbitmq_lvc_exchange \
+ rabbitmq_management \
+ rabbitmq_management_agent \
+ rabbitmq_management_exchange \
+ rabbitmq_management_themes \
+ rabbitmq_message_timestamp \
+ rabbitmq_metronome \
+ rabbitmq_mqtt \
+ rabbitmq_objc_client \
+ rabbitmq_peer_discovery_aws \
+ rabbitmq_peer_discovery_common \
+ rabbitmq_peer_discovery_consul \
+ rabbitmq_peer_discovery_etcd \
+ rabbitmq_peer_discovery_k8s \
+ rabbitmq_prometheus \
+ rabbitmq_random_exchange \
+ rabbitmq_recent_history_exchange \
+ rabbitmq_routing_node_stamp \
+ rabbitmq_rtopic_exchange \
+ rabbitmq_server_release \
+ rabbitmq_sharding \
+ rabbitmq_shovel \
+ rabbitmq_shovel_management \
+ rabbitmq_stomp \
+ rabbitmq_stream \
+ rabbitmq_toke \
+ rabbitmq_top \
+ rabbitmq_tracing \
+ rabbitmq_trust_store \
+ rabbitmq_web_dispatch \
+ rabbitmq_web_mqtt \
+ rabbitmq_web_mqtt_examples \
+ rabbitmq_web_stomp \
+ rabbitmq_web_stomp_examples \
+ rabbitmq_website
+
+# Erlang.mk does not rebuild dependencies once they have been compiled,
+# except for those listed in the `$(FORCE_REBUILD)` variable.
+#
+# We want all RabbitMQ components to always be rebuilt: this eases
+# the work on several components at the same time.
+
+FORCE_REBUILD = $(RABBITMQ_COMPONENTS)
+
+# Several components have a custom erlang.mk/build.config, mainly
+# to disable eunit. Therefore, we can't use the top-level project's
+# erlang.mk copy.
+NO_AUTOPATCH += $(RABBITMQ_COMPONENTS)
+
+ifeq ($(origin current_rmq_ref),undefined)
+ifneq ($(wildcard .git),)
+current_rmq_ref := $(shell (\
+ ref=$$(LANG=C git branch --list | awk '/^\* \(.*detached / {ref=$$0; sub(/.*detached [^ ]+ /, "", ref); sub(/\)$$/, "", ref); print ref; exit;} /^\* / {ref=$$0; sub(/^\* /, "", ref); print ref; exit}');\
+ if test "$$(git rev-parse --short HEAD)" != "$$ref"; then echo "$$ref"; fi))
+else
+current_rmq_ref := master
+endif
+endif
+export current_rmq_ref
+
+ifeq ($(origin base_rmq_ref),undefined)
+ifneq ($(wildcard .git),)
+possible_base_rmq_ref := master
+ifeq ($(possible_base_rmq_ref),$(current_rmq_ref))
+base_rmq_ref := $(current_rmq_ref)
+else
+base_rmq_ref := $(shell \
+ (git rev-parse --verify -q master >/dev/null && \
+ git rev-parse --verify -q $(possible_base_rmq_ref) >/dev/null && \
+ git merge-base --is-ancestor $$(git merge-base master HEAD) $(possible_base_rmq_ref) && \
+ echo $(possible_base_rmq_ref)) || \
+ echo master)
+endif
+else
+base_rmq_ref := master
+endif
+endif
+export base_rmq_ref
+
+# Repository URL selection.
+#
+# First, we infer other components' location from the current project
+# repository URL, if it's a Git repository:
+# - We take the "origin" remote URL as the base
+# - The current project name and repository name is replaced by the
+# target's properties:
+# eg. rabbitmq-common is replaced by rabbitmq-codegen
+# eg. rabbit_common is replaced by rabbitmq_codegen
+#
+# If cloning from this computed location fails, we fall back to the
+# RabbitMQ upstream, which is GitHub.
+
+# Macro to transform eg. "rabbit_common" to "rabbitmq-common".
+rmq_cmp_repo_name = $(word 2,$(dep_$(1)))
+
+# Upstream URL for the current project.
+RABBITMQ_COMPONENT_REPO_NAME := $(call rmq_cmp_repo_name,$(PROJECT))
+RABBITMQ_UPSTREAM_FETCH_URL ?= https://github.com/rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git
+RABBITMQ_UPSTREAM_PUSH_URL ?= git@github.com:rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git
+
+# Current URL for the current project. If this is not a Git clone,
+# default to the upstream Git repository.
+ifneq ($(wildcard .git),)
+git_origin_fetch_url := $(shell git config remote.origin.url)
+git_origin_push_url := $(shell git config remote.origin.pushurl || git config remote.origin.url)
+RABBITMQ_CURRENT_FETCH_URL ?= $(git_origin_fetch_url)
+RABBITMQ_CURRENT_PUSH_URL ?= $(git_origin_push_url)
+else
+RABBITMQ_CURRENT_FETCH_URL ?= $(RABBITMQ_UPSTREAM_FETCH_URL)
+RABBITMQ_CURRENT_PUSH_URL ?= $(RABBITMQ_UPSTREAM_PUSH_URL)
+endif
+
+# Macro to replace the following pattern:
+# 1. /foo.git -> /bar.git
+# 2. /foo -> /bar
+# 3. /foo/ -> /bar/
+subst_repo_name = $(patsubst %/$(1)/%,%/$(2)/%,$(patsubst %/$(1),%/$(2),$(patsubst %/$(1).git,%/$(2).git,$(3))))
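+# Illustrative expansion (the URL is hypothetical):
+# $(call subst_repo_name,rabbitmq-common,rabbitmq-codegen,https://github.com/rabbitmq/rabbitmq-common.git)
+# yields https://github.com/rabbitmq/rabbitmq-codegen.git.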
+
+# Macro to replace both the project's name (eg. "rabbit_common") and
+# repository name (eg. "rabbitmq-common") by the target's equivalent.
+#
+# This macro is kept on one line because we don't want whitespaces in
+# the returned value, as it's used in $(dep_fetch_git_rmq) in a shell
+# single-quoted string.
+dep_rmq_repo = $(if $(dep_$(2)),$(call subst_repo_name,$(PROJECT),$(2),$(call subst_repo_name,$(RABBITMQ_COMPONENT_REPO_NAME),$(call rmq_cmp_repo_name,$(2)),$(1))),$(pkg_$(1)_repo))
+
+dep_rmq_commits = $(if $(dep_$(1)), \
+ $(wordlist 3,$(words $(dep_$(1))),$(dep_$(1))), \
+ $(pkg_$(1)_commit))
+
+define dep_fetch_git_rmq
+ fetch_url1='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_FETCH_URL),$(1))'; \
+ fetch_url2='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_FETCH_URL),$(1))'; \
+ if test "$$$$fetch_url1" != '$(RABBITMQ_CURRENT_FETCH_URL)' && \
+ git clone -q -n -- "$$$$fetch_url1" $(DEPS_DIR)/$(call dep_name,$(1)); then \
+ fetch_url="$$$$fetch_url1"; \
+ push_url='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_PUSH_URL),$(1))'; \
+ elif git clone -q -n -- "$$$$fetch_url2" $(DEPS_DIR)/$(call dep_name,$(1)); then \
+ fetch_url="$$$$fetch_url2"; \
+ push_url='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_PUSH_URL),$(1))'; \
+ fi; \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && ( \
+ $(foreach ref,$(call dep_rmq_commits,$(1)), \
+ git checkout -q $(ref) >/dev/null 2>&1 || \
+ ) \
+ (echo "error: no valid pathspec among: $(call dep_rmq_commits,$(1))" \
+ 1>&2 && false) ) && \
+ (test "$$$$fetch_url" = "$$$$push_url" || \
+ git remote set-url --push origin "$$$$push_url")
+endef
+
+# --------------------------------------------------------------------
+# Component distribution.
+# --------------------------------------------------------------------
+
+list-dist-deps::
+ @:
+
+prepare-dist::
+ @:
+
+# --------------------------------------------------------------------
+# Umbrella-specific settings.
+# --------------------------------------------------------------------
+
+# If the top-level project is a RabbitMQ component, we override
+# $(DEPS_DIR) for this project to point to the top-level's one.
+#
+# We also verify that the guessed DEPS_DIR is actually named `deps`,
+# to rule out any situation where it is a coincidence that we found a
+# `rabbitmq-components.mk` in upper directories.
+
+possible_deps_dir_1 = $(abspath ..)
+possible_deps_dir_2 = $(abspath ../../..)
+
+ifeq ($(notdir $(possible_deps_dir_1)),deps)
+ifneq ($(wildcard $(possible_deps_dir_1)/../rabbitmq-components.mk),)
+deps_dir_overriden = 1
+DEPS_DIR ?= $(possible_deps_dir_1)
+DISABLE_DISTCLEAN = 1
+endif
+endif
+
+ifeq ($(deps_dir_overriden),)
+ifeq ($(notdir $(possible_deps_dir_2)),deps)
+ifneq ($(wildcard $(possible_deps_dir_2)/../rabbitmq-components.mk),)
+deps_dir_overriden = 1
+DEPS_DIR ?= $(possible_deps_dir_2)
+DISABLE_DISTCLEAN = 1
+endif
+endif
+endif
+
+ifneq ($(wildcard UMBRELLA.md),)
+DISABLE_DISTCLEAN = 1
+endif
+
+# We disable `make distclean` so $(DEPS_DIR) is not accidentally removed.
+
+ifeq ($(DISABLE_DISTCLEAN),1)
+ifneq ($(filter distclean distclean-deps,$(MAKECMDGOALS)),)
+SKIP_DEPS = 1
+endif
+endif
diff --git a/deps/rabbitmq_sharding/src/rabbit_sharding_exchange_decorator.erl b/deps/rabbitmq_sharding/src/rabbit_sharding_exchange_decorator.erl
new file mode 100644
index 0000000000..e802570e55
--- /dev/null
+++ b/deps/rabbitmq_sharding/src/rabbit_sharding_exchange_decorator.erl
@@ -0,0 +1,89 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_sharding_exchange_decorator).
+
+-rabbit_boot_step({?MODULE,
+ [{description, "sharding exchange decorator"},
+ {mfa, {rabbit_registry, register,
+ [exchange_decorator, <<"sharding">>, ?MODULE]}},
+ {cleanup, {rabbit_registry, unregister,
+ [exchange_decorator, <<"sharding">>]}},
+ {requires, rabbit_registry},
+ {enables, recovery}]}).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-behaviour(rabbit_exchange_decorator).
+
+-export([description/0, serialise_events/1]).
+-export([create/2, delete/3, policy_changed/2,
+ add_binding/3, remove_bindings/3, route/2, active_for/1]).
+
+-import(rabbit_sharding_util, [shard/1]).
+
+%%----------------------------------------------------------------------------
+
+description() ->
+ [{description, <<"Shard exchange decorator">>}].
+
+serialise_events(_X) -> false.
+
+create(transaction, _X) ->
+ ok;
+create(none, X) ->
+ maybe_start_sharding(X),
+ ok.
+
+add_binding(_Tx, _X, _B) -> ok.
+remove_bindings(_Tx, _X, _Bs) -> ok.
+
+route(_, _) -> [].
+
+active_for(X) ->
+ case shard(X) of
+ true -> noroute;
+ false -> none
+ end.
+
+%% we have to remove the policy from ?SHARDING_TABLE
+delete(transaction, _X, _Bs) -> ok;
+delete(none, X, _Bs) ->
+ maybe_stop_sharding(X),
+ ok.
+
+%% we have to remove the old policy from ?SHARDING_TABLE
+%% and then add the new one.
+policy_changed(OldX, NewX) ->
+ maybe_update_sharding(OldX, NewX),
+ ok.
+
+%%----------------------------------------------------------------------------
+
+maybe_update_sharding(OldX, NewX) ->
+ case shard(NewX) of
+ true ->
+ rabbit_sharding_shard:maybe_update_shards(OldX, NewX);
+ false ->
+ rabbit_sharding_shard:stop_sharding(OldX)
+ end.
+
+maybe_start_sharding(X)->
+ case shard(X) of
+ true ->
+ rabbit_sharding_shard:ensure_sharded_queues(X);
+ false ->
+ ok
+ end.
+
+maybe_stop_sharding(X) ->
+ case shard(X) of
+ true ->
+ rabbit_sharding_shard:stop_sharding(X);
+ false ->
+ ok
+ end.
diff --git a/deps/rabbitmq_sharding/src/rabbit_sharding_exchange_type_modulus_hash.erl b/deps/rabbitmq_sharding/src/rabbit_sharding_exchange_type_modulus_hash.erl
new file mode 100644
index 0000000000..0509972757
--- /dev/null
+++ b/deps/rabbitmq_sharding/src/rabbit_sharding_exchange_type_modulus_hash.erl
@@ -0,0 +1,60 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_sharding_exchange_type_modulus_hash).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-behaviour(rabbit_exchange_type).
+
+-export([description/0, serialise_events/0, route/2, info/1, info/2]).
+-export([validate/1, validate_binding/2,
+ create/2, delete/3, policy_changed/2,
+ add_binding/3, remove_bindings/3, assert_args_equivalence/2]).
+
+-rabbit_boot_step(
+ {rabbit_sharding_exchange_type_modulus_hash_registry,
+ [{description, "exchange type x-modulus-hash: registry"},
+ {mfa, {rabbit_registry, register,
+ [exchange, <<"x-modulus-hash">>, ?MODULE]}},
+ {cleanup, {rabbit_registry, unregister,
+ [exchange, <<"x-modulus-hash">>]}},
+ {requires, rabbit_registry},
+ {enables, kernel_ready}]}).
+
+-define(PHASH2_RANGE, 134217728). %% 2^27
+
+description() ->
+ [{description, <<"Modulus Hashing Exchange">>}].
+
+serialise_events() -> false.
+
+route(#exchange{name = Name},
+ #delivery{message = #basic_message{routing_keys = Routes}}) ->
+ Qs = rabbit_router:match_routing_key(Name, ['_']),
+ case length(Qs) of
+ 0 -> [];
+ N -> [lists:nth(hash_mod(Routes, N), Qs)]
+ end.
+
+info(_) -> [].
+
+info(_, _) -> [].
+
+validate(_X) -> ok.
+validate_binding(_X, _B) -> ok.
+create(_Tx, _X) -> ok.
+delete(_Tx, _X, _Bs) -> ok.
+policy_changed(_X1, _X2) -> ok.
+add_binding(_Tx, _X, _B) -> ok.
+remove_bindings(_Tx, _X, _Bs) -> ok.
+assert_args_equivalence(X, Args) ->
+ rabbit_exchange:assert_args_equivalence(X, Args).
+
+hash_mod(Routes, N) ->
+ M = erlang:phash2(Routes, ?PHASH2_RANGE) rem N,
+ M + 1. %% Erlang lists are indexed 1..N.
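+
+%% Illustrative example (the numbers are hypothetical): with 4 matched
+%% queues, a message whose routing keys hash to 10 is delivered to
+%% lists:nth(10 rem 4 + 1, Qs), i.e. the third queue; identical routing
+%% keys therefore always map to the same shard.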
diff --git a/deps/rabbitmq_sharding/src/rabbit_sharding_interceptor.erl b/deps/rabbitmq_sharding/src/rabbit_sharding_interceptor.erl
new file mode 100644
index 0000000000..4acba78827
--- /dev/null
+++ b/deps/rabbitmq_sharding/src/rabbit_sharding_interceptor.erl
@@ -0,0 +1,170 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_sharding_interceptor).
+
+-include_lib("rabbit_common/include/rabbit_framing.hrl").
+
+-behaviour(rabbit_channel_interceptor).
+
+-export([description/0, intercept/3, applies_to/0, init/1]).
+
+%% exported for tests
+-export([consumer_count/1]).
+
+-import(rabbit_sharding_util, [a2b/1, shards_per_node/1]).
+-import(rabbit_misc, [r/3, format/2, protocol_error/3]).
+
+-rabbit_boot_step({?MODULE,
+ [{description, "sharding interceptor"},
+ {mfa, {rabbit_registry, register,
+ [channel_interceptor,
+ <<"sharding interceptor">>, ?MODULE]}},
+ {cleanup, {rabbit_registry, unregister,
+ [channel_interceptor,
+ <<"sharding interceptor">>]}},
+ {requires, rabbit_registry},
+ {enables, recovery}]}).
+
+init(Ch) ->
+ rabbit_channel:get_vhost(Ch).
+
+description() ->
+ [{description, <<"Sharding interceptor for channel methods">>}].
+
+intercept(#'basic.consume'{queue = QName} = Method, Content, VHost) ->
+ case queue_name(VHost, QName) of
+ {ok, QName2} ->
+ {Method#'basic.consume'{queue = QName2}, Content};
+ {error, QName} ->
+ precondition_failed("Error finding sharded queue for: ~p", [QName])
+ end;
+
+intercept(#'basic.get'{queue = QName} = Method, Content, VHost) ->
+ case queue_name(VHost, QName) of
+ {ok, QName2} ->
+ {Method#'basic.get'{queue = QName2}, Content};
+ {error, QName} ->
+ precondition_failed("Error finding sharded queue for: ~p", [QName])
+ end;
+
+intercept(#'queue.delete'{queue = QName} = Method, Content, VHost) ->
+ case is_sharded(VHost, QName) of
+ true ->
+ precondition_failed("Can't delete sharded queue: ~p", [QName]);
+ _ ->
+ {Method, Content}
+ end;
+
+intercept(#'queue.declare'{queue = QName} = Method, Content, VHost) ->
+ case is_sharded(VHost, QName) of
+ true ->
+ %% Since, as an interceptor, we can't modify what the channel
+ %% returns, we modify the queue name instead so the channel
+ %% can at least return a queue.declare_ok for that particular
+ %% queue. Picking the first queue over the others is entirely
+ %% arbitrary.
+ QName2 = rabbit_sharding_util:make_queue_name(
+ QName, a2b(node()), 0),
+ {Method#'queue.declare'{queue = QName2}, Content};
+ _ ->
+ {Method, Content}
+ end;
+
+intercept(#'queue.bind'{queue = QName} = Method, Content, VHost) ->
+ case is_sharded(VHost, QName) of
+ true ->
+ precondition_failed("Can't bind sharded queue: ~p", [QName]);
+ _ ->
+ {Method, Content}
+ end;
+
+intercept(#'queue.unbind'{queue = QName} = Method, Content, VHost) ->
+ case is_sharded(VHost, QName) of
+ true ->
+ precondition_failed("Can't unbind sharded queue: ~p", [QName]);
+ _ ->
+ {Method, Content}
+ end;
+
+intercept(#'queue.purge'{queue = QName} = Method, Content, VHost) ->
+ case is_sharded(VHost, QName) of
+ true ->
+ precondition_failed("Can't purge sharded queue: ~p", [QName]);
+ _ ->
+ {Method, Content}
+ end;
+
+intercept(Method, Content, _VHost) ->
+ {Method, Content}.
+
+applies_to() ->
+ ['basic.consume', 'basic.get', 'queue.delete', 'queue.declare',
+ 'queue.bind', 'queue.unbind', 'queue.purge'].
+
+%%----------------------------------------------------------------------------
+
+%% If the queue is not part of a shard, return the unmodified name.
+queue_name(VHost, QBin) ->
+ case lookup_exchange(VHost, QBin) of
+ {ok, X} ->
+ case rabbit_sharding_util:shard(X) of
+ true ->
+ least_consumers(VHost, QBin, shards_per_node(X));
+ _ ->
+ {ok, QBin}
+ end;
+ _Error ->
+ {ok, QBin}
+ end.
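+%% Clients address a sharded queue by the name of its sharding exchange;
+%% queue_name/2 resolves that name to one of the concrete shard queues.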
+
+is_sharded(VHost, QBin) ->
+ case lookup_exchange(VHost, QBin) of
+ {ok, X} ->
+ rabbit_sharding_util:shard(X);
+ _Error ->
+ false
+ end.
+
+lookup_exchange(VHost, QBin) ->
+ rabbit_exchange:lookup(r(VHost, exchange, QBin)).
+
+least_consumers(VHost, QBin, N) ->
+ F = fun(QNum) ->
+ QBin2 = rabbit_sharding_util:make_queue_name(
+ QBin, a2b(node()), QNum),
+ case consumer_count(r(VHost, queue, QBin2)) of
+ {error, E} -> {error, E};
+ [{consumers, C}] -> {C, QBin2}
+ end
+ end,
+ case queues_with_count(F, N) of
+ [] ->
+ {error, QBin};
+ Queues ->
+ [{_, QBin3} | _ ] = lists:sort(Queues),
+ {ok, QBin3}
+ end.
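+%% The {Count, QBin} pairs sort by consumer count first and, on ties, by
+%% queue name; the shard index is the final component of the name, so
+%% ties break by queue index (see the basic.consume interceptor test).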
+
+queues_with_count(F, N) ->
+ lists:foldl(fun (C, Acc) ->
+ case F(C) of
+ {error, _} -> Acc;
+ Ret -> [Ret|Acc]
+ end
+ end, [], lists:seq(0, N-1)).
+
+consumer_count(QName) ->
+ rabbit_amqqueue:with(
+ QName,
+ fun(Q) ->
+ rabbit_amqqueue:info(Q, [consumers])
+ end).
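+%% When the queue does not exist, rabbit_amqqueue:with/2 yields an error
+%% tuple instead of a consumer count, which queues_with_count/2 filters
+%% out, so only existing shards are candidates.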
+
+precondition_failed(Format, QName) ->
+ protocol_error(precondition_failed, Format, QName).
diff --git a/deps/rabbitmq_sharding/src/rabbit_sharding_policy_validator.erl b/deps/rabbitmq_sharding/src/rabbit_sharding_policy_validator.erl
new file mode 100644
index 0000000000..da07765849
--- /dev/null
+++ b/deps/rabbitmq_sharding/src/rabbit_sharding_policy_validator.erl
@@ -0,0 +1,61 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_sharding_policy_validator).
+
+-behaviour(rabbit_policy_validator).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-export([register/0, validate_policy/1]).
+
+-rabbit_boot_step({?MODULE,
+ [{description, "sharding parameters"},
+ {mfa, {?MODULE, register, []}},
+ {requires, rabbit_registry},
+ {enables, recovery}]}).
+
+register() ->
+ [rabbit_registry:register(Class, Name, ?MODULE) ||
+ {Class, Name} <- [{policy_validator, <<"shards-per-node">>},
+ {policy_validator, <<"routing-key">>}]],
+ ok.
+
+validate_policy(KeyList) ->
+ SPN = proplists:get_value(<<"shards-per-node">>, KeyList, none),
+ RKey = proplists:get_value(<<"routing-key">>, KeyList, none),
+ case {SPN, RKey} of
+ {none, none} ->
+ ok;
+ {none, _} ->
+ {error, "shards-per-node must be specified", []};
+ {SPN, none} ->
+ validate_shards_per_node(SPN);
+ {SPN, RKey} ->
+ case validate_shards_per_node(SPN) of
+ ok -> validate_routing_key(RKey);
+ Else -> Else
+ end
+ end.
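+%% Example of a policy definition this validator accepts (the values are
+%% the ones used in the test suites):
+%%   [{<<"shards-per-node">>, 3}, {<<"routing-key">>, <<"1234">>}]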
+
+%%----------------------------------------------------------------------------
+
+validate_shards_per_node(Term) when is_number(Term) ->
+ case Term > 0 of
+ true ->
+ ok;
+ false ->
+ {error, "shards-per-node should be greater than 0, actually was ~p",
+ [Term]}
+ end;
+validate_shards_per_node(Term) ->
+ {error, "shards-per-node should be a number, actually was ~p", [Term]}.
+
+validate_routing_key(Term) when is_binary(Term) ->
+ ok;
+validate_routing_key(Term) ->
+ {error, "routing-key should be binary, actually was ~p", [Term]}.
diff --git a/deps/rabbitmq_sharding/src/rabbit_sharding_shard.erl b/deps/rabbitmq_sharding/src/rabbit_sharding_shard.erl
new file mode 100644
index 0000000000..8c2365db88
--- /dev/null
+++ b/deps/rabbitmq_sharding/src/rabbit_sharding_shard.erl
@@ -0,0 +1,133 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_sharding_shard).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-export([maybe_shard_exchanges/0,
+ ensure_sharded_queues/1,
+ maybe_update_shards/2,
+ stop_sharding/1]).
+
+-import(rabbit_misc, [r/3]).
+-import(rabbit_sharding_util, [a2b/1, exchange_bin/1, make_queue_name/3,
+ routing_key/1, shards_per_node/1]).
+
+-rabbit_boot_step({rabbit_sharding_maybe_shard,
+ [{description, "rabbit sharding maybe shard"},
+ {mfa, {?MODULE, maybe_shard_exchanges, []}},
+ {requires, recovery}]}).
+
+-define(MAX_CONNECTION_CLOSE_TIMEOUT, 10000).
+-define(SHARDING_USER, <<"rmq-sharding">>).
+
+%% We make sure the sharded queues are created when
+%% RabbitMQ starts.
+maybe_shard_exchanges() ->
+ [maybe_shard_exchanges(V) || V <- rabbit_vhost:list_names()],
+ ok.
+
+maybe_shard_exchanges(VHost) ->
+ [ensure_sharded_queues(X) ||
+ X <- rabbit_sharding_util:sharded_exchanges(VHost)].
+
+%% Each shard queue needs to be declared on its respective node.
+ensure_sharded_queues(X) ->
+ add_queues(X),
+ bind_queues(X).
+
+maybe_update_shards(OldX, NewX) ->
+ maybe_unbind_queues(routing_key(OldX), routing_key(NewX), OldX),
+ add_queues(NewX),
+ bind_queues(NewX).
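+%% Note that decreasing shards-per-node does not delete the surplus
+%% queues; add_queues/1 only declares whatever is missing (see
+%% shard_decrease_spn_keep_queues_test in the test suite).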
+
+stop_sharding(OldX) ->
+ unbind_queues(shards_per_node(OldX), OldX).
+
+%% routing key didn't change. Do nothing.
+maybe_unbind_queues(RK, RK, _OldX) ->
+ ok;
+maybe_unbind_queues(_RK, _NewRK, OldX) ->
+ unbind_queues(shards_per_node(OldX), OldX).
+
+unbind_queues(undefined, _X) ->
+ ok;
+unbind_queues(OldSPN, #exchange{name = XName} = X) ->
+ OldRKey = routing_key(X),
+ foreach_node(fun(Node) ->
+ [unbind_queue(XName, OldRKey, N, Node)
+ || N <- lists:seq(0, OldSPN-1)]
+ end).
+
+add_queues(#exchange{name = XName, durable = Durable} = X) ->
+ SPN = shards_per_node(X),
+ foreach_node(fun(Node) ->
+ [declare_queue(XName, Durable, N, Node)
+ || N <- lists:seq(0, SPN-1)]
+ end).
+
+bind_queues(#exchange{name = XName} = X) ->
+ RKey = routing_key(X),
+ SPN = shards_per_node(X),
+ foreach_node(fun(Node) ->
+ [bind_queue(XName, RKey, N, Node) ||
+ N <- lists:seq(0, SPN-1)]
+ end).
+
+%%----------------------------------------------------------------------------
+
+declare_queue(XName, Durable, N, Node) ->
+ QBin = make_queue_name(exchange_bin(XName), a2b(Node), N),
+ QueueName = rabbit_misc:r(v(XName), queue, QBin),
+ try rabbit_amqqueue:declare(QueueName, Durable, false, [], none,
+ ?SHARDING_USER, {ignore_location, Node}) of
+ {_Reply, _Q} ->
+ ok
+ catch
+ _Error:Reason ->
+ rabbit_log:error("sharding failed to declare queue for exchange ~p"
+ " - soft error:~n~p~n",
+ [exchange_bin(XName), Reason]),
+ error
+ end.
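+%% The {ignore_location, Node} argument pins the declaration to the given
+%% node, bypassing any queue-master-locator policy, so each shard lives
+%% on its intended node (see shard_queue_master_locator_test).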
+
+bind_queue(XName, RoutingKey, N, Node) ->
+ binding_action(fun rabbit_binding:add/3,
+ XName, RoutingKey, N, Node,
+ "sharding failed to bind queue ~p to exchange ~p"
+ " - soft error:~n~p~n").
+
+unbind_queue(XName, RoutingKey, N, Node) ->
+ binding_action(fun rabbit_binding:remove/3,
+ XName, RoutingKey, N, Node,
+ "sharding failed to unbind queue ~p to exchange ~p"
+ " - soft error:~n~p~n").
+
+binding_action(F, XName, RoutingKey, N, Node, ErrMsg) ->
+ QBin = make_queue_name(exchange_bin(XName), a2b(Node), N),
+ QueueName = rabbit_misc:r(v(XName), queue, QBin),
+ case F(#binding{source = XName,
+ destination = QueueName,
+ key = RoutingKey,
+ args = []},
+ fun (_X, _Q) -> ok end,
+ ?SHARDING_USER) of
+ ok -> ok;
+ {error, Reason} ->
+ rabbit_log:error(ErrMsg, [QBin, exchange_bin(XName), Reason]),
+ error
+ end.
+
+v(#resource{virtual_host = VHost}) ->
+ VHost.
+
+foreach_node(F) ->
+ [F(Node) || Node <- running_nodes()].
+
+running_nodes() ->
+ proplists:get_value(running_nodes, rabbit_mnesia:status(), []).
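+%% Shard queues are (re)declared on every node currently reported as
+%% running, which is how the shard set grows when nodes join the cluster
+%% (see shard_auto_scale_cluster_test in the test suite).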
diff --git a/deps/rabbitmq_sharding/src/rabbit_sharding_util.erl b/deps/rabbitmq_sharding/src/rabbit_sharding_util.erl
new file mode 100644
index 0000000000..cb5f719c65
--- /dev/null
+++ b/deps/rabbitmq_sharding/src/rabbit_sharding_util.erl
@@ -0,0 +1,48 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_sharding_util).
+
+-export([shard/1, sharded_exchanges/1]).
+-export([get_policy/2, shards_per_node/1, routing_key/1]).
+-export([exchange_bin/1, make_queue_name/3, a2b/1]).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-import(rabbit_misc, [pget/3]).
+
+shard(X) ->
+ case get_policy(<<"shards-per-node">>, X) of
+ undefined -> false;
+ _SPN -> true
+ end.
+
+sharded_exchanges(VHost) ->
+ [X || X <- rabbit_exchange:list(VHost), shard(X)].
+
+shards_per_node(X) ->
+ get_policy(<<"shards-per-node">>, X).
+
+routing_key(X) ->
+ case get_policy(<<"routing-key">>, X) of
+ undefined ->
+ <<>>;
+ Value ->
+ Value
+ end.
+
+get_policy(Key, X) ->
+ rabbit_policy:get(Key, X).
+
+exchange_bin(#resource{name = XBin}) -> XBin.
+
+make_queue_name(QBin, NodeBin, QNum) ->
+ %% we do this to prevent unprintable characters in queue names
+ QNumBin = list_to_binary(lists:flatten(io_lib:format("~p", [QNum]))),
+ <<"sharding: ", QBin/binary, " - ", NodeBin/binary, " - ", QNumBin/binary>>.
+
+a2b(A) -> list_to_binary(atom_to_list(A)).
diff --git a/deps/rabbitmq_sharding/test/src/rabbit_hash_exchange_SUITE.erl b/deps/rabbitmq_sharding/test/src/rabbit_hash_exchange_SUITE.erl
new file mode 100644
index 0000000000..66ce3daa4c
--- /dev/null
+++ b/deps/rabbitmq_sharding/test/src/rabbit_hash_exchange_SUITE.erl
@@ -0,0 +1,147 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+-module(rabbit_hash_exchange_SUITE).
+
+-compile(export_all).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+all() ->
+ [
+ {group, non_parallel_tests}
+ ].
+
+groups() ->
+ [
+ {non_parallel_tests, [], [
+ routed_to_zero_queue_test,
+ routed_to_one_queue_test,
+ routed_to_many_queue_test
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Test suite setup/teardown
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, ?MODULE}
+ ]),
+ rabbit_ct_helpers:run_setup_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_group(_, Config) ->
+ Config.
+
+end_per_group(_, Config) ->
+ Config.
+
+init_per_testcase(Testcase, Config) ->
+ TestCaseName = rabbit_ct_helpers:config_to_testcase_name(Config, Testcase),
+ Config1 = rabbit_ct_helpers:set_config(Config, {test_resource_name,
+ re:replace(TestCaseName, "/", "-", [global, {return, list}])}),
+ rabbit_ct_helpers:testcase_started(Config1, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+%% -------------------------------------------------------------------
+%% Test cases
+%% -------------------------------------------------------------------
+
+routed_to_zero_queue_test(Config) ->
+ test0(Config, fun () ->
+ #'basic.publish'{exchange = make_exchange_name(Config, "0"), routing_key = rnd()}
+ end,
+ fun() ->
+ #amqp_msg{props = #'P_basic'{}, payload = <<>>}
+ end, [], 5, 0),
+
+ passed.
+
+routed_to_one_queue_test(Config) ->
+ test0(Config, fun () ->
+ #'basic.publish'{exchange = make_exchange_name(Config, "0"), routing_key = rnd()}
+ end,
+ fun() ->
+ #amqp_msg{props = #'P_basic'{}, payload = <<>>}
+ end, [<<"q1">>, <<"q2">>, <<"q3">>], 1, 1),
+
+ passed.
+
+routed_to_many_queue_test(Config) ->
+ test0(Config, fun () ->
+ #'basic.publish'{exchange = make_exchange_name(Config, "0"), routing_key = rnd()}
+ end,
+ fun() ->
+ #amqp_msg{props = #'P_basic'{}, payload = <<>>}
+ end, [<<"q1">>, <<"q2">>, <<"q3">>], 5, 5),
+
+ passed.
+
+test0(Config, MakeMethod, MakeMsg, Queues, MsgCount, Count) ->
+ {Conn, Chan} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ E = make_exchange_name(Config, "0"),
+
+ #'exchange.declare_ok'{} =
+ amqp_channel:call(Chan,
+ #'exchange.declare' {
+ exchange = E,
+ type = <<"x-modulus-hash">>,
+ auto_delete = true
+ }),
+ [#'queue.declare_ok'{} =
+ amqp_channel:call(Chan, #'queue.declare' {
+ queue = Q, exclusive = true }) || Q <- Queues],
+ [#'queue.bind_ok'{} =
+ amqp_channel:call(Chan, #'queue.bind'{queue = Q,
+ exchange = E,
+ routing_key = <<"">>})
+ || Q <- Queues],
+
+ amqp_channel:call(Chan, #'confirm.select'{}),
+
+ [amqp_channel:call(Chan,
+ MakeMethod(),
+ MakeMsg()) || _ <- lists:duplicate(MsgCount, const)],
+
+ % ensure that the messages have been delivered to the queues before asking
+ % for the message count
+ amqp_channel:wait_for_confirms_or_die(Chan),
+
+ Counts =
+ [begin
+ #'queue.declare_ok'{message_count = M} =
+ amqp_channel:call(Chan, #'queue.declare' {queue = Q,
+ exclusive = true }),
+ M
+ end || Q <- Queues],
+
+ ?assertEqual(Count, lists:sum(Counts)),
+
+ amqp_channel:call(Chan, #'exchange.delete' { exchange = E }),
+ [amqp_channel:call(Chan, #'queue.delete' { queue = Q }) || Q <- Queues],
+
+ rabbit_ct_client_helpers:close_connection_and_channel(Conn, Chan),
+ ok.
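+%% test0/6 declares an x-modulus-hash exchange, binds the given queues to
+%% it, publishes MsgCount messages with random routing keys, and asserts
+%% that the total number of messages routed equals Count.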
+
+rnd() ->
+ list_to_binary(integer_to_list(rand:uniform(1000000))).
+
+make_exchange_name(Config, Suffix) ->
+ B = rabbit_ct_helpers:get_config(Config, test_resource_name),
+ erlang:list_to_binary("x-" ++ B ++ "-" ++ Suffix).
diff --git a/deps/rabbitmq_sharding/test/src/rabbit_sharding_SUITE.erl b/deps/rabbitmq_sharding/test/src/rabbit_sharding_SUITE.erl
new file mode 100644
index 0000000000..65f96a4e5d
--- /dev/null
+++ b/deps/rabbitmq_sharding/test/src/rabbit_sharding_SUITE.erl
@@ -0,0 +1,339 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_sharding_SUITE).
+
+-compile(export_all).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("rabbit/include/amqqueue.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-define(TEST_X, <<"sharding.test">>).
+
+-import(rabbit_sharding_util, [a2b/1, exchange_bin/1]).
+-import(rabbit_ct_broker_helpers, [set_parameter/5, clear_parameter/4,
+ set_policy/6, clear_policy/3]).
+
+all() ->
+ [
+ {group, non_parallel_tests}
+ ].
+
+groups() ->
+ [
+ {non_parallel_tests, [], [
+ shard_empty_routing_key_test,
+ shard_queue_creation_test,
+ shard_queue_creation2_test,
+ shard_update_spn_test,
+ shard_decrease_spn_keep_queues_test,
+ shard_update_routing_key_test,
+ shard_basic_consume_interceptor_test,
+ shard_auto_scale_cluster_test,
+ queue_declare_test,
+ shard_queue_master_locator_test
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Test suite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ inets:start(),
+ rabbit_ct_helpers:log_environment(),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, ?MODULE},
+ {rmq_nodes_count, 2}
+ ]),
+ rabbit_ct_helpers:run_setup_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_group(_, Config) ->
+ Config.
+
+end_per_group(_, Config) ->
+ Config.
+
+init_per_testcase(Testcase, Config) ->
+ TestCaseName = rabbit_ct_helpers:config_to_testcase_name(Config, Testcase),
+ rabbit_ct_helpers:set_config(Config, {test_resource_name,
+ re:replace(TestCaseName, "/", "-", [global, {return, list}])}).
+
+end_per_testcase(_Testcase, Config) ->
+ Config.
+
+%% -------------------------------------------------------------------
+%% Test cases.
+%% -------------------------------------------------------------------
+
+shard_empty_routing_key_test(Config) ->
+ with_ch(Config,
+ fun (Ch) ->
+ amqp_channel:call(Ch, x_declare(?TEST_X)),
+ set_policy(Config, 0, <<"3_shard">>, <<"^sharding">>, <<"exchanges">>, policy_definition(3)),
+ timer:sleep(1000),
+ ?assertEqual(6, length(queues(Config, 0))),
+
+ teardown(Config, Ch,
+ [{?TEST_X, 6}],
+ [<<"3_shard">>])
+ end).
+
+shard_queue_creation_test(Config) ->
+ with_ch(Config,
+ fun (Ch) ->
+ amqp_channel:call(Ch, x_declare(?TEST_X)),
+ set_policy(Config, 0, <<"3_shard">>, <<"^sharding">>, <<"exchanges">>, policy_definition(3, <<"1234">>)),
+ ?assertEqual(6, length(queues(Config, 0))),
+
+ teardown(Config, Ch,
+ [{?TEST_X, 6}],
+ [<<"3_shard">>])
+ end).
+
+shard_queue_creation2_test(Config) ->
+ with_ch(Config,
+ fun (Ch) ->
+ set_policy(Config, 0, <<"3_shard">>, <<"^sharding">>, <<"exchanges">>, policy_definition(3, <<"1234">>)),
+ ?assertEqual(0, length(queues(Config, 0))),
+
+ amqp_channel:call(Ch, x_declare(?TEST_X)),
+
+ ?assertEqual(6, length(queues(Config, 0))),
+
+ teardown(Config, Ch,
+ [{?TEST_X, 6}],
+ [<<"3_shard">>])
+ end).
+
+%% SPN = Shards Per Node
+shard_update_spn_test(Config) ->
+ with_ch(Config,
+ fun (Ch) ->
+ amqp_channel:call(Ch, x_declare(?TEST_X)),
+ set_policy(Config, 0, <<"3_shard">>, <<"^sharding">>, <<"exchanges">>, policy_definition(3, <<"1234">>)),
+ ?assertEqual(6, length(queues(Config, 0))),
+
+ set_policy(Config, 0, <<"3_shard">>, <<"^sharding">>, <<"exchanges">>, policy_definition(5, <<"1234">>)),
+ ?assertEqual(10, length(queues(Config, 0))),
+
+ teardown(Config, Ch,
+ [{?TEST_X, 5}],
+ [<<"3_shard">>])
+ end).
+
+shard_decrease_spn_keep_queues_test(Config) ->
+ with_ch(Config,
+ fun (Ch) ->
+ amqp_channel:call(Ch, x_declare(?TEST_X)),
+ set_policy(Config, 0, <<"3_shard">>, <<"^sharding">>, <<"exchanges">>, policy_definition(5, <<"1234">>)),
+ ?assertEqual(10, length(queues(Config, 0))),
+
+ set_policy(Config, 0, <<"3_shard">>, <<"^sharding">>, <<"exchanges">>, policy_definition(3, <<"1234">>)),
+ ?assertEqual(10, length(queues(Config, 0))),
+
+ teardown(Config, Ch,
+ [{?TEST_X, 5}],
+ [<<"3_shard">>])
+ end).
+
+
+%% Changes the routing-key policy; the queues should therefore be
+%% unbound first and then rebound with the new routing key.
+shard_update_routing_key_test(Config) ->
+ with_ch(Config,
+ fun (Ch) ->
+ amqp_channel:call(Ch, x_declare(?TEST_X)),
+ set_policy(Config, 0, <<"rkey">>, <<"^sharding">>, <<"exchanges">>, policy_definition(3, <<"1234">>)),
+ timer:sleep(1000),
+ Bs = bindings(Config, 0, ?TEST_X),
+
+ set_policy(Config, 0, <<"rkey">>, <<"^sharding">>, <<"exchanges">>, policy_definition(3, <<"4321">>)),
+ timer:sleep(1000),
+ Bs2 = bindings(Config, 0, ?TEST_X),
+
+ ?assert(Bs =/= Bs2),
+
+ teardown(Config, Ch,
+ [{?TEST_X, 1}],
+ [<<"rkey">>])
+ end).
+
+%% tests that the interceptor returns queue names
+%% sorted by consumer count and then by queue index.
+shard_basic_consume_interceptor_test(Config) ->
+ with_ch(Config,
+ fun (Ch) ->
+ Sh = ?TEST_X,
+ amqp_channel:call(Ch, x_declare(Sh)),
+ set_policy(Config, 0, <<"three">>, <<"^sharding">>, <<"exchanges">>, policy_definition(3, <<"1234">>)),
+
+ start_consumer(Ch, Sh),
+ assert_consumers(Config, Sh, 0, 1),
+ assert_consumers(Config, Sh, 1, 0),
+ assert_consumers(Config, Sh, 2, 0),
+
+ start_consumer(Ch, Sh),
+ assert_consumers(Config, Sh, 0, 1),
+ assert_consumers(Config, Sh, 1, 1),
+ assert_consumers(Config, Sh, 2, 0),
+
+ start_consumer(Ch, Sh),
+ assert_consumers(Config, Sh, 0, 1),
+ assert_consumers(Config, Sh, 1, 1),
+ assert_consumers(Config, Sh, 2, 1),
+
+ start_consumer(Ch, Sh),
+ assert_consumers(Config, Sh, 0, 2),
+ assert_consumers(Config, Sh, 1, 1),
+ assert_consumers(Config, Sh, 2, 1),
+
+ teardown(Config, Ch,
+ [{?TEST_X, 6}],
+ [<<"three">>])
+ end).
+
+shard_auto_scale_cluster_test(Config) ->
+ with_ch(Config,
+ fun (Ch) ->
+ Sh = ?TEST_X,
+ amqp_channel:call(Ch, x_declare(Sh)),
+ set_policy(Config, 0, <<"three">>, <<"^sharding">>, <<"exchanges">>, policy_definition(3, <<"1234">>)),
+
+ Qs = queues(Config, 0),
+ ?assertEqual(6, length(Qs)),
+ Nodes = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ ?assertEqual(Nodes, lists:usort(queue_nodes(Qs))),
+
+ teardown(Config, Ch,
+ [{?TEST_X, 6}],
+ [<<"three">>])
+ end).
+
+queue_declare_test(Config) ->
+ with_ch(Config,
+ fun (Ch) ->
+ amqp_channel:call(Ch, x_declare(?TEST_X)),
+ set_policy(Config, 0, <<"declare">>, <<"^sharding">>, <<"exchanges">>, policy_definition(3, <<"1234">>)),
+
+ Declare = #'queue.declare'{queue = <<"sharding.test">>,
+ auto_delete = false,
+ durable = true},
+
+ #'queue.declare_ok'{queue = Q} =
+ amqp_channel:call(Ch, Declare),
+
+ ?assertEqual(Q, shard_q(Config, 0, xr(?TEST_X), 0)),
+
+ teardown(Config, Ch,
+ [{?TEST_X, 6}],
+ [<<"declare">>])
+ end).
+
+shard_queue_master_locator_test(Config) ->
+ with_ch(Config,
+ fun (Ch) ->
+ set_policy(Config, 0, <<"qml">>, <<"^sharding">>, <<"queues">>, [{<<"queue-master-locator">>, <<"client-local">>}]),
+ amqp_channel:call(Ch, x_declare(?TEST_X)),
+ set_policy(Config, 0, <<"3_shard">>, <<"^sharding">>, <<"exchanges">>, policy_definition(3, <<"1234">>)),
+
+ [A, B] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+
+ NodesOfQueues = [node(amqqueue:get_pid(Q)) || Q <- queues(Config, 0)],
+ ?assertEqual(3, length(lists:filter(fun(N) -> N == A end, NodesOfQueues))),
+ ?assertEqual(3, length(lists:filter(fun(N) -> N == B end, NodesOfQueues))),
+
+ teardown(Config, Ch,
+ [{?TEST_X, 6}],
+ [<<"3_shard">>, <<"qml">>])
+ end).
+
+start_consumer(Ch, Shard) ->
+ amqp_channel:call(Ch, #'basic.consume'{queue = Shard}).
+
+assert_consumers(Config, Shard, QInd, Count) ->
+ Q0 = qr(shard_q(Config, 0, xr(Shard), QInd)),
+ [{consumers, C0}] = rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_sharding_interceptor, consumer_count, [Q0]),
+ ?assertEqual(Count, C0).
+
+queues(Config, NodeIndex) ->
+ case rabbit_ct_broker_helpers:rpc(Config, NodeIndex, rabbit_amqqueue, list, [<<"/">>]) of
+ {badrpc, _} -> [];
+ Qs -> Qs
+ end.
+
+bindings(Config, NodeIndex, XName) ->
+ case rabbit_ct_broker_helpers:rpc(Config, NodeIndex, rabbit_binding, list_for_source, [xr(XName)]) of
+ {badrpc, _} -> [];
+ Bs -> Bs
+ end.
+
+with_ch(Config, Fun) ->
+ {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+ Fun(Ch),
+ rabbit_ct_client_helpers:close_connection_and_channel(Conn, Ch),
+ cleanup(Config, 0),
+ ok.
+
+cleanup(Config) ->
+ cleanup(Config, 0).
+cleanup(Config, NodeIndex) ->
+ [rabbit_ct_broker_helpers:rpc(Config, NodeIndex, rabbit_amqqueue, delete, [Q, false, false, <<"test-user">>])
+ || Q <- queues(Config, NodeIndex)].
+
+teardown(Config, Ch, Xs, Policies) ->
+ [begin
+ amqp_channel:call(Ch, x_delete(XName)),
+ delete_queues(Config, Ch, XName, N)
+ end || {XName, N} <- Xs],
+ [clear_policy(Config, 0, Policy) || Policy <- Policies].
+
+delete_queues(Config, Ch, Name, N) ->
+ [amqp_channel:call(Ch, q_delete(Config, Name, QInd)) || QInd <- lists:seq(0, N-1)].
+
+x_declare(Name) -> x_declare(Name, <<"x-modulus-hash">>).
+
+x_declare(Name, Type) ->
+ #'exchange.declare'{exchange = Name,
+ type = Type,
+ durable = true}.
+
+x_delete(Name) ->
+ #'exchange.delete'{exchange = Name}.
+
+q_delete(Config, Name, QInd) ->
+ #'queue.delete'{queue = shard_q(Config, 0, xr(Name), QInd)}.
+
+shard_q(Config, NodeIndex, X, N) ->
+ rabbit_sharding_util:make_queue_name(
+ exchange_bin(X), a2b(rabbit_ct_broker_helpers:get_node_config(Config, NodeIndex, nodename)), N).
+
+policy_definition(SPN) ->
+ [{<<"shards-per-node">>, SPN}].
+
+policy_definition(SPN, RK) ->
+ [{<<"shards-per-node">>, SPN}, {<<"routing-key">>, RK}].
+
+queue_nodes(Qs) ->
+ [queue_node(Q) || Q <- Qs].
+
+queue_node(Q) when ?is_amqqueue(Q) ->
+ amqqueue:qnode(Q).
+
+xr(Name) -> rabbit_misc:r(<<"/">>, exchange, Name).
+qr(Name) -> rabbit_misc:r(<<"/">>, queue, Name).
diff --git a/deps/rabbitmq_shovel/.gitignore b/deps/rabbitmq_shovel/.gitignore
new file mode 100644
index 0000000000..899c1dcefd
--- /dev/null
+++ b/deps/rabbitmq_shovel/.gitignore
@@ -0,0 +1,22 @@
+.sw?
+.*.sw?
+*.beam
+*.plt
+/.erlang.mk/
+/cover/
+/deps/
+/doc/
+/ebin/
+/escript/
+/escript.lock
+/logs/
+/plugins/
+/plugins.lock
+/sbin/
+/sbin.lock
+/xrefr
+
+elvis
+elvis.config
+
+/rabbitmq_shovel.d
diff --git a/deps/rabbitmq_shovel/.travis.yml b/deps/rabbitmq_shovel/.travis.yml
new file mode 100644
index 0000000000..8061f6cdc5
--- /dev/null
+++ b/deps/rabbitmq_shovel/.travis.yml
@@ -0,0 +1,61 @@
+# vim:sw=2:et:
+
+os: linux
+dist: xenial
+language: elixir
+notifications:
+ email:
+ recipients:
+ - alerts@rabbitmq.com
+ on_success: never
+ on_failure: always
+addons:
+ apt:
+ packages:
+ - awscli
+cache:
+ apt: true
+env:
+ global:
+ - secure: gGj9z50r1UhYRLmm3RL2fTMJ56gWmLxusB+uZLteJXMm8mWVnh8cEy6o4IKpo+7zR89eDLciNrSBmIohh/VMPKMLLr20gI9D5gIXHZmxMWavkc2Pnyk4KEqzoYebsWKUBtvktafGlaW+W9E/DZdAhEh3xuqUN7SppypUfSag/yM=
+ - secure: FZTb3CpPbv816P5pFQUiyYsFHsRNTJDZ28bBrW7ZfAzCVYuu6PysCb+cofxSfPA0OMhUB46WtFeprx8sMgER1lEvUsIAyB8T5avPegOuFV9c4Wk1xAgK346eB+TH9ORjQZwx8beiMAu39aWwBu/zykcl2rWHozZMQOOgCK2Jzdc=
+
+ # $base_rmq_ref is used by rabbitmq-components.mk to select the
+ # appropriate branch for dependencies.
+ - base_rmq_ref=master
+
+elixir:
+ - '1.9'
+otp_release:
+ - '21.3'
+ - '22.2'
+
+install:
+ # This is an Erlang project (we only set the language to Elixir to
+ # ensure Elixir is installed), so we don't want Travis to run mix(1)
+ # automatically, as it would break.
+ skip
+
+script:
+ # $current_rmq_ref is also used by rabbitmq-components.mk to select
+ # the appropriate branch for dependencies.
+ - make check-rabbitmq-components.mk
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+ - make xref
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+ - make tests
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+
+after_failure:
+ - |
+ cd "$TRAVIS_BUILD_DIR"
+ if test -d logs && test "$AWS_ACCESS_KEY_ID" && test "$AWS_SECRET_ACCESS_KEY"; then
+ archive_name="$(basename "$TRAVIS_REPO_SLUG")-$TRAVIS_JOB_NUMBER"
+
+ tar -c --transform "s/^logs/${archive_name}/" -f - logs | \
+ xz > "${archive_name}.tar.xz"
+
+ aws s3 cp "${archive_name}.tar.xz" s3://server-release-pipeline/travis-ci-logs/ \
+ --region eu-west-1 \
+ --acl public-read
+ fi
diff --git a/deps/rabbitmq_shovel/CODE_OF_CONDUCT.md b/deps/rabbitmq_shovel/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000000..08697906fd
--- /dev/null
+++ b/deps/rabbitmq_shovel/CODE_OF_CONDUCT.md
@@ -0,0 +1,44 @@
+# Contributor Code of Conduct
+
+As contributors and maintainers of this project, and in the interest of fostering an open
+and welcoming community, we pledge to respect all people who contribute through reporting
+issues, posting feature requests, updating documentation, submitting pull requests or
+patches, and other activities.
+
+We are committed to making participation in this project a harassment-free experience for
+everyone, regardless of level of experience, gender, gender identity and expression,
+sexual orientation, disability, personal appearance, body size, race, ethnicity, age,
+religion, or nationality.
+
+Examples of unacceptable behavior by participants include:
+
+ * The use of sexualized language or imagery
+ * Personal attacks
+ * Trolling or insulting/derogatory comments
+ * Public or private harassment
+ * Publishing others' private information, such as physical or electronic addresses,
+ without explicit permission
+ * Other unethical or unprofessional conduct
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments,
+commits, code, wiki edits, issues, and other contributions that are not aligned to this
+Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors
+that they deem inappropriate, threatening, offensive, or harmful.
+
+By adopting this Code of Conduct, project maintainers commit themselves to fairly and
+consistently applying these principles to every aspect of managing this project. Project
+maintainers who do not follow or enforce the Code of Conduct may be permanently removed
+from the project team.
+
+This Code of Conduct applies both within project spaces and in public spaces when an
+individual is representing the project or its community.
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by
+contacting a project maintainer at [info@rabbitmq.com](mailto:info@rabbitmq.com). All complaints will
+be reviewed and investigated and will result in a response that is deemed necessary and
+appropriate to the circumstances. Maintainers are obligated to maintain confidentiality
+with regard to the reporter of an incident.
+
+This Code of Conduct is adapted from the
+[Contributor Covenant](https://contributor-covenant.org), version 1.3.0, available at
+[contributor-covenant.org/version/1/3/0/](https://contributor-covenant.org/version/1/3/0/)
diff --git a/deps/rabbitmq_shovel/CONTRIBUTING.md b/deps/rabbitmq_shovel/CONTRIBUTING.md
new file mode 100644
index 0000000000..592e7ced57
--- /dev/null
+++ b/deps/rabbitmq_shovel/CONTRIBUTING.md
@@ -0,0 +1,99 @@
+Thank you for using RabbitMQ and for taking the time to contribute to the project.
+This document has two main parts:
+
+ * when and how to file GitHub issues for RabbitMQ projects
+ * how to submit pull requests
+
+They intend to save you and RabbitMQ maintainers some time, so please
+take a moment to read through them.
+
+## Overview
+
+### GitHub issues
+
+The RabbitMQ team uses GitHub issues for _specific actionable items_ that
+engineers can work on. This assumes the following:
+
+* GitHub issues are not used for questions, investigations, root cause
+ analysis, discussions of potential issues, etc (as defined by this team)
+* Enough information is provided by the reporter for maintainers to work with
+
+The team receives many questions through various venues every single
+day. Frequently, these questions do not include the necessary details
+the team needs to begin useful work. GitHub issues can very quickly
+turn into something impossible to navigate and make sense
+of. Because of this, questions, investigations, root cause analysis,
+and discussions of potential features are all considered to be
+[mailing list][rmq-users] material. If you are unsure where to begin,
+the [RabbitMQ users mailing list][rmq-users] is the right place.
+
+Getting all the details necessary to reproduce an issue, make a
+conclusion or even form a hypothesis about what's happening can take a
+fair amount of time. Please help others help you by providing a way to
+reproduce the behavior you're observing, or at least sharing as much
+relevant information as possible on the [RabbitMQ users mailing
+list][rmq-users].
+
+Please provide versions of the software used:
+
+ * RabbitMQ server
+ * Erlang
+ * Operating system version (and distribution, if applicable)
+ * All client libraries used
+ * RabbitMQ plugins (if applicable)
+
+The following information greatly helps in investigating and reproducing issues:
+
+ * RabbitMQ server logs
+ * A code example or terminal transcript that can be used to reproduce
+ * Full exception stack traces (a single line message is not enough!)
+ * `rabbitmqctl report` and `rabbitmqctl environment` output
+ * Other relevant details about the environment and workload, e.g. a traffic capture
+ * Feel free to edit out hostnames and other potentially sensitive information.
+
+To make collecting much of this and other environment information easier, use
+the [`rabbitmq-collect-env`][rmq-collect-env] script. It will produce an archive with
+server logs, operating system logs, output of certain diagnostics commands and so on.
+Please note that **no effort is made to scrub any information that may be sensitive**.
+
+### Pull Requests
+
+RabbitMQ projects use pull requests to discuss, collaborate on and accept code contributions.
+Pull requests are the primary place for discussing code changes.
+
+Here's the recommended workflow:
+
+ * [Fork the repository][github-fork] or repositories you plan on contributing to. If multiple
+ repositories are involved in addressing the same issue, please use the same branch name
+ in each repository
+ * Create a branch with a descriptive name in the relevant repositories
+ * Make your changes, run tests (usually with `make tests`), commit with a
+ [descriptive message][git-commit-msgs], push to your fork
+ * Submit pull requests with an explanation of what has been changed and **why**
+ * Submit a filled out and signed [Contributor Agreement][ca-agreement] if needed (see below)
+ * Be patient. We will get to your pull request eventually
+
+If what you are going to work on is a substantial change, please first
+ask the core team for their opinion on the [RabbitMQ users mailing list][rmq-users].
+
+## Code of Conduct
+
+See [CODE_OF_CONDUCT.md](./CODE_OF_CONDUCT.md).
+
+## Contributor Agreement
+
+If you want to contribute a non-trivial change, please submit a signed
+copy of our [Contributor Agreement][ca-agreement] around the time you
+submit your pull request. This will make it much easier (in some
+cases, possible) for the RabbitMQ team at Pivotal to merge your
+contribution.
+
+## Where to Ask Questions
+
+If something isn't clear, feel free to ask on our [mailing list][rmq-users].
+
+[rmq-collect-env]: https://github.com/rabbitmq/support-tools/blob/master/scripts/rabbitmq-collect-env
+[git-commit-msgs]: https://chris.beams.io/posts/git-commit/
+[rmq-users]: https://groups.google.com/forum/#!forum/rabbitmq-users
+[ca-agreement]: https://cla.pivotal.io/sign/rabbitmq
+[github-fork]: https://help.github.com/articles/fork-a-repo/
diff --git a/deps/rabbitmq_shovel/LICENSE b/deps/rabbitmq_shovel/LICENSE
new file mode 100644
index 0000000000..f2da65d175
--- /dev/null
+++ b/deps/rabbitmq_shovel/LICENSE
@@ -0,0 +1,4 @@
+This package is licensed under the MPL 2.0. For the MPL 2.0, please see LICENSE-MPL-RabbitMQ.
+
+If you have any questions regarding licensing, please contact us at
+info@rabbitmq.com.
diff --git a/deps/rabbitmq_shovel/LICENSE-MPL-RabbitMQ b/deps/rabbitmq_shovel/LICENSE-MPL-RabbitMQ
new file mode 100644
index 0000000000..14e2f777f6
--- /dev/null
+++ b/deps/rabbitmq_shovel/LICENSE-MPL-RabbitMQ
@@ -0,0 +1,373 @@
+Mozilla Public License Version 2.0
+==================================
+
+1. Definitions
+--------------
+
+1.1. "Contributor"
+ means each individual or legal entity that creates, contributes to
+ the creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+ means the combination of the Contributions of others (if any) used
+ by a Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+ means Source Code Form to which the initial Contributor has attached
+ the notice in Exhibit A, the Executable Form of such Source Code
+ Form, and Modifications of such Source Code Form, in each case
+ including portions thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ (a) that the initial Contributor has attached the notice described
+ in Exhibit B to the Covered Software; or
+
+ (b) that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the
+ terms of a Secondary License.
+
+1.6. "Executable Form"
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+ means a work that combines Covered Software with other material, in
+ a separate file or files, that is not Covered Software.
+
+1.8. "License"
+ means this document.
+
+1.9. "Licensable"
+ means having the right to grant, to the maximum extent possible,
+ whether at the time of the initial grant or subsequently, any and
+ all of the rights conveyed by this License.
+
+1.10. "Modifications"
+ means any of the following:
+
+ (a) any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered
+ Software; or
+
+ (b) any new file in Source Code Form that contains any Covered
+ Software.
+
+1.11. "Patent Claims" of a Contributor
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the
+ License, by the making, using, selling, offering for sale, having
+ made, import, or transfer of either its Contributions or its
+ Contributor Version.
+
+1.12. "Secondary License"
+ means either the GNU General Public License, Version 2.0, the GNU
+ Lesser General Public License, Version 2.1, the GNU Affero General
+ Public License, Version 3.0, or any later versions of those
+ licenses.
+
+1.13. "Source Code Form"
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that
+ controls, is controlled by, or is under common control with You. For
+ purposes of this definition, "control" means (a) the power, direct
+ or indirect, to cause the direction or management of such entity,
+ whether by contract or otherwise, or (b) ownership of more than
+ fifty percent (50%) of the outstanding shares or beneficial
+ ownership of such entity.
+
+2. License Grants and Conditions
+--------------------------------
+
+2.1. Grants
+
+Each Contributor hereby grants You a world-wide, royalty-free,
+non-exclusive license:
+
+(a) under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+(b) under Patent Claims of such Contributor to make, use, sell, offer
+ for sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+The licenses granted in Section 2.1 with respect to any Contribution
+become effective for each Contribution on the date the Contributor first
+distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+The licenses granted in this Section 2 are the only rights granted under
+this License. No additional rights or licenses will be implied from the
+distribution or licensing of Covered Software under this License.
+Notwithstanding Section 2.1(b) above, no patent license is granted by a
+Contributor:
+
+(a) for any code that a Contributor has removed from Covered Software;
+ or
+
+(b) for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+(c) under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+This License does not grant any rights in the trademarks, service marks,
+or logos of any Contributor (except as may be necessary to comply with
+the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+No Contributor makes additional grants as a result of Your choice to
+distribute the Covered Software under a subsequent version of this
+License (see Section 10.2) or under the terms of a Secondary License (if
+permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+Each Contributor represents that the Contributor believes its
+Contributions are its original creation(s) or it has sufficient rights
+to grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+This License is not intended to limit any rights You have under
+applicable copyright doctrines of fair use, fair dealing, or other
+equivalents.
+
+2.7. Conditions
+
+Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
+in Section 2.1.
+
+3. Responsibilities
+-------------------
+
+3.1. Distribution of Source Form
+
+All distribution of Covered Software in Source Code Form, including any
+Modifications that You create or to which You contribute, must be under
+the terms of this License. You must inform recipients that the Source
+Code Form of the Covered Software is governed by the terms of this
+License, and how they can obtain a copy of this License. You may not
+attempt to alter or restrict the recipients' rights in the Source Code
+Form.
+
+3.2. Distribution of Executable Form
+
+If You distribute Covered Software in Executable Form then:
+
+(a) such Covered Software must also be made available in Source Code
+ Form, as described in Section 3.1, and You must inform recipients of
+ the Executable Form how they can obtain a copy of such Source Code
+ Form by reasonable means in a timely manner, at a charge no more
+ than the cost of distribution to the recipient; and
+
+(b) You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter
+ the recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+You may create and distribute a Larger Work under terms of Your choice,
+provided that You also comply with the requirements of this License for
+the Covered Software. If the Larger Work is a combination of Covered
+Software with a work governed by one or more Secondary Licenses, and the
+Covered Software is not Incompatible With Secondary Licenses, this
+License permits You to additionally distribute such Covered Software
+under the terms of such Secondary License(s), so that the recipient of
+the Larger Work may, at their option, further distribute the Covered
+Software under the terms of either this License or such Secondary
+License(s).
+
+3.4. Notices
+
+You may not remove or alter the substance of any license notices
+(including copyright notices, patent notices, disclaimers of warranty,
+or limitations of liability) contained within the Source Code Form of
+the Covered Software, except that You may alter any license notices to
+the extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+You may choose to offer, and to charge a fee for, warranty, support,
+indemnity or liability obligations to one or more recipients of Covered
+Software. However, You may do so only on Your own behalf, and not on
+behalf of any Contributor. You must make it absolutely clear that any
+such warranty, support, indemnity, or liability obligation is offered by
+You alone, and You hereby agree to indemnify every Contributor for any
+liability incurred by such Contributor as a result of warranty, support,
+indemnity or liability terms You offer. You may include additional
+disclaimers of warranty and limitations of liability specific to any
+jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+---------------------------------------------------
+
+If it is impossible for You to comply with any of the terms of this
+License with respect to some or all of the Covered Software due to
+statute, judicial order, or regulation then You must: (a) comply with
+the terms of this License to the maximum extent possible; and (b)
+describe the limitations and the code they affect. Such description must
+be placed in a text file included with all distributions of the Covered
+Software under this License. Except to the extent prohibited by statute
+or regulation, such description must be sufficiently detailed for a
+recipient of ordinary skill to be able to understand it.
+
+5. Termination
+--------------
+
+5.1. The rights granted under this License will terminate automatically
+if You fail to comply with any of its terms. However, if You become
+compliant, then the rights granted under this License from a particular
+Contributor are reinstated (a) provisionally, unless and until such
+Contributor explicitly and finally terminates Your grants, and (b) on an
+ongoing basis, if such Contributor fails to notify You of the
+non-compliance by some reasonable means prior to 60 days after You have
+come back into compliance. Moreover, Your grants from a particular
+Contributor are reinstated on an ongoing basis if such Contributor
+notifies You of the non-compliance by some reasonable means, this is the
+first time You have received notice of non-compliance with this License
+from such Contributor, and You become compliant prior to 30 days after
+Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+infringement claim (excluding declaratory judgment actions,
+counter-claims, and cross-claims) alleging that a Contributor Version
+directly or indirectly infringes any patent, then the rights granted to
+You by any and all Contributors for the Covered Software under Section
+2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all
+end user license agreements (excluding distributors and resellers) which
+have been validly granted by You or Your distributors under this License
+prior to termination shall survive termination.
+
+************************************************************************
+* *
+* 6. Disclaimer of Warranty *
+* ------------------------- *
+* *
+* Covered Software is provided under this License on an "as is" *
+* basis, without warranty of any kind, either expressed, implied, or *
+* statutory, including, without limitation, warranties that the *
+* Covered Software is free of defects, merchantable, fit for a *
+* particular purpose or non-infringing. The entire risk as to the *
+* quality and performance of the Covered Software is with You. *
+* Should any Covered Software prove defective in any respect, You *
+* (not any Contributor) assume the cost of any necessary servicing, *
+* repair, or correction. This disclaimer of warranty constitutes an *
+* essential part of this License. No use of any Covered Software is *
+* authorized under this License except under this disclaimer. *
+* *
+************************************************************************
+
+************************************************************************
+* *
+* 7. Limitation of Liability *
+* -------------------------- *
+* *
+* Under no circumstances and under no legal theory, whether tort *
+* (including negligence), contract, or otherwise, shall any *
+* Contributor, or anyone who distributes Covered Software as *
+* permitted above, be liable to You for any direct, indirect, *
+* special, incidental, or consequential damages of any character *
+* including, without limitation, damages for lost profits, loss of *
+* goodwill, work stoppage, computer failure or malfunction, or any *
+* and all other commercial damages or losses, even if such party *
+* shall have been informed of the possibility of such damages. This *
+* limitation of liability shall not apply to liability for death or *
+* personal injury resulting from such party's negligence to the *
+* extent applicable law prohibits such limitation. Some *
+* jurisdictions do not allow the exclusion or limitation of *
+* incidental or consequential damages, so this exclusion and *
+* limitation may not apply to You. *
+* *
+************************************************************************
+
+8. Litigation
+-------------
+
+Any litigation relating to this License may be brought only in the
+courts of a jurisdiction where the defendant maintains its principal
+place of business and such litigation shall be governed by laws of that
+jurisdiction, without reference to its conflict-of-law provisions.
+Nothing in this Section shall prevent a party's ability to bring
+cross-claims or counter-claims.
+
+9. Miscellaneous
+----------------
+
+This License represents the complete agreement concerning the subject
+matter hereof. If any provision of this License is held to be
+unenforceable, such provision shall be reformed only to the extent
+necessary to make it enforceable. Any law or regulation which provides
+that the language of a contract shall be construed against the drafter
+shall not be used to construe this License against a Contributor.
+
+10. Versions of the License
+---------------------------
+
+10.1. New Versions
+
+Mozilla Foundation is the license steward. Except as provided in Section
+10.3, no one other than the license steward has the right to modify or
+publish new versions of this License. Each version will be given a
+distinguishing version number.
+
+10.2. Effect of New Versions
+
+You may distribute the Covered Software under the terms of the version
+of the License under which You originally received the Covered Software,
+or under the terms of any subsequent version published by the license
+steward.
+
+10.3. Modified Versions
+
+If you create software not governed by this License, and you want to
+create a new license for such software, you may create and use a
+modified version of this License if you rename the license and remove
+any references to the name of the license steward (except to note that
+such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+Licenses
+
+If You choose to distribute Source Code Form that is Incompatible With
+Secondary Licenses under the terms of this version of the License, the
+notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+-------------------------------------------
+
+ This Source Code Form is subject to the terms of the Mozilla Public
+ License, v. 2.0. If a copy of the MPL was not distributed with this
+ file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular
+file, then You may include the notice in a location (such as a LICENSE
+file in a relevant directory) where a recipient would be likely to look
+for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+---------------------------------------------------------
+
+ This Source Code Form is "Incompatible With Secondary Licenses", as
+ defined by the Mozilla Public License, v. 2.0.
diff --git a/deps/rabbitmq_shovel/Makefile b/deps/rabbitmq_shovel/Makefile
new file mode 100644
index 0000000000..06ad5fe759
--- /dev/null
+++ b/deps/rabbitmq_shovel/Makefile
@@ -0,0 +1,40 @@
+PROJECT = rabbitmq_shovel
+PROJECT_DESCRIPTION = Data Shovel for RabbitMQ
+PROJECT_MOD = rabbit_shovel
+
+define PROJECT_ENV
+[
+ {defaults, [
+ {prefetch_count, 1000},
+ {ack_mode, on_confirm},
+ {publish_fields, []},
+ {publish_properties, []},
+ {reconnect_delay, 5}
+ ]}
+ ]
+endef
+
+define PROJECT_APP_EXTRA_KEYS
+ {broker_version_requirements, []}
+endef
+
+DEPS = rabbit_common rabbit amqp_client amqp10_client
+dep_amqp10_client = git https://github.com/rabbitmq/rabbitmq-amqp1.0-client.git master
+
+LOCAL_DEPS = crypto
+
+TEST_DEPS = rabbitmq_ct_helpers rabbitmq_ct_client_helpers rabbitmq_amqp1_0 meck
+
+DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-plugin.mk
+DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk elvis_mk
+dep_elvis_mk = git https://github.com/inaka/elvis.mk.git master
+
+
+# FIXME: Use erlang.mk patched for RabbitMQ, while waiting for PRs to be
+# reviewed and merged.
+
+ERLANG_MK_REPO = https://github.com/rabbitmq/erlang.mk.git
+ERLANG_MK_COMMIT = rabbitmq-tmp
+
+include rabbitmq-components.mk
+include erlang.mk
diff --git a/deps/rabbitmq_shovel/README.md b/deps/rabbitmq_shovel/README.md
new file mode 100644
index 0000000000..08b0f66500
--- /dev/null
+++ b/deps/rabbitmq_shovel/README.md
@@ -0,0 +1,23 @@
+## RabbitMQ Shovel
+
+RabbitMQ Shovel is a WAN-friendly tool for moving messages from
+a queue to an exchange, typically between different nodes. It is implemented
+as a RabbitMQ plugin and has a [management UI extension](https://github.com/rabbitmq/rabbitmq-shovel-management/).
+
+
+## Supported RabbitMQ Versions
+
+This plugin ships with RabbitMQ; there is no need to
+install it separately.
+
+
+## Documentation
+
+See [RabbitMQ shovel plugin](https://www.rabbitmq.com/shovel.html) on rabbitmq.com.
+
+
+## License and Copyright
+
+Released under [the same license as RabbitMQ](https://www.rabbitmq.com/mpl.html).
+
+(c) 2007-2020 VMware, Inc. or its affiliates.
diff --git a/deps/rabbitmq_shovel/erlang.mk b/deps/rabbitmq_shovel/erlang.mk
new file mode 100644
index 0000000000..fce4be0b0a
--- /dev/null
+++ b/deps/rabbitmq_shovel/erlang.mk
@@ -0,0 +1,7808 @@
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+.PHONY: all app apps deps search rel relup docs install-docs check tests clean distclean help erlang-mk
+
+ERLANG_MK_FILENAME := $(realpath $(lastword $(MAKEFILE_LIST)))
+export ERLANG_MK_FILENAME
+
+ERLANG_MK_VERSION = 2019.07.01-40-geb3e4b0
+ERLANG_MK_WITHOUT =
+
+# Make 3.81 and 3.82 are deprecated.
+
+ifeq ($(MAKELEVEL)$(MAKE_VERSION),03.81)
+$(warning Please upgrade to GNU Make 4 or later: https://erlang.mk/guide/installation.html)
+endif
+
+ifeq ($(MAKELEVEL)$(MAKE_VERSION),03.82)
+$(warning Please upgrade to GNU Make 4 or later: https://erlang.mk/guide/installation.html)
+endif
+
+# Core configuration.
+
+PROJECT ?= $(notdir $(CURDIR))
+PROJECT := $(strip $(PROJECT))
+
+PROJECT_VERSION ?= rolling
+PROJECT_MOD ?= $(PROJECT)_app
+PROJECT_ENV ?= []
+
+# Verbosity.
+
+V ?= 0
+
+verbose_0 = @
+verbose_2 = set -x;
+verbose = $(verbose_$(V))
+
+ifeq ($(V),3)
+SHELL := $(SHELL) -x
+endif
+
+gen_verbose_0 = @echo " GEN " $@;
+gen_verbose_2 = set -x;
+gen_verbose = $(gen_verbose_$(V))
+
+gen_verbose_esc_0 = @echo " GEN " $$@;
+gen_verbose_esc_2 = set -x;
+gen_verbose_esc = $(gen_verbose_esc_$(V))
+
+# Temporary files directory.
+
+ERLANG_MK_TMP ?= $(CURDIR)/.erlang.mk
+export ERLANG_MK_TMP
+
+# "erl" command.
+
+ERL = erl +A1 -noinput -boot no_dot_erlang
+
+# Platform detection.
+
+ifeq ($(PLATFORM),)
+UNAME_S := $(shell uname -s)
+
+ifeq ($(UNAME_S),Linux)
+PLATFORM = linux
+else ifeq ($(UNAME_S),Darwin)
+PLATFORM = darwin
+else ifeq ($(UNAME_S),SunOS)
+PLATFORM = solaris
+else ifeq ($(UNAME_S),GNU)
+PLATFORM = gnu
+else ifeq ($(UNAME_S),FreeBSD)
+PLATFORM = freebsd
+else ifeq ($(UNAME_S),NetBSD)
+PLATFORM = netbsd
+else ifeq ($(UNAME_S),OpenBSD)
+PLATFORM = openbsd
+else ifeq ($(UNAME_S),DragonFly)
+PLATFORM = dragonfly
+else ifeq ($(shell uname -o),Msys)
+PLATFORM = msys2
+else
+$(error Unable to detect platform. Please open a ticket with the output of uname -a.)
+endif
+
+export PLATFORM
+endif
+
+# Core targets.
+
+all:: deps app rel
+
+# Noop to avoid a Make warning when there's nothing to do.
+rel::
+ $(verbose) :
+
+relup:: deps app
+
+check:: tests
+
+clean:: clean-crashdump
+
+clean-crashdump:
+ifneq ($(wildcard erl_crash.dump),)
+ $(gen_verbose) rm -f erl_crash.dump
+endif
+
+distclean:: clean distclean-tmp
+
+$(ERLANG_MK_TMP):
+ $(verbose) mkdir -p $(ERLANG_MK_TMP)
+
+distclean-tmp:
+ $(gen_verbose) rm -rf $(ERLANG_MK_TMP)
+
+help::
+ $(verbose) printf "%s\n" \
+ "erlang.mk (version $(ERLANG_MK_VERSION)) is distributed under the terms of the ISC License." \
+ "Copyright (c) 2013-2016 Loïc Hoguin <essen@ninenines.eu>" \
+ "" \
+ "Usage: [V=1] $(MAKE) [target]..." \
+ "" \
+ "Core targets:" \
+ " all Run deps, app and rel targets in that order" \
+ " app Compile the project" \
+ " deps Fetch dependencies (if needed) and compile them" \
+ " fetch-deps Fetch dependencies recursively (if needed) without compiling them" \
+ " list-deps List dependencies recursively on stdout" \
+ " search q=... Search for a package in the built-in index" \
+ " rel Build a release for this project, if applicable" \
+ " docs Build the documentation for this project" \
+ " install-docs Install the man pages for this project" \
+ " check Compile and run all tests and analysis for this project" \
+ " tests Run the tests for this project" \
+ " clean Delete temporary and output files from most targets" \
+ " distclean Delete all temporary and output files" \
+ " help Display this help and exit" \
+ " erlang-mk Update erlang.mk to the latest version"
+
+# Core functions.
+
+empty :=
+space := $(empty) $(empty)
+tab := $(empty) $(empty)
+comma := ,
+
+define newline
+
+
+endef
+
+define comma_list
+$(subst $(space),$(comma),$(strip $(1)))
+endef
+
+define escape_dquotes
+$(subst ",\",$1)
+endef
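+
+# Worked examples for the two helpers above (illustrative only):
+#   $(call comma_list,a b c)        -> a,b,c
+#   $(call escape_dquotes,say "hi") -> say \"hi\"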
+
+# Adding erlang.mk to make Erlang scripts that call init:get_plain_arguments() happy.
+define erlang
+$(ERL) $2 -pz $(ERLANG_MK_TMP)/rebar/ebin -eval "$(subst $(newline),,$(call escape_dquotes,$1))" -- erlang.mk
+endef
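+
+# A sketch of how the macro is meant to be invoked (the expression is made
+# up): $(call erlang,io:format("hello~n")) runs the quoted expression in a
+# one-off VM with rebar's ebin directory on the code path and "erlang.mk"
+# as the plain argument.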
+
+ifeq ($(PLATFORM),msys2)
+core_native_path = $(shell cygpath -m $1)
+else
+core_native_path = $1
+endif
+
+core_http_get = curl -Lf$(if $(filter-out 0,$(V)),,s)o $(call core_native_path,$1) $2
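+
+# For instance (made-up arguments):
+#   $(call core_http_get,/tmp/pkg.tgz,https://example.com/pkg.tgz)
+# expands to a curl command that follows redirects, fails on HTTP errors,
+# and stays silent unless V is non-zero.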
+
+core_eq = $(and $(findstring $(1),$(2)),$(findstring $(2),$(1)))
+
+# We skip files that contain spaces because they end up causing issues.
+core_find = $(if $(wildcard $1),$(shell find $(1:%/=%) \( -type l -o -type f \) -name $(subst *,\*,$2) | grep -v " "))
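+
+# Example (hypothetical tree): $(call core_find,src/,*.erl) lists every
+# regular file or symlink named *.erl under src/, dropping any path that
+# contains a space.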
+
+core_lc = $(subst A,a,$(subst B,b,$(subst C,c,$(subst D,d,$(subst E,e,$(subst F,f,$(subst G,g,$(subst H,h,$(subst I,i,$(subst J,j,$(subst K,k,$(subst L,l,$(subst M,m,$(subst N,n,$(subst O,o,$(subst P,p,$(subst Q,q,$(subst R,r,$(subst S,s,$(subst T,t,$(subst U,u,$(subst V,v,$(subst W,w,$(subst X,x,$(subst Y,y,$(subst Z,z,$(1)))))))))))))))))))))))))))
+
+core_ls = $(filter-out $(1),$(shell echo $(1)))
+
+# @todo Use a solution that does not require using perl.
+core_relpath = $(shell perl -e 'use File::Spec; print File::Spec->abs2rel(@ARGV) . "\n"' $1 $2)
+
+define core_render
+ printf -- '$(subst $(newline),\n,$(subst %,%%,$(subst ','\'',$(subst $(tab),$(WS),$(call $(1))))))\n' > $(2)
+endef
+
+# Automated update.
+
+ERLANG_MK_REPO ?= https://github.com/ninenines/erlang.mk
+ERLANG_MK_COMMIT ?=
+ERLANG_MK_BUILD_CONFIG ?= build.config
+ERLANG_MK_BUILD_DIR ?= .erlang.mk.build
+
+erlang-mk: WITHOUT ?= $(ERLANG_MK_WITHOUT)
+erlang-mk:
+ifdef ERLANG_MK_COMMIT
+ $(verbose) git clone $(ERLANG_MK_REPO) $(ERLANG_MK_BUILD_DIR)
+ $(verbose) cd $(ERLANG_MK_BUILD_DIR) && git checkout $(ERLANG_MK_COMMIT)
+else
+ $(verbose) git clone --depth 1 $(ERLANG_MK_REPO) $(ERLANG_MK_BUILD_DIR)
+endif
+ $(verbose) if [ -f $(ERLANG_MK_BUILD_CONFIG) ]; then cp $(ERLANG_MK_BUILD_CONFIG) $(ERLANG_MK_BUILD_DIR)/build.config; fi
+ $(gen_verbose) $(MAKE) --no-print-directory -C $(ERLANG_MK_BUILD_DIR) WITHOUT='$(strip $(WITHOUT))' UPGRADE=1
+ $(verbose) cp $(ERLANG_MK_BUILD_DIR)/erlang.mk ./erlang.mk
+ $(verbose) rm -rf $(ERLANG_MK_BUILD_DIR)
+ $(verbose) rm -rf $(ERLANG_MK_TMP)
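+
+# Typical usage from a project that vendors this file (illustrative):
+#   make erlang-mk                          # upgrade to the latest build
+#   make erlang-mk ERLANG_MK_COMMIT=<sha>   # pin to a specific revision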
+
+# The erlang.mk package index is bundled in the default erlang.mk build.
+# Search for the string "copyright" to skip to the rest of the code.
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-kerl
+
+KERL_INSTALL_DIR ?= $(HOME)/erlang
+
+ifeq ($(strip $(KERL)),)
+KERL := $(ERLANG_MK_TMP)/kerl/kerl
+endif
+
+KERL_DIR = $(ERLANG_MK_TMP)/kerl
+
+export KERL
+
+KERL_GIT ?= https://github.com/kerl/kerl
+KERL_COMMIT ?= master
+
+KERL_MAKEFLAGS ?=
+
+OTP_GIT ?= https://github.com/erlang/otp
+
+define kerl_otp_target
+$(KERL_INSTALL_DIR)/$(1): $(KERL)
+ $(verbose) if [ ! -d $$@ ]; then \
+ MAKEFLAGS="$(KERL_MAKEFLAGS)" $(KERL) build git $(OTP_GIT) $(1) $(1); \
+ $(KERL) install $(1) $(KERL_INSTALL_DIR)/$(1); \
+ fi
+endef
+
+define kerl_hipe_target
+$(KERL_INSTALL_DIR)/$1-native: $(KERL)
+ $(verbose) if [ ! -d $$@ ]; then \
+ KERL_CONFIGURE_OPTIONS=--enable-native-libs \
+ MAKEFLAGS="$(KERL_MAKEFLAGS)" $(KERL) build git $(OTP_GIT) $1 $1-native; \
+ $(KERL) install $1-native $(KERL_INSTALL_DIR)/$1-native; \
+ fi
+endef
+
+$(KERL): $(KERL_DIR)
+
+$(KERL_DIR): | $(ERLANG_MK_TMP)
+ $(gen_verbose) git clone --depth 1 $(KERL_GIT) $(ERLANG_MK_TMP)/kerl
+ $(verbose) cd $(ERLANG_MK_TMP)/kerl && git checkout $(KERL_COMMIT)
+ $(verbose) chmod +x $(KERL)
+
+distclean:: distclean-kerl
+
+distclean-kerl:
+ $(gen_verbose) rm -rf $(KERL_DIR)
+
+# Allow users to select which version of Erlang/OTP to use for a project.
+
+ifneq ($(strip $(LATEST_ERLANG_OTP)),)
+# In some environments it is necessary to filter out master.
+ERLANG_OTP := $(notdir $(lastword $(sort\
+ $(filter-out $(KERL_INSTALL_DIR)/master $(KERL_INSTALL_DIR)/OTP_R%,\
+ $(filter-out %-rc1 %-rc2 %-rc3,$(wildcard $(KERL_INSTALL_DIR)/*[^-native]))))))
+endif
+
+ERLANG_OTP ?=
+ERLANG_HIPE ?=
+
+# Use kerl to enforce a specific Erlang/OTP version for a project.
+ifneq ($(strip $(ERLANG_OTP)),)
+export PATH := $(KERL_INSTALL_DIR)/$(ERLANG_OTP)/bin:$(PATH)
+SHELL := env PATH=$(PATH) $(SHELL)
+$(eval $(call kerl_otp_target,$(ERLANG_OTP)))
+
+# Build Erlang/OTP only if it doesn't already exist.
+ifeq ($(wildcard $(KERL_INSTALL_DIR)/$(ERLANG_OTP))$(BUILD_ERLANG_OTP),)
+$(info Building Erlang/OTP $(ERLANG_OTP)... Please wait...)
+$(shell $(MAKE) $(KERL_INSTALL_DIR)/$(ERLANG_OTP) ERLANG_OTP=$(ERLANG_OTP) BUILD_ERLANG_OTP=1 >&2)
+endif
+
+else
+# Same for a HiPE enabled VM.
+ifneq ($(strip $(ERLANG_HIPE)),)
+export PATH := $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native/bin:$(PATH)
+SHELL := env PATH=$(PATH) $(SHELL)
+$(eval $(call kerl_hipe_target,$(ERLANG_HIPE)))
+
+# Build Erlang/OTP only if it doesn't already exist.
+ifeq ($(wildcard $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native)$(BUILD_ERLANG_OTP),)
+$(info Building HiPE-enabled Erlang/OTP $(ERLANG_HIPE)... Please wait...)
+$(shell $(MAKE) $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native ERLANG_HIPE=$(ERLANG_HIPE) BUILD_ERLANG_OTP=1 >&2)
+endif
+
+endif
+endif
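+
+# Example (the version string is made up): "ERLANG_OTP=OTP-23.1 make" has
+# kerl build and install that release under $(KERL_INSTALL_DIR) on first
+# use and prepends its bin/ to PATH for every recipe; LATEST_ERLANG_OTP=1
+# instead selects the newest non-rc, non-master build already installed
+# there.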
+
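+# Each pkg_* block below describes one fetchable package. A project opts in
+# from its own Makefile by name, for example (cowboy is indexed below):
+#   DEPS = cowboy
+# and erlang.mk resolves it through the pkg_cowboy_* variables; the index
+# can also be queried from the shell with "make search q=cowboy".
+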
+PACKAGES += aberth
+pkg_aberth_name = aberth
+pkg_aberth_description = Generic BERT-RPC server in Erlang
+pkg_aberth_homepage = https://github.com/a13x/aberth
+pkg_aberth_fetch = git
+pkg_aberth_repo = https://github.com/a13x/aberth
+pkg_aberth_commit = master
+
+PACKAGES += active
+pkg_active_name = active
+pkg_active_description = Active development for Erlang: rebuild and reload source/binary files while the VM is running
+pkg_active_homepage = https://github.com/proger/active
+pkg_active_fetch = git
+pkg_active_repo = https://github.com/proger/active
+pkg_active_commit = master
+
+PACKAGES += actordb_core
+pkg_actordb_core_name = actordb_core
+pkg_actordb_core_description = ActorDB main source
+pkg_actordb_core_homepage = http://www.actordb.com/
+pkg_actordb_core_fetch = git
+pkg_actordb_core_repo = https://github.com/biokoda/actordb_core
+pkg_actordb_core_commit = master
+
+PACKAGES += actordb_thrift
+pkg_actordb_thrift_name = actordb_thrift
+pkg_actordb_thrift_description = Thrift API for ActorDB
+pkg_actordb_thrift_homepage = http://www.actordb.com/
+pkg_actordb_thrift_fetch = git
+pkg_actordb_thrift_repo = https://github.com/biokoda/actordb_thrift
+pkg_actordb_thrift_commit = master
+
+PACKAGES += aleppo
+pkg_aleppo_name = aleppo
+pkg_aleppo_description = Alternative Erlang Pre-Processor
+pkg_aleppo_homepage = https://github.com/ErlyORM/aleppo
+pkg_aleppo_fetch = git
+pkg_aleppo_repo = https://github.com/ErlyORM/aleppo
+pkg_aleppo_commit = master
+
+PACKAGES += alog
+pkg_alog_name = alog
+pkg_alog_description = Simply the best logging framework for Erlang
+pkg_alog_homepage = https://github.com/siberian-fast-food/alogger
+pkg_alog_fetch = git
+pkg_alog_repo = https://github.com/siberian-fast-food/alogger
+pkg_alog_commit = master
+
+PACKAGES += amqp_client
+pkg_amqp_client_name = amqp_client
+pkg_amqp_client_description = RabbitMQ Erlang AMQP client
+pkg_amqp_client_homepage = https://www.rabbitmq.com/erlang-client-user-guide.html
+pkg_amqp_client_fetch = git
+pkg_amqp_client_repo = https://github.com/rabbitmq/rabbitmq-erlang-client.git
+pkg_amqp_client_commit = master
+
+PACKAGES += annotations
+pkg_annotations_name = annotations
+pkg_annotations_description = Simple code instrumentation utilities
+pkg_annotations_homepage = https://github.com/hyperthunk/annotations
+pkg_annotations_fetch = git
+pkg_annotations_repo = https://github.com/hyperthunk/annotations
+pkg_annotations_commit = master
+
+PACKAGES += antidote
+pkg_antidote_name = antidote
+pkg_antidote_description = Large-scale computation without synchronisation
+pkg_antidote_homepage = https://syncfree.lip6.fr/
+pkg_antidote_fetch = git
+pkg_antidote_repo = https://github.com/SyncFree/antidote
+pkg_antidote_commit = master
+
+PACKAGES += apns
+pkg_apns_name = apns
+pkg_apns_description = Apple Push Notification Server for Erlang
+pkg_apns_homepage = http://inaka.github.com/apns4erl
+pkg_apns_fetch = git
+pkg_apns_repo = https://github.com/inaka/apns4erl
+pkg_apns_commit = master
+
+PACKAGES += asciideck
+pkg_asciideck_name = asciideck
+pkg_asciideck_description = Asciidoc for Erlang.
+pkg_asciideck_homepage = https://ninenines.eu
+pkg_asciideck_fetch = git
+pkg_asciideck_repo = https://github.com/ninenines/asciideck
+pkg_asciideck_commit = master
+
+PACKAGES += azdht
+pkg_azdht_name = azdht
+pkg_azdht_description = Azureus Distributed Hash Table (DHT) in Erlang
+pkg_azdht_homepage = https://github.com/arcusfelis/azdht
+pkg_azdht_fetch = git
+pkg_azdht_repo = https://github.com/arcusfelis/azdht
+pkg_azdht_commit = master
+
+PACKAGES += backoff
+pkg_backoff_name = backoff
+pkg_backoff_description = Simple exponential backoffs in Erlang
+pkg_backoff_homepage = https://github.com/ferd/backoff
+pkg_backoff_fetch = git
+pkg_backoff_repo = https://github.com/ferd/backoff
+pkg_backoff_commit = master
+
+PACKAGES += barrel_tcp
+pkg_barrel_tcp_name = barrel_tcp
+pkg_barrel_tcp_description = barrel is a generic TCP acceptor pool with low latency in Erlang.
+pkg_barrel_tcp_homepage = https://github.com/benoitc-attic/barrel_tcp
+pkg_barrel_tcp_fetch = git
+pkg_barrel_tcp_repo = https://github.com/benoitc-attic/barrel_tcp
+pkg_barrel_tcp_commit = master
+
+PACKAGES += basho_bench
+pkg_basho_bench_name = basho_bench
+pkg_basho_bench_description = A load-generation and testing tool for basically whatever you can write a returning Erlang function for.
+pkg_basho_bench_homepage = https://github.com/basho/basho_bench
+pkg_basho_bench_fetch = git
+pkg_basho_bench_repo = https://github.com/basho/basho_bench
+pkg_basho_bench_commit = master
+
+PACKAGES += bcrypt
+pkg_bcrypt_name = bcrypt
+pkg_bcrypt_description = Bcrypt Erlang / C library
+pkg_bcrypt_homepage = https://github.com/erlangpack/bcrypt
+pkg_bcrypt_fetch = git
+pkg_bcrypt_repo = https://github.com/erlangpack/bcrypt.git
+pkg_bcrypt_commit = master
+
+PACKAGES += beam
+pkg_beam_name = beam
+pkg_beam_description = BEAM emulator written in Erlang
+pkg_beam_homepage = https://github.com/tonyrog/beam
+pkg_beam_fetch = git
+pkg_beam_repo = https://github.com/tonyrog/beam
+pkg_beam_commit = master
+
+PACKAGES += beanstalk
+pkg_beanstalk_name = beanstalk
+pkg_beanstalk_description = An Erlang client for beanstalkd
+pkg_beanstalk_homepage = https://github.com/tim/erlang-beanstalk
+pkg_beanstalk_fetch = git
+pkg_beanstalk_repo = https://github.com/tim/erlang-beanstalk
+pkg_beanstalk_commit = master
+
+PACKAGES += bear
+pkg_bear_name = bear
+pkg_bear_description = a set of statistics functions for erlang
+pkg_bear_homepage = https://github.com/boundary/bear
+pkg_bear_fetch = git
+pkg_bear_repo = https://github.com/boundary/bear
+pkg_bear_commit = master
+
+PACKAGES += bertconf
+pkg_bertconf_name = bertconf
+pkg_bertconf_description = Make ETS tables out of static BERT files that are auto-reloaded
+pkg_bertconf_homepage = https://github.com/ferd/bertconf
+pkg_bertconf_fetch = git
+pkg_bertconf_repo = https://github.com/ferd/bertconf
+pkg_bertconf_commit = master
+
+PACKAGES += bifrost
+pkg_bifrost_name = bifrost
+pkg_bifrost_description = Erlang FTP Server Framework
+pkg_bifrost_homepage = https://github.com/thorstadt/bifrost
+pkg_bifrost_fetch = git
+pkg_bifrost_repo = https://github.com/thorstadt/bifrost
+pkg_bifrost_commit = master
+
+PACKAGES += binpp
+pkg_binpp_name = binpp
+pkg_binpp_description = Erlang Binary Pretty Printer
+pkg_binpp_homepage = https://github.com/jtendo/binpp
+pkg_binpp_fetch = git
+pkg_binpp_repo = https://github.com/jtendo/binpp
+pkg_binpp_commit = master
+
+PACKAGES += bisect
+pkg_bisect_name = bisect
+pkg_bisect_description = Ordered fixed-size binary dictionary in Erlang
+pkg_bisect_homepage = https://github.com/knutin/bisect
+pkg_bisect_fetch = git
+pkg_bisect_repo = https://github.com/knutin/bisect
+pkg_bisect_commit = master
+
+PACKAGES += bitcask
+pkg_bitcask_name = bitcask
+pkg_bitcask_description = because you need another key/value storage engine
+pkg_bitcask_homepage = https://github.com/basho/bitcask
+pkg_bitcask_fetch = git
+pkg_bitcask_repo = https://github.com/basho/bitcask
+pkg_bitcask_commit = develop
+
+PACKAGES += bitstore
+pkg_bitstore_name = bitstore
+pkg_bitstore_description = A document based ontology development environment
+pkg_bitstore_homepage = https://github.com/bdionne/bitstore
+pkg_bitstore_fetch = git
+pkg_bitstore_repo = https://github.com/bdionne/bitstore
+pkg_bitstore_commit = master
+
+PACKAGES += bootstrap
+pkg_bootstrap_name = bootstrap
+pkg_bootstrap_description = A simple, yet powerful Erlang cluster bootstrapping application.
+pkg_bootstrap_homepage = https://github.com/schlagert/bootstrap
+pkg_bootstrap_fetch = git
+pkg_bootstrap_repo = https://github.com/schlagert/bootstrap
+pkg_bootstrap_commit = master
+
+PACKAGES += boss
+pkg_boss_name = boss
+pkg_boss_description = Erlang web MVC, now featuring Comet
+pkg_boss_homepage = https://github.com/ChicagoBoss/ChicagoBoss
+pkg_boss_fetch = git
+pkg_boss_repo = https://github.com/ChicagoBoss/ChicagoBoss
+pkg_boss_commit = master
+
+PACKAGES += boss_db
+pkg_boss_db_name = boss_db
+pkg_boss_db_description = BossDB: a sharded, caching, pooling, evented ORM for Erlang
+pkg_boss_db_homepage = https://github.com/ErlyORM/boss_db
+pkg_boss_db_fetch = git
+pkg_boss_db_repo = https://github.com/ErlyORM/boss_db
+pkg_boss_db_commit = master
+
+PACKAGES += brod
+pkg_brod_name = brod
+pkg_brod_description = Kafka client in Erlang
+pkg_brod_homepage = https://github.com/klarna/brod
+pkg_brod_fetch = git
+pkg_brod_repo = https://github.com/klarna/brod.git
+pkg_brod_commit = master
+
+PACKAGES += bson
+pkg_bson_name = bson
+pkg_bson_description = BSON documents in Erlang, see bsonspec.org
+pkg_bson_homepage = https://github.com/comtihon/bson-erlang
+pkg_bson_fetch = git
+pkg_bson_repo = https://github.com/comtihon/bson-erlang
+pkg_bson_commit = master
+
+PACKAGES += bullet
+pkg_bullet_name = bullet
+pkg_bullet_description = Simple, reliable, efficient streaming for Cowboy.
+pkg_bullet_homepage = http://ninenines.eu
+pkg_bullet_fetch = git
+pkg_bullet_repo = https://github.com/ninenines/bullet
+pkg_bullet_commit = master
+
+PACKAGES += cache
+pkg_cache_name = cache
+pkg_cache_description = Erlang in-memory cache
+pkg_cache_homepage = https://github.com/fogfish/cache
+pkg_cache_fetch = git
+pkg_cache_repo = https://github.com/fogfish/cache
+pkg_cache_commit = master
+
+PACKAGES += cake
+pkg_cake_name = cake
+pkg_cake_description = Really simple terminal colorization
+pkg_cake_homepage = https://github.com/darach/cake-erl
+pkg_cake_fetch = git
+pkg_cake_repo = https://github.com/darach/cake-erl
+pkg_cake_commit = master
+
+PACKAGES += carotene
+pkg_carotene_name = carotene
+pkg_carotene_description = Real-time server
+pkg_carotene_homepage = https://github.com/carotene/carotene
+pkg_carotene_fetch = git
+pkg_carotene_repo = https://github.com/carotene/carotene
+pkg_carotene_commit = master
+
+PACKAGES += cberl
+pkg_cberl_name = cberl
+pkg_cberl_description = NIF based Erlang bindings for Couchbase
+pkg_cberl_homepage = https://github.com/chitika/cberl
+pkg_cberl_fetch = git
+pkg_cberl_repo = https://github.com/chitika/cberl
+pkg_cberl_commit = master
+
+PACKAGES += cecho
+pkg_cecho_name = cecho
+pkg_cecho_description = An ncurses library for Erlang
+pkg_cecho_homepage = https://github.com/mazenharake/cecho
+pkg_cecho_fetch = git
+pkg_cecho_repo = https://github.com/mazenharake/cecho
+pkg_cecho_commit = master
+
+PACKAGES += cferl
+pkg_cferl_name = cferl
+pkg_cferl_description = Rackspace / Open Stack Cloud Files Erlang Client
+pkg_cferl_homepage = https://github.com/ddossot/cferl
+pkg_cferl_fetch = git
+pkg_cferl_repo = https://github.com/ddossot/cferl
+pkg_cferl_commit = master
+
+PACKAGES += chaos_monkey
+pkg_chaos_monkey_name = chaos_monkey
+pkg_chaos_monkey_description = This is The CHAOS MONKEY. It will kill your processes.
+pkg_chaos_monkey_homepage = https://github.com/dLuna/chaos_monkey
+pkg_chaos_monkey_fetch = git
+pkg_chaos_monkey_repo = https://github.com/dLuna/chaos_monkey
+pkg_chaos_monkey_commit = master
+
+PACKAGES += check_node
+pkg_check_node_name = check_node
+pkg_check_node_description = Nagios Scripts for monitoring Riak
+pkg_check_node_homepage = https://github.com/basho-labs/riak_nagios
+pkg_check_node_fetch = git
+pkg_check_node_repo = https://github.com/basho-labs/riak_nagios
+pkg_check_node_commit = master
+
+PACKAGES += chronos
+pkg_chronos_name = chronos
+pkg_chronos_description = Timer module for Erlang that makes it easy to abstract time out of the tests.
+pkg_chronos_homepage = https://github.com/lehoff/chronos
+pkg_chronos_fetch = git
+pkg_chronos_repo = https://github.com/lehoff/chronos
+pkg_chronos_commit = master
+
+PACKAGES += chumak
+pkg_chumak_name = chumak
+pkg_chumak_description = Pure Erlang implementation of ZeroMQ Message Transport Protocol.
+pkg_chumak_homepage = http://choven.ca
+pkg_chumak_fetch = git
+pkg_chumak_repo = https://github.com/chovencorp/chumak
+pkg_chumak_commit = master
+
+PACKAGES += cl
+pkg_cl_name = cl
+pkg_cl_description = OpenCL binding for Erlang
+pkg_cl_homepage = https://github.com/tonyrog/cl
+pkg_cl_fetch = git
+pkg_cl_repo = https://github.com/tonyrog/cl
+pkg_cl_commit = master
+
+PACKAGES += clique
+pkg_clique_name = clique
+pkg_clique_description = CLI Framework for Erlang
+pkg_clique_homepage = https://github.com/basho/clique
+pkg_clique_fetch = git
+pkg_clique_repo = https://github.com/basho/clique
+pkg_clique_commit = develop
+
+PACKAGES += cloudi_core
+pkg_cloudi_core_name = cloudi_core
+pkg_cloudi_core_description = CloudI internal service runtime
+pkg_cloudi_core_homepage = http://cloudi.org/
+pkg_cloudi_core_fetch = git
+pkg_cloudi_core_repo = https://github.com/CloudI/cloudi_core
+pkg_cloudi_core_commit = master
+
+PACKAGES += cloudi_service_api_requests
+pkg_cloudi_service_api_requests_name = cloudi_service_api_requests
+pkg_cloudi_service_api_requests_description = CloudI Service API requests (JSON-RPC/Erlang-term support)
+pkg_cloudi_service_api_requests_homepage = http://cloudi.org/
+pkg_cloudi_service_api_requests_fetch = git
+pkg_cloudi_service_api_requests_repo = https://github.com/CloudI/cloudi_service_api_requests
+pkg_cloudi_service_api_requests_commit = master
+
+PACKAGES += cloudi_service_db
+pkg_cloudi_service_db_name = cloudi_service_db
+pkg_cloudi_service_db_description = CloudI Database (in-memory/testing/generic)
+pkg_cloudi_service_db_homepage = http://cloudi.org/
+pkg_cloudi_service_db_fetch = git
+pkg_cloudi_service_db_repo = https://github.com/CloudI/cloudi_service_db
+pkg_cloudi_service_db_commit = master
+
+PACKAGES += cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_name = cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_description = Cassandra CloudI Service
+pkg_cloudi_service_db_cassandra_homepage = http://cloudi.org/
+pkg_cloudi_service_db_cassandra_fetch = git
+pkg_cloudi_service_db_cassandra_repo = https://github.com/CloudI/cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_commit = master
+
+PACKAGES += cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_name = cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_description = Cassandra CQL CloudI Service
+pkg_cloudi_service_db_cassandra_cql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_cassandra_cql_fetch = git
+pkg_cloudi_service_db_cassandra_cql_repo = https://github.com/CloudI/cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_commit = master
+
+PACKAGES += cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_name = cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_description = CouchDB CloudI Service
+pkg_cloudi_service_db_couchdb_homepage = http://cloudi.org/
+pkg_cloudi_service_db_couchdb_fetch = git
+pkg_cloudi_service_db_couchdb_repo = https://github.com/CloudI/cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_commit = master
+
+PACKAGES += cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_name = cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_description = elasticsearch CloudI Service
+pkg_cloudi_service_db_elasticsearch_homepage = http://cloudi.org/
+pkg_cloudi_service_db_elasticsearch_fetch = git
+pkg_cloudi_service_db_elasticsearch_repo = https://github.com/CloudI/cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_commit = master
+
+PACKAGES += cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_name = cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_description = memcached CloudI Service
+pkg_cloudi_service_db_memcached_homepage = http://cloudi.org/
+pkg_cloudi_service_db_memcached_fetch = git
+pkg_cloudi_service_db_memcached_repo = https://github.com/CloudI/cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_commit = master
+
+PACKAGES += cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_name = cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_description = MySQL CloudI Service
+pkg_cloudi_service_db_mysql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_mysql_fetch = git
+pkg_cloudi_service_db_mysql_repo = https://github.com/CloudI/cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_commit = master
+
+PACKAGES += cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_name = cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_description = PostgreSQL CloudI Service
+pkg_cloudi_service_db_pgsql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_pgsql_fetch = git
+pkg_cloudi_service_db_pgsql_repo = https://github.com/CloudI/cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_commit = master
+
+PACKAGES += cloudi_service_db_riak
+pkg_cloudi_service_db_riak_name = cloudi_service_db_riak
+pkg_cloudi_service_db_riak_description = Riak CloudI Service
+pkg_cloudi_service_db_riak_homepage = http://cloudi.org/
+pkg_cloudi_service_db_riak_fetch = git
+pkg_cloudi_service_db_riak_repo = https://github.com/CloudI/cloudi_service_db_riak
+pkg_cloudi_service_db_riak_commit = master
+
+PACKAGES += cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_name = cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_description = Tokyo Tyrant CloudI Service
+pkg_cloudi_service_db_tokyotyrant_homepage = http://cloudi.org/
+pkg_cloudi_service_db_tokyotyrant_fetch = git
+pkg_cloudi_service_db_tokyotyrant_repo = https://github.com/CloudI/cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_commit = master
+
+PACKAGES += cloudi_service_filesystem
+pkg_cloudi_service_filesystem_name = cloudi_service_filesystem
+pkg_cloudi_service_filesystem_description = Filesystem CloudI Service
+pkg_cloudi_service_filesystem_homepage = http://cloudi.org/
+pkg_cloudi_service_filesystem_fetch = git
+pkg_cloudi_service_filesystem_repo = https://github.com/CloudI/cloudi_service_filesystem
+pkg_cloudi_service_filesystem_commit = master
+
+PACKAGES += cloudi_service_http_client
+pkg_cloudi_service_http_client_name = cloudi_service_http_client
+pkg_cloudi_service_http_client_description = HTTP client CloudI Service
+pkg_cloudi_service_http_client_homepage = http://cloudi.org/
+pkg_cloudi_service_http_client_fetch = git
+pkg_cloudi_service_http_client_repo = https://github.com/CloudI/cloudi_service_http_client
+pkg_cloudi_service_http_client_commit = master
+
+PACKAGES += cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_name = cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_description = cowboy HTTP/HTTPS CloudI Service
+pkg_cloudi_service_http_cowboy_homepage = http://cloudi.org/
+pkg_cloudi_service_http_cowboy_fetch = git
+pkg_cloudi_service_http_cowboy_repo = https://github.com/CloudI/cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_commit = master
+
+PACKAGES += cloudi_service_http_elli
+pkg_cloudi_service_http_elli_name = cloudi_service_http_elli
+pkg_cloudi_service_http_elli_description = elli HTTP CloudI Service
+pkg_cloudi_service_http_elli_homepage = http://cloudi.org/
+pkg_cloudi_service_http_elli_fetch = git
+pkg_cloudi_service_http_elli_repo = https://github.com/CloudI/cloudi_service_http_elli
+pkg_cloudi_service_http_elli_commit = master
+
+PACKAGES += cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_name = cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_description = Map/Reduce CloudI Service
+pkg_cloudi_service_map_reduce_homepage = http://cloudi.org/
+pkg_cloudi_service_map_reduce_fetch = git
+pkg_cloudi_service_map_reduce_repo = https://github.com/CloudI/cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_commit = master
+
+PACKAGES += cloudi_service_oauth1
+pkg_cloudi_service_oauth1_name = cloudi_service_oauth1
+pkg_cloudi_service_oauth1_description = OAuth v1.0 CloudI Service
+pkg_cloudi_service_oauth1_homepage = http://cloudi.org/
+pkg_cloudi_service_oauth1_fetch = git
+pkg_cloudi_service_oauth1_repo = https://github.com/CloudI/cloudi_service_oauth1
+pkg_cloudi_service_oauth1_commit = master
+
+PACKAGES += cloudi_service_queue
+pkg_cloudi_service_queue_name = cloudi_service_queue
+pkg_cloudi_service_queue_description = Persistent Queue Service
+pkg_cloudi_service_queue_homepage = http://cloudi.org/
+pkg_cloudi_service_queue_fetch = git
+pkg_cloudi_service_queue_repo = https://github.com/CloudI/cloudi_service_queue
+pkg_cloudi_service_queue_commit = master
+
+PACKAGES += cloudi_service_quorum
+pkg_cloudi_service_quorum_name = cloudi_service_quorum
+pkg_cloudi_service_quorum_description = CloudI Quorum Service
+pkg_cloudi_service_quorum_homepage = http://cloudi.org/
+pkg_cloudi_service_quorum_fetch = git
+pkg_cloudi_service_quorum_repo = https://github.com/CloudI/cloudi_service_quorum
+pkg_cloudi_service_quorum_commit = master
+
+PACKAGES += cloudi_service_router
+pkg_cloudi_service_router_name = cloudi_service_router
+pkg_cloudi_service_router_description = CloudI Router Service
+pkg_cloudi_service_router_homepage = http://cloudi.org/
+pkg_cloudi_service_router_fetch = git
+pkg_cloudi_service_router_repo = https://github.com/CloudI/cloudi_service_router
+pkg_cloudi_service_router_commit = master
+
+PACKAGES += cloudi_service_tcp
+pkg_cloudi_service_tcp_name = cloudi_service_tcp
+pkg_cloudi_service_tcp_description = TCP CloudI Service
+pkg_cloudi_service_tcp_homepage = http://cloudi.org/
+pkg_cloudi_service_tcp_fetch = git
+pkg_cloudi_service_tcp_repo = https://github.com/CloudI/cloudi_service_tcp
+pkg_cloudi_service_tcp_commit = master
+
+PACKAGES += cloudi_service_timers
+pkg_cloudi_service_timers_name = cloudi_service_timers
+pkg_cloudi_service_timers_description = Timers CloudI Service
+pkg_cloudi_service_timers_homepage = http://cloudi.org/
+pkg_cloudi_service_timers_fetch = git
+pkg_cloudi_service_timers_repo = https://github.com/CloudI/cloudi_service_timers
+pkg_cloudi_service_timers_commit = master
+
+PACKAGES += cloudi_service_udp
+pkg_cloudi_service_udp_name = cloudi_service_udp
+pkg_cloudi_service_udp_description = UDP CloudI Service
+pkg_cloudi_service_udp_homepage = http://cloudi.org/
+pkg_cloudi_service_udp_fetch = git
+pkg_cloudi_service_udp_repo = https://github.com/CloudI/cloudi_service_udp
+pkg_cloudi_service_udp_commit = master
+
+PACKAGES += cloudi_service_validate
+pkg_cloudi_service_validate_name = cloudi_service_validate
+pkg_cloudi_service_validate_description = CloudI Validate Service
+pkg_cloudi_service_validate_homepage = http://cloudi.org/
+pkg_cloudi_service_validate_fetch = git
+pkg_cloudi_service_validate_repo = https://github.com/CloudI/cloudi_service_validate
+pkg_cloudi_service_validate_commit = master
+
+PACKAGES += cloudi_service_zeromq
+pkg_cloudi_service_zeromq_name = cloudi_service_zeromq
+pkg_cloudi_service_zeromq_description = ZeroMQ CloudI Service
+pkg_cloudi_service_zeromq_homepage = http://cloudi.org/
+pkg_cloudi_service_zeromq_fetch = git
+pkg_cloudi_service_zeromq_repo = https://github.com/CloudI/cloudi_service_zeromq
+pkg_cloudi_service_zeromq_commit = master
+
+PACKAGES += cluster_info
+pkg_cluster_info_name = cluster_info
+pkg_cluster_info_description = Fork of Hibari's nifty cluster_info OTP app
+pkg_cluster_info_homepage = https://github.com/basho/cluster_info
+pkg_cluster_info_fetch = git
+pkg_cluster_info_repo = https://github.com/basho/cluster_info
+pkg_cluster_info_commit = master
+
+PACKAGES += color
+pkg_color_name = color
+pkg_color_description = ANSI colors for your Erlang
+pkg_color_homepage = https://github.com/julianduque/erlang-color
+pkg_color_fetch = git
+pkg_color_repo = https://github.com/julianduque/erlang-color
+pkg_color_commit = master
+
+PACKAGES += confetti
+pkg_confetti_name = confetti
+pkg_confetti_description = Erlang configuration provider / application:get_env/2 on steroids
+pkg_confetti_homepage = https://github.com/jtendo/confetti
+pkg_confetti_fetch = git
+pkg_confetti_repo = https://github.com/jtendo/confetti
+pkg_confetti_commit = master
+
+PACKAGES += couchbeam
+pkg_couchbeam_name = couchbeam
+pkg_couchbeam_description = Apache CouchDB client in Erlang
+pkg_couchbeam_homepage = https://github.com/benoitc/couchbeam
+pkg_couchbeam_fetch = git
+pkg_couchbeam_repo = https://github.com/benoitc/couchbeam
+pkg_couchbeam_commit = master
+
+PACKAGES += covertool
+pkg_covertool_name = covertool
+pkg_covertool_description = Tool to convert Erlang cover data files into Cobertura XML reports
+pkg_covertool_homepage = https://github.com/idubrov/covertool
+pkg_covertool_fetch = git
+pkg_covertool_repo = https://github.com/idubrov/covertool
+pkg_covertool_commit = master
+
+PACKAGES += cowboy
+pkg_cowboy_name = cowboy
+pkg_cowboy_description = Small, fast and modular HTTP server.
+pkg_cowboy_homepage = http://ninenines.eu
+pkg_cowboy_fetch = git
+pkg_cowboy_repo = https://github.com/ninenines/cowboy
+pkg_cowboy_commit = 1.0.4
+
+PACKAGES += cowdb
+pkg_cowdb_name = cowdb
+pkg_cowdb_description = Pure Key/Value database library for Erlang Applications
+pkg_cowdb_homepage = https://github.com/refuge/cowdb
+pkg_cowdb_fetch = git
+pkg_cowdb_repo = https://github.com/refuge/cowdb
+pkg_cowdb_commit = master
+
+PACKAGES += cowlib
+pkg_cowlib_name = cowlib
+pkg_cowlib_description = Support library for manipulating Web protocols.
+pkg_cowlib_homepage = http://ninenines.eu
+pkg_cowlib_fetch = git
+pkg_cowlib_repo = https://github.com/ninenines/cowlib
+pkg_cowlib_commit = 1.0.2
+
+PACKAGES += cpg
+pkg_cpg_name = cpg
+pkg_cpg_description = CloudI Process Groups
+pkg_cpg_homepage = https://github.com/okeuday/cpg
+pkg_cpg_fetch = git
+pkg_cpg_repo = https://github.com/okeuday/cpg
+pkg_cpg_commit = master
+
+PACKAGES += cqerl
+pkg_cqerl_name = cqerl
+pkg_cqerl_description = Native Erlang CQL client for Cassandra
+pkg_cqerl_homepage = https://matehat.github.io/cqerl/
+pkg_cqerl_fetch = git
+pkg_cqerl_repo = https://github.com/matehat/cqerl
+pkg_cqerl_commit = master
+
+PACKAGES += cr
+pkg_cr_name = cr
+pkg_cr_description = Chain Replication
+pkg_cr_homepage = https://synrc.com/apps/cr/doc/cr.htm
+pkg_cr_fetch = git
+pkg_cr_repo = https://github.com/spawnproc/cr
+pkg_cr_commit = master
+
+PACKAGES += cuttlefish
+pkg_cuttlefish_name = cuttlefish
+pkg_cuttlefish_description = never lose your childlike sense of wonder baby cuttlefish, promise me?
+pkg_cuttlefish_homepage = https://github.com/basho/cuttlefish
+pkg_cuttlefish_fetch = git
+pkg_cuttlefish_repo = https://github.com/basho/cuttlefish
+pkg_cuttlefish_commit = master
+
+PACKAGES += damocles
+pkg_damocles_name = damocles
+pkg_damocles_description = Erlang library for generating adversarial network conditions for QAing distributed applications/systems on a single Linux box.
+pkg_damocles_homepage = https://github.com/lostcolony/damocles
+pkg_damocles_fetch = git
+pkg_damocles_repo = https://github.com/lostcolony/damocles
+pkg_damocles_commit = master
+
+PACKAGES += debbie
+pkg_debbie_name = debbie
+pkg_debbie_description = .DEB Built In Erlang
+pkg_debbie_homepage = https://github.com/crownedgrouse/debbie
+pkg_debbie_fetch = git
+pkg_debbie_repo = https://github.com/crownedgrouse/debbie
+pkg_debbie_commit = master
+
+PACKAGES += decimal
+pkg_decimal_name = decimal
+pkg_decimal_description = An Erlang decimal arithmetic library
+pkg_decimal_homepage = https://github.com/tim/erlang-decimal
+pkg_decimal_fetch = git
+pkg_decimal_repo = https://github.com/tim/erlang-decimal
+pkg_decimal_commit = master
+
+PACKAGES += detergent
+pkg_detergent_name = detergent
+pkg_detergent_description = An emulsifying Erlang SOAP library
+pkg_detergent_homepage = https://github.com/devinus/detergent
+pkg_detergent_fetch = git
+pkg_detergent_repo = https://github.com/devinus/detergent
+pkg_detergent_commit = master
+
+PACKAGES += detest
+pkg_detest_name = detest
+pkg_detest_description = Tool for running tests on a cluster of erlang nodes
+pkg_detest_homepage = https://github.com/biokoda/detest
+pkg_detest_fetch = git
+pkg_detest_repo = https://github.com/biokoda/detest
+pkg_detest_commit = master
+
+PACKAGES += dh_date
+pkg_dh_date_name = dh_date
+pkg_dh_date_description = Date formatting / parsing library for erlang
+pkg_dh_date_homepage = https://github.com/daleharvey/dh_date
+pkg_dh_date_fetch = git
+pkg_dh_date_repo = https://github.com/daleharvey/dh_date
+pkg_dh_date_commit = master
+
+PACKAGES += dirbusterl
+pkg_dirbusterl_name = dirbusterl
+pkg_dirbusterl_description = DirBuster successor in Erlang
+pkg_dirbusterl_homepage = https://github.com/silentsignal/DirBustErl
+pkg_dirbusterl_fetch = git
+pkg_dirbusterl_repo = https://github.com/silentsignal/DirBustErl
+pkg_dirbusterl_commit = master
+
+PACKAGES += dispcount
+pkg_dispcount_name = dispcount
+pkg_dispcount_description = Erlang task dispatcher based on ETS counters.
+pkg_dispcount_homepage = https://github.com/ferd/dispcount
+pkg_dispcount_fetch = git
+pkg_dispcount_repo = https://github.com/ferd/dispcount
+pkg_dispcount_commit = master
+
+PACKAGES += dlhttpc
+pkg_dlhttpc_name = dlhttpc
+pkg_dlhttpc_description = dispcount-based lhttpc fork for massive amounts of requests to limited endpoints
+pkg_dlhttpc_homepage = https://github.com/ferd/dlhttpc
+pkg_dlhttpc_fetch = git
+pkg_dlhttpc_repo = https://github.com/ferd/dlhttpc
+pkg_dlhttpc_commit = master
+
+PACKAGES += dns
+pkg_dns_name = dns
+pkg_dns_description = Erlang DNS library
+pkg_dns_homepage = https://github.com/aetrion/dns_erlang
+pkg_dns_fetch = git
+pkg_dns_repo = https://github.com/aetrion/dns_erlang
+pkg_dns_commit = master
+
+PACKAGES += dnssd
+pkg_dnssd_name = dnssd
+pkg_dnssd_description = Erlang interface to Apple's Bonjour DNS Service Discovery implementation
+pkg_dnssd_homepage = https://github.com/benoitc/dnssd_erlang
+pkg_dnssd_fetch = git
+pkg_dnssd_repo = https://github.com/benoitc/dnssd_erlang
+pkg_dnssd_commit = master
+
+PACKAGES += dynamic_compile
+pkg_dynamic_compile_name = dynamic_compile
+pkg_dynamic_compile_description = compile and load erlang modules from string input
+pkg_dynamic_compile_homepage = https://github.com/jkvor/dynamic_compile
+pkg_dynamic_compile_fetch = git
+pkg_dynamic_compile_repo = https://github.com/jkvor/dynamic_compile
+pkg_dynamic_compile_commit = master
+
+PACKAGES += e2
+pkg_e2_name = e2
+pkg_e2_description = Library to simplify writing correct OTP applications.
+pkg_e2_homepage = http://e2project.org
+pkg_e2_fetch = git
+pkg_e2_repo = https://github.com/gar1t/e2
+pkg_e2_commit = master
+
+PACKAGES += eamf
+pkg_eamf_name = eamf
+pkg_eamf_description = eAMF provides Action Message Format (AMF) support for Erlang
+pkg_eamf_homepage = https://github.com/mrinalwadhwa/eamf
+pkg_eamf_fetch = git
+pkg_eamf_repo = https://github.com/mrinalwadhwa/eamf
+pkg_eamf_commit = master
+
+PACKAGES += eavro
+pkg_eavro_name = eavro
+pkg_eavro_description = Apache Avro encoder/decoder
+pkg_eavro_homepage = https://github.com/SIfoxDevTeam/eavro
+pkg_eavro_fetch = git
+pkg_eavro_repo = https://github.com/SIfoxDevTeam/eavro
+pkg_eavro_commit = master
+
+PACKAGES += ecapnp
+pkg_ecapnp_name = ecapnp
+pkg_ecapnp_description = Cap'n Proto library for Erlang
+pkg_ecapnp_homepage = https://github.com/kaos/ecapnp
+pkg_ecapnp_fetch = git
+pkg_ecapnp_repo = https://github.com/kaos/ecapnp
+pkg_ecapnp_commit = master
+
+PACKAGES += econfig
+pkg_econfig_name = econfig
+pkg_econfig_description = simple Erlang config handler using INI files
+pkg_econfig_homepage = https://github.com/benoitc/econfig
+pkg_econfig_fetch = git
+pkg_econfig_repo = https://github.com/benoitc/econfig
+pkg_econfig_commit = master
+
+PACKAGES += edate
+pkg_edate_name = edate
+pkg_edate_description = date manipulation library for erlang
+pkg_edate_homepage = https://github.com/dweldon/edate
+pkg_edate_fetch = git
+pkg_edate_repo = https://github.com/dweldon/edate
+pkg_edate_commit = master
+
+PACKAGES += edgar
+pkg_edgar_name = edgar
+pkg_edgar_description = Erlang Does GNU AR
+pkg_edgar_homepage = https://github.com/crownedgrouse/edgar
+pkg_edgar_fetch = git
+pkg_edgar_repo = https://github.com/crownedgrouse/edgar
+pkg_edgar_commit = master
+
+PACKAGES += edis
+pkg_edis_name = edis
+pkg_edis_description = An Erlang implementation of Redis KV Store
+pkg_edis_homepage = http://inaka.github.com/edis/
+pkg_edis_fetch = git
+pkg_edis_repo = https://github.com/inaka/edis
+pkg_edis_commit = master
+
+PACKAGES += edns
+pkg_edns_name = edns
+pkg_edns_description = Erlang/OTP DNS server
+pkg_edns_homepage = https://github.com/hcvst/erlang-dns
+pkg_edns_fetch = git
+pkg_edns_repo = https://github.com/hcvst/erlang-dns
+pkg_edns_commit = master
+
+PACKAGES += edown
+pkg_edown_name = edown
+pkg_edown_description = EDoc extension for generating Github-flavored Markdown
+pkg_edown_homepage = https://github.com/uwiger/edown
+pkg_edown_fetch = git
+pkg_edown_repo = https://github.com/uwiger/edown
+pkg_edown_commit = master
+
+PACKAGES += eep
+pkg_eep_name = eep
+pkg_eep_description = Erlang Easy Profiling (eep) application provides a way to analyze application performance and call hierarchy
+pkg_eep_homepage = https://github.com/virtan/eep
+pkg_eep_fetch = git
+pkg_eep_repo = https://github.com/virtan/eep
+pkg_eep_commit = master
+
+PACKAGES += eep_app
+pkg_eep_app_name = eep_app
+pkg_eep_app_description = Embedded Event Processing
+pkg_eep_app_homepage = https://github.com/darach/eep-erl
+pkg_eep_app_fetch = git
+pkg_eep_app_repo = https://github.com/darach/eep-erl
+pkg_eep_app_commit = master
+
+PACKAGES += efene
+pkg_efene_name = efene
+pkg_efene_description = Alternative syntax for the Erlang Programming Language focusing on simplicity, ease of use and programmer UX
+pkg_efene_homepage = https://github.com/efene/efene
+pkg_efene_fetch = git
+pkg_efene_repo = https://github.com/efene/efene
+pkg_efene_commit = master
+
+PACKAGES += egeoip
+pkg_egeoip_name = egeoip
+pkg_egeoip_description = Erlang IP Geolocation module, currently supporting the MaxMind GeoLite City Database.
+pkg_egeoip_homepage = https://github.com/mochi/egeoip
+pkg_egeoip_fetch = git
+pkg_egeoip_repo = https://github.com/mochi/egeoip
+pkg_egeoip_commit = master
+
+PACKAGES += ehsa
+pkg_ehsa_name = ehsa
+pkg_ehsa_description = Erlang HTTP server basic and digest authentication modules
+pkg_ehsa_homepage = https://bitbucket.org/a12n/ehsa
+pkg_ehsa_fetch = hg
+pkg_ehsa_repo = https://bitbucket.org/a12n/ehsa
+pkg_ehsa_commit = default
+
+PACKAGES += ej
+pkg_ej_name = ej
+pkg_ej_description = Helper module for working with Erlang terms representing JSON
+pkg_ej_homepage = https://github.com/seth/ej
+pkg_ej_fetch = git
+pkg_ej_repo = https://github.com/seth/ej
+pkg_ej_commit = master
+
+PACKAGES += ejabberd
+pkg_ejabberd_name = ejabberd
+pkg_ejabberd_description = Robust, ubiquitous and massively scalable Jabber / XMPP Instant Messaging platform
+pkg_ejabberd_homepage = https://github.com/processone/ejabberd
+pkg_ejabberd_fetch = git
+pkg_ejabberd_repo = https://github.com/processone/ejabberd
+pkg_ejabberd_commit = master
+
+PACKAGES += ejwt
+pkg_ejwt_name = ejwt
+pkg_ejwt_description = erlang library for JSON Web Token
+pkg_ejwt_homepage = https://github.com/artefactop/ejwt
+pkg_ejwt_fetch = git
+pkg_ejwt_repo = https://github.com/artefactop/ejwt
+pkg_ejwt_commit = master
+
+PACKAGES += ekaf
+pkg_ekaf_name = ekaf
+pkg_ekaf_description = A minimal, high-performance Kafka client in Erlang.
+pkg_ekaf_homepage = https://github.com/helpshift/ekaf
+pkg_ekaf_fetch = git
+pkg_ekaf_repo = https://github.com/helpshift/ekaf
+pkg_ekaf_commit = master
+
+PACKAGES += elarm
+pkg_elarm_name = elarm
+pkg_elarm_description = Alarm Manager for Erlang.
+pkg_elarm_homepage = https://github.com/esl/elarm
+pkg_elarm_fetch = git
+pkg_elarm_repo = https://github.com/esl/elarm
+pkg_elarm_commit = master
+
+PACKAGES += eleveldb
+pkg_eleveldb_name = eleveldb
+pkg_eleveldb_description = Erlang LevelDB API
+pkg_eleveldb_homepage = https://github.com/basho/eleveldb
+pkg_eleveldb_fetch = git
+pkg_eleveldb_repo = https://github.com/basho/eleveldb
+pkg_eleveldb_commit = master
+
+PACKAGES += elixir
+pkg_elixir_name = elixir
+pkg_elixir_description = Elixir is a dynamic, functional language designed for building scalable and maintainable applications
+pkg_elixir_homepage = https://elixir-lang.org/
+pkg_elixir_fetch = git
+pkg_elixir_repo = https://github.com/elixir-lang/elixir
+pkg_elixir_commit = master
+
+PACKAGES += elli
+pkg_elli_name = elli
+pkg_elli_description = Simple, robust and performant Erlang web server
+pkg_elli_homepage = https://github.com/elli-lib/elli
+pkg_elli_fetch = git
+pkg_elli_repo = https://github.com/elli-lib/elli
+pkg_elli_commit = master
+
+PACKAGES += elvis
+pkg_elvis_name = elvis
+pkg_elvis_description = Erlang Style Reviewer
+pkg_elvis_homepage = https://github.com/inaka/elvis
+pkg_elvis_fetch = git
+pkg_elvis_repo = https://github.com/inaka/elvis
+pkg_elvis_commit = master
+
+PACKAGES += emagick
+pkg_emagick_name = emagick
+pkg_emagick_description = Wrapper for Graphics/ImageMagick command line tool.
+pkg_emagick_homepage = https://github.com/kivra/emagick
+pkg_emagick_fetch = git
+pkg_emagick_repo = https://github.com/kivra/emagick
+pkg_emagick_commit = master
+
+PACKAGES += emysql
+pkg_emysql_name = emysql
+pkg_emysql_description = Stable, pure Erlang MySQL driver.
+pkg_emysql_homepage = https://github.com/Eonblast/Emysql
+pkg_emysql_fetch = git
+pkg_emysql_repo = https://github.com/Eonblast/Emysql
+pkg_emysql_commit = master
+
+PACKAGES += enm
+pkg_enm_name = enm
+pkg_enm_description = Erlang driver for nanomsg
+pkg_enm_homepage = https://github.com/basho/enm
+pkg_enm_fetch = git
+pkg_enm_repo = https://github.com/basho/enm
+pkg_enm_commit = master
+
+PACKAGES += entop
+pkg_entop_name = entop
+pkg_entop_description = A top-like tool for monitoring an Erlang node
+pkg_entop_homepage = https://github.com/mazenharake/entop
+pkg_entop_fetch = git
+pkg_entop_repo = https://github.com/mazenharake/entop
+pkg_entop_commit = master
+
+PACKAGES += epcap
+pkg_epcap_name = epcap
+pkg_epcap_description = Erlang packet capture interface using pcap
+pkg_epcap_homepage = https://github.com/msantos/epcap
+pkg_epcap_fetch = git
+pkg_epcap_repo = https://github.com/msantos/epcap
+pkg_epcap_commit = master
+
+PACKAGES += eper
+pkg_eper_name = eper
+pkg_eper_description = Erlang performance and debugging tools.
+pkg_eper_homepage = https://github.com/massemanet/eper
+pkg_eper_fetch = git
+pkg_eper_repo = https://github.com/massemanet/eper
+pkg_eper_commit = master
+
+PACKAGES += epgsql
+pkg_epgsql_name = epgsql
+pkg_epgsql_description = Erlang PostgreSQL client library.
+pkg_epgsql_homepage = https://github.com/epgsql/epgsql
+pkg_epgsql_fetch = git
+pkg_epgsql_repo = https://github.com/epgsql/epgsql
+pkg_epgsql_commit = master
+
+PACKAGES += episcina
+pkg_episcina_name = episcina
+pkg_episcina_description = A simple, non-intrusive resource pool for connections
+pkg_episcina_homepage = https://github.com/erlware/episcina
+pkg_episcina_fetch = git
+pkg_episcina_repo = https://github.com/erlware/episcina
+pkg_episcina_commit = master
+
+PACKAGES += eplot
+pkg_eplot_name = eplot
+pkg_eplot_description = A plot engine written in erlang.
+pkg_eplot_homepage = https://github.com/psyeugenic/eplot
+pkg_eplot_fetch = git
+pkg_eplot_repo = https://github.com/psyeugenic/eplot
+pkg_eplot_commit = master
+
+PACKAGES += epocxy
+pkg_epocxy_name = epocxy
+pkg_epocxy_description = Erlang Patterns of Concurrency
+pkg_epocxy_homepage = https://github.com/duomark/epocxy
+pkg_epocxy_fetch = git
+pkg_epocxy_repo = https://github.com/duomark/epocxy
+pkg_epocxy_commit = master
+
+PACKAGES += epubnub
+pkg_epubnub_name = epubnub
+pkg_epubnub_description = Erlang PubNub API
+pkg_epubnub_homepage = https://github.com/tsloughter/epubnub
+pkg_epubnub_fetch = git
+pkg_epubnub_repo = https://github.com/tsloughter/epubnub
+pkg_epubnub_commit = master
+
+PACKAGES += eqm
+pkg_eqm_name = eqm
+pkg_eqm_description = Erlang pub sub with supply-demand channels
+pkg_eqm_homepage = https://github.com/loucash/eqm
+pkg_eqm_fetch = git
+pkg_eqm_repo = https://github.com/loucash/eqm
+pkg_eqm_commit = master
+
+PACKAGES += eredis
+pkg_eredis_name = eredis
+pkg_eredis_description = Erlang Redis client
+pkg_eredis_homepage = https://github.com/wooga/eredis
+pkg_eredis_fetch = git
+pkg_eredis_repo = https://github.com/wooga/eredis
+pkg_eredis_commit = master
+
+PACKAGES += eredis_pool
+pkg_eredis_pool_name = eredis_pool
+pkg_eredis_pool_description = eredis_pool is a pool of Redis clients, using eredis and poolboy.
+pkg_eredis_pool_homepage = https://github.com/hiroeorz/eredis_pool
+pkg_eredis_pool_fetch = git
+pkg_eredis_pool_repo = https://github.com/hiroeorz/eredis_pool
+pkg_eredis_pool_commit = master
+
+PACKAGES += erl_streams
+pkg_erl_streams_name = erl_streams
+pkg_erl_streams_description = Streams in Erlang
+pkg_erl_streams_homepage = https://github.com/epappas/erl_streams
+pkg_erl_streams_fetch = git
+pkg_erl_streams_repo = https://github.com/epappas/erl_streams
+pkg_erl_streams_commit = master
+
+PACKAGES += erlang_cep
+pkg_erlang_cep_name = erlang_cep
+pkg_erlang_cep_description = A basic CEP package written in erlang
+pkg_erlang_cep_homepage = https://github.com/danmacklin/erlang_cep
+pkg_erlang_cep_fetch = git
+pkg_erlang_cep_repo = https://github.com/danmacklin/erlang_cep
+pkg_erlang_cep_commit = master
+
+PACKAGES += erlang_js
+pkg_erlang_js_name = erlang_js
+pkg_erlang_js_description = A linked-in driver for Erlang to Mozilla's Spidermonkey Javascript runtime.
+pkg_erlang_js_homepage = https://github.com/basho/erlang_js
+pkg_erlang_js_fetch = git
+pkg_erlang_js_repo = https://github.com/basho/erlang_js
+pkg_erlang_js_commit = master
+
+PACKAGES += erlang_localtime
+pkg_erlang_localtime_name = erlang_localtime
+pkg_erlang_localtime_description = Erlang library for conversion from one local time to another
+pkg_erlang_localtime_homepage = https://github.com/dmitryme/erlang_localtime
+pkg_erlang_localtime_fetch = git
+pkg_erlang_localtime_repo = https://github.com/dmitryme/erlang_localtime
+pkg_erlang_localtime_commit = master
+
+PACKAGES += erlang_smtp
+pkg_erlang_smtp_name = erlang_smtp
+pkg_erlang_smtp_description = Erlang SMTP and POP3 server code.
+pkg_erlang_smtp_homepage = https://github.com/tonyg/erlang-smtp
+pkg_erlang_smtp_fetch = git
+pkg_erlang_smtp_repo = https://github.com/tonyg/erlang-smtp
+pkg_erlang_smtp_commit = master
+
+PACKAGES += erlang_term
+pkg_erlang_term_name = erlang_term
+pkg_erlang_term_description = Erlang Term Info
+pkg_erlang_term_homepage = https://github.com/okeuday/erlang_term
+pkg_erlang_term_fetch = git
+pkg_erlang_term_repo = https://github.com/okeuday/erlang_term
+pkg_erlang_term_commit = master
+
+PACKAGES += erlastic_search
+pkg_erlastic_search_name = erlastic_search
+pkg_erlastic_search_description = An Erlang app for communicating with Elasticsearch's REST interface.
+pkg_erlastic_search_homepage = https://github.com/tsloughter/erlastic_search
+pkg_erlastic_search_fetch = git
+pkg_erlastic_search_repo = https://github.com/tsloughter/erlastic_search
+pkg_erlastic_search_commit = master
+
+PACKAGES += erlasticsearch
+pkg_erlasticsearch_name = erlasticsearch
+pkg_erlasticsearch_description = Erlang thrift interface to elastic_search
+pkg_erlasticsearch_homepage = https://github.com/dieswaytoofast/erlasticsearch
+pkg_erlasticsearch_fetch = git
+pkg_erlasticsearch_repo = https://github.com/dieswaytoofast/erlasticsearch
+pkg_erlasticsearch_commit = master
+
+PACKAGES += erlbrake
+pkg_erlbrake_name = erlbrake
+pkg_erlbrake_description = Erlang Airbrake notification client
+pkg_erlbrake_homepage = https://github.com/kenpratt/erlbrake
+pkg_erlbrake_fetch = git
+pkg_erlbrake_repo = https://github.com/kenpratt/erlbrake
+pkg_erlbrake_commit = master
+
+PACKAGES += erlcloud
+pkg_erlcloud_name = erlcloud
+pkg_erlcloud_description = Cloud Computing library for erlang (Amazon EC2, S3, SQS, SimpleDB, Mechanical Turk, ELB)
+pkg_erlcloud_homepage = https://github.com/gleber/erlcloud
+pkg_erlcloud_fetch = git
+pkg_erlcloud_repo = https://github.com/gleber/erlcloud
+pkg_erlcloud_commit = master
+
+PACKAGES += erlcron
+pkg_erlcron_name = erlcron
+pkg_erlcron_description = Erlang cronish system
+pkg_erlcron_homepage = https://github.com/erlware/erlcron
+pkg_erlcron_fetch = git
+pkg_erlcron_repo = https://github.com/erlware/erlcron
+pkg_erlcron_commit = master
+
+PACKAGES += erldb
+pkg_erldb_name = erldb
+pkg_erldb_description = ORM (Object-relational mapping) application implemented in Erlang
+pkg_erldb_homepage = http://erldb.org
+pkg_erldb_fetch = git
+pkg_erldb_repo = https://github.com/erldb/erldb
+pkg_erldb_commit = master
+
+PACKAGES += erldis
+pkg_erldis_name = erldis
+pkg_erldis_description = redis erlang client library
+pkg_erldis_homepage = https://github.com/cstar/erldis
+pkg_erldis_fetch = git
+pkg_erldis_repo = https://github.com/cstar/erldis
+pkg_erldis_commit = master
+
+PACKAGES += erldns
+pkg_erldns_name = erldns
+pkg_erldns_description = DNS server, in erlang.
+pkg_erldns_homepage = https://github.com/aetrion/erl-dns
+pkg_erldns_fetch = git
+pkg_erldns_repo = https://github.com/aetrion/erl-dns
+pkg_erldns_commit = master
+
+PACKAGES += erldocker
+pkg_erldocker_name = erldocker
+pkg_erldocker_description = Docker Remote API client for Erlang
+pkg_erldocker_homepage = https://github.com/proger/erldocker
+pkg_erldocker_fetch = git
+pkg_erldocker_repo = https://github.com/proger/erldocker
+pkg_erldocker_commit = master
+
+PACKAGES += erlfsmon
+pkg_erlfsmon_name = erlfsmon
+pkg_erlfsmon_description = Erlang filesystem event watcher for Linux and OSX
+pkg_erlfsmon_homepage = https://github.com/proger/erlfsmon
+pkg_erlfsmon_fetch = git
+pkg_erlfsmon_repo = https://github.com/proger/erlfsmon
+pkg_erlfsmon_commit = master
+
+PACKAGES += erlgit
+pkg_erlgit_name = erlgit
+pkg_erlgit_description = Erlang convenience wrapper around git executable
+pkg_erlgit_homepage = https://github.com/gleber/erlgit
+pkg_erlgit_fetch = git
+pkg_erlgit_repo = https://github.com/gleber/erlgit
+pkg_erlgit_commit = master
+
+PACKAGES += erlguten
+pkg_erlguten_name = erlguten
+pkg_erlguten_description = ErlGuten is a system for high-quality typesetting, written purely in Erlang.
+pkg_erlguten_homepage = https://github.com/richcarl/erlguten
+pkg_erlguten_fetch = git
+pkg_erlguten_repo = https://github.com/richcarl/erlguten
+pkg_erlguten_commit = master
+
+PACKAGES += erlmc
+pkg_erlmc_name = erlmc
+pkg_erlmc_description = Erlang memcached binary protocol client
+pkg_erlmc_homepage = https://github.com/jkvor/erlmc
+pkg_erlmc_fetch = git
+pkg_erlmc_repo = https://github.com/jkvor/erlmc
+pkg_erlmc_commit = master
+
+PACKAGES += erlmongo
+pkg_erlmongo_name = erlmongo
+pkg_erlmongo_description = Record based Erlang driver for MongoDB with gridfs support
+pkg_erlmongo_homepage = https://github.com/SergejJurecko/erlmongo
+pkg_erlmongo_fetch = git
+pkg_erlmongo_repo = https://github.com/SergejJurecko/erlmongo
+pkg_erlmongo_commit = master
+
+PACKAGES += erlog
+pkg_erlog_name = erlog
+pkg_erlog_description = Prolog interpreter in and for Erlang
+pkg_erlog_homepage = https://github.com/rvirding/erlog
+pkg_erlog_fetch = git
+pkg_erlog_repo = https://github.com/rvirding/erlog
+pkg_erlog_commit = master
+
+PACKAGES += erlpass
+pkg_erlpass_name = erlpass
+pkg_erlpass_description = A library to handle password hashing and changing in a safe manner, independent from any kind of storage whatsoever.
+pkg_erlpass_homepage = https://github.com/ferd/erlpass
+pkg_erlpass_fetch = git
+pkg_erlpass_repo = https://github.com/ferd/erlpass
+pkg_erlpass_commit = master
+
+PACKAGES += erlport
+pkg_erlport_name = erlport
+pkg_erlport_description = ErlPort - connect Erlang to other languages
+pkg_erlport_homepage = https://github.com/hdima/erlport
+pkg_erlport_fetch = git
+pkg_erlport_repo = https://github.com/hdima/erlport
+pkg_erlport_commit = master
+
+PACKAGES += erlsh
+pkg_erlsh_name = erlsh
+pkg_erlsh_description = Erlang shell tools
+pkg_erlsh_homepage = https://github.com/proger/erlsh
+pkg_erlsh_fetch = git
+pkg_erlsh_repo = https://github.com/proger/erlsh
+pkg_erlsh_commit = master
+
+PACKAGES += erlsha2
+pkg_erlsha2_name = erlsha2
+pkg_erlsha2_description = SHA-224, SHA-256, SHA-384, SHA-512 implemented in Erlang NIFs.
+pkg_erlsha2_homepage = https://github.com/vinoski/erlsha2
+pkg_erlsha2_fetch = git
+pkg_erlsha2_repo = https://github.com/vinoski/erlsha2
+pkg_erlsha2_commit = master
+
+PACKAGES += erlsom
+pkg_erlsom_name = erlsom
+pkg_erlsom_description = XML parser for Erlang
+pkg_erlsom_homepage = https://github.com/willemdj/erlsom
+pkg_erlsom_fetch = git
+pkg_erlsom_repo = https://github.com/willemdj/erlsom
+pkg_erlsom_commit = master
+
+PACKAGES += erlubi
+pkg_erlubi_name = erlubi
+pkg_erlubi_description = Ubigraph Erlang Client (and Process Visualizer)
+pkg_erlubi_homepage = https://github.com/krestenkrab/erlubi
+pkg_erlubi_fetch = git
+pkg_erlubi_repo = https://github.com/krestenkrab/erlubi
+pkg_erlubi_commit = master
+
+PACKAGES += erlvolt
+pkg_erlvolt_name = erlvolt
+pkg_erlvolt_description = VoltDB Erlang Client Driver
+pkg_erlvolt_homepage = https://github.com/VoltDB/voltdb-client-erlang
+pkg_erlvolt_fetch = git
+pkg_erlvolt_repo = https://github.com/VoltDB/voltdb-client-erlang
+pkg_erlvolt_commit = master
+
+PACKAGES += erlware_commons
+pkg_erlware_commons_name = erlware_commons
+pkg_erlware_commons_description = Erlware Commons is an Erlware project focused on all aspects of reusable Erlang components.
+pkg_erlware_commons_homepage = https://github.com/erlware/erlware_commons
+pkg_erlware_commons_fetch = git
+pkg_erlware_commons_repo = https://github.com/erlware/erlware_commons
+pkg_erlware_commons_commit = master
+
+PACKAGES += erlydtl
+pkg_erlydtl_name = erlydtl
+pkg_erlydtl_description = Django Template Language for Erlang.
+pkg_erlydtl_homepage = https://github.com/erlydtl/erlydtl
+pkg_erlydtl_fetch = git
+pkg_erlydtl_repo = https://github.com/erlydtl/erlydtl
+pkg_erlydtl_commit = master
+
+PACKAGES += errd
+pkg_errd_name = errd
+pkg_errd_description = Erlang RRDTool library
+pkg_errd_homepage = https://github.com/archaelus/errd
+pkg_errd_fetch = git
+pkg_errd_repo = https://github.com/archaelus/errd
+pkg_errd_commit = master
+
+PACKAGES += erserve
+pkg_erserve_name = erserve
+pkg_erserve_description = Erlang/Rserve communication interface
+pkg_erserve_homepage = https://github.com/del/erserve
+pkg_erserve_fetch = git
+pkg_erserve_repo = https://github.com/del/erserve
+pkg_erserve_commit = master
+
+PACKAGES += erwa
+pkg_erwa_name = erwa
+pkg_erwa_description = A WAMP router and client written in Erlang.
+pkg_erwa_homepage = https://github.com/bwegh/erwa
+pkg_erwa_fetch = git
+pkg_erwa_repo = https://github.com/bwegh/erwa
+pkg_erwa_commit = master
+
+PACKAGES += escalus
+pkg_escalus_name = escalus
+pkg_escalus_description = An XMPP client library in Erlang for conveniently testing XMPP servers
+pkg_escalus_homepage = https://github.com/esl/escalus
+pkg_escalus_fetch = git
+pkg_escalus_repo = https://github.com/esl/escalus
+pkg_escalus_commit = master
+
+PACKAGES += esh_mk
+pkg_esh_mk_name = esh_mk
+pkg_esh_mk_description = esh template engine plugin for erlang.mk
+pkg_esh_mk_homepage = https://github.com/crownedgrouse/esh.mk
+pkg_esh_mk_fetch = git
+pkg_esh_mk_repo = https://github.com/crownedgrouse/esh.mk.git
+pkg_esh_mk_commit = master
+
+PACKAGES += espec
+pkg_espec_name = espec
+pkg_espec_description = ESpec: Behaviour driven development framework for Erlang
+pkg_espec_homepage = https://github.com/lucaspiller/espec
+pkg_espec_fetch = git
+pkg_espec_repo = https://github.com/lucaspiller/espec
+pkg_espec_commit = master
+
+PACKAGES += estatsd
+pkg_estatsd_name = estatsd
+pkg_estatsd_description = Erlang stats aggregation app that periodically flushes data to graphite
+pkg_estatsd_homepage = https://github.com/RJ/estatsd
+pkg_estatsd_fetch = git
+pkg_estatsd_repo = https://github.com/RJ/estatsd
+pkg_estatsd_commit = master
+
+PACKAGES += etap
+pkg_etap_name = etap
+pkg_etap_description = etap is a simple Erlang testing library that provides TAP-compliant output.
+pkg_etap_homepage = https://github.com/ngerakines/etap
+pkg_etap_fetch = git
+pkg_etap_repo = https://github.com/ngerakines/etap
+pkg_etap_commit = master
+
+PACKAGES += etest
+pkg_etest_name = etest
+pkg_etest_description = A lightweight, convention over configuration test framework for Erlang
+pkg_etest_homepage = https://github.com/wooga/etest
+pkg_etest_fetch = git
+pkg_etest_repo = https://github.com/wooga/etest
+pkg_etest_commit = master
+
+PACKAGES += etest_http
+pkg_etest_http_name = etest_http
+pkg_etest_http_description = etest assertions around HTTP (client-side)
+pkg_etest_http_homepage = https://github.com/wooga/etest_http
+pkg_etest_http_fetch = git
+pkg_etest_http_repo = https://github.com/wooga/etest_http
+pkg_etest_http_commit = master
+
+PACKAGES += etoml
+pkg_etoml_name = etoml
+pkg_etoml_description = TOML language parser for Erlang
+pkg_etoml_homepage = https://github.com/kalta/etoml
+pkg_etoml_fetch = git
+pkg_etoml_repo = https://github.com/kalta/etoml
+pkg_etoml_commit = master
+
+PACKAGES += eunit
+pkg_eunit_name = eunit
+pkg_eunit_description = The EUnit lightweight unit testing framework for Erlang - this is the canonical development repository.
+pkg_eunit_homepage = https://github.com/richcarl/eunit
+pkg_eunit_fetch = git
+pkg_eunit_repo = https://github.com/richcarl/eunit
+pkg_eunit_commit = master
+
+PACKAGES += eunit_formatters
+pkg_eunit_formatters_name = eunit_formatters
+pkg_eunit_formatters_description = Because eunit's output sucks. Let's make it better.
+pkg_eunit_formatters_homepage = https://github.com/seancribbs/eunit_formatters
+pkg_eunit_formatters_fetch = git
+pkg_eunit_formatters_repo = https://github.com/seancribbs/eunit_formatters
+pkg_eunit_formatters_commit = master
+
+PACKAGES += euthanasia
+pkg_euthanasia_name = euthanasia
+pkg_euthanasia_description = Merciful killer for your Erlang processes
+pkg_euthanasia_homepage = https://github.com/doubleyou/euthanasia
+pkg_euthanasia_fetch = git
+pkg_euthanasia_repo = https://github.com/doubleyou/euthanasia
+pkg_euthanasia_commit = master
+
+PACKAGES += evum
+pkg_evum_name = evum
+pkg_evum_description = Spawn Linux VMs as Erlang processes in the Erlang VM
+pkg_evum_homepage = https://github.com/msantos/evum
+pkg_evum_fetch = git
+pkg_evum_repo = https://github.com/msantos/evum
+pkg_evum_commit = master
+
+PACKAGES += exec
+pkg_exec_name = erlexec
+pkg_exec_description = Execute and control OS processes from Erlang/OTP.
+pkg_exec_homepage = http://saleyn.github.com/erlexec
+pkg_exec_fetch = git
+pkg_exec_repo = https://github.com/saleyn/erlexec
+pkg_exec_commit = master
+
+PACKAGES += exml
+pkg_exml_name = exml
+pkg_exml_description = XML parsing library in Erlang
+pkg_exml_homepage = https://github.com/paulgray/exml
+pkg_exml_fetch = git
+pkg_exml_repo = https://github.com/paulgray/exml
+pkg_exml_commit = master
+
+PACKAGES += exometer
+pkg_exometer_name = exometer
+pkg_exometer_description = Basic measurement objects and probe behavior
+pkg_exometer_homepage = https://github.com/Feuerlabs/exometer
+pkg_exometer_fetch = git
+pkg_exometer_repo = https://github.com/Feuerlabs/exometer
+pkg_exometer_commit = master
+
+PACKAGES += exs1024
+pkg_exs1024_name = exs1024
+pkg_exs1024_description = Xorshift1024star pseudo random number generator for Erlang.
+pkg_exs1024_homepage = https://github.com/jj1bdx/exs1024
+pkg_exs1024_fetch = git
+pkg_exs1024_repo = https://github.com/jj1bdx/exs1024
+pkg_exs1024_commit = master
+
+PACKAGES += exs64
+pkg_exs64_name = exs64
+pkg_exs64_description = Xorshift64star pseudo random number generator for Erlang.
+pkg_exs64_homepage = https://github.com/jj1bdx/exs64
+pkg_exs64_fetch = git
+pkg_exs64_repo = https://github.com/jj1bdx/exs64
+pkg_exs64_commit = master
+
+PACKAGES += exsplus116
+pkg_exsplus116_name = exsplus116
+pkg_exsplus116_description = Xorshift116plus for Erlang
+pkg_exsplus116_homepage = https://github.com/jj1bdx/exsplus116
+pkg_exsplus116_fetch = git
+pkg_exsplus116_repo = https://github.com/jj1bdx/exsplus116
+pkg_exsplus116_commit = master
+
+PACKAGES += exsplus128
+pkg_exsplus128_name = exsplus128
+pkg_exsplus128_description = Xorshift128plus pseudo random number generator for Erlang.
+pkg_exsplus128_homepage = https://github.com/jj1bdx/exsplus128
+pkg_exsplus128_fetch = git
+pkg_exsplus128_repo = https://github.com/jj1bdx/exsplus128
+pkg_exsplus128_commit = master
+
+PACKAGES += ezmq
+pkg_ezmq_name = ezmq
+pkg_ezmq_description = zMQ implemented in Erlang
+pkg_ezmq_homepage = https://github.com/RoadRunnr/ezmq
+pkg_ezmq_fetch = git
+pkg_ezmq_repo = https://github.com/RoadRunnr/ezmq
+pkg_ezmq_commit = master
+
+PACKAGES += ezmtp
+pkg_ezmtp_name = ezmtp
+pkg_ezmtp_description = ZMTP protocol in pure Erlang.
+pkg_ezmtp_homepage = https://github.com/a13x/ezmtp
+pkg_ezmtp_fetch = git
+pkg_ezmtp_repo = https://github.com/a13x/ezmtp
+pkg_ezmtp_commit = master
+
+PACKAGES += fast_disk_log
+pkg_fast_disk_log_name = fast_disk_log
+pkg_fast_disk_log_description = Pool-based asynchronous Erlang disk logger
+pkg_fast_disk_log_homepage = https://github.com/lpgauth/fast_disk_log
+pkg_fast_disk_log_fetch = git
+pkg_fast_disk_log_repo = https://github.com/lpgauth/fast_disk_log
+pkg_fast_disk_log_commit = master
+
+PACKAGES += feeder
+pkg_feeder_name = feeder
+pkg_feeder_description = Stream parse RSS and Atom formatted XML feeds.
+pkg_feeder_homepage = https://github.com/michaelnisi/feeder
+pkg_feeder_fetch = git
+pkg_feeder_repo = https://github.com/michaelnisi/feeder
+pkg_feeder_commit = master
+
+PACKAGES += find_crate
+pkg_find_crate_name = find_crate
+pkg_find_crate_description = Find Rust libs and exes in Erlang application priv directory
+pkg_find_crate_homepage = https://github.com/goertzenator/find_crate
+pkg_find_crate_fetch = git
+pkg_find_crate_repo = https://github.com/goertzenator/find_crate
+pkg_find_crate_commit = master
+
+PACKAGES += fix
+pkg_fix_name = fix
+pkg_fix_description = http://fixprotocol.org/ implementation.
+pkg_fix_homepage = https://github.com/maxlapshin/fix
+pkg_fix_fetch = git
+pkg_fix_repo = https://github.com/maxlapshin/fix
+pkg_fix_commit = master
+
+PACKAGES += flower
+pkg_flower_name = flower
+pkg_flower_description = FlowER - an Erlang OpenFlow development platform
+pkg_flower_homepage = https://github.com/travelping/flower
+pkg_flower_fetch = git
+pkg_flower_repo = https://github.com/travelping/flower
+pkg_flower_commit = master
+
+PACKAGES += fn
+pkg_fn_name = fn
+pkg_fn_description = Function utilities for Erlang
+pkg_fn_homepage = https://github.com/reiddraper/fn
+pkg_fn_fetch = git
+pkg_fn_repo = https://github.com/reiddraper/fn
+pkg_fn_commit = master
+
+PACKAGES += folsom
+pkg_folsom_name = folsom
+pkg_folsom_description = Expose Erlang Events and Metrics
+pkg_folsom_homepage = https://github.com/boundary/folsom
+pkg_folsom_fetch = git
+pkg_folsom_repo = https://github.com/boundary/folsom
+pkg_folsom_commit = master
+
+PACKAGES += folsom_cowboy
+pkg_folsom_cowboy_name = folsom_cowboy
+pkg_folsom_cowboy_description = A Cowboy-based Folsom HTTP wrapper.
+pkg_folsom_cowboy_homepage = https://github.com/boundary/folsom_cowboy
+pkg_folsom_cowboy_fetch = git
+pkg_folsom_cowboy_repo = https://github.com/boundary/folsom_cowboy
+pkg_folsom_cowboy_commit = master
+
+PACKAGES += folsomite
+pkg_folsomite_name = folsomite
+pkg_folsomite_description = Blow up your Graphite / Riemann server with Folsom metrics
+pkg_folsomite_homepage = https://github.com/campanja/folsomite
+pkg_folsomite_fetch = git
+pkg_folsomite_repo = https://github.com/campanja/folsomite
+pkg_folsomite_commit = master
+
+PACKAGES += fs
+pkg_fs_name = fs
+pkg_fs_description = Erlang FileSystem Listener
+pkg_fs_homepage = https://github.com/synrc/fs
+pkg_fs_fetch = git
+pkg_fs_repo = https://github.com/synrc/fs
+pkg_fs_commit = master
+
+PACKAGES += fuse
+pkg_fuse_name = fuse
+pkg_fuse_description = A Circuit Breaker for Erlang
+pkg_fuse_homepage = https://github.com/jlouis/fuse
+pkg_fuse_fetch = git
+pkg_fuse_repo = https://github.com/jlouis/fuse
+pkg_fuse_commit = master
+
+PACKAGES += gcm
+pkg_gcm_name = gcm
+pkg_gcm_description = An Erlang application for Google Cloud Messaging
+pkg_gcm_homepage = https://github.com/pdincau/gcm-erlang
+pkg_gcm_fetch = git
+pkg_gcm_repo = https://github.com/pdincau/gcm-erlang
+pkg_gcm_commit = master
+
+PACKAGES += gcprof
+pkg_gcprof_name = gcprof
+pkg_gcprof_description = Garbage Collection profiler for Erlang
+pkg_gcprof_homepage = https://github.com/knutin/gcprof
+pkg_gcprof_fetch = git
+pkg_gcprof_repo = https://github.com/knutin/gcprof
+pkg_gcprof_commit = master
+
+PACKAGES += geas
+pkg_geas_name = geas
+pkg_geas_description = Guess Erlang Application Scattering
+pkg_geas_homepage = https://github.com/crownedgrouse/geas
+pkg_geas_fetch = git
+pkg_geas_repo = https://github.com/crownedgrouse/geas
+pkg_geas_commit = master
+
+PACKAGES += geef
+pkg_geef_name = geef
+pkg_geef_description = Git NEEEEF (Erlang NIF)
+pkg_geef_homepage = https://github.com/carlosmn/geef
+pkg_geef_fetch = git
+pkg_geef_repo = https://github.com/carlosmn/geef
+pkg_geef_commit = master
+
+PACKAGES += gen_coap
+pkg_gen_coap_name = gen_coap
+pkg_gen_coap_description = Generic Erlang CoAP Client/Server
+pkg_gen_coap_homepage = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_fetch = git
+pkg_gen_coap_repo = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_commit = master
+
+PACKAGES += gen_cycle
+pkg_gen_cycle_name = gen_cycle
+pkg_gen_cycle_description = Simple, generic OTP behaviour for recurring tasks
+pkg_gen_cycle_homepage = https://github.com/aerosol/gen_cycle
+pkg_gen_cycle_fetch = git
+pkg_gen_cycle_repo = https://github.com/aerosol/gen_cycle
+pkg_gen_cycle_commit = develop
+
+PACKAGES += gen_icmp
+pkg_gen_icmp_name = gen_icmp
+pkg_gen_icmp_description = Erlang interface to ICMP sockets
+pkg_gen_icmp_homepage = https://github.com/msantos/gen_icmp
+pkg_gen_icmp_fetch = git
+pkg_gen_icmp_repo = https://github.com/msantos/gen_icmp
+pkg_gen_icmp_commit = master
+
+PACKAGES += gen_leader
+pkg_gen_leader_name = gen_leader
+pkg_gen_leader_description = leader election behavior
+pkg_gen_leader_homepage = https://github.com/garret-smith/gen_leader_revival
+pkg_gen_leader_fetch = git
+pkg_gen_leader_repo = https://github.com/garret-smith/gen_leader_revival
+pkg_gen_leader_commit = master
+
+PACKAGES += gen_nb_server
+pkg_gen_nb_server_name = gen_nb_server
+pkg_gen_nb_server_description = OTP behavior for writing non-blocking servers
+pkg_gen_nb_server_homepage = https://github.com/kevsmith/gen_nb_server
+pkg_gen_nb_server_fetch = git
+pkg_gen_nb_server_repo = https://github.com/kevsmith/gen_nb_server
+pkg_gen_nb_server_commit = master
+
+PACKAGES += gen_paxos
+pkg_gen_paxos_name = gen_paxos
+pkg_gen_paxos_description = An Erlang/OTP-style implementation of the PAXOS distributed consensus protocol
+pkg_gen_paxos_homepage = https://github.com/gburd/gen_paxos
+pkg_gen_paxos_fetch = git
+pkg_gen_paxos_repo = https://github.com/gburd/gen_paxos
+pkg_gen_paxos_commit = master
+
+PACKAGES += gen_rpc
+pkg_gen_rpc_name = gen_rpc
+pkg_gen_rpc_description = A scalable RPC library for Erlang-VM based languages
+pkg_gen_rpc_homepage = https://github.com/priestjim/gen_rpc
+pkg_gen_rpc_fetch = git
+pkg_gen_rpc_repo = https://github.com/priestjim/gen_rpc.git
+pkg_gen_rpc_commit = master
+
+PACKAGES += gen_smtp
+pkg_gen_smtp_name = gen_smtp
+pkg_gen_smtp_description = A generic Erlang SMTP server and client that can be extended via callback modules
+pkg_gen_smtp_homepage = https://github.com/Vagabond/gen_smtp
+pkg_gen_smtp_fetch = git
+pkg_gen_smtp_repo = https://github.com/Vagabond/gen_smtp
+pkg_gen_smtp_commit = master
+
+PACKAGES += gen_tracker
+pkg_gen_tracker_name = gen_tracker
+pkg_gen_tracker_description = Supervisor with ETS handling of children and their metadata
+pkg_gen_tracker_homepage = https://github.com/erlyvideo/gen_tracker
+pkg_gen_tracker_fetch = git
+pkg_gen_tracker_repo = https://github.com/erlyvideo/gen_tracker
+pkg_gen_tracker_commit = master
+
+PACKAGES += gen_unix
+pkg_gen_unix_name = gen_unix
+pkg_gen_unix_description = Erlang Unix socket interface
+pkg_gen_unix_homepage = https://github.com/msantos/gen_unix
+pkg_gen_unix_fetch = git
+pkg_gen_unix_repo = https://github.com/msantos/gen_unix
+pkg_gen_unix_commit = master
+
+PACKAGES += geode
+pkg_geode_name = geode
+pkg_geode_description = geohash/proximity lookup in pure, uncut Erlang.
+pkg_geode_homepage = https://github.com/bradfordw/geode
+pkg_geode_fetch = git
+pkg_geode_repo = https://github.com/bradfordw/geode
+pkg_geode_commit = master
+
+PACKAGES += getopt
+pkg_getopt_name = getopt
+pkg_getopt_description = Module to parse command line arguments using the GNU getopt syntax
+pkg_getopt_homepage = https://github.com/jcomellas/getopt
+pkg_getopt_fetch = git
+pkg_getopt_repo = https://github.com/jcomellas/getopt
+pkg_getopt_commit = master
+
+PACKAGES += gettext
+pkg_gettext_name = gettext
+pkg_gettext_description = Erlang internationalization library.
+pkg_gettext_homepage = https://github.com/etnt/gettext
+pkg_gettext_fetch = git
+pkg_gettext_repo = https://github.com/etnt/gettext
+pkg_gettext_commit = master
+
+PACKAGES += giallo
+pkg_giallo_name = giallo
+pkg_giallo_description = Small and flexible web framework on top of Cowboy
+pkg_giallo_homepage = https://github.com/kivra/giallo
+pkg_giallo_fetch = git
+pkg_giallo_repo = https://github.com/kivra/giallo
+pkg_giallo_commit = master
+
+PACKAGES += gin
+pkg_gin_name = gin
+pkg_gin_description = Guard macros for Erlang, implemented as a parse_transform
+pkg_gin_homepage = https://github.com/mad-cocktail/gin
+pkg_gin_fetch = git
+pkg_gin_repo = https://github.com/mad-cocktail/gin
+pkg_gin_commit = master
+
+PACKAGES += gitty
+pkg_gitty_name = gitty
+pkg_gitty_description = Git access in Erlang
+pkg_gitty_homepage = https://github.com/maxlapshin/gitty
+pkg_gitty_fetch = git
+pkg_gitty_repo = https://github.com/maxlapshin/gitty
+pkg_gitty_commit = master
+
+PACKAGES += gold_fever
+pkg_gold_fever_name = gold_fever
+pkg_gold_fever_description = A Treasure Hunt for Erlangers
+pkg_gold_fever_homepage = https://github.com/inaka/gold_fever
+pkg_gold_fever_fetch = git
+pkg_gold_fever_repo = https://github.com/inaka/gold_fever
+pkg_gold_fever_commit = master
+
+PACKAGES += gpb
+pkg_gpb_name = gpb
+pkg_gpb_description = A Google Protobuf implementation for Erlang
+pkg_gpb_homepage = https://github.com/tomas-abrahamsson/gpb
+pkg_gpb_fetch = git
+pkg_gpb_repo = https://github.com/tomas-abrahamsson/gpb
+pkg_gpb_commit = master
+
+PACKAGES += gproc
+pkg_gproc_name = gproc
+pkg_gproc_description = Extended process registry for Erlang
+pkg_gproc_homepage = https://github.com/uwiger/gproc
+pkg_gproc_fetch = git
+pkg_gproc_repo = https://github.com/uwiger/gproc
+pkg_gproc_commit = master
+
+PACKAGES += grapherl
+pkg_grapherl_name = grapherl
+pkg_grapherl_description = Create graphs of Erlang systems and programs
+pkg_grapherl_homepage = https://github.com/eproxus/grapherl
+pkg_grapherl_fetch = git
+pkg_grapherl_repo = https://github.com/eproxus/grapherl
+pkg_grapherl_commit = master
+
+PACKAGES += grpc
+pkg_grpc_name = grpc
+pkg_grpc_description = gRPC server in Erlang
+pkg_grpc_homepage = https://github.com/Bluehouse-Technology/grpc
+pkg_grpc_fetch = git
+pkg_grpc_repo = https://github.com/Bluehouse-Technology/grpc
+pkg_grpc_commit = master
+
+PACKAGES += grpc_client
+pkg_grpc_client_name = grpc_client
+pkg_grpc_client_description = gRPC client in Erlang
+pkg_grpc_client_homepage = https://github.com/Bluehouse-Technology/grpc_client
+pkg_grpc_client_fetch = git
+pkg_grpc_client_repo = https://github.com/Bluehouse-Technology/grpc_client
+pkg_grpc_client_commit = master
+
+PACKAGES += gun
+pkg_gun_name = gun
+pkg_gun_description = Asynchronous SPDY, HTTP and Websocket client written in Erlang.
+pkg_gun_homepage = http://ninenines.eu
+pkg_gun_fetch = git
+pkg_gun_repo = https://github.com/ninenines/gun
+pkg_gun_commit = master
+
+PACKAGES += gut
+pkg_gut_name = gut
+pkg_gut_description = gut is a template printing, aka scaffolding, tool for Erlang. Like rails generate or yeoman
+pkg_gut_homepage = https://github.com/unbalancedparentheses/gut
+pkg_gut_fetch = git
+pkg_gut_repo = https://github.com/unbalancedparentheses/gut
+pkg_gut_commit = master
+
+PACKAGES += hackney
+pkg_hackney_name = hackney
+pkg_hackney_description = Simple HTTP client in Erlang
+pkg_hackney_homepage = https://github.com/benoitc/hackney
+pkg_hackney_fetch = git
+pkg_hackney_repo = https://github.com/benoitc/hackney
+pkg_hackney_commit = master
+
+PACKAGES += hamcrest
+pkg_hamcrest_name = hamcrest
+pkg_hamcrest_description = Erlang port of Hamcrest
+pkg_hamcrest_homepage = https://github.com/hyperthunk/hamcrest-erlang
+pkg_hamcrest_fetch = git
+pkg_hamcrest_repo = https://github.com/hyperthunk/hamcrest-erlang
+pkg_hamcrest_commit = master
+
+PACKAGES += hanoidb
+pkg_hanoidb_name = hanoidb
+pkg_hanoidb_description = Erlang LSM BTree Storage
+pkg_hanoidb_homepage = https://github.com/krestenkrab/hanoidb
+pkg_hanoidb_fetch = git
+pkg_hanoidb_repo = https://github.com/krestenkrab/hanoidb
+pkg_hanoidb_commit = master
+
+PACKAGES += hottub
+pkg_hottub_name = hottub
+pkg_hottub_description = Permanent Erlang Worker Pool
+pkg_hottub_homepage = https://github.com/bfrog/hottub
+pkg_hottub_fetch = git
+pkg_hottub_repo = https://github.com/bfrog/hottub
+pkg_hottub_commit = master
+
+PACKAGES += hpack
+pkg_hpack_name = hpack
+pkg_hpack_description = HPACK Implementation for Erlang
+pkg_hpack_homepage = https://github.com/joedevivo/hpack
+pkg_hpack_fetch = git
+pkg_hpack_repo = https://github.com/joedevivo/hpack
+pkg_hpack_commit = master
+
+PACKAGES += hyper
+pkg_hyper_name = hyper
+pkg_hyper_description = Erlang implementation of HyperLogLog
+pkg_hyper_homepage = https://github.com/GameAnalytics/hyper
+pkg_hyper_fetch = git
+pkg_hyper_repo = https://github.com/GameAnalytics/hyper
+pkg_hyper_commit = master
+
+PACKAGES += i18n
+pkg_i18n_name = i18n
+pkg_i18n_description = International components for unicode from Erlang (unicode, date, string, number, format, locale, localization, transliteration, icu4e)
+pkg_i18n_homepage = https://github.com/erlang-unicode/i18n
+pkg_i18n_fetch = git
+pkg_i18n_repo = https://github.com/erlang-unicode/i18n
+pkg_i18n_commit = master
+
+PACKAGES += ibrowse
+pkg_ibrowse_name = ibrowse
+pkg_ibrowse_description = Erlang HTTP client
+pkg_ibrowse_homepage = https://github.com/cmullaparthi/ibrowse
+pkg_ibrowse_fetch = git
+pkg_ibrowse_repo = https://github.com/cmullaparthi/ibrowse
+pkg_ibrowse_commit = master
+
+PACKAGES += idna
+pkg_idna_name = idna
+pkg_idna_description = Erlang IDNA lib
+pkg_idna_homepage = https://github.com/benoitc/erlang-idna
+pkg_idna_fetch = git
+pkg_idna_repo = https://github.com/benoitc/erlang-idna
+pkg_idna_commit = master
+
+PACKAGES += ierlang
+pkg_ierlang_name = ierlang
+pkg_ierlang_description = An Erlang language kernel for IPython.
+pkg_ierlang_homepage = https://github.com/robbielynch/ierlang
+pkg_ierlang_fetch = git
+pkg_ierlang_repo = https://github.com/robbielynch/ierlang
+pkg_ierlang_commit = master
+
+PACKAGES += iota
+pkg_iota_name = iota
+pkg_iota_description = iota (Inter-dependency Objective Testing Apparatus) - a tool to enforce clean separation of responsibilities in Erlang code
+pkg_iota_homepage = https://github.com/jpgneves/iota
+pkg_iota_fetch = git
+pkg_iota_repo = https://github.com/jpgneves/iota
+pkg_iota_commit = master
+
+PACKAGES += irc_lib
+pkg_irc_lib_name = irc_lib
+pkg_irc_lib_description = Erlang IRC client library
+pkg_irc_lib_homepage = https://github.com/OtpChatBot/irc_lib
+pkg_irc_lib_fetch = git
+pkg_irc_lib_repo = https://github.com/OtpChatBot/irc_lib
+pkg_irc_lib_commit = master
+
+PACKAGES += ircd
+pkg_ircd_name = ircd
+pkg_ircd_description = A pluggable IRC daemon application/library for Erlang.
+pkg_ircd_homepage = https://github.com/tonyg/erlang-ircd
+pkg_ircd_fetch = git
+pkg_ircd_repo = https://github.com/tonyg/erlang-ircd
+pkg_ircd_commit = master
+
+PACKAGES += iris
+pkg_iris_name = iris
+pkg_iris_description = Iris Erlang binding
+pkg_iris_homepage = https://github.com/project-iris/iris-erl
+pkg_iris_fetch = git
+pkg_iris_repo = https://github.com/project-iris/iris-erl
+pkg_iris_commit = master
+
+PACKAGES += iso8601
+pkg_iso8601_name = iso8601
+pkg_iso8601_description = Erlang ISO 8601 date formatter/parser
+pkg_iso8601_homepage = https://github.com/seansawyer/erlang_iso8601
+pkg_iso8601_fetch = git
+pkg_iso8601_repo = https://github.com/seansawyer/erlang_iso8601
+pkg_iso8601_commit = master
+
+PACKAGES += jamdb_sybase
+pkg_jamdb_sybase_name = jamdb_sybase
+pkg_jamdb_sybase_description = Erlang driver for SAP Sybase ASE
+pkg_jamdb_sybase_homepage = https://github.com/erlangbureau/jamdb_sybase
+pkg_jamdb_sybase_fetch = git
+pkg_jamdb_sybase_repo = https://github.com/erlangbureau/jamdb_sybase
+pkg_jamdb_sybase_commit = master
+
+PACKAGES += jerg
+pkg_jerg_name = jerg
+pkg_jerg_description = JSON Schema to Erlang Records Generator
+pkg_jerg_homepage = https://github.com/ddossot/jerg
+pkg_jerg_fetch = git
+pkg_jerg_repo = https://github.com/ddossot/jerg
+pkg_jerg_commit = master
+
+PACKAGES += jesse
+pkg_jesse_name = jesse
+pkg_jesse_description = jesse (JSON Schema Erlang) is an implementation of a JSON Schema validator for Erlang.
+pkg_jesse_homepage = https://github.com/for-GET/jesse
+pkg_jesse_fetch = git
+pkg_jesse_repo = https://github.com/for-GET/jesse
+pkg_jesse_commit = master
+
+PACKAGES += jiffy
+pkg_jiffy_name = jiffy
+pkg_jiffy_description = JSON NIFs for Erlang.
+pkg_jiffy_homepage = https://github.com/davisp/jiffy
+pkg_jiffy_fetch = git
+pkg_jiffy_repo = https://github.com/davisp/jiffy
+pkg_jiffy_commit = master
+
+PACKAGES += jiffy_v
+pkg_jiffy_v_name = jiffy_v
+pkg_jiffy_v_description = JSON validation utility
+pkg_jiffy_v_homepage = https://github.com/shizzard/jiffy-v
+pkg_jiffy_v_fetch = git
+pkg_jiffy_v_repo = https://github.com/shizzard/jiffy-v
+pkg_jiffy_v_commit = master
+
+PACKAGES += jobs
+pkg_jobs_name = jobs
+pkg_jobs_description = A job scheduler for load regulation
+pkg_jobs_homepage = https://github.com/esl/jobs
+pkg_jobs_fetch = git
+pkg_jobs_repo = https://github.com/esl/jobs
+pkg_jobs_commit = master
+
+PACKAGES += joxa
+pkg_joxa_name = joxa
+pkg_joxa_description = A Modern Lisp for the Erlang VM
+pkg_joxa_homepage = https://github.com/joxa/joxa
+pkg_joxa_fetch = git
+pkg_joxa_repo = https://github.com/joxa/joxa
+pkg_joxa_commit = master
+
+PACKAGES += json
+pkg_json_name = json
+pkg_json_description = A high-level JSON library for Erlang (17.0+)
+pkg_json_homepage = https://github.com/talentdeficit/json
+pkg_json_fetch = git
+pkg_json_repo = https://github.com/talentdeficit/json
+pkg_json_commit = master
+
+PACKAGES += json_rec
+pkg_json_rec_name = json_rec
+pkg_json_rec_description = JSON to Erlang record
+pkg_json_rec_homepage = https://github.com/justinkirby/json_rec
+pkg_json_rec_fetch = git
+pkg_json_rec_repo = https://github.com/justinkirby/json_rec
+pkg_json_rec_commit = master
+
+PACKAGES += jsone
+pkg_jsone_name = jsone
+pkg_jsone_description = An Erlang library for encoding and decoding JSON data.
+pkg_jsone_homepage = https://github.com/sile/jsone
+pkg_jsone_fetch = git
+pkg_jsone_repo = https://github.com/sile/jsone.git
+pkg_jsone_commit = master
+
+PACKAGES += jsonerl
+pkg_jsonerl_name = jsonerl
+pkg_jsonerl_description = Yet another but slightly different Erlang <-> JSON encoder/decoder
+pkg_jsonerl_homepage = https://github.com/lambder/jsonerl
+pkg_jsonerl_fetch = git
+pkg_jsonerl_repo = https://github.com/lambder/jsonerl
+pkg_jsonerl_commit = master
+
+PACKAGES += jsonpath
+pkg_jsonpath_name = jsonpath
+pkg_jsonpath_description = Fast Erlang JSON data retrieval and updates via javascript-like notation
+pkg_jsonpath_homepage = https://github.com/GeneStevens/jsonpath
+pkg_jsonpath_fetch = git
+pkg_jsonpath_repo = https://github.com/GeneStevens/jsonpath
+pkg_jsonpath_commit = master
+
+PACKAGES += jsonx
+pkg_jsonx_name = jsonx
+pkg_jsonx_description = JSONX is an Erlang library for efficient JSON decoding and encoding, written in C.
+pkg_jsonx_homepage = https://github.com/iskra/jsonx
+pkg_jsonx_fetch = git
+pkg_jsonx_repo = https://github.com/iskra/jsonx
+pkg_jsonx_commit = master
+
+PACKAGES += jsx
+pkg_jsx_name = jsx
+pkg_jsx_description = An Erlang application for consuming, producing and manipulating JSON.
+pkg_jsx_homepage = https://github.com/talentdeficit/jsx
+pkg_jsx_fetch = git
+pkg_jsx_repo = https://github.com/talentdeficit/jsx
+pkg_jsx_commit = master
+
+PACKAGES += kafka
+pkg_kafka_name = kafka
+pkg_kafka_description = Kafka consumer and producer in Erlang
+pkg_kafka_homepage = https://github.com/wooga/kafka-erlang
+pkg_kafka_fetch = git
+pkg_kafka_repo = https://github.com/wooga/kafka-erlang
+pkg_kafka_commit = master
+
+PACKAGES += kafka_protocol
+pkg_kafka_protocol_name = kafka_protocol
+pkg_kafka_protocol_description = Kafka protocol Erlang library
+pkg_kafka_protocol_homepage = https://github.com/klarna/kafka_protocol
+pkg_kafka_protocol_fetch = git
+pkg_kafka_protocol_repo = https://github.com/klarna/kafka_protocol.git
+pkg_kafka_protocol_commit = master
+
+PACKAGES += kai
+pkg_kai_name = kai
+pkg_kai_description = DHT storage by Takeshi Inoue
+pkg_kai_homepage = https://github.com/synrc/kai
+pkg_kai_fetch = git
+pkg_kai_repo = https://github.com/synrc/kai
+pkg_kai_commit = master
+
+PACKAGES += katja
+pkg_katja_name = katja
+pkg_katja_description = A simple Riemann client written in Erlang.
+pkg_katja_homepage = https://github.com/nifoc/katja
+pkg_katja_fetch = git
+pkg_katja_repo = https://github.com/nifoc/katja
+pkg_katja_commit = master
+
+PACKAGES += kdht
+pkg_kdht_name = kdht
+pkg_kdht_description = kdht is an Erlang DHT implementation
+pkg_kdht_homepage = https://github.com/kevinlynx/kdht
+pkg_kdht_fetch = git
+pkg_kdht_repo = https://github.com/kevinlynx/kdht
+pkg_kdht_commit = master
+
+PACKAGES += key2value
+pkg_key2value_name = key2value
+pkg_key2value_description = Erlang 2-way map
+pkg_key2value_homepage = https://github.com/okeuday/key2value
+pkg_key2value_fetch = git
+pkg_key2value_repo = https://github.com/okeuday/key2value
+pkg_key2value_commit = master
+
+PACKAGES += keys1value
+pkg_keys1value_name = keys1value
+pkg_keys1value_description = Erlang set associative map for key lists
+pkg_keys1value_homepage = https://github.com/okeuday/keys1value
+pkg_keys1value_fetch = git
+pkg_keys1value_repo = https://github.com/okeuday/keys1value
+pkg_keys1value_commit = master
+
+PACKAGES += kinetic
+pkg_kinetic_name = kinetic
+pkg_kinetic_description = Erlang Kinesis Client
+pkg_kinetic_homepage = https://github.com/AdRoll/kinetic
+pkg_kinetic_fetch = git
+pkg_kinetic_repo = https://github.com/AdRoll/kinetic
+pkg_kinetic_commit = master
+
+PACKAGES += kjell
+pkg_kjell_name = kjell
+pkg_kjell_description = Erlang Shell
+pkg_kjell_homepage = https://github.com/karlll/kjell
+pkg_kjell_fetch = git
+pkg_kjell_repo = https://github.com/karlll/kjell
+pkg_kjell_commit = master
+
+PACKAGES += kraken
+pkg_kraken_name = kraken
+pkg_kraken_description = Distributed Pubsub Server for Realtime Apps
+pkg_kraken_homepage = https://github.com/Asana/kraken
+pkg_kraken_fetch = git
+pkg_kraken_repo = https://github.com/Asana/kraken
+pkg_kraken_commit = master
+
+PACKAGES += kucumberl
+pkg_kucumberl_name = kucumberl
+pkg_kucumberl_description = A pure-Erlang, open-source implementation of Cucumber
+pkg_kucumberl_homepage = https://github.com/openshine/kucumberl
+pkg_kucumberl_fetch = git
+pkg_kucumberl_repo = https://github.com/openshine/kucumberl
+pkg_kucumberl_commit = master
+
+PACKAGES += kvc
+pkg_kvc_name = kvc
+pkg_kvc_description = KVC - Key Value Coding for Erlang data structures
+pkg_kvc_homepage = https://github.com/etrepum/kvc
+pkg_kvc_fetch = git
+pkg_kvc_repo = https://github.com/etrepum/kvc
+pkg_kvc_commit = master
+
+PACKAGES += kvlists
+pkg_kvlists_name = kvlists
+pkg_kvlists_description = Lists of key-value pairs (decoded JSON) in Erlang
+pkg_kvlists_homepage = https://github.com/jcomellas/kvlists
+pkg_kvlists_fetch = git
+pkg_kvlists_repo = https://github.com/jcomellas/kvlists
+pkg_kvlists_commit = master
+
+PACKAGES += kvs
+pkg_kvs_name = kvs
+pkg_kvs_description = Container and Iterator
+pkg_kvs_homepage = https://github.com/synrc/kvs
+pkg_kvs_fetch = git
+pkg_kvs_repo = https://github.com/synrc/kvs
+pkg_kvs_commit = master
+
+PACKAGES += lager
+pkg_lager_name = lager
+pkg_lager_description = A logging framework for Erlang/OTP.
+pkg_lager_homepage = https://github.com/erlang-lager/lager
+pkg_lager_fetch = git
+pkg_lager_repo = https://github.com/erlang-lager/lager
+pkg_lager_commit = master
+
+PACKAGES += lager_amqp_backend
+pkg_lager_amqp_backend_name = lager_amqp_backend
+pkg_lager_amqp_backend_description = AMQP RabbitMQ Lager backend
+pkg_lager_amqp_backend_homepage = https://github.com/jbrisbin/lager_amqp_backend
+pkg_lager_amqp_backend_fetch = git
+pkg_lager_amqp_backend_repo = https://github.com/jbrisbin/lager_amqp_backend
+pkg_lager_amqp_backend_commit = master
+
+PACKAGES += lager_syslog
+pkg_lager_syslog_name = lager_syslog
+pkg_lager_syslog_description = Syslog backend for lager
+pkg_lager_syslog_homepage = https://github.com/erlang-lager/lager_syslog
+pkg_lager_syslog_fetch = git
+pkg_lager_syslog_repo = https://github.com/erlang-lager/lager_syslog
+pkg_lager_syslog_commit = master
+
+PACKAGES += lambdapad
+pkg_lambdapad_name = lambdapad
+pkg_lambdapad_description = Static site generator using Erlang. Yes, Erlang.
+pkg_lambdapad_homepage = https://github.com/gar1t/lambdapad
+pkg_lambdapad_fetch = git
+pkg_lambdapad_repo = https://github.com/gar1t/lambdapad
+pkg_lambdapad_commit = master
+
+PACKAGES += lasp
+pkg_lasp_name = lasp
+pkg_lasp_description = A Language for Distributed, Eventually Consistent Computations
+pkg_lasp_homepage = http://lasp-lang.org/
+pkg_lasp_fetch = git
+pkg_lasp_repo = https://github.com/lasp-lang/lasp
+pkg_lasp_commit = master
+
+PACKAGES += lasse
+pkg_lasse_name = lasse
+pkg_lasse_description = SSE handler for Cowboy
+pkg_lasse_homepage = https://github.com/inaka/lasse
+pkg_lasse_fetch = git
+pkg_lasse_repo = https://github.com/inaka/lasse
+pkg_lasse_commit = master
+
+PACKAGES += ldap
+pkg_ldap_name = ldap
+pkg_ldap_description = LDAP server written in Erlang
+pkg_ldap_homepage = https://github.com/spawnproc/ldap
+pkg_ldap_fetch = git
+pkg_ldap_repo = https://github.com/spawnproc/ldap
+pkg_ldap_commit = master
+
+PACKAGES += lethink
+pkg_lethink_name = lethink
+pkg_lethink_description = Erlang driver for RethinkDB
+pkg_lethink_homepage = https://github.com/taybin/lethink
+pkg_lethink_fetch = git
+pkg_lethink_repo = https://github.com/taybin/lethink
+pkg_lethink_commit = master
+
+PACKAGES += lfe
+pkg_lfe_name = lfe
+pkg_lfe_description = Lisp Flavoured Erlang (LFE)
+pkg_lfe_homepage = https://github.com/rvirding/lfe
+pkg_lfe_fetch = git
+pkg_lfe_repo = https://github.com/rvirding/lfe
+pkg_lfe_commit = master
+
+PACKAGES += ling
+pkg_ling_name = ling
+pkg_ling_description = Erlang on Xen
+pkg_ling_homepage = https://github.com/cloudozer/ling
+pkg_ling_fetch = git
+pkg_ling_repo = https://github.com/cloudozer/ling
+pkg_ling_commit = master
+
+PACKAGES += live
+pkg_live_name = live
+pkg_live_description = Automated module and configuration reloader.
+pkg_live_homepage = http://ninenines.eu
+pkg_live_fetch = git
+pkg_live_repo = https://github.com/ninenines/live
+pkg_live_commit = master
+
+PACKAGES += lmq
+pkg_lmq_name = lmq
+pkg_lmq_description = Lightweight Message Queue
+pkg_lmq_homepage = https://github.com/iij/lmq
+pkg_lmq_fetch = git
+pkg_lmq_repo = https://github.com/iij/lmq
+pkg_lmq_commit = master
+
+PACKAGES += locker
+pkg_locker_name = locker
+pkg_locker_description = Atomic distributed 'check and set' for short-lived keys
+pkg_locker_homepage = https://github.com/wooga/locker
+pkg_locker_fetch = git
+pkg_locker_repo = https://github.com/wooga/locker
+pkg_locker_commit = master
+
+PACKAGES += locks
+pkg_locks_name = locks
+pkg_locks_description = A scalable, deadlock-resolving resource locker
+pkg_locks_homepage = https://github.com/uwiger/locks
+pkg_locks_fetch = git
+pkg_locks_repo = https://github.com/uwiger/locks
+pkg_locks_commit = master
+
+PACKAGES += log4erl
+pkg_log4erl_name = log4erl
+pkg_log4erl_description = A logger for Erlang in the spirit of Log4J.
+pkg_log4erl_homepage = https://github.com/ahmednawras/log4erl
+pkg_log4erl_fetch = git
+pkg_log4erl_repo = https://github.com/ahmednawras/log4erl
+pkg_log4erl_commit = master
+
+PACKAGES += lol
+pkg_lol_name = lol
+pkg_lol_description = Lisp on erLang, and programming is fun again
+pkg_lol_homepage = https://github.com/b0oh/lol
+pkg_lol_fetch = git
+pkg_lol_repo = https://github.com/b0oh/lol
+pkg_lol_commit = master
+
+PACKAGES += lucid
+pkg_lucid_name = lucid
+pkg_lucid_description = HTTP/2 server written in Erlang
+pkg_lucid_homepage = https://github.com/tatsuhiro-t/lucid
+pkg_lucid_fetch = git
+pkg_lucid_repo = https://github.com/tatsuhiro-t/lucid
+pkg_lucid_commit = master
+
+PACKAGES += luerl
+pkg_luerl_name = luerl
+pkg_luerl_description = Lua in Erlang
+pkg_luerl_homepage = https://github.com/rvirding/luerl
+pkg_luerl_fetch = git
+pkg_luerl_repo = https://github.com/rvirding/luerl
+pkg_luerl_commit = develop
+
+PACKAGES += luwak
+pkg_luwak_name = luwak
+pkg_luwak_description = Large-object storage interface for Riak
+pkg_luwak_homepage = https://github.com/basho/luwak
+pkg_luwak_fetch = git
+pkg_luwak_repo = https://github.com/basho/luwak
+pkg_luwak_commit = master
+
+PACKAGES += lux
+pkg_lux_name = lux
+pkg_lux_description = Lux (LUcid eXpect scripting) simplifies test automation and provides an Expect-style execution of commands
+pkg_lux_homepage = https://github.com/hawk/lux
+pkg_lux_fetch = git
+pkg_lux_repo = https://github.com/hawk/lux
+pkg_lux_commit = master
+
+PACKAGES += machi
+pkg_machi_name = machi
+pkg_machi_description = Machi file store
+pkg_machi_homepage = https://github.com/basho/machi
+pkg_machi_fetch = git
+pkg_machi_repo = https://github.com/basho/machi
+pkg_machi_commit = master
+
+PACKAGES += mad
+pkg_mad_name = mad
+pkg_mad_description = Small and Fast Rebar Replacement
+pkg_mad_homepage = https://github.com/synrc/mad
+pkg_mad_fetch = git
+pkg_mad_repo = https://github.com/synrc/mad
+pkg_mad_commit = master
+
+PACKAGES += marina
+pkg_marina_name = marina
+pkg_marina_description = Non-blocking Erlang Cassandra CQL3 client
+pkg_marina_homepage = https://github.com/lpgauth/marina
+pkg_marina_fetch = git
+pkg_marina_repo = https://github.com/lpgauth/marina
+pkg_marina_commit = master
+
+PACKAGES += mavg
+pkg_mavg_name = mavg
+pkg_mavg_description = Erlang :: Exponential moving average library
+pkg_mavg_homepage = https://github.com/EchoTeam/mavg
+pkg_mavg_fetch = git
+pkg_mavg_repo = https://github.com/EchoTeam/mavg
+pkg_mavg_commit = master
+
+PACKAGES += mc_erl
+pkg_mc_erl_name = mc_erl
+pkg_mc_erl_description = mc-erl is a server for Minecraft 1.4.7 written in Erlang.
+pkg_mc_erl_homepage = https://github.com/clonejo/mc-erl
+pkg_mc_erl_fetch = git
+pkg_mc_erl_repo = https://github.com/clonejo/mc-erl
+pkg_mc_erl_commit = master
+
+PACKAGES += mcd
+pkg_mcd_name = mcd
+pkg_mcd_description = Fast memcached protocol client in pure Erlang
+pkg_mcd_homepage = https://github.com/EchoTeam/mcd
+pkg_mcd_fetch = git
+pkg_mcd_repo = https://github.com/EchoTeam/mcd
+pkg_mcd_commit = master
+
+PACKAGES += mcerlang
+pkg_mcerlang_name = mcerlang
+pkg_mcerlang_description = The McErlang model checker for Erlang
+pkg_mcerlang_homepage = https://github.com/fredlund/McErlang
+pkg_mcerlang_fetch = git
+pkg_mcerlang_repo = https://github.com/fredlund/McErlang
+pkg_mcerlang_commit = master
+
+PACKAGES += meck
+pkg_meck_name = meck
+pkg_meck_description = A mocking library for Erlang
+pkg_meck_homepage = https://github.com/eproxus/meck
+pkg_meck_fetch = git
+pkg_meck_repo = https://github.com/eproxus/meck
+pkg_meck_commit = master
+
+PACKAGES += mekao
+pkg_mekao_name = mekao
+pkg_mekao_description = SQL constructor
+pkg_mekao_homepage = https://github.com/ddosia/mekao
+pkg_mekao_fetch = git
+pkg_mekao_repo = https://github.com/ddosia/mekao
+pkg_mekao_commit = master
+
+PACKAGES += memo
+pkg_memo_name = memo
+pkg_memo_description = Erlang memoization server
+pkg_memo_homepage = https://github.com/tuncer/memo
+pkg_memo_fetch = git
+pkg_memo_repo = https://github.com/tuncer/memo
+pkg_memo_commit = master
+
+PACKAGES += merge_index
+pkg_merge_index_name = merge_index
+pkg_merge_index_description = MergeIndex is an Erlang library for storing ordered sets on disk. It is very similar to an SSTable (in Google's Bigtable) or an HFile (in Hadoop).
+pkg_merge_index_homepage = https://github.com/basho/merge_index
+pkg_merge_index_fetch = git
+pkg_merge_index_repo = https://github.com/basho/merge_index
+pkg_merge_index_commit = master
+
+PACKAGES += merl
+pkg_merl_name = merl
+pkg_merl_description = Metaprogramming in Erlang
+pkg_merl_homepage = https://github.com/richcarl/merl
+pkg_merl_fetch = git
+pkg_merl_repo = https://github.com/richcarl/merl
+pkg_merl_commit = master
+
+PACKAGES += mimerl
+pkg_mimerl_name = mimerl
+pkg_mimerl_description = Library to handle mimetypes
+pkg_mimerl_homepage = https://github.com/benoitc/mimerl
+pkg_mimerl_fetch = git
+pkg_mimerl_repo = https://github.com/benoitc/mimerl
+pkg_mimerl_commit = master
+
+PACKAGES += mimetypes
+pkg_mimetypes_name = mimetypes
+pkg_mimetypes_description = Erlang MIME types library
+pkg_mimetypes_homepage = https://github.com/spawngrid/mimetypes
+pkg_mimetypes_fetch = git
+pkg_mimetypes_repo = https://github.com/spawngrid/mimetypes
+pkg_mimetypes_commit = master
+
+PACKAGES += mixer
+pkg_mixer_name = mixer
+pkg_mixer_description = Mix in functions from other modules
+pkg_mixer_homepage = https://github.com/chef/mixer
+pkg_mixer_fetch = git
+pkg_mixer_repo = https://github.com/chef/mixer
+pkg_mixer_commit = master
+
+PACKAGES += mochiweb
+pkg_mochiweb_name = mochiweb
+pkg_mochiweb_description = MochiWeb is an Erlang library for building lightweight HTTP servers.
+pkg_mochiweb_homepage = https://github.com/mochi/mochiweb
+pkg_mochiweb_fetch = git
+pkg_mochiweb_repo = https://github.com/mochi/mochiweb
+pkg_mochiweb_commit = master
+
+PACKAGES += mochiweb_xpath
+pkg_mochiweb_xpath_name = mochiweb_xpath
+pkg_mochiweb_xpath_description = XPath support for mochiweb's HTML parser
+pkg_mochiweb_xpath_homepage = https://github.com/retnuh/mochiweb_xpath
+pkg_mochiweb_xpath_fetch = git
+pkg_mochiweb_xpath_repo = https://github.com/retnuh/mochiweb_xpath
+pkg_mochiweb_xpath_commit = master
+
+PACKAGES += mockgyver
+pkg_mockgyver_name = mockgyver
+pkg_mockgyver_description = A mocking library for Erlang
+pkg_mockgyver_homepage = https://github.com/klajo/mockgyver
+pkg_mockgyver_fetch = git
+pkg_mockgyver_repo = https://github.com/klajo/mockgyver
+pkg_mockgyver_commit = master
+
+PACKAGES += modlib
+pkg_modlib_name = modlib
+pkg_modlib_description = Web framework based on Erlang's inets httpd
+pkg_modlib_homepage = https://github.com/gar1t/modlib
+pkg_modlib_fetch = git
+pkg_modlib_repo = https://github.com/gar1t/modlib
+pkg_modlib_commit = master
+
+PACKAGES += mongodb
+pkg_mongodb_name = mongodb
+pkg_mongodb_description = MongoDB driver for Erlang
+pkg_mongodb_homepage = https://github.com/comtihon/mongodb-erlang
+pkg_mongodb_fetch = git
+pkg_mongodb_repo = https://github.com/comtihon/mongodb-erlang
+pkg_mongodb_commit = master
+
+PACKAGES += mongooseim
+pkg_mongooseim_name = mongooseim
+pkg_mongooseim_description = Jabber / XMPP server with focus on performance and scalability, by Erlang Solutions
+pkg_mongooseim_homepage = https://www.erlang-solutions.com/products/mongooseim-massively-scalable-ejabberd-platform
+pkg_mongooseim_fetch = git
+pkg_mongooseim_repo = https://github.com/esl/MongooseIM
+pkg_mongooseim_commit = master
+
+PACKAGES += moyo
+pkg_moyo_name = moyo
+pkg_moyo_description = Erlang utility functions library
+pkg_moyo_homepage = https://github.com/dwango/moyo
+pkg_moyo_fetch = git
+pkg_moyo_repo = https://github.com/dwango/moyo
+pkg_moyo_commit = master
+
+PACKAGES += msgpack
+pkg_msgpack_name = msgpack
+pkg_msgpack_description = MessagePack (de)serializer implementation for Erlang
+pkg_msgpack_homepage = https://github.com/msgpack/msgpack-erlang
+pkg_msgpack_fetch = git
+pkg_msgpack_repo = https://github.com/msgpack/msgpack-erlang
+pkg_msgpack_commit = master
+
+PACKAGES += mu2
+pkg_mu2_name = mu2
+pkg_mu2_description = Erlang mutation testing tool
+pkg_mu2_homepage = https://github.com/ramsay-t/mu2
+pkg_mu2_fetch = git
+pkg_mu2_repo = https://github.com/ramsay-t/mu2
+pkg_mu2_commit = master
+
+PACKAGES += mustache
+pkg_mustache_name = mustache
+pkg_mustache_description = Mustache template engine for Erlang.
+pkg_mustache_homepage = https://github.com/mojombo/mustache.erl
+pkg_mustache_fetch = git
+pkg_mustache_repo = https://github.com/mojombo/mustache.erl
+pkg_mustache_commit = master
+
+PACKAGES += myproto
+pkg_myproto_name = myproto
+pkg_myproto_description = MySQL Server Protocol in Erlang
+pkg_myproto_homepage = https://github.com/altenwald/myproto
+pkg_myproto_fetch = git
+pkg_myproto_repo = https://github.com/altenwald/myproto
+pkg_myproto_commit = master
+
+PACKAGES += mysql
+pkg_mysql_name = mysql
+pkg_mysql_description = MySQL client library for Erlang/OTP
+pkg_mysql_homepage = https://github.com/mysql-otp/mysql-otp
+pkg_mysql_fetch = git
+pkg_mysql_repo = https://github.com/mysql-otp/mysql-otp
+pkg_mysql_commit = 1.5.1
+
+PACKAGES += n2o
+pkg_n2o_name = n2o
+pkg_n2o_description = WebSocket Application Server
+pkg_n2o_homepage = https://github.com/5HT/n2o
+pkg_n2o_fetch = git
+pkg_n2o_repo = https://github.com/5HT/n2o
+pkg_n2o_commit = master
+
+PACKAGES += nat_upnp
+pkg_nat_upnp_name = nat_upnp
+pkg_nat_upnp_description = Erlang library to map your internal port to an external one using UPnP IGD
+pkg_nat_upnp_homepage = https://github.com/benoitc/nat_upnp
+pkg_nat_upnp_fetch = git
+pkg_nat_upnp_repo = https://github.com/benoitc/nat_upnp
+pkg_nat_upnp_commit = master
+
+PACKAGES += neo4j
+pkg_neo4j_name = neo4j
+pkg_neo4j_description = Erlang client library for Neo4J.
+pkg_neo4j_homepage = https://github.com/dmitriid/neo4j-erlang
+pkg_neo4j_fetch = git
+pkg_neo4j_repo = https://github.com/dmitriid/neo4j-erlang
+pkg_neo4j_commit = master
+
+PACKAGES += neotoma
+pkg_neotoma_name = neotoma
+pkg_neotoma_description = Erlang library and packrat parser-generator for parsing expression grammars.
+pkg_neotoma_homepage = https://github.com/seancribbs/neotoma
+pkg_neotoma_fetch = git
+pkg_neotoma_repo = https://github.com/seancribbs/neotoma
+pkg_neotoma_commit = master
+
+PACKAGES += newrelic
+pkg_newrelic_name = newrelic
+pkg_newrelic_description = Erlang library for sending metrics to New Relic
+pkg_newrelic_homepage = https://github.com/wooga/newrelic-erlang
+pkg_newrelic_fetch = git
+pkg_newrelic_repo = https://github.com/wooga/newrelic-erlang
+pkg_newrelic_commit = master
+
+PACKAGES += nifty
+pkg_nifty_name = nifty
+pkg_nifty_description = Erlang NIF wrapper generator
+pkg_nifty_homepage = https://github.com/parapluu/nifty
+pkg_nifty_fetch = git
+pkg_nifty_repo = https://github.com/parapluu/nifty
+pkg_nifty_commit = master
+
+PACKAGES += nitrogen_core
+pkg_nitrogen_core_name = nitrogen_core
+pkg_nitrogen_core_description = The core Nitrogen library.
+pkg_nitrogen_core_homepage = http://nitrogenproject.com/
+pkg_nitrogen_core_fetch = git
+pkg_nitrogen_core_repo = https://github.com/nitrogen/nitrogen_core
+pkg_nitrogen_core_commit = master
+
+PACKAGES += nkbase
+pkg_nkbase_name = nkbase
+pkg_nkbase_description = NkBASE distributed database
+pkg_nkbase_homepage = https://github.com/Nekso/nkbase
+pkg_nkbase_fetch = git
+pkg_nkbase_repo = https://github.com/Nekso/nkbase
+pkg_nkbase_commit = develop
+
+PACKAGES += nkdocker
+pkg_nkdocker_name = nkdocker
+pkg_nkdocker_description = Erlang Docker client
+pkg_nkdocker_homepage = https://github.com/Nekso/nkdocker
+pkg_nkdocker_fetch = git
+pkg_nkdocker_repo = https://github.com/Nekso/nkdocker
+pkg_nkdocker_commit = master
+
+PACKAGES += nkpacket
+pkg_nkpacket_name = nkpacket
+pkg_nkpacket_description = Generic Erlang transport layer
+pkg_nkpacket_homepage = https://github.com/Nekso/nkpacket
+pkg_nkpacket_fetch = git
+pkg_nkpacket_repo = https://github.com/Nekso/nkpacket
+pkg_nkpacket_commit = master
+
+PACKAGES += nksip
+pkg_nksip_name = nksip
+pkg_nksip_description = Erlang SIP application server
+pkg_nksip_homepage = https://github.com/kalta/nksip
+pkg_nksip_fetch = git
+pkg_nksip_repo = https://github.com/kalta/nksip
+pkg_nksip_commit = master
+
+PACKAGES += nodefinder
+pkg_nodefinder_name = nodefinder
+pkg_nodefinder_description = Automatic node discovery via UDP multicast
+pkg_nodefinder_homepage = https://github.com/erlanger/nodefinder
+pkg_nodefinder_fetch = git
+pkg_nodefinder_repo = https://github.com/okeuday/nodefinder
+pkg_nodefinder_commit = master
+
+PACKAGES += nprocreg
+pkg_nprocreg_name = nprocreg
+pkg_nprocreg_description = Minimal Distributed Erlang Process Registry
+pkg_nprocreg_homepage = http://nitrogenproject.com/
+pkg_nprocreg_fetch = git
+pkg_nprocreg_repo = https://github.com/nitrogen/nprocreg
+pkg_nprocreg_commit = master
+
+PACKAGES += oauth
+pkg_oauth_name = oauth
+pkg_oauth_description = An Erlang OAuth 1.0 implementation
+pkg_oauth_homepage = https://github.com/tim/erlang-oauth
+pkg_oauth_fetch = git
+pkg_oauth_repo = https://github.com/tim/erlang-oauth
+pkg_oauth_commit = master
+
+PACKAGES += oauth2
+pkg_oauth2_name = oauth2
+pkg_oauth2_description = Erlang OAuth2 implementation
+pkg_oauth2_homepage = https://github.com/kivra/oauth2
+pkg_oauth2_fetch = git
+pkg_oauth2_repo = https://github.com/kivra/oauth2
+pkg_oauth2_commit = master
+
+PACKAGES += observer_cli
+pkg_observer_cli_name = observer_cli
+pkg_observer_cli_description = Visualize Erlang/Elixir Nodes On The Command Line
+pkg_observer_cli_homepage = http://zhongwencool.github.io/observer_cli
+pkg_observer_cli_fetch = git
+pkg_observer_cli_repo = https://github.com/zhongwencool/observer_cli
+pkg_observer_cli_commit = master
+
+PACKAGES += octopus
+pkg_octopus_name = octopus
+pkg_octopus_description = Small and flexible pool manager written in Erlang
+pkg_octopus_homepage = https://github.com/erlangbureau/octopus
+pkg_octopus_fetch = git
+pkg_octopus_repo = https://github.com/erlangbureau/octopus
+pkg_octopus_commit = master
+
+PACKAGES += of_protocol
+pkg_of_protocol_name = of_protocol
+pkg_of_protocol_description = OpenFlow Protocol Library for Erlang
+pkg_of_protocol_homepage = https://github.com/FlowForwarding/of_protocol
+pkg_of_protocol_fetch = git
+pkg_of_protocol_repo = https://github.com/FlowForwarding/of_protocol
+pkg_of_protocol_commit = master
+
+PACKAGES += opencouch
+pkg_opencouch_name = couch
+pkg_opencouch_description = An embeddable document-oriented database compatible with Apache CouchDB
+pkg_opencouch_homepage = https://github.com/benoitc/opencouch
+pkg_opencouch_fetch = git
+pkg_opencouch_repo = https://github.com/benoitc/opencouch
+pkg_opencouch_commit = master
+
+PACKAGES += openflow
+pkg_openflow_name = openflow
+pkg_openflow_description = An OpenFlow controller written in pure Erlang
+pkg_openflow_homepage = https://github.com/renatoaguiar/erlang-openflow
+pkg_openflow_fetch = git
+pkg_openflow_repo = https://github.com/renatoaguiar/erlang-openflow
+pkg_openflow_commit = master
+
+PACKAGES += openid
+pkg_openid_name = openid
+pkg_openid_description = Erlang OpenID
+pkg_openid_homepage = https://github.com/brendonh/erl_openid
+pkg_openid_fetch = git
+pkg_openid_repo = https://github.com/brendonh/erl_openid
+pkg_openid_commit = master
+
+PACKAGES += openpoker
+pkg_openpoker_name = openpoker
+pkg_openpoker_description = Genesis Texas hold'em Game Server
+pkg_openpoker_homepage = https://github.com/hpyhacking/openpoker
+pkg_openpoker_fetch = git
+pkg_openpoker_repo = https://github.com/hpyhacking/openpoker
+pkg_openpoker_commit = master
+
+PACKAGES += otpbp
+pkg_otpbp_name = otpbp
+pkg_otpbp_description = Parse transformer for using new OTP functions in old Erlang/OTP releases (R15, R16, 17, 18, 19)
+pkg_otpbp_homepage = https://github.com/Ledest/otpbp
+pkg_otpbp_fetch = git
+pkg_otpbp_repo = https://github.com/Ledest/otpbp
+pkg_otpbp_commit = master
+
+PACKAGES += pal
+pkg_pal_name = pal
+pkg_pal_description = Pragmatic Authentication Library
+pkg_pal_homepage = https://github.com/manifest/pal
+pkg_pal_fetch = git
+pkg_pal_repo = https://github.com/manifest/pal
+pkg_pal_commit = master
+
+PACKAGES += parse_trans
+pkg_parse_trans_name = parse_trans
+pkg_parse_trans_description = Parse transform utilities for Erlang
+pkg_parse_trans_homepage = https://github.com/uwiger/parse_trans
+pkg_parse_trans_fetch = git
+pkg_parse_trans_repo = https://github.com/uwiger/parse_trans
+pkg_parse_trans_commit = master
+
+PACKAGES += parsexml
+pkg_parsexml_name = parsexml
+pkg_parsexml_description = Simple DOM XML parser with a convenient and very simple API
+pkg_parsexml_homepage = https://github.com/maxlapshin/parsexml
+pkg_parsexml_fetch = git
+pkg_parsexml_repo = https://github.com/maxlapshin/parsexml
+pkg_parsexml_commit = master
+
+PACKAGES += partisan
+pkg_partisan_name = partisan
+pkg_partisan_description = High-performance, high-scalability distributed computing with Erlang and Elixir.
+pkg_partisan_homepage = http://partisan.cloud
+pkg_partisan_fetch = git
+pkg_partisan_repo = https://github.com/lasp-lang/partisan
+pkg_partisan_commit = master
+
+PACKAGES += pegjs
+pkg_pegjs_name = pegjs
+pkg_pegjs_description = An implementation of PEG.js grammar for Erlang.
+pkg_pegjs_homepage = https://github.com/dmitriid/pegjs
+pkg_pegjs_fetch = git
+pkg_pegjs_repo = https://github.com/dmitriid/pegjs
+pkg_pegjs_commit = master
+
+PACKAGES += percept2
+pkg_percept2_name = percept2
+pkg_percept2_description = Concurrent profiling tool for Erlang
+pkg_percept2_homepage = https://github.com/huiqing/percept2
+pkg_percept2_fetch = git
+pkg_percept2_repo = https://github.com/huiqing/percept2
+pkg_percept2_commit = master
+
+PACKAGES += pgo
+pkg_pgo_name = pgo
+pkg_pgo_description = Erlang Postgres client and connection pool
+pkg_pgo_homepage = https://github.com/erleans/pgo
+pkg_pgo_fetch = git
+pkg_pgo_repo = https://github.com/erleans/pgo.git
+pkg_pgo_commit = master
+
+PACKAGES += pgsql
+pkg_pgsql_name = pgsql
+pkg_pgsql_description = Erlang PostgreSQL driver
+pkg_pgsql_homepage = https://github.com/semiocast/pgsql
+pkg_pgsql_fetch = git
+pkg_pgsql_repo = https://github.com/semiocast/pgsql
+pkg_pgsql_commit = master
+
+PACKAGES += pkgx
+pkg_pkgx_name = pkgx
+pkg_pkgx_description = Build .deb packages from Erlang releases
+pkg_pkgx_homepage = https://github.com/arjan/pkgx
+pkg_pkgx_fetch = git
+pkg_pkgx_repo = https://github.com/arjan/pkgx
+pkg_pkgx_commit = master
+
+PACKAGES += pkt
+pkg_pkt_name = pkt
+pkg_pkt_description = Erlang network protocol library
+pkg_pkt_homepage = https://github.com/msantos/pkt
+pkg_pkt_fetch = git
+pkg_pkt_repo = https://github.com/msantos/pkt
+pkg_pkt_commit = master
+
+PACKAGES += plain_fsm
+pkg_plain_fsm_name = plain_fsm
+pkg_plain_fsm_description = A behaviour/support library for writing plain Erlang FSMs.
+pkg_plain_fsm_homepage = https://github.com/uwiger/plain_fsm
+pkg_plain_fsm_fetch = git
+pkg_plain_fsm_repo = https://github.com/uwiger/plain_fsm
+pkg_plain_fsm_commit = master
+
+PACKAGES += plumtree
+pkg_plumtree_name = plumtree
+pkg_plumtree_description = Epidemic Broadcast Trees
+pkg_plumtree_homepage = https://github.com/helium/plumtree
+pkg_plumtree_fetch = git
+pkg_plumtree_repo = https://github.com/helium/plumtree
+pkg_plumtree_commit = master
+
+PACKAGES += pmod_transform
+pkg_pmod_transform_name = pmod_transform
+pkg_pmod_transform_description = Parse transform for parameterized modules
+pkg_pmod_transform_homepage = https://github.com/erlang/pmod_transform
+pkg_pmod_transform_fetch = git
+pkg_pmod_transform_repo = https://github.com/erlang/pmod_transform
+pkg_pmod_transform_commit = master
+
+PACKAGES += pobox
+pkg_pobox_name = pobox
+pkg_pobox_description = External buffer processes to protect against mailbox overflow in Erlang
+pkg_pobox_homepage = https://github.com/ferd/pobox
+pkg_pobox_fetch = git
+pkg_pobox_repo = https://github.com/ferd/pobox
+pkg_pobox_commit = master
+
+PACKAGES += ponos
+pkg_ponos_name = ponos
+pkg_ponos_description = ponos is a simple yet powerful load generator written in Erlang
+pkg_ponos_homepage = https://github.com/klarna/ponos
+pkg_ponos_fetch = git
+pkg_ponos_repo = https://github.com/klarna/ponos
+pkg_ponos_commit = master
+
+PACKAGES += poolboy
+pkg_poolboy_name = poolboy
+pkg_poolboy_description = A hunky Erlang worker pool factory
+pkg_poolboy_homepage = https://github.com/devinus/poolboy
+pkg_poolboy_fetch = git
+pkg_poolboy_repo = https://github.com/devinus/poolboy
+pkg_poolboy_commit = master
+
+PACKAGES += pooler
+pkg_pooler_name = pooler
+pkg_pooler_description = An OTP Process Pool Application
+pkg_pooler_homepage = https://github.com/seth/pooler
+pkg_pooler_fetch = git
+pkg_pooler_repo = https://github.com/seth/pooler
+pkg_pooler_commit = master
+
+PACKAGES += pqueue
+pkg_pqueue_name = pqueue
+pkg_pqueue_description = Erlang Priority Queues
+pkg_pqueue_homepage = https://github.com/okeuday/pqueue
+pkg_pqueue_fetch = git
+pkg_pqueue_repo = https://github.com/okeuday/pqueue
+pkg_pqueue_commit = master
+
+PACKAGES += procket
+pkg_procket_name = procket
+pkg_procket_description = Erlang interface to low level socket operations
+pkg_procket_homepage = http://blog.listincomprehension.com/search/label/procket
+pkg_procket_fetch = git
+pkg_procket_repo = https://github.com/msantos/procket
+pkg_procket_commit = master
+
+PACKAGES += prometheus
+pkg_prometheus_name = prometheus
+pkg_prometheus_description = Prometheus.io client in Erlang
+pkg_prometheus_homepage = https://github.com/deadtrickster/prometheus.erl
+pkg_prometheus_fetch = git
+pkg_prometheus_repo = https://github.com/deadtrickster/prometheus.erl
+pkg_prometheus_commit = master
+
+PACKAGES += prop
+pkg_prop_name = prop
+pkg_prop_description = An Erlang code scaffolding and generator system.
+pkg_prop_homepage = https://github.com/nuex/prop
+pkg_prop_fetch = git
+pkg_prop_repo = https://github.com/nuex/prop
+pkg_prop_commit = master
+
+PACKAGES += proper
+pkg_proper_name = proper
+pkg_proper_description = PropEr: a QuickCheck-inspired property-based testing tool for Erlang.
+pkg_proper_homepage = http://proper.softlab.ntua.gr
+pkg_proper_fetch = git
+pkg_proper_repo = https://github.com/manopapad/proper
+pkg_proper_commit = master
+
+PACKAGES += props
+pkg_props_name = props
+pkg_props_description = Property structure library
+pkg_props_homepage = https://github.com/greyarea/props
+pkg_props_fetch = git
+pkg_props_repo = https://github.com/greyarea/props
+pkg_props_commit = master
+
+PACKAGES += protobuffs
+pkg_protobuffs_name = protobuffs
+pkg_protobuffs_description = An implementation of Google's Protocol Buffers for Erlang, based on ngerakines/erlang_protobuffs.
+pkg_protobuffs_homepage = https://github.com/basho/erlang_protobuffs
+pkg_protobuffs_fetch = git
+pkg_protobuffs_repo = https://github.com/basho/erlang_protobuffs
+pkg_protobuffs_commit = master
+
+PACKAGES += psycho
+pkg_psycho_name = psycho
+pkg_psycho_description = HTTP server that provides a WSGI-like interface for applications and middleware.
+pkg_psycho_homepage = https://github.com/gar1t/psycho
+pkg_psycho_fetch = git
+pkg_psycho_repo = https://github.com/gar1t/psycho
+pkg_psycho_commit = master
+
+PACKAGES += purity
+pkg_purity_name = purity
+pkg_purity_description = A side-effect analyzer for Erlang
+pkg_purity_homepage = https://github.com/mpitid/purity
+pkg_purity_fetch = git
+pkg_purity_repo = https://github.com/mpitid/purity
+pkg_purity_commit = master
+
+PACKAGES += push_service
+pkg_push_service_name = push_service
+pkg_push_service_description = Push service
+pkg_push_service_homepage = https://github.com/hairyhum/push_service
+pkg_push_service_fetch = git
+pkg_push_service_repo = https://github.com/hairyhum/push_service
+pkg_push_service_commit = master
+
+PACKAGES += qdate
+pkg_qdate_name = qdate
+pkg_qdate_description = Date, time, and timezone parsing, formatting, and conversion for Erlang.
+pkg_qdate_homepage = https://github.com/choptastic/qdate
+pkg_qdate_fetch = git
+pkg_qdate_repo = https://github.com/choptastic/qdate
+pkg_qdate_commit = master
+
+PACKAGES += qrcode
+pkg_qrcode_name = qrcode
+pkg_qrcode_description = QR Code encoder in Erlang
+pkg_qrcode_homepage = https://github.com/komone/qrcode
+pkg_qrcode_fetch = git
+pkg_qrcode_repo = https://github.com/komone/qrcode
+pkg_qrcode_commit = master
+
+PACKAGES += quest
+pkg_quest_name = quest
+pkg_quest_description = Learn Erlang through this set of challenges. An interactive system for getting to know Erlang.
+pkg_quest_homepage = https://github.com/eriksoe/ErlangQuest
+pkg_quest_fetch = git
+pkg_quest_repo = https://github.com/eriksoe/ErlangQuest
+pkg_quest_commit = master
+
+PACKAGES += quickrand
+pkg_quickrand_name = quickrand
+pkg_quickrand_description = Quick Erlang Random Number Generation
+pkg_quickrand_homepage = https://github.com/okeuday/quickrand
+pkg_quickrand_fetch = git
+pkg_quickrand_repo = https://github.com/okeuday/quickrand
+pkg_quickrand_commit = master
+
+PACKAGES += rabbit
+pkg_rabbit_name = rabbit
+pkg_rabbit_description = RabbitMQ Server
+pkg_rabbit_homepage = https://www.rabbitmq.com/
+pkg_rabbit_fetch = git
+pkg_rabbit_repo = https://github.com/rabbitmq/rabbitmq-server.git
+pkg_rabbit_commit = master
+
+PACKAGES += rabbit_exchange_type_riak
+pkg_rabbit_exchange_type_riak_name = rabbit_exchange_type_riak
+pkg_rabbit_exchange_type_riak_description = Custom RabbitMQ exchange type for sticking messages in Riak
+pkg_rabbit_exchange_type_riak_homepage = https://github.com/jbrisbin/riak-exchange
+pkg_rabbit_exchange_type_riak_fetch = git
+pkg_rabbit_exchange_type_riak_repo = https://github.com/jbrisbin/riak-exchange
+pkg_rabbit_exchange_type_riak_commit = master
+
+PACKAGES += rack
+pkg_rack_name = rack
+pkg_rack_description = Rack handler for erlang
+pkg_rack_homepage = https://github.com/erlyvideo/rack
+pkg_rack_fetch = git
+pkg_rack_repo = https://github.com/erlyvideo/rack
+pkg_rack_commit = master
+
+PACKAGES += radierl
+pkg_radierl_name = radierl
+pkg_radierl_description = RADIUS protocol stack implemented in Erlang.
+pkg_radierl_homepage = https://github.com/vances/radierl
+pkg_radierl_fetch = git
+pkg_radierl_repo = https://github.com/vances/radierl
+pkg_radierl_commit = master
+
+PACKAGES += rafter
+pkg_rafter_name = rafter
+pkg_rafter_description = An Erlang library application which implements the Raft consensus protocol
+pkg_rafter_homepage = https://github.com/andrewjstone/rafter
+pkg_rafter_fetch = git
+pkg_rafter_repo = https://github.com/andrewjstone/rafter
+pkg_rafter_commit = master
+
+PACKAGES += ranch
+pkg_ranch_name = ranch
+pkg_ranch_description = Socket acceptor pool for TCP protocols.
+pkg_ranch_homepage = http://ninenines.eu
+pkg_ranch_fetch = git
+pkg_ranch_repo = https://github.com/ninenines/ranch
+pkg_ranch_commit = 1.2.1
+
+PACKAGES += rbeacon
+pkg_rbeacon_name = rbeacon
+pkg_rbeacon_description = LAN discovery and presence in Erlang.
+pkg_rbeacon_homepage = https://github.com/refuge/rbeacon
+pkg_rbeacon_fetch = git
+pkg_rbeacon_repo = https://github.com/refuge/rbeacon
+pkg_rbeacon_commit = master
+
+PACKAGES += rebar
+pkg_rebar_name = rebar
+pkg_rebar_description = Erlang build tool that makes it easy to compile and test Erlang applications, port drivers and releases.
+pkg_rebar_homepage = http://www.rebar3.org
+pkg_rebar_fetch = git
+pkg_rebar_repo = https://github.com/rebar/rebar3
+pkg_rebar_commit = master
+
+PACKAGES += rebus
+pkg_rebus_name = rebus
+pkg_rebus_description = A stupid simple, internal, pub/sub event bus written in and for Erlang.
+pkg_rebus_homepage = https://github.com/olle/rebus
+pkg_rebus_fetch = git
+pkg_rebus_repo = https://github.com/olle/rebus
+pkg_rebus_commit = master
+
+PACKAGES += rec2json
+pkg_rec2json_name = rec2json
+pkg_rec2json_description = Compile erlang record definitions into modules to convert them to/from json easily.
+pkg_rec2json_homepage = https://github.com/lordnull/rec2json
+pkg_rec2json_fetch = git
+pkg_rec2json_repo = https://github.com/lordnull/rec2json
+pkg_rec2json_commit = master
+
+PACKAGES += recon
+pkg_recon_name = recon
+pkg_recon_description = Collection of functions and scripts to debug Erlang in production.
+pkg_recon_homepage = https://github.com/ferd/recon
+pkg_recon_fetch = git
+pkg_recon_repo = https://github.com/ferd/recon
+pkg_recon_commit = master
+
+PACKAGES += record_info
+pkg_record_info_name = record_info
+pkg_record_info_description = Convert between record and proplist
+pkg_record_info_homepage = https://github.com/bipthelin/erlang-record_info
+pkg_record_info_fetch = git
+pkg_record_info_repo = https://github.com/bipthelin/erlang-record_info
+pkg_record_info_commit = master
+
+PACKAGES += redgrid
+pkg_redgrid_name = redgrid
+pkg_redgrid_description = automatic Erlang node discovery via redis
+pkg_redgrid_homepage = https://github.com/jkvor/redgrid
+pkg_redgrid_fetch = git
+pkg_redgrid_repo = https://github.com/jkvor/redgrid
+pkg_redgrid_commit = master
+
+PACKAGES += redo
+pkg_redo_name = redo
+pkg_redo_description = pipelined erlang redis client
+pkg_redo_homepage = https://github.com/jkvor/redo
+pkg_redo_fetch = git
+pkg_redo_repo = https://github.com/jkvor/redo
+pkg_redo_commit = master
+
+PACKAGES += reload_mk
+pkg_reload_mk_name = reload_mk
+pkg_reload_mk_description = Live reload plugin for erlang.mk.
+pkg_reload_mk_homepage = https://github.com/bullno1/reload.mk
+pkg_reload_mk_fetch = git
+pkg_reload_mk_repo = https://github.com/bullno1/reload.mk
+pkg_reload_mk_commit = master
+
+PACKAGES += reltool_util
+pkg_reltool_util_name = reltool_util
+pkg_reltool_util_description = Erlang reltool utility functionality application
+pkg_reltool_util_homepage = https://github.com/okeuday/reltool_util
+pkg_reltool_util_fetch = git
+pkg_reltool_util_repo = https://github.com/okeuday/reltool_util
+pkg_reltool_util_commit = master
+
+PACKAGES += relx
+pkg_relx_name = relx
+pkg_relx_description = Sane, simple release creation for Erlang
+pkg_relx_homepage = https://github.com/erlware/relx
+pkg_relx_fetch = git
+pkg_relx_repo = https://github.com/erlware/relx
+pkg_relx_commit = master
+
+PACKAGES += resource_discovery
+pkg_resource_discovery_name = resource_discovery
+pkg_resource_discovery_description = An application used to dynamically discover resources present in an Erlang node cluster.
+pkg_resource_discovery_homepage = http://erlware.org/
+pkg_resource_discovery_fetch = git
+pkg_resource_discovery_repo = https://github.com/erlware/resource_discovery
+pkg_resource_discovery_commit = master
+
+PACKAGES += restc
+pkg_restc_name = restc
+pkg_restc_description = Erlang Rest Client
+pkg_restc_homepage = https://github.com/kivra/restclient
+pkg_restc_fetch = git
+pkg_restc_repo = https://github.com/kivra/restclient
+pkg_restc_commit = master
+
+PACKAGES += rfc4627_jsonrpc
+pkg_rfc4627_jsonrpc_name = rfc4627_jsonrpc
+pkg_rfc4627_jsonrpc_description = Erlang RFC4627 (JSON) codec and JSON-RPC server implementation.
+pkg_rfc4627_jsonrpc_homepage = https://github.com/tonyg/erlang-rfc4627
+pkg_rfc4627_jsonrpc_fetch = git
+pkg_rfc4627_jsonrpc_repo = https://github.com/tonyg/erlang-rfc4627
+pkg_rfc4627_jsonrpc_commit = master
+
+PACKAGES += riak_control
+pkg_riak_control_name = riak_control
+pkg_riak_control_description = Webmachine-based administration interface for Riak.
+pkg_riak_control_homepage = https://github.com/basho/riak_control
+pkg_riak_control_fetch = git
+pkg_riak_control_repo = https://github.com/basho/riak_control
+pkg_riak_control_commit = master
+
+PACKAGES += riak_core
+pkg_riak_core_name = riak_core
+pkg_riak_core_description = Distributed systems infrastructure used by Riak.
+pkg_riak_core_homepage = https://github.com/basho/riak_core
+pkg_riak_core_fetch = git
+pkg_riak_core_repo = https://github.com/basho/riak_core
+pkg_riak_core_commit = master
+
+PACKAGES += riak_dt
+pkg_riak_dt_name = riak_dt
+pkg_riak_dt_description = Convergent replicated datatypes in Erlang
+pkg_riak_dt_homepage = https://github.com/basho/riak_dt
+pkg_riak_dt_fetch = git
+pkg_riak_dt_repo = https://github.com/basho/riak_dt
+pkg_riak_dt_commit = master
+
+PACKAGES += riak_ensemble
+pkg_riak_ensemble_name = riak_ensemble
+pkg_riak_ensemble_description = Multi-Paxos framework in Erlang
+pkg_riak_ensemble_homepage = https://github.com/basho/riak_ensemble
+pkg_riak_ensemble_fetch = git
+pkg_riak_ensemble_repo = https://github.com/basho/riak_ensemble
+pkg_riak_ensemble_commit = master
+
+PACKAGES += riak_kv
+pkg_riak_kv_name = riak_kv
+pkg_riak_kv_description = Riak Key/Value Store
+pkg_riak_kv_homepage = https://github.com/basho/riak_kv
+pkg_riak_kv_fetch = git
+pkg_riak_kv_repo = https://github.com/basho/riak_kv
+pkg_riak_kv_commit = master
+
+PACKAGES += riak_pg
+pkg_riak_pg_name = riak_pg
+pkg_riak_pg_description = Distributed process groups with riak_core.
+pkg_riak_pg_homepage = https://github.com/cmeiklejohn/riak_pg
+pkg_riak_pg_fetch = git
+pkg_riak_pg_repo = https://github.com/cmeiklejohn/riak_pg
+pkg_riak_pg_commit = master
+
+PACKAGES += riak_pipe
+pkg_riak_pipe_name = riak_pipe
+pkg_riak_pipe_description = Riak Pipelines
+pkg_riak_pipe_homepage = https://github.com/basho/riak_pipe
+pkg_riak_pipe_fetch = git
+pkg_riak_pipe_repo = https://github.com/basho/riak_pipe
+pkg_riak_pipe_commit = master
+
+PACKAGES += riak_sysmon
+pkg_riak_sysmon_name = riak_sysmon
+pkg_riak_sysmon_description = Simple OTP app for managing Erlang VM system_monitor event messages
+pkg_riak_sysmon_homepage = https://github.com/basho/riak_sysmon
+pkg_riak_sysmon_fetch = git
+pkg_riak_sysmon_repo = https://github.com/basho/riak_sysmon
+pkg_riak_sysmon_commit = master
+
+PACKAGES += riak_test
+pkg_riak_test_name = riak_test
+pkg_riak_test_description = I'm in your cluster, testing your riaks
+pkg_riak_test_homepage = https://github.com/basho/riak_test
+pkg_riak_test_fetch = git
+pkg_riak_test_repo = https://github.com/basho/riak_test
+pkg_riak_test_commit = master
+
+PACKAGES += riakc
+pkg_riakc_name = riakc
+pkg_riakc_description = Erlang clients for Riak.
+pkg_riakc_homepage = https://github.com/basho/riak-erlang-client
+pkg_riakc_fetch = git
+pkg_riakc_repo = https://github.com/basho/riak-erlang-client
+pkg_riakc_commit = master
+
+PACKAGES += riakhttpc
+pkg_riakhttpc_name = riakhttpc
+pkg_riakhttpc_description = Riak Erlang client using the HTTP interface
+pkg_riakhttpc_homepage = https://github.com/basho/riak-erlang-http-client
+pkg_riakhttpc_fetch = git
+pkg_riakhttpc_repo = https://github.com/basho/riak-erlang-http-client
+pkg_riakhttpc_commit = master
+
+PACKAGES += riaknostic
+pkg_riaknostic_name = riaknostic
+pkg_riaknostic_description = A diagnostic tool for Riak installations, to find common errors asap
+pkg_riaknostic_homepage = https://github.com/basho/riaknostic
+pkg_riaknostic_fetch = git
+pkg_riaknostic_repo = https://github.com/basho/riaknostic
+pkg_riaknostic_commit = master
+
+PACKAGES += riakpool
+pkg_riakpool_name = riakpool
+pkg_riakpool_description = erlang riak client pool
+pkg_riakpool_homepage = https://github.com/dweldon/riakpool
+pkg_riakpool_fetch = git
+pkg_riakpool_repo = https://github.com/dweldon/riakpool
+pkg_riakpool_commit = master
+
+PACKAGES += rivus_cep
+pkg_rivus_cep_name = rivus_cep
+pkg_rivus_cep_description = Complex event processing in Erlang
+pkg_rivus_cep_homepage = https://github.com/vascokk/rivus_cep
+pkg_rivus_cep_fetch = git
+pkg_rivus_cep_repo = https://github.com/vascokk/rivus_cep
+pkg_rivus_cep_commit = master
+
+PACKAGES += rlimit
+pkg_rlimit_name = rlimit
+pkg_rlimit_description = Magnus Klaar's rate limiter code from etorrent
+pkg_rlimit_homepage = https://github.com/jlouis/rlimit
+pkg_rlimit_fetch = git
+pkg_rlimit_repo = https://github.com/jlouis/rlimit
+pkg_rlimit_commit = master
+
+PACKAGES += rust_mk
+pkg_rust_mk_name = rust_mk
+pkg_rust_mk_description = Build Rust crates in an Erlang application
+pkg_rust_mk_homepage = https://github.com/goertzenator/rust.mk
+pkg_rust_mk_fetch = git
+pkg_rust_mk_repo = https://github.com/goertzenator/rust.mk
+pkg_rust_mk_commit = master
+
+PACKAGES += safetyvalve
+pkg_safetyvalve_name = safetyvalve
+pkg_safetyvalve_description = A safety valve for your erlang node
+pkg_safetyvalve_homepage = https://github.com/jlouis/safetyvalve
+pkg_safetyvalve_fetch = git
+pkg_safetyvalve_repo = https://github.com/jlouis/safetyvalve
+pkg_safetyvalve_commit = master
+
+PACKAGES += seestar
+pkg_seestar_name = seestar
+pkg_seestar_description = The Erlang client for Cassandra 1.2+ binary protocol
+pkg_seestar_homepage = https://github.com/iamaleksey/seestar
+pkg_seestar_fetch = git
+pkg_seestar_repo = https://github.com/iamaleksey/seestar
+pkg_seestar_commit = master
+
+PACKAGES += service
+pkg_service_name = service
+pkg_service_description = A minimal Erlang behavior for creating CloudI internal services
+pkg_service_homepage = http://cloudi.org/
+pkg_service_fetch = git
+pkg_service_repo = https://github.com/CloudI/service
+pkg_service_commit = master
+
+PACKAGES += setup
+pkg_setup_name = setup
+pkg_setup_description = Generic setup utility for Erlang-based systems
+pkg_setup_homepage = https://github.com/uwiger/setup
+pkg_setup_fetch = git
+pkg_setup_repo = https://github.com/uwiger/setup
+pkg_setup_commit = master
+
+PACKAGES += sext
+pkg_sext_name = sext
+pkg_sext_description = Sortable Erlang Term Serialization
+pkg_sext_homepage = https://github.com/uwiger/sext
+pkg_sext_fetch = git
+pkg_sext_repo = https://github.com/uwiger/sext
+pkg_sext_commit = master
+
+PACKAGES += sfmt
+pkg_sfmt_name = sfmt
+pkg_sfmt_description = SFMT pseudo random number generator for Erlang.
+pkg_sfmt_homepage = https://github.com/jj1bdx/sfmt-erlang
+pkg_sfmt_fetch = git
+pkg_sfmt_repo = https://github.com/jj1bdx/sfmt-erlang
+pkg_sfmt_commit = master
+
+PACKAGES += sgte
+pkg_sgte_name = sgte
+pkg_sgte_description = A simple Erlang Template Engine
+pkg_sgte_homepage = https://github.com/filippo/sgte
+pkg_sgte_fetch = git
+pkg_sgte_repo = https://github.com/filippo/sgte
+pkg_sgte_commit = master
+
+PACKAGES += sheriff
+pkg_sheriff_name = sheriff
+pkg_sheriff_description = Parse transform for type based validation.
+pkg_sheriff_homepage = http://ninenines.eu
+pkg_sheriff_fetch = git
+pkg_sheriff_repo = https://github.com/extend/sheriff
+pkg_sheriff_commit = master
+
+PACKAGES += shotgun
+pkg_shotgun_name = shotgun
+pkg_shotgun_description = better than just a gun
+pkg_shotgun_homepage = https://github.com/inaka/shotgun
+pkg_shotgun_fetch = git
+pkg_shotgun_repo = https://github.com/inaka/shotgun
+pkg_shotgun_commit = master
+
+PACKAGES += sidejob
+pkg_sidejob_name = sidejob
+pkg_sidejob_description = Parallel worker and capacity limiting library for Erlang
+pkg_sidejob_homepage = https://github.com/basho/sidejob
+pkg_sidejob_fetch = git
+pkg_sidejob_repo = https://github.com/basho/sidejob
+pkg_sidejob_commit = master
+
+PACKAGES += sieve
+pkg_sieve_name = sieve
+pkg_sieve_description = sieve is a simple TCP routing proxy (layer 7) in erlang
+pkg_sieve_homepage = https://github.com/benoitc/sieve
+pkg_sieve_fetch = git
+pkg_sieve_repo = https://github.com/benoitc/sieve
+pkg_sieve_commit = master
+
+PACKAGES += sighandler
+pkg_sighandler_name = sighandler
+pkg_sighandler_description = Handle UNIX signals in Erlang
+pkg_sighandler_homepage = https://github.com/jkingsbery/sighandler
+pkg_sighandler_fetch = git
+pkg_sighandler_repo = https://github.com/jkingsbery/sighandler
+pkg_sighandler_commit = master
+
+PACKAGES += simhash
+pkg_simhash_name = simhash
+pkg_simhash_description = Simhashing for Erlang -- hashing algorithm to find near-duplicates in binary data.
+pkg_simhash_homepage = https://github.com/ferd/simhash
+pkg_simhash_fetch = git
+pkg_simhash_repo = https://github.com/ferd/simhash
+pkg_simhash_commit = master
+
+PACKAGES += simple_bridge
+pkg_simple_bridge_name = simple_bridge
+pkg_simple_bridge_description = A simple, standardized interface library to Erlang HTTP Servers.
+pkg_simple_bridge_homepage = https://github.com/nitrogen/simple_bridge
+pkg_simple_bridge_fetch = git
+pkg_simple_bridge_repo = https://github.com/nitrogen/simple_bridge
+pkg_simple_bridge_commit = master
+
+PACKAGES += simple_oauth2
+pkg_simple_oauth2_name = simple_oauth2
+pkg_simple_oauth2_description = Simple erlang OAuth2 client module for any http server framework (Google, Facebook, Yandex, Vkontakte are preconfigured)
+pkg_simple_oauth2_homepage = https://github.com/virtan/simple_oauth2
+pkg_simple_oauth2_fetch = git
+pkg_simple_oauth2_repo = https://github.com/virtan/simple_oauth2
+pkg_simple_oauth2_commit = master
+
+PACKAGES += skel
+pkg_skel_name = skel
+pkg_skel_description = A Streaming Process-based Skeleton Library for Erlang
+pkg_skel_homepage = https://github.com/ParaPhrase/skel
+pkg_skel_fetch = git
+pkg_skel_repo = https://github.com/ParaPhrase/skel
+pkg_skel_commit = master
+
+PACKAGES += slack
+pkg_slack_name = slack
+pkg_slack_description = Minimal slack notification OTP library.
+pkg_slack_homepage = https://github.com/DonBranson/slack
+pkg_slack_fetch = git
+pkg_slack_repo = https://github.com/DonBranson/slack.git
+pkg_slack_commit = master
+
+PACKAGES += smother
+pkg_smother_name = smother
+pkg_smother_description = Extended code coverage metrics for Erlang.
+pkg_smother_homepage = https://ramsay-t.github.io/Smother/
+pkg_smother_fetch = git
+pkg_smother_repo = https://github.com/ramsay-t/Smother
+pkg_smother_commit = master
+
+PACKAGES += snappyer
+pkg_snappyer_name = snappyer
+pkg_snappyer_description = Snappy as nif for Erlang
+pkg_snappyer_homepage = https://github.com/zmstone/snappyer
+pkg_snappyer_fetch = git
+pkg_snappyer_repo = https://github.com/zmstone/snappyer.git
+pkg_snappyer_commit = master
+
+PACKAGES += social
+pkg_social_name = social
+pkg_social_description = Cowboy handler for social login via OAuth2 providers
+pkg_social_homepage = https://github.com/dvv/social
+pkg_social_fetch = git
+pkg_social_repo = https://github.com/dvv/social
+pkg_social_commit = master
+
+PACKAGES += spapi_router
+pkg_spapi_router_name = spapi_router
+pkg_spapi_router_description = Partially-connected Erlang clustering
+pkg_spapi_router_homepage = https://github.com/spilgames/spapi-router
+pkg_spapi_router_fetch = git
+pkg_spapi_router_repo = https://github.com/spilgames/spapi-router
+pkg_spapi_router_commit = master
+
+PACKAGES += sqerl
+pkg_sqerl_name = sqerl
+pkg_sqerl_description = An Erlang-flavoured SQL DSL
+pkg_sqerl_homepage = https://github.com/hairyhum/sqerl
+pkg_sqerl_fetch = git
+pkg_sqerl_repo = https://github.com/hairyhum/sqerl
+pkg_sqerl_commit = master
+
+PACKAGES += srly
+pkg_srly_name = srly
+pkg_srly_description = Native Erlang Unix serial interface
+pkg_srly_homepage = https://github.com/msantos/srly
+pkg_srly_fetch = git
+pkg_srly_repo = https://github.com/msantos/srly
+pkg_srly_commit = master
+
+PACKAGES += sshrpc
+pkg_sshrpc_name = sshrpc
+pkg_sshrpc_description = Erlang SSH RPC module (experimental)
+pkg_sshrpc_homepage = https://github.com/jj1bdx/sshrpc
+pkg_sshrpc_fetch = git
+pkg_sshrpc_repo = https://github.com/jj1bdx/sshrpc
+pkg_sshrpc_commit = master
+
+PACKAGES += stable
+pkg_stable_name = stable
+pkg_stable_description = Library of assorted helpers for the Cowboy web server.
+pkg_stable_homepage = https://github.com/dvv/stable
+pkg_stable_fetch = git
+pkg_stable_repo = https://github.com/dvv/stable
+pkg_stable_commit = master
+
+PACKAGES += statebox
+pkg_statebox_name = statebox
+pkg_statebox_description = Erlang state monad with merge/conflict-resolution capabilities. Useful for Riak.
+pkg_statebox_homepage = https://github.com/mochi/statebox
+pkg_statebox_fetch = git
+pkg_statebox_repo = https://github.com/mochi/statebox
+pkg_statebox_commit = master
+
+PACKAGES += statebox_riak
+pkg_statebox_riak_name = statebox_riak
+pkg_statebox_riak_description = Convenience library that makes it easier to use statebox with riak, extracted from best practices in our production code at Mochi Media.
+pkg_statebox_riak_homepage = https://github.com/mochi/statebox_riak
+pkg_statebox_riak_fetch = git
+pkg_statebox_riak_repo = https://github.com/mochi/statebox_riak
+pkg_statebox_riak_commit = master
+
+PACKAGES += statman
+pkg_statman_name = statman
+pkg_statman_description = Efficiently collect massive volumes of metrics inside the Erlang VM
+pkg_statman_homepage = https://github.com/knutin/statman
+pkg_statman_fetch = git
+pkg_statman_repo = https://github.com/knutin/statman
+pkg_statman_commit = master
+
+PACKAGES += statsderl
+pkg_statsderl_name = statsderl
+pkg_statsderl_description = StatsD client (erlang)
+pkg_statsderl_homepage = https://github.com/lpgauth/statsderl
+pkg_statsderl_fetch = git
+pkg_statsderl_repo = https://github.com/lpgauth/statsderl
+pkg_statsderl_commit = master
+
+PACKAGES += stdinout_pool
+pkg_stdinout_pool_name = stdinout_pool
+pkg_stdinout_pool_description = stdinout_pool : stuff goes in, stuff goes out. there's never any miscommunication.
+pkg_stdinout_pool_homepage = https://github.com/mattsta/erlang-stdinout-pool
+pkg_stdinout_pool_fetch = git
+pkg_stdinout_pool_repo = https://github.com/mattsta/erlang-stdinout-pool
+pkg_stdinout_pool_commit = master
+
+PACKAGES += stockdb
+pkg_stockdb_name = stockdb
+pkg_stockdb_description = Database for storing Stock Exchange quotes in erlang
+pkg_stockdb_homepage = https://github.com/maxlapshin/stockdb
+pkg_stockdb_fetch = git
+pkg_stockdb_repo = https://github.com/maxlapshin/stockdb
+pkg_stockdb_commit = master
+
+PACKAGES += stripe
+pkg_stripe_name = stripe
+pkg_stripe_description = Erlang interface to the stripe.com API
+pkg_stripe_homepage = https://github.com/mattsta/stripe-erlang
+pkg_stripe_fetch = git
+pkg_stripe_repo = https://github.com/mattsta/stripe-erlang
+pkg_stripe_commit = v1
+
+PACKAGES += subproc
+pkg_subproc_name = subproc
+pkg_subproc_description = unix subprocess manager with {active,once|false} modes
+pkg_subproc_homepage = http://dozzie.jarowit.net/trac/wiki/subproc
+pkg_subproc_fetch = git
+pkg_subproc_repo = https://github.com/dozzie/subproc
+pkg_subproc_commit = v0.1.0
+
+PACKAGES += supervisor3
+pkg_supervisor3_name = supervisor3
+pkg_supervisor3_description = OTP supervisor with additional strategies
+pkg_supervisor3_homepage = https://github.com/klarna/supervisor3
+pkg_supervisor3_fetch = git
+pkg_supervisor3_repo = https://github.com/klarna/supervisor3.git
+pkg_supervisor3_commit = master
+
+PACKAGES += surrogate
+pkg_surrogate_name = surrogate
+pkg_surrogate_description = Proxy server written in erlang. Supports reverse proxy load balancing and forward proxy with http (including CONNECT), socks4, socks5, and transparent proxy modes.
+pkg_surrogate_homepage = https://github.com/skruger/Surrogate
+pkg_surrogate_fetch = git
+pkg_surrogate_repo = https://github.com/skruger/Surrogate
+pkg_surrogate_commit = master
+
+PACKAGES += swab
+pkg_swab_name = swab
+pkg_swab_description = General purpose buffer handling module
+pkg_swab_homepage = https://github.com/crownedgrouse/swab
+pkg_swab_fetch = git
+pkg_swab_repo = https://github.com/crownedgrouse/swab
+pkg_swab_commit = master
+
+PACKAGES += swarm
+pkg_swarm_name = swarm
+pkg_swarm_description = Fast and simple acceptor pool for Erlang
+pkg_swarm_homepage = https://github.com/jeremey/swarm
+pkg_swarm_fetch = git
+pkg_swarm_repo = https://github.com/jeremey/swarm
+pkg_swarm_commit = master
+
+PACKAGES += switchboard
+pkg_switchboard_name = switchboard
+pkg_switchboard_description = A framework for processing email using worker plugins.
+pkg_switchboard_homepage = https://github.com/thusfresh/switchboard
+pkg_switchboard_fetch = git
+pkg_switchboard_repo = https://github.com/thusfresh/switchboard
+pkg_switchboard_commit = master
+
+PACKAGES += syn
+pkg_syn_name = syn
+pkg_syn_description = A global Process Registry and Process Group manager for Erlang.
+pkg_syn_homepage = https://github.com/ostinelli/syn
+pkg_syn_fetch = git
+pkg_syn_repo = https://github.com/ostinelli/syn
+pkg_syn_commit = master
+
+PACKAGES += sync
+pkg_sync_name = sync
+pkg_sync_description = On-the-fly recompiling and reloading in Erlang.
+pkg_sync_homepage = https://github.com/rustyio/sync
+pkg_sync_fetch = git
+pkg_sync_repo = https://github.com/rustyio/sync
+pkg_sync_commit = master
+
+PACKAGES += syntaxerl
+pkg_syntaxerl_name = syntaxerl
+pkg_syntaxerl_description = Syntax checker for Erlang
+pkg_syntaxerl_homepage = https://github.com/ten0s/syntaxerl
+pkg_syntaxerl_fetch = git
+pkg_syntaxerl_repo = https://github.com/ten0s/syntaxerl
+pkg_syntaxerl_commit = master
+
+PACKAGES += syslog
+pkg_syslog_name = syslog
+pkg_syslog_description = Erlang port driver for interacting with syslog via syslog(3)
+pkg_syslog_homepage = https://github.com/Vagabond/erlang-syslog
+pkg_syslog_fetch = git
+pkg_syslog_repo = https://github.com/Vagabond/erlang-syslog
+pkg_syslog_commit = master
+
+PACKAGES += taskforce
+pkg_taskforce_name = taskforce
+pkg_taskforce_description = Erlang worker pools for controlled parallelisation of arbitrary tasks.
+pkg_taskforce_homepage = https://github.com/g-andrade/taskforce
+pkg_taskforce_fetch = git
+pkg_taskforce_repo = https://github.com/g-andrade/taskforce
+pkg_taskforce_commit = master
+
+PACKAGES += tddreloader
+pkg_tddreloader_name = tddreloader
+pkg_tddreloader_description = Shell utility for recompiling, reloading, and testing code as it changes
+pkg_tddreloader_homepage = https://github.com/version2beta/tddreloader
+pkg_tddreloader_fetch = git
+pkg_tddreloader_repo = https://github.com/version2beta/tddreloader
+pkg_tddreloader_commit = master
+
+PACKAGES += tempo
+pkg_tempo_name = tempo
+pkg_tempo_description = NIF-based date and time parsing and formatting for Erlang.
+pkg_tempo_homepage = https://github.com/selectel/tempo
+pkg_tempo_fetch = git
+pkg_tempo_repo = https://github.com/selectel/tempo
+pkg_tempo_commit = master
+
+PACKAGES += ticktick
+pkg_ticktick_name = ticktick
+pkg_ticktick_description = Ticktick is an ID generator for message services.
+pkg_ticktick_homepage = https://github.com/ericliang/ticktick
+pkg_ticktick_fetch = git
+pkg_ticktick_repo = https://github.com/ericliang/ticktick
+pkg_ticktick_commit = master
+
+PACKAGES += tinymq
+pkg_tinymq_name = tinymq
+pkg_tinymq_description = TinyMQ - a diminutive, in-memory message queue
+pkg_tinymq_homepage = https://github.com/ChicagoBoss/tinymq
+pkg_tinymq_fetch = git
+pkg_tinymq_repo = https://github.com/ChicagoBoss/tinymq
+pkg_tinymq_commit = master
+
+PACKAGES += tinymt
+pkg_tinymt_name = tinymt
+pkg_tinymt_description = TinyMT pseudo random number generator for Erlang.
+pkg_tinymt_homepage = https://github.com/jj1bdx/tinymt-erlang
+pkg_tinymt_fetch = git
+pkg_tinymt_repo = https://github.com/jj1bdx/tinymt-erlang
+pkg_tinymt_commit = master
+
+PACKAGES += tirerl
+pkg_tirerl_name = tirerl
+pkg_tirerl_description = Erlang interface to Elastic Search
+pkg_tirerl_homepage = https://github.com/inaka/tirerl
+pkg_tirerl_fetch = git
+pkg_tirerl_repo = https://github.com/inaka/tirerl
+pkg_tirerl_commit = master
+
+PACKAGES += toml
+pkg_toml_name = toml
+pkg_toml_description = TOML (0.4.0) config parser
+pkg_toml_homepage = http://dozzie.jarowit.net/trac/wiki/TOML
+pkg_toml_fetch = git
+pkg_toml_repo = https://github.com/dozzie/toml
+pkg_toml_commit = v0.2.0
+
+PACKAGES += traffic_tools
+pkg_traffic_tools_name = traffic_tools
+pkg_traffic_tools_description = Simple traffic limiting library
+pkg_traffic_tools_homepage = https://github.com/systra/traffic_tools
+pkg_traffic_tools_fetch = git
+pkg_traffic_tools_repo = https://github.com/systra/traffic_tools
+pkg_traffic_tools_commit = master
+
+PACKAGES += trails
+pkg_trails_name = trails
+pkg_trails_description = A couple of improvements over Cowboy Routes
+pkg_trails_homepage = http://inaka.github.io/cowboy-trails/
+pkg_trails_fetch = git
+pkg_trails_repo = https://github.com/inaka/cowboy-trails
+pkg_trails_commit = master
+
+PACKAGES += trane
+pkg_trane_name = trane
+pkg_trane_description = SAX-style parser for broken HTML in Erlang
+pkg_trane_homepage = https://github.com/massemanet/trane
+pkg_trane_fetch = git
+pkg_trane_repo = https://github.com/massemanet/trane
+pkg_trane_commit = master
+
+PACKAGES += transit
+pkg_transit_name = transit
+pkg_transit_description = transit format for erlang
+pkg_transit_homepage = https://github.com/isaiah/transit-erlang
+pkg_transit_fetch = git
+pkg_transit_repo = https://github.com/isaiah/transit-erlang
+pkg_transit_commit = master
+
+PACKAGES += trie
+pkg_trie_name = trie
+pkg_trie_description = Erlang Trie Implementation
+pkg_trie_homepage = https://github.com/okeuday/trie
+pkg_trie_fetch = git
+pkg_trie_repo = https://github.com/okeuday/trie
+pkg_trie_commit = master
+
+PACKAGES += triq
+pkg_triq_name = triq
+pkg_triq_description = Trifork QuickCheck
+pkg_triq_homepage = https://triq.gitlab.io
+pkg_triq_fetch = git
+pkg_triq_repo = https://gitlab.com/triq/triq.git
+pkg_triq_commit = master
+
+PACKAGES += tunctl
+pkg_tunctl_name = tunctl
+pkg_tunctl_description = Erlang TUN/TAP interface
+pkg_tunctl_homepage = https://github.com/msantos/tunctl
+pkg_tunctl_fetch = git
+pkg_tunctl_repo = https://github.com/msantos/tunctl
+pkg_tunctl_commit = master
+
+PACKAGES += twerl
+pkg_twerl_name = twerl
+pkg_twerl_description = Erlang client for the Twitter Streaming API
+pkg_twerl_homepage = https://github.com/lucaspiller/twerl
+pkg_twerl_fetch = git
+pkg_twerl_repo = https://github.com/lucaspiller/twerl
+pkg_twerl_commit = oauth
+
+PACKAGES += twitter_erlang
+pkg_twitter_erlang_name = twitter_erlang
+pkg_twitter_erlang_description = An Erlang twitter client
+pkg_twitter_erlang_homepage = https://github.com/ngerakines/erlang_twitter
+pkg_twitter_erlang_fetch = git
+pkg_twitter_erlang_repo = https://github.com/ngerakines/erlang_twitter
+pkg_twitter_erlang_commit = master
+
+PACKAGES += ucol_nif
+pkg_ucol_nif_name = ucol_nif
+pkg_ucol_nif_description = ICU based collation Erlang module
+pkg_ucol_nif_homepage = https://github.com/refuge/ucol_nif
+pkg_ucol_nif_fetch = git
+pkg_ucol_nif_repo = https://github.com/refuge/ucol_nif
+pkg_ucol_nif_commit = master
+
+PACKAGES += unicorn
+pkg_unicorn_name = unicorn
+pkg_unicorn_description = Generic configuration server
+pkg_unicorn_homepage = https://github.com/shizzard/unicorn
+pkg_unicorn_fetch = git
+pkg_unicorn_repo = https://github.com/shizzard/unicorn
+pkg_unicorn_commit = master
+
+PACKAGES += unsplit
+pkg_unsplit_name = unsplit
+pkg_unsplit_description = Resolves conflicts in Mnesia after network splits
+pkg_unsplit_homepage = https://github.com/uwiger/unsplit
+pkg_unsplit_fetch = git
+pkg_unsplit_repo = https://github.com/uwiger/unsplit
+pkg_unsplit_commit = master
+
+PACKAGES += uuid
+pkg_uuid_name = uuid
+pkg_uuid_description = Erlang UUID Implementation
+pkg_uuid_homepage = https://github.com/okeuday/uuid
+pkg_uuid_fetch = git
+pkg_uuid_repo = https://github.com/okeuday/uuid
+pkg_uuid_commit = master
+
+PACKAGES += ux
+pkg_ux_name = ux
+pkg_ux_description = Unicode eXtension for Erlang (Strings, Collation)
+pkg_ux_homepage = https://github.com/erlang-unicode/ux
+pkg_ux_fetch = git
+pkg_ux_repo = https://github.com/erlang-unicode/ux
+pkg_ux_commit = master
+
+PACKAGES += vert
+pkg_vert_name = vert
+pkg_vert_description = Erlang binding to libvirt virtualization API
+pkg_vert_homepage = https://github.com/msantos/erlang-libvirt
+pkg_vert_fetch = git
+pkg_vert_repo = https://github.com/msantos/erlang-libvirt
+pkg_vert_commit = master
+
+PACKAGES += verx
+pkg_verx_name = verx
+pkg_verx_description = Erlang implementation of the libvirtd remote protocol
+pkg_verx_homepage = https://github.com/msantos/verx
+pkg_verx_fetch = git
+pkg_verx_repo = https://github.com/msantos/verx
+pkg_verx_commit = master
+
+PACKAGES += vmq_acl
+pkg_vmq_acl_name = vmq_acl
+pkg_vmq_acl_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_acl_homepage = https://verne.mq/
+pkg_vmq_acl_fetch = git
+pkg_vmq_acl_repo = https://github.com/erlio/vmq_acl
+pkg_vmq_acl_commit = master
+
+PACKAGES += vmq_bridge
+pkg_vmq_bridge_name = vmq_bridge
+pkg_vmq_bridge_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_bridge_homepage = https://verne.mq/
+pkg_vmq_bridge_fetch = git
+pkg_vmq_bridge_repo = https://github.com/erlio/vmq_bridge
+pkg_vmq_bridge_commit = master
+
+PACKAGES += vmq_graphite
+pkg_vmq_graphite_name = vmq_graphite
+pkg_vmq_graphite_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_graphite_homepage = https://verne.mq/
+pkg_vmq_graphite_fetch = git
+pkg_vmq_graphite_repo = https://github.com/erlio/vmq_graphite
+pkg_vmq_graphite_commit = master
+
+PACKAGES += vmq_passwd
+pkg_vmq_passwd_name = vmq_passwd
+pkg_vmq_passwd_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_passwd_homepage = https://verne.mq/
+pkg_vmq_passwd_fetch = git
+pkg_vmq_passwd_repo = https://github.com/erlio/vmq_passwd
+pkg_vmq_passwd_commit = master
+
+PACKAGES += vmq_server
+pkg_vmq_server_name = vmq_server
+pkg_vmq_server_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_server_homepage = https://verne.mq/
+pkg_vmq_server_fetch = git
+pkg_vmq_server_repo = https://github.com/erlio/vmq_server
+pkg_vmq_server_commit = master
+
+PACKAGES += vmq_snmp
+pkg_vmq_snmp_name = vmq_snmp
+pkg_vmq_snmp_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_snmp_homepage = https://verne.mq/
+pkg_vmq_snmp_fetch = git
+pkg_vmq_snmp_repo = https://github.com/erlio/vmq_snmp
+pkg_vmq_snmp_commit = master
+
+PACKAGES += vmq_systree
+pkg_vmq_systree_name = vmq_systree
+pkg_vmq_systree_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_systree_homepage = https://verne.mq/
+pkg_vmq_systree_fetch = git
+pkg_vmq_systree_repo = https://github.com/erlio/vmq_systree
+pkg_vmq_systree_commit = master
+
+PACKAGES += vmstats
+pkg_vmstats_name = vmstats
+pkg_vmstats_description = tiny Erlang app that works in conjunction with statsderl in order to generate information on the Erlang VM for graphite logs.
+pkg_vmstats_homepage = https://github.com/ferd/vmstats
+pkg_vmstats_fetch = git
+pkg_vmstats_repo = https://github.com/ferd/vmstats
+pkg_vmstats_commit = master
+
+PACKAGES += walrus
+pkg_walrus_name = walrus
+pkg_walrus_description = Walrus - Mustache-like Templating
+pkg_walrus_homepage = https://github.com/devinus/walrus
+pkg_walrus_fetch = git
+pkg_walrus_repo = https://github.com/devinus/walrus
+pkg_walrus_commit = master
+
+PACKAGES += webmachine
+pkg_webmachine_name = webmachine
+pkg_webmachine_description = A REST-based system for building web applications.
+pkg_webmachine_homepage = https://github.com/basho/webmachine
+pkg_webmachine_fetch = git
+pkg_webmachine_repo = https://github.com/basho/webmachine
+pkg_webmachine_commit = master
+
+PACKAGES += websocket_client
+pkg_websocket_client_name = websocket_client
+pkg_websocket_client_description = Erlang websocket client (ws and wss supported)
+pkg_websocket_client_homepage = https://github.com/jeremyong/websocket_client
+pkg_websocket_client_fetch = git
+pkg_websocket_client_repo = https://github.com/jeremyong/websocket_client
+pkg_websocket_client_commit = master
+
+PACKAGES += worker_pool
+pkg_worker_pool_name = worker_pool
+pkg_worker_pool_description = a simple erlang worker pool
+pkg_worker_pool_homepage = https://github.com/inaka/worker_pool
+pkg_worker_pool_fetch = git
+pkg_worker_pool_repo = https://github.com/inaka/worker_pool
+pkg_worker_pool_commit = master
+
+PACKAGES += wrangler
+pkg_wrangler_name = wrangler
+pkg_wrangler_description = Import of the Wrangler svn repository.
+pkg_wrangler_homepage = http://www.cs.kent.ac.uk/projects/wrangler/Home.html
+pkg_wrangler_fetch = git
+pkg_wrangler_repo = https://github.com/RefactoringTools/wrangler
+pkg_wrangler_commit = master
+
+PACKAGES += wsock
+pkg_wsock_name = wsock
+pkg_wsock_description = Erlang library to build WebSocket clients and servers
+pkg_wsock_homepage = https://github.com/madtrick/wsock
+pkg_wsock_fetch = git
+pkg_wsock_repo = https://github.com/madtrick/wsock
+pkg_wsock_commit = master
+
+PACKAGES += xhttpc
+pkg_xhttpc_name = xhttpc
+pkg_xhttpc_description = Extensible HTTP Client for Erlang
+pkg_xhttpc_homepage = https://github.com/seriyps/xhttpc
+pkg_xhttpc_fetch = git
+pkg_xhttpc_repo = https://github.com/seriyps/xhttpc
+pkg_xhttpc_commit = master
+
+PACKAGES += xref_runner
+pkg_xref_runner_name = xref_runner
+pkg_xref_runner_description = Erlang Xref Runner (inspired by rebar xref)
+pkg_xref_runner_homepage = https://github.com/inaka/xref_runner
+pkg_xref_runner_fetch = git
+pkg_xref_runner_repo = https://github.com/inaka/xref_runner
+pkg_xref_runner_commit = master
+
+PACKAGES += yamerl
+pkg_yamerl_name = yamerl
+pkg_yamerl_description = YAML 1.2 parser in pure Erlang
+pkg_yamerl_homepage = https://github.com/yakaz/yamerl
+pkg_yamerl_fetch = git
+pkg_yamerl_repo = https://github.com/yakaz/yamerl
+pkg_yamerl_commit = master
+
+PACKAGES += yamler
+pkg_yamler_name = yamler
+pkg_yamler_description = libyaml-based yaml loader for Erlang
+pkg_yamler_homepage = https://github.com/goertzenator/yamler
+pkg_yamler_fetch = git
+pkg_yamler_repo = https://github.com/goertzenator/yamler
+pkg_yamler_commit = master
+
+PACKAGES += yaws
+pkg_yaws_name = yaws
+pkg_yaws_description = Yaws webserver
+pkg_yaws_homepage = http://yaws.hyber.org
+pkg_yaws_fetch = git
+pkg_yaws_repo = https://github.com/klacke/yaws
+pkg_yaws_commit = master
+
+PACKAGES += zab_engine
+pkg_zab_engine_name = zab_engine
+pkg_zab_engine_description = Zab protocol implemented in Erlang
+pkg_zab_engine_homepage = https://github.com/xinmingyao/zab_engine
+pkg_zab_engine_fetch = git
+pkg_zab_engine_repo = https://github.com/xinmingyao/zab_engine
+pkg_zab_engine_commit = master
+
+PACKAGES += zabbix_sender
+pkg_zabbix_sender_name = zabbix_sender
+pkg_zabbix_sender_description = Zabbix trapper for sending data to Zabbix in pure Erlang
+pkg_zabbix_sender_homepage = https://github.com/stalkermn/zabbix_sender
+pkg_zabbix_sender_fetch = git
+pkg_zabbix_sender_repo = https://github.com/stalkermn/zabbix_sender.git
+pkg_zabbix_sender_commit = master
+
+PACKAGES += zeta
+pkg_zeta_name = zeta
+pkg_zeta_description = HTTP access log parser in Erlang
+pkg_zeta_homepage = https://github.com/s1n4/zeta
+pkg_zeta_fetch = git
+pkg_zeta_repo = https://github.com/s1n4/zeta
+pkg_zeta_commit = master
+
+PACKAGES += zippers
+pkg_zippers_name = zippers
+pkg_zippers_description = A library for functional zipper data structures in Erlang. Read more on zippers
+pkg_zippers_homepage = https://github.com/ferd/zippers
+pkg_zippers_fetch = git
+pkg_zippers_repo = https://github.com/ferd/zippers
+pkg_zippers_commit = master
+
+PACKAGES += zlists
+pkg_zlists_name = zlists
+pkg_zlists_description = Erlang lazy lists library.
+pkg_zlists_homepage = https://github.com/vjache/erlang-zlists
+pkg_zlists_fetch = git
+pkg_zlists_repo = https://github.com/vjache/erlang-zlists
+pkg_zlists_commit = master
+
+PACKAGES += zraft_lib
+pkg_zraft_lib_name = zraft_lib
+pkg_zraft_lib_description = Erlang raft consensus protocol implementation
+pkg_zraft_lib_homepage = https://github.com/dreyk/zraft_lib
+pkg_zraft_lib_fetch = git
+pkg_zraft_lib_repo = https://github.com/dreyk/zraft_lib
+pkg_zraft_lib_commit = master
+
+PACKAGES += zucchini
+pkg_zucchini_name = zucchini
+pkg_zucchini_description = An Erlang INI parser
+pkg_zucchini_homepage = https://github.com/devinus/zucchini
+pkg_zucchini_fetch = git
+pkg_zucchini_repo = https://github.com/devinus/zucchini
+pkg_zucchini_commit = master
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: search
+
+define pkg_print
+ $(verbose) printf "%s\n" \
+ $(if $(call core_eq,$(1),$(pkg_$(1)_name)),,"Pkg name: $(1)") \
+ "App name: $(pkg_$(1)_name)" \
+ "Description: $(pkg_$(1)_description)" \
+ "Home page: $(pkg_$(1)_homepage)" \
+ "Fetch with: $(pkg_$(1)_fetch)" \
+ "Repository: $(pkg_$(1)_repo)" \
+ "Commit: $(pkg_$(1)_commit)" \
+ ""
+
+endef
+
+search:
+ifdef q
+ $(foreach p,$(PACKAGES), \
+ $(if $(findstring $(call core_lc,$(q)),$(call core_lc,$(pkg_$(p)_name) $(pkg_$(p)_description))), \
+ $(call pkg_print,$(p))))
+else
+ $(foreach p,$(PACKAGES),$(call pkg_print,$(p)))
+endif
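+
+# Illustrative usage (annotation, not part of the upstream file):
+#   make search q=pool
+# matches "pool" case-insensitively against each package name and
+# description above (e.g. the poolboy, pooler, riakpool and worker_pool
+# entries) and prints the matches via pkg_print; a plain `make search`
+# lists every package.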
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-deps clean-tmp-deps.log
+
+# Configuration.
+
+ifdef OTP_DEPS
+$(warning The variable OTP_DEPS is deprecated in favor of LOCAL_DEPS.)
+endif
+
+IGNORE_DEPS ?=
+export IGNORE_DEPS
+
+APPS_DIR ?= $(CURDIR)/apps
+export APPS_DIR
+
+DEPS_DIR ?= $(CURDIR)/deps
+export DEPS_DIR
+
+REBAR_DEPS_DIR = $(DEPS_DIR)
+export REBAR_DEPS_DIR
+
+REBAR_GIT ?= https://github.com/rebar/rebar
+REBAR_COMMIT ?= 576e12171ab8d69b048b827b92aa65d067deea01
+
+# External "early" plugins (see core/plugins.mk for regular plugins).
+# They both use the core_dep_plugin macro.
+
+define core_dep_plugin
+ifeq ($(2),$(PROJECT))
+-include $$(patsubst $(PROJECT)/%,%,$(1))
+else
+-include $(DEPS_DIR)/$(1)
+
+$(DEPS_DIR)/$(1): $(DEPS_DIR)/$(2) ;
+endif
+endef
+
+DEP_EARLY_PLUGINS ?=
+
+$(foreach p,$(DEP_EARLY_PLUGINS),\
+ $(eval $(if $(findstring /,$p),\
+ $(call core_dep_plugin,$p,$(firstword $(subst /, ,$p))),\
+ $(call core_dep_plugin,$p/early-plugins.mk,$p))))
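+
+# Illustrative examples with a hypothetical dependency "mydep" (annotation,
+# not part of the upstream file):
+#   DEP_EARLY_PLUGINS = mydep            # includes $(DEPS_DIR)/mydep/early-plugins.mk
+#   DEP_EARLY_PLUGINS = mydep/mk/foo.mk  # includes $(DEPS_DIR)/mydep/mk/foo.mk
+# In both cases the plugin file is -include'd, and a rule makes it depend
+# on $(DEPS_DIR)/mydep so the dependency is fetched first when needed.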
+
+# Query functions.
+
+query_fetch_method = $(if $(dep_$(1)),$(call _qfm_dep,$(word 1,$(dep_$(1)))),$(call _qfm_pkg,$(1)))
+_qfm_dep = $(if $(dep_fetch_$(1)),$(1),$(if $(IS_DEP),legacy,fail))
+_qfm_pkg = $(if $(pkg_$(1)_fetch),$(pkg_$(1)_fetch),fail)
+
+query_name = $(if $(dep_$(1)),$(1),$(if $(pkg_$(1)_name),$(pkg_$(1)_name),$(1)))
+
+query_repo = $(call _qr,$(1),$(call query_fetch_method,$(1)))
+_qr = $(if $(query_repo_$(2)),$(call query_repo_$(2),$(1)),$(call dep_repo,$(1)))
+
+query_repo_default = $(if $(dep_$(1)),$(word 2,$(dep_$(1))),$(pkg_$(1)_repo))
+query_repo_git = $(patsubst git://github.com/%,https://github.com/%,$(call query_repo_default,$(1)))
+query_repo_git-subfolder = $(call query_repo_git,$(1))
+query_repo_git-submodule = -
+query_repo_hg = $(call query_repo_default,$(1))
+query_repo_svn = $(call query_repo_default,$(1))
+query_repo_cp = $(call query_repo_default,$(1))
+query_repo_ln = $(call query_repo_default,$(1))
+query_repo_hex = https://hex.pm/packages/$(if $(word 3,$(dep_$(1))),$(word 3,$(dep_$(1))),$(1))
+query_repo_fail = -
+query_repo_legacy = -
+
+query_version = $(call _qv,$(1),$(call query_fetch_method,$(1)))
+_qv = $(if $(query_version_$(2)),$(call query_version_$(2),$(1)),$(call dep_commit,$(1)))
+
+query_version_default = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(word 3,$(dep_$(1))),$(pkg_$(1)_commit)))
+query_version_git = $(call query_version_default,$(1))
+query_version_git-subfolder = $(call query_version_git,$(1))
+query_version_git-submodule = -
+query_version_hg = $(call query_version_default,$(1))
+query_version_svn = -
+query_version_cp = -
+query_version_ln = -
+query_version_hex = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(word 2,$(dep_$(1))),$(pkg_$(1)_commit)))
+query_version_fail = -
+query_version_legacy = -
+
+query_extra = $(call _qe,$(1),$(call query_fetch_method,$(1)))
+_qe = $(if $(query_extra_$(2)),$(call query_extra_$(2),$(1)),-)
+
+query_extra_git = -
+query_extra_git-subfolder = $(if $(dep_$(1)),subfolder=$(word 4,$(dep_$(1))),-)
+query_extra_git-submodule = -
+query_extra_hg = -
+query_extra_svn = -
+query_extra_cp = -
+query_extra_ln = -
+query_extra_hex = $(if $(dep_$(1)),package-name=$(word 3,$(dep_$(1))),-)
+query_extra_fail = -
+query_extra_legacy = -
+
+query_absolute_path = $(addprefix $(DEPS_DIR)/,$(call query_name,$(1)))
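+
+# Illustrative resolution for a hypothetical dependency (annotation, not
+# part of the upstream file): given
+#   dep_mydep = git git://github.com/user/mydep 1.0.0
+# query_fetch_method yields "git", query_repo rewrites the URL to
+# https://github.com/user/mydep, query_version yields "1.0.0" and
+# query_absolute_path yields $(DEPS_DIR)/mydep.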
+
+# Deprecated legacy query functions.
+dep_fetch = $(call query_fetch_method,$(1))
+dep_name = $(call query_name,$(1))
+dep_repo = $(call query_repo_git,$(1))
+dep_commit = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(if $(filter hex,$(word 1,$(dep_$(1)))),$(word 2,$(dep_$(1))),$(word 3,$(dep_$(1)))),$(pkg_$(1)_commit)))
+
+LOCAL_DEPS_DIRS = $(foreach a,$(LOCAL_DEPS),$(if $(wildcard $(APPS_DIR)/$(a)),$(APPS_DIR)/$(a)))
+ALL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(foreach dep,$(filter-out $(IGNORE_DEPS),$(BUILD_DEPS) $(DEPS)),$(call dep_name,$(dep))))
+
+# When we are calling an app directly we don't want to include it here
+# otherwise it'll be treated both as an app and as a top-level project.
+ALL_APPS_DIRS = $(if $(wildcard $(APPS_DIR)/),$(filter-out $(APPS_DIR),$(shell find $(APPS_DIR) -maxdepth 1 -type d)))
+ifdef ROOT_DIR
+ifndef IS_APP
+ALL_APPS_DIRS := $(filter-out $(APPS_DIR)/$(notdir $(CURDIR)),$(ALL_APPS_DIRS))
+endif
+endif
+
+ifeq ($(filter $(APPS_DIR) $(DEPS_DIR),$(subst :, ,$(ERL_LIBS))),)
+ifeq ($(ERL_LIBS),)
+ ERL_LIBS = $(APPS_DIR):$(DEPS_DIR)
+else
+ ERL_LIBS := $(ERL_LIBS):$(APPS_DIR):$(DEPS_DIR)
+endif
+endif
+export ERL_LIBS
+
+export NO_AUTOPATCH
+
+# Verbosity.
+
+dep_verbose_0 = @echo " DEP $1 ($(call dep_commit,$1))";
+dep_verbose_2 = set -x;
+dep_verbose = $(dep_verbose_$(V))
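+
+# Annotation (not part of the upstream file): V=0 prints a one-line
+# "DEP name (commit)" summary per fetch, V=2 traces the shell commands
+# with `set -x`, and any other V leaves the commands to be echoed as-is.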
+
+# Optimization: don't recompile deps unless truly necessary.
+
+ifndef IS_DEP
+ifneq ($(MAKELEVEL),0)
+$(shell rm -f ebin/dep_built)
+endif
+endif
+
+# Core targets.
+
+ALL_APPS_DIRS_TO_BUILD = $(if $(LOCAL_DEPS_DIRS)$(IS_APP),$(LOCAL_DEPS_DIRS),$(ALL_APPS_DIRS))
+
+apps:: $(ALL_APPS_DIRS) clean-tmp-deps.log | $(ERLANG_MK_TMP)
+# Create ebin directory for all apps to make sure Erlang recognizes them
+# as proper OTP applications when using -include_lib. This is a temporary
+# fix; a proper fix would be to compile apps/* in the right order.
+ifndef IS_APP
+ifneq ($(ALL_APPS_DIRS),)
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ mkdir -p $$dep/ebin; \
+ done
+endif
+endif
+# At the toplevel: if LOCAL_DEPS is defined with at least one local app, only
+# compile that list of apps. Otherwise, compile everything.
+# Within an app: compile all LOCAL_DEPS that are (uncompiled) local apps.
+ifneq ($(ALL_APPS_DIRS_TO_BUILD),)
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS_TO_BUILD); do \
+ if grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/apps.log; then \
+ :; \
+ else \
+ echo $$dep >> $(ERLANG_MK_TMP)/apps.log; \
+ $(MAKE) -C $$dep $(if $(IS_TEST),test-build-app) IS_APP=1; \
+ fi \
+ done
+endif
+
+clean-tmp-deps.log:
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_TMP)/apps.log $(ERLANG_MK_TMP)/deps.log
+endif
+
+# Erlang.mk does not rebuild dependencies after they have been compiled
+# once. Developers working on the top-level project and some of its
+# dependencies at the same time may want to change this behavior.
+# There are two solutions:
+# 1. Set `FULL=1` so that all dependencies are visited and
+#    recursively recompiled if necessary.
+# 2. Set `FORCE_REBUILD=` to the specific list of dependencies that
+#    should be recompiled (instead of the whole set).
+
+FORCE_REBUILD ?=
+
+ifeq ($(origin FULL),undefined)
+ifneq ($(strip $(force_rebuild_dep)$(FORCE_REBUILD)),)
+define force_rebuild_dep
+echo "$(FORCE_REBUILD)" | grep -qw "$$(basename "$1")"
+endef
+endif
+endif
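+
+# Illustrative usage (annotation, not part of the upstream file):
+#   make FULL=1                    # visit and recompile all deps as needed
+#   make FORCE_REBUILD="foo bar"   # force a rebuild of deps foo and bar only
+# where foo and bar stand for hypothetical dependency names.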
+
+ifneq ($(SKIP_DEPS),)
+deps::
+else
+deps:: $(ALL_DEPS_DIRS) apps clean-tmp-deps.log | $(ERLANG_MK_TMP)
+ifneq ($(ALL_DEPS_DIRS),)
+ $(verbose) set -e; for dep in $(ALL_DEPS_DIRS); do \
+ if grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/deps.log; then \
+ :; \
+ else \
+ echo $$dep >> $(ERLANG_MK_TMP)/deps.log; \
+ if [ -z "$(strip $(FULL))" ] $(if $(force_rebuild_dep),&& ! ($(call force_rebuild_dep,$$dep)),) && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ elif [ -f $$dep/GNUmakefile ] || [ -f $$dep/makefile ] || [ -f $$dep/Makefile ]; then \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ else \
+ echo "Error: No Makefile to build dependency $$dep." >&2; \
+ exit 2; \
+ fi \
+ fi \
+ done
+endif
+endif
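+
+# Annotation (not part of the upstream file): deps.log keeps a dependency
+# from being visited twice in a single run, while the ebin/dep_built marker
+# lets later runs skip already-compiled deps unless FULL or FORCE_REBUILD
+# applies; symlinked deps are never marked and are always rebuilt.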
+
+# Deps related targets.
+
+# @todo rename GNUmakefile and makefile into Makefile first, if they exist
+# While the Makefile could also be named GNUmakefile or makefile,
+# in practice only Makefile has been needed so far.
+define dep_autopatch
+ if [ -f $(DEPS_DIR)/$(1)/erlang.mk ]; then \
+ rm -rf $(DEPS_DIR)/$1/ebin/; \
+ $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+ $(call dep_autopatch_erlang_mk,$(1)); \
+ elif [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+ if [ -f $(DEPS_DIR)/$1/rebar.lock ]; then \
+ $(call dep_autopatch2,$1); \
+ elif [ 0 != `grep -c "include ../\w*\.mk" $(DEPS_DIR)/$(1)/Makefile` ]; then \
+ $(call dep_autopatch2,$(1)); \
+ elif [ 0 != `grep -ci "^[^#].*rebar" $(DEPS_DIR)/$(1)/Makefile` ]; then \
+ $(call dep_autopatch2,$(1)); \
+ elif [ -n "`find $(DEPS_DIR)/$(1)/ -type f -name \*.mk -not -name erlang.mk -exec grep -i "^[^#].*rebar" '{}' \;`" ]; then \
+ $(call dep_autopatch2,$(1)); \
+ fi \
+ else \
+ if [ ! -d $(DEPS_DIR)/$(1)/src/ ]; then \
+ $(call dep_autopatch_noop,$(1)); \
+ else \
+ $(call dep_autopatch2,$(1)); \
+ fi \
+ fi
+endef
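+
+# Annotation (not part of the upstream file): deps shipping their own
+# erlang.mk are patched in place; Makefile-based deps go through
+# dep_autopatch2 only when they carry a rebar.lock, include parent .mk
+# files, or reference rebar; deps without a Makefile get a noop Makefile
+# when they have no src/ directory, and dep_autopatch2 otherwise.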
+
+define dep_autopatch2
+ ! test -f $(DEPS_DIR)/$1/ebin/$1.app || \
+ mv -n $(DEPS_DIR)/$1/ebin/$1.app $(DEPS_DIR)/$1/src/$1.app.src; \
+ rm -f $(DEPS_DIR)/$1/ebin/$1.app; \
+ if [ -f $(DEPS_DIR)/$1/src/$1.app.src.script ]; then \
+ $(call erlang,$(call dep_autopatch_appsrc_script.erl,$(1))); \
+ fi; \
+ $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+ if [ -f $(DEPS_DIR)/$(1)/rebar -o -f $(DEPS_DIR)/$(1)/rebar.config -o -f $(DEPS_DIR)/$(1)/rebar.config.script -o -f $(DEPS_DIR)/$1/rebar.lock ]; then \
+ $(call dep_autopatch_fetch_rebar); \
+ $(call dep_autopatch_rebar,$(1)); \
+ else \
+ $(call dep_autopatch_gen,$(1)); \
+ fi
+endef
+
+define dep_autopatch_noop
+ printf "noop:\n" > $(DEPS_DIR)/$(1)/Makefile
+endef
+
+# Replace "include erlang.mk" with a line that will load the parent Erlang.mk
+# when one is given. Do this for all 3 possible Makefile file names.
+ifeq ($(NO_AUTOPATCH_ERLANG_MK),)
+define dep_autopatch_erlang_mk
+ for f in Makefile makefile GNUmakefile; do \
+ if [ -f $(DEPS_DIR)/$1/$$f ]; then \
+ sed -i.bak s/'include *erlang.mk'/'include $$(if $$(ERLANG_MK_FILENAME),$$(ERLANG_MK_FILENAME),erlang.mk)'/ $(DEPS_DIR)/$1/$$f; \
+ fi \
+ done
+endef
+else
+define dep_autopatch_erlang_mk
+ :
+endef
+endif
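+
+# Illustrative effect (annotation, not part of the upstream file): a
+# dependency's "include erlang.mk" line becomes
+#   include $(if $(ERLANG_MK_FILENAME),$(ERLANG_MK_FILENAME),erlang.mk)
+# so the dependency reuses the parent project's Erlang.mk when its
+# filename is exported, and falls back to its own copy otherwise.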
+
+define dep_autopatch_gen
+ printf "%s\n" \
+ "ERLC_OPTS = +debug_info" \
+ "include ../../erlang.mk" > $(DEPS_DIR)/$(1)/Makefile
+endef
+
+# We use flock/lockf when available to avoid concurrency issues.
+define dep_autopatch_fetch_rebar
+ if command -v flock >/dev/null; then \
+ flock $(ERLANG_MK_TMP)/rebar.lock sh -c "$(call dep_autopatch_fetch_rebar2)"; \
+ elif command -v lockf >/dev/null; then \
+ lockf $(ERLANG_MK_TMP)/rebar.lock sh -c "$(call dep_autopatch_fetch_rebar2)"; \
+ else \
+ $(call dep_autopatch_fetch_rebar2); \
+ fi
+endef
+
+define dep_autopatch_fetch_rebar2
+ if [ ! -d $(ERLANG_MK_TMP)/rebar ]; then \
+ git clone -q -n -- $(REBAR_GIT) $(ERLANG_MK_TMP)/rebar; \
+ cd $(ERLANG_MK_TMP)/rebar; \
+ git checkout -q $(REBAR_COMMIT); \
+ ./bootstrap; \
+ cd -; \
+ fi
+endef
+
+define dep_autopatch_rebar
+ if [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+ mv $(DEPS_DIR)/$(1)/Makefile $(DEPS_DIR)/$(1)/Makefile.orig.mk; \
+ fi; \
+ $(call erlang,$(call dep_autopatch_rebar.erl,$(1))); \
+ rm -f $(DEPS_DIR)/$(1)/ebin/$(1).app
+endef
+
+define dep_autopatch_rebar.erl
+ application:load(rebar),
+ application:set_env(rebar, log_level, debug),
+ rmemo:start(),
+ Conf1 = case file:consult("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config)") of
+ {ok, Conf0} -> Conf0;
+ _ -> []
+ end,
+ {Conf, OsEnv} = fun() ->
+ case filelib:is_file("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)") of
+ false -> {Conf1, []};
+ true ->
+ Bindings0 = erl_eval:new_bindings(),
+ Bindings1 = erl_eval:add_binding('CONFIG', Conf1, Bindings0),
+ Bindings = erl_eval:add_binding('SCRIPT', "$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings1),
+ Before = os:getenv(),
+ {ok, Conf2} = file:script("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings),
+ {Conf2, lists:foldl(fun(E, Acc) -> lists:delete(E, Acc) end, os:getenv(), Before)}
+ end
+ end(),
+ Write = fun (Text) ->
+ file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/Makefile)", Text, [append])
+ end,
+ Escape = fun (Text) ->
+ re:replace(Text, "\\\\$$", "\$$$$", [global, {return, list}])
+ end,
+ Write("IGNORE_DEPS += edown eper eunit_formatters meck node_package "
+ "rebar_lock_deps_plugin rebar_vsn_plugin reltool_util\n"),
+ Write("C_SRC_DIR = /path/do/not/exist\n"),
+ Write("C_SRC_TYPE = rebar\n"),
+ Write("DRV_CFLAGS = -fPIC\nexport DRV_CFLAGS\n"),
+ Write(["ERLANG_ARCH = ", rebar_utils:wordsize(), "\nexport ERLANG_ARCH\n"]),
+ ToList = fun
+ (V) when is_atom(V) -> atom_to_list(V);
+ (V) when is_list(V) -> "'\\"" ++ V ++ "\\"'"
+ end,
+ fun() ->
+ Write("ERLC_OPTS = +debug_info\nexport ERLC_OPTS\n"),
+ case lists:keyfind(erl_opts, 1, Conf) of
+ false -> ok;
+ {_, ErlOpts} ->
+ lists:foreach(fun
+ ({d, D}) ->
+ Write("ERLC_OPTS += -D" ++ ToList(D) ++ "=1\n");
+ ({d, DKey, DVal}) ->
+ Write("ERLC_OPTS += -D" ++ ToList(DKey) ++ "=" ++ ToList(DVal) ++ "\n");
+ ({i, I}) ->
+ Write(["ERLC_OPTS += -I ", I, "\n"]);
+ ({platform_define, Regex, D}) ->
+ case rebar_utils:is_arch(Regex) of
+ true -> Write("ERLC_OPTS += -D" ++ ToList(D) ++ "=1\n");
+ false -> ok
+ end;
+ ({parse_transform, PT}) ->
+ Write("ERLC_OPTS += +'{parse_transform, " ++ ToList(PT) ++ "}'\n");
+ (_) -> ok
+ end, ErlOpts)
+ end,
+ Write("\n")
+ end(),
+ GetHexVsn = fun(N, NP) ->
+ case file:consult("$(call core_native_path,$(DEPS_DIR)/$1/rebar.lock)") of
+ {ok, Lock} ->
+ io:format("~p~n", [Lock]),
+ case lists:keyfind("1.1.0", 1, Lock) of
+ {_, LockPkgs} ->
+ io:format("~p~n", [LockPkgs]),
+ case lists:keyfind(atom_to_binary(N, latin1), 1, LockPkgs) of
+ {_, {pkg, _, Vsn}, _} ->
+ io:format("~p~n", [Vsn]),
+ {N, {hex, NP, binary_to_list(Vsn)}};
+ _ ->
+ false
+ end;
+ _ ->
+ false
+ end;
+ _ ->
+ false
+ end
+ end,
+ SemVsn = fun
+ ("~>" ++ S0) ->
+ S = case S0 of
+ " " ++ S1 -> S1;
+ _ -> S0
+ end,
+ case length([ok || $$. <- S]) of
+ 0 -> S ++ ".0.0";
+ 1 -> S ++ ".0";
+ _ -> S
+ end;
+ (S) -> S
+ end,
+ fun() ->
+ File = case lists:keyfind(deps, 1, Conf) of
+ false -> [];
+ {_, Deps} ->
+ [begin case case Dep of
+ N when is_atom(N) -> GetHexVsn(N, N);
+ {N, S} when is_atom(N), is_list(S) -> {N, {hex, N, SemVsn(S)}};
+ {N, {pkg, NP}} when is_atom(N) -> GetHexVsn(N, NP);
+ {N, S, {pkg, NP}} -> {N, {hex, NP, S}};
+ {N, S} when is_tuple(S) -> {N, S};
+ {N, _, S} -> {N, S};
+ {N, _, S, _} -> {N, S};
+ _ -> false
+ end of
+ false -> ok;
+ {Name, Source} ->
+ {Method, Repo, Commit} = case Source of
+ {hex, NPV, V} -> {hex, V, NPV};
+ {git, R} -> {git, R, master};
+ {M, R, {branch, C}} -> {M, R, C};
+ {M, R, {ref, C}} -> {M, R, C};
+ {M, R, {tag, C}} -> {M, R, C};
+ {M, R, C} -> {M, R, C}
+ end,
+ Write(io_lib:format("DEPS += ~s\ndep_~s = ~s ~s ~s~n", [Name, Name, Method, Repo, Commit]))
+ end end || Dep <- Deps]
+ end
+ end(),
+ fun() ->
+ case lists:keyfind(erl_first_files, 1, Conf) of
+ false -> ok;
+ {_, Files} ->
+ Names = [[" ", case lists:reverse(F) of
+ "lre." ++ Elif -> lists:reverse(Elif);
+ "lrx." ++ Elif -> lists:reverse(Elif);
+ "lry." ++ Elif -> lists:reverse(Elif);
+ Elif -> lists:reverse(Elif)
+ end] || "src/" ++ F <- Files],
+ Write(io_lib:format("COMPILE_FIRST +=~s\n", [Names]))
+ end
+ end(),
+ Write("\n\nrebar_dep: preprocess pre-deps deps pre-app app\n"),
+ Write("\npreprocess::\n"),
+ Write("\npre-deps::\n"),
+ Write("\npre-app::\n"),
+ PatchHook = fun(Cmd) ->
+ Cmd2 = re:replace(Cmd, "^([g]?make)(.*)( -C.*)", "\\\\1\\\\3\\\\2", [{return, list}]),
+ case Cmd2 of
+ "make -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1);
+ "gmake -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1);
+ "make " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1);
+ "gmake " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1);
+ _ -> Escape(Cmd)
+ end
+ end,
+ fun() ->
+ case lists:keyfind(pre_hooks, 1, Conf) of
+ false -> ok;
+ {_, Hooks} ->
+ [case H of
+ {'get-deps', Cmd} ->
+ Write("\npre-deps::\n\t" ++ PatchHook(Cmd) ++ "\n");
+ {compile, Cmd} ->
+ Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n");
+ {Regex, compile, Cmd} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n");
+ false -> ok
+ end;
+ _ -> ok
+ end || H <- Hooks]
+ end
+ end(),
+ ShellToMk = fun(V0) ->
+ V1 = re:replace(V0, "[$$][(]", "$$\(shell ", [global]),
+ V = re:replace(V1, "([$$])(?![(])(\\\\w*)", "\\\\1(\\\\2)", [global]),
+ re:replace(V, "-Werror\\\\b", "", [{return, list}, global])
+ end,
+ PortSpecs = fun() ->
+ case lists:keyfind(port_specs, 1, Conf) of
+ false ->
+ case filelib:is_dir("$(call core_native_path,$(DEPS_DIR)/$1/c_src)") of
+ false -> [];
+ true ->
+ [{"priv/" ++ proplists:get_value(so_name, Conf, "$(1)_drv.so"),
+ proplists:get_value(port_sources, Conf, ["c_src/*.c"]), []}]
+ end;
+ {_, Specs} ->
+ lists:flatten([case S of
+ {Output, Input} -> {ShellToMk(Output), Input, []};
+ {Regex, Output, Input} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {ShellToMk(Output), Input, []};
+ false -> []
+ end;
+ {Regex, Output, Input, [{env, Env}]} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {ShellToMk(Output), Input, Env};
+ false -> []
+ end
+ end || S <- Specs])
+ end
+ end(),
+ PortSpecWrite = fun (Text) ->
+ file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/c_src/Makefile.erlang.mk)", Text, [append])
+ end,
+ case PortSpecs of
+ [] -> ok;
+ _ ->
+ Write("\npre-app::\n\t@$$\(MAKE) --no-print-directory -f c_src/Makefile.erlang.mk\n"),
+ PortSpecWrite(io_lib:format("ERL_CFLAGS ?= -finline-functions -Wall -fPIC -I \\"~s/erts-~s/include\\" -I \\"~s\\"\n",
+ [code:root_dir(), erlang:system_info(version), code:lib_dir(erl_interface, include)])),
+ PortSpecWrite(io_lib:format("ERL_LDFLAGS ?= -L \\"~s\\" -lei\n",
+ [code:lib_dir(erl_interface, lib)])),
+ [PortSpecWrite(["\n", E, "\n"]) || E <- OsEnv],
+ FilterEnv = fun(Env) ->
+ lists:flatten([case E of
+ {_, _} -> E;
+ {Regex, K, V} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {K, V};
+ false -> []
+ end
+ end || E <- Env])
+ end,
+ MergeEnv = fun(Env) ->
+ lists:foldl(fun ({K, V}, Acc) ->
+ case lists:keyfind(K, 1, Acc) of
+ false -> [{K, rebar_utils:expand_env_variable(V, K, "")}|Acc];
+ {_, V0} -> [{K, rebar_utils:expand_env_variable(V, K, V0)}|Acc]
+ end
+ end, [], Env)
+ end,
+ PortEnv = case lists:keyfind(port_env, 1, Conf) of
+ false -> [];
+ {_, PortEnv0} -> FilterEnv(PortEnv0)
+ end,
+ PortSpec = fun ({Output, Input0, Env}) ->
+ filelib:ensure_dir("$(call core_native_path,$(DEPS_DIR)/$1/)" ++ Output),
+ Input = [[" ", I] || I <- Input0],
+ PortSpecWrite([
+ [["\n", K, " = ", ShellToMk(V)] || {K, V} <- lists:reverse(MergeEnv(PortEnv))],
+ case $(PLATFORM) of
+ darwin -> "\n\nLDFLAGS += -flat_namespace -undefined suppress";
+ _ -> ""
+ end,
+ "\n\nall:: ", Output, "\n\t@:\n\n",
+ "%.o: %.c\n\t$$\(CC) -c -o $$\@ $$\< $$\(CFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.C\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.cc\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.cpp\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ [[Output, ": ", K, " += ", ShellToMk(V), "\n"] || {K, V} <- lists:reverse(MergeEnv(FilterEnv(Env)))],
+ Output, ": $$\(foreach ext,.c .C .cc .cpp,",
+ "$$\(patsubst %$$\(ext),%.o,$$\(filter %$$\(ext),$$\(wildcard", Input, "))))\n",
+ "\t$$\(CC) -o $$\@ $$\? $$\(LDFLAGS) $$\(ERL_LDFLAGS) $$\(DRV_LDFLAGS) $$\(EXE_LDFLAGS)",
+ case {filename:extension(Output), $(PLATFORM)} of
+ {[], _} -> "\n";
+ {_, darwin} -> "\n";
+ _ -> " -shared\n"
+ end])
+ end,
+ [PortSpec(S) || S <- PortSpecs]
+ end,
+ fun() ->
+ case lists:keyfind(plugins, 1, Conf) of
+ false -> ok;
+ {_, Plugins0} ->
+ Plugins = [P || P <- Plugins0, is_tuple(P)],
+ case lists:keyfind('lfe-compile', 1, Plugins) of
+ false -> ok;
+ _ -> Write("\nBUILD_DEPS = lfe lfe.mk\ndep_lfe.mk = git https://github.com/ninenines/lfe.mk master\nDEP_PLUGINS = lfe.mk\n")
+ end
+ end
+ end(),
+ Write("\ninclude $$\(if $$\(ERLANG_MK_FILENAME),$$\(ERLANG_MK_FILENAME),erlang.mk)"),
+ RunPlugin = fun(Plugin, Step) ->
+ case erlang:function_exported(Plugin, Step, 2) of
+ false -> ok;
+ true ->
+ c:cd("$(call core_native_path,$(DEPS_DIR)/$1/)"),
+ Ret = Plugin:Step({config, "", Conf, dict:new(), dict:new(), dict:new(),
+ dict:store(base_dir, "", dict:new())}, undefined),
+ io:format("rebar plugin ~p step ~p ret ~p~n", [Plugin, Step, Ret])
+ end
+ end,
+ fun() ->
+ case lists:keyfind(plugins, 1, Conf) of
+ false -> ok;
+ {_, Plugins0} ->
+ Plugins = [P || P <- Plugins0, is_atom(P)],
+ [begin
+ case lists:keyfind(deps, 1, Conf) of
+ false -> ok;
+ {_, Deps} ->
+ case lists:keyfind(P, 1, Deps) of
+ false -> ok;
+ _ ->
+ Path = "$(call core_native_path,$(DEPS_DIR)/)" ++ atom_to_list(P),
+ io:format("~s", [os:cmd("$(MAKE) -C $(call core_native_path,$(DEPS_DIR)/$1) " ++ Path)]),
+ io:format("~s", [os:cmd("$(MAKE) -C " ++ Path ++ " IS_DEP=1")]),
+ code:add_patha(Path ++ "/ebin")
+ end
+ end
+ end || P <- Plugins],
+ [case code:load_file(P) of
+ {module, P} -> ok;
+ _ ->
+ case lists:keyfind(plugin_dir, 1, Conf) of
+ false -> ok;
+ {_, PluginsDir} ->
+ ErlFile = "$(call core_native_path,$(DEPS_DIR)/$1/)" ++ PluginsDir ++ "/" ++ atom_to_list(P) ++ ".erl",
+ {ok, P, Bin} = compile:file(ErlFile, [binary]),
+ {module, P} = code:load_binary(P, ErlFile, Bin)
+ end
+ end || P <- Plugins],
+ [RunPlugin(P, preprocess) || P <- Plugins],
+ [RunPlugin(P, pre_compile) || P <- Plugins],
+ [RunPlugin(P, compile) || P <- Plugins]
+ end
+ end(),
+ halt()
+endef
+
+define dep_autopatch_appsrc_script.erl
+ AppSrc = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+ AppSrcScript = AppSrc ++ ".script",
+ {ok, Conf0} = file:consult(AppSrc),
+ Bindings0 = erl_eval:new_bindings(),
+ Bindings1 = erl_eval:add_binding('CONFIG', Conf0, Bindings0),
+ Bindings = erl_eval:add_binding('SCRIPT', AppSrcScript, Bindings1),
+ Conf = case file:script(AppSrcScript, Bindings) of
+ {ok, [C]} -> C;
+ {ok, C} -> C
+ end,
+ ok = file:write_file(AppSrc, io_lib:format("~p.~n", [Conf])),
+ halt()
+endef
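+
+# A minimal sketch of a .app.src.script (the file name and version bump are
+# hypothetical). The script above evaluates it with CONFIG bound to the terms
+# consulted from the .app.src file and writes the result back:
+#
+#   %% src/my_app.app.src.script
+#   [{application, App, Opts}] = CONFIG,
+#   [{application, App, lists:keystore(vsn, 1, Opts, {vsn, "1.2.3"})}].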
+
+define dep_autopatch_appsrc.erl
+ AppSrcOut = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+ AppSrcIn = case filelib:is_regular(AppSrcOut) of false -> "$(call core_native_path,$(DEPS_DIR)/$1/ebin/$1.app)"; true -> AppSrcOut end,
+ case filelib:is_regular(AppSrcIn) of
+ false -> ok;
+ true ->
+ {ok, [{application, $(1), L0}]} = file:consult(AppSrcIn),
+ L1 = lists:keystore(modules, 1, L0, {modules, []}),
+ L2 = case lists:keyfind(vsn, 1, L1) of
+ {_, git} -> lists:keyreplace(vsn, 1, L1, {vsn, lists:droplast(os:cmd("git -C $(DEPS_DIR)/$1 describe --dirty --tags --always"))});
+ {_, {cmd, _}} -> lists:keyreplace(vsn, 1, L1, {vsn, "cmd"});
+ _ -> L1
+ end,
+ L3 = case lists:keyfind(registered, 1, L2) of false -> [{registered, []}|L2]; _ -> L2 end,
+ ok = file:write_file(AppSrcOut, io_lib:format("~p.~n", [{application, $(1), L3}])),
+ case AppSrcOut of AppSrcIn -> ok; _ -> ok = file:delete(AppSrcIn) end
+ end,
+ halt()
+endef
+
+define dep_fetch_git
+ git clone -q -n -- $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && git checkout -q $(call dep_commit,$(1));
+endef
+
+define dep_fetch_git-subfolder
+ mkdir -p $(ERLANG_MK_TMP)/git-subfolder; \
+ git clone -q -n -- $(call dep_repo,$1) \
+ $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1); \
+ cd $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1) \
+ && git checkout -q $(call dep_commit,$1); \
+ ln -s $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1)/$(word 4,$(dep_$(1))) \
+ $(DEPS_DIR)/$(call dep_name,$1);
+endef
+
+define dep_fetch_git-submodule
+ git submodule update --init -- $(DEPS_DIR)/$1;
+endef
+
+define dep_fetch_hg
+ hg clone -q -U $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && hg update -q $(call dep_commit,$(1));
+endef
+
+define dep_fetch_svn
+ svn checkout -q $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+define dep_fetch_cp
+ cp -R $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+define dep_fetch_ln
+ ln -s $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+# Hex dependencies are identified by package name and version only, so there
+# is no need to look them up in the Erlang.mk package index.
+define dep_fetch_hex
+ mkdir -p $(ERLANG_MK_TMP)/hex $(DEPS_DIR)/$1; \
+ $(call core_http_get,$(ERLANG_MK_TMP)/hex/$1.tar,\
+ https://repo.hex.pm/tarballs/$(if $(word 3,$(dep_$1)),$(word 3,$(dep_$1)),$1)-$(strip $(word 2,$(dep_$1))).tar); \
+ tar -xOf $(ERLANG_MK_TMP)/hex/$1.tar contents.tar.gz | tar -C $(DEPS_DIR)/$1 -xzf -;
+endef
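+
+# For illustration (the package name and version are hypothetical), a Hex
+# dependency is declared in a user Makefile as "dep_NAME = hex VSN [PKG]",
+# where the optional third word renames the fetched package:
+#
+#   DEPS = cowlib
+#   dep_cowlib = hex 2.9.1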
+
+define dep_fetch_fail
+ echo "Error: Unknown or invalid dependency: $(1)." >&2; \
+ exit 78;
+endef
+
+# Kept for compatibility with older Erlang.mk dependency configurations.
+define dep_fetch_legacy
+ $(warning WARNING: '$(1)' dependency configuration uses deprecated format.) \
+ git clone -q -n -- $(word 1,$(dep_$(1))) $(DEPS_DIR)/$(1); \
+ cd $(DEPS_DIR)/$(1) && git checkout -q $(if $(word 2,$(dep_$(1))),$(word 2,$(dep_$(1))),master);
+endef
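+
+# The legacy format above only gave "dep_NAME = REPO [COMMIT]" and assumed
+# git. The modern equivalent names the fetch method explicitly, e.g.
+# (repository and tag are illustrative):
+#
+#   dep_cowboy = git https://github.com/ninenines/cowboy 2.8.0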
+
+define dep_target
+$(DEPS_DIR)/$(call dep_name,$1): | $(ERLANG_MK_TMP)
+ $(eval DEP_NAME := $(call dep_name,$1))
+ $(eval DEP_STR := $(if $(filter $1,$(DEP_NAME)),$1,"$1 ($(DEP_NAME))"))
+ $(verbose) if test -d $(APPS_DIR)/$(DEP_NAME); then \
+ echo "Error: Dependency" $(DEP_STR) "conflicts with application found in $(APPS_DIR)/$(DEP_NAME)." >&2; \
+ exit 17; \
+ fi
+ $(verbose) mkdir -p $(DEPS_DIR)
+ $(dep_verbose) $(call dep_fetch_$(strip $(call dep_fetch,$(1))),$(1))
+ $(verbose) if [ -f $(DEPS_DIR)/$(1)/configure.ac -o -f $(DEPS_DIR)/$(1)/configure.in ] \
+ && [ ! -f $(DEPS_DIR)/$(1)/configure ]; then \
+ echo " AUTO " $(DEP_STR); \
+ cd $(DEPS_DIR)/$(1) && autoreconf -Wall -vif -I m4; \
+ fi
+ - $(verbose) if [ -f $(DEPS_DIR)/$(DEP_NAME)/configure ]; then \
+ echo " CONF " $(DEP_STR); \
+ cd $(DEPS_DIR)/$(DEP_NAME) && ./configure; \
+ fi
+ifeq ($(filter $(1),$(NO_AUTOPATCH)),)
+ $(verbose) $$(MAKE) --no-print-directory autopatch-$(DEP_NAME)
+endif
+
+.PHONY: autopatch-$(call dep_name,$1)
+
+autopatch-$(call dep_name,$1)::
+ $(verbose) if [ "$(1)" = "amqp_client" -a "$(RABBITMQ_CLIENT_PATCH)" ]; then \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \
+ echo " PATCH Downloading rabbitmq-codegen"; \
+ git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \
+ fi; \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-server ]; then \
+ echo " PATCH Downloading rabbitmq-server"; \
+ git clone https://github.com/rabbitmq/rabbitmq-server.git $(DEPS_DIR)/rabbitmq-server; \
+ fi; \
+ ln -s $(DEPS_DIR)/amqp_client/deps/rabbit_common-0.0.0 $(DEPS_DIR)/rabbit_common; \
+ elif [ "$(1)" = "rabbit" -a "$(RABBITMQ_SERVER_PATCH)" ]; then \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \
+ echo " PATCH Downloading rabbitmq-codegen"; \
+ git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \
+		fi; \
+ elif [ "$1" = "elixir" -a "$(ELIXIR_PATCH)" ]; then \
+ ln -s lib/elixir/ebin $(DEPS_DIR)/elixir/; \
+ else \
+ $$(call dep_autopatch,$(call dep_name,$1)) \
+ fi
+endef
+
+$(foreach dep,$(BUILD_DEPS) $(DEPS),$(eval $(call dep_target,$(dep))))
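+
+# A sketch of what the line above expands to: for DEPS = cowlib (name
+# illustrative), $(eval $(call dep_target,cowlib)) instantiates a
+# $(DEPS_DIR)/cowlib fetch/configure rule plus the matching
+# autopatch-cowlib target.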
+
+ifndef IS_APP
+clean:: clean-apps
+
+clean-apps:
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ $(MAKE) -C $$dep clean IS_APP=1; \
+ done
+
+distclean:: distclean-apps
+
+distclean-apps:
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ $(MAKE) -C $$dep distclean IS_APP=1; \
+ done
+endif
+
+ifndef SKIP_DEPS
+distclean:: distclean-deps
+
+distclean-deps:
+ $(gen_verbose) rm -rf $(DEPS_DIR)
+endif
+
+# Forward-declare variables used in core/deps-tools.mk. This is required
+# in case plugins use them.
+
+ERLANG_MK_RECURSIVE_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-deps-list.log
+ERLANG_MK_RECURSIVE_DOC_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-doc-deps-list.log
+ERLANG_MK_RECURSIVE_REL_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-rel-deps-list.log
+ERLANG_MK_RECURSIVE_TEST_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-test-deps-list.log
+ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-shell-deps-list.log
+
+ERLANG_MK_QUERY_DEPS_FILE = $(ERLANG_MK_TMP)/query-deps.log
+ERLANG_MK_QUERY_DOC_DEPS_FILE = $(ERLANG_MK_TMP)/query-doc-deps.log
+ERLANG_MK_QUERY_REL_DEPS_FILE = $(ERLANG_MK_TMP)/query-rel-deps.log
+ERLANG_MK_QUERY_TEST_DEPS_FILE = $(ERLANG_MK_TMP)/query-test-deps.log
+ERLANG_MK_QUERY_SHELL_DEPS_FILE = $(ERLANG_MK_TMP)/query-shell-deps.log
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: clean-app
+
+# Configuration.
+
+ERLC_OPTS ?= -Werror +debug_info +warn_export_vars +warn_shadow_vars \
+ +warn_obsolete_guard # +bin_opt_info +warn_export_all +warn_missing_spec
+COMPILE_FIRST ?=
+COMPILE_FIRST_PATHS = $(addprefix src/,$(addsuffix .erl,$(COMPILE_FIRST)))
+ERLC_EXCLUDE ?=
+ERLC_EXCLUDE_PATHS = $(addprefix src/,$(addsuffix .erl,$(ERLC_EXCLUDE)))
+
+ERLC_ASN1_OPTS ?=
+
+ERLC_MIB_OPTS ?=
+COMPILE_MIB_FIRST ?=
+COMPILE_MIB_FIRST_PATHS = $(addprefix mibs/,$(addsuffix .mib,$(COMPILE_MIB_FIRST)))
+
+# Verbosity.
+
+app_verbose_0 = @echo " APP " $(PROJECT);
+app_verbose_2 = set -x;
+app_verbose = $(app_verbose_$(V))
+
+appsrc_verbose_0 = @echo " APP " $(PROJECT).app.src;
+appsrc_verbose_2 = set -x;
+appsrc_verbose = $(appsrc_verbose_$(V))
+
+makedep_verbose_0 = @echo " DEPEND" $(PROJECT).d;
+makedep_verbose_2 = set -x;
+makedep_verbose = $(makedep_verbose_$(V))
+
+erlc_verbose_0 = @echo " ERLC " $(filter-out $(patsubst %,%.erl,$(ERLC_EXCLUDE)),\
+ $(filter %.erl %.core,$(?F)));
+erlc_verbose_2 = set -x;
+erlc_verbose = $(erlc_verbose_$(V))
+
+xyrl_verbose_0 = @echo " XYRL " $(filter %.xrl %.yrl,$(?F));
+xyrl_verbose_2 = set -x;
+xyrl_verbose = $(xyrl_verbose_$(V))
+
+asn1_verbose_0 = @echo " ASN1 " $(filter %.asn1,$(?F));
+asn1_verbose_2 = set -x;
+asn1_verbose = $(asn1_verbose_$(V))
+
+mib_verbose_0 = @echo " MIB " $(filter %.bin %.mib,$(?F));
+mib_verbose_2 = set -x;
+mib_verbose = $(mib_verbose_$(V))
+
+ifneq ($(wildcard src/),)
+
+# Targets.
+
+app:: $(if $(wildcard ebin/test),clean) deps
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d
+ $(verbose) $(MAKE) --no-print-directory app-build
+
+ifeq ($(wildcard src/$(PROJECT_MOD).erl),)
+define app_file
+{application, '$(PROJECT)', [
+ {description, "$(PROJECT_DESCRIPTION)"},
+ {vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP),
+ {id$(comma)$(space)"$(1)"}$(comma))
+ {modules, [$(call comma_list,$(2))]},
+ {registered, []},
+ {applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(foreach dep,$(DEPS),$(call dep_name,$(dep))))]},
+ {env, $(subst \,\\,$(PROJECT_ENV))}$(if $(findstring {,$(PROJECT_APP_EXTRA_KEYS)),$(comma)$(newline)$(tab)$(subst \,\\,$(PROJECT_APP_EXTRA_KEYS)),)
+]}.
+endef
+else
+define app_file
+{application, '$(PROJECT)', [
+ {description, "$(PROJECT_DESCRIPTION)"},
+ {vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP),
+ {id$(comma)$(space)"$(1)"}$(comma))
+ {modules, [$(call comma_list,$(2))]},
+ {registered, [$(call comma_list,$(PROJECT)_sup $(PROJECT_REGISTERED))]},
+ {applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(foreach dep,$(DEPS),$(call dep_name,$(dep))))]},
+ {mod, {$(PROJECT_MOD), []}},
+ {env, $(subst \,\\,$(PROJECT_ENV))}$(if $(findstring {,$(PROJECT_APP_EXTRA_KEYS)),$(comma)$(newline)$(tab)$(subst \,\\,$(PROJECT_APP_EXTRA_KEYS)),)
+]}.
+endef
+endif
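+
+# A sketch of the rendered .app file for a hypothetical project "my_app"
+# with PROJECT_MOD = my_app_app (all values illustrative):
+#
+#   {application, 'my_app', [
+#       {description, "New project"},
+#       {vsn, "0.1.0"},
+#       {modules, ['my_app_app','my_app_sup']},
+#       {registered, [my_app_sup]},
+#       {applications, [kernel,stdlib]},
+#       {mod, {my_app_app, []}},
+#       {env, []}
+#   ]}.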
+
+app-build: ebin/$(PROJECT).app
+ $(verbose) :
+
+# Source files.
+
+ALL_SRC_FILES := $(sort $(call core_find,src/,*))
+
+ERL_FILES := $(filter %.erl,$(ALL_SRC_FILES))
+CORE_FILES := $(filter %.core,$(ALL_SRC_FILES))
+
+# ASN.1 files.
+
+ifneq ($(wildcard asn1/),)
+ASN1_FILES = $(sort $(call core_find,asn1/,*.asn1))
+ERL_FILES += $(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES))))
+
+define compile_asn1
+ $(verbose) mkdir -p include/
+ $(asn1_verbose) erlc -v -I include/ -o asn1/ +noobj $(ERLC_ASN1_OPTS) $(1)
+ $(verbose) mv asn1/*.erl src/
+ -$(verbose) mv asn1/*.hrl include/
+ $(verbose) mv asn1/*.asn1db include/
+endef
+
+$(PROJECT).d:: $(ASN1_FILES)
+ $(if $(strip $?),$(call compile_asn1,$?))
+endif
+
+# SNMP MIB files.
+
+ifneq ($(wildcard mibs/),)
+MIB_FILES = $(sort $(call core_find,mibs/,*.mib))
+
+$(PROJECT).d:: $(COMPILE_MIB_FIRST_PATHS) $(MIB_FILES)
+ $(verbose) mkdir -p include/ priv/mibs/
+ $(mib_verbose) erlc -v $(ERLC_MIB_OPTS) -o priv/mibs/ -I priv/mibs/ $?
+ $(mib_verbose) erlc -o include/ -- $(addprefix priv/mibs/,$(patsubst %.mib,%.bin,$(notdir $?)))
+endif
+
+# Leex and Yecc files.
+
+XRL_FILES := $(filter %.xrl,$(ALL_SRC_FILES))
+XRL_ERL_FILES = $(addprefix src/,$(patsubst %.xrl,%.erl,$(notdir $(XRL_FILES))))
+ERL_FILES += $(XRL_ERL_FILES)
+
+YRL_FILES := $(filter %.yrl,$(ALL_SRC_FILES))
+YRL_ERL_FILES = $(addprefix src/,$(patsubst %.yrl,%.erl,$(notdir $(YRL_FILES))))
+ERL_FILES += $(YRL_ERL_FILES)
+
+$(PROJECT).d:: $(XRL_FILES) $(YRL_FILES)
+ $(if $(strip $?),$(xyrl_verbose) erlc -v -o src/ $(YRL_ERLC_OPTS) $?)
+
+# Erlang and Core Erlang files.
+
+define makedep.erl
+ E = ets:new(makedep, [bag]),
+ G = digraph:new([acyclic]),
+ ErlFiles = lists:usort(string:tokens("$(ERL_FILES)", " ")),
+ DepsDir = "$(call core_native_path,$(DEPS_DIR))",
+ AppsDir = "$(call core_native_path,$(APPS_DIR))",
+ DepsDirsSrc = "$(if $(wildcard $(DEPS_DIR)/*/src), $(call core_native_path,$(wildcard $(DEPS_DIR)/*/src)))",
+ DepsDirsInc = "$(if $(wildcard $(DEPS_DIR)/*/include), $(call core_native_path,$(wildcard $(DEPS_DIR)/*/include)))",
+ AppsDirsSrc = "$(if $(wildcard $(APPS_DIR)/*/src), $(call core_native_path,$(wildcard $(APPS_DIR)/*/src)))",
+ AppsDirsInc = "$(if $(wildcard $(APPS_DIR)/*/include), $(call core_native_path,$(wildcard $(APPS_DIR)/*/include)))",
+ DepsDirs = lists:usort(string:tokens(DepsDirsSrc++DepsDirsInc, " ")),
+ AppsDirs = lists:usort(string:tokens(AppsDirsSrc++AppsDirsInc, " ")),
+ Modules = [{list_to_atom(filename:basename(F, ".erl")), F} || F <- ErlFiles],
+ Add = fun (Mod, Dep) ->
+ case lists:keyfind(Dep, 1, Modules) of
+ false -> ok;
+ {_, DepFile} ->
+ {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+ ets:insert(E, {ModFile, DepFile}),
+ digraph:add_vertex(G, Mod),
+ digraph:add_vertex(G, Dep),
+ digraph:add_edge(G, Mod, Dep)
+ end
+ end,
+ AddHd = fun (F, Mod, DepFile) ->
+ case file:open(DepFile, [read]) of
+ {error, enoent} ->
+ ok;
+ {ok, Fd} ->
+ {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+ case ets:match(E, {ModFile, DepFile}) of
+ [] ->
+ ets:insert(E, {ModFile, DepFile}),
+					F(F, Fd, Mod, 0);
+ _ -> ok
+ end
+ end
+ end,
+	SearchHrl = fun
+		F(_Hrl, []) -> {error, enoent};
+		F(Hrl, [Dir|Dirs]) ->
+			HrlF = filename:join([Dir, Hrl]),
+			case filelib:is_file(HrlF) of
+				true -> {ok, HrlF};
+				false -> F(Hrl, Dirs)
+			end
+	end,
+ Attr = fun
+ (_F, Mod, behavior, Dep) ->
+ Add(Mod, Dep);
+ (_F, Mod, behaviour, Dep) ->
+ Add(Mod, Dep);
+ (_F, Mod, compile, {parse_transform, Dep}) ->
+ Add(Mod, Dep);
+ (_F, Mod, compile, Opts) when is_list(Opts) ->
+ case proplists:get_value(parse_transform, Opts) of
+ undefined -> ok;
+ Dep -> Add(Mod, Dep)
+ end;
+		(F, Mod, include, Hrl) ->
+			case SearchHrl(Hrl, ["src", "include", AppsDir, DepsDir] ++ AppsDirs ++ DepsDirs) of
+				{ok, FoundHrl} -> AddHd(F, Mod, FoundHrl);
+				{error, _} -> false
+			end;
+		(F, Mod, include_lib, Hrl) ->
+			case SearchHrl(Hrl, ["src", "include", AppsDir, DepsDir] ++ AppsDirs ++ DepsDirs) of
+				{ok, FoundHrl} -> AddHd(F, Mod, FoundHrl);
+				{error, _} -> false
+			end;
+ (F, Mod, import, {Imp, _}) ->
+ IsFile =
+ case lists:keyfind(Imp, 1, Modules) of
+ false -> false;
+ {_, FilePath} -> filelib:is_file(FilePath)
+ end,
+ case IsFile of
+ false -> ok;
+ true -> Add(Mod, Imp)
+ end;
+ (_, _, _, _) -> ok
+ end,
+ MakeDepend = fun
+ (F, Fd, Mod, StartLocation) ->
+			{ok, _Filename} = file:pid2name(Fd),
+ case io:parse_erl_form(Fd, undefined, StartLocation) of
+ {ok, AbsData, EndLocation} ->
+ case AbsData of
+ {attribute, _, Key, Value} ->
+ Attr(F, Mod, Key, Value),
+ F(F, Fd, Mod, EndLocation);
+ _ -> F(F, Fd, Mod, EndLocation)
+ end;
+				{eof, _} -> file:close(Fd);
+				{error, _ErrorDescription} -> file:close(Fd);
+				{error, _ErrorInfo, ErrorLocation} ->
+					F(F, Fd, Mod, ErrorLocation)
+			end,
+ ok
+ end,
+ [begin
+ Mod = list_to_atom(filename:basename(F, ".erl")),
+ case file:open(F, [read]) of
+			{ok, Fd} -> MakeDepend(MakeDepend, Fd, Mod, 0);
+ {error, enoent} -> ok
+ end
+ end || F <- ErlFiles],
+ Depend = sofs:to_external(sofs:relation_to_family(sofs:relation(ets:tab2list(E)))),
+ CompileFirst = [X || X <- lists:reverse(digraph_utils:topsort(G)), [] =/= digraph:in_neighbours(G, X)],
+ TargetPath = fun(Target) ->
+ case lists:keyfind(Target, 1, Modules) of
+ false -> "";
+ {_, DepFile} ->
+ DirSubname = tl(string:tokens(filename:dirname(DepFile), "/")),
+ string:join(DirSubname ++ [atom_to_list(Target)], "/")
+ end
+ end,
+ Output0 = [
+ "# Generated by Erlang.mk. Edit at your own risk!\n\n",
+ [[F, "::", [[" ", D] || D <- Deps], "; @touch \$$@\n"] || {F, Deps} <- Depend],
+ "\nCOMPILE_FIRST +=", [[" ", TargetPath(CF)] || CF <- CompileFirst], "\n"
+ ],
+ Output = case "é" of
+ [233] -> unicode:characters_to_binary(Output0);
+ _ -> Output0
+ end,
+ ok = file:write_file("$(1)", Output),
+ halt()
+endef
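+
+# A minimal sketch of the $(PROJECT).d file the script above generates, for
+# hypothetical modules a.erl (which includes inc.hrl) and b.erl (which uses
+# a as a behaviour):
+#
+#   # Generated by Erlang.mk. Edit at your own risk!
+#
+#   src/a.erl:: include/inc.hrl; @touch $@
+#   src/b.erl:: src/a.erl; @touch $@
+#
+#   COMPILE_FIRST += a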
+
+ifeq ($(if $(NO_MAKEDEP),$(wildcard $(PROJECT).d),),)
+$(PROJECT).d:: $(ERL_FILES) $(call core_find,include/,*.hrl) $(MAKEFILE_LIST)
+ $(makedep_verbose) $(call erlang,$(call makedep.erl,$@))
+endif
+
+ifeq ($(IS_APP)$(IS_DEP),)
+ifneq ($(words $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES)),0)
+# Rebuild everything when the Makefile changes.
+$(ERLANG_MK_TMP)/last-makefile-change: $(MAKEFILE_LIST) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES); \
+ touch -c $(PROJECT).d; \
+ fi
+ $(verbose) touch $@
+
+$(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES):: $(ERLANG_MK_TMP)/last-makefile-change
+ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change
+endif
+endif
+
+$(PROJECT).d::
+ $(verbose) :
+
+include $(wildcard $(PROJECT).d)
+
+ebin/$(PROJECT).app:: ebin/
+
+ebin/:
+ $(verbose) mkdir -p ebin/
+
+define compile_erl
+ $(erlc_verbose) erlc -v $(if $(IS_DEP),$(filter-out -Werror,$(ERLC_OPTS)),$(ERLC_OPTS)) -o ebin/ \
+ -pa ebin/ -I include/ $(filter-out $(ERLC_EXCLUDE_PATHS),$(COMPILE_FIRST_PATHS) $(1))
+endef
+
+define validate_app_file
+ case file:consult("ebin/$(PROJECT).app") of
+ {ok, _} -> halt();
+ _ -> halt(1)
+ end
+endef
+
+ebin/$(PROJECT).app:: $(ERL_FILES) $(CORE_FILES) $(wildcard src/$(PROJECT).app.src)
+ $(eval FILES_TO_COMPILE := $(filter-out src/$(PROJECT).app.src,$?))
+ $(if $(strip $(FILES_TO_COMPILE)),$(call compile_erl,$(FILES_TO_COMPILE)))
+# Older git versions do not have the --first-parent flag. Do without in that case.
+ $(eval GITDESCRIBE := $(shell git describe --dirty --abbrev=7 --tags --always --first-parent 2>/dev/null \
+ || git describe --dirty --abbrev=7 --tags --always 2>/dev/null || true))
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(filter-out $(ERLC_EXCLUDE_PATHS),$(ERL_FILES) $(CORE_FILES) $(BEAM_FILES)))))))
+ifeq ($(wildcard src/$(PROJECT).app.src),)
+ $(app_verbose) printf '$(subst %,%%,$(subst $(newline),\n,$(subst ','\'',$(call app_file,$(GITDESCRIBE),$(MODULES)))))' \
+ > ebin/$(PROJECT).app
+ $(verbose) if ! $(call erlang,$(call validate_app_file)); then \
+ echo "The .app file produced is invalid. Please verify the value of PROJECT_ENV." >&2; \
+ exit 1; \
+ fi
+else
+ $(verbose) if [ -z "$$(grep -e '^[^%]*{\s*modules\s*,' src/$(PROJECT).app.src)" ]; then \
+ echo "Empty modules entry not found in $(PROJECT).app.src. Please consult the erlang.mk documentation for instructions." >&2; \
+ exit 1; \
+ fi
+ $(appsrc_verbose) cat src/$(PROJECT).app.src \
+ | sed "s/{[[:space:]]*modules[[:space:]]*,[[:space:]]*\[\]}/{modules, \[$(call comma_list,$(MODULES))\]}/" \
+ | sed "s/{id,[[:space:]]*\"git\"}/{id, \"$(subst /,\/,$(GITDESCRIBE))\"}/" \
+ > ebin/$(PROJECT).app
+endif
+ifneq ($(wildcard src/$(PROJECT).appup),)
+ $(verbose) cp src/$(PROJECT).appup ebin/
+endif
+
+clean:: clean-app
+
+clean-app:
+ $(gen_verbose) rm -rf $(PROJECT).d ebin/ priv/mibs/ $(XRL_ERL_FILES) $(YRL_ERL_FILES) \
+ $(addprefix include/,$(patsubst %.mib,%.hrl,$(notdir $(MIB_FILES)))) \
+ $(addprefix include/,$(patsubst %.asn1,%.hrl,$(notdir $(ASN1_FILES)))) \
+ $(addprefix include/,$(patsubst %.asn1,%.asn1db,$(notdir $(ASN1_FILES)))) \
+ $(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES))))
+
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Viktor Söderqvist <viktor@zuiderkwast.se>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: docs-deps
+
+# Configuration.
+
+ALL_DOC_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(DOC_DEPS))
+
+# Targets.
+
+$(foreach dep,$(DOC_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+doc-deps:
+else
+doc-deps: $(ALL_DOC_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_DOC_DEPS_DIRS) ; do $(MAKE) -C $$dep IS_DEP=1; done
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: rel-deps
+
+# Configuration.
+
+ALL_REL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(REL_DEPS))
+
+# Targets.
+
+$(foreach dep,$(REL_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+rel-deps:
+else
+rel-deps: $(ALL_REL_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_REL_DEPS_DIRS) ; do $(MAKE) -C $$dep; done
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: test-deps test-dir test-build clean-test-dir
+
+# Configuration.
+
+TEST_DIR ?= $(CURDIR)/test
+
+ALL_TEST_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(TEST_DEPS))
+
+TEST_ERLC_OPTS ?= +debug_info +warn_export_vars +warn_shadow_vars +warn_obsolete_guard
+TEST_ERLC_OPTS += -DTEST=1
+
+# Targets.
+
+$(foreach dep,$(TEST_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+test-deps:
+else
+test-deps: $(ALL_TEST_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_TEST_DEPS_DIRS) ; do \
+ if [ -z "$(strip $(FULL))" ] && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ else \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ fi \
+ done
+endif
+
+ifneq ($(wildcard $(TEST_DIR)),)
+test-dir: $(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build
+ @:
+
+test_erlc_verbose_0 = @echo " ERLC " $(filter-out $(patsubst %,%.erl,$(ERLC_EXCLUDE)),\
+ $(filter %.erl %.core,$(notdir $(FILES_TO_COMPILE))));
+test_erlc_verbose_2 = set -x;
+test_erlc_verbose = $(test_erlc_verbose_$(V))
+
+define compile_test_erl
+ $(test_erlc_verbose) erlc -v $(TEST_ERLC_OPTS) -o $(TEST_DIR) \
+ -pa ebin/ -I include/ $(1)
+endef
+
+ERL_TEST_FILES = $(call core_find,$(TEST_DIR)/,*.erl)
+$(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build: $(ERL_TEST_FILES) $(MAKEFILE_LIST)
+ $(eval FILES_TO_COMPILE := $(if $(filter $(MAKEFILE_LIST),$?),$(filter $(ERL_TEST_FILES),$^),$?))
+ $(if $(strip $(FILES_TO_COMPILE)),$(call compile_test_erl,$(FILES_TO_COMPILE)) && touch $@)
+endif
+
+test-build:: IS_TEST=1
+test-build:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build:: $(if $(wildcard src),$(if $(wildcard ebin/test),,clean)) $(if $(IS_APP),,deps test-deps)
+# We already compiled everything when IS_APP=1.
+ifndef IS_APP
+ifneq ($(wildcard src),)
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(verbose) $(MAKE) --no-print-directory app-build ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(gen_verbose) touch ebin/test
+endif
+ifneq ($(wildcard $(TEST_DIR)),)
+ $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+endif
+endif
+
+# Roughly the same as test-build, but when IS_APP=1.
+# We only care about compiling the current application.
+ifdef IS_APP
+test-build-app:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build-app:: deps test-deps
+ifneq ($(wildcard src),)
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(verbose) $(MAKE) --no-print-directory app-build ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(gen_verbose) touch ebin/test
+endif
+ifneq ($(wildcard $(TEST_DIR)),)
+ $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+endif
+endif
+
+clean:: clean-test-dir
+
+clean-test-dir:
+ifneq ($(wildcard $(TEST_DIR)/*.beam),)
+ $(gen_verbose) rm -f $(TEST_DIR)/*.beam $(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: rebar.config
+
+# We strip out -Werror because we don't want to fail due to
+# warnings when used as a dependency.
+
+compat_prepare_erlc_opts = $(shell echo "$1" | sed 's/, */,/g')
+
+define compat_convert_erlc_opts
+$(if $(filter-out -Werror,$1),\
+ $(if $(findstring +,$1),\
+ $(shell echo $1 | cut -b 2-)))
+endef
+
+define compat_erlc_opts_to_list
+[$(call comma_list,$(foreach o,$(call compat_prepare_erlc_opts,$1),$(call compat_convert_erlc_opts,$o)))]
+endef
+
+define compat_rebar_config
+{deps, [
+$(call comma_list,$(foreach d,$(DEPS),\
+ $(if $(filter hex,$(call dep_fetch,$d)),\
+ {$(call dep_name,$d)$(comma)"$(call dep_repo,$d)"},\
+ {$(call dep_name,$d)$(comma)".*"$(comma){git,"$(call dep_repo,$d)"$(comma)"$(call dep_commit,$d)"}})))
+]}.
+{erl_opts, $(call compat_erlc_opts_to_list,$(ERLC_OPTS))}.
+endef
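+
+# A sketch of the rebar.config rendered for a hypothetical project with
+# DEPS = cowlib and ERLC_OPTS = -Werror +debug_info (-Werror is stripped):
+#
+#   {deps, [
+#   {cowlib,".*",{git,"https://github.com/ninenines/cowlib","2.8.0"}}
+#   ]}.
+#   {erl_opts, [debug_info]}.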
+
+rebar.config:
+ $(gen_verbose) $(call core_render,compat_rebar_config,rebar.config)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter asciideck,$(DEPS) $(DOC_DEPS)),asciideck)
+
+.PHONY: asciidoc asciidoc-guide asciidoc-manual install-asciidoc distclean-asciidoc-guide distclean-asciidoc-manual
+
+# Core targets.
+
+docs:: asciidoc
+
+distclean:: distclean-asciidoc-guide distclean-asciidoc-manual
+
+# Plugin-specific targets.
+
+asciidoc: asciidoc-guide asciidoc-manual
+
+# User guide.
+
+ifeq ($(wildcard doc/src/guide/book.asciidoc),)
+asciidoc-guide:
+else
+asciidoc-guide: distclean-asciidoc-guide doc-deps
+ a2x -v -f pdf doc/src/guide/book.asciidoc && mv doc/src/guide/book.pdf doc/guide.pdf
+ a2x -v -f chunked doc/src/guide/book.asciidoc && mv doc/src/guide/book.chunked/ doc/html/
+
+distclean-asciidoc-guide:
+ $(gen_verbose) rm -rf doc/html/ doc/guide.pdf
+endif
+
+# Man pages.
+
+ASCIIDOC_MANUAL_FILES := $(wildcard doc/src/manual/*.asciidoc)
+
+ifeq ($(ASCIIDOC_MANUAL_FILES),)
+asciidoc-manual:
+else
+
+# Configuration.
+
+MAN_INSTALL_PATH ?= /usr/local/share/man
+MAN_SECTIONS ?= 3 7
+MAN_PROJECT ?= $(shell echo $(PROJECT) | sed 's/^./\U&\E/')
+MAN_VERSION ?= $(PROJECT_VERSION)
+
+# Plugin-specific targets.
+
+define asciidoc2man.erl
+try
+ [begin
+ io:format(" ADOC ~s~n", [F]),
+ ok = asciideck:to_manpage(asciideck:parse_file(F), #{
+ compress => gzip,
+ outdir => filename:dirname(F),
+ extra2 => "$(MAN_PROJECT) $(MAN_VERSION)",
+ extra3 => "$(MAN_PROJECT) Function Reference"
+ })
+ end || F <- [$(shell echo $(addprefix $(comma)\",$(addsuffix \",$1)) | sed 's/^.//')]],
+ halt(0)
+catch C:E ->
+ io:format("Exception ~p:~p~nStacktrace: ~p~n", [C, E, erlang:get_stacktrace()]),
+ halt(1)
+end.
+endef
+
+asciidoc-manual:: doc-deps
+
+asciidoc-manual:: $(ASCIIDOC_MANUAL_FILES)
+ $(gen_verbose) $(call erlang,$(call asciidoc2man.erl,$?))
+ $(verbose) $(foreach s,$(MAN_SECTIONS),mkdir -p doc/man$s/ && mv doc/src/manual/*.$s.gz doc/man$s/;)
+
+install-docs:: install-asciidoc
+
+install-asciidoc: asciidoc-manual
+ $(foreach s,$(MAN_SECTIONS),\
+ mkdir -p $(MAN_INSTALL_PATH)/man$s/ && \
+ install -g `id -g` -o `id -u` -m 0644 doc/man$s/*.gz $(MAN_INSTALL_PATH)/man$s/;)
+
+distclean-asciidoc-manual:
+ $(gen_verbose) rm -rf $(addprefix doc/man,$(MAN_SECTIONS))
+endif
+endif
+
+# Copyright (c) 2014-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: bootstrap bootstrap-lib bootstrap-rel new list-templates
+
+# Core targets.
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Bootstrap targets:" \
+ " bootstrap Generate a skeleton of an OTP application" \
+ " bootstrap-lib Generate a skeleton of an OTP library" \
+ " bootstrap-rel Generate the files needed to build a release" \
+ " new-app in=NAME Create a new local OTP application NAME" \
+ " new-lib in=NAME Create a new local OTP library NAME" \
+ " new t=TPL n=NAME Generate a module NAME based on the template TPL" \
+ " new t=T n=N in=APP Generate a module NAME based on the template TPL in APP" \
+ " list-templates List available templates"
+
+# Bootstrap templates.
+
+define bs_appsrc
+{application, $p, [
+ {description, ""},
+ {vsn, "0.1.0"},
+ {id, "git"},
+ {modules, []},
+ {registered, []},
+ {applications, [
+ kernel,
+ stdlib
+ ]},
+ {mod, {$p_app, []}},
+ {env, []}
+]}.
+endef
+
+define bs_appsrc_lib
+{application, $p, [
+ {description, ""},
+ {vsn, "0.1.0"},
+ {id, "git"},
+ {modules, []},
+ {registered, []},
+ {applications, [
+ kernel,
+ stdlib
+ ]}
+]}.
+endef
+
+# To prevent autocompletion issues with ZSH, we add "include erlang.mk"
+# separately during the actual bootstrap.
+define bs_Makefile
+PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.1.0
+$(if $(SP),
+# Whitespace to be used when creating files from templates.
+SP = $(SP)
+)
+endef
+
+define bs_apps_Makefile
+PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.1.0
+$(if $(SP),
+# Whitespace to be used when creating files from templates.
+SP = $(SP)
+)
+# Make sure we know where the applications are located.
+ROOT_DIR ?= $(call core_relpath,$(dir $(ERLANG_MK_FILENAME)),$(APPS_DIR)/app)
+APPS_DIR ?= ..
+DEPS_DIR ?= $(call core_relpath,$(DEPS_DIR),$(APPS_DIR)/app)
+
+include $$(ROOT_DIR)/erlang.mk
+endef
+
+define bs_app
+-module($p_app).
+-behaviour(application).
+
+-export([start/2]).
+-export([stop/1]).
+
+start(_Type, _Args) ->
+ $p_sup:start_link().
+
+stop(_State) ->
+ ok.
+endef
+
+define bs_relx_config
+{release, {$p_release, "1"}, [$p, sasl, runtime_tools]}.
+{extended_start_script, true}.
+{sys_config, "config/sys.config"}.
+{vm_args, "config/vm.args"}.
+endef
+
+define bs_sys_config
+[
+].
+endef
+
+define bs_vm_args
+-name $p@127.0.0.1
+-setcookie $p
+-heart
+endef
+
+# Normal templates.
+
+define tpl_supervisor
+-module($(n)).
+-behaviour(supervisor).
+
+-export([start_link/0]).
+-export([init/1]).
+
+start_link() ->
+ supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+init([]) ->
+ Procs = [],
+ {ok, {{one_for_one, 1, 5}, Procs}}.
+endef
+
+define tpl_gen_server
+-module($(n)).
+-behaviour(gen_server).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_server.
+-export([init/1]).
+-export([handle_call/3]).
+-export([handle_cast/2]).
+-export([handle_info/2]).
+-export([terminate/2]).
+-export([code_change/3]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_server:start_link(?MODULE, [], []).
+
+%% gen_server.
+
+init([]) ->
+ {ok, #state{}}.
+
+handle_call(_Request, _From, State) ->
+ {reply, ignored, State}.
+
+handle_cast(_Msg, State) ->
+ {noreply, State}.
+
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+endef
+
+define tpl_module
+-module($(n)).
+-export([]).
+endef
+
+define tpl_cowboy_http
+-module($(n)).
+-behaviour(cowboy_http_handler).
+
+-export([init/3]).
+-export([handle/2]).
+-export([terminate/3]).
+
+-record(state, {
+}).
+
+init(_, Req, _Opts) ->
+ {ok, Req, #state{}}.
+
+handle(Req, State=#state{}) ->
+ {ok, Req2} = cowboy_req:reply(200, Req),
+ {ok, Req2, State}.
+
+terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_gen_fsm
+-module($(n)).
+-behaviour(gen_fsm).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_fsm.
+-export([init/1]).
+-export([state_name/2]).
+-export([handle_event/3]).
+-export([state_name/3]).
+-export([handle_sync_event/4]).
+-export([handle_info/3]).
+-export([terminate/3]).
+-export([code_change/4]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_fsm:start_link(?MODULE, [], []).
+
+%% gen_fsm.
+
+init([]) ->
+ {ok, state_name, #state{}}.
+
+state_name(_Event, StateData) ->
+ {next_state, state_name, StateData}.
+
+handle_event(_Event, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+state_name(_Event, _From, StateData) ->
+ {reply, ignored, state_name, StateData}.
+
+handle_sync_event(_Event, _From, StateName, StateData) ->
+ {reply, ignored, StateName, StateData}.
+
+handle_info(_Info, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+terminate(_Reason, _StateName, _StateData) ->
+ ok.
+
+code_change(_OldVsn, StateName, StateData, _Extra) ->
+ {ok, StateName, StateData}.
+endef
+
+define tpl_gen_statem
+-module($(n)).
+-behaviour(gen_statem).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_statem.
+-export([callback_mode/0]).
+-export([init/1]).
+-export([state_name/3]).
+-export([handle_event/4]).
+-export([terminate/3]).
+-export([code_change/4]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_statem:start_link(?MODULE, [], []).
+
+%% gen_statem.
+
+callback_mode() ->
+ state_functions.
+
+init([]) ->
+ {ok, state_name, #state{}}.
+
+state_name(_EventType, _EventData, StateData) ->
+ {next_state, state_name, StateData}.
+
+handle_event(_EventType, _EventData, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+terminate(_Reason, _StateName, _StateData) ->
+ ok.
+
+code_change(_OldVsn, StateName, StateData, _Extra) ->
+ {ok, StateName, StateData}.
+endef
+
+define tpl_cowboy_loop
+-module($(n)).
+-behaviour(cowboy_loop_handler).
+
+-export([init/3]).
+-export([info/3]).
+-export([terminate/3]).
+
+-record(state, {
+}).
+
+init(_, Req, _Opts) ->
+ {loop, Req, #state{}, 5000, hibernate}.
+
+info(_Info, Req, State) ->
+ {loop, Req, State, hibernate}.
+
+terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_cowboy_rest
+-module($(n)).
+
+-export([init/3]).
+-export([content_types_provided/2]).
+-export([get_html/2]).
+
+init(_, _Req, _Opts) ->
+ {upgrade, protocol, cowboy_rest}.
+
+content_types_provided(Req, State) ->
+ {[{{<<"text">>, <<"html">>, '*'}, get_html}], Req, State}.
+
+get_html(Req, State) ->
+ {<<"<html><body>This is REST!</body></html>">>, Req, State}.
+endef
+
+define tpl_cowboy_ws
+-module($(n)).
+-behaviour(cowboy_websocket_handler).
+
+-export([init/3]).
+-export([websocket_init/3]).
+-export([websocket_handle/3]).
+-export([websocket_info/3]).
+-export([websocket_terminate/3]).
+
+-record(state, {
+}).
+
+init(_, _, _) ->
+ {upgrade, protocol, cowboy_websocket}.
+
+websocket_init(_, Req, _Opts) ->
+ Req2 = cowboy_req:compact(Req),
+ {ok, Req2, #state{}}.
+
+websocket_handle({text, Data}, Req, State) ->
+ {reply, {text, Data}, Req, State};
+websocket_handle({binary, Data}, Req, State) ->
+ {reply, {binary, Data}, Req, State};
+websocket_handle(_Frame, Req, State) ->
+ {ok, Req, State}.
+
+websocket_info(_Info, Req, State) ->
+ {ok, Req, State}.
+
+websocket_terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_ranch_protocol
+-module($(n)).
+-behaviour(ranch_protocol).
+
+-export([start_link/4]).
+-export([init/4]).
+
+-type opts() :: [].
+-export_type([opts/0]).
+
+-record(state, {
+ socket :: inet:socket(),
+ transport :: module()
+}).
+
+start_link(Ref, Socket, Transport, Opts) ->
+ Pid = spawn_link(?MODULE, init, [Ref, Socket, Transport, Opts]),
+ {ok, Pid}.
+
+-spec init(ranch:ref(), inet:socket(), module(), opts()) -> ok.
+init(Ref, Socket, Transport, _Opts) ->
+ ok = ranch:accept_ack(Ref),
+ loop(#state{socket=Socket, transport=Transport}).
+
+loop(State) ->
+ loop(State).
+endef
+
+# Plugin-specific targets.
+
+ifndef WS
+ifdef SP
+WS = $(subst a,,a $(wordlist 1,$(SP),a a a a a a a a a a a a a a a a a a a a))
+else
+WS = $(tab)
+endif
+endif
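+
+# WS is the whitespace substituted into rendered templates: a tab by default,
+# or SP spaces when SP is set, e.g. (illustrative):
+#
+#   make bootstrap SP=2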
+
+bootstrap:
+ifneq ($(wildcard src/),)
+ $(error Error: src/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(eval n := $(PROJECT)_sup)
+ $(verbose) $(call core_render,bs_Makefile,Makefile)
+ $(verbose) echo "include erlang.mk" >> Makefile
+ $(verbose) mkdir src/
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc,src/$(PROJECT).app.src)
+endif
+ $(verbose) $(call core_render,bs_app,src/$(PROJECT)_app.erl)
+ $(verbose) $(call core_render,tpl_supervisor,src/$(PROJECT)_sup.erl)
+
+bootstrap-lib:
+ifneq ($(wildcard src/),)
+ $(error Error: src/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(verbose) $(call core_render,bs_Makefile,Makefile)
+ $(verbose) echo "include erlang.mk" >> Makefile
+ $(verbose) mkdir src/
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc_lib,src/$(PROJECT).app.src)
+endif
+
+bootstrap-rel:
+ifneq ($(wildcard relx.config),)
+ $(error Error: relx.config already exists)
+endif
+ifneq ($(wildcard config/),)
+ $(error Error: config/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(verbose) $(call core_render,bs_relx_config,relx.config)
+ $(verbose) mkdir config/
+ $(verbose) $(call core_render,bs_sys_config,config/sys.config)
+ $(verbose) $(call core_render,bs_vm_args,config/vm.args)
+
+new-app:
+ifndef in
+ $(error Usage: $(MAKE) new-app in=APP)
+endif
+ifneq ($(wildcard $(APPS_DIR)/$in),)
+ $(error Error: Application $in already exists)
+endif
+ $(eval p := $(in))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(eval n := $(in)_sup)
+ $(verbose) mkdir -p $(APPS_DIR)/$p/src/
+ $(verbose) $(call core_render,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile)
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc,$(APPS_DIR)/$p/src/$p.app.src)
+endif
+ $(verbose) $(call core_render,bs_app,$(APPS_DIR)/$p/src/$p_app.erl)
+ $(verbose) $(call core_render,tpl_supervisor,$(APPS_DIR)/$p/src/$p_sup.erl)
+
+new-lib:
+ifndef in
+ $(error Usage: $(MAKE) new-lib in=APP)
+endif
+ifneq ($(wildcard $(APPS_DIR)/$in),)
+ $(error Error: Application $in already exists)
+endif
+ $(eval p := $(in))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(verbose) mkdir -p $(APPS_DIR)/$p/src/
+ $(verbose) $(call core_render,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile)
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc_lib,$(APPS_DIR)/$p/src/$p.app.src)
+endif
+
+new:
+ifeq ($(wildcard src/)$(in),)
+ $(error Error: src/ directory does not exist)
+endif
+ifndef t
+ $(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP])
+endif
+ifndef n
+ $(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP])
+endif
+ifdef in
+ $(verbose) $(call core_render,tpl_$(t),$(APPS_DIR)/$(in)/src/$(n).erl)
+else
+ $(verbose) $(call core_render,tpl_$(t),src/$(n).erl)
+endif
+
+list-templates:
+ $(verbose) @echo Available templates:
+ $(verbose) printf " %s\n" $(sort $(patsubst tpl_%,%,$(filter tpl_%,$(.VARIABLES))))
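+
+# Typical usage of the targets above (module and application names are
+# hypothetical):
+#
+#   make new t=gen_server n=my_server
+#   make new t=supervisor n=my_sup in=my_app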
+
+# Copyright (c) 2014-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: clean-c_src distclean-c_src-env
+
+# Configuration.
+
+C_SRC_DIR ?= $(CURDIR)/c_src
+C_SRC_ENV ?= $(C_SRC_DIR)/env.mk
+C_SRC_OUTPUT ?= $(CURDIR)/priv/$(PROJECT)
+C_SRC_TYPE ?= shared
+
+# System type and C compiler/flags.
+
+ifeq ($(PLATFORM),msys2)
+ C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?= .exe
+ C_SRC_OUTPUT_SHARED_EXTENSION ?= .dll
+else
+ C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?=
+ C_SRC_OUTPUT_SHARED_EXTENSION ?= .so
+endif
+
+ifeq ($(C_SRC_TYPE),shared)
+ C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_SHARED_EXTENSION)
+else
+ C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_EXECUTABLE_EXTENSION)
+endif
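+
+# For example, a NIF keeps the default C_SRC_TYPE = shared and produces
+# priv/$(PROJECT).so, while a port program might set, in its own Makefile
+# (values illustrative):
+#
+#   C_SRC_TYPE = executable
+#   C_SRC_OUTPUT = $(CURDIR)/priv/my_port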
+
+ifeq ($(PLATFORM),msys2)
+# We hardcode the compiler used on MSYS2. The default CC=cc does
+# not produce working code, and neither does the "gcc" MSYS2 package.
+ CC = /mingw64/bin/gcc
+ export CC
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),darwin)
+ CC ?= cc
+ CFLAGS ?= -O3 -std=c99 -arch x86_64 -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -arch x86_64 -Wall
+ LDFLAGS ?= -arch x86_64 -flat_namespace -undefined suppress
+else ifeq ($(PLATFORM),freebsd)
+ CC ?= cc
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),linux)
+ CC ?= gcc
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+endif
+
+ifneq ($(PLATFORM),msys2)
+ CFLAGS += -fPIC
+ CXXFLAGS += -fPIC
+endif
+
+CFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
+CXXFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
+
+LDLIBS += -L"$(ERL_INTERFACE_LIB_DIR)" -lei
+
+# Verbosity.
+
+c_verbose_0 = @echo " C " $(filter-out $(notdir $(MAKEFILE_LIST) $(C_SRC_ENV)),$(^F));
+c_verbose = $(c_verbose_$(V))
+
+cpp_verbose_0 = @echo " CPP " $(filter-out $(notdir $(MAKEFILE_LIST) $(C_SRC_ENV)),$(^F));
+cpp_verbose = $(cpp_verbose_$(V))
+
+link_verbose_0 = @echo " LD " $(@F);
+link_verbose = $(link_verbose_$(V))
+
+# Targets.
+
+ifeq ($(wildcard $(C_SRC_DIR)),)
+else ifneq ($(wildcard $(C_SRC_DIR)/Makefile),)
+app:: app-c_src
+
+test-build:: app-c_src
+
+app-c_src:
+ $(MAKE) -C $(C_SRC_DIR)
+
+clean::
+ $(MAKE) -C $(C_SRC_DIR) clean
+
+else
+
+ifeq ($(SOURCES),)
+SOURCES := $(sort $(foreach pat,*.c *.C *.cc *.cpp,$(call core_find,$(C_SRC_DIR)/,$(pat))))
+endif
+OBJECTS = $(addsuffix .o, $(basename $(SOURCES)))
+
+COMPILE_C = $(c_verbose) $(CC) $(CFLAGS) $(CPPFLAGS) -c
+COMPILE_CPP = $(cpp_verbose) $(CXX) $(CXXFLAGS) $(CPPFLAGS) -c
+
+app:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
+
+test-build:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
+
+$(C_SRC_OUTPUT_FILE): $(OBJECTS)
+ $(verbose) mkdir -p $(dir $@)
+ $(link_verbose) $(CC) $(OBJECTS) \
+ $(LDFLAGS) $(if $(filter $(C_SRC_TYPE),shared),-shared) $(LDLIBS) \
+ -o $(C_SRC_OUTPUT_FILE)
+
+$(OBJECTS): $(MAKEFILE_LIST) $(C_SRC_ENV)
+
+%.o: %.c
+ $(COMPILE_C) $(OUTPUT_OPTION) $<
+
+%.o: %.cc
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+%.o: %.C
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+%.o: %.cpp
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+clean:: clean-c_src
+
+clean-c_src:
+ $(gen_verbose) rm -f $(C_SRC_OUTPUT_FILE) $(OBJECTS)
+
+endif
+
+ifneq ($(wildcard $(C_SRC_DIR)),)
+ERL_ERTS_DIR = $(shell $(ERL) -eval 'io:format("~s~n", [code:lib_dir(erts)]), halt().')
+
+$(C_SRC_ENV):
+ $(verbose) $(ERL) -eval "file:write_file(\"$(call core_native_path,$(C_SRC_ENV))\", \
+ io_lib:format( \
+ \"# Generated by Erlang.mk. Edit at your own risk!~n~n\" \
+ \"ERTS_INCLUDE_DIR ?= ~s/erts-~s/include/~n\" \
+ \"ERL_INTERFACE_INCLUDE_DIR ?= ~s~n\" \
+ \"ERL_INTERFACE_LIB_DIR ?= ~s~n\" \
+ \"ERTS_DIR ?= $(ERL_ERTS_DIR)~n\", \
+ [code:root_dir(), erlang:system_info(version), \
+ code:lib_dir(erl_interface, include), \
+ code:lib_dir(erl_interface, lib)])), \
+ halt()."
+
+distclean:: distclean-c_src-env
+
+distclean-c_src-env:
+ $(gen_verbose) rm -f $(C_SRC_ENV)
+
+-include $(C_SRC_ENV)
+
+ifneq ($(ERL_ERTS_DIR),$(ERTS_DIR))
+$(shell rm -f $(C_SRC_ENV))
+endif
+endif
+
+# Templates.
+
+define bs_c_nif
+#include "erl_nif.h"
+
+static int loads = 0;
+
+static int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
+{
+ /* Initialize private data. */
+ *priv_data = NULL;
+
+ loads++;
+
+ return 0;
+}
+
+static int upgrade(ErlNifEnv* env, void** priv_data, void** old_priv_data, ERL_NIF_TERM load_info)
+{
+ /* Convert the private data to the new version. */
+ *priv_data = *old_priv_data;
+
+ loads++;
+
+ return 0;
+}
+
+static void unload(ErlNifEnv* env, void* priv_data)
+{
+ if (loads == 1) {
+ /* Destroy the private data. */
+ }
+
+ loads--;
+}
+
+static ERL_NIF_TERM hello(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ if (enif_is_atom(env, argv[0])) {
+ return enif_make_tuple2(env,
+ enif_make_atom(env, "hello"),
+ argv[0]);
+ }
+
+ return enif_make_tuple2(env,
+ enif_make_atom(env, "error"),
+ enif_make_atom(env, "badarg"));
+}
+
+static ErlNifFunc nif_funcs[] = {
+ {"hello", 1, hello}
+};
+
+ERL_NIF_INIT($n, nif_funcs, load, NULL, upgrade, unload)
+endef
+
+define bs_erl_nif
+-module($n).
+
+-export([hello/1]).
+
+-on_load(on_load/0).
+on_load() ->
+ PrivDir = case code:priv_dir(?MODULE) of
+ {error, _} ->
+ AppPath = filename:dirname(filename:dirname(code:which(?MODULE))),
+ filename:join(AppPath, "priv");
+ Path ->
+ Path
+ end,
+ erlang:load_nif(filename:join(PrivDir, atom_to_list(?MODULE)), 0).
+
+hello(_) ->
+ erlang:nif_error({not_loaded, ?MODULE}).
+endef
+
+new-nif:
+ifneq ($(wildcard $(C_SRC_DIR)/$n.c),)
+ $(error Error: $(C_SRC_DIR)/$n.c already exists)
+endif
+ifneq ($(wildcard src/$n.erl),)
+ $(error Error: src/$n.erl already exists)
+endif
+ifndef n
+ $(error Usage: $(MAKE) new-nif n=NAME [in=APP])
+endif
+ifdef in
+ $(verbose) $(MAKE) -C $(APPS_DIR)/$(in)/ new-nif n=$n in=
+else
+ $(verbose) mkdir -p $(C_SRC_DIR) src/
+ $(verbose) $(call core_render,bs_c_nif,$(C_SRC_DIR)/$n.c)
+ $(verbose) $(call core_render,bs_erl_nif,src/$n.erl)
+endif
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: ci ci-prepare ci-setup
+
+CI_OTP ?=
+CI_HIPE ?=
+CI_ERLLVM ?=
+
+ifeq ($(CI_VM),native)
+ERLC_OPTS += +native
+TEST_ERLC_OPTS += +native
+else ifeq ($(CI_VM),erllvm)
+ERLC_OPTS += +native +'{hipe, [to_llvm]}'
+TEST_ERLC_OPTS += +native +'{hipe, [to_llvm]}'
+endif
+
+ifeq ($(strip $(CI_OTP) $(CI_HIPE) $(CI_ERLLVM)),)
+ci::
+else
+
+ci:: $(addprefix ci-,$(CI_OTP) $(addsuffix -native,$(CI_HIPE)) $(addsuffix -erllvm,$(CI_ERLLVM)))
+
+ci-prepare: $(addprefix $(KERL_INSTALL_DIR)/,$(CI_OTP) $(addsuffix -native,$(CI_HIPE)))
+
+ci-setup::
+ $(verbose) :
+
+ci-extra::
+ $(verbose) :
+
+ci_verbose_0 = @echo " CI " $(1);
+ci_verbose = $(ci_verbose_$(V))
+
+define ci_target
+ci-$1: $(KERL_INSTALL_DIR)/$2
+ $(verbose) $(MAKE) --no-print-directory clean
+ $(ci_verbose) \
+ PATH="$(KERL_INSTALL_DIR)/$2/bin:$(PATH)" \
+ CI_OTP_RELEASE="$1" \
+ CT_OPTS="-label $1" \
+ CI_VM="$3" \
+ $(MAKE) ci-setup tests
+ $(verbose) $(MAKE) --no-print-directory ci-extra
+endef
+
+$(foreach otp,$(CI_OTP),$(eval $(call ci_target,$(otp),$(otp),otp)))
+$(foreach otp,$(CI_HIPE),$(eval $(call ci_target,$(otp)-native,$(otp)-native,native)))
+$(foreach otp,$(CI_ERLLVM),$(eval $(call ci_target,$(otp)-erllvm,$(otp)-native,erllvm)))
+
+$(foreach otp,$(filter-out $(ERLANG_OTP),$(CI_OTP)),$(eval $(call kerl_otp_target,$(otp))))
+$(foreach otp,$(filter-out $(ERLANG_HIPE),$(sort $(CI_HIPE) $(CI_ERLLVM))),$(eval $(call kerl_hipe_target,$(otp))))
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Continuous Integration targets:" \
+ " ci Run '$(MAKE) tests' on all configured Erlang versions." \
+ "" \
+ "The CI_OTP variable must be defined with the Erlang versions" \
+		"to be tested. For example: CI_OTP = OTP-17.3.4 OTP-17.5.3"
+
+endif
+
+# Copyright (c) 2020, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifdef CONCUERROR_TESTS
+
+.PHONY: concuerror distclean-concuerror
+
+# Configuration
+
+CONCUERROR_LOGS_DIR ?= $(CURDIR)/logs
+CONCUERROR_OPTS ?=
+
+# Core targets.
+
+check:: concuerror
+
+ifndef KEEP_LOGS
+distclean:: distclean-concuerror
+endif
+
+# Plugin-specific targets.
+
+$(ERLANG_MK_TMP)/Concuerror/bin/concuerror: | $(ERLANG_MK_TMP)
+ $(verbose) git clone https://github.com/parapluu/Concuerror $(ERLANG_MK_TMP)/Concuerror
+ $(verbose) $(MAKE) -C $(ERLANG_MK_TMP)/Concuerror
+
+$(CONCUERROR_LOGS_DIR):
+ $(verbose) mkdir -p $(CONCUERROR_LOGS_DIR)
+
+define concuerror_html_report
+<!DOCTYPE html>
+<html lang="en">
+<head>
+<meta charset="utf-8">
+<title>Concuerror HTML report</title>
+</head>
+<body>
+<h1>Concuerror HTML report</h1>
+<p>Generated on $(concuerror_date)</p>
+<ul>
+$(foreach t,$(concuerror_targets),<li><a href="$(t).txt">$(t)</a></li>)
+</ul>
+</body>
+</html>
+endef
+
+concuerror: $(addprefix concuerror-,$(subst :,-,$(CONCUERROR_TESTS)))
+ $(eval concuerror_date := $(shell date))
+ $(eval concuerror_targets := $^)
+ $(verbose) $(call core_render,concuerror_html_report,$(CONCUERROR_LOGS_DIR)/concuerror.html)
+
+define concuerror_target
+.PHONY: concuerror-$1-$2
+
+concuerror-$1-$2: test-build | $(ERLANG_MK_TMP)/Concuerror/bin/concuerror $(CONCUERROR_LOGS_DIR)
+ $(ERLANG_MK_TMP)/Concuerror/bin/concuerror \
+ --pa $(CURDIR)/ebin --pa $(TEST_DIR) \
+ -o $(CONCUERROR_LOGS_DIR)/concuerror-$1-$2.txt \
+ $$(CONCUERROR_OPTS) -m $1 -t $2
+endef
+
+$(foreach test,$(CONCUERROR_TESTS),$(eval $(call concuerror_target,$(firstword $(subst :, ,$(test))),$(lastword $(subst :, ,$(test))))))
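+
+# For instance (module and test names are hypothetical), setting
+# CONCUERROR_TESTS = my_mod:my_test makes the template above define a
+# concuerror-my_mod-my_test target, which "make concuerror" then runs and
+# links from $(CONCUERROR_LOGS_DIR)/concuerror.html.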
+
+distclean-concuerror:
+ $(gen_verbose) rm -rf $(CONCUERROR_LOGS_DIR)
+
+endif
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: ct apps-ct distclean-ct
+
+# Configuration.
+
+CT_OPTS ?=
+
+ifneq ($(wildcard $(TEST_DIR)),)
+ifndef CT_SUITES
+CT_SUITES := $(sort $(subst _SUITE.erl,,$(notdir $(call core_find,$(TEST_DIR)/,*_SUITE.erl))))
+endif
+endif
+CT_SUITES ?=
+CT_LOGS_DIR ?= $(CURDIR)/logs
+
+# Core targets.
+
+tests:: ct
+
+ifndef KEEP_LOGS
+distclean:: distclean-ct
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Common_test targets:" \
+ " ct Run all the common_test suites for this project" \
+ "" \
+ "All your common_test suites have their associated targets." \
+		"A suite named http_SUITE can be run using the ct-http target."
+
+# Plugin-specific targets.
+
+CT_RUN = ct_run \
+ -no_auto_compile \
+ -noinput \
+ -pa $(CURDIR)/ebin $(TEST_DIR) \
+ -dir $(TEST_DIR) \
+ -logdir $(CT_LOGS_DIR)
+
+ifeq ($(CT_SUITES),)
+ct: $(if $(IS_APP)$(ROOT_DIR),,apps-ct)
+else
+# We do not run tests if we are in an apps/* application with no test directory.
+ifneq ($(IS_APP)$(wildcard $(TEST_DIR)),1)
+ct: test-build $(if $(IS_APP)$(ROOT_DIR),,apps-ct)
+ $(verbose) mkdir -p $(CT_LOGS_DIR)
+ $(gen_verbose) $(CT_RUN) -sname ct_$(PROJECT) -suite $(addsuffix _SUITE,$(CT_SUITES)) $(CT_OPTS)
+endif
+endif
+
+ifneq ($(ALL_APPS_DIRS),)
+define ct_app_target
+apps-ct-$1: test-build
+ $$(MAKE) -C $1 ct IS_APP=1
+endef
+
+$(foreach app,$(ALL_APPS_DIRS),$(eval $(call ct_app_target,$(app))))
+
+apps-ct: $(addprefix apps-ct-,$(ALL_APPS_DIRS))
+endif
+
+ifdef t
+ifeq (,$(findstring :,$t))
+CT_EXTRA = -group $t
+else
+t_words = $(subst :, ,$t)
+CT_EXTRA = -group $(firstword $(t_words)) -case $(lastword $(t_words))
+endif
+else
+ifdef c
+CT_EXTRA = -case $c
+else
+CT_EXTRA =
+endif
+endif
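+
+# Examples of narrowing a suite run (suite, group and case names are
+# hypothetical):
+#
+#   make ct-http t=my_group             # -group my_group
+#   make ct-http t=my_group:my_case     # -group my_group -case my_case
+#   make ct-http c=my_case              # -case my_case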
+
+define ct_suite_target
+ct-$(1): test-build
+ $(verbose) mkdir -p $(CT_LOGS_DIR)
+ $(gen_verbose_esc) $(CT_RUN) -sname ct_$(PROJECT) -suite $(addsuffix _SUITE,$(1)) $(CT_EXTRA) $(CT_OPTS)
+endef
+
+$(foreach test,$(CT_SUITES),$(eval $(call ct_suite_target,$(test))))
+
+distclean-ct:
+ $(gen_verbose) rm -rf $(CT_LOGS_DIR)
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: plt distclean-plt dialyze
+
+# Configuration.
+
+DIALYZER_PLT ?= $(CURDIR)/.$(PROJECT).plt
+export DIALYZER_PLT
+
+PLT_APPS ?=
+DIALYZER_DIRS ?= --src -r $(wildcard src) $(ALL_APPS_DIRS)
+DIALYZER_OPTS ?= -Werror_handling -Wrace_conditions -Wunmatched_returns # -Wunderspecs
+DIALYZER_PLT_OPTS ?=
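+
+# Typical extra configuration in a user Makefile (application names are
+# illustrative):
+#
+#   PLT_APPS = crypto public_key ssl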
+
+# Core targets.
+
+check:: dialyze
+
+distclean:: distclean-plt
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Dialyzer targets:" \
+ " plt Build a PLT file for this project" \
+ " dialyze Analyze the project using Dialyzer"
+
+# Plugin-specific targets.
+
+define filter_opts.erl
+ Opts = init:get_plain_arguments(),
+ {Filtered, _} = lists:foldl(fun
+ (O, {Os, true}) -> {[O|Os], false};
+ (O = "-D", {Os, _}) -> {[O|Os], true};
+ (O = [\\$$-, \\$$D, _ | _], {Os, _}) -> {[O|Os], false};
+ (O = "-I", {Os, _}) -> {[O|Os], true};
+ (O = [\\$$-, \\$$I, _ | _], {Os, _}) -> {[O|Os], false};
+ (O = "-pa", {Os, _}) -> {[O|Os], true};
+ (_, Acc) -> Acc
+ end, {[], false}, Opts),
+ io:format("~s~n", [string:join(lists:reverse(Filtered), " ")]),
+ halt().
+endef
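+
+# Illustrative sketch (option values are hypothetical): given plain arguments
+#   -DTEST -I include +debug_info -o ebin
+# the snippet above keeps only the options Dialyzer understands and prints
+# "-DTEST -I include".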
+
+# DIALYZER_PLT is a variable understood directly by Dialyzer.
+#
+# We append the path to erts at the end of the PLT. This works
+# because the PLT file is in the external term format and the
+# function binary_to_term/1 ignores any trailing data.
+$(DIALYZER_PLT): deps app
+ $(eval DEPS_LOG := $(shell test -f $(ERLANG_MK_TMP)/deps.log && \
+ while read p; do test -d $$p/ebin && echo $$p/ebin; done <$(ERLANG_MK_TMP)/deps.log))
+ $(verbose) dialyzer --build_plt $(DIALYZER_PLT_OPTS) --apps \
+ erts kernel stdlib $(PLT_APPS) $(OTP_DEPS) $(LOCAL_DEPS) $(DEPS_LOG) || test $$? -eq 2
+ $(verbose) $(ERL) -eval 'io:format("~n~s~n", [code:lib_dir(erts)]), halt().' >> $@
+
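+# A minimal Erlang sketch of the trailing-data behaviour relied on above
+# (term and path are hypothetical):
+#   Bin = term_to_binary(plt_contents),
+#   plt_contents = binary_to_term(<<Bin/binary, "/usr/lib/erlang/lib/erts-11.0">>).
+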
+plt: $(DIALYZER_PLT)
+
+distclean-plt:
+ $(gen_verbose) rm -f $(DIALYZER_PLT)
+
+ifneq ($(wildcard $(DIALYZER_PLT)),)
+dialyze: $(if $(filter --src,$(DIALYZER_DIRS)),,deps app)
+ $(verbose) if ! tail -n1 $(DIALYZER_PLT) | \
+ grep -q "^`$(ERL) -eval 'io:format("~s", [code:lib_dir(erts)]), halt().'`$$"; then \
+ rm $(DIALYZER_PLT); \
+ $(MAKE) plt; \
+ fi
+else
+dialyze: $(DIALYZER_PLT)
+endif
+ $(verbose) dialyzer --no_native `$(ERL) \
+ -eval "$(subst $(newline),,$(call escape_dquotes,$(call filter_opts.erl)))" \
+ -extra $(ERLC_OPTS)` $(DIALYZER_DIRS) $(DIALYZER_OPTS) $(if $(wildcard ebin/),-pa ebin/)
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-edoc edoc
+
+# Configuration.
+
+EDOC_OPTS ?=
+EDOC_SRC_DIRS ?=
+EDOC_OUTPUT ?= doc
+
+define edoc.erl
+ SrcPaths = lists:foldl(fun(P, Acc) ->
+ filelib:wildcard(atom_to_list(P) ++ "/{src,c_src}") ++ Acc
+ end, [], [$(call comma_list,$(patsubst %,'%',$(call core_native_path,$(EDOC_SRC_DIRS))))]),
+ DefaultOpts = [{dir, "$(EDOC_OUTPUT)"}, {source_path, SrcPaths}, {subpackages, false}],
+ edoc:application($(1), ".", [$(2)] ++ DefaultOpts),
+ halt(0).
+endef
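+
+# Usage sketch (the option shown is a standard EDoc option; the value is only
+# an example): make edoc EDOC_OPTS="{preprocess, true}"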
+
+# Core targets.
+
+ifneq ($(strip $(EDOC_SRC_DIRS)$(wildcard doc/overview.edoc)),)
+docs:: edoc
+endif
+
+distclean:: distclean-edoc
+
+# Plugin-specific targets.
+
+edoc: distclean-edoc doc-deps
+ $(gen_verbose) $(call erlang,$(call edoc.erl,$(PROJECT),$(EDOC_OPTS)))
+
+distclean-edoc:
+ $(gen_verbose) rm -f $(EDOC_OUTPUT)/*.css $(EDOC_OUTPUT)/*.html $(EDOC_OUTPUT)/*.png $(EDOC_OUTPUT)/edoc-info
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Configuration.
+
+DTL_FULL_PATH ?=
+DTL_PATH ?= templates/
+DTL_PREFIX ?=
+DTL_SUFFIX ?= _dtl
+DTL_OPTS ?=
+
+# Verbosity.
+
+dtl_verbose_0 = @echo " DTL " $(filter %.dtl,$(?F));
+dtl_verbose = $(dtl_verbose_$(V))
+
+# Core targets.
+
+DTL_PATH := $(abspath $(DTL_PATH))
+DTL_FILES := $(sort $(call core_find,$(DTL_PATH),*.dtl))
+
+ifneq ($(DTL_FILES),)
+
+DTL_NAMES = $(addprefix $(DTL_PREFIX),$(addsuffix $(DTL_SUFFIX),$(DTL_FILES:$(DTL_PATH)/%.dtl=%)))
+DTL_MODULES = $(if $(DTL_FULL_PATH),$(subst /,_,$(DTL_NAMES)),$(notdir $(DTL_NAMES)))
+BEAM_FILES += $(addsuffix .beam,$(addprefix ebin/,$(DTL_MODULES)))
+
+ifneq ($(words $(DTL_FILES)),0)
+# Rebuild templates when the Makefile changes.
+$(ERLANG_MK_TMP)/last-makefile-change-erlydtl: $(MAKEFILE_LIST) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(DTL_FILES); \
+ fi
+ $(verbose) touch $@
+
+ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change-erlydtl
+endif
+
+define erlydtl_compile.erl
+ [begin
+ Module0 = case "$(strip $(DTL_FULL_PATH))" of
+ "" ->
+ filename:basename(F, ".dtl");
+ _ ->
+ "$(call core_native_path,$(DTL_PATH))/" ++ F2 = filename:rootname(F, ".dtl"),
+ re:replace(F2, "/", "_", [{return, list}, global])
+ end,
+ Module = list_to_atom("$(DTL_PREFIX)" ++ string:to_lower(Module0) ++ "$(DTL_SUFFIX)"),
+ case erlydtl:compile(F, Module, [$(DTL_OPTS)] ++ [{out_dir, "ebin/"}, return_errors]) of
+ ok -> ok;
+ {ok, _} -> ok
+ end
+ end || F <- string:tokens("$(1)", " ")],
+ halt().
+endef
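+
+# Naming sketch (template path is hypothetical): with the defaults,
+# templates/users/show.dtl compiles to the module show_dtl; with
+# DTL_FULL_PATH=1 it compiles to users_show_dtl instead.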
+
+ebin/$(PROJECT).app:: $(DTL_FILES) | ebin/
+ $(if $(strip $?),\
+ $(dtl_verbose) $(call erlang,$(call erlydtl_compile.erl,$(call core_native_path,$?)),\
+ -pa ebin/))
+
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, Dave Cottlehuber <dch@skunkwerks.at>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-escript escript escript-zip
+
+# Configuration.
+
+ESCRIPT_NAME ?= $(PROJECT)
+ESCRIPT_FILE ?= $(ESCRIPT_NAME)
+
+ESCRIPT_SHEBANG ?= /usr/bin/env escript
+ESCRIPT_COMMENT ?= This is an -*- erlang -*- file
+ESCRIPT_EMU_ARGS ?= -escript main $(ESCRIPT_NAME)
+
+ESCRIPT_ZIP ?= 7z a -tzip -mx=9 -mtc=off $(if $(filter-out 0,$(V)),,> /dev/null)
+ESCRIPT_ZIP_FILE ?= $(ERLANG_MK_TMP)/escript.zip
+
+# Core targets.
+
+distclean:: distclean-escript
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Escript targets:" \
+ " escript Build an executable escript archive" \
+
+# Plugin-specific targets.
+
+escript-zip:: FULL=1
+escript-zip:: deps app
+ $(verbose) mkdir -p $(dir $(ESCRIPT_ZIP))
+ $(verbose) rm -f $(ESCRIPT_ZIP_FILE)
+ $(gen_verbose) cd .. && $(ESCRIPT_ZIP) $(ESCRIPT_ZIP_FILE) $(PROJECT)/ebin/*
+ifneq ($(DEPS),)
+ $(verbose) cd $(DEPS_DIR) && $(ESCRIPT_ZIP) $(ESCRIPT_ZIP_FILE) \
+ $(subst $(DEPS_DIR)/,,$(addsuffix /*,$(wildcard \
+ $(addsuffix /ebin,$(shell cat $(ERLANG_MK_TMP)/deps.log)))))
+endif
+
+escript:: escript-zip
+ $(gen_verbose) printf "%s\n" \
+ "#!$(ESCRIPT_SHEBANG)" \
+ "%% $(ESCRIPT_COMMENT)" \
+ "%%! $(ESCRIPT_EMU_ARGS)" > $(ESCRIPT_FILE)
+ $(verbose) cat $(ESCRIPT_ZIP_FILE) >> $(ESCRIPT_FILE)
+ $(verbose) chmod +x $(ESCRIPT_FILE)
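+
+# Result sketch: $(ESCRIPT_FILE) is the zip archive with a three-line escript
+# header prepended; running it calls $(ESCRIPT_NAME):main/1 in the usual
+# escript fashion.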
+
+distclean-escript:
+ $(gen_verbose) rm -f $(ESCRIPT_FILE)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, Enrique Fernandez <enrique.fernandez@erlang-solutions.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: eunit apps-eunit
+
+# Configuration
+
+EUNIT_OPTS ?=
+EUNIT_ERL_OPTS ?=
+
+# Core targets.
+
+tests:: eunit
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "EUnit targets:" \
+ " eunit Run all the EUnit tests for this project"
+
+# Plugin-specific targets.
+
+define eunit.erl
+ $(call cover.erl)
+ CoverSetup(),
+ case eunit:test($1, [$(EUNIT_OPTS)]) of
+ ok -> ok;
+ error -> halt(2)
+ end,
+ CoverExport("$(call core_native_path,$(COVER_DATA_DIR))/eunit.coverdata"),
+ halt()
+endef
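+
+# Usage sketches (module/test names are hypothetical):
+#   make eunit t=my_module           # run the EUnit tests of my_module
+#   make eunit t=my_module:my_test   # run my_module:my_test/0 only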
+
+EUNIT_ERL_OPTS += -pa $(TEST_DIR) $(CURDIR)/ebin
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+eunit: test-build cover-data-dir
+ $(gen_verbose) $(call erlang,$(call eunit.erl,['$(t)']),$(EUNIT_ERL_OPTS))
+else
+eunit: test-build cover-data-dir
+ $(gen_verbose) $(call erlang,$(call eunit.erl,fun $(t)/0),$(EUNIT_ERL_OPTS))
+endif
+else
+EUNIT_EBIN_MODS = $(notdir $(basename $(ERL_FILES) $(BEAM_FILES)))
+EUNIT_TEST_MODS = $(notdir $(basename $(call core_find,$(TEST_DIR)/,*.erl)))
+
+EUNIT_MODS = $(foreach mod,$(EUNIT_EBIN_MODS) $(filter-out \
+ $(patsubst %,%_tests,$(EUNIT_EBIN_MODS)),$(EUNIT_TEST_MODS)),'$(mod)')
+
+eunit: test-build $(if $(IS_APP)$(ROOT_DIR),,apps-eunit) cover-data-dir
+ifneq ($(wildcard src/ $(TEST_DIR)),)
+ $(gen_verbose) $(call erlang,$(call eunit.erl,[$(call comma_list,$(EUNIT_MODS))]),$(EUNIT_ERL_OPTS))
+endif
+
+ifneq ($(ALL_APPS_DIRS),)
+apps-eunit: test-build
+ $(verbose) eunit_retcode=0 ; for app in $(ALL_APPS_DIRS); do $(MAKE) -C $$app eunit IS_APP=1; \
+ [ $$? -ne 0 ] && eunit_retcode=1 ; done ; \
+ exit $$eunit_retcode
+endif
+endif
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter proper,$(DEPS) $(TEST_DEPS)),proper)
+.PHONY: proper
+
+# Targets.
+
+tests:: proper
+
+define proper_check.erl
+ $(call cover.erl)
+ code:add_pathsa([
+ "$(call core_native_path,$(CURDIR)/ebin)",
+ "$(call core_native_path,$(DEPS_DIR)/*/ebin)",
+ "$(call core_native_path,$(TEST_DIR))"]),
+ Module = fun(M) ->
+ [true] =:= lists:usort([
+ case atom_to_list(F) of
+ "prop_" ++ _ ->
+ io:format("Testing ~p:~p/0~n", [M, F]),
+ proper:quickcheck(M:F(), nocolors);
+ _ ->
+ true
+ end
+ || {F, 0} <- M:module_info(exports)])
+ end,
+ try begin
+ CoverSetup(),
+ Res = case $(1) of
+ all -> [true] =:= lists:usort([Module(M) || M <- [$(call comma_list,$(3))]]);
+ module -> Module($(2));
+ function -> proper:quickcheck($(2), nocolors)
+ end,
+ CoverExport("$(COVER_DATA_DIR)/proper.coverdata"),
+ Res
+ end of
+ true -> halt(0);
+ _ -> halt(1)
+ catch error:undef ->
+ io:format("Undefined property or module?~n~p~n", [erlang:get_stacktrace()]),
+ halt(0)
+ end.
+endef
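+
+# Usage sketches (module/property names are hypothetical):
+#   make proper                          # check every prop_* property found
+#   make proper t=my_module              # check all properties of one module
+#   make proper t=my_module:prop_foo     # check a single property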
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+proper: test-build cover-data-dir
+ $(verbose) $(call erlang,$(call proper_check.erl,module,$(t)))
+else
+proper: test-build cover-data-dir
+ $(verbose) echo Testing $(t)/0
+ $(verbose) $(call erlang,$(call proper_check.erl,function,$(t)()))
+endif
+else
+proper: test-build cover-data-dir
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(wildcard ebin/*.beam) $(call core_find,$(TEST_DIR)/,*.beam))))))
+ $(gen_verbose) $(call erlang,$(call proper_check.erl,all,undefined,$(MODULES)))
+endif
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Verbosity.
+
+proto_verbose_0 = @echo " PROTO " $(filter %.proto,$(?F));
+proto_verbose = $(proto_verbose_$(V))
+
+# Core targets.
+
+ifneq ($(wildcard src/),)
+ifneq ($(filter gpb protobuffs,$(BUILD_DEPS) $(DEPS)),)
+PROTO_FILES := $(filter %.proto,$(ALL_SRC_FILES))
+ERL_FILES += $(addprefix src/,$(patsubst %.proto,%_pb.erl,$(notdir $(PROTO_FILES))))
+
+ifeq ($(PROTO_FILES),)
+$(ERLANG_MK_TMP)/last-makefile-change-protobuffs:
+ $(verbose) :
+else
+# Rebuild proto files when the Makefile changes.
+# We exclude $(PROJECT).d to avoid a circular dependency.
+$(ERLANG_MK_TMP)/last-makefile-change-protobuffs: $(filter-out $(PROJECT).d,$(MAKEFILE_LIST)) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(PROTO_FILES); \
+ fi
+ $(verbose) touch $@
+
+$(PROJECT).d:: $(ERLANG_MK_TMP)/last-makefile-change-protobuffs
+endif
+
+ifeq ($(filter gpb,$(BUILD_DEPS) $(DEPS)),)
+define compile_proto.erl
+ [begin
+ protobuffs_compile:generate_source(F, [
+ {output_include_dir, "./include"},
+ {output_src_dir, "./src"}])
+ end || F <- string:tokens("$1", " ")],
+ halt().
+endef
+else
+define compile_proto.erl
+ [begin
+ gpb_compile:file(F, [
+ {include_as_lib, true},
+ {module_name_suffix, "_pb"},
+ {o_hrl, "./include"},
+ {o_erl, "./src"}])
+ end || F <- string:tokens("$1", " ")],
+ halt().
+endef
+endif
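+
+# Output sketch (file name is hypothetical): src/foo.proto is compiled to
+# src/foo_pb.erl plus include/foo_pb.hrl by either backend.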
+
+ifneq ($(PROTO_FILES),)
+$(PROJECT).d:: $(PROTO_FILES)
+ $(verbose) mkdir -p ebin/ include/
+ $(if $(strip $?),$(proto_verbose) $(call erlang,$(call compile_proto.erl,$?)))
+endif
+endif
+endif
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: relx-rel relx-relup distclean-relx-rel run
+
+# Configuration.
+
+RELX ?= $(ERLANG_MK_TMP)/relx
+RELX_CONFIG ?= $(CURDIR)/relx.config
+
+RELX_URL ?= https://erlang.mk/res/relx-v3.27.0
+RELX_OPTS ?=
+RELX_OUTPUT_DIR ?= _rel
+RELX_REL_EXT ?=
+RELX_TAR ?= 1
+
+ifdef SFX
+ RELX_TAR = 1
+endif
+
+ifeq ($(firstword $(RELX_OPTS)),-o)
+ RELX_OUTPUT_DIR = $(word 2,$(RELX_OPTS))
+else
+ RELX_OPTS += -o $(RELX_OUTPUT_DIR)
+endif
+
+# Core targets.
+
+ifeq ($(IS_DEP),)
+ifneq ($(wildcard $(RELX_CONFIG)),)
+rel:: relx-rel
+
+relup:: relx-relup
+endif
+endif
+
+distclean:: distclean-relx-rel
+
+# Plugin-specific targets.
+
+$(RELX): | $(ERLANG_MK_TMP)
+ $(gen_verbose) $(call core_http_get,$(RELX),$(RELX_URL))
+ $(verbose) chmod +x $(RELX)
+
+relx-rel: $(RELX) rel-deps app
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) release
+ $(verbose) $(MAKE) relx-post-rel
+ifeq ($(RELX_TAR),1)
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) tar
+endif
+
+relx-relup: $(RELX) rel-deps app
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) release
+ $(MAKE) relx-post-rel
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) relup $(if $(filter 1,$(RELX_TAR)),tar)
+
+distclean-relx-rel:
+ $(gen_verbose) rm -rf $(RELX_OUTPUT_DIR)
+
+# Default hooks.
+relx-post-rel::
+ $(verbose) :
+
+# Run target.
+
+ifeq ($(wildcard $(RELX_CONFIG)),)
+run::
+else
+
+define get_relx_release.erl
+ {ok, Config} = file:consult("$(call core_native_path,$(RELX_CONFIG))"),
+ {release, {Name, Vsn0}, _} = lists:keyfind(release, 1, Config),
+ Vsn = case Vsn0 of
+ {cmd, Cmd} -> os:cmd(Cmd);
+ semver -> "";
+ {semver, _} -> "";
+ VsnStr -> Vsn0
+ end,
+ Extended = case lists:keyfind(extended_start_script, 1, Config) of
+ {_, true} -> "1";
+ _ -> ""
+ end,
+ io:format("~s ~s ~s", [Name, Vsn, Extended]),
+ halt(0).
+endef
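+
+# Sketch of the relx.config entries read above (name/version hypothetical):
+#   {release, {my_rel, "1.0.0"}, [my_app]}.
+#   {extended_start_script, true}.
+# would make the snippet print: my_rel 1.0.0 1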
+
+RELX_REL := $(shell $(call erlang,$(get_relx_release.erl)))
+RELX_REL_NAME := $(word 1,$(RELX_REL))
+RELX_REL_VSN := $(word 2,$(RELX_REL))
+RELX_REL_CMD := $(if $(word 3,$(RELX_REL)),console)
+
+ifeq ($(PLATFORM),msys2)
+RELX_REL_EXT := .cmd
+endif
+
+run:: all
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) $(RELX_REL_CMD)
+
+ifdef RELOAD
+rel::
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) ping
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) \
+ eval "io:format(\"~p~n\", [c:lm()])"
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Relx targets:" \
+ " run Compile the project, build the release and run it"
+
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, M Robert Martin <rob@version2beta.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: shell
+
+# Configuration.
+
+SHELL_ERL ?= erl
+SHELL_PATHS ?= $(CURDIR)/ebin $(TEST_DIR)
+SHELL_OPTS ?=
+
+ALL_SHELL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(SHELL_DEPS))
+
+# Core targets
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Shell targets:" \
+	" shell Run an Erlang shell with SHELL_OPTS or a reasonable default"
+
+# Plugin-specific targets.
+
+$(foreach dep,$(SHELL_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+build-shell-deps:
+else
+build-shell-deps: $(ALL_SHELL_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_SHELL_DEPS_DIRS) ; do \
+ if [ -z "$(strip $(FULL))" ] && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ else \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ fi \
+ done
+endif
+
+shell:: build-shell-deps
+ $(gen_verbose) $(SHELL_ERL) -pa $(SHELL_PATHS) $(SHELL_OPTS)
+
+# Copyright 2017, Stanislaw Klekot <dozzie@jarowit.net>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-sphinx sphinx
+
+# Configuration.
+
+SPHINX_BUILD ?= sphinx-build
+SPHINX_SOURCE ?= doc
+SPHINX_CONFDIR ?=
+SPHINX_FORMATS ?= html
+SPHINX_DOCTREES ?= $(ERLANG_MK_TMP)/sphinx.doctrees
+SPHINX_OPTS ?=
+
+#sphinx_html_opts =
+#sphinx_html_output = html
+#sphinx_man_opts =
+#sphinx_man_output = man
+#sphinx_latex_opts =
+#sphinx_latex_output = latex
+
+# Helpers.
+
+sphinx_build_0 = @echo " SPHINX" $1; $(SPHINX_BUILD) -N -q
+sphinx_build_1 = $(SPHINX_BUILD) -N
+sphinx_build_2 = set -x; $(SPHINX_BUILD)
+sphinx_build = $(sphinx_build_$(V))
+
+define sphinx.build
+$(call sphinx_build,$1) -b $1 -d $(SPHINX_DOCTREES) $(if $(SPHINX_CONFDIR),-c $(SPHINX_CONFDIR)) $(SPHINX_OPTS) $(sphinx_$1_opts) -- $(SPHINX_SOURCE) $(call sphinx.output,$1)
+
+endef
+
+define sphinx.output
+$(if $(sphinx_$1_output),$(sphinx_$1_output),$1)
+endef
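+
+# Expansion sketch: with the defaults and V=0, the html format amounts to
+#   sphinx-build -N -q -b html -d $(ERLANG_MK_TMP)/sphinx.doctrees -- doc html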
+
+# Targets.
+
+ifneq ($(wildcard $(if $(SPHINX_CONFDIR),$(SPHINX_CONFDIR),$(SPHINX_SOURCE))/conf.py),)
+docs:: sphinx
+distclean:: distclean-sphinx
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Sphinx targets:" \
+ " sphinx Generate Sphinx documentation." \
+ "" \
+	"ReST sources and the 'conf.py' file are expected in the directory pointed" \
+	"to by SPHINX_SOURCE ('doc' by default). SPHINX_FORMATS lists the formats to" \
+	"build (only the 'html' format is generated by default); the target directory" \
+	'can be set via sphinx_$${format}_output, for example: sphinx_html_output = output/html' \
+	"Additional Sphinx options can be set in SPHINX_OPTS."
+
+# Plugin-specific targets.
+
+sphinx:
+ $(foreach F,$(SPHINX_FORMATS),$(call sphinx.build,$F))
+
+distclean-sphinx:
+ $(gen_verbose) rm -rf $(filter-out $(SPHINX_SOURCE),$(foreach F,$(SPHINX_FORMATS),$(call sphinx.output,$F)))
+
+# Copyright (c) 2017, Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: show-ERL_LIBS show-ERLC_OPTS show-TEST_ERLC_OPTS
+
+show-ERL_LIBS:
+ @echo $(ERL_LIBS)
+
+show-ERLC_OPTS:
+ @$(foreach opt,$(ERLC_OPTS) -pa ebin -I include,echo "$(opt)";)
+
+show-TEST_ERLC_OPTS:
+ @$(foreach opt,$(TEST_ERLC_OPTS) -pa ebin -I include,echo "$(opt)";)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter triq,$(DEPS) $(TEST_DEPS)),triq)
+.PHONY: triq
+
+# Targets.
+
+tests:: triq
+
+define triq_check.erl
+ $(call cover.erl)
+ code:add_pathsa([
+ "$(call core_native_path,$(CURDIR)/ebin)",
+ "$(call core_native_path,$(DEPS_DIR)/*/ebin)",
+ "$(call core_native_path,$(TEST_DIR))"]),
+ try begin
+ CoverSetup(),
+ Res = case $(1) of
+ all -> [true] =:= lists:usort([triq:check(M) || M <- [$(call comma_list,$(3))]]);
+ module -> triq:check($(2));
+ function -> triq:check($(2))
+ end,
+ CoverExport("$(COVER_DATA_DIR)/triq.coverdata"),
+ Res
+ end of
+ true -> halt(0);
+ _ -> halt(1)
+ catch error:undef ->
+ io:format("Undefined property or module?~n~p~n", [erlang:get_stacktrace()]),
+ halt(0)
+ end.
+endef
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+triq: test-build cover-data-dir
+ $(verbose) $(call erlang,$(call triq_check.erl,module,$(t)))
+else
+triq: test-build cover-data-dir
+ $(verbose) echo Testing $(t)/0
+ $(verbose) $(call erlang,$(call triq_check.erl,function,$(t)()))
+endif
+else
+triq: test-build cover-data-dir
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(wildcard ebin/*.beam) $(call core_find,$(TEST_DIR)/,*.beam))))))
+ $(gen_verbose) $(call erlang,$(call triq_check.erl,all,undefined,$(MODULES)))
+endif
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Erlang Solutions Ltd.
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: xref distclean-xref
+
+# Configuration.
+
+ifeq ($(XREF_CONFIG),)
+ XREFR_ARGS :=
+else
+ XREFR_ARGS := -c $(XREF_CONFIG)
+endif
+
+XREFR ?= $(CURDIR)/xrefr
+export XREFR
+
+XREFR_URL ?= https://github.com/inaka/xref_runner/releases/download/1.1.0/xrefr
+
+# Core targets.
+
+help::
+ $(verbose) printf '%s\n' '' \
+ 'Xref targets:' \
+	' xref Run Xrefr using $$XREF_CONFIG as the config file, if defined'
+
+distclean:: distclean-xref
+
+# Plugin-specific targets.
+
+$(XREFR):
+ $(gen_verbose) $(call core_http_get,$(XREFR),$(XREFR_URL))
+ $(verbose) chmod +x $(XREFR)
+
+xref: deps app $(XREFR)
+ $(gen_verbose) $(XREFR) $(XREFR_ARGS)
+
+distclean-xref:
+ $(gen_verbose) rm -rf $(XREFR)
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Viktor Söderqvist <viktor@zuiderkwast.se>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+COVER_REPORT_DIR ?= cover
+COVER_DATA_DIR ?= $(COVER_REPORT_DIR)
+
+ifdef COVER
+COVER_APPS ?= $(notdir $(ALL_APPS_DIRS))
+COVER_DEPS ?=
+endif
+
+# Code coverage for Common Test.
+
+ifdef COVER
+ifdef CT_RUN
+ifneq ($(wildcard $(TEST_DIR)),)
+test-build:: $(TEST_DIR)/ct.cover.spec
+
+$(TEST_DIR)/ct.cover.spec: cover-data-dir
+ $(gen_verbose) printf "%s\n" \
+ "{incl_app, '$(PROJECT)', details}." \
+ "{incl_dirs, '$(PROJECT)', [\"$(call core_native_path,$(CURDIR)/ebin)\" \
+ $(foreach a,$(COVER_APPS),$(comma) \"$(call core_native_path,$(APPS_DIR)/$a/ebin)\") \
+ $(foreach d,$(COVER_DEPS),$(comma) \"$(call core_native_path,$(DEPS_DIR)/$d/ebin)\")]}." \
+ '{export,"$(call core_native_path,$(abspath $(COVER_DATA_DIR))/ct.coverdata)"}.' > $@
+
+CT_RUN += -cover $(TEST_DIR)/ct.cover.spec
+endif
+endif
+endif
+
+# Code coverage for other tools.
+
+ifdef COVER
+define cover.erl
+ CoverSetup = fun() ->
+ Dirs = ["$(call core_native_path,$(CURDIR)/ebin)"
+ $(foreach a,$(COVER_APPS),$(comma) "$(call core_native_path,$(APPS_DIR)/$a/ebin)")
+ $(foreach d,$(COVER_DEPS),$(comma) "$(call core_native_path,$(DEPS_DIR)/$d/ebin)")],
+ [begin
+ case filelib:is_dir(Dir) of
+ false -> false;
+ true ->
+ case cover:compile_beam_directory(Dir) of
+ {error, _} -> halt(1);
+ _ -> true
+ end
+ end
+ end || Dir <- Dirs]
+ end,
+ CoverExport = fun(Filename) -> cover:export(Filename) end,
+endef
+else
+define cover.erl
+ CoverSetup = fun() -> ok end,
+ CoverExport = fun(_) -> ok end,
+endef
+endif
+
+# Core targets
+
+ifdef COVER
+ifneq ($(COVER_REPORT_DIR),)
+tests::
+ $(verbose) $(MAKE) --no-print-directory cover-report
+endif
+
+cover-data-dir: | $(COVER_DATA_DIR)
+
+$(COVER_DATA_DIR):
+ $(verbose) mkdir -p $(COVER_DATA_DIR)
+else
+cover-data-dir:
+endif
+
+clean:: coverdata-clean
+
+ifneq ($(COVER_REPORT_DIR),)
+distclean:: cover-report-clean
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Cover targets:" \
+	" cover-report Generate an HTML coverage report from previously collected" \
+ " cover data." \
+ " all.coverdata Merge all coverdata files into all.coverdata." \
+ "" \
+ "If COVER=1 is set, coverage data is generated by the targets eunit and ct. The" \
+	"target tests additionally generates an HTML coverage report from the combined" \
+ "coverdata files from each of these testing tools. HTML reports can be disabled" \
+ "by setting COVER_REPORT_DIR to empty."
+
+# Plugin specific targets
+
+COVERDATA = $(filter-out $(COVER_DATA_DIR)/all.coverdata,$(wildcard $(COVER_DATA_DIR)/*.coverdata))
+
+.PHONY: coverdata-clean
+coverdata-clean:
+ $(gen_verbose) rm -f $(COVER_DATA_DIR)/*.coverdata $(TEST_DIR)/ct.cover.spec
+
+# Merge all coverdata files into one.
+define cover_export.erl
+ $(foreach f,$(COVERDATA),cover:import("$(f)") == ok orelse halt(1),)
+ cover:export("$(COVER_DATA_DIR)/$@"), halt(0).
+endef
+
+all.coverdata: $(COVERDATA) cover-data-dir
+ $(gen_verbose) $(call erlang,$(cover_export.erl))
+
+# These are only defined if COVER_REPORT_DIR is non-empty. Set COVER_REPORT_DIR to
+# empty if you want the coverdata files but not the HTML report.
+ifneq ($(COVER_REPORT_DIR),)
+
+.PHONY: cover-report-clean cover-report
+
+cover-report-clean:
+ $(gen_verbose) rm -rf $(COVER_REPORT_DIR)
+ifneq ($(COVER_REPORT_DIR),$(COVER_DATA_DIR))
+ $(if $(shell ls -A $(COVER_DATA_DIR)/),,$(verbose) rmdir $(COVER_DATA_DIR))
+endif
+
+ifeq ($(COVERDATA),)
+cover-report:
+else
+
+# Modules which include eunit.hrl always contain one line without coverage
+# because eunit defines test/0 which is never called. We compensate for this.
+EUNIT_HRL_MODS = $(subst $(space),$(comma),$(shell \
+ grep -H -e '^\s*-include.*include/eunit\.hrl"' src/*.erl \
+ | sed "s/^src\/\(.*\)\.erl:.*/'\1'/" | uniq))
+
+define cover_report.erl
+ $(foreach f,$(COVERDATA),cover:import("$(f)") == ok orelse halt(1),)
+ Ms = cover:imported_modules(),
+ [cover:analyse_to_file(M, "$(COVER_REPORT_DIR)/" ++ atom_to_list(M)
+ ++ ".COVER.html", [html]) || M <- Ms],
+ Report = [begin {ok, R} = cover:analyse(M, module), R end || M <- Ms],
+ EunitHrlMods = [$(EUNIT_HRL_MODS)],
+ Report1 = [{M, {Y, case lists:member(M, EunitHrlMods) of
+ true -> N - 1; false -> N end}} || {M, {Y, N}} <- Report],
+ TotalY = lists:sum([Y || {_, {Y, _}} <- Report1]),
+ TotalN = lists:sum([N || {_, {_, N}} <- Report1]),
+ Perc = fun(Y, N) -> case Y + N of 0 -> 100; S -> round(100 * Y / S) end end,
+ TotalPerc = Perc(TotalY, TotalN),
+ {ok, F} = file:open("$(COVER_REPORT_DIR)/index.html", [write]),
+ io:format(F, "<!DOCTYPE html><html>~n"
+ "<head><meta charset=\"UTF-8\">~n"
+ "<title>Coverage report</title></head>~n"
+ "<body>~n", []),
+ io:format(F, "<h1>Coverage</h1>~n<p>Total: ~p%</p>~n", [TotalPerc]),
+ io:format(F, "<table><tr><th>Module</th><th>Coverage</th></tr>~n", []),
+ [io:format(F, "<tr><td><a href=\"~p.COVER.html\">~p</a></td>"
+ "<td>~p%</td></tr>~n",
+ [M, M, Perc(Y, N)]) || {M, {Y, N}} <- Report1],
+ How = "$(subst $(space),$(comma)$(space),$(basename $(COVERDATA)))",
+ Date = "$(shell date -u "+%Y-%m-%dT%H:%M:%SZ")",
+ io:format(F, "</table>~n"
+ "<p>Generated using ~s and erlang.mk on ~s.</p>~n"
+ "</body></html>", [How, Date]),
+ halt().
+endef
+
+cover-report:
+ $(verbose) mkdir -p $(COVER_REPORT_DIR)
+ $(gen_verbose) $(call erlang,$(cover_report.erl))
+
+endif
+endif # ifneq ($(COVER_REPORT_DIR),)
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: sfx
+
+ifdef RELX_REL
+ifdef SFX
+
+# Configuration.
+
+SFX_ARCHIVE ?= $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/$(RELX_REL_NAME)-$(RELX_REL_VSN).tar.gz
+SFX_OUTPUT_FILE ?= $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME).run
+
+# Core targets.
+
+rel:: sfx
+
+# Plugin-specific targets.
+
+define sfx_stub
+#!/bin/sh
+
+TMPDIR=`mktemp -d`
+ARCHIVE=`awk '/^__ARCHIVE_BELOW__$$/ {print NR + 1; exit 0;}' $$0`
+FILENAME=$$(basename $$0)
+REL=$${FILENAME%.*}
+
+tail -n+$$ARCHIVE $$0 | tar -xzf - -C $$TMPDIR
+
+$$TMPDIR/bin/$$REL console
+RET=$$?
+
+rm -rf $$TMPDIR
+
+exit $$RET
+
+__ARCHIVE_BELOW__
+endef
+
+sfx:
+ $(verbose) $(call core_render,sfx_stub,$(SFX_OUTPUT_FILE))
+ $(gen_verbose) cat $(SFX_ARCHIVE) >> $(SFX_OUTPUT_FILE)
+ $(verbose) chmod +x $(SFX_OUTPUT_FILE)
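+
+# Usage sketch: with a relx.config present, "make rel SFX=1" additionally
+# produces $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME).run, a self-extracting script
+# that unpacks the release tarball to a temporary directory and starts a
+# console.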
+
+endif
+endif
+
+# Copyright (c) 2013-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# External plugins.
+
+DEP_PLUGINS ?=
+
+$(foreach p,$(DEP_PLUGINS),\
+ $(eval $(if $(findstring /,$p),\
+ $(call core_dep_plugin,$p,$(firstword $(subst /, ,$p))),\
+ $(call core_dep_plugin,$p/plugins.mk,$p))))
+
+help:: help-plugins
+
+help-plugins::
+ $(verbose) :
+
+# Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015-2016, Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Fetch dependencies recursively (without building them).
+
+.PHONY: fetch-deps fetch-doc-deps fetch-rel-deps fetch-test-deps \
+ fetch-shell-deps
+
+.PHONY: $(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+fetch-deps: $(ERLANG_MK_RECURSIVE_DEPS_LIST)
+fetch-doc-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST)
+fetch-rel-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST)
+fetch-test-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST)
+fetch-shell-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+ifneq ($(SKIP_DEPS),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST):
+ $(verbose) :> $@
+else
+# By default, we fetch "normal" dependencies. They are also included
+# regardless of the type of dependencies requested.
+#
+# $(ALL_DEPS_DIRS) includes $(BUILD_DEPS).
+
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_DOC_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_REL_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_TEST_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_SHELL_DEPS_DIRS)
+
+# Allow the use of fetch-deps and $(DEP_TYPES) to fetch multiple types of
+# dependencies with a single target.
+ifneq ($(filter doc,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_DOC_DEPS_DIRS)
+endif
+ifneq ($(filter rel,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_REL_DEPS_DIRS)
+endif
+ifneq ($(filter test,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_TEST_DEPS_DIRS)
+endif
+ifneq ($(filter shell,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_SHELL_DEPS_DIRS)
+endif
+
+ERLANG_MK_RECURSIVE_TMP_LIST := $(abspath $(ERLANG_MK_TMP)/recursive-tmp-deps-$(shell echo $$PPID).log)
+
+$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): | $(ERLANG_MK_TMP)
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_RECURSIVE_TMP_LIST)
+endif
+ $(verbose) touch $(ERLANG_MK_RECURSIVE_TMP_LIST)
+ $(verbose) set -e; for dep in $^ ; do \
+ if ! grep -qs ^$$dep$$ $(ERLANG_MK_RECURSIVE_TMP_LIST); then \
+ echo $$dep >> $(ERLANG_MK_RECURSIVE_TMP_LIST); \
+ if grep -qs -E "^[[:blank:]]*include[[:blank:]]+(erlang\.mk|.*/erlang\.mk|.*ERLANG_MK_FILENAME.*)$$" \
+ $$dep/GNUmakefile $$dep/makefile $$dep/Makefile; then \
+ $(MAKE) -C $$dep fetch-deps \
+ IS_DEP=1 \
+ ERLANG_MK_RECURSIVE_TMP_LIST=$(ERLANG_MK_RECURSIVE_TMP_LIST); \
+ fi \
+ fi \
+ done
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) sort < $(ERLANG_MK_RECURSIVE_TMP_LIST) | \
+ uniq > $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted
+ $(verbose) cmp -s $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted $@ \
+ || mv $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted $@
+ $(verbose) rm -f $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted
+ $(verbose) rm $(ERLANG_MK_RECURSIVE_TMP_LIST)
+endif
+endif # ifneq ($(SKIP_DEPS),)
+
+# List dependencies recursively.
+
+.PHONY: list-deps list-doc-deps list-rel-deps list-test-deps \
+ list-shell-deps
+
+list-deps: $(ERLANG_MK_RECURSIVE_DEPS_LIST)
+list-doc-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST)
+list-rel-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST)
+list-test-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST)
+list-shell-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+list-deps list-doc-deps list-rel-deps list-test-deps list-shell-deps:
+ $(verbose) cat $^
+
+# Query dependencies recursively.
+
+.PHONY: query-deps query-doc-deps query-rel-deps query-test-deps \
+ query-shell-deps
+
+QUERY ?= name fetch_method repo version
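+
+# Usage sketch: "make query-deps QUERY='name repo'" prints the selected
+# fields for every dependency, one line per dependency, prefixed with the
+# project that declares it.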
+
+define query_target
+$(1): $(2) clean-tmp-query.log
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(4)
+endif
+ $(verbose) $(foreach dep,$(3),\
+ echo $(PROJECT): $(foreach q,$(QUERY),$(call query_$(q),$(dep))) >> $(4) ;)
+ $(if $(filter-out query-deps,$(1)),,\
+ $(verbose) set -e; for dep in $(3) ; do \
+ if grep -qs ^$$$$dep$$$$ $(ERLANG_MK_TMP)/query.log; then \
+ :; \
+ else \
+ echo $$$$dep >> $(ERLANG_MK_TMP)/query.log; \
+ $(MAKE) -C $(DEPS_DIR)/$$$$dep $$@ QUERY="$(QUERY)" IS_DEP=1 || true; \
+ fi \
+ done)
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) touch $(4)
+ $(verbose) cat $(4)
+endif
+endef
+
+clean-tmp-query.log:
+ifeq ($(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_TMP)/query.log
+endif
+
+$(eval $(call query_target,query-deps,$(ERLANG_MK_RECURSIVE_DEPS_LIST),$(BUILD_DEPS) $(DEPS),$(ERLANG_MK_QUERY_DEPS_FILE)))
+$(eval $(call query_target,query-doc-deps,$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST),$(DOC_DEPS),$(ERLANG_MK_QUERY_DOC_DEPS_FILE)))
+$(eval $(call query_target,query-rel-deps,$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST),$(REL_DEPS),$(ERLANG_MK_QUERY_REL_DEPS_FILE)))
+$(eval $(call query_target,query-test-deps,$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST),$(TEST_DEPS),$(ERLANG_MK_QUERY_TEST_DEPS_FILE)))
+$(eval $(call query_target,query-shell-deps,$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST),$(SHELL_DEPS),$(ERLANG_MK_QUERY_SHELL_DEPS_FILE)))
diff --git a/deps/rabbitmq_shovel/include/rabbit_shovel.hrl b/deps/rabbitmq_shovel/include/rabbit_shovel.hrl
new file mode 100644
index 0000000000..49d41d1e8f
--- /dev/null
+++ b/deps/rabbitmq_shovel/include/rabbit_shovel.hrl
@@ -0,0 +1,31 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-record(endpoint,
+ {uris,
+ resource_declaration
+ }).
+
+-record(shovel,
+ {sources,
+ destinations,
+ prefetch_count,
+ ack_mode,
+ publish_fields,
+ publish_properties,
+ queue,
+ reconnect_delay,
+ delete_after = never
+ }).
+
+-define(SHOVEL_USER, <<"rmq-shovel">>).
+
+-define(DEFAULT_PREFETCH, 1000).
+-define(DEFAULT_ACK_MODE, on_confirm).
+-define(DEFAULT_RECONNECT_DELAY, 5).
+
+-define(SHOVEL_GUIDE_URL, <<"https://rabbitmq.com/shovel.html">>).
diff --git a/deps/rabbitmq_shovel/rabbitmq-components.mk b/deps/rabbitmq_shovel/rabbitmq-components.mk
new file mode 100644
index 0000000000..b2a3be8b35
--- /dev/null
+++ b/deps/rabbitmq_shovel/rabbitmq-components.mk
@@ -0,0 +1,359 @@
+ifeq ($(.DEFAULT_GOAL),)
+# Define default goal to `all` because this file defines some targets
+# before the inclusion of erlang.mk leading to the wrong target becoming
+# the default.
+.DEFAULT_GOAL = all
+endif
+
+# PROJECT_VERSION defaults to:
+# 1. the version exported by rabbitmq-server-release;
+# 2. the version stored in `git-revisions.txt`, if it exists;
+# 3. a version based on git-describe(1), if it is a Git clone;
+# 4. 0.0.0
+
+PROJECT_VERSION := $(RABBITMQ_VERSION)
+
+ifeq ($(PROJECT_VERSION),)
+PROJECT_VERSION := $(shell \
+if test -f git-revisions.txt; then \
+ head -n1 git-revisions.txt | \
+ awk '{print $$$(words $(PROJECT_DESCRIPTION) version);}'; \
+else \
+ (git describe --dirty --abbrev=7 --tags --always --first-parent \
+ 2>/dev/null || echo rabbitmq_v0_0_0) | \
+ sed -e 's/^rabbitmq_v//' -e 's/^v//' -e 's/_/./g' -e 's/-/+/' \
+ -e 's/-/./g'; \
+fi)
+endif
+
+# --------------------------------------------------------------------
+# RabbitMQ components.
+# --------------------------------------------------------------------
+
+# For RabbitMQ repositories, we want to checkout branches which match
+# the parent project. For instance, if the parent project is on a
+# release tag, dependencies must be on the same release tag. If the
+# parent project is on a topic branch, dependencies must be on the same
+# topic branch or fallback to `stable` or `master` whichever was the
+# base of the topic branch.
+
+dep_amqp_client = git_rmq rabbitmq-erlang-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_amqp10_client = git_rmq rabbitmq-amqp1.0-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_amqp10_common = git_rmq rabbitmq-amqp1.0-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbit = git_rmq rabbitmq-server $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbit_common = git_rmq rabbitmq-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_amqp1_0 = git_rmq rabbitmq-amqp1.0 $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_amqp = git_rmq rabbitmq-auth-backend-amqp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_cache = git_rmq rabbitmq-auth-backend-cache $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_http = git_rmq rabbitmq-auth-backend-http $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_ldap = git_rmq rabbitmq-auth-backend-ldap $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_oauth2 = git_rmq rabbitmq-auth-backend-oauth2 $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_mechanism_ssl = git_rmq rabbitmq-auth-mechanism-ssl $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_aws = git_rmq rabbitmq-aws $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_boot_steps_visualiser = git_rmq rabbitmq-boot-steps-visualiser $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_cli = git_rmq rabbitmq-cli $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_codegen = git_rmq rabbitmq-codegen $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_consistent_hash_exchange = git_rmq rabbitmq-consistent-hash-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_ct_client_helpers = git_rmq rabbitmq-ct-client-helpers $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_ct_helpers = git_rmq rabbitmq-ct-helpers $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_delayed_message_exchange = git_rmq rabbitmq-delayed-message-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_dotnet_client = git_rmq rabbitmq-dotnet-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_event_exchange = git_rmq rabbitmq-event-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_federation = git_rmq rabbitmq-federation $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_federation_management = git_rmq rabbitmq-federation-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_java_client = git_rmq rabbitmq-java-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_client = git_rmq rabbitmq-jms-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_cts = git_rmq rabbitmq-jms-cts $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_topic_exchange = git_rmq rabbitmq-jms-topic-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_lvc_exchange = git_rmq rabbitmq-lvc-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management = git_rmq rabbitmq-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_agent = git_rmq rabbitmq-management-agent $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_exchange = git_rmq rabbitmq-management-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_themes = git_rmq rabbitmq-management-themes $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_message_timestamp = git_rmq rabbitmq-message-timestamp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_metronome = git_rmq rabbitmq-metronome $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_mqtt = git_rmq rabbitmq-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_objc_client = git_rmq rabbitmq-objc-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_aws = git_rmq rabbitmq-peer-discovery-aws $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_common = git_rmq rabbitmq-peer-discovery-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_consul = git_rmq rabbitmq-peer-discovery-consul $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_etcd = git_rmq rabbitmq-peer-discovery-etcd $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_k8s = git_rmq rabbitmq-peer-discovery-k8s $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_prometheus = git_rmq rabbitmq-prometheus $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_random_exchange = git_rmq rabbitmq-random-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_recent_history_exchange = git_rmq rabbitmq-recent-history-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_routing_node_stamp = git_rmq rabbitmq-routing-node-stamp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_rtopic_exchange = git_rmq rabbitmq-rtopic-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_server_release = git_rmq rabbitmq-server-release $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_sharding = git_rmq rabbitmq-sharding $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_shovel = git_rmq rabbitmq-shovel $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_shovel_management = git_rmq rabbitmq-shovel-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_stomp = git_rmq rabbitmq-stomp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_stream = git_rmq rabbitmq-stream $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_toke = git_rmq rabbitmq-toke $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_top = git_rmq rabbitmq-top $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_tracing = git_rmq rabbitmq-tracing $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_trust_store = git_rmq rabbitmq-trust-store $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_test = git_rmq rabbitmq-test $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_dispatch = git_rmq rabbitmq-web-dispatch $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_stomp = git_rmq rabbitmq-web-stomp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_stomp_examples = git_rmq rabbitmq-web-stomp-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt = git_rmq rabbitmq-web-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt_examples = git_rmq rabbitmq-web-mqtt-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_website = git_rmq rabbitmq-website $(current_rmq_ref) $(base_rmq_ref) live master
+dep_toke = git_rmq toke $(current_rmq_ref) $(base_rmq_ref) master
+
+dep_rabbitmq_public_umbrella = git_rmq rabbitmq-public-umbrella $(current_rmq_ref) $(base_rmq_ref) master
+
+# Third-party dependencies version pinning.
+#
+# We do that in this file, which is copied in all projects, to ensure
+# all projects use the same versions. It avoids conflicts and makes it
+# possible to work with rabbitmq-public-umbrella.
+
+dep_accept = hex 0.3.5
+dep_cowboy = hex 2.8.0
+dep_cowlib = hex 2.9.1
+dep_jsx = hex 2.11.0
+dep_lager = hex 3.8.0
+dep_prometheus = git https://github.com/deadtrickster/prometheus.erl.git master
+dep_ra = git https://github.com/rabbitmq/ra.git master
+dep_ranch = hex 1.7.1
+dep_recon = hex 2.5.1
+dep_observer_cli = hex 1.5.4
+dep_stdout_formatter = hex 0.2.4
+dep_sysmon_handler = hex 1.3.0
+
+RABBITMQ_COMPONENTS = amqp_client \
+ amqp10_common \
+ amqp10_client \
+ rabbit \
+ rabbit_common \
+ rabbitmq_amqp1_0 \
+ rabbitmq_auth_backend_amqp \
+ rabbitmq_auth_backend_cache \
+ rabbitmq_auth_backend_http \
+ rabbitmq_auth_backend_ldap \
+ rabbitmq_auth_backend_oauth2 \
+ rabbitmq_auth_mechanism_ssl \
+ rabbitmq_aws \
+ rabbitmq_boot_steps_visualiser \
+ rabbitmq_cli \
+ rabbitmq_codegen \
+ rabbitmq_consistent_hash_exchange \
+ rabbitmq_ct_client_helpers \
+ rabbitmq_ct_helpers \
+ rabbitmq_delayed_message_exchange \
+ rabbitmq_dotnet_client \
+ rabbitmq_event_exchange \
+ rabbitmq_federation \
+ rabbitmq_federation_management \
+ rabbitmq_java_client \
+ rabbitmq_jms_client \
+ rabbitmq_jms_cts \
+ rabbitmq_jms_topic_exchange \
+ rabbitmq_lvc_exchange \
+ rabbitmq_management \
+ rabbitmq_management_agent \
+ rabbitmq_management_exchange \
+ rabbitmq_management_themes \
+ rabbitmq_message_timestamp \
+ rabbitmq_metronome \
+ rabbitmq_mqtt \
+ rabbitmq_objc_client \
+ rabbitmq_peer_discovery_aws \
+ rabbitmq_peer_discovery_common \
+ rabbitmq_peer_discovery_consul \
+ rabbitmq_peer_discovery_etcd \
+ rabbitmq_peer_discovery_k8s \
+ rabbitmq_prometheus \
+ rabbitmq_random_exchange \
+ rabbitmq_recent_history_exchange \
+ rabbitmq_routing_node_stamp \
+ rabbitmq_rtopic_exchange \
+ rabbitmq_server_release \
+ rabbitmq_sharding \
+ rabbitmq_shovel \
+ rabbitmq_shovel_management \
+ rabbitmq_stomp \
+ rabbitmq_stream \
+ rabbitmq_toke \
+ rabbitmq_top \
+ rabbitmq_tracing \
+ rabbitmq_trust_store \
+ rabbitmq_web_dispatch \
+ rabbitmq_web_mqtt \
+ rabbitmq_web_mqtt_examples \
+ rabbitmq_web_stomp \
+ rabbitmq_web_stomp_examples \
+ rabbitmq_website
+
+# Erlang.mk does not rebuild dependencies by default once they have been
+# compiled, except for those listed in the `$(FORCE_REBUILD)`
+# variable.
+#
+# We want all RabbitMQ components to always be rebuilt: this eases
+# the work on several components at the same time.
+
+FORCE_REBUILD = $(RABBITMQ_COMPONENTS)
+
+# Several components have a custom erlang.mk/build.config, mainly
+# to disable eunit. Therefore, we can't use the top-level project's
+# erlang.mk copy.
+NO_AUTOPATCH += $(RABBITMQ_COMPONENTS)
+
+ifeq ($(origin current_rmq_ref),undefined)
+ifneq ($(wildcard .git),)
+current_rmq_ref := $(shell (\
+ ref=$$(LANG=C git branch --list | awk '/^\* \(.*detached / {ref=$$0; sub(/.*detached [^ ]+ /, "", ref); sub(/\)$$/, "", ref); print ref; exit;} /^\* / {ref=$$0; sub(/^\* /, "", ref); print ref; exit}');\
+ if test "$$(git rev-parse --short HEAD)" != "$$ref"; then echo "$$ref"; fi))
+else
+current_rmq_ref := master
+endif
+endif
+export current_rmq_ref
+
+ifeq ($(origin base_rmq_ref),undefined)
+ifneq ($(wildcard .git),)
+possible_base_rmq_ref := master
+ifeq ($(possible_base_rmq_ref),$(current_rmq_ref))
+base_rmq_ref := $(current_rmq_ref)
+else
+base_rmq_ref := $(shell \
+ (git rev-parse --verify -q master >/dev/null && \
+ git rev-parse --verify -q $(possible_base_rmq_ref) >/dev/null && \
+ git merge-base --is-ancestor $$(git merge-base master HEAD) $(possible_base_rmq_ref) && \
+ echo $(possible_base_rmq_ref)) || \
+ echo master)
+endif
+else
+base_rmq_ref := master
+endif
+endif
+export base_rmq_ref
+
+# Repository URL selection.
+#
+# First, we infer other components' location from the current project
+# repository URL, if it's a Git repository:
+# - We take the "origin" remote URL as the base
+# - The current project name and repository name are replaced by the
+#   target's properties:
+# eg. rabbitmq-common is replaced by rabbitmq-codegen
+# eg. rabbit_common is replaced by rabbitmq_codegen
+#
+# If cloning from this computed location fails, we fall back to the
+# RabbitMQ upstream, which is GitHub.
+
+# Macro to transform eg. "rabbit_common" to "rabbitmq-common".
+rmq_cmp_repo_name = $(word 2,$(dep_$(1)))
+
+# Upstream URL for the current project.
+RABBITMQ_COMPONENT_REPO_NAME := $(call rmq_cmp_repo_name,$(PROJECT))
+RABBITMQ_UPSTREAM_FETCH_URL ?= https://github.com/rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git
+RABBITMQ_UPSTREAM_PUSH_URL ?= git@github.com:rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git
+
+# Current URL for the current project. If this is not a Git clone,
+# default to the upstream Git repository.
+ifneq ($(wildcard .git),)
+git_origin_fetch_url := $(shell git config remote.origin.url)
+git_origin_push_url := $(shell git config remote.origin.pushurl || git config remote.origin.url)
+RABBITMQ_CURRENT_FETCH_URL ?= $(git_origin_fetch_url)
+RABBITMQ_CURRENT_PUSH_URL ?= $(git_origin_push_url)
+else
+RABBITMQ_CURRENT_FETCH_URL ?= $(RABBITMQ_UPSTREAM_FETCH_URL)
+RABBITMQ_CURRENT_PUSH_URL ?= $(RABBITMQ_UPSTREAM_PUSH_URL)
+endif
+
+# Macro to replace the following pattern:
+# 1. /foo.git -> /bar.git
+# 2. /foo -> /bar
+# 3. /foo/ -> /bar/
+subst_repo_name = $(patsubst %/$(1)/%,%/$(2)/%,$(patsubst %/$(1),%/$(2),$(patsubst %/$(1).git,%/$(2).git,$(3))))
+
+# Macro to replace both the project's name (eg. "rabbit_common") and
+# repository name (eg. "rabbitmq-common") by the target's equivalent.
+#
+# This macro is kept on one line because we don't want whitespace in
+# the returned value, as it's used in $(dep_fetch_git_rmq) in a shell
+# single-quoted string.
+dep_rmq_repo = $(if $(dep_$(2)),$(call subst_repo_name,$(PROJECT),$(2),$(call subst_repo_name,$(RABBITMQ_COMPONENT_REPO_NAME),$(call rmq_cmp_repo_name,$(2)),$(1))),$(pkg_$(1)_repo))
+
+dep_rmq_commits = $(if $(dep_$(1)), \
+ $(wordlist 3,$(words $(dep_$(1))),$(dep_$(1))), \
+ $(pkg_$(1)_commit))
+
+define dep_fetch_git_rmq
+ fetch_url1='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_FETCH_URL),$(1))'; \
+ fetch_url2='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_FETCH_URL),$(1))'; \
+ if test "$$$$fetch_url1" != '$(RABBITMQ_CURRENT_FETCH_URL)' && \
+ git clone -q -n -- "$$$$fetch_url1" $(DEPS_DIR)/$(call dep_name,$(1)); then \
+ fetch_url="$$$$fetch_url1"; \
+ push_url='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_PUSH_URL),$(1))'; \
+ elif git clone -q -n -- "$$$$fetch_url2" $(DEPS_DIR)/$(call dep_name,$(1)); then \
+ fetch_url="$$$$fetch_url2"; \
+ push_url='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_PUSH_URL),$(1))'; \
+ fi; \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && ( \
+ $(foreach ref,$(call dep_rmq_commits,$(1)), \
+ git checkout -q $(ref) >/dev/null 2>&1 || \
+ ) \
+ (echo "error: no valid pathspec among: $(call dep_rmq_commits,$(1))" \
+ 1>&2 && false) ) && \
+ (test "$$$$fetch_url" = "$$$$push_url" || \
+ git remote set-url --push origin "$$$$push_url")
+endef
+
+# --------------------------------------------------------------------
+# Component distribution.
+# --------------------------------------------------------------------
+
+list-dist-deps::
+ @:
+
+prepare-dist::
+ @:
+
+# --------------------------------------------------------------------
+# Umbrella-specific settings.
+# --------------------------------------------------------------------
+
+# If the top-level project is a RabbitMQ component, we override
+# $(DEPS_DIR) for this project to point to the top-level's one.
+#
+# We also verify that the guessed DEPS_DIR is actually named `deps`,
+# to rule out any situation where it is a coincidence that we found a
+# `rabbitmq-components.mk` in upper directories.
+
+possible_deps_dir_1 = $(abspath ..)
+possible_deps_dir_2 = $(abspath ../../..)
+
+ifeq ($(notdir $(possible_deps_dir_1)),deps)
+ifneq ($(wildcard $(possible_deps_dir_1)/../rabbitmq-components.mk),)
+deps_dir_overriden = 1
+DEPS_DIR ?= $(possible_deps_dir_1)
+DISABLE_DISTCLEAN = 1
+endif
+endif
+
+ifeq ($(deps_dir_overriden),)
+ifeq ($(notdir $(possible_deps_dir_2)),deps)
+ifneq ($(wildcard $(possible_deps_dir_2)/../rabbitmq-components.mk),)
+deps_dir_overriden = 1
+DEPS_DIR ?= $(possible_deps_dir_2)
+DISABLE_DISTCLEAN = 1
+endif
+endif
+endif
+
+ifneq ($(wildcard UMBRELLA.md),)
+DISABLE_DISTCLEAN = 1
+endif
+
+# We disable `make distclean` so $(DEPS_DIR) is not accidentally removed.
+
+ifeq ($(DISABLE_DISTCLEAN),1)
+ifneq ($(filter distclean distclean-deps,$(MAKECMDGOALS)),)
+SKIP_DEPS = 1
+endif
+endif
diff --git a/deps/rabbitmq_shovel/src/Elixir.RabbitMQ.CLI.Ctl.Commands.DeleteShovelCommand.erl b/deps/rabbitmq_shovel/src/Elixir.RabbitMQ.CLI.Ctl.Commands.DeleteShovelCommand.erl
new file mode 100644
index 0000000000..59f2a8a5e9
--- /dev/null
+++ b/deps/rabbitmq_shovel/src/Elixir.RabbitMQ.CLI.Ctl.Commands.DeleteShovelCommand.erl
@@ -0,0 +1,84 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module('Elixir.RabbitMQ.CLI.Ctl.Commands.DeleteShovelCommand').
+
+-include("rabbit_shovel.hrl").
+
+-behaviour('Elixir.RabbitMQ.CLI.CommandBehaviour').
+
+-export([
+ usage/0,
+ usage_additional/0,
+ usage_doc_guides/0,
+ validate/2,
+ merge_defaults/2,
+ banner/2,
+ run/2,
+ switches/0,
+ aliases/0,
+ output/2,
+ help_section/0,
+ description/0
+ ]).
+
+
+%%----------------------------------------------------------------------------
+%% Callbacks
+%%----------------------------------------------------------------------------
+usage() ->
+ <<"delete_shovel [--vhost <vhost>] <name>">>.
+
+usage_additional() ->
+ [
+ {<<"<name>">>, <<"Shovel to delete">>}
+ ].
+
+usage_doc_guides() ->
+ [?SHOVEL_GUIDE_URL].
+
+description() ->
+ <<"Deletes a Shovel">>.
+
+help_section() ->
+ {plugin, shovel}.
+
+validate([], _Opts) ->
+ {validation_failure, not_enough_args};
+validate([_, _ | _], _Opts) ->
+ {validation_failure, too_many_args};
+validate([_], _Opts) ->
+ ok.
+
+merge_defaults(A, Opts) ->
+ {A, maps:merge(#{vhost => <<"/">>}, Opts)}.
+
+banner([Name], #{vhost := VHost}) ->
+ erlang:list_to_binary(io_lib:format("Deleting shovel ~s in vhost ~s",
+ [Name, VHost])).
+
+run([Name], #{node := Node, vhost := VHost}) ->
+ ActingUser = 'Elixir.RabbitMQ.CLI.Core.Helpers':cli_acting_user(),
+ case rabbit_misc:rpc_call(Node, rabbit_shovel_util, delete_shovel, [VHost, Name, ActingUser]) of
+ {badrpc, _} = Error ->
+ Error;
+ {error, not_found} ->
+ ErrMsg = rabbit_misc:format("Shovel with the given name was not found "
+ "on the target node '~s' and / or virtual host '~s'",
+ [Node, VHost]),
+ {error, rabbit_data_coercion:to_binary(ErrMsg)};
+ ok -> ok
+ end.
+
+switches() ->
+ [].
+
+aliases() ->
+ [].
+
+output(E, _Opts) ->
+ 'Elixir.RabbitMQ.CLI.DefaultOutput':output(E).
diff --git a/deps/rabbitmq_shovel/src/Elixir.RabbitMQ.CLI.Ctl.Commands.RestartShovelCommand.erl b/deps/rabbitmq_shovel/src/Elixir.RabbitMQ.CLI.Ctl.Commands.RestartShovelCommand.erl
new file mode 100644
index 0000000000..e6e64443ad
--- /dev/null
+++ b/deps/rabbitmq_shovel/src/Elixir.RabbitMQ.CLI.Ctl.Commands.RestartShovelCommand.erl
@@ -0,0 +1,84 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module('Elixir.RabbitMQ.CLI.Ctl.Commands.RestartShovelCommand').
+
+-include("rabbit_shovel.hrl").
+
+-behaviour('Elixir.RabbitMQ.CLI.CommandBehaviour').
+
+-export([
+ usage/0,
+ usage_additional/0,
+ usage_doc_guides/0,
+ flags/0,
+ validate/2,
+ merge_defaults/2,
+ banner/2,
+ run/2,
+ aliases/0,
+ output/2,
+ help_section/0,
+ description/0
+ ]).
+
+
+%%----------------------------------------------------------------------------
+%% Callbacks
+%%----------------------------------------------------------------------------
+
+flags() ->
+ [].
+
+aliases() ->
+ [].
+
+validate([], _Opts) ->
+ {validation_failure, not_enough_args};
+validate([_], _Opts) ->
+ ok;
+validate(_, _Opts) ->
+ {validation_failure, too_many_args}.
+
+merge_defaults(A, Opts) ->
+ {A, maps:merge(#{vhost => <<"/">>}, Opts)}.
+
+banner([Name], #{node := Node, vhost := VHost}) ->
+ erlang:iolist_to_binary([<<"Restarting dynamic Shovel ">>, Name, <<" in virtual host ">>, VHost,
+ << " on node ">>, atom_to_binary(Node, utf8)]).
+
+run([Name], #{node := Node, vhost := VHost}) ->
+ case rabbit_misc:rpc_call(Node, rabbit_shovel_util, restart_shovel, [VHost, Name]) of
+ {badrpc, _} = Error ->
+ Error;
+ {error, not_found} ->
+ ErrMsg = rabbit_misc:format("Shovel with the given name was not found "
+ "on the target node '~s' and / or virtual host '~s'",
+ [Node, VHost]),
+ {error, rabbit_data_coercion:to_binary(ErrMsg)};
+ ok -> ok
+ end.
+
+output(Output, _Opts) ->
+ 'Elixir.RabbitMQ.CLI.DefaultOutput':output(Output).
+
+usage() ->
+ <<"restart_shovel <name>">>.
+
+usage_additional() ->
+ [
+ {<<"<name>">>, <<"name of the Shovel to restart">>}
+ ].
+
+usage_doc_guides() ->
+ [?SHOVEL_GUIDE_URL].
+
+help_section() ->
+ {plugin, shovel}.
+
+description() ->
+ <<"Restarts a dynamic Shovel">>.
diff --git a/deps/rabbitmq_shovel/src/Elixir.RabbitMQ.CLI.Ctl.Commands.ShovelStatusCommand.erl b/deps/rabbitmq_shovel/src/Elixir.RabbitMQ.CLI.Ctl.Commands.ShovelStatusCommand.erl
new file mode 100644
index 0000000000..4f007762fb
--- /dev/null
+++ b/deps/rabbitmq_shovel/src/Elixir.RabbitMQ.CLI.Ctl.Commands.ShovelStatusCommand.erl
@@ -0,0 +1,129 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module('Elixir.RabbitMQ.CLI.Ctl.Commands.ShovelStatusCommand').
+
+-include("rabbit_shovel.hrl").
+
+-behaviour('Elixir.RabbitMQ.CLI.CommandBehaviour').
+
+-export([
+ usage/0,
+ usage_doc_guides/0,
+ flags/0,
+ validate/2,
+ merge_defaults/2,
+ banner/2,
+ run/2,
+ switches/0,
+ aliases/0,
+ output/2,
+ scopes/0,
+ formatter/0,
+ help_section/0,
+ description/0
+ ]).
+
+
+%%----------------------------------------------------------------------------
+%% Callbacks
+%%----------------------------------------------------------------------------
+usage() ->
+ <<"shovel_status">>.
+
+usage_doc_guides() ->
+ [?SHOVEL_GUIDE_URL].
+
+description() ->
+ <<"Displays status of Shovel on a node">>.
+
+help_section() ->
+ {plugin, shovel}.
+
+flags() ->
+ [].
+
+formatter() ->
+ 'Elixir.RabbitMQ.CLI.Formatters.Table'.
+
+validate(_,_) ->
+ ok.
+
+merge_defaults(A,O) ->
+ {A, O}.
+
+banner(_, #{node := Node}) ->
+ erlang:iolist_to_binary([<<"Shovel status on node ">>,
+ atom_to_binary(Node, utf8)]).
+
+run(_Args, #{node := Node}) ->
+ case rabbit_misc:rpc_call(Node, rabbit_shovel_status, status, []) of
+ {badrpc, _} = Error ->
+ Error;
+ Status ->
+ {stream, Status}
+ end.
+
+switches() ->
+ [].
+
+aliases() ->
+ [].
+
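+%% Turns each {Name, Type, Status, Timestamp} tuple emitted by
+%% rabbit_shovel_status into a map for the table formatter.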
+output({stream, ShovelStatus}, _Opts) ->
+ Formatted = [fmt_name(Name,
+ fmt_status(Status,
+ #{type => Type,
+ last_changed => fmt_ts(Timestamp)}))
+ || {Name, Type, Status, Timestamp} <- ShovelStatus],
+ {stream, Formatted};
+output(E, _Opts) ->
+ 'Elixir.RabbitMQ.CLI.DefaultOutput':output(E).
+
+scopes() ->
+ ['ctl', 'diagnostics'].
+
+%%----------------------------------------------------------------------------
+%% Formatting
+%%----------------------------------------------------------------------------
+fmt_name({Vhost, Name}, Map) ->
+ Map#{name => Name, vhost => Vhost};
+fmt_name(Name, Map) ->
+ %% Static shovel names don't contain the vhost
+ Map#{name => Name}.
+
+fmt_ts({{YY, MM, DD}, {Hour, Min, Sec}}) ->
+ erlang:list_to_binary(
+ io_lib:format("~4..0w-~2..0w-~2..0w ~2..0w:~2..0w:~2..0w",
+ [YY, MM, DD, Hour, Min, Sec])).
+
+fmt_status({'running' = St, Proplist}, Map) ->
+ maps:merge(Map#{state => St,
+ source_protocol => proplists:get_value(src_protocol, Proplist,
+ undefined),
+ source => proplists:get_value(src_uri, Proplist),
+ destination_protocol => proplists:get_value(dest_protocol, Proplist, undefined),
+ destination => proplists:get_value(dest_uri, Proplist),
+ termination_reason => <<>>}, details_to_map(Proplist));
+fmt_status('starting' = St, Map) ->
+ Map#{state => St,
+ source => <<>>,
+ destination => <<>>,
+ termination_reason => <<>>};
+fmt_status({'terminated' = St, Reason}, Map) ->
+ Map#{state => St,
+ termination_reason => list_to_binary(io_lib:format("~p", [Reason])),
+ source => <<>>,
+ destination => <<>>}.
+
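+%% Renames the shovel's short src_*/dest_* keys to their long forms,
+%% skipping keys that are not present.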
+details_to_map(Proplist) ->
+ Keys = [{src_address, source_address}, {src_queue, source_queue},
+ {src_exchange, source_exchange}, {src_exchange_key, source_exchange_key},
+ {dest_address, destination_address}, {dest_queue, destination_queue},
+ {dest_exchange, destination_exchange}, {dest_exchange_key, destination_exchange_key}],
+ maps:from_list([{New, proplists:get_value(Old, Proplist)}
+ || {Old, New} <- Keys, proplists:is_defined(Old, Proplist)]).
diff --git a/deps/rabbitmq_shovel/src/rabbit_amqp091_shovel.erl b/deps/rabbitmq_shovel/src/rabbit_amqp091_shovel.erl
new file mode 100644
index 0000000000..44fc12183c
--- /dev/null
+++ b/deps/rabbitmq_shovel/src/rabbit_amqp091_shovel.erl
@@ -0,0 +1,520 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_amqp091_shovel).
+
+-behaviour(rabbit_shovel_behaviour).
+
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include("rabbit_shovel.hrl").
+
+-export([
+ parse/2,
+ source_uri/1,
+ dest_uri/1,
+ source_protocol/1,
+ dest_protocol/1,
+ source_endpoint/1,
+ dest_endpoint/1,
+ connect_source/1,
+ init_source/1,
+ connect_dest/1,
+ init_dest/1,
+ handle_source/2,
+ handle_dest/2,
+ close_source/1,
+ close_dest/1,
+ ack/3,
+ nack/3,
+ forward/4
+ ]).
+
+-define(MAX_CONNECTION_CLOSE_TIMEOUT, 10000).
+
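+%% Parses a static shovel definition into the map-based endpoint config
+%% consumed by rabbit_shovel_behaviour.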
+parse(_Name, {source, Source}) ->
+ Prefetch = parse_parameter(prefetch_count, fun parse_non_negative_integer/1,
+ proplists:get_value(prefetch_count, Source,
+ ?DEFAULT_PREFETCH)),
+ Queue = parse_parameter(queue, fun parse_binary/1,
+ proplists:get_value(queue, Source)),
+ #{module => ?MODULE,
+ uris => proplists:get_value(uris, Source),
+ resource_decl => decl_fun(Source),
+ queue => Queue,
+ delete_after => proplists:get_value(delete_after, Source, never),
+ prefetch_count => Prefetch};
+parse(Name, {destination, Dest}) ->
+ PubProp = proplists:get_value(publish_properties, Dest, []),
+ PropsFun = try_make_parse_publish(publish_properties, PubProp),
+ PubFields = proplists:get_value(publish_fields, Dest, []),
+ PubFieldsFun = try_make_parse_publish(publish_fields, PubFields),
+ AFH = proplists:get_value(add_forward_headers, Dest, false),
+ ATH = proplists:get_value(add_timestamp_header, Dest, false),
+ PropsFun1 = add_forward_headers_fun(Name, AFH, PropsFun),
+ PropsFun2 = add_timestamp_header_fun(ATH, PropsFun1),
+ #{module => ?MODULE,
+ uris => proplists:get_value(uris, Dest),
+ resource_decl => decl_fun(Dest),
+ props_fun => PropsFun2,
+ fields_fun => PubFieldsFun,
+ add_forward_headers => AFH,
+ add_timestamp_header => ATH}.
+
+connect_source(Conf = #{name := Name,
+ source := #{uris := Uris} = Src}) ->
+ {Conn, Chan, Uri} = make_conn_and_chan(Uris, Name),
+ Conf#{source => Src#{current => {Conn, Chan, Uri}}}.
+
+init_source(Conf = #{ack_mode := AckMode,
+ source := #{queue := Queue,
+ current := {Conn, Chan, _},
+ prefetch_count := Prefetch,
+ resource_decl := Decl} = Src}) ->
+ Decl(Conn, Chan),
+
+ NoAck = AckMode =:= no_ack,
+ case NoAck of
+ false ->
+ #'basic.qos_ok'{} =
+ amqp_channel:call(Chan, #'basic.qos'{prefetch_count = Prefetch}),
+ ok;
+ true -> ok
+ end,
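+ %% delete-after semantics: if nothing remains to be transferred the
+ %% worker shuts down immediately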
+ Remaining = remaining(Chan, Conf),
+ case Remaining of
+ 0 ->
+ exit({shutdown, autodelete});
+ _ -> ok
+ end,
+ #'basic.consume_ok'{} =
+ amqp_channel:subscribe(Chan, #'basic.consume'{queue = Queue,
+ no_ack = NoAck}, self()),
+ Conf#{source => Src#{remaining => Remaining,
+ remaining_unacked => Remaining}}.
+
+connect_dest(Conf = #{name := Name, dest := #{uris := Uris} = Dst}) ->
+ {Conn, Chan, URI} = make_conn_and_chan(Uris, Name),
+ Conf#{dest => Dst#{current => {Conn, Chan, URI}}}.
+
+init_dest(Conf = #{ack_mode := AckMode,
+ dest := #{current := {Conn, Chan, _},
+ resource_decl := Decl} = Dst}) ->
+
+ Decl(Conn, Chan),
+
+ case AckMode of
+ on_confirm ->
+ #'confirm.select_ok'{} =
+ amqp_channel:call(Chan, #'confirm.select'{}),
+ ok = amqp_channel:register_confirm_handler(Chan, self());
+ _ ->
+ ok
+ end,
+ Conf#{dest => Dst#{unacked => #{}}}.
+
+ack(Tag, Multi, State = #{source := #{current := {_, Chan, _}}}) ->
+ ok = amqp_channel:cast(Chan, #'basic.ack'{delivery_tag = Tag,
+ multiple = Multi}),
+ State.
+
+nack(Tag, Multi, State = #{source := #{current := {_, Chan, _}}}) ->
+ ok = amqp_channel:cast(Chan, #'basic.nack'{delivery_tag = Tag,
+ multiple = Multi}),
+ State.
+
+source_uri(#{source := #{current := {_, _, Uri}}}) -> Uri.
+dest_uri(#{dest := #{current := {_, _, Uri}}}) -> Uri.
+
+source_protocol(_State) -> amqp091.
+dest_protocol(_State) -> amqp091.
+
+source_endpoint(#{source := #{queue := <<>>,
+ source_exchange := SrcX,
+ source_exchange_key := SrcXKey}}) ->
+ [{src_exchange, SrcX},
+ {src_exchange_key, SrcXKey}];
+source_endpoint(#{source := #{queue := Queue}}) ->
+ [{src_queue, Queue}];
+source_endpoint(_Config) ->
+ [].
+
+dest_endpoint(#{shovel_type := static}) ->
+ [];
+dest_endpoint(#{dest := Dest}) ->
+ Keys = [dest_exchange, dest_exchange_key, dest_queue],
+ maps:to_list(maps:filter(fun(K, _) -> lists:member(K, Keys) end, Dest)).
+
+forward(IncomingTag, Props, Payload,
+ State0 = #{dest := #{props_fun := PropsFun,
+ current := {_, _, DstUri},
+ fields_fun := FieldsFun}}) ->
+ SrcUri = rabbit_shovel_behaviour:source_uri(State0),
+ % do publish
+ Exchange = maps:get(exchange, Props, undefined),
+ RoutingKey = maps:get(routing_key, Props, undefined),
+ Method = #'basic.publish'{exchange = Exchange, routing_key = RoutingKey},
+ Method1 = FieldsFun(SrcUri, DstUri, Method),
+ Msg1 = #amqp_msg{props = PropsFun(SrcUri, DstUri, props_from_map(Props)),
+ payload = Payload},
+ publish(IncomingTag, Method1, Msg1, State0).
+
+props_from_map(Map) ->
+ #'P_basic'{content_type = maps:get(content_type, Map, undefined),
+ content_encoding = maps:get(content_encoding, Map, undefined),
+ headers = maps:get(headers, Map, undefined),
+ delivery_mode = maps:get(delivery_mode, Map, undefined),
+ priority = maps:get(priority, Map, undefined),
+ correlation_id = maps:get(correlation_id, Map, undefined),
+ reply_to = maps:get(reply_to, Map, undefined),
+ expiration = maps:get(expiration, Map, undefined),
+ message_id = maps:get(message_id, Map, undefined),
+ timestamp = maps:get(timestamp, Map, undefined),
+ type = maps:get(type, Map, undefined),
+ user_id = maps:get(user_id, Map, undefined),
+ app_id = maps:get(app_id, Map, undefined),
+ cluster_id = maps:get(cluster_id, Map, undefined)}.
+
+map_from_props(#'P_basic'{content_type = Content_type,
+ content_encoding = Content_encoding,
+ headers = Headers,
+ delivery_mode = Delivery_mode,
+ priority = Priority,
+ correlation_id = Correlation_id,
+ reply_to = Reply_to,
+ expiration = Expiration,
+ message_id = Message_id,
+ timestamp = Timestamp,
+ type = Type,
+ user_id = User_id,
+ app_id = App_id,
+ cluster_id = Cluster_id}) ->
+ lists:foldl(fun({_K, undefined}, Acc) -> Acc;
+ ({K, V}, Acc) -> Acc#{K => V}
+ end, #{}, [{content_type, Content_type},
+ {content_encoding, Content_encoding},
+ {headers, Headers},
+ {delivery_mode, Delivery_mode},
+ {priority, Priority},
+ {correlation_id, Correlation_id},
+ {reply_to, Reply_to},
+ {expiration, Expiration},
+ {message_id, Message_id},
+ {timestamp, Timestamp},
+ {type, Type},
+ {user_id, User_id},
+ {app_id, App_id},
+ {cluster_id, Cluster_id}
+ ]).
+
+handle_source(#'basic.consume_ok'{}, State) ->
+ State;
+handle_source({#'basic.deliver'{delivery_tag = Tag,
+ exchange = Exchange,
+ routing_key = RoutingKey},
+ #amqp_msg{props = Props0, payload = Payload}}, State) ->
+ Props = (map_from_props(Props0))#{exchange => Exchange,
+ routing_key => RoutingKey},
+ % forward to destination
+ rabbit_shovel_behaviour:forward(Tag, Props, Payload, State);
+
+handle_source({'EXIT', Conn, Reason},
+ #{source := #{current := {Conn, _, _}}}) ->
+ {stop, {inbound_conn_died, Reason}};
+
+handle_source(_Msg, _State) ->
+ not_handled.
+
+handle_dest(#'basic.ack'{delivery_tag = Seq, multiple = Multiple},
+ State = #{ack_mode := on_confirm}) ->
+ confirm_to_inbound(fun (Tag, Multi, StateX) ->
+ rabbit_shovel_behaviour:ack(Tag, Multi, StateX)
+ end, Seq, Multiple, State);
+
+handle_dest(#'basic.nack'{delivery_tag = Seq, multiple = Multiple},
+ State = #{ack_mode := on_confirm }) ->
+ confirm_to_inbound(fun (Tag, Multi, StateX) ->
+ rabbit_shovel_behaviour:nack(Tag, Multi, StateX)
+ end, Seq, Multiple, State);
+
+handle_dest(#'basic.cancel'{}, #{name := Name}) ->
+ rabbit_log:warning("Shovel ~p received a 'basic.cancel' from the server", [Name]),
+ {stop, {shutdown, restart}};
+
+handle_dest({'EXIT', Conn, Reason}, #{dest := #{current := {Conn, _, _}}}) ->
+ {stop, {outbound_conn_died, Reason}};
+
+handle_dest(_Msg, _State) ->
+ not_handled.
+
+close_source(#{source := #{current := {Conn, _, _}}}) ->
+ catch amqp_connection:close(Conn, ?MAX_CONNECTION_CLOSE_TIMEOUT),
+ ok;
+close_source(_) ->
+ %% the shovel never connected, so there is no connection to close
+ ok.
+
+close_dest(#{dest := #{current := {Conn, _, _}}}) ->
+ catch amqp_connection:close(Conn, ?MAX_CONNECTION_CLOSE_TIMEOUT),
+ ok;
+close_dest(_) ->
+ %% the shovel never connected, so there is no connection to close
+ ok.
+
+confirm_to_inbound(ConfirmFun, Seq, Multiple,
+ State0 = #{dest := #{unacked := Unacked} = Dst}) ->
+ #{Seq := InTag} = Unacked,
+ State = ConfirmFun(InTag, Multiple, State0),
+ {Unacked1, Removed} = remove_delivery_tags(Seq, Multiple, Unacked, 0),
+ rabbit_shovel_behaviour:decr_remaining(Removed,
+ State#{dest =>
+ Dst#{unacked => Unacked1}}).
+
+publish(_Tag, _Method, _Msg, State = #{source := #{remaining_unacked := 0}}) ->
+ %% We are in on-confirm mode, and are autodelete. We have
+ %% published all the messages we need to; we just wait for acks to
+ %% come back. So drop subsequent messages on the floor to be
+ %% requeued later.
+ State;
+
+publish(IncomingTag, Method, Msg,
+ State = #{ack_mode := AckMode,
+ dest := Dst}) ->
+ #{unacked := Unacked,
+ current := {_, OutboundChan, _}} = Dst,
+ Seq = case AckMode of
+ on_confirm ->
+ amqp_channel:next_publish_seqno(OutboundChan);
+ _ -> undefined
+ end,
+ ok = amqp_channel:call(OutboundChan, Method, Msg),
+ rabbit_shovel_behaviour:decr_remaining_unacked(
+ case AckMode of
+ no_ack ->
+ rabbit_shovel_behaviour:decr_remaining(1, State);
+ on_confirm ->
+ State#{dest => Dst#{unacked => Unacked#{Seq => IncomingTag}}};
+ on_publish ->
+ State1 = rabbit_shovel_behaviour:ack(IncomingTag, false, State),
+ rabbit_shovel_behaviour:decr_remaining(1, State1)
+ end).
+
+make_conn_and_chan([], {VHost, Name} = _ShovelName) ->
+ rabbit_log:error(
+ "Shovel '~s' in vhost '~s' has no more URIs to try for connection",
+ [Name, VHost]),
+ erlang:error(failed_to_connect_using_provided_uris);
+make_conn_and_chan([], ShovelName) ->
+ rabbit_log:error(
+ "Shovel '~s' has no more URIs to try for connection",
+ [ShovelName]),
+ erlang:error(failed_to_connect_using_provided_uris);
+make_conn_and_chan(URIs, ShovelName) ->
+ try do_make_conn_and_chan(URIs, ShovelName) of
+ Val -> Val
+ catch throw:{error, Reason, URI} ->
+ log_connection_failure(Reason, URI, ShovelName),
+ make_conn_and_chan(lists:usort(URIs -- [URI]), ShovelName)
+ end.
+
+do_make_conn_and_chan(URIs, ShovelName) ->
+ URI = lists:nth(rand:uniform(length(URIs)), URIs),
+ {ok, AmqpParam} = amqp_uri:parse(URI),
+ ConnName = get_connection_name(ShovelName),
+ case amqp_connection:start(AmqpParam, ConnName) of
+ {ok, Conn} ->
+ link(Conn),
+ {ok, Ch} = amqp_connection:open_channel(Conn),
+ link(Ch),
+ {Conn, Ch, list_to_binary(amqp_uri:remove_credentials(URI))};
+ {error, not_allowed} ->
+ throw({error, not_allowed, URI});
+ {error, Reason} ->
+ throw({error, Reason, URI})
+ end.
+
+log_connection_failure(Reason, URI, {VHost, Name} = _ShovelName) ->
+ rabbit_log:error(
+ "Shovel '~s' in vhost '~s' failed to connect (URI: ~s): ~s~n",
+ [Name, VHost, amqp_uri:remove_credentials(URI), human_readable_connection_error(Reason)]);
+log_connection_failure(Reason, URI, ShovelName) ->
+ rabbit_log:error(
+ "Shovel '~s' failed to connect (URI: ~s): ~s~n",
+ [ShovelName, amqp_uri:remove_credentials(URI), human_readable_connection_error(Reason)]).
+
+human_readable_connection_error({auth_failure, Msg}) ->
+ Msg;
+human_readable_connection_error(not_allowed) ->
+ "access to target virtual host was refused";
+human_readable_connection_error(unknown_host) ->
+ "unknown host (failed to resolve hostname)";
+human_readable_connection_error(econnrefused) ->
+ "connection to target host was refused (ECONNREFUSED)";
+human_readable_connection_error(econnreset) ->
+ "connection to target host was reset by peer (ECONNRESET)";
+human_readable_connection_error(etimedout) ->
+ "connection to target host timed out (ETIMEDOUT)";
+human_readable_connection_error(ehostunreach) ->
+ "target host is unreachable (EHOSTUNREACH)";
+human_readable_connection_error(nxdomain) ->
+ "target hostname cannot be resolved (NXDOMAIN)";
+human_readable_connection_error(eacces) ->
+ "connection to target host failed with EACCES. "
+ "This may be due to insufficient RabbitMQ process permissions or "
+ "a reserved IP address used as destination";
+human_readable_connection_error(Other) ->
+ rabbit_misc:format("~p", [Other]).
+
+get_connection_name(ShovelName) when is_atom(ShovelName) ->
+ Prefix = <<"Shovel ">>,
+ ShovelNameAsBinary = atom_to_binary(ShovelName, utf8),
+ <<Prefix/binary, ShovelNameAsBinary/binary>>;
+%% for dynamic shovels, name is a binary
+get_connection_name(ShovelName) when is_binary(ShovelName) ->
+ Prefix = <<"Shovel ">>,
+ <<Prefix/binary, ShovelName/binary>>;
+%% fallback
+get_connection_name(_) ->
+ <<"Shovel">>.
+
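+%% Drops settled sequence numbers from the unacked map and counts them;
+%% with Multiple = true every tag up to and including Seq is settled.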
+remove_delivery_tags(Seq, false, Unacked, 0) ->
+ {maps:remove(Seq, Unacked), 1};
+remove_delivery_tags(Seq, true, Unacked, Count) ->
+ case maps:size(Unacked) of
+ 0 -> {Unacked, Count};
+ _ ->
+ maps:fold(fun(K, _V, {Acc, Cnt}) when K =< Seq ->
+ {maps:remove(K, Acc), Cnt + 1};
+ (_K, _V, Acc) -> Acc
+ end, {Unacked, 0}, Unacked)
+ end.
+
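+%% How many messages are left to transfer: 'never' means unlimited and
+%% 'queue-length' snapshots the source queue depth at startup.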
+remaining(_Ch, #{source := #{delete_after := never}}) ->
+ unlimited;
+remaining(Ch, #{source := #{delete_after := 'queue-length',
+ queue := Queue}}) ->
+ #'queue.declare_ok'{message_count = N} =
+ amqp_channel:call(Ch, #'queue.declare'{queue = Queue,
+ passive = true}),
+ N;
+remaining(_Ch, #{source := #{delete_after := Count}}) ->
+ Count.
+
+%%% PARSING
+
+try_make_parse_publish(Key, Fields) ->
+ make_parse_publish(Key, Fields).
+
+make_parse_publish(publish_fields, Fields) ->
+ make_publish_fun(Fields, record_info(fields, 'basic.publish'));
+make_parse_publish(publish_properties, Fields) ->
+ make_publish_fun(Fields, record_info(fields, 'P_basic')).
+
+make_publish_fun(Fields, ValidFields) when is_list(Fields) ->
+ SuppliedFields = proplists:get_keys(Fields),
+ case SuppliedFields -- ValidFields of
+ [] ->
+ FieldIndices = make_field_indices(ValidFields, Fields),
+ fun (_SrcUri, _DestUri, Publish) ->
+ lists:foldl(fun ({Pos1, Value}, Pub) ->
+ setelement(Pos1, Pub, Value)
+ end, Publish, FieldIndices)
+ end;
+ Unexpected ->
+ fail({invalid_parameter_value, publish_properties,
+ {unexpected_fields, Unexpected, ValidFields}})
+ end;
+make_publish_fun(Fields, _) ->
+ fail({invalid_parameter_value, publish_properties,
+ {require_list, Fields}}).
+
+make_field_indices(Valid, Fields) ->
+ make_field_indices(Fields, field_map(Valid, 2), []).
+
+make_field_indices([], _Idxs , Acc) ->
+ lists:reverse(Acc);
+make_field_indices([{Key, Value} | Rest], Idxs, Acc) ->
+ make_field_indices(Rest, Idxs, [{dict:fetch(Key, Idxs), Value} | Acc]).
+
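+%% Maps field names to their tuple positions, starting at 2 because
+%% element 1 of a record tuple is the record name.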
+field_map(Fields, Idx0) ->
+ {Dict, _IdxMax} =
+ lists:foldl(fun (Field, {Dict1, Idx1}) ->
+ {dict:store(Field, Idx1, Dict1), Idx1 + 1}
+ end, {dict:new(), Idx0}, Fields),
+ Dict.
+
+-spec fail(term()) -> no_return().
+fail(Reason) -> throw({error, Reason}).
+
+add_forward_headers_fun(Name, true, PubProps) ->
+ fun(SrcUri, DestUri, Props) ->
+ rabbit_shovel_util:update_headers(
+ [{<<"shovelled-by">>, rabbit_nodes:cluster_name()},
+ {<<"shovel-type">>, <<"static">>},
+ {<<"shovel-name">>, list_to_binary(atom_to_list(Name))}],
+ [], SrcUri, DestUri, PubProps(SrcUri, DestUri, Props))
+ end;
+add_forward_headers_fun(_Name, false, PubProps) ->
+ PubProps.
+
+add_timestamp_header_fun(true, PubProps) ->
+ fun(SrcUri, DestUri, Props) ->
+ rabbit_shovel_util:add_timestamp_header(
+ PubProps(SrcUri, DestUri, Props))
+ end;
+add_timestamp_header_fun(false, PubProps) -> PubProps.
+
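+%% Expands the 'declarations' list (bare method names or {Method, Props}
+%% pairs) into AMQP 0-9-1 method records, rejecting unknown fields.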
+parse_declaration({[], Acc}) ->
+ Acc;
+parse_declaration({[{Method, Props} | Rest], Acc}) when is_list(Props) ->
+ FieldNames = try rabbit_framing_amqp_0_9_1:method_fieldnames(Method)
+ catch exit:Reason -> fail(Reason)
+ end,
+ case proplists:get_keys(Props) -- FieldNames of
+ [] -> ok;
+ UnknownFields -> fail({unknown_fields, Method, UnknownFields})
+ end,
+ {Res, _Idx} = lists:foldl(
+ fun (K, {R, Idx}) ->
+ NewR = case proplists:get_value(K, Props) of
+ undefined -> R;
+ V -> setelement(Idx, R, V)
+ end,
+ {NewR, Idx + 1}
+ end, {rabbit_framing_amqp_0_9_1:method_record(Method), 2},
+ FieldNames),
+ parse_declaration({Rest, [Res | Acc]});
+parse_declaration({[{Method, Props} | _Rest], _Acc}) ->
+ fail({expected_method_field_list, Method, Props});
+parse_declaration({[Method | Rest], Acc}) ->
+ parse_declaration({[{Method, []} | Rest], Acc}).
+
+decl_fun(Endpoint) ->
+ Decl = parse_declaration({proplists:get_value(declarations, Endpoint, []),
+ []}),
+ fun (_Conn, Ch) ->
+ [amqp_channel:call(Ch, M) || M <- lists:reverse(Decl)]
+ end.
+
+parse_parameter(Param, Fun, Value) ->
+ try
+ Fun(Value)
+ catch
+ _:{error, Err} ->
+ fail({invalid_parameter_value, Param, Err})
+ end.
+
+parse_non_negative_integer(N) when is_integer(N) andalso N >= 0 ->
+ N;
+parse_non_negative_integer(N) ->
+ fail({require_non_negative_integer, N}).
+
+parse_binary(Binary) when is_binary(Binary) ->
+ Binary;
+parse_binary(NotABinary) ->
+ fail({require_binary, NotABinary}).
diff --git a/deps/rabbitmq_shovel/src/rabbit_amqp10_shovel.erl b/deps/rabbitmq_shovel/src/rabbit_amqp10_shovel.erl
new file mode 100644
index 0000000000..17e5fbba08
--- /dev/null
+++ b/deps/rabbitmq_shovel/src/rabbit_amqp10_shovel.erl
@@ -0,0 +1,407 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_amqp10_shovel).
+
+-behaviour(rabbit_shovel_behaviour).
+
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include("rabbit_shovel.hrl").
+
+-export([
+ parse/2,
+ source_uri/1,
+ dest_uri/1,
+ source_protocol/1,
+ dest_protocol/1,
+ source_endpoint/1,
+ dest_endpoint/1,
+ connect_source/1,
+ init_source/1,
+ connect_dest/1,
+ init_dest/1,
+ handle_source/2,
+ handle_dest/2,
+ close_source/1,
+ close_dest/1,
+ ack/3,
+ nack/3,
+ forward/4
+ ]).
+
+-import(rabbit_misc, [pget/2, pget/3]).
+-import(rabbit_data_coercion, [to_binary/1]).
+
+-define(INFO(Text, Args), error_logger:info_msg(Text, Args)).
+-define(LINK_CREDIT_TIMEOUT, 5000).
+
+-type state() :: rabbit_shovel_behaviour:state().
+-type uri() :: rabbit_shovel_behaviour:uri().
+-type tag() :: rabbit_shovel_behaviour:tag().
+-type endpoint_config() :: rabbit_shovel_behaviour:source_config()
+ | rabbit_shovel_behaviour:dest_config().
+
+-spec parse(binary(), {source | destination, proplists:proplist()}) ->
+ endpoint_config().
+parse(_Name, {destination, Conf}) ->
+ Uris = pget(uris, Conf),
+ #{module => ?MODULE,
+ uris => Uris,
+ unacked => #{},
+ target_address => pget(target_address, Conf),
+ properties => maps:from_list(pget(properties, Conf, [])),
+ application_properties => maps:from_list(pget(application_properties, Conf, [])),
+ delivery_annotations => maps:from_list(pget(delivery_annotations, Conf, [])),
+ message_annotations => maps:from_list(pget(message_annotations, Conf, [])),
+ add_forward_headers => pget(add_forward_headers, Conf, false),
+ add_timestamp_header => pget(add_timestamp_header, Conf, false)
+ };
+parse(_Name, {source, Conf}) ->
+ Uris = pget(uris, Conf),
+ #{module => ?MODULE,
+ uris => Uris,
+ prefetch_count => pget(prefetch_count, Conf, 1000),
+ delete_after => pget(delete_after, Conf, never),
+ source_address => pget(source_address, Conf)}.
+
+-spec connect_source(state()) -> state().
+connect_source(State = #{name := Name,
+ ack_mode := AckMode,
+ source := #{uris := [Uri | _],
+ source_address := Addr} = Src}) ->
+ AttachFun = fun amqp10_client:attach_receiver_link/5,
+ {Conn, Sess, LinkRef} = connect(Name, AckMode, Uri, "receiver", Addr, Src,
+ AttachFun),
+ State#{source => Src#{current => #{conn => Conn,
+ session => Sess,
+ link => LinkRef,
+ uri => Uri}}}.
+
+-spec connect_dest(state()) -> state().
+connect_dest(State = #{name := Name,
+ ack_mode := AckMode,
+ dest := #{uris := [Uri | _],
+ target_address := Addr} = Dst}) ->
+ AttachFun = fun amqp10_client:attach_sender_link_sync/5,
+ {Conn, Sess, LinkRef} = connect(Name, AckMode, Uri, "sender", Addr, Dst,
+ AttachFun),
+ %% wait for link credit here as if there are messages waiting we may try
+ %% to forward before we've received credit
+ State#{dest => Dst#{current => #{conn => Conn,
+ session => Sess,
+ link_state => attached,
+ pending => [],
+ link => LinkRef,
+ uri => Uri}}}.
+
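+%% Opens an AMQP 1.0 connection and session, then attaches a link with a
+%% unique name using the given AttachFun.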
+connect(Name, AckMode, Uri, Postfix, Addr, Map, AttachFun) ->
+ {ok, Config} = amqp10_client:parse_uri(Uri),
+ {ok, Conn} = amqp10_client:open_connection(Config),
+ {ok, Sess} = amqp10_client:begin_session(Conn),
+ link(Conn),
+ LinkName = begin
+ LinkName0 = gen_unique_name(Name, Postfix),
+ rabbit_data_coercion:to_binary(LinkName0)
+ end,
+ % no_ack deliveries can be pre-settled; all other ack modes need unsettled transfers
+ SettlementMode = case AckMode of
+ no_ack -> settled;
+ _ -> unsettled
+ end,
+ % the attach needs to be synchronous, i.e. await the 'attach' event,
+ % otherwise we may try to use the link before it is ready
+ Durability = maps:get(durability, Map, unsettled_state),
+ {ok, LinkRef} = AttachFun(Sess, LinkName, Addr,
+ SettlementMode,
+ Durability),
+ {Conn, Sess, LinkRef}.
+
+-spec init_source(state()) -> state().
+init_source(State = #{source := #{current := #{link := Link},
+ prefetch_count := Prefetch} = Src}) ->
+ {Credit, RenewAfter} = case Src of
+ #{delete_after := R} when is_integer(R) ->
+ {R, never};
+ #{prefetch_count := Pre} ->
+ {Pre, round(Prefetch/10)}
+ end,
+ ok = amqp10_client:flow_link_credit(Link, Credit, RenewAfter),
+ Remaining = case Src of
+ #{delete_after := never} -> unlimited;
+ #{delete_after := Rem} -> Rem;
+ _ -> unlimited
+ end,
+ State#{source => Src#{remaining => Remaining,
+ remaining_unacked => Remaining,
+ last_acked_tag => -1}}.
+
+-spec init_dest(state()) -> state().
+init_dest(#{name := Name,
+ shovel_type := Type,
+ dest := #{add_forward_headers := true} = Dst} = State) ->
+ Props = #{<<"shovelled-by">> => rabbit_nodes:cluster_name(),
+ <<"shovel-type">> => rabbit_data_coercion:to_binary(Type),
+ <<"shovel-name">> => rabbit_data_coercion:to_binary(Name)},
+ State#{dest => Dst#{cached_forward_headers => Props}};
+init_dest(State) ->
+ State.
+
+-spec source_uri(state()) -> uri().
+source_uri(#{source := #{current := #{uri := Uri}}}) -> Uri.
+
+-spec dest_uri(state()) -> uri().
+dest_uri(#{dest := #{current := #{uri := Uri}}}) -> Uri.
+
+source_protocol(_State) -> amqp10.
+dest_protocol(_State) -> amqp10.
+
+source_endpoint(#{shovel_type := static}) ->
+ [];
+source_endpoint(#{shovel_type := dynamic,
+ source := #{source_address := Addr}}) ->
+ [{src_address, Addr}].
+
+dest_endpoint(#{shovel_type := static}) ->
+ [];
+dest_endpoint(#{shovel_type := dynamic,
+ dest := #{target_address := Addr}}) ->
+ [{dest_address, Addr}].
+
+-spec handle_source(Msg :: any(), state()) -> not_handled | state().
+handle_source({amqp10_msg, _LinkRef, Msg}, State) ->
+ Tag = amqp10_msg:delivery_id(Msg),
+ Payload = amqp10_msg:body_bin(Msg),
+ rabbit_shovel_behaviour:forward(Tag, #{}, Payload, State);
+handle_source({amqp10_event, {connection, Conn, opened}},
+ State = #{source := #{current := #{conn := Conn}}}) ->
+ State;
+handle_source({amqp10_event, {connection, Conn, {closed, Why}}},
+ #{source := #{current := #{conn := Conn}},
+ name := Name}) ->
+ ?INFO("Shovel ~s source connection closed. Reason: ~p~n", [Name, Why]),
+ {stop, {inbound_conn_closed, Why}};
+handle_source({amqp10_event, {session, Sess, begun}},
+ State = #{source := #{current := #{session := Sess}}}) ->
+ State;
+handle_source({amqp10_event, {session, Sess, {ended, Why}}},
+ #{source := #{current := #{session := Sess}}}) ->
+ {stop, {inbound_session_ended, Why}};
+handle_source({amqp10_event, {link, Link, {detached, Why}}},
+ #{source := #{current := #{link := Link}}}) ->
+ {stop, {inbound_link_detached, Why}};
+handle_source({amqp10_event, {link, Link, _Evt}},
+ State= #{source := #{current := #{link := Link}}}) ->
+ State;
+handle_source({'EXIT', Conn, Reason},
+ #{source := #{current := #{conn := Conn}}}) ->
+ {stop, {inbound_conn_died, Reason}};
+handle_source(_Msg, _State) ->
+ not_handled.
+
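+%% A disposition from the destination settles the matching incoming
+%% delivery: 'accepted' acks and 'rejected' nacks on the source link.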
+-spec handle_dest(Msg :: any(), state()) -> not_handled | state().
+handle_dest({amqp10_disposition, {Result, Tag}},
+ State0 = #{ack_mode := on_confirm,
+ dest := #{unacked := Unacked} = Dst,
+ name := Name}) ->
+ State1 = State0#{dest => Dst#{unacked => maps:remove(Tag, Unacked)}},
+ {Decr, State} =
+ case {Unacked, Result} of
+ {#{Tag := IncomingTag}, accepted} ->
+ {1, rabbit_shovel_behaviour:ack(IncomingTag, false, State1)};
+ {#{Tag := IncomingTag}, rejected} ->
+ {1, rabbit_shovel_behaviour:nack(IncomingTag, false, State1)};
+ _ -> % not found - this should ideally not happen
+ error_logger:warning_msg("Shovel ~s amqp10 destination "
+ "disposition tag not found: ~p~n",
+ [Name, Tag]),
+ {0, State1}
+ end,
+ rabbit_shovel_behaviour:decr_remaining(Decr, State);
+handle_dest({amqp10_event, {connection, Conn, opened}},
+ State = #{dest := #{current := #{conn := Conn}}}) ->
+ State;
+handle_dest({amqp10_event, {connection, Conn, {closed, Why}}},
+ #{name := Name,
+ dest := #{current := #{conn := Conn}}}) ->
+ ?INFO("Shovel ~s destination connection closed. Reason: ~p~n", [Name, Why]),
+ {stop, {outbound_conn_died, Why}};
+handle_dest({amqp10_event, {session, Sess, begun}},
+ State = #{dest := #{current := #{session := Sess}}}) ->
+ State;
+handle_dest({amqp10_event, {session, Sess, {ended, Why}}},
+ #{dest := #{current := #{session := Sess}}}) ->
+ {stop, {outbound_session_ended, Why}};
+handle_dest({amqp10_event, {link, Link, {detached, Why}}},
+ #{dest := #{current := #{link := Link}}}) ->
+ {stop, {outbound_link_detached, Why}};
+handle_dest({amqp10_event, {link, Link, credited}},
+ State0 = #{dest := #{current := #{link := Link},
+ pending := Pend} = Dst}) ->
+
+ %% we have credit so can begin to forward
+ State = State0#{dest => Dst#{link_state => credited,
+ pending => []}},
+ lists:foldl(fun ({Tag, Props, Payload}, S) ->
+ forward(Tag, Props, Payload, S)
+ end, State, lists:reverse(Pend));
+handle_dest({amqp10_event, {link, Link, _Evt}},
+ State= #{dest := #{current := #{link := Link}}}) ->
+ State;
+handle_dest({'EXIT', Conn, Reason},
+ #{dest := #{current := #{conn := Conn}}}) ->
+ {stop, {outbound_conn_died, Reason}};
+handle_dest(_Msg, _State) ->
+ not_handled.
+
+close_source(#{source := #{current := #{conn := Conn}}}) ->
+ _ = amqp10_client:close_connection(Conn),
+ ok;
+close_source(_Config) -> ok.
+
+close_dest(#{dest := #{current := #{conn := Conn}}}) ->
+ _ = amqp10_client:close_connection(Conn),
+ ok;
+close_dest(_Config) -> ok.
+
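+%% Settles source deliveries via a disposition frame; a multi-ack settles
+%% the whole range from the last settled delivery id up to Tag.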
+-spec ack(Tag :: tag(), Multi :: boolean(), state()) -> state().
+ack(Tag, true, State = #{source := #{current := #{session := Session},
+ last_acked_tag := LastTag} = Src}) ->
+ First = LastTag + 1,
+ ok = amqp10_client_session:disposition(Session, receiver, First,
+ Tag, true, accepted),
+ State#{source => Src#{last_acked_tag => Tag}};
+ack(Tag, false, State = #{source := #{current :=
+ #{session := Session}} = Src}) ->
+ ok = amqp10_client_session:disposition(Session, receiver, Tag,
+ Tag, true, accepted),
+ State#{source => Src#{last_acked_tag => Tag}}.
+
+-spec nack(Tag :: tag(), Multi :: boolean(), state()) -> state().
+nack(Tag, false, State = #{source :=
+ #{current := #{session := Session}} = Src}) ->
+ % the tag is the same as the delivery id
+ ok = amqp10_client_session:disposition(Session, receiver, Tag,
+ Tag, false, rejected),
+ State#{source => Src#{last_nacked_tag => Tag}};
+nack(Tag, true, State = #{source := #{current := #{session := Session},
+ last_nacked_tag := LastTag} = Src}) ->
+ First = LastTag + 1,
+ ok = amqp10_client_session:disposition(Session, receiver, First,
+ Tag, true, rejected),
+ State#{source => Src#{last_nacked_tag => Tag}}.
+
+-spec forward(Tag :: tag(), Props :: #{atom() => any()},
+ Payload :: binary(), state()) -> state().
+forward(_Tag, _Props, _Payload,
+ #{source := #{remaining_unacked := 0}} = State) ->
+ State;
+forward(Tag, Props, Payload,
+ #{dest := #{current := #{link_state := attached},
+ pending := Pend0} = Dst} = State) ->
+ %% no credit yet: cache the forward until the link is credited
+ Pend = [{Tag, Props, Payload} | Pend0],
+ State#{dest => Dst#{pending => Pend}};
+forward(Tag, Props, Payload,
+ #{dest := #{current := #{link := Link},
+ unacked := Unacked} = Dst,
+ ack_mode := AckMode} = State) ->
+ OutTag = rabbit_data_coercion:to_binary(Tag),
+ Msg0 = new_message(OutTag, Payload, State),
+ Msg = add_timestamp_header(
+ State, set_message_properties(
+ Props, add_forward_headers(State, Msg0))),
+ ok = amqp10_client:send_msg(Link, Msg),
+ rabbit_shovel_behaviour:decr_remaining_unacked(
+ case AckMode of
+ no_ack ->
+ rabbit_shovel_behaviour:decr_remaining(1, State);
+ on_confirm ->
+ State#{dest => Dst#{unacked => Unacked#{OutTag => Tag}}};
+ on_publish ->
+ State1 = rabbit_shovel_behaviour:ack(Tag, false, State),
+ rabbit_shovel_behaviour:decr_remaining(1, State1)
+ end).
+
+new_message(Tag, Payload, #{ack_mode := AckMode,
+ dest := #{properties := Props,
+ application_properties := AppProps,
+ message_annotations := MsgAnns}}) ->
+ Msg0 = amqp10_msg:new(Tag, Payload, AckMode =/= on_confirm),
+ Msg1 = amqp10_msg:set_properties(Props, Msg0),
+ Msg = amqp10_msg:set_message_annotations(MsgAnns, Msg1),
+ amqp10_msg:set_application_properties(AppProps, Msg).
+
+add_timestamp_header(#{dest := #{add_timestamp_header := true}}, Msg) ->
+ P = #{creation_time => os:system_time(milli_seconds)},
+ amqp10_msg:set_properties(P, Msg);
+add_timestamp_header(_, Msg) -> Msg.
+
+add_forward_headers(#{dest := #{cached_forward_headers := Props}}, Msg) ->
+ amqp10_msg:set_application_properties(Props, Msg);
+add_forward_headers(_, Msg) -> Msg.
+
+set_message_properties(Props, Msg) ->
+ %% effectively special-cases properties carried over from AMQP 0.9.1
+ maps:fold(
+ fun(content_type, Ct, M) ->
+ amqp10_msg:set_properties(
+ #{content_type => to_binary(Ct)}, M);
+ (content_encoding, Ct, M) ->
+ amqp10_msg:set_properties(
+ #{content_encoding => to_binary(Ct)}, M);
+ (delivery_mode, 2, M) ->
+ amqp10_msg:set_headers(#{durable => true}, M);
+ (correlation_id, Ct, M) ->
+ amqp10_msg:set_properties(#{correlation_id => to_binary(Ct)}, M);
+ (reply_to, Ct, M) ->
+ amqp10_msg:set_properties(#{reply_to => to_binary(Ct)}, M);
+ (message_id, Ct, M) ->
+ amqp10_msg:set_properties(#{message_id => to_binary(Ct)}, M);
+ (timestamp, Ct, M) ->
+ amqp10_msg:set_properties(#{creation_time => Ct}, M);
+ (user_id, Ct, M) ->
+ amqp10_msg:set_properties(#{user_id => Ct}, M);
+ (headers, Headers0, M) when is_list(Headers0) ->
+ %% AMQP 0.9.1 headers are added as application properties
+ %% TODO: filter headers to make safe
+ Headers = lists:foldl(
+ fun ({K, _T, V}, Acc) ->
+ case is_amqp10_compat(V) of
+ true ->
+ Acc#{to_binary(K) => V};
+ false ->
+ Acc
+ end
+ end, #{}, Headers0),
+ amqp10_msg:set_application_properties(Headers, M);
+ (Key, Value, M) ->
+ case is_amqp10_compat(Value) of
+ true ->
+ amqp10_msg:set_application_properties(
+ #{to_binary(Key) => Value}, M);
+ false ->
+ M
+ end
+ end, Msg, Props).
+
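+%% Builds a unique link name: <prefix>_<16 random hex chars>_<postfix>.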
+gen_unique_name(Pre0, Post0) ->
+ Pre = to_binary(Pre0),
+ Post = to_binary(Post0),
+ Id = bin_to_hex(crypto:strong_rand_bytes(8)),
+ <<Pre/binary, <<"_">>/binary, Id/binary, <<"_">>/binary, Post/binary>>.
+
+bin_to_hex(Bin) ->
+ <<<<if N >= 10 -> N -10 + $a;
+ true -> N + $0 end>>
+ || <<N:4>> <= Bin>>.
+
+is_amqp10_compat(T) ->
+ is_binary(T) orelse
+ is_number(T) orelse
+ %% TODO: not all lists are compatible
+ is_list(T) orelse
+ is_boolean(T).
diff --git a/deps/rabbitmq_shovel/src/rabbit_shovel.erl b/deps/rabbitmq_shovel/src/rabbit_shovel.erl
new file mode 100644
index 0000000000..190a9e2051
--- /dev/null
+++ b/deps/rabbitmq_shovel/src/rabbit_shovel.erl
@@ -0,0 +1,21 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_shovel).
+
+-export([start/0, stop/0, start/2, stop/1]).
+
+start() ->
+ _ = rabbit_shovel_sup:start_link(),
+ ok.
+
+stop() -> ok.
+
+start(normal, []) ->
+ rabbit_shovel_sup:start_link().
+
+stop(_State) -> ok.
diff --git a/deps/rabbitmq_shovel/src/rabbit_shovel_behaviour.erl b/deps/rabbitmq_shovel/src/rabbit_shovel_behaviour.erl
new file mode 100644
index 0000000000..00cfe9b0ae
--- /dev/null
+++ b/deps/rabbitmq_shovel/src/rabbit_shovel_behaviour.erl
@@ -0,0 +1,172 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_shovel_behaviour).
+
+-export([
+ % dynamic calls
+ parse/3,
+ connect_dest/1,
+ connect_source/1,
+ init_dest/1,
+ init_source/1,
+ close_dest/1,
+ close_source/1,
+ handle_dest/2,
+ handle_source/2,
+ source_uri/1,
+ dest_uri/1,
+ source_protocol/1,
+ dest_protocol/1,
+ source_endpoint/1,
+ dest_endpoint/1,
+ forward/4,
+ ack/3,
+ nack/3,
+ % common functions
+ decr_remaining_unacked/1,
+ decr_remaining/2
+ ]).
+
+-type tag() :: non_neg_integer().
+-type uri() :: string() | binary().
+-type ack_mode() :: 'no_ack' | 'on_confirm' | 'on_publish'.
+-type source_config() :: #{module => atom(),
+ uris => [uri()],
+ atom() => term()
+ }.
+-type dest_config() :: #{module => atom(),
+ uris => [uri()],
+ atom() => term()
+ }.
+-type state() :: #{source => source_config(),
+ dest => dest_config(),
+ ack_mode => ack_mode(),
+ atom() => term()}.
+
+-export_type([state/0, source_config/0, dest_config/0, uri/0]).
+
+-callback parse(binary(), {source | destination, Conf :: proplists:proplist()}) ->
+ source_config() | dest_config().
+
+-callback connect_source(state()) -> state().
+-callback connect_dest(state()) -> state().
+
+-callback init_source(state()) -> state().
+-callback init_dest(state()) -> state().
+
+-callback source_uri(state()) -> uri().
+-callback dest_uri(state()) -> uri().
+
+-callback source_protocol(state()) -> atom().
+-callback dest_protocol(state()) -> atom().
+
+-callback source_endpoint(state()) -> proplists:proplist().
+-callback dest_endpoint(state()) -> proplists:proplist().
+
+-callback close_dest(state()) -> ok.
+-callback close_source(state()) -> ok.
+
+-callback handle_source(Msg :: any(), state()) ->
+ not_handled | state() | {stop, any()}.
+-callback handle_dest(Msg :: any(), state()) ->
+ not_handled | state() | {stop, any()}.
+
+-callback ack(Tag :: tag(), Multi :: boolean(), state()) -> state().
+-callback nack(Tag :: tag(), Multi :: boolean(), state()) -> state().
+-callback forward(Tag :: tag(), Props :: #{atom() => any()},
+ Payload :: binary(), state()) -> state().
+
+
+-spec parse(atom(), binary(), {source | destination, proplists:proplist()}) ->
+ source_config() | dest_config().
+parse(Mod, Name, Conf) ->
+ Mod:parse(Name, Conf).
+
+-spec connect_source(state()) -> state().
+connect_source(State = #{source := #{module := Mod}}) ->
+ Mod:connect_source(State).
+
+-spec connect_dest(state()) -> state().
+connect_dest(State = #{dest := #{module := Mod}}) ->
+ Mod:connect_dest(State).
+
+-spec init_source(state()) -> state().
+init_source(State = #{source := #{module := Mod}}) ->
+ Mod:init_source(State).
+
+-spec init_dest(state()) -> state().
+init_dest(State = #{dest := #{module := Mod}}) ->
+ Mod:init_dest(State).
+
+-spec close_source(state()) -> ok.
+close_source(State = #{source := #{module := Mod}}) ->
+ Mod:close_source(State).
+
+-spec close_dest(state()) -> ok.
+close_dest(State = #{dest := #{module := Mod}}) ->
+ Mod:close_dest(State).
+
+-spec handle_source(any(), state()) ->
+ not_handled | state() | {stop, any()}.
+handle_source(Msg, State = #{source := #{module := Mod}}) ->
+ Mod:handle_source(Msg, State).
+
+-spec handle_dest(any(), state()) ->
+ not_handled | state() | {stop, any()}.
+handle_dest(Msg, State = #{dest := #{module := Mod}}) ->
+ Mod:handle_dest(Msg, State).
+
+source_uri(#{source := #{module := Mod}} = State) ->
+ Mod:source_uri(State).
+
+dest_uri(#{dest := #{module := Mod}} = State) ->
+ Mod:dest_uri(State).
+
+source_protocol(#{source := #{module := Mod}} = State) ->
+ Mod:source_protocol(State).
+
+dest_protocol(#{dest := #{module := Mod}} = State) ->
+ Mod:dest_protocol(State).
+
+source_endpoint(#{source := #{module := Mod}} = State) ->
+ Mod:source_endpoint(State).
+
+dest_endpoint(#{dest := #{module := Mod}} = State) ->
+ Mod:dest_endpoint(State).
+
+-spec forward(tag(), #{atom() => any()}, binary(), state()) -> state().
+forward(Tag, Props, Payload, #{dest := #{module := Mod}} = State) ->
+ Mod:forward(Tag, Props, Payload, State).
+
+-spec ack(tag(), boolean(), state()) -> state().
+ack(Tag, Multi, #{source := #{module := Mod}} = State) ->
+ Mod:ack(Tag, Multi, State).
+
+-spec nack(tag(), boolean(), state()) -> state().
+nack(Tag, Multi, #{source := #{module := Mod}} = State) ->
+ Mod:nack(Tag, Multi, State).
+
+%% Common functions
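+%% decr_remaining/2 and decr_remaining_unacked/1 track the message budget
+%% of autodelete shovels; once nothing remains the worker exits with
+%% {shutdown, autodelete}.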
+decr_remaining_unacked(State = #{source := #{remaining_unacked := unlimited}}) ->
+ State;
+decr_remaining_unacked(State = #{source := #{remaining_unacked := 0}}) ->
+ State;
+decr_remaining_unacked(State = #{source := #{remaining_unacked := N} = Src}) ->
+ State#{source => Src#{remaining_unacked => N - 1}}.
+
+decr_remaining(_N, State = #{source := #{remaining := unlimited}}) ->
+ State;
+decr_remaining(N, State = #{source := #{remaining := M} = Src,
+ name := Name}) ->
+ case M > N of
+ true -> State#{source => Src#{remaining => M - N}};
+ false ->
+ error_logger:info_msg("shutting down shovel ~s: no messages remaining, state: ~p~n",
+ [Name, State]),
+ exit({shutdown, autodelete})
+ end.
diff --git a/deps/rabbitmq_shovel/src/rabbit_shovel_config.erl b/deps/rabbitmq_shovel/src/rabbit_shovel_config.erl
new file mode 100644
index 0000000000..06678177d7
--- /dev/null
+++ b/deps/rabbitmq_shovel/src/rabbit_shovel_config.erl
@@ -0,0 +1,176 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_shovel_config).
+
+-export([parse/2,
+ ensure_defaults/2]).
+
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include("rabbit_shovel.hrl").
+
+resolve_module(amqp091) -> rabbit_amqp091_shovel;
+resolve_module(amqp10) -> rabbit_amqp10_shovel.
+
+is_legacy(Config) ->
+ not proplists:is_defined(source, Config).
+
+get_brokers(Props) ->
+ case proplists:get_value(brokers, Props) of
+ undefined ->
+ [get_value(broker, Props)];
+ Brokers ->
+ Brokers
+ end.
+
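+%% Rewrites a legacy (pre-3.7) definition using 'sources'/'destinations'
+%% into the current 'source'/'destination' layout, filling in defaults.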
+convert_from_legacy(Config) ->
+ S = get_value(sources, Config),
+ validate(S),
+ SUris = get_brokers(S),
+ validate_uris(brokers, SUris),
+ D = get_value(destinations, Config),
+ validate(D),
+ DUris = get_brokers(D),
+ validate_uris(brokers, DUris),
+ Q = get_value(queue, Config),
+ DA = proplists:get_value(delete_after, Config, never),
+ Pref = proplists:get_value(prefetch_count, Config, ?DEFAULT_PREFETCH),
+ RD = proplists:get_value(reconnect_delay, Config, ?DEFAULT_RECONNECT_DELAY),
+ AckMode = proplists:get_value(ack_mode, Config, ?DEFAULT_ACK_MODE),
+ validate_ack_mode(AckMode),
+ PubFields = proplists:get_value(publish_fields, Config, []),
+ PubProps = proplists:get_value(publish_properties, Config, []),
+ AFH = proplists:get_value(add_forward_headers, Config, false),
+ ATH = proplists:get_value(add_timestamp_header, Config, false),
+ SourceDecls = proplists:get_value(declarations, S, []),
+ validate_list(SourceDecls),
+ DestDecls = proplists:get_value(declarations, D, []),
+ validate_list(DestDecls),
+ [{source, [{protocol, amqp091},
+ {uris, SUris},
+ {declarations, SourceDecls},
+ {queue, Q},
+ {delete_after, DA},
+ {prefetch_count, Pref}]},
+ {destination, [{protocol, amqp091},
+ {uris, DUris},
+ {declarations, DestDecls},
+ {publish_properties, PubProps},
+ {publish_fields, PubFields},
+ {add_forward_headers, AFH},
+ {add_timestamp_header, ATH}]},
+ {ack_mode, AckMode},
+ {reconnect_delay, RD}].
+
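+%% For illustration only (names and URIs invented, not part of this
+%% change), a minimal static definition accepted by parse/2 looks like:
+%%
+%%   {test_shovel,
+%%    [{source,      [{protocol, amqp091},
+%%                    {uris, ["amqp://localhost"]},
+%%                    {queue, <<"src-queue">>}]},
+%%     {destination, [{protocol, amqp091},
+%%                    {uris, ["amqp://remote-host"]},
+%%                    {publish_fields, [{exchange, <<"dest-x">>}]}]},
+%%     {ack_mode, on_confirm}]}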
+parse(ShovelName, Config0) ->
+ try
+ validate(Config0),
+ case is_legacy(Config0) of
+ true ->
+ Config = convert_from_legacy(Config0),
+ parse_current(ShovelName, Config);
+ false ->
+ parse_current(ShovelName, Config0)
+ end
+ catch throw:{error, Reason} ->
+ {error, {invalid_shovel_configuration, ShovelName, Reason}};
+ throw:Reason ->
+ {error, {invalid_shovel_configuration, ShovelName, Reason}}
+ end.
+
+validate(Props) ->
+ validate_proplist(Props),
+ validate_duplicates(Props).
+
+validate_proplist(Props) when is_list(Props) ->
+ case lists:filter(fun ({_, _}) -> false;
+ (_) -> true
+ end, Props) of
+ [] -> ok;
+ Invalid ->
+ throw({invalid_parameters, Invalid})
+ end;
+validate_proplist(X) ->
+ throw({require_list, X}).
+
+validate_duplicates(Props) ->
+ case duplicate_keys(Props) of
+ [] -> ok;
+ Invalid ->
+ throw({duplicate_parameters, Invalid})
+ end.
+
+validate_list(L) when is_list(L) -> ok;
+validate_list(L) ->
+ throw({require_list, L}).
+
+validate_uris(Key, L) when not is_list(L) ->
+ throw({require_list, Key, L});
+validate_uris(Key, []) ->
+ throw({expected_non_empty_list, Key});
+validate_uris(_Key, L) ->
+ validate_uris0(L).
+
+validate_uris0([Uri | Uris]) ->
+ case amqp_uri:parse(Uri) of
+ {ok, _Params} ->
+ validate_uris0(Uris);
+ {error, _} = Err ->
+ throw(Err)
+ end;
+validate_uris0([]) -> ok.
+
+parse_current(ShovelName, Config) ->
+ {source, Source} = proplists:lookup(source, Config),
+ validate(Source),
+ SrcMod = resolve_module(proplists:get_value(protocol, Source, amqp091)),
+ {destination, Destination} = proplists:lookup(destination, Config),
+ validate(Destination),
+ DstMod = resolve_module(proplists:get_value(protocol, Destination, amqp091)),
+ AckMode = proplists:get_value(ack_mode, Config, no_ack),
+ validate_ack_mode(AckMode),
+ {ok, #{name => ShovelName,
+ shovel_type => static,
+ ack_mode => AckMode,
+ reconnect_delay => proplists:get_value(reconnect_delay, Config,
+ ?DEFAULT_RECONNECT_DELAY),
+ source => rabbit_shovel_behaviour:parse(SrcMod, ShovelName,
+ {source, Source}),
+ dest => rabbit_shovel_behaviour:parse(DstMod, ShovelName,
+ {destination, Destination})}}.
+
+%% ensures that any defaults that have been applied to a parsed
+%% shovel, are written back to the original proplist
+ensure_defaults(ShovelConfig, ParsedShovel) ->
+ lists:keystore(reconnect_delay, 1,
+ ShovelConfig,
+ {reconnect_delay,
+ ParsedShovel#shovel.reconnect_delay}).
+
+-spec fail(term()) -> no_return().
+fail(Reason) -> throw({error, Reason}).
+
+validate_ack_mode(Val) when Val =:= no_ack orelse
+ Val =:= on_publish orelse
+ Val =:= on_confirm ->
+ ok;
+validate_ack_mode(WrongVal) ->
+ fail({invalid_parameter_value, ack_mode,
+ {ack_mode_value_requires_one_of, {no_ack, on_publish, on_confirm},
+ WrongVal}}).
+
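+%% Deleting the first occurrence of every key leaves only the extras, so
+%% whatever keys remain were defined more than once.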
+duplicate_keys(PropList) when is_list(PropList) ->
+ proplists:get_keys(
+ lists:foldl(fun (K, L) -> lists:keydelete(K, 1, L) end, PropList,
+ proplists:get_keys(PropList))).
+
+get_value(Key, Props) ->
+ case proplists:get_value(Key, Props) of
+ undefined ->
+ throw({missing_parameter, Key});
+ V -> V
+ end.
diff --git a/deps/rabbitmq_shovel/src/rabbit_shovel_dyn_worker_sup.erl b/deps/rabbitmq_shovel/src/rabbit_shovel_dyn_worker_sup.erl
new file mode 100644
index 0000000000..00f97619d0
--- /dev/null
+++ b/deps/rabbitmq_shovel/src/rabbit_shovel_dyn_worker_sup.erl
@@ -0,0 +1,48 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_shovel_dyn_worker_sup).
+-behaviour(supervisor2).
+
+-export([start_link/2, init/1]).
+
+-import(rabbit_misc, [pget/3]).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+-include("rabbit_shovel.hrl").
+-define(SUPERVISOR, ?MODULE).
+
+start_link(Name, Config) ->
+ supervisor2:start_link(?MODULE, [Name, Config]).
+
+%%----------------------------------------------------------------------------
+
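+%% The child restart policy is derived from reconnect-delay and
+%% delete-after below: autodelete shovels restart as transient children
+%% and a reconnect-delay of 0 disables restarts altogether.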
+init([Name, Config0]) ->
+ Config = rabbit_data_coercion:to_proplist(Config0),
+ Delay = pget(<<"reconnect-delay">>, Config, ?DEFAULT_RECONNECT_DELAY),
+ case Name of
+ {VHost, ShovelName} -> rabbit_log:debug("Shovel '~s' in virtual host '~s' will use reconnection delay of ~p", [ShovelName, VHost, Delay]);
+ ShovelName -> rabbit_log:debug("Shovel '~s' will use reconnection delay of ~p", [ShovelName, Delay])
+ end,
+ Restart = case Delay of
+ N when is_integer(N) andalso N > 0 ->
+ case pget(<<"src-delete-after">>, Config, pget(<<"delete-after">>, Config, <<"never">>)) of
+ %% always try to reconnect
+ <<"never">> -> {permanent, N};
+ %% this Shovel is an autodelete one
+ M when is_integer(M) andalso M > 0 -> {transient, N};
+ <<"queue-length">> -> {transient, N}
+ end;
+ %% reconnect-delay = 0 means "do not reconnect"
+ _ -> temporary
+ end,
+
+ {ok, {{one_for_one, 1, ?MAX_WAIT},
+ [{Name,
+ {rabbit_shovel_worker, start_link, [dynamic, Name, Config]},
+ Restart,
+ 16#ffffffff, worker, [rabbit_shovel_worker]}]}}.
diff --git a/deps/rabbitmq_shovel/src/rabbit_shovel_dyn_worker_sup_sup.erl b/deps/rabbitmq_shovel/src/rabbit_shovel_dyn_worker_sup_sup.erl
new file mode 100644
index 0000000000..347b3d9d47
--- /dev/null
+++ b/deps/rabbitmq_shovel/src/rabbit_shovel_dyn_worker_sup_sup.erl
@@ -0,0 +1,71 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_shovel_dyn_worker_sup_sup).
+-behaviour(mirrored_supervisor).
+
+-export([start_link/0, init/1, adjust/2, stop_child/1]).
+
+-import(rabbit_misc, [pget/2]).
+
+-include("rabbit_shovel.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+-define(SUPERVISOR, ?MODULE).
+
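+%% Starts the mirrored supervisor and (re)creates a child for every
+%% dynamic Shovel definition found among the runtime parameters.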
+start_link() ->
+ Pid = case mirrored_supervisor:start_link(
+ {local, ?SUPERVISOR}, ?SUPERVISOR,
+ fun rabbit_misc:execute_mnesia_transaction/1, ?MODULE, []) of
+ {ok, Pid0} -> Pid0;
+ {error, {already_started, Pid0}} -> Pid0
+ end,
+ Shovels = rabbit_runtime_parameters:list_component(<<"shovel">>),
+ [start_child({pget(vhost, Shovel), pget(name, Shovel)},
+ pget(value, Shovel)) || Shovel <- Shovels],
+ {ok, Pid}.
+
+adjust(Name, Def) ->
+ case child_exists(Name) of
+ true -> stop_child(Name);
+ false -> ok
+ end,
+ start_child(Name, Def).
+
+start_child(Name, Def) ->
+ case mirrored_supervisor:start_child(
+ ?SUPERVISOR,
+ {Name, {rabbit_shovel_dyn_worker_sup, start_link, [Name, Def]},
+ transient, ?WORKER_WAIT, worker, [rabbit_shovel_dyn_worker_sup]}) of
+ {ok, _Pid} -> ok;
+ {error, {already_started, _Pid}} -> ok
+ end.
+
+child_exists(Name) ->
+ lists:any(fun ({N, _, _, _}) -> N =:= Name end,
+ mirrored_supervisor:which_children(?SUPERVISOR)).
+
+stop_child(Name) ->
+ case get(shovel_worker_autodelete) of
+ true -> ok; %% [1]
+ _ ->
+ ok = mirrored_supervisor:terminate_child(?SUPERVISOR, Name),
+ ok = mirrored_supervisor:delete_child(?SUPERVISOR, Name),
+ rabbit_shovel_status:remove(Name)
+ end.
+
+%% [1] An autodeleting worker removes its own parameter, and thus ends
+%% up here via the parameter callback. It is a transient worker that
+%% is just about to terminate normally - so we don't need to tell the
+%% supervisor to stop us - and as usual if we call into our own
+%% supervisor we risk deadlock.
+%%
+%% See rabbit_shovel_worker:maybe_autodelete/1
+
+%%----------------------------------------------------------------------------
+
+init([]) ->
+ {ok, {{one_for_one, 3, 10}, []}}.
diff --git a/deps/rabbitmq_shovel/src/rabbit_shovel_parameters.erl b/deps/rabbitmq_shovel/src/rabbit_shovel_parameters.erl
new file mode 100644
index 0000000000..df390c2b92
--- /dev/null
+++ b/deps/rabbitmq_shovel/src/rabbit_shovel_parameters.erl
@@ -0,0 +1,465 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_shovel_parameters).
+-behaviour(rabbit_runtime_parameter).
+
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include("rabbit_shovel.hrl").
+
+-export([validate/5, notify/5, notify_clear/4]).
+-export([register/0, unregister/0, parse/3]).
+
+-import(rabbit_misc, [pget/2, pget/3]).
+
+-rabbit_boot_step({?MODULE,
+ [{description, "shovel parameters"},
+ {mfa, {rabbit_shovel_parameters, register, []}},
+ {cleanup, {?MODULE, unregister, []}},
+ {requires, rabbit_registry},
+ {enables, recovery}]}).
+
+register() ->
+ rabbit_registry:register(runtime_parameter, <<"shovel">>, ?MODULE).
+
+unregister() ->
+ rabbit_registry:unregister(runtime_parameter, <<"shovel">>).
+
+validate(_VHost, <<"shovel">>, Name, Def0, User) ->
+ Def = rabbit_data_coercion:to_proplist(Def0),
+ Validations =
+ shovel_validation()
+ ++ src_validation(Def, User)
+ ++ dest_validation(Def, User),
+ validate_src(Def)
+ ++ validate_dest(Def)
+ ++ rabbit_parameter_validation:proplist(Name, Validations, Def);
+
+validate(_VHost, _Component, Name, _Term, _User) ->
+ {error, "name not recognised: ~p", [Name]}.
+
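+%% Reports whether zero, one or both of two mutually exclusive keys are
+%% present in the definition.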
+pget2(K1, K2, Defs) -> case {pget(K1, Defs), pget(K2, Defs)} of
+ {undefined, undefined} -> zero;
+ {undefined, _} -> one;
+ {_, undefined} -> one;
+ {_, _} -> both
+ end.
+
+notify(VHost, <<"shovel">>, Name, Definition, _Username) ->
+ rabbit_shovel_dyn_worker_sup_sup:adjust({VHost, Name}, Definition).
+
+notify_clear(VHost, <<"shovel">>, Name, _Username) ->
+ rabbit_shovel_dyn_worker_sup_sup:stop_child({VHost, Name}).
+
+%%----------------------------------------------------------------------------
+
+validate_src(Def) ->
+ case protocols(Def) of
+ {amqp091, _} -> validate_amqp091_src(Def);
+ {amqp10, _} -> []
+ end.
+
+validate_dest(Def) ->
+ case protocols(Def) of
+ {_, amqp091} -> validate_amqp091_dest(Def);
+ {_, amqp10} -> []
+ end.
+
+validate_amqp091_src(Def) ->
+ [case pget2(<<"src-exchange">>, <<"src-queue">>, Def) of
+ zero -> {error, "Must specify 'src-exchange' or 'src-queue'", []};
+ one -> ok;
+ both -> {error, "Cannot specify 'src-exchange' and 'src-queue'", []}
+ end,
+ case {pget(<<"src-delete-after">>, Def, pget(<<"delete-after">>, Def)), pget(<<"ack-mode">>, Def)} of
+ {N, <<"no-ack">>} when is_integer(N) ->
+ {error, "Cannot specify 'no-ack' and numerical 'delete-after'", []};
+ _ ->
+ ok
+ end].
+
+validate_amqp091_dest(Def) ->
+ [case pget2(<<"dest-exchange">>, <<"dest-queue">>, Def) of
+ zero -> ok;
+ one -> ok;
+ both -> {error, "Cannot specify 'dest-exchange' and 'dest-queue'", []}
+ end].
+
+shovel_validation() ->
+ [{<<"reconnect-delay">>, fun rabbit_parameter_validation:number/2,optional},
+ {<<"ack-mode">>, rabbit_parameter_validation:enum(
+ ['no-ack', 'on-publish', 'on-confirm']), optional},
+ {<<"src-protocol">>,
+ rabbit_parameter_validation:enum(['amqp10', 'amqp091']), optional},
+ {<<"dest-protocol">>,
+ rabbit_parameter_validation:enum(['amqp10', 'amqp091']), optional}
+ ].
+
+src_validation(Def, User) ->
+ case protocols(Def) of
+ {amqp091, _} -> amqp091_src_validation(Def, User);
+ {amqp10, _} -> amqp10_src_validation(Def, User)
+ end.
+
+
+amqp10_src_validation(_Def, User) ->
+ [
+ {<<"src-uri">>, validate_uri_fun(User), mandatory},
+ {<<"src-address">>, fun rabbit_parameter_validation:binary/2, mandatory},
+ {<<"src-prefetch-count">>, fun rabbit_parameter_validation:number/2, optional},
+ {<<"src-delete-after">>, fun validate_delete_after/2, optional}
+ ].
+
+amqp091_src_validation(_Def, User) ->
+ [
+ {<<"src-uri">>, validate_uri_fun(User), mandatory},
+ {<<"src-exchange">>, fun rabbit_parameter_validation:binary/2,optional},
+ {<<"src-exchange-key">>,fun rabbit_parameter_validation:binary/2,optional},
+ {<<"src-queue">>, fun rabbit_parameter_validation:binary/2,optional},
+ {<<"prefetch-count">>, fun rabbit_parameter_validation:number/2,optional},
+ {<<"src-prefetch-count">>, fun rabbit_parameter_validation:number/2,optional},
+ %% a deprecated pre-3.7 setting
+ {<<"delete-after">>, fun validate_delete_after/2, optional},
+ %% the currently used multi-protocol friendly name, introduced in 3.7
+ {<<"src-delete-after">>, fun validate_delete_after/2, optional}
+ ].
+
+dest_validation(Def0, User) ->
+ Def = rabbit_data_coercion:to_proplist(Def0),
+ case protocols(Def) of
+ {_, amqp091} -> amqp091_dest_validation(Def, User);
+ {_, amqp10} -> amqp10_dest_validation(Def, User)
+ end.
+
+amqp10_dest_validation(_Def, User) ->
+ [{<<"dest-uri">>, validate_uri_fun(User), mandatory},
+ {<<"dest-address">>, fun rabbit_parameter_validation:binary/2, mandatory},
+ {<<"dest-add-forward-headers">>, fun rabbit_parameter_validation:boolean/2, optional},
+ {<<"dest-add-timestamp-header">>, fun rabbit_parameter_validation:boolean/2, optional},
+ {<<"dest-application-properties">>, fun validate_amqp10_map/2, optional},
+ {<<"dest-message-annotations">>, fun validate_amqp10_map/2, optional},
+ % TODO: restrict to allowed fields
+ {<<"dest-properties">>, fun validate_amqp10_map/2, optional}
+ ].
+
+amqp091_dest_validation(_Def, User) ->
+ [{<<"dest-uri">>, validate_uri_fun(User), mandatory},
+ {<<"dest-exchange">>, fun rabbit_parameter_validation:binary/2,optional},
+ {<<"dest-exchange-key">>,fun rabbit_parameter_validation:binary/2,optional},
+ {<<"dest-queue">>, fun rabbit_parameter_validation:binary/2,optional},
+ {<<"add-forward-headers">>, fun rabbit_parameter_validation:boolean/2,optional},
+ {<<"add-timestamp-header">>, fun rabbit_parameter_validation:boolean/2,optional},
+ {<<"dest-add-forward-headers">>, fun rabbit_parameter_validation:boolean/2,optional},
+ {<<"dest-add-timestamp-header">>, fun rabbit_parameter_validation:boolean/2,optional},
+ {<<"publish-properties">>, fun validate_properties/2, optional},
+ {<<"dest-publish-properties">>, fun validate_properties/2, optional}
+ ].
+
+validate_uri_fun(User) ->
+ fun (Name, Term) -> validate_uri(Name, Term, User) end.
+
+validate_uri(Name, Term, User) when is_binary(Term) ->
+ case rabbit_parameter_validation:binary(Name, Term) of
+ ok -> case amqp_uri:parse(binary_to_list(Term)) of
+ {ok, P} -> validate_params_user(P, User);
+ {error, E} -> {error, "\"~s\" not a valid URI: ~p", [Term, E]}
+ end;
+ E -> E
+ end;
+validate_uri(Name, Term, User) ->
+ case rabbit_parameter_validation:list(Name, Term) of
+ ok -> case [V || URI <- Term,
+ V <- [validate_uri(Name, URI, User)],
+ element(1, V) =:= error] of
+ [] -> ok;
+ [E | _] -> E
+ end;
+ E -> E
+ end.
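+%% Illustrative: validate_uri/3 accepts either a single binary URI or a
+%% list of them, e.g. (assuming the URI parses and the user may access the
+%% vhost)
+%%
+%%   validate_uri(<<"src-uri">>, <<"amqp://localhost">>, User)
+%%   validate_uri(<<"src-uri">>, [<<"amqp://a">>, <<"amqp://b">>], User)
+%%
+%% both evaluate to ok; for a list, the first {error, Fmt, Args} found is
+%% returned instead.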
+
+validate_params_user(#amqp_params_direct{}, none) ->
+ ok;
+validate_params_user(#amqp_params_direct{virtual_host = VHost},
+ User = #user{username = Username}) ->
+ VHostAccess = case catch rabbit_access_control:check_vhost_access(User, VHost, undefined, #{}) of
+ ok -> ok;
+ NotOK ->
+ rabbit_log:debug("rabbit_access_control:check_vhost_access result: ~p", [NotOK]),
+ NotOK
+ end,
+ case rabbit_vhost:exists(VHost) andalso VHostAccess of
+ ok -> ok;
+ _ ->
+ {error, "user \"~s\" may not connect to vhost \"~s\"", [Username, VHost]}
+ end;
+validate_params_user(#amqp_params_network{}, _User) ->
+ ok.
+
+validate_delete_after(_Name, <<"never">>) -> ok;
+validate_delete_after(_Name, <<"queue-length">>) -> ok;
+validate_delete_after(_Name, N) when is_integer(N) -> ok;
+validate_delete_after(Name, Term) ->
+ {error, "~s should be a number, \"never\" or \"queue-length\", actually was "
+ "~p", [Name, Term]}.
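+%% Illustrative examples of validate_delete_after/2:
+%%
+%%   validate_delete_after(<<"src-delete-after">>, <<"never">>)  -> ok
+%%   validate_delete_after(<<"src-delete-after">>, 1000)         -> ok
+%%   validate_delete_after(<<"src-delete-after">>, <<"always">>) -> {error, _, _}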
+
+validate_amqp10_map(Name, Terms0) ->
+ Terms = rabbit_data_coercion:to_proplist(Terms0),
+ Str = fun rabbit_parameter_validation:binary/2,
+ Validation = [{K, Str, optional} || {K, _} <- Terms],
+ rabbit_parameter_validation:proplist(Name, Validation, Terms).
+
+%% TODO headers?
+validate_properties(Name, Term0) ->
+ Term = case Term0 of
+ T when is_map(T) ->
+ rabbit_data_coercion:to_proplist(Term0);
+ T when is_list(T) ->
+ rabbit_data_coercion:to_proplist(Term0);
+ Other -> Other
+ end,
+ Str = fun rabbit_parameter_validation:binary/2,
+ Num = fun rabbit_parameter_validation:number/2,
+ rabbit_parameter_validation:proplist(
+ Name, [{<<"content_type">>, Str, optional},
+ {<<"content_encoding">>, Str, optional},
+ {<<"delivery_mode">>, Num, optional},
+ {<<"priority">>, Num, optional},
+ {<<"correlation_id">>, Str, optional},
+ {<<"reply_to">>, Str, optional},
+ {<<"expiration">>, Str, optional},
+ {<<"message_id">>, Str, optional},
+ {<<"timestamp">>, Num, optional},
+ {<<"type">>, Str, optional},
+ {<<"user_id">>, Str, optional},
+ {<<"app_id">>, Str, optional},
+ {<<"cluster_id">>, Str, optional}], Term).
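+%% A sketch of input that passes validate_properties/2 (map or proplist):
+%%
+%%   validate_properties(<<"publish-properties">>,
+%%                       #{<<"delivery_mode">> => 2,
+%%                         <<"content_type">>  => <<"application/json">>})
+%%
+%% Unrecognised keys, or a string where a number is expected, fail.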
+
+%%----------------------------------------------------------------------------
+
+parse({VHost, Name}, ClusterName, Def) ->
+ {Source, SourceHeaders} = parse_source(Def),
+ {ok, #{name => Name,
+ shovel_type => dynamic,
+ source => Source,
+ dest => parse_dest({VHost, Name}, ClusterName, Def,
+ SourceHeaders),
+ ack_mode => translate_ack_mode(pget(<<"ack-mode">>, Def, <<"on-confirm">>)),
+ reconnect_delay => pget(<<"reconnect-delay">>, Def,
+ ?DEFAULT_RECONNECT_DELAY)}}.
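+%% For orientation, parse/3 returns {ok, Map} where Map has roughly this
+%% shape (values below are placeholders, not defaults):
+%%
+%%   #{name => <<"my-shovel">>,
+%%     shovel_type => dynamic,
+%%     source => #{module => rabbit_amqp091_shovel, ...},
+%%     dest => #{module => rabbit_amqp10_shovel, ...},
+%%     ack_mode => on_confirm,
+%%     reconnect_delay => 5}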
+
+parse_source(Def) ->
+ case protocols(Def) of
+ {amqp10, _} -> parse_amqp10_source(Def);
+ {amqp091, _} -> parse_amqp091_source(Def)
+ end.
+
+parse_dest(VHostName, ClusterName, Def, SourceHeaders) ->
+ case protocols(Def) of
+ {_, amqp10} ->
+ parse_amqp10_dest(VHostName, ClusterName, Def, SourceHeaders);
+ {_, amqp091} ->
+ parse_amqp091_dest(VHostName, ClusterName, Def, SourceHeaders)
+ end.
+
+parse_amqp10_dest({_VHost, _Name}, _ClusterName, Def, SourceHeaders) ->
+ Uris = get_uris(<<"dest-uri">>, Def),
+ Address = pget(<<"dest-address">>, Def),
+ Properties =
+ rabbit_data_coercion:to_proplist(
+ pget(<<"dest-properties">>, Def, [])),
+ AppProperties =
+ rabbit_data_coercion:to_proplist(
+ pget(<<"dest-application-properties">>, Def, [])),
+ MessageAnns =
+ rabbit_data_coercion:to_proplist(
+ pget(<<"dest-message-annotations">>, Def, [])),
+ #{module => rabbit_amqp10_shovel,
+ uris => Uris,
+ target_address => Address,
+ message_annotations => maps:from_list(MessageAnns),
+ application_properties => maps:from_list(AppProperties ++ SourceHeaders),
+ properties => maps:from_list(
+ lists:map(fun({K, V}) ->
+ {rabbit_data_coercion:to_atom(K), V}
+ end, Properties)),
+ add_timestamp_header => pget(<<"dest-add-timestamp-header">>, Def, false),
+ add_forward_headers => pget(<<"dest-add-forward-headers">>, Def, false),
+ unacked => #{}
+ }.
+
+parse_amqp091_dest({VHost, Name}, ClusterName, Def, SourceHeaders) ->
+ DestURIs = get_uris(<<"dest-uri">>, Def),
+ DestX = pget(<<"dest-exchange">>, Def, none),
+ DestXKey = pget(<<"dest-exchange-key">>, Def, none),
+ DestQ = pget(<<"dest-queue">>, Def, none),
+ DestDeclFun = fun (Conn, _Ch) ->
+ case DestQ of
+ none -> ok;
+ _ -> ensure_queue(Conn, DestQ)
+ end
+ end,
+ {X, Key} = case DestQ of
+ none -> {DestX, DestXKey};
+ _ -> {<<>>, DestQ}
+ end,
+ Table2 = [{K, V} || {K, V} <- [{<<"dest-exchange">>, DestX},
+ {<<"dest-exchange-key">>, DestXKey},
+ {<<"dest-queue">>, DestQ}],
+ V =/= none],
+ PubFun = fun (_SrcURI, _DestURI, P0) ->
+ P1 = case X of
+ none -> P0;
+ _ -> P0#'basic.publish'{exchange = X}
+ end,
+ case Key of
+ none -> P1;
+ _ -> P1#'basic.publish'{routing_key = Key}
+ end
+ end,
+ AddHeadersLegacy = pget(<<"add-forward-headers">>, Def, false),
+ AddHeaders = pget(<<"dest-add-forward-headers">>, Def, AddHeadersLegacy),
+ Table0 = [{<<"shovelled-by">>, ClusterName},
+ {<<"shovel-type">>, <<"dynamic">>},
+ {<<"shovel-name">>, Name},
+ {<<"shovel-vhost">>, VHost}],
+ SetProps = lookup_indices(pget(<<"dest-publish-properties">>, Def,
+ pget(<<"publish-properties">>, Def, [])),
+ record_info(fields, 'P_basic')),
+ AddTimestampHeaderLegacy = pget(<<"add-timestamp-header">>, Def, false),
+ AddTimestampHeader = pget(<<"dest-add-timestamp-header">>, Def,
+ AddTimestampHeaderLegacy),
+ PubPropsFun = fun (SrcURI, DestURI, P0) ->
+ P = set_properties(P0, SetProps),
+ P1 = case AddHeaders of
+ true -> rabbit_shovel_util:update_headers(
+ Table0, SourceHeaders ++ Table2,
+ SrcURI, DestURI, P);
+ false -> P
+ end,
+ case AddTimestampHeader of
+ true -> rabbit_shovel_util:add_timestamp_header(P1);
+ false -> P1
+ end
+ end,
+ %% Details are only used for the status report in rabbitmqctl, as the
+ %% vhost is not available when querying the runtime parameters.
+ Details = maps:from_list([{K, V} || {K, V} <- [{dest_exchange, DestX},
+ {dest_exchange_key, DestXKey},
+ {dest_queue, DestQ}],
+ V =/= none]),
+ maps:merge(#{module => rabbit_amqp091_shovel,
+ uris => DestURIs,
+ resource_decl => DestDeclFun,
+ fields_fun => PubFun,
+ props_fun => PubPropsFun
+ }, Details).
+
+parse_amqp10_source(Def) ->
+ Uris = get_uris(<<"src-uri">>, Def),
+ Address = pget(<<"src-address">>, Def),
+ DeleteAfter = pget(<<"src-delete-after">>, Def, <<"never">>),
+ PrefetchCount = pget(<<"src-prefetch-count">>, Def, 1000),
+ Headers = [],
+ {#{module => rabbit_amqp10_shovel,
+ uris => Uris,
+ source_address => Address,
+ delete_after => opt_b2a(DeleteAfter),
+ prefetch_count => PrefetchCount}, Headers}.
+
+parse_amqp091_source(Def) ->
+ SrcURIs = get_uris(<<"src-uri">>, Def),
+ SrcX = pget(<<"src-exchange">>,Def, none),
+ SrcXKey = pget(<<"src-exchange-key">>, Def, <<>>), %% [1]
+ SrcQ = pget(<<"src-queue">>, Def, none),
+ {SrcDeclFun, Queue, DestHeaders} =
+ case SrcQ of
+ none -> {fun (_Conn, Ch) ->
+ Ms = [#'queue.declare'{exclusive = true},
+ #'queue.bind'{routing_key = SrcXKey,
+ exchange = SrcX}],
+ [amqp_channel:call(Ch, M) || M <- Ms]
+ end, <<>>, [{<<"src-exchange">>, SrcX},
+ {<<"src-exchange-key">>, SrcXKey}]};
+ _ -> {fun (Conn, _Ch) ->
+ ensure_queue(Conn, SrcQ)
+ end, SrcQ, [{<<"src-queue">>, SrcQ}]}
+ end,
+ DeleteAfter = pget(<<"src-delete-after">>, Def,
+ pget(<<"delete-after">>, Def, <<"never">>)),
+ PrefetchCount = pget(<<"src-prefetch-count">>, Def,
+ pget(<<"prefetch-count">>, Def, 1000)),
+ %% Details are only used for the status report in rabbitmqctl, as the
+ %% vhost is not available when querying the runtime parameters.
+ Details = maps:from_list([{K, V} || {K, V} <- [{source_exchange, SrcX},
+ {source_exchange_key, SrcXKey}],
+ V =/= none]),
+ {maps:merge(#{module => rabbit_amqp091_shovel,
+ uris => SrcURIs,
+ resource_decl => SrcDeclFun,
+ queue => Queue,
+ delete_after => opt_b2a(DeleteAfter),
+ prefetch_count => PrefetchCount
+ }, Details), DestHeaders}.
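+%% [1] an omitted 'src-exchange-key' defaults to the empty binary, i.e.
+%% the transient queue is bound to the source exchange with an empty
+%% routing key.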
+
+get_uris(Key, Def) ->
+ URIs = case pget(Key, Def) of
+ B when is_binary(B) -> [B];
+ L when is_list(L) -> L
+ end,
+ [binary_to_list(URI) || URI <- URIs].
+
+translate_ack_mode(<<"on-confirm">>) -> on_confirm;
+translate_ack_mode(<<"on-publish">>) -> on_publish;
+translate_ack_mode(<<"no-ack">>) -> no_ack.
+
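+%% Declares Queue passively first; if the broker closes the channel with
+%% 404 NOT_FOUND, a fresh channel is opened and the queue is declared as
+%% durable. The inner 'catch' around the close is deliberate: that second
+%% channel may already be gone by then.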
+ensure_queue(Conn, Queue) ->
+ {ok, Ch} = amqp_connection:open_channel(Conn),
+ try
+ amqp_channel:call(Ch, #'queue.declare'{queue = Queue,
+ passive = true})
+ catch exit:{{shutdown, {server_initiated_close, ?NOT_FOUND, _Text}}, _} ->
+ {ok, Ch2} = amqp_connection:open_channel(Conn),
+ amqp_channel:call(Ch2, #'queue.declare'{queue = Queue,
+ durable = true}),
+ catch amqp_channel:close(Ch2)
+
+ after
+ catch amqp_channel:close(Ch)
+ end.
+
+opt_b2a(B) when is_binary(B) -> list_to_atom(binary_to_list(B));
+opt_b2a(N) -> N.
+
+set_properties(Props, []) ->
+ Props;
+set_properties(Props, [{Ix, V} | Rest]) ->
+ set_properties(setelement(Ix, Props, V), Rest).
+
+lookup_indices(KVs0, L) ->
+ KVs = rabbit_data_coercion:to_proplist(KVs0),
+ [{1 + list_find(list_to_atom(binary_to_list(K)), L), V} || {K, V} <- KVs].
+
+list_find(K, L) -> list_find(K, L, 1).
+
+list_find(K, [K|_], N) -> N;
+list_find(K, [], _N) -> exit({not_found, K});
+list_find(K, [_|L], N) -> list_find(K, L, N + 1).
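+%% e.g. list_find(priority, record_info(fields, 'P_basic')) yields the
+%% 1-based position of 'priority' among the record fields; the "1 +" in
+%% lookup_indices/2 accounts for the record tag, producing the index that
+%% setelement/3 expects.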
+
+protocols(Def) when is_map(Def) ->
+ protocols(rabbit_data_coercion:to_proplist(Def));
+protocols(Def) ->
+ Src = case lists:keyfind(<<"src-protocol">>, 1, Def) of
+ {_, SrcProtocol} ->
+ rabbit_data_coercion:to_atom(SrcProtocol);
+ false -> amqp091
+ end,
+ Dst = case lists:keyfind(<<"dest-protocol">>, 1, Def) of
+ {_, DstProtocol} ->
+ rabbit_data_coercion:to_atom(DstProtocol);
+ false -> amqp091
+ end,
+ {Src, Dst}.
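+%% Both protocols default to amqp091 when unspecified, e.g.
+%%
+%%   protocols([])                                    -> {amqp091, amqp091}
+%%   protocols([{<<"dest-protocol">>, <<"amqp10">>}]) -> {amqp091, amqp10}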
diff --git a/deps/rabbitmq_shovel/src/rabbit_shovel_status.erl b/deps/rabbitmq_shovel/src/rabbit_shovel_status.erl
new file mode 100644
index 0000000000..d4836bea81
--- /dev/null
+++ b/deps/rabbitmq_shovel/src/rabbit_shovel_status.erl
@@ -0,0 +1,88 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_shovel_status).
+-behaviour(gen_server).
+
+-export([start_link/0]).
+
+-export([report/3, remove/1, status/0, lookup/1]).
+
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
+ terminate/2, code_change/3]).
+
+-define(SERVER, ?MODULE).
+-define(ETS_NAME, ?MODULE).
+
+-record(state, {}).
+-record(entry, {name, type, info, timestamp}).
+
+start_link() ->
+ gen_server:start_link({local, ?SERVER}, ?MODULE, [], []).
+
+report(Name, Type, Info) ->
+ gen_server:cast(?SERVER, {report, Name, Type, Info, calendar:local_time()}).
+
+remove(Name) ->
+ gen_server:cast(?SERVER, {remove, Name}).
+
+status() ->
+ gen_server:call(?SERVER, status, infinity).
+
+lookup(Name) ->
+ gen_server:call(?SERVER, {lookup, Name}, infinity).
+
+init([]) ->
+ ?ETS_NAME = ets:new(?ETS_NAME,
+ [named_table, {keypos, #entry.name}, private]),
+ {ok, #state{}}.
+
+handle_call(status, _From, State) ->
+ Entries = ets:tab2list(?ETS_NAME),
+ {reply, [{Entry#entry.name, Entry#entry.type, Entry#entry.info,
+ Entry#entry.timestamp}
+ || Entry <- Entries], State};
+
+handle_call({lookup, Name}, _From, State) ->
+ Link = case ets:lookup(?ETS_NAME, Name) of
+ [Entry] -> [{name, Name},
+ {type, Entry#entry.type},
+ {info, Entry#entry.info},
+ {timestamp, Entry#entry.timestamp}];
+ [] -> not_found
+ end,
+ {reply, Link, State}.
+
+handle_cast({report, Name, Type, Info, Timestamp}, State) ->
+ true = ets:insert(?ETS_NAME, #entry{name = Name, type = Type, info = Info,
+ timestamp = Timestamp}),
+ rabbit_event:notify(shovel_worker_status,
+ split_name(Name) ++ split_status(Info)),
+ {noreply, State};
+
+handle_cast({remove, Name}, State) ->
+ true = ets:delete(?ETS_NAME, Name),
+ rabbit_event:notify(shovel_worker_removed, split_name(Name)),
+ {noreply, State}.
+
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+split_status({running, MoreInfo}) -> [{status, running} | MoreInfo];
+split_status({terminated, Reason}) -> [{status, terminated},
+ {reason, Reason}];
+split_status(Status) when is_atom(Status) -> [{status, Status}].
+
+split_name({VHost, Name}) -> [{name, Name},
+ {vhost, VHost}];
+split_name(Name) when is_atom(Name) -> [{name, Name}].
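+%% For reference, status/0 returns a list of {Name, Type, Info, Timestamp}
+%% tuples, e.g. (values illustrative):
+%%
+%%   {{<<"/">>, <<"my-shovel">>}, dynamic,
+%%    {running, [{src_uri, <<"amqp://...">>}]},
+%%    {{2020,11,18},{14,27,41}}}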
diff --git a/deps/rabbitmq_shovel/src/rabbit_shovel_sup.erl b/deps/rabbitmq_shovel/src/rabbit_shovel_sup.erl
new file mode 100644
index 0000000000..f04b4758a3
--- /dev/null
+++ b/deps/rabbitmq_shovel/src/rabbit_shovel_sup.erl
@@ -0,0 +1,77 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_shovel_sup).
+-behaviour(supervisor2).
+
+-export([start_link/0, init/1]).
+
+-import(rabbit_shovel_config, []).
+
+-include("rabbit_shovel.hrl").
+
+start_link() ->
+ case parse_configuration(application:get_env(shovels)) of
+ {ok, Configurations} ->
+ supervisor2:start_link({local, ?MODULE}, ?MODULE, [Configurations]);
+ {error, Reason} ->
+ {error, Reason}
+ end.
+
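+%% The restart intensity below (2*Len restarts within 2 seconds) scales
+%% with the number of statically configured shovels, so a single
+%% repeatedly failing shovel cannot exhaust the restart budget shared by
+%% all the others.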
+init([Configurations]) ->
+ Len = dict:size(Configurations),
+ ChildSpecs = [{rabbit_shovel_status,
+ {rabbit_shovel_status, start_link, []},
+ transient, 16#ffffffff, worker,
+ [rabbit_shovel_status]},
+ {rabbit_shovel_dyn_worker_sup_sup,
+ {rabbit_shovel_dyn_worker_sup_sup, start_link, []},
+ transient, 16#ffffffff, supervisor,
+ [rabbit_shovel_dyn_worker_sup_sup]} |
+ make_child_specs(Configurations)],
+ {ok, {{one_for_one, 2*Len, 2}, ChildSpecs}}.
+
+make_child_specs(Configurations) ->
+ dict:fold(
+ fun (ShovelName, ShovelConfig, Acc) ->
+ [{ShovelName,
+ {rabbit_shovel_worker_sup, start_link,
+ [ShovelName, ShovelConfig]},
+ permanent,
+ 16#ffffffff,
+ supervisor,
+ [rabbit_shovel_worker_sup]} | Acc]
+ end, [], Configurations).
+
+parse_configuration(undefined) ->
+ {ok, dict:new()};
+parse_configuration({ok, Env}) ->
+ {ok, Defaults} = application:get_env(defaults),
+ parse_configuration(Defaults, Env, dict:new()).
+
+parse_configuration(_Defaults, [], Acc) ->
+ {ok, Acc};
+parse_configuration(Defaults, [{ShovelName, ShovelConfig} | Env], Acc)
+ when is_atom(ShovelName) andalso is_list(ShovelConfig) ->
+ case dict:is_key(ShovelName, Acc) of
+ true -> {error, {duplicate_shovel_definition, ShovelName}};
+ false -> case validate_shovel_config(ShovelName, ShovelConfig) of
+ {ok, Shovel} ->
+ %% make sure the config we accumulate has any
+ %% relevant default values (discovered during
+ %% validation) applied back to it
+ Acc2 = dict:store(ShovelName, Shovel, Acc),
+ parse_configuration(Defaults, Env, Acc2);
+ Error ->
+ Error
+ end
+ end;
+parse_configuration(_Defaults, _, _Acc) ->
+ {error, require_list_of_shovel_configurations}.
+
+validate_shovel_config(ShovelName, ShovelConfig) ->
+ rabbit_shovel_config:parse(ShovelName, ShovelConfig).
diff --git a/deps/rabbitmq_shovel/src/rabbit_shovel_util.erl b/deps/rabbitmq_shovel/src/rabbit_shovel_util.erl
new file mode 100644
index 0000000000..509db7de00
--- /dev/null
+++ b/deps/rabbitmq_shovel/src/rabbit_shovel_util.erl
@@ -0,0 +1,54 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_shovel_util).
+
+-export([update_headers/5,
+ add_timestamp_header/1,
+ delete_shovel/3,
+ restart_shovel/2]).
+
+-include_lib("rabbit_common/include/rabbit_framing.hrl").
+
+-define(ROUTING_HEADER, <<"x-shovelled">>).
+-define(TIMESTAMP_HEADER, <<"x-shovelled-timestamp">>).
+
+update_headers(Prefix, Suffix, SrcURI, DestURI,
+ Props = #'P_basic'{headers = Headers}) ->
+ Table = Prefix ++ [{<<"src-uri">>, SrcURI},
+ {<<"dest-uri">>, DestURI}] ++ Suffix,
+ Headers2 = rabbit_basic:prepend_table_header(
+ ?ROUTING_HEADER, [{K, longstr, V} || {K, V} <- Table],
+ Headers),
+ Props#'P_basic'{headers = Headers2}.
+
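+%% Sets the x-shovelled-timestamp header to the current UNIX time in
+%% seconds; set_table_value/4 replaces any existing value of that header.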
+add_timestamp_header(Props = #'P_basic'{headers = undefined}) ->
+ add_timestamp_header(Props#'P_basic'{headers = []});
+add_timestamp_header(Props = #'P_basic'{headers = Headers}) ->
+ Headers2 = rabbit_misc:set_table_value(Headers,
+ ?TIMESTAMP_HEADER,
+ long,
+ os:system_time(seconds)),
+ Props#'P_basic'{headers = Headers2}.
+
+delete_shovel(VHost, Name, ActingUser) ->
+ case rabbit_shovel_status:lookup({VHost, Name}) of
+ not_found ->
+ {error, not_found};
+ _Obj ->
+ ok = rabbit_runtime_parameters:clear(VHost, <<"shovel">>, Name, ActingUser)
+ end.
+
+restart_shovel(VHost, Name) ->
+ case rabbit_shovel_status:lookup({VHost, Name}) of
+ not_found ->
+ {error, not_found};
+ _Obj ->
+ ok = rabbit_shovel_dyn_worker_sup_sup:stop_child({VHost, Name}),
+ {ok, _} = rabbit_shovel_dyn_worker_sup_sup:start_link(),
+ ok
+ end.
diff --git a/deps/rabbitmq_shovel/src/rabbit_shovel_worker.erl b/deps/rabbitmq_shovel/src/rabbit_shovel_worker.erl
new file mode 100644
index 0000000000..919db25910
--- /dev/null
+++ b/deps/rabbitmq_shovel/src/rabbit_shovel_worker.erl
@@ -0,0 +1,224 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_shovel_worker).
+-behaviour(gen_server2).
+
+-export([start_link/3]).
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
+ code_change/3]).
+
+%% for testing purposes
+-export([get_connection_name/1]).
+
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include("rabbit_shovel.hrl").
+
+-record(state, {inbound_conn, inbound_ch, outbound_conn, outbound_ch,
+ name, type, config, inbound_uri, outbound_uri, unacked,
+ remaining, %% [1]
+ remaining_unacked}). %% [2]
+
+%% [1] Counts down until we shut down in all modes
+%% [2] Counts down until we stop publishing in on-confirm mode
+
+start_link(Type, Name, Config) ->
+ ok = rabbit_shovel_status:report(Name, Type, starting),
+ gen_server2:start_link(?MODULE, [Type, Name, Config], []).
+
+%%---------------------------
+%% Gen Server Implementation
+%%---------------------------
+
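+%% Startup is staged through self-casts: init -> connect_dest ->
+%% init_shovel. Each stage stores the updated config in the state before
+%% the next one runs, so a failure part-way through still knows about the
+%% connections opened so far.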
+init([Type, Name, Config0]) ->
+ Config = case Type of
+ static ->
+ Config0;
+ dynamic ->
+ ClusterName = rabbit_nodes:cluster_name(),
+ {ok, Conf} = rabbit_shovel_parameters:parse(Name,
+ ClusterName,
+ Config0),
+ Conf
+ end,
+ rabbit_log_shovel:debug("Initialising a Shovel ~s of type '~s'", [human_readable_name(Name), Type]),
+ gen_server2:cast(self(), init),
+ {ok, #state{name = Name, type = Type, config = Config}}.
+
+handle_call(_Msg, _From, State) ->
+ {noreply, State}.
+
+handle_cast(init, State = #state{config = Config0}) ->
+ try rabbit_shovel_behaviour:connect_source(Config0) of
+ Config ->
+ rabbit_log_shovel:debug("Shovel ~s connected to source", [human_readable_name(maps:get(name, Config))]),
+ %% this makes sure that the connection pid is updated in case
+ %% any of the subsequent connection/init steps fail. See
+ %% rabbitmq/rabbitmq-shovel#54 for context.
+ gen_server2:cast(self(), connect_dest),
+ {noreply, State#state{config = Config}}
+ catch _:_ ->
+ rabbit_log_shovel:error("Shovel ~s could not connect to source", [human_readable_name(maps:get(name, Config0))]),
+ {stop, shutdown, State}
+ end;
+handle_cast(connect_dest, State = #state{config = Config0}) ->
+ try rabbit_shovel_behaviour:connect_dest(Config0) of
+ Config ->
+ rabbit_log_shovel:debug("Shovel ~s connected to destination", [human_readable_name(maps:get(name, Config))]),
+ gen_server2:cast(self(), init_shovel),
+ {noreply, State#state{config = Config}}
+ catch _:_ ->
+ rabbit_log_shovel:error("Shovel ~s could not connect to destination", [human_readable_name(maps:get(name, Config0))]),
+ {stop, shutdown, State}
+ end;
+handle_cast(init_shovel, State = #state{config = Config}) ->
+ %% Don't trap exits until the connections have been established, so
+ %% that a shutdown request arriving while we are still waiting for a
+ %% connection does not block us.
+ process_flag(trap_exit, true),
+ Config1 = rabbit_shovel_behaviour:init_dest(Config),
+ Config2 = rabbit_shovel_behaviour:init_source(Config1),
+ rabbit_log_shovel:debug("Shovel ~s has finished setting up its topology", [human_readable_name(maps:get(name, Config2))]),
+ State1 = State#state{config = Config2},
+ ok = report_running(State1),
+ {noreply, State1}.
+
+handle_info(Msg, State = #state{config = Config, name = Name}) ->
+ case rabbit_shovel_behaviour:handle_source(Msg, Config) of
+ not_handled ->
+ case rabbit_shovel_behaviour:handle_dest(Msg, Config) of
+ not_handled ->
+ rabbit_log_shovel:warning("Shovel ~s received a message that neither its source nor its destination could handle: ~p", [human_readable_name(Name), Msg]),
+ {noreply, State};
+ {stop, {outbound_conn_died, heartbeat_timeout}} ->
+ rabbit_log_shovel:error("Shovel ~s detected missed heartbeats on destination connection", [human_readable_name(Name)]),
+ {stop, {shutdown, heartbeat_timeout}, State};
+ {stop, {outbound_conn_died, Reason}} ->
+ rabbit_log_shovel:error("Shovel ~s detected destination connection failure: ~p", [human_readable_name(Name), Reason]),
+ {stop, Reason, State};
+ {stop, Reason} ->
+ rabbit_log_shovel:debug("Shovel ~s decided to stop due to a message from destination: ~p", [human_readable_name(Name), Reason]),
+ {stop, Reason, State};
+ Config1 ->
+ {noreply, State#state{config = Config1}}
+ end;
+ {stop, {inbound_conn_died, heartbeat_timeout}} ->
+ rabbit_log_shovel:error("Shovel ~s detected missed heartbeats on source connection", [human_readable_name(Name)]),
+ {stop, {shutdown, heartbeat_timeout}, State};
+ {stop, {inbound_conn_died, Reason}} ->
+ rabbit_log_shovel:error("Shovel ~s detected source connection failure: ~p", [human_readable_name(Name), Reason]),
+ {stop, Reason, State};
+ {stop, Reason} ->
+ rabbit_log_shovel:error("Shovel ~s decided to stop due to a message from source: ~p", [human_readable_name(Name), Reason]),
+ {stop, Reason, State};
+ Config1 ->
+ {noreply, State#state{config = Config1}}
+ end.
+
+terminate({shutdown, autodelete}, State = #state{name = {VHost, Name},
+ type = dynamic}) ->
+ rabbit_log_shovel:info("Shovel ~s is stopping (it was configured to autodelete and the transfer has completed)",
+ [human_readable_name({VHost, Name})]),
+ close_connections(State),
+ %% See rabbit_shovel_dyn_worker_sup_sup:stop_child/1
+ put(shovel_worker_autodelete, true),
+ _ = rabbit_runtime_parameters:clear(VHost, <<"shovel">>, Name, ?SHOVEL_USER),
+ rabbit_shovel_status:remove({VHost, Name}),
+ ok;
+terminate(shutdown, State) ->
+ close_connections(State),
+ ok;
+terminate(socket_closed_unexpectedly, State) ->
+ close_connections(State),
+ ok;
+terminate({'EXIT', heartbeat_timeout}, State = #state{name = Name}) ->
+ rabbit_log_shovel:error("Shovel ~s is stopping because of a heartbeat timeout", [human_readable_name(Name)]),
+ rabbit_shovel_status:report(State#state.name, State#state.type,
+ {terminated, "heartbeat timeout"}),
+ close_connections(State),
+ ok;
+terminate({'EXIT', outbound_conn_died}, State = #state{name = Name}) ->
+ rabbit_log_shovel:error("Shovel ~s is stopping because destination connection failed", [human_readable_name(Name)]),
+ rabbit_shovel_status:report(State#state.name, State#state.type,
+ {terminated, "destination connection failed"}),
+ close_connections(State),
+ ok;
+terminate({'EXIT', inbound_conn_died}, State = #state{name = Name}) ->
+ rabbit_log_shovel:error("Shovel ~s is stopping because source connection failed", [human_readable_name(Name)]),
+ rabbit_shovel_status:report(State#state.name, State#state.type,
+ {terminated, "source connection failed"}),
+ close_connections(State),
+ ok;
+terminate({shutdown, heartbeat_timeout}, State = #state{name = Name}) ->
+ rabbit_log_shovel:error("Shovel ~s is stopping because of a heartbeat timeout", [human_readable_name(Name)]),
+ rabbit_shovel_status:report(State#state.name, State#state.type,
+ {terminated, "heartbeat timeout"}),
+ close_connections(State),
+ ok;
+terminate({shutdown, restart}, State = #state{name = Name}) ->
+ rabbit_log_shovel:error("Shovel ~s is stopping to restart", [human_readable_name(Name)]),
+ rabbit_shovel_status:report(State#state.name, State#state.type,
+ {terminated, "needed a restart"}),
+ close_connections(State),
+ ok;
+terminate(Reason, State = #state{name = Name}) ->
+ rabbit_log_shovel:error("Shovel ~s is stopping, reason: ~p", [human_readable_name(Name), Reason]),
+ rabbit_shovel_status:report(State#state.name, State#state.type,
+ {terminated, Reason}),
+ close_connections(State),
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+%%---------------------------
+%% Helpers
+%%---------------------------
+
+human_readable_name(Name) ->
+ case Name of
+ {VHost, ShovelName} -> rabbit_misc:format("'~s' in virtual host '~s'", [ShovelName, VHost]);
+ ShovelName -> rabbit_misc:format("'~s'", [ShovelName])
+ end.
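+%% e.g. human_readable_name({<<"/">>, <<"s1">>}) -> "'s1' in virtual host '/'"
+%%      human_readable_name(my_shovel)           -> "'my_shovel'"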
+
+report_running(#state{config = Config} = State) ->
+ InUri = rabbit_shovel_behaviour:source_uri(Config),
+ OutUri = rabbit_shovel_behaviour:dest_uri(Config),
+ InProto = rabbit_shovel_behaviour:source_protocol(Config),
+ OutProto = rabbit_shovel_behaviour:dest_protocol(Config),
+ InEndpoint = rabbit_shovel_behaviour:source_endpoint(Config),
+ OutEndpoint = rabbit_shovel_behaviour:dest_endpoint(Config),
+ rabbit_shovel_status:report(State#state.name, State#state.type,
+ {running, [{src_uri, rabbit_data_coercion:to_binary(InUri)},
+ {src_protocol, rabbit_data_coercion:to_binary(InProto)},
+ {dest_protocol, rabbit_data_coercion:to_binary(OutProto)},
+ {dest_uri, rabbit_data_coercion:to_binary(OutUri)}]
+ ++ props_to_binary(InEndpoint) ++ props_to_binary(OutEndpoint)
+ }).
+
+props_to_binary(Props) ->
+ [{K, rabbit_data_coercion:to_binary(V)} || {K, V} <- Props].
+
+%% for static shovels, name is an atom from the configuration file
+get_connection_name(ShovelName) when is_atom(ShovelName) ->
+ Prefix = <<"Shovel ">>,
+ ShovelNameAsBinary = atom_to_binary(ShovelName, utf8),
+ <<Prefix/binary, ShovelNameAsBinary/binary>>;
+
+%% for dynamic shovels, name is a tuple with a binary
+get_connection_name({_, Name}) when is_binary(Name) ->
+ Prefix = <<"Shovel ">>,
+ <<Prefix/binary, Name/binary>>;
+
+%% fallback
+get_connection_name(_) ->
+ <<"Shovel">>.
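+%% e.g. get_connection_name(my_shovel)           -> <<"Shovel my_shovel">>
+%%      get_connection_name({<<"/">>, <<"s1">>}) -> <<"Shovel s1">>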
+
+close_connections(#state{config = Conf}) ->
+ ok = rabbit_shovel_behaviour:close_source(Conf),
+ ok = rabbit_shovel_behaviour:close_dest(Conf).
diff --git a/deps/rabbitmq_shovel/src/rabbit_shovel_worker_sup.erl b/deps/rabbitmq_shovel/src/rabbit_shovel_worker_sup.erl
new file mode 100644
index 0000000000..afa8f7987e
--- /dev/null
+++ b/deps/rabbitmq_shovel/src/rabbit_shovel_worker_sup.erl
@@ -0,0 +1,32 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_shovel_worker_sup).
+-behaviour(mirrored_supervisor).
+
+-export([start_link/2, init/1]).
+
+-include("rabbit_shovel.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+start_link(ShovelName, ShovelConfig) ->
+ mirrored_supervisor:start_link({local, ShovelName}, ShovelName,
+ fun rabbit_misc:execute_mnesia_transaction/1,
+ ?MODULE, [ShovelName, ShovelConfig]).
+
+init([Name, Config]) ->
+ ChildSpecs = [{Name,
+ {rabbit_shovel_worker, start_link, [static, Name, Config]},
+ case Config of
+ #{reconnect_delay := N}
+ when is_integer(N) andalso N > 0 -> {permanent, N};
+ _ -> temporary
+ end,
+ 16#ffffffff,
+ worker,
+ [rabbit_shovel_worker]}],
+ {ok, {{one_for_one, 1, ?MAX_WAIT}, ChildSpecs}}.
diff --git a/deps/rabbitmq_shovel/test/amqp10_SUITE.erl b/deps/rabbitmq_shovel/test/amqp10_SUITE.erl
new file mode 100644
index 0000000000..37cbcb6c56
--- /dev/null
+++ b/deps/rabbitmq_shovel/test/amqp10_SUITE.erl
@@ -0,0 +1,293 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(amqp10_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-compile(export_all).
+
+-define(EXCHANGE, <<"test_exchange">>).
+-define(TO_SHOVEL, <<"to_the_shovel">>).
+-define(FROM_SHOVEL, <<"from_the_shovel">>).
+-define(UNSHOVELLED, <<"unshovelled">>).
+-define(SHOVELLED, <<"shovelled">>).
+-define(TIMEOUT, 1000).
+
+all() ->
+ [
+ {group, tests}
+ ].
+
+groups() ->
+ [
+ {tests, [], [
+ amqp10_destination_no_ack,
+ amqp10_destination_on_publish,
+ amqp10_destination_on_confirm,
+ amqp10_source_no_ack,
+ amqp10_source_on_publish,
+ amqp10_source_on_confirm
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ {ok, _} = application:ensure_all_started(amqp10_client),
+ rabbit_ct_helpers:log_environment(),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, ?MODULE}
+ ]),
+ rabbit_ct_helpers:run_setup_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps() ++
+ [fun stop_shovel_plugin/1]).
+
+end_per_suite(Config) ->
+ application:stop(amqp10_client),
+ rabbit_ct_helpers:run_teardown_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_group(_, Config) ->
+ Config.
+
+end_per_group(_, Config) ->
+ Config.
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+stop_shovel_plugin(Config) ->
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+ application, stop, [rabbitmq_shovel]),
+ Config.
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+amqp10_destination_no_ack(Config) ->
+ amqp10_destination(Config, no_ack).
+
+amqp10_destination_on_publish(Config) ->
+ amqp10_destination(Config, on_publish).
+
+amqp10_destination_on_confirm(Config) ->
+ amqp10_destination(Config, on_confirm).
+
+amqp10_destination(Config, AckMode) ->
+ TargetQ = <<"a-queue">>,
+ ok = setup_amqp10_destination_shovel(Config, TargetQ, AckMode),
+ Hostname = ?config(rmq_hostname, Config),
+ Port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp),
+ {ok, Conn} = amqp10_client:open_connection(Hostname, Port),
+ {ok, Sess} = amqp10_client:begin_session(Conn),
+ {ok, Receiver} = amqp10_client:attach_receiver_link(Sess,
+ <<"amqp-destination-receiver">>,
+ TargetQ, settled, unsettled_state),
+ ok = amqp10_client:flow_link_credit(Receiver, 5, never),
+ Chan = rabbit_ct_client_helpers:open_channel(Config, 0),
+ Timestamp = erlang:system_time(millisecond),
+ Msg = #amqp_msg{payload = <<42>>,
+ props = #'P_basic'{delivery_mode = 2,
+ headers = [{<<"header1">>, long, 1},
+ {<<"header2">>, longstr, <<"h2">>}],
+ content_encoding = ?UNSHOVELLED,
+ content_type = ?UNSHOVELLED,
+ correlation_id = ?UNSHOVELLED,
+ %% needs to be guest here
+ user_id = <<"guest">>,
+ message_id = ?UNSHOVELLED,
+ reply_to = ?UNSHOVELLED,
+ timestamp = Timestamp,
+ type = ?UNSHOVELLED
+ }},
+ publish(Chan, Msg, ?EXCHANGE, ?TO_SHOVEL),
+
+ receive
+ {amqp10_msg, Receiver, InMsg} ->
+ [<<42>>] = amqp10_msg:body(InMsg),
+ #{content_type := ?UNSHOVELLED,
+ content_encoding := ?UNSHOVELLED,
+ correlation_id := ?UNSHOVELLED,
+ user_id := <<"guest">>,
+ message_id := ?UNSHOVELLED,
+ reply_to := ?UNSHOVELLED
+ %% timestamp gets overwritten
+ % creation_time := Timestamp
+ } = amqp10_msg:properties(InMsg),
+ #{<<"routing_key">> := ?TO_SHOVEL,
+ <<"type">> := ?UNSHOVELLED,
+ <<"header1">> := 1,
+ <<"header2">> := <<"h2">>
+ } = amqp10_msg:application_properties(InMsg),
+ #{durable := true} = amqp10_msg:headers(InMsg),
+ ok
+ after ?TIMEOUT ->
+ throw(timeout_waiting_for_deliver1)
+ end,
+
+ [{test_shovel, static, {running, _Info}, _Time}] =
+ rabbit_ct_broker_helpers:rpc(Config, 0,
+ rabbit_shovel_status, status, []),
+ amqp10_client:detach_link(Receiver),
+ amqp10_client:close_connection(Conn),
+ rabbit_ct_client_helpers:close_channel(Chan).
+
+amqp10_source_no_ack(Config) ->
+ amqp10_source(Config, no_ack).
+
+amqp10_source_on_publish(Config) ->
+ amqp10_source(Config, on_publish).
+
+amqp10_source_on_confirm(Config) ->
+ amqp10_source(Config, on_confirm).
+
+amqp10_source(Config, AckMode) ->
+ SourceQ = <<"source-queue">>,
+ DestQ = <<"dest-queue">>,
+ ok = setup_amqp10_source_shovel(Config, SourceQ, DestQ, AckMode),
+ Chan = rabbit_ct_client_helpers:open_channel(Config, 0),
+ CTag = consume(Chan, DestQ, AckMode =:= no_ack),
+ Msg = #amqp_msg{payload = <<42>>,
+ props = #'P_basic'{delivery_mode = 2,
+ content_type = ?UNSHOVELLED}},
+ % publish to source
+ publish(Chan, Msg, <<>>, SourceQ),
+
+ receive
+ {#'basic.deliver'{consumer_tag = CTag, delivery_tag = AckTag},
+ #amqp_msg{payload = <<42>>,
+ props = #'P_basic'{%delivery_mode = 2,
+ %content_type = ?SHOVELLED,
+ headers = [{<<"x-shovelled">>, _, _},
+ {<<"x-shovelled-timestamp">>,
+ long, _}]}}} ->
+ case AckMode of
+ no_ack -> ok;
+ _ -> ok = amqp_channel:call(
+ Chan, #'basic.ack'{delivery_tag = AckTag})
+ end
+ after ?TIMEOUT -> throw(timeout_waiting_for_deliver1)
+ end,
+
+ [{test_shovel, static, {running, _Info}, _Time}] =
+ rabbit_ct_broker_helpers:rpc(Config, 0,
+ rabbit_shovel_status, status, []),
+ rabbit_ct_client_helpers:close_channel(Chan).
+
+setup_amqp10_source_shovel(Config, SourceQueue, DestQueue, AckMode) ->
+ Hostname = ?config(rmq_hostname, Config),
+ Port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp),
+ Shovel = [{test_shovel,
+ [{source,
+ [{protocol, amqp10},
+ {uris, [rabbit_misc:format("amqp://~s:~b",
+ [Hostname, Port])]},
+ {source_address, SourceQueue}]
+ },
+ {destination,
+ [{uris, [rabbit_misc:format("amqp://~s:~b/%2f?heartbeat=5",
+ [Hostname, Port])]},
+ {declarations,
+ [{'queue.declare', [{queue, DestQueue}, auto_delete]}]},
+ {publish_fields, [{exchange, <<>>},
+ {routing_key, DestQueue}]},
+ {publish_properties, [{delivery_mode, 2},
+ {content_type, ?SHOVELLED}]},
+ {add_forward_headers, true},
+ {add_timestamp_header, true}]},
+ {queue, <<>>},
+ {ack_mode, AckMode}
+ ]}],
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, setup_shovel,
+ [Shovel]).
+
+setup_amqp10_destination_shovel(Config, Queue, AckMode) ->
+ Hostname = ?config(rmq_hostname, Config),
+ Port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp),
+ Shovel = [{test_shovel,
+ [{source,
+ [{uris, [rabbit_misc:format("amqp://~s:~b/%2f?heartbeat=5",
+ [Hostname, Port])]},
+ {declarations,
+ [{'queue.declare', [exclusive, auto_delete]},
+ {'exchange.declare', [{exchange, ?EXCHANGE}, auto_delete]},
+ {'queue.bind', [{queue, <<>>}, {exchange, ?EXCHANGE},
+ {routing_key, ?TO_SHOVEL}]}]},
+ {queue, <<>>}]},
+ {destination,
+ [{protocol, amqp10},
+ {uris, [rabbit_misc:format("amqp://~s:~b",
+ [Hostname, Port])]},
+ {add_forward_headers, true},
+ {add_timestamp_header, true},
+ {target_address, Queue}]
+ },
+ {ack_mode, AckMode}]}],
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, setup_shovel,
+ [Shovel]).
+
+setup_amqp10_shovel(Config, SourceQueue, DestQueue, AckMode) ->
+ Hostname = ?config(rmq_hostname, Config),
+ Port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp),
+ Shovel = [{test_shovel,
+ [{source,
+ [{protocol, amqp10},
+ {uris, [rabbit_misc:format("amqp://~s:~b",
+ [Hostname, Port])]},
+ {source_address, SourceQueue}]},
+ {destination,
+ [{protocol, amqp10},
+ {uris, [rabbit_misc:format("amqp://~s:~b",
+ [Hostname, Port])]},
+ {add_forward_headers, true},
+ {add_timestamp_header, true},
+ {target_address, DestQueue}]
+ },
+ {ack_mode, AckMode}]}],
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, setup_shovel,
+ [Shovel]).
+
+setup_shovel(ShovelConfig) ->
+ _ = application:stop(rabbitmq_shovel),
+ application:set_env(rabbitmq_shovel, shovels, ShovelConfig, infinity),
+ ok = application:start(rabbitmq_shovel),
+ await_running_shovel(test_shovel).
+
+await_running_shovel(Name) ->
+ case [N || {N, _, {running, _}, _}
+ <- rabbit_shovel_status:status(),
+ N =:= Name] of
+ [_] -> ok;
+ _ -> timer:sleep(100),
+ await_running_shovel(Name)
+ end.
+
+consume(Chan, Queue, NoAck) ->
+ #'basic.consume_ok'{consumer_tag = CTag} =
+ amqp_channel:subscribe(Chan, #'basic.consume'{queue = Queue,
+ no_ack = NoAck,
+ exclusive = false},
+ self()),
+ receive
+ #'basic.consume_ok'{consumer_tag = CTag} -> ok
+ after ?TIMEOUT -> throw(timeout_waiting_for_consume_ok)
+ end,
+ CTag.
+
+publish(Chan, Msg, Exchange, RoutingKey) ->
+ ok = amqp_channel:call(Chan, #'basic.publish'{exchange = Exchange,
+ routing_key = RoutingKey},
+ Msg).
diff --git a/deps/rabbitmq_shovel/test/amqp10_dynamic_SUITE.erl b/deps/rabbitmq_shovel/test/amqp10_dynamic_SUITE.erl
new file mode 100644
index 0000000000..c8375ead1a
--- /dev/null
+++ b/deps/rabbitmq_shovel/test/amqp10_dynamic_SUITE.erl
@@ -0,0 +1,410 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(amqp10_dynamic_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, non_parallel_tests},
+ {group, with_map_config}
+ ].
+
+groups() ->
+ [
+ {non_parallel_tests, [], [
+ simple,
+ change_definition,
+ autodelete_amqp091_src_on_confirm,
+ autodelete_amqp091_src_on_publish,
+ autodelete_amqp091_dest_on_confirm,
+ autodelete_amqp091_dest_on_publish,
+ simple_amqp10_dest,
+ simple_amqp10_src
+ ]},
+ {with_map_config, [], [
+ simple,
+ simple_amqp10_dest,
+ simple_amqp10_src
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config0) ->
+ {ok, _} = application:ensure_all_started(amqp10_client),
+ rabbit_ct_helpers:log_environment(),
+ Config = rabbit_ct_helpers:merge_app_env(Config0,
+ [{lager, [{error_logger_hwm, 200}]}]),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, ?MODULE}
+ ]),
+ rabbit_ct_helpers:run_setup_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_suite(Config) ->
+ application:stop(amqp10_client),
+ rabbit_ct_helpers:run_teardown_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_group(with_map_config, Config) ->
+ rabbit_ct_helpers:set_config(Config, [{map_config, true}]);
+init_per_group(_, Config) ->
+ rabbit_ct_helpers:set_config(Config, [{map_config, false}]).
+
+end_per_group(_, Config) ->
+ Config.
+
+init_per_testcase(Testcase, Config0) ->
+ SrcQ = list_to_binary(atom_to_list(Testcase) ++ "_src"),
+ DestQ = list_to_binary(atom_to_list(Testcase) ++ "_dest"),
+ DestQ2 = list_to_binary(atom_to_list(Testcase) ++ "_dest2"),
+ Config = [{srcq, SrcQ}, {destq, DestQ}, {destq2, DestQ2} | Config0],
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+simple(Config) ->
+ Src = ?config(srcq, Config),
+ Dest = ?config(destq, Config),
+ with_session(Config,
+ fun (Sess) ->
+ test_amqp10_destination(Config, Src, Dest, Sess, <<"amqp10">>,
+ <<"src-address">>)
+ end).
+
+simple_amqp10_dest(Config) ->
+ Src = ?config(srcq, Config),
+ Dest = ?config(destq, Config),
+ with_session(Config,
+ fun (Sess) ->
+ test_amqp10_destination(Config, Src, Dest, Sess, <<"amqp091">>,
+ <<"src-queue">>)
+ end).
+
+test_amqp10_destination(Config, Src, Dest, Sess, Protocol, ProtocolSrc) ->
+ MapConfig = ?config(map_config, Config),
+ shovel_test_utils:set_param(Config, <<"test">>,
+ [{<<"src-protocol">>, Protocol},
+ {ProtocolSrc, Src},
+ {<<"dest-protocol">>, <<"amqp10">>},
+ {<<"dest-address">>, Dest},
+ {<<"dest-add-forward-headers">>, true},
+ {<<"dest-add-timestamp-header">>, true},
+ {<<"dest-application-properties">>,
+ case MapConfig of
+ true ->
+ #{<<"app-prop-key">> => <<"app-prop-value">>};
+ _ ->
+ [{<<"app-prop-key">>, <<"app-prop-value">>}]
+ end},
+ {<<"dest-properties">>,
+ case MapConfig of
+ true ->
+ #{<<"user_id">> => <<"guest">>};
+ _ ->
+ [{<<"user_id">>, <<"guest">>}]
+ end},
+ {<<"dest-message-annotations">>,
+ case MapConfig of
+ true ->
+ #{<<"message-ann-key">> =>
+ <<"message-ann-value">>};
+ _ ->
+ [{<<"message-ann-key">>,
+ <<"message-ann-value">>}]
+ end}]),
+ Msg = publish_expect(Sess, Src, Dest, <<"tag1">>, <<"hello">>),
+ ?assertMatch((#{user_id := <<"guest">>, creation_time := _}),
+ (amqp10_msg:properties(Msg))),
+ ?assertMatch((#{<<"shovel-name">> := <<"test">>,
+ <<"shovel-type">> := <<"dynamic">>, <<"shovelled-by">> := _,
+ <<"app-prop-key">> := <<"app-prop-value">>}),
+ (amqp10_msg:application_properties(Msg))),
+ ?assertMatch((#{<<"message-ann-key">> := <<"message-ann-value">>}),
+ (amqp10_msg:message_annotations(Msg))).
+
+simple_amqp10_src(Config) ->
+ MapConfig = ?config(map_config, Config),
+ Src = ?config(srcq, Config),
+ Dest = ?config(destq, Config),
+ with_session(Config,
+ fun (Sess) ->
+ shovel_test_utils:set_param(
+ Config,
+ <<"test">>, [{<<"src-protocol">>, <<"amqp10">>},
+ {<<"src-address">>, Src},
+ {<<"dest-protocol">>, <<"amqp091">>},
+ {<<"dest-queue">>, Dest},
+ {<<"add-forward-headers">>, true},
+ {<<"dest-add-timestamp-header">>, true},
+ {<<"publish-properties">>,
+ case MapConfig of
+ true -> #{<<"cluster_id">> => <<"x">>};
+ _ -> [{<<"cluster_id">>, <<"x">>}]
+ end}
+ ]),
+ _Msg = publish_expect(Sess, Src, Dest, <<"tag1">>,
+ <<"hello">>),
+ % the fidelity loss is quite high when consuming using the amqp10
+ % plugin. For example, custom headers aren't currently translated.
+ % This isn't due to the shovel though.
+ ok
+ end).
+
+change_definition(Config) ->
+ Src = ?config(srcq, Config),
+ Dest = ?config(destq, Config),
+ Dest2 = ?config(destq2, Config),
+ with_session(Config,
+ fun (Sess) ->
+ shovel_test_utils:set_param(Config, <<"test">>,
+ [{<<"src-address">>, Src},
+ {<<"src-protocol">>, <<"amqp10">>},
+ {<<"dest-protocol">>, <<"amqp10">>},
+ {<<"dest-address">>, Dest}]),
+ publish_expect(Sess, Src, Dest, <<"tag2">>,<<"hello">>),
+ shovel_test_utils:set_param(Config, <<"test">>,
+ [{<<"src-address">>, Src},
+ {<<"src-protocol">>, <<"amqp10">>},
+ {<<"dest-protocol">>, <<"amqp10">>},
+ {<<"dest-address">>, Dest2}]),
+ publish_expect(Sess, Src, Dest2, <<"tag3">>, <<"hello">>),
+ expect_empty(Sess, Dest),
+ shovel_test_utils:clear_param(Config, <<"test">>),
+ publish_expect(Sess, Src, Src, <<"tag4">>, <<"hello2">>),
+ expect_empty(Sess, Dest),
+ expect_empty(Sess, Dest2)
+ end).
+
+autodelete_amqp091_src_on_confirm(Config) ->
+ autodelete_case(Config, {<<"on-confirm">>, 50, 50, 50},
+ fun autodelete_amqp091_src/2),
+ ok.
+
+autodelete_amqp091_src_on_publish(Config) ->
+ autodelete_case(Config, {<<"on-publish">>, 50, 50, 50},
+ fun autodelete_amqp091_src/2),
+ ok.
+
+autodelete_amqp091_dest_on_confirm(Config) ->
+ autodelete_case(Config, {<<"on-confirm">>, 50, 50, 50},
+ fun autodelete_amqp091_dest/2),
+ ok.
+
+autodelete_amqp091_dest_on_publish(Config) ->
+ autodelete_case(Config, {<<"on-publish">>, 50, 50, 50},
+ fun autodelete_amqp091_dest/2),
+ ok.
+
+autodelete_case(Config, Args, CaseFun) ->
+ with_session(Config, CaseFun(Config, Args)).
+
+autodelete_do(Config, {AckMode, After, ExpSrc, ExpDest}) ->
+ Src = ?config(srcq, Config),
+ Dest = ?config(destq, Config),
+ fun (Session) ->
+ publish_count(Session, Src, <<"hello">>, 100),
+ shovel_test_utils:set_param_nowait(
+ Config,
+ <<"test">>, [{<<"src-address">>, Src},
+ {<<"src-protocol">>, <<"amqp10">>},
+ {<<"src-delete-after">>, After},
+ {<<"src-prefetch-count">>, 5},
+ {<<"dest-address">>, Dest},
+ {<<"dest-protocol">>, <<"amqp10">>},
+ {<<"ack-mode">>, AckMode}
+ ]),
+ await_autodelete(Config, <<"test">>),
+ expect_count(Session, Dest, <<"hello">>, ExpDest),
+ expect_count(Session, Src, <<"hello">>, ExpSrc)
+ end.
+
+autodelete_amqp091_src(Config, {AckMode, After, ExpSrc, ExpDest}) ->
+ Src = ?config(srcq, Config),
+ Dest = ?config(destq, Config),
+ fun (Session) ->
+ publish_count(Session, Src, <<"hello">>, 100),
+ shovel_test_utils:set_param_nowait(
+ Config,
+ <<"test">>, [{<<"src-queue">>, Src},
+ {<<"src-protocol">>, <<"amqp091">>},
+ {<<"src-delete-after">>, After},
+ {<<"src-prefetch-count">>, 5},
+ {<<"dest-address">>, Dest},
+ {<<"dest-protocol">>, <<"amqp10">>},
+ {<<"ack-mode">>, AckMode}
+ ]),
+ await_autodelete(Config, <<"test">>),
+ expect_count(Session, Dest, <<"hello">>, ExpDest),
+ expect_count(Session, Src, <<"hello">>, ExpSrc)
+ end.
+
+autodelete_amqp091_dest(Config, {AckMode, After, ExpSrc, ExpDest}) ->
+ Src = ?config(srcq, Config),
+ Dest = ?config(destq, Config),
+ fun (Session) ->
+ publish_count(Session, Src, <<"hello">>, 100),
+ shovel_test_utils:set_param_nowait(
+ Config,
+ <<"test">>, [{<<"src-address">>, Src},
+ {<<"src-protocol">>, <<"amqp10">>},
+ {<<"src-delete-after">>, After},
+ {<<"src-prefetch-count">>, 5},
+ {<<"dest-queue">>, Dest},
+ {<<"dest-protocol">>, <<"amqp091">>},
+ {<<"ack-mode">>, AckMode}
+ ]),
+ await_autodelete(Config, <<"test">>),
+ expect_count(Session, Dest, <<"hello">>, ExpDest),
+ expect_count(Session, Src, <<"hello">>, ExpSrc)
+ end.
+
+%%----------------------------------------------------------------------------
+
+with_session(Config, Fun) ->
+ Hostname = ?config(rmq_hostname, Config),
+ Port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp),
+ {ok, Conn} = amqp10_client:open_connection(Hostname, Port),
+ {ok, Sess} = amqp10_client:begin_session(Conn),
+ Fun(Sess),
+ amqp10_client:close_connection(Conn),
+ ok.
+
+publish(Sender, Tag, Payload) when is_binary(Payload) ->
+ Headers = #{durable => true},
+ Msg = amqp10_msg:set_headers(Headers,
+ amqp10_msg:new(Tag, Payload, false)),
+ ok = amqp10_client:send_msg(Sender, Msg),
+ receive
+ {amqp10_disposition, {accepted, Tag}} -> ok
+ after 3000 ->
+ exit(publish_disposition_not_received)
+ end.
+
+publish_expect(Session, Source, Dest, Tag, Payload) ->
+ LinkName = <<"dynamic-sender-", Dest/binary>>,
+ {ok, Sender} = amqp10_client:attach_sender_link(Session, LinkName, Source,
+ unsettled, unsettled_state),
+ ok = await_amqp10_event(link, Sender, attached),
+ publish(Sender, Tag, Payload),
+ amqp10_client:detach_link(Sender),
+ expect_one(Session, Dest, Payload).
+
+await_amqp10_event(On, Ref, Evt) ->
+ receive
+ {amqp10_event, {On, Ref, Evt}} -> ok
+ after 5000 ->
+ exit({amqp10_event_timeout, On, Ref, Evt})
+ end.
+
+expect_one(Session, Dest, Payload) ->
+ LinkName = <<"dynamic-receiver-", Dest/binary>>,
+ {ok, Receiver} = amqp10_client:attach_receiver_link(Session, LinkName,
+ Dest, settled,
+ unsettled_state),
+ ok = amqp10_client:flow_link_credit(Receiver, 1, never),
+ Msg = expect(Receiver, Payload),
+ amqp10_client:detach_link(Receiver),
+ Msg.
+
+expect(Receiver, _Payload) ->
+ receive
+ {amqp10_msg, Receiver, InMsg} ->
+ InMsg
+ after 4000 ->
+ throw(timeout_in_expect_waiting_for_delivery)
+ end.
+
+expect_empty(Session, Dest) ->
+ {ok, Receiver} = amqp10_client:attach_receiver_link(Session,
+ <<"dynamic-receiver">>,
+ Dest, settled,
+ unsettled_state),
+ % probably good enough given we don't currently have a means of
+ % echoing flow state
+ {error, timeout} = amqp10_client:get_msg(Receiver, 250),
+ amqp10_client:detach_link(Receiver).
+
+publish_count(Session, Address, Payload, Count) ->
+ LinkName = <<"dynamic-sender-", Address/binary>>,
+ {ok, Sender} = amqp10_client:attach_sender_link(Session, LinkName,
+ Address, unsettled,
+ unsettled_state),
+ ok = await_amqp10_event(link, Sender, attached),
+ [begin
+ Tag = rabbit_data_coercion:to_binary(I),
+ publish(Sender, Tag, <<Payload/binary, Tag/binary>>)
+ end || I <- lists:seq(1, Count)],
+ amqp10_client:detach_link(Sender).
+
+expect_count(Session, Address, Payload, Count) ->
+ {ok, Receiver} = amqp10_client:attach_receiver_link(Session,
+ <<"dynamic-receiver",
+ Address/binary>>,
+ Address, settled,
+ unsettled_state),
+ ok = amqp10_client:flow_link_credit(Receiver, Count, never),
+ [begin
+ expect(Receiver, Payload)
+ end || _ <- lists:seq(1, Count)],
+ expect_empty(Session, Address),
+ amqp10_client:detach_link(Receiver).
+
+invalid_param(Config, Value, User) ->
+ {error_string, _} = rabbit_ct_broker_helpers:rpc(Config, 0,
+ rabbit_runtime_parameters, set,
+ [<<"/">>, <<"shovel">>, <<"invalid">>, Value, User]).
+
+valid_param(Config, Value, User) ->
+ rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, valid_param1, [Config, Value, User]).
+
+valid_param1(_Config, Value, User) ->
+ ok = rabbit_runtime_parameters:set(
+ <<"/">>, <<"shovel">>, <<"a">>, Value, User),
+ ok = rabbit_runtime_parameters:clear(<<"/">>, <<"shovel">>, <<"a">>, <<"acting-user">>).
+
+invalid_param(Config, Value) -> invalid_param(Config, Value, none).
+valid_param(Config, Value) -> valid_param(Config, Value, none).
+
+lookup_user(Config, Name) ->
+ {ok, User} = rabbit_ct_broker_helpers:rpc(Config, 0,
+ rabbit_access_control, check_user_login, [Name, []]),
+ User.
+
+await_autodelete(Config, Name) ->
+ rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, await_autodelete1, [Config, Name], 10000).
+
+await_autodelete1(_Config, Name) ->
+ shovel_test_utils:await(
+ fun () -> not lists:member(Name, shovels_from_parameters()) end),
+ shovel_test_utils:await(
+ fun () ->
+ not lists:member(Name,
+ shovel_test_utils:shovels_from_status())
+ end).
+
+shovels_from_parameters() ->
+ L = rabbit_runtime_parameters:list(<<"/">>, <<"shovel">>),
+ [rabbit_misc:pget(name, Shovel) || Shovel <- L].
diff --git a/deps/rabbitmq_shovel/test/amqp10_shovel_SUITE.erl b/deps/rabbitmq_shovel/test/amqp10_shovel_SUITE.erl
new file mode 100644
index 0000000000..8c52889ed6
--- /dev/null
+++ b/deps/rabbitmq_shovel/test/amqp10_shovel_SUITE.erl
@@ -0,0 +1,102 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(amqp10_shovel_SUITE).
+
+-compile(export_all).
+
+-export([
+ ]).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("amqp10_common/include/amqp10_framing.hrl").
+
+%%%===================================================================
+%%% Common Test callbacks
+%%%===================================================================
+
+all() ->
+ [
+ {group, tests}
+ ].
+
+all_tests() ->
+ [
+ amqp_encoded_data_list,
+ amqp_encoded_amqp_value
+ ].
+
+groups() ->
+ [
+ {tests, [], all_tests()}
+ ].
+
+init_per_suite(Config) ->
+ Config.
+
+end_per_suite(_Config) ->
+ ok.
+
+init_per_group(_Group, Config) ->
+ Config.
+
+end_per_group(_Group, _Config) ->
+ ok.
+
+init_per_testcase(_TestCase, Config) ->
+ Config.
+
+end_per_testcase(_TestCase, _Config) ->
+ meck:unload(),
+ ok.
+
+%%%===================================================================
+%%% Test cases
+%%%===================================================================
+
+amqp_encoded_data_list(_Config) ->
+ meck:new(rabbit_shovel_behaviour, [passthrough]),
+ meck:expect(rabbit_shovel_behaviour, forward,
+ fun (_, _, Pay, S) ->
+ ?assert(erlang:is_binary(Pay)),
+ S
+ end),
+ %% fake some shovel state
+ State = #{source => #{},
+ dest => #{module => rabbit_amqp10_shovel},
+ ack_mode => no_ack},
+ Body = [
+ #'v1_0.data'{content = <<"one">>},
+ #'v1_0.data'{content = <<"two">>}
+ ],
+ Msg = amqp10_msg:new(55, Body),
+ rabbit_amqp10_shovel:handle_source({amqp10_msg, linkref, Msg}, State),
+
+ ?assert(meck:validate(rabbit_shovel_behaviour)),
+ ok.
+
+amqp_encoded_amqp_value(_Config) ->
+ meck:new(rabbit_shovel_behaviour, [passthrough]),
+ meck:expect(rabbit_shovel_behaviour, forward,
+ fun (_, _, Pay, S) ->
+ ?assert(erlang:is_binary(Pay)),
+ S
+ end),
+ %% fake some shovel state
+ State = #{source => #{},
+ dest => #{module => rabbit_amqp10_shovel},
+ ack_mode => no_ack},
+ Body = #'v1_0.amqp_value'{content = {utf8, <<"hi">>}},
+ Msg = amqp10_msg:new(55, Body),
+ rabbit_amqp10_shovel:handle_source({amqp10_msg, linkref, Msg}, State),
+
+ ?assert(meck:validate(rabbit_shovel_behaviour)),
+ ok.
+
+%% Utility
diff --git a/deps/rabbitmq_shovel/test/config_SUITE.erl b/deps/rabbitmq_shovel/test/config_SUITE.erl
new file mode 100644
index 0000000000..d637ad1048
--- /dev/null
+++ b/deps/rabbitmq_shovel/test/config_SUITE.erl
@@ -0,0 +1,130 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(config_SUITE).
+
+-compile(export_all).
+
+-include_lib("eunit/include/eunit.hrl").
+
+-define(EXCHANGE, <<"test_exchange">>).
+-define(TO_SHOVEL, <<"to_the_shovel">>).
+-define(FROM_SHOVEL, <<"from_the_shovel">>).
+-define(UNSHOVELLED, <<"unshovelled">>).
+-define(SHOVELLED, <<"shovelled">>).
+-define(TIMEOUT, 1000).
+
+all() ->
+ [
+ {group, tests}
+ ].
+
+groups() ->
+ [
+ {tests, [parallel], [
+ parse_amqp091,
+ parse_amqp10_mixed
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ Config.
+
+end_per_suite(Config) ->
+ Config.
+
+init_per_group(_, Config) ->
+ Config.
+
+end_per_group(_, Config) ->
+ Config.
+
+init_per_testcase(_Testcase, Config) -> Config.
+
+end_per_testcase(_Testcase, Config) -> Config.
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+parse_amqp091(_Config) ->
+ Amqp091Src = {source, [{protocol, amqp091},
+ {uris, ["amqp://myhost:5672/vhost"]},
+ {declarations, []},
+ {queue, <<"the-queue">>},
+ {delete_after, never},
+ {prefetch_count, 10}]},
+ Amqp091Dst = {destination, [{protocol, amqp091},
+ {uris, ["amqp://myhost:5672"]},
+ {declarations, []},
+ {publish_properties, [{delivery_mode, 1}]},
+ {publish_fields, []},
+ {add_forward_headers, true}]},
+ In = [Amqp091Src,
+ Amqp091Dst,
+ {ack_mode, on_confirm},
+ {reconnect_delay, 2}],
+
+ ?assertMatch(
+ {ok, #{name := my_shovel,
+ ack_mode := on_confirm,
+ reconnect_delay := 2,
+ dest := #{module := rabbit_amqp091_shovel,
+ uris := ["amqp://myhost:5672"],
+ fields_fun := _PubFields,
+ props_fun := _PubProps,
+ resource_decl := _DDecl,
+ add_timestamp_header := false,
+ add_forward_headers := true},
+ source := #{module := rabbit_amqp091_shovel,
+                          uris := ["amqp://myhost:5672/vhost"],
+ queue := <<"the-queue">>,
+ prefetch_count := 10,
+ delete_after := never,
+ resource_decl := _SDecl}}},
+ rabbit_shovel_config:parse(my_shovel, In)),
+ ok.
+
+parse_amqp10_mixed(_Config) ->
+ Amqp10Src = {source, [{protocol, amqp10},
+                          {uris, ["amqp://myotherhost:5672"]},
+ {source_address, <<"the-queue">>}
+ ]},
+ Amqp10Dst = {destination, [{protocol, amqp10},
+                               {uris, ["amqp://myhost:5672"]},
+                               {target_address, <<"target-queue">>},
+                               {message_annotations, [{some_ann, <<"some-info">>}]},
+ {properties, [{user_id, <<"some-user">>}]},
+ {application_properties, [{app_prop_key, <<"app_prop_value">>}]},
+ {add_forward_headers, true}
+ ]},
+ In = [Amqp10Src,
+ Amqp10Dst,
+ {ack_mode, on_confirm},
+ {reconnect_delay, 2}],
+
+ ?assertMatch(
+ {ok, #{name := my_shovel,
+ ack_mode := on_confirm,
+ source := #{module := rabbit_amqp10_shovel,
+                          uris := ["amqp://myotherhost:5672"],
+ source_address := <<"the-queue">>
+ },
+ dest := #{module := rabbit_amqp10_shovel,
+                        uris := ["amqp://myhost:5672"],
+                        target_address := <<"target-queue">>,
+                        properties := #{user_id := <<"some-user">>},
+                        application_properties := #{app_prop_key := <<"app_prop_value">>},
+                        message_annotations := #{some_ann := <<"some-info">>},
+ add_forward_headers := true}}},
+ rabbit_shovel_config:parse(my_shovel, In)),
+ ok.
diff --git a/deps/rabbitmq_shovel/test/configuration_SUITE.erl b/deps/rabbitmq_shovel/test/configuration_SUITE.erl
new file mode 100644
index 0000000000..03f287c85d
--- /dev/null
+++ b/deps/rabbitmq_shovel/test/configuration_SUITE.erl
@@ -0,0 +1,359 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(configuration_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-compile(export_all).
+
+-define(EXCHANGE, <<"test_exchange">>).
+-define(TO_SHOVEL, <<"to_the_shovel">>).
+-define(FROM_SHOVEL, <<"from_the_shovel">>).
+-define(UNSHOVELLED, <<"unshovelled">>).
+-define(SHOVELLED, <<"shovelled">>).
+-define(TIMEOUT, 1000).
+
+all() ->
+ [
+ {group, non_parallel_tests}
+ ].
+
+groups() ->
+ [
+ {non_parallel_tests, [], [
+ zero_shovels,
+ invalid_legacy_configuration,
+ valid_legacy_configuration,
+ valid_configuration
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, ?MODULE}
+ ]),
+ rabbit_ct_helpers:run_setup_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps() ++
+ [fun stop_shovel_plugin/1]).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_group(_, Config) ->
+ Config.
+
+end_per_group(_, Config) ->
+ Config.
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+stop_shovel_plugin(Config) ->
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+ application, stop, [rabbitmq_shovel]),
+ Config.
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+zero_shovels(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, zero_shovels1, [Config]).
+
+zero_shovels1(_Config) ->
+ %% shovel can be started with zero shovels configured
+ ok = application:start(rabbitmq_shovel),
+ ok = application:stop(rabbitmq_shovel),
+ passed.
+
+invalid_legacy_configuration(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, invalid_legacy_configuration1, [Config]).
+
+invalid_legacy_configuration1(_Config) ->
+ %% various ways of breaking the config
+ require_list_of_shovel_configurations =
+ test_broken_shovel_configs(invalid_config),
+
+ require_list_of_shovel_configurations =
+ test_broken_shovel_configs([{test_shovel, invalid_shovel_config}]),
+
+ Config = [{sources, [{broker, "amqp://"}]},
+ {destinations, [{broker, "amqp://"}]},
+ {queue, <<"">>}],
+
+ {duplicate_shovel_definition, test_shovel} =
+ test_broken_shovel_configs(
+ [{test_shovel, Config}, {test_shovel, Config}]),
+
+ {invalid_parameters, [{invalid, invalid, invalid}]} =
+ test_broken_shovel_config([{invalid, invalid, invalid} | Config]),
+
+ {duplicate_parameters, [queue]} =
+ test_broken_shovel_config([{queue, <<"">>} | Config]),
+
+ {missing_parameter, _} =
+ test_broken_shovel_config([]),
+
+ {require_list, invalid} =
+ test_broken_shovel_sources(invalid),
+
+ {missing_parameter, broker} =
+ test_broken_shovel_sources([]),
+
+ {require_list, brokers, invalid} =
+ test_broken_shovel_sources([{brokers, invalid}]),
+
+ {expected_string_uri, 42} =
+ test_broken_shovel_sources([{brokers, [42]}]),
+
+ {{unexpected_uri_scheme, "invalid"}, "invalid://"} =
+ test_broken_shovel_sources([{broker, "invalid://"}]),
+
+ {{unable_to_parse_uri, no_scheme}, "invalid"} =
+ test_broken_shovel_sources([{broker, "invalid"}]),
+
+ {require_list, invalid} =
+ test_broken_shovel_sources([{broker, "amqp://"},
+ {declarations, invalid}]),
+ {unknown_method_name, 42} =
+ test_broken_shovel_sources([{broker, "amqp://"},
+ {declarations, [42]}]),
+
+ {expected_method_field_list, 'queue.declare', 42} =
+ test_broken_shovel_sources([{broker, "amqp://"},
+ {declarations, [{'queue.declare', 42}]}]),
+
+ {unknown_fields, 'queue.declare', [invalid]} =
+ test_broken_shovel_sources(
+ [{broker, "amqp://"},
+ {declarations, [{'queue.declare', [invalid]}]}]),
+
+ {{invalid_amqp_params_parameter, heartbeat, "text",
+ [{"heartbeat", "text"}], {not_an_integer, "text"}}, _} =
+ test_broken_shovel_sources(
+ [{broker, "amqp://localhost/?heartbeat=text"}]),
+
+ {{invalid_amqp_params_parameter, username, "text",
+ [{"username", "text"}],
+ {parameter_unconfigurable_in_query, username, "text"}}, _} =
+ test_broken_shovel_sources([{broker, "amqp://?username=text"}]),
+
+ {invalid_parameter_value, prefetch_count,
+ {require_non_negative_integer, invalid}} =
+ test_broken_shovel_config([{prefetch_count, invalid} | Config]),
+
+ {invalid_parameter_value, ack_mode,
+ {ack_mode_value_requires_one_of,
+ {no_ack, on_publish, on_confirm}, invalid}} =
+ test_broken_shovel_config([{ack_mode, invalid} | Config]),
+
+ {invalid_parameter_value, queue,
+ {require_binary, invalid}} =
+ test_broken_shovel_config([{sources, [{broker, "amqp://"}]},
+ {destinations, [{broker, "amqp://"}]},
+ {queue, invalid}]),
+
+ {invalid_parameter_value, publish_properties,
+ {require_list, invalid}} =
+ test_broken_shovel_config([{publish_properties, invalid} | Config]),
+
+ {invalid_parameter_value, publish_properties,
+ {unexpected_fields, [invalid], _}} =
+ test_broken_shovel_config([{publish_properties, [invalid]} | Config]),
+
+ {{invalid_ssl_parameter, fail_if_no_peer_cert, "42", _,
+ {require_boolean, '42'}}, _} =
+ test_broken_shovel_sources([{broker, "amqps://username:password@host:5673/vhost?cacertfile=/path/to/cacert.pem&certfile=/path/to/certfile.pem&keyfile=/path/to/keyfile.pem&verify=verify_peer&fail_if_no_peer_cert=42"}]),
+
+ passed.
+
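+%% Set the given shovels configuration and assert that the application
+%% fails to start; returns the error reason for the caller to match on.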
+test_broken_shovel_configs(Configs) ->
+ application:set_env(rabbitmq_shovel, shovels, Configs),
+ {error, {Error, _}} = application:start(rabbitmq_shovel),
+ Error.
+
+test_broken_shovel_config(Config) ->
+ {invalid_shovel_configuration, test_shovel, Error} =
+ test_broken_shovel_configs([{test_shovel, Config}]),
+ Error.
+
+test_broken_shovel_sources(Sources) ->
+ test_broken_shovel_config([{sources, Sources},
+ {destinations, [{broker, "amqp://"}]},
+ {queue, <<"">>}]).
+
+valid_legacy_configuration(Config) ->
+ ok = setup_legacy_shovels(Config),
+ run_valid_test(Config).
+
+valid_configuration(Config) ->
+ ok = setup_shovels(Config),
+ run_valid_test(Config).
+
+run_valid_test(Config) ->
+ Chan = rabbit_ct_client_helpers:open_channel(Config, 0),
+
+ #'queue.declare_ok'{ queue = Q } =
+ amqp_channel:call(Chan, #'queue.declare' { exclusive = true }),
+ #'queue.bind_ok'{} =
+ amqp_channel:call(Chan, #'queue.bind' { queue = Q, exchange = ?EXCHANGE,
+ routing_key = ?FROM_SHOVEL }),
+ #'queue.bind_ok'{} =
+ amqp_channel:call(Chan, #'queue.bind' { queue = Q, exchange = ?EXCHANGE,
+ routing_key = ?TO_SHOVEL }),
+
+ #'basic.consume_ok'{ consumer_tag = CTag } =
+ amqp_channel:subscribe(Chan,
+ #'basic.consume' { queue = Q, exclusive = true },
+ self()),
+ receive
+ #'basic.consume_ok'{ consumer_tag = CTag } -> ok
+ after ?TIMEOUT -> throw(timeout_waiting_for_consume_ok)
+ end,
+
+ ok = amqp_channel:call(Chan,
+ #'basic.publish' { exchange = ?EXCHANGE,
+ routing_key = ?TO_SHOVEL },
+ #amqp_msg { payload = <<42>>,
+ props = #'P_basic' {
+ delivery_mode = 2,
+ content_type = ?UNSHOVELLED }
+ }),
+
+ receive
+ {#'basic.deliver' { consumer_tag = CTag, delivery_tag = AckTag,
+ routing_key = ?FROM_SHOVEL },
+ #amqp_msg { payload = <<42>>,
+ props = #'P_basic' {
+ delivery_mode = 2,
+ content_type = ?SHOVELLED,
+ headers = [{<<"x-shovelled">>, _, _},
+ {<<"x-shovelled-timestamp">>,
+ long, _}]}
+ }} ->
+ ok = amqp_channel:call(Chan, #'basic.ack'{ delivery_tag = AckTag })
+ after ?TIMEOUT -> throw(timeout_waiting_for_deliver1)
+ end,
+
+ [{test_shovel, static, {running, _Info}, _Time}] =
+ rabbit_ct_broker_helpers:rpc(Config, 0,
+ rabbit_shovel_status, status, []),
+
+ receive
+ {#'basic.deliver' { consumer_tag = CTag, delivery_tag = AckTag1,
+ routing_key = ?TO_SHOVEL },
+ #amqp_msg { payload = <<42>>,
+ props = #'P_basic' { delivery_mode = 2,
+ content_type = ?UNSHOVELLED }
+ }} ->
+ ok = amqp_channel:call(Chan, #'basic.ack'{ delivery_tag = AckTag1 })
+ after ?TIMEOUT -> throw(timeout_waiting_for_deliver2)
+ end,
+
+ rabbit_ct_client_helpers:close_channel(Chan).
+
+setup_legacy_shovels(Config) ->
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, setup_legacy_shovels1, [Config]).
+
+setup_shovels(Config) ->
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, setup_shovels1, [Config]).
+
+setup_legacy_shovels1(Config) ->
+ _ = application:stop(rabbitmq_shovel),
+ Hostname = ?config(rmq_hostname, Config),
+ TcpPort = rabbit_ct_broker_helpers:get_node_config(Config, 0,
+ tcp_port_amqp),
+ %% a working config
+ application:set_env(
+ rabbitmq_shovel,
+ shovels,
+ [{test_shovel,
+ [{sources,
+ [{broker, rabbit_misc:format("amqp://~s:~b/%2f?heartbeat=5",
+ [Hostname, TcpPort])},
+ {declarations,
+ [{'queue.declare', [exclusive, auto_delete]},
+ {'exchange.declare', [{exchange, ?EXCHANGE}, auto_delete]},
+ {'queue.bind', [{queue, <<>>}, {exchange, ?EXCHANGE},
+ {routing_key, ?TO_SHOVEL}]}
+ ]}]},
+ {destinations,
+ [{broker, rabbit_misc:format("amqp://~s:~b/%2f",
+ [Hostname, TcpPort])}]},
+ {queue, <<>>},
+ {ack_mode, on_confirm},
+ {publish_fields, [{exchange, ?EXCHANGE}, {routing_key, ?FROM_SHOVEL}]},
+ {publish_properties, [{delivery_mode, 2},
+ {cluster_id, <<"my-cluster">>},
+ {content_type, ?SHOVELLED}]},
+ {add_forward_headers, true},
+ {add_timestamp_header, true}
+ ]}],
+ infinity),
+
+ ok = application:start(rabbitmq_shovel),
+ await_running_shovel(test_shovel).
+
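+%% Same topology as setup_legacy_shovels1/1, but expressed in the
+%% current source/destination format with 'uris' lists.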
+setup_shovels1(Config) ->
+ _ = application:stop(rabbitmq_shovel),
+ Hostname = ?config(rmq_hostname, Config),
+ TcpPort = rabbit_ct_broker_helpers:get_node_config(Config, 0,
+ tcp_port_amqp),
+ %% a working config
+ application:set_env(
+ rabbitmq_shovel,
+ shovels,
+ [{test_shovel,
+ [{source,
+ [{uris, [rabbit_misc:format("amqp://~s:~b/%2f?heartbeat=5",
+ [Hostname, TcpPort])]},
+ {declarations,
+ [{'queue.declare', [exclusive, auto_delete]},
+ {'exchange.declare', [{exchange, ?EXCHANGE}, auto_delete]},
+ {'queue.bind', [{queue, <<>>}, {exchange, ?EXCHANGE},
+ {routing_key, ?TO_SHOVEL}]}]},
+ {queue, <<>>}]},
+ {destination,
+ [{uris, [rabbit_misc:format("amqp://~s:~b/%2f",
+ [Hostname, TcpPort])]},
+ {publish_fields, [{exchange, ?EXCHANGE}, {routing_key, ?FROM_SHOVEL}]},
+ {publish_properties, [{delivery_mode, 2},
+ {cluster_id, <<"my-cluster">>},
+ {content_type, ?SHOVELLED}]},
+ {add_forward_headers, true},
+ {add_timestamp_header, true}]},
+ {ack_mode, on_confirm}]}],
+ infinity),
+
+ ok = application:start(rabbitmq_shovel),
+ await_running_shovel(test_shovel).
+
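+%% Poll the shovel status every 100ms until the named shovel reports
+%% as running.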
+await_running_shovel(Name) ->
+ case [N || {N, _, {running, _}, _}
+ <- rabbit_shovel_status:status(),
+ N =:= Name] of
+ [_] -> ok;
+ _ -> timer:sleep(100),
+ await_running_shovel(Name)
+ end.
diff --git a/deps/rabbitmq_shovel/test/delete_shovel_command_SUITE.erl b/deps/rabbitmq_shovel/test/delete_shovel_command_SUITE.erl
new file mode 100644
index 0000000000..6af7a39c65
--- /dev/null
+++ b/deps/rabbitmq_shovel/test/delete_shovel_command_SUITE.erl
@@ -0,0 +1,78 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(delete_shovel_command_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-compile(export_all).
+
+-define(CMD, 'Elixir.RabbitMQ.CLI.Ctl.Commands.DeleteShovelCommand').
+
+all() ->
+ [
+ {group, non_parallel_tests}
+ ].
+
+groups() ->
+ [
+ {non_parallel_tests, [], [
+ delete_not_found,
+ delete
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, ?MODULE}
+ ]),
+ Config2 = rabbit_ct_helpers:run_setup_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()),
+ Config2.
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_group(_, Config) ->
+ Config.
+
+end_per_group(_, Config) ->
+ Config.
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+delete_not_found(Config) ->
+ [A] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ Opts = #{node => A, vhost => <<"/">>},
+ {error, _} = ?CMD:run([<<"myshovel">>], Opts).
+
+delete(Config) ->
+ shovel_test_utils:set_param(
+ Config,
+ <<"myshovel">>, [{<<"src-queue">>, <<"src">>},
+ {<<"dest-queue">>, <<"dest">>}]),
+ [A] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ Opts = #{node => A, vhost => <<"/">>},
+ ok = ?CMD:run([<<"myshovel">>], Opts),
+ [] = rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_shovel_status,
+ status, []).
diff --git a/deps/rabbitmq_shovel/test/dynamic_SUITE.erl b/deps/rabbitmq_shovel/test/dynamic_SUITE.erl
new file mode 100644
index 0000000000..5311af3c6d
--- /dev/null
+++ b/deps/rabbitmq_shovel/test/dynamic_SUITE.erl
@@ -0,0 +1,494 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(dynamic_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, non_parallel_tests}
+ ].
+
+groups() ->
+ [
+ {non_parallel_tests, [], [
+ simple,
+ set_properties_using_proplist,
+ set_properties_using_map,
+ set_empty_properties_using_proplist,
+ set_empty_properties_using_map,
+ headers,
+ exchange,
+ restart,
+ change_definition,
+ autodelete,
+ validation,
+ security_validation,
+ get_connection_name
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, ?MODULE}
+ ]),
+ rabbit_ct_helpers:run_setup_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_group(_, Config) ->
+ Config.
+
+end_per_group(_, Config) ->
+ Config.
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+simple(Config) ->
+ with_ch(Config,
+ fun (Ch) ->
+ shovel_test_utils:set_param(
+ Config,
+ <<"test">>, [{<<"src-queue">>, <<"src">>},
+ {<<"dest-queue">>, <<"dest">>}]),
+ publish_expect(Ch, <<>>, <<"src">>, <<"dest">>, <<"hello">>)
+ end).
+
+set_properties_using_map(Config) ->
+ with_ch(Config,
+ fun (Ch) ->
+ Ps = [{<<"src-queue">>, <<"src">>},
+ {<<"dest-queue">>, <<"dest">>},
+ {<<"publish-properties">>, #{<<"cluster_id">> => <<"x">>}}],
+ shovel_test_utils:set_param(Config, <<"test">>, Ps),
+ #amqp_msg{props = #'P_basic'{cluster_id = Cluster}} =
+ publish_expect(Ch, <<>>, <<"src">>, <<"dest">>, <<"hi">>),
+ <<"x">> = Cluster
+ end).
+
+set_properties_using_proplist(Config) ->
+ with_ch(Config,
+ fun (Ch) ->
+ Ps = [{<<"src-queue">>, <<"src">>},
+ {<<"dest-queue">>, <<"dest">>},
+ {<<"publish-properties">>, [{<<"cluster_id">>, <<"x">>}]}],
+ shovel_test_utils:set_param(Config, <<"test">>, Ps),
+ #amqp_msg{props = #'P_basic'{cluster_id = Cluster}} =
+ publish_expect(Ch, <<>>, <<"src">>, <<"dest">>, <<"hi">>),
+ <<"x">> = Cluster
+ end).
+
+set_empty_properties_using_map(Config) ->
+ with_ch(Config,
+ fun (Ch) ->
+ Ps = [{<<"src-queue">>, <<"src">>},
+ {<<"dest-queue">>, <<"dest">>},
+ {<<"publish-properties">>, #{}}],
+ shovel_test_utils:set_param(Config, <<"test">>, Ps),
+ #amqp_msg{props = #'P_basic'{}} =
+ publish_expect(Ch, <<>>, <<"src">>, <<"dest">>, <<"hi">>)
+ end).
+
+set_empty_properties_using_proplist(Config) ->
+ with_ch(Config,
+ fun (Ch) ->
+ Ps = [{<<"src-queue">>, <<"src">>},
+ {<<"dest-queue">>, <<"dest">>},
+ {<<"publish-properties">>, []}],
+ shovel_test_utils:set_param(Config, <<"test">>, Ps),
+ #amqp_msg{props = #'P_basic'{}} =
+ publish_expect(Ch, <<>>, <<"src">>, <<"dest">>, <<"hi">>)
+ end).
+
+headers(Config) ->
+ with_ch(Config,
+ fun(Ch) ->
+ %% No headers by default
+ shovel_test_utils:set_param(Config,
+ <<"test">>,
+ [{<<"src-queue">>, <<"src">>},
+ {<<"dest-queue">>, <<"dest">>}]),
+ #amqp_msg{props = #'P_basic'{headers = undefined}} =
+ publish_expect(Ch, <<>>, <<"src">>, <<"dest">>, <<"hi1">>),
+
+ shovel_test_utils:set_param(Config,
+ <<"test">>,
+ [{<<"src-queue">>, <<"src">>},
+ {<<"dest-queue">>, <<"dest">>},
+ {<<"add-forward-headers">>, true},
+ {<<"add-timestamp-header">>, true}]),
+              Timestamp = os:system_time(seconds),
+ #amqp_msg{props = #'P_basic'{headers = Headers}} =
+ publish_expect(Ch, <<>>, <<"src">>, <<"dest">>, <<"hi2">>),
+ [{<<"x-shovelled">>, _, [{table, ShovelledHeader}]},
+ {<<"x-shovelled-timestamp">>, long, TS}] = Headers,
+ %% We assume that the message was shovelled within a 2 second
+ %% window.
+              true = TS >= Timestamp andalso TS =< Timestamp + 2,
+ {<<"shovel-type">>, _, <<"dynamic">>} =
+ lists:keyfind(<<"shovel-type">>, 1, ShovelledHeader),
+ {<<"shovel-vhost">>, _, <<"/">>} =
+ lists:keyfind(<<"shovel-vhost">>, 1, ShovelledHeader),
+ {<<"shovel-name">>, _, <<"test">>} =
+ lists:keyfind(<<"shovel-name">>, 1, ShovelledHeader),
+
+ shovel_test_utils:set_param(Config,
+ <<"test">>,
+ [{<<"src-queue">>, <<"src">>},
+ {<<"dest-queue">>, <<"dest">>},
+ {<<"add-timestamp-header">>, true}]),
+ #amqp_msg{props = #'P_basic'{headers = [{<<"x-shovelled-timestamp">>,
+ long, _}]}} =
+ publish_expect(Ch, <<>>, <<"src">>, <<"dest">>, <<"hi3">>),
+
+ shovel_test_utils:set_param(Config,
+ <<"test">>,
+ [{<<"src-queue">>, <<"src">>},
+ {<<"dest-queue">>, <<"dest">>},
+ {<<"add-forward-headers">>, true}]),
+ #amqp_msg{props = #'P_basic'{headers = [{<<"x-shovelled">>,
+ _, _}]}} =
+ publish_expect(Ch, <<>>, <<"src">>, <<"dest">>, <<"hi4">>)
+
+ end).
+
+exchange(Config) ->
+ with_ch(Config,
+ fun (Ch) ->
+ amqp_channel:call(Ch, #'queue.declare'{queue = <<"queue">>,
+ durable = true}),
+ amqp_channel:call(
+ Ch, #'queue.bind'{queue = <<"queue">>,
+ exchange = <<"amq.topic">>,
+ routing_key = <<"test-key">>}),
+ shovel_test_utils:set_param(Config,
+ <<"test">>, [{<<"src-exchange">>, <<"amq.direct">>},
+ {<<"src-exchange-key">>,<<"test-key">>},
+ {<<"dest-exchange">>, <<"amq.topic">>}]),
+ publish_expect(Ch, <<"amq.direct">>, <<"test-key">>,
+ <<"queue">>, <<"hello">>),
+ shovel_test_utils:set_param(Config,
+ <<"test">>, [{<<"src-exchange">>, <<"amq.direct">>},
+ {<<"src-exchange-key">>, <<"test-key">>},
+ {<<"dest-exchange">>, <<"amq.topic">>},
+ {<<"dest-exchange-key">>,<<"new-key">>}]),
+ publish(Ch, <<"amq.direct">>, <<"test-key">>, <<"hello">>),
+ expect_empty(Ch, <<"queue">>),
+ amqp_channel:call(
+ Ch, #'queue.bind'{queue = <<"queue">>,
+ exchange = <<"amq.topic">>,
+ routing_key = <<"new-key">>}),
+ publish_expect(Ch, <<"amq.direct">>, <<"test-key">>,
+ <<"queue">>, <<"hello">>)
+ end).
+
+restart(Config) ->
+ with_ch(Config,
+ fun (Ch) ->
+ shovel_test_utils:set_param(Config,
+ <<"test">>, [{<<"src-queue">>, <<"src">>},
+ {<<"dest-queue">>, <<"dest">>}]),
+              %% The catch is because connections link to the shovel:
+              %% closing one connection kills the shovel, which in turn
+              %% kills the other connection before we can close it
+ Conns = rabbit_ct_broker_helpers:rpc(Config, 0,
+ rabbit_direct, list, []),
+ [catch amqp_connection:close(C) || C <- Conns],
+ publish_expect(Ch, <<>>, <<"src">>, <<"dest">>, <<"hello">>)
+ end).
+
+change_definition(Config) ->
+ with_ch(Config,
+ fun (Ch) ->
+ shovel_test_utils:set_param(
+ Config,
+ <<"test">>, [{<<"src-queue">>, <<"src">>},
+ {<<"dest-queue">>, <<"dest">>}]),
+ publish_expect(Ch, <<>>, <<"src">>, <<"dest">>, <<"hello">>),
+ shovel_test_utils:set_param(
+ Config,
+ <<"test">>, [{<<"src-queue">>, <<"src">>},
+ {<<"dest-queue">>, <<"dest2">>}]),
+ publish_expect(Ch, <<>>, <<"src">>, <<"dest2">>, <<"hello">>),
+ expect_empty(Ch, <<"dest">>),
+ shovel_test_utils:clear_param(Config, <<"test">>),
+ publish_expect(Ch, <<>>, <<"src">>, <<"src">>, <<"hello">>),
+ expect_empty(Ch, <<"dest">>),
+ expect_empty(Ch, <<"dest2">>)
+ end).
+
+autodelete(Config) ->
+ autodelete_case(Config, {<<"on-confirm">>, 50, 50, 50}),
+ autodelete_case(Config, {<<"on-confirm">>, <<"queue-length">>, 0, 100}),
+ autodelete_case(Config, {<<"on-publish">>, <<"queue-length">>, 0, 100}),
+ autodelete_case(Config, {<<"on-publish">>, 50, 50, 50}),
+ %% no-ack is not compatible with explicit count
+ autodelete_case(Config, {<<"no-ack">>, <<"queue-length">>, 0, 100}),
+ ok.
+
+autodelete_case(Config, Args) ->
+ with_ch(Config, autodelete_do(Config, Args)).
+
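+%% Args is {AckMode, DeleteAfter, ExpectedOnSrc, ExpectedOnDest}: publish
+%% 100 messages, run a shovel with the given ack mode and src-delete-after
+%% value, then check how many messages remain on each queue.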
+autodelete_do(Config, {AckMode, After, ExpSrc, ExpDest}) ->
+ fun (Ch) ->
+ amqp_channel:call(Ch, #'confirm.select'{}),
+ amqp_channel:call(Ch, #'queue.declare'{queue = <<"src">>}),
+ publish_count(Ch, <<>>, <<"src">>, <<"hello">>, 100),
+ amqp_channel:wait_for_confirms(Ch),
+ shovel_test_utils:set_param_nowait(
+ Config,
+ <<"test">>, [{<<"src-queue">>, <<"src">>},
+ {<<"dest-queue">>, <<"dest">>},
+ {<<"src-prefetch-count">>, 50},
+ {<<"ack-mode">>, AckMode},
+ {<<"src-delete-after">>, After}]),
+ await_autodelete(Config, <<"test">>),
+ expect_count(Ch, <<"dest">>, <<"hello">>, ExpDest),
+ expect_count(Ch, <<"src">>, <<"hello">>, ExpSrc)
+ end.
+
+validation(Config) ->
+ URIs = [{<<"src-uri">>, <<"amqp://">>},
+ {<<"dest-uri">>, <<"amqp://">>}],
+
+ %% Need valid src and dest URIs
+ invalid_param(Config, []),
+ invalid_param(Config,
+ [{<<"src-queue">>, <<"test">>},
+ {<<"src-uri">>, <<"derp">>},
+ {<<"dest-uri">>, <<"amqp://">>}]),
+ invalid_param(Config,
+ [{<<"src-queue">>, <<"test">>},
+ {<<"src-uri">>, [<<"derp">>]},
+ {<<"dest-uri">>, <<"amqp://">>}]),
+ invalid_param(Config,
+ [{<<"src-queue">>, <<"test">>},
+ {<<"dest-uri">>, <<"amqp://">>}]),
+
+ %% Also need src exchange or queue
+ invalid_param(Config,
+ URIs),
+ valid_param(Config,
+ [{<<"src-exchange">>, <<"test">>} | URIs]),
+ QURIs = [{<<"src-queue">>, <<"test">>} | URIs],
+ valid_param(Config, QURIs),
+
+ %% But not both
+ invalid_param(Config,
+ [{<<"src-exchange">>, <<"test">>} | QURIs]),
+
+ %% Check these are of right type
+ invalid_param(Config,
+ [{<<"prefetch-count">>, <<"three">>} | QURIs]),
+ invalid_param(Config,
+ [{<<"reconnect-delay">>, <<"three">>} | QURIs]),
+ invalid_param(Config,
+ [{<<"ack-mode">>, <<"whenever">>} | QURIs]),
+ invalid_param(Config,
+ [{<<"src-delete-after">>, <<"whenever">>} | QURIs]),
+
+ %% Check properties have to look property-ish
+ valid_param(Config,
+ [{<<"src-exchange">>, <<"test">>},
+ {<<"publish-properties">>, [{<<"cluster_id">>, <<"rabbit@localhost">>},
+ {<<"delivery_mode">>, 2}]}
+ | URIs]),
+ valid_param(Config,
+ #{<<"publish-properties">> => #{<<"cluster_id">> => <<"rabbit@localhost">>,
+ <<"delivery_mode">> => 2},
+ <<"src-exchange">> => <<"test">>,
+ <<"src-uri">> => <<"amqp://">>,
+ <<"dest-uri">> => <<"amqp://">>}),
+ invalid_param(Config,
+ [{<<"publish-properties">>, [{<<"nonexistent">>, <<>>}]}]),
+ invalid_param(Config,
+ #{<<"publish-properties">> => #{<<"nonexistent">> => <<>>}}),
+ invalid_param(Config,
+ [{<<"publish-properties">>, [{<<"cluster_id">>, 2}]}]),
+ invalid_param(Config,
+ [{<<"publish-properties">>, <<"something">>}]),
+
+ %% Can't use explicit message count and no-ack together
+ invalid_param(Config,
+ [{<<"src-delete-after">>, 1},
+ {<<"ack-mode">>, <<"no-ack">>} | QURIs]),
+ %% superseded by src-delete-after
+ invalid_param(Config,
+ [{<<"delete-after">>, 1},
+ {<<"ack-mode">>, <<"no-ack">>} | QURIs]),
+ ok.
+
+security_validation(Config) ->
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, security_validation_add_user, []),
+
+ Qs = [{<<"src-queue">>, <<"test">>},
+ {<<"dest-queue">>, <<"test2">>}],
+
+ A = lookup_user(Config, <<"a">>),
+ valid_param(Config, [{<<"src-uri">>, <<"amqp://localhost:5672/a">>},
+ {<<"dest-uri">>, <<"amqp://localhost:5672/b">>} | Qs], A),
+ %% src-uri and dest-uri are not valid URIs
+ invalid_param(Config,
+ [{<<"src-uri">>, <<"an arbitrary string">>},
+ {<<"dest-uri">>, <<"\o/ \o/ \o/">>} | Qs], A),
+ %% missing src-queue and dest-queue
+ invalid_param(Config,
+ [{<<"src-uri">>, <<"amqp://localhost/a">>},
+ {<<"dest-uri">>, <<"amqp://localhost/b">>}], A),
+
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, security_validation_remove_user, []),
+ ok.
+
+security_validation_add_user() ->
+ [begin
+ rabbit_vhost:add(U, <<"acting-user">>),
+ rabbit_auth_backend_internal:add_user(U, <<>>, <<"acting-user">>),
+ rabbit_auth_backend_internal:set_permissions(
+ U, U, <<".*">>, <<".*">>, <<".*">>, <<"acting-user">>)
+ end || U <- [<<"a">>, <<"b">>]],
+ ok.
+
+security_validation_remove_user() ->
+ [begin
+ rabbit_vhost:delete(U, <<"acting-user">>),
+ rabbit_auth_backend_internal:delete_user(U, <<"acting-user">>)
+ end || U <- [<<"a">>, <<"b">>]],
+ ok.
+
+get_connection_name(_Config) ->
+ <<"Shovel static_shovel_name_as_atom">> = rabbit_shovel_worker:get_connection_name(static_shovel_name_as_atom),
+ <<"Shovel dynamic_shovel_name_as_binary">> = rabbit_shovel_worker:get_connection_name({<<"/">>, <<"dynamic_shovel_name_as_binary">>}),
+ <<"Shovel">> = rabbit_shovel_worker:get_connection_name({<<"/">>, {unexpected, tuple}}),
+ <<"Shovel">> = rabbit_shovel_worker:get_connection_name({one, two, three}),
+ <<"Shovel">> = rabbit_shovel_worker:get_connection_name(<<"anything else">>).
+
+
+%%----------------------------------------------------------------------------
+
+with_ch(Config, Fun) ->
+ Ch = rabbit_ct_client_helpers:open_channel(Config, 0),
+ Fun(Ch),
+ rabbit_ct_client_helpers:close_channel(Ch),
+ cleanup(Config),
+ ok.
+
+publish(Ch, X, Key, Payload) when is_binary(Payload) ->
+ publish(Ch, X, Key, #amqp_msg{payload = Payload});
+
+publish(Ch, X, Key, Msg = #amqp_msg{}) ->
+ amqp_channel:cast(Ch, #'basic.publish'{exchange = X,
+ routing_key = Key}, Msg).
+
+publish_expect(Ch, X, Key, Q, Payload) ->
+ publish(Ch, X, Key, Payload),
+ expect(Ch, Q, Payload).
+
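+%% Consume from Q and wait for a delivery carrying Payload, cancelling
+%% the consumer afterwards; fails if nothing arrives within 4 seconds.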
+expect(Ch, Q, Payload) ->
+ amqp_channel:subscribe(Ch, #'basic.consume'{queue = Q,
+ no_ack = true}, self()),
+ CTag = receive
+ #'basic.consume_ok'{consumer_tag = CT} -> CT
+ end,
+ Msg = receive
+ {#'basic.deliver'{}, #amqp_msg{payload = Payload} = M} ->
+ M
+ after 4000 ->
+ exit({not_received, Payload})
+ end,
+ amqp_channel:call(Ch, #'basic.cancel'{consumer_tag = CTag}),
+ Msg.
+
+expect_empty(Ch, Q) ->
+ #'basic.get_empty'{} = amqp_channel:call(Ch, #'basic.get'{ queue = Q }).
+
+publish_count(Ch, X, Key, M, Count) ->
+    [publish(Ch, X, Key, M) || _ <- lists:seq(1, Count)].
+
+expect_count(Ch, Q, M, Count) ->
+    [expect(Ch, Q, M) || _ <- lists:seq(1, Count)],
+    expect_empty(Ch, Q).
+
+invalid_param(Config, Value, User) ->
+ {error_string, _} = rabbit_ct_broker_helpers:rpc(Config, 0,
+ rabbit_runtime_parameters, set,
+ [<<"/">>, <<"shovel">>, <<"invalid">>, Value, User]).
+
+valid_param(Config, Value, User) ->
+ rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, valid_param1, [Config, Value, User]).
+
+valid_param1(_Config, Value, User) ->
+ ok = rabbit_runtime_parameters:set(
+ <<"/">>, <<"shovel">>, <<"name">>, Value, User),
+ ok = rabbit_runtime_parameters:clear(<<"/">>, <<"shovel">>, <<"name">>, <<"acting-user">>).
+
+invalid_param(Config, Value) -> invalid_param(Config, Value, none).
+valid_param(Config, Value) -> valid_param(Config, Value, none).
+
+lookup_user(Config, Name) ->
+ {ok, User} = rabbit_ct_broker_helpers:rpc(Config, 0,
+ rabbit_access_control, check_user_login, [Name, []]),
+ User.
+
+cleanup(Config) ->
+ rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, cleanup1, [Config]).
+
+cleanup1(_Config) ->
+ [rabbit_runtime_parameters:clear(rabbit_misc:pget(vhost, P),
+ rabbit_misc:pget(component, P),
+ rabbit_misc:pget(name, P),
+ <<"acting-user">>) ||
+ P <- rabbit_runtime_parameters:list()],
+ [rabbit_amqqueue:delete(Q, false, false, <<"acting-user">>)
+ || Q <- rabbit_amqqueue:list()].
+
+await_autodelete(Config, Name) ->
+ rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, await_autodelete1, [Config, Name]).
+
+await_autodelete1(_Config, Name) ->
+ shovel_test_utils:await(
+ fun () -> not lists:member(Name, shovels_from_parameters()) end),
+ shovel_test_utils:await(
+ fun () ->
+ not lists:member(Name,
+ shovel_test_utils:shovels_from_status())
+ end).
+
+shovels_from_parameters() ->
+ L = rabbit_runtime_parameters:list(<<"/">>, <<"shovel">>),
+ [rabbit_misc:pget(name, Shovel) || Shovel <- L].
diff --git a/deps/rabbitmq_shovel/test/parameters_SUITE.erl b/deps/rabbitmq_shovel/test/parameters_SUITE.erl
new file mode 100644
index 0000000000..516b1bd190
--- /dev/null
+++ b/deps/rabbitmq_shovel/test/parameters_SUITE.erl
@@ -0,0 +1,350 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(parameters_SUITE).
+
+-compile(export_all).
+
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("rabbit_common/include/rabbit_framing.hrl").
+
+-define(EXCHANGE, <<"test_exchange">>).
+-define(TO_SHOVEL, <<"to_the_shovel">>).
+-define(FROM_SHOVEL, <<"from_the_shovel">>).
+-define(UNSHOVELLED, <<"unshovelled">>).
+-define(SHOVELLED, <<"shovelled">>).
+-define(TIMEOUT, 1000).
+
+all() ->
+ [
+ {group, tests}
+ ].
+
+groups() ->
+ [
+ {tests, [parallel], [
+ parse_amqp091_maps,
+ parse_amqp091_proplists,
+ parse_amqp091_empty_maps,
+ parse_amqp091_empty_proplists,
+ parse_amqp10,
+ parse_amqp10_minimal,
+ validate_amqp10,
+ validate_amqp10_with_a_map
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ Config.
+
+end_per_suite(Config) ->
+ Config.
+
+init_per_group(_, Config) ->
+ Config.
+
+end_per_group(_, Config) ->
+ Config.
+
+init_per_testcase(_Testcase, Config) -> Config.
+
+end_per_testcase(_Testcase, Config) -> Config.
+
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+parse_amqp091_maps(_Config) ->
+ Params =
+ [{<<"src-uri">>, <<"amqp://localhost:5672">>},
+ {<<"src-protocol">>, <<"amqp091">>},
+ {<<"dest-protocol">>, <<"amqp091">>},
+ {<<"dest-uri">>, <<"amqp://remotehost:5672">>},
+ {<<"add-forward-headers">>, true},
+ {<<"add-timestamp-header">>, true},
+ {<<"publish-properties">>, #{<<"cluster_id">> => <<"x">>,
+ <<"delivery_mode">> => 2}},
+ {<<"ack-mode">>, <<"on-publish">>},
+ {<<"src-delete-after">>, <<"queue-length">>},
+ {<<"prefetch-count">>, 30},
+ {<<"reconnect-delay">>, 1001},
+ {<<"src-queue">>, <<"a-src-queue">>},
+ {<<"dest-queue">>, <<"a-dest-queue">>}
+ ],
+
+ test_parse_amqp091(Params).
+
+parse_amqp091_proplists(_Config) ->
+ Params =
+ [{<<"src-uri">>, <<"amqp://localhost:5672">>},
+ {<<"src-protocol">>, <<"amqp091">>},
+ {<<"dest-protocol">>, <<"amqp091">>},
+ {<<"dest-uri">>, <<"amqp://remotehost:5672">>},
+ {<<"dest-add-forward-headers">>, true},
+ {<<"dest-add-timestamp-header">>, true},
+ {<<"dest-publish-properties">>, [{<<"cluster_id">>, <<"x">>},
+ {<<"delivery_mode">>, 2}]},
+ {<<"ack-mode">>, <<"on-publish">>},
+ {<<"src-delete-after">>, <<"queue-length">>},
+ {<<"src-prefetch-count">>, 30},
+ {<<"reconnect-delay">>, 1001},
+ {<<"src-queue">>, <<"a-src-queue">>},
+ {<<"dest-queue">>, <<"a-dest-queue">>}
+ ],
+ test_parse_amqp091(Params).
+
+parse_amqp091_empty_maps(_Config) ->
+ Params =
+ [{<<"src-uri">>, <<"amqp://localhost:5672">>},
+ {<<"src-protocol">>, <<"amqp091">>},
+ {<<"dest-protocol">>, <<"amqp091">>},
+ {<<"dest-uri">>, <<"amqp://remotehost:5672">>},
+ {<<"dest-add-forward-headers">>, true},
+ {<<"dest-add-timestamp-header">>, true},
+ {<<"dest-publish-properties">>, #{}},
+ {<<"ack-mode">>, <<"on-publish">>},
+ {<<"src-delete-after">>, <<"queue-length">>},
+ {<<"src-prefetch-count">>, 30},
+ {<<"reconnect-delay">>, 1001},
+ {<<"src-queue">>, <<"a-src-queue">>},
+ {<<"dest-queue">>, <<"a-dest-queue">>}
+ ],
+    test_parse_amqp091_with_blank_properties(Params).
+
+parse_amqp091_empty_proplists(_Config) ->
+ Params =
+ [{<<"src-uri">>, <<"amqp://localhost:5672">>},
+ {<<"src-protocol">>, <<"amqp091">>},
+ {<<"dest-protocol">>, <<"amqp091">>},
+ {<<"dest-uri">>, <<"amqp://remotehost:5672">>},
+ {<<"dest-add-forward-headers">>, true},
+ {<<"dest-add-timestamp-header">>, true},
+ {<<"dest-publish-properties">>, []},
+ {<<"ack-mode">>, <<"on-publish">>},
+ {<<"src-delete-after">>, <<"queue-length">>},
+ {<<"src-prefetch-count">>, 30},
+ {<<"reconnect-delay">>, 1001},
+ {<<"src-queue">>, <<"a-src-queue">>},
+ {<<"dest-queue">>, <<"a-dest-queue">>}
+ ],
+    test_parse_amqp091_with_blank_properties(Params).
+
+
+test_parse_amqp091(Params) ->
+ {ok, Result} = rabbit_shovel_parameters:parse({"vhost", "name"},
+ "my-cluster", Params),
+ #{ack_mode := on_publish,
+ name := "name",
+ reconnect_delay := 1001,
+ dest := #{module := rabbit_amqp091_shovel,
+ uris := ["amqp://remotehost:5672"],
+ props_fun := PropsFun
+ },
+ source := #{module := rabbit_amqp091_shovel,
+ uris := ["amqp://localhost:5672"],
+ prefetch_count := 30,
+ queue := <<"a-src-queue">>,
+ delete_after := 'queue-length'}
+ } = Result,
+
+ #'P_basic'{headers = ActualHeaders,
+ delivery_mode = 2,
+ cluster_id = <<"x">>} = PropsFun("amqp://localhost:5672",
+ "amqp://remotehost:5672",
+ #'P_basic'{headers = undefined}),
+    assert_amqp091_headers(ActualHeaders),
+ ok.
+
+test_parse_amqp091_with_blank_properties(Params) ->
+ {ok, Result} = rabbit_shovel_parameters:parse({"vhost", "name"},
+ "my-cluster", Params),
+ #{ack_mode := on_publish,
+ name := "name",
+ reconnect_delay := 1001,
+ dest := #{module := rabbit_amqp091_shovel,
+ uris := ["amqp://remotehost:5672"],
+ props_fun := PropsFun
+ },
+ source := #{module := rabbit_amqp091_shovel,
+ uris := ["amqp://localhost:5672"],
+ prefetch_count := 30,
+ queue := <<"a-src-queue">>,
+ delete_after := 'queue-length'}
+ } = Result,
+
+ #'P_basic'{headers = ActualHeaders} = PropsFun("amqp://localhost:5672",
+ "amqp://remotehost:5672",
+ #'P_basic'{headers = undefined}),
+    assert_amqp091_headers(ActualHeaders),
+ ok.
+
+assert_amqp091_headers(ActualHeaders) ->
+ {_, array, [{table, Shovelled}]} = lists:keyfind(<<"x-shovelled">>, 1, ActualHeaders),
+ {_, long, _} = lists:keyfind(<<"x-shovelled-timestamp">>, 1, ActualHeaders),
+
+ ExpectedHeaders =
+ [{<<"shovelled-by">>, "my-cluster"},
+ {<<"shovel-type">>, <<"dynamic">>},
+ {<<"shovel-name">>, "name"},
+ {<<"shovel-vhost">>, "vhost"},
+ {<<"src-uri">>,"amqp://localhost:5672"},
+ {<<"dest-uri">>,"amqp://remotehost:5672"},
+ {<<"src-queue">>,<<"a-src-queue">>},
+ {<<"dest-queue">>,<<"a-dest-queue">>}],
+ lists:foreach(fun({K, V}) ->
+ ?assertMatch({K, _, V},
+ lists:keyfind(K, 1, Shovelled))
+ end, ExpectedHeaders),
+ ok.
+
+parse_amqp10(_Config) ->
+ Params =
+ [
+ {<<"ack-mode">>, <<"on-publish">>},
+ {<<"reconnect-delay">>, 1001},
+
+ {<<"src-protocol">>, <<"amqp10">>},
+ {<<"src-uri">>, <<"amqp://localhost:5672">>},
+ {<<"src-address">>, <<"a-src-queue">>},
+ {<<"src-delete-after">>, <<"never">>},
+ {<<"src-prefetch-count">>, 30},
+
+ {<<"dest-protocol">>, <<"amqp10">>},
+ {<<"dest-uri">>, <<"amqp://remotehost:5672">>},
+ {<<"dest-address">>, <<"a-dest-queue">>},
+ {<<"dest-add-forward-headers">>, true},
+ {<<"dest-add-timestamp-header">>, true},
+ {<<"dest-application-properties">>, [{<<"some-app-prop">>,
+ <<"app-prop-value">>}]},
+ {<<"dest-message-annotations">>, [{<<"some-message-ann">>,
+ <<"message-ann-value">>}]},
+ {<<"dest-properties">>, [{<<"user_id">>, <<"some-user">>}]}
+ ],
+
+ ?assertMatch(
+ {ok, #{name := "my_shovel",
+ ack_mode := on_publish,
+ source := #{module := rabbit_amqp10_shovel,
+ uris := ["amqp://localhost:5672"],
+ delete_after := never,
+ prefetch_count := 30,
+ source_address := <<"a-src-queue">>
+ },
+ dest := #{module := rabbit_amqp10_shovel,
+ uris := ["amqp://remotehost:5672"],
+ target_address := <<"a-dest-queue">>,
+ message_annotations := #{<<"some-message-ann">> :=
+ <<"message-ann-value">>},
+ application_properties := #{<<"some-app-prop">> :=
+ <<"app-prop-value">>},
+ properties := #{user_id := <<"some-user">>},
+ add_timestamp_header := true,
+ add_forward_headers := true
+ }
+ }},
+ rabbit_shovel_parameters:parse({"vhost", "my_shovel"}, "my-cluster",
+ Params)),
+ ok.
+
+parse_amqp10_minimal(_Config) ->
+ Params =
+ [
+ {<<"src-protocol">>, <<"amqp10">>},
+ {<<"src-uri">>, <<"amqp://localhost:5672">>},
+ {<<"src-address">>, <<"a-src-queue">>},
+
+ {<<"dest-protocol">>, <<"amqp10">>},
+ {<<"dest-uri">>, <<"amqp://remotehost:5672">>},
+ {<<"dest-address">>, <<"a-dest-queue">>}
+ ],
+ ?assertMatch(
+ {ok, #{name := "my_shovel",
+ ack_mode := on_confirm,
+ source := #{module := rabbit_amqp10_shovel,
+ uris := ["amqp://localhost:5672"],
+ delete_after := never,
+ source_address := <<"a-src-queue">>
+ },
+ dest := #{module := rabbit_amqp10_shovel,
+ uris := ["amqp://remotehost:5672"],
+ unacked := #{},
+ target_address := <<"a-dest-queue">>
+ }
+ }},
+ rabbit_shovel_parameters:parse({"vhost", "my_shovel"}, "my-cluster",
+ Params)),
+ ok.
+
+validate_amqp10(_Config) ->
+ Params =
+ [
+ {<<"ack-mode">>, <<"on-publish">>},
+ {<<"reconnect-delay">>, 1001},
+
+ {<<"src-protocol">>, <<"amqp10">>},
+ {<<"src-uri">>, <<"amqp://localhost:5672">>},
+ {<<"src-address">>, <<"a-src-queue">>},
+ {<<"src-delete-after">>, <<"never">>},
+ {<<"src-prefetch-count">>, 30},
+
+ {<<"dest-protocol">>, <<"amqp10">>},
+ {<<"dest-uri">>, <<"amqp://remotehost:5672">>},
+ {<<"dest-address">>, <<"a-dest-queue">>},
+ {<<"dest-add-forward-headers">>, true},
+ {<<"dest-add-timestamp-header">>, true},
+ {<<"dest-application-properties">>, [{<<"some-app-prop">>,
+ <<"app-prop-value">>}]},
+ {<<"dest-message-annotations">>, [{<<"some-message-ann">>,
+ <<"message-ann-value">>}]},
+ {<<"dest-properties">>, [{<<"user_id">>, <<"some-user">>}]}
+ ],
+
+ Res = rabbit_shovel_parameters:validate("my-vhost", <<"shovel">>,
+ "my-shovel", Params, none),
+ [] = validate_ok(Res),
+ ok.
+
+validate_amqp10_with_a_map(_Config) ->
+ Params =
+ #{
+ <<"ack-mode">> => <<"on-publish">>,
+ <<"reconnect-delay">> => 1001,
+
+ <<"src-protocol">> => <<"amqp10">>,
+ <<"src-uri">> => <<"amqp://localhost:5672">>,
+ <<"src-address">> => <<"a-src-queue">>,
+ <<"src-delete-after">> => <<"never">>,
+ <<"src-prefetch-count">> => 30,
+
+ <<"dest-protocol">> => <<"amqp10">>,
+ <<"dest-uri">> => <<"amqp://remotehost:5672">>,
+ <<"dest-address">> => <<"a-dest-queue">>,
+ <<"dest-add-forward-headers">> => true,
+ <<"dest-add-timestamp-header">> => true,
+ <<"dest-application-properties">> => [{<<"some-app-prop">>,
+ <<"app-prop-value">>}],
+ <<"dest-message-annotations">> => [{<<"some-message-ann">>, <<"message-ann-value">>}],
+ <<"dest-properties">> => #{<<"user_id">> => <<"some-user">>}
+ },
+
+ Res = rabbit_shovel_parameters:validate("my-vhost", <<"shovel">>,
+ "my-shovel", Params, none),
+ [] = validate_ok(Res),
+ ok.
+
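+%% Flatten a possibly nested list of validation results, crashing on
+%% anything other than 'ok'; an empty result means validation passed.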
+validate_ok([ok | T]) ->
+ validate_ok(T);
+validate_ok([[_|_] = L | T]) ->
+ validate_ok(L) ++ validate_ok(T);
+validate_ok([]) -> [];
+validate_ok(X) ->
+ exit({not_ok, X}).
diff --git a/deps/rabbitmq_shovel/test/shovel_status_command_SUITE.erl b/deps/rabbitmq_shovel/test/shovel_status_command_SUITE.erl
new file mode 100644
index 0000000000..667822b20c
--- /dev/null
+++ b/deps/rabbitmq_shovel/test/shovel_status_command_SUITE.erl
@@ -0,0 +1,127 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(shovel_status_command_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-compile(export_all).
+
+-define(CMD, 'Elixir.RabbitMQ.CLI.Ctl.Commands.ShovelStatusCommand').
+
+all() ->
+ [
+ {group, non_parallel_tests}
+ ].
+
+groups() ->
+ [
+ {non_parallel_tests, [], [
+ run_not_started,
+ output_not_started,
+ run_starting,
+ output_starting,
+ run_running,
+ output_running
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, ?MODULE}
+ ]),
+ Config2 = rabbit_ct_helpers:run_setup_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()),
+ Config2.
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_group(_, Config) ->
+ Config.
+
+end_per_group(_, Config) ->
+ Config.
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+run_not_started(Config) ->
+ [A] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ Opts = #{node => A},
+ {stream, []} = ?CMD:run([], Opts).
+
+output_not_started(Config) ->
+ [A] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ Opts = #{node => A},
+ {stream, []} = ?CMD:output({stream, []}, Opts).
+
+run_starting(Config) ->
+ shovel_test_utils:set_param_nowait(
+ Config,
+ <<"test">>, [{<<"src-queue">>, <<"src">>},
+ {<<"dest-queue">>, <<"dest">>}]),
+ [A] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ Opts = #{node => A},
+ case ?CMD:run([], Opts) of
+ {stream, [{{<<"/">>, <<"test">>}, dynamic, starting, _}]} ->
+ ok;
+ {stream, []} ->
+ throw(shovel_not_found);
+ {stream, [{{<<"/">>, <<"test">>}, dynamic, {running, _}, _}]} ->
+ ct:pal("Shovel is already running, starting could not be tested!")
+ end,
+ shovel_test_utils:clear_param(Config, <<"test">>).
+
+output_starting(Config) ->
+ [A] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ Opts = #{node => A},
+ {stream, [#{vhost := <<"/">>, name := <<"test">>, type := dynamic,
+ state := starting, last_changed := <<"2016-11-17 10:00:00">>}]}
+ = ?CMD:output({stream, [{{<<"/">>, <<"test">>}, dynamic, starting,
+ {{2016, 11, 17}, {10, 00, 00}}}]}, Opts),
+ shovel_test_utils:clear_param(Config, <<"test">>).
+
+run_running(Config) ->
+ shovel_test_utils:set_param(
+ Config,
+ <<"test">>, [{<<"src-queue">>, <<"src">>},
+ {<<"dest-queue">>, <<"dest">>}]),
+ [A] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ Opts = #{node => A},
+ {stream, [{{<<"/">>, <<"test">>}, dynamic, {running, _}, _}]}
+ = ?CMD:run([], Opts),
+ shovel_test_utils:clear_param(Config, <<"test">>).
+
+output_running(Config) ->
+ [A] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ Opts = #{node => A},
+ {stream, [#{vhost := <<"/">>, name := <<"test">>, type := dynamic,
+ state := running, source := <<"amqp://server-1">>,
+ destination := <<"amqp://server-2">>,
+ termination_reason := <<>>,
+ last_changed := <<"2016-11-17 10:00:00">>}]} =
+ ?CMD:output({stream, [{{<<"/">>, <<"test">>}, dynamic,
+ {running, [{src_uri, <<"amqp://server-1">>},
+ {dest_uri, <<"amqp://server-2">>}]},
+ {{2016, 11, 17}, {10, 00, 00}}}]}, Opts),
+ shovel_test_utils:clear_param(Config, <<"test">>).
diff --git a/deps/rabbitmq_shovel/test/shovel_test_utils.erl b/deps/rabbitmq_shovel/test/shovel_test_utils.erl
new file mode 100644
index 0000000000..6e49ada8a1
--- /dev/null
+++ b/deps/rabbitmq_shovel/test/shovel_test_utils.erl
@@ -0,0 +1,51 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(shovel_test_utils).
+
+-include_lib("common_test/include/ct.hrl").
+-export([set_param/3, set_param_nowait/3, await_shovel/2, await_shovel1/2,
+ shovels_from_status/0, await/1, clear_param/2]).
+
+make_uri(Config) ->
+ Hostname = ?config(rmq_hostname, Config),
+ Port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp),
+ list_to_binary(lists:flatten(io_lib:format("amqp://~s:~b",
+ [Hostname, Port]))).
+
+set_param(Config, Name, Value) ->
+ set_param_nowait(Config, Name, Value),
+ await_shovel(Config, Name).
+
+set_param_nowait(Config, Name, Value) ->
+ Uri = make_uri(Config),
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+ rabbit_runtime_parameters, set, [
+ <<"/">>, <<"shovel">>, Name, [{<<"src-uri">>, Uri},
+ {<<"dest-uri">>, [Uri]} |
+ Value], none]).
+
+await_shovel(Config, Name) ->
+ rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, await_shovel1, [Config, Name]).
+
+await_shovel1(_Config, Name) ->
+ await(fun () -> lists:member(Name, shovels_from_status()) end).
+
+shovels_from_status() ->
+ S = rabbit_shovel_status:status(),
+ [N || {{<<"/">>, N}, dynamic, {running, _}, _} <- S].
+
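+%% Re-check Pred every 100ms until it returns true; there is no local
+%% timeout, so a stuck wait is bounded only by the common_test timetrap.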
+await(Pred) ->
+ case Pred() of
+ true -> ok;
+ false -> timer:sleep(100),
+ await(Pred)
+ end.
+
+clear_param(Config, Name) ->
+ rabbit_ct_broker_helpers:rpc(Config, 0,
+ rabbit_runtime_parameters, clear, [<<"/">>, <<"shovel">>, Name, <<"acting-user">>]).
diff --git a/deps/rabbitmq_shovel_management/.gitignore b/deps/rabbitmq_shovel_management/.gitignore
new file mode 100644
index 0000000000..e909ef3689
--- /dev/null
+++ b/deps/rabbitmq_shovel_management/.gitignore
@@ -0,0 +1,17 @@
+.sw?
+.*.sw?
+*.beam
+/.erlang.mk/
+/cover/
+/deps/
+/doc/
+/ebin/
+/escript/
+/escript.lock
+/logs/
+/plugins/
+/plugins.lock
+/sbin/
+/sbin.lock
+
+/rabbitmq_shovel_management.d
diff --git a/deps/rabbitmq_shovel_management/.travis.yml b/deps/rabbitmq_shovel_management/.travis.yml
new file mode 100644
index 0000000000..7d9aa8ed13
--- /dev/null
+++ b/deps/rabbitmq_shovel_management/.travis.yml
@@ -0,0 +1,61 @@
+# vim:sw=2:et:
+
+os: linux
+dist: xenial
+language: elixir
+notifications:
+ email:
+ recipients:
+ - alerts@rabbitmq.com
+ on_success: never
+ on_failure: always
+addons:
+ apt:
+ packages:
+ - awscli
+cache:
+ apt: true
+env:
+ global:
+ - secure: arAUvph47vJz3TL1r/lhORnLZh5X1oYdcWXfXxO1ZMoFKOFPhp6WzinGZO7/OzGhl+x9ABZPrXl/qvTq++uHr9qMkYN4o3bYGcvCLvqAQs0EWVijKG2wOqVWfdh5nQ/VJuFqh9nfDKoZpIzjcEUUIFAsYf6/pbVikZOxT5E6fRM=
+ - secure: PyHQ66yOaZEUS94WUaOwLT5oo3/HJBIZQfODtvvdmP5piBNZV0uggXuTJAG+Ep0eGB6Y0kOJQfk/wzND3X+Nxxx4qHscJNrhlnrIAz1PxekAei8eSPlTM4FBxJLHuLZbZmNB7ruOETWBpO8VdrH09cKsT/EymsPs4MLzzHRDEz8=
+
+ # $base_rmq_ref is used by rabbitmq-components.mk to select the
+ # appropriate branch for dependencies.
+ - base_rmq_ref=master
+
+elixir:
+ - '1.9'
+otp_release:
+ - '21.3'
+ - '22.2'
+
+install:
+ # This project being an Erlang one (we just set language to Elixir
+ # to ensure it is installed), we don't want Travis to run mix(1)
+ # automatically as it will break.
+ skip
+
+script:
+ # $current_rmq_ref is also used by rabbitmq-components.mk to select
+ # the appropriate branch for dependencies.
+ - make check-rabbitmq-components.mk
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+ - make xref
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+ - make tests
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+
+after_failure:
+ - |
+ cd "$TRAVIS_BUILD_DIR"
+ if test -d logs && test "$AWS_ACCESS_KEY_ID" && test "$AWS_SECRET_ACCESS_KEY"; then
+ archive_name="$(basename "$TRAVIS_REPO_SLUG")-$TRAVIS_JOB_NUMBER"
+
+ tar -c --transform "s/^logs/${archive_name}/" -f - logs | \
+ xz > "${archive_name}.tar.xz"
+
+ aws s3 cp "${archive_name}.tar.xz" s3://server-release-pipeline/travis-ci-logs/ \
+ --region eu-west-1 \
+ --acl public-read
+ fi
diff --git a/deps/rabbitmq_shovel_management/CODE_OF_CONDUCT.md b/deps/rabbitmq_shovel_management/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000000..08697906fd
--- /dev/null
+++ b/deps/rabbitmq_shovel_management/CODE_OF_CONDUCT.md
@@ -0,0 +1,44 @@
+# Contributor Code of Conduct
+
+As contributors and maintainers of this project, and in the interest of fostering an open
+and welcoming community, we pledge to respect all people who contribute through reporting
+issues, posting feature requests, updating documentation, submitting pull requests or
+patches, and other activities.
+
+We are committed to making participation in this project a harassment-free experience for
+everyone, regardless of level of experience, gender, gender identity and expression,
+sexual orientation, disability, personal appearance, body size, race, ethnicity, age,
+religion, or nationality.
+
+Examples of unacceptable behavior by participants include:
+
+ * The use of sexualized language or imagery
+ * Personal attacks
+ * Trolling or insulting/derogatory comments
+ * Public or private harassment
+ * Publishing other's private information, such as physical or electronic addresses,
+ without explicit permission
+ * Other unethical or unprofessional conduct
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments,
+commits, code, wiki edits, issues, and other contributions that are not aligned to this
+Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors
+that they deem inappropriate, threatening, offensive, or harmful.
+
+By adopting this Code of Conduct, project maintainers commit themselves to fairly and
+consistently applying these principles to every aspect of managing this project. Project
+maintainers who do not follow or enforce the Code of Conduct may be permanently removed
+from the project team.
+
+This Code of Conduct applies both within project spaces and in public spaces when an
+individual is representing the project or its community.
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by
+contacting a project maintainer at [info@rabbitmq.com](mailto:info@rabbitmq.com). All complaints will
+be reviewed and investigated and will result in a response that is deemed necessary and
+appropriate to the circumstances. Maintainers are obligated to maintain confidentiality
+with regard to the reporter of an incident.
+
+This Code of Conduct is adapted from the
+[Contributor Covenant](https://contributor-covenant.org), version 1.3.0, available at
+[contributor-covenant.org/version/1/3/0/](https://contributor-covenant.org/version/1/3/0/)
diff --git a/deps/rabbitmq_shovel_management/CONTRIBUTING.md b/deps/rabbitmq_shovel_management/CONTRIBUTING.md
new file mode 100644
index 0000000000..592e7ced57
--- /dev/null
+++ b/deps/rabbitmq_shovel_management/CONTRIBUTING.md
@@ -0,0 +1,99 @@
+Thank you for using RabbitMQ and for taking the time to contribute to the project.
+This document has two main parts:
+
+ * when and how to file GitHub issues for RabbitMQ projects
+ * how to submit pull requests
+
+They are intended to save you and RabbitMQ maintainers some time, so please
+take a moment to read through them.
+
+## Overview
+
+### GitHub issues
+
+The RabbitMQ team uses GitHub issues for _specific actionable items_ that
+engineers can work on. This assumes the following:
+
+* GitHub issues are not used for questions, investigations, root cause
+  analysis, discussions of potential issues, etc. (as defined by this team)
+* Enough information is provided by the reporter for maintainers to work with
+
+The team receives many questions through various venues every single
+day. Frequently, these questions do not include the necessary details
+the team needs to begin useful work. GitHub issues can very quickly
+turn into a something impossible to navigate and make sense
+of. Because of this, questions, investigations, root cause analysis,
+and discussions of potential features are all considered to be
+[mailing list][rmq-users] material. If you are unsure where to begin,
+the [RabbitMQ users mailing list][rmq-users] is the right place.
+
+Getting all the details necessary to reproduce an issue, make a
+conclusion or even form a hypothesis about what's happening can take a
+fair amount of time. Please help others help you by providing a way to
+reproduce the behavior you're observing, or at least sharing as much
+relevant information as possible on the [RabbitMQ users mailing
+list][rmq-users].
+
+Please provide versions of the software used:
+
+ * RabbitMQ server
+ * Erlang
+ * Operating system version (and distribution, if applicable)
+ * All client libraries used
+ * RabbitMQ plugins (if applicable)
+
+The following information greatly helps in investigating and reproducing issues:
+
+ * RabbitMQ server logs
+ * A code example or terminal transcript that can be used to reproduce
+ * Full exception stack traces (a single line message is not enough!)
+ * `rabbitmqctl report` and `rabbitmqctl environment` output
+ * Other relevant details about the environment and workload, e.g. a traffic capture
+ * Feel free to edit out hostnames and other potentially sensitive information.
+
+To make collecting much of this and other environment information easier, use
+the [`rabbitmq-collect-env`][rmq-collect-env] script. It will produce an archive with
+server logs, operating system logs, output of certain diagnostics commands and so on.
+Please note that **no effort is made to scrub any information that may be sensitive**.
+
+### Pull Requests
+
+RabbitMQ projects use pull requests to discuss, collaborate on and accept code contributions.
+Pull requests are the primary place for discussing code changes.
+
+Here's the recommended workflow:
+
+ * [Fork the repository][github-fork] or repositories you plan on contributing to. If multiple
+ repositories are involved in addressing the same issue, please use the same branch name
+ in each repository
+ * Create a branch with a descriptive name in the relevant repositories
+ * Make your changes, run tests (usually with `make tests`), commit with a
+ [descriptive message][git-commit-msgs], push to your fork
+ * Submit pull requests with an explanation of what has been changed and **why**
+ * Submit a filled out and signed [Contributor Agreement][ca-agreement] if needed (see below)
+ * Be patient. We will get to your pull request eventually
+
+If what you are going to work on is a substantial change, please first
+ask the core team for their opinion on the [RabbitMQ users mailing list][rmq-users].
+
+## Code of Conduct
+
+See [CODE_OF_CONDUCT.md](./CODE_OF_CONDUCT.md).
+
+## Contributor Agreement
+
+If you want to contribute a non-trivial change, please submit a signed
+copy of our [Contributor Agreement][ca-agreement] around the time you
+submit your pull request. This will make it much easier (in some
+cases, possible) for the RabbitMQ team at Pivotal to merge your
+contribution.
+
+## Where to Ask Questions
+
+If something isn't clear, feel free to ask on our [mailing list][rmq-users].
+
+[rmq-collect-env]: https://github.com/rabbitmq/support-tools/blob/master/scripts/rabbitmq-collect-env
+[git-commit-msgs]: https://chris.beams.io/posts/git-commit/
+[rmq-users]: https://groups.google.com/forum/#!forum/rabbitmq-users
+[ca-agreement]: https://cla.pivotal.io/sign/rabbitmq
+[github-fork]: https://help.github.com/articles/fork-a-repo/
diff --git a/deps/rabbitmq_shovel_management/LICENSE b/deps/rabbitmq_shovel_management/LICENSE
new file mode 100644
index 0000000000..f2da65d175
--- /dev/null
+++ b/deps/rabbitmq_shovel_management/LICENSE
@@ -0,0 +1,4 @@
+This package is licensed under the MPL 2.0. For the MPL 2.0, please see LICENSE-MPL-RabbitMQ.
+
+If you have any questions regarding licensing, please contact us at
+info@rabbitmq.com.
diff --git a/deps/rabbitmq_shovel_management/LICENSE-MPL-RabbitMQ b/deps/rabbitmq_shovel_management/LICENSE-MPL-RabbitMQ
new file mode 100644
index 0000000000..14e2f777f6
--- /dev/null
+++ b/deps/rabbitmq_shovel_management/LICENSE-MPL-RabbitMQ
@@ -0,0 +1,373 @@
+Mozilla Public License Version 2.0
+==================================
+
+1. Definitions
+--------------
+
+1.1. "Contributor"
+ means each individual or legal entity that creates, contributes to
+ the creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+ means the combination of the Contributions of others (if any) used
+ by a Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+ means Source Code Form to which the initial Contributor has attached
+ the notice in Exhibit A, the Executable Form of such Source Code
+ Form, and Modifications of such Source Code Form, in each case
+ including portions thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ (a) that the initial Contributor has attached the notice described
+ in Exhibit B to the Covered Software; or
+
+ (b) that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the
+ terms of a Secondary License.
+
+1.6. "Executable Form"
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+ means a work that combines Covered Software with other material, in
+ a separate file or files, that is not Covered Software.
+
+1.8. "License"
+ means this document.
+
+1.9. "Licensable"
+ means having the right to grant, to the maximum extent possible,
+ whether at the time of the initial grant or subsequently, any and
+ all of the rights conveyed by this License.
+
+1.10. "Modifications"
+ means any of the following:
+
+ (a) any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered
+ Software; or
+
+ (b) any new file in Source Code Form that contains any Covered
+ Software.
+
+1.11. "Patent Claims" of a Contributor
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the
+ License, by the making, using, selling, offering for sale, having
+ made, import, or transfer of either its Contributions or its
+ Contributor Version.
+
+1.12. "Secondary License"
+ means either the GNU General Public License, Version 2.0, the GNU
+ Lesser General Public License, Version 2.1, the GNU Affero General
+ Public License, Version 3.0, or any later versions of those
+ licenses.
+
+1.13. "Source Code Form"
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that
+ controls, is controlled by, or is under common control with You. For
+ purposes of this definition, "control" means (a) the power, direct
+ or indirect, to cause the direction or management of such entity,
+ whether by contract or otherwise, or (b) ownership of more than
+ fifty percent (50%) of the outstanding shares or beneficial
+ ownership of such entity.
+
+2. License Grants and Conditions
+--------------------------------
+
+2.1. Grants
+
+Each Contributor hereby grants You a world-wide, royalty-free,
+non-exclusive license:
+
+(a) under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+(b) under Patent Claims of such Contributor to make, use, sell, offer
+ for sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+The licenses granted in Section 2.1 with respect to any Contribution
+become effective for each Contribution on the date the Contributor first
+distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+The licenses granted in this Section 2 are the only rights granted under
+this License. No additional rights or licenses will be implied from the
+distribution or licensing of Covered Software under this License.
+Notwithstanding Section 2.1(b) above, no patent license is granted by a
+Contributor:
+
+(a) for any code that a Contributor has removed from Covered Software;
+ or
+
+(b) for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+(c) under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+This License does not grant any rights in the trademarks, service marks,
+or logos of any Contributor (except as may be necessary to comply with
+the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+No Contributor makes additional grants as a result of Your choice to
+distribute the Covered Software under a subsequent version of this
+License (see Section 10.2) or under the terms of a Secondary License (if
+permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+Each Contributor represents that the Contributor believes its
+Contributions are its original creation(s) or it has sufficient rights
+to grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+This License is not intended to limit any rights You have under
+applicable copyright doctrines of fair use, fair dealing, or other
+equivalents.
+
+2.7. Conditions
+
+Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
+in Section 2.1.
+
+3. Responsibilities
+-------------------
+
+3.1. Distribution of Source Form
+
+All distribution of Covered Software in Source Code Form, including any
+Modifications that You create or to which You contribute, must be under
+the terms of this License. You must inform recipients that the Source
+Code Form of the Covered Software is governed by the terms of this
+License, and how they can obtain a copy of this License. You may not
+attempt to alter or restrict the recipients' rights in the Source Code
+Form.
+
+3.2. Distribution of Executable Form
+
+If You distribute Covered Software in Executable Form then:
+
+(a) such Covered Software must also be made available in Source Code
+ Form, as described in Section 3.1, and You must inform recipients of
+ the Executable Form how they can obtain a copy of such Source Code
+ Form by reasonable means in a timely manner, at a charge no more
+ than the cost of distribution to the recipient; and
+
+(b) You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter
+ the recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+You may create and distribute a Larger Work under terms of Your choice,
+provided that You also comply with the requirements of this License for
+the Covered Software. If the Larger Work is a combination of Covered
+Software with a work governed by one or more Secondary Licenses, and the
+Covered Software is not Incompatible With Secondary Licenses, this
+License permits You to additionally distribute such Covered Software
+under the terms of such Secondary License(s), so that the recipient of
+the Larger Work may, at their option, further distribute the Covered
+Software under the terms of either this License or such Secondary
+License(s).
+
+3.4. Notices
+
+You may not remove or alter the substance of any license notices
+(including copyright notices, patent notices, disclaimers of warranty,
+or limitations of liability) contained within the Source Code Form of
+the Covered Software, except that You may alter any license notices to
+the extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+You may choose to offer, and to charge a fee for, warranty, support,
+indemnity or liability obligations to one or more recipients of Covered
+Software. However, You may do so only on Your own behalf, and not on
+behalf of any Contributor. You must make it absolutely clear that any
+such warranty, support, indemnity, or liability obligation is offered by
+You alone, and You hereby agree to indemnify every Contributor for any
+liability incurred by such Contributor as a result of warranty, support,
+indemnity or liability terms You offer. You may include additional
+disclaimers of warranty and limitations of liability specific to any
+jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+---------------------------------------------------
+
+If it is impossible for You to comply with any of the terms of this
+License with respect to some or all of the Covered Software due to
+statute, judicial order, or regulation then You must: (a) comply with
+the terms of this License to the maximum extent possible; and (b)
+describe the limitations and the code they affect. Such description must
+be placed in a text file included with all distributions of the Covered
+Software under this License. Except to the extent prohibited by statute
+or regulation, such description must be sufficiently detailed for a
+recipient of ordinary skill to be able to understand it.
+
+5. Termination
+--------------
+
+5.1. The rights granted under this License will terminate automatically
+if You fail to comply with any of its terms. However, if You become
+compliant, then the rights granted under this License from a particular
+Contributor are reinstated (a) provisionally, unless and until such
+Contributor explicitly and finally terminates Your grants, and (b) on an
+ongoing basis, if such Contributor fails to notify You of the
+non-compliance by some reasonable means prior to 60 days after You have
+come back into compliance. Moreover, Your grants from a particular
+Contributor are reinstated on an ongoing basis if such Contributor
+notifies You of the non-compliance by some reasonable means, this is the
+first time You have received notice of non-compliance with this License
+from such Contributor, and You become compliant prior to 30 days after
+Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+infringement claim (excluding declaratory judgment actions,
+counter-claims, and cross-claims) alleging that a Contributor Version
+directly or indirectly infringes any patent, then the rights granted to
+You by any and all Contributors for the Covered Software under Section
+2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all
+end user license agreements (excluding distributors and resellers) which
+have been validly granted by You or Your distributors under this License
+prior to termination shall survive termination.
+
+************************************************************************
+* *
+* 6. Disclaimer of Warranty *
+* ------------------------- *
+* *
+* Covered Software is provided under this License on an "as is" *
+* basis, without warranty of any kind, either expressed, implied, or *
+* statutory, including, without limitation, warranties that the *
+* Covered Software is free of defects, merchantable, fit for a *
+* particular purpose or non-infringing. The entire risk as to the *
+* quality and performance of the Covered Software is with You. *
+* Should any Covered Software prove defective in any respect, You *
+* (not any Contributor) assume the cost of any necessary servicing, *
+* repair, or correction. This disclaimer of warranty constitutes an *
+* essential part of this License. No use of any Covered Software is *
+* authorized under this License except under this disclaimer. *
+* *
+************************************************************************
+
+************************************************************************
+* *
+* 7. Limitation of Liability *
+* -------------------------- *
+* *
+* Under no circumstances and under no legal theory, whether tort *
+* (including negligence), contract, or otherwise, shall any *
+* Contributor, or anyone who distributes Covered Software as *
+* permitted above, be liable to You for any direct, indirect, *
+* special, incidental, or consequential damages of any character *
+* including, without limitation, damages for lost profits, loss of *
+* goodwill, work stoppage, computer failure or malfunction, or any *
+* and all other commercial damages or losses, even if such party *
+* shall have been informed of the possibility of such damages. This *
+* limitation of liability shall not apply to liability for death or *
+* personal injury resulting from such party's negligence to the *
+* extent applicable law prohibits such limitation. Some *
+* jurisdictions do not allow the exclusion or limitation of *
+* incidental or consequential damages, so this exclusion and *
+* limitation may not apply to You. *
+* *
+************************************************************************
+
+8. Litigation
+-------------
+
+Any litigation relating to this License may be brought only in the
+courts of a jurisdiction where the defendant maintains its principal
+place of business and such litigation shall be governed by laws of that
+jurisdiction, without reference to its conflict-of-law provisions.
+Nothing in this Section shall prevent a party's ability to bring
+cross-claims or counter-claims.
+
+9. Miscellaneous
+----------------
+
+This License represents the complete agreement concerning the subject
+matter hereof. If any provision of this License is held to be
+unenforceable, such provision shall be reformed only to the extent
+necessary to make it enforceable. Any law or regulation which provides
+that the language of a contract shall be construed against the drafter
+shall not be used to construe this License against a Contributor.
+
+10. Versions of the License
+---------------------------
+
+10.1. New Versions
+
+Mozilla Foundation is the license steward. Except as provided in Section
+10.3, no one other than the license steward has the right to modify or
+publish new versions of this License. Each version will be given a
+distinguishing version number.
+
+10.2. Effect of New Versions
+
+You may distribute the Covered Software under the terms of the version
+of the License under which You originally received the Covered Software,
+or under the terms of any subsequent version published by the license
+steward.
+
+10.3. Modified Versions
+
+If you create software not governed by this License, and you want to
+create a new license for such software, you may create and use a
+modified version of this License if you rename the license and remove
+any references to the name of the license steward (except to note that
+such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+Licenses
+
+If You choose to distribute Source Code Form that is Incompatible With
+Secondary Licenses under the terms of this version of the License, the
+notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+-------------------------------------------
+
+ This Source Code Form is subject to the terms of the Mozilla Public
+ License, v. 2.0. If a copy of the MPL was not distributed with this
+ file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular
+file, then You may include the notice in a location (such as a LICENSE
+file in a relevant directory) where a recipient would be likely to look
+for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+---------------------------------------------------------
+
+ This Source Code Form is "Incompatible With Secondary Licenses", as
+ defined by the Mozilla Public License, v. 2.0.
diff --git a/deps/rabbitmq_shovel_management/Makefile b/deps/rabbitmq_shovel_management/Makefile
new file mode 100644
index 0000000000..0a8740dc6e
--- /dev/null
+++ b/deps/rabbitmq_shovel_management/Makefile
@@ -0,0 +1,21 @@
+PROJECT = rabbitmq_shovel_management
+PROJECT_DESCRIPTION = Management extension for the Shovel plugin
+
+define PROJECT_APP_EXTRA_KEYS
+ {broker_version_requirements, []}
+endef
+
+DEPS = rabbit_common rabbit rabbitmq_management rabbitmq_shovel
+TEST_DEPS = rabbitmq_ct_helpers rabbitmq_ct_client_helpers rabbitmq_amqp1_0
+
+DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-plugin.mk
+DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk
+
+# FIXME: Use erlang.mk patched for RabbitMQ, while waiting for PRs to be
+# reviewed and merged.
+
+ERLANG_MK_REPO = https://github.com/rabbitmq/erlang.mk.git
+ERLANG_MK_COMMIT = rabbitmq-tmp
+
+include rabbitmq-components.mk
+include erlang.mk
diff --git a/deps/rabbitmq_shovel_management/README.md b/deps/rabbitmq_shovel_management/README.md
new file mode 100644
index 0000000000..8e2bcfc8a4
--- /dev/null
+++ b/deps/rabbitmq_shovel_management/README.md
@@ -0,0 +1,96 @@
+# RabbitMQ Shovel Management Plugin
+
+Adds information on shovel status to the management plugin. Build it
+like any other plugin.
+
+If you have a heterogeneous cluster (where the nodes have different
+plugins installed), this should be installed on the same nodes as the
+management plugin.
+
+
+## Installing
+
+This plugin ships with RabbitMQ. Like all [plugins](https://www.rabbitmq.com/plugins.html), it must be enabled
+before it can be used:
+
+```bash
+rabbitmq-plugins enable rabbitmq_shovel_management
+```
+
+
+## Usage
+
+When the plugin is enabled, there will be a Shovel management
+link under the Admin tab.
+
+### HTTP API
+
+The HTTP API adds endpoints for listing, creating, and deleting shovels.
+
+#### `GET /api/shovels[/{vhost}]`
+Lists all shovels, optionally filtering by virtual host.
+
+**Example**
+
+```bash
+curl -u guest:guest -v http://localhost:15672/api/shovels/%2f
+```
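+
+The response is a JSON array of shovel status objects. When scripting against
+this endpoint, it can be convenient to pipe the output through a JSON
+processor such as `jq`. Below is a minimal sketch, assuming `jq` is installed
+and that each status object carries `name` and `state` fields; inspect the
+raw response first, as the exact field set may vary between versions:
+
+```bash
+# Print each shovel's name and state, one per line.
+# Assumes the default guest/guest credentials on localhost.
+curl -s -u guest:guest http://localhost:15672/api/shovels/%2f |
+  jq -r '.[] | "\(.name)\t\(.state)"'
+```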
+
+#### `PUT /api/parameters/shovel/{vhost}/{name}`
+Creates a shovel, passing in the configuration as JSON in the request body.
+
+**Example**
+
+Create a file called ``shovel.json`` similar to the following, replacing the parameter values as desired:
+```json
+{
+ "component": "shovel",
+ "name": "my-shovel",
+ "value": {
+ "ack-mode": "on-publish",
+ "add-forward-headers": false,
+ "delete-after": "never",
+ "dest-exchange": null,
+ "dest-queue": "dest",
+ "dest-uri": "amqp://",
+ "prefetch-count": 250,
+ "reconnect-delay": 30,
+ "src-queue": "source",
+ "src-uri": "amqp://"
+ },
+ "vhost": "/"
+}
+```
+
+Once created, submit the file to the HTTP API with a PUT request:
+
+```bash
+curl -u guest:guest -v -X PUT -H 'Content-Type: application/json' -d @./shovel.json \
+ http://localhost:15672/api/parameters/shovel/%2F/my-shovel
+```
+*Note:* either `dest-queue` or `dest-exchange` can be specified in the `value` stanza of the JSON, but not both.
+
+#### `GET /api/parameters/shovel/{vhost}/{name}`
+Shows the configuration parameters for a shovel.
+
+**Example**
+
+```bash
+curl -u guest:guest -v http://localhost:15672/api/parameters/shovel/%2F/my-shovel
+```
+
+#### `DELETE /api/parameters/shovel/{vhost}/{name}`
+
+Deletes a shovel.
+
+**Example**
+
+```bash
+curl -u guest:guest -v -X DELETE http://localhost:15672/api/parameters/shovel/%2F/my-shovel
+```
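+
+Putting the endpoints together, the following is a minimal end-to-end sketch,
+not an official workflow: it assumes a broker on localhost with the default
+guest/guest credentials and the `shovel.json` file from the PUT example above.
+
+```bash
+#!/bin/sh
+# Sketch: create a shovel, read it back, list shovels, then delete it.
+BASE=http://localhost:15672/api
+AUTH=guest:guest
+
+# Create (or update) the shovel definition on the default virtual host
+curl -u "$AUTH" -X PUT -H 'Content-Type: application/json' \
+    -d @./shovel.json "$BASE/parameters/shovel/%2F/my-shovel"
+
+# Read back its configuration
+curl -u "$AUTH" "$BASE/parameters/shovel/%2F/my-shovel"
+
+# List shovel status on the default virtual host
+curl -u "$AUTH" "$BASE/shovels/%2f"
+
+# Delete the shovel
+curl -u "$AUTH" -X DELETE "$BASE/parameters/shovel/%2F/my-shovel"
+```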
+
+## License and Copyright
+
+Released under [the same license as RabbitMQ](https://www.rabbitmq.com/mpl.html).
+
+(c) 2007-2020 VMware, Inc. or its affiliates.
diff --git a/deps/rabbitmq_shovel_management/erlang.mk b/deps/rabbitmq_shovel_management/erlang.mk
new file mode 100644
index 0000000000..fce4be0b0a
--- /dev/null
+++ b/deps/rabbitmq_shovel_management/erlang.mk
@@ -0,0 +1,7808 @@
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+.PHONY: all app apps deps search rel relup docs install-docs check tests clean distclean help erlang-mk
+
+ERLANG_MK_FILENAME := $(realpath $(lastword $(MAKEFILE_LIST)))
+export ERLANG_MK_FILENAME
+
+ERLANG_MK_VERSION = 2019.07.01-40-geb3e4b0
+ERLANG_MK_WITHOUT =
+
+# Make 3.81 and 3.82 are deprecated.
+
+ifeq ($(MAKELEVEL)$(MAKE_VERSION),03.81)
+$(warning Please upgrade to GNU Make 4 or later: https://erlang.mk/guide/installation.html)
+endif
+
+ifeq ($(MAKELEVEL)$(MAKE_VERSION),03.82)
+$(warning Please upgrade to GNU Make 4 or later: https://erlang.mk/guide/installation.html)
+endif
+
+# Core configuration.
+
+PROJECT ?= $(notdir $(CURDIR))
+PROJECT := $(strip $(PROJECT))
+
+PROJECT_VERSION ?= rolling
+PROJECT_MOD ?= $(PROJECT)_app
+PROJECT_ENV ?= []
+
+# Verbosity.
+
+V ?= 0
+
+verbose_0 = @
+verbose_2 = set -x;
+verbose = $(verbose_$(V))
+
+ifeq ($(V),3)
+SHELL := $(SHELL) -x
+endif
+
+gen_verbose_0 = @echo " GEN " $@;
+gen_verbose_2 = set -x;
+gen_verbose = $(gen_verbose_$(V))
+
+gen_verbose_esc_0 = @echo " GEN " $$@;
+gen_verbose_esc_2 = set -x;
+gen_verbose_esc = $(gen_verbose_esc_$(V))
+
+# Temporary files directory.
+
+ERLANG_MK_TMP ?= $(CURDIR)/.erlang.mk
+export ERLANG_MK_TMP
+
+# "erl" command.
+
+ERL = erl +A1 -noinput -boot no_dot_erlang
+
+# Platform detection.
+
+ifeq ($(PLATFORM),)
+UNAME_S := $(shell uname -s)
+
+ifeq ($(UNAME_S),Linux)
+PLATFORM = linux
+else ifeq ($(UNAME_S),Darwin)
+PLATFORM = darwin
+else ifeq ($(UNAME_S),SunOS)
+PLATFORM = solaris
+else ifeq ($(UNAME_S),GNU)
+PLATFORM = gnu
+else ifeq ($(UNAME_S),FreeBSD)
+PLATFORM = freebsd
+else ifeq ($(UNAME_S),NetBSD)
+PLATFORM = netbsd
+else ifeq ($(UNAME_S),OpenBSD)
+PLATFORM = openbsd
+else ifeq ($(UNAME_S),DragonFly)
+PLATFORM = dragonfly
+else ifeq ($(shell uname -o),Msys)
+PLATFORM = msys2
+else
+$(error Unable to detect platform. Please open a ticket with the output of uname -a.)
+endif
+
+export PLATFORM
+endif
+
+# Core targets.
+
+all:: deps app rel
+
+# Noop to avoid a Make warning when there's nothing to do.
+rel::
+ $(verbose) :
+
+relup:: deps app
+
+check:: tests
+
+clean:: clean-crashdump
+
+clean-crashdump:
+ifneq ($(wildcard erl_crash.dump),)
+ $(gen_verbose) rm -f erl_crash.dump
+endif
+
+distclean:: clean distclean-tmp
+
+$(ERLANG_MK_TMP):
+ $(verbose) mkdir -p $(ERLANG_MK_TMP)
+
+distclean-tmp:
+ $(gen_verbose) rm -rf $(ERLANG_MK_TMP)
+
+help::
+ $(verbose) printf "%s\n" \
+ "erlang.mk (version $(ERLANG_MK_VERSION)) is distributed under the terms of the ISC License." \
+ "Copyright (c) 2013-2016 Loïc Hoguin <essen@ninenines.eu>" \
+ "" \
+ "Usage: [V=1] $(MAKE) [target]..." \
+ "" \
+ "Core targets:" \
+ " all Run deps, app and rel targets in that order" \
+ " app Compile the project" \
+ " deps Fetch dependencies (if needed) and compile them" \
+ " fetch-deps Fetch dependencies recursively (if needed) without compiling them" \
+ " list-deps List dependencies recursively on stdout" \
+ " search q=... Search for a package in the built-in index" \
+ " rel Build a release for this project, if applicable" \
+ " docs Build the documentation for this project" \
+ " install-docs Install the man pages for this project" \
+ " check Compile and run all tests and analysis for this project" \
+ " tests Run the tests for this project" \
+ " clean Delete temporary and output files from most targets" \
+ " distclean Delete all temporary and output files" \
+ " help Display this help and exit" \
+ " erlang-mk Update erlang.mk to the latest version"
+
+# Core functions.
+
+empty :=
+space := $(empty) $(empty)
+tab := $(empty) $(empty)
+comma := ,
+
+define newline
+
+
+endef
+
+define comma_list
+$(subst $(space),$(comma),$(strip $(1)))
+endef
+
+define escape_dquotes
+$(subst ",\",$1)
+endef
+
+# Adding erlang.mk to make Erlang scripts who call init:get_plain_arguments() happy.
+define erlang
+$(ERL) $2 -pz $(ERLANG_MK_TMP)/rebar/ebin -eval "$(subst $(newline),,$(call escape_dquotes,$1))" -- erlang.mk
+endef
+
+ifeq ($(PLATFORM),msys2)
+core_native_path = $(shell cygpath -m $1)
+else
+core_native_path = $1
+endif
+
+core_http_get = curl -Lf$(if $(filter-out 0,$(V)),,s)o $(call core_native_path,$1) $2
+
+core_eq = $(and $(findstring $(1),$(2)),$(findstring $(2),$(1)))
+
+# We skip files that contain spaces because they end up causing issues.
+core_find = $(if $(wildcard $1),$(shell find $(1:%/=%) \( -type l -o -type f \) -name $(subst *,\*,$2) | grep -v " "))
+
+core_lc = $(subst A,a,$(subst B,b,$(subst C,c,$(subst D,d,$(subst E,e,$(subst F,f,$(subst G,g,$(subst H,h,$(subst I,i,$(subst J,j,$(subst K,k,$(subst L,l,$(subst M,m,$(subst N,n,$(subst O,o,$(subst P,p,$(subst Q,q,$(subst R,r,$(subst S,s,$(subst T,t,$(subst U,u,$(subst V,v,$(subst W,w,$(subst X,x,$(subst Y,y,$(subst Z,z,$(1)))))))))))))))))))))))))))
+
+core_ls = $(filter-out $(1),$(shell echo $(1)))
+
+# @todo Use a solution that does not require using perl.
+core_relpath = $(shell perl -e 'use File::Spec; print File::Spec->abs2rel(@ARGV) . "\n"' $1 $2)
+
+define core_render
+ printf -- '$(subst $(newline),\n,$(subst %,%%,$(subst ','\'',$(subst $(tab),$(WS),$(call $(1))))))\n' > $(2)
+endef
+
+# Automated update.
+
+ERLANG_MK_REPO ?= https://github.com/ninenines/erlang.mk
+ERLANG_MK_COMMIT ?=
+ERLANG_MK_BUILD_CONFIG ?= build.config
+ERLANG_MK_BUILD_DIR ?= .erlang.mk.build
+
+erlang-mk: WITHOUT ?= $(ERLANG_MK_WITHOUT)
+erlang-mk:
+ifdef ERLANG_MK_COMMIT
+ $(verbose) git clone $(ERLANG_MK_REPO) $(ERLANG_MK_BUILD_DIR)
+ $(verbose) cd $(ERLANG_MK_BUILD_DIR) && git checkout $(ERLANG_MK_COMMIT)
+else
+ $(verbose) git clone --depth 1 $(ERLANG_MK_REPO) $(ERLANG_MK_BUILD_DIR)
+endif
+ $(verbose) if [ -f $(ERLANG_MK_BUILD_CONFIG) ]; then cp $(ERLANG_MK_BUILD_CONFIG) $(ERLANG_MK_BUILD_DIR)/build.config; fi
+ $(gen_verbose) $(MAKE) --no-print-directory -C $(ERLANG_MK_BUILD_DIR) WITHOUT='$(strip $(WITHOUT))' UPGRADE=1
+ $(verbose) cp $(ERLANG_MK_BUILD_DIR)/erlang.mk ./erlang.mk
+ $(verbose) rm -rf $(ERLANG_MK_BUILD_DIR)
+ $(verbose) rm -rf $(ERLANG_MK_TMP)
+
+# The erlang.mk package index is bundled in the default erlang.mk build.
+# Search for the string "copyright" to skip to the rest of the code.
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-kerl
+
+KERL_INSTALL_DIR ?= $(HOME)/erlang
+
+ifeq ($(strip $(KERL)),)
+KERL := $(ERLANG_MK_TMP)/kerl/kerl
+endif
+
+KERL_DIR = $(ERLANG_MK_TMP)/kerl
+
+export KERL
+
+KERL_GIT ?= https://github.com/kerl/kerl
+KERL_COMMIT ?= master
+
+KERL_MAKEFLAGS ?=
+
+OTP_GIT ?= https://github.com/erlang/otp
+
+define kerl_otp_target
+$(KERL_INSTALL_DIR)/$(1): $(KERL)
+ $(verbose) if [ ! -d $$@ ]; then \
+ MAKEFLAGS="$(KERL_MAKEFLAGS)" $(KERL) build git $(OTP_GIT) $(1) $(1); \
+ $(KERL) install $(1) $(KERL_INSTALL_DIR)/$(1); \
+ fi
+endef
+
+define kerl_hipe_target
+$(KERL_INSTALL_DIR)/$1-native: $(KERL)
+ $(verbose) if [ ! -d $$@ ]; then \
+ KERL_CONFIGURE_OPTIONS=--enable-native-libs \
+ MAKEFLAGS="$(KERL_MAKEFLAGS)" $(KERL) build git $(OTP_GIT) $1 $1-native; \
+ $(KERL) install $1-native $(KERL_INSTALL_DIR)/$1-native; \
+ fi
+endef
+
+$(KERL): $(KERL_DIR)
+
+$(KERL_DIR): | $(ERLANG_MK_TMP)
+ $(gen_verbose) git clone --depth 1 $(KERL_GIT) $(ERLANG_MK_TMP)/kerl
+ $(verbose) cd $(ERLANG_MK_TMP)/kerl && git checkout $(KERL_COMMIT)
+ $(verbose) chmod +x $(KERL)
+
+distclean:: distclean-kerl
+
+distclean-kerl:
+ $(gen_verbose) rm -rf $(KERL_DIR)
+
+# Allow users to select which version of Erlang/OTP to use for a project.
+
+ifneq ($(strip $(LATEST_ERLANG_OTP)),)
+# In some environments it is necessary to filter out master.
+ERLANG_OTP := $(notdir $(lastword $(sort\
+ $(filter-out $(KERL_INSTALL_DIR)/master $(KERL_INSTALL_DIR)/OTP_R%,\
+ $(filter-out %-rc1 %-rc2 %-rc3,$(wildcard $(KERL_INSTALL_DIR)/*[^-native]))))))
+endif
+
+ERLANG_OTP ?=
+ERLANG_HIPE ?=
+
+# Use kerl to enforce a specific Erlang/OTP version for a project.
+ifneq ($(strip $(ERLANG_OTP)),)
+export PATH := $(KERL_INSTALL_DIR)/$(ERLANG_OTP)/bin:$(PATH)
+SHELL := env PATH=$(PATH) $(SHELL)
+$(eval $(call kerl_otp_target,$(ERLANG_OTP)))
+
+# Build Erlang/OTP only if it doesn't already exist.
+ifeq ($(wildcard $(KERL_INSTALL_DIR)/$(ERLANG_OTP))$(BUILD_ERLANG_OTP),)
+$(info Building Erlang/OTP $(ERLANG_OTP)... Please wait...)
+$(shell $(MAKE) $(KERL_INSTALL_DIR)/$(ERLANG_OTP) ERLANG_OTP=$(ERLANG_OTP) BUILD_ERLANG_OTP=1 >&2)
+endif
+
+else
+# Same for a HiPE enabled VM.
+ifneq ($(strip $(ERLANG_HIPE)),)
+export PATH := $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native/bin:$(PATH)
+SHELL := env PATH=$(PATH) $(SHELL)
+$(eval $(call kerl_hipe_target,$(ERLANG_HIPE)))
+
+# Build Erlang/OTP only if it doesn't already exist.
+ifeq ($(wildcard $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native)$(BUILD_ERLANG_OTP),)
+$(info Building HiPE-enabled Erlang/OTP $(ERLANG_HIPE)... Please wait...)
+$(shell $(MAKE) $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native ERLANG_HIPE=$(ERLANG_HIPE) BUILD_ERLANG_OTP=1 >&2)
+endif
+
+endif
+endif
+
+PACKAGES += aberth
+pkg_aberth_name = aberth
+pkg_aberth_description = Generic BERT-RPC server in Erlang
+pkg_aberth_homepage = https://github.com/a13x/aberth
+pkg_aberth_fetch = git
+pkg_aberth_repo = https://github.com/a13x/aberth
+pkg_aberth_commit = master
+
+PACKAGES += active
+pkg_active_name = active
+pkg_active_description = Active development for Erlang: rebuild and reload source/binary files while the VM is running
+pkg_active_homepage = https://github.com/proger/active
+pkg_active_fetch = git
+pkg_active_repo = https://github.com/proger/active
+pkg_active_commit = master
+
+PACKAGES += actordb_core
+pkg_actordb_core_name = actordb_core
+pkg_actordb_core_description = ActorDB main source
+pkg_actordb_core_homepage = http://www.actordb.com/
+pkg_actordb_core_fetch = git
+pkg_actordb_core_repo = https://github.com/biokoda/actordb_core
+pkg_actordb_core_commit = master
+
+PACKAGES += actordb_thrift
+pkg_actordb_thrift_name = actordb_thrift
+pkg_actordb_thrift_description = Thrift API for ActorDB
+pkg_actordb_thrift_homepage = http://www.actordb.com/
+pkg_actordb_thrift_fetch = git
+pkg_actordb_thrift_repo = https://github.com/biokoda/actordb_thrift
+pkg_actordb_thrift_commit = master
+
+PACKAGES += aleppo
+pkg_aleppo_name = aleppo
+pkg_aleppo_description = Alternative Erlang Pre-Processor
+pkg_aleppo_homepage = https://github.com/ErlyORM/aleppo
+pkg_aleppo_fetch = git
+pkg_aleppo_repo = https://github.com/ErlyORM/aleppo
+pkg_aleppo_commit = master
+
+PACKAGES += alog
+pkg_alog_name = alog
+pkg_alog_description = Simply the best logging framework for Erlang
+pkg_alog_homepage = https://github.com/siberian-fast-food/alogger
+pkg_alog_fetch = git
+pkg_alog_repo = https://github.com/siberian-fast-food/alogger
+pkg_alog_commit = master
+
+PACKAGES += amqp_client
+pkg_amqp_client_name = amqp_client
+pkg_amqp_client_description = RabbitMQ Erlang AMQP client
+pkg_amqp_client_homepage = https://www.rabbitmq.com/erlang-client-user-guide.html
+pkg_amqp_client_fetch = git
+pkg_amqp_client_repo = https://github.com/rabbitmq/rabbitmq-erlang-client.git
+pkg_amqp_client_commit = master
+
+PACKAGES += annotations
+pkg_annotations_name = annotations
+pkg_annotations_description = Simple code instrumentation utilities
+pkg_annotations_homepage = https://github.com/hyperthunk/annotations
+pkg_annotations_fetch = git
+pkg_annotations_repo = https://github.com/hyperthunk/annotations
+pkg_annotations_commit = master
+
+PACKAGES += antidote
+pkg_antidote_name = antidote
+pkg_antidote_description = Large-scale computation without synchronisation
+pkg_antidote_homepage = https://syncfree.lip6.fr/
+pkg_antidote_fetch = git
+pkg_antidote_repo = https://github.com/SyncFree/antidote
+pkg_antidote_commit = master
+
+PACKAGES += apns
+pkg_apns_name = apns
+pkg_apns_description = Apple Push Notification Server for Erlang
+pkg_apns_homepage = http://inaka.github.com/apns4erl
+pkg_apns_fetch = git
+pkg_apns_repo = https://github.com/inaka/apns4erl
+pkg_apns_commit = master
+
+PACKAGES += asciideck
+pkg_asciideck_name = asciideck
+pkg_asciideck_description = Asciidoc for Erlang.
+pkg_asciideck_homepage = https://ninenines.eu
+pkg_asciideck_fetch = git
+pkg_asciideck_repo = https://github.com/ninenines/asciideck
+pkg_asciideck_commit = master
+
+PACKAGES += azdht
+pkg_azdht_name = azdht
+pkg_azdht_description = Azureus Distributed Hash Table (DHT) in Erlang
+pkg_azdht_homepage = https://github.com/arcusfelis/azdht
+pkg_azdht_fetch = git
+pkg_azdht_repo = https://github.com/arcusfelis/azdht
+pkg_azdht_commit = master
+
+PACKAGES += backoff
+pkg_backoff_name = backoff
+pkg_backoff_description = Simple exponential backoffs in Erlang
+pkg_backoff_homepage = https://github.com/ferd/backoff
+pkg_backoff_fetch = git
+pkg_backoff_repo = https://github.com/ferd/backoff
+pkg_backoff_commit = master
+
+PACKAGES += barrel_tcp
+pkg_barrel_tcp_name = barrel_tcp
+pkg_barrel_tcp_description = barrel is a generic TCP acceptor pool with low latency in Erlang.
+pkg_barrel_tcp_homepage = https://github.com/benoitc-attic/barrel_tcp
+pkg_barrel_tcp_fetch = git
+pkg_barrel_tcp_repo = https://github.com/benoitc-attic/barrel_tcp
+pkg_barrel_tcp_commit = master
+
+PACKAGES += basho_bench
+pkg_basho_bench_name = basho_bench
+pkg_basho_bench_description = A load-generation and testing tool for basically whatever you can write a returning Erlang function for.
+pkg_basho_bench_homepage = https://github.com/basho/basho_bench
+pkg_basho_bench_fetch = git
+pkg_basho_bench_repo = https://github.com/basho/basho_bench
+pkg_basho_bench_commit = master
+
+PACKAGES += bcrypt
+pkg_bcrypt_name = bcrypt
+pkg_bcrypt_description = Bcrypt Erlang / C library
+pkg_bcrypt_homepage = https://github.com/erlangpack/bcrypt
+pkg_bcrypt_fetch = git
+pkg_bcrypt_repo = https://github.com/erlangpack/bcrypt.git
+pkg_bcrypt_commit = master
+
+PACKAGES += beam
+pkg_beam_name = beam
+pkg_beam_description = BEAM emulator written in Erlang
+pkg_beam_homepage = https://github.com/tonyrog/beam
+pkg_beam_fetch = git
+pkg_beam_repo = https://github.com/tonyrog/beam
+pkg_beam_commit = master
+
+PACKAGES += beanstalk
+pkg_beanstalk_name = beanstalk
+pkg_beanstalk_description = An Erlang client for beanstalkd
+pkg_beanstalk_homepage = https://github.com/tim/erlang-beanstalk
+pkg_beanstalk_fetch = git
+pkg_beanstalk_repo = https://github.com/tim/erlang-beanstalk
+pkg_beanstalk_commit = master
+
+PACKAGES += bear
+pkg_bear_name = bear
+pkg_bear_description = a set of statistics functions for erlang
+pkg_bear_homepage = https://github.com/boundary/bear
+pkg_bear_fetch = git
+pkg_bear_repo = https://github.com/boundary/bear
+pkg_bear_commit = master
+
+PACKAGES += bertconf
+pkg_bertconf_name = bertconf
+pkg_bertconf_description = Make ETS tables out of static BERT files that are auto-reloaded
+pkg_bertconf_homepage = https://github.com/ferd/bertconf
+pkg_bertconf_fetch = git
+pkg_bertconf_repo = https://github.com/ferd/bertconf
+pkg_bertconf_commit = master
+
+PACKAGES += bifrost
+pkg_bifrost_name = bifrost
+pkg_bifrost_description = Erlang FTP Server Framework
+pkg_bifrost_homepage = https://github.com/thorstadt/bifrost
+pkg_bifrost_fetch = git
+pkg_bifrost_repo = https://github.com/thorstadt/bifrost
+pkg_bifrost_commit = master
+
+PACKAGES += binpp
+pkg_binpp_name = binpp
+pkg_binpp_description = Erlang Binary Pretty Printer
+pkg_binpp_homepage = https://github.com/jtendo/binpp
+pkg_binpp_fetch = git
+pkg_binpp_repo = https://github.com/jtendo/binpp
+pkg_binpp_commit = master
+
+PACKAGES += bisect
+pkg_bisect_name = bisect
+pkg_bisect_description = Ordered fixed-size binary dictionary in Erlang
+pkg_bisect_homepage = https://github.com/knutin/bisect
+pkg_bisect_fetch = git
+pkg_bisect_repo = https://github.com/knutin/bisect
+pkg_bisect_commit = master
+
+PACKAGES += bitcask
+pkg_bitcask_name = bitcask
+pkg_bitcask_description = because you need another key/value storage engine
+pkg_bitcask_homepage = https://github.com/basho/bitcask
+pkg_bitcask_fetch = git
+pkg_bitcask_repo = https://github.com/basho/bitcask
+pkg_bitcask_commit = develop
+
+PACKAGES += bitstore
+pkg_bitstore_name = bitstore
+pkg_bitstore_description = A document based ontology development environment
+pkg_bitstore_homepage = https://github.com/bdionne/bitstore
+pkg_bitstore_fetch = git
+pkg_bitstore_repo = https://github.com/bdionne/bitstore
+pkg_bitstore_commit = master
+
+PACKAGES += bootstrap
+pkg_bootstrap_name = bootstrap
+pkg_bootstrap_description = A simple, yet powerful Erlang cluster bootstrapping application.
+pkg_bootstrap_homepage = https://github.com/schlagert/bootstrap
+pkg_bootstrap_fetch = git
+pkg_bootstrap_repo = https://github.com/schlagert/bootstrap
+pkg_bootstrap_commit = master
+
+PACKAGES += boss
+pkg_boss_name = boss
+pkg_boss_description = Erlang web MVC, now featuring Comet
+pkg_boss_homepage = https://github.com/ChicagoBoss/ChicagoBoss
+pkg_boss_fetch = git
+pkg_boss_repo = https://github.com/ChicagoBoss/ChicagoBoss
+pkg_boss_commit = master
+
+PACKAGES += boss_db
+pkg_boss_db_name = boss_db
+pkg_boss_db_description = BossDB: a sharded, caching, pooling, evented ORM for Erlang
+pkg_boss_db_homepage = https://github.com/ErlyORM/boss_db
+pkg_boss_db_fetch = git
+pkg_boss_db_repo = https://github.com/ErlyORM/boss_db
+pkg_boss_db_commit = master
+
+PACKAGES += brod
+pkg_brod_name = brod
+pkg_brod_description = Kafka client in Erlang
+pkg_brod_homepage = https://github.com/klarna/brod
+pkg_brod_fetch = git
+pkg_brod_repo = https://github.com/klarna/brod.git
+pkg_brod_commit = master
+
+PACKAGES += bson
+pkg_bson_name = bson
+pkg_bson_description = BSON documents in Erlang, see bsonspec.org
+pkg_bson_homepage = https://github.com/comtihon/bson-erlang
+pkg_bson_fetch = git
+pkg_bson_repo = https://github.com/comtihon/bson-erlang
+pkg_bson_commit = master
+
+PACKAGES += bullet
+pkg_bullet_name = bullet
+pkg_bullet_description = Simple, reliable, efficient streaming for Cowboy.
+pkg_bullet_homepage = http://ninenines.eu
+pkg_bullet_fetch = git
+pkg_bullet_repo = https://github.com/ninenines/bullet
+pkg_bullet_commit = master
+
+PACKAGES += cache
+pkg_cache_name = cache
+pkg_cache_description = Erlang in-memory cache
+pkg_cache_homepage = https://github.com/fogfish/cache
+pkg_cache_fetch = git
+pkg_cache_repo = https://github.com/fogfish/cache
+pkg_cache_commit = master
+
+PACKAGES += cake
+pkg_cake_name = cake
+pkg_cake_description = Really simple terminal colorization
+pkg_cake_homepage = https://github.com/darach/cake-erl
+pkg_cake_fetch = git
+pkg_cake_repo = https://github.com/darach/cake-erl
+pkg_cake_commit = master
+
+PACKAGES += carotene
+pkg_carotene_name = carotene
+pkg_carotene_description = Real-time server
+pkg_carotene_homepage = https://github.com/carotene/carotene
+pkg_carotene_fetch = git
+pkg_carotene_repo = https://github.com/carotene/carotene
+pkg_carotene_commit = master
+
+PACKAGES += cberl
+pkg_cberl_name = cberl
+pkg_cberl_description = NIF based Erlang bindings for Couchbase
+pkg_cberl_homepage = https://github.com/chitika/cberl
+pkg_cberl_fetch = git
+pkg_cberl_repo = https://github.com/chitika/cberl
+pkg_cberl_commit = master
+
+PACKAGES += cecho
+pkg_cecho_name = cecho
+pkg_cecho_description = An ncurses library for Erlang
+pkg_cecho_homepage = https://github.com/mazenharake/cecho
+pkg_cecho_fetch = git
+pkg_cecho_repo = https://github.com/mazenharake/cecho
+pkg_cecho_commit = master
+
+PACKAGES += cferl
+pkg_cferl_name = cferl
+pkg_cferl_description = Rackspace / Open Stack Cloud Files Erlang Client
+pkg_cferl_homepage = https://github.com/ddossot/cferl
+pkg_cferl_fetch = git
+pkg_cferl_repo = https://github.com/ddossot/cferl
+pkg_cferl_commit = master
+
+PACKAGES += chaos_monkey
+pkg_chaos_monkey_name = chaos_monkey
+pkg_chaos_monkey_description = This is The CHAOS MONKEY. It will kill your processes.
+pkg_chaos_monkey_homepage = https://github.com/dLuna/chaos_monkey
+pkg_chaos_monkey_fetch = git
+pkg_chaos_monkey_repo = https://github.com/dLuna/chaos_monkey
+pkg_chaos_monkey_commit = master
+
+PACKAGES += check_node
+pkg_check_node_name = check_node
+pkg_check_node_description = Nagios Scripts for monitoring Riak
+pkg_check_node_homepage = https://github.com/basho-labs/riak_nagios
+pkg_check_node_fetch = git
+pkg_check_node_repo = https://github.com/basho-labs/riak_nagios
+pkg_check_node_commit = master
+
+PACKAGES += chronos
+pkg_chronos_name = chronos
+pkg_chronos_description = Timer module for Erlang that makes it easy to abstract time out of the tests.
+pkg_chronos_homepage = https://github.com/lehoff/chronos
+pkg_chronos_fetch = git
+pkg_chronos_repo = https://github.com/lehoff/chronos
+pkg_chronos_commit = master
+
+PACKAGES += chumak
+pkg_chumak_name = chumak
+pkg_chumak_description = Pure Erlang implementation of ZeroMQ Message Transport Protocol.
+pkg_chumak_homepage = http://choven.ca
+pkg_chumak_fetch = git
+pkg_chumak_repo = https://github.com/chovencorp/chumak
+pkg_chumak_commit = master
+
+PACKAGES += cl
+pkg_cl_name = cl
+pkg_cl_description = OpenCL binding for Erlang
+pkg_cl_homepage = https://github.com/tonyrog/cl
+pkg_cl_fetch = git
+pkg_cl_repo = https://github.com/tonyrog/cl
+pkg_cl_commit = master
+
+PACKAGES += clique
+pkg_clique_name = clique
+pkg_clique_description = CLI Framework for Erlang
+pkg_clique_homepage = https://github.com/basho/clique
+pkg_clique_fetch = git
+pkg_clique_repo = https://github.com/basho/clique
+pkg_clique_commit = develop
+
+PACKAGES += cloudi_core
+pkg_cloudi_core_name = cloudi_core
+pkg_cloudi_core_description = CloudI internal service runtime
+pkg_cloudi_core_homepage = http://cloudi.org/
+pkg_cloudi_core_fetch = git
+pkg_cloudi_core_repo = https://github.com/CloudI/cloudi_core
+pkg_cloudi_core_commit = master
+
+PACKAGES += cloudi_service_api_requests
+pkg_cloudi_service_api_requests_name = cloudi_service_api_requests
+pkg_cloudi_service_api_requests_description = CloudI Service API requests (JSON-RPC/Erlang-term support)
+pkg_cloudi_service_api_requests_homepage = http://cloudi.org/
+pkg_cloudi_service_api_requests_fetch = git
+pkg_cloudi_service_api_requests_repo = https://github.com/CloudI/cloudi_service_api_requests
+pkg_cloudi_service_api_requests_commit = master
+
+PACKAGES += cloudi_service_db
+pkg_cloudi_service_db_name = cloudi_service_db
+pkg_cloudi_service_db_description = CloudI Database (in-memory/testing/generic)
+pkg_cloudi_service_db_homepage = http://cloudi.org/
+pkg_cloudi_service_db_fetch = git
+pkg_cloudi_service_db_repo = https://github.com/CloudI/cloudi_service_db
+pkg_cloudi_service_db_commit = master
+
+PACKAGES += cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_name = cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_description = Cassandra CloudI Service
+pkg_cloudi_service_db_cassandra_homepage = http://cloudi.org/
+pkg_cloudi_service_db_cassandra_fetch = git
+pkg_cloudi_service_db_cassandra_repo = https://github.com/CloudI/cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_commit = master
+
+PACKAGES += cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_name = cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_description = Cassandra CQL CloudI Service
+pkg_cloudi_service_db_cassandra_cql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_cassandra_cql_fetch = git
+pkg_cloudi_service_db_cassandra_cql_repo = https://github.com/CloudI/cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_commit = master
+
+PACKAGES += cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_name = cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_description = CouchDB CloudI Service
+pkg_cloudi_service_db_couchdb_homepage = http://cloudi.org/
+pkg_cloudi_service_db_couchdb_fetch = git
+pkg_cloudi_service_db_couchdb_repo = https://github.com/CloudI/cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_commit = master
+
+PACKAGES += cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_name = cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_description = elasticsearch CloudI Service
+pkg_cloudi_service_db_elasticsearch_homepage = http://cloudi.org/
+pkg_cloudi_service_db_elasticsearch_fetch = git
+pkg_cloudi_service_db_elasticsearch_repo = https://github.com/CloudI/cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_commit = master
+
+PACKAGES += cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_name = cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_description = memcached CloudI Service
+pkg_cloudi_service_db_memcached_homepage = http://cloudi.org/
+pkg_cloudi_service_db_memcached_fetch = git
+pkg_cloudi_service_db_memcached_repo = https://github.com/CloudI/cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_commit = master
+
+PACKAGES += cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_name = cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_description = MySQL CloudI Service
+pkg_cloudi_service_db_mysql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_mysql_fetch = git
+pkg_cloudi_service_db_mysql_repo = https://github.com/CloudI/cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_commit = master
+
+PACKAGES += cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_name = cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_description = PostgreSQL CloudI Service
+pkg_cloudi_service_db_pgsql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_pgsql_fetch = git
+pkg_cloudi_service_db_pgsql_repo = https://github.com/CloudI/cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_commit = master
+
+PACKAGES += cloudi_service_db_riak
+pkg_cloudi_service_db_riak_name = cloudi_service_db_riak
+pkg_cloudi_service_db_riak_description = Riak CloudI Service
+pkg_cloudi_service_db_riak_homepage = http://cloudi.org/
+pkg_cloudi_service_db_riak_fetch = git
+pkg_cloudi_service_db_riak_repo = https://github.com/CloudI/cloudi_service_db_riak
+pkg_cloudi_service_db_riak_commit = master
+
+PACKAGES += cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_name = cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_description = Tokyo Tyrant CloudI Service
+pkg_cloudi_service_db_tokyotyrant_homepage = http://cloudi.org/
+pkg_cloudi_service_db_tokyotyrant_fetch = git
+pkg_cloudi_service_db_tokyotyrant_repo = https://github.com/CloudI/cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_commit = master
+
+PACKAGES += cloudi_service_filesystem
+pkg_cloudi_service_filesystem_name = cloudi_service_filesystem
+pkg_cloudi_service_filesystem_description = Filesystem CloudI Service
+pkg_cloudi_service_filesystem_homepage = http://cloudi.org/
+pkg_cloudi_service_filesystem_fetch = git
+pkg_cloudi_service_filesystem_repo = https://github.com/CloudI/cloudi_service_filesystem
+pkg_cloudi_service_filesystem_commit = master
+
+PACKAGES += cloudi_service_http_client
+pkg_cloudi_service_http_client_name = cloudi_service_http_client
+pkg_cloudi_service_http_client_description = HTTP client CloudI Service
+pkg_cloudi_service_http_client_homepage = http://cloudi.org/
+pkg_cloudi_service_http_client_fetch = git
+pkg_cloudi_service_http_client_repo = https://github.com/CloudI/cloudi_service_http_client
+pkg_cloudi_service_http_client_commit = master
+
+PACKAGES += cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_name = cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_description = cowboy HTTP/HTTPS CloudI Service
+pkg_cloudi_service_http_cowboy_homepage = http://cloudi.org/
+pkg_cloudi_service_http_cowboy_fetch = git
+pkg_cloudi_service_http_cowboy_repo = https://github.com/CloudI/cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_commit = master
+
+PACKAGES += cloudi_service_http_elli
+pkg_cloudi_service_http_elli_name = cloudi_service_http_elli
+pkg_cloudi_service_http_elli_description = elli HTTP CloudI Service
+pkg_cloudi_service_http_elli_homepage = http://cloudi.org/
+pkg_cloudi_service_http_elli_fetch = git
+pkg_cloudi_service_http_elli_repo = https://github.com/CloudI/cloudi_service_http_elli
+pkg_cloudi_service_http_elli_commit = master
+
+PACKAGES += cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_name = cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_description = Map/Reduce CloudI Service
+pkg_cloudi_service_map_reduce_homepage = http://cloudi.org/
+pkg_cloudi_service_map_reduce_fetch = git
+pkg_cloudi_service_map_reduce_repo = https://github.com/CloudI/cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_commit = master
+
+PACKAGES += cloudi_service_oauth1
+pkg_cloudi_service_oauth1_name = cloudi_service_oauth1
+pkg_cloudi_service_oauth1_description = OAuth v1.0 CloudI Service
+pkg_cloudi_service_oauth1_homepage = http://cloudi.org/
+pkg_cloudi_service_oauth1_fetch = git
+pkg_cloudi_service_oauth1_repo = https://github.com/CloudI/cloudi_service_oauth1
+pkg_cloudi_service_oauth1_commit = master
+
+PACKAGES += cloudi_service_queue
+pkg_cloudi_service_queue_name = cloudi_service_queue
+pkg_cloudi_service_queue_description = Persistent Queue Service
+pkg_cloudi_service_queue_homepage = http://cloudi.org/
+pkg_cloudi_service_queue_fetch = git
+pkg_cloudi_service_queue_repo = https://github.com/CloudI/cloudi_service_queue
+pkg_cloudi_service_queue_commit = master
+
+PACKAGES += cloudi_service_quorum
+pkg_cloudi_service_quorum_name = cloudi_service_quorum
+pkg_cloudi_service_quorum_description = CloudI Quorum Service
+pkg_cloudi_service_quorum_homepage = http://cloudi.org/
+pkg_cloudi_service_quorum_fetch = git
+pkg_cloudi_service_quorum_repo = https://github.com/CloudI/cloudi_service_quorum
+pkg_cloudi_service_quorum_commit = master
+
+PACKAGES += cloudi_service_router
+pkg_cloudi_service_router_name = cloudi_service_router
+pkg_cloudi_service_router_description = CloudI Router Service
+pkg_cloudi_service_router_homepage = http://cloudi.org/
+pkg_cloudi_service_router_fetch = git
+pkg_cloudi_service_router_repo = https://github.com/CloudI/cloudi_service_router
+pkg_cloudi_service_router_commit = master
+
+PACKAGES += cloudi_service_tcp
+pkg_cloudi_service_tcp_name = cloudi_service_tcp
+pkg_cloudi_service_tcp_description = TCP CloudI Service
+pkg_cloudi_service_tcp_homepage = http://cloudi.org/
+pkg_cloudi_service_tcp_fetch = git
+pkg_cloudi_service_tcp_repo = https://github.com/CloudI/cloudi_service_tcp
+pkg_cloudi_service_tcp_commit = master
+
+PACKAGES += cloudi_service_timers
+pkg_cloudi_service_timers_name = cloudi_service_timers
+pkg_cloudi_service_timers_description = Timers CloudI Service
+pkg_cloudi_service_timers_homepage = http://cloudi.org/
+pkg_cloudi_service_timers_fetch = git
+pkg_cloudi_service_timers_repo = https://github.com/CloudI/cloudi_service_timers
+pkg_cloudi_service_timers_commit = master
+
+PACKAGES += cloudi_service_udp
+pkg_cloudi_service_udp_name = cloudi_service_udp
+pkg_cloudi_service_udp_description = UDP CloudI Service
+pkg_cloudi_service_udp_homepage = http://cloudi.org/
+pkg_cloudi_service_udp_fetch = git
+pkg_cloudi_service_udp_repo = https://github.com/CloudI/cloudi_service_udp
+pkg_cloudi_service_udp_commit = master
+
+PACKAGES += cloudi_service_validate
+pkg_cloudi_service_validate_name = cloudi_service_validate
+pkg_cloudi_service_validate_description = CloudI Validate Service
+pkg_cloudi_service_validate_homepage = http://cloudi.org/
+pkg_cloudi_service_validate_fetch = git
+pkg_cloudi_service_validate_repo = https://github.com/CloudI/cloudi_service_validate
+pkg_cloudi_service_validate_commit = master
+
+PACKAGES += cloudi_service_zeromq
+pkg_cloudi_service_zeromq_name = cloudi_service_zeromq
+pkg_cloudi_service_zeromq_description = ZeroMQ CloudI Service
+pkg_cloudi_service_zeromq_homepage = http://cloudi.org/
+pkg_cloudi_service_zeromq_fetch = git
+pkg_cloudi_service_zeromq_repo = https://github.com/CloudI/cloudi_service_zeromq
+pkg_cloudi_service_zeromq_commit = master
+
+PACKAGES += cluster_info
+pkg_cluster_info_name = cluster_info
+pkg_cluster_info_description = Fork of Hibari's nifty cluster_info OTP app
+pkg_cluster_info_homepage = https://github.com/basho/cluster_info
+pkg_cluster_info_fetch = git
+pkg_cluster_info_repo = https://github.com/basho/cluster_info
+pkg_cluster_info_commit = master
+
+PACKAGES += color
+pkg_color_name = color
+pkg_color_description = ANSI colors for your Erlang
+pkg_color_homepage = https://github.com/julianduque/erlang-color
+pkg_color_fetch = git
+pkg_color_repo = https://github.com/julianduque/erlang-color
+pkg_color_commit = master
+
+PACKAGES += confetti
+pkg_confetti_name = confetti
+pkg_confetti_description = Erlang configuration provider / application:get_env/2 on steroids
+pkg_confetti_homepage = https://github.com/jtendo/confetti
+pkg_confetti_fetch = git
+pkg_confetti_repo = https://github.com/jtendo/confetti
+pkg_confetti_commit = master
+
+PACKAGES += couchbeam
+pkg_couchbeam_name = couchbeam
+pkg_couchbeam_description = Apache CouchDB client in Erlang
+pkg_couchbeam_homepage = https://github.com/benoitc/couchbeam
+pkg_couchbeam_fetch = git
+pkg_couchbeam_repo = https://github.com/benoitc/couchbeam
+pkg_couchbeam_commit = master
+
+PACKAGES += covertool
+pkg_covertool_name = covertool
+pkg_covertool_description = Tool to convert Erlang cover data files into Cobertura XML reports
+pkg_covertool_homepage = https://github.com/idubrov/covertool
+pkg_covertool_fetch = git
+pkg_covertool_repo = https://github.com/idubrov/covertool
+pkg_covertool_commit = master
+
+PACKAGES += cowboy
+pkg_cowboy_name = cowboy
+pkg_cowboy_description = Small, fast and modular HTTP server.
+pkg_cowboy_homepage = http://ninenines.eu
+pkg_cowboy_fetch = git
+pkg_cowboy_repo = https://github.com/ninenines/cowboy
+pkg_cowboy_commit = 1.0.4
+
+PACKAGES += cowdb
+pkg_cowdb_name = cowdb
+pkg_cowdb_description = Pure Key/Value database library for Erlang Applications
+pkg_cowdb_homepage = https://github.com/refuge/cowdb
+pkg_cowdb_fetch = git
+pkg_cowdb_repo = https://github.com/refuge/cowdb
+pkg_cowdb_commit = master
+
+PACKAGES += cowlib
+pkg_cowlib_name = cowlib
+pkg_cowlib_description = Support library for manipulating Web protocols.
+pkg_cowlib_homepage = http://ninenines.eu
+pkg_cowlib_fetch = git
+pkg_cowlib_repo = https://github.com/ninenines/cowlib
+pkg_cowlib_commit = 1.0.2
+
+PACKAGES += cpg
+pkg_cpg_name = cpg
+pkg_cpg_description = CloudI Process Groups
+pkg_cpg_homepage = https://github.com/okeuday/cpg
+pkg_cpg_fetch = git
+pkg_cpg_repo = https://github.com/okeuday/cpg
+pkg_cpg_commit = master
+
+PACKAGES += cqerl
+pkg_cqerl_name = cqerl
+pkg_cqerl_description = Native Erlang CQL client for Cassandra
+pkg_cqerl_homepage = https://matehat.github.io/cqerl/
+pkg_cqerl_fetch = git
+pkg_cqerl_repo = https://github.com/matehat/cqerl
+pkg_cqerl_commit = master
+
+PACKAGES += cr
+pkg_cr_name = cr
+pkg_cr_description = Chain Replication
+pkg_cr_homepage = https://synrc.com/apps/cr/doc/cr.htm
+pkg_cr_fetch = git
+pkg_cr_repo = https://github.com/spawnproc/cr
+pkg_cr_commit = master
+
+PACKAGES += cuttlefish
+pkg_cuttlefish_name = cuttlefish
+pkg_cuttlefish_description = never lose your childlike sense of wonder baby cuttlefish, promise me?
+pkg_cuttlefish_homepage = https://github.com/basho/cuttlefish
+pkg_cuttlefish_fetch = git
+pkg_cuttlefish_repo = https://github.com/basho/cuttlefish
+pkg_cuttlefish_commit = master
+
+PACKAGES += damocles
+pkg_damocles_name = damocles
+pkg_damocles_description = Erlang library for generating adversarial network conditions for QAing distributed applications/systems on a single Linux box.
+pkg_damocles_homepage = https://github.com/lostcolony/damocles
+pkg_damocles_fetch = git
+pkg_damocles_repo = https://github.com/lostcolony/damocles
+pkg_damocles_commit = master
+
+PACKAGES += debbie
+pkg_debbie_name = debbie
+pkg_debbie_description = .DEB Built In Erlang
+pkg_debbie_homepage = https://github.com/crownedgrouse/debbie
+pkg_debbie_fetch = git
+pkg_debbie_repo = https://github.com/crownedgrouse/debbie
+pkg_debbie_commit = master
+
+PACKAGES += decimal
+pkg_decimal_name = decimal
+pkg_decimal_description = An Erlang decimal arithmetic library
+pkg_decimal_homepage = https://github.com/tim/erlang-decimal
+pkg_decimal_fetch = git
+pkg_decimal_repo = https://github.com/tim/erlang-decimal
+pkg_decimal_commit = master
+
+PACKAGES += detergent
+pkg_detergent_name = detergent
+pkg_detergent_description = An emulsifying Erlang SOAP library
+pkg_detergent_homepage = https://github.com/devinus/detergent
+pkg_detergent_fetch = git
+pkg_detergent_repo = https://github.com/devinus/detergent
+pkg_detergent_commit = master
+
+PACKAGES += detest
+pkg_detest_name = detest
+pkg_detest_description = Tool for running tests on a cluster of erlang nodes
+pkg_detest_homepage = https://github.com/biokoda/detest
+pkg_detest_fetch = git
+pkg_detest_repo = https://github.com/biokoda/detest
+pkg_detest_commit = master
+
+PACKAGES += dh_date
+pkg_dh_date_name = dh_date
+pkg_dh_date_description = Date formatting / parsing library for erlang
+pkg_dh_date_homepage = https://github.com/daleharvey/dh_date
+pkg_dh_date_fetch = git
+pkg_dh_date_repo = https://github.com/daleharvey/dh_date
+pkg_dh_date_commit = master
+
+PACKAGES += dirbusterl
+pkg_dirbusterl_name = dirbusterl
+pkg_dirbusterl_description = DirBuster successor in Erlang
+pkg_dirbusterl_homepage = https://github.com/silentsignal/DirBustErl
+pkg_dirbusterl_fetch = git
+pkg_dirbusterl_repo = https://github.com/silentsignal/DirBustErl
+pkg_dirbusterl_commit = master
+
+PACKAGES += dispcount
+pkg_dispcount_name = dispcount
+pkg_dispcount_description = Erlang task dispatcher based on ETS counters.
+pkg_dispcount_homepage = https://github.com/ferd/dispcount
+pkg_dispcount_fetch = git
+pkg_dispcount_repo = https://github.com/ferd/dispcount
+pkg_dispcount_commit = master
+
+PACKAGES += dlhttpc
+pkg_dlhttpc_name = dlhttpc
+pkg_dlhttpc_description = dispcount-based lhttpc fork for massive amounts of requests to limited endpoints
+pkg_dlhttpc_homepage = https://github.com/ferd/dlhttpc
+pkg_dlhttpc_fetch = git
+pkg_dlhttpc_repo = https://github.com/ferd/dlhttpc
+pkg_dlhttpc_commit = master
+
+PACKAGES += dns
+pkg_dns_name = dns
+pkg_dns_description = Erlang DNS library
+pkg_dns_homepage = https://github.com/aetrion/dns_erlang
+pkg_dns_fetch = git
+pkg_dns_repo = https://github.com/aetrion/dns_erlang
+pkg_dns_commit = master
+
+PACKAGES += dnssd
+pkg_dnssd_name = dnssd
+pkg_dnssd_description = Erlang interface to Apple's Bonjour DNS Service Discovery implementation
+pkg_dnssd_homepage = https://github.com/benoitc/dnssd_erlang
+pkg_dnssd_fetch = git
+pkg_dnssd_repo = https://github.com/benoitc/dnssd_erlang
+pkg_dnssd_commit = master
+
+PACKAGES += dynamic_compile
+pkg_dynamic_compile_name = dynamic_compile
+pkg_dynamic_compile_description = compile and load erlang modules from string input
+pkg_dynamic_compile_homepage = https://github.com/jkvor/dynamic_compile
+pkg_dynamic_compile_fetch = git
+pkg_dynamic_compile_repo = https://github.com/jkvor/dynamic_compile
+pkg_dynamic_compile_commit = master
+
+PACKAGES += e2
+pkg_e2_name = e2
+pkg_e2_description = Library to simplify writing correct OTP applications.
+pkg_e2_homepage = http://e2project.org
+pkg_e2_fetch = git
+pkg_e2_repo = https://github.com/gar1t/e2
+pkg_e2_commit = master
+
+PACKAGES += eamf
+pkg_eamf_name = eamf
+pkg_eamf_description = eAMF provides Action Message Format (AMF) support for Erlang
+pkg_eamf_homepage = https://github.com/mrinalwadhwa/eamf
+pkg_eamf_fetch = git
+pkg_eamf_repo = https://github.com/mrinalwadhwa/eamf
+pkg_eamf_commit = master
+
+PACKAGES += eavro
+pkg_eavro_name = eavro
+pkg_eavro_description = Apache Avro encoder/decoder
+pkg_eavro_homepage = https://github.com/SIfoxDevTeam/eavro
+pkg_eavro_fetch = git
+pkg_eavro_repo = https://github.com/SIfoxDevTeam/eavro
+pkg_eavro_commit = master
+
+PACKAGES += ecapnp
+pkg_ecapnp_name = ecapnp
+pkg_ecapnp_description = Cap'n Proto library for Erlang
+pkg_ecapnp_homepage = https://github.com/kaos/ecapnp
+pkg_ecapnp_fetch = git
+pkg_ecapnp_repo = https://github.com/kaos/ecapnp
+pkg_ecapnp_commit = master
+
+PACKAGES += econfig
+pkg_econfig_name = econfig
+pkg_econfig_description = simple Erlang config handler using INI files
+pkg_econfig_homepage = https://github.com/benoitc/econfig
+pkg_econfig_fetch = git
+pkg_econfig_repo = https://github.com/benoitc/econfig
+pkg_econfig_commit = master
+
+PACKAGES += edate
+pkg_edate_name = edate
+pkg_edate_description = date manipulation library for erlang
+pkg_edate_homepage = https://github.com/dweldon/edate
+pkg_edate_fetch = git
+pkg_edate_repo = https://github.com/dweldon/edate
+pkg_edate_commit = master
+
+PACKAGES += edgar
+pkg_edgar_name = edgar
+pkg_edgar_description = Erlang Does GNU AR
+pkg_edgar_homepage = https://github.com/crownedgrouse/edgar
+pkg_edgar_fetch = git
+pkg_edgar_repo = https://github.com/crownedgrouse/edgar
+pkg_edgar_commit = master
+
+PACKAGES += edis
+pkg_edis_name = edis
+pkg_edis_description = An Erlang implementation of Redis KV Store
+pkg_edis_homepage = http://inaka.github.com/edis/
+pkg_edis_fetch = git
+pkg_edis_repo = https://github.com/inaka/edis
+pkg_edis_commit = master
+
+PACKAGES += edns
+pkg_edns_name = edns
+pkg_edns_description = Erlang/OTP DNS server
+pkg_edns_homepage = https://github.com/hcvst/erlang-dns
+pkg_edns_fetch = git
+pkg_edns_repo = https://github.com/hcvst/erlang-dns
+pkg_edns_commit = master
+
+PACKAGES += edown
+pkg_edown_name = edown
+pkg_edown_description = EDoc extension for generating Github-flavored Markdown
+pkg_edown_homepage = https://github.com/uwiger/edown
+pkg_edown_fetch = git
+pkg_edown_repo = https://github.com/uwiger/edown
+pkg_edown_commit = master
+
+PACKAGES += eep
+pkg_eep_name = eep
+pkg_eep_description = Erlang Easy Profiling (eep) application provides a way to analyze application performance and call hierarchy
+pkg_eep_homepage = https://github.com/virtan/eep
+pkg_eep_fetch = git
+pkg_eep_repo = https://github.com/virtan/eep
+pkg_eep_commit = master
+
+PACKAGES += eep_app
+pkg_eep_app_name = eep_app
+pkg_eep_app_description = Embedded Event Processing
+pkg_eep_app_homepage = https://github.com/darach/eep-erl
+pkg_eep_app_fetch = git
+pkg_eep_app_repo = https://github.com/darach/eep-erl
+pkg_eep_app_commit = master
+
+PACKAGES += efene
+pkg_efene_name = efene
+pkg_efene_description = Alternative syntax for the Erlang Programming Language focusing on simplicity, ease of use and programmer UX
+pkg_efene_homepage = https://github.com/efene/efene
+pkg_efene_fetch = git
+pkg_efene_repo = https://github.com/efene/efene
+pkg_efene_commit = master
+
+PACKAGES += egeoip
+pkg_egeoip_name = egeoip
+pkg_egeoip_description = Erlang IP Geolocation module, currently supporting the MaxMind GeoLite City Database.
+pkg_egeoip_homepage = https://github.com/mochi/egeoip
+pkg_egeoip_fetch = git
+pkg_egeoip_repo = https://github.com/mochi/egeoip
+pkg_egeoip_commit = master
+
+PACKAGES += ehsa
+pkg_ehsa_name = ehsa
+pkg_ehsa_description = Erlang HTTP server basic and digest authentication modules
+pkg_ehsa_homepage = https://bitbucket.org/a12n/ehsa
+pkg_ehsa_fetch = hg
+pkg_ehsa_repo = https://bitbucket.org/a12n/ehsa
+pkg_ehsa_commit = default
+
+PACKAGES += ej
+pkg_ej_name = ej
+pkg_ej_description = Helper module for working with Erlang terms representing JSON
+pkg_ej_homepage = https://github.com/seth/ej
+pkg_ej_fetch = git
+pkg_ej_repo = https://github.com/seth/ej
+pkg_ej_commit = master
+
+PACKAGES += ejabberd
+pkg_ejabberd_name = ejabberd
+pkg_ejabberd_description = Robust, ubiquitous and massively scalable Jabber / XMPP Instant Messaging platform
+pkg_ejabberd_homepage = https://github.com/processone/ejabberd
+pkg_ejabberd_fetch = git
+pkg_ejabberd_repo = https://github.com/processone/ejabberd
+pkg_ejabberd_commit = master
+
+PACKAGES += ejwt
+pkg_ejwt_name = ejwt
+pkg_ejwt_description = Erlang library for JSON Web Tokens (JWT)
+pkg_ejwt_homepage = https://github.com/artefactop/ejwt
+pkg_ejwt_fetch = git
+pkg_ejwt_repo = https://github.com/artefactop/ejwt
+pkg_ejwt_commit = master
+
+PACKAGES += ekaf
+pkg_ekaf_name = ekaf
+pkg_ekaf_description = A minimal, high-performance Kafka client in Erlang.
+pkg_ekaf_homepage = https://github.com/helpshift/ekaf
+pkg_ekaf_fetch = git
+pkg_ekaf_repo = https://github.com/helpshift/ekaf
+pkg_ekaf_commit = master
+
+PACKAGES += elarm
+pkg_elarm_name = elarm
+pkg_elarm_description = Alarm Manager for Erlang.
+pkg_elarm_homepage = https://github.com/esl/elarm
+pkg_elarm_fetch = git
+pkg_elarm_repo = https://github.com/esl/elarm
+pkg_elarm_commit = master
+
+PACKAGES += eleveldb
+pkg_eleveldb_name = eleveldb
+pkg_eleveldb_description = Erlang LevelDB API
+pkg_eleveldb_homepage = https://github.com/basho/eleveldb
+pkg_eleveldb_fetch = git
+pkg_eleveldb_repo = https://github.com/basho/eleveldb
+pkg_eleveldb_commit = master
+
+PACKAGES += elixir
+pkg_elixir_name = elixir
+pkg_elixir_description = Elixir is a dynamic, functional language designed for building scalable and maintainable applications
+pkg_elixir_homepage = https://elixir-lang.org/
+pkg_elixir_fetch = git
+pkg_elixir_repo = https://github.com/elixir-lang/elixir
+pkg_elixir_commit = master
+
+PACKAGES += elli
+pkg_elli_name = elli
+pkg_elli_description = Simple, robust and performant Erlang web server
+pkg_elli_homepage = https://github.com/elli-lib/elli
+pkg_elli_fetch = git
+pkg_elli_repo = https://github.com/elli-lib/elli
+pkg_elli_commit = master
+
+PACKAGES += elvis
+pkg_elvis_name = elvis
+pkg_elvis_description = Erlang Style Reviewer
+pkg_elvis_homepage = https://github.com/inaka/elvis
+pkg_elvis_fetch = git
+pkg_elvis_repo = https://github.com/inaka/elvis
+pkg_elvis_commit = master
+
+PACKAGES += emagick
+pkg_emagick_name = emagick
+pkg_emagick_description = Wrapper for Graphics/ImageMagick command line tool.
+pkg_emagick_homepage = https://github.com/kivra/emagick
+pkg_emagick_fetch = git
+pkg_emagick_repo = https://github.com/kivra/emagick
+pkg_emagick_commit = master
+
+PACKAGES += emysql
+pkg_emysql_name = emysql
+pkg_emysql_description = Stable, pure Erlang MySQL driver.
+pkg_emysql_homepage = https://github.com/Eonblast/Emysql
+pkg_emysql_fetch = git
+pkg_emysql_repo = https://github.com/Eonblast/Emysql
+pkg_emysql_commit = master
+
+PACKAGES += enm
+pkg_enm_name = enm
+pkg_enm_description = Erlang driver for nanomsg
+pkg_enm_homepage = https://github.com/basho/enm
+pkg_enm_fetch = git
+pkg_enm_repo = https://github.com/basho/enm
+pkg_enm_commit = master
+
+PACKAGES += entop
+pkg_entop_name = entop
+pkg_entop_description = A top-like tool for monitoring an Erlang node
+pkg_entop_homepage = https://github.com/mazenharake/entop
+pkg_entop_fetch = git
+pkg_entop_repo = https://github.com/mazenharake/entop
+pkg_entop_commit = master
+
+PACKAGES += epcap
+pkg_epcap_name = epcap
+pkg_epcap_description = Erlang packet capture interface using pcap
+pkg_epcap_homepage = https://github.com/msantos/epcap
+pkg_epcap_fetch = git
+pkg_epcap_repo = https://github.com/msantos/epcap
+pkg_epcap_commit = master
+
+PACKAGES += eper
+pkg_eper_name = eper
+pkg_eper_description = Erlang performance and debugging tools.
+pkg_eper_homepage = https://github.com/massemanet/eper
+pkg_eper_fetch = git
+pkg_eper_repo = https://github.com/massemanet/eper
+pkg_eper_commit = master
+
+PACKAGES += epgsql
+pkg_epgsql_name = epgsql
+pkg_epgsql_description = Erlang PostgreSQL client library.
+pkg_epgsql_homepage = https://github.com/epgsql/epgsql
+pkg_epgsql_fetch = git
+pkg_epgsql_repo = https://github.com/epgsql/epgsql
+pkg_epgsql_commit = master
+
+PACKAGES += episcina
+pkg_episcina_name = episcina
+pkg_episcina_description = A simple, non-intrusive resource pool for connections
+pkg_episcina_homepage = https://github.com/erlware/episcina
+pkg_episcina_fetch = git
+pkg_episcina_repo = https://github.com/erlware/episcina
+pkg_episcina_commit = master
+
+PACKAGES += eplot
+pkg_eplot_name = eplot
+pkg_eplot_description = A plot engine written in erlang.
+pkg_eplot_homepage = https://github.com/psyeugenic/eplot
+pkg_eplot_fetch = git
+pkg_eplot_repo = https://github.com/psyeugenic/eplot
+pkg_eplot_commit = master
+
+PACKAGES += epocxy
+pkg_epocxy_name = epocxy
+pkg_epocxy_description = Erlang Patterns of Concurrency
+pkg_epocxy_homepage = https://github.com/duomark/epocxy
+pkg_epocxy_fetch = git
+pkg_epocxy_repo = https://github.com/duomark/epocxy
+pkg_epocxy_commit = master
+
+PACKAGES += epubnub
+pkg_epubnub_name = epubnub
+pkg_epubnub_description = Erlang PubNub API
+pkg_epubnub_homepage = https://github.com/tsloughter/epubnub
+pkg_epubnub_fetch = git
+pkg_epubnub_repo = https://github.com/tsloughter/epubnub
+pkg_epubnub_commit = master
+
+PACKAGES += eqm
+pkg_eqm_name = eqm
+pkg_eqm_description = Erlang pub sub with supply-demand channels
+pkg_eqm_homepage = https://github.com/loucash/eqm
+pkg_eqm_fetch = git
+pkg_eqm_repo = https://github.com/loucash/eqm
+pkg_eqm_commit = master
+
+PACKAGES += eredis
+pkg_eredis_name = eredis
+pkg_eredis_description = Erlang Redis client
+pkg_eredis_homepage = https://github.com/wooga/eredis
+pkg_eredis_fetch = git
+pkg_eredis_repo = https://github.com/wooga/eredis
+pkg_eredis_commit = master
+
+PACKAGES += eredis_pool
+pkg_eredis_pool_name = eredis_pool
+pkg_eredis_pool_description = eredis_pool is a pool of Redis clients, using eredis and poolboy.
+pkg_eredis_pool_homepage = https://github.com/hiroeorz/eredis_pool
+pkg_eredis_pool_fetch = git
+pkg_eredis_pool_repo = https://github.com/hiroeorz/eredis_pool
+pkg_eredis_pool_commit = master
+
+PACKAGES += erl_streams
+pkg_erl_streams_name = erl_streams
+pkg_erl_streams_description = Streams in Erlang
+pkg_erl_streams_homepage = https://github.com/epappas/erl_streams
+pkg_erl_streams_fetch = git
+pkg_erl_streams_repo = https://github.com/epappas/erl_streams
+pkg_erl_streams_commit = master
+
+PACKAGES += erlang_cep
+pkg_erlang_cep_name = erlang_cep
+pkg_erlang_cep_description = A basic CEP package written in erlang
+pkg_erlang_cep_homepage = https://github.com/danmacklin/erlang_cep
+pkg_erlang_cep_fetch = git
+pkg_erlang_cep_repo = https://github.com/danmacklin/erlang_cep
+pkg_erlang_cep_commit = master
+
+PACKAGES += erlang_js
+pkg_erlang_js_name = erlang_js
+pkg_erlang_js_description = A linked-in driver for Erlang to Mozilla's SpiderMonkey JavaScript runtime.
+pkg_erlang_js_homepage = https://github.com/basho/erlang_js
+pkg_erlang_js_fetch = git
+pkg_erlang_js_repo = https://github.com/basho/erlang_js
+pkg_erlang_js_commit = master
+
+PACKAGES += erlang_localtime
+pkg_erlang_localtime_name = erlang_localtime
+pkg_erlang_localtime_description = Erlang library for conversion from one local time to another
+pkg_erlang_localtime_homepage = https://github.com/dmitryme/erlang_localtime
+pkg_erlang_localtime_fetch = git
+pkg_erlang_localtime_repo = https://github.com/dmitryme/erlang_localtime
+pkg_erlang_localtime_commit = master
+
+PACKAGES += erlang_smtp
+pkg_erlang_smtp_name = erlang_smtp
+pkg_erlang_smtp_description = Erlang SMTP and POP3 server code.
+pkg_erlang_smtp_homepage = https://github.com/tonyg/erlang-smtp
+pkg_erlang_smtp_fetch = git
+pkg_erlang_smtp_repo = https://github.com/tonyg/erlang-smtp
+pkg_erlang_smtp_commit = master
+
+PACKAGES += erlang_term
+pkg_erlang_term_name = erlang_term
+pkg_erlang_term_description = Erlang Term Info
+pkg_erlang_term_homepage = https://github.com/okeuday/erlang_term
+pkg_erlang_term_fetch = git
+pkg_erlang_term_repo = https://github.com/okeuday/erlang_term
+pkg_erlang_term_commit = master
+
+PACKAGES += erlastic_search
+pkg_erlastic_search_name = erlastic_search
+pkg_erlastic_search_description = An Erlang app for communicating with Elasticsearch's REST interface.
+pkg_erlastic_search_homepage = https://github.com/tsloughter/erlastic_search
+pkg_erlastic_search_fetch = git
+pkg_erlastic_search_repo = https://github.com/tsloughter/erlastic_search
+pkg_erlastic_search_commit = master
+
+PACKAGES += erlasticsearch
+pkg_erlasticsearch_name = erlasticsearch
+pkg_erlasticsearch_description = Erlang Thrift interface to Elasticsearch
+pkg_erlasticsearch_homepage = https://github.com/dieswaytoofast/erlasticsearch
+pkg_erlasticsearch_fetch = git
+pkg_erlasticsearch_repo = https://github.com/dieswaytoofast/erlasticsearch
+pkg_erlasticsearch_commit = master
+
+PACKAGES += erlbrake
+pkg_erlbrake_name = erlbrake
+pkg_erlbrake_description = Erlang Airbrake notification client
+pkg_erlbrake_homepage = https://github.com/kenpratt/erlbrake
+pkg_erlbrake_fetch = git
+pkg_erlbrake_repo = https://github.com/kenpratt/erlbrake
+pkg_erlbrake_commit = master
+
+PACKAGES += erlcloud
+pkg_erlcloud_name = erlcloud
+pkg_erlcloud_description = Cloud Computing library for erlang (Amazon EC2, S3, SQS, SimpleDB, Mechanical Turk, ELB)
+pkg_erlcloud_homepage = https://github.com/gleber/erlcloud
+pkg_erlcloud_fetch = git
+pkg_erlcloud_repo = https://github.com/gleber/erlcloud
+pkg_erlcloud_commit = master
+
+PACKAGES += erlcron
+pkg_erlcron_name = erlcron
+pkg_erlcron_description = Erlang cronish system
+pkg_erlcron_homepage = https://github.com/erlware/erlcron
+pkg_erlcron_fetch = git
+pkg_erlcron_repo = https://github.com/erlware/erlcron
+pkg_erlcron_commit = master
+
+PACKAGES += erldb
+pkg_erldb_name = erldb
+pkg_erldb_description = ORM (Object-relational mapping) application implemented in Erlang
+pkg_erldb_homepage = http://erldb.org
+pkg_erldb_fetch = git
+pkg_erldb_repo = https://github.com/erldb/erldb
+pkg_erldb_commit = master
+
+PACKAGES += erldis
+pkg_erldis_name = erldis
+pkg_erldis_description = redis erlang client library
+pkg_erldis_homepage = https://github.com/cstar/erldis
+pkg_erldis_fetch = git
+pkg_erldis_repo = https://github.com/cstar/erldis
+pkg_erldis_commit = master
+
+PACKAGES += erldns
+pkg_erldns_name = erldns
+pkg_erldns_description = DNS server, in erlang.
+pkg_erldns_homepage = https://github.com/aetrion/erl-dns
+pkg_erldns_fetch = git
+pkg_erldns_repo = https://github.com/aetrion/erl-dns
+pkg_erldns_commit = master
+
+PACKAGES += erldocker
+pkg_erldocker_name = erldocker
+pkg_erldocker_description = Docker Remote API client for Erlang
+pkg_erldocker_homepage = https://github.com/proger/erldocker
+pkg_erldocker_fetch = git
+pkg_erldocker_repo = https://github.com/proger/erldocker
+pkg_erldocker_commit = master
+
+PACKAGES += erlfsmon
+pkg_erlfsmon_name = erlfsmon
+pkg_erlfsmon_description = Erlang filesystem event watcher for Linux and OSX
+pkg_erlfsmon_homepage = https://github.com/proger/erlfsmon
+pkg_erlfsmon_fetch = git
+pkg_erlfsmon_repo = https://github.com/proger/erlfsmon
+pkg_erlfsmon_commit = master
+
+PACKAGES += erlgit
+pkg_erlgit_name = erlgit
+pkg_erlgit_description = Erlang convenience wrapper around git executable
+pkg_erlgit_homepage = https://github.com/gleber/erlgit
+pkg_erlgit_fetch = git
+pkg_erlgit_repo = https://github.com/gleber/erlgit
+pkg_erlgit_commit = master
+
+PACKAGES += erlguten
+pkg_erlguten_name = erlguten
+pkg_erlguten_description = ErlGuten is a system for high-quality typesetting, written purely in Erlang.
+pkg_erlguten_homepage = https://github.com/richcarl/erlguten
+pkg_erlguten_fetch = git
+pkg_erlguten_repo = https://github.com/richcarl/erlguten
+pkg_erlguten_commit = master
+
+PACKAGES += erlmc
+pkg_erlmc_name = erlmc
+pkg_erlmc_description = Erlang memcached binary protocol client
+pkg_erlmc_homepage = https://github.com/jkvor/erlmc
+pkg_erlmc_fetch = git
+pkg_erlmc_repo = https://github.com/jkvor/erlmc
+pkg_erlmc_commit = master
+
+PACKAGES += erlmongo
+pkg_erlmongo_name = erlmongo
+pkg_erlmongo_description = Record-based Erlang driver for MongoDB with GridFS support
+pkg_erlmongo_homepage = https://github.com/SergejJurecko/erlmongo
+pkg_erlmongo_fetch = git
+pkg_erlmongo_repo = https://github.com/SergejJurecko/erlmongo
+pkg_erlmongo_commit = master
+
+PACKAGES += erlog
+pkg_erlog_name = erlog
+pkg_erlog_description = Prolog interpreter in and for Erlang
+pkg_erlog_homepage = https://github.com/rvirding/erlog
+pkg_erlog_fetch = git
+pkg_erlog_repo = https://github.com/rvirding/erlog
+pkg_erlog_commit = master
+
+PACKAGES += erlpass
+pkg_erlpass_name = erlpass
+pkg_erlpass_description = A library to handle password hashing and changing in a safe manner, independent from any kind of storage whatsoever.
+pkg_erlpass_homepage = https://github.com/ferd/erlpass
+pkg_erlpass_fetch = git
+pkg_erlpass_repo = https://github.com/ferd/erlpass
+pkg_erlpass_commit = master
+
+PACKAGES += erlport
+pkg_erlport_name = erlport
+pkg_erlport_description = ErlPort - connect Erlang to other languages
+pkg_erlport_homepage = https://github.com/hdima/erlport
+pkg_erlport_fetch = git
+pkg_erlport_repo = https://github.com/hdima/erlport
+pkg_erlport_commit = master
+
+PACKAGES += erlsh
+pkg_erlsh_name = erlsh
+pkg_erlsh_description = Erlang shell tools
+pkg_erlsh_homepage = https://github.com/proger/erlsh
+pkg_erlsh_fetch = git
+pkg_erlsh_repo = https://github.com/proger/erlsh
+pkg_erlsh_commit = master
+
+PACKAGES += erlsha2
+pkg_erlsha2_name = erlsha2
+pkg_erlsha2_description = SHA-224, SHA-256, SHA-384, SHA-512 implemented in Erlang NIFs.
+pkg_erlsha2_homepage = https://github.com/vinoski/erlsha2
+pkg_erlsha2_fetch = git
+pkg_erlsha2_repo = https://github.com/vinoski/erlsha2
+pkg_erlsha2_commit = master
+
+PACKAGES += erlsom
+pkg_erlsom_name = erlsom
+pkg_erlsom_description = XML parser for Erlang
+pkg_erlsom_homepage = https://github.com/willemdj/erlsom
+pkg_erlsom_fetch = git
+pkg_erlsom_repo = https://github.com/willemdj/erlsom
+pkg_erlsom_commit = master
+
+PACKAGES += erlubi
+pkg_erlubi_name = erlubi
+pkg_erlubi_description = Ubigraph Erlang Client (and Process Visualizer)
+pkg_erlubi_homepage = https://github.com/krestenkrab/erlubi
+pkg_erlubi_fetch = git
+pkg_erlubi_repo = https://github.com/krestenkrab/erlubi
+pkg_erlubi_commit = master
+
+PACKAGES += erlvolt
+pkg_erlvolt_name = erlvolt
+pkg_erlvolt_description = VoltDB Erlang Client Driver
+pkg_erlvolt_homepage = https://github.com/VoltDB/voltdb-client-erlang
+pkg_erlvolt_fetch = git
+pkg_erlvolt_repo = https://github.com/VoltDB/voltdb-client-erlang
+pkg_erlvolt_commit = master
+
+PACKAGES += erlware_commons
+pkg_erlware_commons_name = erlware_commons
+pkg_erlware_commons_description = Erlware Commons is an Erlware project focused on all aspects of reusable Erlang components.
+pkg_erlware_commons_homepage = https://github.com/erlware/erlware_commons
+pkg_erlware_commons_fetch = git
+pkg_erlware_commons_repo = https://github.com/erlware/erlware_commons
+pkg_erlware_commons_commit = master
+
+PACKAGES += erlydtl
+pkg_erlydtl_name = erlydtl
+pkg_erlydtl_description = Django Template Language for Erlang.
+pkg_erlydtl_homepage = https://github.com/erlydtl/erlydtl
+pkg_erlydtl_fetch = git
+pkg_erlydtl_repo = https://github.com/erlydtl/erlydtl
+pkg_erlydtl_commit = master
+
+PACKAGES += errd
+pkg_errd_name = errd
+pkg_errd_description = Erlang RRDTool library
+pkg_errd_homepage = https://github.com/archaelus/errd
+pkg_errd_fetch = git
+pkg_errd_repo = https://github.com/archaelus/errd
+pkg_errd_commit = master
+
+PACKAGES += erserve
+pkg_erserve_name = erserve
+pkg_erserve_description = Erlang/Rserve communication interface
+pkg_erserve_homepage = https://github.com/del/erserve
+pkg_erserve_fetch = git
+pkg_erserve_repo = https://github.com/del/erserve
+pkg_erserve_commit = master
+
+PACKAGES += erwa
+pkg_erwa_name = erwa
+pkg_erwa_description = A WAMP router and client written in Erlang.
+pkg_erwa_homepage = https://github.com/bwegh/erwa
+pkg_erwa_fetch = git
+pkg_erwa_repo = https://github.com/bwegh/erwa
+pkg_erwa_commit = master
+
+PACKAGES += escalus
+pkg_escalus_name = escalus
+pkg_escalus_description = An XMPP client library in Erlang for conveniently testing XMPP servers
+pkg_escalus_homepage = https://github.com/esl/escalus
+pkg_escalus_fetch = git
+pkg_escalus_repo = https://github.com/esl/escalus
+pkg_escalus_commit = master
+
+PACKAGES += esh_mk
+pkg_esh_mk_name = esh_mk
+pkg_esh_mk_description = esh template engine plugin for erlang.mk
+pkg_esh_mk_homepage = https://github.com/crownedgrouse/esh.mk
+pkg_esh_mk_fetch = git
+pkg_esh_mk_repo = https://github.com/crownedgrouse/esh.mk.git
+pkg_esh_mk_commit = master
+
+PACKAGES += espec
+pkg_espec_name = espec
+pkg_espec_description = ESpec: Behaviour driven development framework for Erlang
+pkg_espec_homepage = https://github.com/lucaspiller/espec
+pkg_espec_fetch = git
+pkg_espec_repo = https://github.com/lucaspiller/espec
+pkg_espec_commit = master
+
+PACKAGES += estatsd
+pkg_estatsd_name = estatsd
+pkg_estatsd_description = Erlang stats aggregation app that periodically flushes data to graphite
+pkg_estatsd_homepage = https://github.com/RJ/estatsd
+pkg_estatsd_fetch = git
+pkg_estatsd_repo = https://github.com/RJ/estatsd
+pkg_estatsd_commit = master
+
+PACKAGES += etap
+pkg_etap_name = etap
+pkg_etap_description = etap is a simple erlang testing library that provides TAP compliant output.
+pkg_etap_homepage = https://github.com/ngerakines/etap
+pkg_etap_fetch = git
+pkg_etap_repo = https://github.com/ngerakines/etap
+pkg_etap_commit = master
+
+PACKAGES += etest
+pkg_etest_name = etest
+pkg_etest_description = A lightweight, convention-over-configuration test framework for Erlang
+pkg_etest_homepage = https://github.com/wooga/etest
+pkg_etest_fetch = git
+pkg_etest_repo = https://github.com/wooga/etest
+pkg_etest_commit = master
+
+PACKAGES += etest_http
+pkg_etest_http_name = etest_http
+pkg_etest_http_description = etest Assertions around HTTP (client-side)
+pkg_etest_http_homepage = https://github.com/wooga/etest_http
+pkg_etest_http_fetch = git
+pkg_etest_http_repo = https://github.com/wooga/etest_http
+pkg_etest_http_commit = master
+
+PACKAGES += etoml
+pkg_etoml_name = etoml
+pkg_etoml_description = Erlang parser for the TOML language
+pkg_etoml_homepage = https://github.com/kalta/etoml
+pkg_etoml_fetch = git
+pkg_etoml_repo = https://github.com/kalta/etoml
+pkg_etoml_commit = master
+
+PACKAGES += eunit
+pkg_eunit_name = eunit
+pkg_eunit_description = The EUnit lightweight unit testing framework for Erlang - this is the canonical development repository.
+pkg_eunit_homepage = https://github.com/richcarl/eunit
+pkg_eunit_fetch = git
+pkg_eunit_repo = https://github.com/richcarl/eunit
+pkg_eunit_commit = master
+
+PACKAGES += eunit_formatters
+pkg_eunit_formatters_name = eunit_formatters
+pkg_eunit_formatters_description = Because eunit's output sucks. Let's make it better.
+pkg_eunit_formatters_homepage = https://github.com/seancribbs/eunit_formatters
+pkg_eunit_formatters_fetch = git
+pkg_eunit_formatters_repo = https://github.com/seancribbs/eunit_formatters
+pkg_eunit_formatters_commit = master
+
+PACKAGES += euthanasia
+pkg_euthanasia_name = euthanasia
+pkg_euthanasia_description = Merciful killer for your Erlang processes
+pkg_euthanasia_homepage = https://github.com/doubleyou/euthanasia
+pkg_euthanasia_fetch = git
+pkg_euthanasia_repo = https://github.com/doubleyou/euthanasia
+pkg_euthanasia_commit = master
+
+PACKAGES += evum
+pkg_evum_name = evum
+pkg_evum_description = Spawn Linux VMs as Erlang processes in the Erlang VM
+pkg_evum_homepage = https://github.com/msantos/evum
+pkg_evum_fetch = git
+pkg_evum_repo = https://github.com/msantos/evum
+pkg_evum_commit = master
+
+PACKAGES += exec
+pkg_exec_name = erlexec
+pkg_exec_description = Execute and control OS processes from Erlang/OTP.
+pkg_exec_homepage = http://saleyn.github.com/erlexec
+pkg_exec_fetch = git
+pkg_exec_repo = https://github.com/saleyn/erlexec
+pkg_exec_commit = master
+
+PACKAGES += exml
+pkg_exml_name = exml
+pkg_exml_description = XML parsing library in Erlang
+pkg_exml_homepage = https://github.com/paulgray/exml
+pkg_exml_fetch = git
+pkg_exml_repo = https://github.com/paulgray/exml
+pkg_exml_commit = master
+
+PACKAGES += exometer
+pkg_exometer_name = exometer
+pkg_exometer_description = Basic measurement objects and probe behavior
+pkg_exometer_homepage = https://github.com/Feuerlabs/exometer
+pkg_exometer_fetch = git
+pkg_exometer_repo = https://github.com/Feuerlabs/exometer
+pkg_exometer_commit = master
+
+PACKAGES += exs1024
+pkg_exs1024_name = exs1024
+pkg_exs1024_description = Xorshift1024star pseudo random number generator for Erlang.
+pkg_exs1024_homepage = https://github.com/jj1bdx/exs1024
+pkg_exs1024_fetch = git
+pkg_exs1024_repo = https://github.com/jj1bdx/exs1024
+pkg_exs1024_commit = master
+
+PACKAGES += exs64
+pkg_exs64_name = exs64
+pkg_exs64_description = Xorshift64star pseudo random number generator for Erlang.
+pkg_exs64_homepage = https://github.com/jj1bdx/exs64
+pkg_exs64_fetch = git
+pkg_exs64_repo = https://github.com/jj1bdx/exs64
+pkg_exs64_commit = master
+
+PACKAGES += exsplus116
+pkg_exsplus116_name = exsplus116
+pkg_exsplus116_description = Xorshift116plus for Erlang
+pkg_exsplus116_homepage = https://github.com/jj1bdx/exsplus116
+pkg_exsplus116_fetch = git
+pkg_exsplus116_repo = https://github.com/jj1bdx/exsplus116
+pkg_exsplus116_commit = master
+
+PACKAGES += exsplus128
+pkg_exsplus128_name = exsplus128
+pkg_exsplus128_description = Xorshift128plus pseudo random number generator for Erlang.
+pkg_exsplus128_homepage = https://github.com/jj1bdx/exsplus128
+pkg_exsplus128_fetch = git
+pkg_exsplus128_repo = https://github.com/jj1bdx/exsplus128
+pkg_exsplus128_commit = master
+
+PACKAGES += ezmq
+pkg_ezmq_name = ezmq
+pkg_ezmq_description = ZeroMQ implemented in Erlang
+pkg_ezmq_homepage = https://github.com/RoadRunnr/ezmq
+pkg_ezmq_fetch = git
+pkg_ezmq_repo = https://github.com/RoadRunnr/ezmq
+pkg_ezmq_commit = master
+
+PACKAGES += ezmtp
+pkg_ezmtp_name = ezmtp
+pkg_ezmtp_description = ZMTP protocol in pure Erlang.
+pkg_ezmtp_homepage = https://github.com/a13x/ezmtp
+pkg_ezmtp_fetch = git
+pkg_ezmtp_repo = https://github.com/a13x/ezmtp
+pkg_ezmtp_commit = master
+
+PACKAGES += fast_disk_log
+pkg_fast_disk_log_name = fast_disk_log
+pkg_fast_disk_log_description = Pool-based asynchronous Erlang disk logger
+pkg_fast_disk_log_homepage = https://github.com/lpgauth/fast_disk_log
+pkg_fast_disk_log_fetch = git
+pkg_fast_disk_log_repo = https://github.com/lpgauth/fast_disk_log
+pkg_fast_disk_log_commit = master
+
+PACKAGES += feeder
+pkg_feeder_name = feeder
+pkg_feeder_description = Stream parser for RSS- and Atom-formatted XML feeds.
+pkg_feeder_homepage = https://github.com/michaelnisi/feeder
+pkg_feeder_fetch = git
+pkg_feeder_repo = https://github.com/michaelnisi/feeder
+pkg_feeder_commit = master
+
+PACKAGES += find_crate
+pkg_find_crate_name = find_crate
+pkg_find_crate_description = Find Rust libs and exes in Erlang application priv directory
+pkg_find_crate_homepage = https://github.com/goertzenator/find_crate
+pkg_find_crate_fetch = git
+pkg_find_crate_repo = https://github.com/goertzenator/find_crate
+pkg_find_crate_commit = master
+
+PACKAGES += fix
+pkg_fix_name = fix
+pkg_fix_description = Implementation of the FIX protocol (http://fixprotocol.org/).
+pkg_fix_homepage = https://github.com/maxlapshin/fix
+pkg_fix_fetch = git
+pkg_fix_repo = https://github.com/maxlapshin/fix
+pkg_fix_commit = master
+
+PACKAGES += flower
+pkg_flower_name = flower
+pkg_flower_description = FlowER - an Erlang OpenFlow development platform
+pkg_flower_homepage = https://github.com/travelping/flower
+pkg_flower_fetch = git
+pkg_flower_repo = https://github.com/travelping/flower
+pkg_flower_commit = master
+
+PACKAGES += fn
+pkg_fn_name = fn
+pkg_fn_description = Function utilities for Erlang
+pkg_fn_homepage = https://github.com/reiddraper/fn
+pkg_fn_fetch = git
+pkg_fn_repo = https://github.com/reiddraper/fn
+pkg_fn_commit = master
+
+PACKAGES += folsom
+pkg_folsom_name = folsom
+pkg_folsom_description = Expose Erlang Events and Metrics
+pkg_folsom_homepage = https://github.com/boundary/folsom
+pkg_folsom_fetch = git
+pkg_folsom_repo = https://github.com/boundary/folsom
+pkg_folsom_commit = master
+
+PACKAGES += folsom_cowboy
+pkg_folsom_cowboy_name = folsom_cowboy
+pkg_folsom_cowboy_description = A Cowboy-based Folsom HTTP wrapper.
+pkg_folsom_cowboy_homepage = https://github.com/boundary/folsom_cowboy
+pkg_folsom_cowboy_fetch = git
+pkg_folsom_cowboy_repo = https://github.com/boundary/folsom_cowboy
+pkg_folsom_cowboy_commit = master
+
+PACKAGES += folsomite
+pkg_folsomite_name = folsomite
+pkg_folsomite_description = blow up your graphite / riemann server with folsom metrics
+pkg_folsomite_homepage = https://github.com/campanja/folsomite
+pkg_folsomite_fetch = git
+pkg_folsomite_repo = https://github.com/campanja/folsomite
+pkg_folsomite_commit = master
+
+PACKAGES += fs
+pkg_fs_name = fs
+pkg_fs_description = Erlang FileSystem Listener
+pkg_fs_homepage = https://github.com/synrc/fs
+pkg_fs_fetch = git
+pkg_fs_repo = https://github.com/synrc/fs
+pkg_fs_commit = master
+
+PACKAGES += fuse
+pkg_fuse_name = fuse
+pkg_fuse_description = A Circuit Breaker for Erlang
+pkg_fuse_homepage = https://github.com/jlouis/fuse
+pkg_fuse_fetch = git
+pkg_fuse_repo = https://github.com/jlouis/fuse
+pkg_fuse_commit = master
+
+PACKAGES += gcm
+pkg_gcm_name = gcm
+pkg_gcm_description = An Erlang application for Google Cloud Messaging
+pkg_gcm_homepage = https://github.com/pdincau/gcm-erlang
+pkg_gcm_fetch = git
+pkg_gcm_repo = https://github.com/pdincau/gcm-erlang
+pkg_gcm_commit = master
+
+PACKAGES += gcprof
+pkg_gcprof_name = gcprof
+pkg_gcprof_description = Garbage Collection profiler for Erlang
+pkg_gcprof_homepage = https://github.com/knutin/gcprof
+pkg_gcprof_fetch = git
+pkg_gcprof_repo = https://github.com/knutin/gcprof
+pkg_gcprof_commit = master
+
+PACKAGES += geas
+pkg_geas_name = geas
+pkg_geas_description = Guess Erlang Application Scattering
+pkg_geas_homepage = https://github.com/crownedgrouse/geas
+pkg_geas_fetch = git
+pkg_geas_repo = https://github.com/crownedgrouse/geas
+pkg_geas_commit = master
+
+PACKAGES += geef
+pkg_geef_name = geef
+pkg_geef_description = Git NEEEEF (Erlang NIF)
+pkg_geef_homepage = https://github.com/carlosmn/geef
+pkg_geef_fetch = git
+pkg_geef_repo = https://github.com/carlosmn/geef
+pkg_geef_commit = master
+
+PACKAGES += gen_coap
+pkg_gen_coap_name = gen_coap
+pkg_gen_coap_description = Generic Erlang CoAP Client/Server
+pkg_gen_coap_homepage = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_fetch = git
+pkg_gen_coap_repo = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_commit = master
+
+PACKAGES += gen_cycle
+pkg_gen_cycle_name = gen_cycle
+pkg_gen_cycle_description = Simple, generic OTP behaviour for recurring tasks
+pkg_gen_cycle_homepage = https://github.com/aerosol/gen_cycle
+pkg_gen_cycle_fetch = git
+pkg_gen_cycle_repo = https://github.com/aerosol/gen_cycle
+pkg_gen_cycle_commit = develop
+
+PACKAGES += gen_icmp
+pkg_gen_icmp_name = gen_icmp
+pkg_gen_icmp_description = Erlang interface to ICMP sockets
+pkg_gen_icmp_homepage = https://github.com/msantos/gen_icmp
+pkg_gen_icmp_fetch = git
+pkg_gen_icmp_repo = https://github.com/msantos/gen_icmp
+pkg_gen_icmp_commit = master
+
+PACKAGES += gen_leader
+pkg_gen_leader_name = gen_leader
+pkg_gen_leader_description = leader election behavior
+pkg_gen_leader_homepage = https://github.com/garret-smith/gen_leader_revival
+pkg_gen_leader_fetch = git
+pkg_gen_leader_repo = https://github.com/garret-smith/gen_leader_revival
+pkg_gen_leader_commit = master
+
+PACKAGES += gen_nb_server
+pkg_gen_nb_server_name = gen_nb_server
+pkg_gen_nb_server_description = OTP behavior for writing non-blocking servers
+pkg_gen_nb_server_homepage = https://github.com/kevsmith/gen_nb_server
+pkg_gen_nb_server_fetch = git
+pkg_gen_nb_server_repo = https://github.com/kevsmith/gen_nb_server
+pkg_gen_nb_server_commit = master
+
+PACKAGES += gen_paxos
+pkg_gen_paxos_name = gen_paxos
+pkg_gen_paxos_description = An Erlang/OTP-style implementation of the Paxos distributed consensus protocol
+pkg_gen_paxos_homepage = https://github.com/gburd/gen_paxos
+pkg_gen_paxos_fetch = git
+pkg_gen_paxos_repo = https://github.com/gburd/gen_paxos
+pkg_gen_paxos_commit = master
+
+PACKAGES += gen_rpc
+pkg_gen_rpc_name = gen_rpc
+pkg_gen_rpc_description = A scalable RPC library for Erlang-VM based languages
+pkg_gen_rpc_homepage = https://github.com/priestjim/gen_rpc.git
+pkg_gen_rpc_fetch = git
+pkg_gen_rpc_repo = https://github.com/priestjim/gen_rpc.git
+pkg_gen_rpc_commit = master
+
+PACKAGES += gen_smtp
+pkg_gen_smtp_name = gen_smtp
+pkg_gen_smtp_description = A generic Erlang SMTP server and client that can be extended via callback modules
+pkg_gen_smtp_homepage = https://github.com/Vagabond/gen_smtp
+pkg_gen_smtp_fetch = git
+pkg_gen_smtp_repo = https://github.com/Vagabond/gen_smtp
+pkg_gen_smtp_commit = master
+
+PACKAGES += gen_tracker
+pkg_gen_tracker_name = gen_tracker
+pkg_gen_tracker_description = Supervisor with ETS-based handling of children and their metadata
+pkg_gen_tracker_homepage = https://github.com/erlyvideo/gen_tracker
+pkg_gen_tracker_fetch = git
+pkg_gen_tracker_repo = https://github.com/erlyvideo/gen_tracker
+pkg_gen_tracker_commit = master
+
+PACKAGES += gen_unix
+pkg_gen_unix_name = gen_unix
+pkg_gen_unix_description = Erlang Unix socket interface
+pkg_gen_unix_homepage = https://github.com/msantos/gen_unix
+pkg_gen_unix_fetch = git
+pkg_gen_unix_repo = https://github.com/msantos/gen_unix
+pkg_gen_unix_commit = master
+
+PACKAGES += geode
+pkg_geode_name = geode
+pkg_geode_description = geohash/proximity lookup in pure, uncut erlang.
+pkg_geode_homepage = https://github.com/bradfordw/geode
+pkg_geode_fetch = git
+pkg_geode_repo = https://github.com/bradfordw/geode
+pkg_geode_commit = master
+
+PACKAGES += getopt
+pkg_getopt_name = getopt
+pkg_getopt_description = Module to parse command line arguments using the GNU getopt syntax
+pkg_getopt_homepage = https://github.com/jcomellas/getopt
+pkg_getopt_fetch = git
+pkg_getopt_repo = https://github.com/jcomellas/getopt
+pkg_getopt_commit = master
+
+PACKAGES += gettext
+pkg_gettext_name = gettext
+pkg_gettext_description = Erlang internationalization library.
+pkg_gettext_homepage = https://github.com/etnt/gettext
+pkg_gettext_fetch = git
+pkg_gettext_repo = https://github.com/etnt/gettext
+pkg_gettext_commit = master
+
+PACKAGES += giallo
+pkg_giallo_name = giallo
+pkg_giallo_description = Small and flexible web framework on top of Cowboy
+pkg_giallo_homepage = https://github.com/kivra/giallo
+pkg_giallo_fetch = git
+pkg_giallo_repo = https://github.com/kivra/giallo
+pkg_giallo_commit = master
+
+PACKAGES += gin
+pkg_gin_name = gin
+pkg_gin_description = Guard-related parse_transform for Erlang
+pkg_gin_homepage = https://github.com/mad-cocktail/gin
+pkg_gin_fetch = git
+pkg_gin_repo = https://github.com/mad-cocktail/gin
+pkg_gin_commit = master
+
+PACKAGES += gitty
+pkg_gitty_name = gitty
+pkg_gitty_description = Git access in erlang
+pkg_gitty_homepage = https://github.com/maxlapshin/gitty
+pkg_gitty_fetch = git
+pkg_gitty_repo = https://github.com/maxlapshin/gitty
+pkg_gitty_commit = master
+
+PACKAGES += gold_fever
+pkg_gold_fever_name = gold_fever
+pkg_gold_fever_description = A Treasure Hunt for Erlangers
+pkg_gold_fever_homepage = https://github.com/inaka/gold_fever
+pkg_gold_fever_fetch = git
+pkg_gold_fever_repo = https://github.com/inaka/gold_fever
+pkg_gold_fever_commit = master
+
+PACKAGES += gpb
+pkg_gpb_name = gpb
+pkg_gpb_description = A Google Protobuf implementation for Erlang
+pkg_gpb_homepage = https://github.com/tomas-abrahamsson/gpb
+pkg_gpb_fetch = git
+pkg_gpb_repo = https://github.com/tomas-abrahamsson/gpb
+pkg_gpb_commit = master
+
+PACKAGES += gproc
+pkg_gproc_name = gproc
+pkg_gproc_description = Extended process registry for Erlang
+pkg_gproc_homepage = https://github.com/uwiger/gproc
+pkg_gproc_fetch = git
+pkg_gproc_repo = https://github.com/uwiger/gproc
+pkg_gproc_commit = master
+
+PACKAGES += grapherl
+pkg_grapherl_name = grapherl
+pkg_grapherl_description = Create graphs of Erlang systems and programs
+pkg_grapherl_homepage = https://github.com/eproxus/grapherl
+pkg_grapherl_fetch = git
+pkg_grapherl_repo = https://github.com/eproxus/grapherl
+pkg_grapherl_commit = master
+
+PACKAGES += grpc
+pkg_grpc_name = grpc
+pkg_grpc_description = gRPC server in Erlang
+pkg_grpc_homepage = https://github.com/Bluehouse-Technology/grpc
+pkg_grpc_fetch = git
+pkg_grpc_repo = https://github.com/Bluehouse-Technology/grpc
+pkg_grpc_commit = master
+
+PACKAGES += grpc_client
+pkg_grpc_client_name = grpc_client
+pkg_grpc_client_description = gRPC client in Erlang
+pkg_grpc_client_homepage = https://github.com/Bluehouse-Technology/grpc_client
+pkg_grpc_client_fetch = git
+pkg_grpc_client_repo = https://github.com/Bluehouse-Technology/grpc_client
+pkg_grpc_client_commit = master
+
+PACKAGES += gun
+pkg_gun_name = gun
+pkg_gun_description = Asynchronous SPDY, HTTP and WebSocket client written in Erlang.
+pkg_gun_homepage = http://ninenines.eu
+pkg_gun_fetch = git
+pkg_gun_repo = https://github.com/ninenines/gun
+pkg_gun_commit = master
+
+PACKAGES += gut
+pkg_gut_name = gut
+pkg_gut_description = gut is a template printing, aka scaffolding, tool for Erlang. Like rails generate or yeoman
+pkg_gut_homepage = https://github.com/unbalancedparentheses/gut
+pkg_gut_fetch = git
+pkg_gut_repo = https://github.com/unbalancedparentheses/gut
+pkg_gut_commit = master
+
+PACKAGES += hackney
+pkg_hackney_name = hackney
+pkg_hackney_description = simple HTTP client in Erlang
+pkg_hackney_homepage = https://github.com/benoitc/hackney
+pkg_hackney_fetch = git
+pkg_hackney_repo = https://github.com/benoitc/hackney
+pkg_hackney_commit = master
+
+PACKAGES += hamcrest
+pkg_hamcrest_name = hamcrest
+pkg_hamcrest_description = Erlang port of Hamcrest
+pkg_hamcrest_homepage = https://github.com/hyperthunk/hamcrest-erlang
+pkg_hamcrest_fetch = git
+pkg_hamcrest_repo = https://github.com/hyperthunk/hamcrest-erlang
+pkg_hamcrest_commit = master
+
+PACKAGES += hanoidb
+pkg_hanoidb_name = hanoidb
+pkg_hanoidb_description = Erlang LSM BTree Storage
+pkg_hanoidb_homepage = https://github.com/krestenkrab/hanoidb
+pkg_hanoidb_fetch = git
+pkg_hanoidb_repo = https://github.com/krestenkrab/hanoidb
+pkg_hanoidb_commit = master
+
+PACKAGES += hottub
+pkg_hottub_name = hottub
+pkg_hottub_description = Permanent Erlang Worker Pool
+pkg_hottub_homepage = https://github.com/bfrog/hottub
+pkg_hottub_fetch = git
+pkg_hottub_repo = https://github.com/bfrog/hottub
+pkg_hottub_commit = master
+
+PACKAGES += hpack
+pkg_hpack_name = hpack
+pkg_hpack_description = HPACK Implementation for Erlang
+pkg_hpack_homepage = https://github.com/joedevivo/hpack
+pkg_hpack_fetch = git
+pkg_hpack_repo = https://github.com/joedevivo/hpack
+pkg_hpack_commit = master
+
+PACKAGES += hyper
+pkg_hyper_name = hyper
+pkg_hyper_description = Erlang implementation of HyperLogLog
+pkg_hyper_homepage = https://github.com/GameAnalytics/hyper
+pkg_hyper_fetch = git
+pkg_hyper_repo = https://github.com/GameAnalytics/hyper
+pkg_hyper_commit = master
+
+PACKAGES += i18n
+pkg_i18n_name = i18n
+pkg_i18n_description = International components for unicode from Erlang (unicode, date, string, number, format, locale, localization, transliteration, icu4e)
+pkg_i18n_homepage = https://github.com/erlang-unicode/i18n
+pkg_i18n_fetch = git
+pkg_i18n_repo = https://github.com/erlang-unicode/i18n
+pkg_i18n_commit = master
+
+PACKAGES += ibrowse
+pkg_ibrowse_name = ibrowse
+pkg_ibrowse_description = Erlang HTTP client
+pkg_ibrowse_homepage = https://github.com/cmullaparthi/ibrowse
+pkg_ibrowse_fetch = git
+pkg_ibrowse_repo = https://github.com/cmullaparthi/ibrowse
+pkg_ibrowse_commit = master
+
+PACKAGES += idna
+pkg_idna_name = idna
+pkg_idna_description = Erlang IDNA lib
+pkg_idna_homepage = https://github.com/benoitc/erlang-idna
+pkg_idna_fetch = git
+pkg_idna_repo = https://github.com/benoitc/erlang-idna
+pkg_idna_commit = master
+
+PACKAGES += ierlang
+pkg_ierlang_name = ierlang
+pkg_ierlang_description = An Erlang language kernel for IPython.
+pkg_ierlang_homepage = https://github.com/robbielynch/ierlang
+pkg_ierlang_fetch = git
+pkg_ierlang_repo = https://github.com/robbielynch/ierlang
+pkg_ierlang_commit = master
+
+PACKAGES += iota
+pkg_iota_name = iota
+pkg_iota_description = iota (Inter-dependency Objective Testing Apparatus) - a tool to enforce clean separation of responsibilities in Erlang code
+pkg_iota_homepage = https://github.com/jpgneves/iota
+pkg_iota_fetch = git
+pkg_iota_repo = https://github.com/jpgneves/iota
+pkg_iota_commit = master
+
+PACKAGES += irc_lib
+pkg_irc_lib_name = irc_lib
+pkg_irc_lib_description = Erlang IRC client library
+pkg_irc_lib_homepage = https://github.com/OtpChatBot/irc_lib
+pkg_irc_lib_fetch = git
+pkg_irc_lib_repo = https://github.com/OtpChatBot/irc_lib
+pkg_irc_lib_commit = master
+
+PACKAGES += ircd
+pkg_ircd_name = ircd
+pkg_ircd_description = A pluggable IRC daemon application/library for Erlang.
+pkg_ircd_homepage = https://github.com/tonyg/erlang-ircd
+pkg_ircd_fetch = git
+pkg_ircd_repo = https://github.com/tonyg/erlang-ircd
+pkg_ircd_commit = master
+
+PACKAGES += iris
+pkg_iris_name = iris
+pkg_iris_description = Iris Erlang binding
+pkg_iris_homepage = https://github.com/project-iris/iris-erl
+pkg_iris_fetch = git
+pkg_iris_repo = https://github.com/project-iris/iris-erl
+pkg_iris_commit = master
+
+PACKAGES += iso8601
+pkg_iso8601_name = iso8601
+pkg_iso8601_description = Erlang ISO 8601 date formatter/parser
+pkg_iso8601_homepage = https://github.com/seansawyer/erlang_iso8601
+pkg_iso8601_fetch = git
+pkg_iso8601_repo = https://github.com/seansawyer/erlang_iso8601
+pkg_iso8601_commit = master
+
+PACKAGES += jamdb_sybase
+pkg_jamdb_sybase_name = jamdb_sybase
+pkg_jamdb_sybase_description = Erlang driver for SAP Sybase ASE
+pkg_jamdb_sybase_homepage = https://github.com/erlangbureau/jamdb_sybase
+pkg_jamdb_sybase_fetch = git
+pkg_jamdb_sybase_repo = https://github.com/erlangbureau/jamdb_sybase
+pkg_jamdb_sybase_commit = master
+
+PACKAGES += jerg
+pkg_jerg_name = jerg
+pkg_jerg_description = JSON Schema to Erlang Records Generator
+pkg_jerg_homepage = https://github.com/ddossot/jerg
+pkg_jerg_fetch = git
+pkg_jerg_repo = https://github.com/ddossot/jerg
+pkg_jerg_commit = master
+
+PACKAGES += jesse
+pkg_jesse_name = jesse
+pkg_jesse_description = jesse (JSon Schema Erlang) is an implementation of a JSON Schema validator for Erlang.
+pkg_jesse_homepage = https://github.com/for-GET/jesse
+pkg_jesse_fetch = git
+pkg_jesse_repo = https://github.com/for-GET/jesse
+pkg_jesse_commit = master
+
+PACKAGES += jiffy
+pkg_jiffy_name = jiffy
+pkg_jiffy_description = JSON NIFs for Erlang.
+pkg_jiffy_homepage = https://github.com/davisp/jiffy
+pkg_jiffy_fetch = git
+pkg_jiffy_repo = https://github.com/davisp/jiffy
+pkg_jiffy_commit = master
+
+PACKAGES += jiffy_v
+pkg_jiffy_v_name = jiffy_v
+pkg_jiffy_v_description = JSON validation utility
+pkg_jiffy_v_homepage = https://github.com/shizzard/jiffy-v
+pkg_jiffy_v_fetch = git
+pkg_jiffy_v_repo = https://github.com/shizzard/jiffy-v
+pkg_jiffy_v_commit = master
+
+PACKAGES += jobs
+pkg_jobs_name = jobs
+pkg_jobs_description = A job scheduler for load regulation
+pkg_jobs_homepage = https://github.com/esl/jobs
+pkg_jobs_fetch = git
+pkg_jobs_repo = https://github.com/esl/jobs
+pkg_jobs_commit = master
+
+PACKAGES += joxa
+pkg_joxa_name = joxa
+pkg_joxa_description = A Modern Lisp for the Erlang VM
+pkg_joxa_homepage = https://github.com/joxa/joxa
+pkg_joxa_fetch = git
+pkg_joxa_repo = https://github.com/joxa/joxa
+pkg_joxa_commit = master
+
+PACKAGES += json
+pkg_json_name = json
+pkg_json_description = A high-level JSON library for Erlang (17.0+)
+pkg_json_homepage = https://github.com/talentdeficit/json
+pkg_json_fetch = git
+pkg_json_repo = https://github.com/talentdeficit/json
+pkg_json_commit = master
+
+PACKAGES += json_rec
+pkg_json_rec_name = json_rec
+pkg_json_rec_description = JSON to erlang record
+pkg_json_rec_homepage = https://github.com/justinkirby/json_rec
+pkg_json_rec_fetch = git
+pkg_json_rec_repo = https://github.com/justinkirby/json_rec
+pkg_json_rec_commit = master
+
+PACKAGES += jsone
+pkg_jsone_name = jsone
+pkg_jsone_description = An Erlang library for encoding and decoding JSON data.
+pkg_jsone_homepage = https://github.com/sile/jsone.git
+pkg_jsone_fetch = git
+pkg_jsone_repo = https://github.com/sile/jsone.git
+pkg_jsone_commit = master
+
+PACKAGES += jsonerl
+pkg_jsonerl_name = jsonerl
+pkg_jsonerl_description = yet another but slightly different erlang <-> json encoder/decoder
+pkg_jsonerl_homepage = https://github.com/lambder/jsonerl
+pkg_jsonerl_fetch = git
+pkg_jsonerl_repo = https://github.com/lambder/jsonerl
+pkg_jsonerl_commit = master
+
+PACKAGES += jsonpath
+pkg_jsonpath_name = jsonpath
+pkg_jsonpath_description = Fast Erlang JSON data retrieval and updates via JavaScript-like notation
+pkg_jsonpath_homepage = https://github.com/GeneStevens/jsonpath
+pkg_jsonpath_fetch = git
+pkg_jsonpath_repo = https://github.com/GeneStevens/jsonpath
+pkg_jsonpath_commit = master
+
+PACKAGES += jsonx
+pkg_jsonx_name = jsonx
+pkg_jsonx_description = JSONX is an Erlang library for efficiently decoding and encoding JSON, written in C.
+pkg_jsonx_homepage = https://github.com/iskra/jsonx
+pkg_jsonx_fetch = git
+pkg_jsonx_repo = https://github.com/iskra/jsonx
+pkg_jsonx_commit = master
+
+PACKAGES += jsx
+pkg_jsx_name = jsx
+pkg_jsx_description = An Erlang application for consuming, producing and manipulating JSON.
+pkg_jsx_homepage = https://github.com/talentdeficit/jsx
+pkg_jsx_fetch = git
+pkg_jsx_repo = https://github.com/talentdeficit/jsx
+pkg_jsx_commit = master
+
+PACKAGES += kafka
+pkg_kafka_name = kafka
+pkg_kafka_description = Kafka consumer and producer in Erlang
+pkg_kafka_homepage = https://github.com/wooga/kafka-erlang
+pkg_kafka_fetch = git
+pkg_kafka_repo = https://github.com/wooga/kafka-erlang
+pkg_kafka_commit = master
+
+PACKAGES += kafka_protocol
+pkg_kafka_protocol_name = kafka_protocol
+pkg_kafka_protocol_description = Kafka protocol Erlang library
+pkg_kafka_protocol_homepage = https://github.com/klarna/kafka_protocol
+pkg_kafka_protocol_fetch = git
+pkg_kafka_protocol_repo = https://github.com/klarna/kafka_protocol.git
+pkg_kafka_protocol_commit = master
+
+PACKAGES += kai
+pkg_kai_name = kai
+pkg_kai_description = DHT storage by Takeshi Inoue
+pkg_kai_homepage = https://github.com/synrc/kai
+pkg_kai_fetch = git
+pkg_kai_repo = https://github.com/synrc/kai
+pkg_kai_commit = master
+
+PACKAGES += katja
+pkg_katja_name = katja
+pkg_katja_description = A simple Riemann client written in Erlang.
+pkg_katja_homepage = https://github.com/nifoc/katja
+pkg_katja_fetch = git
+pkg_katja_repo = https://github.com/nifoc/katja
+pkg_katja_commit = master
+
+PACKAGES += kdht
+pkg_kdht_name = kdht
+pkg_kdht_description = kdht is an erlang DHT implementation
+pkg_kdht_homepage = https://github.com/kevinlynx/kdht
+pkg_kdht_fetch = git
+pkg_kdht_repo = https://github.com/kevinlynx/kdht
+pkg_kdht_commit = master
+
+PACKAGES += key2value
+pkg_key2value_name = key2value
+pkg_key2value_description = Erlang 2-way map
+pkg_key2value_homepage = https://github.com/okeuday/key2value
+pkg_key2value_fetch = git
+pkg_key2value_repo = https://github.com/okeuday/key2value
+pkg_key2value_commit = master
+
+PACKAGES += keys1value
+pkg_keys1value_name = keys1value
+pkg_keys1value_description = Erlang set associative map for key lists
+pkg_keys1value_homepage = https://github.com/okeuday/keys1value
+pkg_keys1value_fetch = git
+pkg_keys1value_repo = https://github.com/okeuday/keys1value
+pkg_keys1value_commit = master
+
+PACKAGES += kinetic
+pkg_kinetic_name = kinetic
+pkg_kinetic_description = Erlang Kinesis Client
+pkg_kinetic_homepage = https://github.com/AdRoll/kinetic
+pkg_kinetic_fetch = git
+pkg_kinetic_repo = https://github.com/AdRoll/kinetic
+pkg_kinetic_commit = master
+
+PACKAGES += kjell
+pkg_kjell_name = kjell
+pkg_kjell_description = Erlang Shell
+pkg_kjell_homepage = https://github.com/karlll/kjell
+pkg_kjell_fetch = git
+pkg_kjell_repo = https://github.com/karlll/kjell
+pkg_kjell_commit = master
+
+PACKAGES += kraken
+pkg_kraken_name = kraken
+pkg_kraken_description = Distributed Pubsub Server for Realtime Apps
+pkg_kraken_homepage = https://github.com/Asana/kraken
+pkg_kraken_fetch = git
+pkg_kraken_repo = https://github.com/Asana/kraken
+pkg_kraken_commit = master
+
+PACKAGES += kucumberl
+pkg_kucumberl_name = kucumberl
+pkg_kucumberl_description = A pure-Erlang, open-source implementation of Cucumber
+pkg_kucumberl_homepage = https://github.com/openshine/kucumberl
+pkg_kucumberl_fetch = git
+pkg_kucumberl_repo = https://github.com/openshine/kucumberl
+pkg_kucumberl_commit = master
+
+PACKAGES += kvc
+pkg_kvc_name = kvc
+pkg_kvc_description = KVC - Key Value Coding for Erlang data structures
+pkg_kvc_homepage = https://github.com/etrepum/kvc
+pkg_kvc_fetch = git
+pkg_kvc_repo = https://github.com/etrepum/kvc
+pkg_kvc_commit = master
+
+PACKAGES += kvlists
+pkg_kvlists_name = kvlists
+pkg_kvlists_description = Lists of key-value pairs (decoded JSON) in Erlang
+pkg_kvlists_homepage = https://github.com/jcomellas/kvlists
+pkg_kvlists_fetch = git
+pkg_kvlists_repo = https://github.com/jcomellas/kvlists
+pkg_kvlists_commit = master
+
+PACKAGES += kvs
+pkg_kvs_name = kvs
+pkg_kvs_description = Container and Iterator
+pkg_kvs_homepage = https://github.com/synrc/kvs
+pkg_kvs_fetch = git
+pkg_kvs_repo = https://github.com/synrc/kvs
+pkg_kvs_commit = master
+
+PACKAGES += lager
+pkg_lager_name = lager
+pkg_lager_description = A logging framework for Erlang/OTP.
+pkg_lager_homepage = https://github.com/erlang-lager/lager
+pkg_lager_fetch = git
+pkg_lager_repo = https://github.com/erlang-lager/lager
+pkg_lager_commit = master
+
+PACKAGES += lager_amqp_backend
+pkg_lager_amqp_backend_name = lager_amqp_backend
+pkg_lager_amqp_backend_description = AMQP RabbitMQ Lager backend
+pkg_lager_amqp_backend_homepage = https://github.com/jbrisbin/lager_amqp_backend
+pkg_lager_amqp_backend_fetch = git
+pkg_lager_amqp_backend_repo = https://github.com/jbrisbin/lager_amqp_backend
+pkg_lager_amqp_backend_commit = master
+
+PACKAGES += lager_syslog
+pkg_lager_syslog_name = lager_syslog
+pkg_lager_syslog_description = Syslog backend for lager
+pkg_lager_syslog_homepage = https://github.com/erlang-lager/lager_syslog
+pkg_lager_syslog_fetch = git
+pkg_lager_syslog_repo = https://github.com/erlang-lager/lager_syslog
+pkg_lager_syslog_commit = master
+
+PACKAGES += lambdapad
+pkg_lambdapad_name = lambdapad
+pkg_lambdapad_description = Static site generator using Erlang. Yes, Erlang.
+pkg_lambdapad_homepage = https://github.com/gar1t/lambdapad
+pkg_lambdapad_fetch = git
+pkg_lambdapad_repo = https://github.com/gar1t/lambdapad
+pkg_lambdapad_commit = master
+
+PACKAGES += lasp
+pkg_lasp_name = lasp
+pkg_lasp_description = A Language for Distributed, Eventually Consistent Computations
+pkg_lasp_homepage = http://lasp-lang.org/
+pkg_lasp_fetch = git
+pkg_lasp_repo = https://github.com/lasp-lang/lasp
+pkg_lasp_commit = master
+
+PACKAGES += lasse
+pkg_lasse_name = lasse
+pkg_lasse_description = SSE handler for Cowboy
+pkg_lasse_homepage = https://github.com/inaka/lasse
+pkg_lasse_fetch = git
+pkg_lasse_repo = https://github.com/inaka/lasse
+pkg_lasse_commit = master
+
+PACKAGES += ldap
+pkg_ldap_name = ldap
+pkg_ldap_description = LDAP server written in Erlang
+pkg_ldap_homepage = https://github.com/spawnproc/ldap
+pkg_ldap_fetch = git
+pkg_ldap_repo = https://github.com/spawnproc/ldap
+pkg_ldap_commit = master
+
+PACKAGES += lethink
+pkg_lethink_name = lethink
+pkg_lethink_description = erlang driver for rethinkdb
+pkg_lethink_homepage = https://github.com/taybin/lethink
+pkg_lethink_fetch = git
+pkg_lethink_repo = https://github.com/taybin/lethink
+pkg_lethink_commit = master
+
+PACKAGES += lfe
+pkg_lfe_name = lfe
+pkg_lfe_description = Lisp Flavoured Erlang (LFE)
+pkg_lfe_homepage = https://github.com/rvirding/lfe
+pkg_lfe_fetch = git
+pkg_lfe_repo = https://github.com/rvirding/lfe
+pkg_lfe_commit = master
+
+PACKAGES += ling
+pkg_ling_name = ling
+pkg_ling_description = Erlang on Xen
+pkg_ling_homepage = https://github.com/cloudozer/ling
+pkg_ling_fetch = git
+pkg_ling_repo = https://github.com/cloudozer/ling
+pkg_ling_commit = master
+
+PACKAGES += live
+pkg_live_name = live
+pkg_live_description = Automated module and configuration reloader.
+pkg_live_homepage = http://ninenines.eu
+pkg_live_fetch = git
+pkg_live_repo = https://github.com/ninenines/live
+pkg_live_commit = master
+
+PACKAGES += lmq
+pkg_lmq_name = lmq
+pkg_lmq_description = Lightweight Message Queue
+pkg_lmq_homepage = https://github.com/iij/lmq
+pkg_lmq_fetch = git
+pkg_lmq_repo = https://github.com/iij/lmq
+pkg_lmq_commit = master
+
+PACKAGES += locker
+pkg_locker_name = locker
+pkg_locker_description = Atomic distributed 'check and set' for short-lived keys
+pkg_locker_homepage = https://github.com/wooga/locker
+pkg_locker_fetch = git
+pkg_locker_repo = https://github.com/wooga/locker
+pkg_locker_commit = master
+
+PACKAGES += locks
+pkg_locks_name = locks
+pkg_locks_description = A scalable, deadlock-resolving resource locker
+pkg_locks_homepage = https://github.com/uwiger/locks
+pkg_locks_fetch = git
+pkg_locks_repo = https://github.com/uwiger/locks
+pkg_locks_commit = master
+
+PACKAGES += log4erl
+pkg_log4erl_name = log4erl
+pkg_log4erl_description = A logger for erlang in the spirit of Log4J.
+pkg_log4erl_homepage = https://github.com/ahmednawras/log4erl
+pkg_log4erl_fetch = git
+pkg_log4erl_repo = https://github.com/ahmednawras/log4erl
+pkg_log4erl_commit = master
+
+PACKAGES += lol
+pkg_lol_name = lol
+pkg_lol_description = Lisp on erLang, and programming is fun again
+pkg_lol_homepage = https://github.com/b0oh/lol
+pkg_lol_fetch = git
+pkg_lol_repo = https://github.com/b0oh/lol
+pkg_lol_commit = master
+
+PACKAGES += lucid
+pkg_lucid_name = lucid
+pkg_lucid_description = HTTP/2 server written in Erlang
+pkg_lucid_homepage = https://github.com/tatsuhiro-t/lucid
+pkg_lucid_fetch = git
+pkg_lucid_repo = https://github.com/tatsuhiro-t/lucid
+pkg_lucid_commit = master
+
+PACKAGES += luerl
+pkg_luerl_name = luerl
+pkg_luerl_description = Lua in Erlang
+pkg_luerl_homepage = https://github.com/rvirding/luerl
+pkg_luerl_fetch = git
+pkg_luerl_repo = https://github.com/rvirding/luerl
+pkg_luerl_commit = develop
+
+PACKAGES += luwak
+pkg_luwak_name = luwak
+pkg_luwak_description = Large-object storage interface for Riak
+pkg_luwak_homepage = https://github.com/basho/luwak
+pkg_luwak_fetch = git
+pkg_luwak_repo = https://github.com/basho/luwak
+pkg_luwak_commit = master
+
+PACKAGES += lux
+pkg_lux_name = lux
+pkg_lux_description = Lux (LUcid eXpect scripting) simplifies test automation and provides an Expect-style execution of commands
+pkg_lux_homepage = https://github.com/hawk/lux
+pkg_lux_fetch = git
+pkg_lux_repo = https://github.com/hawk/lux
+pkg_lux_commit = master
+
+PACKAGES += machi
+pkg_machi_name = machi
+pkg_machi_description = Machi file store
+pkg_machi_homepage = https://github.com/basho/machi
+pkg_machi_fetch = git
+pkg_machi_repo = https://github.com/basho/machi
+pkg_machi_commit = master
+
+PACKAGES += mad
+pkg_mad_name = mad
+pkg_mad_description = Small and Fast Rebar Replacement
+pkg_mad_homepage = https://github.com/synrc/mad
+pkg_mad_fetch = git
+pkg_mad_repo = https://github.com/synrc/mad
+pkg_mad_commit = master
+
+PACKAGES += marina
+pkg_marina_name = marina
+pkg_marina_description = Non-blocking Erlang Cassandra CQL3 client
+pkg_marina_homepage = https://github.com/lpgauth/marina
+pkg_marina_fetch = git
+pkg_marina_repo = https://github.com/lpgauth/marina
+pkg_marina_commit = master
+
+PACKAGES += mavg
+pkg_mavg_name = mavg
+pkg_mavg_description = Erlang :: Exponential moving average library
+pkg_mavg_homepage = https://github.com/EchoTeam/mavg
+pkg_mavg_fetch = git
+pkg_mavg_repo = https://github.com/EchoTeam/mavg
+pkg_mavg_commit = master
+
+PACKAGES += mc_erl
+pkg_mc_erl_name = mc_erl
+pkg_mc_erl_description = mc-erl is a server for Minecraft 1.4.7 written in Erlang.
+pkg_mc_erl_homepage = https://github.com/clonejo/mc-erl
+pkg_mc_erl_fetch = git
+pkg_mc_erl_repo = https://github.com/clonejo/mc-erl
+pkg_mc_erl_commit = master
+
+PACKAGES += mcd
+pkg_mcd_name = mcd
+pkg_mcd_description = Fast memcached protocol client in pure Erlang
+pkg_mcd_homepage = https://github.com/EchoTeam/mcd
+pkg_mcd_fetch = git
+pkg_mcd_repo = https://github.com/EchoTeam/mcd
+pkg_mcd_commit = master
+
+PACKAGES += mcerlang
+pkg_mcerlang_name = mcerlang
+pkg_mcerlang_description = The McErlang model checker for Erlang
+pkg_mcerlang_homepage = https://github.com/fredlund/McErlang
+pkg_mcerlang_fetch = git
+pkg_mcerlang_repo = https://github.com/fredlund/McErlang
+pkg_mcerlang_commit = master
+
+PACKAGES += meck
+pkg_meck_name = meck
+pkg_meck_description = A mocking library for Erlang
+pkg_meck_homepage = https://github.com/eproxus/meck
+pkg_meck_fetch = git
+pkg_meck_repo = https://github.com/eproxus/meck
+pkg_meck_commit = master
+
+PACKAGES += mekao
+pkg_mekao_name = mekao
+pkg_mekao_description = SQL constructor
+pkg_mekao_homepage = https://github.com/ddosia/mekao
+pkg_mekao_fetch = git
+pkg_mekao_repo = https://github.com/ddosia/mekao
+pkg_mekao_commit = master
+
+PACKAGES += memo
+pkg_memo_name = memo
+pkg_memo_description = Erlang memoization server
+pkg_memo_homepage = https://github.com/tuncer/memo
+pkg_memo_fetch = git
+pkg_memo_repo = https://github.com/tuncer/memo
+pkg_memo_commit = master
+
+PACKAGES += merge_index
+pkg_merge_index_name = merge_index
+pkg_merge_index_description = MergeIndex is an Erlang library for storing ordered sets on disk. It is very similar to an SSTable (in Google's Bigtable) or an HFile (in Hadoop).
+pkg_merge_index_homepage = https://github.com/basho/merge_index
+pkg_merge_index_fetch = git
+pkg_merge_index_repo = https://github.com/basho/merge_index
+pkg_merge_index_commit = master
+
+PACKAGES += merl
+pkg_merl_name = merl
+pkg_merl_description = Metaprogramming in Erlang
+pkg_merl_homepage = https://github.com/richcarl/merl
+pkg_merl_fetch = git
+pkg_merl_repo = https://github.com/richcarl/merl
+pkg_merl_commit = master
+
+PACKAGES += mimerl
+pkg_mimerl_name = mimerl
+pkg_mimerl_description = library to handle mimetypes
+pkg_mimerl_homepage = https://github.com/benoitc/mimerl
+pkg_mimerl_fetch = git
+pkg_mimerl_repo = https://github.com/benoitc/mimerl
+pkg_mimerl_commit = master
+
+PACKAGES += mimetypes
+pkg_mimetypes_name = mimetypes
+pkg_mimetypes_description = Erlang MIME types library
+pkg_mimetypes_homepage = https://github.com/spawngrid/mimetypes
+pkg_mimetypes_fetch = git
+pkg_mimetypes_repo = https://github.com/spawngrid/mimetypes
+pkg_mimetypes_commit = master
+
+PACKAGES += mixer
+pkg_mixer_name = mixer
+pkg_mixer_description = Mix in functions from other modules
+pkg_mixer_homepage = https://github.com/chef/mixer
+pkg_mixer_fetch = git
+pkg_mixer_repo = https://github.com/chef/mixer
+pkg_mixer_commit = master
+
+PACKAGES += mochiweb
+pkg_mochiweb_name = mochiweb
+pkg_mochiweb_description = MochiWeb is an Erlang library for building lightweight HTTP servers.
+pkg_mochiweb_homepage = https://github.com/mochi/mochiweb
+pkg_mochiweb_fetch = git
+pkg_mochiweb_repo = https://github.com/mochi/mochiweb
+pkg_mochiweb_commit = master
+
+PACKAGES += mochiweb_xpath
+pkg_mochiweb_xpath_name = mochiweb_xpath
+pkg_mochiweb_xpath_description = XPath support for mochiweb's html parser
+pkg_mochiweb_xpath_homepage = https://github.com/retnuh/mochiweb_xpath
+pkg_mochiweb_xpath_fetch = git
+pkg_mochiweb_xpath_repo = https://github.com/retnuh/mochiweb_xpath
+pkg_mochiweb_xpath_commit = master
+
+PACKAGES += mockgyver
+pkg_mockgyver_name = mockgyver
+pkg_mockgyver_description = A mocking library for Erlang
+pkg_mockgyver_homepage = https://github.com/klajo/mockgyver
+pkg_mockgyver_fetch = git
+pkg_mockgyver_repo = https://github.com/klajo/mockgyver
+pkg_mockgyver_commit = master
+
+PACKAGES += modlib
+pkg_modlib_name = modlib
+pkg_modlib_description = Web framework based on Erlang's inets httpd
+pkg_modlib_homepage = https://github.com/gar1t/modlib
+pkg_modlib_fetch = git
+pkg_modlib_repo = https://github.com/gar1t/modlib
+pkg_modlib_commit = master
+
+PACKAGES += mongodb
+pkg_mongodb_name = mongodb
+pkg_mongodb_description = MongoDB driver for Erlang
+pkg_mongodb_homepage = https://github.com/comtihon/mongodb-erlang
+pkg_mongodb_fetch = git
+pkg_mongodb_repo = https://github.com/comtihon/mongodb-erlang
+pkg_mongodb_commit = master
+
+PACKAGES += mongooseim
+pkg_mongooseim_name = mongooseim
+pkg_mongooseim_description = Jabber / XMPP server with focus on performance and scalability, by Erlang Solutions
+pkg_mongooseim_homepage = https://www.erlang-solutions.com/products/mongooseim-massively-scalable-ejabberd-platform
+pkg_mongooseim_fetch = git
+pkg_mongooseim_repo = https://github.com/esl/MongooseIM
+pkg_mongooseim_commit = master
+
+PACKAGES += moyo
+pkg_moyo_name = moyo
+pkg_moyo_description = Erlang utility functions library
+pkg_moyo_homepage = https://github.com/dwango/moyo
+pkg_moyo_fetch = git
+pkg_moyo_repo = https://github.com/dwango/moyo
+pkg_moyo_commit = master
+
+PACKAGES += msgpack
+pkg_msgpack_name = msgpack
+pkg_msgpack_description = MessagePack (de)serializer implementation for Erlang
+pkg_msgpack_homepage = https://github.com/msgpack/msgpack-erlang
+pkg_msgpack_fetch = git
+pkg_msgpack_repo = https://github.com/msgpack/msgpack-erlang
+pkg_msgpack_commit = master
+
+PACKAGES += mu2
+pkg_mu2_name = mu2
+pkg_mu2_description = Erlang mutation testing tool
+pkg_mu2_homepage = https://github.com/ramsay-t/mu2
+pkg_mu2_fetch = git
+pkg_mu2_repo = https://github.com/ramsay-t/mu2
+pkg_mu2_commit = master
+
+PACKAGES += mustache
+pkg_mustache_name = mustache
+pkg_mustache_description = Mustache template engine for Erlang.
+pkg_mustache_homepage = https://github.com/mojombo/mustache.erl
+pkg_mustache_fetch = git
+pkg_mustache_repo = https://github.com/mojombo/mustache.erl
+pkg_mustache_commit = master
+
+PACKAGES += myproto
+pkg_myproto_name = myproto
+pkg_myproto_description = MySQL Server Protocol in Erlang
+pkg_myproto_homepage = https://github.com/altenwald/myproto
+pkg_myproto_fetch = git
+pkg_myproto_repo = https://github.com/altenwald/myproto
+pkg_myproto_commit = master
+
+PACKAGES += mysql
+pkg_mysql_name = mysql
+pkg_mysql_description = MySQL client library for Erlang/OTP
+pkg_mysql_homepage = https://github.com/mysql-otp/mysql-otp
+pkg_mysql_fetch = git
+pkg_mysql_repo = https://github.com/mysql-otp/mysql-otp
+pkg_mysql_commit = 1.5.1
+
+PACKAGES += n2o
+pkg_n2o_name = n2o
+pkg_n2o_description = WebSocket Application Server
+pkg_n2o_homepage = https://github.com/5HT/n2o
+pkg_n2o_fetch = git
+pkg_n2o_repo = https://github.com/5HT/n2o
+pkg_n2o_commit = master
+
+PACKAGES += nat_upnp
+pkg_nat_upnp_name = nat_upnp
+pkg_nat_upnp_description = Erlang library to map your internal port to an external one using UPnP IGD
+pkg_nat_upnp_homepage = https://github.com/benoitc/nat_upnp
+pkg_nat_upnp_fetch = git
+pkg_nat_upnp_repo = https://github.com/benoitc/nat_upnp
+pkg_nat_upnp_commit = master
+
+PACKAGES += neo4j
+pkg_neo4j_name = neo4j
+pkg_neo4j_description = Erlang client library for Neo4J.
+pkg_neo4j_homepage = https://github.com/dmitriid/neo4j-erlang
+pkg_neo4j_fetch = git
+pkg_neo4j_repo = https://github.com/dmitriid/neo4j-erlang
+pkg_neo4j_commit = master
+
+PACKAGES += neotoma
+pkg_neotoma_name = neotoma
+pkg_neotoma_description = Erlang library and packrat parser-generator for parsing expression grammars.
+pkg_neotoma_homepage = https://github.com/seancribbs/neotoma
+pkg_neotoma_fetch = git
+pkg_neotoma_repo = https://github.com/seancribbs/neotoma
+pkg_neotoma_commit = master
+
+PACKAGES += newrelic
+pkg_newrelic_name = newrelic
+pkg_newrelic_description = Erlang library for sending metrics to New Relic
+pkg_newrelic_homepage = https://github.com/wooga/newrelic-erlang
+pkg_newrelic_fetch = git
+pkg_newrelic_repo = https://github.com/wooga/newrelic-erlang
+pkg_newrelic_commit = master
+
+PACKAGES += nifty
+pkg_nifty_name = nifty
+pkg_nifty_description = Erlang NIF wrapper generator
+pkg_nifty_homepage = https://github.com/parapluu/nifty
+pkg_nifty_fetch = git
+pkg_nifty_repo = https://github.com/parapluu/nifty
+pkg_nifty_commit = master
+
+PACKAGES += nitrogen_core
+pkg_nitrogen_core_name = nitrogen_core
+pkg_nitrogen_core_description = The core Nitrogen library.
+pkg_nitrogen_core_homepage = http://nitrogenproject.com/
+pkg_nitrogen_core_fetch = git
+pkg_nitrogen_core_repo = https://github.com/nitrogen/nitrogen_core
+pkg_nitrogen_core_commit = master
+
+PACKAGES += nkbase
+pkg_nkbase_name = nkbase
+pkg_nkbase_description = NkBASE distributed database
+pkg_nkbase_homepage = https://github.com/Nekso/nkbase
+pkg_nkbase_fetch = git
+pkg_nkbase_repo = https://github.com/Nekso/nkbase
+pkg_nkbase_commit = develop
+
+PACKAGES += nkdocker
+pkg_nkdocker_name = nkdocker
+pkg_nkdocker_description = Erlang Docker client
+pkg_nkdocker_homepage = https://github.com/Nekso/nkdocker
+pkg_nkdocker_fetch = git
+pkg_nkdocker_repo = https://github.com/Nekso/nkdocker
+pkg_nkdocker_commit = master
+
+PACKAGES += nkpacket
+pkg_nkpacket_name = nkpacket
+pkg_nkpacket_description = Generic Erlang transport layer
+pkg_nkpacket_homepage = https://github.com/Nekso/nkpacket
+pkg_nkpacket_fetch = git
+pkg_nkpacket_repo = https://github.com/Nekso/nkpacket
+pkg_nkpacket_commit = master
+
+PACKAGES += nksip
+pkg_nksip_name = nksip
+pkg_nksip_description = Erlang SIP application server
+pkg_nksip_homepage = https://github.com/kalta/nksip
+pkg_nksip_fetch = git
+pkg_nksip_repo = https://github.com/kalta/nksip
+pkg_nksip_commit = master
+
+PACKAGES += nodefinder
+pkg_nodefinder_name = nodefinder
+pkg_nodefinder_description = automatic node discovery via UDP multicast
+pkg_nodefinder_homepage = https://github.com/erlanger/nodefinder
+pkg_nodefinder_fetch = git
+pkg_nodefinder_repo = https://github.com/okeuday/nodefinder
+pkg_nodefinder_commit = master
+
+PACKAGES += nprocreg
+pkg_nprocreg_name = nprocreg
+pkg_nprocreg_description = Minimal Distributed Erlang Process Registry
+pkg_nprocreg_homepage = http://nitrogenproject.com/
+pkg_nprocreg_fetch = git
+pkg_nprocreg_repo = https://github.com/nitrogen/nprocreg
+pkg_nprocreg_commit = master
+
+PACKAGES += oauth
+pkg_oauth_name = oauth
+pkg_oauth_description = An Erlang OAuth 1.0 implementation
+pkg_oauth_homepage = https://github.com/tim/erlang-oauth
+pkg_oauth_fetch = git
+pkg_oauth_repo = https://github.com/tim/erlang-oauth
+pkg_oauth_commit = master
+
+PACKAGES += oauth2
+pkg_oauth2_name = oauth2
+pkg_oauth2_description = Erlang OAuth2 implementation
+pkg_oauth2_homepage = https://github.com/kivra/oauth2
+pkg_oauth2_fetch = git
+pkg_oauth2_repo = https://github.com/kivra/oauth2
+pkg_oauth2_commit = master
+
+PACKAGES += observer_cli
+pkg_observer_cli_name = observer_cli
+pkg_observer_cli_description = Visualize Erlang/Elixir Nodes On The Command Line
+pkg_observer_cli_homepage = http://zhongwencool.github.io/observer_cli
+pkg_observer_cli_fetch = git
+pkg_observer_cli_repo = https://github.com/zhongwencool/observer_cli
+pkg_observer_cli_commit = master
+
+PACKAGES += octopus
+pkg_octopus_name = octopus
+pkg_octopus_description = Small and flexible pool manager written in Erlang
+pkg_octopus_homepage = https://github.com/erlangbureau/octopus
+pkg_octopus_fetch = git
+pkg_octopus_repo = https://github.com/erlangbureau/octopus
+pkg_octopus_commit = master
+
+PACKAGES += of_protocol
+pkg_of_protocol_name = of_protocol
+pkg_of_protocol_description = OpenFlow Protocol Library for Erlang
+pkg_of_protocol_homepage = https://github.com/FlowForwarding/of_protocol
+pkg_of_protocol_fetch = git
+pkg_of_protocol_repo = https://github.com/FlowForwarding/of_protocol
+pkg_of_protocol_commit = master
+
+PACKAGES += opencouch
+pkg_opencouch_name = couch
+pkg_opencouch_description = An embeddable document-oriented database compatible with Apache CouchDB
+pkg_opencouch_homepage = https://github.com/benoitc/opencouch
+pkg_opencouch_fetch = git
+pkg_opencouch_repo = https://github.com/benoitc/opencouch
+pkg_opencouch_commit = master
+
+PACKAGES += openflow
+pkg_openflow_name = openflow
+pkg_openflow_description = An OpenFlow controller written in pure erlang
+pkg_openflow_homepage = https://github.com/renatoaguiar/erlang-openflow
+pkg_openflow_fetch = git
+pkg_openflow_repo = https://github.com/renatoaguiar/erlang-openflow
+pkg_openflow_commit = master
+
+PACKAGES += openid
+pkg_openid_name = openid
+pkg_openid_description = Erlang OpenID
+pkg_openid_homepage = https://github.com/brendonh/erl_openid
+pkg_openid_fetch = git
+pkg_openid_repo = https://github.com/brendonh/erl_openid
+pkg_openid_commit = master
+
+PACKAGES += openpoker
+pkg_openpoker_name = openpoker
+pkg_openpoker_description = Genesis Texas hold'em Game Server
+pkg_openpoker_homepage = https://github.com/hpyhacking/openpoker
+pkg_openpoker_fetch = git
+pkg_openpoker_repo = https://github.com/hpyhacking/openpoker
+pkg_openpoker_commit = master
+
+PACKAGES += otpbp
+pkg_otpbp_name = otpbp
+pkg_otpbp_description = Parse transform for using new OTP functions in old Erlang/OTP releases (R15, R16, 17, 18, 19)
+pkg_otpbp_homepage = https://github.com/Ledest/otpbp
+pkg_otpbp_fetch = git
+pkg_otpbp_repo = https://github.com/Ledest/otpbp
+pkg_otpbp_commit = master
+
+PACKAGES += pal
+pkg_pal_name = pal
+pkg_pal_description = Pragmatic Authentication Library
+pkg_pal_homepage = https://github.com/manifest/pal
+pkg_pal_fetch = git
+pkg_pal_repo = https://github.com/manifest/pal
+pkg_pal_commit = master
+
+PACKAGES += parse_trans
+pkg_parse_trans_name = parse_trans
+pkg_parse_trans_description = Parse transform utilities for Erlang
+pkg_parse_trans_homepage = https://github.com/uwiger/parse_trans
+pkg_parse_trans_fetch = git
+pkg_parse_trans_repo = https://github.com/uwiger/parse_trans
+pkg_parse_trans_commit = master
+
+PACKAGES += parsexml
+pkg_parsexml_name = parsexml
+pkg_parsexml_description = Simple DOM XML parser with convenient and very simple API
+pkg_parsexml_homepage = https://github.com/maxlapshin/parsexml
+pkg_parsexml_fetch = git
+pkg_parsexml_repo = https://github.com/maxlapshin/parsexml
+pkg_parsexml_commit = master
+
+PACKAGES += partisan
+pkg_partisan_name = partisan
+pkg_partisan_description = High-performance, high-scalability distributed computing with Erlang and Elixir.
+pkg_partisan_homepage = http://partisan.cloud
+pkg_partisan_fetch = git
+pkg_partisan_repo = https://github.com/lasp-lang/partisan
+pkg_partisan_commit = master
+
+PACKAGES += pegjs
+pkg_pegjs_name = pegjs
+pkg_pegjs_description = An implementation of PEG.js grammar for Erlang.
+pkg_pegjs_homepage = https://github.com/dmitriid/pegjs
+pkg_pegjs_fetch = git
+pkg_pegjs_repo = https://github.com/dmitriid/pegjs
+pkg_pegjs_commit = master
+
+PACKAGES += percept2
+pkg_percept2_name = percept2
+pkg_percept2_description = Concurrent profiling tool for Erlang
+pkg_percept2_homepage = https://github.com/huiqing/percept2
+pkg_percept2_fetch = git
+pkg_percept2_repo = https://github.com/huiqing/percept2
+pkg_percept2_commit = master
+
+PACKAGES += pgo
+pkg_pgo_name = pgo
+pkg_pgo_description = Erlang Postgres client and connection pool
+pkg_pgo_homepage = https://github.com/erleans/pgo.git
+pkg_pgo_fetch = git
+pkg_pgo_repo = https://github.com/erleans/pgo.git
+pkg_pgo_commit = master
+
+PACKAGES += pgsql
+pkg_pgsql_name = pgsql
+pkg_pgsql_description = Erlang PostgreSQL driver
+pkg_pgsql_homepage = https://github.com/semiocast/pgsql
+pkg_pgsql_fetch = git
+pkg_pgsql_repo = https://github.com/semiocast/pgsql
+pkg_pgsql_commit = master
+
+PACKAGES += pkgx
+pkg_pkgx_name = pkgx
+pkg_pkgx_description = Build .deb packages from Erlang releases
+pkg_pkgx_homepage = https://github.com/arjan/pkgx
+pkg_pkgx_fetch = git
+pkg_pkgx_repo = https://github.com/arjan/pkgx
+pkg_pkgx_commit = master
+
+PACKAGES += pkt
+pkg_pkt_name = pkt
+pkg_pkt_description = Erlang network protocol library
+pkg_pkt_homepage = https://github.com/msantos/pkt
+pkg_pkt_fetch = git
+pkg_pkt_repo = https://github.com/msantos/pkt
+pkg_pkt_commit = master
+
+PACKAGES += plain_fsm
+pkg_plain_fsm_name = plain_fsm
+pkg_plain_fsm_description = A behaviour/support library for writing plain Erlang FSMs.
+pkg_plain_fsm_homepage = https://github.com/uwiger/plain_fsm
+pkg_plain_fsm_fetch = git
+pkg_plain_fsm_repo = https://github.com/uwiger/plain_fsm
+pkg_plain_fsm_commit = master
+
+PACKAGES += plumtree
+pkg_plumtree_name = plumtree
+pkg_plumtree_description = Epidemic Broadcast Trees
+pkg_plumtree_homepage = https://github.com/helium/plumtree
+pkg_plumtree_fetch = git
+pkg_plumtree_repo = https://github.com/helium/plumtree
+pkg_plumtree_commit = master
+
+PACKAGES += pmod_transform
+pkg_pmod_transform_name = pmod_transform
+pkg_pmod_transform_description = Parse transform for parameterized modules
+pkg_pmod_transform_homepage = https://github.com/erlang/pmod_transform
+pkg_pmod_transform_fetch = git
+pkg_pmod_transform_repo = https://github.com/erlang/pmod_transform
+pkg_pmod_transform_commit = master
+
+PACKAGES += pobox
+pkg_pobox_name = pobox
+pkg_pobox_description = External buffer processes to protect against mailbox overflow in Erlang
+pkg_pobox_homepage = https://github.com/ferd/pobox
+pkg_pobox_fetch = git
+pkg_pobox_repo = https://github.com/ferd/pobox
+pkg_pobox_commit = master
+
+PACKAGES += ponos
+pkg_ponos_name = ponos
+pkg_ponos_description = ponos is a simple yet powerful load generator written in erlang
+pkg_ponos_homepage = https://github.com/klarna/ponos
+pkg_ponos_fetch = git
+pkg_ponos_repo = https://github.com/klarna/ponos
+pkg_ponos_commit = master
+
+PACKAGES += poolboy
+pkg_poolboy_name = poolboy
+pkg_poolboy_description = A hunky Erlang worker pool factory
+pkg_poolboy_homepage = https://github.com/devinus/poolboy
+pkg_poolboy_fetch = git
+pkg_poolboy_repo = https://github.com/devinus/poolboy
+pkg_poolboy_commit = master
+
+PACKAGES += pooler
+pkg_pooler_name = pooler
+pkg_pooler_description = An OTP Process Pool Application
+pkg_pooler_homepage = https://github.com/seth/pooler
+pkg_pooler_fetch = git
+pkg_pooler_repo = https://github.com/seth/pooler
+pkg_pooler_commit = master
+
+PACKAGES += pqueue
+pkg_pqueue_name = pqueue
+pkg_pqueue_description = Erlang Priority Queues
+pkg_pqueue_homepage = https://github.com/okeuday/pqueue
+pkg_pqueue_fetch = git
+pkg_pqueue_repo = https://github.com/okeuday/pqueue
+pkg_pqueue_commit = master
+
+PACKAGES += procket
+pkg_procket_name = procket
+pkg_procket_description = Erlang interface to low level socket operations
+pkg_procket_homepage = http://blog.listincomprehension.com/search/label/procket
+pkg_procket_fetch = git
+pkg_procket_repo = https://github.com/msantos/procket
+pkg_procket_commit = master
+
+PACKAGES += prometheus
+pkg_prometheus_name = prometheus
+pkg_prometheus_description = Prometheus.io client in Erlang
+pkg_prometheus_homepage = https://github.com/deadtrickster/prometheus.erl
+pkg_prometheus_fetch = git
+pkg_prometheus_repo = https://github.com/deadtrickster/prometheus.erl
+pkg_prometheus_commit = master
+
+PACKAGES += prop
+pkg_prop_name = prop
+pkg_prop_description = An Erlang code scaffolding and generator system.
+pkg_prop_homepage = https://github.com/nuex/prop
+pkg_prop_fetch = git
+pkg_prop_repo = https://github.com/nuex/prop
+pkg_prop_commit = master
+
+PACKAGES += proper
+pkg_proper_name = proper
+pkg_proper_description = PropEr: a QuickCheck-inspired property-based testing tool for Erlang.
+pkg_proper_homepage = http://proper.softlab.ntua.gr
+pkg_proper_fetch = git
+pkg_proper_repo = https://github.com/manopapad/proper
+pkg_proper_commit = master
+
+PACKAGES += props
+pkg_props_name = props
+pkg_props_description = Property structure library
+pkg_props_homepage = https://github.com/greyarea/props
+pkg_props_fetch = git
+pkg_props_repo = https://github.com/greyarea/props
+pkg_props_commit = master
+
+PACKAGES += protobuffs
+pkg_protobuffs_name = protobuffs
+pkg_protobuffs_description = An implementation of Google's Protocol Buffers for Erlang, based on ngerakines/erlang_protobuffs.
+pkg_protobuffs_homepage = https://github.com/basho/erlang_protobuffs
+pkg_protobuffs_fetch = git
+pkg_protobuffs_repo = https://github.com/basho/erlang_protobuffs
+pkg_protobuffs_commit = master
+
+PACKAGES += psycho
+pkg_psycho_name = psycho
+pkg_psycho_description = HTTP server that provides a WSGI-like interface for applications and middleware.
+pkg_psycho_homepage = https://github.com/gar1t/psycho
+pkg_psycho_fetch = git
+pkg_psycho_repo = https://github.com/gar1t/psycho
+pkg_psycho_commit = master
+
+PACKAGES += purity
+pkg_purity_name = purity
+pkg_purity_description = A side-effect analyzer for Erlang
+pkg_purity_homepage = https://github.com/mpitid/purity
+pkg_purity_fetch = git
+pkg_purity_repo = https://github.com/mpitid/purity
+pkg_purity_commit = master
+
+PACKAGES += push_service
+pkg_push_service_name = push_service
+pkg_push_service_description = Push service
+pkg_push_service_homepage = https://github.com/hairyhum/push_service
+pkg_push_service_fetch = git
+pkg_push_service_repo = https://github.com/hairyhum/push_service
+pkg_push_service_commit = master
+
+PACKAGES += qdate
+pkg_qdate_name = qdate
+pkg_qdate_description = Date, time, and timezone parsing, formatting, and conversion for Erlang.
+pkg_qdate_homepage = https://github.com/choptastic/qdate
+pkg_qdate_fetch = git
+pkg_qdate_repo = https://github.com/choptastic/qdate
+pkg_qdate_commit = master
+
+PACKAGES += qrcode
+pkg_qrcode_name = qrcode
+pkg_qrcode_description = QR Code encoder in Erlang
+pkg_qrcode_homepage = https://github.com/komone/qrcode
+pkg_qrcode_fetch = git
+pkg_qrcode_repo = https://github.com/komone/qrcode
+pkg_qrcode_commit = master
+
+PACKAGES += quest
+pkg_quest_name = quest
+pkg_quest_description = Learn Erlang through this set of challenges. An interactive system for getting to know Erlang.
+pkg_quest_homepage = https://github.com/eriksoe/ErlangQuest
+pkg_quest_fetch = git
+pkg_quest_repo = https://github.com/eriksoe/ErlangQuest
+pkg_quest_commit = master
+
+PACKAGES += quickrand
+pkg_quickrand_name = quickrand
+pkg_quickrand_description = Quick Erlang Random Number Generation
+pkg_quickrand_homepage = https://github.com/okeuday/quickrand
+pkg_quickrand_fetch = git
+pkg_quickrand_repo = https://github.com/okeuday/quickrand
+pkg_quickrand_commit = master
+
+PACKAGES += rabbit
+pkg_rabbit_name = rabbit
+pkg_rabbit_description = RabbitMQ Server
+pkg_rabbit_homepage = https://www.rabbitmq.com/
+pkg_rabbit_fetch = git
+pkg_rabbit_repo = https://github.com/rabbitmq/rabbitmq-server.git
+pkg_rabbit_commit = master
+
+PACKAGES += rabbit_exchange_type_riak
+pkg_rabbit_exchange_type_riak_name = rabbit_exchange_type_riak
+pkg_rabbit_exchange_type_riak_description = Custom RabbitMQ exchange type for sticking messages in Riak
+pkg_rabbit_exchange_type_riak_homepage = https://github.com/jbrisbin/riak-exchange
+pkg_rabbit_exchange_type_riak_fetch = git
+pkg_rabbit_exchange_type_riak_repo = https://github.com/jbrisbin/riak-exchange
+pkg_rabbit_exchange_type_riak_commit = master
+
+PACKAGES += rack
+pkg_rack_name = rack
+pkg_rack_description = Rack handler for erlang
+pkg_rack_homepage = https://github.com/erlyvideo/rack
+pkg_rack_fetch = git
+pkg_rack_repo = https://github.com/erlyvideo/rack
+pkg_rack_commit = master
+
+PACKAGES += radierl
+pkg_radierl_name = radierl
+pkg_radierl_description = RADIUS protocol stack implemented in Erlang.
+pkg_radierl_homepage = https://github.com/vances/radierl
+pkg_radierl_fetch = git
+pkg_radierl_repo = https://github.com/vances/radierl
+pkg_radierl_commit = master
+
+PACKAGES += rafter
+pkg_rafter_name = rafter
+pkg_rafter_description = An Erlang library application which implements the Raft consensus protocol
+pkg_rafter_homepage = https://github.com/andrewjstone/rafter
+pkg_rafter_fetch = git
+pkg_rafter_repo = https://github.com/andrewjstone/rafter
+pkg_rafter_commit = master
+
+PACKAGES += ranch
+pkg_ranch_name = ranch
+pkg_ranch_description = Socket acceptor pool for TCP protocols.
+pkg_ranch_homepage = http://ninenines.eu
+pkg_ranch_fetch = git
+pkg_ranch_repo = https://github.com/ninenines/ranch
+pkg_ranch_commit = 1.2.1
+
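+# Override sketch (illustrative): a project can repin an index entry without
+# editing this file by defining dep_<name> ahead of the include, e.g. to
+# track a newer ranch tag than the 1.2.1 pinned above:
+#
+#     DEPS = ranch
+#     dep_ranch = git https://github.com/ninenines/ranch 1.7.1
+#     include erlang.mk
+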
+PACKAGES += rbeacon
+pkg_rbeacon_name = rbeacon
+pkg_rbeacon_description = LAN discovery and presence in Erlang.
+pkg_rbeacon_homepage = https://github.com/refuge/rbeacon
+pkg_rbeacon_fetch = git
+pkg_rbeacon_repo = https://github.com/refuge/rbeacon
+pkg_rbeacon_commit = master
+
+PACKAGES += rebar
+pkg_rebar_name = rebar
+pkg_rebar_description = Erlang build tool that makes it easy to compile and test Erlang applications, port drivers and releases.
+pkg_rebar_homepage = http://www.rebar3.org
+pkg_rebar_fetch = git
+pkg_rebar_repo = https://github.com/rebar/rebar3
+pkg_rebar_commit = master
+
+PACKAGES += rebus
+pkg_rebus_name = rebus
+pkg_rebus_description = A stupid simple, internal, pub/sub event bus written in- and for Erlang.
+pkg_rebus_homepage = https://github.com/olle/rebus
+pkg_rebus_fetch = git
+pkg_rebus_repo = https://github.com/olle/rebus
+pkg_rebus_commit = master
+
+PACKAGES += rec2json
+pkg_rec2json_name = rec2json
+pkg_rec2json_description = Compile erlang record definitions into modules to convert them to/from json easily.
+pkg_rec2json_homepage = https://github.com/lordnull/rec2json
+pkg_rec2json_fetch = git
+pkg_rec2json_repo = https://github.com/lordnull/rec2json
+pkg_rec2json_commit = master
+
+PACKAGES += recon
+pkg_recon_name = recon
+pkg_recon_description = Collection of functions and scripts to debug Erlang in production.
+pkg_recon_homepage = https://github.com/ferd/recon
+pkg_recon_fetch = git
+pkg_recon_repo = https://github.com/ferd/recon
+pkg_recon_commit = master
+
+PACKAGES += record_info
+pkg_record_info_name = record_info
+pkg_record_info_description = Convert between record and proplist
+pkg_record_info_homepage = https://github.com/bipthelin/erlang-record_info
+pkg_record_info_fetch = git
+pkg_record_info_repo = https://github.com/bipthelin/erlang-record_info
+pkg_record_info_commit = master
+
+PACKAGES += redgrid
+pkg_redgrid_name = redgrid
+pkg_redgrid_description = automatic Erlang node discovery via redis
+pkg_redgrid_homepage = https://github.com/jkvor/redgrid
+pkg_redgrid_fetch = git
+pkg_redgrid_repo = https://github.com/jkvor/redgrid
+pkg_redgrid_commit = master
+
+PACKAGES += redo
+pkg_redo_name = redo
+pkg_redo_description = pipelined erlang redis client
+pkg_redo_homepage = https://github.com/jkvor/redo
+pkg_redo_fetch = git
+pkg_redo_repo = https://github.com/jkvor/redo
+pkg_redo_commit = master
+
+PACKAGES += reload_mk
+pkg_reload_mk_name = reload_mk
+pkg_reload_mk_description = Live reload plugin for erlang.mk.
+pkg_reload_mk_homepage = https://github.com/bullno1/reload.mk
+pkg_reload_mk_fetch = git
+pkg_reload_mk_repo = https://github.com/bullno1/reload.mk
+pkg_reload_mk_commit = master
+
+PACKAGES += reltool_util
+pkg_reltool_util_name = reltool_util
+pkg_reltool_util_description = Erlang reltool utility functionality application
+pkg_reltool_util_homepage = https://github.com/okeuday/reltool_util
+pkg_reltool_util_fetch = git
+pkg_reltool_util_repo = https://github.com/okeuday/reltool_util
+pkg_reltool_util_commit = master
+
+PACKAGES += relx
+pkg_relx_name = relx
+pkg_relx_description = Sane, simple release creation for Erlang
+pkg_relx_homepage = https://github.com/erlware/relx
+pkg_relx_fetch = git
+pkg_relx_repo = https://github.com/erlware/relx
+pkg_relx_commit = master
+
+PACKAGES += resource_discovery
+pkg_resource_discovery_name = resource_discovery
+pkg_resource_discovery_description = An application used to dynamically discover resources present in an Erlang node cluster.
+pkg_resource_discovery_homepage = http://erlware.org/
+pkg_resource_discovery_fetch = git
+pkg_resource_discovery_repo = https://github.com/erlware/resource_discovery
+pkg_resource_discovery_commit = master
+
+PACKAGES += restc
+pkg_restc_name = restc
+pkg_restc_description = Erlang REST client
+pkg_restc_homepage = https://github.com/kivra/restclient
+pkg_restc_fetch = git
+pkg_restc_repo = https://github.com/kivra/restclient
+pkg_restc_commit = master
+
+PACKAGES += rfc4627_jsonrpc
+pkg_rfc4627_jsonrpc_name = rfc4627_jsonrpc
+pkg_rfc4627_jsonrpc_description = Erlang RFC4627 (JSON) codec and JSON-RPC server implementation.
+pkg_rfc4627_jsonrpc_homepage = https://github.com/tonyg/erlang-rfc4627
+pkg_rfc4627_jsonrpc_fetch = git
+pkg_rfc4627_jsonrpc_repo = https://github.com/tonyg/erlang-rfc4627
+pkg_rfc4627_jsonrpc_commit = master
+
+PACKAGES += riak_control
+pkg_riak_control_name = riak_control
+pkg_riak_control_description = Webmachine-based administration interface for Riak.
+pkg_riak_control_homepage = https://github.com/basho/riak_control
+pkg_riak_control_fetch = git
+pkg_riak_control_repo = https://github.com/basho/riak_control
+pkg_riak_control_commit = master
+
+PACKAGES += riak_core
+pkg_riak_core_name = riak_core
+pkg_riak_core_description = Distributed systems infrastructure used by Riak.
+pkg_riak_core_homepage = https://github.com/basho/riak_core
+pkg_riak_core_fetch = git
+pkg_riak_core_repo = https://github.com/basho/riak_core
+pkg_riak_core_commit = master
+
+PACKAGES += riak_dt
+pkg_riak_dt_name = riak_dt
+pkg_riak_dt_description = Convergent replicated datatypes in Erlang
+pkg_riak_dt_homepage = https://github.com/basho/riak_dt
+pkg_riak_dt_fetch = git
+pkg_riak_dt_repo = https://github.com/basho/riak_dt
+pkg_riak_dt_commit = master
+
+PACKAGES += riak_ensemble
+pkg_riak_ensemble_name = riak_ensemble
+pkg_riak_ensemble_description = Multi-Paxos framework in Erlang
+pkg_riak_ensemble_homepage = https://github.com/basho/riak_ensemble
+pkg_riak_ensemble_fetch = git
+pkg_riak_ensemble_repo = https://github.com/basho/riak_ensemble
+pkg_riak_ensemble_commit = master
+
+PACKAGES += riak_kv
+pkg_riak_kv_name = riak_kv
+pkg_riak_kv_description = Riak Key/Value Store
+pkg_riak_kv_homepage = https://github.com/basho/riak_kv
+pkg_riak_kv_fetch = git
+pkg_riak_kv_repo = https://github.com/basho/riak_kv
+pkg_riak_kv_commit = master
+
+PACKAGES += riak_pg
+pkg_riak_pg_name = riak_pg
+pkg_riak_pg_description = Distributed process groups with riak_core.
+pkg_riak_pg_homepage = https://github.com/cmeiklejohn/riak_pg
+pkg_riak_pg_fetch = git
+pkg_riak_pg_repo = https://github.com/cmeiklejohn/riak_pg
+pkg_riak_pg_commit = master
+
+PACKAGES += riak_pipe
+pkg_riak_pipe_name = riak_pipe
+pkg_riak_pipe_description = Riak Pipelines
+pkg_riak_pipe_homepage = https://github.com/basho/riak_pipe
+pkg_riak_pipe_fetch = git
+pkg_riak_pipe_repo = https://github.com/basho/riak_pipe
+pkg_riak_pipe_commit = master
+
+PACKAGES += riak_sysmon
+pkg_riak_sysmon_name = riak_sysmon
+pkg_riak_sysmon_description = Simple OTP app for managing Erlang VM system_monitor event messages
+pkg_riak_sysmon_homepage = https://github.com/basho/riak_sysmon
+pkg_riak_sysmon_fetch = git
+pkg_riak_sysmon_repo = https://github.com/basho/riak_sysmon
+pkg_riak_sysmon_commit = master
+
+PACKAGES += riak_test
+pkg_riak_test_name = riak_test
+pkg_riak_test_description = I'm in your cluster, testing your riaks
+pkg_riak_test_homepage = https://github.com/basho/riak_test
+pkg_riak_test_fetch = git
+pkg_riak_test_repo = https://github.com/basho/riak_test
+pkg_riak_test_commit = master
+
+PACKAGES += riakc
+pkg_riakc_name = riakc
+pkg_riakc_description = Erlang clients for Riak.
+pkg_riakc_homepage = https://github.com/basho/riak-erlang-client
+pkg_riakc_fetch = git
+pkg_riakc_repo = https://github.com/basho/riak-erlang-client
+pkg_riakc_commit = master
+
+PACKAGES += riakhttpc
+pkg_riakhttpc_name = riakhttpc
+pkg_riakhttpc_description = Riak Erlang client using the HTTP interface
+pkg_riakhttpc_homepage = https://github.com/basho/riak-erlang-http-client
+pkg_riakhttpc_fetch = git
+pkg_riakhttpc_repo = https://github.com/basho/riak-erlang-http-client
+pkg_riakhttpc_commit = master
+
+PACKAGES += riaknostic
+pkg_riaknostic_name = riaknostic
+pkg_riaknostic_description = A diagnostic tool for Riak installations, to find common errors asap
+pkg_riaknostic_homepage = https://github.com/basho/riaknostic
+pkg_riaknostic_fetch = git
+pkg_riaknostic_repo = https://github.com/basho/riaknostic
+pkg_riaknostic_commit = master
+
+PACKAGES += riakpool
+pkg_riakpool_name = riakpool
+pkg_riakpool_description = erlang riak client pool
+pkg_riakpool_homepage = https://github.com/dweldon/riakpool
+pkg_riakpool_fetch = git
+pkg_riakpool_repo = https://github.com/dweldon/riakpool
+pkg_riakpool_commit = master
+
+PACKAGES += rivus_cep
+pkg_rivus_cep_name = rivus_cep
+pkg_rivus_cep_description = Complex event processing in Erlang
+pkg_rivus_cep_homepage = https://github.com/vascokk/rivus_cep
+pkg_rivus_cep_fetch = git
+pkg_rivus_cep_repo = https://github.com/vascokk/rivus_cep
+pkg_rivus_cep_commit = master
+
+PACKAGES += rlimit
+pkg_rlimit_name = rlimit
+pkg_rlimit_description = Magnus Klaar's rate limiter code from etorrent
+pkg_rlimit_homepage = https://github.com/jlouis/rlimit
+pkg_rlimit_fetch = git
+pkg_rlimit_repo = https://github.com/jlouis/rlimit
+pkg_rlimit_commit = master
+
+PACKAGES += rust_mk
+pkg_rust_mk_name = rust_mk
+pkg_rust_mk_description = Build Rust crates in an Erlang application
+pkg_rust_mk_homepage = https://github.com/goertzenator/rust.mk
+pkg_rust_mk_fetch = git
+pkg_rust_mk_repo = https://github.com/goertzenator/rust.mk
+pkg_rust_mk_commit = master
+
+PACKAGES += safetyvalve
+pkg_safetyvalve_name = safetyvalve
+pkg_safetyvalve_description = A safety valve for your erlang node
+pkg_safetyvalve_homepage = https://github.com/jlouis/safetyvalve
+pkg_safetyvalve_fetch = git
+pkg_safetyvalve_repo = https://github.com/jlouis/safetyvalve
+pkg_safetyvalve_commit = master
+
+PACKAGES += seestar
+pkg_seestar_name = seestar
+pkg_seestar_description = The Erlang client for Cassandra 1.2+ binary protocol
+pkg_seestar_homepage = https://github.com/iamaleksey/seestar
+pkg_seestar_fetch = git
+pkg_seestar_repo = https://github.com/iamaleksey/seestar
+pkg_seestar_commit = master
+
+PACKAGES += service
+pkg_service_name = service
+pkg_service_description = A minimal Erlang behavior for creating CloudI internal services
+pkg_service_homepage = http://cloudi.org/
+pkg_service_fetch = git
+pkg_service_repo = https://github.com/CloudI/service
+pkg_service_commit = master
+
+PACKAGES += setup
+pkg_setup_name = setup
+pkg_setup_description = Generic setup utility for Erlang-based systems
+pkg_setup_homepage = https://github.com/uwiger/setup
+pkg_setup_fetch = git
+pkg_setup_repo = https://github.com/uwiger/setup
+pkg_setup_commit = master
+
+PACKAGES += sext
+pkg_sext_name = sext
+pkg_sext_description = Sortable Erlang Term Serialization
+pkg_sext_homepage = https://github.com/uwiger/sext
+pkg_sext_fetch = git
+pkg_sext_repo = https://github.com/uwiger/sext
+pkg_sext_commit = master
+
+PACKAGES += sfmt
+pkg_sfmt_name = sfmt
+pkg_sfmt_description = SFMT pseudo random number generator for Erlang.
+pkg_sfmt_homepage = https://github.com/jj1bdx/sfmt-erlang
+pkg_sfmt_fetch = git
+pkg_sfmt_repo = https://github.com/jj1bdx/sfmt-erlang
+pkg_sfmt_commit = master
+
+PACKAGES += sgte
+pkg_sgte_name = sgte
+pkg_sgte_description = A simple Erlang Template Engine
+pkg_sgte_homepage = https://github.com/filippo/sgte
+pkg_sgte_fetch = git
+pkg_sgte_repo = https://github.com/filippo/sgte
+pkg_sgte_commit = master
+
+PACKAGES += sheriff
+pkg_sheriff_name = sheriff
+pkg_sheriff_description = Parse transform for type based validation.
+pkg_sheriff_homepage = http://ninenines.eu
+pkg_sheriff_fetch = git
+pkg_sheriff_repo = https://github.com/extend/sheriff
+pkg_sheriff_commit = master
+
+PACKAGES += shotgun
+pkg_shotgun_name = shotgun
+pkg_shotgun_description = better than just a gun
+pkg_shotgun_homepage = https://github.com/inaka/shotgun
+pkg_shotgun_fetch = git
+pkg_shotgun_repo = https://github.com/inaka/shotgun
+pkg_shotgun_commit = master
+
+PACKAGES += sidejob
+pkg_sidejob_name = sidejob
+pkg_sidejob_description = Parallel worker and capacity limiting library for Erlang
+pkg_sidejob_homepage = https://github.com/basho/sidejob
+pkg_sidejob_fetch = git
+pkg_sidejob_repo = https://github.com/basho/sidejob
+pkg_sidejob_commit = master
+
+PACKAGES += sieve
+pkg_sieve_name = sieve
+pkg_sieve_description = sieve is a simple TCP routing proxy (layer 7) in erlang
+pkg_sieve_homepage = https://github.com/benoitc/sieve
+pkg_sieve_fetch = git
+pkg_sieve_repo = https://github.com/benoitc/sieve
+pkg_sieve_commit = master
+
+PACKAGES += sighandler
+pkg_sighandler_name = sighandler
+pkg_sighandler_description = Handle UNIX signals in Erlang
+pkg_sighandler_homepage = https://github.com/jkingsbery/sighandler
+pkg_sighandler_fetch = git
+pkg_sighandler_repo = https://github.com/jkingsbery/sighandler
+pkg_sighandler_commit = master
+
+PACKAGES += simhash
+pkg_simhash_name = simhash
+pkg_simhash_description = Simhashing for Erlang -- hashing algorithm to find near-duplicates in binary data.
+pkg_simhash_homepage = https://github.com/ferd/simhash
+pkg_simhash_fetch = git
+pkg_simhash_repo = https://github.com/ferd/simhash
+pkg_simhash_commit = master
+
+PACKAGES += simple_bridge
+pkg_simple_bridge_name = simple_bridge
+pkg_simple_bridge_description = A simple, standardized interface library to Erlang HTTP Servers.
+pkg_simple_bridge_homepage = https://github.com/nitrogen/simple_bridge
+pkg_simple_bridge_fetch = git
+pkg_simple_bridge_repo = https://github.com/nitrogen/simple_bridge
+pkg_simple_bridge_commit = master
+
+PACKAGES += simple_oauth2
+pkg_simple_oauth2_name = simple_oauth2
+pkg_simple_oauth2_description = Simple erlang OAuth2 client module for any http server framework (Google, Facebook, Yandex, Vkontakte are preconfigured)
+pkg_simple_oauth2_homepage = https://github.com/virtan/simple_oauth2
+pkg_simple_oauth2_fetch = git
+pkg_simple_oauth2_repo = https://github.com/virtan/simple_oauth2
+pkg_simple_oauth2_commit = master
+
+PACKAGES += skel
+pkg_skel_name = skel
+pkg_skel_description = A Streaming Process-based Skeleton Library for Erlang
+pkg_skel_homepage = https://github.com/ParaPhrase/skel
+pkg_skel_fetch = git
+pkg_skel_repo = https://github.com/ParaPhrase/skel
+pkg_skel_commit = master
+
+PACKAGES += slack
+pkg_slack_name = slack
+pkg_slack_description = Minimal slack notification OTP library.
+pkg_slack_homepage = https://github.com/DonBranson/slack
+pkg_slack_fetch = git
+pkg_slack_repo = https://github.com/DonBranson/slack.git
+pkg_slack_commit = master
+
+PACKAGES += smother
+pkg_smother_name = smother
+pkg_smother_description = Extended code coverage metrics for Erlang.
+pkg_smother_homepage = https://ramsay-t.github.io/Smother/
+pkg_smother_fetch = git
+pkg_smother_repo = https://github.com/ramsay-t/Smother
+pkg_smother_commit = master
+
+PACKAGES += snappyer
+pkg_snappyer_name = snappyer
+pkg_snappyer_description = Snappy as a NIF for Erlang
+pkg_snappyer_homepage = https://github.com/zmstone/snappyer
+pkg_snappyer_fetch = git
+pkg_snappyer_repo = https://github.com/zmstone/snappyer.git
+pkg_snappyer_commit = master
+
+PACKAGES += social
+pkg_social_name = social
+pkg_social_description = Cowboy handler for social login via OAuth2 providers
+pkg_social_homepage = https://github.com/dvv/social
+pkg_social_fetch = git
+pkg_social_repo = https://github.com/dvv/social
+pkg_social_commit = master
+
+PACKAGES += spapi_router
+pkg_spapi_router_name = spapi_router
+pkg_spapi_router_description = Partially-connected Erlang clustering
+pkg_spapi_router_homepage = https://github.com/spilgames/spapi-router
+pkg_spapi_router_fetch = git
+pkg_spapi_router_repo = https://github.com/spilgames/spapi-router
+pkg_spapi_router_commit = master
+
+PACKAGES += sqerl
+pkg_sqerl_name = sqerl
+pkg_sqerl_description = An Erlang-flavoured SQL DSL
+pkg_sqerl_homepage = https://github.com/hairyhum/sqerl
+pkg_sqerl_fetch = git
+pkg_sqerl_repo = https://github.com/hairyhum/sqerl
+pkg_sqerl_commit = master
+
+PACKAGES += srly
+pkg_srly_name = srly
+pkg_srly_description = Native Erlang Unix serial interface
+pkg_srly_homepage = https://github.com/msantos/srly
+pkg_srly_fetch = git
+pkg_srly_repo = https://github.com/msantos/srly
+pkg_srly_commit = master
+
+PACKAGES += sshrpc
+pkg_sshrpc_name = sshrpc
+pkg_sshrpc_description = Erlang SSH RPC module (experimental)
+pkg_sshrpc_homepage = https://github.com/jj1bdx/sshrpc
+pkg_sshrpc_fetch = git
+pkg_sshrpc_repo = https://github.com/jj1bdx/sshrpc
+pkg_sshrpc_commit = master
+
+PACKAGES += stable
+pkg_stable_name = stable
+pkg_stable_description = Library of assorted helpers for Cowboy web server.
+pkg_stable_homepage = https://github.com/dvv/stable
+pkg_stable_fetch = git
+pkg_stable_repo = https://github.com/dvv/stable
+pkg_stable_commit = master
+
+PACKAGES += statebox
+pkg_statebox_name = statebox
+pkg_statebox_description = Erlang state monad with merge/conflict-resolution capabilities. Useful for Riak.
+pkg_statebox_homepage = https://github.com/mochi/statebox
+pkg_statebox_fetch = git
+pkg_statebox_repo = https://github.com/mochi/statebox
+pkg_statebox_commit = master
+
+PACKAGES += statebox_riak
+pkg_statebox_riak_name = statebox_riak
+pkg_statebox_riak_description = Convenience library that makes it easier to use statebox with riak, extracted from best practices in our production code at Mochi Media.
+pkg_statebox_riak_homepage = https://github.com/mochi/statebox_riak
+pkg_statebox_riak_fetch = git
+pkg_statebox_riak_repo = https://github.com/mochi/statebox_riak
+pkg_statebox_riak_commit = master
+
+PACKAGES += statman
+pkg_statman_name = statman
+pkg_statman_description = Efficiently collect massive volumes of metrics inside the Erlang VM
+pkg_statman_homepage = https://github.com/knutin/statman
+pkg_statman_fetch = git
+pkg_statman_repo = https://github.com/knutin/statman
+pkg_statman_commit = master
+
+PACKAGES += statsderl
+pkg_statsderl_name = statsderl
+pkg_statsderl_description = StatsD client (erlang)
+pkg_statsderl_homepage = https://github.com/lpgauth/statsderl
+pkg_statsderl_fetch = git
+pkg_statsderl_repo = https://github.com/lpgauth/statsderl
+pkg_statsderl_commit = master
+
+PACKAGES += stdinout_pool
+pkg_stdinout_pool_name = stdinout_pool
+pkg_stdinout_pool_description = stdinout_pool : stuff goes in, stuff goes out. there's never any miscommunication.
+pkg_stdinout_pool_homepage = https://github.com/mattsta/erlang-stdinout-pool
+pkg_stdinout_pool_fetch = git
+pkg_stdinout_pool_repo = https://github.com/mattsta/erlang-stdinout-pool
+pkg_stdinout_pool_commit = master
+
+PACKAGES += stockdb
+pkg_stockdb_name = stockdb
+pkg_stockdb_description = Database for storing Stock Exchange quotes in erlang
+pkg_stockdb_homepage = https://github.com/maxlapshin/stockdb
+pkg_stockdb_fetch = git
+pkg_stockdb_repo = https://github.com/maxlapshin/stockdb
+pkg_stockdb_commit = master
+
+PACKAGES += stripe
+pkg_stripe_name = stripe
+pkg_stripe_description = Erlang interface to the stripe.com API
+pkg_stripe_homepage = https://github.com/mattsta/stripe-erlang
+pkg_stripe_fetch = git
+pkg_stripe_repo = https://github.com/mattsta/stripe-erlang
+pkg_stripe_commit = v1
+
+PACKAGES += subproc
+pkg_subproc_name = subproc
+pkg_subproc_description = unix subprocess manager with {active,once|false} modes
+pkg_subproc_homepage = http://dozzie.jarowit.net/trac/wiki/subproc
+pkg_subproc_fetch = git
+pkg_subproc_repo = https://github.com/dozzie/subproc
+pkg_subproc_commit = v0.1.0
+
+PACKAGES += supervisor3
+pkg_supervisor3_name = supervisor3
+pkg_supervisor3_description = OTP supervisor with additional strategies
+pkg_supervisor3_homepage = https://github.com/klarna/supervisor3
+pkg_supervisor3_fetch = git
+pkg_supervisor3_repo = https://github.com/klarna/supervisor3.git
+pkg_supervisor3_commit = master
+
+PACKAGES += surrogate
+pkg_surrogate_name = surrogate
+pkg_surrogate_description = Proxy server written in erlang. Supports reverse proxy load balancing and forward proxy with http (including CONNECT), socks4, socks5, and transparent proxy modes.
+pkg_surrogate_homepage = https://github.com/skruger/Surrogate
+pkg_surrogate_fetch = git
+pkg_surrogate_repo = https://github.com/skruger/Surrogate
+pkg_surrogate_commit = master
+
+PACKAGES += swab
+pkg_swab_name = swab
+pkg_swab_description = General purpose buffer handling module
+pkg_swab_homepage = https://github.com/crownedgrouse/swab
+pkg_swab_fetch = git
+pkg_swab_repo = https://github.com/crownedgrouse/swab
+pkg_swab_commit = master
+
+PACKAGES += swarm
+pkg_swarm_name = swarm
+pkg_swarm_description = Fast and simple acceptor pool for Erlang
+pkg_swarm_homepage = https://github.com/jeremey/swarm
+pkg_swarm_fetch = git
+pkg_swarm_repo = https://github.com/jeremey/swarm
+pkg_swarm_commit = master
+
+PACKAGES += switchboard
+pkg_switchboard_name = switchboard
+pkg_switchboard_description = A framework for processing email using worker plugins.
+pkg_switchboard_homepage = https://github.com/thusfresh/switchboard
+pkg_switchboard_fetch = git
+pkg_switchboard_repo = https://github.com/thusfresh/switchboard
+pkg_switchboard_commit = master
+
+PACKAGES += syn
+pkg_syn_name = syn
+pkg_syn_description = A global Process Registry and Process Group manager for Erlang.
+pkg_syn_homepage = https://github.com/ostinelli/syn
+pkg_syn_fetch = git
+pkg_syn_repo = https://github.com/ostinelli/syn
+pkg_syn_commit = master
+
+PACKAGES += sync
+pkg_sync_name = sync
+pkg_sync_description = On-the-fly recompiling and reloading in Erlang.
+pkg_sync_homepage = https://github.com/rustyio/sync
+pkg_sync_fetch = git
+pkg_sync_repo = https://github.com/rustyio/sync
+pkg_sync_commit = master
+
+PACKAGES += syntaxerl
+pkg_syntaxerl_name = syntaxerl
+pkg_syntaxerl_description = Syntax checker for Erlang
+pkg_syntaxerl_homepage = https://github.com/ten0s/syntaxerl
+pkg_syntaxerl_fetch = git
+pkg_syntaxerl_repo = https://github.com/ten0s/syntaxerl
+pkg_syntaxerl_commit = master
+
+PACKAGES += syslog
+pkg_syslog_name = syslog
+pkg_syslog_description = Erlang port driver for interacting with syslog via syslog(3)
+pkg_syslog_homepage = https://github.com/Vagabond/erlang-syslog
+pkg_syslog_fetch = git
+pkg_syslog_repo = https://github.com/Vagabond/erlang-syslog
+pkg_syslog_commit = master
+
+PACKAGES += taskforce
+pkg_taskforce_name = taskforce
+pkg_taskforce_description = Erlang worker pools for controlled parallelisation of arbitrary tasks.
+pkg_taskforce_homepage = https://github.com/g-andrade/taskforce
+pkg_taskforce_fetch = git
+pkg_taskforce_repo = https://github.com/g-andrade/taskforce
+pkg_taskforce_commit = master
+
+PACKAGES += tddreloader
+pkg_tddreloader_name = tddreloader
+pkg_tddreloader_description = Shell utility for recompiling, reloading, and testing code as it changes
+pkg_tddreloader_homepage = https://github.com/version2beta/tddreloader
+pkg_tddreloader_fetch = git
+pkg_tddreloader_repo = https://github.com/version2beta/tddreloader
+pkg_tddreloader_commit = master
+
+PACKAGES += tempo
+pkg_tempo_name = tempo
+pkg_tempo_description = NIF-based date and time parsing and formatting for Erlang.
+pkg_tempo_homepage = https://github.com/selectel/tempo
+pkg_tempo_fetch = git
+pkg_tempo_repo = https://github.com/selectel/tempo
+pkg_tempo_commit = master
+
+PACKAGES += ticktick
+pkg_ticktick_name = ticktick
+pkg_ticktick_description = Ticktick is an ID generator for message services.
+pkg_ticktick_homepage = https://github.com/ericliang/ticktick
+pkg_ticktick_fetch = git
+pkg_ticktick_repo = https://github.com/ericliang/ticktick
+pkg_ticktick_commit = master
+
+PACKAGES += tinymq
+pkg_tinymq_name = tinymq
+pkg_tinymq_description = TinyMQ - a diminutive, in-memory message queue
+pkg_tinymq_homepage = https://github.com/ChicagoBoss/tinymq
+pkg_tinymq_fetch = git
+pkg_tinymq_repo = https://github.com/ChicagoBoss/tinymq
+pkg_tinymq_commit = master
+
+PACKAGES += tinymt
+pkg_tinymt_name = tinymt
+pkg_tinymt_description = TinyMT pseudo random number generator for Erlang.
+pkg_tinymt_homepage = https://github.com/jj1bdx/tinymt-erlang
+pkg_tinymt_fetch = git
+pkg_tinymt_repo = https://github.com/jj1bdx/tinymt-erlang
+pkg_tinymt_commit = master
+
+PACKAGES += tirerl
+pkg_tirerl_name = tirerl
+pkg_tirerl_description = Erlang interface to Elastic Search
+pkg_tirerl_homepage = https://github.com/inaka/tirerl
+pkg_tirerl_fetch = git
+pkg_tirerl_repo = https://github.com/inaka/tirerl
+pkg_tirerl_commit = master
+
+PACKAGES += toml
+pkg_toml_name = toml
+pkg_toml_description = TOML (0.4.0) config parser
+pkg_toml_homepage = http://dozzie.jarowit.net/trac/wiki/TOML
+pkg_toml_fetch = git
+pkg_toml_repo = https://github.com/dozzie/toml
+pkg_toml_commit = v0.2.0
+
+PACKAGES += traffic_tools
+pkg_traffic_tools_name = traffic_tools
+pkg_traffic_tools_description = Simple traffic limiting library
+pkg_traffic_tools_homepage = https://github.com/systra/traffic_tools
+pkg_traffic_tools_fetch = git
+pkg_traffic_tools_repo = https://github.com/systra/traffic_tools
+pkg_traffic_tools_commit = master
+
+PACKAGES += trails
+pkg_trails_name = trails
+pkg_trails_description = A couple of improvements over Cowboy Routes
+pkg_trails_homepage = http://inaka.github.io/cowboy-trails/
+pkg_trails_fetch = git
+pkg_trails_repo = https://github.com/inaka/cowboy-trails
+pkg_trails_commit = master
+
+PACKAGES += trane
+pkg_trane_name = trane
+pkg_trane_description = SAX-style parser for broken HTML, in Erlang
+pkg_trane_homepage = https://github.com/massemanet/trane
+pkg_trane_fetch = git
+pkg_trane_repo = https://github.com/massemanet/trane
+pkg_trane_commit = master
+
+PACKAGES += transit
+pkg_transit_name = transit
+pkg_transit_description = transit format for erlang
+pkg_transit_homepage = https://github.com/isaiah/transit-erlang
+pkg_transit_fetch = git
+pkg_transit_repo = https://github.com/isaiah/transit-erlang
+pkg_transit_commit = master
+
+PACKAGES += trie
+pkg_trie_name = trie
+pkg_trie_description = Erlang Trie Implementation
+pkg_trie_homepage = https://github.com/okeuday/trie
+pkg_trie_fetch = git
+pkg_trie_repo = https://github.com/okeuday/trie
+pkg_trie_commit = master
+
+PACKAGES += triq
+pkg_triq_name = triq
+pkg_triq_description = Trifork QuickCheck
+pkg_triq_homepage = https://triq.gitlab.io
+pkg_triq_fetch = git
+pkg_triq_repo = https://gitlab.com/triq/triq.git
+pkg_triq_commit = master
+
+PACKAGES += tunctl
+pkg_tunctl_name = tunctl
+pkg_tunctl_description = Erlang TUN/TAP interface
+pkg_tunctl_homepage = https://github.com/msantos/tunctl
+pkg_tunctl_fetch = git
+pkg_tunctl_repo = https://github.com/msantos/tunctl
+pkg_tunctl_commit = master
+
+PACKAGES += twerl
+pkg_twerl_name = twerl
+pkg_twerl_description = Erlang client for the Twitter Streaming API
+pkg_twerl_homepage = https://github.com/lucaspiller/twerl
+pkg_twerl_fetch = git
+pkg_twerl_repo = https://github.com/lucaspiller/twerl
+pkg_twerl_commit = oauth
+
+PACKAGES += twitter_erlang
+pkg_twitter_erlang_name = twitter_erlang
+pkg_twitter_erlang_description = An Erlang twitter client
+pkg_twitter_erlang_homepage = https://github.com/ngerakines/erlang_twitter
+pkg_twitter_erlang_fetch = git
+pkg_twitter_erlang_repo = https://github.com/ngerakines/erlang_twitter
+pkg_twitter_erlang_commit = master
+
+PACKAGES += ucol_nif
+pkg_ucol_nif_name = ucol_nif
+pkg_ucol_nif_description = ICU based collation Erlang module
+pkg_ucol_nif_homepage = https://github.com/refuge/ucol_nif
+pkg_ucol_nif_fetch = git
+pkg_ucol_nif_repo = https://github.com/refuge/ucol_nif
+pkg_ucol_nif_commit = master
+
+PACKAGES += unicorn
+pkg_unicorn_name = unicorn
+pkg_unicorn_description = Generic configuration server
+pkg_unicorn_homepage = https://github.com/shizzard/unicorn
+pkg_unicorn_fetch = git
+pkg_unicorn_repo = https://github.com/shizzard/unicorn
+pkg_unicorn_commit = master
+
+PACKAGES += unsplit
+pkg_unsplit_name = unsplit
+pkg_unsplit_description = Resolves conflicts in Mnesia after network splits
+pkg_unsplit_homepage = https://github.com/uwiger/unsplit
+pkg_unsplit_fetch = git
+pkg_unsplit_repo = https://github.com/uwiger/unsplit
+pkg_unsplit_commit = master
+
+PACKAGES += uuid
+pkg_uuid_name = uuid
+pkg_uuid_description = Erlang UUID Implementation
+pkg_uuid_homepage = https://github.com/okeuday/uuid
+pkg_uuid_fetch = git
+pkg_uuid_repo = https://github.com/okeuday/uuid
+pkg_uuid_commit = master
+
+PACKAGES += ux
+pkg_ux_name = ux
+pkg_ux_description = Unicode eXtension for Erlang (Strings, Collation)
+pkg_ux_homepage = https://github.com/erlang-unicode/ux
+pkg_ux_fetch = git
+pkg_ux_repo = https://github.com/erlang-unicode/ux
+pkg_ux_commit = master
+
+PACKAGES += vert
+pkg_vert_name = vert
+pkg_vert_description = Erlang binding to the libvirt virtualization API
+pkg_vert_homepage = https://github.com/msantos/erlang-libvirt
+pkg_vert_fetch = git
+pkg_vert_repo = https://github.com/msantos/erlang-libvirt
+pkg_vert_commit = master
+
+PACKAGES += verx
+pkg_verx_name = verx
+pkg_verx_description = Erlang implementation of the libvirtd remote protocol
+pkg_verx_homepage = https://github.com/msantos/verx
+pkg_verx_fetch = git
+pkg_verx_repo = https://github.com/msantos/verx
+pkg_verx_commit = master
+
+PACKAGES += vmq_acl
+pkg_vmq_acl_name = vmq_acl
+pkg_vmq_acl_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_acl_homepage = https://verne.mq/
+pkg_vmq_acl_fetch = git
+pkg_vmq_acl_repo = https://github.com/erlio/vmq_acl
+pkg_vmq_acl_commit = master
+
+PACKAGES += vmq_bridge
+pkg_vmq_bridge_name = vmq_bridge
+pkg_vmq_bridge_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_bridge_homepage = https://verne.mq/
+pkg_vmq_bridge_fetch = git
+pkg_vmq_bridge_repo = https://github.com/erlio/vmq_bridge
+pkg_vmq_bridge_commit = master
+
+PACKAGES += vmq_graphite
+pkg_vmq_graphite_name = vmq_graphite
+pkg_vmq_graphite_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_graphite_homepage = https://verne.mq/
+pkg_vmq_graphite_fetch = git
+pkg_vmq_graphite_repo = https://github.com/erlio/vmq_graphite
+pkg_vmq_graphite_commit = master
+
+PACKAGES += vmq_passwd
+pkg_vmq_passwd_name = vmq_passwd
+pkg_vmq_passwd_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_passwd_homepage = https://verne.mq/
+pkg_vmq_passwd_fetch = git
+pkg_vmq_passwd_repo = https://github.com/erlio/vmq_passwd
+pkg_vmq_passwd_commit = master
+
+PACKAGES += vmq_server
+pkg_vmq_server_name = vmq_server
+pkg_vmq_server_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_server_homepage = https://verne.mq/
+pkg_vmq_server_fetch = git
+pkg_vmq_server_repo = https://github.com/erlio/vmq_server
+pkg_vmq_server_commit = master
+
+PACKAGES += vmq_snmp
+pkg_vmq_snmp_name = vmq_snmp
+pkg_vmq_snmp_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_snmp_homepage = https://verne.mq/
+pkg_vmq_snmp_fetch = git
+pkg_vmq_snmp_repo = https://github.com/erlio/vmq_snmp
+pkg_vmq_snmp_commit = master
+
+PACKAGES += vmq_systree
+pkg_vmq_systree_name = vmq_systree
+pkg_vmq_systree_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_systree_homepage = https://verne.mq/
+pkg_vmq_systree_fetch = git
+pkg_vmq_systree_repo = https://github.com/erlio/vmq_systree
+pkg_vmq_systree_commit = master
+
+PACKAGES += vmstats
+pkg_vmstats_name = vmstats
+pkg_vmstats_description = Tiny Erlang app that works in conjunction with statsderl to generate information on the Erlang VM for Graphite logs.
+pkg_vmstats_homepage = https://github.com/ferd/vmstats
+pkg_vmstats_fetch = git
+pkg_vmstats_repo = https://github.com/ferd/vmstats
+pkg_vmstats_commit = master
+
+PACKAGES += walrus
+pkg_walrus_name = walrus
+pkg_walrus_description = Walrus - Mustache-like Templating
+pkg_walrus_homepage = https://github.com/devinus/walrus
+pkg_walrus_fetch = git
+pkg_walrus_repo = https://github.com/devinus/walrus
+pkg_walrus_commit = master
+
+PACKAGES += webmachine
+pkg_webmachine_name = webmachine
+pkg_webmachine_description = A REST-based system for building web applications.
+pkg_webmachine_homepage = https://github.com/basho/webmachine
+pkg_webmachine_fetch = git
+pkg_webmachine_repo = https://github.com/basho/webmachine
+pkg_webmachine_commit = master
+
+PACKAGES += websocket_client
+pkg_websocket_client_name = websocket_client
+pkg_websocket_client_description = Erlang websocket client (ws and wss supported)
+pkg_websocket_client_homepage = https://github.com/jeremyong/websocket_client
+pkg_websocket_client_fetch = git
+pkg_websocket_client_repo = https://github.com/jeremyong/websocket_client
+pkg_websocket_client_commit = master
+
+PACKAGES += worker_pool
+pkg_worker_pool_name = worker_pool
+pkg_worker_pool_description = A simple Erlang worker pool
+pkg_worker_pool_homepage = https://github.com/inaka/worker_pool
+pkg_worker_pool_fetch = git
+pkg_worker_pool_repo = https://github.com/inaka/worker_pool
+pkg_worker_pool_commit = master
+
+PACKAGES += wrangler
+pkg_wrangler_name = wrangler
+pkg_wrangler_description = Import of the Wrangler SVN repository.
+pkg_wrangler_homepage = http://www.cs.kent.ac.uk/projects/wrangler/Home.html
+pkg_wrangler_fetch = git
+pkg_wrangler_repo = https://github.com/RefactoringTools/wrangler
+pkg_wrangler_commit = master
+
+PACKAGES += wsock
+pkg_wsock_name = wsock
+pkg_wsock_description = Erlang library to build WebSocket clients and servers
+pkg_wsock_homepage = https://github.com/madtrick/wsock
+pkg_wsock_fetch = git
+pkg_wsock_repo = https://github.com/madtrick/wsock
+pkg_wsock_commit = master
+
+PACKAGES += xhttpc
+pkg_xhttpc_name = xhttpc
+pkg_xhttpc_description = Extensible HTTP Client for Erlang
+pkg_xhttpc_homepage = https://github.com/seriyps/xhttpc
+pkg_xhttpc_fetch = git
+pkg_xhttpc_repo = https://github.com/seriyps/xhttpc
+pkg_xhttpc_commit = master
+
+PACKAGES += xref_runner
+pkg_xref_runner_name = xref_runner
+pkg_xref_runner_description = Erlang Xref Runner (inspired by rebar xref)
+pkg_xref_runner_homepage = https://github.com/inaka/xref_runner
+pkg_xref_runner_fetch = git
+pkg_xref_runner_repo = https://github.com/inaka/xref_runner
+pkg_xref_runner_commit = master
+
+PACKAGES += yamerl
+pkg_yamerl_name = yamerl
+pkg_yamerl_description = YAML 1.2 parser in pure Erlang
+pkg_yamerl_homepage = https://github.com/yakaz/yamerl
+pkg_yamerl_fetch = git
+pkg_yamerl_repo = https://github.com/yakaz/yamerl
+pkg_yamerl_commit = master
+
+PACKAGES += yamler
+pkg_yamler_name = yamler
+pkg_yamler_description = libyaml-based YAML loader for Erlang
+pkg_yamler_homepage = https://github.com/goertzenator/yamler
+pkg_yamler_fetch = git
+pkg_yamler_repo = https://github.com/goertzenator/yamler
+pkg_yamler_commit = master
+
+PACKAGES += yaws
+pkg_yaws_name = yaws
+pkg_yaws_description = Yaws webserver
+pkg_yaws_homepage = http://yaws.hyber.org
+pkg_yaws_fetch = git
+pkg_yaws_repo = https://github.com/klacke/yaws
+pkg_yaws_commit = master
+
+PACKAGES += zab_engine
+pkg_zab_engine_name = zab_engine
+pkg_zab_engine_description = ZAB protocol implementation in Erlang
+pkg_zab_engine_homepage = https://github.com/xinmingyao/zab_engine
+pkg_zab_engine_fetch = git
+pkg_zab_engine_repo = https://github.com/xinmingyao/zab_engine
+pkg_zab_engine_commit = master
+
+PACKAGES += zabbix_sender
+pkg_zabbix_sender_name = zabbix_sender
+pkg_zabbix_sender_description = Zabbix trapper for sending data to Zabbix in pure Erlang
+pkg_zabbix_sender_homepage = https://github.com/stalkermn/zabbix_sender
+pkg_zabbix_sender_fetch = git
+pkg_zabbix_sender_repo = https://github.com/stalkermn/zabbix_sender.git
+pkg_zabbix_sender_commit = master
+
+PACKAGES += zeta
+pkg_zeta_name = zeta
+pkg_zeta_description = HTTP access log parser in Erlang
+pkg_zeta_homepage = https://github.com/s1n4/zeta
+pkg_zeta_fetch = git
+pkg_zeta_repo = https://github.com/s1n4/zeta
+pkg_zeta_commit = master
+
+PACKAGES += zippers
+pkg_zippers_name = zippers
+pkg_zippers_description = A library for functional zipper data structures in Erlang
+pkg_zippers_homepage = https://github.com/ferd/zippers
+pkg_zippers_fetch = git
+pkg_zippers_repo = https://github.com/ferd/zippers
+pkg_zippers_commit = master
+
+PACKAGES += zlists
+pkg_zlists_name = zlists
+pkg_zlists_description = Erlang lazy lists library.
+pkg_zlists_homepage = https://github.com/vjache/erlang-zlists
+pkg_zlists_fetch = git
+pkg_zlists_repo = https://github.com/vjache/erlang-zlists
+pkg_zlists_commit = master
+
+PACKAGES += zraft_lib
+pkg_zraft_lib_name = zraft_lib
+pkg_zraft_lib_description = Erlang Raft consensus protocol implementation
+pkg_zraft_lib_homepage = https://github.com/dreyk/zraft_lib
+pkg_zraft_lib_fetch = git
+pkg_zraft_lib_repo = https://github.com/dreyk/zraft_lib
+pkg_zraft_lib_commit = master
+
+PACKAGES += zucchini
+pkg_zucchini_name = zucchini
+pkg_zucchini_description = An Erlang INI parser
+pkg_zucchini_homepage = https://github.com/devinus/zucchini
+pkg_zucchini_fetch = git
+pkg_zucchini_repo = https://github.com/devinus/zucchini
+pkg_zucchini_commit = master
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: search
+
+define pkg_print
+ $(verbose) printf "%s\n" \
+ $(if $(call core_eq,$(1),$(pkg_$(1)_name)),,"Pkg name: $(1)") \
+ "App name: $(pkg_$(1)_name)" \
+ "Description: $(pkg_$(1)_description)" \
+ "Home page: $(pkg_$(1)_homepage)" \
+ "Fetch with: $(pkg_$(1)_fetch)" \
+ "Repository: $(pkg_$(1)_repo)" \
+ "Commit: $(pkg_$(1)_commit)" \
+ ""
+
+endef
+
+search:
+ifdef q
+ $(foreach p,$(PACKAGES), \
+ $(if $(findstring $(call core_lc,$(q)),$(call core_lc,$(pkg_$(p)_name) $(pkg_$(p)_description))), \
+ $(call pkg_print,$(p))))
+else
+ $(foreach p,$(PACKAGES),$(call pkg_print,$(p)))
+endif
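+
+# Example usage (illustrative): search the package index above by
+# keyword, matching case-insensitively against names and descriptions:
+#
+#   $ make search q=websocket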
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-deps clean-tmp-deps.log
+
+# Configuration.
+
+ifdef OTP_DEPS
+$(warning The variable OTP_DEPS is deprecated in favor of LOCAL_DEPS.)
+endif
+
+IGNORE_DEPS ?=
+export IGNORE_DEPS
+
+APPS_DIR ?= $(CURDIR)/apps
+export APPS_DIR
+
+DEPS_DIR ?= $(CURDIR)/deps
+export DEPS_DIR
+
+REBAR_DEPS_DIR = $(DEPS_DIR)
+export REBAR_DEPS_DIR
+
+REBAR_GIT ?= https://github.com/rebar/rebar
+REBAR_COMMIT ?= 576e12171ab8d69b048b827b92aa65d067deea01
+
+# External "early" plugins (see core/plugins.mk for regular plugins).
+# They both use the core_dep_plugin macro.
+
+define core_dep_plugin
+ifeq ($(2),$(PROJECT))
+-include $$(patsubst $(PROJECT)/%,%,$(1))
+else
+-include $(DEPS_DIR)/$(1)
+
+$(DEPS_DIR)/$(1): $(DEPS_DIR)/$(2) ;
+endif
+endef
+
+DEP_EARLY_PLUGINS ?=
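+
+# Example (illustrative; the plugin and dependency names are placeholders):
+# name a dependency to load its early-plugins.mk, or point at a specific
+# file inside a dependency:
+#
+#   DEP_EARLY_PLUGINS = ci.erlang.mk
+#   DEP_EARLY_PLUGINS += my_dep/mk/early-plugins.mk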
+
+$(foreach p,$(DEP_EARLY_PLUGINS),\
+ $(eval $(if $(findstring /,$p),\
+ $(call core_dep_plugin,$p,$(firstword $(subst /, ,$p))),\
+ $(call core_dep_plugin,$p/early-plugins.mk,$p))))
+
+# Query functions.
+
+query_fetch_method = $(if $(dep_$(1)),$(call _qfm_dep,$(word 1,$(dep_$(1)))),$(call _qfm_pkg,$(1)))
+_qfm_dep = $(if $(dep_fetch_$(1)),$(1),$(if $(IS_DEP),legacy,fail))
+_qfm_pkg = $(if $(pkg_$(1)_fetch),$(pkg_$(1)_fetch),fail)
+
+query_name = $(if $(dep_$(1)),$(1),$(if $(pkg_$(1)_name),$(pkg_$(1)_name),$(1)))
+
+query_repo = $(call _qr,$(1),$(call query_fetch_method,$(1)))
+_qr = $(if $(query_repo_$(2)),$(call query_repo_$(2),$(1)),$(call dep_repo,$(1)))
+
+query_repo_default = $(if $(dep_$(1)),$(word 2,$(dep_$(1))),$(pkg_$(1)_repo))
+query_repo_git = $(patsubst git://github.com/%,https://github.com/%,$(call query_repo_default,$(1)))
+query_repo_git-subfolder = $(call query_repo_git,$(1))
+query_repo_git-submodule = -
+query_repo_hg = $(call query_repo_default,$(1))
+query_repo_svn = $(call query_repo_default,$(1))
+query_repo_cp = $(call query_repo_default,$(1))
+query_repo_ln = $(call query_repo_default,$(1))
+query_repo_hex = https://hex.pm/packages/$(if $(word 3,$(dep_$(1))),$(word 3,$(dep_$(1))),$(1))
+query_repo_fail = -
+query_repo_legacy = -
+
+query_version = $(call _qv,$(1),$(call query_fetch_method,$(1)))
+_qv = $(if $(query_version_$(2)),$(call query_version_$(2),$(1)),$(call dep_commit,$(1)))
+
+query_version_default = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(word 3,$(dep_$(1))),$(pkg_$(1)_commit)))
+query_version_git = $(call query_version_default,$(1))
+query_version_git-subfolder = $(call query_version_git,$(1))
+query_version_git-submodule = -
+query_version_hg = $(call query_version_default,$(1))
+query_version_svn = -
+query_version_cp = -
+query_version_ln = -
+query_version_hex = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(word 2,$(dep_$(1))),$(pkg_$(1)_commit)))
+query_version_fail = -
+query_version_legacy = -
+
+query_extra = $(call _qe,$(1),$(call query_fetch_method,$(1)))
+_qe = $(if $(query_extra_$(2)),$(call query_extra_$(2),$(1)),-)
+
+query_extra_git = -
+query_extra_git-subfolder = $(if $(dep_$(1)),subfolder=$(word 4,$(dep_$(1))),-)
+query_extra_git-submodule = -
+query_extra_hg = -
+query_extra_svn = -
+query_extra_cp = -
+query_extra_ln = -
+query_extra_hex = $(if $(dep_$(1)),package-name=$(word 3,$(dep_$(1))),-)
+query_extra_fail = -
+query_extra_legacy = -
+
+query_absolute_path = $(addprefix $(DEPS_DIR)/,$(call query_name,$(1)))
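+
+# Example (illustrative; 'cowboy' and its pin are placeholders): given
+#
+#   DEPS = cowboy
+#   dep_cowboy = git https://github.com/ninenines/cowboy 2.9.0
+#
+# $(call query_fetch_method,cowboy) yields git, $(call query_repo,cowboy)
+# the repository URL, and $(call query_version,cowboy) 2.9.0.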
+
+# Deprecated legacy query functions.
+dep_fetch = $(call query_fetch_method,$(1))
+dep_name = $(call query_name,$(1))
+dep_repo = $(call query_repo_git,$(1))
+dep_commit = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(if $(filter hex,$(word 1,$(dep_$(1)))),$(word 2,$(dep_$(1))),$(word 3,$(dep_$(1)))),$(pkg_$(1)_commit)))
+
+LOCAL_DEPS_DIRS = $(foreach a,$(LOCAL_DEPS),$(if $(wildcard $(APPS_DIR)/$(a)),$(APPS_DIR)/$(a)))
+ALL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(foreach dep,$(filter-out $(IGNORE_DEPS),$(BUILD_DEPS) $(DEPS)),$(call dep_name,$(dep))))
+
+# When we are calling an app directly we don't want to include it here,
+# otherwise it'll be treated both as an app and as the top-level project.
+ALL_APPS_DIRS = $(if $(wildcard $(APPS_DIR)/),$(filter-out $(APPS_DIR),$(shell find $(APPS_DIR) -maxdepth 1 -type d)))
+ifdef ROOT_DIR
+ifndef IS_APP
+ALL_APPS_DIRS := $(filter-out $(APPS_DIR)/$(notdir $(CURDIR)),$(ALL_APPS_DIRS))
+endif
+endif
+
+ifeq ($(filter $(APPS_DIR) $(DEPS_DIR),$(subst :, ,$(ERL_LIBS))),)
+ifeq ($(ERL_LIBS),)
+ ERL_LIBS = $(APPS_DIR):$(DEPS_DIR)
+else
+ ERL_LIBS := $(ERL_LIBS):$(APPS_DIR):$(DEPS_DIR)
+endif
+endif
+export ERL_LIBS
+
+export NO_AUTOPATCH
+
+# Verbosity.
+
+dep_verbose_0 = @echo " DEP $1 ($(call dep_commit,$1))";
+dep_verbose_2 = set -x;
+dep_verbose = $(dep_verbose_$(V))
+
+# Optimization: don't recompile deps unless truly necessary.
+
+ifndef IS_DEP
+ifneq ($(MAKELEVEL),0)
+$(shell rm -f ebin/dep_built)
+endif
+endif
+
+# Core targets.
+
+ALL_APPS_DIRS_TO_BUILD = $(if $(LOCAL_DEPS_DIRS)$(IS_APP),$(LOCAL_DEPS_DIRS),$(ALL_APPS_DIRS))
+
+apps:: $(ALL_APPS_DIRS) clean-tmp-deps.log | $(ERLANG_MK_TMP)
+# Create ebin directory for all apps to make sure Erlang recognizes them
+# as proper OTP applications when using -include_lib. This is a temporary
+# fix; a proper fix would be to compile apps/* in the right order.
+ifndef IS_APP
+ifneq ($(ALL_APPS_DIRS),)
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ mkdir -p $$dep/ebin; \
+ done
+endif
+endif
+# At the toplevel: if LOCAL_DEPS is defined with at least one local app, only
+# compile that list of apps. Otherwise, compile everything.
+# Within an app: compile all LOCAL_DEPS that are (uncompiled) local apps.
+ifneq ($(ALL_APPS_DIRS_TO_BUILD),)
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS_TO_BUILD); do \
+ if grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/apps.log; then \
+ :; \
+ else \
+ echo $$dep >> $(ERLANG_MK_TMP)/apps.log; \
+ $(MAKE) -C $$dep $(if $(IS_TEST),test-build-app) IS_APP=1; \
+ fi \
+ done
+endif
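+
+# Example (illustrative; 'my_core_app' is a placeholder): with an
+# application in $(APPS_DIR)/my_core_app, restrict the toplevel build to
+# that application only:
+#
+#   LOCAL_DEPS = my_core_app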
+
+clean-tmp-deps.log:
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_TMP)/apps.log $(ERLANG_MK_TMP)/deps.log
+endif
+
+# Erlang.mk does not rebuild dependencies once they have been compiled.
+# A developer working on the top-level project and on some of its
+# dependencies at the same time may want to change this behavior.
+# There are two solutions:
+# 1. Set `FULL=1` so that all dependencies are visited and
+# recursively recompiled if necessary.
+# 2. Set `FORCE_REBUILD=` to the specific list of dependencies that
+# should be recompiled (instead of the whole set).
+
+FORCE_REBUILD ?=
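+
+# Example usage (illustrative; the dependency names are placeholders):
+#
+#   $ make FULL=1
+#   $ make FORCE_REBUILD="cowlib ranch"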
+
+ifeq ($(origin FULL),undefined)
+ifneq ($(strip $(force_rebuild_dep)$(FORCE_REBUILD)),)
+define force_rebuild_dep
+echo "$(FORCE_REBUILD)" | grep -qw "$$(basename "$1")"
+endef
+endif
+endif
+
+ifneq ($(SKIP_DEPS),)
+deps::
+else
+deps:: $(ALL_DEPS_DIRS) apps clean-tmp-deps.log | $(ERLANG_MK_TMP)
+ifneq ($(ALL_DEPS_DIRS),)
+ $(verbose) set -e; for dep in $(ALL_DEPS_DIRS); do \
+ if grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/deps.log; then \
+ :; \
+ else \
+ echo $$dep >> $(ERLANG_MK_TMP)/deps.log; \
+ if [ -z "$(strip $(FULL))" ] $(if $(force_rebuild_dep),&& ! ($(call force_rebuild_dep,$$dep)),) && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ elif [ -f $$dep/GNUmakefile ] || [ -f $$dep/makefile ] || [ -f $$dep/Makefile ]; then \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ else \
+ echo "Error: No Makefile to build dependency $$dep." >&2; \
+ exit 2; \
+ fi \
+ fi \
+ done
+endif
+endif
+
+# Deps related targets.
+
+# @todo rename GNUmakefile and makefile into Makefile first, if they exist
+# While the Makefile could also be named GNUmakefile or makefile,
+# in practice only Makefile has been needed so far.
+define dep_autopatch
+ if [ -f $(DEPS_DIR)/$(1)/erlang.mk ]; then \
+ rm -rf $(DEPS_DIR)/$1/ebin/; \
+ $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+ $(call dep_autopatch_erlang_mk,$(1)); \
+ elif [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+ if [ -f $(DEPS_DIR)/$1/rebar.lock ]; then \
+ $(call dep_autopatch2,$1); \
+ elif [ 0 != `grep -c "include ../\w*\.mk" $(DEPS_DIR)/$(1)/Makefile` ]; then \
+ $(call dep_autopatch2,$(1)); \
+ elif [ 0 != `grep -ci "^[^#].*rebar" $(DEPS_DIR)/$(1)/Makefile` ]; then \
+ $(call dep_autopatch2,$(1)); \
+ elif [ -n "`find $(DEPS_DIR)/$(1)/ -type f -name \*.mk -not -name erlang.mk -exec grep -i "^[^#].*rebar" '{}' \;`" ]; then \
+ $(call dep_autopatch2,$(1)); \
+ fi \
+ else \
+ if [ ! -d $(DEPS_DIR)/$(1)/src/ ]; then \
+ $(call dep_autopatch_noop,$(1)); \
+ else \
+ $(call dep_autopatch2,$(1)); \
+ fi \
+ fi
+endef
+
+define dep_autopatch2
+ ! test -f $(DEPS_DIR)/$1/ebin/$1.app || \
+ mv -n $(DEPS_DIR)/$1/ebin/$1.app $(DEPS_DIR)/$1/src/$1.app.src; \
+ rm -f $(DEPS_DIR)/$1/ebin/$1.app; \
+ if [ -f $(DEPS_DIR)/$1/src/$1.app.src.script ]; then \
+ $(call erlang,$(call dep_autopatch_appsrc_script.erl,$(1))); \
+ fi; \
+ $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+ if [ -f $(DEPS_DIR)/$(1)/rebar -o -f $(DEPS_DIR)/$(1)/rebar.config -o -f $(DEPS_DIR)/$(1)/rebar.config.script -o -f $(DEPS_DIR)/$1/rebar.lock ]; then \
+ $(call dep_autopatch_fetch_rebar); \
+ $(call dep_autopatch_rebar,$(1)); \
+ else \
+ $(call dep_autopatch_gen,$(1)); \
+ fi
+endef
+
+define dep_autopatch_noop
+ printf "noop:\n" > $(DEPS_DIR)/$(1)/Makefile
+endef
+
+# Replace "include erlang.mk" with a line that will load the parent Erlang.mk
+# if given. Do it for all 3 possible Makefile file names.
+ifeq ($(NO_AUTOPATCH_ERLANG_MK),)
+define dep_autopatch_erlang_mk
+ for f in Makefile makefile GNUmakefile; do \
+ if [ -f $(DEPS_DIR)/$1/$$f ]; then \
+ sed -i.bak s/'include *erlang.mk'/'include $$(if $$(ERLANG_MK_FILENAME),$$(ERLANG_MK_FILENAME),erlang.mk)'/ $(DEPS_DIR)/$1/$$f; \
+ fi \
+ done
+endef
+else
+define dep_autopatch_erlang_mk
+ :
+endef
+endif
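+
+# For instance (illustrative), the line
+#
+#   include erlang.mk
+#
+# in a dependency's Makefile becomes:
+#
+#   include $(if $(ERLANG_MK_FILENAME),$(ERLANG_MK_FILENAME),erlang.mk)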
+
+define dep_autopatch_gen
+ printf "%s\n" \
+ "ERLC_OPTS = +debug_info" \
+ "include ../../erlang.mk" > $(DEPS_DIR)/$(1)/Makefile
+endef
+
+# We use flock/lockf when available to avoid concurrency issues.
+define dep_autopatch_fetch_rebar
+ if command -v flock >/dev/null; then \
+ flock $(ERLANG_MK_TMP)/rebar.lock sh -c "$(call dep_autopatch_fetch_rebar2)"; \
+ elif command -v lockf >/dev/null; then \
+ lockf $(ERLANG_MK_TMP)/rebar.lock sh -c "$(call dep_autopatch_fetch_rebar2)"; \
+ else \
+ $(call dep_autopatch_fetch_rebar2); \
+ fi
+endef
+
+define dep_autopatch_fetch_rebar2
+ if [ ! -d $(ERLANG_MK_TMP)/rebar ]; then \
+ git clone -q -n -- $(REBAR_GIT) $(ERLANG_MK_TMP)/rebar; \
+ cd $(ERLANG_MK_TMP)/rebar; \
+ git checkout -q $(REBAR_COMMIT); \
+ ./bootstrap; \
+ cd -; \
+ fi
+endef
+
+define dep_autopatch_rebar
+ if [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+ mv $(DEPS_DIR)/$(1)/Makefile $(DEPS_DIR)/$(1)/Makefile.orig.mk; \
+ fi; \
+ $(call erlang,$(call dep_autopatch_rebar.erl,$(1))); \
+ rm -f $(DEPS_DIR)/$(1)/ebin/$(1).app
+endef
+
+define dep_autopatch_rebar.erl
+ application:load(rebar),
+ application:set_env(rebar, log_level, debug),
+ rmemo:start(),
+ Conf1 = case file:consult("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config)") of
+ {ok, Conf0} -> Conf0;
+ _ -> []
+ end,
+ {Conf, OsEnv} = fun() ->
+ case filelib:is_file("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)") of
+ false -> {Conf1, []};
+ true ->
+ Bindings0 = erl_eval:new_bindings(),
+ Bindings1 = erl_eval:add_binding('CONFIG', Conf1, Bindings0),
+ Bindings = erl_eval:add_binding('SCRIPT', "$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings1),
+ Before = os:getenv(),
+ {ok, Conf2} = file:script("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings),
+ {Conf2, lists:foldl(fun(E, Acc) -> lists:delete(E, Acc) end, os:getenv(), Before)}
+ end
+ end(),
+ Write = fun (Text) ->
+ file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/Makefile)", Text, [append])
+ end,
+ Escape = fun (Text) ->
+ re:replace(Text, "\\\\$$", "\$$$$", [global, {return, list}])
+ end,
+ Write("IGNORE_DEPS += edown eper eunit_formatters meck node_package "
+ "rebar_lock_deps_plugin rebar_vsn_plugin reltool_util\n"),
+ Write("C_SRC_DIR = /path/do/not/exist\n"),
+ Write("C_SRC_TYPE = rebar\n"),
+ Write("DRV_CFLAGS = -fPIC\nexport DRV_CFLAGS\n"),
+ Write(["ERLANG_ARCH = ", rebar_utils:wordsize(), "\nexport ERLANG_ARCH\n"]),
+ ToList = fun
+ (V) when is_atom(V) -> atom_to_list(V);
+ (V) when is_list(V) -> "'\\"" ++ V ++ "\\"'"
+ end,
+ fun() ->
+ Write("ERLC_OPTS = +debug_info\nexport ERLC_OPTS\n"),
+ case lists:keyfind(erl_opts, 1, Conf) of
+ false -> ok;
+ {_, ErlOpts} ->
+ lists:foreach(fun
+ ({d, D}) ->
+ Write("ERLC_OPTS += -D" ++ ToList(D) ++ "=1\n");
+ ({d, DKey, DVal}) ->
+ Write("ERLC_OPTS += -D" ++ ToList(DKey) ++ "=" ++ ToList(DVal) ++ "\n");
+ ({i, I}) ->
+ Write(["ERLC_OPTS += -I ", I, "\n"]);
+ ({platform_define, Regex, D}) ->
+ case rebar_utils:is_arch(Regex) of
+ true -> Write("ERLC_OPTS += -D" ++ ToList(D) ++ "=1\n");
+ false -> ok
+ end;
+ ({parse_transform, PT}) ->
+ Write("ERLC_OPTS += +'{parse_transform, " ++ ToList(PT) ++ "}'\n");
+ (_) -> ok
+ end, ErlOpts)
+ end,
+ Write("\n")
+ end(),
+ GetHexVsn = fun(N, NP) ->
+ case file:consult("$(call core_native_path,$(DEPS_DIR)/$1/rebar.lock)") of
+ {ok, Lock} ->
+ io:format("~p~n", [Lock]),
+ case lists:keyfind("1.1.0", 1, Lock) of
+ {_, LockPkgs} ->
+ io:format("~p~n", [LockPkgs]),
+ case lists:keyfind(atom_to_binary(N, latin1), 1, LockPkgs) of
+ {_, {pkg, _, Vsn}, _} ->
+ io:format("~p~n", [Vsn]),
+ {N, {hex, NP, binary_to_list(Vsn)}};
+ _ ->
+ false
+ end;
+ _ ->
+ false
+ end;
+ _ ->
+ false
+ end
+ end,
+ SemVsn = fun
+ ("~>" ++ S0) ->
+ S = case S0 of
+ " " ++ S1 -> S1;
+ _ -> S0
+ end,
+ case length([ok || $$. <- S]) of
+ 0 -> S ++ ".0.0";
+ 1 -> S ++ ".0";
+ _ -> S
+ end;
+ (S) -> S
+ end,
+ fun() ->
+ File = case lists:keyfind(deps, 1, Conf) of
+ false -> [];
+ {_, Deps} ->
+ [begin case case Dep of
+ N when is_atom(N) -> GetHexVsn(N, N);
+ {N, S} when is_atom(N), is_list(S) -> {N, {hex, N, SemVsn(S)}};
+ {N, {pkg, NP}} when is_atom(N) -> GetHexVsn(N, NP);
+ {N, S, {pkg, NP}} -> {N, {hex, NP, S}};
+ {N, S} when is_tuple(S) -> {N, S};
+ {N, _, S} -> {N, S};
+ {N, _, S, _} -> {N, S};
+ _ -> false
+ end of
+ false -> ok;
+ {Name, Source} ->
+ {Method, Repo, Commit} = case Source of
+ {hex, NPV, V} -> {hex, V, NPV};
+ {git, R} -> {git, R, master};
+ {M, R, {branch, C}} -> {M, R, C};
+ {M, R, {ref, C}} -> {M, R, C};
+ {M, R, {tag, C}} -> {M, R, C};
+ {M, R, C} -> {M, R, C}
+ end,
+ Write(io_lib:format("DEPS += ~s\ndep_~s = ~s ~s ~s~n", [Name, Name, Method, Repo, Commit]))
+ end end || Dep <- Deps]
+ end
+ end(),
+ fun() ->
+ case lists:keyfind(erl_first_files, 1, Conf) of
+ false -> ok;
+ {_, Files} ->
+ Names = [[" ", case lists:reverse(F) of
+ "lre." ++ Elif -> lists:reverse(Elif);
+ "lrx." ++ Elif -> lists:reverse(Elif);
+ "lry." ++ Elif -> lists:reverse(Elif);
+ Elif -> lists:reverse(Elif)
+ end] || "src/" ++ F <- Files],
+ Write(io_lib:format("COMPILE_FIRST +=~s\n", [Names]))
+ end
+ end(),
+ Write("\n\nrebar_dep: preprocess pre-deps deps pre-app app\n"),
+ Write("\npreprocess::\n"),
+ Write("\npre-deps::\n"),
+ Write("\npre-app::\n"),
+ PatchHook = fun(Cmd) ->
+ Cmd2 = re:replace(Cmd, "^([g]?make)(.*)( -C.*)", "\\\\1\\\\3\\\\2", [{return, list}]),
+ case Cmd2 of
+ "make -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1);
+ "gmake -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1);
+ "make " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1);
+ "gmake " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1);
+ _ -> Escape(Cmd)
+ end
+ end,
+ fun() ->
+ case lists:keyfind(pre_hooks, 1, Conf) of
+ false -> ok;
+ {_, Hooks} ->
+ [case H of
+ {'get-deps', Cmd} ->
+ Write("\npre-deps::\n\t" ++ PatchHook(Cmd) ++ "\n");
+ {compile, Cmd} ->
+ Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n");
+ {Regex, compile, Cmd} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n");
+ false -> ok
+ end;
+ _ -> ok
+ end || H <- Hooks]
+ end
+ end(),
+ ShellToMk = fun(V0) ->
+ V1 = re:replace(V0, "[$$][(]", "$$\(shell ", [global]),
+ V = re:replace(V1, "([$$])(?![(])(\\\\w*)", "\\\\1(\\\\2)", [global]),
+ re:replace(V, "-Werror\\\\b", "", [{return, list}, global])
+ end,
+ PortSpecs = fun() ->
+ case lists:keyfind(port_specs, 1, Conf) of
+ false ->
+ case filelib:is_dir("$(call core_native_path,$(DEPS_DIR)/$1/c_src)") of
+ false -> [];
+ true ->
+ [{"priv/" ++ proplists:get_value(so_name, Conf, "$(1)_drv.so"),
+ proplists:get_value(port_sources, Conf, ["c_src/*.c"]), []}]
+ end;
+ {_, Specs} ->
+ lists:flatten([case S of
+ {Output, Input} -> {ShellToMk(Output), Input, []};
+ {Regex, Output, Input} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {ShellToMk(Output), Input, []};
+ false -> []
+ end;
+ {Regex, Output, Input, [{env, Env}]} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {ShellToMk(Output), Input, Env};
+ false -> []
+ end
+ end || S <- Specs])
+ end
+ end(),
+ PortSpecWrite = fun (Text) ->
+ file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/c_src/Makefile.erlang.mk)", Text, [append])
+ end,
+ case PortSpecs of
+ [] -> ok;
+ _ ->
+ Write("\npre-app::\n\t@$$\(MAKE) --no-print-directory -f c_src/Makefile.erlang.mk\n"),
+ PortSpecWrite(io_lib:format("ERL_CFLAGS ?= -finline-functions -Wall -fPIC -I \\"~s/erts-~s/include\\" -I \\"~s\\"\n",
+ [code:root_dir(), erlang:system_info(version), code:lib_dir(erl_interface, include)])),
+ PortSpecWrite(io_lib:format("ERL_LDFLAGS ?= -L \\"~s\\" -lei\n",
+ [code:lib_dir(erl_interface, lib)])),
+ [PortSpecWrite(["\n", E, "\n"]) || E <- OsEnv],
+ FilterEnv = fun(Env) ->
+ lists:flatten([case E of
+ {_, _} -> E;
+ {Regex, K, V} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {K, V};
+ false -> []
+ end
+ end || E <- Env])
+ end,
+ MergeEnv = fun(Env) ->
+ lists:foldl(fun ({K, V}, Acc) ->
+ case lists:keyfind(K, 1, Acc) of
+ false -> [{K, rebar_utils:expand_env_variable(V, K, "")}|Acc];
+ {_, V0} -> [{K, rebar_utils:expand_env_variable(V, K, V0)}|Acc]
+ end
+ end, [], Env)
+ end,
+ PortEnv = case lists:keyfind(port_env, 1, Conf) of
+ false -> [];
+ {_, PortEnv0} -> FilterEnv(PortEnv0)
+ end,
+ PortSpec = fun ({Output, Input0, Env}) ->
+ filelib:ensure_dir("$(call core_native_path,$(DEPS_DIR)/$1/)" ++ Output),
+ Input = [[" ", I] || I <- Input0],
+ PortSpecWrite([
+ [["\n", K, " = ", ShellToMk(V)] || {K, V} <- lists:reverse(MergeEnv(PortEnv))],
+ case $(PLATFORM) of
+ darwin -> "\n\nLDFLAGS += -flat_namespace -undefined suppress";
+ _ -> ""
+ end,
+ "\n\nall:: ", Output, "\n\t@:\n\n",
+ "%.o: %.c\n\t$$\(CC) -c -o $$\@ $$\< $$\(CFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.C\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.cc\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.cpp\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ [[Output, ": ", K, " += ", ShellToMk(V), "\n"] || {K, V} <- lists:reverse(MergeEnv(FilterEnv(Env)))],
+ Output, ": $$\(foreach ext,.c .C .cc .cpp,",
+ "$$\(patsubst %$$\(ext),%.o,$$\(filter %$$\(ext),$$\(wildcard", Input, "))))\n",
+ "\t$$\(CC) -o $$\@ $$\? $$\(LDFLAGS) $$\(ERL_LDFLAGS) $$\(DRV_LDFLAGS) $$\(EXE_LDFLAGS)",
+ case {filename:extension(Output), $(PLATFORM)} of
+ {[], _} -> "\n";
+ {_, darwin} -> "\n";
+ _ -> " -shared\n"
+ end])
+ end,
+ [PortSpec(S) || S <- PortSpecs]
+ end,
+ fun() ->
+ case lists:keyfind(plugins, 1, Conf) of
+ false -> ok;
+ {_, Plugins0} ->
+ Plugins = [P || P <- Plugins0, is_tuple(P)],
+ case lists:keyfind('lfe-compile', 1, Plugins) of
+ false -> ok;
+ _ -> Write("\nBUILD_DEPS = lfe lfe.mk\ndep_lfe.mk = git https://github.com/ninenines/lfe.mk master\nDEP_PLUGINS = lfe.mk\n")
+ end
+ end
+ end(),
+ Write("\ninclude $$\(if $$\(ERLANG_MK_FILENAME),$$\(ERLANG_MK_FILENAME),erlang.mk)"),
+ RunPlugin = fun(Plugin, Step) ->
+ case erlang:function_exported(Plugin, Step, 2) of
+ false -> ok;
+ true ->
+ c:cd("$(call core_native_path,$(DEPS_DIR)/$1/)"),
+ Ret = Plugin:Step({config, "", Conf, dict:new(), dict:new(), dict:new(),
+ dict:store(base_dir, "", dict:new())}, undefined),
+ io:format("rebar plugin ~p step ~p ret ~p~n", [Plugin, Step, Ret])
+ end
+ end,
+ fun() ->
+ case lists:keyfind(plugins, 1, Conf) of
+ false -> ok;
+ {_, Plugins0} ->
+ Plugins = [P || P <- Plugins0, is_atom(P)],
+ [begin
+ case lists:keyfind(deps, 1, Conf) of
+ false -> ok;
+ {_, Deps} ->
+ case lists:keyfind(P, 1, Deps) of
+ false -> ok;
+ _ ->
+ Path = "$(call core_native_path,$(DEPS_DIR)/)" ++ atom_to_list(P),
+ io:format("~s", [os:cmd("$(MAKE) -C $(call core_native_path,$(DEPS_DIR)/$1) " ++ Path)]),
+ io:format("~s", [os:cmd("$(MAKE) -C " ++ Path ++ " IS_DEP=1")]),
+ code:add_patha(Path ++ "/ebin")
+ end
+ end
+ end || P <- Plugins],
+ [case code:load_file(P) of
+ {module, P} -> ok;
+ _ ->
+ case lists:keyfind(plugin_dir, 1, Conf) of
+ false -> ok;
+ {_, PluginsDir} ->
+ ErlFile = "$(call core_native_path,$(DEPS_DIR)/$1/)" ++ PluginsDir ++ "/" ++ atom_to_list(P) ++ ".erl",
+ {ok, P, Bin} = compile:file(ErlFile, [binary]),
+ {module, P} = code:load_binary(P, ErlFile, Bin)
+ end
+ end || P <- Plugins],
+ [RunPlugin(P, preprocess) || P <- Plugins],
+ [RunPlugin(P, pre_compile) || P <- Plugins],
+ [RunPlugin(P, compile) || P <- Plugins]
+ end
+ end(),
+ halt()
+endef
+
+define dep_autopatch_appsrc_script.erl
+ AppSrc = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+ AppSrcScript = AppSrc ++ ".script",
+ {ok, Conf0} = file:consult(AppSrc),
+ Bindings0 = erl_eval:new_bindings(),
+ Bindings1 = erl_eval:add_binding('CONFIG', Conf0, Bindings0),
+ Bindings = erl_eval:add_binding('SCRIPT', AppSrcScript, Bindings1),
+ Conf = case file:script(AppSrcScript, Bindings) of
+ {ok, [C]} -> C;
+ {ok, C} -> C
+ end,
+ ok = file:write_file(AppSrc, io_lib:format("~p.~n", [Conf])),
+ halt()
+endef
+
+define dep_autopatch_appsrc.erl
+ AppSrcOut = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+ AppSrcIn = case filelib:is_regular(AppSrcOut) of false -> "$(call core_native_path,$(DEPS_DIR)/$1/ebin/$1.app)"; true -> AppSrcOut end,
+ case filelib:is_regular(AppSrcIn) of
+ false -> ok;
+ true ->
+ {ok, [{application, $(1), L0}]} = file:consult(AppSrcIn),
+ L1 = lists:keystore(modules, 1, L0, {modules, []}),
+ L2 = case lists:keyfind(vsn, 1, L1) of
+ {_, git} -> lists:keyreplace(vsn, 1, L1, {vsn, lists:droplast(os:cmd("git -C $(DEPS_DIR)/$1 describe --dirty --tags --always"))});
+ {_, {cmd, _}} -> lists:keyreplace(vsn, 1, L1, {vsn, "cmd"});
+ _ -> L1
+ end,
+ L3 = case lists:keyfind(registered, 1, L2) of false -> [{registered, []}|L2]; _ -> L2 end,
+ ok = file:write_file(AppSrcOut, io_lib:format("~p.~n", [{application, $(1), L3}])),
+ case AppSrcOut of AppSrcIn -> ok; _ -> ok = file:delete(AppSrcIn) end
+ end,
+ halt()
+endef
+
+define dep_fetch_git
+ git clone -q -n -- $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && git checkout -q $(call dep_commit,$(1));
+endef
+
+define dep_fetch_git-subfolder
+ mkdir -p $(ERLANG_MK_TMP)/git-subfolder; \
+ git clone -q -n -- $(call dep_repo,$1) \
+ $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1); \
+ cd $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1) \
+ && git checkout -q $(call dep_commit,$1); \
+ ln -s $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1)/$(word 4,$(dep_$(1))) \
+ $(DEPS_DIR)/$(call dep_name,$1);
+endef
+
+define dep_fetch_git-submodule
+ git submodule update --init -- $(DEPS_DIR)/$1;
+endef
+
+define dep_fetch_hg
+ hg clone -q -U $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && hg update -q $(call dep_commit,$(1));
+endef
+
+define dep_fetch_svn
+ svn checkout -q $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+define dep_fetch_cp
+ cp -R $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+define dep_fetch_ln
+ ln -s $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+# Hex dependencies carry only a package version, so there is no need to
+# look them up in the Erlang.mk package index.
+define dep_fetch_hex
+ mkdir -p $(ERLANG_MK_TMP)/hex $(DEPS_DIR)/$1; \
+ $(call core_http_get,$(ERLANG_MK_TMP)/hex/$1.tar,\
+ https://repo.hex.pm/tarballs/$(if $(word 3,$(dep_$1)),$(word 3,$(dep_$1)),$1)-$(strip $(word 2,$(dep_$1))).tar); \
+ tar -xOf $(ERLANG_MK_TMP)/hex/$1.tar contents.tar.gz | tar -C $(DEPS_DIR)/$1 -xzf -;
+endef
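+
+# Example (illustrative; names and versions are placeholders): the version
+# comes second, and an optional third word overrides the Hex package name
+# when it differs from the application name:
+#
+#   dep_jsx = hex 3.1.0
+#   dep_quantile = hex 0.2.1 quantile_estimator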
+
+define dep_fetch_fail
+ echo "Error: Unknown or invalid dependency: $(1)." >&2; \
+ exit 78;
+endef
+
+# Kept for compatibility purposes with older Erlang.mk configuration.
+define dep_fetch_legacy
+ $(warning WARNING: '$(1)' dependency configuration uses deprecated format.) \
+ git clone -q -n -- $(word 1,$(dep_$(1))) $(DEPS_DIR)/$(1); \
+ cd $(DEPS_DIR)/$(1) && git checkout -q $(if $(word 2,$(dep_$(1))),$(word 2,$(dep_$(1))),master);
+endef
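+
+# Legacy format example (illustrative; the name and pin are placeholders):
+# a repository followed by an optional commit, with no fetch method:
+#
+#   dep_cowboy = https://github.com/ninenines/cowboy 1.0.0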
+
+define dep_target
+$(DEPS_DIR)/$(call dep_name,$1): | $(ERLANG_MK_TMP)
+ $(eval DEP_NAME := $(call dep_name,$1))
+ $(eval DEP_STR := $(if $(filter $1,$(DEP_NAME)),$1,"$1 ($(DEP_NAME))"))
+ $(verbose) if test -d $(APPS_DIR)/$(DEP_NAME); then \
+ echo "Error: Dependency" $(DEP_STR) "conflicts with application found in $(APPS_DIR)/$(DEP_NAME)." >&2; \
+ exit 17; \
+ fi
+ $(verbose) mkdir -p $(DEPS_DIR)
+ $(dep_verbose) $(call dep_fetch_$(strip $(call dep_fetch,$(1))),$(1))
+ $(verbose) if [ -f $(DEPS_DIR)/$(1)/configure.ac -o -f $(DEPS_DIR)/$(1)/configure.in ] \
+ && [ ! -f $(DEPS_DIR)/$(1)/configure ]; then \
+ echo " AUTO " $(DEP_STR); \
+ cd $(DEPS_DIR)/$(1) && autoreconf -Wall -vif -I m4; \
+ fi
+ - $(verbose) if [ -f $(DEPS_DIR)/$(DEP_NAME)/configure ]; then \
+ echo " CONF " $(DEP_STR); \
+ cd $(DEPS_DIR)/$(DEP_NAME) && ./configure; \
+ fi
+ifeq ($(filter $(1),$(NO_AUTOPATCH)),)
+ $(verbose) $$(MAKE) --no-print-directory autopatch-$(DEP_NAME)
+endif
+
+.PHONY: autopatch-$(call dep_name,$1)
+
+autopatch-$(call dep_name,$1)::
+ $(verbose) if [ "$(1)" = "amqp_client" -a "$(RABBITMQ_CLIENT_PATCH)" ]; then \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \
+ echo " PATCH Downloading rabbitmq-codegen"; \
+ git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \
+ fi; \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-server ]; then \
+ echo " PATCH Downloading rabbitmq-server"; \
+ git clone https://github.com/rabbitmq/rabbitmq-server.git $(DEPS_DIR)/rabbitmq-server; \
+ fi; \
+ ln -s $(DEPS_DIR)/amqp_client/deps/rabbit_common-0.0.0 $(DEPS_DIR)/rabbit_common; \
+ elif [ "$(1)" = "rabbit" -a "$(RABBITMQ_SERVER_PATCH)" ]; then \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \
+ echo " PATCH Downloading rabbitmq-codegen"; \
+ git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \
+ fi \
+ elif [ "$1" = "elixir" -a "$(ELIXIR_PATCH)" ]; then \
+ ln -s lib/elixir/ebin $(DEPS_DIR)/elixir/; \
+ else \
+ $$(call dep_autopatch,$(call dep_name,$1)) \
+ fi
+endef
+
+$(foreach dep,$(BUILD_DEPS) $(DEPS),$(eval $(call dep_target,$(dep))))
+
+ifndef IS_APP
+clean:: clean-apps
+
+clean-apps:
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ $(MAKE) -C $$dep clean IS_APP=1; \
+ done
+
+distclean:: distclean-apps
+
+distclean-apps:
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ $(MAKE) -C $$dep distclean IS_APP=1; \
+ done
+endif
+
+ifndef SKIP_DEPS
+distclean:: distclean-deps
+
+distclean-deps:
+ $(gen_verbose) rm -rf $(DEPS_DIR)
+endif
+
+# Forward-declare variables used in core/deps-tools.mk. This is required
+# in case plugins use them.
+
+ERLANG_MK_RECURSIVE_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-deps-list.log
+ERLANG_MK_RECURSIVE_DOC_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-doc-deps-list.log
+ERLANG_MK_RECURSIVE_REL_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-rel-deps-list.log
+ERLANG_MK_RECURSIVE_TEST_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-test-deps-list.log
+ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-shell-deps-list.log
+
+ERLANG_MK_QUERY_DEPS_FILE = $(ERLANG_MK_TMP)/query-deps.log
+ERLANG_MK_QUERY_DOC_DEPS_FILE = $(ERLANG_MK_TMP)/query-doc-deps.log
+ERLANG_MK_QUERY_REL_DEPS_FILE = $(ERLANG_MK_TMP)/query-rel-deps.log
+ERLANG_MK_QUERY_TEST_DEPS_FILE = $(ERLANG_MK_TMP)/query-test-deps.log
+ERLANG_MK_QUERY_SHELL_DEPS_FILE = $(ERLANG_MK_TMP)/query-shell-deps.log
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: clean-app
+
+# Configuration.
+
+ERLC_OPTS ?= -Werror +debug_info +warn_export_vars +warn_shadow_vars \
+ +warn_obsolete_guard # +bin_opt_info +warn_export_all +warn_missing_spec
+COMPILE_FIRST ?=
+COMPILE_FIRST_PATHS = $(addprefix src/,$(addsuffix .erl,$(COMPILE_FIRST)))
+ERLC_EXCLUDE ?=
+ERLC_EXCLUDE_PATHS = $(addprefix src/,$(addsuffix .erl,$(ERLC_EXCLUDE)))
+
+ERLC_ASN1_OPTS ?=
+
+ERLC_MIB_OPTS ?=
+COMPILE_MIB_FIRST ?=
+COMPILE_MIB_FIRST_PATHS = $(addprefix mibs/,$(addsuffix .mib,$(COMPILE_MIB_FIRST)))
+
+# Verbosity.
+
+app_verbose_0 = @echo " APP " $(PROJECT);
+app_verbose_2 = set -x;
+app_verbose = $(app_verbose_$(V))
+
+appsrc_verbose_0 = @echo " APP " $(PROJECT).app.src;
+appsrc_verbose_2 = set -x;
+appsrc_verbose = $(appsrc_verbose_$(V))
+
+makedep_verbose_0 = @echo " DEPEND" $(PROJECT).d;
+makedep_verbose_2 = set -x;
+makedep_verbose = $(makedep_verbose_$(V))
+
+erlc_verbose_0 = @echo " ERLC " $(filter-out $(patsubst %,%.erl,$(ERLC_EXCLUDE)),\
+ $(filter %.erl %.core,$(?F)));
+erlc_verbose_2 = set -x;
+erlc_verbose = $(erlc_verbose_$(V))
+
+xyrl_verbose_0 = @echo " XYRL " $(filter %.xrl %.yrl,$(?F));
+xyrl_verbose_2 = set -x;
+xyrl_verbose = $(xyrl_verbose_$(V))
+
+asn1_verbose_0 = @echo " ASN1 " $(filter %.asn1,$(?F));
+asn1_verbose_2 = set -x;
+asn1_verbose = $(asn1_verbose_$(V))
+
+mib_verbose_0 = @echo " MIB " $(filter %.bin %.mib,$(?F));
+mib_verbose_2 = set -x;
+mib_verbose = $(mib_verbose_$(V))
+
+ifneq ($(wildcard src/),)
+
+# Targets.
+
+app:: $(if $(wildcard ebin/test),clean) deps
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d
+ $(verbose) $(MAKE) --no-print-directory app-build
+
+ifeq ($(wildcard src/$(PROJECT_MOD).erl),)
+define app_file
+{application, '$(PROJECT)', [
+ {description, "$(PROJECT_DESCRIPTION)"},
+ {vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP),
+ {id$(comma)$(space)"$(1)"}$(comma))
+ {modules, [$(call comma_list,$(2))]},
+ {registered, []},
+ {applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(foreach dep,$(DEPS),$(call dep_name,$(dep))))]},
+ {env, $(subst \,\\,$(PROJECT_ENV))}$(if $(findstring {,$(PROJECT_APP_EXTRA_KEYS)),$(comma)$(newline)$(tab)$(subst \,\\,$(PROJECT_APP_EXTRA_KEYS)),)
+]}.
+endef
+else
+define app_file
+{application, '$(PROJECT)', [
+ {description, "$(PROJECT_DESCRIPTION)"},
+ {vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP),
+ {id$(comma)$(space)"$(1)"}$(comma))
+ {modules, [$(call comma_list,$(2))]},
+ {registered, [$(call comma_list,$(PROJECT)_sup $(PROJECT_REGISTERED))]},
+ {applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(foreach dep,$(DEPS),$(call dep_name,$(dep))))]},
+ {mod, {$(PROJECT_MOD), []}},
+ {env, $(subst \,\\,$(PROJECT_ENV))}$(if $(findstring {,$(PROJECT_APP_EXTRA_KEYS)),$(comma)$(newline)$(tab)$(subst \,\\,$(PROJECT_APP_EXTRA_KEYS)),)
+]}.
+endef
+endif
+
+app-build: ebin/$(PROJECT).app
+ $(verbose) :
+
+# Source files.
+
+ALL_SRC_FILES := $(sort $(call core_find,src/,*))
+
+ERL_FILES := $(filter %.erl,$(ALL_SRC_FILES))
+CORE_FILES := $(filter %.core,$(ALL_SRC_FILES))
+
+# ASN.1 files.
+
+ifneq ($(wildcard asn1/),)
+ASN1_FILES = $(sort $(call core_find,asn1/,*.asn1))
+ERL_FILES += $(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES))))
+
+define compile_asn1
+ $(verbose) mkdir -p include/
+ $(asn1_verbose) erlc -v -I include/ -o asn1/ +noobj $(ERLC_ASN1_OPTS) $(1)
+ $(verbose) mv asn1/*.erl src/
+ -$(verbose) mv asn1/*.hrl include/
+ $(verbose) mv asn1/*.asn1db include/
+endef
+
+$(PROJECT).d:: $(ASN1_FILES)
+ $(if $(strip $?),$(call compile_asn1,$?))
+endif
+
+# SNMP MIB files.
+
+ifneq ($(wildcard mibs/),)
+MIB_FILES = $(sort $(call core_find,mibs/,*.mib))
+
+$(PROJECT).d:: $(COMPILE_MIB_FIRST_PATHS) $(MIB_FILES)
+ $(verbose) mkdir -p include/ priv/mibs/
+ $(mib_verbose) erlc -v $(ERLC_MIB_OPTS) -o priv/mibs/ -I priv/mibs/ $?
+ $(mib_verbose) erlc -o include/ -- $(addprefix priv/mibs/,$(patsubst %.mib,%.bin,$(notdir $?)))
+endif
+
+# Leex and Yecc files.
+
+XRL_FILES := $(filter %.xrl,$(ALL_SRC_FILES))
+XRL_ERL_FILES = $(addprefix src/,$(patsubst %.xrl,%.erl,$(notdir $(XRL_FILES))))
+ERL_FILES += $(XRL_ERL_FILES)
+
+YRL_FILES := $(filter %.yrl,$(ALL_SRC_FILES))
+YRL_ERL_FILES = $(addprefix src/,$(patsubst %.yrl,%.erl,$(notdir $(YRL_FILES))))
+ERL_FILES += $(YRL_ERL_FILES)
+
+$(PROJECT).d:: $(XRL_FILES) $(YRL_FILES)
+ $(if $(strip $?),$(xyrl_verbose) erlc -v -o src/ $(YRL_ERLC_OPTS) $?)
+
+# Erlang and Core Erlang files.
+
+define makedep.erl
+ E = ets:new(makedep, [bag]),
+ G = digraph:new([acyclic]),
+ ErlFiles = lists:usort(string:tokens("$(ERL_FILES)", " ")),
+ DepsDir = "$(call core_native_path,$(DEPS_DIR))",
+ AppsDir = "$(call core_native_path,$(APPS_DIR))",
+ DepsDirsSrc = "$(if $(wildcard $(DEPS_DIR)/*/src), $(call core_native_path,$(wildcard $(DEPS_DIR)/*/src)))",
+ DepsDirsInc = "$(if $(wildcard $(DEPS_DIR)/*/include), $(call core_native_path,$(wildcard $(DEPS_DIR)/*/include)))",
+ AppsDirsSrc = "$(if $(wildcard $(APPS_DIR)/*/src), $(call core_native_path,$(wildcard $(APPS_DIR)/*/src)))",
+ AppsDirsInc = "$(if $(wildcard $(APPS_DIR)/*/include), $(call core_native_path,$(wildcard $(APPS_DIR)/*/include)))",
+ DepsDirs = lists:usort(string:tokens(DepsDirsSrc++DepsDirsInc, " ")),
+ AppsDirs = lists:usort(string:tokens(AppsDirsSrc++AppsDirsInc, " ")),
+ Modules = [{list_to_atom(filename:basename(F, ".erl")), F} || F <- ErlFiles],
+ Add = fun (Mod, Dep) ->
+ case lists:keyfind(Dep, 1, Modules) of
+ false -> ok;
+ {_, DepFile} ->
+ {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+ ets:insert(E, {ModFile, DepFile}),
+ digraph:add_vertex(G, Mod),
+ digraph:add_vertex(G, Dep),
+ digraph:add_edge(G, Mod, Dep)
+ end
+ end,
+ AddHd = fun (F, Mod, DepFile) ->
+ case file:open(DepFile, [read]) of
+ {error, enoent} ->
+ ok;
+ {ok, Fd} ->
+ {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+ case ets:match(E, {ModFile, DepFile}) of
+ [] ->
+ ets:insert(E, {ModFile, DepFile}),
+ F(F, Fd, Mod,0);
+ _ -> ok
+ end
+ end
+ end,
+ SearchHrl = fun
+ F(_Hrl, []) -> {error,enoent};
+ F(Hrl, [Dir|Dirs]) ->
+ HrlF = filename:join([Dir,Hrl]),
+ case filelib:is_file(HrlF) of
+ true ->
+ {ok, HrlF};
+ false -> F(Hrl,Dirs)
+ end
+ end,
+ Attr = fun
+ (_F, Mod, behavior, Dep) ->
+ Add(Mod, Dep);
+ (_F, Mod, behaviour, Dep) ->
+ Add(Mod, Dep);
+ (_F, Mod, compile, {parse_transform, Dep}) ->
+ Add(Mod, Dep);
+ (_F, Mod, compile, Opts) when is_list(Opts) ->
+ case proplists:get_value(parse_transform, Opts) of
+ undefined -> ok;
+ Dep -> Add(Mod, Dep)
+ end;
+ (F, Mod, include, Hrl) ->
+ case SearchHrl(Hrl, ["src", "include",AppsDir,DepsDir]++AppsDirs++DepsDirs) of
+ {ok, FoundHrl} -> AddHd(F, Mod, FoundHrl);
+ {error, _} -> false
+ end;
+ (F, Mod, include_lib, Hrl) ->
+ case SearchHrl(Hrl, ["src", "include",AppsDir,DepsDir]++AppsDirs++DepsDirs) of
+ {ok, FoundHrl} -> AddHd(F, Mod, FoundHrl);
+ {error, _} -> false
+ end;
+ (F, Mod, import, {Imp, _}) ->
+ IsFile =
+ case lists:keyfind(Imp, 1, Modules) of
+ false -> false;
+ {_, FilePath} -> filelib:is_file(FilePath)
+ end,
+ case IsFile of
+ false -> ok;
+ true -> Add(Mod, Imp)
+ end;
+ (_, _, _, _) -> ok
+ end,
+ MakeDepend = fun
+ (F, Fd, Mod, StartLocation) ->
+ {ok, Filename} = file:pid2name(Fd),
+ case io:parse_erl_form(Fd, undefined, StartLocation) of
+ {ok, AbsData, EndLocation} ->
+ case AbsData of
+ {attribute, _, Key, Value} ->
+ Attr(F, Mod, Key, Value),
+ F(F, Fd, Mod, EndLocation);
+ _ -> F(F, Fd, Mod, EndLocation)
+ end;
+ {eof, _ } -> file:close(Fd);
+ {error, ErrorDescription } ->
+ file:close(Fd);
+ {error, ErrorInfo, ErrorLocation} ->
+ F(F, Fd, Mod, ErrorLocation)
+ end,
+ ok
+ end,
+ [begin
+ Mod = list_to_atom(filename:basename(F, ".erl")),
+ case file:open(F, [read]) of
+ {ok, Fd} -> MakeDepend(MakeDepend, Fd, Mod,0);
+ {error, enoent} -> ok
+ end
+ end || F <- ErlFiles],
+ Depend = sofs:to_external(sofs:relation_to_family(sofs:relation(ets:tab2list(E)))),
+ CompileFirst = [X || X <- lists:reverse(digraph_utils:topsort(G)), [] =/= digraph:in_neighbours(G, X)],
+ TargetPath = fun(Target) ->
+ case lists:keyfind(Target, 1, Modules) of
+ false -> "";
+ {_, DepFile} ->
+ DirSubname = tl(string:tokens(filename:dirname(DepFile), "/")),
+ string:join(DirSubname ++ [atom_to_list(Target)], "/")
+ end
+ end,
+ Output0 = [
+ "# Generated by Erlang.mk. Edit at your own risk!\n\n",
+ [[F, "::", [[" ", D] || D <- Deps], "; @touch \$$@\n"] || {F, Deps} <- Depend],
+ "\nCOMPILE_FIRST +=", [[" ", TargetPath(CF)] || CF <- CompileFirst], "\n"
+ ],
+ Output = case "é" of
+ [233] -> unicode:characters_to_binary(Output0);
+ _ -> Output0
+ end,
+ ok = file:write_file("$(1)", Output),
+ halt()
+endef
+
+ifeq ($(if $(NO_MAKEDEP),$(wildcard $(PROJECT).d),),)
+$(PROJECT).d:: $(ERL_FILES) $(call core_find,include/,*.hrl) $(MAKEFILE_LIST)
+ $(makedep_verbose) $(call erlang,$(call makedep.erl,$@))
+endif
+
+ifeq ($(IS_APP)$(IS_DEP),)
+ifneq ($(words $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES)),0)
+# Rebuild everything when the Makefile changes.
+$(ERLANG_MK_TMP)/last-makefile-change: $(MAKEFILE_LIST) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES); \
+ touch -c $(PROJECT).d; \
+ fi
+ $(verbose) touch $@
+
+$(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES):: $(ERLANG_MK_TMP)/last-makefile-change
+ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change
+endif
+endif
+
+$(PROJECT).d::
+ $(verbose) :
+
+include $(wildcard $(PROJECT).d)
+
+ebin/$(PROJECT).app:: ebin/
+
+ebin/:
+ $(verbose) mkdir -p ebin/
+
+define compile_erl
+ $(erlc_verbose) erlc -v $(if $(IS_DEP),$(filter-out -Werror,$(ERLC_OPTS)),$(ERLC_OPTS)) -o ebin/ \
+ -pa ebin/ -I include/ $(filter-out $(ERLC_EXCLUDE_PATHS),$(COMPILE_FIRST_PATHS) $(1))
+endef
+
+define validate_app_file
+ case file:consult("ebin/$(PROJECT).app") of
+ {ok, _} -> halt();
+ _ -> halt(1)
+ end
+endef
+
+ebin/$(PROJECT).app:: $(ERL_FILES) $(CORE_FILES) $(wildcard src/$(PROJECT).app.src)
+ $(eval FILES_TO_COMPILE := $(filter-out src/$(PROJECT).app.src,$?))
+ $(if $(strip $(FILES_TO_COMPILE)),$(call compile_erl,$(FILES_TO_COMPILE)))
+# Older git versions do not have the --first-parent flag. Do without it in that case.
+ $(eval GITDESCRIBE := $(shell git describe --dirty --abbrev=7 --tags --always --first-parent 2>/dev/null \
+ || git describe --dirty --abbrev=7 --tags --always 2>/dev/null || true))
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(filter-out $(ERLC_EXCLUDE_PATHS),$(ERL_FILES) $(CORE_FILES) $(BEAM_FILES)))))))
+ifeq ($(wildcard src/$(PROJECT).app.src),)
+ $(app_verbose) printf '$(subst %,%%,$(subst $(newline),\n,$(subst ','\'',$(call app_file,$(GITDESCRIBE),$(MODULES)))))' \
+ > ebin/$(PROJECT).app
+ $(verbose) if ! $(call erlang,$(call validate_app_file)); then \
+ echo "The .app file produced is invalid. Please verify the value of PROJECT_ENV." >&2; \
+ exit 1; \
+ fi
+else
+ $(verbose) if [ -z "$$(grep -e '^[^%]*{\s*modules\s*,' src/$(PROJECT).app.src)" ]; then \
+ echo "Empty modules entry not found in $(PROJECT).app.src. Please consult the erlang.mk documentation for instructions." >&2; \
+ exit 1; \
+ fi
+ $(appsrc_verbose) cat src/$(PROJECT).app.src \
+ | sed "s/{[[:space:]]*modules[[:space:]]*,[[:space:]]*\[\]}/{modules, \[$(call comma_list,$(MODULES))\]}/" \
+ | sed "s/{id,[[:space:]]*\"git\"}/{id, \"$(subst /,\/,$(GITDESCRIBE))\"}/" \
+ > ebin/$(PROJECT).app
+endif
+ifneq ($(wildcard src/$(PROJECT).appup),)
+ $(verbose) cp src/$(PROJECT).appup ebin/
+endif
+
+clean:: clean-app
+
+clean-app:
+ $(gen_verbose) rm -rf $(PROJECT).d ebin/ priv/mibs/ $(XRL_ERL_FILES) $(YRL_ERL_FILES) \
+ $(addprefix include/,$(patsubst %.mib,%.hrl,$(notdir $(MIB_FILES)))) \
+ $(addprefix include/,$(patsubst %.asn1,%.hrl,$(notdir $(ASN1_FILES)))) \
+ $(addprefix include/,$(patsubst %.asn1,%.asn1db,$(notdir $(ASN1_FILES)))) \
+ $(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES))))
+
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Viktor Söderqvist <viktor@zuiderkwast.se>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: docs-deps
+
+# Configuration.
+
+ALL_DOC_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(DOC_DEPS))
+
+# Targets.
+
+$(foreach dep,$(DOC_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+doc-deps:
+else
+doc-deps: $(ALL_DOC_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_DOC_DEPS_DIRS) ; do $(MAKE) -C $$dep IS_DEP=1; done
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: rel-deps
+
+# Configuration.
+
+ALL_REL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(REL_DEPS))
+
+# Targets.
+
+$(foreach dep,$(REL_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+rel-deps:
+else
+rel-deps: $(ALL_REL_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_REL_DEPS_DIRS) ; do $(MAKE) -C $$dep; done
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: test-deps test-dir test-build clean-test-dir
+
+# Configuration.
+
+TEST_DIR ?= $(CURDIR)/test
+
+ALL_TEST_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(TEST_DEPS))
+
+TEST_ERLC_OPTS ?= +debug_info +warn_export_vars +warn_shadow_vars +warn_obsolete_guard
+TEST_ERLC_OPTS += -DTEST=1
+
+# Targets.
+
+$(foreach dep,$(TEST_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+test-deps:
+else
+test-deps: $(ALL_TEST_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_TEST_DEPS_DIRS) ; do \
+ if [ -z "$(strip $(FULL))" ] && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ else \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ fi \
+ done
+endif
+
+ifneq ($(wildcard $(TEST_DIR)),)
+test-dir: $(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build
+ @:
+
+test_erlc_verbose_0 = @echo " ERLC " $(filter-out $(patsubst %,%.erl,$(ERLC_EXCLUDE)),\
+ $(filter %.erl %.core,$(notdir $(FILES_TO_COMPILE))));
+test_erlc_verbose_2 = set -x;
+test_erlc_verbose = $(test_erlc_verbose_$(V))
+
+define compile_test_erl
+ $(test_erlc_verbose) erlc -v $(TEST_ERLC_OPTS) -o $(TEST_DIR) \
+ -pa ebin/ -I include/ $(1)
+endef
+
+ERL_TEST_FILES = $(call core_find,$(TEST_DIR)/,*.erl)
+$(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build: $(ERL_TEST_FILES) $(MAKEFILE_LIST)
+ $(eval FILES_TO_COMPILE := $(if $(filter $(MAKEFILE_LIST),$?),$(filter $(ERL_TEST_FILES),$^),$?))
+ $(if $(strip $(FILES_TO_COMPILE)),$(call compile_test_erl,$(FILES_TO_COMPILE)) && touch $@)
+endif
+
+test-build:: IS_TEST=1
+test-build:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build:: $(if $(wildcard src),$(if $(wildcard ebin/test),,clean)) $(if $(IS_APP),,deps test-deps)
+# We already compiled everything when IS_APP=1.
+ifndef IS_APP
+ifneq ($(wildcard src),)
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(verbose) $(MAKE) --no-print-directory app-build ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(gen_verbose) touch ebin/test
+endif
+ifneq ($(wildcard $(TEST_DIR)),)
+ $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+endif
+endif
+
+# Roughly the same as test-build, but when IS_APP=1.
+# We only care about compiling the current application.
+ifdef IS_APP
+test-build-app:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build-app:: deps test-deps
+ifneq ($(wildcard src),)
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(verbose) $(MAKE) --no-print-directory app-build ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(gen_verbose) touch ebin/test
+endif
+ifneq ($(wildcard $(TEST_DIR)),)
+ $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+endif
+endif
+
+clean:: clean-test-dir
+
+clean-test-dir:
+ifneq ($(wildcard $(TEST_DIR)/*.beam),)
+ $(gen_verbose) rm -f $(TEST_DIR)/*.beam $(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: rebar.config
+
+# We strip out -Werror because we don't want to fail due to
+# warnings when used as a dependency.
+
+compat_prepare_erlc_opts = $(shell echo "$1" | sed 's/, */,/g')
+
+define compat_convert_erlc_opts
+$(if $(filter-out -Werror,$1),\
+ $(if $(findstring +,$1),\
+ $(shell echo $1 | cut -b 2-)))
+endef
+
+define compat_erlc_opts_to_list
+[$(call comma_list,$(foreach o,$(call compat_prepare_erlc_opts,$1),$(call compat_convert_erlc_opts,$o)))]
+endef
+
+define compat_rebar_config
+{deps, [
+$(call comma_list,$(foreach d,$(DEPS),\
+ $(if $(filter hex,$(call dep_fetch,$d)),\
+ {$(call dep_name,$d)$(comma)"$(call dep_repo,$d)"},\
+ {$(call dep_name,$d)$(comma)".*"$(comma){git,"$(call dep_repo,$d)"$(comma)"$(call dep_commit,$d)"}})))
+]}.
+{erl_opts, $(call compat_erlc_opts_to_list,$(ERLC_OPTS))}.
+endef
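+
+# Example output (illustrative) for a project with "DEPS = cowboy" pinned
+# to a git tag and the default ERLC_OPTS:
+#
+#   {deps, [
+#   {cowboy,".*",{git,"https://github.com/ninenines/cowboy","2.9.0"}}
+#   ]}.
+#   {erl_opts, [debug_info,warn_export_vars,warn_shadow_vars,warn_obsolete_guard]}.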
+
+rebar.config:
+ $(gen_verbose) $(call core_render,compat_rebar_config,rebar.config)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter asciideck,$(DEPS) $(DOC_DEPS)),asciideck)
+
+.PHONY: asciidoc asciidoc-guide asciidoc-manual install-asciidoc distclean-asciidoc-guide distclean-asciidoc-manual
+
+# Core targets.
+
+docs:: asciidoc
+
+distclean:: distclean-asciidoc-guide distclean-asciidoc-manual
+
+# Plugin-specific targets.
+
+asciidoc: asciidoc-guide asciidoc-manual
+
+# User guide.
+
+ifeq ($(wildcard doc/src/guide/book.asciidoc),)
+asciidoc-guide:
+else
+asciidoc-guide: distclean-asciidoc-guide doc-deps
+ a2x -v -f pdf doc/src/guide/book.asciidoc && mv doc/src/guide/book.pdf doc/guide.pdf
+ a2x -v -f chunked doc/src/guide/book.asciidoc && mv doc/src/guide/book.chunked/ doc/html/
+
+distclean-asciidoc-guide:
+ $(gen_verbose) rm -rf doc/html/ doc/guide.pdf
+endif
+
+# Man pages.
+
+ASCIIDOC_MANUAL_FILES := $(wildcard doc/src/manual/*.asciidoc)
+
+ifeq ($(ASCIIDOC_MANUAL_FILES),)
+asciidoc-manual:
+else
+
+# Configuration.
+
+MAN_INSTALL_PATH ?= /usr/local/share/man
+MAN_SECTIONS ?= 3 7
+MAN_PROJECT ?= $(shell echo $(PROJECT) | sed 's/^./\U&\E/')
+MAN_VERSION ?= $(PROJECT_VERSION)
+
+# Plugin-specific targets.
+
+define asciidoc2man.erl
+try
+ [begin
+ io:format(" ADOC ~s~n", [F]),
+ ok = asciideck:to_manpage(asciideck:parse_file(F), #{
+ compress => gzip,
+ outdir => filename:dirname(F),
+ extra2 => "$(MAN_PROJECT) $(MAN_VERSION)",
+ extra3 => "$(MAN_PROJECT) Function Reference"
+ })
+ end || F <- [$(shell echo $(addprefix $(comma)\",$(addsuffix \",$1)) | sed 's/^.//')]],
+ halt(0)
+catch C:E ->
+ io:format("Exception ~p:~p~nStacktrace: ~p~n", [C, E, erlang:get_stacktrace()]),
+ halt(1)
+end.
+endef
+
+asciidoc-manual:: doc-deps
+
+asciidoc-manual:: $(ASCIIDOC_MANUAL_FILES)
+ $(gen_verbose) $(call erlang,$(call asciidoc2man.erl,$?))
+ $(verbose) $(foreach s,$(MAN_SECTIONS),mkdir -p doc/man$s/ && mv doc/src/manual/*.$s.gz doc/man$s/;)
+
+install-docs:: install-asciidoc
+
+install-asciidoc: asciidoc-manual
+ $(foreach s,$(MAN_SECTIONS),\
+ mkdir -p $(MAN_INSTALL_PATH)/man$s/ && \
+ install -g `id -g` -o `id -u` -m 0644 doc/man$s/*.gz $(MAN_INSTALL_PATH)/man$s/;)
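+
+# For instance, with MAN_SECTIONS = 3, a doc/src/manual/foo.3.asciidoc page
+# (file name illustrative) is compressed to doc/man3/foo.3.gz and installed
+# under $(MAN_INSTALL_PATH)/man3/.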
+
+distclean-asciidoc-manual:
+ $(gen_verbose) rm -rf $(addprefix doc/man,$(MAN_SECTIONS))
+endif
+endif
+
+# Copyright (c) 2014-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: bootstrap bootstrap-lib bootstrap-rel new list-templates
+
+# Core targets.
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Bootstrap targets:" \
+ " bootstrap Generate a skeleton of an OTP application" \
+ " bootstrap-lib Generate a skeleton of an OTP library" \
+ " bootstrap-rel Generate the files needed to build a release" \
+ " new-app in=NAME Create a new local OTP application NAME" \
+ " new-lib in=NAME Create a new local OTP library NAME" \
+ " new t=TPL n=NAME Generate a module NAME based on the template TPL" \
+ " new t=T n=N in=APP Generate a module NAME based on the template TPL in APP" \
+ " list-templates List available templates"
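+
+# A typical bootstrap session (template and module names illustrative):
+#
+#     $ make -f erlang.mk bootstrap bootstrap-rel
+#     $ make new t=gen_server n=my_server
+#     $ make run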
+
+# Bootstrap templates.
+
+define bs_appsrc
+{application, $p, [
+ {description, ""},
+ {vsn, "0.1.0"},
+ {id, "git"},
+ {modules, []},
+ {registered, []},
+ {applications, [
+ kernel,
+ stdlib
+ ]},
+ {mod, {$p_app, []}},
+ {env, []}
+]}.
+endef
+
+define bs_appsrc_lib
+{application, $p, [
+ {description, ""},
+ {vsn, "0.1.0"},
+ {id, "git"},
+ {modules, []},
+ {registered, []},
+ {applications, [
+ kernel,
+ stdlib
+ ]}
+]}.
+endef
+
+# To prevent autocompletion issues with ZSH, we add "include erlang.mk"
+# separately during the actual bootstrap.
+define bs_Makefile
+PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.1.0
+$(if $(SP),
+# Whitespace to be used when creating files from templates.
+SP = $(SP)
+)
+endef
+
+define bs_apps_Makefile
+PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.1.0
+$(if $(SP),
+# Whitespace to be used when creating files from templates.
+SP = $(SP)
+)
+# Make sure we know where the applications are located.
+ROOT_DIR ?= $(call core_relpath,$(dir $(ERLANG_MK_FILENAME)),$(APPS_DIR)/app)
+APPS_DIR ?= ..
+DEPS_DIR ?= $(call core_relpath,$(DEPS_DIR),$(APPS_DIR)/app)
+
+include $$(ROOT_DIR)/erlang.mk
+endef
+
+define bs_app
+-module($p_app).
+-behaviour(application).
+
+-export([start/2]).
+-export([stop/1]).
+
+start(_Type, _Args) ->
+ $p_sup:start_link().
+
+stop(_State) ->
+ ok.
+endef
+
+define bs_relx_config
+{release, {$p_release, "1"}, [$p, sasl, runtime_tools]}.
+{extended_start_script, true}.
+{sys_config, "config/sys.config"}.
+{vm_args, "config/vm.args"}.
+endef
+
+define bs_sys_config
+[
+].
+endef
+
+define bs_vm_args
+-name $p@127.0.0.1
+-setcookie $p
+-heart
+endef
+
+# Normal templates.
+
+define tpl_supervisor
+-module($(n)).
+-behaviour(supervisor).
+
+-export([start_link/0]).
+-export([init/1]).
+
+start_link() ->
+ supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+init([]) ->
+ Procs = [],
+ {ok, {{one_for_one, 1, 5}, Procs}}.
+endef
+
+define tpl_gen_server
+-module($(n)).
+-behaviour(gen_server).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_server.
+-export([init/1]).
+-export([handle_call/3]).
+-export([handle_cast/2]).
+-export([handle_info/2]).
+-export([terminate/2]).
+-export([code_change/3]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_server:start_link(?MODULE, [], []).
+
+%% gen_server.
+
+init([]) ->
+ {ok, #state{}}.
+
+handle_call(_Request, _From, State) ->
+ {reply, ignored, State}.
+
+handle_cast(_Msg, State) ->
+ {noreply, State}.
+
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+endef
+
+define tpl_module
+-module($(n)).
+-export([]).
+endef
+
+define tpl_cowboy_http
+-module($(n)).
+-behaviour(cowboy_http_handler).
+
+-export([init/3]).
+-export([handle/2]).
+-export([terminate/3]).
+
+-record(state, {
+}).
+
+init(_, Req, _Opts) ->
+ {ok, Req, #state{}}.
+
+handle(Req, State=#state{}) ->
+ {ok, Req2} = cowboy_req:reply(200, Req),
+ {ok, Req2, State}.
+
+terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_gen_fsm
+-module($(n)).
+-behaviour(gen_fsm).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_fsm.
+-export([init/1]).
+-export([state_name/2]).
+-export([handle_event/3]).
+-export([state_name/3]).
+-export([handle_sync_event/4]).
+-export([handle_info/3]).
+-export([terminate/3]).
+-export([code_change/4]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_fsm:start_link(?MODULE, [], []).
+
+%% gen_fsm.
+
+init([]) ->
+ {ok, state_name, #state{}}.
+
+state_name(_Event, StateData) ->
+ {next_state, state_name, StateData}.
+
+handle_event(_Event, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+state_name(_Event, _From, StateData) ->
+ {reply, ignored, state_name, StateData}.
+
+handle_sync_event(_Event, _From, StateName, StateData) ->
+ {reply, ignored, StateName, StateData}.
+
+handle_info(_Info, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+terminate(_Reason, _StateName, _StateData) ->
+ ok.
+
+code_change(_OldVsn, StateName, StateData, _Extra) ->
+ {ok, StateName, StateData}.
+endef
+
+define tpl_gen_statem
+-module($(n)).
+-behaviour(gen_statem).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_statem.
+-export([callback_mode/0]).
+-export([init/1]).
+-export([state_name/3]).
+-export([handle_event/4]).
+-export([terminate/3]).
+-export([code_change/4]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_statem:start_link(?MODULE, [], []).
+
+%% gen_statem.
+
+callback_mode() ->
+ state_functions.
+
+init([]) ->
+ {ok, state_name, #state{}}.
+
+state_name(_EventType, _EventData, StateData) ->
+ {next_state, state_name, StateData}.
+
+handle_event(_EventType, _EventData, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+terminate(_Reason, _StateName, _StateData) ->
+ ok.
+
+code_change(_OldVsn, StateName, StateData, _Extra) ->
+ {ok, StateName, StateData}.
+endef
+
+define tpl_cowboy_loop
+-module($(n)).
+-behaviour(cowboy_loop_handler).
+
+-export([init/3]).
+-export([info/3]).
+-export([terminate/3]).
+
+-record(state, {
+}).
+
+init(_, Req, _Opts) ->
+ {loop, Req, #state{}, 5000, hibernate}.
+
+info(_Info, Req, State) ->
+ {loop, Req, State, hibernate}.
+
+terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_cowboy_rest
+-module($(n)).
+
+-export([init/3]).
+-export([content_types_provided/2]).
+-export([get_html/2]).
+
+init(_, _Req, _Opts) ->
+ {upgrade, protocol, cowboy_rest}.
+
+content_types_provided(Req, State) ->
+ {[{{<<"text">>, <<"html">>, '*'}, get_html}], Req, State}.
+
+get_html(Req, State) ->
+ {<<"<html><body>This is REST!</body></html>">>, Req, State}.
+endef
+
+define tpl_cowboy_ws
+-module($(n)).
+-behaviour(cowboy_websocket_handler).
+
+-export([init/3]).
+-export([websocket_init/3]).
+-export([websocket_handle/3]).
+-export([websocket_info/3]).
+-export([websocket_terminate/3]).
+
+-record(state, {
+}).
+
+init(_, _, _) ->
+ {upgrade, protocol, cowboy_websocket}.
+
+websocket_init(_, Req, _Opts) ->
+ Req2 = cowboy_req:compact(Req),
+ {ok, Req2, #state{}}.
+
+websocket_handle({text, Data}, Req, State) ->
+ {reply, {text, Data}, Req, State};
+websocket_handle({binary, Data}, Req, State) ->
+ {reply, {binary, Data}, Req, State};
+websocket_handle(_Frame, Req, State) ->
+ {ok, Req, State}.
+
+websocket_info(_Info, Req, State) ->
+ {ok, Req, State}.
+
+websocket_terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_ranch_protocol
+-module($(n)).
+-behaviour(ranch_protocol).
+
+-export([start_link/4]).
+-export([init/4]).
+
+-type opts() :: [].
+-export_type([opts/0]).
+
+-record(state, {
+ socket :: inet:socket(),
+ transport :: module()
+}).
+
+start_link(Ref, Socket, Transport, Opts) ->
+ Pid = spawn_link(?MODULE, init, [Ref, Socket, Transport, Opts]),
+ {ok, Pid}.
+
+-spec init(ranch:ref(), inet:socket(), module(), opts()) -> ok.
+init(Ref, Socket, Transport, _Opts) ->
+ ok = ranch:accept_ack(Ref),
+ loop(#state{socket=Socket, transport=Transport}).
+
+loop(State) ->
+ loop(State).
+endef
+
+# Plugin-specific targets.
+
+ifndef WS
+ifdef SP
+WS = $(subst a,,a $(wordlist 1,$(SP),a a a a a a a a a a a a a a a a a a a a))
+else
+WS = $(tab)
+endif
+endif
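+
+# In other words, when SP is set, WS becomes SP space characters (built by
+# deleting the "a" words above); otherwise templates are indented with tabs.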
+
+bootstrap:
+ifneq ($(wildcard src/),)
+ $(error Error: src/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(eval n := $(PROJECT)_sup)
+ $(verbose) $(call core_render,bs_Makefile,Makefile)
+ $(verbose) echo "include erlang.mk" >> Makefile
+ $(verbose) mkdir src/
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc,src/$(PROJECT).app.src)
+endif
+ $(verbose) $(call core_render,bs_app,src/$(PROJECT)_app.erl)
+ $(verbose) $(call core_render,tpl_supervisor,src/$(PROJECT)_sup.erl)
+
+bootstrap-lib:
+ifneq ($(wildcard src/),)
+ $(error Error: src/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(verbose) $(call core_render,bs_Makefile,Makefile)
+ $(verbose) echo "include erlang.mk" >> Makefile
+ $(verbose) mkdir src/
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc_lib,src/$(PROJECT).app.src)
+endif
+
+bootstrap-rel:
+ifneq ($(wildcard relx.config),)
+ $(error Error: relx.config already exists)
+endif
+ifneq ($(wildcard config/),)
+ $(error Error: config/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(verbose) $(call core_render,bs_relx_config,relx.config)
+ $(verbose) mkdir config/
+ $(verbose) $(call core_render,bs_sys_config,config/sys.config)
+ $(verbose) $(call core_render,bs_vm_args,config/vm.args)
+
+new-app:
+ifndef in
+ $(error Usage: $(MAKE) new-app in=APP)
+endif
+ifneq ($(wildcard $(APPS_DIR)/$in),)
+ $(error Error: Application $in already exists)
+endif
+ $(eval p := $(in))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(eval n := $(in)_sup)
+ $(verbose) mkdir -p $(APPS_DIR)/$p/src/
+ $(verbose) $(call core_render,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile)
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc,$(APPS_DIR)/$p/src/$p.app.src)
+endif
+ $(verbose) $(call core_render,bs_app,$(APPS_DIR)/$p/src/$p_app.erl)
+ $(verbose) $(call core_render,tpl_supervisor,$(APPS_DIR)/$p/src/$p_sup.erl)
+
+new-lib:
+ifndef in
+ $(error Usage: $(MAKE) new-lib in=APP)
+endif
+ifneq ($(wildcard $(APPS_DIR)/$in),)
+ $(error Error: Application $in already exists)
+endif
+ $(eval p := $(in))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(verbose) mkdir -p $(APPS_DIR)/$p/src/
+ $(verbose) $(call core_render,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile)
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc_lib,$(APPS_DIR)/$p/src/$p.app.src)
+endif
+
+new:
+ifeq ($(wildcard src/)$(in),)
+ $(error Error: src/ directory does not exist)
+endif
+ifndef t
+ $(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP])
+endif
+ifndef n
+ $(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP])
+endif
+ifdef in
+ $(verbose) $(call core_render,tpl_$(t),$(APPS_DIR)/$(in)/src/$(n).erl)
+else
+ $(verbose) $(call core_render,tpl_$(t),src/$(n).erl)
+endif
+
+list-templates:
+ $(verbose) @echo Available templates:
+ $(verbose) printf " %s\n" $(sort $(patsubst tpl_%,%,$(filter tpl_%,$(.VARIABLES))))
+
+# Copyright (c) 2014-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: clean-c_src distclean-c_src-env
+
+# Configuration.
+
+C_SRC_DIR ?= $(CURDIR)/c_src
+C_SRC_ENV ?= $(C_SRC_DIR)/env.mk
+C_SRC_OUTPUT ?= $(CURDIR)/priv/$(PROJECT)
+C_SRC_TYPE ?= shared
+
+# System type and C compiler/flags.
+
+ifeq ($(PLATFORM),msys2)
+ C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?= .exe
+ C_SRC_OUTPUT_SHARED_EXTENSION ?= .dll
+else
+ C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?=
+ C_SRC_OUTPUT_SHARED_EXTENSION ?= .so
+endif
+
+ifeq ($(C_SRC_TYPE),shared)
+ C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_SHARED_EXTENSION)
+else
+ C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_EXECUTABLE_EXTENSION)
+endif
+
+ifeq ($(PLATFORM),msys2)
+# We hardcode the compiler used on MSYS2. The default CC=cc does
+# not produce working code. The "gcc" MSYS2 package also doesn't.
+ CC = /mingw64/bin/gcc
+ export CC
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),darwin)
+ CC ?= cc
+ CFLAGS ?= -O3 -std=c99 -arch x86_64 -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -arch x86_64 -Wall
+ LDFLAGS ?= -arch x86_64 -flat_namespace -undefined suppress
+else ifeq ($(PLATFORM),freebsd)
+ CC ?= cc
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),linux)
+ CC ?= gcc
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+endif
+
+ifneq ($(PLATFORM),msys2)
+ CFLAGS += -fPIC
+ CXXFLAGS += -fPIC
+endif
+
+CFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
+CXXFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
+
+LDLIBS += -L"$(ERL_INTERFACE_LIB_DIR)" -lei
+
+# Verbosity.
+
+c_verbose_0 = @echo " C " $(filter-out $(notdir $(MAKEFILE_LIST) $(C_SRC_ENV)),$(^F));
+c_verbose = $(c_verbose_$(V))
+
+cpp_verbose_0 = @echo " CPP " $(filter-out $(notdir $(MAKEFILE_LIST) $(C_SRC_ENV)),$(^F));
+cpp_verbose = $(cpp_verbose_$(V))
+
+link_verbose_0 = @echo " LD " $(@F);
+link_verbose = $(link_verbose_$(V))
+
+# Targets.
+
+ifeq ($(wildcard $(C_SRC_DIR)),)
+else ifneq ($(wildcard $(C_SRC_DIR)/Makefile),)
+app:: app-c_src
+
+test-build:: app-c_src
+
+app-c_src:
+ $(MAKE) -C $(C_SRC_DIR)
+
+clean::
+ $(MAKE) -C $(C_SRC_DIR) clean
+
+else
+
+ifeq ($(SOURCES),)
+SOURCES := $(sort $(foreach pat,*.c *.C *.cc *.cpp,$(call core_find,$(C_SRC_DIR)/,$(pat))))
+endif
+OBJECTS = $(addsuffix .o, $(basename $(SOURCES)))
+
+COMPILE_C = $(c_verbose) $(CC) $(CFLAGS) $(CPPFLAGS) -c
+COMPILE_CPP = $(cpp_verbose) $(CXX) $(CXXFLAGS) $(CPPFLAGS) -c
+
+app:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
+
+test-build:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
+
+$(C_SRC_OUTPUT_FILE): $(OBJECTS)
+ $(verbose) mkdir -p $(dir $@)
+ $(link_verbose) $(CC) $(OBJECTS) \
+ $(LDFLAGS) $(if $(filter $(C_SRC_TYPE),shared),-shared) $(LDLIBS) \
+ -o $(C_SRC_OUTPUT_FILE)
+
+$(OBJECTS): $(MAKEFILE_LIST) $(C_SRC_ENV)
+
+%.o: %.c
+ $(COMPILE_C) $(OUTPUT_OPTION) $<
+
+%.o: %.cc
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+%.o: %.C
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+%.o: %.cpp
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+clean:: clean-c_src
+
+clean-c_src:
+ $(gen_verbose) rm -f $(C_SRC_OUTPUT_FILE) $(OBJECTS)
+
+endif
+
+ifneq ($(wildcard $(C_SRC_DIR)),)
+ERL_ERTS_DIR = $(shell $(ERL) -eval 'io:format("~s~n", [code:lib_dir(erts)]), halt().')
+
+$(C_SRC_ENV):
+ $(verbose) $(ERL) -eval "file:write_file(\"$(call core_native_path,$(C_SRC_ENV))\", \
+ io_lib:format( \
+ \"# Generated by Erlang.mk. Edit at your own risk!~n~n\" \
+ \"ERTS_INCLUDE_DIR ?= ~s/erts-~s/include/~n\" \
+ \"ERL_INTERFACE_INCLUDE_DIR ?= ~s~n\" \
+ \"ERL_INTERFACE_LIB_DIR ?= ~s~n\" \
+ \"ERTS_DIR ?= $(ERL_ERTS_DIR)~n\", \
+ [code:root_dir(), erlang:system_info(version), \
+ code:lib_dir(erl_interface, include), \
+ code:lib_dir(erl_interface, lib)])), \
+ halt()."
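+
+# The generated env.mk looks roughly like this; the paths depend on the
+# local Erlang installation (values below are illustrative):
+#
+#     ERTS_INCLUDE_DIR ?= /usr/lib/erlang/erts-11.1/include/
+#     ERL_INTERFACE_INCLUDE_DIR ?= /usr/lib/erlang/lib/erl_interface-4.0.1/include
+#     ERL_INTERFACE_LIB_DIR ?= /usr/lib/erlang/lib/erl_interface-4.0.1/lib
+#     ERTS_DIR ?= /usr/lib/erlang/lib/erts-11.1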
+
+distclean:: distclean-c_src-env
+
+distclean-c_src-env:
+ $(gen_verbose) rm -f $(C_SRC_ENV)
+
+-include $(C_SRC_ENV)
+
+ifneq ($(ERL_ERTS_DIR),$(ERTS_DIR))
+$(shell rm -f $(C_SRC_ENV))
+endif
+endif
+
+# Templates.
+
+define bs_c_nif
+#include "erl_nif.h"
+
+static int loads = 0;
+
+static int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
+{
+ /* Initialize private data. */
+ *priv_data = NULL;
+
+ loads++;
+
+ return 0;
+}
+
+static int upgrade(ErlNifEnv* env, void** priv_data, void** old_priv_data, ERL_NIF_TERM load_info)
+{
+ /* Convert the private data to the new version. */
+ *priv_data = *old_priv_data;
+
+ loads++;
+
+ return 0;
+}
+
+static void unload(ErlNifEnv* env, void* priv_data)
+{
+ if (loads == 1) {
+ /* Destroy the private data. */
+ }
+
+ loads--;
+}
+
+static ERL_NIF_TERM hello(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ if (enif_is_atom(env, argv[0])) {
+ return enif_make_tuple2(env,
+ enif_make_atom(env, "hello"),
+ argv[0]);
+ }
+
+ return enif_make_tuple2(env,
+ enif_make_atom(env, "error"),
+ enif_make_atom(env, "badarg"));
+}
+
+static ErlNifFunc nif_funcs[] = {
+ {"hello", 1, hello}
+};
+
+ERL_NIF_INIT($n, nif_funcs, load, NULL, upgrade, unload)
+endef
+
+define bs_erl_nif
+-module($n).
+
+-export([hello/1]).
+
+-on_load(on_load/0).
+on_load() ->
+ PrivDir = case code:priv_dir(?MODULE) of
+ {error, _} ->
+ AppPath = filename:dirname(filename:dirname(code:which(?MODULE))),
+ filename:join(AppPath, "priv");
+ Path ->
+ Path
+ end,
+ erlang:load_nif(filename:join(PrivDir, atom_to_list(?MODULE)), 0).
+
+hello(_) ->
+ erlang:nif_error({not_loaded, ?MODULE}).
+endef
+
+new-nif:
+ifneq ($(wildcard $(C_SRC_DIR)/$n.c),)
+ $(error Error: $(C_SRC_DIR)/$n.c already exists)
+endif
+ifneq ($(wildcard src/$n.erl),)
+ $(error Error: src/$n.erl already exists)
+endif
+ifndef n
+ $(error Usage: $(MAKE) new-nif n=NAME [in=APP])
+endif
+ifdef in
+ $(verbose) $(MAKE) -C $(APPS_DIR)/$(in)/ new-nif n=$n in=
+else
+ $(verbose) mkdir -p $(C_SRC_DIR) src/
+ $(verbose) $(call core_render,bs_c_nif,$(C_SRC_DIR)/$n.c)
+ $(verbose) $(call core_render,bs_erl_nif,src/$n.erl)
+endif
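+
+# A typical session with the templates above (module name illustrative):
+#
+#     $ make new-nif n=my_nif
+#     $ make
+#     $ erl -pa ebin
+#     1> my_nif:hello(world).
+#     {hello,world}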
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: ci ci-prepare ci-setup
+
+CI_OTP ?=
+CI_HIPE ?=
+CI_ERLLVM ?=
+
+ifeq ($(CI_VM),native)
+ERLC_OPTS += +native
+TEST_ERLC_OPTS += +native
+else ifeq ($(CI_VM),erllvm)
+ERLC_OPTS += +native +'{hipe, [to_llvm]}'
+TEST_ERLC_OPTS += +native +'{hipe, [to_llvm]}'
+endif
+
+ifeq ($(strip $(CI_OTP) $(CI_HIPE) $(CI_ERLLVM)),)
+ci::
+else
+
+ci:: $(addprefix ci-,$(CI_OTP) $(addsuffix -native,$(CI_HIPE)) $(addsuffix -erllvm,$(CI_ERLLVM)))
+
+ci-prepare: $(addprefix $(KERL_INSTALL_DIR)/,$(CI_OTP) $(addsuffix -native,$(CI_HIPE)))
+
+ci-setup::
+ $(verbose) :
+
+ci-extra::
+ $(verbose) :
+
+ci_verbose_0 = @echo " CI " $(1);
+ci_verbose = $(ci_verbose_$(V))
+
+define ci_target
+ci-$1: $(KERL_INSTALL_DIR)/$2
+ $(verbose) $(MAKE) --no-print-directory clean
+ $(ci_verbose) \
+ PATH="$(KERL_INSTALL_DIR)/$2/bin:$(PATH)" \
+ CI_OTP_RELEASE="$1" \
+ CT_OPTS="-label $1" \
+ CI_VM="$3" \
+ $(MAKE) ci-setup tests
+ $(verbose) $(MAKE) --no-print-directory ci-extra
+endef
+
+$(foreach otp,$(CI_OTP),$(eval $(call ci_target,$(otp),$(otp),otp)))
+$(foreach otp,$(CI_HIPE),$(eval $(call ci_target,$(otp)-native,$(otp)-native,native)))
+$(foreach otp,$(CI_ERLLVM),$(eval $(call ci_target,$(otp)-erllvm,$(otp)-native,erllvm)))
+
+$(foreach otp,$(filter-out $(ERLANG_OTP),$(CI_OTP)),$(eval $(call kerl_otp_target,$(otp))))
+$(foreach otp,$(filter-out $(ERLANG_HIPE),$(sort $(CI_HIPE) $(CI_ERLLVM))),$(eval $(call kerl_hipe_target,$(otp))))
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Continuous Integration targets:" \
+ " ci Run '$(MAKE) tests' on all configured Erlang versions." \
+ "" \
+ "The CI_OTP variable must be defined with the Erlang versions" \
+ "that must be tested. For example: CI_OTP = OTP-17.3.4 OTP-17.5.3"
+
+endif
+
+# Copyright (c) 2020, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifdef CONCUERROR_TESTS
+
+.PHONY: concuerror distclean-concuerror
+
+# Configuration
+
+CONCUERROR_LOGS_DIR ?= $(CURDIR)/logs
+CONCUERROR_OPTS ?=
+
+# Core targets.
+
+check:: concuerror
+
+ifndef KEEP_LOGS
+distclean:: distclean-concuerror
+endif
+
+# Plugin-specific targets.
+
+$(ERLANG_MK_TMP)/Concuerror/bin/concuerror: | $(ERLANG_MK_TMP)
+ $(verbose) git clone https://github.com/parapluu/Concuerror $(ERLANG_MK_TMP)/Concuerror
+ $(verbose) $(MAKE) -C $(ERLANG_MK_TMP)/Concuerror
+
+$(CONCUERROR_LOGS_DIR):
+ $(verbose) mkdir -p $(CONCUERROR_LOGS_DIR)
+
+define concuerror_html_report
+<!DOCTYPE html>
+<html lang="en">
+<head>
+<meta charset="utf-8">
+<title>Concuerror HTML report</title>
+</head>
+<body>
+<h1>Concuerror HTML report</h1>
+<p>Generated on $(concuerror_date)</p>
+<ul>
+$(foreach t,$(concuerror_targets),<li><a href="$(t).txt">$(t)</a></li>)
+</ul>
+</body>
+</html>
+endef
+
+concuerror: $(addprefix concuerror-,$(subst :,-,$(CONCUERROR_TESTS)))
+ $(eval concuerror_date := $(shell date))
+ $(eval concuerror_targets := $^)
+ $(verbose) $(call core_render,concuerror_html_report,$(CONCUERROR_LOGS_DIR)/concuerror.html)
+
+define concuerror_target
+.PHONY: concuerror-$1-$2
+
+concuerror-$1-$2: test-build | $(ERLANG_MK_TMP)/Concuerror/bin/concuerror $(CONCUERROR_LOGS_DIR)
+ $(ERLANG_MK_TMP)/Concuerror/bin/concuerror \
+ --pa $(CURDIR)/ebin --pa $(TEST_DIR) \
+ -o $(CONCUERROR_LOGS_DIR)/concuerror-$1-$2.txt \
+ $$(CONCUERROR_OPTS) -m $1 -t $2
+endef
+
+$(foreach test,$(CONCUERROR_TESTS),$(eval $(call concuerror_target,$(firstword $(subst :, ,$(test))),$(lastword $(subst :, ,$(test))))))
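+
+# For example, CONCUERROR_TESTS = my_mod:my_test (names illustrative) defines
+# a concuerror-my_mod-my_test target whose output is written to
+# $(CONCUERROR_LOGS_DIR)/concuerror-my_mod-my_test.txt and linked from the
+# HTML report.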
+
+distclean-concuerror:
+ $(gen_verbose) rm -rf $(CONCUERROR_LOGS_DIR)
+
+endif
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: ct apps-ct distclean-ct
+
+# Configuration.
+
+CT_OPTS ?=
+
+ifneq ($(wildcard $(TEST_DIR)),)
+ifndef CT_SUITES
+CT_SUITES := $(sort $(subst _SUITE.erl,,$(notdir $(call core_find,$(TEST_DIR)/,*_SUITE.erl))))
+endif
+endif
+CT_SUITES ?=
+CT_LOGS_DIR ?= $(CURDIR)/logs
+
+# Core targets.
+
+tests:: ct
+
+ifndef KEEP_LOGS
+distclean:: distclean-ct
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Common_test targets:" \
+ " ct Run all the common_test suites for this project" \
+ "" \
+ "All your common_test suites have their associated targets." \
+	"A suite named http_SUITE can be run using the ct-http target."
+
+# Plugin-specific targets.
+
+CT_RUN = ct_run \
+ -no_auto_compile \
+ -noinput \
+ -pa $(CURDIR)/ebin $(TEST_DIR) \
+ -dir $(TEST_DIR) \
+ -logdir $(CT_LOGS_DIR)
+
+ifeq ($(CT_SUITES),)
+ct: $(if $(IS_APP)$(ROOT_DIR),,apps-ct)
+else
+# We do not run tests if we are in an apps/* with no test directory.
+ifneq ($(IS_APP)$(wildcard $(TEST_DIR)),1)
+ct: test-build $(if $(IS_APP)$(ROOT_DIR),,apps-ct)
+ $(verbose) mkdir -p $(CT_LOGS_DIR)
+ $(gen_verbose) $(CT_RUN) -sname ct_$(PROJECT) -suite $(addsuffix _SUITE,$(CT_SUITES)) $(CT_OPTS)
+endif
+endif
+
+ifneq ($(ALL_APPS_DIRS),)
+define ct_app_target
+apps-ct-$1: test-build
+ $$(MAKE) -C $1 ct IS_APP=1
+endef
+
+$(foreach app,$(ALL_APPS_DIRS),$(eval $(call ct_app_target,$(app))))
+
+apps-ct: $(addprefix apps-ct-,$(ALL_APPS_DIRS))
+endif
+
+ifdef t
+ifeq (,$(findstring :,$t))
+CT_EXTRA = -group $t
+else
+t_words = $(subst :, ,$t)
+CT_EXTRA = -group $(firstword $(t_words)) -case $(lastword $(t_words))
+endif
+else
+ifdef c
+CT_EXTRA = -case $c
+else
+CT_EXTRA =
+endif
+endif
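+
+# For instance, for a suite named http_SUITE (group and case names
+# illustrative):
+#
+#     $ make ct-http                      # run the whole suite
+#     $ make ct-http t=my_group           # run one group
+#     $ make ct-http t=my_group:my_case   # run one case within a group
+#     $ make ct-http c=my_case            # run one case directly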
+
+define ct_suite_target
+ct-$(1): test-build
+ $(verbose) mkdir -p $(CT_LOGS_DIR)
+ $(gen_verbose_esc) $(CT_RUN) -sname ct_$(PROJECT) -suite $(addsuffix _SUITE,$(1)) $(CT_EXTRA) $(CT_OPTS)
+endef
+
+$(foreach test,$(CT_SUITES),$(eval $(call ct_suite_target,$(test))))
+
+distclean-ct:
+ $(gen_verbose) rm -rf $(CT_LOGS_DIR)
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: plt distclean-plt dialyze
+
+# Configuration.
+
+DIALYZER_PLT ?= $(CURDIR)/.$(PROJECT).plt
+export DIALYZER_PLT
+
+PLT_APPS ?=
+DIALYZER_DIRS ?= --src -r $(wildcard src) $(ALL_APPS_DIRS)
+DIALYZER_OPTS ?= -Werror_handling -Wrace_conditions -Wunmatched_returns # -Wunderspecs
+DIALYZER_PLT_OPTS ?=
+
+# Core targets.
+
+check:: dialyze
+
+distclean:: distclean-plt
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Dialyzer targets:" \
+ " plt Build a PLT file for this project" \
+ " dialyze Analyze the project using Dialyzer"
+
+# Plugin-specific targets.
+
+define filter_opts.erl
+ Opts = init:get_plain_arguments(),
+ {Filtered, _} = lists:foldl(fun
+ (O, {Os, true}) -> {[O|Os], false};
+ (O = "-D", {Os, _}) -> {[O|Os], true};
+ (O = [\\$$-, \\$$D, _ | _], {Os, _}) -> {[O|Os], false};
+ (O = "-I", {Os, _}) -> {[O|Os], true};
+ (O = [\\$$-, \\$$I, _ | _], {Os, _}) -> {[O|Os], false};
+ (O = "-pa", {Os, _}) -> {[O|Os], true};
+ (_, Acc) -> Acc
+ end, {[], false}, Opts),
+ io:format("~s~n", [string:join(lists:reverse(Filtered), " ")]),
+ halt().
+endef
+
+# DIALYZER_PLT is a variable understood directly by Dialyzer.
+#
+# We append the path to erts at the end of the PLT. This works
+# because the PLT file is in the external term format and the
+# function binary_to_term/1 ignores any trailing data.
+$(DIALYZER_PLT): deps app
+ $(eval DEPS_LOG := $(shell test -f $(ERLANG_MK_TMP)/deps.log && \
+ while read p; do test -d $$p/ebin && echo $$p/ebin; done <$(ERLANG_MK_TMP)/deps.log))
+ $(verbose) dialyzer --build_plt $(DIALYZER_PLT_OPTS) --apps \
+ erts kernel stdlib $(PLT_APPS) $(OTP_DEPS) $(LOCAL_DEPS) $(DEPS_LOG) || test $$? -eq 2
+ $(verbose) $(ERL) -eval 'io:format("~n~s~n", [code:lib_dir(erts)]), halt().' >> $@
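+
+# The trailing-data property can be checked from an Erlang shell
+# (hypothetical session; the PLT uses the same external term format):
+#
+#     1> Bin = term_to_binary(plt_data).
+#     2> binary_to_term(<<Bin/binary, "\n/usr/lib/erlang/lib/erts-11.1\n">>).
+#     plt_data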
+
+plt: $(DIALYZER_PLT)
+
+distclean-plt:
+ $(gen_verbose) rm -f $(DIALYZER_PLT)
+
+ifneq ($(wildcard $(DIALYZER_PLT)),)
+dialyze: $(if $(filter --src,$(DIALYZER_DIRS)),,deps app)
+ $(verbose) if ! tail -n1 $(DIALYZER_PLT) | \
+ grep -q "^`$(ERL) -eval 'io:format("~s", [code:lib_dir(erts)]), halt().'`$$"; then \
+ rm $(DIALYZER_PLT); \
+ $(MAKE) plt; \
+ fi
+else
+dialyze: $(DIALYZER_PLT)
+endif
+ $(verbose) dialyzer --no_native `$(ERL) \
+ -eval "$(subst $(newline),,$(call escape_dquotes,$(call filter_opts.erl)))" \
+ -extra $(ERLC_OPTS)` $(DIALYZER_DIRS) $(DIALYZER_OPTS) $(if $(wildcard ebin/),-pa ebin/)
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-edoc edoc
+
+# Configuration.
+
+EDOC_OPTS ?=
+EDOC_SRC_DIRS ?=
+EDOC_OUTPUT ?= doc
+
+define edoc.erl
+ SrcPaths = lists:foldl(fun(P, Acc) ->
+ filelib:wildcard(atom_to_list(P) ++ "/{src,c_src}") ++ Acc
+ end, [], [$(call comma_list,$(patsubst %,'%',$(call core_native_path,$(EDOC_SRC_DIRS))))]),
+ DefaultOpts = [{dir, "$(EDOC_OUTPUT)"}, {source_path, SrcPaths}, {subpackages, false}],
+ edoc:application($(1), ".", [$(2)] ++ DefaultOpts),
+ halt(0).
+endef
+
+# Core targets.
+
+ifneq ($(strip $(EDOC_SRC_DIRS)$(wildcard doc/overview.edoc)),)
+docs:: edoc
+endif
+
+distclean:: distclean-edoc
+
+# Plugin-specific targets.
+
+edoc: distclean-edoc doc-deps
+ $(gen_verbose) $(call erlang,$(call edoc.erl,$(PROJECT),$(EDOC_OPTS)))
+
+distclean-edoc:
+ $(gen_verbose) rm -f $(EDOC_OUTPUT)/*.css $(EDOC_OUTPUT)/*.html $(EDOC_OUTPUT)/*.png $(EDOC_OUTPUT)/edoc-info
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Configuration.
+
+DTL_FULL_PATH ?=
+DTL_PATH ?= templates/
+DTL_PREFIX ?=
+DTL_SUFFIX ?= _dtl
+DTL_OPTS ?=
+
+# Verbosity.
+
+dtl_verbose_0 = @echo " DTL " $(filter %.dtl,$(?F));
+dtl_verbose = $(dtl_verbose_$(V))
+
+# Core targets.
+
+DTL_PATH := $(abspath $(DTL_PATH))
+DTL_FILES := $(sort $(call core_find,$(DTL_PATH),*.dtl))
+
+ifneq ($(DTL_FILES),)
+
+DTL_NAMES = $(addprefix $(DTL_PREFIX),$(addsuffix $(DTL_SUFFIX),$(DTL_FILES:$(DTL_PATH)/%.dtl=%)))
+DTL_MODULES = $(if $(DTL_FULL_PATH),$(subst /,_,$(DTL_NAMES)),$(notdir $(DTL_NAMES)))
+BEAM_FILES += $(addsuffix .beam,$(addprefix ebin/,$(DTL_MODULES)))
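+
+# For example, templates/users/show.dtl (path illustrative) compiles to the
+# module show_dtl with the defaults (DTL_SUFFIX = _dtl), or to users_show_dtl
+# when DTL_FULL_PATH is set.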
+
+ifneq ($(words $(DTL_FILES)),0)
+# Rebuild templates when the Makefile changes.
+$(ERLANG_MK_TMP)/last-makefile-change-erlydtl: $(MAKEFILE_LIST) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(DTL_FILES); \
+ fi
+ $(verbose) touch $@
+
+ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change-erlydtl
+endif
+
+define erlydtl_compile.erl
+ [begin
+ Module0 = case "$(strip $(DTL_FULL_PATH))" of
+ "" ->
+ filename:basename(F, ".dtl");
+ _ ->
+ "$(call core_native_path,$(DTL_PATH))/" ++ F2 = filename:rootname(F, ".dtl"),
+ re:replace(F2, "/", "_", [{return, list}, global])
+ end,
+ Module = list_to_atom("$(DTL_PREFIX)" ++ string:to_lower(Module0) ++ "$(DTL_SUFFIX)"),
+ case erlydtl:compile(F, Module, [$(DTL_OPTS)] ++ [{out_dir, "ebin/"}, return_errors]) of
+ ok -> ok;
+ {ok, _} -> ok
+ end
+ end || F <- string:tokens("$(1)", " ")],
+ halt().
+endef
+
+ebin/$(PROJECT).app:: $(DTL_FILES) | ebin/
+ $(if $(strip $?),\
+ $(dtl_verbose) $(call erlang,$(call erlydtl_compile.erl,$(call core_native_path,$?)),\
+ -pa ebin/))
+
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, Dave Cottlehuber <dch@skunkwerks.at>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-escript escript escript-zip
+
+# Configuration.
+
+ESCRIPT_NAME ?= $(PROJECT)
+ESCRIPT_FILE ?= $(ESCRIPT_NAME)
+
+ESCRIPT_SHEBANG ?= /usr/bin/env escript
+ESCRIPT_COMMENT ?= This is an -*- erlang -*- file
+ESCRIPT_EMU_ARGS ?= -escript main $(ESCRIPT_NAME)
+
+ESCRIPT_ZIP ?= 7z a -tzip -mx=9 -mtc=off $(if $(filter-out 0,$(V)),,> /dev/null)
+ESCRIPT_ZIP_FILE ?= $(ERLANG_MK_TMP)/escript.zip
+
+# Core targets.
+
+distclean:: distclean-escript
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Escript targets:" \
+ " escript Build an executable escript archive" \
+
+# Plugin-specific targets.
+
+escript-zip:: FULL=1
+escript-zip:: deps app
+ $(verbose) mkdir -p $(dir $(ESCRIPT_ZIP))
+ $(verbose) rm -f $(ESCRIPT_ZIP_FILE)
+ $(gen_verbose) cd .. && $(ESCRIPT_ZIP) $(ESCRIPT_ZIP_FILE) $(PROJECT)/ebin/*
+ifneq ($(DEPS),)
+ $(verbose) cd $(DEPS_DIR) && $(ESCRIPT_ZIP) $(ESCRIPT_ZIP_FILE) \
+ $(subst $(DEPS_DIR)/,,$(addsuffix /*,$(wildcard \
+ $(addsuffix /ebin,$(shell cat $(ERLANG_MK_TMP)/deps.log)))))
+endif
+
+escript:: escript-zip
+ $(gen_verbose) printf "%s\n" \
+ "#!$(ESCRIPT_SHEBANG)" \
+ "%% $(ESCRIPT_COMMENT)" \
+ "%%! $(ESCRIPT_EMU_ARGS)" > $(ESCRIPT_FILE)
+ $(verbose) cat $(ESCRIPT_ZIP_FILE) >> $(ESCRIPT_FILE)
+ $(verbose) chmod +x $(ESCRIPT_FILE)
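+
+# With the defaults, the generated escript therefore starts with:
+#
+#     #!/usr/bin/env escript
+#     %% This is an -*- erlang -*- file
+#     %%! -escript main $(PROJECT)
+#
+# followed by the zip archive produced by escript-zip.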
+
+distclean-escript:
+ $(gen_verbose) rm -f $(ESCRIPT_FILE)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, Enrique Fernandez <enrique.fernandez@erlang-solutions.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: eunit apps-eunit
+
+# Configuration
+
+EUNIT_OPTS ?=
+EUNIT_ERL_OPTS ?=
+
+# Core targets.
+
+tests:: eunit
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "EUnit targets:" \
+ " eunit Run all the EUnit tests for this project"
+
+# Plugin-specific targets.
+
+define eunit.erl
+ $(call cover.erl)
+ CoverSetup(),
+ case eunit:test($1, [$(EUNIT_OPTS)]) of
+ ok -> ok;
+ error -> halt(2)
+ end,
+ CoverExport("$(call core_native_path,$(COVER_DATA_DIR))/eunit.coverdata"),
+ halt()
+endef
+
+EUNIT_ERL_OPTS += -pa $(TEST_DIR) $(CURDIR)/ebin
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+eunit: test-build cover-data-dir
+ $(gen_verbose) $(call erlang,$(call eunit.erl,['$(t)']),$(EUNIT_ERL_OPTS))
+else
+eunit: test-build cover-data-dir
+ $(gen_verbose) $(call erlang,$(call eunit.erl,fun $(t)/0),$(EUNIT_ERL_OPTS))
+endif
+else
+EUNIT_EBIN_MODS = $(notdir $(basename $(ERL_FILES) $(BEAM_FILES)))
+EUNIT_TEST_MODS = $(notdir $(basename $(call core_find,$(TEST_DIR)/,*.erl)))
+
+EUNIT_MODS = $(foreach mod,$(EUNIT_EBIN_MODS) $(filter-out \
+ $(patsubst %,%_tests,$(EUNIT_EBIN_MODS)),$(EUNIT_TEST_MODS)),'$(mod)')
+
+eunit: test-build $(if $(IS_APP)$(ROOT_DIR),,apps-eunit) cover-data-dir
+ifneq ($(wildcard src/ $(TEST_DIR)),)
+ $(gen_verbose) $(call erlang,$(call eunit.erl,[$(call comma_list,$(EUNIT_MODS))]),$(EUNIT_ERL_OPTS))
+endif
+
+ifneq ($(ALL_APPS_DIRS),)
+apps-eunit: test-build
+ $(verbose) eunit_retcode=0 ; for app in $(ALL_APPS_DIRS); do $(MAKE) -C $$app eunit IS_APP=1; \
+ [ $$? -ne 0 ] && eunit_retcode=1 ; done ; \
+ exit $$eunit_retcode
+endif
+endif
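+
+# For instance (module and test names illustrative):
+#
+#     $ make eunit                       # run all EUnit tests found
+#     $ make eunit t=my_module           # run a single module's tests
+#     $ make eunit t=my_module:my_test   # run a single test function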
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter proper,$(DEPS) $(TEST_DEPS)),proper)
+.PHONY: proper
+
+# Targets.
+
+tests:: proper
+
+define proper_check.erl
+ $(call cover.erl)
+ code:add_pathsa([
+ "$(call core_native_path,$(CURDIR)/ebin)",
+ "$(call core_native_path,$(DEPS_DIR)/*/ebin)",
+ "$(call core_native_path,$(TEST_DIR))"]),
+ Module = fun(M) ->
+ [true] =:= lists:usort([
+ case atom_to_list(F) of
+ "prop_" ++ _ ->
+ io:format("Testing ~p:~p/0~n", [M, F]),
+ proper:quickcheck(M:F(), nocolors);
+ _ ->
+ true
+ end
+ || {F, 0} <- M:module_info(exports)])
+ end,
+ try begin
+ CoverSetup(),
+ Res = case $(1) of
+ all -> [true] =:= lists:usort([Module(M) || M <- [$(call comma_list,$(3))]]);
+ module -> Module($(2));
+ function -> proper:quickcheck($(2), nocolors)
+ end,
+ CoverExport("$(COVER_DATA_DIR)/proper.coverdata"),
+ Res
+ end of
+ true -> halt(0);
+ _ -> halt(1)
+ catch error:undef ->
+ io:format("Undefined property or module?~n~p~n", [erlang:get_stacktrace()]),
+ halt(0)
+ end.
+endef
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+proper: test-build cover-data-dir
+ $(verbose) $(call erlang,$(call proper_check.erl,module,$(t)))
+else
+proper: test-build cover-data-dir
+ $(verbose) echo Testing $(t)/0
+ $(verbose) $(call erlang,$(call proper_check.erl,function,$(t)()))
+endif
+else
+proper: test-build cover-data-dir
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(wildcard ebin/*.beam) $(call core_find,$(TEST_DIR)/,*.beam))))))
+ $(gen_verbose) $(call erlang,$(call proper_check.erl,all,undefined,$(MODULES)))
+endif
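+
+# Properties are discovered through exported zero-arity prop_* functions; a
+# minimal module would look like this (name and property illustrative):
+#
+#     -module(prop_demo).
+#     -include_lib("proper/include/proper.hrl").
+#     -export([prop_reverse/0]).
+#
+#     prop_reverse() ->
+#         ?FORALL(L, list(integer()),
+#             lists:reverse(lists:reverse(L)) =:= L).
+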
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Verbosity.
+
+proto_verbose_0 = @echo " PROTO " $(filter %.proto,$(?F));
+proto_verbose = $(proto_verbose_$(V))
+
+# Core targets.
+
+ifneq ($(wildcard src/),)
+ifneq ($(filter gpb protobuffs,$(BUILD_DEPS) $(DEPS)),)
+PROTO_FILES := $(filter %.proto,$(ALL_SRC_FILES))
+ERL_FILES += $(addprefix src/,$(patsubst %.proto,%_pb.erl,$(notdir $(PROTO_FILES))))
+
+ifeq ($(PROTO_FILES),)
+$(ERLANG_MK_TMP)/last-makefile-change-protobuffs:
+ $(verbose) :
+else
+# Rebuild proto files when the Makefile changes.
+# We exclude $(PROJECT).d to avoid a circular dependency.
+$(ERLANG_MK_TMP)/last-makefile-change-protobuffs: $(filter-out $(PROJECT).d,$(MAKEFILE_LIST)) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(PROTO_FILES); \
+ fi
+ $(verbose) touch $@
+
+$(PROJECT).d:: $(ERLANG_MK_TMP)/last-makefile-change-protobuffs
+endif
+
+ifeq ($(filter gpb,$(BUILD_DEPS) $(DEPS)),)
+define compile_proto.erl
+ [begin
+ protobuffs_compile:generate_source(F, [
+ {output_include_dir, "./include"},
+ {output_src_dir, "./src"}])
+ end || F <- string:tokens("$1", " ")],
+ halt().
+endef
+else
+define compile_proto.erl
+ [begin
+ gpb_compile:file(F, [
+ {include_as_lib, true},
+ {module_name_suffix, "_pb"},
+ {o_hrl, "./include"},
+ {o_erl, "./src"}])
+ end || F <- string:tokens("$1", " ")],
+ halt().
+endef
+endif
+
+ifneq ($(PROTO_FILES),)
+$(PROJECT).d:: $(PROTO_FILES)
+ $(verbose) mkdir -p ebin/ include/
+ $(if $(strip $?),$(proto_verbose) $(call erlang,$(call compile_proto.erl,$?)))
+endif
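+
+# e.g. a src/thing.proto file (name illustrative) generates src/thing_pb.erl
+# and include/thing_pb.hrl, and the module is then compiled along with the
+# rest of the sources.
+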
+endif
+endif
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: relx-rel relx-relup distclean-relx-rel run
+
+# Configuration.
+
+RELX ?= $(ERLANG_MK_TMP)/relx
+RELX_CONFIG ?= $(CURDIR)/relx.config
+
+RELX_URL ?= https://erlang.mk/res/relx-v3.27.0
+RELX_OPTS ?=
+RELX_OUTPUT_DIR ?= _rel
+RELX_REL_EXT ?=
+RELX_TAR ?= 1
+
+ifdef SFX
+ RELX_TAR = 1
+endif
+
+ifeq ($(firstword $(RELX_OPTS)),-o)
+ RELX_OUTPUT_DIR = $(word 2,$(RELX_OPTS))
+else
+ RELX_OPTS += -o $(RELX_OUTPUT_DIR)
+endif
+
+# Core targets.
+
+ifeq ($(IS_DEP),)
+ifneq ($(wildcard $(RELX_CONFIG)),)
+rel:: relx-rel
+
+relup:: relx-relup
+endif
+endif
+
+distclean:: distclean-relx-rel
+
+# Plugin-specific targets.
+
+$(RELX): | $(ERLANG_MK_TMP)
+ $(gen_verbose) $(call core_http_get,$(RELX),$(RELX_URL))
+ $(verbose) chmod +x $(RELX)
+
+relx-rel: $(RELX) rel-deps app
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) release
+ $(verbose) $(MAKE) relx-post-rel
+ifeq ($(RELX_TAR),1)
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) tar
+endif
+
+relx-relup: $(RELX) rel-deps app
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) release
+ $(MAKE) relx-post-rel
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) relup $(if $(filter 1,$(RELX_TAR)),tar)
+
+distclean-relx-rel:
+ $(gen_verbose) rm -rf $(RELX_OUTPUT_DIR)
+
+# Default hooks.
+relx-post-rel::
+ $(verbose) :
+
+# Run target.
+
+ifeq ($(wildcard $(RELX_CONFIG)),)
+run::
+else
+
+define get_relx_release.erl
+ {ok, Config} = file:consult("$(call core_native_path,$(RELX_CONFIG))"),
+ {release, {Name, Vsn0}, _} = lists:keyfind(release, 1, Config),
+ Vsn = case Vsn0 of
+ {cmd, Cmd} -> os:cmd(Cmd);
+ semver -> "";
+ {semver, _} -> "";
+ VsnStr -> Vsn0
+ end,
+ Extended = case lists:keyfind(extended_start_script, 1, Config) of
+ {_, true} -> "1";
+ _ -> ""
+ end,
+ io:format("~s ~s ~s", [Name, Vsn, Extended]),
+ halt(0).
+endef
+
+RELX_REL := $(shell $(call erlang,$(get_relx_release.erl)))
+RELX_REL_NAME := $(word 1,$(RELX_REL))
+RELX_REL_VSN := $(word 2,$(RELX_REL))
+RELX_REL_CMD := $(if $(word 3,$(RELX_REL)),console)
+
+ifeq ($(PLATFORM),msys2)
+RELX_REL_EXT := .cmd
+endif
+
+run:: all
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) $(RELX_REL_CMD)
+
+ifdef RELOAD
+rel::
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) ping
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) \
+ eval "io:format(\"~p~n\", [c:lm()])"
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Relx targets:" \
+ " run Compile the project, build the release and run it"
+
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, M Robert Martin <rob@version2beta.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: shell
+
+# Configuration.
+
+SHELL_ERL ?= erl
+SHELL_PATHS ?= $(CURDIR)/ebin $(TEST_DIR)
+SHELL_OPTS ?=
+
+ALL_SHELL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(SHELL_DEPS))
+
+# Core targets
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Shell targets:" \
+	"  shell              Run an Erlang shell with SHELL_OPTS or reasonable defaults"
+
+# Plugin-specific targets.
+
+$(foreach dep,$(SHELL_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+build-shell-deps:
+else
+build-shell-deps: $(ALL_SHELL_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_SHELL_DEPS_DIRS) ; do \
+ if [ -z "$(strip $(FULL))" ] && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ else \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ fi \
+ done
+endif
+
+shell:: build-shell-deps
+ $(gen_verbose) $(SHELL_ERL) -pa $(SHELL_PATHS) $(SHELL_OPTS)
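+
+# e.g. make shell SHELL_OPTS="-sname dev" (node name illustrative) starts a
+# node with ebin/ and $(TEST_DIR) in the code path.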
+
+# Copyright 2017, Stanislaw Klekot <dozzie@jarowit.net>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-sphinx sphinx
+
+# Configuration.
+
+SPHINX_BUILD ?= sphinx-build
+SPHINX_SOURCE ?= doc
+SPHINX_CONFDIR ?=
+SPHINX_FORMATS ?= html
+SPHINX_DOCTREES ?= $(ERLANG_MK_TMP)/sphinx.doctrees
+SPHINX_OPTS ?=
+
+#sphinx_html_opts =
+#sphinx_html_output = html
+#sphinx_man_opts =
+#sphinx_man_output = man
+#sphinx_latex_opts =
+#sphinx_latex_output = latex
+
+# Helpers.
+
+sphinx_build_0 = @echo " SPHINX" $1; $(SPHINX_BUILD) -N -q
+sphinx_build_1 = $(SPHINX_BUILD) -N
+sphinx_build_2 = set -x; $(SPHINX_BUILD)
+sphinx_build = $(sphinx_build_$(V))
+
+define sphinx.build
+$(call sphinx_build,$1) -b $1 -d $(SPHINX_DOCTREES) $(if $(SPHINX_CONFDIR),-c $(SPHINX_CONFDIR)) $(SPHINX_OPTS) $(sphinx_$1_opts) -- $(SPHINX_SOURCE) $(call sphinx.output,$1)
+
+endef
+
+define sphinx.output
+$(if $(sphinx_$1_output),$(sphinx_$1_output),$1)
+endef
+
+# Targets.
+
+ifneq ($(wildcard $(if $(SPHINX_CONFDIR),$(SPHINX_CONFDIR),$(SPHINX_SOURCE))/conf.py),)
+docs:: sphinx
+distclean:: distclean-sphinx
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Sphinx targets:" \
+ " sphinx Generate Sphinx documentation." \
+ "" \
+	"ReST sources and the 'conf.py' file are expected in the directory pointed" \
+	"to by SPHINX_SOURCE ('doc' by default). SPHINX_FORMATS lists the formats to" \
+	"build (only 'html' is generated by default); the target directory can be" \
+	'specified by setting sphinx_$${format}_output, e.g.: sphinx_html_output = output/html' \
+	"Additional Sphinx options can be set in SPHINX_OPTS."
+
+# Plugin-specific targets.
+
+sphinx:
+ $(foreach F,$(SPHINX_FORMATS),$(call sphinx.build,$F))
+
+distclean-sphinx:
+ $(gen_verbose) rm -rf $(filter-out $(SPHINX_SOURCE),$(foreach F,$(SPHINX_FORMATS),$(call sphinx.output,$F)))
+
+# Copyright (c) 2017, Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: show-ERL_LIBS show-ERLC_OPTS show-TEST_ERLC_OPTS
+
+show-ERL_LIBS:
+ @echo $(ERL_LIBS)
+
+show-ERLC_OPTS:
+ @$(foreach opt,$(ERLC_OPTS) -pa ebin -I include,echo "$(opt)";)
+
+show-TEST_ERLC_OPTS:
+ @$(foreach opt,$(TEST_ERLC_OPTS) -pa ebin -I include,echo "$(opt)";)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter triq,$(DEPS) $(TEST_DEPS)),triq)
+.PHONY: triq
+
+# Targets.
+
+tests:: triq
+
+define triq_check.erl
+ $(call cover.erl)
+ code:add_pathsa([
+ "$(call core_native_path,$(CURDIR)/ebin)",
+ "$(call core_native_path,$(DEPS_DIR)/*/ebin)",
+ "$(call core_native_path,$(TEST_DIR))"]),
+ try begin
+ CoverSetup(),
+ Res = case $(1) of
+ all -> [true] =:= lists:usort([triq:check(M) || M <- [$(call comma_list,$(3))]]);
+ module -> triq:check($(2));
+ function -> triq:check($(2))
+ end,
+ CoverExport("$(COVER_DATA_DIR)/triq.coverdata"),
+ Res
+ end of
+ true -> halt(0);
+ _ -> halt(1)
+ catch error:undef ->
+ io:format("Undefined property or module?~n~p~n", [erlang:get_stacktrace()]),
+ halt(0)
+ end.
+endef
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+triq: test-build cover-data-dir
+ $(verbose) $(call erlang,$(call triq_check.erl,module,$(t)))
+else
+triq: test-build cover-data-dir
+ $(verbose) echo Testing $(t)/0
+ $(verbose) $(call erlang,$(call triq_check.erl,function,$(t)()))
+endif
+else
+triq: test-build cover-data-dir
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(wildcard ebin/*.beam) $(call core_find,$(TEST_DIR)/,*.beam))))))
+ $(gen_verbose) $(call erlang,$(call triq_check.erl,all,undefined,$(MODULES)))
+endif
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Erlang Solutions Ltd.
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: xref distclean-xref
+
+# Configuration.
+
+ifeq ($(XREF_CONFIG),)
+ XREFR_ARGS :=
+else
+ XREFR_ARGS := -c $(XREF_CONFIG)
+endif
+
+XREFR ?= $(CURDIR)/xrefr
+export XREFR
+
+XREFR_URL ?= https://github.com/inaka/xref_runner/releases/download/1.1.0/xrefr
+
+# Core targets.
+
+help::
+ $(verbose) printf '%s\n' '' \
+ 'Xref targets:' \
+ ' xref Run Xrefr using $$XREF_CONFIG as config file if defined'
+
+distclean:: distclean-xref
+
+# Plugin-specific targets.
+
+$(XREFR):
+ $(gen_verbose) $(call core_http_get,$(XREFR),$(XREFR_URL))
+ $(verbose) chmod +x $(XREFR)
+
+xref: deps app $(XREFR)
+ $(gen_verbose) $(XREFR) $(XREFR_ARGS)
+
+distclean-xref:
+ $(gen_verbose) rm -rf $(XREFR)
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Viktor Söderqvist <viktor@zuiderkwast.se>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+COVER_REPORT_DIR ?= cover
+COVER_DATA_DIR ?= $(COVER_REPORT_DIR)
+
+ifdef COVER
+COVER_APPS ?= $(notdir $(ALL_APPS_DIRS))
+COVER_DEPS ?=
+endif
+
+# Code coverage for Common Test.
+
+ifdef COVER
+ifdef CT_RUN
+ifneq ($(wildcard $(TEST_DIR)),)
+test-build:: $(TEST_DIR)/ct.cover.spec
+
+$(TEST_DIR)/ct.cover.spec: cover-data-dir
+ $(gen_verbose) printf "%s\n" \
+ "{incl_app, '$(PROJECT)', details}." \
+ "{incl_dirs, '$(PROJECT)', [\"$(call core_native_path,$(CURDIR)/ebin)\" \
+ $(foreach a,$(COVER_APPS),$(comma) \"$(call core_native_path,$(APPS_DIR)/$a/ebin)\") \
+ $(foreach d,$(COVER_DEPS),$(comma) \"$(call core_native_path,$(DEPS_DIR)/$d/ebin)\")]}." \
+ '{export,"$(call core_native_path,$(abspath $(COVER_DATA_DIR))/ct.coverdata)"}.' > $@
+
+CT_RUN += -cover $(TEST_DIR)/ct.cover.spec
+endif
+endif
+endif
+
+# Code coverage for other tools.
+
+ifdef COVER
+define cover.erl
+ CoverSetup = fun() ->
+ Dirs = ["$(call core_native_path,$(CURDIR)/ebin)"
+ $(foreach a,$(COVER_APPS),$(comma) "$(call core_native_path,$(APPS_DIR)/$a/ebin)")
+ $(foreach d,$(COVER_DEPS),$(comma) "$(call core_native_path,$(DEPS_DIR)/$d/ebin)")],
+ [begin
+ case filelib:is_dir(Dir) of
+ false -> false;
+ true ->
+ case cover:compile_beam_directory(Dir) of
+ {error, _} -> halt(1);
+ _ -> true
+ end
+ end
+ end || Dir <- Dirs]
+ end,
+ CoverExport = fun(Filename) -> cover:export(Filename) end,
+endef
+else
+define cover.erl
+ CoverSetup = fun() -> ok end,
+ CoverExport = fun(_) -> ok end,
+endef
+endif
+
+# Core targets
+
+ifdef COVER
+ifneq ($(COVER_REPORT_DIR),)
+tests::
+ $(verbose) $(MAKE) --no-print-directory cover-report
+endif
+
+cover-data-dir: | $(COVER_DATA_DIR)
+
+$(COVER_DATA_DIR):
+ $(verbose) mkdir -p $(COVER_DATA_DIR)
+else
+cover-data-dir:
+endif
+
+clean:: coverdata-clean
+
+ifneq ($(COVER_REPORT_DIR),)
+distclean:: cover-report-clean
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Cover targets:" \
+	"  cover-report  Generate an HTML coverage report from previously collected" \
+ " cover data." \
+ " all.coverdata Merge all coverdata files into all.coverdata." \
+ "" \
+ "If COVER=1 is set, coverage data is generated by the targets eunit and ct. The" \
+	"target tests additionally generates an HTML coverage report from the combined" \
+ "coverdata files from each of these testing tools. HTML reports can be disabled" \
+ "by setting COVER_REPORT_DIR to empty."
+
+# Plugin specific targets
+
+COVERDATA = $(filter-out $(COVER_DATA_DIR)/all.coverdata,$(wildcard $(COVER_DATA_DIR)/*.coverdata))
+
+.PHONY: coverdata-clean
+coverdata-clean:
+ $(gen_verbose) rm -f $(COVER_DATA_DIR)/*.coverdata $(TEST_DIR)/ct.cover.spec
+
+# Merge all coverdata files into one.
+define cover_export.erl
+ $(foreach f,$(COVERDATA),cover:import("$(f)") == ok orelse halt(1),)
+ cover:export("$(COVER_DATA_DIR)/$@"), halt(0).
+endef
+
+all.coverdata: $(COVERDATA) cover-data-dir
+ $(gen_verbose) $(call erlang,$(cover_export.erl))
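+
+# A typical coverage run (COVER=1 enables the instrumentation above):
+#
+#     $ make tests COVER=1     # writes one .coverdata file per testing tool
+#     $ make all.coverdata     # merges them into $(COVER_DATA_DIR)/all.coverdata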
+
+# These are only defined if COVER_REPORT_DIR is non-empty. Set COVER_REPORT_DIR to
+# empty if you want the coverdata files but not the HTML report.
+ifneq ($(COVER_REPORT_DIR),)
+
+.PHONY: cover-report-clean cover-report
+
+cover-report-clean:
+ $(gen_verbose) rm -rf $(COVER_REPORT_DIR)
+ifneq ($(COVER_REPORT_DIR),$(COVER_DATA_DIR))
+ $(if $(shell ls -A $(COVER_DATA_DIR)/),,$(verbose) rmdir $(COVER_DATA_DIR))
+endif
+
+ifeq ($(COVERDATA),)
+cover-report:
+else
+
+# Modules which include eunit.hrl always contain one line without coverage
+# because eunit defines test/0 which is never called. We compensate for this.
+EUNIT_HRL_MODS = $(subst $(space),$(comma),$(shell \
+ grep -H -e '^\s*-include.*include/eunit\.hrl"' src/*.erl \
+ | sed "s/^src\/\(.*\)\.erl:.*/'\1'/" | uniq))
+
+define cover_report.erl
+ $(foreach f,$(COVERDATA),cover:import("$(f)") == ok orelse halt(1),)
+ Ms = cover:imported_modules(),
+ [cover:analyse_to_file(M, "$(COVER_REPORT_DIR)/" ++ atom_to_list(M)
+ ++ ".COVER.html", [html]) || M <- Ms],
+ Report = [begin {ok, R} = cover:analyse(M, module), R end || M <- Ms],
+ EunitHrlMods = [$(EUNIT_HRL_MODS)],
+ Report1 = [{M, {Y, case lists:member(M, EunitHrlMods) of
+ true -> N - 1; false -> N end}} || {M, {Y, N}} <- Report],
+ TotalY = lists:sum([Y || {_, {Y, _}} <- Report1]),
+ TotalN = lists:sum([N || {_, {_, N}} <- Report1]),
+ Perc = fun(Y, N) -> case Y + N of 0 -> 100; S -> round(100 * Y / S) end end,
+ TotalPerc = Perc(TotalY, TotalN),
+ {ok, F} = file:open("$(COVER_REPORT_DIR)/index.html", [write]),
+ io:format(F, "<!DOCTYPE html><html>~n"
+ "<head><meta charset=\"UTF-8\">~n"
+ "<title>Coverage report</title></head>~n"
+ "<body>~n", []),
+ io:format(F, "<h1>Coverage</h1>~n<p>Total: ~p%</p>~n", [TotalPerc]),
+ io:format(F, "<table><tr><th>Module</th><th>Coverage</th></tr>~n", []),
+ [io:format(F, "<tr><td><a href=\"~p.COVER.html\">~p</a></td>"
+ "<td>~p%</td></tr>~n",
+ [M, M, Perc(Y, N)]) || {M, {Y, N}} <- Report1],
+ How = "$(subst $(space),$(comma)$(space),$(basename $(COVERDATA)))",
+ Date = "$(shell date -u "+%Y-%m-%dT%H:%M:%SZ")",
+ io:format(F, "</table>~n"
+ "<p>Generated using ~s and erlang.mk on ~s.</p>~n"
+ "</body></html>", [How, Date]),
+ halt().
+endef
+
+cover-report:
+ $(verbose) mkdir -p $(COVER_REPORT_DIR)
+ $(gen_verbose) $(call erlang,$(cover_report.erl))
+
+endif
+endif # ifneq ($(COVER_REPORT_DIR),)
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: sfx
+
+ifdef RELX_REL
+ifdef SFX
+
+# Configuration.
+
+SFX_ARCHIVE ?= $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/$(RELX_REL_NAME)-$(RELX_REL_VSN).tar.gz
+SFX_OUTPUT_FILE ?= $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME).run
+
+# Core targets.
+
+rel:: sfx
+
+# Plugin-specific targets.
+
+define sfx_stub
+#!/bin/sh
+
+TMPDIR=`mktemp -d`
+ARCHIVE=`awk '/^__ARCHIVE_BELOW__$$/ {print NR + 1; exit 0;}' $$0`
+FILENAME=$$(basename $$0)
+REL=$${FILENAME%.*}
+
+tail -n+$$ARCHIVE $$0 | tar -xzf - -C $$TMPDIR
+
+$$TMPDIR/bin/$$REL console
+RET=$$?
+
+rm -rf $$TMPDIR
+
+exit $$RET
+
+__ARCHIVE_BELOW__
+endef
+
+sfx:
+ $(verbose) $(call core_render,sfx_stub,$(SFX_OUTPUT_FILE))
+ $(gen_verbose) cat $(SFX_ARCHIVE) >> $(SFX_OUTPUT_FILE)
+ $(verbose) chmod +x $(SFX_OUTPUT_FILE)
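+
+# Typical usage (release name illustrative):
+#
+#     $ make rel SFX=1
+#     $ ./_rel/my_release.run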
+
+endif
+endif
+
+# Copyright (c) 2013-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# External plugins.
+
+DEP_PLUGINS ?=
+
+$(foreach p,$(DEP_PLUGINS),\
+ $(eval $(if $(findstring /,$p),\
+ $(call core_dep_plugin,$p,$(firstword $(subst /, ,$p))),\
+ $(call core_dep_plugin,$p/plugins.mk,$p))))
+
+help:: help-plugins
+
+help-plugins::
+ $(verbose) :
+
+# Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015-2016, Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Fetch dependencies recursively (without building them).
+
+.PHONY: fetch-deps fetch-doc-deps fetch-rel-deps fetch-test-deps \
+ fetch-shell-deps
+
+.PHONY: $(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+fetch-deps: $(ERLANG_MK_RECURSIVE_DEPS_LIST)
+fetch-doc-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST)
+fetch-rel-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST)
+fetch-test-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST)
+fetch-shell-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+ifneq ($(SKIP_DEPS),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST):
+ $(verbose) :> $@
+else
+# By default, we fetch "normal" dependencies. They are also included
+# regardless of which type of dependencies is requested.
+#
+# $(ALL_DEPS_DIRS) includes $(BUILD_DEPS).
+
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_DOC_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_REL_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_TEST_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_SHELL_DEPS_DIRS)
+
+# Allow the use of fetch-deps together with $(DEP_TYPES) to fetch multiple
+# types of dependencies with a single target.
+ifneq ($(filter doc,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_DOC_DEPS_DIRS)
+endif
+ifneq ($(filter rel,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_REL_DEPS_DIRS)
+endif
+ifneq ($(filter test,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_TEST_DEPS_DIRS)
+endif
+ifneq ($(filter shell,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_SHELL_DEPS_DIRS)
+endif
+
+ERLANG_MK_RECURSIVE_TMP_LIST := $(abspath $(ERLANG_MK_TMP)/recursive-tmp-deps-$(shell echo $$PPID).log)
+
+$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): | $(ERLANG_MK_TMP)
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_RECURSIVE_TMP_LIST)
+endif
+ $(verbose) touch $(ERLANG_MK_RECURSIVE_TMP_LIST)
+ $(verbose) set -e; for dep in $^ ; do \
+ if ! grep -qs ^$$dep$$ $(ERLANG_MK_RECURSIVE_TMP_LIST); then \
+ echo $$dep >> $(ERLANG_MK_RECURSIVE_TMP_LIST); \
+ if grep -qs -E "^[[:blank:]]*include[[:blank:]]+(erlang\.mk|.*/erlang\.mk|.*ERLANG_MK_FILENAME.*)$$" \
+ $$dep/GNUmakefile $$dep/makefile $$dep/Makefile; then \
+ $(MAKE) -C $$dep fetch-deps \
+ IS_DEP=1 \
+ ERLANG_MK_RECURSIVE_TMP_LIST=$(ERLANG_MK_RECURSIVE_TMP_LIST); \
+ fi \
+ fi \
+ done
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) sort < $(ERLANG_MK_RECURSIVE_TMP_LIST) | \
+ uniq > $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted
+ $(verbose) cmp -s $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted $@ \
+ || mv $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted $@
+ $(verbose) rm -f $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted
+ $(verbose) rm $(ERLANG_MK_RECURSIVE_TMP_LIST)
+endif
+endif # ifneq ($(SKIP_DEPS),)
+
+# List dependencies recursively.
+
+.PHONY: list-deps list-doc-deps list-rel-deps list-test-deps \
+ list-shell-deps
+
+list-deps: $(ERLANG_MK_RECURSIVE_DEPS_LIST)
+list-doc-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST)
+list-rel-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST)
+list-test-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST)
+list-shell-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+list-deps list-doc-deps list-rel-deps list-test-deps list-shell-deps:
+ $(verbose) cat $^
+
+# Query dependencies recursively.
+
+.PHONY: query-deps query-doc-deps query-rel-deps query-test-deps \
+ query-shell-deps
+
+QUERY ?= name fetch_method repo version
+
+define query_target
+$(1): $(2) clean-tmp-query.log
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(4)
+endif
+ $(verbose) $(foreach dep,$(3),\
+ echo $(PROJECT): $(foreach q,$(QUERY),$(call query_$(q),$(dep))) >> $(4) ;)
+ $(if $(filter-out query-deps,$(1)),,\
+ $(verbose) set -e; for dep in $(3) ; do \
+ if grep -qs ^$$$$dep$$$$ $(ERLANG_MK_TMP)/query.log; then \
+ :; \
+ else \
+ echo $$$$dep >> $(ERLANG_MK_TMP)/query.log; \
+ $(MAKE) -C $(DEPS_DIR)/$$$$dep $$@ QUERY="$(QUERY)" IS_DEP=1 || true; \
+ fi \
+ done)
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) touch $(4)
+ $(verbose) cat $(4)
+endif
+endef
+
+clean-tmp-query.log:
+ifeq ($(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_TMP)/query.log
+endif
+
+$(eval $(call query_target,query-deps,$(ERLANG_MK_RECURSIVE_DEPS_LIST),$(BUILD_DEPS) $(DEPS),$(ERLANG_MK_QUERY_DEPS_FILE)))
+$(eval $(call query_target,query-doc-deps,$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST),$(DOC_DEPS),$(ERLANG_MK_QUERY_DOC_DEPS_FILE)))
+$(eval $(call query_target,query-rel-deps,$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST),$(REL_DEPS),$(ERLANG_MK_QUERY_REL_DEPS_FILE)))
+$(eval $(call query_target,query-test-deps,$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST),$(TEST_DEPS),$(ERLANG_MK_QUERY_TEST_DEPS_FILE)))
+$(eval $(call query_target,query-shell-deps,$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST),$(SHELL_DEPS),$(ERLANG_MK_QUERY_SHELL_DEPS_FILE)))
diff --git a/deps/rabbitmq_shovel_management/priv/www/js/shovel.js b/deps/rabbitmq_shovel_management/priv/www/js/shovel.js
new file mode 100644
index 0000000000..a4f85cb988
--- /dev/null
+++ b/deps/rabbitmq_shovel_management/priv/www/js/shovel.js
@@ -0,0 +1,195 @@
+dispatcher_add(function(sammy) {
+ sammy.get('#/shovels', function() {
+ render({'shovels': {path: '/shovels',
+ options: {vhost:true}}},
+ 'shovels', '#/shovels');
+ });
+ sammy.get('#/dynamic-shovels', function() {
+ render({'shovels': {path: '/parameters/shovel',
+ options:{vhost:true}},
+ 'vhosts': '/vhosts'},
+ 'dynamic-shovels', '#/dynamic-shovels');
+ });
+ sammy.get('#/dynamic-shovels/:vhost/:id', function() {
+ render({'shovel': '/parameters/shovel/' + esc(this.params['vhost']) + '/' + esc(this.params['id'])},
+ 'dynamic-shovel', '#/dynamic-shovels');
+ });
+ sammy.put('#/shovel-parameters-move-messages', function() {
+ var num_keys = ['src-prefetch-count', 'reconnect-delay'];
+ var bool_keys = ['dest-add-forward-headers'];
+ var arrayable_keys = ['src-uri', 'dest-uri'];
+ var redirect = this.params['redirect'];
+ if (redirect !== undefined) {
+ delete this.params['redirect'];
+ }
+ put_parameter(this, [], num_keys, bool_keys, arrayable_keys);
+ if (redirect !== undefined) {
+ go_to(redirect);
+ }
+ return false;
+ });
+ sammy.put('#/shovel-parameters', function() {
+ // patch up the protocol selectors
+ var src_proto = this.params['src-protocol-selector'];
+ this.params['src-protocol'] = src_proto.substr(0, src_proto.indexOf('-'));
+ var dest_proto = this.params['dest-protocol-selector'];
+ this.params['dest-protocol'] = dest_proto.substr(0, dest_proto.indexOf('-'));
+
+ //remove fields not required by the selected protocol
+ if (this.params['src-protocol'] == 'amqp10') {
+ remove_params_with(this, 'amqp091-src');
+ } else {
+ remove_params_with(this, 'amqp10-src');
+ }
+ if (this.params['dest-protocol'] == 'amqp10') {
+ remove_params_with(this, 'amqp091-dest');
+ } else {
+ remove_params_with(this, 'amqp10-dest');
+ }
+
+ var trimProtoPrefix = function (x) {
+ if(x.startsWith('amqp10-') || x.startsWith('amqp091-')) {
+ return x.substr(x.indexOf('-') + 1, x.length);
+ }
+ return x;
+ };
+
+ rekey_params(this, trimProtoPrefix);
+
+ var num_keys = ['src-prefetch-count', 'reconnect-delay'];
+
+ //copy the correct delete-after value
+ if (this.params['src-delete-after-selector'] == 'never') {
+ this.params['src-delete-after'] = 'never';
+ } else if (this.params['src-delete-after-selector'] == 'number') {
+ num_keys.push('src-delete-after');
+ }
+
+ delete this.params['src-delete-after-selector'];
+
+ var bool_keys = ['dest-add-forward-headers'];
+ var arrayable_keys = ['src-uri', 'dest-uri'];
+ var redirect = this.params['redirect'];
+ if (redirect !== undefined) {
+ delete this.params['redirect'];
+ }
+ put_parameter(this, [], num_keys, bool_keys, arrayable_keys);
+ if (redirect !== undefined) {
+ go_to(redirect);
+ }
+ return false;
+ });
+ sammy.del('#/shovel-parameters', function() {
+ if (sync_delete(this, '/shovels/vhost/:vhost/:name')) {
+ go_to('#/dynamic-shovels');
+ } else {
+ show_popup('warn', 'Shovel not deleted because it is not running on this node.');
+ return false;
+ }
+ });
+ sammy.del("#/shovel-restart-link", function(){
+ if (sync_delete(this, '/shovels/vhost/:vhost/:name/restart')) {
+ update();
+ } else {
+ show_popup('warn', 'Shovel not restarted because it is not running on this node.');
+ return false;
+ }
+ });
+});
+
+
+NAVIGATION['Admin'][0]['Shovel Status'] = ['#/shovels', "monitoring"];
+NAVIGATION['Admin'][0]['Shovel Management'] = ['#/dynamic-shovels', "policymaker"];
+
+HELP['shovel-uri'] =
+ 'Both source and destination can be either a local or remote broker. See the "URI examples" pane for examples of how to construct URIs. If connecting to a cluster, you can enter several URIs here separated by spaces.';
+
+HELP['shovel-amqp10-address'] =
+  'The AMQP 1.0 address representing the source or target node.';
+
+HELP['shovel-queue-exchange'] =
+ 'You can set both source and destination as either a queue or an exchange. If you choose "queue", it will be declared beforehand; if you choose "exchange" it will not, but an appropriate binding and queue will be created when the source is an exchange.';
+
+HELP['shovel-prefetch'] =
+ 'Maximum number of unacknowledged messages that may be in flight over a shovel at one time. Defaults to 1000 if not set.';
+
+HELP['shovel-reconnect'] =
+ 'Time in seconds to wait after a shovel goes down before attempting reconnection. Defaults to 1 if not set.';
+
+HELP['shovel-forward-headers'] =
+ 'Whether to add headers to the shovelled messages indicating where they have been shovelled from and to. Defaults to false if not set.';
+
+HELP['shovel-ack-mode'] =
+ '<dl>\
+ <dt><code>on-confirm</code></dt>\
+ <dd>Messages are acknowledged at the source after they have been confirmed at the destination. Handles network errors and broker failures without losing messages. The slowest option, and the default.</dd>\
+ <dt><code>on-publish</code></dt>\
+ <dd>Messages are acknowledged at the source after they have been published at the destination. Handles network errors without losing messages, but may lose messages in the event of broker failures.</dd>\
+ <dt><code>no-ack</code></dt>\
+ <dd>Message acknowledgements are not used. The fastest option, but may lose messages in the event of network or broker failures.</dd>\
+</dl>';
+
+HELP['shovel-amqp091-auto-delete'] =
+ '<dl>\
+ <dt><code>Never</code></dt>\
+ <dd>The shovel never deletes itself; it will persist until it is explicitly removed.</dd>\
+ <dt><code>After initial length transferred</code></dt>\
+ <dd>The shovel will check the length of the queue when it starts up. It will transfer that many messages, and then delete itself.</dd>\
+</dl>';
+
+HELP['shovel-amqp10-auto-delete'] =
+ '<dl>\
+ <dt><code>Never</code></dt>\
+ <dd>The shovel never deletes itself; it will persist until it is explicitly removed.</dd>\
+ <dt><code>After num messages</code></dt>\
+ <dd>The shovel will delete itself after the given number of messages have been transferred.</dd>\
+</dl>';
+function remove_params_with(sammy, prefix) {
+ for (var i in sammy.params) {
+ if(i.startsWith(prefix)) {
+ delete sammy.params[i];
+ }
+ }
+}
+
+function rekey_params(sammy, func) {
+ for (var i in sammy.params) {
+ var k = func(i);
+ if(i != k) {
+ var v = sammy.params[i];
+ delete sammy.params[i];
+ sammy.params[k] = v;
+ }
+ }
+}
+function link_shovel(vhost, name) {
+ return _link_to(name, '#/dynamic-shovels/' + esc(vhost) + '/' + esc(name));
+}
+
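+// Renders a shovel's source or destination endpoint: the address for an
+// AMQP 1.0 endpoint, or the queue / exchange (with optional routing key)
+// for an AMQP 0.9.1 endpoint.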
+function fmt_shovel_endpoint(prefix, shovel) {
+ var txt = '';
+ if(shovel[prefix + "protocol"] == 'amqp10') {
+ txt += fmt_string(shovel[prefix + 'address']);
+ } else {
+ if (shovel[prefix + 'queue']) {
+ txt += fmt_string(shovel[prefix + 'queue']) + '<sub>queue</sub>';
+ } else {
+ if (shovel[prefix + 'exchange']) {
+ txt += fmt_string(shovel[prefix + 'exchange']);
+ } else {
+ txt += '<i>as published</i>';
+ }
+ if (shovel[prefix + 'exchange_key']) {
+ txt += ' : ' + fmt_string(shovel[prefix + 'exchange_key']);
+ }
+ txt += '<sub>exchange</sub>';
+ }
+ }
+
+ return txt;
+}
+
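+// Prefer the newer, prefixed parameter key, falling back to the older,
+// un-prefixed name, e.g. 'src-delete-after' vs. 'delete-after'.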
+function fallback_value(shovel, key1, key2) {
+ var v = shovel.value[key1];
+ return (v !== undefined ? v : shovel.value[key2]);
+}
diff --git a/deps/rabbitmq_shovel_management/priv/www/js/tmpl/dynamic-shovel.ejs b/deps/rabbitmq_shovel_management/priv/www/js/tmpl/dynamic-shovel.ejs
new file mode 100644
index 0000000000..58607c4b0f
--- /dev/null
+++ b/deps/rabbitmq_shovel_management/priv/www/js/tmpl/dynamic-shovel.ejs
@@ -0,0 +1,57 @@
+<h1>Dynamic Shovel: <b><%= fmt_string(shovel.name) %></b><%= fmt_maybe_vhost(shovel.vhost) %></h1>
+
+<div class="section">
+ <h2>Overview</h2>
+ <div class="hider">
+ <table class="facts">
+ <tr>
+ <th>Source</th>
+ <td><%= fmt_string(fmt_uri_with_credentials(shovel.value['src-uri'])) %></td>
+ </tr>
+ <tr>
+ <th> </th>
+ <td><%= fmt_shovel_endpoint('src-', shovel.value) %></td>
+ </tr>
+ <tr>
+ <th>Destination</th>
+ <td><%= fmt_string(fmt_uri_with_credentials(shovel.value['dest-uri'])) %></td>
+ </tr>
+ <tr>
+ <th> </th>
+ <td><%= fmt_shovel_endpoint('dest-', shovel.value) %></td>
+ </tr>
+ <tr>
+ <th>Prefetch count</th>
+ <td><%= fmt_string(fallback_value(shovel, 'src-prefetch-count', 'prefetch-count')) %></td>
+ </tr>
+ <tr>
+ <th>Reconnect delay</th>
+ <td><%= fmt_time(shovel.value['reconnect-delay'], 's') %></td>
+ </tr>
+ <tr>
+ <th>Add headers</th>
+ <td><%= fmt_boolean(fallback_value(shovel, 'dest-add-forward-headers', 'add-forward-headers')) %></td>
+ </tr>
+ <tr>
+ <th>Ack mode</th>
+ <td><%= fmt_string(shovel.value['ack-mode']) %></td>
+ </tr>
+ <tr>
+ <th>Auto-delete</th>
+ <td><%= fmt_string(fallback_value(shovel, 'src-delete-after', 'delete-after')) %></td>
+ </tr>
+ </table>
+ </div>
+</div>
+
+<div class="section-hidden">
+ <h2>Delete this shovel</h2>
+ <div class="hider">
+ <form action="#/shovel-parameters" method="delete" class="confirm">
+ <input type="hidden" name="component" value="shovel"/>
+ <input type="hidden" name="vhost" value="<%= fmt_string(shovel.vhost) %>"/>
+ <input type="hidden" name="name" value="<%= fmt_string(shovel.name) %>"/>
+ <input type="submit" value="Delete this shovel"/>
+ </form>
+ </div>
+</div>
diff --git a/deps/rabbitmq_shovel_management/priv/www/js/tmpl/dynamic-shovels.ejs b/deps/rabbitmq_shovel_management/priv/www/js/tmpl/dynamic-shovels.ejs
new file mode 100644
index 0000000000..837674f062
--- /dev/null
+++ b/deps/rabbitmq_shovel_management/priv/www/js/tmpl/dynamic-shovels.ejs
@@ -0,0 +1,365 @@
+<h1>Dynamic Shovels</h1>
+<div class="section">
+ <h2>Shovels</h2>
+ <div class="hider updatable">
+<% if (shovels.length > 0) { %>
+<table class="list">
+ <thead>
+ <tr>
+<% if (vhosts_interesting) { %>
+ <th>Virtual Host</th>
+<% } %>
+ <th>Name</th>
+ <th colspan="4">Source</th>
+ <th colspan="4">Destination</th>
+ <th>Reconnect Delay</th>
+ <th>Ack mode</th>
+ <th>Auto-delete</th>
+ </tr>
+ <tr>
+<% if (vhosts_interesting) { %>
+      <th colspan="2"></th>
+<% } else { %>
+      <th colspan="1"></th>
+<% } %>
+ <th>Protocol</th>
+ <th>Uri</th>
+ <th>Endpoint</th>
+ <th>Prefetch</th>
+ <th>Protocol</th>
+ <th>Uri</th>
+ <th>Endpoint</th>
+ <th>Add headers</th>
+ <th colspan="3"></th>
+ </tr>
+ </thead>
+ <tbody>
+<%
+ for (var i = 0; i < shovels.length; i++) {
+ var shovel = shovels[i];
+%>
+ <tr<%= alt_rows(i)%>>
+<% if (vhosts_interesting) { %>
+ <td><%= fmt_string(shovel.vhost) %></td>
+<% } %>
+ <td><%= link_shovel(shovel.vhost, shovel.name) %></td>
+ <td><%= fmt_string(shovel.value['src-protocol']) %></td>
+ <td><%= fmt_shortened_uri(fmt_uri_with_credentials(shovel.value['src-uri'])) %></td>
+ <td><%= fmt_shovel_endpoint('src-', shovel.value) %></td>
+ <td><%= shovel.value['src-prefetch-count'] %></td>
+ <td><%= fmt_string(shovel.value['dest-protocol']) %></td>
+ <td><%= fmt_shortened_uri(fmt_uri_with_credentials(shovel.value['dest-uri'])) %></td>
+ <td><%= fmt_shovel_endpoint('dest-', shovel.value) %></td>
+ <td><%= fmt_boolean(fallback_value(shovel, 'dest-add-forward-headers', 'add-forward-headers')) %></td>
+ <td class="r"><%= fmt_time(shovel.value['reconnect-delay'], 's') %></td>
+ <td class="c"><%= fmt_string(shovel.value['ack-mode']) %></td>
+ <td><%= fmt_string(fallback_value(shovel, 'src-delete-after', 'delete-after')) %></td>
+ </tr>
+<% } %>
+ </tbody>
+</table>
+<% } else { %>
+ <p>... no shovels ...</p>
+<% } %>
+ </div>
+</div>
+
+<div class="section-hidden">
+ <h2>Add a new shovel</h2>
+ <div class="hider">
+ <form action="#/shovel-parameters" method="put">
+ <input type="hidden" name="component" value="shovel"/>
+ <table class="form dynamic-shovels">
+<% if (vhosts_interesting) { %>
+ <tr>
+ <th><label>Virtual host:</label></th>
+ <td>
+ <select name="vhost">
+ <% for (var i = 0; i < vhosts.length; i++) { %>
+ <option value="<%= fmt_string(vhosts[i].name) %>"><%= fmt_string(vhosts[i].name) %></option>
+ <% } %>
+ </select>
+ </td>
+ </tr>
+<% } else { %>
+ <tr><td><input type="hidden" name="vhost" value="<%= fmt_string(vhosts[0].name) %>"/></td></tr>
+<% } %>
+ <tr>
+ <th><label>Name:</label></th>
+ <td><input type="text" name="name"/><span class="mand">*</span></td>
+ </tr>
+ <tr>
+ <th>Source:</th>
+ <td>
+ <select name="src-protocol-selector" class="controls-appearance">
+ <option value="amqp091-src">AMQP 0.9.1</option>
+ <option value="amqp10-src">AMQP 1.0</option>
+ </select>
+ <div id="amqp10-src-div" style="display: none;">
+ <table class="subform">
+ <tr>
+ <td>
+ <label class="wide">
+ URI:
+ <span class="help" id="shovel-uri"></span>
+ </label>
+ </td>
+ <td>
+ <input type="text" name="amqp10-src-uri" value="amqp://localhost:5672"/>
+ <span class="mand">*</span>
+ </td>
+ </tr>
+ <tr>
+ <td>
+ <label class="wide">
+ Address:
+ <span class="help" id="shovel-amqp10-address"></span>
+ </label>
+            </td>
+ <td><input type="text" name="amqp10-src-address"/></td>
+ </tr>
+ <tr>
+ <td>
+ <label class="wide">
+ Prefetch count:
+ <span class="help" id="shovel-prefetch"></span>
+ </label>
+ </td>
+ <td><input type="text" name="amqp10-src-prefetch-count"/></td>
+ </tr>
+ <tr>
+ <td>
+ <label>
+ Auto-delete
+ <span class="help" id="shovel-amqp10-auto-delete"></span>
+ </label>
+ </td>
+ <td>
+ <select name="amqp10-src-delete-after-selector" class="controls-appearance">
+ <option value="never">Never</option>
+ <option value="number">After num messages</option>
+ </select>
+ </td>
+ <td>
+ <div id="number-div" style="display: none;">
+ <input type="text" name="amqp10-src-delete-after"/>
+ </div>
+ </td>
+ </tr>
+ </table>
+ </div>
+ <div id="amqp091-src-div">
+ <table class="subform">
+ <tr>
+ <td>
+ <label>
+ URI:
+ <span class="help" id="shovel-uri"></span>
+ </label>
+ </td>
+ <td>
+ <select name="queue-or-exchange" class="controls-appearance">
+ <option value="src-queue">Queue:</option>
+ <option value="src-exchange">Exchange:</option>
+ </select>
+ <span class="help" id="shovel-queue-exchange"></span>
+ </td>
+ </tr>
+ <tr>
+ <td>
+ <input type="text" name="src-uri" value="amqp://"/>
+ <span class="mand">*</span>
+ </td>
+ <td>
+ <div id="src-queue-div">
+ <input type="text" name="amqp091-src-queue"/>
+ </div>
+ <div id="src-exchange-div" style="display: none;">
+ <input type="text" name="amqp091-src-exchange"/>
+ Routing key: <input type="text" name="src-exchange-key"/>
+ </div>
+ </td>
+ </tr>
+ <tr>
+ <td>
+ <label>
+ Prefetch count:
+ <span class="help" id="shovel-prefetch"></span>
+ </label>
+ </td>
+ <td><input type="text" name="amqp091-src-prefetch-count"/></td>
+ </tr>
+ <tr>
+ <td>
+ <label>
+ Auto-delete
+ <span class="help" id="shovel-amqp091-auto-delete"></span>
+ </label>
+ </td>
+ <td>
+ <select name="amqp091-src-delete-after">
+ <option value="never">Never</option>
+ <option value="queue-length">After initial length transferred</option>
+ </select>
+ </td>
+ </tr>
+ </table>
+ </div>
+ </td>
+ </tr>
+ <tr>
+ <th>Destination:</th>
+ <td>
+ <select name="dest-protocol-selector" class="controls-appearance">
+ <option value="amqp091-dest">AMQP 0.9.1</option>
+ <option value="amqp10-dest">AMQP 1.0</option>
+ </select>
+ <div id="amqp10-dest-div" style="display: none;">
+ <table class="subform">
+ <tr>
+ <td>
+ <label>
+ URI:
+ <span class="help" id="shovel-uri"></span>
+ </label>
+ </td>
+ <td>
+ <input type="text" name="dest-uri" value="amqp://localhost:5672"/>
+ <span class="mand">*</span>
+ </td>
+ </tr>
+ <tr>
+ <td>
+ <label>
+ Address:
+ <span class="help" id="shovel-amqp10-address"></span>
+ </label>
+ </td>
+ <td><input type="text" name="amqp10-dest-address"/></td>
+ </tr>
+ <tr>
+ <td>
+ <label>
+ Add forwarding headers:
+ <span class="help" id="shovel-forward-headers"></span>
+ </label>
+ </td>
+ <td>
+ <select name="amqp10-dest-add-forward-headers">
+ <option value="false">No</option>
+ <option value="true">Yes</option>
+ </select>
+ </td>
+ </tr>
+ </table>
+ </div>
+ <div id="amqp091-dest-div">
+ <table class="subform">
+ <tr>
+ <td>
+ <label>
+ URI
+ <span class="help" id="shovel-uri"></span>
+ </label>
+ </td>
+ <td>
+ <select name="queue-or-exchange" class="narrow controls-appearance">
+ <option value="dest-queue">Queue:</option>
+ <option value="dest-exchange">Exchange:</option>
+ </select>
+ <span class="help" id="shovel-queue-exchange"></span>
+ </td>
+ </tr>
+ <tr>
+ <td><input type="text" name="amqp091-dest-uri" value="amqp://"/><span class="mand">*</span></td>
+ <td>
+ <div id="dest-queue-div">
+ <input type="text" name="amqp091-dest-queue"/>
+ </div>
+ <div id="dest-exchange-div" style="display: none;">
+ <input type="text" name="amqp091-dest-exchange"/>
+ Routing key: <input type="text" name="dest-exchange-key"/>
+ </div>
+ </td>
+ </tr>
+ <tr>
+ <td>
+ <label>
+ Add forwarding headers:
+ <span class="help" id="shovel-forward-headers"></span>
+ </label>
+ </td>
+ <td>
+ <select name="amqp091-dest-add-forward-headers">
+ <option value="false">No</option>
+ <option value="true">Yes</option>
+ </select>
+ </td>
+ </tr>
+ </table>
+ </div>
+ </td>
+ </tr>
+ <tr>
+ <th>
+ <label>
+ Reconnect delay:
+ <span class="help" id="shovel-reconnect"></span>
+ </label>
+ </th>
+ <td><input type="text" name="reconnect-delay"/> s</td>
+ </tr>
+ <tr>
+ <th>
+ <label>
+ Acknowledgement mode:
+ <span class="help" id="shovel-ack-mode"></span>
+ </label>
+ </th>
+ <td>
+ <select name="ack-mode">
+ <option value="on-confirm">On confirm</option>
+ <option value="on-publish">On publish</option>
+ <option value="no-ack">No ack</option>
+ </select>
+ </td>
+ </tr>
+ </table>
+ <input type="submit" value="Add shovel"/>
+ </form>
+ </div>
+</div>
+<div class="section-hidden">
+ <h2>URI examples</h2>
+ <div class="hider">
+ <ul>
+ <li>
+ <code>amqp://</code><br/>
+ connect to local server as default user
+ </li><br />
+ <li>
+ <code>amqp://user@/my-vhost</code><br/>
+ connect to local server with alternate user and virtual host
+ (passwords are not required for local connections)
+ </li><br />
+ <li>
+ <code>amqp://server-name</code><br/>
+      connect to server-name without SSL, using the default credentials
+ </li><br />
+ <li>
+ <code>amqp://user:password@server-name/my-vhost</code><br/>
+ connect to server-name, with credentials and overridden
+ virtual host
+ </li><br />
+ <li>
+ <code>amqps://user:password@server-name?cacertfile=/path/to/cacert.pem&certfile=/path/to/cert.pem&keyfile=/path/to/key.pem&verify=verify_peer</code><br/>
+ connect to server-name, with credentials and SSL
+ </li><br />
+ <li>
+ <code>amqps://server-name?cacertfile=/path/to/cacert.pem&certfile=/path/to/cert.pem&keyfile=/path/to/key.pem&verify=verify_peer&fail_if_no_peer_cert=true&auth_mechanism=external</code><br/>
+ connect to server-name, with SSL and EXTERNAL authentication
+ </li>
+ </ul>
+ </div>
+</div>
diff --git a/deps/rabbitmq_shovel_management/priv/www/js/tmpl/shovels.ejs b/deps/rabbitmq_shovel_management/priv/www/js/tmpl/shovels.ejs
new file mode 100644
index 0000000000..b2a360bc10
--- /dev/null
+++ b/deps/rabbitmq_shovel_management/priv/www/js/tmpl/shovels.ejs
@@ -0,0 +1,78 @@
+<h1>Shovel Status</h1>
+<%
+ var extra_width = 0;
+ if (vhosts_interesting) extra_width++;
+ if (nodes_interesting) extra_width++;
+%>
+<div class="updatable">
+<% if (shovels.length > 0) { %>
+<table class="list">
+ <thead>
+ <tr>
+ <th>Name</th>
+<% if (nodes_interesting) { %>
+ <th>Node</th>
+<% } %>
+<% if (vhosts_interesting) { %>
+ <th>Virtual Host</th>
+<% } %>
+ <th>State</th>
+ <th colspan="3">Source</th>
+ <th colspan="3">Destination</th>
+ <th>Last changed</th>
+ <th>Operations</th>
+ </tr>
+ </thead>
+ <tbody>
+<%
+ for (var i = 0; i < shovels.length; i++) {
+ var shovel = shovels[i];
+%>
+ <tr<%= alt_rows(i)%>>
+ <td>
+ <%= fmt_string(shovel.name) %>
+ <sub><%= fmt_string(shovel.type) %></sub>
+ </td>
+<% if (nodes_interesting) { %>
+ <td><%= fmt_node(shovel.node) %></td>
+<% } %>
+<% if (vhosts_interesting) { %>
+ <td><%= fmt_string(shovel.vhost, '') %></td>
+<% } %>
+<% if (shovel.state == 'terminated') { %>
+ <td colspan="5"><%= fmt_state('red', shovel.state) %></td>
+ <td><%= shovel.timestamp %></td>
+ </tr>
+ <tr>
+ <td colspan="<%= 8 + extra_width %>">
+ <pre><%= fmt_string(shovel.reason) %></pre>
+ </td>
+<% } else { %>
+ <td><%= fmt_state('green', shovel.state) %></td>
+ <td><%= fmt_string(shovel.src_protocol) %></td>
+ <td><%= shovel.src_uri == undefined ? fmt_string(shovel.src_uri) : fmt_string(fmt_uri_with_credentials(shovel.src_uri)) %></td>
+ <td><%= fmt_shovel_endpoint('src_', shovel) %></td>
+ <td><%= fmt_string(shovel.dest_protocol) %></td>
+ <td><%= shovel.dest_uri == undefined ? fmt_string(shovel.dest_uri) : fmt_string(fmt_uri_with_credentials(shovel.dest_uri)) %></td>
+ <td><%= fmt_shovel_endpoint('dest_', shovel) %></td>
+ <td><%= shovel.timestamp %></td>
+<% } %>
+<% if (shovel.type == 'dynamic') { %>
+ <td>
+ <form action="#/shovel-restart-link" method="delete" class="confirm">
+ <input type="hidden" name="name" value="<%= fmt_node(shovel.name) %>"/>
+ <input type="hidden" name="vhost" value="<%= fmt_string(shovel.vhost) %>"/>
+ <input type="submit" value="Restart"/>
+ </form>
+ </td>
+<% } else { %>
+ <td/>
+ <% } %>
+ </tr>
+ <% } %>
+ </tbody>
+</table>
+<% } else { %>
+ <p>... no shovels ...</p>
+<% } %>
+</div>
diff --git a/deps/rabbitmq_shovel_management/rabbitmq-components.mk b/deps/rabbitmq_shovel_management/rabbitmq-components.mk
new file mode 100644
index 0000000000..b2a3be8b35
--- /dev/null
+++ b/deps/rabbitmq_shovel_management/rabbitmq-components.mk
@@ -0,0 +1,359 @@
+ifeq ($(.DEFAULT_GOAL),)
+# Define the default goal as `all` because this file defines some targets
+# before the inclusion of erlang.mk, which would otherwise make the wrong
+# target the default.
+.DEFAULT_GOAL = all
+endif
+
+# PROJECT_VERSION defaults to:
+# 1. the version exported by rabbitmq-server-release;
+# 2. the version stored in `git-revisions.txt`, if it exists;
+# 3. a version based on git-describe(1), if it is a Git clone;
+# 4. 0.0.0
+
+PROJECT_VERSION := $(RABBITMQ_VERSION)
+
+ifeq ($(PROJECT_VERSION),)
+PROJECT_VERSION := $(shell \
+if test -f git-revisions.txt; then \
+ head -n1 git-revisions.txt | \
+ awk '{print $$$(words $(PROJECT_DESCRIPTION) version);}'; \
+else \
+ (git describe --dirty --abbrev=7 --tags --always --first-parent \
+ 2>/dev/null || echo rabbitmq_v0_0_0) | \
+ sed -e 's/^rabbitmq_v//' -e 's/^v//' -e 's/_/./g' -e 's/-/+/' \
+ -e 's/-/./g'; \
+fi)
+endif
+
+# --------------------------------------------------------------------
+# RabbitMQ components.
+# --------------------------------------------------------------------
+
+# For RabbitMQ repositories, we want to checkout branches which match
+# the parent project. For instance, if the parent project is on a
+# release tag, dependencies must be on the same release tag. If the
+# parent project is on a topic branch, dependencies must be on the same
+# topic branch or fallback to `stable` or `master` whichever was the
+# base of the topic branch.
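+#
+# For example (illustrative): on a topic branch "my-feature", each
+# dependency below is checked out at "my-feature" when that ref exists,
+# falling back to the base branch and finally to "master".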
+
+dep_amqp_client = git_rmq rabbitmq-erlang-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_amqp10_client = git_rmq rabbitmq-amqp1.0-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_amqp10_common = git_rmq rabbitmq-amqp1.0-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbit = git_rmq rabbitmq-server $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbit_common = git_rmq rabbitmq-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_amqp1_0 = git_rmq rabbitmq-amqp1.0 $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_amqp = git_rmq rabbitmq-auth-backend-amqp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_cache = git_rmq rabbitmq-auth-backend-cache $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_http = git_rmq rabbitmq-auth-backend-http $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_ldap = git_rmq rabbitmq-auth-backend-ldap $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_oauth2 = git_rmq rabbitmq-auth-backend-oauth2 $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_mechanism_ssl = git_rmq rabbitmq-auth-mechanism-ssl $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_aws = git_rmq rabbitmq-aws $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_boot_steps_visualiser = git_rmq rabbitmq-boot-steps-visualiser $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_cli = git_rmq rabbitmq-cli $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_codegen = git_rmq rabbitmq-codegen $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_consistent_hash_exchange = git_rmq rabbitmq-consistent-hash-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_ct_client_helpers = git_rmq rabbitmq-ct-client-helpers $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_ct_helpers = git_rmq rabbitmq-ct-helpers $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_delayed_message_exchange = git_rmq rabbitmq-delayed-message-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_dotnet_client = git_rmq rabbitmq-dotnet-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_event_exchange = git_rmq rabbitmq-event-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_federation = git_rmq rabbitmq-federation $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_federation_management = git_rmq rabbitmq-federation-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_java_client = git_rmq rabbitmq-java-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_client = git_rmq rabbitmq-jms-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_cts = git_rmq rabbitmq-jms-cts $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_topic_exchange = git_rmq rabbitmq-jms-topic-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_lvc_exchange = git_rmq rabbitmq-lvc-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management = git_rmq rabbitmq-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_agent = git_rmq rabbitmq-management-agent $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_exchange = git_rmq rabbitmq-management-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_themes = git_rmq rabbitmq-management-themes $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_message_timestamp = git_rmq rabbitmq-message-timestamp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_metronome = git_rmq rabbitmq-metronome $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_mqtt = git_rmq rabbitmq-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_objc_client = git_rmq rabbitmq-objc-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_aws = git_rmq rabbitmq-peer-discovery-aws $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_common = git_rmq rabbitmq-peer-discovery-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_consul = git_rmq rabbitmq-peer-discovery-consul $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_etcd = git_rmq rabbitmq-peer-discovery-etcd $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_k8s = git_rmq rabbitmq-peer-discovery-k8s $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_prometheus = git_rmq rabbitmq-prometheus $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_random_exchange = git_rmq rabbitmq-random-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_recent_history_exchange = git_rmq rabbitmq-recent-history-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_routing_node_stamp = git_rmq rabbitmq-routing-node-stamp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_rtopic_exchange = git_rmq rabbitmq-rtopic-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_server_release = git_rmq rabbitmq-server-release $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_sharding = git_rmq rabbitmq-sharding $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_shovel = git_rmq rabbitmq-shovel $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_shovel_management = git_rmq rabbitmq-shovel-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_stomp = git_rmq rabbitmq-stomp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_stream = git_rmq rabbitmq-stream $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_toke = git_rmq rabbitmq-toke $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_top = git_rmq rabbitmq-top $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_tracing = git_rmq rabbitmq-tracing $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_trust_store = git_rmq rabbitmq-trust-store $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_test = git_rmq rabbitmq-test $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_dispatch = git_rmq rabbitmq-web-dispatch $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_stomp = git_rmq rabbitmq-web-stomp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_stomp_examples = git_rmq rabbitmq-web-stomp-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt = git_rmq rabbitmq-web-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt_examples = git_rmq rabbitmq-web-mqtt-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_website = git_rmq rabbitmq-website $(current_rmq_ref) $(base_rmq_ref) live master
+dep_toke = git_rmq toke $(current_rmq_ref) $(base_rmq_ref) master
+
+dep_rabbitmq_public_umbrella = git_rmq rabbitmq-public-umbrella $(current_rmq_ref) $(base_rmq_ref) master
+
+# Third-party dependencies version pinning.
+#
+# We do that in this file, which is copied in all projects, to ensure
+# all projects use the same versions. It avoids conflicts and makes it
+# possible to work with rabbitmq-public-umbrella.
+
+dep_accept = hex 0.3.5
+dep_cowboy = hex 2.8.0
+dep_cowlib = hex 2.9.1
+dep_jsx = hex 2.11.0
+dep_lager = hex 3.8.0
+dep_prometheus = git https://github.com/deadtrickster/prometheus.erl.git master
+dep_ra = git https://github.com/rabbitmq/ra.git master
+dep_ranch = hex 1.7.1
+dep_recon = hex 2.5.1
+dep_observer_cli = hex 1.5.4
+dep_stdout_formatter = hex 0.2.4
+dep_sysmon_handler = hex 1.3.0
+
+RABBITMQ_COMPONENTS = amqp_client \
+ amqp10_common \
+ amqp10_client \
+ rabbit \
+ rabbit_common \
+ rabbitmq_amqp1_0 \
+ rabbitmq_auth_backend_amqp \
+ rabbitmq_auth_backend_cache \
+ rabbitmq_auth_backend_http \
+ rabbitmq_auth_backend_ldap \
+ rabbitmq_auth_backend_oauth2 \
+ rabbitmq_auth_mechanism_ssl \
+ rabbitmq_aws \
+ rabbitmq_boot_steps_visualiser \
+ rabbitmq_cli \
+ rabbitmq_codegen \
+ rabbitmq_consistent_hash_exchange \
+ rabbitmq_ct_client_helpers \
+ rabbitmq_ct_helpers \
+ rabbitmq_delayed_message_exchange \
+ rabbitmq_dotnet_client \
+ rabbitmq_event_exchange \
+ rabbitmq_federation \
+ rabbitmq_federation_management \
+ rabbitmq_java_client \
+ rabbitmq_jms_client \
+ rabbitmq_jms_cts \
+ rabbitmq_jms_topic_exchange \
+ rabbitmq_lvc_exchange \
+ rabbitmq_management \
+ rabbitmq_management_agent \
+ rabbitmq_management_exchange \
+ rabbitmq_management_themes \
+ rabbitmq_message_timestamp \
+ rabbitmq_metronome \
+ rabbitmq_mqtt \
+ rabbitmq_objc_client \
+ rabbitmq_peer_discovery_aws \
+ rabbitmq_peer_discovery_common \
+ rabbitmq_peer_discovery_consul \
+ rabbitmq_peer_discovery_etcd \
+ rabbitmq_peer_discovery_k8s \
+ rabbitmq_prometheus \
+ rabbitmq_random_exchange \
+ rabbitmq_recent_history_exchange \
+ rabbitmq_routing_node_stamp \
+ rabbitmq_rtopic_exchange \
+ rabbitmq_server_release \
+ rabbitmq_sharding \
+ rabbitmq_shovel \
+ rabbitmq_shovel_management \
+ rabbitmq_stomp \
+ rabbitmq_stream \
+ rabbitmq_toke \
+ rabbitmq_top \
+ rabbitmq_tracing \
+ rabbitmq_trust_store \
+ rabbitmq_web_dispatch \
+ rabbitmq_web_mqtt \
+ rabbitmq_web_mqtt_examples \
+ rabbitmq_web_stomp \
+ rabbitmq_web_stomp_examples \
+ rabbitmq_website
+
+# Erlang.mk does not rebuild dependencies by default, once they were
+# compiled once, except for those listed in the `$(FORCE_REBUILD)`
+# variable.
+#
+# We want all RabbitMQ components to always be rebuilt: this eases
+# the work on several components at the same time.
+
+FORCE_REBUILD = $(RABBITMQ_COMPONENTS)
+
+# Several components have a custom erlang.mk/build.config, mainly
+# to disable eunit. Therefore, we can't use the top-level project's
+# erlang.mk copy.
+NO_AUTOPATCH += $(RABBITMQ_COMPONENTS)
+
+ifeq ($(origin current_rmq_ref),undefined)
+ifneq ($(wildcard .git),)
+current_rmq_ref := $(shell (\
+ ref=$$(LANG=C git branch --list | awk '/^\* \(.*detached / {ref=$$0; sub(/.*detached [^ ]+ /, "", ref); sub(/\)$$/, "", ref); print ref; exit;} /^\* / {ref=$$0; sub(/^\* /, "", ref); print ref; exit}');\
+ if test "$$(git rev-parse --short HEAD)" != "$$ref"; then echo "$$ref"; fi))
+else
+current_rmq_ref := master
+endif
+endif
+export current_rmq_ref
+
+ifeq ($(origin base_rmq_ref),undefined)
+ifneq ($(wildcard .git),)
+possible_base_rmq_ref := master
+ifeq ($(possible_base_rmq_ref),$(current_rmq_ref))
+base_rmq_ref := $(current_rmq_ref)
+else
+base_rmq_ref := $(shell \
+ (git rev-parse --verify -q master >/dev/null && \
+ git rev-parse --verify -q $(possible_base_rmq_ref) >/dev/null && \
+ git merge-base --is-ancestor $$(git merge-base master HEAD) $(possible_base_rmq_ref) && \
+ echo $(possible_base_rmq_ref)) || \
+ echo master)
+endif
+else
+base_rmq_ref := master
+endif
+endif
+export base_rmq_ref
+
+# Repository URL selection.
+#
+# First, we infer other components' location from the current project
+# repository URL, if it's a Git repository:
+# - We take the "origin" remote URL as the base
+# - The current project name and repository name is replaced by the
+# target's properties:
+# eg. rabbitmq-common is replaced by rabbitmq-codegen
+# eg. rabbit_common is replaced by rabbitmq_codegen
+#
+# If cloning from this computed location fails, we fallback to RabbitMQ
+# upstream which is GitHub.
+
+# Macro to transform eg. "rabbit_common" to "rabbitmq-common".
+rmq_cmp_repo_name = $(word 2,$(dep_$(1)))
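+# e.g. $(call rmq_cmp_repo_name,rabbit_common) expands to "rabbitmq-common".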
+
+# Upstream URL for the current project.
+RABBITMQ_COMPONENT_REPO_NAME := $(call rmq_cmp_repo_name,$(PROJECT))
+RABBITMQ_UPSTREAM_FETCH_URL ?= https://github.com/rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git
+RABBITMQ_UPSTREAM_PUSH_URL ?= git@github.com:rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git
+
+# Current URL for the current project. If this is not a Git clone,
+# default to the upstream Git repository.
+ifneq ($(wildcard .git),)
+git_origin_fetch_url := $(shell git config remote.origin.url)
+git_origin_push_url := $(shell git config remote.origin.pushurl || git config remote.origin.url)
+RABBITMQ_CURRENT_FETCH_URL ?= $(git_origin_fetch_url)
+RABBITMQ_CURRENT_PUSH_URL ?= $(git_origin_push_url)
+else
+RABBITMQ_CURRENT_FETCH_URL ?= $(RABBITMQ_UPSTREAM_FETCH_URL)
+RABBITMQ_CURRENT_PUSH_URL ?= $(RABBITMQ_UPSTREAM_PUSH_URL)
+endif
+
+# Macro to replace the following pattern:
+# 1. /foo.git -> /bar.git
+# 2. /foo -> /bar
+# 3. /foo/ -> /bar/
+subst_repo_name = $(patsubst %/$(1)/%,%/$(2)/%,$(patsubst %/$(1),%/$(2),$(patsubst %/$(1).git,%/$(2).git,$(3))))
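+# e.g. $(call subst_repo_name,rabbitmq-common,rabbitmq-codegen,<url>)
+# rewrites ".../rabbitmq-common.git" in <url> to ".../rabbitmq-codegen.git".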
+
+# Macro to replace both the project's name (eg. "rabbit_common") and
+# repository name (eg. "rabbitmq-common") by the target's equivalent.
+#
+# This macro is kept on one line because we don't want whitespaces in
+# the returned value, as it's used in $(dep_fetch_git_rmq) in a shell
+# single-quoted string.
+dep_rmq_repo = $(if $(dep_$(2)),$(call subst_repo_name,$(PROJECT),$(2),$(call subst_repo_name,$(RABBITMQ_COMPONENT_REPO_NAME),$(call rmq_cmp_repo_name,$(2)),$(1))),$(pkg_$(1)_repo))
+
+dep_rmq_commits = $(if $(dep_$(1)), \
+ $(wordlist 3,$(words $(dep_$(1))),$(dep_$(1))), \
+ $(pkg_$(1)_commit))
+
+define dep_fetch_git_rmq
+ fetch_url1='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_FETCH_URL),$(1))'; \
+ fetch_url2='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_FETCH_URL),$(1))'; \
+ if test "$$$$fetch_url1" != '$(RABBITMQ_CURRENT_FETCH_URL)' && \
+ git clone -q -n -- "$$$$fetch_url1" $(DEPS_DIR)/$(call dep_name,$(1)); then \
+ fetch_url="$$$$fetch_url1"; \
+ push_url='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_PUSH_URL),$(1))'; \
+ elif git clone -q -n -- "$$$$fetch_url2" $(DEPS_DIR)/$(call dep_name,$(1)); then \
+ fetch_url="$$$$fetch_url2"; \
+ push_url='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_PUSH_URL),$(1))'; \
+ fi; \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && ( \
+ $(foreach ref,$(call dep_rmq_commits,$(1)), \
+ git checkout -q $(ref) >/dev/null 2>&1 || \
+ ) \
+ (echo "error: no valid pathspec among: $(call dep_rmq_commits,$(1))" \
+ 1>&2 && false) ) && \
+ (test "$$$$fetch_url" = "$$$$push_url" || \
+ git remote set-url --push origin "$$$$push_url")
+endef
+
+# --------------------------------------------------------------------
+# Component distribution.
+# --------------------------------------------------------------------
+
+list-dist-deps::
+ @:
+
+prepare-dist::
+ @:
+
+# --------------------------------------------------------------------
+# Umbrella-specific settings.
+# --------------------------------------------------------------------
+
+# If the top-level project is a RabbitMQ component, we override
+# $(DEPS_DIR) for this project to point to the top-level's one.
+#
+# We also verify that the guessed DEPS_DIR is actually named `deps`,
+# to rule out any situation where it is a coincidence that we found a
+# `rabbitmq-components.mk` in upper directories.
+
+possible_deps_dir_1 = $(abspath ..)
+possible_deps_dir_2 = $(abspath ../../..)
+
+ifeq ($(notdir $(possible_deps_dir_1)),deps)
+ifneq ($(wildcard $(possible_deps_dir_1)/../rabbitmq-components.mk),)
+deps_dir_overriden = 1
+DEPS_DIR ?= $(possible_deps_dir_1)
+DISABLE_DISTCLEAN = 1
+endif
+endif
+
+ifeq ($(deps_dir_overriden),)
+ifeq ($(notdir $(possible_deps_dir_2)),deps)
+ifneq ($(wildcard $(possible_deps_dir_2)/../rabbitmq-components.mk),)
+deps_dir_overriden = 1
+DEPS_DIR ?= $(possible_deps_dir_2)
+DISABLE_DISTCLEAN = 1
+endif
+endif
+endif
+
+ifneq ($(wildcard UMBRELLA.md),)
+DISABLE_DISTCLEAN = 1
+endif
+
+# We disable `make distclean` so $(DEPS_DIR) is not accidentally removed.
+
+ifeq ($(DISABLE_DISTCLEAN),1)
+ifneq ($(filter distclean distclean-deps,$(MAKECMDGOALS)),)
+SKIP_DEPS = 1
+endif
+endif
diff --git a/deps/rabbitmq_shovel_management/src/rabbit_shovel_mgmt.erl b/deps/rabbitmq_shovel_management/src/rabbit_shovel_mgmt.erl
new file mode 100644
index 0000000000..a843472e0f
--- /dev/null
+++ b/deps/rabbitmq_shovel_management/src/rabbit_shovel_mgmt.erl
@@ -0,0 +1,158 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_shovel_mgmt).
+
+-behaviour(rabbit_mgmt_extension).
+
+-export([dispatcher/0, web_ui/0]).
+-export([init/2, to_json/2, resource_exists/2, content_types_provided/2,
+ is_authorized/2, allowed_methods/2, delete_resource/2]).
+
+-import(rabbit_misc, [pget/2]).
+
+-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+dispatcher() -> [{"/shovels", ?MODULE, []},
+ {"/shovels/:vhost", ?MODULE, []},
+ {"/shovels/vhost/:vhost/:name", ?MODULE, []},
+ {"/shovels/vhost/:vhost/:name/restart", ?MODULE, []}].
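+
+%% These routes live under the management plugin's API prefix, so the
+%% status list above is served at e.g. "GET /api/shovels"; status is
+%% aggregated across all cluster nodes (see status/2 below).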
+
+web_ui() -> [{javascript, <<"shovel.js">>}].
+
+%%--------------------------------------------------------------------
+
+init(Req, _Opts) ->
+ {cowboy_rest, rabbit_mgmt_cors:set_headers(Req, ?MODULE), #context{}}.
+
+content_types_provided(ReqData, Context) ->
+ {[{<<"application/json">>, to_json}], ReqData, Context}.
+
+allowed_methods(ReqData, Context) ->
+ {[<<"HEAD">>, <<"GET">>, <<"DELETE">>, <<"OPTIONS">>], ReqData, Context}.
+
+resource_exists(ReqData, Context) ->
+ Reply = case rabbit_mgmt_util:vhost(ReqData) of
+ not_found ->
+ false;
+ VHost ->
+ case rabbit_mgmt_util:id(name, ReqData) of
+ none -> true;
+ Name ->
+ %% Deleting or restarting a shovel
+ case rabbit_shovel_status:lookup({VHost, Name}) of
+ not_found ->
+                                rabbit_log:error("Shovel with the name '~s' was not found "
+                                                 "on the target node '~s' and/or virtual host '~s'",
+ [Name, node(), VHost]),
+ false;
+ _ ->
+ true
+ end
+ end
+ end,
+ {Reply, ReqData, Context}.
+
+to_json(ReqData, Context) ->
+ rabbit_mgmt_util:reply_list(
+ filter_vhost_req(status(ReqData, Context), ReqData), ReqData, Context).
+
+is_authorized(ReqData, Context) ->
+ rabbit_mgmt_util:is_authorized_monitor(ReqData, Context).
+
+delete_resource(ReqData, #context{user = #user{username = Username}}=Context) ->
+ VHost = rabbit_mgmt_util:id(vhost, ReqData),
+ Reply = case rabbit_mgmt_util:id(name, ReqData) of
+ none ->
+ false;
+ Name ->
+ %% We must distinguish between a delete and restart
+ case is_restart(ReqData) of
+ true ->
+ case rabbit_shovel_util:restart_shovel(VHost, Name) of
+ {error, ErrMsg} ->
+ rabbit_log:error("Error restarting shovel: ~s", [ErrMsg]),
+ false;
+ ok -> true
+ end;
+ _ ->
+ case rabbit_shovel_util:delete_shovel(VHost, Name, Username) of
+ {error, ErrMsg} ->
+ rabbit_log:error("Error deleting shovel: ~s", [ErrMsg]),
+ false;
+ ok -> true
+ end
+ end
+ end,
+ {Reply, ReqData, Context}.
+
+%%--------------------------------------------------------------------
+
+is_restart(ReqData) ->
+ Path = cowboy_req:path(ReqData),
+ case string:find(Path, "/restart", trailing) of
+ nomatch -> false;
+ _ -> true
+ end.
+
+filter_vhost_req(List, ReqData) ->
+ case rabbit_mgmt_util:vhost(ReqData) of
+ none -> List;
+ VHost -> [I || I <- List,
+ pget(vhost, I) =:= VHost]
+ end.
+
+%% Allow users to see things in the vhosts they are authorised to access.
+%% But static shovels do not have a vhost, so only allow admins (not
+%% monitors) to see them.
+filter_vhost_user(List, _ReqData, #context{user = User = #user{tags = Tags}}) ->
+ VHosts = rabbit_mgmt_util:list_login_vhosts_names(User, undefined),
+ [I || I <- List, case pget(vhost, I) of
+ undefined -> lists:member(administrator, Tags);
+ VHost -> lists:member(VHost, VHosts)
+ end].
+
+status(ReqData, Context) ->
+ filter_vhost_user(
+ lists:append([status(Node) || Node <- [node() | nodes()]]),
+ ReqData, Context).
+
+status(Node) ->
+ case rpc:call(Node, rabbit_shovel_status, status, [], infinity) of
+ {badrpc, {'EXIT', _}} ->
+ [];
+ Status ->
+ [format(Node, I) || I <- Status]
+ end.
+
+format(Node, {Name, Type, Info, TS}) ->
+ [{node, Node}, {timestamp, format_ts(TS)}] ++
+ format_name(Type, Name) ++
+ format_info(Info).
+
+format_name(static, Name) -> [{name, Name},
+ {type, static}];
+format_name(dynamic, {VHost, Name}) -> [{name, Name},
+ {vhost, VHost},
+ {type, dynamic}].
+
+format_info(starting) ->
+ [{state, starting}];
+
+format_info({running, Props}) ->
+ [{state, running}] ++ Props;
+
+format_info({terminated, Reason}) ->
+ [{state, terminated},
+ {reason, print("~p", [Reason])}].
+
+format_ts({{Y, M, D}, {H, Min, S}}) ->
+ print("~w-~2.2.0w-~2.2.0w ~w:~2.2.0w:~2.2.0w", [Y, M, D, H, Min, S]).
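+%% e.g. format_ts({{2020,11,18},{14,27,41}}) -> <<"2020-11-18 14:27:41">>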
+
+print(Fmt, Val) ->
+ list_to_binary(io_lib:format(Fmt, Val)).
diff --git a/deps/rabbitmq_shovel_management/test/http_SUITE.erl b/deps/rabbitmq_shovel_management/test/http_SUITE.erl
new file mode 100644
index 0000000000..4248e90419
--- /dev/null
+++ b/deps/rabbitmq_shovel_management/test/http_SUITE.erl
@@ -0,0 +1,336 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(http_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("rabbit_common/include/rabbit_framing.hrl").
+-include_lib("rabbitmq_ct_helpers/include/rabbit_mgmt_test.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, non_parallel_tests}
+ ].
+
+groups() ->
+ [
+ {non_parallel_tests, [], [
+ amqp10_shovels,
+ shovels,
+ dynamic_plugin_enable_disable
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, ?MODULE}
+ ]),
+ rabbit_ct_helpers:run_setup_steps(Config1, [
+ fun configure_shovels/1,
+ fun start_inets/1
+ ] ++
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_group(_, Config) ->
+ Config.
+
+end_per_group(_, Config) ->
+ Config.
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(amqp10_shovels = Testcase, Config) ->
+ http_delete(Config, "/parameters/shovel/%2f/my-dynamic-amqp10", ?NO_CONTENT),
+ rabbit_ct_helpers:testcase_finished(Config, Testcase);
+end_per_testcase(shovels = Testcase, Config) ->
+ http_delete(Config, "/vhosts/v", ?NO_CONTENT),
+ http_delete(Config, "/users/admin", ?NO_CONTENT),
+ http_delete(Config, "/users/mon", ?NO_CONTENT),
+ rabbit_ct_helpers:testcase_finished(Config, Testcase);
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+configure_shovels(Config) ->
+ rabbit_ct_helpers:merge_app_env(Config,
+ {rabbitmq_shovel, [
+ {shovels,
+ [{'my-static',
+ [{sources, [
+ {broker, "amqp://"},
+ {declarations, [
+ {'queue.declare', [{queue, <<"static">>}]}]}
+ ]},
+ {destinations, [{broker, "amqp://"}]},
+ {queue, <<"static">>},
+ {publish_fields, [
+ {exchange, <<"">>},
+ {routing_key, <<"static2">>}
+ ]}
+ ]}
+ ]}
+ ]}).
+
+start_inets(Config) ->
+ ok = application:start(inets),
+ Config.
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+amqp10_shovels(Config) ->
+ Port = integer_to_binary(
+ rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp)),
+ http_put(Config, "/parameters/shovel/%2f/my-dynamic-amqp10",
+ #{value => #{'src-protocol' => <<"amqp10">>,
+ 'src-uri' => <<"amqp://localhost:", Port/binary>>,
+ 'src-address' => <<"test">>,
+ 'dest-protocol' => <<"amqp10">>,
+ 'dest-uri' => <<"amqp://localhost:", Port/binary>>,
+ 'dest-address' => <<"test2">>,
+ 'dest-properties' => #{},
+ 'dest-application-properties' => #{},
+ 'dest-message-annotations' => #{}}}, ?CREATED),
+    % wait, by polling, until the shovel emits a full report
+    % that includes the protocols used.
+ wait_until(fun () ->
+ case lists:sort(fun(#{name := AName}, #{name := BName}) ->
+ AName < BName
+ end,
+ http_get(Config, "/shovels", "guest", "guest", ?OK))
+ of
+ [#{name := <<"my-dynamic-amqp10">>,
+ src_protocol := <<"amqp10">>,
+ dest_protocol := <<"amqp10">>,
+ type := <<"dynamic">>},
+ #{name := <<"my-static">>,
+ src_protocol := <<"amqp091">>,
+ dest_protocol := <<"amqp091">>,
+ type := <<"static">>}] ->
+ true;
+ _ ->
+ false
+ end
+ end, 20),
+ ok.
+
+
+-define(StaticPattern, #{name := <<"my-static">>,
+ type := <<"static">>}).
+
+-define(Dynamic1Pattern, #{name := <<"my-dynamic">>,
+ vhost := <<"/">>,
+ type := <<"dynamic">>}).
+
+-define(Dynamic2Pattern, #{name := <<"my-dynamic">>,
+ vhost := <<"v">>,
+ type := <<"dynamic">>}).
+
+shovels(Config) ->
+ http_put(Config, "/users/admin",
+ #{password => <<"admin">>, tags => <<"administrator">>}, ?CREATED),
+ http_put(Config, "/users/mon",
+ #{password => <<"mon">>, tags => <<"monitoring">>}, ?CREATED),
+ http_put(Config, "/vhosts/v", none, ?CREATED),
+ Perms = #{configure => <<".*">>,
+ write => <<".*">>,
+ read => <<".*">>},
+ http_put(Config, "/permissions/v/guest", Perms, ?NO_CONTENT),
+ http_put(Config, "/permissions/v/admin", Perms, ?CREATED),
+ http_put(Config, "/permissions/v/mon", Perms, ?CREATED),
+
+ [http_put(Config, "/parameters/shovel/" ++ V ++ "/my-dynamic",
+ #{value => #{'src-protocol' => <<"amqp091">>,
+ 'src-uri' => <<"amqp://">>,
+ 'src-queue' => <<"test">>,
+ 'dest-protocol' => <<"amqp091">>,
+ 'dest-uri' => <<"amqp://">>,
+ 'dest-queue' => <<"test2">>}}, ?CREATED)
+ || V <- ["%2f", "v"]],
+
+ ?assertMatch([?StaticPattern, ?Dynamic1Pattern, ?Dynamic2Pattern],
+ http_get(Config, "/shovels", "guest", "guest", ?OK)),
+ ?assertMatch([?Dynamic1Pattern],
+ http_get(Config, "/shovels/%2f", "guest", "guest", ?OK)),
+ ?assertMatch([?Dynamic2Pattern],
+ http_get(Config, "/shovels/v", "guest", "guest", ?OK)),
+
+ ?assertMatch([?StaticPattern, ?Dynamic2Pattern],
+ http_get(Config, "/shovels", "admin", "admin", ?OK)),
+ ?assertMatch([],
+ http_get(Config, "/shovels/%2f", "admin", "admin", ?OK)),
+ ?assertMatch([?Dynamic2Pattern],
+ http_get(Config, "/shovels/v", "admin", "admin", ?OK)),
+
+ ?assertMatch([?Dynamic2Pattern],
+ http_get(Config, "/shovels", "mon", "mon", ?OK)),
+ ?assertMatch([],
+ http_get(Config, "/shovels/%2f", "mon", "mon", ?OK)),
+ ?assertMatch([?Dynamic2Pattern],
+ http_get(Config, "/shovels/v", "mon", "mon", ?OK)),
+ ok.
+
+%% It's a bit arbitrary to be testing this here, but we want to be
+%% able to test that mgmt extensions can be started and stopped
+%% *somewhere*, and here is as good a place as any.
+dynamic_plugin_enable_disable(Config) ->
+ http_get(Config, "/shovels", ?OK),
+ rabbit_ct_broker_helpers:disable_plugin(Config, 0,
+ "rabbitmq_shovel_management"),
+ http_get(Config, "/shovels", ?NOT_FOUND),
+ http_get(Config, "/overview", ?OK),
+ rabbit_ct_broker_helpers:disable_plugin(Config, 0,
+ "rabbitmq_management"),
+ http_fail(Config, "/shovels"),
+ http_fail(Config, "/overview"),
+ rabbit_ct_broker_helpers:enable_plugin(Config, 0,
+ "rabbitmq_management"),
+ http_get(Config, "/shovels", ?NOT_FOUND),
+ http_get(Config, "/overview", ?OK),
+ rabbit_ct_broker_helpers:enable_plugin(Config, 0,
+ "rabbitmq_shovel_management"),
+ http_get(Config, "/shovels", ?OK),
+ http_get(Config, "/overview", ?OK),
+ passed.
+
+%%---------------------------------------------------------------------------
+%% TODO this is mostly copypasta from the mgmt tests
+
+http_get(Config, Path) ->
+ http_get(Config, Path, ?OK).
+
+http_get(Config, Path, CodeExp) ->
+ http_get(Config, Path, "guest", "guest", CodeExp).
+
+http_get(Config, Path, User, Pass, CodeExp) ->
+ {ok, {{_HTTP, CodeAct, _}, Headers, ResBody}} =
+ req(Config, get, Path, [auth_header(User, Pass)]),
+ assert_code(CodeExp, CodeAct, "GET", Path, ResBody),
+ decode(CodeExp, Headers, ResBody).
+
+http_fail(Config, Path) ->
+ {error, {failed_connect, _}} = req(Config, get, Path, []).
+
+http_put(Config, Path, List, CodeExp) ->
+ http_put_raw(Config, Path, format_for_upload(List), CodeExp).
+
+http_put(Config, Path, List, User, Pass, CodeExp) ->
+ http_put_raw(Config, Path, format_for_upload(List), User, Pass, CodeExp).
+
+http_post(Config, Path, List, CodeExp) ->
+ http_post_raw(Config, Path, format_for_upload(List), CodeExp).
+
+http_post(Config, Path, List, User, Pass, CodeExp) ->
+ http_post_raw(Config, Path, format_for_upload(List), User, Pass, CodeExp).
+
+format_for_upload(none) ->
+ <<"">>;
+format_for_upload(Map) ->
+ iolist_to_binary(rabbit_json:encode(convert_keys(Map))).
+
+convert_keys(Map) ->
+ maps:fold(fun
+ (K, V, Acc) when is_map(V) ->
+ Acc#{atom_to_binary(K, latin1) => convert_keys(V)};
+ (K, V, Acc) ->
+ Acc#{atom_to_binary(K, latin1) => V}
+ end, #{}, Map).
+
+http_put_raw(Config, Path, Body, CodeExp) ->
+ http_upload_raw(Config, put, Path, Body, "guest", "guest", CodeExp).
+
+http_put_raw(Config, Path, Body, User, Pass, CodeExp) ->
+ http_upload_raw(Config, put, Path, Body, User, Pass, CodeExp).
+
+http_post_raw(Config, Path, Body, CodeExp) ->
+ http_upload_raw(Config, post, Path, Body, "guest", "guest", CodeExp).
+
+http_post_raw(Config, Path, Body, User, Pass, CodeExp) ->
+ http_upload_raw(Config, post, Path, Body, User, Pass, CodeExp).
+
+http_upload_raw(Config, Type, Path, Body, User, Pass, CodeExp) ->
+ {ok, {{_HTTP, CodeAct, _}, Headers, ResBody}} =
+ req(Config, Type, Path, [auth_header(User, Pass)], Body),
+ assert_code(CodeExp, CodeAct, Type, Path, ResBody),
+ decode(CodeExp, Headers, ResBody).
+
+http_delete(Config, Path, CodeExp) ->
+ http_delete(Config, Path, "guest", "guest", CodeExp).
+
+http_delete(Config, Path, User, Pass, CodeExp) ->
+ {ok, {{_HTTP, CodeAct, _}, Headers, ResBody}} =
+ req(Config, delete, Path, [auth_header(User, Pass)]),
+ assert_code(CodeExp, CodeAct, "DELETE", Path, ResBody),
+ decode(CodeExp, Headers, ResBody).
+
+assert_code(CodeExp, CodeAct, _Type, _Path, _Body) ->
+ ?assertEqual(CodeExp, CodeAct).
+
+req_uri(Config, Path) ->
+ rabbit_misc:format("~s/api~s", [
+ rabbit_ct_broker_helpers:node_uri(Config, 0, management),
+ Path
+ ]).
+
+req(Config, Type, Path, Headers) ->
+ httpc:request(Type,
+ {req_uri(Config, Path), Headers},
+ ?HTTPC_OPTS, []).
+
+req(Config, Type, Path, Headers, Body) ->
+ httpc:request(Type,
+ {req_uri(Config, Path), Headers, "application/json", Body},
+ ?HTTPC_OPTS, []).
+
+decode(?OK, _Headers, ResBody) -> cleanup(rabbit_json:decode(rabbit_data_coercion:to_binary(ResBody)));
+decode(_, Headers, _ResBody) -> Headers.
+
+cleanup(L) when is_list(L) ->
+ [cleanup(I) || I <- L];
+cleanup(M) when is_map(M) ->
+ maps:fold(fun(K, V, Acc) ->
+ Acc#{binary_to_atom(K, latin1) => cleanup(V)}
+ end, #{}, M);
+cleanup(I) ->
+ I.
+
+auth_header(Username, Password) ->
+ {"Authorization",
+ "Basic " ++ binary_to_list(base64:encode(Username ++ ":" ++ Password))}.
+
+assert_list(Exp, Act) ->
+ _ = [assert_item(ExpI, ActI) || {ExpI, ActI} <- lists:zip(Exp, Act)],
+ ok.
+
+assert_item(ExpI, ActI) ->
+ ExpI = maps:with(maps:keys(ExpI), ActI),
+ ok.
+
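+%% Poll Fun every 500 ms until it returns true; fail the test after N
+%% attempts (i.e. after roughly N * 500 ms).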
+wait_until(_Fun, 0) ->
+ ?assert(wait_failed);
+wait_until(Fun, N) ->
+ case Fun() of
+ true ->
+ ok;
+ false ->
+ timer:sleep(500),
+ wait_until(Fun, N - 1)
+ end.
diff --git a/deps/rabbitmq_stomp/.gitignore b/deps/rabbitmq_stomp/.gitignore
new file mode 100644
index 0000000000..14dbfd18d3
--- /dev/null
+++ b/deps/rabbitmq_stomp/.gitignore
@@ -0,0 +1,29 @@
+.sw?
+.*.sw?
+*.beam
+/.erlang.mk/
+/cover/
+/debug/
+/deps/
+/doc/
+/ebin/
+/escript/
+/escript.lock
+/logs/
+/plugins/
+/plugins.lock
+/sbin/
+/sbin.lock
+/xrefr
+
+rabbitmq_stomp.d
+
+# Python testsuite.
+.python-version
+*.pyc
+test/python_SUITE_data/deps/pika/pika/
+test/python_SUITE_data/deps/pika/pika-*/
+test/python_SUITE_data/deps/stomppy/stomppy/
+test/python_SUITE_data/deps/stomppy/stomppy-git/
+
+test/config_schema_SUITE_data/schema/
diff --git a/deps/rabbitmq_stomp/.travis.yml b/deps/rabbitmq_stomp/.travis.yml
new file mode 100644
index 0000000000..e2fc1041e7
--- /dev/null
+++ b/deps/rabbitmq_stomp/.travis.yml
@@ -0,0 +1,61 @@
+# vim:sw=2:et:
+
+os: linux
+dist: xenial
+language: elixir
+notifications:
+ email:
+ recipients:
+ - alerts@rabbitmq.com
+ on_success: never
+ on_failure: always
+addons:
+ apt:
+ packages:
+ - awscli
+cache:
+ apt: true
+env:
+ global:
+ - secure: oLN5hBjMeKvT365DSoNLPPIZ9Bf9gxEgP3EJCZgPgKVvsE+4DhosdwYPxo1mNA2mq+6soizNGiW5LlD92UZonNgptl7UDwmVFWSHawEopYz67zFbcohEeHnKFr5bAapGgttdAHkfWH5nxv90O6OfEva0QBXkQb8O/hOdmYsVYOs=
+ - secure: efpmC/exFPHVbK4peAI4hAi7WKb5eUPgqhax95iDF54aVbt6SuO4h/t4gC2eiKU9el4YEccmapHfJyQ5FZSEw+aWS0wAXpmXlbIc8rxKuWbESeqvGKTcDmILfcLJYXt/B3pNzynRQCPUJkYo946j18+kfzB+cBHm7TV021hnt9w=
+
+ # $base_rmq_ref is used by rabbitmq-components.mk to select the
+ # appropriate branch for dependencies.
+ - base_rmq_ref=master
+
+elixir:
+ - '1.9'
+otp_release:
+ - '21.3'
+ - '22.2'
+
+install:
+ # This project being an Erlang one (we just set language to Elixir
+ # to ensure it is installed), we don't want Travis to run mix(1)
+ # automatically as it will break.
+ skip
+
+script:
+ # $current_rmq_ref is also used by rabbitmq-components.mk to select
+ # the appropriate branch for dependencies.
+ - make check-rabbitmq-components.mk
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+ - make xref
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+ - make tests
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+
+after_failure:
+ - |
+ cd "$TRAVIS_BUILD_DIR"
+ if test -d logs && test "$AWS_ACCESS_KEY_ID" && test "$AWS_SECRET_ACCESS_KEY"; then
+ archive_name="$(basename "$TRAVIS_REPO_SLUG")-$TRAVIS_JOB_NUMBER"
+
+ tar -c --transform "s/^logs/${archive_name}/" -f - logs | \
+ xz > "${archive_name}.tar.xz"
+
+ aws s3 cp "${archive_name}.tar.xz" s3://server-release-pipeline/travis-ci-logs/ \
+ --region eu-west-1 \
+ --acl public-read
+ fi
diff --git a/deps/rabbitmq_stomp/CODE_OF_CONDUCT.md b/deps/rabbitmq_stomp/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000000..08697906fd
--- /dev/null
+++ b/deps/rabbitmq_stomp/CODE_OF_CONDUCT.md
@@ -0,0 +1,44 @@
+# Contributor Code of Conduct
+
+As contributors and maintainers of this project, and in the interest of fostering an open
+and welcoming community, we pledge to respect all people who contribute through reporting
+issues, posting feature requests, updating documentation, submitting pull requests or
+patches, and other activities.
+
+We are committed to making participation in this project a harassment-free experience for
+everyone, regardless of level of experience, gender, gender identity and expression,
+sexual orientation, disability, personal appearance, body size, race, ethnicity, age,
+religion, or nationality.
+
+Examples of unacceptable behavior by participants include:
+
+ * The use of sexualized language or imagery
+ * Personal attacks
+ * Trolling or insulting/derogatory comments
+ * Public or private harassment
+ * Publishing others' private information, such as physical or electronic addresses,
+ without explicit permission
+ * Other unethical or unprofessional conduct
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments,
+commits, code, wiki edits, issues, and other contributions that are not aligned to this
+Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors
+that they deem inappropriate, threatening, offensive, or harmful.
+
+By adopting this Code of Conduct, project maintainers commit themselves to fairly and
+consistently applying these principles to every aspect of managing this project. Project
+maintainers who do not follow or enforce the Code of Conduct may be permanently removed
+from the project team.
+
+This Code of Conduct applies both within project spaces and in public spaces when an
+individual is representing the project or its community.
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by
+contacting a project maintainer at [info@rabbitmq.com](mailto:info@rabbitmq.com). All complaints will
+be reviewed and investigated and will result in a response that is deemed necessary and
+appropriate to the circumstances. Maintainers are obligated to maintain confidentiality
+with regard to the reporter of an incident.
+
+This Code of Conduct is adapted from the
+[Contributor Covenant](https://contributor-covenant.org), version 1.3.0, available at
+[contributor-covenant.org/version/1/3/0/](https://contributor-covenant.org/version/1/3/0/)
diff --git a/deps/rabbitmq_stomp/CONTRIBUTING.md b/deps/rabbitmq_stomp/CONTRIBUTING.md
new file mode 100644
index 0000000000..592e7ced57
--- /dev/null
+++ b/deps/rabbitmq_stomp/CONTRIBUTING.md
@@ -0,0 +1,99 @@
+Thank you for using RabbitMQ and for taking the time to contribute to the project.
+This document has two main parts:
+
+ * when and how to file GitHub issues for RabbitMQ projects
+ * how to submit pull requests
+
+They are intended to save you and the RabbitMQ maintainers some time, so
+please take a moment to read through them.
+
+## Overview
+
+### GitHub issues
+
+The RabbitMQ team uses GitHub issues for _specific actionable items_ that
+engineers can work on. This assumes the following:
+
+* GitHub issues are not used for questions, investigations, root cause
+  analysis, discussions of potential issues, etc. (as defined by this team)
+* Enough information is provided by the reporter for maintainers to work with
+
+The team receives many questions through various venues every single
+day. Frequently, these questions do not include the necessary details
+the team needs to begin useful work. GitHub issues can very quickly
+turn into something impossible to navigate and make sense of. Because
+of this, questions, investigations, root cause analysis,
+and discussions of potential features are all considered to be
+[mailing list][rmq-users] material. If you are unsure where to begin,
+the [RabbitMQ users mailing list][rmq-users] is the right place.
+
+Getting all the details necessary to reproduce an issue, reach a
+conclusion or even form a hypothesis about what's happening can take a
+fair amount of time. Please help others help you by providing a way to
+reproduce the behavior you're observing, or at least sharing as much
+relevant information as possible on the [RabbitMQ users mailing
+list][rmq-users].
+
+Please provide versions of the software used:
+
+ * RabbitMQ server
+ * Erlang
+ * Operating system version (and distribution, if applicable)
+ * All client libraries used
+ * RabbitMQ plugins (if applicable)
+
+The following information greatly helps in investigating and reproducing issues:
+
+ * RabbitMQ server logs
+ * A code example or terminal transcript that can be used to reproduce
+ * Full exception stack traces (a single line message is not enough!)
+ * `rabbitmqctl report` and `rabbitmqctl environment` output
+ * Other relevant details about the environment and workload, e.g. a traffic capture
+ * Feel free to edit out hostnames and other potentially sensitive information.
+
+To make collecting much of this and other environment information easier,
+use the [`rabbitmq-collect-env`][rmq-collect-env] script. It will produce an archive with
+server logs, operating system logs, output of certain diagnostics commands and so on.
+Please note that **no effort is made to scrub any information that may be sensitive**.
+
+### Pull Requests
+
+RabbitMQ projects use pull requests to discuss, collaborate on and accept code contributions.
+Pull requests are the primary place for discussing code changes.
+
+Here's the recommended workflow:
+
+ * [Fork the repository][github-fork] or repositories you plan on contributing to. If multiple
+ repositories are involved in addressing the same issue, please use the same branch name
+ in each repository
+ * Create a branch with a descriptive name in the relevant repositories
+ * Make your changes, run tests (usually with `make tests`), commit with a
+ [descriptive message][git-commit-msgs], push to your fork
+ * Submit pull requests with an explanation of what has been changed and **why**
+ * Submit a filled out and signed [Contributor Agreement][ca-agreement] if needed (see below)
+ * Be patient. We will get to your pull request eventually
+
+If what you are going to work on is a substantial change, please first
+ask the core team for their opinion on the [RabbitMQ users mailing list][rmq-users].
+
+## Code of Conduct
+
+See [CODE_OF_CONDUCT.md](./CODE_OF_CONDUCT.md).
+
+## Contributor Agreement
+
+If you want to contribute a non-trivial change, please submit a signed
+copy of our [Contributor Agreement][ca-agreement] around the time you
+submit your pull request. This will make it much easier (in some
+cases, possible) for the RabbitMQ team at Pivotal to merge your
+contribution.
+
+## Where to Ask Questions
+
+If something isn't clear, feel free to ask on our [mailing list][rmq-users].
+
+[rmq-collect-env]: https://github.com/rabbitmq/support-tools/blob/master/scripts/rabbitmq-collect-env
+[git-commit-msgs]: https://chris.beams.io/posts/git-commit/
+[rmq-users]: https://groups.google.com/forum/#!forum/rabbitmq-users
+[ca-agreement]: https://cla.pivotal.io/sign/rabbitmq
+[github-fork]: https://help.github.com/articles/fork-a-repo/
diff --git a/deps/rabbitmq_stomp/LICENSE b/deps/rabbitmq_stomp/LICENSE
new file mode 100644
index 0000000000..f2da65d175
--- /dev/null
+++ b/deps/rabbitmq_stomp/LICENSE
@@ -0,0 +1,4 @@
+This package is licensed under the MPL 2.0. For the MPL 2.0, please see LICENSE-MPL-RabbitMQ.
+
+If you have any questions regarding licensing, please contact us at
+info@rabbitmq.com.
diff --git a/deps/rabbitmq_stomp/LICENSE-MPL-RabbitMQ b/deps/rabbitmq_stomp/LICENSE-MPL-RabbitMQ
new file mode 100644
index 0000000000..14e2f777f6
--- /dev/null
+++ b/deps/rabbitmq_stomp/LICENSE-MPL-RabbitMQ
@@ -0,0 +1,373 @@
+Mozilla Public License Version 2.0
+==================================
+
+1. Definitions
+--------------
+
+1.1. "Contributor"
+ means each individual or legal entity that creates, contributes to
+ the creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+ means the combination of the Contributions of others (if any) used
+ by a Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+ means Source Code Form to which the initial Contributor has attached
+ the notice in Exhibit A, the Executable Form of such Source Code
+ Form, and Modifications of such Source Code Form, in each case
+ including portions thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ (a) that the initial Contributor has attached the notice described
+ in Exhibit B to the Covered Software; or
+
+ (b) that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the
+ terms of a Secondary License.
+
+1.6. "Executable Form"
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+ means a work that combines Covered Software with other material, in
+ a separate file or files, that is not Covered Software.
+
+1.8. "License"
+ means this document.
+
+1.9. "Licensable"
+ means having the right to grant, to the maximum extent possible,
+ whether at the time of the initial grant or subsequently, any and
+ all of the rights conveyed by this License.
+
+1.10. "Modifications"
+ means any of the following:
+
+ (a) any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered
+ Software; or
+
+ (b) any new file in Source Code Form that contains any Covered
+ Software.
+
+1.11. "Patent Claims" of a Contributor
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the
+ License, by the making, using, selling, offering for sale, having
+ made, import, or transfer of either its Contributions or its
+ Contributor Version.
+
+1.12. "Secondary License"
+ means either the GNU General Public License, Version 2.0, the GNU
+ Lesser General Public License, Version 2.1, the GNU Affero General
+ Public License, Version 3.0, or any later versions of those
+ licenses.
+
+1.13. "Source Code Form"
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that
+ controls, is controlled by, or is under common control with You. For
+ purposes of this definition, "control" means (a) the power, direct
+ or indirect, to cause the direction or management of such entity,
+ whether by contract or otherwise, or (b) ownership of more than
+ fifty percent (50%) of the outstanding shares or beneficial
+ ownership of such entity.
+
+2. License Grants and Conditions
+--------------------------------
+
+2.1. Grants
+
+Each Contributor hereby grants You a world-wide, royalty-free,
+non-exclusive license:
+
+(a) under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+(b) under Patent Claims of such Contributor to make, use, sell, offer
+ for sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+The licenses granted in Section 2.1 with respect to any Contribution
+become effective for each Contribution on the date the Contributor first
+distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+The licenses granted in this Section 2 are the only rights granted under
+this License. No additional rights or licenses will be implied from the
+distribution or licensing of Covered Software under this License.
+Notwithstanding Section 2.1(b) above, no patent license is granted by a
+Contributor:
+
+(a) for any code that a Contributor has removed from Covered Software;
+ or
+
+(b) for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+(c) under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+This License does not grant any rights in the trademarks, service marks,
+or logos of any Contributor (except as may be necessary to comply with
+the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+No Contributor makes additional grants as a result of Your choice to
+distribute the Covered Software under a subsequent version of this
+License (see Section 10.2) or under the terms of a Secondary License (if
+permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+Each Contributor represents that the Contributor believes its
+Contributions are its original creation(s) or it has sufficient rights
+to grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+This License is not intended to limit any rights You have under
+applicable copyright doctrines of fair use, fair dealing, or other
+equivalents.
+
+2.7. Conditions
+
+Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
+in Section 2.1.
+
+3. Responsibilities
+-------------------
+
+3.1. Distribution of Source Form
+
+All distribution of Covered Software in Source Code Form, including any
+Modifications that You create or to which You contribute, must be under
+the terms of this License. You must inform recipients that the Source
+Code Form of the Covered Software is governed by the terms of this
+License, and how they can obtain a copy of this License. You may not
+attempt to alter or restrict the recipients' rights in the Source Code
+Form.
+
+3.2. Distribution of Executable Form
+
+If You distribute Covered Software in Executable Form then:
+
+(a) such Covered Software must also be made available in Source Code
+ Form, as described in Section 3.1, and You must inform recipients of
+ the Executable Form how they can obtain a copy of such Source Code
+ Form by reasonable means in a timely manner, at a charge no more
+ than the cost of distribution to the recipient; and
+
+(b) You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter
+ the recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+You may create and distribute a Larger Work under terms of Your choice,
+provided that You also comply with the requirements of this License for
+the Covered Software. If the Larger Work is a combination of Covered
+Software with a work governed by one or more Secondary Licenses, and the
+Covered Software is not Incompatible With Secondary Licenses, this
+License permits You to additionally distribute such Covered Software
+under the terms of such Secondary License(s), so that the recipient of
+the Larger Work may, at their option, further distribute the Covered
+Software under the terms of either this License or such Secondary
+License(s).
+
+3.4. Notices
+
+You may not remove or alter the substance of any license notices
+(including copyright notices, patent notices, disclaimers of warranty,
+or limitations of liability) contained within the Source Code Form of
+the Covered Software, except that You may alter any license notices to
+the extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+You may choose to offer, and to charge a fee for, warranty, support,
+indemnity or liability obligations to one or more recipients of Covered
+Software. However, You may do so only on Your own behalf, and not on
+behalf of any Contributor. You must make it absolutely clear that any
+such warranty, support, indemnity, or liability obligation is offered by
+You alone, and You hereby agree to indemnify every Contributor for any
+liability incurred by such Contributor as a result of warranty, support,
+indemnity or liability terms You offer. You may include additional
+disclaimers of warranty and limitations of liability specific to any
+jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+---------------------------------------------------
+
+If it is impossible for You to comply with any of the terms of this
+License with respect to some or all of the Covered Software due to
+statute, judicial order, or regulation then You must: (a) comply with
+the terms of this License to the maximum extent possible; and (b)
+describe the limitations and the code they affect. Such description must
+be placed in a text file included with all distributions of the Covered
+Software under this License. Except to the extent prohibited by statute
+or regulation, such description must be sufficiently detailed for a
+recipient of ordinary skill to be able to understand it.
+
+5. Termination
+--------------
+
+5.1. The rights granted under this License will terminate automatically
+if You fail to comply with any of its terms. However, if You become
+compliant, then the rights granted under this License from a particular
+Contributor are reinstated (a) provisionally, unless and until such
+Contributor explicitly and finally terminates Your grants, and (b) on an
+ongoing basis, if such Contributor fails to notify You of the
+non-compliance by some reasonable means prior to 60 days after You have
+come back into compliance. Moreover, Your grants from a particular
+Contributor are reinstated on an ongoing basis if such Contributor
+notifies You of the non-compliance by some reasonable means, this is the
+first time You have received notice of non-compliance with this License
+from such Contributor, and You become compliant prior to 30 days after
+Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+infringement claim (excluding declaratory judgment actions,
+counter-claims, and cross-claims) alleging that a Contributor Version
+directly or indirectly infringes any patent, then the rights granted to
+You by any and all Contributors for the Covered Software under Section
+2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all
+end user license agreements (excluding distributors and resellers) which
+have been validly granted by You or Your distributors under this License
+prior to termination shall survive termination.
+
+************************************************************************
+* *
+* 6. Disclaimer of Warranty *
+* ------------------------- *
+* *
+* Covered Software is provided under this License on an "as is" *
+* basis, without warranty of any kind, either expressed, implied, or *
+* statutory, including, without limitation, warranties that the *
+* Covered Software is free of defects, merchantable, fit for a *
+* particular purpose or non-infringing. The entire risk as to the *
+* quality and performance of the Covered Software is with You. *
+* Should any Covered Software prove defective in any respect, You *
+* (not any Contributor) assume the cost of any necessary servicing, *
+* repair, or correction. This disclaimer of warranty constitutes an *
+* essential part of this License. No use of any Covered Software is *
+* authorized under this License except under this disclaimer. *
+* *
+************************************************************************
+
+************************************************************************
+* *
+* 7. Limitation of Liability *
+* -------------------------- *
+* *
+* Under no circumstances and under no legal theory, whether tort *
+* (including negligence), contract, or otherwise, shall any *
+* Contributor, or anyone who distributes Covered Software as *
+* permitted above, be liable to You for any direct, indirect, *
+* special, incidental, or consequential damages of any character *
+* including, without limitation, damages for lost profits, loss of *
+* goodwill, work stoppage, computer failure or malfunction, or any *
+* and all other commercial damages or losses, even if such party *
+* shall have been informed of the possibility of such damages. This *
+* limitation of liability shall not apply to liability for death or *
+* personal injury resulting from such party's negligence to the *
+* extent applicable law prohibits such limitation. Some *
+* jurisdictions do not allow the exclusion or limitation of *
+* incidental or consequential damages, so this exclusion and *
+* limitation may not apply to You. *
+* *
+************************************************************************
+
+8. Litigation
+-------------
+
+Any litigation relating to this License may be brought only in the
+courts of a jurisdiction where the defendant maintains its principal
+place of business and such litigation shall be governed by laws of that
+jurisdiction, without reference to its conflict-of-law provisions.
+Nothing in this Section shall prevent a party's ability to bring
+cross-claims or counter-claims.
+
+9. Miscellaneous
+----------------
+
+This License represents the complete agreement concerning the subject
+matter hereof. If any provision of this License is held to be
+unenforceable, such provision shall be reformed only to the extent
+necessary to make it enforceable. Any law or regulation which provides
+that the language of a contract shall be construed against the drafter
+shall not be used to construe this License against a Contributor.
+
+10. Versions of the License
+---------------------------
+
+10.1. New Versions
+
+Mozilla Foundation is the license steward. Except as provided in Section
+10.3, no one other than the license steward has the right to modify or
+publish new versions of this License. Each version will be given a
+distinguishing version number.
+
+10.2. Effect of New Versions
+
+You may distribute the Covered Software under the terms of the version
+of the License under which You originally received the Covered Software,
+or under the terms of any subsequent version published by the license
+steward.
+
+10.3. Modified Versions
+
+If you create software not governed by this License, and you want to
+create a new license for such software, you may create and use a
+modified version of this License if you rename the license and remove
+any references to the name of the license steward (except to note that
+such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+Licenses
+
+If You choose to distribute Source Code Form that is Incompatible With
+Secondary Licenses under the terms of this version of the License, the
+notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+-------------------------------------------
+
+ This Source Code Form is subject to the terms of the Mozilla Public
+ License, v. 2.0. If a copy of the MPL was not distributed with this
+ file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular
+file, then You may include the notice in a location (such as a LICENSE
+file in a relevant directory) where a recipient would be likely to look
+for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+---------------------------------------------------------
+
+ This Source Code Form is "Incompatible With Secondary Licenses", as
+ defined by the Mozilla Public License, v. 2.0.
diff --git a/deps/rabbitmq_stomp/Makefile b/deps/rabbitmq_stomp/Makefile
new file mode 100644
index 0000000000..a8a3e57c90
--- /dev/null
+++ b/deps/rabbitmq_stomp/Makefile
@@ -0,0 +1,46 @@
+PROJECT = rabbitmq_stomp
+PROJECT_DESCRIPTION = RabbitMQ STOMP plugin
+PROJECT_MOD = rabbit_stomp
+
+define PROJECT_ENV
+[
+ {default_user,
+ [{login, <<"guest">>},
+ {passcode, <<"guest">>}]},
+ {default_vhost, <<"/">>},
+ {default_topic_exchange, <<"amq.topic">>},
+ {default_nack_requeue, true},
+ {ssl_cert_login, false},
+ {implicit_connect, false},
+ {tcp_listeners, [61613]},
+ {ssl_listeners, []},
+ {num_tcp_acceptors, 10},
+ {num_ssl_acceptors, 10},
+ {tcp_listen_options, [{backlog, 128},
+ {nodelay, true}]},
+ %% see rabbitmq/rabbitmq-stomp#39
+ {trailing_lf, true},
+ %% see rabbitmq/rabbitmq-stomp#57
+ {hide_server_info, false},
+ {proxy_protocol, false}
+ ]
+endef
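+
+# The terms above are the plugin's default application environment; each of
+# these values can be overridden through the broker's configuration.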
+
+define PROJECT_APP_EXTRA_KEYS
+ {broker_version_requirements, []}
+endef
+
+DEPS = ranch rabbit_common rabbit amqp_client
+TEST_DEPS = rabbitmq_ct_helpers rabbitmq_ct_client_helpers
+
+DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-plugin.mk
+DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk
+
+# FIXME: Use erlang.mk patched for RabbitMQ, while waiting for PRs to be
+# reviewed and merged.
+
+ERLANG_MK_REPO = https://github.com/rabbitmq/erlang.mk.git
+ERLANG_MK_COMMIT = rabbitmq-tmp
+
+include rabbitmq-components.mk
+include erlang.mk
diff --git a/deps/rabbitmq_stomp/NOTES b/deps/rabbitmq_stomp/NOTES
new file mode 100644
index 0000000000..b4a9f02df4
--- /dev/null
+++ b/deps/rabbitmq_stomp/NOTES
@@ -0,0 +1,71 @@
+Comments from Sean Treadway, 2 June 2008, on the rabbitmq-discuss list:
+
+ - On naming, extensibility, and headers:
+
+ "STOMP looked like it was MQ agnostic and extensible while keeping
+ the core headers well defined (ack=client, message_id, etc...),
+ but my application was not MQ agnostic. Plus I saw some of the
+ ActiveMQ headers weren't available or necessary in RabbitMQ.
+
+ "Keeping the AMQP naming is the best way to piggy back on the AMQP
+ documentation. For those that need simple, transient queues, the
+ existing STOMP documentation would be sufficient."
+
+ ...
+
+ "I only have experience with RabbitMQ, so I'm fine with exposing
+ AMQP rather than try to come to some agreement over the extension
+ names of standard STOMP headers."
+
+ - On queue deletion over STOMP:
+
+ "Here, I would stick with the verbs defined in STOMP and extend the
+ verbs with headers. One possibility is to use UNSUBSCRIBE
+ messages to change the queue properties before sending the
+ 'basic.cancel' method. Another possibility is to change queue
+ properties on a SUBSCRIBE message. Neither seem nice to me. Third
+ option is to do nothing, and delete the queues outside of the
+ STOMP protocol"
+
+Comments from Darien Kindlund, 11 February 2009, on the rabbitmq-discuss list:
+
+ - On testing of connection establishment:
+
+ "[O]nce I switched each perl process over to re-using their
+ existing STOMP connection, things worked much, much better. As
+ such, I'm continuing development. In your unit testing, you may
+ want to include rapid connect/disconnect behavior or otherwise
+ explicitly warn developers to avoid this scenario."
+
+Comments from Novak Joe, 11 September 2008, on the rabbitmq-discuss list:
+
+ - On broadcast send:
+
+ "That said, I think it would also be useful to add to the STOMP
+ wiki page an additional note on broadcast SEND. In particular I
+ found that in order to send a message to a broadcast exchange it
+ needs look something like:
+
+ ---------------------------------
+ SEND
+ destination:x.mytopic
+ exchange:amq.topic
+
+ my message
+ \x00
+ --------------------------------
+
+ "However my initial newb intuition was that it should look more like:
+
+ ---------------------------------
+ SEND
+ destination:
+ exchange:amq.topic
+ routing_key:x.mytopic
+
+ my message
+ \x00
+ --------------------------------
+
+ "The ruby examples cleared this up but not before I experienced a
+ bit of confusion on the subject."
diff --git a/deps/rabbitmq_stomp/README.md b/deps/rabbitmq_stomp/README.md
new file mode 100644
index 0000000000..922793ba66
--- /dev/null
+++ b/deps/rabbitmq_stomp/README.md
@@ -0,0 +1,18 @@
+# RabbitMQ STOMP adapter
+
+The STOMP adapter is included in the RabbitMQ distribution. To enable
+it, use [rabbitmq-plugins](https://www.rabbitmq.com/man/rabbitmq-plugins.1.man.html):
+
+ rabbitmq-plugins enable rabbitmq_stomp
+
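+Once enabled, the adapter listens for STOMP connections on port 61613 and
+accepts the default `guest`/`guest` credentials (both defaults come from the
+plugin's application environment, shown in its Makefile). A minimal client
+handshake sketch, assuming those defaults:
+
+    CONNECT
+    login:guest
+    passcode:guest
+
+    \x00
+
+The broker answers with a `CONNECTED` frame. Frames are NUL-terminated; the
+terminator is written as `\x00` here, as in the NOTES file.
+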
+## Supported STOMP Versions
+
+1.0 through 1.2.
+
+## Documentation
+
+[RabbitMQ STOMP plugin documentation](https://www.rabbitmq.com/stomp.html).
+
+## Continuous Integration
+
+[![Build Status](https://travis-ci.org/rabbitmq/rabbitmq-stomp.svg?branch=master)](https://travis-ci.org/rabbitmq/rabbitmq-stomp)
diff --git a/deps/rabbitmq_stomp/erlang.mk b/deps/rabbitmq_stomp/erlang.mk
new file mode 100644
index 0000000000..fce4be0b0a
--- /dev/null
+++ b/deps/rabbitmq_stomp/erlang.mk
@@ -0,0 +1,7808 @@
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+.PHONY: all app apps deps search rel relup docs install-docs check tests clean distclean help erlang-mk
+
+ERLANG_MK_FILENAME := $(realpath $(lastword $(MAKEFILE_LIST)))
+export ERLANG_MK_FILENAME
+
+ERLANG_MK_VERSION = 2019.07.01-40-geb3e4b0
+ERLANG_MK_WITHOUT =
+
+# Make 3.81 and 3.82 are deprecated.
+
+ifeq ($(MAKELEVEL)$(MAKE_VERSION),03.81)
+$(warning Please upgrade to GNU Make 4 or later: https://erlang.mk/guide/installation.html)
+endif
+
+ifeq ($(MAKELEVEL)$(MAKE_VERSION),03.82)
+$(warning Please upgrade to GNU Make 4 or later: https://erlang.mk/guide/installation.html)
+endif
+
+# Core configuration.
+
+PROJECT ?= $(notdir $(CURDIR))
+PROJECT := $(strip $(PROJECT))
+
+PROJECT_VERSION ?= rolling
+PROJECT_MOD ?= $(PROJECT)_app
+PROJECT_ENV ?= []
+
+# Verbosity.
+
+V ?= 0
+
+verbose_0 = @
+verbose_2 = set -x;
+verbose = $(verbose_$(V))
+
+ifeq ($(V),3)
+SHELL := $(SHELL) -x
+endif
+
+gen_verbose_0 = @echo " GEN " $@;
+gen_verbose_2 = set -x;
+gen_verbose = $(gen_verbose_$(V))
+
+gen_verbose_esc_0 = @echo " GEN " $$@;
+gen_verbose_esc_2 = set -x;
+gen_verbose_esc = $(gen_verbose_esc_$(V))
+
+# Temporary files directory.
+
+ERLANG_MK_TMP ?= $(CURDIR)/.erlang.mk
+export ERLANG_MK_TMP
+
+# "erl" command.
+
+ERL = erl +A1 -noinput -boot no_dot_erlang
+
+# Platform detection.
+
+ifeq ($(PLATFORM),)
+UNAME_S := $(shell uname -s)
+
+ifeq ($(UNAME_S),Linux)
+PLATFORM = linux
+else ifeq ($(UNAME_S),Darwin)
+PLATFORM = darwin
+else ifeq ($(UNAME_S),SunOS)
+PLATFORM = solaris
+else ifeq ($(UNAME_S),GNU)
+PLATFORM = gnu
+else ifeq ($(UNAME_S),FreeBSD)
+PLATFORM = freebsd
+else ifeq ($(UNAME_S),NetBSD)
+PLATFORM = netbsd
+else ifeq ($(UNAME_S),OpenBSD)
+PLATFORM = openbsd
+else ifeq ($(UNAME_S),DragonFly)
+PLATFORM = dragonfly
+else ifeq ($(shell uname -o),Msys)
+PLATFORM = msys2
+else
+$(error Unable to detect platform. Please open a ticket with the output of uname -a.)
+endif
+
+export PLATFORM
+endif
+
+# Core targets.
+
+all:: deps app rel
+
+# Noop to avoid a Make warning when there's nothing to do.
+rel::
+ $(verbose) :
+
+relup:: deps app
+
+check:: tests
+
+clean:: clean-crashdump
+
+clean-crashdump:
+ifneq ($(wildcard erl_crash.dump),)
+ $(gen_verbose) rm -f erl_crash.dump
+endif
+
+distclean:: clean distclean-tmp
+
+$(ERLANG_MK_TMP):
+ $(verbose) mkdir -p $(ERLANG_MK_TMP)
+
+distclean-tmp:
+ $(gen_verbose) rm -rf $(ERLANG_MK_TMP)
+
+help::
+ $(verbose) printf "%s\n" \
+ "erlang.mk (version $(ERLANG_MK_VERSION)) is distributed under the terms of the ISC License." \
+ "Copyright (c) 2013-2016 Loïc Hoguin <essen@ninenines.eu>" \
+ "" \
+ "Usage: [V=1] $(MAKE) [target]..." \
+ "" \
+ "Core targets:" \
+ " all Run deps, app and rel targets in that order" \
+ " app Compile the project" \
+ " deps Fetch dependencies (if needed) and compile them" \
+ " fetch-deps Fetch dependencies recursively (if needed) without compiling them" \
+ " list-deps List dependencies recursively on stdout" \
+ " search q=... Search for a package in the built-in index" \
+ " rel Build a release for this project, if applicable" \
+ " docs Build the documentation for this project" \
+ " install-docs Install the man pages for this project" \
+ " check Compile and run all tests and analysis for this project" \
+ " tests Run the tests for this project" \
+ " clean Delete temporary and output files from most targets" \
+ " distclean Delete all temporary and output files" \
+ " help Display this help and exit" \
+ " erlang-mk Update erlang.mk to the latest version"
+
+# Core functions.
+
+empty :=
+space := $(empty) $(empty)
+tab := $(empty) $(empty)
+comma := ,
+
+define newline
+
+
+endef
+
+define comma_list
+$(subst $(space),$(comma),$(strip $(1)))
+endef
+
+define escape_dquotes
+$(subst ",\",$1)
+endef
+
+# Adding erlang.mk to make Erlang scripts that call init:get_plain_arguments() happy.
+define erlang
+$(ERL) $2 -pz $(ERLANG_MK_TMP)/rebar/ebin -eval "$(subst $(newline),,$(call escape_dquotes,$1))" -- erlang.mk
+endef
+
+ifeq ($(PLATFORM),msys2)
+core_native_path = $(shell cygpath -m $1)
+else
+core_native_path = $1
+endif
+
+core_http_get = curl -Lf$(if $(filter-out 0,$(V)),,s)o $(call core_native_path,$1) $2
+
+core_eq = $(and $(findstring $(1),$(2)),$(findstring $(2),$(1)))
+
+# We skip files that contain spaces because they end up causing issues.
+core_find = $(if $(wildcard $1),$(shell find $(1:%/=%) \( -type l -o -type f \) -name $(subst *,\*,$2) | grep -v " "))
+
+core_lc = $(subst A,a,$(subst B,b,$(subst C,c,$(subst D,d,$(subst E,e,$(subst F,f,$(subst G,g,$(subst H,h,$(subst I,i,$(subst J,j,$(subst K,k,$(subst L,l,$(subst M,m,$(subst N,n,$(subst O,o,$(subst P,p,$(subst Q,q,$(subst R,r,$(subst S,s,$(subst T,t,$(subst U,u,$(subst V,v,$(subst W,w,$(subst X,x,$(subst Y,y,$(subst Z,z,$(1)))))))))))))))))))))))))))
+
+core_ls = $(filter-out $(1),$(shell echo $(1)))
+
+# @todo Use a solution that does not require using perl.
+core_relpath = $(shell perl -e 'use File::Spec; print File::Spec->abs2rel(@ARGV) . "\n"' $1 $2)
+
+define core_render
+ printf -- '$(subst $(newline),\n,$(subst %,%%,$(subst ','\'',$(subst $(tab),$(WS),$(call $(1))))))\n' > $(2)
+endef
+
+# Automated update.
+
+ERLANG_MK_REPO ?= https://github.com/ninenines/erlang.mk
+ERLANG_MK_COMMIT ?=
+ERLANG_MK_BUILD_CONFIG ?= build.config
+ERLANG_MK_BUILD_DIR ?= .erlang.mk.build
+
+erlang-mk: WITHOUT ?= $(ERLANG_MK_WITHOUT)
+erlang-mk:
+ifdef ERLANG_MK_COMMIT
+ $(verbose) git clone $(ERLANG_MK_REPO) $(ERLANG_MK_BUILD_DIR)
+ $(verbose) cd $(ERLANG_MK_BUILD_DIR) && git checkout $(ERLANG_MK_COMMIT)
+else
+ $(verbose) git clone --depth 1 $(ERLANG_MK_REPO) $(ERLANG_MK_BUILD_DIR)
+endif
+ $(verbose) if [ -f $(ERLANG_MK_BUILD_CONFIG) ]; then cp $(ERLANG_MK_BUILD_CONFIG) $(ERLANG_MK_BUILD_DIR)/build.config; fi
+ $(gen_verbose) $(MAKE) --no-print-directory -C $(ERLANG_MK_BUILD_DIR) WITHOUT='$(strip $(WITHOUT))' UPGRADE=1
+ $(verbose) cp $(ERLANG_MK_BUILD_DIR)/erlang.mk ./erlang.mk
+ $(verbose) rm -rf $(ERLANG_MK_BUILD_DIR)
+ $(verbose) rm -rf $(ERLANG_MK_TMP)
+
+# The erlang.mk package index is bundled in the default erlang.mk build.
+# Search for the string "copyright" to skip to the rest of the code.
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-kerl
+
+KERL_INSTALL_DIR ?= $(HOME)/erlang
+
+ifeq ($(strip $(KERL)),)
+KERL := $(ERLANG_MK_TMP)/kerl/kerl
+endif
+
+KERL_DIR = $(ERLANG_MK_TMP)/kerl
+
+export KERL
+
+KERL_GIT ?= https://github.com/kerl/kerl
+KERL_COMMIT ?= master
+
+KERL_MAKEFLAGS ?=
+
+OTP_GIT ?= https://github.com/erlang/otp
+
+define kerl_otp_target
+$(KERL_INSTALL_DIR)/$(1): $(KERL)
+ $(verbose) if [ ! -d $$@ ]; then \
+ MAKEFLAGS="$(KERL_MAKEFLAGS)" $(KERL) build git $(OTP_GIT) $(1) $(1); \
+ $(KERL) install $(1) $(KERL_INSTALL_DIR)/$(1); \
+ fi
+endef
+
+define kerl_hipe_target
+$(KERL_INSTALL_DIR)/$1-native: $(KERL)
+ $(verbose) if [ ! -d $$@ ]; then \
+ KERL_CONFIGURE_OPTIONS=--enable-native-libs \
+ MAKEFLAGS="$(KERL_MAKEFLAGS)" $(KERL) build git $(OTP_GIT) $1 $1-native; \
+ $(KERL) install $1-native $(KERL_INSTALL_DIR)/$1-native; \
+ fi
+endef
+
+$(KERL): $(KERL_DIR)
+
+$(KERL_DIR): | $(ERLANG_MK_TMP)
+ $(gen_verbose) git clone --depth 1 $(KERL_GIT) $(ERLANG_MK_TMP)/kerl
+ $(verbose) cd $(ERLANG_MK_TMP)/kerl && git checkout $(KERL_COMMIT)
+ $(verbose) chmod +x $(KERL)
+
+distclean:: distclean-kerl
+
+distclean-kerl:
+ $(gen_verbose) rm -rf $(KERL_DIR)
+
+# Allow users to select which version of Erlang/OTP to use for a project.
+
+ifneq ($(strip $(LATEST_ERLANG_OTP)),)
+# In some environments it is necessary to filter out master.
+ERLANG_OTP := $(notdir $(lastword $(sort\
+ $(filter-out $(KERL_INSTALL_DIR)/master $(KERL_INSTALL_DIR)/OTP_R%,\
+ $(filter-out %-rc1 %-rc2 %-rc3,$(wildcard $(KERL_INSTALL_DIR)/*[^-native]))))))
+endif
+
+ERLANG_OTP ?=
+ERLANG_HIPE ?=
+
+# Use kerl to enforce a specific Erlang/OTP version for a project.
+ifneq ($(strip $(ERLANG_OTP)),)
+export PATH := $(KERL_INSTALL_DIR)/$(ERLANG_OTP)/bin:$(PATH)
+SHELL := env PATH=$(PATH) $(SHELL)
+$(eval $(call kerl_otp_target,$(ERLANG_OTP)))
+
+# Build Erlang/OTP only if it doesn't already exist.
+ifeq ($(wildcard $(KERL_INSTALL_DIR)/$(ERLANG_OTP))$(BUILD_ERLANG_OTP),)
+$(info Building Erlang/OTP $(ERLANG_OTP)... Please wait...)
+$(shell $(MAKE) $(KERL_INSTALL_DIR)/$(ERLANG_OTP) ERLANG_OTP=$(ERLANG_OTP) BUILD_ERLANG_OTP=1 >&2)
+endif
+
+else
+# Same for a HiPE enabled VM.
+ifneq ($(strip $(ERLANG_HIPE)),)
+export PATH := $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native/bin:$(PATH)
+SHELL := env PATH=$(PATH) $(SHELL)
+$(eval $(call kerl_hipe_target,$(ERLANG_HIPE)))
+
+# Build Erlang/OTP only if it doesn't already exist.
+ifeq ($(wildcard $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native)$(BUILD_ERLANG_OTP),)
+$(info Building HiPE-enabled Erlang/OTP $(ERLANG_OTP)... Please wait...)
+$(shell $(MAKE) $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native ERLANG_HIPE=$(ERLANG_HIPE) BUILD_ERLANG_OTP=1 >&2)
+endif
+
+endif
+endif
+
+PACKAGES += aberth
+pkg_aberth_name = aberth
+pkg_aberth_description = Generic BERT-RPC server in Erlang
+pkg_aberth_homepage = https://github.com/a13x/aberth
+pkg_aberth_fetch = git
+pkg_aberth_repo = https://github.com/a13x/aberth
+pkg_aberth_commit = master
+
+PACKAGES += active
+pkg_active_name = active
+pkg_active_description = Active development for Erlang: rebuild and reload source/binary files while the VM is running
+pkg_active_homepage = https://github.com/proger/active
+pkg_active_fetch = git
+pkg_active_repo = https://github.com/proger/active
+pkg_active_commit = master
+
+PACKAGES += actordb_core
+pkg_actordb_core_name = actordb_core
+pkg_actordb_core_description = ActorDB main source
+pkg_actordb_core_homepage = http://www.actordb.com/
+pkg_actordb_core_fetch = git
+pkg_actordb_core_repo = https://github.com/biokoda/actordb_core
+pkg_actordb_core_commit = master
+
+PACKAGES += actordb_thrift
+pkg_actordb_thrift_name = actordb_thrift
+pkg_actordb_thrift_description = Thrift API for ActorDB
+pkg_actordb_thrift_homepage = http://www.actordb.com/
+pkg_actordb_thrift_fetch = git
+pkg_actordb_thrift_repo = https://github.com/biokoda/actordb_thrift
+pkg_actordb_thrift_commit = master
+
+PACKAGES += aleppo
+pkg_aleppo_name = aleppo
+pkg_aleppo_description = Alternative Erlang Pre-Processor
+pkg_aleppo_homepage = https://github.com/ErlyORM/aleppo
+pkg_aleppo_fetch = git
+pkg_aleppo_repo = https://github.com/ErlyORM/aleppo
+pkg_aleppo_commit = master
+
+PACKAGES += alog
+pkg_alog_name = alog
+pkg_alog_description = Simply the best logging framework for Erlang
+pkg_alog_homepage = https://github.com/siberian-fast-food/alogger
+pkg_alog_fetch = git
+pkg_alog_repo = https://github.com/siberian-fast-food/alogger
+pkg_alog_commit = master
+
+PACKAGES += amqp_client
+pkg_amqp_client_name = amqp_client
+pkg_amqp_client_description = RabbitMQ Erlang AMQP client
+pkg_amqp_client_homepage = https://www.rabbitmq.com/erlang-client-user-guide.html
+pkg_amqp_client_fetch = git
+pkg_amqp_client_repo = https://github.com/rabbitmq/rabbitmq-erlang-client.git
+pkg_amqp_client_commit = master
+
+PACKAGES += annotations
+pkg_annotations_name = annotations
+pkg_annotations_description = Simple code instrumentation utilities
+pkg_annotations_homepage = https://github.com/hyperthunk/annotations
+pkg_annotations_fetch = git
+pkg_annotations_repo = https://github.com/hyperthunk/annotations
+pkg_annotations_commit = master
+
+PACKAGES += antidote
+pkg_antidote_name = antidote
+pkg_antidote_description = Large-scale computation without synchronisation
+pkg_antidote_homepage = https://syncfree.lip6.fr/
+pkg_antidote_fetch = git
+pkg_antidote_repo = https://github.com/SyncFree/antidote
+pkg_antidote_commit = master
+
+PACKAGES += apns
+pkg_apns_name = apns
+pkg_apns_description = Apple Push Notification Server for Erlang
+pkg_apns_homepage = http://inaka.github.com/apns4erl
+pkg_apns_fetch = git
+pkg_apns_repo = https://github.com/inaka/apns4erl
+pkg_apns_commit = master
+
+PACKAGES += asciideck
+pkg_asciideck_name = asciideck
+pkg_asciideck_description = Asciidoc for Erlang.
+pkg_asciideck_homepage = https://ninenines.eu
+pkg_asciideck_fetch = git
+pkg_asciideck_repo = https://github.com/ninenines/asciideck
+pkg_asciideck_commit = master
+
+PACKAGES += azdht
+pkg_azdht_name = azdht
+pkg_azdht_description = Azureus Distributed Hash Table (DHT) in Erlang
+pkg_azdht_homepage = https://github.com/arcusfelis/azdht
+pkg_azdht_fetch = git
+pkg_azdht_repo = https://github.com/arcusfelis/azdht
+pkg_azdht_commit = master
+
+PACKAGES += backoff
+pkg_backoff_name = backoff
+pkg_backoff_description = Simple exponential backoffs in Erlang
+pkg_backoff_homepage = https://github.com/ferd/backoff
+pkg_backoff_fetch = git
+pkg_backoff_repo = https://github.com/ferd/backoff
+pkg_backoff_commit = master
+
+PACKAGES += barrel_tcp
+pkg_barrel_tcp_name = barrel_tcp
+pkg_barrel_tcp_description = barrel is a generic TCP acceptor pool with low latency in Erlang.
+pkg_barrel_tcp_homepage = https://github.com/benoitc-attic/barrel_tcp
+pkg_barrel_tcp_fetch = git
+pkg_barrel_tcp_repo = https://github.com/benoitc-attic/barrel_tcp
+pkg_barrel_tcp_commit = master
+
+PACKAGES += basho_bench
+pkg_basho_bench_name = basho_bench
+pkg_basho_bench_description = A load-generation and testing tool for basically whatever you can write a returning Erlang function for.
+pkg_basho_bench_homepage = https://github.com/basho/basho_bench
+pkg_basho_bench_fetch = git
+pkg_basho_bench_repo = https://github.com/basho/basho_bench
+pkg_basho_bench_commit = master
+
+PACKAGES += bcrypt
+pkg_bcrypt_name = bcrypt
+pkg_bcrypt_description = Bcrypt Erlang / C library
+pkg_bcrypt_homepage = https://github.com/erlangpack/bcrypt
+pkg_bcrypt_fetch = git
+pkg_bcrypt_repo = https://github.com/erlangpack/bcrypt.git
+pkg_bcrypt_commit = master
+
+PACKAGES += beam
+pkg_beam_name = beam
+pkg_beam_description = BEAM emulator written in Erlang
+pkg_beam_homepage = https://github.com/tonyrog/beam
+pkg_beam_fetch = git
+pkg_beam_repo = https://github.com/tonyrog/beam
+pkg_beam_commit = master
+
+PACKAGES += beanstalk
+pkg_beanstalk_name = beanstalk
+pkg_beanstalk_description = An Erlang client for beanstalkd
+pkg_beanstalk_homepage = https://github.com/tim/erlang-beanstalk
+pkg_beanstalk_fetch = git
+pkg_beanstalk_repo = https://github.com/tim/erlang-beanstalk
+pkg_beanstalk_commit = master
+
+PACKAGES += bear
+pkg_bear_name = bear
+pkg_bear_description = a set of statistics functions for erlang
+pkg_bear_homepage = https://github.com/boundary/bear
+pkg_bear_fetch = git
+pkg_bear_repo = https://github.com/boundary/bear
+pkg_bear_commit = master
+
+PACKAGES += bertconf
+pkg_bertconf_name = bertconf
+pkg_bertconf_description = Make ETS tables out of static BERT files that are auto-reloaded
+pkg_bertconf_homepage = https://github.com/ferd/bertconf
+pkg_bertconf_fetch = git
+pkg_bertconf_repo = https://github.com/ferd/bertconf
+pkg_bertconf_commit = master
+
+PACKAGES += bifrost
+pkg_bifrost_name = bifrost
+pkg_bifrost_description = Erlang FTP Server Framework
+pkg_bifrost_homepage = https://github.com/thorstadt/bifrost
+pkg_bifrost_fetch = git
+pkg_bifrost_repo = https://github.com/thorstadt/bifrost
+pkg_bifrost_commit = master
+
+PACKAGES += binpp
+pkg_binpp_name = binpp
+pkg_binpp_description = Erlang Binary Pretty Printer
+pkg_binpp_homepage = https://github.com/jtendo/binpp
+pkg_binpp_fetch = git
+pkg_binpp_repo = https://github.com/jtendo/binpp
+pkg_binpp_commit = master
+
+PACKAGES += bisect
+pkg_bisect_name = bisect
+pkg_bisect_description = Ordered fixed-size binary dictionary in Erlang
+pkg_bisect_homepage = https://github.com/knutin/bisect
+pkg_bisect_fetch = git
+pkg_bisect_repo = https://github.com/knutin/bisect
+pkg_bisect_commit = master
+
+PACKAGES += bitcask
+pkg_bitcask_name = bitcask
+pkg_bitcask_description = because you need another key/value storage engine
+pkg_bitcask_homepage = https://github.com/basho/bitcask
+pkg_bitcask_fetch = git
+pkg_bitcask_repo = https://github.com/basho/bitcask
+pkg_bitcask_commit = develop
+
+PACKAGES += bitstore
+pkg_bitstore_name = bitstore
+pkg_bitstore_description = A document based ontology development environment
+pkg_bitstore_homepage = https://github.com/bdionne/bitstore
+pkg_bitstore_fetch = git
+pkg_bitstore_repo = https://github.com/bdionne/bitstore
+pkg_bitstore_commit = master
+
+PACKAGES += bootstrap
+pkg_bootstrap_name = bootstrap
+pkg_bootstrap_description = A simple, yet powerful Erlang cluster bootstrapping application.
+pkg_bootstrap_homepage = https://github.com/schlagert/bootstrap
+pkg_bootstrap_fetch = git
+pkg_bootstrap_repo = https://github.com/schlagert/bootstrap
+pkg_bootstrap_commit = master
+
+PACKAGES += boss
+pkg_boss_name = boss
+pkg_boss_description = Erlang web MVC, now featuring Comet
+pkg_boss_homepage = https://github.com/ChicagoBoss/ChicagoBoss
+pkg_boss_fetch = git
+pkg_boss_repo = https://github.com/ChicagoBoss/ChicagoBoss
+pkg_boss_commit = master
+
+PACKAGES += boss_db
+pkg_boss_db_name = boss_db
+pkg_boss_db_description = BossDB: a sharded, caching, pooling, evented ORM for Erlang
+pkg_boss_db_homepage = https://github.com/ErlyORM/boss_db
+pkg_boss_db_fetch = git
+pkg_boss_db_repo = https://github.com/ErlyORM/boss_db
+pkg_boss_db_commit = master
+
+PACKAGES += brod
+pkg_brod_name = brod
+pkg_brod_description = Kafka client in Erlang
+pkg_brod_homepage = https://github.com/klarna/brod
+pkg_brod_fetch = git
+pkg_brod_repo = https://github.com/klarna/brod.git
+pkg_brod_commit = master
+
+PACKAGES += bson
+pkg_bson_name = bson
+pkg_bson_description = BSON documents in Erlang, see bsonspec.org
+pkg_bson_homepage = https://github.com/comtihon/bson-erlang
+pkg_bson_fetch = git
+pkg_bson_repo = https://github.com/comtihon/bson-erlang
+pkg_bson_commit = master
+
+PACKAGES += bullet
+pkg_bullet_name = bullet
+pkg_bullet_description = Simple, reliable, efficient streaming for Cowboy.
+pkg_bullet_homepage = http://ninenines.eu
+pkg_bullet_fetch = git
+pkg_bullet_repo = https://github.com/ninenines/bullet
+pkg_bullet_commit = master
+
+PACKAGES += cache
+pkg_cache_name = cache
+pkg_cache_description = Erlang in-memory cache
+pkg_cache_homepage = https://github.com/fogfish/cache
+pkg_cache_fetch = git
+pkg_cache_repo = https://github.com/fogfish/cache
+pkg_cache_commit = master
+
+PACKAGES += cake
+pkg_cake_name = cake
+pkg_cake_description = Really simple terminal colorization
+pkg_cake_homepage = https://github.com/darach/cake-erl
+pkg_cake_fetch = git
+pkg_cake_repo = https://github.com/darach/cake-erl
+pkg_cake_commit = master
+
+PACKAGES += carotene
+pkg_carotene_name = carotene
+pkg_carotene_description = Real-time server
+pkg_carotene_homepage = https://github.com/carotene/carotene
+pkg_carotene_fetch = git
+pkg_carotene_repo = https://github.com/carotene/carotene
+pkg_carotene_commit = master
+
+PACKAGES += cberl
+pkg_cberl_name = cberl
+pkg_cberl_description = NIF based Erlang bindings for Couchbase
+pkg_cberl_homepage = https://github.com/chitika/cberl
+pkg_cberl_fetch = git
+pkg_cberl_repo = https://github.com/chitika/cberl
+pkg_cberl_commit = master
+
+PACKAGES += cecho
+pkg_cecho_name = cecho
+pkg_cecho_description = An ncurses library for Erlang
+pkg_cecho_homepage = https://github.com/mazenharake/cecho
+pkg_cecho_fetch = git
+pkg_cecho_repo = https://github.com/mazenharake/cecho
+pkg_cecho_commit = master
+
+PACKAGES += cferl
+pkg_cferl_name = cferl
+pkg_cferl_description = Rackspace / Open Stack Cloud Files Erlang Client
+pkg_cferl_homepage = https://github.com/ddossot/cferl
+pkg_cferl_fetch = git
+pkg_cferl_repo = https://github.com/ddossot/cferl
+pkg_cferl_commit = master
+
+PACKAGES += chaos_monkey
+pkg_chaos_monkey_name = chaos_monkey
+pkg_chaos_monkey_description = This is The CHAOS MONKEY. It will kill your processes.
+pkg_chaos_monkey_homepage = https://github.com/dLuna/chaos_monkey
+pkg_chaos_monkey_fetch = git
+pkg_chaos_monkey_repo = https://github.com/dLuna/chaos_monkey
+pkg_chaos_monkey_commit = master
+
+PACKAGES += check_node
+pkg_check_node_name = check_node
+pkg_check_node_description = Nagios Scripts for monitoring Riak
+pkg_check_node_homepage = https://github.com/basho-labs/riak_nagios
+pkg_check_node_fetch = git
+pkg_check_node_repo = https://github.com/basho-labs/riak_nagios
+pkg_check_node_commit = master
+
+PACKAGES += chronos
+pkg_chronos_name = chronos
+pkg_chronos_description = Timer module for Erlang that makes it easy to abstract time out of the tests.
+pkg_chronos_homepage = https://github.com/lehoff/chronos
+pkg_chronos_fetch = git
+pkg_chronos_repo = https://github.com/lehoff/chronos
+pkg_chronos_commit = master
+
+PACKAGES += chumak
+pkg_chumak_name = chumak
+pkg_chumak_description = Pure Erlang implementation of ZeroMQ Message Transport Protocol.
+pkg_chumak_homepage = http://choven.ca
+pkg_chumak_fetch = git
+pkg_chumak_repo = https://github.com/chovencorp/chumak
+pkg_chumak_commit = master
+
+PACKAGES += cl
+pkg_cl_name = cl
+pkg_cl_description = OpenCL binding for Erlang
+pkg_cl_homepage = https://github.com/tonyrog/cl
+pkg_cl_fetch = git
+pkg_cl_repo = https://github.com/tonyrog/cl
+pkg_cl_commit = master
+
+PACKAGES += clique
+pkg_clique_name = clique
+pkg_clique_description = CLI Framework for Erlang
+pkg_clique_homepage = https://github.com/basho/clique
+pkg_clique_fetch = git
+pkg_clique_repo = https://github.com/basho/clique
+pkg_clique_commit = develop
+
+PACKAGES += cloudi_core
+pkg_cloudi_core_name = cloudi_core
+pkg_cloudi_core_description = CloudI internal service runtime
+pkg_cloudi_core_homepage = http://cloudi.org/
+pkg_cloudi_core_fetch = git
+pkg_cloudi_core_repo = https://github.com/CloudI/cloudi_core
+pkg_cloudi_core_commit = master
+
+PACKAGES += cloudi_service_api_requests
+pkg_cloudi_service_api_requests_name = cloudi_service_api_requests
+pkg_cloudi_service_api_requests_description = CloudI Service API requests (JSON-RPC/Erlang-term support)
+pkg_cloudi_service_api_requests_homepage = http://cloudi.org/
+pkg_cloudi_service_api_requests_fetch = git
+pkg_cloudi_service_api_requests_repo = https://github.com/CloudI/cloudi_service_api_requests
+pkg_cloudi_service_api_requests_commit = master
+
+PACKAGES += cloudi_service_db
+pkg_cloudi_service_db_name = cloudi_service_db
+pkg_cloudi_service_db_description = CloudI Database (in-memory/testing/generic)
+pkg_cloudi_service_db_homepage = http://cloudi.org/
+pkg_cloudi_service_db_fetch = git
+pkg_cloudi_service_db_repo = https://github.com/CloudI/cloudi_service_db
+pkg_cloudi_service_db_commit = master
+
+PACKAGES += cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_name = cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_description = Cassandra CloudI Service
+pkg_cloudi_service_db_cassandra_homepage = http://cloudi.org/
+pkg_cloudi_service_db_cassandra_fetch = git
+pkg_cloudi_service_db_cassandra_repo = https://github.com/CloudI/cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_commit = master
+
+PACKAGES += cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_name = cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_description = Cassandra CQL CloudI Service
+pkg_cloudi_service_db_cassandra_cql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_cassandra_cql_fetch = git
+pkg_cloudi_service_db_cassandra_cql_repo = https://github.com/CloudI/cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_commit = master
+
+PACKAGES += cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_name = cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_description = CouchDB CloudI Service
+pkg_cloudi_service_db_couchdb_homepage = http://cloudi.org/
+pkg_cloudi_service_db_couchdb_fetch = git
+pkg_cloudi_service_db_couchdb_repo = https://github.com/CloudI/cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_commit = master
+
+PACKAGES += cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_name = cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_description = elasticsearch CloudI Service
+pkg_cloudi_service_db_elasticsearch_homepage = http://cloudi.org/
+pkg_cloudi_service_db_elasticsearch_fetch = git
+pkg_cloudi_service_db_elasticsearch_repo = https://github.com/CloudI/cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_commit = master
+
+PACKAGES += cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_name = cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_description = memcached CloudI Service
+pkg_cloudi_service_db_memcached_homepage = http://cloudi.org/
+pkg_cloudi_service_db_memcached_fetch = git
+pkg_cloudi_service_db_memcached_repo = https://github.com/CloudI/cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_commit = master
+
+PACKAGES += cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_name = cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_description = MySQL CloudI Service
+pkg_cloudi_service_db_mysql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_mysql_fetch = git
+pkg_cloudi_service_db_mysql_repo = https://github.com/CloudI/cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_commit = master
+
+PACKAGES += cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_name = cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_description = PostgreSQL CloudI Service
+pkg_cloudi_service_db_pgsql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_pgsql_fetch = git
+pkg_cloudi_service_db_pgsql_repo = https://github.com/CloudI/cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_commit = master
+
+PACKAGES += cloudi_service_db_riak
+pkg_cloudi_service_db_riak_name = cloudi_service_db_riak
+pkg_cloudi_service_db_riak_description = Riak CloudI Service
+pkg_cloudi_service_db_riak_homepage = http://cloudi.org/
+pkg_cloudi_service_db_riak_fetch = git
+pkg_cloudi_service_db_riak_repo = https://github.com/CloudI/cloudi_service_db_riak
+pkg_cloudi_service_db_riak_commit = master
+
+PACKAGES += cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_name = cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_description = Tokyo Tyrant CloudI Service
+pkg_cloudi_service_db_tokyotyrant_homepage = http://cloudi.org/
+pkg_cloudi_service_db_tokyotyrant_fetch = git
+pkg_cloudi_service_db_tokyotyrant_repo = https://github.com/CloudI/cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_commit = master
+
+PACKAGES += cloudi_service_filesystem
+pkg_cloudi_service_filesystem_name = cloudi_service_filesystem
+pkg_cloudi_service_filesystem_description = Filesystem CloudI Service
+pkg_cloudi_service_filesystem_homepage = http://cloudi.org/
+pkg_cloudi_service_filesystem_fetch = git
+pkg_cloudi_service_filesystem_repo = https://github.com/CloudI/cloudi_service_filesystem
+pkg_cloudi_service_filesystem_commit = master
+
+PACKAGES += cloudi_service_http_client
+pkg_cloudi_service_http_client_name = cloudi_service_http_client
+pkg_cloudi_service_http_client_description = HTTP client CloudI Service
+pkg_cloudi_service_http_client_homepage = http://cloudi.org/
+pkg_cloudi_service_http_client_fetch = git
+pkg_cloudi_service_http_client_repo = https://github.com/CloudI/cloudi_service_http_client
+pkg_cloudi_service_http_client_commit = master
+
+PACKAGES += cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_name = cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_description = cowboy HTTP/HTTPS CloudI Service
+pkg_cloudi_service_http_cowboy_homepage = http://cloudi.org/
+pkg_cloudi_service_http_cowboy_fetch = git
+pkg_cloudi_service_http_cowboy_repo = https://github.com/CloudI/cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_commit = master
+
+PACKAGES += cloudi_service_http_elli
+pkg_cloudi_service_http_elli_name = cloudi_service_http_elli
+pkg_cloudi_service_http_elli_description = elli HTTP CloudI Service
+pkg_cloudi_service_http_elli_homepage = http://cloudi.org/
+pkg_cloudi_service_http_elli_fetch = git
+pkg_cloudi_service_http_elli_repo = https://github.com/CloudI/cloudi_service_http_elli
+pkg_cloudi_service_http_elli_commit = master
+
+PACKAGES += cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_name = cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_description = Map/Reduce CloudI Service
+pkg_cloudi_service_map_reduce_homepage = http://cloudi.org/
+pkg_cloudi_service_map_reduce_fetch = git
+pkg_cloudi_service_map_reduce_repo = https://github.com/CloudI/cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_commit = master
+
+PACKAGES += cloudi_service_oauth1
+pkg_cloudi_service_oauth1_name = cloudi_service_oauth1
+pkg_cloudi_service_oauth1_description = OAuth v1.0 CloudI Service
+pkg_cloudi_service_oauth1_homepage = http://cloudi.org/
+pkg_cloudi_service_oauth1_fetch = git
+pkg_cloudi_service_oauth1_repo = https://github.com/CloudI/cloudi_service_oauth1
+pkg_cloudi_service_oauth1_commit = master
+
+PACKAGES += cloudi_service_queue
+pkg_cloudi_service_queue_name = cloudi_service_queue
+pkg_cloudi_service_queue_description = Persistent Queue Service
+pkg_cloudi_service_queue_homepage = http://cloudi.org/
+pkg_cloudi_service_queue_fetch = git
+pkg_cloudi_service_queue_repo = https://github.com/CloudI/cloudi_service_queue
+pkg_cloudi_service_queue_commit = master
+
+PACKAGES += cloudi_service_quorum
+pkg_cloudi_service_quorum_name = cloudi_service_quorum
+pkg_cloudi_service_quorum_description = CloudI Quorum Service
+pkg_cloudi_service_quorum_homepage = http://cloudi.org/
+pkg_cloudi_service_quorum_fetch = git
+pkg_cloudi_service_quorum_repo = https://github.com/CloudI/cloudi_service_quorum
+pkg_cloudi_service_quorum_commit = master
+
+PACKAGES += cloudi_service_router
+pkg_cloudi_service_router_name = cloudi_service_router
+pkg_cloudi_service_router_description = CloudI Router Service
+pkg_cloudi_service_router_homepage = http://cloudi.org/
+pkg_cloudi_service_router_fetch = git
+pkg_cloudi_service_router_repo = https://github.com/CloudI/cloudi_service_router
+pkg_cloudi_service_router_commit = master
+
+PACKAGES += cloudi_service_tcp
+pkg_cloudi_service_tcp_name = cloudi_service_tcp
+pkg_cloudi_service_tcp_description = TCP CloudI Service
+pkg_cloudi_service_tcp_homepage = http://cloudi.org/
+pkg_cloudi_service_tcp_fetch = git
+pkg_cloudi_service_tcp_repo = https://github.com/CloudI/cloudi_service_tcp
+pkg_cloudi_service_tcp_commit = master
+
+PACKAGES += cloudi_service_timers
+pkg_cloudi_service_timers_name = cloudi_service_timers
+pkg_cloudi_service_timers_description = Timers CloudI Service
+pkg_cloudi_service_timers_homepage = http://cloudi.org/
+pkg_cloudi_service_timers_fetch = git
+pkg_cloudi_service_timers_repo = https://github.com/CloudI/cloudi_service_timers
+pkg_cloudi_service_timers_commit = master
+
+PACKAGES += cloudi_service_udp
+pkg_cloudi_service_udp_name = cloudi_service_udp
+pkg_cloudi_service_udp_description = UDP CloudI Service
+pkg_cloudi_service_udp_homepage = http://cloudi.org/
+pkg_cloudi_service_udp_fetch = git
+pkg_cloudi_service_udp_repo = https://github.com/CloudI/cloudi_service_udp
+pkg_cloudi_service_udp_commit = master
+
+PACKAGES += cloudi_service_validate
+pkg_cloudi_service_validate_name = cloudi_service_validate
+pkg_cloudi_service_validate_description = CloudI Validate Service
+pkg_cloudi_service_validate_homepage = http://cloudi.org/
+pkg_cloudi_service_validate_fetch = git
+pkg_cloudi_service_validate_repo = https://github.com/CloudI/cloudi_service_validate
+pkg_cloudi_service_validate_commit = master
+
+PACKAGES += cloudi_service_zeromq
+pkg_cloudi_service_zeromq_name = cloudi_service_zeromq
+pkg_cloudi_service_zeromq_description = ZeroMQ CloudI Service
+pkg_cloudi_service_zeromq_homepage = http://cloudi.org/
+pkg_cloudi_service_zeromq_fetch = git
+pkg_cloudi_service_zeromq_repo = https://github.com/CloudI/cloudi_service_zeromq
+pkg_cloudi_service_zeromq_commit = master
+
+PACKAGES += cluster_info
+pkg_cluster_info_name = cluster_info
+pkg_cluster_info_description = Fork of Hibari's nifty cluster_info OTP app
+pkg_cluster_info_homepage = https://github.com/basho/cluster_info
+pkg_cluster_info_fetch = git
+pkg_cluster_info_repo = https://github.com/basho/cluster_info
+pkg_cluster_info_commit = master
+
+PACKAGES += color
+pkg_color_name = color
+pkg_color_description = ANSI colors for your Erlang
+pkg_color_homepage = https://github.com/julianduque/erlang-color
+pkg_color_fetch = git
+pkg_color_repo = https://github.com/julianduque/erlang-color
+pkg_color_commit = master
+
+PACKAGES += confetti
+pkg_confetti_name = confetti
+pkg_confetti_description = Erlang configuration provider / application:get_env/2 on steroids
+pkg_confetti_homepage = https://github.com/jtendo/confetti
+pkg_confetti_fetch = git
+pkg_confetti_repo = https://github.com/jtendo/confetti
+pkg_confetti_commit = master
+
+PACKAGES += couchbeam
+pkg_couchbeam_name = couchbeam
+pkg_couchbeam_description = Apache CouchDB client in Erlang
+pkg_couchbeam_homepage = https://github.com/benoitc/couchbeam
+pkg_couchbeam_fetch = git
+pkg_couchbeam_repo = https://github.com/benoitc/couchbeam
+pkg_couchbeam_commit = master
+
+PACKAGES += covertool
+pkg_covertool_name = covertool
+pkg_covertool_description = Tool to convert Erlang cover data files into Cobertura XML reports
+pkg_covertool_homepage = https://github.com/idubrov/covertool
+pkg_covertool_fetch = git
+pkg_covertool_repo = https://github.com/idubrov/covertool
+pkg_covertool_commit = master
+
+PACKAGES += cowboy
+pkg_cowboy_name = cowboy
+pkg_cowboy_description = Small, fast and modular HTTP server.
+pkg_cowboy_homepage = http://ninenines.eu
+pkg_cowboy_fetch = git
+pkg_cowboy_repo = https://github.com/ninenines/cowboy
+pkg_cowboy_commit = 1.0.4
+
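+# Every entry in this index follows the same schema: pkg_<name>_name,
+# pkg_<name>_description, pkg_<name>_homepage, pkg_<name>_fetch,
+# pkg_<name>_repo and pkg_<name>_commit. A minimal consumption sketch,
+# assuming a project Makefile that includes this erlang.mk:
+#
+#   DEPS = cowboy          # resolves to the pinned cowboy entry above
+#   include erlang.mk
+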
+PACKAGES += cowdb
+pkg_cowdb_name = cowdb
+pkg_cowdb_description = Pure Key/Value database library for Erlang Applications
+pkg_cowdb_homepage = https://github.com/refuge/cowdb
+pkg_cowdb_fetch = git
+pkg_cowdb_repo = https://github.com/refuge/cowdb
+pkg_cowdb_commit = master
+
+PACKAGES += cowlib
+pkg_cowlib_name = cowlib
+pkg_cowlib_description = Support library for manipulating Web protocols.
+pkg_cowlib_homepage = http://ninenines.eu
+pkg_cowlib_fetch = git
+pkg_cowlib_repo = https://github.com/ninenines/cowlib
+pkg_cowlib_commit = 1.0.2
+
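+# A dep_<name> line in the consuming Makefile takes precedence over the
+# index entry of the same name; a sketch (the ref shown is illustrative):
+#
+#   DEPS = cowlib
+#   dep_cowlib = git https://github.com/ninenines/cowlib master
+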
+PACKAGES += cpg
+pkg_cpg_name = cpg
+pkg_cpg_description = CloudI Process Groups
+pkg_cpg_homepage = https://github.com/okeuday/cpg
+pkg_cpg_fetch = git
+pkg_cpg_repo = https://github.com/okeuday/cpg
+pkg_cpg_commit = master
+
+PACKAGES += cqerl
+pkg_cqerl_name = cqerl
+pkg_cqerl_description = Native Erlang CQL client for Cassandra
+pkg_cqerl_homepage = https://matehat.github.io/cqerl/
+pkg_cqerl_fetch = git
+pkg_cqerl_repo = https://github.com/matehat/cqerl
+pkg_cqerl_commit = master
+
+PACKAGES += cr
+pkg_cr_name = cr
+pkg_cr_description = Chain Replication
+pkg_cr_homepage = https://synrc.com/apps/cr/doc/cr.htm
+pkg_cr_fetch = git
+pkg_cr_repo = https://github.com/spawnproc/cr
+pkg_cr_commit = master
+
+PACKAGES += cuttlefish
+pkg_cuttlefish_name = cuttlefish
+pkg_cuttlefish_description = never lose your childlike sense of wonder baby cuttlefish, promise me?
+pkg_cuttlefish_homepage = https://github.com/basho/cuttlefish
+pkg_cuttlefish_fetch = git
+pkg_cuttlefish_repo = https://github.com/basho/cuttlefish
+pkg_cuttlefish_commit = master
+
+PACKAGES += damocles
+pkg_damocles_name = damocles
+pkg_damocles_description = Erlang library for generating adversarial network conditions for QAing distributed applications/systems on a single Linux box.
+pkg_damocles_homepage = https://github.com/lostcolony/damocles
+pkg_damocles_fetch = git
+pkg_damocles_repo = https://github.com/lostcolony/damocles
+pkg_damocles_commit = master
+
+PACKAGES += debbie
+pkg_debbie_name = debbie
+pkg_debbie_description = .DEB Built In Erlang
+pkg_debbie_homepage = https://github.com/crownedgrouse/debbie
+pkg_debbie_fetch = git
+pkg_debbie_repo = https://github.com/crownedgrouse/debbie
+pkg_debbie_commit = master
+
+PACKAGES += decimal
+pkg_decimal_name = decimal
+pkg_decimal_description = An Erlang decimal arithmetic library
+pkg_decimal_homepage = https://github.com/tim/erlang-decimal
+pkg_decimal_fetch = git
+pkg_decimal_repo = https://github.com/tim/erlang-decimal
+pkg_decimal_commit = master
+
+PACKAGES += detergent
+pkg_detergent_name = detergent
+pkg_detergent_description = An emulsifying Erlang SOAP library
+pkg_detergent_homepage = https://github.com/devinus/detergent
+pkg_detergent_fetch = git
+pkg_detergent_repo = https://github.com/devinus/detergent
+pkg_detergent_commit = master
+
+PACKAGES += detest
+pkg_detest_name = detest
+pkg_detest_description = Tool for running tests on a cluster of erlang nodes
+pkg_detest_homepage = https://github.com/biokoda/detest
+pkg_detest_fetch = git
+pkg_detest_repo = https://github.com/biokoda/detest
+pkg_detest_commit = master
+
+PACKAGES += dh_date
+pkg_dh_date_name = dh_date
+pkg_dh_date_description = Date formatting / parsing library for erlang
+pkg_dh_date_homepage = https://github.com/daleharvey/dh_date
+pkg_dh_date_fetch = git
+pkg_dh_date_repo = https://github.com/daleharvey/dh_date
+pkg_dh_date_commit = master
+
+PACKAGES += dirbusterl
+pkg_dirbusterl_name = dirbusterl
+pkg_dirbusterl_description = DirBuster successor in Erlang
+pkg_dirbusterl_homepage = https://github.com/silentsignal/DirBustErl
+pkg_dirbusterl_fetch = git
+pkg_dirbusterl_repo = https://github.com/silentsignal/DirBustErl
+pkg_dirbusterl_commit = master
+
+PACKAGES += dispcount
+pkg_dispcount_name = dispcount
+pkg_dispcount_description = Erlang task dispatcher based on ETS counters.
+pkg_dispcount_homepage = https://github.com/ferd/dispcount
+pkg_dispcount_fetch = git
+pkg_dispcount_repo = https://github.com/ferd/dispcount
+pkg_dispcount_commit = master
+
+PACKAGES += dlhttpc
+pkg_dlhttpc_name = dlhttpc
+pkg_dlhttpc_description = dispcount-based lhttpc fork for massive amounts of requests to limited endpoints
+pkg_dlhttpc_homepage = https://github.com/ferd/dlhttpc
+pkg_dlhttpc_fetch = git
+pkg_dlhttpc_repo = https://github.com/ferd/dlhttpc
+pkg_dlhttpc_commit = master
+
+PACKAGES += dns
+pkg_dns_name = dns
+pkg_dns_description = Erlang DNS library
+pkg_dns_homepage = https://github.com/aetrion/dns_erlang
+pkg_dns_fetch = git
+pkg_dns_repo = https://github.com/aetrion/dns_erlang
+pkg_dns_commit = master
+
+PACKAGES += dnssd
+pkg_dnssd_name = dnssd
+pkg_dnssd_description = Erlang interface to Apple's Bonjour DNS Service Discovery implementation
+pkg_dnssd_homepage = https://github.com/benoitc/dnssd_erlang
+pkg_dnssd_fetch = git
+pkg_dnssd_repo = https://github.com/benoitc/dnssd_erlang
+pkg_dnssd_commit = master
+
+PACKAGES += dynamic_compile
+pkg_dynamic_compile_name = dynamic_compile
+pkg_dynamic_compile_description = compile and load erlang modules from string input
+pkg_dynamic_compile_homepage = https://github.com/jkvor/dynamic_compile
+pkg_dynamic_compile_fetch = git
+pkg_dynamic_compile_repo = https://github.com/jkvor/dynamic_compile
+pkg_dynamic_compile_commit = master
+
+PACKAGES += e2
+pkg_e2_name = e2
+pkg_e2_description = Library to simplify writing correct OTP applications.
+pkg_e2_homepage = http://e2project.org
+pkg_e2_fetch = git
+pkg_e2_repo = https://github.com/gar1t/e2
+pkg_e2_commit = master
+
+PACKAGES += eamf
+pkg_eamf_name = eamf
+pkg_eamf_description = eAMF provides Action Message Format (AMF) support for Erlang
+pkg_eamf_homepage = https://github.com/mrinalwadhwa/eamf
+pkg_eamf_fetch = git
+pkg_eamf_repo = https://github.com/mrinalwadhwa/eamf
+pkg_eamf_commit = master
+
+PACKAGES += eavro
+pkg_eavro_name = eavro
+pkg_eavro_description = Apache Avro encoder/decoder
+pkg_eavro_homepage = https://github.com/SIfoxDevTeam/eavro
+pkg_eavro_fetch = git
+pkg_eavro_repo = https://github.com/SIfoxDevTeam/eavro
+pkg_eavro_commit = master
+
+PACKAGES += ecapnp
+pkg_ecapnp_name = ecapnp
+pkg_ecapnp_description = Cap'n Proto library for Erlang
+pkg_ecapnp_homepage = https://github.com/kaos/ecapnp
+pkg_ecapnp_fetch = git
+pkg_ecapnp_repo = https://github.com/kaos/ecapnp
+pkg_ecapnp_commit = master
+
+PACKAGES += econfig
+pkg_econfig_name = econfig
+pkg_econfig_description = simple Erlang config handler using INI files
+pkg_econfig_homepage = https://github.com/benoitc/econfig
+pkg_econfig_fetch = git
+pkg_econfig_repo = https://github.com/benoitc/econfig
+pkg_econfig_commit = master
+
+PACKAGES += edate
+pkg_edate_name = edate
+pkg_edate_description = date manipulation library for erlang
+pkg_edate_homepage = https://github.com/dweldon/edate
+pkg_edate_fetch = git
+pkg_edate_repo = https://github.com/dweldon/edate
+pkg_edate_commit = master
+
+PACKAGES += edgar
+pkg_edgar_name = edgar
+pkg_edgar_description = Erlang Does GNU AR
+pkg_edgar_homepage = https://github.com/crownedgrouse/edgar
+pkg_edgar_fetch = git
+pkg_edgar_repo = https://github.com/crownedgrouse/edgar
+pkg_edgar_commit = master
+
+PACKAGES += edis
+pkg_edis_name = edis
+pkg_edis_description = An Erlang implementation of Redis KV Store
+pkg_edis_homepage = http://inaka.github.com/edis/
+pkg_edis_fetch = git
+pkg_edis_repo = https://github.com/inaka/edis
+pkg_edis_commit = master
+
+PACKAGES += edns
+pkg_edns_name = edns
+pkg_edns_description = Erlang/OTP DNS server
+pkg_edns_homepage = https://github.com/hcvst/erlang-dns
+pkg_edns_fetch = git
+pkg_edns_repo = https://github.com/hcvst/erlang-dns
+pkg_edns_commit = master
+
+PACKAGES += edown
+pkg_edown_name = edown
+pkg_edown_description = EDoc extension for generating Github-flavored Markdown
+pkg_edown_homepage = https://github.com/uwiger/edown
+pkg_edown_fetch = git
+pkg_edown_repo = https://github.com/uwiger/edown
+pkg_edown_commit = master
+
+PACKAGES += eep
+pkg_eep_name = eep
+pkg_eep_description = Erlang Easy Profiling (eep) application provides a way to analyze application performance and call hierarchy
+pkg_eep_homepage = https://github.com/virtan/eep
+pkg_eep_fetch = git
+pkg_eep_repo = https://github.com/virtan/eep
+pkg_eep_commit = master
+
+PACKAGES += eep_app
+pkg_eep_app_name = eep_app
+pkg_eep_app_description = Embedded Event Processing
+pkg_eep_app_homepage = https://github.com/darach/eep-erl
+pkg_eep_app_fetch = git
+pkg_eep_app_repo = https://github.com/darach/eep-erl
+pkg_eep_app_commit = master
+
+PACKAGES += efene
+pkg_efene_name = efene
+pkg_efene_description = Alternative syntax for the Erlang Programming Language focusing on simplicity, ease of use and programmer UX
+pkg_efene_homepage = https://github.com/efene/efene
+pkg_efene_fetch = git
+pkg_efene_repo = https://github.com/efene/efene
+pkg_efene_commit = master
+
+PACKAGES += egeoip
+pkg_egeoip_name = egeoip
+pkg_egeoip_description = Erlang IP Geolocation module, currently supporting the MaxMind GeoLite City Database.
+pkg_egeoip_homepage = https://github.com/mochi/egeoip
+pkg_egeoip_fetch = git
+pkg_egeoip_repo = https://github.com/mochi/egeoip
+pkg_egeoip_commit = master
+
+PACKAGES += ehsa
+pkg_ehsa_name = ehsa
+pkg_ehsa_description = Erlang HTTP server basic and digest authentication modules
+pkg_ehsa_homepage = https://bitbucket.org/a12n/ehsa
+pkg_ehsa_fetch = hg
+pkg_ehsa_repo = https://bitbucket.org/a12n/ehsa
+pkg_ehsa_commit = default
+
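+# pkg_<name>_fetch selects the fetch method; the ehsa entry above uses
+# Mercurial. The equivalent explicit override in a consuming Makefile,
+# as a sketch reusing the same URL and ref:
+#
+#   DEPS = ehsa
+#   dep_ehsa = hg https://bitbucket.org/a12n/ehsa default
+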
+PACKAGES += ej
+pkg_ej_name = ej
+pkg_ej_description = Helper module for working with Erlang terms representing JSON
+pkg_ej_homepage = https://github.com/seth/ej
+pkg_ej_fetch = git
+pkg_ej_repo = https://github.com/seth/ej
+pkg_ej_commit = master
+
+PACKAGES += ejabberd
+pkg_ejabberd_name = ejabberd
+pkg_ejabberd_description = Robust, ubiquitous and massively scalable Jabber / XMPP Instant Messaging platform
+pkg_ejabberd_homepage = https://github.com/processone/ejabberd
+pkg_ejabberd_fetch = git
+pkg_ejabberd_repo = https://github.com/processone/ejabberd
+pkg_ejabberd_commit = master
+
+PACKAGES += ejwt
+pkg_ejwt_name = ejwt
+pkg_ejwt_description = Erlang library for JSON Web Tokens
+pkg_ejwt_homepage = https://github.com/artefactop/ejwt
+pkg_ejwt_fetch = git
+pkg_ejwt_repo = https://github.com/artefactop/ejwt
+pkg_ejwt_commit = master
+
+PACKAGES += ekaf
+pkg_ekaf_name = ekaf
+pkg_ekaf_description = A minimal, high-performance Kafka client in Erlang.
+pkg_ekaf_homepage = https://github.com/helpshift/ekaf
+pkg_ekaf_fetch = git
+pkg_ekaf_repo = https://github.com/helpshift/ekaf
+pkg_ekaf_commit = master
+
+PACKAGES += elarm
+pkg_elarm_name = elarm
+pkg_elarm_description = Alarm Manager for Erlang.
+pkg_elarm_homepage = https://github.com/esl/elarm
+pkg_elarm_fetch = git
+pkg_elarm_repo = https://github.com/esl/elarm
+pkg_elarm_commit = master
+
+PACKAGES += eleveldb
+pkg_eleveldb_name = eleveldb
+pkg_eleveldb_description = Erlang LevelDB API
+pkg_eleveldb_homepage = https://github.com/basho/eleveldb
+pkg_eleveldb_fetch = git
+pkg_eleveldb_repo = https://github.com/basho/eleveldb
+pkg_eleveldb_commit = master
+
+PACKAGES += elixir
+pkg_elixir_name = elixir
+pkg_elixir_description = Elixir is a dynamic, functional language designed for building scalable and maintainable applications
+pkg_elixir_homepage = https://elixir-lang.org/
+pkg_elixir_fetch = git
+pkg_elixir_repo = https://github.com/elixir-lang/elixir
+pkg_elixir_commit = master
+
+PACKAGES += elli
+pkg_elli_name = elli
+pkg_elli_description = Simple, robust and performant Erlang web server
+pkg_elli_homepage = https://github.com/elli-lib/elli
+pkg_elli_fetch = git
+pkg_elli_repo = https://github.com/elli-lib/elli
+pkg_elli_commit = master
+
+PACKAGES += elvis
+pkg_elvis_name = elvis
+pkg_elvis_description = Erlang Style Reviewer
+pkg_elvis_homepage = https://github.com/inaka/elvis
+pkg_elvis_fetch = git
+pkg_elvis_repo = https://github.com/inaka/elvis
+pkg_elvis_commit = master
+
+PACKAGES += emagick
+pkg_emagick_name = emagick
+pkg_emagick_description = Wrapper for Graphics/ImageMagick command line tool.
+pkg_emagick_homepage = https://github.com/kivra/emagick
+pkg_emagick_fetch = git
+pkg_emagick_repo = https://github.com/kivra/emagick
+pkg_emagick_commit = master
+
+PACKAGES += emysql
+pkg_emysql_name = emysql
+pkg_emysql_description = Stable, pure Erlang MySQL driver.
+pkg_emysql_homepage = https://github.com/Eonblast/Emysql
+pkg_emysql_fetch = git
+pkg_emysql_repo = https://github.com/Eonblast/Emysql
+pkg_emysql_commit = master
+
+PACKAGES += enm
+pkg_enm_name = enm
+pkg_enm_description = Erlang driver for nanomsg
+pkg_enm_homepage = https://github.com/basho/enm
+pkg_enm_fetch = git
+pkg_enm_repo = https://github.com/basho/enm
+pkg_enm_commit = master
+
+PACKAGES += entop
+pkg_entop_name = entop
+pkg_entop_description = A top-like tool for monitoring an Erlang node
+pkg_entop_homepage = https://github.com/mazenharake/entop
+pkg_entop_fetch = git
+pkg_entop_repo = https://github.com/mazenharake/entop
+pkg_entop_commit = master
+
+PACKAGES += epcap
+pkg_epcap_name = epcap
+pkg_epcap_description = Erlang packet capture interface using pcap
+pkg_epcap_homepage = https://github.com/msantos/epcap
+pkg_epcap_fetch = git
+pkg_epcap_repo = https://github.com/msantos/epcap
+pkg_epcap_commit = master
+
+PACKAGES += eper
+pkg_eper_name = eper
+pkg_eper_description = Erlang performance and debugging tools.
+pkg_eper_homepage = https://github.com/massemanet/eper
+pkg_eper_fetch = git
+pkg_eper_repo = https://github.com/massemanet/eper
+pkg_eper_commit = master
+
+PACKAGES += epgsql
+pkg_epgsql_name = epgsql
+pkg_epgsql_description = Erlang PostgreSQL client library.
+pkg_epgsql_homepage = https://github.com/epgsql/epgsql
+pkg_epgsql_fetch = git
+pkg_epgsql_repo = https://github.com/epgsql/epgsql
+pkg_epgsql_commit = master
+
+PACKAGES += episcina
+pkg_episcina_name = episcina
+pkg_episcina_description = A simple, non-intrusive resource pool for connections
+pkg_episcina_homepage = https://github.com/erlware/episcina
+pkg_episcina_fetch = git
+pkg_episcina_repo = https://github.com/erlware/episcina
+pkg_episcina_commit = master
+
+PACKAGES += eplot
+pkg_eplot_name = eplot
+pkg_eplot_description = A plot engine written in erlang.
+pkg_eplot_homepage = https://github.com/psyeugenic/eplot
+pkg_eplot_fetch = git
+pkg_eplot_repo = https://github.com/psyeugenic/eplot
+pkg_eplot_commit = master
+
+PACKAGES += epocxy
+pkg_epocxy_name = epocxy
+pkg_epocxy_description = Erlang Patterns of Concurrency
+pkg_epocxy_homepage = https://github.com/duomark/epocxy
+pkg_epocxy_fetch = git
+pkg_epocxy_repo = https://github.com/duomark/epocxy
+pkg_epocxy_commit = master
+
+PACKAGES += epubnub
+pkg_epubnub_name = epubnub
+pkg_epubnub_description = Erlang PubNub API
+pkg_epubnub_homepage = https://github.com/tsloughter/epubnub
+pkg_epubnub_fetch = git
+pkg_epubnub_repo = https://github.com/tsloughter/epubnub
+pkg_epubnub_commit = master
+
+PACKAGES += eqm
+pkg_eqm_name = eqm
+pkg_eqm_description = Erlang pub sub with supply-demand channels
+pkg_eqm_homepage = https://github.com/loucash/eqm
+pkg_eqm_fetch = git
+pkg_eqm_repo = https://github.com/loucash/eqm
+pkg_eqm_commit = master
+
+PACKAGES += eredis
+pkg_eredis_name = eredis
+pkg_eredis_description = Erlang Redis client
+pkg_eredis_homepage = https://github.com/wooga/eredis
+pkg_eredis_fetch = git
+pkg_eredis_repo = https://github.com/wooga/eredis
+pkg_eredis_commit = master
+
+PACKAGES += eredis_pool
+pkg_eredis_pool_name = eredis_pool
+pkg_eredis_pool_description = eredis_pool is a pool of Redis clients, using eredis and poolboy.
+pkg_eredis_pool_homepage = https://github.com/hiroeorz/eredis_pool
+pkg_eredis_pool_fetch = git
+pkg_eredis_pool_repo = https://github.com/hiroeorz/eredis_pool
+pkg_eredis_pool_commit = master
+
+PACKAGES += erl_streams
+pkg_erl_streams_name = erl_streams
+pkg_erl_streams_description = Streams in Erlang
+pkg_erl_streams_homepage = https://github.com/epappas/erl_streams
+pkg_erl_streams_fetch = git
+pkg_erl_streams_repo = https://github.com/epappas/erl_streams
+pkg_erl_streams_commit = master
+
+PACKAGES += erlang_cep
+pkg_erlang_cep_name = erlang_cep
+pkg_erlang_cep_description = A basic CEP package written in erlang
+pkg_erlang_cep_homepage = https://github.com/danmacklin/erlang_cep
+pkg_erlang_cep_fetch = git
+pkg_erlang_cep_repo = https://github.com/danmacklin/erlang_cep
+pkg_erlang_cep_commit = master
+
+PACKAGES += erlang_js
+pkg_erlang_js_name = erlang_js
+pkg_erlang_js_description = A linked-in driver for Erlang to Mozilla's Spidermonkey Javascript runtime.
+pkg_erlang_js_homepage = https://github.com/basho/erlang_js
+pkg_erlang_js_fetch = git
+pkg_erlang_js_repo = https://github.com/basho/erlang_js
+pkg_erlang_js_commit = master
+
+PACKAGES += erlang_localtime
+pkg_erlang_localtime_name = erlang_localtime
+pkg_erlang_localtime_description = Erlang library for conversion from one local time to another
+pkg_erlang_localtime_homepage = https://github.com/dmitryme/erlang_localtime
+pkg_erlang_localtime_fetch = git
+pkg_erlang_localtime_repo = https://github.com/dmitryme/erlang_localtime
+pkg_erlang_localtime_commit = master
+
+PACKAGES += erlang_smtp
+pkg_erlang_smtp_name = erlang_smtp
+pkg_erlang_smtp_description = Erlang SMTP and POP3 server code.
+pkg_erlang_smtp_homepage = https://github.com/tonyg/erlang-smtp
+pkg_erlang_smtp_fetch = git
+pkg_erlang_smtp_repo = https://github.com/tonyg/erlang-smtp
+pkg_erlang_smtp_commit = master
+
+PACKAGES += erlang_term
+pkg_erlang_term_name = erlang_term
+pkg_erlang_term_description = Erlang Term Info
+pkg_erlang_term_homepage = https://github.com/okeuday/erlang_term
+pkg_erlang_term_fetch = git
+pkg_erlang_term_repo = https://github.com/okeuday/erlang_term
+pkg_erlang_term_commit = master
+
+PACKAGES += erlastic_search
+pkg_erlastic_search_name = erlastic_search
+pkg_erlastic_search_description = An Erlang app for communicating with Elastic Search's REST interface.
+pkg_erlastic_search_homepage = https://github.com/tsloughter/erlastic_search
+pkg_erlastic_search_fetch = git
+pkg_erlastic_search_repo = https://github.com/tsloughter/erlastic_search
+pkg_erlastic_search_commit = master
+
+PACKAGES += erlasticsearch
+pkg_erlasticsearch_name = erlasticsearch
+pkg_erlasticsearch_description = Erlang thrift interface to elastic_search
+pkg_erlasticsearch_homepage = https://github.com/dieswaytoofast/erlasticsearch
+pkg_erlasticsearch_fetch = git
+pkg_erlasticsearch_repo = https://github.com/dieswaytoofast/erlasticsearch
+pkg_erlasticsearch_commit = master
+
+PACKAGES += erlbrake
+pkg_erlbrake_name = erlbrake
+pkg_erlbrake_description = Erlang Airbrake notification client
+pkg_erlbrake_homepage = https://github.com/kenpratt/erlbrake
+pkg_erlbrake_fetch = git
+pkg_erlbrake_repo = https://github.com/kenpratt/erlbrake
+pkg_erlbrake_commit = master
+
+PACKAGES += erlcloud
+pkg_erlcloud_name = erlcloud
+pkg_erlcloud_description = Cloud Computing library for erlang (Amazon EC2, S3, SQS, SimpleDB, Mechanical Turk, ELB)
+pkg_erlcloud_homepage = https://github.com/gleber/erlcloud
+pkg_erlcloud_fetch = git
+pkg_erlcloud_repo = https://github.com/gleber/erlcloud
+pkg_erlcloud_commit = master
+
+PACKAGES += erlcron
+pkg_erlcron_name = erlcron
+pkg_erlcron_description = Erlang cronish system
+pkg_erlcron_homepage = https://github.com/erlware/erlcron
+pkg_erlcron_fetch = git
+pkg_erlcron_repo = https://github.com/erlware/erlcron
+pkg_erlcron_commit = master
+
+PACKAGES += erldb
+pkg_erldb_name = erldb
+pkg_erldb_description = ORM (Object-relational mapping) application implemented in Erlang
+pkg_erldb_homepage = http://erldb.org
+pkg_erldb_fetch = git
+pkg_erldb_repo = https://github.com/erldb/erldb
+pkg_erldb_commit = master
+
+PACKAGES += erldis
+pkg_erldis_name = erldis
+pkg_erldis_description = redis erlang client library
+pkg_erldis_homepage = https://github.com/cstar/erldis
+pkg_erldis_fetch = git
+pkg_erldis_repo = https://github.com/cstar/erldis
+pkg_erldis_commit = master
+
+PACKAGES += erldns
+pkg_erldns_name = erldns
+pkg_erldns_description = DNS server, in erlang.
+pkg_erldns_homepage = https://github.com/aetrion/erl-dns
+pkg_erldns_fetch = git
+pkg_erldns_repo = https://github.com/aetrion/erl-dns
+pkg_erldns_commit = master
+
+PACKAGES += erldocker
+pkg_erldocker_name = erldocker
+pkg_erldocker_description = Docker Remote API client for Erlang
+pkg_erldocker_homepage = https://github.com/proger/erldocker
+pkg_erldocker_fetch = git
+pkg_erldocker_repo = https://github.com/proger/erldocker
+pkg_erldocker_commit = master
+
+PACKAGES += erlfsmon
+pkg_erlfsmon_name = erlfsmon
+pkg_erlfsmon_description = Erlang filesystem event watcher for Linux and OSX
+pkg_erlfsmon_homepage = https://github.com/proger/erlfsmon
+pkg_erlfsmon_fetch = git
+pkg_erlfsmon_repo = https://github.com/proger/erlfsmon
+pkg_erlfsmon_commit = master
+
+PACKAGES += erlgit
+pkg_erlgit_name = erlgit
+pkg_erlgit_description = Erlang convenience wrapper around git executable
+pkg_erlgit_homepage = https://github.com/gleber/erlgit
+pkg_erlgit_fetch = git
+pkg_erlgit_repo = https://github.com/gleber/erlgit
+pkg_erlgit_commit = master
+
+PACKAGES += erlguten
+pkg_erlguten_name = erlguten
+pkg_erlguten_description = ErlGuten is a system for high-quality typesetting, written purely in Erlang.
+pkg_erlguten_homepage = https://github.com/richcarl/erlguten
+pkg_erlguten_fetch = git
+pkg_erlguten_repo = https://github.com/richcarl/erlguten
+pkg_erlguten_commit = master
+
+PACKAGES += erlmc
+pkg_erlmc_name = erlmc
+pkg_erlmc_description = Erlang memcached binary protocol client
+pkg_erlmc_homepage = https://github.com/jkvor/erlmc
+pkg_erlmc_fetch = git
+pkg_erlmc_repo = https://github.com/jkvor/erlmc
+pkg_erlmc_commit = master
+
+PACKAGES += erlmongo
+pkg_erlmongo_name = erlmongo
+pkg_erlmongo_description = Record-based Erlang driver for MongoDB with GridFS support
+pkg_erlmongo_homepage = https://github.com/SergejJurecko/erlmongo
+pkg_erlmongo_fetch = git
+pkg_erlmongo_repo = https://github.com/SergejJurecko/erlmongo
+pkg_erlmongo_commit = master
+
+PACKAGES += erlog
+pkg_erlog_name = erlog
+pkg_erlog_description = Prolog interpreter in and for Erlang
+pkg_erlog_homepage = https://github.com/rvirding/erlog
+pkg_erlog_fetch = git
+pkg_erlog_repo = https://github.com/rvirding/erlog
+pkg_erlog_commit = master
+
+PACKAGES += erlpass
+pkg_erlpass_name = erlpass
+pkg_erlpass_description = A library to handle password hashing and changing in a safe manner, independent from any kind of storage whatsoever.
+pkg_erlpass_homepage = https://github.com/ferd/erlpass
+pkg_erlpass_fetch = git
+pkg_erlpass_repo = https://github.com/ferd/erlpass
+pkg_erlpass_commit = master
+
+PACKAGES += erlport
+pkg_erlport_name = erlport
+pkg_erlport_description = ErlPort - connect Erlang to other languages
+pkg_erlport_homepage = https://github.com/hdima/erlport
+pkg_erlport_fetch = git
+pkg_erlport_repo = https://github.com/hdima/erlport
+pkg_erlport_commit = master
+
+PACKAGES += erlsh
+pkg_erlsh_name = erlsh
+pkg_erlsh_description = Erlang shell tools
+pkg_erlsh_homepage = https://github.com/proger/erlsh
+pkg_erlsh_fetch = git
+pkg_erlsh_repo = https://github.com/proger/erlsh
+pkg_erlsh_commit = master
+
+PACKAGES += erlsha2
+pkg_erlsha2_name = erlsha2
+pkg_erlsha2_description = SHA-224, SHA-256, SHA-384, SHA-512 implemented in Erlang NIFs.
+pkg_erlsha2_homepage = https://github.com/vinoski/erlsha2
+pkg_erlsha2_fetch = git
+pkg_erlsha2_repo = https://github.com/vinoski/erlsha2
+pkg_erlsha2_commit = master
+
+PACKAGES += erlsom
+pkg_erlsom_name = erlsom
+pkg_erlsom_description = XML parser for Erlang
+pkg_erlsom_homepage = https://github.com/willemdj/erlsom
+pkg_erlsom_fetch = git
+pkg_erlsom_repo = https://github.com/willemdj/erlsom
+pkg_erlsom_commit = master
+
+PACKAGES += erlubi
+pkg_erlubi_name = erlubi
+pkg_erlubi_description = Ubigraph Erlang Client (and Process Visualizer)
+pkg_erlubi_homepage = https://github.com/krestenkrab/erlubi
+pkg_erlubi_fetch = git
+pkg_erlubi_repo = https://github.com/krestenkrab/erlubi
+pkg_erlubi_commit = master
+
+PACKAGES += erlvolt
+pkg_erlvolt_name = erlvolt
+pkg_erlvolt_description = VoltDB Erlang Client Driver
+pkg_erlvolt_homepage = https://github.com/VoltDB/voltdb-client-erlang
+pkg_erlvolt_fetch = git
+pkg_erlvolt_repo = https://github.com/VoltDB/voltdb-client-erlang
+pkg_erlvolt_commit = master
+
+PACKAGES += erlware_commons
+pkg_erlware_commons_name = erlware_commons
+pkg_erlware_commons_description = Erlware Commons is an Erlware project focused on all aspects of reusable Erlang components.
+pkg_erlware_commons_homepage = https://github.com/erlware/erlware_commons
+pkg_erlware_commons_fetch = git
+pkg_erlware_commons_repo = https://github.com/erlware/erlware_commons
+pkg_erlware_commons_commit = master
+
+PACKAGES += erlydtl
+pkg_erlydtl_name = erlydtl
+pkg_erlydtl_description = Django Template Language for Erlang.
+pkg_erlydtl_homepage = https://github.com/erlydtl/erlydtl
+pkg_erlydtl_fetch = git
+pkg_erlydtl_repo = https://github.com/erlydtl/erlydtl
+pkg_erlydtl_commit = master
+
+PACKAGES += errd
+pkg_errd_name = errd
+pkg_errd_description = Erlang RRDTool library
+pkg_errd_homepage = https://github.com/archaelus/errd
+pkg_errd_fetch = git
+pkg_errd_repo = https://github.com/archaelus/errd
+pkg_errd_commit = master
+
+PACKAGES += erserve
+pkg_erserve_name = erserve
+pkg_erserve_description = Erlang/Rserve communication interface
+pkg_erserve_homepage = https://github.com/del/erserve
+pkg_erserve_fetch = git
+pkg_erserve_repo = https://github.com/del/erserve
+pkg_erserve_commit = master
+
+PACKAGES += erwa
+pkg_erwa_name = erwa
+pkg_erwa_description = A WAMP router and client written in Erlang.
+pkg_erwa_homepage = https://github.com/bwegh/erwa
+pkg_erwa_fetch = git
+pkg_erwa_repo = https://github.com/bwegh/erwa
+pkg_erwa_commit = master
+
+PACKAGES += escalus
+pkg_escalus_name = escalus
+pkg_escalus_description = An XMPP client library in Erlang for conveniently testing XMPP servers
+pkg_escalus_homepage = https://github.com/esl/escalus
+pkg_escalus_fetch = git
+pkg_escalus_repo = https://github.com/esl/escalus
+pkg_escalus_commit = master
+
+PACKAGES += esh_mk
+pkg_esh_mk_name = esh_mk
+pkg_esh_mk_description = esh template engine plugin for erlang.mk
+pkg_esh_mk_homepage = https://github.com/crownedgrouse/esh.mk
+pkg_esh_mk_fetch = git
+pkg_esh_mk_repo = https://github.com/crownedgrouse/esh.mk.git
+pkg_esh_mk_commit = master
+
+PACKAGES += espec
+pkg_espec_name = espec
+pkg_espec_description = ESpec: Behaviour driven development framework for Erlang
+pkg_espec_homepage = https://github.com/lucaspiller/espec
+pkg_espec_fetch = git
+pkg_espec_repo = https://github.com/lucaspiller/espec
+pkg_espec_commit = master
+
+PACKAGES += estatsd
+pkg_estatsd_name = estatsd
+pkg_estatsd_description = Erlang stats aggregation app that periodically flushes data to graphite
+pkg_estatsd_homepage = https://github.com/RJ/estatsd
+pkg_estatsd_fetch = git
+pkg_estatsd_repo = https://github.com/RJ/estatsd
+pkg_estatsd_commit = master
+
+PACKAGES += etap
+pkg_etap_name = etap
+pkg_etap_description = etap is a simple erlang testing library that provides TAP compliant output.
+pkg_etap_homepage = https://github.com/ngerakines/etap
+pkg_etap_fetch = git
+pkg_etap_repo = https://github.com/ngerakines/etap
+pkg_etap_commit = master
+
+PACKAGES += etest
+pkg_etest_name = etest
+pkg_etest_description = A lightweight, convention over configuration test framework for Erlang
+pkg_etest_homepage = https://github.com/wooga/etest
+pkg_etest_fetch = git
+pkg_etest_repo = https://github.com/wooga/etest
+pkg_etest_commit = master
+
+PACKAGES += etest_http
+pkg_etest_http_name = etest_http
+pkg_etest_http_description = etest Assertions around HTTP (client-side)
+pkg_etest_http_homepage = https://github.com/wooga/etest_http
+pkg_etest_http_fetch = git
+pkg_etest_http_repo = https://github.com/wooga/etest_http
+pkg_etest_http_commit = master
+
+PACKAGES += etoml
+pkg_etoml_name = etoml
+pkg_etoml_description = Erlang parser for the TOML language
+pkg_etoml_homepage = https://github.com/kalta/etoml
+pkg_etoml_fetch = git
+pkg_etoml_repo = https://github.com/kalta/etoml
+pkg_etoml_commit = master
+
+PACKAGES += eunit
+pkg_eunit_name = eunit
+pkg_eunit_description = The EUnit lightweight unit testing framework for Erlang - this is the canonical development repository.
+pkg_eunit_homepage = https://github.com/richcarl/eunit
+pkg_eunit_fetch = git
+pkg_eunit_repo = https://github.com/richcarl/eunit
+pkg_eunit_commit = master
+
+PACKAGES += eunit_formatters
+pkg_eunit_formatters_name = eunit_formatters
+pkg_eunit_formatters_description = Because eunit's output sucks. Let's make it better.
+pkg_eunit_formatters_homepage = https://github.com/seancribbs/eunit_formatters
+pkg_eunit_formatters_fetch = git
+pkg_eunit_formatters_repo = https://github.com/seancribbs/eunit_formatters
+pkg_eunit_formatters_commit = master
+
+PACKAGES += euthanasia
+pkg_euthanasia_name = euthanasia
+pkg_euthanasia_description = Merciful killer for your Erlang processes
+pkg_euthanasia_homepage = https://github.com/doubleyou/euthanasia
+pkg_euthanasia_fetch = git
+pkg_euthanasia_repo = https://github.com/doubleyou/euthanasia
+pkg_euthanasia_commit = master
+
+PACKAGES += evum
+pkg_evum_name = evum
+pkg_evum_description = Spawn Linux VMs as Erlang processes in the Erlang VM
+pkg_evum_homepage = https://github.com/msantos/evum
+pkg_evum_fetch = git
+pkg_evum_repo = https://github.com/msantos/evum
+pkg_evum_commit = master
+
+PACKAGES += exec
+pkg_exec_name = erlexec
+pkg_exec_description = Execute and control OS processes from Erlang/OTP.
+pkg_exec_homepage = http://saleyn.github.com/erlexec
+pkg_exec_fetch = git
+pkg_exec_repo = https://github.com/saleyn/erlexec
+pkg_exec_commit = master
+
+PACKAGES += exml
+pkg_exml_name = exml
+pkg_exml_description = XML parsing library in Erlang
+pkg_exml_homepage = https://github.com/paulgray/exml
+pkg_exml_fetch = git
+pkg_exml_repo = https://github.com/paulgray/exml
+pkg_exml_commit = master
+
+PACKAGES += exometer
+pkg_exometer_name = exometer
+pkg_exometer_description = Basic measurement objects and probe behavior
+pkg_exometer_homepage = https://github.com/Feuerlabs/exometer
+pkg_exometer_fetch = git
+pkg_exometer_repo = https://github.com/Feuerlabs/exometer
+pkg_exometer_commit = master
+
+PACKAGES += exs1024
+pkg_exs1024_name = exs1024
+pkg_exs1024_description = Xorshift1024star pseudo random number generator for Erlang.
+pkg_exs1024_homepage = https://github.com/jj1bdx/exs1024
+pkg_exs1024_fetch = git
+pkg_exs1024_repo = https://github.com/jj1bdx/exs1024
+pkg_exs1024_commit = master
+
+PACKAGES += exs64
+pkg_exs64_name = exs64
+pkg_exs64_description = Xorshift64star pseudo random number generator for Erlang.
+pkg_exs64_homepage = https://github.com/jj1bdx/exs64
+pkg_exs64_fetch = git
+pkg_exs64_repo = https://github.com/jj1bdx/exs64
+pkg_exs64_commit = master
+
+PACKAGES += exsplus116
+pkg_exsplus116_name = exsplus116
+pkg_exsplus116_description = Xorshift116plus for Erlang
+pkg_exsplus116_homepage = https://github.com/jj1bdx/exsplus116
+pkg_exsplus116_fetch = git
+pkg_exsplus116_repo = https://github.com/jj1bdx/exsplus116
+pkg_exsplus116_commit = master
+
+PACKAGES += exsplus128
+pkg_exsplus128_name = exsplus128
+pkg_exsplus128_description = Xorshift128plus pseudo random number generator for Erlang.
+pkg_exsplus128_homepage = https://github.com/jj1bdx/exsplus128
+pkg_exsplus128_fetch = git
+pkg_exsplus128_repo = https://github.com/jj1bdx/exsplus128
+pkg_exsplus128_commit = master
+
+PACKAGES += ezmq
+pkg_ezmq_name = ezmq
+pkg_ezmq_description = zMQ implemented in Erlang
+pkg_ezmq_homepage = https://github.com/RoadRunnr/ezmq
+pkg_ezmq_fetch = git
+pkg_ezmq_repo = https://github.com/RoadRunnr/ezmq
+pkg_ezmq_commit = master
+
+PACKAGES += ezmtp
+pkg_ezmtp_name = ezmtp
+pkg_ezmtp_description = ZMTP protocol in pure Erlang.
+pkg_ezmtp_homepage = https://github.com/a13x/ezmtp
+pkg_ezmtp_fetch = git
+pkg_ezmtp_repo = https://github.com/a13x/ezmtp
+pkg_ezmtp_commit = master
+
+PACKAGES += fast_disk_log
+pkg_fast_disk_log_name = fast_disk_log
+pkg_fast_disk_log_description = Pool-based asynchronous Erlang disk logger
+pkg_fast_disk_log_homepage = https://github.com/lpgauth/fast_disk_log
+pkg_fast_disk_log_fetch = git
+pkg_fast_disk_log_repo = https://github.com/lpgauth/fast_disk_log
+pkg_fast_disk_log_commit = master
+
+PACKAGES += feeder
+pkg_feeder_name = feeder
+pkg_feeder_description = Stream-parse RSS and Atom formatted XML feeds.
+pkg_feeder_homepage = https://github.com/michaelnisi/feeder
+pkg_feeder_fetch = git
+pkg_feeder_repo = https://github.com/michaelnisi/feeder
+pkg_feeder_commit = master
+
+PACKAGES += find_crate
+pkg_find_crate_name = find_crate
+pkg_find_crate_description = Find Rust libs and exes in Erlang application priv directory
+pkg_find_crate_homepage = https://github.com/goertzenator/find_crate
+pkg_find_crate_fetch = git
+pkg_find_crate_repo = https://github.com/goertzenator/find_crate
+pkg_find_crate_commit = master
+
+PACKAGES += fix
+pkg_fix_name = fix
+pkg_fix_description = FIX protocol (http://fixprotocol.org/) implementation.
+pkg_fix_homepage = https://github.com/maxlapshin/fix
+pkg_fix_fetch = git
+pkg_fix_repo = https://github.com/maxlapshin/fix
+pkg_fix_commit = master
+
+PACKAGES += flower
+pkg_flower_name = flower
+pkg_flower_description = FlowER - an Erlang OpenFlow development platform
+pkg_flower_homepage = https://github.com/travelping/flower
+pkg_flower_fetch = git
+pkg_flower_repo = https://github.com/travelping/flower
+pkg_flower_commit = master
+
+PACKAGES += fn
+pkg_fn_name = fn
+pkg_fn_description = Function utilities for Erlang
+pkg_fn_homepage = https://github.com/reiddraper/fn
+pkg_fn_fetch = git
+pkg_fn_repo = https://github.com/reiddraper/fn
+pkg_fn_commit = master
+
+PACKAGES += folsom
+pkg_folsom_name = folsom
+pkg_folsom_description = Expose Erlang Events and Metrics
+pkg_folsom_homepage = https://github.com/boundary/folsom
+pkg_folsom_fetch = git
+pkg_folsom_repo = https://github.com/boundary/folsom
+pkg_folsom_commit = master
+
+PACKAGES += folsom_cowboy
+pkg_folsom_cowboy_name = folsom_cowboy
+pkg_folsom_cowboy_description = A Cowboy based Folsom HTTP Wrapper.
+pkg_folsom_cowboy_homepage = https://github.com/boundary/folsom_cowboy
+pkg_folsom_cowboy_fetch = git
+pkg_folsom_cowboy_repo = https://github.com/boundary/folsom_cowboy
+pkg_folsom_cowboy_commit = master
+
+PACKAGES += folsomite
+pkg_folsomite_name = folsomite
+pkg_folsomite_description = blow up your graphite / riemann server with folsom metrics
+pkg_folsomite_homepage = https://github.com/campanja/folsomite
+pkg_folsomite_fetch = git
+pkg_folsomite_repo = https://github.com/campanja/folsomite
+pkg_folsomite_commit = master
+
+PACKAGES += fs
+pkg_fs_name = fs
+pkg_fs_description = Erlang FileSystem Listener
+pkg_fs_homepage = https://github.com/synrc/fs
+pkg_fs_fetch = git
+pkg_fs_repo = https://github.com/synrc/fs
+pkg_fs_commit = master
+
+PACKAGES += fuse
+pkg_fuse_name = fuse
+pkg_fuse_description = A Circuit Breaker for Erlang
+pkg_fuse_homepage = https://github.com/jlouis/fuse
+pkg_fuse_fetch = git
+pkg_fuse_repo = https://github.com/jlouis/fuse
+pkg_fuse_commit = master
+
+PACKAGES += gcm
+pkg_gcm_name = gcm
+pkg_gcm_description = An Erlang application for Google Cloud Messaging
+pkg_gcm_homepage = https://github.com/pdincau/gcm-erlang
+pkg_gcm_fetch = git
+pkg_gcm_repo = https://github.com/pdincau/gcm-erlang
+pkg_gcm_commit = master
+
+PACKAGES += gcprof
+pkg_gcprof_name = gcprof
+pkg_gcprof_description = Garbage Collection profiler for Erlang
+pkg_gcprof_homepage = https://github.com/knutin/gcprof
+pkg_gcprof_fetch = git
+pkg_gcprof_repo = https://github.com/knutin/gcprof
+pkg_gcprof_commit = master
+
+PACKAGES += geas
+pkg_geas_name = geas
+pkg_geas_description = Guess Erlang Application Scattering
+pkg_geas_homepage = https://github.com/crownedgrouse/geas
+pkg_geas_fetch = git
+pkg_geas_repo = https://github.com/crownedgrouse/geas
+pkg_geas_commit = master
+
+PACKAGES += geef
+pkg_geef_name = geef
+pkg_geef_description = Git NEEEEF (Erlang NIF)
+pkg_geef_homepage = https://github.com/carlosmn/geef
+pkg_geef_fetch = git
+pkg_geef_repo = https://github.com/carlosmn/geef
+pkg_geef_commit = master
+
+PACKAGES += gen_coap
+pkg_gen_coap_name = gen_coap
+pkg_gen_coap_description = Generic Erlang CoAP Client/Server
+pkg_gen_coap_homepage = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_fetch = git
+pkg_gen_coap_repo = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_commit = master
+
+PACKAGES += gen_cycle
+pkg_gen_cycle_name = gen_cycle
+pkg_gen_cycle_description = Simple, generic OTP behaviour for recurring tasks
+pkg_gen_cycle_homepage = https://github.com/aerosol/gen_cycle
+pkg_gen_cycle_fetch = git
+pkg_gen_cycle_repo = https://github.com/aerosol/gen_cycle
+pkg_gen_cycle_commit = develop
+
+PACKAGES += gen_icmp
+pkg_gen_icmp_name = gen_icmp
+pkg_gen_icmp_description = Erlang interface to ICMP sockets
+pkg_gen_icmp_homepage = https://github.com/msantos/gen_icmp
+pkg_gen_icmp_fetch = git
+pkg_gen_icmp_repo = https://github.com/msantos/gen_icmp
+pkg_gen_icmp_commit = master
+
+PACKAGES += gen_leader
+pkg_gen_leader_name = gen_leader
+pkg_gen_leader_description = leader election behavior
+pkg_gen_leader_homepage = https://github.com/garret-smith/gen_leader_revival
+pkg_gen_leader_fetch = git
+pkg_gen_leader_repo = https://github.com/garret-smith/gen_leader_revival
+pkg_gen_leader_commit = master
+
+PACKAGES += gen_nb_server
+pkg_gen_nb_server_name = gen_nb_server
+pkg_gen_nb_server_description = OTP behavior for writing non-blocking servers
+pkg_gen_nb_server_homepage = https://github.com/kevsmith/gen_nb_server
+pkg_gen_nb_server_fetch = git
+pkg_gen_nb_server_repo = https://github.com/kevsmith/gen_nb_server
+pkg_gen_nb_server_commit = master
+
+PACKAGES += gen_paxos
+pkg_gen_paxos_name = gen_paxos
+pkg_gen_paxos_description = An Erlang/OTP-style implementation of the PAXOS distributed consensus protocol
+pkg_gen_paxos_homepage = https://github.com/gburd/gen_paxos
+pkg_gen_paxos_fetch = git
+pkg_gen_paxos_repo = https://github.com/gburd/gen_paxos
+pkg_gen_paxos_commit = master
+
+PACKAGES += gen_rpc
+pkg_gen_rpc_name = gen_rpc
+pkg_gen_rpc_description = A scalable RPC library for Erlang-VM based languages
+pkg_gen_rpc_homepage = https://github.com/priestjim/gen_rpc
+pkg_gen_rpc_fetch = git
+pkg_gen_rpc_repo = https://github.com/priestjim/gen_rpc.git
+pkg_gen_rpc_commit = master
+
+PACKAGES += gen_smtp
+pkg_gen_smtp_name = gen_smtp
+pkg_gen_smtp_description = A generic Erlang SMTP server and client that can be extended via callback modules
+pkg_gen_smtp_homepage = https://github.com/Vagabond/gen_smtp
+pkg_gen_smtp_fetch = git
+pkg_gen_smtp_repo = https://github.com/Vagabond/gen_smtp
+pkg_gen_smtp_commit = master
+
+PACKAGES += gen_tracker
+pkg_gen_tracker_name = gen_tracker
+pkg_gen_tracker_description = Supervisor with ETS-based tracking of children and their metadata
+pkg_gen_tracker_homepage = https://github.com/erlyvideo/gen_tracker
+pkg_gen_tracker_fetch = git
+pkg_gen_tracker_repo = https://github.com/erlyvideo/gen_tracker
+pkg_gen_tracker_commit = master
+
+PACKAGES += gen_unix
+pkg_gen_unix_name = gen_unix
+pkg_gen_unix_description = Erlang Unix socket interface
+pkg_gen_unix_homepage = https://github.com/msantos/gen_unix
+pkg_gen_unix_fetch = git
+pkg_gen_unix_repo = https://github.com/msantos/gen_unix
+pkg_gen_unix_commit = master
+
+PACKAGES += geode
+pkg_geode_name = geode
+pkg_geode_description = geohash/proximity lookup in pure, uncut erlang.
+pkg_geode_homepage = https://github.com/bradfordw/geode
+pkg_geode_fetch = git
+pkg_geode_repo = https://github.com/bradfordw/geode
+pkg_geode_commit = master
+
+PACKAGES += getopt
+pkg_getopt_name = getopt
+pkg_getopt_description = Module to parse command line arguments using the GNU getopt syntax
+pkg_getopt_homepage = https://github.com/jcomellas/getopt
+pkg_getopt_fetch = git
+pkg_getopt_repo = https://github.com/jcomellas/getopt
+pkg_getopt_commit = master
+
+PACKAGES += gettext
+pkg_gettext_name = gettext
+pkg_gettext_description = Erlang internationalization library.
+pkg_gettext_homepage = https://github.com/etnt/gettext
+pkg_gettext_fetch = git
+pkg_gettext_repo = https://github.com/etnt/gettext
+pkg_gettext_commit = master
+
+PACKAGES += giallo
+pkg_giallo_name = giallo
+pkg_giallo_description = Small and flexible web framework on top of Cowboy
+pkg_giallo_homepage = https://github.com/kivra/giallo
+pkg_giallo_fetch = git
+pkg_giallo_repo = https://github.com/kivra/giallo
+pkg_giallo_commit = master
+
+PACKAGES += gin
+pkg_gin_name = gin
+pkg_gin_description = Guard expression helpers for Erlang, implemented as a parse_transform
+pkg_gin_homepage = https://github.com/mad-cocktail/gin
+pkg_gin_fetch = git
+pkg_gin_repo = https://github.com/mad-cocktail/gin
+pkg_gin_commit = master
+
+PACKAGES += gitty
+pkg_gitty_name = gitty
+pkg_gitty_description = Git access in erlang
+pkg_gitty_homepage = https://github.com/maxlapshin/gitty
+pkg_gitty_fetch = git
+pkg_gitty_repo = https://github.com/maxlapshin/gitty
+pkg_gitty_commit = master
+
+PACKAGES += gold_fever
+pkg_gold_fever_name = gold_fever
+pkg_gold_fever_description = A Treasure Hunt for Erlangers
+pkg_gold_fever_homepage = https://github.com/inaka/gold_fever
+pkg_gold_fever_fetch = git
+pkg_gold_fever_repo = https://github.com/inaka/gold_fever
+pkg_gold_fever_commit = master
+
+PACKAGES += gpb
+pkg_gpb_name = gpb
+pkg_gpb_description = A Google Protobuf implementation for Erlang
+pkg_gpb_homepage = https://github.com/tomas-abrahamsson/gpb
+pkg_gpb_fetch = git
+pkg_gpb_repo = https://github.com/tomas-abrahamsson/gpb
+pkg_gpb_commit = master
+
+PACKAGES += gproc
+pkg_gproc_name = gproc
+pkg_gproc_description = Extended process registry for Erlang
+pkg_gproc_homepage = https://github.com/uwiger/gproc
+pkg_gproc_fetch = git
+pkg_gproc_repo = https://github.com/uwiger/gproc
+pkg_gproc_commit = master
+
+PACKAGES += grapherl
+pkg_grapherl_name = grapherl
+pkg_grapherl_description = Create graphs of Erlang systems and programs
+pkg_grapherl_homepage = https://github.com/eproxus/grapherl
+pkg_grapherl_fetch = git
+pkg_grapherl_repo = https://github.com/eproxus/grapherl
+pkg_grapherl_commit = master
+
+PACKAGES += grpc
+pkg_grpc_name = grpc
+pkg_grpc_description = gRPC server in Erlang
+pkg_grpc_homepage = https://github.com/Bluehouse-Technology/grpc
+pkg_grpc_fetch = git
+pkg_grpc_repo = https://github.com/Bluehouse-Technology/grpc
+pkg_grpc_commit = master
+
+PACKAGES += grpc_client
+pkg_grpc_client_name = grpc_client
+pkg_grpc_client_description = gRPC client in Erlang
+pkg_grpc_client_homepage = https://github.com/Bluehouse-Technology/grpc_client
+pkg_grpc_client_fetch = git
+pkg_grpc_client_repo = https://github.com/Bluehouse-Technology/grpc_client
+pkg_grpc_client_commit = master
+
+PACKAGES += gun
+pkg_gun_name = gun
+pkg_gun_description = Asynchronous SPDY, HTTP and Websocket client written in Erlang.
+pkg_gun_homepage = http://ninenines.eu
+pkg_gun_fetch = git
+pkg_gun_repo = https://github.com/ninenines/gun
+pkg_gun_commit = master
+
+PACKAGES += gut
+pkg_gut_name = gut
+pkg_gut_description = gut is a template-printing (aka scaffolding) tool for Erlang, like rails generate or yeoman.
+pkg_gut_homepage = https://github.com/unbalancedparentheses/gut
+pkg_gut_fetch = git
+pkg_gut_repo = https://github.com/unbalancedparentheses/gut
+pkg_gut_commit = master
+
+PACKAGES += hackney
+pkg_hackney_name = hackney
+pkg_hackney_description = simple HTTP client in Erlang
+pkg_hackney_homepage = https://github.com/benoitc/hackney
+pkg_hackney_fetch = git
+pkg_hackney_repo = https://github.com/benoitc/hackney
+pkg_hackney_commit = master
+
+PACKAGES += hamcrest
+pkg_hamcrest_name = hamcrest
+pkg_hamcrest_description = Erlang port of Hamcrest
+pkg_hamcrest_homepage = https://github.com/hyperthunk/hamcrest-erlang
+pkg_hamcrest_fetch = git
+pkg_hamcrest_repo = https://github.com/hyperthunk/hamcrest-erlang
+pkg_hamcrest_commit = master
+
+PACKAGES += hanoidb
+pkg_hanoidb_name = hanoidb
+pkg_hanoidb_description = Erlang LSM BTree Storage
+pkg_hanoidb_homepage = https://github.com/krestenkrab/hanoidb
+pkg_hanoidb_fetch = git
+pkg_hanoidb_repo = https://github.com/krestenkrab/hanoidb
+pkg_hanoidb_commit = master
+
+PACKAGES += hottub
+pkg_hottub_name = hottub
+pkg_hottub_description = Permanent Erlang Worker Pool
+pkg_hottub_homepage = https://github.com/bfrog/hottub
+pkg_hottub_fetch = git
+pkg_hottub_repo = https://github.com/bfrog/hottub
+pkg_hottub_commit = master
+
+PACKAGES += hpack
+pkg_hpack_name = hpack
+pkg_hpack_description = HPACK Implementation for Erlang
+pkg_hpack_homepage = https://github.com/joedevivo/hpack
+pkg_hpack_fetch = git
+pkg_hpack_repo = https://github.com/joedevivo/hpack
+pkg_hpack_commit = master
+
+PACKAGES += hyper
+pkg_hyper_name = hyper
+pkg_hyper_description = Erlang implementation of HyperLogLog
+pkg_hyper_homepage = https://github.com/GameAnalytics/hyper
+pkg_hyper_fetch = git
+pkg_hyper_repo = https://github.com/GameAnalytics/hyper
+pkg_hyper_commit = master
+
+PACKAGES += i18n
+pkg_i18n_name = i18n
+pkg_i18n_description = International components for unicode from Erlang (unicode, date, string, number, format, locale, localization, transliteration, icu4e)
+pkg_i18n_homepage = https://github.com/erlang-unicode/i18n
+pkg_i18n_fetch = git
+pkg_i18n_repo = https://github.com/erlang-unicode/i18n
+pkg_i18n_commit = master
+
+PACKAGES += ibrowse
+pkg_ibrowse_name = ibrowse
+pkg_ibrowse_description = Erlang HTTP client
+pkg_ibrowse_homepage = https://github.com/cmullaparthi/ibrowse
+pkg_ibrowse_fetch = git
+pkg_ibrowse_repo = https://github.com/cmullaparthi/ibrowse
+pkg_ibrowse_commit = master
+
+PACKAGES += idna
+pkg_idna_name = idna
+pkg_idna_description = Erlang IDNA lib
+pkg_idna_homepage = https://github.com/benoitc/erlang-idna
+pkg_idna_fetch = git
+pkg_idna_repo = https://github.com/benoitc/erlang-idna
+pkg_idna_commit = master
+
+PACKAGES += ierlang
+pkg_ierlang_name = ierlang
+pkg_ierlang_description = An Erlang language kernel for IPython.
+pkg_ierlang_homepage = https://github.com/robbielynch/ierlang
+pkg_ierlang_fetch = git
+pkg_ierlang_repo = https://github.com/robbielynch/ierlang
+pkg_ierlang_commit = master
+
+PACKAGES += iota
+pkg_iota_name = iota
+pkg_iota_description = iota (Inter-dependency Objective Testing Apparatus) - a tool to enforce clean separation of responsibilities in Erlang code
+pkg_iota_homepage = https://github.com/jpgneves/iota
+pkg_iota_fetch = git
+pkg_iota_repo = https://github.com/jpgneves/iota
+pkg_iota_commit = master
+
+PACKAGES += irc_lib
+pkg_irc_lib_name = irc_lib
+pkg_irc_lib_description = Erlang irc client library
+pkg_irc_lib_homepage = https://github.com/OtpChatBot/irc_lib
+pkg_irc_lib_fetch = git
+pkg_irc_lib_repo = https://github.com/OtpChatBot/irc_lib
+pkg_irc_lib_commit = master
+
+PACKAGES += ircd
+pkg_ircd_name = ircd
+pkg_ircd_description = A pluggable IRC daemon application/library for Erlang.
+pkg_ircd_homepage = https://github.com/tonyg/erlang-ircd
+pkg_ircd_fetch = git
+pkg_ircd_repo = https://github.com/tonyg/erlang-ircd
+pkg_ircd_commit = master
+
+PACKAGES += iris
+pkg_iris_name = iris
+pkg_iris_description = Iris Erlang binding
+pkg_iris_homepage = https://github.com/project-iris/iris-erl
+pkg_iris_fetch = git
+pkg_iris_repo = https://github.com/project-iris/iris-erl
+pkg_iris_commit = master
+
+PACKAGES += iso8601
+pkg_iso8601_name = iso8601
+pkg_iso8601_description = Erlang ISO 8601 date formatter/parser
+pkg_iso8601_homepage = https://github.com/seansawyer/erlang_iso8601
+pkg_iso8601_fetch = git
+pkg_iso8601_repo = https://github.com/seansawyer/erlang_iso8601
+pkg_iso8601_commit = master
+
+PACKAGES += jamdb_sybase
+pkg_jamdb_sybase_name = jamdb_sybase
+pkg_jamdb_sybase_description = Erlang driver for SAP Sybase ASE
+pkg_jamdb_sybase_homepage = https://github.com/erlangbureau/jamdb_sybase
+pkg_jamdb_sybase_fetch = git
+pkg_jamdb_sybase_repo = https://github.com/erlangbureau/jamdb_sybase
+pkg_jamdb_sybase_commit = master
+
+PACKAGES += jerg
+pkg_jerg_name = jerg
+pkg_jerg_description = JSON Schema to Erlang Records Generator
+pkg_jerg_homepage = https://github.com/ddossot/jerg
+pkg_jerg_fetch = git
+pkg_jerg_repo = https://github.com/ddossot/jerg
+pkg_jerg_commit = master
+
+PACKAGES += jesse
+pkg_jesse_name = jesse
+pkg_jesse_description = jesse (JSon Schema Erlang) is an implementation of a json schema validator for Erlang.
+pkg_jesse_homepage = https://github.com/for-GET/jesse
+pkg_jesse_fetch = git
+pkg_jesse_repo = https://github.com/for-GET/jesse
+pkg_jesse_commit = master
+
+PACKAGES += jiffy
+pkg_jiffy_name = jiffy
+pkg_jiffy_description = JSON NIFs for Erlang.
+pkg_jiffy_homepage = https://github.com/davisp/jiffy
+pkg_jiffy_fetch = git
+pkg_jiffy_repo = https://github.com/davisp/jiffy
+pkg_jiffy_commit = master
+
+PACKAGES += jiffy_v
+pkg_jiffy_v_name = jiffy_v
+pkg_jiffy_v_description = JSON validation utility
+pkg_jiffy_v_homepage = https://github.com/shizzard/jiffy-v
+pkg_jiffy_v_fetch = git
+pkg_jiffy_v_repo = https://github.com/shizzard/jiffy-v
+pkg_jiffy_v_commit = master
+
+PACKAGES += jobs
+pkg_jobs_name = jobs
+pkg_jobs_description = a Job scheduler for load regulation
+pkg_jobs_homepage = https://github.com/esl/jobs
+pkg_jobs_fetch = git
+pkg_jobs_repo = https://github.com/esl/jobs
+pkg_jobs_commit = master
+
+PACKAGES += joxa
+pkg_joxa_name = joxa
+pkg_joxa_description = A Modern Lisp for the Erlang VM
+pkg_joxa_homepage = https://github.com/joxa/joxa
+pkg_joxa_fetch = git
+pkg_joxa_repo = https://github.com/joxa/joxa
+pkg_joxa_commit = master
+
+PACKAGES += json
+pkg_json_name = json
+pkg_json_description = A high-level JSON library for Erlang (17.0+)
+pkg_json_homepage = https://github.com/talentdeficit/json
+pkg_json_fetch = git
+pkg_json_repo = https://github.com/talentdeficit/json
+pkg_json_commit = master
+
+PACKAGES += json_rec
+pkg_json_rec_name = json_rec
+pkg_json_rec_description = JSON to Erlang record
+pkg_json_rec_homepage = https://github.com/justinkirby/json_rec
+pkg_json_rec_fetch = git
+pkg_json_rec_repo = https://github.com/justinkirby/json_rec
+pkg_json_rec_commit = master
+
+PACKAGES += jsone
+pkg_jsone_name = jsone
+pkg_jsone_description = An Erlang library for encoding and decoding JSON data.
+pkg_jsone_homepage = https://github.com/sile/jsone.git
+pkg_jsone_fetch = git
+pkg_jsone_repo = https://github.com/sile/jsone.git
+pkg_jsone_commit = master
+
+PACKAGES += jsonerl
+pkg_jsonerl_name = jsonerl
+pkg_jsonerl_description = Yet another, slightly different Erlang <-> JSON encoder/decoder
+pkg_jsonerl_homepage = https://github.com/lambder/jsonerl
+pkg_jsonerl_fetch = git
+pkg_jsonerl_repo = https://github.com/lambder/jsonerl
+pkg_jsonerl_commit = master
+
+PACKAGES += jsonpath
+pkg_jsonpath_name = jsonpath
+pkg_jsonpath_description = Fast Erlang JSON data retrieval and updates via javascript-like notation
+pkg_jsonpath_homepage = https://github.com/GeneStevens/jsonpath
+pkg_jsonpath_fetch = git
+pkg_jsonpath_repo = https://github.com/GeneStevens/jsonpath
+pkg_jsonpath_commit = master
+
+PACKAGES += jsonx
+pkg_jsonx_name = jsonx
+pkg_jsonx_description = JSONX is an Erlang library, written in C, for efficiently decoding and encoding JSON.
+pkg_jsonx_homepage = https://github.com/iskra/jsonx
+pkg_jsonx_fetch = git
+pkg_jsonx_repo = https://github.com/iskra/jsonx
+pkg_jsonx_commit = master
+
+PACKAGES += jsx
+pkg_jsx_name = jsx
+pkg_jsx_description = An Erlang application for consuming, producing and manipulating JSON.
+pkg_jsx_homepage = https://github.com/talentdeficit/jsx
+pkg_jsx_fetch = git
+pkg_jsx_repo = https://github.com/talentdeficit/jsx
+pkg_jsx_commit = master
+
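+# Illustrative sketch, not part of the index itself: a project Makefile pulls
+# in any package listed here by naming it in DEPS, and erlang.mk resolves the
+# pkg_*_fetch/_repo/_commit fields above. A dep_* line overrides the index
+# entry, e.g. to pin jsx to a tag instead of master (tag name hypothetical):
+#
+#     DEPS = jsx jiffy
+#     dep_jsx = git https://github.com/talentdeficit/jsx v2.8.0
+#     include erlang.mk
+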
+PACKAGES += kafka
+pkg_kafka_name = kafka
+pkg_kafka_description = Kafka consumer and producer in Erlang
+pkg_kafka_homepage = https://github.com/wooga/kafka-erlang
+pkg_kafka_fetch = git
+pkg_kafka_repo = https://github.com/wooga/kafka-erlang
+pkg_kafka_commit = master
+
+PACKAGES += kafka_protocol
+pkg_kafka_protocol_name = kafka_protocol
+pkg_kafka_protocol_description = Kafka protocol Erlang library
+pkg_kafka_protocol_homepage = https://github.com/klarna/kafka_protocol
+pkg_kafka_protocol_fetch = git
+pkg_kafka_protocol_repo = https://github.com/klarna/kafka_protocol.git
+pkg_kafka_protocol_commit = master
+
+PACKAGES += kai
+pkg_kai_name = kai
+pkg_kai_description = DHT storage by Takeshi Inoue
+pkg_kai_homepage = https://github.com/synrc/kai
+pkg_kai_fetch = git
+pkg_kai_repo = https://github.com/synrc/kai
+pkg_kai_commit = master
+
+PACKAGES += katja
+pkg_katja_name = katja
+pkg_katja_description = A simple Riemann client written in Erlang.
+pkg_katja_homepage = https://github.com/nifoc/katja
+pkg_katja_fetch = git
+pkg_katja_repo = https://github.com/nifoc/katja
+pkg_katja_commit = master
+
+PACKAGES += kdht
+pkg_kdht_name = kdht
+pkg_kdht_description = kdht is an Erlang DHT implementation
+pkg_kdht_homepage = https://github.com/kevinlynx/kdht
+pkg_kdht_fetch = git
+pkg_kdht_repo = https://github.com/kevinlynx/kdht
+pkg_kdht_commit = master
+
+PACKAGES += key2value
+pkg_key2value_name = key2value
+pkg_key2value_description = Erlang 2-way map
+pkg_key2value_homepage = https://github.com/okeuday/key2value
+pkg_key2value_fetch = git
+pkg_key2value_repo = https://github.com/okeuday/key2value
+pkg_key2value_commit = master
+
+PACKAGES += keys1value
+pkg_keys1value_name = keys1value
+pkg_keys1value_description = Erlang set associative map for key lists
+pkg_keys1value_homepage = https://github.com/okeuday/keys1value
+pkg_keys1value_fetch = git
+pkg_keys1value_repo = https://github.com/okeuday/keys1value
+pkg_keys1value_commit = master
+
+PACKAGES += kinetic
+pkg_kinetic_name = kinetic
+pkg_kinetic_description = Erlang Kinesis Client
+pkg_kinetic_homepage = https://github.com/AdRoll/kinetic
+pkg_kinetic_fetch = git
+pkg_kinetic_repo = https://github.com/AdRoll/kinetic
+pkg_kinetic_commit = master
+
+PACKAGES += kjell
+pkg_kjell_name = kjell
+pkg_kjell_description = Erlang Shell
+pkg_kjell_homepage = https://github.com/karlll/kjell
+pkg_kjell_fetch = git
+pkg_kjell_repo = https://github.com/karlll/kjell
+pkg_kjell_commit = master
+
+PACKAGES += kraken
+pkg_kraken_name = kraken
+pkg_kraken_description = Distributed Pubsub Server for Realtime Apps
+pkg_kraken_homepage = https://github.com/Asana/kraken
+pkg_kraken_fetch = git
+pkg_kraken_repo = https://github.com/Asana/kraken
+pkg_kraken_commit = master
+
+PACKAGES += kucumberl
+pkg_kucumberl_name = kucumberl
+pkg_kucumberl_description = A pure-Erlang, open-source implementation of Cucumber
+pkg_kucumberl_homepage = https://github.com/openshine/kucumberl
+pkg_kucumberl_fetch = git
+pkg_kucumberl_repo = https://github.com/openshine/kucumberl
+pkg_kucumberl_commit = master
+
+PACKAGES += kvc
+pkg_kvc_name = kvc
+pkg_kvc_description = KVC - Key Value Coding for Erlang data structures
+pkg_kvc_homepage = https://github.com/etrepum/kvc
+pkg_kvc_fetch = git
+pkg_kvc_repo = https://github.com/etrepum/kvc
+pkg_kvc_commit = master
+
+PACKAGES += kvlists
+pkg_kvlists_name = kvlists
+pkg_kvlists_description = Lists of key-value pairs (decoded JSON) in Erlang
+pkg_kvlists_homepage = https://github.com/jcomellas/kvlists
+pkg_kvlists_fetch = git
+pkg_kvlists_repo = https://github.com/jcomellas/kvlists
+pkg_kvlists_commit = master
+
+PACKAGES += kvs
+pkg_kvs_name = kvs
+pkg_kvs_description = Container and Iterator
+pkg_kvs_homepage = https://github.com/synrc/kvs
+pkg_kvs_fetch = git
+pkg_kvs_repo = https://github.com/synrc/kvs
+pkg_kvs_commit = master
+
+PACKAGES += lager
+pkg_lager_name = lager
+pkg_lager_description = A logging framework for Erlang/OTP.
+pkg_lager_homepage = https://github.com/erlang-lager/lager
+pkg_lager_fetch = git
+pkg_lager_repo = https://github.com/erlang-lager/lager
+pkg_lager_commit = master
+
+PACKAGES += lager_amqp_backend
+pkg_lager_amqp_backend_name = lager_amqp_backend
+pkg_lager_amqp_backend_description = AMQP RabbitMQ Lager backend
+pkg_lager_amqp_backend_homepage = https://github.com/jbrisbin/lager_amqp_backend
+pkg_lager_amqp_backend_fetch = git
+pkg_lager_amqp_backend_repo = https://github.com/jbrisbin/lager_amqp_backend
+pkg_lager_amqp_backend_commit = master
+
+PACKAGES += lager_syslog
+pkg_lager_syslog_name = lager_syslog
+pkg_lager_syslog_description = Syslog backend for lager
+pkg_lager_syslog_homepage = https://github.com/erlang-lager/lager_syslog
+pkg_lager_syslog_fetch = git
+pkg_lager_syslog_repo = https://github.com/erlang-lager/lager_syslog
+pkg_lager_syslog_commit = master
+
+PACKAGES += lambdapad
+pkg_lambdapad_name = lambdapad
+pkg_lambdapad_description = Static site generator using Erlang. Yes, Erlang.
+pkg_lambdapad_homepage = https://github.com/gar1t/lambdapad
+pkg_lambdapad_fetch = git
+pkg_lambdapad_repo = https://github.com/gar1t/lambdapad
+pkg_lambdapad_commit = master
+
+PACKAGES += lasp
+pkg_lasp_name = lasp
+pkg_lasp_description = A Language for Distributed, Eventually Consistent Computations
+pkg_lasp_homepage = http://lasp-lang.org/
+pkg_lasp_fetch = git
+pkg_lasp_repo = https://github.com/lasp-lang/lasp
+pkg_lasp_commit = master
+
+PACKAGES += lasse
+pkg_lasse_name = lasse
+pkg_lasse_description = SSE handler for Cowboy
+pkg_lasse_homepage = https://github.com/inaka/lasse
+pkg_lasse_fetch = git
+pkg_lasse_repo = https://github.com/inaka/lasse
+pkg_lasse_commit = master
+
+PACKAGES += ldap
+pkg_ldap_name = ldap
+pkg_ldap_description = LDAP server written in Erlang
+pkg_ldap_homepage = https://github.com/spawnproc/ldap
+pkg_ldap_fetch = git
+pkg_ldap_repo = https://github.com/spawnproc/ldap
+pkg_ldap_commit = master
+
+PACKAGES += lethink
+pkg_lethink_name = lethink
+pkg_lethink_description = Erlang driver for RethinkDB
+pkg_lethink_homepage = https://github.com/taybin/lethink
+pkg_lethink_fetch = git
+pkg_lethink_repo = https://github.com/taybin/lethink
+pkg_lethink_commit = master
+
+PACKAGES += lfe
+pkg_lfe_name = lfe
+pkg_lfe_description = Lisp Flavoured Erlang (LFE)
+pkg_lfe_homepage = https://github.com/rvirding/lfe
+pkg_lfe_fetch = git
+pkg_lfe_repo = https://github.com/rvirding/lfe
+pkg_lfe_commit = master
+
+PACKAGES += ling
+pkg_ling_name = ling
+pkg_ling_description = Erlang on Xen
+pkg_ling_homepage = https://github.com/cloudozer/ling
+pkg_ling_fetch = git
+pkg_ling_repo = https://github.com/cloudozer/ling
+pkg_ling_commit = master
+
+PACKAGES += live
+pkg_live_name = live
+pkg_live_description = Automated module and configuration reloader.
+pkg_live_homepage = http://ninenines.eu
+pkg_live_fetch = git
+pkg_live_repo = https://github.com/ninenines/live
+pkg_live_commit = master
+
+PACKAGES += lmq
+pkg_lmq_name = lmq
+pkg_lmq_description = Lightweight Message Queue
+pkg_lmq_homepage = https://github.com/iij/lmq
+pkg_lmq_fetch = git
+pkg_lmq_repo = https://github.com/iij/lmq
+pkg_lmq_commit = master
+
+PACKAGES += locker
+pkg_locker_name = locker
+pkg_locker_description = Atomic distributed 'check and set' for short-lived keys
+pkg_locker_homepage = https://github.com/wooga/locker
+pkg_locker_fetch = git
+pkg_locker_repo = https://github.com/wooga/locker
+pkg_locker_commit = master
+
+PACKAGES += locks
+pkg_locks_name = locks
+pkg_locks_description = A scalable, deadlock-resolving resource locker
+pkg_locks_homepage = https://github.com/uwiger/locks
+pkg_locks_fetch = git
+pkg_locks_repo = https://github.com/uwiger/locks
+pkg_locks_commit = master
+
+PACKAGES += log4erl
+pkg_log4erl_name = log4erl
+pkg_log4erl_description = A logger for Erlang in the spirit of Log4J.
+pkg_log4erl_homepage = https://github.com/ahmednawras/log4erl
+pkg_log4erl_fetch = git
+pkg_log4erl_repo = https://github.com/ahmednawras/log4erl
+pkg_log4erl_commit = master
+
+PACKAGES += lol
+pkg_lol_name = lol
+pkg_lol_description = Lisp on erLang, and programming is fun again
+pkg_lol_homepage = https://github.com/b0oh/lol
+pkg_lol_fetch = git
+pkg_lol_repo = https://github.com/b0oh/lol
+pkg_lol_commit = master
+
+PACKAGES += lucid
+pkg_lucid_name = lucid
+pkg_lucid_description = HTTP/2 server written in Erlang
+pkg_lucid_homepage = https://github.com/tatsuhiro-t/lucid
+pkg_lucid_fetch = git
+pkg_lucid_repo = https://github.com/tatsuhiro-t/lucid
+pkg_lucid_commit = master
+
+PACKAGES += luerl
+pkg_luerl_name = luerl
+pkg_luerl_description = Lua in Erlang
+pkg_luerl_homepage = https://github.com/rvirding/luerl
+pkg_luerl_fetch = git
+pkg_luerl_repo = https://github.com/rvirding/luerl
+pkg_luerl_commit = develop
+
+PACKAGES += luwak
+pkg_luwak_name = luwak
+pkg_luwak_description = Large-object storage interface for Riak
+pkg_luwak_homepage = https://github.com/basho/luwak
+pkg_luwak_fetch = git
+pkg_luwak_repo = https://github.com/basho/luwak
+pkg_luwak_commit = master
+
+PACKAGES += lux
+pkg_lux_name = lux
+pkg_lux_description = Lux (LUcid eXpect scripting) simplifies test automation and provides an Expect-style execution of commands
+pkg_lux_homepage = https://github.com/hawk/lux
+pkg_lux_fetch = git
+pkg_lux_repo = https://github.com/hawk/lux
+pkg_lux_commit = master
+
+PACKAGES += machi
+pkg_machi_name = machi
+pkg_machi_description = Machi file store
+pkg_machi_homepage = https://github.com/basho/machi
+pkg_machi_fetch = git
+pkg_machi_repo = https://github.com/basho/machi
+pkg_machi_commit = master
+
+PACKAGES += mad
+pkg_mad_name = mad
+pkg_mad_description = Small and Fast Rebar Replacement
+pkg_mad_homepage = https://github.com/synrc/mad
+pkg_mad_fetch = git
+pkg_mad_repo = https://github.com/synrc/mad
+pkg_mad_commit = master
+
+PACKAGES += marina
+pkg_marina_name = marina
+pkg_marina_description = Non-blocking Erlang Cassandra CQL3 client
+pkg_marina_homepage = https://github.com/lpgauth/marina
+pkg_marina_fetch = git
+pkg_marina_repo = https://github.com/lpgauth/marina
+pkg_marina_commit = master
+
+PACKAGES += mavg
+pkg_mavg_name = mavg
+pkg_mavg_description = Exponential moving average library for Erlang
+pkg_mavg_homepage = https://github.com/EchoTeam/mavg
+pkg_mavg_fetch = git
+pkg_mavg_repo = https://github.com/EchoTeam/mavg
+pkg_mavg_commit = master
+
+PACKAGES += mc_erl
+pkg_mc_erl_name = mc_erl
+pkg_mc_erl_description = mc-erl is a server for Minecraft 1.4.7 written in Erlang.
+pkg_mc_erl_homepage = https://github.com/clonejo/mc-erl
+pkg_mc_erl_fetch = git
+pkg_mc_erl_repo = https://github.com/clonejo/mc-erl
+pkg_mc_erl_commit = master
+
+PACKAGES += mcd
+pkg_mcd_name = mcd
+pkg_mcd_description = Fast memcached protocol client in pure Erlang
+pkg_mcd_homepage = https://github.com/EchoTeam/mcd
+pkg_mcd_fetch = git
+pkg_mcd_repo = https://github.com/EchoTeam/mcd
+pkg_mcd_commit = master
+
+PACKAGES += mcerlang
+pkg_mcerlang_name = mcerlang
+pkg_mcerlang_description = The McErlang model checker for Erlang
+pkg_mcerlang_homepage = https://github.com/fredlund/McErlang
+pkg_mcerlang_fetch = git
+pkg_mcerlang_repo = https://github.com/fredlund/McErlang
+pkg_mcerlang_commit = master
+
+PACKAGES += meck
+pkg_meck_name = meck
+pkg_meck_description = A mocking library for Erlang
+pkg_meck_homepage = https://github.com/eproxus/meck
+pkg_meck_fetch = git
+pkg_meck_repo = https://github.com/eproxus/meck
+pkg_meck_commit = master
+
+PACKAGES += mekao
+pkg_mekao_name = mekao
+pkg_mekao_description = SQL constructor
+pkg_mekao_homepage = https://github.com/ddosia/mekao
+pkg_mekao_fetch = git
+pkg_mekao_repo = https://github.com/ddosia/mekao
+pkg_mekao_commit = master
+
+PACKAGES += memo
+pkg_memo_name = memo
+pkg_memo_description = Erlang memoization server
+pkg_memo_homepage = https://github.com/tuncer/memo
+pkg_memo_fetch = git
+pkg_memo_repo = https://github.com/tuncer/memo
+pkg_memo_commit = master
+
+PACKAGES += merge_index
+pkg_merge_index_name = merge_index
+pkg_merge_index_description = MergeIndex is an Erlang library for storing ordered sets on disk. It is very similar to an SSTable (in Google's Bigtable) or an HFile (in Hadoop).
+pkg_merge_index_homepage = https://github.com/basho/merge_index
+pkg_merge_index_fetch = git
+pkg_merge_index_repo = https://github.com/basho/merge_index
+pkg_merge_index_commit = master
+
+PACKAGES += merl
+pkg_merl_name = merl
+pkg_merl_description = Metaprogramming in Erlang
+pkg_merl_homepage = https://github.com/richcarl/merl
+pkg_merl_fetch = git
+pkg_merl_repo = https://github.com/richcarl/merl
+pkg_merl_commit = master
+
+PACKAGES += mimerl
+pkg_mimerl_name = mimerl
+pkg_mimerl_description = Library to handle MIME types
+pkg_mimerl_homepage = https://github.com/benoitc/mimerl
+pkg_mimerl_fetch = git
+pkg_mimerl_repo = https://github.com/benoitc/mimerl
+pkg_mimerl_commit = master
+
+PACKAGES += mimetypes
+pkg_mimetypes_name = mimetypes
+pkg_mimetypes_description = Erlang MIME types library
+pkg_mimetypes_homepage = https://github.com/spawngrid/mimetypes
+pkg_mimetypes_fetch = git
+pkg_mimetypes_repo = https://github.com/spawngrid/mimetypes
+pkg_mimetypes_commit = master
+
+PACKAGES += mixer
+pkg_mixer_name = mixer
+pkg_mixer_description = Mix in functions from other modules
+pkg_mixer_homepage = https://github.com/chef/mixer
+pkg_mixer_fetch = git
+pkg_mixer_repo = https://github.com/chef/mixer
+pkg_mixer_commit = master
+
+PACKAGES += mochiweb
+pkg_mochiweb_name = mochiweb
+pkg_mochiweb_description = MochiWeb is an Erlang library for building lightweight HTTP servers.
+pkg_mochiweb_homepage = https://github.com/mochi/mochiweb
+pkg_mochiweb_fetch = git
+pkg_mochiweb_repo = https://github.com/mochi/mochiweb
+pkg_mochiweb_commit = master
+
+PACKAGES += mochiweb_xpath
+pkg_mochiweb_xpath_name = mochiweb_xpath
+pkg_mochiweb_xpath_description = XPath support for mochiweb's HTML parser
+pkg_mochiweb_xpath_homepage = https://github.com/retnuh/mochiweb_xpath
+pkg_mochiweb_xpath_fetch = git
+pkg_mochiweb_xpath_repo = https://github.com/retnuh/mochiweb_xpath
+pkg_mochiweb_xpath_commit = master
+
+PACKAGES += mockgyver
+pkg_mockgyver_name = mockgyver
+pkg_mockgyver_description = A mocking library for Erlang
+pkg_mockgyver_homepage = https://github.com/klajo/mockgyver
+pkg_mockgyver_fetch = git
+pkg_mockgyver_repo = https://github.com/klajo/mockgyver
+pkg_mockgyver_commit = master
+
+PACKAGES += modlib
+pkg_modlib_name = modlib
+pkg_modlib_description = Web framework based on Erlang's inets httpd
+pkg_modlib_homepage = https://github.com/gar1t/modlib
+pkg_modlib_fetch = git
+pkg_modlib_repo = https://github.com/gar1t/modlib
+pkg_modlib_commit = master
+
+PACKAGES += mongodb
+pkg_mongodb_name = mongodb
+pkg_mongodb_description = MongoDB driver for Erlang
+pkg_mongodb_homepage = https://github.com/comtihon/mongodb-erlang
+pkg_mongodb_fetch = git
+pkg_mongodb_repo = https://github.com/comtihon/mongodb-erlang
+pkg_mongodb_commit = master
+
+PACKAGES += mongooseim
+pkg_mongooseim_name = mongooseim
+pkg_mongooseim_description = Jabber / XMPP server with focus on performance and scalability, by Erlang Solutions
+pkg_mongooseim_homepage = https://www.erlang-solutions.com/products/mongooseim-massively-scalable-ejabberd-platform
+pkg_mongooseim_fetch = git
+pkg_mongooseim_repo = https://github.com/esl/MongooseIM
+pkg_mongooseim_commit = master
+
+PACKAGES += moyo
+pkg_moyo_name = moyo
+pkg_moyo_description = Erlang utility functions library
+pkg_moyo_homepage = https://github.com/dwango/moyo
+pkg_moyo_fetch = git
+pkg_moyo_repo = https://github.com/dwango/moyo
+pkg_moyo_commit = master
+
+PACKAGES += msgpack
+pkg_msgpack_name = msgpack
+pkg_msgpack_description = MessagePack (de)serializer implementation for Erlang
+pkg_msgpack_homepage = https://github.com/msgpack/msgpack-erlang
+pkg_msgpack_fetch = git
+pkg_msgpack_repo = https://github.com/msgpack/msgpack-erlang
+pkg_msgpack_commit = master
+
+PACKAGES += mu2
+pkg_mu2_name = mu2
+pkg_mu2_description = Erlang mutation testing tool
+pkg_mu2_homepage = https://github.com/ramsay-t/mu2
+pkg_mu2_fetch = git
+pkg_mu2_repo = https://github.com/ramsay-t/mu2
+pkg_mu2_commit = master
+
+PACKAGES += mustache
+pkg_mustache_name = mustache
+pkg_mustache_description = Mustache template engine for Erlang.
+pkg_mustache_homepage = https://github.com/mojombo/mustache.erl
+pkg_mustache_fetch = git
+pkg_mustache_repo = https://github.com/mojombo/mustache.erl
+pkg_mustache_commit = master
+
+PACKAGES += myproto
+pkg_myproto_name = myproto
+pkg_myproto_description = MySQL Server Protocol in Erlang
+pkg_myproto_homepage = https://github.com/altenwald/myproto
+pkg_myproto_fetch = git
+pkg_myproto_repo = https://github.com/altenwald/myproto
+pkg_myproto_commit = master
+
+PACKAGES += mysql
+pkg_mysql_name = mysql
+pkg_mysql_description = MySQL client library for Erlang/OTP
+pkg_mysql_homepage = https://github.com/mysql-otp/mysql-otp
+pkg_mysql_fetch = git
+pkg_mysql_repo = https://github.com/mysql-otp/mysql-otp
+pkg_mysql_commit = 1.5.1
+
+PACKAGES += n2o
+pkg_n2o_name = n2o
+pkg_n2o_description = WebSocket Application Server
+pkg_n2o_homepage = https://github.com/5HT/n2o
+pkg_n2o_fetch = git
+pkg_n2o_repo = https://github.com/5HT/n2o
+pkg_n2o_commit = master
+
+PACKAGES += nat_upnp
+pkg_nat_upnp_name = nat_upnp
+pkg_nat_upnp_description = Erlang library to map an internal port to an external one using UPnP IGD
+pkg_nat_upnp_homepage = https://github.com/benoitc/nat_upnp
+pkg_nat_upnp_fetch = git
+pkg_nat_upnp_repo = https://github.com/benoitc/nat_upnp
+pkg_nat_upnp_commit = master
+
+PACKAGES += neo4j
+pkg_neo4j_name = neo4j
+pkg_neo4j_description = Erlang client library for Neo4J.
+pkg_neo4j_homepage = https://github.com/dmitriid/neo4j-erlang
+pkg_neo4j_fetch = git
+pkg_neo4j_repo = https://github.com/dmitriid/neo4j-erlang
+pkg_neo4j_commit = master
+
+PACKAGES += neotoma
+pkg_neotoma_name = neotoma
+pkg_neotoma_description = Erlang library and packrat parser-generator for parsing expression grammars.
+pkg_neotoma_homepage = https://github.com/seancribbs/neotoma
+pkg_neotoma_fetch = git
+pkg_neotoma_repo = https://github.com/seancribbs/neotoma
+pkg_neotoma_commit = master
+
+PACKAGES += newrelic
+pkg_newrelic_name = newrelic
+pkg_newrelic_description = Erlang library for sending metrics to New Relic
+pkg_newrelic_homepage = https://github.com/wooga/newrelic-erlang
+pkg_newrelic_fetch = git
+pkg_newrelic_repo = https://github.com/wooga/newrelic-erlang
+pkg_newrelic_commit = master
+
+PACKAGES += nifty
+pkg_nifty_name = nifty
+pkg_nifty_description = Erlang NIF wrapper generator
+pkg_nifty_homepage = https://github.com/parapluu/nifty
+pkg_nifty_fetch = git
+pkg_nifty_repo = https://github.com/parapluu/nifty
+pkg_nifty_commit = master
+
+PACKAGES += nitrogen_core
+pkg_nitrogen_core_name = nitrogen_core
+pkg_nitrogen_core_description = The core Nitrogen library.
+pkg_nitrogen_core_homepage = http://nitrogenproject.com/
+pkg_nitrogen_core_fetch = git
+pkg_nitrogen_core_repo = https://github.com/nitrogen/nitrogen_core
+pkg_nitrogen_core_commit = master
+
+PACKAGES += nkbase
+pkg_nkbase_name = nkbase
+pkg_nkbase_description = NkBASE distributed database
+pkg_nkbase_homepage = https://github.com/Nekso/nkbase
+pkg_nkbase_fetch = git
+pkg_nkbase_repo = https://github.com/Nekso/nkbase
+pkg_nkbase_commit = develop
+
+PACKAGES += nkdocker
+pkg_nkdocker_name = nkdocker
+pkg_nkdocker_description = Erlang Docker client
+pkg_nkdocker_homepage = https://github.com/Nekso/nkdocker
+pkg_nkdocker_fetch = git
+pkg_nkdocker_repo = https://github.com/Nekso/nkdocker
+pkg_nkdocker_commit = master
+
+PACKAGES += nkpacket
+pkg_nkpacket_name = nkpacket
+pkg_nkpacket_description = Generic Erlang transport layer
+pkg_nkpacket_homepage = https://github.com/Nekso/nkpacket
+pkg_nkpacket_fetch = git
+pkg_nkpacket_repo = https://github.com/Nekso/nkpacket
+pkg_nkpacket_commit = master
+
+PACKAGES += nksip
+pkg_nksip_name = nksip
+pkg_nksip_description = Erlang SIP application server
+pkg_nksip_homepage = https://github.com/kalta/nksip
+pkg_nksip_fetch = git
+pkg_nksip_repo = https://github.com/kalta/nksip
+pkg_nksip_commit = master
+
+PACKAGES += nodefinder
+pkg_nodefinder_name = nodefinder
+pkg_nodefinder_description = Automatic node discovery via UDP multicast
+pkg_nodefinder_homepage = https://github.com/erlanger/nodefinder
+pkg_nodefinder_fetch = git
+pkg_nodefinder_repo = https://github.com/okeuday/nodefinder
+pkg_nodefinder_commit = master
+
+PACKAGES += nprocreg
+pkg_nprocreg_name = nprocreg
+pkg_nprocreg_description = Minimal Distributed Erlang Process Registry
+pkg_nprocreg_homepage = http://nitrogenproject.com/
+pkg_nprocreg_fetch = git
+pkg_nprocreg_repo = https://github.com/nitrogen/nprocreg
+pkg_nprocreg_commit = master
+
+PACKAGES += oauth
+pkg_oauth_name = oauth
+pkg_oauth_description = An Erlang OAuth 1.0 implementation
+pkg_oauth_homepage = https://github.com/tim/erlang-oauth
+pkg_oauth_fetch = git
+pkg_oauth_repo = https://github.com/tim/erlang-oauth
+pkg_oauth_commit = master
+
+PACKAGES += oauth2
+pkg_oauth2_name = oauth2
+pkg_oauth2_description = Erlang OAuth2 implementation
+pkg_oauth2_homepage = https://github.com/kivra/oauth2
+pkg_oauth2_fetch = git
+pkg_oauth2_repo = https://github.com/kivra/oauth2
+pkg_oauth2_commit = master
+
+PACKAGES += observer_cli
+pkg_observer_cli_name = observer_cli
+pkg_observer_cli_description = Visualize Erlang/Elixir Nodes On The Command Line
+pkg_observer_cli_homepage = http://zhongwencool.github.io/observer_cli
+pkg_observer_cli_fetch = git
+pkg_observer_cli_repo = https://github.com/zhongwencool/observer_cli
+pkg_observer_cli_commit = master
+
+PACKAGES += octopus
+pkg_octopus_name = octopus
+pkg_octopus_description = Small and flexible pool manager written in Erlang
+pkg_octopus_homepage = https://github.com/erlangbureau/octopus
+pkg_octopus_fetch = git
+pkg_octopus_repo = https://github.com/erlangbureau/octopus
+pkg_octopus_commit = master
+
+PACKAGES += of_protocol
+pkg_of_protocol_name = of_protocol
+pkg_of_protocol_description = OpenFlow Protocol Library for Erlang
+pkg_of_protocol_homepage = https://github.com/FlowForwarding/of_protocol
+pkg_of_protocol_fetch = git
+pkg_of_protocol_repo = https://github.com/FlowForwarding/of_protocol
+pkg_of_protocol_commit = master
+
+PACKAGES += opencouch
+pkg_opencouch_name = couch
+pkg_opencouch_description = An embeddable document-oriented database compatible with Apache CouchDB
+pkg_opencouch_homepage = https://github.com/benoitc/opencouch
+pkg_opencouch_fetch = git
+pkg_opencouch_repo = https://github.com/benoitc/opencouch
+pkg_opencouch_commit = master
+
+PACKAGES += openflow
+pkg_openflow_name = openflow
+pkg_openflow_description = An OpenFlow controller written in pure Erlang
+pkg_openflow_homepage = https://github.com/renatoaguiar/erlang-openflow
+pkg_openflow_fetch = git
+pkg_openflow_repo = https://github.com/renatoaguiar/erlang-openflow
+pkg_openflow_commit = master
+
+PACKAGES += openid
+pkg_openid_name = openid
+pkg_openid_description = Erlang OpenID
+pkg_openid_homepage = https://github.com/brendonh/erl_openid
+pkg_openid_fetch = git
+pkg_openid_repo = https://github.com/brendonh/erl_openid
+pkg_openid_commit = master
+
+PACKAGES += openpoker
+pkg_openpoker_name = openpoker
+pkg_openpoker_description = Genesis Texas hold'em Game Server
+pkg_openpoker_homepage = https://github.com/hpyhacking/openpoker
+pkg_openpoker_fetch = git
+pkg_openpoker_repo = https://github.com/hpyhacking/openpoker
+pkg_openpoker_commit = master
+
+PACKAGES += otpbp
+pkg_otpbp_name = otpbp
+pkg_otpbp_description = Parse transform for using new OTP functions in old Erlang/OTP releases (R15, R16, 17, 18, 19)
+pkg_otpbp_homepage = https://github.com/Ledest/otpbp
+pkg_otpbp_fetch = git
+pkg_otpbp_repo = https://github.com/Ledest/otpbp
+pkg_otpbp_commit = master
+
+PACKAGES += pal
+pkg_pal_name = pal
+pkg_pal_description = Pragmatic Authentication Library
+pkg_pal_homepage = https://github.com/manifest/pal
+pkg_pal_fetch = git
+pkg_pal_repo = https://github.com/manifest/pal
+pkg_pal_commit = master
+
+PACKAGES += parse_trans
+pkg_parse_trans_name = parse_trans
+pkg_parse_trans_description = Parse transform utilities for Erlang
+pkg_parse_trans_homepage = https://github.com/uwiger/parse_trans
+pkg_parse_trans_fetch = git
+pkg_parse_trans_repo = https://github.com/uwiger/parse_trans
+pkg_parse_trans_commit = master
+
+PACKAGES += parsexml
+pkg_parsexml_name = parsexml
+pkg_parsexml_description = Simple DOM XML parser with convenient and very simple API
+pkg_parsexml_homepage = https://github.com/maxlapshin/parsexml
+pkg_parsexml_fetch = git
+pkg_parsexml_repo = https://github.com/maxlapshin/parsexml
+pkg_parsexml_commit = master
+
+PACKAGES += partisan
+pkg_partisan_name = partisan
+pkg_partisan_description = High-performance, high-scalability distributed computing with Erlang and Elixir.
+pkg_partisan_homepage = http://partisan.cloud
+pkg_partisan_fetch = git
+pkg_partisan_repo = https://github.com/lasp-lang/partisan
+pkg_partisan_commit = master
+
+PACKAGES += pegjs
+pkg_pegjs_name = pegjs
+pkg_pegjs_description = An implementation of PEG.js grammar for Erlang.
+pkg_pegjs_homepage = https://github.com/dmitriid/pegjs
+pkg_pegjs_fetch = git
+pkg_pegjs_repo = https://github.com/dmitriid/pegjs
+pkg_pegjs_commit = master
+
+PACKAGES += percept2
+pkg_percept2_name = percept2
+pkg_percept2_description = Concurrent profiling tool for Erlang
+pkg_percept2_homepage = https://github.com/huiqing/percept2
+pkg_percept2_fetch = git
+pkg_percept2_repo = https://github.com/huiqing/percept2
+pkg_percept2_commit = master
+
+PACKAGES += pgo
+pkg_pgo_name = pgo
+pkg_pgo_description = Erlang Postgres client and connection pool
+pkg_pgo_homepage = https://github.com/erleans/pgo.git
+pkg_pgo_fetch = git
+pkg_pgo_repo = https://github.com/erleans/pgo.git
+pkg_pgo_commit = master
+
+PACKAGES += pgsql
+pkg_pgsql_name = pgsql
+pkg_pgsql_description = Erlang PostgreSQL driver
+pkg_pgsql_homepage = https://github.com/semiocast/pgsql
+pkg_pgsql_fetch = git
+pkg_pgsql_repo = https://github.com/semiocast/pgsql
+pkg_pgsql_commit = master
+
+PACKAGES += pkgx
+pkg_pkgx_name = pkgx
+pkg_pkgx_description = Build .deb packages from Erlang releases
+pkg_pkgx_homepage = https://github.com/arjan/pkgx
+pkg_pkgx_fetch = git
+pkg_pkgx_repo = https://github.com/arjan/pkgx
+pkg_pkgx_commit = master
+
+PACKAGES += pkt
+pkg_pkt_name = pkt
+pkg_pkt_description = Erlang network protocol library
+pkg_pkt_homepage = https://github.com/msantos/pkt
+pkg_pkt_fetch = git
+pkg_pkt_repo = https://github.com/msantos/pkt
+pkg_pkt_commit = master
+
+PACKAGES += plain_fsm
+pkg_plain_fsm_name = plain_fsm
+pkg_plain_fsm_description = A behaviour/support library for writing plain Erlang FSMs.
+pkg_plain_fsm_homepage = https://github.com/uwiger/plain_fsm
+pkg_plain_fsm_fetch = git
+pkg_plain_fsm_repo = https://github.com/uwiger/plain_fsm
+pkg_plain_fsm_commit = master
+
+PACKAGES += plumtree
+pkg_plumtree_name = plumtree
+pkg_plumtree_description = Epidemic Broadcast Trees
+pkg_plumtree_homepage = https://github.com/helium/plumtree
+pkg_plumtree_fetch = git
+pkg_plumtree_repo = https://github.com/helium/plumtree
+pkg_plumtree_commit = master
+
+PACKAGES += pmod_transform
+pkg_pmod_transform_name = pmod_transform
+pkg_pmod_transform_description = Parse transform for parameterized modules
+pkg_pmod_transform_homepage = https://github.com/erlang/pmod_transform
+pkg_pmod_transform_fetch = git
+pkg_pmod_transform_repo = https://github.com/erlang/pmod_transform
+pkg_pmod_transform_commit = master
+
+PACKAGES += pobox
+pkg_pobox_name = pobox
+pkg_pobox_description = External buffer processes to protect against mailbox overflow in Erlang
+pkg_pobox_homepage = https://github.com/ferd/pobox
+pkg_pobox_fetch = git
+pkg_pobox_repo = https://github.com/ferd/pobox
+pkg_pobox_commit = master
+
+PACKAGES += ponos
+pkg_ponos_name = ponos
+pkg_ponos_description = ponos is a simple yet powerful load generator written in Erlang
+pkg_ponos_homepage = https://github.com/klarna/ponos
+pkg_ponos_fetch = git
+pkg_ponos_repo = https://github.com/klarna/ponos
+pkg_ponos_commit = master
+
+PACKAGES += poolboy
+pkg_poolboy_name = poolboy
+pkg_poolboy_description = A hunky Erlang worker pool factory
+pkg_poolboy_homepage = https://github.com/devinus/poolboy
+pkg_poolboy_fetch = git
+pkg_poolboy_repo = https://github.com/devinus/poolboy
+pkg_poolboy_commit = master
+
+PACKAGES += pooler
+pkg_pooler_name = pooler
+pkg_pooler_description = An OTP Process Pool Application
+pkg_pooler_homepage = https://github.com/seth/pooler
+pkg_pooler_fetch = git
+pkg_pooler_repo = https://github.com/seth/pooler
+pkg_pooler_commit = master
+
+PACKAGES += pqueue
+pkg_pqueue_name = pqueue
+pkg_pqueue_description = Erlang Priority Queues
+pkg_pqueue_homepage = https://github.com/okeuday/pqueue
+pkg_pqueue_fetch = git
+pkg_pqueue_repo = https://github.com/okeuday/pqueue
+pkg_pqueue_commit = master
+
+PACKAGES += procket
+pkg_procket_name = procket
+pkg_procket_description = Erlang interface to low level socket operations
+pkg_procket_homepage = http://blog.listincomprehension.com/search/label/procket
+pkg_procket_fetch = git
+pkg_procket_repo = https://github.com/msantos/procket
+pkg_procket_commit = master
+
+PACKAGES += prometheus
+pkg_prometheus_name = prometheus
+pkg_prometheus_description = Prometheus.io client in Erlang
+pkg_prometheus_homepage = https://github.com/deadtrickster/prometheus.erl
+pkg_prometheus_fetch = git
+pkg_prometheus_repo = https://github.com/deadtrickster/prometheus.erl
+pkg_prometheus_commit = master
+
+PACKAGES += prop
+pkg_prop_name = prop
+pkg_prop_description = An Erlang code scaffolding and generator system.
+pkg_prop_homepage = https://github.com/nuex/prop
+pkg_prop_fetch = git
+pkg_prop_repo = https://github.com/nuex/prop
+pkg_prop_commit = master
+
+PACKAGES += proper
+pkg_proper_name = proper
+pkg_proper_description = PropEr: a QuickCheck-inspired property-based testing tool for Erlang.
+pkg_proper_homepage = http://proper.softlab.ntua.gr
+pkg_proper_fetch = git
+pkg_proper_repo = https://github.com/manopapad/proper
+pkg_proper_commit = master
+
+PACKAGES += props
+pkg_props_name = props
+pkg_props_description = Property structure library
+pkg_props_homepage = https://github.com/greyarea/props
+pkg_props_fetch = git
+pkg_props_repo = https://github.com/greyarea/props
+pkg_props_commit = master
+
+PACKAGES += protobuffs
+pkg_protobuffs_name = protobuffs
+pkg_protobuffs_description = An implementation of Google's Protocol Buffers for Erlang, based on ngerakines/erlang_protobuffs.
+pkg_protobuffs_homepage = https://github.com/basho/erlang_protobuffs
+pkg_protobuffs_fetch = git
+pkg_protobuffs_repo = https://github.com/basho/erlang_protobuffs
+pkg_protobuffs_commit = master
+
+PACKAGES += psycho
+pkg_psycho_name = psycho
+pkg_psycho_description = HTTP server that provides a WSGI-like interface for applications and middleware.
+pkg_psycho_homepage = https://github.com/gar1t/psycho
+pkg_psycho_fetch = git
+pkg_psycho_repo = https://github.com/gar1t/psycho
+pkg_psycho_commit = master
+
+PACKAGES += purity
+pkg_purity_name = purity
+pkg_purity_description = A side-effect analyzer for Erlang
+pkg_purity_homepage = https://github.com/mpitid/purity
+pkg_purity_fetch = git
+pkg_purity_repo = https://github.com/mpitid/purity
+pkg_purity_commit = master
+
+PACKAGES += push_service
+pkg_push_service_name = push_service
+pkg_push_service_description = Push service
+pkg_push_service_homepage = https://github.com/hairyhum/push_service
+pkg_push_service_fetch = git
+pkg_push_service_repo = https://github.com/hairyhum/push_service
+pkg_push_service_commit = master
+
+PACKAGES += qdate
+pkg_qdate_name = qdate
+pkg_qdate_description = Date, time, and timezone parsing, formatting, and conversion for Erlang.
+pkg_qdate_homepage = https://github.com/choptastic/qdate
+pkg_qdate_fetch = git
+pkg_qdate_repo = https://github.com/choptastic/qdate
+pkg_qdate_commit = master
+
+PACKAGES += qrcode
+pkg_qrcode_name = qrcode
+pkg_qrcode_description = QR Code encoder in Erlang
+pkg_qrcode_homepage = https://github.com/komone/qrcode
+pkg_qrcode_fetch = git
+pkg_qrcode_repo = https://github.com/komone/qrcode
+pkg_qrcode_commit = master
+
+PACKAGES += quest
+pkg_quest_name = quest
+pkg_quest_description = Learn Erlang through this set of challenges. An interactive system for getting to know Erlang.
+pkg_quest_homepage = https://github.com/eriksoe/ErlangQuest
+pkg_quest_fetch = git
+pkg_quest_repo = https://github.com/eriksoe/ErlangQuest
+pkg_quest_commit = master
+
+PACKAGES += quickrand
+pkg_quickrand_name = quickrand
+pkg_quickrand_description = Quick Erlang Random Number Generation
+pkg_quickrand_homepage = https://github.com/okeuday/quickrand
+pkg_quickrand_fetch = git
+pkg_quickrand_repo = https://github.com/okeuday/quickrand
+pkg_quickrand_commit = master
+
+PACKAGES += rabbit
+pkg_rabbit_name = rabbit
+pkg_rabbit_description = RabbitMQ Server
+pkg_rabbit_homepage = https://www.rabbitmq.com/
+pkg_rabbit_fetch = git
+pkg_rabbit_repo = https://github.com/rabbitmq/rabbitmq-server.git
+pkg_rabbit_commit = master
+
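+# Aside: stock erlang.mk copies expose a `search` target that queries this
+# index from the command line, e.g. `make search q=rabbit` should list the
+# entry above (assumes the `search` target is present in this erlang.mk copy).
+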
+PACKAGES += rabbit_exchange_type_riak
+pkg_rabbit_exchange_type_riak_name = rabbit_exchange_type_riak
+pkg_rabbit_exchange_type_riak_description = Custom RabbitMQ exchange type for sticking messages in Riak
+pkg_rabbit_exchange_type_riak_homepage = https://github.com/jbrisbin/riak-exchange
+pkg_rabbit_exchange_type_riak_fetch = git
+pkg_rabbit_exchange_type_riak_repo = https://github.com/jbrisbin/riak-exchange
+pkg_rabbit_exchange_type_riak_commit = master
+
+PACKAGES += rack
+pkg_rack_name = rack
+pkg_rack_description = Rack handler for Erlang
+pkg_rack_homepage = https://github.com/erlyvideo/rack
+pkg_rack_fetch = git
+pkg_rack_repo = https://github.com/erlyvideo/rack
+pkg_rack_commit = master
+
+PACKAGES += radierl
+pkg_radierl_name = radierl
+pkg_radierl_description = RADIUS protocol stack implemented in Erlang.
+pkg_radierl_homepage = https://github.com/vances/radierl
+pkg_radierl_fetch = git
+pkg_radierl_repo = https://github.com/vances/radierl
+pkg_radierl_commit = master
+
+PACKAGES += rafter
+pkg_rafter_name = rafter
+pkg_rafter_description = An Erlang library application which implements the Raft consensus protocol
+pkg_rafter_homepage = https://github.com/andrewjstone/rafter
+pkg_rafter_fetch = git
+pkg_rafter_repo = https://github.com/andrewjstone/rafter
+pkg_rafter_commit = master
+
+PACKAGES += ranch
+pkg_ranch_name = ranch
+pkg_ranch_description = Socket acceptor pool for TCP protocols.
+pkg_ranch_homepage = http://ninenines.eu
+pkg_ranch_fetch = git
+pkg_ranch_repo = https://github.com/ninenines/ranch
+pkg_ranch_commit = 1.2.1
+
+PACKAGES += rbeacon
+pkg_rbeacon_name = rbeacon
+pkg_rbeacon_description = LAN discovery and presence in Erlang.
+pkg_rbeacon_homepage = https://github.com/refuge/rbeacon
+pkg_rbeacon_fetch = git
+pkg_rbeacon_repo = https://github.com/refuge/rbeacon
+pkg_rbeacon_commit = master
+
+PACKAGES += rebar
+pkg_rebar_name = rebar
+pkg_rebar_description = Erlang build tool that makes it easy to compile and test Erlang applications, port drivers and releases.
+pkg_rebar_homepage = http://www.rebar3.org
+pkg_rebar_fetch = git
+pkg_rebar_repo = https://github.com/rebar/rebar3
+pkg_rebar_commit = master
+
+PACKAGES += rebus
+pkg_rebus_name = rebus
+pkg_rebus_description = A stupid-simple, internal pub/sub event bus written in and for Erlang.
+pkg_rebus_homepage = https://github.com/olle/rebus
+pkg_rebus_fetch = git
+pkg_rebus_repo = https://github.com/olle/rebus
+pkg_rebus_commit = master
+
+PACKAGES += rec2json
+pkg_rec2json_name = rec2json
+pkg_rec2json_description = Compile Erlang record definitions into modules to convert them to/from JSON easily.
+pkg_rec2json_homepage = https://github.com/lordnull/rec2json
+pkg_rec2json_fetch = git
+pkg_rec2json_repo = https://github.com/lordnull/rec2json
+pkg_rec2json_commit = master
+
+PACKAGES += recon
+pkg_recon_name = recon
+pkg_recon_description = Collection of functions and scripts to debug Erlang in production.
+pkg_recon_homepage = https://github.com/ferd/recon
+pkg_recon_fetch = git
+pkg_recon_repo = https://github.com/ferd/recon
+pkg_recon_commit = master
+
+PACKAGES += record_info
+pkg_record_info_name = record_info
+pkg_record_info_description = Convert between records and proplists
+pkg_record_info_homepage = https://github.com/bipthelin/erlang-record_info
+pkg_record_info_fetch = git
+pkg_record_info_repo = https://github.com/bipthelin/erlang-record_info
+pkg_record_info_commit = master
+
+PACKAGES += redgrid
+pkg_redgrid_name = redgrid
+pkg_redgrid_description = Automatic Erlang node discovery via Redis
+pkg_redgrid_homepage = https://github.com/jkvor/redgrid
+pkg_redgrid_fetch = git
+pkg_redgrid_repo = https://github.com/jkvor/redgrid
+pkg_redgrid_commit = master
+
+PACKAGES += redo
+pkg_redo_name = redo
+pkg_redo_description = Pipelined Erlang Redis client
+pkg_redo_homepage = https://github.com/jkvor/redo
+pkg_redo_fetch = git
+pkg_redo_repo = https://github.com/jkvor/redo
+pkg_redo_commit = master
+
+PACKAGES += reload_mk
+pkg_reload_mk_name = reload_mk
+pkg_reload_mk_description = Live reload plugin for erlang.mk.
+pkg_reload_mk_homepage = https://github.com/bullno1/reload.mk
+pkg_reload_mk_fetch = git
+pkg_reload_mk_repo = https://github.com/bullno1/reload.mk
+pkg_reload_mk_commit = master
+
+PACKAGES += reltool_util
+pkg_reltool_util_name = reltool_util
+pkg_reltool_util_description = Utility application for Erlang's reltool
+pkg_reltool_util_homepage = https://github.com/okeuday/reltool_util
+pkg_reltool_util_fetch = git
+pkg_reltool_util_repo = https://github.com/okeuday/reltool_util
+pkg_reltool_util_commit = master
+
+PACKAGES += relx
+pkg_relx_name = relx
+pkg_relx_description = Sane, simple release creation for Erlang
+pkg_relx_homepage = https://github.com/erlware/relx
+pkg_relx_fetch = git
+pkg_relx_repo = https://github.com/erlware/relx
+pkg_relx_commit = master
+
+PACKAGES += resource_discovery
+pkg_resource_discovery_name = resource_discovery
+pkg_resource_discovery_description = An application used to dynamically discover resources present in an Erlang node cluster.
+pkg_resource_discovery_homepage = http://erlware.org/
+pkg_resource_discovery_fetch = git
+pkg_resource_discovery_repo = https://github.com/erlware/resource_discovery
+pkg_resource_discovery_commit = master
+
+PACKAGES += restc
+pkg_restc_name = restc
+pkg_restc_description = Erlang REST client
+pkg_restc_homepage = https://github.com/kivra/restclient
+pkg_restc_fetch = git
+pkg_restc_repo = https://github.com/kivra/restclient
+pkg_restc_commit = master
+
+PACKAGES += rfc4627_jsonrpc
+pkg_rfc4627_jsonrpc_name = rfc4627_jsonrpc
+pkg_rfc4627_jsonrpc_description = Erlang RFC4627 (JSON) codec and JSON-RPC server implementation.
+pkg_rfc4627_jsonrpc_homepage = https://github.com/tonyg/erlang-rfc4627
+pkg_rfc4627_jsonrpc_fetch = git
+pkg_rfc4627_jsonrpc_repo = https://github.com/tonyg/erlang-rfc4627
+pkg_rfc4627_jsonrpc_commit = master
+
+PACKAGES += riak_control
+pkg_riak_control_name = riak_control
+pkg_riak_control_description = Webmachine-based administration interface for Riak.
+pkg_riak_control_homepage = https://github.com/basho/riak_control
+pkg_riak_control_fetch = git
+pkg_riak_control_repo = https://github.com/basho/riak_control
+pkg_riak_control_commit = master
+
+PACKAGES += riak_core
+pkg_riak_core_name = riak_core
+pkg_riak_core_description = Distributed systems infrastructure used by Riak.
+pkg_riak_core_homepage = https://github.com/basho/riak_core
+pkg_riak_core_fetch = git
+pkg_riak_core_repo = https://github.com/basho/riak_core
+pkg_riak_core_commit = master
+
+PACKAGES += riak_dt
+pkg_riak_dt_name = riak_dt
+pkg_riak_dt_description = Convergent replicated datatypes in Erlang
+pkg_riak_dt_homepage = https://github.com/basho/riak_dt
+pkg_riak_dt_fetch = git
+pkg_riak_dt_repo = https://github.com/basho/riak_dt
+pkg_riak_dt_commit = master
+
+PACKAGES += riak_ensemble
+pkg_riak_ensemble_name = riak_ensemble
+pkg_riak_ensemble_description = Multi-Paxos framework in Erlang
+pkg_riak_ensemble_homepage = https://github.com/basho/riak_ensemble
+pkg_riak_ensemble_fetch = git
+pkg_riak_ensemble_repo = https://github.com/basho/riak_ensemble
+pkg_riak_ensemble_commit = master
+
+PACKAGES += riak_kv
+pkg_riak_kv_name = riak_kv
+pkg_riak_kv_description = Riak Key/Value Store
+pkg_riak_kv_homepage = https://github.com/basho/riak_kv
+pkg_riak_kv_fetch = git
+pkg_riak_kv_repo = https://github.com/basho/riak_kv
+pkg_riak_kv_commit = master
+
+PACKAGES += riak_pg
+pkg_riak_pg_name = riak_pg
+pkg_riak_pg_description = Distributed process groups with riak_core.
+pkg_riak_pg_homepage = https://github.com/cmeiklejohn/riak_pg
+pkg_riak_pg_fetch = git
+pkg_riak_pg_repo = https://github.com/cmeiklejohn/riak_pg
+pkg_riak_pg_commit = master
+
+PACKAGES += riak_pipe
+pkg_riak_pipe_name = riak_pipe
+pkg_riak_pipe_description = Riak Pipelines
+pkg_riak_pipe_homepage = https://github.com/basho/riak_pipe
+pkg_riak_pipe_fetch = git
+pkg_riak_pipe_repo = https://github.com/basho/riak_pipe
+pkg_riak_pipe_commit = master
+
+PACKAGES += riak_sysmon
+pkg_riak_sysmon_name = riak_sysmon
+pkg_riak_sysmon_description = Simple OTP app for managing Erlang VM system_monitor event messages
+pkg_riak_sysmon_homepage = https://github.com/basho/riak_sysmon
+pkg_riak_sysmon_fetch = git
+pkg_riak_sysmon_repo = https://github.com/basho/riak_sysmon
+pkg_riak_sysmon_commit = master
+
+PACKAGES += riak_test
+pkg_riak_test_name = riak_test
+pkg_riak_test_description = I'm in your cluster, testing your riaks
+pkg_riak_test_homepage = https://github.com/basho/riak_test
+pkg_riak_test_fetch = git
+pkg_riak_test_repo = https://github.com/basho/riak_test
+pkg_riak_test_commit = master
+
+PACKAGES += riakc
+pkg_riakc_name = riakc
+pkg_riakc_description = Erlang clients for Riak.
+pkg_riakc_homepage = https://github.com/basho/riak-erlang-client
+pkg_riakc_fetch = git
+pkg_riakc_repo = https://github.com/basho/riak-erlang-client
+pkg_riakc_commit = master
+
+PACKAGES += riakhttpc
+pkg_riakhttpc_name = riakhttpc
+pkg_riakhttpc_description = Riak Erlang client using the HTTP interface
+pkg_riakhttpc_homepage = https://github.com/basho/riak-erlang-http-client
+pkg_riakhttpc_fetch = git
+pkg_riakhttpc_repo = https://github.com/basho/riak-erlang-http-client
+pkg_riakhttpc_commit = master
+
+PACKAGES += riaknostic
+pkg_riaknostic_name = riaknostic
+pkg_riaknostic_description = A diagnostic tool for Riak installations, to find common errors ASAP
+pkg_riaknostic_homepage = https://github.com/basho/riaknostic
+pkg_riaknostic_fetch = git
+pkg_riaknostic_repo = https://github.com/basho/riaknostic
+pkg_riaknostic_commit = master
+
+PACKAGES += riakpool
+pkg_riakpool_name = riakpool
+pkg_riakpool_description = Erlang Riak client pool
+pkg_riakpool_homepage = https://github.com/dweldon/riakpool
+pkg_riakpool_fetch = git
+pkg_riakpool_repo = https://github.com/dweldon/riakpool
+pkg_riakpool_commit = master
+
+PACKAGES += rivus_cep
+pkg_rivus_cep_name = rivus_cep
+pkg_rivus_cep_description = Complex event processing in Erlang
+pkg_rivus_cep_homepage = https://github.com/vascokk/rivus_cep
+pkg_rivus_cep_fetch = git
+pkg_rivus_cep_repo = https://github.com/vascokk/rivus_cep
+pkg_rivus_cep_commit = master
+
+PACKAGES += rlimit
+pkg_rlimit_name = rlimit
+pkg_rlimit_description = Magnus Klaar's rate limiter code from etorrent
+pkg_rlimit_homepage = https://github.com/jlouis/rlimit
+pkg_rlimit_fetch = git
+pkg_rlimit_repo = https://github.com/jlouis/rlimit
+pkg_rlimit_commit = master
+
+PACKAGES += rust_mk
+pkg_rust_mk_name = rust_mk
+pkg_rust_mk_description = Build Rust crates in an Erlang application
+pkg_rust_mk_homepage = https://github.com/goertzenator/rust.mk
+pkg_rust_mk_fetch = git
+pkg_rust_mk_repo = https://github.com/goertzenator/rust.mk
+pkg_rust_mk_commit = master
+
+PACKAGES += safetyvalve
+pkg_safetyvalve_name = safetyvalve
+pkg_safetyvalve_description = A safety valve for your Erlang node
+pkg_safetyvalve_homepage = https://github.com/jlouis/safetyvalve
+pkg_safetyvalve_fetch = git
+pkg_safetyvalve_repo = https://github.com/jlouis/safetyvalve
+pkg_safetyvalve_commit = master
+
+PACKAGES += seestar
+pkg_seestar_name = seestar
+pkg_seestar_description = The Erlang client for Cassandra 1.2+ binary protocol
+pkg_seestar_homepage = https://github.com/iamaleksey/seestar
+pkg_seestar_fetch = git
+pkg_seestar_repo = https://github.com/iamaleksey/seestar
+pkg_seestar_commit = master
+
+PACKAGES += service
+pkg_service_name = service
+pkg_service_description = A minimal Erlang behavior for creating CloudI internal services
+pkg_service_homepage = http://cloudi.org/
+pkg_service_fetch = git
+pkg_service_repo = https://github.com/CloudI/service
+pkg_service_commit = master
+
+PACKAGES += setup
+pkg_setup_name = setup
+pkg_setup_description = Generic setup utility for Erlang-based systems
+pkg_setup_homepage = https://github.com/uwiger/setup
+pkg_setup_fetch = git
+pkg_setup_repo = https://github.com/uwiger/setup
+pkg_setup_commit = master
+
+PACKAGES += sext
+pkg_sext_name = sext
+pkg_sext_description = Sortable Erlang Term Serialization
+pkg_sext_homepage = https://github.com/uwiger/sext
+pkg_sext_fetch = git
+pkg_sext_repo = https://github.com/uwiger/sext
+pkg_sext_commit = master
+
+PACKAGES += sfmt
+pkg_sfmt_name = sfmt
+pkg_sfmt_description = SFMT pseudo-random number generator for Erlang.
+pkg_sfmt_homepage = https://github.com/jj1bdx/sfmt-erlang
+pkg_sfmt_fetch = git
+pkg_sfmt_repo = https://github.com/jj1bdx/sfmt-erlang
+pkg_sfmt_commit = master
+
+PACKAGES += sgte
+pkg_sgte_name = sgte
+pkg_sgte_description = A simple Erlang Template Engine
+pkg_sgte_homepage = https://github.com/filippo/sgte
+pkg_sgte_fetch = git
+pkg_sgte_repo = https://github.com/filippo/sgte
+pkg_sgte_commit = master
+
+PACKAGES += sheriff
+pkg_sheriff_name = sheriff
+pkg_sheriff_description = Parse transform for type based validation.
+pkg_sheriff_homepage = http://ninenines.eu
+pkg_sheriff_fetch = git
+pkg_sheriff_repo = https://github.com/extend/sheriff
+pkg_sheriff_commit = master
+
+PACKAGES += shotgun
+pkg_shotgun_name = shotgun
+pkg_shotgun_description = better than just a gun
+pkg_shotgun_homepage = https://github.com/inaka/shotgun
+pkg_shotgun_fetch = git
+pkg_shotgun_repo = https://github.com/inaka/shotgun
+pkg_shotgun_commit = master
+
+PACKAGES += sidejob
+pkg_sidejob_name = sidejob
+pkg_sidejob_description = Parallel worker and capacity limiting library for Erlang
+pkg_sidejob_homepage = https://github.com/basho/sidejob
+pkg_sidejob_fetch = git
+pkg_sidejob_repo = https://github.com/basho/sidejob
+pkg_sidejob_commit = master
+
+PACKAGES += sieve
+pkg_sieve_name = sieve
+pkg_sieve_description = sieve is a simple TCP routing proxy (layer 7) in Erlang
+pkg_sieve_homepage = https://github.com/benoitc/sieve
+pkg_sieve_fetch = git
+pkg_sieve_repo = https://github.com/benoitc/sieve
+pkg_sieve_commit = master
+
+PACKAGES += sighandler
+pkg_sighandler_name = sighandler
+pkg_sighandler_description = Handle UNIX signals in Erlang
+pkg_sighandler_homepage = https://github.com/jkingsbery/sighandler
+pkg_sighandler_fetch = git
+pkg_sighandler_repo = https://github.com/jkingsbery/sighandler
+pkg_sighandler_commit = master
+
+PACKAGES += simhash
+pkg_simhash_name = simhash
+pkg_simhash_description = Simhashing for Erlang -- hashing algorithm to find near-duplicates in binary data.
+pkg_simhash_homepage = https://github.com/ferd/simhash
+pkg_simhash_fetch = git
+pkg_simhash_repo = https://github.com/ferd/simhash
+pkg_simhash_commit = master
+
+PACKAGES += simple_bridge
+pkg_simple_bridge_name = simple_bridge
+pkg_simple_bridge_description = A simple, standardized interface library to Erlang HTTP Servers.
+pkg_simple_bridge_homepage = https://github.com/nitrogen/simple_bridge
+pkg_simple_bridge_fetch = git
+pkg_simple_bridge_repo = https://github.com/nitrogen/simple_bridge
+pkg_simple_bridge_commit = master
+
+PACKAGES += simple_oauth2
+pkg_simple_oauth2_name = simple_oauth2
+pkg_simple_oauth2_description = Simple Erlang OAuth2 client module for any HTTP server framework (Google, Facebook, Yandex, Vkontakte are preconfigured)
+pkg_simple_oauth2_homepage = https://github.com/virtan/simple_oauth2
+pkg_simple_oauth2_fetch = git
+pkg_simple_oauth2_repo = https://github.com/virtan/simple_oauth2
+pkg_simple_oauth2_commit = master
+
+PACKAGES += skel
+pkg_skel_name = skel
+pkg_skel_description = A Streaming Process-based Skeleton Library for Erlang
+pkg_skel_homepage = https://github.com/ParaPhrase/skel
+pkg_skel_fetch = git
+pkg_skel_repo = https://github.com/ParaPhrase/skel
+pkg_skel_commit = master
+
+PACKAGES += slack
+pkg_slack_name = slack
+pkg_slack_description = Minimal Slack notification OTP library.
+pkg_slack_homepage = https://github.com/DonBranson/slack
+pkg_slack_fetch = git
+pkg_slack_repo = https://github.com/DonBranson/slack.git
+pkg_slack_commit = master
+
+PACKAGES += smother
+pkg_smother_name = smother
+pkg_smother_description = Extended code coverage metrics for Erlang.
+pkg_smother_homepage = https://ramsay-t.github.io/Smother/
+pkg_smother_fetch = git
+pkg_smother_repo = https://github.com/ramsay-t/Smother
+pkg_smother_commit = master
+
+PACKAGES += snappyer
+pkg_snappyer_name = snappyer
+pkg_snappyer_description = Snappy as a NIF for Erlang
+pkg_snappyer_homepage = https://github.com/zmstone/snappyer
+pkg_snappyer_fetch = git
+pkg_snappyer_repo = https://github.com/zmstone/snappyer.git
+pkg_snappyer_commit = master
+
+PACKAGES += social
+pkg_social_name = social
+pkg_social_description = Cowboy handler for social login via OAuth2 providers
+pkg_social_homepage = https://github.com/dvv/social
+pkg_social_fetch = git
+pkg_social_repo = https://github.com/dvv/social
+pkg_social_commit = master
+
+PACKAGES += spapi_router
+pkg_spapi_router_name = spapi_router
+pkg_spapi_router_description = Partially-connected Erlang clustering
+pkg_spapi_router_homepage = https://github.com/spilgames/spapi-router
+pkg_spapi_router_fetch = git
+pkg_spapi_router_repo = https://github.com/spilgames/spapi-router
+pkg_spapi_router_commit = master
+
+PACKAGES += sqerl
+pkg_sqerl_name = sqerl
+pkg_sqerl_description = An Erlang-flavoured SQL DSL
+pkg_sqerl_homepage = https://github.com/hairyhum/sqerl
+pkg_sqerl_fetch = git
+pkg_sqerl_repo = https://github.com/hairyhum/sqerl
+pkg_sqerl_commit = master
+
+PACKAGES += srly
+pkg_srly_name = srly
+pkg_srly_description = Native Erlang Unix serial interface
+pkg_srly_homepage = https://github.com/msantos/srly
+pkg_srly_fetch = git
+pkg_srly_repo = https://github.com/msantos/srly
+pkg_srly_commit = master
+
+PACKAGES += sshrpc
+pkg_sshrpc_name = sshrpc
+pkg_sshrpc_description = Erlang SSH RPC module (experimental)
+pkg_sshrpc_homepage = https://github.com/jj1bdx/sshrpc
+pkg_sshrpc_fetch = git
+pkg_sshrpc_repo = https://github.com/jj1bdx/sshrpc
+pkg_sshrpc_commit = master
+
+PACKAGES += stable
+pkg_stable_name = stable
+pkg_stable_description = Library of assorted helpers for Cowboy web server.
+pkg_stable_homepage = https://github.com/dvv/stable
+pkg_stable_fetch = git
+pkg_stable_repo = https://github.com/dvv/stable
+pkg_stable_commit = master
+
+PACKAGES += statebox
+pkg_statebox_name = statebox
+pkg_statebox_description = Erlang state monad with merge/conflict-resolution capabilities. Useful for Riak.
+pkg_statebox_homepage = https://github.com/mochi/statebox
+pkg_statebox_fetch = git
+pkg_statebox_repo = https://github.com/mochi/statebox
+pkg_statebox_commit = master
+
+PACKAGES += statebox_riak
+pkg_statebox_riak_name = statebox_riak
+pkg_statebox_riak_description = Convenience library that makes it easier to use statebox with riak, extracted from best practices in our production code at Mochi Media.
+pkg_statebox_riak_homepage = https://github.com/mochi/statebox_riak
+pkg_statebox_riak_fetch = git
+pkg_statebox_riak_repo = https://github.com/mochi/statebox_riak
+pkg_statebox_riak_commit = master
+
+PACKAGES += statman
+pkg_statman_name = statman
+pkg_statman_description = Efficiently collect massive volumes of metrics inside the Erlang VM
+pkg_statman_homepage = https://github.com/knutin/statman
+pkg_statman_fetch = git
+pkg_statman_repo = https://github.com/knutin/statman
+pkg_statman_commit = master
+
+PACKAGES += statsderl
+pkg_statsderl_name = statsderl
+pkg_statsderl_description = StatsD client (Erlang)
+pkg_statsderl_homepage = https://github.com/lpgauth/statsderl
+pkg_statsderl_fetch = git
+pkg_statsderl_repo = https://github.com/lpgauth/statsderl
+pkg_statsderl_commit = master
+
+PACKAGES += stdinout_pool
+pkg_stdinout_pool_name = stdinout_pool
+pkg_stdinout_pool_description = stdinout_pool : stuff goes in, stuff goes out. there's never any miscommunication.
+pkg_stdinout_pool_homepage = https://github.com/mattsta/erlang-stdinout-pool
+pkg_stdinout_pool_fetch = git
+pkg_stdinout_pool_repo = https://github.com/mattsta/erlang-stdinout-pool
+pkg_stdinout_pool_commit = master
+
+PACKAGES += stockdb
+pkg_stockdb_name = stockdb
+pkg_stockdb_description = Database for storing stock exchange quotes in Erlang
+pkg_stockdb_homepage = https://github.com/maxlapshin/stockdb
+pkg_stockdb_fetch = git
+pkg_stockdb_repo = https://github.com/maxlapshin/stockdb
+pkg_stockdb_commit = master
+
+PACKAGES += stripe
+pkg_stripe_name = stripe
+pkg_stripe_description = Erlang interface to the stripe.com API
+pkg_stripe_homepage = https://github.com/mattsta/stripe-erlang
+pkg_stripe_fetch = git
+pkg_stripe_repo = https://github.com/mattsta/stripe-erlang
+pkg_stripe_commit = v1
+
+PACKAGES += subproc
+pkg_subproc_name = subproc
+pkg_subproc_description = UNIX subprocess manager with {active,once|false} modes
+pkg_subproc_homepage = http://dozzie.jarowit.net/trac/wiki/subproc
+pkg_subproc_fetch = git
+pkg_subproc_repo = https://github.com/dozzie/subproc
+pkg_subproc_commit = v0.1.0
+
+PACKAGES += supervisor3
+pkg_supervisor3_name = supervisor3
+pkg_supervisor3_description = OTP supervisor with additional strategies
+pkg_supervisor3_homepage = https://github.com/klarna/supervisor3
+pkg_supervisor3_fetch = git
+pkg_supervisor3_repo = https://github.com/klarna/supervisor3.git
+pkg_supervisor3_commit = master
+
+PACKAGES += surrogate
+pkg_surrogate_name = surrogate
+pkg_surrogate_description = Proxy server written in Erlang. Supports reverse-proxy load balancing and forward proxy with HTTP (including CONNECT), SOCKS4, SOCKS5, and transparent proxy modes.
+pkg_surrogate_homepage = https://github.com/skruger/Surrogate
+pkg_surrogate_fetch = git
+pkg_surrogate_repo = https://github.com/skruger/Surrogate
+pkg_surrogate_commit = master
+
+PACKAGES += swab
+pkg_swab_name = swab
+pkg_swab_description = General purpose buffer handling module
+pkg_swab_homepage = https://github.com/crownedgrouse/swab
+pkg_swab_fetch = git
+pkg_swab_repo = https://github.com/crownedgrouse/swab
+pkg_swab_commit = master
+
+PACKAGES += swarm
+pkg_swarm_name = swarm
+pkg_swarm_description = Fast and simple acceptor pool for Erlang
+pkg_swarm_homepage = https://github.com/jeremey/swarm
+pkg_swarm_fetch = git
+pkg_swarm_repo = https://github.com/jeremey/swarm
+pkg_swarm_commit = master
+
+PACKAGES += switchboard
+pkg_switchboard_name = switchboard
+pkg_switchboard_description = A framework for processing email using worker plugins.
+pkg_switchboard_homepage = https://github.com/thusfresh/switchboard
+pkg_switchboard_fetch = git
+pkg_switchboard_repo = https://github.com/thusfresh/switchboard
+pkg_switchboard_commit = master
+
+PACKAGES += syn
+pkg_syn_name = syn
+pkg_syn_description = A global Process Registry and Process Group manager for Erlang.
+pkg_syn_homepage = https://github.com/ostinelli/syn
+pkg_syn_fetch = git
+pkg_syn_repo = https://github.com/ostinelli/syn
+pkg_syn_commit = master
+
+PACKAGES += sync
+pkg_sync_name = sync
+pkg_sync_description = On-the-fly recompiling and reloading in Erlang.
+pkg_sync_homepage = https://github.com/rustyio/sync
+pkg_sync_fetch = git
+pkg_sync_repo = https://github.com/rustyio/sync
+pkg_sync_commit = master
+
+PACKAGES += syntaxerl
+pkg_syntaxerl_name = syntaxerl
+pkg_syntaxerl_description = Syntax checker for Erlang
+pkg_syntaxerl_homepage = https://github.com/ten0s/syntaxerl
+pkg_syntaxerl_fetch = git
+pkg_syntaxerl_repo = https://github.com/ten0s/syntaxerl
+pkg_syntaxerl_commit = master
+
+PACKAGES += syslog
+pkg_syslog_name = syslog
+pkg_syslog_description = Erlang port driver for interacting with syslog via syslog(3)
+pkg_syslog_homepage = https://github.com/Vagabond/erlang-syslog
+pkg_syslog_fetch = git
+pkg_syslog_repo = https://github.com/Vagabond/erlang-syslog
+pkg_syslog_commit = master
+
+PACKAGES += taskforce
+pkg_taskforce_name = taskforce
+pkg_taskforce_description = Erlang worker pools for controlled parallelisation of arbitrary tasks.
+pkg_taskforce_homepage = https://github.com/g-andrade/taskforce
+pkg_taskforce_fetch = git
+pkg_taskforce_repo = https://github.com/g-andrade/taskforce
+pkg_taskforce_commit = master
+
+PACKAGES += tddreloader
+pkg_tddreloader_name = tddreloader
+pkg_tddreloader_description = Shell utility for recompiling, reloading, and testing code as it changes
+pkg_tddreloader_homepage = https://github.com/version2beta/tddreloader
+pkg_tddreloader_fetch = git
+pkg_tddreloader_repo = https://github.com/version2beta/tddreloader
+pkg_tddreloader_commit = master
+
+PACKAGES += tempo
+pkg_tempo_name = tempo
+pkg_tempo_description = NIF-based date and time parsing and formatting for Erlang.
+pkg_tempo_homepage = https://github.com/selectel/tempo
+pkg_tempo_fetch = git
+pkg_tempo_repo = https://github.com/selectel/tempo
+pkg_tempo_commit = master
+
+PACKAGES += ticktick
+pkg_ticktick_name = ticktick
+pkg_ticktick_description = Ticktick is an ID generator for messaging services.
+pkg_ticktick_homepage = https://github.com/ericliang/ticktick
+pkg_ticktick_fetch = git
+pkg_ticktick_repo = https://github.com/ericliang/ticktick
+pkg_ticktick_commit = master
+
+PACKAGES += tinymq
+pkg_tinymq_name = tinymq
+pkg_tinymq_description = TinyMQ - a diminutive, in-memory message queue
+pkg_tinymq_homepage = https://github.com/ChicagoBoss/tinymq
+pkg_tinymq_fetch = git
+pkg_tinymq_repo = https://github.com/ChicagoBoss/tinymq
+pkg_tinymq_commit = master
+
+PACKAGES += tinymt
+pkg_tinymt_name = tinymt
+pkg_tinymt_description = TinyMT pseudo random number generator for Erlang.
+pkg_tinymt_homepage = https://github.com/jj1bdx/tinymt-erlang
+pkg_tinymt_fetch = git
+pkg_tinymt_repo = https://github.com/jj1bdx/tinymt-erlang
+pkg_tinymt_commit = master
+
+PACKAGES += tirerl
+pkg_tirerl_name = tirerl
+pkg_tirerl_description = Erlang interface to Elastic Search
+pkg_tirerl_homepage = https://github.com/inaka/tirerl
+pkg_tirerl_fetch = git
+pkg_tirerl_repo = https://github.com/inaka/tirerl
+pkg_tirerl_commit = master
+
+PACKAGES += toml
+pkg_toml_name = toml
+pkg_toml_description = TOML (0.4.0) config parser
+pkg_toml_homepage = http://dozzie.jarowit.net/trac/wiki/TOML
+pkg_toml_fetch = git
+pkg_toml_repo = https://github.com/dozzie/toml
+pkg_toml_commit = v0.2.0
+
+PACKAGES += traffic_tools
+pkg_traffic_tools_name = traffic_tools
+pkg_traffic_tools_description = Simple traffic limiting library
+pkg_traffic_tools_homepage = https://github.com/systra/traffic_tools
+pkg_traffic_tools_fetch = git
+pkg_traffic_tools_repo = https://github.com/systra/traffic_tools
+pkg_traffic_tools_commit = master
+
+PACKAGES += trails
+pkg_trails_name = trails
+pkg_trails_description = A couple of improvements over Cowboy Routes
+pkg_trails_homepage = http://inaka.github.io/cowboy-trails/
+pkg_trails_fetch = git
+pkg_trails_repo = https://github.com/inaka/cowboy-trails
+pkg_trails_commit = master
+
+PACKAGES += trane
+pkg_trane_name = trane
+pkg_trane_description = SAX-style parser for broken HTML, in Erlang
+pkg_trane_homepage = https://github.com/massemanet/trane
+pkg_trane_fetch = git
+pkg_trane_repo = https://github.com/massemanet/trane
+pkg_trane_commit = master
+
+PACKAGES += transit
+pkg_transit_name = transit
+pkg_transit_description = Transit format for Erlang
+pkg_transit_homepage = https://github.com/isaiah/transit-erlang
+pkg_transit_fetch = git
+pkg_transit_repo = https://github.com/isaiah/transit-erlang
+pkg_transit_commit = master
+
+PACKAGES += trie
+pkg_trie_name = trie
+pkg_trie_description = Erlang Trie Implementation
+pkg_trie_homepage = https://github.com/okeuday/trie
+pkg_trie_fetch = git
+pkg_trie_repo = https://github.com/okeuday/trie
+pkg_trie_commit = master
+
+PACKAGES += triq
+pkg_triq_name = triq
+pkg_triq_description = Trifork QuickCheck
+pkg_triq_homepage = https://triq.gitlab.io
+pkg_triq_fetch = git
+pkg_triq_repo = https://gitlab.com/triq/triq.git
+pkg_triq_commit = master
+
+PACKAGES += tunctl
+pkg_tunctl_name = tunctl
+pkg_tunctl_description = Erlang TUN/TAP interface
+pkg_tunctl_homepage = https://github.com/msantos/tunctl
+pkg_tunctl_fetch = git
+pkg_tunctl_repo = https://github.com/msantos/tunctl
+pkg_tunctl_commit = master
+
+PACKAGES += twerl
+pkg_twerl_name = twerl
+pkg_twerl_description = Erlang client for the Twitter Streaming API
+pkg_twerl_homepage = https://github.com/lucaspiller/twerl
+pkg_twerl_fetch = git
+pkg_twerl_repo = https://github.com/lucaspiller/twerl
+pkg_twerl_commit = oauth
+
+PACKAGES += twitter_erlang
+pkg_twitter_erlang_name = twitter_erlang
+pkg_twitter_erlang_description = An Erlang twitter client
+pkg_twitter_erlang_homepage = https://github.com/ngerakines/erlang_twitter
+pkg_twitter_erlang_fetch = git
+pkg_twitter_erlang_repo = https://github.com/ngerakines/erlang_twitter
+pkg_twitter_erlang_commit = master
+
+PACKAGES += ucol_nif
+pkg_ucol_nif_name = ucol_nif
+pkg_ucol_nif_description = ICU-based collation Erlang module
+pkg_ucol_nif_homepage = https://github.com/refuge/ucol_nif
+pkg_ucol_nif_fetch = git
+pkg_ucol_nif_repo = https://github.com/refuge/ucol_nif
+pkg_ucol_nif_commit = master
+
+PACKAGES += unicorn
+pkg_unicorn_name = unicorn
+pkg_unicorn_description = Generic configuration server
+pkg_unicorn_homepage = https://github.com/shizzard/unicorn
+pkg_unicorn_fetch = git
+pkg_unicorn_repo = https://github.com/shizzard/unicorn
+pkg_unicorn_commit = master
+
+PACKAGES += unsplit
+pkg_unsplit_name = unsplit
+pkg_unsplit_description = Resolves conflicts in Mnesia after network splits
+pkg_unsplit_homepage = https://github.com/uwiger/unsplit
+pkg_unsplit_fetch = git
+pkg_unsplit_repo = https://github.com/uwiger/unsplit
+pkg_unsplit_commit = master
+
+PACKAGES += uuid
+pkg_uuid_name = uuid
+pkg_uuid_description = Erlang UUID Implementation
+pkg_uuid_homepage = https://github.com/okeuday/uuid
+pkg_uuid_fetch = git
+pkg_uuid_repo = https://github.com/okeuday/uuid
+pkg_uuid_commit = master
+
+PACKAGES += ux
+pkg_ux_name = ux
+pkg_ux_description = Unicode eXtention for Erlang (Strings, Collation)
+pkg_ux_homepage = https://github.com/erlang-unicode/ux
+pkg_ux_fetch = git
+pkg_ux_repo = https://github.com/erlang-unicode/ux
+pkg_ux_commit = master
+
+PACKAGES += vert
+pkg_vert_name = vert
+pkg_vert_description = Erlang binding to libvirt virtualization API
+pkg_vert_homepage = https://github.com/msantos/erlang-libvirt
+pkg_vert_fetch = git
+pkg_vert_repo = https://github.com/msantos/erlang-libvirt
+pkg_vert_commit = master
+
+PACKAGES += verx
+pkg_verx_name = verx
+pkg_verx_description = Erlang implementation of the libvirtd remote protocol
+pkg_verx_homepage = https://github.com/msantos/verx
+pkg_verx_fetch = git
+pkg_verx_repo = https://github.com/msantos/verx
+pkg_verx_commit = master
+
+PACKAGES += vmq_acl
+pkg_vmq_acl_name = vmq_acl
+pkg_vmq_acl_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_acl_homepage = https://verne.mq/
+pkg_vmq_acl_fetch = git
+pkg_vmq_acl_repo = https://github.com/erlio/vmq_acl
+pkg_vmq_acl_commit = master
+
+PACKAGES += vmq_bridge
+pkg_vmq_bridge_name = vmq_bridge
+pkg_vmq_bridge_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_bridge_homepage = https://verne.mq/
+pkg_vmq_bridge_fetch = git
+pkg_vmq_bridge_repo = https://github.com/erlio/vmq_bridge
+pkg_vmq_bridge_commit = master
+
+PACKAGES += vmq_graphite
+pkg_vmq_graphite_name = vmq_graphite
+pkg_vmq_graphite_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_graphite_homepage = https://verne.mq/
+pkg_vmq_graphite_fetch = git
+pkg_vmq_graphite_repo = https://github.com/erlio/vmq_graphite
+pkg_vmq_graphite_commit = master
+
+PACKAGES += vmq_passwd
+pkg_vmq_passwd_name = vmq_passwd
+pkg_vmq_passwd_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_passwd_homepage = https://verne.mq/
+pkg_vmq_passwd_fetch = git
+pkg_vmq_passwd_repo = https://github.com/erlio/vmq_passwd
+pkg_vmq_passwd_commit = master
+
+PACKAGES += vmq_server
+pkg_vmq_server_name = vmq_server
+pkg_vmq_server_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_server_homepage = https://verne.mq/
+pkg_vmq_server_fetch = git
+pkg_vmq_server_repo = https://github.com/erlio/vmq_server
+pkg_vmq_server_commit = master
+
+PACKAGES += vmq_snmp
+pkg_vmq_snmp_name = vmq_snmp
+pkg_vmq_snmp_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_snmp_homepage = https://verne.mq/
+pkg_vmq_snmp_fetch = git
+pkg_vmq_snmp_repo = https://github.com/erlio/vmq_snmp
+pkg_vmq_snmp_commit = master
+
+PACKAGES += vmq_systree
+pkg_vmq_systree_name = vmq_systree
+pkg_vmq_systree_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_systree_homepage = https://verne.mq/
+pkg_vmq_systree_fetch = git
+pkg_vmq_systree_repo = https://github.com/erlio/vmq_systree
+pkg_vmq_systree_commit = master
+
+PACKAGES += vmstats
+pkg_vmstats_name = vmstats
+pkg_vmstats_description = Tiny Erlang app that works in conjunction with statsderl to generate information about the Erlang VM for Graphite logs.
+pkg_vmstats_homepage = https://github.com/ferd/vmstats
+pkg_vmstats_fetch = git
+pkg_vmstats_repo = https://github.com/ferd/vmstats
+pkg_vmstats_commit = master
+
+PACKAGES += walrus
+pkg_walrus_name = walrus
+pkg_walrus_description = Walrus - Mustache-like Templating
+pkg_walrus_homepage = https://github.com/devinus/walrus
+pkg_walrus_fetch = git
+pkg_walrus_repo = https://github.com/devinus/walrus
+pkg_walrus_commit = master
+
+PACKAGES += webmachine
+pkg_webmachine_name = webmachine
+pkg_webmachine_description = A REST-based system for building web applications.
+pkg_webmachine_homepage = https://github.com/basho/webmachine
+pkg_webmachine_fetch = git
+pkg_webmachine_repo = https://github.com/basho/webmachine
+pkg_webmachine_commit = master
+
+PACKAGES += websocket_client
+pkg_websocket_client_name = websocket_client
+pkg_websocket_client_description = Erlang websocket client (ws and wss supported)
+pkg_websocket_client_homepage = https://github.com/jeremyong/websocket_client
+pkg_websocket_client_fetch = git
+pkg_websocket_client_repo = https://github.com/jeremyong/websocket_client
+pkg_websocket_client_commit = master
+
+PACKAGES += worker_pool
+pkg_worker_pool_name = worker_pool
+pkg_worker_pool_description = A simple Erlang worker pool
+pkg_worker_pool_homepage = https://github.com/inaka/worker_pool
+pkg_worker_pool_fetch = git
+pkg_worker_pool_repo = https://github.com/inaka/worker_pool
+pkg_worker_pool_commit = master
+
+PACKAGES += wrangler
+pkg_wrangler_name = wrangler
+pkg_wrangler_description = Import of the Wrangler svn repository.
+pkg_wrangler_homepage = http://www.cs.kent.ac.uk/projects/wrangler/Home.html
+pkg_wrangler_fetch = git
+pkg_wrangler_repo = https://github.com/RefactoringTools/wrangler
+pkg_wrangler_commit = master
+
+PACKAGES += wsock
+pkg_wsock_name = wsock
+pkg_wsock_description = Erlang library to build WebSocket clients and servers
+pkg_wsock_homepage = https://github.com/madtrick/wsock
+pkg_wsock_fetch = git
+pkg_wsock_repo = https://github.com/madtrick/wsock
+pkg_wsock_commit = master
+
+PACKAGES += xhttpc
+pkg_xhttpc_name = xhttpc
+pkg_xhttpc_description = Extensible HTTP Client for Erlang
+pkg_xhttpc_homepage = https://github.com/seriyps/xhttpc
+pkg_xhttpc_fetch = git
+pkg_xhttpc_repo = https://github.com/seriyps/xhttpc
+pkg_xhttpc_commit = master
+
+PACKAGES += xref_runner
+pkg_xref_runner_name = xref_runner
+pkg_xref_runner_description = Erlang Xref Runner (inspired by rebar xref)
+pkg_xref_runner_homepage = https://github.com/inaka/xref_runner
+pkg_xref_runner_fetch = git
+pkg_xref_runner_repo = https://github.com/inaka/xref_runner
+pkg_xref_runner_commit = master
+
+PACKAGES += yamerl
+pkg_yamerl_name = yamerl
+pkg_yamerl_description = YAML 1.2 parser in pure Erlang
+pkg_yamerl_homepage = https://github.com/yakaz/yamerl
+pkg_yamerl_fetch = git
+pkg_yamerl_repo = https://github.com/yakaz/yamerl
+pkg_yamerl_commit = master
+
+PACKAGES += yamler
+pkg_yamler_name = yamler
+pkg_yamler_description = libyaml-based yaml loader for Erlang
+pkg_yamler_homepage = https://github.com/goertzenator/yamler
+pkg_yamler_fetch = git
+pkg_yamler_repo = https://github.com/goertzenator/yamler
+pkg_yamler_commit = master
+
+PACKAGES += yaws
+pkg_yaws_name = yaws
+pkg_yaws_description = Yaws webserver
+pkg_yaws_homepage = http://yaws.hyber.org
+pkg_yaws_fetch = git
+pkg_yaws_repo = https://github.com/klacke/yaws
+pkg_yaws_commit = master
+
+PACKAGES += zab_engine
+pkg_zab_engine_name = zab_engine
+pkg_zab_engine_description = Zab protocol implemented in Erlang
+pkg_zab_engine_homepage = https://github.com/xinmingyao/zab_engine
+pkg_zab_engine_fetch = git
+pkg_zab_engine_repo = https://github.com/xinmingyao/zab_engine
+pkg_zab_engine_commit = master
+
+PACKAGES += zabbix_sender
+pkg_zabbix_sender_name = zabbix_sender
+pkg_zabbix_sender_description = Zabbix trapper for sending data to Zabbix in pure Erlang
+pkg_zabbix_sender_homepage = https://github.com/stalkermn/zabbix_sender
+pkg_zabbix_sender_fetch = git
+pkg_zabbix_sender_repo = https://github.com/stalkermn/zabbix_sender.git
+pkg_zabbix_sender_commit = master
+
+PACKAGES += zeta
+pkg_zeta_name = zeta
+pkg_zeta_description = HTTP access log parser in Erlang
+pkg_zeta_homepage = https://github.com/s1n4/zeta
+pkg_zeta_fetch = git
+pkg_zeta_repo = https://github.com/s1n4/zeta
+pkg_zeta_commit = master
+
+PACKAGES += zippers
+pkg_zippers_name = zippers
+pkg_zippers_description = A library for functional zipper data structures in Erlang.
+pkg_zippers_homepage = https://github.com/ferd/zippers
+pkg_zippers_fetch = git
+pkg_zippers_repo = https://github.com/ferd/zippers
+pkg_zippers_commit = master
+
+PACKAGES += zlists
+pkg_zlists_name = zlists
+pkg_zlists_description = Erlang lazy lists library.
+pkg_zlists_homepage = https://github.com/vjache/erlang-zlists
+pkg_zlists_fetch = git
+pkg_zlists_repo = https://github.com/vjache/erlang-zlists
+pkg_zlists_commit = master
+
+PACKAGES += zraft_lib
+pkg_zraft_lib_name = zraft_lib
+pkg_zraft_lib_description = Erlang raft consensus protocol implementation
+pkg_zraft_lib_homepage = https://github.com/dreyk/zraft_lib
+pkg_zraft_lib_fetch = git
+pkg_zraft_lib_repo = https://github.com/dreyk/zraft_lib
+pkg_zraft_lib_commit = master
+
+PACKAGES += zucchini
+pkg_zucchini_name = zucchini
+pkg_zucchini_description = An Erlang INI parser
+pkg_zucchini_homepage = https://github.com/devinus/zucchini
+pkg_zucchini_fetch = git
+pkg_zucchini_repo = https://github.com/devinus/zucchini
+pkg_zucchini_commit = master
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: search
+
+define pkg_print
+ $(verbose) printf "%s\n" \
+ $(if $(call core_eq,$(1),$(pkg_$(1)_name)),,"Pkg name: $(1)") \
+ "App name: $(pkg_$(1)_name)" \
+ "Description: $(pkg_$(1)_description)" \
+ "Home page: $(pkg_$(1)_homepage)" \
+ "Fetch with: $(pkg_$(1)_fetch)" \
+ "Repository: $(pkg_$(1)_repo)" \
+ "Commit: $(pkg_$(1)_commit)" \
+ ""
+
+endef
+
+search:
+ifdef q
+ $(foreach p,$(PACKAGES), \
+ $(if $(findstring $(call core_lc,$(q)),$(call core_lc,$(pkg_$(p)_name) $(pkg_$(p)_description))), \
+ $(call pkg_print,$(p))))
+else
+ $(foreach p,$(PACKAGES),$(call pkg_print,$(p)))
+endif
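+
+# A minimal usage sketch: the query in `q` is matched case-insensitively
+# against both the package name and its description, e.g.:
+#
+#   $ make search q=websocket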
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-deps clean-tmp-deps.log
+
+# Configuration.
+
+ifdef OTP_DEPS
+$(warning The variable OTP_DEPS is deprecated in favor of LOCAL_DEPS.)
+endif
+
+IGNORE_DEPS ?=
+export IGNORE_DEPS
+
+APPS_DIR ?= $(CURDIR)/apps
+export APPS_DIR
+
+DEPS_DIR ?= $(CURDIR)/deps
+export DEPS_DIR
+
+REBAR_DEPS_DIR = $(DEPS_DIR)
+export REBAR_DEPS_DIR
+
+REBAR_GIT ?= https://github.com/rebar/rebar
+REBAR_COMMIT ?= 576e12171ab8d69b048b827b92aa65d067deea01
+
+# External "early" plugins (see core/plugins.mk for regular plugins).
+# They both use the core_dep_plugin macro.
+
+define core_dep_plugin
+ifeq ($(2),$(PROJECT))
+-include $$(patsubst $(PROJECT)/%,%,$(1))
+else
+-include $(DEPS_DIR)/$(1)
+
+$(DEPS_DIR)/$(1): $(DEPS_DIR)/$(2) ;
+endif
+endef
+
+DEP_EARLY_PLUGINS ?=
+
+$(foreach p,$(DEP_EARLY_PLUGINS),\
+ $(eval $(if $(findstring /,$p),\
+ $(call core_dep_plugin,$p,$(firstword $(subst /, ,$p))),\
+ $(call core_dep_plugin,$p/early-plugins.mk,$p))))
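+
+# A sketch of both forms, with hypothetical dependency names: a bare name
+# loads $(DEPS_DIR)/my_dep/early-plugins.mk, while an entry containing a
+# slash keeps its first component as the dependency name and is included
+# relative to $(DEPS_DIR):
+#
+#   DEP_EARLY_PLUGINS = my_dep other_dep/mk/early-plugins.mk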
+
+# Query functions.
+
+query_fetch_method = $(if $(dep_$(1)),$(call _qfm_dep,$(word 1,$(dep_$(1)))),$(call _qfm_pkg,$(1)))
+_qfm_dep = $(if $(dep_fetch_$(1)),$(1),$(if $(IS_DEP),legacy,fail))
+_qfm_pkg = $(if $(pkg_$(1)_fetch),$(pkg_$(1)_fetch),fail)
+
+query_name = $(if $(dep_$(1)),$(1),$(if $(pkg_$(1)_name),$(pkg_$(1)_name),$(1)))
+
+query_repo = $(call _qr,$(1),$(call query_fetch_method,$(1)))
+_qr = $(if $(query_repo_$(2)),$(call query_repo_$(2),$(1)),$(call dep_repo,$(1)))
+
+query_repo_default = $(if $(dep_$(1)),$(word 2,$(dep_$(1))),$(pkg_$(1)_repo))
+query_repo_git = $(patsubst git://github.com/%,https://github.com/%,$(call query_repo_default,$(1)))
+query_repo_git-subfolder = $(call query_repo_git,$(1))
+query_repo_git-submodule = -
+query_repo_hg = $(call query_repo_default,$(1))
+query_repo_svn = $(call query_repo_default,$(1))
+query_repo_cp = $(call query_repo_default,$(1))
+query_repo_ln = $(call query_repo_default,$(1))
+query_repo_hex = https://hex.pm/packages/$(if $(word 3,$(dep_$(1))),$(word 3,$(dep_$(1))),$(1))
+query_repo_fail = -
+query_repo_legacy = -
+
+query_version = $(call _qv,$(1),$(call query_fetch_method,$(1)))
+_qv = $(if $(query_version_$(2)),$(call query_version_$(2),$(1)),$(call dep_commit,$(1)))
+
+query_version_default = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(word 3,$(dep_$(1))),$(pkg_$(1)_commit)))
+query_version_git = $(call query_version_default,$(1))
+query_version_git-subfolder = $(call query_version_git,$(1))
+query_version_git-submodule = -
+query_version_hg = $(call query_version_default,$(1))
+query_version_svn = -
+query_version_cp = -
+query_version_ln = -
+query_version_hex = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(word 2,$(dep_$(1))),$(pkg_$(1)_commit)))
+query_version_fail = -
+query_version_legacy = -
+
+query_extra = $(call _qe,$(1),$(call query_fetch_method,$(1)))
+_qe = $(if $(query_extra_$(2)),$(call query_extra_$(2),$(1)),-)
+
+query_extra_git = -
+query_extra_git-subfolder = $(if $(dep_$(1)),subfolder=$(word 4,$(dep_$(1))),-)
+query_extra_git-submodule = -
+query_extra_hg = -
+query_extra_svn = -
+query_extra_cp = -
+query_extra_ln = -
+query_extra_hex = $(if $(dep_$(1)),package-name=$(word 3,$(dep_$(1))),-)
+query_extra_fail = -
+query_extra_legacy = -
+
+query_absolute_path = $(addprefix $(DEPS_DIR)/,$(call query_name,$(1)))
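+
+# A sketch of how these resolve for a typical git dependency declaration:
+#
+#   dep_cowlib = git https://github.com/ninenines/cowlib 2.12.0
+#
+# $(call query_fetch_method,cowlib) then yields "git",
+# $(call query_repo,cowlib) the repository URL, and
+# $(call query_version,cowlib) "2.12.0".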
+
+# Deprecated legacy query functions.
+dep_fetch = $(call query_fetch_method,$(1))
+dep_name = $(call query_name,$(1))
+dep_repo = $(call query_repo_git,$(1))
+dep_commit = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(if $(filter hex,$(word 1,$(dep_$(1)))),$(word 2,$(dep_$(1))),$(word 3,$(dep_$(1)))),$(pkg_$(1)_commit)))
+
+LOCAL_DEPS_DIRS = $(foreach a,$(LOCAL_DEPS),$(if $(wildcard $(APPS_DIR)/$(a)),$(APPS_DIR)/$(a)))
+ALL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(foreach dep,$(filter-out $(IGNORE_DEPS),$(BUILD_DEPS) $(DEPS)),$(call dep_name,$(dep))))
+
+# When we are calling an app directly we don't want to include it here,
+# otherwise it'll be treated both as an app and as a top-level project.
+ALL_APPS_DIRS = $(if $(wildcard $(APPS_DIR)/),$(filter-out $(APPS_DIR),$(shell find $(APPS_DIR) -maxdepth 1 -type d)))
+ifdef ROOT_DIR
+ifndef IS_APP
+ALL_APPS_DIRS := $(filter-out $(APPS_DIR)/$(notdir $(CURDIR)),$(ALL_APPS_DIRS))
+endif
+endif
+
+ifeq ($(filter $(APPS_DIR) $(DEPS_DIR),$(subst :, ,$(ERL_LIBS))),)
+ifeq ($(ERL_LIBS),)
+ ERL_LIBS = $(APPS_DIR):$(DEPS_DIR)
+else
+ ERL_LIBS := $(ERL_LIBS):$(APPS_DIR):$(DEPS_DIR)
+endif
+endif
+export ERL_LIBS
+
+export NO_AUTOPATCH
+
+# Verbosity.
+
+dep_verbose_0 = @echo " DEP $1 ($(call dep_commit,$1))";
+dep_verbose_2 = set -x;
+dep_verbose = $(dep_verbose_$(V))
+
+# Optimization: don't recompile deps unless truly necessary.
+
+ifndef IS_DEP
+ifneq ($(MAKELEVEL),0)
+$(shell rm -f ebin/dep_built)
+endif
+endif
+
+# Core targets.
+
+ALL_APPS_DIRS_TO_BUILD = $(if $(LOCAL_DEPS_DIRS)$(IS_APP),$(LOCAL_DEPS_DIRS),$(ALL_APPS_DIRS))
+
+apps:: $(ALL_APPS_DIRS) clean-tmp-deps.log | $(ERLANG_MK_TMP)
+# Create an ebin directory for all apps to make sure Erlang recognizes them
+# as proper OTP applications when using -include_lib. This is a temporary
+# fix; a proper fix would be to compile apps/* in the right order.
+ifndef IS_APP
+ifneq ($(ALL_APPS_DIRS),)
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ mkdir -p $$dep/ebin; \
+ done
+endif
+endif
+# At the top level: if LOCAL_DEPS is defined with at least one local app, only
+# compile that list of apps. Otherwise, compile everything.
+# Within an app: compile all LOCAL_DEPS that are (uncompiled) local apps.
+ifneq ($(ALL_APPS_DIRS_TO_BUILD),)
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS_TO_BUILD); do \
+ if grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/apps.log; then \
+ :; \
+ else \
+ echo $$dep >> $(ERLANG_MK_TMP)/apps.log; \
+ $(MAKE) -C $$dep $(if $(IS_TEST),test-build-app) IS_APP=1; \
+ fi \
+ done
+endif
+
+clean-tmp-deps.log:
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_TMP)/apps.log $(ERLANG_MK_TMP)/deps.log
+endif
+
+# Erlang.mk does not rebuild dependencies once they have been compiled.
+# Developers working on the top-level project and some of its
+# dependencies at the same time may want to change this behavior.
+# There are two solutions:
+# 1. Set `FULL=1` so that all dependencies are visited and
+# recursively recompiled if necessary.
+# 2. Set `FORCE_REBUILD=` to the specific list of dependencies that
+# should be recompiled (instead of the whole set).
+
+FORCE_REBUILD ?=
+
+ifeq ($(origin FULL),undefined)
+ifneq ($(strip $(force_rebuild_dep)$(FORCE_REBUILD)),)
+define force_rebuild_dep
+echo "$(FORCE_REBUILD)" | grep -qw "$$(basename "$1")"
+endef
+endif
+endif
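+
+# Usage sketch for the two solutions described above (dependency names are
+# illustrative); FORCE_REBUILD only takes effect when FULL is not set:
+#
+#   $ make FULL=1
+#   $ make FORCE_REBUILD="cowlib ranch"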
+
+ifneq ($(SKIP_DEPS),)
+deps::
+else
+deps:: $(ALL_DEPS_DIRS) apps clean-tmp-deps.log | $(ERLANG_MK_TMP)
+ifneq ($(ALL_DEPS_DIRS),)
+ $(verbose) set -e; for dep in $(ALL_DEPS_DIRS); do \
+ if grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/deps.log; then \
+ :; \
+ else \
+ echo $$dep >> $(ERLANG_MK_TMP)/deps.log; \
+ if [ -z "$(strip $(FULL))" ] $(if $(force_rebuild_dep),&& ! ($(call force_rebuild_dep,$$dep)),) && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ elif [ -f $$dep/GNUmakefile ] || [ -f $$dep/makefile ] || [ -f $$dep/Makefile ]; then \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ else \
+ echo "Error: No Makefile to build dependency $$dep." >&2; \
+ exit 2; \
+ fi \
+ fi \
+ done
+endif
+endif
+
+# Deps related targets.
+
+# @todo Rename GNUmakefile and makefile to Makefile first, if they exist.
+# While the Makefile could be named GNUmakefile or makefile,
+# in practice only Makefile has been needed so far.
+define dep_autopatch
+ if [ -f $(DEPS_DIR)/$(1)/erlang.mk ]; then \
+ rm -rf $(DEPS_DIR)/$1/ebin/; \
+ $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+ $(call dep_autopatch_erlang_mk,$(1)); \
+ elif [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+ if [ -f $(DEPS_DIR)/$1/rebar.lock ]; then \
+ $(call dep_autopatch2,$1); \
+ elif [ 0 != `grep -c "include ../\w*\.mk" $(DEPS_DIR)/$(1)/Makefile` ]; then \
+ $(call dep_autopatch2,$(1)); \
+ elif [ 0 != `grep -ci "^[^#].*rebar" $(DEPS_DIR)/$(1)/Makefile` ]; then \
+ $(call dep_autopatch2,$(1)); \
+ elif [ -n "`find $(DEPS_DIR)/$(1)/ -type f -name \*.mk -not -name erlang.mk -exec grep -i "^[^#].*rebar" '{}' \;`" ]; then \
+ $(call dep_autopatch2,$(1)); \
+ fi \
+ else \
+ if [ ! -d $(DEPS_DIR)/$(1)/src/ ]; then \
+ $(call dep_autopatch_noop,$(1)); \
+ else \
+ $(call dep_autopatch2,$(1)); \
+ fi \
+ fi
+endef
+
+define dep_autopatch2
+ ! test -f $(DEPS_DIR)/$1/ebin/$1.app || \
+ mv -n $(DEPS_DIR)/$1/ebin/$1.app $(DEPS_DIR)/$1/src/$1.app.src; \
+ rm -f $(DEPS_DIR)/$1/ebin/$1.app; \
+ if [ -f $(DEPS_DIR)/$1/src/$1.app.src.script ]; then \
+ $(call erlang,$(call dep_autopatch_appsrc_script.erl,$(1))); \
+ fi; \
+ $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+ if [ -f $(DEPS_DIR)/$(1)/rebar -o -f $(DEPS_DIR)/$(1)/rebar.config -o -f $(DEPS_DIR)/$(1)/rebar.config.script -o -f $(DEPS_DIR)/$1/rebar.lock ]; then \
+ $(call dep_autopatch_fetch_rebar); \
+ $(call dep_autopatch_rebar,$(1)); \
+ else \
+ $(call dep_autopatch_gen,$(1)); \
+ fi
+endef
+
+define dep_autopatch_noop
+ printf "noop:\n" > $(DEPS_DIR)/$(1)/Makefile
+endef
+
+# Replace "include erlang.mk" with a line that will load the parent Erlang.mk
+# when one is given. Do it for all 3 possible Makefile file names.
+ifeq ($(NO_AUTOPATCH_ERLANG_MK),)
+define dep_autopatch_erlang_mk
+ for f in Makefile makefile GNUmakefile; do \
+ if [ -f $(DEPS_DIR)/$1/$$f ]; then \
+ sed -i.bak s/'include *erlang.mk'/'include $$(if $$(ERLANG_MK_FILENAME),$$(ERLANG_MK_FILENAME),erlang.mk)'/ $(DEPS_DIR)/$1/$$f; \
+ fi \
+ done
+endef
+else
+define dep_autopatch_erlang_mk
+ :
+endef
+endif
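+
+# Autopatching can be skipped per dependency via the NO_AUTOPATCH list (see
+# the dep_target definition below), and the erlang.mk include rewrite above
+# can be disabled on its own; a sketch ("my_dep" is a placeholder):
+#
+#   NO_AUTOPATCH_ERLANG_MK = 1
+#   NO_AUTOPATCH = my_dep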
+
+define dep_autopatch_gen
+ printf "%s\n" \
+ "ERLC_OPTS = +debug_info" \
+ "include ../../erlang.mk" > $(DEPS_DIR)/$(1)/Makefile
+endef
+
+# We use flock/lockf when available to avoid concurrency issues.
+define dep_autopatch_fetch_rebar
+ if command -v flock >/dev/null; then \
+ flock $(ERLANG_MK_TMP)/rebar.lock sh -c "$(call dep_autopatch_fetch_rebar2)"; \
+ elif command -v lockf >/dev/null; then \
+ lockf $(ERLANG_MK_TMP)/rebar.lock sh -c "$(call dep_autopatch_fetch_rebar2)"; \
+ else \
+ $(call dep_autopatch_fetch_rebar2); \
+ fi
+endef
+
+define dep_autopatch_fetch_rebar2
+ if [ ! -d $(ERLANG_MK_TMP)/rebar ]; then \
+ git clone -q -n -- $(REBAR_GIT) $(ERLANG_MK_TMP)/rebar; \
+ cd $(ERLANG_MK_TMP)/rebar; \
+ git checkout -q $(REBAR_COMMIT); \
+ ./bootstrap; \
+ cd -; \
+ fi
+endef
+
+define dep_autopatch_rebar
+ if [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+ mv $(DEPS_DIR)/$(1)/Makefile $(DEPS_DIR)/$(1)/Makefile.orig.mk; \
+ fi; \
+ $(call erlang,$(call dep_autopatch_rebar.erl,$(1))); \
+ rm -f $(DEPS_DIR)/$(1)/ebin/$(1).app
+endef
+
+define dep_autopatch_rebar.erl
+ application:load(rebar),
+ application:set_env(rebar, log_level, debug),
+ rmemo:start(),
+ Conf1 = case file:consult("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config)") of
+ {ok, Conf0} -> Conf0;
+ _ -> []
+ end,
+ {Conf, OsEnv} = fun() ->
+ case filelib:is_file("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)") of
+ false -> {Conf1, []};
+ true ->
+ Bindings0 = erl_eval:new_bindings(),
+ Bindings1 = erl_eval:add_binding('CONFIG', Conf1, Bindings0),
+ Bindings = erl_eval:add_binding('SCRIPT', "$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings1),
+ Before = os:getenv(),
+ {ok, Conf2} = file:script("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings),
+ {Conf2, lists:foldl(fun(E, Acc) -> lists:delete(E, Acc) end, os:getenv(), Before)}
+ end
+ end(),
+ Write = fun (Text) ->
+ file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/Makefile)", Text, [append])
+ end,
+ Escape = fun (Text) ->
+ re:replace(Text, "\\\\$$", "\$$$$", [global, {return, list}])
+ end,
+ Write("IGNORE_DEPS += edown eper eunit_formatters meck node_package "
+ "rebar_lock_deps_plugin rebar_vsn_plugin reltool_util\n"),
+ Write("C_SRC_DIR = /path/do/not/exist\n"),
+ Write("C_SRC_TYPE = rebar\n"),
+ Write("DRV_CFLAGS = -fPIC\nexport DRV_CFLAGS\n"),
+ Write(["ERLANG_ARCH = ", rebar_utils:wordsize(), "\nexport ERLANG_ARCH\n"]),
+ ToList = fun
+ (V) when is_atom(V) -> atom_to_list(V);
+ (V) when is_list(V) -> "'\\"" ++ V ++ "\\"'"
+ end,
+ fun() ->
+ Write("ERLC_OPTS = +debug_info\nexport ERLC_OPTS\n"),
+ case lists:keyfind(erl_opts, 1, Conf) of
+ false -> ok;
+ {_, ErlOpts} ->
+ lists:foreach(fun
+ ({d, D}) ->
+ Write("ERLC_OPTS += -D" ++ ToList(D) ++ "=1\n");
+ ({d, DKey, DVal}) ->
+ Write("ERLC_OPTS += -D" ++ ToList(DKey) ++ "=" ++ ToList(DVal) ++ "\n");
+ ({i, I}) ->
+ Write(["ERLC_OPTS += -I ", I, "\n"]);
+ ({platform_define, Regex, D}) ->
+ case rebar_utils:is_arch(Regex) of
+ true -> Write("ERLC_OPTS += -D" ++ ToList(D) ++ "=1\n");
+ false -> ok
+ end;
+ ({parse_transform, PT}) ->
+ Write("ERLC_OPTS += +'{parse_transform, " ++ ToList(PT) ++ "}'\n");
+ (_) -> ok
+ end, ErlOpts)
+ end,
+ Write("\n")
+ end(),
+ GetHexVsn = fun(N, NP) ->
+ case file:consult("$(call core_native_path,$(DEPS_DIR)/$1/rebar.lock)") of
+ {ok, Lock} ->
+ io:format("~p~n", [Lock]),
+ case lists:keyfind("1.1.0", 1, Lock) of
+ {_, LockPkgs} ->
+ io:format("~p~n", [LockPkgs]),
+ case lists:keyfind(atom_to_binary(N, latin1), 1, LockPkgs) of
+ {_, {pkg, _, Vsn}, _} ->
+ io:format("~p~n", [Vsn]),
+ {N, {hex, NP, binary_to_list(Vsn)}};
+ _ ->
+ false
+ end;
+ _ ->
+ false
+ end;
+ _ ->
+ false
+ end
+ end,
+ SemVsn = fun
+ ("~>" ++ S0) ->
+ S = case S0 of
+ " " ++ S1 -> S1;
+ _ -> S0
+ end,
+ case length([ok || $$. <- S]) of
+ 0 -> S ++ ".0.0";
+ 1 -> S ++ ".0";
+ _ -> S
+ end;
+ (S) -> S
+ end,
+ fun() ->
+ File = case lists:keyfind(deps, 1, Conf) of
+ false -> [];
+ {_, Deps} ->
+ [begin case case Dep of
+ N when is_atom(N) -> GetHexVsn(N, N);
+ {N, S} when is_atom(N), is_list(S) -> {N, {hex, N, SemVsn(S)}};
+ {N, {pkg, NP}} when is_atom(N) -> GetHexVsn(N, NP);
+ {N, S, {pkg, NP}} -> {N, {hex, NP, S}};
+ {N, S} when is_tuple(S) -> {N, S};
+ {N, _, S} -> {N, S};
+ {N, _, S, _} -> {N, S};
+ _ -> false
+ end of
+ false -> ok;
+ {Name, Source} ->
+ {Method, Repo, Commit} = case Source of
+ {hex, NPV, V} -> {hex, V, NPV};
+ {git, R} -> {git, R, master};
+ {M, R, {branch, C}} -> {M, R, C};
+ {M, R, {ref, C}} -> {M, R, C};
+ {M, R, {tag, C}} -> {M, R, C};
+ {M, R, C} -> {M, R, C}
+ end,
+ Write(io_lib:format("DEPS += ~s\ndep_~s = ~s ~s ~s~n", [Name, Name, Method, Repo, Commit]))
+ end end || Dep <- Deps]
+ end
+ end(),
+ fun() ->
+ case lists:keyfind(erl_first_files, 1, Conf) of
+ false -> ok;
+ {_, Files} ->
+ Names = [[" ", case lists:reverse(F) of
+ "lre." ++ Elif -> lists:reverse(Elif);
+ "lrx." ++ Elif -> lists:reverse(Elif);
+ "lry." ++ Elif -> lists:reverse(Elif);
+ Elif -> lists:reverse(Elif)
+ end] || "src/" ++ F <- Files],
+ Write(io_lib:format("COMPILE_FIRST +=~s\n", [Names]))
+ end
+ end(),
+ Write("\n\nrebar_dep: preprocess pre-deps deps pre-app app\n"),
+ Write("\npreprocess::\n"),
+ Write("\npre-deps::\n"),
+ Write("\npre-app::\n"),
+ PatchHook = fun(Cmd) ->
+ Cmd2 = re:replace(Cmd, "^([g]?make)(.*)( -C.*)", "\\\\1\\\\3\\\\2", [{return, list}]),
+ case Cmd2 of
+ "make -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1);
+ "gmake -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1);
+ "make " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1);
+ "gmake " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1);
+ _ -> Escape(Cmd)
+ end
+ end,
+ fun() ->
+ case lists:keyfind(pre_hooks, 1, Conf) of
+ false -> ok;
+ {_, Hooks} ->
+ [case H of
+ {'get-deps', Cmd} ->
+ Write("\npre-deps::\n\t" ++ PatchHook(Cmd) ++ "\n");
+ {compile, Cmd} ->
+ Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n");
+ {Regex, compile, Cmd} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n");
+ false -> ok
+ end;
+ _ -> ok
+ end || H <- Hooks]
+ end
+ end(),
+ ShellToMk = fun(V0) ->
+ V1 = re:replace(V0, "[$$][(]", "$$\(shell ", [global]),
+ V = re:replace(V1, "([$$])(?![(])(\\\\w*)", "\\\\1(\\\\2)", [global]),
+ re:replace(V, "-Werror\\\\b", "", [{return, list}, global])
+ end,
+ PortSpecs = fun() ->
+ case lists:keyfind(port_specs, 1, Conf) of
+ false ->
+ case filelib:is_dir("$(call core_native_path,$(DEPS_DIR)/$1/c_src)") of
+ false -> [];
+ true ->
+ [{"priv/" ++ proplists:get_value(so_name, Conf, "$(1)_drv.so"),
+ proplists:get_value(port_sources, Conf, ["c_src/*.c"]), []}]
+ end;
+ {_, Specs} ->
+ lists:flatten([case S of
+ {Output, Input} -> {ShellToMk(Output), Input, []};
+ {Regex, Output, Input} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {ShellToMk(Output), Input, []};
+ false -> []
+ end;
+ {Regex, Output, Input, [{env, Env}]} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {ShellToMk(Output), Input, Env};
+ false -> []
+ end
+ end || S <- Specs])
+ end
+ end(),
+ PortSpecWrite = fun (Text) ->
+ file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/c_src/Makefile.erlang.mk)", Text, [append])
+ end,
+ case PortSpecs of
+ [] -> ok;
+ _ ->
+ Write("\npre-app::\n\t@$$\(MAKE) --no-print-directory -f c_src/Makefile.erlang.mk\n"),
+ PortSpecWrite(io_lib:format("ERL_CFLAGS ?= -finline-functions -Wall -fPIC -I \\"~s/erts-~s/include\\" -I \\"~s\\"\n",
+ [code:root_dir(), erlang:system_info(version), code:lib_dir(erl_interface, include)])),
+ PortSpecWrite(io_lib:format("ERL_LDFLAGS ?= -L \\"~s\\" -lei\n",
+ [code:lib_dir(erl_interface, lib)])),
+ [PortSpecWrite(["\n", E, "\n"]) || E <- OsEnv],
+ FilterEnv = fun(Env) ->
+ lists:flatten([case E of
+ {_, _} -> E;
+ {Regex, K, V} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {K, V};
+ false -> []
+ end
+ end || E <- Env])
+ end,
+ MergeEnv = fun(Env) ->
+ lists:foldl(fun ({K, V}, Acc) ->
+ case lists:keyfind(K, 1, Acc) of
+ false -> [{K, rebar_utils:expand_env_variable(V, K, "")}|Acc];
+ {_, V0} -> [{K, rebar_utils:expand_env_variable(V, K, V0)}|Acc]
+ end
+ end, [], Env)
+ end,
+ PortEnv = case lists:keyfind(port_env, 1, Conf) of
+ false -> [];
+ {_, PortEnv0} -> FilterEnv(PortEnv0)
+ end,
+ PortSpec = fun ({Output, Input0, Env}) ->
+ filelib:ensure_dir("$(call core_native_path,$(DEPS_DIR)/$1/)" ++ Output),
+ Input = [[" ", I] || I <- Input0],
+ PortSpecWrite([
+ [["\n", K, " = ", ShellToMk(V)] || {K, V} <- lists:reverse(MergeEnv(PortEnv))],
+ case $(PLATFORM) of
+ darwin -> "\n\nLDFLAGS += -flat_namespace -undefined suppress";
+ _ -> ""
+ end,
+ "\n\nall:: ", Output, "\n\t@:\n\n",
+ "%.o: %.c\n\t$$\(CC) -c -o $$\@ $$\< $$\(CFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.C\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.cc\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.cpp\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ [[Output, ": ", K, " += ", ShellToMk(V), "\n"] || {K, V} <- lists:reverse(MergeEnv(FilterEnv(Env)))],
+ Output, ": $$\(foreach ext,.c .C .cc .cpp,",
+ "$$\(patsubst %$$\(ext),%.o,$$\(filter %$$\(ext),$$\(wildcard", Input, "))))\n",
+ "\t$$\(CC) -o $$\@ $$\? $$\(LDFLAGS) $$\(ERL_LDFLAGS) $$\(DRV_LDFLAGS) $$\(EXE_LDFLAGS)",
+ case {filename:extension(Output), $(PLATFORM)} of
+ {[], _} -> "\n";
+ {_, darwin} -> "\n";
+ _ -> " -shared\n"
+ end])
+ end,
+ [PortSpec(S) || S <- PortSpecs]
+ end,
+ fun() ->
+ case lists:keyfind(plugins, 1, Conf) of
+ false -> ok;
+ {_, Plugins0} ->
+ Plugins = [P || P <- Plugins0, is_tuple(P)],
+ case lists:keyfind('lfe-compile', 1, Plugins) of
+ false -> ok;
+ _ -> Write("\nBUILD_DEPS = lfe lfe.mk\ndep_lfe.mk = git https://github.com/ninenines/lfe.mk master\nDEP_PLUGINS = lfe.mk\n")
+ end
+ end
+ end(),
+ Write("\ninclude $$\(if $$\(ERLANG_MK_FILENAME),$$\(ERLANG_MK_FILENAME),erlang.mk)"),
+ RunPlugin = fun(Plugin, Step) ->
+ case erlang:function_exported(Plugin, Step, 2) of
+ false -> ok;
+ true ->
+ c:cd("$(call core_native_path,$(DEPS_DIR)/$1/)"),
+ Ret = Plugin:Step({config, "", Conf, dict:new(), dict:new(), dict:new(),
+ dict:store(base_dir, "", dict:new())}, undefined),
+ io:format("rebar plugin ~p step ~p ret ~p~n", [Plugin, Step, Ret])
+ end
+ end,
+ fun() ->
+ case lists:keyfind(plugins, 1, Conf) of
+ false -> ok;
+ {_, Plugins0} ->
+ Plugins = [P || P <- Plugins0, is_atom(P)],
+ [begin
+ case lists:keyfind(deps, 1, Conf) of
+ false -> ok;
+ {_, Deps} ->
+ case lists:keyfind(P, 1, Deps) of
+ false -> ok;
+ _ ->
+ Path = "$(call core_native_path,$(DEPS_DIR)/)" ++ atom_to_list(P),
+ io:format("~s", [os:cmd("$(MAKE) -C $(call core_native_path,$(DEPS_DIR)/$1) " ++ Path)]),
+ io:format("~s", [os:cmd("$(MAKE) -C " ++ Path ++ " IS_DEP=1")]),
+ code:add_patha(Path ++ "/ebin")
+ end
+ end
+ end || P <- Plugins],
+ [case code:load_file(P) of
+ {module, P} -> ok;
+ _ ->
+ case lists:keyfind(plugin_dir, 1, Conf) of
+ false -> ok;
+ {_, PluginsDir} ->
+ ErlFile = "$(call core_native_path,$(DEPS_DIR)/$1/)" ++ PluginsDir ++ "/" ++ atom_to_list(P) ++ ".erl",
+ {ok, P, Bin} = compile:file(ErlFile, [binary]),
+ {module, P} = code:load_binary(P, ErlFile, Bin)
+ end
+ end || P <- Plugins],
+ [RunPlugin(P, preprocess) || P <- Plugins],
+ [RunPlugin(P, pre_compile) || P <- Plugins],
+ [RunPlugin(P, compile) || P <- Plugins]
+ end
+ end(),
+ halt()
+endef
+
+define dep_autopatch_appsrc_script.erl
+ AppSrc = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+ AppSrcScript = AppSrc ++ ".script",
+ {ok, Conf0} = file:consult(AppSrc),
+ Bindings0 = erl_eval:new_bindings(),
+ Bindings1 = erl_eval:add_binding('CONFIG', Conf0, Bindings0),
+ Bindings = erl_eval:add_binding('SCRIPT', AppSrcScript, Bindings1),
+ Conf = case file:script(AppSrcScript, Bindings) of
+ {ok, [C]} -> C;
+ {ok, C} -> C
+ end,
+ ok = file:write_file(AppSrc, io_lib:format("~p.~n", [Conf])),
+ halt()
+endef
+
+define dep_autopatch_appsrc.erl
+ AppSrcOut = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+ AppSrcIn = case filelib:is_regular(AppSrcOut) of false -> "$(call core_native_path,$(DEPS_DIR)/$1/ebin/$1.app)"; true -> AppSrcOut end,
+ case filelib:is_regular(AppSrcIn) of
+ false -> ok;
+ true ->
+ {ok, [{application, $(1), L0}]} = file:consult(AppSrcIn),
+ L1 = lists:keystore(modules, 1, L0, {modules, []}),
+ L2 = case lists:keyfind(vsn, 1, L1) of
+ {_, git} -> lists:keyreplace(vsn, 1, L1, {vsn, lists:droplast(os:cmd("git -C $(DEPS_DIR)/$1 describe --dirty --tags --always"))});
+ {_, {cmd, _}} -> lists:keyreplace(vsn, 1, L1, {vsn, "cmd"});
+ _ -> L1
+ end,
+ L3 = case lists:keyfind(registered, 1, L2) of false -> [{registered, []}|L2]; _ -> L2 end,
+ ok = file:write_file(AppSrcOut, io_lib:format("~p.~n", [{application, $(1), L3}])),
+ case AppSrcOut of AppSrcIn -> ok; _ -> ok = file:delete(AppSrcIn) end
+ end,
+ halt()
+endef
+
+define dep_fetch_git
+ git clone -q -n -- $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && git checkout -q $(call dep_commit,$(1));
+endef
+
+define dep_fetch_git-subfolder
+ mkdir -p $(ERLANG_MK_TMP)/git-subfolder; \
+ git clone -q -n -- $(call dep_repo,$1) \
+ $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1); \
+ cd $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1) \
+ && git checkout -q $(call dep_commit,$1); \
+ ln -s $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1)/$(word 4,$(dep_$(1))) \
+ $(DEPS_DIR)/$(call dep_name,$1);
+endef
+
+define dep_fetch_git-submodule
+ git submodule update --init -- $(DEPS_DIR)/$1;
+endef
+
+define dep_fetch_hg
+ hg clone -q -U $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && hg update -q $(call dep_commit,$(1));
+endef
+
+define dep_fetch_svn
+ svn checkout -q $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+define dep_fetch_cp
+ cp -R $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+define dep_fetch_ln
+ ln -s $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+# Hex dependencies only carry a package version, so there is no need to look in the Erlang.mk packages.
+define dep_fetch_hex
+ mkdir -p $(ERLANG_MK_TMP)/hex $(DEPS_DIR)/$1; \
+ $(call core_http_get,$(ERLANG_MK_TMP)/hex/$1.tar,\
+ https://repo.hex.pm/tarballs/$(if $(word 3,$(dep_$1)),$(word 3,$(dep_$1)),$1)-$(strip $(word 2,$(dep_$1))).tar); \
+ tar -xOf $(ERLANG_MK_TMP)/hex/$1.tar contents.tar.gz | tar -C $(DEPS_DIR)/$1 -xzf -;
+endef
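+
+# A sketch of hex dependency declarations; the optional third word is the
+# hex package name, used when it differs from the application name
+# ("my_pkg" is a placeholder):
+#
+#   dep_jsx = hex 3.1.0
+#   dep_myapp = hex 1.0.0 my_pkg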
+
+define dep_fetch_fail
+ echo "Error: Unknown or invalid dependency: $(1)." >&2; \
+ exit 78;
+endef
+
+# Kept for compatibility with older Erlang.mk configurations.
+define dep_fetch_legacy
+ $(warning WARNING: '$(1)' dependency configuration uses deprecated format.) \
+ git clone -q -n -- $(word 1,$(dep_$(1))) $(DEPS_DIR)/$(1); \
+ cd $(DEPS_DIR)/$(1) && git checkout -q $(if $(word 2,$(dep_$(1))),$(word 2,$(dep_$(1))),master);
+endef
+
+define dep_target
+$(DEPS_DIR)/$(call dep_name,$1): | $(ERLANG_MK_TMP)
+ $(eval DEP_NAME := $(call dep_name,$1))
+ $(eval DEP_STR := $(if $(filter $1,$(DEP_NAME)),$1,"$1 ($(DEP_NAME))"))
+ $(verbose) if test -d $(APPS_DIR)/$(DEP_NAME); then \
+ echo "Error: Dependency" $(DEP_STR) "conflicts with application found in $(APPS_DIR)/$(DEP_NAME)." >&2; \
+ exit 17; \
+ fi
+ $(verbose) mkdir -p $(DEPS_DIR)
+ $(dep_verbose) $(call dep_fetch_$(strip $(call dep_fetch,$(1))),$(1))
+ $(verbose) if [ -f $(DEPS_DIR)/$(1)/configure.ac -o -f $(DEPS_DIR)/$(1)/configure.in ] \
+ && [ ! -f $(DEPS_DIR)/$(1)/configure ]; then \
+ echo " AUTO " $(DEP_STR); \
+ cd $(DEPS_DIR)/$(1) && autoreconf -Wall -vif -I m4; \
+ fi
+ - $(verbose) if [ -f $(DEPS_DIR)/$(DEP_NAME)/configure ]; then \
+ echo " CONF " $(DEP_STR); \
+ cd $(DEPS_DIR)/$(DEP_NAME) && ./configure; \
+ fi
+ifeq ($(filter $(1),$(NO_AUTOPATCH)),)
+ $(verbose) $$(MAKE) --no-print-directory autopatch-$(DEP_NAME)
+endif
+
+.PHONY: autopatch-$(call dep_name,$1)
+
+autopatch-$(call dep_name,$1)::
+ $(verbose) if [ "$(1)" = "amqp_client" -a "$(RABBITMQ_CLIENT_PATCH)" ]; then \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \
+ echo " PATCH Downloading rabbitmq-codegen"; \
+ git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \
+ fi; \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-server ]; then \
+ echo " PATCH Downloading rabbitmq-server"; \
+ git clone https://github.com/rabbitmq/rabbitmq-server.git $(DEPS_DIR)/rabbitmq-server; \
+ fi; \
+ ln -s $(DEPS_DIR)/amqp_client/deps/rabbit_common-0.0.0 $(DEPS_DIR)/rabbit_common; \
+ elif [ "$(1)" = "rabbit" -a "$(RABBITMQ_SERVER_PATCH)" ]; then \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \
+ echo " PATCH Downloading rabbitmq-codegen"; \
+ git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \
+ fi \
+ elif [ "$1" = "elixir" -a "$(ELIXIR_PATCH)" ]; then \
+ ln -s lib/elixir/ebin $(DEPS_DIR)/elixir/; \
+ else \
+ $$(call dep_autopatch,$(call dep_name,$1)) \
+ fi
+endef
+
+$(foreach dep,$(BUILD_DEPS) $(DEPS),$(eval $(call dep_target,$(dep))))
+
+ifndef IS_APP
+clean:: clean-apps
+
+clean-apps:
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ $(MAKE) -C $$dep clean IS_APP=1; \
+ done
+
+distclean:: distclean-apps
+
+distclean-apps:
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ $(MAKE) -C $$dep distclean IS_APP=1; \
+ done
+endif
+
+ifndef SKIP_DEPS
+distclean:: distclean-deps
+
+distclean-deps:
+ $(gen_verbose) rm -rf $(DEPS_DIR)
+endif
+
+# Forward-declare variables used in core/deps-tools.mk. This is required
+# in case plugins use them.
+
+ERLANG_MK_RECURSIVE_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-deps-list.log
+ERLANG_MK_RECURSIVE_DOC_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-doc-deps-list.log
+ERLANG_MK_RECURSIVE_REL_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-rel-deps-list.log
+ERLANG_MK_RECURSIVE_TEST_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-test-deps-list.log
+ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-shell-deps-list.log
+
+ERLANG_MK_QUERY_DEPS_FILE = $(ERLANG_MK_TMP)/query-deps.log
+ERLANG_MK_QUERY_DOC_DEPS_FILE = $(ERLANG_MK_TMP)/query-doc-deps.log
+ERLANG_MK_QUERY_REL_DEPS_FILE = $(ERLANG_MK_TMP)/query-rel-deps.log
+ERLANG_MK_QUERY_TEST_DEPS_FILE = $(ERLANG_MK_TMP)/query-test-deps.log
+ERLANG_MK_QUERY_SHELL_DEPS_FILE = $(ERLANG_MK_TMP)/query-shell-deps.log
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: clean-app
+
+# Configuration.
+
+ERLC_OPTS ?= -Werror +debug_info +warn_export_vars +warn_shadow_vars \
+ +warn_obsolete_guard # +bin_opt_info +warn_export_all +warn_missing_spec
+COMPILE_FIRST ?=
+COMPILE_FIRST_PATHS = $(addprefix src/,$(addsuffix .erl,$(COMPILE_FIRST)))
+ERLC_EXCLUDE ?=
+ERLC_EXCLUDE_PATHS = $(addprefix src/,$(addsuffix .erl,$(ERLC_EXCLUDE)))
+
+ERLC_ASN1_OPTS ?=
+
+ERLC_MIB_OPTS ?=
+COMPILE_MIB_FIRST ?=
+COMPILE_MIB_FIRST_PATHS = $(addprefix mibs/,$(addsuffix .mib,$(COMPILE_MIB_FIRST)))
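+
+# These defaults are typically extended or overridden from the project
+# Makefile, for example ("my_behaviour" is a placeholder module name):
+#
+#   ERLC_OPTS += +warn_missing_spec
+#   COMPILE_FIRST = my_behaviour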
+
+# Verbosity.
+
+app_verbose_0 = @echo " APP " $(PROJECT);
+app_verbose_2 = set -x;
+app_verbose = $(app_verbose_$(V))
+
+appsrc_verbose_0 = @echo " APP " $(PROJECT).app.src;
+appsrc_verbose_2 = set -x;
+appsrc_verbose = $(appsrc_verbose_$(V))
+
+makedep_verbose_0 = @echo " DEPEND" $(PROJECT).d;
+makedep_verbose_2 = set -x;
+makedep_verbose = $(makedep_verbose_$(V))
+
+erlc_verbose_0 = @echo " ERLC " $(filter-out $(patsubst %,%.erl,$(ERLC_EXCLUDE)),\
+ $(filter %.erl %.core,$(?F)));
+erlc_verbose_2 = set -x;
+erlc_verbose = $(erlc_verbose_$(V))
+
+xyrl_verbose_0 = @echo " XYRL " $(filter %.xrl %.yrl,$(?F));
+xyrl_verbose_2 = set -x;
+xyrl_verbose = $(xyrl_verbose_$(V))
+
+asn1_verbose_0 = @echo " ASN1 " $(filter %.asn1,$(?F));
+asn1_verbose_2 = set -x;
+asn1_verbose = $(asn1_verbose_$(V))
+
+mib_verbose_0 = @echo " MIB " $(filter %.bin %.mib,$(?F));
+mib_verbose_2 = set -x;
+mib_verbose = $(mib_verbose_$(V))
+
+ifneq ($(wildcard src/),)
+
+# Targets.
+
+app:: $(if $(wildcard ebin/test),clean) deps
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d
+ $(verbose) $(MAKE) --no-print-directory app-build
+
+ifeq ($(wildcard src/$(PROJECT_MOD).erl),)
+define app_file
+{application, '$(PROJECT)', [
+ {description, "$(PROJECT_DESCRIPTION)"},
+ {vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP),
+ {id$(comma)$(space)"$(1)"}$(comma))
+ {modules, [$(call comma_list,$(2))]},
+ {registered, []},
+ {applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(foreach dep,$(DEPS),$(call dep_name,$(dep))))]},
+ {env, $(subst \,\\,$(PROJECT_ENV))}$(if $(findstring {,$(PROJECT_APP_EXTRA_KEYS)),$(comma)$(newline)$(tab)$(subst \,\\,$(PROJECT_APP_EXTRA_KEYS)),)
+]}.
+endef
+else
+define app_file
+{application, '$(PROJECT)', [
+ {description, "$(PROJECT_DESCRIPTION)"},
+ {vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP),
+ {id$(comma)$(space)"$(1)"}$(comma))
+ {modules, [$(call comma_list,$(2))]},
+ {registered, [$(call comma_list,$(PROJECT)_sup $(PROJECT_REGISTERED))]},
+ {applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(foreach dep,$(DEPS),$(call dep_name,$(dep))))]},
+ {mod, {$(PROJECT_MOD), []}},
+ {env, $(subst \,\\,$(PROJECT_ENV))}$(if $(findstring {,$(PROJECT_APP_EXTRA_KEYS)),$(comma)$(newline)$(tab)$(subst \,\\,$(PROJECT_APP_EXTRA_KEYS)),)
+]}.
+endef
+endif
+
+app-build: ebin/$(PROJECT).app
+ $(verbose) :
+
+# Source files.
+
+ALL_SRC_FILES := $(sort $(call core_find,src/,*))
+
+ERL_FILES := $(filter %.erl,$(ALL_SRC_FILES))
+CORE_FILES := $(filter %.core,$(ALL_SRC_FILES))
+
+# ASN.1 files.
+
+ifneq ($(wildcard asn1/),)
+ASN1_FILES = $(sort $(call core_find,asn1/,*.asn1))
+ERL_FILES += $(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES))))
+
+define compile_asn1
+ $(verbose) mkdir -p include/
+ $(asn1_verbose) erlc -v -I include/ -o asn1/ +noobj $(ERLC_ASN1_OPTS) $(1)
+ $(verbose) mv asn1/*.erl src/
+ -$(verbose) mv asn1/*.hrl include/
+ $(verbose) mv asn1/*.asn1db include/
+endef
+
+$(PROJECT).d:: $(ASN1_FILES)
+ $(if $(strip $?),$(call compile_asn1,$?))
+endif
+
+# SNMP MIB files.
+
+ifneq ($(wildcard mibs/),)
+MIB_FILES = $(sort $(call core_find,mibs/,*.mib))
+
+$(PROJECT).d:: $(COMPILE_MIB_FIRST_PATHS) $(MIB_FILES)
+ $(verbose) mkdir -p include/ priv/mibs/
+ $(mib_verbose) erlc -v $(ERLC_MIB_OPTS) -o priv/mibs/ -I priv/mibs/ $?
+ $(mib_verbose) erlc -o include/ -- $(addprefix priv/mibs/,$(patsubst %.mib,%.bin,$(notdir $?)))
+endif
+
+# Leex and Yecc files.
+
+XRL_FILES := $(filter %.xrl,$(ALL_SRC_FILES))
+XRL_ERL_FILES = $(addprefix src/,$(patsubst %.xrl,%.erl,$(notdir $(XRL_FILES))))
+ERL_FILES += $(XRL_ERL_FILES)
+
+YRL_FILES := $(filter %.yrl,$(ALL_SRC_FILES))
+YRL_ERL_FILES = $(addprefix src/,$(patsubst %.yrl,%.erl,$(notdir $(YRL_FILES))))
+ERL_FILES += $(YRL_ERL_FILES)
+
+$(PROJECT).d:: $(XRL_FILES) $(YRL_FILES)
+ $(if $(strip $?),$(xyrl_verbose) erlc -v -o src/ $(YRL_ERLC_OPTS) $?)
+
+# Erlang and Core Erlang files.
+
+define makedep.erl
+ E = ets:new(makedep, [bag]),
+ G = digraph:new([acyclic]),
+ ErlFiles = lists:usort(string:tokens("$(ERL_FILES)", " ")),
+ DepsDir = "$(call core_native_path,$(DEPS_DIR))",
+ AppsDir = "$(call core_native_path,$(APPS_DIR))",
+ DepsDirsSrc = "$(if $(wildcard $(DEPS_DIR)/*/src), $(call core_native_path,$(wildcard $(DEPS_DIR)/*/src)))",
+ DepsDirsInc = "$(if $(wildcard $(DEPS_DIR)/*/include), $(call core_native_path,$(wildcard $(DEPS_DIR)/*/include)))",
+ AppsDirsSrc = "$(if $(wildcard $(APPS_DIR)/*/src), $(call core_native_path,$(wildcard $(APPS_DIR)/*/src)))",
+ AppsDirsInc = "$(if $(wildcard $(APPS_DIR)/*/include), $(call core_native_path,$(wildcard $(APPS_DIR)/*/include)))",
+ DepsDirs = lists:usort(string:tokens(DepsDirsSrc++DepsDirsInc, " ")),
+ AppsDirs = lists:usort(string:tokens(AppsDirsSrc++AppsDirsInc, " ")),
+ Modules = [{list_to_atom(filename:basename(F, ".erl")), F} || F <- ErlFiles],
+ Add = fun (Mod, Dep) ->
+ case lists:keyfind(Dep, 1, Modules) of
+ false -> ok;
+ {_, DepFile} ->
+ {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+ ets:insert(E, {ModFile, DepFile}),
+ digraph:add_vertex(G, Mod),
+ digraph:add_vertex(G, Dep),
+ digraph:add_edge(G, Mod, Dep)
+ end
+ end,
+ AddHd = fun (F, Mod, DepFile) ->
+ case file:open(DepFile, [read]) of
+ {error, enoent} ->
+ ok;
+ {ok, Fd} ->
+ {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+ case ets:match(E, {ModFile, DepFile}) of
+ [] ->
+ ets:insert(E, {ModFile, DepFile}),
+ F(F, Fd, Mod,0);
+ _ -> ok
+ end
+ end
+ end,
+ SearchHrl = fun
+ F(_Hrl, []) -> {error,enoent};
+ F(Hrl, [Dir|Dirs]) ->
+ HrlF = filename:join([Dir,Hrl]),
+ case filelib:is_file(HrlF) of
+ true ->
+ {ok, HrlF};
+ false -> F(Hrl,Dirs)
+ end
+ end,
+ Attr = fun
+ (_F, Mod, behavior, Dep) ->
+ Add(Mod, Dep);
+ (_F, Mod, behaviour, Dep) ->
+ Add(Mod, Dep);
+ (_F, Mod, compile, {parse_transform, Dep}) ->
+ Add(Mod, Dep);
+ (_F, Mod, compile, Opts) when is_list(Opts) ->
+ case proplists:get_value(parse_transform, Opts) of
+ undefined -> ok;
+ Dep -> Add(Mod, Dep)
+ end;
+ (F, Mod, include, Hrl) ->
+ case SearchHrl(Hrl, ["src", "include",AppsDir,DepsDir]++AppsDirs++DepsDirs) of
+ {ok, FoundHrl} -> AddHd(F, Mod, FoundHrl);
+ {error, _} -> false
+ end;
+ (F, Mod, include_lib, Hrl) ->
+ case SearchHrl(Hrl, ["src", "include",AppsDir,DepsDir]++AppsDirs++DepsDirs) of
+ {ok, FoundHrl} -> AddHd(F, Mod, FoundHrl);
+ {error, _} -> false
+ end;
+ (F, Mod, import, {Imp, _}) ->
+ IsFile =
+ case lists:keyfind(Imp, 1, Modules) of
+ false -> false;
+ {_, FilePath} -> filelib:is_file(FilePath)
+ end,
+ case IsFile of
+ false -> ok;
+ true -> Add(Mod, Imp)
+ end;
+ (_, _, _, _) -> ok
+ end,
+ MakeDepend = fun
+ (F, Fd, Mod, StartLocation) ->
+ {ok, Filename} = file:pid2name(Fd),
+ case io:parse_erl_form(Fd, undefined, StartLocation) of
+ {ok, AbsData, EndLocation} ->
+ case AbsData of
+ {attribute, _, Key, Value} ->
+ Attr(F, Mod, Key, Value),
+ F(F, Fd, Mod, EndLocation);
+ _ -> F(F, Fd, Mod, EndLocation)
+ end;
+ {eof, _ } -> file:close(Fd);
+ {error, ErrorDescription } ->
+ file:close(Fd);
+ {error, ErrorInfo, ErrorLocation} ->
+ F(F, Fd, Mod, ErrorLocation)
+ end,
+ ok
+ end,
+ [begin
+ Mod = list_to_atom(filename:basename(F, ".erl")),
+ case file:open(F, [read]) of
+ {ok, Fd} -> MakeDepend(MakeDepend, Fd, Mod,0);
+ {error, enoent} -> ok
+ end
+ end || F <- ErlFiles],
+ Depend = sofs:to_external(sofs:relation_to_family(sofs:relation(ets:tab2list(E)))),
+ CompileFirst = [X || X <- lists:reverse(digraph_utils:topsort(G)), [] =/= digraph:in_neighbours(G, X)],
+ TargetPath = fun(Target) ->
+ case lists:keyfind(Target, 1, Modules) of
+ false -> "";
+ {_, DepFile} ->
+ DirSubname = tl(string:tokens(filename:dirname(DepFile), "/")),
+ string:join(DirSubname ++ [atom_to_list(Target)], "/")
+ end
+ end,
+ Output0 = [
+ "# Generated by Erlang.mk. Edit at your own risk!\n\n",
+ [[F, "::", [[" ", D] || D <- Deps], "; @touch \$$@\n"] || {F, Deps} <- Depend],
+ "\nCOMPILE_FIRST +=", [[" ", TargetPath(CF)] || CF <- CompileFirst], "\n"
+ ],
+ Output = case "é" of
+ [233] -> unicode:characters_to_binary(Output0);
+ _ -> Output0
+ end,
+ ok = file:write_file("$(1)", Output),
+ halt()
+endef
+
+ifeq ($(if $(NO_MAKEDEP),$(wildcard $(PROJECT).d),),)
+$(PROJECT).d:: $(ERL_FILES) $(call core_find,include/,*.hrl) $(MAKEFILE_LIST)
+ $(makedep_verbose) $(call erlang,$(call makedep.erl,$@))
+endif
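+
+# Assumed usage: when $(PROJECT).d already exists, its regeneration can be
+# skipped with:
+#
+#   $ make NO_MAKEDEP=1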
+
+ifeq ($(IS_APP)$(IS_DEP),)
+ifneq ($(words $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES)),0)
+# Rebuild everything when the Makefile changes.
+$(ERLANG_MK_TMP)/last-makefile-change: $(MAKEFILE_LIST) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES); \
+ touch -c $(PROJECT).d; \
+ fi
+ $(verbose) touch $@
+
+$(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES):: $(ERLANG_MK_TMP)/last-makefile-change
+ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change
+endif
+endif
+
+$(PROJECT).d::
+ $(verbose) :
+
+include $(wildcard $(PROJECT).d)
+
+ebin/$(PROJECT).app:: ebin/
+
+ebin/:
+ $(verbose) mkdir -p ebin/
+
+define compile_erl
+ $(erlc_verbose) erlc -v $(if $(IS_DEP),$(filter-out -Werror,$(ERLC_OPTS)),$(ERLC_OPTS)) -o ebin/ \
+ -pa ebin/ -I include/ $(filter-out $(ERLC_EXCLUDE_PATHS),$(COMPILE_FIRST_PATHS) $(1))
+endef
+
+define validate_app_file
+ case file:consult("ebin/$(PROJECT).app") of
+ {ok, _} -> halt();
+ _ -> halt(1)
+ end
+endef
+
+ebin/$(PROJECT).app:: $(ERL_FILES) $(CORE_FILES) $(wildcard src/$(PROJECT).app.src)
+ $(eval FILES_TO_COMPILE := $(filter-out src/$(PROJECT).app.src,$?))
+ $(if $(strip $(FILES_TO_COMPILE)),$(call compile_erl,$(FILES_TO_COMPILE)))
+# Older git versions do not have the --first-parent flag. Do without it in that case.
+ $(eval GITDESCRIBE := $(shell git describe --dirty --abbrev=7 --tags --always --first-parent 2>/dev/null \
+ || git describe --dirty --abbrev=7 --tags --always 2>/dev/null || true))
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(filter-out $(ERLC_EXCLUDE_PATHS),$(ERL_FILES) $(CORE_FILES) $(BEAM_FILES)))))))
+ifeq ($(wildcard src/$(PROJECT).app.src),)
+ $(app_verbose) printf '$(subst %,%%,$(subst $(newline),\n,$(subst ','\'',$(call app_file,$(GITDESCRIBE),$(MODULES)))))' \
+ > ebin/$(PROJECT).app
+ $(verbose) if ! $(call erlang,$(call validate_app_file)); then \
+ echo "The .app file produced is invalid. Please verify the value of PROJECT_ENV." >&2; \
+ exit 1; \
+ fi
+else
+ $(verbose) if [ -z "$$(grep -e '^[^%]*{\s*modules\s*,' src/$(PROJECT).app.src)" ]; then \
+ echo "Empty modules entry not found in $(PROJECT).app.src. Please consult the erlang.mk documentation for instructions." >&2; \
+ exit 1; \
+ fi
+ $(appsrc_verbose) cat src/$(PROJECT).app.src \
+ | sed "s/{[[:space:]]*modules[[:space:]]*,[[:space:]]*\[\]}/{modules, \[$(call comma_list,$(MODULES))\]}/" \
+ | sed "s/{id,[[:space:]]*\"git\"}/{id, \"$(subst /,\/,$(GITDESCRIBE))\"}/" \
+ > ebin/$(PROJECT).app
+endif
+ifneq ($(wildcard src/$(PROJECT).appup),)
+ $(verbose) cp src/$(PROJECT).appup ebin/
+endif
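+
+# For illustration (names and version are hypothetical): starting from an
+# app.src containing {modules, []} and {id, "git"}, the generated
+# ebin/$(PROJECT).app ends up with entries such as:
+#   {modules, ['my_app_app','my_app_sup']},
+#   {id, "v1.2.3-4-g1a2b3c4"},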
+
+clean:: clean-app
+
+clean-app:
+ $(gen_verbose) rm -rf $(PROJECT).d ebin/ priv/mibs/ $(XRL_ERL_FILES) $(YRL_ERL_FILES) \
+ $(addprefix include/,$(patsubst %.mib,%.hrl,$(notdir $(MIB_FILES)))) \
+ $(addprefix include/,$(patsubst %.asn1,%.hrl,$(notdir $(ASN1_FILES)))) \
+ $(addprefix include/,$(patsubst %.asn1,%.asn1db,$(notdir $(ASN1_FILES)))) \
+ $(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES))))
+
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Viktor Söderqvist <viktor@zuiderkwast.se>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: docs-deps
+
+# Configuration.
+
+ALL_DOC_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(DOC_DEPS))
+
+# Targets.
+
+$(foreach dep,$(DOC_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+doc-deps:
+else
+doc-deps: $(ALL_DOC_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_DOC_DEPS_DIRS) ; do $(MAKE) -C $$dep IS_DEP=1; done
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: rel-deps
+
+# Configuration.
+
+ALL_REL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(REL_DEPS))
+
+# Targets.
+
+$(foreach dep,$(REL_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+rel-deps:
+else
+rel-deps: $(ALL_REL_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_REL_DEPS_DIRS) ; do $(MAKE) -C $$dep; done
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: test-deps test-dir test-build clean-test-dir
+
+# Configuration.
+
+TEST_DIR ?= $(CURDIR)/test
+
+ALL_TEST_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(TEST_DEPS))
+
+TEST_ERLC_OPTS ?= +debug_info +warn_export_vars +warn_shadow_vars +warn_obsolete_guard
+TEST_ERLC_OPTS += -DTEST=1
+
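+# Because -DTEST=1 is added above, modules can guard test-only code with
+# the TEST macro; a minimal sketch (module and function are hypothetical):
+#   -ifdef(TEST).
+#   -export([reset_for_tests/0]).
+#   reset_for_tests() -> ok.
+#   -endif.
+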
+# Targets.
+
+$(foreach dep,$(TEST_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+test-deps:
+else
+test-deps: $(ALL_TEST_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_TEST_DEPS_DIRS) ; do \
+ if [ -z "$(strip $(FULL))" ] && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ else \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ fi \
+ done
+endif
+
+ifneq ($(wildcard $(TEST_DIR)),)
+test-dir: $(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build
+ @:
+
+test_erlc_verbose_0 = @echo " ERLC " $(filter-out $(patsubst %,%.erl,$(ERLC_EXCLUDE)),\
+ $(filter %.erl %.core,$(notdir $(FILES_TO_COMPILE))));
+test_erlc_verbose_2 = set -x;
+test_erlc_verbose = $(test_erlc_verbose_$(V))
+
+define compile_test_erl
+ $(test_erlc_verbose) erlc -v $(TEST_ERLC_OPTS) -o $(TEST_DIR) \
+ -pa ebin/ -I include/ $(1)
+endef
+
+ERL_TEST_FILES = $(call core_find,$(TEST_DIR)/,*.erl)
+$(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build: $(ERL_TEST_FILES) $(MAKEFILE_LIST)
+ $(eval FILES_TO_COMPILE := $(if $(filter $(MAKEFILE_LIST),$?),$(filter $(ERL_TEST_FILES),$^),$?))
+ $(if $(strip $(FILES_TO_COMPILE)),$(call compile_test_erl,$(FILES_TO_COMPILE)) && touch $@)
+endif
+
+test-build:: IS_TEST=1
+test-build:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build:: $(if $(wildcard src),$(if $(wildcard ebin/test),,clean)) $(if $(IS_APP),,deps test-deps)
+# We already compiled everything when IS_APP=1.
+ifndef IS_APP
+ifneq ($(wildcard src),)
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(verbose) $(MAKE) --no-print-directory app-build ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(gen_verbose) touch ebin/test
+endif
+ifneq ($(wildcard $(TEST_DIR)),)
+ $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+endif
+endif
+
+# Roughly the same as test-build, but when IS_APP=1.
+# We only care about compiling the current application.
+ifdef IS_APP
+test-build-app:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build-app:: deps test-deps
+ifneq ($(wildcard src),)
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(verbose) $(MAKE) --no-print-directory app-build ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(gen_verbose) touch ebin/test
+endif
+ifneq ($(wildcard $(TEST_DIR)),)
+ $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+endif
+endif
+
+clean:: clean-test-dir
+
+clean-test-dir:
+ifneq ($(wildcard $(TEST_DIR)/*.beam),)
+ $(gen_verbose) rm -f $(TEST_DIR)/*.beam $(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: rebar.config
+
+# We strip out -Werror because we don't want to fail due to
+# warnings when used as a dependency.
+
+compat_prepare_erlc_opts = $(shell echo "$1" | sed 's/, */,/g')
+
+define compat_convert_erlc_opts
+$(if $(filter-out -Werror,$1),\
+ $(if $(findstring +,$1),\
+ $(shell echo $1 | cut -b 2-)))
+endef
+
+define compat_erlc_opts_to_list
+[$(call comma_list,$(foreach o,$(call compat_prepare_erlc_opts,$1),$(call compat_convert_erlc_opts,$o)))]
+endef
+
+define compat_rebar_config
+{deps, [
+$(call comma_list,$(foreach d,$(DEPS),\
+ $(if $(filter hex,$(call dep_fetch,$d)),\
+ {$(call dep_name,$d)$(comma)"$(call dep_repo,$d)"},\
+ {$(call dep_name,$d)$(comma)".*"$(comma){git,"$(call dep_repo,$d)"$(comma)"$(call dep_commit,$d)"}})))
+]}.
+{erl_opts, $(call compat_erlc_opts_to_list,$(ERLC_OPTS))}.
+endef
+
+rebar.config:
+ $(gen_verbose) $(call core_render,compat_rebar_config,rebar.config)
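+
+# For illustration (dependency name and commit are hypothetical): with
+# DEPS = cowlib and ERLC_OPTS = +debug_info, the generated file reads:
+#   {deps, [
+#   {cowlib,".*",{git,"https://github.com/ninenines/cowlib","master"}}
+#   ]}.
+#   {erl_opts, [debug_info]}.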
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter asciideck,$(DEPS) $(DOC_DEPS)),asciideck)
+
+.PHONY: asciidoc asciidoc-guide asciidoc-manual install-asciidoc distclean-asciidoc-guide distclean-asciidoc-manual
+
+# Core targets.
+
+docs:: asciidoc
+
+distclean:: distclean-asciidoc-guide distclean-asciidoc-manual
+
+# Plugin-specific targets.
+
+asciidoc: asciidoc-guide asciidoc-manual
+
+# User guide.
+
+ifeq ($(wildcard doc/src/guide/book.asciidoc),)
+asciidoc-guide:
+else
+asciidoc-guide: distclean-asciidoc-guide doc-deps
+ a2x -v -f pdf doc/src/guide/book.asciidoc && mv doc/src/guide/book.pdf doc/guide.pdf
+ a2x -v -f chunked doc/src/guide/book.asciidoc && mv doc/src/guide/book.chunked/ doc/html/
+
+distclean-asciidoc-guide:
+ $(gen_verbose) rm -rf doc/html/ doc/guide.pdf
+endif
+
+# Man pages.
+
+ASCIIDOC_MANUAL_FILES := $(wildcard doc/src/manual/*.asciidoc)
+
+ifeq ($(ASCIIDOC_MANUAL_FILES),)
+asciidoc-manual:
+else
+
+# Configuration.
+
+MAN_INSTALL_PATH ?= /usr/local/share/man
+MAN_SECTIONS ?= 3 7
+MAN_PROJECT ?= $(shell echo $(PROJECT) | sed 's/^./\U&\E/')
+MAN_VERSION ?= $(PROJECT_VERSION)
+
+# Plugin-specific targets.
+
+define asciidoc2man.erl
+try
+ [begin
+ io:format(" ADOC ~s~n", [F]),
+ ok = asciideck:to_manpage(asciideck:parse_file(F), #{
+ compress => gzip,
+ outdir => filename:dirname(F),
+ extra2 => "$(MAN_PROJECT) $(MAN_VERSION)",
+ extra3 => "$(MAN_PROJECT) Function Reference"
+ })
+ end || F <- [$(shell echo $(addprefix $(comma)\",$(addsuffix \",$1)) | sed 's/^.//')]],
+ halt(0)
+catch C:E ->
+ io:format("Exception ~p:~p~nStacktrace: ~p~n", [C, E, erlang:get_stacktrace()]),
+ halt(1)
+end.
+endef
+
+asciidoc-manual:: doc-deps
+
+asciidoc-manual:: $(ASCIIDOC_MANUAL_FILES)
+ $(gen_verbose) $(call erlang,$(call asciidoc2man.erl,$?))
+ $(verbose) $(foreach s,$(MAN_SECTIONS),mkdir -p doc/man$s/ && mv doc/src/manual/*.$s.gz doc/man$s/;)
+
+install-docs:: install-asciidoc
+
+install-asciidoc: asciidoc-manual
+ $(foreach s,$(MAN_SECTIONS),\
+ mkdir -p $(MAN_INSTALL_PATH)/man$s/ && \
+ install -g `id -g` -o `id -u` -m 0644 doc/man$s/*.gz $(MAN_INSTALL_PATH)/man$s/;)
+
+distclean-asciidoc-manual:
+ $(gen_verbose) rm -rf $(addprefix doc/man,$(MAN_SECTIONS))
+endif
+endif
+
+# Copyright (c) 2014-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: bootstrap bootstrap-lib bootstrap-rel new list-templates
+
+# Core targets.
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Bootstrap targets:" \
+ " bootstrap Generate a skeleton of an OTP application" \
+ " bootstrap-lib Generate a skeleton of an OTP library" \
+ " bootstrap-rel Generate the files needed to build a release" \
+ " new-app in=NAME Create a new local OTP application NAME" \
+ " new-lib in=NAME Create a new local OTP library NAME" \
+ " new t=TPL n=NAME Generate a module NAME based on the template TPL" \
+ " new t=T n=N in=APP Generate a module NAME based on the template TPL in APP" \
+ " list-templates List available templates"
+
+# Bootstrap templates.
+
+define bs_appsrc
+{application, $p, [
+ {description, ""},
+ {vsn, "0.1.0"},
+ {id, "git"},
+ {modules, []},
+ {registered, []},
+ {applications, [
+ kernel,
+ stdlib
+ ]},
+ {mod, {$p_app, []}},
+ {env, []}
+]}.
+endef
+
+define bs_appsrc_lib
+{application, $p, [
+ {description, ""},
+ {vsn, "0.1.0"},
+ {id, "git"},
+ {modules, []},
+ {registered, []},
+ {applications, [
+ kernel,
+ stdlib
+ ]}
+]}.
+endef
+
+# To prevent autocompletion issues with ZSH, we add "include erlang.mk"
+# separately during the actual bootstrap.
+define bs_Makefile
+PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.1.0
+$(if $(SP),
+# Whitespace to be used when creating files from templates.
+SP = $(SP)
+)
+endef
+
+define bs_apps_Makefile
+PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.1.0
+$(if $(SP),
+# Whitespace to be used when creating files from templates.
+SP = $(SP)
+)
+# Make sure we know where the applications are located.
+ROOT_DIR ?= $(call core_relpath,$(dir $(ERLANG_MK_FILENAME)),$(APPS_DIR)/app)
+APPS_DIR ?= ..
+DEPS_DIR ?= $(call core_relpath,$(DEPS_DIR),$(APPS_DIR)/app)
+
+include $$(ROOT_DIR)/erlang.mk
+endef
+
+define bs_app
+-module($p_app).
+-behaviour(application).
+
+-export([start/2]).
+-export([stop/1]).
+
+start(_Type, _Args) ->
+ $p_sup:start_link().
+
+stop(_State) ->
+ ok.
+endef
+
+define bs_relx_config
+{release, {$p_release, "1"}, [$p, sasl, runtime_tools]}.
+{extended_start_script, true}.
+{sys_config, "config/sys.config"}.
+{vm_args, "config/vm.args"}.
+endef
+
+define bs_sys_config
+[
+].
+endef
+
+define bs_vm_args
+-name $p@127.0.0.1
+-setcookie $p
+-heart
+endef
+
+# Normal templates.
+
+define tpl_supervisor
+-module($(n)).
+-behaviour(supervisor).
+
+-export([start_link/0]).
+-export([init/1]).
+
+start_link() ->
+ supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+init([]) ->
+ Procs = [],
+ {ok, {{one_for_one, 1, 5}, Procs}}.
+endef
+
+define tpl_gen_server
+-module($(n)).
+-behaviour(gen_server).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_server.
+-export([init/1]).
+-export([handle_call/3]).
+-export([handle_cast/2]).
+-export([handle_info/2]).
+-export([terminate/2]).
+-export([code_change/3]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_server:start_link(?MODULE, [], []).
+
+%% gen_server.
+
+init([]) ->
+ {ok, #state{}}.
+
+handle_call(_Request, _From, State) ->
+ {reply, ignored, State}.
+
+handle_cast(_Msg, State) ->
+ {noreply, State}.
+
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+endef
+
+define tpl_module
+-module($(n)).
+-export([]).
+endef
+
+define tpl_cowboy_http
+-module($(n)).
+-behaviour(cowboy_http_handler).
+
+-export([init/3]).
+-export([handle/2]).
+-export([terminate/3]).
+
+-record(state, {
+}).
+
+init(_, Req, _Opts) ->
+ {ok, Req, #state{}}.
+
+handle(Req, State=#state{}) ->
+ {ok, Req2} = cowboy_req:reply(200, Req),
+ {ok, Req2, State}.
+
+terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_gen_fsm
+-module($(n)).
+-behaviour(gen_fsm).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_fsm.
+-export([init/1]).
+-export([state_name/2]).
+-export([handle_event/3]).
+-export([state_name/3]).
+-export([handle_sync_event/4]).
+-export([handle_info/3]).
+-export([terminate/3]).
+-export([code_change/4]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_fsm:start_link(?MODULE, [], []).
+
+%% gen_fsm.
+
+init([]) ->
+ {ok, state_name, #state{}}.
+
+state_name(_Event, StateData) ->
+ {next_state, state_name, StateData}.
+
+handle_event(_Event, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+state_name(_Event, _From, StateData) ->
+ {reply, ignored, state_name, StateData}.
+
+handle_sync_event(_Event, _From, StateName, StateData) ->
+ {reply, ignored, StateName, StateData}.
+
+handle_info(_Info, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+terminate(_Reason, _StateName, _StateData) ->
+ ok.
+
+code_change(_OldVsn, StateName, StateData, _Extra) ->
+ {ok, StateName, StateData}.
+endef
+
+define tpl_gen_statem
+-module($(n)).
+-behaviour(gen_statem).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_statem.
+-export([callback_mode/0]).
+-export([init/1]).
+-export([state_name/3]).
+-export([handle_event/4]).
+-export([terminate/3]).
+-export([code_change/4]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_statem:start_link(?MODULE, [], []).
+
+%% gen_statem.
+
+callback_mode() ->
+ state_functions.
+
+init([]) ->
+ {ok, state_name, #state{}}.
+
+state_name(_EventType, _EventData, StateData) ->
+ {next_state, state_name, StateData}.
+
+handle_event(_EventType, _EventData, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+terminate(_Reason, _StateName, _StateData) ->
+ ok.
+
+code_change(_OldVsn, StateName, StateData, _Extra) ->
+ {ok, StateName, StateData}.
+endef
+
+define tpl_cowboy_loop
+-module($(n)).
+-behaviour(cowboy_loop_handler).
+
+-export([init/3]).
+-export([info/3]).
+-export([terminate/3]).
+
+-record(state, {
+}).
+
+init(_, Req, _Opts) ->
+ {loop, Req, #state{}, 5000, hibernate}.
+
+info(_Info, Req, State) ->
+ {loop, Req, State, hibernate}.
+
+terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_cowboy_rest
+-module($(n)).
+
+-export([init/3]).
+-export([content_types_provided/2]).
+-export([get_html/2]).
+
+init(_, _Req, _Opts) ->
+ {upgrade, protocol, cowboy_rest}.
+
+content_types_provided(Req, State) ->
+ {[{{<<"text">>, <<"html">>, '*'}, get_html}], Req, State}.
+
+get_html(Req, State) ->
+ {<<"<html><body>This is REST!</body></html>">>, Req, State}.
+endef
+
+define tpl_cowboy_ws
+-module($(n)).
+-behaviour(cowboy_websocket_handler).
+
+-export([init/3]).
+-export([websocket_init/3]).
+-export([websocket_handle/3]).
+-export([websocket_info/3]).
+-export([websocket_terminate/3]).
+
+-record(state, {
+}).
+
+init(_, _, _) ->
+ {upgrade, protocol, cowboy_websocket}.
+
+websocket_init(_, Req, _Opts) ->
+ Req2 = cowboy_req:compact(Req),
+ {ok, Req2, #state{}}.
+
+websocket_handle({text, Data}, Req, State) ->
+ {reply, {text, Data}, Req, State};
+websocket_handle({binary, Data}, Req, State) ->
+ {reply, {binary, Data}, Req, State};
+websocket_handle(_Frame, Req, State) ->
+ {ok, Req, State}.
+
+websocket_info(_Info, Req, State) ->
+ {ok, Req, State}.
+
+websocket_terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_ranch_protocol
+-module($(n)).
+-behaviour(ranch_protocol).
+
+-export([start_link/4]).
+-export([init/4]).
+
+-type opts() :: [].
+-export_type([opts/0]).
+
+-record(state, {
+ socket :: inet:socket(),
+ transport :: module()
+}).
+
+start_link(Ref, Socket, Transport, Opts) ->
+ Pid = spawn_link(?MODULE, init, [Ref, Socket, Transport, Opts]),
+ {ok, Pid}.
+
+-spec init(ranch:ref(), inet:socket(), module(), opts()) -> ok.
+init(Ref, Socket, Transport, _Opts) ->
+ ok = ranch:accept_ack(Ref),
+ loop(#state{socket=Socket, transport=Transport}).
+
+loop(State) ->
+ loop(State).
+endef
+
+# Plugin-specific targets.
+
+ifndef WS
+ifdef SP
+WS = $(subst a,,a $(wordlist 1,$(SP),a a a a a a a a a a a a a a a a a a a a))
+else
+WS = $(tab)
+endif
+endif
+
+bootstrap:
+ifneq ($(wildcard src/),)
+ $(error Error: src/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(eval n := $(PROJECT)_sup)
+ $(verbose) $(call core_render,bs_Makefile,Makefile)
+ $(verbose) echo "include erlang.mk" >> Makefile
+ $(verbose) mkdir src/
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc,src/$(PROJECT).app.src)
+endif
+ $(verbose) $(call core_render,bs_app,src/$(PROJECT)_app.erl)
+ $(verbose) $(call core_render,tpl_supervisor,src/$(PROJECT)_sup.erl)
+
+bootstrap-lib:
+ifneq ($(wildcard src/),)
+ $(error Error: src/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(verbose) $(call core_render,bs_Makefile,Makefile)
+ $(verbose) echo "include erlang.mk" >> Makefile
+ $(verbose) mkdir src/
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc_lib,src/$(PROJECT).app.src)
+endif
+
+bootstrap-rel:
+ifneq ($(wildcard relx.config),)
+ $(error Error: relx.config already exists)
+endif
+ifneq ($(wildcard config/),)
+ $(error Error: config/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(verbose) $(call core_render,bs_relx_config,relx.config)
+ $(verbose) mkdir config/
+ $(verbose) $(call core_render,bs_sys_config,config/sys.config)
+ $(verbose) $(call core_render,bs_vm_args,config/vm.args)
+
+new-app:
+ifndef in
+ $(error Usage: $(MAKE) new-app in=APP)
+endif
+ifneq ($(wildcard $(APPS_DIR)/$in),)
+ $(error Error: Application $in already exists)
+endif
+ $(eval p := $(in))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(eval n := $(in)_sup)
+ $(verbose) mkdir -p $(APPS_DIR)/$p/src/
+ $(verbose) $(call core_render,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile)
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc,$(APPS_DIR)/$p/src/$p.app.src)
+endif
+ $(verbose) $(call core_render,bs_app,$(APPS_DIR)/$p/src/$p_app.erl)
+ $(verbose) $(call core_render,tpl_supervisor,$(APPS_DIR)/$p/src/$p_sup.erl)
+
+new-lib:
+ifndef in
+ $(error Usage: $(MAKE) new-lib in=APP)
+endif
+ifneq ($(wildcard $(APPS_DIR)/$in),)
+ $(error Error: Application $in already exists)
+endif
+ $(eval p := $(in))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(verbose) mkdir -p $(APPS_DIR)/$p/src/
+ $(verbose) $(call core_render,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile)
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc_lib,$(APPS_DIR)/$p/src/$p.app.src)
+endif
+
+new:
+ifeq ($(wildcard src/)$(in),)
+ $(error Error: src/ directory does not exist)
+endif
+ifndef t
+ $(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP])
+endif
+ifndef n
+ $(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP])
+endif
+ifdef in
+ $(verbose) $(call core_render,tpl_$(t),$(APPS_DIR)/$(in)/src/$(n).erl)
+else
+ $(verbose) $(call core_render,tpl_$(t),src/$(n).erl)
+endif
+
+list-templates:
+ $(verbose) echo Available templates:
+ $(verbose) printf " %s\n" $(sort $(patsubst tpl_%,%,$(filter tpl_%,$(.VARIABLES))))
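+
+# Example invocations (template, module and application names are
+# arbitrary):
+#   $ make new t=gen_server n=my_worker
+#   $ make new t=cowboy_http n=hello_handler in=my_app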
+
+# Copyright (c) 2014-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: clean-c_src distclean-c_src-env
+
+# Configuration.
+
+C_SRC_DIR ?= $(CURDIR)/c_src
+C_SRC_ENV ?= $(C_SRC_DIR)/env.mk
+C_SRC_OUTPUT ?= $(CURDIR)/priv/$(PROJECT)
+C_SRC_TYPE ?= shared
+
+# System type and C compiler/flags.
+
+ifeq ($(PLATFORM),msys2)
+ C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?= .exe
+ C_SRC_OUTPUT_SHARED_EXTENSION ?= .dll
+else
+ C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?=
+ C_SRC_OUTPUT_SHARED_EXTENSION ?= .so
+endif
+
+ifeq ($(C_SRC_TYPE),shared)
+ C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_SHARED_EXTENSION)
+else
+ C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_EXECUTABLE_EXTENSION)
+endif
+
+ifeq ($(PLATFORM),msys2)
+# We hardcode the compiler used on MSYS2. The default CC=cc does
+# not produce working code. The "gcc" MSYS2 package also doesn't.
+ CC = /mingw64/bin/gcc
+ export CC
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),darwin)
+ CC ?= cc
+ CFLAGS ?= -O3 -std=c99 -arch x86_64 -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -arch x86_64 -Wall
+ LDFLAGS ?= -arch x86_64 -flat_namespace -undefined suppress
+else ifeq ($(PLATFORM),freebsd)
+ CC ?= cc
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),linux)
+ CC ?= gcc
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+endif
+
+ifneq ($(PLATFORM),msys2)
+ CFLAGS += -fPIC
+ CXXFLAGS += -fPIC
+endif
+
+CFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
+CXXFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
+
+LDLIBS += -L"$(ERL_INTERFACE_LIB_DIR)" -lei
+
+# Verbosity.
+
+c_verbose_0 = @echo " C " $(filter-out $(notdir $(MAKEFILE_LIST) $(C_SRC_ENV)),$(^F));
+c_verbose = $(c_verbose_$(V))
+
+cpp_verbose_0 = @echo " CPP " $(filter-out $(notdir $(MAKEFILE_LIST) $(C_SRC_ENV)),$(^F));
+cpp_verbose = $(cpp_verbose_$(V))
+
+link_verbose_0 = @echo " LD " $(@F);
+link_verbose = $(link_verbose_$(V))
+
+# Targets.
+
+ifeq ($(wildcard $(C_SRC_DIR)),)
+else ifneq ($(wildcard $(C_SRC_DIR)/Makefile),)
+app:: app-c_src
+
+test-build:: app-c_src
+
+app-c_src:
+ $(MAKE) -C $(C_SRC_DIR)
+
+clean::
+ $(MAKE) -C $(C_SRC_DIR) clean
+
+else
+
+ifeq ($(SOURCES),)
+SOURCES := $(sort $(foreach pat,*.c *.C *.cc *.cpp,$(call core_find,$(C_SRC_DIR)/,$(pat))))
+endif
+OBJECTS = $(addsuffix .o, $(basename $(SOURCES)))
+
+COMPILE_C = $(c_verbose) $(CC) $(CFLAGS) $(CPPFLAGS) -c
+COMPILE_CPP = $(cpp_verbose) $(CXX) $(CXXFLAGS) $(CPPFLAGS) -c
+
+app:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
+
+test-build:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
+
+$(C_SRC_OUTPUT_FILE): $(OBJECTS)
+ $(verbose) mkdir -p $(dir $@)
+ $(link_verbose) $(CC) $(OBJECTS) \
+ $(LDFLAGS) $(if $(filter $(C_SRC_TYPE),shared),-shared) $(LDLIBS) \
+ -o $(C_SRC_OUTPUT_FILE)
+
+$(OBJECTS): $(MAKEFILE_LIST) $(C_SRC_ENV)
+
+%.o: %.c
+ $(COMPILE_C) $(OUTPUT_OPTION) $<
+
+%.o: %.cc
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+%.o: %.C
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+%.o: %.cpp
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+clean:: clean-c_src
+
+clean-c_src:
+ $(gen_verbose) rm -f $(C_SRC_OUTPUT_FILE) $(OBJECTS)
+
+endif
+
+ifneq ($(wildcard $(C_SRC_DIR)),)
+ERL_ERTS_DIR = $(shell $(ERL) -eval 'io:format("~s~n", [code:lib_dir(erts)]), halt().')
+
+$(C_SRC_ENV):
+ $(verbose) $(ERL) -eval "file:write_file(\"$(call core_native_path,$(C_SRC_ENV))\", \
+ io_lib:format( \
+ \"# Generated by Erlang.mk. Edit at your own risk!~n~n\" \
+ \"ERTS_INCLUDE_DIR ?= ~s/erts-~s/include/~n\" \
+ \"ERL_INTERFACE_INCLUDE_DIR ?= ~s~n\" \
+ \"ERL_INTERFACE_LIB_DIR ?= ~s~n\" \
+ \"ERTS_DIR ?= $(ERL_ERTS_DIR)~n\", \
+ [code:root_dir(), erlang:system_info(version), \
+ code:lib_dir(erl_interface, include), \
+ code:lib_dir(erl_interface, lib)])), \
+ halt()."
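+
+# For illustration, the generated env.mk looks as follows; the paths and
+# the ERTS version depend entirely on the local Erlang installation:
+#   ERTS_INCLUDE_DIR ?= /usr/lib/erlang/erts-11.1/include/
+#   ERL_INTERFACE_INCLUDE_DIR ?= /usr/lib/erlang/lib/erl_interface-4.0.1/include
+#   ERL_INTERFACE_LIB_DIR ?= /usr/lib/erlang/lib/erl_interface-4.0.1/lib
+#   ERTS_DIR ?= /usr/lib/erlang/lib/erts-11.1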
+
+distclean:: distclean-c_src-env
+
+distclean-c_src-env:
+ $(gen_verbose) rm -f $(C_SRC_ENV)
+
+-include $(C_SRC_ENV)
+
+ifneq ($(ERL_ERTS_DIR),$(ERTS_DIR))
+$(shell rm -f $(C_SRC_ENV))
+endif
+endif
+
+# Templates.
+
+define bs_c_nif
+#include "erl_nif.h"
+
+static int loads = 0;
+
+static int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
+{
+ /* Initialize private data. */
+ *priv_data = NULL;
+
+ loads++;
+
+ return 0;
+}
+
+static int upgrade(ErlNifEnv* env, void** priv_data, void** old_priv_data, ERL_NIF_TERM load_info)
+{
+ /* Convert the private data to the new version. */
+ *priv_data = *old_priv_data;
+
+ loads++;
+
+ return 0;
+}
+
+static void unload(ErlNifEnv* env, void* priv_data)
+{
+ if (loads == 1) {
+ /* Destroy the private data. */
+ }
+
+ loads--;
+}
+
+static ERL_NIF_TERM hello(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ if (enif_is_atom(env, argv[0])) {
+ return enif_make_tuple2(env,
+ enif_make_atom(env, "hello"),
+ argv[0]);
+ }
+
+ return enif_make_tuple2(env,
+ enif_make_atom(env, "error"),
+ enif_make_atom(env, "badarg"));
+}
+
+static ErlNifFunc nif_funcs[] = {
+ {"hello", 1, hello}
+};
+
+ERL_NIF_INIT($n, nif_funcs, load, NULL, upgrade, unload)
+endef
+
+define bs_erl_nif
+-module($n).
+
+-export([hello/1]).
+
+-on_load(on_load/0).
+on_load() ->
+ PrivDir = case code:priv_dir(?MODULE) of
+ {error, _} ->
+ AppPath = filename:dirname(filename:dirname(code:which(?MODULE))),
+ filename:join(AppPath, "priv");
+ Path ->
+ Path
+ end,
+ erlang:load_nif(filename:join(PrivDir, atom_to_list(?MODULE)), 0).
+
+hello(_) ->
+ erlang:nif_error({not_loaded, ?MODULE}).
+endef
+
+new-nif:
+ifneq ($(wildcard $(C_SRC_DIR)/$n.c),)
+ $(error Error: $(C_SRC_DIR)/$n.c already exists)
+endif
+ifneq ($(wildcard src/$n.erl),)
+ $(error Error: src/$n.erl already exists)
+endif
+ifndef n
+ $(error Usage: $(MAKE) new-nif n=NAME [in=APP])
+endif
+ifdef in
+ $(verbose) $(MAKE) -C $(APPS_DIR)/$(in)/ new-nif n=$n in=
+else
+ $(verbose) mkdir -p $(C_SRC_DIR) src/
+ $(verbose) $(call core_render,bs_c_nif,$(C_SRC_DIR)/$n.c)
+ $(verbose) $(call core_render,bs_erl_nif,src/$n.erl)
+endif
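+
+# Usage sketch (the NIF name is hypothetical): scaffold, build, then call
+# the stub generated from the templates above:
+#   $ make new-nif n=my_nif
+#   $ make
+#   $ erl -noshell -pa ebin -eval '{hello, world} = my_nif:hello(world), init:stop().'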
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: ci ci-prepare ci-setup
+
+CI_OTP ?=
+CI_HIPE ?=
+CI_ERLLVM ?=
+
+ifeq ($(CI_VM),native)
+ERLC_OPTS += +native
+TEST_ERLC_OPTS += +native
+else ifeq ($(CI_VM),erllvm)
+ERLC_OPTS += +native +'{hipe, [to_llvm]}'
+TEST_ERLC_OPTS += +native +'{hipe, [to_llvm]}'
+endif
+
+ifeq ($(strip $(CI_OTP) $(CI_HIPE) $(CI_ERLLVM)),)
+ci::
+else
+
+ci:: $(addprefix ci-,$(CI_OTP) $(addsuffix -native,$(CI_HIPE)) $(addsuffix -erllvm,$(CI_ERLLVM)))
+
+ci-prepare: $(addprefix $(KERL_INSTALL_DIR)/,$(CI_OTP) $(addsuffix -native,$(CI_HIPE)))
+
+ci-setup::
+ $(verbose) :
+
+ci-extra::
+ $(verbose) :
+
+ci_verbose_0 = @echo " CI " $(1);
+ci_verbose = $(ci_verbose_$(V))
+
+define ci_target
+ci-$1: $(KERL_INSTALL_DIR)/$2
+ $(verbose) $(MAKE) --no-print-directory clean
+ $(ci_verbose) \
+ PATH="$(KERL_INSTALL_DIR)/$2/bin:$(PATH)" \
+ CI_OTP_RELEASE="$1" \
+ CT_OPTS="-label $1" \
+ CI_VM="$3" \
+ $(MAKE) ci-setup tests
+ $(verbose) $(MAKE) --no-print-directory ci-extra
+endef
+
+$(foreach otp,$(CI_OTP),$(eval $(call ci_target,$(otp),$(otp),otp)))
+$(foreach otp,$(CI_HIPE),$(eval $(call ci_target,$(otp)-native,$(otp)-native,native)))
+$(foreach otp,$(CI_ERLLVM),$(eval $(call ci_target,$(otp)-erllvm,$(otp)-native,erllvm)))
+
+$(foreach otp,$(filter-out $(ERLANG_OTP),$(CI_OTP)),$(eval $(call kerl_otp_target,$(otp))))
+$(foreach otp,$(filter-out $(ERLANG_HIPE),$(sort $(CI_HIPE) $(CI_ERLLVM))),$(eval $(call kerl_hipe_target,$(otp))))
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Continuous Integration targets:" \
+ " ci Run '$(MAKE) tests' on all configured Erlang versions." \
+ "" \
+ "The CI_OTP variable must be defined with the Erlang versions" \
+ "that must be tested. For example: CI_OTP = OTP-17.3.4 OTP-17.5.3"
+
+endif
+
+# Copyright (c) 2020, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifdef CONCUERROR_TESTS
+
+.PHONY: concuerror distclean-concuerror
+
+# Configuration
+
+CONCUERROR_LOGS_DIR ?= $(CURDIR)/logs
+CONCUERROR_OPTS ?=
+
+# Core targets.
+
+check:: concuerror
+
+ifndef KEEP_LOGS
+distclean:: distclean-concuerror
+endif
+
+# Plugin-specific targets.
+
+$(ERLANG_MK_TMP)/Concuerror/bin/concuerror: | $(ERLANG_MK_TMP)
+ $(verbose) git clone https://github.com/parapluu/Concuerror $(ERLANG_MK_TMP)/Concuerror
+ $(verbose) $(MAKE) -C $(ERLANG_MK_TMP)/Concuerror
+
+$(CONCUERROR_LOGS_DIR):
+ $(verbose) mkdir -p $(CONCUERROR_LOGS_DIR)
+
+define concuerror_html_report
+<!DOCTYPE html>
+<html lang="en">
+<head>
+<meta charset="utf-8">
+<title>Concuerror HTML report</title>
+</head>
+<body>
+<h1>Concuerror HTML report</h1>
+<p>Generated on $(concuerror_date)</p>
+<ul>
+$(foreach t,$(concuerror_targets),<li><a href="$(t).txt">$(t)</a></li>)
+</ul>
+</body>
+</html>
+endef
+
+concuerror: $(addprefix concuerror-,$(subst :,-,$(CONCUERROR_TESTS)))
+ $(eval concuerror_date := $(shell date))
+ $(eval concuerror_targets := $^)
+ $(verbose) $(call core_render,concuerror_html_report,$(CONCUERROR_LOGS_DIR)/concuerror.html)
+
+define concuerror_target
+.PHONY: concuerror-$1-$2
+
+concuerror-$1-$2: test-build | $(ERLANG_MK_TMP)/Concuerror/bin/concuerror $(CONCUERROR_LOGS_DIR)
+ $(ERLANG_MK_TMP)/Concuerror/bin/concuerror \
+ --pa $(CURDIR)/ebin --pa $(TEST_DIR) \
+ -o $(CONCUERROR_LOGS_DIR)/concuerror-$1-$2.txt \
+ $$(CONCUERROR_OPTS) -m $1 -t $2
+endef
+
+$(foreach test,$(CONCUERROR_TESTS),$(eval $(call concuerror_target,$(firstword $(subst :, ,$(test))),$(lastword $(subst :, ,$(test))))))
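+
+# Example configuration (module and test names are hypothetical); each
+# MODULE:TEST pair gets its own concuerror-MODULE-TEST target and log file:
+#   CONCUERROR_TESTS = my_srv_tests:starts_cleanly my_srv_tests:no_deadlock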
+
+distclean-concuerror:
+ $(gen_verbose) rm -rf $(CONCUERROR_LOGS_DIR)
+
+endif
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: ct apps-ct distclean-ct
+
+# Configuration.
+
+CT_OPTS ?=
+
+ifneq ($(wildcard $(TEST_DIR)),)
+ifndef CT_SUITES
+CT_SUITES := $(sort $(subst _SUITE.erl,,$(notdir $(call core_find,$(TEST_DIR)/,*_SUITE.erl))))
+endif
+endif
+CT_SUITES ?=
+CT_LOGS_DIR ?= $(CURDIR)/logs
+
+# Core targets.
+
+tests:: ct
+
+ifndef KEEP_LOGS
+distclean:: distclean-ct
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Common_test targets:" \
+ " ct Run all the common_test suites for this project" \
+ "" \
+ "All your common_test suites have their associated targets." \
+ "A suite named http_SUITE can be ran using the ct-http target."
+
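+# Example invocations (suite, group and case names are hypothetical):
+#   $ make ct                     # all suites in $(TEST_DIR)
+#   $ make ct-http                # test/http_SUITE.erl only
+#   $ make ct-http t=slow         # one group
+#   $ make ct-http t=slow:get     # one case within one group
+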
+# Plugin-specific targets.
+
+CT_RUN = ct_run \
+ -no_auto_compile \
+ -noinput \
+ -pa $(CURDIR)/ebin $(TEST_DIR) \
+ -dir $(TEST_DIR) \
+ -logdir $(CT_LOGS_DIR)
+
+ifeq ($(CT_SUITES),)
+ct: $(if $(IS_APP)$(ROOT_DIR),,apps-ct)
+else
+# We do not run tests if we are in an apps/* application with no test directory.
+ifneq ($(IS_APP)$(wildcard $(TEST_DIR)),1)
+ct: test-build $(if $(IS_APP)$(ROOT_DIR),,apps-ct)
+ $(verbose) mkdir -p $(CT_LOGS_DIR)
+ $(gen_verbose) $(CT_RUN) -sname ct_$(PROJECT) -suite $(addsuffix _SUITE,$(CT_SUITES)) $(CT_OPTS)
+endif
+endif
+
+ifneq ($(ALL_APPS_DIRS),)
+define ct_app_target
+apps-ct-$1: test-build
+ $$(MAKE) -C $1 ct IS_APP=1
+endef
+
+$(foreach app,$(ALL_APPS_DIRS),$(eval $(call ct_app_target,$(app))))
+
+apps-ct: $(addprefix apps-ct-,$(ALL_APPS_DIRS))
+endif
+
+ifdef t
+ifeq (,$(findstring :,$t))
+CT_EXTRA = -group $t
+else
+t_words = $(subst :, ,$t)
+CT_EXTRA = -group $(firstword $(t_words)) -case $(lastword $(t_words))
+endif
+else
+ifdef c
+CT_EXTRA = -case $c
+else
+CT_EXTRA =
+endif
+endif
+
+define ct_suite_target
+ct-$(1): test-build
+ $(verbose) mkdir -p $(CT_LOGS_DIR)
+ $(gen_verbose_esc) $(CT_RUN) -sname ct_$(PROJECT) -suite $(addsuffix _SUITE,$(1)) $(CT_EXTRA) $(CT_OPTS)
+endef
+
+$(foreach test,$(CT_SUITES),$(eval $(call ct_suite_target,$(test))))
+
+distclean-ct:
+ $(gen_verbose) rm -rf $(CT_LOGS_DIR)
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: plt distclean-plt dialyze
+
+# Configuration.
+
+DIALYZER_PLT ?= $(CURDIR)/.$(PROJECT).plt
+export DIALYZER_PLT
+
+PLT_APPS ?=
+DIALYZER_DIRS ?= --src -r $(wildcard src) $(ALL_APPS_DIRS)
+DIALYZER_OPTS ?= -Werror_handling -Wrace_conditions -Wunmatched_returns # -Wunderspecs
+DIALYZER_PLT_OPTS ?=
+
+# Core targets.
+
+check:: dialyze
+
+distclean:: distclean-plt
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Dialyzer targets:" \
+ " plt Build a PLT file for this project" \
+ " dialyze Analyze the project using Dialyzer"
+
+# Plugin-specific targets.
+
+define filter_opts.erl
+ Opts = init:get_plain_arguments(),
+ {Filtered, _} = lists:foldl(fun
+ (O, {Os, true}) -> {[O|Os], false};
+ (O = "-D", {Os, _}) -> {[O|Os], true};
+ (O = [\\$$-, \\$$D, _ | _], {Os, _}) -> {[O|Os], false};
+ (O = "-I", {Os, _}) -> {[O|Os], true};
+ (O = [\\$$-, \\$$I, _ | _], {Os, _}) -> {[O|Os], false};
+ (O = "-pa", {Os, _}) -> {[O|Os], true};
+ (_, Acc) -> Acc
+ end, {[], false}, Opts),
+ io:format("~s~n", [string:join(lists:reverse(Filtered), " ")]),
+ halt().
+endef
+
+# DIALYZER_PLT is a variable understood directly by Dialyzer.
+#
+# We append the path to erts at the end of the PLT. This works
+# because the PLT file is in the external term format and the
+# function binary_to_term/1 ignores any trailing data.
+$(DIALYZER_PLT): deps app
+ $(eval DEPS_LOG := $(shell test -f $(ERLANG_MK_TMP)/deps.log && \
+ while read p; do test -d $$p/ebin && echo $$p/ebin; done <$(ERLANG_MK_TMP)/deps.log))
+ $(verbose) dialyzer --build_plt $(DIALYZER_PLT_OPTS) --apps \
+ erts kernel stdlib $(PLT_APPS) $(OTP_DEPS) $(LOCAL_DEPS) $(DEPS_LOG) || test $$? -eq 2
+ $(verbose) $(ERL) -eval 'io:format("~n~s~n", [code:lib_dir(erts)]), halt().' >> $@
+
+plt: $(DIALYZER_PLT)
+
+distclean-plt:
+ $(gen_verbose) rm -f $(DIALYZER_PLT)
+
+ifneq ($(wildcard $(DIALYZER_PLT)),)
+dialyze: $(if $(filter --src,$(DIALYZER_DIRS)),,deps app)
+ $(verbose) if ! tail -n1 $(DIALYZER_PLT) | \
+ grep -q "^`$(ERL) -eval 'io:format("~s", [code:lib_dir(erts)]), halt().'`$$"; then \
+ rm $(DIALYZER_PLT); \
+ $(MAKE) plt; \
+ fi
+else
+dialyze: $(DIALYZER_PLT)
+endif
+ $(verbose) dialyzer --no_native `$(ERL) \
+ -eval "$(subst $(newline),,$(call escape_dquotes,$(call filter_opts.erl)))" \
+ -extra $(ERLC_OPTS)` $(DIALYZER_DIRS) $(DIALYZER_OPTS) $(if $(wildcard ebin/),-pa ebin/)
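+
+# Typical usage (the extra applications depend on the project): list the
+# OTP applications the code calls into, then build the PLT and analyze:
+#   PLT_APPS = crypto public_key ssl
+#   $ make plt dialyze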
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-edoc edoc
+
+# Configuration.
+
+EDOC_OPTS ?=
+EDOC_SRC_DIRS ?=
+EDOC_OUTPUT ?= doc
+
+define edoc.erl
+ SrcPaths = lists:foldl(fun(P, Acc) ->
+ filelib:wildcard(atom_to_list(P) ++ "/{src,c_src}") ++ Acc
+ end, [], [$(call comma_list,$(patsubst %,'%',$(call core_native_path,$(EDOC_SRC_DIRS))))]),
+ DefaultOpts = [{dir, "$(EDOC_OUTPUT)"}, {source_path, SrcPaths}, {subpackages, false}],
+ edoc:application($(1), ".", [$(2)] ++ DefaultOpts),
+ halt(0).
+endef
+
+# Core targets.
+
+ifneq ($(strip $(EDOC_SRC_DIRS)$(wildcard doc/overview.edoc)),)
+docs:: edoc
+endif
+
+distclean:: distclean-edoc
+
+# Plugin-specific targets.
+
+edoc: distclean-edoc doc-deps
+ $(gen_verbose) $(call erlang,$(call edoc.erl,$(PROJECT),$(EDOC_OPTS)))
+
+distclean-edoc:
+ $(gen_verbose) rm -f $(EDOC_OUTPUT)/*.css $(EDOC_OUTPUT)/*.html $(EDOC_OUTPUT)/*.png $(EDOC_OUTPUT)/edoc-info
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Configuration.
+
+DTL_FULL_PATH ?=
+DTL_PATH ?= templates/
+DTL_PREFIX ?=
+DTL_SUFFIX ?= _dtl
+DTL_OPTS ?=
+
+# Verbosity.
+
+dtl_verbose_0 = @echo " DTL " $(filter %.dtl,$(?F));
+dtl_verbose = $(dtl_verbose_$(V))
+
+# Core targets.
+
+DTL_PATH := $(abspath $(DTL_PATH))
+DTL_FILES := $(sort $(call core_find,$(DTL_PATH),*.dtl))
+
+ifneq ($(DTL_FILES),)
+
+DTL_NAMES = $(addprefix $(DTL_PREFIX),$(addsuffix $(DTL_SUFFIX),$(DTL_FILES:$(DTL_PATH)/%.dtl=%)))
+DTL_MODULES = $(if $(DTL_FULL_PATH),$(subst /,_,$(DTL_NAMES)),$(notdir $(DTL_NAMES)))
+BEAM_FILES += $(addsuffix .beam,$(addprefix ebin/,$(DTL_MODULES)))
+
+ifneq ($(words $(DTL_FILES)),0)
+# Rebuild templates when the Makefile changes.
+$(ERLANG_MK_TMP)/last-makefile-change-erlydtl: $(MAKEFILE_LIST) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(DTL_FILES); \
+ fi
+ $(verbose) touch $@
+
+ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change-erlydtl
+endif
+
+define erlydtl_compile.erl
+ [begin
+ Module0 = case "$(strip $(DTL_FULL_PATH))" of
+ "" ->
+ filename:basename(F, ".dtl");
+ _ ->
+ "$(call core_native_path,$(DTL_PATH))/" ++ F2 = filename:rootname(F, ".dtl"),
+ re:replace(F2, "/", "_", [{return, list}, global])
+ end,
+ Module = list_to_atom("$(DTL_PREFIX)" ++ string:to_lower(Module0) ++ "$(DTL_SUFFIX)"),
+ case erlydtl:compile(F, Module, [$(DTL_OPTS)] ++ [{out_dir, "ebin/"}, return_errors]) of
+ ok -> ok;
+ {ok, _} -> ok
+ end
+ end || F <- string:tokens("$(1)", " ")],
+ halt().
+endef
+
+ebin/$(PROJECT).app:: $(DTL_FILES) | ebin/
+ $(if $(strip $?),\
+ $(dtl_verbose) $(call erlang,$(call erlydtl_compile.erl,$(call core_native_path,$?)),\
+ -pa ebin/))
+
+endif
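+
+# For illustration (template name is hypothetical): with the defaults
+# above, templates/greeting.dtl compiles to ebin/greeting_dtl.beam and is
+# rendered like any ErlyDTL module:
+#   {ok, Html} = greeting_dtl:render([{name, "Joe"}]).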
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, Dave Cottlehuber <dch@skunkwerks.at>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-escript escript escript-zip
+
+# Configuration.
+
+ESCRIPT_NAME ?= $(PROJECT)
+ESCRIPT_FILE ?= $(ESCRIPT_NAME)
+
+ESCRIPT_SHEBANG ?= /usr/bin/env escript
+ESCRIPT_COMMENT ?= This is an -*- erlang -*- file
+ESCRIPT_EMU_ARGS ?= -escript main $(ESCRIPT_NAME)
+
+ESCRIPT_ZIP ?= 7z a -tzip -mx=9 -mtc=off $(if $(filter-out 0,$(V)),,> /dev/null)
+ESCRIPT_ZIP_FILE ?= $(ERLANG_MK_TMP)/escript.zip
+
+# Core targets.
+
+distclean:: distclean-escript
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Escript targets:" \
+ " escript Build an executable escript archive" \
+
+# Plugin-specific targets.
+
+escript-zip:: FULL=1
+escript-zip:: deps app
+ $(verbose) mkdir -p $(dir $(ESCRIPT_ZIP))
+ $(verbose) rm -f $(ESCRIPT_ZIP_FILE)
+ $(gen_verbose) cd .. && $(ESCRIPT_ZIP) $(ESCRIPT_ZIP_FILE) $(PROJECT)/ebin/*
+ifneq ($(DEPS),)
+ $(verbose) cd $(DEPS_DIR) && $(ESCRIPT_ZIP) $(ESCRIPT_ZIP_FILE) \
+ $(subst $(DEPS_DIR)/,,$(addsuffix /*,$(wildcard \
+ $(addsuffix /ebin,$(shell cat $(ERLANG_MK_TMP)/deps.log)))))
+endif
+
+escript:: escript-zip
+ $(gen_verbose) printf "%s\n" \
+ "#!$(ESCRIPT_SHEBANG)" \
+ "%% $(ESCRIPT_COMMENT)" \
+ "%%! $(ESCRIPT_EMU_ARGS)" > $(ESCRIPT_FILE)
+ $(verbose) cat $(ESCRIPT_ZIP_FILE) >> $(ESCRIPT_FILE)
+ $(verbose) chmod +x $(ESCRIPT_FILE)
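+
+# The resulting escript is the three header lines followed by the raw zip
+# data, e.g. (project name hypothetical):
+#   #!/usr/bin/env escript
+#   %% This is an -*- erlang -*- file
+#   %%! -escript main my_app
+#   <zip archive bytes>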
+
+distclean-escript:
+ $(gen_verbose) rm -f $(ESCRIPT_FILE)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, Enrique Fernandez <enrique.fernandez@erlang-solutions.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: eunit apps-eunit
+
+# Configuration
+
+EUNIT_OPTS ?=
+EUNIT_ERL_OPTS ?=
+
+# Core targets.
+
+tests:: eunit
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "EUnit targets:" \
+ " eunit Run all the EUnit tests for this project"
+
+# Plugin-specific targets.
+
+define eunit.erl
+ $(call cover.erl)
+ CoverSetup(),
+ case eunit:test($1, [$(EUNIT_OPTS)]) of
+ ok -> ok;
+ error -> halt(2)
+ end,
+ CoverExport("$(call core_native_path,$(COVER_DATA_DIR))/eunit.coverdata"),
+ halt()
+endef
+
+EUNIT_ERL_OPTS += -pa $(TEST_DIR) $(CURDIR)/ebin
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+eunit: test-build cover-data-dir
+ $(gen_verbose) $(call erlang,$(call eunit.erl,['$(t)']),$(EUNIT_ERL_OPTS))
+else
+eunit: test-build cover-data-dir
+ $(gen_verbose) $(call erlang,$(call eunit.erl,fun $(t)/0),$(EUNIT_ERL_OPTS))
+endif
+else
+EUNIT_EBIN_MODS = $(notdir $(basename $(ERL_FILES) $(BEAM_FILES)))
+EUNIT_TEST_MODS = $(notdir $(basename $(call core_find,$(TEST_DIR)/,*.erl)))
+
+EUNIT_MODS = $(foreach mod,$(EUNIT_EBIN_MODS) $(filter-out \
+ $(patsubst %,%_tests,$(EUNIT_EBIN_MODS)),$(EUNIT_TEST_MODS)),'$(mod)')
+
+eunit: test-build $(if $(IS_APP)$(ROOT_DIR),,apps-eunit) cover-data-dir
+ifneq ($(wildcard src/ $(TEST_DIR)),)
+ $(gen_verbose) $(call erlang,$(call eunit.erl,[$(call comma_list,$(EUNIT_MODS))]),$(EUNIT_ERL_OPTS))
+endif
+
+ifneq ($(ALL_APPS_DIRS),)
+apps-eunit: test-build
+ $(verbose) eunit_retcode=0 ; for app in $(ALL_APPS_DIRS); do $(MAKE) -C $$app eunit IS_APP=1; \
+ [ $$? -ne 0 ] && eunit_retcode=1 ; done ; \
+ exit $$eunit_retcode
+endif
+endif
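+
+# Example invocations (module and test names are hypothetical):
+#   $ make eunit                       # all EUnit tests
+#   $ make eunit t=my_mod              # tests for one module
+#   $ make eunit t=my_mod:empty_test   # a single test function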
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter proper,$(DEPS) $(TEST_DEPS)),proper)
+.PHONY: proper
+
+# Targets.
+
+tests:: proper
+
+define proper_check.erl
+ $(call cover.erl)
+ code:add_pathsa([
+ "$(call core_native_path,$(CURDIR)/ebin)",
+ "$(call core_native_path,$(DEPS_DIR)/*/ebin)",
+ "$(call core_native_path,$(TEST_DIR))"]),
+ Module = fun(M) ->
+ [true] =:= lists:usort([
+ case atom_to_list(F) of
+ "prop_" ++ _ ->
+ io:format("Testing ~p:~p/0~n", [M, F]),
+ proper:quickcheck(M:F(), nocolors);
+ _ ->
+ true
+ end
+ || {F, 0} <- M:module_info(exports)])
+ end,
+ try begin
+ CoverSetup(),
+ Res = case $(1) of
+ all -> [true] =:= lists:usort([Module(M) || M <- [$(call comma_list,$(3))]]);
+ module -> Module($(2));
+ function -> proper:quickcheck($(2), nocolors)
+ end,
+ CoverExport("$(COVER_DATA_DIR)/proper.coverdata"),
+ Res
+ end of
+ true -> halt(0);
+ _ -> halt(1)
+ catch error:undef ->
+ io:format("Undefined property or module?~n~p~n", [erlang:get_stacktrace()]),
+ halt(0)
+ end.
+endef
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+proper: test-build cover-data-dir
+ $(verbose) $(call erlang,$(call proper_check.erl,module,$(t)))
+else
+proper: test-build cover-data-dir
+ $(verbose) echo Testing $(t)/0
+ $(verbose) $(call erlang,$(call proper_check.erl,function,$(t)()))
+endif
+else
+proper: test-build cover-data-dir
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(wildcard ebin/*.beam) $(call core_find,$(TEST_DIR)/,*.beam))))))
+ $(gen_verbose) $(call erlang,$(call proper_check.erl,all,undefined,$(MODULES)))
+endif
+endif
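+
+# Properties are discovered through the prop_ prefix matched above; a
+# minimal sketch of such a module (name and property are hypothetical):
+#   -module(prop_lists_checks).
+#   -include_lib("proper/include/proper.hrl").
+#   -export([prop_reverse_twice/0]).
+#   prop_reverse_twice() ->
+#       ?FORALL(L, list(integer()), lists:reverse(lists:reverse(L)) =:= L).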
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Verbosity.
+
+proto_verbose_0 = @echo " PROTO " $(filter %.proto,$(?F));
+proto_verbose = $(proto_verbose_$(V))
+
+# Core targets.
+
+ifneq ($(wildcard src/),)
+ifneq ($(filter gpb protobuffs,$(BUILD_DEPS) $(DEPS)),)
+PROTO_FILES := $(filter %.proto,$(ALL_SRC_FILES))
+ERL_FILES += $(addprefix src/,$(patsubst %.proto,%_pb.erl,$(notdir $(PROTO_FILES))))
+
+ifeq ($(PROTO_FILES),)
+$(ERLANG_MK_TMP)/last-makefile-change-protobuffs:
+ $(verbose) :
+else
+# Rebuild proto files when the Makefile changes.
+# We exclude $(PROJECT).d to avoid a circular dependency.
+$(ERLANG_MK_TMP)/last-makefile-change-protobuffs: $(filter-out $(PROJECT).d,$(MAKEFILE_LIST)) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(PROTO_FILES); \
+ fi
+ $(verbose) touch $@
+
+$(PROJECT).d:: $(ERLANG_MK_TMP)/last-makefile-change-protobuffs
+endif
+
+ifeq ($(filter gpb,$(BUILD_DEPS) $(DEPS)),)
+define compile_proto.erl
+ [begin
+ protobuffs_compile:generate_source(F, [
+ {output_include_dir, "./include"},
+ {output_src_dir, "./src"}])
+ end || F <- string:tokens("$1", " ")],
+ halt().
+endef
+else
+define compile_proto.erl
+ [begin
+ gpb_compile:file(F, [
+ {include_as_lib, true},
+ {module_name_suffix, "_pb"},
+ {o_hrl, "./include"},
+ {o_erl, "./src"}])
+ end || F <- string:tokens("$1", " ")],
+ halt().
+endef
+endif
+
+ifneq ($(PROTO_FILES),)
+$(PROJECT).d:: $(PROTO_FILES)
+ $(verbose) mkdir -p ebin/ include/
+ $(if $(strip $?),$(proto_verbose) $(call erlang,$(call compile_proto.erl,$?)))
+endif
+endif
+endif
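+
+# For illustration (schema and message names are hypothetical): with gpb
+# in DEPS, src/person.proto compiles to src/person_pb.erl plus
+# include/person_pb.hrl, used as:
+#   Bin = person_pb:encode_msg(#'Person'{name = "Joe"}),
+#   Msg = person_pb:decode_msg(Bin, 'Person').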
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: relx-rel relx-relup distclean-relx-rel run
+
+# Configuration.
+
+RELX ?= $(ERLANG_MK_TMP)/relx
+RELX_CONFIG ?= $(CURDIR)/relx.config
+
+RELX_URL ?= https://erlang.mk/res/relx-v3.27.0
+RELX_OPTS ?=
+RELX_OUTPUT_DIR ?= _rel
+RELX_REL_EXT ?=
+RELX_TAR ?= 1
+
+ifdef SFX
+ RELX_TAR = 1
+endif
+
+ifeq ($(firstword $(RELX_OPTS)),-o)
+ RELX_OUTPUT_DIR = $(word 2,$(RELX_OPTS))
+else
+ RELX_OPTS += -o $(RELX_OUTPUT_DIR)
+endif
+
+# Core targets.
+
+ifeq ($(IS_DEP),)
+ifneq ($(wildcard $(RELX_CONFIG)),)
+rel:: relx-rel
+
+relup:: relx-relup
+endif
+endif
+
+distclean:: distclean-relx-rel
+
+# Plugin-specific targets.
+
+$(RELX): | $(ERLANG_MK_TMP)
+ $(gen_verbose) $(call core_http_get,$(RELX),$(RELX_URL))
+ $(verbose) chmod +x $(RELX)
+
+relx-rel: $(RELX) rel-deps app
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) release
+ $(verbose) $(MAKE) relx-post-rel
+ifeq ($(RELX_TAR),1)
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) tar
+endif
+
+relx-relup: $(RELX) rel-deps app
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) release
+ $(MAKE) relx-post-rel
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) relup $(if $(filter 1,$(RELX_TAR)),tar)
+
+distclean-relx-rel:
+ $(gen_verbose) rm -rf $(RELX_OUTPUT_DIR)
+
+# Default hooks.
+relx-post-rel::
+ $(verbose) :
+
+# Run target.
+
+ifeq ($(wildcard $(RELX_CONFIG)),)
+run::
+else
+
+define get_relx_release.erl
+ {ok, Config} = file:consult("$(call core_native_path,$(RELX_CONFIG))"),
+ {release, {Name, Vsn0}, _} = lists:keyfind(release, 1, Config),
+ Vsn = case Vsn0 of
+ {cmd, Cmd} -> os:cmd(Cmd);
+ semver -> "";
+ {semver, _} -> "";
+ _ -> Vsn0
+ end,
+ Extended = case lists:keyfind(extended_start_script, 1, Config) of
+ {_, true} -> "1";
+ _ -> ""
+ end,
+ io:format("~s ~s ~s", [Name, Vsn, Extended]),
+ halt(0).
+endef
+
+RELX_REL := $(shell $(call erlang,$(get_relx_release.erl)))
+RELX_REL_NAME := $(word 1,$(RELX_REL))
+RELX_REL_VSN := $(word 2,$(RELX_REL))
+RELX_REL_CMD := $(if $(word 3,$(RELX_REL)),console)
+
+ifeq ($(PLATFORM),msys2)
+RELX_REL_EXT := .cmd
+endif
+
+run:: all
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) $(RELX_REL_CMD)
+
+ifdef RELOAD
+rel::
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) ping
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) \
+ eval "io:format(\"~p~n\", [c:lm()])"
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Relx targets:" \
+ " run Compile the project, build the release and run it"
+
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, M Robert Martin <rob@version2beta.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: shell
+
+# Configuration.
+
+SHELL_ERL ?= erl
+SHELL_PATHS ?= $(CURDIR)/ebin $(TEST_DIR)
+SHELL_OPTS ?=
+
+ALL_SHELL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(SHELL_DEPS))
+
+# Core targets
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Shell targets:" \
+ " shell Run an erlang shell with SHELL_OPTS or reasonable default"
+
+# Plugin-specific targets.
+
+$(foreach dep,$(SHELL_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+build-shell-deps:
+else
+build-shell-deps: $(ALL_SHELL_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_SHELL_DEPS_DIRS) ; do \
+ if [ -z "$(strip $(FULL))" ] && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ else \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ fi \
+ done
+endif
+
+shell:: build-shell-deps
+ $(gen_verbose) $(SHELL_ERL) -pa $(SHELL_PATHS) $(SHELL_OPTS)
+
+# Copyright 2017, Stanislaw Klekot <dozzie@jarowit.net>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-sphinx sphinx
+
+# Configuration.
+
+SPHINX_BUILD ?= sphinx-build
+SPHINX_SOURCE ?= doc
+SPHINX_CONFDIR ?=
+SPHINX_FORMATS ?= html
+SPHINX_DOCTREES ?= $(ERLANG_MK_TMP)/sphinx.doctrees
+SPHINX_OPTS ?=
+
+#sphinx_html_opts =
+#sphinx_html_output = html
+#sphinx_man_opts =
+#sphinx_man_output = man
+#sphinx_latex_opts =
+#sphinx_latex_output = latex
+
+# Helpers.
+
+sphinx_build_0 = @echo " SPHINX" $1; $(SPHINX_BUILD) -N -q
+sphinx_build_1 = $(SPHINX_BUILD) -N
+sphinx_build_2 = set -x; $(SPHINX_BUILD)
+sphinx_build = $(sphinx_build_$(V))
+
+define sphinx.build
+$(call sphinx_build,$1) -b $1 -d $(SPHINX_DOCTREES) $(if $(SPHINX_CONFDIR),-c $(SPHINX_CONFDIR)) $(SPHINX_OPTS) $(sphinx_$1_opts) -- $(SPHINX_SOURCE) $(call sphinx.output,$1)
+
+endef
+
+define sphinx.output
+$(if $(sphinx_$1_output),$(sphinx_$1_output),$1)
+endef
+
+# Targets.
+
+ifneq ($(wildcard $(if $(SPHINX_CONFDIR),$(SPHINX_CONFDIR),$(SPHINX_SOURCE))/conf.py),)
+docs:: sphinx
+distclean:: distclean-sphinx
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Sphinx targets:" \
+ " sphinx Generate Sphinx documentation." \
+ "" \
+ "ReST sources and 'conf.py' file are expected in directory pointed by" \
+ "SPHINX_SOURCE ('doc' by default). SPHINX_FORMATS lists formats to build (only" \
+ "'html' format is generated by default); target directory can be specified by" \
+ 'setting sphinx_$${format}_output, for example: sphinx_html_output = output/html' \
+ "Additional Sphinx options can be set in SPHINX_OPTS."
+
+# Plugin-specific targets.
+
+sphinx:
+ $(foreach F,$(SPHINX_FORMATS),$(call sphinx.build,$F))
+
+distclean-sphinx:
+ $(gen_verbose) rm -rf $(filter-out $(SPHINX_SOURCE),$(foreach F,$(SPHINX_FORMATS),$(call sphinx.output,$F)))
+
+# Copyright (c) 2017, Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: show-ERL_LIBS show-ERLC_OPTS show-TEST_ERLC_OPTS
+
+show-ERL_LIBS:
+ @echo $(ERL_LIBS)
+
+show-ERLC_OPTS:
+ @$(foreach opt,$(ERLC_OPTS) -pa ebin -I include,echo "$(opt)";)
+
+show-TEST_ERLC_OPTS:
+ @$(foreach opt,$(TEST_ERLC_OPTS) -pa ebin -I include,echo "$(opt)";)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter triq,$(DEPS) $(TEST_DEPS)),triq)
+.PHONY: triq
+
+# Targets.
+
+tests:: triq
+
+define triq_check.erl
+ $(call cover.erl)
+ code:add_pathsa([
+ "$(call core_native_path,$(CURDIR)/ebin)",
+ "$(call core_native_path,$(DEPS_DIR)/*/ebin)",
+ "$(call core_native_path,$(TEST_DIR))"]),
+ try begin
+ CoverSetup(),
+ Res = case $(1) of
+ all -> [true] =:= lists:usort([triq:check(M) || M <- [$(call comma_list,$(3))]]);
+ module -> triq:check($(2));
+ function -> triq:check($(2))
+ end,
+ CoverExport("$(COVER_DATA_DIR)/triq.coverdata"),
+ Res
+ end of
+ true -> halt(0);
+ _ -> halt(1)
+ catch error:undef ->
+ io:format("Undefined property or module?~n~p~n", [erlang:get_stacktrace()]),
+ halt(0)
+ end.
+endef
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+triq: test-build cover-data-dir
+ $(verbose) $(call erlang,$(call triq_check.erl,module,$(t)))
+else
+triq: test-build cover-data-dir
+ $(verbose) echo Testing $(t)/0
+ $(verbose) $(call erlang,$(call triq_check.erl,function,$(t)()))
+endif
+else
+triq: test-build cover-data-dir
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(wildcard ebin/*.beam) $(call core_find,$(TEST_DIR)/,*.beam))))))
+ $(gen_verbose) $(call erlang,$(call triq_check.erl,all,undefined,$(MODULES)))
+endif
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Erlang Solutions Ltd.
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: xref distclean-xref
+
+# Configuration.
+
+ifeq ($(XREF_CONFIG),)
+ XREFR_ARGS :=
+else
+ XREFR_ARGS := -c $(XREF_CONFIG)
+endif
+
+XREFR ?= $(CURDIR)/xrefr
+export XREFR
+
+XREFR_URL ?= https://github.com/inaka/xref_runner/releases/download/1.1.0/xrefr
+
+# Core targets.
+
+help::
+ $(verbose) printf '%s\n' '' \
+ 'Xref targets:' \
+ ' xref Run Xrefr using $$XREF_CONFIG as config file if defined'
+
+distclean:: distclean-xref
+
+# Plugin-specific targets.
+
+$(XREFR):
+ $(gen_verbose) $(call core_http_get,$(XREFR),$(XREFR_URL))
+ $(verbose) chmod +x $(XREFR)
+
+xref: deps app $(XREFR)
+ $(gen_verbose) $(XREFR) $(XREFR_ARGS)
+
+distclean-xref:
+ $(gen_verbose) rm -rf $(XREFR)
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Viktor Söderqvist <viktor@zuiderkwast.se>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+COVER_REPORT_DIR ?= cover
+COVER_DATA_DIR ?= $(COVER_REPORT_DIR)
+
+ifdef COVER
+COVER_APPS ?= $(notdir $(ALL_APPS_DIRS))
+COVER_DEPS ?=
+endif
+
+# Code coverage for Common Test.
+
+ifdef COVER
+ifdef CT_RUN
+ifneq ($(wildcard $(TEST_DIR)),)
+test-build:: $(TEST_DIR)/ct.cover.spec
+
+$(TEST_DIR)/ct.cover.spec: cover-data-dir
+ $(gen_verbose) printf "%s\n" \
+ "{incl_app, '$(PROJECT)', details}." \
+ "{incl_dirs, '$(PROJECT)', [\"$(call core_native_path,$(CURDIR)/ebin)\" \
+ $(foreach a,$(COVER_APPS),$(comma) \"$(call core_native_path,$(APPS_DIR)/$a/ebin)\") \
+ $(foreach d,$(COVER_DEPS),$(comma) \"$(call core_native_path,$(DEPS_DIR)/$d/ebin)\")]}." \
+ '{export,"$(call core_native_path,$(abspath $(COVER_DATA_DIR))/ct.coverdata)"}.' > $@
+
+CT_RUN += -cover $(TEST_DIR)/ct.cover.spec
+endif
+endif
+endif
+
+# Code coverage for other tools.
+
+ifdef COVER
+define cover.erl
+ CoverSetup = fun() ->
+ Dirs = ["$(call core_native_path,$(CURDIR)/ebin)"
+ $(foreach a,$(COVER_APPS),$(comma) "$(call core_native_path,$(APPS_DIR)/$a/ebin)")
+ $(foreach d,$(COVER_DEPS),$(comma) "$(call core_native_path,$(DEPS_DIR)/$d/ebin)")],
+ [begin
+ case filelib:is_dir(Dir) of
+ false -> false;
+ true ->
+ case cover:compile_beam_directory(Dir) of
+ {error, _} -> halt(1);
+ _ -> true
+ end
+ end
+ end || Dir <- Dirs]
+ end,
+ CoverExport = fun(Filename) -> cover:export(Filename) end,
+endef
+else
+define cover.erl
+ CoverSetup = fun() -> ok end,
+ CoverExport = fun(_) -> ok end,
+endef
+endif
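+
+# Typical workflow: run the suites with coverage enabled, then merge the
+# per-tool coverdata files into a single one:
+#   $ make tests COVER=1
+#   $ make all.coverdata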
+
+# Core targets
+
+ifdef COVER
+ifneq ($(COVER_REPORT_DIR),)
+tests::
+ $(verbose) $(MAKE) --no-print-directory cover-report
+endif
+
+cover-data-dir: | $(COVER_DATA_DIR)
+
+$(COVER_DATA_DIR):
+ $(verbose) mkdir -p $(COVER_DATA_DIR)
+else
+cover-data-dir:
+endif
+
+clean:: coverdata-clean
+
+ifneq ($(COVER_REPORT_DIR),)
+distclean:: cover-report-clean
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Cover targets:" \
+ " cover-report Generate a HTML coverage report from previously collected" \
+ " cover data." \
+ " all.coverdata Merge all coverdata files into all.coverdata." \
+ "" \
+ "If COVER=1 is set, coverage data is generated by the targets eunit and ct. The" \
+ "target tests additionally generates a HTML coverage report from the combined" \
+ "coverdata files from each of these testing tools. HTML reports can be disabled" \
+ "by setting COVER_REPORT_DIR to empty."
+
+# Plugin specific targets
+
+COVERDATA = $(filter-out $(COVER_DATA_DIR)/all.coverdata,$(wildcard $(COVER_DATA_DIR)/*.coverdata))
+
+.PHONY: coverdata-clean
+coverdata-clean:
+ $(gen_verbose) rm -f $(COVER_DATA_DIR)/*.coverdata $(TEST_DIR)/ct.cover.spec
+
+# Merge all coverdata files into one.
+define cover_export.erl
+ $(foreach f,$(COVERDATA),cover:import("$(f)") == ok orelse halt(1),)
+ cover:export("$(COVER_DATA_DIR)/$@"), halt(0).
+endef
+
+all.coverdata: $(COVERDATA) cover-data-dir
+ $(gen_verbose) $(call erlang,$(cover_export.erl))
+
+# These are only defined if COVER_REPORT_DIR is non-empty. Set COVER_REPORT_DIR to
+# empty if you want the coverdata files but not the HTML report.
+ifneq ($(COVER_REPORT_DIR),)
+
+.PHONY: cover-report-clean cover-report
+
+cover-report-clean:
+ $(gen_verbose) rm -rf $(COVER_REPORT_DIR)
+ifneq ($(COVER_REPORT_DIR),$(COVER_DATA_DIR))
+ $(if $(shell ls -A $(COVER_DATA_DIR)/),,$(verbose) rmdir $(COVER_DATA_DIR))
+endif
+
+ifeq ($(COVERDATA),)
+cover-report:
+else
+
+# Modules which include eunit.hrl always contain one line without coverage
+# because eunit defines test/0 which is never called. We compensate for this.
+EUNIT_HRL_MODS = $(subst $(space),$(comma),$(shell \
+ grep -H -e '^\s*-include.*include/eunit\.hrl"' src/*.erl \
+ | sed "s/^src\/\(.*\)\.erl:.*/'\1'/" | uniq))
+
+define cover_report.erl
+ $(foreach f,$(COVERDATA),cover:import("$(f)") == ok orelse halt(1),)
+ Ms = cover:imported_modules(),
+ [cover:analyse_to_file(M, "$(COVER_REPORT_DIR)/" ++ atom_to_list(M)
+ ++ ".COVER.html", [html]) || M <- Ms],
+ Report = [begin {ok, R} = cover:analyse(M, module), R end || M <- Ms],
+ EunitHrlMods = [$(EUNIT_HRL_MODS)],
+ Report1 = [{M, {Y, case lists:member(M, EunitHrlMods) of
+ true -> N - 1; false -> N end}} || {M, {Y, N}} <- Report],
+ TotalY = lists:sum([Y || {_, {Y, _}} <- Report1]),
+ TotalN = lists:sum([N || {_, {_, N}} <- Report1]),
+ Perc = fun(Y, N) -> case Y + N of 0 -> 100; S -> round(100 * Y / S) end end,
+ TotalPerc = Perc(TotalY, TotalN),
+ {ok, F} = file:open("$(COVER_REPORT_DIR)/index.html", [write]),
+ io:format(F, "<!DOCTYPE html><html>~n"
+ "<head><meta charset=\"UTF-8\">~n"
+ "<title>Coverage report</title></head>~n"
+ "<body>~n", []),
+ io:format(F, "<h1>Coverage</h1>~n<p>Total: ~p%</p>~n", [TotalPerc]),
+ io:format(F, "<table><tr><th>Module</th><th>Coverage</th></tr>~n", []),
+ [io:format(F, "<tr><td><a href=\"~p.COVER.html\">~p</a></td>"
+ "<td>~p%</td></tr>~n",
+ [M, M, Perc(Y, N)]) || {M, {Y, N}} <- Report1],
+ How = "$(subst $(space),$(comma)$(space),$(basename $(COVERDATA)))",
+ Date = "$(shell date -u "+%Y-%m-%dT%H:%M:%SZ")",
+ io:format(F, "</table>~n"
+ "<p>Generated using ~s and erlang.mk on ~s.</p>~n"
+ "</body></html>", [How, Date]),
+ halt().
+endef
+
+cover-report:
+ $(verbose) mkdir -p $(COVER_REPORT_DIR)
+ $(gen_verbose) $(call erlang,$(cover_report.erl))
+
+endif
+endif # ifneq ($(COVER_REPORT_DIR),)
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: sfx
+
+ifdef RELX_REL
+ifdef SFX
+
+# Configuration.
+
+SFX_ARCHIVE ?= $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/$(RELX_REL_NAME)-$(RELX_REL_VSN).tar.gz
+SFX_OUTPUT_FILE ?= $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME).run
+
+# Core targets.
+
+rel:: sfx
+
+# Plugin-specific targets.
+
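+# The stub below is written in front of the release archive; at run time it
+# extracts everything past the __ARCHIVE_BELOW__ marker into a temporary
+# directory and starts the release console from there.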
+define sfx_stub
+#!/bin/sh
+
+TMPDIR=`mktemp -d`
+ARCHIVE=`awk '/^__ARCHIVE_BELOW__$$/ {print NR + 1; exit 0;}' $$0`
+FILENAME=$$(basename $$0)
+REL=$${FILENAME%.*}
+
+tail -n+$$ARCHIVE $$0 | tar -xzf - -C $$TMPDIR
+
+$$TMPDIR/bin/$$REL console
+RET=$$?
+
+rm -rf $$TMPDIR
+
+exit $$RET
+
+__ARCHIVE_BELOW__
+endef
+
+sfx:
+ $(verbose) $(call core_render,sfx_stub,$(SFX_OUTPUT_FILE))
+ $(gen_verbose) cat $(SFX_ARCHIVE) >> $(SFX_OUTPUT_FILE)
+ $(verbose) chmod +x $(SFX_OUTPUT_FILE)
+
+endif
+endif
+
+# Copyright (c) 2013-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# External plugins.
+
+DEP_PLUGINS ?=
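+# For example, "DEP_PLUGINS = cowboy" loads $(DEPS_DIR)/cowboy/plugins.mk,
+# while a value containing a slash, such as "rabbit_common/mk/rabbitmq-plugin.mk",
+# is loaded from the dependency named by its first path component.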
+
+$(foreach p,$(DEP_PLUGINS),\
+ $(eval $(if $(findstring /,$p),\
+ $(call core_dep_plugin,$p,$(firstword $(subst /, ,$p))),\
+ $(call core_dep_plugin,$p/plugins.mk,$p))))
+
+help:: help-plugins
+
+help-plugins::
+ $(verbose) :
+
+# Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015-2016, Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Fetch dependencies recursively (without building them).
+
+.PHONY: fetch-deps fetch-doc-deps fetch-rel-deps fetch-test-deps \
+ fetch-shell-deps
+
+.PHONY: $(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+fetch-deps: $(ERLANG_MK_RECURSIVE_DEPS_LIST)
+fetch-doc-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST)
+fetch-rel-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST)
+fetch-test-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST)
+fetch-shell-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+ifneq ($(SKIP_DEPS),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST):
+ $(verbose) :> $@
+else
+# By default, we fetch "normal" dependencies. They are also included
+# regardless of which type of dependencies is requested.
+#
+# $(ALL_DEPS_DIRS) includes $(BUILD_DEPS).
+
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_DOC_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_REL_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_TEST_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_SHELL_DEPS_DIRS)
+
+# Allow fetch-deps to be used with $(DEP_TYPES) to fetch multiple types of
+# dependencies with a single target.
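+# For example: "make fetch-deps DEP_TYPES='doc test'" also fetches the doc
+# and test dependencies.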
+ifneq ($(filter doc,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_DOC_DEPS_DIRS)
+endif
+ifneq ($(filter rel,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_REL_DEPS_DIRS)
+endif
+ifneq ($(filter test,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_TEST_DEPS_DIRS)
+endif
+ifneq ($(filter shell,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_SHELL_DEPS_DIRS)
+endif
+
+ERLANG_MK_RECURSIVE_TMP_LIST := $(abspath $(ERLANG_MK_TMP)/recursive-tmp-deps-$(shell echo $$PPID).log)
+
+$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): | $(ERLANG_MK_TMP)
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_RECURSIVE_TMP_LIST)
+endif
+ $(verbose) touch $(ERLANG_MK_RECURSIVE_TMP_LIST)
+ $(verbose) set -e; for dep in $^ ; do \
+ if ! grep -qs ^$$dep$$ $(ERLANG_MK_RECURSIVE_TMP_LIST); then \
+ echo $$dep >> $(ERLANG_MK_RECURSIVE_TMP_LIST); \
+ if grep -qs -E "^[[:blank:]]*include[[:blank:]]+(erlang\.mk|.*/erlang\.mk|.*ERLANG_MK_FILENAME.*)$$" \
+ $$dep/GNUmakefile $$dep/makefile $$dep/Makefile; then \
+ $(MAKE) -C $$dep fetch-deps \
+ IS_DEP=1 \
+ ERLANG_MK_RECURSIVE_TMP_LIST=$(ERLANG_MK_RECURSIVE_TMP_LIST); \
+ fi \
+ fi \
+ done
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) sort < $(ERLANG_MK_RECURSIVE_TMP_LIST) | \
+ uniq > $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted
+ $(verbose) cmp -s $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted $@ \
+ || mv $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted $@
+ $(verbose) rm -f $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted
+ $(verbose) rm $(ERLANG_MK_RECURSIVE_TMP_LIST)
+endif
+endif # ifneq ($(SKIP_DEPS),)
+
+# List dependencies recursively.
+
+.PHONY: list-deps list-doc-deps list-rel-deps list-test-deps \
+ list-shell-deps
+
+list-deps: $(ERLANG_MK_RECURSIVE_DEPS_LIST)
+list-doc-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST)
+list-rel-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST)
+list-test-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST)
+list-shell-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+list-deps list-doc-deps list-rel-deps list-test-deps list-shell-deps:
+ $(verbose) cat $^
+
+# Query dependencies recursively.
+
+.PHONY: query-deps query-doc-deps query-rel-deps query-test-deps \
+ query-shell-deps
+
+QUERY ?= name fetch_method repo version
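+# For example: "make query-deps QUERY='name repo'" restricts the output to
+# the name and repository of each dependency.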
+
+define query_target
+$(1): $(2) clean-tmp-query.log
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(4)
+endif
+ $(verbose) $(foreach dep,$(3),\
+ echo $(PROJECT): $(foreach q,$(QUERY),$(call query_$(q),$(dep))) >> $(4) ;)
+ $(if $(filter-out query-deps,$(1)),,\
+ $(verbose) set -e; for dep in $(3) ; do \
+ if grep -qs ^$$$$dep$$$$ $(ERLANG_MK_TMP)/query.log; then \
+ :; \
+ else \
+ echo $$$$dep >> $(ERLANG_MK_TMP)/query.log; \
+ $(MAKE) -C $(DEPS_DIR)/$$$$dep $$@ QUERY="$(QUERY)" IS_DEP=1 || true; \
+ fi \
+ done)
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) touch $(4)
+ $(verbose) cat $(4)
+endif
+endef
+
+clean-tmp-query.log:
+ifeq ($(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_TMP)/query.log
+endif
+
+$(eval $(call query_target,query-deps,$(ERLANG_MK_RECURSIVE_DEPS_LIST),$(BUILD_DEPS) $(DEPS),$(ERLANG_MK_QUERY_DEPS_FILE)))
+$(eval $(call query_target,query-doc-deps,$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST),$(DOC_DEPS),$(ERLANG_MK_QUERY_DOC_DEPS_FILE)))
+$(eval $(call query_target,query-rel-deps,$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST),$(REL_DEPS),$(ERLANG_MK_QUERY_REL_DEPS_FILE)))
+$(eval $(call query_target,query-test-deps,$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST),$(TEST_DEPS),$(ERLANG_MK_QUERY_TEST_DEPS_FILE)))
+$(eval $(call query_target,query-shell-deps,$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST),$(SHELL_DEPS),$(ERLANG_MK_QUERY_SHELL_DEPS_FILE)))
diff --git a/deps/rabbitmq_stomp/examples/perl/rabbitmq_stomp_recv.pl b/deps/rabbitmq_stomp/examples/perl/rabbitmq_stomp_recv.pl
new file mode 100755
index 0000000000..7b8b9cce0c
--- /dev/null
+++ b/deps/rabbitmq_stomp/examples/perl/rabbitmq_stomp_recv.pl
@@ -0,0 +1,13 @@
+#!/usr/bin/perl -w
+# subscribe to messages from the queue 'foo'
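+# (the loop below exits when a message whose body is 'QUIT' is received)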
+use Net::Stomp;
+my $stomp = Net::Stomp->new({hostname=>'localhost', port=>'61613'});
+$stomp->connect({login=>'guest', passcode=>'guest'});
+$stomp->subscribe({'destination'=>'/queue/foo', 'ack'=>'client'});
+while (1) {
+ my $frame = $stomp->receive_frame;
+ print $frame->body . "\n";
+ $stomp->ack({frame=>$frame});
+ last if $frame->body eq 'QUIT';
+}
+$stomp->disconnect;
diff --git a/deps/rabbitmq_stomp/examples/perl/rabbitmq_stomp_rpc_client.pl b/deps/rabbitmq_stomp/examples/perl/rabbitmq_stomp_rpc_client.pl
new file mode 100755
index 0000000000..b3e5ee6fd3
--- /dev/null
+++ b/deps/rabbitmq_stomp/examples/perl/rabbitmq_stomp_rpc_client.pl
@@ -0,0 +1,14 @@
+#!/usr/bin/perl -w
+
+use Net::Stomp;
+my $stomp = Net::Stomp->new({hostname=>'localhost', port=>'61613'});
+$stomp->connect({login=>'guest', passcode=>'guest'});
+
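+# Destinations under /temp-queue/ name per-session temporary queues that the
+# STOMP adapter creates on demand for request/reply patterns.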
+my $private_q_name = "/temp-queue/test";
+
+$stomp->send({destination => '/queue/rabbitmq_stomp_rpc_service',
+ 'reply-to' => $private_q_name,
+ body => "request from $private_q_name"});
+print "Reply: " . $stomp->receive_frame->body . "\n";
+
+$stomp->disconnect;
diff --git a/deps/rabbitmq_stomp/examples/perl/rabbitmq_stomp_rpc_service.pl b/deps/rabbitmq_stomp/examples/perl/rabbitmq_stomp_rpc_service.pl
new file mode 100755
index 0000000000..31e79aea4a
--- /dev/null
+++ b/deps/rabbitmq_stomp/examples/perl/rabbitmq_stomp_rpc_service.pl
@@ -0,0 +1,21 @@
+#!/usr/bin/perl -w
+
+use Net::Stomp;
+
+my $stomp = Net::Stomp->new({hostname=>'localhost', port=>'61613'});
+$stomp->connect({login=>'guest', passcode=>'guest'});
+
+$stomp->subscribe({'destination'=>'/queue/rabbitmq_stomp_rpc_service', 'ack'=>'client'});
+while (1) {
+ print "Waiting for request...\n";
+ my $frame = $stomp->receive_frame;
+ print "Received message, reply_to = " . $frame->headers->{"reply-to"} . "\n";
+ print $frame->body . "\n";
+
+ $stomp->send({destination => $frame->headers->{"reply-to"}, bytes_message => 1,
+ body => "Got body: " . $frame->body});
+ $stomp->ack({frame=>$frame});
+ last if $frame->body eq 'QUIT';
+}
+
+$stomp->disconnect;
diff --git a/deps/rabbitmq_stomp/examples/perl/rabbitmq_stomp_send.pl b/deps/rabbitmq_stomp/examples/perl/rabbitmq_stomp_send.pl
new file mode 100755
index 0000000000..4d26b7837e
--- /dev/null
+++ b/deps/rabbitmq_stomp/examples/perl/rabbitmq_stomp_send.pl
@@ -0,0 +1,9 @@
+#!/usr/bin/perl -w
+# send a message to the 'amq.fanout' exchange
+use Net::Stomp;
+my $stomp = Net::Stomp->new({hostname=>'localhost', port=>'61613'});
+$stomp->connect({login=>'guest', passcode=>'guest'});
+$stomp->send({destination=>'/exchange/amq.fanout',
+ bytes_message=>1,
+ body=>($ARGV[0] or "test\0message")});
+$stomp->disconnect;
diff --git a/deps/rabbitmq_stomp/examples/perl/rabbitmq_stomp_send_many.pl b/deps/rabbitmq_stomp/examples/perl/rabbitmq_stomp_send_many.pl
new file mode 100755
index 0000000000..f6ff54ed95
--- /dev/null
+++ b/deps/rabbitmq_stomp/examples/perl/rabbitmq_stomp_send_many.pl
@@ -0,0 +1,11 @@
+#!/usr/bin/perl -w
+# send 10,000 messages to the queue 'foo'
+use Net::Stomp;
+my $stomp = Net::Stomp->new({hostname=>'localhost', port=>'61613'});
+$stomp->connect({login=>'guest', passcode=>'guest'});
+for (my $i = 0; $i < 10000; $i++) {
+ $stomp->send({destination=>'/queue/foo',
+ bytes_message=>1,
+ body=>($ARGV[0] or "message $i")});
+}
+$stomp->disconnect;
diff --git a/deps/rabbitmq_stomp/examples/perl/rabbitmq_stomp_slow_recv.pl b/deps/rabbitmq_stomp/examples/perl/rabbitmq_stomp_slow_recv.pl
new file mode 100755
index 0000000000..043568f348
--- /dev/null
+++ b/deps/rabbitmq_stomp/examples/perl/rabbitmq_stomp_slow_recv.pl
@@ -0,0 +1,14 @@
+#!/usr/bin/perl -w
+# subscribe to messages from the queue 'foo'
+use Net::Stomp;
+my $stomp = Net::Stomp->new({hostname=>'localhost', port=>'61613'});
+$stomp->connect({login=>'guest', passcode=>'guest', prefetch=>1});
+$stomp->subscribe({'destination'=>'/queue/foo', 'ack'=>'client'});
+while (1) {
+ my $frame = $stomp->receive_frame;
+ print $frame->body . "\n";
+ sleep 1;
+ $stomp->ack({frame=>$frame});
+ last if $frame->body eq 'QUIT';
+}
+$stomp->disconnect;
diff --git a/deps/rabbitmq_stomp/examples/ruby/amq-sender.rb b/deps/rabbitmq_stomp/examples/ruby/amq-sender.rb
new file mode 100644
index 0000000000..baaab5628c
--- /dev/null
+++ b/deps/rabbitmq_stomp/examples/ruby/amq-sender.rb
@@ -0,0 +1,10 @@
+require 'rubygems'
+require 'stomp'
+
+client = Stomp::Client.new("guest", "guest", "localhost", 61613)
+
+# This publishes a message to a queue named 'amq-test', which is managed by the AMQP broker.
+client.publish("/amq/queue/amq-test", "test-message")
+
+# close this connection
+client.close
diff --git a/deps/rabbitmq_stomp/examples/ruby/cb-receiver.rb b/deps/rabbitmq_stomp/examples/ruby/cb-receiver.rb
new file mode 100644
index 0000000000..4e6e26141a
--- /dev/null
+++ b/deps/rabbitmq_stomp/examples/ruby/cb-receiver.rb
@@ -0,0 +1,8 @@
+require 'rubygems'
+require 'stomp'
+
+conn = Stomp::Connection.open('guest', 'guest', 'localhost')
+conn.subscribe('/queue/carl')
+while mesg = conn.receive
+ puts mesg.body
+end
diff --git a/deps/rabbitmq_stomp/examples/ruby/cb-sender.rb b/deps/rabbitmq_stomp/examples/ruby/cb-sender.rb
new file mode 100644
index 0000000000..3d7594681f
--- /dev/null
+++ b/deps/rabbitmq_stomp/examples/ruby/cb-sender.rb
@@ -0,0 +1,6 @@
+require 'rubygems'
+require 'stomp'
+
+client = Stomp::Client.new("guest", "guest", "localhost", 61613)
+10000.times { |i| client.publish '/queue/carl', "Test Message number #{i}"}
+client.publish '/queue/carl', "All Done!"
diff --git a/deps/rabbitmq_stomp/examples/ruby/cb-slow-receiver.rb b/deps/rabbitmq_stomp/examples/ruby/cb-slow-receiver.rb
new file mode 100644
index 0000000000..d98e5f8170
--- /dev/null
+++ b/deps/rabbitmq_stomp/examples/ruby/cb-slow-receiver.rb
@@ -0,0 +1,13 @@
+require 'rubygems'
+require 'stomp'
+
+# Note: requires support for connect_headers hash in the STOMP gem's connection.rb
+conn = Stomp::Connection.open('guest', 'guest', 'localhost', 61613, false, 5, {:prefetch => 1})
+conn.subscribe('/queue/carl', {:ack => 'client'})
+while mesg = conn.receive
+ puts mesg.body
+ puts 'Sleeping...'
+ sleep 0.2
+ puts 'Awake again. Acking.'
+ conn.ack mesg.headers['message-id']
+end
diff --git a/deps/rabbitmq_stomp/examples/ruby/exchange-receiver.rb b/deps/rabbitmq_stomp/examples/ruby/exchange-receiver.rb
new file mode 100644
index 0000000000..76bf4a5c9d
--- /dev/null
+++ b/deps/rabbitmq_stomp/examples/ruby/exchange-receiver.rb
@@ -0,0 +1,15 @@
+require 'rubygems'
+require 'stomp'
+
+conn = Stomp::Connection.open("guest", "guest", "localhost")
+conn.subscribe '/exchange/amq.fanout/test'
+
+puts "Waiting for messages..."
+
+begin
+ while mesg = conn.receive
+ puts mesg.body
+ end
+rescue Exception => _
+ conn.disconnect
+end
diff --git a/deps/rabbitmq_stomp/examples/ruby/exchange-sender.rb b/deps/rabbitmq_stomp/examples/ruby/exchange-sender.rb
new file mode 100644
index 0000000000..ed556eacae
--- /dev/null
+++ b/deps/rabbitmq_stomp/examples/ruby/exchange-sender.rb
@@ -0,0 +1,12 @@
+require 'rubygems'
+require 'stomp'
+
+client = Stomp::Client.new("guest", "guest", "localhost", 61613)
+
+# This publishes a message to the 'amq.fanout' exchange, which is managed by
+# the AMQP broker, with a routing key of 'test'. You can list the other
+# exchanges with the 'list_exchanges' subcommand of the 'rabbitmqctl' utility.
+client.publish("/exchange/amq.fanout/test", "test message")
+
+# close this connection
+client.close
diff --git a/deps/rabbitmq_stomp/examples/ruby/persistent-receiver.rb b/deps/rabbitmq_stomp/examples/ruby/persistent-receiver.rb
new file mode 100644
index 0000000000..5a83df6fb0
--- /dev/null
+++ b/deps/rabbitmq_stomp/examples/ruby/persistent-receiver.rb
@@ -0,0 +1,11 @@
+require 'rubygems'
+require 'stomp'
+
+conn = Stomp::Connection.open('guest', 'guest', 'localhost')
+conn.subscribe('/queue/durable', :'auto-delete' => false, :durable => true)
+
+puts "Waiting for messages..."
+
+while mesg = conn.receive
+ puts mesg.body
+end
diff --git a/deps/rabbitmq_stomp/examples/ruby/persistent-sender.rb b/deps/rabbitmq_stomp/examples/ruby/persistent-sender.rb
new file mode 100644
index 0000000000..1be32d6c76
--- /dev/null
+++ b/deps/rabbitmq_stomp/examples/ruby/persistent-sender.rb
@@ -0,0 +1,13 @@
+require 'rubygems'
+require 'stomp'
+
+# Use this script to test durable queues:
+#
+# Start the sender - 11 messages will be sent to /queue/durable, then the sender exits
+# Stop the server - the 11 messages will be written to disk
+# Start the server
+# Start the receiver - the 11 messages should be received; interrupt the receive loop to exit
+
+client = Stomp::Client.new("guest", "guest", "localhost", 61613)
+10.times { |i| client.publish '/queue/durable', "Test Message number #{i} sent at #{Time.now}", 'delivery-mode' => '2'}
+client.publish '/queue/durable', "All Done!"
diff --git a/deps/rabbitmq_stomp/examples/ruby/temp-queue-client.rb b/deps/rabbitmq_stomp/examples/ruby/temp-queue-client.rb
new file mode 100644
index 0000000000..39828708e8
--- /dev/null
+++ b/deps/rabbitmq_stomp/examples/ruby/temp-queue-client.rb
@@ -0,0 +1,9 @@
+require 'rubygems'
+require 'stomp'
+
+conn = Stomp::Connection.open("guest", "guest", "localhost")
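+# '/temp-queue/test' names a per-session temporary queue; the broker creates
+# it on demand and delivers the service's reply there.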
+conn.publish("/queue/rpc-service", "test message", {
+ 'reply-to' => '/temp-queue/test'
+})
+puts conn.receive.body
+conn.disconnect
diff --git a/deps/rabbitmq_stomp/examples/ruby/temp-queue-service.rb b/deps/rabbitmq_stomp/examples/ruby/temp-queue-service.rb
new file mode 100644
index 0000000000..fea4fa7803
--- /dev/null
+++ b/deps/rabbitmq_stomp/examples/ruby/temp-queue-service.rb
@@ -0,0 +1,15 @@
+require 'rubygems'
+require 'stomp'
+
+conn = Stomp::Connection.open("guest", "guest", "localhost")
+conn.subscribe '/queue/rpc-service'
+
+begin
+ while mesg = conn.receive
+    puts "received message, replying to #{mesg.headers['reply-to']}"
+
+ conn.publish(mesg.headers['reply-to'], '(reply) ' + mesg.body)
+ end
+rescue Exception => _
+ conn.disconnect
+end
diff --git a/deps/rabbitmq_stomp/examples/ruby/topic-broadcast-receiver.rb b/deps/rabbitmq_stomp/examples/ruby/topic-broadcast-receiver.rb
new file mode 100644
index 0000000000..b338e53c34
--- /dev/null
+++ b/deps/rabbitmq_stomp/examples/ruby/topic-broadcast-receiver.rb
@@ -0,0 +1,11 @@
+require 'rubygems'
+require 'stomp'
+
+topic = ARGV[0] || 'x'
+puts "Binding to /topic/#{topic}"
+
+conn = Stomp::Connection.open('guest', 'guest', 'localhost')
+conn.subscribe("/topic/#{topic}")
+while mesg = conn.receive
+ puts mesg.body
+end
diff --git a/deps/rabbitmq_stomp/examples/ruby/topic-broadcast-with-unsubscribe.rb b/deps/rabbitmq_stomp/examples/ruby/topic-broadcast-with-unsubscribe.rb
new file mode 100644
index 0000000000..19f05ee9d2
--- /dev/null
+++ b/deps/rabbitmq_stomp/examples/ruby/topic-broadcast-with-unsubscribe.rb
@@ -0,0 +1,13 @@
+require 'rubygems'
+require 'stomp' # this is a gem
+
+conn = Stomp::Connection.open('guest', 'guest', 'localhost')
+puts "Subscribing to /topic/x"
+conn.subscribe('/topic/x')
+puts 'Receiving...'
+mesg = conn.receive
+puts mesg.body
+puts "Unsubscribing from /topic/x"
+conn.unsubscribe('/topic/x')
+puts 'Sleeping 5 seconds...'
+sleep 5
diff --git a/deps/rabbitmq_stomp/examples/ruby/topic-sender.rb b/deps/rabbitmq_stomp/examples/ruby/topic-sender.rb
new file mode 100644
index 0000000000..b0861f9542
--- /dev/null
+++ b/deps/rabbitmq_stomp/examples/ruby/topic-sender.rb
@@ -0,0 +1,7 @@
+require 'rubygems'
+require 'stomp'
+
+client = Stomp::Client.new("guest", "guest", "localhost", 61613)
+client.publish '/topic/x.y', 'first message'
+client.publish '/topic/x.z', 'second message'
+client.publish '/topic/x', 'third message'
diff --git a/deps/rabbitmq_stomp/include/rabbit_stomp.hrl b/deps/rabbitmq_stomp/include/rabbit_stomp.hrl
new file mode 100644
index 0000000000..3d31535d14
--- /dev/null
+++ b/deps/rabbitmq_stomp/include/rabbit_stomp.hrl
@@ -0,0 +1,42 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-record(stomp_configuration, {default_login,
+ default_passcode,
+ force_default_creds = false,
+ implicit_connect,
+ ssl_cert_login}).
+
+-define(SUPPORTED_VERSIONS, ["1.0", "1.1", "1.2"]).
+
+-define(INFO_ITEMS,
+ [conn_name,
+ connection,
+ connection_state,
+ session_id,
+ channel,
+ version,
+ implicit_connect,
+ auth_login,
+ auth_mechanism,
+ peer_addr,
+ host,
+ port,
+ peer_host,
+ peer_port,
+ protocol,
+ channels,
+ channel_max,
+ frame_max,
+ client_properties,
+ ssl,
+ ssl_protocol,
+ ssl_key_exchange,
+ ssl_cipher,
+ ssl_hash]).
+
+-define(STOMP_GUIDE_URL, <<"https://rabbitmq.com/stomp.html">>).
diff --git a/deps/rabbitmq_stomp/include/rabbit_stomp_frame.hrl b/deps/rabbitmq_stomp/include/rabbit_stomp_frame.hrl
new file mode 100644
index 0000000000..13b8b2e94c
--- /dev/null
+++ b/deps/rabbitmq_stomp/include/rabbit_stomp_frame.hrl
@@ -0,0 +1,8 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-record(stomp_frame, {command, headers, body_iolist}).
diff --git a/deps/rabbitmq_stomp/include/rabbit_stomp_headers.hrl b/deps/rabbitmq_stomp/include/rabbit_stomp_headers.hrl
new file mode 100644
index 0000000000..974b5825c8
--- /dev/null
+++ b/deps/rabbitmq_stomp/include/rabbit_stomp_headers.hrl
@@ -0,0 +1,73 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-define(HEADER_ACCEPT_VERSION, "accept-version").
+-define(HEADER_ACK, "ack").
+-define(HEADER_AMQP_MESSAGE_ID, "amqp-message-id").
+-define(HEADER_APP_ID, "app-id").
+-define(HEADER_AUTO_DELETE, "auto-delete").
+-define(HEADER_CONTENT_ENCODING, "content-encoding").
+-define(HEADER_CONTENT_LENGTH, "content-length").
+-define(HEADER_CONTENT_TYPE, "content-type").
+-define(HEADER_CORRELATION_ID, "correlation-id").
+-define(HEADER_DESTINATION, "destination").
+-define(HEADER_DURABLE, "durable").
+-define(HEADER_EXPIRATION, "expiration").
+-define(HEADER_EXCLUSIVE, "exclusive").
+-define(HEADER_HEART_BEAT, "heart-beat").
+-define(HEADER_HOST, "host").
+-define(HEADER_ID, "id").
+-define(HEADER_LOGIN, "login").
+-define(HEADER_MESSAGE_ID, "message-id").
+-define(HEADER_PASSCODE, "passcode").
+-define(HEADER_PERSISTENT, "persistent").
+-define(HEADER_PREFETCH_COUNT, "prefetch-count").
+-define(HEADER_PRIORITY, "priority").
+-define(HEADER_RECEIPT, "receipt").
+-define(HEADER_REDELIVERED, "redelivered").
+-define(HEADER_REPLY_TO, "reply-to").
+-define(HEADER_SERVER, "server").
+-define(HEADER_SESSION, "session").
+-define(HEADER_SUBSCRIPTION, "subscription").
+-define(HEADER_TIMESTAMP, "timestamp").
+-define(HEADER_TRANSACTION, "transaction").
+-define(HEADER_TYPE, "type").
+-define(HEADER_USER_ID, "user-id").
+-define(HEADER_VERSION, "version").
+-define(HEADER_X_DEAD_LETTER_EXCHANGE, "x-dead-letter-exchange").
+-define(HEADER_X_DEAD_LETTER_ROUTING_KEY, "x-dead-letter-routing-key").
+-define(HEADER_X_EXPIRES, "x-expires").
+-define(HEADER_X_MAX_LENGTH, "x-max-length").
+-define(HEADER_X_MAX_LENGTH_BYTES, "x-max-length-bytes").
+-define(HEADER_X_MAX_PRIORITY, "x-max-priority").
+-define(HEADER_X_MESSAGE_TTL, "x-message-ttl").
+-define(HEADER_X_QUEUE_NAME, "x-queue-name").
+-define(HEADER_X_QUEUE_TYPE, "x-queue-type").
+
+-define(MESSAGE_ID_SEPARATOR, "@@").
+
+-define(HEADERS_NOT_ON_SEND, [?HEADER_MESSAGE_ID]).
+
+-define(TEMP_QUEUE_ID_PREFIX, "/temp-queue/").
+
+-define(HEADER_ARGUMENTS, [
+ ?HEADER_X_DEAD_LETTER_EXCHANGE,
+ ?HEADER_X_DEAD_LETTER_ROUTING_KEY,
+ ?HEADER_X_EXPIRES,
+ ?HEADER_X_MAX_LENGTH,
+ ?HEADER_X_MAX_LENGTH_BYTES,
+ ?HEADER_X_MAX_PRIORITY,
+ ?HEADER_X_MESSAGE_TTL,
+ ?HEADER_X_QUEUE_TYPE
+ ]).
+
+-define(HEADER_PARAMS, [
+ ?HEADER_AUTO_DELETE,
+ ?HEADER_DURABLE,
+ ?HEADER_EXCLUSIVE,
+ ?HEADER_PERSISTENT
+ ]).
diff --git a/deps/rabbitmq_stomp/priv/schema/rabbitmq_stomp.schema b/deps/rabbitmq_stomp/priv/schema/rabbitmq_stomp.schema
new file mode 100644
index 0000000000..8b8646ae82
--- /dev/null
+++ b/deps/rabbitmq_stomp/priv/schema/rabbitmq_stomp.schema
@@ -0,0 +1,237 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+%% ----------------------------------------------------------------------------
+%% RabbitMQ STOMP Adapter
+%%
+%% See https://www.rabbitmq.com/stomp.html for details
+%% ----------------------------------------------------------------------------
+
+% {rabbitmq_stomp,
+% [%% Network Configuration - the format is generally the same as for the broker
+
+%% Listen only on localhost (ipv4 & ipv6) on a specific port.
+%% {tcp_listeners, [{"127.0.0.1", 61613},
+%% {"::1", 61613}]},
+
+{mapping, "stomp.listeners.tcp", "rabbitmq_stomp.tcp_listeners",[
+ {datatype, {enum, [none]}}
+]}.
+
+{mapping, "stomp.listeners.tcp.$name", "rabbitmq_stomp.tcp_listeners",[
+ {datatype, [integer, ip]}
+]}.
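+
+%% For example:
+%%   stomp.listeners.tcp.1 = 127.0.0.1:61613
+%%   stomp.listeners.tcp.2 = ::1:61613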
+
+{translation, "rabbitmq_stomp.tcp_listeners",
+fun(Conf) ->
+ case cuttlefish:conf_get("stomp.listeners.tcp", Conf, undefined) of
+ none -> [];
+ _ ->
+ Settings = cuttlefish_variable:filter_by_prefix("stomp.listeners.tcp", Conf),
+ [ V || {_, V} <- Settings ]
+ end
+end}.
+
+{mapping, "stomp.tcp_listen_options", "rabbitmq_stomp.tcp_listen_options", [
+ {datatype, {enum, [none]}}]}.
+
+{translation, "rabbitmq_stomp.tcp_listen_options",
+fun(Conf) ->
+ case cuttlefish:conf_get("stomp.tcp_listen_options", Conf, undefined) of
+ none -> [];
+ _ -> cuttlefish:invalid("Invalid stomp.tcp_listen_options")
+ end
+end}.
+
+{mapping, "stomp.tcp_listen_options.backlog", "rabbitmq_stomp.tcp_listen_options.backlog", [
+ {datatype, integer}
+]}.
+
+{mapping, "stomp.tcp_listen_options.nodelay", "rabbitmq_stomp.tcp_listen_options.nodelay", [
+ {datatype, {enum, [true, false]}}
+]}.
+
+{mapping, "stomp.tcp_listen_options.buffer", "rabbitmq_stomp.tcp_listen_options.buffer",
+ [{datatype, integer}]}.
+
+{mapping, "stomp.tcp_listen_options.delay_send", "rabbitmq_stomp.tcp_listen_options.delay_send",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "stomp.tcp_listen_options.dontroute", "rabbitmq_stomp.tcp_listen_options.dontroute",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "stomp.tcp_listen_options.exit_on_close", "rabbitmq_stomp.tcp_listen_options.exit_on_close",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "stomp.tcp_listen_options.fd", "rabbitmq_stomp.tcp_listen_options.fd",
+ [{datatype, integer}]}.
+
+{mapping, "stomp.tcp_listen_options.high_msgq_watermark", "rabbitmq_stomp.tcp_listen_options.high_msgq_watermark",
+ [{datatype, integer}]}.
+
+{mapping, "stomp.tcp_listen_options.high_watermark", "rabbitmq_stomp.tcp_listen_options.high_watermark",
+ [{datatype, integer}]}.
+
+{mapping, "stomp.tcp_listen_options.keepalive", "rabbitmq_stomp.tcp_listen_options.keepalive",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "stomp.tcp_listen_options.low_msgq_watermark", "rabbitmq_stomp.tcp_listen_options.low_msgq_watermark",
+ [{datatype, integer}]}.
+
+{mapping, "stomp.tcp_listen_options.low_watermark", "rabbitmq_stomp.tcp_listen_options.low_watermark",
+ [{datatype, integer}]}.
+
+{mapping, "stomp.tcp_listen_options.port", "rabbitmq_stomp.tcp_listen_options.port",
+ [{datatype, integer}, {validators, ["port"]}]}.
+
+{mapping, "stomp.tcp_listen_options.priority", "rabbitmq_stomp.tcp_listen_options.priority",
+ [{datatype, integer}]}.
+
+{mapping, "stomp.tcp_listen_options.recbuf", "rabbitmq_stomp.tcp_listen_options.recbuf",
+ [{datatype, integer}]}.
+
+{mapping, "stomp.tcp_listen_options.send_timeout", "rabbitmq_stomp.tcp_listen_options.send_timeout",
+ [{datatype, integer}]}.
+
+{mapping, "stomp.tcp_listen_options.send_timeout_close", "rabbitmq_stomp.tcp_listen_options.send_timeout_close",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "stomp.tcp_listen_options.sndbuf", "rabbitmq_stomp.tcp_listen_options.sndbuf",
+ [{datatype, integer}]}.
+
+{mapping, "stomp.tcp_listen_options.tos", "rabbitmq_stomp.tcp_listen_options.tos",
+ [{datatype, integer}]}.
+
+{mapping, "stomp.tcp_listen_options.linger.on", "rabbitmq_stomp.tcp_listen_options.linger",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "stomp.tcp_listen_options.linger.timeout", "rabbitmq_stomp.tcp_listen_options.linger",
+ [{datatype, integer}, {validators, ["non_negative_integer"]}]}.
+
+{translation, "rabbitmq_stomp.tcp_listen_options.linger",
+fun(Conf) ->
+ LingerOn = cuttlefish:conf_get("stomp.tcp_listen_options.linger.on", Conf, false),
+ LingerTimeout = cuttlefish:conf_get("stomp.tcp_listen_options.linger.timeout", Conf, 0),
+ {LingerOn, LingerTimeout}
+end}.
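+
+%% For example:
+%%   stomp.tcp_listen_options.linger.on      = true
+%%   stomp.tcp_listen_options.linger.timeout = 100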
+
+
+%%
+%% TLS
+%%
+
+{mapping, "stomp.listeners.ssl", "rabbitmq_stomp.ssl_listeners",[
+ {datatype, {enum, [none]}}
+]}.
+
+{mapping, "stomp.listeners.ssl.$name", "rabbitmq_stomp.ssl_listeners",[
+ {datatype, [integer, ip]}
+]}.
+
+{translation, "rabbitmq_stomp.ssl_listeners",
+fun(Conf) ->
+ case cuttlefish:conf_get("stomp.listeners.ssl", Conf, undefined) of
+ none -> [];
+ _ ->
+ Settings = cuttlefish_variable:filter_by_prefix("stomp.listeners.ssl", Conf),
+ [ V || {_, V} <- Settings ]
+ end
+end}.
+
+%% Number of Erlang processes that will accept connections for the TCP
+%% and SSL listeners.
+%%
+%% {num_tcp_acceptors, 10},
+%% {num_ssl_acceptors, 10},
+
+{mapping, "stomp.num_acceptors.ssl", "rabbitmq_stomp.num_ssl_acceptors", [
+ {datatype, integer}
+]}.
+
+{mapping, "stomp.num_acceptors.tcp", "rabbitmq_stomp.num_tcp_acceptors", [
+ {datatype, integer}
+]}.
+
+%% Additional TLS options
+
+%% Extract a name from the client's certificate when using TLS.
+%%
+%% Defaults to false.
+
+{mapping, "stomp.ssl_cert_login", "rabbitmq_stomp.ssl_cert_login",
+ [{datatype, {enum, [true, false]}}]}.
+
+%% Set a default user name and password. This is used as the default login
+%% whenever a CONNECT frame omits the login and passcode headers.
+%%
+%% Please note that setting this will allow clients to connect without
+%% authenticating!
+%%
+%% {default_user, [{login, "guest"},
+%% {passcode, "guest"}]},
+
+{mapping, "stomp.default_vhost", "rabbitmq_stomp.default_vhost", [
+ {datatype, string}
+]}.
+
+{translation, "rabbitmq_stomp.default_vhost",
+fun(Conf) ->
+ list_to_binary(cuttlefish:conf_get("stomp.default_vhost", Conf, "/"))
+end}.
+
+{mapping, "stomp.default_user", "rabbitmq_stomp.default_user.login", [
+ {datatype, string}
+]}.
+
+{mapping, "stomp.default_pass", "rabbitmq_stomp.default_user.passcode", [
+ {datatype, string}
+]}.
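+
+%% For example:
+%%   stomp.default_user = guest
+%%   stomp.default_pass = guest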
+
+{mapping, "stomp.default_topic_exchange", "rabbitmq_stomp.default_topic_exchange", [
+ {datatype, string}
+]}.
+
+{translation, "rabbitmq_stomp.default_topic_exchange",
+fun(Conf) ->
+ list_to_binary(cuttlefish:conf_get("stomp.default_topic_exchange", Conf, "amq.topic"))
+end}.
+
+%% If a default user is configured, or if x.509 certificate-based client
+%% authentication is used, this setting allows clients to omit the CONNECT
+%% frame entirely. If set to true, the client is automatically connected as
+%% the default user (or as the user supplied in the x.509/TLS certificate)
+%% whenever the first frame sent on a session is not a CONNECT frame.
+%%
+%% Defaults to false.
+
+{mapping, "stomp.implicit_connect", "rabbitmq_stomp.implicit_connect",
+ [{datatype, {enum, [true, false]}}]}.
+
+%% Whether or not to enable proxy protocol support.
+%%
+%% Defaults to false.
+
+{mapping, "stomp.proxy_protocol", "rabbitmq_stomp.proxy_protocol",
+ [{datatype, {enum, [true, false]}}]}.
+
+%% Whether or not to hide server info
+%%
+%% Defaults to false.
+
+{mapping, "stomp.hide_server_info", "rabbitmq_stomp.hide_server_info",
+ [{datatype, {enum, [true, false]}}]}.
+
+%% Whether or not to always requeue a message on nack.
+%% If not set, requeueing is controlled by the "requeue" frame header.
+%% Useful when you do not fully control the STOMP consumer implementation.
+%%
+%% Defaults to true.
+
+{mapping, "stomp.default_nack_requeue", "rabbitmq_stomp.default_nack_requeue",
+ [{datatype, {enum, [true, false]}}]}.
diff --git a/deps/rabbitmq_stomp/rabbitmq-components.mk b/deps/rabbitmq_stomp/rabbitmq-components.mk
new file mode 100644
index 0000000000..b2a3be8b35
--- /dev/null
+++ b/deps/rabbitmq_stomp/rabbitmq-components.mk
@@ -0,0 +1,359 @@
+ifeq ($(.DEFAULT_GOAL),)
+# Set the default goal to `all` because this file defines some targets before
+# the inclusion of erlang.mk, which would otherwise cause the wrong target to
+# become the default.
+.DEFAULT_GOAL = all
+endif
+
+# PROJECT_VERSION defaults to:
+# 1. the version exported by rabbitmq-server-release;
+# 2. the version stored in `git-revisions.txt`, if it exists;
+# 3. a version based on git-describe(1), if it is a Git clone;
+# 4. 0.0.0
+
+PROJECT_VERSION := $(RABBITMQ_VERSION)
+
+ifeq ($(PROJECT_VERSION),)
+PROJECT_VERSION := $(shell \
+if test -f git-revisions.txt; then \
+ head -n1 git-revisions.txt | \
+ awk '{print $$$(words $(PROJECT_DESCRIPTION) version);}'; \
+else \
+ (git describe --dirty --abbrev=7 --tags --always --first-parent \
+ 2>/dev/null || echo rabbitmq_v0_0_0) | \
+ sed -e 's/^rabbitmq_v//' -e 's/^v//' -e 's/_/./g' -e 's/-/+/' \
+ -e 's/-/./g'; \
+fi)
+endif
+
+# --------------------------------------------------------------------
+# RabbitMQ components.
+# --------------------------------------------------------------------
+
+# For RabbitMQ repositories, we want to check out branches which match
+# the parent project. For instance, if the parent project is on a
+# release tag, dependencies must be on the same release tag. If the
+# parent project is on a topic branch, dependencies must be on the same
+# topic branch, or fall back to `stable` or `master`, whichever was the
+# base of the topic branch.
+
+dep_amqp_client = git_rmq rabbitmq-erlang-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_amqp10_client = git_rmq rabbitmq-amqp1.0-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_amqp10_common = git_rmq rabbitmq-amqp1.0-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbit = git_rmq rabbitmq-server $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbit_common = git_rmq rabbitmq-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_amqp1_0 = git_rmq rabbitmq-amqp1.0 $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_amqp = git_rmq rabbitmq-auth-backend-amqp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_cache = git_rmq rabbitmq-auth-backend-cache $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_http = git_rmq rabbitmq-auth-backend-http $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_ldap = git_rmq rabbitmq-auth-backend-ldap $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_oauth2 = git_rmq rabbitmq-auth-backend-oauth2 $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_mechanism_ssl = git_rmq rabbitmq-auth-mechanism-ssl $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_aws = git_rmq rabbitmq-aws $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_boot_steps_visualiser = git_rmq rabbitmq-boot-steps-visualiser $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_cli = git_rmq rabbitmq-cli $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_codegen = git_rmq rabbitmq-codegen $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_consistent_hash_exchange = git_rmq rabbitmq-consistent-hash-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_ct_client_helpers = git_rmq rabbitmq-ct-client-helpers $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_ct_helpers = git_rmq rabbitmq-ct-helpers $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_delayed_message_exchange = git_rmq rabbitmq-delayed-message-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_dotnet_client = git_rmq rabbitmq-dotnet-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_event_exchange = git_rmq rabbitmq-event-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_federation = git_rmq rabbitmq-federation $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_federation_management = git_rmq rabbitmq-federation-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_java_client = git_rmq rabbitmq-java-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_client = git_rmq rabbitmq-jms-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_cts = git_rmq rabbitmq-jms-cts $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_topic_exchange = git_rmq rabbitmq-jms-topic-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_lvc_exchange = git_rmq rabbitmq-lvc-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management = git_rmq rabbitmq-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_agent = git_rmq rabbitmq-management-agent $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_exchange = git_rmq rabbitmq-management-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_themes = git_rmq rabbitmq-management-themes $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_message_timestamp = git_rmq rabbitmq-message-timestamp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_metronome = git_rmq rabbitmq-metronome $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_mqtt = git_rmq rabbitmq-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_objc_client = git_rmq rabbitmq-objc-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_aws = git_rmq rabbitmq-peer-discovery-aws $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_common = git_rmq rabbitmq-peer-discovery-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_consul = git_rmq rabbitmq-peer-discovery-consul $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_etcd = git_rmq rabbitmq-peer-discovery-etcd $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_k8s = git_rmq rabbitmq-peer-discovery-k8s $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_prometheus = git_rmq rabbitmq-prometheus $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_random_exchange = git_rmq rabbitmq-random-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_recent_history_exchange = git_rmq rabbitmq-recent-history-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_routing_node_stamp = git_rmq rabbitmq-routing-node-stamp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_rtopic_exchange = git_rmq rabbitmq-rtopic-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_server_release = git_rmq rabbitmq-server-release $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_sharding = git_rmq rabbitmq-sharding $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_shovel = git_rmq rabbitmq-shovel $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_shovel_management = git_rmq rabbitmq-shovel-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_stomp = git_rmq rabbitmq-stomp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_stream = git_rmq rabbitmq-stream $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_toke = git_rmq rabbitmq-toke $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_top = git_rmq rabbitmq-top $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_tracing = git_rmq rabbitmq-tracing $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_trust_store = git_rmq rabbitmq-trust-store $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_test = git_rmq rabbitmq-test $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_dispatch = git_rmq rabbitmq-web-dispatch $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_stomp = git_rmq rabbitmq-web-stomp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_stomp_examples = git_rmq rabbitmq-web-stomp-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt = git_rmq rabbitmq-web-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt_examples = git_rmq rabbitmq-web-mqtt-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_website = git_rmq rabbitmq-website $(current_rmq_ref) $(base_rmq_ref) live master
+dep_toke = git_rmq toke $(current_rmq_ref) $(base_rmq_ref) master
+
+dep_rabbitmq_public_umbrella = git_rmq rabbitmq-public-umbrella $(current_rmq_ref) $(base_rmq_ref) master
+
+# Third-party dependencies version pinning.
+#
+# We do that in this file, which is copied into all projects, to ensure
+# they all use the same versions. This avoids conflicts and makes it
+# possible to work with rabbitmq-public-umbrella.
+
+dep_accept = hex 0.3.5
+dep_cowboy = hex 2.8.0
+dep_cowlib = hex 2.9.1
+dep_jsx = hex 2.11.0
+dep_lager = hex 3.8.0
+dep_prometheus = git https://github.com/deadtrickster/prometheus.erl.git master
+dep_ra = git https://github.com/rabbitmq/ra.git master
+dep_ranch = hex 1.7.1
+dep_recon = hex 2.5.1
+dep_observer_cli = hex 1.5.4
+dep_stdout_formatter = hex 0.2.4
+dep_sysmon_handler = hex 1.3.0
+
+RABBITMQ_COMPONENTS = amqp_client \
+ amqp10_common \
+ amqp10_client \
+ rabbit \
+ rabbit_common \
+ rabbitmq_amqp1_0 \
+ rabbitmq_auth_backend_amqp \
+ rabbitmq_auth_backend_cache \
+ rabbitmq_auth_backend_http \
+ rabbitmq_auth_backend_ldap \
+ rabbitmq_auth_backend_oauth2 \
+ rabbitmq_auth_mechanism_ssl \
+ rabbitmq_aws \
+ rabbitmq_boot_steps_visualiser \
+ rabbitmq_cli \
+ rabbitmq_codegen \
+ rabbitmq_consistent_hash_exchange \
+ rabbitmq_ct_client_helpers \
+ rabbitmq_ct_helpers \
+ rabbitmq_delayed_message_exchange \
+ rabbitmq_dotnet_client \
+ rabbitmq_event_exchange \
+ rabbitmq_federation \
+ rabbitmq_federation_management \
+ rabbitmq_java_client \
+ rabbitmq_jms_client \
+ rabbitmq_jms_cts \
+ rabbitmq_jms_topic_exchange \
+ rabbitmq_lvc_exchange \
+ rabbitmq_management \
+ rabbitmq_management_agent \
+ rabbitmq_management_exchange \
+ rabbitmq_management_themes \
+ rabbitmq_message_timestamp \
+ rabbitmq_metronome \
+ rabbitmq_mqtt \
+ rabbitmq_objc_client \
+ rabbitmq_peer_discovery_aws \
+ rabbitmq_peer_discovery_common \
+ rabbitmq_peer_discovery_consul \
+ rabbitmq_peer_discovery_etcd \
+ rabbitmq_peer_discovery_k8s \
+ rabbitmq_prometheus \
+ rabbitmq_random_exchange \
+ rabbitmq_recent_history_exchange \
+ rabbitmq_routing_node_stamp \
+ rabbitmq_rtopic_exchange \
+ rabbitmq_server_release \
+ rabbitmq_sharding \
+ rabbitmq_shovel \
+ rabbitmq_shovel_management \
+ rabbitmq_stomp \
+ rabbitmq_stream \
+ rabbitmq_toke \
+ rabbitmq_top \
+ rabbitmq_tracing \
+ rabbitmq_trust_store \
+ rabbitmq_web_dispatch \
+ rabbitmq_web_mqtt \
+ rabbitmq_web_mqtt_examples \
+ rabbitmq_web_stomp \
+ rabbitmq_web_stomp_examples \
+ rabbitmq_website
+
+# Erlang.mk does not rebuild dependencies by default once they have been
+# compiled, except for those listed in the `$(FORCE_REBUILD)`
+# variable.
+#
+# We want all RabbitMQ components to always be rebuilt: this eases
+# the work on several components at the same time.
+
+FORCE_REBUILD = $(RABBITMQ_COMPONENTS)
+
+# Several components have a custom erlang.mk/build.config, mainly
+# to disable eunit. Therefore, we can't use the top-level project's
+# erlang.mk copy.
+NO_AUTOPATCH += $(RABBITMQ_COMPONENTS)
+
+ifeq ($(origin current_rmq_ref),undefined)
+ifneq ($(wildcard .git),)
+current_rmq_ref := $(shell (\
+ ref=$$(LANG=C git branch --list | awk '/^\* \(.*detached / {ref=$$0; sub(/.*detached [^ ]+ /, "", ref); sub(/\)$$/, "", ref); print ref; exit;} /^\* / {ref=$$0; sub(/^\* /, "", ref); print ref; exit}');\
+ if test "$$(git rev-parse --short HEAD)" != "$$ref"; then echo "$$ref"; fi))
+else
+current_rmq_ref := master
+endif
+endif
+export current_rmq_ref
+
+ifeq ($(origin base_rmq_ref),undefined)
+ifneq ($(wildcard .git),)
+possible_base_rmq_ref := master
+ifeq ($(possible_base_rmq_ref),$(current_rmq_ref))
+base_rmq_ref := $(current_rmq_ref)
+else
+base_rmq_ref := $(shell \
+ (git rev-parse --verify -q master >/dev/null && \
+ git rev-parse --verify -q $(possible_base_rmq_ref) >/dev/null && \
+ git merge-base --is-ancestor $$(git merge-base master HEAD) $(possible_base_rmq_ref) && \
+ echo $(possible_base_rmq_ref)) || \
+ echo master)
+endif
+else
+base_rmq_ref := master
+endif
+endif
+export base_rmq_ref
+
+# Repository URL selection.
+#
+# First, we infer the other components' locations from the current project's
+# repository URL, if it is a Git repository:
+# - We take the "origin" remote URL as the base
+# - The current project's name and repository name are replaced by the
+#   target's properties:
+#   eg. rabbitmq-common is replaced by rabbitmq-codegen
+#   eg. rabbit_common is replaced by rabbitmq_codegen
+#
+# If cloning from this computed location fails, we fall back to the RabbitMQ
+# upstream, which is GitHub.
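+#
+# For example, if "origin" points to git@example.com:team/rabbitmq-server.git
+# (a hypothetical mirror), the rabbit_common dependency is first tried from
+# git@example.com:team/rabbitmq-common.git.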
+
+# Macro to transform eg. "rabbit_common" to "rabbitmq-common".
+rmq_cmp_repo_name = $(word 2,$(dep_$(1)))
+
+# Upstream URL for the current project.
+RABBITMQ_COMPONENT_REPO_NAME := $(call rmq_cmp_repo_name,$(PROJECT))
+RABBITMQ_UPSTREAM_FETCH_URL ?= https://github.com/rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git
+RABBITMQ_UPSTREAM_PUSH_URL ?= git@github.com:rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git
+
+# Current URL for the current project. If this is not a Git clone,
+# default to the upstream Git repository.
+ifneq ($(wildcard .git),)
+git_origin_fetch_url := $(shell git config remote.origin.url)
+git_origin_push_url := $(shell git config remote.origin.pushurl || git config remote.origin.url)
+RABBITMQ_CURRENT_FETCH_URL ?= $(git_origin_fetch_url)
+RABBITMQ_CURRENT_PUSH_URL ?= $(git_origin_push_url)
+else
+RABBITMQ_CURRENT_FETCH_URL ?= $(RABBITMQ_UPSTREAM_FETCH_URL)
+RABBITMQ_CURRENT_PUSH_URL ?= $(RABBITMQ_UPSTREAM_PUSH_URL)
+endif
+
+# Macro to replace the following patterns:
+# 1. /foo.git -> /bar.git
+# 2. /foo -> /bar
+# 3. /foo/ -> /bar/
+subst_repo_name = $(patsubst %/$(1)/%,%/$(2)/%,$(patsubst %/$(1),%/$(2),$(patsubst %/$(1).git,%/$(2).git,$(3))))
+
+# Macro to replace both the project's name (eg. "rabbit_common") and
+# repository name (eg. "rabbitmq-common") by the target's equivalent.
+#
+# This macro is kept on one line because we don't want whitespace in
+# the returned value, as it is used in $(dep_fetch_git_rmq) in a shell
+# single-quoted string.
+dep_rmq_repo = $(if $(dep_$(2)),$(call subst_repo_name,$(PROJECT),$(2),$(call subst_repo_name,$(RABBITMQ_COMPONENT_REPO_NAME),$(call rmq_cmp_repo_name,$(2)),$(1))),$(pkg_$(1)_repo))
+
+dep_rmq_commits = $(if $(dep_$(1)), \
+ $(wordlist 3,$(words $(dep_$(1))),$(dep_$(1))), \
+ $(pkg_$(1)_commit))
+
+define dep_fetch_git_rmq
+ fetch_url1='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_FETCH_URL),$(1))'; \
+ fetch_url2='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_FETCH_URL),$(1))'; \
+ if test "$$$$fetch_url1" != '$(RABBITMQ_CURRENT_FETCH_URL)' && \
+ git clone -q -n -- "$$$$fetch_url1" $(DEPS_DIR)/$(call dep_name,$(1)); then \
+ fetch_url="$$$$fetch_url1"; \
+ push_url='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_PUSH_URL),$(1))'; \
+ elif git clone -q -n -- "$$$$fetch_url2" $(DEPS_DIR)/$(call dep_name,$(1)); then \
+ fetch_url="$$$$fetch_url2"; \
+ push_url='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_PUSH_URL),$(1))'; \
+ fi; \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && ( \
+ $(foreach ref,$(call dep_rmq_commits,$(1)), \
+ git checkout -q $(ref) >/dev/null 2>&1 || \
+ ) \
+ (echo "error: no valid pathspec among: $(call dep_rmq_commits,$(1))" \
+ 1>&2 && false) ) && \
+ (test "$$$$fetch_url" = "$$$$push_url" || \
+ git remote set-url --push origin "$$$$push_url")
+endef
+
+# --------------------------------------------------------------------
+# Component distribution.
+# --------------------------------------------------------------------
+
+list-dist-deps::
+ @:
+
+prepare-dist::
+ @:
+
+# --------------------------------------------------------------------
+# Umbrella-specific settings.
+# --------------------------------------------------------------------
+
+# If the top-level project is a RabbitMQ component, we override
+# $(DEPS_DIR) for this project to point to the top-level's one.
+#
+# We also verify that the guessed DEPS_DIR is actually named `deps`,
+# to rule out any situation where we found a `rabbitmq-components.mk`
+# in an upper directory merely by coincidence.
+
+possible_deps_dir_1 = $(abspath ..)
+possible_deps_dir_2 = $(abspath ../../..)
+
+ifeq ($(notdir $(possible_deps_dir_1)),deps)
+ifneq ($(wildcard $(possible_deps_dir_1)/../rabbitmq-components.mk),)
+deps_dir_overriden = 1
+DEPS_DIR ?= $(possible_deps_dir_1)
+DISABLE_DISTCLEAN = 1
+endif
+endif
+
+ifeq ($(deps_dir_overriden),)
+ifeq ($(notdir $(possible_deps_dir_2)),deps)
+ifneq ($(wildcard $(possible_deps_dir_2)/../rabbitmq-components.mk),)
+deps_dir_overriden = 1
+DEPS_DIR ?= $(possible_deps_dir_2)
+DISABLE_DISTCLEAN = 1
+endif
+endif
+endif
+
+ifneq ($(wildcard UMBRELLA.md),)
+DISABLE_DISTCLEAN = 1
+endif
+
+# We disable `make distclean` so $(DEPS_DIR) is not accidentally removed.
+
+ifeq ($(DISABLE_DISTCLEAN),1)
+ifneq ($(filter distclean distclean-deps,$(MAKECMDGOALS)),)
+SKIP_DEPS = 1
+endif
+endif
diff --git a/deps/rabbitmq_stomp/src/Elixir.RabbitMQ.CLI.Ctl.Commands.ListStompConnectionsCommand.erl b/deps/rabbitmq_stomp/src/Elixir.RabbitMQ.CLI.Ctl.Commands.ListStompConnectionsCommand.erl
new file mode 100644
index 0000000000..d26615e99f
--- /dev/null
+++ b/deps/rabbitmq_stomp/src/Elixir.RabbitMQ.CLI.Ctl.Commands.ListStompConnectionsCommand.erl
@@ -0,0 +1,95 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at https://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+-module('Elixir.RabbitMQ.CLI.Ctl.Commands.ListStompConnectionsCommand').
+
+-include("rabbit_stomp.hrl").
+
+-behaviour('Elixir.RabbitMQ.CLI.CommandBehaviour').
+
+-export([formatter/0,
+ scopes/0,
+ switches/0,
+ aliases/0,
+ usage/0,
+ usage_additional/0,
+ usage_doc_guides/0,
+ banner/2,
+ validate/2,
+ merge_defaults/2,
+ run/2,
+ output/2,
+ description/0,
+ help_section/0]).
+
+formatter() -> 'Elixir.RabbitMQ.CLI.Formatters.Table'.
+
+scopes() -> [ctl, diagnostics].
+
+switches() -> [{verbose, boolean}].
+aliases() -> [{'V', verbose}].
+
+description() -> <<"Lists STOMP connections on the target node">>.
+
+help_section() ->
+ {plugin, stomp}.
+
+validate(Args, _) ->
+ case 'Elixir.RabbitMQ.CLI.Ctl.InfoKeys':validate_info_keys(Args,
+ ?INFO_ITEMS) of
+ {ok, _} -> ok;
+ Error -> Error
+ end.
+
+merge_defaults([], Opts) ->
+ merge_defaults([<<"session_id">>, <<"conn_name">>], Opts);
+merge_defaults(Args, Opts) ->
+ {Args, maps:merge(#{verbose => false}, Opts)}.
+
+usage() ->
+ <<"list_stomp_connections [<column> ...]">>.
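+
+%% For example, to list a few specific columns (any ?INFO_ITEMS key works):
+%%   rabbitmqctl list_stomp_connections conn_name session_id peer_host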
+
+usage_additional() ->
+ Prefix = <<" must be one of ">>,
+ InfoItems = 'Elixir.Enum':join(lists:usort(?INFO_ITEMS), <<", ">>),
+ [
+ {<<"<column>">>, <<Prefix/binary, InfoItems/binary>>}
+ ].
+
+usage_doc_guides() ->
+ [?STOMP_GUIDE_URL].
+
+run(Args, #{node := NodeName,
+ timeout := Timeout,
+ verbose := Verbose}) ->
+ InfoKeys = case Verbose of
+ true -> ?INFO_ITEMS;
+ false -> 'Elixir.RabbitMQ.CLI.Ctl.InfoKeys':prepare_info_keys(Args)
+ end,
+ Nodes = 'Elixir.RabbitMQ.CLI.Core.Helpers':nodes_in_cluster(NodeName),
+
+ 'Elixir.RabbitMQ.CLI.Ctl.RpcStream':receive_list_items(
+ NodeName,
+ rabbit_stomp,
+ emit_connection_info_all,
+ [Nodes, InfoKeys],
+ Timeout,
+ InfoKeys,
+ length(Nodes)).
+
+banner(_, _) -> <<"Listing STOMP connections ...">>.
+
+output(Result, _Opts) ->
+ 'Elixir.RabbitMQ.CLI.DefaultOutput':output(Result).
diff --git a/deps/rabbitmq_stomp/src/rabbit_stomp.erl b/deps/rabbitmq_stomp/src/rabbit_stomp.erl
new file mode 100644
index 0000000000..449c2ef92f
--- /dev/null
+++ b/deps/rabbitmq_stomp/src/rabbit_stomp.erl
@@ -0,0 +1,131 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at https://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_stomp).
+
+-include("rabbit_stomp.hrl").
+
+-behaviour(application).
+-export([start/2, stop/1]).
+-export([parse_default_user/2]).
+-export([connection_info_local/1,
+ emit_connection_info_local/3,
+ emit_connection_info_all/4,
+ list/0,
+ close_all_client_connections/1]).
+
+-define(DEFAULT_CONFIGURATION,
+ #stomp_configuration{
+ default_login = undefined,
+ default_passcode = undefined,
+ implicit_connect = false,
+ ssl_cert_login = false}).
+
+start(normal, []) ->
+ Config = parse_configuration(),
+ Listeners = parse_listener_configuration(),
+ Result = rabbit_stomp_sup:start_link(Listeners, Config),
+ EMPid = case rabbit_event:start_link() of
+ {ok, Pid} -> Pid;
+ {error, {already_started, Pid}} -> Pid
+ end,
+ gen_event:add_handler(EMPid, rabbit_stomp_internal_event_handler, []),
+ Result.
+
+stop(_) ->
+ rabbit_stomp_sup:stop_listeners().
+
+-spec close_all_client_connections(string() | binary()) -> {'ok', non_neg_integer()}.
+close_all_client_connections(Reason) ->
+ Connections = list(),
+ [rabbit_stomp_reader:close_connection(Pid, Reason) || Pid <- Connections],
+ {ok, length(Connections)}.
+
+emit_connection_info_all(Nodes, Items, Ref, AggregatorPid) ->
+ Pids = [spawn_link(Node, rabbit_stomp, emit_connection_info_local,
+ [Items, Ref, AggregatorPid])
+ || Node <- Nodes],
+ rabbit_control_misc:await_emitters_termination(Pids),
+ ok.
+
+emit_connection_info_local(Items, Ref, AggregatorPid) ->
+ rabbit_control_misc:emitting_map_with_exit_handler(
+ AggregatorPid, Ref, fun(Pid) ->
+ rabbit_stomp_reader:info(Pid, Items)
+ end,
+ list()).
+
+connection_info_local(Items) ->
+ Connections = list(),
+ [rabbit_stomp_reader:info(Pid, Items) || Pid <- Connections].
+
+parse_listener_configuration() ->
+ {ok, Listeners} = application:get_env(tcp_listeners),
+ {ok, SslListeners} = application:get_env(ssl_listeners),
+ {Listeners, SslListeners}.
+
+parse_configuration() ->
+ {ok, UserConfig} = application:get_env(default_user),
+ Conf0 = parse_default_user(UserConfig, ?DEFAULT_CONFIGURATION),
+ {ok, SSLLogin} = application:get_env(ssl_cert_login),
+ {ok, ImplicitConnect} = application:get_env(implicit_connect),
+ Conf = Conf0#stomp_configuration{ssl_cert_login = SSLLogin,
+ implicit_connect = ImplicitConnect},
+ report_configuration(Conf),
+ Conf.
+
+parse_default_user([], Configuration) ->
+ Configuration;
+parse_default_user([{login, Login} | Rest], Configuration) ->
+ parse_default_user(Rest, Configuration#stomp_configuration{
+ default_login = Login});
+parse_default_user([{passcode, Passcode} | Rest], Configuration) ->
+ parse_default_user(Rest, Configuration#stomp_configuration{
+ default_passcode = Passcode});
+parse_default_user([Unknown | Rest], Configuration) ->
+ rabbit_log:warning("rabbit_stomp: ignoring invalid default_user "
+ "configuration option: ~p~n", [Unknown]),
+ parse_default_user(Rest, Configuration).
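+
+%% For illustration (not part of the original module): the default_user
+%% environment value is a proplist, so
+%%
+%%   parse_default_user([{login, "guest"}, {passcode, "guest"}],
+%%                      ?DEFAULT_CONFIGURATION)
+%%
+%% yields a #stomp_configuration{} with default_login = "guest" and
+%% default_passcode = "guest"; unrecognised options are logged and
+%% skipped.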
+
+report_configuration(#stomp_configuration{
+ default_login = Login,
+ implicit_connect = ImplicitConnect,
+ ssl_cert_login = SSLCertLogin}) ->
+ case Login of
+ undefined -> ok;
+ _ -> rabbit_log:info("rabbit_stomp: default user '~s' "
+ "enabled~n", [Login])
+ end,
+
+ case ImplicitConnect of
+ true -> rabbit_log:info("rabbit_stomp: implicit connect enabled~n");
+ false -> ok
+ end,
+
+ case SSLCertLogin of
+ true -> rabbit_log:info("rabbit_stomp: ssl_cert_login enabled~n");
+ false -> ok
+ end,
+
+ ok.
+
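+%% Walks the supervision hierarchy (STOMP listener sup -> ranch
+%% listener sup -> ranch connection sup -> per-connection client sup),
+%% collecting the pid of every rabbit_stomp_reader, i.e. one pid per
+%% client connection.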
+list() ->
+ [Client
+ || {_, ListSupPid, _, _} <- supervisor2:which_children(rabbit_stomp_sup),
+ {_, RanchSup, supervisor, _} <- supervisor2:which_children(ListSupPid),
+ {ranch_conns_sup, ConnSup, _, _} <- supervisor:which_children(RanchSup),
+ {_, CliSup, _, _} <- supervisor:which_children(ConnSup),
+ {rabbit_stomp_reader, Client, _, _} <- supervisor:which_children(CliSup)].
diff --git a/deps/rabbitmq_stomp/src/rabbit_stomp_client_sup.erl b/deps/rabbitmq_stomp/src/rabbit_stomp_client_sup.erl
new file mode 100644
index 0000000000..d40e00f811
--- /dev/null
+++ b/deps/rabbitmq_stomp/src/rabbit_stomp_client_sup.erl
@@ -0,0 +1,50 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at https://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_stomp_client_sup).
+-behaviour(supervisor2).
+-behaviour(ranch_protocol).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-export([start_link/4, init/1]).
+
+start_link(Ref, _Sock, _Transport, Configuration) ->
+ {ok, SupPid} = supervisor2:start_link(?MODULE, []),
+ {ok, HelperPid} =
+ supervisor2:start_child(SupPid,
+ {rabbit_stomp_heartbeat_sup,
+ {rabbit_connection_helper_sup, start_link, []},
+ intrinsic, infinity, supervisor,
+ [rabbit_connection_helper_sup]}),
+
+    %% We want the reader to be intrinsic (supervisor2's variant of
+    %% transient) since when it exits normally the processor may have
+    %% some work still to do (and the reader tells the processor to
+    %% exit). However, if the reader terminates abnormally then we want
+    %% to take everything down.
+ {ok, ReaderPid} = supervisor2:start_child(
+ SupPid,
+ {rabbit_stomp_reader,
+ {rabbit_stomp_reader,
+ start_link, [HelperPid, Ref, Configuration]},
+ intrinsic, ?WORKER_WAIT, worker,
+ [rabbit_stomp_reader]}),
+
+ {ok, SupPid, ReaderPid}.
+
+init([]) ->
+ {ok, {{one_for_all, 0, 1}, []}}.
+
diff --git a/deps/rabbitmq_stomp/src/rabbit_stomp_connection_info.erl b/deps/rabbitmq_stomp/src/rabbit_stomp_connection_info.erl
new file mode 100644
index 0000000000..e1562796e3
--- /dev/null
+++ b/deps/rabbitmq_stomp/src/rabbit_stomp_connection_info.erl
@@ -0,0 +1,25 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at https://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2018-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+-module(rabbit_stomp_connection_info).
+
+%% Note: this is necessary to prevent code:get_object_code from
+%% backing up due to a missing module. See VESC-888.
+
+%% API
+-export([additional_authn_params/4]).
+
+additional_authn_params(_Creds, _VHost, _Pid, _Infos) ->
+ [].
diff --git a/deps/rabbitmq_stomp/src/rabbit_stomp_frame.erl b/deps/rabbitmq_stomp/src/rabbit_stomp_frame.erl
new file mode 100644
index 0000000000..6b91dc3748
--- /dev/null
+++ b/deps/rabbitmq_stomp/src/rabbit_stomp_frame.erl
@@ -0,0 +1,266 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at https://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+%% stomp_frame implements the STOMP framing protocol "version 1.0", as
+%% per https://stomp.codehaus.org/Protocol
+
+-module(rabbit_stomp_frame).
+
+-include("rabbit_stomp_frame.hrl").
+-include("rabbit_stomp_headers.hrl").
+
+-export([parse/2, initial_state/0]).
+-export([header/2, header/3,
+ boolean_header/2, boolean_header/3,
+ integer_header/2, integer_header/3,
+ binary_header/2, binary_header/3]).
+-export([serialize/1, serialize/2]).
+
+initial_state() -> none.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% STOMP 1.1 frames basic syntax
+%% Rabbit modifications:
+%% o CR LF is equivalent to LF in all element terminators (eol).
+%% o Escape codes for header names and values include \r for CR;
+%%   a bare (unescaped) CR is not allowed.
+%% o Header names and values are not limited to UTF-8 strings.
+%% o Header values may contain unescaped colons
+%%
+%% frame_seq ::= *(noise frame)
+%% noise ::= *(NUL | eol)
+%% eol ::= LF | CR LF
+%% frame ::= cmd hdrs body NUL
+%% body ::= *OCTET
+%% cmd ::= 1*NOTEOL eol
+%% hdrs ::= *hdr eol
+%% hdr ::= hdrname COLON hdrvalue eol
+%% hdrname ::= 1*esc_char
+%% hdrvalue ::= *esc_char
+%% esc_char ::= HDROCT | BACKSLASH ESCCODE
+%%
+%% Terms in CAPS all represent sets (alternatives) of single octets.
+%% They are defined here using a small extension of BNF, minus (-):
+%%
+%% term1 - term2 denotes any of the possibilities in term1
+%% excluding those in term2.
+%% In this grammar minus is only used for sets of single octets.
+%%
+%% OCTET ::= '00'x..'FF'x % any octet
+%% NUL ::= '00'x % the zero octet
+%% LF ::= '\n' % '0a'x newline or linefeed
+%% CR ::= '\r' % '0d'x carriage return
+%% NOTEOL ::= OCTET - (CR | LF) % any octet except CR or LF
+%% BACKSLASH ::= '\\' % '5c'x
+%% ESCCODE ::= 'c' | 'n' | 'r' | BACKSLASH
+%% COLON ::= ':'
+%% HDROCT ::= NOTEOL - (COLON | BACKSLASH)
+%% % octets allowed in a header
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
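+
+%% For illustration (an example, not part of the grammar): a minimal
+%% SEND frame conforming to the syntax above is the octet sequence
+%%
+%%   <<"SEND\ndestination:/queue/a\n\nhello", 0>>
+%%
+%% i.e. a command line, one header, an empty line, the body and the
+%% NUL terminator.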
+
+%% explicit frame characters
+-define(NUL, 0).
+-define(CR, $\r).
+-define(LF, $\n).
+-define(BSL, $\\).
+-define(COLON, $:).
+
+%% header escape codes
+-define(LF_ESC, $n).
+-define(BSL_ESC, $\\).
+-define(COLON_ESC, $c).
+-define(CR_ESC, $r).
+
+%% parser state
+-record(state, {acc, cmd, hdrs, hdrname}).
+
+parse(Content, {resume, Continuation}) -> Continuation(Content);
+parse(Content, none ) -> parser(Content, noframe, #state{}).
+
+more(Continuation) -> {more, {resume, Continuation}}.
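+
+%% A sketch of driving the parser incrementally (illustrative only;
+%% here the frame happens to be split across two reads):
+%%
+%%   {more, Resume} = parse(<<"SEND\ndest">>, initial_state()),
+%%   {ok, #stomp_frame{command = "SEND",
+%%                     headers = [{"destination", "/queue/a"}],
+%%                     body_iolist = [<<"hello">>]}, <<>>} =
+%%       parse(<<"ination:/queue/a\n\nhello", 0>>, Resume).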
+
+%% Single-function parser: Term :: noframe | command | headers | hdrname | hdrvalue
+%% general more and line-end detection
+parser(<<>>, Term , State) -> more(fun(Rest) -> parser(Rest, Term, State) end);
+parser(<<?CR>>, Term , State) -> more(fun(Rest) -> parser(<<?CR, Rest/binary>>, Term, State) end);
+parser(<<?CR, ?LF, Rest/binary>>, Term , State) -> parser(<<?LF, Rest/binary>>, Term, State);
+parser(<<?CR, Ch:8, _Rest/binary>>, Term , _State) -> {error, {unexpected_chars(Term), [?CR, Ch]}};
+%% escape processing (only in hdrname and hdrvalue terms)
+parser(<<?BSL>>, Term , State) -> more(fun(Rest) -> parser(<<?BSL, Rest/binary>>, Term, State) end);
+parser(<<?BSL, Ch:8, Rest/binary>>, Term , State)
+ when Term == hdrname;
+ Term == hdrvalue -> unescape(Ch, fun(Ech) -> parser(Rest, Term, accum(Ech, State)) end);
+%% inter-frame noise
+parser(<<?NUL, Rest/binary>>, noframe , State) -> parser(Rest, noframe, State);
+parser(<<?LF, Rest/binary>>, noframe , State) -> parser(Rest, noframe, State);
+%% detect transitions
+parser( Rest, noframe , State) -> goto(noframe, command, Rest, State);
+parser(<<?LF, Rest/binary>>, command , State) -> goto(command, headers, Rest, State);
+parser(<<?LF, Rest/binary>>, headers , State) -> goto(headers, body, Rest, State);
+parser( Rest, headers , State) -> goto(headers, hdrname, Rest, State);
+parser(<<?COLON, Rest/binary>>, hdrname , State) -> goto(hdrname, hdrvalue, Rest, State);
+parser(<<?LF, Rest/binary>>, hdrname , State) -> goto(hdrname, headers, Rest, State);
+parser(<<?LF, Rest/binary>>, hdrvalue, State) -> goto(hdrvalue, headers, Rest, State);
+%% accumulate
+parser(<<Ch:8, Rest/binary>>, Term , State) -> parser(Rest, Term, accum(Ch, State)).
+
+%% state transitions
+goto(noframe, command, Rest, State ) -> parser(Rest, command, State#state{acc = []});
+goto(command, headers, Rest, State = #state{acc = Acc} ) -> parser(Rest, headers, State#state{cmd = lists:reverse(Acc), hdrs = []});
+goto(headers, body, Rest, #state{cmd = Cmd, hdrs = Hdrs}) -> parse_body(Rest, #stomp_frame{command = Cmd, headers = Hdrs});
+goto(headers, hdrname, Rest, State ) -> parser(Rest, hdrname, State#state{acc = []});
+goto(hdrname, hdrvalue, Rest, State = #state{acc = Acc} ) -> parser(Rest, hdrvalue, State#state{acc = [], hdrname = lists:reverse(Acc)});
+goto(hdrname, headers, _Rest, #state{acc = Acc} ) -> {error, {header_no_value, lists:reverse(Acc)}}; % badly formed header -- fatal error
+goto(hdrvalue, headers, Rest, State = #state{acc = Acc, hdrs = Headers, hdrname = HdrName}) ->
+ parser(Rest, headers, State#state{hdrs = insert_header(Headers, HdrName, lists:reverse(Acc))}).
+
+%% error atom
+unexpected_chars(noframe) -> unexpected_chars_between_frames;
+unexpected_chars(command) -> unexpected_chars_in_command;
+unexpected_chars(hdrname) -> unexpected_chars_in_header;
+unexpected_chars(hdrvalue) -> unexpected_chars_in_header;
+unexpected_chars(_Term) -> unexpected_chars.
+
+%% general accumulation
+accum(Ch, State = #state{acc = Acc}) -> State#state{acc = [Ch | Acc]}.
+
+%% resolve escapes (with error processing)
+unescape(?LF_ESC, Fun) -> Fun(?LF);
+unescape(?BSL_ESC, Fun) -> Fun(?BSL);
+unescape(?COLON_ESC, Fun) -> Fun(?COLON);
+unescape(?CR_ESC, Fun) -> Fun(?CR);
+unescape(Ch, _Fun) -> {error, {bad_escape, [?BSL, Ch]}}.
+
+%% insert a header unless one with the same name has already been seen
+insert_header(Headers, Name, Value) ->
+ case lists:keymember(Name, 1, Headers) of
+ true -> Headers; % first header only
+ false -> [{Name, Value} | Headers]
+ end.
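+
+%% Illustration: insert_header([{"id", "1"}], "id", "2") returns
+%% [{"id", "1"}] unchanged -- repeated headers keep their first value.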
+
+parse_body(Content, Frame = #stomp_frame{command = Command}) ->
+ case Command of
+ "SEND" -> parse_body(Content, Frame, [], integer_header(Frame, ?HEADER_CONTENT_LENGTH, unknown));
+ _ -> parse_body(Content, Frame, [], unknown)
+ end.
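+
+%% Only SEND frames honour a content-length header (so their bodies may
+%% contain NUL octets); every other frame's body ends at the first NUL
+%% found in the stream.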
+
+parse_body(Content, Frame, Chunks, unknown) ->
+ parse_body2(Content, Frame, Chunks, case firstnull(Content) of
+ -1 -> {more, unknown};
+ Pos -> {done, Pos}
+ end);
+parse_body(Content, Frame, Chunks, Remaining) ->
+ Size = byte_size(Content),
+ parse_body2(Content, Frame, Chunks, case Remaining >= Size of
+ true -> {more, Remaining - Size};
+ false -> {done, Remaining}
+ end).
+
+parse_body2(Content, Frame, Chunks, {more, Left}) ->
+ Chunks1 = finalize_chunk(Content, Chunks),
+ more(fun(Rest) -> parse_body(Rest, Frame, Chunks1, Left) end);
+parse_body2(Content, Frame, Chunks, {done, Pos}) ->
+ <<Chunk:Pos/binary, 0, Rest/binary>> = Content,
+ Body = lists:reverse(finalize_chunk(Chunk, Chunks)),
+ {ok, Frame#stomp_frame{body_iolist = Body}, Rest}.
+
+finalize_chunk(<<>>, Chunks) -> Chunks;
+finalize_chunk(Chunk, Chunks) -> [Chunk | Chunks].
+
+default_value({ok, Value}, _DefaultValue) -> Value;
+default_value(not_found, DefaultValue) -> DefaultValue.
+
+header(#stomp_frame{headers = Headers}, Key) ->
+ case lists:keysearch(Key, 1, Headers) of
+ {value, {_, Str}} -> {ok, Str};
+ _ -> not_found
+ end.
+
+header(F, K, D) -> default_value(header(F, K), D).
+
+boolean_header(#stomp_frame{headers = Headers}, Key) ->
+ case lists:keysearch(Key, 1, Headers) of
+ {value, {_, "true"}} -> {ok, true};
+ {value, {_, "false"}} -> {ok, false};
+ %% some Python clients serialize True/False as "True"/"False"
+ {value, {_, "True"}} -> {ok, true};
+ {value, {_, "False"}} -> {ok, false};
+ _ -> not_found
+ end.
+
+boolean_header(F, K, D) -> default_value(boolean_header(F, K), D).
+
+internal_integer_header(Headers, Key) ->
+ case lists:keysearch(Key, 1, Headers) of
+ {value, {_, Str}} -> {ok, list_to_integer(string:strip(Str))};
+ _ -> not_found
+ end.
+
+integer_header(#stomp_frame{headers = Headers}, Key) ->
+ internal_integer_header(Headers, Key).
+
+integer_header(F, K, D) -> default_value(integer_header(F, K), D).
+
+binary_header(F, K) ->
+ case header(F, K) of
+ {ok, Str} -> {ok, list_to_binary(Str)};
+ not_found -> not_found
+ end.
+
+binary_header(F, K, D) -> default_value(binary_header(F, K), D).
+
+serialize(Frame) ->
+ serialize(Frame, true).
+
+%% second argument controls whether a trailing linefeed
+%% character should be added, see rabbitmq/rabbitmq-stomp#39.
+serialize(Frame, true) ->
+ serialize(Frame, false) ++ [?LF];
+serialize(#stomp_frame{command = Command,
+ headers = Headers,
+ body_iolist = BodyFragments}, false) ->
+ Len = iolist_size(BodyFragments),
+ [Command, ?LF,
+ lists:map(fun serialize_header/1,
+ lists:keydelete(?HEADER_CONTENT_LENGTH, 1, Headers)),
+ if
+ Len > 0 -> [?HEADER_CONTENT_LENGTH ++ ":", integer_to_list(Len), ?LF];
+ true -> []
+ end,
+ ?LF, BodyFragments, 0].
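+
+%% Illustration (not part of the original module): serializing the
+%% example frame above without the trailing LF yields an iolist that
+%% flattens to
+%%
+%%   <<"SEND\ndestination:/queue/a\ncontent-length:5\n\nhello", 0>>
+%%
+%% -- note that content-length is recomputed from the body size.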
+
+serialize_header({K, V}) when is_integer(V) -> hdr(escape(K), integer_to_list(V));
+serialize_header({K, V}) when is_boolean(V) -> hdr(escape(K), boolean_to_list(V));
+serialize_header({K, V}) when is_list(V) -> hdr(escape(K), escape(V)).
+
+boolean_to_list(true) -> "true";
+boolean_to_list(_) -> "false".
+
+hdr(K, V) -> [K, ?COLON, V, ?LF].
+
+escape(Str) -> [escape1(Ch) || Ch <- Str].
+
+escape1(?COLON) -> [?BSL, ?COLON_ESC];
+escape1(?BSL) -> [?BSL, ?BSL_ESC];
+escape1(?LF) -> [?BSL, ?LF_ESC];
+escape1(?CR) -> [?BSL, ?CR_ESC];
+escape1(Ch) -> Ch.
+
+firstnull(Content) -> firstnull(Content, 0).
+
+firstnull(<<>>, _N) -> -1;
+firstnull(<<0, _Rest/binary>>, N) -> N;
+firstnull(<<_Ch, Rest/binary>>, N) -> firstnull(Rest, N+1).
diff --git a/deps/rabbitmq_stomp/src/rabbit_stomp_internal_event_handler.erl b/deps/rabbitmq_stomp/src/rabbit_stomp_internal_event_handler.erl
new file mode 100644
index 0000000000..47331312ce
--- /dev/null
+++ b/deps/rabbitmq_stomp/src/rabbit_stomp_internal_event_handler.erl
@@ -0,0 +1,46 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at https://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_stomp_internal_event_handler).
+
+-behaviour(gen_event).
+
+-export([init/1, handle_event/2, handle_call/2, handle_info/2, terminate/2, code_change/3]).
+
+-import(rabbit_misc, [pget/2]).
+
+init([]) ->
+ {ok, []}.
+
+handle_event({event, maintenance_connections_closed, _Info, _, _}, State) ->
+    %% the node is entering maintenance mode, so close all local STOMP client connections
+ {ok, NConnections} = rabbit_stomp:close_all_client_connections("node is being put into maintenance mode"),
+ rabbit_log:alert("Closed ~b local STOMP client connections", [NConnections]),
+ {ok, State};
+handle_event(_Event, State) ->
+ {ok, State}.
+
+handle_call(_Request, State) ->
+ {ok, State}.
+
+handle_info(_Info, State) ->
+ {ok, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
diff --git a/deps/rabbitmq_stomp/src/rabbit_stomp_processor.erl b/deps/rabbitmq_stomp/src/rabbit_stomp_processor.erl
new file mode 100644
index 0000000000..570a7a146a
--- /dev/null
+++ b/deps/rabbitmq_stomp/src/rabbit_stomp_processor.erl
@@ -0,0 +1,1220 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at https://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_stomp_processor).
+
+-export([initial_state/2, process_frame/2, flush_and_die/1]).
+-export([flush_pending_receipts/3,
+ handle_exit/3,
+ cancel_consumer/2,
+ send_delivery/5]).
+
+-export([adapter_name/1]).
+-export([info/2]).
+
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include_lib("amqp_client/include/rabbit_routing_prefixes.hrl").
+-include("rabbit_stomp_frame.hrl").
+-include("rabbit_stomp.hrl").
+-include("rabbit_stomp_headers.hrl").
+
+-record(proc_state, {session_id, channel, connection, subscriptions,
+ version, start_heartbeat_fun, pending_receipts,
+ config, route_state, reply_queues, frame_transformer,
+ adapter_info, send_fun, ssl_login_name, peer_addr,
+ %% see rabbitmq/rabbitmq-stomp#39
+ trailing_lf, auth_mechanism, auth_login,
+ default_topic_exchange, default_nack_requeue}).
+
+-record(subscription, {dest_hdr, ack_mode, multi_ack, description}).
+
+-define(FLUSH_TIMEOUT, 60000).
+
+adapter_name(State) ->
+ #proc_state{adapter_info = #amqp_adapter_info{name = Name}} = State,
+ Name.
+
+%%----------------------------------------------------------------------------
+
+-spec initial_state(
+ #stomp_configuration{},
+ {SendFun, AdapterInfo, SSLLoginName, PeerAddr})
+ -> #proc_state{}
+ when SendFun :: fun((atom(), binary()) -> term()),
+ AdapterInfo :: #amqp_adapter_info{},
+ SSLLoginName :: atom() | binary(),
+ PeerAddr :: inet:ip_address().
+
+-type process_frame_result() ::
+ {ok, term(), #proc_state{}} |
+ {stop, term(), #proc_state{}}.
+
+-spec process_frame(#stomp_frame{}, #proc_state{}) ->
+ process_frame_result().
+
+-spec flush_and_die(#proc_state{}) -> #proc_state{}.
+
+-spec command({Command, Frame}, State) -> process_frame_result()
+ when Command :: string(),
+ Frame :: #stomp_frame{},
+ State :: #proc_state{}.
+
+-type process_fun() :: fun((#proc_state{}) ->
+ {ok, #stomp_frame{}, #proc_state{}} |
+ {error, string(), string(), #proc_state{}} |
+ {stop, term(), #proc_state{}}).
+-spec process_request(process_fun(), fun((#proc_state{}) -> #proc_state{}), #proc_state{}) ->
+ process_frame_result().
+
+-spec flush_pending_receipts(DeliveryTag, IsMulti, State) -> State
+ when State :: #proc_state{},
+ DeliveryTag :: term(),
+ IsMulti :: boolean().
+
+-spec handle_exit(From, Reason, State) -> unknown_exit | {stop, Reason, State}
+ when State :: #proc_state{},
+ From :: pid(),
+ Reason :: term().
+
+-spec cancel_consumer(binary(), #proc_state{}) -> process_frame_result().
+
+-spec send_delivery(#'basic.deliver'{}, term(), term(), term(),
+ #proc_state{}) -> #proc_state{}.
+
+%%----------------------------------------------------------------------------
+
+
+%%----------------------------------------------------------------------------
+%% Public API
+%%----------------------------------------------------------------------------
+
+process_frame(Frame = #stomp_frame{command = Command}, State) ->
+ command({Command, Frame}, State).
+
+flush_and_die(State) ->
+ close_connection(State).
+
+info(session_id, #proc_state{session_id = Val}) ->
+ Val;
+info(channel, #proc_state{channel = Val}) -> Val;
+info(version, #proc_state{version = Val}) -> Val;
+info(implicit_connect, #proc_state{config = #stomp_configuration{implicit_connect = Val}}) -> Val;
+info(auth_login, #proc_state{auth_login = Val}) -> Val;
+info(auth_mechanism, #proc_state{auth_mechanism = Val}) -> Val;
+info(peer_addr, #proc_state{peer_addr = Val}) -> Val;
+info(host, #proc_state{adapter_info = #amqp_adapter_info{host = Val}}) -> Val;
+info(port, #proc_state{adapter_info = #amqp_adapter_info{port = Val}}) -> Val;
+info(peer_host, #proc_state{adapter_info = #amqp_adapter_info{peer_host = Val}}) -> Val;
+info(peer_port, #proc_state{adapter_info = #amqp_adapter_info{peer_port = Val}}) -> Val;
+info(protocol, #proc_state{adapter_info = #amqp_adapter_info{protocol = Val}}) ->
+ case Val of
+ {Proto, Version} -> {Proto, rabbit_data_coercion:to_binary(Version)};
+ Other -> Other
+ end;
+info(channels, PState) -> additional_info(channels, PState);
+info(channel_max, PState) -> additional_info(channel_max, PState);
+info(frame_max, PState) -> additional_info(frame_max, PState);
+info(client_properties, PState) -> additional_info(client_properties, PState);
+info(ssl, PState) -> additional_info(ssl, PState);
+info(ssl_protocol, PState) -> additional_info(ssl_protocol, PState);
+info(ssl_key_exchange, PState) -> additional_info(ssl_key_exchange, PState);
+info(ssl_cipher, PState) -> additional_info(ssl_cipher, PState);
+info(ssl_hash, PState) -> additional_info(ssl_hash, PState).
+
+initial_state(Configuration,
+ {SendFun, AdapterInfo0 = #amqp_adapter_info{additional_info = Extra},
+ SSLLoginName, PeerAddr}) ->
+ %% STOMP connections use exactly one channel. The frame max is not
+ %% applicable and there is no way to know what client is used.
+ AdapterInfo = AdapterInfo0#amqp_adapter_info{additional_info=[
+ {channels, 1},
+ {channel_max, 1},
+ {frame_max, 0},
+ %% TODO: can we use a header to make it possible for clients
+ %% to override this value?
+ {client_properties, [{<<"product">>, longstr, <<"STOMP client">>}]}
+ |Extra]},
+ #proc_state {
+ send_fun = SendFun,
+ adapter_info = AdapterInfo,
+ ssl_login_name = SSLLoginName,
+ peer_addr = PeerAddr,
+ session_id = none,
+ channel = none,
+ connection = none,
+ subscriptions = #{},
+ version = none,
+ pending_receipts = undefined,
+ config = Configuration,
+ route_state = rabbit_routing_util:init_state(),
+ reply_queues = #{},
+ frame_transformer = undefined,
+ trailing_lf = application:get_env(rabbitmq_stomp, trailing_lf, true),
+ default_topic_exchange = application:get_env(rabbitmq_stomp, default_topic_exchange, <<"amq.topic">>),
+ default_nack_requeue = application:get_env(rabbitmq_stomp, default_nack_requeue, true)}.
+
+
+command({"STOMP", Frame}, State) ->
+ process_connect(no_implicit, Frame, State);
+
+command({"CONNECT", Frame}, State) ->
+ process_connect(no_implicit, Frame, State);
+
+command(Request, State = #proc_state{channel = none,
+ config = #stomp_configuration{
+ implicit_connect = true}}) ->
+ {ok, State1 = #proc_state{channel = Ch}, _} =
+ process_connect(implicit, #stomp_frame{headers = []}, State),
+ case Ch of
+ none -> {stop, normal, State1};
+ _ -> command(Request, State1)
+ end;
+
+command(_Request, State = #proc_state{channel = none,
+ config = #stomp_configuration{
+ implicit_connect = false}}) ->
+ {ok, send_error("Illegal command",
+ "You must log in using CONNECT first",
+ State), none};
+
+command({Command, Frame}, State = #proc_state{frame_transformer = FT}) ->
+ Frame1 = FT(Frame),
+ process_request(
+ fun(StateN) ->
+ case validate_frame(Command, Frame1, StateN) of
+ R = {error, _, _, _} -> R;
+ _ -> handle_frame(Command, Frame1, StateN)
+ end
+ end,
+ fun(StateM) -> ensure_receipt(Frame1, StateM) end,
+ State).
+
+cancel_consumer(Ctag, State) ->
+ process_request(
+ fun(StateN) -> server_cancel_consumer(Ctag, StateN) end,
+ State).
+
+handle_exit(Conn, {shutdown, {server_initiated_close, Code, Explanation}},
+ State = #proc_state{connection = Conn}) ->
+ amqp_death(Code, Explanation, State);
+handle_exit(Conn, {shutdown, {connection_closing,
+ {server_initiated_close, Code, Explanation}}},
+ State = #proc_state{connection = Conn}) ->
+ amqp_death(Code, Explanation, State);
+handle_exit(Conn, Reason, State = #proc_state{connection = Conn}) ->
+ _ = send_error("AMQP connection died", "Reason: ~p", [Reason], State),
+ {stop, {conn_died, Reason}, State};
+
+handle_exit(Ch, {shutdown, {server_initiated_close, Code, Explanation}},
+ State = #proc_state{channel = Ch}) ->
+ amqp_death(Code, Explanation, State);
+
+handle_exit(Ch, Reason, State = #proc_state{channel = Ch}) ->
+    _ = send_error("AMQP channel died", "Reason: ~p", [Reason], State),
+    {stop, {channel_died, Reason}, State};
+handle_exit(_, _, _) -> unknown_exit.
+
+
+process_request(ProcessFun, State) ->
+ process_request(ProcessFun, fun (StateM) -> StateM end, State).
+
+
+process_request(ProcessFun, SuccessFun, State) ->
+ Res = case catch ProcessFun(State) of
+ {'EXIT',
+ {{shutdown,
+ {server_initiated_close, ReplyCode, Explanation}}, _}} ->
+ amqp_death(ReplyCode, Explanation, State);
+ {'EXIT', {amqp_error, access_refused, Msg, _}} ->
+ amqp_death(access_refused, Msg, State);
+ {'EXIT', Reason} ->
+ priv_error("Processing error", "Processing error",
+ Reason, State);
+ Result ->
+ Result
+ end,
+ case Res of
+ {ok, Frame, NewState = #proc_state{connection = Conn}} ->
+ _ = case Frame of
+ none -> ok;
+ _ -> send_frame(Frame, NewState)
+ end,
+ {ok, SuccessFun(NewState), Conn};
+ {error, Message, Detail, NewState = #proc_state{connection = Conn}} ->
+ {ok, send_error(Message, Detail, NewState), Conn};
+ {stop, normal, NewState} ->
+ {stop, normal, SuccessFun(NewState)};
+ {stop, R, NewState} ->
+ {stop, R, NewState}
+ end.
+
+process_connect(Implicit, Frame,
+ State = #proc_state{channel = none,
+ config = Config,
+ ssl_login_name = SSLLoginName,
+ adapter_info = AdapterInfo}) ->
+ process_request(
+ fun(StateN) ->
+ case negotiate_version(Frame) of
+ {ok, Version} ->
+ FT = frame_transformer(Version),
+ Frame1 = FT(Frame),
+ {Auth, {Username, Passwd}} = creds(Frame1, SSLLoginName, Config),
+ {ok, DefaultVHost} = application:get_env(
+ rabbitmq_stomp, default_vhost),
+ {ProtoName, _} = AdapterInfo#amqp_adapter_info.protocol,
+ Res = do_login(
+ Username, Passwd,
+ login_header(Frame1, ?HEADER_HOST, DefaultVHost),
+ login_header(Frame1, ?HEADER_HEART_BEAT, "0,0"),
+ AdapterInfo#amqp_adapter_info{
+ protocol = {ProtoName, Version}}, Version,
+ StateN#proc_state{frame_transformer = FT,
+ auth_mechanism = Auth,
+ auth_login = Username}),
+ case {Res, Implicit} of
+ {{ok, _, StateN1}, implicit} -> ok(StateN1);
+ _ -> Res
+ end;
+ {error, no_common_version} ->
+ error("Version mismatch",
+ "Supported versions are ~s~n",
+ [string:join(?SUPPORTED_VERSIONS, ",")],
+ StateN)
+ end
+ end,
+ State).
+
+creds(_, _, #stomp_configuration{default_login = DefLogin,
+ default_passcode = DefPasscode,
+ force_default_creds = true}) ->
+ {config, {iolist_to_binary(DefLogin), iolist_to_binary(DefPasscode)}};
+creds(Frame, SSLLoginName,
+ #stomp_configuration{default_login = DefLogin,
+ default_passcode = DefPasscode}) ->
+ PasswordCreds = {login_header(Frame, ?HEADER_LOGIN, DefLogin),
+ login_header(Frame, ?HEADER_PASSCODE, DefPasscode)},
+ case {rabbit_stomp_frame:header(Frame, ?HEADER_LOGIN), SSLLoginName} of
+ {not_found, none} -> {config, PasswordCreds};
+ {not_found, SSLName} -> {ssl, {SSLName, none}};
+ _ -> {stomp_headers, PasswordCreds}
+ end.
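+
+%% To summarise the precedence: a 'login' header always wins and yields
+%% {stomp_headers, ...}; with no 'login' header an SSL-derived name (if
+%% any) gives {ssl, {SSLName, none}}; otherwise the configured defaults
+%% are used as {config, ...} -- and force_default_creds bypasses all of
+%% the above.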
+
+login_header(Frame, Key, Default) when is_binary(Default) ->
+ login_header(Frame, Key, binary_to_list(Default));
+login_header(Frame, Key, Default) ->
+ case rabbit_stomp_frame:header(Frame, Key, Default) of
+ undefined -> undefined;
+ Hdr -> list_to_binary(Hdr)
+ end.
+
+%%----------------------------------------------------------------------------
+%% Frame Transformation
+%%----------------------------------------------------------------------------
+
+frame_transformer("1.0") -> fun rabbit_stomp_util:trim_headers/1;
+frame_transformer(_) -> fun(Frame) -> Frame end.
+
+%%----------------------------------------------------------------------------
+%% Frame Validation
+%%----------------------------------------------------------------------------
+
+report_missing_id_header(State) ->
+ error("Missing Header",
+ "Header 'id' is required for durable subscriptions", State).
+
+validate_frame(Command, Frame, State)
+ when Command =:= "SUBSCRIBE" orelse Command =:= "UNSUBSCRIBE" ->
+ Hdr = fun(Name) -> rabbit_stomp_frame:header(Frame, Name) end,
+ case {Hdr(?HEADER_DURABLE), Hdr(?HEADER_PERSISTENT), Hdr(?HEADER_ID)} of
+ {{ok, "true"}, _, not_found} ->
+ report_missing_id_header(State);
+ {_, {ok, "true"}, not_found} ->
+ report_missing_id_header(State);
+ _ ->
+ ok(State)
+ end;
+validate_frame(_Command, _Frame, State) ->
+ ok(State).
+
+%%----------------------------------------------------------------------------
+%% Frame handlers
+%%----------------------------------------------------------------------------
+
+handle_frame("DISCONNECT", _Frame, State) ->
+ {stop, normal, close_connection(State)};
+
+handle_frame("SUBSCRIBE", Frame, State) ->
+ with_destination("SUBSCRIBE", Frame, State, fun do_subscribe/4);
+
+handle_frame("UNSUBSCRIBE", Frame, State) ->
+ ConsumerTag = rabbit_stomp_util:consumer_tag(Frame),
+ cancel_subscription(ConsumerTag, Frame, State);
+
+handle_frame("SEND", Frame, State) ->
+ without_headers(?HEADERS_NOT_ON_SEND, "SEND", Frame, State,
+ fun (_Command, Frame1, State1) ->
+ with_destination("SEND", Frame1, State1, fun do_send/4)
+ end);
+
+handle_frame("ACK", Frame, State) ->
+ ack_action("ACK", Frame, State, fun create_ack_method/3);
+
+handle_frame("NACK", Frame, State) ->
+ ack_action("NACK", Frame, State, fun create_nack_method/3);
+
+handle_frame("BEGIN", Frame, State) ->
+ transactional_action(Frame, "BEGIN", fun begin_transaction/2, State);
+
+handle_frame("COMMIT", Frame, State) ->
+ transactional_action(Frame, "COMMIT", fun commit_transaction/2, State);
+
+handle_frame("ABORT", Frame, State) ->
+ transactional_action(Frame, "ABORT", fun abort_transaction/2, State);
+
+handle_frame(Command, _Frame, State) ->
+ error("Bad command",
+ "Could not interpret command ~p~n",
+ [Command],
+ State).
+
+%%----------------------------------------------------------------------------
+%% Internal helpers for processing frames callbacks
+%%----------------------------------------------------------------------------
+
+ack_action(Command, Frame,
+ State = #proc_state{subscriptions = Subs,
+ channel = Channel,
+ version = Version,
+ default_nack_requeue = DefaultNackRequeue}, MethodFun) ->
+ AckHeader = rabbit_stomp_util:ack_header_name(Version),
+ case rabbit_stomp_frame:header(Frame, AckHeader) of
+ {ok, AckValue} ->
+ case rabbit_stomp_util:parse_message_id(AckValue) of
+ {ok, {ConsumerTag, _SessionId, DeliveryTag}} ->
+ case maps:find(ConsumerTag, Subs) of
+ {ok, Sub} ->
+ Requeue = rabbit_stomp_frame:boolean_header(Frame, "requeue", DefaultNackRequeue),
+ Method = MethodFun(DeliveryTag, Sub, Requeue),
+ case transactional(Frame) of
+ {yes, Transaction} ->
+ extend_transaction(
+ Transaction, {Method}, State);
+ no ->
+ amqp_channel:call(Channel, Method),
+ ok(State)
+ end;
+ error ->
+ error("Subscription not found",
+ "Message with id ~p has no subscription",
+ [AckValue],
+ State)
+ end;
+ _ ->
+ error("Invalid header",
+ "~p must include a valid ~p header~n",
+ [Command, AckHeader],
+ State)
+ end;
+ not_found ->
+ error("Missing header",
+ "~p must include the ~p header~n",
+ [Command, AckHeader],
+ State)
+ end.
+
+server_cancel_consumer(ConsumerTag, State = #proc_state{subscriptions = Subs}) ->
+ case maps:find(ConsumerTag, Subs) of
+ error ->
+ error("Server cancelled unknown subscription",
+ "Consumer tag ~p is not associated with a subscription.~n",
+ [ConsumerTag],
+ State);
+ {ok, Subscription = #subscription{description = Description}} ->
+ Id = case rabbit_stomp_util:tag_to_id(ConsumerTag) of
+ {ok, {_, Id1}} -> Id1;
+ {error, {_, Id1}} -> "Unknown[" ++ Id1 ++ "]"
+ end,
+ _ = send_error_frame("Server cancelled subscription",
+ [{?HEADER_SUBSCRIPTION, Id}],
+                            "The server has cancelled a subscription.~n"
+ "No more messages will be delivered for ~p.~n",
+ [Description],
+ State),
+ tidy_canceled_subscription(ConsumerTag, Subscription,
+ undefined, State)
+ end.
+
+cancel_subscription({error, invalid_prefix}, _Frame, State) ->
+ error("Invalid id",
+ "UNSUBSCRIBE 'id' may not start with ~s~n",
+ [?TEMP_QUEUE_ID_PREFIX],
+ State);
+
+cancel_subscription({error, _}, _Frame, State) ->
+ error("Missing destination or id",
+ "UNSUBSCRIBE must include a 'destination' or 'id' header",
+ State);
+
+cancel_subscription({ok, ConsumerTag, Description}, Frame,
+ State = #proc_state{subscriptions = Subs,
+ channel = Channel}) ->
+ case maps:find(ConsumerTag, Subs) of
+ error ->
+ error("No subscription found",
+ "UNSUBSCRIBE must refer to an existing subscription.~n"
+ "Subscription to ~p not found.~n",
+ [Description],
+ State);
+ {ok, Subscription = #subscription{description = Descr}} ->
+ case amqp_channel:call(Channel,
+ #'basic.cancel'{
+ consumer_tag = ConsumerTag}) of
+ #'basic.cancel_ok'{consumer_tag = ConsumerTag} ->
+ tidy_canceled_subscription(ConsumerTag, Subscription,
+ Frame, State);
+ _ ->
+ error("Failed to cancel subscription",
+ "UNSUBSCRIBE to ~p failed.~n",
+ [Descr],
+ State)
+ end
+ end.
+
+%% Server-initiated cancellations pass 'undefined' instead of a STOMP
+%% frame. In this case we know that the queue was deleted and thus we
+%% don't have to clean it up.
+tidy_canceled_subscription(ConsumerTag, _Subscription,
+ undefined, State = #proc_state{subscriptions = Subs}) ->
+ Subs1 = maps:remove(ConsumerTag, Subs),
+ ok(State#proc_state{subscriptions = Subs1});
+
+%% Client-initiated cancellations pass an actual frame
+tidy_canceled_subscription(ConsumerTag, #subscription{dest_hdr = DestHdr},
+ Frame, State = #proc_state{subscriptions = Subs}) ->
+ Subs1 = maps:remove(ConsumerTag, Subs),
+ {ok, Dest} = rabbit_routing_util:parse_endpoint(DestHdr),
+ maybe_delete_durable_sub(Dest, Frame, State#proc_state{subscriptions = Subs1}).
+
+maybe_delete_durable_sub({topic, Name}, Frame,
+ State = #proc_state{channel = Channel}) ->
+ case rabbit_stomp_util:has_durable_header(Frame) of
+ true ->
+ {ok, Id} = rabbit_stomp_frame:header(Frame, ?HEADER_ID),
+ QName = rabbit_stomp_util:subscription_queue_name(Name, Id, Frame),
+ amqp_channel:call(Channel,
+ #'queue.delete'{queue = list_to_binary(QName),
+ nowait = false}),
+ ok(State);
+ false ->
+ ok(State)
+ end;
+maybe_delete_durable_sub(_Destination, _Frame, State) ->
+ ok(State).
+
+with_destination(Command, Frame, State, Fun) ->
+ case rabbit_stomp_frame:header(Frame, ?HEADER_DESTINATION) of
+ {ok, DestHdr} ->
+ case rabbit_routing_util:parse_endpoint(DestHdr) of
+ {ok, Destination} ->
+ case Fun(Destination, DestHdr, Frame, State) of
+ {error, invalid_endpoint} ->
+ error("Invalid destination",
+ "'~s' is not a valid destination for '~s'~n",
+ [DestHdr, Command],
+ State);
+ {error, {invalid_destination, Msg}} ->
+ error("Invalid destination",
+ "~s",
+ [Msg],
+ State);
+ {error, Reason} ->
+ throw(Reason);
+ Result ->
+ Result
+ end;
+ {error, {invalid_destination, Type, Content}} ->
+ error("Invalid destination",
+ "'~s' is not a valid ~p destination~n",
+ [Content, Type],
+ State);
+ {error, {unknown_destination, Content}} ->
+ error("Unknown destination",
+ "'~s' is not a valid destination.~n"
+ "Valid destination types are: ~s.~n",
+ [Content,
+ string:join(rabbit_routing_util:all_dest_prefixes(),
+ ", ")], State)
+ end;
+ not_found ->
+ error("Missing destination",
+ "~p must include a 'destination' header~n",
+ [Command],
+ State)
+ end.
+
+without_headers([Hdr | Hdrs], Command, Frame, State, Fun) ->
+ case rabbit_stomp_frame:header(Frame, Hdr) of
+ {ok, _} ->
+ error("Invalid header",
+ "'~s' is not allowed on '~s'.~n",
+ [Hdr, Command],
+ State);
+ not_found ->
+ without_headers(Hdrs, Command, Frame, State, Fun)
+ end;
+without_headers([], Command, Frame, State, Fun) ->
+ Fun(Command, Frame, State).
+
+do_login(undefined, _, _, _, _, _, State) ->
+ error("Bad CONNECT", "Missing login or passcode header(s)", State);
+do_login(Username, Passwd, VirtualHost, Heartbeat, AdapterInfo, Version,
+ State = #proc_state{peer_addr = Addr}) ->
+ case start_connection(
+ #amqp_params_direct{username = Username,
+ password = Passwd,
+ virtual_host = VirtualHost,
+ adapter_info = AdapterInfo}, Username, Addr) of
+ {ok, Connection} ->
+ link(Connection),
+ {ok, Channel} = amqp_connection:open_channel(Connection),
+ link(Channel),
+ amqp_channel:enable_delivery_flow_control(Channel),
+ SessionId = rabbit_guid:string(rabbit_guid:gen_secure(), "session"),
+ {SendTimeout, ReceiveTimeout} = ensure_heartbeats(Heartbeat),
+
+ Headers = [{?HEADER_SESSION, SessionId},
+ {?HEADER_HEART_BEAT,
+ io_lib:format("~B,~B", [SendTimeout, ReceiveTimeout])},
+ {?HEADER_VERSION, Version}],
+ ok("CONNECTED",
+ case application:get_env(rabbitmq_stomp, hide_server_info, false) of
+ true -> Headers;
+ false -> [{?HEADER_SERVER, server_header()} | Headers]
+ end,
+ "",
+ State#proc_state{session_id = SessionId,
+ channel = Channel,
+ connection = Connection,
+ version = Version});
+ {error, {auth_failure, _}} ->
+ rabbit_log:warning("STOMP login failed for user ~p~n",
+ [binary_to_list(Username)]),
+ error("Bad CONNECT", "Access refused for user '" ++
+ binary_to_list(Username) ++ "'~n", [], State);
+ {error, not_allowed} ->
+ rabbit_log:warning("STOMP login failed - not_allowed "
+ "(vhost access not allowed)~n"),
+ error("Bad CONNECT", "Virtual host '" ++
+ binary_to_list(VirtualHost) ++
+ "' access denied", State);
+ {error, access_refused} ->
+ rabbit_log:warning("STOMP login failed - access_refused "
+ "(vhost access not allowed)~n"),
+ error("Bad CONNECT", "Virtual host '" ++
+ binary_to_list(VirtualHost) ++
+ "' access denied", State);
+ {error, not_loopback} ->
+ rabbit_log:warning("STOMP login failed - access_refused "
+ "(user must access over loopback)~n"),
+ error("Bad CONNECT", "non-loopback access denied", State)
+ end.
+
+start_connection(Params, Username, Addr) ->
+ case amqp_connection:start(Params) of
+ {ok, Conn} -> case rabbit_access_control:check_user_loopback(
+ Username, Addr) of
+ ok -> {ok, Conn};
+ not_allowed -> amqp_connection:close(Conn),
+ {error, not_loopback}
+ end;
+ {error, E} -> {error, E}
+ end.
+
+server_header() ->
+ {ok, Product} = application:get_key(rabbit, description),
+ {ok, Version} = application:get_key(rabbit, vsn),
+ rabbit_misc:format("~s/~s", [Product, Version]).
+
+do_subscribe(Destination, DestHdr, Frame,
+ State = #proc_state{subscriptions = Subs,
+ route_state = RouteState,
+ channel = Channel,
+ default_topic_exchange = DfltTopicEx}) ->
+ check_subscription_access(Destination, State),
+ Prefetch =
+ rabbit_stomp_frame:integer_header(Frame, ?HEADER_PREFETCH_COUNT,
+ undefined),
+ {AckMode, IsMulti} = rabbit_stomp_util:ack_mode(Frame),
+ case ensure_endpoint(source, Destination, Frame, Channel, RouteState) of
+ {ok, Queue, RouteState1} ->
+ {ok, ConsumerTag, Description} =
+ rabbit_stomp_util:consumer_tag(Frame),
+ case Prefetch of
+ undefined -> ok;
+ _ -> amqp_channel:call(
+ Channel, #'basic.qos'{prefetch_count = Prefetch})
+ end,
+ case maps:find(ConsumerTag, Subs) of
+ {ok, _} ->
+ Message = "Duplicated subscription identifier",
+ Detail = "A subscription identified by '~s' already exists.",
+ _ = error(Message, Detail, [ConsumerTag], State),
+ _ = send_error(Message, Detail, [ConsumerTag], State),
+ {stop, normal, close_connection(State)};
+ error ->
+ ExchangeAndKey = parse_routing(Destination, DfltTopicEx),
+ try
+ amqp_channel:subscribe(Channel,
+ #'basic.consume'{
+ queue = Queue,
+ consumer_tag = ConsumerTag,
+ no_local = false,
+ no_ack = (AckMode == auto),
+ exclusive = false,
+ arguments = []},
+ self()),
+ ok = rabbit_routing_util:ensure_binding(
+ Queue, ExchangeAndKey, Channel)
+ catch exit:Err ->
+ %% it's safe to delete this queue, it
+ %% was server-named and declared by us
+ case Destination of
+ {exchange, _} ->
+ ok = maybe_clean_up_queue(Queue, State);
+ {topic, _} ->
+ ok = maybe_clean_up_queue(Queue, State);
+ _ ->
+ ok
+ end,
+ exit(Err)
+ end,
+ ok(State#proc_state{subscriptions =
+ maps:put(
+ ConsumerTag,
+ #subscription{dest_hdr = DestHdr,
+ ack_mode = AckMode,
+ multi_ack = IsMulti,
+ description = Description},
+ Subs),
+ route_state = RouteState1})
+ end;
+ {error, _} = Err ->
+ Err
+ end.
+
+check_subscription_access(Destination = {topic, _Topic},
+ #proc_state{auth_login = _User,
+ connection = Connection,
+ default_topic_exchange = DfltTopicEx}) ->
+ [{amqp_params, AmqpParams}, {internal_user, InternalUser = #user{username = Username}}] =
+ amqp_connection:info(Connection, [amqp_params, internal_user]),
+ #amqp_params_direct{virtual_host = VHost} = AmqpParams,
+ {Exchange, RoutingKey} = parse_routing(Destination, DfltTopicEx),
+ Resource = #resource{virtual_host = VHost,
+ kind = topic,
+ name = rabbit_data_coercion:to_binary(Exchange)},
+ Context = #{routing_key => rabbit_data_coercion:to_binary(RoutingKey),
+ variable_map => #{<<"vhost">> => VHost, <<"username">> => Username}
+ },
+ rabbit_access_control:check_topic_access(InternalUser, Resource, read, Context);
+check_subscription_access(_, _) ->
+ authorized.
+
+maybe_clean_up_queue(Queue, #proc_state{connection = Connection}) ->
+ {ok, Channel} = amqp_connection:open_channel(Connection),
+ catch amqp_channel:call(Channel, #'queue.delete'{queue = Queue}),
+ catch amqp_channel:close(Channel),
+ ok.
+
+do_send(Destination, _DestHdr,
+ Frame = #stomp_frame{body_iolist = BodyFragments},
+ State = #proc_state{channel = Channel,
+ route_state = RouteState,
+ default_topic_exchange = DfltTopicEx}) ->
+    case ensure_endpoint(dest, Destination, Frame, Channel, RouteState) of
+      {ok, _Q, RouteState1} ->
+        {Frame1, State1} =
+            ensure_reply_to(Frame, State#proc_state{route_state = RouteState1}),
+        Props = rabbit_stomp_util:message_properties(Frame1),
+        {Exchange, RoutingKey} = parse_routing(Destination, DfltTopicEx),
+        Method = #'basic.publish'{
+          exchange = list_to_binary(Exchange),
+          routing_key = list_to_binary(RoutingKey),
+          mandatory = false,
+          immediate = false},
+        case transactional(Frame1) of
+            {yes, Transaction} ->
+                extend_transaction(
+                  Transaction,
+                  fun(StateN) ->
+                          maybe_record_receipt(Frame1, StateN)
+                  end,
+                  {Method, Props, BodyFragments},
+                  State1);
+            no ->
+                ok(send_method(Method, Props, BodyFragments,
+                    maybe_record_receipt(Frame1, State1)))
+        end;
+      {error, _} = Err ->
+        Err
+    end.
+
+create_ack_method(DeliveryTag, #subscription{multi_ack = IsMulti}, _) ->
+ #'basic.ack'{delivery_tag = DeliveryTag,
+ multiple = IsMulti}.
+
+create_nack_method(DeliveryTag, #subscription{multi_ack = IsMulti}, Requeue) ->
+ #'basic.nack'{delivery_tag = DeliveryTag,
+ multiple = IsMulti,
+ requeue = Requeue}.
+
+negotiate_version(Frame) ->
+ ClientVers = re:split(rabbit_stomp_frame:header(
+ Frame, ?HEADER_ACCEPT_VERSION, "1.0"),
+ ",", [{return, list}]),
+ rabbit_stomp_util:negotiate_version(ClientVers, ?SUPPORTED_VERSIONS).
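+
+%% e.g. an "accept-version:1.1,1.2" header is split into ["1.1", "1.2"]
+%% and negotiated against ?SUPPORTED_VERSIONS; if the header is absent,
+%% STOMP 1.0 is assumed.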
+
+
+send_delivery(Delivery = #'basic.deliver'{consumer_tag = ConsumerTag},
+ Properties, Body, DeliveryCtx,
+ State = #proc_state{
+ session_id = SessionId,
+ subscriptions = Subs,
+ version = Version}) ->
+ NewState = case maps:find(ConsumerTag, Subs) of
+ {ok, #subscription{ack_mode = AckMode}} ->
+ send_frame(
+ "MESSAGE",
+ rabbit_stomp_util:headers(SessionId, Delivery, Properties,
+ AckMode, Version),
+ Body,
+ State);
+ error ->
+ send_error("Subscription not found",
+ "There is no current subscription with tag '~s'.",
+ [ConsumerTag],
+ State)
+ end,
+ notify_received(DeliveryCtx),
+ NewState.
+
+notify_received(undefined) ->
+ %% no notification for quorum queues
+ ok;
+notify_received(DeliveryCtx) ->
+ %% notification for flow control
+ amqp_channel:notify_received(DeliveryCtx).
+
+send_method(Method, Channel, State) ->
+ amqp_channel:call(Channel, Method),
+ State.
+
+send_method(Method, State = #proc_state{channel = Channel}) ->
+ send_method(Method, Channel, State).
+
+send_method(Method, Properties, BodyFragments,
+ State = #proc_state{channel = Channel}) ->
+ send_method(Method, Channel, Properties, BodyFragments, State).
+
+send_method(Method = #'basic.publish'{}, Channel, Properties, BodyFragments,
+ State) ->
+ amqp_channel:cast_flow(
+ Channel, Method,
+ #amqp_msg{props = Properties,
+ payload = list_to_binary(BodyFragments)}),
+ State.
+
+close_connection(State = #proc_state{connection = none}) ->
+ State;
+%% Closing the connection will close the channel and subchannels
+close_connection(State = #proc_state{connection = Connection}) ->
+ %% ignore noproc or other exceptions to avoid debris
+ catch amqp_connection:close(Connection),
+ State#proc_state{channel = none, connection = none, subscriptions = none};
+close_connection(undefined) ->
+ rabbit_log:debug("~s:close_connection: undefined state", [?MODULE]),
+ #proc_state{channel = none, connection = none, subscriptions = none}.
+
+%%----------------------------------------------------------------------------
+%% Reply-To
+%%----------------------------------------------------------------------------
+
+ensure_reply_to(Frame = #stomp_frame{headers = Headers}, State) ->
+ case rabbit_stomp_frame:header(Frame, ?HEADER_REPLY_TO) of
+ not_found ->
+ {Frame, State};
+ {ok, ReplyTo} ->
+ {ok, Destination} = rabbit_routing_util:parse_endpoint(ReplyTo),
+ case rabbit_routing_util:dest_temp_queue(Destination) of
+ none ->
+ {Frame, State};
+ TempQueueId ->
+ {ReplyQueue, State1} =
+ ensure_reply_queue(TempQueueId, State),
+ {Frame#stomp_frame{
+ headers = lists:keyreplace(
+ ?HEADER_REPLY_TO, 1, Headers,
+ {?HEADER_REPLY_TO, ReplyQueue})},
+ State1}
+ end
+ end.
+
+ensure_reply_queue(TempQueueId, State = #proc_state{channel = Channel,
+ reply_queues = RQS,
+ subscriptions = Subs}) ->
+ case maps:find(TempQueueId, RQS) of
+ {ok, RQ} ->
+ {binary_to_list(RQ), State};
+ error ->
+ #'queue.declare_ok'{queue = Queue} =
+ amqp_channel:call(Channel,
+ #'queue.declare'{auto_delete = true,
+ exclusive = true}),
+
+ ConsumerTag = rabbit_stomp_util:consumer_tag_reply_to(TempQueueId),
+ #'basic.consume_ok'{} =
+ amqp_channel:subscribe(Channel,
+ #'basic.consume'{
+ queue = Queue,
+ consumer_tag = ConsumerTag,
+ no_ack = true,
+ nowait = false},
+ self()),
+
+ Destination = binary_to_list(Queue),
+
+ %% synthesise a subscription to the reply queue destination
+ Subs1 = maps:put(ConsumerTag,
+ #subscription{dest_hdr = Destination,
+ multi_ack = false},
+ Subs),
+
+ {Destination, State#proc_state{
+ reply_queues = maps:put(TempQueueId, Queue, RQS),
+ subscriptions = Subs1}}
+ end.
+
+%%----------------------------------------------------------------------------
+%% Receipt Handling
+%%----------------------------------------------------------------------------
+
+ensure_receipt(Frame = #stomp_frame{command = Command}, State) ->
+ case rabbit_stomp_frame:header(Frame, ?HEADER_RECEIPT) of
+ {ok, Id} -> do_receipt(Command, Id, State);
+ not_found -> State
+ end.
+
+do_receipt("SEND", _, State) ->
+ %% SEND frame receipts are handled when messages are confirmed
+ State;
+do_receipt(_Command, ReceiptId, State) ->
+ send_frame("RECEIPT", [{"receipt-id", ReceiptId}], "", State).
+
+maybe_record_receipt(Frame, State = #proc_state{channel = Channel,
+ pending_receipts = PR}) ->
+ case rabbit_stomp_frame:header(Frame, ?HEADER_RECEIPT) of
+ {ok, Id} ->
+ PR1 = case PR of
+ undefined ->
+ amqp_channel:register_confirm_handler(
+ Channel, self()),
+ #'confirm.select_ok'{} =
+ amqp_channel:call(Channel, #'confirm.select'{}),
+ gb_trees:empty();
+ _ ->
+ PR
+ end,
+ SeqNo = amqp_channel:next_publish_seqno(Channel),
+ State#proc_state{pending_receipts = gb_trees:insert(SeqNo, Id, PR1)};
+ not_found ->
+ State
+ end.
+
+flush_pending_receipts(DeliveryTag, IsMulti,
+ State = #proc_state{pending_receipts = PR}) ->
+ {Receipts, PR1} = accumulate_receipts(DeliveryTag, IsMulti, PR),
+ State1 = lists:foldl(fun(ReceiptId, StateN) ->
+ do_receipt(none, ReceiptId, StateN)
+ end, State, Receipts),
+ State1#proc_state{pending_receipts = PR1}.
+
+accumulate_receipts(DeliveryTag, false, PR) ->
+ case gb_trees:lookup(DeliveryTag, PR) of
+ {value, ReceiptId} -> {[ReceiptId], gb_trees:delete(DeliveryTag, PR)};
+ none -> {[], PR}
+ end;
+
+accumulate_receipts(DeliveryTag, true, PR) ->
+ case gb_trees:is_empty(PR) of
+ true -> {[], PR};
+ false -> accumulate_receipts1(DeliveryTag,
+ gb_trees:take_smallest(PR), [])
+ end.
+
+accumulate_receipts1(DeliveryTag, {Key, Value, PR}, Acc)
+ when Key > DeliveryTag ->
+ {lists:reverse(Acc), gb_trees:insert(Key, Value, PR)};
+accumulate_receipts1(DeliveryTag, {_Key, Value, PR}, Acc) ->
+ Acc1 = [Value | Acc],
+ case gb_trees:is_empty(PR) of
+ true -> {lists:reverse(Acc1), PR};
+ false -> accumulate_receipts1(DeliveryTag,
+ gb_trees:take_smallest(PR), Acc1)
+ end.
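+
+%% Illustration (hypothetical tags): with pending receipts
+%% {1 => "a", 2 => "b", 3 => "c"}, a multiple confirm up to tag 2 gives
+%% accumulate_receipts(2, true, PR) = {["a", "b"], PR'} where PR' keeps
+%% only tag 3, while a single confirm gives
+%% accumulate_receipts(2, false, PR) = {["b"], PR''} with tags 1 and 3
+%% untouched.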
+
+%%----------------------------------------------------------------------------
+%% Transaction Support
+%%----------------------------------------------------------------------------
+
+transactional(Frame) ->
+ case rabbit_stomp_frame:header(Frame, ?HEADER_TRANSACTION) of
+ {ok, Transaction} -> {yes, Transaction};
+ not_found -> no
+ end.
+
+transactional_action(Frame, Name, Fun, State) ->
+ case transactional(Frame) of
+ {yes, Transaction} ->
+ Fun(Transaction, State);
+ no ->
+ error("Missing transaction",
+ "~p must include a 'transaction' header~n",
+ [Name],
+ State)
+ end.
+
+with_transaction(Transaction, State, Fun) ->
+ case get({transaction, Transaction}) of
+ undefined ->
+ error("Bad transaction",
+ "Invalid transaction identifier: ~p~n",
+ [Transaction],
+ State);
+ Actions ->
+ Fun(Actions, State)
+ end.
+
+begin_transaction(Transaction, State) ->
+ put({transaction, Transaction}, []),
+ ok(State).
+
+extend_transaction(Transaction, Callback, Action, State) ->
+ extend_transaction(Transaction, {callback, Callback, Action}, State).
+
+extend_transaction(Transaction, Action, State0) ->
+ with_transaction(
+ Transaction, State0,
+ fun (Actions, State) ->
+ put({transaction, Transaction}, [Action | Actions]),
+ ok(State)
+ end).
+
+commit_transaction(Transaction, State0) ->
+ with_transaction(
+ Transaction, State0,
+ fun (Actions, State) ->
+ FinalState = lists:foldr(fun perform_transaction_action/2,
+ State,
+ Actions),
+ erase({transaction, Transaction}),
+ ok(FinalState)
+ end).
+
+abort_transaction(Transaction, State0) ->
+ with_transaction(
+ Transaction, State0,
+ fun (_Actions, State) ->
+ erase({transaction, Transaction}),
+ ok(State)
+ end).
+
+perform_transaction_action({callback, Callback, Action}, State) ->
+ perform_transaction_action(Action, Callback(State));
+perform_transaction_action({Method}, State) ->
+ send_method(Method, State);
+perform_transaction_action({Method, Props, BodyFragments}, State) ->
+ send_method(Method, Props, BodyFragments, State).
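+
+%% Note on ordering: extend_transaction/3 prepends each action, so the
+%% stored list is newest-first; commit_transaction/2 replays it with
+%% lists:foldr/3, i.e. in the order the client issued the frames.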
+
+%%--------------------------------------------------------------------
+%% Heartbeat Management
+%%--------------------------------------------------------------------
+
+ensure_heartbeats(Heartbeats) ->
+    [CX, CY] = [list_to_integer(X) ||
+ X <- re:split(Heartbeats, ",", [{return, list}])],
+
+ {SendTimeout, ReceiveTimeout} =
+ {millis_to_seconds(CY), millis_to_seconds(CX)},
+
+ _ = rabbit_stomp_reader:start_heartbeats(self(), {SendTimeout, ReceiveTimeout}),
+ {SendTimeout * 1000 , ReceiveTimeout * 1000}.
+
+millis_to_seconds(M) when M =< 0 -> 0;
+millis_to_seconds(M) when M < 1000 -> 1;
+millis_to_seconds(M) -> M div 1000.
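+
+%% Illustration: a CONNECT "heart-beat:5000,10000" header (client sends
+%% every 5000 ms and wants to receive every 10000 ms) gives CX = 5000,
+%% CY = 10000, so the heartbeaters run with {SendTimeout, ReceiveTimeout}
+%% = {10, 5} seconds and "10000,5000" is echoed back in the CONNECTED
+%% frame.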
+
+%%----------------------------------------------------------------------------
+%% Queue Setup
+%%----------------------------------------------------------------------------
+
+ensure_endpoint(_Direction, {queue, []}, _Frame, _Channel, _State) ->
+ {error, {invalid_destination, "Destination cannot be blank"}};
+
+ensure_endpoint(source, EndPoint, {_, _, Headers, _} = Frame, Channel, State) ->
+ Params =
+ [{subscription_queue_name_gen,
+ fun () ->
+ Id = build_subscription_id(Frame),
+ % Note: we discard the exchange here so there's no need to use
+ % the default_topic_exchange configuration key
+ {_, Name} = rabbit_routing_util:parse_routing(EndPoint),
+ list_to_binary(rabbit_stomp_util:subscription_queue_name(Name, Id, Frame))
+ end
+ }] ++ rabbit_stomp_util:build_params(EndPoint, Headers),
+ Arguments = rabbit_stomp_util:build_arguments(Headers),
+ rabbit_routing_util:ensure_endpoint(source, Channel, EndPoint,
+ [Arguments | Params], State);
+
+ensure_endpoint(Direction, EndPoint, {_, _, Headers, _}, Channel, State) ->
+ Params = rabbit_stomp_util:build_params(EndPoint, Headers),
+ Arguments = rabbit_stomp_util:build_arguments(Headers),
+ rabbit_routing_util:ensure_endpoint(Direction, Channel, EndPoint,
+ [Arguments | Params], State).
+
+build_subscription_id(Frame) ->
+ case rabbit_stomp_util:has_durable_header(Frame) of
+ true ->
+ {ok, Id} = rabbit_stomp_frame:header(Frame, ?HEADER_ID),
+ Id;
+ false ->
+ rabbit_guid:gen_secure()
+ end.
+
+%%----------------------------------------------------------------------------
+%% Success/error handling
+%%----------------------------------------------------------------------------
+
+ok(State) ->
+ {ok, none, State}.
+
+ok(Command, Headers, BodyFragments, State) ->
+ {ok, #stomp_frame{command = Command,
+ headers = Headers,
+ body_iolist = BodyFragments}, State}.
+
+amqp_death(access_refused = ErrorName, Explanation, State) ->
+ ErrorDesc = rabbit_misc:format("~s~n", [Explanation]),
+ log_error(ErrorName, ErrorDesc, none),
+ {stop, normal, close_connection(send_error(atom_to_list(ErrorName), ErrorDesc, State))};
+amqp_death(ReplyCode, Explanation, State) ->
+ ErrorName = amqp_connection:error_atom(ReplyCode),
+ ErrorDesc = rabbit_misc:format("~s~n", [Explanation]),
+ log_error(ErrorName, ErrorDesc, none),
+ {stop, normal, close_connection(send_error(atom_to_list(ErrorName), ErrorDesc, State))}.
+
+error(Message, Detail, State) ->
+ priv_error(Message, Detail, none, State).
+
+error(Message, Format, Args, State) ->
+ priv_error(Message, Format, Args, none, State).
+
+priv_error(Message, Detail, ServerPrivateDetail, State) ->
+ log_error(Message, Detail, ServerPrivateDetail),
+ {error, Message, Detail, State}.
+
+priv_error(Message, Format, Args, ServerPrivateDetail, State) ->
+ priv_error(Message, rabbit_misc:format(Format, Args), ServerPrivateDetail,
+ State).
+
+log_error(Message, Detail, ServerPrivateDetail) ->
+ rabbit_log:error("STOMP error frame sent:~n"
+ "Message: ~p~n"
+ "Detail: ~p~n"
+ "Server private detail: ~p~n",
+ [Message, Detail, ServerPrivateDetail]).
+
+%%----------------------------------------------------------------------------
+%% Frame sending utilities
+%%----------------------------------------------------------------------------
+
+send_frame(Command, Headers, BodyFragments, State) ->
+ send_frame(#stomp_frame{command = Command,
+ headers = Headers,
+ body_iolist = BodyFragments},
+ State).
+
+send_frame(Frame, State = #proc_state{send_fun = SendFun,
+ trailing_lf = TrailingLF}) ->
+ SendFun(async, rabbit_stomp_frame:serialize(Frame, TrailingLF)),
+ State.
+
+send_error_frame(Message, ExtraHeaders, Format, Args, State) ->
+ send_error_frame(Message, ExtraHeaders, rabbit_misc:format(Format, Args),
+ State).
+
+send_error_frame(Message, ExtraHeaders, Detail, State) ->
+ send_frame("ERROR", [{"message", Message},
+ {"content-type", "text/plain"},
+ {"version", string:join(?SUPPORTED_VERSIONS, ",")}] ++
+ ExtraHeaders,
+ Detail, State).
+
+send_error(Message, Detail, State) ->
+ send_error_frame(Message, [], Detail, State).
+
+send_error(Message, Format, Args, State) ->
+ send_error(Message, rabbit_misc:format(Format, Args), State).
+
+additional_info(Key,
+ #proc_state{adapter_info =
+ #amqp_adapter_info{additional_info = AddInfo}}) ->
+ proplists:get_value(Key, AddInfo).
+
+parse_routing(Destination, DefaultTopicExchange) ->
+ {Exchange0, RoutingKey} = rabbit_routing_util:parse_routing(Destination),
+ Exchange1 = maybe_apply_default_topic_exchange(Exchange0, DefaultTopicExchange),
+ {Exchange1, RoutingKey}.
+
+maybe_apply_default_topic_exchange("amq.topic"=Exchange, <<"amq.topic">>=_DefaultTopicExchange) ->
+ %% This is the case where the destination is the same
+ %% as the default of amq.topic
+ Exchange;
+maybe_apply_default_topic_exchange("amq.topic"=_Exchange, DefaultTopicExchange) ->
+ %% This is the case where the destination would have been
+ %% amq.topic but we have configured a different default
+ binary_to_list(DefaultTopicExchange);
+maybe_apply_default_topic_exchange(Exchange, _DefaultTopicExchange) ->
+    %% This is the case where the destination is different from
+ %% amq.topic, so it must have been specified in the
+ %% message headers
+ Exchange.
diff --git a/deps/rabbitmq_stomp/src/rabbit_stomp_reader.erl b/deps/rabbitmq_stomp/src/rabbit_stomp_reader.erl
new file mode 100644
index 0000000000..8f081d618f
--- /dev/null
+++ b/deps/rabbitmq_stomp/src/rabbit_stomp_reader.erl
@@ -0,0 +1,465 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at https://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_stomp_reader).
+-behaviour(gen_server2).
+
+-export([start_link/3]).
+-export([conserve_resources/3]).
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
+ code_change/3, terminate/2]).
+-export([start_heartbeats/2]).
+-export([info/2, close_connection/2]).
+-export([ssl_login_name/2]).
+
+-include("rabbit_stomp.hrl").
+-include("rabbit_stomp_frame.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-define(SIMPLE_METRICS, [pid, recv_oct, send_oct, reductions]).
+-define(OTHER_METRICS, [recv_cnt, send_cnt, send_pend, garbage_collection, state,
+ timeout]).
+
+-record(reader_state, {socket, conn_name, parse_state, processor_state, state,
+ conserve_resources, recv_outstanding, stats_timer,
+ parent, connection, heartbeat_sup, heartbeat,
+ timeout_sec %% heartbeat timeout value used, 0 means
+ %% heartbeats are disabled
+ }).
+
+%%----------------------------------------------------------------------------
+
+start_link(SupHelperPid, Ref, Configuration) ->
+ Pid = proc_lib:spawn_link(?MODULE, init,
+ [[SupHelperPid, Ref, Configuration]]),
+ {ok, Pid}.
+
+info(Pid, InfoItems) ->
+ case InfoItems -- ?INFO_ITEMS of
+ [] ->
+ gen_server2:call(Pid, {info, InfoItems});
+ UnknownItems -> throw({bad_argument, UnknownItems})
+ end.
+
+close_connection(Pid, Reason) ->
+ gen_server:cast(Pid, {close_connection, Reason}).
+
+
+init([SupHelperPid, Ref, Configuration]) ->
+ process_flag(trap_exit, true),
+ {ok, Sock} = rabbit_networking:handshake(Ref,
+ application:get_env(rabbitmq_stomp, proxy_protocol, false)),
+ RealSocket = rabbit_net:unwrap_socket(Sock),
+
+ case rabbit_net:connection_string(Sock, inbound) of
+ {ok, ConnStr} ->
+ ProcInitArgs = processor_args(Configuration, Sock),
+ ProcState = rabbit_stomp_processor:initial_state(Configuration,
+ ProcInitArgs),
+
+ rabbit_log_connection:info("accepting STOMP connection ~p (~s)~n",
+ [self(), ConnStr]),
+
+ ParseState = rabbit_stomp_frame:initial_state(),
+ _ = register_resource_alarm(),
+ gen_server2:enter_loop(?MODULE, [],
+ rabbit_event:init_stats_timer(
+ run_socket(control_throttle(
+ #reader_state{socket = RealSocket,
+ conn_name = ConnStr,
+ parse_state = ParseState,
+ processor_state = ProcState,
+ heartbeat_sup = SupHelperPid,
+ heartbeat = {none, none},
+ state = running,
+ conserve_resources = false,
+ recv_outstanding = false})), #reader_state.stats_timer),
+ {backoff, 1000, 1000, 10000});
+ {network_error, Reason} ->
+ rabbit_net:fast_close(RealSocket),
+ terminate({shutdown, Reason}, undefined);
+ {error, enotconn} ->
+ rabbit_net:fast_close(RealSocket),
+ terminate(shutdown, undefined);
+ {error, Reason} ->
+ rabbit_net:fast_close(RealSocket),
+ terminate({network_error, Reason}, undefined)
+ end.
+
+
+handle_call({info, InfoItems}, _From, State) ->
+ Infos = lists:map(
+ fun(InfoItem) ->
+ {InfoItem, info_internal(InfoItem, State)}
+ end,
+ InfoItems),
+ {reply, Infos, State};
+handle_call(Msg, From, State) ->
+ {stop, {stomp_unexpected_call, Msg, From}, State}.
+
+handle_cast({close_connection, Reason}, State) ->
+ {stop, {shutdown, {server_initiated_close, Reason}}, State};
+handle_cast(client_timeout, State) ->
+ {stop, {shutdown, client_heartbeat_timeout}, State};
+handle_cast(Msg, State) ->
+ {stop, {stomp_unexpected_cast, Msg}, State}.
+
+
+handle_info({Tag, Sock, Data}, State=#reader_state{socket=Sock})
+ when Tag =:= tcp; Tag =:= ssl ->
+ case process_received_bytes(Data, State#reader_state{recv_outstanding = false}) of
+ {ok, NewState} ->
+ {noreply, ensure_stats_timer(run_socket(control_throttle(NewState))), hibernate};
+ {stop, Reason, NewState} ->
+ {stop, Reason, NewState}
+ end;
+handle_info({Tag, Sock}, State=#reader_state{socket=Sock})
+ when Tag =:= tcp_closed; Tag =:= ssl_closed ->
+ {stop, normal, State};
+handle_info({Tag, Sock, Reason}, State=#reader_state{socket=Sock})
+ when Tag =:= tcp_error; Tag =:= ssl_error ->
+ {stop, {inet_error, Reason}, State};
+handle_info({inet_reply, _Sock, {error, closed}}, State) ->
+ {stop, normal, State};
+handle_info({inet_reply, _, ok}, State) ->
+ {noreply, State, hibernate};
+handle_info({inet_reply, _, Status}, State) ->
+ {stop, Status, State};
+handle_info(emit_stats, State) ->
+ {noreply, emit_stats(State), hibernate};
+handle_info({conserve_resources, Conserve}, State) ->
+ NewState = State#reader_state{conserve_resources = Conserve},
+ {noreply, run_socket(control_throttle(NewState)), hibernate};
+handle_info({bump_credit, Msg}, State) ->
+ credit_flow:handle_bump_msg(Msg),
+ {noreply, run_socket(control_throttle(State)), hibernate};
+
+%%----------------------------------------------------------------------------
+
+handle_info(client_timeout, State) ->
+ {stop, {shutdown, client_heartbeat_timeout}, State};
+
+%%----------------------------------------------------------------------------
+
+handle_info(#'basic.consume_ok'{}, State) ->
+ {noreply, State, hibernate};
+handle_info(#'basic.cancel_ok'{}, State) ->
+ {noreply, State, hibernate};
+handle_info(#'basic.ack'{delivery_tag = Tag, multiple = IsMulti}, State) ->
+ ProcState = processor_state(State),
+ NewProcState = rabbit_stomp_processor:flush_pending_receipts(Tag,
+ IsMulti,
+ ProcState),
+ {noreply, processor_state(NewProcState, State), hibernate};
+handle_info({Delivery = #'basic.deliver'{},
+ Message = #amqp_msg{}},
+ State) ->
+ %% receiving a message from a quorum queue
+ %% no delivery context
+ handle_info({Delivery, Message, undefined}, State);
+handle_info({Delivery = #'basic.deliver'{},
+ #amqp_msg{props = Props, payload = Payload},
+ DeliveryCtx},
+ State) ->
+ ProcState = processor_state(State),
+ NewProcState = rabbit_stomp_processor:send_delivery(Delivery,
+ Props,
+ Payload,
+ DeliveryCtx,
+ ProcState),
+ {noreply, processor_state(NewProcState, State), hibernate};
+handle_info(#'basic.cancel'{consumer_tag = Ctag}, State) ->
+ ProcState = processor_state(State),
+ case rabbit_stomp_processor:cancel_consumer(Ctag, ProcState) of
+ {ok, NewProcState, _} ->
+ {noreply, processor_state(NewProcState, State), hibernate};
+ {stop, Reason, NewProcState} ->
+ {stop, Reason, processor_state(NewProcState, State)}
+ end;
+
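+%% Sent by start_heartbeats/2 below; a {0, 0} timeout pair means
+%% heartbeats are disabled (see the timeout_sec field above).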
+handle_info({start_heartbeats, {0, 0}}, State) ->
+ {noreply, State#reader_state{timeout_sec = {0, 0}}};
+
+handle_info({start_heartbeats, {SendTimeout, ReceiveTimeout}},
+ State = #reader_state{heartbeat_sup = SupPid, socket = Sock}) ->
+
+ SendFun = fun() -> catch rabbit_net:send(Sock, <<$\n>>) end,
+ Pid = self(),
+ ReceiveFun = fun() -> gen_server2:cast(Pid, client_timeout) end,
+ Heartbeat = rabbit_heartbeat:start(SupPid, Sock, SendTimeout,
+ SendFun, ReceiveTimeout, ReceiveFun),
+ {noreply, State#reader_state{heartbeat = Heartbeat,
+ timeout_sec = {SendTimeout, ReceiveTimeout}}};
+
+
+%%----------------------------------------------------------------------------
+handle_info({'EXIT', From, Reason}, State) ->
+ ProcState = processor_state(State),
+ case rabbit_stomp_processor:handle_exit(From, Reason, ProcState) of
+ {stop, NewReason, NewProcState} ->
+ {stop, NewReason, processor_state(NewProcState, State)};
+ unknown_exit ->
+ {stop, {connection_died, Reason}, State}
+ end.
+%%----------------------------------------------------------------------------
+
+process_received_bytes([], State) ->
+ {ok, State};
+process_received_bytes(Bytes,
+ State = #reader_state{
+ processor_state = ProcState,
+ parse_state = ParseState}) ->
+ case rabbit_stomp_frame:parse(Bytes, ParseState) of
+ {more, ParseState1} ->
+ {ok, State#reader_state{parse_state = ParseState1}};
+ {ok, Frame, Rest} ->
+ case rabbit_stomp_processor:process_frame(Frame, ProcState) of
+ {ok, NewProcState, Conn} ->
+ PS = rabbit_stomp_frame:initial_state(),
+ NextState = maybe_block(State, Frame),
+ process_received_bytes(Rest, NextState#reader_state{
+ processor_state = NewProcState,
+ parse_state = PS,
+ connection = Conn});
+ {stop, Reason, NewProcState} ->
+ {stop, Reason,
+ processor_state(NewProcState, State)}
+ end;
+ {error, Reason} ->
+            %% The parser couldn't parse the data. We log the reason
+            %% now and stop with reason 'normal' rather than the actual
+            %% parsing error: otherwise the supervisor would log a crash
+            %% report (which is not that useful) and handle the recovery
+            %% itself, which is too slow.
+ log_reason({network_error, Reason}, State),
+ {stop, normal, State}
+ end.
+
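+%% Callback invoked by rabbit_alarm (see register_resource_alarm/0 below);
+%% it forwards resource alarm changes to the reader process, which then
+%% throttles the socket via control_throttle/1.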
+conserve_resources(Pid, _Source, {_, Conserve, _}) ->
+ Pid ! {conserve_resources, Conserve},
+ ok.
+
+register_resource_alarm() ->
+ rabbit_alarm:register(self(), {?MODULE, conserve_resources, []}).
+
+
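+%% The reader is 'running' normally, 'blocking' once a resource alarm or
+%% credit flow kicks in, and 'blocked' after a SEND frame arrives while
+%% blocking (see maybe_block/2 below).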
+control_throttle(State = #reader_state{state = CS,
+ conserve_resources = Mem,
+ heartbeat = Heartbeat}) ->
+ case {CS, Mem orelse credit_flow:blocked()} of
+ {running, true} -> State#reader_state{state = blocking};
+ {blocking, false} -> rabbit_heartbeat:resume_monitor(Heartbeat),
+ State#reader_state{state = running};
+ {blocked, false} -> rabbit_heartbeat:resume_monitor(Heartbeat),
+ State#reader_state{state = running};
+ {_, _} -> State
+ end.
+
+maybe_block(State = #reader_state{state = blocking, heartbeat = Heartbeat},
+ #stomp_frame{command = "SEND"}) ->
+ rabbit_heartbeat:pause_monitor(Heartbeat),
+ State#reader_state{state = blocked};
+maybe_block(State, _) ->
+ State.
+
+run_socket(State = #reader_state{state = blocked}) ->
+ State;
+run_socket(State = #reader_state{recv_outstanding = true}) ->
+ State;
+run_socket(State = #reader_state{socket = Sock}) ->
+ rabbit_net:setopts(Sock, [{active, once}]),
+ State#reader_state{recv_outstanding = true}.
+
+
+terminate(Reason, undefined) ->
+ log_reason(Reason, undefined),
+ {stop, Reason};
+terminate(Reason, State = #reader_state{processor_state = ProcState}) ->
+ maybe_emit_stats(State),
+ log_reason(Reason, State),
+ _ = rabbit_stomp_processor:flush_and_die(ProcState),
+ {stop, Reason}.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+
+log_reason({network_error, {ssl_upgrade_error, closed}, ConnStr}, _State) ->
+ rabbit_log_connection:error("STOMP detected TLS upgrade error on ~s: connection closed~n",
+ [ConnStr]);
+
+log_reason({network_error,
+ {ssl_upgrade_error,
+ {tls_alert, "handshake failure"}}, ConnStr}, _State) ->
+ log_tls_alert(handshake_failure, ConnStr);
+log_reason({network_error,
+ {ssl_upgrade_error,
+ {tls_alert, "unknown ca"}}, ConnStr}, _State) ->
+ log_tls_alert(unknown_ca, ConnStr);
+log_reason({network_error,
+ {ssl_upgrade_error,
+ {tls_alert, {Err, _}}}, ConnStr}, _State) ->
+ log_tls_alert(Err, ConnStr);
+log_reason({network_error,
+ {ssl_upgrade_error,
+ {tls_alert, Alert}}, ConnStr}, _State) ->
+ log_tls_alert(Alert, ConnStr);
+log_reason({network_error, {ssl_upgrade_error, Reason}, ConnStr}, _State) ->
+ rabbit_log_connection:error("STOMP detected TLS upgrade error on ~s: ~p~n",
+ [ConnStr, Reason]);
+
+log_reason({network_error, Reason, ConnStr}, _State) ->
+ rabbit_log_connection:error("STOMP detected network error on ~s: ~p~n",
+ [ConnStr, Reason]);
+
+log_reason({network_error, Reason}, _State) ->
+ rabbit_log_connection:error("STOMP detected network error: ~p~n", [Reason]);
+
+log_reason({shutdown, client_heartbeat_timeout},
+ #reader_state{ processor_state = ProcState }) ->
+ AdapterName = rabbit_stomp_processor:adapter_name(ProcState),
+ rabbit_log_connection:warning("STOMP detected missed client heartbeat(s) "
+ "on connection ~s, closing it~n", [AdapterName]);
+
+log_reason({shutdown, {server_initiated_close, Reason}},
+ #reader_state{conn_name = ConnName}) ->
+ rabbit_log_connection:info("closing STOMP connection ~p (~s), reason: ~s~n",
+ [self(), ConnName, Reason]);
+
+log_reason(normal, #reader_state{conn_name = ConnName}) ->
+ rabbit_log_connection:info("closing STOMP connection ~p (~s)~n", [self(), ConnName]);
+
+log_reason(shutdown, undefined) ->
+ rabbit_log_connection:error("closing STOMP connection that never completed connection handshake (negotiation)~n", []);
+
+log_reason(Reason, #reader_state{processor_state = ProcState}) ->
+ AdapterName = rabbit_stomp_processor:adapter_name(ProcState),
+ rabbit_log_connection:warning("STOMP connection ~s terminated"
+ " with reason ~p, closing it~n", [AdapterName, Reason]).
+
+log_tls_alert(handshake_failure, ConnStr) ->
+ rabbit_log_connection:error("STOMP detected TLS upgrade error on ~s: handshake failure~n",
+ [ConnStr]);
+log_tls_alert(unknown_ca, ConnStr) ->
+ rabbit_log_connection:error("STOMP detected TLS certificate verification error on ~s: alert 'unknown CA'~n",
+ [ConnStr]);
+log_tls_alert(Alert, ConnStr) ->
+ rabbit_log_connection:error("STOMP detected TLS upgrade error on ~s: alert ~s~n",
+ [ConnStr, Alert]).
+
+
+%%----------------------------------------------------------------------------
+
+processor_args(Configuration, Sock) ->
+ RealSocket = rabbit_net:unwrap_socket(Sock),
+ SendFun = fun (sync, IoData) ->
+ %% no messages emitted
+ catch rabbit_net:send(RealSocket, IoData);
+ (async, IoData) ->
+ %% {inet_reply, _, _} will appear soon
+ %% We ignore certain errors here, as we will be
+ %% receiving an asynchronous notification of the
+ %% same (or a related) fault shortly anyway. See
+ %% bug 21365.
+ catch rabbit_net:port_command(RealSocket, IoData)
+ end,
+ {ok, {PeerAddr, _PeerPort}} = rabbit_net:sockname(RealSocket),
+ {SendFun, adapter_info(Sock),
+ ssl_login_name(RealSocket, Configuration), PeerAddr}.
+
+adapter_info(Sock) ->
+ amqp_connection:socket_adapter_info(Sock, {'STOMP', 0}).
+
+ssl_login_name(_Sock, #stomp_configuration{ssl_cert_login = false}) ->
+ none;
+ssl_login_name(Sock, #stomp_configuration{ssl_cert_login = true}) ->
+ case rabbit_net:peercert(Sock) of
+ {ok, C} -> case rabbit_ssl:peer_cert_auth_name(C) of
+ unsafe -> none;
+ not_found -> none;
+ Name -> Name
+ end;
+ {error, no_peercert} -> none;
+ nossl -> none
+ end.
+
+%%----------------------------------------------------------------------------
+
+start_heartbeats(_, {0,0} ) -> ok;
+start_heartbeats(Pid, Heartbeat) -> Pid ! {start_heartbeats, Heartbeat}.
+
+maybe_emit_stats(State) ->
+ rabbit_event:if_enabled(State, #reader_state.stats_timer,
+ fun() -> emit_stats(State) end).
+
+emit_stats(State=#reader_state{connection = C}) when C == none; C == undefined ->
+ %% Avoid emitting stats on terminate when the connection has not yet been
+    %% established, as this would create orphan entries in the stats database
+ State1 = rabbit_event:reset_stats_timer(State, #reader_state.stats_timer),
+ ensure_stats_timer(State1);
+emit_stats(State) ->
+ [{_, Pid}, {_, Recv_oct}, {_, Send_oct}, {_, Reductions}] = I
+ = infos(?SIMPLE_METRICS, State),
+ Infos = infos(?OTHER_METRICS, State),
+ rabbit_core_metrics:connection_stats(Pid, Infos),
+ rabbit_core_metrics:connection_stats(Pid, Recv_oct, Send_oct, Reductions),
+ rabbit_event:notify(connection_stats, Infos ++ I),
+ State1 = rabbit_event:reset_stats_timer(State, #reader_state.stats_timer),
+ ensure_stats_timer(State1).
+
+ensure_stats_timer(State = #reader_state{}) ->
+ rabbit_event:ensure_stats_timer(State, #reader_state.stats_timer, emit_stats).
+
+%%----------------------------------------------------------------------------
+
+
+processor_state(#reader_state{ processor_state = ProcState }) -> ProcState.
+processor_state(ProcState, #reader_state{} = State) ->
+ State#reader_state{ processor_state = ProcState}.
+
+%%----------------------------------------------------------------------------
+
+infos(Items, State) -> [{Item, info_internal(Item, State)} || Item <- Items].
+
+info_internal(pid, State) -> info_internal(connection, State);
+info_internal(SockStat, #reader_state{socket = Sock}) when SockStat =:= recv_oct;
+ SockStat =:= recv_cnt;
+ SockStat =:= send_oct;
+ SockStat =:= send_cnt;
+ SockStat =:= send_pend ->
+ case rabbit_net:getstat(Sock, [SockStat]) of
+ {ok, [{_, N}]} when is_number(N) -> N;
+ _ -> 0
+ end;
+info_internal(state, State) -> info_internal(connection_state, State);
+info_internal(garbage_collection, _State) ->
+ rabbit_misc:get_gc_info(self());
+info_internal(reductions, _State) ->
+ {reductions, Reductions} = erlang:process_info(self(), reductions),
+ Reductions;
+info_internal(timeout, #reader_state{timeout_sec = {_, Receive}}) ->
+ Receive;
+info_internal(timeout, #reader_state{timeout_sec = undefined}) ->
+ 0;
+info_internal(conn_name, #reader_state{conn_name = Val}) ->
+ rabbit_data_coercion:to_binary(Val);
+info_internal(connection, #reader_state{connection = Val}) ->
+ Val;
+info_internal(connection_state, #reader_state{state = Val}) ->
+ Val;
+info_internal(Key, #reader_state{processor_state = ProcState}) ->
+ rabbit_stomp_processor:info(Key, ProcState).
diff --git a/deps/rabbitmq_stomp/src/rabbit_stomp_sup.erl b/deps/rabbitmq_stomp/src/rabbit_stomp_sup.erl
new file mode 100644
index 0000000000..ee74569af9
--- /dev/null
+++ b/deps/rabbitmq_stomp/src/rabbit_stomp_sup.erl
@@ -0,0 +1,83 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at https://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_stomp_sup).
+-behaviour(supervisor).
+
+-export([start_link/2, init/1, stop_listeners/0]).
+
+-define(TCP_PROTOCOL, 'stomp').
+-define(TLS_PROTOCOL, 'stomp/ssl').
+
+start_link(Listeners, Configuration) ->
+ supervisor:start_link({local, ?MODULE}, ?MODULE,
+ [Listeners, Configuration]).
+
+init([{Listeners, SslListeners0}, Configuration]) ->
+ NumTcpAcceptors = application:get_env(rabbitmq_stomp, num_tcp_acceptors, 10),
+ {ok, SocketOpts} = application:get_env(rabbitmq_stomp, tcp_listen_options),
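+    %% rabbit_networking:poodle_check/1 returns 'danger' when the TLS
+    %% configuration is vulnerable to the POODLE attack; in that case no
+    %% TLS listeners are started.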
+ {SslOpts, NumSslAcceptors, SslListeners}
+ = case SslListeners0 of
+ [] -> {none, 0, []};
+ _ -> {rabbit_networking:ensure_ssl(),
+ application:get_env(rabbitmq_stomp, num_ssl_acceptors, 10),
+ case rabbit_networking:poodle_check('STOMP') of
+ ok -> SslListeners0;
+ danger -> []
+ end}
+ end,
+ Flags = #{
+ strategy => one_for_all,
+ period => 10,
+ intensity => 10
+ },
+ {ok, {Flags,
+ listener_specs(fun tcp_listener_spec/1,
+ [SocketOpts, Configuration, NumTcpAcceptors], Listeners) ++
+ listener_specs(fun ssl_listener_spec/1,
+ [SocketOpts, SslOpts, Configuration, NumSslAcceptors], SslListeners)}}.
+
+stop_listeners() ->
+ rabbit_networking:stop_ranch_listener_of_protocol(?TCP_PROTOCOL),
+ rabbit_networking:stop_ranch_listener_of_protocol(?TLS_PROTOCOL),
+ ok.
+
+%%
+%% Implementation
+%%
+
+listener_specs(Fun, Args, Listeners) ->
+ [Fun([Address | Args]) ||
+ Listener <- Listeners,
+ Address <- rabbit_networking:tcp_listener_addresses(Listener)].
+
+tcp_listener_spec([Address, SocketOpts, Configuration, NumAcceptors]) ->
+ rabbit_networking:tcp_listener_spec(
+ rabbit_stomp_listener_sup, Address, SocketOpts,
+ transport(?TCP_PROTOCOL), rabbit_stomp_client_sup, Configuration,
+ stomp, NumAcceptors, "STOMP TCP listener").
+
+ssl_listener_spec([Address, SocketOpts, SslOpts, Configuration, NumAcceptors]) ->
+ rabbit_networking:tcp_listener_spec(
+ rabbit_stomp_listener_sup, Address, SocketOpts ++ SslOpts,
+ transport(?TLS_PROTOCOL), rabbit_stomp_client_sup, Configuration,
+ 'stomp/ssl', NumAcceptors, "STOMP TLS listener").
+
+transport(Protocol) ->
+ case Protocol of
+ ?TCP_PROTOCOL -> ranch_tcp;
+ ?TLS_PROTOCOL -> ranch_ssl
+ end.
diff --git a/deps/rabbitmq_stomp/src/rabbit_stomp_util.erl b/deps/rabbitmq_stomp/src/rabbit_stomp_util.erl
new file mode 100644
index 0000000000..6df1affbb7
--- /dev/null
+++ b/deps/rabbitmq_stomp/src/rabbit_stomp_util.erl
@@ -0,0 +1,418 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at https://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_stomp_util).
+
+-export([parse_message_id/1, subscription_queue_name/3]).
+-export([longstr_field/2]).
+-export([ack_mode/1, consumer_tag_reply_to/1, consumer_tag/1, message_headers/1,
+ headers_post_process/1, headers/5, message_properties/1, tag_to_id/1,
+ msg_header_name/1, ack_header_name/1, build_arguments/1, build_params/2,
+ has_durable_header/1]).
+-export([negotiate_version/2]).
+-export([trim_headers/1]).
+
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include_lib("amqp_client/include/rabbit_routing_prefixes.hrl").
+-include("rabbit_stomp_frame.hrl").
+-include("rabbit_stomp_headers.hrl").
+
+-define(INTERNAL_TAG_PREFIX, "T_").
+-define(QUEUE_TAG_PREFIX, "Q_").
+
+%%--------------------------------------------------------------------
+%% Frame and Header Parsing
+%%--------------------------------------------------------------------
+
+consumer_tag_reply_to(QueueId) ->
+ internal_tag(?TEMP_QUEUE_ID_PREFIX ++ QueueId).
+
+consumer_tag(Frame) ->
+ case rabbit_stomp_frame:header(Frame, ?HEADER_ID) of
+ {ok, Id} ->
+ case lists:prefix(?TEMP_QUEUE_ID_PREFIX, Id) of
+ false -> {ok, internal_tag(Id), "id='" ++ Id ++ "'"};
+ true -> {error, invalid_prefix}
+ end;
+ not_found ->
+ case rabbit_stomp_frame:header(Frame, ?HEADER_DESTINATION) of
+ {ok, DestHdr} ->
+ {ok, queue_tag(DestHdr),
+ "destination='" ++ DestHdr ++ "'"};
+ not_found ->
+ {error, missing_destination_header}
+ end
+ end.
+
+ack_mode(Frame) ->
+ case rabbit_stomp_frame:header(Frame, ?HEADER_ACK, "auto") of
+ "auto" -> {auto, false};
+ "client" -> {client, true};
+ "client-individual" -> {client, false}
+ end.
+
+message_properties(Frame = #stomp_frame{headers = Headers}) ->
+ BinH = fun(K) -> rabbit_stomp_frame:binary_header(Frame, K, undefined) end,
+ IntH = fun(K) -> rabbit_stomp_frame:integer_header(Frame, K, undefined) end,
+
+ DeliveryMode = case rabbit_stomp_frame:boolean_header(
+ Frame, ?HEADER_PERSISTENT, false) of
+ true -> 2;
+ false -> undefined
+ end,
+
+ #'P_basic'{ content_type = BinH(?HEADER_CONTENT_TYPE),
+ content_encoding = BinH(?HEADER_CONTENT_ENCODING),
+ headers = [longstr_field(K, V) ||
+ {K, V} <- Headers, user_header(K)],
+ delivery_mode = DeliveryMode,
+ priority = IntH(?HEADER_PRIORITY),
+ correlation_id = BinH(?HEADER_CORRELATION_ID),
+ reply_to = BinH(?HEADER_REPLY_TO),
+ expiration = BinH(?HEADER_EXPIRATION),
+ message_id = BinH(?HEADER_AMQP_MESSAGE_ID),
+ timestamp = IntH(?HEADER_TIMESTAMP),
+ type = BinH(?HEADER_TYPE),
+ user_id = BinH(?HEADER_USER_ID),
+ app_id = BinH(?HEADER_APP_ID) }.
+
+message_headers(Props = #'P_basic'{headers = Headers}) ->
+ adhoc_convert_headers(
+ Headers,
+ lists:foldl(fun({Header, Index}, Acc) ->
+ maybe_header(Header, element(Index, Props), Acc)
+ end, [],
+ [{?HEADER_CONTENT_TYPE, #'P_basic'.content_type},
+ {?HEADER_CONTENT_ENCODING, #'P_basic'.content_encoding},
+ {?HEADER_PERSISTENT, #'P_basic'.delivery_mode},
+ {?HEADER_PRIORITY, #'P_basic'.priority},
+ {?HEADER_CORRELATION_ID, #'P_basic'.correlation_id},
+ {?HEADER_REPLY_TO, #'P_basic'.reply_to},
+ {?HEADER_EXPIRATION, #'P_basic'.expiration},
+ {?HEADER_AMQP_MESSAGE_ID, #'P_basic'.message_id},
+ {?HEADER_TIMESTAMP, #'P_basic'.timestamp},
+ {?HEADER_TYPE, #'P_basic'.type},
+ {?HEADER_USER_ID, #'P_basic'.user_id},
+ {?HEADER_APP_ID, #'P_basic'.app_id}])).
+
+adhoc_convert_headers(undefined, Existing) ->
+ Existing;
+adhoc_convert_headers(Headers, Existing) ->
+ lists:foldr(fun ({K, longstr, V}, Acc) ->
+ [{binary_to_list(K), binary_to_list(V)} | Acc];
+ ({K, signedint, V}, Acc) ->
+ [{binary_to_list(K), integer_to_list(V)} | Acc];
+ (_, Acc) ->
+ Acc
+ end, Existing, Headers).
+
+headers_extra(SessionId, AckMode, Version,
+ #'basic.deliver'{consumer_tag = ConsumerTag,
+ delivery_tag = DeliveryTag,
+ exchange = ExchangeBin,
+ routing_key = RoutingKeyBin,
+ redelivered = Redelivered}) ->
+ case tag_to_id(ConsumerTag) of
+ {ok, {internal, Id}} -> [{?HEADER_SUBSCRIPTION, Id}];
+ _ -> []
+ end ++
+ [{?HEADER_DESTINATION,
+ format_destination(binary_to_list(ExchangeBin),
+ binary_to_list(RoutingKeyBin))},
+ {?HEADER_MESSAGE_ID,
+ create_message_id(ConsumerTag, SessionId, DeliveryTag)},
+ {?HEADER_REDELIVERED, Redelivered}] ++
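+    %% STOMP 1.2 requires MESSAGE frames to carry an "ack" header when
+    %% client acknowledgement is in use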
+ case AckMode == client andalso Version == "1.2" of
+ true -> [{?HEADER_ACK,
+ create_message_id(ConsumerTag, SessionId, DeliveryTag)}];
+ false -> []
+ end.
+
+headers_post_process(Headers) ->
+ Prefixes = rabbit_routing_util:dest_prefixes(),
+ [case Header of
+ {?HEADER_REPLY_TO, V} ->
+ case lists:any(fun (P) -> lists:prefix(P, V) end, Prefixes) of
+ true -> {?HEADER_REPLY_TO, V};
+ false -> {?HEADER_REPLY_TO, ?REPLY_QUEUE_PREFIX ++ V}
+ end;
+ {_, _} ->
+ Header
+ end || Header <- Headers].
+
+headers(SessionId, Delivery, Properties, AckMode, Version) ->
+ headers_extra(SessionId, AckMode, Version, Delivery) ++
+ headers_post_process(message_headers(Properties)).
+
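+%% Reverses the mapping established by internal_tag/1 and queue_tag/1
+%% below.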
+tag_to_id(<<?INTERNAL_TAG_PREFIX, Id/binary>>) ->
+ {ok, {internal, binary_to_list(Id)}};
+tag_to_id(<<?QUEUE_TAG_PREFIX, Id/binary>>) ->
+ {ok, {queue, binary_to_list(Id)}};
+tag_to_id(Other) when is_binary(Other) ->
+ {error, {unknown, binary_to_list(Other)}}.
+
+user_header(Hdr)
+ when Hdr =:= ?HEADER_CONTENT_TYPE orelse
+ Hdr =:= ?HEADER_CONTENT_ENCODING orelse
+ Hdr =:= ?HEADER_PERSISTENT orelse
+ Hdr =:= ?HEADER_PRIORITY orelse
+ Hdr =:= ?HEADER_CORRELATION_ID orelse
+ Hdr =:= ?HEADER_REPLY_TO orelse
+ Hdr =:= ?HEADER_EXPIRATION orelse
+ Hdr =:= ?HEADER_AMQP_MESSAGE_ID orelse
+ Hdr =:= ?HEADER_TIMESTAMP orelse
+ Hdr =:= ?HEADER_TYPE orelse
+ Hdr =:= ?HEADER_USER_ID orelse
+ Hdr =:= ?HEADER_APP_ID orelse
+ Hdr =:= ?HEADER_DESTINATION ->
+ false;
+user_header(_) ->
+ true.
+
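+%% A message id is the ConsumerTag, SessionId and DeliveryTag joined by
+%% ?MESSAGE_ID_SEPARATOR (see create_message_id/3 below).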
+parse_message_id(MessageId) ->
+ case split(MessageId, ?MESSAGE_ID_SEPARATOR) of
+ [ConsumerTag, SessionId, DeliveryTag] ->
+ {ok, {list_to_binary(ConsumerTag),
+ SessionId,
+ list_to_integer(DeliveryTag)}};
+ _ ->
+ {error, invalid_message_id}
+ end.
+
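+%% Picks the highest version supported by both sides, comparing versions
+%% numerically component by component, e.g. negotiate_version(["1.0", "1.1"],
+%% ["1.1", "1.2"]) returns {ok, "1.1"}.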
+negotiate_version(ClientVers, ServerVers) ->
+ Common = lists:filter(fun(Ver) ->
+ lists:member(Ver, ServerVers)
+ end, ClientVers),
+ case Common of
+ [] ->
+ {error, no_common_version};
+ [H|T] ->
+ {ok, lists:foldl(fun(Ver, AccN) ->
+ max_version(Ver, AccN)
+ end, H, T)}
+ end.
+
+max_version(V, V) ->
+ V;
+max_version(V1, V2) ->
+ Split = fun(X) -> re:split(X, "\\.", [{return, list}]) end,
+ find_max_version({V1, Split(V1)}, {V2, Split(V2)}).
+
+find_max_version({V1, [X|T1]}, {V2, [X|T2]}) ->
+ find_max_version({V1, T1}, {V2, T2});
+find_max_version({V1, [X]}, {V2, [Y]}) ->
+ case list_to_integer(X) >= list_to_integer(Y) of
+ true -> V1;
+ false -> V2
+ end;
+find_max_version({_V1, []}, {V2, Y}) when length(Y) > 0 ->
+ V2;
+find_max_version({V1, X}, {_V2, []}) when length(X) > 0 ->
+ V1.
+
+%% ---- Header processing helpers ----
+
+longstr_field(K, V) ->
+ {list_to_binary(K), longstr, list_to_binary(V)}.
+
+maybe_header(_Key, undefined, Acc) ->
+ Acc;
+maybe_header(?HEADER_PERSISTENT, 2, Acc) ->
+ [{?HEADER_PERSISTENT, "true"} | Acc];
+maybe_header(Key, Value, Acc) when is_binary(Value) ->
+ [{Key, binary_to_list(Value)} | Acc];
+maybe_header(Key, Value, Acc) when is_integer(Value) ->
+ [{Key, integer_to_list(Value)}| Acc];
+maybe_header(_Key, _Value, Acc) ->
+ Acc.
+
+create_message_id(ConsumerTag, SessionId, DeliveryTag) ->
+ [ConsumerTag,
+ ?MESSAGE_ID_SEPARATOR,
+ SessionId,
+ ?MESSAGE_ID_SEPARATOR,
+ integer_to_list(DeliveryTag)].
+
+trim_headers(Frame = #stomp_frame{headers = Hdrs}) ->
+ Frame#stomp_frame{headers = [{K, string:strip(V, left)} || {K, V} <- Hdrs]}.
+
+internal_tag(Base) ->
+ list_to_binary(?INTERNAL_TAG_PREFIX ++ Base).
+
+queue_tag(Base) ->
+ list_to_binary(?QUEUE_TAG_PREFIX ++ Base).
+
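+%% In STOMP 1.2 an ACK frame's "id" header must echo the "ack" header of
+%% the MESSAGE frame being acknowledged; STOMP 1.0 and 1.1 use the
+%% "message-id" header for both.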
+ack_header_name("1.2") -> ?HEADER_ID;
+ack_header_name("1.1") -> ?HEADER_MESSAGE_ID;
+ack_header_name("1.0") -> ?HEADER_MESSAGE_ID.
+
+msg_header_name("1.2") -> ?HEADER_ACK;
+msg_header_name("1.1") -> ?HEADER_MESSAGE_ID;
+msg_header_name("1.0") -> ?HEADER_MESSAGE_ID.
+
+build_arguments(Headers) ->
+ Arguments =
+ lists:foldl(fun({K, V}, Acc) ->
+ case lists:member(K, ?HEADER_ARGUMENTS) of
+ true -> [build_argument(K, V) | Acc];
+ false -> Acc
+ end
+ end,
+ [],
+ Headers),
+ {arguments, Arguments}.
+
+build_argument(?HEADER_X_DEAD_LETTER_EXCHANGE, Val) ->
+ {list_to_binary(?HEADER_X_DEAD_LETTER_EXCHANGE), longstr,
+ list_to_binary(string:strip(Val))};
+build_argument(?HEADER_X_DEAD_LETTER_ROUTING_KEY, Val) ->
+ {list_to_binary(?HEADER_X_DEAD_LETTER_ROUTING_KEY), longstr,
+ list_to_binary(string:strip(Val))};
+build_argument(?HEADER_X_EXPIRES, Val) ->
+ {list_to_binary(?HEADER_X_EXPIRES), long,
+ list_to_integer(string:strip(Val))};
+build_argument(?HEADER_X_MAX_LENGTH, Val) ->
+ {list_to_binary(?HEADER_X_MAX_LENGTH), long,
+ list_to_integer(string:strip(Val))};
+build_argument(?HEADER_X_MAX_LENGTH_BYTES, Val) ->
+ {list_to_binary(?HEADER_X_MAX_LENGTH_BYTES), long,
+ list_to_integer(string:strip(Val))};
+build_argument(?HEADER_X_MAX_PRIORITY, Val) ->
+ {list_to_binary(?HEADER_X_MAX_PRIORITY), long,
+ list_to_integer(string:strip(Val))};
+build_argument(?HEADER_X_MESSAGE_TTL, Val) ->
+ {list_to_binary(?HEADER_X_MESSAGE_TTL), long,
+ list_to_integer(string:strip(Val))};
+build_argument(?HEADER_X_QUEUE_TYPE, Val) ->
+ {list_to_binary(?HEADER_X_QUEUE_TYPE), longstr,
+ list_to_binary(string:strip(Val))}.
+
+build_params(EndPoint, Headers) ->
+ Params = lists:foldl(fun({K, V}, Acc) ->
+ case lists:member(K, ?HEADER_PARAMS) of
+ true -> [build_param(K, V) | Acc];
+ false -> Acc
+ end
+ end,
+ [],
+ Headers),
+ rabbit_misc:plmerge(Params, default_params(EndPoint)).
+
+build_param(?HEADER_PERSISTENT, Val) ->
+ {durable, string_to_boolean(Val)};
+
+build_param(?HEADER_DURABLE, Val) ->
+ {durable, string_to_boolean(Val)};
+
+build_param(?HEADER_AUTO_DELETE, Val) ->
+ {auto_delete, string_to_boolean(Val)};
+
+build_param(?HEADER_EXCLUSIVE, Val) ->
+ {exclusive, string_to_boolean(Val)}.
+
+default_params({queue, _}) ->
+ [{durable, true}];
+
+default_params({exchange, _}) ->
+ [{exclusive, false}, {auto_delete, true}];
+
+default_params({topic, _}) ->
+ [{exclusive, false}, {auto_delete, true}];
+
+default_params(_) ->
+ [{durable, false}].
+
+string_to_boolean("True") ->
+ true;
+string_to_boolean("true") ->
+ true;
+string_to_boolean("False") ->
+ false;
+string_to_boolean("false") ->
+ false;
+string_to_boolean(_) ->
+ undefined.
+
+has_durable_header(Frame) ->
+ rabbit_stomp_frame:boolean_header(
+ Frame, ?HEADER_DURABLE, false) or
+ rabbit_stomp_frame:boolean_header(
+ Frame, ?HEADER_PERSISTENT, false).
+
+%%--------------------------------------------------------------------
+%% Destination Formatting
+%%--------------------------------------------------------------------
+
+format_destination("", RoutingKey) ->
+ ?QUEUE_PREFIX ++ "/" ++ escape(RoutingKey);
+format_destination("amq.topic", RoutingKey) ->
+ ?TOPIC_PREFIX ++ "/" ++ escape(RoutingKey);
+format_destination(Exchange, "") ->
+ ?EXCHANGE_PREFIX ++ "/" ++ escape(Exchange);
+format_destination(Exchange, RoutingKey) ->
+ ?EXCHANGE_PREFIX ++ "/" ++ escape(Exchange) ++ "/" ++ escape(RoutingKey).
+
+%%--------------------------------------------------------------------
+%% Destination Parsing
+%%--------------------------------------------------------------------
+
+subscription_queue_name(Destination, SubscriptionId, Frame) ->
+ case rabbit_stomp_frame:header(Frame, ?HEADER_X_QUEUE_NAME, undefined) of
+ undefined ->
+ %% We need a queue name that a) can be derived from the
+ %% Destination and SubscriptionId, and b) meets the constraints on
+ %% AMQP queue names. It doesn't need to be secure; we use md5 here
+ %% simply as a convenient means to bound the length.
+ rabbit_guid:string(
+ erlang:md5(
+ term_to_binary_compat:term_to_binary_1(
+ {Destination, SubscriptionId})),
+ "stomp-subscription");
+ Name ->
+ Name
+ end.
+
+%% ---- Helpers ----
+
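+%% Splits a string on a (possibly multi-character) separator; used by
+%% parse_message_id/1 above.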
+split([], _Splitter) -> [];
+split(Content, Splitter) -> split(Content, [], [], Splitter).
+
+split([], RPart, RParts, _Splitter) ->
+ lists:reverse([lists:reverse(RPart) | RParts]);
+split(Content = [Elem | Rest1], RPart, RParts, Splitter) ->
+ case take_prefix(Splitter, Content) of
+ {ok, Rest2} ->
+ split(Rest2, [], [lists:reverse(RPart) | RParts], Splitter);
+ not_found ->
+ split(Rest1, [Elem | RPart], RParts, Splitter)
+ end.
+
+take_prefix([Char | Prefix], [Char | List]) -> take_prefix(Prefix, List);
+take_prefix([], List) -> {ok, List};
+take_prefix(_Prefix, _List) -> not_found.
+
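+%% Percent-encodes '/', '%', control characters and non-ASCII bytes so
+%% destinations are unambiguous. The accumulator is built in reverse,
+%% hence the reversed escape sequences below ("F2%" is "%2F" backwards).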
+escape(Str) -> escape(Str, []).
+
+escape([$/ | Str], Acc) -> escape(Str, "F2%" ++ Acc); %% $/ == '2F'x
+escape([$% | Str], Acc) -> escape(Str, "52%" ++ Acc); %% $% == '25'x
+escape([X | Str], Acc) when X < 32 orelse X > 127 ->
+ escape(Str, revhex(X) ++ "%" ++ Acc);
+escape([C | Str], Acc) -> escape(Str, [C | Acc]);
+escape([], Acc) -> lists:reverse(Acc).
+
+revhex(I) -> hexdig(I) ++ hexdig(I bsr 4).
+
+hexdig(I) -> erlang:integer_to_list(I band 15, 16).
diff --git a/deps/rabbitmq_stomp/test/amqqueue_SUITE.erl b/deps/rabbitmq_stomp/test/amqqueue_SUITE.erl
new file mode 100644
index 0000000000..0474fd67d6
--- /dev/null
+++ b/deps/rabbitmq_stomp/test/amqqueue_SUITE.erl
@@ -0,0 +1,319 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(amqqueue_SUITE).
+
+-compile(export_all).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include("rabbit_stomp.hrl").
+-include("rabbit_stomp_frame.hrl").
+-include("rabbit_stomp_headers.hrl").
+
+-define(QUEUE, <<"TestQueue">>).
+-define(DESTINATION, "/amq/queue/TestQueue").
+
+all() ->
+ [{group, version_to_group_name(V)} || V <- ?SUPPORTED_VERSIONS].
+
+groups() ->
+ Tests = [
+ publish_no_dest_error,
+ publish_unauthorized_error,
+ subscribe_error,
+ subscribe,
+ unsubscribe_ack,
+ subscribe_ack,
+ send,
+ delete_queue_subscribe,
+ temp_destination_queue,
+ temp_destination_in_send,
+ blank_destination_in_send
+ ],
+
+ [{version_to_group_name(V), [sequence], Tests}
+ || V <- ?SUPPORTED_VERSIONS].
+
+version_to_group_name(V) ->
+ list_to_atom(re:replace("version_" ++ V,
+ "\\.",
+ "_",
+ [global, {return, list}])).
+
+init_per_suite(Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config,
+ [{rmq_nodename_suffix, ?MODULE}]),
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps()).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config,
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_group(Group, Config) ->
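+    %% strip the "version_" prefix added by version_to_group_name/1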
+ Suffix = string:sub_string(atom_to_list(Group), 9),
+ Version = re:replace(Suffix, "_", ".", [global, {return, list}]),
+ rabbit_ct_helpers:set_config(Config, [{version, Version}]).
+
+end_per_group(_Group, Config) -> Config.
+
+init_per_testcase(TestCase, Config) ->
+ Version = ?config(version, Config),
+ StompPort = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_stomp),
+ {ok, Connection} = amqp_connection:start(#amqp_params_direct{
+ node = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename)
+ }),
+ {ok, Channel} = amqp_connection:open_channel(Connection),
+ {ok, Client} = rabbit_stomp_client:connect(Version, StompPort),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {amqp_connection, Connection},
+ {amqp_channel, Channel},
+ {stomp_client, Client}
+ ]),
+ init_per_testcase0(TestCase, Config1).
+
+end_per_testcase(TestCase, Config) ->
+ Connection = ?config(amqp_connection, Config),
+ Channel = ?config(amqp_channel, Config),
+ Client = ?config(stomp_client, Config),
+ rabbit_stomp_client:disconnect(Client),
+ amqp_channel:close(Channel),
+ amqp_connection:close(Connection),
+ end_per_testcase0(TestCase, Config).
+
+init_per_testcase0(publish_unauthorized_error, Config) ->
+ Channel = ?config(amqp_channel, Config),
+ #'queue.declare_ok'{} =
+ amqp_channel:call(Channel, #'queue.declare'{queue = <<"RestrictedQueue">>,
+ auto_delete = true}),
+
+ rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_auth_backend_internal, add_user,
+ [<<"user">>, <<"pass">>, <<"acting-user">>]),
+ rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_auth_backend_internal, set_permissions, [
+ <<"user">>, <<"/">>, <<"nothing">>, <<"nothing">>, <<"nothing">>, <<"acting-user">>]),
+ Version = ?config(version, Config),
+ StompPort = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_stomp),
+ {ok, ClientFoo} = rabbit_stomp_client:connect(Version, "user", "pass", StompPort),
+ rabbit_ct_helpers:set_config(Config, [{client_foo, ClientFoo}]);
+init_per_testcase0(_, Config) ->
+ Config.
+
+end_per_testcase0(publish_unauthorized_error, Config) ->
+ ClientFoo = ?config(client_foo, Config),
+ rabbit_stomp_client:disconnect(ClientFoo),
+ rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_auth_backend_internal, delete_user,
+ [<<"user">>, <<"acting-user">>]),
+ Config;
+end_per_testcase0(_, Config) ->
+ Config.
+
+publish_no_dest_error(Config) ->
+ Client = ?config(stomp_client, Config),
+ rabbit_stomp_client:send(
+ Client, "SEND", [{"destination", "/exchange/non-existent"}], ["hello"]),
+ {ok, _Client1, Hdrs, _} = stomp_receive(Client, "ERROR"),
+ "not_found" = proplists:get_value("message", Hdrs),
+ ok.
+
+publish_unauthorized_error(Config) ->
+ ClientFoo = ?config(client_foo, Config),
+ rabbit_stomp_client:send(
+ ClientFoo, "SEND", [{"destination", "/amq/queue/RestrictedQueue"}], ["hello"]),
+ {ok, _Client1, Hdrs, _} = stomp_receive(ClientFoo, "ERROR"),
+ "access_refused" = proplists:get_value("message", Hdrs),
+ ok.
+
+subscribe_error(Config) ->
+ Client = ?config(stomp_client, Config),
+ %% SUBSCRIBE to missing queue
+ rabbit_stomp_client:send(
+ Client, "SUBSCRIBE", [{"destination", ?DESTINATION}]),
+ {ok, _Client1, Hdrs, _} = stomp_receive(Client, "ERROR"),
+ "not_found" = proplists:get_value("message", Hdrs),
+ ok.
+
+subscribe(Config) ->
+ Channel = ?config(amqp_channel, Config),
+ Client = ?config(stomp_client, Config),
+ #'queue.declare_ok'{} =
+ amqp_channel:call(Channel, #'queue.declare'{queue = ?QUEUE,
+ auto_delete = true}),
+
+ %% subscribe and wait for receipt
+ rabbit_stomp_client:send(
+ Client, "SUBSCRIBE", [{"destination", ?DESTINATION}, {"receipt", "foo"}]),
+ {ok, Client1, _, _} = stomp_receive(Client, "RECEIPT"),
+
+ %% send from amqp
+ Method = #'basic.publish'{exchange = <<"">>, routing_key = ?QUEUE},
+
+ amqp_channel:call(Channel, Method, #amqp_msg{props = #'P_basic'{},
+ payload = <<"hello">>}),
+
+ {ok, _Client2, _, [<<"hello">>]} = stomp_receive(Client1, "MESSAGE"),
+ ok.
+
+unsubscribe_ack(Config) ->
+ Channel = ?config(amqp_channel, Config),
+ Client = ?config(stomp_client, Config),
+ Version = ?config(version, Config),
+ #'queue.declare_ok'{} =
+ amqp_channel:call(Channel, #'queue.declare'{queue = ?QUEUE,
+ auto_delete = true}),
+ %% subscribe and wait for receipt
+ rabbit_stomp_client:send(
+ Client, "SUBSCRIBE", [{"destination", ?DESTINATION},
+ {"receipt", "rcpt1"},
+ {"ack", "client"},
+ {"id", "subscription-id"}]),
+ {ok, Client1, _, _} = stomp_receive(Client, "RECEIPT"),
+
+ %% send from amqp
+ Method = #'basic.publish'{exchange = <<"">>, routing_key = ?QUEUE},
+
+ amqp_channel:call(Channel, Method, #amqp_msg{props = #'P_basic'{},
+ payload = <<"hello">>}),
+
+ {ok, Client2, Hdrs1, [<<"hello">>]} = stomp_receive(Client1, "MESSAGE"),
+
+ rabbit_stomp_client:send(
+ Client2, "UNSUBSCRIBE", [{"destination", ?DESTINATION},
+ {"id", "subscription-id"}]),
+
+ rabbit_stomp_client:send(
+ Client2, "ACK", [{rabbit_stomp_util:ack_header_name(Version),
+ proplists:get_value(
+ rabbit_stomp_util:msg_header_name(Version), Hdrs1)},
+ {"receipt", "rcpt2"}]),
+
+ {ok, _Client3, Hdrs2, _Body2} = stomp_receive(Client2, "ERROR"),
+ ?assertEqual("Subscription not found",
+ proplists:get_value("message", Hdrs2)),
+ ok.
+
+subscribe_ack(Config) ->
+ Channel = ?config(amqp_channel, Config),
+ Client = ?config(stomp_client, Config),
+ Version = ?config(version, Config),
+ #'queue.declare_ok'{} =
+ amqp_channel:call(Channel, #'queue.declare'{queue = ?QUEUE,
+ auto_delete = true}),
+
+ %% subscribe and wait for receipt
+ rabbit_stomp_client:send(
+ Client, "SUBSCRIBE", [{"destination", ?DESTINATION},
+ {"receipt", "foo"},
+ {"ack", "client"}]),
+ {ok, Client1, _, _} = stomp_receive(Client, "RECEIPT"),
+
+ %% send from amqp
+ Method = #'basic.publish'{exchange = <<"">>, routing_key = ?QUEUE},
+
+ amqp_channel:call(Channel, Method, #amqp_msg{props = #'P_basic'{},
+ payload = <<"hello">>}),
+
+ {ok, _Client2, Headers, [<<"hello">>]} = stomp_receive(Client1, "MESSAGE"),
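+    %% the "ack" header must be present if and only if we are talking
+    %% STOMP 1.2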
+ false = (Version == "1.2") xor proplists:is_defined(?HEADER_ACK, Headers),
+
+ MsgHeader = rabbit_stomp_util:msg_header_name(Version),
+ AckValue = proplists:get_value(MsgHeader, Headers),
+ AckHeader = rabbit_stomp_util:ack_header_name(Version),
+
+ rabbit_stomp_client:send(Client, "ACK", [{AckHeader, AckValue}]),
+ #'basic.get_empty'{} =
+ amqp_channel:call(Channel, #'basic.get'{queue = ?QUEUE}),
+ ok.
+
+send(Config) ->
+ Channel = ?config(amqp_channel, Config),
+ Client = ?config(stomp_client, Config),
+ #'queue.declare_ok'{} =
+ amqp_channel:call(Channel, #'queue.declare'{queue = ?QUEUE,
+ auto_delete = true}),
+
+ %% subscribe and wait for receipt
+ rabbit_stomp_client:send(
+ Client, "SUBSCRIBE", [{"destination", ?DESTINATION}, {"receipt", "foo"}]),
+ {ok, Client1, _, _} = stomp_receive(Client, "RECEIPT"),
+
+ %% send from stomp
+ rabbit_stomp_client:send(
+ Client1, "SEND", [{"destination", ?DESTINATION}], ["hello"]),
+
+ {ok, _Client2, _, [<<"hello">>]} = stomp_receive(Client1, "MESSAGE"),
+ ok.
+
+delete_queue_subscribe(Config) ->
+ Channel = ?config(amqp_channel, Config),
+ Client = ?config(stomp_client, Config),
+ #'queue.declare_ok'{} =
+ amqp_channel:call(Channel, #'queue.declare'{queue = ?QUEUE,
+ auto_delete = true}),
+
+ %% subscribe and wait for receipt
+ rabbit_stomp_client:send(
+ Client, "SUBSCRIBE", [{"destination", ?DESTINATION}, {"receipt", "bah"}]),
+ {ok, Client1, _, _} = stomp_receive(Client, "RECEIPT"),
+
+ %% delete queue while subscribed
+ #'queue.delete_ok'{} =
+ amqp_channel:call(Channel, #'queue.delete'{queue = ?QUEUE}),
+
+ {ok, _Client2, Headers, _} = stomp_receive(Client1, "ERROR"),
+
+ ?DESTINATION = proplists:get_value("subscription", Headers),
+
+ % server closes connection
+ ok.
+
+temp_destination_queue(Config) ->
+ Channel = ?config(amqp_channel, Config),
+ Client = ?config(stomp_client, Config),
+ #'queue.declare_ok'{} =
+ amqp_channel:call(Channel, #'queue.declare'{queue = ?QUEUE,
+ auto_delete = true}),
+    rabbit_stomp_client:send(Client, "SEND", [{"destination", ?DESTINATION},
+                                              {"reply-to", "/temp-queue/foo"}],
+                             ["ping"]),
+ amqp_channel:call(Channel,#'basic.consume'{queue = ?QUEUE, no_ack = true}),
+ receive #'basic.consume_ok'{consumer_tag = _Tag} -> ok end,
+ ReplyTo = receive {#'basic.deliver'{delivery_tag = _DTag},
+ #'amqp_msg'{payload = <<"ping">>,
+ props = #'P_basic'{reply_to = RT}}} -> RT
+ end,
+ ok = amqp_channel:call(Channel,
+ #'basic.publish'{routing_key = ReplyTo},
+ #amqp_msg{payload = <<"pong">>}),
+ {ok, _Client1, _, [<<"pong">>]} = stomp_receive(Client, "MESSAGE"),
+ ok.
+
+temp_destination_in_send(Config) ->
+ Client = ?config(stomp_client, Config),
+    rabbit_stomp_client:send(Client, "SEND", [{"destination", "/temp-queue/foo"}],
+                             ["poing"]),
+ {ok, _Client1, Hdrs, _} = stomp_receive(Client, "ERROR"),
+ "Invalid destination" = proplists:get_value("message", Hdrs),
+ ok.
+
+blank_destination_in_send(Config) ->
+ Client = ?config(stomp_client, Config),
+    rabbit_stomp_client:send(Client, "SEND", [{"destination", ""}],
+                             ["poing"]),
+ {ok, _Client1, Hdrs, _} = stomp_receive(Client, "ERROR"),
+ "Invalid destination" = proplists:get_value("message", Hdrs),
+ ok.
+
+stomp_receive(Client, Command) ->
+ {#stomp_frame{command = Command,
+ headers = Hdrs,
+ body_iolist = Body}, Client1} =
+ rabbit_stomp_client:recv(Client),
+ {ok, Client1, Hdrs, Body}.
+
diff --git a/deps/rabbitmq_stomp/test/command_SUITE.erl b/deps/rabbitmq_stomp/test/command_SUITE.erl
new file mode 100644
index 0000000000..8fe9fa0d0f
--- /dev/null
+++ b/deps/rabbitmq_stomp/test/command_SUITE.erl
@@ -0,0 +1,127 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(command_SUITE).
+-compile([export_all]).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include("rabbit_stomp.hrl").
+
+
+-define(COMMAND, 'Elixir.RabbitMQ.CLI.Ctl.Commands.ListStompConnectionsCommand').
+
+all() ->
+ [
+ {group, non_parallel_tests}
+ ].
+
+groups() ->
+ [
+ {non_parallel_tests, [], [
+ merge_defaults,
+ run
+ ]}
+ ].
+
+init_per_suite(Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config,
+ [{rmq_nodename_suffix, ?MODULE}]),
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps()).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config,
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_group(_, Config) ->
+ Config.
+
+end_per_group(_, Config) ->
+ Config.
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+merge_defaults(_Config) ->
+ {[<<"session_id">>, <<"conn_name">>], #{verbose := false}} =
+ ?COMMAND:merge_defaults([], #{}),
+
+ {[<<"other_key">>], #{verbose := true}} =
+ ?COMMAND:merge_defaults([<<"other_key">>], #{verbose => true}),
+
+ {[<<"other_key">>], #{verbose := false}} =
+ ?COMMAND:merge_defaults([<<"other_key">>], #{verbose => false}).
+
+
+run(Config) ->
+ Node = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ Opts = #{node => Node, timeout => 10000, verbose => false},
+
+ %% No connections
+ [] = 'Elixir.Enum':to_list(?COMMAND:run([], Opts)),
+
+ StompPort = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_stomp),
+
+ {ok, _Client} = rabbit_stomp_client:connect(StompPort),
+ ct:sleep(100),
+
+ [[{session_id, _}]] =
+ 'Elixir.Enum':to_list(?COMMAND:run([<<"session_id">>], Opts)),
+
+ {ok, _Client2} = rabbit_stomp_client:connect(StompPort),
+ ct:sleep(100),
+
+ [[{session_id, _}], [{session_id, _}]] =
+ 'Elixir.Enum':to_list(?COMMAND:run([<<"session_id">>], Opts)),
+
+ Port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp),
+ start_amqp_connection(network, Node, Port),
+
+    %% There are still just two STOMP connections
+ [[{session_id, _}], [{session_id, _}]] =
+ 'Elixir.Enum':to_list(?COMMAND:run([<<"session_id">>], Opts)),
+
+ start_amqp_connection(direct, Node, Port),
+
+    %% Still just two STOMP connections; direct AMQP 0-9-1 connections are not counted either
+ [[{session_id, _}], [{session_id, _}]] =
+ 'Elixir.Enum':to_list(?COMMAND:run([<<"session_id">>], Opts)),
+
+ %% Verbose returns all keys
+ Infos = lists:map(fun(El) -> atom_to_binary(El, utf8) end, ?INFO_ITEMS),
+ AllKeys = 'Elixir.Enum':to_list(?COMMAND:run(Infos, Opts)),
+ AllKeys = 'Elixir.Enum':to_list(?COMMAND:run([], Opts#{verbose => true})),
+
+ %% There are two connections
+ [First, _Second] = AllKeys,
+
+ %% Keys are INFO_ITEMS
+ KeysCount = length(?INFO_ITEMS),
+ KeysCount = length(First),
+
+ {Keys, _} = lists:unzip(First),
+
+ [] = Keys -- ?INFO_ITEMS,
+ [] = ?INFO_ITEMS -- Keys.
+
+
+start_amqp_connection(Type, Node, Port) ->
+ Params = amqp_params(Type, Node, Port),
+ {ok, _Connection} = amqp_connection:start(Params).
+
+amqp_params(network, _, Port) ->
+ #amqp_params_network{port = Port};
+amqp_params(direct, Node, _) ->
+ #amqp_params_direct{node = Node}.
diff --git a/deps/rabbitmq_stomp/test/config_schema_SUITE.erl b/deps/rabbitmq_stomp/test/config_schema_SUITE.erl
new file mode 100644
index 0000000000..8d340810f7
--- /dev/null
+++ b/deps/rabbitmq_stomp/test/config_schema_SUITE.erl
@@ -0,0 +1,55 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(config_schema_SUITE).
+
+-compile(export_all).
+
+all() ->
+ [
+ run_snippets
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ Config1 = rabbit_ct_helpers:run_setup_steps(Config),
+ rabbit_ct_config_schema:init_schemas(rabbitmq_stomp, Config1).
+
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, Testcase}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_testcase(Testcase, Config) ->
+ Config1 = rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()),
+ rabbit_ct_helpers:testcase_finished(Config1, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+run_snippets(Config) ->
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, run_snippets1, [Config]).
+
+run_snippets1(Config) ->
+ rabbit_ct_config_schema:run_snippets(Config).
+
diff --git a/deps/rabbitmq_stomp/test/config_schema_SUITE_data/certs/cacert.pem b/deps/rabbitmq_stomp/test/config_schema_SUITE_data/certs/cacert.pem
new file mode 100644
index 0000000000..eaf6b67806
--- /dev/null
+++ b/deps/rabbitmq_stomp/test/config_schema_SUITE_data/certs/cacert.pem
@@ -0,0 +1 @@
+I'm not a certificate
diff --git a/deps/rabbitmq_stomp/test/config_schema_SUITE_data/certs/cert.pem b/deps/rabbitmq_stomp/test/config_schema_SUITE_data/certs/cert.pem
new file mode 100644
index 0000000000..eaf6b67806
--- /dev/null
+++ b/deps/rabbitmq_stomp/test/config_schema_SUITE_data/certs/cert.pem
@@ -0,0 +1 @@
+I'm not a certificate
diff --git a/deps/rabbitmq_stomp/test/config_schema_SUITE_data/certs/key.pem b/deps/rabbitmq_stomp/test/config_schema_SUITE_data/certs/key.pem
new file mode 100644
index 0000000000..eaf6b67806
--- /dev/null
+++ b/deps/rabbitmq_stomp/test/config_schema_SUITE_data/certs/key.pem
@@ -0,0 +1 @@
+I'm not a certificate
diff --git a/deps/rabbitmq_stomp/test/config_schema_SUITE_data/rabbitmq_stomp.snippets b/deps/rabbitmq_stomp/test/config_schema_SUITE_data/rabbitmq_stomp.snippets
new file mode 100644
index 0000000000..6081240c68
--- /dev/null
+++ b/deps/rabbitmq_stomp/test/config_schema_SUITE_data/rabbitmq_stomp.snippets
@@ -0,0 +1,97 @@
+[{listener_port,
+ "stomp.listeners.tcp.1 = 12345",
+ [{rabbitmq_stomp,[{tcp_listeners,[12345]}]}],
+ [rabbitmq_stomp]},
+ {listeners_ip,
+ "stomp.listeners.tcp.1 = 127.0.0.1:61613
+ stomp.listeners.tcp.2 = ::1:61613",
+ [{rabbitmq_stomp,[{tcp_listeners,[{"127.0.0.1",61613},{"::1",61613}]}]}],
+ [rabbitmq_stomp]},
+
+ {listener_tcp_options,
+ "stomp.listeners.tcp.1 = 127.0.0.1:61613
+ stomp.listeners.tcp.2 = ::1:61613
+
+ stomp.tcp_listen_options.backlog = 2048
+ stomp.tcp_listen_options.recbuf = 8192
+ stomp.tcp_listen_options.sndbuf = 8192
+
+ stomp.tcp_listen_options.keepalive = true
+ stomp.tcp_listen_options.nodelay = true
+
+ stomp.tcp_listen_options.exit_on_close = true
+
+ stomp.tcp_listen_options.send_timeout = 120
+",
+ [{rabbitmq_stomp,[
+ {tcp_listeners,[
+ {"127.0.0.1",61613},
+ {"::1",61613}
+ ]}
+ , {tcp_listen_options, [
+ {backlog, 2048},
+ {exit_on_close, true},
+
+ {recbuf, 8192},
+ {sndbuf, 8192},
+
+ {send_timeout, 120},
+
+ {keepalive, true},
+ {nodelay, true}
+ ]}
+ ]}],
+ [rabbitmq_stomp]},
+
+ {ssl,
+ "ssl_options.cacertfile = test/config_schema_SUITE_data/certs/cacert.pem
+ ssl_options.certfile = test/config_schema_SUITE_data/certs/cert.pem
+ ssl_options.keyfile = test/config_schema_SUITE_data/certs/key.pem
+ ssl_options.verify = verify_peer
+ ssl_options.fail_if_no_peer_cert = true
+
+ stomp.listeners.tcp.1 = 61613
+ stomp.listeners.ssl.1 = 61614",
+ [{rabbit,
+ [{ssl_options,
+ [{cacertfile,"test/config_schema_SUITE_data/certs/cacert.pem"},
+ {certfile,"test/config_schema_SUITE_data/certs/cert.pem"},
+ {keyfile,"test/config_schema_SUITE_data/certs/key.pem"},
+ {verify,verify_peer},
+ {fail_if_no_peer_cert,true}]}]},
+ {rabbitmq_stomp,[{tcp_listeners,[61613]},{ssl_listeners,[61614]}]}],
+ [rabbitmq_stomp]},
+ {defaults,
+ "stomp.default_user = guest
+ stomp.default_pass = guest
+ stomp.proxy_protocol = false
+ stomp.hide_server_info = false",
+ [{rabbitmq_stomp,[{default_user,[{login,"guest"},{passcode,"guest"}]},
+ {proxy_protocol,false},{hide_server_info,false}]}],
+ [rabbitmq_stomp]},
+ {ssl_cert_login,
+ "stomp.ssl_cert_login = true",
+ [{rabbitmq_stomp,[{ssl_cert_login,true}]}],
+ [rabbitmq_stomp]},
+ {proxy_protocol,
+ "stomp.default_user = guest
+ stomp.default_pass = guest
+ stomp.implicit_connect = true
+ stomp.proxy_protocol = true",
+ [{rabbitmq_stomp,[{default_user,[{login,"guest"},{passcode,"guest"}]},
+ {implicit_connect,true},
+ {proxy_protocol,true}]}],
+ [rabbitmq_stomp]},
+ {default_vhost,
+ "stomp.default_vhost = /",
+ [{rabbitmq_stomp,[{default_vhost,<<"/">>}]}],
+ [rabbitmq_stomp]},
+ {default_topic_exchange,
+ "stomp.default_topic_exchange = my.fancy.topic",
+ [{rabbitmq_stomp,[{default_topic_exchange,<<"my.fancy.topic">>}]}],
+ [rabbitmq_stomp]},
+ {hide_server_info,
+ "stomp.hide_server_info = true",
+ [{rabbitmq_stomp,[{hide_server_info,true}]}],
+ [rabbitmq_stomp]}
+].
diff --git a/deps/rabbitmq_stomp/test/connections_SUITE.erl b/deps/rabbitmq_stomp/test/connections_SUITE.erl
new file mode 100644
index 0000000000..4f9b027bb9
--- /dev/null
+++ b/deps/rabbitmq_stomp/test/connections_SUITE.erl
@@ -0,0 +1,160 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(connections_SUITE).
+-compile(export_all).
+
+-import(rabbit_misc, [pget/2]).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include("rabbit_stomp_frame.hrl").
+-define(DESTINATION, "/queue/bulk-test").
+
+all() ->
+ [
+ messages_not_dropped_on_disconnect,
+ direct_client_connections_are_not_leaked,
+ stats_are_not_leaked,
+ stats,
+ heartbeat
+ ].
+
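+%% Enable basic statistics with a 100 ms emission interval so the stats
+%% assertions below can observe metrics shortly after connections appear.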
+merge_app_env(Config) ->
+ rabbit_ct_helpers:merge_app_env(Config,
+ {rabbit, [
+ {collect_statistics, basic},
+ {collect_statistics_interval, 100}
+ ]}).
+
+init_per_suite(Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config,
+ [{rmq_nodename_suffix, ?MODULE}]),
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config1,
+ [ fun merge_app_env/1 ] ++
+ rabbit_ct_broker_helpers:setup_steps()).
+
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config,
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+-define(GARBAGE, <<"bdaf63dda9d78b075c748b740e7c3510ad203b07\nbdaf63dd">>).
+
+count_connections(Config) ->
+ StompPort = get_stomp_port(Config),
+    %% The STOMP port is assigned by the CT broker helpers rather than the
+    %% default 61613, which sits in the middle of the ephemeral ports range
+    %% on many operating systems and may therefore already be in use.
+ IPv4Count = try
+ %% Count IPv4 connections. On some platforms, the IPv6 listener
+        %% implicitly listens to IPv4 connections too, so the IPv4
+ %% listener doesn't exist. Thus this try/catch. This is the case
+ %% with Linux where net.ipv6.bindv6only is disabled (default in
+ %% most cases).
+ rpc_count_connections(Config, {acceptor, {0,0,0,0}, StompPort})
+ catch
+ _:{badarg, _} -> 0;
+ _:Other -> exit({foo, Other})
+ end,
+ IPv6Count = try
+ %% Count IPv6 connections. We also use a try/catch block in case
+ %% the host is not configured for IPv6.
+ rpc_count_connections(Config, {acceptor, {0,0,0,0,0,0,0,0}, StompPort})
+ catch
+ _:{badarg, _} -> 0;
+ _:Other1 -> exit({foo, Other1})
+ end,
+ IPv4Count + IPv6Count.
+
+rpc_count_connections(Config, ConnSpec) ->
+ rabbit_ct_broker_helpers:rpc(Config, 0,
+ ranch_server, count_connections, [ConnSpec]).
+
+direct_client_connections_are_not_leaked(Config) ->
+ StompPort = get_stomp_port(Config),
+ N = count_connections(Config),
+ lists:foreach(fun (_) ->
+ {ok, Client = {Socket, _}} = rabbit_stomp_client:connect(StompPort),
+ %% send garbage which trips up the parser
+ gen_tcp:send(Socket, ?GARBAGE),
+ rabbit_stomp_client:send(
+ Client, "LOL", [{"", ""}])
+ end,
+ lists:seq(1, 100)),
+ timer:sleep(5000),
+ N = count_connections(Config),
+ ok.
+
+messages_not_dropped_on_disconnect(Config) ->
+ StompPort = get_stomp_port(Config),
+ N = count_connections(Config),
+ {ok, Client} = rabbit_stomp_client:connect(StompPort),
+ N1 = N + 1,
+ N1 = count_connections(Config),
+ [rabbit_stomp_client:send(
+ Client, "SEND", [{"destination", ?DESTINATION}],
+ [integer_to_list(Count)]) || Count <- lists:seq(1, 1000)],
+ rabbit_stomp_client:disconnect(Client),
+ QName = rabbit_misc:r(<<"/">>, queue, <<"bulk-test">>),
+ timer:sleep(3000),
+ N = count_connections(Config),
+ {ok, Q} = rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_amqqueue, lookup, [QName]),
+ Messages = rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_amqqueue, info, [Q, [messages]]),
+ 1000 = pget(messages, Messages),
+ ok.
+
+get_stomp_port(Config) ->
+ rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_stomp).
+
+stats_are_not_leaked(Config) ->
+ StompPort = get_stomp_port(Config),
+ N = rabbit_ct_broker_helpers:rpc(Config, 0, ets, info, [connection_metrics, size]),
+ {ok, C} = gen_tcp:connect("localhost", StompPort, []),
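+    %% A plain HTTP request is not valid STOMP; the connection should be
+    %% rejected without any metrics being registered for it.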
+ Bin = <<"GET / HTTP/1.1\r\nHost: www.rabbitmq.com\r\nUser-Agent: curl/7.43.0\r\nAccept: */*\n\n">>,
+ gen_tcp:send(C, Bin),
+ gen_tcp:close(C),
+    timer:sleep(1000), %% Wait for stats emission, which happens every 100ms
+ N = rabbit_ct_broker_helpers:rpc(Config, 0, ets, info, [connection_metrics, size]),
+ ok.
+
+stats(Config) ->
+ StompPort = get_stomp_port(Config),
+ {ok, Client} = rabbit_stomp_client:connect(StompPort),
+    timer:sleep(1000), %% Wait for stats emission, which happens every 100ms
+ %% Retrieve the connection Pid
+ [Reader] = rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_stomp, list, []),
+ [{_, Pid}] = rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_stomp_reader,
+ info, [Reader, [connection]]),
+ %% Verify the content of the metrics, garbage_collection must be present
+ [{Pid, Props}] = rabbit_ct_broker_helpers:rpc(Config, 0, ets, lookup,
+ [connection_metrics, Pid]),
+ true = proplists:is_defined(garbage_collection, Props),
+ 0 = proplists:get_value(timeout, Props),
+ %% If the coarse entry is present, stats were successfully emitted
+ [{Pid, _, _, _, _}] = rabbit_ct_broker_helpers:rpc(Config, 0, ets, lookup,
+ [connection_coarse_metrics, Pid]),
+ rabbit_stomp_client:disconnect(Client),
+ ok.
+
+heartbeat(Config) ->
+ StompPort = get_stomp_port(Config),
+ {ok, Client} = rabbit_stomp_client:connect("1.2", "guest", "guest", StompPort,
+ [{"heart-beat", "5000,7000"}]),
+    timer:sleep(1000), %% Wait for stats emission, which happens every 100ms
+ %% Retrieve the connection Pid
+ [Reader] = rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_stomp, list, []),
+ [{_, Pid}] = rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_stomp_reader,
+ info, [Reader, [connection]]),
+ %% Verify the content of the heartbeat timeout
+ [{Pid, Props}] = rabbit_ct_broker_helpers:rpc(Config, 0, ets, lookup,
+ [connection_metrics, Pid]),
+ 5 = proplists:get_value(timeout, Props),
+ rabbit_stomp_client:disconnect(Client),
+ ok.
diff --git a/deps/rabbitmq_stomp/test/frame_SUITE.erl b/deps/rabbitmq_stomp/test/frame_SUITE.erl
new file mode 100644
index 0000000000..da191ac12a
--- /dev/null
+++ b/deps/rabbitmq_stomp/test/frame_SUITE.erl
@@ -0,0 +1,191 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(frame_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include("rabbit_stomp_frame.hrl").
+-include("rabbit_stomp_headers.hrl").
+-compile(export_all).
+
+all() ->
+ [
+ parse_simple_frame,
+ parse_simple_frame_crlf,
+ parse_command_only,
+ parse_command_prefixed_with_newline,
+ parse_ignore_empty_frames,
+ parse_heartbeat_interframe,
+ parse_crlf_interframe,
+ parse_carriage_return_not_ignored_interframe,
+ parse_carriage_return_mid_command,
+ parse_carriage_return_end_command,
+ parse_resume_mid_command,
+ parse_resume_mid_header_key,
+ parse_resume_mid_header_val,
+ parse_resume_mid_body,
+ parse_no_header_stripping,
+ parse_multiple_headers,
+ header_no_colon,
+ no_nested_escapes,
+ header_name_with_cr,
+ header_value_with_cr,
+ header_value_with_colon,
+ headers_escaping_roundtrip,
+ headers_escaping_roundtrip_without_trailing_lf
+ ].
+
+parse_simple_frame(_) ->
+ parse_simple_frame_gen("\n").
+
+parse_simple_frame_crlf(_) ->
+ parse_simple_frame_gen("\r\n").
+
+parse_simple_frame_gen(Term) ->
+ Headers = [{"header1", "value1"}, {"header2", "value2"}],
+ Content = frame_string("COMMAND",
+ Headers,
+ "Body Content",
+ Term),
+ {"COMMAND", Frame, _State} = parse_complete(Content),
+ [?assertEqual({ok, Value},
+ rabbit_stomp_frame:header(Frame, Key)) ||
+ {Key, Value} <- Headers],
+ #stomp_frame{body_iolist = Body} = Frame,
+ ?assertEqual(<<"Body Content">>, iolist_to_binary(Body)).
+
+parse_command_only(_) ->
+ {ok, #stomp_frame{command = "COMMAND"}, _Rest} = parse("COMMAND\n\n\0").
+
+parse_command_prefixed_with_newline(_) ->
+ {ok, #stomp_frame{command = "COMMAND"}, _Rest} = parse("\nCOMMAND\n\n\0").
+
+parse_ignore_empty_frames(_) ->
+ {ok, #stomp_frame{command = "COMMAND"}, _Rest} = parse("\0\0COMMAND\n\n\0").
+
+parse_heartbeat_interframe(_) ->
+ {ok, #stomp_frame{command = "COMMAND"}, _Rest} = parse("\nCOMMAND\n\n\0").
+
+parse_crlf_interframe(_) ->
+ {ok, #stomp_frame{command = "COMMAND"}, _Rest} = parse("\r\nCOMMAND\n\n\0").
+
+parse_carriage_return_not_ignored_interframe(_) ->
+ {error, {unexpected_chars_between_frames, "\rC"}} = parse("\rCOMMAND\n\n\0").
+
+parse_carriage_return_mid_command(_) ->
+ {error, {unexpected_chars_in_command, "\rA"}} = parse("COMM\rAND\n\n\0").
+
+parse_carriage_return_end_command(_) ->
+ {error, {unexpected_chars_in_command, "\r\r"}} = parse("COMMAND\r\r\n\n\0").
+
+parse_resume_mid_command(_) ->
+ First = "COMM",
+ Second = "AND\n\n\0",
+ {more, Resume} = parse(First),
+ {ok, #stomp_frame{command = "COMMAND"}, _Rest} = parse(Second, Resume).
+
+parse_resume_mid_header_key(_) ->
+ First = "COMMAND\nheade",
+ Second = "r1:value1\n\n\0",
+ {more, Resume} = parse(First),
+ {ok, Frame = #stomp_frame{command = "COMMAND"}, _Rest} =
+ parse(Second, Resume),
+ ?assertEqual({ok, "value1"},
+ rabbit_stomp_frame:header(Frame, "header1")).
+
+parse_resume_mid_header_val(_) ->
+ First = "COMMAND\nheader1:val",
+ Second = "ue1\n\n\0",
+ {more, Resume} = parse(First),
+ {ok, Frame = #stomp_frame{command = "COMMAND"}, _Rest} =
+ parse(Second, Resume),
+ ?assertEqual({ok, "value1"},
+ rabbit_stomp_frame:header(Frame, "header1")).
+
+parse_resume_mid_body(_) ->
+ First = "COMMAND\n\nABC",
+ Second = "DEF\0",
+ {more, Resume} = parse(First),
+ {ok, #stomp_frame{command = "COMMAND", body_iolist = Body}, _Rest} =
+ parse(Second, Resume),
+ ?assertEqual([<<"ABC">>, <<"DEF">>], Body).
+
+parse_no_header_stripping(_) ->
+ Content = "COMMAND\nheader: foo \n\n\0",
+ {ok, Frame, _} = parse(Content),
+ {ok, Val} = rabbit_stomp_frame:header(Frame, "header"),
+ ?assertEqual(" foo ", Val).
+
+parse_multiple_headers(_) ->
+ Content = "COMMAND\nheader:correct\nheader:incorrect\n\n\0",
+ {ok, Frame, _} = parse(Content),
+ {ok, Val} = rabbit_stomp_frame:header(Frame, "header"),
+ ?assertEqual("correct", Val).
+
+header_no_colon(_) ->
+ Content = "COMMAND\n"
+ "hdr1:val1\n"
+ "hdrerror\n"
+ "hdr2:val2\n"
+ "\n\0",
+ ?assertEqual(parse(Content), {error, {header_no_value, "hdrerror"}}).
+
+no_nested_escapes(_) ->
+ Content = "COM\\\\rAND\n" % no escapes
+ "hdr\\\\rname:" % one escape
+ "hdr\\\\rval\n\n\0", % one escape
+ {ok, Frame, _} = parse(Content),
+ ?assertEqual(Frame,
+ #stomp_frame{command = "COM\\\\rAND",
+ headers = [{"hdr\\rname", "hdr\\rval"}],
+ body_iolist = []}).
+
+header_name_with_cr(_) ->
+ Content = "COMMAND\nhead\rer:val\n\n\0",
+ {error, {unexpected_chars_in_header, "\re"}} = parse(Content).
+
+header_value_with_cr(_) ->
+ Content = "COMMAND\nheader:val\rue\n\n\0",
+ {error, {unexpected_chars_in_header, "\ru"}} = parse(Content).
+
+header_value_with_colon(_) ->
+ Content = "COMMAND\nheader:val:ue\n\n\0",
+ {ok, Frame, _} = parse(Content),
+ ?assertEqual(Frame,
+ #stomp_frame{ command = "COMMAND",
+ headers = [{"header", "val:ue"}],
+ body_iolist = []}).
+
+test_frame_serialization(Expected, TrailingLF) ->
+ {ok, Frame, _} = parse(Expected),
+ {ok, Val} = rabbit_stomp_frame:header(Frame, "head\r:\ner"),
+ ?assertEqual(":\n\r\\", Val),
+ Serialized = lists:flatten(rabbit_stomp_frame:serialize(Frame, TrailingLF)),
+ ?assertEqual(Expected, rabbit_misc:format("~s", [Serialized])).
+
+headers_escaping_roundtrip(_) ->
+ test_frame_serialization("COMMAND\nhead\\r\\c\\ner:\\c\\n\\r\\\\\n\n\0\n", true).
+
+headers_escaping_roundtrip_without_trailing_lf(_) ->
+ test_frame_serialization("COMMAND\nhead\\r\\c\\ner:\\c\\n\\r\\\\\n\n\0", false).
+
+parse(Content) ->
+ parse(Content, rabbit_stomp_frame:initial_state()).
+parse(Content, State) ->
+ rabbit_stomp_frame:parse(list_to_binary(Content), State).
+
+parse_complete(Content) ->
+ {ok, Frame = #stomp_frame{command = Command}, State} = parse(Content),
+ {Command, Frame, State}.
+
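+%% Builds a raw frame: command line, header lines, a blank line, then the
+%% body terminated by a NUL byte (plus a trailing LF), with Term supplying
+%% the line terminator under test ("\n" or "\r\n").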
+frame_string(Command, Headers, BodyContent, Term) ->
+ HeaderString =
+ lists:flatten([Key ++ ":" ++ Value ++ Term || {Key, Value} <- Headers]),
+ Command ++ Term ++ HeaderString ++ Term ++ BodyContent ++ "\0" ++ "\n".
+
diff --git a/deps/rabbitmq_stomp/test/proxy_protocol_SUITE.erl b/deps/rabbitmq_stomp/test/proxy_protocol_SUITE.erl
new file mode 100644
index 0000000000..46c1c6c743
--- /dev/null
+++ b/deps/rabbitmq_stomp/test/proxy_protocol_SUITE.erl
@@ -0,0 +1,104 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(proxy_protocol_SUITE).
+-compile([export_all]).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-define(TIMEOUT, 5000).
+
+all() ->
+ [
+ {group, non_parallel_tests}
+ ].
+
+groups() ->
+ [
+ {non_parallel_tests, [], [
+ proxy_protocol,
+ proxy_protocol_tls
+ ]}
+ ].
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ Suffix = rabbit_ct_helpers:testcase_absname(Config, "", "-"),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, Suffix},
+ {rmq_certspwd, "bunnychow"},
+ {rabbitmq_ct_tls_verify, verify_none}
+ ]),
+    StompConfig = stomp_config(),
+    rabbit_ct_helpers:run_setup_steps(Config1,
+      [ fun(Conf) -> merge_app_env(StompConfig, Conf) end ] ++
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+stomp_config() ->
+ {rabbitmq_stomp, [
+ {proxy_protocol, true}
+ ]}.
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_group(_, Config) -> Config.
+end_per_group(_, Config) -> Config.
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+proxy_protocol(Config) ->
+ Port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_stomp),
+ {ok, Socket} = gen_tcp:connect({127,0,0,1}, Port,
+ [binary, {active, false}, {packet, raw}]),
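+    %% PROXY protocol v1 header: protocol family, source address,
+    %% destination address, source port, destination port of the client.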
+ ok = inet:send(Socket, "PROXY TCP4 192.168.1.1 192.168.1.2 80 81\r\n"),
+ ok = inet:send(Socket, stomp_connect_frame()),
+ {ok, _Packet} = gen_tcp:recv(Socket, 0, ?TIMEOUT),
+ ConnectionName = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, connection_name, []),
+ match = re:run(ConnectionName, <<"^192.168.1.1:80 ">>, [{capture, none}]),
+ gen_tcp:close(Socket),
+ ok.
+
+proxy_protocol_tls(Config) ->
+ app_utils:start_applications([asn1, crypto, public_key, ssl]),
+ Port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_stomp_tls),
+ {ok, Socket} = gen_tcp:connect({127,0,0,1}, Port,
+ [binary, {active, false}, {packet, raw}]),
+ ok = inet:send(Socket, "PROXY TCP4 192.168.1.1 192.168.1.2 80 81\r\n"),
+ {ok, SslSocket} = ssl:connect(Socket, [], ?TIMEOUT),
+ ok = ssl:send(SslSocket, stomp_connect_frame()),
+ {ok, _Packet} = ssl:recv(SslSocket, 0, ?TIMEOUT),
+ ConnectionName = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, connection_name, []),
+ match = re:run(ConnectionName, <<"^192.168.1.1:80 ">>, [{capture, none}]),
+ gen_tcp:close(Socket),
+ ok.
+
+connection_name() ->
+ Connections = ets:tab2list(connection_created),
+ {_Key, Values} = lists:nth(1, Connections),
+ {_, Name} = lists:keyfind(name, 1, Values),
+ Name.
+
+merge_app_env(StompConfig, Config) ->
+    rabbit_ct_helpers:merge_app_env(Config, StompConfig).
+
+stomp_connect_frame() ->
+ <<"CONNECT\n",
+ "login:guest\n",
+ "passcode:guest\n",
+ "\n",
+ 0>>. \ No newline at end of file
diff --git a/deps/rabbitmq_stomp/test/python_SUITE.erl b/deps/rabbitmq_stomp/test/python_SUITE.erl
new file mode 100644
index 0000000000..9613b25032
--- /dev/null
+++ b/deps/rabbitmq_stomp/test/python_SUITE.erl
@@ -0,0 +1,72 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(python_SUITE).
+-compile(export_all).
+-include_lib("common_test/include/ct.hrl").
+
+all() ->
+ [
+ common,
+ ssl,
+ connect_options
+ ].
+
+init_per_testcase(TestCase, Config) ->
+ Suffix = rabbit_ct_helpers:testcase_absname(Config, TestCase, "-"),
+ Config1 = rabbit_ct_helpers:set_config(Config,
+ [{rmq_certspwd, "bunnychow"},
+ {rmq_nodename_suffix, Suffix}]),
+ rabbit_ct_helpers:log_environment(),
+ Config2 = rabbit_ct_helpers:run_setup_steps(
+ Config1,
+ rabbit_ct_broker_helpers:setup_steps()),
+ DataDir = ?config(data_dir, Config2),
+ PikaDir = filename:join([DataDir, "deps", "pika"]),
+ StomppyDir = filename:join([DataDir, "deps", "stomppy"]),
+ rabbit_ct_helpers:make(Config2, PikaDir, []),
+ rabbit_ct_helpers:make(Config2, StomppyDir, []),
+ Config2.
+
+end_per_testcase(_, Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config,
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+
+common(Config) ->
+ run(Config, filename:join("src", "test.py")).
+
+connect_options(Config) ->
+ run(Config, filename:join("src", "test_connect_options.py")).
+
+ssl(Config) ->
+ run(Config, filename:join("src", "test_ssl.py")).
+
+run(Config, Test) ->
+ DataDir = ?config(data_dir, Config),
+ CertsDir = rabbit_ct_helpers:get_config(Config, rmq_certsdir),
+ StompPort = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_stomp),
+ StompPortTls = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_stomp_tls),
+ AmqpPort = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp),
+ NodeName = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+    %% os:getenv/1 returns false when PYTHONPATH is unset; fall back to an
+    %% empty string so the concatenation below stays a valid string.
+    PythonPath = case os:getenv("PYTHONPATH") of
+                     false -> "";
+                     Path  -> Path
+                 end,
+    os:putenv("PYTHONPATH", filename:join([DataDir, "deps", "pika", "pika"])
+              ++ ":" ++
+              filename:join([DataDir, "deps", "stomppy", "stomppy"])
+              ++ ":" ++
+              PythonPath),
+ os:putenv("AMQP_PORT", integer_to_list(AmqpPort)),
+ os:putenv("STOMP_PORT", integer_to_list(StompPort)),
+ os:putenv("STOMP_PORT_TLS", integer_to_list(StompPortTls)),
+ os:putenv("RABBITMQ_NODENAME", atom_to_list(NodeName)),
+ os:putenv("SSL_CERTS_PATH", CertsDir),
+ {ok, _} = rabbit_ct_helpers:exec([filename:join(DataDir, Test)]).
+
+
+cur_dir() ->
+ {ok, Src} = filelib:find_source(?MODULE),
+ filename:dirname(Src).
diff --git a/deps/rabbitmq_stomp/test/python_SUITE_data/deps/pika/Makefile b/deps/rabbitmq_stomp/test/python_SUITE_data/deps/pika/Makefile
new file mode 100644
index 0000000000..10aa6f0212
--- /dev/null
+++ b/deps/rabbitmq_stomp/test/python_SUITE_data/deps/pika/Makefile
@@ -0,0 +1,27 @@
+UPSTREAM_GIT=https://github.com/pika/pika.git
+REVISION=1.1.0
+
+LIB_DIR=pika
+CHECKOUT_DIR=pika-$(REVISION)
+
+TARGETS=$(LIB_DIR)
+
+all: $(TARGETS)
+
+clean:
+ rm -rf $(LIB_DIR)
+
+distclean: clean
+ rm -rf $(CHECKOUT_DIR)
+
+$(LIB_DIR) : $(CHECKOUT_DIR)
+ rm -rf $@
+ cp -R $< $@
+
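+# Shallow-clone the pinned tag; remove the partial checkout and fail the
+# build if the clone does not succeed.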
+$(CHECKOUT_DIR):
+ git clone --depth 1 --branch $(REVISION) $(UPSTREAM_GIT) $@ || \
+ (rm -rf $@; exit 1)
+
+echo-revision:
+ @echo $(REVISION)
+
diff --git a/deps/rabbitmq_stomp/test/python_SUITE_data/deps/stomppy/Makefile b/deps/rabbitmq_stomp/test/python_SUITE_data/deps/stomppy/Makefile
new file mode 100644
index 0000000000..40f5bd1db7
--- /dev/null
+++ b/deps/rabbitmq_stomp/test/python_SUITE_data/deps/stomppy/Makefile
@@ -0,0 +1,27 @@
+UPSTREAM_GIT=https://github.com/jasonrbriggs/stomp.py.git
+REVISION=v4.0.16
+
+LIB_DIR=stomppy
+CHECKOUT_DIR=stomppy-git
+
+TARGETS=$(LIB_DIR)
+
+all: $(TARGETS)
+
+clean:
+ rm -rf $(LIB_DIR)
+
+distclean: clean
+ rm -rf $(CHECKOUT_DIR)
+
+$(LIB_DIR) : $(CHECKOUT_DIR)
+ rm -rf $@
+ cp -R $< $@
+
+$(CHECKOUT_DIR):
+ git clone $(UPSTREAM_GIT) $@
+	(cd $@ && git checkout $(REVISION)) || (rm -rf $@; exit 1)
+
+echo-revision:
+ @echo $(REVISION)
+
diff --git a/deps/rabbitmq_stomp/test/python_SUITE_data/src/ack.py b/deps/rabbitmq_stomp/test/python_SUITE_data/src/ack.py
new file mode 100644
index 0000000000..9103bc76ea
--- /dev/null
+++ b/deps/rabbitmq_stomp/test/python_SUITE_data/src/ack.py
@@ -0,0 +1,252 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+##
+
+import unittest
+import stomp
+import base
+import time
+import os
+
+class TestAck(base.BaseTest):
+
+ def test_ack_client(self):
+ destination = "/queue/ack-test"
+
+ # subscribe and send message
+ self.listener.reset(2) ## expecting 2 messages
+ self.subscribe_dest(self.conn, destination, None,
+ ack='client',
+ headers={'prefetch-count': '10'})
+ self.conn.send(destination, "test1")
+ self.conn.send(destination, "test2")
+ self.assertTrue(self.listener.wait(4), "initial message not received")
+ self.assertEquals(2, len(self.listener.messages))
+
+ # disconnect with no ack
+ self.conn.disconnect()
+
+ # now reconnect
+ conn2 = self.create_connection()
+ try:
+ listener2 = base.WaitableListener()
+ listener2.reset(2)
+ conn2.set_listener('', listener2)
+ self.subscribe_dest(conn2, destination, None,
+ ack='client',
+ headers={'prefetch-count': '10'})
+ self.assertTrue(listener2.wait(), "message not received again")
+ self.assertEquals(2, len(listener2.messages))
+
+ # now ack only the last message - expecting cumulative behaviour
+ mid = listener2.messages[1]['headers'][self.ack_id_source_header]
+ self.ack_message(conn2, mid, None)
+ finally:
+ conn2.disconnect()
+
+ # now reconnect again, shouldn't see the message
+ conn3 = self.create_connection()
+ try:
+ listener3 = base.WaitableListener()
+ conn3.set_listener('', listener3)
+ self.subscribe_dest(conn3, destination, None)
+ self.assertFalse(listener3.wait(3),
+ "unexpected message. ACK not working?")
+ finally:
+ conn3.disconnect()
+
+ def test_ack_client_individual(self):
+ destination = "/queue/ack-test-individual"
+
+ # subscribe and send message
+ self.listener.reset(2) ## expecting 2 messages
+ self.subscribe_dest(self.conn, destination, None,
+ ack='client-individual',
+ headers={'prefetch-count': '10'})
+ self.conn.send(destination, "test1")
+ self.conn.send(destination, "test2")
+ self.assertTrue(self.listener.wait(4), "Both initial messages not received")
+ self.assertEquals(2, len(self.listener.messages))
+
+ # disconnect without acks
+ self.conn.disconnect()
+
+ # now reconnect
+ conn2 = self.create_connection()
+ try:
+ listener2 = base.WaitableListener()
+ listener2.reset(2) ## expect 2 messages
+ conn2.set_listener('', listener2)
+ self.subscribe_dest(conn2, destination, None,
+ ack='client-individual',
+ headers={'prefetch-count': '10'})
+ self.assertTrue(listener2.wait(2.5), "Did not receive 2 messages")
+ self.assertEquals(2, len(listener2.messages), "Not exactly 2 messages received")
+
+ # now ack only the 'test2' message - expecting individual behaviour
+ nummsgs = len(listener2.messages)
+ mid = None
+ for ind in range(nummsgs):
+ if listener2.messages[ind]['message']=="test2":
+ mid = listener2.messages[ind]['headers'][self.ack_id_source_header]
+ self.assertEquals(1, ind, 'Expecting test2 to be second message')
+ break
+ self.assertTrue(mid, "Did not find test2 message id.")
+ self.ack_message(conn2, mid, None)
+ finally:
+ conn2.disconnect()
+
+ # now reconnect again, shouldn't see the message
+ conn3 = self.create_connection()
+ try:
+ listener3 = base.WaitableListener()
+ listener3.reset(2) ## expecting a single message, but wait for two
+ conn3.set_listener('', listener3)
+ self.subscribe_dest(conn3, destination, None)
+ self.assertFalse(listener3.wait(2.5),
+ "Expected to see only one message. ACK not working?")
+ self.assertEquals(1, len(listener3.messages), "Expecting exactly one message")
+ self.assertEquals("test1", listener3.messages[0]['message'], "Unexpected message remains")
+ finally:
+ conn3.disconnect()
+
+ def test_ack_client_tx(self):
+ destination = "/queue/ack-test-tx"
+
+ # subscribe and send message
+ self.listener.reset()
+ self.subscribe_dest(self.conn, destination, None, ack='client')
+ self.conn.send(destination, "test")
+ self.assertTrue(self.listener.wait(3), "initial message not received")
+ self.assertEquals(1, len(self.listener.messages))
+
+ # disconnect with no ack
+ self.conn.disconnect()
+
+ # now reconnect
+ conn2 = self.create_connection()
+ try:
+ tx = "abc"
+ listener2 = base.WaitableListener()
+ conn2.set_listener('', listener2)
+ conn2.begin(transaction=tx)
+ self.subscribe_dest(conn2, destination, None, ack='client')
+ self.assertTrue(listener2.wait(), "message not received again")
+ self.assertEquals(1, len(listener2.messages))
+
+ # now ack
+ mid = listener2.messages[0]['headers'][self.ack_id_source_header]
+ self.ack_message(conn2, mid, None, transaction=tx)
+
+ #now commit
+ conn2.commit(transaction=tx)
+ finally:
+ conn2.disconnect()
+
+ # now reconnect again, shouldn't see the message
+ conn3 = self.create_connection()
+ try:
+ listener3 = base.WaitableListener()
+ conn3.set_listener('', listener3)
+ self.subscribe_dest(conn3, destination, None)
+ self.assertFalse(listener3.wait(3),
+ "unexpected message. TX ACK not working?")
+ finally:
+ conn3.disconnect()
+
+ def test_topic_prefetch(self):
+ destination = "/topic/prefetch-test"
+
+ # subscribe and send message
+ self.listener.reset(6) ## expect 6 messages
+ self.subscribe_dest(self.conn, destination, None,
+ ack='client',
+ headers={'prefetch-count': '5'})
+
+ for x in range(10):
+ self.conn.send(destination, "test" + str(x))
+
+ self.assertFalse(self.listener.wait(3),
+ "Should not have been able to see 6 messages")
+ self.assertEquals(5, len(self.listener.messages))
+
+ def test_nack(self):
+ destination = "/queue/nack-test"
+
+ #subscribe and send
+ self.subscribe_dest(self.conn, destination, None,
+ ack='client-individual')
+ self.conn.send(destination, "nack-test")
+
+ self.assertTrue(self.listener.wait(), "Not received message")
+ message_id = self.listener.messages[0]['headers'][self.ack_id_source_header]
+ self.listener.reset()
+
+ self.nack_message(self.conn, message_id, None)
+ self.assertTrue(self.listener.wait(), "Not received message after NACK")
+ message_id = self.listener.messages[0]['headers'][self.ack_id_source_header]
+ self.ack_message(self.conn, message_id, None)
+
+ def test_nack_multi(self):
+ destination = "/queue/nack-multi"
+
+ self.listener.reset(2)
+
+ #subscribe and send
+ self.subscribe_dest(self.conn, destination, None,
+ ack='client',
+ headers = {'prefetch-count' : '10'})
+ self.conn.send(destination, "nack-test1")
+ self.conn.send(destination, "nack-test2")
+
+ self.assertTrue(self.listener.wait(), "Not received messages")
+ message_id = self.listener.messages[1]['headers'][self.ack_id_source_header]
+ self.listener.reset(2)
+
+ self.nack_message(self.conn, message_id, None)
+ self.assertTrue(self.listener.wait(), "Not received message again")
+ message_id = self.listener.messages[1]['headers'][self.ack_id_source_header]
+ self.ack_message(self.conn, message_id, None)
+
+ def test_nack_without_requeueing(self):
+ destination = "/queue/nack-test-no-requeue"
+
+ self.subscribe_dest(self.conn, destination, None,
+ ack='client-individual')
+ self.conn.send(destination, "nack-test")
+
+ self.assertTrue(self.listener.wait(), "Not received message")
+ message_id = self.listener.messages[0]['headers'][self.ack_id_source_header]
+ self.listener.reset()
+
+ self.conn.send_frame("NACK", {self.ack_id_header: message_id, "requeue": False})
+ self.assertFalse(self.listener.wait(4), "Received message after NACK with requeue = False")
+
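+# Re-run every TestAck case over STOMP 1.1 and 1.2 by overriding how the
+# connection object is created and which headers carry the ack identifiers.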
+class TestAck11(TestAck):
+
+ def create_connection_obj(self, version='1.1', vhost='/', heartbeats=(0, 0)):
+ conn = stomp.StompConnection11(host_and_ports=[('localhost', int(os.environ["STOMP_PORT"]))],
+ vhost=vhost,
+ heartbeats=heartbeats)
+ self.ack_id_source_header = 'message-id'
+ self.ack_id_header = 'message-id'
+ return conn
+
+ def test_version(self):
+ self.assertEquals('1.1', self.conn.version)
+
+class TestAck12(TestAck):
+
+ def create_connection_obj(self, version='1.2', vhost='/', heartbeats=(0, 0)):
+ conn = stomp.StompConnection12(host_and_ports=[('localhost', int(os.environ["STOMP_PORT"]))],
+ vhost=vhost,
+ heartbeats=heartbeats)
+ self.ack_id_source_header = 'ack'
+ self.ack_id_header = 'id'
+ return conn
+
+ def test_version(self):
+ self.assertEquals('1.2', self.conn.version)
diff --git a/deps/rabbitmq_stomp/test/python_SUITE_data/src/amqp_headers.py b/deps/rabbitmq_stomp/test/python_SUITE_data/src/amqp_headers.py
new file mode 100644
index 0000000000..2c5ee45a8e
--- /dev/null
+++ b/deps/rabbitmq_stomp/test/python_SUITE_data/src/amqp_headers.py
@@ -0,0 +1,42 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+##
+
+import pika
+import base
+import os
+
+class TestAmqpHeaders(base.BaseTest):
+ def test_headers_to_stomp(self):
+ self.listener.reset(1)
+ queueName='test-amqp-headers-to-stomp'
+
+ # Set up STOMP subscription
+ self.subscribe_dest(self.conn, '/topic/test', None, headers={'x-queue-name': queueName})
+
+ # Set up AMQP connection
+ amqp_params = pika.ConnectionParameters(host='localhost', port=int(os.environ["AMQP_PORT"]))
+ amqp_conn = pika.BlockingConnection(amqp_params)
+ amqp_chan = amqp_conn.channel()
+
+ # publish a message with headers to the named AMQP queue
+ amqp_headers = { 'x-custom-hdr-1': 'value1',
+ 'x-custom-hdr-2': 'value2',
+ 'custom-hdr-3': 'value3' }
+ amqp_props = pika.BasicProperties(headers=amqp_headers)
+ amqp_chan.basic_publish(exchange='', routing_key=queueName, body='Hello World!', properties=amqp_props)
+
+ # check if we receive the message from the STOMP subscription
+ self.assertTrue(self.listener.wait(2), "initial message not received")
+ self.assertEquals(1, len(self.listener.messages))
+ msg = self.listener.messages[0]
+ self.assertEquals('Hello World!', msg['message'])
+ self.assertEquals('value1', msg['headers']['x-custom-hdr-1'])
+ self.assertEquals('value2', msg['headers']['x-custom-hdr-2'])
+ self.assertEquals('value3', msg['headers']['custom-hdr-3'])
+
+ self.conn.disconnect()
+ amqp_conn.close()
diff --git a/deps/rabbitmq_stomp/test/python_SUITE_data/src/base.py b/deps/rabbitmq_stomp/test/python_SUITE_data/src/base.py
new file mode 100644
index 0000000000..a8f7ef59b9
--- /dev/null
+++ b/deps/rabbitmq_stomp/test/python_SUITE_data/src/base.py
@@ -0,0 +1,259 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+##
+
+import unittest
+import stomp
+import sys
+import threading
+import os
+
+
+class BaseTest(unittest.TestCase):
+
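+    # STOMP 1.0 and 1.1 acknowledge by the 'message-id' header; STOMP 1.2
+    # exposes an 'ack' header on MESSAGE frames and acknowledges by 'id'.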
+ def create_connection_obj(self, version='1.0', vhost='/', heartbeats=(0, 0)):
+ if version == '1.0':
+ conn = stomp.StompConnection10(host_and_ports=[('localhost', int(os.environ["STOMP_PORT"]))])
+ self.ack_id_source_header = 'message-id'
+ self.ack_id_header = 'message-id'
+ elif version == '1.1':
+ conn = stomp.StompConnection11(host_and_ports=[('localhost', int(os.environ["STOMP_PORT"]))],
+ vhost=vhost,
+ heartbeats=heartbeats)
+ self.ack_id_source_header = 'message-id'
+ self.ack_id_header = 'message-id'
+ elif version == '1.2':
+ conn = stomp.StompConnection12(host_and_ports=[('localhost', int(os.environ["STOMP_PORT"]))],
+ vhost=vhost,
+ heartbeats=heartbeats)
+ self.ack_id_source_header = 'ack'
+ self.ack_id_header = 'id'
+ else:
+ conn = stomp.StompConnection12(host_and_ports=[('localhost', int(os.environ["STOMP_PORT"]))],
+ vhost=vhost,
+ heartbeats=heartbeats)
+ conn.version = version
+ return conn
+
+ def create_connection(self, user='guest', passcode='guest', wait=True, **kwargs):
+ conn = self.create_connection_obj(**kwargs)
+ conn.start()
+ conn.connect(user, passcode, wait=wait)
+ return conn
+
+ def subscribe_dest(self, conn, destination, sub_id, **kwargs):
+ if type(conn) is stomp.StompConnection10:
+ # 'id' is optional in STOMP 1.0.
+ if sub_id != None:
+ kwargs['id'] = sub_id
+ conn.subscribe(destination, **kwargs)
+ else:
+ # 'id' is required in STOMP 1.1+.
+ if sub_id == None:
+ sub_id = 'ctag'
+ conn.subscribe(destination, sub_id, **kwargs)
+
+ def unsubscribe_dest(self, conn, destination, sub_id, **kwargs):
+ if type(conn) is stomp.StompConnection10:
+ # 'id' is optional in STOMP 1.0.
+ if sub_id != None:
+ conn.unsubscribe(id=sub_id, **kwargs)
+ else:
+ conn.unsubscribe(destination=destination, **kwargs)
+ else:
+ # 'id' is required in STOMP 1.1+.
+ if sub_id == None:
+ sub_id = 'ctag'
+ conn.unsubscribe(sub_id, **kwargs)
+
+ def ack_message(self, conn, msg_id, sub_id, **kwargs):
+ if type(conn) is stomp.StompConnection10:
+ conn.ack(msg_id, **kwargs)
+ elif type(conn) is stomp.StompConnection11:
+ if sub_id == None:
+ sub_id = 'ctag'
+ conn.ack(msg_id, sub_id, **kwargs)
+ elif type(conn) is stomp.StompConnection12:
+ conn.ack(msg_id, **kwargs)
+
+ def nack_message(self, conn, msg_id, sub_id, **kwargs):
+ if type(conn) is stomp.StompConnection10:
+ # Normally unsupported by STOMP 1.0.
+ conn.send_frame("NACK", {"message-id": msg_id})
+ elif type(conn) is stomp.StompConnection11:
+ if sub_id == None:
+ sub_id = 'ctag'
+ conn.nack(msg_id, sub_id, **kwargs)
+ elif type(conn) is stomp.StompConnection12:
+ conn.nack(msg_id, **kwargs)
+
+ def create_subscriber_connection(self, dest):
+ conn = self.create_connection()
+ listener = WaitableListener()
+ conn.set_listener('', listener)
+ self.subscribe_dest(conn, dest, None, receipt="sub.receipt")
+ listener.wait()
+ self.assertEquals(1, len(listener.receipts))
+ listener.reset()
+ return conn, listener
+
+ def setUp(self):
+ # Note: useful for debugging
+ # import stomp.listener
+ self.conn = self.create_connection()
+ self.listener = WaitableListener()
+ self.conn.set_listener('waitable', self.listener)
+ # Note: useful for debugging
+ # self.printing_listener = stomp.listener.PrintingListener()
+ # self.conn.set_listener('printing', self.printing_listener)
+
+ def tearDown(self):
+ if self.conn.is_connected():
+ self.conn.disconnect()
+ self.conn.stop()
+
+ def simple_test_send_rec(self, dest, headers={}):
+ self.listener.reset()
+
+ self.subscribe_dest(self.conn, dest, None)
+ self.conn.send(dest, "foo", headers=headers)
+
+ self.assertTrue(self.listener.wait(), "Timeout, no message received")
+
+ # assert no errors
+ if len(self.listener.errors) > 0:
+ self.fail(self.listener.errors[0]['message'])
+
+ # check header content
+ msg = self.listener.messages[0]
+ self.assertEquals("foo", msg['message'])
+ self.assertEquals(dest, msg['headers']['destination'])
+ return msg['headers']
+
+ def assertListener(self, errMsg, numMsgs=0, numErrs=0, numRcts=0, timeout=10):
+ if numMsgs + numErrs + numRcts > 0:
+ self._assertTrue(self.listener.wait(timeout), errMsg + " (#awaiting)")
+ else:
+ self._assertFalse(self.listener.wait(timeout), errMsg + " (#awaiting)")
+ self._assertEquals(numMsgs, len(self.listener.messages), errMsg + " (#messages)")
+ self._assertEquals(numErrs, len(self.listener.errors), errMsg + " (#errors)")
+ self._assertEquals(numRcts, len(self.listener.receipts), errMsg + " (#receipts)")
+
+    def _assertTrue(self, condition, msg):
+        if not condition:
+            self.listener.print_state(msg, True)
+        self.assertTrue(condition, msg)
+
+    def _assertFalse(self, condition, msg):
+        if condition:
+            self.listener.print_state(msg, True)
+        self.assertFalse(condition, msg)
+
+ def _assertEquals(self, expected, actual, msg):
+ if expected != actual:
+ self.listener.print_state(msg, True)
+ self.assertEquals(expected, actual, msg)
+
+ def assertListenerAfter(self, verb, errMsg="", numMsgs=0, numErrs=0, numRcts=0, timeout=5):
+ num = numMsgs + numErrs + numRcts
+ self.listener.reset(num if num>0 else 1)
+ verb()
+ self.assertListener(errMsg=errMsg, numMsgs=numMsgs, numErrs=numErrs, numRcts=numRcts, timeout=timeout)
+
+class WaitableListener(object):
+
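+    # Records messages, errors and receipts, and lets a test block until an
+    # expected number of events has arrived (see the Latch below).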
+ def __init__(self):
+ self.debug = False
+ if self.debug:
+ print('(listener) init')
+ self.messages = []
+ self.errors = []
+ self.receipts = []
+ self.latch = Latch(1)
+ self.msg_no = 0
+
+ def _next_msg_no(self):
+ self.msg_no += 1
+ return self.msg_no
+
+ def _append(self, array, msg, hdrs):
+ mno = self._next_msg_no()
+ array.append({'message' : msg, 'headers' : hdrs, 'msg_no' : mno})
+ self.latch.countdown()
+
+ def on_receipt(self, headers, message):
+ if self.debug:
+ print('(on_receipt) message: {}, headers: {}'.format(message, headers))
+ self._append(self.receipts, message, headers)
+
+ def on_error(self, headers, message):
+ if self.debug:
+ print('(on_error) message: {}, headers: {}'.format(message, headers))
+ self._append(self.errors, message, headers)
+
+ def on_message(self, headers, message):
+ if self.debug:
+ print('(on_message) message: {}, headers: {}'.format(message, headers))
+ self._append(self.messages, message, headers)
+
+ def reset(self, count=1):
+ if self.debug:
+ self.print_state('(reset listener--old state)')
+ self.messages = []
+ self.errors = []
+ self.receipts = []
+ self.latch = Latch(count)
+ self.msg_no = 0
+ if self.debug:
+ self.print_state('(reset listener--new state)')
+
+ def wait(self, timeout=10):
+ return self.latch.wait(timeout)
+
+ def print_state(self, hdr="", full=False):
+ print(hdr)
+ print('#messages: {}'.format(len(self.messages)))
+        print('#errors: {}'.format(len(self.errors)))
+ print('#receipts: {}'.format(len(self.receipts)))
+ print('Remaining count: {}'.format(self.latch.get_count()))
+ if full:
+ if len(self.messages) != 0: print('Messages: {}'.format(self.messages))
+            if len(self.errors) != 0: print('Errors: {}'.format(self.errors))
+            if len(self.receipts) != 0: print('Receipts: {}'.format(self.receipts))
+
+class Latch(object):
+
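+    # A minimal countdown latch: wait() blocks until countdown() has been
+    # called 'count' times, or returns False when the timeout expires first.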
+ def __init__(self, count=1):
+ self.cond = threading.Condition()
+ self.cond.acquire()
+ self.count = count
+ self.cond.release()
+
+ def countdown(self):
+ self.cond.acquire()
+ if self.count > 0:
+ self.count -= 1
+ if self.count == 0:
+ self.cond.notify_all()
+ self.cond.release()
+
+ def wait(self, timeout=None):
+ try:
+ self.cond.acquire()
+ if self.count == 0:
+ return True
+ else:
+ self.cond.wait(timeout)
+ return self.count == 0
+ finally:
+ self.cond.release()
+
+ def get_count(self):
+ try:
+ self.cond.acquire()
+ return self.count
+ finally:
+ self.cond.release()
diff --git a/deps/rabbitmq_stomp/test/python_SUITE_data/src/connect_options.py b/deps/rabbitmq_stomp/test/python_SUITE_data/src/connect_options.py
new file mode 100644
index 0000000000..f71c4acf70
--- /dev/null
+++ b/deps/rabbitmq_stomp/test/python_SUITE_data/src/connect_options.py
@@ -0,0 +1,51 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+##
+
+import unittest
+import stomp
+import base
+import test_util
+import os
+
+class TestConnectOptions(base.BaseTest):
+
+ def test_implicit_connect(self):
+ ''' Implicit connect with receipt on first command '''
+ self.conn.disconnect()
+ test_util.enable_implicit_connect()
+ listener = base.WaitableListener()
+ new_conn = stomp.Connection(host_and_ports=[('localhost', int(os.environ["STOMP_PORT"]))])
+ new_conn.set_listener('', listener)
+
+ new_conn.start() # not going to issue connect
+ self.subscribe_dest(new_conn, "/topic/implicit", 'sub_implicit',
+ receipt='implicit')
+
+ try:
+ self.assertTrue(listener.wait(5))
+ self.assertEquals(1, len(listener.receipts),
+ 'Missing receipt. Likely not connected')
+ self.assertEquals('implicit', listener.receipts[0]['headers']['receipt-id'])
+ finally:
+ new_conn.disconnect()
+ test_util.disable_implicit_connect()
+
+ def test_default_user(self):
+ ''' Default user connection '''
+ self.conn.disconnect()
+ test_util.enable_default_user()
+ listener = base.WaitableListener()
+ new_conn = stomp.Connection(host_and_ports=[('localhost', int(os.environ["STOMP_PORT"]))])
+ new_conn.set_listener('', listener)
+ new_conn.start()
+ new_conn.connect()
+ try:
+ self.assertFalse(listener.wait(3)) # no error back
+ self.assertTrue(new_conn.is_connected())
+ finally:
+ new_conn.disconnect()
+ test_util.disable_default_user()
diff --git a/deps/rabbitmq_stomp/test/python_SUITE_data/src/destinations.py b/deps/rabbitmq_stomp/test/python_SUITE_data/src/destinations.py
new file mode 100644
index 0000000000..76e5402686
--- /dev/null
+++ b/deps/rabbitmq_stomp/test/python_SUITE_data/src/destinations.py
@@ -0,0 +1,536 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+##
+
+import unittest
+import stomp
+import base
+import time
+
+class TestExchange(base.BaseTest):
+
+
+ def test_amq_direct(self):
+ ''' Test basic send/receive for /exchange/amq.direct '''
+ self.__test_exchange_send_rec("amq.direct", "route")
+
+ def test_amq_topic(self):
+ ''' Test basic send/receive for /exchange/amq.topic '''
+ self.__test_exchange_send_rec("amq.topic", "route")
+
+ def test_amq_fanout(self):
+ ''' Test basic send/receive for /exchange/amq.fanout '''
+ self.__test_exchange_send_rec("amq.fanout", "route")
+
+ def test_amq_fanout_no_route(self):
+        ''' Test basic send/receive, /exchange/amq.fanout, no routing key'''
+ self.__test_exchange_send_rec("amq.fanout")
+
+ def test_invalid_exchange(self):
+ ''' Test invalid exchange error '''
+ self.listener.reset(1)
+ self.subscribe_dest(self.conn, "/exchange/does.not.exist", None,
+ ack="auto")
+ self.assertListener("Expecting an error", numErrs=1)
+ err = self.listener.errors[0]
+ self.assertEquals("not_found", err['headers']['message'])
+ self.assertEquals(
+ "NOT_FOUND - no exchange 'does.not.exist' in vhost '/'\n",
+ err['message'])
+ time.sleep(1)
+ self.assertFalse(self.conn.is_connected())
+
+ def __test_exchange_send_rec(self, exchange, route = None):
+ if exchange != "amq.topic":
+ dest = "/exchange/" + exchange
+ else:
+ dest = "/topic"
+ if route != None:
+ dest += "/" + route
+
+ self.simple_test_send_rec(dest)
+
+class TestQueue(base.BaseTest):
+
+ def test_send_receive(self):
+ ''' Test basic send/receive for /queue '''
+ destination = '/queue/test'
+ self.simple_test_send_rec(destination)
+
+ def test_send_receive_in_other_conn(self):
+ ''' Test send in one connection, receive in another '''
+ destination = '/queue/test2'
+
+ # send
+ self.conn.send(destination, "hello")
+
+ # now receive
+ conn2 = self.create_connection()
+ try:
+ listener2 = base.WaitableListener()
+ conn2.set_listener('', listener2)
+
+ self.subscribe_dest(conn2, destination, None, ack="auto")
+ self.assertTrue(listener2.wait(10), "no receive")
+ finally:
+ conn2.disconnect()
+
+ def test_send_receive_in_other_conn_with_disconnect(self):
+ ''' Test send, disconnect, receive '''
+ destination = '/queue/test3'
+
+ # send
+ self.conn.send(destination, "hello thar", receipt="foo")
+ self.listener.wait(3)
+ self.conn.disconnect()
+
+ # now receive
+ conn2 = self.create_connection()
+ try:
+ listener2 = base.WaitableListener()
+ conn2.set_listener('', listener2)
+
+ self.subscribe_dest(conn2, destination, None, ack="auto")
+ self.assertTrue(listener2.wait(10), "no receive")
+ finally:
+ conn2.disconnect()
+
+
+ def test_multi_subscribers(self):
+ ''' Test multiple subscribers against a single /queue destination '''
+ destination = '/queue/test-multi'
+
+ ## set up two subscribers
+ conn1, listener1 = self.create_subscriber_connection(destination)
+ conn2, listener2 = self.create_subscriber_connection(destination)
+
+ try:
+ ## now send
+ self.conn.send(destination, "test1")
+ self.conn.send(destination, "test2")
+
+            ## expect each consumer to get one of the two messages
+ self.assertTrue(listener1.wait(2))
+ self.assertEquals(1, len(listener1.messages),
+ "unexpected message count")
+ self.assertTrue(listener2.wait(2))
+ self.assertEquals(1, len(listener2.messages),
+ "unexpected message count")
+ finally:
+ conn1.disconnect()
+ conn2.disconnect()
+
+ def test_send_with_receipt(self):
+ destination = '/queue/test-receipt'
+ def noop(): pass
+ self.__test_send_receipt(destination, noop, noop)
+
+ def test_send_with_receipt_tx(self):
+ destination = '/queue/test-receipt-tx'
+ tx = 'receipt.tx'
+
+ def before():
+ self.conn.begin(transaction=tx)
+
+ def after():
+ self.assertFalse(self.listener.wait(1))
+ self.conn.commit(transaction=tx)
+
+ self.__test_send_receipt(destination, before, after, {'transaction': tx})
+
+ def test_interleaved_receipt_no_receipt(self):
+        ''' Interleaved receipts: a send without a receipt bracketed by sends with receipts '''
+
+ destination = '/queue/ir'
+
+ self.listener.reset(5)
+
+ self.subscribe_dest(self.conn, destination, None, ack="auto")
+ self.conn.send(destination, 'first', receipt='a')
+ self.conn.send(destination, 'second')
+ self.conn.send(destination, 'third', receipt='b')
+
+ self.assertListener("Missing messages/receipts", numMsgs=3, numRcts=2, timeout=3)
+
+ self.assertEquals(set(['a','b']), self.__gather_receipts())
+
+ def test_interleaved_receipt_no_receipt_tx(self):
+        ''' Interleaved receipts inside a transaction: a send without a receipt bracketed by sends with receipts '''
+
+ destination = '/queue/ir'
+ tx = 'tx.ir'
+
+ # three messages and two receipts
+ self.listener.reset(5)
+
+ self.subscribe_dest(self.conn, destination, None, ack="auto")
+ self.conn.begin(transaction=tx)
+
+ self.conn.send(destination, 'first', receipt='a', transaction=tx)
+ self.conn.send(destination, 'second', transaction=tx)
+ self.conn.send(destination, 'third', receipt='b', transaction=tx)
+ self.conn.commit(transaction=tx)
+
+ self.assertListener("Missing messages/receipts", numMsgs=3, numRcts=2, timeout=40)
+
+ expected = set(['a', 'b'])
+ missing = expected.difference(self.__gather_receipts())
+
+ self.assertEquals(set(), missing, "Missing receipts: " + str(missing))
+
+ def test_interleaved_receipt_no_receipt_inverse(self):
+        ''' Interleaved receipts: a send with a receipt bracketed by sends without receipts '''
+
+ destination = '/queue/ir'
+
+ self.listener.reset(4)
+
+ self.subscribe_dest(self.conn, destination, None, ack="auto")
+ self.conn.send(destination, 'first')
+ self.conn.send(destination, 'second', receipt='a')
+ self.conn.send(destination, 'third')
+
+ self.assertListener("Missing messages/receipt", numMsgs=3, numRcts=1, timeout=3)
+
+ self.assertEquals(set(['a']), self.__gather_receipts())
+
+ def __test_send_receipt(self, destination, before, after, headers = {}):
+ count = 50
+ self.listener.reset(count)
+
+ before()
+ expected_receipts = set()
+
+ for x in range(0, count):
+ receipt = "test" + str(x)
+ expected_receipts.add(receipt)
+ self.conn.send(destination, "test receipt",
+ receipt=receipt, headers=headers)
+ after()
+
+ self.assertTrue(self.listener.wait(5))
+
+ missing_receipts = expected_receipts.difference(
+ self.__gather_receipts())
+
+ self.assertEquals(set(), missing_receipts,
+ "missing receipts: " + str(missing_receipts))
+
+ def __gather_receipts(self):
+ result = set()
+ for r in self.listener.receipts:
+ result.add(r['headers']['receipt-id'])
+ return result
+
+class TestTopic(base.BaseTest):
+
+ def test_send_receive(self):
+ ''' Test basic send/receive for /topic '''
+ destination = '/topic/test'
+ self.simple_test_send_rec(destination)
+
+ def test_send_multiple(self):
+ ''' Test /topic with multiple consumers '''
+ destination = '/topic/multiple'
+
+ ## set up two subscribers
+ conn1, listener1 = self.create_subscriber_connection(destination)
+ conn2, listener2 = self.create_subscriber_connection(destination)
+
+ try:
+ ## listeners are expecting 2 messages
+ listener1.reset(2)
+ listener2.reset(2)
+
+ ## now send
+ self.conn.send(destination, "test1")
+ self.conn.send(destination, "test2")
+
+ ## expect both consumers to get both messages
+ self.assertTrue(listener1.wait(5))
+ self.assertEquals(2, len(listener1.messages),
+ "unexpected message count")
+ self.assertTrue(listener2.wait(5))
+ self.assertEquals(2, len(listener2.messages),
+ "unexpected message count")
+ finally:
+ conn1.disconnect()
+ conn2.disconnect()
+
+ def test_send_multiple_with_a_large_message(self):
+ ''' Test /topic with multiple consumers '''
+ destination = '/topic/16mb'
+ # payload size
+ s = 1024 * 1024 * 16
+ message = 'x' * s
+
+ conn1, listener1 = self.create_subscriber_connection(destination)
+ conn2, listener2 = self.create_subscriber_connection(destination)
+
+ try:
+ listener1.reset(2)
+ listener2.reset(2)
+
+ self.conn.send(destination, message)
+ self.conn.send(destination, message)
+
+ self.assertTrue(listener1.wait(10))
+ self.assertEquals(2, len(listener1.messages),
+ "unexpected message count")
+            self.assertTrue(len(listener1.messages[0]['message']) == s,
+                            "unexpected message size")
+
+ self.assertTrue(listener2.wait(10))
+ self.assertEquals(2, len(listener2.messages),
+ "unexpected message count")
+ finally:
+ conn1.disconnect()
+ conn2.disconnect()
+
+class TestReplyQueue(base.BaseTest):
+
+ def test_reply_queue(self):
+ ''' Test with two separate clients. Client 1 sends
+ message to a known destination with a defined reply
+ queue. Client 2 receives on known destination and replies
+ on the reply destination. Client 1 gets the reply message'''
+
+ known = '/queue/known'
+ reply = '/temp-queue/0'
+
+ ## Client 1 uses pre-supplied connection and listener
+ ## Set up client 2
+ conn2, listener2 = self.create_subscriber_connection(known)
+
+ try:
+ self.conn.send(known, "test",
+ headers = {"reply-to": reply})
+
+ self.assertTrue(listener2.wait(5))
+ self.assertEquals(1, len(listener2.messages))
+
+ reply_to = listener2.messages[0]['headers']['reply-to']
+ self.assertTrue(reply_to.startswith('/reply-queue/'))
+
+ conn2.send(reply_to, "reply")
+ self.assertTrue(self.listener.wait(5))
+ self.assertEquals("reply", self.listener.messages[0]['message'])
+ finally:
+ conn2.disconnect()
+
+ def test_reuse_reply_queue(self):
+ ''' Test re-use of reply-to queue '''
+
+ known2 = '/queue/known2'
+ known3 = '/queue/known3'
+ reply = '/temp-queue/foo'
+
+ def respond(cntn, listna):
+ self.assertTrue(listna.wait(5))
+ self.assertEquals(1, len(listna.messages))
+ reply_to = listna.messages[0]['headers']['reply-to']
+ self.assertTrue(reply_to.startswith('/reply-queue/'))
+ cntn.send(reply_to, "reply")
+
+ ## Client 1 uses pre-supplied connection and listener
+ ## Set up clients 2 and 3
+ conn2, listener2 = self.create_subscriber_connection(known2)
+ conn3, listener3 = self.create_subscriber_connection(known3)
+ try:
+ self.listener.reset(2)
+ self.conn.send(known2, "test2",
+ headers = {"reply-to": reply})
+ self.conn.send(known3, "test3",
+ headers = {"reply-to": reply})
+ respond(conn2, listener2)
+ respond(conn3, listener3)
+
+ self.assertTrue(self.listener.wait(5))
+ self.assertEquals(2, len(self.listener.messages))
+ self.assertEquals("reply", self.listener.messages[0]['message'])
+ self.assertEquals("reply", self.listener.messages[1]['message'])
+ finally:
+ conn2.disconnect()
+ conn3.disconnect()
+
+ def test_perm_reply_queue(self):
+ '''As test_reply_queue, but with a non-temp reply queue'''
+
+ known = '/queue/known'
+ reply = '/queue/reply'
+
+ ## Client 1 uses pre-supplied connection and listener
+ ## Set up client 2
+ conn1, listener1 = self.create_subscriber_connection(reply)
+ conn2, listener2 = self.create_subscriber_connection(known)
+
+ try:
+ conn1.send(known, "test",
+ headers = {"reply-to": reply})
+
+ self.assertTrue(listener2.wait(5))
+ self.assertEquals(1, len(listener2.messages))
+
+ reply_to = listener2.messages[0]['headers']['reply-to']
+ self.assertTrue(reply_to == reply)
+
+ conn2.send(reply_to, "reply")
+ self.assertTrue(listener1.wait(5))
+ self.assertEquals("reply", listener1.messages[0]['message'])
+ finally:
+ conn1.disconnect()
+ conn2.disconnect()
+
+class TestDurableSubscription(base.BaseTest):
+
+ ID = 'test.subscription'
+
+ def __subscribe(self, dest, conn=None, id=None):
+ if not conn:
+ conn = self.conn
+ if not id:
+ id = TestDurableSubscription.ID
+
+ self.subscribe_dest(conn, dest, id, ack="auto",
+ headers = {'durable': 'true',
+ 'receipt': 1,
+ 'auto-delete': False})
+
+ def __assert_receipt(self, listener=None, pos=None):
+ if not listener:
+ listener = self.listener
+
+ self.assertTrue(listener.wait(5))
+        self.assertEquals(1, len(listener.receipts))
+        if pos is not None:
+            self.assertEquals(pos, listener.receipts[0]['msg_no'])
+
+ def __assert_message(self, msg, listener=None, pos=None):
+ if not listener:
+ listener = self.listener
+
+ self.assertTrue(listener.wait(5))
+ self.assertEquals(1, len(listener.messages))
+ self.assertEquals(msg, listener.messages[0]['message'])
+ if pos is not None:
+            self.assertEquals(pos, listener.messages[0]['msg_no'])
+
+ def do_test_durable_subscription(self, durability_header):
+ destination = '/topic/durable'
+
+ self.__subscribe(destination)
+ self.__assert_receipt()
+
+ # send first message without unsubscribing
+ self.listener.reset(1)
+ self.conn.send(destination, "first")
+ self.__assert_message("first")
+
+ # now unsubscribe (disconnect only)
+ self.unsubscribe_dest(self.conn, destination, TestDurableSubscription.ID)
+
+ # send again
+ self.listener.reset(2)
+ self.conn.send(destination, "second")
+
+ # resubscribe and expect receipt
+ self.__subscribe(destination)
+ self.__assert_receipt(pos=1)
+ # and message
+ self.__assert_message("second", pos=2)
+
+ # now unsubscribe (cancel)
+ self.unsubscribe_dest(self.conn, destination, TestDurableSubscription.ID,
+ headers={durability_header: 'true'})
+
+ # send again
+ self.listener.reset(1)
+ self.conn.send(destination, "third")
+
+ # resubscribe and expect no message
+ self.__subscribe(destination)
+ self.assertTrue(self.listener.wait(3))
+ self.assertEquals(0, len(self.listener.messages))
+ self.assertEquals(1, len(self.listener.receipts))
+
+ def test_durable_subscription(self):
+ self.do_test_durable_subscription('durable')
+
+ def test_durable_subscription_and_legacy_header(self):
+ self.do_test_durable_subscription('persistent')
+
+ def test_share_subscription(self):
+ destination = '/topic/durable-shared'
+
+ conn2 = self.create_connection()
+ conn2.set_listener('', self.listener)
+
+ try:
+ self.__subscribe(destination)
+ self.__assert_receipt()
+ self.listener.reset(1)
+ self.__subscribe(destination, conn2)
+ self.__assert_receipt()
+
+ self.listener.reset(100)
+
+ # send 100 messages
+ for x in range(0, 100):
+ self.conn.send(destination, "msg" + str(x))
+
+ self.assertTrue(self.listener.wait(5))
+ self.assertEquals(100, len(self.listener.messages))
+ finally:
+ conn2.disconnect()
+
+ def test_separate_ids(self):
+ destination = '/topic/durable-separate'
+
+ conn2 = self.create_connection()
+ listener2 = base.WaitableListener()
+ conn2.set_listener('', listener2)
+
+ try:
+ # ensure durable subscription exists for each ID
+ self.__subscribe(destination)
+ self.__assert_receipt()
+ self.__subscribe(destination, conn2, "other.id")
+ self.__assert_receipt(listener2)
+ self.unsubscribe_dest(self.conn, destination, TestDurableSubscription.ID)
+ self.unsubscribe_dest(conn2, destination, "other.id")
+
+ self.listener.reset(101)
+ listener2.reset(101) ## 100 messages and 1 receipt
+
+ # send 100 messages
+ for x in range(0, 100):
+ self.conn.send(destination, "msg" + str(x))
+
+ self.__subscribe(destination)
+ self.__subscribe(destination, conn2, "other.id")
+
+ for l in [self.listener, listener2]:
+ self.assertTrue(l.wait(20))
+ self.assertTrue(len(l.messages) >= 90)
+ self.assertTrue(len(l.messages) <= 100)
+
+ finally:
+ conn2.disconnect()
+
+ def do_test_durable_subscribe_no_id_and_header(self, header):
+ destination = '/topic/durable-invalid'
+
+ self.conn.send_frame('SUBSCRIBE',
+ {'destination': destination, 'ack': 'auto', header: 'true'})
+ self.listener.wait(3)
+ self.assertEquals(1, len(self.listener.errors))
+ self.assertEquals("Missing Header", self.listener.errors[0]['headers']['message'])
+
+ def test_durable_subscribe_no_id(self):
+ self.do_test_durable_subscribe_no_id_and_header('durable')
+
+ def test_durable_subscribe_no_id_and_legacy_header(self):
+ self.do_test_durable_subscribe_no_id_and_header('persistent')
diff --git a/deps/rabbitmq_stomp/test/python_SUITE_data/src/errors.py b/deps/rabbitmq_stomp/test/python_SUITE_data/src/errors.py
new file mode 100644
index 0000000000..884ada50e8
--- /dev/null
+++ b/deps/rabbitmq_stomp/test/python_SUITE_data/src/errors.py
@@ -0,0 +1,101 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+##
+
+import unittest
+import stomp
+import base
+import time
+
+class TestErrorsAndCloseConnection(base.BaseTest):
+ def __test_duplicate_consumer_tag_with_headers(self, destination, headers):
+ self.subscribe_dest(self.conn, destination, None,
+ headers = headers)
+
+ self.subscribe_dest(self.conn, destination, None,
+ headers = headers)
+
+ self.assertTrue(self.listener.wait())
+
+ self.assertEquals(1, len(self.listener.errors))
+ errorReceived = self.listener.errors[0]
+ self.assertEquals("Duplicated subscription identifier", errorReceived['headers']['message'])
+ self.assertEquals("A subscription identified by 'T_1' already exists.", errorReceived['message'])
+ time.sleep(2)
+ self.assertFalse(self.conn.is_connected())
+
+
+ def test_duplicate_consumer_tag_with_transient_destination(self):
+ destination = "/exchange/amq.direct/duplicate-consumer-tag-test1"
+ self.__test_duplicate_consumer_tag_with_headers(destination, {'id': 1})
+
+ def test_duplicate_consumer_tag_with_durable_destination(self):
+ destination = "/queue/duplicate-consumer-tag-test2"
+ self.__test_duplicate_consumer_tag_with_headers(destination, {'id': 1,
+ 'persistent': True})
+
+
+class TestErrors(base.BaseTest):
+
+ def test_invalid_queue_destination(self):
+ self.__test_invalid_destination("queue", "/bah/baz")
+
+ def test_invalid_empty_queue_destination(self):
+ self.__test_invalid_destination("queue", "")
+
+ def test_invalid_topic_destination(self):
+ self.__test_invalid_destination("topic", "/bah/baz")
+
+ def test_invalid_empty_topic_destination(self):
+ self.__test_invalid_destination("topic", "")
+
+ def test_invalid_exchange_destination(self):
+ self.__test_invalid_destination("exchange", "/bah/baz/boo")
+
+ def test_invalid_empty_exchange_destination(self):
+ self.__test_invalid_destination("exchange", "")
+
+ def test_invalid_default_exchange_destination(self):
+ self.__test_invalid_destination("exchange", "//foo")
+
+ def test_unknown_destination(self):
+ self.listener.reset()
+ self.conn.send("/something/interesting", 'test_unknown_destination')
+
+ self.assertTrue(self.listener.wait())
+ self.assertEquals(1, len(self.listener.errors))
+
+ err = self.listener.errors[0]
+ self.assertEquals("Unknown destination", err['headers']['message'])
+
+ def test_send_missing_destination(self):
+ self.__test_missing_destination("SEND")
+
+    def test_subscribe_missing_destination(self):
+ self.__test_missing_destination("SUBSCRIBE")
+
+ def __test_missing_destination(self, command):
+ self.listener.reset()
+ self.conn.send_frame(command)
+
+ self.assertTrue(self.listener.wait())
+ self.assertEquals(1, len(self.listener.errors))
+
+ err = self.listener.errors[0]
+ self.assertEquals("Missing destination", err['headers']['message'])
+
+ def __test_invalid_destination(self, dtype, content):
+ self.listener.reset()
+ self.conn.send("/" + dtype + content, '__test_invalid_destination:' + dtype + content)
+
+ self.assertTrue(self.listener.wait())
+ self.assertEquals(1, len(self.listener.errors))
+
+ err = self.listener.errors[0]
+ self.assertEquals("Invalid destination", err['headers']['message'])
+ self.assertEquals("'" + content + "' is not a valid " +
+ dtype + " destination\n",
+ err['message'])
diff --git a/deps/rabbitmq_stomp/test/python_SUITE_data/src/lifecycle.py b/deps/rabbitmq_stomp/test/python_SUITE_data/src/lifecycle.py
new file mode 100644
index 0000000000..d7b558e7b5
--- /dev/null
+++ b/deps/rabbitmq_stomp/test/python_SUITE_data/src/lifecycle.py
@@ -0,0 +1,187 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+##
+
+import unittest
+import stomp
+import base
+import time
+
+class TestLifecycle(base.BaseTest):
+
+ def test_unsubscribe_exchange_destination(self):
+ ''' Test UNSUBSCRIBE command with exchange'''
+ d = "/exchange/amq.fanout"
+ self.unsub_test(d, self.sub_and_send(d))
+
+ def test_unsubscribe_exchange_destination_with_receipt(self):
+ ''' Test receipted UNSUBSCRIBE command with exchange'''
+ d = "/exchange/amq.fanout"
+ self.unsub_test(d, self.sub_and_send(d, receipt="unsub.rct"), numRcts=1)
+
+ def test_unsubscribe_queue_destination(self):
+ ''' Test UNSUBSCRIBE command with queue'''
+ d = "/queue/unsub01"
+ self.unsub_test(d, self.sub_and_send(d))
+
+ def test_unsubscribe_queue_destination_with_receipt(self):
+ ''' Test receipted UNSUBSCRIBE command with queue'''
+ d = "/queue/unsub02"
+ self.unsub_test(d, self.sub_and_send(d, receipt="unsub.rct"), numRcts=1)
+
+ def test_unsubscribe_exchange_id(self):
+ ''' Test UNSUBSCRIBE command with exchange by id'''
+ d = "/exchange/amq.fanout"
+ self.unsub_test(d, self.sub_and_send(d, subid="exchid"))
+
+ def test_unsubscribe_exchange_id_with_receipt(self):
+ ''' Test receipted UNSUBSCRIBE command with exchange by id'''
+ d = "/exchange/amq.fanout"
+ self.unsub_test(d, self.sub_and_send(d, subid="exchid", receipt="unsub.rct"), numRcts=1)
+
+ def test_unsubscribe_queue_id(self):
+ ''' Test UNSUBSCRIBE command with queue by id'''
+ d = "/queue/unsub03"
+ self.unsub_test(d, self.sub_and_send(d, subid="queid"))
+
+ def test_unsubscribe_queue_id_with_receipt(self):
+ ''' Test receipted UNSUBSCRIBE command with queue by id'''
+ d = "/queue/unsub04"
+ self.unsub_test(d, self.sub_and_send(d, subid="queid", receipt="unsub.rct"), numRcts=1)
+
+ def test_connect_version_1_0(self):
+ ''' Test CONNECT with version 1.0'''
+ self.conn.disconnect()
+ new_conn = self.create_connection(version="1.0")
+ try:
+ self.assertTrue(new_conn.is_connected())
+ finally:
+ new_conn.disconnect()
+ self.assertFalse(new_conn.is_connected())
+
+ def test_connect_version_1_1(self):
+ ''' Test CONNECT with version 1.1'''
+ self.conn.disconnect()
+ new_conn = self.create_connection(version="1.1")
+ try:
+ self.assertTrue(new_conn.is_connected())
+ finally:
+ new_conn.disconnect()
+ self.assertFalse(new_conn.is_connected())
+
+ def test_connect_version_1_2(self):
+ ''' Test CONNECT with version 1.2'''
+ self.conn.disconnect()
+ new_conn = self.create_connection(version="1.2")
+ try:
+ self.assertTrue(new_conn.is_connected())
+ finally:
+ new_conn.disconnect()
+ self.assertFalse(new_conn.is_connected())
+
+ def test_heartbeat_disconnects_client(self):
+ ''' Test heart-beat disconnection'''
+ self.conn.disconnect()
+ new_conn = self.create_connection(version='1.1', heartbeats=(1500, 0))
+ try:
+ self.assertTrue(new_conn.is_connected())
+ time.sleep(1)
+ self.assertTrue(new_conn.is_connected())
+ time.sleep(3)
+ self.assertFalse(new_conn.is_connected())
+ finally:
+ if new_conn.is_connected():
+ new_conn.disconnect()
+
+ def test_unsupported_version(self):
+ ''' Test unsupported version on CONNECT command'''
+ self.bad_connect("Supported versions are 1.0,1.1,1.2\n", version='100.1')
+
+ def test_bad_username(self):
+ ''' Test bad username'''
+ self.bad_connect("Access refused for user 'gust'\n", user='gust')
+
+ def test_bad_password(self):
+ ''' Test bad password'''
+ self.bad_connect("Access refused for user 'guest'\n", passcode='gust')
+
+ def test_bad_vhost(self):
+ ''' Test bad virtual host'''
+ self.bad_connect("Virtual host '//' access denied", version='1.1', vhost='//')
+
+ def bad_connect(self, expected, user='guest', passcode='guest', **kwargs):
+ self.conn.disconnect()
+ new_conn = self.create_connection_obj(**kwargs)
+ listener = base.WaitableListener()
+ new_conn.set_listener('', listener)
+ try:
+ new_conn.start()
+ new_conn.connect(user, passcode)
+ self.assertTrue(listener.wait())
+ self.assertEquals(expected, listener.errors[0]['message'])
+ finally:
+ if new_conn.is_connected():
+ new_conn.disconnect()
+
+ def test_bad_header_on_send(self):
+ ''' Test disallowed header on SEND '''
+ self.listener.reset(1)
+ self.conn.send_frame("SEND", {"destination":"a", "message-id":"1"})
+ self.assertTrue(self.listener.wait())
+ self.assertEquals(1, len(self.listener.errors))
+ errorReceived = self.listener.errors[0]
+ self.assertEquals("Invalid header", errorReceived['headers']['message'])
+ self.assertEquals("'message-id' is not allowed on 'SEND'.\n", errorReceived['message'])
+
+ def test_send_recv_header(self):
+ ''' Test sending a custom header and receiving it back '''
+ dest = '/queue/custom-header'
+ hdrs = {'x-custom-header-1': 'value1',
+ 'x-custom-header-2': 'value2',
+ 'custom-header-3': 'value3'}
+ self.listener.reset(1)
+ recv_hdrs = self.simple_test_send_rec(dest, headers=hdrs)
+ self.assertEquals('value1', recv_hdrs['x-custom-header-1'])
+ self.assertEquals('value2', recv_hdrs['x-custom-header-2'])
+ self.assertEquals('value3', recv_hdrs['custom-header-3'])
+
+ def test_disconnect(self):
+ ''' Test DISCONNECT command'''
+ self.conn.disconnect()
+ self.assertFalse(self.conn.is_connected())
+
+ def test_disconnect_with_receipt(self):
+ ''' Test the DISCONNECT command with receipts '''
+ time.sleep(3)
+ self.listener.reset(1)
+ self.conn.send_frame("DISCONNECT", {"receipt": "test"})
+ self.assertTrue(self.listener.wait())
+ self.assertEquals(1, len(self.listener.receipts))
+ receiptReceived = self.listener.receipts[0]['headers']['receipt-id']
+ self.assertEquals("test", receiptReceived
+ , "Wrong receipt received: '" + receiptReceived + "'")
+
+ def unsub_test(self, dest, verbs, numRcts=0):
+ def afterfun():
+ self.conn.send(dest, "after-test")
+ subverb, unsubverb = verbs
+ self.assertListenerAfter(subverb, numMsgs=1,
+ errMsg="FAILED to subscribe and send")
+ self.assertListenerAfter(unsubverb, numRcts=numRcts,
+ errMsg="Incorrect responses from UNSUBSCRIBE")
+ self.assertListenerAfter(afterfun,
+ errMsg="Still receiving messages")
+
+ def sub_and_send(self, dest, subid=None, receipt=None):
+ def subfun():
+ self.subscribe_dest(self.conn, dest, subid)
+ self.conn.send(dest, "test")
+ def unsubfun():
+ headers = {}
+            if receipt is not None:
+ headers['receipt'] = receipt
+ self.unsubscribe_dest(self.conn, dest, subid, **headers)
+ return subfun, unsubfun
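+
+    # The (subscribe, unsubscribe) pair returned above is what unsub_test()
+    # consumes, e.g. (destination illustrative):
+    #
+    #   d = "/queue/unsub01"
+    #   self.unsub_test(d, self.sub_and_send(d))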
diff --git a/deps/rabbitmq_stomp/test/python_SUITE_data/src/parsing.py b/deps/rabbitmq_stomp/test/python_SUITE_data/src/parsing.py
new file mode 100644
index 0000000000..40f908c5d9
--- /dev/null
+++ b/deps/rabbitmq_stomp/test/python_SUITE_data/src/parsing.py
@@ -0,0 +1,331 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+##
+
+import unittest
+import re
+import socket
+import functools
+import time
+import sys
+import os
+
+def connect(cnames):
+ ''' Decorator that creates stomp connections and issues CONNECT '''
+ cmd=('CONNECT\n'
+ 'login:guest\n'
+ 'passcode:guest\n'
+ '\n'
+ '\n\0')
+ resp = ('CONNECTED\n'
+ 'server:RabbitMQ/(.*)\n'
+ 'session:(.*)\n'
+ 'heart-beat:0,0\n'
+ 'version:1.0\n'
+ '\n\x00')
+ def w(m):
+ @functools.wraps(m)
+ def wrapper(self, *args, **kwargs):
+ for cname in cnames:
+ sd = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ sd.settimeout(30000)
+ sd.connect((self.host, self.port))
+ sd.sendall(cmd.encode('utf-8'))
+ self.match(resp, sd.recv(4096).decode('utf-8'))
+ setattr(self, cname, sd)
+ try:
+ r = m(self, *args, **kwargs)
+ finally:
+ for cname in cnames:
+ try:
+ getattr(self, cname).close()
+ except IOError:
+ pass
+ return r
+ return wrapper
+ return w
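+
+# Minimal usage sketch for the decorator above (frame content illustrative):
+# each name in cnames becomes a CONNECTed socket attribute on the test
+# instance for the duration of the decorated method.
+#
+#   @connect(['cd'])
+#   def test_something(self):
+#       self.cd.sendall(b'SEND\ndestination:/queue/q\n\nhi\x00')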
+
+
+class TestParsing(unittest.TestCase):
+ host='127.0.0.1'
+    # The STOMP default port is 61613, but it sits in the middle of the
+    # ephemeral ports range on many operating systems and may already be
+    # in use. The test suite therefore picks a free port itself and hands
+    # it to us via the STOMP_PORT environment variable.
+ port=int(os.environ["STOMP_PORT"])
+
+
+ def match(self, pattern, data):
+ ''' helper: try to match a regexp with a string.
+ Fail test if they do not match.
+ '''
+ matched = re.match(pattern, data)
+ if matched:
+ return matched.groups()
+ self.assertTrue(False, 'No match:\n{}\n\n{}'.format(pattern, data))
+
+ def recv_atleast(self, bufsize):
+ recvhead = []
+ rl = bufsize
+ while rl > 0:
+ buf = self.cd.recv(rl).decode('utf-8')
+ bl = len(buf)
+ if bl==0: break
+ recvhead.append( buf )
+ rl -= bl
+ return ''.join(recvhead)
+
+
+ @connect(['cd'])
+ def test_newline_after_nul(self):
+ cmd = ('\n'
+ 'SUBSCRIBE\n'
+ 'destination:/exchange/amq.fanout\n'
+ '\n\x00\n'
+ 'SEND\n'
+ 'content-type:text/plain\n'
+ 'destination:/exchange/amq.fanout\n\n'
+ 'hello\n\x00\n')
+ self.cd.sendall(cmd.encode('utf-8'))
+ resp = ('MESSAGE\n'
+ 'destination:/exchange/amq.fanout\n'
+ 'message-id:Q_/exchange/amq.fanout@@session-(.*)\n'
+ 'redelivered:false\n'
+ 'content-type:text/plain\n'
+ 'content-length:6\n'
+ '\n'
+ 'hello\n\0')
+ self.match(resp, self.cd.recv(4096).decode('utf-8'))
+
+ @connect(['cd'])
+ def test_send_without_content_type(self):
+ cmd = ('\n'
+ 'SUBSCRIBE\n'
+ 'destination:/exchange/amq.fanout\n'
+ '\n\x00\n'
+ 'SEND\n'
+ 'destination:/exchange/amq.fanout\n\n'
+ 'hello\n\x00')
+ self.cd.sendall(cmd.encode('utf-8'))
+ resp = ('MESSAGE\n'
+ 'destination:/exchange/amq.fanout\n'
+ 'message-id:Q_/exchange/amq.fanout@@session-(.*)\n'
+ 'redelivered:false\n'
+ 'content-length:6\n'
+ '\n'
+ 'hello\n\0')
+ self.match(resp, self.cd.recv(4096).decode('utf-8'))
+
+ @connect(['cd'])
+ def test_send_without_content_type_binary(self):
+ msg = 'hello'
+ cmd = ('\n'
+ 'SUBSCRIBE\n'
+ 'destination:/exchange/amq.fanout\n'
+ '\n\x00\n'
+ 'SEND\n'
+ 'destination:/exchange/amq.fanout\n' +
+ 'content-length:{}\n\n'.format(len(msg)) +
+ '{}\x00'.format(msg))
+ self.cd.sendall(cmd.encode('utf-8'))
+ resp = ('MESSAGE\n'
+ 'destination:/exchange/amq.fanout\n'
+ 'message-id:Q_/exchange/amq.fanout@@session-(.*)\n'
+ 'redelivered:false\n' +
+ 'content-length:{}\n'.format(len(msg)) +
+ '\n{}\0'.format(msg))
+ self.match(resp, self.cd.recv(4096).decode('utf-8'))
+
+ @connect(['cd'])
+ def test_newline_after_nul_and_leading_nul(self):
+ cmd = ('\n'
+ '\x00SUBSCRIBE\n'
+ 'destination:/exchange/amq.fanout\n'
+ '\n\x00\n'
+ '\x00SEND\n'
+ 'destination:/exchange/amq.fanout\n'
+ 'content-type:text/plain\n'
+ '\nhello\n\x00\n')
+ self.cd.sendall(cmd.encode('utf-8'))
+ resp = ('MESSAGE\n'
+ 'destination:/exchange/amq.fanout\n'
+ 'message-id:Q_/exchange/amq.fanout@@session-(.*)\n'
+ 'redelivered:false\n'
+ 'content-type:text/plain\n'
+ 'content-length:6\n'
+ '\n'
+ 'hello\n\0')
+ self.match(resp, self.cd.recv(4096).decode('utf-8'))
+
+ @connect(['cd'])
+ def test_bad_command(self):
+ ''' Trigger an error message. '''
+ cmd = ('WRONGCOMMAND\n'
+ 'destination:a\n'
+ 'exchange:amq.fanout\n'
+ '\n\0')
+ self.cd.sendall(cmd.encode('utf-8'))
+ resp = ('ERROR\n'
+ 'message:Bad command\n'
+ 'content-type:text/plain\n'
+ 'version:1.0,1.1,1.2\n'
+ 'content-length:43\n'
+ '\n'
+ 'Could not interpret command "WRONGCOMMAND"\n'
+ '\0')
+ self.match(resp, self.cd.recv(4096).decode('utf-8'))
+
+ @connect(['sd', 'cd1', 'cd2'])
+ def test_broadcast(self):
+ ''' Single message should be delivered to two consumers:
+ amq.topic --routing_key--> first_queue --> first_connection
+ \--routing_key--> second_queue--> second_connection
+ '''
+ subscribe=( 'SUBSCRIBE\n'
+ 'id: XsKNhAf\n'
+ 'destination:/exchange/amq.topic/da9d4779\n'
+ '\n\0')
+ for cd in [self.cd1, self.cd2]:
+ cd.sendall(subscribe.encode('utf-8'))
+
+ time.sleep(0.1)
+
+ cmd = ('SEND\n'
+ 'content-type:text/plain\n'
+ 'destination:/exchange/amq.topic/da9d4779\n'
+ '\n'
+ 'message'
+ '\n\0')
+ self.sd.sendall(cmd.encode('utf-8'))
+
+ resp=('MESSAGE\n'
+ 'subscription:(.*)\n'
+ 'destination:/topic/da9d4779\n'
+ 'message-id:(.*)\n'
+ 'redelivered:false\n'
+ 'content-type:text/plain\n'
+ 'content-length:8\n'
+ '\n'
+ 'message'
+ '\n\x00')
+ for cd in [self.cd1, self.cd2]:
+ self.match(resp, cd.recv(4096).decode('utf-8'))
+
+ @connect(['cd'])
+ def test_message_with_embedded_nulls(self):
+ ''' Test sending/receiving message with embedded nulls. '''
+ dest='destination:/exchange/amq.topic/test_embed_nulls_message\n'
+ resp_dest='destination:/topic/test_embed_nulls_message\n'
+ subscribe=( 'SUBSCRIBE\n'
+ 'id:xxx\n'
+ +dest+
+ '\n\0')
+ self.cd.sendall(subscribe.encode('utf-8'))
+
+ boilerplate = '0123456789'*1024 # large enough boilerplate
+ message = '01'
+ oldi = 2
+ for i in [5, 90, 256-1, 384-1, 512, 1024, 1024+256+64+32]:
+ message = message + '\0' + boilerplate[oldi+1:i]
+ oldi = i
+ msg_len = len(message)
+
+ cmd = ('SEND\n'
+ +dest+
+ 'content-type:text/plain\n'
+ 'content-length:%i\n'
+ '\n'
+ '%s'
+ '\0' % (len(message), message))
+ self.cd.sendall(cmd.encode('utf-8'))
+
+ headresp=('MESSAGE\n' # 8
+ 'subscription:(.*)\n' # 14 + subscription
+ +resp_dest+ # 44
+ 'message-id:(.*)\n' # 12 + message-id
+ 'redelivered:false\n' # 18
+ 'content-type:text/plain\n' # 24
+ 'content-length:%i\n' # 16 + 4==len('1024')
+ '\n' # 1
+ '(.*)$' # prefix of body+null (potentially)
+ % len(message) )
+ headlen = 8 + 24 + 14 + (3) + 44 + 12 + 18 + (48) + 16 + (4) + 1 + (1)
+
+ headbuf = self.recv_atleast(headlen)
+ self.assertFalse(len(headbuf) == 0)
+
+ (sub, msg_id, bodyprefix) = self.match(headresp, headbuf)
+ bodyresp=( '%s\0' % message )
+        bodylen = len(bodyresp)
+
+ bodybuf = ''.join([bodyprefix,
+ self.recv_atleast(bodylen - len(bodyprefix))])
+
+ self.assertEqual(len(bodybuf), msg_len+1,
+ "body received not the same length as message sent")
+ self.assertEqual(bodybuf, bodyresp,
+ " body (...'%s')\nincorrectly returned as (...'%s')"
+ % (bodyresp[-10:], bodybuf[-10:]))
+
+ @connect(['cd'])
+ def test_message_in_packets(self):
+ ''' Test sending/receiving message in packets. '''
+ base_dest='topic/test_embed_nulls_message\n'
+ dest='destination:/exchange/amq.' + base_dest
+ resp_dest='destination:/'+ base_dest
+ subscribe=( 'SUBSCRIBE\n'
+ 'id:xxx\n'
+ +dest+
+ '\n\0')
+ self.cd.sendall(subscribe.encode('utf-8'))
+
+ boilerplate = '0123456789'*1024 # large enough boilerplate
+
+ message = boilerplate[:1024 + 512 + 256 + 32]
+ msg_len = len(message)
+
+ msg_to_send = ('SEND\n'
+ +dest+
+ 'content-type:text/plain\n'
+ '\n'
+ '%s'
+ '\0' % (message) )
+ packet_size = 191
+ part_index = 0
+ msg_to_send_len = len(msg_to_send)
+ while part_index < msg_to_send_len:
+ part = msg_to_send[part_index:part_index+packet_size]
+ time.sleep(0.1)
+ self.cd.sendall(part.encode('utf-8'))
+ part_index += packet_size
+
+ headresp=('MESSAGE\n' # 8
+ 'subscription:(.*)\n' # 14 + subscription
+ +resp_dest+ # 44
+ 'message-id:(.*)\n' # 12 + message-id
+ 'redelivered:false\n' # 18
+ 'content-type:text/plain\n' # 24
+ 'content-length:%i\n' # 16 + 4==len('1024')
+ '\n' # 1
+ '(.*)$' # prefix of body+null (potentially)
+ % len(message) )
+ headlen = 8 + 24 + 14 + (3) + 44 + 12 + 18 + (48) + 16 + (4) + 1 + (1)
+
+ headbuf = self.recv_atleast(headlen)
+ self.assertFalse(len(headbuf) == 0)
+
+ (sub, msg_id, bodyprefix) = self.match(headresp, headbuf)
+ bodyresp=( '%s\0' % message )
+        bodylen = len(bodyresp)
+
+ bodybuf = ''.join([bodyprefix,
+ self.recv_atleast(bodylen - len(bodyprefix))])
+
+ self.assertEqual(len(bodybuf), msg_len+1,
+ "body received not the same length as message sent")
+ self.assertEqual(bodybuf, bodyresp,
+ " body ('%s')\nincorrectly returned as ('%s')"
+ % (bodyresp, bodybuf))
diff --git a/deps/rabbitmq_stomp/test/python_SUITE_data/src/queue_properties.py b/deps/rabbitmq_stomp/test/python_SUITE_data/src/queue_properties.py
new file mode 100644
index 0000000000..3761c92360
--- /dev/null
+++ b/deps/rabbitmq_stomp/test/python_SUITE_data/src/queue_properties.py
@@ -0,0 +1,87 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+##
+
+import unittest
+import stomp
+import pika
+import base
+import time
+import os
+
+class TestQueueProperties(base.BaseTest):
+
+ def test_subscribe(self):
+ destination = "/queue/queue-properties-subscribe-test"
+
+ # subscribe
+ self.subscribe_dest(self.conn, destination, None,
+ headers={
+ 'x-message-ttl': 60000,
+ 'x-expires': 70000,
+ 'x-max-length': 10,
+ 'x-max-length-bytes': 20000,
+ 'x-dead-letter-exchange': 'dead-letter-exchange',
+ 'x-dead-letter-routing-key': 'dead-letter-routing-key',
+ 'x-max-priority': 6,
+ })
+
+ # now try to declare the queue using pika
+ # if the properties are the same we should
+ # not get any error
+ connection = pika.BlockingConnection(pika.ConnectionParameters(
+ host='127.0.0.1', port=int(os.environ["AMQP_PORT"])))
+ channel = connection.channel()
+ channel.queue_declare(queue='queue-properties-subscribe-test',
+ durable=True,
+ arguments={
+ 'x-message-ttl': 60000,
+ 'x-expires': 70000,
+ 'x-max-length': 10,
+ 'x-max-length-bytes': 20000,
+ 'x-dead-letter-exchange': 'dead-letter-exchange',
+ 'x-dead-letter-routing-key': 'dead-letter-routing-key',
+ 'x-max-priority': 6,
+ })
+
+ self.conn.disconnect()
+ connection.close()
+
+ def test_send(self):
+ destination = "/queue/queue-properties-send-test"
+
+ # send
+ self.conn.send(destination, "test1",
+ headers={
+ 'x-message-ttl': 60000,
+ 'x-expires': 70000,
+ 'x-max-length': 10,
+ 'x-max-length-bytes': 20000,
+ 'x-dead-letter-exchange': 'dead-letter-exchange',
+ 'x-dead-letter-routing-key': 'dead-letter-routing-key',
+ 'x-max-priority': 6,
+ })
+
+ # now try to declare the queue using pika
+ # if the properties are the same we should
+ # not get any error
+ connection = pika.BlockingConnection(pika.ConnectionParameters(
+ host='127.0.0.1', port=int(os.environ["AMQP_PORT"])))
+ channel = connection.channel()
+ channel.queue_declare(queue='queue-properties-send-test',
+ durable=True,
+ arguments={
+ 'x-message-ttl': 60000,
+ 'x-expires': 70000,
+ 'x-max-length': 10,
+ 'x-max-length-bytes': 20000,
+ 'x-dead-letter-exchange': 'dead-letter-exchange',
+ 'x-dead-letter-routing-key': 'dead-letter-routing-key',
+ 'x-max-priority': 6,
+ })
+
+ self.conn.disconnect()
+ connection.close()
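+
+    # Note: if the arguments of the STOMP-created queue differed from the
+    # ones passed to queue_declare() above, pika would raise a channel-level
+    # PRECONDITION_FAILED error and the test would fail.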
diff --git a/deps/rabbitmq_stomp/test/python_SUITE_data/src/redelivered.py b/deps/rabbitmq_stomp/test/python_SUITE_data/src/redelivered.py
new file mode 100644
index 0000000000..3dfdd72cc9
--- /dev/null
+++ b/deps/rabbitmq_stomp/test/python_SUITE_data/src/redelivered.py
@@ -0,0 +1,40 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+##
+
+import unittest
+import stomp
+import base
+import time
+
+class TestRedelivered(base.BaseTest):
+
+ def test_redelivered(self):
+ destination = "/queue/redelivered-test"
+
+ # subscribe and send message
+ self.subscribe_dest(self.conn, destination, None, ack='client')
+ self.conn.send(destination, "test1")
+ message_receive_timeout = 30
+ self.assertTrue(self.listener.wait(message_receive_timeout), "Test message not received within {0} seconds".format(message_receive_timeout))
+ self.assertEquals(1, len(self.listener.messages))
+ self.assertEquals('false', self.listener.messages[0]['headers']['redelivered'])
+
+ # disconnect with no ack
+ self.conn.disconnect()
+
+ # now reconnect
+ conn2 = self.create_connection()
+ try:
+ listener2 = base.WaitableListener()
+ listener2.reset(1)
+ conn2.set_listener('', listener2)
+ self.subscribe_dest(conn2, destination, None, ack='client')
+ self.assertTrue(listener2.wait(), "message not received again")
+ self.assertEquals(1, len(listener2.messages))
+ self.assertEquals('true', listener2.messages[0]['headers']['redelivered'])
+ finally:
+ conn2.disconnect()
diff --git a/deps/rabbitmq_stomp/test/python_SUITE_data/src/reliability.py b/deps/rabbitmq_stomp/test/python_SUITE_data/src/reliability.py
new file mode 100644
index 0000000000..6fbcb3d492
--- /dev/null
+++ b/deps/rabbitmq_stomp/test/python_SUITE_data/src/reliability.py
@@ -0,0 +1,41 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+##
+
+import base
+import stomp
+import unittest
+import time
+
+class TestReliability(base.BaseTest):
+
+ def test_send_and_disconnect(self):
+        ''' Test that closing the socket just after sending does not lose messages '''
+ destination = "/queue/reliability"
+ pub_conn = self.create_connection()
+ try:
+ msg = "0" * (128)
+
+ count = 10000
+
+ listener = base.WaitableListener()
+ listener.reset(count)
+ self.conn.set_listener('', listener)
+ self.subscribe_dest(self.conn, destination, None)
+
+ for x in range(0, count):
+ pub_conn.send(destination, msg + str(x))
+ time.sleep(2.0)
+ pub_conn.disconnect()
+
+ if listener.wait(30):
+ self.assertEquals(count, len(listener.messages))
+ else:
+ listener.print_state("Final state of listener:")
+ self.fail("Did not receive %s messages in time" % count)
+ finally:
+ if pub_conn.is_connected():
+ pub_conn.disconnect()
diff --git a/deps/rabbitmq_stomp/test/python_SUITE_data/src/ssl_lifecycle.py b/deps/rabbitmq_stomp/test/python_SUITE_data/src/ssl_lifecycle.py
new file mode 100644
index 0000000000..570ad9f5a3
--- /dev/null
+++ b/deps/rabbitmq_stomp/test/python_SUITE_data/src/ssl_lifecycle.py
@@ -0,0 +1,81 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+##
+
+import unittest
+import os
+import os.path
+import sys
+
+import stomp
+import base
+import ssl
+
+base_path = os.path.dirname(sys.argv[0])
+
+ssl_key_file = os.path.join(os.getenv('SSL_CERTS_PATH'), 'client', 'key.pem')
+ssl_cert_file = os.path.join(os.getenv('SSL_CERTS_PATH'), 'client', 'cert.pem')
+ssl_ca_certs = os.path.join(os.getenv('SSL_CERTS_PATH'), 'testca', 'cacert.pem')
+
+class TestSslClient(unittest.TestCase):
+
+ def __ssl_connect(self):
+ conn = stomp.Connection(host_and_ports = [ ('localhost', int(os.environ["STOMP_PORT_TLS"])) ],
+ use_ssl = True, ssl_key_file = ssl_key_file,
+ ssl_cert_file = ssl_cert_file,
+ ssl_ca_certs = ssl_ca_certs)
+ print("FILE: ".format(ssl_cert_file))
+ conn.start()
+ conn.connect("guest", "guest")
+ return conn
+
+ def __ssl_auth_connect(self):
+ conn = stomp.Connection(host_and_ports = [ ('localhost', int(os.environ["STOMP_PORT_TLS"])) ],
+ use_ssl = True, ssl_key_file = ssl_key_file,
+ ssl_cert_file = ssl_cert_file,
+ ssl_ca_certs = ssl_ca_certs)
+ conn.start()
+ conn.connect()
+ return conn
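+
+    # Unlike __ssl_connect(), no credentials are passed above: the broker is
+    # expected to authenticate the client from its TLS certificate
+    # (ssl_cert_login), relying on test_util.ensure_ssl_auth_user() having
+    # created the matching passwordless user.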
+
+ def test_ssl_connect(self):
+ conn = self.__ssl_connect()
+ conn.disconnect()
+
+ def test_ssl_auth_connect(self):
+ conn = self.__ssl_auth_connect()
+ conn.disconnect()
+
+ def test_ssl_send_receive(self):
+ conn = self.__ssl_connect()
+ self.__test_conn(conn)
+
+ def test_ssl_auth_send_receive(self):
+ conn = self.__ssl_auth_connect()
+ self.__test_conn(conn)
+
+ def __test_conn(self, conn):
+ try:
+ listener = base.WaitableListener()
+
+ conn.set_listener('', listener)
+
+ d = "/topic/ssl.test"
+ conn.subscribe(destination=d, ack="auto", id="ctag", receipt="sub")
+
+ self.assertTrue(listener.wait(1))
+
+ self.assertEquals("sub",
+ listener.receipts[0]['headers']['receipt-id'])
+
+ listener.reset(1)
+ conn.send(body="Hello SSL!", destination=d)
+
+ self.assertTrue(listener.wait())
+
+ self.assertEquals("Hello SSL!", listener.messages[0]['message'])
+ finally:
+ conn.disconnect()
diff --git a/deps/rabbitmq_stomp/test/python_SUITE_data/src/test.py b/deps/rabbitmq_stomp/test/python_SUITE_data/src/test.py
new file mode 100755
index 0000000000..01967465a2
--- /dev/null
+++ b/deps/rabbitmq_stomp/test/python_SUITE_data/src/test.py
@@ -0,0 +1,21 @@
+#!/usr/bin/env python
+
+import test_runner
+
+if __name__ == '__main__':
+ modules = [
+ 'parsing',
+ 'errors',
+ 'lifecycle',
+ 'ack',
+ 'amqp_headers',
+ 'queue_properties',
+ 'reliability',
+ 'transactions',
+ 'x_queue_name',
+ 'destinations',
+ 'redelivered',
+ 'topic_permissions',
+ 'x_queue_type_quorum'
+ ]
+ test_runner.run_unittests(modules)
diff --git a/deps/rabbitmq_stomp/test/python_SUITE_data/src/test_connect_options.py b/deps/rabbitmq_stomp/test/python_SUITE_data/src/test_connect_options.py
new file mode 100755
index 0000000000..10efa4fbb4
--- /dev/null
+++ b/deps/rabbitmq_stomp/test/python_SUITE_data/src/test_connect_options.py
@@ -0,0 +1,15 @@
+#!/usr/bin/env python
+
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+##
+
+import test_runner
+
+if __name__ == '__main__':
+ modules = ['connect_options']
+ test_runner.run_unittests(modules)
+
diff --git a/deps/rabbitmq_stomp/test/python_SUITE_data/src/test_runner.py b/deps/rabbitmq_stomp/test/python_SUITE_data/src/test_runner.py
new file mode 100644
index 0000000000..9aa5855b02
--- /dev/null
+++ b/deps/rabbitmq_stomp/test/python_SUITE_data/src/test_runner.py
@@ -0,0 +1,26 @@
+#!/usr/bin/env python
+
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+##
+
+import unittest
+import sys
+import os
+
+def run_unittests(modules):
+ suite = unittest.TestSuite()
+ for m in modules:
+ mod = __import__(m)
+ for name in dir(mod):
+ obj = getattr(mod, name)
+ if name.startswith("Test") and issubclass(obj, unittest.TestCase):
+ suite.addTest(unittest.TestLoader().loadTestsFromTestCase(obj))
+
+ ts = unittest.TextTestRunner().run(unittest.TestSuite(suite))
+ if ts.errors or ts.failures:
+ sys.exit(1)
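+
+# Usage sketch (module names illustrative): each module must be importable
+# from the test directory and define unittest.TestCase subclasses whose
+# names start with "Test".
+#
+#   import test_runner
+#   test_runner.run_unittests(['parsing', 'errors'])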
+
diff --git a/deps/rabbitmq_stomp/test/python_SUITE_data/src/test_ssl.py b/deps/rabbitmq_stomp/test/python_SUITE_data/src/test_ssl.py
new file mode 100755
index 0000000000..95d2d2baa7
--- /dev/null
+++ b/deps/rabbitmq_stomp/test/python_SUITE_data/src/test_ssl.py
@@ -0,0 +1,17 @@
+#!/usr/bin/env python
+
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+##
+
+import test_runner
+import test_util
+
+if __name__ == '__main__':
+ modules = ['ssl_lifecycle']
+ test_util.ensure_ssl_auth_user()
+ test_runner.run_unittests(modules)
+
diff --git a/deps/rabbitmq_stomp/test/python_SUITE_data/src/test_util.py b/deps/rabbitmq_stomp/test/python_SUITE_data/src/test_util.py
new file mode 100644
index 0000000000..911100c54f
--- /dev/null
+++ b/deps/rabbitmq_stomp/test/python_SUITE_data/src/test_util.py
@@ -0,0 +1,52 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+##
+
+import subprocess
+import socket
+import sys
+import os
+import os.path
+
+def ensure_ssl_auth_user():
+ user = 'O=client,CN=%s' % socket.gethostname()
+ rabbitmqctl(['stop_app'])
+ rabbitmqctl(['reset'])
+ rabbitmqctl(['start_app'])
+ rabbitmqctl(['add_user', user, 'foo'])
+ rabbitmqctl(['clear_password', user])
+ rabbitmqctl(['set_permissions', user, '.*', '.*', '.*'])
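+
+# The user name above mirrors the subject of the client certificate used by
+# the TLS tests, so that certificate-based authentication (ssl_cert_login)
+# can map the peer certificate onto an internal user with no password.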
+
+def enable_implicit_connect():
+ switch_config(implicit_connect='true', default_user='[{login, "guest"}, {passcode, "guest"}]')
+
+def disable_implicit_connect():
+ switch_config(implicit_connect='false', default_user='[]')
+
+def enable_default_user():
+ switch_config(default_user='[{login, "guest"}, {passcode, "guest"}]')
+
+def disable_default_user():
+ switch_config(default_user='[]')
+
+def switch_config(implicit_connect='', default_user=''):
+ cmd = ''
+ cmd += 'ok = io:format("~n===== Ranch listeners (before stop) =====~n~n~p~n", [ranch:info()]),'
+ cmd += 'ok = application:stop(rabbitmq_stomp),'
+ cmd += 'io:format("~n===== Ranch listeners (after stop) =====~n~n~p~n", [ranch:info()]),'
+ if implicit_connect:
+ cmd += 'ok = application:set_env(rabbitmq_stomp,implicit_connect,{}),'.format(implicit_connect)
+ if default_user:
+ cmd += 'ok = application:set_env(rabbitmq_stomp,default_user,{}),'.format(default_user)
+ cmd += 'ok = application:start(rabbitmq_stomp),'
+ cmd += 'io:format("~n===== Ranch listeners (after start) =====~n~n~p~n", [ranch:info()]).'
+ rabbitmqctl(['eval', cmd])
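+
+# Sketch of the invocation switch_config() produces (node name and values
+# illustrative):
+#
+#   rabbitmqctl -n rabbit@host eval \
+#     'ok = application:stop(rabbitmq_stomp),
+#      ok = application:set_env(rabbitmq_stomp, implicit_connect, true),
+#      ok = application:start(rabbitmq_stomp), ...'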
+
+def rabbitmqctl(args):
+ ctl = os.getenv('RABBITMQCTL')
+ cmdline = [ctl, '-n', os.getenv('RABBITMQ_NODENAME')]
+ cmdline.extend(args)
+ subprocess.check_call(cmdline)
diff --git a/deps/rabbitmq_stomp/test/python_SUITE_data/src/topic_permissions.py b/deps/rabbitmq_stomp/test/python_SUITE_data/src/topic_permissions.py
new file mode 100644
index 0000000000..6272f6d8b5
--- /dev/null
+++ b/deps/rabbitmq_stomp/test/python_SUITE_data/src/topic_permissions.py
@@ -0,0 +1,52 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+##
+
+import base
+import test_util
+import sys
+
+
+class TestTopicPermissions(base.BaseTest):
+ @classmethod
+ def setUpClass(cls):
+ test_util.rabbitmqctl(['set_topic_permissions', 'guest', 'amq.topic', '^{username}.Authorised', '^{username}.Authorised'])
+ cls.authorised_topic = '/topic/guest.AuthorisedTopic'
+ cls.restricted_topic = '/topic/guest.RestrictedTopic'
+
+ @classmethod
+ def tearDownClass(cls):
+ test_util.rabbitmqctl(['clear_topic_permissions', 'guest'])
+
+ def test_publish_authorisation(self):
+ ''' Test topic permissions via publish '''
+ self.listener.reset()
+
+ # send on authorised topic
+ self.subscribe_dest(self.conn, self.authorised_topic, None)
+ self.conn.send(self.authorised_topic, "authorised hello")
+
+ self.assertTrue(self.listener.wait(), "Timeout, no message received")
+
+ # assert no errors
+ if len(self.listener.errors) > 0:
+ self.fail(self.listener.errors[0]['message'])
+
+ # check msg content
+ msg = self.listener.messages[0]
+ self.assertEqual("authorised hello", msg['message'])
+ self.assertEqual(self.authorised_topic, msg['headers']['destination'])
+
+ self.listener.reset()
+
+ # send on restricted topic
+ self.conn.send(self.restricted_topic, "hello")
+
+ self.assertTrue(self.listener.wait(), "Timeout, no message received")
+
+ # assert errors
+ self.assertGreater(len(self.listener.errors), 0)
+ self.assertIn("ACCESS_REFUSED", self.listener.errors[0]['message'])
diff --git a/deps/rabbitmq_stomp/test/python_SUITE_data/src/transactions.py b/deps/rabbitmq_stomp/test/python_SUITE_data/src/transactions.py
new file mode 100644
index 0000000000..379806bfb8
--- /dev/null
+++ b/deps/rabbitmq_stomp/test/python_SUITE_data/src/transactions.py
@@ -0,0 +1,61 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+##
+
+import unittest
+import stomp
+import base
+import time
+
+class TestTransactions(base.BaseTest):
+
+ def test_tx_commit(self):
+ ''' Test TX with a COMMIT and ensure messages are delivered '''
+ destination = "/exchange/amq.fanout"
+ tx = "test.tx"
+
+ self.listener.reset()
+ self.subscribe_dest(self.conn, destination, None)
+ self.conn.begin(transaction=tx)
+ self.conn.send(destination, "hello!", transaction=tx)
+ self.conn.send(destination, "again!")
+
+ ## should see the second message
+ self.assertTrue(self.listener.wait(3))
+ self.assertEquals(1, len(self.listener.messages))
+ self.assertEquals("again!", self.listener.messages[0]['message'])
+
+ ## now look for the first message
+ self.listener.reset()
+ self.conn.commit(transaction=tx)
+ self.assertTrue(self.listener.wait(3))
+ self.assertEquals(1, len(self.listener.messages),
+ "Missing committed message")
+ self.assertEquals("hello!", self.listener.messages[0]['message'])
+
+ def test_tx_abort(self):
+ ''' Test TX with an ABORT and ensure messages are discarded '''
+ destination = "/exchange/amq.fanout"
+ tx = "test.tx"
+
+ self.listener.reset()
+ self.subscribe_dest(self.conn, destination, None)
+ self.conn.begin(transaction=tx)
+ self.conn.send(destination, "hello!", transaction=tx)
+ self.conn.send(destination, "again!")
+
+ ## should see the second message
+ self.assertTrue(self.listener.wait(3))
+ self.assertEquals(1, len(self.listener.messages))
+ self.assertEquals("again!", self.listener.messages[0]['message'])
+
+ ## now look for the first message to be discarded
+ self.listener.reset()
+ self.conn.abort(transaction=tx)
+ self.assertFalse(self.listener.wait(3))
+ self.assertEquals(0, len(self.listener.messages),
+ "Unexpected committed message")
+
diff --git a/deps/rabbitmq_stomp/test/python_SUITE_data/src/x_queue_name.py b/deps/rabbitmq_stomp/test/python_SUITE_data/src/x_queue_name.py
new file mode 100644
index 0000000000..f2c90486eb
--- /dev/null
+++ b/deps/rabbitmq_stomp/test/python_SUITE_data/src/x_queue_name.py
@@ -0,0 +1,71 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+##
+
+import unittest
+import stomp
+import pika
+import base
+import time
+import os
+
+class TestUserGeneratedQueueName(base.BaseTest):
+
+ def test_exchange_dest(self):
+ queueName='my-user-generated-queue-name-exchange'
+
+ # subscribe
+ self.subscribe_dest(
+ self.conn,
+ '/exchange/amq.direct/test',
+ None,
+ headers={ 'x-queue-name': queueName }
+ )
+
+ connection = pika.BlockingConnection(
+ pika.ConnectionParameters( host='127.0.0.1', port=int(os.environ["AMQP_PORT"])))
+ channel = connection.channel()
+
+ # publish a message to the named queue
+ channel.basic_publish(
+ exchange='',
+ routing_key=queueName,
+ body='Hello World!')
+
+ # check if we receive the message from the STOMP subscription
+ self.assertTrue(self.listener.wait(2), "initial message not received")
+ self.assertEquals(1, len(self.listener.messages))
+
+ self.conn.disconnect()
+ connection.close()
+
+ def test_topic_dest(self):
+ queueName='my-user-generated-queue-name-topic'
+
+ # subscribe
+ self.subscribe_dest(
+ self.conn,
+ '/topic/test',
+ None,
+ headers={ 'x-queue-name': queueName }
+ )
+
+ connection = pika.BlockingConnection(
+ pika.ConnectionParameters( host='127.0.0.1', port=int(os.environ["AMQP_PORT"])))
+ channel = connection.channel()
+
+ # publish a message to the named queue
+ channel.basic_publish(
+ exchange='',
+ routing_key=queueName,
+ body='Hello World!')
+
+ # check if we receive the message from the STOMP subscription
+ self.assertTrue(self.listener.wait(2), "initial message not received")
+ self.assertEquals(1, len(self.listener.messages))
+
+ self.conn.disconnect()
+ connection.close()
diff --git a/deps/rabbitmq_stomp/test/python_SUITE_data/src/x_queue_type_quorum.py b/deps/rabbitmq_stomp/test/python_SUITE_data/src/x_queue_type_quorum.py
new file mode 100644
index 0000000000..1018abd0d4
--- /dev/null
+++ b/deps/rabbitmq_stomp/test/python_SUITE_data/src/x_queue_type_quorum.py
@@ -0,0 +1,62 @@
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+##
+
+import pika
+import base
+import time
+import os
+import re
+
+
+class TestUserGeneratedQueueName(base.BaseTest):
+
+ def test_quorum_queue(self):
+ queueName = 'my-quorum-queue'
+
+ # subscribe
+ self.subscribe_dest(
+ self.conn,
+ '/topic/quorum-queue-test',
+ None,
+ headers={
+ 'x-queue-name': queueName,
+ 'x-queue-type': 'quorum',
+ 'durable': True,
+ 'auto-delete': False,
+ 'id': 1234
+ }
+ )
+
+        # give the quorum queue some time to start
+ time.sleep(5)
+
+ connection = pika.BlockingConnection(
+ pika.ConnectionParameters(host='127.0.0.1', port=int(os.environ["AMQP_PORT"])))
+ channel = connection.channel()
+
+ # publish a message to the named queue
+ channel.basic_publish(
+ exchange='',
+ routing_key=queueName,
+ body='Hello World!')
+
+ # could we declare a quorum queue?
+ quorum_queue_supported = True
+ if len(self.listener.errors) > 0:
+ pattern = re.compile(r"feature flag is disabled", re.MULTILINE)
+ for error in self.listener.errors:
+                if pattern.search(error['message']) is not None:
+ quorum_queue_supported = False
+ break
+
+ if quorum_queue_supported:
+ # check if we receive the message from the STOMP subscription
+ self.assertTrue(self.listener.wait(5), "initial message not received")
+ self.assertEquals(1, len(self.listener.messages))
+ self.conn.disconnect()
+
+ connection.close()
diff --git a/deps/rabbitmq_stomp/test/src/rabbit_stomp_client.erl b/deps/rabbitmq_stomp/test/src/rabbit_stomp_client.erl
new file mode 100644
index 0000000000..739512e3b3
--- /dev/null
+++ b/deps/rabbitmq_stomp/test/src/rabbit_stomp_client.erl
@@ -0,0 +1,75 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+%% The stupidest client imaginable, just for testing.
+
+-module(rabbit_stomp_client).
+
+-export([connect/1, connect/2, connect/4, connect/5, disconnect/1, send/2, send/3, send/4, recv/1]).
+
+-include("rabbit_stomp_frame.hrl").
+
+-define(TIMEOUT, 1000). % milliseconds
+
+connect(Port) -> connect0([], "guest", "guest", Port, []).
+connect(V, Port) -> connect0([{"accept-version", V}], "guest", "guest", Port, []).
+connect(V, Login, Pass, Port) -> connect0([{"accept-version", V}], Login, Pass, Port, []).
+connect(V, Login, Pass, Port, Headers) -> connect0([{"accept-version", V}], Login, Pass, Port, Headers).
+
+connect0(Version, Login, Pass, Port, Headers) ->
+    %% The STOMP default port is 61613, but it sits in the middle of the
+    %% ephemeral ports range on many operating systems and may already be
+    %% in use, so the caller passes in a port chosen by the test suite.
+ {ok, Sock} = gen_tcp:connect(localhost, Port, [{active, false}, binary]),
+ Client0 = recv_state(Sock),
+ send(Client0, "CONNECT", [{"login", Login},
+ {"passcode", Pass} | Version] ++ Headers),
+ {#stomp_frame{command = "CONNECTED"}, Client1} = recv(Client0),
+ {ok, Client1}.
+
+disconnect(Client = {Sock, _}) ->
+ send(Client, "DISCONNECT"),
+ gen_tcp:close(Sock).
+
+send(Client, Command) ->
+ send(Client, Command, []).
+
+send(Client, Command, Headers) ->
+ send(Client, Command, Headers, []).
+
+send({Sock, _}, Command, Headers, Body) ->
+ Frame = rabbit_stomp_frame:serialize(
+ #stomp_frame{command = list_to_binary(Command),
+ headers = Headers,
+ body_iolist = Body}),
+ gen_tcp:send(Sock, Frame).
+
+recv_state(Sock) ->
+ {Sock, []}.
+
+recv({_Sock, []} = Client) ->
+ recv(Client, rabbit_stomp_frame:initial_state(), 0);
+recv({Sock, [Frame | Frames]}) ->
+ {Frame, {Sock, Frames}}.
+
+recv(Client = {Sock, _}, FrameState, Length) ->
+ {ok, Payload} = gen_tcp:recv(Sock, Length, ?TIMEOUT),
+ parse(Payload, Client, FrameState, Length).
+
+parse(Payload, Client = {Sock, FramesRev}, FrameState, Length) ->
+ case rabbit_stomp_frame:parse(Payload, FrameState) of
+ {ok, Frame, <<>>} ->
+ recv({Sock, lists:reverse([Frame | FramesRev])});
+ {ok, Frame, <<"\n">>} ->
+ recv({Sock, lists:reverse([Frame | FramesRev])});
+ {ok, Frame, Rest} ->
+ parse(Rest, {Sock, [Frame | FramesRev]},
+ rabbit_stomp_frame:initial_state(), Length);
+ {more, NewState} ->
+ recv(Client, NewState, 0)
+ end.
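+
+%% Usage sketch (port and destination illustrative):
+%%
+%%   {ok, Client} = rabbit_stomp_client:connect(StompPort),
+%%   rabbit_stomp_client:send(Client, "SUBSCRIBE",
+%%                            [{"destination", "/exchange/amq.fanout"}]),
+%%   rabbit_stomp_client:send(Client, "SEND",
+%%                            [{"destination", "/exchange/amq.fanout"}],
+%%                            ["hi"]),
+%%   {#stomp_frame{command = "MESSAGE"}, _} = rabbit_stomp_client:recv(Client),
+%%   rabbit_stomp_client:disconnect(Client).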
diff --git a/deps/rabbitmq_stomp/test/src/rabbit_stomp_publish_test.erl b/deps/rabbitmq_stomp/test/src/rabbit_stomp_publish_test.erl
new file mode 100644
index 0000000000..6b5b9298fa
--- /dev/null
+++ b/deps/rabbitmq_stomp/test/src/rabbit_stomp_publish_test.erl
@@ -0,0 +1,80 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_stomp_publish_test).
+
+-export([run/0]).
+
+-include("rabbit_stomp_frame.hrl").
+
+-define(DESTINATION, "/queue/test").
+
+-define(MICROS_PER_UPDATE, 5000000).
+-define(MICROS_PER_UPDATE_MSG, 100000).
+-define(MICROS_PER_SECOND, 1000000).
+
+%% A very simple publish-and-consume-as-fast-as-you-can test.
+
+run() ->
+ [put(K, 0) || K <- [sent, recd, last_sent, last_recd]],
+ put(last_ts, erlang:monotonic_time()),
+ {ok, Pub} = rabbit_stomp_client:connect(),
+ {ok, Recv} = rabbit_stomp_client:connect(),
+ Self = self(),
+ spawn(fun() -> publish(Self, Pub, 0, erlang:monotonic_time()) end),
+ rabbit_stomp_client:send(
+ Recv, "SUBSCRIBE", [{"destination", ?DESTINATION}]),
+ spawn(fun() -> recv(Self, Recv, 0, erlang:monotonic_time()) end),
+ report().
+
+report() ->
+ receive
+ {sent, C} -> put(sent, C);
+ {recd, C} -> put(recd, C)
+ end,
+ Diff = erlang:convert_time_unit(
+ erlang:monotonic_time() - get(last_ts), native, microseconds),
+ case Diff > ?MICROS_PER_UPDATE of
+ true -> S = get(sent) - get(last_sent),
+ R = get(recd) - get(last_recd),
+ put(last_sent, get(sent)),
+ put(last_recd, get(recd)),
+ put(last_ts, erlang:monotonic_time()),
+ io:format("Send ~p msg/s | Recv ~p msg/s~n",
+ [trunc(S * ?MICROS_PER_SECOND / Diff),
+ trunc(R * ?MICROS_PER_SECOND / Diff)]);
+ false -> ok
+ end,
+ report().
+
+publish(Owner, Client, Count, TS) ->
+ rabbit_stomp_client:send(
+ Client, "SEND", [{"destination", ?DESTINATION}],
+ [integer_to_list(Count)]),
+ Diff = erlang:convert_time_unit(
+ erlang:monotonic_time() - TS, native, microseconds),
+ case Diff > ?MICROS_PER_UPDATE_MSG of
+ true -> Owner ! {sent, Count + 1},
+ publish(Owner, Client, Count + 1,
+ erlang:monotonic_time());
+ false -> publish(Owner, Client, Count + 1, TS)
+ end.
+
+recv(Owner, Client0, Count, TS) ->
+ {#stomp_frame{body_iolist = Body}, Client1} =
+ rabbit_stomp_client:recv(Client0),
+ BodyInt = list_to_integer(binary_to_list(iolist_to_binary(Body))),
+ Count = BodyInt,
+ Diff = erlang:convert_time_unit(
+ erlang:monotonic_time() - TS, native, microseconds),
+ case Diff > ?MICROS_PER_UPDATE_MSG of
+ true -> Owner ! {recd, Count + 1},
+ recv(Owner, Client1, Count + 1,
+ erlang:monotonic_time());
+ false -> recv(Owner, Client1, Count + 1, TS)
+ end.
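+
+%% Entry point: rabbit_stomp_publish_test:run(). Note that report/0 loops
+%% forever printing send/receive rates, so run/0 never returns.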
+
diff --git a/deps/rabbitmq_stomp/test/src/test.config b/deps/rabbitmq_stomp/test/src/test.config
new file mode 100644
index 0000000000..5968824996
--- /dev/null
+++ b/deps/rabbitmq_stomp/test/src/test.config
@@ -0,0 +1,13 @@
+[{rabbitmq_stomp, [{default_user, []},
+ {ssl_cert_login, true},
+ {tcp_listeners, [5673]},
+ {ssl_listeners, [5674]}
+ ]},
+ {rabbit, [{ssl_options, [{cacertfile,"%%CERTS_DIR%%/testca/cacert.pem"},
+ {certfile,"%%CERTS_DIR%%/server/cert.pem"},
+ {keyfile,"%%CERTS_DIR%%/server/key.pem"},
+ {verify,verify_peer},
+ {fail_if_no_peer_cert,true}
+ ]}
+ ]}
+].
diff --git a/deps/rabbitmq_stomp/test/topic_SUITE.erl b/deps/rabbitmq_stomp/test/topic_SUITE.erl
new file mode 100644
index 0000000000..4a6421a326
--- /dev/null
+++ b/deps/rabbitmq_stomp/test/topic_SUITE.erl
@@ -0,0 +1,170 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(topic_SUITE).
+
+-compile(export_all).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include("rabbit_stomp.hrl").
+-include("rabbit_stomp_frame.hrl").
+-include("rabbit_stomp_headers.hrl").
+
+all() ->
+ [{group, list_to_atom("version_" ++ V)} || V <- ?SUPPORTED_VERSIONS].
+
+groups() ->
+ Tests = [
+ publish_topic_authorisation,
+ subscribe_topic_authorisation,
+ change_default_topic_exchange
+ ],
+
+ [{list_to_atom("version_" ++ V), [sequence], Tests}
+ || V <- ?SUPPORTED_VERSIONS].
+
+init_per_suite(Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config,
+ [{rmq_nodename_suffix, ?MODULE}]),
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps()).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config,
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_group(Group, Config) ->
+ Version = string:sub_string(atom_to_list(Group), 9),
+ rabbit_ct_helpers:set_config(Config, [{version, Version}]).
+
+end_per_group(_Group, Config) -> Config.
+
+init_per_testcase(_TestCase, Config) ->
+ Version = ?config(version, Config),
+ StompPort = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_stomp),
+ {ok, Connection} = amqp_connection:start(#amqp_params_direct{
+ node = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename)
+ }),
+ {ok, Channel} = amqp_connection:open_channel(Connection),
+ {ok, Client} = rabbit_stomp_client:connect(Version, StompPort),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {amqp_connection, Connection},
+ {amqp_channel, Channel},
+ {stomp_client, Client}
+ ]),
+ init_per_testcase0(Config1).
+
+end_per_testcase(_TestCase, Config) ->
+ Connection = ?config(amqp_connection, Config),
+ Channel = ?config(amqp_channel, Config),
+ Client = ?config(stomp_client, Config),
+ rabbit_stomp_client:disconnect(Client),
+ amqp_channel:close(Channel),
+ amqp_connection:close(Connection),
+ end_per_testcase0(Config).
+
+init_per_testcase0(Config) ->
+ rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_auth_backend_internal, add_user,
+ [<<"user">>, <<"pass">>, <<"acting-user">>]),
+ rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_auth_backend_internal, set_permissions, [
+ <<"user">>, <<"/">>, <<".*">>, <<".*">>, <<".*">>, <<"acting-user">>]),
+ rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_auth_backend_internal, set_topic_permissions, [
+ <<"user">>, <<"/">>, <<"amq.topic">>, <<"^{username}.Authorised">>, <<"^{username}.Authorised">>, <<"acting-user">>]),
+ Version = ?config(version, Config),
+ StompPort = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_stomp),
+ {ok, ClientFoo} = rabbit_stomp_client:connect(Version, "user", "pass", StompPort),
+ rabbit_ct_helpers:set_config(Config, [{client_foo, ClientFoo}]).
+
+end_per_testcase0(Config) ->
+ ClientFoo = ?config(client_foo, Config),
+ rabbit_stomp_client:disconnect(ClientFoo),
+ rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_auth_backend_internal, delete_user,
+ [<<"user">>, <<"acting-user">>]),
+ Config.
+
+publish_topic_authorisation(Config) ->
+ ClientFoo = ?config(client_foo, Config),
+
+ AuthorisedTopic = "/topic/user.AuthorisedTopic",
+ RestrictedTopic = "/topic/user.RestrictedTopic",
+
+ %% send on authorised topic
+ rabbit_stomp_client:send(
+ ClientFoo, "SUBSCRIBE", [{"destination", AuthorisedTopic}]),
+
+ rabbit_stomp_client:send(
+ ClientFoo, "SEND", [{"destination", AuthorisedTopic}], ["authorised hello"]),
+
+ {ok, _Client1, _, Body} = stomp_receive(ClientFoo, "MESSAGE"),
+ [<<"authorised hello">>] = Body,
+
+ %% send on restricted topic
+ rabbit_stomp_client:send(
+ ClientFoo, "SEND", [{"destination", RestrictedTopic}], ["hello"]),
+ {ok, _Client2, Hdrs2, _} = stomp_receive(ClientFoo, "ERROR"),
+ "access_refused" = proplists:get_value("message", Hdrs2),
+ ok.
+
+subscribe_topic_authorisation(Config) ->
+ ClientFoo = ?config(client_foo, Config),
+
+ AuthorisedTopic = "/topic/user.AuthorisedTopic",
+ RestrictedTopic = "/topic/user.RestrictedTopic",
+
+ %% subscribe to authorised topic
+ rabbit_stomp_client:send(
+ ClientFoo, "SUBSCRIBE", [{"destination", AuthorisedTopic}]),
+
+ rabbit_stomp_client:send(
+ ClientFoo, "SEND", [{"destination", AuthorisedTopic}], ["authorised hello"]),
+
+ {ok, _Client1, _, Body} = stomp_receive(ClientFoo, "MESSAGE"),
+ [<<"authorised hello">>] = Body,
+
+ %% subscribe to restricted topic
+ rabbit_stomp_client:send(
+ ClientFoo, "SUBSCRIBE", [{"destination", RestrictedTopic}]),
+ {ok, _Client2, Hdrs2, _} = stomp_receive(ClientFoo, "ERROR"),
+ "access_refused" = proplists:get_value("message", Hdrs2),
+ ok.
+
+change_default_topic_exchange(Config) ->
+ Channel = ?config(amqp_channel, Config),
+ ClientFoo = ?config(client_foo, Config),
+ Ex = <<"my-topic-exchange">>,
+ AuthorisedTopic = "/topic/user.AuthorisedTopic",
+
+ Declare = #'exchange.declare'{exchange = Ex, type = <<"topic">>},
+ #'exchange.declare_ok'{} = amqp_channel:call(Channel, Declare),
+
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0, application, set_env, [rabbitmq_stomp, default_topic_exchange, Ex]),
+
+ rabbit_stomp_client:send(
+ ClientFoo, "SUBSCRIBE", [{"destination", AuthorisedTopic}]),
+
+ rabbit_stomp_client:send(
+ ClientFoo, "SEND", [{"destination", AuthorisedTopic}], ["ohai there"]),
+
+ {ok, _Client1, _, Body} = stomp_receive(ClientFoo, "MESSAGE"),
+ [<<"ohai there">>] = Body,
+
+ Delete = #'exchange.delete'{exchange = Ex},
+ #'exchange.delete_ok'{} = amqp_channel:call(Channel, Delete),
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0, application, unset_env, [rabbitmq_stomp, default_topic_exchange]),
+ ok.
+
+
+stomp_receive(Client, Command) ->
+ {#stomp_frame{command = Command,
+ headers = Hdrs,
+ body_iolist = Body}, Client1} =
+ rabbit_stomp_client:recv(Client),
+ {ok, Client1, Hdrs, Body}.
+
diff --git a/deps/rabbitmq_stomp/test/util_SUITE.erl b/deps/rabbitmq_stomp/test/util_SUITE.erl
new file mode 100644
index 0000000000..89d9d9e37e
--- /dev/null
+++ b/deps/rabbitmq_stomp/test/util_SUITE.erl
@@ -0,0 +1,242 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(util_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include_lib("amqp_client/include/rabbit_routing_prefixes.hrl").
+-include("rabbit_stomp_frame.hrl").
+-compile(export_all).
+
+all() -> [
+ longstr_field,
+ message_properties,
+ message_headers,
+ minimal_message_headers_with_no_custom,
+ headers_post_process,
+ headers_post_process_noop_replyto,
+ headers_post_process_noop2,
+ negotiate_version_both_empty,
+ negotiate_version_no_common,
+ negotiate_version_simple_common,
+ negotiate_version_two_choice_common,
+ negotiate_version_two_choice_common_out_of_order,
+ negotiate_version_two_choice_big_common,
+ negotiate_version_choice_mismatched_length,
+ negotiate_version_choice_duplicates,
+ trim_headers,
+ ack_mode_auto,
+ ack_mode_auto_default,
+ ack_mode_client,
+ ack_mode_client_individual,
+ consumer_tag_id,
+ consumer_tag_destination,
+ consumer_tag_invalid,
+ parse_valid_message_id,
+ parse_invalid_message_id
+ ].
+
+
+%%--------------------------------------------------------------------
+%% Header Processing Tests
+%%--------------------------------------------------------------------
+
+longstr_field(_) ->
+ {<<"ABC">>, longstr, <<"DEF">>} =
+ rabbit_stomp_util:longstr_field("ABC", "DEF").
+
+message_properties(_) ->
+ Headers = [
+ {"content-type", "text/plain"},
+ {"content-encoding", "UTF-8"},
+ {"persistent", "true"},
+ {"priority", "1"},
+ {"correlation-id", "123"},
+ {"reply-to", "something"},
+ {"expiration", "my-expiration"},
+ {"amqp-message-id", "M123"},
+ {"timestamp", "123456"},
+ {"type", "freshly-squeezed"},
+ {"user-id", "joe"},
+ {"app-id", "joe's app"},
+ {"str", "foo"},
+ {"int", "123"}
+ ],
+
+ #'P_basic'{
+ content_type = <<"text/plain">>,
+ content_encoding = <<"UTF-8">>,
+ delivery_mode = 2,
+ priority = 1,
+ correlation_id = <<"123">>,
+ reply_to = <<"something">>,
+ expiration = <<"my-expiration">>,
+ message_id = <<"M123">>,
+ timestamp = 123456,
+ type = <<"freshly-squeezed">>,
+ user_id = <<"joe">>,
+ app_id = <<"joe's app">>,
+ headers = [{<<"str">>, longstr, <<"foo">>},
+ {<<"int">>, longstr, <<"123">>}]
+ } =
+ rabbit_stomp_util:message_properties(#stomp_frame{headers = Headers}).
+
+message_headers(_) ->
+ Properties = #'P_basic'{
+ headers = [{<<"str">>, longstr, <<"foo">>},
+ {<<"int">>, signedint, 123}],
+ content_type = <<"text/plain">>,
+ content_encoding = <<"UTF-8">>,
+ delivery_mode = 2,
+ priority = 1,
+ correlation_id = 123,
+ reply_to = <<"something">>,
+ message_id = <<"M123">>,
+ timestamp = 123456,
+ type = <<"freshly-squeezed">>,
+ user_id = <<"joe">>,
+ app_id = <<"joe's app">>},
+
+ Headers = rabbit_stomp_util:message_headers(Properties),
+
+ Expected = [
+ {"content-type", "text/plain"},
+ {"content-encoding", "UTF-8"},
+ {"persistent", "true"},
+ {"priority", "1"},
+ {"correlation-id", "123"},
+ {"reply-to", "something"},
+ {"expiration", "my-expiration"},
+ {"amqp-message-id", "M123"},
+ {"timestamp", "123456"},
+ {"type", "freshly-squeezed"},
+ {"user-id", "joe"},
+ {"app-id", "joe's app"},
+ {"str", "foo"},
+ {"int", "123"}
+ ],
+
+ [] = lists:subtract(Headers, Expected).
+
+minimal_message_headers_with_no_custom(_) ->
+ Properties = #'P_basic'{},
+
+ Headers = rabbit_stomp_util:message_headers(Properties),
+ Expected = [
+ {"content-type", "text/plain"},
+ {"content-encoding", "UTF-8"},
+ {"amqp-message-id", "M123"}
+ ],
+
+ [] = lists:subtract(Headers, Expected).
+
+headers_post_process(_) ->
+ Headers = [{"header1", "1"},
+ {"header2", "12"},
+ {"reply-to", "something"}],
+ Expected = [{"header1", "1"},
+ {"header2", "12"},
+ {"reply-to", "/reply-queue/something"}],
+ [] = lists:subtract(
+ rabbit_stomp_util:headers_post_process(Headers), Expected).
+
+headers_post_process_noop_replyto(_) ->
+ [begin
+ Headers = [{"reply-to", Prefix ++ "/something"}],
+ Headers = rabbit_stomp_util:headers_post_process(Headers)
+ end || Prefix <- rabbit_routing_util:dest_prefixes()].
+
+headers_post_process_noop2(_) ->
+ Headers = [{"header1", "1"},
+ {"header2", "12"}],
+ Expected = [{"header1", "1"},
+ {"header2", "12"}],
+ [] = lists:subtract(
+ rabbit_stomp_util:headers_post_process(Headers), Expected).
+
+negotiate_version_both_empty(_) ->
+ {error, no_common_version} = rabbit_stomp_util:negotiate_version([],[]).
+
+negotiate_version_no_common(_) ->
+ {error, no_common_version} =
+ rabbit_stomp_util:negotiate_version(["1.2"],["1.3"]).
+
+negotiate_version_simple_common(_) ->
+ {ok, "1.2"} =
+ rabbit_stomp_util:negotiate_version(["1.2"],["1.2"]).
+
+negotiate_version_two_choice_common(_) ->
+ {ok, "1.3"} =
+ rabbit_stomp_util:negotiate_version(["1.2", "1.3"],["1.2", "1.3"]).
+
+negotiate_version_two_choice_common_out_of_order(_) ->
+ {ok, "1.3"} =
+ rabbit_stomp_util:negotiate_version(["1.3", "1.2"],["1.2", "1.3"]).
+
+negotiate_version_two_choice_big_common(_) ->
+ {ok, "1.20.23"} =
+ rabbit_stomp_util:negotiate_version(["1.20.23", "1.30.456"],
+ ["1.20.23", "1.30.457"]).
+negotiate_version_choice_mismatched_length(_) ->
+ {ok, "1.2.3"} =
+ rabbit_stomp_util:negotiate_version(["1.2", "1.2.3"],
+ ["1.2.3", "1.2"]).
+negotiate_version_choice_duplicates(_) ->
+ {ok, "1.2"} =
+ rabbit_stomp_util:negotiate_version(["1.2", "1.2"],
+ ["1.2", "1.2"]).
+trim_headers(_) ->
+ #stomp_frame{headers = [{"one", "foo"}, {"two", "baz "}]} =
+ rabbit_stomp_util:trim_headers(
+ #stomp_frame{headers = [{"one", " foo"}, {"two", " baz "}]}).
+
+%%--------------------------------------------------------------------
+%% Frame Parsing Tests
+%%--------------------------------------------------------------------
+
+ack_mode_auto(_) ->
+ Frame = #stomp_frame{headers = [{"ack", "auto"}]},
+ {auto, _} = rabbit_stomp_util:ack_mode(Frame).
+
+ack_mode_auto_default(_) ->
+ Frame = #stomp_frame{headers = []},
+ {auto, _} = rabbit_stomp_util:ack_mode(Frame).
+
+ack_mode_client(_) ->
+ Frame = #stomp_frame{headers = [{"ack", "client"}]},
+ {client, true} = rabbit_stomp_util:ack_mode(Frame).
+
+ack_mode_client_individual(_) ->
+ Frame = #stomp_frame{headers = [{"ack", "client-individual"}]},
+ {client, false} = rabbit_stomp_util:ack_mode(Frame).
+
+consumer_tag_id(_) ->
+ Frame = #stomp_frame{headers = [{"id", "foo"}]},
+ {ok, <<"T_foo">>, _} = rabbit_stomp_util:consumer_tag(Frame).
+
+consumer_tag_destination(_) ->
+ Frame = #stomp_frame{headers = [{"destination", "foo"}]},
+ {ok, <<"Q_foo">>, _} = rabbit_stomp_util:consumer_tag(Frame).
+
+consumer_tag_invalid(_) ->
+ Frame = #stomp_frame{headers = []},
+ {error, missing_destination_header} = rabbit_stomp_util:consumer_tag(Frame).
+
+%%--------------------------------------------------------------------
+%% Message ID Parsing Tests
+%%--------------------------------------------------------------------
+
+parse_valid_message_id(_) ->
+ {ok, {<<"bar">>, "abc", 123}} =
+ rabbit_stomp_util:parse_message_id("bar@@abc@@123").
+
+parse_invalid_message_id(_) ->
+ {error, invalid_message_id} =
+ rabbit_stomp_util:parse_message_id("blah").
+
diff --git a/deps/rabbitmq_stream/.gitignore b/deps/rabbitmq_stream/.gitignore
new file mode 100644
index 0000000000..14abe16374
--- /dev/null
+++ b/deps/rabbitmq_stream/.gitignore
@@ -0,0 +1,56 @@
+.eunit
+*.o
+*.beam
+*.plt
+erl_crash.dump
+.concrete/DEV_MODE
+
+# rebar 2.x
+.rebar
+rel/example_project
+ebin/*.beam
+deps
+
+# rebar 3
+.rebar3
+_build/
+_checkouts/
+
+erl_crash.dump
+.sw?
+.*.sw?
+*.beam
+/.erlang.mk/
+/cover/
+/deps/
+/ebin/
+/logs/
+/plugins/
+/xrefr
+elvis
+callgrind*
+ct.coverdata
+test/ct.cover.spec
+_build
+
+rabbitmq_stream.d
+*.plt
+*.d
+
+*.jar
+
+
+*~
+.sw?
+.*.sw?
+*.beam
+*.class
+*.dat
+*.dump
+*.iml
+*.ipr
+*.iws
+.DS_Store
+\#~
+/.idea/
+/deps/
diff --git a/deps/rabbitmq_stream/CODE_OF_CONDUCT.md b/deps/rabbitmq_stream/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000000..1f6ef1c576
--- /dev/null
+++ b/deps/rabbitmq_stream/CODE_OF_CONDUCT.md
@@ -0,0 +1,44 @@
+# Contributor Code of Conduct
+
+As contributors and maintainers of this project, and in the interest of fostering an open
+and welcoming community, we pledge to respect all people who contribute through reporting
+issues, posting feature requests, updating documentation, submitting pull requests or
+patches, and other activities.
+
+We are committed to making participation in this project a harassment-free experience for
+everyone, regardless of level of experience, gender, gender identity and expression,
+sexual orientation, disability, personal appearance, body size, race, ethnicity, age,
+religion, or nationality.
+
+Examples of unacceptable behavior by participants include:
+
+ * The use of sexualized language or imagery
+ * Personal attacks
+ * Trolling or insulting/derogatory comments
+ * Public or private harassment
+ * Publishing others' private information, such as physical or electronic addresses,
+ without explicit permission
+ * Other unethical or unprofessional conduct
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments,
+commits, code, wiki edits, issues, and other contributions that are not aligned to this
+Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors
+that they deem inappropriate, threatening, offensive, or harmful.
+
+By adopting this Code of Conduct, project maintainers commit themselves to fairly and
+consistently applying these principles to every aspect of managing this project. Project
+maintainers who do not follow or enforce the Code of Conduct may be permanently removed
+from the project team.
+
+This Code of Conduct applies both within project spaces and in public spaces when an
+individual is representing the project or its community.
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by
+contacting a project maintainer at [info@rabbitmq.com](mailto:info@rabbitmq.com). All complaints will
+be reviewed and investigated and will result in a response that is deemed necessary and
+appropriate to the circumstances. Maintainers are obligated to maintain confidentiality
+with regard to the reporter of an incident.
+
+This Code of Conduct is adapted from the
+[Contributor Covenant](http://contributor-covenant.org), version 1.3.0, available at
+[contributor-covenant.org/version/1/3/0/](http://contributor-covenant.org/version/1/3/0/)
diff --git a/deps/rabbitmq_stream/CONTRIBUTING.md b/deps/rabbitmq_stream/CONTRIBUTING.md
new file mode 100644
index 0000000000..45bbcbe62e
--- /dev/null
+++ b/deps/rabbitmq_stream/CONTRIBUTING.md
@@ -0,0 +1,38 @@
+## Overview
+
+RabbitMQ projects use pull requests to discuss, collaborate on and accept code contributions.
+Pull requests are the primary place for discussing code changes.
+
+## How to Contribute
+
+The process is fairly standard:
+
+ * Fork the repository or repositories you plan on contributing to
+ * Clone [RabbitMQ umbrella repository](https://github.com/rabbitmq/rabbitmq-public-umbrella)
+ * `cd umbrella`, `make co`
+ * Create a branch with a descriptive name in the relevant repositories
+ * Make your changes, run tests, commit with a [descriptive message](http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html), push to your fork
+ * Submit pull requests with an explanation of what has been changed and **why**
+ * Submit a filled out and signed [Contributor Agreement](https://github.com/rabbitmq/ca#how-to-submit) if needed (see below)
+ * Be patient. We will get to your pull request eventually
+
+If what you are going to work on is a substantial change, please first ask the core team
+for their opinion on the [RabbitMQ mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
+
+
+## Code of Conduct
+
+See [CODE_OF_CONDUCT.md](./CODE_OF_CONDUCT.md).
+
+
+## Contributor Agreement
+
+If you want to contribute a non-trivial change, please submit a signed copy of our
+[Contributor Agreement](https://github.com/rabbitmq/ca#how-to-submit) around the time
+you submit your pull request. This will make it much easier (in some cases, possible)
+for the RabbitMQ team at Pivotal to merge your contribution.
+
+
+## Where to Ask Questions
+
+If something isn't clear, feel free to ask on our [mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
diff --git a/deps/rabbitmq_stream/LICENSE b/deps/rabbitmq_stream/LICENSE
new file mode 100644
index 0000000000..669a2bf450
--- /dev/null
+++ b/deps/rabbitmq_stream/LICENSE
@@ -0,0 +1,5 @@
+This package, the RabbitMQ server, is licensed under the MPL 2.0. For the
+MPL 2.0, please see LICENSE-MPL-RabbitMQ.
+
+If you have any questions regarding licensing, please contact us at
+info@rabbitmq.com. \ No newline at end of file
diff --git a/deps/rabbitmq_stream/LICENSE-MPL-RabbitMQ b/deps/rabbitmq_stream/LICENSE-MPL-RabbitMQ
new file mode 100644
index 0000000000..c4b20dbc66
--- /dev/null
+++ b/deps/rabbitmq_stream/LICENSE-MPL-RabbitMQ
@@ -0,0 +1,370 @@
+Mozilla Public License Version 2.0
+==================================
+
+1. Definitions
+--------------
+
+1.1. "Contributor"
+ means each individual or legal entity that creates, contributes to
+ the creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+ means the combination of the Contributions of others (if any) used
+ by a Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+ means Source Code Form to which the initial Contributor has attached
+ the notice in Exhibit A, the Executable Form of such Source Code
+ Form, and Modifications of such Source Code Form, in each case
+ including portions thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ (a) that the initial Contributor has attached the notice described
+ in Exhibit B to the Covered Software; or
+
+ (b) that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the
+ terms of a Secondary License.
+
+1.6. "Executable Form"
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+ means a work that combines Covered Software with other material, in
+ a separate file or files, that is not Covered Software.
+
+1.8. "License"
+ means this document.
+
+1.9. "Licensable"
+ means having the right to grant, to the maximum extent possible,
+ whether at the time of the initial grant or subsequently, any and
+ all of the rights conveyed by this License.
+
+1.10. "Modifications"
+ means any of the following:
+
+ (a) any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered
+ Software; or
+
+ (b) any new file in Source Code Form that contains any Covered
+ Software.
+
+1.11. "Patent Claims" of a Contributor
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the
+ License, by the making, using, selling, offering for sale, having
+ made, import, or transfer of either its Contributions or its
+ Contributor Version.
+
+1.12. "Secondary License"
+ means either the GNU General Public License, Version 2.0, the GNU
+ Lesser General Public License, Version 2.1, the GNU Affero General
+ Public License, Version 3.0, or any later versions of those
+ licenses.
+
+1.13. "Source Code Form"
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that
+ controls, is controlled by, or is under common control with You. For
+ purposes of this definition, "control" means (a) the power, direct
+ or indirect, to cause the direction or management of such entity,
+ whether by contract or otherwise, or (b) ownership of more than
+ fifty percent (50%) of the outstanding shares or beneficial
+ ownership of such entity.
+
+2. License Grants and Conditions
+--------------------------------
+
+2.1. Grants
+
+Each Contributor hereby grants You a world-wide, royalty-free,
+non-exclusive license:
+
+(a) under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+(b) under Patent Claims of such Contributor to make, use, sell, offer
+ for sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+The licenses granted in Section 2.1 with respect to any Contribution
+become effective for each Contribution on the date the Contributor first
+distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+The licenses granted in this Section 2 are the only rights granted under
+this License. No additional rights or licenses will be implied from the
+distribution or licensing of Covered Software under this License.
+Notwithstanding Section 2.1(b) above, no patent license is granted by a
+Contributor:
+
+(a) for any code that a Contributor has removed from Covered Software;
+ or
+
+(b) for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+(c) under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+This License does not grant any rights in the trademarks, service marks,
+or logos of any Contributor (except as may be necessary to comply with
+the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+No Contributor makes additional grants as a result of Your choice to
+distribute the Covered Software under a subsequent version of this
+License (see Section 10.2) or under the terms of a Secondary License (if
+permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+Each Contributor represents that the Contributor believes its
+Contributions are its original creation(s) or it has sufficient rights
+to grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+This License is not intended to limit any rights You have under
+applicable copyright doctrines of fair use, fair dealing, or other
+equivalents.
+
+2.7. Conditions
+
+Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
+in Section 2.1.
+
+3. Responsibilities
+-------------------
+
+3.1. Distribution of Source Form
+
+All distribution of Covered Software in Source Code Form, including any
+Modifications that You create or to which You contribute, must be under
+the terms of this License. You must inform recipients that the Source
+Code Form of the Covered Software is governed by the terms of this
+License, and how they can obtain a copy of this License. You may not
+attempt to alter or restrict the recipients' rights in the Source Code
+Form.
+
+3.2. Distribution of Executable Form
+
+If You distribute Covered Software in Executable Form then:
+
+(a) such Covered Software must also be made available in Source Code
+ Form, as described in Section 3.1, and You must inform recipients of
+ the Executable Form how they can obtain a copy of such Source Code
+ Form by reasonable means in a timely manner, at a charge no more
+ than the cost of distribution to the recipient; and
+
+(b) You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter
+ the recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+You may create and distribute a Larger Work under terms of Your choice,
+provided that You also comply with the requirements of this License for
+the Covered Software. If the Larger Work is a combination of Covered
+Software with a work governed by one or more Secondary Licenses, and the
+Covered Software is not Incompatible With Secondary Licenses, this
+License permits You to additionally distribute such Covered Software
+under the terms of such Secondary License(s), so that the recipient of
+the Larger Work may, at their option, further distribute the Covered
+Software under the terms of either this License or such Secondary
+License(s).
+
+3.4. Notices
+
+You may not remove or alter the substance of any license notices
+(including copyright notices, patent notices, disclaimers of warranty,
+or limitations of liability) contained within the Source Code Form of
+the Covered Software, except that You may alter any license notices to
+the extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+You may choose to offer, and to charge a fee for, warranty, support,
+indemnity or liability obligations to one or more recipients of Covered
+Software. However, You may do so only on Your own behalf, and not on
+behalf of any Contributor. You must make it absolutely clear that any
+such warranty, support, indemnity, or liability obligation is offered by
+You alone, and You hereby agree to indemnify every Contributor for any
+liability incurred by such Contributor as a result of warranty, support,
+indemnity or liability terms You offer. You may include additional
+disclaimers of warranty and limitations of liability specific to any
+jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+---------------------------------------------------
+
+If it is impossible for You to comply with any of the terms of this
+License with respect to some or all of the Covered Software due to
+statute, judicial order, or regulation then You must: (a) comply with
+the terms of this License to the maximum extent possible; and (b)
+describe the limitations and the code they affect. Such description must
+be placed in a text file included with all distributions of the Covered
+Software under this License. Except to the extent prohibited by statute
+or regulation, such description must be sufficiently detailed for a
+recipient of ordinary skill to be able to understand it.
+
+5. Termination
+--------------
+
+5.1. The rights granted under this License will terminate automatically
+if You fail to comply with any of its terms. However, if You become
+compliant, then the rights granted under this License from a particular
+Contributor are reinstated (a) provisionally, unless and until such
+Contributor explicitly and finally terminates Your grants, and (b) on an
+ongoing basis, if such Contributor fails to notify You of the
+non-compliance by some reasonable means prior to 60 days after You have
+come back into compliance. Moreover, Your grants from a particular
+Contributor are reinstated on an ongoing basis if such Contributor
+notifies You of the non-compliance by some reasonable means, this is the
+first time You have received notice of non-compliance with this License
+from such Contributor, and You become compliant prior to 30 days after
+Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+infringement claim (excluding declaratory judgment actions,
+counter-claims, and cross-claims) alleging that a Contributor Version
+directly or indirectly infringes any patent, then the rights granted to
+You by any and all Contributors for the Covered Software under Section
+2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all
+end user license agreements (excluding distributors and resellers) which
+have been validly granted by You or Your distributors under this License
+prior to termination shall survive termination.
+
+************************************************************************
+* *
+* 6. Disclaimer of Warranty *
+* ------------------------- *
+* *
+* Covered Software is provided under this License on an "as is" *
+* basis, without warranty of any kind, either expressed, implied, or *
+* statutory, including, without limitation, warranties that the *
+* Covered Software is free of defects, merchantable, fit for a *
+* particular purpose or non-infringing. The entire risk as to the *
+* quality and performance of the Covered Software is with You. *
+* Should any Covered Software prove defective in any respect, You *
+* (not any Contributor) assume the cost of any necessary servicing, *
+* repair, or correction. This disclaimer of warranty constitutes an *
+* essential part of this License. No use of any Covered Software is *
+* authorized under this License except under this disclaimer. *
+* *
+************************************************************************
+
+************************************************************************
+* *
+* 7. Limitation of Liability *
+* -------------------------- *
+* *
+* Under no circumstances and under no legal theory, whether tort *
+* (including negligence), contract, or otherwise, shall any *
+* Contributor, or anyone who distributes Covered Software as *
+* permitted above, be liable to You for any direct, indirect, *
+* special, incidental, or consequential damages of any character *
+* including, without limitation, damages for lost profits, loss of *
+* goodwill, work stoppage, computer failure or malfunction, or any *
+* and all other commercial damages or losses, even if such party *
+* shall have been informed of the possibility of such damages. This *
+* limitation of liability shall not apply to liability for death or *
+* personal injury resulting from such party's negligence to the *
+* extent applicable law prohibits such limitation. Some *
+* jurisdictions do not allow the exclusion or limitation of *
+* incidental or consequential damages, so this exclusion and *
+* limitation may not apply to You. *
+* *
+************************************************************************
+
+8. Litigation
+-------------
+
+Any litigation relating to this License may be brought only in the
+courts of a jurisdiction where the defendant maintains its principal
+place of business and such litigation shall be governed by laws of that
+jurisdiction, without reference to its conflict-of-law provisions.
+Nothing in this Section shall prevent a party's ability to bring
+cross-claims or counter-claims.
+
+9. Miscellaneous
+----------------
+
+This License represents the complete agreement concerning the subject
+matter hereof. If any provision of this License is held to be
+unenforceable, such provision shall be reformed only to the extent
+necessary to make it enforceable. Any law or regulation which provides
+that the language of a contract shall be construed against the drafter
+shall not be used to construe this License against a Contributor.
+
+10. Versions of the License
+---------------------------
+
+10.1. New Versions
+
+Mozilla Foundation is the license steward. Except as provided in Section
+10.3, no one other than the license steward has the right to modify or
+publish new versions of this License. Each version will be given a
+distinguishing version number.
+
+10.2. Effect of New Versions
+
+You may distribute the Covered Software under the terms of the version
+of the License under which You originally received the Covered Software,
+or under the terms of any subsequent version published by the license
+steward.
+
+10.3. Modified Versions
+
+If you create software not governed by this License, and you want to
+create a new license for such software, you may create and use a
+modified version of this License if you rename the license and remove
+any references to the name of the license steward (except to note that
+such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+Licenses
+
+If You choose to distribute Source Code Form that is Incompatible With
+Secondary Licenses under the terms of this version of the License, the
+notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+-------------------------------------------
+
+ This Source Code Form is subject to the terms of the Mozilla Public
+ License, v. 2.0. If a copy of the MPL was not distributed with this
+ file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular
+file, then You may include the notice in a location (such as a LICENSE
+file in a relevant directory) where a recipient would be likely to look
+for such a notice.
+
+The Original Code is RabbitMQ.
+
+The Initial Developer of the Original Code is Pivotal Software, Inc.
+Copyright (c) 2020 VMware, Inc. or its affiliates. All rights reserved. \ No newline at end of file
diff --git a/deps/rabbitmq_stream/Makefile b/deps/rabbitmq_stream/Makefile
new file mode 100644
index 0000000000..88d7717281
--- /dev/null
+++ b/deps/rabbitmq_stream/Makefile
@@ -0,0 +1,34 @@
+PROJECT = rabbitmq_stream
+PROJECT_DESCRIPTION = RabbitMQ Stream
+PROJECT_MOD = rabbit_stream
+
+define PROJECT_ENV
+[
+ {tcp_listeners, [5555]},
+ {num_tcp_acceptors, 10},
+ {tcp_listen_options, [{backlog, 128},
+ {nodelay, true}]},
+ {initial_credits, 50000},
+ {credits_required_for_unblocking, 12500},
+ {frame_max, 1048576},
+ {heartbeat, 60},
+ {advertised_host, undefined},
+ {advertised_port, undefined}
+]
+endef
+
+
+DEPS = rabbit
+TEST_DEPS = rabbitmq_ct_helpers rabbitmq_ct_client_helpers
+
+DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-plugin.mk
+DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk
+
+# FIXME: Use erlang.mk patched for RabbitMQ, while waiting for PRs to be
+# reviewed and merged.
+
+ERLANG_MK_REPO = https://github.com/rabbitmq/erlang.mk.git
+ERLANG_MK_COMMIT = rabbitmq-tmp
+
+include rabbitmq-components.mk
+include erlang.mk
diff --git a/deps/rabbitmq_stream/README.adoc b/deps/rabbitmq_stream/README.adoc
new file mode 100644
index 0000000000..56e942671b
--- /dev/null
+++ b/deps/rabbitmq_stream/README.adoc
@@ -0,0 +1,53 @@
+= RabbitMQ Stream Plugin
+
+This is a RabbitMQ plugin that exposes streams - append-only, FIFO structures -
+in a RabbitMQ broker/cluster, through a link:docs/PROTOCOL.adoc[custom binary protocol].
+
+== How to Use
+
+See the https://rabbitmq.github.io/rabbitmq-stream-java-client/snapshot/htmlsingle/#setting-up-rabbitmq[stream
+Java client] documentation to set up a node with the stream plugin.
+
+The https://rabbitmq.github.io/rabbitmq-stream-java-client/snapshot/htmlsingle/[stream Java client]
+is currently the best way to experiment with the plugin.
+
+== Stream Protocol
+
+The plugin uses a link:docs/PROTOCOL.adoc[custom binary protocol].
+
+== Project Maturity
+
+The project is in early stages of development and is considered experimental.
+It is not ready for production use.
+
+== Support
+
+* For questions: https://groups.google.com/forum/#!forum/rabbitmq-users[RabbitMQ Users]
+* For bugs and feature requests: https://github.com/rabbitmq/rabbitmq-stream/issues[GitHub Issues]
+
+The project is currently under development; there is no guarantee yet that it will be maintained and supported
+in the future (read: you are welcome to experiment with it and give feedback, but please do not base
+your whole business on it).
+
+== Build Instructions
+
+----
+git clone git@github.com:rabbitmq/rabbitmq-public-umbrella.git
+cd rabbitmq-public-umbrella
+make co
+make up BRANCH="stream-queue" -j 32
+cd deps
+git clone git@github.com:rabbitmq/rabbitmq-stream.git rabbitmq_stream
+cd rabbitmq_stream
+make run-broker
+----
+
+Then follow the instructions to https://github.com/rabbitmq/rabbitmq-stream-java-client[build the client and the performance tool].
+
+== Licensing
+
+Released under the link:LICENSE-MPL-RabbitMQ[MPL 2.0].
+
+== Copyright
+
+(c) 2020 VMware, Inc. or its affiliates. \ No newline at end of file
diff --git a/deps/rabbitmq_stream/docs/PROTOCOL.adoc b/deps/rabbitmq_stream/docs/PROTOCOL.adoc
new file mode 100644
index 0000000000..14e149664c
--- /dev/null
+++ b/deps/rabbitmq_stream/docs/PROTOCOL.adoc
@@ -0,0 +1,499 @@
+= RabbitMQ Stream Protocol Reference
+
+This is the reference for the RabbitMQ stream protocol. Note that the protocol
+is still under development and is subject to change.
+
+The https://github.com/rabbitmq/rabbitmq-stream-java-client[RabbitMQ stream Java client]
+is currently the reference implementation.
+
+== Types
+
+int8, int16, int32, int64 - Signed integers (big-endian order)
+
+uint8, uint16, uint32, uint64 - Unsigned integers (big-endian order)
+
+bytes - int32 for the length followed by the bytes of content; a length of -1 indicates null.
+
+string - int16 for the length followed by the bytes of content; a length of -1 indicates null.
+
+arrays - int32 for the length followed by the repetitions of the structure; the notation uses [], e.g.
+[int32] for an array of int32.
+
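+As an illustration, the following Erlang sketch (hypothetical helper
+names, not code from the plugin) encodes and decodes the `string` type
+described above:
+
+```
+%% int16 length prefix; -1 encodes a null string.
+encode_string(null) -> <<-1:16/signed>>;
+encode_string(Bin) when is_binary(Bin) ->
+    <<(byte_size(Bin)):16, Bin/binary>>.
+
+%% Returns the decoded value and the remaining bytes.
+decode_string(<<-1:16/signed, Rest/binary>>) -> {null, Rest};
+decode_string(<<Len:16, Value:Len/binary, Rest/binary>>) -> {Value, Rest}.
+```
+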
+== Frame Structure
+
+```
+Frame => Size (Request | Response | Command)
+ Size => int32 (size without the 4 bytes of the size element)
+
+Request => Key Version (CorrelationId) Content
+ Key => int16
+ Version => int16
+ CorrelationId => int32
+ Content => bytes // see command details below
+
+Response => Key Version CorrelationId ResponseCode
+ Key => int16
+ Version => int16
+ CorrelationId => int32
+ ResponseCode => int16
+
+Command => Key Version Content
+ Key => int16
+ Version => int16
+ Content => bytes // see command details below
+```
+
+Most commands are request/reply, but some commands (e.g. `Deliver`) are one-way only and thus
+do not contain a correlation ID.
+
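+As an illustration, a client can rely on the size prefix to read one
+frame at a time off the wire. A minimal sketch, assuming a `gen_tcp`
+socket opened with `[binary, {active, false}]`:
+
+```
+read_frame(Socket) ->
+    %% read the 4-byte size prefix, then the frame it announces
+    {ok, <<Size:32>>} = gen_tcp:recv(Socket, 4),
+    {ok, <<Key:16, Version:16, Rest/binary>>} = gen_tcp:recv(Socket, Size),
+    {Key, Version, Rest}.
+```
+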
+== Commands
+
+.Stream Protocol Commands
+|===
+|Command |From |Key | Expects response?
+
+|<<publish>>
+|Client
+|0
+|No
+
+|<<publishconfirm>>
+|Server
+|1
+|No
+
+|<<subscribe>>
+|Client
+|2
+|Yes
+
+|<<deliver>>
+|Server
+|3
+|No
+
+|<<credit>>
+|Client
+|4
+|No
+
+|<<unsubscribe>>
+|Client
+|5
+|Yes
+
+|<<publisherror>>
+|Server
+|6
+|No
+
+|<<metadataupdate>>
+|Server
+|7
+|No
+
+|<<metadata>>
+|Client
+|8
+|Yes
+
+|<<saslhandshake>>
+|Client
+|9
+|Yes
+
+|<<saslauthenticate>>
+|Client
+|10
+|Yes
+
+|<<tune>>
+|Server
+|11
+|Yes
+
+|<<open>>
+|Server
+|12
+|Yes
+
+|<<close>>
+|Client & Server
+|13
+|Yes
+
+|<<heartbeat>>
+|Client & Server
+|14
+|No
+
+|<<peerproperties>>
+|Client
+|15
+|Yes
+
+|<<commitoffset>>
+|Client
+|16
+|No
+
+|<<queryoffset>>
+|Client
+|17
+|Yes
+
+|<<create>>
+|Client
+|998
+|Yes
+
+|<<delete>>
+|Client
+|999
+|Yes
+|===
+
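+The key column above translates directly into code. As a sketch (an
+illustration, not code from the plugin):
+
+```
+command_name(0)   -> publish;
+command_name(1)   -> publish_confirm;
+command_name(2)   -> subscribe;
+command_name(3)   -> deliver;
+command_name(4)   -> credit;
+command_name(5)   -> unsubscribe;
+command_name(6)   -> publish_error;
+command_name(7)   -> metadata_update;
+command_name(8)   -> metadata;
+command_name(9)   -> sasl_handshake;
+command_name(10)  -> sasl_authenticate;
+command_name(11)  -> tune;
+command_name(12)  -> open;
+command_name(13)  -> close;
+command_name(14)  -> heartbeat;
+command_name(15)  -> peer_properties;
+command_name(16)  -> commit_offset;
+command_name(17)  -> query_offset;
+command_name(998) -> create;
+command_name(999) -> delete.
+```
+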
+=== Publish
+
+```
+Publish => Key Version Stream PublisherId PublishedMessages
+ Key => int16 // 0
+ Version => int16
+ Stream => string // the name of the stream
+ PublisherId => uint8
+ PublishedMessages => [PublishedMessage]
+ PublishedMessage => PublishingId Message
+ PublishingId => int64
+ Message => bytes
+```
+
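+As a sketch (not the plugin's actual encoder, and assuming version 0),
+a complete Publish frame carrying a single message can be built with
+Erlang binary syntax:
+
+```
+publish_frame(Stream, PublisherId, PublishingId, Message)
+  when is_binary(Stream), is_binary(Message) ->
+    Body = <<0:16,                                 %% Key = 0 (Publish)
+             0:16,                                 %% Version (assumed 0)
+             (byte_size(Stream)):16, Stream/binary,
+             PublisherId:8,
+             1:32,                                 %% one PublishedMessage
+             PublishingId:64,
+             (byte_size(Message)):32, Message/binary>>,
+    %% prepend the frame size (which excludes the size field itself)
+    <<(byte_size(Body)):32, Body/binary>>.
+```
+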
+=== PublishConfirm
+
+```
+PublishConfirm => Key Version PublisherId PublishingIds
+ Key => int16 // 1
+ Version => int16
+ PublisherId => uint8
+ PublishingIds => [int64] // to correlate with the messages sent
+```
+
+=== Subscribe
+
+```
+Subscribe => Key Version CorrelationId SubscriptionId Stream OffsetSpecification Credit
+ Key => int16 // 2
+ Version => int16
+ CorrelationId => int32 // correlation id to correlate the response
+ SubscriptionId => uint8 // client-supplied id to identify the subscription
+ Stream => string // the name of the stream
+ OffsetSpecification => OffsetType Offset
+ OffsetType => int16 // 0 (first), 1 (last), 2 (next), 3 (offset), 4 (timestamp)
+ Offset => uint64 (for offset) | int64 (for timestamp)
+ Credit => int16
+```
+
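+A sketch of encoding the OffsetSpecification, assuming (as the grammar
+implies) that types 0-2 carry no trailing offset field:
+
+```
+offset_spec(first)           -> <<0:16>>;
+offset_spec(last)            -> <<1:16>>;
+offset_spec(next)            -> <<2:16>>;
+offset_spec({offset, O})     -> <<3:16, O:64>>;          %% uint64 offset
+offset_spec({timestamp, Ts}) -> <<4:16, Ts:64/signed>>.  %% int64 timestamp
+```
+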
+=== Deliver
+
+```
+Deliver => Key Version SubscriptionId OsirisChunk
+ Key => int16 // 3
+ Version => int16
+ SubscriptionId => uint8
+ OsirisChunk => MagicVersion NumEntries NumRecords Epoch ChunkFirstOffset ChunkCrc DataLength Messages
+ MagicVersion => int8
+ NumEntries => uint16
+ NumRecords => uint32
+ Epoch => uint64
+ ChunkFirstOffset => uint64
+ ChunkCrc => int32
+ DataLength => uint32
+ Messages => [Message] // no int32 for the size for this array
+ Message => EntryTypeAndSize Data
+ Data => bytes
+```
+
+NB: See the https://github.com/rabbitmq/osiris/blob/348db0528986d6025b823bcf1ae0570aa63f5e25/src/osiris_log.erl#L49-L81[Osiris project]
+for details on the structure of messages.
+
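+A sketch of matching the chunk header fields listed above with an
+Erlang binary pattern (the exact packing of the first byte is an
+Osiris detail; here it is taken as a plain int8, as written):
+
+```
+parse_chunk(<<MagicVersion:8, NumEntries:16, NumRecords:32, Epoch:64,
+              ChunkFirstOffset:64, ChunkCrc:32/signed, DataLength:32,
+              Data:DataLength/binary, _Rest/binary>>) ->
+    {MagicVersion, NumEntries, NumRecords, Epoch,
+     ChunkFirstOffset, ChunkCrc, Data}.
+```
+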
+=== Credit
+
+```
+Credit => Key Version SubscriptionId Credit
+ Key => int16 // 4
+ Version => int16
+ SubscriptionId => int8
+ Credit => int16 // the number of chunks that can be sent
+
+CreditResponse => Key Version ResponseCode SubscriptionId
+ Key => int16 // 4
+ Version => int16
+ ResponseCode => int16
+ SubscriptionId => int8
+```
+
+NB: the server sends a response only in case of a problem, e.g. crediting an unknown subscription.
+
+=== Unsubscribe
+
+```
+Unsubscribe => Key Version CorrelationId SubscriptionId
+ Key => int16 // 5
+ Version => int16
+ CorrelationId => int32
+ SubscriptionId => int8
+```
+
+=== PublishError
+
+```
+PublishError => Key Version PublisherId [PublishingError]
+ Key => int16 // 6
+ Version => int16
+ PublisherId => int8
+ PublishingError => PublishingId Code
+ PublishingId => int64
+ Code => int16 // code to identify the problem
+```
+
+=== MetadataUpdate
+
+```
+MetadataUpdate => Key Version MetadataInfo
+ Key => int16 // 7
+ Version => int16
+ MetadataInfo => Code Stream
+ Code => int16 // code to identify the information
+ Stream => string // the stream implied
+```
+
+=== Metadata
+
+```
+MetadataQuery => Key Version CorrelationId [Stream]
+ Key => int16 // 8
+ Version => int16
+ CorrelationId => int32
+ Stream => string
+
+MetadataResponse => Key Version CorrelationId [Broker] [StreamMetadata]
+ Key => int16 // 8
+ Version => int16
+ CorrelationId => int32
+ Broker => Reference Host Port
+ Reference => int16
+ Host => string
+ Port => int32
+ StreamMetadata => StreamName ResponseCode LeaderReference ReplicasReferences
+ StreamName => string
+ ResponseCode => int16
+ LeaderReference => int16
+ ReplicasReferences => [int16]
+```
+
+=== SaslHandshake
+
+```
+SaslHandshakeRequest => Key Version CorrelationId
+ Key => int16 // 9
+ Version => int16
+ CorrelationId => int32
+
+SaslHandshakeResponse => Key Version CorrelationId ResponseCode [Mechanism]
+ Key => int16 // 9
+ Version => int16
+ CorrelationId => int32
+ ResponseCode => int16
+ Mechanism => string
+```
+
+=== SaslAuthenticate
+
+```
+SaslAuthenticateRequest => Key Version CorrelationId Mechanism SaslOpaqueData
+ Key => int16 // 10
+ Version => int16
+ CorrelationId => int32
+ Mechanism => string
+ SaslOpaqueData => bytes
+
+SaslAuthenticateResponse => Key Version CorrelationId ResponseCode SaslOpaqueData
+ Key => int16 // 10
+ Version => int16
+ CorrelationId => int32
+ ResponseCode => int16
+ SaslOpaqueData => bytes
+```
+
+=== Tune
+
+```
+TuneRequest => Key Version FrameMax Heartbeat
+ Key => int16 // 11, to identify the command
+ Version => int16
+ FrameMax => int32 // in bytes, 0 means no limit
+ Heartbeat => int32 // in seconds, 0 means no heartbeat
+
+TuneResponse => TuneRequest
+```
+
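+When answering the server's `Tune`, a client typically takes the
+smaller of the server's suggestion and its own preference, treating 0
+as "no limit" (or "no heartbeat"). A sketch of this convention, which
+the protocol itself does not mandate:
+
+```
+%% negotiate(ServerSuggestion, ClientPreference)
+negotiate(0, Ours)      -> Ours;
+negotiate(Theirs, 0)    -> Theirs;
+negotiate(Theirs, Ours) -> min(Theirs, Ours).
+```
+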
+=== Open
+
+```
+OpenRequest => Key Version CorrelationId VirtualHost
+ Key => int16 // 12
+ Version => int16
+ CorrelationId => int32
+ VirtualHost => string
+
+OpenResponse => Key Version CorrelationId ResponseCode
+ Key => int16 // 12
+ Version => int16
+ CorrelationId => int32
+ ResponseCode => int16
+```
+
+=== Close
+
+```
+CloseRequest => Key Version CorrelationId ClosingCode ClosingReason
+ Key => int16 // 13
+ Version => int16
+ CorrelationId => int32
+ ClosingCode => int16
+ ClosingReason => string
+
+CloseResponse => Key Version CorrelationId ResponseCode
+ Key => int16 // 13
+ Version => int16
+ CorrelationId => int32
+ ResponseCode => int16
+```
+
+=== Heartbeat
+
+```
+Heartbeat => Key Version
+ Key => int16 // 14
+ Version => int16
+```
+
+=== PeerProperties
+
+```
+PeerPropertiesRequest => Key Version CorrelationId PeerProperties
+ Key => int16 // 15
+ Version => int16
+ CorrelationId => int32
+ PeerProperties => [PeerProperty]
+ PeerProperty => Key Value
+ Key => string
+ Value => string
+
+PeerPropertiesResponse => Key Version CorrelationId ResponseCode PeerProperties
+ Key => int16 // 15
+ Version => int16
+ CorrelationId => int32
+ ResponseCode => int16
+ PeerProperties => [PeerProperty]
+ PeerProperty => Key Value
+ Key => string
+ Value => string
+```
+
+=== CommitOffset
+
+```
+CommitOffset => Key Version CorrelationId Reference Stream Offset
+ Key => int16 // 16
+ Version => int16
+ CorrelationId => int32 // not used yet
+ Reference => string // max 256 characters
+ Stream => string
+ Offset => int64
+```
+
+=== QueryOffset
+
+```
+QueryOffsetRequest => Key Version CorrelationId Reference Stream
+ Key => int16 // 17
+ Version => int16
+ CorrelationId => int32
+ Reference => string // max 256 characters
+ Stream => string
+
+QueryOffsetResponse => Key Version CorrelationId ResponseCode Offset
+ Key => int16 // 17
+ Version => int16
+ CorrelationId => int32
+ ResponseCode => int16
+ Offset => int64
+```
+
+=== Create
+
+```
+Create => Key Version CorrelationId Stream Arguments
+ Key => int16 // 998
+ Version => int16
+ CorrelationId => int32
+ Stream => string
+ Arguments => [Argument]
+ Argument => Key Value
+ Key => string
+ Value => string
+```
+
+=== Delete
+
+```
+Delete => Key Version CorrelationId Stream
+ Key => int16 // 999
+ Version => int16
+ CorrelationId => int32
+ Stream => string
+```
+
+== Authentication
+
+Once a client is connected to the server, it initiates an authentication
+sequence. The next figure shows the steps of the sequence:
+
+[ditaa]
+.Authentication Sequence
+....
+Client Server
+ + +
+ | Peer Properties Exchange |
+ |-------------------------->|
+ |<--------------------------|
+ | |
+ | SASL Handshake |
+ |-------------------------->|
+ |<--------------------------|
+ | |
+ | SASL Authenticate |
+ |-------------------------->|
+ |<--------------------------|
+ | |
+ | Tune |
+ |<--------------------------|
+ |-------------------------->|
+ | |
+ | Open |
+ |-------------------------->|
+ |<--------------------------|
+ | |
+ + +
+....
+
+* SaslHandshake: the client asks which SASL mechanisms the server supports. It
+can then pick one from the list the server returns.
+* SaslAuthenticate: the client responds to the server's challenge(s), using the
+SASL mechanism it picked. The server sends a `Tune` frame once it is satisfied
+with the client authentication response.
+* Tune: the server sends a `Tune` frame to suggest some settings (max frame size, heartbeat).
+The client answers with a `Tune` frame containing the settings it agrees on, possibly adjusted
+from the server's suggestions.
+* Open: the client sends an `Open` frame to pick a virtual host to connect to. The server
+answers whether or not it grants access. \ No newline at end of file
diff --git a/deps/rabbitmq_stream/erlang.mk b/deps/rabbitmq_stream/erlang.mk
new file mode 100644
index 0000000000..83988d3025
--- /dev/null
+++ b/deps/rabbitmq_stream/erlang.mk
@@ -0,0 +1,7712 @@
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+.PHONY: all app apps deps search rel relup docs install-docs check tests clean distclean help erlang-mk
+
+ERLANG_MK_FILENAME := $(realpath $(lastword $(MAKEFILE_LIST)))
+export ERLANG_MK_FILENAME
+
+ERLANG_MK_VERSION = 2019.07.01-18-g7edc30a
+ERLANG_MK_WITHOUT =
+
+# Make 3.81 and 3.82 are deprecated.
+
+ifeq ($(MAKELEVEL)$(MAKE_VERSION),03.81)
+$(warning Please upgrade to GNU Make 4 or later: https://erlang.mk/guide/installation.html)
+endif
+
+ifeq ($(MAKELEVEL)$(MAKE_VERSION),03.82)
+$(warning Please upgrade to GNU Make 4 or later: https://erlang.mk/guide/installation.html)
+endif
+
+# Core configuration.
+
+PROJECT ?= $(notdir $(CURDIR))
+PROJECT := $(strip $(PROJECT))
+
+PROJECT_VERSION ?= rolling
+PROJECT_MOD ?= $(PROJECT)_app
+PROJECT_ENV ?= []
+
+# Verbosity.
+
+V ?= 0
+
+verbose_0 = @
+verbose_2 = set -x;
+verbose = $(verbose_$(V))
+
+ifeq ($(V),3)
+SHELL := $(SHELL) -x
+endif
+
+gen_verbose_0 = @echo " GEN " $@;
+gen_verbose_2 = set -x;
+gen_verbose = $(gen_verbose_$(V))
+
+gen_verbose_esc_0 = @echo " GEN " $$@;
+gen_verbose_esc_2 = set -x;
+gen_verbose_esc = $(gen_verbose_esc_$(V))
+
+# Temporary files directory.
+
+ERLANG_MK_TMP ?= $(CURDIR)/.erlang.mk
+export ERLANG_MK_TMP
+
+# "erl" command.
+
+ERL = erl +A1 -noinput -boot no_dot_erlang
+
+# Platform detection.
+
+ifeq ($(PLATFORM),)
+UNAME_S := $(shell uname -s)
+
+ifeq ($(UNAME_S),Linux)
+PLATFORM = linux
+else ifeq ($(UNAME_S),Darwin)
+PLATFORM = darwin
+else ifeq ($(UNAME_S),SunOS)
+PLATFORM = solaris
+else ifeq ($(UNAME_S),GNU)
+PLATFORM = gnu
+else ifeq ($(UNAME_S),FreeBSD)
+PLATFORM = freebsd
+else ifeq ($(UNAME_S),NetBSD)
+PLATFORM = netbsd
+else ifeq ($(UNAME_S),OpenBSD)
+PLATFORM = openbsd
+else ifeq ($(UNAME_S),DragonFly)
+PLATFORM = dragonfly
+else ifeq ($(shell uname -o),Msys)
+PLATFORM = msys2
+else
+$(error Unable to detect platform. Please open a ticket with the output of uname -a.)
+endif
+
+export PLATFORM
+endif
+
+# Core targets.
+
+all:: deps app rel
+
+# Noop to avoid a Make warning when there's nothing to do.
+rel::
+ $(verbose) :
+
+relup:: deps app
+
+check:: tests
+
+clean:: clean-crashdump
+
+clean-crashdump:
+ifneq ($(wildcard erl_crash.dump),)
+ $(gen_verbose) rm -f erl_crash.dump
+endif
+
+distclean:: clean distclean-tmp
+
+$(ERLANG_MK_TMP):
+ $(verbose) mkdir -p $(ERLANG_MK_TMP)
+
+distclean-tmp:
+ $(gen_verbose) rm -rf $(ERLANG_MK_TMP)
+
+help::
+ $(verbose) printf "%s\n" \
+ "erlang.mk (version $(ERLANG_MK_VERSION)) is distributed under the terms of the ISC License." \
+ "Copyright (c) 2013-2016 Loïc Hoguin <essen@ninenines.eu>" \
+ "" \
+ "Usage: [V=1] $(MAKE) [target]..." \
+ "" \
+ "Core targets:" \
+ " all Run deps, app and rel targets in that order" \
+ " app Compile the project" \
+ " deps Fetch dependencies (if needed) and compile them" \
+ " fetch-deps Fetch dependencies recursively (if needed) without compiling them" \
+ " list-deps List dependencies recursively on stdout" \
+ " search q=... Search for a package in the built-in index" \
+ " rel Build a release for this project, if applicable" \
+ " docs Build the documentation for this project" \
+ " install-docs Install the man pages for this project" \
+ " check Compile and run all tests and analysis for this project" \
+ " tests Run the tests for this project" \
+ " clean Delete temporary and output files from most targets" \
+ " distclean Delete all temporary and output files" \
+ " help Display this help and exit" \
+ " erlang-mk Update erlang.mk to the latest version"
+
+# Core functions.
+
+empty :=
+space := $(empty) $(empty)
+tab := $(empty) $(empty)
+comma := ,
+
+define newline
+
+
+endef
+
+define comma_list
+$(subst $(space),$(comma),$(strip $(1)))
+endef
+
+define escape_dquotes
+$(subst ",\",$1)
+endef
+
+# Adding erlang.mk to make Erlang scripts who call init:get_plain_arguments() happy.
+define erlang
+$(ERL) $2 -pz $(ERLANG_MK_TMP)/rebar/ebin -eval "$(subst $(newline),,$(call escape_dquotes,$1))" -- erlang.mk
+endef
+
+ifeq ($(PLATFORM),msys2)
+core_native_path = $(shell cygpath -m $1)
+else
+core_native_path = $1
+endif
+
+core_http_get = curl -Lf$(if $(filter-out 0,$(V)),,s)o $(call core_native_path,$1) $2
+
+core_eq = $(and $(findstring $(1),$(2)),$(findstring $(2),$(1)))
+
+# We skip files that contain spaces because they end up causing issues.
+core_find = $(if $(wildcard $1),$(shell find $(1:%/=%) \( -type l -o -type f \) -name $(subst *,\*,$2) | grep -v " "))
+
+core_lc = $(subst A,a,$(subst B,b,$(subst C,c,$(subst D,d,$(subst E,e,$(subst F,f,$(subst G,g,$(subst H,h,$(subst I,i,$(subst J,j,$(subst K,k,$(subst L,l,$(subst M,m,$(subst N,n,$(subst O,o,$(subst P,p,$(subst Q,q,$(subst R,r,$(subst S,s,$(subst T,t,$(subst U,u,$(subst V,v,$(subst W,w,$(subst X,x,$(subst Y,y,$(subst Z,z,$(1)))))))))))))))))))))))))))
+
+core_ls = $(filter-out $(1),$(shell echo $(1)))
+
+# @todo Use a solution that does not require using perl.
+core_relpath = $(shell perl -e 'use File::Spec; print File::Spec->abs2rel(@ARGV) . "\n"' $1 $2)
+
+define core_render
+ printf -- '$(subst $(newline),\n,$(subst %,%%,$(subst ','\'',$(subst $(tab),$(WS),$(call $(1))))))\n' > $(2)
+endef
+
+# Automated update.
+
+ERLANG_MK_REPO ?= https://github.com/ninenines/erlang.mk
+ERLANG_MK_COMMIT ?=
+ERLANG_MK_BUILD_CONFIG ?= build.config
+ERLANG_MK_BUILD_DIR ?= .erlang.mk.build
+
+erlang-mk: WITHOUT ?= $(ERLANG_MK_WITHOUT)
+erlang-mk:
+ifdef ERLANG_MK_COMMIT
+ $(verbose) git clone $(ERLANG_MK_REPO) $(ERLANG_MK_BUILD_DIR)
+ $(verbose) cd $(ERLANG_MK_BUILD_DIR) && git checkout $(ERLANG_MK_COMMIT)
+else
+ $(verbose) git clone --depth 1 $(ERLANG_MK_REPO) $(ERLANG_MK_BUILD_DIR)
+endif
+ $(verbose) if [ -f $(ERLANG_MK_BUILD_CONFIG) ]; then cp $(ERLANG_MK_BUILD_CONFIG) $(ERLANG_MK_BUILD_DIR)/build.config; fi
+ $(gen_verbose) $(MAKE) --no-print-directory -C $(ERLANG_MK_BUILD_DIR) WITHOUT='$(strip $(WITHOUT))' UPGRADE=1
+ $(verbose) cp $(ERLANG_MK_BUILD_DIR)/erlang.mk ./erlang.mk
+ $(verbose) rm -rf $(ERLANG_MK_BUILD_DIR)
+ $(verbose) rm -rf $(ERLANG_MK_TMP)
+
+# The erlang.mk package index is bundled in the default erlang.mk build.
+# Search for the string "copyright" to skip to the rest of the code.
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-kerl
+
+KERL_INSTALL_DIR ?= $(HOME)/erlang
+
+ifeq ($(strip $(KERL)),)
+KERL := $(ERLANG_MK_TMP)/kerl/kerl
+endif
+
+KERL_DIR = $(ERLANG_MK_TMP)/kerl
+
+export KERL
+
+KERL_GIT ?= https://github.com/kerl/kerl
+KERL_COMMIT ?= master
+
+KERL_MAKEFLAGS ?=
+
+OTP_GIT ?= https://github.com/erlang/otp
+
+define kerl_otp_target
+$(KERL_INSTALL_DIR)/$(1): $(KERL)
+ $(verbose) if [ ! -d $$@ ]; then \
+ MAKEFLAGS="$(KERL_MAKEFLAGS)" $(KERL) build git $(OTP_GIT) $(1) $(1); \
+ $(KERL) install $(1) $(KERL_INSTALL_DIR)/$(1); \
+ fi
+endef
+
+define kerl_hipe_target
+$(KERL_INSTALL_DIR)/$1-native: $(KERL)
+ $(verbose) if [ ! -d $$@ ]; then \
+ KERL_CONFIGURE_OPTIONS=--enable-native-libs \
+ MAKEFLAGS="$(KERL_MAKEFLAGS)" $(KERL) build git $(OTP_GIT) $1 $1-native; \
+ $(KERL) install $1-native $(KERL_INSTALL_DIR)/$1-native; \
+ fi
+endef
+
+$(KERL): $(KERL_DIR)
+
+$(KERL_DIR): | $(ERLANG_MK_TMP)
+ $(gen_verbose) git clone --depth 1 $(KERL_GIT) $(ERLANG_MK_TMP)/kerl
+ $(verbose) cd $(ERLANG_MK_TMP)/kerl && git checkout $(KERL_COMMIT)
+ $(verbose) chmod +x $(KERL)
+
+distclean:: distclean-kerl
+
+distclean-kerl:
+ $(gen_verbose) rm -rf $(KERL_DIR)
+
+# Allow users to select which version of Erlang/OTP to use for a project.
+
+ifneq ($(strip $(LATEST_ERLANG_OTP)),)
+# In some environments it is necessary to filter out master.
+ERLANG_OTP := $(notdir $(lastword $(sort\
+ $(filter-out $(KERL_INSTALL_DIR)/master $(KERL_INSTALL_DIR)/OTP_R%,\
+ $(filter-out %-rc1 %-rc2 %-rc3,$(wildcard $(KERL_INSTALL_DIR)/*[^-native]))))))
+endif
+
+ERLANG_OTP ?=
+ERLANG_HIPE ?=
+
+# Use kerl to enforce a specific Erlang/OTP version for a project.
+ifneq ($(strip $(ERLANG_OTP)),)
+export PATH := $(KERL_INSTALL_DIR)/$(ERLANG_OTP)/bin:$(PATH)
+SHELL := env PATH=$(PATH) $(SHELL)
+$(eval $(call kerl_otp_target,$(ERLANG_OTP)))
+
+# Build Erlang/OTP only if it doesn't already exist.
+ifeq ($(wildcard $(KERL_INSTALL_DIR)/$(ERLANG_OTP))$(BUILD_ERLANG_OTP),)
+$(info Building Erlang/OTP $(ERLANG_OTP)... Please wait...)
+$(shell $(MAKE) $(KERL_INSTALL_DIR)/$(ERLANG_OTP) ERLANG_OTP=$(ERLANG_OTP) BUILD_ERLANG_OTP=1 >&2)
+endif
+
+else
+# Same for a HiPE enabled VM.
+ifneq ($(strip $(ERLANG_HIPE)),)
+export PATH := $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native/bin:$(PATH)
+SHELL := env PATH=$(PATH) $(SHELL)
+$(eval $(call kerl_hipe_target,$(ERLANG_HIPE)))
+
+# Build Erlang/OTP only if it doesn't already exist.
+ifeq ($(wildcard $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native)$(BUILD_ERLANG_OTP),)
+$(info Building HiPE-enabled Erlang/OTP $(ERLANG_HIPE)... Please wait...)
+$(shell $(MAKE) $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native ERLANG_HIPE=$(ERLANG_HIPE) BUILD_ERLANG_OTP=1 >&2)
+endif
+
+endif
+endif
+
+PACKAGES += aberth
+pkg_aberth_name = aberth
+pkg_aberth_description = Generic BERT-RPC server in Erlang
+pkg_aberth_homepage = https://github.com/a13x/aberth
+pkg_aberth_fetch = git
+pkg_aberth_repo = https://github.com/a13x/aberth
+pkg_aberth_commit = master
+
+PACKAGES += active
+pkg_active_name = active
+pkg_active_description = Active development for Erlang: rebuild and reload source/binary files while the VM is running
+pkg_active_homepage = https://github.com/proger/active
+pkg_active_fetch = git
+pkg_active_repo = https://github.com/proger/active
+pkg_active_commit = master
+
+PACKAGES += actordb_core
+pkg_actordb_core_name = actordb_core
+pkg_actordb_core_description = ActorDB main source
+pkg_actordb_core_homepage = http://www.actordb.com/
+pkg_actordb_core_fetch = git
+pkg_actordb_core_repo = https://github.com/biokoda/actordb_core
+pkg_actordb_core_commit = master
+
+PACKAGES += actordb_thrift
+pkg_actordb_thrift_name = actordb_thrift
+pkg_actordb_thrift_description = Thrift API for ActorDB
+pkg_actordb_thrift_homepage = http://www.actordb.com/
+pkg_actordb_thrift_fetch = git
+pkg_actordb_thrift_repo = https://github.com/biokoda/actordb_thrift
+pkg_actordb_thrift_commit = master
+
+PACKAGES += aleppo
+pkg_aleppo_name = aleppo
+pkg_aleppo_description = Alternative Erlang Pre-Processor
+pkg_aleppo_homepage = https://github.com/ErlyORM/aleppo
+pkg_aleppo_fetch = git
+pkg_aleppo_repo = https://github.com/ErlyORM/aleppo
+pkg_aleppo_commit = master
+
+PACKAGES += alog
+pkg_alog_name = alog
+pkg_alog_description = Simply the best logging framework for Erlang
+pkg_alog_homepage = https://github.com/siberian-fast-food/alogger
+pkg_alog_fetch = git
+pkg_alog_repo = https://github.com/siberian-fast-food/alogger
+pkg_alog_commit = master
+
+PACKAGES += amqp_client
+pkg_amqp_client_name = amqp_client
+pkg_amqp_client_description = RabbitMQ Erlang AMQP client
+pkg_amqp_client_homepage = https://www.rabbitmq.com/erlang-client-user-guide.html
+pkg_amqp_client_fetch = git
+pkg_amqp_client_repo = https://github.com/rabbitmq/rabbitmq-erlang-client.git
+pkg_amqp_client_commit = master
+
+PACKAGES += annotations
+pkg_annotations_name = annotations
+pkg_annotations_description = Simple code instrumentation utilities
+pkg_annotations_homepage = https://github.com/hyperthunk/annotations
+pkg_annotations_fetch = git
+pkg_annotations_repo = https://github.com/hyperthunk/annotations
+pkg_annotations_commit = master
+
+PACKAGES += antidote
+pkg_antidote_name = antidote
+pkg_antidote_description = Large-scale computation without synchronisation
+pkg_antidote_homepage = https://syncfree.lip6.fr/
+pkg_antidote_fetch = git
+pkg_antidote_repo = https://github.com/SyncFree/antidote
+pkg_antidote_commit = master
+
+PACKAGES += apns
+pkg_apns_name = apns
+pkg_apns_description = Apple Push Notification Server for Erlang
+pkg_apns_homepage = http://inaka.github.com/apns4erl
+pkg_apns_fetch = git
+pkg_apns_repo = https://github.com/inaka/apns4erl
+pkg_apns_commit = master
+
+PACKAGES += asciideck
+pkg_asciideck_name = asciideck
+pkg_asciideck_description = Asciidoc for Erlang.
+pkg_asciideck_homepage = https://ninenines.eu
+pkg_asciideck_fetch = git
+pkg_asciideck_repo = https://github.com/ninenines/asciideck
+pkg_asciideck_commit = master
+
+PACKAGES += azdht
+pkg_azdht_name = azdht
+pkg_azdht_description = Azureus Distributed Hash Table (DHT) in Erlang
+pkg_azdht_homepage = https://github.com/arcusfelis/azdht
+pkg_azdht_fetch = git
+pkg_azdht_repo = https://github.com/arcusfelis/azdht
+pkg_azdht_commit = master
+
+PACKAGES += backoff
+pkg_backoff_name = backoff
+pkg_backoff_description = Simple exponential backoffs in Erlang
+pkg_backoff_homepage = https://github.com/ferd/backoff
+pkg_backoff_fetch = git
+pkg_backoff_repo = https://github.com/ferd/backoff
+pkg_backoff_commit = master
+
+PACKAGES += barrel_tcp
+pkg_barrel_tcp_name = barrel_tcp
+pkg_barrel_tcp_description = barrel is a generic TCP acceptor pool with low latency in Erlang.
+pkg_barrel_tcp_homepage = https://github.com/benoitc-attic/barrel_tcp
+pkg_barrel_tcp_fetch = git
+pkg_barrel_tcp_repo = https://github.com/benoitc-attic/barrel_tcp
+pkg_barrel_tcp_commit = master
+
+PACKAGES += basho_bench
+pkg_basho_bench_name = basho_bench
+pkg_basho_bench_description = A load-generation and testing tool for basically whatever you can write a returning Erlang function for.
+pkg_basho_bench_homepage = https://github.com/basho/basho_bench
+pkg_basho_bench_fetch = git
+pkg_basho_bench_repo = https://github.com/basho/basho_bench
+pkg_basho_bench_commit = master
+
+PACKAGES += bcrypt
+pkg_bcrypt_name = bcrypt
+pkg_bcrypt_description = Bcrypt Erlang / C library
+pkg_bcrypt_homepage = https://github.com/erlangpack/bcrypt
+pkg_bcrypt_fetch = git
+pkg_bcrypt_repo = https://github.com/erlangpack/bcrypt.git
+pkg_bcrypt_commit = master
+
+PACKAGES += beam
+pkg_beam_name = beam
+pkg_beam_description = BEAM emulator written in Erlang
+pkg_beam_homepage = https://github.com/tonyrog/beam
+pkg_beam_fetch = git
+pkg_beam_repo = https://github.com/tonyrog/beam
+pkg_beam_commit = master
+
+PACKAGES += beanstalk
+pkg_beanstalk_name = beanstalk
+pkg_beanstalk_description = An Erlang client for beanstalkd
+pkg_beanstalk_homepage = https://github.com/tim/erlang-beanstalk
+pkg_beanstalk_fetch = git
+pkg_beanstalk_repo = https://github.com/tim/erlang-beanstalk
+pkg_beanstalk_commit = master
+
+PACKAGES += bear
+pkg_bear_name = bear
+pkg_bear_description = a set of statistics functions for erlang
+pkg_bear_homepage = https://github.com/boundary/bear
+pkg_bear_fetch = git
+pkg_bear_repo = https://github.com/boundary/bear
+pkg_bear_commit = master
+
+PACKAGES += bertconf
+pkg_bertconf_name = bertconf
+pkg_bertconf_description = Make ETS tables out of static BERT files that are auto-reloaded
+pkg_bertconf_homepage = https://github.com/ferd/bertconf
+pkg_bertconf_fetch = git
+pkg_bertconf_repo = https://github.com/ferd/bertconf
+pkg_bertconf_commit = master
+
+PACKAGES += bifrost
+pkg_bifrost_name = bifrost
+pkg_bifrost_description = Erlang FTP Server Framework
+pkg_bifrost_homepage = https://github.com/thorstadt/bifrost
+pkg_bifrost_fetch = git
+pkg_bifrost_repo = https://github.com/thorstadt/bifrost
+pkg_bifrost_commit = master
+
+PACKAGES += binpp
+pkg_binpp_name = binpp
+pkg_binpp_description = Erlang Binary Pretty Printer
+pkg_binpp_homepage = https://github.com/jtendo/binpp
+pkg_binpp_fetch = git
+pkg_binpp_repo = https://github.com/jtendo/binpp
+pkg_binpp_commit = master
+
+PACKAGES += bisect
+pkg_bisect_name = bisect
+pkg_bisect_description = Ordered fixed-size binary dictionary in Erlang
+pkg_bisect_homepage = https://github.com/knutin/bisect
+pkg_bisect_fetch = git
+pkg_bisect_repo = https://github.com/knutin/bisect
+pkg_bisect_commit = master
+
+PACKAGES += bitcask
+pkg_bitcask_name = bitcask
+pkg_bitcask_description = because you need another key/value storage engine
+pkg_bitcask_homepage = https://github.com/basho/bitcask
+pkg_bitcask_fetch = git
+pkg_bitcask_repo = https://github.com/basho/bitcask
+pkg_bitcask_commit = develop
+
+PACKAGES += bitstore
+pkg_bitstore_name = bitstore
+pkg_bitstore_description = A document based ontology development environment
+pkg_bitstore_homepage = https://github.com/bdionne/bitstore
+pkg_bitstore_fetch = git
+pkg_bitstore_repo = https://github.com/bdionne/bitstore
+pkg_bitstore_commit = master
+
+PACKAGES += bootstrap
+pkg_bootstrap_name = bootstrap
+pkg_bootstrap_description = A simple, yet powerful Erlang cluster bootstrapping application.
+pkg_bootstrap_homepage = https://github.com/schlagert/bootstrap
+pkg_bootstrap_fetch = git
+pkg_bootstrap_repo = https://github.com/schlagert/bootstrap
+pkg_bootstrap_commit = master
+
+PACKAGES += boss
+pkg_boss_name = boss
+pkg_boss_description = Erlang web MVC, now featuring Comet
+pkg_boss_homepage = https://github.com/ChicagoBoss/ChicagoBoss
+pkg_boss_fetch = git
+pkg_boss_repo = https://github.com/ChicagoBoss/ChicagoBoss
+pkg_boss_commit = master
+
+PACKAGES += boss_db
+pkg_boss_db_name = boss_db
+pkg_boss_db_description = BossDB: a sharded, caching, pooling, evented ORM for Erlang
+pkg_boss_db_homepage = https://github.com/ErlyORM/boss_db
+pkg_boss_db_fetch = git
+pkg_boss_db_repo = https://github.com/ErlyORM/boss_db
+pkg_boss_db_commit = master
+
+PACKAGES += brod
+pkg_brod_name = brod
+pkg_brod_description = Kafka client in Erlang
+pkg_brod_homepage = https://github.com/klarna/brod
+pkg_brod_fetch = git
+pkg_brod_repo = https://github.com/klarna/brod.git
+pkg_brod_commit = master
+
+PACKAGES += bson
+pkg_bson_name = bson
+pkg_bson_description = BSON documents in Erlang, see bsonspec.org
+pkg_bson_homepage = https://github.com/comtihon/bson-erlang
+pkg_bson_fetch = git
+pkg_bson_repo = https://github.com/comtihon/bson-erlang
+pkg_bson_commit = master
+
+PACKAGES += bullet
+pkg_bullet_name = bullet
+pkg_bullet_description = Simple, reliable, efficient streaming for Cowboy.
+pkg_bullet_homepage = http://ninenines.eu
+pkg_bullet_fetch = git
+pkg_bullet_repo = https://github.com/ninenines/bullet
+pkg_bullet_commit = master
+
+PACKAGES += cache
+pkg_cache_name = cache
+pkg_cache_description = Erlang in-memory cache
+pkg_cache_homepage = https://github.com/fogfish/cache
+pkg_cache_fetch = git
+pkg_cache_repo = https://github.com/fogfish/cache
+pkg_cache_commit = master
+
+PACKAGES += cake
+pkg_cake_name = cake
+pkg_cake_description = Really simple terminal colorization
+pkg_cake_homepage = https://github.com/darach/cake-erl
+pkg_cake_fetch = git
+pkg_cake_repo = https://github.com/darach/cake-erl
+pkg_cake_commit = master
+
+PACKAGES += carotene
+pkg_carotene_name = carotene
+pkg_carotene_description = Real-time server
+pkg_carotene_homepage = https://github.com/carotene/carotene
+pkg_carotene_fetch = git
+pkg_carotene_repo = https://github.com/carotene/carotene
+pkg_carotene_commit = master
+
+PACKAGES += cberl
+pkg_cberl_name = cberl
+pkg_cberl_description = NIF based Erlang bindings for Couchbase
+pkg_cberl_homepage = https://github.com/chitika/cberl
+pkg_cberl_fetch = git
+pkg_cberl_repo = https://github.com/chitika/cberl
+pkg_cberl_commit = master
+
+PACKAGES += cecho
+pkg_cecho_name = cecho
+pkg_cecho_description = An ncurses library for Erlang
+pkg_cecho_homepage = https://github.com/mazenharake/cecho
+pkg_cecho_fetch = git
+pkg_cecho_repo = https://github.com/mazenharake/cecho
+pkg_cecho_commit = master
+
+PACKAGES += cferl
+pkg_cferl_name = cferl
+pkg_cferl_description = Rackspace / OpenStack Cloud Files Erlang Client
+pkg_cferl_homepage = https://github.com/ddossot/cferl
+pkg_cferl_fetch = git
+pkg_cferl_repo = https://github.com/ddossot/cferl
+pkg_cferl_commit = master
+
+PACKAGES += chaos_monkey
+pkg_chaos_monkey_name = chaos_monkey
+pkg_chaos_monkey_description = This is The CHAOS MONKEY. It will kill your processes.
+pkg_chaos_monkey_homepage = https://github.com/dLuna/chaos_monkey
+pkg_chaos_monkey_fetch = git
+pkg_chaos_monkey_repo = https://github.com/dLuna/chaos_monkey
+pkg_chaos_monkey_commit = master
+
+PACKAGES += check_node
+pkg_check_node_name = check_node
+pkg_check_node_description = Nagios Scripts for monitoring Riak
+pkg_check_node_homepage = https://github.com/basho-labs/riak_nagios
+pkg_check_node_fetch = git
+pkg_check_node_repo = https://github.com/basho-labs/riak_nagios
+pkg_check_node_commit = master
+
+PACKAGES += chronos
+pkg_chronos_name = chronos
+pkg_chronos_description = Timer module for Erlang that makes it easy to abstract time out of the tests.
+pkg_chronos_homepage = https://github.com/lehoff/chronos
+pkg_chronos_fetch = git
+pkg_chronos_repo = https://github.com/lehoff/chronos
+pkg_chronos_commit = master
+
+PACKAGES += chumak
+pkg_chumak_name = chumak
+pkg_chumak_description = Pure Erlang implementation of ZeroMQ Message Transport Protocol.
+pkg_chumak_homepage = http://choven.ca
+pkg_chumak_fetch = git
+pkg_chumak_repo = https://github.com/chovencorp/chumak
+pkg_chumak_commit = master
+
+PACKAGES += cl
+pkg_cl_name = cl
+pkg_cl_description = OpenCL binding for Erlang
+pkg_cl_homepage = https://github.com/tonyrog/cl
+pkg_cl_fetch = git
+pkg_cl_repo = https://github.com/tonyrog/cl
+pkg_cl_commit = master
+
+PACKAGES += clique
+pkg_clique_name = clique
+pkg_clique_description = CLI Framework for Erlang
+pkg_clique_homepage = https://github.com/basho/clique
+pkg_clique_fetch = git
+pkg_clique_repo = https://github.com/basho/clique
+pkg_clique_commit = develop
+
+PACKAGES += cloudi_core
+pkg_cloudi_core_name = cloudi_core
+pkg_cloudi_core_description = CloudI internal service runtime
+pkg_cloudi_core_homepage = http://cloudi.org/
+pkg_cloudi_core_fetch = git
+pkg_cloudi_core_repo = https://github.com/CloudI/cloudi_core
+pkg_cloudi_core_commit = master
+
+PACKAGES += cloudi_service_api_requests
+pkg_cloudi_service_api_requests_name = cloudi_service_api_requests
+pkg_cloudi_service_api_requests_description = CloudI Service API requests (JSON-RPC/Erlang-term support)
+pkg_cloudi_service_api_requests_homepage = http://cloudi.org/
+pkg_cloudi_service_api_requests_fetch = git
+pkg_cloudi_service_api_requests_repo = https://github.com/CloudI/cloudi_service_api_requests
+pkg_cloudi_service_api_requests_commit = master
+
+PACKAGES += cloudi_service_db
+pkg_cloudi_service_db_name = cloudi_service_db
+pkg_cloudi_service_db_description = CloudI Database (in-memory/testing/generic)
+pkg_cloudi_service_db_homepage = http://cloudi.org/
+pkg_cloudi_service_db_fetch = git
+pkg_cloudi_service_db_repo = https://github.com/CloudI/cloudi_service_db
+pkg_cloudi_service_db_commit = master
+
+PACKAGES += cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_name = cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_description = Cassandra CloudI Service
+pkg_cloudi_service_db_cassandra_homepage = http://cloudi.org/
+pkg_cloudi_service_db_cassandra_fetch = git
+pkg_cloudi_service_db_cassandra_repo = https://github.com/CloudI/cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_commit = master
+
+PACKAGES += cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_name = cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_description = Cassandra CQL CloudI Service
+pkg_cloudi_service_db_cassandra_cql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_cassandra_cql_fetch = git
+pkg_cloudi_service_db_cassandra_cql_repo = https://github.com/CloudI/cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_commit = master
+
+PACKAGES += cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_name = cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_description = CouchDB CloudI Service
+pkg_cloudi_service_db_couchdb_homepage = http://cloudi.org/
+pkg_cloudi_service_db_couchdb_fetch = git
+pkg_cloudi_service_db_couchdb_repo = https://github.com/CloudI/cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_commit = master
+
+PACKAGES += cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_name = cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_description = elasticsearch CloudI Service
+pkg_cloudi_service_db_elasticsearch_homepage = http://cloudi.org/
+pkg_cloudi_service_db_elasticsearch_fetch = git
+pkg_cloudi_service_db_elasticsearch_repo = https://github.com/CloudI/cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_commit = master
+
+PACKAGES += cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_name = cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_description = memcached CloudI Service
+pkg_cloudi_service_db_memcached_homepage = http://cloudi.org/
+pkg_cloudi_service_db_memcached_fetch = git
+pkg_cloudi_service_db_memcached_repo = https://github.com/CloudI/cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_commit = master
+
+PACKAGES += cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_name = cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_description = MySQL CloudI Service
+pkg_cloudi_service_db_mysql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_mysql_fetch = git
+pkg_cloudi_service_db_mysql_repo = https://github.com/CloudI/cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_commit = master
+
+PACKAGES += cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_name = cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_description = PostgreSQL CloudI Service
+pkg_cloudi_service_db_pgsql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_pgsql_fetch = git
+pkg_cloudi_service_db_pgsql_repo = https://github.com/CloudI/cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_commit = master
+
+PACKAGES += cloudi_service_db_riak
+pkg_cloudi_service_db_riak_name = cloudi_service_db_riak
+pkg_cloudi_service_db_riak_description = Riak CloudI Service
+pkg_cloudi_service_db_riak_homepage = http://cloudi.org/
+pkg_cloudi_service_db_riak_fetch = git
+pkg_cloudi_service_db_riak_repo = https://github.com/CloudI/cloudi_service_db_riak
+pkg_cloudi_service_db_riak_commit = master
+
+PACKAGES += cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_name = cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_description = Tokyo Tyrant CloudI Service
+pkg_cloudi_service_db_tokyotyrant_homepage = http://cloudi.org/
+pkg_cloudi_service_db_tokyotyrant_fetch = git
+pkg_cloudi_service_db_tokyotyrant_repo = https://github.com/CloudI/cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_commit = master
+
+PACKAGES += cloudi_service_filesystem
+pkg_cloudi_service_filesystem_name = cloudi_service_filesystem
+pkg_cloudi_service_filesystem_description = Filesystem CloudI Service
+pkg_cloudi_service_filesystem_homepage = http://cloudi.org/
+pkg_cloudi_service_filesystem_fetch = git
+pkg_cloudi_service_filesystem_repo = https://github.com/CloudI/cloudi_service_filesystem
+pkg_cloudi_service_filesystem_commit = master
+
+PACKAGES += cloudi_service_http_client
+pkg_cloudi_service_http_client_name = cloudi_service_http_client
+pkg_cloudi_service_http_client_description = HTTP client CloudI Service
+pkg_cloudi_service_http_client_homepage = http://cloudi.org/
+pkg_cloudi_service_http_client_fetch = git
+pkg_cloudi_service_http_client_repo = https://github.com/CloudI/cloudi_service_http_client
+pkg_cloudi_service_http_client_commit = master
+
+PACKAGES += cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_name = cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_description = cowboy HTTP/HTTPS CloudI Service
+pkg_cloudi_service_http_cowboy_homepage = http://cloudi.org/
+pkg_cloudi_service_http_cowboy_fetch = git
+pkg_cloudi_service_http_cowboy_repo = https://github.com/CloudI/cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_commit = master
+
+PACKAGES += cloudi_service_http_elli
+pkg_cloudi_service_http_elli_name = cloudi_service_http_elli
+pkg_cloudi_service_http_elli_description = elli HTTP CloudI Service
+pkg_cloudi_service_http_elli_homepage = http://cloudi.org/
+pkg_cloudi_service_http_elli_fetch = git
+pkg_cloudi_service_http_elli_repo = https://github.com/CloudI/cloudi_service_http_elli
+pkg_cloudi_service_http_elli_commit = master
+
+PACKAGES += cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_name = cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_description = Map/Reduce CloudI Service
+pkg_cloudi_service_map_reduce_homepage = http://cloudi.org/
+pkg_cloudi_service_map_reduce_fetch = git
+pkg_cloudi_service_map_reduce_repo = https://github.com/CloudI/cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_commit = master
+
+PACKAGES += cloudi_service_oauth1
+pkg_cloudi_service_oauth1_name = cloudi_service_oauth1
+pkg_cloudi_service_oauth1_description = OAuth v1.0 CloudI Service
+pkg_cloudi_service_oauth1_homepage = http://cloudi.org/
+pkg_cloudi_service_oauth1_fetch = git
+pkg_cloudi_service_oauth1_repo = https://github.com/CloudI/cloudi_service_oauth1
+pkg_cloudi_service_oauth1_commit = master
+
+PACKAGES += cloudi_service_queue
+pkg_cloudi_service_queue_name = cloudi_service_queue
+pkg_cloudi_service_queue_description = Persistent Queue Service
+pkg_cloudi_service_queue_homepage = http://cloudi.org/
+pkg_cloudi_service_queue_fetch = git
+pkg_cloudi_service_queue_repo = https://github.com/CloudI/cloudi_service_queue
+pkg_cloudi_service_queue_commit = master
+
+PACKAGES += cloudi_service_quorum
+pkg_cloudi_service_quorum_name = cloudi_service_quorum
+pkg_cloudi_service_quorum_description = CloudI Quorum Service
+pkg_cloudi_service_quorum_homepage = http://cloudi.org/
+pkg_cloudi_service_quorum_fetch = git
+pkg_cloudi_service_quorum_repo = https://github.com/CloudI/cloudi_service_quorum
+pkg_cloudi_service_quorum_commit = master
+
+PACKAGES += cloudi_service_router
+pkg_cloudi_service_router_name = cloudi_service_router
+pkg_cloudi_service_router_description = CloudI Router Service
+pkg_cloudi_service_router_homepage = http://cloudi.org/
+pkg_cloudi_service_router_fetch = git
+pkg_cloudi_service_router_repo = https://github.com/CloudI/cloudi_service_router
+pkg_cloudi_service_router_commit = master
+
+PACKAGES += cloudi_service_tcp
+pkg_cloudi_service_tcp_name = cloudi_service_tcp
+pkg_cloudi_service_tcp_description = TCP CloudI Service
+pkg_cloudi_service_tcp_homepage = http://cloudi.org/
+pkg_cloudi_service_tcp_fetch = git
+pkg_cloudi_service_tcp_repo = https://github.com/CloudI/cloudi_service_tcp
+pkg_cloudi_service_tcp_commit = master
+
+PACKAGES += cloudi_service_timers
+pkg_cloudi_service_timers_name = cloudi_service_timers
+pkg_cloudi_service_timers_description = Timers CloudI Service
+pkg_cloudi_service_timers_homepage = http://cloudi.org/
+pkg_cloudi_service_timers_fetch = git
+pkg_cloudi_service_timers_repo = https://github.com/CloudI/cloudi_service_timers
+pkg_cloudi_service_timers_commit = master
+
+PACKAGES += cloudi_service_udp
+pkg_cloudi_service_udp_name = cloudi_service_udp
+pkg_cloudi_service_udp_description = UDP CloudI Service
+pkg_cloudi_service_udp_homepage = http://cloudi.org/
+pkg_cloudi_service_udp_fetch = git
+pkg_cloudi_service_udp_repo = https://github.com/CloudI/cloudi_service_udp
+pkg_cloudi_service_udp_commit = master
+
+PACKAGES += cloudi_service_validate
+pkg_cloudi_service_validate_name = cloudi_service_validate
+pkg_cloudi_service_validate_description = CloudI Validate Service
+pkg_cloudi_service_validate_homepage = http://cloudi.org/
+pkg_cloudi_service_validate_fetch = git
+pkg_cloudi_service_validate_repo = https://github.com/CloudI/cloudi_service_validate
+pkg_cloudi_service_validate_commit = master
+
+PACKAGES += cloudi_service_zeromq
+pkg_cloudi_service_zeromq_name = cloudi_service_zeromq
+pkg_cloudi_service_zeromq_description = ZeroMQ CloudI Service
+pkg_cloudi_service_zeromq_homepage = http://cloudi.org/
+pkg_cloudi_service_zeromq_fetch = git
+pkg_cloudi_service_zeromq_repo = https://github.com/CloudI/cloudi_service_zeromq
+pkg_cloudi_service_zeromq_commit = master
+
+PACKAGES += cluster_info
+pkg_cluster_info_name = cluster_info
+pkg_cluster_info_description = Fork of Hibari's nifty cluster_info OTP app
+pkg_cluster_info_homepage = https://github.com/basho/cluster_info
+pkg_cluster_info_fetch = git
+pkg_cluster_info_repo = https://github.com/basho/cluster_info
+pkg_cluster_info_commit = master
+
+PACKAGES += color
+pkg_color_name = color
+pkg_color_description = ANSI colors for your Erlang
+pkg_color_homepage = https://github.com/julianduque/erlang-color
+pkg_color_fetch = git
+pkg_color_repo = https://github.com/julianduque/erlang-color
+pkg_color_commit = master
+
+PACKAGES += confetti
+pkg_confetti_name = confetti
+pkg_confetti_description = Erlang configuration provider / application:get_env/2 on steroids
+pkg_confetti_homepage = https://github.com/jtendo/confetti
+pkg_confetti_fetch = git
+pkg_confetti_repo = https://github.com/jtendo/confetti
+pkg_confetti_commit = master
+
+PACKAGES += couchbeam
+pkg_couchbeam_name = couchbeam
+pkg_couchbeam_description = Apache CouchDB client in Erlang
+pkg_couchbeam_homepage = https://github.com/benoitc/couchbeam
+pkg_couchbeam_fetch = git
+pkg_couchbeam_repo = https://github.com/benoitc/couchbeam
+pkg_couchbeam_commit = master
+
+PACKAGES += covertool
+pkg_covertool_name = covertool
+pkg_covertool_description = Tool to convert Erlang cover data files into Cobertura XML reports
+pkg_covertool_homepage = https://github.com/idubrov/covertool
+pkg_covertool_fetch = git
+pkg_covertool_repo = https://github.com/idubrov/covertool
+pkg_covertool_commit = master
+
+PACKAGES += cowboy
+pkg_cowboy_name = cowboy
+pkg_cowboy_description = Small, fast and modular HTTP server.
+pkg_cowboy_homepage = http://ninenines.eu
+pkg_cowboy_fetch = git
+pkg_cowboy_repo = https://github.com/ninenines/cowboy
+pkg_cowboy_commit = 1.0.4
+
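+# NOTE (editorial sketch): most entries in this index track a moving branch
+# such as master, while cowboy pins the release tag 1.0.4 above. A project
+# that needs a different ref can override the whole fetch spec for one
+# dependency with a dep_<name> line of the form
+# "<fetch-method> <repo> <ref>", e.g. to pin the tag explicitly:
+#
+#     DEPS = cowboy
+#     dep_cowboy = git https://github.com/ninenines/cowboy 1.0.4
+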
+PACKAGES += cowdb
+pkg_cowdb_name = cowdb
+pkg_cowdb_description = Pure Key/Value database library for Erlang Applications
+pkg_cowdb_homepage = https://github.com/refuge/cowdb
+pkg_cowdb_fetch = git
+pkg_cowdb_repo = https://github.com/refuge/cowdb
+pkg_cowdb_commit = master
+
+PACKAGES += cowlib
+pkg_cowlib_name = cowlib
+pkg_cowlib_description = Support library for manipulating Web protocols.
+pkg_cowlib_homepage = http://ninenines.eu
+pkg_cowlib_fetch = git
+pkg_cowlib_repo = https://github.com/ninenines/cowlib
+pkg_cowlib_commit = 1.0.2
+
+PACKAGES += cpg
+pkg_cpg_name = cpg
+pkg_cpg_description = CloudI Process Groups
+pkg_cpg_homepage = https://github.com/okeuday/cpg
+pkg_cpg_fetch = git
+pkg_cpg_repo = https://github.com/okeuday/cpg
+pkg_cpg_commit = master
+
+PACKAGES += cqerl
+pkg_cqerl_name = cqerl
+pkg_cqerl_description = Native Erlang CQL client for Cassandra
+pkg_cqerl_homepage = https://matehat.github.io/cqerl/
+pkg_cqerl_fetch = git
+pkg_cqerl_repo = https://github.com/matehat/cqerl
+pkg_cqerl_commit = master
+
+PACKAGES += cr
+pkg_cr_name = cr
+pkg_cr_description = Chain Replication
+pkg_cr_homepage = https://synrc.com/apps/cr/doc/cr.htm
+pkg_cr_fetch = git
+pkg_cr_repo = https://github.com/spawnproc/cr
+pkg_cr_commit = master
+
+PACKAGES += cuttlefish
+pkg_cuttlefish_name = cuttlefish
+pkg_cuttlefish_description = never lose your childlike sense of wonder baby cuttlefish, promise me?
+pkg_cuttlefish_homepage = https://github.com/basho/cuttlefish
+pkg_cuttlefish_fetch = git
+pkg_cuttlefish_repo = https://github.com/basho/cuttlefish
+pkg_cuttlefish_commit = master
+
+PACKAGES += damocles
+pkg_damocles_name = damocles
+pkg_damocles_description = Erlang library for generating adversarial network conditions for QAing distributed applications/systems on a single Linux box.
+pkg_damocles_homepage = https://github.com/lostcolony/damocles
+pkg_damocles_fetch = git
+pkg_damocles_repo = https://github.com/lostcolony/damocles
+pkg_damocles_commit = master
+
+PACKAGES += debbie
+pkg_debbie_name = debbie
+pkg_debbie_description = .DEB Built In Erlang
+pkg_debbie_homepage = https://github.com/crownedgrouse/debbie
+pkg_debbie_fetch = git
+pkg_debbie_repo = https://github.com/crownedgrouse/debbie
+pkg_debbie_commit = master
+
+PACKAGES += decimal
+pkg_decimal_name = decimal
+pkg_decimal_description = An Erlang decimal arithmetic library
+pkg_decimal_homepage = https://github.com/tim/erlang-decimal
+pkg_decimal_fetch = git
+pkg_decimal_repo = https://github.com/tim/erlang-decimal
+pkg_decimal_commit = master
+
+PACKAGES += detergent
+pkg_detergent_name = detergent
+pkg_detergent_description = An emulsifying Erlang SOAP library
+pkg_detergent_homepage = https://github.com/devinus/detergent
+pkg_detergent_fetch = git
+pkg_detergent_repo = https://github.com/devinus/detergent
+pkg_detergent_commit = master
+
+PACKAGES += detest
+pkg_detest_name = detest
+pkg_detest_description = Tool for running tests on a cluster of erlang nodes
+pkg_detest_homepage = https://github.com/biokoda/detest
+pkg_detest_fetch = git
+pkg_detest_repo = https://github.com/biokoda/detest
+pkg_detest_commit = master
+
+PACKAGES += dh_date
+pkg_dh_date_name = dh_date
+pkg_dh_date_description = Date formatting / parsing library for erlang
+pkg_dh_date_homepage = https://github.com/daleharvey/dh_date
+pkg_dh_date_fetch = git
+pkg_dh_date_repo = https://github.com/daleharvey/dh_date
+pkg_dh_date_commit = master
+
+PACKAGES += dirbusterl
+pkg_dirbusterl_name = dirbusterl
+pkg_dirbusterl_description = DirBuster successor in Erlang
+pkg_dirbusterl_homepage = https://github.com/silentsignal/DirBustErl
+pkg_dirbusterl_fetch = git
+pkg_dirbusterl_repo = https://github.com/silentsignal/DirBustErl
+pkg_dirbusterl_commit = master
+
+PACKAGES += dispcount
+pkg_dispcount_name = dispcount
+pkg_dispcount_description = Erlang task dispatcher based on ETS counters.
+pkg_dispcount_homepage = https://github.com/ferd/dispcount
+pkg_dispcount_fetch = git
+pkg_dispcount_repo = https://github.com/ferd/dispcount
+pkg_dispcount_commit = master
+
+PACKAGES += dlhttpc
+pkg_dlhttpc_name = dlhttpc
+pkg_dlhttpc_description = dispcount-based lhttpc fork for massive amounts of requests to limited endpoints
+pkg_dlhttpc_homepage = https://github.com/ferd/dlhttpc
+pkg_dlhttpc_fetch = git
+pkg_dlhttpc_repo = https://github.com/ferd/dlhttpc
+pkg_dlhttpc_commit = master
+
+PACKAGES += dns
+pkg_dns_name = dns
+pkg_dns_description = Erlang DNS library
+pkg_dns_homepage = https://github.com/aetrion/dns_erlang
+pkg_dns_fetch = git
+pkg_dns_repo = https://github.com/aetrion/dns_erlang
+pkg_dns_commit = master
+
+PACKAGES += dnssd
+pkg_dnssd_name = dnssd
+pkg_dnssd_description = Erlang interface to Apple's Bonjour DNS Service Discovery implementation
+pkg_dnssd_homepage = https://github.com/benoitc/dnssd_erlang
+pkg_dnssd_fetch = git
+pkg_dnssd_repo = https://github.com/benoitc/dnssd_erlang
+pkg_dnssd_commit = master
+
+PACKAGES += dynamic_compile
+pkg_dynamic_compile_name = dynamic_compile
+pkg_dynamic_compile_description = compile and load erlang modules from string input
+pkg_dynamic_compile_homepage = https://github.com/jkvor/dynamic_compile
+pkg_dynamic_compile_fetch = git
+pkg_dynamic_compile_repo = https://github.com/jkvor/dynamic_compile
+pkg_dynamic_compile_commit = master
+
+PACKAGES += e2
+pkg_e2_name = e2
+pkg_e2_description = Library to simplify writing correct OTP applications.
+pkg_e2_homepage = http://e2project.org
+pkg_e2_fetch = git
+pkg_e2_repo = https://github.com/gar1t/e2
+pkg_e2_commit = master
+
+PACKAGES += eamf
+pkg_eamf_name = eamf
+pkg_eamf_description = eAMF provides Action Message Format (AMF) support for Erlang
+pkg_eamf_homepage = https://github.com/mrinalwadhwa/eamf
+pkg_eamf_fetch = git
+pkg_eamf_repo = https://github.com/mrinalwadhwa/eamf
+pkg_eamf_commit = master
+
+PACKAGES += eavro
+pkg_eavro_name = eavro
+pkg_eavro_description = Apache Avro encoder/decoder
+pkg_eavro_homepage = https://github.com/SIfoxDevTeam/eavro
+pkg_eavro_fetch = git
+pkg_eavro_repo = https://github.com/SIfoxDevTeam/eavro
+pkg_eavro_commit = master
+
+PACKAGES += ecapnp
+pkg_ecapnp_name = ecapnp
+pkg_ecapnp_description = Cap'n Proto library for Erlang
+pkg_ecapnp_homepage = https://github.com/kaos/ecapnp
+pkg_ecapnp_fetch = git
+pkg_ecapnp_repo = https://github.com/kaos/ecapnp
+pkg_ecapnp_commit = master
+
+PACKAGES += econfig
+pkg_econfig_name = econfig
+pkg_econfig_description = simple Erlang config handler using INI files
+pkg_econfig_homepage = https://github.com/benoitc/econfig
+pkg_econfig_fetch = git
+pkg_econfig_repo = https://github.com/benoitc/econfig
+pkg_econfig_commit = master
+
+PACKAGES += edate
+pkg_edate_name = edate
+pkg_edate_description = date manipulation library for erlang
+pkg_edate_homepage = https://github.com/dweldon/edate
+pkg_edate_fetch = git
+pkg_edate_repo = https://github.com/dweldon/edate
+pkg_edate_commit = master
+
+PACKAGES += edgar
+pkg_edgar_name = edgar
+pkg_edgar_description = Erlang Does GNU AR
+pkg_edgar_homepage = https://github.com/crownedgrouse/edgar
+pkg_edgar_fetch = git
+pkg_edgar_repo = https://github.com/crownedgrouse/edgar
+pkg_edgar_commit = master
+
+PACKAGES += edis
+pkg_edis_name = edis
+pkg_edis_description = An Erlang implementation of Redis KV Store
+pkg_edis_homepage = http://inaka.github.com/edis/
+pkg_edis_fetch = git
+pkg_edis_repo = https://github.com/inaka/edis
+pkg_edis_commit = master
+
+PACKAGES += edns
+pkg_edns_name = edns
+pkg_edns_description = Erlang/OTP DNS server
+pkg_edns_homepage = https://github.com/hcvst/erlang-dns
+pkg_edns_fetch = git
+pkg_edns_repo = https://github.com/hcvst/erlang-dns
+pkg_edns_commit = master
+
+PACKAGES += edown
+pkg_edown_name = edown
+pkg_edown_description = EDoc extension for generating Github-flavored Markdown
+pkg_edown_homepage = https://github.com/uwiger/edown
+pkg_edown_fetch = git
+pkg_edown_repo = https://github.com/uwiger/edown
+pkg_edown_commit = master
+
+PACKAGES += eep
+pkg_eep_name = eep
+pkg_eep_description = Erlang Easy Profiling (eep) application provides a way to analyze application performance and call hierarchy
+pkg_eep_homepage = https://github.com/virtan/eep
+pkg_eep_fetch = git
+pkg_eep_repo = https://github.com/virtan/eep
+pkg_eep_commit = master
+
+PACKAGES += eep_app
+pkg_eep_app_name = eep_app
+pkg_eep_app_description = Embedded Event Processing
+pkg_eep_app_homepage = https://github.com/darach/eep-erl
+pkg_eep_app_fetch = git
+pkg_eep_app_repo = https://github.com/darach/eep-erl
+pkg_eep_app_commit = master
+
+PACKAGES += efene
+pkg_efene_name = efene
+pkg_efene_description = Alternative syntax for the Erlang Programming Language focusing on simplicity, ease of use and programmer UX
+pkg_efene_homepage = https://github.com/efene/efene
+pkg_efene_fetch = git
+pkg_efene_repo = https://github.com/efene/efene
+pkg_efene_commit = master
+
+PACKAGES += egeoip
+pkg_egeoip_name = egeoip
+pkg_egeoip_description = Erlang IP Geolocation module, currently supporting the MaxMind GeoLite City Database.
+pkg_egeoip_homepage = https://github.com/mochi/egeoip
+pkg_egeoip_fetch = git
+pkg_egeoip_repo = https://github.com/mochi/egeoip
+pkg_egeoip_commit = master
+
+PACKAGES += ehsa
+pkg_ehsa_name = ehsa
+pkg_ehsa_description = Erlang HTTP server basic and digest authentication modules
+pkg_ehsa_homepage = https://bitbucket.org/a12n/ehsa
+pkg_ehsa_fetch = hg
+pkg_ehsa_repo = https://bitbucket.org/a12n/ehsa
+pkg_ehsa_commit = default
+
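+# NOTE (editorial sketch): fetch methods are not limited to git -- ehsa above
+# is fetched with Mercurial ("hg"), where the "default" branch plays the
+# role of master. The same per-dependency override form applies:
+#
+#     DEPS = ehsa
+#     dep_ehsa = hg https://bitbucket.org/a12n/ehsa default
+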
+PACKAGES += ej
+pkg_ej_name = ej
+pkg_ej_description = Helper module for working with Erlang terms representing JSON
+pkg_ej_homepage = https://github.com/seth/ej
+pkg_ej_fetch = git
+pkg_ej_repo = https://github.com/seth/ej
+pkg_ej_commit = master
+
+PACKAGES += ejabberd
+pkg_ejabberd_name = ejabberd
+pkg_ejabberd_description = Robust, ubiquitous and massively scalable Jabber / XMPP Instant Messaging platform
+pkg_ejabberd_homepage = https://github.com/processone/ejabberd
+pkg_ejabberd_fetch = git
+pkg_ejabberd_repo = https://github.com/processone/ejabberd
+pkg_ejabberd_commit = master
+
+PACKAGES += ejwt
+pkg_ejwt_name = ejwt
+pkg_ejwt_description = erlang library for JSON Web Token
+pkg_ejwt_homepage = https://github.com/artefactop/ejwt
+pkg_ejwt_fetch = git
+pkg_ejwt_repo = https://github.com/artefactop/ejwt
+pkg_ejwt_commit = master
+
+PACKAGES += ekaf
+pkg_ekaf_name = ekaf
+pkg_ekaf_description = A minimal, high-performance Kafka client in Erlang.
+pkg_ekaf_homepage = https://github.com/helpshift/ekaf
+pkg_ekaf_fetch = git
+pkg_ekaf_repo = https://github.com/helpshift/ekaf
+pkg_ekaf_commit = master
+
+PACKAGES += elarm
+pkg_elarm_name = elarm
+pkg_elarm_description = Alarm Manager for Erlang.
+pkg_elarm_homepage = https://github.com/esl/elarm
+pkg_elarm_fetch = git
+pkg_elarm_repo = https://github.com/esl/elarm
+pkg_elarm_commit = master
+
+PACKAGES += eleveldb
+pkg_eleveldb_name = eleveldb
+pkg_eleveldb_description = Erlang LevelDB API
+pkg_eleveldb_homepage = https://github.com/basho/eleveldb
+pkg_eleveldb_fetch = git
+pkg_eleveldb_repo = https://github.com/basho/eleveldb
+pkg_eleveldb_commit = master
+
+PACKAGES += elixir
+pkg_elixir_name = elixir
+pkg_elixir_description = Elixir is a dynamic, functional language designed for building scalable and maintainable applications
+pkg_elixir_homepage = https://elixir-lang.org/
+pkg_elixir_fetch = git
+pkg_elixir_repo = https://github.com/elixir-lang/elixir
+pkg_elixir_commit = master
+
+PACKAGES += elli
+pkg_elli_name = elli
+pkg_elli_description = Simple, robust and performant Erlang web server
+pkg_elli_homepage = https://github.com/elli-lib/elli
+pkg_elli_fetch = git
+pkg_elli_repo = https://github.com/elli-lib/elli
+pkg_elli_commit = master
+
+PACKAGES += elvis
+pkg_elvis_name = elvis
+pkg_elvis_description = Erlang Style Reviewer
+pkg_elvis_homepage = https://github.com/inaka/elvis
+pkg_elvis_fetch = git
+pkg_elvis_repo = https://github.com/inaka/elvis
+pkg_elvis_commit = master
+
+PACKAGES += emagick
+pkg_emagick_name = emagick
+pkg_emagick_description = Wrapper for Graphics/ImageMagick command line tool.
+pkg_emagick_homepage = https://github.com/kivra/emagick
+pkg_emagick_fetch = git
+pkg_emagick_repo = https://github.com/kivra/emagick
+pkg_emagick_commit = master
+
+PACKAGES += emysql
+pkg_emysql_name = emysql
+pkg_emysql_description = Stable, pure Erlang MySQL driver.
+pkg_emysql_homepage = https://github.com/Eonblast/Emysql
+pkg_emysql_fetch = git
+pkg_emysql_repo = https://github.com/Eonblast/Emysql
+pkg_emysql_commit = master
+
+PACKAGES += enm
+pkg_enm_name = enm
+pkg_enm_description = Erlang driver for nanomsg
+pkg_enm_homepage = https://github.com/basho/enm
+pkg_enm_fetch = git
+pkg_enm_repo = https://github.com/basho/enm
+pkg_enm_commit = master
+
+PACKAGES += entop
+pkg_entop_name = entop
+pkg_entop_description = A top-like tool for monitoring an Erlang node
+pkg_entop_homepage = https://github.com/mazenharake/entop
+pkg_entop_fetch = git
+pkg_entop_repo = https://github.com/mazenharake/entop
+pkg_entop_commit = master
+
+PACKAGES += epcap
+pkg_epcap_name = epcap
+pkg_epcap_description = Erlang packet capture interface using pcap
+pkg_epcap_homepage = https://github.com/msantos/epcap
+pkg_epcap_fetch = git
+pkg_epcap_repo = https://github.com/msantos/epcap
+pkg_epcap_commit = master
+
+PACKAGES += eper
+pkg_eper_name = eper
+pkg_eper_description = Erlang performance and debugging tools.
+pkg_eper_homepage = https://github.com/massemanet/eper
+pkg_eper_fetch = git
+pkg_eper_repo = https://github.com/massemanet/eper
+pkg_eper_commit = master
+
+PACKAGES += epgsql
+pkg_epgsql_name = epgsql
+pkg_epgsql_description = Erlang PostgreSQL client library.
+pkg_epgsql_homepage = https://github.com/epgsql/epgsql
+pkg_epgsql_fetch = git
+pkg_epgsql_repo = https://github.com/epgsql/epgsql
+pkg_epgsql_commit = master
+
+PACKAGES += episcina
+pkg_episcina_name = episcina
+pkg_episcina_description = A simple non intrusive resource pool for connections
+pkg_episcina_homepage = https://github.com/erlware/episcina
+pkg_episcina_fetch = git
+pkg_episcina_repo = https://github.com/erlware/episcina
+pkg_episcina_commit = master
+
+PACKAGES += eplot
+pkg_eplot_name = eplot
+pkg_eplot_description = A plot engine written in erlang.
+pkg_eplot_homepage = https://github.com/psyeugenic/eplot
+pkg_eplot_fetch = git
+pkg_eplot_repo = https://github.com/psyeugenic/eplot
+pkg_eplot_commit = master
+
+PACKAGES += epocxy
+pkg_epocxy_name = epocxy
+pkg_epocxy_description = Erlang Patterns of Concurrency
+pkg_epocxy_homepage = https://github.com/duomark/epocxy
+pkg_epocxy_fetch = git
+pkg_epocxy_repo = https://github.com/duomark/epocxy
+pkg_epocxy_commit = master
+
+PACKAGES += epubnub
+pkg_epubnub_name = epubnub
+pkg_epubnub_description = Erlang PubNub API
+pkg_epubnub_homepage = https://github.com/tsloughter/epubnub
+pkg_epubnub_fetch = git
+pkg_epubnub_repo = https://github.com/tsloughter/epubnub
+pkg_epubnub_commit = master
+
+PACKAGES += eqm
+pkg_eqm_name = eqm
+pkg_eqm_description = Erlang pub sub with supply-demand channels
+pkg_eqm_homepage = https://github.com/loucash/eqm
+pkg_eqm_fetch = git
+pkg_eqm_repo = https://github.com/loucash/eqm
+pkg_eqm_commit = master
+
+PACKAGES += eredis
+pkg_eredis_name = eredis
+pkg_eredis_description = Erlang Redis client
+pkg_eredis_homepage = https://github.com/wooga/eredis
+pkg_eredis_fetch = git
+pkg_eredis_repo = https://github.com/wooga/eredis
+pkg_eredis_commit = master
+
+PACKAGES += eredis_pool
+pkg_eredis_pool_name = eredis_pool
+pkg_eredis_pool_description = eredis_pool is a pool of Redis clients, using eredis and poolboy.
+pkg_eredis_pool_homepage = https://github.com/hiroeorz/eredis_pool
+pkg_eredis_pool_fetch = git
+pkg_eredis_pool_repo = https://github.com/hiroeorz/eredis_pool
+pkg_eredis_pool_commit = master
+
+PACKAGES += erl_streams
+pkg_erl_streams_name = erl_streams
+pkg_erl_streams_description = Streams in Erlang
+pkg_erl_streams_homepage = https://github.com/epappas/erl_streams
+pkg_erl_streams_fetch = git
+pkg_erl_streams_repo = https://github.com/epappas/erl_streams
+pkg_erl_streams_commit = master
+
+PACKAGES += erlang_cep
+pkg_erlang_cep_name = erlang_cep
+pkg_erlang_cep_description = A basic CEP package written in erlang
+pkg_erlang_cep_homepage = https://github.com/danmacklin/erlang_cep
+pkg_erlang_cep_fetch = git
+pkg_erlang_cep_repo = https://github.com/danmacklin/erlang_cep
+pkg_erlang_cep_commit = master
+
+PACKAGES += erlang_js
+pkg_erlang_js_name = erlang_js
+pkg_erlang_js_description = A linked-in driver for Erlang to Mozilla's Spidermonkey Javascript runtime.
+pkg_erlang_js_homepage = https://github.com/basho/erlang_js
+pkg_erlang_js_fetch = git
+pkg_erlang_js_repo = https://github.com/basho/erlang_js
+pkg_erlang_js_commit = master
+
+PACKAGES += erlang_localtime
+pkg_erlang_localtime_name = erlang_localtime
+pkg_erlang_localtime_description = Erlang library for conversion from one local time to another
+pkg_erlang_localtime_homepage = https://github.com/dmitryme/erlang_localtime
+pkg_erlang_localtime_fetch = git
+pkg_erlang_localtime_repo = https://github.com/dmitryme/erlang_localtime
+pkg_erlang_localtime_commit = master
+
+PACKAGES += erlang_smtp
+pkg_erlang_smtp_name = erlang_smtp
+pkg_erlang_smtp_description = Erlang SMTP and POP3 server code.
+pkg_erlang_smtp_homepage = https://github.com/tonyg/erlang-smtp
+pkg_erlang_smtp_fetch = git
+pkg_erlang_smtp_repo = https://github.com/tonyg/erlang-smtp
+pkg_erlang_smtp_commit = master
+
+PACKAGES += erlang_term
+pkg_erlang_term_name = erlang_term
+pkg_erlang_term_description = Erlang Term Info
+pkg_erlang_term_homepage = https://github.com/okeuday/erlang_term
+pkg_erlang_term_fetch = git
+pkg_erlang_term_repo = https://github.com/okeuday/erlang_term
+pkg_erlang_term_commit = master
+
+PACKAGES += erlastic_search
+pkg_erlastic_search_name = erlastic_search
+pkg_erlastic_search_description = An Erlang app for communicating with Elasticsearch's REST interface.
+pkg_erlastic_search_homepage = https://github.com/tsloughter/erlastic_search
+pkg_erlastic_search_fetch = git
+pkg_erlastic_search_repo = https://github.com/tsloughter/erlastic_search
+pkg_erlastic_search_commit = master
+
+PACKAGES += erlasticsearch
+pkg_erlasticsearch_name = erlasticsearch
+pkg_erlasticsearch_description = Erlang thrift interface to elastic_search
+pkg_erlasticsearch_homepage = https://github.com/dieswaytoofast/erlasticsearch
+pkg_erlasticsearch_fetch = git
+pkg_erlasticsearch_repo = https://github.com/dieswaytoofast/erlasticsearch
+pkg_erlasticsearch_commit = master
+
+PACKAGES += erlbrake
+pkg_erlbrake_name = erlbrake
+pkg_erlbrake_description = Erlang Airbrake notification client
+pkg_erlbrake_homepage = https://github.com/kenpratt/erlbrake
+pkg_erlbrake_fetch = git
+pkg_erlbrake_repo = https://github.com/kenpratt/erlbrake
+pkg_erlbrake_commit = master
+
+PACKAGES += erlcloud
+pkg_erlcloud_name = erlcloud
+pkg_erlcloud_description = Cloud Computing library for erlang (Amazon EC2, S3, SQS, SimpleDB, Mechanical Turk, ELB)
+pkg_erlcloud_homepage = https://github.com/gleber/erlcloud
+pkg_erlcloud_fetch = git
+pkg_erlcloud_repo = https://github.com/gleber/erlcloud
+pkg_erlcloud_commit = master
+
+PACKAGES += erlcron
+pkg_erlcron_name = erlcron
+pkg_erlcron_description = Erlang cronish system
+pkg_erlcron_homepage = https://github.com/erlware/erlcron
+pkg_erlcron_fetch = git
+pkg_erlcron_repo = https://github.com/erlware/erlcron
+pkg_erlcron_commit = master
+
+PACKAGES += erldb
+pkg_erldb_name = erldb
+pkg_erldb_description = ORM (Object-relational mapping) application implemented in Erlang
+pkg_erldb_homepage = http://erldb.org
+pkg_erldb_fetch = git
+pkg_erldb_repo = https://github.com/erldb/erldb
+pkg_erldb_commit = master
+
+PACKAGES += erldis
+pkg_erldis_name = erldis
+pkg_erldis_description = redis erlang client library
+pkg_erldis_homepage = https://github.com/cstar/erldis
+pkg_erldis_fetch = git
+pkg_erldis_repo = https://github.com/cstar/erldis
+pkg_erldis_commit = master
+
+PACKAGES += erldns
+pkg_erldns_name = erldns
+pkg_erldns_description = DNS server, in erlang.
+pkg_erldns_homepage = https://github.com/aetrion/erl-dns
+pkg_erldns_fetch = git
+pkg_erldns_repo = https://github.com/aetrion/erl-dns
+pkg_erldns_commit = master
+
+PACKAGES += erldocker
+pkg_erldocker_name = erldocker
+pkg_erldocker_description = Docker Remote API client for Erlang
+pkg_erldocker_homepage = https://github.com/proger/erldocker
+pkg_erldocker_fetch = git
+pkg_erldocker_repo = https://github.com/proger/erldocker
+pkg_erldocker_commit = master
+
+PACKAGES += erlfsmon
+pkg_erlfsmon_name = erlfsmon
+pkg_erlfsmon_description = Erlang filesystem event watcher for Linux and OSX
+pkg_erlfsmon_homepage = https://github.com/proger/erlfsmon
+pkg_erlfsmon_fetch = git
+pkg_erlfsmon_repo = https://github.com/proger/erlfsmon
+pkg_erlfsmon_commit = master
+
+PACKAGES += erlgit
+pkg_erlgit_name = erlgit
+pkg_erlgit_description = Erlang convenience wrapper around git executable
+pkg_erlgit_homepage = https://github.com/gleber/erlgit
+pkg_erlgit_fetch = git
+pkg_erlgit_repo = https://github.com/gleber/erlgit
+pkg_erlgit_commit = master
+
+PACKAGES += erlguten
+pkg_erlguten_name = erlguten
+pkg_erlguten_description = ErlGuten is a system for high-quality typesetting, written purely in Erlang.
+pkg_erlguten_homepage = https://github.com/richcarl/erlguten
+pkg_erlguten_fetch = git
+pkg_erlguten_repo = https://github.com/richcarl/erlguten
+pkg_erlguten_commit = master
+
+PACKAGES += erlmc
+pkg_erlmc_name = erlmc
+pkg_erlmc_description = Erlang memcached binary protocol client
+pkg_erlmc_homepage = https://github.com/jkvor/erlmc
+pkg_erlmc_fetch = git
+pkg_erlmc_repo = https://github.com/jkvor/erlmc
+pkg_erlmc_commit = master
+
+PACKAGES += erlmongo
+pkg_erlmongo_name = erlmongo
+pkg_erlmongo_description = Record based Erlang driver for MongoDB with gridfs support
+pkg_erlmongo_homepage = https://github.com/SergejJurecko/erlmongo
+pkg_erlmongo_fetch = git
+pkg_erlmongo_repo = https://github.com/SergejJurecko/erlmongo
+pkg_erlmongo_commit = master
+
+PACKAGES += erlog
+pkg_erlog_name = erlog
+pkg_erlog_description = Prolog interpreter in and for Erlang
+pkg_erlog_homepage = https://github.com/rvirding/erlog
+pkg_erlog_fetch = git
+pkg_erlog_repo = https://github.com/rvirding/erlog
+pkg_erlog_commit = master
+
+PACKAGES += erlpass
+pkg_erlpass_name = erlpass
+pkg_erlpass_description = A library to handle password hashing and changing in a safe manner, independent from any kind of storage whatsoever.
+pkg_erlpass_homepage = https://github.com/ferd/erlpass
+pkg_erlpass_fetch = git
+pkg_erlpass_repo = https://github.com/ferd/erlpass
+pkg_erlpass_commit = master
+
+PACKAGES += erlport
+pkg_erlport_name = erlport
+pkg_erlport_description = ErlPort - connect Erlang to other languages
+pkg_erlport_homepage = https://github.com/hdima/erlport
+pkg_erlport_fetch = git
+pkg_erlport_repo = https://github.com/hdima/erlport
+pkg_erlport_commit = master
+
+PACKAGES += erlsh
+pkg_erlsh_name = erlsh
+pkg_erlsh_description = Erlang shell tools
+pkg_erlsh_homepage = https://github.com/proger/erlsh
+pkg_erlsh_fetch = git
+pkg_erlsh_repo = https://github.com/proger/erlsh
+pkg_erlsh_commit = master
+
+PACKAGES += erlsha2
+pkg_erlsha2_name = erlsha2
+pkg_erlsha2_description = SHA-224, SHA-256, SHA-384, SHA-512 implemented in Erlang NIFs.
+pkg_erlsha2_homepage = https://github.com/vinoski/erlsha2
+pkg_erlsha2_fetch = git
+pkg_erlsha2_repo = https://github.com/vinoski/erlsha2
+pkg_erlsha2_commit = master
+
+PACKAGES += erlsom
+pkg_erlsom_name = erlsom
+pkg_erlsom_description = XML parser for Erlang
+pkg_erlsom_homepage = https://github.com/willemdj/erlsom
+pkg_erlsom_fetch = git
+pkg_erlsom_repo = https://github.com/willemdj/erlsom
+pkg_erlsom_commit = master
+
+PACKAGES += erlubi
+pkg_erlubi_name = erlubi
+pkg_erlubi_description = Ubigraph Erlang Client (and Process Visualizer)
+pkg_erlubi_homepage = https://github.com/krestenkrab/erlubi
+pkg_erlubi_fetch = git
+pkg_erlubi_repo = https://github.com/krestenkrab/erlubi
+pkg_erlubi_commit = master
+
+PACKAGES += erlvolt
+pkg_erlvolt_name = erlvolt
+pkg_erlvolt_description = VoltDB Erlang Client Driver
+pkg_erlvolt_homepage = https://github.com/VoltDB/voltdb-client-erlang
+pkg_erlvolt_fetch = git
+pkg_erlvolt_repo = https://github.com/VoltDB/voltdb-client-erlang
+pkg_erlvolt_commit = master
+
+PACKAGES += erlware_commons
+pkg_erlware_commons_name = erlware_commons
+pkg_erlware_commons_description = Erlware Commons is an Erlware project focused on all aspects of reusable Erlang components.
+pkg_erlware_commons_homepage = https://github.com/erlware/erlware_commons
+pkg_erlware_commons_fetch = git
+pkg_erlware_commons_repo = https://github.com/erlware/erlware_commons
+pkg_erlware_commons_commit = master
+
+PACKAGES += erlydtl
+pkg_erlydtl_name = erlydtl
+pkg_erlydtl_description = Django Template Language for Erlang.
+pkg_erlydtl_homepage = https://github.com/erlydtl/erlydtl
+pkg_erlydtl_fetch = git
+pkg_erlydtl_repo = https://github.com/erlydtl/erlydtl
+pkg_erlydtl_commit = master
+
+PACKAGES += errd
+pkg_errd_name = errd
+pkg_errd_description = Erlang RRDTool library
+pkg_errd_homepage = https://github.com/archaelus/errd
+pkg_errd_fetch = git
+pkg_errd_repo = https://github.com/archaelus/errd
+pkg_errd_commit = master
+
+PACKAGES += erserve
+pkg_erserve_name = erserve
+pkg_erserve_description = Erlang/Rserve communication interface
+pkg_erserve_homepage = https://github.com/del/erserve
+pkg_erserve_fetch = git
+pkg_erserve_repo = https://github.com/del/erserve
+pkg_erserve_commit = master
+
+PACKAGES += erwa
+pkg_erwa_name = erwa
+pkg_erwa_description = A WAMP router and client written in Erlang.
+pkg_erwa_homepage = https://github.com/bwegh/erwa
+pkg_erwa_fetch = git
+pkg_erwa_repo = https://github.com/bwegh/erwa
+pkg_erwa_commit = master
+
+PACKAGES += escalus
+pkg_escalus_name = escalus
+pkg_escalus_description = An XMPP client library in Erlang for conveniently testing XMPP servers
+pkg_escalus_homepage = https://github.com/esl/escalus
+pkg_escalus_fetch = git
+pkg_escalus_repo = https://github.com/esl/escalus
+pkg_escalus_commit = master
+
+PACKAGES += espec
+pkg_espec_name = espec
+pkg_espec_description = ESpec: Behaviour driven development framework for Erlang
+pkg_espec_homepage = https://github.com/lucaspiller/espec
+pkg_espec_fetch = git
+pkg_espec_repo = https://github.com/lucaspiller/espec
+pkg_espec_commit = master
+
+PACKAGES += estatsd
+pkg_estatsd_name = estatsd
+pkg_estatsd_description = Erlang stats aggregation app that periodically flushes data to graphite
+pkg_estatsd_homepage = https://github.com/RJ/estatsd
+pkg_estatsd_fetch = git
+pkg_estatsd_repo = https://github.com/RJ/estatsd
+pkg_estatsd_commit = master
+
+PACKAGES += etap
+pkg_etap_name = etap
+pkg_etap_description = etap is a simple erlang testing library that provides TAP compliant output.
+pkg_etap_homepage = https://github.com/ngerakines/etap
+pkg_etap_fetch = git
+pkg_etap_repo = https://github.com/ngerakines/etap
+pkg_etap_commit = master
+
+PACKAGES += etest
+pkg_etest_name = etest
+pkg_etest_description = A lightweight, convention over configuration test framework for Erlang
+pkg_etest_homepage = https://github.com/wooga/etest
+pkg_etest_fetch = git
+pkg_etest_repo = https://github.com/wooga/etest
+pkg_etest_commit = master
+
+PACKAGES += etest_http
+pkg_etest_http_name = etest_http
+pkg_etest_http_description = etest Assertions around HTTP (client-side)
+pkg_etest_http_homepage = https://github.com/wooga/etest_http
+pkg_etest_http_fetch = git
+pkg_etest_http_repo = https://github.com/wooga/etest_http
+pkg_etest_http_commit = master
+
+PACKAGES += etoml
+pkg_etoml_name = etoml
+pkg_etoml_description = Erlang parser for the TOML language
+pkg_etoml_homepage = https://github.com/kalta/etoml
+pkg_etoml_fetch = git
+pkg_etoml_repo = https://github.com/kalta/etoml
+pkg_etoml_commit = master
+
+PACKAGES += eunit
+pkg_eunit_name = eunit
+pkg_eunit_description = The EUnit lightweight unit testing framework for Erlang - this is the canonical development repository.
+pkg_eunit_homepage = https://github.com/richcarl/eunit
+pkg_eunit_fetch = git
+pkg_eunit_repo = https://github.com/richcarl/eunit
+pkg_eunit_commit = master
+
+PACKAGES += eunit_formatters
+pkg_eunit_formatters_name = eunit_formatters
+pkg_eunit_formatters_description = Because eunit's output sucks. Let's make it better.
+pkg_eunit_formatters_homepage = https://github.com/seancribbs/eunit_formatters
+pkg_eunit_formatters_fetch = git
+pkg_eunit_formatters_repo = https://github.com/seancribbs/eunit_formatters
+pkg_eunit_formatters_commit = master
+
+PACKAGES += euthanasia
+pkg_euthanasia_name = euthanasia
+pkg_euthanasia_description = Merciful killer for your Erlang processes
+pkg_euthanasia_homepage = https://github.com/doubleyou/euthanasia
+pkg_euthanasia_fetch = git
+pkg_euthanasia_repo = https://github.com/doubleyou/euthanasia
+pkg_euthanasia_commit = master
+
+PACKAGES += evum
+pkg_evum_name = evum
+pkg_evum_description = Spawn Linux VMs as Erlang processes in the Erlang VM
+pkg_evum_homepage = https://github.com/msantos/evum
+pkg_evum_fetch = git
+pkg_evum_repo = https://github.com/msantos/evum
+pkg_evum_commit = master
+
+PACKAGES += exec
+pkg_exec_name = erlexec
+pkg_exec_description = Execute and control OS processes from Erlang/OTP.
+pkg_exec_homepage = http://saleyn.github.com/erlexec
+pkg_exec_fetch = git
+pkg_exec_repo = https://github.com/saleyn/erlexec
+pkg_exec_commit = master
+
+PACKAGES += exml
+pkg_exml_name = exml
+pkg_exml_description = XML parsing library in Erlang
+pkg_exml_homepage = https://github.com/paulgray/exml
+pkg_exml_fetch = git
+pkg_exml_repo = https://github.com/paulgray/exml
+pkg_exml_commit = master
+
+PACKAGES += exometer
+pkg_exometer_name = exometer
+pkg_exometer_description = Basic measurement objects and probe behavior
+pkg_exometer_homepage = https://github.com/Feuerlabs/exometer
+pkg_exometer_fetch = git
+pkg_exometer_repo = https://github.com/Feuerlabs/exometer
+pkg_exometer_commit = master
+
+PACKAGES += exs1024
+pkg_exs1024_name = exs1024
+pkg_exs1024_description = Xorshift1024star pseudo random number generator for Erlang.
+pkg_exs1024_homepage = https://github.com/jj1bdx/exs1024
+pkg_exs1024_fetch = git
+pkg_exs1024_repo = https://github.com/jj1bdx/exs1024
+pkg_exs1024_commit = master
+
+PACKAGES += exs64
+pkg_exs64_name = exs64
+pkg_exs64_description = Xorshift64star pseudo random number generator for Erlang.
+pkg_exs64_homepage = https://github.com/jj1bdx/exs64
+pkg_exs64_fetch = git
+pkg_exs64_repo = https://github.com/jj1bdx/exs64
+pkg_exs64_commit = master
+
+PACKAGES += exsplus116
+pkg_exsplus116_name = exsplus116
+pkg_exsplus116_description = Xorshift116plus for Erlang
+pkg_exsplus116_homepage = https://github.com/jj1bdx/exsplus116
+pkg_exsplus116_fetch = git
+pkg_exsplus116_repo = https://github.com/jj1bdx/exsplus116
+pkg_exsplus116_commit = master
+
+PACKAGES += exsplus128
+pkg_exsplus128_name = exsplus128
+pkg_exsplus128_description = Xorshift128plus pseudo random number generator for Erlang.
+pkg_exsplus128_homepage = https://github.com/jj1bdx/exsplus128
+pkg_exsplus128_fetch = git
+pkg_exsplus128_repo = https://github.com/jj1bdx/exsplus128
+pkg_exsplus128_commit = master
+
+PACKAGES += ezmq
+pkg_ezmq_name = ezmq
+pkg_ezmq_description = ZeroMQ implemented in Erlang
+pkg_ezmq_homepage = https://github.com/RoadRunnr/ezmq
+pkg_ezmq_fetch = git
+pkg_ezmq_repo = https://github.com/RoadRunnr/ezmq
+pkg_ezmq_commit = master
+
+PACKAGES += ezmtp
+pkg_ezmtp_name = ezmtp
+pkg_ezmtp_description = ZMTP protocol in pure Erlang.
+pkg_ezmtp_homepage = https://github.com/a13x/ezmtp
+pkg_ezmtp_fetch = git
+pkg_ezmtp_repo = https://github.com/a13x/ezmtp
+pkg_ezmtp_commit = master
+
+PACKAGES += fast_disk_log
+pkg_fast_disk_log_name = fast_disk_log
+pkg_fast_disk_log_description = Pool-based asynchronous Erlang disk logger
+pkg_fast_disk_log_homepage = https://github.com/lpgauth/fast_disk_log
+pkg_fast_disk_log_fetch = git
+pkg_fast_disk_log_repo = https://github.com/lpgauth/fast_disk_log
+pkg_fast_disk_log_commit = master
+
+PACKAGES += feeder
+pkg_feeder_name = feeder
+pkg_feeder_description = Stream parse RSS and Atom formatted XML feeds.
+pkg_feeder_homepage = https://github.com/michaelnisi/feeder
+pkg_feeder_fetch = git
+pkg_feeder_repo = https://github.com/michaelnisi/feeder
+pkg_feeder_commit = master
+
+PACKAGES += find_crate
+pkg_find_crate_name = find_crate
+pkg_find_crate_description = Find Rust libs and exes in Erlang application priv directory
+pkg_find_crate_homepage = https://github.com/goertzenator/find_crate
+pkg_find_crate_fetch = git
+pkg_find_crate_repo = https://github.com/goertzenator/find_crate
+pkg_find_crate_commit = master
+
+PACKAGES += fix
+pkg_fix_name = fix
+pkg_fix_description = http://fixprotocol.org/ implementation.
+pkg_fix_homepage = https://github.com/maxlapshin/fix
+pkg_fix_fetch = git
+pkg_fix_repo = https://github.com/maxlapshin/fix
+pkg_fix_commit = master
+
+PACKAGES += flower
+pkg_flower_name = flower
+pkg_flower_description = FlowER - an Erlang OpenFlow development platform
+pkg_flower_homepage = https://github.com/travelping/flower
+pkg_flower_fetch = git
+pkg_flower_repo = https://github.com/travelping/flower
+pkg_flower_commit = master
+
+PACKAGES += fn
+pkg_fn_name = fn
+pkg_fn_description = Function utilities for Erlang
+pkg_fn_homepage = https://github.com/reiddraper/fn
+pkg_fn_fetch = git
+pkg_fn_repo = https://github.com/reiddraper/fn
+pkg_fn_commit = master
+
+PACKAGES += folsom
+pkg_folsom_name = folsom
+pkg_folsom_description = Expose Erlang Events and Metrics
+pkg_folsom_homepage = https://github.com/boundary/folsom
+pkg_folsom_fetch = git
+pkg_folsom_repo = https://github.com/boundary/folsom
+pkg_folsom_commit = master
+
+PACKAGES += folsom_cowboy
+pkg_folsom_cowboy_name = folsom_cowboy
+pkg_folsom_cowboy_description = A Cowboy based Folsom HTTP Wrapper.
+pkg_folsom_cowboy_homepage = https://github.com/boundary/folsom_cowboy
+pkg_folsom_cowboy_fetch = git
+pkg_folsom_cowboy_repo = https://github.com/boundary/folsom_cowboy
+pkg_folsom_cowboy_commit = master
+
+PACKAGES += folsomite
+pkg_folsomite_name = folsomite
+pkg_folsomite_description = blow up your graphite / riemann server with folsom metrics
+pkg_folsomite_homepage = https://github.com/campanja/folsomite
+pkg_folsomite_fetch = git
+pkg_folsomite_repo = https://github.com/campanja/folsomite
+pkg_folsomite_commit = master
+
+PACKAGES += fs
+pkg_fs_name = fs
+pkg_fs_description = Erlang FileSystem Listener
+pkg_fs_homepage = https://github.com/synrc/fs
+pkg_fs_fetch = git
+pkg_fs_repo = https://github.com/synrc/fs
+pkg_fs_commit = master
+
+PACKAGES += fuse
+pkg_fuse_name = fuse
+pkg_fuse_description = A Circuit Breaker for Erlang
+pkg_fuse_homepage = https://github.com/jlouis/fuse
+pkg_fuse_fetch = git
+pkg_fuse_repo = https://github.com/jlouis/fuse
+pkg_fuse_commit = master
+
+PACKAGES += gcm
+pkg_gcm_name = gcm
+pkg_gcm_description = An Erlang application for Google Cloud Messaging
+pkg_gcm_homepage = https://github.com/pdincau/gcm-erlang
+pkg_gcm_fetch = git
+pkg_gcm_repo = https://github.com/pdincau/gcm-erlang
+pkg_gcm_commit = master
+
+PACKAGES += gcprof
+pkg_gcprof_name = gcprof
+pkg_gcprof_description = Garbage Collection profiler for Erlang
+pkg_gcprof_homepage = https://github.com/knutin/gcprof
+pkg_gcprof_fetch = git
+pkg_gcprof_repo = https://github.com/knutin/gcprof
+pkg_gcprof_commit = master
+
+PACKAGES += geas
+pkg_geas_name = geas
+pkg_geas_description = Guess Erlang Application Scattering
+pkg_geas_homepage = https://github.com/crownedgrouse/geas
+pkg_geas_fetch = git
+pkg_geas_repo = https://github.com/crownedgrouse/geas
+pkg_geas_commit = master
+
+PACKAGES += geef
+pkg_geef_name = geef
+pkg_geef_description = Git NEEEEF (Erlang NIF)
+pkg_geef_homepage = https://github.com/carlosmn/geef
+pkg_geef_fetch = git
+pkg_geef_repo = https://github.com/carlosmn/geef
+pkg_geef_commit = master
+
+PACKAGES += gen_coap
+pkg_gen_coap_name = gen_coap
+pkg_gen_coap_description = Generic Erlang CoAP Client/Server
+pkg_gen_coap_homepage = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_fetch = git
+pkg_gen_coap_repo = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_commit = master
+
+PACKAGES += gen_cycle
+pkg_gen_cycle_name = gen_cycle
+pkg_gen_cycle_description = Simple, generic OTP behaviour for recurring tasks
+pkg_gen_cycle_homepage = https://github.com/aerosol/gen_cycle
+pkg_gen_cycle_fetch = git
+pkg_gen_cycle_repo = https://github.com/aerosol/gen_cycle
+pkg_gen_cycle_commit = develop
+
+PACKAGES += gen_icmp
+pkg_gen_icmp_name = gen_icmp
+pkg_gen_icmp_description = Erlang interface to ICMP sockets
+pkg_gen_icmp_homepage = https://github.com/msantos/gen_icmp
+pkg_gen_icmp_fetch = git
+pkg_gen_icmp_repo = https://github.com/msantos/gen_icmp
+pkg_gen_icmp_commit = master
+
+PACKAGES += gen_leader
+pkg_gen_leader_name = gen_leader
+pkg_gen_leader_description = leader election behavior
+pkg_gen_leader_homepage = https://github.com/garret-smith/gen_leader_revival
+pkg_gen_leader_fetch = git
+pkg_gen_leader_repo = https://github.com/garret-smith/gen_leader_revival
+pkg_gen_leader_commit = master
+
+PACKAGES += gen_nb_server
+pkg_gen_nb_server_name = gen_nb_server
+pkg_gen_nb_server_description = OTP behavior for writing non-blocking servers
+pkg_gen_nb_server_homepage = https://github.com/kevsmith/gen_nb_server
+pkg_gen_nb_server_fetch = git
+pkg_gen_nb_server_repo = https://github.com/kevsmith/gen_nb_server
+pkg_gen_nb_server_commit = master
+
+PACKAGES += gen_paxos
+pkg_gen_paxos_name = gen_paxos
+pkg_gen_paxos_description = An Erlang/OTP-style implementation of the PAXOS distributed consensus protocol
+pkg_gen_paxos_homepage = https://github.com/gburd/gen_paxos
+pkg_gen_paxos_fetch = git
+pkg_gen_paxos_repo = https://github.com/gburd/gen_paxos
+pkg_gen_paxos_commit = master
+
+PACKAGES += gen_rpc
+pkg_gen_rpc_name = gen_rpc
+pkg_gen_rpc_description = A scalable RPC library for Erlang-VM based languages
+pkg_gen_rpc_homepage = https://github.com/priestjim/gen_rpc.git
+pkg_gen_rpc_fetch = git
+pkg_gen_rpc_repo = https://github.com/priestjim/gen_rpc.git
+pkg_gen_rpc_commit = master
+
+PACKAGES += gen_smtp
+pkg_gen_smtp_name = gen_smtp
+pkg_gen_smtp_description = A generic Erlang SMTP server and client that can be extended via callback modules
+pkg_gen_smtp_homepage = https://github.com/Vagabond/gen_smtp
+pkg_gen_smtp_fetch = git
+pkg_gen_smtp_repo = https://github.com/Vagabond/gen_smtp
+pkg_gen_smtp_commit = master
+
+PACKAGES += gen_tracker
+pkg_gen_tracker_name = gen_tracker
+pkg_gen_tracker_description = supervisor with ets handling of children and their metadata
+pkg_gen_tracker_homepage = https://github.com/erlyvideo/gen_tracker
+pkg_gen_tracker_fetch = git
+pkg_gen_tracker_repo = https://github.com/erlyvideo/gen_tracker
+pkg_gen_tracker_commit = master
+
+PACKAGES += gen_unix
+pkg_gen_unix_name = gen_unix
+pkg_gen_unix_description = Erlang Unix socket interface
+pkg_gen_unix_homepage = https://github.com/msantos/gen_unix
+pkg_gen_unix_fetch = git
+pkg_gen_unix_repo = https://github.com/msantos/gen_unix
+pkg_gen_unix_commit = master
+
+PACKAGES += geode
+pkg_geode_name = geode
+pkg_geode_description = geohash/proximity lookup in pure, uncut erlang.
+pkg_geode_homepage = https://github.com/bradfordw/geode
+pkg_geode_fetch = git
+pkg_geode_repo = https://github.com/bradfordw/geode
+pkg_geode_commit = master
+
+PACKAGES += getopt
+pkg_getopt_name = getopt
+pkg_getopt_description = Module to parse command line arguments using the GNU getopt syntax
+pkg_getopt_homepage = https://github.com/jcomellas/getopt
+pkg_getopt_fetch = git
+pkg_getopt_repo = https://github.com/jcomellas/getopt
+pkg_getopt_commit = master
+
+PACKAGES += gettext
+pkg_gettext_name = gettext
+pkg_gettext_description = Erlang internationalization library.
+pkg_gettext_homepage = https://github.com/etnt/gettext
+pkg_gettext_fetch = git
+pkg_gettext_repo = https://github.com/etnt/gettext
+pkg_gettext_commit = master
+
+PACKAGES += giallo
+pkg_giallo_name = giallo
+pkg_giallo_description = Small and flexible web framework on top of Cowboy
+pkg_giallo_homepage = https://github.com/kivra/giallo
+pkg_giallo_fetch = git
+pkg_giallo_repo = https://github.com/kivra/giallo
+pkg_giallo_commit = master
+
+PACKAGES += gin
+pkg_gin_name = gin
+pkg_gin_description = Guard expression helpers for Erlang, implemented as a parse_transform
+pkg_gin_homepage = https://github.com/mad-cocktail/gin
+pkg_gin_fetch = git
+pkg_gin_repo = https://github.com/mad-cocktail/gin
+pkg_gin_commit = master
+
+PACKAGES += gitty
+pkg_gitty_name = gitty
+pkg_gitty_description = Git access in erlang
+pkg_gitty_homepage = https://github.com/maxlapshin/gitty
+pkg_gitty_fetch = git
+pkg_gitty_repo = https://github.com/maxlapshin/gitty
+pkg_gitty_commit = master
+
+PACKAGES += gold_fever
+pkg_gold_fever_name = gold_fever
+pkg_gold_fever_description = A Treasure Hunt for Erlangers
+pkg_gold_fever_homepage = https://github.com/inaka/gold_fever
+pkg_gold_fever_fetch = git
+pkg_gold_fever_repo = https://github.com/inaka/gold_fever
+pkg_gold_fever_commit = master
+
+PACKAGES += gpb
+pkg_gpb_name = gpb
+pkg_gpb_description = A Google Protobuf implementation for Erlang
+pkg_gpb_homepage = https://github.com/tomas-abrahamsson/gpb
+pkg_gpb_fetch = git
+pkg_gpb_repo = https://github.com/tomas-abrahamsson/gpb
+pkg_gpb_commit = master
+
+PACKAGES += gproc
+pkg_gproc_name = gproc
+pkg_gproc_description = Extended process registry for Erlang
+pkg_gproc_homepage = https://github.com/uwiger/gproc
+pkg_gproc_fetch = git
+pkg_gproc_repo = https://github.com/uwiger/gproc
+pkg_gproc_commit = master
+
+PACKAGES += grapherl
+pkg_grapherl_name = grapherl
+pkg_grapherl_description = Create graphs of Erlang systems and programs
+pkg_grapherl_homepage = https://github.com/eproxus/grapherl
+pkg_grapherl_fetch = git
+pkg_grapherl_repo = https://github.com/eproxus/grapherl
+pkg_grapherl_commit = master
+
+PACKAGES += grpc
+pkg_grpc_name = grpc
+pkg_grpc_description = gRPC server in Erlang
+pkg_grpc_homepage = https://github.com/Bluehouse-Technology/grpc
+pkg_grpc_fetch = git
+pkg_grpc_repo = https://github.com/Bluehouse-Technology/grpc
+pkg_grpc_commit = master
+
+PACKAGES += grpc_client
+pkg_grpc_client_name = grpc_client
+pkg_grpc_client_description = gRPC client in Erlang
+pkg_grpc_client_homepage = https://github.com/Bluehouse-Technology/grpc_client
+pkg_grpc_client_fetch = git
+pkg_grpc_client_repo = https://github.com/Bluehouse-Technology/grpc_client
+pkg_grpc_client_commit = master
+
+PACKAGES += gun
+pkg_gun_name = gun
+pkg_gun_description = Asynchronous SPDY, HTTP and Websocket client written in Erlang.
+pkg_gun_homepage = http://ninenines.eu
+pkg_gun_fetch = git
+pkg_gun_repo = https://github.com/ninenines/gun
+pkg_gun_commit = master
+
+PACKAGES += gut
+pkg_gut_name = gut
+pkg_gut_description = gut is a template printing, aka scaffolding, tool for Erlang. Like rails generate or yeoman
+pkg_gut_homepage = https://github.com/unbalancedparentheses/gut
+pkg_gut_fetch = git
+pkg_gut_repo = https://github.com/unbalancedparentheses/gut
+pkg_gut_commit = master
+
+PACKAGES += hackney
+pkg_hackney_name = hackney
+pkg_hackney_description = simple HTTP client in Erlang
+pkg_hackney_homepage = https://github.com/benoitc/hackney
+pkg_hackney_fetch = git
+pkg_hackney_repo = https://github.com/benoitc/hackney
+pkg_hackney_commit = master
+
+PACKAGES += hamcrest
+pkg_hamcrest_name = hamcrest
+pkg_hamcrest_description = Erlang port of Hamcrest
+pkg_hamcrest_homepage = https://github.com/hyperthunk/hamcrest-erlang
+pkg_hamcrest_fetch = git
+pkg_hamcrest_repo = https://github.com/hyperthunk/hamcrest-erlang
+pkg_hamcrest_commit = master
+
+PACKAGES += hanoidb
+pkg_hanoidb_name = hanoidb
+pkg_hanoidb_description = Erlang LSM BTree Storage
+pkg_hanoidb_homepage = https://github.com/krestenkrab/hanoidb
+pkg_hanoidb_fetch = git
+pkg_hanoidb_repo = https://github.com/krestenkrab/hanoidb
+pkg_hanoidb_commit = master
+
+PACKAGES += hottub
+pkg_hottub_name = hottub
+pkg_hottub_description = Permanent Erlang Worker Pool
+pkg_hottub_homepage = https://github.com/bfrog/hottub
+pkg_hottub_fetch = git
+pkg_hottub_repo = https://github.com/bfrog/hottub
+pkg_hottub_commit = master
+
+PACKAGES += hpack
+pkg_hpack_name = hpack
+pkg_hpack_description = HPACK Implementation for Erlang
+pkg_hpack_homepage = https://github.com/joedevivo/hpack
+pkg_hpack_fetch = git
+pkg_hpack_repo = https://github.com/joedevivo/hpack
+pkg_hpack_commit = master
+
+PACKAGES += hyper
+pkg_hyper_name = hyper
+pkg_hyper_description = Erlang implementation of HyperLogLog
+pkg_hyper_homepage = https://github.com/GameAnalytics/hyper
+pkg_hyper_fetch = git
+pkg_hyper_repo = https://github.com/GameAnalytics/hyper
+pkg_hyper_commit = master
+
+PACKAGES += i18n
+pkg_i18n_name = i18n
+pkg_i18n_description = International components for unicode from Erlang (unicode, date, string, number, format, locale, localization, transliteration, icu4e)
+pkg_i18n_homepage = https://github.com/erlang-unicode/i18n
+pkg_i18n_fetch = git
+pkg_i18n_repo = https://github.com/erlang-unicode/i18n
+pkg_i18n_commit = master
+
+PACKAGES += ibrowse
+pkg_ibrowse_name = ibrowse
+pkg_ibrowse_description = Erlang HTTP client
+pkg_ibrowse_homepage = https://github.com/cmullaparthi/ibrowse
+pkg_ibrowse_fetch = git
+pkg_ibrowse_repo = https://github.com/cmullaparthi/ibrowse
+pkg_ibrowse_commit = master
+
+PACKAGES += idna
+pkg_idna_name = idna
+pkg_idna_description = Erlang IDNA lib
+pkg_idna_homepage = https://github.com/benoitc/erlang-idna
+pkg_idna_fetch = git
+pkg_idna_repo = https://github.com/benoitc/erlang-idna
+pkg_idna_commit = master
+
+PACKAGES += ierlang
+pkg_ierlang_name = ierlang
+pkg_ierlang_description = An Erlang language kernel for IPython.
+pkg_ierlang_homepage = https://github.com/robbielynch/ierlang
+pkg_ierlang_fetch = git
+pkg_ierlang_repo = https://github.com/robbielynch/ierlang
+pkg_ierlang_commit = master
+
+PACKAGES += iota
+pkg_iota_name = iota
+pkg_iota_description = iota (Inter-dependency Objective Testing Apparatus) - a tool to enforce clean separation of responsibilities in Erlang code
+pkg_iota_homepage = https://github.com/jpgneves/iota
+pkg_iota_fetch = git
+pkg_iota_repo = https://github.com/jpgneves/iota
+pkg_iota_commit = master
+
+PACKAGES += irc_lib
+pkg_irc_lib_name = irc_lib
+pkg_irc_lib_description = Erlang irc client library
+pkg_irc_lib_homepage = https://github.com/OtpChatBot/irc_lib
+pkg_irc_lib_fetch = git
+pkg_irc_lib_repo = https://github.com/OtpChatBot/irc_lib
+pkg_irc_lib_commit = master
+
+PACKAGES += ircd
+pkg_ircd_name = ircd
+pkg_ircd_description = A pluggable IRC daemon application/library for Erlang.
+pkg_ircd_homepage = https://github.com/tonyg/erlang-ircd
+pkg_ircd_fetch = git
+pkg_ircd_repo = https://github.com/tonyg/erlang-ircd
+pkg_ircd_commit = master
+
+PACKAGES += iris
+pkg_iris_name = iris
+pkg_iris_description = Iris Erlang binding
+pkg_iris_homepage = https://github.com/project-iris/iris-erl
+pkg_iris_fetch = git
+pkg_iris_repo = https://github.com/project-iris/iris-erl
+pkg_iris_commit = master
+
+PACKAGES += iso8601
+pkg_iso8601_name = iso8601
+pkg_iso8601_description = Erlang ISO 8601 date formatter/parser
+pkg_iso8601_homepage = https://github.com/seansawyer/erlang_iso8601
+pkg_iso8601_fetch = git
+pkg_iso8601_repo = https://github.com/seansawyer/erlang_iso8601
+pkg_iso8601_commit = master
+
+PACKAGES += jamdb_sybase
+pkg_jamdb_sybase_name = jamdb_sybase
+pkg_jamdb_sybase_description = Erlang driver for SAP Sybase ASE
+pkg_jamdb_sybase_homepage = https://github.com/erlangbureau/jamdb_sybase
+pkg_jamdb_sybase_fetch = git
+pkg_jamdb_sybase_repo = https://github.com/erlangbureau/jamdb_sybase
+pkg_jamdb_sybase_commit = master
+
+PACKAGES += jerg
+pkg_jerg_name = jerg
+pkg_jerg_description = JSON Schema to Erlang Records Generator
+pkg_jerg_homepage = https://github.com/ddossot/jerg
+pkg_jerg_fetch = git
+pkg_jerg_repo = https://github.com/ddossot/jerg
+pkg_jerg_commit = master
+
+PACKAGES += jesse
+pkg_jesse_name = jesse
+pkg_jesse_description = jesse (JSon Schema Erlang) is an implementation of a json schema validator for Erlang.
+pkg_jesse_homepage = https://github.com/for-GET/jesse
+pkg_jesse_fetch = git
+pkg_jesse_repo = https://github.com/for-GET/jesse
+pkg_jesse_commit = master
+
+PACKAGES += jiffy
+pkg_jiffy_name = jiffy
+pkg_jiffy_description = JSON NIFs for Erlang.
+pkg_jiffy_homepage = https://github.com/davisp/jiffy
+pkg_jiffy_fetch = git
+pkg_jiffy_repo = https://github.com/davisp/jiffy
+pkg_jiffy_commit = master
+
+PACKAGES += jiffy_v
+pkg_jiffy_v_name = jiffy_v
+pkg_jiffy_v_description = JSON validation utility
+pkg_jiffy_v_homepage = https://github.com/shizzard/jiffy-v
+pkg_jiffy_v_fetch = git
+pkg_jiffy_v_repo = https://github.com/shizzard/jiffy-v
+pkg_jiffy_v_commit = master
+
+PACKAGES += jobs
+pkg_jobs_name = jobs
+pkg_jobs_description = a Job scheduler for load regulation
+pkg_jobs_homepage = https://github.com/esl/jobs
+pkg_jobs_fetch = git
+pkg_jobs_repo = https://github.com/esl/jobs
+pkg_jobs_commit = master
+
+PACKAGES += joxa
+pkg_joxa_name = joxa
+pkg_joxa_description = A Modern Lisp for the Erlang VM
+pkg_joxa_homepage = https://github.com/joxa/joxa
+pkg_joxa_fetch = git
+pkg_joxa_repo = https://github.com/joxa/joxa
+pkg_joxa_commit = master
+
+PACKAGES += json
+pkg_json_name = json
+pkg_json_description = a high level json library for erlang (17.0+)
+pkg_json_homepage = https://github.com/talentdeficit/json
+pkg_json_fetch = git
+pkg_json_repo = https://github.com/talentdeficit/json
+pkg_json_commit = master
+
+PACKAGES += json_rec
+pkg_json_rec_name = json_rec
+pkg_json_rec_description = JSON to erlang record
+pkg_json_rec_homepage = https://github.com/justinkirby/json_rec
+pkg_json_rec_fetch = git
+pkg_json_rec_repo = https://github.com/justinkirby/json_rec
+pkg_json_rec_commit = master
+
+PACKAGES += jsone
+pkg_jsone_name = jsone
+pkg_jsone_description = An Erlang library for encoding and decoding JSON data.
+pkg_jsone_homepage = https://github.com/sile/jsone.git
+pkg_jsone_fetch = git
+pkg_jsone_repo = https://github.com/sile/jsone.git
+pkg_jsone_commit = master
+
+PACKAGES += jsonerl
+pkg_jsonerl_name = jsonerl
+pkg_jsonerl_description = yet another but slightly different erlang <-> json encoder/decoder
+pkg_jsonerl_homepage = https://github.com/lambder/jsonerl
+pkg_jsonerl_fetch = git
+pkg_jsonerl_repo = https://github.com/lambder/jsonerl
+pkg_jsonerl_commit = master
+
+PACKAGES += jsonpath
+pkg_jsonpath_name = jsonpath
+pkg_jsonpath_description = Fast Erlang JSON data retrieval and updates via javascript-like notation
+pkg_jsonpath_homepage = https://github.com/GeneStevens/jsonpath
+pkg_jsonpath_fetch = git
+pkg_jsonpath_repo = https://github.com/GeneStevens/jsonpath
+pkg_jsonpath_commit = master
+
+PACKAGES += jsonx
+pkg_jsonx_name = jsonx
+pkg_jsonx_description = JSONX is an Erlang library for efficiently decoding and encoding JSON, written in C.
+pkg_jsonx_homepage = https://github.com/iskra/jsonx
+pkg_jsonx_fetch = git
+pkg_jsonx_repo = https://github.com/iskra/jsonx
+pkg_jsonx_commit = master
+
+PACKAGES += jsx
+pkg_jsx_name = jsx
+pkg_jsx_description = An Erlang application for consuming, producing and manipulating JSON.
+pkg_jsx_homepage = https://github.com/talentdeficit/jsx
+pkg_jsx_fetch = git
+pkg_jsx_repo = https://github.com/talentdeficit/jsx
+pkg_jsx_commit = master
+
+PACKAGES += kafka
+pkg_kafka_name = kafka
+pkg_kafka_description = Kafka consumer and producer in Erlang
+pkg_kafka_homepage = https://github.com/wooga/kafka-erlang
+pkg_kafka_fetch = git
+pkg_kafka_repo = https://github.com/wooga/kafka-erlang
+pkg_kafka_commit = master
+
+PACKAGES += kafka_protocol
+pkg_kafka_protocol_name = kafka_protocol
+pkg_kafka_protocol_description = Kafka protocol Erlang library
+pkg_kafka_protocol_homepage = https://github.com/klarna/kafka_protocol
+pkg_kafka_protocol_fetch = git
+pkg_kafka_protocol_repo = https://github.com/klarna/kafka_protocol.git
+pkg_kafka_protocol_commit = master
+
+PACKAGES += kai
+pkg_kai_name = kai
+pkg_kai_description = DHT storage by Takeshi Inoue
+pkg_kai_homepage = https://github.com/synrc/kai
+pkg_kai_fetch = git
+pkg_kai_repo = https://github.com/synrc/kai
+pkg_kai_commit = master
+
+PACKAGES += katja
+pkg_katja_name = katja
+pkg_katja_description = A simple Riemann client written in Erlang.
+pkg_katja_homepage = https://github.com/nifoc/katja
+pkg_katja_fetch = git
+pkg_katja_repo = https://github.com/nifoc/katja
+pkg_katja_commit = master
+
+PACKAGES += kdht
+pkg_kdht_name = kdht
+pkg_kdht_description = kdht is an erlang DHT implementation
+pkg_kdht_homepage = https://github.com/kevinlynx/kdht
+pkg_kdht_fetch = git
+pkg_kdht_repo = https://github.com/kevinlynx/kdht
+pkg_kdht_commit = master
+
+PACKAGES += key2value
+pkg_key2value_name = key2value
+pkg_key2value_description = Erlang 2-way map
+pkg_key2value_homepage = https://github.com/okeuday/key2value
+pkg_key2value_fetch = git
+pkg_key2value_repo = https://github.com/okeuday/key2value
+pkg_key2value_commit = master
+
+PACKAGES += keys1value
+pkg_keys1value_name = keys1value
+pkg_keys1value_description = Erlang set associative map for key lists
+pkg_keys1value_homepage = https://github.com/okeuday/keys1value
+pkg_keys1value_fetch = git
+pkg_keys1value_repo = https://github.com/okeuday/keys1value
+pkg_keys1value_commit = master
+
+PACKAGES += kinetic
+pkg_kinetic_name = kinetic
+pkg_kinetic_description = Erlang Kinesis Client
+pkg_kinetic_homepage = https://github.com/AdRoll/kinetic
+pkg_kinetic_fetch = git
+pkg_kinetic_repo = https://github.com/AdRoll/kinetic
+pkg_kinetic_commit = master
+
+PACKAGES += kjell
+pkg_kjell_name = kjell
+pkg_kjell_description = Erlang Shell
+pkg_kjell_homepage = https://github.com/karlll/kjell
+pkg_kjell_fetch = git
+pkg_kjell_repo = https://github.com/karlll/kjell
+pkg_kjell_commit = master
+
+PACKAGES += kraken
+pkg_kraken_name = kraken
+pkg_kraken_description = Distributed Pubsub Server for Realtime Apps
+pkg_kraken_homepage = https://github.com/Asana/kraken
+pkg_kraken_fetch = git
+pkg_kraken_repo = https://github.com/Asana/kraken
+pkg_kraken_commit = master
+
+PACKAGES += kucumberl
+pkg_kucumberl_name = kucumberl
+pkg_kucumberl_description = A pure-erlang, open-source, implementation of Cucumber
+pkg_kucumberl_homepage = https://github.com/openshine/kucumberl
+pkg_kucumberl_fetch = git
+pkg_kucumberl_repo = https://github.com/openshine/kucumberl
+pkg_kucumberl_commit = master
+
+PACKAGES += kvc
+pkg_kvc_name = kvc
+pkg_kvc_description = KVC - Key Value Coding for Erlang data structures
+pkg_kvc_homepage = https://github.com/etrepum/kvc
+pkg_kvc_fetch = git
+pkg_kvc_repo = https://github.com/etrepum/kvc
+pkg_kvc_commit = master
+
+PACKAGES += kvlists
+pkg_kvlists_name = kvlists
+pkg_kvlists_description = Lists of key-value pairs (decoded JSON) in Erlang
+pkg_kvlists_homepage = https://github.com/jcomellas/kvlists
+pkg_kvlists_fetch = git
+pkg_kvlists_repo = https://github.com/jcomellas/kvlists
+pkg_kvlists_commit = master
+
+PACKAGES += kvs
+pkg_kvs_name = kvs
+pkg_kvs_description = Container and Iterator
+pkg_kvs_homepage = https://github.com/synrc/kvs
+pkg_kvs_fetch = git
+pkg_kvs_repo = https://github.com/synrc/kvs
+pkg_kvs_commit = master
+
+PACKAGES += lager
+pkg_lager_name = lager
+pkg_lager_description = A logging framework for Erlang/OTP.
+pkg_lager_homepage = https://github.com/erlang-lager/lager
+pkg_lager_fetch = git
+pkg_lager_repo = https://github.com/erlang-lager/lager
+pkg_lager_commit = master
+
+PACKAGES += lager_amqp_backend
+pkg_lager_amqp_backend_name = lager_amqp_backend
+pkg_lager_amqp_backend_description = AMQP RabbitMQ Lager backend
+pkg_lager_amqp_backend_homepage = https://github.com/jbrisbin/lager_amqp_backend
+pkg_lager_amqp_backend_fetch = git
+pkg_lager_amqp_backend_repo = https://github.com/jbrisbin/lager_amqp_backend
+pkg_lager_amqp_backend_commit = master
+
+PACKAGES += lager_syslog
+pkg_lager_syslog_name = lager_syslog
+pkg_lager_syslog_description = Syslog backend for lager
+pkg_lager_syslog_homepage = https://github.com/erlang-lager/lager_syslog
+pkg_lager_syslog_fetch = git
+pkg_lager_syslog_repo = https://github.com/erlang-lager/lager_syslog
+pkg_lager_syslog_commit = master
+
+PACKAGES += lambdapad
+pkg_lambdapad_name = lambdapad
+pkg_lambdapad_description = Static site generator using Erlang. Yes, Erlang.
+pkg_lambdapad_homepage = https://github.com/gar1t/lambdapad
+pkg_lambdapad_fetch = git
+pkg_lambdapad_repo = https://github.com/gar1t/lambdapad
+pkg_lambdapad_commit = master
+
+PACKAGES += lasp
+pkg_lasp_name = lasp
+pkg_lasp_description = A Language for Distributed, Eventually Consistent Computations
+pkg_lasp_homepage = http://lasp-lang.org/
+pkg_lasp_fetch = git
+pkg_lasp_repo = https://github.com/lasp-lang/lasp
+pkg_lasp_commit = master
+
+PACKAGES += lasse
+pkg_lasse_name = lasse
+pkg_lasse_description = SSE handler for Cowboy
+pkg_lasse_homepage = https://github.com/inaka/lasse
+pkg_lasse_fetch = git
+pkg_lasse_repo = https://github.com/inaka/lasse
+pkg_lasse_commit = master
+
+PACKAGES += ldap
+pkg_ldap_name = ldap
+pkg_ldap_description = LDAP server written in Erlang
+pkg_ldap_homepage = https://github.com/spawnproc/ldap
+pkg_ldap_fetch = git
+pkg_ldap_repo = https://github.com/spawnproc/ldap
+pkg_ldap_commit = master
+
+PACKAGES += lethink
+pkg_lethink_name = lethink
+pkg_lethink_description = erlang driver for rethinkdb
+pkg_lethink_homepage = https://github.com/taybin/lethink
+pkg_lethink_fetch = git
+pkg_lethink_repo = https://github.com/taybin/lethink
+pkg_lethink_commit = master
+
+PACKAGES += lfe
+pkg_lfe_name = lfe
+pkg_lfe_description = Lisp Flavoured Erlang (LFE)
+pkg_lfe_homepage = https://github.com/rvirding/lfe
+pkg_lfe_fetch = git
+pkg_lfe_repo = https://github.com/rvirding/lfe
+pkg_lfe_commit = master
+
+PACKAGES += ling
+pkg_ling_name = ling
+pkg_ling_description = Erlang on Xen
+pkg_ling_homepage = https://github.com/cloudozer/ling
+pkg_ling_fetch = git
+pkg_ling_repo = https://github.com/cloudozer/ling
+pkg_ling_commit = master
+
+PACKAGES += live
+pkg_live_name = live
+pkg_live_description = Automated module and configuration reloader.
+pkg_live_homepage = http://ninenines.eu
+pkg_live_fetch = git
+pkg_live_repo = https://github.com/ninenines/live
+pkg_live_commit = master
+
+PACKAGES += lmq
+pkg_lmq_name = lmq
+pkg_lmq_description = Lightweight Message Queue
+pkg_lmq_homepage = https://github.com/iij/lmq
+pkg_lmq_fetch = git
+pkg_lmq_repo = https://github.com/iij/lmq
+pkg_lmq_commit = master
+
+PACKAGES += locker
+pkg_locker_name = locker
+pkg_locker_description = Atomic distributed 'check and set' for short-lived keys
+pkg_locker_homepage = https://github.com/wooga/locker
+pkg_locker_fetch = git
+pkg_locker_repo = https://github.com/wooga/locker
+pkg_locker_commit = master
+
+PACKAGES += locks
+pkg_locks_name = locks
+pkg_locks_description = A scalable, deadlock-resolving resource locker
+pkg_locks_homepage = https://github.com/uwiger/locks
+pkg_locks_fetch = git
+pkg_locks_repo = https://github.com/uwiger/locks
+pkg_locks_commit = master
+
+PACKAGES += log4erl
+pkg_log4erl_name = log4erl
+pkg_log4erl_description = A logger for erlang in the spirit of Log4J.
+pkg_log4erl_homepage = https://github.com/ahmednawras/log4erl
+pkg_log4erl_fetch = git
+pkg_log4erl_repo = https://github.com/ahmednawras/log4erl
+pkg_log4erl_commit = master
+
+PACKAGES += lol
+pkg_lol_name = lol
+pkg_lol_description = Lisp on erLang, and programming is fun again
+pkg_lol_homepage = https://github.com/b0oh/lol
+pkg_lol_fetch = git
+pkg_lol_repo = https://github.com/b0oh/lol
+pkg_lol_commit = master
+
+PACKAGES += lucid
+pkg_lucid_name = lucid
+pkg_lucid_description = HTTP/2 server written in Erlang
+pkg_lucid_homepage = https://github.com/tatsuhiro-t/lucid
+pkg_lucid_fetch = git
+pkg_lucid_repo = https://github.com/tatsuhiro-t/lucid
+pkg_lucid_commit = master
+
+PACKAGES += luerl
+pkg_luerl_name = luerl
+pkg_luerl_description = Lua in Erlang
+pkg_luerl_homepage = https://github.com/rvirding/luerl
+pkg_luerl_fetch = git
+pkg_luerl_repo = https://github.com/rvirding/luerl
+pkg_luerl_commit = develop
+
+PACKAGES += luwak
+pkg_luwak_name = luwak
+pkg_luwak_description = Large-object storage interface for Riak
+pkg_luwak_homepage = https://github.com/basho/luwak
+pkg_luwak_fetch = git
+pkg_luwak_repo = https://github.com/basho/luwak
+pkg_luwak_commit = master
+
+PACKAGES += lux
+pkg_lux_name = lux
+pkg_lux_description = Lux (LUcid eXpect scripting) simplifies test automation and provides an Expect-style execution of commands
+pkg_lux_homepage = https://github.com/hawk/lux
+pkg_lux_fetch = git
+pkg_lux_repo = https://github.com/hawk/lux
+pkg_lux_commit = master
+
+PACKAGES += machi
+pkg_machi_name = machi
+pkg_machi_description = Machi file store
+pkg_machi_homepage = https://github.com/basho/machi
+pkg_machi_fetch = git
+pkg_machi_repo = https://github.com/basho/machi
+pkg_machi_commit = master
+
+PACKAGES += mad
+pkg_mad_name = mad
+pkg_mad_description = Small and Fast Rebar Replacement
+pkg_mad_homepage = https://github.com/synrc/mad
+pkg_mad_fetch = git
+pkg_mad_repo = https://github.com/synrc/mad
+pkg_mad_commit = master
+
+PACKAGES += marina
+pkg_marina_name = marina
+pkg_marina_description = Non-blocking Erlang Cassandra CQL3 client
+pkg_marina_homepage = https://github.com/lpgauth/marina
+pkg_marina_fetch = git
+pkg_marina_repo = https://github.com/lpgauth/marina
+pkg_marina_commit = master
+
+PACKAGES += mavg
+pkg_mavg_name = mavg
+pkg_mavg_description = Erlang :: Exponential moving average library
+pkg_mavg_homepage = https://github.com/EchoTeam/mavg
+pkg_mavg_fetch = git
+pkg_mavg_repo = https://github.com/EchoTeam/mavg
+pkg_mavg_commit = master
+
+PACKAGES += mc_erl
+pkg_mc_erl_name = mc_erl
+pkg_mc_erl_description = mc-erl is a server for Minecraft 1.4.7 written in Erlang.
+pkg_mc_erl_homepage = https://github.com/clonejo/mc-erl
+pkg_mc_erl_fetch = git
+pkg_mc_erl_repo = https://github.com/clonejo/mc-erl
+pkg_mc_erl_commit = master
+
+PACKAGES += mcd
+pkg_mcd_name = mcd
+pkg_mcd_description = Fast memcached protocol client in pure Erlang
+pkg_mcd_homepage = https://github.com/EchoTeam/mcd
+pkg_mcd_fetch = git
+pkg_mcd_repo = https://github.com/EchoTeam/mcd
+pkg_mcd_commit = master
+
+PACKAGES += mcerlang
+pkg_mcerlang_name = mcerlang
+pkg_mcerlang_description = The McErlang model checker for Erlang
+pkg_mcerlang_homepage = https://github.com/fredlund/McErlang
+pkg_mcerlang_fetch = git
+pkg_mcerlang_repo = https://github.com/fredlund/McErlang
+pkg_mcerlang_commit = master
+
+PACKAGES += meck
+pkg_meck_name = meck
+pkg_meck_description = A mocking library for Erlang
+pkg_meck_homepage = https://github.com/eproxus/meck
+pkg_meck_fetch = git
+pkg_meck_repo = https://github.com/eproxus/meck
+pkg_meck_commit = master
+
+PACKAGES += mekao
+pkg_mekao_name = mekao
+pkg_mekao_description = SQL constructor
+pkg_mekao_homepage = https://github.com/ddosia/mekao
+pkg_mekao_fetch = git
+pkg_mekao_repo = https://github.com/ddosia/mekao
+pkg_mekao_commit = master
+
+PACKAGES += memo
+pkg_memo_name = memo
+pkg_memo_description = Erlang memoization server
+pkg_memo_homepage = https://github.com/tuncer/memo
+pkg_memo_fetch = git
+pkg_memo_repo = https://github.com/tuncer/memo
+pkg_memo_commit = master
+
+PACKAGES += merge_index
+pkg_merge_index_name = merge_index
+pkg_merge_index_description = MergeIndex is an Erlang library for storing ordered sets on disk. It is very similar to an SSTable (in Google's Bigtable) or an HFile (in Hadoop).
+pkg_merge_index_homepage = https://github.com/basho/merge_index
+pkg_merge_index_fetch = git
+pkg_merge_index_repo = https://github.com/basho/merge_index
+pkg_merge_index_commit = master
+
+PACKAGES += merl
+pkg_merl_name = merl
+pkg_merl_description = Metaprogramming in Erlang
+pkg_merl_homepage = https://github.com/richcarl/merl
+pkg_merl_fetch = git
+pkg_merl_repo = https://github.com/richcarl/merl
+pkg_merl_commit = master
+
+PACKAGES += mimerl
+pkg_mimerl_name = mimerl
+pkg_mimerl_description = library to handle mimetypes
+pkg_mimerl_homepage = https://github.com/benoitc/mimerl
+pkg_mimerl_fetch = git
+pkg_mimerl_repo = https://github.com/benoitc/mimerl
+pkg_mimerl_commit = master
+
+PACKAGES += mimetypes
+pkg_mimetypes_name = mimetypes
+pkg_mimetypes_description = Erlang MIME types library
+pkg_mimetypes_homepage = https://github.com/spawngrid/mimetypes
+pkg_mimetypes_fetch = git
+pkg_mimetypes_repo = https://github.com/spawngrid/mimetypes
+pkg_mimetypes_commit = master
+
+PACKAGES += mixer
+pkg_mixer_name = mixer
+pkg_mixer_description = Mix in functions from other modules
+pkg_mixer_homepage = https://github.com/chef/mixer
+pkg_mixer_fetch = git
+pkg_mixer_repo = https://github.com/chef/mixer
+pkg_mixer_commit = master
+
+PACKAGES += mochiweb
+pkg_mochiweb_name = mochiweb
+pkg_mochiweb_description = MochiWeb is an Erlang library for building lightweight HTTP servers.
+pkg_mochiweb_homepage = https://github.com/mochi/mochiweb
+pkg_mochiweb_fetch = git
+pkg_mochiweb_repo = https://github.com/mochi/mochiweb
+pkg_mochiweb_commit = master
+
+PACKAGES += mochiweb_xpath
+pkg_mochiweb_xpath_name = mochiweb_xpath
+pkg_mochiweb_xpath_description = XPath support for mochiweb's html parser
+pkg_mochiweb_xpath_homepage = https://github.com/retnuh/mochiweb_xpath
+pkg_mochiweb_xpath_fetch = git
+pkg_mochiweb_xpath_repo = https://github.com/retnuh/mochiweb_xpath
+pkg_mochiweb_xpath_commit = master
+
+PACKAGES += mockgyver
+pkg_mockgyver_name = mockgyver
+pkg_mockgyver_description = A mocking library for Erlang
+pkg_mockgyver_homepage = https://github.com/klajo/mockgyver
+pkg_mockgyver_fetch = git
+pkg_mockgyver_repo = https://github.com/klajo/mockgyver
+pkg_mockgyver_commit = master
+
+PACKAGES += modlib
+pkg_modlib_name = modlib
+pkg_modlib_description = Web framework based on Erlang's inets httpd
+pkg_modlib_homepage = https://github.com/gar1t/modlib
+pkg_modlib_fetch = git
+pkg_modlib_repo = https://github.com/gar1t/modlib
+pkg_modlib_commit = master
+
+PACKAGES += mongodb
+pkg_mongodb_name = mongodb
+pkg_mongodb_description = MongoDB driver for Erlang
+pkg_mongodb_homepage = https://github.com/comtihon/mongodb-erlang
+pkg_mongodb_fetch = git
+pkg_mongodb_repo = https://github.com/comtihon/mongodb-erlang
+pkg_mongodb_commit = master
+
+PACKAGES += mongooseim
+pkg_mongooseim_name = mongooseim
+pkg_mongooseim_description = Jabber / XMPP server with focus on performance and scalability, by Erlang Solutions
+pkg_mongooseim_homepage = https://www.erlang-solutions.com/products/mongooseim-massively-scalable-ejabberd-platform
+pkg_mongooseim_fetch = git
+pkg_mongooseim_repo = https://github.com/esl/MongooseIM
+pkg_mongooseim_commit = master
+
+PACKAGES += moyo
+pkg_moyo_name = moyo
+pkg_moyo_description = Erlang utility functions library
+pkg_moyo_homepage = https://github.com/dwango/moyo
+pkg_moyo_fetch = git
+pkg_moyo_repo = https://github.com/dwango/moyo
+pkg_moyo_commit = master
+
+PACKAGES += msgpack
+pkg_msgpack_name = msgpack
+pkg_msgpack_description = MessagePack (de)serializer implementation for Erlang
+pkg_msgpack_homepage = https://github.com/msgpack/msgpack-erlang
+pkg_msgpack_fetch = git
+pkg_msgpack_repo = https://github.com/msgpack/msgpack-erlang
+pkg_msgpack_commit = master
+
+PACKAGES += mu2
+pkg_mu2_name = mu2
+pkg_mu2_description = Erlang mutation testing tool
+pkg_mu2_homepage = https://github.com/ramsay-t/mu2
+pkg_mu2_fetch = git
+pkg_mu2_repo = https://github.com/ramsay-t/mu2
+pkg_mu2_commit = master
+
+PACKAGES += mustache
+pkg_mustache_name = mustache
+pkg_mustache_description = Mustache template engine for Erlang.
+pkg_mustache_homepage = https://github.com/mojombo/mustache.erl
+pkg_mustache_fetch = git
+pkg_mustache_repo = https://github.com/mojombo/mustache.erl
+pkg_mustache_commit = master
+
+PACKAGES += myproto
+pkg_myproto_name = myproto
+pkg_myproto_description = MySQL Server Protocol in Erlang
+pkg_myproto_homepage = https://github.com/altenwald/myproto
+pkg_myproto_fetch = git
+pkg_myproto_repo = https://github.com/altenwald/myproto
+pkg_myproto_commit = master
+
+PACKAGES += mysql
+pkg_mysql_name = mysql
+pkg_mysql_description = MySQL client library for Erlang/OTP
+pkg_mysql_homepage = https://github.com/mysql-otp/mysql-otp
+pkg_mysql_fetch = git
+pkg_mysql_repo = https://github.com/mysql-otp/mysql-otp
+pkg_mysql_commit = 1.5.1
+
+PACKAGES += n2o
+pkg_n2o_name = n2o
+pkg_n2o_description = WebSocket Application Server
+pkg_n2o_homepage = https://github.com/5HT/n2o
+pkg_n2o_fetch = git
+pkg_n2o_repo = https://github.com/5HT/n2o
+pkg_n2o_commit = master
+
+PACKAGES += nat_upnp
+pkg_nat_upnp_name = nat_upnp
+pkg_nat_upnp_description = Erlang library to map your internal port to an external one using UPnP IGD
+pkg_nat_upnp_homepage = https://github.com/benoitc/nat_upnp
+pkg_nat_upnp_fetch = git
+pkg_nat_upnp_repo = https://github.com/benoitc/nat_upnp
+pkg_nat_upnp_commit = master
+
+PACKAGES += neo4j
+pkg_neo4j_name = neo4j
+pkg_neo4j_description = Erlang client library for Neo4J.
+pkg_neo4j_homepage = https://github.com/dmitriid/neo4j-erlang
+pkg_neo4j_fetch = git
+pkg_neo4j_repo = https://github.com/dmitriid/neo4j-erlang
+pkg_neo4j_commit = master
+
+PACKAGES += neotoma
+pkg_neotoma_name = neotoma
+pkg_neotoma_description = Erlang library and packrat parser-generator for parsing expression grammars.
+pkg_neotoma_homepage = https://github.com/seancribbs/neotoma
+pkg_neotoma_fetch = git
+pkg_neotoma_repo = https://github.com/seancribbs/neotoma
+pkg_neotoma_commit = master
+
+PACKAGES += newrelic
+pkg_newrelic_name = newrelic
+pkg_newrelic_description = Erlang library for sending metrics to New Relic
+pkg_newrelic_homepage = https://github.com/wooga/newrelic-erlang
+pkg_newrelic_fetch = git
+pkg_newrelic_repo = https://github.com/wooga/newrelic-erlang
+pkg_newrelic_commit = master
+
+PACKAGES += nifty
+pkg_nifty_name = nifty
+pkg_nifty_description = Erlang NIF wrapper generator
+pkg_nifty_homepage = https://github.com/parapluu/nifty
+pkg_nifty_fetch = git
+pkg_nifty_repo = https://github.com/parapluu/nifty
+pkg_nifty_commit = master
+
+PACKAGES += nitrogen_core
+pkg_nitrogen_core_name = nitrogen_core
+pkg_nitrogen_core_description = The core Nitrogen library.
+pkg_nitrogen_core_homepage = http://nitrogenproject.com/
+pkg_nitrogen_core_fetch = git
+pkg_nitrogen_core_repo = https://github.com/nitrogen/nitrogen_core
+pkg_nitrogen_core_commit = master
+
+PACKAGES += nkbase
+pkg_nkbase_name = nkbase
+pkg_nkbase_description = NkBASE distributed database
+pkg_nkbase_homepage = https://github.com/Nekso/nkbase
+pkg_nkbase_fetch = git
+pkg_nkbase_repo = https://github.com/Nekso/nkbase
+pkg_nkbase_commit = develop
+
+PACKAGES += nkdocker
+pkg_nkdocker_name = nkdocker
+pkg_nkdocker_description = Erlang Docker client
+pkg_nkdocker_homepage = https://github.com/Nekso/nkdocker
+pkg_nkdocker_fetch = git
+pkg_nkdocker_repo = https://github.com/Nekso/nkdocker
+pkg_nkdocker_commit = master
+
+PACKAGES += nkpacket
+pkg_nkpacket_name = nkpacket
+pkg_nkpacket_description = Generic Erlang transport layer
+pkg_nkpacket_homepage = https://github.com/Nekso/nkpacket
+pkg_nkpacket_fetch = git
+pkg_nkpacket_repo = https://github.com/Nekso/nkpacket
+pkg_nkpacket_commit = master
+
+PACKAGES += nksip
+pkg_nksip_name = nksip
+pkg_nksip_description = Erlang SIP application server
+pkg_nksip_homepage = https://github.com/kalta/nksip
+pkg_nksip_fetch = git
+pkg_nksip_repo = https://github.com/kalta/nksip
+pkg_nksip_commit = master
+
+PACKAGES += nodefinder
+pkg_nodefinder_name = nodefinder
+pkg_nodefinder_description = automatic node discovery via UDP multicast
+pkg_nodefinder_homepage = https://github.com/erlanger/nodefinder
+pkg_nodefinder_fetch = git
+pkg_nodefinder_repo = https://github.com/okeuday/nodefinder
+pkg_nodefinder_commit = master
+
+PACKAGES += nprocreg
+pkg_nprocreg_name = nprocreg
+pkg_nprocreg_description = Minimal Distributed Erlang Process Registry
+pkg_nprocreg_homepage = http://nitrogenproject.com/
+pkg_nprocreg_fetch = git
+pkg_nprocreg_repo = https://github.com/nitrogen/nprocreg
+pkg_nprocreg_commit = master
+
+PACKAGES += oauth
+pkg_oauth_name = oauth
+pkg_oauth_description = An Erlang OAuth 1.0 implementation
+pkg_oauth_homepage = https://github.com/tim/erlang-oauth
+pkg_oauth_fetch = git
+pkg_oauth_repo = https://github.com/tim/erlang-oauth
+pkg_oauth_commit = master
+
+PACKAGES += oauth2
+pkg_oauth2_name = oauth2
+pkg_oauth2_description = Erlang OAuth2 implementation
+pkg_oauth2_homepage = https://github.com/kivra/oauth2
+pkg_oauth2_fetch = git
+pkg_oauth2_repo = https://github.com/kivra/oauth2
+pkg_oauth2_commit = master
+
+PACKAGES += observer_cli
+pkg_observer_cli_name = observer_cli
+pkg_observer_cli_description = Visualize Erlang/Elixir Nodes On The Command Line
+pkg_observer_cli_homepage = http://zhongwencool.github.io/observer_cli
+pkg_observer_cli_fetch = git
+pkg_observer_cli_repo = https://github.com/zhongwencool/observer_cli
+pkg_observer_cli_commit = master
+
+PACKAGES += octopus
+pkg_octopus_name = octopus
+pkg_octopus_description = Small and flexible pool manager written in Erlang
+pkg_octopus_homepage = https://github.com/erlangbureau/octopus
+pkg_octopus_fetch = git
+pkg_octopus_repo = https://github.com/erlangbureau/octopus
+pkg_octopus_commit = master
+
+PACKAGES += of_protocol
+pkg_of_protocol_name = of_protocol
+pkg_of_protocol_description = OpenFlow Protocol Library for Erlang
+pkg_of_protocol_homepage = https://github.com/FlowForwarding/of_protocol
+pkg_of_protocol_fetch = git
+pkg_of_protocol_repo = https://github.com/FlowForwarding/of_protocol
+pkg_of_protocol_commit = master
+
+PACKAGES += opencouch
+pkg_opencouch_name = couch
+pkg_opencouch_description = An embeddable document-oriented database compatible with Apache CouchDB
+pkg_opencouch_homepage = https://github.com/benoitc/opencouch
+pkg_opencouch_fetch = git
+pkg_opencouch_repo = https://github.com/benoitc/opencouch
+pkg_opencouch_commit = master
+
+PACKAGES += openflow
+pkg_openflow_name = openflow
+pkg_openflow_description = An OpenFlow controller written in pure erlang
+pkg_openflow_homepage = https://github.com/renatoaguiar/erlang-openflow
+pkg_openflow_fetch = git
+pkg_openflow_repo = https://github.com/renatoaguiar/erlang-openflow
+pkg_openflow_commit = master
+
+PACKAGES += openid
+pkg_openid_name = openid
+pkg_openid_description = Erlang OpenID
+pkg_openid_homepage = https://github.com/brendonh/erl_openid
+pkg_openid_fetch = git
+pkg_openid_repo = https://github.com/brendonh/erl_openid
+pkg_openid_commit = master
+
+PACKAGES += openpoker
+pkg_openpoker_name = openpoker
+pkg_openpoker_description = Genesis Texas hold'em Game Server
+pkg_openpoker_homepage = https://github.com/hpyhacking/openpoker
+pkg_openpoker_fetch = git
+pkg_openpoker_repo = https://github.com/hpyhacking/openpoker
+pkg_openpoker_commit = master
+
+PACKAGES += otpbp
+pkg_otpbp_name = otpbp
+pkg_otpbp_description = Parse transformer for using new OTP functions in old Erlang/OTP releases (R15, R16, 17, 18, 19)
+pkg_otpbp_homepage = https://github.com/Ledest/otpbp
+pkg_otpbp_fetch = git
+pkg_otpbp_repo = https://github.com/Ledest/otpbp
+pkg_otpbp_commit = master
+
+PACKAGES += pal
+pkg_pal_name = pal
+pkg_pal_description = Pragmatic Authentication Library
+pkg_pal_homepage = https://github.com/manifest/pal
+pkg_pal_fetch = git
+pkg_pal_repo = https://github.com/manifest/pal
+pkg_pal_commit = master
+
+PACKAGES += parse_trans
+pkg_parse_trans_name = parse_trans
+pkg_parse_trans_description = Parse transform utilities for Erlang
+pkg_parse_trans_homepage = https://github.com/uwiger/parse_trans
+pkg_parse_trans_fetch = git
+pkg_parse_trans_repo = https://github.com/uwiger/parse_trans
+pkg_parse_trans_commit = master
+
+PACKAGES += parsexml
+pkg_parsexml_name = parsexml
+pkg_parsexml_description = Simple DOM XML parser with convenient and very simple API
+pkg_parsexml_homepage = https://github.com/maxlapshin/parsexml
+pkg_parsexml_fetch = git
+pkg_parsexml_repo = https://github.com/maxlapshin/parsexml
+pkg_parsexml_commit = master
+
+PACKAGES += partisan
+pkg_partisan_name = partisan
+pkg_partisan_description = High-performance, high-scalability distributed computing with Erlang and Elixir.
+pkg_partisan_homepage = http://partisan.cloud
+pkg_partisan_fetch = git
+pkg_partisan_repo = https://github.com/lasp-lang/partisan
+pkg_partisan_commit = master
+
+PACKAGES += pegjs
+pkg_pegjs_name = pegjs
+pkg_pegjs_description = An implementation of PEG.js grammar for Erlang.
+pkg_pegjs_homepage = https://github.com/dmitriid/pegjs
+pkg_pegjs_fetch = git
+pkg_pegjs_repo = https://github.com/dmitriid/pegjs
+pkg_pegjs_commit = master
+
+PACKAGES += percept2
+pkg_percept2_name = percept2
+pkg_percept2_description = Concurrent profiling tool for Erlang
+pkg_percept2_homepage = https://github.com/huiqing/percept2
+pkg_percept2_fetch = git
+pkg_percept2_repo = https://github.com/huiqing/percept2
+pkg_percept2_commit = master
+
+PACKAGES += pgo
+pkg_pgo_name = pgo
+pkg_pgo_description = Erlang Postgres client and connection pool
+pkg_pgo_homepage = https://github.com/erleans/pgo.git
+pkg_pgo_fetch = git
+pkg_pgo_repo = https://github.com/erleans/pgo.git
+pkg_pgo_commit = master
+
+PACKAGES += pgsql
+pkg_pgsql_name = pgsql
+pkg_pgsql_description = Erlang PostgreSQL driver
+pkg_pgsql_homepage = https://github.com/semiocast/pgsql
+pkg_pgsql_fetch = git
+pkg_pgsql_repo = https://github.com/semiocast/pgsql
+pkg_pgsql_commit = master
+
+PACKAGES += pkgx
+pkg_pkgx_name = pkgx
+pkg_pkgx_description = Build .deb packages from Erlang releases
+pkg_pkgx_homepage = https://github.com/arjan/pkgx
+pkg_pkgx_fetch = git
+pkg_pkgx_repo = https://github.com/arjan/pkgx
+pkg_pkgx_commit = master
+
+PACKAGES += pkt
+pkg_pkt_name = pkt
+pkg_pkt_description = Erlang network protocol library
+pkg_pkt_homepage = https://github.com/msantos/pkt
+pkg_pkt_fetch = git
+pkg_pkt_repo = https://github.com/msantos/pkt
+pkg_pkt_commit = master
+
+PACKAGES += plain_fsm
+pkg_plain_fsm_name = plain_fsm
+pkg_plain_fsm_description = A behaviour/support library for writing plain Erlang FSMs.
+pkg_plain_fsm_homepage = https://github.com/uwiger/plain_fsm
+pkg_plain_fsm_fetch = git
+pkg_plain_fsm_repo = https://github.com/uwiger/plain_fsm
+pkg_plain_fsm_commit = master
+
+PACKAGES += plumtree
+pkg_plumtree_name = plumtree
+pkg_plumtree_description = Epidemic Broadcast Trees
+pkg_plumtree_homepage = https://github.com/helium/plumtree
+pkg_plumtree_fetch = git
+pkg_plumtree_repo = https://github.com/helium/plumtree
+pkg_plumtree_commit = master
+
+PACKAGES += pmod_transform
+pkg_pmod_transform_name = pmod_transform
+pkg_pmod_transform_description = Parse transform for parameterized modules
+pkg_pmod_transform_homepage = https://github.com/erlang/pmod_transform
+pkg_pmod_transform_fetch = git
+pkg_pmod_transform_repo = https://github.com/erlang/pmod_transform
+pkg_pmod_transform_commit = master
+
+PACKAGES += pobox
+pkg_pobox_name = pobox
+pkg_pobox_description = External buffer processes to protect against mailbox overflow in Erlang
+pkg_pobox_homepage = https://github.com/ferd/pobox
+pkg_pobox_fetch = git
+pkg_pobox_repo = https://github.com/ferd/pobox
+pkg_pobox_commit = master
+
+PACKAGES += ponos
+pkg_ponos_name = ponos
+pkg_ponos_description = ponos is a simple yet powerful load generator written in erlang
+pkg_ponos_homepage = https://github.com/klarna/ponos
+pkg_ponos_fetch = git
+pkg_ponos_repo = https://github.com/klarna/ponos
+pkg_ponos_commit = master
+
+PACKAGES += poolboy
+pkg_poolboy_name = poolboy
+pkg_poolboy_description = A hunky Erlang worker pool factory
+pkg_poolboy_homepage = https://github.com/devinus/poolboy
+pkg_poolboy_fetch = git
+pkg_poolboy_repo = https://github.com/devinus/poolboy
+pkg_poolboy_commit = master
+
+PACKAGES += pooler
+pkg_pooler_name = pooler
+pkg_pooler_description = An OTP Process Pool Application
+pkg_pooler_homepage = https://github.com/seth/pooler
+pkg_pooler_fetch = git
+pkg_pooler_repo = https://github.com/seth/pooler
+pkg_pooler_commit = master
+
+PACKAGES += pqueue
+pkg_pqueue_name = pqueue
+pkg_pqueue_description = Erlang Priority Queues
+pkg_pqueue_homepage = https://github.com/okeuday/pqueue
+pkg_pqueue_fetch = git
+pkg_pqueue_repo = https://github.com/okeuday/pqueue
+pkg_pqueue_commit = master
+
+PACKAGES += procket
+pkg_procket_name = procket
+pkg_procket_description = Erlang interface to low level socket operations
+pkg_procket_homepage = http://blog.listincomprehension.com/search/label/procket
+pkg_procket_fetch = git
+pkg_procket_repo = https://github.com/msantos/procket
+pkg_procket_commit = master
+
+PACKAGES += prometheus
+pkg_prometheus_name = prometheus
+pkg_prometheus_description = Prometheus.io client in Erlang
+pkg_prometheus_homepage = https://github.com/deadtrickster/prometheus.erl
+pkg_prometheus_fetch = git
+pkg_prometheus_repo = https://github.com/deadtrickster/prometheus.erl
+pkg_prometheus_commit = master
+
+PACKAGES += prop
+pkg_prop_name = prop
+pkg_prop_description = An Erlang code scaffolding and generator system.
+pkg_prop_homepage = https://github.com/nuex/prop
+pkg_prop_fetch = git
+pkg_prop_repo = https://github.com/nuex/prop
+pkg_prop_commit = master
+
+PACKAGES += proper
+pkg_proper_name = proper
+pkg_proper_description = PropEr: a QuickCheck-inspired property-based testing tool for Erlang.
+pkg_proper_homepage = http://proper.softlab.ntua.gr
+pkg_proper_fetch = git
+pkg_proper_repo = https://github.com/manopapad/proper
+pkg_proper_commit = master
+
+PACKAGES += props
+pkg_props_name = props
+pkg_props_description = Property structure library
+pkg_props_homepage = https://github.com/greyarea/props
+pkg_props_fetch = git
+pkg_props_repo = https://github.com/greyarea/props
+pkg_props_commit = master
+
+PACKAGES += protobuffs
+pkg_protobuffs_name = protobuffs
+pkg_protobuffs_description = An implementation of Google's Protocol Buffers for Erlang, based on ngerakines/erlang_protobuffs.
+pkg_protobuffs_homepage = https://github.com/basho/erlang_protobuffs
+pkg_protobuffs_fetch = git
+pkg_protobuffs_repo = https://github.com/basho/erlang_protobuffs
+pkg_protobuffs_commit = master
+
+PACKAGES += psycho
+pkg_psycho_name = psycho
+pkg_psycho_description = HTTP server that provides a WSGI-like interface for applications and middleware.
+pkg_psycho_homepage = https://github.com/gar1t/psycho
+pkg_psycho_fetch = git
+pkg_psycho_repo = https://github.com/gar1t/psycho
+pkg_psycho_commit = master
+
+PACKAGES += purity
+pkg_purity_name = purity
+pkg_purity_description = A side-effect analyzer for Erlang
+pkg_purity_homepage = https://github.com/mpitid/purity
+pkg_purity_fetch = git
+pkg_purity_repo = https://github.com/mpitid/purity
+pkg_purity_commit = master
+
+PACKAGES += push_service
+pkg_push_service_name = push_service
+pkg_push_service_description = Push service
+pkg_push_service_homepage = https://github.com/hairyhum/push_service
+pkg_push_service_fetch = git
+pkg_push_service_repo = https://github.com/hairyhum/push_service
+pkg_push_service_commit = master
+
+PACKAGES += qdate
+pkg_qdate_name = qdate
+pkg_qdate_description = Date, time, and timezone parsing, formatting, and conversion for Erlang.
+pkg_qdate_homepage = https://github.com/choptastic/qdate
+pkg_qdate_fetch = git
+pkg_qdate_repo = https://github.com/choptastic/qdate
+pkg_qdate_commit = master
+
+PACKAGES += qrcode
+pkg_qrcode_name = qrcode
+pkg_qrcode_description = QR Code encoder in Erlang
+pkg_qrcode_homepage = https://github.com/komone/qrcode
+pkg_qrcode_fetch = git
+pkg_qrcode_repo = https://github.com/komone/qrcode
+pkg_qrcode_commit = master
+
+PACKAGES += quest
+pkg_quest_name = quest
+pkg_quest_description = Learn Erlang through this set of challenges. An interactive system for getting to know Erlang.
+pkg_quest_homepage = https://github.com/eriksoe/ErlangQuest
+pkg_quest_fetch = git
+pkg_quest_repo = https://github.com/eriksoe/ErlangQuest
+pkg_quest_commit = master
+
+PACKAGES += quickrand
+pkg_quickrand_name = quickrand
+pkg_quickrand_description = Quick Erlang Random Number Generation
+pkg_quickrand_homepage = https://github.com/okeuday/quickrand
+pkg_quickrand_fetch = git
+pkg_quickrand_repo = https://github.com/okeuday/quickrand
+pkg_quickrand_commit = master
+
+PACKAGES += rabbit
+pkg_rabbit_name = rabbit
+pkg_rabbit_description = RabbitMQ Server
+pkg_rabbit_homepage = https://www.rabbitmq.com/
+pkg_rabbit_fetch = git
+pkg_rabbit_repo = https://github.com/rabbitmq/rabbitmq-server.git
+pkg_rabbit_commit = master
+
+PACKAGES += rabbit_exchange_type_riak
+pkg_rabbit_exchange_type_riak_name = rabbit_exchange_type_riak
+pkg_rabbit_exchange_type_riak_description = Custom RabbitMQ exchange type for sticking messages in Riak
+pkg_rabbit_exchange_type_riak_homepage = https://github.com/jbrisbin/riak-exchange
+pkg_rabbit_exchange_type_riak_fetch = git
+pkg_rabbit_exchange_type_riak_repo = https://github.com/jbrisbin/riak-exchange
+pkg_rabbit_exchange_type_riak_commit = master
+
+PACKAGES += rack
+pkg_rack_name = rack
+pkg_rack_description = Rack handler for erlang
+pkg_rack_homepage = https://github.com/erlyvideo/rack
+pkg_rack_fetch = git
+pkg_rack_repo = https://github.com/erlyvideo/rack
+pkg_rack_commit = master
+
+PACKAGES += radierl
+pkg_radierl_name = radierl
+pkg_radierl_description = RADIUS protocol stack implemented in Erlang.
+pkg_radierl_homepage = https://github.com/vances/radierl
+pkg_radierl_fetch = git
+pkg_radierl_repo = https://github.com/vances/radierl
+pkg_radierl_commit = master
+
+PACKAGES += rafter
+pkg_rafter_name = rafter
+pkg_rafter_description = An Erlang library application which implements the Raft consensus protocol
+pkg_rafter_homepage = https://github.com/andrewjstone/rafter
+pkg_rafter_fetch = git
+pkg_rafter_repo = https://github.com/andrewjstone/rafter
+pkg_rafter_commit = master
+
+PACKAGES += ranch
+pkg_ranch_name = ranch
+pkg_ranch_description = Socket acceptor pool for TCP protocols.
+pkg_ranch_homepage = http://ninenines.eu
+pkg_ranch_fetch = git
+pkg_ranch_repo = https://github.com/ninenines/ranch
+pkg_ranch_commit = 1.2.1
+
+PACKAGES += rbeacon
+pkg_rbeacon_name = rbeacon
+pkg_rbeacon_description = LAN discovery and presence in Erlang.
+pkg_rbeacon_homepage = https://github.com/refuge/rbeacon
+pkg_rbeacon_fetch = git
+pkg_rbeacon_repo = https://github.com/refuge/rbeacon
+pkg_rbeacon_commit = master
+
+PACKAGES += rebar
+pkg_rebar_name = rebar
+pkg_rebar_description = Erlang build tool that makes it easy to compile and test Erlang applications, port drivers and releases.
+pkg_rebar_homepage = http://www.rebar3.org
+pkg_rebar_fetch = git
+pkg_rebar_repo = https://github.com/rebar/rebar3
+pkg_rebar_commit = master
+
+PACKAGES += rebus
+pkg_rebus_name = rebus
+pkg_rebus_description = A stupid simple, internal, pub/sub event bus written in and for Erlang.
+pkg_rebus_homepage = https://github.com/olle/rebus
+pkg_rebus_fetch = git
+pkg_rebus_repo = https://github.com/olle/rebus
+pkg_rebus_commit = master
+
+PACKAGES += rec2json
+pkg_rec2json_name = rec2json
+pkg_rec2json_description = Compile erlang record definitions into modules to convert them to/from json easily.
+pkg_rec2json_homepage = https://github.com/lordnull/rec2json
+pkg_rec2json_fetch = git
+pkg_rec2json_repo = https://github.com/lordnull/rec2json
+pkg_rec2json_commit = master
+
+PACKAGES += recon
+pkg_recon_name = recon
+pkg_recon_description = Collection of functions and scripts to debug Erlang in production.
+pkg_recon_homepage = https://github.com/ferd/recon
+pkg_recon_fetch = git
+pkg_recon_repo = https://github.com/ferd/recon
+pkg_recon_commit = master
+
+PACKAGES += record_info
+pkg_record_info_name = record_info
+pkg_record_info_description = Convert between record and proplist
+pkg_record_info_homepage = https://github.com/bipthelin/erlang-record_info
+pkg_record_info_fetch = git
+pkg_record_info_repo = https://github.com/bipthelin/erlang-record_info
+pkg_record_info_commit = master
+
+PACKAGES += redgrid
+pkg_redgrid_name = redgrid
+pkg_redgrid_description = automatic Erlang node discovery via redis
+pkg_redgrid_homepage = https://github.com/jkvor/redgrid
+pkg_redgrid_fetch = git
+pkg_redgrid_repo = https://github.com/jkvor/redgrid
+pkg_redgrid_commit = master
+
+PACKAGES += redo
+pkg_redo_name = redo
+pkg_redo_description = pipelined erlang redis client
+pkg_redo_homepage = https://github.com/jkvor/redo
+pkg_redo_fetch = git
+pkg_redo_repo = https://github.com/jkvor/redo
+pkg_redo_commit = master
+
+PACKAGES += reload_mk
+pkg_reload_mk_name = reload_mk
+pkg_reload_mk_description = Live reload plugin for erlang.mk.
+pkg_reload_mk_homepage = https://github.com/bullno1/reload.mk
+pkg_reload_mk_fetch = git
+pkg_reload_mk_repo = https://github.com/bullno1/reload.mk
+pkg_reload_mk_commit = master
+
+PACKAGES += reltool_util
+pkg_reltool_util_name = reltool_util
+pkg_reltool_util_description = Erlang reltool utility functionality application
+pkg_reltool_util_homepage = https://github.com/okeuday/reltool_util
+pkg_reltool_util_fetch = git
+pkg_reltool_util_repo = https://github.com/okeuday/reltool_util
+pkg_reltool_util_commit = master
+
+PACKAGES += relx
+pkg_relx_name = relx
+pkg_relx_description = Sane, simple release creation for Erlang
+pkg_relx_homepage = https://github.com/erlware/relx
+pkg_relx_fetch = git
+pkg_relx_repo = https://github.com/erlware/relx
+pkg_relx_commit = master
+
+PACKAGES += resource_discovery
+pkg_resource_discovery_name = resource_discovery
+pkg_resource_discovery_description = An application used to dynamically discover resources present in an Erlang node cluster.
+pkg_resource_discovery_homepage = http://erlware.org/
+pkg_resource_discovery_fetch = git
+pkg_resource_discovery_repo = https://github.com/erlware/resource_discovery
+pkg_resource_discovery_commit = master
+
+PACKAGES += restc
+pkg_restc_name = restc
+pkg_restc_description = Erlang Rest Client
+pkg_restc_homepage = https://github.com/kivra/restclient
+pkg_restc_fetch = git
+pkg_restc_repo = https://github.com/kivra/restclient
+pkg_restc_commit = master
+
+PACKAGES += rfc4627_jsonrpc
+pkg_rfc4627_jsonrpc_name = rfc4627_jsonrpc
+pkg_rfc4627_jsonrpc_description = Erlang RFC4627 (JSON) codec and JSON-RPC server implementation.
+pkg_rfc4627_jsonrpc_homepage = https://github.com/tonyg/erlang-rfc4627
+pkg_rfc4627_jsonrpc_fetch = git
+pkg_rfc4627_jsonrpc_repo = https://github.com/tonyg/erlang-rfc4627
+pkg_rfc4627_jsonrpc_commit = master
+
+PACKAGES += riak_control
+pkg_riak_control_name = riak_control
+pkg_riak_control_description = Webmachine-based administration interface for Riak.
+pkg_riak_control_homepage = https://github.com/basho/riak_control
+pkg_riak_control_fetch = git
+pkg_riak_control_repo = https://github.com/basho/riak_control
+pkg_riak_control_commit = master
+
+PACKAGES += riak_core
+pkg_riak_core_name = riak_core
+pkg_riak_core_description = Distributed systems infrastructure used by Riak.
+pkg_riak_core_homepage = https://github.com/basho/riak_core
+pkg_riak_core_fetch = git
+pkg_riak_core_repo = https://github.com/basho/riak_core
+pkg_riak_core_commit = master
+
+PACKAGES += riak_dt
+pkg_riak_dt_name = riak_dt
+pkg_riak_dt_description = Convergent replicated datatypes in Erlang
+pkg_riak_dt_homepage = https://github.com/basho/riak_dt
+pkg_riak_dt_fetch = git
+pkg_riak_dt_repo = https://github.com/basho/riak_dt
+pkg_riak_dt_commit = master
+
+PACKAGES += riak_ensemble
+pkg_riak_ensemble_name = riak_ensemble
+pkg_riak_ensemble_description = Multi-Paxos framework in Erlang
+pkg_riak_ensemble_homepage = https://github.com/basho/riak_ensemble
+pkg_riak_ensemble_fetch = git
+pkg_riak_ensemble_repo = https://github.com/basho/riak_ensemble
+pkg_riak_ensemble_commit = master
+
+PACKAGES += riak_kv
+pkg_riak_kv_name = riak_kv
+pkg_riak_kv_description = Riak Key/Value Store
+pkg_riak_kv_homepage = https://github.com/basho/riak_kv
+pkg_riak_kv_fetch = git
+pkg_riak_kv_repo = https://github.com/basho/riak_kv
+pkg_riak_kv_commit = master
+
+PACKAGES += riak_pg
+pkg_riak_pg_name = riak_pg
+pkg_riak_pg_description = Distributed process groups with riak_core.
+pkg_riak_pg_homepage = https://github.com/cmeiklejohn/riak_pg
+pkg_riak_pg_fetch = git
+pkg_riak_pg_repo = https://github.com/cmeiklejohn/riak_pg
+pkg_riak_pg_commit = master
+
+PACKAGES += riak_pipe
+pkg_riak_pipe_name = riak_pipe
+pkg_riak_pipe_description = Riak Pipelines
+pkg_riak_pipe_homepage = https://github.com/basho/riak_pipe
+pkg_riak_pipe_fetch = git
+pkg_riak_pipe_repo = https://github.com/basho/riak_pipe
+pkg_riak_pipe_commit = master
+
+PACKAGES += riak_sysmon
+pkg_riak_sysmon_name = riak_sysmon
+pkg_riak_sysmon_description = Simple OTP app for managing Erlang VM system_monitor event messages
+pkg_riak_sysmon_homepage = https://github.com/basho/riak_sysmon
+pkg_riak_sysmon_fetch = git
+pkg_riak_sysmon_repo = https://github.com/basho/riak_sysmon
+pkg_riak_sysmon_commit = master
+
+PACKAGES += riak_test
+pkg_riak_test_name = riak_test
+pkg_riak_test_description = I'm in your cluster, testing your riaks
+pkg_riak_test_homepage = https://github.com/basho/riak_test
+pkg_riak_test_fetch = git
+pkg_riak_test_repo = https://github.com/basho/riak_test
+pkg_riak_test_commit = master
+
+PACKAGES += riakc
+pkg_riakc_name = riakc
+pkg_riakc_description = Erlang clients for Riak.
+pkg_riakc_homepage = https://github.com/basho/riak-erlang-client
+pkg_riakc_fetch = git
+pkg_riakc_repo = https://github.com/basho/riak-erlang-client
+pkg_riakc_commit = master
+
+PACKAGES += riakhttpc
+pkg_riakhttpc_name = riakhttpc
+pkg_riakhttpc_description = Riak Erlang client using the HTTP interface
+pkg_riakhttpc_homepage = https://github.com/basho/riak-erlang-http-client
+pkg_riakhttpc_fetch = git
+pkg_riakhttpc_repo = https://github.com/basho/riak-erlang-http-client
+pkg_riakhttpc_commit = master
+
+PACKAGES += riaknostic
+pkg_riaknostic_name = riaknostic
+pkg_riaknostic_description = A diagnostic tool for Riak installations, to find common errors asap
+pkg_riaknostic_homepage = https://github.com/basho/riaknostic
+pkg_riaknostic_fetch = git
+pkg_riaknostic_repo = https://github.com/basho/riaknostic
+pkg_riaknostic_commit = master
+
+PACKAGES += riakpool
+pkg_riakpool_name = riakpool
+pkg_riakpool_description = erlang riak client pool
+pkg_riakpool_homepage = https://github.com/dweldon/riakpool
+pkg_riakpool_fetch = git
+pkg_riakpool_repo = https://github.com/dweldon/riakpool
+pkg_riakpool_commit = master
+
+PACKAGES += rivus_cep
+pkg_rivus_cep_name = rivus_cep
+pkg_rivus_cep_description = Complex event processing in Erlang
+pkg_rivus_cep_homepage = https://github.com/vascokk/rivus_cep
+pkg_rivus_cep_fetch = git
+pkg_rivus_cep_repo = https://github.com/vascokk/rivus_cep
+pkg_rivus_cep_commit = master
+
+PACKAGES += rlimit
+pkg_rlimit_name = rlimit
+pkg_rlimit_description = Magnus Klaar's rate limiter code from etorrent
+pkg_rlimit_homepage = https://github.com/jlouis/rlimit
+pkg_rlimit_fetch = git
+pkg_rlimit_repo = https://github.com/jlouis/rlimit
+pkg_rlimit_commit = master
+
+PACKAGES += rust_mk
+pkg_rust_mk_name = rust_mk
+pkg_rust_mk_description = Build Rust crates in an Erlang application
+pkg_rust_mk_homepage = https://github.com/goertzenator/rust.mk
+pkg_rust_mk_fetch = git
+pkg_rust_mk_repo = https://github.com/goertzenator/rust.mk
+pkg_rust_mk_commit = master
+
+PACKAGES += safetyvalve
+pkg_safetyvalve_name = safetyvalve
+pkg_safetyvalve_description = A safety valve for your erlang node
+pkg_safetyvalve_homepage = https://github.com/jlouis/safetyvalve
+pkg_safetyvalve_fetch = git
+pkg_safetyvalve_repo = https://github.com/jlouis/safetyvalve
+pkg_safetyvalve_commit = master
+
+PACKAGES += seestar
+pkg_seestar_name = seestar
+pkg_seestar_description = The Erlang client for Cassandra 1.2+ binary protocol
+pkg_seestar_homepage = https://github.com/iamaleksey/seestar
+pkg_seestar_fetch = git
+pkg_seestar_repo = https://github.com/iamaleksey/seestar
+pkg_seestar_commit = master
+
+PACKAGES += service
+pkg_service_name = service
+pkg_service_description = A minimal Erlang behavior for creating CloudI internal services
+pkg_service_homepage = http://cloudi.org/
+pkg_service_fetch = git
+pkg_service_repo = https://github.com/CloudI/service
+pkg_service_commit = master
+
+PACKAGES += setup
+pkg_setup_name = setup
+pkg_setup_description = Generic setup utility for Erlang-based systems
+pkg_setup_homepage = https://github.com/uwiger/setup
+pkg_setup_fetch = git
+pkg_setup_repo = https://github.com/uwiger/setup
+pkg_setup_commit = master
+
+PACKAGES += sext
+pkg_sext_name = sext
+pkg_sext_description = Sortable Erlang Term Serialization
+pkg_sext_homepage = https://github.com/uwiger/sext
+pkg_sext_fetch = git
+pkg_sext_repo = https://github.com/uwiger/sext
+pkg_sext_commit = master
+
+PACKAGES += sfmt
+pkg_sfmt_name = sfmt
+pkg_sfmt_description = SFMT pseudo random number generator for Erlang.
+pkg_sfmt_homepage = https://github.com/jj1bdx/sfmt-erlang
+pkg_sfmt_fetch = git
+pkg_sfmt_repo = https://github.com/jj1bdx/sfmt-erlang
+pkg_sfmt_commit = master
+
+PACKAGES += sgte
+pkg_sgte_name = sgte
+pkg_sgte_description = A simple Erlang Template Engine
+pkg_sgte_homepage = https://github.com/filippo/sgte
+pkg_sgte_fetch = git
+pkg_sgte_repo = https://github.com/filippo/sgte
+pkg_sgte_commit = master
+
+PACKAGES += sheriff
+pkg_sheriff_name = sheriff
+pkg_sheriff_description = Parse transform for type based validation.
+pkg_sheriff_homepage = http://ninenines.eu
+pkg_sheriff_fetch = git
+pkg_sheriff_repo = https://github.com/extend/sheriff
+pkg_sheriff_commit = master
+
+PACKAGES += shotgun
+pkg_shotgun_name = shotgun
+pkg_shotgun_description = better than just a gun
+pkg_shotgun_homepage = https://github.com/inaka/shotgun
+pkg_shotgun_fetch = git
+pkg_shotgun_repo = https://github.com/inaka/shotgun
+pkg_shotgun_commit = master
+
+PACKAGES += sidejob
+pkg_sidejob_name = sidejob
+pkg_sidejob_description = Parallel worker and capacity limiting library for Erlang
+pkg_sidejob_homepage = https://github.com/basho/sidejob
+pkg_sidejob_fetch = git
+pkg_sidejob_repo = https://github.com/basho/sidejob
+pkg_sidejob_commit = master
+
+PACKAGES += sieve
+pkg_sieve_name = sieve
+pkg_sieve_description = sieve is a simple TCP routing proxy (layer 7) in erlang
+pkg_sieve_homepage = https://github.com/benoitc/sieve
+pkg_sieve_fetch = git
+pkg_sieve_repo = https://github.com/benoitc/sieve
+pkg_sieve_commit = master
+
+PACKAGES += sighandler
+pkg_sighandler_name = sighandler
+pkg_sighandler_description = Handle UNIX signals in Erlang
+pkg_sighandler_homepage = https://github.com/jkingsbery/sighandler
+pkg_sighandler_fetch = git
+pkg_sighandler_repo = https://github.com/jkingsbery/sighandler
+pkg_sighandler_commit = master
+
+PACKAGES += simhash
+pkg_simhash_name = simhash
+pkg_simhash_description = Simhashing for Erlang -- hashing algorithm to find near-duplicates in binary data.
+pkg_simhash_homepage = https://github.com/ferd/simhash
+pkg_simhash_fetch = git
+pkg_simhash_repo = https://github.com/ferd/simhash
+pkg_simhash_commit = master
+
+PACKAGES += simple_bridge
+pkg_simple_bridge_name = simple_bridge
+pkg_simple_bridge_description = A simple, standardized interface library to Erlang HTTP Servers.
+pkg_simple_bridge_homepage = https://github.com/nitrogen/simple_bridge
+pkg_simple_bridge_fetch = git
+pkg_simple_bridge_repo = https://github.com/nitrogen/simple_bridge
+pkg_simple_bridge_commit = master
+
+PACKAGES += simple_oauth2
+pkg_simple_oauth2_name = simple_oauth2
+pkg_simple_oauth2_description = Simple erlang OAuth2 client module for any http server framework (Google, Facebook, Yandex, Vkontakte are preconfigured)
+pkg_simple_oauth2_homepage = https://github.com/virtan/simple_oauth2
+pkg_simple_oauth2_fetch = git
+pkg_simple_oauth2_repo = https://github.com/virtan/simple_oauth2
+pkg_simple_oauth2_commit = master
+
+PACKAGES += skel
+pkg_skel_name = skel
+pkg_skel_description = A Streaming Process-based Skeleton Library for Erlang
+pkg_skel_homepage = https://github.com/ParaPhrase/skel
+pkg_skel_fetch = git
+pkg_skel_repo = https://github.com/ParaPhrase/skel
+pkg_skel_commit = master
+
+PACKAGES += slack
+pkg_slack_name = slack
+pkg_slack_description = Minimal slack notification OTP library.
+pkg_slack_homepage = https://github.com/DonBranson/slack
+pkg_slack_fetch = git
+pkg_slack_repo = https://github.com/DonBranson/slack.git
+pkg_slack_commit = master
+
+PACKAGES += smother
+pkg_smother_name = smother
+pkg_smother_description = Extended code coverage metrics for Erlang.
+pkg_smother_homepage = https://ramsay-t.github.io/Smother/
+pkg_smother_fetch = git
+pkg_smother_repo = https://github.com/ramsay-t/Smother
+pkg_smother_commit = master
+
+PACKAGES += snappyer
+pkg_snappyer_name = snappyer
+pkg_snappyer_description = Snappy as a NIF for Erlang
+pkg_snappyer_homepage = https://github.com/zmstone/snappyer
+pkg_snappyer_fetch = git
+pkg_snappyer_repo = https://github.com/zmstone/snappyer.git
+pkg_snappyer_commit = master
+
+PACKAGES += social
+pkg_social_name = social
+pkg_social_description = Cowboy handler for social login via OAuth2 providers
+pkg_social_homepage = https://github.com/dvv/social
+pkg_social_fetch = git
+pkg_social_repo = https://github.com/dvv/social
+pkg_social_commit = master
+
+PACKAGES += spapi_router
+pkg_spapi_router_name = spapi_router
+pkg_spapi_router_description = Partially-connected Erlang clustering
+pkg_spapi_router_homepage = https://github.com/spilgames/spapi-router
+pkg_spapi_router_fetch = git
+pkg_spapi_router_repo = https://github.com/spilgames/spapi-router
+pkg_spapi_router_commit = master
+
+PACKAGES += sqerl
+pkg_sqerl_name = sqerl
+pkg_sqerl_description = An Erlang-flavoured SQL DSL
+pkg_sqerl_homepage = https://github.com/hairyhum/sqerl
+pkg_sqerl_fetch = git
+pkg_sqerl_repo = https://github.com/hairyhum/sqerl
+pkg_sqerl_commit = master
+
+PACKAGES += srly
+pkg_srly_name = srly
+pkg_srly_description = Native Erlang Unix serial interface
+pkg_srly_homepage = https://github.com/msantos/srly
+pkg_srly_fetch = git
+pkg_srly_repo = https://github.com/msantos/srly
+pkg_srly_commit = master
+
+PACKAGES += sshrpc
+pkg_sshrpc_name = sshrpc
+pkg_sshrpc_description = Erlang SSH RPC module (experimental)
+pkg_sshrpc_homepage = https://github.com/jj1bdx/sshrpc
+pkg_sshrpc_fetch = git
+pkg_sshrpc_repo = https://github.com/jj1bdx/sshrpc
+pkg_sshrpc_commit = master
+
+PACKAGES += stable
+pkg_stable_name = stable
+pkg_stable_description = Library of assorted helpers for Cowboy web server.
+pkg_stable_homepage = https://github.com/dvv/stable
+pkg_stable_fetch = git
+pkg_stable_repo = https://github.com/dvv/stable
+pkg_stable_commit = master
+
+PACKAGES += statebox
+pkg_statebox_name = statebox
+pkg_statebox_description = Erlang state monad with merge/conflict-resolution capabilities. Useful for Riak.
+pkg_statebox_homepage = https://github.com/mochi/statebox
+pkg_statebox_fetch = git
+pkg_statebox_repo = https://github.com/mochi/statebox
+pkg_statebox_commit = master
+
+PACKAGES += statebox_riak
+pkg_statebox_riak_name = statebox_riak
+pkg_statebox_riak_description = Convenience library that makes it easier to use statebox with riak, extracted from best practices in our production code at Mochi Media.
+pkg_statebox_riak_homepage = https://github.com/mochi/statebox_riak
+pkg_statebox_riak_fetch = git
+pkg_statebox_riak_repo = https://github.com/mochi/statebox_riak
+pkg_statebox_riak_commit = master
+
+PACKAGES += statman
+pkg_statman_name = statman
+pkg_statman_description = Efficiently collect massive volumes of metrics inside the Erlang VM
+pkg_statman_homepage = https://github.com/knutin/statman
+pkg_statman_fetch = git
+pkg_statman_repo = https://github.com/knutin/statman
+pkg_statman_commit = master
+
+PACKAGES += statsderl
+pkg_statsderl_name = statsderl
+pkg_statsderl_description = StatsD client (erlang)
+pkg_statsderl_homepage = https://github.com/lpgauth/statsderl
+pkg_statsderl_fetch = git
+pkg_statsderl_repo = https://github.com/lpgauth/statsderl
+pkg_statsderl_commit = master
+
+PACKAGES += stdinout_pool
+pkg_stdinout_pool_name = stdinout_pool
+pkg_stdinout_pool_description = stdinout_pool : stuff goes in, stuff goes out. there's never any miscommunication.
+pkg_stdinout_pool_homepage = https://github.com/mattsta/erlang-stdinout-pool
+pkg_stdinout_pool_fetch = git
+pkg_stdinout_pool_repo = https://github.com/mattsta/erlang-stdinout-pool
+pkg_stdinout_pool_commit = master
+
+PACKAGES += stockdb
+pkg_stockdb_name = stockdb
+pkg_stockdb_description = Database for storing Stock Exchange quotes in erlang
+pkg_stockdb_homepage = https://github.com/maxlapshin/stockdb
+pkg_stockdb_fetch = git
+pkg_stockdb_repo = https://github.com/maxlapshin/stockdb
+pkg_stockdb_commit = master
+
+PACKAGES += stripe
+pkg_stripe_name = stripe
+pkg_stripe_description = Erlang interface to the stripe.com API
+pkg_stripe_homepage = https://github.com/mattsta/stripe-erlang
+pkg_stripe_fetch = git
+pkg_stripe_repo = https://github.com/mattsta/stripe-erlang
+pkg_stripe_commit = v1
+
+PACKAGES += subproc
+pkg_subproc_name = subproc
+pkg_subproc_description = unix subprocess manager with {active,once|false} modes
+pkg_subproc_homepage = http://dozzie.jarowit.net/trac/wiki/subproc
+pkg_subproc_fetch = git
+pkg_subproc_repo = https://github.com/dozzie/subproc
+pkg_subproc_commit = v0.1.0
+
+PACKAGES += supervisor3
+pkg_supervisor3_name = supervisor3
+pkg_supervisor3_description = OTP supervisor with additional strategies
+pkg_supervisor3_homepage = https://github.com/klarna/supervisor3
+pkg_supervisor3_fetch = git
+pkg_supervisor3_repo = https://github.com/klarna/supervisor3.git
+pkg_supervisor3_commit = master
+
+PACKAGES += surrogate
+pkg_surrogate_name = surrogate
+pkg_surrogate_description = Proxy server written in erlang. Supports reverse proxy load balancing and forward proxy with http (including CONNECT), socks4, socks5, and transparent proxy modes.
+pkg_surrogate_homepage = https://github.com/skruger/Surrogate
+pkg_surrogate_fetch = git
+pkg_surrogate_repo = https://github.com/skruger/Surrogate
+pkg_surrogate_commit = master
+
+PACKAGES += swab
+pkg_swab_name = swab
+pkg_swab_description = General purpose buffer handling module
+pkg_swab_homepage = https://github.com/crownedgrouse/swab
+pkg_swab_fetch = git
+pkg_swab_repo = https://github.com/crownedgrouse/swab
+pkg_swab_commit = master
+
+PACKAGES += swarm
+pkg_swarm_name = swarm
+pkg_swarm_description = Fast and simple acceptor pool for Erlang
+pkg_swarm_homepage = https://github.com/jeremey/swarm
+pkg_swarm_fetch = git
+pkg_swarm_repo = https://github.com/jeremey/swarm
+pkg_swarm_commit = master
+
+PACKAGES += switchboard
+pkg_switchboard_name = switchboard
+pkg_switchboard_description = A framework for processing email using worker plugins.
+pkg_switchboard_homepage = https://github.com/thusfresh/switchboard
+pkg_switchboard_fetch = git
+pkg_switchboard_repo = https://github.com/thusfresh/switchboard
+pkg_switchboard_commit = master
+
+PACKAGES += syn
+pkg_syn_name = syn
+pkg_syn_description = A global Process Registry and Process Group manager for Erlang.
+pkg_syn_homepage = https://github.com/ostinelli/syn
+pkg_syn_fetch = git
+pkg_syn_repo = https://github.com/ostinelli/syn
+pkg_syn_commit = master
+
+PACKAGES += sync
+pkg_sync_name = sync
+pkg_sync_description = On-the-fly recompiling and reloading in Erlang.
+pkg_sync_homepage = https://github.com/rustyio/sync
+pkg_sync_fetch = git
+pkg_sync_repo = https://github.com/rustyio/sync
+pkg_sync_commit = master
+
+PACKAGES += syntaxerl
+pkg_syntaxerl_name = syntaxerl
+pkg_syntaxerl_description = Syntax checker for Erlang
+pkg_syntaxerl_homepage = https://github.com/ten0s/syntaxerl
+pkg_syntaxerl_fetch = git
+pkg_syntaxerl_repo = https://github.com/ten0s/syntaxerl
+pkg_syntaxerl_commit = master
+
+PACKAGES += syslog
+pkg_syslog_name = syslog
+pkg_syslog_description = Erlang port driver for interacting with syslog via syslog(3)
+pkg_syslog_homepage = https://github.com/Vagabond/erlang-syslog
+pkg_syslog_fetch = git
+pkg_syslog_repo = https://github.com/Vagabond/erlang-syslog
+pkg_syslog_commit = master
+
+PACKAGES += taskforce
+pkg_taskforce_name = taskforce
+pkg_taskforce_description = Erlang worker pools for controlled parallelisation of arbitrary tasks.
+pkg_taskforce_homepage = https://github.com/g-andrade/taskforce
+pkg_taskforce_fetch = git
+pkg_taskforce_repo = https://github.com/g-andrade/taskforce
+pkg_taskforce_commit = master
+
+PACKAGES += tddreloader
+pkg_tddreloader_name = tddreloader
+pkg_tddreloader_description = Shell utility for recompiling, reloading, and testing code as it changes
+pkg_tddreloader_homepage = https://github.com/version2beta/tddreloader
+pkg_tddreloader_fetch = git
+pkg_tddreloader_repo = https://github.com/version2beta/tddreloader
+pkg_tddreloader_commit = master
+
+PACKAGES += tempo
+pkg_tempo_name = tempo
+pkg_tempo_description = NIF-based date and time parsing and formatting for Erlang.
+pkg_tempo_homepage = https://github.com/selectel/tempo
+pkg_tempo_fetch = git
+pkg_tempo_repo = https://github.com/selectel/tempo
+pkg_tempo_commit = master
+
+PACKAGES += ticktick
+pkg_ticktick_name = ticktick
+pkg_ticktick_description = Ticktick is an id generator for messaging services.
+pkg_ticktick_homepage = https://github.com/ericliang/ticktick
+pkg_ticktick_fetch = git
+pkg_ticktick_repo = https://github.com/ericliang/ticktick
+pkg_ticktick_commit = master
+
+PACKAGES += tinymq
+pkg_tinymq_name = tinymq
+pkg_tinymq_description = TinyMQ - a diminutive, in-memory message queue
+pkg_tinymq_homepage = https://github.com/ChicagoBoss/tinymq
+pkg_tinymq_fetch = git
+pkg_tinymq_repo = https://github.com/ChicagoBoss/tinymq
+pkg_tinymq_commit = master
+
+PACKAGES += tinymt
+pkg_tinymt_name = tinymt
+pkg_tinymt_description = TinyMT pseudo random number generator for Erlang.
+pkg_tinymt_homepage = https://github.com/jj1bdx/tinymt-erlang
+pkg_tinymt_fetch = git
+pkg_tinymt_repo = https://github.com/jj1bdx/tinymt-erlang
+pkg_tinymt_commit = master
+
+PACKAGES += tirerl
+pkg_tirerl_name = tirerl
+pkg_tirerl_description = Erlang interface to Elastic Search
+pkg_tirerl_homepage = https://github.com/inaka/tirerl
+pkg_tirerl_fetch = git
+pkg_tirerl_repo = https://github.com/inaka/tirerl
+pkg_tirerl_commit = master
+
+PACKAGES += toml
+pkg_toml_name = toml
+pkg_toml_description = TOML (0.4.0) config parser
+pkg_toml_homepage = http://dozzie.jarowit.net/trac/wiki/TOML
+pkg_toml_fetch = git
+pkg_toml_repo = https://github.com/dozzie/toml
+pkg_toml_commit = v0.2.0
+
+PACKAGES += traffic_tools
+pkg_traffic_tools_name = traffic_tools
+pkg_traffic_tools_description = Simple traffic limiting library
+pkg_traffic_tools_homepage = https://github.com/systra/traffic_tools
+pkg_traffic_tools_fetch = git
+pkg_traffic_tools_repo = https://github.com/systra/traffic_tools
+pkg_traffic_tools_commit = master
+
+PACKAGES += trails
+pkg_trails_name = trails
+pkg_trails_description = A couple of improvements over Cowboy Routes
+pkg_trails_homepage = http://inaka.github.io/cowboy-trails/
+pkg_trails_fetch = git
+pkg_trails_repo = https://github.com/inaka/cowboy-trails
+pkg_trails_commit = master
+
+PACKAGES += trane
+pkg_trane_name = trane
+pkg_trane_description = SAX-style parser for broken HTML, written in Erlang
+pkg_trane_homepage = https://github.com/massemanet/trane
+pkg_trane_fetch = git
+pkg_trane_repo = https://github.com/massemanet/trane
+pkg_trane_commit = master
+
+PACKAGES += transit
+pkg_transit_name = transit
+pkg_transit_description = transit format for erlang
+pkg_transit_homepage = https://github.com/isaiah/transit-erlang
+pkg_transit_fetch = git
+pkg_transit_repo = https://github.com/isaiah/transit-erlang
+pkg_transit_commit = master
+
+PACKAGES += trie
+pkg_trie_name = trie
+pkg_trie_description = Erlang Trie Implementation
+pkg_trie_homepage = https://github.com/okeuday/trie
+pkg_trie_fetch = git
+pkg_trie_repo = https://github.com/okeuday/trie
+pkg_trie_commit = master
+
+PACKAGES += triq
+pkg_triq_name = triq
+pkg_triq_description = Trifork QuickCheck
+pkg_triq_homepage = https://triq.gitlab.io
+pkg_triq_fetch = git
+pkg_triq_repo = https://gitlab.com/triq/triq.git
+pkg_triq_commit = master
+
+PACKAGES += tunctl
+pkg_tunctl_name = tunctl
+pkg_tunctl_description = Erlang TUN/TAP interface
+pkg_tunctl_homepage = https://github.com/msantos/tunctl
+pkg_tunctl_fetch = git
+pkg_tunctl_repo = https://github.com/msantos/tunctl
+pkg_tunctl_commit = master
+
+PACKAGES += twerl
+pkg_twerl_name = twerl
+pkg_twerl_description = Erlang client for the Twitter Streaming API
+pkg_twerl_homepage = https://github.com/lucaspiller/twerl
+pkg_twerl_fetch = git
+pkg_twerl_repo = https://github.com/lucaspiller/twerl
+pkg_twerl_commit = oauth
+
+PACKAGES += twitter_erlang
+pkg_twitter_erlang_name = twitter_erlang
+pkg_twitter_erlang_description = An Erlang twitter client
+pkg_twitter_erlang_homepage = https://github.com/ngerakines/erlang_twitter
+pkg_twitter_erlang_fetch = git
+pkg_twitter_erlang_repo = https://github.com/ngerakines/erlang_twitter
+pkg_twitter_erlang_commit = master
+
+PACKAGES += ucol_nif
+pkg_ucol_nif_name = ucol_nif
+pkg_ucol_nif_description = ICU based collation Erlang module
+pkg_ucol_nif_homepage = https://github.com/refuge/ucol_nif
+pkg_ucol_nif_fetch = git
+pkg_ucol_nif_repo = https://github.com/refuge/ucol_nif
+pkg_ucol_nif_commit = master
+
+PACKAGES += unicorn
+pkg_unicorn_name = unicorn
+pkg_unicorn_description = Generic configuration server
+pkg_unicorn_homepage = https://github.com/shizzard/unicorn
+pkg_unicorn_fetch = git
+pkg_unicorn_repo = https://github.com/shizzard/unicorn
+pkg_unicorn_commit = master
+
+PACKAGES += unsplit
+pkg_unsplit_name = unsplit
+pkg_unsplit_description = Resolves conflicts in Mnesia after network splits
+pkg_unsplit_homepage = https://github.com/uwiger/unsplit
+pkg_unsplit_fetch = git
+pkg_unsplit_repo = https://github.com/uwiger/unsplit
+pkg_unsplit_commit = master
+
+PACKAGES += uuid
+pkg_uuid_name = uuid
+pkg_uuid_description = Erlang UUID Implementation
+pkg_uuid_homepage = https://github.com/okeuday/uuid
+pkg_uuid_fetch = git
+pkg_uuid_repo = https://github.com/okeuday/uuid
+pkg_uuid_commit = master
+
+PACKAGES += ux
+pkg_ux_name = ux
+pkg_ux_description = Unicode eXtention for Erlang (Strings, Collation)
+pkg_ux_homepage = https://github.com/erlang-unicode/ux
+pkg_ux_fetch = git
+pkg_ux_repo = https://github.com/erlang-unicode/ux
+pkg_ux_commit = master
+
+PACKAGES += vert
+pkg_vert_name = vert
+pkg_vert_description = Erlang binding to libvirt virtualization API
+pkg_vert_homepage = https://github.com/msantos/erlang-libvirt
+pkg_vert_fetch = git
+pkg_vert_repo = https://github.com/msantos/erlang-libvirt
+pkg_vert_commit = master
+
+PACKAGES += verx
+pkg_verx_name = verx
+pkg_verx_description = Erlang implementation of the libvirtd remote protocol
+pkg_verx_homepage = https://github.com/msantos/verx
+pkg_verx_fetch = git
+pkg_verx_repo = https://github.com/msantos/verx
+pkg_verx_commit = master
+
+PACKAGES += vmq_acl
+pkg_vmq_acl_name = vmq_acl
+pkg_vmq_acl_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_acl_homepage = https://verne.mq/
+pkg_vmq_acl_fetch = git
+pkg_vmq_acl_repo = https://github.com/erlio/vmq_acl
+pkg_vmq_acl_commit = master
+
+PACKAGES += vmq_bridge
+pkg_vmq_bridge_name = vmq_bridge
+pkg_vmq_bridge_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_bridge_homepage = https://verne.mq/
+pkg_vmq_bridge_fetch = git
+pkg_vmq_bridge_repo = https://github.com/erlio/vmq_bridge
+pkg_vmq_bridge_commit = master
+
+PACKAGES += vmq_graphite
+pkg_vmq_graphite_name = vmq_graphite
+pkg_vmq_graphite_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_graphite_homepage = https://verne.mq/
+pkg_vmq_graphite_fetch = git
+pkg_vmq_graphite_repo = https://github.com/erlio/vmq_graphite
+pkg_vmq_graphite_commit = master
+
+PACKAGES += vmq_passwd
+pkg_vmq_passwd_name = vmq_passwd
+pkg_vmq_passwd_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_passwd_homepage = https://verne.mq/
+pkg_vmq_passwd_fetch = git
+pkg_vmq_passwd_repo = https://github.com/erlio/vmq_passwd
+pkg_vmq_passwd_commit = master
+
+PACKAGES += vmq_server
+pkg_vmq_server_name = vmq_server
+pkg_vmq_server_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_server_homepage = https://verne.mq/
+pkg_vmq_server_fetch = git
+pkg_vmq_server_repo = https://github.com/erlio/vmq_server
+pkg_vmq_server_commit = master
+
+PACKAGES += vmq_snmp
+pkg_vmq_snmp_name = vmq_snmp
+pkg_vmq_snmp_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_snmp_homepage = https://verne.mq/
+pkg_vmq_snmp_fetch = git
+pkg_vmq_snmp_repo = https://github.com/erlio/vmq_snmp
+pkg_vmq_snmp_commit = master
+
+PACKAGES += vmq_systree
+pkg_vmq_systree_name = vmq_systree
+pkg_vmq_systree_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_systree_homepage = https://verne.mq/
+pkg_vmq_systree_fetch = git
+pkg_vmq_systree_repo = https://github.com/erlio/vmq_systree
+pkg_vmq_systree_commit = master
+
+PACKAGES += vmstats
+pkg_vmstats_name = vmstats
+pkg_vmstats_description = Tiny Erlang app that works in conjunction with statsderl to generate Erlang VM information for graphite logs.
+pkg_vmstats_homepage = https://github.com/ferd/vmstats
+pkg_vmstats_fetch = git
+pkg_vmstats_repo = https://github.com/ferd/vmstats
+pkg_vmstats_commit = master
+
+PACKAGES += walrus
+pkg_walrus_name = walrus
+pkg_walrus_description = Walrus - Mustache-like Templating
+pkg_walrus_homepage = https://github.com/devinus/walrus
+pkg_walrus_fetch = git
+pkg_walrus_repo = https://github.com/devinus/walrus
+pkg_walrus_commit = master
+
+PACKAGES += webmachine
+pkg_webmachine_name = webmachine
+pkg_webmachine_description = A REST-based system for building web applications.
+pkg_webmachine_homepage = https://github.com/basho/webmachine
+pkg_webmachine_fetch = git
+pkg_webmachine_repo = https://github.com/basho/webmachine
+pkg_webmachine_commit = master
+
+PACKAGES += websocket_client
+pkg_websocket_client_name = websocket_client
+pkg_websocket_client_description = Erlang websocket client (ws and wss supported)
+pkg_websocket_client_homepage = https://github.com/jeremyong/websocket_client
+pkg_websocket_client_fetch = git
+pkg_websocket_client_repo = https://github.com/jeremyong/websocket_client
+pkg_websocket_client_commit = master
+
+PACKAGES += worker_pool
+pkg_worker_pool_name = worker_pool
+pkg_worker_pool_description = a simple erlang worker pool
+pkg_worker_pool_homepage = https://github.com/inaka/worker_pool
+pkg_worker_pool_fetch = git
+pkg_worker_pool_repo = https://github.com/inaka/worker_pool
+pkg_worker_pool_commit = master
+
+PACKAGES += wrangler
+pkg_wrangler_name = wrangler
+pkg_wrangler_description = Import of the Wrangler svn repository.
+pkg_wrangler_homepage = http://www.cs.kent.ac.uk/projects/wrangler/Home.html
+pkg_wrangler_fetch = git
+pkg_wrangler_repo = https://github.com/RefactoringTools/wrangler
+pkg_wrangler_commit = master
+
+PACKAGES += wsock
+pkg_wsock_name = wsock
+pkg_wsock_description = Erlang library to build WebSocket clients and servers
+pkg_wsock_homepage = https://github.com/madtrick/wsock
+pkg_wsock_fetch = git
+pkg_wsock_repo = https://github.com/madtrick/wsock
+pkg_wsock_commit = master
+
+PACKAGES += xhttpc
+pkg_xhttpc_name = xhttpc
+pkg_xhttpc_description = Extensible HTTP Client for Erlang
+pkg_xhttpc_homepage = https://github.com/seriyps/xhttpc
+pkg_xhttpc_fetch = git
+pkg_xhttpc_repo = https://github.com/seriyps/xhttpc
+pkg_xhttpc_commit = master
+
+PACKAGES += xref_runner
+pkg_xref_runner_name = xref_runner
+pkg_xref_runner_description = Erlang Xref Runner (inspired by rebar xref)
+pkg_xref_runner_homepage = https://github.com/inaka/xref_runner
+pkg_xref_runner_fetch = git
+pkg_xref_runner_repo = https://github.com/inaka/xref_runner
+pkg_xref_runner_commit = master
+
+PACKAGES += yamerl
+pkg_yamerl_name = yamerl
+pkg_yamerl_description = YAML 1.2 parser in pure Erlang
+pkg_yamerl_homepage = https://github.com/yakaz/yamerl
+pkg_yamerl_fetch = git
+pkg_yamerl_repo = https://github.com/yakaz/yamerl
+pkg_yamerl_commit = master
+
+PACKAGES += yamler
+pkg_yamler_name = yamler
+pkg_yamler_description = libyaml-based yaml loader for Erlang
+pkg_yamler_homepage = https://github.com/goertzenator/yamler
+pkg_yamler_fetch = git
+pkg_yamler_repo = https://github.com/goertzenator/yamler
+pkg_yamler_commit = master
+
+PACKAGES += yaws
+pkg_yaws_name = yaws
+pkg_yaws_description = Yaws webserver
+pkg_yaws_homepage = http://yaws.hyber.org
+pkg_yaws_fetch = git
+pkg_yaws_repo = https://github.com/klacke/yaws
+pkg_yaws_commit = master
+
+PACKAGES += zab_engine
+pkg_zab_engine_name = zab_engine
+pkg_zab_engine_description = Zab protocol implementation in Erlang
+pkg_zab_engine_homepage = https://github.com/xinmingyao/zab_engine
+pkg_zab_engine_fetch = git
+pkg_zab_engine_repo = https://github.com/xinmingyao/zab_engine
+pkg_zab_engine_commit = master
+
+PACKAGES += zabbix_sender
+pkg_zabbix_sender_name = zabbix_sender
+pkg_zabbix_sender_description = Zabbix trapper for sending data to Zabbix in pure Erlang
+pkg_zabbix_sender_homepage = https://github.com/stalkermn/zabbix_sender
+pkg_zabbix_sender_fetch = git
+pkg_zabbix_sender_repo = https://github.com/stalkermn/zabbix_sender.git
+pkg_zabbix_sender_commit = master
+
+PACKAGES += zeta
+pkg_zeta_name = zeta
+pkg_zeta_description = HTTP access log parser in Erlang
+pkg_zeta_homepage = https://github.com/s1n4/zeta
+pkg_zeta_fetch = git
+pkg_zeta_repo = https://github.com/s1n4/zeta
+pkg_zeta_commit = master
+
+PACKAGES += zippers
+pkg_zippers_name = zippers
+pkg_zippers_description = A library for functional zipper data structures in Erlang.
+pkg_zippers_homepage = https://github.com/ferd/zippers
+pkg_zippers_fetch = git
+pkg_zippers_repo = https://github.com/ferd/zippers
+pkg_zippers_commit = master
+
+PACKAGES += zlists
+pkg_zlists_name = zlists
+pkg_zlists_description = Erlang lazy lists library.
+pkg_zlists_homepage = https://github.com/vjache/erlang-zlists
+pkg_zlists_fetch = git
+pkg_zlists_repo = https://github.com/vjache/erlang-zlists
+pkg_zlists_commit = master
+
+PACKAGES += zraft_lib
+pkg_zraft_lib_name = zraft_lib
+pkg_zraft_lib_description = Erlang raft consensus protocol implementation
+pkg_zraft_lib_homepage = https://github.com/dreyk/zraft_lib
+pkg_zraft_lib_fetch = git
+pkg_zraft_lib_repo = https://github.com/dreyk/zraft_lib
+pkg_zraft_lib_commit = master
+
+PACKAGES += zucchini
+pkg_zucchini_name = zucchini
+pkg_zucchini_description = An Erlang INI parser
+pkg_zucchini_homepage = https://github.com/devinus/zucchini
+pkg_zucchini_fetch = git
+pkg_zucchini_repo = https://github.com/devinus/zucchini
+pkg_zucchini_commit = master
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: search
+
+define pkg_print
+ $(verbose) printf "%s\n" \
+ $(if $(call core_eq,$(1),$(pkg_$(1)_name)),,"Pkg name: $(1)") \
+ "App name: $(pkg_$(1)_name)" \
+ "Description: $(pkg_$(1)_description)" \
+ "Home page: $(pkg_$(1)_homepage)" \
+ "Fetch with: $(pkg_$(1)_fetch)" \
+ "Repository: $(pkg_$(1)_repo)" \
+ "Commit: $(pkg_$(1)_commit)" \
+ ""
+
+endef
+
+search:
+ifdef q
+ $(foreach p,$(PACKAGES), \
+ $(if $(findstring $(call core_lc,$(q)),$(call core_lc,$(pkg_$(p)_name) $(pkg_$(p)_description))), \
+ $(call pkg_print,$(p))))
+else
+ $(foreach p,$(PACKAGES),$(call pkg_print,$(p)))
+endif
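+
+# Example usage (package name illustrative): `make search q=riak` prints
+# every package whose name or description contains "riak", matched
+# case-insensitively.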
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-deps clean-tmp-deps.log
+
+# Configuration.
+
+ifdef OTP_DEPS
+$(warning The variable OTP_DEPS is deprecated in favor of LOCAL_DEPS.)
+endif
+
+IGNORE_DEPS ?=
+export IGNORE_DEPS
+
+APPS_DIR ?= $(CURDIR)/apps
+export APPS_DIR
+
+DEPS_DIR ?= $(CURDIR)/deps
+export DEPS_DIR
+
+REBAR_DEPS_DIR = $(DEPS_DIR)
+export REBAR_DEPS_DIR
+
+REBAR_GIT ?= https://github.com/rebar/rebar
+REBAR_COMMIT ?= 576e12171ab8d69b048b827b92aa65d067deea01
+
+# External "early" plugins (see core/plugins.mk for regular plugins).
+# They both use the core_dep_plugin macro.
+
+define core_dep_plugin
+ifeq ($(2),$(PROJECT))
+-include $$(patsubst $(PROJECT)/%,%,$(1))
+else
+-include $(DEPS_DIR)/$(1)
+
+$(DEPS_DIR)/$(1): $(DEPS_DIR)/$(2) ;
+endif
+endef
+
+DEP_EARLY_PLUGINS ?=
+
+$(foreach p,$(DEP_EARLY_PLUGINS),\
+ $(eval $(if $(findstring /,$p),\
+ $(call core_dep_plugin,$p,$(firstword $(subst /, ,$p))),\
+ $(call core_dep_plugin,$p/early-plugins.mk,$p))))
+
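+# For example (dependency name hypothetical), setting
+#   DEP_EARLY_PLUGINS = mydep
+# makes the build include $(DEPS_DIR)/mydep/early-plugins.mk, while an
+# entry containing a slash, such as mydep/mk/early.mk, includes that
+# exact file instead.
+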
+# Query functions.
+
+query_fetch_method = $(if $(dep_$(1)),$(call _qfm_dep,$(word 1,$(dep_$(1)))),$(call _qfm_pkg,$(1)))
+_qfm_dep = $(if $(dep_fetch_$(1)),$(1),$(if $(IS_DEP),legacy,fail))
+_qfm_pkg = $(if $(pkg_$(1)_fetch),$(pkg_$(1)_fetch),fail)
+
+query_name = $(if $(dep_$(1)),$(1),$(if $(pkg_$(1)_name),$(pkg_$(1)_name),$(1)))
+
+query_repo = $(call _qr,$(1),$(call query_fetch_method,$(1)))
+_qr = $(if $(query_repo_$(2)),$(call query_repo_$(2),$(1)),$(call dep_repo,$(1)))
+
+query_repo_default = $(if $(dep_$(1)),$(word 2,$(dep_$(1))),$(pkg_$(1)_repo))
+query_repo_git = $(patsubst git://github.com/%,https://github.com/%,$(call query_repo_default,$(1)))
+query_repo_git-subfolder = $(call query_repo_git,$(1))
+query_repo_git-submodule = -
+query_repo_hg = $(call query_repo_default,$(1))
+query_repo_svn = $(call query_repo_default,$(1))
+query_repo_cp = $(call query_repo_default,$(1))
+query_repo_ln = $(call query_repo_default,$(1))
+query_repo_hex = https://hex.pm/packages/$(if $(word 3,$(dep_$(1))),$(word 3,$(dep_$(1))),$(1))
+query_repo_fail = -
+query_repo_legacy = -
+
+query_version = $(call _qv,$(1),$(call query_fetch_method,$(1)))
+_qv = $(if $(query_version_$(2)),$(call query_version_$(2),$(1)),$(call dep_commit,$(1)))
+
+query_version_default = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(word 3,$(dep_$(1))),$(pkg_$(1)_commit)))
+query_version_git = $(call query_version_default,$(1))
+query_version_git-subfolder = $(call query_version_git,$(1))
+query_version_git-submodule = -
+query_version_hg = $(call query_version_default,$(1))
+query_version_svn = -
+query_version_cp = -
+query_version_ln = -
+query_version_hex = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(word 2,$(dep_$(1))),$(pkg_$(1)_commit)))
+query_version_fail = -
+query_version_legacy = -
+
+query_extra = $(call _qe,$(1),$(call query_fetch_method,$(1)))
+_qe = $(if $(query_extra_$(2)),$(call query_extra_$(2),$(1)),-)
+
+query_extra_git = -
+query_extra_git-subfolder = $(if $(dep_$(1)),subfolder=$(word 4,$(dep_$(1))),-)
+query_extra_git-submodule = -
+query_extra_hg = -
+query_extra_svn = -
+query_extra_cp = -
+query_extra_ln = -
+query_extra_hex = $(if $(dep_$(1)),package-name=$(word 3,$(dep_$(1))),-)
+query_extra_fail = -
+query_extra_legacy = -
+
+query_absolute_path = $(addprefix $(DEPS_DIR)/,$(call query_name,$(1)))
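+
+# As an illustration (dep line hypothetical), given
+#   dep_cowboy = git https://github.com/ninenines/cowboy master
+# $(call query_fetch_method,cowboy) yields "git",
+# $(call query_repo,cowboy) the repository URL, and
+# $(call query_version,cowboy) "master".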
+
+# Deprecated legacy query functions.
+dep_fetch = $(call query_fetch_method,$(1))
+dep_name = $(call query_name,$(1))
+dep_repo = $(call query_repo_git,$(1))
+dep_commit = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(if $(filter hex,$(word 1,$(dep_$(1)))),$(word 2,$(dep_$(1))),$(word 3,$(dep_$(1)))),$(pkg_$(1)_commit)))
+
+LOCAL_DEPS_DIRS = $(foreach a,$(LOCAL_DEPS),$(if $(wildcard $(APPS_DIR)/$(a)),$(APPS_DIR)/$(a)))
+ALL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(foreach dep,$(filter-out $(IGNORE_DEPS),$(BUILD_DEPS) $(DEPS)),$(call dep_name,$(dep))))
+
+# When we are calling an app directly we don't want to include it here;
+# otherwise it'll be treated both as an app and as the top-level project.
+ALL_APPS_DIRS = $(if $(wildcard $(APPS_DIR)/),$(filter-out $(APPS_DIR),$(shell find $(APPS_DIR) -maxdepth 1 -type d)))
+ifdef ROOT_DIR
+ifndef IS_APP
+ALL_APPS_DIRS := $(filter-out $(APPS_DIR)/$(notdir $(CURDIR)),$(ALL_APPS_DIRS))
+endif
+endif
+
+ifeq ($(filter $(APPS_DIR) $(DEPS_DIR),$(subst :, ,$(ERL_LIBS))),)
+ifeq ($(ERL_LIBS),)
+ ERL_LIBS = $(APPS_DIR):$(DEPS_DIR)
+else
+ ERL_LIBS := $(ERL_LIBS):$(APPS_DIR):$(DEPS_DIR)
+endif
+endif
+export ERL_LIBS
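+
+# With the defaults above, ERL_LIBS becomes "$(CURDIR)/apps:$(CURDIR)/deps",
+# so -include_lib can resolve headers from both local applications and
+# fetched dependencies.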
+
+export NO_AUTOPATCH
+
+# Verbosity.
+
+dep_verbose_0 = @echo " DEP $1 ($(call dep_commit,$1))";
+dep_verbose_2 = set -x;
+dep_verbose = $(dep_verbose_$(V))
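+
+# As with the other verbosity toggles in this file: the default V=0 prints
+# a terse " DEP" line per dependency, V=1 lets make echo the raw commands,
+# and V=2 additionally traces them with `set -x`.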
+
+# Optimization: don't recompile deps unless truly necessary.
+
+ifndef IS_DEP
+ifneq ($(MAKELEVEL),0)
+$(shell rm -f ebin/dep_built)
+endif
+endif
+
+# Core targets.
+
+ALL_APPS_DIRS_TO_BUILD = $(if $(LOCAL_DEPS_DIRS)$(IS_APP),$(LOCAL_DEPS_DIRS),$(ALL_APPS_DIRS))
+
+apps:: $(ALL_APPS_DIRS) clean-tmp-deps.log | $(ERLANG_MK_TMP)
+# Create ebin directory for all apps to make sure Erlang recognizes them
+# as proper OTP applications when using -include_lib. This is a temporary
+# fix; a proper fix would be to compile apps/* in the right order.
+ifndef IS_APP
+ifneq ($(ALL_APPS_DIRS),)
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ mkdir -p $$dep/ebin; \
+ done
+endif
+endif
+# At the toplevel: if LOCAL_DEPS is defined with at least one local app, only
+# compile that list of apps. Otherwise, compile everything.
+# Within an app: compile all LOCAL_DEPS that are (uncompiled) local apps.
+ifneq ($(ALL_APPS_DIRS_TO_BUILD),)
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS_TO_BUILD); do \
+ if grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/apps.log; then \
+ :; \
+ else \
+ echo $$dep >> $(ERLANG_MK_TMP)/apps.log; \
+ $(MAKE) -C $$dep $(if $(IS_TEST),test-build-app) IS_APP=1; \
+ fi \
+ done
+endif
+
+clean-tmp-deps.log:
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_TMP)/apps.log $(ERLANG_MK_TMP)/deps.log
+endif
+
+# Erlang.mk does not rebuild dependencies once they have been compiled.
+# A developer working on the top-level project and some of its
+# dependencies at the same time may want to change this behavior.
+# There are two solutions:
+# 1. Set `FULL=1` so that all dependencies are visited and
+# recursively recompiled if necessary.
+# 2. Set `FORCE_REBUILD` to the specific list of dependencies that
+# should be recompiled (instead of the whole set).
+
+FORCE_REBUILD ?=
+
+ifeq ($(origin FULL),undefined)
+ifneq ($(strip $(force_rebuild_dep)$(FORCE_REBUILD)),)
+define force_rebuild_dep
+echo "$(FORCE_REBUILD)" | grep -qw "$$(basename "$1")"
+endef
+endif
+endif
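+
+# Example invocations (dependency names hypothetical):
+#   make FULL=1                       # revisit and rebuild all deps
+#   make FORCE_REBUILD="cowlib ranch" # force a rebuild of just these two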
+
+ifneq ($(SKIP_DEPS),)
+deps::
+else
+deps:: $(ALL_DEPS_DIRS) apps clean-tmp-deps.log | $(ERLANG_MK_TMP)
+ifneq ($(ALL_DEPS_DIRS),)
+ $(verbose) set -e; for dep in $(ALL_DEPS_DIRS); do \
+ if grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/deps.log; then \
+ :; \
+ else \
+ echo $$dep >> $(ERLANG_MK_TMP)/deps.log; \
+ if [ -z "$(strip $(FULL))" ] $(if $(force_rebuild_dep),&& ! ($(call force_rebuild_dep,$$dep)),) && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ elif [ -f $$dep/GNUmakefile ] || [ -f $$dep/makefile ] || [ -f $$dep/Makefile ]; then \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ else \
+ echo "Error: No Makefile to build dependency $$dep." >&2; \
+ exit 2; \
+ fi \
+ fi \
+ done
+endif
+endif
+
+# Deps related targets.
+
+# @todo rename GNUmakefile and makefile into Makefile first, if they exist
+# While the Makefile could also be named GNUmakefile or makefile,
+# in practice only Makefile has been needed so far.
+define dep_autopatch
+ if [ -f $(DEPS_DIR)/$(1)/erlang.mk ]; then \
+ rm -rf $(DEPS_DIR)/$1/ebin/; \
+ $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+ $(call dep_autopatch_erlang_mk,$(1)); \
+ elif [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+ if [ -f $(DEPS_DIR)/$1/rebar.lock ]; then \
+ $(call dep_autopatch2,$1); \
+ elif [ 0 != `grep -c "include ../\w*\.mk" $(DEPS_DIR)/$(1)/Makefile` ]; then \
+ $(call dep_autopatch2,$(1)); \
+ elif [ 0 != `grep -ci "^[^#].*rebar" $(DEPS_DIR)/$(1)/Makefile` ]; then \
+ $(call dep_autopatch2,$(1)); \
+ elif [ -n "`find $(DEPS_DIR)/$(1)/ -type f -name \*.mk -not -name erlang.mk -exec grep -i "^[^#].*rebar" '{}' \;`" ]; then \
+ $(call dep_autopatch2,$(1)); \
+ fi \
+ else \
+ if [ ! -d $(DEPS_DIR)/$(1)/src/ ]; then \
+ $(call dep_autopatch_noop,$(1)); \
+ else \
+ $(call dep_autopatch2,$(1)); \
+ fi \
+ fi
+endef
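+
+# Dependencies listed in NO_AUTOPATCH (checked in dep_target below) skip
+# this autopatching step entirely, e.g. NO_AUTOPATCH = mydep
+# (name hypothetical).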
+
+define dep_autopatch2
+ ! test -f $(DEPS_DIR)/$1/ebin/$1.app || \
+ mv -n $(DEPS_DIR)/$1/ebin/$1.app $(DEPS_DIR)/$1/src/$1.app.src; \
+ rm -f $(DEPS_DIR)/$1/ebin/$1.app; \
+ if [ -f $(DEPS_DIR)/$1/src/$1.app.src.script ]; then \
+ $(call erlang,$(call dep_autopatch_appsrc_script.erl,$(1))); \
+ fi; \
+ $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+ if [ -f $(DEPS_DIR)/$(1)/rebar -o -f $(DEPS_DIR)/$(1)/rebar.config -o -f $(DEPS_DIR)/$(1)/rebar.config.script -o -f $(DEPS_DIR)/$1/rebar.lock ]; then \
+ $(call dep_autopatch_fetch_rebar); \
+ $(call dep_autopatch_rebar,$(1)); \
+ else \
+ $(call dep_autopatch_gen,$(1)); \
+ fi
+endef
+
+define dep_autopatch_noop
+ printf "noop:\n" > $(DEPS_DIR)/$(1)/Makefile
+endef
+
+# Replace "include erlang.mk" with a line that will load the parent Erlang.mk
+# if given. Do it for all 3 possible Makefile file names.
+ifeq ($(NO_AUTOPATCH_ERLANG_MK),)
+define dep_autopatch_erlang_mk
+ for f in Makefile makefile GNUmakefile; do \
+ if [ -f $(DEPS_DIR)/$1/$$f ]; then \
+ sed -i.bak s/'include *erlang.mk'/'include $$(if $$(ERLANG_MK_FILENAME),$$(ERLANG_MK_FILENAME),erlang.mk)'/ $(DEPS_DIR)/$1/$$f; \
+ fi \
+ done
+endef
+else
+define dep_autopatch_erlang_mk
+ :
+endef
+endif
+
+define dep_autopatch_gen
+ printf "%s\n" \
+ "ERLC_OPTS = +debug_info" \
+ "include ../../erlang.mk" > $(DEPS_DIR)/$(1)/Makefile
+endef
+
+# We use flock/lockf when available to avoid concurrency issues.
+define dep_autopatch_fetch_rebar
+ if command -v flock >/dev/null; then \
+ flock $(ERLANG_MK_TMP)/rebar.lock sh -c "$(call dep_autopatch_fetch_rebar2)"; \
+ elif command -v lockf >/dev/null; then \
+ lockf $(ERLANG_MK_TMP)/rebar.lock sh -c "$(call dep_autopatch_fetch_rebar2)"; \
+ else \
+ $(call dep_autopatch_fetch_rebar2); \
+ fi
+endef
+
+define dep_autopatch_fetch_rebar2
+ if [ ! -d $(ERLANG_MK_TMP)/rebar ]; then \
+ git clone -q -n -- $(REBAR_GIT) $(ERLANG_MK_TMP)/rebar; \
+ cd $(ERLANG_MK_TMP)/rebar; \
+ git checkout -q $(REBAR_COMMIT); \
+ ./bootstrap; \
+ cd -; \
+ fi
+endef
+
+define dep_autopatch_rebar
+ if [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+ mv $(DEPS_DIR)/$(1)/Makefile $(DEPS_DIR)/$(1)/Makefile.orig.mk; \
+ fi; \
+ $(call erlang,$(call dep_autopatch_rebar.erl,$(1))); \
+ rm -f $(DEPS_DIR)/$(1)/ebin/$(1).app
+endef
+
+define dep_autopatch_rebar.erl
+ application:load(rebar),
+ application:set_env(rebar, log_level, debug),
+ rmemo:start(),
+ Conf1 = case file:consult("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config)") of
+ {ok, Conf0} -> Conf0;
+ _ -> []
+ end,
+ {Conf, OsEnv} = fun() ->
+ case filelib:is_file("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)") of
+ false -> {Conf1, []};
+ true ->
+ Bindings0 = erl_eval:new_bindings(),
+ Bindings1 = erl_eval:add_binding('CONFIG', Conf1, Bindings0),
+ Bindings = erl_eval:add_binding('SCRIPT', "$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings1),
+ Before = os:getenv(),
+ {ok, Conf2} = file:script("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings),
+ {Conf2, lists:foldl(fun(E, Acc) -> lists:delete(E, Acc) end, os:getenv(), Before)}
+ end
+ end(),
+ Write = fun (Text) ->
+ file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/Makefile)", Text, [append])
+ end,
+ Escape = fun (Text) ->
+ re:replace(Text, "\\\\$$", "\$$$$", [global, {return, list}])
+ end,
+ Write("IGNORE_DEPS += edown eper eunit_formatters meck node_package "
+ "rebar_lock_deps_plugin rebar_vsn_plugin reltool_util\n"),
+ Write("C_SRC_DIR = /path/do/not/exist\n"),
+ Write("C_SRC_TYPE = rebar\n"),
+ Write("DRV_CFLAGS = -fPIC\nexport DRV_CFLAGS\n"),
+ Write(["ERLANG_ARCH = ", rebar_utils:wordsize(), "\nexport ERLANG_ARCH\n"]),
+ ToList = fun
+ (V) when is_atom(V) -> atom_to_list(V);
+ (V) when is_list(V) -> "'\\"" ++ V ++ "\\"'"
+ end,
+ fun() ->
+ Write("ERLC_OPTS = +debug_info\nexport ERLC_OPTS\n"),
+ case lists:keyfind(erl_opts, 1, Conf) of
+ false -> ok;
+ {_, ErlOpts} ->
+ lists:foreach(fun
+ ({d, D}) ->
+ Write("ERLC_OPTS += -D" ++ ToList(D) ++ "=1\n");
+ ({d, DKey, DVal}) ->
+ Write("ERLC_OPTS += -D" ++ ToList(DKey) ++ "=" ++ ToList(DVal) ++ "\n");
+ ({i, I}) ->
+ Write(["ERLC_OPTS += -I ", I, "\n"]);
+ ({platform_define, Regex, D}) ->
+ case rebar_utils:is_arch(Regex) of
+ true -> Write("ERLC_OPTS += -D" ++ ToList(D) ++ "=1\n");
+ false -> ok
+ end;
+ ({parse_transform, PT}) ->
+ Write("ERLC_OPTS += +'{parse_transform, " ++ ToList(PT) ++ "}'\n");
+ (_) -> ok
+ end, ErlOpts)
+ end,
+ Write("\n")
+ end(),
+ GetHexVsn = fun(N, NP) ->
+ case file:consult("$(call core_native_path,$(DEPS_DIR)/$1/rebar.lock)") of
+ {ok, Lock} ->
+ io:format("~p~n", [Lock]),
+ case lists:keyfind("1.1.0", 1, Lock) of
+ {_, LockPkgs} ->
+ io:format("~p~n", [LockPkgs]),
+ case lists:keyfind(atom_to_binary(N, latin1), 1, LockPkgs) of
+ {_, {pkg, _, Vsn}, _} ->
+ io:format("~p~n", [Vsn]),
+ {N, {hex, NP, binary_to_list(Vsn)}};
+ _ ->
+ false
+ end;
+ _ ->
+ false
+ end;
+ _ ->
+ false
+ end
+ end,
+ SemVsn = fun
+ ("~>" ++ S0) ->
+ S = case S0 of
+ " " ++ S1 -> S1;
+ _ -> S0
+ end,
+ case length([ok || $$. <- S]) of
+ 0 -> S ++ ".0.0";
+ 1 -> S ++ ".0";
+ _ -> S
+ end;
+ (S) -> S
+ end,
+ fun() ->
+ File = case lists:keyfind(deps, 1, Conf) of
+ false -> [];
+ {_, Deps} ->
+ [begin case case Dep of
+ N when is_atom(N) -> GetHexVsn(N, N);
+ {N, S} when is_atom(N), is_list(S) -> {N, {hex, N, SemVsn(S)}};
+ {N, {pkg, NP}} when is_atom(N) -> GetHexVsn(N, NP);
+ {N, S, {pkg, NP}} -> {N, {hex, NP, S}};
+ {N, S} when is_tuple(S) -> {N, S};
+ {N, _, S} -> {N, S};
+ {N, _, S, _} -> {N, S};
+ _ -> false
+ end of
+ false -> ok;
+ {Name, Source} ->
+ {Method, Repo, Commit} = case Source of
+ {hex, NPV, V} -> {hex, V, NPV};
+ {git, R} -> {git, R, master};
+ {M, R, {branch, C}} -> {M, R, C};
+ {M, R, {ref, C}} -> {M, R, C};
+ {M, R, {tag, C}} -> {M, R, C};
+ {M, R, C} -> {M, R, C}
+ end,
+ Write(io_lib:format("DEPS += ~s\ndep_~s = ~s ~s ~s~n", [Name, Name, Method, Repo, Commit]))
+ end end || Dep <- Deps]
+ end
+ end(),
+ fun() ->
+ case lists:keyfind(erl_first_files, 1, Conf) of
+ false -> ok;
+ {_, Files} ->
+ Names = [[" ", case lists:reverse(F) of
+ "lre." ++ Elif -> lists:reverse(Elif);
+ "lrx." ++ Elif -> lists:reverse(Elif);
+ "lry." ++ Elif -> lists:reverse(Elif);
+ Elif -> lists:reverse(Elif)
+ end] || "src/" ++ F <- Files],
+ Write(io_lib:format("COMPILE_FIRST +=~s\n", [Names]))
+ end
+ end(),
+ Write("\n\nrebar_dep: preprocess pre-deps deps pre-app app\n"),
+ Write("\npreprocess::\n"),
+ Write("\npre-deps::\n"),
+ Write("\npre-app::\n"),
+ PatchHook = fun(Cmd) ->
+ Cmd2 = re:replace(Cmd, "^([g]?make)(.*)( -C.*)", "\\\\1\\\\3\\\\2", [{return, list}]),
+ case Cmd2 of
+ "make -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1);
+ "gmake -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1);
+ "make " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1);
+ "gmake " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1);
+ _ -> Escape(Cmd)
+ end
+ end,
+ fun() ->
+ case lists:keyfind(pre_hooks, 1, Conf) of
+ false -> ok;
+ {_, Hooks} ->
+ [case H of
+ {'get-deps', Cmd} ->
+ Write("\npre-deps::\n\t" ++ PatchHook(Cmd) ++ "\n");
+ {compile, Cmd} ->
+ Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n");
+ {Regex, compile, Cmd} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n");
+ false -> ok
+ end;
+ _ -> ok
+ end || H <- Hooks]
+ end
+ end(),
+ ShellToMk = fun(V0) ->
+ V1 = re:replace(V0, "[$$][(]", "$$\(shell ", [global]),
+ V = re:replace(V1, "([$$])(?![(])(\\\\w*)", "\\\\1(\\\\2)", [global]),
+ re:replace(V, "-Werror\\\\b", "", [{return, list}, global])
+ end,
+ PortSpecs = fun() ->
+ case lists:keyfind(port_specs, 1, Conf) of
+ false ->
+ case filelib:is_dir("$(call core_native_path,$(DEPS_DIR)/$1/c_src)") of
+ false -> [];
+ true ->
+ [{"priv/" ++ proplists:get_value(so_name, Conf, "$(1)_drv.so"),
+ proplists:get_value(port_sources, Conf, ["c_src/*.c"]), []}]
+ end;
+ {_, Specs} ->
+ lists:flatten([case S of
+ {Output, Input} -> {ShellToMk(Output), Input, []};
+ {Regex, Output, Input} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {ShellToMk(Output), Input, []};
+ false -> []
+ end;
+ {Regex, Output, Input, [{env, Env}]} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {ShellToMk(Output), Input, Env};
+ false -> []
+ end
+ end || S <- Specs])
+ end
+ end(),
+ PortSpecWrite = fun (Text) ->
+ file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/c_src/Makefile.erlang.mk)", Text, [append])
+ end,
+ case PortSpecs of
+ [] -> ok;
+ _ ->
+ Write("\npre-app::\n\t@$$\(MAKE) --no-print-directory -f c_src/Makefile.erlang.mk\n"),
+ PortSpecWrite(io_lib:format("ERL_CFLAGS ?= -finline-functions -Wall -fPIC -I \\"~s/erts-~s/include\\" -I \\"~s\\"\n",
+ [code:root_dir(), erlang:system_info(version), code:lib_dir(erl_interface, include)])),
+ PortSpecWrite(io_lib:format("ERL_LDFLAGS ?= -L \\"~s\\" -lerl_interface -lei\n",
+ [code:lib_dir(erl_interface, lib)])),
+ [PortSpecWrite(["\n", E, "\n"]) || E <- OsEnv],
+ FilterEnv = fun(Env) ->
+ lists:flatten([case E of
+ {_, _} -> E;
+ {Regex, K, V} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {K, V};
+ false -> []
+ end
+ end || E <- Env])
+ end,
+ MergeEnv = fun(Env) ->
+ lists:foldl(fun ({K, V}, Acc) ->
+ case lists:keyfind(K, 1, Acc) of
+ false -> [{K, rebar_utils:expand_env_variable(V, K, "")}|Acc];
+ {_, V0} -> [{K, rebar_utils:expand_env_variable(V, K, V0)}|Acc]
+ end
+ end, [], Env)
+ end,
+ PortEnv = case lists:keyfind(port_env, 1, Conf) of
+ false -> [];
+ {_, PortEnv0} -> FilterEnv(PortEnv0)
+ end,
+ PortSpec = fun ({Output, Input0, Env}) ->
+ filelib:ensure_dir("$(call core_native_path,$(DEPS_DIR)/$1/)" ++ Output),
+ Input = [[" ", I] || I <- Input0],
+ PortSpecWrite([
+ [["\n", K, " = ", ShellToMk(V)] || {K, V} <- lists:reverse(MergeEnv(PortEnv))],
+ case $(PLATFORM) of
+ darwin -> "\n\nLDFLAGS += -flat_namespace -undefined suppress";
+ _ -> ""
+ end,
+ "\n\nall:: ", Output, "\n\t@:\n\n",
+ "%.o: %.c\n\t$$\(CC) -c -o $$\@ $$\< $$\(CFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.C\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.cc\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.cpp\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ [[Output, ": ", K, " += ", ShellToMk(V), "\n"] || {K, V} <- lists:reverse(MergeEnv(FilterEnv(Env)))],
+ Output, ": $$\(foreach ext,.c .C .cc .cpp,",
+ "$$\(patsubst %$$\(ext),%.o,$$\(filter %$$\(ext),$$\(wildcard", Input, "))))\n",
+ "\t$$\(CC) -o $$\@ $$\? $$\(LDFLAGS) $$\(ERL_LDFLAGS) $$\(DRV_LDFLAGS) $$\(EXE_LDFLAGS)",
+ case {filename:extension(Output), $(PLATFORM)} of
+ {[], _} -> "\n";
+ {_, darwin} -> "\n";
+ _ -> " -shared\n"
+ end])
+ end,
+ [PortSpec(S) || S <- PortSpecs]
+ end,
+ fun() ->
+ case lists:keyfind(plugins, 1, Conf) of
+ false -> ok;
+ {_, Plugins0} ->
+ Plugins = [P || P <- Plugins0, is_tuple(P)],
+ case lists:keyfind('lfe-compile', 1, Plugins) of
+ false -> ok;
+ _ -> Write("\nBUILD_DEPS = lfe lfe.mk\ndep_lfe.mk = git https://github.com/ninenines/lfe.mk master\nDEP_PLUGINS = lfe.mk\n")
+ end
+ end
+ end(),
+ Write("\ninclude $$\(if $$\(ERLANG_MK_FILENAME),$$\(ERLANG_MK_FILENAME),erlang.mk)"),
+ RunPlugin = fun(Plugin, Step) ->
+ case erlang:function_exported(Plugin, Step, 2) of
+ false -> ok;
+ true ->
+ c:cd("$(call core_native_path,$(DEPS_DIR)/$1/)"),
+ Ret = Plugin:Step({config, "", Conf, dict:new(), dict:new(), dict:new(),
+ dict:store(base_dir, "", dict:new())}, undefined),
+ io:format("rebar plugin ~p step ~p ret ~p~n", [Plugin, Step, Ret])
+ end
+ end,
+ fun() ->
+ case lists:keyfind(plugins, 1, Conf) of
+ false -> ok;
+ {_, Plugins0} ->
+ Plugins = [P || P <- Plugins0, is_atom(P)],
+ [begin
+ case lists:keyfind(deps, 1, Conf) of
+ false -> ok;
+ {_, Deps} ->
+ case lists:keyfind(P, 1, Deps) of
+ false -> ok;
+ _ ->
+ Path = "$(call core_native_path,$(DEPS_DIR)/)" ++ atom_to_list(P),
+ io:format("~s", [os:cmd("$(MAKE) -C $(call core_native_path,$(DEPS_DIR)/$1) " ++ Path)]),
+ io:format("~s", [os:cmd("$(MAKE) -C " ++ Path ++ " IS_DEP=1")]),
+ code:add_patha(Path ++ "/ebin")
+ end
+ end
+ end || P <- Plugins],
+ [case code:load_file(P) of
+ {module, P} -> ok;
+ _ ->
+ case lists:keyfind(plugin_dir, 1, Conf) of
+ false -> ok;
+ {_, PluginsDir} ->
+ ErlFile = "$(call core_native_path,$(DEPS_DIR)/$1/)" ++ PluginsDir ++ "/" ++ atom_to_list(P) ++ ".erl",
+ {ok, P, Bin} = compile:file(ErlFile, [binary]),
+ {module, P} = code:load_binary(P, ErlFile, Bin)
+ end
+ end || P <- Plugins],
+ [RunPlugin(P, preprocess) || P <- Plugins],
+ [RunPlugin(P, pre_compile) || P <- Plugins],
+ [RunPlugin(P, compile) || P <- Plugins]
+ end
+ end(),
+ halt()
+endef
+
+define dep_autopatch_appsrc_script.erl
+ AppSrc = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+ AppSrcScript = AppSrc ++ ".script",
+ {ok, Conf0} = file:consult(AppSrc),
+ Bindings0 = erl_eval:new_bindings(),
+ Bindings1 = erl_eval:add_binding('CONFIG', Conf0, Bindings0),
+ Bindings = erl_eval:add_binding('SCRIPT', AppSrcScript, Bindings1),
+ Conf = case file:script(AppSrcScript, Bindings) of
+ {ok, [C]} -> C;
+ {ok, C} -> C
+ end,
+ ok = file:write_file(AppSrc, io_lib:format("~p.~n", [Conf])),
+ halt()
+endef
+
+define dep_autopatch_appsrc.erl
+ AppSrcOut = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+ AppSrcIn = case filelib:is_regular(AppSrcOut) of false -> "$(call core_native_path,$(DEPS_DIR)/$1/ebin/$1.app)"; true -> AppSrcOut end,
+ case filelib:is_regular(AppSrcIn) of
+ false -> ok;
+ true ->
+ {ok, [{application, $(1), L0}]} = file:consult(AppSrcIn),
+ L1 = lists:keystore(modules, 1, L0, {modules, []}),
+ L2 = case lists:keyfind(vsn, 1, L1) of
+ {_, git} -> lists:keyreplace(vsn, 1, L1, {vsn, lists:droplast(os:cmd("git -C $(DEPS_DIR)/$1 describe --dirty --tags --always"))});
+ {_, {cmd, _}} -> lists:keyreplace(vsn, 1, L1, {vsn, "cmd"});
+ _ -> L1
+ end,
+ L3 = case lists:keyfind(registered, 1, L2) of false -> [{registered, []}|L2]; _ -> L2 end,
+ ok = file:write_file(AppSrcOut, io_lib:format("~p.~n", [{application, $(1), L3}])),
+ case AppSrcOut of AppSrcIn -> ok; _ -> ok = file:delete(AppSrcIn) end
+ end,
+ halt()
+endef
+
+define dep_fetch_git
+ git clone -q -n -- $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && git checkout -q $(call dep_commit,$(1));
+endef
+
+define dep_fetch_git-subfolder
+ mkdir -p $(ERLANG_MK_TMP)/git-subfolder; \
+ git clone -q -n -- $(call dep_repo,$1) \
+ $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1); \
+ cd $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1) \
+ && git checkout -q $(call dep_commit,$1); \
+ ln -s $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1)/$(word 4,$(dep_$(1))) \
+ $(DEPS_DIR)/$(call dep_name,$1);
+endef
+
+define dep_fetch_git-submodule
+ git submodule update --init -- $(DEPS_DIR)/$1;
+endef
+
+define dep_fetch_hg
+ hg clone -q -U $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && hg update -q $(call dep_commit,$(1));
+endef
+
+define dep_fetch_svn
+ svn checkout -q $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+define dep_fetch_cp
+ cp -R $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+define dep_fetch_ln
+ ln -s $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+# Hex only has a package version. No need to look in the Erlang.mk packages.
+define dep_fetch_hex
+ mkdir -p $(ERLANG_MK_TMP)/hex $(DEPS_DIR)/$1; \
+ $(call core_http_get,$(ERLANG_MK_TMP)/hex/$1.tar,\
+ https://repo.hex.pm/tarballs/$(if $(word 3,$(dep_$1)),$(word 3,$(dep_$1)),$1)-$(strip $(word 2,$(dep_$1))).tar); \
+ tar -xOf $(ERLANG_MK_TMP)/hex/$1.tar contents.tar.gz | tar -C $(DEPS_DIR)/$1 -xzf -;
+endef
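+
+# Which dep_fetch_* macro runs is selected from the first word of the dep
+# line, e.g. (values hypothetical):
+#   dep_mydep = git https://example.com/mydep.git v1.0.0   # dep_fetch_git
+#   dep_mydep = hex 1.0.0                                  # dep_fetch_hex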
+
+define dep_fetch_fail
+ echo "Error: Unknown or invalid dependency: $(1)." >&2; \
+ exit 78;
+endef
+
+# Kept for compatibility purposes with older Erlang.mk configuration.
+define dep_fetch_legacy
+ $(warning WARNING: '$(1)' dependency configuration uses deprecated format.) \
+ git clone -q -n -- $(word 1,$(dep_$(1))) $(DEPS_DIR)/$(1); \
+ cd $(DEPS_DIR)/$(1) && git checkout -q $(if $(word 2,$(dep_$(1))),$(word 2,$(dep_$(1))),master);
+endef
+
+define dep_target
+$(DEPS_DIR)/$(call dep_name,$1): | $(ERLANG_MK_TMP)
+ $(eval DEP_NAME := $(call dep_name,$1))
+ $(eval DEP_STR := $(if $(filter $1,$(DEP_NAME)),$1,"$1 ($(DEP_NAME))"))
+ $(verbose) if test -d $(APPS_DIR)/$(DEP_NAME); then \
+ echo "Error: Dependency" $(DEP_STR) "conflicts with application found in $(APPS_DIR)/$(DEP_NAME)." >&2; \
+ exit 17; \
+ fi
+ $(verbose) mkdir -p $(DEPS_DIR)
+ $(dep_verbose) $(call dep_fetch_$(strip $(call dep_fetch,$(1))),$(1))
+ $(verbose) if [ -f $(DEPS_DIR)/$(1)/configure.ac -o -f $(DEPS_DIR)/$(1)/configure.in ] \
+ && [ ! -f $(DEPS_DIR)/$(1)/configure ]; then \
+ echo " AUTO " $(DEP_STR); \
+ cd $(DEPS_DIR)/$(1) && autoreconf -Wall -vif -I m4; \
+ fi
+ - $(verbose) if [ -f $(DEPS_DIR)/$(DEP_NAME)/configure ]; then \
+ echo " CONF " $(DEP_STR); \
+ cd $(DEPS_DIR)/$(DEP_NAME) && ./configure; \
+ fi
+ifeq ($(filter $(1),$(NO_AUTOPATCH)),)
+ $(verbose) $$(MAKE) --no-print-directory autopatch-$(DEP_NAME)
+endif
+
+.PHONY: autopatch-$(call dep_name,$1)
+
+autopatch-$(call dep_name,$1)::
+ $(verbose) if [ "$(1)" = "amqp_client" -a "$(RABBITMQ_CLIENT_PATCH)" ]; then \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \
+ echo " PATCH Downloading rabbitmq-codegen"; \
+ git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \
+ fi; \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-server ]; then \
+ echo " PATCH Downloading rabbitmq-server"; \
+ git clone https://github.com/rabbitmq/rabbitmq-server.git $(DEPS_DIR)/rabbitmq-server; \
+ fi; \
+ ln -s $(DEPS_DIR)/amqp_client/deps/rabbit_common-0.0.0 $(DEPS_DIR)/rabbit_common; \
+ elif [ "$(1)" = "rabbit" -a "$(RABBITMQ_SERVER_PATCH)" ]; then \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \
+ echo " PATCH Downloading rabbitmq-codegen"; \
+ git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \
+ fi \
+ elif [ "$1" = "elixir" -a "$(ELIXIR_PATCH)" ]; then \
+ ln -s lib/elixir/ebin $(DEPS_DIR)/elixir/; \
+ else \
+ $$(call dep_autopatch,$(call dep_name,$1)) \
+ fi
+endef
+
+$(foreach dep,$(BUILD_DEPS) $(DEPS),$(eval $(call dep_target,$(dep))))
+
+ifndef IS_APP
+clean:: clean-apps
+
+clean-apps:
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ $(MAKE) -C $$dep clean IS_APP=1; \
+ done
+
+distclean:: distclean-apps
+
+distclean-apps:
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ $(MAKE) -C $$dep distclean IS_APP=1; \
+ done
+endif
+
+ifndef SKIP_DEPS
+distclean:: distclean-deps
+
+distclean-deps:
+ $(gen_verbose) rm -rf $(DEPS_DIR)
+endif
+
+# Forward-declare variables used in core/deps-tools.mk. This is required
+# in case plugins use them.
+
+ERLANG_MK_RECURSIVE_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-deps-list.log
+ERLANG_MK_RECURSIVE_DOC_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-doc-deps-list.log
+ERLANG_MK_RECURSIVE_REL_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-rel-deps-list.log
+ERLANG_MK_RECURSIVE_TEST_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-test-deps-list.log
+ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-shell-deps-list.log
+
+ERLANG_MK_QUERY_DEPS_FILE = $(ERLANG_MK_TMP)/query-deps.log
+ERLANG_MK_QUERY_DOC_DEPS_FILE = $(ERLANG_MK_TMP)/query-doc-deps.log
+ERLANG_MK_QUERY_REL_DEPS_FILE = $(ERLANG_MK_TMP)/query-rel-deps.log
+ERLANG_MK_QUERY_TEST_DEPS_FILE = $(ERLANG_MK_TMP)/query-test-deps.log
+ERLANG_MK_QUERY_SHELL_DEPS_FILE = $(ERLANG_MK_TMP)/query-shell-deps.log
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: clean-app
+
+# Configuration.
+
+ERLC_OPTS ?= -Werror +debug_info +warn_export_vars +warn_shadow_vars \
+ +warn_obsolete_guard # +bin_opt_info +warn_export_all +warn_missing_spec
+COMPILE_FIRST ?=
+COMPILE_FIRST_PATHS = $(addprefix src/,$(addsuffix .erl,$(COMPILE_FIRST)))
+ERLC_EXCLUDE ?=
+ERLC_EXCLUDE_PATHS = $(addprefix src/,$(addsuffix .erl,$(ERLC_EXCLUDE)))
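+
+# Example (illustrative; the module names are assumptions): projects usually
+# adjust these defaults in their own Makefile, e.g.
+#
+#     ERLC_OPTS = +debug_info +warn_export_vars +warn_missing_spec
+#     COMPILE_FIRST = my_behaviour
+#     ERLC_EXCLUDE = my_generated_module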
+
+ERLC_ASN1_OPTS ?=
+
+ERLC_MIB_OPTS ?=
+COMPILE_MIB_FIRST ?=
+COMPILE_MIB_FIRST_PATHS = $(addprefix mibs/,$(addsuffix .mib,$(COMPILE_MIB_FIRST)))
+
+# Verbosity.
+
+app_verbose_0 = @echo " APP " $(PROJECT);
+app_verbose_2 = set -x;
+app_verbose = $(app_verbose_$(V))
+
+appsrc_verbose_0 = @echo " APP " $(PROJECT).app.src;
+appsrc_verbose_2 = set -x;
+appsrc_verbose = $(appsrc_verbose_$(V))
+
+makedep_verbose_0 = @echo " DEPEND" $(PROJECT).d;
+makedep_verbose_2 = set -x;
+makedep_verbose = $(makedep_verbose_$(V))
+
+erlc_verbose_0 = @echo " ERLC " $(filter-out $(patsubst %,%.erl,$(ERLC_EXCLUDE)),\
+ $(filter %.erl %.core,$(?F)));
+erlc_verbose_2 = set -x;
+erlc_verbose = $(erlc_verbose_$(V))
+
+xyrl_verbose_0 = @echo " XYRL " $(filter %.xrl %.yrl,$(?F));
+xyrl_verbose_2 = set -x;
+xyrl_verbose = $(xyrl_verbose_$(V))
+
+asn1_verbose_0 = @echo " ASN1 " $(filter %.asn1,$(?F));
+asn1_verbose_2 = set -x;
+asn1_verbose = $(asn1_verbose_$(V))
+
+mib_verbose_0 = @echo " MIB " $(filter %.bin %.mib,$(?F));
+mib_verbose_2 = set -x;
+mib_verbose = $(mib_verbose_$(V))
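+
+# Example (illustrative): these pairs are selected through the V variable:
+# plain "make" (V=0) prints the terse " ERLC  ..." labels, "make V=1" lets
+# make echo the full commands, and "make V=2" additionally traces each
+# recipe with "set -x".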
+
+ifneq ($(wildcard src/),)
+
+# Targets.
+
+app:: $(if $(wildcard ebin/test),clean) deps
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d
+ $(verbose) $(MAKE) --no-print-directory app-build
+
+ifeq ($(wildcard src/$(PROJECT_MOD).erl),)
+define app_file
+{application, '$(PROJECT)', [
+ {description, "$(PROJECT_DESCRIPTION)"},
+ {vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP),
+ {id$(comma)$(space)"$(1)"}$(comma))
+ {modules, [$(call comma_list,$(2))]},
+ {registered, []},
+ {applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(foreach dep,$(DEPS),$(call dep_name,$(dep))))]},
+ {env, $(subst \,\\,$(PROJECT_ENV))}$(if $(findstring {,$(PROJECT_APP_EXTRA_KEYS)),$(comma)$(newline)$(tab)$(subst \,\\,$(PROJECT_APP_EXTRA_KEYS)),)
+]}.
+endef
+else
+define app_file
+{application, '$(PROJECT)', [
+ {description, "$(PROJECT_DESCRIPTION)"},
+ {vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP),
+ {id$(comma)$(space)"$(1)"}$(comma))
+ {modules, [$(call comma_list,$(2))]},
+ {registered, [$(call comma_list,$(PROJECT)_sup $(PROJECT_REGISTERED))]},
+ {applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(foreach dep,$(DEPS),$(call dep_name,$(dep))))]},
+ {mod, {$(PROJECT_MOD), []}},
+ {env, $(subst \,\\,$(PROJECT_ENV))}$(if $(findstring {,$(PROJECT_APP_EXTRA_KEYS)),$(comma)$(newline)$(tab)$(subst \,\\,$(PROJECT_APP_EXTRA_KEYS)),)
+]}.
+endef
+endif
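+
+# Example (illustrative; myapp is an assumption): for PROJECT = myapp with a
+# src/myapp_app.erl module, the second template could render to:
+#
+#     {application, 'myapp', [
+#         {description, "New project"},
+#         {vsn, "0.1.0"},
+#         {modules, ['myapp_app','myapp_sup']},
+#         {registered, [myapp_sup]},
+#         {applications, [kernel,stdlib]},
+#         {mod, {myapp_app, []}},
+#         {env, []}
+#     ]}.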
+
+app-build: ebin/$(PROJECT).app
+ $(verbose) :
+
+# Source files.
+
+ALL_SRC_FILES := $(sort $(call core_find,src/,*))
+
+ERL_FILES := $(filter %.erl,$(ALL_SRC_FILES))
+CORE_FILES := $(filter %.core,$(ALL_SRC_FILES))
+
+# ASN.1 files.
+
+ifneq ($(wildcard asn1/),)
+ASN1_FILES = $(sort $(call core_find,asn1/,*.asn1))
+ERL_FILES += $(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES))))
+
+define compile_asn1
+ $(verbose) mkdir -p include/
+ $(asn1_verbose) erlc -v -I include/ -o asn1/ +noobj $(ERLC_ASN1_OPTS) $(1)
+ $(verbose) mv asn1/*.erl src/
+ -$(verbose) mv asn1/*.hrl include/
+ $(verbose) mv asn1/*.asn1db include/
+endef
+
+$(PROJECT).d:: $(ASN1_FILES)
+ $(if $(strip $?),$(call compile_asn1,$?))
+endif
+
+# SNMP MIB files.
+
+ifneq ($(wildcard mibs/),)
+MIB_FILES = $(sort $(call core_find,mibs/,*.mib))
+
+$(PROJECT).d:: $(COMPILE_MIB_FIRST_PATHS) $(MIB_FILES)
+ $(verbose) mkdir -p include/ priv/mibs/
+ $(mib_verbose) erlc -v $(ERLC_MIB_OPTS) -o priv/mibs/ -I priv/mibs/ $?
+ $(mib_verbose) erlc -o include/ -- $(addprefix priv/mibs/,$(patsubst %.mib,%.bin,$(notdir $?)))
+endif
+
+# Leex and Yecc files.
+
+XRL_FILES := $(filter %.xrl,$(ALL_SRC_FILES))
+XRL_ERL_FILES = $(addprefix src/,$(patsubst %.xrl,%.erl,$(notdir $(XRL_FILES))))
+ERL_FILES += $(XRL_ERL_FILES)
+
+YRL_FILES := $(filter %.yrl,$(ALL_SRC_FILES))
+YRL_ERL_FILES = $(addprefix src/,$(patsubst %.yrl,%.erl,$(notdir $(YRL_FILES))))
+ERL_FILES += $(YRL_ERL_FILES)
+
+$(PROJECT).d:: $(XRL_FILES) $(YRL_FILES)
+ $(if $(strip $?),$(xyrl_verbose) erlc -v -o src/ $(YRL_ERLC_OPTS) $?)
+
+# Erlang and Core Erlang files.
+
+define makedep.erl
+ E = ets:new(makedep, [bag]),
+ G = digraph:new([acyclic]),
+ ErlFiles = lists:usort(string:tokens("$(ERL_FILES)", " ")),
+ DepsDir = "$(call core_native_path,$(DEPS_DIR))",
+ AppsDir = "$(call core_native_path,$(APPS_DIR))",
+ DepsDirsSrc = "$(if $(wildcard $(DEPS_DIR)/*/src), $(call core_native_path,$(wildcard $(DEPS_DIR)/*/src)))",
+ DepsDirsInc = "$(if $(wildcard $(DEPS_DIR)/*/include), $(call core_native_path,$(wildcard $(DEPS_DIR)/*/include)))",
+ AppsDirsSrc = "$(if $(wildcard $(APPS_DIR)/*/src), $(call core_native_path,$(wildcard $(APPS_DIR)/*/src)))",
+ AppsDirsInc = "$(if $(wildcard $(APPS_DIR)/*/include), $(call core_native_path,$(wildcard $(APPS_DIR)/*/include)))",
+ DepsDirs = lists:usort(string:tokens(DepsDirsSrc++DepsDirsInc, " ")),
+ AppsDirs = lists:usort(string:tokens(AppsDirsSrc++AppsDirsInc, " ")),
+ Modules = [{list_to_atom(filename:basename(F, ".erl")), F} || F <- ErlFiles],
+ Add = fun (Mod, Dep) ->
+ case lists:keyfind(Dep, 1, Modules) of
+ false -> ok;
+ {_, DepFile} ->
+ {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+ ets:insert(E, {ModFile, DepFile}),
+ digraph:add_vertex(G, Mod),
+ digraph:add_vertex(G, Dep),
+ digraph:add_edge(G, Mod, Dep)
+ end
+ end,
+ AddHd = fun (F, Mod, DepFile) ->
+ case file:open(DepFile, [read]) of
+ {error, enoent} ->
+ ok;
+ {ok, Fd} ->
+ {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+ case ets:match(E, {ModFile, DepFile}) of
+ [] ->
+ ets:insert(E, {ModFile, DepFile}),
+ F(F, Fd, Mod, 0);
+ _ -> ok
+ end
+ end
+ end,
+ SearchHrl = fun
+ F(_Hrl, []) -> {error, enoent};
+ F(Hrl, [Dir|Dirs]) ->
+ HrlF = filename:join([Dir,Hrl]),
+ case filelib:is_file(HrlF) of
+ true ->
+ {ok, HrlF};
+ false -> F(Hrl,Dirs)
+ end
+ end,
+ Attr = fun
+ (_F, Mod, behavior, Dep) ->
+ Add(Mod, Dep);
+ (_F, Mod, behaviour, Dep) ->
+ Add(Mod, Dep);
+ (_F, Mod, compile, {parse_transform, Dep}) ->
+ Add(Mod, Dep);
+ (_F, Mod, compile, Opts) when is_list(Opts) ->
+ case proplists:get_value(parse_transform, Opts) of
+ undefined -> ok;
+ Dep -> Add(Mod, Dep)
+ end;
+ (F, Mod, include, Hrl) ->
+ case SearchHrl(Hrl, ["src", "include",AppsDir,DepsDir]++AppsDirs++DepsDirs) of
+ {ok, FoundHrl} -> AddHd(F, Mod, FoundHrl);
+ {error, _} -> false
+ end;
+ (F, Mod, include_lib, Hrl) ->
+ case SearchHrl(Hrl, ["src", "include",AppsDir,DepsDir]++AppsDirs++DepsDirs) of
+ {ok, FoundHrl} -> AddHd(F, Mod, FoundHrl);
+ {error, _} -> false
+ end;
+ (F, Mod, import, {Imp, _}) ->
+ IsFile =
+ case lists:keyfind(Imp, 1, Modules) of
+ false -> false;
+ {_, FilePath} -> filelib:is_file(FilePath)
+ end,
+ case IsFile of
+ false -> ok;
+ true -> Add(Mod, Imp)
+ end;
+ (_, _, _, _) -> ok
+ end,
+ MakeDepend = fun
+ (F, Fd, Mod, StartLocation) ->
+ {ok, Filename} = file:pid2name(Fd),
+ case io:parse_erl_form(Fd, undefined, StartLocation) of
+ {ok, AbsData, EndLocation} ->
+ case AbsData of
+ {attribute, _, Key, Value} ->
+ Attr(F, Mod, Key, Value),
+ F(F, Fd, Mod, EndLocation);
+ _ -> F(F, Fd, Mod, EndLocation)
+ end;
+ {eof, _} -> file:close(Fd);
+ {error, _ErrorDescription} ->
+ file:close(Fd);
+ {error, ErrorInfo, ErrorLocation} ->
+ F(F, Fd, Mod, ErrorLocation)
+ end,
+ ok
+ end,
+ [begin
+ Mod = list_to_atom(filename:basename(F, ".erl")),
+ case file:open(F, [read]) of
+ {ok, Fd} -> MakeDepend(MakeDepend, Fd, Mod, 0);
+ {error, enoent} -> ok
+ end
+ end || F <- ErlFiles],
+ Depend = sofs:to_external(sofs:relation_to_family(sofs:relation(ets:tab2list(E)))),
+ CompileFirst = [X || X <- lists:reverse(digraph_utils:topsort(G)), [] =/= digraph:in_neighbours(G, X)],
+ TargetPath = fun(Target) ->
+ case lists:keyfind(Target, 1, Modules) of
+ false -> "";
+ {_, DepFile} ->
+ DirSubname = tl(string:tokens(filename:dirname(DepFile), "/")),
+ string:join(DirSubname ++ [atom_to_list(Target)], "/")
+ end
+ end,
+ ok = file:write_file("$(1)", unicode:characters_to_binary([
+ "# Generated by Erlang.mk. Edit at your own risk!\n\n",
+ [[F, "::", [[" ", D] || D <- Deps], "; @touch \$$@\n"] || {F, Deps} <- Depend],
+ "\nCOMPILE_FIRST +=", [[" ", TargetPath(CF)] || CF <- CompileFirst], "\n"
+ ])),
+ halt()
+endef
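+
+# Example (illustrative; file names are assumptions): if src/a.erl includes
+# "b.hrl" and implements a behaviour defined in src/b.erl, the generated
+# dependency file could contain:
+#
+#     # Generated by Erlang.mk. Edit at your own risk!
+#
+#     src/a.erl:: include/b.hrl src/b.erl; @touch $@
+#
+#     COMPILE_FIRST += b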
+
+ifeq ($(if $(NO_MAKEDEP),$(wildcard $(PROJECT).d),),)
+$(PROJECT).d:: $(ERL_FILES) $(call core_find,include/,*.hrl) $(MAKEFILE_LIST)
+ $(makedep_verbose) $(call erlang,$(call makedep.erl,$@))
+endif
+
+ifeq ($(IS_APP)$(IS_DEP),)
+ifneq ($(words $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES)),0)
+# Rebuild everything when the Makefile changes.
+$(ERLANG_MK_TMP)/last-makefile-change: $(MAKEFILE_LIST) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES); \
+ touch -c $(PROJECT).d; \
+ fi
+ $(verbose) touch $@
+
+$(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES):: $(ERLANG_MK_TMP)/last-makefile-change
+ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change
+endif
+endif
+
+$(PROJECT).d::
+ $(verbose) :
+
+include $(wildcard $(PROJECT).d)
+
+ebin/$(PROJECT).app:: ebin/
+
+ebin/:
+ $(verbose) mkdir -p ebin/
+
+define compile_erl
+ $(erlc_verbose) erlc -v $(if $(IS_DEP),$(filter-out -Werror,$(ERLC_OPTS)),$(ERLC_OPTS)) -o ebin/ \
+ -pa ebin/ -I include/ $(filter-out $(ERLC_EXCLUDE_PATHS),$(COMPILE_FIRST_PATHS) $(1))
+endef
+
+define validate_app_file
+ case file:consult("ebin/$(PROJECT).app") of
+ {ok, _} -> halt();
+ _ -> halt(1)
+ end
+endef
+
+ebin/$(PROJECT).app:: $(ERL_FILES) $(CORE_FILES) $(wildcard src/$(PROJECT).app.src)
+ $(eval FILES_TO_COMPILE := $(filter-out src/$(PROJECT).app.src,$?))
+ $(if $(strip $(FILES_TO_COMPILE)),$(call compile_erl,$(FILES_TO_COMPILE)))
+# Older git versions do not have the --first-parent flag. Do without in that case.
+ $(eval GITDESCRIBE := $(shell git describe --dirty --abbrev=7 --tags --always --first-parent 2>/dev/null \
+ || git describe --dirty --abbrev=7 --tags --always 2>/dev/null || true))
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(filter-out $(ERLC_EXCLUDE_PATHS),$(ERL_FILES) $(CORE_FILES) $(BEAM_FILES)))))))
+ifeq ($(wildcard src/$(PROJECT).app.src),)
+ $(app_verbose) printf '$(subst %,%%,$(subst $(newline),\n,$(subst ','\'',$(call app_file,$(GITDESCRIBE),$(MODULES)))))' \
+ > ebin/$(PROJECT).app
+ $(verbose) if ! $(call erlang,$(call validate_app_file)); then \
+ echo "The .app file produced is invalid. Please verify the value of PROJECT_ENV." >&2; \
+ exit 1; \
+ fi
+else
+ $(verbose) if [ -z "$$(grep -e '^[^%]*{\s*modules\s*,' src/$(PROJECT).app.src)" ]; then \
+ echo "No {modules, []} entry found in src/$(PROJECT).app.src. An empty modules entry is required so that erlang.mk can fill in the module list; please consult the erlang.mk documentation for instructions." >&2; \
+ exit 1; \
+ fi
+ $(appsrc_verbose) cat src/$(PROJECT).app.src \
+ | sed "s/{[[:space:]]*modules[[:space:]]*,[[:space:]]*\[\]}/{modules, \[$(call comma_list,$(MODULES))\]}/" \
+ | sed "s/{id,[[:space:]]*\"git\"}/{id, \"$(subst /,\/,$(GITDESCRIBE))\"}/" \
+ > ebin/$(PROJECT).app
+endif
+ifneq ($(wildcard src/$(PROJECT).appup),)
+ $(verbose) cp src/$(PROJECT).appup ebin/
+endif
+
+clean:: clean-app
+
+clean-app:
+ $(gen_verbose) rm -rf $(PROJECT).d ebin/ priv/mibs/ $(XRL_ERL_FILES) $(YRL_ERL_FILES) \
+ $(addprefix include/,$(patsubst %.mib,%.hrl,$(notdir $(MIB_FILES)))) \
+ $(addprefix include/,$(patsubst %.asn1,%.hrl,$(notdir $(ASN1_FILES)))) \
+ $(addprefix include/,$(patsubst %.asn1,%.asn1db,$(notdir $(ASN1_FILES)))) \
+ $(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES))))
+
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Viktor Söderqvist <viktor@zuiderkwast.se>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: docs-deps
+
+# Configuration.
+
+ALL_DOC_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(DOC_DEPS))
+
+# Targets.
+
+$(foreach dep,$(DOC_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+doc-deps:
+else
+doc-deps: $(ALL_DOC_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_DOC_DEPS_DIRS) ; do $(MAKE) -C $$dep IS_DEP=1; done
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: rel-deps
+
+# Configuration.
+
+ALL_REL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(REL_DEPS))
+
+# Targets.
+
+$(foreach dep,$(REL_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+rel-deps:
+else
+rel-deps: $(ALL_REL_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_REL_DEPS_DIRS) ; do $(MAKE) -C $$dep; done
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: test-deps test-dir test-build clean-test-dir
+
+# Configuration.
+
+TEST_DIR ?= $(CURDIR)/test
+
+ALL_TEST_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(TEST_DEPS))
+
+TEST_ERLC_OPTS ?= +debug_info +warn_export_vars +warn_shadow_vars +warn_obsolete_guard
+TEST_ERLC_OPTS += -DTEST=1
+
+# Targets.
+
+$(foreach dep,$(TEST_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+test-deps:
+else
+test-deps: $(ALL_TEST_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_TEST_DEPS_DIRS) ; do \
+ if [ -z "$(strip $(FULL))" ] && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ else \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ fi \
+ done
+endif
+
+ifneq ($(wildcard $(TEST_DIR)),)
+test-dir:
+ $(gen_verbose) erlc -v $(TEST_ERLC_OPTS) -o $(TEST_DIR) \
+ -pa ebin/ -I include/ $(call core_find,$(TEST_DIR)/,*.erl)
+endif
+
+test-build:: IS_TEST=1
+test-build:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build:: $(if $(wildcard src),$(if $(wildcard ebin/test),,clean)) $(if $(IS_APP),,deps test-deps)
+# We already compiled everything when IS_APP=1.
+ifndef IS_APP
+ifneq ($(wildcard $(TEST_DIR)),)
+ $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+endif
+ifneq ($(wildcard src),)
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(verbose) $(MAKE) --no-print-directory app-build ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(gen_verbose) touch ebin/test
+endif
+endif
+
+# Roughly the same as test-build, but when IS_APP=1.
+# We only care about compiling the current application.
+ifdef IS_APP
+test-build-app:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build-app:: deps test-deps
+ifneq ($(wildcard $(TEST_DIR)),)
+ $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+endif
+ifneq ($(wildcard src),)
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(verbose) $(MAKE) --no-print-directory app-build ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(gen_verbose) touch ebin/test
+endif
+endif
+
+clean:: clean-test-dir
+
+clean-test-dir:
+ifneq ($(wildcard $(TEST_DIR)/*.beam),)
+ $(gen_verbose) rm -f $(TEST_DIR)/*.beam
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: rebar.config
+
+# We strip out -Werror because we don't want to fail due to
+# warnings when used as a dependency.
+
+compat_prepare_erlc_opts = $(shell echo "$1" | sed 's/, */,/g')
+
+define compat_convert_erlc_opts
+$(if $(filter-out -Werror,$1),\
+ $(if $(findstring +,$1),\
+ $(shell echo $1 | cut -b 2-)))
+endef
+
+define compat_erlc_opts_to_list
+[$(call comma_list,$(foreach o,$(call compat_prepare_erlc_opts,$1),$(call compat_convert_erlc_opts,$o)))]
+endef
+
+define compat_rebar_config
+{deps, [
+$(call comma_list,$(foreach d,$(DEPS),\
+ $(if $(filter hex,$(call dep_fetch,$d)),\
+ {$(call dep_name,$d)$(comma)"$(call dep_repo,$d)"},\
+ {$(call dep_name,$d)$(comma)".*"$(comma){git,"$(call dep_repo,$d)"$(comma)"$(call dep_commit,$d)"}})))
+]}.
+{erl_opts, $(call compat_erlc_opts_to_list,$(ERLC_OPTS))}.
+endef
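+
+# Example (illustrative; the dependency is an assumption): with
+#
+#     DEPS = cowboy
+#     dep_cowboy = git https://github.com/ninenines/cowboy 2.8.0
+#
+# the generated rebar.config could look like:
+#
+#     {deps, [
+#     {cowboy,".*",{git,"https://github.com/ninenines/cowboy","2.8.0"}}
+#     ]}.
+#     {erl_opts, [debug_info,warn_export_vars,warn_shadow_vars,warn_obsolete_guard]}.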
+
+rebar.config:
+ $(gen_verbose) $(call core_render,compat_rebar_config,rebar.config)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter asciideck,$(DEPS) $(DOC_DEPS)),asciideck)
+
+.PHONY: asciidoc asciidoc-guide asciidoc-manual install-asciidoc distclean-asciidoc-guide distclean-asciidoc-manual
+
+# Core targets.
+
+docs:: asciidoc
+
+distclean:: distclean-asciidoc-guide distclean-asciidoc-manual
+
+# Plugin-specific targets.
+
+asciidoc: asciidoc-guide asciidoc-manual
+
+# User guide.
+
+ifeq ($(wildcard doc/src/guide/book.asciidoc),)
+asciidoc-guide:
+else
+asciidoc-guide: distclean-asciidoc-guide doc-deps
+ a2x -v -f pdf doc/src/guide/book.asciidoc && mv doc/src/guide/book.pdf doc/guide.pdf
+ a2x -v -f chunked doc/src/guide/book.asciidoc && mv doc/src/guide/book.chunked/ doc/html/
+
+distclean-asciidoc-guide:
+ $(gen_verbose) rm -rf doc/html/ doc/guide.pdf
+endif
+
+# Man pages.
+
+ASCIIDOC_MANUAL_FILES := $(wildcard doc/src/manual/*.asciidoc)
+
+ifeq ($(ASCIIDOC_MANUAL_FILES),)
+asciidoc-manual:
+else
+
+# Configuration.
+
+MAN_INSTALL_PATH ?= /usr/local/share/man
+MAN_SECTIONS ?= 3 7
+MAN_PROJECT ?= $(shell echo $(PROJECT) | sed 's/^./\U&\E/')
+MAN_VERSION ?= $(PROJECT_VERSION)
+
+# Plugin-specific targets.
+
+define asciidoc2man.erl
+try
+ [begin
+ io:format(" ADOC ~s~n", [F]),
+ ok = asciideck:to_manpage(asciideck:parse_file(F), #{
+ compress => gzip,
+ outdir => filename:dirname(F),
+ extra2 => "$(MAN_PROJECT) $(MAN_VERSION)",
+ extra3 => "$(MAN_PROJECT) Function Reference"
+ })
+ end || F <- [$(shell echo $(addprefix $(comma)\",$(addsuffix \",$1)) | sed 's/^.//')]],
+ halt(0)
+catch C:E ->
+ io:format("Exception ~p:~p~nStacktrace: ~p~n", [C, E, erlang:get_stacktrace()]),
+ halt(1)
+end.
+endef
+
+asciidoc-manual:: doc-deps
+
+asciidoc-manual:: $(ASCIIDOC_MANUAL_FILES)
+ $(gen_verbose) $(call erlang,$(call asciidoc2man.erl,$?))
+ $(verbose) $(foreach s,$(MAN_SECTIONS),mkdir -p doc/man$s/ && mv doc/src/manual/*.$s.gz doc/man$s/;)
+
+install-docs:: install-asciidoc
+
+install-asciidoc: asciidoc-manual
+ $(foreach s,$(MAN_SECTIONS),\
+ mkdir -p $(MAN_INSTALL_PATH)/man$s/ && \
+ install -g `id -g` -o `id -u` -m 0644 doc/man$s/*.gz $(MAN_INSTALL_PATH)/man$s/;)
+
+distclean-asciidoc-manual:
+ $(gen_verbose) rm -rf $(addprefix doc/man,$(MAN_SECTIONS))
+endif
+endif
+
+# Copyright (c) 2014-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: bootstrap bootstrap-lib bootstrap-rel new list-templates
+
+# Core targets.
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Bootstrap targets:" \
+ " bootstrap Generate a skeleton of an OTP application" \
+ " bootstrap-lib Generate a skeleton of an OTP library" \
+ " bootstrap-rel Generate the files needed to build a release" \
+ " new-app in=NAME Create a new local OTP application NAME" \
+ " new-lib in=NAME Create a new local OTP library NAME" \
+ " new t=TPL n=NAME Generate a module NAME based on the template TPL" \
+ " new t=T n=N in=APP Generate a module NAME based on the template TPL in APP" \
+ " list-templates List available templates"
+
+# Bootstrap templates.
+
+define bs_appsrc
+{application, $p, [
+ {description, ""},
+ {vsn, "0.1.0"},
+ {id, "git"},
+ {modules, []},
+ {registered, []},
+ {applications, [
+ kernel,
+ stdlib
+ ]},
+ {mod, {$p_app, []}},
+ {env, []}
+]}.
+endef
+
+define bs_appsrc_lib
+{application, $p, [
+ {description, ""},
+ {vsn, "0.1.0"},
+ {id, "git"},
+ {modules, []},
+ {registered, []},
+ {applications, [
+ kernel,
+ stdlib
+ ]}
+]}.
+endef
+
+# To prevent autocompletion issues with ZSH, we add "include erlang.mk"
+# separately during the actual bootstrap.
+define bs_Makefile
+PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.1.0
+$(if $(SP),
+# Whitespace to be used when creating files from templates.
+SP = $(SP)
+)
+endef
+
+define bs_apps_Makefile
+PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.1.0
+$(if $(SP),
+# Whitespace to be used when creating files from templates.
+SP = $(SP)
+)
+# Make sure we know where the applications are located.
+ROOT_DIR ?= $(call core_relpath,$(dir $(ERLANG_MK_FILENAME)),$(APPS_DIR)/app)
+APPS_DIR ?= ..
+DEPS_DIR ?= $(call core_relpath,$(DEPS_DIR),$(APPS_DIR)/app)
+
+include $$(ROOT_DIR)/erlang.mk
+endef
+
+define bs_app
+-module($p_app).
+-behaviour(application).
+
+-export([start/2]).
+-export([stop/1]).
+
+start(_Type, _Args) ->
+ $p_sup:start_link().
+
+stop(_State) ->
+ ok.
+endef
+
+define bs_relx_config
+{release, {$p_release, "1"}, [$p, sasl, runtime_tools]}.
+{extended_start_script, true}.
+{sys_config, "config/sys.config"}.
+{vm_args, "config/vm.args"}.
+endef
+
+define bs_sys_config
+[
+].
+endef
+
+define bs_vm_args
+-name $p@127.0.0.1
+-setcookie $p
+-heart
+endef
+
+# Normal templates.
+
+define tpl_supervisor
+-module($(n)).
+-behaviour(supervisor).
+
+-export([start_link/0]).
+-export([init/1]).
+
+start_link() ->
+ supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+init([]) ->
+ Procs = [],
+ {ok, {{one_for_one, 1, 5}, Procs}}.
+endef
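+
+# Example (illustrative; my_server is an assumption): a worker is added to
+# the generated supervisor by extending Procs with a child specification:
+#
+#     Procs = [{my_server, {my_server, start_link, []},
+#         permanent, 5000, worker, [my_server]}],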
+
+define tpl_gen_server
+-module($(n)).
+-behaviour(gen_server).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_server.
+-export([init/1]).
+-export([handle_call/3]).
+-export([handle_cast/2]).
+-export([handle_info/2]).
+-export([terminate/2]).
+-export([code_change/3]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_server:start_link(?MODULE, [], []).
+
+%% gen_server.
+
+init([]) ->
+ {ok, #state{}}.
+
+handle_call(_Request, _From, State) ->
+ {reply, ignored, State}.
+
+handle_cast(_Msg, State) ->
+ {noreply, State}.
+
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+endef
+
+define tpl_module
+-module($(n)).
+-export([]).
+endef
+
+define tpl_cowboy_http
+-module($(n)).
+-behaviour(cowboy_http_handler).
+
+-export([init/3]).
+-export([handle/2]).
+-export([terminate/3]).
+
+-record(state, {
+}).
+
+init(_, Req, _Opts) ->
+ {ok, Req, #state{}}.
+
+handle(Req, State=#state{}) ->
+ {ok, Req2} = cowboy_req:reply(200, Req),
+ {ok, Req2, State}.
+
+terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_gen_fsm
+-module($(n)).
+-behaviour(gen_fsm).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_fsm.
+-export([init/1]).
+-export([state_name/2]).
+-export([handle_event/3]).
+-export([state_name/3]).
+-export([handle_sync_event/4]).
+-export([handle_info/3]).
+-export([terminate/3]).
+-export([code_change/4]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_fsm:start_link(?MODULE, [], []).
+
+%% gen_fsm.
+
+init([]) ->
+ {ok, state_name, #state{}}.
+
+state_name(_Event, StateData) ->
+ {next_state, state_name, StateData}.
+
+handle_event(_Event, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+state_name(_Event, _From, StateData) ->
+ {reply, ignored, state_name, StateData}.
+
+handle_sync_event(_Event, _From, StateName, StateData) ->
+ {reply, ignored, StateName, StateData}.
+
+handle_info(_Info, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+terminate(_Reason, _StateName, _StateData) ->
+ ok.
+
+code_change(_OldVsn, StateName, StateData, _Extra) ->
+ {ok, StateName, StateData}.
+endef
+
+define tpl_gen_statem
+-module($(n)).
+-behaviour(gen_statem).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_statem.
+-export([callback_mode/0]).
+-export([init/1]).
+-export([state_name/3]).
+-export([handle_event/4]).
+-export([terminate/3]).
+-export([code_change/4]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_statem:start_link(?MODULE, [], []).
+
+%% gen_statem.
+
+callback_mode() ->
+ state_functions.
+
+init([]) ->
+ {ok, state_name, #state{}}.
+
+state_name(_EventType, _EventData, StateData) ->
+ {next_state, state_name, StateData}.
+
+handle_event(_EventType, _EventData, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+terminate(_Reason, _StateName, _StateData) ->
+ ok.
+
+code_change(_OldVsn, StateName, StateData, _Extra) ->
+ {ok, StateName, StateData}.
+endef
+
+define tpl_cowboy_loop
+-module($(n)).
+-behaviour(cowboy_loop_handler).
+
+-export([init/3]).
+-export([info/3]).
+-export([terminate/3]).
+
+-record(state, {
+}).
+
+init(_, Req, _Opts) ->
+ {loop, Req, #state{}, 5000, hibernate}.
+
+info(_Info, Req, State) ->
+ {loop, Req, State, hibernate}.
+
+terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_cowboy_rest
+-module($(n)).
+
+-export([init/3]).
+-export([content_types_provided/2]).
+-export([get_html/2]).
+
+init(_, _Req, _Opts) ->
+ {upgrade, protocol, cowboy_rest}.
+
+content_types_provided(Req, State) ->
+ {[{{<<"text">>, <<"html">>, '*'}, get_html}], Req, State}.
+
+get_html(Req, State) ->
+ {<<"<html><body>This is REST!</body></html>">>, Req, State}.
+endef
+
+define tpl_cowboy_ws
+-module($(n)).
+-behaviour(cowboy_websocket_handler).
+
+-export([init/3]).
+-export([websocket_init/3]).
+-export([websocket_handle/3]).
+-export([websocket_info/3]).
+-export([websocket_terminate/3]).
+
+-record(state, {
+}).
+
+init(_, _, _) ->
+ {upgrade, protocol, cowboy_websocket}.
+
+websocket_init(_, Req, _Opts) ->
+ Req2 = cowboy_req:compact(Req),
+ {ok, Req2, #state{}}.
+
+websocket_handle({text, Data}, Req, State) ->
+ {reply, {text, Data}, Req, State};
+websocket_handle({binary, Data}, Req, State) ->
+ {reply, {binary, Data}, Req, State};
+websocket_handle(_Frame, Req, State) ->
+ {ok, Req, State}.
+
+websocket_info(_Info, Req, State) ->
+ {ok, Req, State}.
+
+websocket_terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_ranch_protocol
+-module($(n)).
+-behaviour(ranch_protocol).
+
+-export([start_link/4]).
+-export([init/4]).
+
+-type opts() :: [].
+-export_type([opts/0]).
+
+-record(state, {
+ socket :: inet:socket(),
+ transport :: module()
+}).
+
+start_link(Ref, Socket, Transport, Opts) ->
+ Pid = spawn_link(?MODULE, init, [Ref, Socket, Transport, Opts]),
+ {ok, Pid}.
+
+-spec init(ranch:ref(), inet:socket(), module(), opts()) -> ok.
+init(Ref, Socket, Transport, _Opts) ->
+ ok = ranch:accept_ack(Ref),
+ loop(#state{socket=Socket, transport=Transport}).
+
+loop(State) ->
+ loop(State).
+endef
+
+# Plugin-specific targets.
+
+ifndef WS
+ifdef SP
+WS = $(subst a,,a $(wordlist 1,$(SP),a a a a a a a a a a a a a a a a a a a a))
+else
+WS = $(tab)
+endif
+endif
+
+bootstrap:
+ifneq ($(wildcard src/),)
+ $(error Error: src/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(eval n := $(PROJECT)_sup)
+ $(verbose) $(call core_render,bs_Makefile,Makefile)
+ $(verbose) echo "include erlang.mk" >> Makefile
+ $(verbose) mkdir src/
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc,src/$(PROJECT).app.src)
+endif
+ $(verbose) $(call core_render,bs_app,src/$(PROJECT)_app.erl)
+ $(verbose) $(call core_render,tpl_supervisor,src/$(PROJECT)_sup.erl)
+
+bootstrap-lib:
+ifneq ($(wildcard src/),)
+ $(error Error: src/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(verbose) $(call core_render,bs_Makefile,Makefile)
+ $(verbose) echo "include erlang.mk" >> Makefile
+ $(verbose) mkdir src/
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc_lib,src/$(PROJECT).app.src)
+endif
+
+bootstrap-rel:
+ifneq ($(wildcard relx.config),)
+ $(error Error: relx.config already exists)
+endif
+ifneq ($(wildcard config/),)
+ $(error Error: config/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(verbose) $(call core_render,bs_relx_config,relx.config)
+ $(verbose) mkdir config/
+ $(verbose) $(call core_render,bs_sys_config,config/sys.config)
+ $(verbose) $(call core_render,bs_vm_args,config/vm.args)
+
+new-app:
+ifndef in
+ $(error Usage: $(MAKE) new-app in=APP)
+endif
+ifneq ($(wildcard $(APPS_DIR)/$in),)
+ $(error Error: Application $in already exists)
+endif
+ $(eval p := $(in))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(eval n := $(in)_sup)
+ $(verbose) mkdir -p $(APPS_DIR)/$p/src/
+ $(verbose) $(call core_render,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile)
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc,$(APPS_DIR)/$p/src/$p.app.src)
+endif
+ $(verbose) $(call core_render,bs_app,$(APPS_DIR)/$p/src/$p_app.erl)
+ $(verbose) $(call core_render,tpl_supervisor,$(APPS_DIR)/$p/src/$p_sup.erl)
+
+new-lib:
+ifndef in
+ $(error Usage: $(MAKE) new-lib in=APP)
+endif
+ifneq ($(wildcard $(APPS_DIR)/$in),)
+ $(error Error: Application $in already exists)
+endif
+ $(eval p := $(in))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(verbose) mkdir -p $(APPS_DIR)/$p/src/
+ $(verbose) $(call core_render,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile)
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc_lib,$(APPS_DIR)/$p/src/$p.app.src)
+endif
+
+new:
+ifeq ($(wildcard src/)$(in),)
+ $(error Error: src/ directory does not exist)
+endif
+ifndef t
+ $(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP])
+endif
+ifndef n
+ $(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP])
+endif
+ifdef in
+ $(verbose) $(call core_render,tpl_$(t),$(APPS_DIR)/$(in)/src/$(n).erl)
+else
+ $(verbose) $(call core_render,tpl_$(t),src/$(n).erl)
+endif
+
+list-templates:
+ $(verbose) echo Available templates:
+ $(verbose) printf " %s\n" $(sort $(patsubst tpl_%,%,$(filter tpl_%,$(.VARIABLES))))
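+
+# Example (illustrative; the names are assumptions): "make new t=gen_server
+# n=my_server" renders the gen_server template above into src/my_server.erl;
+# adding in=myapp writes it to $(APPS_DIR)/myapp/src/ instead.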
+
+# Copyright (c) 2014-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: clean-c_src distclean-c_src-env
+
+# Configuration.
+
+C_SRC_DIR ?= $(CURDIR)/c_src
+C_SRC_ENV ?= $(C_SRC_DIR)/env.mk
+C_SRC_OUTPUT ?= $(CURDIR)/priv/$(PROJECT)
+C_SRC_TYPE ?= shared
+
+# System type and C compiler/flags.
+
+ifeq ($(PLATFORM),msys2)
+ C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?= .exe
+ C_SRC_OUTPUT_SHARED_EXTENSION ?= .dll
+else
+ C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?=
+ C_SRC_OUTPUT_SHARED_EXTENSION ?= .so
+endif
+
+ifeq ($(C_SRC_TYPE),shared)
+ C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_SHARED_EXTENSION)
+else
+ C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_EXECUTABLE_EXTENSION)
+endif
+
+ifeq ($(PLATFORM),msys2)
+# We hardcode the compiler used on MSYS2. The default CC=cc does not
+# produce working code, and neither does the "gcc" MSYS2 package.
+ CC = /mingw64/bin/gcc
+ export CC
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),darwin)
+ CC ?= cc
+ CFLAGS ?= -O3 -std=c99 -arch x86_64 -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -arch x86_64 -Wall
+ LDFLAGS ?= -arch x86_64 -flat_namespace -undefined suppress
+else ifeq ($(PLATFORM),freebsd)
+ CC ?= cc
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),linux)
+ CC ?= gcc
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+endif
+
+ifneq ($(PLATFORM),msys2)
+ CFLAGS += -fPIC
+ CXXFLAGS += -fPIC
+endif
+
+CFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
+CXXFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
+
+LDLIBS += -L"$(ERL_INTERFACE_LIB_DIR)" -lerl_interface -lei
+
+# Verbosity.
+
+c_verbose_0 = @echo " C " $(?F);
+c_verbose = $(c_verbose_$(V))
+
+cpp_verbose_0 = @echo " CPP " $(?F);
+cpp_verbose = $(cpp_verbose_$(V))
+
+link_verbose_0 = @echo " LD " $(@F);
+link_verbose = $(link_verbose_$(V))
+
+# Targets.
+
+ifeq ($(wildcard $(C_SRC_DIR)),)
+else ifneq ($(wildcard $(C_SRC_DIR)/Makefile),)
+app:: app-c_src
+
+test-build:: app-c_src
+
+app-c_src:
+ $(MAKE) -C $(C_SRC_DIR)
+
+clean::
+ $(MAKE) -C $(C_SRC_DIR) clean
+
+else
+
+ifeq ($(SOURCES),)
+SOURCES := $(sort $(foreach pat,*.c *.C *.cc *.cpp,$(call core_find,$(C_SRC_DIR)/,$(pat))))
+endif
+OBJECTS = $(addsuffix .o, $(basename $(SOURCES)))
+
+COMPILE_C = $(c_verbose) $(CC) $(CFLAGS) $(CPPFLAGS) -c
+COMPILE_CPP = $(cpp_verbose) $(CXX) $(CXXFLAGS) $(CPPFLAGS) -c
+
+app:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
+
+test-build:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
+
+$(C_SRC_OUTPUT_FILE): $(OBJECTS)
+ $(verbose) mkdir -p $(dir $@)
+ $(link_verbose) $(CC) $(OBJECTS) \
+ $(LDFLAGS) $(if $(filter $(C_SRC_TYPE),shared),-shared) $(LDLIBS) \
+ -o $(C_SRC_OUTPUT_FILE)
+
+$(OBJECTS): $(MAKEFILE_LIST) $(C_SRC_ENV)
+
+%.o: %.c
+ $(COMPILE_C) $(OUTPUT_OPTION) $<
+
+%.o: %.cc
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+%.o: %.C
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+%.o: %.cpp
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+clean:: clean-c_src
+
+clean-c_src:
+ $(gen_verbose) rm -f $(C_SRC_OUTPUT_FILE) $(OBJECTS)
+
+endif
+
+ifneq ($(wildcard $(C_SRC_DIR)),)
+ERL_ERTS_DIR = $(shell $(ERL) -eval 'io:format("~s~n", [code:lib_dir(erts)]), halt().')
+
+$(C_SRC_ENV):
+ $(verbose) $(ERL) -eval "file:write_file(\"$(call core_native_path,$(C_SRC_ENV))\", \
+ io_lib:format( \
+ \"# Generated by Erlang.mk. Edit at your own risk!~n~n\" \
+ \"ERTS_INCLUDE_DIR ?= ~s/erts-~s/include/~n\" \
+ \"ERL_INTERFACE_INCLUDE_DIR ?= ~s~n\" \
+ \"ERL_INTERFACE_LIB_DIR ?= ~s~n\" \
+ \"ERTS_DIR ?= $(ERL_ERTS_DIR)~n\", \
+ [code:root_dir(), erlang:system_info(version), \
+ code:lib_dir(erl_interface, include), \
+ code:lib_dir(erl_interface, lib)])), \
+ halt()."
+
+distclean:: distclean-c_src-env
+
+distclean-c_src-env:
+ $(gen_verbose) rm -f $(C_SRC_ENV)
+
+-include $(C_SRC_ENV)
+
+ifneq ($(ERL_ERTS_DIR),$(ERTS_DIR))
+$(shell rm -f $(C_SRC_ENV))
+endif
+endif
+
+# Templates.
+
+define bs_c_nif
+#include "erl_nif.h"
+
+static int loads = 0;
+
+static int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
+{
+ /* Initialize private data. */
+ *priv_data = NULL;
+
+ loads++;
+
+ return 0;
+}
+
+static int upgrade(ErlNifEnv* env, void** priv_data, void** old_priv_data, ERL_NIF_TERM load_info)
+{
+ /* Convert the private data to the new version. */
+ *priv_data = *old_priv_data;
+
+ loads++;
+
+ return 0;
+}
+
+static void unload(ErlNifEnv* env, void* priv_data)
+{
+ if (loads == 1) {
+ /* Destroy the private data. */
+ }
+
+ loads--;
+}
+
+static ERL_NIF_TERM hello(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ if (enif_is_atom(env, argv[0])) {
+ return enif_make_tuple2(env,
+ enif_make_atom(env, "hello"),
+ argv[0]);
+ }
+
+ return enif_make_tuple2(env,
+ enif_make_atom(env, "error"),
+ enif_make_atom(env, "badarg"));
+}
+
+static ErlNifFunc nif_funcs[] = {
+ {"hello", 1, hello}
+};
+
+ERL_NIF_INIT($n, nif_funcs, load, NULL, upgrade, unload)
+endef
+
+define bs_erl_nif
+-module($n).
+
+-export([hello/1]).
+
+-on_load(on_load/0).
+on_load() ->
+ PrivDir = case code:priv_dir(?MODULE) of
+ {error, _} ->
+ AppPath = filename:dirname(filename:dirname(code:which(?MODULE))),
+ filename:join(AppPath, "priv");
+ Path ->
+ Path
+ end,
+ erlang:load_nif(filename:join(PrivDir, atom_to_list(?MODULE)), 0).
+
+hello(_) ->
+ erlang:nif_error({not_loaded, ?MODULE}).
+endef
+
+new-nif:
+ifneq ($(wildcard $(C_SRC_DIR)/$n.c),)
+ $(error Error: $(C_SRC_DIR)/$n.c already exists)
+endif
+ifneq ($(wildcard src/$n.erl),)
+ $(error Error: src/$n.erl already exists)
+endif
+ifndef n
+ $(error Usage: $(MAKE) new-nif n=NAME [in=APP])
+endif
+ifdef in
+ $(verbose) $(MAKE) -C $(APPS_DIR)/$(in)/ new-nif n=$n in=
+else
+ $(verbose) mkdir -p $(C_SRC_DIR) src/
+ $(verbose) $(call core_render,bs_c_nif,$(C_SRC_DIR)/$n.c)
+ $(verbose) $(call core_render,bs_erl_nif,src/$n.erl)
+endif
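+
+# Example (illustrative; hello_nif is an assumption): "make new-nif
+# n=hello_nif" renders the two templates above into c_src/hello_nif.c and
+# src/hello_nif.erl; the C part is then built by the c_src rules earlier in
+# this file.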
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: ci ci-prepare ci-setup
+
+CI_OTP ?=
+CI_HIPE ?=
+CI_ERLLVM ?=
+
+ifeq ($(CI_VM),native)
+ERLC_OPTS += +native
+TEST_ERLC_OPTS += +native
+else ifeq ($(CI_VM),erllvm)
+ERLC_OPTS += +native +'{hipe, [to_llvm]}'
+TEST_ERLC_OPTS += +native +'{hipe, [to_llvm]}'
+endif
+
+ifeq ($(strip $(CI_OTP) $(CI_HIPE) $(CI_ERLLVM)),)
+ci::
+else
+
+ci:: $(addprefix ci-,$(CI_OTP) $(addsuffix -native,$(CI_HIPE)) $(addsuffix -erllvm,$(CI_ERLLVM)))
+
+ci-prepare: $(addprefix $(KERL_INSTALL_DIR)/,$(CI_OTP) $(addsuffix -native,$(CI_HIPE)))
+
+ci-setup::
+ $(verbose) :
+
+ci-extra::
+ $(verbose) :
+
+ci_verbose_0 = @echo " CI " $(1);
+ci_verbose = $(ci_verbose_$(V))
+
+define ci_target
+ci-$1: $(KERL_INSTALL_DIR)/$2
+ $(verbose) $(MAKE) --no-print-directory clean
+ $(ci_verbose) \
+ PATH="$(KERL_INSTALL_DIR)/$2/bin:$(PATH)" \
+ CI_OTP_RELEASE="$1" \
+ CT_OPTS="-label $1" \
+ CI_VM="$3" \
+ $(MAKE) ci-setup tests
+ $(verbose) $(MAKE) --no-print-directory ci-extra
+endef
+
+$(foreach otp,$(CI_OTP),$(eval $(call ci_target,$(otp),$(otp),otp)))
+$(foreach otp,$(CI_HIPE),$(eval $(call ci_target,$(otp)-native,$(otp)-native,native)))
+$(foreach otp,$(CI_ERLLVM),$(eval $(call ci_target,$(otp)-erllvm,$(otp)-native,erllvm)))
+
+$(foreach otp,$(filter-out $(ERLANG_OTP),$(CI_OTP)),$(eval $(call kerl_otp_target,$(otp))))
+$(foreach otp,$(filter-out $(ERLANG_HIPE),$(sort $(CI_HIPE) $(CI_ERLLVM))),$(eval $(call kerl_hipe_target,$(otp))))
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Continuous Integration targets:" \
+ " ci Run '$(MAKE) tests' on all configured Erlang versions." \
+ "" \
+ "The CI_OTP variable must list the Erlang versions to be tested." \
+ "For example: CI_OTP = OTP-17.3.4 OTP-17.5.3"
+
+endif
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: ct apps-ct distclean-ct
+
+# Configuration.
+
+CT_OPTS ?=
+
+ifneq ($(wildcard $(TEST_DIR)),)
+ifndef CT_SUITES
+CT_SUITES := $(sort $(subst _SUITE.erl,,$(notdir $(call core_find,$(TEST_DIR)/,*_SUITE.erl))))
+endif
+endif
+CT_SUITES ?=
+CT_LOGS_DIR ?= $(CURDIR)/logs
+
+# Core targets.
+
+tests:: ct
+
+ifndef KEEP_LOGS
+distclean:: distclean-ct
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Common_test targets:" \
+ " ct Run all the common_test suites for this project" \
+ "" \
+ "Each common_test suite has an associated target." \
+ "A suite named http_SUITE can be run using the ct-http target."
+
+# Plugin-specific targets.
+
+CT_RUN = ct_run \
+ -no_auto_compile \
+ -noinput \
+ -pa $(CURDIR)/ebin $(TEST_DIR) \
+ -dir $(TEST_DIR) \
+ -logdir $(CT_LOGS_DIR)
+
+ifeq ($(CT_SUITES),)
+ct: $(if $(IS_APP)$(ROOT_DIR),,apps-ct)
+else
+# We do not run tests if we are in an apps/* application with no test directory.
+ifneq ($(IS_APP)$(wildcard $(TEST_DIR)),1)
+ct: test-build $(if $(IS_APP)$(ROOT_DIR),,apps-ct)
+ $(verbose) mkdir -p $(CT_LOGS_DIR)
+ $(gen_verbose) $(CT_RUN) -sname ct_$(PROJECT) -suite $(addsuffix _SUITE,$(CT_SUITES)) $(CT_OPTS)
+endif
+endif
+
+ifneq ($(ALL_APPS_DIRS),)
+define ct_app_target
+apps-ct-$1: test-build
+ $$(MAKE) -C $1 ct IS_APP=1
+endef
+
+$(foreach app,$(ALL_APPS_DIRS),$(eval $(call ct_app_target,$(app))))
+
+apps-ct: $(addprefix apps-ct-,$(ALL_APPS_DIRS))
+endif
+
+ifdef t
+ifeq (,$(findstring :,$t))
+CT_EXTRA = -group $t
+else
+t_words = $(subst :, ,$t)
+CT_EXTRA = -group $(firstword $(t_words)) -case $(lastword $(t_words))
+endif
+else
+ifdef c
+CT_EXTRA = -case $c
+else
+CT_EXTRA =
+endif
+endif
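+
+# Example (illustrative; the group and case names are assumptions):
+# "make ct-http" runs http_SUITE alone, "make ct-http t=chunked" restricts it
+# to the chunked group, "make ct-http t=chunked:te_chunked" to one case in
+# that group, and "make ct-http c=te_chunked" selects a case directly.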
+
+define ct_suite_target
+ct-$(1): test-build
+ $(verbose) mkdir -p $(CT_LOGS_DIR)
+ $(gen_verbose_esc) $(CT_RUN) -sname ct_$(PROJECT) -suite $(addsuffix _SUITE,$(1)) $(CT_EXTRA) $(CT_OPTS)
+endef
+
+$(foreach test,$(CT_SUITES),$(eval $(call ct_suite_target,$(test))))
+
+distclean-ct:
+ $(gen_verbose) rm -rf $(CT_LOGS_DIR)
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: plt distclean-plt dialyze
+
+# Configuration.
+
+DIALYZER_PLT ?= $(CURDIR)/.$(PROJECT).plt
+export DIALYZER_PLT
+
+PLT_APPS ?=
+DIALYZER_DIRS ?= --src -r $(wildcard src) $(ALL_APPS_DIRS)
+DIALYZER_OPTS ?= -Werror_handling -Wrace_conditions -Wunmatched_returns # -Wunderspecs
+DIALYZER_PLT_OPTS ?=
+
+# Core targets.
+
+check:: dialyze
+
+distclean:: distclean-plt
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Dialyzer targets:" \
+ " plt Build a PLT file for this project" \
+ " dialyze Analyze the project using Dialyzer"
+
+# Plugin-specific targets.
+
+define filter_opts.erl
+ Opts = init:get_plain_arguments(),
+ {Filtered, _} = lists:foldl(fun
+ (O, {Os, true}) -> {[O|Os], false};
+ (O = "-D", {Os, _}) -> {[O|Os], true};
+ (O = [\\$$-, \\$$D, _ | _], {Os, _}) -> {[O|Os], false};
+ (O = "-I", {Os, _}) -> {[O|Os], true};
+ (O = [\\$$-, \\$$I, _ | _], {Os, _}) -> {[O|Os], false};
+ (O = "-pa", {Os, _}) -> {[O|Os], true};
+ (_, Acc) -> Acc
+ end, {[], false}, Opts),
+ io:format("~s~n", [string:join(lists:reverse(Filtered), " ")]),
+ halt().
+endef
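+
+# Example (illustrative): given plain arguments such as
+# "+debug_info -DCOMPILE_TIME -I include -pa ebin", the fold above keeps only
+# the -D, -I and -pa options (and their values) that Dialyzer understands,
+# printing "-DCOMPILE_TIME -I include -pa ebin". It is used by the dialyze
+# target below to filter the erlc options passed via -extra.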
+
+# DIALYZER_PLT is a variable understood directly by Dialyzer.
+#
+# We append the path to erts at the end of the PLT. This works
+# because the PLT file is in the external term format and the
+# function binary_to_term/1 ignores any trailing data.
+$(DIALYZER_PLT): deps app
+ $(eval DEPS_LOG := $(shell test -f $(ERLANG_MK_TMP)/deps.log && \
+ while read p; do test -d $$p/ebin && echo $$p/ebin; done <$(ERLANG_MK_TMP)/deps.log))
+ $(verbose) dialyzer --build_plt $(DIALYZER_PLT_OPTS) --apps \
+ erts kernel stdlib $(PLT_APPS) $(OTP_DEPS) $(LOCAL_DEPS) $(DEPS_LOG) || test $$? -eq 2
+ $(verbose) $(ERL) -eval 'io:format("~n~s~n", [code:lib_dir(erts)]), halt().' >> $@
+
+plt: $(DIALYZER_PLT)
+
+distclean-plt:
+ $(gen_verbose) rm -f $(DIALYZER_PLT)
+
+ifneq ($(wildcard $(DIALYZER_PLT)),)
+dialyze: $(if $(filter --src,$(DIALYZER_DIRS)),,deps app)
+ $(verbose) if ! tail -n1 $(DIALYZER_PLT) | \
+ grep -q "^`$(ERL) -eval 'io:format("~s", [code:lib_dir(erts)]), halt().'`$$"; then \
+ rm $(DIALYZER_PLT); \
+ $(MAKE) plt; \
+ fi
+else
+dialyze: $(DIALYZER_PLT)
+endif
+ $(verbose) dialyzer --no_native `$(ERL) \
+ -eval "$(subst $(newline),,$(call escape_dquotes,$(call filter_opts.erl)))" \
+ -extra $(ERLC_OPTS)` $(DIALYZER_DIRS) $(DIALYZER_OPTS) $(if $(wildcard ebin/),-pa ebin/)
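+
+# Example (illustrative; the applications are assumptions): extra apps can be
+# baked into the PLT and the warnings tuned in the project Makefile:
+#
+#     PLT_APPS = crypto public_key ssl
+#     DIALYZER_OPTS = -Werror_handling -Wunmatched_returns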
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-edoc edoc
+
+# Configuration.
+
+EDOC_OPTS ?=
+EDOC_SRC_DIRS ?=
+EDOC_OUTPUT ?= doc
+
+define edoc.erl
+ SrcPaths = lists:foldl(fun(P, Acc) ->
+ filelib:wildcard(atom_to_list(P) ++ "/{src,c_src}") ++ Acc
+ end, [], [$(call comma_list,$(patsubst %,'%',$(call core_native_path,$(EDOC_SRC_DIRS))))]),
+ DefaultOpts = [{dir, "$(EDOC_OUTPUT)"}, {source_path, SrcPaths}, {subpackages, false}],
+ edoc:application($(1), ".", [$(2)] ++ DefaultOpts),
+ halt(0).
+endef
+
+# Core targets.
+
+ifneq ($(strip $(EDOC_SRC_DIRS)$(wildcard doc/overview.edoc)),)
+docs:: edoc
+endif
+
+distclean:: distclean-edoc
+
+# Plugin-specific targets.
+
+edoc: distclean-edoc doc-deps
+ $(gen_verbose) $(call erlang,$(call edoc.erl,$(PROJECT),$(EDOC_OPTS)))
+
+distclean-edoc:
+ $(gen_verbose) rm -f $(EDOC_OUTPUT)/*.css $(EDOC_OUTPUT)/*.html $(EDOC_OUTPUT)/*.png $(EDOC_OUTPUT)/edoc-info
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Configuration.
+
+DTL_FULL_PATH ?=
+DTL_PATH ?= templates/
+DTL_PREFIX ?=
+DTL_SUFFIX ?= _dtl
+DTL_OPTS ?=
+
+# Verbosity.
+
+dtl_verbose_0 = @echo " DTL " $(filter %.dtl,$(?F));
+dtl_verbose = $(dtl_verbose_$(V))
+
+# Core targets.
+
+DTL_PATH := $(abspath $(DTL_PATH))
+DTL_FILES := $(sort $(call core_find,$(DTL_PATH),*.dtl))
+
+ifneq ($(DTL_FILES),)
+
+DTL_NAMES = $(addprefix $(DTL_PREFIX),$(addsuffix $(DTL_SUFFIX),$(DTL_FILES:$(DTL_PATH)/%.dtl=%)))
+DTL_MODULES = $(if $(DTL_FULL_PATH),$(subst /,_,$(DTL_NAMES)),$(notdir $(DTL_NAMES)))
+BEAM_FILES += $(addsuffix .beam,$(addprefix ebin/,$(DTL_MODULES)))
+
+ifneq ($(words $(DTL_FILES)),0)
+# Rebuild templates when the Makefile changes.
+$(ERLANG_MK_TMP)/last-makefile-change-erlydtl: $(MAKEFILE_LIST) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(DTL_FILES); \
+ fi
+ $(verbose) touch $@
+
+ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change-erlydtl
+endif
+
+define erlydtl_compile.erl
+ [begin
+ Module0 = case "$(strip $(DTL_FULL_PATH))" of
+ "" ->
+ filename:basename(F, ".dtl");
+ _ ->
+ "$(call core_native_path,$(DTL_PATH))/" ++ F2 = filename:rootname(F, ".dtl"),
+ re:replace(F2, "/", "_", [{return, list}, global])
+ end,
+ Module = list_to_atom("$(DTL_PREFIX)" ++ string:to_lower(Module0) ++ "$(DTL_SUFFIX)"),
+ case erlydtl:compile(F, Module, [$(DTL_OPTS)] ++ [{out_dir, "ebin/"}, return_errors]) of
+ ok -> ok;
+ {ok, _} -> ok
+ end
+ end || F <- string:tokens("$(1)", " ")],
+ halt().
+endef
+
+ebin/$(PROJECT).app:: $(DTL_FILES) | ebin/
+ $(if $(strip $?),\
+ $(dtl_verbose) $(call erlang,$(call erlydtl_compile.erl,$(call core_native_path,$?)),\
+ -pa ebin/))
+
+endif
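+
+# Example (illustrative): with the defaults above, templates/foo.dtl compiles
+# to a foo_dtl module in ebin/; with DTL_FULL_PATH set,
+# templates/users/show.dtl becomes users_show_dtl instead of show_dtl.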
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, Dave Cottlehuber <dch@skunkwerks.at>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-escript escript escript-zip
+
+# Configuration.
+
+ESCRIPT_NAME ?= $(PROJECT)
+ESCRIPT_FILE ?= $(ESCRIPT_NAME)
+
+ESCRIPT_SHEBANG ?= /usr/bin/env escript
+ESCRIPT_COMMENT ?= This is an -*- erlang -*- file
+ESCRIPT_EMU_ARGS ?= -escript main $(ESCRIPT_NAME)
+
+ESCRIPT_ZIP ?= 7z a -tzip -mx=9 -mtc=off $(if $(filter-out 0,$(V)),,> /dev/null)
+ESCRIPT_ZIP_FILE ?= $(ERLANG_MK_TMP)/escript.zip
+
+# Core targets.
+
+distclean:: distclean-escript
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Escript targets:" \
+ " escript Build an executable escript archive" \
+
+# Plugin-specific targets.
+
+escript-zip:: FULL=1
+escript-zip:: deps app
+ $(verbose) mkdir -p $(dir $(ESCRIPT_ZIP_FILE))
+ $(verbose) rm -f $(ESCRIPT_ZIP_FILE)
+ $(gen_verbose) cd .. && $(ESCRIPT_ZIP) $(ESCRIPT_ZIP_FILE) $(PROJECT)/ebin/*
+ifneq ($(DEPS),)
+ $(verbose) cd $(DEPS_DIR) && $(ESCRIPT_ZIP) $(ESCRIPT_ZIP_FILE) \
+ $(subst $(DEPS_DIR)/,,$(addsuffix /*,$(wildcard \
+ $(addsuffix /ebin,$(shell cat $(ERLANG_MK_TMP)/deps.log)))))
+endif
+
+escript:: escript-zip
+ $(gen_verbose) printf "%s\n" \
+ "#!$(ESCRIPT_SHEBANG)" \
+ "%% $(ESCRIPT_COMMENT)" \
+ "%%! $(ESCRIPT_EMU_ARGS)" > $(ESCRIPT_FILE)
+ $(verbose) cat $(ESCRIPT_ZIP_FILE) >> $(ESCRIPT_FILE)
+ $(verbose) chmod +x $(ESCRIPT_FILE)
+
+distclean-escript:
+ $(gen_verbose) rm -f $(ESCRIPT_FILE)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, Enrique Fernandez <enrique.fernandez@erlang-solutions.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: eunit apps-eunit
+
+# Configuration
+
+EUNIT_OPTS ?=
+EUNIT_ERL_OPTS ?=
+
+# Core targets.
+
+tests:: eunit
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "EUnit targets:" \
+ " eunit Run all the EUnit tests for this project"
+
+# Plugin-specific targets.
+
+define eunit.erl
+ $(call cover.erl)
+ CoverSetup(),
+ case eunit:test($1, [$(EUNIT_OPTS)]) of
+ ok -> ok;
+ error -> halt(2)
+ end,
+ CoverExport("$(call core_native_path,$(COVER_DATA_DIR))/eunit.coverdata"),
+ halt()
+endef
+
+EUNIT_ERL_OPTS += -pa $(TEST_DIR) $(CURDIR)/ebin
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+eunit: test-build cover-data-dir
+ $(gen_verbose) $(call erlang,$(call eunit.erl,['$(t)']),$(EUNIT_ERL_OPTS))
+else
+eunit: test-build cover-data-dir
+ $(gen_verbose) $(call erlang,$(call eunit.erl,fun $(t)/0),$(EUNIT_ERL_OPTS))
+endif
+else
+EUNIT_EBIN_MODS = $(notdir $(basename $(ERL_FILES) $(BEAM_FILES)))
+EUNIT_TEST_MODS = $(notdir $(basename $(call core_find,$(TEST_DIR)/,*.erl)))
+
+EUNIT_MODS = $(foreach mod,$(EUNIT_EBIN_MODS) $(filter-out \
+ $(patsubst %,%_tests,$(EUNIT_EBIN_MODS)),$(EUNIT_TEST_MODS)),'$(mod)')
+
+eunit: test-build $(if $(IS_APP)$(ROOT_DIR),,apps-eunit) cover-data-dir
+ifneq ($(wildcard src/ $(TEST_DIR)),)
+ $(gen_verbose) $(call erlang,$(call eunit.erl,[$(call comma_list,$(EUNIT_MODS))]),$(EUNIT_ERL_OPTS))
+endif
+
+ifneq ($(ALL_APPS_DIRS),)
+apps-eunit: test-build
+ $(verbose) eunit_retcode=0 ; for app in $(ALL_APPS_DIRS); do $(MAKE) -C $$app eunit IS_APP=1; \
+ [ $$? -ne 0 ] && eunit_retcode=1 ; done ; \
+ exit $$eunit_retcode
+endif
+endif
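+
+# Example (illustrative; the names are assumptions): "make eunit t=my_module"
+# runs the EUnit tests of one module, while "make eunit t=my_module:my_test"
+# runs the single zero-arity test function my_module:my_test/0.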
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter proper,$(DEPS) $(TEST_DEPS)),proper)
+.PHONY: proper
+
+# Targets.
+
+tests:: proper
+
+define proper_check.erl
+ $(call cover.erl)
+ code:add_pathsa([
+ "$(call core_native_path,$(CURDIR)/ebin)",
+ "$(call core_native_path,$(DEPS_DIR)/*/ebin)",
+ "$(call core_native_path,$(TEST_DIR))"]),
+ Module = fun(M) ->
+ [true] =:= lists:usort([
+ case atom_to_list(F) of
+ "prop_" ++ _ ->
+ io:format("Testing ~p:~p/0~n", [M, F]),
+ proper:quickcheck(M:F(), nocolors);
+ _ ->
+ true
+ end
+ || {F, 0} <- M:module_info(exports)])
+ end,
+ try begin
+ CoverSetup(),
+ Res = case $(1) of
+ all -> [true] =:= lists:usort([Module(M) || M <- [$(call comma_list,$(3))]]);
+ module -> Module($(2));
+ function -> proper:quickcheck($(2), nocolors)
+ end,
+ CoverExport("$(COVER_DATA_DIR)/proper.coverdata"),
+ Res
+ end of
+ true -> halt(0);
+ _ -> halt(1)
+ catch error:undef ->
+ io:format("Undefined property or module?~n~p~n", [erlang:get_stacktrace()]),
+ halt(0)
+ end.
+endef
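+
+# Example (illustrative): only zero-arity "prop_" functions are picked up by
+# the all mode above, e.g. in a test module:
+#
+#     -include_lib("proper/include/proper.hrl").
+#
+#     prop_reverse() ->
+#         ?FORALL(L, list(integer()),
+#             lists:reverse(lists:reverse(L)) =:= L).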
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+proper: test-build cover-data-dir
+ $(verbose) $(call erlang,$(call proper_check.erl,module,$(t)))
+else
+proper: test-build cover-data-dir
+ $(verbose) echo Testing $(t)/0
+ $(verbose) $(call erlang,$(call proper_check.erl,function,$(t)()))
+endif
+else
+proper: test-build cover-data-dir
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(wildcard ebin/*.beam) $(call core_find,$(TEST_DIR)/,*.beam))))))
+ $(gen_verbose) $(call erlang,$(call proper_check.erl,all,undefined,$(MODULES)))
+endif
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Verbosity.
+
+proto_verbose_0 = @echo " PROTO " $(filter %.proto,$(?F));
+proto_verbose = $(proto_verbose_$(V))
+
+# Core targets.
+
+ifneq ($(wildcard src/),)
+ifneq ($(filter gpb protobuffs,$(BUILD_DEPS) $(DEPS)),)
+PROTO_FILES := $(filter %.proto,$(ALL_SRC_FILES))
+ERL_FILES += $(addprefix src/,$(patsubst %.proto,%_pb.erl,$(notdir $(PROTO_FILES))))
+
+ifeq ($(PROTO_FILES),)
+$(ERLANG_MK_TMP)/last-makefile-change-protobuffs:
+ $(verbose) :
+else
+# Rebuild proto files when the Makefile changes.
+# We exclude $(PROJECT).d to avoid a circular dependency.
+$(ERLANG_MK_TMP)/last-makefile-change-protobuffs: $(filter-out $(PROJECT).d,$(MAKEFILE_LIST)) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(PROTO_FILES); \
+ fi
+ $(verbose) touch $@
+
+$(PROJECT).d:: $(ERLANG_MK_TMP)/last-makefile-change-protobuffs
+endif
+
+ifeq ($(filter gpb,$(BUILD_DEPS) $(DEPS)),)
+define compile_proto.erl
+ [begin
+ protobuffs_compile:generate_source(F, [
+ {output_include_dir, "./include"},
+ {output_src_dir, "./src"}])
+ end || F <- string:tokens("$1", " ")],
+ halt().
+endef
+else
+define compile_proto.erl
+ [begin
+ gpb_compile:file(F, [
+ {include_as_lib, true},
+ {module_name_suffix, "_pb"},
+ {o_hrl, "./include"},
+ {o_erl, "./src"}])
+ end || F <- string:tokens("$1", " ")],
+ halt().
+endef
+endif
+
+ifneq ($(PROTO_FILES),)
+$(PROJECT).d:: $(PROTO_FILES)
+ $(verbose) mkdir -p ebin/ include/
+ $(if $(strip $?),$(proto_verbose) $(call erlang,$(call compile_proto.erl,$?)))
+endif
+endif
+endif
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: relx-rel relx-relup distclean-relx-rel run
+
+# Configuration.
+
+RELX ?= $(ERLANG_MK_TMP)/relx
+RELX_CONFIG ?= $(CURDIR)/relx.config
+
+RELX_URL ?= https://erlang.mk/res/relx-v3.27.0
+RELX_OPTS ?=
+RELX_OUTPUT_DIR ?= _rel
+RELX_REL_EXT ?=
+RELX_TAR ?= 1
+
+ifdef SFX
+ RELX_TAR = 1
+endif
+
+ifeq ($(firstword $(RELX_OPTS)),-o)
+ RELX_OUTPUT_DIR = $(word 2,$(RELX_OPTS))
+else
+ RELX_OPTS += -o $(RELX_OUTPUT_DIR)
+endif
+
+# Core targets.
+
+ifeq ($(IS_DEP),)
+ifneq ($(wildcard $(RELX_CONFIG)),)
+rel:: relx-rel
+
+relup:: relx-relup
+endif
+endif
+
+distclean:: distclean-relx-rel
+
+# Plugin-specific targets.
+
+$(RELX): | $(ERLANG_MK_TMP)
+ $(gen_verbose) $(call core_http_get,$(RELX),$(RELX_URL))
+ $(verbose) chmod +x $(RELX)
+
+relx-rel: $(RELX) rel-deps app
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) release
+ $(verbose) $(MAKE) relx-post-rel
+ifeq ($(RELX_TAR),1)
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) tar
+endif
+
+relx-relup: $(RELX) rel-deps app
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) release
+ $(MAKE) relx-post-rel
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) relup $(if $(filter 1,$(RELX_TAR)),tar)
+
+distclean-relx-rel:
+ $(gen_verbose) rm -rf $(RELX_OUTPUT_DIR)
+
+# Default hooks.
+relx-post-rel::
+ $(verbose) :
+
+# Run target.
+
+ifeq ($(wildcard $(RELX_CONFIG)),)
+run::
+else
+
+define get_relx_release.erl
+ {ok, Config} = file:consult("$(call core_native_path,$(RELX_CONFIG))"),
+ {release, {Name, Vsn0}, _} = lists:keyfind(release, 1, Config),
+ Vsn = case Vsn0 of
+ {cmd, Cmd} -> os:cmd(Cmd);
+ semver -> "";
+ {semver, _} -> "";
+ _VsnStr -> Vsn0
+ end,
+ Extended = case lists:keyfind(extended_start_script, 1, Config) of
+ {_, true} -> "1";
+ _ -> ""
+ end,
+ io:format("~s ~s ~s", [Name, Vsn, Extended]),
+ halt(0).
+endef
+
+RELX_REL := $(shell $(call erlang,$(get_relx_release.erl)))
+RELX_REL_NAME := $(word 1,$(RELX_REL))
+RELX_REL_VSN := $(word 2,$(RELX_REL))
+RELX_REL_CMD := $(if $(word 3,$(RELX_REL)),console)
+
+ifeq ($(PLATFORM),msys2)
+RELX_REL_EXT := .cmd
+endif
+
+run:: all
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) $(RELX_REL_CMD)
+
+ifdef RELOAD
+rel::
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) ping
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) \
+ eval "io:format(\"~p~n\", [c:lm()])"
+endif
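+
+# Example (illustrative): with a release already running, "make rel RELOAD=1"
+# rebuilds it, pings the node and hot-loads the modified modules via c:lm().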
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Relx targets:" \
+ " run Compile the project, build the release and run it"
+
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, M Robert Martin <rob@version2beta.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: shell
+
+# Configuration.
+
+SHELL_ERL ?= erl
+SHELL_PATHS ?= $(CURDIR)/ebin $(TEST_DIR)
+SHELL_OPTS ?=
+
+ALL_SHELL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(SHELL_DEPS))
+
+# Core targets
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Shell targets:" \
+ " shell Run an erlang shell with SHELL_OPTS or a reasonable default"
+
+# Plugin-specific targets.
+
+$(foreach dep,$(SHELL_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+build-shell-deps:
+else
+build-shell-deps: $(ALL_SHELL_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_SHELL_DEPS_DIRS) ; do \
+ if [ -z "$(strip $(FULL))" ] && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ else \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ fi \
+ done
+endif
+
+shell:: build-shell-deps
+ $(gen_verbose) $(SHELL_ERL) -pa $(SHELL_PATHS) $(SHELL_OPTS)
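+
+# Example (hypothetical node name and cookie):
+#   make shell SHELL_OPTS="-sname dev -setcookie secret"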
+
+# Copyright 2017, Stanislaw Klekot <dozzie@jarowit.net>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-sphinx sphinx
+
+# Configuration.
+
+SPHINX_BUILD ?= sphinx-build
+SPHINX_SOURCE ?= doc
+SPHINX_CONFDIR ?=
+SPHINX_FORMATS ?= html
+SPHINX_DOCTREES ?= $(ERLANG_MK_TMP)/sphinx.doctrees
+SPHINX_OPTS ?=
+
+#sphinx_html_opts =
+#sphinx_html_output = html
+#sphinx_man_opts =
+#sphinx_man_output = man
+#sphinx_latex_opts =
+#sphinx_latex_output = latex
+
+# Helpers.
+
+sphinx_build_0 = @echo " SPHINX" $1; $(SPHINX_BUILD) -N -q
+sphinx_build_1 = $(SPHINX_BUILD) -N
+sphinx_build_2 = set -x; $(SPHINX_BUILD)
+sphinx_build = $(sphinx_build_$(V))
+
+define sphinx.build
+$(call sphinx_build,$1) -b $1 -d $(SPHINX_DOCTREES) $(if $(SPHINX_CONFDIR),-c $(SPHINX_CONFDIR)) $(SPHINX_OPTS) $(sphinx_$1_opts) -- $(SPHINX_SOURCE) $(call sphinx.output,$1)
+
+endef
+
+define sphinx.output
+$(if $(sphinx_$1_output),$(sphinx_$1_output),$1)
+endef
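+
+# Example: with SPHINX_FORMATS = html man and a (hypothetical)
+# sphinx_man_output = build/man, "make sphinx" runs sphinx-build twice,
+# writing HTML pages to ./html and man pages to ./build/man.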
+
+# Targets.
+
+ifneq ($(wildcard $(if $(SPHINX_CONFDIR),$(SPHINX_CONFDIR),$(SPHINX_SOURCE))/conf.py),)
+docs:: sphinx
+distclean:: distclean-sphinx
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Sphinx targets:" \
+ " sphinx Generate Sphinx documentation." \
+ "" \
+		"ReST sources and the 'conf.py' file are expected in the directory pointed to" \
+		"by SPHINX_SOURCE ('doc' by default). SPHINX_FORMATS lists the formats to build" \
+		"(only the 'html' format is generated by default); the target directory can be" \
+		'specified by setting sphinx_$${format}_output, for example: sphinx_html_output = output/html' \
+		"Additional Sphinx options can be set in SPHINX_OPTS."
+
+# Plugin-specific targets.
+
+sphinx:
+ $(foreach F,$(SPHINX_FORMATS),$(call sphinx.build,$F))
+
+distclean-sphinx:
+ $(gen_verbose) rm -rf $(filter-out $(SPHINX_SOURCE),$(foreach F,$(SPHINX_FORMATS),$(call sphinx.output,$F)))
+
+# Copyright (c) 2017, Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: show-ERL_LIBS show-ERLC_OPTS show-TEST_ERLC_OPTS
+
+show-ERL_LIBS:
+ @echo $(ERL_LIBS)
+
+show-ERLC_OPTS:
+ @$(foreach opt,$(ERLC_OPTS) -pa ebin -I include,echo "$(opt)";)
+
+show-TEST_ERLC_OPTS:
+ @$(foreach opt,$(TEST_ERLC_OPTS) -pa ebin -I include,echo "$(opt)";)
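+
+# Example: "make show-ERLC_OPTS" prints one erlc option per line, including
+# the implicit "-pa ebin -I include", which can be fed to external tools.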
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter triq,$(DEPS) $(TEST_DEPS)),triq)
+.PHONY: triq
+
+# Targets.
+
+tests:: triq
+
+define triq_check.erl
+ $(call cover.erl)
+ code:add_pathsa([
+ "$(call core_native_path,$(CURDIR)/ebin)",
+ "$(call core_native_path,$(DEPS_DIR)/*/ebin)",
+ "$(call core_native_path,$(TEST_DIR))"]),
+ try begin
+ CoverSetup(),
+ Res = case $(1) of
+ all -> [true] =:= lists:usort([triq:check(M) || M <- [$(call comma_list,$(3))]]);
+ module -> triq:check($(2));
+ function -> triq:check($(2))
+ end,
+ CoverExport("$(COVER_DATA_DIR)/triq.coverdata"),
+ Res
+ end of
+ true -> halt(0);
+ _ -> halt(1)
+ catch error:undef ->
+ io:format("Undefined property or module?~n~p~n", [erlang:get_stacktrace()]),
+ halt(0)
+ end.
+endef
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+triq: test-build cover-data-dir
+ $(verbose) $(call erlang,$(call triq_check.erl,module,$(t)))
+else
+triq: test-build cover-data-dir
+ $(verbose) echo Testing $(t)/0
+ $(verbose) $(call erlang,$(call triq_check.erl,function,$(t)()))
+endif
+else
+triq: test-build cover-data-dir
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(wildcard ebin/*.beam) $(call core_find,$(TEST_DIR)/,*.beam))))))
+ $(gen_verbose) $(call erlang,$(call triq_check.erl,all,undefined,$(MODULES)))
+endif
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Erlang Solutions Ltd.
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: xref distclean-xref
+
+# Configuration.
+
+ifeq ($(XREF_CONFIG),)
+ XREFR_ARGS :=
+else
+ XREFR_ARGS := -c $(XREF_CONFIG)
+endif
+
+XREFR ?= $(CURDIR)/xrefr
+export XREFR
+
+XREFR_URL ?= https://github.com/inaka/xref_runner/releases/download/1.1.0/xrefr
+
+# Core targets.
+
+help::
+ $(verbose) printf '%s\n' '' \
+ 'Xref targets:' \
+		'  xref        Run Xrefr using $$XREF_CONFIG as the config file, if defined'
+
+distclean:: distclean-xref
+
+# Plugin-specific targets.
+
+$(XREFR):
+ $(gen_verbose) $(call core_http_get,$(XREFR),$(XREFR_URL))
+ $(verbose) chmod +x $(XREFR)
+
+xref: deps app $(XREFR)
+ $(gen_verbose) $(XREFR) $(XREFR_ARGS)
+
+distclean-xref:
+ $(gen_verbose) rm -rf $(XREFR)
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Viktor Söderqvist <viktor@zuiderkwast.se>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+COVER_REPORT_DIR ?= cover
+COVER_DATA_DIR ?= $(COVER_REPORT_DIR)
+
+ifdef COVER
+COVER_APPS ?= $(notdir $(ALL_APPS_DIRS))
+COVER_DEPS ?=
+endif
+
+# Code coverage for Common Test.
+
+ifdef COVER
+ifdef CT_RUN
+ifneq ($(wildcard $(TEST_DIR)),)
+test-build:: $(TEST_DIR)/ct.cover.spec
+
+$(TEST_DIR)/ct.cover.spec: cover-data-dir
+ $(gen_verbose) printf "%s\n" \
+ "{incl_app, '$(PROJECT)', details}." \
+ "{incl_dirs, '$(PROJECT)', [\"$(call core_native_path,$(CURDIR)/ebin)\" \
+ $(foreach a,$(COVER_APPS),$(comma) \"$(call core_native_path,$(APPS_DIR)/$a/ebin)\") \
+ $(foreach d,$(COVER_DEPS),$(comma) \"$(call core_native_path,$(DEPS_DIR)/$d/ebin)\")]}." \
+ '{export,"$(call core_native_path,$(abspath $(COVER_DATA_DIR))/ct.coverdata)"}.' > $@
+
+CT_RUN += -cover $(TEST_DIR)/ct.cover.spec
+endif
+endif
+endif
+
+# Code coverage for other tools.
+
+ifdef COVER
+define cover.erl
+ CoverSetup = fun() ->
+ Dirs = ["$(call core_native_path,$(CURDIR)/ebin)"
+ $(foreach a,$(COVER_APPS),$(comma) "$(call core_native_path,$(APPS_DIR)/$a/ebin)")
+ $(foreach d,$(COVER_DEPS),$(comma) "$(call core_native_path,$(DEPS_DIR)/$d/ebin)")],
+ [begin
+ case filelib:is_dir(Dir) of
+ false -> false;
+ true ->
+ case cover:compile_beam_directory(Dir) of
+ {error, _} -> halt(1);
+ _ -> true
+ end
+ end
+ end || Dir <- Dirs]
+ end,
+ CoverExport = fun(Filename) -> cover:export(Filename) end,
+endef
+else
+define cover.erl
+ CoverSetup = fun() -> ok end,
+ CoverExport = fun(_) -> ok end,
+endef
+endif
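+
+# Plugins that embed $(call cover.erl) in their Erlang snippets (such as the
+# triq plugin above) call CoverSetup() before the run and CoverExport(File)
+# after it; when COVER is unset, both funs are no-ops.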
+
+# Core targets
+
+ifdef COVER
+ifneq ($(COVER_REPORT_DIR),)
+tests::
+ $(verbose) $(MAKE) --no-print-directory cover-report
+endif
+
+cover-data-dir: | $(COVER_DATA_DIR)
+
+$(COVER_DATA_DIR):
+ $(verbose) mkdir -p $(COVER_DATA_DIR)
+else
+cover-data-dir:
+endif
+
+clean:: coverdata-clean
+
+ifneq ($(COVER_REPORT_DIR),)
+distclean:: cover-report-clean
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Cover targets:" \
+		"  cover-report  Generate an HTML coverage report from previously collected" \
+ " cover data." \
+ " all.coverdata Merge all coverdata files into all.coverdata." \
+ "" \
+		"If COVER=1 is set, coverage data is generated by the eunit and ct targets." \
+		"The tests target additionally generates an HTML coverage report from the" \
+		"combined coverdata files of these testing tools. HTML reports can be disabled" \
+		"by setting COVER_REPORT_DIR to the empty string."
+
+# Plugin specific targets
+
+COVERDATA = $(filter-out $(COVER_DATA_DIR)/all.coverdata,$(wildcard $(COVER_DATA_DIR)/*.coverdata))
+
+.PHONY: coverdata-clean
+coverdata-clean:
+ $(gen_verbose) rm -f $(COVER_DATA_DIR)/*.coverdata $(TEST_DIR)/ct.cover.spec
+
+# Merge all coverdata files into one.
+define cover_export.erl
+ $(foreach f,$(COVERDATA),cover:import("$(f)") == ok orelse halt(1),)
+ cover:export("$(COVER_DATA_DIR)/$@"), halt(0).
+endef
+
+all.coverdata: $(COVERDATA) cover-data-dir
+ $(gen_verbose) $(call erlang,$(cover_export.erl))
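+
+# Example (assuming the eunit and ct plugins are in use):
+#   make tests COVER=1    # collect coverage and build the HTML report
+#   make all.coverdata    # merge the collected coverdata files afterwards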
+
+# These are only defined if COVER_REPORT_DIR is non-empty. Set COVER_REPORT_DIR to
+# empty if you want the coverdata files but not the HTML report.
+ifneq ($(COVER_REPORT_DIR),)
+
+.PHONY: cover-report-clean cover-report
+
+cover-report-clean:
+ $(gen_verbose) rm -rf $(COVER_REPORT_DIR)
+ifneq ($(COVER_REPORT_DIR),$(COVER_DATA_DIR))
+ $(if $(shell ls -A $(COVER_DATA_DIR)/),,$(verbose) rmdir $(COVER_DATA_DIR))
+endif
+
+ifeq ($(COVERDATA),)
+cover-report:
+else
+
+# Modules which include eunit.hrl always contain one line without coverage
+# because eunit defines test/0 which is never called. We compensate for this.
+EUNIT_HRL_MODS = $(subst $(space),$(comma),$(shell \
+ grep -H -e '^\s*-include.*include/eunit\.hrl"' src/*.erl \
+ | sed "s/^src\/\(.*\)\.erl:.*/'\1'/" | uniq))
+
+define cover_report.erl
+ $(foreach f,$(COVERDATA),cover:import("$(f)") == ok orelse halt(1),)
+ Ms = cover:imported_modules(),
+ [cover:analyse_to_file(M, "$(COVER_REPORT_DIR)/" ++ atom_to_list(M)
+ ++ ".COVER.html", [html]) || M <- Ms],
+ Report = [begin {ok, R} = cover:analyse(M, module), R end || M <- Ms],
+ EunitHrlMods = [$(EUNIT_HRL_MODS)],
+ Report1 = [{M, {Y, case lists:member(M, EunitHrlMods) of
+ true -> N - 1; false -> N end}} || {M, {Y, N}} <- Report],
+ TotalY = lists:sum([Y || {_, {Y, _}} <- Report1]),
+ TotalN = lists:sum([N || {_, {_, N}} <- Report1]),
+ Perc = fun(Y, N) -> case Y + N of 0 -> 100; S -> round(100 * Y / S) end end,
+ TotalPerc = Perc(TotalY, TotalN),
+ {ok, F} = file:open("$(COVER_REPORT_DIR)/index.html", [write]),
+ io:format(F, "<!DOCTYPE html><html>~n"
+ "<head><meta charset=\"UTF-8\">~n"
+ "<title>Coverage report</title></head>~n"
+ "<body>~n", []),
+ io:format(F, "<h1>Coverage</h1>~n<p>Total: ~p%</p>~n", [TotalPerc]),
+ io:format(F, "<table><tr><th>Module</th><th>Coverage</th></tr>~n", []),
+ [io:format(F, "<tr><td><a href=\"~p.COVER.html\">~p</a></td>"
+ "<td>~p%</td></tr>~n",
+ [M, M, Perc(Y, N)]) || {M, {Y, N}} <- Report1],
+ How = "$(subst $(space),$(comma)$(space),$(basename $(COVERDATA)))",
+ Date = "$(shell date -u "+%Y-%m-%dT%H:%M:%SZ")",
+ io:format(F, "</table>~n"
+ "<p>Generated using ~s and erlang.mk on ~s.</p>~n"
+ "</body></html>", [How, Date]),
+ halt().
+endef
+
+cover-report:
+ $(verbose) mkdir -p $(COVER_REPORT_DIR)
+ $(gen_verbose) $(call erlang,$(cover_report.erl))
+
+endif
+endif # ifneq ($(COVER_REPORT_DIR),)
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: sfx
+
+ifdef RELX_REL
+ifdef SFX
+
+# Configuration.
+
+SFX_ARCHIVE ?= $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/$(RELX_REL_NAME)-$(RELX_REL_VSN).tar.gz
+SFX_OUTPUT_FILE ?= $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME).run
+
+# Core targets.
+
+rel:: sfx
+
+# Plugin-specific targets.
+
+define sfx_stub
+#!/bin/sh
+
+TMPDIR=`mktemp -d`
+ARCHIVE=`awk '/^__ARCHIVE_BELOW__$$/ {print NR + 1; exit 0;}' $$0`
+FILENAME=$$(basename $$0)
+REL=$${FILENAME%.*}
+
+tail -n+$$ARCHIVE $$0 | tar -xzf - -C $$TMPDIR
+
+$$TMPDIR/bin/$$REL console
+RET=$$?
+
+rm -rf $$TMPDIR
+
+exit $$RET
+
+__ARCHIVE_BELOW__
+endef
+
+sfx:
+ $(verbose) $(call core_render,sfx_stub,$(SFX_OUTPUT_FILE))
+ $(gen_verbose) cat $(SFX_ARCHIVE) >> $(SFX_OUTPUT_FILE)
+ $(verbose) chmod +x $(SFX_OUTPUT_FILE)
+
+endif
+endif
+
+# Copyright (c) 2013-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# External plugins.
+
+DEP_PLUGINS ?=
+
+$(foreach p,$(DEP_PLUGINS),\
+ $(eval $(if $(findstring /,$p),\
+ $(call core_dep_plugin,$p,$(firstword $(subst /, ,$p))),\
+ $(call core_dep_plugin,$p/plugins.mk,$p))))
+
+help:: help-plugins
+
+help-plugins::
+ $(verbose) :
+
+# Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015-2016, Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Fetch dependencies recursively (without building them).
+
+.PHONY: fetch-deps fetch-doc-deps fetch-rel-deps fetch-test-deps \
+ fetch-shell-deps
+
+.PHONY: $(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+fetch-deps: $(ERLANG_MK_RECURSIVE_DEPS_LIST)
+fetch-doc-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST)
+fetch-rel-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST)
+fetch-test-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST)
+fetch-shell-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+ifneq ($(SKIP_DEPS),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST):
+ $(verbose) :> $@
+else
+# By default, we fetch "normal" dependencies. They are also included
+# regardless of the type of dependencies requested.
+#
+# $(ALL_DEPS_DIRS) includes $(BUILD_DEPS).
+
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_DOC_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_REL_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_TEST_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_SHELL_DEPS_DIRS)
+
+# Allow using fetch-deps and $(DEP_TYPES) to fetch multiple types of
+# dependencies with a single target.
+ifneq ($(filter doc,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_DOC_DEPS_DIRS)
+endif
+ifneq ($(filter rel,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_REL_DEPS_DIRS)
+endif
+ifneq ($(filter test,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_TEST_DEPS_DIRS)
+endif
+ifneq ($(filter shell,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_SHELL_DEPS_DIRS)
+endif
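+
+# Example (hypothetical): also fetch test dependencies along with the
+# normal ones:
+#   make fetch-deps DEP_TYPES="test"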
+
+ERLANG_MK_RECURSIVE_TMP_LIST := $(abspath $(ERLANG_MK_TMP)/recursive-tmp-deps-$(shell echo $$PPID).log)
+
+$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): | $(ERLANG_MK_TMP)
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_RECURSIVE_TMP_LIST)
+endif
+ $(verbose) set -e; for dep in $^ ; do \
+ if ! grep -qs ^$$dep$$ $(ERLANG_MK_RECURSIVE_TMP_LIST); then \
+ echo $$dep >> $(ERLANG_MK_RECURSIVE_TMP_LIST); \
+ if grep -qs -E "^[[:blank:]]*include[[:blank:]]+(erlang\.mk|.*/erlang\.mk|.*ERLANG_MK_FILENAME.*)$$" \
+ $$dep/GNUmakefile $$dep/makefile $$dep/Makefile; then \
+ $(MAKE) -C $$dep fetch-deps \
+ IS_DEP=1 \
+ ERLANG_MK_RECURSIVE_TMP_LIST=$(ERLANG_MK_RECURSIVE_TMP_LIST); \
+ fi \
+ fi \
+ done
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) sort < $(ERLANG_MK_RECURSIVE_TMP_LIST) | \
+ uniq > $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted
+ $(verbose) cmp -s $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted $@ \
+ || mv $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted $@
+ $(verbose) rm -f $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted
+ $(verbose) rm $(ERLANG_MK_RECURSIVE_TMP_LIST)
+endif
+endif # ifneq ($(SKIP_DEPS),)
+
+# List dependencies recursively.
+
+.PHONY: list-deps list-doc-deps list-rel-deps list-test-deps \
+ list-shell-deps
+
+list-deps: $(ERLANG_MK_RECURSIVE_DEPS_LIST)
+list-doc-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST)
+list-rel-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST)
+list-test-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST)
+list-shell-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+list-deps list-doc-deps list-rel-deps list-test-deps list-shell-deps:
+ $(verbose) cat $^
+
+# Query dependencies recursively.
+
+.PHONY: query-deps query-doc-deps query-rel-deps query-test-deps \
+ query-shell-deps
+
+QUERY ?= name fetch_method repo version
+
+define query_target
+$(1): $(2) clean-tmp-query.log
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(4)
+endif
+ $(verbose) $(foreach dep,$(3),\
+ echo $(PROJECT): $(foreach q,$(QUERY),$(call query_$(q),$(dep))) >> $(4) ;)
+ $(if $(filter-out query-deps,$(1)),,\
+ $(verbose) set -e; for dep in $(3) ; do \
+ if grep -qs ^$$$$dep$$$$ $(ERLANG_MK_TMP)/query.log; then \
+ :; \
+ else \
+ echo $$$$dep >> $(ERLANG_MK_TMP)/query.log; \
+ $(MAKE) -C $(DEPS_DIR)/$$$$dep $$@ QUERY="$(QUERY)" IS_DEP=1 || true; \
+ fi \
+ done)
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) touch $(4)
+ $(verbose) cat $(4)
+endif
+endef
+
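+# Example: with the default QUERY, each line printed by "make query-deps"
+# has the form "<project>: <name> <fetch_method> <repo> <version>".
+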
+clean-tmp-query.log:
+ifeq ($(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_TMP)/query.log
+endif
+
+$(eval $(call query_target,query-deps,$(ERLANG_MK_RECURSIVE_DEPS_LIST),$(BUILD_DEPS) $(DEPS),$(ERLANG_MK_QUERY_DEPS_FILE)))
+$(eval $(call query_target,query-doc-deps,$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST),$(DOC_DEPS),$(ERLANG_MK_QUERY_DOC_DEPS_FILE)))
+$(eval $(call query_target,query-rel-deps,$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST),$(REL_DEPS),$(ERLANG_MK_QUERY_REL_DEPS_FILE)))
+$(eval $(call query_target,query-test-deps,$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST),$(TEST_DEPS),$(ERLANG_MK_QUERY_TEST_DEPS_FILE)))
+$(eval $(call query_target,query-shell-deps,$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST),$(SHELL_DEPS),$(ERLANG_MK_QUERY_SHELL_DEPS_FILE)))
diff --git a/deps/rabbitmq_stream/include/rabbit_stream.hrl b/deps/rabbitmq_stream/include/rabbit_stream.hrl
new file mode 100644
index 0000000000..0593893d93
--- /dev/null
+++ b/deps/rabbitmq_stream/include/rabbit_stream.hrl
@@ -0,0 +1,70 @@
+-define(COMMAND_PUBLISH, 0).
+-define(COMMAND_PUBLISH_CONFIRM, 1).
+-define(COMMAND_SUBSCRIBE, 2).
+-define(COMMAND_DELIVER, 3).
+-define(COMMAND_CREDIT, 4).
+-define(COMMAND_UNSUBSCRIBE, 5).
+-define(COMMAND_PUBLISH_ERROR, 6).
+-define(COMMAND_METADATA_UPDATE, 7).
+-define(COMMAND_METADATA, 8).
+-define(COMMAND_SASL_HANDSHAKE, 9).
+-define(COMMAND_SASL_AUTHENTICATE, 10).
+-define(COMMAND_TUNE, 11).
+-define(COMMAND_OPEN, 12).
+-define(COMMAND_CLOSE, 13).
+-define(COMMAND_HEARTBEAT, 14).
+-define(COMMAND_PEER_PROPERTIES, 15).
+-define(COMMAND_COMMIT_OFFSET, 16).
+-define(COMMAND_QUERY_OFFSET, 17).
+-define(COMMAND_CREATE_STREAM, 998).
+-define(COMMAND_DELETE_STREAM, 999).
+
+-define(VERSION_0, 0).
+
+-define(RESPONSE_CODE_OK, 0).
+-define(RESPONSE_CODE_STREAM_DOES_NOT_EXIST, 1).
+-define(RESPONSE_CODE_SUBSCRIPTION_ID_ALREADY_EXISTS, 2).
+-define(RESPONSE_CODE_SUBSCRIPTION_ID_DOES_NOT_EXIST, 3).
+-define(RESPONSE_CODE_STREAM_ALREADY_EXISTS, 4).
+-define(RESPONSE_CODE_STREAM_NOT_AVAILABLE, 5).
+-define(RESPONSE_SASL_MECHANISM_NOT_SUPPORTED, 6).
+-define(RESPONSE_AUTHENTICATION_FAILURE, 7).
+-define(RESPONSE_SASL_ERROR, 8).
+-define(RESPONSE_SASL_CHALLENGE, 9).
+-define(RESPONSE_SASL_AUTHENTICATION_FAILURE_LOOPBACK, 10).
+-define(RESPONSE_VHOST_ACCESS_FAILURE, 11).
+-define(RESPONSE_CODE_UNKNOWN_FRAME, 12).
+-define(RESPONSE_CODE_FRAME_TOO_LARGE, 13).
+-define(RESPONSE_CODE_INTERNAL_ERROR, 14).
+-define(RESPONSE_CODE_ACCESS_REFUSED, 15).
+-define(RESPONSE_CODE_PRECONDITION_FAILED, 16).
+
+-define(OFFSET_TYPE_FIRST, 0).
+-define(OFFSET_TYPE_LAST, 1).
+-define(OFFSET_TYPE_NEXT, 2).
+-define(OFFSET_TYPE_OFFSET, 3).
+-define(OFFSET_TYPE_TIMESTAMP, 4).
+
+-define(DEFAULT_INITIAL_CREDITS, 50000).
+-define(DEFAULT_CREDITS_REQUIRED_FOR_UNBLOCKING, 12500).
+-define(DEFAULT_FRAME_MAX, 1048576). %% 1 MiB
+-define(DEFAULT_HEARTBEAT, 60). %% 60 seconds
+
+-define(INFO_ITEMS,
+ [conn_name,
+ port,
+ peer_port,
+ host,
+ peer_host,
+ user,
+ vhost,
+ subscriptions,
+ connection_state,
+ auth_mechanism,
+ heartbeat,
+ frame_max,
+ client_properties,
+ connected_at
+ ]).
+
+-define(STREAM_GUIDE_URL, <<"https://rabbitmq.com/stream.html">>). \ No newline at end of file
diff --git a/deps/rabbitmq_stream/priv/schema/rabbitmq_stream.schema b/deps/rabbitmq_stream/priv/schema/rabbitmq_stream.schema
new file mode 100644
index 0000000000..0dc66d5382
--- /dev/null
+++ b/deps/rabbitmq_stream/priv/schema/rabbitmq_stream.schema
@@ -0,0 +1,158 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+%% ----------------------------------------------------------------------------
+%% RabbitMQ Stream Plugin
+%%
+%% See https://www.rabbitmq.com/stream.html for details
+%% ----------------------------------------------------------------------------
+
+% {rabbitmq_stream,
+% [%% Network Configuration - the format is generally the same as for the broker
+
+%% Listen only on localhost (ipv4 & ipv6) on a specific port.
+%% {tcp_listeners, [{"127.0.0.1", 5555},
+%% {"::1", 5555}]},
+
+{mapping, "stream.listeners.tcp", "rabbitmq_stream.tcp_listeners",[
+ {datatype, {enum, [none]}}
+]}.
+
+{mapping, "stream.listeners.tcp.$name", "rabbitmq_stream.tcp_listeners",[
+ {datatype, [integer, ip]}
+]}.
+
+{translation, "rabbitmq_stream.tcp_listeners",
+fun(Conf) ->
+ case cuttlefish:conf_get("stream.listeners.tcp", Conf, undefined) of
+ none -> [];
+ _ ->
+ Settings = cuttlefish_variable:filter_by_prefix("stream.listeners.tcp", Conf),
+ [ V || {_, V} <- Settings ]
+ end
+end}.
+
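+%% Example (hypothetical) rabbitmq.conf snippet, listening on localhost only:
+%%   stream.listeners.tcp.1 = 127.0.0.1:5552
+%%   stream.listeners.tcp.2 = ::1:5552
+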
+{mapping, "stream.tcp_listen_options", "rabbitmq_stream.tcp_listen_options", [
+ {datatype, {enum, [none]}}]}.
+
+{translation, "rabbitmq_stream.tcp_listen_options",
+fun(Conf) ->
+ case cuttlefish:conf_get("stream.tcp_listen_options", Conf, undefined) of
+ none -> [];
+ _ -> cuttlefish:invalid("Invalid stream.tcp_listen_options")
+ end
+end}.
+
+{mapping, "stream.tcp_listen_options.backlog", "rabbitmq_stream.tcp_listen_options.backlog", [
+ {datatype, integer}
+]}.
+
+{mapping, "stream.tcp_listen_options.nodelay", "rabbitmq_stream.tcp_listen_options.nodelay", [
+ {datatype, {enum, [true, false]}}
+]}.
+
+{mapping, "stream.tcp_listen_options.buffer", "rabbitmq_stream.tcp_listen_options.buffer",
+ [{datatype, integer}]}.
+
+{mapping, "stream.tcp_listen_options.delay_send", "rabbitmq_stream.tcp_listen_options.delay_send",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "stream.tcp_listen_options.dontroute", "rabbitmq_stream.tcp_listen_options.dontroute",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "stream.tcp_listen_options.exit_on_close", "rabbitmq_stream.tcp_listen_options.exit_on_close",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "stream.tcp_listen_options.fd", "rabbitmq_stream.tcp_listen_options.fd",
+ [{datatype, integer}]}.
+
+{mapping, "stream.tcp_listen_options.high_msgq_watermark", "rabbitmq_stream.tcp_listen_options.high_msgq_watermark",
+ [{datatype, integer}]}.
+
+{mapping, "stream.tcp_listen_options.high_watermark", "rabbitmq_stream.tcp_listen_options.high_watermark",
+ [{datatype, integer}]}.
+
+{mapping, "stream.tcp_listen_options.keepalive", "rabbitmq_stream.tcp_listen_options.keepalive",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "stream.tcp_listen_options.low_msgq_watermark", "rabbitmq_stream.tcp_listen_options.low_msgq_watermark",
+ [{datatype, integer}]}.
+
+{mapping, "stream.tcp_listen_options.low_watermark", "rabbitmq_stream.tcp_listen_options.low_watermark",
+ [{datatype, integer}]}.
+
+{mapping, "stream.tcp_listen_options.port", "rabbitmq_stream.tcp_listen_options.port",
+ [{datatype, integer}, {validators, ["port"]}]}.
+
+{mapping, "stream.tcp_listen_options.priority", "rabbitmq_stream.tcp_listen_options.priority",
+ [{datatype, integer}]}.
+
+{mapping, "stream.tcp_listen_options.recbuf", "rabbitmq_stream.tcp_listen_options.recbuf",
+ [{datatype, integer}]}.
+
+{mapping, "stream.tcp_listen_options.send_timeout", "rabbitmq_stream.tcp_listen_options.send_timeout",
+ [{datatype, integer}]}.
+
+{mapping, "stream.tcp_listen_options.send_timeout_close", "rabbitmq_stream.tcp_listen_options.send_timeout_close",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "stream.tcp_listen_options.sndbuf", "rabbitmq_stream.tcp_listen_options.sndbuf",
+ [{datatype, integer}]}.
+
+{mapping, "stream.tcp_listen_options.tos", "rabbitmq_stream.tcp_listen_options.tos",
+ [{datatype, integer}]}.
+
+{mapping, "stream.tcp_listen_options.linger.on", "rabbitmq_stream.tcp_listen_options.linger",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "stream.tcp_listen_options.linger.timeout", "rabbitmq_stream.tcp_listen_options.linger",
+ [{datatype, integer}, {validators, ["non_negative_integer"]}]}.
+
+{translation, "rabbitmq_stream.tcp_listen_options.linger",
+fun(Conf) ->
+ LingerOn = cuttlefish:conf_get("stream.tcp_listen_options.linger.on", Conf, false),
+ LingerTimeout = cuttlefish:conf_get("stream.tcp_listen_options.linger.timeout", Conf, 0),
+ {LingerOn, LingerTimeout}
+end}.
+
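+%% Example (hypothetical): enable SO_LINGER with a 10-second timeout:
+%%   stream.tcp_listen_options.linger.on = true
+%%   stream.tcp_listen_options.linger.timeout = 10
+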
+%% Number of Erlang processes that will accept connections for the TCP listener
+%%
+%% {num_tcp_acceptors, 10},
+
+{mapping, "stream.num_acceptors.tcp", "rabbitmq_stream.num_tcp_acceptors", [
+ {datatype, integer}
+]}.
+
+{mapping, "stream.initial_credits", "rabbitmq_stream.initial_credits", [
+ {datatype, integer}
+]}.
+
+{mapping, "stream.credits_required_for_unblocking", "rabbitmq_stream.credits_required_for_unblocking", [
+ {datatype, integer}
+]}.
+
+{mapping, "stream.frame_max", "rabbitmq_stream.frame_max", [
+ {datatype, integer}
+]}.
+
+{mapping, "stream.heartbeat", "rabbitmq_stream.heartbeat", [
+ {datatype, integer}
+]}.
+
+{mapping, "stream.advertised_host", "rabbitmq_stream.advertised_host", [
+ {datatype, string}
+]}.
+
+{translation, "rabbitmq_stream.advertised_host",
+fun(Conf) ->
+ list_to_binary(cuttlefish:conf_get("stream.advertised_host", Conf))
+end}.
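+
+%% Example (hypothetical): advertise a public endpoint to clients:
+%%   stream.advertised_host = stream.example.com
+%%   stream.advertised_port = 5552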
+
+{mapping, "stream.advertised_port", "rabbitmq_stream.advertised_port", [
+ {datatype, integer}
+]}. \ No newline at end of file
diff --git a/deps/rabbitmq_stream/rabbitmq-components.mk b/deps/rabbitmq_stream/rabbitmq-components.mk
new file mode 100644
index 0000000000..b2a3be8b35
--- /dev/null
+++ b/deps/rabbitmq_stream/rabbitmq-components.mk
@@ -0,0 +1,359 @@
+ifeq ($(.DEFAULT_GOAL),)
+# Set the default goal to `all` because this file defines some targets
+# before erlang.mk is included, which would otherwise cause the wrong
+# target to become the default.
+.DEFAULT_GOAL = all
+endif
+
+# PROJECT_VERSION defaults to:
+# 1. the version exported by rabbitmq-server-release;
+# 2. the version stored in `git-revisions.txt`, if it exists;
+# 3. a version based on git-describe(1), if it is a Git clone;
+# 4. 0.0.0
+
+PROJECT_VERSION := $(RABBITMQ_VERSION)
+
+ifeq ($(PROJECT_VERSION),)
+PROJECT_VERSION := $(shell \
+if test -f git-revisions.txt; then \
+ head -n1 git-revisions.txt | \
+ awk '{print $$$(words $(PROJECT_DESCRIPTION) version);}'; \
+else \
+ (git describe --dirty --abbrev=7 --tags --always --first-parent \
+ 2>/dev/null || echo rabbitmq_v0_0_0) | \
+ sed -e 's/^rabbitmq_v//' -e 's/^v//' -e 's/_/./g' -e 's/-/+/' \
+ -e 's/-/./g'; \
+fi)
+endif
+
+# --------------------------------------------------------------------
+# RabbitMQ components.
+# --------------------------------------------------------------------
+
+# For RabbitMQ repositories, we want to check out branches which match
+# the parent project. For instance, if the parent project is on a
+# release tag, dependencies must be on the same release tag. If the
+# parent project is on a topic branch, dependencies must be on the same
+# topic branch, or fall back to `stable` or `master`, whichever was the
+# base of the topic branch.
+
+dep_amqp_client = git_rmq rabbitmq-erlang-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_amqp10_client = git_rmq rabbitmq-amqp1.0-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_amqp10_common = git_rmq rabbitmq-amqp1.0-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbit = git_rmq rabbitmq-server $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbit_common = git_rmq rabbitmq-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_amqp1_0 = git_rmq rabbitmq-amqp1.0 $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_amqp = git_rmq rabbitmq-auth-backend-amqp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_cache = git_rmq rabbitmq-auth-backend-cache $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_http = git_rmq rabbitmq-auth-backend-http $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_ldap = git_rmq rabbitmq-auth-backend-ldap $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_oauth2 = git_rmq rabbitmq-auth-backend-oauth2 $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_mechanism_ssl = git_rmq rabbitmq-auth-mechanism-ssl $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_aws = git_rmq rabbitmq-aws $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_boot_steps_visualiser = git_rmq rabbitmq-boot-steps-visualiser $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_cli = git_rmq rabbitmq-cli $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_codegen = git_rmq rabbitmq-codegen $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_consistent_hash_exchange = git_rmq rabbitmq-consistent-hash-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_ct_client_helpers = git_rmq rabbitmq-ct-client-helpers $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_ct_helpers = git_rmq rabbitmq-ct-helpers $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_delayed_message_exchange = git_rmq rabbitmq-delayed-message-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_dotnet_client = git_rmq rabbitmq-dotnet-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_event_exchange = git_rmq rabbitmq-event-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_federation = git_rmq rabbitmq-federation $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_federation_management = git_rmq rabbitmq-federation-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_java_client = git_rmq rabbitmq-java-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_client = git_rmq rabbitmq-jms-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_cts = git_rmq rabbitmq-jms-cts $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_topic_exchange = git_rmq rabbitmq-jms-topic-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_lvc_exchange = git_rmq rabbitmq-lvc-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management = git_rmq rabbitmq-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_agent = git_rmq rabbitmq-management-agent $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_exchange = git_rmq rabbitmq-management-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_themes = git_rmq rabbitmq-management-themes $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_message_timestamp = git_rmq rabbitmq-message-timestamp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_metronome = git_rmq rabbitmq-metronome $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_mqtt = git_rmq rabbitmq-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_objc_client = git_rmq rabbitmq-objc-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_aws = git_rmq rabbitmq-peer-discovery-aws $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_common = git_rmq rabbitmq-peer-discovery-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_consul = git_rmq rabbitmq-peer-discovery-consul $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_etcd = git_rmq rabbitmq-peer-discovery-etcd $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_k8s = git_rmq rabbitmq-peer-discovery-k8s $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_prometheus = git_rmq rabbitmq-prometheus $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_random_exchange = git_rmq rabbitmq-random-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_recent_history_exchange = git_rmq rabbitmq-recent-history-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_routing_node_stamp = git_rmq rabbitmq-routing-node-stamp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_rtopic_exchange = git_rmq rabbitmq-rtopic-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_server_release = git_rmq rabbitmq-server-release $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_sharding = git_rmq rabbitmq-sharding $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_shovel = git_rmq rabbitmq-shovel $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_shovel_management = git_rmq rabbitmq-shovel-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_stomp = git_rmq rabbitmq-stomp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_stream = git_rmq rabbitmq-stream $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_toke = git_rmq rabbitmq-toke $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_top = git_rmq rabbitmq-top $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_tracing = git_rmq rabbitmq-tracing $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_trust_store = git_rmq rabbitmq-trust-store $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_test = git_rmq rabbitmq-test $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_dispatch = git_rmq rabbitmq-web-dispatch $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_stomp = git_rmq rabbitmq-web-stomp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_stomp_examples = git_rmq rabbitmq-web-stomp-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt = git_rmq rabbitmq-web-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt_examples = git_rmq rabbitmq-web-mqtt-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_website = git_rmq rabbitmq-website $(current_rmq_ref) $(base_rmq_ref) live master
+dep_toke = git_rmq toke $(current_rmq_ref) $(base_rmq_ref) master
+
+dep_rabbitmq_public_umbrella = git_rmq rabbitmq-public-umbrella $(current_rmq_ref) $(base_rmq_ref) master
+
+# Third-party dependency version pinning.
+#
+# We do this in this file, which is copied into all projects, to ensure
+# they all use the same versions. It avoids conflicts and makes it
+# possible to work with rabbitmq-public-umbrella.
+
+dep_accept = hex 0.3.5
+dep_cowboy = hex 2.8.0
+dep_cowlib = hex 2.9.1
+dep_jsx = hex 2.11.0
+dep_lager = hex 3.8.0
+dep_prometheus = git https://github.com/deadtrickster/prometheus.erl.git master
+dep_ra = git https://github.com/rabbitmq/ra.git master
+dep_ranch = hex 1.7.1
+dep_recon = hex 2.5.1
+dep_observer_cli = hex 1.5.4
+dep_stdout_formatter = hex 0.2.4
+dep_sysmon_handler = hex 1.3.0
+
+RABBITMQ_COMPONENTS = amqp_client \
+ amqp10_common \
+ amqp10_client \
+ rabbit \
+ rabbit_common \
+ rabbitmq_amqp1_0 \
+ rabbitmq_auth_backend_amqp \
+ rabbitmq_auth_backend_cache \
+ rabbitmq_auth_backend_http \
+ rabbitmq_auth_backend_ldap \
+ rabbitmq_auth_backend_oauth2 \
+ rabbitmq_auth_mechanism_ssl \
+ rabbitmq_aws \
+ rabbitmq_boot_steps_visualiser \
+ rabbitmq_cli \
+ rabbitmq_codegen \
+ rabbitmq_consistent_hash_exchange \
+ rabbitmq_ct_client_helpers \
+ rabbitmq_ct_helpers \
+ rabbitmq_delayed_message_exchange \
+ rabbitmq_dotnet_client \
+ rabbitmq_event_exchange \
+ rabbitmq_federation \
+ rabbitmq_federation_management \
+ rabbitmq_java_client \
+ rabbitmq_jms_client \
+ rabbitmq_jms_cts \
+ rabbitmq_jms_topic_exchange \
+ rabbitmq_lvc_exchange \
+ rabbitmq_management \
+ rabbitmq_management_agent \
+ rabbitmq_management_exchange \
+ rabbitmq_management_themes \
+ rabbitmq_message_timestamp \
+ rabbitmq_metronome \
+ rabbitmq_mqtt \
+ rabbitmq_objc_client \
+ rabbitmq_peer_discovery_aws \
+ rabbitmq_peer_discovery_common \
+ rabbitmq_peer_discovery_consul \
+ rabbitmq_peer_discovery_etcd \
+ rabbitmq_peer_discovery_k8s \
+ rabbitmq_prometheus \
+ rabbitmq_random_exchange \
+ rabbitmq_recent_history_exchange \
+ rabbitmq_routing_node_stamp \
+ rabbitmq_rtopic_exchange \
+ rabbitmq_server_release \
+ rabbitmq_sharding \
+ rabbitmq_shovel \
+ rabbitmq_shovel_management \
+ rabbitmq_stomp \
+ rabbitmq_stream \
+ rabbitmq_toke \
+ rabbitmq_top \
+ rabbitmq_tracing \
+ rabbitmq_trust_store \
+ rabbitmq_web_dispatch \
+ rabbitmq_web_mqtt \
+ rabbitmq_web_mqtt_examples \
+ rabbitmq_web_stomp \
+ rabbitmq_web_stomp_examples \
+ rabbitmq_website
+
+# Erlang.mk does not rebuild dependencies after they have been compiled
+# once, except for those listed in the `$(FORCE_REBUILD)` variable.
+#
+# We want all RabbitMQ components to always be rebuilt: this eases
+# working on several components at the same time.
+
+FORCE_REBUILD = $(RABBITMQ_COMPONENTS)
+
+# Several components have a custom erlang.mk/build.config, mainly
+# to disable eunit. Therefore, we can't use the top-level project's
+# erlang.mk copy.
+NO_AUTOPATCH += $(RABBITMQ_COMPONENTS)
+
+ifeq ($(origin current_rmq_ref),undefined)
+ifneq ($(wildcard .git),)
+current_rmq_ref := $(shell (\
+ ref=$$(LANG=C git branch --list | awk '/^\* \(.*detached / {ref=$$0; sub(/.*detached [^ ]+ /, "", ref); sub(/\)$$/, "", ref); print ref; exit;} /^\* / {ref=$$0; sub(/^\* /, "", ref); print ref; exit}');\
+ if test "$$(git rev-parse --short HEAD)" != "$$ref"; then echo "$$ref"; fi))
+else
+current_rmq_ref := master
+endif
+endif
+export current_rmq_ref
+
+ifeq ($(origin base_rmq_ref),undefined)
+ifneq ($(wildcard .git),)
+possible_base_rmq_ref := master
+ifeq ($(possible_base_rmq_ref),$(current_rmq_ref))
+base_rmq_ref := $(current_rmq_ref)
+else
+base_rmq_ref := $(shell \
+ (git rev-parse --verify -q master >/dev/null && \
+ git rev-parse --verify -q $(possible_base_rmq_ref) >/dev/null && \
+ git merge-base --is-ancestor $$(git merge-base master HEAD) $(possible_base_rmq_ref) && \
+ echo $(possible_base_rmq_ref)) || \
+ echo master)
+endif
+else
+base_rmq_ref := master
+endif
+endif
+export base_rmq_ref
+
+# Repository URL selection.
+#
+# First, we infer other components' location from the current project
+# repository URL, if it's a Git repository:
+# - We take the "origin" remote URL as the base
+# - The current project name and repository name are replaced by the
+#   target's properties:
+# eg. rabbitmq-common is replaced by rabbitmq-codegen
+# eg. rabbit_common is replaced by rabbitmq_codegen
+#
+# If cloning from this computed location fails, we fallback to RabbitMQ
+# upstream which is GitHub.
+
+# Macro to transform eg. "rabbit_common" to "rabbitmq-common".
+rmq_cmp_repo_name = $(word 2,$(dep_$(1)))
+
+# Upstream URL for the current project.
+RABBITMQ_COMPONENT_REPO_NAME := $(call rmq_cmp_repo_name,$(PROJECT))
+RABBITMQ_UPSTREAM_FETCH_URL ?= https://github.com/rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git
+RABBITMQ_UPSTREAM_PUSH_URL ?= git@github.com:rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git
+
+# Current URL for the current project. If this is not a Git clone,
+# default to the upstream Git repository.
+ifneq ($(wildcard .git),)
+git_origin_fetch_url := $(shell git config remote.origin.url)
+git_origin_push_url := $(shell git config remote.origin.pushurl || git config remote.origin.url)
+RABBITMQ_CURRENT_FETCH_URL ?= $(git_origin_fetch_url)
+RABBITMQ_CURRENT_PUSH_URL ?= $(git_origin_push_url)
+else
+RABBITMQ_CURRENT_FETCH_URL ?= $(RABBITMQ_UPSTREAM_FETCH_URL)
+RABBITMQ_CURRENT_PUSH_URL ?= $(RABBITMQ_UPSTREAM_PUSH_URL)
+endif
+
+# Macro to replace the following pattern:
+# 1. /foo.git -> /bar.git
+# 2. /foo -> /bar
+# 3. /foo/ -> /bar/
+subst_repo_name = $(patsubst %/$(1)/%,%/$(2)/%,$(patsubst %/$(1),%/$(2),$(patsubst %/$(1).git,%/$(2).git,$(3))))
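+
+# Example: $(call subst_repo_name,rabbitmq-server,rabbitmq-common,URL), where
+# URL is a hypothetical https://example.com/rabbitmq-server.git, expands to
+# https://example.com/rabbitmq-common.git.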
+
+# Macro to replace both the project's name (eg. "rabbit_common") and
+# repository name (eg. "rabbitmq-common") by the target's equivalent.
+#
+# This macro is kept on one line because we don't want whitespace in
+# the returned value, as it's used in $(dep_fetch_git_rmq) in a shell
+# single-quoted string.
+dep_rmq_repo = $(if $(dep_$(2)),$(call subst_repo_name,$(PROJECT),$(2),$(call subst_repo_name,$(RABBITMQ_COMPONENT_REPO_NAME),$(call rmq_cmp_repo_name,$(2)),$(1))),$(pkg_$(1)_repo))
+
+dep_rmq_commits = $(if $(dep_$(1)), \
+ $(wordlist 3,$(words $(dep_$(1))),$(dep_$(1))), \
+ $(pkg_$(1)_commit))
+
+define dep_fetch_git_rmq
+ fetch_url1='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_FETCH_URL),$(1))'; \
+ fetch_url2='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_FETCH_URL),$(1))'; \
+ if test "$$$$fetch_url1" != '$(RABBITMQ_CURRENT_FETCH_URL)' && \
+ git clone -q -n -- "$$$$fetch_url1" $(DEPS_DIR)/$(call dep_name,$(1)); then \
+ fetch_url="$$$$fetch_url1"; \
+ push_url='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_PUSH_URL),$(1))'; \
+ elif git clone -q -n -- "$$$$fetch_url2" $(DEPS_DIR)/$(call dep_name,$(1)); then \
+ fetch_url="$$$$fetch_url2"; \
+ push_url='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_PUSH_URL),$(1))'; \
+ fi; \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && ( \
+ $(foreach ref,$(call dep_rmq_commits,$(1)), \
+ git checkout -q $(ref) >/dev/null 2>&1 || \
+ ) \
+ (echo "error: no valid pathspec among: $(call dep_rmq_commits,$(1))" \
+ 1>&2 && false) ) && \
+ (test "$$$$fetch_url" = "$$$$push_url" || \
+ git remote set-url --push origin "$$$$push_url")
+endef
+
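+# Example: if the top-level project was cloned from a (hypothetical) fork at
+# https://example.com/me/rabbitmq-server.git, a dependency such as
+# rabbit_common is first fetched from https://example.com/me/rabbitmq-common.git
+# and only falls back to the GitHub upstream if that clone fails.
+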
+# --------------------------------------------------------------------
+# Component distribution.
+# --------------------------------------------------------------------
+
+list-dist-deps::
+ @:
+
+prepare-dist::
+ @:
+
+# --------------------------------------------------------------------
+# Umbrella-specific settings.
+# --------------------------------------------------------------------
+
+# If the top-level project is a RabbitMQ component, we override
+# $(DEPS_DIR) for this project to point to the top-level's one.
+#
+# We also verify that the guessed DEPS_DIR is actually named `deps`,
+# to rule out any situation where finding a `rabbitmq-components.mk`
+# in upper directories is merely a coincidence.
+
+possible_deps_dir_1 = $(abspath ..)
+possible_deps_dir_2 = $(abspath ../../..)
+
+ifeq ($(notdir $(possible_deps_dir_1)),deps)
+ifneq ($(wildcard $(possible_deps_dir_1)/../rabbitmq-components.mk),)
+deps_dir_overriden = 1
+DEPS_DIR ?= $(possible_deps_dir_1)
+DISABLE_DISTCLEAN = 1
+endif
+endif
+
+ifeq ($(deps_dir_overriden),)
+ifeq ($(notdir $(possible_deps_dir_2)),deps)
+ifneq ($(wildcard $(possible_deps_dir_2)/../rabbitmq-components.mk),)
+deps_dir_overriden = 1
+DEPS_DIR ?= $(possible_deps_dir_2)
+DISABLE_DISTCLEAN = 1
+endif
+endif
+endif
+
+ifneq ($(wildcard UMBRELLA.md),)
+DISABLE_DISTCLEAN = 1
+endif
+
+# We disable `make distclean` so $(DEPS_DIR) is not accidentally removed.
+
+ifeq ($(DISABLE_DISTCLEAN),1)
+ifneq ($(filter distclean distclean-deps,$(MAKECMDGOALS)),)
+SKIP_DEPS = 1
+endif
+endif
diff --git a/deps/rabbitmq_stream/src/Elixir.RabbitMQ.CLI.Ctl.Commands.ListStreamConnectionsCommand.erl b/deps/rabbitmq_stream/src/Elixir.RabbitMQ.CLI.Ctl.Commands.ListStreamConnectionsCommand.erl
new file mode 100644
index 0000000000..f185ab044e
--- /dev/null
+++ b/deps/rabbitmq_stream/src/Elixir.RabbitMQ.CLI.Ctl.Commands.ListStreamConnectionsCommand.erl
@@ -0,0 +1,95 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 2.0 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at https://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2020 VMware, Inc. or its affiliates. All rights reserved.
+
+-module('Elixir.RabbitMQ.CLI.Ctl.Commands.ListStreamConnectionsCommand').
+
+-include("rabbit_stream.hrl").
+
+-behaviour('Elixir.RabbitMQ.CLI.CommandBehaviour').
+
+-export([formatter/0,
+ scopes/0,
+ switches/0,
+ aliases/0,
+ usage/0,
+ usage_additional/0,
+ usage_doc_guides/0,
+ banner/2,
+ validate/2,
+ merge_defaults/2,
+ run/2,
+ output/2,
+ description/0,
+ help_section/0]).
+
+formatter() -> 'Elixir.RabbitMQ.CLI.Formatters.Table'.
+
+scopes() -> [ctl, diagnostics, streams].
+
+switches() -> [{verbose, boolean}].
+aliases() -> [{'V', verbose}].
+
+description() -> <<"Lists stream connections on the target node">>.
+
+help_section() ->
+ {plugin, stream}.
+
+validate(Args, _) ->
+ case 'Elixir.RabbitMQ.CLI.Ctl.InfoKeys':validate_info_keys(Args,
+ ?INFO_ITEMS) of
+ {ok, _} -> ok;
+ Error -> Error
+ end.
+
+merge_defaults([], Opts) ->
+ merge_defaults([<<"conn_name">>], Opts);
+merge_defaults(Args, Opts) ->
+ {Args, maps:merge(#{verbose => false}, Opts)}.
+
+usage() ->
+ <<"list_stream_connections [<column> ...]">>.
+
+usage_additional() ->
+ Prefix = <<" must be one of ">>,
+ InfoItems = 'Elixir.Enum':join(lists:usort(?INFO_ITEMS), <<", ">>),
+ [
+ {<<"<column>">>, <<Prefix/binary, InfoItems/binary>>}
+ ].
+
+usage_doc_guides() ->
+ [?STREAM_GUIDE_URL].
+
+run(Args, #{node := NodeName,
+ timeout := Timeout,
+ verbose := Verbose}) ->
+ InfoKeys = case Verbose of
+ true -> ?INFO_ITEMS;
+ false -> 'Elixir.RabbitMQ.CLI.Ctl.InfoKeys':prepare_info_keys(Args)
+ end,
+ Nodes = 'Elixir.RabbitMQ.CLI.Core.Helpers':nodes_in_cluster(NodeName),
+
+ 'Elixir.RabbitMQ.CLI.Ctl.RpcStream':receive_list_items(
+ NodeName,
+ rabbit_stream,
+ emit_connection_info_all,
+ [Nodes, InfoKeys],
+ Timeout,
+ InfoKeys,
+ length(Nodes)).
+
+banner(_, _) -> <<"Listing stream connections ...">>.
+
+output(Result, _Opts) ->
+ 'Elixir.RabbitMQ.CLI.DefaultOutput':output(Result).
diff --git a/deps/rabbitmq_stream/src/rabbit_stream.erl b/deps/rabbitmq_stream/src/rabbit_stream.erl
new file mode 100644
index 0000000000..8353d66d57
--- /dev/null
+++ b/deps/rabbitmq_stream/src/rabbit_stream.erl
@@ -0,0 +1,103 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 2.0 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at https://www.mozilla.org/en-US/MPL/2.0/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is Pivotal Software, Inc.
+%% Copyright (c) 2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_stream).
+-behaviour(application).
+
+-export([start/2, host/0, port/0, kill_connection/1]).
+-export([stop/1]).
+-export([emit_connection_info_local/3,
+ emit_connection_info_all/4,
+ list/0]).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+start(_Type, _Args) ->
+ rabbit_stream_sup:start_link().
+
+host() ->
+ case application:get_env(rabbitmq_stream, advertised_host, undefined) of
+ undefined ->
+ hostname_from_node();
+ Host ->
+ rabbit_data_coercion:to_binary(Host)
+ end.
+
+hostname_from_node() ->
+ case re:split(rabbit_data_coercion:to_binary(node()),
+ "@",
+ [{return, binary}, {parts, 2}]) of
+ [_, Hostname] ->
+ Hostname;
+ [_] ->
+            {ok, Hostname} = inet:gethostname(),
+            rabbit_data_coercion:to_binary(Hostname)
+ end.
+
+port() ->
+ case application:get_env(rabbitmq_stream, advertised_port, undefined) of
+ undefined ->
+ port_from_listener();
+ Port ->
+ Port
+ end.
+
+port_from_listener() ->
+ Listeners = rabbit_networking:node_listeners(node()),
+ Port = lists:foldl(fun(#listener{port = Port, protocol = stream}, _Acc) ->
+ Port;
+ (_, Acc) ->
+ Acc
+ end, undefined, Listeners),
+ Port.
+
+stop(_State) ->
+ ok.
+
+kill_connection(ConnectionName) ->
+ ConnectionNameBin = rabbit_data_coercion:to_binary(ConnectionName),
+ lists:foreach(fun(ConnectionPid) ->
+ ConnectionPid ! {infos, self()},
+ receive
+ {ConnectionPid, #{<<"connection_name">> := ConnectionNameBin}} ->
+ exit(ConnectionPid, kill);
+ {ConnectionPid, _ClientProperties} ->
+ ok
+ after 1000 ->
+ ok
+ end
+ end, pg_local:get_members(rabbit_stream_connections)).
+
+emit_connection_info_all(Nodes, Items, Ref, AggregatorPid) ->
+ Pids = [spawn_link(Node, rabbit_stream, emit_connection_info_local,
+ [Items, Ref, AggregatorPid])
+ || Node <- Nodes],
+ rabbit_control_misc:await_emitters_termination(Pids),
+ ok.
+
+emit_connection_info_local(Items, Ref, AggregatorPid) ->
+ rabbit_control_misc:emitting_map_with_exit_handler(
+ AggregatorPid, Ref, fun(Pid) ->
+ rabbit_stream_reader:info(Pid, Items)
+ end,
+ list()).
+
+list() ->
+ [Client
+ || {_, ListSupPid, _, _} <- supervisor2:which_children(rabbit_stream_sup),
+ {_, RanchSup, supervisor, _} <- supervisor2:which_children(ListSupPid),
+ {ranch_conns_sup, ConnSup, _, _} <- supervisor:which_children(RanchSup),
+ {_, CliSup, _, _} <- supervisor:which_children(ConnSup),
+ {rabbit_stream_reader, Client, _, _} <- supervisor:which_children(CliSup)]. \ No newline at end of file
diff --git a/deps/rabbitmq_stream/src/rabbit_stream_connection_sup.erl b/deps/rabbitmq_stream/src/rabbit_stream_connection_sup.erl
new file mode 100644
index 0000000000..3092a68517
--- /dev/null
+++ b/deps/rabbitmq_stream/src/rabbit_stream_connection_sup.erl
@@ -0,0 +1,49 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 2.0 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at https://www.mozilla.org/en-US/MPL/2.0/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is Pivotal Software, Inc.
+%% Copyright (c) 2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_stream_connection_sup).
+
+-behaviour(supervisor2).
+-behaviour(ranch_protocol).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-export([start_link/4, start_keepalive_link/0]).
+
+-export([init/1]).
+
+
+start_link(Ref, _Sock, Transport, Opts) ->
+ {ok, SupPid} = supervisor2:start_link(?MODULE, []),
+ {ok, KeepaliveSup} = supervisor2:start_child(
+ SupPid,
+ {rabbit_stream_keepalive_sup,
+ {rabbit_stream_connection_sup, start_keepalive_link, []},
+ intrinsic, infinity, supervisor, [rabbit_keepalive_sup]}),
+ {ok, ReaderPid} = supervisor2:start_child(
+ SupPid,
+ {rabbit_stream_reader,
+ {rabbit_stream_reader, start_link, [KeepaliveSup, Transport, Ref, Opts]},
+ intrinsic, ?WORKER_WAIT, worker, [rabbit_stream_reader]}),
+ {ok, SupPid, ReaderPid}.
+
+start_keepalive_link() ->
+ supervisor2:start_link(?MODULE, []).
+
+%%----------------------------------------------------------------------------
+
+init([]) ->
+ {ok, {{one_for_all, 0, 1}, []}}.
diff --git a/deps/rabbitmq_stream/src/rabbit_stream_manager.erl b/deps/rabbitmq_stream/src/rabbit_stream_manager.erl
new file mode 100644
index 0000000000..e418dd1022
--- /dev/null
+++ b/deps/rabbitmq_stream/src/rabbit_stream_manager.erl
@@ -0,0 +1,262 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 2.0 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at https://www.mozilla.org/en-US/MPL/2.0/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is Pivotal Software, Inc.
+%% Copyright (c) 2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_stream_manager).
+-behaviour(gen_server).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+%% API
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2]).
+-export([start_link/1, create/4, delete/3, lookup_leader/2, lookup_local_member/2, topology/2]).
+
+-record(state, {
+ configuration
+}).
+
+start_link(Conf) ->
+ gen_server:start_link({local, ?MODULE}, ?MODULE, [Conf], []).
+
+init([Conf]) ->
+ {ok, #state{configuration = Conf}}.
+
+-spec create(binary(), binary(), #{binary() => binary()}, binary()) ->
+    {ok, map()} | {error, reference_already_exists} | {error, internal_error} |
+    {error, validation_failed}.
+create(VirtualHost, Reference, Arguments, Username) ->
+ gen_server:call(?MODULE, {create, VirtualHost, Reference, Arguments, Username}).
+
+-spec delete(binary(), binary(), binary()) ->
+ {ok, deleted} | {error, reference_not_found}.
+delete(VirtualHost, Reference, Username) ->
+ gen_server:call(?MODULE, {delete, VirtualHost, Reference, Username}).
+
+-spec lookup_leader(binary(), binary()) -> pid() | cluster_not_found.
+lookup_leader(VirtualHost, Stream) ->
+ gen_server:call(?MODULE, {lookup_leader, VirtualHost, Stream}).
+
+-spec lookup_local_member(binary(), binary()) ->
+    {ok, pid()} | {error, not_found} | {error, not_available}.
+lookup_local_member(VirtualHost, Stream) ->
+ gen_server:call(?MODULE, {lookup_local_member, VirtualHost, Stream}).
+
+-spec topology(binary(), binary()) ->
+    {ok, #{leader_node => node() | undefined, replica_nodes => [node()]}} |
+    {error, stream_not_found} | {error, stream_not_available}.
+topology(VirtualHost, Stream) ->
+ gen_server:call(?MODULE, {topology, VirtualHost, Stream}).
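+
+%% Example (hypothetical virtual host and stream name):
+%%   {ok, #{leader_node := Leader, replica_nodes := Replicas}} =
+%%       rabbit_stream_manager:topology(<<"/">>, <<"my-stream">>).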
+
+stream_queue_arguments(Arguments) ->
+ stream_queue_arguments([{<<"x-queue-type">>, longstr, <<"stream">>}], Arguments).
+
+stream_queue_arguments(ArgumentsAcc, Arguments) when map_size(Arguments) =:= 0 ->
+ ArgumentsAcc;
+stream_queue_arguments(ArgumentsAcc, #{<<"max-length-bytes">> := Value} = Arguments) ->
+ stream_queue_arguments(
+ [{<<"x-max-length-bytes">>, long, binary_to_integer(Value)}] ++ ArgumentsAcc,
+ maps:remove(<<"max-length-bytes">>, Arguments)
+ );
+stream_queue_arguments(ArgumentsAcc, #{<<"max-age">> := Value} = Arguments) ->
+ stream_queue_arguments(
+ [{<<"x-max-age">>, longstr, Value}] ++ ArgumentsAcc,
+ maps:remove(<<"max-age">>, Arguments)
+ );
+stream_queue_arguments(ArgumentsAcc, #{<<"max-segment-size">> := Value} = Arguments) ->
+ stream_queue_arguments(
+ [{<<"x-max-segment-size">>, long, binary_to_integer(Value)}] ++ ArgumentsAcc,
+ maps:remove(<<"max-segment-size">>, Arguments)
+ );
+stream_queue_arguments(ArgumentsAcc, #{<<"initial-cluster-size">> := Value} = Arguments) ->
+ stream_queue_arguments(
+ [{<<"x-initial-cluster-size">>, long, binary_to_integer(Value)}] ++ ArgumentsAcc,
+ maps:remove(<<"initial-cluster-size">>, Arguments)
+ );
+stream_queue_arguments(ArgumentsAcc, #{<<"queue-leader-locator">> := Value} = Arguments) ->
+ stream_queue_arguments(
+ [{<<"x-queue-leader-locator">>, longstr, Value}] ++ ArgumentsAcc,
+ maps:remove(<<"queue-leader-locator">>, Arguments)
+ );
+stream_queue_arguments(ArgumentsAcc, _Arguments) ->
+ ArgumentsAcc.
+
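+%% Example: #{<<"max-length-bytes">> => <<"100000">>} is converted to
+%% [{<<"x-max-length-bytes">>, long, 100000},
+%%  {<<"x-queue-type">>, longstr, <<"stream">>}].
+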
+validate_stream_queue_arguments([]) ->
+ ok;
+validate_stream_queue_arguments([{<<"x-initial-cluster-size">>, long, ClusterSize} | _]) when ClusterSize =< 0 ->
+ error;
+validate_stream_queue_arguments([{<<"x-queue-leader-locator">>, longstr, Locator} | T]) ->
+ case lists:member(Locator, [<<"client-local">>,
+ <<"random">>,
+ <<"least-leaders">>]) of
+ true ->
+ validate_stream_queue_arguments(T);
+ false ->
+ error
+ end;
+validate_stream_queue_arguments([_ | T]) ->
+ validate_stream_queue_arguments(T).
+
+
+handle_call({create, VirtualHost, Reference, Arguments, Username}, _From, State) ->
+ Name = #resource{virtual_host = VirtualHost, kind = queue, name = Reference},
+ StreamQueueArguments = stream_queue_arguments(Arguments),
+ case validate_stream_queue_arguments(StreamQueueArguments) of
+ ok ->
+ Q0 = amqqueue:new(
+ Name,
+ none, true, false, none, StreamQueueArguments,
+ VirtualHost, #{user => Username}, rabbit_stream_queue
+ ),
+ try
+ case rabbit_stream_queue:declare(Q0, node()) of
+ {new, Q} ->
+ {reply, {ok, amqqueue:get_type_state(Q)}, State};
+ {existing, _} ->
+ {reply, {error, reference_already_exists}, State};
+ {error, Err} ->
+ rabbit_log:warning("Error while creating ~p stream, ~p~n", [Reference, Err]),
+ {reply, {error, internal_error}, State}
+ end
+ catch
+ exit:Error ->
+ rabbit_log:info("Error while creating ~p stream, ~p~n", [Reference, Error]),
+ {reply, {error, internal_error}, State}
+ end;
+ error ->
+ {reply, {error, validation_failed}, State}
+ end;
+handle_call({delete, VirtualHost, Reference, Username}, _From, State) ->
+ Name = #resource{virtual_host = VirtualHost, kind = queue, name = Reference},
+ rabbit_log:debug("Trying to delete stream ~p~n", [Reference]),
+ case rabbit_amqqueue:lookup(Name) of
+ {ok, Q} ->
+ rabbit_log:debug("Found queue record ~p, checking if it is a stream~n", [Reference]),
+ case is_stream_queue(Q) of
+ true ->
+ rabbit_log:debug("Queue record ~p is a stream, trying to delete it~n", [Reference]),
+ {ok, _} = rabbit_stream_queue:delete(Q, false, false, Username),
+ rabbit_log:debug("Stream ~p deleted~n", [Reference]),
+ {reply, {ok, deleted}, State};
+ _ ->
+ rabbit_log:debug("Queue record ~p is NOT a stream, returning error~n", [Reference]),
+ {reply, {error, reference_not_found}, State}
+ end;
+ {error, not_found} ->
+ rabbit_log:debug("Stream ~p not found, cannot delete it~n", [Reference]),
+ {reply, {error, reference_not_found}, State}
+ end;
+handle_call({lookup_leader, VirtualHost, Stream}, _From, State) ->
+ Name = #resource{virtual_host = VirtualHost, kind = queue, name = Stream},
+ Res = case rabbit_amqqueue:lookup(Name) of
+ {ok, Q} ->
+ case is_stream_queue(Q) of
+ true ->
+ #{leader_pid := LeaderPid} = amqqueue:get_type_state(Q),
+ LeaderPid;
+ _ ->
+ cluster_not_found
+ end;
+ _ ->
+ cluster_not_found
+ end,
+ {reply, Res, State};
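+%% A local member (leader or replica) is preferred for reads: the fold below
+%% picks a member process running on the current node and returns
+%% {error, not_available} when there is none.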
+handle_call({lookup_local_member, VirtualHost, Stream}, _From, State) ->
+ Name = #resource{virtual_host = VirtualHost, kind = queue, name = Stream},
+ Res = case rabbit_amqqueue:lookup(Name) of
+ {ok, Q} ->
+ case is_stream_queue(Q) of
+ true ->
+ #{leader_pid := LeaderPid, replica_pids := ReplicaPids} = amqqueue:get_type_state(Q),
+ LocalMember = lists:foldl(fun(Pid, Acc) ->
+ case node(Pid) =:= node() of
+ true ->
+ Pid;
+ false ->
+ Acc
+ end
+ end, undefined, [LeaderPid] ++ ReplicaPids),
+ case LocalMember of
+ undefined ->
+ {error, not_available};
+ Pid ->
+ {ok, Pid}
+ end;
+ _ ->
+ {error, not_found}
+ end;
+ {error, not_found} ->
+ case rabbit_amqqueue:not_found_or_absent_dirty(Name) of
+ not_found ->
+ {error, not_found};
+ _ ->
+ {error, not_available}
+ end;
+ _ ->
+ {error, not_found}
+ end,
+ {reply, Res, State};
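+%% Topology only reports members whose processes are confirmed alive (via a
+%% remote is_process_alive call); a dead leader is reported as undefined and
+%% ends up as a missing broker (index -1) in the metadata frame.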
+handle_call({topology, VirtualHost, Stream}, _From, State) ->
+ Name = #resource{virtual_host = VirtualHost, kind = queue, name = Stream},
+ Res = case rabbit_amqqueue:lookup(Name) of
+ {ok, Q} ->
+ case is_stream_queue(Q) of
+ true ->
+ QState = amqqueue:get_type_state(Q),
+ ProcessAliveFun = fun(Pid) ->
+ rpc:call(node(Pid), erlang, is_process_alive, [Pid], 10000)
+ end,
+ LeaderNode = case ProcessAliveFun(maps:get(leader_pid, QState)) of
+ true ->
+ maps:get(leader_node, QState);
+ _ ->
+ undefined
+ end,
+ ReplicaNodes = lists:foldl(fun(Pid, Acc) ->
+ case ProcessAliveFun(Pid) of
+ true ->
+ Acc ++ [node(Pid)];
+ _ ->
+ Acc
+ end
+ end, [], maps:get(replica_pids, QState)),
+ {ok, #{leader_node => LeaderNode, replica_nodes => ReplicaNodes}};
+ _ ->
+ {error, stream_not_found}
+ end;
+ {error, not_found} ->
+ case rabbit_amqqueue:not_found_or_absent_dirty(Name) of
+ not_found ->
+ {error, stream_not_found};
+ _ ->
+ {error, stream_not_available}
+ end;
+ _ ->
+ {error, stream_not_found}
+ end,
+ {reply, Res, State};
+handle_call(which_children, _From, State) ->
+ {reply, [], State}.
+
+handle_cast(_, State) ->
+ {noreply, State}.
+
+handle_info(Info, State) ->
+ rabbit_log:info("Received info ~p~n", [Info]),
+ {noreply, State}.
+
+is_stream_queue(Q) ->
+ case amqqueue:get_type(Q) of
+ rabbit_stream_queue ->
+ true;
+ _ ->
+ false
+ end. \ No newline at end of file
diff --git a/deps/rabbitmq_stream/src/rabbit_stream_reader.erl b/deps/rabbitmq_stream/src/rabbit_stream_reader.erl
new file mode 100644
index 0000000000..d3b4820256
--- /dev/null
+++ b/deps/rabbitmq_stream/src/rabbit_stream_reader.erl
@@ -0,0 +1,1274 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 2.0 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at https://www.mozilla.org/en-US/MPL/2.0/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is Pivotal Software, Inc.
+%% Copyright (c) 2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_stream_reader).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+-include("rabbit_stream.hrl").
+
+-record(consumer, {
+ socket :: rabbit_net:socket(), %% ranch_transport:socket(),
+ member_pid :: pid(),
+ offset :: osiris:offset(),
+ subscription_id :: integer(),
+ segment :: osiris_log:state(),
+ credit :: integer(),
+ stream :: binary()
+}).
+
+-record(stream_connection_state, {
+ data :: 'none' | binary(),
+ blocked :: boolean(),
+ consumers :: #{integer() => #consumer{}}
+}).
+
+-record(stream_connection, {
+ name :: string(),
+ %% server host
+ host,
+ %% client host
+ peer_host,
+ %% server port
+ port,
+ %% client port
+ peer_port,
+ auth_mechanism,
+ connected_at :: integer(),
+ helper_sup :: pid(),
+ socket :: rabbit_net:socket(),
+ stream_leaders :: #{binary() => pid()},
+ stream_subscriptions :: #{binary() => [integer()]},
+ credits :: atomics:atomics_ref(),
+ authentication_state :: atom(),
+ user :: 'undefined' | #user{},
+ virtual_host :: 'undefined' | binary(),
+ %% tcp_connected, peer_properties_exchanged, authenticating, authenticated,
+ %% tuning, tuned, opened, failure, closing, closing_done
+ connection_step :: atom(),
+ frame_max :: integer(),
+ heartbeat :: integer(),
+ heartbeater :: any(),
+ client_properties = #{} :: #{binary() => binary()},
+ monitors = #{} :: #{reference() => binary()},
+ stats_timer :: reference(),
+ send_file_oct :: atomics:atomics_ref()
+}).
+
+-record(configuration, {
+ initial_credits :: integer(),
+ credits_required_for_unblocking :: integer(),
+ frame_max :: integer(),
+ heartbeat :: integer()
+}).
+
+-define(RESPONSE_FRAME_SIZE, 10). % 2 (key) + 2 (version) + 4 (correlation ID) + 2 (response code)
+-define(CREATION_EVENT_KEYS,
+ [pid, name, port, peer_port, host,
+ peer_host, ssl, peer_cert_subject, peer_cert_issuer,
+ peer_cert_validity, auth_mechanism, ssl_protocol,
+ ssl_key_exchange, ssl_cipher, ssl_hash, protocol, user, vhost,
+ timeout, frame_max, channel_max, client_properties, connected_at,
+ node, user_who_performed_action]).
+-define(SIMPLE_METRICS, [pid, recv_oct, send_oct, reductions]).
+-define(OTHER_METRICS, [recv_cnt, send_cnt, send_pend, state, channels, garbage_collection,
+ timeout]).
+-define(AUTH_NOTIFICATION_INFO_KEYS,
+ [host, name, peer_host, peer_port, protocol, auth_mechanism,
+ ssl, ssl_protocol, ssl_cipher, peer_cert_issuer, peer_cert_subject,
+ peer_cert_validity]).
+
+%% API
+-export([start_link/4, init/1, info/2]).
+
+start_link(KeepaliveSup, Transport, Ref, Opts) ->
+ Pid = proc_lib:spawn_link(?MODULE, init,
+ [[KeepaliveSup, Transport, Ref, Opts]]),
+
+ {ok, Pid}.
+
+init([KeepaliveSup, Transport, Ref, #{initial_credits := InitialCredits,
+ credits_required_for_unblocking := CreditsRequiredBeforeUnblocking,
+ frame_max := FrameMax,
+ heartbeat := Heartbeat}]) ->
+ process_flag(trap_exit, true),
+ {ok, Sock} = rabbit_networking:handshake(Ref,
+ application:get_env(rabbitmq_stream, proxy_protocol, false)),
+ RealSocket = rabbit_net:unwrap_socket(Sock),
+ case rabbit_net:connection_string(Sock, inbound) of
+ {ok, ConnStr} ->
+ Credits = atomics:new(1, [{signed, true}]),
+ SendFileOct = atomics:new(1, [{signed, false}]),
+ atomics:put(SendFileOct, 1, 0),
+ init_credit(Credits, InitialCredits),
+ {PeerHost, PeerPort, Host, Port} =
+ socket_op(Sock, fun (S) -> rabbit_net:socket_ends(S, inbound) end),
+ Connection = #stream_connection{
+ name = rabbit_data_coercion:to_binary(ConnStr),
+ host = Host,
+ peer_host = PeerHost,
+ port = Port,
+ peer_port = PeerPort,
+ connected_at = os:system_time(milli_seconds),
+ auth_mechanism = none,
+ helper_sup = KeepaliveSup,
+ socket = RealSocket,
+ stream_leaders = #{},
+ stream_subscriptions = #{},
+ credits = Credits,
+ authentication_state = none,
+ connection_step = tcp_connected,
+ frame_max = FrameMax,
+ send_file_oct = SendFileOct},
+ State = #stream_connection_state{
+ consumers = #{}, blocked = false, data = none
+ },
+ Transport:setopts(RealSocket, [{active, once}]),
+
+ listen_loop_pre_auth(Transport, Connection, State, #configuration{
+ initial_credits = InitialCredits,
+ credits_required_for_unblocking = CreditsRequiredBeforeUnblocking,
+ frame_max = FrameMax,
+ heartbeat = Heartbeat
+ });
+ {Error, Reason} ->
+ rabbit_net:fast_close(RealSocket),
+ rabbit_log:warning("Closing connection because of ~p ~p~n", [Error, Reason])
+ end.
+
+socket_op(Sock, Fun) ->
+ RealSocket = rabbit_net:unwrap_socket(Sock),
+ case Fun(Sock) of
+ {ok, Res} -> Res;
+ {error, Reason} ->
+ rabbit_log:warning("Error during socket operation ~p~n", [Reason]),
+ rabbit_net:fast_close(RealSocket),
+ exit(normal)
+ end.
+
+init_credit(CreditReference, Credits) ->
+ atomics:put(CreditReference, 1, Credits).
+
+sub_credits(CreditReference, Credits) ->
+ atomics:sub(CreditReference, 1, Credits).
+
+add_credits(CreditReference, Credits) ->
+ atomics:add(CreditReference, 1, Credits).
+
+has_credits(CreditReference) ->
+ atomics:get(CreditReference, 1) > 0.
+
+has_enough_credits_to_unblock(CreditReference, CreditsRequiredForUnblocking) ->
+ atomics:get(CreditReference, 1) > CreditsRequiredForUnblocking.
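+
+%% Credit-based flow control: credits are consumed when publish frames are
+%% received (sub_credits/2) and restored when osiris confirms the writes
+%% (add_credits/2). When credits run out, the socket is no longer read and
+%% the heartbeat monitor is paused; reading resumes once enough confirms
+%% have arrived (see listen_loop_post_auth/4).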
+
+listen_loop_pre_auth(Transport, #stream_connection{socket = S} = Connection, State,
+ #configuration{frame_max = FrameMax, heartbeat = Heartbeat} = Configuration) ->
+ {OK, Closed, Error} = Transport:messages(),
+ %% FIXME introduce a timeout to complete the connection opening (an 'after' clause on this receive should be enough)
+ receive
+ {OK, S, Data} ->
+ #stream_connection{connection_step = ConnectionStep0} = Connection,
+ {Connection1, State1} = handle_inbound_data_pre_auth(Transport, Connection, State, Data),
+ Transport:setopts(S, [{active, once}]),
+ #stream_connection{connection_step = ConnectionStep} = Connection1,
+ rabbit_log:info("Transitioned from ~p to ~p~n", [ConnectionStep0, ConnectionStep]),
+ case ConnectionStep of
+ authenticated ->
+ TuneFrame = <<?COMMAND_TUNE:16, ?VERSION_0:16, FrameMax:32, Heartbeat:32>>,
+ frame(Transport, Connection1, TuneFrame),
+ listen_loop_pre_auth(Transport, Connection1#stream_connection{connection_step = tuning}, State1, Configuration);
+ opened ->
+ % TODO remove registration to rabbit_stream_connections
+ % just meant to be able to close the connection remotely
+ % should be possible once the connections are available in ctl list_connections
+ pg_local:join(rabbit_stream_connections, self()),
+ Connection2 = rabbit_event:init_stats_timer(Connection1, #stream_connection.stats_timer),
+ Connection3 = ensure_stats_timer(Connection2),
+ Infos = augment_infos_with_user_provided_connection_name(
+ infos(?CREATION_EVENT_KEYS, Connection3, State1),
+ Connection3
+ ),
+ rabbit_core_metrics:connection_created(self(), Infos),
+ rabbit_event:notify(connection_created, Infos),
+ rabbit_networking:register_non_amqp_connection(self()),
+ listen_loop_post_auth(Transport, Connection3, State1, Configuration);
+ failure ->
+ close(Transport, S);
+ _ ->
+ listen_loop_pre_auth(Transport, Connection1, State1, Configuration)
+ end;
+ {Closed, S} ->
+ rabbit_log:info("Socket ~w closed [~w]~n", [S, self()]),
+ ok;
+ {Error, S, Reason} ->
+ rabbit_log:info("Socket error ~p [~w]~n", [Reason, S, self()]);
+ M ->
+ rabbit_log:warning("Unknown message ~p~n", [M]),
+ close(Transport, S)
+ end.
+
+augment_infos_with_user_provided_connection_name(Infos, #stream_connection{client_properties = ClientProperties}) ->
+ case ClientProperties of
+ #{<<"connection_name">> := UserProvidedConnectionName} ->
+ [{user_provided_name, UserProvidedConnectionName} | Infos];
+ _ ->
+ Infos
+ end.
+
+close(Transport, S) ->
+ Transport:shutdown(S, write),
+ Transport:close(S).
+
+listen_loop_post_auth(Transport, #stream_connection{socket = S,
+ stream_subscriptions = StreamSubscriptions, credits = Credits,
+ heartbeater = Heartbeater, monitors = Monitors, client_properties = ClientProperties,
+ send_file_oct = SendFileOct} = Connection0,
+ #stream_connection_state{consumers = Consumers, blocked = Blocked} = State,
+ #configuration{credits_required_for_unblocking = CreditsRequiredForUnblocking} = Configuration) ->
+ Connection = ensure_stats_timer(Connection0),
+ {OK, Closed, Error} = Transport:messages(),
+ receive
+ {OK, S, Data} ->
+ {Connection1, State1} = handle_inbound_data_post_auth(Transport, Connection, State, Data),
+ #stream_connection{connection_step = Step} = Connection1,
+ case Step of
+ closing ->
+ close(Transport, S),
+ rabbit_networking:unregister_non_amqp_connection(self()),
+ notify_connection_closed(Connection1, State1);
+ close_sent ->
+ rabbit_log:debug("Transitioned to close_sent ~n"),
+ Transport:setopts(S, [{active, once}]),
+ listen_loop_post_close(Transport, Connection1, State1, Configuration);
+ _ ->
+ State2 = case Blocked of
+ true ->
+ case has_enough_credits_to_unblock(Credits, CreditsRequiredForUnblocking) of
+ true ->
+ Transport:setopts(S, [{active, once}]),
+ ok = rabbit_heartbeat:resume_monitor(Heartbeater),
+ State1#stream_connection_state{blocked = false};
+ false ->
+ State1
+ end;
+ false ->
+ case has_credits(Credits) of
+ true ->
+ Transport:setopts(S, [{active, once}]),
+ State1;
+ false ->
+ ok = rabbit_heartbeat:pause_monitor(Heartbeater),
+ State1#stream_connection_state{blocked = true}
+ end
+ end,
+ listen_loop_post_auth(Transport, Connection1, State2, Configuration)
+ end;
+ {'DOWN', MonitorRef, process, _OsirisPid, _Reason} ->
+ {Connection1, State1} = case Monitors of
+ #{MonitorRef := Stream} ->
+ Monitors1 = maps:remove(MonitorRef, Monitors),
+ C = Connection#stream_connection{monitors = Monitors1},
+ case clean_state_after_stream_deletion_or_failure(Stream, C, State) of
+ {cleaned, NewConnection, NewState} ->
+ StreamSize = byte_size(Stream),
+ FrameSize = 2 + 2 + 2 + 2 + StreamSize,
+ Transport:send(S, [<<FrameSize:32, ?COMMAND_METADATA_UPDATE:16, ?VERSION_0:16,
+ ?RESPONSE_CODE_STREAM_NOT_AVAILABLE:16, StreamSize:16, Stream/binary>>]),
+ {NewConnection, NewState};
+ {not_cleaned, SameConnection, SameState} ->
+ {SameConnection, SameState}
+ end;
+ _ ->
+ {Connection, State}
+ end,
+ listen_loop_post_auth(Transport, Connection1, State1, Configuration);
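+ %% osiris delivers write confirmations as a list of {PublisherId, PublishingId}
+ %% pairs; the fold below batches consecutive entries for the same publisher
+ %% into a single PUBLISH_CONFIRM frame to limit the number of socket writes.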
+ {'$gen_cast', {queue_event, _QueueResource, {osiris_written, _QueueResource, CorrelationList}}} ->
+ [{FirstPublisherId, _FirstPublishingId} | _] = CorrelationList,
+ {LastPublisherId, LastPublishingIds, LastCount} = lists:foldl(fun({PublisherId, PublishingId}, {CurrentPublisherId, PublishingIds, Count}) ->
+ case PublisherId of
+ CurrentPublisherId ->
+ {CurrentPublisherId, [PublishingIds, <<PublishingId:64>>], Count + 1};
+ OtherPublisherId ->
+ FrameSize = 2 + 2 + 1 + 4 + Count * 8,
+ %% FIXME enforce max frame size
+ %% in practice, this should be necessary only for very large chunks and for very small frame size limits
+ Transport:send(S, [<<FrameSize:32, ?COMMAND_PUBLISH_CONFIRM:16, ?VERSION_0:16>>,
+ <<CurrentPublisherId:8>>,
+ <<Count:32>>, PublishingIds]),
+ {OtherPublisherId, <<PublishingId:64>>, 1}
+ end
+ end, {FirstPublisherId, <<>>, 0}, CorrelationList),
+ FrameSize = 2 + 2 + 1 + 4 + LastCount * 8,
+ Transport:send(S, [<<FrameSize:32, ?COMMAND_PUBLISH_CONFIRM:16, ?VERSION_0:16>>,
+ <<LastPublisherId:8>>,
+ <<LastCount:32>>, LastPublishingIds]),
+ CorrelationIdCount = length(CorrelationList),
+ add_credits(Credits, CorrelationIdCount),
+ State1 = case Blocked of
+ true ->
+ case has_enough_credits_to_unblock(Credits, CreditsRequiredForUnblocking) of
+ true ->
+ Transport:setopts(S, [{active, once}]),
+ ok = rabbit_heartbeat:resume_monitor(Heartbeater),
+ State#stream_connection_state{blocked = false};
+ false ->
+ State
+ end;
+ false ->
+ State
+ end,
+ listen_loop_post_auth(Transport, Connection, State1, Configuration);
+ {'$gen_cast', {queue_event, #resource{name = StreamName}, {osiris_offset, _QueueResource, -1}}} ->
+ rabbit_log:info("received osiris offset event for ~p with offset ~p~n", [StreamName, -1]),
+ listen_loop_post_auth(Transport, Connection, State, Configuration);
+ {'$gen_cast', {queue_event, #resource{name = StreamName}, {osiris_offset, _QueueResource, Offset}}} when Offset > -1 ->
+ {Connection1, State1} = case maps:get(StreamName, StreamSubscriptions, undefined) of
+ undefined ->
+ rabbit_log:info("osiris offset event for ~p, but no subscription (leftover messages after unsubscribe?)", [StreamName]),
+ {Connection, State};
+ [] ->
+ rabbit_log:info("osiris offset event for ~p, but no registered consumers!", [StreamName]),
+ {Connection#stream_connection{stream_subscriptions = maps:remove(StreamName, StreamSubscriptions)}, State};
+ CorrelationIds when is_list(CorrelationIds) ->
+ Consumers1 = lists:foldl(fun(CorrelationId, ConsumersAcc) ->
+ #{CorrelationId := Consumer} = ConsumersAcc,
+ #consumer{credit = Credit} = Consumer,
+ Consumer1 = case Credit of
+ 0 ->
+ Consumer;
+ _ ->
+ {{segment, Segment1}, {credit, Credit1}} = send_chunks(
+ Transport,
+ Consumer,
+ SendFileOct
+ ),
+ Consumer#consumer{segment = Segment1, credit = Credit1}
+ end,
+ ConsumersAcc#{CorrelationId => Consumer1}
+ end,
+ Consumers,
+ CorrelationIds),
+ {Connection, State#stream_connection_state{consumers = Consumers1}}
+ end,
+ listen_loop_post_auth(Transport, Connection1, State1, Configuration);
+ heartbeat_send ->
+ Frame = <<?COMMAND_HEARTBEAT:16, ?VERSION_0:16>>,
+ case catch frame(Transport, Connection, Frame) of
+ ok ->
+ listen_loop_post_auth(Transport, Connection, State, Configuration);
+ Unexpected ->
+ rabbit_log:info("Heartbeat send error ~p, closing connection~n", [Unexpected]),
+ demonitor_all_streams(Connection),
+ close(Transport, S)
+ end;
+ heartbeat_timeout ->
+ rabbit_log:info("Heartbeat timeout, closing connection~n"),
+ demonitor_all_streams(Connection),
+ close(Transport, S);
+ {infos, From} ->
+ From ! {self(), ClientProperties},
+ listen_loop_post_auth(Transport, Connection, State, Configuration);
+ {'$gen_call', From, info} ->
+ gen_server:reply(From, infos(?INFO_ITEMS, Connection, State)),
+ listen_loop_post_auth(Transport, Connection, State, Configuration);
+ {'$gen_call', From, {info, Items}} ->
+ gen_server:reply(From, infos(Items, Connection, State)),
+ listen_loop_post_auth(Transport, Connection, State, Configuration);
+ emit_stats ->
+ Connection1 = emit_stats(Connection, State),
+ listen_loop_post_auth(Transport, Connection1, State, Configuration);
+ {'$gen_cast', {force_event_refresh, Ref}} ->
+ Infos = augment_infos_with_user_provided_connection_name(
+ infos(?CREATION_EVENT_KEYS, Connection, State),
+ Connection
+ ),
+ rabbit_event:notify(connection_created, Infos, Ref),
+ Connection1 = rabbit_event:init_stats_timer(Connection, #stream_connection.stats_timer),
+ listen_loop_post_auth(Transport, Connection1, State, Configuration);
+ {'$gen_call', From, {shutdown, Explanation}} ->
+ % likely closing call from the management plugin
+ gen_server:reply(From, ok),
+ rabbit_log:info("Forcing stream connection ~p closing: ~p~n", [self(), Explanation]),
+ demonitor_all_streams(Connection),
+ rabbit_networking:unregister_non_amqp_connection(self()),
+ notify_connection_closed(Connection, State),
+ close(Transport, S),
+ ok;
+ {Closed, S} ->
+ demonitor_all_streams(Connection),
+ rabbit_networking:unregister_non_amqp_connection(self()),
+ notify_connection_closed(Connection, State),
+ rabbit_log:info("Socket ~w closed [~w]~n", [S, self()]),
+ ok;
+ {Error, S, Reason} ->
+ demonitor_all_streams(Connection),
+ rabbit_networking:unregister_non_amqp_connection(self()),
+ notify_connection_closed(Connection, State),
+ rabbit_log:info("Socket error ~p [~w]~n", [Reason, S, self()]);
+ M ->
+ rabbit_log:warning("Unknown message ~p~n", [M]),
+ %% FIXME send close
+ listen_loop_post_auth(Transport, Connection, State, Configuration)
+ end.
+
+listen_loop_post_close(Transport, #stream_connection{socket = S} = Connection, State, Configuration) ->
+ {OK, Closed, Error} = Transport:messages(),
+ %% FIXME demonitor streams
+ %% FIXME introduce a timeout to complete the connection closing (an 'after' clause on this receive should be enough)
+ receive
+ {OK, S, Data} ->
+ Transport:setopts(S, [{active, once}]),
+ {Connection1, State1} = handle_inbound_data_post_close(Transport, Connection, State, Data),
+ #stream_connection{connection_step = Step} = Connection1,
+ case Step of
+ closing_done ->
+ rabbit_log:debug("Received close confirmation from client"),
+ close(Transport, S),
+ rabbit_networking:unregister_non_amqp_connection(self()),
+ notify_connection_closed(Connection1, State1);
+ _ ->
+ Transport:setopts(S, [{active, once}]),
+ listen_loop_post_close(Transport, Connection1, State1, Configuration)
+ end;
+ {Closed, S} ->
+ rabbit_networking:unregister_non_amqp_connection(self()),
+ notify_connection_closed(Connection, State),
+ rabbit_log:info("Socket ~w closed [~w]~n", [S, self()]),
+ ok;
+ {Error, S, Reason} ->
+ rabbit_log:info("Socket error ~p [~w]~n", [Reason, S, self()]),
+ close(Transport, S),
+ rabbit_networking:unregister_non_amqp_connection(self()),
+ notify_connection_closed(Connection, State);
+ M ->
+ rabbit_log:warning("Ignored message on closing ~p~n", [M])
+ end.
+
+handle_inbound_data_pre_auth(Transport, Connection, State, Rest) ->
+ handle_inbound_data(Transport, Connection, State, Rest, fun handle_frame_pre_auth/5).
+
+handle_inbound_data_post_auth(Transport, Connection, State, Rest) ->
+ handle_inbound_data(Transport, Connection, State, Rest, fun handle_frame_post_auth/5).
+
+handle_inbound_data_post_close(Transport, Connection, State, Rest) ->
+ handle_inbound_data(Transport, Connection, State, Rest, fun handle_frame_post_close/5).
+
+handle_inbound_data(_Transport, Connection, State, <<>>, _HandleFrameFun) ->
+ {Connection, State};
+handle_inbound_data(Transport, #stream_connection{frame_max = FrameMax} = Connection,
+ #stream_connection_state{data = none} = State, <<Size:32, _Frame:Size/binary, _Rest/bits>>, _HandleFrameFun)
+ when FrameMax /= 0 andalso Size > FrameMax - 4 ->
+ CloseReason = <<"frame too large">>,
+ CloseReasonLength = byte_size(CloseReason),
+ CloseFrame = <<?COMMAND_CLOSE:16, ?VERSION_0:16, 1:32, ?RESPONSE_CODE_FRAME_TOO_LARGE:16,
+ CloseReasonLength:16, CloseReason:CloseReasonLength/binary>>,
+ frame(Transport, Connection, CloseFrame),
+ {Connection#stream_connection{connection_step = close_sent}, State};
+handle_inbound_data(Transport, Connection,
+ #stream_connection_state{data = none} = State, <<Size:32, Frame:Size/binary, Rest/bits>>, HandleFrameFun) ->
+ {Connection1, State1, Rest1} = HandleFrameFun(Transport, Connection, State, Frame, Rest),
+ handle_inbound_data(Transport, Connection1, State1, Rest1, HandleFrameFun);
+handle_inbound_data(_Transport, Connection, #stream_connection_state{data = none} = State, Data, _HandleFrameFun) ->
+ {Connection, State#stream_connection_state{data = Data}};
+handle_inbound_data(Transport, Connection, #stream_connection_state{data = Leftover} = State, Data, HandleFrameFun) ->
+ State1 = State#stream_connection_state{data = none},
+ %% FIXME avoid concatenation to avoid a new binary allocation
+ %% see osiris_replica:parse_chunk/3
+ handle_inbound_data(Transport, Connection, State1, <<Leftover/binary, Data/binary>>, HandleFrameFun).
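+
+%% Stream protocol frames are length-prefixed: <<Size:32, Frame:Size/binary>>.
+%% A partial frame is kept in #stream_connection_state.data and reassembled
+%% when the next TCP packet arrives; an oversized frame triggers a CLOSE
+%% frame with the frame_too_large response code.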
+
+generate_publishing_error_details(Acc, _Code, <<>>) ->
+ Acc;
+generate_publishing_error_details(Acc, Code, <<PublishingId:64, MessageSize:32, _Message:MessageSize/binary, Rest/binary>>) ->
+ generate_publishing_error_details(
+ <<Acc/binary, PublishingId:64, Code:16>>,
+ Code,
+ Rest).
+
+handle_frame_pre_auth(Transport, #stream_connection{socket = S} = Connection, State,
+ <<?COMMAND_PEER_PROPERTIES:16, ?VERSION_0:16, CorrelationId:32,
+ ClientPropertiesCount:32, ClientPropertiesFrame/binary>>, Rest) ->
+
+ {ClientProperties, _} = rabbit_stream_utils:parse_map(ClientPropertiesFrame, ClientPropertiesCount),
+
+ {ok, Product} = application:get_key(rabbit, description),
+ {ok, Version} = application:get_key(rabbit, vsn),
+
+ %% Get any configuration-specified server properties
+ RawConfigServerProps = application:get_env(rabbit,
+ server_properties, []),
+
+ ConfigServerProperties = lists:foldl(fun({K, V}, Acc) ->
+ maps:put(rabbit_data_coercion:to_binary(K), V, Acc)
+ end, #{}, RawConfigServerProps),
+
+ ServerProperties = maps:merge(ConfigServerProperties, #{
+ <<"product">> => Product,
+ <<"version">> => Version,
+ <<"cluster_name">> => rabbit_nodes:cluster_name(),
+ <<"platform">> => rabbit_misc:platform_and_version(),
+ <<"copyright">> => ?COPYRIGHT_MESSAGE,
+ <<"information">> => ?INFORMATION_MESSAGE
+ }),
+
+ ServerPropertiesCount = map_size(ServerProperties),
+
+ ServerPropertiesFragment = maps:fold(fun(K, V, Acc) ->
+ Key = rabbit_data_coercion:to_binary(K),
+ Value = rabbit_data_coercion:to_binary(V),
+ KeySize = byte_size(Key),
+ ValueSize = byte_size(Value),
+ <<Acc/binary, KeySize:16, Key:KeySize/binary, ValueSize:16, Value:ValueSize/binary>>
+ end, <<>>, ServerProperties),
+
+ Frame = <<?COMMAND_PEER_PROPERTIES:16, ?VERSION_0:16, CorrelationId:32, ?RESPONSE_CODE_OK:16,
+ ServerPropertiesCount:32, ServerPropertiesFragment/binary>>,
+ FrameSize = byte_size(Frame),
+
+ Transport:send(S, [<<FrameSize:32>>, <<Frame/binary>>]),
+ {Connection#stream_connection{client_properties = ClientProperties, authentication_state = peer_properties_exchanged}, State, Rest};
+handle_frame_pre_auth(Transport, #stream_connection{socket = S} = Connection, State,
+ <<?COMMAND_SASL_HANDSHAKE:16, ?VERSION_0:16, CorrelationId:32>>, Rest) ->
+
+ Mechanisms = rabbit_stream_utils:auth_mechanisms(S),
+ MechanismsFragment = lists:foldl(fun(M, Acc) ->
+ Size = byte_size(M),
+ <<Acc/binary, Size:16, M:Size/binary>>
+ end, <<>>, Mechanisms),
+ MechanismsCount = length(Mechanisms),
+ Frame = <<?COMMAND_SASL_HANDSHAKE:16, ?VERSION_0:16, CorrelationId:32, ?RESPONSE_CODE_OK:16,
+ MechanismsCount:32, MechanismsFragment/binary>>,
+ FrameSize = byte_size(Frame),
+
+ Transport:send(S, [<<FrameSize:32>>, <<Frame/binary>>]),
+ {Connection, State, Rest};
+handle_frame_pre_auth(Transport,
+ #stream_connection{socket = S,
+ authentication_state = AuthState0,
+ host = Host} = Connection0, State,
+ <<?COMMAND_SASL_AUTHENTICATE:16, ?VERSION_0:16, CorrelationId:32,
+ MechanismLength:16, Mechanism:MechanismLength/binary,
+ SaslFragment/binary>>, Rest) ->
+
+ SaslBin = case SaslFragment of
+ <<-1:32/signed>> ->
+ <<>>;
+ <<SaslBinaryLength:32, SaslBinary:SaslBinaryLength/binary>> ->
+ SaslBinary
+ end,
+
+ {Connection1, Rest1} = case rabbit_stream_utils:auth_mechanism_to_module(Mechanism, S) of
+ {ok, AuthMechanism} ->
+ AuthState = case AuthState0 of
+ none ->
+ AuthMechanism:init(S);
+ AS ->
+ AS
+ end,
+ RemoteAddress = list_to_binary(inet:ntoa(Host)),
+ C1 = Connection0#stream_connection{auth_mechanism = {Mechanism, AuthMechanism}},
+ {C2, FrameFragment} =
+ case AuthMechanism:handle_response(SaslBin, AuthState) of
+ {refused, Username, Msg, Args} ->
+ rabbit_core_metrics:auth_attempt_failed(RemoteAddress, Username, stream),
+ auth_fail(Username, Msg, Args, C1, State),
+ rabbit_log:warning(Msg, Args),
+ {C1#stream_connection{connection_step = failure}, <<?RESPONSE_AUTHENTICATION_FAILURE:16>>};
+ {protocol_error, Msg, Args} ->
+ rabbit_core_metrics:auth_attempt_failed(RemoteAddress, <<>>, stream),
+ notify_auth_result(none, user_authentication_failure,
+ [{error, rabbit_misc:format(Msg, Args)}],
+ C1, State),
+ rabbit_log:warning(Msg, Args),
+ {C1#stream_connection{connection_step = failure}, <<?RESPONSE_SASL_ERROR:16>>};
+ {challenge, Challenge, AuthState1} ->
+ rabbit_core_metrics:auth_attempt_succeeded(RemoteAddress, <<>>, stream),
+ ChallengeSize = byte_size(Challenge),
+ {C1#stream_connection{authentication_state = AuthState1, connection_step = authenticating},
+ <<?RESPONSE_SASL_CHALLENGE:16, ChallengeSize:32, Challenge/binary>>
+ };
+ {ok, User = #user{username = Username}} ->
+ case rabbit_access_control:check_user_loopback(Username, S) of
+ ok ->
+ rabbit_core_metrics:auth_attempt_succeeded(RemoteAddress, Username, stream),
+ notify_auth_result(Username, user_authentication_success,
+ [], C1, State),
+ {C1#stream_connection{authentication_state = done, user = User, connection_step = authenticated},
+ <<?RESPONSE_CODE_OK:16>>
+ };
+ not_allowed ->
+ rabbit_core_metrics:auth_attempt_failed(RemoteAddress, Username, stream),
+ rabbit_log:warning("User '~s' can only connect via localhost~n", [Username]),
+ {C1#stream_connection{connection_step = failure}, <<?RESPONSE_SASL_AUTHENTICATION_FAILURE_LOOPBACK:16>>}
+ end
+ end,
+ Frame = <<?COMMAND_SASL_AUTHENTICATE:16, ?VERSION_0:16, CorrelationId:32, FrameFragment/binary>>,
+ frame(Transport, C1, Frame),
+ {C2, Rest};
+ {error, _} ->
+ Frame = <<?COMMAND_SASL_AUTHENTICATE:16, ?VERSION_0:16, CorrelationId:32, ?RESPONSE_SASL_MECHANISM_NOT_SUPPORTED:16>>,
+ frame(Transport, Connection0, Frame),
+ {Connection0#stream_connection{connection_step = failure}, Rest}
+ end,
+
+ {Connection1, State, Rest1};
+handle_frame_pre_auth(_Transport, #stream_connection{helper_sup = SupPid, socket = Sock, name = ConnectionName} = Connection, State,
+ <<?COMMAND_TUNE:16, ?VERSION_0:16, FrameMax:32, Heartbeat:32>>, Rest) ->
+ rabbit_log:info("Tuning response ~p ~p ~n", [FrameMax, Heartbeat]),
+ Parent = self(),
+ %% sending a message to the main process so the heartbeat frame is sent from this main process
+ %% otherwise heartbeat frames can interleave with chunk delivery
+ %% (chunk delivery is made of 2 calls on the socket, one for the header and one send_file for the chunk,
+%% we don't want a heartbeat frame to sneak in between the two)
+ SendFun =
+ fun() ->
+ Parent ! heartbeat_send,
+ ok
+ end,
+ ReceiveFun = fun() -> Parent ! heartbeat_timeout end,
+ Heartbeater = rabbit_heartbeat:start(
+ SupPid, Sock, ConnectionName,
+ Heartbeat, SendFun, Heartbeat, ReceiveFun),
+
+ {Connection#stream_connection{connection_step = tuned, frame_max = FrameMax,
+ heartbeat = Heartbeat, heartbeater = Heartbeater}, State, Rest};
+handle_frame_pre_auth(Transport, #stream_connection{user = User, socket = S} = Connection, State,
+ <<?COMMAND_OPEN:16, ?VERSION_0:16, CorrelationId:32,
+ VirtualHostLength:16, VirtualHost:VirtualHostLength/binary>>, Rest) ->
+
+ %% FIXME enforce connection limit (see rabbit_reader:is_over_connection_limit/2)
+
+ {Connection1, Frame} = try
+ rabbit_access_control:check_vhost_access(User, VirtualHost, {socket, S}, #{}),
+ F = <<?COMMAND_OPEN:16, ?VERSION_0:16, CorrelationId:32, ?RESPONSE_CODE_OK:16>>,
+ %% FIXME check if vhost is alive (see rabbit_reader:is_vhost_alive/2)
+ {Connection#stream_connection{connection_step = opened, virtual_host = VirtualHost}, F}
+ catch
+ exit:_ ->
+ Fr = <<?COMMAND_OPEN:16, ?VERSION_0:16, CorrelationId:32, ?RESPONSE_VHOST_ACCESS_FAILURE:16>>,
+ {Connection#stream_connection{connection_step = failure}, Fr}
+ end,
+
+ frame(Transport, Connection1, Frame),
+
+ {Connection1, State, Rest};
+handle_frame_pre_auth(_Transport, Connection, State, <<?COMMAND_HEARTBEAT:16, ?VERSION_0:16>>, Rest) ->
+ rabbit_log:info("Received heartbeat frame pre auth~n"),
+ {Connection, State, Rest};
+handle_frame_pre_auth(_Transport, Connection, State, Frame, Rest) ->
+ rabbit_log:warning("unknown frame ~p ~p, closing connection.~n", [Frame, Rest]),
+ {Connection#stream_connection{connection_step = failure}, State, Rest}.
+
+auth_fail(Username, Msg, Args, Connection, ConnectionState) ->
+ notify_auth_result(Username, user_authentication_failure,
+ [{error, rabbit_misc:format(Msg, Args)}], Connection, ConnectionState).
+
+notify_auth_result(Username, AuthResult, ExtraProps, Connection, ConnectionState) ->
+ EventProps = [{connection_type, network},
+ {name, case Username of none -> ''; _ -> Username end}] ++
+ [case Item of
+ name -> {connection_name, i(name, Connection, ConnectionState)};
+ _ -> {Item, i(Item, Connection, ConnectionState)}
+ end || Item <- ?AUTH_NOTIFICATION_INFO_KEYS] ++
+ ExtraProps,
+ rabbit_event:notify(AuthResult, [P || {_, V} = P <- EventProps, V =/= '']).
+
+handle_frame_post_auth(Transport, #stream_connection{socket = S, credits = Credits,
+ virtual_host = VirtualHost, user = User} = Connection, State,
+ <<?COMMAND_PUBLISH:16, ?VERSION_0:16,
+ StreamSize:16, Stream:StreamSize/binary,
+ PublisherId:8/unsigned,
+ MessageCount:32, Messages/binary>>, Rest) ->
+ case rabbit_stream_utils:check_write_permitted(
+ #resource{name = Stream, kind = queue, virtual_host = VirtualHost},
+ User,
+ #{}) of
+ ok ->
+ case lookup_leader(Stream, Connection) of
+ cluster_not_found ->
+ FrameSize = 2 + 2 + 1 + 4 + (8 + 2) * MessageCount,
+ Details = generate_publishing_error_details(<<>>, ?RESPONSE_CODE_STREAM_DOES_NOT_EXIST, Messages),
+ Transport:send(S, [<<FrameSize:32, ?COMMAND_PUBLISH_ERROR:16, ?VERSION_0:16,
+ PublisherId:8,
+ MessageCount:32, Details/binary>>]),
+ {Connection, State, Rest};
+ {ClusterLeader, Connection1} ->
+ rabbit_stream_utils:write_messages(ClusterLeader, PublisherId, Messages),
+ sub_credits(Credits, MessageCount),
+ {Connection1, State, Rest}
+ end;
+ error ->
+ FrameSize = 2 + 2 + 1 + 4 + (8 + 2) * MessageCount,
+ Details = generate_publishing_error_details(<<>>, ?RESPONSE_CODE_ACCESS_REFUSED, Messages),
+ Transport:send(S, [<<FrameSize:32, ?COMMAND_PUBLISH_ERROR:16, ?VERSION_0:16,
+ PublisherId:8,
+ MessageCount:32, Details/binary>>]),
+ {Connection, State, Rest}
+ end;
+handle_frame_post_auth(Transport, #stream_connection{socket = Socket,
+ stream_subscriptions = StreamSubscriptions, virtual_host = VirtualHost, user = User,
+ send_file_oct = SendFileOct} = Connection,
+ #stream_connection_state{consumers = Consumers} = State,
+ <<?COMMAND_SUBSCRIBE:16, ?VERSION_0:16, CorrelationId:32, SubscriptionId:8/unsigned, StreamSize:16, Stream:StreamSize/binary,
+ OffsetType:16/signed, OffsetAndCredit/binary>>, Rest) ->
+ case rabbit_stream_utils:check_read_permitted(
+ #resource{name = Stream, kind = queue, virtual_host = VirtualHost},
+ User,
+ #{}) of
+ ok ->
+ case rabbit_stream_manager:lookup_local_member(VirtualHost, Stream) of
+ {error, not_available} ->
+ response(Transport, Connection, ?COMMAND_SUBSCRIBE, CorrelationId, ?RESPONSE_CODE_STREAM_NOT_AVAILABLE),
+ {Connection, State, Rest};
+ {error, not_found} ->
+ response(Transport, Connection, ?COMMAND_SUBSCRIBE, CorrelationId, ?RESPONSE_CODE_STREAM_DOES_NOT_EXIST),
+ {Connection, State, Rest};
+ {ok, LocalMemberPid} ->
+ case subscription_exists(StreamSubscriptions, SubscriptionId) of
+ true ->
+ response(Transport, Connection, ?COMMAND_SUBSCRIBE, CorrelationId, ?RESPONSE_CODE_SUBSCRIPTION_ID_ALREADY_EXISTS),
+ {Connection, State, Rest};
+ false ->
+ {OffsetSpec, Credit} = case OffsetType of
+ ?OFFSET_TYPE_FIRST ->
+ <<Crdt:16>> = OffsetAndCredit,
+ {first, Crdt};
+ ?OFFSET_TYPE_LAST ->
+ <<Crdt:16>> = OffsetAndCredit,
+ {last, Crdt};
+ ?OFFSET_TYPE_NEXT ->
+ <<Crdt:16>> = OffsetAndCredit,
+ {next, Crdt};
+ ?OFFSET_TYPE_OFFSET ->
+ <<Offset:64/unsigned, Crdt:16>> = OffsetAndCredit,
+ {Offset, Crdt};
+ ?OFFSET_TYPE_TIMESTAMP ->
+ <<Timestamp:64/signed, Crdt:16>> = OffsetAndCredit,
+ {{timestamp, Timestamp}, Crdt}
+ end,
+ {ok, Segment} = osiris:init_reader(LocalMemberPid, OffsetSpec),
+ ConsumerState = #consumer{
+ member_pid = LocalMemberPid, offset = OffsetSpec, subscription_id = SubscriptionId, socket = Socket,
+ segment = Segment,
+ credit = Credit,
+ stream = Stream
+ },
+
+ Connection1 = maybe_monitor_stream(LocalMemberPid, Stream, Connection),
+
+ response_ok(Transport, Connection, ?COMMAND_SUBSCRIBE, CorrelationId),
+
+ {{segment, Segment1}, {credit, Credit1}} = send_chunks(
+ Transport,
+ ConsumerState,
+ SendFileOct
+ ),
+ Consumers1 = Consumers#{SubscriptionId => ConsumerState#consumer{segment = Segment1, credit = Credit1}},
+
+ StreamSubscriptions1 =
+ case StreamSubscriptions of
+ #{Stream := SubscriptionIds} ->
+ StreamSubscriptions#{Stream => [SubscriptionId] ++ SubscriptionIds};
+ _ ->
+ StreamSubscriptions#{Stream => [SubscriptionId]}
+ end,
+ {Connection1#stream_connection{stream_subscriptions = StreamSubscriptions1}, State#stream_connection_state{consumers = Consumers1}, Rest}
+ end
+ end;
+ error ->
+ response(Transport, Connection, ?COMMAND_SUBSCRIBE, CorrelationId, ?RESPONSE_CODE_ACCESS_REFUSED),
+ {Connection, State, Rest}
+ end;
+handle_frame_post_auth(Transport, #stream_connection{stream_subscriptions = StreamSubscriptions,
+ stream_leaders = StreamLeaders} = Connection,
+ #stream_connection_state{consumers = Consumers} = State,
+ <<?COMMAND_UNSUBSCRIBE:16, ?VERSION_0:16, CorrelationId:32, SubscriptionId:8/unsigned>>, Rest) ->
+ case subscription_exists(StreamSubscriptions, SubscriptionId) of
+ false ->
+ response(Transport, Connection, ?COMMAND_UNSUBSCRIBE, CorrelationId, ?RESPONSE_CODE_SUBSCRIPTION_ID_DOES_NOT_EXIST),
+ {Connection, State, Rest};
+ true ->
+ #{SubscriptionId := Consumer} = Consumers,
+ Stream = Consumer#consumer.stream,
+ #{Stream := SubscriptionsForThisStream} = StreamSubscriptions,
+ SubscriptionsForThisStream1 = lists:delete(SubscriptionId, SubscriptionsForThisStream),
+ {Connection1, StreamSubscriptions1, StreamLeaders1} =
+ case length(SubscriptionsForThisStream1) of
+ 0 ->
+ %% no more subscriptions for this stream
+ %% we unregister even though it could affect publishing if the stream is published to
+ %% from this connection and is deleted.
+ %% to mitigate this, we remove the stream from the leaders cache
+ %% this way the stream leader will be looked up in the next publish command
+ %% and registered to.
+ C = demonitor_stream(Stream, Connection),
+ {C, maps:remove(Stream, StreamSubscriptions),
+ maps:remove(Stream, StreamLeaders)
+ };
+ _ ->
+ {Connection, StreamSubscriptions#{Stream => SubscriptionsForThisStream1}, StreamLeaders}
+ end,
+ Consumers1 = maps:remove(SubscriptionId, Consumers),
+ response_ok(Transport, Connection, ?COMMAND_UNSUBSCRIBE, CorrelationId),
+ {Connection1#stream_connection{
+ stream_subscriptions = StreamSubscriptions1,
+ stream_leaders = StreamLeaders1
+ }, State#stream_connection_state{consumers = Consumers1}, Rest}
+ end;
+handle_frame_post_auth(Transport, #stream_connection{socket = S, send_file_oct = SendFileOct} = Connection,
+ #stream_connection_state{consumers = Consumers} = State,
+ <<?COMMAND_CREDIT:16, ?VERSION_0:16, SubscriptionId:8/unsigned, Credit:16/signed>>, Rest) ->
+
+ case Consumers of
+ #{SubscriptionId := Consumer} ->
+ #consumer{credit = AvailableCredit} = Consumer,
+
+ {{segment, Segment1}, {credit, Credit1}} = send_chunks(
+ Transport,
+ Consumer,
+ AvailableCredit + Credit,
+ SendFileOct
+ ),
+
+ Consumer1 = Consumer#consumer{segment = Segment1, credit = Credit1},
+ {Connection, State#stream_connection_state{consumers = Consumers#{SubscriptionId => Consumer1}}, Rest};
+ _ ->
+ rabbit_log:warning("Giving credit to unknown subscription: ~p~n", [SubscriptionId]),
+ Frame = <<?COMMAND_CREDIT:16, ?VERSION_0:16, ?RESPONSE_CODE_SUBSCRIPTION_ID_DOES_NOT_EXIST:16, SubscriptionId:8>>,
+ FrameSize = byte_size(Frame),
+ Transport:send(S, [<<FrameSize:32>>, Frame]),
+ {Connection, State, Rest}
+ end;
+handle_frame_post_auth(_Transport, #stream_connection{virtual_host = VirtualHost, user = User} = Connection,
+ State,
+ <<?COMMAND_COMMIT_OFFSET:16, ?VERSION_0:16, _CorrelationId:32,
+ ReferenceSize:16, Reference:ReferenceSize/binary,
+ StreamSize:16, Stream:StreamSize/binary, Offset:64>>, Rest) ->
+
+ case rabbit_stream_utils:check_write_permitted(
+ #resource{name = Stream, kind = queue, virtual_host = VirtualHost},
+ User,
+ #{}) of
+ ok ->
+ case lookup_leader(Stream, Connection) of
+ cluster_not_found ->
+ rabbit_log:info("Could not find leader to commit offset on ~p~n", [Stream]),
+ %% FIXME commit offset is fire-and-forget, so no response even if error, change this?
+ {Connection, State, Rest};
+ {ClusterLeader, Connection1} ->
+ osiris:write_tracking(ClusterLeader, Reference, Offset),
+ {Connection1, State, Rest}
+ end;
+ error ->
+ %% FIXME commit offset is fire-and-forget, so no response even if error, change this?
+ rabbit_log:info("Not authorized to commit offset on ~p~n", [Stream]),
+ {Connection, State, Rest}
+ end;
+handle_frame_post_auth(Transport, #stream_connection{socket = S, virtual_host = VirtualHost, user = User} = Connection,
+ State,
+ <<?COMMAND_QUERY_OFFSET:16, ?VERSION_0:16, CorrelationId:32,
+ ReferenceSize:16, Reference:ReferenceSize/binary,
+ StreamSize:16, Stream:StreamSize/binary>>, Rest) ->
+ FrameSize = ?RESPONSE_FRAME_SIZE + 8,
+ {ResponseCode, Offset} = case rabbit_stream_utils:check_read_permitted(
+ #resource{name = Stream, kind = queue, virtual_host = VirtualHost},
+ User,
+ #{}) of
+ ok ->
+ case rabbit_stream_manager:lookup_local_member(VirtualHost, Stream) of
+ {error, not_found} ->
+ {?RESPONSE_CODE_STREAM_DOES_NOT_EXIST, 0};
+ {ok, LocalMemberPid} ->
+ {?RESPONSE_CODE_OK, case osiris:read_tracking(LocalMemberPid, Reference) of
+ undefined ->
+ 0;
+ Offt ->
+ Offt
+ end}
+ end;
+ error ->
+ {?RESPONSE_CODE_ACCESS_REFUSED, 0}
+ end,
+ Transport:send(S, [<<FrameSize:32, ?COMMAND_QUERY_OFFSET:16, ?VERSION_0:16>>,
+ <<CorrelationId:32>>, <<ResponseCode:16>>, <<Offset:64>>]),
+ {Connection, State, Rest};
+handle_frame_post_auth(Transport, #stream_connection{virtual_host = VirtualHost, user = #user{username = Username} = User} = Connection,
+ State,
+ <<?COMMAND_CREATE_STREAM:16, ?VERSION_0:16, CorrelationId:32, StreamSize:16, Stream:StreamSize/binary,
+ ArgumentsCount:32, ArgumentsBinary/binary>>, Rest) ->
+ case rabbit_stream_utils:enforce_correct_stream_name(Stream) of
+ {ok, StreamName} ->
+ {Arguments, _Rest} = rabbit_stream_utils:parse_map(ArgumentsBinary, ArgumentsCount),
+ case rabbit_stream_utils:check_configure_permitted(
+ #resource{name = StreamName, kind = queue, virtual_host = VirtualHost},
+ User,
+ #{}) of
+ ok ->
+ case rabbit_stream_manager:create(VirtualHost, StreamName, Arguments, Username) of
+ {ok, #{leader_pid := LeaderPid, replica_pids := ReturnedReplicas}} ->
+ rabbit_log:info("Created cluster with leader ~p and replicas ~p~n", [LeaderPid, ReturnedReplicas]),
+ response_ok(Transport, Connection, ?COMMAND_CREATE_STREAM, CorrelationId),
+ {Connection, State, Rest};
+ {error, validation_failed} ->
+ response(Transport, Connection, ?COMMAND_CREATE_STREAM, CorrelationId, ?RESPONSE_CODE_PRECONDITION_FAILED),
+ {Connection, State, Rest};
+ {error, reference_already_exists} ->
+ response(Transport, Connection, ?COMMAND_CREATE_STREAM, CorrelationId, ?RESPONSE_CODE_STREAM_ALREADY_EXISTS),
+ {Connection, State, Rest};
+ {error, _} ->
+ response(Transport, Connection, ?COMMAND_CREATE_STREAM, CorrelationId, ?RESPONSE_CODE_INTERNAL_ERROR),
+ {Connection, State, Rest}
+ end;
+ error ->
+ response(Transport, Connection, ?COMMAND_CREATE_STREAM, CorrelationId, ?RESPONSE_CODE_ACCESS_REFUSED),
+ {Connection, State, Rest}
+ end;
+ _ ->
+ response(Transport, Connection, ?COMMAND_CREATE_STREAM, CorrelationId, ?RESPONSE_CODE_PRECONDITION_FAILED),
+ {Connection, State, Rest}
+ end;
+handle_frame_post_auth(Transport, #stream_connection{socket = S, virtual_host = VirtualHost,
+ user = #user{username = Username} = User} = Connection, State,
+ <<?COMMAND_DELETE_STREAM:16, ?VERSION_0:16, CorrelationId:32, StreamSize:16, Stream:StreamSize/binary>>, Rest) ->
+ case rabbit_stream_utils:check_configure_permitted(
+ #resource{name = Stream, kind = queue, virtual_host = VirtualHost},
+ User,
+ #{}) of
+ ok ->
+ case rabbit_stream_manager:delete(VirtualHost, Stream, Username) of
+ {ok, deleted} ->
+ response_ok(Transport, Connection, ?COMMAND_DELETE_STREAM, CorrelationId),
+ {Connection1, State1} = case clean_state_after_stream_deletion_or_failure(Stream, Connection, State) of
+ {cleaned, NewConnection, NewState} ->
+ StreamSize = byte_size(Stream),
+ FrameSize = 2 + 2 + 2 + 2 + StreamSize,
+ Transport:send(S, [<<FrameSize:32, ?COMMAND_METADATA_UPDATE:16, ?VERSION_0:16,
+ ?RESPONSE_CODE_STREAM_NOT_AVAILABLE:16, StreamSize:16, Stream/binary>>]),
+ {NewConnection, NewState};
+ {not_cleaned, SameConnection, SameState} ->
+ {SameConnection, SameState}
+ end,
+ {Connection1, State1, Rest};
+ {error, reference_not_found} ->
+ response(Transport, Connection, ?COMMAND_DELETE_STREAM, CorrelationId, ?RESPONSE_CODE_STREAM_DOES_NOT_EXIST),
+ {Connection, State, Rest}
+ end;
+ error ->
+ response(Transport, Connection, ?COMMAND_DELETE_STREAM, CorrelationId, ?RESPONSE_CODE_ACCESS_REFUSED),
+ {Connection, State, Rest}
+ end;
+handle_frame_post_auth(Transport, #stream_connection{socket = S, virtual_host = VirtualHost} = Connection, State,
+ <<?COMMAND_METADATA:16, ?VERSION_0:16, CorrelationId:32, StreamCount:32, BinaryStreams/binary>>, Rest) ->
+ Streams = rabbit_stream_utils:extract_stream_list(BinaryStreams, []),
+
+ %% get the nodes involved in the streams
+ NodesMap = lists:foldl(fun(Stream, Acc) ->
+ case rabbit_stream_manager:topology(VirtualHost, Stream) of
+ {ok, #{leader_node := undefined, replica_nodes := ReplicaNodes}} ->
+ lists:foldl(fun(ReplicaNode, NodesAcc) -> maps:put(ReplicaNode, ok, NodesAcc) end, Acc, ReplicaNodes);
+ {ok, #{leader_node := LeaderNode, replica_nodes := ReplicaNodes}} ->
+ Acc1 = maps:put(LeaderNode, ok, Acc),
+ lists:foldl(fun(ReplicaNode, NodesAcc) -> maps:put(ReplicaNode, ok, NodesAcc) end, Acc1, ReplicaNodes);
+ {error, _} ->
+ Acc
+ end
+ end, #{}, Streams),
+
+ Nodes = maps:keys(NodesMap),
+ {NodesInfo, _} = lists:foldl(fun(Node, {Acc, Index}) ->
+ Host = rpc:call(Node, rabbit_stream, host, []),
+ Port = rpc:call(Node, rabbit_stream, port, []),
+ case {is_binary(Host), is_integer(Port)} of
+ {true, true} ->
+ {Acc#{Node => {{index, Index}, {host, Host}, {port, Port}}}, Index + 1};
+ _ ->
+ rabbit_log:warning("Error when retrieving broker metadata: ~p ~p~n", [Host, Port]),
+ {Acc, Index}
+ end
+ end, {#{}, 0}, Nodes),
+
+ BrokersCount = map_size(NodesInfo),
+ BrokersBin = maps:fold(fun(_K, {{index, Index}, {host, Host}, {port, Port}}, Acc) ->
+ HostLength = byte_size(Host),
+ <<Acc/binary, Index:16, HostLength:16, Host:HostLength/binary, Port:32>>
+ end, <<BrokersCount:32>>, NodesInfo),
+
+ MetadataBin = lists:foldl(fun(Stream, Acc) ->
+ StreamLength = byte_size(Stream),
+ case rabbit_stream_manager:topology(VirtualHost, Stream) of
+ {error, stream_not_found} ->
+ <<Acc/binary, StreamLength:16, Stream:StreamLength/binary, ?RESPONSE_CODE_STREAM_DOES_NOT_EXIST:16,
+ -1:16, 0:32>>;
+ {error, stream_not_available} ->
+ <<Acc/binary, StreamLength:16, Stream:StreamLength/binary, ?RESPONSE_CODE_STREAM_NOT_AVAILABLE:16,
+ -1:16, 0:32>>;
+ {ok, #{leader_node := LeaderNode, replica_nodes := Replicas}} ->
+ LeaderIndex = case NodesInfo of
+ #{LeaderNode := NodeInfo} ->
+ {{index, LeaderIdx}, {host, _}, {port, _}} = NodeInfo,
+ LeaderIdx;
+ _ ->
+ -1
+ end,
+ {ReplicasBinary, ReplicasCount} = lists:foldl(fun(Replica, {Bin, Count}) ->
+ case NodesInfo of
+ #{Replica := NI} ->
+ {{index, ReplicaIndex}, {host, _}, {port, _}} = NI,
+ {<<Bin/binary, ReplicaIndex:16>>, Count + 1};
+ _ ->
+ {Bin, Count}
+ end
+ end, {<<>>, 0}, Replicas),
+ <<Acc/binary, StreamLength:16, Stream:StreamLength/binary, ?RESPONSE_CODE_OK:16,
+ LeaderIndex:16, ReplicasCount:32, ReplicasBinary/binary>>
+ end
+ end, <<StreamCount:32>>, Streams),
+ Frame = <<?COMMAND_METADATA:16, ?VERSION_0:16, CorrelationId:32, BrokersBin/binary, MetadataBin/binary>>,
+ FrameSize = byte_size(Frame),
+ Transport:send(S, <<FrameSize:32, Frame/binary>>),
+ {Connection, State, Rest};
+handle_frame_post_auth(Transport, Connection, State,
+ <<?COMMAND_CLOSE:16, ?VERSION_0:16, CorrelationId:32,
+ ClosingCode:16, ClosingReasonLength:16, ClosingReason:ClosingReasonLength/binary>>, _Rest) ->
+ rabbit_log:info("Received close command ~p ~p~n", [ClosingCode, ClosingReason]),
+ Frame = <<?COMMAND_CLOSE:16, ?VERSION_0:16, CorrelationId:32, ?RESPONSE_CODE_OK:16>>,
+ frame(Transport, Connection, Frame),
+ {Connection#stream_connection{connection_step = closing}, State, <<>>}; %% we ignore any subsequent frames
+handle_frame_post_auth(_Transport, Connection, State, <<?COMMAND_HEARTBEAT:16, ?VERSION_0:16>>, Rest) ->
+ rabbit_log:info("Received heartbeat frame post auth~n"),
+ {Connection, State, Rest};
+handle_frame_post_auth(Transport, Connection, State, Frame, Rest) ->
+ rabbit_log:warning("unknown frame ~p ~p, sending close command.~n", [Frame, Rest]),
+ CloseReason = <<"unknown frame">>,
+ CloseReasonLength = byte_size(CloseReason),
+ CloseFrame = <<?COMMAND_CLOSE:16, ?VERSION_0:16, 1:32, ?RESPONSE_CODE_UNKNOWN_FRAME:16,
+ CloseReasonLength:16, CloseReason:CloseReasonLength/binary>>,
+ frame(Transport, Connection, CloseFrame),
+ {Connection#stream_connection{connection_step = close_sent}, State, Rest}.
+
+notify_connection_closed(#stream_connection{name = Name} = Connection, ConnectionState) ->
+ rabbit_core_metrics:connection_closed(self()),
+ ClientProperties = i(client_properties, Connection, ConnectionState),
+ EventProperties = [{name, Name},
+ {pid, self()},
+ {node, node()},
+ {client_properties, ClientProperties}],
+ rabbit_event:notify(connection_closed,
+ augment_infos_with_user_provided_connection_name(EventProperties, Connection)).
+
+handle_frame_post_close(_Transport, Connection, State,
+ <<?COMMAND_CLOSE:16, ?VERSION_0:16, _CorrelationId:32, _ResponseCode:16>>, Rest) ->
+ rabbit_log:info("Received close confirmation~n"),
+ {Connection#stream_connection{connection_step = closing_done}, State, Rest};
+handle_frame_post_close(_Transport, Connection, State, <<?COMMAND_HEARTBEAT:16, ?VERSION_0:16>>, Rest) ->
+ rabbit_log:info("Received heartbeat frame post close~n"),
+ {Connection, State, Rest};
+handle_frame_post_close(_Transport, Connection, State, Frame, Rest) ->
+ rabbit_log:warning("ignored frame on close ~p ~p.~n", [Frame, Rest]),
+ {Connection, State, Rest}.
+
+clean_state_after_stream_deletion_or_failure(Stream, #stream_connection{stream_leaders = StreamLeaders, stream_subscriptions = StreamSubscriptions} = Connection,
+ #stream_connection_state{consumers = Consumers} = State) ->
+ case {maps:is_key(Stream, StreamSubscriptions), maps:is_key(Stream, StreamLeaders)} of
+ {true, _} ->
+ #{Stream := SubscriptionIds} = StreamSubscriptions,
+ {cleaned, Connection#stream_connection{
+ stream_leaders = maps:remove(Stream, StreamLeaders),
+ stream_subscriptions = maps:remove(Stream, StreamSubscriptions)
+ }, State#stream_connection_state{consumers = maps:without(SubscriptionIds, Consumers)}};
+ {false, true} ->
+ {cleaned, Connection#stream_connection{
+ stream_leaders = maps:remove(Stream, StreamLeaders)
+ }, State};
+ {false, false} ->
+ {not_cleaned, Connection, State}
+ end.
+
+lookup_leader(Stream, #stream_connection{stream_leaders = StreamLeaders, virtual_host = VirtualHost} = Connection) ->
+ case maps:get(Stream, StreamLeaders, undefined) of
+ undefined ->
+ case lookup_leader_from_manager(VirtualHost, Stream) of
+ cluster_not_found ->
+ cluster_not_found;
+ LeaderPid ->
+ Connection1 = maybe_monitor_stream(LeaderPid, Stream, Connection),
+ {LeaderPid, Connection1#stream_connection{stream_leaders = StreamLeaders#{Stream => LeaderPid}}}
+ end;
+ LeaderPid ->
+ {LeaderPid, Connection}
+ end.
+
+lookup_leader_from_manager(VirtualHost, Stream) ->
+ rabbit_stream_manager:lookup_leader(VirtualHost, Stream).
+
+maybe_monitor_stream(Pid, Stream, #stream_connection{monitors = Monitors} = Connection) ->
+ case lists:member(Stream, maps:values(Monitors)) of
+ true ->
+ Connection;
+ false ->
+ MonitorRef = monitor(process, Pid),
+ Connection#stream_connection{monitors = maps:put(MonitorRef, Stream, Monitors)}
+ end.
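+
+%% A stream is monitored at most once per connection; the monitor reference
+%% is mapped to the stream name so that a 'DOWN' message can trigger cleanup
+%% of the subscriptions and cached leader for that stream.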
+
+demonitor_stream(Stream, #stream_connection{monitors = Monitors0} = Connection) ->
+ Monitors = maps:fold(fun(MonitorRef, Strm, Acc) ->
+ case Strm of
+ Stream ->
+ Acc;
+ _ ->
+ maps:put(MonitorRef, Strm, Acc)
+
+ end
+ end, #{}, Monitors0),
+ Connection#stream_connection{monitors = Monitors}.
+
+demonitor_all_streams(#stream_connection{monitors = Monitors} = Connection) ->
+ lists:foreach(fun(MonitorRef) ->
+ demonitor(MonitorRef, [flush])
+ end, maps:keys(Monitors)),
+ Connection#stream_connection{monitors = #{}}.
+
+frame(Transport, #stream_connection{socket = S}, Frame) ->
+ FrameSize = byte_size(Frame),
+ Transport:send(S, [<<FrameSize:32>>, Frame]).
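+
+%% Like inbound frames, every outbound frame is prefixed with its 32-bit size.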
+
+response_ok(Transport, Connection, CommandId, CorrelationId) ->
+ response(Transport, Connection, CommandId, CorrelationId, ?RESPONSE_CODE_OK).
+
+response(Transport, #stream_connection{socket = S}, CommandId, CorrelationId, ResponseCode) ->
+ Transport:send(S, [<<?RESPONSE_FRAME_SIZE:32, CommandId:16, ?VERSION_0:16>>, <<CorrelationId:32>>, <<ResponseCode:16>>]).
+
+subscription_exists(StreamSubscriptions, SubscriptionId) ->
+ SubscriptionIds = lists:flatten(maps:values(StreamSubscriptions)),
+ lists:any(fun(Id) -> Id =:= SubscriptionId end, SubscriptionIds).
+
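+%% Returns the callback osiris_log:send_file/3 invokes once it knows the
+%% chunk size: it writes the DELIVER frame header just before the chunk goes
+%% out on the socket and accounts for the bytes in the send_oct counter.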
+send_file_callback(Transport, #consumer{socket = S, subscription_id = SubscriptionId}, Counter) ->
+ fun(Size) ->
+ FrameSize = 2 + 2 + 1 + Size,
+ FrameBeginning = <<FrameSize:32, ?COMMAND_DELIVER:16, ?VERSION_0:16, SubscriptionId:8/unsigned>>,
+ Transport:send(S, FrameBeginning),
+ atomics:add(Counter, 1, Size)
+ end.
+
+send_chunks(Transport, #consumer{credit = Credit} = State, Counter) ->
+ send_chunks(Transport, State, Credit, Counter).
+
+send_chunks(_Transport, #consumer{segment = Segment}, 0, _Counter) ->
+ {{segment, Segment}, {credit, 0}};
+send_chunks(Transport, #consumer{segment = Segment} = State, Credit, Counter) ->
+ send_chunks(Transport, State, Segment, Credit, true, Counter).
+
+send_chunks(_Transport, _State, Segment, 0 = _Credit, _Retry, _Counter) ->
+ {{segment, Segment}, {credit, 0}};
+send_chunks(Transport, #consumer{socket = S} = State, Segment, Credit, Retry, Counter) ->
+ case osiris_log:send_file(S, Segment, send_file_callback(Transport, State, Counter)) of
+ {ok, Segment1} ->
+ send_chunks(
+ Transport,
+ State,
+ Segment1,
+ Credit - 1,
+ true,
+ Counter
+ );
+ {end_of_stream, Segment1} ->
+ case Retry of
+ true ->
+ timer:sleep(1),
+ send_chunks(Transport, State, Segment1, Credit, false, Counter);
+ false ->
+ #consumer{member_pid = LocalMember} = State,
+ osiris:register_offset_listener(LocalMember, osiris_log:next_offset(Segment1)),
+ {{segment, Segment1}, {credit, Credit}}
+ end
+ end.
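+
+%% On end_of_stream, dispatch is retried once after a short pause (a chunk
+%% may be in the process of being written); if the stream is still exhausted,
+%% an offset listener is registered with the local osiris member, which will
+%% notify this process (osiris_offset event) when new chunks can be sent.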
+
+emit_stats(Connection, ConnectionState) ->
+ [{_, Pid}, {_, Recv_oct}, {_, Send_oct}, {_, Reductions}] = I
+ = infos(?SIMPLE_METRICS, Connection, ConnectionState),
+ Infos = infos(?OTHER_METRICS, Connection, ConnectionState),
+ rabbit_core_metrics:connection_stats(Pid, Infos),
+ rabbit_core_metrics:connection_stats(Pid, Recv_oct, Send_oct, Reductions),
+ rabbit_event:notify(connection_stats, Infos ++ I),
+ Connection1 = rabbit_event:reset_stats_timer(Connection, #stream_connection.stats_timer),
+ ensure_stats_timer(Connection1).
+
+ensure_stats_timer(Connection = #stream_connection{}) ->
+ rabbit_event:ensure_stats_timer(Connection, #stream_connection.stats_timer, emit_stats).
+
+info(Pid, InfoItems) ->
+ case InfoItems -- ?INFO_ITEMS of
+ [] ->
+ gen_server2:call(Pid, {info, InfoItems});
+ UnknownItems -> throw({bad_argument, UnknownItems})
+ end.
+
+infos(Items, Connection, State) -> [{Item, i(Item, Connection, State)} || Item <- Items].
+
+i(pid, _, _) -> self();
+i(node, _, _) -> node();
+i(SockStat, #stream_connection{socket = Sock, send_file_oct = Counter}, _) when
+ SockStat =:= send_oct -> % Number of bytes sent from the socket.
+ case rabbit_net:getstat(Sock, [SockStat]) of
+ {ok, [{_, N}]} when is_number(N) -> N + atomics:get(Counter, 1);
+ _ -> 0 + atomics:get(Counter, 1)
+ end;
+i(SockStat, #stream_connection{socket = Sock}, _) when
+ SockStat =:= recv_oct; % Number of bytes received by the socket.
+ SockStat =:= recv_cnt; % Number of packets received by the socket.
+ SockStat =:= send_cnt; % Number of packets sent from the socket.
+ SockStat =:= send_pend -> % Number of bytes waiting to be sent by the socket.
+ case rabbit_net:getstat(Sock, [SockStat]) of
+ {ok, [{_, N}]} when is_number(N) -> N;
+ _ -> 0
+ end;
+i(reductions, _, _) ->
+ {reductions, Reductions} = erlang:process_info(self(), reductions),
+ Reductions;
+i(garbage_collection, _, _) ->
+ rabbit_misc:get_gc_info(self());
+i(state, Connection, ConnectionState) -> i(connection_state, Connection, ConnectionState);
+i(timeout, Connection, ConnectionState) -> i(heartbeat, Connection, ConnectionState);
+i(name, Connection, ConnectionState) -> i(conn_name, Connection, ConnectionState);
+i(conn_name, #stream_connection{name = Name}, _) -> Name;
+i(port, #stream_connection{port = Port}, _) -> Port;
+i(peer_port, #stream_connection{peer_port = PeerPort}, _) -> PeerPort;
+i(host, #stream_connection{host = Host}, _) -> Host;
+i(peer_host, #stream_connection{peer_host = PeerHost}, _) -> PeerHost;
+i(ssl, _, _) -> false;
+i(peer_cert_subject, _, _) -> '';
+i(peer_cert_issuer, _, _) -> '';
+i(peer_cert_validity, _, _) -> '';
+i(ssl_protocol, _, _) -> '';
+i(ssl_key_exchange, _, _) -> '';
+i(ssl_cipher, _, _) -> '';
+i(ssl_hash, _, _) -> '';
+i(channels, _, _) -> 0;
+i(protocol, _, _) -> {<<"stream">>, ""};
+i(user_who_performed_action, Connection, ConnectionState) -> i(user, Connection, ConnectionState);
+i(user, #stream_connection{user = U}, _) -> U#user.username;
+i(vhost, #stream_connection{virtual_host = VirtualHost}, _) -> VirtualHost;
+i(subscriptions, _, #stream_connection_state{consumers = Consumers}) -> maps:size(Consumers);
+i(connection_state, _Connection, #stream_connection_state{blocked = true}) -> blocked;
+i(connection_state, _Connection, #stream_connection_state{blocked = false}) -> running;
+i(auth_mechanism, #stream_connection{auth_mechanism = none}, _) -> none;
+i(auth_mechanism, #stream_connection{auth_mechanism = {Name, _Mod}}, _) -> Name;
+i(heartbeat, #stream_connection{heartbeat = Heartbeat}, _) -> Heartbeat;
+i(frame_max, #stream_connection{frame_max = FrameMax}, _) -> FrameMax;
+i(channel_max, _, _) -> 0;
+i(client_properties, #stream_connection{client_properties = CP}, _) -> rabbit_misc:to_amqp_table(CP);
+i(connected_at, #stream_connection{connected_at = T}, _) -> T;
+i(Item, #stream_connection{}, _) -> throw({bad_argument, Item}).
\ No newline at end of file
diff --git a/deps/rabbitmq_stream/src/rabbit_stream_sup.erl b/deps/rabbitmq_stream/src/rabbit_stream_sup.erl
new file mode 100644
index 0000000000..b331b47356
--- /dev/null
+++ b/deps/rabbitmq_stream/src/rabbit_stream_sup.erl
@@ -0,0 +1,61 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 2.0 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at https://www.mozilla.org/en-US/MPL/2.0/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is Pivotal Software, Inc.
+%% Copyright (c) 2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_stream_sup).
+-behaviour(supervisor).
+
+-export([start_link/0]).
+-export([init/1]).
+
+-include("rabbit_stream.hrl").
+
+start_link() ->
+ supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
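+%% One-for-all supervision of the stream manager plus one Ranch TCP
+%% listener spec per configured listener address.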
+init([]) ->
+ {ok, Listeners} = application:get_env(rabbitmq_stream, tcp_listeners),
+ NumTcpAcceptors = application:get_env(rabbitmq_stream, num_tcp_acceptors, 10),
+ {ok, SocketOpts} = application:get_env(rabbitmq_stream, tcp_listen_options),
+ Nodes = rabbit_mnesia:cluster_nodes(all),
+ OsirisConf = #{nodes => Nodes},
+
+ ServerConfiguration = #{
+ initial_credits => application:get_env(rabbitmq_stream, initial_credits, ?DEFAULT_INITIAL_CREDITS),
+ credits_required_for_unblocking => application:get_env(rabbitmq_stream, credits_required_for_unblocking, ?DEFAULT_CREDITS_REQUIRED_FOR_UNBLOCKING),
+        frame_max => application:get_env(rabbitmq_stream, frame_max, ?DEFAULT_FRAME_MAX),
+        heartbeat => application:get_env(rabbitmq_stream, heartbeat, ?DEFAULT_HEARTBEAT)
+ },
+
+ StreamManager = #{id => rabbit_stream_manager,
+ type => worker,
+ start => {rabbit_stream_manager, start_link, [OsirisConf]}},
+
+ {ok, {{one_for_all, 10, 10},
+ [StreamManager] ++
+ listener_specs(fun tcp_listener_spec/1,
+ [SocketOpts, ServerConfiguration, NumTcpAcceptors], Listeners)}}.
+
+listener_specs(Fun, Args, Listeners) ->
+ [Fun([Address | Args]) ||
+ Listener <- Listeners,
+ Address <- rabbit_networking:tcp_listener_addresses(Listener)].
+
+tcp_listener_spec([Address, SocketOpts, Configuration, NumAcceptors]) ->
+ rabbit_networking:tcp_listener_spec(
+ rabbit_stream_listener_sup, Address, SocketOpts,
+ ranch_tcp, rabbit_stream_connection_sup, Configuration,
+ stream, NumAcceptors, "Stream TCP listener").
+
diff --git a/deps/rabbitmq_stream/src/rabbit_stream_utils.erl b/deps/rabbitmq_stream/src/rabbit_stream_utils.erl
new file mode 100644
index 0000000000..c20aacb12c
--- /dev/null
+++ b/deps/rabbitmq_stream/src/rabbit_stream_utils.erl
@@ -0,0 +1,125 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 2.0 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at https://www.mozilla.org/en-US/MPL/2.0/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is Pivotal Software, Inc.
+%% Copyright (c) 2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_stream_utils).
+
+%% API
+-export([enforce_correct_stream_name/1, write_messages/3, parse_map/2,
+ auth_mechanisms/1, auth_mechanism_to_module/2,
+ check_configure_permitted/3, check_write_permitted/3, check_read_permitted/3,
+ extract_stream_list/2]).
+
+-define(MAX_PERMISSION_CACHE_SIZE, 12).
+
+enforce_correct_stream_name(Name) ->
+ % from rabbit_channel
+ StrippedName = binary:replace(Name, [<<"\n">>, <<"\r">>], <<"">>, [global]),
+ case check_name(StrippedName) of
+ ok ->
+ {ok, StrippedName};
+ error ->
+ error
+ end.
+
+check_name(<<"amq.", _/binary>>) ->
+ error;
+check_name(<<"">>) ->
+ error;
+check_name(_Name) ->
+ ok.
+
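+%% Publish frames carry a sequence of entries: either a single message
+%% (PublishingId:64, 0:1, Size:31, Payload) or a sub-entry batch
+%% (PublishingId:64, 1:1, CompressionType:3, _:4, MessageCount:16,
+%% BatchSize:32, Batch). Each entry is written to the osiris leader.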
+write_messages(_ClusterLeader, _PublisherId, <<>>) ->
+ ok;
+write_messages(ClusterLeader, PublisherId, <<PublishingId:64, 0:1, MessageSize:31, Message:MessageSize/binary, Rest/binary>>) ->
+ % FIXME handle write error
+ ok = osiris:write(ClusterLeader, {PublisherId, PublishingId}, Message),
+ write_messages(ClusterLeader, PublisherId, Rest);
+write_messages(ClusterLeader, PublisherId, <<PublishingId:64, 1:1, CompressionType:3, _Unused:4, MessageCount:16, BatchSize:32, Batch:BatchSize/binary, Rest/binary>>) ->
+ % FIXME handle write error
+ ok = osiris:write(ClusterLeader, {PublisherId, PublishingId}, {batch, MessageCount, CompressionType, Batch}),
+ write_messages(ClusterLeader, PublisherId, Rest).
+
+
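+%% Parses up to Count key/value pairs, each encoded as
+%% KeySize:16, Key, ValueSize:16, Value. Returns {Map, Rest}.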
+parse_map(<<>>, _Count) ->
+ {#{}, <<>>};
+parse_map(Content, 0) ->
+ {#{}, Content};
+parse_map(Arguments, Count) ->
+ parse_map(#{}, Arguments, Count).
+
+parse_map(Acc, <<>>, _Count) ->
+ {Acc, <<>>};
+parse_map(Acc, Content, 0) ->
+ {Acc, Content};
+parse_map(Acc, <<KeySize:16, Key:KeySize/binary, ValueSize:16, Value:ValueSize/binary, Rest/binary>>, Count) ->
+ parse_map(maps:put(Key, Value, Acc), Rest, Count - 1).
+
+auth_mechanisms(Sock) ->
+ {ok, Configured} = application:get_env(rabbit, auth_mechanisms),
+ [rabbit_data_coercion:to_binary(Name) || {Name, Module} <- rabbit_registry:lookup_all(auth_mechanism),
+ Module:should_offer(Sock), lists:member(Name, Configured)].
+
+auth_mechanism_to_module(TypeBin, Sock) ->
+ case rabbit_registry:binary_to_type(TypeBin) of
+ {error, not_found} ->
+ rabbit_log:warning("Unknown authentication mechanism '~p'~n", [TypeBin]),
+ {error, not_found};
+ T ->
+ case {lists:member(TypeBin, rabbit_stream_utils:auth_mechanisms(Sock)),
+ rabbit_registry:lookup_module(auth_mechanism, T)} of
+ {true, {ok, Module}} ->
+ {ok, Module};
+ _ ->
+ rabbit_log:warning("Invalid authentication mechanism '~p'~n", [T]),
+ {error, invalid}
+ end
+ end.
+
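+%% Memoizes successful authorization checks in the process dictionary,
+%% keeping at most ?MAX_PERMISSION_CACHE_SIZE entries.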
+check_resource_access(User, Resource, Perm, Context) ->
+ V = {Resource, Context, Perm},
+
+ Cache = case get(permission_cache) of
+ undefined -> [];
+ Other -> Other
+ end,
+ case lists:member(V, Cache) of
+ true -> ok;
+ false ->
+ try
+ rabbit_access_control:check_resource_access(
+ User, Resource, Perm, Context),
+ CacheTail = lists:sublist(Cache, ?MAX_PERMISSION_CACHE_SIZE - 1),
+ put(permission_cache, [V | CacheTail]),
+ ok
+ catch
+ exit:_ ->
+ error
+ end
+ end.
+
+check_configure_permitted(Resource, User, Context) ->
+ check_resource_access(User, Resource, configure, Context).
+
+check_write_permitted(Resource, User, Context) ->
+ check_resource_access(User, Resource, write, Context).
+
+check_read_permitted(Resource, User, Context) ->
+ check_resource_access(User, Resource, read, Context).
+
+extract_stream_list(<<>>, Streams) ->
+ Streams;
+extract_stream_list(<<Length:16, Stream:Length/binary, Rest/binary>>, Streams) ->
+    extract_stream_list(Rest, [Stream | Streams]).
\ No newline at end of file
diff --git a/deps/rabbitmq_stream/test/command_SUITE.erl b/deps/rabbitmq_stream/test/command_SUITE.erl
new file mode 100644
index 0000000000..41ab5904ff
--- /dev/null
+++ b/deps/rabbitmq_stream/test/command_SUITE.erl
@@ -0,0 +1,136 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(command_SUITE).
+-compile([export_all]).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include("rabbit_stream.hrl").
+
+
+-define(COMMAND, 'Elixir.RabbitMQ.CLI.Ctl.Commands.ListStreamConnectionsCommand').
+
+all() ->
+ [
+ {group, non_parallel_tests}
+ ].
+
+groups() ->
+ [
+ {non_parallel_tests, [], [
+ merge_defaults,
+ run
+ ]}
+ ].
+
+init_per_suite(Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config,
+ [{rmq_nodename_suffix, ?MODULE}]),
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps()).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config,
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_group(_, Config) ->
+ Config.
+
+end_per_group(_, Config) ->
+ Config.
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+merge_defaults(_Config) ->
+ {[<<"conn_name">>], #{verbose := false}} =
+ ?COMMAND:merge_defaults([], #{}),
+
+ {[<<"other_key">>], #{verbose := true}} =
+ ?COMMAND:merge_defaults([<<"other_key">>], #{verbose => true}),
+
+ {[<<"other_key">>], #{verbose := false}} =
+ ?COMMAND:merge_defaults([<<"other_key">>], #{verbose => false}).
+
+
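+%% The command must list stream connections only; the AMQP connections
+%% opened below must not show up in the results.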
+run(Config) ->
+
+ Node = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ Opts = #{node => Node, timeout => 10000, verbose => false},
+
+ %% No connections
+ [] = 'Elixir.Enum':to_list(?COMMAND:run([], Opts)),
+
+ StreamPort = rabbit_stream_SUITE:get_stream_port(Config),
+
+ S1 = start_stream_connection(StreamPort),
+ ct:sleep(100),
+
+ [[{conn_name, _}]] =
+ 'Elixir.Enum':to_list(?COMMAND:run([<<"conn_name">>], Opts)),
+
+ S2 = start_stream_connection(StreamPort),
+ ct:sleep(100),
+
+ [[{conn_name, _}], [{conn_name, _}]] =
+ 'Elixir.Enum':to_list(?COMMAND:run([<<"conn_name">>], Opts)),
+
+ Port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp),
+ start_amqp_connection(network, Node, Port),
+
+ %% There are still just two connections
+ [[{conn_name, _}], [{conn_name, _}]] =
+ 'Elixir.Enum':to_list(?COMMAND:run([<<"conn_name">>], Opts)),
+
+ start_amqp_connection(direct, Node, Port),
+
+    %% Still two stream connections, the direct AMQP 0-9-1 connection is also ignored
+ [[{conn_name, _}], [{conn_name, _}]] =
+ 'Elixir.Enum':to_list(?COMMAND:run([<<"conn_name">>], Opts)),
+
+ %% Verbose returns all keys
+ Infos = lists:map(fun(El) -> atom_to_binary(El, utf8) end, ?INFO_ITEMS),
+ AllKeys = 'Elixir.Enum':to_list(?COMMAND:run(Infos, Opts)),
+ AllKeys = 'Elixir.Enum':to_list(?COMMAND:run([], Opts#{verbose => true})),
+
+ %% There are two connections
+ [First, _Second] = AllKeys,
+
+ %% Keys are INFO_ITEMS
+ KeysCount = length(?INFO_ITEMS),
+ KeysCount = length(First),
+
+ {Keys, _} = lists:unzip(First),
+
+ [] = Keys -- ?INFO_ITEMS,
+ [] = ?INFO_ITEMS -- Keys,
+
+ rabbit_stream_SUITE:test_close(S1),
+ rabbit_stream_SUITE:test_close(S2),
+ ok.
+
+start_stream_connection(Port) ->
+ {ok, S} = gen_tcp:connect("localhost", Port, [{active, false},
+ {mode, binary}]),
+ rabbit_stream_SUITE:test_peer_properties(S),
+ rabbit_stream_SUITE:test_authenticate(S),
+ S.
+
+start_amqp_connection(Type, Node, Port) ->
+ Params = amqp_params(Type, Node, Port),
+ {ok, _Connection} = amqp_connection:start(Params).
+
+amqp_params(network, _, Port) ->
+ #amqp_params_network{port = Port};
+amqp_params(direct, Node, _) ->
+ #amqp_params_direct{node = Node}.
diff --git a/deps/rabbitmq_stream/test/config_schema_SUITE.erl b/deps/rabbitmq_stream/test/config_schema_SUITE.erl
new file mode 100644
index 0000000000..a298811541
--- /dev/null
+++ b/deps/rabbitmq_stream/test/config_schema_SUITE.erl
@@ -0,0 +1,53 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(config_schema_SUITE).
+
+-compile(export_all).
+
+all() ->
+ [
+ run_snippets
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ Config1 = rabbit_ct_helpers:run_setup_steps(Config),
+ rabbit_ct_config_schema:init_schemas(rabbitmq_stream, Config1).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, Testcase}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_testcase(Testcase, Config) ->
+ Config1 = rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()),
+ rabbit_ct_helpers:testcase_finished(Config1, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+run_snippets(Config) ->
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, run_snippets1, [Config]).
+
+run_snippets1(Config) ->
+ rabbit_ct_config_schema:run_snippets(Config).
diff --git a/deps/rabbitmq_stream/test/config_schema_SUITE_data/rabbitmq_stream.snippets b/deps/rabbitmq_stream/test/config_schema_SUITE_data/rabbitmq_stream.snippets
new file mode 100644
index 0000000000..8f60ef9710
--- /dev/null
+++ b/deps/rabbitmq_stream/test/config_schema_SUITE_data/rabbitmq_stream.snippets
@@ -0,0 +1,73 @@
+[{listener_port,
+ "stream.listeners.tcp.1 = 12345",
+ [{rabbitmq_stream,[{tcp_listeners,[12345]}]}],
+ [rabbitmq_stream]},
+ {listeners_ip,
+ "stream.listeners.tcp.1 = 127.0.0.1:5555
+ stream.listeners.tcp.2 = ::1:5555",
+ [{rabbitmq_stream,[{tcp_listeners,[{"127.0.0.1",5555},{"::1",5555}]}]}],
+ [rabbitmq_stream]},
+
+ {listener_tcp_options,
+ "stream.listeners.tcp.1 = 127.0.0.1:5555
+ stream.listeners.tcp.2 = ::1:5555
+
+ stream.tcp_listen_options.backlog = 2048
+ stream.tcp_listen_options.recbuf = 8192
+ stream.tcp_listen_options.sndbuf = 8192
+
+ stream.tcp_listen_options.keepalive = true
+ stream.tcp_listen_options.nodelay = true
+
+ stream.tcp_listen_options.exit_on_close = true
+
+ stream.tcp_listen_options.send_timeout = 120
+",
+ [{rabbitmq_stream,[
+ {tcp_listeners,[
+ {"127.0.0.1",5555},
+ {"::1",5555}
+ ]}
+ , {tcp_listen_options, [
+ {backlog, 2048},
+ {exit_on_close, true},
+
+ {recbuf, 8192},
+ {sndbuf, 8192},
+
+ {send_timeout, 120},
+
+ {keepalive, true},
+ {nodelay, true}
+ ]}
+ ]}],
+ [rabbitmq_stream]},
+ {defaults,
+ "stream.frame_max = 1048576
+ stream.heartbeat = 60
+ stream.initial_credits = 50000
+ stream.credits_required_for_unblocking = 12500",
+ [{rabbitmq_stream,[{initial_credits, 50000},
+ {credits_required_for_unblocking, 12500},
+ {frame_max, 1048576},
+ {heartbeat, 60}]}],
+ [rabbitmq_stream]},
+ {advertised_host_port,
+ "stream.advertised_host = some-host
+ stream.advertised_port = 5556",
+ [{rabbitmq_stream,[{advertised_host, <<"some-host">>},
+ {advertised_port, 5556}]}],
+ [rabbitmq_stream]},
+ {credits,
+ "stream.frame_max = 2097152
+ stream.heartbeat = 120",
+ [{rabbitmq_stream,[{frame_max, 2097152},
+ {heartbeat, 120}]}],
+ [rabbitmq_stream]},
+ {protocol,
+ "stream.initial_credits = 100000
+ stream.credits_required_for_unblocking = 25000",
+ [{rabbitmq_stream,[{initial_credits, 100000},
+ {credits_required_for_unblocking, 25000}]}],
+ [rabbitmq_stream]}
+].
\ No newline at end of file
diff --git a/deps/rabbitmq_stream/test/rabbit_stream_SUITE.erl b/deps/rabbitmq_stream/test/rabbit_stream_SUITE.erl
new file mode 100644
index 0000000000..4197b1de71
--- /dev/null
+++ b/deps/rabbitmq_stream/test/rabbit_stream_SUITE.erl
@@ -0,0 +1,266 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 2.0 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at https://www.mozilla.org/en-US/MPL/2.0/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is Pivotal Software, Inc.
+%% Copyright (c) 2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_stream_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include("rabbit_stream.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, single_node},
+ {group, cluster}
+ ].
+
+groups() ->
+ [
+ {single_node, [], [test_stream]},
+ {cluster, [], [test_stream, java]}
+ ].
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ Config.
+
+end_per_suite(Config) ->
+ Config.
+
+init_per_group(single_node, Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config, [{rmq_nodes_clustered, false}]),
+ rabbit_ct_helpers:run_setup_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps());
+init_per_group(cluster = Group, Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config, [{rmq_nodes_clustered, true}]),
+ Config2 = rabbit_ct_helpers:set_config(Config1,
+ [{rmq_nodes_count, 3},
+ {rmq_nodename_suffix, Group},
+ {tcp_ports_base}]),
+ rabbit_ct_helpers:run_setup_steps(Config2,
+ [fun(StepConfig) ->
+ rabbit_ct_helpers:merge_app_env(StepConfig,
+ {aten, [{poll_interval, 1000}]})
+ end] ++
+ rabbit_ct_broker_helpers:setup_steps());
+init_per_group(_, Config) ->
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_group(java, Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config);
+end_per_group(_, Config) ->
+ rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_testcase(_TestCase, Config) ->
+ Config.
+
+end_per_testcase(_Test, _Config) ->
+ ok.
+
+test_stream(Config) ->
+ Port = get_stream_port(Config),
+ test_server(Port),
+ ok.
+
+java(Config) ->
+ StreamPortNode1 = get_stream_port(Config, 0),
+ StreamPortNode2 = get_stream_port(Config, 1),
+ Node1Name = get_node_name(Config, 0),
+ Node2Name = get_node_name(Config, 1),
+ RabbitMqCtl = get_rabbitmqctl(Config),
+ DataDir = rabbit_ct_helpers:get_config(Config, data_dir),
+ MakeResult = rabbit_ct_helpers:make(Config, DataDir, ["tests",
+ {"NODE1_STREAM_PORT=~b", [StreamPortNode1]},
+ {"NODE1_NAME=~p", [Node1Name]},
+ {"NODE2_NAME=~p", [Node2Name]},
+ {"NODE2_STREAM_PORT=~b", [StreamPortNode2]},
+ {"RABBITMQCTL=~p", [RabbitMqCtl]}
+ ]),
+ {ok, _} = MakeResult.
+
+get_rabbitmqctl(Config) ->
+ rabbit_ct_helpers:get_config(Config, rabbitmqctl_cmd).
+
+get_stream_port(Config) ->
+ get_stream_port(Config, 0).
+
+get_stream_port(Config, Node) ->
+ rabbit_ct_broker_helpers:get_node_config(Config, Node, tcp_port_stream).
+
+get_node_name(Config) ->
+ get_node_name(Config, 0).
+
+get_node_name(Config, Node) ->
+ rabbit_ct_broker_helpers:get_node_config(Config, Node, nodename).
+
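+%% End-to-end happy path: handshake and authentication, stream creation,
+%% publish with confirm, subscribe and deliver, stream deletion (with the
+%% resulting metadata update), and connection close.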
+test_server(Port) ->
+ {ok, S} = gen_tcp:connect("localhost", Port, [{active, false},
+ {mode, binary}]),
+ test_peer_properties(S),
+ test_authenticate(S),
+ Stream = <<"stream1">>,
+ test_create_stream(S, Stream),
+ Body = <<"hello">>,
+ test_publish_confirm(S, Stream, Body),
+ SubscriptionId = 42,
+ Rest = test_subscribe(S, SubscriptionId, Stream),
+ test_deliver(S, Rest, SubscriptionId, Body),
+ test_delete_stream(S, Stream),
+ test_metadata_update_stream_deleted(S, Stream),
+ test_close(S),
+ closed = wait_for_socket_close(S, 10),
+ ok.
+
+test_peer_properties(S) ->
+ PeerPropertiesFrame = <<?COMMAND_PEER_PROPERTIES:16, ?VERSION_0:16, 1:32, 0:32>>,
+ PeerPropertiesFrameSize = byte_size(PeerPropertiesFrame),
+ gen_tcp:send(S, <<PeerPropertiesFrameSize:32, PeerPropertiesFrame/binary>>),
+ {ok, <<_Size:32, ?COMMAND_PEER_PROPERTIES:16, ?VERSION_0:16, 1:32, ?RESPONSE_CODE_OK:16, _Rest/binary>>} = gen_tcp:recv(S, 0, 5000).
+
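+%% SASL handshake and PLAIN authentication, followed by the tune exchange
+%% and opening the default virtual host, asserting each response code.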
+test_authenticate(S) ->
+ SaslHandshakeFrame = <<?COMMAND_SASL_HANDSHAKE:16, ?VERSION_0:16, 1:32>>,
+ SaslHandshakeFrameSize = byte_size(SaslHandshakeFrame),
+ gen_tcp:send(S, <<SaslHandshakeFrameSize:32, SaslHandshakeFrame/binary>>),
+ Plain = <<"PLAIN">>,
+ AmqPlain = <<"AMQPLAIN">>,
+ {ok, SaslAvailable} = gen_tcp:recv(S, 0, 5000),
+ %% mechanisms order is not deterministic, so checking both orders
+ ok = case SaslAvailable of
+ <<31:32, ?COMMAND_SASL_HANDSHAKE:16, ?VERSION_0:16, 1:32, ?RESPONSE_CODE_OK:16, 2:32,
+ 5:16, Plain:5/binary, 8:16, AmqPlain:8/binary>> ->
+ ok;
+ <<31:32, ?COMMAND_SASL_HANDSHAKE:16, ?VERSION_0:16, 1:32, ?RESPONSE_CODE_OK:16, 2:32,
+ 8:16, AmqPlain:8/binary, 5:16, Plain:5/binary>> ->
+ ok;
+ _ ->
+ failed
+ end,
+
+ Username = <<"guest">>,
+ Password = <<"guest">>,
+ Null = 0,
+ PlainSasl = <<Null:8, Username/binary, Null:8, Password/binary>>,
+ PlainSaslSize = byte_size(PlainSasl),
+
+ SaslAuthenticateFrame = <<?COMMAND_SASL_AUTHENTICATE:16, ?VERSION_0:16, 2:32,
+ 5:16, Plain/binary, PlainSaslSize:32, PlainSasl/binary>>,
+
+ SaslAuthenticateFrameSize = byte_size(SaslAuthenticateFrame),
+
+ gen_tcp:send(S, <<SaslAuthenticateFrameSize:32, SaslAuthenticateFrame/binary>>),
+
+ {ok, <<10:32, ?COMMAND_SASL_AUTHENTICATE:16, ?VERSION_0:16, 2:32, ?RESPONSE_CODE_OK:16, RestTune/binary>>} = gen_tcp:recv(S, 0, 5000),
+
+ TuneExpected = <<12:32, ?COMMAND_TUNE:16, ?VERSION_0:16, ?DEFAULT_FRAME_MAX:32, ?DEFAULT_HEARTBEAT:32>>,
+ case RestTune of
+ <<>> ->
+ {ok, TuneExpected} = gen_tcp:recv(S, 0, 5000);
+ TuneReceived ->
+ TuneExpected = TuneReceived
+ end,
+
+ TuneFrame = <<?COMMAND_TUNE:16, ?VERSION_0:16, ?DEFAULT_FRAME_MAX:32, 0:32>>,
+ TuneFrameSize = byte_size(TuneFrame),
+ gen_tcp:send(S, <<TuneFrameSize:32, TuneFrame/binary>>),
+
+ VirtualHost = <<"/">>,
+ VirtualHostLength = byte_size(VirtualHost),
+ OpenFrame = <<?COMMAND_OPEN:16, ?VERSION_0:16, 3:32, VirtualHostLength:16, VirtualHost/binary>>,
+ OpenFrameSize = byte_size(OpenFrame),
+ gen_tcp:send(S, <<OpenFrameSize:32, OpenFrame/binary>>),
+ {ok, <<10:32, ?COMMAND_OPEN:16, ?VERSION_0:16, 3:32, ?RESPONSE_CODE_OK:16>>} = gen_tcp:recv(S, 0, 5000).
+
+
+test_create_stream(S, Stream) ->
+ StreamSize = byte_size(Stream),
+ CreateStreamFrame = <<?COMMAND_CREATE_STREAM:16, ?VERSION_0:16, 1:32, StreamSize:16, Stream:StreamSize/binary, 0:32>>,
+ FrameSize = byte_size(CreateStreamFrame),
+ gen_tcp:send(S, <<FrameSize:32, CreateStreamFrame/binary>>),
+ {ok, <<_Size:32, ?COMMAND_CREATE_STREAM:16, ?VERSION_0:16, 1:32, ?RESPONSE_CODE_OK:16>>} = gen_tcp:recv(S, 0, 5000).
+
+test_delete_stream(S, Stream) ->
+ StreamSize = byte_size(Stream),
+ DeleteStreamFrame = <<?COMMAND_DELETE_STREAM:16, ?VERSION_0:16, 1:32, StreamSize:16, Stream:StreamSize/binary>>,
+ FrameSize = byte_size(DeleteStreamFrame),
+ gen_tcp:send(S, <<FrameSize:32, DeleteStreamFrame/binary>>),
+ ResponseFrameSize = 10,
+    {ok, <<ResponseFrameSize:32, ?COMMAND_DELETE_STREAM:16, ?VERSION_0:16, 1:32, ?RESPONSE_CODE_OK:16>>} = gen_tcp:recv(S, 4 + ResponseFrameSize, 5000).
+
+test_publish_confirm(S, Stream, Body) ->
+ BodySize = byte_size(Body),
+ StreamSize = byte_size(Stream),
+ PublishFrame = <<?COMMAND_PUBLISH:16, ?VERSION_0:16, StreamSize:16, Stream:StreamSize/binary, 42:8, 1:32, 1:64, BodySize:32, Body:BodySize/binary>>,
+ FrameSize = byte_size(PublishFrame),
+ gen_tcp:send(S, <<FrameSize:32, PublishFrame/binary>>),
+ {ok, <<_Size:32, ?COMMAND_PUBLISH_CONFIRM:16, ?VERSION_0:16, 42:8, 1:32, 1:64>>} = gen_tcp:recv(S, 0, 5000).
+
+test_subscribe(S, SubscriptionId, Stream) ->
+ StreamSize = byte_size(Stream),
+ SubscribeFrame = <<?COMMAND_SUBSCRIBE:16, ?VERSION_0:16, 1:32, SubscriptionId:8, StreamSize:16, Stream:StreamSize/binary,
+ ?OFFSET_TYPE_OFFSET:16, 0:64, 10:16>>,
+ FrameSize = byte_size(SubscribeFrame),
+ gen_tcp:send(S, <<FrameSize:32, SubscribeFrame/binary>>),
+ Res = gen_tcp:recv(S, 0, 5000),
+ {ok, <<_Size:32, ?COMMAND_SUBSCRIBE:16, ?VERSION_0:16, 1:32, ?RESPONSE_CODE_OK:16, Rest/binary>>} = Res,
+ Rest.
+
+test_deliver(S, Rest, SubscriptionId, Body) ->
+ BodySize = byte_size(Body),
+ Frame = read_frame(S, Rest),
+ <<54:32, ?COMMAND_DELIVER:16, ?VERSION_0:16, SubscriptionId:8, 5:4/unsigned, 0:4/unsigned, 0:8,
+ 1:16, 1:32,
+ _Timestamp:64, _Epoch:64, 0:64, _Crc:32, _DataLength:32,
+ 0:1, BodySize:31/unsigned, Body/binary>> = Frame.
+
+test_metadata_update_stream_deleted(S, Stream) ->
+ StreamSize = byte_size(Stream),
+ {ok, <<15:32, ?COMMAND_METADATA_UPDATE:16, ?VERSION_0:16, ?RESPONSE_CODE_STREAM_NOT_AVAILABLE:16, StreamSize:16, Stream/binary>>} = gen_tcp:recv(S, 0, 5000).
+
+test_close(S) ->
+ CloseReason = <<"OK">>,
+ CloseReasonSize = byte_size(CloseReason),
+ CloseFrame = <<?COMMAND_CLOSE:16, ?VERSION_0:16, 1:32, ?RESPONSE_CODE_OK:16, CloseReasonSize:16, CloseReason/binary>>,
+ CloseFrameSize = byte_size(CloseFrame),
+ gen_tcp:send(S, <<CloseFrameSize:32, CloseFrame/binary>>),
+ {ok, <<10:32, ?COMMAND_CLOSE:16, ?VERSION_0:16, 1:32, ?RESPONSE_CODE_OK:16>>} = gen_tcp:recv(S, 0, 5000).
+
+wait_for_socket_close(_S, 0) ->
+ not_closed;
+wait_for_socket_close(S, Attempt) ->
+ case gen_tcp:recv(S, 0, 1000) of
+ {error, timeout} ->
+ wait_for_socket_close(S, Attempt - 1);
+ {error, closed} ->
+ closed
+ end.
+
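+%% Accumulates bytes until the buffer holds a complete frame (4-byte size
+%% prefix); gives up and returns the current buffer after 1 second.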
+read_frame(S, Buffer) ->
+ inet:setopts(S, [{active, once}]),
+ receive
+ {tcp, S, Received} ->
+ Data = <<Buffer/binary, Received/binary>>,
+ case Data of
+ <<Size:32, _Body:Size/binary>> ->
+ Data;
+ _ ->
+ read_frame(S, Data)
+ end
+ after
+ 1000 ->
+ inet:setopts(S, [{active, false}]),
+ Buffer
+    end.
\ No newline at end of file
diff --git a/deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/.gitignore b/deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/.gitignore
new file mode 100644
index 0000000000..4c70cdb707
--- /dev/null
+++ b/deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/.gitignore
@@ -0,0 +1,3 @@
+/build/
+/lib/
+/target/
diff --git a/deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/.mvn/wrapper/MavenWrapperDownloader.java b/deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/.mvn/wrapper/MavenWrapperDownloader.java
new file mode 100644
index 0000000000..b901097f2d
--- /dev/null
+++ b/deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/.mvn/wrapper/MavenWrapperDownloader.java
@@ -0,0 +1,117 @@
+/*
+ * Copyright 2007-present the original author or authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import java.net.*;
+import java.io.*;
+import java.nio.channels.*;
+import java.util.Properties;
+
+public class MavenWrapperDownloader {
+
+ private static final String WRAPPER_VERSION = "0.5.6";
+ /**
+ * Default URL to download the maven-wrapper.jar from, if no 'downloadUrl' is provided.
+ */
+ private static final String DEFAULT_DOWNLOAD_URL = "https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/"
+ + WRAPPER_VERSION + "/maven-wrapper-" + WRAPPER_VERSION + ".jar";
+
+ /**
+ * Path to the maven-wrapper.properties file, which might contain a downloadUrl property to
+ * use instead of the default one.
+ */
+ private static final String MAVEN_WRAPPER_PROPERTIES_PATH =
+ ".mvn/wrapper/maven-wrapper.properties";
+
+ /**
+ * Path where the maven-wrapper.jar will be saved to.
+ */
+ private static final String MAVEN_WRAPPER_JAR_PATH =
+ ".mvn/wrapper/maven-wrapper.jar";
+
+ /**
+ * Name of the property which should be used to override the default download url for the wrapper.
+ */
+ private static final String PROPERTY_NAME_WRAPPER_URL = "wrapperUrl";
+
+ public static void main(String args[]) {
+ System.out.println("- Downloader started");
+ File baseDirectory = new File(args[0]);
+ System.out.println("- Using base directory: " + baseDirectory.getAbsolutePath());
+
+ // If the maven-wrapper.properties exists, read it and check if it contains a custom
+ // wrapperUrl parameter.
+ File mavenWrapperPropertyFile = new File(baseDirectory, MAVEN_WRAPPER_PROPERTIES_PATH);
+ String url = DEFAULT_DOWNLOAD_URL;
+ if(mavenWrapperPropertyFile.exists()) {
+ FileInputStream mavenWrapperPropertyFileInputStream = null;
+ try {
+ mavenWrapperPropertyFileInputStream = new FileInputStream(mavenWrapperPropertyFile);
+ Properties mavenWrapperProperties = new Properties();
+ mavenWrapperProperties.load(mavenWrapperPropertyFileInputStream);
+ url = mavenWrapperProperties.getProperty(PROPERTY_NAME_WRAPPER_URL, url);
+ } catch (IOException e) {
+ System.out.println("- ERROR loading '" + MAVEN_WRAPPER_PROPERTIES_PATH + "'");
+ } finally {
+ try {
+ if(mavenWrapperPropertyFileInputStream != null) {
+ mavenWrapperPropertyFileInputStream.close();
+ }
+ } catch (IOException e) {
+ // Ignore ...
+ }
+ }
+ }
+ System.out.println("- Downloading from: " + url);
+
+ File outputFile = new File(baseDirectory.getAbsolutePath(), MAVEN_WRAPPER_JAR_PATH);
+ if(!outputFile.getParentFile().exists()) {
+ if(!outputFile.getParentFile().mkdirs()) {
+ System.out.println(
+ "- ERROR creating output directory '" + outputFile.getParentFile().getAbsolutePath() + "'");
+ }
+ }
+ System.out.println("- Downloading to: " + outputFile.getAbsolutePath());
+ try {
+ downloadFileFromURL(url, outputFile);
+ System.out.println("Done");
+ System.exit(0);
+ } catch (Throwable e) {
+ System.out.println("- Error downloading");
+ e.printStackTrace();
+ System.exit(1);
+ }
+ }
+
+ private static void downloadFileFromURL(String urlString, File destination) throws Exception {
+ if (System.getenv("MVNW_USERNAME") != null && System.getenv("MVNW_PASSWORD") != null) {
+ String username = System.getenv("MVNW_USERNAME");
+ char[] password = System.getenv("MVNW_PASSWORD").toCharArray();
+ Authenticator.setDefault(new Authenticator() {
+ @Override
+ protected PasswordAuthentication getPasswordAuthentication() {
+ return new PasswordAuthentication(username, password);
+ }
+ });
+ }
+ URL website = new URL(urlString);
+ ReadableByteChannel rbc;
+ rbc = Channels.newChannel(website.openStream());
+ FileOutputStream fos = new FileOutputStream(destination);
+ fos.getChannel().transferFrom(rbc, 0, Long.MAX_VALUE);
+ fos.close();
+ rbc.close();
+ }
+
+}
diff --git a/deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/.mvn/wrapper/maven-wrapper.properties b/deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/.mvn/wrapper/maven-wrapper.properties
new file mode 100644
index 0000000000..642d572ce9
--- /dev/null
+++ b/deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/.mvn/wrapper/maven-wrapper.properties
@@ -0,0 +1,2 @@
+distributionUrl=https://repo.maven.apache.org/maven2/org/apache/maven/apache-maven/3.6.3/apache-maven-3.6.3-bin.zip
+wrapperUrl=https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar
diff --git a/deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/Makefile b/deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/Makefile
new file mode 100644
index 0000000000..89be00931c
--- /dev/null
+++ b/deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/Makefile
@@ -0,0 +1,18 @@
+export PATH :=$(CURDIR):$(PATH)
+HOSTNAME := $(shell hostname)
+MVN_FLAGS += -Dhostname=$(HOSTNAME) \
+ -Dnode1.stream.port=$(NODE1_STREAM_PORT) \
+ -Dnode1.name=$(NODE1_NAME) \
+ -Dnode2.name=$(NODE2_NAME) \
+ -Dnode2.stream.port=$(NODE2_STREAM_PORT) \
+ -Drabbitmqctl.bin=$(RABBITMQCTL)
+
+.PHONY: tests clean
+
+tests:
+ # Note: to run a single test
+ # @mvnw -q $(MVN_FLAGS) -Dtest=StreamTest#metadataOnClusterShouldReturnLeaderAndReplicas test
+ @mvnw $(MVN_FLAGS) test
+
+clean:
+ @mvnw clean
diff --git a/deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/mvnw b/deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/mvnw
new file mode 100755
index 0000000000..41c0f0c23d
--- /dev/null
+++ b/deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/mvnw
@@ -0,0 +1,310 @@
+#!/bin/sh
+# ----------------------------------------------------------------------------
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+# ----------------------------------------------------------------------------
+
+# ----------------------------------------------------------------------------
+# Maven Start Up Batch script
+#
+# Required ENV vars:
+# ------------------
+# JAVA_HOME - location of a JDK home dir
+#
+# Optional ENV vars
+# -----------------
+# M2_HOME - location of maven2's installed home dir
+# MAVEN_OPTS - parameters passed to the Java VM when running Maven
+# e.g. to debug Maven itself, use
+# set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000
+# MAVEN_SKIP_RC - flag to disable loading of mavenrc files
+# ----------------------------------------------------------------------------
+
+if [ -z "$MAVEN_SKIP_RC" ] ; then
+
+ if [ -f /etc/mavenrc ] ; then
+ . /etc/mavenrc
+ fi
+
+ if [ -f "$HOME/.mavenrc" ] ; then
+ . "$HOME/.mavenrc"
+ fi
+
+fi
+
+# OS specific support. $var _must_ be set to either true or false.
+cygwin=false;
+darwin=false;
+mingw=false
+case "`uname`" in
+ CYGWIN*) cygwin=true ;;
+ MINGW*) mingw=true;;
+ Darwin*) darwin=true
+ # Use /usr/libexec/java_home if available, otherwise fall back to /Library/Java/Home
+ # See https://developer.apple.com/library/mac/qa/qa1170/_index.html
+ if [ -z "$JAVA_HOME" ]; then
+ if [ -x "/usr/libexec/java_home" ]; then
+ export JAVA_HOME="`/usr/libexec/java_home`"
+ else
+ export JAVA_HOME="/Library/Java/Home"
+ fi
+ fi
+ ;;
+esac
+
+if [ -z "$JAVA_HOME" ] ; then
+ if [ -r /etc/gentoo-release ] ; then
+ JAVA_HOME=`java-config --jre-home`
+ fi
+fi
+
+if [ -z "$M2_HOME" ] ; then
+ ## resolve links - $0 may be a link to maven's home
+ PRG="$0"
+
+ # need this for relative symlinks
+ while [ -h "$PRG" ] ; do
+ ls=`ls -ld "$PRG"`
+ link=`expr "$ls" : '.*-> \(.*\)$'`
+ if expr "$link" : '/.*' > /dev/null; then
+ PRG="$link"
+ else
+ PRG="`dirname "$PRG"`/$link"
+ fi
+ done
+
+ saveddir=`pwd`
+
+ M2_HOME=`dirname "$PRG"`/..
+
+ # make it fully qualified
+ M2_HOME=`cd "$M2_HOME" && pwd`
+
+ cd "$saveddir"
+ # echo Using m2 at $M2_HOME
+fi
+
+# For Cygwin, ensure paths are in UNIX format before anything is touched
+if $cygwin ; then
+ [ -n "$M2_HOME" ] &&
+ M2_HOME=`cygpath --unix "$M2_HOME"`
+ [ -n "$JAVA_HOME" ] &&
+ JAVA_HOME=`cygpath --unix "$JAVA_HOME"`
+ [ -n "$CLASSPATH" ] &&
+ CLASSPATH=`cygpath --path --unix "$CLASSPATH"`
+fi
+
+# For Mingw, ensure paths are in UNIX format before anything is touched
+if $mingw ; then
+ [ -n "$M2_HOME" ] &&
+ M2_HOME="`(cd "$M2_HOME"; pwd)`"
+ [ -n "$JAVA_HOME" ] &&
+ JAVA_HOME="`(cd "$JAVA_HOME"; pwd)`"
+fi
+
+if [ -z "$JAVA_HOME" ]; then
+ javaExecutable="`which javac`"
+ if [ -n "$javaExecutable" ] && ! [ "`expr \"$javaExecutable\" : '\([^ ]*\)'`" = "no" ]; then
+ # readlink(1) is not available as standard on Solaris 10.
+ readLink=`which readlink`
+ if [ ! `expr "$readLink" : '\([^ ]*\)'` = "no" ]; then
+ if $darwin ; then
+ javaHome="`dirname \"$javaExecutable\"`"
+ javaExecutable="`cd \"$javaHome\" && pwd -P`/javac"
+ else
+ javaExecutable="`readlink -f \"$javaExecutable\"`"
+ fi
+ javaHome="`dirname \"$javaExecutable\"`"
+ javaHome=`expr "$javaHome" : '\(.*\)/bin'`
+ JAVA_HOME="$javaHome"
+ export JAVA_HOME
+ fi
+ fi
+fi
+
+if [ -z "$JAVACMD" ] ; then
+ if [ -n "$JAVA_HOME" ] ; then
+ if [ -x "$JAVA_HOME/jre/sh/java" ] ; then
+ # IBM's JDK on AIX uses strange locations for the executables
+ JAVACMD="$JAVA_HOME/jre/sh/java"
+ else
+ JAVACMD="$JAVA_HOME/bin/java"
+ fi
+ else
+ JAVACMD="`which java`"
+ fi
+fi
+
+if [ ! -x "$JAVACMD" ] ; then
+ echo "Error: JAVA_HOME is not defined correctly." >&2
+ echo " We cannot execute $JAVACMD" >&2
+ exit 1
+fi
+
+if [ -z "$JAVA_HOME" ] ; then
+ echo "Warning: JAVA_HOME environment variable is not set."
+fi
+
+CLASSWORLDS_LAUNCHER=org.codehaus.plexus.classworlds.launcher.Launcher
+
+# traverses directory structure from process work directory to filesystem root
+# first directory with .mvn subdirectory is considered project base directory
+find_maven_basedir() {
+
+ if [ -z "$1" ]
+ then
+ echo "Path not specified to find_maven_basedir"
+ return 1
+ fi
+
+ basedir="$1"
+ wdir="$1"
+ while [ "$wdir" != '/' ] ; do
+ if [ -d "$wdir"/.mvn ] ; then
+ basedir=$wdir
+ break
+ fi
+ # workaround for JBEAP-8937 (on Solaris 10/Sparc)
+ if [ -d "${wdir}" ]; then
+ wdir=`cd "$wdir/.."; pwd`
+ fi
+ # end of workaround
+ done
+ echo "${basedir}"
+}
+
+# concatenates all lines of a file
+concat_lines() {
+ if [ -f "$1" ]; then
+ echo "$(tr -s '\n' ' ' < "$1")"
+ fi
+}
+
+BASE_DIR=`find_maven_basedir "$(pwd)"`
+if [ -z "$BASE_DIR" ]; then
+ exit 1;
+fi
+
+##########################################################################################
+# Extension to allow automatically downloading the maven-wrapper.jar from Maven-central
+# This allows using the maven wrapper in projects that prohibit checking in binary data.
+##########################################################################################
+if [ -r "$BASE_DIR/.mvn/wrapper/maven-wrapper.jar" ]; then
+ if [ "$MVNW_VERBOSE" = true ]; then
+ echo "Found .mvn/wrapper/maven-wrapper.jar"
+ fi
+else
+ if [ "$MVNW_VERBOSE" = true ]; then
+ echo "Couldn't find .mvn/wrapper/maven-wrapper.jar, downloading it ..."
+ fi
+ if [ -n "$MVNW_REPOURL" ]; then
+ jarUrl="$MVNW_REPOURL/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar"
+ else
+ jarUrl="https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar"
+ fi
+ while IFS="=" read key value; do
+ case "$key" in (wrapperUrl) jarUrl="$value"; break ;;
+ esac
+ done < "$BASE_DIR/.mvn/wrapper/maven-wrapper.properties"
+ if [ "$MVNW_VERBOSE" = true ]; then
+ echo "Downloading from: $jarUrl"
+ fi
+ wrapperJarPath="$BASE_DIR/.mvn/wrapper/maven-wrapper.jar"
+ if $cygwin; then
+ wrapperJarPath=`cygpath --path --windows "$wrapperJarPath"`
+ fi
+
+ if command -v wget > /dev/null; then
+ if [ "$MVNW_VERBOSE" = true ]; then
+ echo "Found wget ... using wget"
+ fi
+ if [ -z "$MVNW_USERNAME" ] || [ -z "$MVNW_PASSWORD" ]; then
+ wget "$jarUrl" -O "$wrapperJarPath"
+ else
+ wget --http-user=$MVNW_USERNAME --http-password=$MVNW_PASSWORD "$jarUrl" -O "$wrapperJarPath"
+ fi
+ elif command -v curl > /dev/null; then
+ if [ "$MVNW_VERBOSE" = true ]; then
+ echo "Found curl ... using curl"
+ fi
+ if [ -z "$MVNW_USERNAME" ] || [ -z "$MVNW_PASSWORD" ]; then
+ curl -o "$wrapperJarPath" "$jarUrl" -f
+ else
+ curl --user $MVNW_USERNAME:$MVNW_PASSWORD -o "$wrapperJarPath" "$jarUrl" -f
+ fi
+
+ else
+ if [ "$MVNW_VERBOSE" = true ]; then
+ echo "Falling back to using Java to download"
+ fi
+ javaClass="$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.java"
+ # For Cygwin, switch paths to Windows format before running javac
+ if $cygwin; then
+ javaClass=`cygpath --path --windows "$javaClass"`
+ fi
+ if [ -e "$javaClass" ]; then
+ if [ ! -e "$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.class" ]; then
+ if [ "$MVNW_VERBOSE" = true ]; then
+ echo " - Compiling MavenWrapperDownloader.java ..."
+ fi
+ # Compiling the Java class
+ ("$JAVA_HOME/bin/javac" "$javaClass")
+ fi
+ if [ -e "$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.class" ]; then
+ # Running the downloader
+ if [ "$MVNW_VERBOSE" = true ]; then
+ echo " - Running MavenWrapperDownloader.java ..."
+ fi
+ ("$JAVA_HOME/bin/java" -cp .mvn/wrapper MavenWrapperDownloader "$MAVEN_PROJECTBASEDIR")
+ fi
+ fi
+ fi
+fi
+##########################################################################################
+# End of extension
+##########################################################################################
+
+export MAVEN_PROJECTBASEDIR=${MAVEN_BASEDIR:-"$BASE_DIR"}
+if [ "$MVNW_VERBOSE" = true ]; then
+ echo $MAVEN_PROJECTBASEDIR
+fi
+MAVEN_OPTS="$(concat_lines "$MAVEN_PROJECTBASEDIR/.mvn/jvm.config") $MAVEN_OPTS"
+
+# For Cygwin, switch paths to Windows format before running java
+if $cygwin; then
+ [ -n "$M2_HOME" ] &&
+ M2_HOME=`cygpath --path --windows "$M2_HOME"`
+ [ -n "$JAVA_HOME" ] &&
+ JAVA_HOME=`cygpath --path --windows "$JAVA_HOME"`
+ [ -n "$CLASSPATH" ] &&
+ CLASSPATH=`cygpath --path --windows "$CLASSPATH"`
+ [ -n "$MAVEN_PROJECTBASEDIR" ] &&
+ MAVEN_PROJECTBASEDIR=`cygpath --path --windows "$MAVEN_PROJECTBASEDIR"`
+fi
+
+# Provide a "standardized" way to retrieve the CLI args that will
+# work with both Windows and non-Windows executions.
+MAVEN_CMD_LINE_ARGS="$MAVEN_CONFIG $@"
+export MAVEN_CMD_LINE_ARGS
+
+WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain
+
+exec "$JAVACMD" \
+ $MAVEN_OPTS \
+ -classpath "$MAVEN_PROJECTBASEDIR/.mvn/wrapper/maven-wrapper.jar" \
+ "-Dmaven.home=${M2_HOME}" "-Dmaven.multiModuleProjectDirectory=${MAVEN_PROJECTBASEDIR}" \
+ ${WRAPPER_LAUNCHER} $MAVEN_CONFIG "$@"
diff --git a/deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/mvnw.cmd b/deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/mvnw.cmd
new file mode 100644
index 0000000000..86115719e5
--- /dev/null
+++ b/deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/mvnw.cmd
@@ -0,0 +1,182 @@
+@REM ----------------------------------------------------------------------------
+@REM Licensed to the Apache Software Foundation (ASF) under one
+@REM or more contributor license agreements. See the NOTICE file
+@REM distributed with this work for additional information
+@REM regarding copyright ownership. The ASF licenses this file
+@REM to you under the Apache License, Version 2.0 (the
+@REM "License"); you may not use this file except in compliance
+@REM with the License. You may obtain a copy of the License at
+@REM
+@REM http://www.apache.org/licenses/LICENSE-2.0
+@REM
+@REM Unless required by applicable law or agreed to in writing,
+@REM software distributed under the License is distributed on an
+@REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+@REM KIND, either express or implied. See the License for the
+@REM specific language governing permissions and limitations
+@REM under the License.
+@REM ----------------------------------------------------------------------------
+
+@REM ----------------------------------------------------------------------------
+@REM Maven Start Up Batch script
+@REM
+@REM Required ENV vars:
+@REM JAVA_HOME - location of a JDK home dir
+@REM
+@REM Optional ENV vars
+@REM M2_HOME - location of maven2's installed home dir
+@REM MAVEN_BATCH_ECHO - set to 'on' to enable the echoing of the batch commands
+@REM MAVEN_BATCH_PAUSE - set to 'on' to wait for a keystroke before ending
+@REM MAVEN_OPTS - parameters passed to the Java VM when running Maven
+@REM e.g. to debug Maven itself, use
+@REM set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000
+@REM MAVEN_SKIP_RC - flag to disable loading of mavenrc files
+@REM ----------------------------------------------------------------------------
+
+@REM Begin all REM lines with '@' in case MAVEN_BATCH_ECHO is 'on'
+@echo off
+@REM set title of command window
+title %0
+@REM enable echoing by setting MAVEN_BATCH_ECHO to 'on'
+@if "%MAVEN_BATCH_ECHO%" == "on" echo %MAVEN_BATCH_ECHO%
+
+@REM set %HOME% to equivalent of $HOME
+if "%HOME%" == "" (set "HOME=%HOMEDRIVE%%HOMEPATH%")
+
+@REM Execute a user defined script before this one
+if not "%MAVEN_SKIP_RC%" == "" goto skipRcPre
+@REM check for pre script, once with legacy .bat ending and once with .cmd ending
+if exist "%HOME%\mavenrc_pre.bat" call "%HOME%\mavenrc_pre.bat"
+if exist "%HOME%\mavenrc_pre.cmd" call "%HOME%\mavenrc_pre.cmd"
+:skipRcPre
+
+@setlocal
+
+set ERROR_CODE=0
+
+@REM To isolate internal variables from possible post scripts, we use another setlocal
+@setlocal
+
+@REM ==== START VALIDATION ====
+if not "%JAVA_HOME%" == "" goto OkJHome
+
+echo.
+echo Error: JAVA_HOME not found in your environment. >&2
+echo Please set the JAVA_HOME variable in your environment to match the >&2
+echo location of your Java installation. >&2
+echo.
+goto error
+
+:OkJHome
+if exist "%JAVA_HOME%\bin\java.exe" goto init
+
+echo.
+echo Error: JAVA_HOME is set to an invalid directory. >&2
+echo JAVA_HOME = "%JAVA_HOME%" >&2
+echo Please set the JAVA_HOME variable in your environment to match the >&2
+echo location of your Java installation. >&2
+echo.
+goto error
+
+@REM ==== END VALIDATION ====
+
+:init
+
+@REM Find the project base dir, i.e. the directory that contains the folder ".mvn".
+@REM Fallback to current working directory if not found.
+
+set MAVEN_PROJECTBASEDIR=%MAVEN_BASEDIR%
+IF NOT "%MAVEN_PROJECTBASEDIR%"=="" goto endDetectBaseDir
+
+set EXEC_DIR=%CD%
+set WDIR=%EXEC_DIR%
+:findBaseDir
+IF EXIST "%WDIR%"\.mvn goto baseDirFound
+cd ..
+IF "%WDIR%"=="%CD%" goto baseDirNotFound
+set WDIR=%CD%
+goto findBaseDir
+
+:baseDirFound
+set MAVEN_PROJECTBASEDIR=%WDIR%
+cd "%EXEC_DIR%"
+goto endDetectBaseDir
+
+:baseDirNotFound
+set MAVEN_PROJECTBASEDIR=%EXEC_DIR%
+cd "%EXEC_DIR%"
+
+:endDetectBaseDir
+
+IF NOT EXIST "%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config" goto endReadAdditionalConfig
+
+@setlocal EnableExtensions EnableDelayedExpansion
+for /F "usebackq delims=" %%a in ("%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config") do set JVM_CONFIG_MAVEN_PROPS=!JVM_CONFIG_MAVEN_PROPS! %%a
+@endlocal & set JVM_CONFIG_MAVEN_PROPS=%JVM_CONFIG_MAVEN_PROPS%
+
+:endReadAdditionalConfig
+
+SET MAVEN_JAVA_EXE="%JAVA_HOME%\bin\java.exe"
+set WRAPPER_JAR="%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.jar"
+set WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain
+
+set DOWNLOAD_URL="https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar"
+
+FOR /F "tokens=1,2 delims==" %%A IN ("%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.properties") DO (
+ IF "%%A"=="wrapperUrl" SET DOWNLOAD_URL=%%B
+)
+
+@REM Extension to allow automatically downloading the maven-wrapper.jar from Maven-central
+@REM This allows using the maven wrapper in projects that prohibit checking in binary data.
+if exist %WRAPPER_JAR% (
+ if "%MVNW_VERBOSE%" == "true" (
+ echo Found %WRAPPER_JAR%
+ )
+) else (
+ if not "%MVNW_REPOURL%" == "" (
+ SET DOWNLOAD_URL="%MVNW_REPOURL%/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar"
+ )
+ if "%MVNW_VERBOSE%" == "true" (
+ echo Couldn't find %WRAPPER_JAR%, downloading it ...
+ echo Downloading from: %DOWNLOAD_URL%
+ )
+
+ powershell -Command "&{"^
+ "$webclient = new-object System.Net.WebClient;"^
+ "if (-not ([string]::IsNullOrEmpty('%MVNW_USERNAME%') -and [string]::IsNullOrEmpty('%MVNW_PASSWORD%'))) {"^
+ "$webclient.Credentials = new-object System.Net.NetworkCredential('%MVNW_USERNAME%', '%MVNW_PASSWORD%');"^
+ "}"^
+ "[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12; $webclient.DownloadFile('%DOWNLOAD_URL%', '%WRAPPER_JAR%')"^
+ "}"
+ if "%MVNW_VERBOSE%" == "true" (
+ echo Finished downloading %WRAPPER_JAR%
+ )
+)
+@REM End of extension
+
+@REM Provide a "standardized" way to retrieve the CLI args that will
+@REM work with both Windows and non-Windows executions.
+set MAVEN_CMD_LINE_ARGS=%*
+
+%MAVEN_JAVA_EXE% %JVM_CONFIG_MAVEN_PROPS% %MAVEN_OPTS% %MAVEN_DEBUG_OPTS% -classpath %WRAPPER_JAR% "-Dmaven.multiModuleProjectDirectory=%MAVEN_PROJECTBASEDIR%" %WRAPPER_LAUNCHER% %MAVEN_CONFIG% %*
+if ERRORLEVEL 1 goto error
+goto end
+
+:error
+set ERROR_CODE=1
+
+:end
+@endlocal & set ERROR_CODE=%ERROR_CODE%
+
+if not "%MAVEN_SKIP_RC%" == "" goto skipRcPost
+@REM check for post script, once with legacy .bat ending and once with .cmd ending
+if exist "%HOME%\mavenrc_post.bat" call "%HOME%\mavenrc_post.bat"
+if exist "%HOME%\mavenrc_post.cmd" call "%HOME%\mavenrc_post.cmd"
+:skipRcPost
+
+@REM pause the script if MAVEN_BATCH_PAUSE is set to 'on'
+if "%MAVEN_BATCH_PAUSE%" == "on" pause
+
+if "%MAVEN_TERMINATE_CMD%" == "on" exit %ERROR_CODE%
+
+exit /B %ERROR_CODE%
diff --git a/deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/pom.xml b/deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/pom.xml
new file mode 100644
index 0000000000..aa27c29baf
--- /dev/null
+++ b/deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/pom.xml
@@ -0,0 +1,143 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+
+ <groupId>com.rabbitmq.stream</groupId>
+ <artifactId>rabbitmq-stream-tests</artifactId>
+ <version>1.0-SNAPSHOT</version>
+
+ <licenses>
+ <license>
+ <name>MPL 2.0</name>
+ <url>https://www.mozilla.org/en-US/MPL/2.0/</url>
+ <distribution>repo</distribution>
+ </license>
+ </licenses>
+
+ <developers>
+ <developer>
+ <email>info@rabbitmq.com</email>
+ <name>Team RabbitMQ</name>
+ <organization>VMware, Inc. or its affiliates.</organization>
+ <organizationUrl>https://rabbitmq.com</organizationUrl>
+ </developer>
+ </developers>
+
+ <properties>
+ <stream-client.version>0.1.0-SNAPSHOT</stream-client.version>
+ <proton-j.version>0.33.6</proton-j.version>
+ <junit.jupiter.version>5.7.0</junit.jupiter.version>
+ <assertj.version>3.17.2</assertj.version>
+ <mockito.version>3.5.11</mockito.version>
+ <logback.version>1.2.3</logback.version>
+ <maven.compiler.plugin.version>3.8.1</maven.compiler.plugin.version>
+ <maven-surefire-plugin.version>2.22.2</maven-surefire-plugin.version>
+ <spotless.version>2.2.0</spotless.version>
+ <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
+ </properties>
+
+ <dependencies>
+
+ <dependency>
+ <groupId>com.rabbitmq</groupId>
+ <artifactId>stream-client</artifactId>
+ <version>${stream-client.version}</version>
+ </dependency>
+
+ <dependency>
+ <groupId>org.apache.qpid</groupId>
+ <artifactId>proton-j</artifactId>
+ <version>${proton-j.version}</version>
+ </dependency>
+
+ <dependency>
+ <groupId>org.junit.jupiter</groupId>
+ <artifactId>junit-jupiter-engine</artifactId>
+ <version>${junit.jupiter.version}</version>
+ <scope>test</scope>
+ </dependency>
+
+ <dependency>
+ <groupId>org.junit.jupiter</groupId>
+ <artifactId>junit-jupiter-params</artifactId>
+ <version>${junit.jupiter.version}</version>
+ <scope>test</scope>
+ </dependency>
+
+ <dependency>
+ <groupId>org.assertj</groupId>
+ <artifactId>assertj-core</artifactId>
+ <version>${assertj.version}</version>
+ <scope>test</scope>
+ </dependency>
+
+ <dependency>
+ <groupId>org.mockito</groupId>
+ <artifactId>mockito-core</artifactId>
+ <version>${mockito.version}</version>
+ <scope>test</scope>
+ </dependency>
+
+ <dependency>
+ <groupId>ch.qos.logback</groupId>
+ <artifactId>logback-classic</artifactId>
+ <version>${logback.version}</version>
+ <scope>test</scope>
+ </dependency>
+
+ </dependencies>
+
+ <build>
+
+ <plugins>
+
+ <plugin>
+ <artifactId>maven-compiler-plugin</artifactId>
+ <version>${maven.compiler.plugin.version}</version>
+ <configuration>
+ <source>1.8</source>
+ <target>1.8</target>
+ <compilerArgs>
+ <arg>-Xlint:deprecation</arg>
+ <arg>-Xlint:unchecked</arg>
+ </compilerArgs>
+ </configuration>
+ </plugin>
+
+ <plugin>
+ <artifactId>maven-surefire-plugin</artifactId>
+ <version>${maven-surefire-plugin.version}</version>
+ </plugin>
+
+ <plugin>
+ <groupId>com.diffplug.spotless</groupId>
+ <artifactId>spotless-maven-plugin</artifactId>
+ <version>${spotless.version}</version>
+ <configuration>
+ <java>
+ <googleJavaFormat>
+ <version>1.9</version>
+ <style>GOOGLE</style>
+ </googleJavaFormat>
+ </java>
+ </configuration>
+ </plugin>
+
+ </plugins>
+
+ </build>
+
+ <repositories>
+
+ <repository>
+ <id>ossrh</id>
+ <url>https://oss.sonatype.org/content/repositories/snapshots</url>
+ <snapshots><enabled>true</enabled></snapshots>
+ <releases><enabled>false</enabled></releases>
+ </repository>
+
+ </repositories>
+
+</project>
\ No newline at end of file
diff --git a/deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/src/test/java/com/rabbitmq/stream/ClusterSizeTest.java b/deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/src/test/java/com/rabbitmq/stream/ClusterSizeTest.java
new file mode 100644
index 0000000000..993c19b852
--- /dev/null
+++ b/deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/src/test/java/com/rabbitmq/stream/ClusterSizeTest.java
@@ -0,0 +1,65 @@
+// The contents of this file are subject to the Mozilla Public License
+// Version 2.0 (the "License"); you may not use this file except in
+// compliance with the License. You may obtain a copy of the License
+// at https://www.mozilla.org/en-US/MPL/2.0/
+//
+// Software distributed under the License is distributed on an "AS IS"
+// basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+// the License for the specific language governing rights and
+// limitations under the License.
+//
+// The Original Code is RabbitMQ.
+//
+// The Initial Developer of the Original Code is Pivotal Software, Inc.
+// Copyright (c) 2020 VMware, Inc. or its affiliates. All rights reserved.
+//
+
+package com.rabbitmq.stream;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+import com.rabbitmq.stream.impl.Client;
+import com.rabbitmq.stream.impl.Client.Response;
+import com.rabbitmq.stream.impl.Client.StreamMetadata;
+import java.util.Collections;
+import java.util.UUID;
+import org.junit.jupiter.api.extension.ExtendWith;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.CsvSource;
+import org.junit.jupiter.params.provider.ValueSource;
+
+@ExtendWith(TestUtils.StreamTestInfrastructureExtension.class)
+public class ClusterSizeTest {
+
+ TestUtils.ClientFactory cf;
+
+ @ParameterizedTest
+ @ValueSource(strings = {"-1", "0"})
+ void clusterSizeZeroShouldReturnError(String clusterSize) {
+ Client client = cf.get(new Client.ClientParameters().port(TestUtils.streamPortNode1()));
+ String s = UUID.randomUUID().toString();
+ Response response =
+ client.create(s, Collections.singletonMap("initial-cluster-size", clusterSize));
+ assertThat(response.isOk()).isFalse();
+ assertThat(response.getResponseCode()).isEqualTo(Constants.RESPONSE_CODE_PRECONDITION_FAILED);
+ }
+
+ @ParameterizedTest
+ @CsvSource({"1,1", "2,2", "3,3", "5,3"})
+ void clusterSizeShouldReflectOnMetadata(String requestedClusterSize, int expectedClusterSize) {
+ Client client = cf.get(new Client.ClientParameters().port(TestUtils.streamPortNode1()));
+ String s = UUID.randomUUID().toString();
+ try {
+ Response response =
+ client.create(s, Collections.singletonMap("initial-cluster-size", requestedClusterSize));
+ assertThat(response.isOk()).isTrue();
+ StreamMetadata metadata = client.metadata(s).get(s);
+ assertThat(metadata).isNotNull();
+ assertThat(metadata.getResponseCode()).isEqualTo(Constants.RESPONSE_CODE_OK);
+ int actualClusterSize = metadata.getLeader() == null ? 0 : 1 + metadata.getReplicas().size();
+ assertThat(actualClusterSize).isEqualTo(expectedClusterSize);
+ } finally {
+ client.delete(s);
+ }
+ }
+}
diff --git a/deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/src/test/java/com/rabbitmq/stream/FailureTest.java b/deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/src/test/java/com/rabbitmq/stream/FailureTest.java
new file mode 100644
index 0000000000..c7a390f00d
--- /dev/null
+++ b/deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/src/test/java/com/rabbitmq/stream/FailureTest.java
@@ -0,0 +1,541 @@
+// The contents of this file are subject to the Mozilla Public License
+// Version 2.0 (the "License"); you may not use this file except in
+// compliance with the License. You may obtain a copy of the License
+// at https://www.mozilla.org/en-US/MPL/2.0/
+//
+// Software distributed under the License is distributed on an "AS IS"
+// basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+// the License for the specific language governing rights and
+// limitations under the License.
+//
+// The Original Code is RabbitMQ.
+//
+// The Initial Developer of the Original Code is Pivotal Software, Inc.
+// Copyright (c) 2020 VMware, Inc. or its affiliates. All rights reserved.
+//
+
+package com.rabbitmq.stream;
+
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.fail;
+
+import com.rabbitmq.stream.codec.WrapperMessageBuilder;
+import com.rabbitmq.stream.impl.Client;
+import java.nio.charset.StandardCharsets;
+import java.time.Duration;
+import java.util.*;
+import java.util.concurrent.*;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.atomic.AtomicReference;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.extension.ExtendWith;
+
+@ExtendWith(TestUtils.StreamTestInfrastructureExtension.class)
+public class FailureTest {
+
+ TestUtils.ClientFactory cf;
+ String stream;
+ ExecutorService executorService;
+
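+ // sleep helper; the Duration parameter keeps it from clashing with Object.wait(long)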
+ static void wait(Duration duration) {
+ try {
+ Thread.sleep(duration.toMillis());
+ } catch (InterruptedException e) {
+ throw new RuntimeException(e);
+ }
+ }
+
+ @AfterEach
+ void tearDown() {
+ if (executorService != null) {
+ executorService.shutdownNow();
+ }
+ }
+
+ @Test
+ void leaderFailureWhenPublisherConnectedToReplica() throws Exception {
+ Set<String> messages = new HashSet<>();
+ Client client = cf.get(new Client.ClientParameters().port(TestUtils.streamPortNode1()));
+ Map<String, Client.StreamMetadata> metadata = client.metadata(stream);
+ Client.StreamMetadata streamMetadata = metadata.get(stream);
+ assertThat(streamMetadata).isNotNull();
+
+ assertThat(streamMetadata.getLeader().getPort()).isEqualTo(TestUtils.streamPortNode1());
+ assertThat(streamMetadata.getReplicas()).isNotEmpty();
+ Client.Broker replica = streamMetadata.getReplicas().get(0);
+ assertThat(replica.getPort()).isNotEqualTo(TestUtils.streamPortNode1());
+
+ AtomicReference<CountDownLatch> confirmLatch = new AtomicReference<>(new CountDownLatch(1));
+
+ CountDownLatch metadataLatch = new CountDownLatch(1);
+ Client publisher =
+ cf.get(
+ new Client.ClientParameters()
+ .port(replica.getPort())
+ .metadataListener((stream, code) -> metadataLatch.countDown())
+ .publishConfirmListener(
+ (publisherId, publishingId) -> confirmLatch.get().countDown()));
+ String message = "all nodes available";
+ messages.add(message);
+ publisher.publish(
+ stream,
+ (byte) 1,
+ Collections.singletonList(
+ publisher.messageBuilder().addData(message.getBytes(StandardCharsets.UTF_8)).build()));
+ assertThat(confirmLatch.get().await(10, TimeUnit.SECONDS)).isTrue();
+ confirmLatch.set(null);
+
+ try {
+ Host.rabbitmqctl("stop_app");
+ try {
+ cf.get(new Client.ClientParameters().port(TestUtils.streamPortNode1()));
+ fail("Node app stopped, connecting should not be possible");
+ } catch (Exception e) {
+ // OK
+ }
+
+ assertThat(metadataLatch.await(10, TimeUnit.SECONDS)).isTrue();
+
+ // wait until there's a new leader
+ TestUtils.waitAtMost(
+ Duration.ofSeconds(10),
+ () -> {
+ Client.StreamMetadata m = publisher.metadata(stream).get(stream);
+ return m.getLeader() != null && m.getLeader().getPort() != TestUtils.streamPortNode1();
+ });
+
+ confirmLatch.set(new CountDownLatch(1));
+ message = "2 nodes available";
+ messages.add(message);
+ publisher.publish(
+ stream,
+ (byte) 1,
+ Collections.singletonList(
+ publisher
+ .messageBuilder()
+ .addData(message.getBytes(StandardCharsets.UTF_8))
+ .build()));
+ assertThat(confirmLatch.get().await(10, TimeUnit.SECONDS)).isTrue();
+ confirmLatch.set(null);
+ } finally {
+ Host.rabbitmqctl("start_app");
+ }
+
+ // wait until all the replicas are there
+ TestUtils.waitAtMost(
+ Duration.ofSeconds(5),
+ () -> {
+ Client.StreamMetadata m = publisher.metadata(stream).get(stream);
+ return m.getReplicas().size() == 2;
+ });
+
+ confirmLatch.set(new CountDownLatch(1));
+ message = "all nodes are back";
+ messages.add(message);
+ publisher.publish(
+ stream,
+ (byte) 1,
+ Collections.singletonList(
+ publisher.messageBuilder().addData(message.getBytes(StandardCharsets.UTF_8)).build()));
+ assertThat(confirmLatch.get().await(10, TimeUnit.SECONDS)).isTrue();
+ confirmLatch.set(null);
+
+ CountDownLatch consumeLatch = new CountDownLatch(2);
+ Set<String> bodies = ConcurrentHashMap.newKeySet();
+ Client consumer =
+ cf.get(
+ new Client.ClientParameters()
+ .port(TestUtils.streamPortNode1())
+ .messageListener(
+ (subscriptionId, offset, msg) -> {
+ bodies.add(new String(msg.getBodyAsBinary(), StandardCharsets.UTF_8));
+ consumeLatch.countDown();
+ }));
+
+ TestUtils.waitAtMost(
+ Duration.ofSeconds(5),
+ () -> {
+ Client.Response response =
+ consumer.subscribe((byte) 1, stream, OffsetSpecification.first(), 10);
+ return response.isOk();
+ });
+ assertThat(consumeLatch.await(10, TimeUnit.SECONDS)).isTrue();
+ assertThat(bodies)
+ .hasSize(3)
+ .contains("all nodes available", "2 nodes available", "all nodes are back");
+ }
+
+ @Test
+ void noLostConfirmedMessagesWhenLeaderGoesAway() throws Exception {
+ executorService = Executors.newCachedThreadPool();
+ Client client = cf.get(new Client.ClientParameters().port(TestUtils.streamPortNode1()));
+ Map<String, Client.StreamMetadata> metadata = client.metadata(stream);
+ Client.StreamMetadata streamMetadata = metadata.get(stream);
+ assertThat(streamMetadata).isNotNull();
+
+ assertThat(streamMetadata.getLeader()).isNotNull();
+ assertThat(streamMetadata.getLeader().getPort()).isEqualTo(TestUtils.streamPortNode1());
+
+ Map<Long, Message> published = new ConcurrentHashMap<>();
+ Set<Message> confirmed = ConcurrentHashMap.newKeySet();
+
+ Client.PublishConfirmListener publishConfirmListener =
+ (publisherId, publishingId) -> {
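+ // a confirm can arrive before publish() has returned the publishing id and the
+ // message has been recorded, so poll briefly until the entry shows up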
+ Message confirmedMessage;
+ int attempts = 0;
+ while ((confirmedMessage = published.remove(publishingId)) == null && attempts < 10) {
+ wait(Duration.ofMillis(5));
+ attempts++;
+ }
+ confirmed.add(confirmedMessage);
+ };
+
+ AtomicLong generation = new AtomicLong(0);
+ AtomicLong sequence = new AtomicLong(0);
+ AtomicBoolean connected = new AtomicBoolean(true);
+ AtomicReference<Client> publisher = new AtomicReference<>();
+ CountDownLatch reconnectionLatch = new CountDownLatch(1);
+ AtomicReference<Client.ShutdownListener> shutdownListenerReference = new AtomicReference<>();
+ Client.ShutdownListener shutdownListener =
+ shutdownContext -> {
+ if (shutdownContext.getShutdownReason()
+ == Client.ShutdownContext.ShutdownReason.UNKNOWN) {
+ // avoid long-running task in the IO thread
+ executorService.submit(
+ () -> {
+ connected.set(false);
+
+ Client locator =
+ cf.get(new Client.ClientParameters().port(TestUtils.streamPortNode2()));
+ // wait until there's a new leader
+ try {
+ TestUtils.waitAtMost(
+ Duration.ofSeconds(5),
+ () -> {
+ Client.StreamMetadata m = locator.metadata(stream).get(stream);
+ return m.getLeader() != null
+ && m.getLeader().getPort() != TestUtils.streamPortNode1();
+ });
+ } catch (Throwable e) {
+ reconnectionLatch.countDown();
+ return;
+ }
+
+ int newLeaderPort = locator.metadata(stream).get(stream).getLeader().getPort();
+ Client newPublisher =
+ cf.get(
+ new Client.ClientParameters()
+ .port(newLeaderPort)
+ .shutdownListener(shutdownListenerReference.get())
+ .publishConfirmListener(publishConfirmListener));
+
+ generation.incrementAndGet();
+ published.clear();
+ publisher.set(newPublisher);
+ connected.set(true);
+
+ reconnectionLatch.countDown();
+ });
+ }
+ };
+ shutdownListenerReference.set(shutdownListener);
+
+ client =
+ cf.get(
+ new Client.ClientParameters()
+ .port(streamMetadata.getLeader().getPort())
+ .shutdownListener(shutdownListener)
+ .publishConfirmListener(publishConfirmListener));
+
+ publisher.set(client);
+
+ AtomicBoolean keepPublishing = new AtomicBoolean(true);
+
+ executorService.submit(
+ () -> {
+ while (keepPublishing.get()) {
+ if (connected.get()) {
+ Message message =
+ publisher
+ .get()
+ .messageBuilder()
+ .properties()
+ .messageId(sequence.getAndIncrement())
+ .messageBuilder()
+ .applicationProperties()
+ .entry("generation", generation.get())
+ .messageBuilder()
+ .build();
+ try {
+ long publishingId =
+ publisher
+ .get()
+ .publish(stream, (byte) 1, Collections.singletonList(message))
+ .get(0);
+ published.put(publishingId, message);
+ } catch (Exception e) {
+ // keep going
+ }
+ wait(Duration.ofMillis(10));
+ } else {
+ wait(Duration.ofSeconds(1));
+ }
+ }
+ });
+
+ // let's publish for a bit of time
+ Thread.sleep(2000);
+
+ assertThat(confirmed).isNotEmpty();
+ int confirmedCount = confirmed.size();
+
+ try {
+ Host.rabbitmqctl("stop_app");
+
+ assertThat(reconnectionLatch.await(10, TimeUnit.SECONDS)).isTrue();
+
+ // let's publish for a bit of time
+ Thread.sleep(2000);
+
+ } finally {
+ Host.rabbitmqctl("start_app");
+ }
+ assertThat(confirmed).hasSizeGreaterThan(confirmedCount);
+ confirmedCount = confirmed.size();
+
+ Client metadataClient = cf.get(new Client.ClientParameters().port(TestUtils.streamPortNode2()));
+ // wait until all the replicas are there
+ TestUtils.waitAtMost(
+ Duration.ofSeconds(5),
+ () -> {
+ Client.StreamMetadata m = metadataClient.metadata(stream).get(stream);
+ return m.getReplicas().size() == 2;
+ });
+
+ // let's publish for a bit of time
+ Thread.sleep(2000);
+
+ assertThat(confirmed).hasSizeGreaterThan(confirmedCount);
+
+ keepPublishing.set(false);
+
+ Queue<Message> consumed = new ConcurrentLinkedQueue<>();
+ Set<Long> generations = ConcurrentHashMap.newKeySet();
+ CountDownLatch consumedLatch = new CountDownLatch(1);
+ Client.StreamMetadata m = metadataClient.metadata(stream).get(stream);
+ Client consumer =
+ cf.get(
+ new Client.ClientParameters()
+ .port(m.getReplicas().get(0).getPort())
+ .chunkListener(
+ (client1, subscriptionId, offset, messageCount, dataSize) ->
+ client1.credit(subscriptionId, 1))
+ .messageListener(
+ (subscriptionId, offset, message) -> {
+ consumed.add(message);
+ generations.add((Long) message.getApplicationProperties().get("generation"));
+ if (consumed.size() == confirmed.size()) {
+ consumedLatch.countDown();
+ }
+ }));
+
+ Client.Response response =
+ consumer.subscribe((byte) 1, stream, OffsetSpecification.first(), 10);
+ assertThat(response.isOk()).isTrue();
+
+ assertThat(consumedLatch.await(5, TimeUnit.SECONDS)).isTrue();
+ assertThat(generations).hasSize(2).contains(0L, 1L);
+ assertThat(consumed).hasSizeGreaterThanOrEqualTo(confirmed.size());
+ long lastMessageId = -1;
+ for (Message message : consumed) {
+ long messageId = message.getProperties().getMessageIdAsLong();
+ assertThat(messageId).isGreaterThanOrEqualTo(lastMessageId);
+ lastMessageId = messageId;
+ }
+ assertThat(lastMessageId).isPositive().isLessThanOrEqualTo(sequence.get());
+ }
+
+ @Test
+ void consumerReattachesToOtherReplicaWhenReplicaGoesAway() throws Exception {
+ executorService = Executors.newCachedThreadPool();
+ Client metadataClient = cf.get(new Client.ClientParameters().port(TestUtils.streamPortNode1()));
+ Map<String, Client.StreamMetadata> metadata = metadataClient.metadata(stream);
+ Client.StreamMetadata streamMetadata = metadata.get(stream);
+ assertThat(streamMetadata).isNotNull();
+
+ assertThat(streamMetadata.getLeader()).isNotNull();
+ assertThat(streamMetadata.getLeader().getPort()).isEqualTo(TestUtils.streamPortNode1());
+
+ Map<Long, Message> published = new ConcurrentHashMap<>();
+ Set<Message> confirmed = ConcurrentHashMap.newKeySet();
+ Set<Long> confirmedIds = ConcurrentHashMap.newKeySet();
+ Client.PublishConfirmListener publishConfirmListener =
+ (publisherId, publishingId) -> {
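+ // same confirm/publish race handling as in the test above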
+ Message confirmedMessage;
+ int attempts = 0;
+ while ((confirmedMessage = published.remove(publishingId)) == null && attempts < 10) {
+ wait(Duration.ofMillis(5));
+ attempts++;
+ }
+ confirmed.add(confirmedMessage);
+ confirmedIds.add(confirmedMessage.getProperties().getMessageIdAsLong());
+ };
+
+ Client publisher =
+ cf.get(
+ new Client.ClientParameters()
+ .port(streamMetadata.getLeader().getPort())
+ .publishConfirmListener(publishConfirmListener));
+
+ AtomicLong generation = new AtomicLong(0);
+ AtomicLong sequence = new AtomicLong(0);
+ AtomicBoolean keepPublishing = new AtomicBoolean(true);
+ CountDownLatch publishingLatch = new CountDownLatch(1);
+
+ executorService.submit(
+ () -> {
+ while (keepPublishing.get()) {
+ Message message =
+ new WrapperMessageBuilder()
+ .properties()
+ .messageId(sequence.getAndIncrement())
+ .messageBuilder()
+ .applicationProperties()
+ .entry("generation", generation.get())
+ .messageBuilder()
+ .build();
+ try {
+ long publishingId =
+ publisher.publish(stream, (byte) 1, Collections.singletonList(message)).get(0);
+ published.put(publishingId, message);
+ } catch (Exception e) {
+ // keep going
+ }
+ wait(Duration.ofMillis(10));
+ }
+ publishingLatch.countDown();
+ });
+
+ Queue<Message> consumed = new ConcurrentLinkedQueue<>();
+
+ Client.Broker replica =
+ streamMetadata.getReplicas().stream()
+ .filter(broker -> broker.getPort() == TestUtils.streamPortNode2())
+ .findFirst()
+ .orElseThrow(() -> new NoSuchElementException());
+
+ AtomicLong lastProcessedOffset = new AtomicLong(-1);
+ Set<Long> generations = ConcurrentHashMap.newKeySet();
+ Set<Long> consumedIds = ConcurrentHashMap.newKeySet();
+ Client.MessageListener messageListener =
+ (subscriptionId, offset, message) -> {
+ consumed.add(message);
+ generations.add((Long) message.getApplicationProperties().get("generation"));
+ consumedIds.add(message.getProperties().getMessageIdAsLong());
+ lastProcessedOffset.set(offset);
+ };
+
+ CountDownLatch reconnectionLatch = new CountDownLatch(1);
+ AtomicReference<Client.ShutdownListener> shutdownListenerReference = new AtomicReference<>();
+ Client.ShutdownListener shutdownListener =
+ shutdownContext -> {
+ if (shutdownContext.getShutdownReason()
+ == Client.ShutdownContext.ShutdownReason.UNKNOWN) {
+ // avoid long-running task in the IO thread
+ executorService.submit(
+ () -> {
+ Client.StreamMetadata m = metadataClient.metadata(stream).get(stream);
+ int newReplicaPort = m.getReplicas().get(0).getPort();
+
+ Client newConsumer =
+ cf.get(
+ new Client.ClientParameters()
+ .port(newReplicaPort)
+ .shutdownListener(shutdownListenerReference.get())
+ .chunkListener(
+ (client1, subscriptionId, offset, messageCount, dataSize) ->
+ client1.credit(subscriptionId, 1))
+ .messageListener(messageListener));
+
+ newConsumer.subscribe(
+ (byte) 1,
+ stream,
+ OffsetSpecification.offset(lastProcessedOffset.get() + 1),
+ 10);
+
+ generation.incrementAndGet();
+ reconnectionLatch.countDown();
+ });
+ }
+ };
+ shutdownListenerReference.set(shutdownListener);
+
+ Client consumer =
+ cf.get(
+ new Client.ClientParameters()
+ .port(replica.getPort())
+ .shutdownListener(shutdownListener)
+ .chunkListener(
+ (client1, subscriptionId, offset, messageCount, dataSize) ->
+ client1.credit(subscriptionId, 1))
+ .messageListener(messageListener));
+
+ Client.Response response =
+ consumer.subscribe((byte) 1, stream, OffsetSpecification.first(), 10);
+ assertThat(response.isOk()).isTrue();
+
+ // let's publish for a bit of time
+ Thread.sleep(2000);
+
+ assertThat(confirmed).isNotEmpty();
+ assertThat(consumed).isNotEmpty();
+ int confirmedCount = confirmed.size();
+
+ try {
+ Host.rabbitmqctl("stop_app", Host.node2name());
+
+ assertThat(reconnectionLatch.await(10, TimeUnit.SECONDS)).isTrue();
+
+ // let's publish for a bit of time
+ Thread.sleep(2000);
+
+ } finally {
+ Host.rabbitmqctl("start_app", Host.node2name());
+ }
+ assertThat(confirmed).hasSizeGreaterThan(confirmedCount);
+ confirmedCount = confirmed.size();
+
+ // wait until all the replicas are there
+ TestUtils.waitAtMost(
+ Duration.ofSeconds(5),
+ () -> {
+ Client.StreamMetadata m = metadataClient.metadata(stream).get(stream);
+ return m.getReplicas().size() == 2;
+ });
+
+ // let's publish for a bit of time
+ Thread.sleep(2000);
+
+ assertThat(confirmed).hasSizeGreaterThan(confirmedCount);
+
+ keepPublishing.set(false);
+
+ assertThat(publishingLatch.await(5, TimeUnit.SECONDS)).isTrue();
+
+ TestUtils.waitAtMost(Duration.ofSeconds(5), () -> consumed.size() >= confirmed.size());
+
+ assertThat(generations).hasSize(2).contains(0L, 1L);
+ assertThat(consumed).hasSizeGreaterThanOrEqualTo(confirmed.size());
+ long lastMessageId = -1;
+ for (Message message : consumed) {
+ long messageId = message.getProperties().getMessageIdAsLong();
+ assertThat(messageId).isGreaterThanOrEqualTo(lastMessageId);
+ lastMessageId = messageId;
+ }
+ assertThat(lastMessageId).isPositive().isLessThanOrEqualTo(sequence.get());
+
+ confirmedIds.forEach(confirmedId -> assertThat(consumedIds).contains(confirmedId));
+ }
+}
diff --git a/deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/src/test/java/com/rabbitmq/stream/Host.java b/deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/src/test/java/com/rabbitmq/stream/Host.java
new file mode 100644
index 0000000000..0134038a8b
--- /dev/null
+++ b/deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/src/test/java/com/rabbitmq/stream/Host.java
@@ -0,0 +1,117 @@
+// The contents of this file are subject to the Mozilla Public License
+// Version 2.0 (the "License"); you may not use this file except in
+// compliance with the License. You may obtain a copy of the License
+// at https://www.mozilla.org/en-US/MPL/2.0/
+//
+// Software distributed under the License is distributed on an "AS IS"
+// basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+// the License for the specific language governing rights and
+// limitations under the License.
+//
+// The Original Code is RabbitMQ.
+//
+// The Initial Developer of the Original Code is Pivotal Software, Inc.
+// Copyright (c) 2020 VMware, Inc. or its affiliates. All rights reserved.
+//
+
+package com.rabbitmq.stream;
+
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+import java.net.InetAddress;
+import java.net.UnknownHostException;
+
+public class Host {
+
+ private static String capture(InputStream is) throws IOException {
+ BufferedReader br = new BufferedReader(new InputStreamReader(is));
+ String line;
+ StringBuilder buff = new StringBuilder();
+ while ((line = br.readLine()) != null) {
+ buff.append(line).append("\n");
+ }
+ return buff.toString();
+ }
+
+ private static Process executeCommand(String command) throws IOException {
+ Process pr = executeCommandProcess(command);
+
+ int ev = waitForExitValue(pr);
+ if (ev != 0) {
+ String stdout = capture(pr.getInputStream());
+ String stderr = capture(pr.getErrorStream());
+ throw new IOException(
+ "unexpected command exit value: "
+ + ev
+ + "\ncommand: "
+ + command
+ + "\n"
+ + "\nstdout:\n"
+ + stdout
+ + "\nstderr:\n"
+ + stderr
+ + "\n");
+ }
+ return pr;
+ }
+
+ private static int waitForExitValue(Process pr) {
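+ // block until the process exits, retrying if the wait is interrupted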
+ while (true) {
+ try {
+ pr.waitFor();
+ break;
+ } catch (InterruptedException ignored) {
+ }
+ }
+ return pr.exitValue();
+ }
+
+ private static Process executeCommandProcess(String command) throws IOException {
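+ // run the command through a shell (cmd.exe on Windows, /bin/sh elsewhere)
+ // so shell syntax in the command string keeps working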
+ String[] finalCommand;
+ if (System.getProperty("os.name").toLowerCase().contains("windows")) {
+ finalCommand = new String[4];
+ finalCommand[0] = "C:\\winnt\\system32\\cmd.exe";
+ finalCommand[1] = "/y";
+ finalCommand[2] = "/c";
+ finalCommand[3] = command;
+ } else {
+ finalCommand = new String[3];
+ finalCommand[0] = "/bin/sh";
+ finalCommand[1] = "-c";
+ finalCommand[2] = command;
+ }
+ return Runtime.getRuntime().exec(finalCommand);
+ }
+
+ public static Process rabbitmqctl(String command) throws IOException {
+ return rabbitmqctl(command, node1name());
+ }
+
+ public static Process rabbitmqctl(String command, String nodename) throws IOException {
+ return executeCommand(rabbitmqctlCommand() + " -n '" + nodename + "'" + " " + command);
+ }
+
+ public static String node1name() {
+ try {
+ return System.getProperty(
+ "node1.name", "rabbit-1@" + InetAddress.getLocalHost().getHostName());
+ } catch (UnknownHostException e) {
+ throw new RuntimeException(e);
+ }
+ }
+
+ public static String node2name() {
+ try {
+ return System.getProperty(
+ "node2.name", "rabbit-2@" + InetAddress.getLocalHost().getHostName());
+ } catch (UnknownHostException e) {
+ throw new RuntimeException(e);
+ }
+ }
+
+ static String rabbitmqctlCommand() {
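+ // the rabbitmqctl.bin system property must be set by the test run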
+ return System.getProperty("rabbitmqctl.bin");
+ }
+}
diff --git a/deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/src/test/java/com/rabbitmq/stream/LeaderLocatorTest.java b/deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/src/test/java/com/rabbitmq/stream/LeaderLocatorTest.java
new file mode 100644
index 0000000000..5dc2256643
--- /dev/null
+++ b/deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/src/test/java/com/rabbitmq/stream/LeaderLocatorTest.java
@@ -0,0 +1,170 @@
+// The contents of this file are subject to the Mozilla Public License
+// Version 2.0 (the "License"); you may not use this file except in
+// compliance with the License. You may obtain a copy of the License
+// at https://www.mozilla.org/en-US/MPL/2.0/
+//
+// Software distributed under the License is distributed on an "AS IS"
+// basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+// the License for the specific language governing rights and
+// limitations under the License.
+//
+// The Original Code is RabbitMQ.
+//
+// The Initial Developer of the Original Code is Pivotal Software, Inc.
+// Copyright (c) 2020 VMware, Inc. or its affiliates. All rights reserved.
+//
+
+package com.rabbitmq.stream;
+
+import static java.util.concurrent.TimeUnit.SECONDS;
+import static org.assertj.core.api.Assertions.assertThat;
+
+import com.rabbitmq.stream.impl.Client;
+import com.rabbitmq.stream.impl.Client.Broker;
+import com.rabbitmq.stream.impl.Client.ClientParameters;
+import com.rabbitmq.stream.impl.Client.Response;
+import com.rabbitmq.stream.impl.Client.StreamMetadata;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
+import java.util.UUID;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.CountDownLatch;
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.extension.ExtendWith;
+import org.slf4j.LoggerFactory;
+
+@ExtendWith(TestUtils.StreamTestInfrastructureExtension.class)
+public class LeaderLocatorTest {
+
+ TestUtils.ClientFactory cf;
+
+ @Test
+ void invalidLocatorShouldReturnError() {
+ Client client = cf.get(new Client.ClientParameters().port(TestUtils.streamPortNode1()));
+ String s = UUID.randomUUID().toString();
+ Response response = client.create(s, Collections.singletonMap("queue-leader-locator", "foo"));
+ assertThat(response.isOk()).isFalse();
+ assertThat(response.getResponseCode()).isEqualTo(Constants.RESPONSE_CODE_PRECONDITION_FAILED);
+ }
+
+ @Test
+ void clientLocalLocatorShouldMakeLeaderOnConnectedNode() {
+ int[] ports = new int[] {TestUtils.streamPortNode1(), TestUtils.streamPortNode2()};
+ for (int port : ports) {
+ Client client = cf.get(new Client.ClientParameters().port(port));
+ String s = UUID.randomUUID().toString();
+ try {
+ Response response =
+ client.create(s, Collections.singletonMap("queue-leader-locator", "client-local"));
+ assertThat(response.isOk()).isTrue();
+ StreamMetadata metadata = client.metadata(s).get(s);
+ assertThat(metadata).isNotNull();
+ assertThat(metadata.getResponseCode()).isEqualTo(Constants.RESPONSE_CODE_OK);
+ assertThat(metadata.getLeader()).isNotNull().extracting(b -> b.getPort()).isEqualTo(port);
+ } finally {
+ client.delete(s);
+ }
+ }
+ }
+
+ @Test
+ void randomLocatorShouldCreateOnAllNodesAfterSomeTime() throws Exception {
+ int clusterSize = 3;
+ Set<String> createdStreams = ConcurrentHashMap.newKeySet();
+ Set<Broker> leaderNodes = ConcurrentHashMap.newKeySet(clusterSize);
+ CountDownLatch latch = new CountDownLatch(1);
+ Client client = cf.get(new ClientParameters().port(TestUtils.streamPortNode1()));
+ Runnable runnable =
+ () -> {
+ while (leaderNodes.size() < clusterSize && !Thread.interrupted()) {
+ String s = UUID.randomUUID().toString();
+ Response response =
+ client.create(s, Collections.singletonMap("queue-leader-locator", "random"));
+ if (!response.isOk()) {
+ break;
+ }
+ createdStreams.add(s);
+ StreamMetadata metadata = client.metadata(s).get(s);
+ if (metadata == null || !metadata.isResponseOk() || metadata.getLeader() == null) {
+ break;
+ }
+ leaderNodes.add(metadata.getLeader());
+ }
+ latch.countDown();
+ };
+
+ Thread worker = new Thread(runnable);
+ worker.start();
+
+ try {
+ assertThat(latch.await(10, SECONDS)).isTrue();
+ assertThat(leaderNodes).hasSize(clusterSize);
+ // in case Broker class is broken
+ assertThat(leaderNodes.stream().map(b -> b.getPort()).collect(Collectors.toSet()))
+ .hasSize(clusterSize);
+ } finally {
+ if (worker.isAlive()) {
+ worker.interrupt();
+ }
+ createdStreams.forEach(
+ s -> {
+ Response response = client.delete(s);
+ if (!response.isOk()) {
+ LoggerFactory.getLogger(LeaderLocatorTest.class).warn("Error while deleting stream {}", s);
+ }
+ });
+ }
+ }
+
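+ // "least-leaders" should place each new leader on the node with the fewest
+ // leaders, so 15 streams over 3 nodes should end up with 5 leaders per node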
+ @Test
+ void leastLeadersShouldSpreadLeadersOnTheCluster() {
+ int clusterSize = 3;
+ int streamsByNode = 5;
+ int streamCount = clusterSize * streamsByNode;
+ Set<String> createdStreams = ConcurrentHashMap.newKeySet();
+ Client client = cf.get(new ClientParameters().port(TestUtils.streamPortNode1()));
+
+ try {
+ IntStream.range(0, streamCount)
+ .forEach(
+ i -> {
+ String s = UUID.randomUUID().toString();
+ Response response =
+ client.create(
+ s, Collections.singletonMap("queue-leader-locator", "least-leaders"));
+ assertThat(response.isOk()).isTrue();
+ createdStreams.add(s);
+ });
+
+ Map<Integer, Integer> leaderCount = new HashMap<>();
+ Map<String, StreamMetadata> metadata =
+ client.metadata(createdStreams.toArray(new String[] {}));
+ assertThat(metadata).hasSize(streamCount);
+
+ metadata
+ .values()
+ .forEach(
+ streamMetadata -> {
+ assertThat(streamMetadata.isResponseOk()).isTrue();
+ assertThat(streamMetadata.getLeader()).isNotNull();
+ leaderCount.compute(
+ streamMetadata.getLeader().getPort(),
+ (port, value) -> value == null ? 1 : ++value);
+ });
+ assertThat(leaderCount).hasSize(clusterSize);
+ leaderCount.values().forEach(count -> assertThat(count).isEqualTo(streamsByNode));
+ } finally {
+ createdStreams.forEach(
+ s -> {
+ Response response = client.delete(s);
+ if (!response.isOk()) {
+ LoggerFactory.getLogger(LeaderLocatorTest.class).warn("Error while deleting stream {}", s);
+ }
+ });
+ }
+ }
+}
diff --git a/deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/src/test/java/com/rabbitmq/stream/StreamTest.java b/deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/src/test/java/com/rabbitmq/stream/StreamTest.java
new file mode 100644
index 0000000000..08024a12bf
--- /dev/null
+++ b/deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/src/test/java/com/rabbitmq/stream/StreamTest.java
@@ -0,0 +1,173 @@
+// The contents of this file are subject to the Mozilla Public License
+// Version 2.0 (the "License"); you may not use this file except in
+// compliance with the License. You may obtain a copy of the License
+// at https://www.mozilla.org/en-US/MPL/2.0/
+//
+// Software distributed under the License is distributed on an "AS IS"
+// basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+// the License for the specific language governing rights and
+// limitations under the License.
+//
+// The Original Code is RabbitMQ.
+//
+// The Initial Developer of the Original Code is Pivotal Software, Inc.
+// Copyright (c) 2020 VMware, Inc. or its affiliates. All rights reserved.
+//
+
+package com.rabbitmq.stream;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+import com.rabbitmq.stream.impl.Client;
+import java.nio.charset.StandardCharsets;
+import java.util.*;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+import java.util.function.BiConsumer;
+import java.util.function.Function;
+import java.util.stream.IntStream;
+import java.util.stream.Stream;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.extension.ExtendWith;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.Arguments;
+import org.junit.jupiter.params.provider.MethodSource;
+
+@ExtendWith(TestUtils.StreamTestInfrastructureExtension.class)
+public class StreamTest {
+
+ String stream;
+ TestUtils.ClientFactory cf;
+
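+ // publisher/consumer placement combinations: leader/leader, leader/replica,
+ // replica/leader, replica/replica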
+ static Stream<Arguments> shouldBePossibleToPublishFromAnyNodeAndConsumeFromAnyMember() {
+ return Stream.of(
+ brokers(
+ "leader", metadata -> metadata.getLeader(), "leader", metadata -> metadata.getLeader()),
+ brokers(
+ "leader",
+ metadata -> metadata.getLeader(),
+ "replica",
+ metadata -> metadata.getReplicas().iterator().next()),
+ brokers(
+ "replica",
+ metadata -> metadata.getReplicas().iterator().next(),
+ "leader",
+ metadata -> metadata.getLeader()),
+ brokers(
+ "replica",
+ metadata -> new ArrayList<>(metadata.getReplicas()).get(0),
+ "replica",
+ metadata -> new ArrayList<>(metadata.getReplicas()).get(1)));
+ }
+
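+ // wraps the broker selectors so parameterized test display names read "leader"/"replica"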
+ static Arguments brokers(
+ String dp,
+ Function<Client.StreamMetadata, Client.Broker> publisherBroker,
+ String dc,
+ Function<Client.StreamMetadata, Client.Broker> consumerBroker) {
+ return Arguments.of(
+ new FunctionWithToString<>(dp, publisherBroker),
+ new FunctionWithToString<>(dc, consumerBroker));
+ }
+
+ @ParameterizedTest
+ @MethodSource
+ void shouldBePossibleToPublishFromAnyNodeAndConsumeFromAnyMember(
+ Function<Client.StreamMetadata, Client.Broker> publisherBroker,
+ Function<Client.StreamMetadata, Client.Broker> consumerBroker)
+ throws Exception {
+
+ int messageCount = 10_000;
+ Client client = cf.get(new Client.ClientParameters().port(TestUtils.streamPortNode1()));
+ Map<String, Client.StreamMetadata> metadata = client.metadata(stream);
+ assertThat(metadata).hasSize(1).containsKey(stream);
+ Client.StreamMetadata streamMetadata = metadata.get(stream);
+
+ CountDownLatch publishingLatch = new CountDownLatch(messageCount);
+ Client publisher =
+ cf.get(
+ new Client.ClientParameters()
+ .port(publisherBroker.apply(streamMetadata).getPort())
+ .publishConfirmListener(
+ (publisherId, publishingId) -> publishingLatch.countDown()));
+
+ IntStream.range(0, messageCount)
+ .forEach(
+ i ->
+ publisher.publish(
+ stream,
+ (byte) 1,
+ Collections.singletonList(
+ publisher
+ .messageBuilder()
+ .addData(("hello " + i).getBytes(StandardCharsets.UTF_8))
+ .build())));
+
+ assertThat(publishingLatch.await(10, TimeUnit.SECONDS)).isTrue();
+
+ CountDownLatch consumingLatch = new CountDownLatch(messageCount);
+ Set<String> bodies = ConcurrentHashMap.newKeySet(messageCount);
+ Client consumer =
+ cf.get(
+ new Client.ClientParameters()
+ .port(consumerBroker.apply(streamMetadata).getPort())
+ .chunkListener(
+ (client1, subscriptionId, offset, messageCount1, dataSize) ->
+ client1.credit(subscriptionId, 10))
+ .messageListener(
+ (subscriptionId, offset, message) -> {
+ bodies.add(new String(message.getBodyAsBinary(), StandardCharsets.UTF_8));
+ consumingLatch.countDown();
+ }));
+
+ consumer.subscribe((byte) 1, stream, OffsetSpecification.first(), 10);
+
+ assertThat(consumingLatch.await(10, TimeUnit.SECONDS)).isTrue();
+ assertThat(bodies).hasSize(messageCount);
+ IntStream.range(0, messageCount).forEach(i -> assertThat(bodies).contains("hello " + i));
+ }
+
+ @Test
+ void metadataOnClusterShouldReturnLeaderAndReplicas() {
+ Client client = cf.get(new Client.ClientParameters().port(TestUtils.streamPortNode1()));
+ Map<String, Client.StreamMetadata> metadata = client.metadata(stream);
+ assertThat(metadata).hasSize(1).containsKey(stream);
+ Client.StreamMetadata streamMetadata = metadata.get(stream);
+ assertThat(streamMetadata.getResponseCode()).isEqualTo(Constants.RESPONSE_CODE_OK);
+ assertThat(streamMetadata.getReplicas()).hasSize(2);
+
+ BiConsumer<Client.Broker, Client.Broker> assertNodesAreDifferent =
+ (node, anotherNode) -> {
+ assertThat(node.getHost()).isEqualTo(anotherNode.getHost());
+ assertThat(node.getPort()).isNotEqualTo(anotherNode.getPort());
+ };
+
+ streamMetadata
+ .getReplicas()
+ .forEach(replica -> assertNodesAreDifferent.accept(replica, streamMetadata.getLeader()));
+ List<Client.Broker> replicas = new ArrayList<>(streamMetadata.getReplicas());
+ assertNodesAreDifferent.accept(replicas.get(0), replicas.get(1));
+ }
+
+ static class FunctionWithToString<T, R> implements Function<T, R> {
+
+ final String toString;
+ final Function<T, R> delegate;
+
+ FunctionWithToString(String toString, Function<T, R> delegate) {
+ this.toString = toString;
+ this.delegate = delegate;
+ }
+
+ @Override
+ public R apply(T t) {
+ return delegate.apply(t);
+ }
+
+ @Override
+ public String toString() {
+ return toString;
+ }
+ }
+}
diff --git a/deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/src/test/java/com/rabbitmq/stream/TestUtils.java b/deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/src/test/java/com/rabbitmq/stream/TestUtils.java
new file mode 100644
index 0000000000..c49a8d5832
--- /dev/null
+++ b/deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/src/test/java/com/rabbitmq/stream/TestUtils.java
@@ -0,0 +1,179 @@
+// The contents of this file are subject to the Mozilla Public License
+// Version 2.0 (the "License"); you may not use this file except in
+// compliance with the License. You may obtain a copy of the License
+// at https://www.mozilla.org/en-US/MPL/2.0/
+//
+// Software distributed under the License is distributed on an "AS IS"
+// basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+// the License for the specific language governing rights and
+// limitations under the License.
+//
+// The Original Code is RabbitMQ.
+//
+// The Initial Developer of the Original Code is Pivotal Software, Inc.
+// Copyright (c) 2020 VMware, Inc. or its affiliates. All rights reserved.
+//
+
+package com.rabbitmq.stream;
+
+import static java.util.concurrent.TimeUnit.SECONDS;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.junit.jupiter.api.Assertions.fail;
+
+import com.rabbitmq.stream.impl.Client;
+import io.netty.channel.EventLoopGroup;
+import io.netty.channel.nio.NioEventLoopGroup;
+import java.lang.reflect.Field;
+import java.time.Duration;
+import java.util.Set;
+import java.util.UUID;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.function.BooleanSupplier;
+import org.junit.jupiter.api.extension.*;
+
+public class TestUtils {
+
+ static int streamPortNode1() {
+ String port = System.getProperty("node1.stream.port", "5555");
+ return Integer.valueOf(port);
+ }
+
+ static int streamPortNode2() {
+ String port = System.getProperty("node2.stream.port", "5556");
+ return Integer.valueOf(port);
+ }
+
+ static void waitAtMost(Duration duration, BooleanSupplier condition) throws InterruptedException {
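+ // check immediately, then poll every 100 ms until the condition holds or the timeout elapses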
+ if (condition.getAsBoolean()) {
+ return;
+ }
+ int waitTime = 100;
+ int waitedTime = 0;
+ long timeoutInMs = duration.toMillis();
+ while (waitedTime <= timeoutInMs) {
+ Thread.sleep(waitTime);
+ if (condition.getAsBoolean()) {
+ return;
+ }
+ waitedTime += waitTime;
+ }
+ fail("Waited " + duration.getSeconds() + " second(s), condition never got true");
+ }
+
+ static class StreamTestInfrastructureExtension
+ implements BeforeAllCallback, AfterAllCallback, BeforeEachCallback, AfterEachCallback {
+
+ private static final ExtensionContext.Namespace NAMESPACE =
+ ExtensionContext.Namespace.create(StreamTestInfrastructureExtension.class);
+
+ private static ExtensionContext.Store store(ExtensionContext extensionContext) {
+ return extensionContext.getRoot().getStore(NAMESPACE);
+ }
+
+ private static EventLoopGroup eventLoopGroup(ExtensionContext context) {
+ return (EventLoopGroup) store(context).get("nettyEventLoopGroup");
+ }
+
+ @Override
+ public void beforeAll(ExtensionContext context) {
+ store(context).put("nettyEventLoopGroup", new NioEventLoopGroup());
+ }
+
+ @Override
+ public void beforeEach(ExtensionContext context) throws Exception {
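+ // inject shared infrastructure into the test instance via reflection: the Netty
+ // event loop group, a fresh stream (created here), and a client factory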
+ try {
+ Field streamField =
+ context.getTestInstance().get().getClass().getDeclaredField("eventLoopGroup");
+ streamField.setAccessible(true);
+ streamField.set(context.getTestInstance().get(), eventLoopGroup(context));
+ } catch (NoSuchFieldException e) {
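+ // the test class has no eventLoopGroup field; nothing to inject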
+
+ }
+ try {
+ Field streamField = context.getTestInstance().get().getClass().getDeclaredField("stream");
+ streamField.setAccessible(true);
+ String stream = UUID.randomUUID().toString();
+ streamField.set(context.getTestInstance().get(), stream);
+ Client client =
+ new Client(
+ new Client.ClientParameters()
+ .eventLoopGroup(eventLoopGroup(context))
+ .port(streamPortNode1()));
+ Client.Response response = client.create(stream);
+ assertThat(response.isOk()).isTrue();
+ client.close();
+ store(context).put("testMethodStream", stream);
+ } catch (NoSuchFieldException e) {
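+ // the test class has no stream field; skip stream creation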
+
+ }
+
+ for (Field declaredField : context.getTestInstance().get().getClass().getDeclaredFields()) {
+ if (declaredField.getType().equals(ClientFactory.class)) {
+ declaredField.setAccessible(true);
+ ClientFactory clientFactory = new ClientFactory(eventLoopGroup(context));
+ declaredField.set(context.getTestInstance().get(), clientFactory);
+ store(context).put("testClientFactory", clientFactory);
+ break;
+ }
+ }
+ }
+
+ @Override
+ public void afterEach(ExtensionContext context) throws Exception {
+ try {
+ Field streamField = context.getTestInstance().get().getClass().getDeclaredField("stream");
+ streamField.setAccessible(true);
+ String stream = (String) streamField.get(context.getTestInstance().get());
+ Client client =
+ new Client(
+ new Client.ClientParameters()
+ .eventLoopGroup(eventLoopGroup(context))
+ .port(streamPortNode1()));
+ Client.Response response = client.delete(stream);
+ assertThat(response.isOk()).isTrue();
+ client.close();
+ store(context).remove("testMethodStream");
+ } catch (NoSuchFieldException e) {
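+ // no stream field on the test class; nothing to delete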
+
+ }
+
+ ClientFactory clientFactory = (ClientFactory) store(context).get("testClientFactory");
+ if (clientFactory != null) {
+ clientFactory.close();
+ }
+ }
+
+ @Override
+ public void afterAll(ExtensionContext context) throws Exception {
+ EventLoopGroup eventLoopGroup = eventLoopGroup(context);
+ eventLoopGroup.shutdownGracefully(1, 10, SECONDS).get(10, SECONDS);
+ }
+ }
+
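+ // hands out clients and remembers them so they can all be closed after each test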
+ static class ClientFactory {
+
+ private final EventLoopGroup eventLoopGroup;
+ private final Set<Client> clients = ConcurrentHashMap.newKeySet();
+
+ public ClientFactory(EventLoopGroup eventLoopGroup) {
+ this.eventLoopGroup = eventLoopGroup;
+ }
+
+ public Client get() {
+ return get(new Client.ClientParameters());
+ }
+
+ public Client get(Client.ClientParameters parameters) {
+ // don't set the port, it would override the caller's port setting
+ Client client = new Client(parameters.eventLoopGroup(eventLoopGroup));
+ clients.add(client);
+ return client;
+ }
+
+ private void close() {
+ for (Client c : clients) {
+ c.close();
+ }
+ }
+ }
+}
diff --git a/deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/src/test/resources/logback-test.xml b/deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/src/test/resources/logback-test.xml
new file mode 100644
index 0000000000..45d598991d
--- /dev/null
+++ b/deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/src/test/resources/logback-test.xml
@@ -0,0 +1,13 @@
+<configuration>
+ <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
+ <encoder>
+ <pattern>%d{HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n</pattern>
+ </encoder>
+ </appender>
+
+ <logger name="com.rabbitmq.stream" level="info" />
+
+ <root level="info">
+ <appender-ref ref="STDOUT" />
+ </root>
+</configuration>
\ No newline at end of file
diff --git a/deps/rabbitmq_top/.gitignore b/deps/rabbitmq_top/.gitignore
new file mode 100644
index 0000000000..1413a066a4
--- /dev/null
+++ b/deps/rabbitmq_top/.gitignore
@@ -0,0 +1,17 @@
+.sw?
+.*.sw?
+*.beam
+/.erlang.mk/
+/cover/
+/deps/
+/doc/
+/ebin/
+/escript/
+/escript.lock
+/logs/
+/plugins/
+/plugins.lock
+/sbin/
+/sbin.lock
+
+rabbitmq_top.d
diff --git a/deps/rabbitmq_top/.travis.yml b/deps/rabbitmq_top/.travis.yml
new file mode 100644
index 0000000000..ca06c9acb9
--- /dev/null
+++ b/deps/rabbitmq_top/.travis.yml
@@ -0,0 +1,61 @@
+# vim:sw=2:et:
+
+os: linux
+dist: xenial
+language: elixir
+notifications:
+ email:
+ recipients:
+ - alerts@rabbitmq.com
+ on_success: never
+ on_failure: always
+addons:
+ apt:
+ packages:
+ - awscli
+cache:
+ apt: true
+env:
+ global:
+ - secure: RH+dA2KEBVnbwQxcrVoXEv9jRxHPgpPRM/RzndEgnG9pO0wreeOumrzUWwk0paSNnpe3LOZC5LbkJwSsUtDUKiu3n7qT2hYyiaV4Y7TIhL1tDgQi8YkvbAva9rR0jKhlhDDtKJvLwFux9/LqYPJnDg4Sw8gs6g+6cPOMR1ciPzw=
+ - secure: XTaKnhJ76c/4OgIGYOQh5RW4q5RrJcbWQN4c2+pDZCWTE2uHzFGDRaQ5/uO1w6fy2WRqdJZTfAZJhlQGXALL6uULJpz2PhxjHbeFP6nCzSUkxULj+KvITKd5wNKyPQaDn716fiYc1aJm2x3F/FSOPaFaH9p/wmEm50vw/vdVP7s=
+
+ # $base_rmq_ref is used by rabbitmq-components.mk to select the
+ # appropriate branch for dependencies.
+ - base_rmq_ref=master
+
+elixir:
+ - '1.9'
+otp_release:
+ - '21.3'
+ - '22.2'
+
+install:
+ # This is an Erlang project (we set language to Elixir only to ensure
+ # Elixir is installed), so we don't want Travis to run mix(1)
+ # automatically, as it would break the build.
+ skip
+
+script:
+ # $current_rmq_ref is also used by rabbitmq-components.mk to select
+ # the appropriate branch for dependencies.
+ - make check-rabbitmq-components.mk
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+ - make xref
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+ - make tests
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+
+after_failure:
+ - |
+ cd "$TRAVIS_BUILD_DIR"
+ if test -d logs && test "$AWS_ACCESS_KEY_ID" && test "$AWS_SECRET_ACCESS_KEY"; then
+ archive_name="$(basename "$TRAVIS_REPO_SLUG")-$TRAVIS_JOB_NUMBER"
+
+ tar -c --transform "s/^logs/${archive_name}/" -f - logs | \
+ xz > "${archive_name}.tar.xz"
+
+ aws s3 cp "${archive_name}.tar.xz" s3://server-release-pipeline/travis-ci-logs/ \
+ --region eu-west-1 \
+ --acl public-read
+ fi
diff --git a/deps/rabbitmq_top/CODE_OF_CONDUCT.md b/deps/rabbitmq_top/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000000..08697906fd
--- /dev/null
+++ b/deps/rabbitmq_top/CODE_OF_CONDUCT.md
@@ -0,0 +1,44 @@
+# Contributor Code of Conduct
+
+As contributors and maintainers of this project, and in the interest of fostering an open
+and welcoming community, we pledge to respect all people who contribute through reporting
+issues, posting feature requests, updating documentation, submitting pull requests or
+patches, and other activities.
+
+We are committed to making participation in this project a harassment-free experience for
+everyone, regardless of level of experience, gender, gender identity and expression,
+sexual orientation, disability, personal appearance, body size, race, ethnicity, age,
+religion, or nationality.
+
+Examples of unacceptable behavior by participants include:
+
+ * The use of sexualized language or imagery
+ * Personal attacks
+ * Trolling or insulting/derogatory comments
+ * Public or private harassment
+ * Publishing others' private information, such as physical or electronic addresses,
+ without explicit permission
+ * Other unethical or unprofessional conduct
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments,
+commits, code, wiki edits, issues, and other contributions that are not aligned to this
+Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors
+that they deem inappropriate, threatening, offensive, or harmful.
+
+By adopting this Code of Conduct, project maintainers commit themselves to fairly and
+consistently applying these principles to every aspect of managing this project. Project
+maintainers who do not follow or enforce the Code of Conduct may be permanently removed
+from the project team.
+
+This Code of Conduct applies both within project spaces and in public spaces when an
+individual is representing the project or its community.
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by
+contacting a project maintainer at [info@rabbitmq.com](mailto:info@rabbitmq.com). All complaints will
+be reviewed and investigated and will result in a response that is deemed necessary and
+appropriate to the circumstances. Maintainers are obligated to maintain confidentiality
+with regard to the reporter of an incident.
+
+This Code of Conduct is adapted from the
+[Contributor Covenant](https://contributor-covenant.org), version 1.3.0, available at
+[contributor-covenant.org/version/1/3/0/](https://contributor-covenant.org/version/1/3/0/)
diff --git a/deps/rabbitmq_top/CONTRIBUTING.md b/deps/rabbitmq_top/CONTRIBUTING.md
new file mode 100644
index 0000000000..23a92fef9c
--- /dev/null
+++ b/deps/rabbitmq_top/CONTRIBUTING.md
@@ -0,0 +1,38 @@
+## Overview
+
+RabbitMQ projects use pull requests to discuss, collaborate on and accept code contributions.
+Pull requests are the primary place to discuss code changes.
+
+## How to Contribute
+
+The process is fairly standard:
+
+ * Fork the repository or repositories you plan on contributing to
+ * Clone [RabbitMQ umbrella repository](https://github.com/rabbitmq/rabbitmq-public-umbrella)
+ * `cd umbrella`, `make co`
+ * Create a branch with a descriptive name in the relevant repositories
+ * Make your changes, run tests, commit with a [descriptive message](https://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html), push to your fork
+ * Submit pull requests with an explanation what has been changed and **why**
+ * Submit a filled out and signed [Contributor Agreement](https://github.com/rabbitmq/ca#how-to-submit) if needed (see below)
+ * Be patient. We will get to your pull request eventually
+
+If what you are going to work on is a substantial change, please first ask the core team
+for their opinion on the [RabbitMQ mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
+
+
+## Code of Conduct
+
+See [CODE_OF_CONDUCT.md](./CODE_OF_CONDUCT.md).
+
+
+## Contributor Agreement
+
+If you want to contribute a non-trivial change, please submit a signed copy of our
+[Contributor Agreement](https://github.com/rabbitmq/ca#how-to-submit) around the time
+you submit your pull request. This will make it much easier (in some cases, possible)
+for the RabbitMQ team at Pivotal to merge your contribution.
+
+
+## Where to Ask Questions
+
+If something isn't clear, feel free to ask on our [mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
diff --git a/deps/rabbitmq_top/LICENSE b/deps/rabbitmq_top/LICENSE
new file mode 100644
index 0000000000..fa300fe0a0
--- /dev/null
+++ b/deps/rabbitmq_top/LICENSE
@@ -0,0 +1,5 @@
+This package, the RabbitMQ Top plugin, is licensed under the MPL 2.0. For the
+MPL 2.0, please see LICENSE-MPL-RabbitMQ.
+
+If you have any questions regarding licensing, please contact us at
+info@rabbitmq.com.
diff --git a/deps/rabbitmq_top/LICENSE-MPL-RabbitMQ b/deps/rabbitmq_top/LICENSE-MPL-RabbitMQ
new file mode 100644
index 0000000000..14e2f777f6
--- /dev/null
+++ b/deps/rabbitmq_top/LICENSE-MPL-RabbitMQ
@@ -0,0 +1,373 @@
+Mozilla Public License Version 2.0
+==================================
+
+1. Definitions
+--------------
+
+1.1. "Contributor"
+ means each individual or legal entity that creates, contributes to
+ the creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+ means the combination of the Contributions of others (if any) used
+ by a Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+ means Source Code Form to which the initial Contributor has attached
+ the notice in Exhibit A, the Executable Form of such Source Code
+ Form, and Modifications of such Source Code Form, in each case
+ including portions thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ (a) that the initial Contributor has attached the notice described
+ in Exhibit B to the Covered Software; or
+
+ (b) that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the
+ terms of a Secondary License.
+
+1.6. "Executable Form"
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+ means a work that combines Covered Software with other material, in
+ a separate file or files, that is not Covered Software.
+
+1.8. "License"
+ means this document.
+
+1.9. "Licensable"
+ means having the right to grant, to the maximum extent possible,
+ whether at the time of the initial grant or subsequently, any and
+ all of the rights conveyed by this License.
+
+1.10. "Modifications"
+ means any of the following:
+
+ (a) any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered
+ Software; or
+
+ (b) any new file in Source Code Form that contains any Covered
+ Software.
+
+1.11. "Patent Claims" of a Contributor
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the
+ License, by the making, using, selling, offering for sale, having
+ made, import, or transfer of either its Contributions or its
+ Contributor Version.
+
+1.12. "Secondary License"
+ means either the GNU General Public License, Version 2.0, the GNU
+ Lesser General Public License, Version 2.1, the GNU Affero General
+ Public License, Version 3.0, or any later versions of those
+ licenses.
+
+1.13. "Source Code Form"
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that
+ controls, is controlled by, or is under common control with You. For
+ purposes of this definition, "control" means (a) the power, direct
+ or indirect, to cause the direction or management of such entity,
+ whether by contract or otherwise, or (b) ownership of more than
+ fifty percent (50%) of the outstanding shares or beneficial
+ ownership of such entity.
+
+2. License Grants and Conditions
+--------------------------------
+
+2.1. Grants
+
+Each Contributor hereby grants You a world-wide, royalty-free,
+non-exclusive license:
+
+(a) under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+(b) under Patent Claims of such Contributor to make, use, sell, offer
+ for sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+The licenses granted in Section 2.1 with respect to any Contribution
+become effective for each Contribution on the date the Contributor first
+distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+The licenses granted in this Section 2 are the only rights granted under
+this License. No additional rights or licenses will be implied from the
+distribution or licensing of Covered Software under this License.
+Notwithstanding Section 2.1(b) above, no patent license is granted by a
+Contributor:
+
+(a) for any code that a Contributor has removed from Covered Software;
+ or
+
+(b) for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+(c) under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+This License does not grant any rights in the trademarks, service marks,
+or logos of any Contributor (except as may be necessary to comply with
+the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+No Contributor makes additional grants as a result of Your choice to
+distribute the Covered Software under a subsequent version of this
+License (see Section 10.2) or under the terms of a Secondary License (if
+permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+Each Contributor represents that the Contributor believes its
+Contributions are its original creation(s) or it has sufficient rights
+to grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+This License is not intended to limit any rights You have under
+applicable copyright doctrines of fair use, fair dealing, or other
+equivalents.
+
+2.7. Conditions
+
+Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
+in Section 2.1.
+
+3. Responsibilities
+-------------------
+
+3.1. Distribution of Source Form
+
+All distribution of Covered Software in Source Code Form, including any
+Modifications that You create or to which You contribute, must be under
+the terms of this License. You must inform recipients that the Source
+Code Form of the Covered Software is governed by the terms of this
+License, and how they can obtain a copy of this License. You may not
+attempt to alter or restrict the recipients' rights in the Source Code
+Form.
+
+3.2. Distribution of Executable Form
+
+If You distribute Covered Software in Executable Form then:
+
+(a) such Covered Software must also be made available in Source Code
+ Form, as described in Section 3.1, and You must inform recipients of
+ the Executable Form how they can obtain a copy of such Source Code
+ Form by reasonable means in a timely manner, at a charge no more
+ than the cost of distribution to the recipient; and
+
+(b) You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter
+ the recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+You may create and distribute a Larger Work under terms of Your choice,
+provided that You also comply with the requirements of this License for
+the Covered Software. If the Larger Work is a combination of Covered
+Software with a work governed by one or more Secondary Licenses, and the
+Covered Software is not Incompatible With Secondary Licenses, this
+License permits You to additionally distribute such Covered Software
+under the terms of such Secondary License(s), so that the recipient of
+the Larger Work may, at their option, further distribute the Covered
+Software under the terms of either this License or such Secondary
+License(s).
+
+3.4. Notices
+
+You may not remove or alter the substance of any license notices
+(including copyright notices, patent notices, disclaimers of warranty,
+or limitations of liability) contained within the Source Code Form of
+the Covered Software, except that You may alter any license notices to
+the extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+You may choose to offer, and to charge a fee for, warranty, support,
+indemnity or liability obligations to one or more recipients of Covered
+Software. However, You may do so only on Your own behalf, and not on
+behalf of any Contributor. You must make it absolutely clear that any
+such warranty, support, indemnity, or liability obligation is offered by
+You alone, and You hereby agree to indemnify every Contributor for any
+liability incurred by such Contributor as a result of warranty, support,
+indemnity or liability terms You offer. You may include additional
+disclaimers of warranty and limitations of liability specific to any
+jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+---------------------------------------------------
+
+If it is impossible for You to comply with any of the terms of this
+License with respect to some or all of the Covered Software due to
+statute, judicial order, or regulation then You must: (a) comply with
+the terms of this License to the maximum extent possible; and (b)
+describe the limitations and the code they affect. Such description must
+be placed in a text file included with all distributions of the Covered
+Software under this License. Except to the extent prohibited by statute
+or regulation, such description must be sufficiently detailed for a
+recipient of ordinary skill to be able to understand it.
+
+5. Termination
+--------------
+
+5.1. The rights granted under this License will terminate automatically
+if You fail to comply with any of its terms. However, if You become
+compliant, then the rights granted under this License from a particular
+Contributor are reinstated (a) provisionally, unless and until such
+Contributor explicitly and finally terminates Your grants, and (b) on an
+ongoing basis, if such Contributor fails to notify You of the
+non-compliance by some reasonable means prior to 60 days after You have
+come back into compliance. Moreover, Your grants from a particular
+Contributor are reinstated on an ongoing basis if such Contributor
+notifies You of the non-compliance by some reasonable means, this is the
+first time You have received notice of non-compliance with this License
+from such Contributor, and You become compliant prior to 30 days after
+Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+infringement claim (excluding declaratory judgment actions,
+counter-claims, and cross-claims) alleging that a Contributor Version
+directly or indirectly infringes any patent, then the rights granted to
+You by any and all Contributors for the Covered Software under Section
+2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all
+end user license agreements (excluding distributors and resellers) which
+have been validly granted by You or Your distributors under this License
+prior to termination shall survive termination.
+
+************************************************************************
+* *
+* 6. Disclaimer of Warranty *
+* ------------------------- *
+* *
+* Covered Software is provided under this License on an "as is" *
+* basis, without warranty of any kind, either expressed, implied, or *
+* statutory, including, without limitation, warranties that the *
+* Covered Software is free of defects, merchantable, fit for a *
+* particular purpose or non-infringing. The entire risk as to the *
+* quality and performance of the Covered Software is with You. *
+* Should any Covered Software prove defective in any respect, You *
+* (not any Contributor) assume the cost of any necessary servicing, *
+* repair, or correction. This disclaimer of warranty constitutes an *
+* essential part of this License. No use of any Covered Software is *
+* authorized under this License except under this disclaimer. *
+* *
+************************************************************************
+
+************************************************************************
+* *
+* 7. Limitation of Liability *
+* -------------------------- *
+* *
+* Under no circumstances and under no legal theory, whether tort *
+* (including negligence), contract, or otherwise, shall any *
+* Contributor, or anyone who distributes Covered Software as *
+* permitted above, be liable to You for any direct, indirect, *
+* special, incidental, or consequential damages of any character *
+* including, without limitation, damages for lost profits, loss of *
+* goodwill, work stoppage, computer failure or malfunction, or any *
+* and all other commercial damages or losses, even if such party *
+* shall have been informed of the possibility of such damages. This *
+* limitation of liability shall not apply to liability for death or *
+* personal injury resulting from such party's negligence to the *
+* extent applicable law prohibits such limitation. Some *
+* jurisdictions do not allow the exclusion or limitation of *
+* incidental or consequential damages, so this exclusion and *
+* limitation may not apply to You. *
+* *
+************************************************************************
+
+8. Litigation
+-------------
+
+Any litigation relating to this License may be brought only in the
+courts of a jurisdiction where the defendant maintains its principal
+place of business and such litigation shall be governed by laws of that
+jurisdiction, without reference to its conflict-of-law provisions.
+Nothing in this Section shall prevent a party's ability to bring
+cross-claims or counter-claims.
+
+9. Miscellaneous
+----------------
+
+This License represents the complete agreement concerning the subject
+matter hereof. If any provision of this License is held to be
+unenforceable, such provision shall be reformed only to the extent
+necessary to make it enforceable. Any law or regulation which provides
+that the language of a contract shall be construed against the drafter
+shall not be used to construe this License against a Contributor.
+
+10. Versions of the License
+---------------------------
+
+10.1. New Versions
+
+Mozilla Foundation is the license steward. Except as provided in Section
+10.3, no one other than the license steward has the right to modify or
+publish new versions of this License. Each version will be given a
+distinguishing version number.
+
+10.2. Effect of New Versions
+
+You may distribute the Covered Software under the terms of the version
+of the License under which You originally received the Covered Software,
+or under the terms of any subsequent version published by the license
+steward.
+
+10.3. Modified Versions
+
+If you create software not governed by this License, and you want to
+create a new license for such software, you may create and use a
+modified version of this License if you rename the license and remove
+any references to the name of the license steward (except to note that
+such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+Licenses
+
+If You choose to distribute Source Code Form that is Incompatible With
+Secondary Licenses under the terms of this version of the License, the
+notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+-------------------------------------------
+
+ This Source Code Form is subject to the terms of the Mozilla Public
+ License, v. 2.0. If a copy of the MPL was not distributed with this
+ file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular
+file, then You may include the notice in a location (such as a LICENSE
+file in a relevant directory) where a recipient would be likely to look
+for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+---------------------------------------------------------
+
+ This Source Code Form is "Incompatible With Secondary Licenses", as
+ defined by the Mozilla Public License, v. 2.0.
diff --git a/deps/rabbitmq_top/Makefile b/deps/rabbitmq_top/Makefile
new file mode 100644
index 0000000000..36914886f7
--- /dev/null
+++ b/deps/rabbitmq_top/Makefile
@@ -0,0 +1,21 @@
+PROJECT = rabbitmq_top
+PROJECT_DESCRIPTION = RabbitMQ Top
+PROJECT_MOD = rabbit_top_app
+
+define PROJECT_APP_EXTRA_KEYS
+ {broker_version_requirements, []}
+endef
+
+DEPS = rabbit_common rabbit amqp_client rabbitmq_management
+
+DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-plugin.mk
+DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk
+
+# FIXME: Use erlang.mk patched for RabbitMQ, while waiting for PRs to be
+# reviewed and merged.
+
+ERLANG_MK_REPO = https://github.com/rabbitmq/erlang.mk.git
+ERLANG_MK_COMMIT = rabbitmq-tmp
+
+include rabbitmq-components.mk
+include erlang.mk
diff --git a/deps/rabbitmq_top/README.md b/deps/rabbitmq_top/README.md
new file mode 100644
index 0000000000..018f5490c1
--- /dev/null
+++ b/deps/rabbitmq_top/README.md
@@ -0,0 +1,67 @@
+# RabbitMQ Top Plugin
+
+Adds UNIX `top`-like information about the Erlang VM to the [management UI](https://www.rabbitmq.com/management.html).
+The closest interactive CLI alternative in recent releases is `rabbitmq-diagnostics observer`.
+
+This is what it looks like:
+
+![](https://i.imgur.com/m7cWTLV.png)
+
+## Installation
+
+This plugin ships with RabbitMQ as of `3.6.3`. Enable it with:
+
+``` bash
+# use sudo as necessary
+rabbitmq-plugins enable rabbitmq_top
+```
+
+### RabbitMQ 3.5.x
+
+You can download a pre-built binary of this plugin for RabbitMQ `3.5.x` from the [RabbitMQ Community plugins page](https://bintray.com/rabbitmq/community-plugins/rabbitmq_top).
+
+
+## Usage
+
+Sort by process ID, memory use or reductions/sec (an approximate
+measure of CPU use).
+
+Click on the process description (e.g. "my queue") to see that
+object's management view.
+
+Click on the process ID (e.g. "&lt;0.3423.0&gt;") to see some more
+Erlang process details.
+
+See the [Memory Use Analysis guide](https://www.rabbitmq.com/memory-use.html) on the RabbitMQ website
+for more information.
+
+## HTTP API
+
+You can drive the HTTP API yourself. This plugin extends the management plugin's HTTP API, so you should be familiar with that API first. The additional paths look like:
+
+ /api/top/<node-name>
+
+Lists processes on the given node. It takes query string parameters
+similar to other list endpoints: `sort`, `sort_reverse` and `columns`.
+Sorting matters because the endpoint is currently hard-coded to return
+only the top 20 processes.
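+
+A minimal sketch of a request, assuming default `guest:guest` credentials,
+the management listener on its default port 15672, and a node named
+`rabbit@localhost` (the column names are illustrative):
+
+``` bash
+# Top processes on a node, sorted by memory use, descending.
+curl -u guest:guest \
+  'http://localhost:15672/api/top/rabbit@localhost?sort=memory&sort_reverse=true&columns=pid,memory,reductions'
+```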
+
+ /api/process/<pid>
+
+Individual process details.
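+
+For example, under the same assumptions as above (the pid is illustrative,
+and its angle brackets must be URL-encoded):
+
+``` bash
+# Details for a single Erlang process.
+curl -u guest:guest 'http://localhost:15672/api/process/%3C0.3423.0%3E'
+```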
+
+## More Screenshots
+
+Individual process metrics are also available:
+
+![](https://i.imgur.com/BYgIqQF.png)
+
+## Building from Source
+
+You can build and install it like any other plugin (see
+[the plugin development guide](https://www.rabbitmq.com/plugin-development.html)).
+
+## License and Copyright
+
+(c) 2007-2020 VMware, Inc. or its affiliates.
+
+Released under the same license as RabbitMQ.
diff --git a/deps/rabbitmq_top/erlang.mk b/deps/rabbitmq_top/erlang.mk
new file mode 100644
index 0000000000..fce4be0b0a
--- /dev/null
+++ b/deps/rabbitmq_top/erlang.mk
@@ -0,0 +1,7808 @@
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+.PHONY: all app apps deps search rel relup docs install-docs check tests clean distclean help erlang-mk
+
+ERLANG_MK_FILENAME := $(realpath $(lastword $(MAKEFILE_LIST)))
+export ERLANG_MK_FILENAME
+
+ERLANG_MK_VERSION = 2019.07.01-40-geb3e4b0
+ERLANG_MK_WITHOUT =
+
+# Make 3.81 and 3.82 are deprecated.
+
+ifeq ($(MAKELEVEL)$(MAKE_VERSION),03.81)
+$(warning Please upgrade to GNU Make 4 or later: https://erlang.mk/guide/installation.html)
+endif
+
+ifeq ($(MAKELEVEL)$(MAKE_VERSION),03.82)
+$(warning Please upgrade to GNU Make 4 or later: https://erlang.mk/guide/installation.html)
+endif
+
+# Core configuration.
+
+PROJECT ?= $(notdir $(CURDIR))
+PROJECT := $(strip $(PROJECT))
+
+PROJECT_VERSION ?= rolling
+PROJECT_MOD ?= $(PROJECT)_app
+PROJECT_ENV ?= []
+
+# Verbosity.
+
+V ?= 0
+
+verbose_0 = @
+verbose_2 = set -x;
+verbose = $(verbose_$(V))
+
+ifeq ($(V),3)
+SHELL := $(SHELL) -x
+endif
+
+gen_verbose_0 = @echo " GEN " $@;
+gen_verbose_2 = set -x;
+gen_verbose = $(gen_verbose_$(V))
+
+gen_verbose_esc_0 = @echo " GEN " $$@;
+gen_verbose_esc_2 = set -x;
+gen_verbose_esc = $(gen_verbose_esc_$(V))
+
+# Temporary files directory.
+
+ERLANG_MK_TMP ?= $(CURDIR)/.erlang.mk
+export ERLANG_MK_TMP
+
+# "erl" command.
+
+ERL = erl +A1 -noinput -boot no_dot_erlang
+
+# Platform detection.
+
+ifeq ($(PLATFORM),)
+UNAME_S := $(shell uname -s)
+
+ifeq ($(UNAME_S),Linux)
+PLATFORM = linux
+else ifeq ($(UNAME_S),Darwin)
+PLATFORM = darwin
+else ifeq ($(UNAME_S),SunOS)
+PLATFORM = solaris
+else ifeq ($(UNAME_S),GNU)
+PLATFORM = gnu
+else ifeq ($(UNAME_S),FreeBSD)
+PLATFORM = freebsd
+else ifeq ($(UNAME_S),NetBSD)
+PLATFORM = netbsd
+else ifeq ($(UNAME_S),OpenBSD)
+PLATFORM = openbsd
+else ifeq ($(UNAME_S),DragonFly)
+PLATFORM = dragonfly
+else ifeq ($(shell uname -o),Msys)
+PLATFORM = msys2
+else
+$(error Unable to detect platform. Please open a ticket with the output of uname -a.)
+endif
+
+export PLATFORM
+endif
+
+# Core targets.
+
+all:: deps app rel
+
+# Noop to avoid a Make warning when there's nothing to do.
+rel::
+ $(verbose) :
+
+relup:: deps app
+
+check:: tests
+
+clean:: clean-crashdump
+
+clean-crashdump:
+ifneq ($(wildcard erl_crash.dump),)
+ $(gen_verbose) rm -f erl_crash.dump
+endif
+
+distclean:: clean distclean-tmp
+
+$(ERLANG_MK_TMP):
+ $(verbose) mkdir -p $(ERLANG_MK_TMP)
+
+distclean-tmp:
+ $(gen_verbose) rm -rf $(ERLANG_MK_TMP)
+
+help::
+ $(verbose) printf "%s\n" \
+ "erlang.mk (version $(ERLANG_MK_VERSION)) is distributed under the terms of the ISC License." \
+ "Copyright (c) 2013-2016 Loïc Hoguin <essen@ninenines.eu>" \
+ "" \
+ "Usage: [V=1] $(MAKE) [target]..." \
+ "" \
+ "Core targets:" \
+ " all Run deps, app and rel targets in that order" \
+ " app Compile the project" \
+ " deps Fetch dependencies (if needed) and compile them" \
+ " fetch-deps Fetch dependencies recursively (if needed) without compiling them" \
+ " list-deps List dependencies recursively on stdout" \
+ " search q=... Search for a package in the built-in index" \
+ " rel Build a release for this project, if applicable" \
+ " docs Build the documentation for this project" \
+ " install-docs Install the man pages for this project" \
+ " check Compile and run all tests and analysis for this project" \
+ " tests Run the tests for this project" \
+ " clean Delete temporary and output files from most targets" \
+ " distclean Delete all temporary and output files" \
+ " help Display this help and exit" \
+ " erlang-mk Update erlang.mk to the latest version"
+
+# Core functions.
+
+empty :=
+space := $(empty) $(empty)
+tab := $(empty) $(empty)
+comma := ,
+
+define newline
+
+
+endef
+
+define comma_list
+$(subst $(space),$(comma),$(strip $(1)))
+endef
+
+define escape_dquotes
+$(subst ",\",$1)
+endef
+
+# Adding erlang.mk to make Erlang scripts that call init:get_plain_arguments() happy.
+define erlang
+$(ERL) $2 -pz $(ERLANG_MK_TMP)/rebar/ebin -eval "$(subst $(newline),,$(call escape_dquotes,$1))" -- erlang.mk
+endef
+
+ifeq ($(PLATFORM),msys2)
+core_native_path = $(shell cygpath -m $1)
+else
+core_native_path = $1
+endif
+
+core_http_get = curl -Lf$(if $(filter-out 0,$(V)),,s)o $(call core_native_path,$1) $2
+
+core_eq = $(and $(findstring $(1),$(2)),$(findstring $(2),$(1)))
+
+# We skip files that contain spaces because they end up causing issues.
+core_find = $(if $(wildcard $1),$(shell find $(1:%/=%) \( -type l -o -type f \) -name $(subst *,\*,$2) | grep -v " "))
+
+core_lc = $(subst A,a,$(subst B,b,$(subst C,c,$(subst D,d,$(subst E,e,$(subst F,f,$(subst G,g,$(subst H,h,$(subst I,i,$(subst J,j,$(subst K,k,$(subst L,l,$(subst M,m,$(subst N,n,$(subst O,o,$(subst P,p,$(subst Q,q,$(subst R,r,$(subst S,s,$(subst T,t,$(subst U,u,$(subst V,v,$(subst W,w,$(subst X,x,$(subst Y,y,$(subst Z,z,$(1)))))))))))))))))))))))))))
+
+core_ls = $(filter-out $(1),$(shell echo $(1)))
+
+# @todo Use a solution that does not require using perl.
+core_relpath = $(shell perl -e 'use File::Spec; print File::Spec->abs2rel(@ARGV) . "\n"' $1 $2)
+
+define core_render
+ printf -- '$(subst $(newline),\n,$(subst %,%%,$(subst ','\'',$(subst $(tab),$(WS),$(call $(1))))))\n' > $(2)
+endef
+
+# Automated update.
+
+ERLANG_MK_REPO ?= https://github.com/ninenines/erlang.mk
+ERLANG_MK_COMMIT ?=
+ERLANG_MK_BUILD_CONFIG ?= build.config
+ERLANG_MK_BUILD_DIR ?= .erlang.mk.build
+
+erlang-mk: WITHOUT ?= $(ERLANG_MK_WITHOUT)
+erlang-mk:
+ifdef ERLANG_MK_COMMIT
+ $(verbose) git clone $(ERLANG_MK_REPO) $(ERLANG_MK_BUILD_DIR)
+ $(verbose) cd $(ERLANG_MK_BUILD_DIR) && git checkout $(ERLANG_MK_COMMIT)
+else
+ $(verbose) git clone --depth 1 $(ERLANG_MK_REPO) $(ERLANG_MK_BUILD_DIR)
+endif
+ $(verbose) if [ -f $(ERLANG_MK_BUILD_CONFIG) ]; then cp $(ERLANG_MK_BUILD_CONFIG) $(ERLANG_MK_BUILD_DIR)/build.config; fi
+ $(gen_verbose) $(MAKE) --no-print-directory -C $(ERLANG_MK_BUILD_DIR) WITHOUT='$(strip $(WITHOUT))' UPGRADE=1
+ $(verbose) cp $(ERLANG_MK_BUILD_DIR)/erlang.mk ./erlang.mk
+ $(verbose) rm -rf $(ERLANG_MK_BUILD_DIR)
+ $(verbose) rm -rf $(ERLANG_MK_TMP)
+
+# The erlang.mk package index is bundled in the default erlang.mk build.
+# Search for the string "copyright" to skip to the rest of the code.
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-kerl
+
+KERL_INSTALL_DIR ?= $(HOME)/erlang
+
+ifeq ($(strip $(KERL)),)
+KERL := $(ERLANG_MK_TMP)/kerl/kerl
+endif
+
+KERL_DIR = $(ERLANG_MK_TMP)/kerl
+
+export KERL
+
+KERL_GIT ?= https://github.com/kerl/kerl
+KERL_COMMIT ?= master
+
+KERL_MAKEFLAGS ?=
+
+OTP_GIT ?= https://github.com/erlang/otp
+
+define kerl_otp_target
+$(KERL_INSTALL_DIR)/$(1): $(KERL)
+ $(verbose) if [ ! -d $$@ ]; then \
+ MAKEFLAGS="$(KERL_MAKEFLAGS)" $(KERL) build git $(OTP_GIT) $(1) $(1); \
+ $(KERL) install $(1) $(KERL_INSTALL_DIR)/$(1); \
+ fi
+endef
+
+define kerl_hipe_target
+$(KERL_INSTALL_DIR)/$1-native: $(KERL)
+ $(verbose) if [ ! -d $$@ ]; then \
+ KERL_CONFIGURE_OPTIONS=--enable-native-libs \
+ MAKEFLAGS="$(KERL_MAKEFLAGS)" $(KERL) build git $(OTP_GIT) $1 $1-native; \
+ $(KERL) install $1-native $(KERL_INSTALL_DIR)/$1-native; \
+ fi
+endef
+
+$(KERL): $(KERL_DIR)
+
+$(KERL_DIR): | $(ERLANG_MK_TMP)
+ $(gen_verbose) git clone --depth 1 $(KERL_GIT) $(ERLANG_MK_TMP)/kerl
+ $(verbose) cd $(ERLANG_MK_TMP)/kerl && git checkout $(KERL_COMMIT)
+ $(verbose) chmod +x $(KERL)
+
+distclean:: distclean-kerl
+
+distclean-kerl:
+ $(gen_verbose) rm -rf $(KERL_DIR)
+
+# Allow users to select which version of Erlang/OTP to use for a project.
+
+ifneq ($(strip $(LATEST_ERLANG_OTP)),)
+# In some environments it is necessary to filter out master.
+ERLANG_OTP := $(notdir $(lastword $(sort\
+ $(filter-out $(KERL_INSTALL_DIR)/master $(KERL_INSTALL_DIR)/OTP_R%,\
+ $(filter-out %-rc1 %-rc2 %-rc3,$(wildcard $(KERL_INSTALL_DIR)/*[^-native]))))))
+endif
+
+ERLANG_OTP ?=
+ERLANG_HIPE ?=
+
+# Use kerl to enforce a specific Erlang/OTP version for a project.
+ifneq ($(strip $(ERLANG_OTP)),)
+export PATH := $(KERL_INSTALL_DIR)/$(ERLANG_OTP)/bin:$(PATH)
+SHELL := env PATH=$(PATH) $(SHELL)
+$(eval $(call kerl_otp_target,$(ERLANG_OTP)))
+
+# Build Erlang/OTP only if it doesn't already exist.
+ifeq ($(wildcard $(KERL_INSTALL_DIR)/$(ERLANG_OTP))$(BUILD_ERLANG_OTP),)
+$(info Building Erlang/OTP $(ERLANG_OTP)... Please wait...)
+$(shell $(MAKE) $(KERL_INSTALL_DIR)/$(ERLANG_OTP) ERLANG_OTP=$(ERLANG_OTP) BUILD_ERLANG_OTP=1 >&2)
+endif
+
+else
+# Same for a HiPE enabled VM.
+ifneq ($(strip $(ERLANG_HIPE)),)
+export PATH := $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native/bin:$(PATH)
+SHELL := env PATH=$(PATH) $(SHELL)
+$(eval $(call kerl_hipe_target,$(ERLANG_HIPE)))
+
+# Build Erlang/OTP only if it doesn't already exist.
+ifeq ($(wildcard $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native)$(BUILD_ERLANG_OTP),)
+$(info Building HiPE-enabled Erlang/OTP $(ERLANG_OTP)... Please wait...)
+$(shell $(MAKE) $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native ERLANG_HIPE=$(ERLANG_HIPE) BUILD_ERLANG_OTP=1 >&2)
+endif
+
+endif
+endif
+
+PACKAGES += aberth
+pkg_aberth_name = aberth
+pkg_aberth_description = Generic BERT-RPC server in Erlang
+pkg_aberth_homepage = https://github.com/a13x/aberth
+pkg_aberth_fetch = git
+pkg_aberth_repo = https://github.com/a13x/aberth
+pkg_aberth_commit = master
+
+PACKAGES += active
+pkg_active_name = active
+pkg_active_description = Active development for Erlang: rebuild and reload source/binary files while the VM is running
+pkg_active_homepage = https://github.com/proger/active
+pkg_active_fetch = git
+pkg_active_repo = https://github.com/proger/active
+pkg_active_commit = master
+
+PACKAGES += actordb_core
+pkg_actordb_core_name = actordb_core
+pkg_actordb_core_description = ActorDB main source
+pkg_actordb_core_homepage = http://www.actordb.com/
+pkg_actordb_core_fetch = git
+pkg_actordb_core_repo = https://github.com/biokoda/actordb_core
+pkg_actordb_core_commit = master
+
+PACKAGES += actordb_thrift
+pkg_actordb_thrift_name = actordb_thrift
+pkg_actordb_thrift_description = Thrift API for ActorDB
+pkg_actordb_thrift_homepage = http://www.actordb.com/
+pkg_actordb_thrift_fetch = git
+pkg_actordb_thrift_repo = https://github.com/biokoda/actordb_thrift
+pkg_actordb_thrift_commit = master
+
+PACKAGES += aleppo
+pkg_aleppo_name = aleppo
+pkg_aleppo_description = Alternative Erlang Pre-Processor
+pkg_aleppo_homepage = https://github.com/ErlyORM/aleppo
+pkg_aleppo_fetch = git
+pkg_aleppo_repo = https://github.com/ErlyORM/aleppo
+pkg_aleppo_commit = master
+
+PACKAGES += alog
+pkg_alog_name = alog
+pkg_alog_description = Simply the best logging framework for Erlang
+pkg_alog_homepage = https://github.com/siberian-fast-food/alogger
+pkg_alog_fetch = git
+pkg_alog_repo = https://github.com/siberian-fast-food/alogger
+pkg_alog_commit = master
+
+PACKAGES += amqp_client
+pkg_amqp_client_name = amqp_client
+pkg_amqp_client_description = RabbitMQ Erlang AMQP client
+pkg_amqp_client_homepage = https://www.rabbitmq.com/erlang-client-user-guide.html
+pkg_amqp_client_fetch = git
+pkg_amqp_client_repo = https://github.com/rabbitmq/rabbitmq-erlang-client.git
+pkg_amqp_client_commit = master
+
+PACKAGES += annotations
+pkg_annotations_name = annotations
+pkg_annotations_description = Simple code instrumentation utilities
+pkg_annotations_homepage = https://github.com/hyperthunk/annotations
+pkg_annotations_fetch = git
+pkg_annotations_repo = https://github.com/hyperthunk/annotations
+pkg_annotations_commit = master
+
+PACKAGES += antidote
+pkg_antidote_name = antidote
+pkg_antidote_description = Large-scale computation without synchronisation
+pkg_antidote_homepage = https://syncfree.lip6.fr/
+pkg_antidote_fetch = git
+pkg_antidote_repo = https://github.com/SyncFree/antidote
+pkg_antidote_commit = master
+
+PACKAGES += apns
+pkg_apns_name = apns
+pkg_apns_description = Apple Push Notification Server for Erlang
+pkg_apns_homepage = http://inaka.github.com/apns4erl
+pkg_apns_fetch = git
+pkg_apns_repo = https://github.com/inaka/apns4erl
+pkg_apns_commit = master
+
+PACKAGES += asciideck
+pkg_asciideck_name = asciideck
+pkg_asciideck_description = Asciidoc for Erlang.
+pkg_asciideck_homepage = https://ninenines.eu
+pkg_asciideck_fetch = git
+pkg_asciideck_repo = https://github.com/ninenines/asciideck
+pkg_asciideck_commit = master
+
+PACKAGES += azdht
+pkg_azdht_name = azdht
+pkg_azdht_description = Azureus Distributed Hash Table (DHT) in Erlang
+pkg_azdht_homepage = https://github.com/arcusfelis/azdht
+pkg_azdht_fetch = git
+pkg_azdht_repo = https://github.com/arcusfelis/azdht
+pkg_azdht_commit = master
+
+PACKAGES += backoff
+pkg_backoff_name = backoff
+pkg_backoff_description = Simple exponential backoffs in Erlang
+pkg_backoff_homepage = https://github.com/ferd/backoff
+pkg_backoff_fetch = git
+pkg_backoff_repo = https://github.com/ferd/backoff
+pkg_backoff_commit = master
+
+PACKAGES += barrel_tcp
+pkg_barrel_tcp_name = barrel_tcp
+pkg_barrel_tcp_description = barrel is a generic TCP acceptor pool with low latency in Erlang.
+pkg_barrel_tcp_homepage = https://github.com/benoitc-attic/barrel_tcp
+pkg_barrel_tcp_fetch = git
+pkg_barrel_tcp_repo = https://github.com/benoitc-attic/barrel_tcp
+pkg_barrel_tcp_commit = master
+
+PACKAGES += basho_bench
+pkg_basho_bench_name = basho_bench
+pkg_basho_bench_description = A load-generation and testing tool for basically whatever you can write a returning Erlang function for.
+pkg_basho_bench_homepage = https://github.com/basho/basho_bench
+pkg_basho_bench_fetch = git
+pkg_basho_bench_repo = https://github.com/basho/basho_bench
+pkg_basho_bench_commit = master
+
+PACKAGES += bcrypt
+pkg_bcrypt_name = bcrypt
+pkg_bcrypt_description = Bcrypt Erlang / C library
+pkg_bcrypt_homepage = https://github.com/erlangpack/bcrypt
+pkg_bcrypt_fetch = git
+pkg_bcrypt_repo = https://github.com/erlangpack/bcrypt.git
+pkg_bcrypt_commit = master
+
+PACKAGES += beam
+pkg_beam_name = beam
+pkg_beam_description = BEAM emulator written in Erlang
+pkg_beam_homepage = https://github.com/tonyrog/beam
+pkg_beam_fetch = git
+pkg_beam_repo = https://github.com/tonyrog/beam
+pkg_beam_commit = master
+
+PACKAGES += beanstalk
+pkg_beanstalk_name = beanstalk
+pkg_beanstalk_description = An Erlang client for beanstalkd
+pkg_beanstalk_homepage = https://github.com/tim/erlang-beanstalk
+pkg_beanstalk_fetch = git
+pkg_beanstalk_repo = https://github.com/tim/erlang-beanstalk
+pkg_beanstalk_commit = master
+
+PACKAGES += bear
+pkg_bear_name = bear
+pkg_bear_description = a set of statistics functions for erlang
+pkg_bear_homepage = https://github.com/boundary/bear
+pkg_bear_fetch = git
+pkg_bear_repo = https://github.com/boundary/bear
+pkg_bear_commit = master
+
+PACKAGES += bertconf
+pkg_bertconf_name = bertconf
+pkg_bertconf_description = Make ETS tables out of static BERT files that are auto-reloaded
+pkg_bertconf_homepage = https://github.com/ferd/bertconf
+pkg_bertconf_fetch = git
+pkg_bertconf_repo = https://github.com/ferd/bertconf
+pkg_bertconf_commit = master
+
+PACKAGES += bifrost
+pkg_bifrost_name = bifrost
+pkg_bifrost_description = Erlang FTP Server Framework
+pkg_bifrost_homepage = https://github.com/thorstadt/bifrost
+pkg_bifrost_fetch = git
+pkg_bifrost_repo = https://github.com/thorstadt/bifrost
+pkg_bifrost_commit = master
+
+PACKAGES += binpp
+pkg_binpp_name = binpp
+pkg_binpp_description = Erlang Binary Pretty Printer
+pkg_binpp_homepage = https://github.com/jtendo/binpp
+pkg_binpp_fetch = git
+pkg_binpp_repo = https://github.com/jtendo/binpp
+pkg_binpp_commit = master
+
+PACKAGES += bisect
+pkg_bisect_name = bisect
+pkg_bisect_description = Ordered fixed-size binary dictionary in Erlang
+pkg_bisect_homepage = https://github.com/knutin/bisect
+pkg_bisect_fetch = git
+pkg_bisect_repo = https://github.com/knutin/bisect
+pkg_bisect_commit = master
+
+PACKAGES += bitcask
+pkg_bitcask_name = bitcask
+pkg_bitcask_description = because you need another key/value storage engine
+pkg_bitcask_homepage = https://github.com/basho/bitcask
+pkg_bitcask_fetch = git
+pkg_bitcask_repo = https://github.com/basho/bitcask
+pkg_bitcask_commit = develop
+
+PACKAGES += bitstore
+pkg_bitstore_name = bitstore
+pkg_bitstore_description = A document based ontology development environment
+pkg_bitstore_homepage = https://github.com/bdionne/bitstore
+pkg_bitstore_fetch = git
+pkg_bitstore_repo = https://github.com/bdionne/bitstore
+pkg_bitstore_commit = master
+
+PACKAGES += bootstrap
+pkg_bootstrap_name = bootstrap
+pkg_bootstrap_description = A simple, yet powerful Erlang cluster bootstrapping application.
+pkg_bootstrap_homepage = https://github.com/schlagert/bootstrap
+pkg_bootstrap_fetch = git
+pkg_bootstrap_repo = https://github.com/schlagert/bootstrap
+pkg_bootstrap_commit = master
+
+PACKAGES += boss
+pkg_boss_name = boss
+pkg_boss_description = Erlang web MVC, now featuring Comet
+pkg_boss_homepage = https://github.com/ChicagoBoss/ChicagoBoss
+pkg_boss_fetch = git
+pkg_boss_repo = https://github.com/ChicagoBoss/ChicagoBoss
+pkg_boss_commit = master
+
+PACKAGES += boss_db
+pkg_boss_db_name = boss_db
+pkg_boss_db_description = BossDB: a sharded, caching, pooling, evented ORM for Erlang
+pkg_boss_db_homepage = https://github.com/ErlyORM/boss_db
+pkg_boss_db_fetch = git
+pkg_boss_db_repo = https://github.com/ErlyORM/boss_db
+pkg_boss_db_commit = master
+
+PACKAGES += brod
+pkg_brod_name = brod
+pkg_brod_description = Kafka client in Erlang
+pkg_brod_homepage = https://github.com/klarna/brod
+pkg_brod_fetch = git
+pkg_brod_repo = https://github.com/klarna/brod.git
+pkg_brod_commit = master
+
+PACKAGES += bson
+pkg_bson_name = bson
+pkg_bson_description = BSON documents in Erlang, see bsonspec.org
+pkg_bson_homepage = https://github.com/comtihon/bson-erlang
+pkg_bson_fetch = git
+pkg_bson_repo = https://github.com/comtihon/bson-erlang
+pkg_bson_commit = master
+
+PACKAGES += bullet
+pkg_bullet_name = bullet
+pkg_bullet_description = Simple, reliable, efficient streaming for Cowboy.
+pkg_bullet_homepage = http://ninenines.eu
+pkg_bullet_fetch = git
+pkg_bullet_repo = https://github.com/ninenines/bullet
+pkg_bullet_commit = master
+
+PACKAGES += cache
+pkg_cache_name = cache
+pkg_cache_description = Erlang in-memory cache
+pkg_cache_homepage = https://github.com/fogfish/cache
+pkg_cache_fetch = git
+pkg_cache_repo = https://github.com/fogfish/cache
+pkg_cache_commit = master
+
+PACKAGES += cake
+pkg_cake_name = cake
+pkg_cake_description = Really simple terminal colorization
+pkg_cake_homepage = https://github.com/darach/cake-erl
+pkg_cake_fetch = git
+pkg_cake_repo = https://github.com/darach/cake-erl
+pkg_cake_commit = master
+
+PACKAGES += carotene
+pkg_carotene_name = carotene
+pkg_carotene_description = Real-time server
+pkg_carotene_homepage = https://github.com/carotene/carotene
+pkg_carotene_fetch = git
+pkg_carotene_repo = https://github.com/carotene/carotene
+pkg_carotene_commit = master
+
+PACKAGES += cberl
+pkg_cberl_name = cberl
+pkg_cberl_description = NIF based Erlang bindings for Couchbase
+pkg_cberl_homepage = https://github.com/chitika/cberl
+pkg_cberl_fetch = git
+pkg_cberl_repo = https://github.com/chitika/cberl
+pkg_cberl_commit = master
+
+PACKAGES += cecho
+pkg_cecho_name = cecho
+pkg_cecho_description = An ncurses library for Erlang
+pkg_cecho_homepage = https://github.com/mazenharake/cecho
+pkg_cecho_fetch = git
+pkg_cecho_repo = https://github.com/mazenharake/cecho
+pkg_cecho_commit = master
+
+PACKAGES += cferl
+pkg_cferl_name = cferl
+pkg_cferl_description = Rackspace / Open Stack Cloud Files Erlang Client
+pkg_cferl_homepage = https://github.com/ddossot/cferl
+pkg_cferl_fetch = git
+pkg_cferl_repo = https://github.com/ddossot/cferl
+pkg_cferl_commit = master
+
+PACKAGES += chaos_monkey
+pkg_chaos_monkey_name = chaos_monkey
+pkg_chaos_monkey_description = This is The CHAOS MONKEY. It will kill your processes.
+pkg_chaos_monkey_homepage = https://github.com/dLuna/chaos_monkey
+pkg_chaos_monkey_fetch = git
+pkg_chaos_monkey_repo = https://github.com/dLuna/chaos_monkey
+pkg_chaos_monkey_commit = master
+
+PACKAGES += check_node
+pkg_check_node_name = check_node
+pkg_check_node_description = Nagios Scripts for monitoring Riak
+pkg_check_node_homepage = https://github.com/basho-labs/riak_nagios
+pkg_check_node_fetch = git
+pkg_check_node_repo = https://github.com/basho-labs/riak_nagios
+pkg_check_node_commit = master
+
+PACKAGES += chronos
+pkg_chronos_name = chronos
+pkg_chronos_description = Timer module for Erlang that makes it easy to abstract time out of the tests.
+pkg_chronos_homepage = https://github.com/lehoff/chronos
+pkg_chronos_fetch = git
+pkg_chronos_repo = https://github.com/lehoff/chronos
+pkg_chronos_commit = master
+
+PACKAGES += chumak
+pkg_chumak_name = chumak
+pkg_chumak_description = Pure Erlang implementation of ZeroMQ Message Transport Protocol.
+pkg_chumak_homepage = http://choven.ca
+pkg_chumak_fetch = git
+pkg_chumak_repo = https://github.com/chovencorp/chumak
+pkg_chumak_commit = master
+
+PACKAGES += cl
+pkg_cl_name = cl
+pkg_cl_description = OpenCL binding for Erlang
+pkg_cl_homepage = https://github.com/tonyrog/cl
+pkg_cl_fetch = git
+pkg_cl_repo = https://github.com/tonyrog/cl
+pkg_cl_commit = master
+
+PACKAGES += clique
+pkg_clique_name = clique
+pkg_clique_description = CLI Framework for Erlang
+pkg_clique_homepage = https://github.com/basho/clique
+pkg_clique_fetch = git
+pkg_clique_repo = https://github.com/basho/clique
+pkg_clique_commit = develop
+
+PACKAGES += cloudi_core
+pkg_cloudi_core_name = cloudi_core
+pkg_cloudi_core_description = CloudI internal service runtime
+pkg_cloudi_core_homepage = http://cloudi.org/
+pkg_cloudi_core_fetch = git
+pkg_cloudi_core_repo = https://github.com/CloudI/cloudi_core
+pkg_cloudi_core_commit = master
+
+PACKAGES += cloudi_service_api_requests
+pkg_cloudi_service_api_requests_name = cloudi_service_api_requests
+pkg_cloudi_service_api_requests_description = CloudI Service API requests (JSON-RPC/Erlang-term support)
+pkg_cloudi_service_api_requests_homepage = http://cloudi.org/
+pkg_cloudi_service_api_requests_fetch = git
+pkg_cloudi_service_api_requests_repo = https://github.com/CloudI/cloudi_service_api_requests
+pkg_cloudi_service_api_requests_commit = master
+
+PACKAGES += cloudi_service_db
+pkg_cloudi_service_db_name = cloudi_service_db
+pkg_cloudi_service_db_description = CloudI Database (in-memory/testing/generic)
+pkg_cloudi_service_db_homepage = http://cloudi.org/
+pkg_cloudi_service_db_fetch = git
+pkg_cloudi_service_db_repo = https://github.com/CloudI/cloudi_service_db
+pkg_cloudi_service_db_commit = master
+
+PACKAGES += cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_name = cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_description = Cassandra CloudI Service
+pkg_cloudi_service_db_cassandra_homepage = http://cloudi.org/
+pkg_cloudi_service_db_cassandra_fetch = git
+pkg_cloudi_service_db_cassandra_repo = https://github.com/CloudI/cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_commit = master
+
+PACKAGES += cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_name = cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_description = Cassandra CQL CloudI Service
+pkg_cloudi_service_db_cassandra_cql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_cassandra_cql_fetch = git
+pkg_cloudi_service_db_cassandra_cql_repo = https://github.com/CloudI/cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_commit = master
+
+PACKAGES += cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_name = cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_description = CouchDB CloudI Service
+pkg_cloudi_service_db_couchdb_homepage = http://cloudi.org/
+pkg_cloudi_service_db_couchdb_fetch = git
+pkg_cloudi_service_db_couchdb_repo = https://github.com/CloudI/cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_commit = master
+
+PACKAGES += cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_name = cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_description = elasticsearch CloudI Service
+pkg_cloudi_service_db_elasticsearch_homepage = http://cloudi.org/
+pkg_cloudi_service_db_elasticsearch_fetch = git
+pkg_cloudi_service_db_elasticsearch_repo = https://github.com/CloudI/cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_commit = master
+
+PACKAGES += cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_name = cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_description = memcached CloudI Service
+pkg_cloudi_service_db_memcached_homepage = http://cloudi.org/
+pkg_cloudi_service_db_memcached_fetch = git
+pkg_cloudi_service_db_memcached_repo = https://github.com/CloudI/cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_commit = master
+
+PACKAGES += cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_name = cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_description = MySQL CloudI Service
+pkg_cloudi_service_db_mysql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_mysql_fetch = git
+pkg_cloudi_service_db_mysql_repo = https://github.com/CloudI/cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_commit = master
+
+PACKAGES += cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_name = cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_description = PostgreSQL CloudI Service
+pkg_cloudi_service_db_pgsql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_pgsql_fetch = git
+pkg_cloudi_service_db_pgsql_repo = https://github.com/CloudI/cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_commit = master
+
+PACKAGES += cloudi_service_db_riak
+pkg_cloudi_service_db_riak_name = cloudi_service_db_riak
+pkg_cloudi_service_db_riak_description = Riak CloudI Service
+pkg_cloudi_service_db_riak_homepage = http://cloudi.org/
+pkg_cloudi_service_db_riak_fetch = git
+pkg_cloudi_service_db_riak_repo = https://github.com/CloudI/cloudi_service_db_riak
+pkg_cloudi_service_db_riak_commit = master
+
+PACKAGES += cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_name = cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_description = Tokyo Tyrant CloudI Service
+pkg_cloudi_service_db_tokyotyrant_homepage = http://cloudi.org/
+pkg_cloudi_service_db_tokyotyrant_fetch = git
+pkg_cloudi_service_db_tokyotyrant_repo = https://github.com/CloudI/cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_commit = master
+
+PACKAGES += cloudi_service_filesystem
+pkg_cloudi_service_filesystem_name = cloudi_service_filesystem
+pkg_cloudi_service_filesystem_description = Filesystem CloudI Service
+pkg_cloudi_service_filesystem_homepage = http://cloudi.org/
+pkg_cloudi_service_filesystem_fetch = git
+pkg_cloudi_service_filesystem_repo = https://github.com/CloudI/cloudi_service_filesystem
+pkg_cloudi_service_filesystem_commit = master
+
+PACKAGES += cloudi_service_http_client
+pkg_cloudi_service_http_client_name = cloudi_service_http_client
+pkg_cloudi_service_http_client_description = HTTP client CloudI Service
+pkg_cloudi_service_http_client_homepage = http://cloudi.org/
+pkg_cloudi_service_http_client_fetch = git
+pkg_cloudi_service_http_client_repo = https://github.com/CloudI/cloudi_service_http_client
+pkg_cloudi_service_http_client_commit = master
+
+PACKAGES += cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_name = cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_description = cowboy HTTP/HTTPS CloudI Service
+pkg_cloudi_service_http_cowboy_homepage = http://cloudi.org/
+pkg_cloudi_service_http_cowboy_fetch = git
+pkg_cloudi_service_http_cowboy_repo = https://github.com/CloudI/cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_commit = master
+
+PACKAGES += cloudi_service_http_elli
+pkg_cloudi_service_http_elli_name = cloudi_service_http_elli
+pkg_cloudi_service_http_elli_description = elli HTTP CloudI Service
+pkg_cloudi_service_http_elli_homepage = http://cloudi.org/
+pkg_cloudi_service_http_elli_fetch = git
+pkg_cloudi_service_http_elli_repo = https://github.com/CloudI/cloudi_service_http_elli
+pkg_cloudi_service_http_elli_commit = master
+
+PACKAGES += cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_name = cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_description = Map/Reduce CloudI Service
+pkg_cloudi_service_map_reduce_homepage = http://cloudi.org/
+pkg_cloudi_service_map_reduce_fetch = git
+pkg_cloudi_service_map_reduce_repo = https://github.com/CloudI/cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_commit = master
+
+PACKAGES += cloudi_service_oauth1
+pkg_cloudi_service_oauth1_name = cloudi_service_oauth1
+pkg_cloudi_service_oauth1_description = OAuth v1.0 CloudI Service
+pkg_cloudi_service_oauth1_homepage = http://cloudi.org/
+pkg_cloudi_service_oauth1_fetch = git
+pkg_cloudi_service_oauth1_repo = https://github.com/CloudI/cloudi_service_oauth1
+pkg_cloudi_service_oauth1_commit = master
+
+PACKAGES += cloudi_service_queue
+pkg_cloudi_service_queue_name = cloudi_service_queue
+pkg_cloudi_service_queue_description = Persistent Queue Service
+pkg_cloudi_service_queue_homepage = http://cloudi.org/
+pkg_cloudi_service_queue_fetch = git
+pkg_cloudi_service_queue_repo = https://github.com/CloudI/cloudi_service_queue
+pkg_cloudi_service_queue_commit = master
+
+PACKAGES += cloudi_service_quorum
+pkg_cloudi_service_quorum_name = cloudi_service_quorum
+pkg_cloudi_service_quorum_description = CloudI Quorum Service
+pkg_cloudi_service_quorum_homepage = http://cloudi.org/
+pkg_cloudi_service_quorum_fetch = git
+pkg_cloudi_service_quorum_repo = https://github.com/CloudI/cloudi_service_quorum
+pkg_cloudi_service_quorum_commit = master
+
+PACKAGES += cloudi_service_router
+pkg_cloudi_service_router_name = cloudi_service_router
+pkg_cloudi_service_router_description = CloudI Router Service
+pkg_cloudi_service_router_homepage = http://cloudi.org/
+pkg_cloudi_service_router_fetch = git
+pkg_cloudi_service_router_repo = https://github.com/CloudI/cloudi_service_router
+pkg_cloudi_service_router_commit = master
+
+PACKAGES += cloudi_service_tcp
+pkg_cloudi_service_tcp_name = cloudi_service_tcp
+pkg_cloudi_service_tcp_description = TCP CloudI Service
+pkg_cloudi_service_tcp_homepage = http://cloudi.org/
+pkg_cloudi_service_tcp_fetch = git
+pkg_cloudi_service_tcp_repo = https://github.com/CloudI/cloudi_service_tcp
+pkg_cloudi_service_tcp_commit = master
+
+PACKAGES += cloudi_service_timers
+pkg_cloudi_service_timers_name = cloudi_service_timers
+pkg_cloudi_service_timers_description = Timers CloudI Service
+pkg_cloudi_service_timers_homepage = http://cloudi.org/
+pkg_cloudi_service_timers_fetch = git
+pkg_cloudi_service_timers_repo = https://github.com/CloudI/cloudi_service_timers
+pkg_cloudi_service_timers_commit = master
+
+PACKAGES += cloudi_service_udp
+pkg_cloudi_service_udp_name = cloudi_service_udp
+pkg_cloudi_service_udp_description = UDP CloudI Service
+pkg_cloudi_service_udp_homepage = http://cloudi.org/
+pkg_cloudi_service_udp_fetch = git
+pkg_cloudi_service_udp_repo = https://github.com/CloudI/cloudi_service_udp
+pkg_cloudi_service_udp_commit = master
+
+PACKAGES += cloudi_service_validate
+pkg_cloudi_service_validate_name = cloudi_service_validate
+pkg_cloudi_service_validate_description = CloudI Validate Service
+pkg_cloudi_service_validate_homepage = http://cloudi.org/
+pkg_cloudi_service_validate_fetch = git
+pkg_cloudi_service_validate_repo = https://github.com/CloudI/cloudi_service_validate
+pkg_cloudi_service_validate_commit = master
+
+PACKAGES += cloudi_service_zeromq
+pkg_cloudi_service_zeromq_name = cloudi_service_zeromq
+pkg_cloudi_service_zeromq_description = ZeroMQ CloudI Service
+pkg_cloudi_service_zeromq_homepage = http://cloudi.org/
+pkg_cloudi_service_zeromq_fetch = git
+pkg_cloudi_service_zeromq_repo = https://github.com/CloudI/cloudi_service_zeromq
+pkg_cloudi_service_zeromq_commit = master
+
+PACKAGES += cluster_info
+pkg_cluster_info_name = cluster_info
+pkg_cluster_info_description = Fork of Hibari's nifty cluster_info OTP app
+pkg_cluster_info_homepage = https://github.com/basho/cluster_info
+pkg_cluster_info_fetch = git
+pkg_cluster_info_repo = https://github.com/basho/cluster_info
+pkg_cluster_info_commit = master
+
+PACKAGES += color
+pkg_color_name = color
+pkg_color_description = ANSI colors for your Erlang
+pkg_color_homepage = https://github.com/julianduque/erlang-color
+pkg_color_fetch = git
+pkg_color_repo = https://github.com/julianduque/erlang-color
+pkg_color_commit = master
+
+PACKAGES += confetti
+pkg_confetti_name = confetti
+pkg_confetti_description = Erlang configuration provider / application:get_env/2 on steroids
+pkg_confetti_homepage = https://github.com/jtendo/confetti
+pkg_confetti_fetch = git
+pkg_confetti_repo = https://github.com/jtendo/confetti
+pkg_confetti_commit = master
+
+PACKAGES += couchbeam
+pkg_couchbeam_name = couchbeam
+pkg_couchbeam_description = Apache CouchDB client in Erlang
+pkg_couchbeam_homepage = https://github.com/benoitc/couchbeam
+pkg_couchbeam_fetch = git
+pkg_couchbeam_repo = https://github.com/benoitc/couchbeam
+pkg_couchbeam_commit = master
+
+PACKAGES += covertool
+pkg_covertool_name = covertool
+pkg_covertool_description = Tool to convert Erlang cover data files into Cobertura XML reports
+pkg_covertool_homepage = https://github.com/idubrov/covertool
+pkg_covertool_fetch = git
+pkg_covertool_repo = https://github.com/idubrov/covertool
+pkg_covertool_commit = master
+
+PACKAGES += cowboy
+pkg_cowboy_name = cowboy
+pkg_cowboy_description = Small, fast and modular HTTP server.
+pkg_cowboy_homepage = http://ninenines.eu
+pkg_cowboy_fetch = git
+pkg_cowboy_repo = https://github.com/ninenines/cowboy
+pkg_cowboy_commit = 1.0.4
+
+PACKAGES += cowdb
+pkg_cowdb_name = cowdb
+pkg_cowdb_description = Pure Key/Value database library for Erlang Applications
+pkg_cowdb_homepage = https://github.com/refuge/cowdb
+pkg_cowdb_fetch = git
+pkg_cowdb_repo = https://github.com/refuge/cowdb
+pkg_cowdb_commit = master
+
+PACKAGES += cowlib
+pkg_cowlib_name = cowlib
+pkg_cowlib_description = Support library for manipulating Web protocols.
+pkg_cowlib_homepage = http://ninenines.eu
+pkg_cowlib_fetch = git
+pkg_cowlib_repo = https://github.com/ninenines/cowlib
+pkg_cowlib_commit = 1.0.2
+
+PACKAGES += cpg
+pkg_cpg_name = cpg
+pkg_cpg_description = CloudI Process Groups
+pkg_cpg_homepage = https://github.com/okeuday/cpg
+pkg_cpg_fetch = git
+pkg_cpg_repo = https://github.com/okeuday/cpg
+pkg_cpg_commit = master
+
+PACKAGES += cqerl
+pkg_cqerl_name = cqerl
+pkg_cqerl_description = Native Erlang CQL client for Cassandra
+pkg_cqerl_homepage = https://matehat.github.io/cqerl/
+pkg_cqerl_fetch = git
+pkg_cqerl_repo = https://github.com/matehat/cqerl
+pkg_cqerl_commit = master
+
+PACKAGES += cr
+pkg_cr_name = cr
+pkg_cr_description = Chain Replication
+pkg_cr_homepage = https://synrc.com/apps/cr/doc/cr.htm
+pkg_cr_fetch = git
+pkg_cr_repo = https://github.com/spawnproc/cr
+pkg_cr_commit = master
+
+PACKAGES += cuttlefish
+pkg_cuttlefish_name = cuttlefish
+pkg_cuttlefish_description = never lose your childlike sense of wonder baby cuttlefish, promise me?
+pkg_cuttlefish_homepage = https://github.com/basho/cuttlefish
+pkg_cuttlefish_fetch = git
+pkg_cuttlefish_repo = https://github.com/basho/cuttlefish
+pkg_cuttlefish_commit = master
+
+PACKAGES += damocles
+pkg_damocles_name = damocles
+pkg_damocles_description = Erlang library for generating adversarial network conditions for QAing distributed applications/systems on a single Linux box.
+pkg_damocles_homepage = https://github.com/lostcolony/damocles
+pkg_damocles_fetch = git
+pkg_damocles_repo = https://github.com/lostcolony/damocles
+pkg_damocles_commit = master
+
+PACKAGES += debbie
+pkg_debbie_name = debbie
+pkg_debbie_description = .DEB Built In Erlang
+pkg_debbie_homepage = https://github.com/crownedgrouse/debbie
+pkg_debbie_fetch = git
+pkg_debbie_repo = https://github.com/crownedgrouse/debbie
+pkg_debbie_commit = master
+
+PACKAGES += decimal
+pkg_decimal_name = decimal
+pkg_decimal_description = An Erlang decimal arithmetic library
+pkg_decimal_homepage = https://github.com/tim/erlang-decimal
+pkg_decimal_fetch = git
+pkg_decimal_repo = https://github.com/tim/erlang-decimal
+pkg_decimal_commit = master
+
+PACKAGES += detergent
+pkg_detergent_name = detergent
+pkg_detergent_description = An emulsifying Erlang SOAP library
+pkg_detergent_homepage = https://github.com/devinus/detergent
+pkg_detergent_fetch = git
+pkg_detergent_repo = https://github.com/devinus/detergent
+pkg_detergent_commit = master
+
+PACKAGES += detest
+pkg_detest_name = detest
+pkg_detest_description = Tool for running tests on a cluster of erlang nodes
+pkg_detest_homepage = https://github.com/biokoda/detest
+pkg_detest_fetch = git
+pkg_detest_repo = https://github.com/biokoda/detest
+pkg_detest_commit = master
+
+PACKAGES += dh_date
+pkg_dh_date_name = dh_date
+pkg_dh_date_description = Date formatting / parsing library for erlang
+pkg_dh_date_homepage = https://github.com/daleharvey/dh_date
+pkg_dh_date_fetch = git
+pkg_dh_date_repo = https://github.com/daleharvey/dh_date
+pkg_dh_date_commit = master
+
+PACKAGES += dirbusterl
+pkg_dirbusterl_name = dirbusterl
+pkg_dirbusterl_description = DirBuster successor in Erlang
+pkg_dirbusterl_homepage = https://github.com/silentsignal/DirBustErl
+pkg_dirbusterl_fetch = git
+pkg_dirbusterl_repo = https://github.com/silentsignal/DirBustErl
+pkg_dirbusterl_commit = master
+
+PACKAGES += dispcount
+pkg_dispcount_name = dispcount
+pkg_dispcount_description = Erlang task dispatcher based on ETS counters.
+pkg_dispcount_homepage = https://github.com/ferd/dispcount
+pkg_dispcount_fetch = git
+pkg_dispcount_repo = https://github.com/ferd/dispcount
+pkg_dispcount_commit = master
+
+PACKAGES += dlhttpc
+pkg_dlhttpc_name = dlhttpc
+pkg_dlhttpc_description = dispcount-based lhttpc fork for massive amounts of requests to limited endpoints
+pkg_dlhttpc_homepage = https://github.com/ferd/dlhttpc
+pkg_dlhttpc_fetch = git
+pkg_dlhttpc_repo = https://github.com/ferd/dlhttpc
+pkg_dlhttpc_commit = master
+
+PACKAGES += dns
+pkg_dns_name = dns
+pkg_dns_description = Erlang DNS library
+pkg_dns_homepage = https://github.com/aetrion/dns_erlang
+pkg_dns_fetch = git
+pkg_dns_repo = https://github.com/aetrion/dns_erlang
+pkg_dns_commit = master
+
+PACKAGES += dnssd
+pkg_dnssd_name = dnssd
+pkg_dnssd_description = Erlang interface to Apple's Bonjour DNS Service Discovery implementation
+pkg_dnssd_homepage = https://github.com/benoitc/dnssd_erlang
+pkg_dnssd_fetch = git
+pkg_dnssd_repo = https://github.com/benoitc/dnssd_erlang
+pkg_dnssd_commit = master
+
+PACKAGES += dynamic_compile
+pkg_dynamic_compile_name = dynamic_compile
+pkg_dynamic_compile_description = compile and load erlang modules from string input
+pkg_dynamic_compile_homepage = https://github.com/jkvor/dynamic_compile
+pkg_dynamic_compile_fetch = git
+pkg_dynamic_compile_repo = https://github.com/jkvor/dynamic_compile
+pkg_dynamic_compile_commit = master
+
+PACKAGES += e2
+pkg_e2_name = e2
+pkg_e2_description = Library to simplify writing correct OTP applications.
+pkg_e2_homepage = http://e2project.org
+pkg_e2_fetch = git
+pkg_e2_repo = https://github.com/gar1t/e2
+pkg_e2_commit = master
+
+PACKAGES += eamf
+pkg_eamf_name = eamf
+pkg_eamf_description = eAMF provides Action Message Format (AMF) support for Erlang
+pkg_eamf_homepage = https://github.com/mrinalwadhwa/eamf
+pkg_eamf_fetch = git
+pkg_eamf_repo = https://github.com/mrinalwadhwa/eamf
+pkg_eamf_commit = master
+
+PACKAGES += eavro
+pkg_eavro_name = eavro
+pkg_eavro_description = Apache Avro encoder/decoder
+pkg_eavro_homepage = https://github.com/SIfoxDevTeam/eavro
+pkg_eavro_fetch = git
+pkg_eavro_repo = https://github.com/SIfoxDevTeam/eavro
+pkg_eavro_commit = master
+
+PACKAGES += ecapnp
+pkg_ecapnp_name = ecapnp
+pkg_ecapnp_description = Cap'n Proto library for Erlang
+pkg_ecapnp_homepage = https://github.com/kaos/ecapnp
+pkg_ecapnp_fetch = git
+pkg_ecapnp_repo = https://github.com/kaos/ecapnp
+pkg_ecapnp_commit = master
+
+PACKAGES += econfig
+pkg_econfig_name = econfig
+pkg_econfig_description = simple Erlang config handler using INI files
+pkg_econfig_homepage = https://github.com/benoitc/econfig
+pkg_econfig_fetch = git
+pkg_econfig_repo = https://github.com/benoitc/econfig
+pkg_econfig_commit = master
+
+PACKAGES += edate
+pkg_edate_name = edate
+pkg_edate_description = date manipulation library for erlang
+pkg_edate_homepage = https://github.com/dweldon/edate
+pkg_edate_fetch = git
+pkg_edate_repo = https://github.com/dweldon/edate
+pkg_edate_commit = master
+
+PACKAGES += edgar
+pkg_edgar_name = edgar
+pkg_edgar_description = Erlang Does GNU AR
+pkg_edgar_homepage = https://github.com/crownedgrouse/edgar
+pkg_edgar_fetch = git
+pkg_edgar_repo = https://github.com/crownedgrouse/edgar
+pkg_edgar_commit = master
+
+PACKAGES += edis
+pkg_edis_name = edis
+pkg_edis_description = An Erlang implementation of Redis KV Store
+pkg_edis_homepage = http://inaka.github.com/edis/
+pkg_edis_fetch = git
+pkg_edis_repo = https://github.com/inaka/edis
+pkg_edis_commit = master
+
+PACKAGES += edns
+pkg_edns_name = edns
+pkg_edns_description = Erlang/OTP DNS server
+pkg_edns_homepage = https://github.com/hcvst/erlang-dns
+pkg_edns_fetch = git
+pkg_edns_repo = https://github.com/hcvst/erlang-dns
+pkg_edns_commit = master
+
+PACKAGES += edown
+pkg_edown_name = edown
+pkg_edown_description = EDoc extension for generating Github-flavored Markdown
+pkg_edown_homepage = https://github.com/uwiger/edown
+pkg_edown_fetch = git
+pkg_edown_repo = https://github.com/uwiger/edown
+pkg_edown_commit = master
+
+PACKAGES += eep
+pkg_eep_name = eep
+pkg_eep_description = Erlang Easy Profiling (eep) application provides a way to analyze application performance and call hierarchy
+pkg_eep_homepage = https://github.com/virtan/eep
+pkg_eep_fetch = git
+pkg_eep_repo = https://github.com/virtan/eep
+pkg_eep_commit = master
+
+PACKAGES += eep_app
+pkg_eep_app_name = eep_app
+pkg_eep_app_description = Embedded Event Processing
+pkg_eep_app_homepage = https://github.com/darach/eep-erl
+pkg_eep_app_fetch = git
+pkg_eep_app_repo = https://github.com/darach/eep-erl
+pkg_eep_app_commit = master
+
+PACKAGES += efene
+pkg_efene_name = efene
+pkg_efene_description = Alternative syntax for the Erlang Programming Language focusing on simplicity, ease of use and programmer UX
+pkg_efene_homepage = https://github.com/efene/efene
+pkg_efene_fetch = git
+pkg_efene_repo = https://github.com/efene/efene
+pkg_efene_commit = master
+
+PACKAGES += egeoip
+pkg_egeoip_name = egeoip
+pkg_egeoip_description = Erlang IP Geolocation module, currently supporting the MaxMind GeoLite City Database.
+pkg_egeoip_homepage = https://github.com/mochi/egeoip
+pkg_egeoip_fetch = git
+pkg_egeoip_repo = https://github.com/mochi/egeoip
+pkg_egeoip_commit = master
+
+PACKAGES += ehsa
+pkg_ehsa_name = ehsa
+pkg_ehsa_description = Erlang HTTP server basic and digest authentication modules
+pkg_ehsa_homepage = https://bitbucket.org/a12n/ehsa
+pkg_ehsa_fetch = hg
+pkg_ehsa_repo = https://bitbucket.org/a12n/ehsa
+pkg_ehsa_commit = default
+
+PACKAGES += ej
+pkg_ej_name = ej
+pkg_ej_description = Helper module for working with Erlang terms representing JSON
+pkg_ej_homepage = https://github.com/seth/ej
+pkg_ej_fetch = git
+pkg_ej_repo = https://github.com/seth/ej
+pkg_ej_commit = master
+
+PACKAGES += ejabberd
+pkg_ejabberd_name = ejabberd
+pkg_ejabberd_description = Robust, ubiquitous and massively scalable Jabber / XMPP Instant Messaging platform
+pkg_ejabberd_homepage = https://github.com/processone/ejabberd
+pkg_ejabberd_fetch = git
+pkg_ejabberd_repo = https://github.com/processone/ejabberd
+pkg_ejabberd_commit = master
+
+PACKAGES += ejwt
+pkg_ejwt_name = ejwt
+pkg_ejwt_description = erlang library for JSON Web Token
+pkg_ejwt_homepage = https://github.com/artefactop/ejwt
+pkg_ejwt_fetch = git
+pkg_ejwt_repo = https://github.com/artefactop/ejwt
+pkg_ejwt_commit = master
+
+PACKAGES += ekaf
+pkg_ekaf_name = ekaf
+pkg_ekaf_description = A minimal, high-performance Kafka client in Erlang.
+pkg_ekaf_homepage = https://github.com/helpshift/ekaf
+pkg_ekaf_fetch = git
+pkg_ekaf_repo = https://github.com/helpshift/ekaf
+pkg_ekaf_commit = master
+
+PACKAGES += elarm
+pkg_elarm_name = elarm
+pkg_elarm_description = Alarm Manager for Erlang.
+pkg_elarm_homepage = https://github.com/esl/elarm
+pkg_elarm_fetch = git
+pkg_elarm_repo = https://github.com/esl/elarm
+pkg_elarm_commit = master
+
+PACKAGES += eleveldb
+pkg_eleveldb_name = eleveldb
+pkg_eleveldb_description = Erlang LevelDB API
+pkg_eleveldb_homepage = https://github.com/basho/eleveldb
+pkg_eleveldb_fetch = git
+pkg_eleveldb_repo = https://github.com/basho/eleveldb
+pkg_eleveldb_commit = master
+
+PACKAGES += elixir
+pkg_elixir_name = elixir
+pkg_elixir_description = Elixir is a dynamic, functional language designed for building scalable and maintainable applications
+pkg_elixir_homepage = https://elixir-lang.org/
+pkg_elixir_fetch = git
+pkg_elixir_repo = https://github.com/elixir-lang/elixir
+pkg_elixir_commit = master
+
+PACKAGES += elli
+pkg_elli_name = elli
+pkg_elli_description = Simple, robust and performant Erlang web server
+pkg_elli_homepage = https://github.com/elli-lib/elli
+pkg_elli_fetch = git
+pkg_elli_repo = https://github.com/elli-lib/elli
+pkg_elli_commit = master
+
+PACKAGES += elvis
+pkg_elvis_name = elvis
+pkg_elvis_description = Erlang Style Reviewer
+pkg_elvis_homepage = https://github.com/inaka/elvis
+pkg_elvis_fetch = git
+pkg_elvis_repo = https://github.com/inaka/elvis
+pkg_elvis_commit = master
+
+PACKAGES += emagick
+pkg_emagick_name = emagick
+pkg_emagick_description = Wrapper for Graphics/ImageMagick command line tool.
+pkg_emagick_homepage = https://github.com/kivra/emagick
+pkg_emagick_fetch = git
+pkg_emagick_repo = https://github.com/kivra/emagick
+pkg_emagick_commit = master
+
+PACKAGES += emysql
+pkg_emysql_name = emysql
+pkg_emysql_description = Stable, pure Erlang MySQL driver.
+pkg_emysql_homepage = https://github.com/Eonblast/Emysql
+pkg_emysql_fetch = git
+pkg_emysql_repo = https://github.com/Eonblast/Emysql
+pkg_emysql_commit = master
+
+PACKAGES += enm
+pkg_enm_name = enm
+pkg_enm_description = Erlang driver for nanomsg
+pkg_enm_homepage = https://github.com/basho/enm
+pkg_enm_fetch = git
+pkg_enm_repo = https://github.com/basho/enm
+pkg_enm_commit = master
+
+PACKAGES += entop
+pkg_entop_name = entop
+pkg_entop_description = A top-like tool for monitoring an Erlang node
+pkg_entop_homepage = https://github.com/mazenharake/entop
+pkg_entop_fetch = git
+pkg_entop_repo = https://github.com/mazenharake/entop
+pkg_entop_commit = master
+
+PACKAGES += epcap
+pkg_epcap_name = epcap
+pkg_epcap_description = Erlang packet capture interface using pcap
+pkg_epcap_homepage = https://github.com/msantos/epcap
+pkg_epcap_fetch = git
+pkg_epcap_repo = https://github.com/msantos/epcap
+pkg_epcap_commit = master
+
+PACKAGES += eper
+pkg_eper_name = eper
+pkg_eper_description = Erlang performance and debugging tools.
+pkg_eper_homepage = https://github.com/massemanet/eper
+pkg_eper_fetch = git
+pkg_eper_repo = https://github.com/massemanet/eper
+pkg_eper_commit = master
+
+PACKAGES += epgsql
+pkg_epgsql_name = epgsql
+pkg_epgsql_description = Erlang PostgreSQL client library.
+pkg_epgsql_homepage = https://github.com/epgsql/epgsql
+pkg_epgsql_fetch = git
+pkg_epgsql_repo = https://github.com/epgsql/epgsql
+pkg_epgsql_commit = master
+
+PACKAGES += episcina
+pkg_episcina_name = episcina
+pkg_episcina_description = A simple, non-intrusive resource pool for connections
+pkg_episcina_homepage = https://github.com/erlware/episcina
+pkg_episcina_fetch = git
+pkg_episcina_repo = https://github.com/erlware/episcina
+pkg_episcina_commit = master
+
+PACKAGES += eplot
+pkg_eplot_name = eplot
+pkg_eplot_description = A plot engine written in Erlang.
+pkg_eplot_homepage = https://github.com/psyeugenic/eplot
+pkg_eplot_fetch = git
+pkg_eplot_repo = https://github.com/psyeugenic/eplot
+pkg_eplot_commit = master
+
+PACKAGES += epocxy
+pkg_epocxy_name = epocxy
+pkg_epocxy_description = Erlang Patterns of Concurrency
+pkg_epocxy_homepage = https://github.com/duomark/epocxy
+pkg_epocxy_fetch = git
+pkg_epocxy_repo = https://github.com/duomark/epocxy
+pkg_epocxy_commit = master
+
+PACKAGES += epubnub
+pkg_epubnub_name = epubnub
+pkg_epubnub_description = Erlang PubNub API
+pkg_epubnub_homepage = https://github.com/tsloughter/epubnub
+pkg_epubnub_fetch = git
+pkg_epubnub_repo = https://github.com/tsloughter/epubnub
+pkg_epubnub_commit = master
+
+PACKAGES += eqm
+pkg_eqm_name = eqm
+pkg_eqm_description = Erlang pub/sub with supply-demand channels
+pkg_eqm_homepage = https://github.com/loucash/eqm
+pkg_eqm_fetch = git
+pkg_eqm_repo = https://github.com/loucash/eqm
+pkg_eqm_commit = master
+
+PACKAGES += eredis
+pkg_eredis_name = eredis
+pkg_eredis_description = Erlang Redis client
+pkg_eredis_homepage = https://github.com/wooga/eredis
+pkg_eredis_fetch = git
+pkg_eredis_repo = https://github.com/wooga/eredis
+pkg_eredis_commit = master
+
+PACKAGES += eredis_pool
+pkg_eredis_pool_name = eredis_pool
+pkg_eredis_pool_description = eredis_pool is a pool of Redis clients, using eredis and poolboy.
+pkg_eredis_pool_homepage = https://github.com/hiroeorz/eredis_pool
+pkg_eredis_pool_fetch = git
+pkg_eredis_pool_repo = https://github.com/hiroeorz/eredis_pool
+pkg_eredis_pool_commit = master
+
+PACKAGES += erl_streams
+pkg_erl_streams_name = erl_streams
+pkg_erl_streams_description = Streams in Erlang
+pkg_erl_streams_homepage = https://github.com/epappas/erl_streams
+pkg_erl_streams_fetch = git
+pkg_erl_streams_repo = https://github.com/epappas/erl_streams
+pkg_erl_streams_commit = master
+
+PACKAGES += erlang_cep
+pkg_erlang_cep_name = erlang_cep
+pkg_erlang_cep_description = A basic CEP package written in Erlang
+pkg_erlang_cep_homepage = https://github.com/danmacklin/erlang_cep
+pkg_erlang_cep_fetch = git
+pkg_erlang_cep_repo = https://github.com/danmacklin/erlang_cep
+pkg_erlang_cep_commit = master
+
+PACKAGES += erlang_js
+pkg_erlang_js_name = erlang_js
+pkg_erlang_js_description = A linked-in driver for Erlang to Mozilla's SpiderMonkey JavaScript runtime.
+pkg_erlang_js_homepage = https://github.com/basho/erlang_js
+pkg_erlang_js_fetch = git
+pkg_erlang_js_repo = https://github.com/basho/erlang_js
+pkg_erlang_js_commit = master
+
+PACKAGES += erlang_localtime
+pkg_erlang_localtime_name = erlang_localtime
+pkg_erlang_localtime_description = Erlang library for conversion from one local time to another
+pkg_erlang_localtime_homepage = https://github.com/dmitryme/erlang_localtime
+pkg_erlang_localtime_fetch = git
+pkg_erlang_localtime_repo = https://github.com/dmitryme/erlang_localtime
+pkg_erlang_localtime_commit = master
+
+PACKAGES += erlang_smtp
+pkg_erlang_smtp_name = erlang_smtp
+pkg_erlang_smtp_description = Erlang SMTP and POP3 server code.
+pkg_erlang_smtp_homepage = https://github.com/tonyg/erlang-smtp
+pkg_erlang_smtp_fetch = git
+pkg_erlang_smtp_repo = https://github.com/tonyg/erlang-smtp
+pkg_erlang_smtp_commit = master
+
+PACKAGES += erlang_term
+pkg_erlang_term_name = erlang_term
+pkg_erlang_term_description = Erlang Term Info
+pkg_erlang_term_homepage = https://github.com/okeuday/erlang_term
+pkg_erlang_term_fetch = git
+pkg_erlang_term_repo = https://github.com/okeuday/erlang_term
+pkg_erlang_term_commit = master
+
+PACKAGES += erlastic_search
+pkg_erlastic_search_name = erlastic_search
+pkg_erlastic_search_description = An Erlang app for communicating with Elasticsearch's REST interface.
+pkg_erlastic_search_homepage = https://github.com/tsloughter/erlastic_search
+pkg_erlastic_search_fetch = git
+pkg_erlastic_search_repo = https://github.com/tsloughter/erlastic_search
+pkg_erlastic_search_commit = master
+
+PACKAGES += erlasticsearch
+pkg_erlasticsearch_name = erlasticsearch
+pkg_erlasticsearch_description = Erlang Thrift interface to Elasticsearch
+pkg_erlasticsearch_homepage = https://github.com/dieswaytoofast/erlasticsearch
+pkg_erlasticsearch_fetch = git
+pkg_erlasticsearch_repo = https://github.com/dieswaytoofast/erlasticsearch
+pkg_erlasticsearch_commit = master
+
+PACKAGES += erlbrake
+pkg_erlbrake_name = erlbrake
+pkg_erlbrake_description = Erlang Airbrake notification client
+pkg_erlbrake_homepage = https://github.com/kenpratt/erlbrake
+pkg_erlbrake_fetch = git
+pkg_erlbrake_repo = https://github.com/kenpratt/erlbrake
+pkg_erlbrake_commit = master
+
+PACKAGES += erlcloud
+pkg_erlcloud_name = erlcloud
+pkg_erlcloud_description = Cloud Computing library for erlang (Amazon EC2, S3, SQS, SimpleDB, Mechanical Turk, ELB)
+pkg_erlcloud_homepage = https://github.com/gleber/erlcloud
+pkg_erlcloud_fetch = git
+pkg_erlcloud_repo = https://github.com/gleber/erlcloud
+pkg_erlcloud_commit = master
+
+PACKAGES += erlcron
+pkg_erlcron_name = erlcron
+pkg_erlcron_description = Erlang cronish system
+pkg_erlcron_homepage = https://github.com/erlware/erlcron
+pkg_erlcron_fetch = git
+pkg_erlcron_repo = https://github.com/erlware/erlcron
+pkg_erlcron_commit = master
+
+PACKAGES += erldb
+pkg_erldb_name = erldb
+pkg_erldb_description = ORM (Object-relational mapping) application implemented in Erlang
+pkg_erldb_homepage = http://erldb.org
+pkg_erldb_fetch = git
+pkg_erldb_repo = https://github.com/erldb/erldb
+pkg_erldb_commit = master
+
+PACKAGES += erldis
+pkg_erldis_name = erldis
+pkg_erldis_description = Redis Erlang client library
+pkg_erldis_homepage = https://github.com/cstar/erldis
+pkg_erldis_fetch = git
+pkg_erldis_repo = https://github.com/cstar/erldis
+pkg_erldis_commit = master
+
+PACKAGES += erldns
+pkg_erldns_name = erldns
+pkg_erldns_description = DNS server, in Erlang.
+pkg_erldns_homepage = https://github.com/aetrion/erl-dns
+pkg_erldns_fetch = git
+pkg_erldns_repo = https://github.com/aetrion/erl-dns
+pkg_erldns_commit = master
+
+PACKAGES += erldocker
+pkg_erldocker_name = erldocker
+pkg_erldocker_description = Docker Remote API client for Erlang
+pkg_erldocker_homepage = https://github.com/proger/erldocker
+pkg_erldocker_fetch = git
+pkg_erldocker_repo = https://github.com/proger/erldocker
+pkg_erldocker_commit = master
+
+PACKAGES += erlfsmon
+pkg_erlfsmon_name = erlfsmon
+pkg_erlfsmon_description = Erlang filesystem event watcher for Linux and OSX
+pkg_erlfsmon_homepage = https://github.com/proger/erlfsmon
+pkg_erlfsmon_fetch = git
+pkg_erlfsmon_repo = https://github.com/proger/erlfsmon
+pkg_erlfsmon_commit = master
+
+PACKAGES += erlgit
+pkg_erlgit_name = erlgit
+pkg_erlgit_description = Erlang convenience wrapper around git executable
+pkg_erlgit_homepage = https://github.com/gleber/erlgit
+pkg_erlgit_fetch = git
+pkg_erlgit_repo = https://github.com/gleber/erlgit
+pkg_erlgit_commit = master
+
+PACKAGES += erlguten
+pkg_erlguten_name = erlguten
+pkg_erlguten_description = ErlGuten is a system for high-quality typesetting, written purely in Erlang.
+pkg_erlguten_homepage = https://github.com/richcarl/erlguten
+pkg_erlguten_fetch = git
+pkg_erlguten_repo = https://github.com/richcarl/erlguten
+pkg_erlguten_commit = master
+
+PACKAGES += erlmc
+pkg_erlmc_name = erlmc
+pkg_erlmc_description = Erlang memcached binary protocol client
+pkg_erlmc_homepage = https://github.com/jkvor/erlmc
+pkg_erlmc_fetch = git
+pkg_erlmc_repo = https://github.com/jkvor/erlmc
+pkg_erlmc_commit = master
+
+PACKAGES += erlmongo
+pkg_erlmongo_name = erlmongo
+pkg_erlmongo_description = Record-based Erlang driver for MongoDB with GridFS support
+pkg_erlmongo_homepage = https://github.com/SergejJurecko/erlmongo
+pkg_erlmongo_fetch = git
+pkg_erlmongo_repo = https://github.com/SergejJurecko/erlmongo
+pkg_erlmongo_commit = master
+
+PACKAGES += erlog
+pkg_erlog_name = erlog
+pkg_erlog_description = Prolog interpreter in and for Erlang
+pkg_erlog_homepage = https://github.com/rvirding/erlog
+pkg_erlog_fetch = git
+pkg_erlog_repo = https://github.com/rvirding/erlog
+pkg_erlog_commit = master
+
+PACKAGES += erlpass
+pkg_erlpass_name = erlpass
+pkg_erlpass_description = A library to handle password hashing and changing in a safe manner, independent from any kind of storage whatsoever.
+pkg_erlpass_homepage = https://github.com/ferd/erlpass
+pkg_erlpass_fetch = git
+pkg_erlpass_repo = https://github.com/ferd/erlpass
+pkg_erlpass_commit = master
+
+PACKAGES += erlport
+pkg_erlport_name = erlport
+pkg_erlport_description = ErlPort - connect Erlang to other languages
+pkg_erlport_homepage = https://github.com/hdima/erlport
+pkg_erlport_fetch = git
+pkg_erlport_repo = https://github.com/hdima/erlport
+pkg_erlport_commit = master
+
+PACKAGES += erlsh
+pkg_erlsh_name = erlsh
+pkg_erlsh_description = Erlang shell tools
+pkg_erlsh_homepage = https://github.com/proger/erlsh
+pkg_erlsh_fetch = git
+pkg_erlsh_repo = https://github.com/proger/erlsh
+pkg_erlsh_commit = master
+
+PACKAGES += erlsha2
+pkg_erlsha2_name = erlsha2
+pkg_erlsha2_description = SHA-224, SHA-256, SHA-384, SHA-512 implemented in Erlang NIFs.
+pkg_erlsha2_homepage = https://github.com/vinoski/erlsha2
+pkg_erlsha2_fetch = git
+pkg_erlsha2_repo = https://github.com/vinoski/erlsha2
+pkg_erlsha2_commit = master
+
+PACKAGES += erlsom
+pkg_erlsom_name = erlsom
+pkg_erlsom_description = XML parser for Erlang
+pkg_erlsom_homepage = https://github.com/willemdj/erlsom
+pkg_erlsom_fetch = git
+pkg_erlsom_repo = https://github.com/willemdj/erlsom
+pkg_erlsom_commit = master
+
+PACKAGES += erlubi
+pkg_erlubi_name = erlubi
+pkg_erlubi_description = Ubigraph Erlang Client (and Process Visualizer)
+pkg_erlubi_homepage = https://github.com/krestenkrab/erlubi
+pkg_erlubi_fetch = git
+pkg_erlubi_repo = https://github.com/krestenkrab/erlubi
+pkg_erlubi_commit = master
+
+PACKAGES += erlvolt
+pkg_erlvolt_name = erlvolt
+pkg_erlvolt_description = VoltDB Erlang Client Driver
+pkg_erlvolt_homepage = https://github.com/VoltDB/voltdb-client-erlang
+pkg_erlvolt_fetch = git
+pkg_erlvolt_repo = https://github.com/VoltDB/voltdb-client-erlang
+pkg_erlvolt_commit = master
+
+PACKAGES += erlware_commons
+pkg_erlware_commons_name = erlware_commons
+pkg_erlware_commons_description = Erlware Commons is an Erlware project focused on all aspects of reusable Erlang components.
+pkg_erlware_commons_homepage = https://github.com/erlware/erlware_commons
+pkg_erlware_commons_fetch = git
+pkg_erlware_commons_repo = https://github.com/erlware/erlware_commons
+pkg_erlware_commons_commit = master
+
+PACKAGES += erlydtl
+pkg_erlydtl_name = erlydtl
+pkg_erlydtl_description = Django Template Language for Erlang.
+pkg_erlydtl_homepage = https://github.com/erlydtl/erlydtl
+pkg_erlydtl_fetch = git
+pkg_erlydtl_repo = https://github.com/erlydtl/erlydtl
+pkg_erlydtl_commit = master
+
+PACKAGES += errd
+pkg_errd_name = errd
+pkg_errd_description = Erlang RRDTool library
+pkg_errd_homepage = https://github.com/archaelus/errd
+pkg_errd_fetch = git
+pkg_errd_repo = https://github.com/archaelus/errd
+pkg_errd_commit = master
+
+PACKAGES += erserve
+pkg_erserve_name = erserve
+pkg_erserve_description = Erlang/Rserve communication interface
+pkg_erserve_homepage = https://github.com/del/erserve
+pkg_erserve_fetch = git
+pkg_erserve_repo = https://github.com/del/erserve
+pkg_erserve_commit = master
+
+PACKAGES += erwa
+pkg_erwa_name = erwa
+pkg_erwa_description = A WAMP router and client written in Erlang.
+pkg_erwa_homepage = https://github.com/bwegh/erwa
+pkg_erwa_fetch = git
+pkg_erwa_repo = https://github.com/bwegh/erwa
+pkg_erwa_commit = master
+
+PACKAGES += escalus
+pkg_escalus_name = escalus
+pkg_escalus_description = An XMPP client library in Erlang for conveniently testing XMPP servers
+pkg_escalus_homepage = https://github.com/esl/escalus
+pkg_escalus_fetch = git
+pkg_escalus_repo = https://github.com/esl/escalus
+pkg_escalus_commit = master
+
+PACKAGES += esh_mk
+pkg_esh_mk_name = esh_mk
+pkg_esh_mk_description = esh template engine plugin for erlang.mk
+pkg_esh_mk_homepage = https://github.com/crownedgrouse/esh.mk
+pkg_esh_mk_fetch = git
+pkg_esh_mk_repo = https://github.com/crownedgrouse/esh.mk.git
+pkg_esh_mk_commit = master
+
+PACKAGES += espec
+pkg_espec_name = espec
+pkg_espec_description = ESpec: Behaviour driven development framework for Erlang
+pkg_espec_homepage = https://github.com/lucaspiller/espec
+pkg_espec_fetch = git
+pkg_espec_repo = https://github.com/lucaspiller/espec
+pkg_espec_commit = master
+
+PACKAGES += estatsd
+pkg_estatsd_name = estatsd
+pkg_estatsd_description = Erlang stats aggregation app that periodically flushes data to graphite
+pkg_estatsd_homepage = https://github.com/RJ/estatsd
+pkg_estatsd_fetch = git
+pkg_estatsd_repo = https://github.com/RJ/estatsd
+pkg_estatsd_commit = master
+
+PACKAGES += etap
+pkg_etap_name = etap
+pkg_etap_description = etap is a simple Erlang testing library that provides TAP-compliant output.
+pkg_etap_homepage = https://github.com/ngerakines/etap
+pkg_etap_fetch = git
+pkg_etap_repo = https://github.com/ngerakines/etap
+pkg_etap_commit = master
+
+PACKAGES += etest
+pkg_etest_name = etest
+pkg_etest_description = A lightweight, convention over configuration test framework for Erlang
+pkg_etest_homepage = https://github.com/wooga/etest
+pkg_etest_fetch = git
+pkg_etest_repo = https://github.com/wooga/etest
+pkg_etest_commit = master
+
+PACKAGES += etest_http
+pkg_etest_http_name = etest_http
+pkg_etest_http_description = etest Assertions around HTTP (client-side)
+pkg_etest_http_homepage = https://github.com/wooga/etest_http
+pkg_etest_http_fetch = git
+pkg_etest_http_repo = https://github.com/wooga/etest_http
+pkg_etest_http_commit = master
+
+PACKAGES += etoml
+pkg_etoml_name = etoml
+pkg_etoml_description = Erlang parser for the TOML language
+pkg_etoml_homepage = https://github.com/kalta/etoml
+pkg_etoml_fetch = git
+pkg_etoml_repo = https://github.com/kalta/etoml
+pkg_etoml_commit = master
+
+PACKAGES += eunit
+pkg_eunit_name = eunit
+pkg_eunit_description = The EUnit lightweight unit testing framework for Erlang - this is the canonical development repository.
+pkg_eunit_homepage = https://github.com/richcarl/eunit
+pkg_eunit_fetch = git
+pkg_eunit_repo = https://github.com/richcarl/eunit
+pkg_eunit_commit = master
+
+PACKAGES += eunit_formatters
+pkg_eunit_formatters_name = eunit_formatters
+pkg_eunit_formatters_description = Because eunit's output sucks. Let's make it better.
+pkg_eunit_formatters_homepage = https://github.com/seancribbs/eunit_formatters
+pkg_eunit_formatters_fetch = git
+pkg_eunit_formatters_repo = https://github.com/seancribbs/eunit_formatters
+pkg_eunit_formatters_commit = master
+
+PACKAGES += euthanasia
+pkg_euthanasia_name = euthanasia
+pkg_euthanasia_description = Merciful killer for your Erlang processes
+pkg_euthanasia_homepage = https://github.com/doubleyou/euthanasia
+pkg_euthanasia_fetch = git
+pkg_euthanasia_repo = https://github.com/doubleyou/euthanasia
+pkg_euthanasia_commit = master
+
+PACKAGES += evum
+pkg_evum_name = evum
+pkg_evum_description = Spawn Linux VMs as Erlang processes in the Erlang VM
+pkg_evum_homepage = https://github.com/msantos/evum
+pkg_evum_fetch = git
+pkg_evum_repo = https://github.com/msantos/evum
+pkg_evum_commit = master
+
+PACKAGES += exec
+pkg_exec_name = erlexec
+pkg_exec_description = Execute and control OS processes from Erlang/OTP.
+pkg_exec_homepage = http://saleyn.github.com/erlexec
+pkg_exec_fetch = git
+pkg_exec_repo = https://github.com/saleyn/erlexec
+pkg_exec_commit = master
+
+PACKAGES += exml
+pkg_exml_name = exml
+pkg_exml_description = XML parsing library in Erlang
+pkg_exml_homepage = https://github.com/paulgray/exml
+pkg_exml_fetch = git
+pkg_exml_repo = https://github.com/paulgray/exml
+pkg_exml_commit = master
+
+PACKAGES += exometer
+pkg_exometer_name = exometer
+pkg_exometer_description = Basic measurement objects and probe behavior
+pkg_exometer_homepage = https://github.com/Feuerlabs/exometer
+pkg_exometer_fetch = git
+pkg_exometer_repo = https://github.com/Feuerlabs/exometer
+pkg_exometer_commit = master
+
+PACKAGES += exs1024
+pkg_exs1024_name = exs1024
+pkg_exs1024_description = Xorshift1024star pseudo random number generator for Erlang.
+pkg_exs1024_homepage = https://github.com/jj1bdx/exs1024
+pkg_exs1024_fetch = git
+pkg_exs1024_repo = https://github.com/jj1bdx/exs1024
+pkg_exs1024_commit = master
+
+PACKAGES += exs64
+pkg_exs64_name = exs64
+pkg_exs64_description = Xorshift64star pseudo random number generator for Erlang.
+pkg_exs64_homepage = https://github.com/jj1bdx/exs64
+pkg_exs64_fetch = git
+pkg_exs64_repo = https://github.com/jj1bdx/exs64
+pkg_exs64_commit = master
+
+PACKAGES += exsplus116
+pkg_exsplus116_name = exsplus116
+pkg_exsplus116_description = Xorshift116plus for Erlang
+pkg_exsplus116_homepage = https://github.com/jj1bdx/exsplus116
+pkg_exsplus116_fetch = git
+pkg_exsplus116_repo = https://github.com/jj1bdx/exsplus116
+pkg_exsplus116_commit = master
+
+PACKAGES += exsplus128
+pkg_exsplus128_name = exsplus128
+pkg_exsplus128_description = Xorshift128plus pseudo random number generator for Erlang.
+pkg_exsplus128_homepage = https://github.com/jj1bdx/exsplus128
+pkg_exsplus128_fetch = git
+pkg_exsplus128_repo = https://github.com/jj1bdx/exsplus128
+pkg_exsplus128_commit = master
+
+PACKAGES += ezmq
+pkg_ezmq_name = ezmq
+pkg_ezmq_description = ZeroMQ implemented in Erlang
+pkg_ezmq_homepage = https://github.com/RoadRunnr/ezmq
+pkg_ezmq_fetch = git
+pkg_ezmq_repo = https://github.com/RoadRunnr/ezmq
+pkg_ezmq_commit = master
+
+PACKAGES += ezmtp
+pkg_ezmtp_name = ezmtp
+pkg_ezmtp_description = ZMTP protocol in pure Erlang.
+pkg_ezmtp_homepage = https://github.com/a13x/ezmtp
+pkg_ezmtp_fetch = git
+pkg_ezmtp_repo = https://github.com/a13x/ezmtp
+pkg_ezmtp_commit = master
+
+PACKAGES += fast_disk_log
+pkg_fast_disk_log_name = fast_disk_log
+pkg_fast_disk_log_description = Pool-based asynchronous Erlang disk logger
+pkg_fast_disk_log_homepage = https://github.com/lpgauth/fast_disk_log
+pkg_fast_disk_log_fetch = git
+pkg_fast_disk_log_repo = https://github.com/lpgauth/fast_disk_log
+pkg_fast_disk_log_commit = master
+
+PACKAGES += feeder
+pkg_feeder_name = feeder
+pkg_feeder_description = Stream parse RSS and Atom formatted XML feeds.
+pkg_feeder_homepage = https://github.com/michaelnisi/feeder
+pkg_feeder_fetch = git
+pkg_feeder_repo = https://github.com/michaelnisi/feeder
+pkg_feeder_commit = master
+
+PACKAGES += find_crate
+pkg_find_crate_name = find_crate
+pkg_find_crate_description = Find Rust libs and exes in Erlang application priv directory
+pkg_find_crate_homepage = https://github.com/goertzenator/find_crate
+pkg_find_crate_fetch = git
+pkg_find_crate_repo = https://github.com/goertzenator/find_crate
+pkg_find_crate_commit = master
+
+PACKAGES += fix
+pkg_fix_name = fix
+pkg_fix_description = FIX protocol (http://fixprotocol.org/) implementation.
+pkg_fix_homepage = https://github.com/maxlapshin/fix
+pkg_fix_fetch = git
+pkg_fix_repo = https://github.com/maxlapshin/fix
+pkg_fix_commit = master
+
+PACKAGES += flower
+pkg_flower_name = flower
+pkg_flower_description = FlowER - an Erlang OpenFlow development platform
+pkg_flower_homepage = https://github.com/travelping/flower
+pkg_flower_fetch = git
+pkg_flower_repo = https://github.com/travelping/flower
+pkg_flower_commit = master
+
+PACKAGES += fn
+pkg_fn_name = fn
+pkg_fn_description = Function utilities for Erlang
+pkg_fn_homepage = https://github.com/reiddraper/fn
+pkg_fn_fetch = git
+pkg_fn_repo = https://github.com/reiddraper/fn
+pkg_fn_commit = master
+
+PACKAGES += folsom
+pkg_folsom_name = folsom
+pkg_folsom_description = Expose Erlang Events and Metrics
+pkg_folsom_homepage = https://github.com/boundary/folsom
+pkg_folsom_fetch = git
+pkg_folsom_repo = https://github.com/boundary/folsom
+pkg_folsom_commit = master
+
+PACKAGES += folsom_cowboy
+pkg_folsom_cowboy_name = folsom_cowboy
+pkg_folsom_cowboy_description = A Cowboy based Folsom HTTP Wrapper.
+pkg_folsom_cowboy_homepage = https://github.com/boundary/folsom_cowboy
+pkg_folsom_cowboy_fetch = git
+pkg_folsom_cowboy_repo = https://github.com/boundary/folsom_cowboy
+pkg_folsom_cowboy_commit = master
+
+PACKAGES += folsomite
+pkg_folsomite_name = folsomite
+pkg_folsomite_description = Blow up your Graphite / Riemann server with Folsom metrics
+pkg_folsomite_homepage = https://github.com/campanja/folsomite
+pkg_folsomite_fetch = git
+pkg_folsomite_repo = https://github.com/campanja/folsomite
+pkg_folsomite_commit = master
+
+PACKAGES += fs
+pkg_fs_name = fs
+pkg_fs_description = Erlang FileSystem Listener
+pkg_fs_homepage = https://github.com/synrc/fs
+pkg_fs_fetch = git
+pkg_fs_repo = https://github.com/synrc/fs
+pkg_fs_commit = master
+
+PACKAGES += fuse
+pkg_fuse_name = fuse
+pkg_fuse_description = A Circuit Breaker for Erlang
+pkg_fuse_homepage = https://github.com/jlouis/fuse
+pkg_fuse_fetch = git
+pkg_fuse_repo = https://github.com/jlouis/fuse
+pkg_fuse_commit = master
+
+PACKAGES += gcm
+pkg_gcm_name = gcm
+pkg_gcm_description = An Erlang application for Google Cloud Messaging
+pkg_gcm_homepage = https://github.com/pdincau/gcm-erlang
+pkg_gcm_fetch = git
+pkg_gcm_repo = https://github.com/pdincau/gcm-erlang
+pkg_gcm_commit = master
+
+PACKAGES += gcprof
+pkg_gcprof_name = gcprof
+pkg_gcprof_description = Garbage Collection profiler for Erlang
+pkg_gcprof_homepage = https://github.com/knutin/gcprof
+pkg_gcprof_fetch = git
+pkg_gcprof_repo = https://github.com/knutin/gcprof
+pkg_gcprof_commit = master
+
+PACKAGES += geas
+pkg_geas_name = geas
+pkg_geas_description = Guess Erlang Application Scattering
+pkg_geas_homepage = https://github.com/crownedgrouse/geas
+pkg_geas_fetch = git
+pkg_geas_repo = https://github.com/crownedgrouse/geas
+pkg_geas_commit = master
+
+PACKAGES += geef
+pkg_geef_name = geef
+pkg_geef_description = Git NEEEEF (Erlang NIF)
+pkg_geef_homepage = https://github.com/carlosmn/geef
+pkg_geef_fetch = git
+pkg_geef_repo = https://github.com/carlosmn/geef
+pkg_geef_commit = master
+
+PACKAGES += gen_coap
+pkg_gen_coap_name = gen_coap
+pkg_gen_coap_description = Generic Erlang CoAP Client/Server
+pkg_gen_coap_homepage = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_fetch = git
+pkg_gen_coap_repo = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_commit = master
+
+PACKAGES += gen_cycle
+pkg_gen_cycle_name = gen_cycle
+pkg_gen_cycle_description = Simple, generic OTP behaviour for recurring tasks
+pkg_gen_cycle_homepage = https://github.com/aerosol/gen_cycle
+pkg_gen_cycle_fetch = git
+pkg_gen_cycle_repo = https://github.com/aerosol/gen_cycle
+pkg_gen_cycle_commit = develop
+
+PACKAGES += gen_icmp
+pkg_gen_icmp_name = gen_icmp
+pkg_gen_icmp_description = Erlang interface to ICMP sockets
+pkg_gen_icmp_homepage = https://github.com/msantos/gen_icmp
+pkg_gen_icmp_fetch = git
+pkg_gen_icmp_repo = https://github.com/msantos/gen_icmp
+pkg_gen_icmp_commit = master
+
+PACKAGES += gen_leader
+pkg_gen_leader_name = gen_leader
+pkg_gen_leader_description = Leader election behavior
+pkg_gen_leader_homepage = https://github.com/garret-smith/gen_leader_revival
+pkg_gen_leader_fetch = git
+pkg_gen_leader_repo = https://github.com/garret-smith/gen_leader_revival
+pkg_gen_leader_commit = master
+
+PACKAGES += gen_nb_server
+pkg_gen_nb_server_name = gen_nb_server
+pkg_gen_nb_server_description = OTP behavior for writing non-blocking servers
+pkg_gen_nb_server_homepage = https://github.com/kevsmith/gen_nb_server
+pkg_gen_nb_server_fetch = git
+pkg_gen_nb_server_repo = https://github.com/kevsmith/gen_nb_server
+pkg_gen_nb_server_commit = master
+
+PACKAGES += gen_paxos
+pkg_gen_paxos_name = gen_paxos
+pkg_gen_paxos_description = An Erlang/OTP-style implementation of the PAXOS distributed consensus protocol
+pkg_gen_paxos_homepage = https://github.com/gburd/gen_paxos
+pkg_gen_paxos_fetch = git
+pkg_gen_paxos_repo = https://github.com/gburd/gen_paxos
+pkg_gen_paxos_commit = master
+
+PACKAGES += gen_rpc
+pkg_gen_rpc_name = gen_rpc
+pkg_gen_rpc_description = A scalable RPC library for Erlang-VM based languages
+pkg_gen_rpc_homepage = https://github.com/priestjim/gen_rpc
+pkg_gen_rpc_fetch = git
+pkg_gen_rpc_repo = https://github.com/priestjim/gen_rpc.git
+pkg_gen_rpc_commit = master
+
+PACKAGES += gen_smtp
+pkg_gen_smtp_name = gen_smtp
+pkg_gen_smtp_description = A generic Erlang SMTP server and client that can be extended via callback modules
+pkg_gen_smtp_homepage = https://github.com/Vagabond/gen_smtp
+pkg_gen_smtp_fetch = git
+pkg_gen_smtp_repo = https://github.com/Vagabond/gen_smtp
+pkg_gen_smtp_commit = master
+
+PACKAGES += gen_tracker
+pkg_gen_tracker_name = gen_tracker
+pkg_gen_tracker_description = Supervisor with ETS handling of children and their metadata
+pkg_gen_tracker_homepage = https://github.com/erlyvideo/gen_tracker
+pkg_gen_tracker_fetch = git
+pkg_gen_tracker_repo = https://github.com/erlyvideo/gen_tracker
+pkg_gen_tracker_commit = master
+
+PACKAGES += gen_unix
+pkg_gen_unix_name = gen_unix
+pkg_gen_unix_description = Erlang Unix socket interface
+pkg_gen_unix_homepage = https://github.com/msantos/gen_unix
+pkg_gen_unix_fetch = git
+pkg_gen_unix_repo = https://github.com/msantos/gen_unix
+pkg_gen_unix_commit = master
+
+PACKAGES += geode
+pkg_geode_name = geode
+pkg_geode_description = Geohash/proximity lookup in pure, uncut Erlang.
+pkg_geode_homepage = https://github.com/bradfordw/geode
+pkg_geode_fetch = git
+pkg_geode_repo = https://github.com/bradfordw/geode
+pkg_geode_commit = master
+
+PACKAGES += getopt
+pkg_getopt_name = getopt
+pkg_getopt_description = Module to parse command line arguments using the GNU getopt syntax
+pkg_getopt_homepage = https://github.com/jcomellas/getopt
+pkg_getopt_fetch = git
+pkg_getopt_repo = https://github.com/jcomellas/getopt
+pkg_getopt_commit = master
+
+PACKAGES += gettext
+pkg_gettext_name = gettext
+pkg_gettext_description = Erlang internationalization library.
+pkg_gettext_homepage = https://github.com/etnt/gettext
+pkg_gettext_fetch = git
+pkg_gettext_repo = https://github.com/etnt/gettext
+pkg_gettext_commit = master
+
+PACKAGES += giallo
+pkg_giallo_name = giallo
+pkg_giallo_description = Small and flexible web framework on top of Cowboy
+pkg_giallo_homepage = https://github.com/kivra/giallo
+pkg_giallo_fetch = git
+pkg_giallo_repo = https://github.com/kivra/giallo
+pkg_giallo_commit = master
+
+PACKAGES += gin
+pkg_gin_name = gin
+pkg_gin_description = The guards "and" for Erlang parse_transform
+pkg_gin_homepage = https://github.com/mad-cocktail/gin
+pkg_gin_fetch = git
+pkg_gin_repo = https://github.com/mad-cocktail/gin
+pkg_gin_commit = master
+
+PACKAGES += gitty
+pkg_gitty_name = gitty
+pkg_gitty_description = Git access in Erlang
+pkg_gitty_homepage = https://github.com/maxlapshin/gitty
+pkg_gitty_fetch = git
+pkg_gitty_repo = https://github.com/maxlapshin/gitty
+pkg_gitty_commit = master
+
+PACKAGES += gold_fever
+pkg_gold_fever_name = gold_fever
+pkg_gold_fever_description = A Treasure Hunt for Erlangers
+pkg_gold_fever_homepage = https://github.com/inaka/gold_fever
+pkg_gold_fever_fetch = git
+pkg_gold_fever_repo = https://github.com/inaka/gold_fever
+pkg_gold_fever_commit = master
+
+PACKAGES += gpb
+pkg_gpb_name = gpb
+pkg_gpb_description = A Google Protobuf implementation for Erlang
+pkg_gpb_homepage = https://github.com/tomas-abrahamsson/gpb
+pkg_gpb_fetch = git
+pkg_gpb_repo = https://github.com/tomas-abrahamsson/gpb
+pkg_gpb_commit = master
+
+PACKAGES += gproc
+pkg_gproc_name = gproc
+pkg_gproc_description = Extended process registry for Erlang
+pkg_gproc_homepage = https://github.com/uwiger/gproc
+pkg_gproc_fetch = git
+pkg_gproc_repo = https://github.com/uwiger/gproc
+pkg_gproc_commit = master
+
+PACKAGES += grapherl
+pkg_grapherl_name = grapherl
+pkg_grapherl_description = Create graphs of Erlang systems and programs
+pkg_grapherl_homepage = https://github.com/eproxus/grapherl
+pkg_grapherl_fetch = git
+pkg_grapherl_repo = https://github.com/eproxus/grapherl
+pkg_grapherl_commit = master
+
+PACKAGES += grpc
+pkg_grpc_name = grpc
+pkg_grpc_description = gRPC server in Erlang
+pkg_grpc_homepage = https://github.com/Bluehouse-Technology/grpc
+pkg_grpc_fetch = git
+pkg_grpc_repo = https://github.com/Bluehouse-Technology/grpc
+pkg_grpc_commit = master
+
+PACKAGES += grpc_client
+pkg_grpc_client_name = grpc_client
+pkg_grpc_client_description = gRPC client in Erlang
+pkg_grpc_client_homepage = https://github.com/Bluehouse-Technology/grpc_client
+pkg_grpc_client_fetch = git
+pkg_grpc_client_repo = https://github.com/Bluehouse-Technology/grpc_client
+pkg_grpc_client_commit = master
+
+PACKAGES += gun
+pkg_gun_name = gun
+pkg_gun_description = Asynchronous SPDY, HTTP and WebSocket client written in Erlang.
+pkg_gun_homepage = http://ninenines.eu
+pkg_gun_fetch = git
+pkg_gun_repo = https://github.com/ninenines/gun
+pkg_gun_commit = master
+
+PACKAGES += gut
+pkg_gut_name = gut
+pkg_gut_description = gut is a template printing, aka scaffolding, tool for Erlang. Like rails generate or yeoman.
+pkg_gut_homepage = https://github.com/unbalancedparentheses/gut
+pkg_gut_fetch = git
+pkg_gut_repo = https://github.com/unbalancedparentheses/gut
+pkg_gut_commit = master
+
+PACKAGES += hackney
+pkg_hackney_name = hackney
+pkg_hackney_description = Simple HTTP client in Erlang
+pkg_hackney_homepage = https://github.com/benoitc/hackney
+pkg_hackney_fetch = git
+pkg_hackney_repo = https://github.com/benoitc/hackney
+pkg_hackney_commit = master
+
+PACKAGES += hamcrest
+pkg_hamcrest_name = hamcrest
+pkg_hamcrest_description = Erlang port of Hamcrest
+pkg_hamcrest_homepage = https://github.com/hyperthunk/hamcrest-erlang
+pkg_hamcrest_fetch = git
+pkg_hamcrest_repo = https://github.com/hyperthunk/hamcrest-erlang
+pkg_hamcrest_commit = master
+
+PACKAGES += hanoidb
+pkg_hanoidb_name = hanoidb
+pkg_hanoidb_description = Erlang LSM BTree Storage
+pkg_hanoidb_homepage = https://github.com/krestenkrab/hanoidb
+pkg_hanoidb_fetch = git
+pkg_hanoidb_repo = https://github.com/krestenkrab/hanoidb
+pkg_hanoidb_commit = master
+
+PACKAGES += hottub
+pkg_hottub_name = hottub
+pkg_hottub_description = Permanent Erlang Worker Pool
+pkg_hottub_homepage = https://github.com/bfrog/hottub
+pkg_hottub_fetch = git
+pkg_hottub_repo = https://github.com/bfrog/hottub
+pkg_hottub_commit = master
+
+PACKAGES += hpack
+pkg_hpack_name = hpack
+pkg_hpack_description = HPACK Implementation for Erlang
+pkg_hpack_homepage = https://github.com/joedevivo/hpack
+pkg_hpack_fetch = git
+pkg_hpack_repo = https://github.com/joedevivo/hpack
+pkg_hpack_commit = master
+
+PACKAGES += hyper
+pkg_hyper_name = hyper
+pkg_hyper_description = Erlang implementation of HyperLogLog
+pkg_hyper_homepage = https://github.com/GameAnalytics/hyper
+pkg_hyper_fetch = git
+pkg_hyper_repo = https://github.com/GameAnalytics/hyper
+pkg_hyper_commit = master
+
+PACKAGES += i18n
+pkg_i18n_name = i18n
+pkg_i18n_description = International components for unicode from Erlang (unicode, date, string, number, format, locale, localization, transliteration, icu4e)
+pkg_i18n_homepage = https://github.com/erlang-unicode/i18n
+pkg_i18n_fetch = git
+pkg_i18n_repo = https://github.com/erlang-unicode/i18n
+pkg_i18n_commit = master
+
+PACKAGES += ibrowse
+pkg_ibrowse_name = ibrowse
+pkg_ibrowse_description = Erlang HTTP client
+pkg_ibrowse_homepage = https://github.com/cmullaparthi/ibrowse
+pkg_ibrowse_fetch = git
+pkg_ibrowse_repo = https://github.com/cmullaparthi/ibrowse
+pkg_ibrowse_commit = master
+
+PACKAGES += idna
+pkg_idna_name = idna
+pkg_idna_description = Erlang IDNA lib
+pkg_idna_homepage = https://github.com/benoitc/erlang-idna
+pkg_idna_fetch = git
+pkg_idna_repo = https://github.com/benoitc/erlang-idna
+pkg_idna_commit = master
+
+PACKAGES += ierlang
+pkg_ierlang_name = ierlang
+pkg_ierlang_description = An Erlang language kernel for IPython.
+pkg_ierlang_homepage = https://github.com/robbielynch/ierlang
+pkg_ierlang_fetch = git
+pkg_ierlang_repo = https://github.com/robbielynch/ierlang
+pkg_ierlang_commit = master
+
+PACKAGES += iota
+pkg_iota_name = iota
+pkg_iota_description = iota (Inter-dependency Objective Testing Apparatus) - a tool to enforce clean separation of responsibilities in Erlang code
+pkg_iota_homepage = https://github.com/jpgneves/iota
+pkg_iota_fetch = git
+pkg_iota_repo = https://github.com/jpgneves/iota
+pkg_iota_commit = master
+
+PACKAGES += irc_lib
+pkg_irc_lib_name = irc_lib
+pkg_irc_lib_description = Erlang IRC client library
+pkg_irc_lib_homepage = https://github.com/OtpChatBot/irc_lib
+pkg_irc_lib_fetch = git
+pkg_irc_lib_repo = https://github.com/OtpChatBot/irc_lib
+pkg_irc_lib_commit = master
+
+PACKAGES += ircd
+pkg_ircd_name = ircd
+pkg_ircd_description = A pluggable IRC daemon application/library for Erlang.
+pkg_ircd_homepage = https://github.com/tonyg/erlang-ircd
+pkg_ircd_fetch = git
+pkg_ircd_repo = https://github.com/tonyg/erlang-ircd
+pkg_ircd_commit = master
+
+PACKAGES += iris
+pkg_iris_name = iris
+pkg_iris_description = Iris Erlang binding
+pkg_iris_homepage = https://github.com/project-iris/iris-erl
+pkg_iris_fetch = git
+pkg_iris_repo = https://github.com/project-iris/iris-erl
+pkg_iris_commit = master
+
+PACKAGES += iso8601
+pkg_iso8601_name = iso8601
+pkg_iso8601_description = Erlang ISO 8601 date formatter/parser
+pkg_iso8601_homepage = https://github.com/seansawyer/erlang_iso8601
+pkg_iso8601_fetch = git
+pkg_iso8601_repo = https://github.com/seansawyer/erlang_iso8601
+pkg_iso8601_commit = master
+
+PACKAGES += jamdb_sybase
+pkg_jamdb_sybase_name = jamdb_sybase
+pkg_jamdb_sybase_description = Erlang driver for SAP Sybase ASE
+pkg_jamdb_sybase_homepage = https://github.com/erlangbureau/jamdb_sybase
+pkg_jamdb_sybase_fetch = git
+pkg_jamdb_sybase_repo = https://github.com/erlangbureau/jamdb_sybase
+pkg_jamdb_sybase_commit = master
+
+PACKAGES += jerg
+pkg_jerg_name = jerg
+pkg_jerg_description = JSON Schema to Erlang Records Generator
+pkg_jerg_homepage = https://github.com/ddossot/jerg
+pkg_jerg_fetch = git
+pkg_jerg_repo = https://github.com/ddossot/jerg
+pkg_jerg_commit = master
+
+PACKAGES += jesse
+pkg_jesse_name = jesse
+pkg_jesse_description = jesse (JSON Schema Erlang) is an implementation of a JSON Schema validator for Erlang.
+pkg_jesse_homepage = https://github.com/for-GET/jesse
+pkg_jesse_fetch = git
+pkg_jesse_repo = https://github.com/for-GET/jesse
+pkg_jesse_commit = master
+
+PACKAGES += jiffy
+pkg_jiffy_name = jiffy
+pkg_jiffy_description = JSON NIFs for Erlang.
+pkg_jiffy_homepage = https://github.com/davisp/jiffy
+pkg_jiffy_fetch = git
+pkg_jiffy_repo = https://github.com/davisp/jiffy
+pkg_jiffy_commit = master
+
+PACKAGES += jiffy_v
+pkg_jiffy_v_name = jiffy_v
+pkg_jiffy_v_description = JSON validation utility
+pkg_jiffy_v_homepage = https://github.com/shizzard/jiffy-v
+pkg_jiffy_v_fetch = git
+pkg_jiffy_v_repo = https://github.com/shizzard/jiffy-v
+pkg_jiffy_v_commit = master
+
+PACKAGES += jobs
+pkg_jobs_name = jobs
+pkg_jobs_description = A job scheduler for load regulation
+pkg_jobs_homepage = https://github.com/esl/jobs
+pkg_jobs_fetch = git
+pkg_jobs_repo = https://github.com/esl/jobs
+pkg_jobs_commit = master
+
+PACKAGES += joxa
+pkg_joxa_name = joxa
+pkg_joxa_description = A Modern Lisp for the Erlang VM
+pkg_joxa_homepage = https://github.com/joxa/joxa
+pkg_joxa_fetch = git
+pkg_joxa_repo = https://github.com/joxa/joxa
+pkg_joxa_commit = master
+
+PACKAGES += json
+pkg_json_name = json
+pkg_json_description = A high-level JSON library for Erlang (17.0+)
+pkg_json_homepage = https://github.com/talentdeficit/json
+pkg_json_fetch = git
+pkg_json_repo = https://github.com/talentdeficit/json
+pkg_json_commit = master
+
+PACKAGES += json_rec
+pkg_json_rec_name = json_rec
+pkg_json_rec_description = JSON to Erlang record
+pkg_json_rec_homepage = https://github.com/justinkirby/json_rec
+pkg_json_rec_fetch = git
+pkg_json_rec_repo = https://github.com/justinkirby/json_rec
+pkg_json_rec_commit = master
+
+PACKAGES += jsone
+pkg_jsone_name = jsone
+pkg_jsone_description = An Erlang library for encoding and decoding JSON data.
+pkg_jsone_homepage = https://github.com/sile/jsone
+pkg_jsone_fetch = git
+pkg_jsone_repo = https://github.com/sile/jsone.git
+pkg_jsone_commit = master
+
+PACKAGES += jsonerl
+pkg_jsonerl_name = jsonerl
+pkg_jsonerl_description = Yet another but slightly different Erlang <-> JSON encoder/decoder
+pkg_jsonerl_homepage = https://github.com/lambder/jsonerl
+pkg_jsonerl_fetch = git
+pkg_jsonerl_repo = https://github.com/lambder/jsonerl
+pkg_jsonerl_commit = master
+
+PACKAGES += jsonpath
+pkg_jsonpath_name = jsonpath
+pkg_jsonpath_description = Fast Erlang JSON data retrieval and updates via javascript-like notation
+pkg_jsonpath_homepage = https://github.com/GeneStevens/jsonpath
+pkg_jsonpath_fetch = git
+pkg_jsonpath_repo = https://github.com/GeneStevens/jsonpath
+pkg_jsonpath_commit = master
+
+PACKAGES += jsonx
+pkg_jsonx_name = jsonx
+pkg_jsonx_description = JSONX is an Erlang library for efficient JSON decoding and encoding, written in C.
+pkg_jsonx_homepage = https://github.com/iskra/jsonx
+pkg_jsonx_fetch = git
+pkg_jsonx_repo = https://github.com/iskra/jsonx
+pkg_jsonx_commit = master
+
+PACKAGES += jsx
+pkg_jsx_name = jsx
+pkg_jsx_description = An Erlang application for consuming, producing and manipulating JSON.
+pkg_jsx_homepage = https://github.com/talentdeficit/jsx
+pkg_jsx_fetch = git
+pkg_jsx_repo = https://github.com/talentdeficit/jsx
+pkg_jsx_commit = master
+
+PACKAGES += kafka
+pkg_kafka_name = kafka
+pkg_kafka_description = Kafka consumer and producer in Erlang
+pkg_kafka_homepage = https://github.com/wooga/kafka-erlang
+pkg_kafka_fetch = git
+pkg_kafka_repo = https://github.com/wooga/kafka-erlang
+pkg_kafka_commit = master
+
+PACKAGES += kafka_protocol
+pkg_kafka_protocol_name = kafka_protocol
+pkg_kafka_protocol_description = Kafka protocol Erlang library
+pkg_kafka_protocol_homepage = https://github.com/klarna/kafka_protocol
+pkg_kafka_protocol_fetch = git
+pkg_kafka_protocol_repo = https://github.com/klarna/kafka_protocol.git
+pkg_kafka_protocol_commit = master
+
+PACKAGES += kai
+pkg_kai_name = kai
+pkg_kai_description = DHT storage by Takeshi Inoue
+pkg_kai_homepage = https://github.com/synrc/kai
+pkg_kai_fetch = git
+pkg_kai_repo = https://github.com/synrc/kai
+pkg_kai_commit = master
+
+PACKAGES += katja
+pkg_katja_name = katja
+pkg_katja_description = A simple Riemann client written in Erlang.
+pkg_katja_homepage = https://github.com/nifoc/katja
+pkg_katja_fetch = git
+pkg_katja_repo = https://github.com/nifoc/katja
+pkg_katja_commit = master
+
+PACKAGES += kdht
+pkg_kdht_name = kdht
+pkg_kdht_description = kdht is an Erlang DHT implementation
+pkg_kdht_homepage = https://github.com/kevinlynx/kdht
+pkg_kdht_fetch = git
+pkg_kdht_repo = https://github.com/kevinlynx/kdht
+pkg_kdht_commit = master
+
+PACKAGES += key2value
+pkg_key2value_name = key2value
+pkg_key2value_description = Erlang 2-way map
+pkg_key2value_homepage = https://github.com/okeuday/key2value
+pkg_key2value_fetch = git
+pkg_key2value_repo = https://github.com/okeuday/key2value
+pkg_key2value_commit = master
+
+PACKAGES += keys1value
+pkg_keys1value_name = keys1value
+pkg_keys1value_description = Erlang set associative map for key lists
+pkg_keys1value_homepage = https://github.com/okeuday/keys1value
+pkg_keys1value_fetch = git
+pkg_keys1value_repo = https://github.com/okeuday/keys1value
+pkg_keys1value_commit = master
+
+PACKAGES += kinetic
+pkg_kinetic_name = kinetic
+pkg_kinetic_description = Erlang Kinesis Client
+pkg_kinetic_homepage = https://github.com/AdRoll/kinetic
+pkg_kinetic_fetch = git
+pkg_kinetic_repo = https://github.com/AdRoll/kinetic
+pkg_kinetic_commit = master
+
+PACKAGES += kjell
+pkg_kjell_name = kjell
+pkg_kjell_description = Erlang Shell
+pkg_kjell_homepage = https://github.com/karlll/kjell
+pkg_kjell_fetch = git
+pkg_kjell_repo = https://github.com/karlll/kjell
+pkg_kjell_commit = master
+
+PACKAGES += kraken
+pkg_kraken_name = kraken
+pkg_kraken_description = Distributed Pubsub Server for Realtime Apps
+pkg_kraken_homepage = https://github.com/Asana/kraken
+pkg_kraken_fetch = git
+pkg_kraken_repo = https://github.com/Asana/kraken
+pkg_kraken_commit = master
+
+PACKAGES += kucumberl
+pkg_kucumberl_name = kucumberl
+pkg_kucumberl_description = A pure-Erlang, open-source implementation of Cucumber
+pkg_kucumberl_homepage = https://github.com/openshine/kucumberl
+pkg_kucumberl_fetch = git
+pkg_kucumberl_repo = https://github.com/openshine/kucumberl
+pkg_kucumberl_commit = master
+
+PACKAGES += kvc
+pkg_kvc_name = kvc
+pkg_kvc_description = KVC - Key Value Coding for Erlang data structures
+pkg_kvc_homepage = https://github.com/etrepum/kvc
+pkg_kvc_fetch = git
+pkg_kvc_repo = https://github.com/etrepum/kvc
+pkg_kvc_commit = master
+
+PACKAGES += kvlists
+pkg_kvlists_name = kvlists
+pkg_kvlists_description = Lists of key-value pairs (decoded JSON) in Erlang
+pkg_kvlists_homepage = https://github.com/jcomellas/kvlists
+pkg_kvlists_fetch = git
+pkg_kvlists_repo = https://github.com/jcomellas/kvlists
+pkg_kvlists_commit = master
+
+PACKAGES += kvs
+pkg_kvs_name = kvs
+pkg_kvs_description = Container and Iterator
+pkg_kvs_homepage = https://github.com/synrc/kvs
+pkg_kvs_fetch = git
+pkg_kvs_repo = https://github.com/synrc/kvs
+pkg_kvs_commit = master
+
+PACKAGES += lager
+pkg_lager_name = lager
+pkg_lager_description = A logging framework for Erlang/OTP.
+pkg_lager_homepage = https://github.com/erlang-lager/lager
+pkg_lager_fetch = git
+pkg_lager_repo = https://github.com/erlang-lager/lager
+pkg_lager_commit = master
+
+PACKAGES += lager_amqp_backend
+pkg_lager_amqp_backend_name = lager_amqp_backend
+pkg_lager_amqp_backend_description = AMQP RabbitMQ Lager backend
+pkg_lager_amqp_backend_homepage = https://github.com/jbrisbin/lager_amqp_backend
+pkg_lager_amqp_backend_fetch = git
+pkg_lager_amqp_backend_repo = https://github.com/jbrisbin/lager_amqp_backend
+pkg_lager_amqp_backend_commit = master
+
+PACKAGES += lager_syslog
+pkg_lager_syslog_name = lager_syslog
+pkg_lager_syslog_description = Syslog backend for lager
+pkg_lager_syslog_homepage = https://github.com/erlang-lager/lager_syslog
+pkg_lager_syslog_fetch = git
+pkg_lager_syslog_repo = https://github.com/erlang-lager/lager_syslog
+pkg_lager_syslog_commit = master
+
+PACKAGES += lambdapad
+pkg_lambdapad_name = lambdapad
+pkg_lambdapad_description = Static site generator using Erlang. Yes, Erlang.
+pkg_lambdapad_homepage = https://github.com/gar1t/lambdapad
+pkg_lambdapad_fetch = git
+pkg_lambdapad_repo = https://github.com/gar1t/lambdapad
+pkg_lambdapad_commit = master
+
+PACKAGES += lasp
+pkg_lasp_name = lasp
+pkg_lasp_description = A Language for Distributed, Eventually Consistent Computations
+pkg_lasp_homepage = http://lasp-lang.org/
+pkg_lasp_fetch = git
+pkg_lasp_repo = https://github.com/lasp-lang/lasp
+pkg_lasp_commit = master
+
+PACKAGES += lasse
+pkg_lasse_name = lasse
+pkg_lasse_description = SSE handler for Cowboy
+pkg_lasse_homepage = https://github.com/inaka/lasse
+pkg_lasse_fetch = git
+pkg_lasse_repo = https://github.com/inaka/lasse
+pkg_lasse_commit = master
+
+PACKAGES += ldap
+pkg_ldap_name = ldap
+pkg_ldap_description = LDAP server written in Erlang
+pkg_ldap_homepage = https://github.com/spawnproc/ldap
+pkg_ldap_fetch = git
+pkg_ldap_repo = https://github.com/spawnproc/ldap
+pkg_ldap_commit = master
+
+PACKAGES += lethink
+pkg_lethink_name = lethink
+pkg_lethink_description = Erlang driver for RethinkDB
+pkg_lethink_homepage = https://github.com/taybin/lethink
+pkg_lethink_fetch = git
+pkg_lethink_repo = https://github.com/taybin/lethink
+pkg_lethink_commit = master
+
+PACKAGES += lfe
+pkg_lfe_name = lfe
+pkg_lfe_description = Lisp Flavoured Erlang (LFE)
+pkg_lfe_homepage = https://github.com/rvirding/lfe
+pkg_lfe_fetch = git
+pkg_lfe_repo = https://github.com/rvirding/lfe
+pkg_lfe_commit = master
+
+PACKAGES += ling
+pkg_ling_name = ling
+pkg_ling_description = Erlang on Xen
+pkg_ling_homepage = https://github.com/cloudozer/ling
+pkg_ling_fetch = git
+pkg_ling_repo = https://github.com/cloudozer/ling
+pkg_ling_commit = master
+
+PACKAGES += live
+pkg_live_name = live
+pkg_live_description = Automated module and configuration reloader.
+pkg_live_homepage = http://ninenines.eu
+pkg_live_fetch = git
+pkg_live_repo = https://github.com/ninenines/live
+pkg_live_commit = master
+
+PACKAGES += lmq
+pkg_lmq_name = lmq
+pkg_lmq_description = Lightweight Message Queue
+pkg_lmq_homepage = https://github.com/iij/lmq
+pkg_lmq_fetch = git
+pkg_lmq_repo = https://github.com/iij/lmq
+pkg_lmq_commit = master
+
+PACKAGES += locker
+pkg_locker_name = locker
+pkg_locker_description = Atomic distributed 'check and set' for short-lived keys
+pkg_locker_homepage = https://github.com/wooga/locker
+pkg_locker_fetch = git
+pkg_locker_repo = https://github.com/wooga/locker
+pkg_locker_commit = master
+
+PACKAGES += locks
+pkg_locks_name = locks
+pkg_locks_description = A scalable, deadlock-resolving resource locker
+pkg_locks_homepage = https://github.com/uwiger/locks
+pkg_locks_fetch = git
+pkg_locks_repo = https://github.com/uwiger/locks
+pkg_locks_commit = master
+
+PACKAGES += log4erl
+pkg_log4erl_name = log4erl
+pkg_log4erl_description = A logger for erlang in the spirit of Log4J.
+pkg_log4erl_homepage = https://github.com/ahmednawras/log4erl
+pkg_log4erl_fetch = git
+pkg_log4erl_repo = https://github.com/ahmednawras/log4erl
+pkg_log4erl_commit = master
+
+PACKAGES += lol
+pkg_lol_name = lol
+pkg_lol_description = Lisp on erLang, and programming is fun again
+pkg_lol_homepage = https://github.com/b0oh/lol
+pkg_lol_fetch = git
+pkg_lol_repo = https://github.com/b0oh/lol
+pkg_lol_commit = master
+
+PACKAGES += lucid
+pkg_lucid_name = lucid
+pkg_lucid_description = HTTP/2 server written in Erlang
+pkg_lucid_homepage = https://github.com/tatsuhiro-t/lucid
+pkg_lucid_fetch = git
+pkg_lucid_repo = https://github.com/tatsuhiro-t/lucid
+pkg_lucid_commit = master
+
+PACKAGES += luerl
+pkg_luerl_name = luerl
+pkg_luerl_description = Lua in Erlang
+pkg_luerl_homepage = https://github.com/rvirding/luerl
+pkg_luerl_fetch = git
+pkg_luerl_repo = https://github.com/rvirding/luerl
+pkg_luerl_commit = develop
+
+PACKAGES += luwak
+pkg_luwak_name = luwak
+pkg_luwak_description = Large-object storage interface for Riak
+pkg_luwak_homepage = https://github.com/basho/luwak
+pkg_luwak_fetch = git
+pkg_luwak_repo = https://github.com/basho/luwak
+pkg_luwak_commit = master
+
+PACKAGES += lux
+pkg_lux_name = lux
+pkg_lux_description = Lux (LUcid eXpect scripting) simplifies test automation and provides an Expect-style execution of commands
+pkg_lux_homepage = https://github.com/hawk/lux
+pkg_lux_fetch = git
+pkg_lux_repo = https://github.com/hawk/lux
+pkg_lux_commit = master
+
+PACKAGES += machi
+pkg_machi_name = machi
+pkg_machi_description = Machi file store
+pkg_machi_homepage = https://github.com/basho/machi
+pkg_machi_fetch = git
+pkg_machi_repo = https://github.com/basho/machi
+pkg_machi_commit = master
+
+PACKAGES += mad
+pkg_mad_name = mad
+pkg_mad_description = Small and Fast Rebar Replacement
+pkg_mad_homepage = https://github.com/synrc/mad
+pkg_mad_fetch = git
+pkg_mad_repo = https://github.com/synrc/mad
+pkg_mad_commit = master
+
+PACKAGES += marina
+pkg_marina_name = marina
+pkg_marina_description = Non-blocking Erlang Cassandra CQL3 client
+pkg_marina_homepage = https://github.com/lpgauth/marina
+pkg_marina_fetch = git
+pkg_marina_repo = https://github.com/lpgauth/marina
+pkg_marina_commit = master
+
+PACKAGES += mavg
+pkg_mavg_name = mavg
+pkg_mavg_description = Erlang :: Exponential moving average library
+pkg_mavg_homepage = https://github.com/EchoTeam/mavg
+pkg_mavg_fetch = git
+pkg_mavg_repo = https://github.com/EchoTeam/mavg
+pkg_mavg_commit = master
+
+PACKAGES += mc_erl
+pkg_mc_erl_name = mc_erl
+pkg_mc_erl_description = mc-erl is a server for Minecraft 1.4.7 written in Erlang.
+pkg_mc_erl_homepage = https://github.com/clonejo/mc-erl
+pkg_mc_erl_fetch = git
+pkg_mc_erl_repo = https://github.com/clonejo/mc-erl
+pkg_mc_erl_commit = master
+
+PACKAGES += mcd
+pkg_mcd_name = mcd
+pkg_mcd_description = Fast memcached protocol client in pure Erlang
+pkg_mcd_homepage = https://github.com/EchoTeam/mcd
+pkg_mcd_fetch = git
+pkg_mcd_repo = https://github.com/EchoTeam/mcd
+pkg_mcd_commit = master
+
+PACKAGES += mcerlang
+pkg_mcerlang_name = mcerlang
+pkg_mcerlang_description = The McErlang model checker for Erlang
+pkg_mcerlang_homepage = https://github.com/fredlund/McErlang
+pkg_mcerlang_fetch = git
+pkg_mcerlang_repo = https://github.com/fredlund/McErlang
+pkg_mcerlang_commit = master
+
+PACKAGES += meck
+pkg_meck_name = meck
+pkg_meck_description = A mocking library for Erlang
+pkg_meck_homepage = https://github.com/eproxus/meck
+pkg_meck_fetch = git
+pkg_meck_repo = https://github.com/eproxus/meck
+pkg_meck_commit = master
+
+PACKAGES += mekao
+pkg_mekao_name = mekao
+pkg_mekao_description = SQL constructor
+pkg_mekao_homepage = https://github.com/ddosia/mekao
+pkg_mekao_fetch = git
+pkg_mekao_repo = https://github.com/ddosia/mekao
+pkg_mekao_commit = master
+
+PACKAGES += memo
+pkg_memo_name = memo
+pkg_memo_description = Erlang memoization server
+pkg_memo_homepage = https://github.com/tuncer/memo
+pkg_memo_fetch = git
+pkg_memo_repo = https://github.com/tuncer/memo
+pkg_memo_commit = master
+
+PACKAGES += merge_index
+pkg_merge_index_name = merge_index
+pkg_merge_index_description = MergeIndex is an Erlang library for storing ordered sets on disk. It is very similar to an SSTable (in Google's Bigtable) or an HFile (in Hadoop).
+pkg_merge_index_homepage = https://github.com/basho/merge_index
+pkg_merge_index_fetch = git
+pkg_merge_index_repo = https://github.com/basho/merge_index
+pkg_merge_index_commit = master
+
+PACKAGES += merl
+pkg_merl_name = merl
+pkg_merl_description = Metaprogramming in Erlang
+pkg_merl_homepage = https://github.com/richcarl/merl
+pkg_merl_fetch = git
+pkg_merl_repo = https://github.com/richcarl/merl
+pkg_merl_commit = master
+
+PACKAGES += mimerl
+pkg_mimerl_name = mimerl
+pkg_mimerl_description = Library to handle MIME types
+pkg_mimerl_homepage = https://github.com/benoitc/mimerl
+pkg_mimerl_fetch = git
+pkg_mimerl_repo = https://github.com/benoitc/mimerl
+pkg_mimerl_commit = master
+
+PACKAGES += mimetypes
+pkg_mimetypes_name = mimetypes
+pkg_mimetypes_description = Erlang MIME types library
+pkg_mimetypes_homepage = https://github.com/spawngrid/mimetypes
+pkg_mimetypes_fetch = git
+pkg_mimetypes_repo = https://github.com/spawngrid/mimetypes
+pkg_mimetypes_commit = master
+
+PACKAGES += mixer
+pkg_mixer_name = mixer
+pkg_mixer_description = Mix in functions from other modules
+pkg_mixer_homepage = https://github.com/chef/mixer
+pkg_mixer_fetch = git
+pkg_mixer_repo = https://github.com/chef/mixer
+pkg_mixer_commit = master
+
+PACKAGES += mochiweb
+pkg_mochiweb_name = mochiweb
+pkg_mochiweb_description = MochiWeb is an Erlang library for building lightweight HTTP servers.
+pkg_mochiweb_homepage = https://github.com/mochi/mochiweb
+pkg_mochiweb_fetch = git
+pkg_mochiweb_repo = https://github.com/mochi/mochiweb
+pkg_mochiweb_commit = master
+
+PACKAGES += mochiweb_xpath
+pkg_mochiweb_xpath_name = mochiweb_xpath
+pkg_mochiweb_xpath_description = XPath support for mochiweb's HTML parser
+pkg_mochiweb_xpath_homepage = https://github.com/retnuh/mochiweb_xpath
+pkg_mochiweb_xpath_fetch = git
+pkg_mochiweb_xpath_repo = https://github.com/retnuh/mochiweb_xpath
+pkg_mochiweb_xpath_commit = master
+
+PACKAGES += mockgyver
+pkg_mockgyver_name = mockgyver
+pkg_mockgyver_description = A mocking library for Erlang
+pkg_mockgyver_homepage = https://github.com/klajo/mockgyver
+pkg_mockgyver_fetch = git
+pkg_mockgyver_repo = https://github.com/klajo/mockgyver
+pkg_mockgyver_commit = master
+
+PACKAGES += modlib
+pkg_modlib_name = modlib
+pkg_modlib_description = Web framework based on Erlang's inets httpd
+pkg_modlib_homepage = https://github.com/gar1t/modlib
+pkg_modlib_fetch = git
+pkg_modlib_repo = https://github.com/gar1t/modlib
+pkg_modlib_commit = master
+
+PACKAGES += mongodb
+pkg_mongodb_name = mongodb
+pkg_mongodb_description = MongoDB driver for Erlang
+pkg_mongodb_homepage = https://github.com/comtihon/mongodb-erlang
+pkg_mongodb_fetch = git
+pkg_mongodb_repo = https://github.com/comtihon/mongodb-erlang
+pkg_mongodb_commit = master
+
+PACKAGES += mongooseim
+pkg_mongooseim_name = mongooseim
+pkg_mongooseim_description = Jabber / XMPP server with focus on performance and scalability, by Erlang Solutions
+pkg_mongooseim_homepage = https://www.erlang-solutions.com/products/mongooseim-massively-scalable-ejabberd-platform
+pkg_mongooseim_fetch = git
+pkg_mongooseim_repo = https://github.com/esl/MongooseIM
+pkg_mongooseim_commit = master
+
+PACKAGES += moyo
+pkg_moyo_name = moyo
+pkg_moyo_description = Erlang utility functions library
+pkg_moyo_homepage = https://github.com/dwango/moyo
+pkg_moyo_fetch = git
+pkg_moyo_repo = https://github.com/dwango/moyo
+pkg_moyo_commit = master
+
+PACKAGES += msgpack
+pkg_msgpack_name = msgpack
+pkg_msgpack_description = MessagePack (de)serializer implementation for Erlang
+pkg_msgpack_homepage = https://github.com/msgpack/msgpack-erlang
+pkg_msgpack_fetch = git
+pkg_msgpack_repo = https://github.com/msgpack/msgpack-erlang
+pkg_msgpack_commit = master
+
+PACKAGES += mu2
+pkg_mu2_name = mu2
+pkg_mu2_description = Erlang mutation testing tool
+pkg_mu2_homepage = https://github.com/ramsay-t/mu2
+pkg_mu2_fetch = git
+pkg_mu2_repo = https://github.com/ramsay-t/mu2
+pkg_mu2_commit = master
+
+PACKAGES += mustache
+pkg_mustache_name = mustache
+pkg_mustache_description = Mustache template engine for Erlang.
+pkg_mustache_homepage = https://github.com/mojombo/mustache.erl
+pkg_mustache_fetch = git
+pkg_mustache_repo = https://github.com/mojombo/mustache.erl
+pkg_mustache_commit = master
+
+PACKAGES += myproto
+pkg_myproto_name = myproto
+pkg_myproto_description = MySQL Server Protocol in Erlang
+pkg_myproto_homepage = https://github.com/altenwald/myproto
+pkg_myproto_fetch = git
+pkg_myproto_repo = https://github.com/altenwald/myproto
+pkg_myproto_commit = master
+
+PACKAGES += mysql
+pkg_mysql_name = mysql
+pkg_mysql_description = MySQL client library for Erlang/OTP
+pkg_mysql_homepage = https://github.com/mysql-otp/mysql-otp
+pkg_mysql_fetch = git
+pkg_mysql_repo = https://github.com/mysql-otp/mysql-otp
+pkg_mysql_commit = 1.5.1
+
+PACKAGES += n2o
+pkg_n2o_name = n2o
+pkg_n2o_description = WebSocket Application Server
+pkg_n2o_homepage = https://github.com/5HT/n2o
+pkg_n2o_fetch = git
+pkg_n2o_repo = https://github.com/5HT/n2o
+pkg_n2o_commit = master
+
+PACKAGES += nat_upnp
+pkg_nat_upnp_name = nat_upnp
+pkg_nat_upnp_description = Erlang library to map your internal port to an external one using UPnP IGD
+pkg_nat_upnp_homepage = https://github.com/benoitc/nat_upnp
+pkg_nat_upnp_fetch = git
+pkg_nat_upnp_repo = https://github.com/benoitc/nat_upnp
+pkg_nat_upnp_commit = master
+
+PACKAGES += neo4j
+pkg_neo4j_name = neo4j
+pkg_neo4j_description = Erlang client library for Neo4J.
+pkg_neo4j_homepage = https://github.com/dmitriid/neo4j-erlang
+pkg_neo4j_fetch = git
+pkg_neo4j_repo = https://github.com/dmitriid/neo4j-erlang
+pkg_neo4j_commit = master
+
+PACKAGES += neotoma
+pkg_neotoma_name = neotoma
+pkg_neotoma_description = Erlang library and packrat parser-generator for parsing expression grammars.
+pkg_neotoma_homepage = https://github.com/seancribbs/neotoma
+pkg_neotoma_fetch = git
+pkg_neotoma_repo = https://github.com/seancribbs/neotoma
+pkg_neotoma_commit = master
+
+PACKAGES += newrelic
+pkg_newrelic_name = newrelic
+pkg_newrelic_description = Erlang library for sending metrics to New Relic
+pkg_newrelic_homepage = https://github.com/wooga/newrelic-erlang
+pkg_newrelic_fetch = git
+pkg_newrelic_repo = https://github.com/wooga/newrelic-erlang
+pkg_newrelic_commit = master
+
+PACKAGES += nifty
+pkg_nifty_name = nifty
+pkg_nifty_description = Erlang NIF wrapper generator
+pkg_nifty_homepage = https://github.com/parapluu/nifty
+pkg_nifty_fetch = git
+pkg_nifty_repo = https://github.com/parapluu/nifty
+pkg_nifty_commit = master
+
+PACKAGES += nitrogen_core
+pkg_nitrogen_core_name = nitrogen_core
+pkg_nitrogen_core_description = The core Nitrogen library.
+pkg_nitrogen_core_homepage = http://nitrogenproject.com/
+pkg_nitrogen_core_fetch = git
+pkg_nitrogen_core_repo = https://github.com/nitrogen/nitrogen_core
+pkg_nitrogen_core_commit = master
+
+PACKAGES += nkbase
+pkg_nkbase_name = nkbase
+pkg_nkbase_description = NkBASE distributed database
+pkg_nkbase_homepage = https://github.com/Nekso/nkbase
+pkg_nkbase_fetch = git
+pkg_nkbase_repo = https://github.com/Nekso/nkbase
+pkg_nkbase_commit = develop
+
+PACKAGES += nkdocker
+pkg_nkdocker_name = nkdocker
+pkg_nkdocker_description = Erlang Docker client
+pkg_nkdocker_homepage = https://github.com/Nekso/nkdocker
+pkg_nkdocker_fetch = git
+pkg_nkdocker_repo = https://github.com/Nekso/nkdocker
+pkg_nkdocker_commit = master
+
+PACKAGES += nkpacket
+pkg_nkpacket_name = nkpacket
+pkg_nkpacket_description = Generic Erlang transport layer
+pkg_nkpacket_homepage = https://github.com/Nekso/nkpacket
+pkg_nkpacket_fetch = git
+pkg_nkpacket_repo = https://github.com/Nekso/nkpacket
+pkg_nkpacket_commit = master
+
+PACKAGES += nksip
+pkg_nksip_name = nksip
+pkg_nksip_description = Erlang SIP application server
+pkg_nksip_homepage = https://github.com/kalta/nksip
+pkg_nksip_fetch = git
+pkg_nksip_repo = https://github.com/kalta/nksip
+pkg_nksip_commit = master
+
+PACKAGES += nodefinder
+pkg_nodefinder_name = nodefinder
+pkg_nodefinder_description = Automatic node discovery via UDP multicast
+pkg_nodefinder_homepage = https://github.com/erlanger/nodefinder
+pkg_nodefinder_fetch = git
+pkg_nodefinder_repo = https://github.com/okeuday/nodefinder
+pkg_nodefinder_commit = master
+
+PACKAGES += nprocreg
+pkg_nprocreg_name = nprocreg
+pkg_nprocreg_description = Minimal Distributed Erlang Process Registry
+pkg_nprocreg_homepage = http://nitrogenproject.com/
+pkg_nprocreg_fetch = git
+pkg_nprocreg_repo = https://github.com/nitrogen/nprocreg
+pkg_nprocreg_commit = master
+
+PACKAGES += oauth
+pkg_oauth_name = oauth
+pkg_oauth_description = An Erlang OAuth 1.0 implementation
+pkg_oauth_homepage = https://github.com/tim/erlang-oauth
+pkg_oauth_fetch = git
+pkg_oauth_repo = https://github.com/tim/erlang-oauth
+pkg_oauth_commit = master
+
+PACKAGES += oauth2
+pkg_oauth2_name = oauth2
+pkg_oauth2_description = Erlang OAuth2 implementation
+pkg_oauth2_homepage = https://github.com/kivra/oauth2
+pkg_oauth2_fetch = git
+pkg_oauth2_repo = https://github.com/kivra/oauth2
+pkg_oauth2_commit = master
+
+PACKAGES += observer_cli
+pkg_observer_cli_name = observer_cli
+pkg_observer_cli_description = Visualize Erlang/Elixir Nodes On The Command Line
+pkg_observer_cli_homepage = http://zhongwencool.github.io/observer_cli
+pkg_observer_cli_fetch = git
+pkg_observer_cli_repo = https://github.com/zhongwencool/observer_cli
+pkg_observer_cli_commit = master
+
+PACKAGES += octopus
+pkg_octopus_name = octopus
+pkg_octopus_description = Small and flexible pool manager written in Erlang
+pkg_octopus_homepage = https://github.com/erlangbureau/octopus
+pkg_octopus_fetch = git
+pkg_octopus_repo = https://github.com/erlangbureau/octopus
+pkg_octopus_commit = master
+
+PACKAGES += of_protocol
+pkg_of_protocol_name = of_protocol
+pkg_of_protocol_description = OpenFlow Protocol Library for Erlang
+pkg_of_protocol_homepage = https://github.com/FlowForwarding/of_protocol
+pkg_of_protocol_fetch = git
+pkg_of_protocol_repo = https://github.com/FlowForwarding/of_protocol
+pkg_of_protocol_commit = master
+
+PACKAGES += opencouch
+pkg_opencouch_name = couch
+pkg_opencouch_description = An embeddable document-oriented database compatible with Apache CouchDB
+pkg_opencouch_homepage = https://github.com/benoitc/opencouch
+pkg_opencouch_fetch = git
+pkg_opencouch_repo = https://github.com/benoitc/opencouch
+pkg_opencouch_commit = master
+
+PACKAGES += openflow
+pkg_openflow_name = openflow
+pkg_openflow_description = An OpenFlow controller written in pure Erlang
+pkg_openflow_homepage = https://github.com/renatoaguiar/erlang-openflow
+pkg_openflow_fetch = git
+pkg_openflow_repo = https://github.com/renatoaguiar/erlang-openflow
+pkg_openflow_commit = master
+
+PACKAGES += openid
+pkg_openid_name = openid
+pkg_openid_description = Erlang OpenID
+pkg_openid_homepage = https://github.com/brendonh/erl_openid
+pkg_openid_fetch = git
+pkg_openid_repo = https://github.com/brendonh/erl_openid
+pkg_openid_commit = master
+
+PACKAGES += openpoker
+pkg_openpoker_name = openpoker
+pkg_openpoker_description = Genesis Texas hold'em Game Server
+pkg_openpoker_homepage = https://github.com/hpyhacking/openpoker
+pkg_openpoker_fetch = git
+pkg_openpoker_repo = https://github.com/hpyhacking/openpoker
+pkg_openpoker_commit = master
+
+PACKAGES += otpbp
+pkg_otpbp_name = otpbp
+pkg_otpbp_description = Parse transform for using new OTP functions in old Erlang/OTP releases (R15, R16, 17, 18, 19)
+pkg_otpbp_homepage = https://github.com/Ledest/otpbp
+pkg_otpbp_fetch = git
+pkg_otpbp_repo = https://github.com/Ledest/otpbp
+pkg_otpbp_commit = master
+
+PACKAGES += pal
+pkg_pal_name = pal
+pkg_pal_description = Pragmatic Authentication Library
+pkg_pal_homepage = https://github.com/manifest/pal
+pkg_pal_fetch = git
+pkg_pal_repo = https://github.com/manifest/pal
+pkg_pal_commit = master
+
+PACKAGES += parse_trans
+pkg_parse_trans_name = parse_trans
+pkg_parse_trans_description = Parse transform utilities for Erlang
+pkg_parse_trans_homepage = https://github.com/uwiger/parse_trans
+pkg_parse_trans_fetch = git
+pkg_parse_trans_repo = https://github.com/uwiger/parse_trans
+pkg_parse_trans_commit = master
+
+PACKAGES += parsexml
+pkg_parsexml_name = parsexml
+pkg_parsexml_description = Simple DOM XML parser with a convenient and very simple API
+pkg_parsexml_homepage = https://github.com/maxlapshin/parsexml
+pkg_parsexml_fetch = git
+pkg_parsexml_repo = https://github.com/maxlapshin/parsexml
+pkg_parsexml_commit = master
+
+PACKAGES += partisan
+pkg_partisan_name = partisan
+pkg_partisan_description = High-performance, high-scalability distributed computing with Erlang and Elixir.
+pkg_partisan_homepage = http://partisan.cloud
+pkg_partisan_fetch = git
+pkg_partisan_repo = https://github.com/lasp-lang/partisan
+pkg_partisan_commit = master
+
+PACKAGES += pegjs
+pkg_pegjs_name = pegjs
+pkg_pegjs_description = An implementation of PEG.js grammar for Erlang.
+pkg_pegjs_homepage = https://github.com/dmitriid/pegjs
+pkg_pegjs_fetch = git
+pkg_pegjs_repo = https://github.com/dmitriid/pegjs
+pkg_pegjs_commit = master
+
+PACKAGES += percept2
+pkg_percept2_name = percept2
+pkg_percept2_description = Concurrent profiling tool for Erlang
+pkg_percept2_homepage = https://github.com/huiqing/percept2
+pkg_percept2_fetch = git
+pkg_percept2_repo = https://github.com/huiqing/percept2
+pkg_percept2_commit = master
+
+PACKAGES += pgo
+pkg_pgo_name = pgo
+pkg_pgo_description = Erlang Postgres client and connection pool
+pkg_pgo_homepage = https://github.com/erleans/pgo.git
+pkg_pgo_fetch = git
+pkg_pgo_repo = https://github.com/erleans/pgo.git
+pkg_pgo_commit = master
+
+PACKAGES += pgsql
+pkg_pgsql_name = pgsql
+pkg_pgsql_description = Erlang PostgreSQL driver
+pkg_pgsql_homepage = https://github.com/semiocast/pgsql
+pkg_pgsql_fetch = git
+pkg_pgsql_repo = https://github.com/semiocast/pgsql
+pkg_pgsql_commit = master
+
+PACKAGES += pkgx
+pkg_pkgx_name = pkgx
+pkg_pkgx_description = Build .deb packages from Erlang releases
+pkg_pkgx_homepage = https://github.com/arjan/pkgx
+pkg_pkgx_fetch = git
+pkg_pkgx_repo = https://github.com/arjan/pkgx
+pkg_pkgx_commit = master
+
+PACKAGES += pkt
+pkg_pkt_name = pkt
+pkg_pkt_description = Erlang network protocol library
+pkg_pkt_homepage = https://github.com/msantos/pkt
+pkg_pkt_fetch = git
+pkg_pkt_repo = https://github.com/msantos/pkt
+pkg_pkt_commit = master
+
+PACKAGES += plain_fsm
+pkg_plain_fsm_name = plain_fsm
+pkg_plain_fsm_description = A behaviour/support library for writing plain Erlang FSMs.
+pkg_plain_fsm_homepage = https://github.com/uwiger/plain_fsm
+pkg_plain_fsm_fetch = git
+pkg_plain_fsm_repo = https://github.com/uwiger/plain_fsm
+pkg_plain_fsm_commit = master
+
+PACKAGES += plumtree
+pkg_plumtree_name = plumtree
+pkg_plumtree_description = Epidemic Broadcast Trees
+pkg_plumtree_homepage = https://github.com/helium/plumtree
+pkg_plumtree_fetch = git
+pkg_plumtree_repo = https://github.com/helium/plumtree
+pkg_plumtree_commit = master
+
+PACKAGES += pmod_transform
+pkg_pmod_transform_name = pmod_transform
+pkg_pmod_transform_description = Parse transform for parameterized modules
+pkg_pmod_transform_homepage = https://github.com/erlang/pmod_transform
+pkg_pmod_transform_fetch = git
+pkg_pmod_transform_repo = https://github.com/erlang/pmod_transform
+pkg_pmod_transform_commit = master
+
+PACKAGES += pobox
+pkg_pobox_name = pobox
+pkg_pobox_description = External buffer processes to protect against mailbox overflow in Erlang
+pkg_pobox_homepage = https://github.com/ferd/pobox
+pkg_pobox_fetch = git
+pkg_pobox_repo = https://github.com/ferd/pobox
+pkg_pobox_commit = master
+
+PACKAGES += ponos
+pkg_ponos_name = ponos
+pkg_ponos_description = ponos is a simple yet powerful load generator written in Erlang
+pkg_ponos_homepage = https://github.com/klarna/ponos
+pkg_ponos_fetch = git
+pkg_ponos_repo = https://github.com/klarna/ponos
+pkg_ponos_commit = master
+
+PACKAGES += poolboy
+pkg_poolboy_name = poolboy
+pkg_poolboy_description = A hunky Erlang worker pool factory
+pkg_poolboy_homepage = https://github.com/devinus/poolboy
+pkg_poolboy_fetch = git
+pkg_poolboy_repo = https://github.com/devinus/poolboy
+pkg_poolboy_commit = master
+
+PACKAGES += pooler
+pkg_pooler_name = pooler
+pkg_pooler_description = An OTP Process Pool Application
+pkg_pooler_homepage = https://github.com/seth/pooler
+pkg_pooler_fetch = git
+pkg_pooler_repo = https://github.com/seth/pooler
+pkg_pooler_commit = master
+
+PACKAGES += pqueue
+pkg_pqueue_name = pqueue
+pkg_pqueue_description = Erlang Priority Queues
+pkg_pqueue_homepage = https://github.com/okeuday/pqueue
+pkg_pqueue_fetch = git
+pkg_pqueue_repo = https://github.com/okeuday/pqueue
+pkg_pqueue_commit = master
+
+PACKAGES += procket
+pkg_procket_name = procket
+pkg_procket_description = Erlang interface to low level socket operations
+pkg_procket_homepage = http://blog.listincomprehension.com/search/label/procket
+pkg_procket_fetch = git
+pkg_procket_repo = https://github.com/msantos/procket
+pkg_procket_commit = master
+
+PACKAGES += prometheus
+pkg_prometheus_name = prometheus
+pkg_prometheus_description = Prometheus.io client in Erlang
+pkg_prometheus_homepage = https://github.com/deadtrickster/prometheus.erl
+pkg_prometheus_fetch = git
+pkg_prometheus_repo = https://github.com/deadtrickster/prometheus.erl
+pkg_prometheus_commit = master
+
+PACKAGES += prop
+pkg_prop_name = prop
+pkg_prop_description = An Erlang code scaffolding and generator system.
+pkg_prop_homepage = https://github.com/nuex/prop
+pkg_prop_fetch = git
+pkg_prop_repo = https://github.com/nuex/prop
+pkg_prop_commit = master
+
+PACKAGES += proper
+pkg_proper_name = proper
+pkg_proper_description = PropEr: a QuickCheck-inspired property-based testing tool for Erlang.
+pkg_proper_homepage = http://proper.softlab.ntua.gr
+pkg_proper_fetch = git
+pkg_proper_repo = https://github.com/manopapad/proper
+pkg_proper_commit = master
+
+PACKAGES += props
+pkg_props_name = props
+pkg_props_description = Property structure library
+pkg_props_homepage = https://github.com/greyarea/props
+pkg_props_fetch = git
+pkg_props_repo = https://github.com/greyarea/props
+pkg_props_commit = master
+
+PACKAGES += protobuffs
+pkg_protobuffs_name = protobuffs
+pkg_protobuffs_description = An implementation of Google's Protocol Buffers for Erlang, based on ngerakines/erlang_protobuffs.
+pkg_protobuffs_homepage = https://github.com/basho/erlang_protobuffs
+pkg_protobuffs_fetch = git
+pkg_protobuffs_repo = https://github.com/basho/erlang_protobuffs
+pkg_protobuffs_commit = master
+
+PACKAGES += psycho
+pkg_psycho_name = psycho
+pkg_psycho_description = HTTP server that provides a WSGI-like interface for applications and middleware.
+pkg_psycho_homepage = https://github.com/gar1t/psycho
+pkg_psycho_fetch = git
+pkg_psycho_repo = https://github.com/gar1t/psycho
+pkg_psycho_commit = master
+
+PACKAGES += purity
+pkg_purity_name = purity
+pkg_purity_description = A side-effect analyzer for Erlang
+pkg_purity_homepage = https://github.com/mpitid/purity
+pkg_purity_fetch = git
+pkg_purity_repo = https://github.com/mpitid/purity
+pkg_purity_commit = master
+
+PACKAGES += push_service
+pkg_push_service_name = push_service
+pkg_push_service_description = Push service
+pkg_push_service_homepage = https://github.com/hairyhum/push_service
+pkg_push_service_fetch = git
+pkg_push_service_repo = https://github.com/hairyhum/push_service
+pkg_push_service_commit = master
+
+PACKAGES += qdate
+pkg_qdate_name = qdate
+pkg_qdate_description = Date, time, and timezone parsing, formatting, and conversion for Erlang.
+pkg_qdate_homepage = https://github.com/choptastic/qdate
+pkg_qdate_fetch = git
+pkg_qdate_repo = https://github.com/choptastic/qdate
+pkg_qdate_commit = master
+
+PACKAGES += qrcode
+pkg_qrcode_name = qrcode
+pkg_qrcode_description = QR Code encoder in Erlang
+pkg_qrcode_homepage = https://github.com/komone/qrcode
+pkg_qrcode_fetch = git
+pkg_qrcode_repo = https://github.com/komone/qrcode
+pkg_qrcode_commit = master
+
+PACKAGES += quest
+pkg_quest_name = quest
+pkg_quest_description = Learn Erlang through this set of challenges. An interactive system for getting to know Erlang.
+pkg_quest_homepage = https://github.com/eriksoe/ErlangQuest
+pkg_quest_fetch = git
+pkg_quest_repo = https://github.com/eriksoe/ErlangQuest
+pkg_quest_commit = master
+
+PACKAGES += quickrand
+pkg_quickrand_name = quickrand
+pkg_quickrand_description = Quick Erlang Random Number Generation
+pkg_quickrand_homepage = https://github.com/okeuday/quickrand
+pkg_quickrand_fetch = git
+pkg_quickrand_repo = https://github.com/okeuday/quickrand
+pkg_quickrand_commit = master
+
+PACKAGES += rabbit
+pkg_rabbit_name = rabbit
+pkg_rabbit_description = RabbitMQ Server
+pkg_rabbit_homepage = https://www.rabbitmq.com/
+pkg_rabbit_fetch = git
+pkg_rabbit_repo = https://github.com/rabbitmq/rabbitmq-server.git
+pkg_rabbit_commit = master
+
+PACKAGES += rabbit_exchange_type_riak
+pkg_rabbit_exchange_type_riak_name = rabbit_exchange_type_riak
+pkg_rabbit_exchange_type_riak_description = Custom RabbitMQ exchange type for sticking messages in Riak
+pkg_rabbit_exchange_type_riak_homepage = https://github.com/jbrisbin/riak-exchange
+pkg_rabbit_exchange_type_riak_fetch = git
+pkg_rabbit_exchange_type_riak_repo = https://github.com/jbrisbin/riak-exchange
+pkg_rabbit_exchange_type_riak_commit = master
+
+PACKAGES += rack
+pkg_rack_name = rack
+pkg_rack_description = Rack handler for Erlang
+pkg_rack_homepage = https://github.com/erlyvideo/rack
+pkg_rack_fetch = git
+pkg_rack_repo = https://github.com/erlyvideo/rack
+pkg_rack_commit = master
+
+PACKAGES += radierl
+pkg_radierl_name = radierl
+pkg_radierl_description = RADIUS protocol stack implemented in Erlang.
+pkg_radierl_homepage = https://github.com/vances/radierl
+pkg_radierl_fetch = git
+pkg_radierl_repo = https://github.com/vances/radierl
+pkg_radierl_commit = master
+
+PACKAGES += rafter
+pkg_rafter_name = rafter
+pkg_rafter_description = An Erlang library application which implements the Raft consensus protocol
+pkg_rafter_homepage = https://github.com/andrewjstone/rafter
+pkg_rafter_fetch = git
+pkg_rafter_repo = https://github.com/andrewjstone/rafter
+pkg_rafter_commit = master
+
+PACKAGES += ranch
+pkg_ranch_name = ranch
+pkg_ranch_description = Socket acceptor pool for TCP protocols.
+pkg_ranch_homepage = http://ninenines.eu
+pkg_ranch_fetch = git
+pkg_ranch_repo = https://github.com/ninenines/ranch
+pkg_ranch_commit = 1.2.1
+
+PACKAGES += rbeacon
+pkg_rbeacon_name = rbeacon
+pkg_rbeacon_description = LAN discovery and presence in Erlang.
+pkg_rbeacon_homepage = https://github.com/refuge/rbeacon
+pkg_rbeacon_fetch = git
+pkg_rbeacon_repo = https://github.com/refuge/rbeacon
+pkg_rbeacon_commit = master
+
+PACKAGES += rebar
+pkg_rebar_name = rebar
+pkg_rebar_description = Erlang build tool that makes it easy to compile and test Erlang applications, port drivers and releases.
+pkg_rebar_homepage = http://www.rebar3.org
+pkg_rebar_fetch = git
+pkg_rebar_repo = https://github.com/rebar/rebar3
+pkg_rebar_commit = master
+
+PACKAGES += rebus
+pkg_rebus_name = rebus
+pkg_rebus_description = A stupid simple, internal, pub/sub event bus written in- and for Erlang.
+pkg_rebus_homepage = https://github.com/olle/rebus
+pkg_rebus_fetch = git
+pkg_rebus_repo = https://github.com/olle/rebus
+pkg_rebus_commit = master
+
+PACKAGES += rec2json
+pkg_rec2json_name = rec2json
+pkg_rec2json_description = Compile Erlang record definitions into modules to convert them to/from JSON easily.
+pkg_rec2json_homepage = https://github.com/lordnull/rec2json
+pkg_rec2json_fetch = git
+pkg_rec2json_repo = https://github.com/lordnull/rec2json
+pkg_rec2json_commit = master
+
+PACKAGES += recon
+pkg_recon_name = recon
+pkg_recon_description = Collection of functions and scripts to debug Erlang in production.
+pkg_recon_homepage = https://github.com/ferd/recon
+pkg_recon_fetch = git
+pkg_recon_repo = https://github.com/ferd/recon
+pkg_recon_commit = master
+
+PACKAGES += record_info
+pkg_record_info_name = record_info
+pkg_record_info_description = Convert between record and proplist
+pkg_record_info_homepage = https://github.com/bipthelin/erlang-record_info
+pkg_record_info_fetch = git
+pkg_record_info_repo = https://github.com/bipthelin/erlang-record_info
+pkg_record_info_commit = master
+
+PACKAGES += redgrid
+pkg_redgrid_name = redgrid
+pkg_redgrid_description = Automatic Erlang node discovery via Redis
+pkg_redgrid_homepage = https://github.com/jkvor/redgrid
+pkg_redgrid_fetch = git
+pkg_redgrid_repo = https://github.com/jkvor/redgrid
+pkg_redgrid_commit = master
+
+PACKAGES += redo
+pkg_redo_name = redo
+pkg_redo_description = Pipelined Erlang Redis client
+pkg_redo_homepage = https://github.com/jkvor/redo
+pkg_redo_fetch = git
+pkg_redo_repo = https://github.com/jkvor/redo
+pkg_redo_commit = master
+
+PACKAGES += reload_mk
+pkg_reload_mk_name = reload_mk
+pkg_reload_mk_description = Live reload plugin for erlang.mk.
+pkg_reload_mk_homepage = https://github.com/bullno1/reload.mk
+pkg_reload_mk_fetch = git
+pkg_reload_mk_repo = https://github.com/bullno1/reload.mk
+pkg_reload_mk_commit = master
+
+PACKAGES += reltool_util
+pkg_reltool_util_name = reltool_util
+pkg_reltool_util_description = Erlang reltool utility functionality application
+pkg_reltool_util_homepage = https://github.com/okeuday/reltool_util
+pkg_reltool_util_fetch = git
+pkg_reltool_util_repo = https://github.com/okeuday/reltool_util
+pkg_reltool_util_commit = master
+
+PACKAGES += relx
+pkg_relx_name = relx
+pkg_relx_description = Sane, simple release creation for Erlang
+pkg_relx_homepage = https://github.com/erlware/relx
+pkg_relx_fetch = git
+pkg_relx_repo = https://github.com/erlware/relx
+pkg_relx_commit = master
+
+PACKAGES += resource_discovery
+pkg_resource_discovery_name = resource_discovery
+pkg_resource_discovery_description = An application used to dynamically discover resources present in an Erlang node cluster.
+pkg_resource_discovery_homepage = http://erlware.org/
+pkg_resource_discovery_fetch = git
+pkg_resource_discovery_repo = https://github.com/erlware/resource_discovery
+pkg_resource_discovery_commit = master
+
+PACKAGES += restc
+pkg_restc_name = restc
+pkg_restc_description = Erlang Rest Client
+pkg_restc_homepage = https://github.com/kivra/restclient
+pkg_restc_fetch = git
+pkg_restc_repo = https://github.com/kivra/restclient
+pkg_restc_commit = master
+
+PACKAGES += rfc4627_jsonrpc
+pkg_rfc4627_jsonrpc_name = rfc4627_jsonrpc
+pkg_rfc4627_jsonrpc_description = Erlang RFC4627 (JSON) codec and JSON-RPC server implementation.
+pkg_rfc4627_jsonrpc_homepage = https://github.com/tonyg/erlang-rfc4627
+pkg_rfc4627_jsonrpc_fetch = git
+pkg_rfc4627_jsonrpc_repo = https://github.com/tonyg/erlang-rfc4627
+pkg_rfc4627_jsonrpc_commit = master
+
+PACKAGES += riak_control
+pkg_riak_control_name = riak_control
+pkg_riak_control_description = Webmachine-based administration interface for Riak.
+pkg_riak_control_homepage = https://github.com/basho/riak_control
+pkg_riak_control_fetch = git
+pkg_riak_control_repo = https://github.com/basho/riak_control
+pkg_riak_control_commit = master
+
+PACKAGES += riak_core
+pkg_riak_core_name = riak_core
+pkg_riak_core_description = Distributed systems infrastructure used by Riak.
+pkg_riak_core_homepage = https://github.com/basho/riak_core
+pkg_riak_core_fetch = git
+pkg_riak_core_repo = https://github.com/basho/riak_core
+pkg_riak_core_commit = master
+
+PACKAGES += riak_dt
+pkg_riak_dt_name = riak_dt
+pkg_riak_dt_description = Convergent replicated datatypes in Erlang
+pkg_riak_dt_homepage = https://github.com/basho/riak_dt
+pkg_riak_dt_fetch = git
+pkg_riak_dt_repo = https://github.com/basho/riak_dt
+pkg_riak_dt_commit = master
+
+PACKAGES += riak_ensemble
+pkg_riak_ensemble_name = riak_ensemble
+pkg_riak_ensemble_description = Multi-Paxos framework in Erlang
+pkg_riak_ensemble_homepage = https://github.com/basho/riak_ensemble
+pkg_riak_ensemble_fetch = git
+pkg_riak_ensemble_repo = https://github.com/basho/riak_ensemble
+pkg_riak_ensemble_commit = master
+
+PACKAGES += riak_kv
+pkg_riak_kv_name = riak_kv
+pkg_riak_kv_description = Riak Key/Value Store
+pkg_riak_kv_homepage = https://github.com/basho/riak_kv
+pkg_riak_kv_fetch = git
+pkg_riak_kv_repo = https://github.com/basho/riak_kv
+pkg_riak_kv_commit = master
+
+PACKAGES += riak_pg
+pkg_riak_pg_name = riak_pg
+pkg_riak_pg_description = Distributed process groups with riak_core.
+pkg_riak_pg_homepage = https://github.com/cmeiklejohn/riak_pg
+pkg_riak_pg_fetch = git
+pkg_riak_pg_repo = https://github.com/cmeiklejohn/riak_pg
+pkg_riak_pg_commit = master
+
+PACKAGES += riak_pipe
+pkg_riak_pipe_name = riak_pipe
+pkg_riak_pipe_description = Riak Pipelines
+pkg_riak_pipe_homepage = https://github.com/basho/riak_pipe
+pkg_riak_pipe_fetch = git
+pkg_riak_pipe_repo = https://github.com/basho/riak_pipe
+pkg_riak_pipe_commit = master
+
+PACKAGES += riak_sysmon
+pkg_riak_sysmon_name = riak_sysmon
+pkg_riak_sysmon_description = Simple OTP app for managing Erlang VM system_monitor event messages
+pkg_riak_sysmon_homepage = https://github.com/basho/riak_sysmon
+pkg_riak_sysmon_fetch = git
+pkg_riak_sysmon_repo = https://github.com/basho/riak_sysmon
+pkg_riak_sysmon_commit = master
+
+PACKAGES += riak_test
+pkg_riak_test_name = riak_test
+pkg_riak_test_description = I'm in your cluster, testing your riaks
+pkg_riak_test_homepage = https://github.com/basho/riak_test
+pkg_riak_test_fetch = git
+pkg_riak_test_repo = https://github.com/basho/riak_test
+pkg_riak_test_commit = master
+
+PACKAGES += riakc
+pkg_riakc_name = riakc
+pkg_riakc_description = Erlang clients for Riak.
+pkg_riakc_homepage = https://github.com/basho/riak-erlang-client
+pkg_riakc_fetch = git
+pkg_riakc_repo = https://github.com/basho/riak-erlang-client
+pkg_riakc_commit = master
+
+PACKAGES += riakhttpc
+pkg_riakhttpc_name = riakhttpc
+pkg_riakhttpc_description = Riak Erlang client using the HTTP interface
+pkg_riakhttpc_homepage = https://github.com/basho/riak-erlang-http-client
+pkg_riakhttpc_fetch = git
+pkg_riakhttpc_repo = https://github.com/basho/riak-erlang-http-client
+pkg_riakhttpc_commit = master
+
+PACKAGES += riaknostic
+pkg_riaknostic_name = riaknostic
+pkg_riaknostic_description = A diagnostic tool for Riak installations, to find common errors asap
+pkg_riaknostic_homepage = https://github.com/basho/riaknostic
+pkg_riaknostic_fetch = git
+pkg_riaknostic_repo = https://github.com/basho/riaknostic
+pkg_riaknostic_commit = master
+
+PACKAGES += riakpool
+pkg_riakpool_name = riakpool
+pkg_riakpool_description = Erlang Riak client pool
+pkg_riakpool_homepage = https://github.com/dweldon/riakpool
+pkg_riakpool_fetch = git
+pkg_riakpool_repo = https://github.com/dweldon/riakpool
+pkg_riakpool_commit = master
+
+PACKAGES += rivus_cep
+pkg_rivus_cep_name = rivus_cep
+pkg_rivus_cep_description = Complex event processing in Erlang
+pkg_rivus_cep_homepage = https://github.com/vascokk/rivus_cep
+pkg_rivus_cep_fetch = git
+pkg_rivus_cep_repo = https://github.com/vascokk/rivus_cep
+pkg_rivus_cep_commit = master
+
+PACKAGES += rlimit
+pkg_rlimit_name = rlimit
+pkg_rlimit_description = Magnus Klaar's rate limiter code from etorrent
+pkg_rlimit_homepage = https://github.com/jlouis/rlimit
+pkg_rlimit_fetch = git
+pkg_rlimit_repo = https://github.com/jlouis/rlimit
+pkg_rlimit_commit = master
+
+PACKAGES += rust_mk
+pkg_rust_mk_name = rust_mk
+pkg_rust_mk_description = Build Rust crates in an Erlang application
+pkg_rust_mk_homepage = https://github.com/goertzenator/rust.mk
+pkg_rust_mk_fetch = git
+pkg_rust_mk_repo = https://github.com/goertzenator/rust.mk
+pkg_rust_mk_commit = master
+
+PACKAGES += safetyvalve
+pkg_safetyvalve_name = safetyvalve
+pkg_safetyvalve_description = A safety valve for your Erlang node
+pkg_safetyvalve_homepage = https://github.com/jlouis/safetyvalve
+pkg_safetyvalve_fetch = git
+pkg_safetyvalve_repo = https://github.com/jlouis/safetyvalve
+pkg_safetyvalve_commit = master
+
+PACKAGES += seestar
+pkg_seestar_name = seestar
+pkg_seestar_description = The Erlang client for Cassandra 1.2+ binary protocol
+pkg_seestar_homepage = https://github.com/iamaleksey/seestar
+pkg_seestar_fetch = git
+pkg_seestar_repo = https://github.com/iamaleksey/seestar
+pkg_seestar_commit = master
+
+PACKAGES += service
+pkg_service_name = service
+pkg_service_description = A minimal Erlang behavior for creating CloudI internal services
+pkg_service_homepage = http://cloudi.org/
+pkg_service_fetch = git
+pkg_service_repo = https://github.com/CloudI/service
+pkg_service_commit = master
+
+PACKAGES += setup
+pkg_setup_name = setup
+pkg_setup_description = Generic setup utility for Erlang-based systems
+pkg_setup_homepage = https://github.com/uwiger/setup
+pkg_setup_fetch = git
+pkg_setup_repo = https://github.com/uwiger/setup
+pkg_setup_commit = master
+
+PACKAGES += sext
+pkg_sext_name = sext
+pkg_sext_description = Sortable Erlang Term Serialization
+pkg_sext_homepage = https://github.com/uwiger/sext
+pkg_sext_fetch = git
+pkg_sext_repo = https://github.com/uwiger/sext
+pkg_sext_commit = master
+
+PACKAGES += sfmt
+pkg_sfmt_name = sfmt
+pkg_sfmt_description = SFMT pseudo random number generator for Erlang.
+pkg_sfmt_homepage = https://github.com/jj1bdx/sfmt-erlang
+pkg_sfmt_fetch = git
+pkg_sfmt_repo = https://github.com/jj1bdx/sfmt-erlang
+pkg_sfmt_commit = master
+
+PACKAGES += sgte
+pkg_sgte_name = sgte
+pkg_sgte_description = A simple Erlang Template Engine
+pkg_sgte_homepage = https://github.com/filippo/sgte
+pkg_sgte_fetch = git
+pkg_sgte_repo = https://github.com/filippo/sgte
+pkg_sgte_commit = master
+
+PACKAGES += sheriff
+pkg_sheriff_name = sheriff
+pkg_sheriff_description = Parse transform for type based validation.
+pkg_sheriff_homepage = http://ninenines.eu
+pkg_sheriff_fetch = git
+pkg_sheriff_repo = https://github.com/extend/sheriff
+pkg_sheriff_commit = master
+
+PACKAGES += shotgun
+pkg_shotgun_name = shotgun
+pkg_shotgun_description = better than just a gun
+pkg_shotgun_homepage = https://github.com/inaka/shotgun
+pkg_shotgun_fetch = git
+pkg_shotgun_repo = https://github.com/inaka/shotgun
+pkg_shotgun_commit = master
+
+PACKAGES += sidejob
+pkg_sidejob_name = sidejob
+pkg_sidejob_description = Parallel worker and capacity limiting library for Erlang
+pkg_sidejob_homepage = https://github.com/basho/sidejob
+pkg_sidejob_fetch = git
+pkg_sidejob_repo = https://github.com/basho/sidejob
+pkg_sidejob_commit = master
+
+PACKAGES += sieve
+pkg_sieve_name = sieve
+pkg_sieve_description = sieve is a simple TCP routing proxy (layer 7) in Erlang
+pkg_sieve_homepage = https://github.com/benoitc/sieve
+pkg_sieve_fetch = git
+pkg_sieve_repo = https://github.com/benoitc/sieve
+pkg_sieve_commit = master
+
+PACKAGES += sighandler
+pkg_sighandler_name = sighandler
+pkg_sighandler_description = Handle UNIX signals in Erlang
+pkg_sighandler_homepage = https://github.com/jkingsbery/sighandler
+pkg_sighandler_fetch = git
+pkg_sighandler_repo = https://github.com/jkingsbery/sighandler
+pkg_sighandler_commit = master
+
+PACKAGES += simhash
+pkg_simhash_name = simhash
+pkg_simhash_description = Simhashing for Erlang -- hashing algorithm to find near-duplicates in binary data.
+pkg_simhash_homepage = https://github.com/ferd/simhash
+pkg_simhash_fetch = git
+pkg_simhash_repo = https://github.com/ferd/simhash
+pkg_simhash_commit = master
+
+PACKAGES += simple_bridge
+pkg_simple_bridge_name = simple_bridge
+pkg_simple_bridge_description = A simple, standardized interface library to Erlang HTTP Servers.
+pkg_simple_bridge_homepage = https://github.com/nitrogen/simple_bridge
+pkg_simple_bridge_fetch = git
+pkg_simple_bridge_repo = https://github.com/nitrogen/simple_bridge
+pkg_simple_bridge_commit = master
+
+PACKAGES += simple_oauth2
+pkg_simple_oauth2_name = simple_oauth2
+pkg_simple_oauth2_description = Simple erlang OAuth2 client module for any http server framework (Google, Facebook, Yandex, Vkontakte are preconfigured)
+pkg_simple_oauth2_homepage = https://github.com/virtan/simple_oauth2
+pkg_simple_oauth2_fetch = git
+pkg_simple_oauth2_repo = https://github.com/virtan/simple_oauth2
+pkg_simple_oauth2_commit = master
+
+PACKAGES += skel
+pkg_skel_name = skel
+pkg_skel_description = A Streaming Process-based Skeleton Library for Erlang
+pkg_skel_homepage = https://github.com/ParaPhrase/skel
+pkg_skel_fetch = git
+pkg_skel_repo = https://github.com/ParaPhrase/skel
+pkg_skel_commit = master
+
+PACKAGES += slack
+pkg_slack_name = slack
+pkg_slack_description = Minimal slack notification OTP library.
+pkg_slack_homepage = https://github.com/DonBranson/slack
+pkg_slack_fetch = git
+pkg_slack_repo = https://github.com/DonBranson/slack.git
+pkg_slack_commit = master
+
+PACKAGES += smother
+pkg_smother_name = smother
+pkg_smother_description = Extended code coverage metrics for Erlang.
+pkg_smother_homepage = https://ramsay-t.github.io/Smother/
+pkg_smother_fetch = git
+pkg_smother_repo = https://github.com/ramsay-t/Smother
+pkg_smother_commit = master
+
+PACKAGES += snappyer
+pkg_snappyer_name = snappyer
+pkg_snappyer_description = Snappy as a NIF for Erlang
+pkg_snappyer_homepage = https://github.com/zmstone/snappyer
+pkg_snappyer_fetch = git
+pkg_snappyer_repo = https://github.com/zmstone/snappyer.git
+pkg_snappyer_commit = master
+
+PACKAGES += social
+pkg_social_name = social
+pkg_social_description = Cowboy handler for social login via OAuth2 providers
+pkg_social_homepage = https://github.com/dvv/social
+pkg_social_fetch = git
+pkg_social_repo = https://github.com/dvv/social
+pkg_social_commit = master
+
+PACKAGES += spapi_router
+pkg_spapi_router_name = spapi_router
+pkg_spapi_router_description = Partially-connected Erlang clustering
+pkg_spapi_router_homepage = https://github.com/spilgames/spapi-router
+pkg_spapi_router_fetch = git
+pkg_spapi_router_repo = https://github.com/spilgames/spapi-router
+pkg_spapi_router_commit = master
+
+PACKAGES += sqerl
+pkg_sqerl_name = sqerl
+pkg_sqerl_description = An Erlang-flavoured SQL DSL
+pkg_sqerl_homepage = https://github.com/hairyhum/sqerl
+pkg_sqerl_fetch = git
+pkg_sqerl_repo = https://github.com/hairyhum/sqerl
+pkg_sqerl_commit = master
+
+PACKAGES += srly
+pkg_srly_name = srly
+pkg_srly_description = Native Erlang Unix serial interface
+pkg_srly_homepage = https://github.com/msantos/srly
+pkg_srly_fetch = git
+pkg_srly_repo = https://github.com/msantos/srly
+pkg_srly_commit = master
+
+PACKAGES += sshrpc
+pkg_sshrpc_name = sshrpc
+pkg_sshrpc_description = Erlang SSH RPC module (experimental)
+pkg_sshrpc_homepage = https://github.com/jj1bdx/sshrpc
+pkg_sshrpc_fetch = git
+pkg_sshrpc_repo = https://github.com/jj1bdx/sshrpc
+pkg_sshrpc_commit = master
+
+PACKAGES += stable
+pkg_stable_name = stable
+pkg_stable_description = Library of assorted helpers for Cowboy web server.
+pkg_stable_homepage = https://github.com/dvv/stable
+pkg_stable_fetch = git
+pkg_stable_repo = https://github.com/dvv/stable
+pkg_stable_commit = master
+
+PACKAGES += statebox
+pkg_statebox_name = statebox
+pkg_statebox_description = Erlang state monad with merge/conflict-resolution capabilities. Useful for Riak.
+pkg_statebox_homepage = https://github.com/mochi/statebox
+pkg_statebox_fetch = git
+pkg_statebox_repo = https://github.com/mochi/statebox
+pkg_statebox_commit = master
+
+PACKAGES += statebox_riak
+pkg_statebox_riak_name = statebox_riak
+pkg_statebox_riak_description = Convenience library that makes it easier to use statebox with riak, extracted from best practices in our production code at Mochi Media.
+pkg_statebox_riak_homepage = https://github.com/mochi/statebox_riak
+pkg_statebox_riak_fetch = git
+pkg_statebox_riak_repo = https://github.com/mochi/statebox_riak
+pkg_statebox_riak_commit = master
+
+PACKAGES += statman
+pkg_statman_name = statman
+pkg_statman_description = Efficiently collect massive volumes of metrics inside the Erlang VM
+pkg_statman_homepage = https://github.com/knutin/statman
+pkg_statman_fetch = git
+pkg_statman_repo = https://github.com/knutin/statman
+pkg_statman_commit = master
+
+PACKAGES += statsderl
+pkg_statsderl_name = statsderl
+pkg_statsderl_description = StatsD client (erlang)
+pkg_statsderl_homepage = https://github.com/lpgauth/statsderl
+pkg_statsderl_fetch = git
+pkg_statsderl_repo = https://github.com/lpgauth/statsderl
+pkg_statsderl_commit = master
+
+PACKAGES += stdinout_pool
+pkg_stdinout_pool_name = stdinout_pool
+pkg_stdinout_pool_description = stdinout_pool : stuff goes in, stuff goes out. there's never any miscommunication.
+pkg_stdinout_pool_homepage = https://github.com/mattsta/erlang-stdinout-pool
+pkg_stdinout_pool_fetch = git
+pkg_stdinout_pool_repo = https://github.com/mattsta/erlang-stdinout-pool
+pkg_stdinout_pool_commit = master
+
+PACKAGES += stockdb
+pkg_stockdb_name = stockdb
+pkg_stockdb_description = Database for storing stock exchange quotes in Erlang
+pkg_stockdb_homepage = https://github.com/maxlapshin/stockdb
+pkg_stockdb_fetch = git
+pkg_stockdb_repo = https://github.com/maxlapshin/stockdb
+pkg_stockdb_commit = master
+
+PACKAGES += stripe
+pkg_stripe_name = stripe
+pkg_stripe_description = Erlang interface to the stripe.com API
+pkg_stripe_homepage = https://github.com/mattsta/stripe-erlang
+pkg_stripe_fetch = git
+pkg_stripe_repo = https://github.com/mattsta/stripe-erlang
+pkg_stripe_commit = v1
+
+PACKAGES += subproc
+pkg_subproc_name = subproc
+pkg_subproc_description = UNIX subprocess manager with {active,once|false} modes
+pkg_subproc_homepage = http://dozzie.jarowit.net/trac/wiki/subproc
+pkg_subproc_fetch = git
+pkg_subproc_repo = https://github.com/dozzie/subproc
+pkg_subproc_commit = v0.1.0
+
+PACKAGES += supervisor3
+pkg_supervisor3_name = supervisor3
+pkg_supervisor3_description = OTP supervisor with additional strategies
+pkg_supervisor3_homepage = https://github.com/klarna/supervisor3
+pkg_supervisor3_fetch = git
+pkg_supervisor3_repo = https://github.com/klarna/supervisor3.git
+pkg_supervisor3_commit = master
+
+PACKAGES += surrogate
+pkg_surrogate_name = surrogate
+pkg_surrogate_description = Proxy server written in Erlang. Supports reverse proxy load balancing and forward proxy with HTTP (including CONNECT), SOCKS4, SOCKS5, and transparent proxy modes.
+pkg_surrogate_homepage = https://github.com/skruger/Surrogate
+pkg_surrogate_fetch = git
+pkg_surrogate_repo = https://github.com/skruger/Surrogate
+pkg_surrogate_commit = master
+
+PACKAGES += swab
+pkg_swab_name = swab
+pkg_swab_description = General purpose buffer handling module
+pkg_swab_homepage = https://github.com/crownedgrouse/swab
+pkg_swab_fetch = git
+pkg_swab_repo = https://github.com/crownedgrouse/swab
+pkg_swab_commit = master
+
+PACKAGES += swarm
+pkg_swarm_name = swarm
+pkg_swarm_description = Fast and simple acceptor pool for Erlang
+pkg_swarm_homepage = https://github.com/jeremey/swarm
+pkg_swarm_fetch = git
+pkg_swarm_repo = https://github.com/jeremey/swarm
+pkg_swarm_commit = master
+
+PACKAGES += switchboard
+pkg_switchboard_name = switchboard
+pkg_switchboard_description = A framework for processing email using worker plugins.
+pkg_switchboard_homepage = https://github.com/thusfresh/switchboard
+pkg_switchboard_fetch = git
+pkg_switchboard_repo = https://github.com/thusfresh/switchboard
+pkg_switchboard_commit = master
+
+PACKAGES += syn
+pkg_syn_name = syn
+pkg_syn_description = A global Process Registry and Process Group manager for Erlang.
+pkg_syn_homepage = https://github.com/ostinelli/syn
+pkg_syn_fetch = git
+pkg_syn_repo = https://github.com/ostinelli/syn
+pkg_syn_commit = master
+
+PACKAGES += sync
+pkg_sync_name = sync
+pkg_sync_description = On-the-fly recompiling and reloading in Erlang.
+pkg_sync_homepage = https://github.com/rustyio/sync
+pkg_sync_fetch = git
+pkg_sync_repo = https://github.com/rustyio/sync
+pkg_sync_commit = master
+
+PACKAGES += syntaxerl
+pkg_syntaxerl_name = syntaxerl
+pkg_syntaxerl_description = Syntax checker for Erlang
+pkg_syntaxerl_homepage = https://github.com/ten0s/syntaxerl
+pkg_syntaxerl_fetch = git
+pkg_syntaxerl_repo = https://github.com/ten0s/syntaxerl
+pkg_syntaxerl_commit = master
+
+PACKAGES += syslog
+pkg_syslog_name = syslog
+pkg_syslog_description = Erlang port driver for interacting with syslog via syslog(3)
+pkg_syslog_homepage = https://github.com/Vagabond/erlang-syslog
+pkg_syslog_fetch = git
+pkg_syslog_repo = https://github.com/Vagabond/erlang-syslog
+pkg_syslog_commit = master
+
+PACKAGES += taskforce
+pkg_taskforce_name = taskforce
+pkg_taskforce_description = Erlang worker pools for controlled parallelisation of arbitrary tasks.
+pkg_taskforce_homepage = https://github.com/g-andrade/taskforce
+pkg_taskforce_fetch = git
+pkg_taskforce_repo = https://github.com/g-andrade/taskforce
+pkg_taskforce_commit = master
+
+PACKAGES += tddreloader
+pkg_tddreloader_name = tddreloader
+pkg_tddreloader_description = Shell utility for recompiling, reloading, and testing code as it changes
+pkg_tddreloader_homepage = https://github.com/version2beta/tddreloader
+pkg_tddreloader_fetch = git
+pkg_tddreloader_repo = https://github.com/version2beta/tddreloader
+pkg_tddreloader_commit = master
+
+PACKAGES += tempo
+pkg_tempo_name = tempo
+pkg_tempo_description = NIF-based date and time parsing and formatting for Erlang.
+pkg_tempo_homepage = https://github.com/selectel/tempo
+pkg_tempo_fetch = git
+pkg_tempo_repo = https://github.com/selectel/tempo
+pkg_tempo_commit = master
+
+PACKAGES += ticktick
+pkg_ticktick_name = ticktick
+pkg_ticktick_description = Ticktick is an ID generator for messaging services.
+pkg_ticktick_homepage = https://github.com/ericliang/ticktick
+pkg_ticktick_fetch = git
+pkg_ticktick_repo = https://github.com/ericliang/ticktick
+pkg_ticktick_commit = master
+
+PACKAGES += tinymq
+pkg_tinymq_name = tinymq
+pkg_tinymq_description = TinyMQ - a diminutive, in-memory message queue
+pkg_tinymq_homepage = https://github.com/ChicagoBoss/tinymq
+pkg_tinymq_fetch = git
+pkg_tinymq_repo = https://github.com/ChicagoBoss/tinymq
+pkg_tinymq_commit = master
+
+PACKAGES += tinymt
+pkg_tinymt_name = tinymt
+pkg_tinymt_description = TinyMT pseudo random number generator for Erlang.
+pkg_tinymt_homepage = https://github.com/jj1bdx/tinymt-erlang
+pkg_tinymt_fetch = git
+pkg_tinymt_repo = https://github.com/jj1bdx/tinymt-erlang
+pkg_tinymt_commit = master
+
+PACKAGES += tirerl
+pkg_tirerl_name = tirerl
+pkg_tirerl_description = Erlang interface to Elastic Search
+pkg_tirerl_homepage = https://github.com/inaka/tirerl
+pkg_tirerl_fetch = git
+pkg_tirerl_repo = https://github.com/inaka/tirerl
+pkg_tirerl_commit = master
+
+PACKAGES += toml
+pkg_toml_name = toml
+pkg_toml_description = TOML (0.4.0) config parser
+pkg_toml_homepage = http://dozzie.jarowit.net/trac/wiki/TOML
+pkg_toml_fetch = git
+pkg_toml_repo = https://github.com/dozzie/toml
+pkg_toml_commit = v0.2.0
+
+PACKAGES += traffic_tools
+pkg_traffic_tools_name = traffic_tools
+pkg_traffic_tools_description = Simple traffic limiting library
+pkg_traffic_tools_homepage = https://github.com/systra/traffic_tools
+pkg_traffic_tools_fetch = git
+pkg_traffic_tools_repo = https://github.com/systra/traffic_tools
+pkg_traffic_tools_commit = master
+
+PACKAGES += trails
+pkg_trails_name = trails
+pkg_trails_description = A couple of improvements over Cowboy Routes
+pkg_trails_homepage = http://inaka.github.io/cowboy-trails/
+pkg_trails_fetch = git
+pkg_trails_repo = https://github.com/inaka/cowboy-trails
+pkg_trails_commit = master
+
+PACKAGES += trane
+pkg_trane_name = trane
+pkg_trane_description = SAX-style parser for broken HTML, in Erlang
+pkg_trane_homepage = https://github.com/massemanet/trane
+pkg_trane_fetch = git
+pkg_trane_repo = https://github.com/massemanet/trane
+pkg_trane_commit = master
+
+PACKAGES += transit
+pkg_transit_name = transit
+pkg_transit_description = Transit format for Erlang
+pkg_transit_homepage = https://github.com/isaiah/transit-erlang
+pkg_transit_fetch = git
+pkg_transit_repo = https://github.com/isaiah/transit-erlang
+pkg_transit_commit = master
+
+PACKAGES += trie
+pkg_trie_name = trie
+pkg_trie_description = Erlang Trie Implementation
+pkg_trie_homepage = https://github.com/okeuday/trie
+pkg_trie_fetch = git
+pkg_trie_repo = https://github.com/okeuday/trie
+pkg_trie_commit = master
+
+PACKAGES += triq
+pkg_triq_name = triq
+pkg_triq_description = Trifork QuickCheck
+pkg_triq_homepage = https://triq.gitlab.io
+pkg_triq_fetch = git
+pkg_triq_repo = https://gitlab.com/triq/triq.git
+pkg_triq_commit = master
+
+PACKAGES += tunctl
+pkg_tunctl_name = tunctl
+pkg_tunctl_description = Erlang TUN/TAP interface
+pkg_tunctl_homepage = https://github.com/msantos/tunctl
+pkg_tunctl_fetch = git
+pkg_tunctl_repo = https://github.com/msantos/tunctl
+pkg_tunctl_commit = master
+
+PACKAGES += twerl
+pkg_twerl_name = twerl
+pkg_twerl_description = Erlang client for the Twitter Streaming API
+pkg_twerl_homepage = https://github.com/lucaspiller/twerl
+pkg_twerl_fetch = git
+pkg_twerl_repo = https://github.com/lucaspiller/twerl
+pkg_twerl_commit = oauth
+
+PACKAGES += twitter_erlang
+pkg_twitter_erlang_name = twitter_erlang
+pkg_twitter_erlang_description = An Erlang twitter client
+pkg_twitter_erlang_homepage = https://github.com/ngerakines/erlang_twitter
+pkg_twitter_erlang_fetch = git
+pkg_twitter_erlang_repo = https://github.com/ngerakines/erlang_twitter
+pkg_twitter_erlang_commit = master
+
+PACKAGES += ucol_nif
+pkg_ucol_nif_name = ucol_nif
+pkg_ucol_nif_description = ICU-based collation module for Erlang
+pkg_ucol_nif_homepage = https://github.com/refuge/ucol_nif
+pkg_ucol_nif_fetch = git
+pkg_ucol_nif_repo = https://github.com/refuge/ucol_nif
+pkg_ucol_nif_commit = master
+
+PACKAGES += unicorn
+pkg_unicorn_name = unicorn
+pkg_unicorn_description = Generic configuration server
+pkg_unicorn_homepage = https://github.com/shizzard/unicorn
+pkg_unicorn_fetch = git
+pkg_unicorn_repo = https://github.com/shizzard/unicorn
+pkg_unicorn_commit = master
+
+PACKAGES += unsplit
+pkg_unsplit_name = unsplit
+pkg_unsplit_description = Resolves conflicts in Mnesia after network splits
+pkg_unsplit_homepage = https://github.com/uwiger/unsplit
+pkg_unsplit_fetch = git
+pkg_unsplit_repo = https://github.com/uwiger/unsplit
+pkg_unsplit_commit = master
+
+PACKAGES += uuid
+pkg_uuid_name = uuid
+pkg_uuid_description = Erlang UUID Implementation
+pkg_uuid_homepage = https://github.com/okeuday/uuid
+pkg_uuid_fetch = git
+pkg_uuid_repo = https://github.com/okeuday/uuid
+pkg_uuid_commit = master
+
+PACKAGES += ux
+pkg_ux_name = ux
+pkg_ux_description = Unicode eXtension for Erlang (Strings, Collation)
+pkg_ux_homepage = https://github.com/erlang-unicode/ux
+pkg_ux_fetch = git
+pkg_ux_repo = https://github.com/erlang-unicode/ux
+pkg_ux_commit = master
+
+PACKAGES += vert
+pkg_vert_name = vert
+pkg_vert_description = Erlang binding to libvirt virtualization API
+pkg_vert_homepage = https://github.com/msantos/erlang-libvirt
+pkg_vert_fetch = git
+pkg_vert_repo = https://github.com/msantos/erlang-libvirt
+pkg_vert_commit = master
+
+PACKAGES += verx
+pkg_verx_name = verx
+pkg_verx_description = Erlang implementation of the libvirtd remote protocol
+pkg_verx_homepage = https://github.com/msantos/verx
+pkg_verx_fetch = git
+pkg_verx_repo = https://github.com/msantos/verx
+pkg_verx_commit = master
+
+PACKAGES += vmq_acl
+pkg_vmq_acl_name = vmq_acl
+pkg_vmq_acl_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_acl_homepage = https://verne.mq/
+pkg_vmq_acl_fetch = git
+pkg_vmq_acl_repo = https://github.com/erlio/vmq_acl
+pkg_vmq_acl_commit = master
+
+PACKAGES += vmq_bridge
+pkg_vmq_bridge_name = vmq_bridge
+pkg_vmq_bridge_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_bridge_homepage = https://verne.mq/
+pkg_vmq_bridge_fetch = git
+pkg_vmq_bridge_repo = https://github.com/erlio/vmq_bridge
+pkg_vmq_bridge_commit = master
+
+PACKAGES += vmq_graphite
+pkg_vmq_graphite_name = vmq_graphite
+pkg_vmq_graphite_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_graphite_homepage = https://verne.mq/
+pkg_vmq_graphite_fetch = git
+pkg_vmq_graphite_repo = https://github.com/erlio/vmq_graphite
+pkg_vmq_graphite_commit = master
+
+PACKAGES += vmq_passwd
+pkg_vmq_passwd_name = vmq_passwd
+pkg_vmq_passwd_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_passwd_homepage = https://verne.mq/
+pkg_vmq_passwd_fetch = git
+pkg_vmq_passwd_repo = https://github.com/erlio/vmq_passwd
+pkg_vmq_passwd_commit = master
+
+PACKAGES += vmq_server
+pkg_vmq_server_name = vmq_server
+pkg_vmq_server_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_server_homepage = https://verne.mq/
+pkg_vmq_server_fetch = git
+pkg_vmq_server_repo = https://github.com/erlio/vmq_server
+pkg_vmq_server_commit = master
+
+PACKAGES += vmq_snmp
+pkg_vmq_snmp_name = vmq_snmp
+pkg_vmq_snmp_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_snmp_homepage = https://verne.mq/
+pkg_vmq_snmp_fetch = git
+pkg_vmq_snmp_repo = https://github.com/erlio/vmq_snmp
+pkg_vmq_snmp_commit = master
+
+PACKAGES += vmq_systree
+pkg_vmq_systree_name = vmq_systree
+pkg_vmq_systree_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_systree_homepage = https://verne.mq/
+pkg_vmq_systree_fetch = git
+pkg_vmq_systree_repo = https://github.com/erlio/vmq_systree
+pkg_vmq_systree_commit = master
+
+PACKAGES += vmstats
+pkg_vmstats_name = vmstats
+pkg_vmstats_description = Tiny Erlang app that works in conjunction with statsderl to generate information on the Erlang VM for Graphite logs.
+pkg_vmstats_homepage = https://github.com/ferd/vmstats
+pkg_vmstats_fetch = git
+pkg_vmstats_repo = https://github.com/ferd/vmstats
+pkg_vmstats_commit = master
+
+PACKAGES += walrus
+pkg_walrus_name = walrus
+pkg_walrus_description = Walrus - Mustache-like Templating
+pkg_walrus_homepage = https://github.com/devinus/walrus
+pkg_walrus_fetch = git
+pkg_walrus_repo = https://github.com/devinus/walrus
+pkg_walrus_commit = master
+
+PACKAGES += webmachine
+pkg_webmachine_name = webmachine
+pkg_webmachine_description = A REST-based system for building web applications.
+pkg_webmachine_homepage = https://github.com/basho/webmachine
+pkg_webmachine_fetch = git
+pkg_webmachine_repo = https://github.com/basho/webmachine
+pkg_webmachine_commit = master
+
+PACKAGES += websocket_client
+pkg_websocket_client_name = websocket_client
+pkg_websocket_client_description = Erlang websocket client (ws and wss supported)
+pkg_websocket_client_homepage = https://github.com/jeremyong/websocket_client
+pkg_websocket_client_fetch = git
+pkg_websocket_client_repo = https://github.com/jeremyong/websocket_client
+pkg_websocket_client_commit = master
+
+PACKAGES += worker_pool
+pkg_worker_pool_name = worker_pool
+pkg_worker_pool_description = A simple Erlang worker pool
+pkg_worker_pool_homepage = https://github.com/inaka/worker_pool
+pkg_worker_pool_fetch = git
+pkg_worker_pool_repo = https://github.com/inaka/worker_pool
+pkg_worker_pool_commit = master
+
+PACKAGES += wrangler
+pkg_wrangler_name = wrangler
+pkg_wrangler_description = Import of the Wrangler svn repository.
+pkg_wrangler_homepage = http://www.cs.kent.ac.uk/projects/wrangler/Home.html
+pkg_wrangler_fetch = git
+pkg_wrangler_repo = https://github.com/RefactoringTools/wrangler
+pkg_wrangler_commit = master
+
+PACKAGES += wsock
+pkg_wsock_name = wsock
+pkg_wsock_description = Erlang library to build WebSocket clients and servers
+pkg_wsock_homepage = https://github.com/madtrick/wsock
+pkg_wsock_fetch = git
+pkg_wsock_repo = https://github.com/madtrick/wsock
+pkg_wsock_commit = master
+
+PACKAGES += xhttpc
+pkg_xhttpc_name = xhttpc
+pkg_xhttpc_description = Extensible HTTP Client for Erlang
+pkg_xhttpc_homepage = https://github.com/seriyps/xhttpc
+pkg_xhttpc_fetch = git
+pkg_xhttpc_repo = https://github.com/seriyps/xhttpc
+pkg_xhttpc_commit = master
+
+PACKAGES += xref_runner
+pkg_xref_runner_name = xref_runner
+pkg_xref_runner_description = Erlang Xref Runner (inspired by rebar xref)
+pkg_xref_runner_homepage = https://github.com/inaka/xref_runner
+pkg_xref_runner_fetch = git
+pkg_xref_runner_repo = https://github.com/inaka/xref_runner
+pkg_xref_runner_commit = master
+
+PACKAGES += yamerl
+pkg_yamerl_name = yamerl
+pkg_yamerl_description = YAML 1.2 parser in pure Erlang
+pkg_yamerl_homepage = https://github.com/yakaz/yamerl
+pkg_yamerl_fetch = git
+pkg_yamerl_repo = https://github.com/yakaz/yamerl
+pkg_yamerl_commit = master
+
+PACKAGES += yamler
+pkg_yamler_name = yamler
+pkg_yamler_description = libyaml-based YAML loader for Erlang
+pkg_yamler_homepage = https://github.com/goertzenator/yamler
+pkg_yamler_fetch = git
+pkg_yamler_repo = https://github.com/goertzenator/yamler
+pkg_yamler_commit = master
+
+PACKAGES += yaws
+pkg_yaws_name = yaws
+pkg_yaws_description = Yaws webserver
+pkg_yaws_homepage = http://yaws.hyber.org
+pkg_yaws_fetch = git
+pkg_yaws_repo = https://github.com/klacke/yaws
+pkg_yaws_commit = master
+
+PACKAGES += zab_engine
+pkg_zab_engine_name = zab_engine
+pkg_zab_engine_description = ZAB protocol implemented in Erlang
+pkg_zab_engine_homepage = https://github.com/xinmingyao/zab_engine
+pkg_zab_engine_fetch = git
+pkg_zab_engine_repo = https://github.com/xinmingyao/zab_engine
+pkg_zab_engine_commit = master
+
+PACKAGES += zabbix_sender
+pkg_zabbix_sender_name = zabbix_sender
+pkg_zabbix_sender_description = Zabbix trapper for sending data to Zabbix in pure Erlang
+pkg_zabbix_sender_homepage = https://github.com/stalkermn/zabbix_sender
+pkg_zabbix_sender_fetch = git
+pkg_zabbix_sender_repo = https://github.com/stalkermn/zabbix_sender.git
+pkg_zabbix_sender_commit = master
+
+PACKAGES += zeta
+pkg_zeta_name = zeta
+pkg_zeta_description = HTTP access log parser in Erlang
+pkg_zeta_homepage = https://github.com/s1n4/zeta
+pkg_zeta_fetch = git
+pkg_zeta_repo = https://github.com/s1n4/zeta
+pkg_zeta_commit = master
+
+PACKAGES += zippers
+pkg_zippers_name = zippers
+pkg_zippers_description = A library for functional zipper data structures in Erlang.
+pkg_zippers_homepage = https://github.com/ferd/zippers
+pkg_zippers_fetch = git
+pkg_zippers_repo = https://github.com/ferd/zippers
+pkg_zippers_commit = master
+
+PACKAGES += zlists
+pkg_zlists_name = zlists
+pkg_zlists_description = Erlang lazy lists library.
+pkg_zlists_homepage = https://github.com/vjache/erlang-zlists
+pkg_zlists_fetch = git
+pkg_zlists_repo = https://github.com/vjache/erlang-zlists
+pkg_zlists_commit = master
+
+PACKAGES += zraft_lib
+pkg_zraft_lib_name = zraft_lib
+pkg_zraft_lib_description = Erlang raft consensus protocol implementation
+pkg_zraft_lib_homepage = https://github.com/dreyk/zraft_lib
+pkg_zraft_lib_fetch = git
+pkg_zraft_lib_repo = https://github.com/dreyk/zraft_lib
+pkg_zraft_lib_commit = master
+
+PACKAGES += zucchini
+pkg_zucchini_name = zucchini
+pkg_zucchini_description = An Erlang INI parser
+pkg_zucchini_homepage = https://github.com/devinus/zucchini
+pkg_zucchini_fetch = git
+pkg_zucchini_repo = https://github.com/devinus/zucchini
+pkg_zucchini_commit = master
+
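+# End of the built-in package index. A minimal sketch of how an entry above is
+# consumed (using the recon entry as an example, kept commented out so nothing
+# here changes the build): naming a package in DEPS makes the dependency
+# targets below fetch it from pkg_<name>_repo at pkg_<name>_commit.
+#
+#     DEPS = recon
+#     # effectively: git clone https://github.com/ferd/recon, checkout master
+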
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: search
+
+define pkg_print
+ $(verbose) printf "%s\n" \
+ $(if $(call core_eq,$(1),$(pkg_$(1)_name)),,"Pkg name: $(1)") \
+ "App name: $(pkg_$(1)_name)" \
+ "Description: $(pkg_$(1)_description)" \
+ "Home page: $(pkg_$(1)_homepage)" \
+ "Fetch with: $(pkg_$(1)_fetch)" \
+ "Repository: $(pkg_$(1)_repo)" \
+ "Commit: $(pkg_$(1)_commit)" \
+ ""
+
+endef
+
+search:
+ifdef q
+ $(foreach p,$(PACKAGES), \
+ $(if $(findstring $(call core_lc,$(q)),$(call core_lc,$(pkg_$(p)_name) $(pkg_$(p)_description))), \
+ $(call pkg_print,$(p))))
+else
+ $(foreach p,$(PACKAGES),$(call pkg_print,$(p)))
+endif
+
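+# Usage sketch for the search target above: pass a case-insensitive query in
+# the q variable to filter packages by name and description; without q, every
+# entry in the index is printed via pkg_print.
+#
+#     $ make search q=pool
+#     $ make search
+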
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-deps clean-tmp-deps.log
+
+# Configuration.
+
+ifdef OTP_DEPS
+$(warning The variable OTP_DEPS is deprecated in favor of LOCAL_DEPS.)
+endif
+
+IGNORE_DEPS ?=
+export IGNORE_DEPS
+
+APPS_DIR ?= $(CURDIR)/apps
+export APPS_DIR
+
+DEPS_DIR ?= $(CURDIR)/deps
+export DEPS_DIR
+
+REBAR_DEPS_DIR = $(DEPS_DIR)
+export REBAR_DEPS_DIR
+
+REBAR_GIT ?= https://github.com/rebar/rebar
+REBAR_COMMIT ?= 576e12171ab8d69b048b827b92aa65d067deea01
+
+# External "early" plugins (see core/plugins.mk for regular plugins).
+# They both use the core_dep_plugin macro.
+
+define core_dep_plugin
+ifeq ($(2),$(PROJECT))
+-include $$(patsubst $(PROJECT)/%,%,$(1))
+else
+-include $(DEPS_DIR)/$(1)
+
+$(DEPS_DIR)/$(1): $(DEPS_DIR)/$(2) ;
+endif
+endef
+
+DEP_EARLY_PLUGINS ?=
+
+$(foreach p,$(DEP_EARLY_PLUGINS),\
+ $(eval $(if $(findstring /,$p),\
+ $(call core_dep_plugin,$p,$(firstword $(subst /, ,$p))),\
+ $(call core_dep_plugin,$p/early-plugins.mk,$p))))
+
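+# Illustration of how the foreach above expands, assuming a hypothetical
+# dependency named mydep: a bare name includes that dep's early-plugins.mk,
+# while a path selects one specific file inside the dep.
+#
+#     DEP_EARLY_PLUGINS = mydep           # -include $(DEPS_DIR)/mydep/early-plugins.mk
+#     DEP_EARLY_PLUGINS = mydep/mk/a.mk   # -include $(DEPS_DIR)/mydep/mk/a.mk
+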
+# Query functions.
+
+query_fetch_method = $(if $(dep_$(1)),$(call _qfm_dep,$(word 1,$(dep_$(1)))),$(call _qfm_pkg,$(1)))
+_qfm_dep = $(if $(dep_fetch_$(1)),$(1),$(if $(IS_DEP),legacy,fail))
+_qfm_pkg = $(if $(pkg_$(1)_fetch),$(pkg_$(1)_fetch),fail)
+
+query_name = $(if $(dep_$(1)),$(1),$(if $(pkg_$(1)_name),$(pkg_$(1)_name),$(1)))
+
+query_repo = $(call _qr,$(1),$(call query_fetch_method,$(1)))
+_qr = $(if $(query_repo_$(2)),$(call query_repo_$(2),$(1)),$(call dep_repo,$(1)))
+
+query_repo_default = $(if $(dep_$(1)),$(word 2,$(dep_$(1))),$(pkg_$(1)_repo))
+query_repo_git = $(patsubst git://github.com/%,https://github.com/%,$(call query_repo_default,$(1)))
+query_repo_git-subfolder = $(call query_repo_git,$(1))
+query_repo_git-submodule = -
+query_repo_hg = $(call query_repo_default,$(1))
+query_repo_svn = $(call query_repo_default,$(1))
+query_repo_cp = $(call query_repo_default,$(1))
+query_repo_ln = $(call query_repo_default,$(1))
+query_repo_hex = https://hex.pm/packages/$(if $(word 3,$(dep_$(1))),$(word 3,$(dep_$(1))),$(1))
+query_repo_fail = -
+query_repo_legacy = -
+
+query_version = $(call _qv,$(1),$(call query_fetch_method,$(1)))
+_qv = $(if $(query_version_$(2)),$(call query_version_$(2),$(1)),$(call dep_commit,$(1)))
+
+query_version_default = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(word 3,$(dep_$(1))),$(pkg_$(1)_commit)))
+query_version_git = $(call query_version_default,$(1))
+query_version_git-subfolder = $(call query_version_git,$(1))
+query_version_git-submodule = -
+query_version_hg = $(call query_version_default,$(1))
+query_version_svn = -
+query_version_cp = -
+query_version_ln = -
+query_version_hex = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(word 2,$(dep_$(1))),$(pkg_$(1)_commit)))
+query_version_fail = -
+query_version_legacy = -
+
+query_extra = $(call _qe,$(1),$(call query_fetch_method,$(1)))
+_qe = $(if $(query_extra_$(2)),$(call query_extra_$(2),$(1)),-)
+
+query_extra_git = -
+query_extra_git-subfolder = $(if $(dep_$(1)),subfolder=$(word 4,$(dep_$(1))),-)
+query_extra_git-submodule = -
+query_extra_hg = -
+query_extra_svn = -
+query_extra_cp = -
+query_extra_ln = -
+query_extra_hex = $(if $(dep_$(1)),package-name=$(word 3,$(dep_$(1))),-)
+query_extra_fail = -
+query_extra_legacy = -
+
+query_absolute_path = $(addprefix $(DEPS_DIR)/,$(call query_name,$(1)))
+
+# Deprecated legacy query functions.
+dep_fetch = $(call query_fetch_method,$(1))
+dep_name = $(call query_name,$(1))
+dep_repo = $(call query_repo_git,$(1))
+dep_commit = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(if $(filter hex,$(word 1,$(dep_$(1)))),$(word 2,$(dep_$(1))),$(word 3,$(dep_$(1)))),$(pkg_$(1)_commit)))
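+
+# Both the query functions and the legacy helpers above parse the positional
+# `dep_*` specification format: fetch method first, then method-specific
+# words. For example (repositories and versions are illustrative):
+#
+#   dep_cowboy = git https://github.com/ninenines/cowboy master
+#   dep_jsx = hex 3.1.0
+#   dep_ci = git-subfolder https://github.com/user/monorepo master apps/ci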
+
+LOCAL_DEPS_DIRS = $(foreach a,$(LOCAL_DEPS),$(if $(wildcard $(APPS_DIR)/$(a)),$(APPS_DIR)/$(a)))
+ALL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(foreach dep,$(filter-out $(IGNORE_DEPS),$(BUILD_DEPS) $(DEPS)),$(call dep_name,$(dep))))
+
+# When we are calling an app directly we don't want to include it here,
+# otherwise it'll be treated both as an app and as a top-level project.
+ALL_APPS_DIRS = $(if $(wildcard $(APPS_DIR)/),$(filter-out $(APPS_DIR),$(shell find $(APPS_DIR) -maxdepth 1 -type d)))
+ifdef ROOT_DIR
+ifndef IS_APP
+ALL_APPS_DIRS := $(filter-out $(APPS_DIR)/$(notdir $(CURDIR)),$(ALL_APPS_DIRS))
+endif
+endif
+
+ifeq ($(filter $(APPS_DIR) $(DEPS_DIR),$(subst :, ,$(ERL_LIBS))),)
+ifeq ($(ERL_LIBS),)
+ ERL_LIBS = $(APPS_DIR):$(DEPS_DIR)
+else
+ ERL_LIBS := $(ERL_LIBS):$(APPS_DIR):$(DEPS_DIR)
+endif
+endif
+export ERL_LIBS
+
+export NO_AUTOPATCH
+
+# Verbosity.
+
+dep_verbose_0 = @echo " DEP $1 ($(call dep_commit,$1))";
+dep_verbose_2 = set -x;
+dep_verbose = $(dep_verbose_$(V))
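+
+# For example, the default `make` (V=0) prints the short " DEP name (commit)"
+# lines defined above, while `make V=2` traces the underlying shell commands
+# with `set -x`.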
+
+# Optimization: don't recompile deps unless truly necessary.
+
+ifndef IS_DEP
+ifneq ($(MAKELEVEL),0)
+$(shell rm -f ebin/dep_built)
+endif
+endif
+
+# Core targets.
+
+ALL_APPS_DIRS_TO_BUILD = $(if $(LOCAL_DEPS_DIRS)$(IS_APP),$(LOCAL_DEPS_DIRS),$(ALL_APPS_DIRS))
+
+apps:: $(ALL_APPS_DIRS) clean-tmp-deps.log | $(ERLANG_MK_TMP)
+# Create the ebin directory for all apps to make sure Erlang recognizes them
+# as proper OTP applications when using -include_lib. This is a temporary
+# fix; a proper fix would be to compile apps/* in the right order.
+ifndef IS_APP
+ifneq ($(ALL_APPS_DIRS),)
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ mkdir -p $$dep/ebin; \
+ done
+endif
+endif
+# At the top level: if LOCAL_DEPS is defined with at least one local app, only
+# compile that list of apps. Otherwise, compile everything.
+# Within an app: compile all LOCAL_DEPS that are (uncompiled) local apps.
+ifneq ($(ALL_APPS_DIRS_TO_BUILD),)
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS_TO_BUILD); do \
+ if grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/apps.log; then \
+ :; \
+ else \
+ echo $$dep >> $(ERLANG_MK_TMP)/apps.log; \
+ $(MAKE) -C $$dep $(if $(IS_TEST),test-build-app) IS_APP=1; \
+ fi \
+ done
+endif
+
+clean-tmp-deps.log:
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_TMP)/apps.log $(ERLANG_MK_TMP)/deps.log
+endif
+
+# Erlang.mk does not rebuild dependencies after they have been compiled
+# once. If a developer is working on the top-level project and some of its
+# dependencies at the same time, they may want to change this behavior.
+# There are two solutions, illustrated below:
+#     1. Set `FULL=1` so that all dependencies are visited and
+#        recursively recompiled if necessary.
+#     2. Set `FORCE_REBUILD=` to the specific list of dependencies that
+#        should be recompiled (instead of the whole set).
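+#
+# For example (dependency names are illustrative only):
+#
+#     $ make FULL=1
+#     $ make FORCE_REBUILD="cowlib ranch"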
+
+FORCE_REBUILD ?=
+
+ifeq ($(origin FULL),undefined)
+ifneq ($(strip $(force_rebuild_dep)$(FORCE_REBUILD)),)
+define force_rebuild_dep
+echo "$(FORCE_REBUILD)" | grep -qw "$$(basename "$1")"
+endef
+endif
+endif
+
+ifneq ($(SKIP_DEPS),)
+deps::
+else
+deps:: $(ALL_DEPS_DIRS) apps clean-tmp-deps.log | $(ERLANG_MK_TMP)
+ifneq ($(ALL_DEPS_DIRS),)
+ $(verbose) set -e; for dep in $(ALL_DEPS_DIRS); do \
+ if grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/deps.log; then \
+ :; \
+ else \
+ echo $$dep >> $(ERLANG_MK_TMP)/deps.log; \
+ if [ -z "$(strip $(FULL))" ] $(if $(force_rebuild_dep),&& ! ($(call force_rebuild_dep,$$dep)),) && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ elif [ -f $$dep/GNUmakefile ] || [ -f $$dep/makefile ] || [ -f $$dep/Makefile ]; then \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ else \
+ echo "Error: No Makefile to build dependency $$dep." >&2; \
+ exit 2; \
+ fi \
+ fi \
+ done
+endif
+endif
+
+# Deps related targets.
+
+# @todo rename GNUmakefile and makefile into Makefile first, if they exist
+# While the Makefile could also be named GNUmakefile or makefile,
+# in practice only Makefile has been needed so far.
+define dep_autopatch
+ if [ -f $(DEPS_DIR)/$(1)/erlang.mk ]; then \
+ rm -rf $(DEPS_DIR)/$1/ebin/; \
+ $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+ $(call dep_autopatch_erlang_mk,$(1)); \
+ elif [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+ if [ -f $(DEPS_DIR)/$1/rebar.lock ]; then \
+ $(call dep_autopatch2,$1); \
+ elif [ 0 != `grep -c "include ../\w*\.mk" $(DEPS_DIR)/$(1)/Makefile` ]; then \
+ $(call dep_autopatch2,$(1)); \
+ elif [ 0 != `grep -ci "^[^#].*rebar" $(DEPS_DIR)/$(1)/Makefile` ]; then \
+ $(call dep_autopatch2,$(1)); \
+ elif [ -n "`find $(DEPS_DIR)/$(1)/ -type f -name \*.mk -not -name erlang.mk -exec grep -i "^[^#].*rebar" '{}' \;`" ]; then \
+ $(call dep_autopatch2,$(1)); \
+ fi \
+ else \
+ if [ ! -d $(DEPS_DIR)/$(1)/src/ ]; then \
+ $(call dep_autopatch_noop,$(1)); \
+ else \
+ $(call dep_autopatch2,$(1)); \
+ fi \
+ fi
+endef
+
+define dep_autopatch2
+ ! test -f $(DEPS_DIR)/$1/ebin/$1.app || \
+ mv -n $(DEPS_DIR)/$1/ebin/$1.app $(DEPS_DIR)/$1/src/$1.app.src; \
+ rm -f $(DEPS_DIR)/$1/ebin/$1.app; \
+ if [ -f $(DEPS_DIR)/$1/src/$1.app.src.script ]; then \
+ $(call erlang,$(call dep_autopatch_appsrc_script.erl,$(1))); \
+ fi; \
+ $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+ if [ -f $(DEPS_DIR)/$(1)/rebar -o -f $(DEPS_DIR)/$(1)/rebar.config -o -f $(DEPS_DIR)/$(1)/rebar.config.script -o -f $(DEPS_DIR)/$1/rebar.lock ]; then \
+ $(call dep_autopatch_fetch_rebar); \
+ $(call dep_autopatch_rebar,$(1)); \
+ else \
+ $(call dep_autopatch_gen,$(1)); \
+ fi
+endef
+
+define dep_autopatch_noop
+ printf "noop:\n" > $(DEPS_DIR)/$(1)/Makefile
+endef
+
+# Replace "include erlang.mk" with a line that will load the parent Erlang.mk
+# if given. Do it for all 3 possible Makefile file names.
+ifeq ($(NO_AUTOPATCH_ERLANG_MK),)
+define dep_autopatch_erlang_mk
+ for f in Makefile makefile GNUmakefile; do \
+ if [ -f $(DEPS_DIR)/$1/$$f ]; then \
+ sed -i.bak s/'include *erlang.mk'/'include $$(if $$(ERLANG_MK_FILENAME),$$(ERLANG_MK_FILENAME),erlang.mk)'/ $(DEPS_DIR)/$1/$$f; \
+ fi \
+ done
+endef
+else
+define dep_autopatch_erlang_mk
+ :
+endef
+endif
+
+define dep_autopatch_gen
+ printf "%s\n" \
+ "ERLC_OPTS = +debug_info" \
+ "include ../../erlang.mk" > $(DEPS_DIR)/$(1)/Makefile
+endef
+
+# We use flock/lockf when available to avoid concurrency issues.
+define dep_autopatch_fetch_rebar
+ if command -v flock >/dev/null; then \
+ flock $(ERLANG_MK_TMP)/rebar.lock sh -c "$(call dep_autopatch_fetch_rebar2)"; \
+ elif command -v lockf >/dev/null; then \
+ lockf $(ERLANG_MK_TMP)/rebar.lock sh -c "$(call dep_autopatch_fetch_rebar2)"; \
+ else \
+ $(call dep_autopatch_fetch_rebar2); \
+ fi
+endef
+
+define dep_autopatch_fetch_rebar2
+ if [ ! -d $(ERLANG_MK_TMP)/rebar ]; then \
+ git clone -q -n -- $(REBAR_GIT) $(ERLANG_MK_TMP)/rebar; \
+ cd $(ERLANG_MK_TMP)/rebar; \
+ git checkout -q $(REBAR_COMMIT); \
+ ./bootstrap; \
+ cd -; \
+ fi
+endef
+
+define dep_autopatch_rebar
+ if [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+ mv $(DEPS_DIR)/$(1)/Makefile $(DEPS_DIR)/$(1)/Makefile.orig.mk; \
+ fi; \
+ $(call erlang,$(call dep_autopatch_rebar.erl,$(1))); \
+ rm -f $(DEPS_DIR)/$(1)/ebin/$(1).app
+endef
+
+define dep_autopatch_rebar.erl
+ application:load(rebar),
+ application:set_env(rebar, log_level, debug),
+ rmemo:start(),
+ Conf1 = case file:consult("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config)") of
+ {ok, Conf0} -> Conf0;
+ _ -> []
+ end,
+ {Conf, OsEnv} = fun() ->
+ case filelib:is_file("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)") of
+ false -> {Conf1, []};
+ true ->
+ Bindings0 = erl_eval:new_bindings(),
+ Bindings1 = erl_eval:add_binding('CONFIG', Conf1, Bindings0),
+ Bindings = erl_eval:add_binding('SCRIPT', "$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings1),
+ Before = os:getenv(),
+ {ok, Conf2} = file:script("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings),
+ {Conf2, lists:foldl(fun(E, Acc) -> lists:delete(E, Acc) end, os:getenv(), Before)}
+ end
+ end(),
+ Write = fun (Text) ->
+ file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/Makefile)", Text, [append])
+ end,
+ Escape = fun (Text) ->
+ re:replace(Text, "\\\\$$", "\$$$$", [global, {return, list}])
+ end,
+ Write("IGNORE_DEPS += edown eper eunit_formatters meck node_package "
+ "rebar_lock_deps_plugin rebar_vsn_plugin reltool_util\n"),
+ Write("C_SRC_DIR = /path/do/not/exist\n"),
+ Write("C_SRC_TYPE = rebar\n"),
+ Write("DRV_CFLAGS = -fPIC\nexport DRV_CFLAGS\n"),
+ Write(["ERLANG_ARCH = ", rebar_utils:wordsize(), "\nexport ERLANG_ARCH\n"]),
+ ToList = fun
+ (V) when is_atom(V) -> atom_to_list(V);
+ (V) when is_list(V) -> "'\\"" ++ V ++ "\\"'"
+ end,
+ fun() ->
+ Write("ERLC_OPTS = +debug_info\nexport ERLC_OPTS\n"),
+ case lists:keyfind(erl_opts, 1, Conf) of
+ false -> ok;
+ {_, ErlOpts} ->
+ lists:foreach(fun
+ ({d, D}) ->
+ Write("ERLC_OPTS += -D" ++ ToList(D) ++ "=1\n");
+ ({d, DKey, DVal}) ->
+ Write("ERLC_OPTS += -D" ++ ToList(DKey) ++ "=" ++ ToList(DVal) ++ "\n");
+ ({i, I}) ->
+ Write(["ERLC_OPTS += -I ", I, "\n"]);
+ ({platform_define, Regex, D}) ->
+ case rebar_utils:is_arch(Regex) of
+ true -> Write("ERLC_OPTS += -D" ++ ToList(D) ++ "=1\n");
+ false -> ok
+ end;
+ ({parse_transform, PT}) ->
+ Write("ERLC_OPTS += +'{parse_transform, " ++ ToList(PT) ++ "}'\n");
+ (_) -> ok
+ end, ErlOpts)
+ end,
+ Write("\n")
+ end(),
+ GetHexVsn = fun(N, NP) ->
+ case file:consult("$(call core_native_path,$(DEPS_DIR)/$1/rebar.lock)") of
+ {ok, Lock} ->
+ io:format("~p~n", [Lock]),
+ case lists:keyfind("1.1.0", 1, Lock) of
+ {_, LockPkgs} ->
+ io:format("~p~n", [LockPkgs]),
+ case lists:keyfind(atom_to_binary(N, latin1), 1, LockPkgs) of
+ {_, {pkg, _, Vsn}, _} ->
+ io:format("~p~n", [Vsn]),
+ {N, {hex, NP, binary_to_list(Vsn)}};
+ _ ->
+ false
+ end;
+ _ ->
+ false
+ end;
+ _ ->
+ false
+ end
+ end,
+ SemVsn = fun
+ ("~>" ++ S0) ->
+ S = case S0 of
+ " " ++ S1 -> S1;
+ _ -> S0
+ end,
+ case length([ok || $$. <- S]) of
+ 0 -> S ++ ".0.0";
+ 1 -> S ++ ".0";
+ _ -> S
+ end;
+ (S) -> S
+ end,
+ fun() ->
+ File = case lists:keyfind(deps, 1, Conf) of
+ false -> [];
+ {_, Deps} ->
+ [begin case case Dep of
+ N when is_atom(N) -> GetHexVsn(N, N);
+ {N, S} when is_atom(N), is_list(S) -> {N, {hex, N, SemVsn(S)}};
+ {N, {pkg, NP}} when is_atom(N) -> GetHexVsn(N, NP);
+ {N, S, {pkg, NP}} -> {N, {hex, NP, S}};
+ {N, S} when is_tuple(S) -> {N, S};
+ {N, _, S} -> {N, S};
+ {N, _, S, _} -> {N, S};
+ _ -> false
+ end of
+ false -> ok;
+ {Name, Source} ->
+ {Method, Repo, Commit} = case Source of
+ {hex, NPV, V} -> {hex, V, NPV};
+ {git, R} -> {git, R, master};
+ {M, R, {branch, C}} -> {M, R, C};
+ {M, R, {ref, C}} -> {M, R, C};
+ {M, R, {tag, C}} -> {M, R, C};
+ {M, R, C} -> {M, R, C}
+ end,
+ Write(io_lib:format("DEPS += ~s\ndep_~s = ~s ~s ~s~n", [Name, Name, Method, Repo, Commit]))
+ end end || Dep <- Deps]
+ end
+ end(),
+ fun() ->
+ case lists:keyfind(erl_first_files, 1, Conf) of
+ false -> ok;
+ {_, Files} ->
+ Names = [[" ", case lists:reverse(F) of
+ "lre." ++ Elif -> lists:reverse(Elif);
+ "lrx." ++ Elif -> lists:reverse(Elif);
+ "lry." ++ Elif -> lists:reverse(Elif);
+ Elif -> lists:reverse(Elif)
+ end] || "src/" ++ F <- Files],
+ Write(io_lib:format("COMPILE_FIRST +=~s\n", [Names]))
+ end
+ end(),
+ Write("\n\nrebar_dep: preprocess pre-deps deps pre-app app\n"),
+ Write("\npreprocess::\n"),
+ Write("\npre-deps::\n"),
+ Write("\npre-app::\n"),
+ PatchHook = fun(Cmd) ->
+ Cmd2 = re:replace(Cmd, "^([g]?make)(.*)( -C.*)", "\\\\1\\\\3\\\\2", [{return, list}]),
+ case Cmd2 of
+ "make -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1);
+ "gmake -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1);
+ "make " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1);
+ "gmake " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1);
+ _ -> Escape(Cmd)
+ end
+ end,
+ fun() ->
+ case lists:keyfind(pre_hooks, 1, Conf) of
+ false -> ok;
+ {_, Hooks} ->
+ [case H of
+ {'get-deps', Cmd} ->
+ Write("\npre-deps::\n\t" ++ PatchHook(Cmd) ++ "\n");
+ {compile, Cmd} ->
+ Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n");
+ {Regex, compile, Cmd} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n");
+ false -> ok
+ end;
+ _ -> ok
+ end || H <- Hooks]
+ end
+ end(),
+ ShellToMk = fun(V0) ->
+ V1 = re:replace(V0, "[$$][(]", "$$\(shell ", [global]),
+ V = re:replace(V1, "([$$])(?![(])(\\\\w*)", "\\\\1(\\\\2)", [global]),
+ re:replace(V, "-Werror\\\\b", "", [{return, list}, global])
+ end,
+ PortSpecs = fun() ->
+ case lists:keyfind(port_specs, 1, Conf) of
+ false ->
+ case filelib:is_dir("$(call core_native_path,$(DEPS_DIR)/$1/c_src)") of
+ false -> [];
+ true ->
+ [{"priv/" ++ proplists:get_value(so_name, Conf, "$(1)_drv.so"),
+ proplists:get_value(port_sources, Conf, ["c_src/*.c"]), []}]
+ end;
+ {_, Specs} ->
+ lists:flatten([case S of
+ {Output, Input} -> {ShellToMk(Output), Input, []};
+ {Regex, Output, Input} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {ShellToMk(Output), Input, []};
+ false -> []
+ end;
+ {Regex, Output, Input, [{env, Env}]} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {ShellToMk(Output), Input, Env};
+ false -> []
+ end
+ end || S <- Specs])
+ end
+ end(),
+ PortSpecWrite = fun (Text) ->
+ file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/c_src/Makefile.erlang.mk)", Text, [append])
+ end,
+ case PortSpecs of
+ [] -> ok;
+ _ ->
+ Write("\npre-app::\n\t@$$\(MAKE) --no-print-directory -f c_src/Makefile.erlang.mk\n"),
+ PortSpecWrite(io_lib:format("ERL_CFLAGS ?= -finline-functions -Wall -fPIC -I \\"~s/erts-~s/include\\" -I \\"~s\\"\n",
+ [code:root_dir(), erlang:system_info(version), code:lib_dir(erl_interface, include)])),
+ PortSpecWrite(io_lib:format("ERL_LDFLAGS ?= -L \\"~s\\" -lei\n",
+ [code:lib_dir(erl_interface, lib)])),
+ [PortSpecWrite(["\n", E, "\n"]) || E <- OsEnv],
+ FilterEnv = fun(Env) ->
+ lists:flatten([case E of
+ {_, _} -> E;
+ {Regex, K, V} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {K, V};
+ false -> []
+ end
+ end || E <- Env])
+ end,
+ MergeEnv = fun(Env) ->
+ lists:foldl(fun ({K, V}, Acc) ->
+ case lists:keyfind(K, 1, Acc) of
+ false -> [{K, rebar_utils:expand_env_variable(V, K, "")}|Acc];
+ {_, V0} -> [{K, rebar_utils:expand_env_variable(V, K, V0)}|Acc]
+ end
+ end, [], Env)
+ end,
+ PortEnv = case lists:keyfind(port_env, 1, Conf) of
+ false -> [];
+ {_, PortEnv0} -> FilterEnv(PortEnv0)
+ end,
+ PortSpec = fun ({Output, Input0, Env}) ->
+ filelib:ensure_dir("$(call core_native_path,$(DEPS_DIR)/$1/)" ++ Output),
+ Input = [[" ", I] || I <- Input0],
+ PortSpecWrite([
+ [["\n", K, " = ", ShellToMk(V)] || {K, V} <- lists:reverse(MergeEnv(PortEnv))],
+ case $(PLATFORM) of
+ darwin -> "\n\nLDFLAGS += -flat_namespace -undefined suppress";
+ _ -> ""
+ end,
+ "\n\nall:: ", Output, "\n\t@:\n\n",
+ "%.o: %.c\n\t$$\(CC) -c -o $$\@ $$\< $$\(CFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.C\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.cc\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.cpp\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ [[Output, ": ", K, " += ", ShellToMk(V), "\n"] || {K, V} <- lists:reverse(MergeEnv(FilterEnv(Env)))],
+ Output, ": $$\(foreach ext,.c .C .cc .cpp,",
+ "$$\(patsubst %$$\(ext),%.o,$$\(filter %$$\(ext),$$\(wildcard", Input, "))))\n",
+ "\t$$\(CC) -o $$\@ $$\? $$\(LDFLAGS) $$\(ERL_LDFLAGS) $$\(DRV_LDFLAGS) $$\(EXE_LDFLAGS)",
+ case {filename:extension(Output), $(PLATFORM)} of
+ {[], _} -> "\n";
+ {_, darwin} -> "\n";
+ _ -> " -shared\n"
+ end])
+ end,
+ [PortSpec(S) || S <- PortSpecs]
+ end,
+ fun() ->
+ case lists:keyfind(plugins, 1, Conf) of
+ false -> ok;
+ {_, Plugins0} ->
+ Plugins = [P || P <- Plugins0, is_tuple(P)],
+ case lists:keyfind('lfe-compile', 1, Plugins) of
+ false -> ok;
+ _ -> Write("\nBUILD_DEPS = lfe lfe.mk\ndep_lfe.mk = git https://github.com/ninenines/lfe.mk master\nDEP_PLUGINS = lfe.mk\n")
+ end
+ end
+ end(),
+ Write("\ninclude $$\(if $$\(ERLANG_MK_FILENAME),$$\(ERLANG_MK_FILENAME),erlang.mk)"),
+ RunPlugin = fun(Plugin, Step) ->
+ case erlang:function_exported(Plugin, Step, 2) of
+ false -> ok;
+ true ->
+ c:cd("$(call core_native_path,$(DEPS_DIR)/$1/)"),
+ Ret = Plugin:Step({config, "", Conf, dict:new(), dict:new(), dict:new(),
+ dict:store(base_dir, "", dict:new())}, undefined),
+ io:format("rebar plugin ~p step ~p ret ~p~n", [Plugin, Step, Ret])
+ end
+ end,
+ fun() ->
+ case lists:keyfind(plugins, 1, Conf) of
+ false -> ok;
+ {_, Plugins0} ->
+ Plugins = [P || P <- Plugins0, is_atom(P)],
+ [begin
+ case lists:keyfind(deps, 1, Conf) of
+ false -> ok;
+ {_, Deps} ->
+ case lists:keyfind(P, 1, Deps) of
+ false -> ok;
+ _ ->
+ Path = "$(call core_native_path,$(DEPS_DIR)/)" ++ atom_to_list(P),
+ io:format("~s", [os:cmd("$(MAKE) -C $(call core_native_path,$(DEPS_DIR)/$1) " ++ Path)]),
+ io:format("~s", [os:cmd("$(MAKE) -C " ++ Path ++ " IS_DEP=1")]),
+ code:add_patha(Path ++ "/ebin")
+ end
+ end
+ end || P <- Plugins],
+ [case code:load_file(P) of
+ {module, P} -> ok;
+ _ ->
+ case lists:keyfind(plugin_dir, 1, Conf) of
+ false -> ok;
+ {_, PluginsDir} ->
+ ErlFile = "$(call core_native_path,$(DEPS_DIR)/$1/)" ++ PluginsDir ++ "/" ++ atom_to_list(P) ++ ".erl",
+ {ok, P, Bin} = compile:file(ErlFile, [binary]),
+ {module, P} = code:load_binary(P, ErlFile, Bin)
+ end
+ end || P <- Plugins],
+ [RunPlugin(P, preprocess) || P <- Plugins],
+ [RunPlugin(P, pre_compile) || P <- Plugins],
+ [RunPlugin(P, compile) || P <- Plugins]
+ end
+ end(),
+ halt()
+endef
+
+define dep_autopatch_appsrc_script.erl
+ AppSrc = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+ AppSrcScript = AppSrc ++ ".script",
+ {ok, Conf0} = file:consult(AppSrc),
+ Bindings0 = erl_eval:new_bindings(),
+ Bindings1 = erl_eval:add_binding('CONFIG', Conf0, Bindings0),
+ Bindings = erl_eval:add_binding('SCRIPT', AppSrcScript, Bindings1),
+ Conf = case file:script(AppSrcScript, Bindings) of
+ {ok, [C]} -> C;
+ {ok, C} -> C
+ end,
+ ok = file:write_file(AppSrc, io_lib:format("~p.~n", [Conf])),
+ halt()
+endef
+
+define dep_autopatch_appsrc.erl
+ AppSrcOut = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+ AppSrcIn = case filelib:is_regular(AppSrcOut) of false -> "$(call core_native_path,$(DEPS_DIR)/$1/ebin/$1.app)"; true -> AppSrcOut end,
+ case filelib:is_regular(AppSrcIn) of
+ false -> ok;
+ true ->
+ {ok, [{application, $(1), L0}]} = file:consult(AppSrcIn),
+ L1 = lists:keystore(modules, 1, L0, {modules, []}),
+ L2 = case lists:keyfind(vsn, 1, L1) of
+ {_, git} -> lists:keyreplace(vsn, 1, L1, {vsn, lists:droplast(os:cmd("git -C $(DEPS_DIR)/$1 describe --dirty --tags --always"))});
+ {_, {cmd, _}} -> lists:keyreplace(vsn, 1, L1, {vsn, "cmd"});
+ _ -> L1
+ end,
+ L3 = case lists:keyfind(registered, 1, L2) of false -> [{registered, []}|L2]; _ -> L2 end,
+ ok = file:write_file(AppSrcOut, io_lib:format("~p.~n", [{application, $(1), L3}])),
+ case AppSrcOut of AppSrcIn -> ok; _ -> ok = file:delete(AppSrcIn) end
+ end,
+ halt()
+endef
+
+define dep_fetch_git
+ git clone -q -n -- $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && git checkout -q $(call dep_commit,$(1));
+endef
+
+define dep_fetch_git-subfolder
+ mkdir -p $(ERLANG_MK_TMP)/git-subfolder; \
+ git clone -q -n -- $(call dep_repo,$1) \
+ $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1); \
+ cd $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1) \
+ && git checkout -q $(call dep_commit,$1); \
+ ln -s $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1)/$(word 4,$(dep_$(1))) \
+ $(DEPS_DIR)/$(call dep_name,$1);
+endef
+
+define dep_fetch_git-submodule
+ git submodule update --init -- $(DEPS_DIR)/$1;
+endef
+
+define dep_fetch_hg
+ hg clone -q -U $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && hg update -q $(call dep_commit,$(1));
+endef
+
+define dep_fetch_svn
+ svn checkout -q $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+define dep_fetch_cp
+ cp -R $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+define dep_fetch_ln
+ ln -s $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+# Hex dependencies are specified by package name and version only, so there
+# is no need to look them up in the Erlang.mk package index.
+define dep_fetch_hex
+ mkdir -p $(ERLANG_MK_TMP)/hex $(DEPS_DIR)/$1; \
+ $(call core_http_get,$(ERLANG_MK_TMP)/hex/$1.tar,\
+ https://repo.hex.pm/tarballs/$(if $(word 3,$(dep_$1)),$(word 3,$(dep_$1)),$1)-$(strip $(word 2,$(dep_$1))).tar); \
+ tar -xOf $(ERLANG_MK_TMP)/hex/$1.tar contents.tar.gz | tar -C $(DEPS_DIR)/$1 -xzf -;
+endef
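+
+# In a `hex` specification the second word is the version and an optional
+# third word overrides the Hex package name when it differs from the
+# application name. For example (packages and versions are illustrative):
+#
+#   dep_jsx = hex 3.1.0
+#   dep_mylib = hex 1.0.0 real_pkg_name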
+
+define dep_fetch_fail
+ echo "Error: Unknown or invalid dependency: $(1)." >&2; \
+ exit 78;
+endef
+
+# Kept for compatibility with older Erlang.mk configurations.
+define dep_fetch_legacy
+ $(warning WARNING: '$(1)' dependency configuration uses deprecated format.) \
+ git clone -q -n -- $(word 1,$(dep_$(1))) $(DEPS_DIR)/$(1); \
+ cd $(DEPS_DIR)/$(1) && git checkout -q $(if $(word 2,$(dep_$(1))),$(word 2,$(dep_$(1))),master);
+endef
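+
+# A legacy specification gives only the repository and an optional commit,
+# for example (repository is illustrative):
+#
+#   dep_my_dep = https://github.com/user/my_dep master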
+
+define dep_target
+$(DEPS_DIR)/$(call dep_name,$1): | $(ERLANG_MK_TMP)
+ $(eval DEP_NAME := $(call dep_name,$1))
+ $(eval DEP_STR := $(if $(filter $1,$(DEP_NAME)),$1,"$1 ($(DEP_NAME))"))
+ $(verbose) if test -d $(APPS_DIR)/$(DEP_NAME); then \
+ echo "Error: Dependency" $(DEP_STR) "conflicts with application found in $(APPS_DIR)/$(DEP_NAME)." >&2; \
+ exit 17; \
+ fi
+ $(verbose) mkdir -p $(DEPS_DIR)
+ $(dep_verbose) $(call dep_fetch_$(strip $(call dep_fetch,$(1))),$(1))
+ $(verbose) if [ -f $(DEPS_DIR)/$(1)/configure.ac -o -f $(DEPS_DIR)/$(1)/configure.in ] \
+ && [ ! -f $(DEPS_DIR)/$(1)/configure ]; then \
+ echo " AUTO " $(DEP_STR); \
+ cd $(DEPS_DIR)/$(1) && autoreconf -Wall -vif -I m4; \
+ fi
+ - $(verbose) if [ -f $(DEPS_DIR)/$(DEP_NAME)/configure ]; then \
+ echo " CONF " $(DEP_STR); \
+ cd $(DEPS_DIR)/$(DEP_NAME) && ./configure; \
+ fi
+ifeq ($(filter $(1),$(NO_AUTOPATCH)),)
+ $(verbose) $$(MAKE) --no-print-directory autopatch-$(DEP_NAME)
+endif
+
+.PHONY: autopatch-$(call dep_name,$1)
+
+autopatch-$(call dep_name,$1)::
+ $(verbose) if [ "$(1)" = "amqp_client" -a "$(RABBITMQ_CLIENT_PATCH)" ]; then \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \
+ echo " PATCH Downloading rabbitmq-codegen"; \
+ git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \
+ fi; \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-server ]; then \
+ echo " PATCH Downloading rabbitmq-server"; \
+ git clone https://github.com/rabbitmq/rabbitmq-server.git $(DEPS_DIR)/rabbitmq-server; \
+ fi; \
+ ln -s $(DEPS_DIR)/amqp_client/deps/rabbit_common-0.0.0 $(DEPS_DIR)/rabbit_common; \
+ elif [ "$(1)" = "rabbit" -a "$(RABBITMQ_SERVER_PATCH)" ]; then \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \
+ echo " PATCH Downloading rabbitmq-codegen"; \
+ git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \
+ fi \
+ elif [ "$1" = "elixir" -a "$(ELIXIR_PATCH)" ]; then \
+ ln -s lib/elixir/ebin $(DEPS_DIR)/elixir/; \
+ else \
+ $$(call dep_autopatch,$(call dep_name,$1)) \
+ fi
+endef
+
+$(foreach dep,$(BUILD_DEPS) $(DEPS),$(eval $(call dep_target,$(dep))))
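+
+# Autopatching can be skipped for specific dependencies by listing them in
+# NO_AUTOPATCH, for example (the name is illustrative):
+#
+#   NO_AUTOPATCH = my_legacy_dep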
+
+ifndef IS_APP
+clean:: clean-apps
+
+clean-apps:
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ $(MAKE) -C $$dep clean IS_APP=1; \
+ done
+
+distclean:: distclean-apps
+
+distclean-apps:
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ $(MAKE) -C $$dep distclean IS_APP=1; \
+ done
+endif
+
+ifndef SKIP_DEPS
+distclean:: distclean-deps
+
+distclean-deps:
+ $(gen_verbose) rm -rf $(DEPS_DIR)
+endif
+
+# Forward-declare variables used in core/deps-tools.mk. This is required
+# in case plugins use them.
+
+ERLANG_MK_RECURSIVE_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-deps-list.log
+ERLANG_MK_RECURSIVE_DOC_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-doc-deps-list.log
+ERLANG_MK_RECURSIVE_REL_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-rel-deps-list.log
+ERLANG_MK_RECURSIVE_TEST_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-test-deps-list.log
+ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-shell-deps-list.log
+
+ERLANG_MK_QUERY_DEPS_FILE = $(ERLANG_MK_TMP)/query-deps.log
+ERLANG_MK_QUERY_DOC_DEPS_FILE = $(ERLANG_MK_TMP)/query-doc-deps.log
+ERLANG_MK_QUERY_REL_DEPS_FILE = $(ERLANG_MK_TMP)/query-rel-deps.log
+ERLANG_MK_QUERY_TEST_DEPS_FILE = $(ERLANG_MK_TMP)/query-test-deps.log
+ERLANG_MK_QUERY_SHELL_DEPS_FILE = $(ERLANG_MK_TMP)/query-shell-deps.log
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: clean-app
+
+# Configuration.
+
+ERLC_OPTS ?= -Werror +debug_info +warn_export_vars +warn_shadow_vars \
+ +warn_obsolete_guard # +bin_opt_info +warn_export_all +warn_missing_spec
+COMPILE_FIRST ?=
+COMPILE_FIRST_PATHS = $(addprefix src/,$(addsuffix .erl,$(COMPILE_FIRST)))
+ERLC_EXCLUDE ?=
+ERLC_EXCLUDE_PATHS = $(addprefix src/,$(addsuffix .erl,$(ERLC_EXCLUDE)))
+
+ERLC_ASN1_OPTS ?=
+
+ERLC_MIB_OPTS ?=
+COMPILE_MIB_FIRST ?=
+COMPILE_MIB_FIRST_PATHS = $(addprefix mibs/,$(addsuffix .mib,$(COMPILE_MIB_FIRST)))
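+
+# For example, a project Makefile can override or extend these defaults
+# before including erlang.mk (the module name is illustrative):
+#
+#   ERLC_OPTS = +debug_info +warn_missing_spec
+#   COMPILE_FIRST = my_behaviour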
+
+# Verbosity.
+
+app_verbose_0 = @echo " APP " $(PROJECT);
+app_verbose_2 = set -x;
+app_verbose = $(app_verbose_$(V))
+
+appsrc_verbose_0 = @echo " APP " $(PROJECT).app.src;
+appsrc_verbose_2 = set -x;
+appsrc_verbose = $(appsrc_verbose_$(V))
+
+makedep_verbose_0 = @echo " DEPEND" $(PROJECT).d;
+makedep_verbose_2 = set -x;
+makedep_verbose = $(makedep_verbose_$(V))
+
+erlc_verbose_0 = @echo " ERLC " $(filter-out $(patsubst %,%.erl,$(ERLC_EXCLUDE)),\
+ $(filter %.erl %.core,$(?F)));
+erlc_verbose_2 = set -x;
+erlc_verbose = $(erlc_verbose_$(V))
+
+xyrl_verbose_0 = @echo " XYRL " $(filter %.xrl %.yrl,$(?F));
+xyrl_verbose_2 = set -x;
+xyrl_verbose = $(xyrl_verbose_$(V))
+
+asn1_verbose_0 = @echo " ASN1 " $(filter %.asn1,$(?F));
+asn1_verbose_2 = set -x;
+asn1_verbose = $(asn1_verbose_$(V))
+
+mib_verbose_0 = @echo " MIB " $(filter %.bin %.mib,$(?F));
+mib_verbose_2 = set -x;
+mib_verbose = $(mib_verbose_$(V))
+
+ifneq ($(wildcard src/),)
+
+# Targets.
+
+app:: $(if $(wildcard ebin/test),clean) deps
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d
+ $(verbose) $(MAKE) --no-print-directory app-build
+
+ifeq ($(wildcard src/$(PROJECT_MOD).erl),)
+define app_file
+{application, '$(PROJECT)', [
+ {description, "$(PROJECT_DESCRIPTION)"},
+ {vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP),
+ {id$(comma)$(space)"$(1)"}$(comma))
+ {modules, [$(call comma_list,$(2))]},
+ {registered, []},
+ {applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(foreach dep,$(DEPS),$(call dep_name,$(dep))))]},
+ {env, $(subst \,\\,$(PROJECT_ENV))}$(if $(findstring {,$(PROJECT_APP_EXTRA_KEYS)),$(comma)$(newline)$(tab)$(subst \,\\,$(PROJECT_APP_EXTRA_KEYS)),)
+]}.
+endef
+else
+define app_file
+{application, '$(PROJECT)', [
+ {description, "$(PROJECT_DESCRIPTION)"},
+ {vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP),
+ {id$(comma)$(space)"$(1)"}$(comma))
+ {modules, [$(call comma_list,$(2))]},
+ {registered, [$(call comma_list,$(PROJECT)_sup $(PROJECT_REGISTERED))]},
+ {applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(foreach dep,$(DEPS),$(call dep_name,$(dep))))]},
+ {mod, {$(PROJECT_MOD), []}},
+ {env, $(subst \,\\,$(PROJECT_ENV))}$(if $(findstring {,$(PROJECT_APP_EXTRA_KEYS)),$(comma)$(newline)$(tab)$(subst \,\\,$(PROJECT_APP_EXTRA_KEYS)),)
+]}.
+endef
+endif
+
+app-build: ebin/$(PROJECT).app
+ $(verbose) :
+
+# Source files.
+
+ALL_SRC_FILES := $(sort $(call core_find,src/,*))
+
+ERL_FILES := $(filter %.erl,$(ALL_SRC_FILES))
+CORE_FILES := $(filter %.core,$(ALL_SRC_FILES))
+
+# ASN.1 files.
+
+ifneq ($(wildcard asn1/),)
+ASN1_FILES = $(sort $(call core_find,asn1/,*.asn1))
+ERL_FILES += $(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES))))
+
+define compile_asn1
+ $(verbose) mkdir -p include/
+ $(asn1_verbose) erlc -v -I include/ -o asn1/ +noobj $(ERLC_ASN1_OPTS) $(1)
+ $(verbose) mv asn1/*.erl src/
+ -$(verbose) mv asn1/*.hrl include/
+ $(verbose) mv asn1/*.asn1db include/
+endef
+
+$(PROJECT).d:: $(ASN1_FILES)
+ $(if $(strip $?),$(call compile_asn1,$?))
+endif
+
+# SNMP MIB files.
+
+ifneq ($(wildcard mibs/),)
+MIB_FILES = $(sort $(call core_find,mibs/,*.mib))
+
+$(PROJECT).d:: $(COMPILE_MIB_FIRST_PATHS) $(MIB_FILES)
+ $(verbose) mkdir -p include/ priv/mibs/
+ $(mib_verbose) erlc -v $(ERLC_MIB_OPTS) -o priv/mibs/ -I priv/mibs/ $?
+ $(mib_verbose) erlc -o include/ -- $(addprefix priv/mibs/,$(patsubst %.mib,%.bin,$(notdir $?)))
+endif
+
+# Leex and Yecc files.
+
+XRL_FILES := $(filter %.xrl,$(ALL_SRC_FILES))
+XRL_ERL_FILES = $(addprefix src/,$(patsubst %.xrl,%.erl,$(notdir $(XRL_FILES))))
+ERL_FILES += $(XRL_ERL_FILES)
+
+YRL_FILES := $(filter %.yrl,$(ALL_SRC_FILES))
+YRL_ERL_FILES = $(addprefix src/,$(patsubst %.yrl,%.erl,$(notdir $(YRL_FILES))))
+ERL_FILES += $(YRL_ERL_FILES)
+
+$(PROJECT).d:: $(XRL_FILES) $(YRL_FILES)
+ $(if $(strip $?),$(xyrl_verbose) erlc -v -o src/ $(YRL_ERLC_OPTS) $?)
+
+# Erlang and Core Erlang files.
+
+define makedep.erl
+ E = ets:new(makedep, [bag]),
+ G = digraph:new([acyclic]),
+ ErlFiles = lists:usort(string:tokens("$(ERL_FILES)", " ")),
+ DepsDir = "$(call core_native_path,$(DEPS_DIR))",
+ AppsDir = "$(call core_native_path,$(APPS_DIR))",
+ DepsDirsSrc = "$(if $(wildcard $(DEPS_DIR)/*/src), $(call core_native_path,$(wildcard $(DEPS_DIR)/*/src)))",
+ DepsDirsInc = "$(if $(wildcard $(DEPS_DIR)/*/include), $(call core_native_path,$(wildcard $(DEPS_DIR)/*/include)))",
+ AppsDirsSrc = "$(if $(wildcard $(APPS_DIR)/*/src), $(call core_native_path,$(wildcard $(APPS_DIR)/*/src)))",
+ AppsDirsInc = "$(if $(wildcard $(APPS_DIR)/*/include), $(call core_native_path,$(wildcard $(APPS_DIR)/*/include)))",
+ DepsDirs = lists:usort(string:tokens(DepsDirsSrc++DepsDirsInc, " ")),
+ AppsDirs = lists:usort(string:tokens(AppsDirsSrc++AppsDirsInc, " ")),
+ Modules = [{list_to_atom(filename:basename(F, ".erl")), F} || F <- ErlFiles],
+ Add = fun (Mod, Dep) ->
+ case lists:keyfind(Dep, 1, Modules) of
+ false -> ok;
+ {_, DepFile} ->
+ {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+ ets:insert(E, {ModFile, DepFile}),
+ digraph:add_vertex(G, Mod),
+ digraph:add_vertex(G, Dep),
+ digraph:add_edge(G, Mod, Dep)
+ end
+ end,
+ AddHd = fun (F, Mod, DepFile) ->
+ case file:open(DepFile, [read]) of
+ {error, enoent} ->
+ ok;
+ {ok, Fd} ->
+ {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+ case ets:match(E, {ModFile, DepFile}) of
+ [] ->
+ ets:insert(E, {ModFile, DepFile}),
+					F(F, Fd, Mod, 0);
+ _ -> ok
+ end
+ end
+ end,
+ SearchHrl = fun
+		F(_Hrl, []) -> {error, enoent};
+		F(Hrl, [Dir|Dirs]) ->
+			HrlF = filename:join([Dir, Hrl]),
+			case filelib:is_file(HrlF) of
+				true ->
+					{ok, HrlF};
+				false -> F(Hrl, Dirs)
+ end
+ end,
+ Attr = fun
+ (_F, Mod, behavior, Dep) ->
+ Add(Mod, Dep);
+ (_F, Mod, behaviour, Dep) ->
+ Add(Mod, Dep);
+ (_F, Mod, compile, {parse_transform, Dep}) ->
+ Add(Mod, Dep);
+ (_F, Mod, compile, Opts) when is_list(Opts) ->
+ case proplists:get_value(parse_transform, Opts) of
+ undefined -> ok;
+ Dep -> Add(Mod, Dep)
+ end;
+ (F, Mod, include, Hrl) ->
+			case SearchHrl(Hrl, ["src", "include", AppsDir, DepsDir] ++ AppsDirs ++ DepsDirs) of
+ {ok, FoundHrl} -> AddHd(F, Mod, FoundHrl);
+ {error, _} -> false
+ end;
+ (F, Mod, include_lib, Hrl) ->
+			case SearchHrl(Hrl, ["src", "include", AppsDir, DepsDir] ++ AppsDirs ++ DepsDirs) of
+ {ok, FoundHrl} -> AddHd(F, Mod, FoundHrl);
+ {error, _} -> false
+ end;
+ (F, Mod, import, {Imp, _}) ->
+ IsFile =
+ case lists:keyfind(Imp, 1, Modules) of
+ false -> false;
+ {_, FilePath} -> filelib:is_file(FilePath)
+ end,
+ case IsFile of
+ false -> ok;
+ true -> Add(Mod, Imp)
+ end;
+ (_, _, _, _) -> ok
+ end,
+ MakeDepend = fun
+ (F, Fd, Mod, StartLocation) ->
+ {ok, Filename} = file:pid2name(Fd),
+ case io:parse_erl_form(Fd, undefined, StartLocation) of
+ {ok, AbsData, EndLocation} ->
+ case AbsData of
+ {attribute, _, Key, Value} ->
+ Attr(F, Mod, Key, Value),
+ F(F, Fd, Mod, EndLocation);
+ _ -> F(F, Fd, Mod, EndLocation)
+ end;
+			{eof, _} -> file:close(Fd);
+			{error, ErrorDescription} ->
+				file:close(Fd);
+ {error, ErrorInfo, ErrorLocation} ->
+ F(F, Fd, Mod, ErrorLocation)
+ end,
+ ok
+ end,
+ [begin
+ Mod = list_to_atom(filename:basename(F, ".erl")),
+ case file:open(F, [read]) of
+			{ok, Fd} -> MakeDepend(MakeDepend, Fd, Mod, 0);
+ {error, enoent} -> ok
+ end
+ end || F <- ErlFiles],
+ Depend = sofs:to_external(sofs:relation_to_family(sofs:relation(ets:tab2list(E)))),
+ CompileFirst = [X || X <- lists:reverse(digraph_utils:topsort(G)), [] =/= digraph:in_neighbours(G, X)],
+ TargetPath = fun(Target) ->
+ case lists:keyfind(Target, 1, Modules) of
+ false -> "";
+ {_, DepFile} ->
+ DirSubname = tl(string:tokens(filename:dirname(DepFile), "/")),
+ string:join(DirSubname ++ [atom_to_list(Target)], "/")
+ end
+ end,
+ Output0 = [
+ "# Generated by Erlang.mk. Edit at your own risk!\n\n",
+ [[F, "::", [[" ", D] || D <- Deps], "; @touch \$$@\n"] || {F, Deps} <- Depend],
+ "\nCOMPILE_FIRST +=", [[" ", TargetPath(CF)] || CF <- CompileFirst], "\n"
+ ],
+ Output = case "é" of
+ [233] -> unicode:characters_to_binary(Output0);
+ _ -> Output0
+ end,
+ ok = file:write_file("$(1)", Output),
+ halt()
+endef
+
+ifeq ($(if $(NO_MAKEDEP),$(wildcard $(PROJECT).d),),)
+$(PROJECT).d:: $(ERL_FILES) $(call core_find,include/,*.hrl) $(MAKEFILE_LIST)
+ $(makedep_verbose) $(call erlang,$(call makedep.erl,$@))
+endif
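+
+# For example, `make NO_MAKEDEP=1` skips regenerating $(PROJECT).d when the
+# file already exists, trading possibly stale dependency information for a
+# faster rebuild.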
+
+ifeq ($(IS_APP)$(IS_DEP),)
+ifneq ($(words $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES)),0)
+# Rebuild everything when the Makefile changes.
+$(ERLANG_MK_TMP)/last-makefile-change: $(MAKEFILE_LIST) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES); \
+ touch -c $(PROJECT).d; \
+ fi
+ $(verbose) touch $@
+
+$(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES):: $(ERLANG_MK_TMP)/last-makefile-change
+ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change
+endif
+endif
+
+$(PROJECT).d::
+ $(verbose) :
+
+include $(wildcard $(PROJECT).d)
+
+ebin/$(PROJECT).app:: ebin/
+
+ebin/:
+ $(verbose) mkdir -p ebin/
+
+define compile_erl
+ $(erlc_verbose) erlc -v $(if $(IS_DEP),$(filter-out -Werror,$(ERLC_OPTS)),$(ERLC_OPTS)) -o ebin/ \
+ -pa ebin/ -I include/ $(filter-out $(ERLC_EXCLUDE_PATHS),$(COMPILE_FIRST_PATHS) $(1))
+endef
+
+define validate_app_file
+ case file:consult("ebin/$(PROJECT).app") of
+ {ok, _} -> halt();
+ _ -> halt(1)
+ end
+endef
+
+ebin/$(PROJECT).app:: $(ERL_FILES) $(CORE_FILES) $(wildcard src/$(PROJECT).app.src)
+ $(eval FILES_TO_COMPILE := $(filter-out src/$(PROJECT).app.src,$?))
+ $(if $(strip $(FILES_TO_COMPILE)),$(call compile_erl,$(FILES_TO_COMPILE)))
+# Older git versions do not have the --first-parent flag. Do without in that case.
+ $(eval GITDESCRIBE := $(shell git describe --dirty --abbrev=7 --tags --always --first-parent 2>/dev/null \
+ || git describe --dirty --abbrev=7 --tags --always 2>/dev/null || true))
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(filter-out $(ERLC_EXCLUDE_PATHS),$(ERL_FILES) $(CORE_FILES) $(BEAM_FILES)))))))
+ifeq ($(wildcard src/$(PROJECT).app.src),)
+ $(app_verbose) printf '$(subst %,%%,$(subst $(newline),\n,$(subst ','\'',$(call app_file,$(GITDESCRIBE),$(MODULES)))))' \
+ > ebin/$(PROJECT).app
+ $(verbose) if ! $(call erlang,$(call validate_app_file)); then \
+ echo "The .app file produced is invalid. Please verify the value of PROJECT_ENV." >&2; \
+ exit 1; \
+ fi
+else
+ $(verbose) if [ -z "$$(grep -e '^[^%]*{\s*modules\s*,' src/$(PROJECT).app.src)" ]; then \
+		echo "No {modules, []} entry found in src/$(PROJECT).app.src. Please consult the erlang.mk documentation for instructions." >&2; \
+ exit 1; \
+ fi
+ $(appsrc_verbose) cat src/$(PROJECT).app.src \
+ | sed "s/{[[:space:]]*modules[[:space:]]*,[[:space:]]*\[\]}/{modules, \[$(call comma_list,$(MODULES))\]}/" \
+ | sed "s/{id,[[:space:]]*\"git\"}/{id, \"$(subst /,\/,$(GITDESCRIBE))\"}/" \
+ > ebin/$(PROJECT).app
+endif
+ifneq ($(wildcard src/$(PROJECT).appup),)
+ $(verbose) cp src/$(PROJECT).appup ebin/
+endif
+
+clean:: clean-app
+
+clean-app:
+ $(gen_verbose) rm -rf $(PROJECT).d ebin/ priv/mibs/ $(XRL_ERL_FILES) $(YRL_ERL_FILES) \
+ $(addprefix include/,$(patsubst %.mib,%.hrl,$(notdir $(MIB_FILES)))) \
+ $(addprefix include/,$(patsubst %.asn1,%.hrl,$(notdir $(ASN1_FILES)))) \
+ $(addprefix include/,$(patsubst %.asn1,%.asn1db,$(notdir $(ASN1_FILES)))) \
+ $(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES))))
+
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Viktor Söderqvist <viktor@zuiderkwast.se>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: docs-deps
+
+# Configuration.
+
+ALL_DOC_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(DOC_DEPS))
+
+# Targets.
+
+$(foreach dep,$(DOC_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+doc-deps:
+else
+doc-deps: $(ALL_DOC_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_DOC_DEPS_DIRS) ; do $(MAKE) -C $$dep IS_DEP=1; done
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: rel-deps
+
+# Configuration.
+
+ALL_REL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(REL_DEPS))
+
+# Targets.
+
+$(foreach dep,$(REL_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+rel-deps:
+else
+rel-deps: $(ALL_REL_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_REL_DEPS_DIRS) ; do $(MAKE) -C $$dep; done
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: test-deps test-dir test-build clean-test-dir
+
+# Configuration.
+
+TEST_DIR ?= $(CURDIR)/test
+
+ALL_TEST_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(TEST_DEPS))
+
+TEST_ERLC_OPTS ?= +debug_info +warn_export_vars +warn_shadow_vars +warn_obsolete_guard
+TEST_ERLC_OPTS += -DTEST=1
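+
+# For example, a project can extend the test-only compiler options in its
+# Makefile (the parse transform name is illustrative):
+#
+#   TEST_ERLC_OPTS += +'{parse_transform, my_pt}'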
+
+# Targets.
+
+$(foreach dep,$(TEST_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+test-deps:
+else
+test-deps: $(ALL_TEST_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_TEST_DEPS_DIRS) ; do \
+ if [ -z "$(strip $(FULL))" ] && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ else \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ fi \
+ done
+endif
+
+ifneq ($(wildcard $(TEST_DIR)),)
+test-dir: $(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build
+ @:
+
+test_erlc_verbose_0 = @echo " ERLC " $(filter-out $(patsubst %,%.erl,$(ERLC_EXCLUDE)),\
+ $(filter %.erl %.core,$(notdir $(FILES_TO_COMPILE))));
+test_erlc_verbose_2 = set -x;
+test_erlc_verbose = $(test_erlc_verbose_$(V))
+
+define compile_test_erl
+ $(test_erlc_verbose) erlc -v $(TEST_ERLC_OPTS) -o $(TEST_DIR) \
+ -pa ebin/ -I include/ $(1)
+endef
+
+ERL_TEST_FILES = $(call core_find,$(TEST_DIR)/,*.erl)
+$(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build: $(ERL_TEST_FILES) $(MAKEFILE_LIST)
+ $(eval FILES_TO_COMPILE := $(if $(filter $(MAKEFILE_LIST),$?),$(filter $(ERL_TEST_FILES),$^),$?))
+ $(if $(strip $(FILES_TO_COMPILE)),$(call compile_test_erl,$(FILES_TO_COMPILE)) && touch $@)
+endif
+
+test-build:: IS_TEST=1
+test-build:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build:: $(if $(wildcard src),$(if $(wildcard ebin/test),,clean)) $(if $(IS_APP),,deps test-deps)
+# We already compiled everything when IS_APP=1.
+ifndef IS_APP
+ifneq ($(wildcard src),)
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(verbose) $(MAKE) --no-print-directory app-build ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(gen_verbose) touch ebin/test
+endif
+ifneq ($(wildcard $(TEST_DIR)),)
+ $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+endif
+endif
+
+# Roughly the same as test-build, but when IS_APP=1.
+# We only care about compiling the current application.
+ifdef IS_APP
+test-build-app:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build-app:: deps test-deps
+ifneq ($(wildcard src),)
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(verbose) $(MAKE) --no-print-directory app-build ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(gen_verbose) touch ebin/test
+endif
+ifneq ($(wildcard $(TEST_DIR)),)
+ $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+endif
+endif
+
+clean:: clean-test-dir
+
+clean-test-dir:
+ifneq ($(wildcard $(TEST_DIR)/*.beam),)
+ $(gen_verbose) rm -f $(TEST_DIR)/*.beam $(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: rebar.config
+
+# We strip out -Werror because we don't want to fail due to
+# warnings when used as a dependency.
+
+compat_prepare_erlc_opts = $(shell echo "$1" | sed 's/, */,/g')
+
+define compat_convert_erlc_opts
+$(if $(filter-out -Werror,$1),\
+ $(if $(findstring +,$1),\
+ $(shell echo $1 | cut -b 2-)))
+endef
+
+define compat_erlc_opts_to_list
+[$(call comma_list,$(foreach o,$(call compat_prepare_erlc_opts,$1),$(call compat_convert_erlc_opts,$o)))]
+endef
+
+define compat_rebar_config
+{deps, [
+$(call comma_list,$(foreach d,$(DEPS),\
+ $(if $(filter hex,$(call dep_fetch,$d)),\
+ {$(call dep_name,$d)$(comma)"$(call dep_repo,$d)"},\
+ {$(call dep_name,$d)$(comma)".*"$(comma){git,"$(call dep_repo,$d)"$(comma)"$(call dep_commit,$d)"}})))
+]}.
+{erl_opts, $(call compat_erlc_opts_to_list,$(ERLC_OPTS))}.
+endef
+
+rebar.config:
+ $(gen_verbose) $(call core_render,compat_rebar_config,rebar.config)
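+
+# For example, in a project declaring `DEPS = cowboy` (illustrative), running
+# `make rebar.config` writes a rebar.config whose {deps, [...]} entry points
+# at the same repository and commit Erlang.mk itself would fetch.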
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter asciideck,$(DEPS) $(DOC_DEPS)),asciideck)
+
+.PHONY: asciidoc asciidoc-guide asciidoc-manual install-asciidoc distclean-asciidoc-guide distclean-asciidoc-manual
+
+# Core targets.
+
+docs:: asciidoc
+
+distclean:: distclean-asciidoc-guide distclean-asciidoc-manual
+
+# Plugin-specific targets.
+
+asciidoc: asciidoc-guide asciidoc-manual
+
+# User guide.
+
+ifeq ($(wildcard doc/src/guide/book.asciidoc),)
+asciidoc-guide:
+else
+asciidoc-guide: distclean-asciidoc-guide doc-deps
+ a2x -v -f pdf doc/src/guide/book.asciidoc && mv doc/src/guide/book.pdf doc/guide.pdf
+ a2x -v -f chunked doc/src/guide/book.asciidoc && mv doc/src/guide/book.chunked/ doc/html/
+
+distclean-asciidoc-guide:
+ $(gen_verbose) rm -rf doc/html/ doc/guide.pdf
+endif
+
+# Man pages.
+
+ASCIIDOC_MANUAL_FILES := $(wildcard doc/src/manual/*.asciidoc)
+
+ifeq ($(ASCIIDOC_MANUAL_FILES),)
+asciidoc-manual:
+else
+
+# Configuration.
+
+MAN_INSTALL_PATH ?= /usr/local/share/man
+MAN_SECTIONS ?= 3 7
+MAN_PROJECT ?= $(shell echo $(PROJECT) | sed 's/^./\U&\E/')
+MAN_VERSION ?= $(PROJECT_VERSION)
+
+# Plugin-specific targets.
+
+define asciidoc2man.erl
+try
+ [begin
+ io:format(" ADOC ~s~n", [F]),
+ ok = asciideck:to_manpage(asciideck:parse_file(F), #{
+ compress => gzip,
+ outdir => filename:dirname(F),
+ extra2 => "$(MAN_PROJECT) $(MAN_VERSION)",
+ extra3 => "$(MAN_PROJECT) Function Reference"
+ })
+ end || F <- [$(shell echo $(addprefix $(comma)\",$(addsuffix \",$1)) | sed 's/^.//')]],
+ halt(0)
+catch C:E ->
+ io:format("Exception ~p:~p~nStacktrace: ~p~n", [C, E, erlang:get_stacktrace()]),
+ halt(1)
+end.
+endef
+
+asciidoc-manual:: doc-deps
+
+asciidoc-manual:: $(ASCIIDOC_MANUAL_FILES)
+ $(gen_verbose) $(call erlang,$(call asciidoc2man.erl,$?))
+ $(verbose) $(foreach s,$(MAN_SECTIONS),mkdir -p doc/man$s/ && mv doc/src/manual/*.$s.gz doc/man$s/;)
+
+install-docs:: install-asciidoc
+
+install-asciidoc: asciidoc-manual
+ $(foreach s,$(MAN_SECTIONS),\
+ mkdir -p $(MAN_INSTALL_PATH)/man$s/ && \
+ install -g `id -g` -o `id -u` -m 0644 doc/man$s/*.gz $(MAN_INSTALL_PATH)/man$s/;)
+
+distclean-asciidoc-manual:
+ $(gen_verbose) rm -rf $(addprefix doc/man,$(MAN_SECTIONS))
+endif
+endif
+
+# Copyright (c) 2014-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: bootstrap bootstrap-lib bootstrap-rel new list-templates
+
+# Core targets.
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Bootstrap targets:" \
+ " bootstrap Generate a skeleton of an OTP application" \
+ " bootstrap-lib Generate a skeleton of an OTP library" \
+ " bootstrap-rel Generate the files needed to build a release" \
+ " new-app in=NAME Create a new local OTP application NAME" \
+ " new-lib in=NAME Create a new local OTP library NAME" \
+ " new t=TPL n=NAME Generate a module NAME based on the template TPL" \
+ " new t=T n=N in=APP Generate a module NAME based on the template TPL in APP" \
+ " list-templates List available templates"
+
+# Bootstrap templates.
+
+define bs_appsrc
+{application, $p, [
+ {description, ""},
+ {vsn, "0.1.0"},
+ {id, "git"},
+ {modules, []},
+ {registered, []},
+ {applications, [
+ kernel,
+ stdlib
+ ]},
+ {mod, {$p_app, []}},
+ {env, []}
+]}.
+endef
+
+define bs_appsrc_lib
+{application, $p, [
+ {description, ""},
+ {vsn, "0.1.0"},
+ {id, "git"},
+ {modules, []},
+ {registered, []},
+ {applications, [
+ kernel,
+ stdlib
+ ]}
+]}.
+endef
+
+# To prevent autocompletion issues with ZSH, we add "include erlang.mk"
+# separately during the actual bootstrap.
+define bs_Makefile
+PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.1.0
+$(if $(SP),
+# Whitespace to be used when creating files from templates.
+SP = $(SP)
+)
+endef
+
+define bs_apps_Makefile
+PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.1.0
+$(if $(SP),
+# Whitespace to be used when creating files from templates.
+SP = $(SP)
+)
+# Make sure we know where the applications are located.
+ROOT_DIR ?= $(call core_relpath,$(dir $(ERLANG_MK_FILENAME)),$(APPS_DIR)/app)
+APPS_DIR ?= ..
+DEPS_DIR ?= $(call core_relpath,$(DEPS_DIR),$(APPS_DIR)/app)
+
+include $$(ROOT_DIR)/erlang.mk
+endef
+
+define bs_app
+-module($p_app).
+-behaviour(application).
+
+-export([start/2]).
+-export([stop/1]).
+
+start(_Type, _Args) ->
+ $p_sup:start_link().
+
+stop(_State) ->
+ ok.
+endef
+
+define bs_relx_config
+{release, {$p_release, "1"}, [$p, sasl, runtime_tools]}.
+{extended_start_script, true}.
+{sys_config, "config/sys.config"}.
+{vm_args, "config/vm.args"}.
+endef
+
+define bs_sys_config
+[
+].
+endef
+
+define bs_vm_args
+-name $p@127.0.0.1
+-setcookie $p
+-heart
+endef
+
+# Normal templates.
+
+define tpl_supervisor
+-module($(n)).
+-behaviour(supervisor).
+
+-export([start_link/0]).
+-export([init/1]).
+
+start_link() ->
+ supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+init([]) ->
+ Procs = [],
+ {ok, {{one_for_one, 1, 5}, Procs}}.
+endef
+
+define tpl_gen_server
+-module($(n)).
+-behaviour(gen_server).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_server.
+-export([init/1]).
+-export([handle_call/3]).
+-export([handle_cast/2]).
+-export([handle_info/2]).
+-export([terminate/2]).
+-export([code_change/3]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_server:start_link(?MODULE, [], []).
+
+%% gen_server.
+
+init([]) ->
+ {ok, #state{}}.
+
+handle_call(_Request, _From, State) ->
+ {reply, ignored, State}.
+
+handle_cast(_Msg, State) ->
+ {noreply, State}.
+
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+endef
+
+define tpl_module
+-module($(n)).
+-export([]).
+endef
+
+define tpl_cowboy_http
+-module($(n)).
+-behaviour(cowboy_http_handler).
+
+-export([init/3]).
+-export([handle/2]).
+-export([terminate/3]).
+
+-record(state, {
+}).
+
+init(_, Req, _Opts) ->
+ {ok, Req, #state{}}.
+
+handle(Req, State=#state{}) ->
+ {ok, Req2} = cowboy_req:reply(200, Req),
+ {ok, Req2, State}.
+
+terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_gen_fsm
+-module($(n)).
+-behaviour(gen_fsm).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_fsm.
+-export([init/1]).
+-export([state_name/2]).
+-export([handle_event/3]).
+-export([state_name/3]).
+-export([handle_sync_event/4]).
+-export([handle_info/3]).
+-export([terminate/3]).
+-export([code_change/4]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_fsm:start_link(?MODULE, [], []).
+
+%% gen_fsm.
+
+init([]) ->
+ {ok, state_name, #state{}}.
+
+state_name(_Event, StateData) ->
+ {next_state, state_name, StateData}.
+
+handle_event(_Event, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+state_name(_Event, _From, StateData) ->
+ {reply, ignored, state_name, StateData}.
+
+handle_sync_event(_Event, _From, StateName, StateData) ->
+ {reply, ignored, StateName, StateData}.
+
+handle_info(_Info, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+terminate(_Reason, _StateName, _StateData) ->
+ ok.
+
+code_change(_OldVsn, StateName, StateData, _Extra) ->
+ {ok, StateName, StateData}.
+endef
+
+define tpl_gen_statem
+-module($(n)).
+-behaviour(gen_statem).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_statem.
+-export([callback_mode/0]).
+-export([init/1]).
+-export([state_name/3]).
+-export([handle_event/4]).
+-export([terminate/3]).
+-export([code_change/4]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_statem:start_link(?MODULE, [], []).
+
+%% gen_statem.
+
+callback_mode() ->
+ state_functions.
+
+init([]) ->
+ {ok, state_name, #state{}}.
+
+state_name(_EventType, _EventData, StateData) ->
+ {next_state, state_name, StateData}.
+
+handle_event(_EventType, _EventData, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+terminate(_Reason, _StateName, _StateData) ->
+ ok.
+
+code_change(_OldVsn, StateName, StateData, _Extra) ->
+ {ok, StateName, StateData}.
+endef
+
+define tpl_cowboy_loop
+-module($(n)).
+-behaviour(cowboy_loop_handler).
+
+-export([init/3]).
+-export([info/3]).
+-export([terminate/3]).
+
+-record(state, {
+}).
+
+init(_, Req, _Opts) ->
+ {loop, Req, #state{}, 5000, hibernate}.
+
+info(_Info, Req, State) ->
+ {loop, Req, State, hibernate}.
+
+terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_cowboy_rest
+-module($(n)).
+
+-export([init/3]).
+-export([content_types_provided/2]).
+-export([get_html/2]).
+
+init(_, _Req, _Opts) ->
+ {upgrade, protocol, cowboy_rest}.
+
+content_types_provided(Req, State) ->
+ {[{{<<"text">>, <<"html">>, '*'}, get_html}], Req, State}.
+
+get_html(Req, State) ->
+ {<<"<html><body>This is REST!</body></html>">>, Req, State}.
+endef
+
+define tpl_cowboy_ws
+-module($(n)).
+-behaviour(cowboy_websocket_handler).
+
+-export([init/3]).
+-export([websocket_init/3]).
+-export([websocket_handle/3]).
+-export([websocket_info/3]).
+-export([websocket_terminate/3]).
+
+-record(state, {
+}).
+
+init(_, _, _) ->
+ {upgrade, protocol, cowboy_websocket}.
+
+websocket_init(_, Req, _Opts) ->
+ Req2 = cowboy_req:compact(Req),
+ {ok, Req2, #state{}}.
+
+websocket_handle({text, Data}, Req, State) ->
+ {reply, {text, Data}, Req, State};
+websocket_handle({binary, Data}, Req, State) ->
+ {reply, {binary, Data}, Req, State};
+websocket_handle(_Frame, Req, State) ->
+ {ok, Req, State}.
+
+websocket_info(_Info, Req, State) ->
+ {ok, Req, State}.
+
+websocket_terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_ranch_protocol
+-module($(n)).
+-behaviour(ranch_protocol).
+
+-export([start_link/4]).
+-export([init/4]).
+
+-type opts() :: [].
+-export_type([opts/0]).
+
+-record(state, {
+ socket :: inet:socket(),
+ transport :: module()
+}).
+
+start_link(Ref, Socket, Transport, Opts) ->
+ Pid = spawn_link(?MODULE, init, [Ref, Socket, Transport, Opts]),
+ {ok, Pid}.
+
+-spec init(ranch:ref(), inet:socket(), module(), opts()) -> ok.
+init(Ref, Socket, Transport, _Opts) ->
+ ok = ranch:accept_ack(Ref),
+ loop(#state{socket=Socket, transport=Transport}).
+
+loop(State) ->
+ loop(State).
+endef
+
+# Plugin-specific targets.
+
+ifndef WS
+ifdef SP
+WS = $(subst a,,a $(wordlist 1,$(SP),a a a a a a a a a a a a a a a a a a a a))
+else
+WS = $(tab)
+endif
+endif
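+
+# For example, `make bootstrap SP=4` renders the generated files with
+# four-space indentation instead of the default tab, and records SP in the
+# generated Makefile so later `make new` calls stay consistent.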
+
+bootstrap:
+ifneq ($(wildcard src/),)
+ $(error Error: src/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(eval n := $(PROJECT)_sup)
+ $(verbose) $(call core_render,bs_Makefile,Makefile)
+ $(verbose) echo "include erlang.mk" >> Makefile
+ $(verbose) mkdir src/
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc,src/$(PROJECT).app.src)
+endif
+ $(verbose) $(call core_render,bs_app,src/$(PROJECT)_app.erl)
+ $(verbose) $(call core_render,tpl_supervisor,src/$(PROJECT)_sup.erl)
+
+bootstrap-lib:
+ifneq ($(wildcard src/),)
+ $(error Error: src/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(verbose) $(call core_render,bs_Makefile,Makefile)
+ $(verbose) echo "include erlang.mk" >> Makefile
+ $(verbose) mkdir src/
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc_lib,src/$(PROJECT).app.src)
+endif
+
+bootstrap-rel:
+ifneq ($(wildcard relx.config),)
+ $(error Error: relx.config already exists)
+endif
+ifneq ($(wildcard config/),)
+ $(error Error: config/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(verbose) $(call core_render,bs_relx_config,relx.config)
+ $(verbose) mkdir config/
+ $(verbose) $(call core_render,bs_sys_config,config/sys.config)
+ $(verbose) $(call core_render,bs_vm_args,config/vm.args)
+
+new-app:
+ifndef in
+ $(error Usage: $(MAKE) new-app in=APP)
+endif
+ifneq ($(wildcard $(APPS_DIR)/$in),)
+ $(error Error: Application $in already exists)
+endif
+ $(eval p := $(in))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(eval n := $(in)_sup)
+ $(verbose) mkdir -p $(APPS_DIR)/$p/src/
+ $(verbose) $(call core_render,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile)
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc,$(APPS_DIR)/$p/src/$p.app.src)
+endif
+ $(verbose) $(call core_render,bs_app,$(APPS_DIR)/$p/src/$p_app.erl)
+ $(verbose) $(call core_render,tpl_supervisor,$(APPS_DIR)/$p/src/$p_sup.erl)
+
+new-lib:
+ifndef in
+ $(error Usage: $(MAKE) new-lib in=APP)
+endif
+ifneq ($(wildcard $(APPS_DIR)/$in),)
+ $(error Error: Application $in already exists)
+endif
+ $(eval p := $(in))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(verbose) mkdir -p $(APPS_DIR)/$p/src/
+ $(verbose) $(call core_render,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile)
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc_lib,$(APPS_DIR)/$p/src/$p.app.src)
+endif
+
+new:
+ifeq ($(wildcard src/)$(in),)
+ $(error Error: src/ directory does not exist)
+endif
+ifndef t
+ $(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP])
+endif
+ifndef n
+ $(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP])
+endif
+ifdef in
+ $(verbose) $(call core_render,tpl_$(t),$(APPS_DIR)/$(in)/src/$(n).erl)
+else
+ $(verbose) $(call core_render,tpl_$(t),src/$(n).erl)
+endif
+
+list-templates:
+	$(verbose) echo Available templates:
+ $(verbose) printf " %s\n" $(sort $(patsubst tpl_%,%,$(filter tpl_%,$(.VARIABLES))))
+
+# Copyright (c) 2014-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: clean-c_src distclean-c_src-env
+
+# Configuration.
+
+C_SRC_DIR ?= $(CURDIR)/c_src
+C_SRC_ENV ?= $(C_SRC_DIR)/env.mk
+C_SRC_OUTPUT ?= $(CURDIR)/priv/$(PROJECT)
+C_SRC_TYPE ?= shared
+
+# System type and C compiler/flags.
+
+ifeq ($(PLATFORM),msys2)
+ C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?= .exe
+ C_SRC_OUTPUT_SHARED_EXTENSION ?= .dll
+else
+ C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?=
+ C_SRC_OUTPUT_SHARED_EXTENSION ?= .so
+endif
+
+ifeq ($(C_SRC_TYPE),shared)
+ C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_SHARED_EXTENSION)
+else
+ C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_EXECUTABLE_EXTENSION)
+endif
+
+ifeq ($(PLATFORM),msys2)
+# We hardcode the compiler used on MSYS2. The default CC=cc does
+# not produce working code. The "gcc" MSYS2 package also doesn't.
+ CC = /mingw64/bin/gcc
+ export CC
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),darwin)
+ CC ?= cc
+ CFLAGS ?= -O3 -std=c99 -arch x86_64 -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -arch x86_64 -Wall
+ LDFLAGS ?= -arch x86_64 -flat_namespace -undefined suppress
+else ifeq ($(PLATFORM),freebsd)
+ CC ?= cc
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),linux)
+ CC ?= gcc
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+endif
+
+ifneq ($(PLATFORM),msys2)
+ CFLAGS += -fPIC
+ CXXFLAGS += -fPIC
+endif
+
+CFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
+CXXFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
+
+LDLIBS += -L"$(ERL_INTERFACE_LIB_DIR)" -lei
+
+# Verbosity.
+
+c_verbose_0 = @echo " C " $(filter-out $(notdir $(MAKEFILE_LIST) $(C_SRC_ENV)),$(^F));
+c_verbose = $(c_verbose_$(V))
+
+cpp_verbose_0 = @echo " CPP " $(filter-out $(notdir $(MAKEFILE_LIST) $(C_SRC_ENV)),$(^F));
+cpp_verbose = $(cpp_verbose_$(V))
+
+link_verbose_0 = @echo " LD " $(@F);
+link_verbose = $(link_verbose_$(V))
+
+# Targets.
+
+ifeq ($(wildcard $(C_SRC_DIR)),)
+else ifneq ($(wildcard $(C_SRC_DIR)/Makefile),)
+app:: app-c_src
+
+test-build:: app-c_src
+
+app-c_src:
+ $(MAKE) -C $(C_SRC_DIR)
+
+clean::
+ $(MAKE) -C $(C_SRC_DIR) clean
+
+else
+
+ifeq ($(SOURCES),)
+SOURCES := $(sort $(foreach pat,*.c *.C *.cc *.cpp,$(call core_find,$(C_SRC_DIR)/,$(pat))))
+endif
+OBJECTS = $(addsuffix .o, $(basename $(SOURCES)))
+
+COMPILE_C = $(c_verbose) $(CC) $(CFLAGS) $(CPPFLAGS) -c
+COMPILE_CPP = $(cpp_verbose) $(CXX) $(CXXFLAGS) $(CPPFLAGS) -c
+
+app:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
+
+test-build:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
+
+$(C_SRC_OUTPUT_FILE): $(OBJECTS)
+ $(verbose) mkdir -p $(dir $@)
+ $(link_verbose) $(CC) $(OBJECTS) \
+ $(LDFLAGS) $(if $(filter $(C_SRC_TYPE),shared),-shared) $(LDLIBS) \
+ -o $(C_SRC_OUTPUT_FILE)
+
+$(OBJECTS): $(MAKEFILE_LIST) $(C_SRC_ENV)
+
+%.o: %.c
+ $(COMPILE_C) $(OUTPUT_OPTION) $<
+
+%.o: %.cc
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+%.o: %.C
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+%.o: %.cpp
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+clean:: clean-c_src
+
+clean-c_src:
+ $(gen_verbose) rm -f $(C_SRC_OUTPUT_FILE) $(OBJECTS)
+
+endif
+
+ifneq ($(wildcard $(C_SRC_DIR)),)
+ERL_ERTS_DIR = $(shell $(ERL) -eval 'io:format("~s~n", [code:lib_dir(erts)]), halt().')
+
+$(C_SRC_ENV):
+ $(verbose) $(ERL) -eval "file:write_file(\"$(call core_native_path,$(C_SRC_ENV))\", \
+ io_lib:format( \
+ \"# Generated by Erlang.mk. Edit at your own risk!~n~n\" \
+ \"ERTS_INCLUDE_DIR ?= ~s/erts-~s/include/~n\" \
+ \"ERL_INTERFACE_INCLUDE_DIR ?= ~s~n\" \
+ \"ERL_INTERFACE_LIB_DIR ?= ~s~n\" \
+ \"ERTS_DIR ?= $(ERL_ERTS_DIR)~n\", \
+ [code:root_dir(), erlang:system_info(version), \
+ code:lib_dir(erl_interface, include), \
+ code:lib_dir(erl_interface, lib)])), \
+ halt()."
+
+distclean:: distclean-c_src-env
+
+distclean-c_src-env:
+ $(gen_verbose) rm -f $(C_SRC_ENV)
+
+-include $(C_SRC_ENV)
+
+ifneq ($(ERL_ERTS_DIR),$(ERTS_DIR))
+$(shell rm -f $(C_SRC_ENV))
+endif
+endif
+
+# Templates.
+
+define bs_c_nif
+#include "erl_nif.h"
+
+static int loads = 0;
+
+static int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
+{
+ /* Initialize private data. */
+ *priv_data = NULL;
+
+ loads++;
+
+ return 0;
+}
+
+static int upgrade(ErlNifEnv* env, void** priv_data, void** old_priv_data, ERL_NIF_TERM load_info)
+{
+ /* Convert the private data to the new version. */
+ *priv_data = *old_priv_data;
+
+ loads++;
+
+ return 0;
+}
+
+static void unload(ErlNifEnv* env, void* priv_data)
+{
+ if (loads == 1) {
+ /* Destroy the private data. */
+ }
+
+ loads--;
+}
+
+static ERL_NIF_TERM hello(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ if (enif_is_atom(env, argv[0])) {
+ return enif_make_tuple2(env,
+ enif_make_atom(env, "hello"),
+ argv[0]);
+ }
+
+ return enif_make_tuple2(env,
+ enif_make_atom(env, "error"),
+ enif_make_atom(env, "badarg"));
+}
+
+static ErlNifFunc nif_funcs[] = {
+ {"hello", 1, hello}
+};
+
+ERL_NIF_INIT($n, nif_funcs, load, NULL, upgrade, unload)
+endef
+
+define bs_erl_nif
+-module($n).
+
+-export([hello/1]).
+
+-on_load(on_load/0).
+on_load() ->
+ PrivDir = case code:priv_dir(?MODULE) of
+ {error, _} ->
+ AppPath = filename:dirname(filename:dirname(code:which(?MODULE))),
+ filename:join(AppPath, "priv");
+ Path ->
+ Path
+ end,
+ erlang:load_nif(filename:join(PrivDir, atom_to_list(?MODULE)), 0).
+
+hello(_) ->
+ erlang:nif_error({not_loaded, ?MODULE}).
+endef
+
+new-nif:
+ifneq ($(wildcard $(C_SRC_DIR)/$n.c),)
+ $(error Error: $(C_SRC_DIR)/$n.c already exists)
+endif
+ifneq ($(wildcard src/$n.erl),)
+ $(error Error: src/$n.erl already exists)
+endif
+ifndef n
+ $(error Usage: $(MAKE) new-nif n=NAME [in=APP])
+endif
+ifdef in
+ $(verbose) $(MAKE) -C $(APPS_DIR)/$(in)/ new-nif n=$n in=
+else
+ $(verbose) mkdir -p $(C_SRC_DIR) src/
+ $(verbose) $(call core_render,bs_c_nif,$(C_SRC_DIR)/$n.c)
+ $(verbose) $(call core_render,bs_erl_nif,src/$n.erl)
+endif
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: ci ci-prepare ci-setup
+
+CI_OTP ?=
+CI_HIPE ?=
+CI_ERLLVM ?=
+
+ifeq ($(CI_VM),native)
+ERLC_OPTS += +native
+TEST_ERLC_OPTS += +native
+else ifeq ($(CI_VM),erllvm)
+ERLC_OPTS += +native +'{hipe, [to_llvm]}'
+TEST_ERLC_OPTS += +native +'{hipe, [to_llvm]}'
+endif
+
+ifeq ($(strip $(CI_OTP) $(CI_HIPE) $(CI_ERLLVM)),)
+ci::
+else
+
+ci:: $(addprefix ci-,$(CI_OTP) $(addsuffix -native,$(CI_HIPE)) $(addsuffix -erllvm,$(CI_ERLLVM)))
+
+ci-prepare: $(addprefix $(KERL_INSTALL_DIR)/,$(CI_OTP) $(addsuffix -native,$(CI_HIPE)))
+
+ci-setup::
+ $(verbose) :
+
+ci-extra::
+ $(verbose) :
+
+ci_verbose_0 = @echo " CI " $(1);
+ci_verbose = $(ci_verbose_$(V))
+
+define ci_target
+ci-$1: $(KERL_INSTALL_DIR)/$2
+ $(verbose) $(MAKE) --no-print-directory clean
+ $(ci_verbose) \
+ PATH="$(KERL_INSTALL_DIR)/$2/bin:$(PATH)" \
+ CI_OTP_RELEASE="$1" \
+ CT_OPTS="-label $1" \
+ CI_VM="$3" \
+ $(MAKE) ci-setup tests
+ $(verbose) $(MAKE) --no-print-directory ci-extra
+endef
+
+$(foreach otp,$(CI_OTP),$(eval $(call ci_target,$(otp),$(otp),otp)))
+$(foreach otp,$(CI_HIPE),$(eval $(call ci_target,$(otp)-native,$(otp)-native,native)))
+$(foreach otp,$(CI_ERLLVM),$(eval $(call ci_target,$(otp)-erllvm,$(otp)-native,erllvm)))
+
+$(foreach otp,$(filter-out $(ERLANG_OTP),$(CI_OTP)),$(eval $(call kerl_otp_target,$(otp))))
+$(foreach otp,$(filter-out $(ERLANG_HIPE),$(sort $(CI_HIPE) $(CI_ERLLVM))),$(eval $(call kerl_hipe_target,$(otp))))
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Continuous Integration targets:" \
+ " ci Run '$(MAKE) tests' on all configured Erlang versions." \
+ "" \
+ "The CI_OTP variable must be defined with the Erlang versions" \
+ "that must be tested. For example: CI_OTP = OTP-17.3.4 OTP-17.5.3"
+
+endif
+
+# Copyright (c) 2020, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifdef CONCUERROR_TESTS
+
+.PHONY: concuerror distclean-concuerror
+
+# Configuration
+
+CONCUERROR_LOGS_DIR ?= $(CURDIR)/logs
+CONCUERROR_OPTS ?=
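+# CONCUERROR_TESTS takes space-separated module:function pairs, e.g.
+# (hypothetical names): CONCUERROR_TESTS = my_mod:test1 my_mod:test2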
+
+# Core targets.
+
+check:: concuerror
+
+ifndef KEEP_LOGS
+distclean:: distclean-concuerror
+endif
+
+# Plugin-specific targets.
+
+$(ERLANG_MK_TMP)/Concuerror/bin/concuerror: | $(ERLANG_MK_TMP)
+ $(verbose) git clone https://github.com/parapluu/Concuerror $(ERLANG_MK_TMP)/Concuerror
+ $(verbose) $(MAKE) -C $(ERLANG_MK_TMP)/Concuerror
+
+$(CONCUERROR_LOGS_DIR):
+ $(verbose) mkdir -p $(CONCUERROR_LOGS_DIR)
+
+define concuerror_html_report
+<!DOCTYPE html>
+<html lang="en">
+<head>
+<meta charset="utf-8">
+<title>Concuerror HTML report</title>
+</head>
+<body>
+<h1>Concuerror HTML report</h1>
+<p>Generated on $(concuerror_date)</p>
+<ul>
+$(foreach t,$(concuerror_targets),<li><a href="$(t).txt">$(t)</a></li>)
+</ul>
+</body>
+</html>
+endef
+
+concuerror: $(addprefix concuerror-,$(subst :,-,$(CONCUERROR_TESTS)))
+ $(eval concuerror_date := $(shell date))
+ $(eval concuerror_targets := $^)
+ $(verbose) $(call core_render,concuerror_html_report,$(CONCUERROR_LOGS_DIR)/concuerror.html)
+
+define concuerror_target
+.PHONY: concuerror-$1-$2
+
+concuerror-$1-$2: test-build | $(ERLANG_MK_TMP)/Concuerror/bin/concuerror $(CONCUERROR_LOGS_DIR)
+ $(ERLANG_MK_TMP)/Concuerror/bin/concuerror \
+ --pa $(CURDIR)/ebin --pa $(TEST_DIR) \
+ -o $(CONCUERROR_LOGS_DIR)/concuerror-$1-$2.txt \
+ $$(CONCUERROR_OPTS) -m $1 -t $2
+endef
+
+$(foreach test,$(CONCUERROR_TESTS),$(eval $(call concuerror_target,$(firstword $(subst :, ,$(test))),$(lastword $(subst :, ,$(test))))))
+
+distclean-concuerror:
+ $(gen_verbose) rm -rf $(CONCUERROR_LOGS_DIR)
+
+endif
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: ct apps-ct distclean-ct
+
+# Configuration.
+
+CT_OPTS ?=
+
+ifneq ($(wildcard $(TEST_DIR)),)
+ifndef CT_SUITES
+CT_SUITES := $(sort $(subst _SUITE.erl,,$(notdir $(call core_find,$(TEST_DIR)/,*_SUITE.erl))))
+endif
+endif
+CT_SUITES ?=
+CT_LOGS_DIR ?= $(CURDIR)/logs
+
+# Core targets.
+
+tests:: ct
+
+ifndef KEEP_LOGS
+distclean:: distclean-ct
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Common_test targets:" \
+ " ct Run all the common_test suites for this project" \
+ "" \
+ "All your common_test suites have their associated targets." \
+		"A suite named http_SUITE can be run using the ct-http target."
+
+# Plugin-specific targets.
+
+CT_RUN = ct_run \
+ -no_auto_compile \
+ -noinput \
+ -pa $(CURDIR)/ebin $(TEST_DIR) \
+ -dir $(TEST_DIR) \
+ -logdir $(CT_LOGS_DIR)
+
+ifeq ($(CT_SUITES),)
+ct: $(if $(IS_APP)$(ROOT_DIR),,apps-ct)
+else
+# We do not run tests if we are in an apps/* directory with no test directory.
+ifneq ($(IS_APP)$(wildcard $(TEST_DIR)),1)
+ct: test-build $(if $(IS_APP)$(ROOT_DIR),,apps-ct)
+ $(verbose) mkdir -p $(CT_LOGS_DIR)
+ $(gen_verbose) $(CT_RUN) -sname ct_$(PROJECT) -suite $(addsuffix _SUITE,$(CT_SUITES)) $(CT_OPTS)
+endif
+endif
+
+ifneq ($(ALL_APPS_DIRS),)
+define ct_app_target
+apps-ct-$1: test-build
+ $$(MAKE) -C $1 ct IS_APP=1
+endef
+
+$(foreach app,$(ALL_APPS_DIRS),$(eval $(call ct_app_target,$(app))))
+
+apps-ct: $(addprefix apps-ct-,$(ALL_APPS_DIRS))
+endif
+
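+# t=GROUP restricts a ct-SUITE run to one group, t=GROUP:CASE to one case
+# within that group, and c=CASE to a single case, e.g. (hypothetical names):
+# make ct-http t=slow_tests:request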
+ifdef t
+ifeq (,$(findstring :,$t))
+CT_EXTRA = -group $t
+else
+t_words = $(subst :, ,$t)
+CT_EXTRA = -group $(firstword $(t_words)) -case $(lastword $(t_words))
+endif
+else
+ifdef c
+CT_EXTRA = -case $c
+else
+CT_EXTRA =
+endif
+endif
+
+define ct_suite_target
+ct-$(1): test-build
+ $(verbose) mkdir -p $(CT_LOGS_DIR)
+ $(gen_verbose_esc) $(CT_RUN) -sname ct_$(PROJECT) -suite $(addsuffix _SUITE,$(1)) $(CT_EXTRA) $(CT_OPTS)
+endef
+
+$(foreach test,$(CT_SUITES),$(eval $(call ct_suite_target,$(test))))
+
+distclean-ct:
+ $(gen_verbose) rm -rf $(CT_LOGS_DIR)
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: plt distclean-plt dialyze
+
+# Configuration.
+
+DIALYZER_PLT ?= $(CURDIR)/.$(PROJECT).plt
+export DIALYZER_PLT
+
+PLT_APPS ?=
+DIALYZER_DIRS ?= --src -r $(wildcard src) $(ALL_APPS_DIRS)
+DIALYZER_OPTS ?= -Werror_handling -Wrace_conditions -Wunmatched_returns # -Wunderspecs
+DIALYZER_PLT_OPTS ?=
+
+# Core targets.
+
+check:: dialyze
+
+distclean:: distclean-plt
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Dialyzer targets:" \
+ " plt Build a PLT file for this project" \
+ " dialyze Analyze the project using Dialyzer"
+
+# Plugin-specific targets.
+
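+# filter_opts.erl keeps only the -D, -I and -pa options (and their values)
+# from the plain arguments, so that the relevant parts of ERLC_OPTS can be
+# forwarded to Dialyzer below.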
+define filter_opts.erl
+ Opts = init:get_plain_arguments(),
+ {Filtered, _} = lists:foldl(fun
+ (O, {Os, true}) -> {[O|Os], false};
+ (O = "-D", {Os, _}) -> {[O|Os], true};
+ (O = [\\$$-, \\$$D, _ | _], {Os, _}) -> {[O|Os], false};
+ (O = "-I", {Os, _}) -> {[O|Os], true};
+ (O = [\\$$-, \\$$I, _ | _], {Os, _}) -> {[O|Os], false};
+ (O = "-pa", {Os, _}) -> {[O|Os], true};
+ (_, Acc) -> Acc
+ end, {[], false}, Opts),
+ io:format("~s~n", [string:join(lists:reverse(Filtered), " ")]),
+ halt().
+endef
+
+# DIALYZER_PLT is a variable understood directly by Dialyzer.
+#
+# We append the path to erts at the end of the PLT. This works
+# because the PLT file is in the external term format and the
+# function binary_to_term/1 ignores any trailing data.
+$(DIALYZER_PLT): deps app
+ $(eval DEPS_LOG := $(shell test -f $(ERLANG_MK_TMP)/deps.log && \
+ while read p; do test -d $$p/ebin && echo $$p/ebin; done <$(ERLANG_MK_TMP)/deps.log))
+ $(verbose) dialyzer --build_plt $(DIALYZER_PLT_OPTS) --apps \
+ erts kernel stdlib $(PLT_APPS) $(OTP_DEPS) $(LOCAL_DEPS) $(DEPS_LOG) || test $$? -eq 2
+ $(verbose) $(ERL) -eval 'io:format("~n~s~n", [code:lib_dir(erts)]), halt().' >> $@
+
+plt: $(DIALYZER_PLT)
+
+distclean-plt:
+ $(gen_verbose) rm -f $(DIALYZER_PLT)
+
+ifneq ($(wildcard $(DIALYZER_PLT)),)
+dialyze: $(if $(filter --src,$(DIALYZER_DIRS)),,deps app)
+ $(verbose) if ! tail -n1 $(DIALYZER_PLT) | \
+ grep -q "^`$(ERL) -eval 'io:format("~s", [code:lib_dir(erts)]), halt().'`$$"; then \
+ rm $(DIALYZER_PLT); \
+ $(MAKE) plt; \
+ fi
+else
+dialyze: $(DIALYZER_PLT)
+endif
+ $(verbose) dialyzer --no_native `$(ERL) \
+ -eval "$(subst $(newline),,$(call escape_dquotes,$(call filter_opts.erl)))" \
+ -extra $(ERLC_OPTS)` $(DIALYZER_DIRS) $(DIALYZER_OPTS) $(if $(wildcard ebin/),-pa ebin/)
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-edoc edoc
+
+# Configuration.
+
+EDOC_OPTS ?=
+EDOC_SRC_DIRS ?=
+EDOC_OUTPUT ?= doc
+
+define edoc.erl
+ SrcPaths = lists:foldl(fun(P, Acc) ->
+ filelib:wildcard(atom_to_list(P) ++ "/{src,c_src}") ++ Acc
+ end, [], [$(call comma_list,$(patsubst %,'%',$(call core_native_path,$(EDOC_SRC_DIRS))))]),
+ DefaultOpts = [{dir, "$(EDOC_OUTPUT)"}, {source_path, SrcPaths}, {subpackages, false}],
+ edoc:application($(1), ".", [$(2)] ++ DefaultOpts),
+ halt(0).
+endef
+
+# Core targets.
+
+ifneq ($(strip $(EDOC_SRC_DIRS)$(wildcard doc/overview.edoc)),)
+docs:: edoc
+endif
+
+distclean:: distclean-edoc
+
+# Plugin-specific targets.
+
+edoc: distclean-edoc doc-deps
+ $(gen_verbose) $(call erlang,$(call edoc.erl,$(PROJECT),$(EDOC_OPTS)))
+
+distclean-edoc:
+ $(gen_verbose) rm -f $(EDOC_OUTPUT)/*.css $(EDOC_OUTPUT)/*.html $(EDOC_OUTPUT)/*.png $(EDOC_OUTPUT)/edoc-info
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Configuration.
+
+DTL_FULL_PATH ?=
+DTL_PATH ?= templates/
+DTL_PREFIX ?=
+DTL_SUFFIX ?= _dtl
+DTL_OPTS ?=
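+# A template templates/foo.dtl compiles to the module
+# $(DTL_PREFIX)foo$(DTL_SUFFIX) (foo_dtl by default); with DTL_FULL_PATH set,
+# subdirectories become part of the module name, slashes turning into
+# underscores.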
+
+# Verbosity.
+
+dtl_verbose_0 = @echo " DTL " $(filter %.dtl,$(?F));
+dtl_verbose = $(dtl_verbose_$(V))
+
+# Core targets.
+
+DTL_PATH := $(abspath $(DTL_PATH))
+DTL_FILES := $(sort $(call core_find,$(DTL_PATH),*.dtl))
+
+ifneq ($(DTL_FILES),)
+
+DTL_NAMES = $(addprefix $(DTL_PREFIX),$(addsuffix $(DTL_SUFFIX),$(DTL_FILES:$(DTL_PATH)/%.dtl=%)))
+DTL_MODULES = $(if $(DTL_FULL_PATH),$(subst /,_,$(DTL_NAMES)),$(notdir $(DTL_NAMES)))
+BEAM_FILES += $(addsuffix .beam,$(addprefix ebin/,$(DTL_MODULES)))
+
+ifneq ($(words $(DTL_FILES)),0)
+# Rebuild templates when the Makefile changes.
+$(ERLANG_MK_TMP)/last-makefile-change-erlydtl: $(MAKEFILE_LIST) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(DTL_FILES); \
+ fi
+ $(verbose) touch $@
+
+ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change-erlydtl
+endif
+
+define erlydtl_compile.erl
+ [begin
+ Module0 = case "$(strip $(DTL_FULL_PATH))" of
+ "" ->
+ filename:basename(F, ".dtl");
+ _ ->
+ "$(call core_native_path,$(DTL_PATH))/" ++ F2 = filename:rootname(F, ".dtl"),
+ re:replace(F2, "/", "_", [{return, list}, global])
+ end,
+ Module = list_to_atom("$(DTL_PREFIX)" ++ string:to_lower(Module0) ++ "$(DTL_SUFFIX)"),
+ case erlydtl:compile(F, Module, [$(DTL_OPTS)] ++ [{out_dir, "ebin/"}, return_errors]) of
+ ok -> ok;
+ {ok, _} -> ok
+ end
+ end || F <- string:tokens("$(1)", " ")],
+ halt().
+endef
+
+ebin/$(PROJECT).app:: $(DTL_FILES) | ebin/
+ $(if $(strip $?),\
+ $(dtl_verbose) $(call erlang,$(call erlydtl_compile.erl,$(call core_native_path,$?)),\
+ -pa ebin/))
+
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, Dave Cottlehuber <dch@skunkwerks.at>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-escript escript escript-zip
+
+# Configuration.
+
+ESCRIPT_NAME ?= $(PROJECT)
+ESCRIPT_FILE ?= $(ESCRIPT_NAME)
+
+ESCRIPT_SHEBANG ?= /usr/bin/env escript
+ESCRIPT_COMMENT ?= This is an -*- erlang -*- file
+ESCRIPT_EMU_ARGS ?= -escript main $(ESCRIPT_NAME)
+
+ESCRIPT_ZIP ?= 7z a -tzip -mx=9 -mtc=off $(if $(filter-out 0,$(V)),,> /dev/null)
+ESCRIPT_ZIP_FILE ?= $(ERLANG_MK_TMP)/escript.zip
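+# Note: the default ESCRIPT_ZIP command relies on the 7z binary being
+# available on the PATH.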
+
+# Core targets.
+
+distclean:: distclean-escript
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Escript targets:" \
+ " escript Build an executable escript archive" \
+
+# Plugin-specific targets.
+
+escript-zip:: FULL=1
+escript-zip:: deps app
+	$(verbose) mkdir -p $(dir $(ESCRIPT_ZIP_FILE))
+ $(verbose) rm -f $(ESCRIPT_ZIP_FILE)
+ $(gen_verbose) cd .. && $(ESCRIPT_ZIP) $(ESCRIPT_ZIP_FILE) $(PROJECT)/ebin/*
+ifneq ($(DEPS),)
+ $(verbose) cd $(DEPS_DIR) && $(ESCRIPT_ZIP) $(ESCRIPT_ZIP_FILE) \
+ $(subst $(DEPS_DIR)/,,$(addsuffix /*,$(wildcard \
+ $(addsuffix /ebin,$(shell cat $(ERLANG_MK_TMP)/deps.log)))))
+endif
+
+escript:: escript-zip
+ $(gen_verbose) printf "%s\n" \
+ "#!$(ESCRIPT_SHEBANG)" \
+ "%% $(ESCRIPT_COMMENT)" \
+ "%%! $(ESCRIPT_EMU_ARGS)" > $(ESCRIPT_FILE)
+ $(verbose) cat $(ESCRIPT_ZIP_FILE) >> $(ESCRIPT_FILE)
+ $(verbose) chmod +x $(ESCRIPT_FILE)
+
+distclean-escript:
+ $(gen_verbose) rm -f $(ESCRIPT_FILE)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, Enrique Fernandez <enrique.fernandez@erlang-solutions.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: eunit apps-eunit
+
+# Configuration
+
+EUNIT_OPTS ?=
+EUNIT_ERL_OPTS ?=
+
+# Core targets.
+
+tests:: eunit
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "EUnit targets:" \
+ " eunit Run all the EUnit tests for this project"
+
+# Plugin-specific targets.
+
+define eunit.erl
+ $(call cover.erl)
+ CoverSetup(),
+ case eunit:test($1, [$(EUNIT_OPTS)]) of
+ ok -> ok;
+ error -> halt(2)
+ end,
+ CoverExport("$(call core_native_path,$(COVER_DATA_DIR))/eunit.coverdata"),
+ halt()
+endef
+
+EUNIT_ERL_OPTS += -pa $(TEST_DIR) $(CURDIR)/ebin
+
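+# t=MODULE runs that module's EUnit tests only; t=MODULE:FUNCTION runs the
+# given zero-arity test or generator, e.g. (hypothetical):
+# make eunit t=my_mod:timeout_test_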
+ifdef t
+ifeq (,$(findstring :,$(t)))
+eunit: test-build cover-data-dir
+ $(gen_verbose) $(call erlang,$(call eunit.erl,['$(t)']),$(EUNIT_ERL_OPTS))
+else
+eunit: test-build cover-data-dir
+ $(gen_verbose) $(call erlang,$(call eunit.erl,fun $(t)/0),$(EUNIT_ERL_OPTS))
+endif
+else
+EUNIT_EBIN_MODS = $(notdir $(basename $(ERL_FILES) $(BEAM_FILES)))
+EUNIT_TEST_MODS = $(notdir $(basename $(call core_find,$(TEST_DIR)/,*.erl)))
+
+EUNIT_MODS = $(foreach mod,$(EUNIT_EBIN_MODS) $(filter-out \
+ $(patsubst %,%_tests,$(EUNIT_EBIN_MODS)),$(EUNIT_TEST_MODS)),'$(mod)')
+
+eunit: test-build $(if $(IS_APP)$(ROOT_DIR),,apps-eunit) cover-data-dir
+ifneq ($(wildcard src/ $(TEST_DIR)),)
+ $(gen_verbose) $(call erlang,$(call eunit.erl,[$(call comma_list,$(EUNIT_MODS))]),$(EUNIT_ERL_OPTS))
+endif
+
+ifneq ($(ALL_APPS_DIRS),)
+apps-eunit: test-build
+ $(verbose) eunit_retcode=0 ; for app in $(ALL_APPS_DIRS); do $(MAKE) -C $$app eunit IS_APP=1; \
+ [ $$? -ne 0 ] && eunit_retcode=1 ; done ; \
+ exit $$eunit_retcode
+endif
+endif
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter proper,$(DEPS) $(TEST_DEPS)),proper)
+.PHONY: proper
+
+# Targets.
+
+tests:: proper
+
+define proper_check.erl
+ $(call cover.erl)
+ code:add_pathsa([
+ "$(call core_native_path,$(CURDIR)/ebin)",
+ "$(call core_native_path,$(DEPS_DIR)/*/ebin)",
+ "$(call core_native_path,$(TEST_DIR))"]),
+ Module = fun(M) ->
+ [true] =:= lists:usort([
+ case atom_to_list(F) of
+ "prop_" ++ _ ->
+ io:format("Testing ~p:~p/0~n", [M, F]),
+ proper:quickcheck(M:F(), nocolors);
+ _ ->
+ true
+ end
+ || {F, 0} <- M:module_info(exports)])
+ end,
+ try begin
+ CoverSetup(),
+ Res = case $(1) of
+ all -> [true] =:= lists:usort([Module(M) || M <- [$(call comma_list,$(3))]]);
+ module -> Module($(2));
+ function -> proper:quickcheck($(2), nocolors)
+ end,
+ CoverExport("$(COVER_DATA_DIR)/proper.coverdata"),
+ Res
+ end of
+ true -> halt(0);
+ _ -> halt(1)
+ catch error:undef ->
+ io:format("Undefined property or module?~n~p~n", [erlang:get_stacktrace()]),
+ halt(0)
+ end.
+endef
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+proper: test-build cover-data-dir
+ $(verbose) $(call erlang,$(call proper_check.erl,module,$(t)))
+else
+proper: test-build cover-data-dir
+ $(verbose) echo Testing $(t)/0
+ $(verbose) $(call erlang,$(call proper_check.erl,function,$(t)()))
+endif
+else
+proper: test-build cover-data-dir
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(wildcard ebin/*.beam) $(call core_find,$(TEST_DIR)/,*.beam))))))
+ $(gen_verbose) $(call erlang,$(call proper_check.erl,all,undefined,$(MODULES)))
+endif
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Verbosity.
+
+proto_verbose_0 = @echo " PROTO " $(filter %.proto,$(?F));
+proto_verbose = $(proto_verbose_$(V))
+
+# Core targets.
+
+ifneq ($(wildcard src/),)
+ifneq ($(filter gpb protobuffs,$(BUILD_DEPS) $(DEPS)),)
+PROTO_FILES := $(filter %.proto,$(ALL_SRC_FILES))
+ERL_FILES += $(addprefix src/,$(patsubst %.proto,%_pb.erl,$(notdir $(PROTO_FILES))))
+
+ifeq ($(PROTO_FILES),)
+$(ERLANG_MK_TMP)/last-makefile-change-protobuffs:
+ $(verbose) :
+else
+# Rebuild proto files when the Makefile changes.
+# We exclude $(PROJECT).d to avoid a circular dependency.
+$(ERLANG_MK_TMP)/last-makefile-change-protobuffs: $(filter-out $(PROJECT).d,$(MAKEFILE_LIST)) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(PROTO_FILES); \
+ fi
+ $(verbose) touch $@
+
+$(PROJECT).d:: $(ERLANG_MK_TMP)/last-makefile-change-protobuffs
+endif
+
+ifeq ($(filter gpb,$(BUILD_DEPS) $(DEPS)),)
+define compile_proto.erl
+ [begin
+ protobuffs_compile:generate_source(F, [
+ {output_include_dir, "./include"},
+ {output_src_dir, "./src"}])
+ end || F <- string:tokens("$1", " ")],
+ halt().
+endef
+else
+define compile_proto.erl
+ [begin
+ gpb_compile:file(F, [
+ {include_as_lib, true},
+ {module_name_suffix, "_pb"},
+ {o_hrl, "./include"},
+ {o_erl, "./src"}])
+ end || F <- string:tokens("$1", " ")],
+ halt().
+endef
+endif
+
+ifneq ($(PROTO_FILES),)
+$(PROJECT).d:: $(PROTO_FILES)
+ $(verbose) mkdir -p ebin/ include/
+ $(if $(strip $?),$(proto_verbose) $(call erlang,$(call compile_proto.erl,$?)))
+endif
+endif
+endif
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: relx-rel relx-relup distclean-relx-rel run
+
+# Configuration.
+
+RELX ?= $(ERLANG_MK_TMP)/relx
+RELX_CONFIG ?= $(CURDIR)/relx.config
+
+RELX_URL ?= https://erlang.mk/res/relx-v3.27.0
+RELX_OPTS ?=
+RELX_OUTPUT_DIR ?= _rel
+RELX_REL_EXT ?=
+RELX_TAR ?= 1
+
+ifdef SFX
+ RELX_TAR = 1
+endif
+
+ifeq ($(firstword $(RELX_OPTS)),-o)
+ RELX_OUTPUT_DIR = $(word 2,$(RELX_OPTS))
+else
+ RELX_OPTS += -o $(RELX_OUTPUT_DIR)
+endif
+
+# Core targets.
+
+ifeq ($(IS_DEP),)
+ifneq ($(wildcard $(RELX_CONFIG)),)
+rel:: relx-rel
+
+relup:: relx-relup
+endif
+endif
+
+distclean:: distclean-relx-rel
+
+# Plugin-specific targets.
+
+$(RELX): | $(ERLANG_MK_TMP)
+ $(gen_verbose) $(call core_http_get,$(RELX),$(RELX_URL))
+ $(verbose) chmod +x $(RELX)
+
+relx-rel: $(RELX) rel-deps app
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) release
+ $(verbose) $(MAKE) relx-post-rel
+ifeq ($(RELX_TAR),1)
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) tar
+endif
+
+relx-relup: $(RELX) rel-deps app
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) release
+ $(MAKE) relx-post-rel
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) relup $(if $(filter 1,$(RELX_TAR)),tar)
+
+distclean-relx-rel:
+ $(gen_verbose) rm -rf $(RELX_OUTPUT_DIR)
+
+# Default hooks.
+relx-post-rel::
+ $(verbose) :
+
+# Run target.
+
+ifeq ($(wildcard $(RELX_CONFIG)),)
+run::
+else
+
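+# get_relx_release.erl reads relx.config and prints "Name Vsn Extended".
+# Versions given as {cmd, ...} are resolved with os:cmd/1; semver-style
+# versions are computed by relx itself, so they are printed as empty here.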
+define get_relx_release.erl
+ {ok, Config} = file:consult("$(call core_native_path,$(RELX_CONFIG))"),
+ {release, {Name, Vsn0}, _} = lists:keyfind(release, 1, Config),
+ Vsn = case Vsn0 of
+ {cmd, Cmd} -> os:cmd(Cmd);
+ semver -> "";
+ {semver, _} -> "";
+ VsnStr -> Vsn0
+ end,
+ Extended = case lists:keyfind(extended_start_script, 1, Config) of
+ {_, true} -> "1";
+ _ -> ""
+ end,
+ io:format("~s ~s ~s", [Name, Vsn, Extended]),
+ halt(0).
+endef
+
+RELX_REL := $(shell $(call erlang,$(get_relx_release.erl)))
+RELX_REL_NAME := $(word 1,$(RELX_REL))
+RELX_REL_VSN := $(word 2,$(RELX_REL))
+RELX_REL_CMD := $(if $(word 3,$(RELX_REL)),console)
+
+ifeq ($(PLATFORM),msys2)
+RELX_REL_EXT := .cmd
+endif
+
+run:: all
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) $(RELX_REL_CMD)
+
+ifdef RELOAD
+rel::
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) ping
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) \
+ eval "io:format(\"~p~n\", [c:lm()])"
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Relx targets:" \
+ " run Compile the project, build the release and run it"
+
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, M Robert Martin <rob@version2beta.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: shell
+
+# Configuration.
+
+SHELL_ERL ?= erl
+SHELL_PATHS ?= $(CURDIR)/ebin $(TEST_DIR)
+SHELL_OPTS ?=
+
+ALL_SHELL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(SHELL_DEPS))
+
+# Core targets
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Shell targets:" \
+		" shell Run an Erlang shell with SHELL_OPTS or a reasonable default"
+
+# Plugin-specific targets.
+
+$(foreach dep,$(SHELL_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+build-shell-deps:
+else
+build-shell-deps: $(ALL_SHELL_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_SHELL_DEPS_DIRS) ; do \
+ if [ -z "$(strip $(FULL))" ] && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ else \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ fi \
+ done
+endif
+
+shell:: build-shell-deps
+ $(gen_verbose) $(SHELL_ERL) -pa $(SHELL_PATHS) $(SHELL_OPTS)
+
+# Copyright 2017, Stanislaw Klekot <dozzie@jarowit.net>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-sphinx sphinx
+
+# Configuration.
+
+SPHINX_BUILD ?= sphinx-build
+SPHINX_SOURCE ?= doc
+SPHINX_CONFDIR ?=
+SPHINX_FORMATS ?= html
+SPHINX_DOCTREES ?= $(ERLANG_MK_TMP)/sphinx.doctrees
+SPHINX_OPTS ?=
+
+#sphinx_html_opts =
+#sphinx_html_output = html
+#sphinx_man_opts =
+#sphinx_man_output = man
+#sphinx_latex_opts =
+#sphinx_latex_output = latex
+
+# Helpers.
+
+sphinx_build_0 = @echo " SPHINX" $1; $(SPHINX_BUILD) -N -q
+sphinx_build_1 = $(SPHINX_BUILD) -N
+sphinx_build_2 = set -x; $(SPHINX_BUILD)
+sphinx_build = $(sphinx_build_$(V))
+
+define sphinx.build
+$(call sphinx_build,$1) -b $1 -d $(SPHINX_DOCTREES) $(if $(SPHINX_CONFDIR),-c $(SPHINX_CONFDIR)) $(SPHINX_OPTS) $(sphinx_$1_opts) -- $(SPHINX_SOURCE) $(call sphinx.output,$1)
+
+endef
+
+define sphinx.output
+$(if $(sphinx_$1_output),$(sphinx_$1_output),$1)
+endef
+
+# Targets.
+
+ifneq ($(wildcard $(if $(SPHINX_CONFDIR),$(SPHINX_CONFDIR),$(SPHINX_SOURCE))/conf.py),)
+docs:: sphinx
+distclean:: distclean-sphinx
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Sphinx targets:" \
+ " sphinx Generate Sphinx documentation." \
+ "" \
+		"ReST sources and the 'conf.py' file are expected in the directory pointed to by" \
+ "SPHINX_SOURCE ('doc' by default). SPHINX_FORMATS lists formats to build (only" \
+ "'html' format is generated by default); target directory can be specified by" \
+ 'setting sphinx_$${format}_output, for example: sphinx_html_output = output/html' \
+ "Additional Sphinx options can be set in SPHINX_OPTS."
+
+# Plugin-specific targets.
+
+sphinx:
+ $(foreach F,$(SPHINX_FORMATS),$(call sphinx.build,$F))
+
+distclean-sphinx:
+ $(gen_verbose) rm -rf $(filter-out $(SPHINX_SOURCE),$(foreach F,$(SPHINX_FORMATS),$(call sphinx.output,$F)))
+
+# Copyright (c) 2017, Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: show-ERL_LIBS show-ERLC_OPTS show-TEST_ERLC_OPTS
+
+show-ERL_LIBS:
+ @echo $(ERL_LIBS)
+
+show-ERLC_OPTS:
+ @$(foreach opt,$(ERLC_OPTS) -pa ebin -I include,echo "$(opt)";)
+
+show-TEST_ERLC_OPTS:
+ @$(foreach opt,$(TEST_ERLC_OPTS) -pa ebin -I include,echo "$(opt)";)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter triq,$(DEPS) $(TEST_DEPS)),triq)
+.PHONY: triq
+
+# Targets.
+
+tests:: triq
+
+define triq_check.erl
+ $(call cover.erl)
+ code:add_pathsa([
+ "$(call core_native_path,$(CURDIR)/ebin)",
+ "$(call core_native_path,$(DEPS_DIR)/*/ebin)",
+ "$(call core_native_path,$(TEST_DIR))"]),
+ try begin
+ CoverSetup(),
+ Res = case $(1) of
+ all -> [true] =:= lists:usort([triq:check(M) || M <- [$(call comma_list,$(3))]]);
+ module -> triq:check($(2));
+ function -> triq:check($(2))
+ end,
+ CoverExport("$(COVER_DATA_DIR)/triq.coverdata"),
+ Res
+ end of
+ true -> halt(0);
+ _ -> halt(1)
+ catch error:undef ->
+ io:format("Undefined property or module?~n~p~n", [erlang:get_stacktrace()]),
+ halt(0)
+ end.
+endef
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+triq: test-build cover-data-dir
+ $(verbose) $(call erlang,$(call triq_check.erl,module,$(t)))
+else
+triq: test-build cover-data-dir
+ $(verbose) echo Testing $(t)/0
+ $(verbose) $(call erlang,$(call triq_check.erl,function,$(t)()))
+endif
+else
+triq: test-build cover-data-dir
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(wildcard ebin/*.beam) $(call core_find,$(TEST_DIR)/,*.beam))))))
+ $(gen_verbose) $(call erlang,$(call triq_check.erl,all,undefined,$(MODULES)))
+endif
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Erlang Solutions Ltd.
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: xref distclean-xref
+
+# Configuration.
+
+ifeq ($(XREF_CONFIG),)
+ XREFR_ARGS :=
+else
+ XREFR_ARGS := -c $(XREF_CONFIG)
+endif
+
+XREFR ?= $(CURDIR)/xrefr
+export XREFR
+
+XREFR_URL ?= https://github.com/inaka/xref_runner/releases/download/1.1.0/xrefr
+
+# Core targets.
+
+help::
+ $(verbose) printf '%s\n' '' \
+ 'Xref targets:' \
+		' xref Run Xrefr using $$XREF_CONFIG as the config file, if defined'
+
+distclean:: distclean-xref
+
+# Plugin-specific targets.
+
+$(XREFR):
+ $(gen_verbose) $(call core_http_get,$(XREFR),$(XREFR_URL))
+ $(verbose) chmod +x $(XREFR)
+
+xref: deps app $(XREFR)
+ $(gen_verbose) $(XREFR) $(XREFR_ARGS)
+
+distclean-xref:
+ $(gen_verbose) rm -rf $(XREFR)
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Viktor Söderqvist <viktor@zuiderkwast.se>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+COVER_REPORT_DIR ?= cover
+COVER_DATA_DIR ?= $(COVER_REPORT_DIR)
+
+ifdef COVER
+COVER_APPS ?= $(notdir $(ALL_APPS_DIRS))
+COVER_DEPS ?=
+endif
+
+# Code coverage for Common Test.
+
+ifdef COVER
+ifdef CT_RUN
+ifneq ($(wildcard $(TEST_DIR)),)
+test-build:: $(TEST_DIR)/ct.cover.spec
+
+$(TEST_DIR)/ct.cover.spec: cover-data-dir
+ $(gen_verbose) printf "%s\n" \
+ "{incl_app, '$(PROJECT)', details}." \
+ "{incl_dirs, '$(PROJECT)', [\"$(call core_native_path,$(CURDIR)/ebin)\" \
+ $(foreach a,$(COVER_APPS),$(comma) \"$(call core_native_path,$(APPS_DIR)/$a/ebin)\") \
+ $(foreach d,$(COVER_DEPS),$(comma) \"$(call core_native_path,$(DEPS_DIR)/$d/ebin)\")]}." \
+ '{export,"$(call core_native_path,$(abspath $(COVER_DATA_DIR))/ct.coverdata)"}.' > $@
+
+CT_RUN += -cover $(TEST_DIR)/ct.cover.spec
+endif
+endif
+endif
+
+# Code coverage for other tools.
+
+ifdef COVER
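+# cover.erl is spliced into the eunit/proper/triq escripts; it provides the
+# CoverSetup/CoverExport funs that cover-compile the listed ebin directories
+# and export the collected coverage data.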
+define cover.erl
+ CoverSetup = fun() ->
+ Dirs = ["$(call core_native_path,$(CURDIR)/ebin)"
+ $(foreach a,$(COVER_APPS),$(comma) "$(call core_native_path,$(APPS_DIR)/$a/ebin)")
+ $(foreach d,$(COVER_DEPS),$(comma) "$(call core_native_path,$(DEPS_DIR)/$d/ebin)")],
+ [begin
+ case filelib:is_dir(Dir) of
+ false -> false;
+ true ->
+ case cover:compile_beam_directory(Dir) of
+ {error, _} -> halt(1);
+ _ -> true
+ end
+ end
+ end || Dir <- Dirs]
+ end,
+ CoverExport = fun(Filename) -> cover:export(Filename) end,
+endef
+else
+define cover.erl
+ CoverSetup = fun() -> ok end,
+ CoverExport = fun(_) -> ok end,
+endef
+endif
+
+# Core targets
+
+ifdef COVER
+ifneq ($(COVER_REPORT_DIR),)
+tests::
+ $(verbose) $(MAKE) --no-print-directory cover-report
+endif
+
+cover-data-dir: | $(COVER_DATA_DIR)
+
+$(COVER_DATA_DIR):
+ $(verbose) mkdir -p $(COVER_DATA_DIR)
+else
+cover-data-dir:
+endif
+
+clean:: coverdata-clean
+
+ifneq ($(COVER_REPORT_DIR),)
+distclean:: cover-report-clean
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Cover targets:" \
+		" cover-report Generate an HTML coverage report from previously collected" \
+ " cover data." \
+ " all.coverdata Merge all coverdata files into all.coverdata." \
+ "" \
+ "If COVER=1 is set, coverage data is generated by the targets eunit and ct. The" \
+		"target tests additionally generates an HTML coverage report from the combined" \
+ "coverdata files from each of these testing tools. HTML reports can be disabled" \
+ "by setting COVER_REPORT_DIR to empty."
+
+# Plugin specific targets
+
+COVERDATA = $(filter-out $(COVER_DATA_DIR)/all.coverdata,$(wildcard $(COVER_DATA_DIR)/*.coverdata))
+
+.PHONY: coverdata-clean
+coverdata-clean:
+ $(gen_verbose) rm -f $(COVER_DATA_DIR)/*.coverdata $(TEST_DIR)/ct.cover.spec
+
+# Merge all coverdata files into one.
+define cover_export.erl
+ $(foreach f,$(COVERDATA),cover:import("$(f)") == ok orelse halt(1),)
+ cover:export("$(COVER_DATA_DIR)/$@"), halt(0).
+endef
+
+all.coverdata: $(COVERDATA) cover-data-dir
+ $(gen_verbose) $(call erlang,$(cover_export.erl))
+
+# These are only defined if COVER_REPORT_DIR is non-empty. Set COVER_REPORT_DIR to
+# empty if you want the coverdata files but not the HTML report.
+ifneq ($(COVER_REPORT_DIR),)
+
+.PHONY: cover-report-clean cover-report
+
+cover-report-clean:
+ $(gen_verbose) rm -rf $(COVER_REPORT_DIR)
+ifneq ($(COVER_REPORT_DIR),$(COVER_DATA_DIR))
+ $(if $(shell ls -A $(COVER_DATA_DIR)/),,$(verbose) rmdir $(COVER_DATA_DIR))
+endif
+
+ifeq ($(COVERDATA),)
+cover-report:
+else
+
+# Modules which include eunit.hrl always contain one line without coverage
+# because eunit defines test/0 which is never called. We compensate for this.
+EUNIT_HRL_MODS = $(subst $(space),$(comma),$(shell \
+ grep -H -e '^\s*-include.*include/eunit\.hrl"' src/*.erl \
+ | sed "s/^src\/\(.*\)\.erl:.*/'\1'/" | uniq))
+
+define cover_report.erl
+ $(foreach f,$(COVERDATA),cover:import("$(f)") == ok orelse halt(1),)
+ Ms = cover:imported_modules(),
+ [cover:analyse_to_file(M, "$(COVER_REPORT_DIR)/" ++ atom_to_list(M)
+ ++ ".COVER.html", [html]) || M <- Ms],
+ Report = [begin {ok, R} = cover:analyse(M, module), R end || M <- Ms],
+ EunitHrlMods = [$(EUNIT_HRL_MODS)],
+ Report1 = [{M, {Y, case lists:member(M, EunitHrlMods) of
+ true -> N - 1; false -> N end}} || {M, {Y, N}} <- Report],
+ TotalY = lists:sum([Y || {_, {Y, _}} <- Report1]),
+ TotalN = lists:sum([N || {_, {_, N}} <- Report1]),
+ Perc = fun(Y, N) -> case Y + N of 0 -> 100; S -> round(100 * Y / S) end end,
+ TotalPerc = Perc(TotalY, TotalN),
+ {ok, F} = file:open("$(COVER_REPORT_DIR)/index.html", [write]),
+ io:format(F, "<!DOCTYPE html><html>~n"
+ "<head><meta charset=\"UTF-8\">~n"
+ "<title>Coverage report</title></head>~n"
+ "<body>~n", []),
+ io:format(F, "<h1>Coverage</h1>~n<p>Total: ~p%</p>~n", [TotalPerc]),
+ io:format(F, "<table><tr><th>Module</th><th>Coverage</th></tr>~n", []),
+ [io:format(F, "<tr><td><a href=\"~p.COVER.html\">~p</a></td>"
+ "<td>~p%</td></tr>~n",
+ [M, M, Perc(Y, N)]) || {M, {Y, N}} <- Report1],
+ How = "$(subst $(space),$(comma)$(space),$(basename $(COVERDATA)))",
+ Date = "$(shell date -u "+%Y-%m-%dT%H:%M:%SZ")",
+ io:format(F, "</table>~n"
+ "<p>Generated using ~s and erlang.mk on ~s.</p>~n"
+ "</body></html>", [How, Date]),
+ halt().
+endef
+
+cover-report:
+ $(verbose) mkdir -p $(COVER_REPORT_DIR)
+ $(gen_verbose) $(call erlang,$(cover_report.erl))
+
+endif
+endif # ifneq ($(COVER_REPORT_DIR),)
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: sfx
+
+ifdef RELX_REL
+ifdef SFX
+
+# Configuration.
+
+SFX_ARCHIVE ?= $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/$(RELX_REL_NAME)-$(RELX_REL_VSN).tar.gz
+SFX_OUTPUT_FILE ?= $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME).run
+
+# Core targets.
+
+rel:: sfx
+
+# Plugin-specific targets.
+
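+# Self-extracting stub: awk computes the line number just past the
+# __ARCHIVE_BELOW__ marker, tail pipes the appended release tarball into tar,
+# and the extracted release is started in console mode.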
+define sfx_stub
+#!/bin/sh
+
+TMPDIR=`mktemp -d`
+ARCHIVE=`awk '/^__ARCHIVE_BELOW__$$/ {print NR + 1; exit 0;}' $$0`
+FILENAME=$$(basename $$0)
+REL=$${FILENAME%.*}
+
+tail -n+$$ARCHIVE $$0 | tar -xzf - -C $$TMPDIR
+
+$$TMPDIR/bin/$$REL console
+RET=$$?
+
+rm -rf $$TMPDIR
+
+exit $$RET
+
+__ARCHIVE_BELOW__
+endef
+
+sfx:
+ $(verbose) $(call core_render,sfx_stub,$(SFX_OUTPUT_FILE))
+ $(gen_verbose) cat $(SFX_ARCHIVE) >> $(SFX_OUTPUT_FILE)
+ $(verbose) chmod +x $(SFX_OUTPUT_FILE)
+
+endif
+endif
+
+# Copyright (c) 2013-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# External plugins.
+
+DEP_PLUGINS ?=
+
+$(foreach p,$(DEP_PLUGINS),\
+ $(eval $(if $(findstring /,$p),\
+ $(call core_dep_plugin,$p,$(firstword $(subst /, ,$p))),\
+ $(call core_dep_plugin,$p/plugins.mk,$p))))
+
+help:: help-plugins
+
+help-plugins::
+ $(verbose) :
+
+# Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015-2016, Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Fetch dependencies recursively (without building them).
+
+.PHONY: fetch-deps fetch-doc-deps fetch-rel-deps fetch-test-deps \
+ fetch-shell-deps
+
+.PHONY: $(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+fetch-deps: $(ERLANG_MK_RECURSIVE_DEPS_LIST)
+fetch-doc-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST)
+fetch-rel-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST)
+fetch-test-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST)
+fetch-shell-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+ifneq ($(SKIP_DEPS),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST):
+ $(verbose) :> $@
+else
+# By default, we fetch "normal" dependencies. They are also included
+# regardless of the type of dependencies requested.
+#
+# $(ALL_DEPS_DIRS) includes $(BUILD_DEPS).
+
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_DOC_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_REL_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_TEST_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_SHELL_DEPS_DIRS)
+
+# Allow fetch-deps to be used together with $(DEP_TYPES) to fetch multiple
+# types of dependencies with a single target.
+ifneq ($(filter doc,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_DOC_DEPS_DIRS)
+endif
+ifneq ($(filter rel,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_REL_DEPS_DIRS)
+endif
+ifneq ($(filter test,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_TEST_DEPS_DIRS)
+endif
+ifneq ($(filter shell,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_SHELL_DEPS_DIRS)
+endif
+
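+# $$PPID is the PID of the make process invoking the shell, giving a
+# temporary list file unique to this top-level run; it is shared with the
+# recursive sub-makes by passing the variable down explicitly.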
+ERLANG_MK_RECURSIVE_TMP_LIST := $(abspath $(ERLANG_MK_TMP)/recursive-tmp-deps-$(shell echo $$PPID).log)
+
+$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): | $(ERLANG_MK_TMP)
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_RECURSIVE_TMP_LIST)
+endif
+ $(verbose) touch $(ERLANG_MK_RECURSIVE_TMP_LIST)
+ $(verbose) set -e; for dep in $^ ; do \
+ if ! grep -qs ^$$dep$$ $(ERLANG_MK_RECURSIVE_TMP_LIST); then \
+ echo $$dep >> $(ERLANG_MK_RECURSIVE_TMP_LIST); \
+ if grep -qs -E "^[[:blank:]]*include[[:blank:]]+(erlang\.mk|.*/erlang\.mk|.*ERLANG_MK_FILENAME.*)$$" \
+ $$dep/GNUmakefile $$dep/makefile $$dep/Makefile; then \
+ $(MAKE) -C $$dep fetch-deps \
+ IS_DEP=1 \
+ ERLANG_MK_RECURSIVE_TMP_LIST=$(ERLANG_MK_RECURSIVE_TMP_LIST); \
+ fi \
+ fi \
+ done
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) sort < $(ERLANG_MK_RECURSIVE_TMP_LIST) | \
+ uniq > $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted
+ $(verbose) cmp -s $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted $@ \
+ || mv $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted $@
+ $(verbose) rm -f $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted
+ $(verbose) rm $(ERLANG_MK_RECURSIVE_TMP_LIST)
+endif
+endif # ifneq ($(SKIP_DEPS),)
+
+# List dependencies recursively.
+
+.PHONY: list-deps list-doc-deps list-rel-deps list-test-deps \
+ list-shell-deps
+
+list-deps: $(ERLANG_MK_RECURSIVE_DEPS_LIST)
+list-doc-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST)
+list-rel-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST)
+list-test-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST)
+list-shell-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+list-deps list-doc-deps list-rel-deps list-test-deps list-shell-deps:
+ $(verbose) cat $^
+
+# Query dependencies recursively.
+
+.PHONY: query-deps query-doc-deps query-rel-deps query-test-deps \
+ query-shell-deps
+
+QUERY ?= name fetch_method repo version
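+# Each QUERY field expands through a query_<field> macro, so for example
+# `make query-deps QUERY="name version"` prints only each dependency's name
+# and version.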
+
+define query_target
+$(1): $(2) clean-tmp-query.log
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(4)
+endif
+ $(verbose) $(foreach dep,$(3),\
+ echo $(PROJECT): $(foreach q,$(QUERY),$(call query_$(q),$(dep))) >> $(4) ;)
+ $(if $(filter-out query-deps,$(1)),,\
+ $(verbose) set -e; for dep in $(3) ; do \
+ if grep -qs ^$$$$dep$$$$ $(ERLANG_MK_TMP)/query.log; then \
+ :; \
+ else \
+ echo $$$$dep >> $(ERLANG_MK_TMP)/query.log; \
+ $(MAKE) -C $(DEPS_DIR)/$$$$dep $$@ QUERY="$(QUERY)" IS_DEP=1 || true; \
+ fi \
+ done)
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) touch $(4)
+ $(verbose) cat $(4)
+endif
+endef
+
+clean-tmp-query.log:
+ifeq ($(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_TMP)/query.log
+endif
+
+$(eval $(call query_target,query-deps,$(ERLANG_MK_RECURSIVE_DEPS_LIST),$(BUILD_DEPS) $(DEPS),$(ERLANG_MK_QUERY_DEPS_FILE)))
+$(eval $(call query_target,query-doc-deps,$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST),$(DOC_DEPS),$(ERLANG_MK_QUERY_DOC_DEPS_FILE)))
+$(eval $(call query_target,query-rel-deps,$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST),$(REL_DEPS),$(ERLANG_MK_QUERY_REL_DEPS_FILE)))
+$(eval $(call query_target,query-test-deps,$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST),$(TEST_DEPS),$(ERLANG_MK_QUERY_TEST_DEPS_FILE)))
+$(eval $(call query_target,query-shell-deps,$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST),$(SHELL_DEPS),$(ERLANG_MK_QUERY_SHELL_DEPS_FILE)))
diff --git a/deps/rabbitmq_top/priv/www/js/tmpl/ets_tables.ejs b/deps/rabbitmq_top/priv/www/js/tmpl/ets_tables.ejs
new file mode 100644
index 0000000000..4ee21156a1
--- /dev/null
+++ b/deps/rabbitmq_top/priv/www/js/tmpl/ets_tables.ejs
@@ -0,0 +1,54 @@
+<h1>Top ETS Tables: <b><%= top.node %></b></h1>
+
+<p>
+ Node:
+ <select id="top-node-ets">
+ <% for (var i = 0; i < nodes.length; i++) { %>
+ <option name="#/top/<%= fmt_string(nodes[i].name) %>"<% if (nodes[i].name == top.node) { %>selected="selected"<% } %>><%= nodes[i].name %></option>
+ <% } %>
+ </select>
+
+ Rows:
+ <select id="row-count-ets">
+ <%
+ var row_counts = [20, 50, 100, 150];
+ for (var i = 0; i < row_counts.length; i++) {
+ %>
+ <option name="<%= row_counts[i] %>"
+ <% if (row_counts[i] == top.row_count) { %>selected="selected"<% } %>>
+ <%= row_counts[i] %></option>
+ <% } %>
+ </select>
+</p>
+
+<table class="list updatable">
+ <thead>
+ <tr>
+ <th><%= fmt_sort_desc_by_default('Name', 'name') %></th>
+ <th><%= fmt_sort_desc_by_default('Owner Name', 'owner_name') %></th>
+ <th><%= fmt_sort_desc_by_default('Memory', 'memory') %></th>
+ <th><%= fmt_sort_desc_by_default('Size', 'size') %></th>
+ <th><%= fmt_sort_desc_by_default('Type', 'type') %></th>
+ <th><%= fmt_sort_desc_by_default('Named', 'named_table') %></th>
+ <th>Protection</th>
+ <th>Compressed</th>
+ </tr>
+ </thead>
+ <tbody>
+<%
+ for (var i = 0; i < top.ets_tables.length; i++) {
+ var table = top.ets_tables[i];
+%>
+ <tr<%= alt_rows(i)%>>
+ <td><%= fmt_string(table.name) %></td>
+ <td><%= fmt_string(table.owner_name) %></td>
+ <td><%= fmt_bytes(table.memory * 1.0) %></td>
+ <td><%= table.size %></td>
+ <td><%= table.type %></td>
+ <td><%= table.named_table %></td>
+ <td><%= table.protection %></td>
+ <td><%= table.compressed %></td>
+ </tr>
+<% } %>
+ </tbody>
+</table>
diff --git a/deps/rabbitmq_top/priv/www/js/tmpl/process.ejs b/deps/rabbitmq_top/priv/www/js/tmpl/process.ejs
new file mode 100644
index 0000000000..54249afdbb
--- /dev/null
+++ b/deps/rabbitmq_top/priv/www/js/tmpl/process.ejs
@@ -0,0 +1,58 @@
+<h1>Process: <b><%= fmt_string(process.pid) %></b></h1>
+
+<div class="updatable">
+ <table class="facts">
+ <tr>
+ <th>Description</th>
+ <td><%= fmt_process_name(process) %></td>
+ </tr>
+ <tr>
+ <th>Type</th>
+ <td><%= fmt_remove_rabbit_prefix(process.name.type) %></td>
+ </tr>
+ <tr>
+ <th>Memory</th>
+ <td><%= fmt_bytes(process.memory) %></td>
+ </tr>
+ <tr>
+ <th>Reductions / sec</th>
+ <td><%= fmt_reduction_delta(process.reduction_delta) %></td>
+ </tr>
+ <tr>
+ <th>Total reductions</th>
+ <td><%= process.reductions %></td>
+ </tr>
+ <tr>
+ <th>Erlang mailbox</th>
+ <td><%= process.message_queue_len %></td>
+ </tr>
+ <tr>
+ <th>gen_server2 buffer <span class="help" id="gen-server2-buffer"></span></th>
+ <td><pre><%= process.buffer_len %></pre></td>
+ </tr>
+ <tr>
+ <th>Status</th>
+ <td><%= process.status %></td>
+ </tr>
+ <tr>
+ <th>Trap exit</th>
+ <td><%= fmt_boolean(process.trap_exit) %></td>
+ </tr>
+ <tr>
+ <th>Links</th>
+ <td><%= fmt_pids(process.links) %></td>
+ </tr>
+ <tr>
+ <th>Monitors</th>
+ <td><%= fmt_pids(process.monitors) %></td>
+ </tr>
+ <tr>
+ <th>Monitored by</th>
+ <td><%= fmt_pids(process.monitored_by) %></td>
+ </tr>
+ <tr>
+ <th>Current stacktrace</th>
+ <td><pre><%= fmt_string(process.current_stacktrace) %></pre></td>
+ </tr>
+ </table>
+</div>
diff --git a/deps/rabbitmq_top/priv/www/js/tmpl/processes.ejs b/deps/rabbitmq_top/priv/www/js/tmpl/processes.ejs
new file mode 100644
index 0000000000..e06f58d68e
--- /dev/null
+++ b/deps/rabbitmq_top/priv/www/js/tmpl/processes.ejs
@@ -0,0 +1,54 @@
+<h1>Top Processes: <b><%= top.node %></b></h1>
+
+<p>
+ Node:
+ <select id="top-node">
+ <% for (var i = 0; i < nodes.length; i++) { %>
+ <option name="#/top/<%= fmt_string(nodes[i].name) %>"<% if (nodes[i].name == top.node) { %>selected="selected"<% } %>><%= nodes[i].name %></option>
+ <% } %>
+ </select>
+
+ Rows:
+ <select id="row-count">
+ <%
+ var row_counts = [20, 50, 100, 150];
+ for (var i = 0; i < row_counts.length; i++) {
+ %>
+ <option name="<%= row_counts[i] %>"
+ <% if (row_counts[i] == top.row_count) { %>selected="selected"<% } %>>
+ <%= row_counts[i] %></option>
+ <% } %>
+ </select>
+</p>
+
+<table class="list updatable">
+ <thead>
+ <tr>
+ <th><%= fmt_sort_desc_by_default('Process', 'pid') %></th>
+ <th>Description</th>
+ <th>Type</th>
+ <th><%= fmt_sort_desc_by_default('Memory', 'memory') %></th>
+ <th><%= fmt_sort_desc_by_default('Reductions / sec', 'reduction_delta') %></th>
+ <th><%= fmt_sort_desc_by_default('Erlang mailbox', 'message_queue_len') %></th>
+ <th><%= fmt_sort_desc_by_default('gen_server2 buffer', 'buffer_len') %><span class="help" id="gen-server2-buffer"></span></th>
+ <th>Status</th>
+ </tr>
+ </thead>
+ <tbody>
+<%
+ for (var i = 0; i < top.processes.length; i++) {
+ var process = top.processes[i];
+%>
+ <tr<%= alt_rows(i)%>>
+ <td><%= link_pid(process.pid) %></td>
+ <td><%= fmt_process_name(process) %></td>
+ <td><%= fmt_remove_rabbit_prefix(process.name.type) %></td>
+ <td><%= fmt_bytes(process.memory * 1.0) %></td>
+ <td><%= fmt_reduction_delta(process.reduction_delta) %></td>
+ <td><%= process.message_queue_len %></td>
+ <td><%= process.buffer_len %></td>
+ <td><%= process.status %></td>
+ </tr>
+<% } %>
+ </tbody>
+</table>
diff --git a/deps/rabbitmq_top/priv/www/js/top.js b/deps/rabbitmq_top/priv/www/js/top.js
new file mode 100644
index 0000000000..a175899628
--- /dev/null
+++ b/deps/rabbitmq_top/priv/www/js/top.js
@@ -0,0 +1,128 @@
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at https://mozilla.org/MPL/2.0/.
+//
+// Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+
+dispatcher_add(function(sammy) {
+ sammy.get('#/top', function() {
+ var nodes = JSON.parse(sync_get('/nodes'));
+ go_to('#/top/' + nodes[0].name + "/20");
+ });
+ sammy.get('#/top/ets', function() {
+ var nodes = JSON.parse(sync_get('/nodes'));
+ go_to('#/top/ets/' + nodes[0].name + "/20");
+ });
+ sammy.get('#/top/:node/:row_count', function() {
+ render({'top': {path: '/top/' + esc(this.params['node']),
+ options: {sort: true,
+ row_count: this.params['row_count']}},
+ 'nodes': '/nodes'},
+ 'processes', '#/top');
+ });
+ sammy.get('#/top/ets/:node/:row_count', function() {
+ render({'top': {path: '/top/ets/' + esc(this.params['node']),
+ options: {sort: true,
+ row_count: this.params['row_count']}},
+ 'nodes': '/nodes'},
+ 'ets_tables', '#/top/ets');
+ });
+ sammy.get('#/process/:pid', function() {
+ render({'process': '/process/' + esc(this.params['pid'])},
+ 'process', '#/top');
+ });
+});
+
+NAVIGATION['Admin'][0]['Top Processes'] = ['#/top', 'administrator'];
+NAVIGATION['Admin'][0]['Top ETS Tables'] = ['#/top/ets', 'administrator'];
+
+HELP['gen-server2-buffer'] = "The processes with a <strong>gen_server2 buffer</strong> value of <code>&gt;= 0</code> are of type gen_server2. " +
+"They drain their Erlang mailbox into an internal queue as an optimisation. " +
+"In this context, \"queue\" refers to an internal data structure and must not be confused with a RabbitMQ queue.";
+
+$(document).on('change', 'select#top-node', function() {
+ var url='#/top/' + $(this).val() + "/" + $('select#row-count').val();
+ go_to(url);
+});
+
+$(document).on('change', 'select#top-node-ets', function() {
+ var url='#/top/ets/' + $(this).val() + "/" + $('select#row-count-ets').val();
+ go_to(url);
+});
+
+$(document).on('change', 'select#row-count', function() {
+ go_to('#/top/' + $('select#top-node').val() + "/" + $(this).val());
+});
+
+$(document).on('change', 'select#row-count-ets', function() {
+ go_to('#/top/ets/' + $('select#top-node-ets').val() + "/" + $(this).val());
+});
+
+function link_pid(name) {
+ return _link_to(name, '#/process/' + esc(name));
+}
+
+// fmt_sort assumes ascending ordering by default and
+// relies on two global variables, current_sort and current_sort_reverse.
+// rabbitmq-top, however, wants to sort things in descending order by default
+// because otherwise it is useless on initial page load.
+//
+// This discrepancy means that the sorting indicator (a triangle) will be
+// reversed either in the management UI or in the top tables.
+//
+// Changing the global variables has side effects, so we
+// copy fmt_sort here and flip the arrow to meet top's needs.
+function fmt_sort_desc_by_default(display, sort) {
+ var prefix = '';
+ if (current_sort == sort) {
+ prefix = '<span class="arrow">' +
+ (current_sort_reverse ? '&#9650; ' : '&#9660; ') +
+ '</span>';
+ }
+ return '<a class="sort" sort="' + sort + '">' + prefix + display + '</a>';
+}
+
+function fmt_process_name(process) {
+ if (process == undefined) return '';
+ var name = process.name;
+
+ if (name.supertype != undefined) {
+ if (name.supertype == 'channel') {
+ return link_channel(name.connection_name + ' (' +
+ name.channel_number + ')');
+ }
+ else if (name.supertype == 'queue') {
+ return link_queue(name.vhost, name.queue_name);
+ }
+ else if (name.supertype == 'connection') {
+ return link_conn(name.connection_name);
+ }
+ }
+ else {
+ return '<b>' + name.name + '</b>';
+ }
+}
+
+function fmt_remove_rabbit_prefix(name) {
+ if (name == 'rabbit_amqqueue_process') return 'queue';
+
+ if (name.substring(0, 7) == 'rabbit_') {
+ return name.substring(7);
+ }
+ else {
+ return name;
+ }
+}
+
+function fmt_pids(pids) {
+ var txt = '';
+ for (var i = 0; i < pids.length; i++) {
+ txt += link_pid(pids[i]) + ' ';
+ }
+
+ return txt;
+}
+
+function fmt_reduction_delta(delta) {
+ return Math.round(delta / 5); // gen_server updates every 5s
+}
diff --git a/deps/rabbitmq_top/rabbitmq-components.mk b/deps/rabbitmq_top/rabbitmq-components.mk
new file mode 100644
index 0000000000..b2a3be8b35
--- /dev/null
+++ b/deps/rabbitmq_top/rabbitmq-components.mk
@@ -0,0 +1,359 @@
+ifeq ($(.DEFAULT_GOAL),)
+# Define the default goal as `all` because this file defines some targets
+# before the inclusion of erlang.mk, which would otherwise make the wrong
+# target the default.
+.DEFAULT_GOAL = all
+endif
+
+# PROJECT_VERSION defaults to:
+# 1. the version exported by rabbitmq-server-release;
+# 2. the version stored in `git-revisions.txt`, if it exists;
+# 3. a version based on git-describe(1), if it is a Git clone;
+# 4. 0.0.0
+
+PROJECT_VERSION := $(RABBITMQ_VERSION)
+
+ifeq ($(PROJECT_VERSION),)
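+# Lines in git-revisions.txt start with "$(PROJECT_DESCRIPTION) <version>";
+# the awk program below picks the field just past the description's words.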
+PROJECT_VERSION := $(shell \
+if test -f git-revisions.txt; then \
+ head -n1 git-revisions.txt | \
+ awk '{print $$$(words $(PROJECT_DESCRIPTION) version);}'; \
+else \
+ (git describe --dirty --abbrev=7 --tags --always --first-parent \
+ 2>/dev/null || echo rabbitmq_v0_0_0) | \
+ sed -e 's/^rabbitmq_v//' -e 's/^v//' -e 's/_/./g' -e 's/-/+/' \
+ -e 's/-/./g'; \
+fi)
+endif
+
+# --------------------------------------------------------------------
+# RabbitMQ components.
+# --------------------------------------------------------------------
+
+# For RabbitMQ repositories, we want to checkout branches which match
+# the parent project. For instance, if the parent project is on a
+# release tag, dependencies must be on the same release tag. If the
+# parent project is on a topic branch, dependencies must be on the same
+# topic branch or fallback to `stable` or `master` whichever was the
+# base of the topic branch.
+
+dep_amqp_client = git_rmq rabbitmq-erlang-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_amqp10_client = git_rmq rabbitmq-amqp1.0-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_amqp10_common = git_rmq rabbitmq-amqp1.0-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbit = git_rmq rabbitmq-server $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbit_common = git_rmq rabbitmq-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_amqp1_0 = git_rmq rabbitmq-amqp1.0 $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_amqp = git_rmq rabbitmq-auth-backend-amqp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_cache = git_rmq rabbitmq-auth-backend-cache $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_http = git_rmq rabbitmq-auth-backend-http $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_ldap = git_rmq rabbitmq-auth-backend-ldap $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_oauth2 = git_rmq rabbitmq-auth-backend-oauth2 $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_mechanism_ssl = git_rmq rabbitmq-auth-mechanism-ssl $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_aws = git_rmq rabbitmq-aws $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_boot_steps_visualiser = git_rmq rabbitmq-boot-steps-visualiser $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_cli = git_rmq rabbitmq-cli $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_codegen = git_rmq rabbitmq-codegen $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_consistent_hash_exchange = git_rmq rabbitmq-consistent-hash-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_ct_client_helpers = git_rmq rabbitmq-ct-client-helpers $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_ct_helpers = git_rmq rabbitmq-ct-helpers $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_delayed_message_exchange = git_rmq rabbitmq-delayed-message-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_dotnet_client = git_rmq rabbitmq-dotnet-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_event_exchange = git_rmq rabbitmq-event-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_federation = git_rmq rabbitmq-federation $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_federation_management = git_rmq rabbitmq-federation-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_java_client = git_rmq rabbitmq-java-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_client = git_rmq rabbitmq-jms-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_cts = git_rmq rabbitmq-jms-cts $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_topic_exchange = git_rmq rabbitmq-jms-topic-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_lvc_exchange = git_rmq rabbitmq-lvc-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management = git_rmq rabbitmq-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_agent = git_rmq rabbitmq-management-agent $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_exchange = git_rmq rabbitmq-management-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_themes = git_rmq rabbitmq-management-themes $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_message_timestamp = git_rmq rabbitmq-message-timestamp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_metronome = git_rmq rabbitmq-metronome $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_mqtt = git_rmq rabbitmq-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_objc_client = git_rmq rabbitmq-objc-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_aws = git_rmq rabbitmq-peer-discovery-aws $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_common = git_rmq rabbitmq-peer-discovery-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_consul = git_rmq rabbitmq-peer-discovery-consul $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_etcd = git_rmq rabbitmq-peer-discovery-etcd $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_k8s = git_rmq rabbitmq-peer-discovery-k8s $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_prometheus = git_rmq rabbitmq-prometheus $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_random_exchange = git_rmq rabbitmq-random-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_recent_history_exchange = git_rmq rabbitmq-recent-history-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_routing_node_stamp = git_rmq rabbitmq-routing-node-stamp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_rtopic_exchange = git_rmq rabbitmq-rtopic-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_server_release = git_rmq rabbitmq-server-release $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_sharding = git_rmq rabbitmq-sharding $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_shovel = git_rmq rabbitmq-shovel $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_shovel_management = git_rmq rabbitmq-shovel-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_stomp = git_rmq rabbitmq-stomp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_stream = git_rmq rabbitmq-stream $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_toke = git_rmq rabbitmq-toke $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_top = git_rmq rabbitmq-top $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_tracing = git_rmq rabbitmq-tracing $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_trust_store = git_rmq rabbitmq-trust-store $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_test = git_rmq rabbitmq-test $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_dispatch = git_rmq rabbitmq-web-dispatch $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_stomp = git_rmq rabbitmq-web-stomp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_stomp_examples = git_rmq rabbitmq-web-stomp-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt = git_rmq rabbitmq-web-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt_examples = git_rmq rabbitmq-web-mqtt-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_website = git_rmq rabbitmq-website $(current_rmq_ref) $(base_rmq_ref) live master
+dep_toke = git_rmq toke $(current_rmq_ref) $(base_rmq_ref) master
+
+dep_rabbitmq_public_umbrella = git_rmq rabbitmq-public-umbrella $(current_rmq_ref) $(base_rmq_ref) master
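+
+# For instance, a line such as
+#   dep_rabbitmq_top = git_rmq rabbitmq-top $(current_rmq_ref) $(base_rmq_ref) master
+# is read as: fetch method (git_rmq), repository name (rabbitmq-top),
+# then the refs that $(dep_fetch_git_rmq), defined further below, tries
+# in order until one checks out.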
+
+# Third-party dependencies version pinning.
+#
+# We do that in this file, which is copied in all projects, to ensure
+# all projects use the same versions. It avoids conflicts and makes it
+# possible to work with rabbitmq-public-umbrella.
+
+dep_accept = hex 0.3.5
+dep_cowboy = hex 2.8.0
+dep_cowlib = hex 2.9.1
+dep_jsx = hex 2.11.0
+dep_lager = hex 3.8.0
+dep_prometheus = git https://github.com/deadtrickster/prometheus.erl.git master
+dep_ra = git https://github.com/rabbitmq/ra.git master
+dep_ranch = hex 1.7.1
+dep_recon = hex 2.5.1
+dep_observer_cli = hex 1.5.4
+dep_stdout_formatter = hex 0.2.4
+dep_sysmon_handler = hex 1.3.0
+
+RABBITMQ_COMPONENTS = amqp_client \
+ amqp10_common \
+ amqp10_client \
+ rabbit \
+ rabbit_common \
+ rabbitmq_amqp1_0 \
+ rabbitmq_auth_backend_amqp \
+ rabbitmq_auth_backend_cache \
+ rabbitmq_auth_backend_http \
+ rabbitmq_auth_backend_ldap \
+ rabbitmq_auth_backend_oauth2 \
+ rabbitmq_auth_mechanism_ssl \
+ rabbitmq_aws \
+ rabbitmq_boot_steps_visualiser \
+ rabbitmq_cli \
+ rabbitmq_codegen \
+ rabbitmq_consistent_hash_exchange \
+ rabbitmq_ct_client_helpers \
+ rabbitmq_ct_helpers \
+ rabbitmq_delayed_message_exchange \
+ rabbitmq_dotnet_client \
+ rabbitmq_event_exchange \
+ rabbitmq_federation \
+ rabbitmq_federation_management \
+ rabbitmq_java_client \
+ rabbitmq_jms_client \
+ rabbitmq_jms_cts \
+ rabbitmq_jms_topic_exchange \
+ rabbitmq_lvc_exchange \
+ rabbitmq_management \
+ rabbitmq_management_agent \
+ rabbitmq_management_exchange \
+ rabbitmq_management_themes \
+ rabbitmq_message_timestamp \
+ rabbitmq_metronome \
+ rabbitmq_mqtt \
+ rabbitmq_objc_client \
+ rabbitmq_peer_discovery_aws \
+ rabbitmq_peer_discovery_common \
+ rabbitmq_peer_discovery_consul \
+ rabbitmq_peer_discovery_etcd \
+ rabbitmq_peer_discovery_k8s \
+ rabbitmq_prometheus \
+ rabbitmq_random_exchange \
+ rabbitmq_recent_history_exchange \
+ rabbitmq_routing_node_stamp \
+ rabbitmq_rtopic_exchange \
+ rabbitmq_server_release \
+ rabbitmq_sharding \
+ rabbitmq_shovel \
+ rabbitmq_shovel_management \
+ rabbitmq_stomp \
+ rabbitmq_stream \
+ rabbitmq_toke \
+ rabbitmq_top \
+ rabbitmq_tracing \
+ rabbitmq_trust_store \
+ rabbitmq_web_dispatch \
+ rabbitmq_web_mqtt \
+ rabbitmq_web_mqtt_examples \
+ rabbitmq_web_stomp \
+ rabbitmq_web_stomp_examples \
+ rabbitmq_website
+
+# Erlang.mk does not rebuild dependencies by default, once they were
+# compiled once, except for those listed in the `$(FORCE_REBUILD)`
+# variable.
+#
+# We want all RabbitMQ components to always be rebuilt: this eases
+# the work on several components at the same time.
+
+FORCE_REBUILD = $(RABBITMQ_COMPONENTS)
+
+# Several components have a custom erlang.mk/build.config, mainly
+# to disable eunit. Therefore, we can't use the top-level project's
+# erlang.mk copy.
+NO_AUTOPATCH += $(RABBITMQ_COMPONENTS)
+
+ifeq ($(origin current_rmq_ref),undefined)
+ifneq ($(wildcard .git),)
+current_rmq_ref := $(shell (\
+ ref=$$(LANG=C git branch --list | awk '/^\* \(.*detached / {ref=$$0; sub(/.*detached [^ ]+ /, "", ref); sub(/\)$$/, "", ref); print ref; exit;} /^\* / {ref=$$0; sub(/^\* /, "", ref); print ref; exit}');\
+ if test "$$(git rev-parse --short HEAD)" != "$$ref"; then echo "$$ref"; fi))
+else
+current_rmq_ref := master
+endif
+endif
+export current_rmq_ref
+
+ifeq ($(origin base_rmq_ref),undefined)
+ifneq ($(wildcard .git),)
+possible_base_rmq_ref := master
+ifeq ($(possible_base_rmq_ref),$(current_rmq_ref))
+base_rmq_ref := $(current_rmq_ref)
+else
+base_rmq_ref := $(shell \
+ (git rev-parse --verify -q master >/dev/null && \
+ git rev-parse --verify -q $(possible_base_rmq_ref) >/dev/null && \
+ git merge-base --is-ancestor $$(git merge-base master HEAD) $(possible_base_rmq_ref) && \
+ echo $(possible_base_rmq_ref)) || \
+ echo master)
+endif
+else
+base_rmq_ref := master
+endif
+endif
+export base_rmq_ref
+
+# Repository URL selection.
+#
+# First, we infer other components' location from the current project
+# repository URL, if it's a Git repository:
+# - We take the "origin" remote URL as the base
+# - The current project name and repository name is replaced by the
+# target's properties:
+# eg. rabbitmq-common is replaced by rabbitmq-codegen
+# eg. rabbit_common is replaced by rabbitmq_codegen
+#
+# If cloning from this computed location fails, we fallback to RabbitMQ
+# upstream which is GitHub.
+
+# Macro to transform eg. "rabbit_common" to "rabbitmq-common".
+rmq_cmp_repo_name = $(word 2,$(dep_$(1)))
+
+# Upstream URL for the current project.
+RABBITMQ_COMPONENT_REPO_NAME := $(call rmq_cmp_repo_name,$(PROJECT))
+RABBITMQ_UPSTREAM_FETCH_URL ?= https://github.com/rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git
+RABBITMQ_UPSTREAM_PUSH_URL ?= git@github.com:rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git
+
+# Current URL for the current project. If this is not a Git clone,
+# default to the upstream Git repository.
+ifneq ($(wildcard .git),)
+git_origin_fetch_url := $(shell git config remote.origin.url)
+git_origin_push_url := $(shell git config remote.origin.pushurl || git config remote.origin.url)
+RABBITMQ_CURRENT_FETCH_URL ?= $(git_origin_fetch_url)
+RABBITMQ_CURRENT_PUSH_URL ?= $(git_origin_push_url)
+else
+RABBITMQ_CURRENT_FETCH_URL ?= $(RABBITMQ_UPSTREAM_FETCH_URL)
+RABBITMQ_CURRENT_PUSH_URL ?= $(RABBITMQ_UPSTREAM_PUSH_URL)
+endif
+
+# Macro to replace the following pattern:
+# 1. /foo.git -> /bar.git
+# 2. /foo -> /bar
+# 3. /foo/ -> /bar/
+subst_repo_name = $(patsubst %/$(1)/%,%/$(2)/%,$(patsubst %/$(1),%/$(2),$(patsubst %/$(1).git,%/$(2).git,$(3))))
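+
+# For example (hypothetical URL), with $(1) = rabbitmq-common,
+# $(2) = rabbitmq-codegen and $(3) = https://github.com/me/rabbitmq-common.git,
+# the macro yields https://github.com/me/rabbitmq-codegen.git.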
+
+# Macro to replace both the project's name (eg. "rabbit_common") and
+# repository name (eg. "rabbitmq-common") by the target's equivalent.
+#
+# This macro is kept on one line because we don't want whitespaces in
+# the returned value, as it's used in $(dep_fetch_git_rmq) in a shell
+# single-quoted string.
+dep_rmq_repo = $(if $(dep_$(2)),$(call subst_repo_name,$(PROJECT),$(2),$(call subst_repo_name,$(RABBITMQ_COMPONENT_REPO_NAME),$(call rmq_cmp_repo_name,$(2)),$(1))),$(pkg_$(1)_repo))
+
+dep_rmq_commits = $(if $(dep_$(1)), \
+ $(wordlist 3,$(words $(dep_$(1))),$(dep_$(1))), \
+ $(pkg_$(1)_commit))
+
+define dep_fetch_git_rmq
+ fetch_url1='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_FETCH_URL),$(1))'; \
+ fetch_url2='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_FETCH_URL),$(1))'; \
+ if test "$$$$fetch_url1" != '$(RABBITMQ_CURRENT_FETCH_URL)' && \
+ git clone -q -n -- "$$$$fetch_url1" $(DEPS_DIR)/$(call dep_name,$(1)); then \
+ fetch_url="$$$$fetch_url1"; \
+ push_url='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_PUSH_URL),$(1))'; \
+ elif git clone -q -n -- "$$$$fetch_url2" $(DEPS_DIR)/$(call dep_name,$(1)); then \
+ fetch_url="$$$$fetch_url2"; \
+ push_url='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_PUSH_URL),$(1))'; \
+ fi; \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && ( \
+ $(foreach ref,$(call dep_rmq_commits,$(1)), \
+ git checkout -q $(ref) >/dev/null 2>&1 || \
+ ) \
+ (echo "error: no valid pathspec among: $(call dep_rmq_commits,$(1))" \
+ 1>&2 && false) ) && \
+ (test "$$$$fetch_url" = "$$$$push_url" || \
+ git remote set-url --push origin "$$$$push_url")
+endef
+
+# --------------------------------------------------------------------
+# Component distribution.
+# --------------------------------------------------------------------
+
+list-dist-deps::
+ @:
+
+prepare-dist::
+ @:
+
+# --------------------------------------------------------------------
+# Umbrella-specific settings.
+# --------------------------------------------------------------------
+
+# If the top-level project is a RabbitMQ component, we override
+# $(DEPS_DIR) for this project to point to the top-level's one.
+#
+# We also verify that the guessed DEPS_DIR is actually named `deps`,
+# to rule out any situation where it is a coincidence that we found a
+# `rabbitmq-components.mk` in an upper directory.
+
+possible_deps_dir_1 = $(abspath ..)
+possible_deps_dir_2 = $(abspath ../../..)
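+
+# For example, a plugin checked out at some-umbrella/deps/rabbitmq_top has
+# possible_deps_dir_1 = some-umbrella/deps, which passes both checks below
+# (provided the umbrella ships rabbitmq-components.mk at its root), so the
+# umbrella's deps directory is shared.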
+
+ifeq ($(notdir $(possible_deps_dir_1)),deps)
+ifneq ($(wildcard $(possible_deps_dir_1)/../rabbitmq-components.mk),)
+deps_dir_overriden = 1
+DEPS_DIR ?= $(possible_deps_dir_1)
+DISABLE_DISTCLEAN = 1
+endif
+endif
+
+ifeq ($(deps_dir_overriden),)
+ifeq ($(notdir $(possible_deps_dir_2)),deps)
+ifneq ($(wildcard $(possible_deps_dir_2)/../rabbitmq-components.mk),)
+deps_dir_overriden = 1
+DEPS_DIR ?= $(possible_deps_dir_2)
+DISABLE_DISTCLEAN = 1
+endif
+endif
+endif
+
+ifneq ($(wildcard UMBRELLA.md),)
+DISABLE_DISTCLEAN = 1
+endif
+
+# We disable `make distclean` so $(DEPS_DIR) is not accidentally removed.
+
+ifeq ($(DISABLE_DISTCLEAN),1)
+ifneq ($(filter distclean distclean-deps,$(MAKECMDGOALS)),)
+SKIP_DEPS = 1
+endif
+endif
diff --git a/deps/rabbitmq_top/src/rabbit_top_app.erl b/deps/rabbitmq_top/src/rabbit_top_app.erl
new file mode 100644
index 0000000000..6ad279a586
--- /dev/null
+++ b/deps/rabbitmq_top/src/rabbit_top_app.erl
@@ -0,0 +1,17 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_top_app).
+
+-behaviour(application).
+-export([start/2, stop/1]).
+
+start(_Type, _StartArgs) ->
+ rabbit_top_sup:start_link().
+
+stop(_State) ->
+ ok.
diff --git a/deps/rabbitmq_top/src/rabbit_top_extension.erl b/deps/rabbitmq_top/src/rabbit_top_extension.erl
new file mode 100644
index 0000000000..26961b85bf
--- /dev/null
+++ b/deps/rabbitmq_top/src/rabbit_top_extension.erl
@@ -0,0 +1,18 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_top_extension).
+
+-behaviour(rabbit_mgmt_extension).
+
+-export([dispatcher/0, web_ui/0]).
+
+dispatcher() -> [{"/top/:node", rabbit_top_wm_processes, []},
+ {"/top/ets/:node", rabbit_top_wm_ets_tables, []},
+ {"/process/:pid", rabbit_top_wm_process, []}].
+
+web_ui() -> [{javascript, <<"top.js">>}].
diff --git a/deps/rabbitmq_top/src/rabbit_top_sup.erl b/deps/rabbitmq_top/src/rabbit_top_sup.erl
new file mode 100644
index 0000000000..63e9bef08e
--- /dev/null
+++ b/deps/rabbitmq_top/src/rabbit_top_sup.erl
@@ -0,0 +1,25 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_top_sup).
+
+-behaviour(supervisor).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-export([start_link/0]).
+-export([init/1]).
+
+start_link() ->
+ supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+init([]) ->
+ Top = {rabbit_top_worker,
+ {rabbit_top_worker, start_link, []},
+ permanent, ?WORKER_WAIT, worker, [rabbit_top_worker]},
+ {ok, {{one_for_one, 10, 10}, [Top]}}.
+
diff --git a/deps/rabbitmq_top/src/rabbit_top_util.erl b/deps/rabbitmq_top/src/rabbit_top_util.erl
new file mode 100644
index 0000000000..985e5344d5
--- /dev/null
+++ b/deps/rabbitmq_top/src/rabbit_top_util.erl
@@ -0,0 +1,142 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_top_util).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-export([toplist/3, fmt_all/1, fmt/1, obtain_name/1, safe_process_info/2]).
+-export([sort_by_param/2, sort_order_param/1, row_count_param/2]).
+
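+%% Sorts the proplists in List by the value of Key (ascending keysort,
+%% then reversed, i.e. descending), keeps the top Count entries and tags
+%% each one with a display name.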
+toplist(Key, Count, List) ->
+ Sorted = lists:sublist(
+ lists:reverse(
+ lists:keysort(1, [toplist(Key, I) || I <- List])), Count),
+ [add_name(Info) || {_, Info} <- Sorted].
+
+toplist(Key, Info) ->
+ {Key, Val} = lists:keyfind(Key, 1, Info),
+ {Val, Info}.
+
+sort_by_param(ReqData, Default) ->
+ case rabbit_mgmt_util:qs_val(<<"sort">>, ReqData) of
+ undefined -> Default;
+ Bin -> rabbit_data_coercion:to_atom(Bin)
+ end.
+
+sort_order_param(ReqData) ->
+ case rabbit_mgmt_util:qs_val(<<"sort_reverse">>, ReqData) of
+ <<"true">> -> asc;
+ _ -> desc
+ end.
+
+row_count_param(ReqData, Default) ->
+ case rabbit_mgmt_util:qs_val(<<"row_count">>, ReqData) of
+ undefined -> Default;
+ Bin -> rabbit_data_coercion:to_integer(Bin)
+ end.
+
+add_name(Info) ->
+ {pid, Pid} = lists:keyfind(pid, 1, Info),
+ [{name, obtain_name(Pid)} | Info].
+
+fmt_all(Info) -> [{K, fmt(V)} || {K, V} <- Info].
+
+fmt(Pid) when is_pid(Pid) ->
+ list_to_binary(pid_to_list(Pid));
+fmt(Other) ->
+ list_to_binary(rabbit_misc:format("~p", [Other])).
+
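+%% Tries each naming strategy in turn and keeps the first result that is
+%% not 'fail': registered name, then the process_name dictionary entry,
+%% then the initial call.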
+obtain_name(Pid) ->
+ lists:foldl(fun(Fun, fail) -> Fun(Pid);
+ (_Fun, Res) -> Res
+ end, fail, [fun obtain_from_registered_name/1,
+ fun obtain_from_process_name/1,
+ fun obtain_from_initial_call/1]).
+
+obtain_from_registered_name(Pid) ->
+ case safe_process_info(Pid, registered_name) of
+ {registered_name, Name} -> [{type, registered},
+ {name, Name}];
+ _ -> fail
+ end.
+
+obtain_from_process_name(Pid) ->
+ case safe_process_info(Pid, dictionary) of
+ {dictionary, Dict} ->
+ case lists:keyfind(process_name, 1, Dict) of
+ {process_name, Name} -> fmt_process_name(Name);
+ false -> fail
+ end;
+ _ ->
+ fail
+ end.
+
+fmt_process_name({Type, {ConnName, ChNum}}) when is_binary(ConnName),
+ is_integer(ChNum) ->
+ [{supertype, channel},
+ {type, Type},
+ {connection_name, ConnName},
+ {channel_number, ChNum}];
+
+fmt_process_name({Type, #resource{virtual_host = VHost,
+ name = Name}}) ->
+ [{supertype, queue},
+ {type, Type},
+ {queue_name, Name},
+ {vhost, VHost}];
+
+fmt_process_name({Type, ConnName}) when is_binary(ConnName) ->
+ [{supertype, connection},
+ {type, Type},
+ {connection_name, ConnName}];
+
+fmt_process_name({Type, unknown}) -> %% probably some adapter thing
+ [{supertype, connection},
+ {type, Type},
+ {connection_name, unknown}].
+
+obtain_from_initial_call(Pid) ->
+ case initial_call(Pid) of
+ fail -> [{type, starting},
+ {name, fmt(Pid)}];
+ MFA -> case guess_initial_call(MFA) of
+ fail -> [{type, unknown},
+ {name, fmt(MFA)}];
+ Name -> [{type, known},
+ {name, Name}]
+ end
+ end.
+
+initial_call(Pid) ->
+ case initial_call_dict(Pid) of
+ fail -> case safe_process_info(Pid, initial_call) of
+ {initial_call, MFA} -> MFA;
+ _ -> fail
+ end;
+ MFA -> MFA
+ end.
+
+initial_call_dict(Pid) ->
+ case safe_process_info(Pid, dictionary) of
+ {dictionary, Dict} ->
+ case lists:keyfind('$initial_call', 1, Dict) of
+ {'$initial_call', MFA} -> MFA;
+ false -> fail
+ end;
+ _ ->
+ fail
+ end.
+
+guess_initial_call({supervisor, _F, _A}) -> supervisor;
+guess_initial_call({supervisor2, _F, _A}) -> supervisor;
+guess_initial_call({cowboy_protocol, _F, _A}) -> cowboy_protocol;
+guess_initial_call(_MFA) -> fail.
+
+
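+%% rpc:call/4 returns {badrpc, Reason} when the remote node is
+%% unreachable and erlang:process_info/2 returns 'undefined' for a dead
+%% process; callers match on the expected tuple and treat anything else
+%% as a failure.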
+safe_process_info(Pid, Info) ->
+ rpc:call(node(Pid), erlang, process_info, [Pid, Info]).
diff --git a/deps/rabbitmq_top/src/rabbit_top_wm_ets_tables.erl b/deps/rabbitmq_top/src/rabbit_top_wm_ets_tables.erl
new file mode 100644
index 0000000000..5ae17b4f97
--- /dev/null
+++ b/deps/rabbitmq_top/src/rabbit_top_wm_ets_tables.erl
@@ -0,0 +1,57 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_top_wm_ets_tables).
+
+-export([init/2, to_json/2, content_types_provided/2, is_authorized/2]).
+
+-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+%%--------------------------------------------------------------------
+
+init(Req, _State) ->
+ {cowboy_rest, rabbit_mgmt_cors:set_headers(Req, ?MODULE), #context{}}.
+
+
+content_types_provided(ReqData, Context) ->
+ {[{<<"application/json">>, to_json}], ReqData, Context}.
+
+to_json(ReqData, Context) ->
+ Sort = rabbit_top_util:sort_by_param(ReqData, memory),
+ Node = rabbit_data_coercion:to_atom(rabbit_mgmt_util:id(node, ReqData)),
+ Order = rabbit_top_util:sort_order_param(ReqData),
+ RowCount = rabbit_top_util:row_count_param(ReqData, 20),
+
+ rabbit_mgmt_util:reply([{node, Node},
+ {row_count, RowCount},
+ {ets_tables, ets_tables(Node, Sort, Order, RowCount)}],
+ ReqData, Context).
+
+is_authorized(ReqData, Context) ->
+ rabbit_mgmt_util:is_authorized_admin(ReqData, Context).
+
+%%--------------------------------------------------------------------
+
+ets_tables(Node, Sort, Order, RowCount) ->
+ try
+ [fmt(P) || P <- rabbit_top_worker:ets_tables(Node, Sort, Order, RowCount)]
+ catch
+ exit:{noproc, _} ->
+ []
+ end.
+
+fmt(Info) ->
+ {owner, OPid} = lists:keyfind(owner, 1, Info),
+ {heir, HPid} = lists:keyfind(heir, 1, Info),
+ %% OTP 21 introduced the 'id' element that contains a reference.
+ %% These cannot be serialised and must be removed from the proplist
+ Info1 = lists:keydelete(owner, 1,
+ lists:keydelete(id, 1, Info)),
+ Info2 = lists:keydelete(heir, 1, Info1),
+ [{owner, rabbit_top_util:fmt(OPid)},
+ {heir, rabbit_top_util:fmt(HPid)} | Info2].
diff --git a/deps/rabbitmq_top/src/rabbit_top_wm_process.erl b/deps/rabbitmq_top/src/rabbit_top_wm_process.erl
new file mode 100644
index 0000000000..0a80105a65
--- /dev/null
+++ b/deps/rabbitmq_top/src/rabbit_top_wm_process.erl
@@ -0,0 +1,68 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_top_wm_process).
+
+-export([init/2, to_json/2, resource_exists/2, content_types_provided/2,
+ is_authorized/2]).
+
+-define(ADDITIONAL_INFO,
+ [current_stacktrace, trap_exit, links, monitors, monitored_by]).
+
+-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+%%--------------------------------------------------------------------
+
+init(Req, _State) ->
+ {cowboy_rest, rabbit_mgmt_cors:set_headers(Req, ?MODULE), #context{}}.
+
+
+content_types_provided(ReqData, Context) ->
+ {[{<<"application/json">>, to_json}], ReqData, Context}.
+
+to_json(ReqData, Context) ->
+ rabbit_mgmt_util:reply(proc(ReqData), ReqData, Context).
+
+resource_exists(ReqData, Context) ->
+ {case proc(ReqData) of
+ not_found -> false;
+ _ -> true
+ end, ReqData, Context}.
+
+is_authorized(ReqData, Context) ->
+ rabbit_mgmt_util:is_authorized_admin(ReqData, Context).
+
+%%--------------------------------------------------------------------
+
+proc(ReqData) ->
+ PidBin = rabbit_mgmt_util:id(pid, ReqData),
+ try list_to_pid(binary_to_list(PidBin)) of
+ Pid -> case rabbit_top_worker:proc(Pid) of
+ {ok, Base} -> [{pid, PidBin},
+ {name, rabbit_top_util:obtain_name(Pid)}] ++
+ Base ++
+ case rabbit_top_util:safe_process_info(
+ Pid, ?ADDITIONAL_INFO) of
+ undefined -> [];
+ Props -> fmt(Props)
+ end;
+ error -> not_found
+ end
+ catch
+ error:badarg ->
+ not_found
+ end.
+
+
+fmt(Props) -> [{K, fmt(K, V)} || {K, V} <- Props].
+
+fmt(links, V) -> [rabbit_top_util:fmt(P) || P <- V, is_pid(P)];
+fmt(monitors, V) -> [rabbit_top_util:fmt(P) || {process, P} <- V];
+fmt(monitored_by, V) -> [rabbit_top_util:fmt(P) || P <- V];
+fmt(current_stacktrace, V) -> rabbit_top_util:fmt(V);
+fmt(_K, V) -> V.
diff --git a/deps/rabbitmq_top/src/rabbit_top_wm_processes.erl b/deps/rabbitmq_top/src/rabbit_top_wm_processes.erl
new file mode 100644
index 0000000000..a5f14d429f
--- /dev/null
+++ b/deps/rabbitmq_top/src/rabbit_top_wm_processes.erl
@@ -0,0 +1,51 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_top_wm_processes).
+
+-export([init/2, to_json/2, content_types_provided/2, is_authorized/2]).
+
+-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+%%--------------------------------------------------------------------
+
+init(Req, _State) ->
+ {cowboy_rest, rabbit_mgmt_cors:set_headers(Req, ?MODULE), #context{}}.
+
+content_types_provided(ReqData, Context) ->
+ {[{<<"application/json">>, to_json}], ReqData, Context}.
+
+to_json(ReqData, Context) ->
+ Sort = rabbit_top_util:sort_by_param(ReqData, reduction_delta),
+ Node = rabbit_data_coercion:to_atom(rabbit_mgmt_util:id(node, ReqData)),
+ Order = rabbit_top_util:sort_order_param(ReqData),
+ RowCount = rabbit_top_util:row_count_param(ReqData, 20),
+
+ rabbit_mgmt_util:reply([{node, Node},
+ {row_count, RowCount},
+ {processes, procs(Node, Sort, Order, RowCount)}],
+ ReqData, Context).
+
+is_authorized(ReqData, Context) ->
+ rabbit_mgmt_util:is_authorized_admin(ReqData, Context).
+
+%%--------------------------------------------------------------------
+
+procs(Node, Sort, Order, RowCount) ->
+ try
+ [fmt(P) || P <- rabbit_top_worker:procs(Node, Sort, Order, RowCount)]
+ catch
+ exit:{noproc, _} ->
+ []
+ end.
+
+fmt(Info) ->
+ {pid, Pid} = lists:keyfind(pid, 1, Info),
+ Info1 = lists:keydelete(pid, 1, Info),
+ [{pid, rabbit_top_util:fmt(Pid)},
+ {name, rabbit_top_util:obtain_name(Pid)} | Info1].
diff --git a/deps/rabbitmq_top/src/rabbit_top_worker.erl b/deps/rabbitmq_top/src/rabbit_top_worker.erl
new file mode 100644
index 0000000000..ef6b0c984f
--- /dev/null
+++ b/deps/rabbitmq_top/src/rabbit_top_worker.erl
@@ -0,0 +1,171 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_top_worker).
+-behaviour(gen_server).
+
+-define(PROCESS_INFO, [memory, message_queue_len, reductions, status]).
+
+-export([start_link/0]).
+
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
+ terminate/2, code_change/3]).
+
+-export([procs/4, proc/1, ets_tables/4, ets_table/1]).
+
+-define(SERVER, ?MODULE).
+-define(MILLIS, 1000).
+-define(EVERY, 5).
+-define(SLEEP, ?EVERY * ?MILLIS).
+
+-record(state, {procs, ets_tables}).
+
+%%--------------------------------------------------------------------
+
+start_link() ->
+ gen_server:start_link({local, ?SERVER}, ?MODULE, [], []).
+
+
+procs(Node, Key, Rev, Count) ->
+ gen_server:call({?SERVER, Node}, {procs, Key, Rev, Count}, infinity).
+
+proc(Pid) ->
+ gen_server:call({?SERVER, node(Pid)}, {proc, Pid}, infinity).
+
+ets_tables(Node, Key, Rev, Count) ->
+ gen_server:call({?SERVER, Node}, {ets_tables, Key, Rev, Count}, infinity).
+
+ets_table(Name) ->
+ table_info(Name).
+
+%%--------------------------------------------------------------------
+
+init([]) ->
+ ensure_timer(),
+ {ok, #state{procs = procs(#{}),
+ ets_tables = ets_tables([])}}.
+
+handle_call({ets_tables, Key, Order, Count}, _From,
+ State = #state{ets_tables = Tables}) ->
+ {reply, toplist(Key, Order, Count, Tables), State};
+
+handle_call({procs, Key, Order, Count}, _From, State = #state{procs = Procs}) ->
+ {reply, toplist(Key, Order, Count, flatten(Procs)), State};
+
+handle_call({proc, Pid}, _From, State = #state{procs = Procs}) ->
+ {reply, maps:find(Pid, Procs), State}.
+
+handle_cast(_Msg, State) ->
+ {noreply, State}.
+
+handle_info(update, State = #state{procs = OldProcs, ets_tables = OldTables}) ->
+ ensure_timer(),
+ {noreply, State#state{procs = procs(OldProcs),
+ ets_tables = ets_tables(OldTables)}};
+
+handle_info(_Msg, State) ->
+ {noreply, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+%%--------------------------------------------------------------------
+
+ensure_timer() ->
+ erlang:send_after(?SLEEP, self(), update).
+
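+%% Samples every process on the node. The reduction delta is the
+%% difference against the previous sample divided by ?EVERY (the
+%% sampling interval in seconds), giving an approximate per-second
+%% rate; processes seen for the first time are measured from zero.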
+procs(OldProcs) ->
+ lists:foldl(
+ fun(Pid, Procs) ->
+ case process_info(Pid, ?PROCESS_INFO) of
+ undefined ->
+ Procs;
+ Props ->
+ Delta = (reductions(Props) -
+ case maps:find(Pid, OldProcs) of
+ {ok, OldProps} -> reductions(OldProps);
+ error -> 0
+ end) div ?EVERY,
+ NewProps = expand_gen_server2_info(
+ Pid, [{reduction_delta, Delta} | Props]),
+ maps:put(Pid, NewProps, Procs)
+ end
+ end, #{}, processes()).
+
+reductions(Props) ->
+ {reductions, R} = lists:keyfind(reductions, 1, Props),
+ R.
+
+ets_tables(_OldTables) ->
+ F = fun
+ (Table) ->
+ case table_info(Table) of
+ undefined -> false;
+ Info -> {true, Info}
+ end
+ end,
+ lists:filtermap(F, ets:all()).
+
+table_info(Table) ->
+ TableInfo = ets:info(Table),
+ map_table_info(Table, TableInfo).
+
+map_table_info(_Table, undefined) ->
+ undefined;
+map_table_info(_Table, TableInfo) ->
+ F = fun
+ ({memory, MemWords}) ->
+ {memory, bytes(MemWords)};
+ (Other) ->
+ Other
+ end,
+ Info = lists:map(F, TableInfo),
+ {owner, OwnerPid} = lists:keyfind(owner, 1, Info),
+ case process_info(OwnerPid, registered_name) of
+ [] -> Info;
+ {registered_name, OwnerName} -> [{owner_name, OwnerName} | Info]
+ end.
+
+flatten(Procs) ->
+ maps:fold(fun(Name, Props, Rest) ->
+ [[{pid, Name} | Props] | Rest]
+ end, [], Procs).
+
+%%--------------------------------------------------------------------
+
+toplist(Key, Order, Count, List) ->
+ RevFun = case Order of
+ asc -> fun (L) -> L end;
+ desc -> fun lists:reverse/1
+ end,
+ Keyed = [toplist(Key, I) || I <- List],
+ Sorted = lists:sublist(RevFun(lists:keysort(1, Keyed)), Count),
+ [Info || {_, Info} <- Sorted].
+
+toplist(Key, Info) ->
+ % Do not crash if unknown sort key. Keep unsorted instead.
+ case lists:keyfind(Key, 1, Info) of
+ {Key, Val} -> {Val, Info};
+ false -> {undefined, Info}
+ end.
+
+bytes(Words) -> try
+ Words * erlang:system_info(wordsize)
+ catch
+ _:_ -> 0
+ end.
+
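+%% Attaches the gen_server2 internal buffer length when the process is
+%% tracked by rabbit_core_metrics; -1 acts as a sentinel for processes
+%% that are not gen_server2-based.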
+expand_gen_server2_info(Pid, Props) ->
+ case rabbit_core_metrics:get_gen_server2_stats(Pid) of
+ not_found ->
+ [{buffer_len, -1} | Props];
+ BufferLength ->
+ [{buffer_len, BufferLength} | Props]
+ end.
diff --git a/deps/rabbitmq_tracing/.gitignore b/deps/rabbitmq_tracing/.gitignore
new file mode 100644
index 0000000000..8ed19236ab
--- /dev/null
+++ b/deps/rabbitmq_tracing/.gitignore
@@ -0,0 +1,17 @@
+.sw?
+.*.sw?
+*.beam
+/.erlang.mk/
+/cover/
+/deps/
+/doc/
+/ebin/
+/escript/
+/escript.lock
+/logs/
+/plugins/
+/plugins.lock
+/sbin/
+/sbin.lock
+
+rabbitmq_tracing.d
diff --git a/deps/rabbitmq_tracing/.travis.yml b/deps/rabbitmq_tracing/.travis.yml
new file mode 100644
index 0000000000..88c8faba01
--- /dev/null
+++ b/deps/rabbitmq_tracing/.travis.yml
@@ -0,0 +1,61 @@
+# vim:sw=2:et:
+
+os: linux
+dist: xenial
+language: elixir
+notifications:
+ email:
+ recipients:
+ - alerts@rabbitmq.com
+ on_success: never
+ on_failure: always
+addons:
+ apt:
+ packages:
+ - awscli
+cache:
+ apt: true
+env:
+ global:
+ - secure: W790qqYNuP5r7OLnElmGMJhaa+0aKs8c5SBE4GUrCtzM/TVhSA636yu8AC40TU/qzBgISObx3fLY7mVK45bFetFiF4YTVlwld50CXjAZ0XLpmv1TcqU4hIwQ7U5OD76UAiYFckbIPxkAZ37sfZj2jcKBNR6obMMUxHd+eXm/XCE=
+ - secure: CDccdPeAR5bWYcKUzFZwSaUfPmdxBMZwfmArCFmxRntTyZ/ZvZLsVkPUrokLRwMtZSb9sdF2BhoJrfPQqHp8zqxFEYaRQIScBbiSkKHIcC6PVHvLwizQV5PfGnjmHcQ/b/Xdq/XtujYQkoRwYZZvcZ/kBOcRT6xjSBctMrCAF3c=
+
+ # $base_rmq_ref is used by rabbitmq-components.mk to select the
+ # appropriate branch for dependencies.
+ - base_rmq_ref=master
+
+elixir:
+ - '1.9'
+otp_release:
+ - '21.3'
+ - '22.2'
+
+install:
+ # This project being an Erlang one (we just set language to Elixir
+ # to ensure it is installed), we don't want Travis to run mix(1)
+ # automatically as it will break.
+ skip
+
+script:
+ # $current_rmq_ref is also used by rabbitmq-components.mk to select
+ # the appropriate branch for dependencies.
+ - make check-rabbitmq-components.mk
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+ - make xref
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+ - make tests
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+
+after_failure:
+ - |
+ cd "$TRAVIS_BUILD_DIR"
+ if test -d logs && test "$AWS_ACCESS_KEY_ID" && test "$AWS_SECRET_ACCESS_KEY"; then
+ archive_name="$(basename "$TRAVIS_REPO_SLUG")-$TRAVIS_JOB_NUMBER"
+
+ tar -c --transform "s/^logs/${archive_name}/" -f - logs | \
+ xz > "${archive_name}.tar.xz"
+
+ aws s3 cp "${archive_name}.tar.xz" s3://server-release-pipeline/travis-ci-logs/ \
+ --region eu-west-1 \
+ --acl public-read
+ fi
diff --git a/deps/rabbitmq_tracing/CODE_OF_CONDUCT.md b/deps/rabbitmq_tracing/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000000..08697906fd
--- /dev/null
+++ b/deps/rabbitmq_tracing/CODE_OF_CONDUCT.md
@@ -0,0 +1,44 @@
+# Contributor Code of Conduct
+
+As contributors and maintainers of this project, and in the interest of fostering an open
+and welcoming community, we pledge to respect all people who contribute through reporting
+issues, posting feature requests, updating documentation, submitting pull requests or
+patches, and other activities.
+
+We are committed to making participation in this project a harassment-free experience for
+everyone, regardless of level of experience, gender, gender identity and expression,
+sexual orientation, disability, personal appearance, body size, race, ethnicity, age,
+religion, or nationality.
+
+Examples of unacceptable behavior by participants include:
+
+ * The use of sexualized language or imagery
+ * Personal attacks
+ * Trolling or insulting/derogatory comments
+ * Public or private harassment
+ * Publishing others' private information, such as physical or electronic addresses,
+ without explicit permission
+ * Other unethical or unprofessional conduct
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments,
+commits, code, wiki edits, issues, and other contributions that are not aligned to this
+Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors
+that they deem inappropriate, threatening, offensive, or harmful.
+
+By adopting this Code of Conduct, project maintainers commit themselves to fairly and
+consistently applying these principles to every aspect of managing this project. Project
+maintainers who do not follow or enforce the Code of Conduct may be permanently removed
+from the project team.
+
+This Code of Conduct applies both within project spaces and in public spaces when an
+individual is representing the project or its community.
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by
+contacting a project maintainer at [info@rabbitmq.com](mailto:info@rabbitmq.com). All complaints will
+be reviewed and investigated and will result in a response that is deemed necessary and
+appropriate to the circumstances. Maintainers are obligated to maintain confidentiality
+with regard to the reporter of an incident.
+
+This Code of Conduct is adapted from the
+[Contributor Covenant](https://contributor-covenant.org), version 1.3.0, available at
+[contributor-covenant.org/version/1/3/0/](https://contributor-covenant.org/version/1/3/0/)
diff --git a/deps/rabbitmq_tracing/CONTRIBUTING.md b/deps/rabbitmq_tracing/CONTRIBUTING.md
new file mode 100644
index 0000000000..42af1f7517
--- /dev/null
+++ b/deps/rabbitmq_tracing/CONTRIBUTING.md
@@ -0,0 +1,123 @@
+Thank you for using RabbitMQ and for taking the time to contribute to the project.
+This document has two main parts:
+
+ * when and how to file GitHub issues for RabbitMQ projects
+ * how to submit pull requests
+
+They are intended to save you and RabbitMQ maintainers some time, so please
+take a moment to read through them.
+
+## Overview
+
+### GitHub issues
+
+The RabbitMQ team uses GitHub issues for _specific actionable items_ that
+engineers can work on. This assumes the following:
+
+* GitHub issues are not used for questions, investigations, root cause
+ analysis, discussions of potential issues, etc. (as defined by this team)
+* Enough information is provided by the reporter for maintainers to work with
+
+The team receives many questions through various venues every single
+day. Frequently, these questions do not include the necessary details
+the team needs to begin useful work. GitHub issues can very quickly
+turn into a something impossible to navigate and make sense
+of. Because of this, questions, investigations, root cause analysis,
+and discussions of potential features are all considered to be
+[mailing list][rmq-users] material. If you are unsure where to begin,
+the [RabbitMQ users mailing list][rmq-users] is the right place.
+
+Getting all the details necessary to reproduce an issue, make a
+conclusion or even form a hypothesis about what's happening can take a
+fair amount of time. Please help others help you by providing a way to
+reproduce the behavior you're observing, or at least sharing as much
+relevant information as possible on the [RabbitMQ users mailing
+list][rmq-users].
+
+Please provide versions of the software used:
+
+ * RabbitMQ server
+ * Erlang
+ * Operating system version (and distribution, if applicable)
+ * All client libraries used
+ * RabbitMQ plugins (if applicable)
+
+The following information greatly helps in investigating and reproducing issues:
+
+ * RabbitMQ server logs
+ * A code example or terminal transcript that can be used to reproduce
+ * Full exception stack traces (a single line message is not enough!)
+ * `rabbitmqctl report` and `rabbitmqctl environment` output
+ * Other relevant details about the environment and workload, e.g. a traffic capture
+ * Feel free to edit out hostnames and other potentially sensitive information.
+
+To make collecting much of this and other environment information easier, use
+the [`rabbitmq-collect-env`][rmq-collect-env] script. It will produce an archive with
+server logs, operating system logs, output of certain diagnostics commands and so on.
+Please note that **no effort is made to scrub any information that may be sensitive**.
+
+### Pull Requests
+
+RabbitMQ projects use pull requests to discuss, collaborate on and accept code contributions.
+Pull requests are the primary place for discussing code changes.
+
+Here's the recommended workflow:
+
+ * [Fork the repository][github-fork] or repositories you plan on contributing to. If multiple
+ repositories are involved in addressing the same issue, please use the same branch name
+ in each repository
+ * Create a branch with a descriptive name in the relevant repositories
+ * Make your changes, run tests (usually with `make tests`), commit with a
+ [descriptive message][git-commit-msgs], push to your fork
+ * Submit pull requests with an explanation of what has been changed and **why**
+ * Submit a filled out and signed [Contributor Agreement][ca-agreement] if needed (see below)
+ * Be patient. We will get to your pull request eventually
+
+If what you are going to work on is a substantial change, please first
+ask the core team for their opinion on the [RabbitMQ users mailing list][rmq-users].
+
+## Running Tests
+
+To run a "fast suite" (a subset of tests):
+
+ make ct-fast
+
+To run a "slow suite" (a subset of tests that take much longer to run):
+
+ make ct-slow
+
+To run a particular suite:
+
+ make ct-$suite_name
+
+for example, to run the `backing_queue` suite:
+
+ make ct-backing_queue
+
+Finally,
+
+ make tests
+
+will run all suites.
+
+## Code of Conduct
+
+See [CODE_OF_CONDUCT.md](./CODE_OF_CONDUCT.md).
+
+## Contributor Agreement
+
+If you want to contribute a non-trivial change, please submit a signed
+copy of our [Contributor Agreement][ca-agreement] around the time you
+submit your pull request. This will make it much easier (in some
+cases, possible) for the RabbitMQ team at Pivotal to merge your
+contribution.
+
+## Where to Ask Questions
+
+If something isn't clear, feel free to ask on our [mailing list][rmq-users].
+
+[rmq-collect-env]: https://github.com/rabbitmq/support-tools/blob/master/scripts/rabbitmq-collect-env
+[git-commit-msgs]: https://chris.beams.io/posts/git-commit/
+[rmq-users]: https://groups.google.com/forum/#!forum/rabbitmq-users
+[ca-agreement]: https://cla.pivotal.io/sign/rabbitmq
+[github-fork]: https://help.github.com/articles/fork-a-repo/
diff --git a/deps/rabbitmq_tracing/LICENSE b/deps/rabbitmq_tracing/LICENSE
new file mode 100644
index 0000000000..bd34c8520c
--- /dev/null
+++ b/deps/rabbitmq_tracing/LICENSE
@@ -0,0 +1,5 @@
+This package, the RabbitMQ Tracing plugin, is licensed under the MPL 2.0. For the
+MPL 2.0, please see LICENSE-MPL-RabbitMQ.
+
+If you have any questions regarding licensing, please contact us at
+info@rabbitmq.com.
diff --git a/deps/rabbitmq_tracing/LICENSE-MPL-RabbitMQ b/deps/rabbitmq_tracing/LICENSE-MPL-RabbitMQ
new file mode 100644
index 0000000000..14e2f777f6
--- /dev/null
+++ b/deps/rabbitmq_tracing/LICENSE-MPL-RabbitMQ
@@ -0,0 +1,373 @@
+Mozilla Public License Version 2.0
+==================================
+
+1. Definitions
+--------------
+
+1.1. "Contributor"
+ means each individual or legal entity that creates, contributes to
+ the creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+ means the combination of the Contributions of others (if any) used
+ by a Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+ means Source Code Form to which the initial Contributor has attached
+ the notice in Exhibit A, the Executable Form of such Source Code
+ Form, and Modifications of such Source Code Form, in each case
+ including portions thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ (a) that the initial Contributor has attached the notice described
+ in Exhibit B to the Covered Software; or
+
+ (b) that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the
+ terms of a Secondary License.
+
+1.6. "Executable Form"
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+ means a work that combines Covered Software with other material, in
+ a separate file or files, that is not Covered Software.
+
+1.8. "License"
+ means this document.
+
+1.9. "Licensable"
+ means having the right to grant, to the maximum extent possible,
+ whether at the time of the initial grant or subsequently, any and
+ all of the rights conveyed by this License.
+
+1.10. "Modifications"
+ means any of the following:
+
+ (a) any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered
+ Software; or
+
+ (b) any new file in Source Code Form that contains any Covered
+ Software.
+
+1.11. "Patent Claims" of a Contributor
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the
+ License, by the making, using, selling, offering for sale, having
+ made, import, or transfer of either its Contributions or its
+ Contributor Version.
+
+1.12. "Secondary License"
+ means either the GNU General Public License, Version 2.0, the GNU
+ Lesser General Public License, Version 2.1, the GNU Affero General
+ Public License, Version 3.0, or any later versions of those
+ licenses.
+
+1.13. "Source Code Form"
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that
+ controls, is controlled by, or is under common control with You. For
+ purposes of this definition, "control" means (a) the power, direct
+ or indirect, to cause the direction or management of such entity,
+ whether by contract or otherwise, or (b) ownership of more than
+ fifty percent (50%) of the outstanding shares or beneficial
+ ownership of such entity.
+
+2. License Grants and Conditions
+--------------------------------
+
+2.1. Grants
+
+Each Contributor hereby grants You a world-wide, royalty-free,
+non-exclusive license:
+
+(a) under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+(b) under Patent Claims of such Contributor to make, use, sell, offer
+ for sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+The licenses granted in Section 2.1 with respect to any Contribution
+become effective for each Contribution on the date the Contributor first
+distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+The licenses granted in this Section 2 are the only rights granted under
+this License. No additional rights or licenses will be implied from the
+distribution or licensing of Covered Software under this License.
+Notwithstanding Section 2.1(b) above, no patent license is granted by a
+Contributor:
+
+(a) for any code that a Contributor has removed from Covered Software;
+ or
+
+(b) for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+(c) under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+This License does not grant any rights in the trademarks, service marks,
+or logos of any Contributor (except as may be necessary to comply with
+the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+No Contributor makes additional grants as a result of Your choice to
+distribute the Covered Software under a subsequent version of this
+License (see Section 10.2) or under the terms of a Secondary License (if
+permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+Each Contributor represents that the Contributor believes its
+Contributions are its original creation(s) or it has sufficient rights
+to grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+This License is not intended to limit any rights You have under
+applicable copyright doctrines of fair use, fair dealing, or other
+equivalents.
+
+2.7. Conditions
+
+Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
+in Section 2.1.
+
+3. Responsibilities
+-------------------
+
+3.1. Distribution of Source Form
+
+All distribution of Covered Software in Source Code Form, including any
+Modifications that You create or to which You contribute, must be under
+the terms of this License. You must inform recipients that the Source
+Code Form of the Covered Software is governed by the terms of this
+License, and how they can obtain a copy of this License. You may not
+attempt to alter or restrict the recipients' rights in the Source Code
+Form.
+
+3.2. Distribution of Executable Form
+
+If You distribute Covered Software in Executable Form then:
+
+(a) such Covered Software must also be made available in Source Code
+ Form, as described in Section 3.1, and You must inform recipients of
+ the Executable Form how they can obtain a copy of such Source Code
+ Form by reasonable means in a timely manner, at a charge no more
+ than the cost of distribution to the recipient; and
+
+(b) You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter
+ the recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+You may create and distribute a Larger Work under terms of Your choice,
+provided that You also comply with the requirements of this License for
+the Covered Software. If the Larger Work is a combination of Covered
+Software with a work governed by one or more Secondary Licenses, and the
+Covered Software is not Incompatible With Secondary Licenses, this
+License permits You to additionally distribute such Covered Software
+under the terms of such Secondary License(s), so that the recipient of
+the Larger Work may, at their option, further distribute the Covered
+Software under the terms of either this License or such Secondary
+License(s).
+
+3.4. Notices
+
+You may not remove or alter the substance of any license notices
+(including copyright notices, patent notices, disclaimers of warranty,
+or limitations of liability) contained within the Source Code Form of
+the Covered Software, except that You may alter any license notices to
+the extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+You may choose to offer, and to charge a fee for, warranty, support,
+indemnity or liability obligations to one or more recipients of Covered
+Software. However, You may do so only on Your own behalf, and not on
+behalf of any Contributor. You must make it absolutely clear that any
+such warranty, support, indemnity, or liability obligation is offered by
+You alone, and You hereby agree to indemnify every Contributor for any
+liability incurred by such Contributor as a result of warranty, support,
+indemnity or liability terms You offer. You may include additional
+disclaimers of warranty and limitations of liability specific to any
+jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+---------------------------------------------------
+
+If it is impossible for You to comply with any of the terms of this
+License with respect to some or all of the Covered Software due to
+statute, judicial order, or regulation then You must: (a) comply with
+the terms of this License to the maximum extent possible; and (b)
+describe the limitations and the code they affect. Such description must
+be placed in a text file included with all distributions of the Covered
+Software under this License. Except to the extent prohibited by statute
+or regulation, such description must be sufficiently detailed for a
+recipient of ordinary skill to be able to understand it.
+
+5. Termination
+--------------
+
+5.1. The rights granted under this License will terminate automatically
+if You fail to comply with any of its terms. However, if You become
+compliant, then the rights granted under this License from a particular
+Contributor are reinstated (a) provisionally, unless and until such
+Contributor explicitly and finally terminates Your grants, and (b) on an
+ongoing basis, if such Contributor fails to notify You of the
+non-compliance by some reasonable means prior to 60 days after You have
+come back into compliance. Moreover, Your grants from a particular
+Contributor are reinstated on an ongoing basis if such Contributor
+notifies You of the non-compliance by some reasonable means, this is the
+first time You have received notice of non-compliance with this License
+from such Contributor, and You become compliant prior to 30 days after
+Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+infringement claim (excluding declaratory judgment actions,
+counter-claims, and cross-claims) alleging that a Contributor Version
+directly or indirectly infringes any patent, then the rights granted to
+You by any and all Contributors for the Covered Software under Section
+2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all
+end user license agreements (excluding distributors and resellers) which
+have been validly granted by You or Your distributors under this License
+prior to termination shall survive termination.
+
+************************************************************************
+* *
+* 6. Disclaimer of Warranty *
+* ------------------------- *
+* *
+* Covered Software is provided under this License on an "as is" *
+* basis, without warranty of any kind, either expressed, implied, or *
+* statutory, including, without limitation, warranties that the *
+* Covered Software is free of defects, merchantable, fit for a *
+* particular purpose or non-infringing. The entire risk as to the *
+* quality and performance of the Covered Software is with You. *
+* Should any Covered Software prove defective in any respect, You *
+* (not any Contributor) assume the cost of any necessary servicing, *
+* repair, or correction. This disclaimer of warranty constitutes an *
+* essential part of this License. No use of any Covered Software is *
+* authorized under this License except under this disclaimer. *
+* *
+************************************************************************
+
+************************************************************************
+* *
+* 7. Limitation of Liability *
+* -------------------------- *
+* *
+* Under no circumstances and under no legal theory, whether tort *
+* (including negligence), contract, or otherwise, shall any *
+* Contributor, or anyone who distributes Covered Software as *
+* permitted above, be liable to You for any direct, indirect, *
+* special, incidental, or consequential damages of any character *
+* including, without limitation, damages for lost profits, loss of *
+* goodwill, work stoppage, computer failure or malfunction, or any *
+* and all other commercial damages or losses, even if such party *
+* shall have been informed of the possibility of such damages. This *
+* limitation of liability shall not apply to liability for death or *
+* personal injury resulting from such party's negligence to the *
+* extent applicable law prohibits such limitation. Some *
+* jurisdictions do not allow the exclusion or limitation of *
+* incidental or consequential damages, so this exclusion and *
+* limitation may not apply to You. *
+* *
+************************************************************************
+
+8. Litigation
+-------------
+
+Any litigation relating to this License may be brought only in the
+courts of a jurisdiction where the defendant maintains its principal
+place of business and such litigation shall be governed by laws of that
+jurisdiction, without reference to its conflict-of-law provisions.
+Nothing in this Section shall prevent a party's ability to bring
+cross-claims or counter-claims.
+
+9. Miscellaneous
+----------------
+
+This License represents the complete agreement concerning the subject
+matter hereof. If any provision of this License is held to be
+unenforceable, such provision shall be reformed only to the extent
+necessary to make it enforceable. Any law or regulation which provides
+that the language of a contract shall be construed against the drafter
+shall not be used to construe this License against a Contributor.
+
+10. Versions of the License
+---------------------------
+
+10.1. New Versions
+
+Mozilla Foundation is the license steward. Except as provided in Section
+10.3, no one other than the license steward has the right to modify or
+publish new versions of this License. Each version will be given a
+distinguishing version number.
+
+10.2. Effect of New Versions
+
+You may distribute the Covered Software under the terms of the version
+of the License under which You originally received the Covered Software,
+or under the terms of any subsequent version published by the license
+steward.
+
+10.3. Modified Versions
+
+If you create software not governed by this License, and you want to
+create a new license for such software, you may create and use a
+modified version of this License if you rename the license and remove
+any references to the name of the license steward (except to note that
+such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+Licenses
+
+If You choose to distribute Source Code Form that is Incompatible With
+Secondary Licenses under the terms of this version of the License, the
+notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+-------------------------------------------
+
+ This Source Code Form is subject to the terms of the Mozilla Public
+ License, v. 2.0. If a copy of the MPL was not distributed with this
+ file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular
+file, then You may include the notice in a location (such as a LICENSE
+file in a relevant directory) where a recipient would be likely to look
+for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+---------------------------------------------------------
+
+ This Source Code Form is "Incompatible With Secondary Licenses", as
+ defined by the Mozilla Public License, v. 2.0.
diff --git a/deps/rabbitmq_tracing/Makefile b/deps/rabbitmq_tracing/Makefile
new file mode 100644
index 0000000000..8ff32ddd79
--- /dev/null
+++ b/deps/rabbitmq_tracing/Makefile
@@ -0,0 +1,30 @@
+PROJECT = rabbitmq_tracing
+PROJECT_DESCRIPTION = RabbitMQ message logging / tracing
+PROJECT_MOD = rabbit_tracing_app
+
+define PROJECT_ENV
+[
+ {directory, "/var/tmp/rabbitmq-tracing"},
+ {username, <<"guest">>},
+ {password, <<"guest">>}
+ ]
+endef
+
+define PROJECT_APP_EXTRA_KEYS
+ {broker_version_requirements, []}
+endef
+
+DEPS = rabbit_common rabbit rabbitmq_management
+TEST_DEPS = rabbitmq_ct_helpers rabbitmq_ct_client_helpers
+
+DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-plugin.mk
+DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk
+
+# FIXME: Use erlang.mk patched for RabbitMQ, while waiting for PRs to be
+# reviewed and merged.
+
+ERLANG_MK_REPO = https://github.com/rabbitmq/erlang.mk.git
+ERLANG_MK_COMMIT = rabbitmq-tmp
+
+include rabbitmq-components.mk
+include erlang.mk
diff --git a/deps/rabbitmq_tracing/README.md b/deps/rabbitmq_tracing/README.md
new file mode 100644
index 0000000000..9b764652e9
--- /dev/null
+++ b/deps/rabbitmq_tracing/README.md
@@ -0,0 +1,67 @@
+# RabbitMQ (Message) Tracing Plugin
+
+This is an opinionated tracing plugin that extends the RabbitMQ management UI.
+It logs messages passing through vhosts that [have tracing enabled](https://www.rabbitmq.com/firehose.html)
+to a log file.
+
+## Usage
+
+This plugin ships with RabbitMQ. Enable it with `rabbitmq-plugins enable`,
+and a "Tracing" tab will appear in the management UI.
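+
+For example:
+
+```
+rabbitmq-plugins enable rabbitmq_tracing
+```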
+
+
+## Configuration
+
+Configuration options are under the `rabbitmq_tracing` app (config section,
+if you will):
+
+ * `directory`: controls where the log files go (default: `"/var/tmp/rabbitmq-tracing"`)
+ * `username`: username to be used by tracing event consumers (default: `<<"guest">>`)
+ * `password`: password to be used by tracing event consumers (default: `<<"guest">>`)
+
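+As a sketch, overriding these defaults in the node's `advanced.config`
+(classic Erlang-term configuration) might look like the following; the
+directory and credentials here are illustrative placeholders, not
+recommendations:
+
+```
+[
+  {rabbitmq_tracing, [
+    {directory, "/var/tmp/rabbitmq-tracing"},
+    {username,  <<"tracer">>},
+    {password,  <<"s3kr3t">>}
+  ]}
+].
+```
+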
+## Performance
+
+TL;DR: this plugin is intended to be used in development and QA environments.
+It will increase RAM consumption and CPU usage of a node.
+
+On a few-year-old developer-grade machine, rabbitmq-tracing can write
+about 2000 msg/s to a log file. Be careful using rabbitmq-tracing if you
+expect to capture more messages than this. Any messages that can't be
+logged are queued.
+
+The code that serves the log files over HTTP is not at all
+sophisticated or efficient: it loads the whole log into memory. If you
+have large log files, you may wish to transfer them off the server in
+some other way.
+
+## HTTP API Endpoints
+
+```
+GET /api/traces
+GET /api/traces/node/<node>
+GET /api/traces/<vhost>
+GET /api/traces/node/<node>/<vhost>
+GET PUT DELETE /api/traces/<vhost>/<name>
+GET PUT DELETE /api/traces/node/<node>/<vhost>/<name>
+GET /api/trace-files
+GET /api/trace-files/node/<node>
+GET DELETE /api/trace-files/<name> (GET returns the file as text/plain)
+GET DELETE /api/trace-files/node/<node>/<name> (GET returns the file as text/plain)
+```
+
+Example of how to create a trace using the [RabbitMQ HTTP API](https://www.rabbitmq.com/management.html):
+
+```
+curl -i -u guest:guest -H "content-type:application/json" -XPUT \
+ http://localhost:15672/api/traces/%2f/my-trace \
+ -d'{"format":"text","pattern":"#", "max_payload_bytes":1000,
+ "tracer_connection_username":"guest", "tracer_connection_password":"guest"}'
+```
+
+The `format` and `pattern` fields are mandatory.
+
+`tracer_connection_username` and `tracer_connection_password` control what credentials the tracing
+connection will use. Both are optional and default to the configured
+plugin values.
+
+`max_payload_bytes` is optional (omit it to prevent payload truncation).
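+
+To complete the lifecycle of the example above, the same API can stop the
+trace and fetch its log. This sketch assumes the log file is named after
+the trace with a `.log` suffix (`my-trace.log`); check `GET /api/trace-files`
+for the actual name:
+
+```
+# stop (delete) the running trace
+curl -i -u guest:guest -XDELETE http://localhost:15672/api/traces/%2f/my-trace
+
+# download the log file (returned as text/plain)
+curl -u guest:guest http://localhost:15672/api/trace-files/my-trace.log
+
+# delete the log file once it is no longer needed
+curl -i -u guest:guest -XDELETE http://localhost:15672/api/trace-files/my-trace.log
+```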
diff --git a/deps/rabbitmq_tracing/erlang.mk b/deps/rabbitmq_tracing/erlang.mk
new file mode 100644
index 0000000000..fce4be0b0a
--- /dev/null
+++ b/deps/rabbitmq_tracing/erlang.mk
@@ -0,0 +1,7808 @@
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+.PHONY: all app apps deps search rel relup docs install-docs check tests clean distclean help erlang-mk
+
+ERLANG_MK_FILENAME := $(realpath $(lastword $(MAKEFILE_LIST)))
+export ERLANG_MK_FILENAME
+
+ERLANG_MK_VERSION = 2019.07.01-40-geb3e4b0
+ERLANG_MK_WITHOUT =
+
+# Make 3.81 and 3.82 are deprecated.
+
+ifeq ($(MAKELEVEL)$(MAKE_VERSION),03.81)
+$(warning Please upgrade to GNU Make 4 or later: https://erlang.mk/guide/installation.html)
+endif
+
+ifeq ($(MAKELEVEL)$(MAKE_VERSION),03.82)
+$(warning Please upgrade to GNU Make 4 or later: https://erlang.mk/guide/installation.html)
+endif
+
+# Core configuration.
+
+PROJECT ?= $(notdir $(CURDIR))
+PROJECT := $(strip $(PROJECT))
+
+PROJECT_VERSION ?= rolling
+PROJECT_MOD ?= $(PROJECT)_app
+PROJECT_ENV ?= []
+
+# Verbosity.
+
+V ?= 0
+
+verbose_0 = @
+verbose_2 = set -x;
+verbose = $(verbose_$(V))
+
+ifeq ($(V),3)
+SHELL := $(SHELL) -x
+endif
+
+gen_verbose_0 = @echo " GEN " $@;
+gen_verbose_2 = set -x;
+gen_verbose = $(gen_verbose_$(V))
+
+gen_verbose_esc_0 = @echo " GEN " $$@;
+gen_verbose_esc_2 = set -x;
+gen_verbose_esc = $(gen_verbose_esc_$(V))
+
+# Temporary files directory.
+
+ERLANG_MK_TMP ?= $(CURDIR)/.erlang.mk
+export ERLANG_MK_TMP
+
+# "erl" command.
+
+ERL = erl +A1 -noinput -boot no_dot_erlang
+
+# Platform detection.
+
+ifeq ($(PLATFORM),)
+UNAME_S := $(shell uname -s)
+
+ifeq ($(UNAME_S),Linux)
+PLATFORM = linux
+else ifeq ($(UNAME_S),Darwin)
+PLATFORM = darwin
+else ifeq ($(UNAME_S),SunOS)
+PLATFORM = solaris
+else ifeq ($(UNAME_S),GNU)
+PLATFORM = gnu
+else ifeq ($(UNAME_S),FreeBSD)
+PLATFORM = freebsd
+else ifeq ($(UNAME_S),NetBSD)
+PLATFORM = netbsd
+else ifeq ($(UNAME_S),OpenBSD)
+PLATFORM = openbsd
+else ifeq ($(UNAME_S),DragonFly)
+PLATFORM = dragonfly
+else ifeq ($(shell uname -o),Msys)
+PLATFORM = msys2
+else
+$(error Unable to detect platform. Please open a ticket with the output of uname -a.)
+endif
+
+export PLATFORM
+endif
+
+# Core targets.
+
+all:: deps app rel
+
+# Noop to avoid a Make warning when there's nothing to do.
+rel::
+ $(verbose) :
+
+relup:: deps app
+
+check:: tests
+
+clean:: clean-crashdump
+
+clean-crashdump:
+ifneq ($(wildcard erl_crash.dump),)
+ $(gen_verbose) rm -f erl_crash.dump
+endif
+
+distclean:: clean distclean-tmp
+
+$(ERLANG_MK_TMP):
+ $(verbose) mkdir -p $(ERLANG_MK_TMP)
+
+distclean-tmp:
+ $(gen_verbose) rm -rf $(ERLANG_MK_TMP)
+
+help::
+ $(verbose) printf "%s\n" \
+ "erlang.mk (version $(ERLANG_MK_VERSION)) is distributed under the terms of the ISC License." \
+ "Copyright (c) 2013-2016 Loïc Hoguin <essen@ninenines.eu>" \
+ "" \
+ "Usage: [V=1] $(MAKE) [target]..." \
+ "" \
+ "Core targets:" \
+ " all Run deps, app and rel targets in that order" \
+ " app Compile the project" \
+ " deps Fetch dependencies (if needed) and compile them" \
+ " fetch-deps Fetch dependencies recursively (if needed) without compiling them" \
+ " list-deps List dependencies recursively on stdout" \
+ " search q=... Search for a package in the built-in index" \
+ " rel Build a release for this project, if applicable" \
+ " docs Build the documentation for this project" \
+ " install-docs Install the man pages for this project" \
+ " check Compile and run all tests and analysis for this project" \
+ " tests Run the tests for this project" \
+ " clean Delete temporary and output files from most targets" \
+ " distclean Delete all temporary and output files" \
+ " help Display this help and exit" \
+ " erlang-mk Update erlang.mk to the latest version"
+
+# Core functions.
+
+empty :=
+space := $(empty) $(empty)
+tab := $(empty) $(empty)
+comma := ,
+
+define newline
+
+
+endef
+
+define comma_list
+$(subst $(space),$(comma),$(strip $(1)))
+endef
+
+define escape_dquotes
+$(subst ",\",$1)
+endef
+
+# Adding erlang.mk to make Erlang scripts that call init:get_plain_arguments() happy.
+define erlang
+$(ERL) $2 -pz $(ERLANG_MK_TMP)/rebar/ebin -eval "$(subst $(newline),,$(call escape_dquotes,$1))" -- erlang.mk
+endef
+
+ifeq ($(PLATFORM),msys2)
+core_native_path = $(shell cygpath -m $1)
+else
+core_native_path = $1
+endif
+
+core_http_get = curl -Lf$(if $(filter-out 0,$(V)),,s)o $(call core_native_path,$1) $2
+
+core_eq = $(and $(findstring $(1),$(2)),$(findstring $(2),$(1)))
+
+# We skip files that contain spaces because they end up causing issues.
+core_find = $(if $(wildcard $1),$(shell find $(1:%/=%) \( -type l -o -type f \) -name $(subst *,\*,$2) | grep -v " "))
+
+core_lc = $(subst A,a,$(subst B,b,$(subst C,c,$(subst D,d,$(subst E,e,$(subst F,f,$(subst G,g,$(subst H,h,$(subst I,i,$(subst J,j,$(subst K,k,$(subst L,l,$(subst M,m,$(subst N,n,$(subst O,o,$(subst P,p,$(subst Q,q,$(subst R,r,$(subst S,s,$(subst T,t,$(subst U,u,$(subst V,v,$(subst W,w,$(subst X,x,$(subst Y,y,$(subst Z,z,$(1)))))))))))))))))))))))))))
+
+core_ls = $(filter-out $(1),$(shell echo $(1)))
+
+# @todo Use a solution that does not require using perl.
+core_relpath = $(shell perl -e 'use File::Spec; print File::Spec->abs2rel(@ARGV) . "\n"' $1 $2)
+
+define core_render
+ printf -- '$(subst $(newline),\n,$(subst %,%%,$(subst ','\'',$(subst $(tab),$(WS),$(call $(1))))))\n' > $(2)
+endef
+
+# Automated update.
+
+ERLANG_MK_REPO ?= https://github.com/ninenines/erlang.mk
+ERLANG_MK_COMMIT ?=
+ERLANG_MK_BUILD_CONFIG ?= build.config
+ERLANG_MK_BUILD_DIR ?= .erlang.mk.build
+
+erlang-mk: WITHOUT ?= $(ERLANG_MK_WITHOUT)
+erlang-mk:
+ifdef ERLANG_MK_COMMIT
+ $(verbose) git clone $(ERLANG_MK_REPO) $(ERLANG_MK_BUILD_DIR)
+ $(verbose) cd $(ERLANG_MK_BUILD_DIR) && git checkout $(ERLANG_MK_COMMIT)
+else
+ $(verbose) git clone --depth 1 $(ERLANG_MK_REPO) $(ERLANG_MK_BUILD_DIR)
+endif
+ $(verbose) if [ -f $(ERLANG_MK_BUILD_CONFIG) ]; then cp $(ERLANG_MK_BUILD_CONFIG) $(ERLANG_MK_BUILD_DIR)/build.config; fi
+ $(gen_verbose) $(MAKE) --no-print-directory -C $(ERLANG_MK_BUILD_DIR) WITHOUT='$(strip $(WITHOUT))' UPGRADE=1
+ $(verbose) cp $(ERLANG_MK_BUILD_DIR)/erlang.mk ./erlang.mk
+ $(verbose) rm -rf $(ERLANG_MK_BUILD_DIR)
+ $(verbose) rm -rf $(ERLANG_MK_TMP)
+
+# The erlang.mk package index is bundled in the default erlang.mk build.
+# Search for the string "copyright" to skip to the rest of the code.
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-kerl
+
+KERL_INSTALL_DIR ?= $(HOME)/erlang
+
+ifeq ($(strip $(KERL)),)
+KERL := $(ERLANG_MK_TMP)/kerl/kerl
+endif
+
+KERL_DIR = $(ERLANG_MK_TMP)/kerl
+
+export KERL
+
+KERL_GIT ?= https://github.com/kerl/kerl
+KERL_COMMIT ?= master
+
+KERL_MAKEFLAGS ?=
+
+OTP_GIT ?= https://github.com/erlang/otp
+
+define kerl_otp_target
+$(KERL_INSTALL_DIR)/$(1): $(KERL)
+ $(verbose) if [ ! -d $$@ ]; then \
+ MAKEFLAGS="$(KERL_MAKEFLAGS)" $(KERL) build git $(OTP_GIT) $(1) $(1); \
+ $(KERL) install $(1) $(KERL_INSTALL_DIR)/$(1); \
+ fi
+endef
+
+define kerl_hipe_target
+$(KERL_INSTALL_DIR)/$1-native: $(KERL)
+ $(verbose) if [ ! -d $$@ ]; then \
+ KERL_CONFIGURE_OPTIONS=--enable-native-libs \
+ MAKEFLAGS="$(KERL_MAKEFLAGS)" $(KERL) build git $(OTP_GIT) $1 $1-native; \
+ $(KERL) install $1-native $(KERL_INSTALL_DIR)/$1-native; \
+ fi
+endef
+
+$(KERL): $(KERL_DIR)
+
+$(KERL_DIR): | $(ERLANG_MK_TMP)
+ $(gen_verbose) git clone --depth 1 $(KERL_GIT) $(ERLANG_MK_TMP)/kerl
+ $(verbose) cd $(ERLANG_MK_TMP)/kerl && git checkout $(KERL_COMMIT)
+ $(verbose) chmod +x $(KERL)
+
+distclean:: distclean-kerl
+
+distclean-kerl:
+ $(gen_verbose) rm -rf $(KERL_DIR)
+
+# Allow users to select which version of Erlang/OTP to use for a project.
+
+ifneq ($(strip $(LATEST_ERLANG_OTP)),)
+# In some environments it is necessary to filter out master.
+ERLANG_OTP := $(notdir $(lastword $(sort\
+ $(filter-out $(KERL_INSTALL_DIR)/master $(KERL_INSTALL_DIR)/OTP_R%,\
+ $(filter-out %-rc1 %-rc2 %-rc3,$(wildcard $(KERL_INSTALL_DIR)/*[^-native]))))))
+endif
+
+ERLANG_OTP ?=
+ERLANG_HIPE ?=
+
+# Use kerl to enforce a specific Erlang/OTP version for a project.
+ifneq ($(strip $(ERLANG_OTP)),)
+export PATH := $(KERL_INSTALL_DIR)/$(ERLANG_OTP)/bin:$(PATH)
+SHELL := env PATH=$(PATH) $(SHELL)
+$(eval $(call kerl_otp_target,$(ERLANG_OTP)))
+
+# Build Erlang/OTP only if it doesn't already exist.
+ifeq ($(wildcard $(KERL_INSTALL_DIR)/$(ERLANG_OTP))$(BUILD_ERLANG_OTP),)
+$(info Building Erlang/OTP $(ERLANG_OTP)... Please wait...)
+$(shell $(MAKE) $(KERL_INSTALL_DIR)/$(ERLANG_OTP) ERLANG_OTP=$(ERLANG_OTP) BUILD_ERLANG_OTP=1 >&2)
+endif
+
+else
+# Same for a HiPE enabled VM.
+ifneq ($(strip $(ERLANG_HIPE)),)
+export PATH := $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native/bin:$(PATH)
+SHELL := env PATH=$(PATH) $(SHELL)
+$(eval $(call kerl_hipe_target,$(ERLANG_HIPE)))
+
+# Build Erlang/OTP only if it doesn't already exist.
+ifeq ($(wildcard $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native)$(BUILD_ERLANG_OTP),)
+$(info Building HiPE-enabled Erlang/OTP $(ERLANG_HIPE)... Please wait...)
+$(shell $(MAKE) $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native ERLANG_HIPE=$(ERLANG_HIPE) BUILD_ERLANG_OTP=1 >&2)
+endif
+
+endif
+endif
+
+PACKAGES += aberth
+pkg_aberth_name = aberth
+pkg_aberth_description = Generic BERT-RPC server in Erlang
+pkg_aberth_homepage = https://github.com/a13x/aberth
+pkg_aberth_fetch = git
+pkg_aberth_repo = https://github.com/a13x/aberth
+pkg_aberth_commit = master
+
+PACKAGES += active
+pkg_active_name = active
+pkg_active_description = Active development for Erlang: rebuild and reload source/binary files while the VM is running
+pkg_active_homepage = https://github.com/proger/active
+pkg_active_fetch = git
+pkg_active_repo = https://github.com/proger/active
+pkg_active_commit = master
+
+PACKAGES += actordb_core
+pkg_actordb_core_name = actordb_core
+pkg_actordb_core_description = ActorDB main source
+pkg_actordb_core_homepage = http://www.actordb.com/
+pkg_actordb_core_fetch = git
+pkg_actordb_core_repo = https://github.com/biokoda/actordb_core
+pkg_actordb_core_commit = master
+
+PACKAGES += actordb_thrift
+pkg_actordb_thrift_name = actordb_thrift
+pkg_actordb_thrift_description = Thrift API for ActorDB
+pkg_actordb_thrift_homepage = http://www.actordb.com/
+pkg_actordb_thrift_fetch = git
+pkg_actordb_thrift_repo = https://github.com/biokoda/actordb_thrift
+pkg_actordb_thrift_commit = master
+
+PACKAGES += aleppo
+pkg_aleppo_name = aleppo
+pkg_aleppo_description = Alternative Erlang Pre-Processor
+pkg_aleppo_homepage = https://github.com/ErlyORM/aleppo
+pkg_aleppo_fetch = git
+pkg_aleppo_repo = https://github.com/ErlyORM/aleppo
+pkg_aleppo_commit = master
+
+PACKAGES += alog
+pkg_alog_name = alog
+pkg_alog_description = Simply the best logging framework for Erlang
+pkg_alog_homepage = https://github.com/siberian-fast-food/alogger
+pkg_alog_fetch = git
+pkg_alog_repo = https://github.com/siberian-fast-food/alogger
+pkg_alog_commit = master
+
+PACKAGES += amqp_client
+pkg_amqp_client_name = amqp_client
+pkg_amqp_client_description = RabbitMQ Erlang AMQP client
+pkg_amqp_client_homepage = https://www.rabbitmq.com/erlang-client-user-guide.html
+pkg_amqp_client_fetch = git
+pkg_amqp_client_repo = https://github.com/rabbitmq/rabbitmq-erlang-client.git
+pkg_amqp_client_commit = master
+
+PACKAGES += annotations
+pkg_annotations_name = annotations
+pkg_annotations_description = Simple code instrumentation utilities
+pkg_annotations_homepage = https://github.com/hyperthunk/annotations
+pkg_annotations_fetch = git
+pkg_annotations_repo = https://github.com/hyperthunk/annotations
+pkg_annotations_commit = master
+
+PACKAGES += antidote
+pkg_antidote_name = antidote
+pkg_antidote_description = Large-scale computation without synchronisation
+pkg_antidote_homepage = https://syncfree.lip6.fr/
+pkg_antidote_fetch = git
+pkg_antidote_repo = https://github.com/SyncFree/antidote
+pkg_antidote_commit = master
+
+PACKAGES += apns
+pkg_apns_name = apns
+pkg_apns_description = Apple Push Notification Server for Erlang
+pkg_apns_homepage = http://inaka.github.com/apns4erl
+pkg_apns_fetch = git
+pkg_apns_repo = https://github.com/inaka/apns4erl
+pkg_apns_commit = master
+
+PACKAGES += asciideck
+pkg_asciideck_name = asciideck
+pkg_asciideck_description = Asciidoc for Erlang.
+pkg_asciideck_homepage = https://ninenines.eu
+pkg_asciideck_fetch = git
+pkg_asciideck_repo = https://github.com/ninenines/asciideck
+pkg_asciideck_commit = master
+
+PACKAGES += azdht
+pkg_azdht_name = azdht
+pkg_azdht_description = Azureus Distributed Hash Table (DHT) in Erlang
+pkg_azdht_homepage = https://github.com/arcusfelis/azdht
+pkg_azdht_fetch = git
+pkg_azdht_repo = https://github.com/arcusfelis/azdht
+pkg_azdht_commit = master
+
+PACKAGES += backoff
+pkg_backoff_name = backoff
+pkg_backoff_description = Simple exponential backoffs in Erlang
+pkg_backoff_homepage = https://github.com/ferd/backoff
+pkg_backoff_fetch = git
+pkg_backoff_repo = https://github.com/ferd/backoff
+pkg_backoff_commit = master
+
+PACKAGES += barrel_tcp
+pkg_barrel_tcp_name = barrel_tcp
+pkg_barrel_tcp_description = barrel is a generic TCP acceptor pool with low latency in Erlang.
+pkg_barrel_tcp_homepage = https://github.com/benoitc-attic/barrel_tcp
+pkg_barrel_tcp_fetch = git
+pkg_barrel_tcp_repo = https://github.com/benoitc-attic/barrel_tcp
+pkg_barrel_tcp_commit = master
+
+PACKAGES += basho_bench
+pkg_basho_bench_name = basho_bench
+pkg_basho_bench_description = A load-generation and testing tool for basically whatever you can write a returning Erlang function for.
+pkg_basho_bench_homepage = https://github.com/basho/basho_bench
+pkg_basho_bench_fetch = git
+pkg_basho_bench_repo = https://github.com/basho/basho_bench
+pkg_basho_bench_commit = master
+
+PACKAGES += bcrypt
+pkg_bcrypt_name = bcrypt
+pkg_bcrypt_description = Bcrypt Erlang / C library
+pkg_bcrypt_homepage = https://github.com/erlangpack/bcrypt
+pkg_bcrypt_fetch = git
+pkg_bcrypt_repo = https://github.com/erlangpack/bcrypt.git
+pkg_bcrypt_commit = master
+
+PACKAGES += beam
+pkg_beam_name = beam
+pkg_beam_description = BEAM emulator written in Erlang
+pkg_beam_homepage = https://github.com/tonyrog/beam
+pkg_beam_fetch = git
+pkg_beam_repo = https://github.com/tonyrog/beam
+pkg_beam_commit = master
+
+PACKAGES += beanstalk
+pkg_beanstalk_name = beanstalk
+pkg_beanstalk_description = An Erlang client for beanstalkd
+pkg_beanstalk_homepage = https://github.com/tim/erlang-beanstalk
+pkg_beanstalk_fetch = git
+pkg_beanstalk_repo = https://github.com/tim/erlang-beanstalk
+pkg_beanstalk_commit = master
+
+PACKAGES += bear
+pkg_bear_name = bear
+pkg_bear_description = a set of statistics functions for erlang
+pkg_bear_homepage = https://github.com/boundary/bear
+pkg_bear_fetch = git
+pkg_bear_repo = https://github.com/boundary/bear
+pkg_bear_commit = master
+
+PACKAGES += bertconf
+pkg_bertconf_name = bertconf
+pkg_bertconf_description = Make ETS tables out of static BERT files that are auto-reloaded
+pkg_bertconf_homepage = https://github.com/ferd/bertconf
+pkg_bertconf_fetch = git
+pkg_bertconf_repo = https://github.com/ferd/bertconf
+pkg_bertconf_commit = master
+
+PACKAGES += bifrost
+pkg_bifrost_name = bifrost
+pkg_bifrost_description = Erlang FTP Server Framework
+pkg_bifrost_homepage = https://github.com/thorstadt/bifrost
+pkg_bifrost_fetch = git
+pkg_bifrost_repo = https://github.com/thorstadt/bifrost
+pkg_bifrost_commit = master
+
+PACKAGES += binpp
+pkg_binpp_name = binpp
+pkg_binpp_description = Erlang Binary Pretty Printer
+pkg_binpp_homepage = https://github.com/jtendo/binpp
+pkg_binpp_fetch = git
+pkg_binpp_repo = https://github.com/jtendo/binpp
+pkg_binpp_commit = master
+
+PACKAGES += bisect
+pkg_bisect_name = bisect
+pkg_bisect_description = Ordered fixed-size binary dictionary in Erlang
+pkg_bisect_homepage = https://github.com/knutin/bisect
+pkg_bisect_fetch = git
+pkg_bisect_repo = https://github.com/knutin/bisect
+pkg_bisect_commit = master
+
+PACKAGES += bitcask
+pkg_bitcask_name = bitcask
+pkg_bitcask_description = because you need another key/value storage engine
+pkg_bitcask_homepage = https://github.com/basho/bitcask
+pkg_bitcask_fetch = git
+pkg_bitcask_repo = https://github.com/basho/bitcask
+pkg_bitcask_commit = develop
+
+PACKAGES += bitstore
+pkg_bitstore_name = bitstore
+pkg_bitstore_description = A document based ontology development environment
+pkg_bitstore_homepage = https://github.com/bdionne/bitstore
+pkg_bitstore_fetch = git
+pkg_bitstore_repo = https://github.com/bdionne/bitstore
+pkg_bitstore_commit = master
+
+PACKAGES += bootstrap
+pkg_bootstrap_name = bootstrap
+pkg_bootstrap_description = A simple, yet powerful Erlang cluster bootstrapping application.
+pkg_bootstrap_homepage = https://github.com/schlagert/bootstrap
+pkg_bootstrap_fetch = git
+pkg_bootstrap_repo = https://github.com/schlagert/bootstrap
+pkg_bootstrap_commit = master
+
+PACKAGES += boss
+pkg_boss_name = boss
+pkg_boss_description = Erlang web MVC, now featuring Comet
+pkg_boss_homepage = https://github.com/ChicagoBoss/ChicagoBoss
+pkg_boss_fetch = git
+pkg_boss_repo = https://github.com/ChicagoBoss/ChicagoBoss
+pkg_boss_commit = master
+
+PACKAGES += boss_db
+pkg_boss_db_name = boss_db
+pkg_boss_db_description = BossDB: a sharded, caching, pooling, evented ORM for Erlang
+pkg_boss_db_homepage = https://github.com/ErlyORM/boss_db
+pkg_boss_db_fetch = git
+pkg_boss_db_repo = https://github.com/ErlyORM/boss_db
+pkg_boss_db_commit = master
+
+PACKAGES += brod
+pkg_brod_name = brod
+pkg_brod_description = Kafka client in Erlang
+pkg_brod_homepage = https://github.com/klarna/brod
+pkg_brod_fetch = git
+pkg_brod_repo = https://github.com/klarna/brod.git
+pkg_brod_commit = master
+
+PACKAGES += bson
+pkg_bson_name = bson
+pkg_bson_description = BSON documents in Erlang, see bsonspec.org
+pkg_bson_homepage = https://github.com/comtihon/bson-erlang
+pkg_bson_fetch = git
+pkg_bson_repo = https://github.com/comtihon/bson-erlang
+pkg_bson_commit = master
+
+PACKAGES += bullet
+pkg_bullet_name = bullet
+pkg_bullet_description = Simple, reliable, efficient streaming for Cowboy.
+pkg_bullet_homepage = http://ninenines.eu
+pkg_bullet_fetch = git
+pkg_bullet_repo = https://github.com/ninenines/bullet
+pkg_bullet_commit = master
+
+PACKAGES += cache
+pkg_cache_name = cache
+pkg_cache_description = Erlang in-memory cache
+pkg_cache_homepage = https://github.com/fogfish/cache
+pkg_cache_fetch = git
+pkg_cache_repo = https://github.com/fogfish/cache
+pkg_cache_commit = master
+
+PACKAGES += cake
+pkg_cake_name = cake
+pkg_cake_description = Really simple terminal colorization
+pkg_cake_homepage = https://github.com/darach/cake-erl
+pkg_cake_fetch = git
+pkg_cake_repo = https://github.com/darach/cake-erl
+pkg_cake_commit = master
+
+PACKAGES += carotene
+pkg_carotene_name = carotene
+pkg_carotene_description = Real-time server
+pkg_carotene_homepage = https://github.com/carotene/carotene
+pkg_carotene_fetch = git
+pkg_carotene_repo = https://github.com/carotene/carotene
+pkg_carotene_commit = master
+
+PACKAGES += cberl
+pkg_cberl_name = cberl
+pkg_cberl_description = NIF based Erlang bindings for Couchbase
+pkg_cberl_homepage = https://github.com/chitika/cberl
+pkg_cberl_fetch = git
+pkg_cberl_repo = https://github.com/chitika/cberl
+pkg_cberl_commit = master
+
+PACKAGES += cecho
+pkg_cecho_name = cecho
+pkg_cecho_description = An ncurses library for Erlang
+pkg_cecho_homepage = https://github.com/mazenharake/cecho
+pkg_cecho_fetch = git
+pkg_cecho_repo = https://github.com/mazenharake/cecho
+pkg_cecho_commit = master
+
+PACKAGES += cferl
+pkg_cferl_name = cferl
+pkg_cferl_description = Rackspace / Open Stack Cloud Files Erlang Client
+pkg_cferl_homepage = https://github.com/ddossot/cferl
+pkg_cferl_fetch = git
+pkg_cferl_repo = https://github.com/ddossot/cferl
+pkg_cferl_commit = master
+
+PACKAGES += chaos_monkey
+pkg_chaos_monkey_name = chaos_monkey
+pkg_chaos_monkey_description = This is The CHAOS MONKEY. It will kill your processes.
+pkg_chaos_monkey_homepage = https://github.com/dLuna/chaos_monkey
+pkg_chaos_monkey_fetch = git
+pkg_chaos_monkey_repo = https://github.com/dLuna/chaos_monkey
+pkg_chaos_monkey_commit = master
+
+PACKAGES += check_node
+pkg_check_node_name = check_node
+pkg_check_node_description = Nagios Scripts for monitoring Riak
+pkg_check_node_homepage = https://github.com/basho-labs/riak_nagios
+pkg_check_node_fetch = git
+pkg_check_node_repo = https://github.com/basho-labs/riak_nagios
+pkg_check_node_commit = master
+
+PACKAGES += chronos
+pkg_chronos_name = chronos
+pkg_chronos_description = Timer module for Erlang that makes it easy to abstract time out of the tests.
+pkg_chronos_homepage = https://github.com/lehoff/chronos
+pkg_chronos_fetch = git
+pkg_chronos_repo = https://github.com/lehoff/chronos
+pkg_chronos_commit = master
+
+PACKAGES += chumak
+pkg_chumak_name = chumak
+pkg_chumak_description = Pure Erlang implementation of ZeroMQ Message Transport Protocol.
+pkg_chumak_homepage = http://choven.ca
+pkg_chumak_fetch = git
+pkg_chumak_repo = https://github.com/chovencorp/chumak
+pkg_chumak_commit = master
+
+PACKAGES += cl
+pkg_cl_name = cl
+pkg_cl_description = OpenCL binding for Erlang
+pkg_cl_homepage = https://github.com/tonyrog/cl
+pkg_cl_fetch = git
+pkg_cl_repo = https://github.com/tonyrog/cl
+pkg_cl_commit = master
+
+PACKAGES += clique
+pkg_clique_name = clique
+pkg_clique_description = CLI Framework for Erlang
+pkg_clique_homepage = https://github.com/basho/clique
+pkg_clique_fetch = git
+pkg_clique_repo = https://github.com/basho/clique
+pkg_clique_commit = develop
+
+PACKAGES += cloudi_core
+pkg_cloudi_core_name = cloudi_core
+pkg_cloudi_core_description = CloudI internal service runtime
+pkg_cloudi_core_homepage = http://cloudi.org/
+pkg_cloudi_core_fetch = git
+pkg_cloudi_core_repo = https://github.com/CloudI/cloudi_core
+pkg_cloudi_core_commit = master
+
+PACKAGES += cloudi_service_api_requests
+pkg_cloudi_service_api_requests_name = cloudi_service_api_requests
+pkg_cloudi_service_api_requests_description = CloudI Service API requests (JSON-RPC/Erlang-term support)
+pkg_cloudi_service_api_requests_homepage = http://cloudi.org/
+pkg_cloudi_service_api_requests_fetch = git
+pkg_cloudi_service_api_requests_repo = https://github.com/CloudI/cloudi_service_api_requests
+pkg_cloudi_service_api_requests_commit = master
+
+PACKAGES += cloudi_service_db
+pkg_cloudi_service_db_name = cloudi_service_db
+pkg_cloudi_service_db_description = CloudI Database (in-memory/testing/generic)
+pkg_cloudi_service_db_homepage = http://cloudi.org/
+pkg_cloudi_service_db_fetch = git
+pkg_cloudi_service_db_repo = https://github.com/CloudI/cloudi_service_db
+pkg_cloudi_service_db_commit = master
+
+PACKAGES += cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_name = cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_description = Cassandra CloudI Service
+pkg_cloudi_service_db_cassandra_homepage = http://cloudi.org/
+pkg_cloudi_service_db_cassandra_fetch = git
+pkg_cloudi_service_db_cassandra_repo = https://github.com/CloudI/cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_commit = master
+
+PACKAGES += cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_name = cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_description = Cassandra CQL CloudI Service
+pkg_cloudi_service_db_cassandra_cql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_cassandra_cql_fetch = git
+pkg_cloudi_service_db_cassandra_cql_repo = https://github.com/CloudI/cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_commit = master
+
+PACKAGES += cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_name = cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_description = CouchDB CloudI Service
+pkg_cloudi_service_db_couchdb_homepage = http://cloudi.org/
+pkg_cloudi_service_db_couchdb_fetch = git
+pkg_cloudi_service_db_couchdb_repo = https://github.com/CloudI/cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_commit = master
+
+PACKAGES += cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_name = cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_description = elasticsearch CloudI Service
+pkg_cloudi_service_db_elasticsearch_homepage = http://cloudi.org/
+pkg_cloudi_service_db_elasticsearch_fetch = git
+pkg_cloudi_service_db_elasticsearch_repo = https://github.com/CloudI/cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_commit = master
+
+PACKAGES += cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_name = cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_description = memcached CloudI Service
+pkg_cloudi_service_db_memcached_homepage = http://cloudi.org/
+pkg_cloudi_service_db_memcached_fetch = git
+pkg_cloudi_service_db_memcached_repo = https://github.com/CloudI/cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_commit = master
+
+PACKAGES += cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_name = cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_description = MySQL CloudI Service
+pkg_cloudi_service_db_mysql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_mysql_fetch = git
+pkg_cloudi_service_db_mysql_repo = https://github.com/CloudI/cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_commit = master
+
+PACKAGES += cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_name = cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_description = PostgreSQL CloudI Service
+pkg_cloudi_service_db_pgsql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_pgsql_fetch = git
+pkg_cloudi_service_db_pgsql_repo = https://github.com/CloudI/cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_commit = master
+
+PACKAGES += cloudi_service_db_riak
+pkg_cloudi_service_db_riak_name = cloudi_service_db_riak
+pkg_cloudi_service_db_riak_description = Riak CloudI Service
+pkg_cloudi_service_db_riak_homepage = http://cloudi.org/
+pkg_cloudi_service_db_riak_fetch = git
+pkg_cloudi_service_db_riak_repo = https://github.com/CloudI/cloudi_service_db_riak
+pkg_cloudi_service_db_riak_commit = master
+
+PACKAGES += cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_name = cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_description = Tokyo Tyrant CloudI Service
+pkg_cloudi_service_db_tokyotyrant_homepage = http://cloudi.org/
+pkg_cloudi_service_db_tokyotyrant_fetch = git
+pkg_cloudi_service_db_tokyotyrant_repo = https://github.com/CloudI/cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_commit = master
+
+PACKAGES += cloudi_service_filesystem
+pkg_cloudi_service_filesystem_name = cloudi_service_filesystem
+pkg_cloudi_service_filesystem_description = Filesystem CloudI Service
+pkg_cloudi_service_filesystem_homepage = http://cloudi.org/
+pkg_cloudi_service_filesystem_fetch = git
+pkg_cloudi_service_filesystem_repo = https://github.com/CloudI/cloudi_service_filesystem
+pkg_cloudi_service_filesystem_commit = master
+
+PACKAGES += cloudi_service_http_client
+pkg_cloudi_service_http_client_name = cloudi_service_http_client
+pkg_cloudi_service_http_client_description = HTTP client CloudI Service
+pkg_cloudi_service_http_client_homepage = http://cloudi.org/
+pkg_cloudi_service_http_client_fetch = git
+pkg_cloudi_service_http_client_repo = https://github.com/CloudI/cloudi_service_http_client
+pkg_cloudi_service_http_client_commit = master
+
+PACKAGES += cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_name = cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_description = cowboy HTTP/HTTPS CloudI Service
+pkg_cloudi_service_http_cowboy_homepage = http://cloudi.org/
+pkg_cloudi_service_http_cowboy_fetch = git
+pkg_cloudi_service_http_cowboy_repo = https://github.com/CloudI/cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_commit = master
+
+PACKAGES += cloudi_service_http_elli
+pkg_cloudi_service_http_elli_name = cloudi_service_http_elli
+pkg_cloudi_service_http_elli_description = elli HTTP CloudI Service
+pkg_cloudi_service_http_elli_homepage = http://cloudi.org/
+pkg_cloudi_service_http_elli_fetch = git
+pkg_cloudi_service_http_elli_repo = https://github.com/CloudI/cloudi_service_http_elli
+pkg_cloudi_service_http_elli_commit = master
+
+PACKAGES += cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_name = cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_description = Map/Reduce CloudI Service
+pkg_cloudi_service_map_reduce_homepage = http://cloudi.org/
+pkg_cloudi_service_map_reduce_fetch = git
+pkg_cloudi_service_map_reduce_repo = https://github.com/CloudI/cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_commit = master
+
+PACKAGES += cloudi_service_oauth1
+pkg_cloudi_service_oauth1_name = cloudi_service_oauth1
+pkg_cloudi_service_oauth1_description = OAuth v1.0 CloudI Service
+pkg_cloudi_service_oauth1_homepage = http://cloudi.org/
+pkg_cloudi_service_oauth1_fetch = git
+pkg_cloudi_service_oauth1_repo = https://github.com/CloudI/cloudi_service_oauth1
+pkg_cloudi_service_oauth1_commit = master
+
+PACKAGES += cloudi_service_queue
+pkg_cloudi_service_queue_name = cloudi_service_queue
+pkg_cloudi_service_queue_description = Persistent Queue Service
+pkg_cloudi_service_queue_homepage = http://cloudi.org/
+pkg_cloudi_service_queue_fetch = git
+pkg_cloudi_service_queue_repo = https://github.com/CloudI/cloudi_service_queue
+pkg_cloudi_service_queue_commit = master
+
+PACKAGES += cloudi_service_quorum
+pkg_cloudi_service_quorum_name = cloudi_service_quorum
+pkg_cloudi_service_quorum_description = CloudI Quorum Service
+pkg_cloudi_service_quorum_homepage = http://cloudi.org/
+pkg_cloudi_service_quorum_fetch = git
+pkg_cloudi_service_quorum_repo = https://github.com/CloudI/cloudi_service_quorum
+pkg_cloudi_service_quorum_commit = master
+
+PACKAGES += cloudi_service_router
+pkg_cloudi_service_router_name = cloudi_service_router
+pkg_cloudi_service_router_description = CloudI Router Service
+pkg_cloudi_service_router_homepage = http://cloudi.org/
+pkg_cloudi_service_router_fetch = git
+pkg_cloudi_service_router_repo = https://github.com/CloudI/cloudi_service_router
+pkg_cloudi_service_router_commit = master
+
+PACKAGES += cloudi_service_tcp
+pkg_cloudi_service_tcp_name = cloudi_service_tcp
+pkg_cloudi_service_tcp_description = TCP CloudI Service
+pkg_cloudi_service_tcp_homepage = http://cloudi.org/
+pkg_cloudi_service_tcp_fetch = git
+pkg_cloudi_service_tcp_repo = https://github.com/CloudI/cloudi_service_tcp
+pkg_cloudi_service_tcp_commit = master
+
+PACKAGES += cloudi_service_timers
+pkg_cloudi_service_timers_name = cloudi_service_timers
+pkg_cloudi_service_timers_description = Timers CloudI Service
+pkg_cloudi_service_timers_homepage = http://cloudi.org/
+pkg_cloudi_service_timers_fetch = git
+pkg_cloudi_service_timers_repo = https://github.com/CloudI/cloudi_service_timers
+pkg_cloudi_service_timers_commit = master
+
+PACKAGES += cloudi_service_udp
+pkg_cloudi_service_udp_name = cloudi_service_udp
+pkg_cloudi_service_udp_description = UDP CloudI Service
+pkg_cloudi_service_udp_homepage = http://cloudi.org/
+pkg_cloudi_service_udp_fetch = git
+pkg_cloudi_service_udp_repo = https://github.com/CloudI/cloudi_service_udp
+pkg_cloudi_service_udp_commit = master
+
+PACKAGES += cloudi_service_validate
+pkg_cloudi_service_validate_name = cloudi_service_validate
+pkg_cloudi_service_validate_description = CloudI Validate Service
+pkg_cloudi_service_validate_homepage = http://cloudi.org/
+pkg_cloudi_service_validate_fetch = git
+pkg_cloudi_service_validate_repo = https://github.com/CloudI/cloudi_service_validate
+pkg_cloudi_service_validate_commit = master
+
+PACKAGES += cloudi_service_zeromq
+pkg_cloudi_service_zeromq_name = cloudi_service_zeromq
+pkg_cloudi_service_zeromq_description = ZeroMQ CloudI Service
+pkg_cloudi_service_zeromq_homepage = http://cloudi.org/
+pkg_cloudi_service_zeromq_fetch = git
+pkg_cloudi_service_zeromq_repo = https://github.com/CloudI/cloudi_service_zeromq
+pkg_cloudi_service_zeromq_commit = master
+
+PACKAGES += cluster_info
+pkg_cluster_info_name = cluster_info
+pkg_cluster_info_description = Fork of Hibari's nifty cluster_info OTP app
+pkg_cluster_info_homepage = https://github.com/basho/cluster_info
+pkg_cluster_info_fetch = git
+pkg_cluster_info_repo = https://github.com/basho/cluster_info
+pkg_cluster_info_commit = master
+
+PACKAGES += color
+pkg_color_name = color
+pkg_color_description = ANSI colors for your Erlang
+pkg_color_homepage = https://github.com/julianduque/erlang-color
+pkg_color_fetch = git
+pkg_color_repo = https://github.com/julianduque/erlang-color
+pkg_color_commit = master
+
+PACKAGES += confetti
+pkg_confetti_name = confetti
+pkg_confetti_description = Erlang configuration provider / application:get_env/2 on steroids
+pkg_confetti_homepage = https://github.com/jtendo/confetti
+pkg_confetti_fetch = git
+pkg_confetti_repo = https://github.com/jtendo/confetti
+pkg_confetti_commit = master
+
+PACKAGES += couchbeam
+pkg_couchbeam_name = couchbeam
+pkg_couchbeam_description = Apache CouchDB client in Erlang
+pkg_couchbeam_homepage = https://github.com/benoitc/couchbeam
+pkg_couchbeam_fetch = git
+pkg_couchbeam_repo = https://github.com/benoitc/couchbeam
+pkg_couchbeam_commit = master
+
+PACKAGES += covertool
+pkg_covertool_name = covertool
+pkg_covertool_description = Tool to convert Erlang cover data files into Cobertura XML reports
+pkg_covertool_homepage = https://github.com/idubrov/covertool
+pkg_covertool_fetch = git
+pkg_covertool_repo = https://github.com/idubrov/covertool
+pkg_covertool_commit = master
+
+PACKAGES += cowboy
+pkg_cowboy_name = cowboy
+pkg_cowboy_description = Small, fast and modular HTTP server.
+pkg_cowboy_homepage = http://ninenines.eu
+pkg_cowboy_fetch = git
+pkg_cowboy_repo = https://github.com/ninenines/cowboy
+pkg_cowboy_commit = 1.0.4
+
+PACKAGES += cowdb
+pkg_cowdb_name = cowdb
+pkg_cowdb_description = Pure Key/Value database library for Erlang Applications
+pkg_cowdb_homepage = https://github.com/refuge/cowdb
+pkg_cowdb_fetch = git
+pkg_cowdb_repo = https://github.com/refuge/cowdb
+pkg_cowdb_commit = master
+
+PACKAGES += cowlib
+pkg_cowlib_name = cowlib
+pkg_cowlib_description = Support library for manipulating Web protocols.
+pkg_cowlib_homepage = http://ninenines.eu
+pkg_cowlib_fetch = git
+pkg_cowlib_repo = https://github.com/ninenines/cowlib
+pkg_cowlib_commit = 1.0.2
+
+PACKAGES += cpg
+pkg_cpg_name = cpg
+pkg_cpg_description = CloudI Process Groups
+pkg_cpg_homepage = https://github.com/okeuday/cpg
+pkg_cpg_fetch = git
+pkg_cpg_repo = https://github.com/okeuday/cpg
+pkg_cpg_commit = master
+
+PACKAGES += cqerl
+pkg_cqerl_name = cqerl
+pkg_cqerl_description = Native Erlang CQL client for Cassandra
+pkg_cqerl_homepage = https://matehat.github.io/cqerl/
+pkg_cqerl_fetch = git
+pkg_cqerl_repo = https://github.com/matehat/cqerl
+pkg_cqerl_commit = master
+
+PACKAGES += cr
+pkg_cr_name = cr
+pkg_cr_description = Chain Replication
+pkg_cr_homepage = https://synrc.com/apps/cr/doc/cr.htm
+pkg_cr_fetch = git
+pkg_cr_repo = https://github.com/spawnproc/cr
+pkg_cr_commit = master
+
+PACKAGES += cuttlefish
+pkg_cuttlefish_name = cuttlefish
+pkg_cuttlefish_description = never lose your childlike sense of wonder baby cuttlefish, promise me?
+pkg_cuttlefish_homepage = https://github.com/basho/cuttlefish
+pkg_cuttlefish_fetch = git
+pkg_cuttlefish_repo = https://github.com/basho/cuttlefish
+pkg_cuttlefish_commit = master
+
+PACKAGES += damocles
+pkg_damocles_name = damocles
+pkg_damocles_description = Erlang library for generating adversarial network conditions for QAing distributed applications/systems on a single Linux box.
+pkg_damocles_homepage = https://github.com/lostcolony/damocles
+pkg_damocles_fetch = git
+pkg_damocles_repo = https://github.com/lostcolony/damocles
+pkg_damocles_commit = master
+
+PACKAGES += debbie
+pkg_debbie_name = debbie
+pkg_debbie_description = .DEB Built In Erlang
+pkg_debbie_homepage = https://github.com/crownedgrouse/debbie
+pkg_debbie_fetch = git
+pkg_debbie_repo = https://github.com/crownedgrouse/debbie
+pkg_debbie_commit = master
+
+PACKAGES += decimal
+pkg_decimal_name = decimal
+pkg_decimal_description = An Erlang decimal arithmetic library
+pkg_decimal_homepage = https://github.com/tim/erlang-decimal
+pkg_decimal_fetch = git
+pkg_decimal_repo = https://github.com/tim/erlang-decimal
+pkg_decimal_commit = master
+
+PACKAGES += detergent
+pkg_detergent_name = detergent
+pkg_detergent_description = An emulsifying Erlang SOAP library
+pkg_detergent_homepage = https://github.com/devinus/detergent
+pkg_detergent_fetch = git
+pkg_detergent_repo = https://github.com/devinus/detergent
+pkg_detergent_commit = master
+
+PACKAGES += detest
+pkg_detest_name = detest
+pkg_detest_description = Tool for running tests on a cluster of erlang nodes
+pkg_detest_homepage = https://github.com/biokoda/detest
+pkg_detest_fetch = git
+pkg_detest_repo = https://github.com/biokoda/detest
+pkg_detest_commit = master
+
+PACKAGES += dh_date
+pkg_dh_date_name = dh_date
+pkg_dh_date_description = Date formatting / parsing library for erlang
+pkg_dh_date_homepage = https://github.com/daleharvey/dh_date
+pkg_dh_date_fetch = git
+pkg_dh_date_repo = https://github.com/daleharvey/dh_date
+pkg_dh_date_commit = master
+
+PACKAGES += dirbusterl
+pkg_dirbusterl_name = dirbusterl
+pkg_dirbusterl_description = DirBuster successor in Erlang
+pkg_dirbusterl_homepage = https://github.com/silentsignal/DirBustErl
+pkg_dirbusterl_fetch = git
+pkg_dirbusterl_repo = https://github.com/silentsignal/DirBustErl
+pkg_dirbusterl_commit = master
+
+PACKAGES += dispcount
+pkg_dispcount_name = dispcount
+pkg_dispcount_description = Erlang task dispatcher based on ETS counters.
+pkg_dispcount_homepage = https://github.com/ferd/dispcount
+pkg_dispcount_fetch = git
+pkg_dispcount_repo = https://github.com/ferd/dispcount
+pkg_dispcount_commit = master
+
+PACKAGES += dlhttpc
+pkg_dlhttpc_name = dlhttpc
+pkg_dlhttpc_description = dispcount-based lhttpc fork for massive amounts of requests to limited endpoints
+pkg_dlhttpc_homepage = https://github.com/ferd/dlhttpc
+pkg_dlhttpc_fetch = git
+pkg_dlhttpc_repo = https://github.com/ferd/dlhttpc
+pkg_dlhttpc_commit = master
+
+PACKAGES += dns
+pkg_dns_name = dns
+pkg_dns_description = Erlang DNS library
+pkg_dns_homepage = https://github.com/aetrion/dns_erlang
+pkg_dns_fetch = git
+pkg_dns_repo = https://github.com/aetrion/dns_erlang
+pkg_dns_commit = master
+
+PACKAGES += dnssd
+pkg_dnssd_name = dnssd
+pkg_dnssd_description = Erlang interface to Apple's Bonjour DNS Service Discovery implementation
+pkg_dnssd_homepage = https://github.com/benoitc/dnssd_erlang
+pkg_dnssd_fetch = git
+pkg_dnssd_repo = https://github.com/benoitc/dnssd_erlang
+pkg_dnssd_commit = master
+
+PACKAGES += dynamic_compile
+pkg_dynamic_compile_name = dynamic_compile
+pkg_dynamic_compile_description = compile and load erlang modules from string input
+pkg_dynamic_compile_homepage = https://github.com/jkvor/dynamic_compile
+pkg_dynamic_compile_fetch = git
+pkg_dynamic_compile_repo = https://github.com/jkvor/dynamic_compile
+pkg_dynamic_compile_commit = master
+
+PACKAGES += e2
+pkg_e2_name = e2
+pkg_e2_description = Library to simplify writing correct OTP applications.
+pkg_e2_homepage = http://e2project.org
+pkg_e2_fetch = git
+pkg_e2_repo = https://github.com/gar1t/e2
+pkg_e2_commit = master
+
+PACKAGES += eamf
+pkg_eamf_name = eamf
+pkg_eamf_description = eAMF provides Action Message Format (AMF) support for Erlang
+pkg_eamf_homepage = https://github.com/mrinalwadhwa/eamf
+pkg_eamf_fetch = git
+pkg_eamf_repo = https://github.com/mrinalwadhwa/eamf
+pkg_eamf_commit = master
+
+PACKAGES += eavro
+pkg_eavro_name = eavro
+pkg_eavro_description = Apache Avro encoder/decoder
+pkg_eavro_homepage = https://github.com/SIfoxDevTeam/eavro
+pkg_eavro_fetch = git
+pkg_eavro_repo = https://github.com/SIfoxDevTeam/eavro
+pkg_eavro_commit = master
+
+PACKAGES += ecapnp
+pkg_ecapnp_name = ecapnp
+pkg_ecapnp_description = Cap'n Proto library for Erlang
+pkg_ecapnp_homepage = https://github.com/kaos/ecapnp
+pkg_ecapnp_fetch = git
+pkg_ecapnp_repo = https://github.com/kaos/ecapnp
+pkg_ecapnp_commit = master
+
+PACKAGES += econfig
+pkg_econfig_name = econfig
+pkg_econfig_description = simple Erlang config handler using INI files
+pkg_econfig_homepage = https://github.com/benoitc/econfig
+pkg_econfig_fetch = git
+pkg_econfig_repo = https://github.com/benoitc/econfig
+pkg_econfig_commit = master
+
+PACKAGES += edate
+pkg_edate_name = edate
+pkg_edate_description = date manipulation library for erlang
+pkg_edate_homepage = https://github.com/dweldon/edate
+pkg_edate_fetch = git
+pkg_edate_repo = https://github.com/dweldon/edate
+pkg_edate_commit = master
+
+PACKAGES += edgar
+pkg_edgar_name = edgar
+pkg_edgar_description = Erlang Does GNU AR
+pkg_edgar_homepage = https://github.com/crownedgrouse/edgar
+pkg_edgar_fetch = git
+pkg_edgar_repo = https://github.com/crownedgrouse/edgar
+pkg_edgar_commit = master
+
+PACKAGES += edis
+pkg_edis_name = edis
+pkg_edis_description = An Erlang implementation of Redis KV Store
+pkg_edis_homepage = http://inaka.github.com/edis/
+pkg_edis_fetch = git
+pkg_edis_repo = https://github.com/inaka/edis
+pkg_edis_commit = master
+
+PACKAGES += edns
+pkg_edns_name = edns
+pkg_edns_description = Erlang/OTP DNS server
+pkg_edns_homepage = https://github.com/hcvst/erlang-dns
+pkg_edns_fetch = git
+pkg_edns_repo = https://github.com/hcvst/erlang-dns
+pkg_edns_commit = master
+
+PACKAGES += edown
+pkg_edown_name = edown
+pkg_edown_description = EDoc extension for generating Github-flavored Markdown
+pkg_edown_homepage = https://github.com/uwiger/edown
+pkg_edown_fetch = git
+pkg_edown_repo = https://github.com/uwiger/edown
+pkg_edown_commit = master
+
+PACKAGES += eep
+pkg_eep_name = eep
+pkg_eep_description = Erlang Easy Profiling (eep) application provides a way to analyze application performance and call hierarchy
+pkg_eep_homepage = https://github.com/virtan/eep
+pkg_eep_fetch = git
+pkg_eep_repo = https://github.com/virtan/eep
+pkg_eep_commit = master
+
+PACKAGES += eep_app
+pkg_eep_app_name = eep_app
+pkg_eep_app_description = Embedded Event Processing
+pkg_eep_app_homepage = https://github.com/darach/eep-erl
+pkg_eep_app_fetch = git
+pkg_eep_app_repo = https://github.com/darach/eep-erl
+pkg_eep_app_commit = master
+
+PACKAGES += efene
+pkg_efene_name = efene
+pkg_efene_description = Alternative syntax for the Erlang Programming Language focusing on simplicity, ease of use and programmer UX
+pkg_efene_homepage = https://github.com/efene/efene
+pkg_efene_fetch = git
+pkg_efene_repo = https://github.com/efene/efene
+pkg_efene_commit = master
+
+PACKAGES += egeoip
+pkg_egeoip_name = egeoip
+pkg_egeoip_description = Erlang IP Geolocation module, currently supporting the MaxMind GeoLite City Database.
+pkg_egeoip_homepage = https://github.com/mochi/egeoip
+pkg_egeoip_fetch = git
+pkg_egeoip_repo = https://github.com/mochi/egeoip
+pkg_egeoip_commit = master
+
+PACKAGES += ehsa
+pkg_ehsa_name = ehsa
+pkg_ehsa_description = Erlang HTTP server basic and digest authentication modules
+pkg_ehsa_homepage = https://bitbucket.org/a12n/ehsa
+pkg_ehsa_fetch = hg
+pkg_ehsa_repo = https://bitbucket.org/a12n/ehsa
+pkg_ehsa_commit = default
+
+PACKAGES += ej
+pkg_ej_name = ej
+pkg_ej_description = Helper module for working with Erlang terms representing JSON
+pkg_ej_homepage = https://github.com/seth/ej
+pkg_ej_fetch = git
+pkg_ej_repo = https://github.com/seth/ej
+pkg_ej_commit = master
+
+PACKAGES += ejabberd
+pkg_ejabberd_name = ejabberd
+pkg_ejabberd_description = Robust, ubiquitous and massively scalable Jabber / XMPP Instant Messaging platform
+pkg_ejabberd_homepage = https://github.com/processone/ejabberd
+pkg_ejabberd_fetch = git
+pkg_ejabberd_repo = https://github.com/processone/ejabberd
+pkg_ejabberd_commit = master
+
+PACKAGES += ejwt
+pkg_ejwt_name = ejwt
+pkg_ejwt_description = erlang library for JSON Web Token
+pkg_ejwt_homepage = https://github.com/artefactop/ejwt
+pkg_ejwt_fetch = git
+pkg_ejwt_repo = https://github.com/artefactop/ejwt
+pkg_ejwt_commit = master
+
+PACKAGES += ekaf
+pkg_ekaf_name = ekaf
+pkg_ekaf_description = A minimal, high-performance Kafka client in Erlang.
+pkg_ekaf_homepage = https://github.com/helpshift/ekaf
+pkg_ekaf_fetch = git
+pkg_ekaf_repo = https://github.com/helpshift/ekaf
+pkg_ekaf_commit = master
+
+PACKAGES += elarm
+pkg_elarm_name = elarm
+pkg_elarm_description = Alarm Manager for Erlang.
+pkg_elarm_homepage = https://github.com/esl/elarm
+pkg_elarm_fetch = git
+pkg_elarm_repo = https://github.com/esl/elarm
+pkg_elarm_commit = master
+
+PACKAGES += eleveldb
+pkg_eleveldb_name = eleveldb
+pkg_eleveldb_description = Erlang LevelDB API
+pkg_eleveldb_homepage = https://github.com/basho/eleveldb
+pkg_eleveldb_fetch = git
+pkg_eleveldb_repo = https://github.com/basho/eleveldb
+pkg_eleveldb_commit = master
+
+PACKAGES += elixir
+pkg_elixir_name = elixir
+pkg_elixir_description = Elixir is a dynamic, functional language designed for building scalable and maintainable applications
+pkg_elixir_homepage = https://elixir-lang.org/
+pkg_elixir_fetch = git
+pkg_elixir_repo = https://github.com/elixir-lang/elixir
+pkg_elixir_commit = master
+
+PACKAGES += elli
+pkg_elli_name = elli
+pkg_elli_description = Simple, robust and performant Erlang web server
+pkg_elli_homepage = https://github.com/elli-lib/elli
+pkg_elli_fetch = git
+pkg_elli_repo = https://github.com/elli-lib/elli
+pkg_elli_commit = master
+
+PACKAGES += elvis
+pkg_elvis_name = elvis
+pkg_elvis_description = Erlang Style Reviewer
+pkg_elvis_homepage = https://github.com/inaka/elvis
+pkg_elvis_fetch = git
+pkg_elvis_repo = https://github.com/inaka/elvis
+pkg_elvis_commit = master
+
+PACKAGES += emagick
+pkg_emagick_name = emagick
+pkg_emagick_description = Wrapper for Graphics/ImageMagick command line tool.
+pkg_emagick_homepage = https://github.com/kivra/emagick
+pkg_emagick_fetch = git
+pkg_emagick_repo = https://github.com/kivra/emagick
+pkg_emagick_commit = master
+
+PACKAGES += emysql
+pkg_emysql_name = emysql
+pkg_emysql_description = Stable, pure Erlang MySQL driver.
+pkg_emysql_homepage = https://github.com/Eonblast/Emysql
+pkg_emysql_fetch = git
+pkg_emysql_repo = https://github.com/Eonblast/Emysql
+pkg_emysql_commit = master
+
+PACKAGES += enm
+pkg_enm_name = enm
+pkg_enm_description = Erlang driver for nanomsg
+pkg_enm_homepage = https://github.com/basho/enm
+pkg_enm_fetch = git
+pkg_enm_repo = https://github.com/basho/enm
+pkg_enm_commit = master
+
+PACKAGES += entop
+pkg_entop_name = entop
+pkg_entop_description = A top-like tool for monitoring an Erlang node
+pkg_entop_homepage = https://github.com/mazenharake/entop
+pkg_entop_fetch = git
+pkg_entop_repo = https://github.com/mazenharake/entop
+pkg_entop_commit = master
+
+PACKAGES += epcap
+pkg_epcap_name = epcap
+pkg_epcap_description = Erlang packet capture interface using pcap
+pkg_epcap_homepage = https://github.com/msantos/epcap
+pkg_epcap_fetch = git
+pkg_epcap_repo = https://github.com/msantos/epcap
+pkg_epcap_commit = master
+
+PACKAGES += eper
+pkg_eper_name = eper
+pkg_eper_description = Erlang performance and debugging tools.
+pkg_eper_homepage = https://github.com/massemanet/eper
+pkg_eper_fetch = git
+pkg_eper_repo = https://github.com/massemanet/eper
+pkg_eper_commit = master
+
+PACKAGES += epgsql
+pkg_epgsql_name = epgsql
+pkg_epgsql_description = Erlang PostgreSQL client library.
+pkg_epgsql_homepage = https://github.com/epgsql/epgsql
+pkg_epgsql_fetch = git
+pkg_epgsql_repo = https://github.com/epgsql/epgsql
+pkg_epgsql_commit = master
+
+PACKAGES += episcina
+pkg_episcina_name = episcina
+pkg_episcina_description = A simple non-intrusive resource pool for connections
+pkg_episcina_homepage = https://github.com/erlware/episcina
+pkg_episcina_fetch = git
+pkg_episcina_repo = https://github.com/erlware/episcina
+pkg_episcina_commit = master
+
+PACKAGES += eplot
+pkg_eplot_name = eplot
+pkg_eplot_description = A plot engine written in erlang.
+pkg_eplot_homepage = https://github.com/psyeugenic/eplot
+pkg_eplot_fetch = git
+pkg_eplot_repo = https://github.com/psyeugenic/eplot
+pkg_eplot_commit = master
+
+PACKAGES += epocxy
+pkg_epocxy_name = epocxy
+pkg_epocxy_description = Erlang Patterns of Concurrency
+pkg_epocxy_homepage = https://github.com/duomark/epocxy
+pkg_epocxy_fetch = git
+pkg_epocxy_repo = https://github.com/duomark/epocxy
+pkg_epocxy_commit = master
+
+PACKAGES += epubnub
+pkg_epubnub_name = epubnub
+pkg_epubnub_description = Erlang PubNub API
+pkg_epubnub_homepage = https://github.com/tsloughter/epubnub
+pkg_epubnub_fetch = git
+pkg_epubnub_repo = https://github.com/tsloughter/epubnub
+pkg_epubnub_commit = master
+
+PACKAGES += eqm
+pkg_eqm_name = eqm
+pkg_eqm_description = Erlang pub sub with supply-demand channels
+pkg_eqm_homepage = https://github.com/loucash/eqm
+pkg_eqm_fetch = git
+pkg_eqm_repo = https://github.com/loucash/eqm
+pkg_eqm_commit = master
+
+PACKAGES += eredis
+pkg_eredis_name = eredis
+pkg_eredis_description = Erlang Redis client
+pkg_eredis_homepage = https://github.com/wooga/eredis
+pkg_eredis_fetch = git
+pkg_eredis_repo = https://github.com/wooga/eredis
+pkg_eredis_commit = master
+
+PACKAGES += eredis_pool
+pkg_eredis_pool_name = eredis_pool
+pkg_eredis_pool_description = eredis_pool is a pool of Redis clients, using eredis and poolboy.
+pkg_eredis_pool_homepage = https://github.com/hiroeorz/eredis_pool
+pkg_eredis_pool_fetch = git
+pkg_eredis_pool_repo = https://github.com/hiroeorz/eredis_pool
+pkg_eredis_pool_commit = master
+
+PACKAGES += erl_streams
+pkg_erl_streams_name = erl_streams
+pkg_erl_streams_description = Streams in Erlang
+pkg_erl_streams_homepage = https://github.com/epappas/erl_streams
+pkg_erl_streams_fetch = git
+pkg_erl_streams_repo = https://github.com/epappas/erl_streams
+pkg_erl_streams_commit = master
+
+PACKAGES += erlang_cep
+pkg_erlang_cep_name = erlang_cep
+pkg_erlang_cep_description = A basic CEP package written in erlang
+pkg_erlang_cep_homepage = https://github.com/danmacklin/erlang_cep
+pkg_erlang_cep_fetch = git
+pkg_erlang_cep_repo = https://github.com/danmacklin/erlang_cep
+pkg_erlang_cep_commit = master
+
+PACKAGES += erlang_js
+pkg_erlang_js_name = erlang_js
+pkg_erlang_js_description = A linked-in driver for Erlang to Mozilla's Spidermonkey Javascript runtime.
+pkg_erlang_js_homepage = https://github.com/basho/erlang_js
+pkg_erlang_js_fetch = git
+pkg_erlang_js_repo = https://github.com/basho/erlang_js
+pkg_erlang_js_commit = master
+
+PACKAGES += erlang_localtime
+pkg_erlang_localtime_name = erlang_localtime
+pkg_erlang_localtime_description = Erlang library for conversion from one local time to another
+pkg_erlang_localtime_homepage = https://github.com/dmitryme/erlang_localtime
+pkg_erlang_localtime_fetch = git
+pkg_erlang_localtime_repo = https://github.com/dmitryme/erlang_localtime
+pkg_erlang_localtime_commit = master
+
+PACKAGES += erlang_smtp
+pkg_erlang_smtp_name = erlang_smtp
+pkg_erlang_smtp_description = Erlang SMTP and POP3 server code.
+pkg_erlang_smtp_homepage = https://github.com/tonyg/erlang-smtp
+pkg_erlang_smtp_fetch = git
+pkg_erlang_smtp_repo = https://github.com/tonyg/erlang-smtp
+pkg_erlang_smtp_commit = master
+
+PACKAGES += erlang_term
+pkg_erlang_term_name = erlang_term
+pkg_erlang_term_description = Erlang Term Info
+pkg_erlang_term_homepage = https://github.com/okeuday/erlang_term
+pkg_erlang_term_fetch = git
+pkg_erlang_term_repo = https://github.com/okeuday/erlang_term
+pkg_erlang_term_commit = master
+
+PACKAGES += erlastic_search
+pkg_erlastic_search_name = erlastic_search
+pkg_erlastic_search_description = An Erlang app for communicating with Elastic Search's rest interface.
+pkg_erlastic_search_homepage = https://github.com/tsloughter/erlastic_search
+pkg_erlastic_search_fetch = git
+pkg_erlastic_search_repo = https://github.com/tsloughter/erlastic_search
+pkg_erlastic_search_commit = master
+
+PACKAGES += erlasticsearch
+pkg_erlasticsearch_name = erlasticsearch
+pkg_erlasticsearch_description = Erlang thrift interface to elastic_search
+pkg_erlasticsearch_homepage = https://github.com/dieswaytoofast/erlasticsearch
+pkg_erlasticsearch_fetch = git
+pkg_erlasticsearch_repo = https://github.com/dieswaytoofast/erlasticsearch
+pkg_erlasticsearch_commit = master
+
+PACKAGES += erlbrake
+pkg_erlbrake_name = erlbrake
+pkg_erlbrake_description = Erlang Airbrake notification client
+pkg_erlbrake_homepage = https://github.com/kenpratt/erlbrake
+pkg_erlbrake_fetch = git
+pkg_erlbrake_repo = https://github.com/kenpratt/erlbrake
+pkg_erlbrake_commit = master
+
+PACKAGES += erlcloud
+pkg_erlcloud_name = erlcloud
+pkg_erlcloud_description = Cloud Computing library for erlang (Amazon EC2, S3, SQS, SimpleDB, Mechanical Turk, ELB)
+pkg_erlcloud_homepage = https://github.com/gleber/erlcloud
+pkg_erlcloud_fetch = git
+pkg_erlcloud_repo = https://github.com/gleber/erlcloud
+pkg_erlcloud_commit = master
+
+PACKAGES += erlcron
+pkg_erlcron_name = erlcron
+pkg_erlcron_description = Erlang cronish system
+pkg_erlcron_homepage = https://github.com/erlware/erlcron
+pkg_erlcron_fetch = git
+pkg_erlcron_repo = https://github.com/erlware/erlcron
+pkg_erlcron_commit = master
+
+PACKAGES += erldb
+pkg_erldb_name = erldb
+pkg_erldb_description = ORM (Object-relational mapping) application implemented in Erlang
+pkg_erldb_homepage = http://erldb.org
+pkg_erldb_fetch = git
+pkg_erldb_repo = https://github.com/erldb/erldb
+pkg_erldb_commit = master
+
+PACKAGES += erldis
+pkg_erldis_name = erldis
+pkg_erldis_description = redis erlang client library
+pkg_erldis_homepage = https://github.com/cstar/erldis
+pkg_erldis_fetch = git
+pkg_erldis_repo = https://github.com/cstar/erldis
+pkg_erldis_commit = master
+
+PACKAGES += erldns
+pkg_erldns_name = erldns
+pkg_erldns_description = DNS server, in erlang.
+pkg_erldns_homepage = https://github.com/aetrion/erl-dns
+pkg_erldns_fetch = git
+pkg_erldns_repo = https://github.com/aetrion/erl-dns
+pkg_erldns_commit = master
+
+PACKAGES += erldocker
+pkg_erldocker_name = erldocker
+pkg_erldocker_description = Docker Remote API client for Erlang
+pkg_erldocker_homepage = https://github.com/proger/erldocker
+pkg_erldocker_fetch = git
+pkg_erldocker_repo = https://github.com/proger/erldocker
+pkg_erldocker_commit = master
+
+PACKAGES += erlfsmon
+pkg_erlfsmon_name = erlfsmon
+pkg_erlfsmon_description = Erlang filesystem event watcher for Linux and OSX
+pkg_erlfsmon_homepage = https://github.com/proger/erlfsmon
+pkg_erlfsmon_fetch = git
+pkg_erlfsmon_repo = https://github.com/proger/erlfsmon
+pkg_erlfsmon_commit = master
+
+PACKAGES += erlgit
+pkg_erlgit_name = erlgit
+pkg_erlgit_description = Erlang convenience wrapper around git executable
+pkg_erlgit_homepage = https://github.com/gleber/erlgit
+pkg_erlgit_fetch = git
+pkg_erlgit_repo = https://github.com/gleber/erlgit
+pkg_erlgit_commit = master
+
+PACKAGES += erlguten
+pkg_erlguten_name = erlguten
+pkg_erlguten_description = ErlGuten is a system for high-quality typesetting, written purely in Erlang.
+pkg_erlguten_homepage = https://github.com/richcarl/erlguten
+pkg_erlguten_fetch = git
+pkg_erlguten_repo = https://github.com/richcarl/erlguten
+pkg_erlguten_commit = master
+
+PACKAGES += erlmc
+pkg_erlmc_name = erlmc
+pkg_erlmc_description = Erlang memcached binary protocol client
+pkg_erlmc_homepage = https://github.com/jkvor/erlmc
+pkg_erlmc_fetch = git
+pkg_erlmc_repo = https://github.com/jkvor/erlmc
+pkg_erlmc_commit = master
+
+PACKAGES += erlmongo
+pkg_erlmongo_name = erlmongo
+pkg_erlmongo_description = Record based Erlang driver for MongoDB with gridfs support
+pkg_erlmongo_homepage = https://github.com/SergejJurecko/erlmongo
+pkg_erlmongo_fetch = git
+pkg_erlmongo_repo = https://github.com/SergejJurecko/erlmongo
+pkg_erlmongo_commit = master
+
+PACKAGES += erlog
+pkg_erlog_name = erlog
+pkg_erlog_description = Prolog interpreter in and for Erlang
+pkg_erlog_homepage = https://github.com/rvirding/erlog
+pkg_erlog_fetch = git
+pkg_erlog_repo = https://github.com/rvirding/erlog
+pkg_erlog_commit = master
+
+PACKAGES += erlpass
+pkg_erlpass_name = erlpass
+pkg_erlpass_description = A library to handle password hashing and changing in a safe manner, independent from any kind of storage whatsoever.
+pkg_erlpass_homepage = https://github.com/ferd/erlpass
+pkg_erlpass_fetch = git
+pkg_erlpass_repo = https://github.com/ferd/erlpass
+pkg_erlpass_commit = master
+
+PACKAGES += erlport
+pkg_erlport_name = erlport
+pkg_erlport_description = ErlPort - connect Erlang to other languages
+pkg_erlport_homepage = https://github.com/hdima/erlport
+pkg_erlport_fetch = git
+pkg_erlport_repo = https://github.com/hdima/erlport
+pkg_erlport_commit = master
+
+PACKAGES += erlsh
+pkg_erlsh_name = erlsh
+pkg_erlsh_description = Erlang shell tools
+pkg_erlsh_homepage = https://github.com/proger/erlsh
+pkg_erlsh_fetch = git
+pkg_erlsh_repo = https://github.com/proger/erlsh
+pkg_erlsh_commit = master
+
+PACKAGES += erlsha2
+pkg_erlsha2_name = erlsha2
+pkg_erlsha2_description = SHA-224, SHA-256, SHA-384, SHA-512 implemented in Erlang NIFs.
+pkg_erlsha2_homepage = https://github.com/vinoski/erlsha2
+pkg_erlsha2_fetch = git
+pkg_erlsha2_repo = https://github.com/vinoski/erlsha2
+pkg_erlsha2_commit = master
+
+PACKAGES += erlsom
+pkg_erlsom_name = erlsom
+pkg_erlsom_description = XML parser for Erlang
+pkg_erlsom_homepage = https://github.com/willemdj/erlsom
+pkg_erlsom_fetch = git
+pkg_erlsom_repo = https://github.com/willemdj/erlsom
+pkg_erlsom_commit = master
+
+PACKAGES += erlubi
+pkg_erlubi_name = erlubi
+pkg_erlubi_description = Ubigraph Erlang Client (and Process Visualizer)
+pkg_erlubi_homepage = https://github.com/krestenkrab/erlubi
+pkg_erlubi_fetch = git
+pkg_erlubi_repo = https://github.com/krestenkrab/erlubi
+pkg_erlubi_commit = master
+
+PACKAGES += erlvolt
+pkg_erlvolt_name = erlvolt
+pkg_erlvolt_description = VoltDB Erlang Client Driver
+pkg_erlvolt_homepage = https://github.com/VoltDB/voltdb-client-erlang
+pkg_erlvolt_fetch = git
+pkg_erlvolt_repo = https://github.com/VoltDB/voltdb-client-erlang
+pkg_erlvolt_commit = master
+
+PACKAGES += erlware_commons
+pkg_erlware_commons_name = erlware_commons
+pkg_erlware_commons_description = Erlware Commons is an Erlware project focused on all aspects of reusable Erlang components.
+pkg_erlware_commons_homepage = https://github.com/erlware/erlware_commons
+pkg_erlware_commons_fetch = git
+pkg_erlware_commons_repo = https://github.com/erlware/erlware_commons
+pkg_erlware_commons_commit = master
+
+PACKAGES += erlydtl
+pkg_erlydtl_name = erlydtl
+pkg_erlydtl_description = Django Template Language for Erlang.
+pkg_erlydtl_homepage = https://github.com/erlydtl/erlydtl
+pkg_erlydtl_fetch = git
+pkg_erlydtl_repo = https://github.com/erlydtl/erlydtl
+pkg_erlydtl_commit = master
+
+PACKAGES += errd
+pkg_errd_name = errd
+pkg_errd_description = Erlang RRDTool library
+pkg_errd_homepage = https://github.com/archaelus/errd
+pkg_errd_fetch = git
+pkg_errd_repo = https://github.com/archaelus/errd
+pkg_errd_commit = master
+
+PACKAGES += erserve
+pkg_erserve_name = erserve
+pkg_erserve_description = Erlang/Rserve communication interface
+pkg_erserve_homepage = https://github.com/del/erserve
+pkg_erserve_fetch = git
+pkg_erserve_repo = https://github.com/del/erserve
+pkg_erserve_commit = master
+
+PACKAGES += erwa
+pkg_erwa_name = erwa
+pkg_erwa_description = A WAMP router and client written in Erlang.
+pkg_erwa_homepage = https://github.com/bwegh/erwa
+pkg_erwa_fetch = git
+pkg_erwa_repo = https://github.com/bwegh/erwa
+pkg_erwa_commit = master
+
+PACKAGES += escalus
+pkg_escalus_name = escalus
+pkg_escalus_description = An XMPP client library in Erlang for conveniently testing XMPP servers
+pkg_escalus_homepage = https://github.com/esl/escalus
+pkg_escalus_fetch = git
+pkg_escalus_repo = https://github.com/esl/escalus
+pkg_escalus_commit = master
+
+PACKAGES += esh_mk
+pkg_esh_mk_name = esh_mk
+pkg_esh_mk_description = esh template engine plugin for erlang.mk
+pkg_esh_mk_homepage = https://github.com/crownedgrouse/esh.mk
+pkg_esh_mk_fetch = git
+pkg_esh_mk_repo = https://github.com/crownedgrouse/esh.mk.git
+pkg_esh_mk_commit = master
+
+PACKAGES += espec
+pkg_espec_name = espec
+pkg_espec_description = ESpec: Behaviour driven development framework for Erlang
+pkg_espec_homepage = https://github.com/lucaspiller/espec
+pkg_espec_fetch = git
+pkg_espec_repo = https://github.com/lucaspiller/espec
+pkg_espec_commit = master
+
+PACKAGES += estatsd
+pkg_estatsd_name = estatsd
+pkg_estatsd_description = Erlang stats aggregation app that periodically flushes data to graphite
+pkg_estatsd_homepage = https://github.com/RJ/estatsd
+pkg_estatsd_fetch = git
+pkg_estatsd_repo = https://github.com/RJ/estatsd
+pkg_estatsd_commit = master
+
+PACKAGES += etap
+pkg_etap_name = etap
+pkg_etap_description = etap is a simple Erlang testing library that provides TAP-compliant output.
+pkg_etap_homepage = https://github.com/ngerakines/etap
+pkg_etap_fetch = git
+pkg_etap_repo = https://github.com/ngerakines/etap
+pkg_etap_commit = master
+
+PACKAGES += etest
+pkg_etest_name = etest
+pkg_etest_description = A lightweight, convention over configuration test framework for Erlang
+pkg_etest_homepage = https://github.com/wooga/etest
+pkg_etest_fetch = git
+pkg_etest_repo = https://github.com/wooga/etest
+pkg_etest_commit = master
+
+PACKAGES += etest_http
+pkg_etest_http_name = etest_http
+pkg_etest_http_description = etest Assertions around HTTP (client-side)
+pkg_etest_http_homepage = https://github.com/wooga/etest_http
+pkg_etest_http_fetch = git
+pkg_etest_http_repo = https://github.com/wooga/etest_http
+pkg_etest_http_commit = master
+
+PACKAGES += etoml
+pkg_etoml_name = etoml
+pkg_etoml_description = TOML language erlang parser
+pkg_etoml_homepage = https://github.com/kalta/etoml
+pkg_etoml_fetch = git
+pkg_etoml_repo = https://github.com/kalta/etoml
+pkg_etoml_commit = master
+
+PACKAGES += eunit
+pkg_eunit_name = eunit
+pkg_eunit_description = The EUnit lightweight unit testing framework for Erlang - this is the canonical development repository.
+pkg_eunit_homepage = https://github.com/richcarl/eunit
+pkg_eunit_fetch = git
+pkg_eunit_repo = https://github.com/richcarl/eunit
+pkg_eunit_commit = master
+
+PACKAGES += eunit_formatters
+pkg_eunit_formatters_name = eunit_formatters
+pkg_eunit_formatters_description = Because eunit's output sucks. Let's make it better.
+pkg_eunit_formatters_homepage = https://github.com/seancribbs/eunit_formatters
+pkg_eunit_formatters_fetch = git
+pkg_eunit_formatters_repo = https://github.com/seancribbs/eunit_formatters
+pkg_eunit_formatters_commit = master
+
+PACKAGES += euthanasia
+pkg_euthanasia_name = euthanasia
+pkg_euthanasia_description = Merciful killer for your Erlang processes
+pkg_euthanasia_homepage = https://github.com/doubleyou/euthanasia
+pkg_euthanasia_fetch = git
+pkg_euthanasia_repo = https://github.com/doubleyou/euthanasia
+pkg_euthanasia_commit = master
+
+PACKAGES += evum
+pkg_evum_name = evum
+pkg_evum_description = Spawn Linux VMs as Erlang processes in the Erlang VM
+pkg_evum_homepage = https://github.com/msantos/evum
+pkg_evum_fetch = git
+pkg_evum_repo = https://github.com/msantos/evum
+pkg_evum_commit = master
+
+PACKAGES += exec
+pkg_exec_name = erlexec
+pkg_exec_description = Execute and control OS processes from Erlang/OTP.
+pkg_exec_homepage = http://saleyn.github.com/erlexec
+pkg_exec_fetch = git
+pkg_exec_repo = https://github.com/saleyn/erlexec
+pkg_exec_commit = master
+
+PACKAGES += exml
+pkg_exml_name = exml
+pkg_exml_description = XML parsing library in Erlang
+pkg_exml_homepage = https://github.com/paulgray/exml
+pkg_exml_fetch = git
+pkg_exml_repo = https://github.com/paulgray/exml
+pkg_exml_commit = master
+
+PACKAGES += exometer
+pkg_exometer_name = exometer
+pkg_exometer_description = Basic measurement objects and probe behavior
+pkg_exometer_homepage = https://github.com/Feuerlabs/exometer
+pkg_exometer_fetch = git
+pkg_exometer_repo = https://github.com/Feuerlabs/exometer
+pkg_exometer_commit = master
+
+PACKAGES += exs1024
+pkg_exs1024_name = exs1024
+pkg_exs1024_description = Xorshift1024star pseudo random number generator for Erlang.
+pkg_exs1024_homepage = https://github.com/jj1bdx/exs1024
+pkg_exs1024_fetch = git
+pkg_exs1024_repo = https://github.com/jj1bdx/exs1024
+pkg_exs1024_commit = master
+
+PACKAGES += exs64
+pkg_exs64_name = exs64
+pkg_exs64_description = Xorshift64star pseudo random number generator for Erlang.
+pkg_exs64_homepage = https://github.com/jj1bdx/exs64
+pkg_exs64_fetch = git
+pkg_exs64_repo = https://github.com/jj1bdx/exs64
+pkg_exs64_commit = master
+
+PACKAGES += exsplus116
+pkg_exsplus116_name = exsplus116
+pkg_exsplus116_description = Xorshift116plus for Erlang
+pkg_exsplus116_homepage = https://github.com/jj1bdx/exsplus116
+pkg_exsplus116_fetch = git
+pkg_exsplus116_repo = https://github.com/jj1bdx/exsplus116
+pkg_exsplus116_commit = master
+
+PACKAGES += exsplus128
+pkg_exsplus128_name = exsplus128
+pkg_exsplus128_description = Xorshift128plus pseudo random number generator for Erlang.
+pkg_exsplus128_homepage = https://github.com/jj1bdx/exsplus128
+pkg_exsplus128_fetch = git
+pkg_exsplus128_repo = https://github.com/jj1bdx/exsplus128
+pkg_exsplus128_commit = master
+
+PACKAGES += ezmq
+pkg_ezmq_name = ezmq
+pkg_ezmq_description = zMQ implemented in Erlang
+pkg_ezmq_homepage = https://github.com/RoadRunnr/ezmq
+pkg_ezmq_fetch = git
+pkg_ezmq_repo = https://github.com/RoadRunnr/ezmq
+pkg_ezmq_commit = master
+
+PACKAGES += ezmtp
+pkg_ezmtp_name = ezmtp
+pkg_ezmtp_description = ZMTP protocol in pure Erlang.
+pkg_ezmtp_homepage = https://github.com/a13x/ezmtp
+pkg_ezmtp_fetch = git
+pkg_ezmtp_repo = https://github.com/a13x/ezmtp
+pkg_ezmtp_commit = master
+
+PACKAGES += fast_disk_log
+pkg_fast_disk_log_name = fast_disk_log
+pkg_fast_disk_log_description = Pool-based asynchronous Erlang disk logger
+pkg_fast_disk_log_homepage = https://github.com/lpgauth/fast_disk_log
+pkg_fast_disk_log_fetch = git
+pkg_fast_disk_log_repo = https://github.com/lpgauth/fast_disk_log
+pkg_fast_disk_log_commit = master
+
+PACKAGES += feeder
+pkg_feeder_name = feeder
+pkg_feeder_description = Stream parse RSS and Atom formatted XML feeds.
+pkg_feeder_homepage = https://github.com/michaelnisi/feeder
+pkg_feeder_fetch = git
+pkg_feeder_repo = https://github.com/michaelnisi/feeder
+pkg_feeder_commit = master
+
+PACKAGES += find_crate
+pkg_find_crate_name = find_crate
+pkg_find_crate_description = Find Rust libs and exes in Erlang application priv directory
+pkg_find_crate_homepage = https://github.com/goertzenator/find_crate
+pkg_find_crate_fetch = git
+pkg_find_crate_repo = https://github.com/goertzenator/find_crate
+pkg_find_crate_commit = master
+
+PACKAGES += fix
+pkg_fix_name = fix
+pkg_fix_description = Implementation of the FIX protocol (http://fixprotocol.org/).
+pkg_fix_homepage = https://github.com/maxlapshin/fix
+pkg_fix_fetch = git
+pkg_fix_repo = https://github.com/maxlapshin/fix
+pkg_fix_commit = master
+
+PACKAGES += flower
+pkg_flower_name = flower
+pkg_flower_description = FlowER - an Erlang OpenFlow development platform
+pkg_flower_homepage = https://github.com/travelping/flower
+pkg_flower_fetch = git
+pkg_flower_repo = https://github.com/travelping/flower
+pkg_flower_commit = master
+
+PACKAGES += fn
+pkg_fn_name = fn
+pkg_fn_description = Function utilities for Erlang
+pkg_fn_homepage = https://github.com/reiddraper/fn
+pkg_fn_fetch = git
+pkg_fn_repo = https://github.com/reiddraper/fn
+pkg_fn_commit = master
+
+PACKAGES += folsom
+pkg_folsom_name = folsom
+pkg_folsom_description = Expose Erlang Events and Metrics
+pkg_folsom_homepage = https://github.com/boundary/folsom
+pkg_folsom_fetch = git
+pkg_folsom_repo = https://github.com/boundary/folsom
+pkg_folsom_commit = master
+
+PACKAGES += folsom_cowboy
+pkg_folsom_cowboy_name = folsom_cowboy
+pkg_folsom_cowboy_description = A Cowboy based Folsom HTTP Wrapper.
+pkg_folsom_cowboy_homepage = https://github.com/boundary/folsom_cowboy
+pkg_folsom_cowboy_fetch = git
+pkg_folsom_cowboy_repo = https://github.com/boundary/folsom_cowboy
+pkg_folsom_cowboy_commit = master
+
+PACKAGES += folsomite
+pkg_folsomite_name = folsomite
+pkg_folsomite_description = blow up your graphite / riemann server with folsom metrics
+pkg_folsomite_homepage = https://github.com/campanja/folsomite
+pkg_folsomite_fetch = git
+pkg_folsomite_repo = https://github.com/campanja/folsomite
+pkg_folsomite_commit = master
+
+PACKAGES += fs
+pkg_fs_name = fs
+pkg_fs_description = Erlang FileSystem Listener
+pkg_fs_homepage = https://github.com/synrc/fs
+pkg_fs_fetch = git
+pkg_fs_repo = https://github.com/synrc/fs
+pkg_fs_commit = master
+
+PACKAGES += fuse
+pkg_fuse_name = fuse
+pkg_fuse_description = A Circuit Breaker for Erlang
+pkg_fuse_homepage = https://github.com/jlouis/fuse
+pkg_fuse_fetch = git
+pkg_fuse_repo = https://github.com/jlouis/fuse
+pkg_fuse_commit = master
+
+PACKAGES += gcm
+pkg_gcm_name = gcm
+pkg_gcm_description = An Erlang application for Google Cloud Messaging
+pkg_gcm_homepage = https://github.com/pdincau/gcm-erlang
+pkg_gcm_fetch = git
+pkg_gcm_repo = https://github.com/pdincau/gcm-erlang
+pkg_gcm_commit = master
+
+PACKAGES += gcprof
+pkg_gcprof_name = gcprof
+pkg_gcprof_description = Garbage Collection profiler for Erlang
+pkg_gcprof_homepage = https://github.com/knutin/gcprof
+pkg_gcprof_fetch = git
+pkg_gcprof_repo = https://github.com/knutin/gcprof
+pkg_gcprof_commit = master
+
+PACKAGES += geas
+pkg_geas_name = geas
+pkg_geas_description = Guess Erlang Application Scattering
+pkg_geas_homepage = https://github.com/crownedgrouse/geas
+pkg_geas_fetch = git
+pkg_geas_repo = https://github.com/crownedgrouse/geas
+pkg_geas_commit = master
+
+PACKAGES += geef
+pkg_geef_name = geef
+pkg_geef_description = Git NEEEEF (Erlang NIF)
+pkg_geef_homepage = https://github.com/carlosmn/geef
+pkg_geef_fetch = git
+pkg_geef_repo = https://github.com/carlosmn/geef
+pkg_geef_commit = master
+
+PACKAGES += gen_coap
+pkg_gen_coap_name = gen_coap
+pkg_gen_coap_description = Generic Erlang CoAP Client/Server
+pkg_gen_coap_homepage = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_fetch = git
+pkg_gen_coap_repo = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_commit = master
+
+PACKAGES += gen_cycle
+pkg_gen_cycle_name = gen_cycle
+pkg_gen_cycle_description = Simple, generic OTP behaviour for recurring tasks
+pkg_gen_cycle_homepage = https://github.com/aerosol/gen_cycle
+pkg_gen_cycle_fetch = git
+pkg_gen_cycle_repo = https://github.com/aerosol/gen_cycle
+pkg_gen_cycle_commit = develop
+
+PACKAGES += gen_icmp
+pkg_gen_icmp_name = gen_icmp
+pkg_gen_icmp_description = Erlang interface to ICMP sockets
+pkg_gen_icmp_homepage = https://github.com/msantos/gen_icmp
+pkg_gen_icmp_fetch = git
+pkg_gen_icmp_repo = https://github.com/msantos/gen_icmp
+pkg_gen_icmp_commit = master
+
+PACKAGES += gen_leader
+pkg_gen_leader_name = gen_leader
+pkg_gen_leader_description = leader election behavior
+pkg_gen_leader_homepage = https://github.com/garret-smith/gen_leader_revival
+pkg_gen_leader_fetch = git
+pkg_gen_leader_repo = https://github.com/garret-smith/gen_leader_revival
+pkg_gen_leader_commit = master
+
+PACKAGES += gen_nb_server
+pkg_gen_nb_server_name = gen_nb_server
+pkg_gen_nb_server_description = OTP behavior for writing non-blocking servers
+pkg_gen_nb_server_homepage = https://github.com/kevsmith/gen_nb_server
+pkg_gen_nb_server_fetch = git
+pkg_gen_nb_server_repo = https://github.com/kevsmith/gen_nb_server
+pkg_gen_nb_server_commit = master
+
+PACKAGES += gen_paxos
+pkg_gen_paxos_name = gen_paxos
+pkg_gen_paxos_description = An Erlang/OTP-style implementation of the PAXOS distributed consensus protocol
+pkg_gen_paxos_homepage = https://github.com/gburd/gen_paxos
+pkg_gen_paxos_fetch = git
+pkg_gen_paxos_repo = https://github.com/gburd/gen_paxos
+pkg_gen_paxos_commit = master
+
+PACKAGES += gen_rpc
+pkg_gen_rpc_name = gen_rpc
+pkg_gen_rpc_description = A scalable RPC library for Erlang-VM based languages
+pkg_gen_rpc_homepage = https://github.com/priestjim/gen_rpc.git
+pkg_gen_rpc_fetch = git
+pkg_gen_rpc_repo = https://github.com/priestjim/gen_rpc.git
+pkg_gen_rpc_commit = master
+
+PACKAGES += gen_smtp
+pkg_gen_smtp_name = gen_smtp
+pkg_gen_smtp_description = A generic Erlang SMTP server and client that can be extended via callback modules
+pkg_gen_smtp_homepage = https://github.com/Vagabond/gen_smtp
+pkg_gen_smtp_fetch = git
+pkg_gen_smtp_repo = https://github.com/Vagabond/gen_smtp
+pkg_gen_smtp_commit = master
+
+PACKAGES += gen_tracker
+pkg_gen_tracker_name = gen_tracker
+pkg_gen_tracker_description = Supervisor with ETS handling of children and their metadata
+pkg_gen_tracker_homepage = https://github.com/erlyvideo/gen_tracker
+pkg_gen_tracker_fetch = git
+pkg_gen_tracker_repo = https://github.com/erlyvideo/gen_tracker
+pkg_gen_tracker_commit = master
+
+PACKAGES += gen_unix
+pkg_gen_unix_name = gen_unix
+pkg_gen_unix_description = Erlang Unix socket interface
+pkg_gen_unix_homepage = https://github.com/msantos/gen_unix
+pkg_gen_unix_fetch = git
+pkg_gen_unix_repo = https://github.com/msantos/gen_unix
+pkg_gen_unix_commit = master
+
+PACKAGES += geode
+pkg_geode_name = geode
+pkg_geode_description = geohash/proximity lookup in pure, uncut erlang.
+pkg_geode_homepage = https://github.com/bradfordw/geode
+pkg_geode_fetch = git
+pkg_geode_repo = https://github.com/bradfordw/geode
+pkg_geode_commit = master
+
+PACKAGES += getopt
+pkg_getopt_name = getopt
+pkg_getopt_description = Module to parse command line arguments using the GNU getopt syntax
+pkg_getopt_homepage = https://github.com/jcomellas/getopt
+pkg_getopt_fetch = git
+pkg_getopt_repo = https://github.com/jcomellas/getopt
+pkg_getopt_commit = master
+
+PACKAGES += gettext
+pkg_gettext_name = gettext
+pkg_gettext_description = Erlang internationalization library.
+pkg_gettext_homepage = https://github.com/etnt/gettext
+pkg_gettext_fetch = git
+pkg_gettext_repo = https://github.com/etnt/gettext
+pkg_gettext_commit = master
+
+PACKAGES += giallo
+pkg_giallo_name = giallo
+pkg_giallo_description = Small and flexible web framework on top of Cowboy
+pkg_giallo_homepage = https://github.com/kivra/giallo
+pkg_giallo_fetch = git
+pkg_giallo_repo = https://github.com/kivra/giallo
+pkg_giallo_commit = master
+
+PACKAGES += gin
+pkg_gin_name = gin
+pkg_gin_description = The guards and for Erlang parse_transform
+pkg_gin_homepage = https://github.com/mad-cocktail/gin
+pkg_gin_fetch = git
+pkg_gin_repo = https://github.com/mad-cocktail/gin
+pkg_gin_commit = master
+
+PACKAGES += gitty
+pkg_gitty_name = gitty
+pkg_gitty_description = Git access in erlang
+pkg_gitty_homepage = https://github.com/maxlapshin/gitty
+pkg_gitty_fetch = git
+pkg_gitty_repo = https://github.com/maxlapshin/gitty
+pkg_gitty_commit = master
+
+PACKAGES += gold_fever
+pkg_gold_fever_name = gold_fever
+pkg_gold_fever_description = A Treasure Hunt for Erlangers
+pkg_gold_fever_homepage = https://github.com/inaka/gold_fever
+pkg_gold_fever_fetch = git
+pkg_gold_fever_repo = https://github.com/inaka/gold_fever
+pkg_gold_fever_commit = master
+
+PACKAGES += gpb
+pkg_gpb_name = gpb
+pkg_gpb_description = A Google Protobuf implementation for Erlang
+pkg_gpb_homepage = https://github.com/tomas-abrahamsson/gpb
+pkg_gpb_fetch = git
+pkg_gpb_repo = https://github.com/tomas-abrahamsson/gpb
+pkg_gpb_commit = master
+
+PACKAGES += gproc
+pkg_gproc_name = gproc
+pkg_gproc_description = Extended process registry for Erlang
+pkg_gproc_homepage = https://github.com/uwiger/gproc
+pkg_gproc_fetch = git
+pkg_gproc_repo = https://github.com/uwiger/gproc
+pkg_gproc_commit = master
+
+PACKAGES += grapherl
+pkg_grapherl_name = grapherl
+pkg_grapherl_description = Create graphs of Erlang systems and programs
+pkg_grapherl_homepage = https://github.com/eproxus/grapherl
+pkg_grapherl_fetch = git
+pkg_grapherl_repo = https://github.com/eproxus/grapherl
+pkg_grapherl_commit = master
+
+PACKAGES += grpc
+pkg_grpc_name = grpc
+pkg_grpc_description = gRPC server in Erlang
+pkg_grpc_homepage = https://github.com/Bluehouse-Technology/grpc
+pkg_grpc_fetch = git
+pkg_grpc_repo = https://github.com/Bluehouse-Technology/grpc
+pkg_grpc_commit = master
+
+PACKAGES += grpc_client
+pkg_grpc_client_name = grpc_client
+pkg_grpc_client_description = gRPC client in Erlang
+pkg_grpc_client_homepage = https://github.com/Bluehouse-Technology/grpc_client
+pkg_grpc_client_fetch = git
+pkg_grpc_client_repo = https://github.com/Bluehouse-Technology/grpc_client
+pkg_grpc_client_commit = master
+
+PACKAGES += gun
+pkg_gun_name = gun
+pkg_gun_description = Asynchronous SPDY, HTTP and Websocket client written in Erlang.
+pkg_gun_homepage = http://ninenines.eu
+pkg_gun_fetch = git
+pkg_gun_repo = https://github.com/ninenines/gun
+pkg_gun_commit = master
+
+PACKAGES += gut
+pkg_gut_name = gut
+pkg_gut_description = gut is a template printing, aka scaffolding, tool for Erlang. Like rails generate or yeoman
+pkg_gut_homepage = https://github.com/unbalancedparentheses/gut
+pkg_gut_fetch = git
+pkg_gut_repo = https://github.com/unbalancedparentheses/gut
+pkg_gut_commit = master
+
+PACKAGES += hackney
+pkg_hackney_name = hackney
+pkg_hackney_description = simple HTTP client in Erlang
+pkg_hackney_homepage = https://github.com/benoitc/hackney
+pkg_hackney_fetch = git
+pkg_hackney_repo = https://github.com/benoitc/hackney
+pkg_hackney_commit = master
+
+PACKAGES += hamcrest
+pkg_hamcrest_name = hamcrest
+pkg_hamcrest_description = Erlang port of Hamcrest
+pkg_hamcrest_homepage = https://github.com/hyperthunk/hamcrest-erlang
+pkg_hamcrest_fetch = git
+pkg_hamcrest_repo = https://github.com/hyperthunk/hamcrest-erlang
+pkg_hamcrest_commit = master
+
+PACKAGES += hanoidb
+pkg_hanoidb_name = hanoidb
+pkg_hanoidb_description = Erlang LSM BTree Storage
+pkg_hanoidb_homepage = https://github.com/krestenkrab/hanoidb
+pkg_hanoidb_fetch = git
+pkg_hanoidb_repo = https://github.com/krestenkrab/hanoidb
+pkg_hanoidb_commit = master
+
+PACKAGES += hottub
+pkg_hottub_name = hottub
+pkg_hottub_description = Permanent Erlang Worker Pool
+pkg_hottub_homepage = https://github.com/bfrog/hottub
+pkg_hottub_fetch = git
+pkg_hottub_repo = https://github.com/bfrog/hottub
+pkg_hottub_commit = master
+
+PACKAGES += hpack
+pkg_hpack_name = hpack
+pkg_hpack_description = HPACK Implementation for Erlang
+pkg_hpack_homepage = https://github.com/joedevivo/hpack
+pkg_hpack_fetch = git
+pkg_hpack_repo = https://github.com/joedevivo/hpack
+pkg_hpack_commit = master
+
+PACKAGES += hyper
+pkg_hyper_name = hyper
+pkg_hyper_description = Erlang implementation of HyperLogLog
+pkg_hyper_homepage = https://github.com/GameAnalytics/hyper
+pkg_hyper_fetch = git
+pkg_hyper_repo = https://github.com/GameAnalytics/hyper
+pkg_hyper_commit = master
+
+PACKAGES += i18n
+pkg_i18n_name = i18n
+pkg_i18n_description = International components for unicode from Erlang (unicode, date, string, number, format, locale, localization, transliteration, icu4e)
+pkg_i18n_homepage = https://github.com/erlang-unicode/i18n
+pkg_i18n_fetch = git
+pkg_i18n_repo = https://github.com/erlang-unicode/i18n
+pkg_i18n_commit = master
+
+PACKAGES += ibrowse
+pkg_ibrowse_name = ibrowse
+pkg_ibrowse_description = Erlang HTTP client
+pkg_ibrowse_homepage = https://github.com/cmullaparthi/ibrowse
+pkg_ibrowse_fetch = git
+pkg_ibrowse_repo = https://github.com/cmullaparthi/ibrowse
+pkg_ibrowse_commit = master
+
+PACKAGES += idna
+pkg_idna_name = idna
+pkg_idna_description = Erlang IDNA lib
+pkg_idna_homepage = https://github.com/benoitc/erlang-idna
+pkg_idna_fetch = git
+pkg_idna_repo = https://github.com/benoitc/erlang-idna
+pkg_idna_commit = master
+
+PACKAGES += ierlang
+pkg_ierlang_name = ierlang
+pkg_ierlang_description = An Erlang language kernel for IPython.
+pkg_ierlang_homepage = https://github.com/robbielynch/ierlang
+pkg_ierlang_fetch = git
+pkg_ierlang_repo = https://github.com/robbielynch/ierlang
+pkg_ierlang_commit = master
+
+PACKAGES += iota
+pkg_iota_name = iota
+pkg_iota_description = iota (Inter-dependency Objective Testing Apparatus) - a tool to enforce clean separation of responsibilities in Erlang code
+pkg_iota_homepage = https://github.com/jpgneves/iota
+pkg_iota_fetch = git
+pkg_iota_repo = https://github.com/jpgneves/iota
+pkg_iota_commit = master
+
+PACKAGES += irc_lib
+pkg_irc_lib_name = irc_lib
+pkg_irc_lib_description = Erlang irc client library
+pkg_irc_lib_homepage = https://github.com/OtpChatBot/irc_lib
+pkg_irc_lib_fetch = git
+pkg_irc_lib_repo = https://github.com/OtpChatBot/irc_lib
+pkg_irc_lib_commit = master
+
+PACKAGES += ircd
+pkg_ircd_name = ircd
+pkg_ircd_description = A pluggable IRC daemon application/library for Erlang.
+pkg_ircd_homepage = https://github.com/tonyg/erlang-ircd
+pkg_ircd_fetch = git
+pkg_ircd_repo = https://github.com/tonyg/erlang-ircd
+pkg_ircd_commit = master
+
+PACKAGES += iris
+pkg_iris_name = iris
+pkg_iris_description = Iris Erlang binding
+pkg_iris_homepage = https://github.com/project-iris/iris-erl
+pkg_iris_fetch = git
+pkg_iris_repo = https://github.com/project-iris/iris-erl
+pkg_iris_commit = master
+
+PACKAGES += iso8601
+pkg_iso8601_name = iso8601
+pkg_iso8601_description = Erlang ISO 8601 date formatter/parser
+pkg_iso8601_homepage = https://github.com/seansawyer/erlang_iso8601
+pkg_iso8601_fetch = git
+pkg_iso8601_repo = https://github.com/seansawyer/erlang_iso8601
+pkg_iso8601_commit = master
+
+PACKAGES += jamdb_sybase
+pkg_jamdb_sybase_name = jamdb_sybase
+pkg_jamdb_sybase_description = Erlang driver for SAP Sybase ASE
+pkg_jamdb_sybase_homepage = https://github.com/erlangbureau/jamdb_sybase
+pkg_jamdb_sybase_fetch = git
+pkg_jamdb_sybase_repo = https://github.com/erlangbureau/jamdb_sybase
+pkg_jamdb_sybase_commit = master
+
+PACKAGES += jerg
+pkg_jerg_name = jerg
+pkg_jerg_description = JSON Schema to Erlang Records Generator
+pkg_jerg_homepage = https://github.com/ddossot/jerg
+pkg_jerg_fetch = git
+pkg_jerg_repo = https://github.com/ddossot/jerg
+pkg_jerg_commit = master
+
+PACKAGES += jesse
+pkg_jesse_name = jesse
+pkg_jesse_description = jesse (JSon Schema Erlang) is an implementation of a json schema validator for Erlang.
+pkg_jesse_homepage = https://github.com/for-GET/jesse
+pkg_jesse_fetch = git
+pkg_jesse_repo = https://github.com/for-GET/jesse
+pkg_jesse_commit = master
+
+PACKAGES += jiffy
+pkg_jiffy_name = jiffy
+pkg_jiffy_description = JSON NIFs for Erlang.
+pkg_jiffy_homepage = https://github.com/davisp/jiffy
+pkg_jiffy_fetch = git
+pkg_jiffy_repo = https://github.com/davisp/jiffy
+pkg_jiffy_commit = master
+
+PACKAGES += jiffy_v
+pkg_jiffy_v_name = jiffy_v
+pkg_jiffy_v_description = JSON validation utility
+pkg_jiffy_v_homepage = https://github.com/shizzard/jiffy-v
+pkg_jiffy_v_fetch = git
+pkg_jiffy_v_repo = https://github.com/shizzard/jiffy-v
+pkg_jiffy_v_commit = master
+
+PACKAGES += jobs
+pkg_jobs_name = jobs
+pkg_jobs_description = a Job scheduler for load regulation
+pkg_jobs_homepage = https://github.com/esl/jobs
+pkg_jobs_fetch = git
+pkg_jobs_repo = https://github.com/esl/jobs
+pkg_jobs_commit = master
+
+PACKAGES += joxa
+pkg_joxa_name = joxa
+pkg_joxa_description = A Modern Lisp for the Erlang VM
+pkg_joxa_homepage = https://github.com/joxa/joxa
+pkg_joxa_fetch = git
+pkg_joxa_repo = https://github.com/joxa/joxa
+pkg_joxa_commit = master
+
+PACKAGES += json
+pkg_json_name = json
+pkg_json_description = a high level json library for erlang (17.0+)
+pkg_json_homepage = https://github.com/talentdeficit/json
+pkg_json_fetch = git
+pkg_json_repo = https://github.com/talentdeficit/json
+pkg_json_commit = master
+
+PACKAGES += json_rec
+pkg_json_rec_name = json_rec
+pkg_json_rec_description = JSON to erlang record
+pkg_json_rec_homepage = https://github.com/justinkirby/json_rec
+pkg_json_rec_fetch = git
+pkg_json_rec_repo = https://github.com/justinkirby/json_rec
+pkg_json_rec_commit = master
+
+PACKAGES += jsone
+pkg_jsone_name = jsone
+pkg_jsone_description = An Erlang library for encoding and decoding JSON data.
+pkg_jsone_homepage = https://github.com/sile/jsone.git
+pkg_jsone_fetch = git
+pkg_jsone_repo = https://github.com/sile/jsone.git
+pkg_jsone_commit = master
+
+PACKAGES += jsonerl
+pkg_jsonerl_name = jsonerl
+pkg_jsonerl_description = yet another but slightly different erlang <-> json encoder/decoder
+pkg_jsonerl_homepage = https://github.com/lambder/jsonerl
+pkg_jsonerl_fetch = git
+pkg_jsonerl_repo = https://github.com/lambder/jsonerl
+pkg_jsonerl_commit = master
+
+PACKAGES += jsonpath
+pkg_jsonpath_name = jsonpath
+pkg_jsonpath_description = Fast Erlang JSON data retrieval and updates via javascript-like notation
+pkg_jsonpath_homepage = https://github.com/GeneStevens/jsonpath
+pkg_jsonpath_fetch = git
+pkg_jsonpath_repo = https://github.com/GeneStevens/jsonpath
+pkg_jsonpath_commit = master
+
+PACKAGES += jsonx
+pkg_jsonx_name = jsonx
+pkg_jsonx_description = JSONX is an Erlang library for efficiently decoding and encoding JSON, written in C.
+pkg_jsonx_homepage = https://github.com/iskra/jsonx
+pkg_jsonx_fetch = git
+pkg_jsonx_repo = https://github.com/iskra/jsonx
+pkg_jsonx_commit = master
+
+PACKAGES += jsx
+pkg_jsx_name = jsx
+pkg_jsx_description = An Erlang application for consuming, producing and manipulating JSON.
+pkg_jsx_homepage = https://github.com/talentdeficit/jsx
+pkg_jsx_fetch = git
+pkg_jsx_repo = https://github.com/talentdeficit/jsx
+pkg_jsx_commit = master
+
+PACKAGES += kafka
+pkg_kafka_name = kafka
+pkg_kafka_description = Kafka consumer and producer in Erlang
+pkg_kafka_homepage = https://github.com/wooga/kafka-erlang
+pkg_kafka_fetch = git
+pkg_kafka_repo = https://github.com/wooga/kafka-erlang
+pkg_kafka_commit = master
+
+PACKAGES += kafka_protocol
+pkg_kafka_protocol_name = kafka_protocol
+pkg_kafka_protocol_description = Kafka protocol Erlang library
+pkg_kafka_protocol_homepage = https://github.com/klarna/kafka_protocol
+pkg_kafka_protocol_fetch = git
+pkg_kafka_protocol_repo = https://github.com/klarna/kafka_protocol.git
+pkg_kafka_protocol_commit = master
+
+PACKAGES += kai
+pkg_kai_name = kai
+pkg_kai_description = DHT storage by Takeshi Inoue
+pkg_kai_homepage = https://github.com/synrc/kai
+pkg_kai_fetch = git
+pkg_kai_repo = https://github.com/synrc/kai
+pkg_kai_commit = master
+
+PACKAGES += katja
+pkg_katja_name = katja
+pkg_katja_description = A simple Riemann client written in Erlang.
+pkg_katja_homepage = https://github.com/nifoc/katja
+pkg_katja_fetch = git
+pkg_katja_repo = https://github.com/nifoc/katja
+pkg_katja_commit = master
+
+PACKAGES += kdht
+pkg_kdht_name = kdht
+pkg_kdht_description = kdht is an erlang DHT implementation
+pkg_kdht_homepage = https://github.com/kevinlynx/kdht
+pkg_kdht_fetch = git
+pkg_kdht_repo = https://github.com/kevinlynx/kdht
+pkg_kdht_commit = master
+
+PACKAGES += key2value
+pkg_key2value_name = key2value
+pkg_key2value_description = Erlang 2-way map
+pkg_key2value_homepage = https://github.com/okeuday/key2value
+pkg_key2value_fetch = git
+pkg_key2value_repo = https://github.com/okeuday/key2value
+pkg_key2value_commit = master
+
+PACKAGES += keys1value
+pkg_keys1value_name = keys1value
+pkg_keys1value_description = Erlang set associative map for key lists
+pkg_keys1value_homepage = https://github.com/okeuday/keys1value
+pkg_keys1value_fetch = git
+pkg_keys1value_repo = https://github.com/okeuday/keys1value
+pkg_keys1value_commit = master
+
+PACKAGES += kinetic
+pkg_kinetic_name = kinetic
+pkg_kinetic_description = Erlang Kinesis Client
+pkg_kinetic_homepage = https://github.com/AdRoll/kinetic
+pkg_kinetic_fetch = git
+pkg_kinetic_repo = https://github.com/AdRoll/kinetic
+pkg_kinetic_commit = master
+
+PACKAGES += kjell
+pkg_kjell_name = kjell
+pkg_kjell_description = Erlang Shell
+pkg_kjell_homepage = https://github.com/karlll/kjell
+pkg_kjell_fetch = git
+pkg_kjell_repo = https://github.com/karlll/kjell
+pkg_kjell_commit = master
+
+PACKAGES += kraken
+pkg_kraken_name = kraken
+pkg_kraken_description = Distributed Pubsub Server for Realtime Apps
+pkg_kraken_homepage = https://github.com/Asana/kraken
+pkg_kraken_fetch = git
+pkg_kraken_repo = https://github.com/Asana/kraken
+pkg_kraken_commit = master
+
+PACKAGES += kucumberl
+pkg_kucumberl_name = kucumberl
+pkg_kucumberl_description = A pure-Erlang, open-source implementation of Cucumber
+pkg_kucumberl_homepage = https://github.com/openshine/kucumberl
+pkg_kucumberl_fetch = git
+pkg_kucumberl_repo = https://github.com/openshine/kucumberl
+pkg_kucumberl_commit = master
+
+PACKAGES += kvc
+pkg_kvc_name = kvc
+pkg_kvc_description = KVC - Key Value Coding for Erlang data structures
+pkg_kvc_homepage = https://github.com/etrepum/kvc
+pkg_kvc_fetch = git
+pkg_kvc_repo = https://github.com/etrepum/kvc
+pkg_kvc_commit = master
+
+PACKAGES += kvlists
+pkg_kvlists_name = kvlists
+pkg_kvlists_description = Lists of key-value pairs (decoded JSON) in Erlang
+pkg_kvlists_homepage = https://github.com/jcomellas/kvlists
+pkg_kvlists_fetch = git
+pkg_kvlists_repo = https://github.com/jcomellas/kvlists
+pkg_kvlists_commit = master
+
+PACKAGES += kvs
+pkg_kvs_name = kvs
+pkg_kvs_description = Container and Iterator
+pkg_kvs_homepage = https://github.com/synrc/kvs
+pkg_kvs_fetch = git
+pkg_kvs_repo = https://github.com/synrc/kvs
+pkg_kvs_commit = master
+
+PACKAGES += lager
+pkg_lager_name = lager
+pkg_lager_description = A logging framework for Erlang/OTP.
+pkg_lager_homepage = https://github.com/erlang-lager/lager
+pkg_lager_fetch = git
+pkg_lager_repo = https://github.com/erlang-lager/lager
+pkg_lager_commit = master
+
+PACKAGES += lager_amqp_backend
+pkg_lager_amqp_backend_name = lager_amqp_backend
+pkg_lager_amqp_backend_description = AMQP RabbitMQ Lager backend
+pkg_lager_amqp_backend_homepage = https://github.com/jbrisbin/lager_amqp_backend
+pkg_lager_amqp_backend_fetch = git
+pkg_lager_amqp_backend_repo = https://github.com/jbrisbin/lager_amqp_backend
+pkg_lager_amqp_backend_commit = master
+
+PACKAGES += lager_syslog
+pkg_lager_syslog_name = lager_syslog
+pkg_lager_syslog_description = Syslog backend for lager
+pkg_lager_syslog_homepage = https://github.com/erlang-lager/lager_syslog
+pkg_lager_syslog_fetch = git
+pkg_lager_syslog_repo = https://github.com/erlang-lager/lager_syslog
+pkg_lager_syslog_commit = master
+
+PACKAGES += lambdapad
+pkg_lambdapad_name = lambdapad
+pkg_lambdapad_description = Static site generator using Erlang. Yes, Erlang.
+pkg_lambdapad_homepage = https://github.com/gar1t/lambdapad
+pkg_lambdapad_fetch = git
+pkg_lambdapad_repo = https://github.com/gar1t/lambdapad
+pkg_lambdapad_commit = master
+
+PACKAGES += lasp
+pkg_lasp_name = lasp
+pkg_lasp_description = A Language for Distributed, Eventually Consistent Computations
+pkg_lasp_homepage = http://lasp-lang.org/
+pkg_lasp_fetch = git
+pkg_lasp_repo = https://github.com/lasp-lang/lasp
+pkg_lasp_commit = master
+
+PACKAGES += lasse
+pkg_lasse_name = lasse
+pkg_lasse_description = SSE handler for Cowboy
+pkg_lasse_homepage = https://github.com/inaka/lasse
+pkg_lasse_fetch = git
+pkg_lasse_repo = https://github.com/inaka/lasse
+pkg_lasse_commit = master
+
+PACKAGES += ldap
+pkg_ldap_name = ldap
+pkg_ldap_description = LDAP server written in Erlang
+pkg_ldap_homepage = https://github.com/spawnproc/ldap
+pkg_ldap_fetch = git
+pkg_ldap_repo = https://github.com/spawnproc/ldap
+pkg_ldap_commit = master
+
+PACKAGES += lethink
+pkg_lethink_name = lethink
+pkg_lethink_description = erlang driver for rethinkdb
+pkg_lethink_homepage = https://github.com/taybin/lethink
+pkg_lethink_fetch = git
+pkg_lethink_repo = https://github.com/taybin/lethink
+pkg_lethink_commit = master
+
+PACKAGES += lfe
+pkg_lfe_name = lfe
+pkg_lfe_description = Lisp Flavoured Erlang (LFE)
+pkg_lfe_homepage = https://github.com/rvirding/lfe
+pkg_lfe_fetch = git
+pkg_lfe_repo = https://github.com/rvirding/lfe
+pkg_lfe_commit = master
+
+PACKAGES += ling
+pkg_ling_name = ling
+pkg_ling_description = Erlang on Xen
+pkg_ling_homepage = https://github.com/cloudozer/ling
+pkg_ling_fetch = git
+pkg_ling_repo = https://github.com/cloudozer/ling
+pkg_ling_commit = master
+
+PACKAGES += live
+pkg_live_name = live
+pkg_live_description = Automated module and configuration reloader.
+pkg_live_homepage = http://ninenines.eu
+pkg_live_fetch = git
+pkg_live_repo = https://github.com/ninenines/live
+pkg_live_commit = master
+
+PACKAGES += lmq
+pkg_lmq_name = lmq
+pkg_lmq_description = Lightweight Message Queue
+pkg_lmq_homepage = https://github.com/iij/lmq
+pkg_lmq_fetch = git
+pkg_lmq_repo = https://github.com/iij/lmq
+pkg_lmq_commit = master
+
+PACKAGES += locker
+pkg_locker_name = locker
+pkg_locker_description = Atomic distributed 'check and set' for short-lived keys
+pkg_locker_homepage = https://github.com/wooga/locker
+pkg_locker_fetch = git
+pkg_locker_repo = https://github.com/wooga/locker
+pkg_locker_commit = master
+
+PACKAGES += locks
+pkg_locks_name = locks
+pkg_locks_description = A scalable, deadlock-resolving resource locker
+pkg_locks_homepage = https://github.com/uwiger/locks
+pkg_locks_fetch = git
+pkg_locks_repo = https://github.com/uwiger/locks
+pkg_locks_commit = master
+
+PACKAGES += log4erl
+pkg_log4erl_name = log4erl
+pkg_log4erl_description = A logger for erlang in the spirit of Log4J.
+pkg_log4erl_homepage = https://github.com/ahmednawras/log4erl
+pkg_log4erl_fetch = git
+pkg_log4erl_repo = https://github.com/ahmednawras/log4erl
+pkg_log4erl_commit = master
+
+PACKAGES += lol
+pkg_lol_name = lol
+pkg_lol_description = Lisp on erLang, and programming is fun again
+pkg_lol_homepage = https://github.com/b0oh/lol
+pkg_lol_fetch = git
+pkg_lol_repo = https://github.com/b0oh/lol
+pkg_lol_commit = master
+
+PACKAGES += lucid
+pkg_lucid_name = lucid
+pkg_lucid_description = HTTP/2 server written in Erlang
+pkg_lucid_homepage = https://github.com/tatsuhiro-t/lucid
+pkg_lucid_fetch = git
+pkg_lucid_repo = https://github.com/tatsuhiro-t/lucid
+pkg_lucid_commit = master
+
+PACKAGES += luerl
+pkg_luerl_name = luerl
+pkg_luerl_description = Lua in Erlang
+pkg_luerl_homepage = https://github.com/rvirding/luerl
+pkg_luerl_fetch = git
+pkg_luerl_repo = https://github.com/rvirding/luerl
+pkg_luerl_commit = develop
+
+PACKAGES += luwak
+pkg_luwak_name = luwak
+pkg_luwak_description = Large-object storage interface for Riak
+pkg_luwak_homepage = https://github.com/basho/luwak
+pkg_luwak_fetch = git
+pkg_luwak_repo = https://github.com/basho/luwak
+pkg_luwak_commit = master
+
+PACKAGES += lux
+pkg_lux_name = lux
+pkg_lux_description = Lux (LUcid eXpect scripting) simplifies test automation and provides an Expect-style execution of commands
+pkg_lux_homepage = https://github.com/hawk/lux
+pkg_lux_fetch = git
+pkg_lux_repo = https://github.com/hawk/lux
+pkg_lux_commit = master
+
+PACKAGES += machi
+pkg_machi_name = machi
+pkg_machi_description = Machi file store
+pkg_machi_homepage = https://github.com/basho/machi
+pkg_machi_fetch = git
+pkg_machi_repo = https://github.com/basho/machi
+pkg_machi_commit = master
+
+PACKAGES += mad
+pkg_mad_name = mad
+pkg_mad_description = Small and Fast Rebar Replacement
+pkg_mad_homepage = https://github.com/synrc/mad
+pkg_mad_fetch = git
+pkg_mad_repo = https://github.com/synrc/mad
+pkg_mad_commit = master
+
+PACKAGES += marina
+pkg_marina_name = marina
+pkg_marina_description = Non-blocking Erlang Cassandra CQL3 client
+pkg_marina_homepage = https://github.com/lpgauth/marina
+pkg_marina_fetch = git
+pkg_marina_repo = https://github.com/lpgauth/marina
+pkg_marina_commit = master
+
+PACKAGES += mavg
+pkg_mavg_name = mavg
+pkg_mavg_description = Erlang :: Exponential moving average library
+pkg_mavg_homepage = https://github.com/EchoTeam/mavg
+pkg_mavg_fetch = git
+pkg_mavg_repo = https://github.com/EchoTeam/mavg
+pkg_mavg_commit = master
+
+PACKAGES += mc_erl
+pkg_mc_erl_name = mc_erl
+pkg_mc_erl_description = mc-erl is a server for Minecraft 1.4.7 written in Erlang.
+pkg_mc_erl_homepage = https://github.com/clonejo/mc-erl
+pkg_mc_erl_fetch = git
+pkg_mc_erl_repo = https://github.com/clonejo/mc-erl
+pkg_mc_erl_commit = master
+
+PACKAGES += mcd
+pkg_mcd_name = mcd
+pkg_mcd_description = Fast memcached protocol client in pure Erlang
+pkg_mcd_homepage = https://github.com/EchoTeam/mcd
+pkg_mcd_fetch = git
+pkg_mcd_repo = https://github.com/EchoTeam/mcd
+pkg_mcd_commit = master
+
+PACKAGES += mcerlang
+pkg_mcerlang_name = mcerlang
+pkg_mcerlang_description = The McErlang model checker for Erlang
+pkg_mcerlang_homepage = https://github.com/fredlund/McErlang
+pkg_mcerlang_fetch = git
+pkg_mcerlang_repo = https://github.com/fredlund/McErlang
+pkg_mcerlang_commit = master
+
+PACKAGES += meck
+pkg_meck_name = meck
+pkg_meck_description = A mocking library for Erlang
+pkg_meck_homepage = https://github.com/eproxus/meck
+pkg_meck_fetch = git
+pkg_meck_repo = https://github.com/eproxus/meck
+pkg_meck_commit = master
+
+PACKAGES += mekao
+pkg_mekao_name = mekao
+pkg_mekao_description = SQL constructor
+pkg_mekao_homepage = https://github.com/ddosia/mekao
+pkg_mekao_fetch = git
+pkg_mekao_repo = https://github.com/ddosia/mekao
+pkg_mekao_commit = master
+
+PACKAGES += memo
+pkg_memo_name = memo
+pkg_memo_description = Erlang memoization server
+pkg_memo_homepage = https://github.com/tuncer/memo
+pkg_memo_fetch = git
+pkg_memo_repo = https://github.com/tuncer/memo
+pkg_memo_commit = master
+
+PACKAGES += merge_index
+pkg_merge_index_name = merge_index
+pkg_merge_index_description = MergeIndex is an Erlang library for storing ordered sets on disk. It is very similar to an SSTable (in Google's Bigtable) or an HFile (in Hadoop).
+pkg_merge_index_homepage = https://github.com/basho/merge_index
+pkg_merge_index_fetch = git
+pkg_merge_index_repo = https://github.com/basho/merge_index
+pkg_merge_index_commit = master
+
+PACKAGES += merl
+pkg_merl_name = merl
+pkg_merl_description = Metaprogramming in Erlang
+pkg_merl_homepage = https://github.com/richcarl/merl
+pkg_merl_fetch = git
+pkg_merl_repo = https://github.com/richcarl/merl
+pkg_merl_commit = master
+
+PACKAGES += mimerl
+pkg_mimerl_name = mimerl
+pkg_mimerl_description = library to handle mimetypes
+pkg_mimerl_homepage = https://github.com/benoitc/mimerl
+pkg_mimerl_fetch = git
+pkg_mimerl_repo = https://github.com/benoitc/mimerl
+pkg_mimerl_commit = master
+
+PACKAGES += mimetypes
+pkg_mimetypes_name = mimetypes
+pkg_mimetypes_description = Erlang MIME types library
+pkg_mimetypes_homepage = https://github.com/spawngrid/mimetypes
+pkg_mimetypes_fetch = git
+pkg_mimetypes_repo = https://github.com/spawngrid/mimetypes
+pkg_mimetypes_commit = master
+
+PACKAGES += mixer
+pkg_mixer_name = mixer
+pkg_mixer_description = Mix in functions from other modules
+pkg_mixer_homepage = https://github.com/chef/mixer
+pkg_mixer_fetch = git
+pkg_mixer_repo = https://github.com/chef/mixer
+pkg_mixer_commit = master
+
+PACKAGES += mochiweb
+pkg_mochiweb_name = mochiweb
+pkg_mochiweb_description = MochiWeb is an Erlang library for building lightweight HTTP servers.
+pkg_mochiweb_homepage = https://github.com/mochi/mochiweb
+pkg_mochiweb_fetch = git
+pkg_mochiweb_repo = https://github.com/mochi/mochiweb
+pkg_mochiweb_commit = master
+
+PACKAGES += mochiweb_xpath
+pkg_mochiweb_xpath_name = mochiweb_xpath
+pkg_mochiweb_xpath_description = XPath support for mochiweb's html parser
+pkg_mochiweb_xpath_homepage = https://github.com/retnuh/mochiweb_xpath
+pkg_mochiweb_xpath_fetch = git
+pkg_mochiweb_xpath_repo = https://github.com/retnuh/mochiweb_xpath
+pkg_mochiweb_xpath_commit = master
+
+PACKAGES += mockgyver
+pkg_mockgyver_name = mockgyver
+pkg_mockgyver_description = A mocking library for Erlang
+pkg_mockgyver_homepage = https://github.com/klajo/mockgyver
+pkg_mockgyver_fetch = git
+pkg_mockgyver_repo = https://github.com/klajo/mockgyver
+pkg_mockgyver_commit = master
+
+PACKAGES += modlib
+pkg_modlib_name = modlib
+pkg_modlib_description = Web framework based on Erlang's inets httpd
+pkg_modlib_homepage = https://github.com/gar1t/modlib
+pkg_modlib_fetch = git
+pkg_modlib_repo = https://github.com/gar1t/modlib
+pkg_modlib_commit = master
+
+PACKAGES += mongodb
+pkg_mongodb_name = mongodb
+pkg_mongodb_description = MongoDB driver for Erlang
+pkg_mongodb_homepage = https://github.com/comtihon/mongodb-erlang
+pkg_mongodb_fetch = git
+pkg_mongodb_repo = https://github.com/comtihon/mongodb-erlang
+pkg_mongodb_commit = master
+
+PACKAGES += mongooseim
+pkg_mongooseim_name = mongooseim
+pkg_mongooseim_description = Jabber / XMPP server with focus on performance and scalability, by Erlang Solutions
+pkg_mongooseim_homepage = https://www.erlang-solutions.com/products/mongooseim-massively-scalable-ejabberd-platform
+pkg_mongooseim_fetch = git
+pkg_mongooseim_repo = https://github.com/esl/MongooseIM
+pkg_mongooseim_commit = master
+
+PACKAGES += moyo
+pkg_moyo_name = moyo
+pkg_moyo_description = Erlang utility functions library
+pkg_moyo_homepage = https://github.com/dwango/moyo
+pkg_moyo_fetch = git
+pkg_moyo_repo = https://github.com/dwango/moyo
+pkg_moyo_commit = master
+
+PACKAGES += msgpack
+pkg_msgpack_name = msgpack
+pkg_msgpack_description = MessagePack (de)serializer implementation for Erlang
+pkg_msgpack_homepage = https://github.com/msgpack/msgpack-erlang
+pkg_msgpack_fetch = git
+pkg_msgpack_repo = https://github.com/msgpack/msgpack-erlang
+pkg_msgpack_commit = master
+
+PACKAGES += mu2
+pkg_mu2_name = mu2
+pkg_mu2_description = Erlang mutation testing tool
+pkg_mu2_homepage = https://github.com/ramsay-t/mu2
+pkg_mu2_fetch = git
+pkg_mu2_repo = https://github.com/ramsay-t/mu2
+pkg_mu2_commit = master
+
+PACKAGES += mustache
+pkg_mustache_name = mustache
+pkg_mustache_description = Mustache template engine for Erlang.
+pkg_mustache_homepage = https://github.com/mojombo/mustache.erl
+pkg_mustache_fetch = git
+pkg_mustache_repo = https://github.com/mojombo/mustache.erl
+pkg_mustache_commit = master
+
+PACKAGES += myproto
+pkg_myproto_name = myproto
+pkg_myproto_description = MySQL Server Protocol in Erlang
+pkg_myproto_homepage = https://github.com/altenwald/myproto
+pkg_myproto_fetch = git
+pkg_myproto_repo = https://github.com/altenwald/myproto
+pkg_myproto_commit = master
+
+PACKAGES += mysql
+pkg_mysql_name = mysql
+pkg_mysql_description = MySQL client library for Erlang/OTP
+pkg_mysql_homepage = https://github.com/mysql-otp/mysql-otp
+pkg_mysql_fetch = git
+pkg_mysql_repo = https://github.com/mysql-otp/mysql-otp
+pkg_mysql_commit = 1.5.1
+
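Most entries in this index track master, but some pin a release tag or branch (mysql above is pinned to 1.5.1; gen_cycle, nkbase and luerl track develop). A consuming Makefile can override the pin per dependency; a small sketch, assuming the standard dep_<name>_commit override and a purely illustrative 1.6.0 tag:

    # Override the commit/tag/branch the index pins for mysql (1.6.0 is hypothetical)
    DEPS = mysql
    dep_mysql_commit = 1.6.0
    include erlang.mk

This keeps the package metadata above as a default while letting each project choose the exact revision it builds against.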
+PACKAGES += n2o
+pkg_n2o_name = n2o
+pkg_n2o_description = WebSocket Application Server
+pkg_n2o_homepage = https://github.com/5HT/n2o
+pkg_n2o_fetch = git
+pkg_n2o_repo = https://github.com/5HT/n2o
+pkg_n2o_commit = master
+
+PACKAGES += nat_upnp
+pkg_nat_upnp_name = nat_upnp
+pkg_nat_upnp_description = Erlang library to map your internal port to an external one using UPnP IGD
+pkg_nat_upnp_homepage = https://github.com/benoitc/nat_upnp
+pkg_nat_upnp_fetch = git
+pkg_nat_upnp_repo = https://github.com/benoitc/nat_upnp
+pkg_nat_upnp_commit = master
+
+PACKAGES += neo4j
+pkg_neo4j_name = neo4j
+pkg_neo4j_description = Erlang client library for Neo4J.
+pkg_neo4j_homepage = https://github.com/dmitriid/neo4j-erlang
+pkg_neo4j_fetch = git
+pkg_neo4j_repo = https://github.com/dmitriid/neo4j-erlang
+pkg_neo4j_commit = master
+
+PACKAGES += neotoma
+pkg_neotoma_name = neotoma
+pkg_neotoma_description = Erlang library and packrat parser-generator for parsing expression grammars.
+pkg_neotoma_homepage = https://github.com/seancribbs/neotoma
+pkg_neotoma_fetch = git
+pkg_neotoma_repo = https://github.com/seancribbs/neotoma
+pkg_neotoma_commit = master
+
+PACKAGES += newrelic
+pkg_newrelic_name = newrelic
+pkg_newrelic_description = Erlang library for sending metrics to New Relic
+pkg_newrelic_homepage = https://github.com/wooga/newrelic-erlang
+pkg_newrelic_fetch = git
+pkg_newrelic_repo = https://github.com/wooga/newrelic-erlang
+pkg_newrelic_commit = master
+
+PACKAGES += nifty
+pkg_nifty_name = nifty
+pkg_nifty_description = Erlang NIF wrapper generator
+pkg_nifty_homepage = https://github.com/parapluu/nifty
+pkg_nifty_fetch = git
+pkg_nifty_repo = https://github.com/parapluu/nifty
+pkg_nifty_commit = master
+
+PACKAGES += nitrogen_core
+pkg_nitrogen_core_name = nitrogen_core
+pkg_nitrogen_core_description = The core Nitrogen library.
+pkg_nitrogen_core_homepage = http://nitrogenproject.com/
+pkg_nitrogen_core_fetch = git
+pkg_nitrogen_core_repo = https://github.com/nitrogen/nitrogen_core
+pkg_nitrogen_core_commit = master
+
+PACKAGES += nkbase
+pkg_nkbase_name = nkbase
+pkg_nkbase_description = NkBASE distributed database
+pkg_nkbase_homepage = https://github.com/Nekso/nkbase
+pkg_nkbase_fetch = git
+pkg_nkbase_repo = https://github.com/Nekso/nkbase
+pkg_nkbase_commit = develop
+
+PACKAGES += nkdocker
+pkg_nkdocker_name = nkdocker
+pkg_nkdocker_description = Erlang Docker client
+pkg_nkdocker_homepage = https://github.com/Nekso/nkdocker
+pkg_nkdocker_fetch = git
+pkg_nkdocker_repo = https://github.com/Nekso/nkdocker
+pkg_nkdocker_commit = master
+
+PACKAGES += nkpacket
+pkg_nkpacket_name = nkpacket
+pkg_nkpacket_description = Generic Erlang transport layer
+pkg_nkpacket_homepage = https://github.com/Nekso/nkpacket
+pkg_nkpacket_fetch = git
+pkg_nkpacket_repo = https://github.com/Nekso/nkpacket
+pkg_nkpacket_commit = master
+
+PACKAGES += nksip
+pkg_nksip_name = nksip
+pkg_nksip_description = Erlang SIP application server
+pkg_nksip_homepage = https://github.com/kalta/nksip
+pkg_nksip_fetch = git
+pkg_nksip_repo = https://github.com/kalta/nksip
+pkg_nksip_commit = master
+
+PACKAGES += nodefinder
+pkg_nodefinder_name = nodefinder
+pkg_nodefinder_description = automatic node discovery via UDP multicast
+pkg_nodefinder_homepage = https://github.com/erlanger/nodefinder
+pkg_nodefinder_fetch = git
+pkg_nodefinder_repo = https://github.com/okeuday/nodefinder
+pkg_nodefinder_commit = master
+
+PACKAGES += nprocreg
+pkg_nprocreg_name = nprocreg
+pkg_nprocreg_description = Minimal Distributed Erlang Process Registry
+pkg_nprocreg_homepage = http://nitrogenproject.com/
+pkg_nprocreg_fetch = git
+pkg_nprocreg_repo = https://github.com/nitrogen/nprocreg
+pkg_nprocreg_commit = master
+
+PACKAGES += oauth
+pkg_oauth_name = oauth
+pkg_oauth_description = An Erlang OAuth 1.0 implementation
+pkg_oauth_homepage = https://github.com/tim/erlang-oauth
+pkg_oauth_fetch = git
+pkg_oauth_repo = https://github.com/tim/erlang-oauth
+pkg_oauth_commit = master
+
+PACKAGES += oauth2
+pkg_oauth2_name = oauth2
+pkg_oauth2_description = Erlang OAuth2 implementation
+pkg_oauth2_homepage = https://github.com/kivra/oauth2
+pkg_oauth2_fetch = git
+pkg_oauth2_repo = https://github.com/kivra/oauth2
+pkg_oauth2_commit = master
+
+PACKAGES += observer_cli
+pkg_observer_cli_name = observer_cli
+pkg_observer_cli_description = Visualize Erlang/Elixir Nodes On The Command Line
+pkg_observer_cli_homepage = http://zhongwencool.github.io/observer_cli
+pkg_observer_cli_fetch = git
+pkg_observer_cli_repo = https://github.com/zhongwencool/observer_cli
+pkg_observer_cli_commit = master
+
+PACKAGES += octopus
+pkg_octopus_name = octopus
+pkg_octopus_description = Small and flexible pool manager written in Erlang
+pkg_octopus_homepage = https://github.com/erlangbureau/octopus
+pkg_octopus_fetch = git
+pkg_octopus_repo = https://github.com/erlangbureau/octopus
+pkg_octopus_commit = master
+
+PACKAGES += of_protocol
+pkg_of_protocol_name = of_protocol
+pkg_of_protocol_description = OpenFlow Protocol Library for Erlang
+pkg_of_protocol_homepage = https://github.com/FlowForwarding/of_protocol
+pkg_of_protocol_fetch = git
+pkg_of_protocol_repo = https://github.com/FlowForwarding/of_protocol
+pkg_of_protocol_commit = master
+
+PACKAGES += opencouch
+pkg_opencouch_name = couch
+pkg_opencouch_description = An embeddable document-oriented database compatible with Apache CouchDB
+pkg_opencouch_homepage = https://github.com/benoitc/opencouch
+pkg_opencouch_fetch = git
+pkg_opencouch_repo = https://github.com/benoitc/opencouch
+pkg_opencouch_commit = master
+
+PACKAGES += openflow
+pkg_openflow_name = openflow
+pkg_openflow_description = An OpenFlow controller written in pure Erlang
+pkg_openflow_homepage = https://github.com/renatoaguiar/erlang-openflow
+pkg_openflow_fetch = git
+pkg_openflow_repo = https://github.com/renatoaguiar/erlang-openflow
+pkg_openflow_commit = master
+
+PACKAGES += openid
+pkg_openid_name = openid
+pkg_openid_description = Erlang OpenID
+pkg_openid_homepage = https://github.com/brendonh/erl_openid
+pkg_openid_fetch = git
+pkg_openid_repo = https://github.com/brendonh/erl_openid
+pkg_openid_commit = master
+
+PACKAGES += openpoker
+pkg_openpoker_name = openpoker
+pkg_openpoker_description = Genesis Texas hold'em Game Server
+pkg_openpoker_homepage = https://github.com/hpyhacking/openpoker
+pkg_openpoker_fetch = git
+pkg_openpoker_repo = https://github.com/hpyhacking/openpoker
+pkg_openpoker_commit = master
+
+PACKAGES += otpbp
+pkg_otpbp_name = otpbp
+pkg_otpbp_description = Parse transform for using new OTP functions in old Erlang/OTP releases (R15, R16, 17, 18, 19)
+pkg_otpbp_homepage = https://github.com/Ledest/otpbp
+pkg_otpbp_fetch = git
+pkg_otpbp_repo = https://github.com/Ledest/otpbp
+pkg_otpbp_commit = master
+
+PACKAGES += pal
+pkg_pal_name = pal
+pkg_pal_description = Pragmatic Authentication Library
+pkg_pal_homepage = https://github.com/manifest/pal
+pkg_pal_fetch = git
+pkg_pal_repo = https://github.com/manifest/pal
+pkg_pal_commit = master
+
+PACKAGES += parse_trans
+pkg_parse_trans_name = parse_trans
+pkg_parse_trans_description = Parse transform utilities for Erlang
+pkg_parse_trans_homepage = https://github.com/uwiger/parse_trans
+pkg_parse_trans_fetch = git
+pkg_parse_trans_repo = https://github.com/uwiger/parse_trans
+pkg_parse_trans_commit = master
+
+PACKAGES += parsexml
+pkg_parsexml_name = parsexml
+pkg_parsexml_description = Simple DOM XML parser with convenient and very simple API
+pkg_parsexml_homepage = https://github.com/maxlapshin/parsexml
+pkg_parsexml_fetch = git
+pkg_parsexml_repo = https://github.com/maxlapshin/parsexml
+pkg_parsexml_commit = master
+
+PACKAGES += partisan
+pkg_partisan_name = partisan
+pkg_partisan_description = High-performance, high-scalability distributed computing with Erlang and Elixir.
+pkg_partisan_homepage = http://partisan.cloud
+pkg_partisan_fetch = git
+pkg_partisan_repo = https://github.com/lasp-lang/partisan
+pkg_partisan_commit = master
+
+PACKAGES += pegjs
+pkg_pegjs_name = pegjs
+pkg_pegjs_description = An implementation of PEG.js grammar for Erlang.
+pkg_pegjs_homepage = https://github.com/dmitriid/pegjs
+pkg_pegjs_fetch = git
+pkg_pegjs_repo = https://github.com/dmitriid/pegjs
+pkg_pegjs_commit = master
+
+PACKAGES += percept2
+pkg_percept2_name = percept2
+pkg_percept2_description = Concurrent profiling tool for Erlang
+pkg_percept2_homepage = https://github.com/huiqing/percept2
+pkg_percept2_fetch = git
+pkg_percept2_repo = https://github.com/huiqing/percept2
+pkg_percept2_commit = master
+
+PACKAGES += pgo
+pkg_pgo_name = pgo
+pkg_pgo_description = Erlang Postgres client and connection pool
+pkg_pgo_homepage = https://github.com/erleans/pgo.git
+pkg_pgo_fetch = git
+pkg_pgo_repo = https://github.com/erleans/pgo.git
+pkg_pgo_commit = master
+
+PACKAGES += pgsql
+pkg_pgsql_name = pgsql
+pkg_pgsql_description = Erlang PostgreSQL driver
+pkg_pgsql_homepage = https://github.com/semiocast/pgsql
+pkg_pgsql_fetch = git
+pkg_pgsql_repo = https://github.com/semiocast/pgsql
+pkg_pgsql_commit = master
+
+PACKAGES += pkgx
+pkg_pkgx_name = pkgx
+pkg_pkgx_description = Build .deb packages from Erlang releases
+pkg_pkgx_homepage = https://github.com/arjan/pkgx
+pkg_pkgx_fetch = git
+pkg_pkgx_repo = https://github.com/arjan/pkgx
+pkg_pkgx_commit = master
+
+PACKAGES += pkt
+pkg_pkt_name = pkt
+pkg_pkt_description = Erlang network protocol library
+pkg_pkt_homepage = https://github.com/msantos/pkt
+pkg_pkt_fetch = git
+pkg_pkt_repo = https://github.com/msantos/pkt
+pkg_pkt_commit = master
+
+PACKAGES += plain_fsm
+pkg_plain_fsm_name = plain_fsm
+pkg_plain_fsm_description = A behaviour/support library for writing plain Erlang FSMs.
+pkg_plain_fsm_homepage = https://github.com/uwiger/plain_fsm
+pkg_plain_fsm_fetch = git
+pkg_plain_fsm_repo = https://github.com/uwiger/plain_fsm
+pkg_plain_fsm_commit = master
+
+PACKAGES += plumtree
+pkg_plumtree_name = plumtree
+pkg_plumtree_description = Epidemic Broadcast Trees
+pkg_plumtree_homepage = https://github.com/helium/plumtree
+pkg_plumtree_fetch = git
+pkg_plumtree_repo = https://github.com/helium/plumtree
+pkg_plumtree_commit = master
+
+PACKAGES += pmod_transform
+pkg_pmod_transform_name = pmod_transform
+pkg_pmod_transform_description = Parse transform for parameterized modules
+pkg_pmod_transform_homepage = https://github.com/erlang/pmod_transform
+pkg_pmod_transform_fetch = git
+pkg_pmod_transform_repo = https://github.com/erlang/pmod_transform
+pkg_pmod_transform_commit = master
+
+PACKAGES += pobox
+pkg_pobox_name = pobox
+pkg_pobox_description = External buffer processes to protect against mailbox overflow in Erlang
+pkg_pobox_homepage = https://github.com/ferd/pobox
+pkg_pobox_fetch = git
+pkg_pobox_repo = https://github.com/ferd/pobox
+pkg_pobox_commit = master
+
+PACKAGES += ponos
+pkg_ponos_name = ponos
+pkg_ponos_description = ponos is a simple yet powerful load generator written in Erlang
+pkg_ponos_homepage = https://github.com/klarna/ponos
+pkg_ponos_fetch = git
+pkg_ponos_repo = https://github.com/klarna/ponos
+pkg_ponos_commit = master
+
+PACKAGES += poolboy
+pkg_poolboy_name = poolboy
+pkg_poolboy_description = A hunky Erlang worker pool factory
+pkg_poolboy_homepage = https://github.com/devinus/poolboy
+pkg_poolboy_fetch = git
+pkg_poolboy_repo = https://github.com/devinus/poolboy
+pkg_poolboy_commit = master
+
+PACKAGES += pooler
+pkg_pooler_name = pooler
+pkg_pooler_description = An OTP Process Pool Application
+pkg_pooler_homepage = https://github.com/seth/pooler
+pkg_pooler_fetch = git
+pkg_pooler_repo = https://github.com/seth/pooler
+pkg_pooler_commit = master
+
+PACKAGES += pqueue
+pkg_pqueue_name = pqueue
+pkg_pqueue_description = Erlang Priority Queues
+pkg_pqueue_homepage = https://github.com/okeuday/pqueue
+pkg_pqueue_fetch = git
+pkg_pqueue_repo = https://github.com/okeuday/pqueue
+pkg_pqueue_commit = master
+
+PACKAGES += procket
+pkg_procket_name = procket
+pkg_procket_description = Erlang interface to low level socket operations
+pkg_procket_homepage = http://blog.listincomprehension.com/search/label/procket
+pkg_procket_fetch = git
+pkg_procket_repo = https://github.com/msantos/procket
+pkg_procket_commit = master
+
+PACKAGES += prometheus
+pkg_prometheus_name = prometheus
+pkg_prometheus_description = Prometheus.io client in Erlang
+pkg_prometheus_homepage = https://github.com/deadtrickster/prometheus.erl
+pkg_prometheus_fetch = git
+pkg_prometheus_repo = https://github.com/deadtrickster/prometheus.erl
+pkg_prometheus_commit = master
+
+PACKAGES += prop
+pkg_prop_name = prop
+pkg_prop_description = An Erlang code scaffolding and generator system.
+pkg_prop_homepage = https://github.com/nuex/prop
+pkg_prop_fetch = git
+pkg_prop_repo = https://github.com/nuex/prop
+pkg_prop_commit = master
+
+PACKAGES += proper
+pkg_proper_name = proper
+pkg_proper_description = PropEr: a QuickCheck-inspired property-based testing tool for Erlang.
+pkg_proper_homepage = http://proper.softlab.ntua.gr
+pkg_proper_fetch = git
+pkg_proper_repo = https://github.com/manopapad/proper
+pkg_proper_commit = master
+
+PACKAGES += props
+pkg_props_name = props
+pkg_props_description = Property structure library
+pkg_props_homepage = https://github.com/greyarea/props
+pkg_props_fetch = git
+pkg_props_repo = https://github.com/greyarea/props
+pkg_props_commit = master
+
+PACKAGES += protobuffs
+pkg_protobuffs_name = protobuffs
+pkg_protobuffs_description = An implementation of Google's Protocol Buffers for Erlang, based on ngerakines/erlang_protobuffs.
+pkg_protobuffs_homepage = https://github.com/basho/erlang_protobuffs
+pkg_protobuffs_fetch = git
+pkg_protobuffs_repo = https://github.com/basho/erlang_protobuffs
+pkg_protobuffs_commit = master
+
+PACKAGES += psycho
+pkg_psycho_name = psycho
+pkg_psycho_description = HTTP server that provides a WSGI-like interface for applications and middleware.
+pkg_psycho_homepage = https://github.com/gar1t/psycho
+pkg_psycho_fetch = git
+pkg_psycho_repo = https://github.com/gar1t/psycho
+pkg_psycho_commit = master
+
+PACKAGES += purity
+pkg_purity_name = purity
+pkg_purity_description = A side-effect analyzer for Erlang
+pkg_purity_homepage = https://github.com/mpitid/purity
+pkg_purity_fetch = git
+pkg_purity_repo = https://github.com/mpitid/purity
+pkg_purity_commit = master
+
+PACKAGES += push_service
+pkg_push_service_name = push_service
+pkg_push_service_description = Push service
+pkg_push_service_homepage = https://github.com/hairyhum/push_service
+pkg_push_service_fetch = git
+pkg_push_service_repo = https://github.com/hairyhum/push_service
+pkg_push_service_commit = master
+
+PACKAGES += qdate
+pkg_qdate_name = qdate
+pkg_qdate_description = Date, time, and timezone parsing, formatting, and conversion for Erlang.
+pkg_qdate_homepage = https://github.com/choptastic/qdate
+pkg_qdate_fetch = git
+pkg_qdate_repo = https://github.com/choptastic/qdate
+pkg_qdate_commit = master
+
+PACKAGES += qrcode
+pkg_qrcode_name = qrcode
+pkg_qrcode_description = QR Code encoder in Erlang
+pkg_qrcode_homepage = https://github.com/komone/qrcode
+pkg_qrcode_fetch = git
+pkg_qrcode_repo = https://github.com/komone/qrcode
+pkg_qrcode_commit = master
+
+PACKAGES += quest
+pkg_quest_name = quest
+pkg_quest_description = Learn Erlang through this set of challenges. An interactive system for getting to know Erlang.
+pkg_quest_homepage = https://github.com/eriksoe/ErlangQuest
+pkg_quest_fetch = git
+pkg_quest_repo = https://github.com/eriksoe/ErlangQuest
+pkg_quest_commit = master
+
+PACKAGES += quickrand
+pkg_quickrand_name = quickrand
+pkg_quickrand_description = Quick Erlang Random Number Generation
+pkg_quickrand_homepage = https://github.com/okeuday/quickrand
+pkg_quickrand_fetch = git
+pkg_quickrand_repo = https://github.com/okeuday/quickrand
+pkg_quickrand_commit = master
+
+PACKAGES += rabbit
+pkg_rabbit_name = rabbit
+pkg_rabbit_description = RabbitMQ Server
+pkg_rabbit_homepage = https://www.rabbitmq.com/
+pkg_rabbit_fetch = git
+pkg_rabbit_repo = https://github.com/rabbitmq/rabbitmq-server.git
+pkg_rabbit_commit = master
+
+PACKAGES += rabbit_exchange_type_riak
+pkg_rabbit_exchange_type_riak_name = rabbit_exchange_type_riak
+pkg_rabbit_exchange_type_riak_description = Custom RabbitMQ exchange type for sticking messages in Riak
+pkg_rabbit_exchange_type_riak_homepage = https://github.com/jbrisbin/riak-exchange
+pkg_rabbit_exchange_type_riak_fetch = git
+pkg_rabbit_exchange_type_riak_repo = https://github.com/jbrisbin/riak-exchange
+pkg_rabbit_exchange_type_riak_commit = master
+
+PACKAGES += rack
+pkg_rack_name = rack
+pkg_rack_description = Rack handler for Erlang
+pkg_rack_homepage = https://github.com/erlyvideo/rack
+pkg_rack_fetch = git
+pkg_rack_repo = https://github.com/erlyvideo/rack
+pkg_rack_commit = master
+
+PACKAGES += radierl
+pkg_radierl_name = radierl
+pkg_radierl_description = RADIUS protocol stack implemented in Erlang.
+pkg_radierl_homepage = https://github.com/vances/radierl
+pkg_radierl_fetch = git
+pkg_radierl_repo = https://github.com/vances/radierl
+pkg_radierl_commit = master
+
+PACKAGES += rafter
+pkg_rafter_name = rafter
+pkg_rafter_description = An Erlang library application which implements the Raft consensus protocol
+pkg_rafter_homepage = https://github.com/andrewjstone/rafter
+pkg_rafter_fetch = git
+pkg_rafter_repo = https://github.com/andrewjstone/rafter
+pkg_rafter_commit = master
+
+PACKAGES += ranch
+pkg_ranch_name = ranch
+pkg_ranch_description = Socket acceptor pool for TCP protocols.
+pkg_ranch_homepage = http://ninenines.eu
+pkg_ranch_fetch = git
+pkg_ranch_repo = https://github.com/ninenines/ranch
+pkg_ranch_commit = 1.2.1
+
+PACKAGES += rbeacon
+pkg_rbeacon_name = rbeacon
+pkg_rbeacon_description = LAN discovery and presence in Erlang.
+pkg_rbeacon_homepage = https://github.com/refuge/rbeacon
+pkg_rbeacon_fetch = git
+pkg_rbeacon_repo = https://github.com/refuge/rbeacon
+pkg_rbeacon_commit = master
+
+PACKAGES += rebar
+pkg_rebar_name = rebar
+pkg_rebar_description = Erlang build tool that makes it easy to compile and test Erlang applications, port drivers and releases.
+pkg_rebar_homepage = http://www.rebar3.org
+pkg_rebar_fetch = git
+pkg_rebar_repo = https://github.com/rebar/rebar3
+pkg_rebar_commit = master
+
+PACKAGES += rebus
+pkg_rebus_name = rebus
+pkg_rebus_description = A stupid simple, internal, pub/sub event bus written in and for Erlang.
+pkg_rebus_homepage = https://github.com/olle/rebus
+pkg_rebus_fetch = git
+pkg_rebus_repo = https://github.com/olle/rebus
+pkg_rebus_commit = master
+
+PACKAGES += rec2json
+pkg_rec2json_name = rec2json
+pkg_rec2json_description = Compile Erlang record definitions into modules to convert them to/from JSON easily.
+pkg_rec2json_homepage = https://github.com/lordnull/rec2json
+pkg_rec2json_fetch = git
+pkg_rec2json_repo = https://github.com/lordnull/rec2json
+pkg_rec2json_commit = master
+
+PACKAGES += recon
+pkg_recon_name = recon
+pkg_recon_description = Collection of functions and scripts to debug Erlang in production.
+pkg_recon_homepage = https://github.com/ferd/recon
+pkg_recon_fetch = git
+pkg_recon_repo = https://github.com/ferd/recon
+pkg_recon_commit = master
+
+PACKAGES += record_info
+pkg_record_info_name = record_info
+pkg_record_info_description = Convert between record and proplist
+pkg_record_info_homepage = https://github.com/bipthelin/erlang-record_info
+pkg_record_info_fetch = git
+pkg_record_info_repo = https://github.com/bipthelin/erlang-record_info
+pkg_record_info_commit = master
+
+PACKAGES += redgrid
+pkg_redgrid_name = redgrid
+pkg_redgrid_description = Automatic Erlang node discovery via Redis
+pkg_redgrid_homepage = https://github.com/jkvor/redgrid
+pkg_redgrid_fetch = git
+pkg_redgrid_repo = https://github.com/jkvor/redgrid
+pkg_redgrid_commit = master
+
+PACKAGES += redo
+pkg_redo_name = redo
+pkg_redo_description = Pipelined Erlang Redis client
+pkg_redo_homepage = https://github.com/jkvor/redo
+pkg_redo_fetch = git
+pkg_redo_repo = https://github.com/jkvor/redo
+pkg_redo_commit = master
+
+PACKAGES += reload_mk
+pkg_reload_mk_name = reload_mk
+pkg_reload_mk_description = Live reload plugin for erlang.mk.
+pkg_reload_mk_homepage = https://github.com/bullno1/reload.mk
+pkg_reload_mk_fetch = git
+pkg_reload_mk_repo = https://github.com/bullno1/reload.mk
+pkg_reload_mk_commit = master
+
+PACKAGES += reltool_util
+pkg_reltool_util_name = reltool_util
+pkg_reltool_util_description = Erlang reltool utility functionality application
+pkg_reltool_util_homepage = https://github.com/okeuday/reltool_util
+pkg_reltool_util_fetch = git
+pkg_reltool_util_repo = https://github.com/okeuday/reltool_util
+pkg_reltool_util_commit = master
+
+PACKAGES += relx
+pkg_relx_name = relx
+pkg_relx_description = Sane, simple release creation for Erlang
+pkg_relx_homepage = https://github.com/erlware/relx
+pkg_relx_fetch = git
+pkg_relx_repo = https://github.com/erlware/relx
+pkg_relx_commit = master
+
+PACKAGES += resource_discovery
+pkg_resource_discovery_name = resource_discovery
+pkg_resource_discovery_description = An application used to dynamically discover resources present in an Erlang node cluster.
+pkg_resource_discovery_homepage = http://erlware.org/
+pkg_resource_discovery_fetch = git
+pkg_resource_discovery_repo = https://github.com/erlware/resource_discovery
+pkg_resource_discovery_commit = master
+
+PACKAGES += restc
+pkg_restc_name = restc
+pkg_restc_description = Erlang REST client
+pkg_restc_homepage = https://github.com/kivra/restclient
+pkg_restc_fetch = git
+pkg_restc_repo = https://github.com/kivra/restclient
+pkg_restc_commit = master
+
+PACKAGES += rfc4627_jsonrpc
+pkg_rfc4627_jsonrpc_name = rfc4627_jsonrpc
+pkg_rfc4627_jsonrpc_description = Erlang RFC4627 (JSON) codec and JSON-RPC server implementation.
+pkg_rfc4627_jsonrpc_homepage = https://github.com/tonyg/erlang-rfc4627
+pkg_rfc4627_jsonrpc_fetch = git
+pkg_rfc4627_jsonrpc_repo = https://github.com/tonyg/erlang-rfc4627
+pkg_rfc4627_jsonrpc_commit = master
+
+PACKAGES += riak_control
+pkg_riak_control_name = riak_control
+pkg_riak_control_description = Webmachine-based administration interface for Riak.
+pkg_riak_control_homepage = https://github.com/basho/riak_control
+pkg_riak_control_fetch = git
+pkg_riak_control_repo = https://github.com/basho/riak_control
+pkg_riak_control_commit = master
+
+PACKAGES += riak_core
+pkg_riak_core_name = riak_core
+pkg_riak_core_description = Distributed systems infrastructure used by Riak.
+pkg_riak_core_homepage = https://github.com/basho/riak_core
+pkg_riak_core_fetch = git
+pkg_riak_core_repo = https://github.com/basho/riak_core
+pkg_riak_core_commit = master
+
+PACKAGES += riak_dt
+pkg_riak_dt_name = riak_dt
+pkg_riak_dt_description = Convergent replicated datatypes in Erlang
+pkg_riak_dt_homepage = https://github.com/basho/riak_dt
+pkg_riak_dt_fetch = git
+pkg_riak_dt_repo = https://github.com/basho/riak_dt
+pkg_riak_dt_commit = master
+
+PACKAGES += riak_ensemble
+pkg_riak_ensemble_name = riak_ensemble
+pkg_riak_ensemble_description = Multi-Paxos framework in Erlang
+pkg_riak_ensemble_homepage = https://github.com/basho/riak_ensemble
+pkg_riak_ensemble_fetch = git
+pkg_riak_ensemble_repo = https://github.com/basho/riak_ensemble
+pkg_riak_ensemble_commit = master
+
+PACKAGES += riak_kv
+pkg_riak_kv_name = riak_kv
+pkg_riak_kv_description = Riak Key/Value Store
+pkg_riak_kv_homepage = https://github.com/basho/riak_kv
+pkg_riak_kv_fetch = git
+pkg_riak_kv_repo = https://github.com/basho/riak_kv
+pkg_riak_kv_commit = master
+
+PACKAGES += riak_pg
+pkg_riak_pg_name = riak_pg
+pkg_riak_pg_description = Distributed process groups with riak_core.
+pkg_riak_pg_homepage = https://github.com/cmeiklejohn/riak_pg
+pkg_riak_pg_fetch = git
+pkg_riak_pg_repo = https://github.com/cmeiklejohn/riak_pg
+pkg_riak_pg_commit = master
+
+PACKAGES += riak_pipe
+pkg_riak_pipe_name = riak_pipe
+pkg_riak_pipe_description = Riak Pipelines
+pkg_riak_pipe_homepage = https://github.com/basho/riak_pipe
+pkg_riak_pipe_fetch = git
+pkg_riak_pipe_repo = https://github.com/basho/riak_pipe
+pkg_riak_pipe_commit = master
+
+PACKAGES += riak_sysmon
+pkg_riak_sysmon_name = riak_sysmon
+pkg_riak_sysmon_description = Simple OTP app for managing Erlang VM system_monitor event messages
+pkg_riak_sysmon_homepage = https://github.com/basho/riak_sysmon
+pkg_riak_sysmon_fetch = git
+pkg_riak_sysmon_repo = https://github.com/basho/riak_sysmon
+pkg_riak_sysmon_commit = master
+
+PACKAGES += riak_test
+pkg_riak_test_name = riak_test
+pkg_riak_test_description = I'm in your cluster, testing your riaks
+pkg_riak_test_homepage = https://github.com/basho/riak_test
+pkg_riak_test_fetch = git
+pkg_riak_test_repo = https://github.com/basho/riak_test
+pkg_riak_test_commit = master
+
+PACKAGES += riakc
+pkg_riakc_name = riakc
+pkg_riakc_description = Erlang clients for Riak.
+pkg_riakc_homepage = https://github.com/basho/riak-erlang-client
+pkg_riakc_fetch = git
+pkg_riakc_repo = https://github.com/basho/riak-erlang-client
+pkg_riakc_commit = master
+
+PACKAGES += riakhttpc
+pkg_riakhttpc_name = riakhttpc
+pkg_riakhttpc_description = Riak Erlang client using the HTTP interface
+pkg_riakhttpc_homepage = https://github.com/basho/riak-erlang-http-client
+pkg_riakhttpc_fetch = git
+pkg_riakhttpc_repo = https://github.com/basho/riak-erlang-http-client
+pkg_riakhttpc_commit = master
+
+PACKAGES += riaknostic
+pkg_riaknostic_name = riaknostic
+pkg_riaknostic_description = A diagnostic tool for Riak installations, to find common errors asap
+pkg_riaknostic_homepage = https://github.com/basho/riaknostic
+pkg_riaknostic_fetch = git
+pkg_riaknostic_repo = https://github.com/basho/riaknostic
+pkg_riaknostic_commit = master
+
+PACKAGES += riakpool
+pkg_riakpool_name = riakpool
+pkg_riakpool_description = Erlang Riak client pool
+pkg_riakpool_homepage = https://github.com/dweldon/riakpool
+pkg_riakpool_fetch = git
+pkg_riakpool_repo = https://github.com/dweldon/riakpool
+pkg_riakpool_commit = master
+
+PACKAGES += rivus_cep
+pkg_rivus_cep_name = rivus_cep
+pkg_rivus_cep_description = Complex event processing in Erlang
+pkg_rivus_cep_homepage = https://github.com/vascokk/rivus_cep
+pkg_rivus_cep_fetch = git
+pkg_rivus_cep_repo = https://github.com/vascokk/rivus_cep
+pkg_rivus_cep_commit = master
+
+PACKAGES += rlimit
+pkg_rlimit_name = rlimit
+pkg_rlimit_description = Magnus Klaar's rate limiter code from etorrent
+pkg_rlimit_homepage = https://github.com/jlouis/rlimit
+pkg_rlimit_fetch = git
+pkg_rlimit_repo = https://github.com/jlouis/rlimit
+pkg_rlimit_commit = master
+
+PACKAGES += rust_mk
+pkg_rust_mk_name = rust_mk
+pkg_rust_mk_description = Build Rust crates in an Erlang application
+pkg_rust_mk_homepage = https://github.com/goertzenator/rust.mk
+pkg_rust_mk_fetch = git
+pkg_rust_mk_repo = https://github.com/goertzenator/rust.mk
+pkg_rust_mk_commit = master
+
+PACKAGES += safetyvalve
+pkg_safetyvalve_name = safetyvalve
+pkg_safetyvalve_description = A safety valve for your erlang node
+pkg_safetyvalve_homepage = https://github.com/jlouis/safetyvalve
+pkg_safetyvalve_fetch = git
+pkg_safetyvalve_repo = https://github.com/jlouis/safetyvalve
+pkg_safetyvalve_commit = master
+
+PACKAGES += seestar
+pkg_seestar_name = seestar
+pkg_seestar_description = The Erlang client for Cassandra 1.2+ binary protocol
+pkg_seestar_homepage = https://github.com/iamaleksey/seestar
+pkg_seestar_fetch = git
+pkg_seestar_repo = https://github.com/iamaleksey/seestar
+pkg_seestar_commit = master
+
+PACKAGES += service
+pkg_service_name = service
+pkg_service_description = A minimal Erlang behavior for creating CloudI internal services
+pkg_service_homepage = http://cloudi.org/
+pkg_service_fetch = git
+pkg_service_repo = https://github.com/CloudI/service
+pkg_service_commit = master
+
+PACKAGES += setup
+pkg_setup_name = setup
+pkg_setup_description = Generic setup utility for Erlang-based systems
+pkg_setup_homepage = https://github.com/uwiger/setup
+pkg_setup_fetch = git
+pkg_setup_repo = https://github.com/uwiger/setup
+pkg_setup_commit = master
+
+PACKAGES += sext
+pkg_sext_name = sext
+pkg_sext_description = Sortable Erlang Term Serialization
+pkg_sext_homepage = https://github.com/uwiger/sext
+pkg_sext_fetch = git
+pkg_sext_repo = https://github.com/uwiger/sext
+pkg_sext_commit = master
+
+PACKAGES += sfmt
+pkg_sfmt_name = sfmt
+pkg_sfmt_description = SFMT pseudo random number generator for Erlang.
+pkg_sfmt_homepage = https://github.com/jj1bdx/sfmt-erlang
+pkg_sfmt_fetch = git
+pkg_sfmt_repo = https://github.com/jj1bdx/sfmt-erlang
+pkg_sfmt_commit = master
+
+PACKAGES += sgte
+pkg_sgte_name = sgte
+pkg_sgte_description = A simple Erlang Template Engine
+pkg_sgte_homepage = https://github.com/filippo/sgte
+pkg_sgte_fetch = git
+pkg_sgte_repo = https://github.com/filippo/sgte
+pkg_sgte_commit = master
+
+PACKAGES += sheriff
+pkg_sheriff_name = sheriff
+pkg_sheriff_description = Parse transform for type based validation.
+pkg_sheriff_homepage = http://ninenines.eu
+pkg_sheriff_fetch = git
+pkg_sheriff_repo = https://github.com/extend/sheriff
+pkg_sheriff_commit = master
+
+PACKAGES += shotgun
+pkg_shotgun_name = shotgun
+pkg_shotgun_description = better than just a gun
+pkg_shotgun_homepage = https://github.com/inaka/shotgun
+pkg_shotgun_fetch = git
+pkg_shotgun_repo = https://github.com/inaka/shotgun
+pkg_shotgun_commit = master
+
+PACKAGES += sidejob
+pkg_sidejob_name = sidejob
+pkg_sidejob_description = Parallel worker and capacity limiting library for Erlang
+pkg_sidejob_homepage = https://github.com/basho/sidejob
+pkg_sidejob_fetch = git
+pkg_sidejob_repo = https://github.com/basho/sidejob
+pkg_sidejob_commit = master
+
+PACKAGES += sieve
+pkg_sieve_name = sieve
+pkg_sieve_description = sieve is a simple TCP routing proxy (layer 7) in Erlang
+pkg_sieve_homepage = https://github.com/benoitc/sieve
+pkg_sieve_fetch = git
+pkg_sieve_repo = https://github.com/benoitc/sieve
+pkg_sieve_commit = master
+
+PACKAGES += sighandler
+pkg_sighandler_name = sighandler
+pkg_sighandler_description = Handle UNIX signals in Erlang
+pkg_sighandler_homepage = https://github.com/jkingsbery/sighandler
+pkg_sighandler_fetch = git
+pkg_sighandler_repo = https://github.com/jkingsbery/sighandler
+pkg_sighandler_commit = master
+
+PACKAGES += simhash
+pkg_simhash_name = simhash
+pkg_simhash_description = Simhashing for Erlang -- hashing algorithm to find near-duplicates in binary data.
+pkg_simhash_homepage = https://github.com/ferd/simhash
+pkg_simhash_fetch = git
+pkg_simhash_repo = https://github.com/ferd/simhash
+pkg_simhash_commit = master
+
+PACKAGES += simple_bridge
+pkg_simple_bridge_name = simple_bridge
+pkg_simple_bridge_description = A simple, standardized interface library to Erlang HTTP Servers.
+pkg_simple_bridge_homepage = https://github.com/nitrogen/simple_bridge
+pkg_simple_bridge_fetch = git
+pkg_simple_bridge_repo = https://github.com/nitrogen/simple_bridge
+pkg_simple_bridge_commit = master
+
+PACKAGES += simple_oauth2
+pkg_simple_oauth2_name = simple_oauth2
+pkg_simple_oauth2_description = Simple Erlang OAuth2 client module for any HTTP server framework (Google, Facebook, Yandex, Vkontakte are preconfigured)
+pkg_simple_oauth2_homepage = https://github.com/virtan/simple_oauth2
+pkg_simple_oauth2_fetch = git
+pkg_simple_oauth2_repo = https://github.com/virtan/simple_oauth2
+pkg_simple_oauth2_commit = master
+
+PACKAGES += skel
+pkg_skel_name = skel
+pkg_skel_description = A Streaming Process-based Skeleton Library for Erlang
+pkg_skel_homepage = https://github.com/ParaPhrase/skel
+pkg_skel_fetch = git
+pkg_skel_repo = https://github.com/ParaPhrase/skel
+pkg_skel_commit = master
+
+PACKAGES += slack
+pkg_slack_name = slack
+pkg_slack_description = Minimal slack notification OTP library.
+pkg_slack_homepage = https://github.com/DonBranson/slack
+pkg_slack_fetch = git
+pkg_slack_repo = https://github.com/DonBranson/slack.git
+pkg_slack_commit = master
+
+PACKAGES += smother
+pkg_smother_name = smother
+pkg_smother_description = Extended code coverage metrics for Erlang.
+pkg_smother_homepage = https://ramsay-t.github.io/Smother/
+pkg_smother_fetch = git
+pkg_smother_repo = https://github.com/ramsay-t/Smother
+pkg_smother_commit = master
+
+PACKAGES += snappyer
+pkg_snappyer_name = snappyer
+pkg_snappyer_description = Snappy as a NIF for Erlang
+pkg_snappyer_homepage = https://github.com/zmstone/snappyer
+pkg_snappyer_fetch = git
+pkg_snappyer_repo = https://github.com/zmstone/snappyer.git
+pkg_snappyer_commit = master
+
+PACKAGES += social
+pkg_social_name = social
+pkg_social_description = Cowboy handler for social login via OAuth2 providers
+pkg_social_homepage = https://github.com/dvv/social
+pkg_social_fetch = git
+pkg_social_repo = https://github.com/dvv/social
+pkg_social_commit = master
+
+PACKAGES += spapi_router
+pkg_spapi_router_name = spapi_router
+pkg_spapi_router_description = Partially-connected Erlang clustering
+pkg_spapi_router_homepage = https://github.com/spilgames/spapi-router
+pkg_spapi_router_fetch = git
+pkg_spapi_router_repo = https://github.com/spilgames/spapi-router
+pkg_spapi_router_commit = master
+
+PACKAGES += sqerl
+pkg_sqerl_name = sqerl
+pkg_sqerl_description = An Erlang-flavoured SQL DSL
+pkg_sqerl_homepage = https://github.com/hairyhum/sqerl
+pkg_sqerl_fetch = git
+pkg_sqerl_repo = https://github.com/hairyhum/sqerl
+pkg_sqerl_commit = master
+
+PACKAGES += srly
+pkg_srly_name = srly
+pkg_srly_description = Native Erlang Unix serial interface
+pkg_srly_homepage = https://github.com/msantos/srly
+pkg_srly_fetch = git
+pkg_srly_repo = https://github.com/msantos/srly
+pkg_srly_commit = master
+
+PACKAGES += sshrpc
+pkg_sshrpc_name = sshrpc
+pkg_sshrpc_description = Erlang SSH RPC module (experimental)
+pkg_sshrpc_homepage = https://github.com/jj1bdx/sshrpc
+pkg_sshrpc_fetch = git
+pkg_sshrpc_repo = https://github.com/jj1bdx/sshrpc
+pkg_sshrpc_commit = master
+
+PACKAGES += stable
+pkg_stable_name = stable
+pkg_stable_description = Library of assorted helpers for Cowboy web server.
+pkg_stable_homepage = https://github.com/dvv/stable
+pkg_stable_fetch = git
+pkg_stable_repo = https://github.com/dvv/stable
+pkg_stable_commit = master
+
+PACKAGES += statebox
+pkg_statebox_name = statebox
+pkg_statebox_description = Erlang state monad with merge/conflict-resolution capabilities. Useful for Riak.
+pkg_statebox_homepage = https://github.com/mochi/statebox
+pkg_statebox_fetch = git
+pkg_statebox_repo = https://github.com/mochi/statebox
+pkg_statebox_commit = master
+
+PACKAGES += statebox_riak
+pkg_statebox_riak_name = statebox_riak
+pkg_statebox_riak_description = Convenience library that makes it easier to use statebox with riak, extracted from best practices in our production code at Mochi Media.
+pkg_statebox_riak_homepage = https://github.com/mochi/statebox_riak
+pkg_statebox_riak_fetch = git
+pkg_statebox_riak_repo = https://github.com/mochi/statebox_riak
+pkg_statebox_riak_commit = master
+
+PACKAGES += statman
+pkg_statman_name = statman
+pkg_statman_description = Efficiently collect massive volumes of metrics inside the Erlang VM
+pkg_statman_homepage = https://github.com/knutin/statman
+pkg_statman_fetch = git
+pkg_statman_repo = https://github.com/knutin/statman
+pkg_statman_commit = master
+
+PACKAGES += statsderl
+pkg_statsderl_name = statsderl
+pkg_statsderl_description = StatsD client (erlang)
+pkg_statsderl_homepage = https://github.com/lpgauth/statsderl
+pkg_statsderl_fetch = git
+pkg_statsderl_repo = https://github.com/lpgauth/statsderl
+pkg_statsderl_commit = master
+
+PACKAGES += stdinout_pool
+pkg_stdinout_pool_name = stdinout_pool
+pkg_stdinout_pool_description = stdinout_pool : stuff goes in, stuff goes out. there's never any miscommunication.
+pkg_stdinout_pool_homepage = https://github.com/mattsta/erlang-stdinout-pool
+pkg_stdinout_pool_fetch = git
+pkg_stdinout_pool_repo = https://github.com/mattsta/erlang-stdinout-pool
+pkg_stdinout_pool_commit = master
+
+PACKAGES += stockdb
+pkg_stockdb_name = stockdb
+pkg_stockdb_description = Database for storing stock exchange quotes in Erlang
+pkg_stockdb_homepage = https://github.com/maxlapshin/stockdb
+pkg_stockdb_fetch = git
+pkg_stockdb_repo = https://github.com/maxlapshin/stockdb
+pkg_stockdb_commit = master
+
+PACKAGES += stripe
+pkg_stripe_name = stripe
+pkg_stripe_description = Erlang interface to the stripe.com API
+pkg_stripe_homepage = https://github.com/mattsta/stripe-erlang
+pkg_stripe_fetch = git
+pkg_stripe_repo = https://github.com/mattsta/stripe-erlang
+pkg_stripe_commit = v1
+
+PACKAGES += subproc
+pkg_subproc_name = subproc
+pkg_subproc_description = unix subprocess manager with {active,once|false} modes
+pkg_subproc_homepage = http://dozzie.jarowit.net/trac/wiki/subproc
+pkg_subproc_fetch = git
+pkg_subproc_repo = https://github.com/dozzie/subproc
+pkg_subproc_commit = v0.1.0
+
+PACKAGES += supervisor3
+pkg_supervisor3_name = supervisor3
+pkg_supervisor3_description = OTP supervisor with additional strategies
+pkg_supervisor3_homepage = https://github.com/klarna/supervisor3
+pkg_supervisor3_fetch = git
+pkg_supervisor3_repo = https://github.com/klarna/supervisor3.git
+pkg_supervisor3_commit = master
+
+PACKAGES += surrogate
+pkg_surrogate_name = surrogate
+pkg_surrogate_description = Proxy server written in Erlang. Supports reverse-proxy load balancing and forward proxying with HTTP (including CONNECT), SOCKS4, SOCKS5, and transparent proxy modes.
+pkg_surrogate_homepage = https://github.com/skruger/Surrogate
+pkg_surrogate_fetch = git
+pkg_surrogate_repo = https://github.com/skruger/Surrogate
+pkg_surrogate_commit = master
+
+PACKAGES += swab
+pkg_swab_name = swab
+pkg_swab_description = General purpose buffer handling module
+pkg_swab_homepage = https://github.com/crownedgrouse/swab
+pkg_swab_fetch = git
+pkg_swab_repo = https://github.com/crownedgrouse/swab
+pkg_swab_commit = master
+
+PACKAGES += swarm
+pkg_swarm_name = swarm
+pkg_swarm_description = Fast and simple acceptor pool for Erlang
+pkg_swarm_homepage = https://github.com/jeremey/swarm
+pkg_swarm_fetch = git
+pkg_swarm_repo = https://github.com/jeremey/swarm
+pkg_swarm_commit = master
+
+PACKAGES += switchboard
+pkg_switchboard_name = switchboard
+pkg_switchboard_description = A framework for processing email using worker plugins.
+pkg_switchboard_homepage = https://github.com/thusfresh/switchboard
+pkg_switchboard_fetch = git
+pkg_switchboard_repo = https://github.com/thusfresh/switchboard
+pkg_switchboard_commit = master
+
+PACKAGES += syn
+pkg_syn_name = syn
+pkg_syn_description = A global Process Registry and Process Group manager for Erlang.
+pkg_syn_homepage = https://github.com/ostinelli/syn
+pkg_syn_fetch = git
+pkg_syn_repo = https://github.com/ostinelli/syn
+pkg_syn_commit = master
+
+PACKAGES += sync
+pkg_sync_name = sync
+pkg_sync_description = On-the-fly recompiling and reloading in Erlang.
+pkg_sync_homepage = https://github.com/rustyio/sync
+pkg_sync_fetch = git
+pkg_sync_repo = https://github.com/rustyio/sync
+pkg_sync_commit = master
+
+PACKAGES += syntaxerl
+pkg_syntaxerl_name = syntaxerl
+pkg_syntaxerl_description = Syntax checker for Erlang
+pkg_syntaxerl_homepage = https://github.com/ten0s/syntaxerl
+pkg_syntaxerl_fetch = git
+pkg_syntaxerl_repo = https://github.com/ten0s/syntaxerl
+pkg_syntaxerl_commit = master
+
+PACKAGES += syslog
+pkg_syslog_name = syslog
+pkg_syslog_description = Erlang port driver for interacting with syslog via syslog(3)
+pkg_syslog_homepage = https://github.com/Vagabond/erlang-syslog
+pkg_syslog_fetch = git
+pkg_syslog_repo = https://github.com/Vagabond/erlang-syslog
+pkg_syslog_commit = master
+
+PACKAGES += taskforce
+pkg_taskforce_name = taskforce
+pkg_taskforce_description = Erlang worker pools for controlled parallelisation of arbitrary tasks.
+pkg_taskforce_homepage = https://github.com/g-andrade/taskforce
+pkg_taskforce_fetch = git
+pkg_taskforce_repo = https://github.com/g-andrade/taskforce
+pkg_taskforce_commit = master
+
+PACKAGES += tddreloader
+pkg_tddreloader_name = tddreloader
+pkg_tddreloader_description = Shell utility for recompiling, reloading, and testing code as it changes
+pkg_tddreloader_homepage = https://github.com/version2beta/tddreloader
+pkg_tddreloader_fetch = git
+pkg_tddreloader_repo = https://github.com/version2beta/tddreloader
+pkg_tddreloader_commit = master
+
+PACKAGES += tempo
+pkg_tempo_name = tempo
+pkg_tempo_description = NIF-based date and time parsing and formatting for Erlang.
+pkg_tempo_homepage = https://github.com/selectel/tempo
+pkg_tempo_fetch = git
+pkg_tempo_repo = https://github.com/selectel/tempo
+pkg_tempo_commit = master
+
+PACKAGES += ticktick
+pkg_ticktick_name = ticktick
+pkg_ticktick_description = Ticktick is an ID generator for messaging services.
+pkg_ticktick_homepage = https://github.com/ericliang/ticktick
+pkg_ticktick_fetch = git
+pkg_ticktick_repo = https://github.com/ericliang/ticktick
+pkg_ticktick_commit = master
+
+PACKAGES += tinymq
+pkg_tinymq_name = tinymq
+pkg_tinymq_description = TinyMQ - a diminutive, in-memory message queue
+pkg_tinymq_homepage = https://github.com/ChicagoBoss/tinymq
+pkg_tinymq_fetch = git
+pkg_tinymq_repo = https://github.com/ChicagoBoss/tinymq
+pkg_tinymq_commit = master
+
+PACKAGES += tinymt
+pkg_tinymt_name = tinymt
+pkg_tinymt_description = TinyMT pseudo random number generator for Erlang.
+pkg_tinymt_homepage = https://github.com/jj1bdx/tinymt-erlang
+pkg_tinymt_fetch = git
+pkg_tinymt_repo = https://github.com/jj1bdx/tinymt-erlang
+pkg_tinymt_commit = master
+
+PACKAGES += tirerl
+pkg_tirerl_name = tirerl
+pkg_tirerl_description = Erlang interface to Elastic Search
+pkg_tirerl_homepage = https://github.com/inaka/tirerl
+pkg_tirerl_fetch = git
+pkg_tirerl_repo = https://github.com/inaka/tirerl
+pkg_tirerl_commit = master
+
+PACKAGES += toml
+pkg_toml_name = toml
+pkg_toml_description = TOML (0.4.0) config parser
+pkg_toml_homepage = http://dozzie.jarowit.net/trac/wiki/TOML
+pkg_toml_fetch = git
+pkg_toml_repo = https://github.com/dozzie/toml
+pkg_toml_commit = v0.2.0
+
+PACKAGES += traffic_tools
+pkg_traffic_tools_name = traffic_tools
+pkg_traffic_tools_description = Simple traffic limiting library
+pkg_traffic_tools_homepage = https://github.com/systra/traffic_tools
+pkg_traffic_tools_fetch = git
+pkg_traffic_tools_repo = https://github.com/systra/traffic_tools
+pkg_traffic_tools_commit = master
+
+PACKAGES += trails
+pkg_trails_name = trails
+pkg_trails_description = A couple of improvements over Cowboy Routes
+pkg_trails_homepage = http://inaka.github.io/cowboy-trails/
+pkg_trails_fetch = git
+pkg_trails_repo = https://github.com/inaka/cowboy-trails
+pkg_trails_commit = master
+
+PACKAGES += trane
+pkg_trane_name = trane
+pkg_trane_description = SAX-style parser for broken HTML, in Erlang
+pkg_trane_homepage = https://github.com/massemanet/trane
+pkg_trane_fetch = git
+pkg_trane_repo = https://github.com/massemanet/trane
+pkg_trane_commit = master
+
+PACKAGES += transit
+pkg_transit_name = transit
+pkg_transit_description = Transit format for Erlang
+pkg_transit_homepage = https://github.com/isaiah/transit-erlang
+pkg_transit_fetch = git
+pkg_transit_repo = https://github.com/isaiah/transit-erlang
+pkg_transit_commit = master
+
+PACKAGES += trie
+pkg_trie_name = trie
+pkg_trie_description = Erlang Trie Implementation
+pkg_trie_homepage = https://github.com/okeuday/trie
+pkg_trie_fetch = git
+pkg_trie_repo = https://github.com/okeuday/trie
+pkg_trie_commit = master
+
+PACKAGES += triq
+pkg_triq_name = triq
+pkg_triq_description = Trifork QuickCheck
+pkg_triq_homepage = https://triq.gitlab.io
+pkg_triq_fetch = git
+pkg_triq_repo = https://gitlab.com/triq/triq.git
+pkg_triq_commit = master
+
+PACKAGES += tunctl
+pkg_tunctl_name = tunctl
+pkg_tunctl_description = Erlang TUN/TAP interface
+pkg_tunctl_homepage = https://github.com/msantos/tunctl
+pkg_tunctl_fetch = git
+pkg_tunctl_repo = https://github.com/msantos/tunctl
+pkg_tunctl_commit = master
+
+PACKAGES += twerl
+pkg_twerl_name = twerl
+pkg_twerl_description = Erlang client for the Twitter Streaming API
+pkg_twerl_homepage = https://github.com/lucaspiller/twerl
+pkg_twerl_fetch = git
+pkg_twerl_repo = https://github.com/lucaspiller/twerl
+pkg_twerl_commit = oauth
+
+PACKAGES += twitter_erlang
+pkg_twitter_erlang_name = twitter_erlang
+pkg_twitter_erlang_description = An Erlang twitter client
+pkg_twitter_erlang_homepage = https://github.com/ngerakines/erlang_twitter
+pkg_twitter_erlang_fetch = git
+pkg_twitter_erlang_repo = https://github.com/ngerakines/erlang_twitter
+pkg_twitter_erlang_commit = master
+
+PACKAGES += ucol_nif
+pkg_ucol_nif_name = ucol_nif
+pkg_ucol_nif_description = ICU based collation Erlang module
+pkg_ucol_nif_homepage = https://github.com/refuge/ucol_nif
+pkg_ucol_nif_fetch = git
+pkg_ucol_nif_repo = https://github.com/refuge/ucol_nif
+pkg_ucol_nif_commit = master
+
+PACKAGES += unicorn
+pkg_unicorn_name = unicorn
+pkg_unicorn_description = Generic configuration server
+pkg_unicorn_homepage = https://github.com/shizzard/unicorn
+pkg_unicorn_fetch = git
+pkg_unicorn_repo = https://github.com/shizzard/unicorn
+pkg_unicorn_commit = master
+
+PACKAGES += unsplit
+pkg_unsplit_name = unsplit
+pkg_unsplit_description = Resolves conflicts in Mnesia after network splits
+pkg_unsplit_homepage = https://github.com/uwiger/unsplit
+pkg_unsplit_fetch = git
+pkg_unsplit_repo = https://github.com/uwiger/unsplit
+pkg_unsplit_commit = master
+
+PACKAGES += uuid
+pkg_uuid_name = uuid
+pkg_uuid_description = Erlang UUID Implementation
+pkg_uuid_homepage = https://github.com/okeuday/uuid
+pkg_uuid_fetch = git
+pkg_uuid_repo = https://github.com/okeuday/uuid
+pkg_uuid_commit = master
+
+PACKAGES += ux
+pkg_ux_name = ux
+pkg_ux_description = Unicode eXtension for Erlang (Strings, Collation)
+pkg_ux_homepage = https://github.com/erlang-unicode/ux
+pkg_ux_fetch = git
+pkg_ux_repo = https://github.com/erlang-unicode/ux
+pkg_ux_commit = master
+
+PACKAGES += vert
+pkg_vert_name = vert
+pkg_vert_description = Erlang binding to libvirt virtualization API
+pkg_vert_homepage = https://github.com/msantos/erlang-libvirt
+pkg_vert_fetch = git
+pkg_vert_repo = https://github.com/msantos/erlang-libvirt
+pkg_vert_commit = master
+
+PACKAGES += verx
+pkg_verx_name = verx
+pkg_verx_description = Erlang implementation of the libvirtd remote protocol
+pkg_verx_homepage = https://github.com/msantos/verx
+pkg_verx_fetch = git
+pkg_verx_repo = https://github.com/msantos/verx
+pkg_verx_commit = master
+
+PACKAGES += vmq_acl
+pkg_vmq_acl_name = vmq_acl
+pkg_vmq_acl_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_acl_homepage = https://verne.mq/
+pkg_vmq_acl_fetch = git
+pkg_vmq_acl_repo = https://github.com/erlio/vmq_acl
+pkg_vmq_acl_commit = master
+
+PACKAGES += vmq_bridge
+pkg_vmq_bridge_name = vmq_bridge
+pkg_vmq_bridge_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_bridge_homepage = https://verne.mq/
+pkg_vmq_bridge_fetch = git
+pkg_vmq_bridge_repo = https://github.com/erlio/vmq_bridge
+pkg_vmq_bridge_commit = master
+
+PACKAGES += vmq_graphite
+pkg_vmq_graphite_name = vmq_graphite
+pkg_vmq_graphite_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_graphite_homepage = https://verne.mq/
+pkg_vmq_graphite_fetch = git
+pkg_vmq_graphite_repo = https://github.com/erlio/vmq_graphite
+pkg_vmq_graphite_commit = master
+
+PACKAGES += vmq_passwd
+pkg_vmq_passwd_name = vmq_passwd
+pkg_vmq_passwd_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_passwd_homepage = https://verne.mq/
+pkg_vmq_passwd_fetch = git
+pkg_vmq_passwd_repo = https://github.com/erlio/vmq_passwd
+pkg_vmq_passwd_commit = master
+
+PACKAGES += vmq_server
+pkg_vmq_server_name = vmq_server
+pkg_vmq_server_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_server_homepage = https://verne.mq/
+pkg_vmq_server_fetch = git
+pkg_vmq_server_repo = https://github.com/erlio/vmq_server
+pkg_vmq_server_commit = master
+
+PACKAGES += vmq_snmp
+pkg_vmq_snmp_name = vmq_snmp
+pkg_vmq_snmp_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_snmp_homepage = https://verne.mq/
+pkg_vmq_snmp_fetch = git
+pkg_vmq_snmp_repo = https://github.com/erlio/vmq_snmp
+pkg_vmq_snmp_commit = master
+
+PACKAGES += vmq_systree
+pkg_vmq_systree_name = vmq_systree
+pkg_vmq_systree_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_systree_homepage = https://verne.mq/
+pkg_vmq_systree_fetch = git
+pkg_vmq_systree_repo = https://github.com/erlio/vmq_systree
+pkg_vmq_systree_commit = master
+
+PACKAGES += vmstats
+pkg_vmstats_name = vmstats
+pkg_vmstats_description = Tiny Erlang app that works in conjunction with statsderl to generate Erlang VM metrics for Graphite.
+pkg_vmstats_homepage = https://github.com/ferd/vmstats
+pkg_vmstats_fetch = git
+pkg_vmstats_repo = https://github.com/ferd/vmstats
+pkg_vmstats_commit = master
+
+PACKAGES += walrus
+pkg_walrus_name = walrus
+pkg_walrus_description = Walrus - Mustache-like Templating
+pkg_walrus_homepage = https://github.com/devinus/walrus
+pkg_walrus_fetch = git
+pkg_walrus_repo = https://github.com/devinus/walrus
+pkg_walrus_commit = master
+
+PACKAGES += webmachine
+pkg_webmachine_name = webmachine
+pkg_webmachine_description = A REST-based system for building web applications.
+pkg_webmachine_homepage = https://github.com/basho/webmachine
+pkg_webmachine_fetch = git
+pkg_webmachine_repo = https://github.com/basho/webmachine
+pkg_webmachine_commit = master
+
+PACKAGES += websocket_client
+pkg_websocket_client_name = websocket_client
+pkg_websocket_client_description = Erlang websocket client (ws and wss supported)
+pkg_websocket_client_homepage = https://github.com/jeremyong/websocket_client
+pkg_websocket_client_fetch = git
+pkg_websocket_client_repo = https://github.com/jeremyong/websocket_client
+pkg_websocket_client_commit = master
+
+PACKAGES += worker_pool
+pkg_worker_pool_name = worker_pool
+pkg_worker_pool_description = A simple Erlang worker pool
+pkg_worker_pool_homepage = https://github.com/inaka/worker_pool
+pkg_worker_pool_fetch = git
+pkg_worker_pool_repo = https://github.com/inaka/worker_pool
+pkg_worker_pool_commit = master
+
+PACKAGES += wrangler
+pkg_wrangler_name = wrangler
+pkg_wrangler_description = Import of the Wrangler svn repository.
+pkg_wrangler_homepage = http://www.cs.kent.ac.uk/projects/wrangler/Home.html
+pkg_wrangler_fetch = git
+pkg_wrangler_repo = https://github.com/RefactoringTools/wrangler
+pkg_wrangler_commit = master
+
+PACKAGES += wsock
+pkg_wsock_name = wsock
+pkg_wsock_description = Erlang library to build WebSocket clients and servers
+pkg_wsock_homepage = https://github.com/madtrick/wsock
+pkg_wsock_fetch = git
+pkg_wsock_repo = https://github.com/madtrick/wsock
+pkg_wsock_commit = master
+
+PACKAGES += xhttpc
+pkg_xhttpc_name = xhttpc
+pkg_xhttpc_description = Extensible HTTP Client for Erlang
+pkg_xhttpc_homepage = https://github.com/seriyps/xhttpc
+pkg_xhttpc_fetch = git
+pkg_xhttpc_repo = https://github.com/seriyps/xhttpc
+pkg_xhttpc_commit = master
+
+PACKAGES += xref_runner
+pkg_xref_runner_name = xref_runner
+pkg_xref_runner_description = Erlang Xref Runner (inspired by rebar xref)
+pkg_xref_runner_homepage = https://github.com/inaka/xref_runner
+pkg_xref_runner_fetch = git
+pkg_xref_runner_repo = https://github.com/inaka/xref_runner
+pkg_xref_runner_commit = master
+
+PACKAGES += yamerl
+pkg_yamerl_name = yamerl
+pkg_yamerl_description = YAML 1.2 parser in pure Erlang
+pkg_yamerl_homepage = https://github.com/yakaz/yamerl
+pkg_yamerl_fetch = git
+pkg_yamerl_repo = https://github.com/yakaz/yamerl
+pkg_yamerl_commit = master
+
+PACKAGES += yamler
+pkg_yamler_name = yamler
+pkg_yamler_description = libyaml-based yaml loader for Erlang
+pkg_yamler_homepage = https://github.com/goertzenator/yamler
+pkg_yamler_fetch = git
+pkg_yamler_repo = https://github.com/goertzenator/yamler
+pkg_yamler_commit = master
+
+PACKAGES += yaws
+pkg_yaws_name = yaws
+pkg_yaws_description = Yaws webserver
+pkg_yaws_homepage = http://yaws.hyber.org
+pkg_yaws_fetch = git
+pkg_yaws_repo = https://github.com/klacke/yaws
+pkg_yaws_commit = master
+
+PACKAGES += zab_engine
+pkg_zab_engine_name = zab_engine
+pkg_zab_engine_description = ZAB protocol implemented in Erlang
+pkg_zab_engine_homepage = https://github.com/xinmingyao/zab_engine
+pkg_zab_engine_fetch = git
+pkg_zab_engine_repo = https://github.com/xinmingyao/zab_engine
+pkg_zab_engine_commit = master
+
+PACKAGES += zabbix_sender
+pkg_zabbix_sender_name = zabbix_sender
+pkg_zabbix_sender_description = Zabbix trapper for sending data to Zabbix in pure Erlang
+pkg_zabbix_sender_homepage = https://github.com/stalkermn/zabbix_sender
+pkg_zabbix_sender_fetch = git
+pkg_zabbix_sender_repo = https://github.com/stalkermn/zabbix_sender.git
+pkg_zabbix_sender_commit = master
+
+PACKAGES += zeta
+pkg_zeta_name = zeta
+pkg_zeta_description = HTTP access log parser in Erlang
+pkg_zeta_homepage = https://github.com/s1n4/zeta
+pkg_zeta_fetch = git
+pkg_zeta_repo = https://github.com/s1n4/zeta
+pkg_zeta_commit = master
+
+PACKAGES += zippers
+pkg_zippers_name = zippers
+pkg_zippers_description = A library for functional zipper data structures in Erlang.
+pkg_zippers_homepage = https://github.com/ferd/zippers
+pkg_zippers_fetch = git
+pkg_zippers_repo = https://github.com/ferd/zippers
+pkg_zippers_commit = master
+
+PACKAGES += zlists
+pkg_zlists_name = zlists
+pkg_zlists_description = Erlang lazy lists library.
+pkg_zlists_homepage = https://github.com/vjache/erlang-zlists
+pkg_zlists_fetch = git
+pkg_zlists_repo = https://github.com/vjache/erlang-zlists
+pkg_zlists_commit = master
+
+PACKAGES += zraft_lib
+pkg_zraft_lib_name = zraft_lib
+pkg_zraft_lib_description = Erlang raft consensus protocol implementation
+pkg_zraft_lib_homepage = https://github.com/dreyk/zraft_lib
+pkg_zraft_lib_fetch = git
+pkg_zraft_lib_repo = https://github.com/dreyk/zraft_lib
+pkg_zraft_lib_commit = master
+
+PACKAGES += zucchini
+pkg_zucchini_name = zucchini
+pkg_zucchini_description = An Erlang INI parser
+pkg_zucchini_homepage = https://github.com/devinus/zucchini
+pkg_zucchini_fetch = git
+pkg_zucchini_repo = https://github.com/devinus/zucchini
+pkg_zucchini_commit = master
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: search
+
+define pkg_print
+ $(verbose) printf "%s\n" \
+ $(if $(call core_eq,$(1),$(pkg_$(1)_name)),,"Pkg name: $(1)") \
+ "App name: $(pkg_$(1)_name)" \
+ "Description: $(pkg_$(1)_description)" \
+ "Home page: $(pkg_$(1)_homepage)" \
+ "Fetch with: $(pkg_$(1)_fetch)" \
+ "Repository: $(pkg_$(1)_repo)" \
+ "Commit: $(pkg_$(1)_commit)" \
+ ""
+
+endef
+
+search:
+ifdef q
+ $(foreach p,$(PACKAGES), \
+ $(if $(findstring $(call core_lc,$(q)),$(call core_lc,$(pkg_$(p)_name) $(pkg_$(p)_description))), \
+ $(call pkg_print,$(p))))
+else
+ $(foreach p,$(PACKAGES),$(call pkg_print,$(p)))
+endif
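+
+# Example usage (illustrative): `make search q=pool` filters the package
+# index case-insensitively against package names and descriptions, while a
+# bare `make search` prints every known package via pkg_print.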
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-deps clean-tmp-deps.log
+
+# Configuration.
+
+ifdef OTP_DEPS
+$(warning The variable OTP_DEPS is deprecated in favor of LOCAL_DEPS.)
+endif
+
+IGNORE_DEPS ?=
+export IGNORE_DEPS
+
+APPS_DIR ?= $(CURDIR)/apps
+export APPS_DIR
+
+DEPS_DIR ?= $(CURDIR)/deps
+export DEPS_DIR
+
+REBAR_DEPS_DIR = $(DEPS_DIR)
+export REBAR_DEPS_DIR
+
+REBAR_GIT ?= https://github.com/rebar/rebar
+REBAR_COMMIT ?= 576e12171ab8d69b048b827b92aa65d067deea01
+
+# External "early" plugins (see core/plugins.mk for regular plugins).
+# They both use the core_dep_plugin macro.
+
+define core_dep_plugin
+ifeq ($(2),$(PROJECT))
+-include $$(patsubst $(PROJECT)/%,%,$(1))
+else
+-include $(DEPS_DIR)/$(1)
+
+$(DEPS_DIR)/$(1): $(DEPS_DIR)/$(2) ;
+endif
+endef
+
+DEP_EARLY_PLUGINS ?=
+
+$(foreach p,$(DEP_EARLY_PLUGINS),\
+ $(eval $(if $(findstring /,$p),\
+ $(call core_dep_plugin,$p,$(firstword $(subst /, ,$p))),\
+ $(call core_dep_plugin,$p/early-plugins.mk,$p))))
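+
+# Example (illustrative; "mydep" is a hypothetical dependency name): the
+# entry DEP_EARLY_PLUGINS = mydep includes $(DEPS_DIR)/mydep/early-plugins.mk,
+# while DEP_EARLY_PLUGINS = mydep/mk/early.mk includes that exact file from
+# $(DEPS_DIR), with mydep as the owning dependency in both cases.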
+
+# Query functions.
+
+query_fetch_method = $(if $(dep_$(1)),$(call _qfm_dep,$(word 1,$(dep_$(1)))),$(call _qfm_pkg,$(1)))
+_qfm_dep = $(if $(dep_fetch_$(1)),$(1),$(if $(IS_DEP),legacy,fail))
+_qfm_pkg = $(if $(pkg_$(1)_fetch),$(pkg_$(1)_fetch),fail)
+
+query_name = $(if $(dep_$(1)),$(1),$(if $(pkg_$(1)_name),$(pkg_$(1)_name),$(1)))
+
+query_repo = $(call _qr,$(1),$(call query_fetch_method,$(1)))
+_qr = $(if $(query_repo_$(2)),$(call query_repo_$(2),$(1)),$(call dep_repo,$(1)))
+
+query_repo_default = $(if $(dep_$(1)),$(word 2,$(dep_$(1))),$(pkg_$(1)_repo))
+query_repo_git = $(patsubst git://github.com/%,https://github.com/%,$(call query_repo_default,$(1)))
+query_repo_git-subfolder = $(call query_repo_git,$(1))
+query_repo_git-submodule = -
+query_repo_hg = $(call query_repo_default,$(1))
+query_repo_svn = $(call query_repo_default,$(1))
+query_repo_cp = $(call query_repo_default,$(1))
+query_repo_ln = $(call query_repo_default,$(1))
+query_repo_hex = https://hex.pm/packages/$(if $(word 3,$(dep_$(1))),$(word 3,$(dep_$(1))),$(1))
+query_repo_fail = -
+query_repo_legacy = -
+
+query_version = $(call _qv,$(1),$(call query_fetch_method,$(1)))
+_qv = $(if $(query_version_$(2)),$(call query_version_$(2),$(1)),$(call dep_commit,$(1)))
+
+query_version_default = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(word 3,$(dep_$(1))),$(pkg_$(1)_commit)))
+query_version_git = $(call query_version_default,$(1))
+query_version_git-subfolder = $(call query_version_git,$(1))
+query_version_git-submodule = -
+query_version_hg = $(call query_version_default,$(1))
+query_version_svn = -
+query_version_cp = -
+query_version_ln = -
+query_version_hex = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(word 2,$(dep_$(1))),$(pkg_$(1)_commit)))
+query_version_fail = -
+query_version_legacy = -
+
+query_extra = $(call _qe,$(1),$(call query_fetch_method,$(1)))
+_qe = $(if $(query_extra_$(2)),$(call query_extra_$(2),$(1)),-)
+
+query_extra_git = -
+query_extra_git-subfolder = $(if $(dep_$(1)),subfolder=$(word 4,$(dep_$(1))),-)
+query_extra_git-submodule = -
+query_extra_hg = -
+query_extra_svn = -
+query_extra_cp = -
+query_extra_ln = -
+query_extra_hex = $(if $(dep_$(1)),package-name=$(word 3,$(dep_$(1))),-)
+query_extra_fail = -
+query_extra_legacy = -
+
+query_absolute_path = $(addprefix $(DEPS_DIR)/,$(call query_name,$(1)))
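+
+# Example (illustrative; assumes the dep_fetch_git method defined elsewhere
+# in this file): given a dependency declared as
+#   dep_cowlib = git https://github.com/ninenines/cowlib 2.11.0
+# the queries resolve to:
+#   $(call query_fetch_method,cowlib) -> git
+#   $(call query_repo,cowlib)         -> https://github.com/ninenines/cowlib
+#   $(call query_version,cowlib)      -> 2.11.0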
+
+# Deprecated legacy query functions.
+dep_fetch = $(call query_fetch_method,$(1))
+dep_name = $(call query_name,$(1))
+dep_repo = $(call query_repo_git,$(1))
+dep_commit = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(if $(filter hex,$(word 1,$(dep_$(1)))),$(word 2,$(dep_$(1))),$(word 3,$(dep_$(1)))),$(pkg_$(1)_commit)))
+
+LOCAL_DEPS_DIRS = $(foreach a,$(LOCAL_DEPS),$(if $(wildcard $(APPS_DIR)/$(a)),$(APPS_DIR)/$(a)))
+ALL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(foreach dep,$(filter-out $(IGNORE_DEPS),$(BUILD_DEPS) $(DEPS)),$(call dep_name,$(dep))))
+
+# When an app is built directly we don't want to include it here,
+# otherwise it would be treated both as an app and as the top-level project.
+ALL_APPS_DIRS = $(if $(wildcard $(APPS_DIR)/),$(filter-out $(APPS_DIR),$(shell find $(APPS_DIR) -maxdepth 1 -type d)))
+ifdef ROOT_DIR
+ifndef IS_APP
+ALL_APPS_DIRS := $(filter-out $(APPS_DIR)/$(notdir $(CURDIR)),$(ALL_APPS_DIRS))
+endif
+endif
+
+ifeq ($(filter $(APPS_DIR) $(DEPS_DIR),$(subst :, ,$(ERL_LIBS))),)
+ifeq ($(ERL_LIBS),)
+ ERL_LIBS = $(APPS_DIR):$(DEPS_DIR)
+else
+ ERL_LIBS := $(ERL_LIBS):$(APPS_DIR):$(DEPS_DIR)
+endif
+endif
+export ERL_LIBS
+
+export NO_AUTOPATCH
+
+# Verbosity.
+
+dep_verbose_0 = @echo " DEP $1 ($(call dep_commit,$1))";
+dep_verbose_2 = set -x;
+dep_verbose = $(dep_verbose_$(V))
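+# e.g. `make deps V=2' traces every shell command with `set -x' instead of
+# printing the terse " DEP <name>" lines shown by default.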
+
+# Optimization: don't recompile deps unless truly necessary.
+
+ifndef IS_DEP
+ifneq ($(MAKELEVEL),0)
+$(shell rm -f ebin/dep_built)
+endif
+endif
+
+# Core targets.
+
+ALL_APPS_DIRS_TO_BUILD = $(if $(LOCAL_DEPS_DIRS)$(IS_APP),$(LOCAL_DEPS_DIRS),$(ALL_APPS_DIRS))
+
+apps:: $(ALL_APPS_DIRS) clean-tmp-deps.log | $(ERLANG_MK_TMP)
+# Create ebin directory for all apps to make sure Erlang recognizes them
+# as proper OTP applications when using -include_lib. This is a temporary
+# fix; a proper fix would be to compile apps/* in the right order.
+ifndef IS_APP
+ifneq ($(ALL_APPS_DIRS),)
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ mkdir -p $$dep/ebin; \
+ done
+endif
+endif
+# At the top level: if LOCAL_DEPS is defined with at least one local app, only
+# compile that list of apps. Otherwise, compile everything.
+# Within an app: compile all LOCAL_DEPS that are (uncompiled) local apps.
+ifneq ($(ALL_APPS_DIRS_TO_BUILD),)
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS_TO_BUILD); do \
+ if grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/apps.log; then \
+ :; \
+ else \
+ echo $$dep >> $(ERLANG_MK_TMP)/apps.log; \
+ $(MAKE) -C $$dep $(if $(IS_TEST),test-build-app) IS_APP=1; \
+ fi \
+ done
+endif
+
+clean-tmp-deps.log:
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_TMP)/apps.log $(ERLANG_MK_TMP)/deps.log
+endif
+
+# Erlang.mk does not rebuild dependencies after they have been compiled
+# once. A developer working on the top-level project and some of its
+# dependencies at the same time may want to change this behavior.
+# There are two solutions:
+# 1. Set `FULL=1` so that all dependencies are visited and
+# recursively recompiled if necessary.
+# 2. Set `FORCE_REBUILD=` to the specific list of dependencies that
+# should be recompiled (instead of the whole set).
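+#
+# For instance (illustrative invocations; `cowlib' is a placeholder name):
+#   make deps FULL=1                  # revisit and rebuild all dependencies
+#   make deps FORCE_REBUILD=cowlib    # rebuild only the listed dependencies
+# Multiple names may be listed, e.g. FORCE_REBUILD="cowlib ranch".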
+
+FORCE_REBUILD ?=
+
+ifeq ($(origin FULL),undefined)
+ifneq ($(strip $(force_rebuild_dep)$(FORCE_REBUILD)),)
+define force_rebuild_dep
+echo "$(FORCE_REBUILD)" | grep -qw "$$(basename "$1")"
+endef
+endif
+endif
+
+ifneq ($(SKIP_DEPS),)
+deps::
+else
+deps:: $(ALL_DEPS_DIRS) apps clean-tmp-deps.log | $(ERLANG_MK_TMP)
+ifneq ($(ALL_DEPS_DIRS),)
+ $(verbose) set -e; for dep in $(ALL_DEPS_DIRS); do \
+ if grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/deps.log; then \
+ :; \
+ else \
+ echo $$dep >> $(ERLANG_MK_TMP)/deps.log; \
+ if [ -z "$(strip $(FULL))" ] $(if $(force_rebuild_dep),&& ! ($(call force_rebuild_dep,$$dep)),) && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ elif [ -f $$dep/GNUmakefile ] || [ -f $$dep/makefile ] || [ -f $$dep/Makefile ]; then \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ else \
+ echo "Error: No Makefile to build dependency $$dep." >&2; \
+ exit 2; \
+ fi \
+ fi \
+ done
+endif
+endif
+
+# Deps related targets.
+
+# @todo Rename GNUmakefile and makefile into Makefile first, if they exist.
+# While the Makefile could also be named GNUmakefile or makefile,
+# in practice only Makefile has been needed so far.
+define dep_autopatch
+ if [ -f $(DEPS_DIR)/$(1)/erlang.mk ]; then \
+ rm -rf $(DEPS_DIR)/$1/ebin/; \
+ $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+ $(call dep_autopatch_erlang_mk,$(1)); \
+ elif [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+ if [ -f $(DEPS_DIR)/$1/rebar.lock ]; then \
+ $(call dep_autopatch2,$1); \
+ elif [ 0 != `grep -c "include ../\w*\.mk" $(DEPS_DIR)/$(1)/Makefile` ]; then \
+ $(call dep_autopatch2,$(1)); \
+ elif [ 0 != `grep -ci "^[^#].*rebar" $(DEPS_DIR)/$(1)/Makefile` ]; then \
+ $(call dep_autopatch2,$(1)); \
+ elif [ -n "`find $(DEPS_DIR)/$(1)/ -type f -name \*.mk -not -name erlang.mk -exec grep -i "^[^#].*rebar" '{}' \;`" ]; then \
+ $(call dep_autopatch2,$(1)); \
+ fi \
+ else \
+ if [ ! -d $(DEPS_DIR)/$(1)/src/ ]; then \
+ $(call dep_autopatch_noop,$(1)); \
+ else \
+ $(call dep_autopatch2,$(1)); \
+ fi \
+ fi
+endef
+
+define dep_autopatch2
+ ! test -f $(DEPS_DIR)/$1/ebin/$1.app || \
+ mv -n $(DEPS_DIR)/$1/ebin/$1.app $(DEPS_DIR)/$1/src/$1.app.src; \
+ rm -f $(DEPS_DIR)/$1/ebin/$1.app; \
+ if [ -f $(DEPS_DIR)/$1/src/$1.app.src.script ]; then \
+ $(call erlang,$(call dep_autopatch_appsrc_script.erl,$(1))); \
+ fi; \
+ $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+ if [ -f $(DEPS_DIR)/$(1)/rebar -o -f $(DEPS_DIR)/$(1)/rebar.config -o -f $(DEPS_DIR)/$(1)/rebar.config.script -o -f $(DEPS_DIR)/$1/rebar.lock ]; then \
+ $(call dep_autopatch_fetch_rebar); \
+ $(call dep_autopatch_rebar,$(1)); \
+ else \
+ $(call dep_autopatch_gen,$(1)); \
+ fi
+endef
+
+define dep_autopatch_noop
+ printf "noop:\n" > $(DEPS_DIR)/$(1)/Makefile
+endef
+
+# Replace "include erlang.mk" with a line that will load the parent Erlang.mk
+# if given. Do it for all 3 possible Makefile file names.
+ifeq ($(NO_AUTOPATCH_ERLANG_MK),)
+define dep_autopatch_erlang_mk
+ for f in Makefile makefile GNUmakefile; do \
+ if [ -f $(DEPS_DIR)/$1/$$f ]; then \
+ sed -i.bak s/'include *erlang.mk'/'include $$(if $$(ERLANG_MK_FILENAME),$$(ERLANG_MK_FILENAME),erlang.mk)'/ $(DEPS_DIR)/$1/$$f; \
+ fi \
+ done
+endef
+else
+define dep_autopatch_erlang_mk
+ :
+endef
+endif
+
+define dep_autopatch_gen
+ printf "%s\n" \
+ "ERLC_OPTS = +debug_info" \
+ "include ../../erlang.mk" > $(DEPS_DIR)/$(1)/Makefile
+endef
+
+# We use flock/lockf when available to avoid concurrency issues.
+define dep_autopatch_fetch_rebar
+ if command -v flock >/dev/null; then \
+ flock $(ERLANG_MK_TMP)/rebar.lock sh -c "$(call dep_autopatch_fetch_rebar2)"; \
+ elif command -v lockf >/dev/null; then \
+ lockf $(ERLANG_MK_TMP)/rebar.lock sh -c "$(call dep_autopatch_fetch_rebar2)"; \
+ else \
+ $(call dep_autopatch_fetch_rebar2); \
+ fi
+endef
+
+define dep_autopatch_fetch_rebar2
+ if [ ! -d $(ERLANG_MK_TMP)/rebar ]; then \
+ git clone -q -n -- $(REBAR_GIT) $(ERLANG_MK_TMP)/rebar; \
+ cd $(ERLANG_MK_TMP)/rebar; \
+ git checkout -q $(REBAR_COMMIT); \
+ ./bootstrap; \
+ cd -; \
+ fi
+endef
+
+define dep_autopatch_rebar
+ if [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+ mv $(DEPS_DIR)/$(1)/Makefile $(DEPS_DIR)/$(1)/Makefile.orig.mk; \
+ fi; \
+ $(call erlang,$(call dep_autopatch_rebar.erl,$(1))); \
+ rm -f $(DEPS_DIR)/$(1)/ebin/$(1).app
+endef
+
+define dep_autopatch_rebar.erl
+ application:load(rebar),
+ application:set_env(rebar, log_level, debug),
+ rmemo:start(),
+ Conf1 = case file:consult("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config)") of
+ {ok, Conf0} -> Conf0;
+ _ -> []
+ end,
+ {Conf, OsEnv} = fun() ->
+ case filelib:is_file("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)") of
+ false -> {Conf1, []};
+ true ->
+ Bindings0 = erl_eval:new_bindings(),
+ Bindings1 = erl_eval:add_binding('CONFIG', Conf1, Bindings0),
+ Bindings = erl_eval:add_binding('SCRIPT', "$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings1),
+ Before = os:getenv(),
+ {ok, Conf2} = file:script("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings),
+ {Conf2, lists:foldl(fun(E, Acc) -> lists:delete(E, Acc) end, os:getenv(), Before)}
+ end
+ end(),
+ Write = fun (Text) ->
+ file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/Makefile)", Text, [append])
+ end,
+ Escape = fun (Text) ->
+ re:replace(Text, "\\\\$$", "\$$$$", [global, {return, list}])
+ end,
+ Write("IGNORE_DEPS += edown eper eunit_formatters meck node_package "
+ "rebar_lock_deps_plugin rebar_vsn_plugin reltool_util\n"),
+ Write("C_SRC_DIR = /path/do/not/exist\n"),
+ Write("C_SRC_TYPE = rebar\n"),
+ Write("DRV_CFLAGS = -fPIC\nexport DRV_CFLAGS\n"),
+ Write(["ERLANG_ARCH = ", rebar_utils:wordsize(), "\nexport ERLANG_ARCH\n"]),
+ ToList = fun
+ (V) when is_atom(V) -> atom_to_list(V);
+ (V) when is_list(V) -> "'\\"" ++ V ++ "\\"'"
+ end,
+ fun() ->
+ Write("ERLC_OPTS = +debug_info\nexport ERLC_OPTS\n"),
+ case lists:keyfind(erl_opts, 1, Conf) of
+ false -> ok;
+ {_, ErlOpts} ->
+ lists:foreach(fun
+ ({d, D}) ->
+ Write("ERLC_OPTS += -D" ++ ToList(D) ++ "=1\n");
+ ({d, DKey, DVal}) ->
+ Write("ERLC_OPTS += -D" ++ ToList(DKey) ++ "=" ++ ToList(DVal) ++ "\n");
+ ({i, I}) ->
+ Write(["ERLC_OPTS += -I ", I, "\n"]);
+ ({platform_define, Regex, D}) ->
+ case rebar_utils:is_arch(Regex) of
+ true -> Write("ERLC_OPTS += -D" ++ ToList(D) ++ "=1\n");
+ false -> ok
+ end;
+ ({parse_transform, PT}) ->
+ Write("ERLC_OPTS += +'{parse_transform, " ++ ToList(PT) ++ "}'\n");
+ (_) -> ok
+ end, ErlOpts)
+ end,
+ Write("\n")
+ end(),
+ GetHexVsn = fun(N, NP) ->
+ case file:consult("$(call core_native_path,$(DEPS_DIR)/$1/rebar.lock)") of
+ {ok, Lock} ->
+ io:format("~p~n", [Lock]),
+ case lists:keyfind("1.1.0", 1, Lock) of
+ {_, LockPkgs} ->
+ io:format("~p~n", [LockPkgs]),
+ case lists:keyfind(atom_to_binary(N, latin1), 1, LockPkgs) of
+ {_, {pkg, _, Vsn}, _} ->
+ io:format("~p~n", [Vsn]),
+ {N, {hex, NP, binary_to_list(Vsn)}};
+ _ ->
+ false
+ end;
+ _ ->
+ false
+ end;
+ _ ->
+ false
+ end
+ end,
+ SemVsn = fun
+ ("~>" ++ S0) ->
+ S = case S0 of
+ " " ++ S1 -> S1;
+ _ -> S0
+ end,
+ case length([ok || $$. <- S]) of
+ 0 -> S ++ ".0.0";
+ 1 -> S ++ ".0";
+ _ -> S
+ end;
+ (S) -> S
+ end,
+ fun() ->
+ File = case lists:keyfind(deps, 1, Conf) of
+ false -> [];
+ {_, Deps} ->
+ [begin case case Dep of
+ N when is_atom(N) -> GetHexVsn(N, N);
+ {N, S} when is_atom(N), is_list(S) -> {N, {hex, N, SemVsn(S)}};
+ {N, {pkg, NP}} when is_atom(N) -> GetHexVsn(N, NP);
+ {N, S, {pkg, NP}} -> {N, {hex, NP, S}};
+ {N, S} when is_tuple(S) -> {N, S};
+ {N, _, S} -> {N, S};
+ {N, _, S, _} -> {N, S};
+ _ -> false
+ end of
+ false -> ok;
+ {Name, Source} ->
+ {Method, Repo, Commit} = case Source of
+ {hex, NPV, V} -> {hex, V, NPV};
+ {git, R} -> {git, R, master};
+ {M, R, {branch, C}} -> {M, R, C};
+ {M, R, {ref, C}} -> {M, R, C};
+ {M, R, {tag, C}} -> {M, R, C};
+ {M, R, C} -> {M, R, C}
+ end,
+ Write(io_lib:format("DEPS += ~s\ndep_~s = ~s ~s ~s~n", [Name, Name, Method, Repo, Commit]))
+ end end || Dep <- Deps]
+ end
+ end(),
+ fun() ->
+ case lists:keyfind(erl_first_files, 1, Conf) of
+ false -> ok;
+ {_, Files} ->
+ Names = [[" ", case lists:reverse(F) of
+ "lre." ++ Elif -> lists:reverse(Elif);
+ "lrx." ++ Elif -> lists:reverse(Elif);
+ "lry." ++ Elif -> lists:reverse(Elif);
+ Elif -> lists:reverse(Elif)
+ end] || "src/" ++ F <- Files],
+ Write(io_lib:format("COMPILE_FIRST +=~s\n", [Names]))
+ end
+ end(),
+ Write("\n\nrebar_dep: preprocess pre-deps deps pre-app app\n"),
+ Write("\npreprocess::\n"),
+ Write("\npre-deps::\n"),
+ Write("\npre-app::\n"),
+ PatchHook = fun(Cmd) ->
+ Cmd2 = re:replace(Cmd, "^([g]?make)(.*)( -C.*)", "\\\\1\\\\3\\\\2", [{return, list}]),
+ case Cmd2 of
+ "make -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1);
+ "gmake -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1);
+ "make " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1);
+ "gmake " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1);
+ _ -> Escape(Cmd)
+ end
+ end,
+ fun() ->
+ case lists:keyfind(pre_hooks, 1, Conf) of
+ false -> ok;
+ {_, Hooks} ->
+ [case H of
+ {'get-deps', Cmd} ->
+ Write("\npre-deps::\n\t" ++ PatchHook(Cmd) ++ "\n");
+ {compile, Cmd} ->
+ Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n");
+ {Regex, compile, Cmd} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n");
+ false -> ok
+ end;
+ _ -> ok
+ end || H <- Hooks]
+ end
+ end(),
+ ShellToMk = fun(V0) ->
+ V1 = re:replace(V0, "[$$][(]", "$$\(shell ", [global]),
+ V = re:replace(V1, "([$$])(?![(])(\\\\w*)", "\\\\1(\\\\2)", [global]),
+ re:replace(V, "-Werror\\\\b", "", [{return, list}, global])
+ end,
+ PortSpecs = fun() ->
+ case lists:keyfind(port_specs, 1, Conf) of
+ false ->
+ case filelib:is_dir("$(call core_native_path,$(DEPS_DIR)/$1/c_src)") of
+ false -> [];
+ true ->
+ [{"priv/" ++ proplists:get_value(so_name, Conf, "$(1)_drv.so"),
+ proplists:get_value(port_sources, Conf, ["c_src/*.c"]), []}]
+ end;
+ {_, Specs} ->
+ lists:flatten([case S of
+ {Output, Input} -> {ShellToMk(Output), Input, []};
+ {Regex, Output, Input} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {ShellToMk(Output), Input, []};
+ false -> []
+ end;
+ {Regex, Output, Input, [{env, Env}]} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {ShellToMk(Output), Input, Env};
+ false -> []
+ end
+ end || S <- Specs])
+ end
+ end(),
+ PortSpecWrite = fun (Text) ->
+ file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/c_src/Makefile.erlang.mk)", Text, [append])
+ end,
+ case PortSpecs of
+ [] -> ok;
+ _ ->
+ Write("\npre-app::\n\t@$$\(MAKE) --no-print-directory -f c_src/Makefile.erlang.mk\n"),
+ PortSpecWrite(io_lib:format("ERL_CFLAGS ?= -finline-functions -Wall -fPIC -I \\"~s/erts-~s/include\\" -I \\"~s\\"\n",
+ [code:root_dir(), erlang:system_info(version), code:lib_dir(erl_interface, include)])),
+ PortSpecWrite(io_lib:format("ERL_LDFLAGS ?= -L \\"~s\\" -lei\n",
+ [code:lib_dir(erl_interface, lib)])),
+ [PortSpecWrite(["\n", E, "\n"]) || E <- OsEnv],
+ FilterEnv = fun(Env) ->
+ lists:flatten([case E of
+ {_, _} -> E;
+ {Regex, K, V} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {K, V};
+ false -> []
+ end
+ end || E <- Env])
+ end,
+ MergeEnv = fun(Env) ->
+ lists:foldl(fun ({K, V}, Acc) ->
+ case lists:keyfind(K, 1, Acc) of
+ false -> [{K, rebar_utils:expand_env_variable(V, K, "")}|Acc];
+ {_, V0} -> [{K, rebar_utils:expand_env_variable(V, K, V0)}|Acc]
+ end
+ end, [], Env)
+ end,
+ PortEnv = case lists:keyfind(port_env, 1, Conf) of
+ false -> [];
+ {_, PortEnv0} -> FilterEnv(PortEnv0)
+ end,
+ PortSpec = fun ({Output, Input0, Env}) ->
+ filelib:ensure_dir("$(call core_native_path,$(DEPS_DIR)/$1/)" ++ Output),
+ Input = [[" ", I] || I <- Input0],
+ PortSpecWrite([
+ [["\n", K, " = ", ShellToMk(V)] || {K, V} <- lists:reverse(MergeEnv(PortEnv))],
+ case $(PLATFORM) of
+ darwin -> "\n\nLDFLAGS += -flat_namespace -undefined suppress";
+ _ -> ""
+ end,
+ "\n\nall:: ", Output, "\n\t@:\n\n",
+ "%.o: %.c\n\t$$\(CC) -c -o $$\@ $$\< $$\(CFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.C\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.cc\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.cpp\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ [[Output, ": ", K, " += ", ShellToMk(V), "\n"] || {K, V} <- lists:reverse(MergeEnv(FilterEnv(Env)))],
+ Output, ": $$\(foreach ext,.c .C .cc .cpp,",
+ "$$\(patsubst %$$\(ext),%.o,$$\(filter %$$\(ext),$$\(wildcard", Input, "))))\n",
+ "\t$$\(CC) -o $$\@ $$\? $$\(LDFLAGS) $$\(ERL_LDFLAGS) $$\(DRV_LDFLAGS) $$\(EXE_LDFLAGS)",
+ case {filename:extension(Output), $(PLATFORM)} of
+ {[], _} -> "\n";
+ {_, darwin} -> "\n";
+ _ -> " -shared\n"
+ end])
+ end,
+ [PortSpec(S) || S <- PortSpecs]
+ end,
+ fun() ->
+ case lists:keyfind(plugins, 1, Conf) of
+ false -> ok;
+ {_, Plugins0} ->
+ Plugins = [P || P <- Plugins0, is_tuple(P)],
+ case lists:keyfind('lfe-compile', 1, Plugins) of
+ false -> ok;
+ _ -> Write("\nBUILD_DEPS = lfe lfe.mk\ndep_lfe.mk = git https://github.com/ninenines/lfe.mk master\nDEP_PLUGINS = lfe.mk\n")
+ end
+ end
+ end(),
+ Write("\ninclude $$\(if $$\(ERLANG_MK_FILENAME),$$\(ERLANG_MK_FILENAME),erlang.mk)"),
+ RunPlugin = fun(Plugin, Step) ->
+ case erlang:function_exported(Plugin, Step, 2) of
+ false -> ok;
+ true ->
+ c:cd("$(call core_native_path,$(DEPS_DIR)/$1/)"),
+ Ret = Plugin:Step({config, "", Conf, dict:new(), dict:new(), dict:new(),
+ dict:store(base_dir, "", dict:new())}, undefined),
+ io:format("rebar plugin ~p step ~p ret ~p~n", [Plugin, Step, Ret])
+ end
+ end,
+ fun() ->
+ case lists:keyfind(plugins, 1, Conf) of
+ false -> ok;
+ {_, Plugins0} ->
+ Plugins = [P || P <- Plugins0, is_atom(P)],
+ [begin
+ case lists:keyfind(deps, 1, Conf) of
+ false -> ok;
+ {_, Deps} ->
+ case lists:keyfind(P, 1, Deps) of
+ false -> ok;
+ _ ->
+ Path = "$(call core_native_path,$(DEPS_DIR)/)" ++ atom_to_list(P),
+ io:format("~s", [os:cmd("$(MAKE) -C $(call core_native_path,$(DEPS_DIR)/$1) " ++ Path)]),
+ io:format("~s", [os:cmd("$(MAKE) -C " ++ Path ++ " IS_DEP=1")]),
+ code:add_patha(Path ++ "/ebin")
+ end
+ end
+ end || P <- Plugins],
+ [case code:load_file(P) of
+ {module, P} -> ok;
+ _ ->
+ case lists:keyfind(plugin_dir, 1, Conf) of
+ false -> ok;
+ {_, PluginsDir} ->
+ ErlFile = "$(call core_native_path,$(DEPS_DIR)/$1/)" ++ PluginsDir ++ "/" ++ atom_to_list(P) ++ ".erl",
+ {ok, P, Bin} = compile:file(ErlFile, [binary]),
+ {module, P} = code:load_binary(P, ErlFile, Bin)
+ end
+ end || P <- Plugins],
+ [RunPlugin(P, preprocess) || P <- Plugins],
+ [RunPlugin(P, pre_compile) || P <- Plugins],
+ [RunPlugin(P, compile) || P <- Plugins]
+ end
+ end(),
+ halt()
+endef
+
+define dep_autopatch_appsrc_script.erl
+ AppSrc = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+ AppSrcScript = AppSrc ++ ".script",
+ {ok, Conf0} = file:consult(AppSrc),
+ Bindings0 = erl_eval:new_bindings(),
+ Bindings1 = erl_eval:add_binding('CONFIG', Conf0, Bindings0),
+ Bindings = erl_eval:add_binding('SCRIPT', AppSrcScript, Bindings1),
+ Conf = case file:script(AppSrcScript, Bindings) of
+ {ok, [C]} -> C;
+ {ok, C} -> C
+ end,
+ ok = file:write_file(AppSrc, io_lib:format("~p.~n", [Conf])),
+ halt()
+endef
+
+define dep_autopatch_appsrc.erl
+ AppSrcOut = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+ AppSrcIn = case filelib:is_regular(AppSrcOut) of false -> "$(call core_native_path,$(DEPS_DIR)/$1/ebin/$1.app)"; true -> AppSrcOut end,
+ case filelib:is_regular(AppSrcIn) of
+ false -> ok;
+ true ->
+ {ok, [{application, $(1), L0}]} = file:consult(AppSrcIn),
+ L1 = lists:keystore(modules, 1, L0, {modules, []}),
+ L2 = case lists:keyfind(vsn, 1, L1) of
+ {_, git} -> lists:keyreplace(vsn, 1, L1, {vsn, lists:droplast(os:cmd("git -C $(DEPS_DIR)/$1 describe --dirty --tags --always"))});
+ {_, {cmd, _}} -> lists:keyreplace(vsn, 1, L1, {vsn, "cmd"});
+ _ -> L1
+ end,
+ L3 = case lists:keyfind(registered, 1, L2) of false -> [{registered, []}|L2]; _ -> L2 end,
+ ok = file:write_file(AppSrcOut, io_lib:format("~p.~n", [{application, $(1), L3}])),
+ case AppSrcOut of AppSrcIn -> ok; _ -> ok = file:delete(AppSrcIn) end
+ end,
+ halt()
+endef
+
+define dep_fetch_git
+ git clone -q -n -- $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && git checkout -q $(call dep_commit,$(1));
+endef
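+
+# Example of a git dependency specification consumed by dep_fetch_git above
+# (illustrative; name, URL and tag are placeholders):
+#   DEPS += cowboy
+#   dep_cowboy = git https://github.com/ninenines/cowboy 2.8.0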
+
+define dep_fetch_git-subfolder
+ mkdir -p $(ERLANG_MK_TMP)/git-subfolder; \
+ git clone -q -n -- $(call dep_repo,$1) \
+ $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1); \
+ cd $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1) \
+ && git checkout -q $(call dep_commit,$1); \
+ ln -s $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1)/$(word 4,$(dep_$(1))) \
+ $(DEPS_DIR)/$(call dep_name,$1);
+endef
+
+define dep_fetch_git-submodule
+ git submodule update --init -- $(DEPS_DIR)/$1;
+endef
+
+define dep_fetch_hg
+ hg clone -q -U $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && hg update -q $(call dep_commit,$(1));
+endef
+
+define dep_fetch_svn
+ svn checkout -q $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+define dep_fetch_cp
+ cp -R $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+define dep_fetch_ln
+ ln -s $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+# Hex dependencies are specified with a package version only, so there is
+# no need to look them up in the Erlang.mk package index.
+define dep_fetch_hex
+ mkdir -p $(ERLANG_MK_TMP)/hex $(DEPS_DIR)/$1; \
+ $(call core_http_get,$(ERLANG_MK_TMP)/hex/$1.tar,\
+ https://repo.hex.pm/tarballs/$(if $(word 3,$(dep_$1)),$(word 3,$(dep_$1)),$1)-$(strip $(word 2,$(dep_$1))).tar); \
+ tar -xOf $(ERLANG_MK_TMP)/hex/$1.tar contents.tar.gz | tar -C $(DEPS_DIR)/$1 -xzf -;
+endef
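+
+# Example of a hex dependency specification (illustrative; name and version
+# are placeholders; an optional third word overrides the package name):
+#   DEPS += jsx
+#   dep_jsx = hex 2.9.0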
+
+define dep_fetch_fail
+ echo "Error: Unknown or invalid dependency: $(1)." >&2; \
+ exit 78;
+endef
+
+# Kept for compatibility with older Erlang.mk configurations.
+define dep_fetch_legacy
+ $(warning WARNING: '$(1)' dependency configuration uses deprecated format.) \
+ git clone -q -n -- $(word 1,$(dep_$(1))) $(DEPS_DIR)/$(1); \
+ cd $(DEPS_DIR)/$(1) && git checkout -q $(if $(word 2,$(dep_$(1))),$(word 2,$(dep_$(1))),master);
+endef
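+
+# The deprecated format warned about above lists the repository URL and an
+# optional commit directly (illustrative placeholder URL):
+#   dep_cowlib = https://github.com/ninenines/cowlib master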
+
+define dep_target
+$(DEPS_DIR)/$(call dep_name,$1): | $(ERLANG_MK_TMP)
+ $(eval DEP_NAME := $(call dep_name,$1))
+ $(eval DEP_STR := $(if $(filter $1,$(DEP_NAME)),$1,"$1 ($(DEP_NAME))"))
+ $(verbose) if test -d $(APPS_DIR)/$(DEP_NAME); then \
+ echo "Error: Dependency" $(DEP_STR) "conflicts with application found in $(APPS_DIR)/$(DEP_NAME)." >&2; \
+ exit 17; \
+ fi
+ $(verbose) mkdir -p $(DEPS_DIR)
+ $(dep_verbose) $(call dep_fetch_$(strip $(call dep_fetch,$(1))),$(1))
+ $(verbose) if [ -f $(DEPS_DIR)/$(1)/configure.ac -o -f $(DEPS_DIR)/$(1)/configure.in ] \
+ && [ ! -f $(DEPS_DIR)/$(1)/configure ]; then \
+ echo " AUTO " $(DEP_STR); \
+ cd $(DEPS_DIR)/$(1) && autoreconf -Wall -vif -I m4; \
+ fi
+ - $(verbose) if [ -f $(DEPS_DIR)/$(DEP_NAME)/configure ]; then \
+ echo " CONF " $(DEP_STR); \
+ cd $(DEPS_DIR)/$(DEP_NAME) && ./configure; \
+ fi
+ifeq ($(filter $(1),$(NO_AUTOPATCH)),)
+ $(verbose) $$(MAKE) --no-print-directory autopatch-$(DEP_NAME)
+endif
+
+.PHONY: autopatch-$(call dep_name,$1)
+
+autopatch-$(call dep_name,$1)::
+ $(verbose) if [ "$(1)" = "amqp_client" -a "$(RABBITMQ_CLIENT_PATCH)" ]; then \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \
+ echo " PATCH Downloading rabbitmq-codegen"; \
+ git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \
+ fi; \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-server ]; then \
+ echo " PATCH Downloading rabbitmq-server"; \
+ git clone https://github.com/rabbitmq/rabbitmq-server.git $(DEPS_DIR)/rabbitmq-server; \
+ fi; \
+ ln -s $(DEPS_DIR)/amqp_client/deps/rabbit_common-0.0.0 $(DEPS_DIR)/rabbit_common; \
+ elif [ "$(1)" = "rabbit" -a "$(RABBITMQ_SERVER_PATCH)" ]; then \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \
+ echo " PATCH Downloading rabbitmq-codegen"; \
+ git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \
+ fi \
+ elif [ "$1" = "elixir" -a "$(ELIXIR_PATCH)" ]; then \
+ ln -s lib/elixir/ebin $(DEPS_DIR)/elixir/; \
+ else \
+ $$(call dep_autopatch,$(call dep_name,$1)) \
+ fi
+endef
+
+$(foreach dep,$(BUILD_DEPS) $(DEPS),$(eval $(call dep_target,$(dep))))
+
+ifndef IS_APP
+clean:: clean-apps
+
+clean-apps:
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ $(MAKE) -C $$dep clean IS_APP=1; \
+ done
+
+distclean:: distclean-apps
+
+distclean-apps:
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ $(MAKE) -C $$dep distclean IS_APP=1; \
+ done
+endif
+
+ifndef SKIP_DEPS
+distclean:: distclean-deps
+
+distclean-deps:
+ $(gen_verbose) rm -rf $(DEPS_DIR)
+endif
+
+# Forward-declare variables used in core/deps-tools.mk. This is required
+# in case plugins use them.
+
+ERLANG_MK_RECURSIVE_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-deps-list.log
+ERLANG_MK_RECURSIVE_DOC_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-doc-deps-list.log
+ERLANG_MK_RECURSIVE_REL_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-rel-deps-list.log
+ERLANG_MK_RECURSIVE_TEST_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-test-deps-list.log
+ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-shell-deps-list.log
+
+ERLANG_MK_QUERY_DEPS_FILE = $(ERLANG_MK_TMP)/query-deps.log
+ERLANG_MK_QUERY_DOC_DEPS_FILE = $(ERLANG_MK_TMP)/query-doc-deps.log
+ERLANG_MK_QUERY_REL_DEPS_FILE = $(ERLANG_MK_TMP)/query-rel-deps.log
+ERLANG_MK_QUERY_TEST_DEPS_FILE = $(ERLANG_MK_TMP)/query-test-deps.log
+ERLANG_MK_QUERY_SHELL_DEPS_FILE = $(ERLANG_MK_TMP)/query-shell-deps.log
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: clean-app
+
+# Configuration.
+
+ERLC_OPTS ?= -Werror +debug_info +warn_export_vars +warn_shadow_vars \
+ +warn_obsolete_guard # +bin_opt_info +warn_export_all +warn_missing_spec
+COMPILE_FIRST ?=
+COMPILE_FIRST_PATHS = $(addprefix src/,$(addsuffix .erl,$(COMPILE_FIRST)))
+ERLC_EXCLUDE ?=
+ERLC_EXCLUDE_PATHS = $(addprefix src/,$(addsuffix .erl,$(ERLC_EXCLUDE)))
+
+ERLC_ASN1_OPTS ?=
+
+ERLC_MIB_OPTS ?=
+COMPILE_MIB_FIRST ?=
+COMPILE_MIB_FIRST_PATHS = $(addprefix mibs/,$(addsuffix .mib,$(COMPILE_MIB_FIRST)))
+
+# Verbosity.
+
+app_verbose_0 = @echo " APP " $(PROJECT);
+app_verbose_2 = set -x;
+app_verbose = $(app_verbose_$(V))
+
+appsrc_verbose_0 = @echo " APP " $(PROJECT).app.src;
+appsrc_verbose_2 = set -x;
+appsrc_verbose = $(appsrc_verbose_$(V))
+
+makedep_verbose_0 = @echo " DEPEND" $(PROJECT).d;
+makedep_verbose_2 = set -x;
+makedep_verbose = $(makedep_verbose_$(V))
+
+erlc_verbose_0 = @echo " ERLC " $(filter-out $(patsubst %,%.erl,$(ERLC_EXCLUDE)),\
+ $(filter %.erl %.core,$(?F)));
+erlc_verbose_2 = set -x;
+erlc_verbose = $(erlc_verbose_$(V))
+
+xyrl_verbose_0 = @echo " XYRL " $(filter %.xrl %.yrl,$(?F));
+xyrl_verbose_2 = set -x;
+xyrl_verbose = $(xyrl_verbose_$(V))
+
+asn1_verbose_0 = @echo " ASN1 " $(filter %.asn1,$(?F));
+asn1_verbose_2 = set -x;
+asn1_verbose = $(asn1_verbose_$(V))
+
+mib_verbose_0 = @echo " MIB " $(filter %.bin %.mib,$(?F));
+mib_verbose_2 = set -x;
+mib_verbose = $(mib_verbose_$(V))
+
+ifneq ($(wildcard src/),)
+
+# Targets.
+
+app:: $(if $(wildcard ebin/test),clean) deps
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d
+ $(verbose) $(MAKE) --no-print-directory app-build
+
+ifeq ($(wildcard src/$(PROJECT_MOD).erl),)
+define app_file
+{application, '$(PROJECT)', [
+ {description, "$(PROJECT_DESCRIPTION)"},
+ {vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP),
+ {id$(comma)$(space)"$(1)"}$(comma))
+ {modules, [$(call comma_list,$(2))]},
+ {registered, []},
+ {applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(foreach dep,$(DEPS),$(call dep_name,$(dep))))]},
+ {env, $(subst \,\\,$(PROJECT_ENV))}$(if $(findstring {,$(PROJECT_APP_EXTRA_KEYS)),$(comma)$(newline)$(tab)$(subst \,\\,$(PROJECT_APP_EXTRA_KEYS)),)
+]}.
+endef
+else
+define app_file
+{application, '$(PROJECT)', [
+ {description, "$(PROJECT_DESCRIPTION)"},
+ {vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP),
+ {id$(comma)$(space)"$(1)"}$(comma))
+ {modules, [$(call comma_list,$(2))]},
+ {registered, [$(call comma_list,$(PROJECT)_sup $(PROJECT_REGISTERED))]},
+ {applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(foreach dep,$(DEPS),$(call dep_name,$(dep))))]},
+ {mod, {$(PROJECT_MOD), []}},
+ {env, $(subst \,\\,$(PROJECT_ENV))}$(if $(findstring {,$(PROJECT_APP_EXTRA_KEYS)),$(comma)$(newline)$(tab)$(subst \,\\,$(PROJECT_APP_EXTRA_KEYS)),)
+]}.
+endef
+endif
+
+app-build: ebin/$(PROJECT).app
+ $(verbose) :
+
+# Source files.
+
+ALL_SRC_FILES := $(sort $(call core_find,src/,*))
+
+ERL_FILES := $(filter %.erl,$(ALL_SRC_FILES))
+CORE_FILES := $(filter %.core,$(ALL_SRC_FILES))
+
+# ASN.1 files.
+
+ifneq ($(wildcard asn1/),)
+ASN1_FILES = $(sort $(call core_find,asn1/,*.asn1))
+ERL_FILES += $(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES))))
+
+define compile_asn1
+ $(verbose) mkdir -p include/
+ $(asn1_verbose) erlc -v -I include/ -o asn1/ +noobj $(ERLC_ASN1_OPTS) $(1)
+ $(verbose) mv asn1/*.erl src/
+ -$(verbose) mv asn1/*.hrl include/
+ $(verbose) mv asn1/*.asn1db include/
+endef
+
+$(PROJECT).d:: $(ASN1_FILES)
+ $(if $(strip $?),$(call compile_asn1,$?))
+endif
+
+# SNMP MIB files.
+
+ifneq ($(wildcard mibs/),)
+MIB_FILES = $(sort $(call core_find,mibs/,*.mib))
+
+$(PROJECT).d:: $(COMPILE_MIB_FIRST_PATHS) $(MIB_FILES)
+ $(verbose) mkdir -p include/ priv/mibs/
+ $(mib_verbose) erlc -v $(ERLC_MIB_OPTS) -o priv/mibs/ -I priv/mibs/ $?
+ $(mib_verbose) erlc -o include/ -- $(addprefix priv/mibs/,$(patsubst %.mib,%.bin,$(notdir $?)))
+endif
+
+# Leex and Yecc files.
+
+XRL_FILES := $(filter %.xrl,$(ALL_SRC_FILES))
+XRL_ERL_FILES = $(addprefix src/,$(patsubst %.xrl,%.erl,$(notdir $(XRL_FILES))))
+ERL_FILES += $(XRL_ERL_FILES)
+
+YRL_FILES := $(filter %.yrl,$(ALL_SRC_FILES))
+YRL_ERL_FILES = $(addprefix src/,$(patsubst %.yrl,%.erl,$(notdir $(YRL_FILES))))
+ERL_FILES += $(YRL_ERL_FILES)
+
+$(PROJECT).d:: $(XRL_FILES) $(YRL_FILES)
+ $(if $(strip $?),$(xyrl_verbose) erlc -v -o src/ $(YRL_ERLC_OPTS) $?)
+
+# Erlang and Core Erlang files.
+
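+# makedep.erl below scans every source file for behaviours, parse_transforms,
+# includes and imports, then writes $(PROJECT).d with one dependency rule per
+# file plus a COMPILE_FIRST ordering derived from a topological sort.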
+define makedep.erl
+ E = ets:new(makedep, [bag]),
+ G = digraph:new([acyclic]),
+ ErlFiles = lists:usort(string:tokens("$(ERL_FILES)", " ")),
+ DepsDir = "$(call core_native_path,$(DEPS_DIR))",
+ AppsDir = "$(call core_native_path,$(APPS_DIR))",
+ DepsDirsSrc = "$(if $(wildcard $(DEPS_DIR)/*/src), $(call core_native_path,$(wildcard $(DEPS_DIR)/*/src)))",
+ DepsDirsInc = "$(if $(wildcard $(DEPS_DIR)/*/include), $(call core_native_path,$(wildcard $(DEPS_DIR)/*/include)))",
+ AppsDirsSrc = "$(if $(wildcard $(APPS_DIR)/*/src), $(call core_native_path,$(wildcard $(APPS_DIR)/*/src)))",
+ AppsDirsInc = "$(if $(wildcard $(APPS_DIR)/*/include), $(call core_native_path,$(wildcard $(APPS_DIR)/*/include)))",
+ DepsDirs = lists:usort(string:tokens(DepsDirsSrc++DepsDirsInc, " ")),
+ AppsDirs = lists:usort(string:tokens(AppsDirsSrc++AppsDirsInc, " ")),
+ Modules = [{list_to_atom(filename:basename(F, ".erl")), F} || F <- ErlFiles],
+ Add = fun (Mod, Dep) ->
+ case lists:keyfind(Dep, 1, Modules) of
+ false -> ok;
+ {_, DepFile} ->
+ {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+ ets:insert(E, {ModFile, DepFile}),
+ digraph:add_vertex(G, Mod),
+ digraph:add_vertex(G, Dep),
+ digraph:add_edge(G, Mod, Dep)
+ end
+ end,
+ AddHd = fun (F, Mod, DepFile) ->
+ case file:open(DepFile, [read]) of
+ {error, enoent} ->
+ ok;
+ {ok, Fd} ->
+ {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+ case ets:match(E, {ModFile, DepFile}) of
+ [] ->
+ ets:insert(E, {ModFile, DepFile}),
+ F(F, Fd, Mod,0);
+ _ -> ok
+ end
+ end
+ end,
+ SearchHrl = fun
+ F(_Hrl, []) -> {error,enoent};
+ F(Hrl, [Dir|Dirs]) ->
+ HrlF = filename:join([Dir,Hrl]),
+ case filelib:is_file(HrlF) of
+ true ->
+ {ok, HrlF};
+ false -> F(Hrl,Dirs)
+ end
+ end,
+ Attr = fun
+ (_F, Mod, behavior, Dep) ->
+ Add(Mod, Dep);
+ (_F, Mod, behaviour, Dep) ->
+ Add(Mod, Dep);
+ (_F, Mod, compile, {parse_transform, Dep}) ->
+ Add(Mod, Dep);
+ (_F, Mod, compile, Opts) when is_list(Opts) ->
+ case proplists:get_value(parse_transform, Opts) of
+ undefined -> ok;
+ Dep -> Add(Mod, Dep)
+ end;
+ (F, Mod, include, Hrl) ->
+ case SearchHrl(Hrl, ["src", "include",AppsDir,DepsDir]++AppsDirs++DepsDirs) of
+ {ok, FoundHrl} -> AddHd(F, Mod, FoundHrl);
+ {error, _} -> false
+ end;
+ (F, Mod, include_lib, Hrl) ->
+ case SearchHrl(Hrl, ["src", "include",AppsDir,DepsDir]++AppsDirs++DepsDirs) of
+ {ok, FoundHrl} -> AddHd(F, Mod, FoundHrl);
+ {error, _} -> false
+ end;
+ (F, Mod, import, {Imp, _}) ->
+ IsFile =
+ case lists:keyfind(Imp, 1, Modules) of
+ false -> false;
+ {_, FilePath} -> filelib:is_file(FilePath)
+ end,
+ case IsFile of
+ false -> ok;
+ true -> Add(Mod, Imp)
+ end;
+ (_, _, _, _) -> ok
+ end,
+ MakeDepend = fun
+ (F, Fd, Mod, StartLocation) ->
+ {ok, Filename} = file:pid2name(Fd),
+ case io:parse_erl_form(Fd, undefined, StartLocation) of
+ {ok, AbsData, EndLocation} ->
+ case AbsData of
+ {attribute, _, Key, Value} ->
+ Attr(F, Mod, Key, Value),
+ F(F, Fd, Mod, EndLocation);
+ _ -> F(F, Fd, Mod, EndLocation)
+ end;
+ {eof, _ } -> file:close(Fd);
+ {error, ErrorDescription } ->
+ file:close(Fd);
+ {error, ErrorInfo, ErrorLocation} ->
+ F(F, Fd, Mod, ErrorLocation)
+ end,
+ ok
+ end,
+ [begin
+ Mod = list_to_atom(filename:basename(F, ".erl")),
+ case file:open(F, [read]) of
+ {ok, Fd} -> MakeDepend(MakeDepend, Fd, Mod,0);
+ {error, enoent} -> ok
+ end
+ end || F <- ErlFiles],
+ Depend = sofs:to_external(sofs:relation_to_family(sofs:relation(ets:tab2list(E)))),
+ CompileFirst = [X || X <- lists:reverse(digraph_utils:topsort(G)), [] =/= digraph:in_neighbours(G, X)],
+ TargetPath = fun(Target) ->
+ case lists:keyfind(Target, 1, Modules) of
+ false -> "";
+ {_, DepFile} ->
+ DirSubname = tl(string:tokens(filename:dirname(DepFile), "/")),
+ string:join(DirSubname ++ [atom_to_list(Target)], "/")
+ end
+ end,
+ Output0 = [
+ "# Generated by Erlang.mk. Edit at your own risk!\n\n",
+ [[F, "::", [[" ", D] || D <- Deps], "; @touch \$$@\n"] || {F, Deps} <- Depend],
+ "\nCOMPILE_FIRST +=", [[" ", TargetPath(CF)] || CF <- CompileFirst], "\n"
+ ],
+ Output = case "é" of
+ [233] -> unicode:characters_to_binary(Output0);
+ _ -> Output0
+ end,
+ ok = file:write_file("$(1)", Output),
+ halt()
+endef
+
+ifeq ($(if $(NO_MAKEDEP),$(wildcard $(PROJECT).d),),)
+$(PROJECT).d:: $(ERL_FILES) $(call core_find,include/,*.hrl) $(MAKEFILE_LIST)
+ $(makedep_verbose) $(call erlang,$(call makedep.erl,$@))
+endif
+
+ifeq ($(IS_APP)$(IS_DEP),)
+ifneq ($(words $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES)),0)
+# Rebuild everything when the Makefile changes.
+$(ERLANG_MK_TMP)/last-makefile-change: $(MAKEFILE_LIST) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES); \
+ touch -c $(PROJECT).d; \
+ fi
+ $(verbose) touch $@
+
+$(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES):: $(ERLANG_MK_TMP)/last-makefile-change
+ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change
+endif
+endif
+
+$(PROJECT).d::
+ $(verbose) :
+
+include $(wildcard $(PROJECT).d)
+
+ebin/$(PROJECT).app:: ebin/
+
+ebin/:
+ $(verbose) mkdir -p ebin/
+
+define compile_erl
+ $(erlc_verbose) erlc -v $(if $(IS_DEP),$(filter-out -Werror,$(ERLC_OPTS)),$(ERLC_OPTS)) -o ebin/ \
+ -pa ebin/ -I include/ $(filter-out $(ERLC_EXCLUDE_PATHS),$(COMPILE_FIRST_PATHS) $(1))
+endef
+
+define validate_app_file
+ case file:consult("ebin/$(PROJECT).app") of
+ {ok, _} -> halt();
+ _ -> halt(1)
+ end
+endef
+
+ebin/$(PROJECT).app:: $(ERL_FILES) $(CORE_FILES) $(wildcard src/$(PROJECT).app.src)
+ $(eval FILES_TO_COMPILE := $(filter-out src/$(PROJECT).app.src,$?))
+ $(if $(strip $(FILES_TO_COMPILE)),$(call compile_erl,$(FILES_TO_COMPILE)))
+# Older git versions do not have the --first-parent flag. Do without in that case.
+ $(eval GITDESCRIBE := $(shell git describe --dirty --abbrev=7 --tags --always --first-parent 2>/dev/null \
+ || git describe --dirty --abbrev=7 --tags --always 2>/dev/null || true))
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(filter-out $(ERLC_EXCLUDE_PATHS),$(ERL_FILES) $(CORE_FILES) $(BEAM_FILES)))))))
+ifeq ($(wildcard src/$(PROJECT).app.src),)
+ $(app_verbose) printf '$(subst %,%%,$(subst $(newline),\n,$(subst ','\'',$(call app_file,$(GITDESCRIBE),$(MODULES)))))' \
+ > ebin/$(PROJECT).app
+ $(verbose) if ! $(call erlang,$(call validate_app_file)); then \
+ echo "The .app file produced is invalid. Please verify the value of PROJECT_ENV." >&2; \
+ exit 1; \
+ fi
+else
+ $(verbose) if [ -z "$$(grep -e '^[^%]*{\s*modules\s*,' src/$(PROJECT).app.src)" ]; then \
+ echo "Empty modules entry not found in $(PROJECT).app.src. Please consult the erlang.mk documentation for instructions." >&2; \
+ exit 1; \
+ fi
+ $(appsrc_verbose) cat src/$(PROJECT).app.src \
+ | sed "s/{[[:space:]]*modules[[:space:]]*,[[:space:]]*\[\]}/{modules, \[$(call comma_list,$(MODULES))\]}/" \
+ | sed "s/{id,[[:space:]]*\"git\"}/{id, \"$(subst /,\/,$(GITDESCRIBE))\"}/" \
+ > ebin/$(PROJECT).app
+endif
+ifneq ($(wildcard src/$(PROJECT).appup),)
+ $(verbose) cp src/$(PROJECT).appup ebin/
+endif
+
+clean:: clean-app
+
+clean-app:
+ $(gen_verbose) rm -rf $(PROJECT).d ebin/ priv/mibs/ $(XRL_ERL_FILES) $(YRL_ERL_FILES) \
+ $(addprefix include/,$(patsubst %.mib,%.hrl,$(notdir $(MIB_FILES)))) \
+ $(addprefix include/,$(patsubst %.asn1,%.hrl,$(notdir $(ASN1_FILES)))) \
+ $(addprefix include/,$(patsubst %.asn1,%.asn1db,$(notdir $(ASN1_FILES)))) \
+ $(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES))))
+
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Viktor Söderqvist <viktor@zuiderkwast.se>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: docs-deps
+
+# Configuration.
+
+ALL_DOC_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(DOC_DEPS))
+
+# Targets.
+
+$(foreach dep,$(DOC_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+doc-deps:
+else
+doc-deps: $(ALL_DOC_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_DOC_DEPS_DIRS) ; do $(MAKE) -C $$dep IS_DEP=1; done
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: rel-deps
+
+# Configuration.
+
+ALL_REL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(REL_DEPS))
+
+# Targets.
+
+$(foreach dep,$(REL_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+rel-deps:
+else
+rel-deps: $(ALL_REL_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_REL_DEPS_DIRS) ; do $(MAKE) -C $$dep; done
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: test-deps test-dir test-build clean-test-dir
+
+# Configuration.
+
+TEST_DIR ?= $(CURDIR)/test
+
+ALL_TEST_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(TEST_DEPS))
+
+TEST_ERLC_OPTS ?= +debug_info +warn_export_vars +warn_shadow_vars +warn_obsolete_guard
+TEST_ERLC_OPTS += -DTEST=1
+
+# Targets.
+
+$(foreach dep,$(TEST_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+test-deps:
+else
+test-deps: $(ALL_TEST_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_TEST_DEPS_DIRS) ; do \
+ if [ -z "$(strip $(FULL))" ] && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ else \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ fi \
+ done
+endif
+
+ifneq ($(wildcard $(TEST_DIR)),)
+test-dir: $(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build
+ @:
+
+test_erlc_verbose_0 = @echo " ERLC " $(filter-out $(patsubst %,%.erl,$(ERLC_EXCLUDE)),\
+ $(filter %.erl %.core,$(notdir $(FILES_TO_COMPILE))));
+test_erlc_verbose_2 = set -x;
+test_erlc_verbose = $(test_erlc_verbose_$(V))
+
+define compile_test_erl
+ $(test_erlc_verbose) erlc -v $(TEST_ERLC_OPTS) -o $(TEST_DIR) \
+ -pa ebin/ -I include/ $(1)
+endef
+
+ERL_TEST_FILES = $(call core_find,$(TEST_DIR)/,*.erl)
+$(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build: $(ERL_TEST_FILES) $(MAKEFILE_LIST)
+ $(eval FILES_TO_COMPILE := $(if $(filter $(MAKEFILE_LIST),$?),$(filter $(ERL_TEST_FILES),$^),$?))
+ $(if $(strip $(FILES_TO_COMPILE)),$(call compile_test_erl,$(FILES_TO_COMPILE)) && touch $@)
+endif
+
+test-build:: IS_TEST=1
+test-build:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build:: $(if $(wildcard src),$(if $(wildcard ebin/test),,clean)) $(if $(IS_APP),,deps test-deps)
+# We already compiled everything when IS_APP=1.
+ifndef IS_APP
+ifneq ($(wildcard src),)
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(verbose) $(MAKE) --no-print-directory app-build ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(gen_verbose) touch ebin/test
+endif
+ifneq ($(wildcard $(TEST_DIR)),)
+ $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+endif
+endif
+
+# Roughly the same as test-build, but when IS_APP=1.
+# We only care about compiling the current application.
+ifdef IS_APP
+test-build-app:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build-app:: deps test-deps
+ifneq ($(wildcard src),)
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(verbose) $(MAKE) --no-print-directory app-build ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(gen_verbose) touch ebin/test
+endif
+ifneq ($(wildcard $(TEST_DIR)),)
+ $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+endif
+endif
+
+clean:: clean-test-dir
+
+clean-test-dir:
+ifneq ($(wildcard $(TEST_DIR)/*.beam),)
+ $(gen_verbose) rm -f $(TEST_DIR)/*.beam $(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: rebar.config
+
+# We strip out -Werror because we don't want to fail due to
+# warnings when used as a dependency.
+
+compat_prepare_erlc_opts = $(shell echo "$1" | sed 's/, */,/g')
+
+define compat_convert_erlc_opts
+$(if $(filter-out -Werror,$1),\
+ $(if $(findstring +,$1),\
+ $(shell echo $1 | cut -b 2-)))
+endef
+
+define compat_erlc_opts_to_list
+[$(call comma_list,$(foreach o,$(call compat_prepare_erlc_opts,$1),$(call compat_convert_erlc_opts,$o)))]
+endef
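+
+# For example, `-Werror +debug_info +warn_export_vars' becomes the Erlang
+# list [debug_info,warn_export_vars]: -Werror is dropped and the leading
+# `+' is stripped from each remaining option.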
+
+define compat_rebar_config
+{deps, [
+$(call comma_list,$(foreach d,$(DEPS),\
+ $(if $(filter hex,$(call dep_fetch,$d)),\
+ {$(call dep_name,$d)$(comma)"$(call dep_repo,$d)"},\
+ {$(call dep_name,$d)$(comma)".*"$(comma){git,"$(call dep_repo,$d)"$(comma)"$(call dep_commit,$d)"}})))
+]}.
+{erl_opts, $(call compat_erlc_opts_to_list,$(ERLC_OPTS))}.
+endef
+
+rebar.config:
+ $(gen_verbose) $(call core_render,compat_rebar_config,rebar.config)
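+
+# e.g. running `make rebar.config' renders compat_rebar_config above into a
+# rebar.config mirroring DEPS and ERLC_OPTS, for use as a rebar dependency.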
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter asciideck,$(DEPS) $(DOC_DEPS)),asciideck)
+
+.PHONY: asciidoc asciidoc-guide asciidoc-manual install-asciidoc distclean-asciidoc-guide distclean-asciidoc-manual
+
+# Core targets.
+
+docs:: asciidoc
+
+distclean:: distclean-asciidoc-guide distclean-asciidoc-manual
+
+# Plugin-specific targets.
+
+asciidoc: asciidoc-guide asciidoc-manual
+
+# User guide.
+
+ifeq ($(wildcard doc/src/guide/book.asciidoc),)
+asciidoc-guide:
+else
+asciidoc-guide: distclean-asciidoc-guide doc-deps
+ a2x -v -f pdf doc/src/guide/book.asciidoc && mv doc/src/guide/book.pdf doc/guide.pdf
+ a2x -v -f chunked doc/src/guide/book.asciidoc && mv doc/src/guide/book.chunked/ doc/html/
+
+distclean-asciidoc-guide:
+ $(gen_verbose) rm -rf doc/html/ doc/guide.pdf
+endif
+
+# Man pages.
+
+ASCIIDOC_MANUAL_FILES := $(wildcard doc/src/manual/*.asciidoc)
+
+ifeq ($(ASCIIDOC_MANUAL_FILES),)
+asciidoc-manual:
+else
+
+# Configuration.
+
+MAN_INSTALL_PATH ?= /usr/local/share/man
+MAN_SECTIONS ?= 3 7
+MAN_PROJECT ?= $(shell echo $(PROJECT) | sed 's/^./\U&\E/')
+MAN_VERSION ?= $(PROJECT_VERSION)
+
+# Plugin-specific targets.
+
+define asciidoc2man.erl
+try
+ [begin
+ io:format(" ADOC ~s~n", [F]),
+ ok = asciideck:to_manpage(asciideck:parse_file(F), #{
+ compress => gzip,
+ outdir => filename:dirname(F),
+ extra2 => "$(MAN_PROJECT) $(MAN_VERSION)",
+ extra3 => "$(MAN_PROJECT) Function Reference"
+ })
+ end || F <- [$(shell echo $(addprefix $(comma)\",$(addsuffix \",$1)) | sed 's/^.//')]],
+ halt(0)
+catch C:E ->
+ io:format("Exception ~p:~p~nStacktrace: ~p~n", [C, E, erlang:get_stacktrace()]),
+ halt(1)
+end.
+endef
+
+asciidoc-manual:: doc-deps
+
+asciidoc-manual:: $(ASCIIDOC_MANUAL_FILES)
+ $(gen_verbose) $(call erlang,$(call asciidoc2man.erl,$?))
+ $(verbose) $(foreach s,$(MAN_SECTIONS),mkdir -p doc/man$s/ && mv doc/src/manual/*.$s.gz doc/man$s/;)
+
+install-docs:: install-asciidoc
+
+install-asciidoc: asciidoc-manual
+ $(foreach s,$(MAN_SECTIONS),\
+ mkdir -p $(MAN_INSTALL_PATH)/man$s/ && \
+ install -g `id -g` -o `id -u` -m 0644 doc/man$s/*.gz $(MAN_INSTALL_PATH)/man$s/;)
+
+distclean-asciidoc-manual:
+ $(gen_verbose) rm -rf $(addprefix doc/man,$(MAN_SECTIONS))
+endif
+endif
+
+# Copyright (c) 2014-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: bootstrap bootstrap-lib bootstrap-rel new list-templates
+
+# Core targets.
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Bootstrap targets:" \
+ " bootstrap Generate a skeleton of an OTP application" \
+ " bootstrap-lib Generate a skeleton of an OTP library" \
+ " bootstrap-rel Generate the files needed to build a release" \
+ " new-app in=NAME Create a new local OTP application NAME" \
+ " new-lib in=NAME Create a new local OTP library NAME" \
+ " new t=TPL n=NAME Generate a module NAME based on the template TPL" \
+ " new t=T n=N in=APP Generate a module NAME based on the template TPL in APP" \
+ " list-templates List available templates"
+
+# Bootstrap templates.
+
+define bs_appsrc
+{application, $p, [
+ {description, ""},
+ {vsn, "0.1.0"},
+ {id, "git"},
+ {modules, []},
+ {registered, []},
+ {applications, [
+ kernel,
+ stdlib
+ ]},
+ {mod, {$p_app, []}},
+ {env, []}
+]}.
+endef
+
+define bs_appsrc_lib
+{application, $p, [
+ {description, ""},
+ {vsn, "0.1.0"},
+ {id, "git"},
+ {modules, []},
+ {registered, []},
+ {applications, [
+ kernel,
+ stdlib
+ ]}
+]}.
+endef
+
+# To prevent autocompletion issues with ZSH, we add "include erlang.mk"
+# separately during the actual bootstrap.
+define bs_Makefile
+PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.1.0
+$(if $(SP),
+# Whitespace to be used when creating files from templates.
+SP = $(SP)
+)
+endef
+
+define bs_apps_Makefile
+PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.1.0
+$(if $(SP),
+# Whitespace to be used when creating files from templates.
+SP = $(SP)
+)
+# Make sure we know where the applications are located.
+ROOT_DIR ?= $(call core_relpath,$(dir $(ERLANG_MK_FILENAME)),$(APPS_DIR)/app)
+APPS_DIR ?= ..
+DEPS_DIR ?= $(call core_relpath,$(DEPS_DIR),$(APPS_DIR)/app)
+
+include $$(ROOT_DIR)/erlang.mk
+endef
+
+define bs_app
+-module($p_app).
+-behaviour(application).
+
+-export([start/2]).
+-export([stop/1]).
+
+start(_Type, _Args) ->
+ $p_sup:start_link().
+
+stop(_State) ->
+ ok.
+endef
+
+define bs_relx_config
+{release, {$p_release, "1"}, [$p, sasl, runtime_tools]}.
+{extended_start_script, true}.
+{sys_config, "config/sys.config"}.
+{vm_args, "config/vm.args"}.
+endef
+
+define bs_sys_config
+[
+].
+endef
+
+define bs_vm_args
+-name $p@127.0.0.1
+-setcookie $p
+-heart
+endef
+
+# Normal templates.
+
+define tpl_supervisor
+-module($(n)).
+-behaviour(supervisor).
+
+-export([start_link/0]).
+-export([init/1]).
+
+start_link() ->
+ supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+init([]) ->
+ Procs = [],
+ {ok, {{one_for_one, 1, 5}, Procs}}.
+endef
+
+define tpl_gen_server
+-module($(n)).
+-behaviour(gen_server).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_server.
+-export([init/1]).
+-export([handle_call/3]).
+-export([handle_cast/2]).
+-export([handle_info/2]).
+-export([terminate/2]).
+-export([code_change/3]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_server:start_link(?MODULE, [], []).
+
+%% gen_server.
+
+init([]) ->
+ {ok, #state{}}.
+
+handle_call(_Request, _From, State) ->
+ {reply, ignored, State}.
+
+handle_cast(_Msg, State) ->
+ {noreply, State}.
+
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+endef
+
+define tpl_module
+-module($(n)).
+-export([]).
+endef
+
+define tpl_cowboy_http
+-module($(n)).
+-behaviour(cowboy_http_handler).
+
+-export([init/3]).
+-export([handle/2]).
+-export([terminate/3]).
+
+-record(state, {
+}).
+
+init(_, Req, _Opts) ->
+ {ok, Req, #state{}}.
+
+handle(Req, State=#state{}) ->
+ {ok, Req2} = cowboy_req:reply(200, Req),
+ {ok, Req2, State}.
+
+terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_gen_fsm
+-module($(n)).
+-behaviour(gen_fsm).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_fsm.
+-export([init/1]).
+-export([state_name/2]).
+-export([handle_event/3]).
+-export([state_name/3]).
+-export([handle_sync_event/4]).
+-export([handle_info/3]).
+-export([terminate/3]).
+-export([code_change/4]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_fsm:start_link(?MODULE, [], []).
+
+%% gen_fsm.
+
+init([]) ->
+ {ok, state_name, #state{}}.
+
+state_name(_Event, StateData) ->
+ {next_state, state_name, StateData}.
+
+handle_event(_Event, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+state_name(_Event, _From, StateData) ->
+ {reply, ignored, state_name, StateData}.
+
+handle_sync_event(_Event, _From, StateName, StateData) ->
+ {reply, ignored, StateName, StateData}.
+
+handle_info(_Info, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+terminate(_Reason, _StateName, _StateData) ->
+ ok.
+
+code_change(_OldVsn, StateName, StateData, _Extra) ->
+ {ok, StateName, StateData}.
+endef
+
+define tpl_gen_statem
+-module($(n)).
+-behaviour(gen_statem).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_statem.
+-export([callback_mode/0]).
+-export([init/1]).
+-export([state_name/3]).
+-export([handle_event/4]).
+-export([terminate/3]).
+-export([code_change/4]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_statem:start_link(?MODULE, [], []).
+
+%% gen_statem.
+
+callback_mode() ->
+ state_functions.
+
+init([]) ->
+ {ok, state_name, #state{}}.
+
+state_name(_EventType, _EventData, StateData) ->
+ {next_state, state_name, StateData}.
+
+handle_event(_EventType, _EventData, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+terminate(_Reason, _StateName, _StateData) ->
+ ok.
+
+code_change(_OldVsn, StateName, StateData, _Extra) ->
+ {ok, StateName, StateData}.
+endef
+
+define tpl_cowboy_loop
+-module($(n)).
+-behaviour(cowboy_loop_handler).
+
+-export([init/3]).
+-export([info/3]).
+-export([terminate/3]).
+
+-record(state, {
+}).
+
+init(_, Req, _Opts) ->
+ {loop, Req, #state{}, 5000, hibernate}.
+
+info(_Info, Req, State) ->
+ {loop, Req, State, hibernate}.
+
+terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_cowboy_rest
+-module($(n)).
+
+-export([init/3]).
+-export([content_types_provided/2]).
+-export([get_html/2]).
+
+init(_, _Req, _Opts) ->
+ {upgrade, protocol, cowboy_rest}.
+
+content_types_provided(Req, State) ->
+ {[{{<<"text">>, <<"html">>, '*'}, get_html}], Req, State}.
+
+get_html(Req, State) ->
+ {<<"<html><body>This is REST!</body></html>">>, Req, State}.
+endef
+
+define tpl_cowboy_ws
+-module($(n)).
+-behaviour(cowboy_websocket_handler).
+
+-export([init/3]).
+-export([websocket_init/3]).
+-export([websocket_handle/3]).
+-export([websocket_info/3]).
+-export([websocket_terminate/3]).
+
+-record(state, {
+}).
+
+init(_, _, _) ->
+ {upgrade, protocol, cowboy_websocket}.
+
+websocket_init(_, Req, _Opts) ->
+ Req2 = cowboy_req:compact(Req),
+ {ok, Req2, #state{}}.
+
+websocket_handle({text, Data}, Req, State) ->
+ {reply, {text, Data}, Req, State};
+websocket_handle({binary, Data}, Req, State) ->
+ {reply, {binary, Data}, Req, State};
+websocket_handle(_Frame, Req, State) ->
+ {ok, Req, State}.
+
+websocket_info(_Info, Req, State) ->
+ {ok, Req, State}.
+
+websocket_terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_ranch_protocol
+-module($(n)).
+-behaviour(ranch_protocol).
+
+-export([start_link/4]).
+-export([init/4]).
+
+-type opts() :: [].
+-export_type([opts/0]).
+
+-record(state, {
+ socket :: inet:socket(),
+ transport :: module()
+}).
+
+start_link(Ref, Socket, Transport, Opts) ->
+ Pid = spawn_link(?MODULE, init, [Ref, Socket, Transport, Opts]),
+ {ok, Pid}.
+
+-spec init(ranch:ref(), inet:socket(), module(), opts()) -> ok.
+init(Ref, Socket, Transport, _Opts) ->
+ ok = ranch:accept_ack(Ref),
+ loop(#state{socket=Socket, transport=Transport}).
+
+loop(State) ->
+ loop(State).
+endef
+
+# Plugin-specific targets.
+
+ifndef WS
+ifdef SP
+WS = $(subst a,,a $(wordlist 1,$(SP),a a a a a a a a a a a a a a a a a a a a))
+else
+WS = $(tab)
+endif
+endif
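+
+# e.g. `make bootstrap SP=4' makes generated files use 4-space indentation
+# instead of tabs; WS above holds that whitespace unit.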
+
+bootstrap:
+ifneq ($(wildcard src/),)
+ $(error Error: src/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(eval n := $(PROJECT)_sup)
+ $(verbose) $(call core_render,bs_Makefile,Makefile)
+ $(verbose) echo "include erlang.mk" >> Makefile
+ $(verbose) mkdir src/
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc,src/$(PROJECT).app.src)
+endif
+ $(verbose) $(call core_render,bs_app,src/$(PROJECT)_app.erl)
+ $(verbose) $(call core_render,tpl_supervisor,src/$(PROJECT)_sup.erl)
+
+bootstrap-lib:
+ifneq ($(wildcard src/),)
+ $(error Error: src/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(verbose) $(call core_render,bs_Makefile,Makefile)
+ $(verbose) echo "include erlang.mk" >> Makefile
+ $(verbose) mkdir src/
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc_lib,src/$(PROJECT).app.src)
+endif
+
+bootstrap-rel:
+ifneq ($(wildcard relx.config),)
+ $(error Error: relx.config already exists)
+endif
+ifneq ($(wildcard config/),)
+ $(error Error: config/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(verbose) $(call core_render,bs_relx_config,relx.config)
+ $(verbose) mkdir config/
+ $(verbose) $(call core_render,bs_sys_config,config/sys.config)
+ $(verbose) $(call core_render,bs_vm_args,config/vm.args)
+
+new-app:
+ifndef in
+ $(error Usage: $(MAKE) new-app in=APP)
+endif
+ifneq ($(wildcard $(APPS_DIR)/$in),)
+ $(error Error: Application $in already exists)
+endif
+ $(eval p := $(in))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(eval n := $(in)_sup)
+ $(verbose) mkdir -p $(APPS_DIR)/$p/src/
+ $(verbose) $(call core_render,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile)
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc,$(APPS_DIR)/$p/src/$p.app.src)
+endif
+ $(verbose) $(call core_render,bs_app,$(APPS_DIR)/$p/src/$p_app.erl)
+ $(verbose) $(call core_render,tpl_supervisor,$(APPS_DIR)/$p/src/$p_sup.erl)
+
+new-lib:
+ifndef in
+ $(error Usage: $(MAKE) new-lib in=APP)
+endif
+ifneq ($(wildcard $(APPS_DIR)/$in),)
+ $(error Error: Application $in already exists)
+endif
+ $(eval p := $(in))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(verbose) mkdir -p $(APPS_DIR)/$p/src/
+ $(verbose) $(call core_render,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile)
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc_lib,$(APPS_DIR)/$p/src/$p.app.src)
+endif
+
+new:
+ifeq ($(wildcard src/)$(in),)
+ $(error Error: src/ directory does not exist)
+endif
+ifndef t
+ $(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP])
+endif
+ifndef n
+ $(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP])
+endif
+ifdef in
+ $(verbose) $(call core_render,tpl_$(t),$(APPS_DIR)/$(in)/src/$(n).erl)
+else
+ $(verbose) $(call core_render,tpl_$(t),src/$(n).erl)
+endif
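+
+# e.g. `make new t=gen_server n=my_server' renders the tpl_gen_server
+# template above into src/my_server.erl (my_server is a placeholder name).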
+
+list-templates:
+	$(verbose) echo Available templates:
+ $(verbose) printf " %s\n" $(sort $(patsubst tpl_%,%,$(filter tpl_%,$(.VARIABLES))))
+
+# Copyright (c) 2014-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: clean-c_src distclean-c_src-env
+
+# Configuration.
+
+C_SRC_DIR ?= $(CURDIR)/c_src
+C_SRC_ENV ?= $(C_SRC_DIR)/env.mk
+C_SRC_OUTPUT ?= $(CURDIR)/priv/$(PROJECT)
+C_SRC_TYPE ?= shared
+
+# System type and C compiler/flags.
+
+ifeq ($(PLATFORM),msys2)
+ C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?= .exe
+ C_SRC_OUTPUT_SHARED_EXTENSION ?= .dll
+else
+ C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?=
+ C_SRC_OUTPUT_SHARED_EXTENSION ?= .so
+endif
+
+ifeq ($(C_SRC_TYPE),shared)
+ C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_SHARED_EXTENSION)
+else
+ C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_EXECUTABLE_EXTENSION)
+endif
+
+ifeq ($(PLATFORM),msys2)
+# We hardcode the compiler used on MSYS2. The default CC=cc does
+# not produce working code. The "gcc" MSYS2 package also doesn't.
+ CC = /mingw64/bin/gcc
+ export CC
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),darwin)
+ CC ?= cc
+ CFLAGS ?= -O3 -std=c99 -arch x86_64 -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -arch x86_64 -Wall
+ LDFLAGS ?= -arch x86_64 -flat_namespace -undefined suppress
+else ifeq ($(PLATFORM),freebsd)
+ CC ?= cc
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),linux)
+ CC ?= gcc
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+endif
+
+ifneq ($(PLATFORM),msys2)
+ CFLAGS += -fPIC
+ CXXFLAGS += -fPIC
+endif
+
+CFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
+CXXFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
+
+LDLIBS += -L"$(ERL_INTERFACE_LIB_DIR)" -lei
+
+# Verbosity.
+
+c_verbose_0 = @echo " C " $(filter-out $(notdir $(MAKEFILE_LIST) $(C_SRC_ENV)),$(^F));
+c_verbose = $(c_verbose_$(V))
+
+cpp_verbose_0 = @echo " CPP " $(filter-out $(notdir $(MAKEFILE_LIST) $(C_SRC_ENV)),$(^F));
+cpp_verbose = $(cpp_verbose_$(V))
+
+link_verbose_0 = @echo " LD " $(@F);
+link_verbose = $(link_verbose_$(V))
+
+# Targets.
+
+ifeq ($(wildcard $(C_SRC_DIR)),)
+else ifneq ($(wildcard $(C_SRC_DIR)/Makefile),)
+app:: app-c_src
+
+test-build:: app-c_src
+
+app-c_src:
+ $(MAKE) -C $(C_SRC_DIR)
+
+clean::
+ $(MAKE) -C $(C_SRC_DIR) clean
+
+else
+
+ifeq ($(SOURCES),)
+SOURCES := $(sort $(foreach pat,*.c *.C *.cc *.cpp,$(call core_find,$(C_SRC_DIR)/,$(pat))))
+endif
+OBJECTS = $(addsuffix .o, $(basename $(SOURCES)))
+
+COMPILE_C = $(c_verbose) $(CC) $(CFLAGS) $(CPPFLAGS) -c
+COMPILE_CPP = $(cpp_verbose) $(CXX) $(CXXFLAGS) $(CPPFLAGS) -c
+
+app:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
+
+test-build:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
+
+$(C_SRC_OUTPUT_FILE): $(OBJECTS)
+ $(verbose) mkdir -p $(dir $@)
+ $(link_verbose) $(CC) $(OBJECTS) \
+ $(LDFLAGS) $(if $(filter $(C_SRC_TYPE),shared),-shared) $(LDLIBS) \
+ -o $(C_SRC_OUTPUT_FILE)
+
+$(OBJECTS): $(MAKEFILE_LIST) $(C_SRC_ENV)
+
+%.o: %.c
+ $(COMPILE_C) $(OUTPUT_OPTION) $<
+
+%.o: %.cc
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+%.o: %.C
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+%.o: %.cpp
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+clean:: clean-c_src
+
+clean-c_src:
+ $(gen_verbose) rm -f $(C_SRC_OUTPUT_FILE) $(OBJECTS)
+
+endif
+
+ifneq ($(wildcard $(C_SRC_DIR)),)
+ERL_ERTS_DIR = $(shell $(ERL) -eval 'io:format("~s~n", [code:lib_dir(erts)]), halt().')
+
+$(C_SRC_ENV):
+ $(verbose) $(ERL) -eval "file:write_file(\"$(call core_native_path,$(C_SRC_ENV))\", \
+ io_lib:format( \
+ \"# Generated by Erlang.mk. Edit at your own risk!~n~n\" \
+ \"ERTS_INCLUDE_DIR ?= ~s/erts-~s/include/~n\" \
+ \"ERL_INTERFACE_INCLUDE_DIR ?= ~s~n\" \
+ \"ERL_INTERFACE_LIB_DIR ?= ~s~n\" \
+ \"ERTS_DIR ?= $(ERL_ERTS_DIR)~n\", \
+ [code:root_dir(), erlang:system_info(version), \
+ code:lib_dir(erl_interface, include), \
+ code:lib_dir(erl_interface, lib)])), \
+ halt()."
+
+distclean:: distclean-c_src-env
+
+distclean-c_src-env:
+ $(gen_verbose) rm -f $(C_SRC_ENV)
+
+-include $(C_SRC_ENV)
+
+ifneq ($(ERL_ERTS_DIR),$(ERTS_DIR))
+$(shell rm -f $(C_SRC_ENV))
+endif
+endif
+
+# Templates.
+
+define bs_c_nif
+#include "erl_nif.h"
+
+static int loads = 0;
+
+static int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
+{
+ /* Initialize private data. */
+ *priv_data = NULL;
+
+ loads++;
+
+ return 0;
+}
+
+static int upgrade(ErlNifEnv* env, void** priv_data, void** old_priv_data, ERL_NIF_TERM load_info)
+{
+ /* Convert the private data to the new version. */
+ *priv_data = *old_priv_data;
+
+ loads++;
+
+ return 0;
+}
+
+static void unload(ErlNifEnv* env, void* priv_data)
+{
+ if (loads == 1) {
+ /* Destroy the private data. */
+ }
+
+ loads--;
+}
+
+static ERL_NIF_TERM hello(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ if (enif_is_atom(env, argv[0])) {
+ return enif_make_tuple2(env,
+ enif_make_atom(env, "hello"),
+ argv[0]);
+ }
+
+ return enif_make_tuple2(env,
+ enif_make_atom(env, "error"),
+ enif_make_atom(env, "badarg"));
+}
+
+static ErlNifFunc nif_funcs[] = {
+ {"hello", 1, hello}
+};
+
+ERL_NIF_INIT($n, nif_funcs, load, NULL, upgrade, unload)
+endef
+
+define bs_erl_nif
+-module($n).
+
+-export([hello/1]).
+
+-on_load(on_load/0).
+on_load() ->
+ PrivDir = case code:priv_dir(?MODULE) of
+ {error, _} ->
+ AppPath = filename:dirname(filename:dirname(code:which(?MODULE))),
+ filename:join(AppPath, "priv");
+ Path ->
+ Path
+ end,
+ erlang:load_nif(filename:join(PrivDir, atom_to_list(?MODULE)), 0).
+
+hello(_) ->
+ erlang:nif_error({not_loaded, ?MODULE}).
+endef
+
+new-nif:
+ifneq ($(wildcard $(C_SRC_DIR)/$n.c),)
+ $(error Error: $(C_SRC_DIR)/$n.c already exists)
+endif
+ifneq ($(wildcard src/$n.erl),)
+ $(error Error: src/$n.erl already exists)
+endif
+ifndef n
+ $(error Usage: $(MAKE) new-nif n=NAME [in=APP])
+endif
+ifdef in
+ $(verbose) $(MAKE) -C $(APPS_DIR)/$(in)/ new-nif n=$n in=
+else
+ $(verbose) mkdir -p $(C_SRC_DIR) src/
+ $(verbose) $(call core_render,bs_c_nif,$(C_SRC_DIR)/$n.c)
+ $(verbose) $(call core_render,bs_erl_nif,src/$n.erl)
+endif
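+
+# e.g. $(MAKE) new-nif n=my_nif in=my_app   (hypothetical NIF and app names)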
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: ci ci-prepare ci-setup
+
+CI_OTP ?=
+CI_HIPE ?=
+CI_ERLLVM ?=
+
+ifeq ($(CI_VM),native)
+ERLC_OPTS += +native
+TEST_ERLC_OPTS += +native
+else ifeq ($(CI_VM),erllvm)
+ERLC_OPTS += +native +'{hipe, [to_llvm]}'
+TEST_ERLC_OPTS += +native +'{hipe, [to_llvm]}'
+endif
+
+ifeq ($(strip $(CI_OTP) $(CI_HIPE) $(CI_ERLLVM)),)
+ci::
+else
+
+ci:: $(addprefix ci-,$(CI_OTP) $(addsuffix -native,$(CI_HIPE)) $(addsuffix -erllvm,$(CI_ERLLVM)))
+
+ci-prepare: $(addprefix $(KERL_INSTALL_DIR)/,$(CI_OTP) $(addsuffix -native,$(CI_HIPE)))
+
+ci-setup::
+ $(verbose) :
+
+ci-extra::
+ $(verbose) :
+
+ci_verbose_0 = @echo " CI " $(1);
+ci_verbose = $(ci_verbose_$(V))
+
+define ci_target
+ci-$1: $(KERL_INSTALL_DIR)/$2
+ $(verbose) $(MAKE) --no-print-directory clean
+ $(ci_verbose) \
+ PATH="$(KERL_INSTALL_DIR)/$2/bin:$(PATH)" \
+ CI_OTP_RELEASE="$1" \
+ CT_OPTS="-label $1" \
+ CI_VM="$3" \
+ $(MAKE) ci-setup tests
+ $(verbose) $(MAKE) --no-print-directory ci-extra
+endef
+
+$(foreach otp,$(CI_OTP),$(eval $(call ci_target,$(otp),$(otp),otp)))
+$(foreach otp,$(CI_HIPE),$(eval $(call ci_target,$(otp)-native,$(otp)-native,native)))
+$(foreach otp,$(CI_ERLLVM),$(eval $(call ci_target,$(otp)-erllvm,$(otp)-native,erllvm)))
+
+$(foreach otp,$(filter-out $(ERLANG_OTP),$(CI_OTP)),$(eval $(call kerl_otp_target,$(otp))))
+$(foreach otp,$(filter-out $(ERLANG_HIPE),$(sort $(CI_HIPE) $(CI_ERLLVM))),$(eval $(call kerl_hipe_target,$(otp))))
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Continuous Integration targets:" \
+ " ci Run '$(MAKE) tests' on all configured Erlang versions." \
+ "" \
+ "The CI_OTP variable must be defined with the Erlang versions" \
+ "that must be tested. For example: CI_OTP = OTP-17.3.4 OTP-17.5.3"
+
+endif
+
+# Copyright (c) 2020, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifdef CONCUERROR_TESTS
+
+.PHONY: concuerror distclean-concuerror
+
+# Configuration
+
+CONCUERROR_LOGS_DIR ?= $(CURDIR)/logs
+CONCUERROR_OPTS ?=
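+
+# CONCUERROR_TESTS takes module:function pairs, e.g. (hypothetical names):
+#   CONCUERROR_TESTS = my_mod:my_test other_mod:another_test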
+
+# Core targets.
+
+check:: concuerror
+
+ifndef KEEP_LOGS
+distclean:: distclean-concuerror
+endif
+
+# Plugin-specific targets.
+
+$(ERLANG_MK_TMP)/Concuerror/bin/concuerror: | $(ERLANG_MK_TMP)
+ $(verbose) git clone https://github.com/parapluu/Concuerror $(ERLANG_MK_TMP)/Concuerror
+ $(verbose) $(MAKE) -C $(ERLANG_MK_TMP)/Concuerror
+
+$(CONCUERROR_LOGS_DIR):
+ $(verbose) mkdir -p $(CONCUERROR_LOGS_DIR)
+
+define concuerror_html_report
+<!DOCTYPE html>
+<html lang="en">
+<head>
+<meta charset="utf-8">
+<title>Concuerror HTML report</title>
+</head>
+<body>
+<h1>Concuerror HTML report</h1>
+<p>Generated on $(concuerror_date)</p>
+<ul>
+$(foreach t,$(concuerror_targets),<li><a href="$(t).txt">$(t)</a></li>)
+</ul>
+</body>
+</html>
+endef
+
+concuerror: $(addprefix concuerror-,$(subst :,-,$(CONCUERROR_TESTS)))
+ $(eval concuerror_date := $(shell date))
+ $(eval concuerror_targets := $^)
+ $(verbose) $(call core_render,concuerror_html_report,$(CONCUERROR_LOGS_DIR)/concuerror.html)
+
+define concuerror_target
+.PHONY: concuerror-$1-$2
+
+concuerror-$1-$2: test-build | $(ERLANG_MK_TMP)/Concuerror/bin/concuerror $(CONCUERROR_LOGS_DIR)
+ $(ERLANG_MK_TMP)/Concuerror/bin/concuerror \
+ --pa $(CURDIR)/ebin --pa $(TEST_DIR) \
+ -o $(CONCUERROR_LOGS_DIR)/concuerror-$1-$2.txt \
+ $$(CONCUERROR_OPTS) -m $1 -t $2
+endef
+
+$(foreach test,$(CONCUERROR_TESTS),$(eval $(call concuerror_target,$(firstword $(subst :, ,$(test))),$(lastword $(subst :, ,$(test))))))
+
+distclean-concuerror:
+ $(gen_verbose) rm -rf $(CONCUERROR_LOGS_DIR)
+
+endif
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: ct apps-ct distclean-ct
+
+# Configuration.
+
+CT_OPTS ?=
+
+ifneq ($(wildcard $(TEST_DIR)),)
+ifndef CT_SUITES
+CT_SUITES := $(sort $(subst _SUITE.erl,,$(notdir $(call core_find,$(TEST_DIR)/,*_SUITE.erl))))
+endif
+endif
+CT_SUITES ?=
+CT_LOGS_DIR ?= $(CURDIR)/logs
+
+# Core targets.
+
+tests:: ct
+
+ifndef KEEP_LOGS
+distclean:: distclean-ct
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Common_test targets:" \
+ " ct Run all the common_test suites for this project" \
+ "" \
+ "All your common_test suites have their associated targets." \
+		"A suite named http_SUITE can be run using the ct-http target."
+
+# Plugin-specific targets.
+
+CT_RUN = ct_run \
+ -no_auto_compile \
+ -noinput \
+ -pa $(CURDIR)/ebin $(TEST_DIR) \
+ -dir $(TEST_DIR) \
+ -logdir $(CT_LOGS_DIR)
+
+ifeq ($(CT_SUITES),)
+ct: $(if $(IS_APP)$(ROOT_DIR),,apps-ct)
+else
+# We do not run tests if we are in an apps/* with no test directory.
+ifneq ($(IS_APP)$(wildcard $(TEST_DIR)),1)
+ct: test-build $(if $(IS_APP)$(ROOT_DIR),,apps-ct)
+ $(verbose) mkdir -p $(CT_LOGS_DIR)
+ $(gen_verbose) $(CT_RUN) -sname ct_$(PROJECT) -suite $(addsuffix _SUITE,$(CT_SUITES)) $(CT_OPTS)
+endif
+endif
+
+ifneq ($(ALL_APPS_DIRS),)
+define ct_app_target
+apps-ct-$1: test-build
+ $$(MAKE) -C $1 ct IS_APP=1
+endef
+
+$(foreach app,$(ALL_APPS_DIRS),$(eval $(call ct_app_target,$(app))))
+
+apps-ct: $(addprefix apps-ct-,$(ALL_APPS_DIRS))
+endif
+
+ifdef t
+ifeq (,$(findstring :,$t))
+CT_EXTRA = -group $t
+else
+t_words = $(subst :, ,$t)
+CT_EXTRA = -group $(firstword $(t_words)) -case $(lastword $(t_words))
+endif
+else
+ifdef c
+CT_EXTRA = -case $c
+else
+CT_EXTRA =
+endif
+endif
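+
+# e.g. $(MAKE) ct-http t=my_group:my_case   (hypothetical group and case names)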
+
+define ct_suite_target
+ct-$(1): test-build
+ $(verbose) mkdir -p $(CT_LOGS_DIR)
+ $(gen_verbose_esc) $(CT_RUN) -sname ct_$(PROJECT) -suite $(addsuffix _SUITE,$(1)) $(CT_EXTRA) $(CT_OPTS)
+endef
+
+$(foreach test,$(CT_SUITES),$(eval $(call ct_suite_target,$(test))))
+
+distclean-ct:
+ $(gen_verbose) rm -rf $(CT_LOGS_DIR)
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: plt distclean-plt dialyze
+
+# Configuration.
+
+DIALYZER_PLT ?= $(CURDIR)/.$(PROJECT).plt
+export DIALYZER_PLT
+
+PLT_APPS ?=
+DIALYZER_DIRS ?= --src -r $(wildcard src) $(ALL_APPS_DIRS)
+DIALYZER_OPTS ?= -Werror_handling -Wrace_conditions -Wunmatched_returns # -Wunderspecs
+DIALYZER_PLT_OPTS ?=
+
+# Core targets.
+
+check:: dialyze
+
+distclean:: distclean-plt
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Dialyzer targets:" \
+ " plt Build a PLT file for this project" \
+ " dialyze Analyze the project using Dialyzer"
+
+# Plugin-specific targets.
+
+define filter_opts.erl
+ Opts = init:get_plain_arguments(),
+ {Filtered, _} = lists:foldl(fun
+ (O, {Os, true}) -> {[O|Os], false};
+ (O = "-D", {Os, _}) -> {[O|Os], true};
+ (O = [\\$$-, \\$$D, _ | _], {Os, _}) -> {[O|Os], false};
+ (O = "-I", {Os, _}) -> {[O|Os], true};
+ (O = [\\$$-, \\$$I, _ | _], {Os, _}) -> {[O|Os], false};
+ (O = "-pa", {Os, _}) -> {[O|Os], true};
+ (_, Acc) -> Acc
+ end, {[], false}, Opts),
+ io:format("~s~n", [string:join(lists:reverse(Filtered), " ")]),
+ halt().
+endef
+
+# DIALYZER_PLT is a variable understood directly by Dialyzer.
+#
+# We append the path to erts at the end of the PLT. This works
+# because the PLT file is in the external term format and the
+# function binary_to_term/1 ignores any trailing data.
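+#
+# Illustrative check (what the dialyze target below does): compare the
+# trailing path with the current erts location, e.g.
+#   tail -n1 .$(PROJECT).plt   # -> /usr/lib/erlang/lib/erts-12.3 (hypothetical)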
+$(DIALYZER_PLT): deps app
+ $(eval DEPS_LOG := $(shell test -f $(ERLANG_MK_TMP)/deps.log && \
+ while read p; do test -d $$p/ebin && echo $$p/ebin; done <$(ERLANG_MK_TMP)/deps.log))
+ $(verbose) dialyzer --build_plt $(DIALYZER_PLT_OPTS) --apps \
+ erts kernel stdlib $(PLT_APPS) $(OTP_DEPS) $(LOCAL_DEPS) $(DEPS_LOG) || test $$? -eq 2
+ $(verbose) $(ERL) -eval 'io:format("~n~s~n", [code:lib_dir(erts)]), halt().' >> $@
+
+plt: $(DIALYZER_PLT)
+
+distclean-plt:
+ $(gen_verbose) rm -f $(DIALYZER_PLT)
+
+ifneq ($(wildcard $(DIALYZER_PLT)),)
+dialyze: $(if $(filter --src,$(DIALYZER_DIRS)),,deps app)
+ $(verbose) if ! tail -n1 $(DIALYZER_PLT) | \
+ grep -q "^`$(ERL) -eval 'io:format("~s", [code:lib_dir(erts)]), halt().'`$$"; then \
+ rm $(DIALYZER_PLT); \
+ $(MAKE) plt; \
+ fi
+else
+dialyze: $(DIALYZER_PLT)
+endif
+ $(verbose) dialyzer --no_native `$(ERL) \
+ -eval "$(subst $(newline),,$(call escape_dquotes,$(call filter_opts.erl)))" \
+ -extra $(ERLC_OPTS)` $(DIALYZER_DIRS) $(DIALYZER_OPTS) $(if $(wildcard ebin/),-pa ebin/)
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-edoc edoc
+
+# Configuration.
+
+EDOC_OPTS ?=
+EDOC_SRC_DIRS ?=
+EDOC_OUTPUT ?= doc
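+
+# e.g. EDOC_OPTS = {preprocess, true}   (any edoc option; illustrative)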
+
+define edoc.erl
+ SrcPaths = lists:foldl(fun(P, Acc) ->
+ filelib:wildcard(atom_to_list(P) ++ "/{src,c_src}") ++ Acc
+ end, [], [$(call comma_list,$(patsubst %,'%',$(call core_native_path,$(EDOC_SRC_DIRS))))]),
+ DefaultOpts = [{dir, "$(EDOC_OUTPUT)"}, {source_path, SrcPaths}, {subpackages, false}],
+ edoc:application($(1), ".", [$(2)] ++ DefaultOpts),
+ halt(0).
+endef
+
+# Core targets.
+
+ifneq ($(strip $(EDOC_SRC_DIRS)$(wildcard doc/overview.edoc)),)
+docs:: edoc
+endif
+
+distclean:: distclean-edoc
+
+# Plugin-specific targets.
+
+edoc: distclean-edoc doc-deps
+ $(gen_verbose) $(call erlang,$(call edoc.erl,$(PROJECT),$(EDOC_OPTS)))
+
+distclean-edoc:
+ $(gen_verbose) rm -f $(EDOC_OUTPUT)/*.css $(EDOC_OUTPUT)/*.html $(EDOC_OUTPUT)/*.png $(EDOC_OUTPUT)/edoc-info
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Configuration.
+
+DTL_FULL_PATH ?=
+DTL_PATH ?= templates/
+DTL_PREFIX ?=
+DTL_SUFFIX ?= _dtl
+DTL_OPTS ?=
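+
+# With the defaults, templates/foo.dtl compiles to module foo_dtl; setting
+# DTL_PREFIX = web_ would yield web_foo_dtl (hypothetical names).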
+
+# Verbosity.
+
+dtl_verbose_0 = @echo " DTL " $(filter %.dtl,$(?F));
+dtl_verbose = $(dtl_verbose_$(V))
+
+# Core targets.
+
+DTL_PATH := $(abspath $(DTL_PATH))
+DTL_FILES := $(sort $(call core_find,$(DTL_PATH),*.dtl))
+
+ifneq ($(DTL_FILES),)
+
+DTL_NAMES = $(addprefix $(DTL_PREFIX),$(addsuffix $(DTL_SUFFIX),$(DTL_FILES:$(DTL_PATH)/%.dtl=%)))
+DTL_MODULES = $(if $(DTL_FULL_PATH),$(subst /,_,$(DTL_NAMES)),$(notdir $(DTL_NAMES)))
+BEAM_FILES += $(addsuffix .beam,$(addprefix ebin/,$(DTL_MODULES)))
+
+ifneq ($(words $(DTL_FILES)),0)
+# Rebuild templates when the Makefile changes.
+$(ERLANG_MK_TMP)/last-makefile-change-erlydtl: $(MAKEFILE_LIST) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(DTL_FILES); \
+ fi
+ $(verbose) touch $@
+
+ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change-erlydtl
+endif
+
+define erlydtl_compile.erl
+ [begin
+ Module0 = case "$(strip $(DTL_FULL_PATH))" of
+ "" ->
+ filename:basename(F, ".dtl");
+ _ ->
+ "$(call core_native_path,$(DTL_PATH))/" ++ F2 = filename:rootname(F, ".dtl"),
+ re:replace(F2, "/", "_", [{return, list}, global])
+ end,
+ Module = list_to_atom("$(DTL_PREFIX)" ++ string:to_lower(Module0) ++ "$(DTL_SUFFIX)"),
+ case erlydtl:compile(F, Module, [$(DTL_OPTS)] ++ [{out_dir, "ebin/"}, return_errors]) of
+ ok -> ok;
+ {ok, _} -> ok
+ end
+ end || F <- string:tokens("$(1)", " ")],
+ halt().
+endef
+
+ebin/$(PROJECT).app:: $(DTL_FILES) | ebin/
+ $(if $(strip $?),\
+ $(dtl_verbose) $(call erlang,$(call erlydtl_compile.erl,$(call core_native_path,$?)),\
+ -pa ebin/))
+
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, Dave Cottlehuber <dch@skunkwerks.at>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-escript escript escript-zip
+
+# Configuration.
+
+ESCRIPT_NAME ?= $(PROJECT)
+ESCRIPT_FILE ?= $(ESCRIPT_NAME)
+
+ESCRIPT_SHEBANG ?= /usr/bin/env escript
+ESCRIPT_COMMENT ?= This is an -*- erlang -*- file
+ESCRIPT_EMU_ARGS ?= -escript main $(ESCRIPT_NAME)
+
+ESCRIPT_ZIP ?= 7z a -tzip -mx=9 -mtc=off $(if $(filter-out 0,$(V)),,> /dev/null)
+ESCRIPT_ZIP_FILE ?= $(ERLANG_MK_TMP)/escript.zip
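+
+# e.g. $(MAKE) escript && ./my_project   (hypothetical project name)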
+
+# Core targets.
+
+distclean:: distclean-escript
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Escript targets:" \
+		"  escript     Build an executable escript archive"
+
+# Plugin-specific targets.
+
+escript-zip:: FULL=1
+escript-zip:: deps app
+ $(verbose) mkdir -p $(dir $(ESCRIPT_ZIP))
+ $(verbose) rm -f $(ESCRIPT_ZIP_FILE)
+ $(gen_verbose) cd .. && $(ESCRIPT_ZIP) $(ESCRIPT_ZIP_FILE) $(PROJECT)/ebin/*
+ifneq ($(DEPS),)
+ $(verbose) cd $(DEPS_DIR) && $(ESCRIPT_ZIP) $(ESCRIPT_ZIP_FILE) \
+ $(subst $(DEPS_DIR)/,,$(addsuffix /*,$(wildcard \
+ $(addsuffix /ebin,$(shell cat $(ERLANG_MK_TMP)/deps.log)))))
+endif
+
+escript:: escript-zip
+ $(gen_verbose) printf "%s\n" \
+ "#!$(ESCRIPT_SHEBANG)" \
+ "%% $(ESCRIPT_COMMENT)" \
+ "%%! $(ESCRIPT_EMU_ARGS)" > $(ESCRIPT_FILE)
+ $(verbose) cat $(ESCRIPT_ZIP_FILE) >> $(ESCRIPT_FILE)
+ $(verbose) chmod +x $(ESCRIPT_FILE)
+
+distclean-escript:
+ $(gen_verbose) rm -f $(ESCRIPT_FILE)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, Enrique Fernandez <enrique.fernandez@erlang-solutions.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: eunit apps-eunit
+
+# Configuration
+
+EUNIT_OPTS ?=
+EUNIT_ERL_OPTS ?=
+
+# Core targets.
+
+tests:: eunit
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "EUnit targets:" \
+ " eunit Run all the EUnit tests for this project"
+
+# Plugin-specific targets.
+
+define eunit.erl
+ $(call cover.erl)
+ CoverSetup(),
+ case eunit:test($1, [$(EUNIT_OPTS)]) of
+ ok -> ok;
+ error -> halt(2)
+ end,
+ CoverExport("$(call core_native_path,$(COVER_DATA_DIR))/eunit.coverdata"),
+ halt()
+endef
+
+EUNIT_ERL_OPTS += -pa $(TEST_DIR) $(CURDIR)/ebin
+
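+# e.g. $(MAKE) eunit t=my_mod           (all EUnit tests in one module)
+#      $(MAKE) eunit t=my_mod:my_test   (a single 0-arity test function)
+# (hypothetical module and function names)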
+ifdef t
+ifeq (,$(findstring :,$(t)))
+eunit: test-build cover-data-dir
+ $(gen_verbose) $(call erlang,$(call eunit.erl,['$(t)']),$(EUNIT_ERL_OPTS))
+else
+eunit: test-build cover-data-dir
+ $(gen_verbose) $(call erlang,$(call eunit.erl,fun $(t)/0),$(EUNIT_ERL_OPTS))
+endif
+else
+EUNIT_EBIN_MODS = $(notdir $(basename $(ERL_FILES) $(BEAM_FILES)))
+EUNIT_TEST_MODS = $(notdir $(basename $(call core_find,$(TEST_DIR)/,*.erl)))
+
+EUNIT_MODS = $(foreach mod,$(EUNIT_EBIN_MODS) $(filter-out \
+ $(patsubst %,%_tests,$(EUNIT_EBIN_MODS)),$(EUNIT_TEST_MODS)),'$(mod)')
+
+eunit: test-build $(if $(IS_APP)$(ROOT_DIR),,apps-eunit) cover-data-dir
+ifneq ($(wildcard src/ $(TEST_DIR)),)
+ $(gen_verbose) $(call erlang,$(call eunit.erl,[$(call comma_list,$(EUNIT_MODS))]),$(EUNIT_ERL_OPTS))
+endif
+
+ifneq ($(ALL_APPS_DIRS),)
+apps-eunit: test-build
+ $(verbose) eunit_retcode=0 ; for app in $(ALL_APPS_DIRS); do $(MAKE) -C $$app eunit IS_APP=1; \
+ [ $$? -ne 0 ] && eunit_retcode=1 ; done ; \
+ exit $$eunit_retcode
+endif
+endif
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter proper,$(DEPS) $(TEST_DEPS)),proper)
+.PHONY: proper
+
+# Targets.
+
+tests:: proper
+
+define proper_check.erl
+ $(call cover.erl)
+ code:add_pathsa([
+ "$(call core_native_path,$(CURDIR)/ebin)",
+ "$(call core_native_path,$(DEPS_DIR)/*/ebin)",
+ "$(call core_native_path,$(TEST_DIR))"]),
+ Module = fun(M) ->
+ [true] =:= lists:usort([
+ case atom_to_list(F) of
+ "prop_" ++ _ ->
+ io:format("Testing ~p:~p/0~n", [M, F]),
+ proper:quickcheck(M:F(), nocolors);
+ _ ->
+ true
+ end
+ || {F, 0} <- M:module_info(exports)])
+ end,
+ try begin
+ CoverSetup(),
+ Res = case $(1) of
+ all -> [true] =:= lists:usort([Module(M) || M <- [$(call comma_list,$(3))]]);
+ module -> Module($(2));
+ function -> proper:quickcheck($(2), nocolors)
+ end,
+ CoverExport("$(COVER_DATA_DIR)/proper.coverdata"),
+ Res
+ end of
+ true -> halt(0);
+ _ -> halt(1)
+ catch error:undef ->
+ io:format("Undefined property or module?~n~p~n", [erlang:get_stacktrace()]),
+ halt(0)
+ end.
+endef
+
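+# e.g. $(MAKE) proper t=prop_my_mod           (all prop_* in one module)
+#      $(MAKE) proper t=prop_my_mod:prop_foo  (a single property)
+# (hypothetical module and property names)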
+ifdef t
+ifeq (,$(findstring :,$(t)))
+proper: test-build cover-data-dir
+ $(verbose) $(call erlang,$(call proper_check.erl,module,$(t)))
+else
+proper: test-build cover-data-dir
+ $(verbose) echo Testing $(t)/0
+ $(verbose) $(call erlang,$(call proper_check.erl,function,$(t)()))
+endif
+else
+proper: test-build cover-data-dir
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(wildcard ebin/*.beam) $(call core_find,$(TEST_DIR)/,*.beam))))))
+ $(gen_verbose) $(call erlang,$(call proper_check.erl,all,undefined,$(MODULES)))
+endif
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Verbosity.
+
+proto_verbose_0 = @echo " PROTO " $(filter %.proto,$(?F));
+proto_verbose = $(proto_verbose_$(V))
+
+# Core targets.
+
+ifneq ($(wildcard src/),)
+ifneq ($(filter gpb protobuffs,$(BUILD_DEPS) $(DEPS)),)
+PROTO_FILES := $(filter %.proto,$(ALL_SRC_FILES))
+ERL_FILES += $(addprefix src/,$(patsubst %.proto,%_pb.erl,$(notdir $(PROTO_FILES))))
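+
+# e.g. src/messages.proto (hypothetical name) compiles to src/messages_pb.erl.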
+
+ifeq ($(PROTO_FILES),)
+$(ERLANG_MK_TMP)/last-makefile-change-protobuffs:
+ $(verbose) :
+else
+# Rebuild proto files when the Makefile changes.
+# We exclude $(PROJECT).d to avoid a circular dependency.
+$(ERLANG_MK_TMP)/last-makefile-change-protobuffs: $(filter-out $(PROJECT).d,$(MAKEFILE_LIST)) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(PROTO_FILES); \
+ fi
+ $(verbose) touch $@
+
+$(PROJECT).d:: $(ERLANG_MK_TMP)/last-makefile-change-protobuffs
+endif
+
+ifeq ($(filter gpb,$(BUILD_DEPS) $(DEPS)),)
+define compile_proto.erl
+ [begin
+ protobuffs_compile:generate_source(F, [
+ {output_include_dir, "./include"},
+ {output_src_dir, "./src"}])
+ end || F <- string:tokens("$1", " ")],
+ halt().
+endef
+else
+define compile_proto.erl
+ [begin
+ gpb_compile:file(F, [
+ {include_as_lib, true},
+ {module_name_suffix, "_pb"},
+ {o_hrl, "./include"},
+ {o_erl, "./src"}])
+ end || F <- string:tokens("$1", " ")],
+ halt().
+endef
+endif
+
+ifneq ($(PROTO_FILES),)
+$(PROJECT).d:: $(PROTO_FILES)
+ $(verbose) mkdir -p ebin/ include/
+ $(if $(strip $?),$(proto_verbose) $(call erlang,$(call compile_proto.erl,$?)))
+endif
+endif
+endif
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: relx-rel relx-relup distclean-relx-rel run
+
+# Configuration.
+
+RELX ?= $(ERLANG_MK_TMP)/relx
+RELX_CONFIG ?= $(CURDIR)/relx.config
+
+RELX_URL ?= https://erlang.mk/res/relx-v3.27.0
+RELX_OPTS ?=
+RELX_OUTPUT_DIR ?= _rel
+RELX_REL_EXT ?=
+RELX_TAR ?= 1
+
+ifdef SFX
+ RELX_TAR = 1
+endif
+
+ifeq ($(firstword $(RELX_OPTS)),-o)
+ RELX_OUTPUT_DIR = $(word 2,$(RELX_OPTS))
+else
+ RELX_OPTS += -o $(RELX_OUTPUT_DIR)
+endif
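+
+# e.g. RELX_OPTS = -o custom_rel   (the -o value becomes RELX_OUTPUT_DIR)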
+
+# Core targets.
+
+ifeq ($(IS_DEP),)
+ifneq ($(wildcard $(RELX_CONFIG)),)
+rel:: relx-rel
+
+relup:: relx-relup
+endif
+endif
+
+distclean:: distclean-relx-rel
+
+# Plugin-specific targets.
+
+$(RELX): | $(ERLANG_MK_TMP)
+ $(gen_verbose) $(call core_http_get,$(RELX),$(RELX_URL))
+ $(verbose) chmod +x $(RELX)
+
+relx-rel: $(RELX) rel-deps app
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) release
+ $(verbose) $(MAKE) relx-post-rel
+ifeq ($(RELX_TAR),1)
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) tar
+endif
+
+relx-relup: $(RELX) rel-deps app
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) release
+ $(MAKE) relx-post-rel
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) relup $(if $(filter 1,$(RELX_TAR)),tar)
+
+distclean-relx-rel:
+ $(gen_verbose) rm -rf $(RELX_OUTPUT_DIR)
+
+# Default hooks.
+relx-post-rel::
+ $(verbose) :
+
+# Run target.
+
+ifeq ($(wildcard $(RELX_CONFIG)),)
+run::
+else
+
+define get_relx_release.erl
+ {ok, Config} = file:consult("$(call core_native_path,$(RELX_CONFIG))"),
+ {release, {Name, Vsn0}, _} = lists:keyfind(release, 1, Config),
+ Vsn = case Vsn0 of
+ {cmd, Cmd} -> os:cmd(Cmd);
+ semver -> "";
+ {semver, _} -> "";
+ VsnStr -> Vsn0
+ end,
+ Extended = case lists:keyfind(extended_start_script, 1, Config) of
+ {_, true} -> "1";
+ _ -> ""
+ end,
+ io:format("~s ~s ~s", [Name, Vsn, Extended]),
+ halt(0).
+endef
+
+RELX_REL := $(shell $(call erlang,$(get_relx_release.erl)))
+RELX_REL_NAME := $(word 1,$(RELX_REL))
+RELX_REL_VSN := $(word 2,$(RELX_REL))
+RELX_REL_CMD := $(if $(word 3,$(RELX_REL)),console)
+
+ifeq ($(PLATFORM),msys2)
+RELX_REL_EXT := .cmd
+endif
+
+run:: all
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) $(RELX_REL_CMD)
+
+ifdef RELOAD
+rel::
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) ping
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) \
+ eval "io:format(\"~p~n\", [c:lm()])"
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Relx targets:" \
+ " run Compile the project, build the release and run it"
+
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, M Robert Martin <rob@version2beta.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: shell
+
+# Configuration.
+
+SHELL_ERL ?= erl
+SHELL_PATHS ?= $(CURDIR)/ebin $(TEST_DIR)
+SHELL_OPTS ?=
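+
+# e.g. SHELL_OPTS = -sname dev -eval 'application:ensure_all_started(my_app)'
+# (illustrative; any erl(1) flags can go here)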
+
+ALL_SHELL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(SHELL_DEPS))
+
+# Core targets
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Shell targets:" \
+		"  shell              Run an Erlang shell with SHELL_OPTS or reasonable defaults"
+
+# Plugin-specific targets.
+
+$(foreach dep,$(SHELL_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+build-shell-deps:
+else
+build-shell-deps: $(ALL_SHELL_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_SHELL_DEPS_DIRS) ; do \
+ if [ -z "$(strip $(FULL))" ] && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ else \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ fi \
+ done
+endif
+
+shell:: build-shell-deps
+ $(gen_verbose) $(SHELL_ERL) -pa $(SHELL_PATHS) $(SHELL_OPTS)
+
+# Copyright 2017, Stanislaw Klekot <dozzie@jarowit.net>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-sphinx sphinx
+
+# Configuration.
+
+SPHINX_BUILD ?= sphinx-build
+SPHINX_SOURCE ?= doc
+SPHINX_CONFDIR ?=
+SPHINX_FORMATS ?= html
+SPHINX_DOCTREES ?= $(ERLANG_MK_TMP)/sphinx.doctrees
+SPHINX_OPTS ?=
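+
+# e.g. SPHINX_FORMATS = html man   (build several formats; per-format output
+# directories can be set via the sphinx_*_output variables below)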
+
+#sphinx_html_opts =
+#sphinx_html_output = html
+#sphinx_man_opts =
+#sphinx_man_output = man
+#sphinx_latex_opts =
+#sphinx_latex_output = latex
+
+# Helpers.
+
+sphinx_build_0 = @echo " SPHINX" $1; $(SPHINX_BUILD) -N -q
+sphinx_build_1 = $(SPHINX_BUILD) -N
+sphinx_build_2 = set -x; $(SPHINX_BUILD)
+sphinx_build = $(sphinx_build_$(V))
+
+define sphinx.build
+$(call sphinx_build,$1) -b $1 -d $(SPHINX_DOCTREES) $(if $(SPHINX_CONFDIR),-c $(SPHINX_CONFDIR)) $(SPHINX_OPTS) $(sphinx_$1_opts) -- $(SPHINX_SOURCE) $(call sphinx.output,$1)
+
+endef
+
+define sphinx.output
+$(if $(sphinx_$1_output),$(sphinx_$1_output),$1)
+endef
+
+# Targets.
+
+ifneq ($(wildcard $(if $(SPHINX_CONFDIR),$(SPHINX_CONFDIR),$(SPHINX_SOURCE))/conf.py),)
+docs:: sphinx
+distclean:: distclean-sphinx
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Sphinx targets:" \
+ " sphinx Generate Sphinx documentation." \
+ "" \
+		"ReST sources and 'conf.py' file are expected in the directory pointed to by" \
+ "SPHINX_SOURCE ('doc' by default). SPHINX_FORMATS lists formats to build (only" \
+ "'html' format is generated by default); target directory can be specified by" \
+ 'setting sphinx_$${format}_output, for example: sphinx_html_output = output/html' \
+ "Additional Sphinx options can be set in SPHINX_OPTS."
+
+# Plugin-specific targets.
+
+sphinx:
+ $(foreach F,$(SPHINX_FORMATS),$(call sphinx.build,$F))
+
+distclean-sphinx:
+ $(gen_verbose) rm -rf $(filter-out $(SPHINX_SOURCE),$(foreach F,$(SPHINX_FORMATS),$(call sphinx.output,$F)))
+
+# Copyright (c) 2017, Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: show-ERL_LIBS show-ERLC_OPTS show-TEST_ERLC_OPTS
+
+show-ERL_LIBS:
+ @echo $(ERL_LIBS)
+
+show-ERLC_OPTS:
+ @$(foreach opt,$(ERLC_OPTS) -pa ebin -I include,echo "$(opt)";)
+
+show-TEST_ERLC_OPTS:
+ @$(foreach opt,$(TEST_ERLC_OPTS) -pa ebin -I include,echo "$(opt)";)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter triq,$(DEPS) $(TEST_DEPS)),triq)
+.PHONY: triq
+
+# Targets.
+
+tests:: triq
+
+define triq_check.erl
+ $(call cover.erl)
+ code:add_pathsa([
+ "$(call core_native_path,$(CURDIR)/ebin)",
+ "$(call core_native_path,$(DEPS_DIR)/*/ebin)",
+ "$(call core_native_path,$(TEST_DIR))"]),
+ try begin
+ CoverSetup(),
+ Res = case $(1) of
+ all -> [true] =:= lists:usort([triq:check(M) || M <- [$(call comma_list,$(3))]]);
+ module -> triq:check($(2));
+ function -> triq:check($(2))
+ end,
+ CoverExport("$(COVER_DATA_DIR)/triq.coverdata"),
+ Res
+ end of
+ true -> halt(0);
+ _ -> halt(1)
+ catch error:undef ->
+ io:format("Undefined property or module?~n~p~n", [erlang:get_stacktrace()]),
+ halt(0)
+ end.
+endef
+
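+# e.g. $(MAKE) triq t=my_mod           (check all properties in one module)
+#      $(MAKE) triq t=my_mod:prop_foo  (a single property)
+# (hypothetical module and property names)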
+ifdef t
+ifeq (,$(findstring :,$(t)))
+triq: test-build cover-data-dir
+ $(verbose) $(call erlang,$(call triq_check.erl,module,$(t)))
+else
+triq: test-build cover-data-dir
+ $(verbose) echo Testing $(t)/0
+ $(verbose) $(call erlang,$(call triq_check.erl,function,$(t)()))
+endif
+else
+triq: test-build cover-data-dir
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(wildcard ebin/*.beam) $(call core_find,$(TEST_DIR)/,*.beam))))))
+ $(gen_verbose) $(call erlang,$(call triq_check.erl,all,undefined,$(MODULES)))
+endif
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Erlang Solutions Ltd.
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: xref distclean-xref
+
+# Configuration.
+
+ifeq ($(XREF_CONFIG),)
+ XREFR_ARGS :=
+else
+ XREFR_ARGS := -c $(XREF_CONFIG)
+endif
+
+XREFR ?= $(CURDIR)/xrefr
+export XREFR
+
+XREFR_URL ?= https://github.com/inaka/xref_runner/releases/download/1.1.0/xrefr
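+
+# e.g. XREF_CONFIG = $(CURDIR)/xref.config   (hypothetical file, passed to
+# xrefr as -c above)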
+
+# Core targets.
+
+help::
+ $(verbose) printf '%s\n' '' \
+ 'Xref targets:' \
+ ' xref Run Xrefr using $$XREF_CONFIG as config file if defined'
+
+distclean:: distclean-xref
+
+# Plugin-specific targets.
+
+$(XREFR):
+ $(gen_verbose) $(call core_http_get,$(XREFR),$(XREFR_URL))
+ $(verbose) chmod +x $(XREFR)
+
+xref: deps app $(XREFR)
+ $(gen_verbose) $(XREFR) $(XREFR_ARGS)
+
+distclean-xref:
+ $(gen_verbose) rm -rf $(XREFR)
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Viktor Söderqvist <viktor@zuiderkwast.se>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+COVER_REPORT_DIR ?= cover
+COVER_DATA_DIR ?= $(COVER_REPORT_DIR)
+
+ifdef COVER
+COVER_APPS ?= $(notdir $(ALL_APPS_DIRS))
+COVER_DEPS ?=
+endif
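+
+# e.g. $(MAKE) tests COVER=1 COVER_DEPS="cowlib"   (illustrative dependency name)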
+
+# Code coverage for Common Test.
+
+ifdef COVER
+ifdef CT_RUN
+ifneq ($(wildcard $(TEST_DIR)),)
+test-build:: $(TEST_DIR)/ct.cover.spec
+
+$(TEST_DIR)/ct.cover.spec: cover-data-dir
+ $(gen_verbose) printf "%s\n" \
+ "{incl_app, '$(PROJECT)', details}." \
+ "{incl_dirs, '$(PROJECT)', [\"$(call core_native_path,$(CURDIR)/ebin)\" \
+ $(foreach a,$(COVER_APPS),$(comma) \"$(call core_native_path,$(APPS_DIR)/$a/ebin)\") \
+ $(foreach d,$(COVER_DEPS),$(comma) \"$(call core_native_path,$(DEPS_DIR)/$d/ebin)\")]}." \
+ '{export,"$(call core_native_path,$(abspath $(COVER_DATA_DIR))/ct.coverdata)"}.' > $@
+
+CT_RUN += -cover $(TEST_DIR)/ct.cover.spec
+endif
+endif
+endif
+
+# Code coverage for other tools.
+
+ifdef COVER
+define cover.erl
+ CoverSetup = fun() ->
+ Dirs = ["$(call core_native_path,$(CURDIR)/ebin)"
+ $(foreach a,$(COVER_APPS),$(comma) "$(call core_native_path,$(APPS_DIR)/$a/ebin)")
+ $(foreach d,$(COVER_DEPS),$(comma) "$(call core_native_path,$(DEPS_DIR)/$d/ebin)")],
+ [begin
+ case filelib:is_dir(Dir) of
+ false -> false;
+ true ->
+ case cover:compile_beam_directory(Dir) of
+ {error, _} -> halt(1);
+ _ -> true
+ end
+ end
+ end || Dir <- Dirs]
+ end,
+ CoverExport = fun(Filename) -> cover:export(Filename) end,
+endef
+else
+define cover.erl
+ CoverSetup = fun() -> ok end,
+ CoverExport = fun(_) -> ok end,
+endef
+endif
+
+# Core targets
+
+ifdef COVER
+ifneq ($(COVER_REPORT_DIR),)
+tests::
+ $(verbose) $(MAKE) --no-print-directory cover-report
+endif
+
+cover-data-dir: | $(COVER_DATA_DIR)
+
+$(COVER_DATA_DIR):
+ $(verbose) mkdir -p $(COVER_DATA_DIR)
+else
+cover-data-dir:
+endif
+
+clean:: coverdata-clean
+
+ifneq ($(COVER_REPORT_DIR),)
+distclean:: cover-report-clean
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Cover targets:" \
+		"  cover-report  Generate an HTML coverage report from previously collected" \
+ " cover data." \
+ " all.coverdata Merge all coverdata files into all.coverdata." \
+ "" \
+ "If COVER=1 is set, coverage data is generated by the targets eunit and ct. The" \
+		"target tests additionally generates an HTML coverage report from the combined" \
+ "coverdata files from each of these testing tools. HTML reports can be disabled" \
+ "by setting COVER_REPORT_DIR to empty."
+
+# Plugin specific targets
+
+COVERDATA = $(filter-out $(COVER_DATA_DIR)/all.coverdata,$(wildcard $(COVER_DATA_DIR)/*.coverdata))
+
+.PHONY: coverdata-clean
+coverdata-clean:
+ $(gen_verbose) rm -f $(COVER_DATA_DIR)/*.coverdata $(TEST_DIR)/ct.cover.spec
+
+# Merge all coverdata files into one.
+define cover_export.erl
+ $(foreach f,$(COVERDATA),cover:import("$(f)") == ok orelse halt(1),)
+ cover:export("$(COVER_DATA_DIR)/$@"), halt(0).
+endef
+
+all.coverdata: $(COVERDATA) cover-data-dir
+ $(gen_verbose) $(call erlang,$(cover_export.erl))
+
+# These are only defined if COVER_REPORT_DIR is non-empty. Set COVER_REPORT_DIR to
+# empty if you want the coverdata files but not the HTML report.
+ifneq ($(COVER_REPORT_DIR),)
+
+.PHONY: cover-report-clean cover-report
+
+cover-report-clean:
+ $(gen_verbose) rm -rf $(COVER_REPORT_DIR)
+ifneq ($(COVER_REPORT_DIR),$(COVER_DATA_DIR))
+ $(if $(shell ls -A $(COVER_DATA_DIR)/),,$(verbose) rmdir $(COVER_DATA_DIR))
+endif
+
+ifeq ($(COVERDATA),)
+cover-report:
+else
+
+# Modules which include eunit.hrl always contain one line without coverage
+# because eunit defines test/0 which is never called. We compensate for this.
+EUNIT_HRL_MODS = $(subst $(space),$(comma),$(shell \
+ grep -H -e '^\s*-include.*include/eunit\.hrl"' src/*.erl \
+ | sed "s/^src\/\(.*\)\.erl:.*/'\1'/" | uniq))
+
+define cover_report.erl
+ $(foreach f,$(COVERDATA),cover:import("$(f)") == ok orelse halt(1),)
+ Ms = cover:imported_modules(),
+ [cover:analyse_to_file(M, "$(COVER_REPORT_DIR)/" ++ atom_to_list(M)
+ ++ ".COVER.html", [html]) || M <- Ms],
+ Report = [begin {ok, R} = cover:analyse(M, module), R end || M <- Ms],
+ EunitHrlMods = [$(EUNIT_HRL_MODS)],
+ Report1 = [{M, {Y, case lists:member(M, EunitHrlMods) of
+ true -> N - 1; false -> N end}} || {M, {Y, N}} <- Report],
+ TotalY = lists:sum([Y || {_, {Y, _}} <- Report1]),
+ TotalN = lists:sum([N || {_, {_, N}} <- Report1]),
+ Perc = fun(Y, N) -> case Y + N of 0 -> 100; S -> round(100 * Y / S) end end,
+ TotalPerc = Perc(TotalY, TotalN),
+ {ok, F} = file:open("$(COVER_REPORT_DIR)/index.html", [write]),
+ io:format(F, "<!DOCTYPE html><html>~n"
+ "<head><meta charset=\"UTF-8\">~n"
+ "<title>Coverage report</title></head>~n"
+ "<body>~n", []),
+ io:format(F, "<h1>Coverage</h1>~n<p>Total: ~p%</p>~n", [TotalPerc]),
+ io:format(F, "<table><tr><th>Module</th><th>Coverage</th></tr>~n", []),
+ [io:format(F, "<tr><td><a href=\"~p.COVER.html\">~p</a></td>"
+ "<td>~p%</td></tr>~n",
+ [M, M, Perc(Y, N)]) || {M, {Y, N}} <- Report1],
+ How = "$(subst $(space),$(comma)$(space),$(basename $(COVERDATA)))",
+ Date = "$(shell date -u "+%Y-%m-%dT%H:%M:%SZ")",
+ io:format(F, "</table>~n"
+ "<p>Generated using ~s and erlang.mk on ~s.</p>~n"
+ "</body></html>", [How, Date]),
+ halt().
+endef
+
+cover-report:
+ $(verbose) mkdir -p $(COVER_REPORT_DIR)
+ $(gen_verbose) $(call erlang,$(cover_report.erl))
+
+endif
+endif # ifneq ($(COVER_REPORT_DIR),)
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: sfx
+
+ifdef RELX_REL
+ifdef SFX
+
+# Configuration.
+
+SFX_ARCHIVE ?= $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/$(RELX_REL_NAME)-$(RELX_REL_VSN).tar.gz
+SFX_OUTPUT_FILE ?= $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME).run
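+
+# e.g. $(MAKE) rel SFX=1 && ./_rel/my_release.run   (hypothetical release name)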
+
+# Core targets.
+
+rel:: sfx
+
+# Plugin-specific targets.
+
+define sfx_stub
+#!/bin/sh
+
+TMPDIR=`mktemp -d`
+ARCHIVE=`awk '/^__ARCHIVE_BELOW__$$/ {print NR + 1; exit 0;}' $$0`
+FILENAME=$$(basename $$0)
+REL=$${FILENAME%.*}
+
+tail -n+$$ARCHIVE $$0 | tar -xzf - -C $$TMPDIR
+
+$$TMPDIR/bin/$$REL console
+RET=$$?
+
+rm -rf $$TMPDIR
+
+exit $$RET
+
+__ARCHIVE_BELOW__
+endef
+
+sfx:
+ $(verbose) $(call core_render,sfx_stub,$(SFX_OUTPUT_FILE))
+ $(gen_verbose) cat $(SFX_ARCHIVE) >> $(SFX_OUTPUT_FILE)
+ $(verbose) chmod +x $(SFX_OUTPUT_FILE)
+
+endif
+endif
+
+# Copyright (c) 2013-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# External plugins.
+
+DEP_PLUGINS ?=
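+
+# Entries are either a dependency name, which loads DEP/plugins.mk, or a path
+# inside a dependency, e.g. (hypothetical): DEP_PLUGINS = cowboy mydep/mk/dist.mk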
+
+$(foreach p,$(DEP_PLUGINS),\
+ $(eval $(if $(findstring /,$p),\
+ $(call core_dep_plugin,$p,$(firstword $(subst /, ,$p))),\
+ $(call core_dep_plugin,$p/plugins.mk,$p))))
+
+help:: help-plugins
+
+help-plugins::
+ $(verbose) :
+
+# Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015-2016, Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Fetch dependencies recursively (without building them).
+
+.PHONY: fetch-deps fetch-doc-deps fetch-rel-deps fetch-test-deps \
+ fetch-shell-deps
+
+.PHONY: $(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+fetch-deps: $(ERLANG_MK_RECURSIVE_DEPS_LIST)
+fetch-doc-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST)
+fetch-rel-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST)
+fetch-test-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST)
+fetch-shell-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+ifneq ($(SKIP_DEPS),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST):
+ $(verbose) :> $@
+else
+# By default, we fetch "normal" dependencies. They are also included no
+# matter the type of requested dependencies.
+#
+# $(ALL_DEPS_DIRS) includes $(BUILD_DEPS).
+
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_DOC_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_REL_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_TEST_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_SHELL_DEPS_DIRS)
+
+# Allow using fetch-deps together with $(DEP_TYPES) to fetch multiple
+# types of dependencies with a single target.
+ifneq ($(filter doc,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_DOC_DEPS_DIRS)
+endif
+ifneq ($(filter rel,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_REL_DEPS_DIRS)
+endif
+ifneq ($(filter test,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_TEST_DEPS_DIRS)
+endif
+ifneq ($(filter shell,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_SHELL_DEPS_DIRS)
+endif
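+
+# e.g. $(MAKE) fetch-deps DEP_TYPES="test rel"   (also fetch TEST_DEPS and REL_DEPS)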
+
+ERLANG_MK_RECURSIVE_TMP_LIST := $(abspath $(ERLANG_MK_TMP)/recursive-tmp-deps-$(shell echo $$PPID).log)
+
+$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): | $(ERLANG_MK_TMP)
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_RECURSIVE_TMP_LIST)
+endif
+ $(verbose) touch $(ERLANG_MK_RECURSIVE_TMP_LIST)
+ $(verbose) set -e; for dep in $^ ; do \
+ if ! grep -qs ^$$dep$$ $(ERLANG_MK_RECURSIVE_TMP_LIST); then \
+ echo $$dep >> $(ERLANG_MK_RECURSIVE_TMP_LIST); \
+ if grep -qs -E "^[[:blank:]]*include[[:blank:]]+(erlang\.mk|.*/erlang\.mk|.*ERLANG_MK_FILENAME.*)$$" \
+ $$dep/GNUmakefile $$dep/makefile $$dep/Makefile; then \
+ $(MAKE) -C $$dep fetch-deps \
+ IS_DEP=1 \
+ ERLANG_MK_RECURSIVE_TMP_LIST=$(ERLANG_MK_RECURSIVE_TMP_LIST); \
+ fi \
+ fi \
+ done
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) sort < $(ERLANG_MK_RECURSIVE_TMP_LIST) | \
+ uniq > $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted
+ $(verbose) cmp -s $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted $@ \
+ || mv $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted $@
+ $(verbose) rm -f $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted
+ $(verbose) rm $(ERLANG_MK_RECURSIVE_TMP_LIST)
+endif
+endif # ifneq ($(SKIP_DEPS),)
+
+# List dependencies recursively.
+
+.PHONY: list-deps list-doc-deps list-rel-deps list-test-deps \
+ list-shell-deps
+
+list-deps: $(ERLANG_MK_RECURSIVE_DEPS_LIST)
+list-doc-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST)
+list-rel-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST)
+list-test-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST)
+list-shell-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+list-deps list-doc-deps list-rel-deps list-test-deps list-shell-deps:
+ $(verbose) cat $^
+
+# Query dependencies recursively.
+
+.PHONY: query-deps query-doc-deps query-rel-deps query-test-deps \
+ query-shell-deps
+
+QUERY ?= name fetch_method repo version
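+
+# e.g. $(MAKE) query-deps QUERY="name version"   (prints one line per dependency)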
+
+define query_target
+$(1): $(2) clean-tmp-query.log
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(4)
+endif
+ $(verbose) $(foreach dep,$(3),\
+ echo $(PROJECT): $(foreach q,$(QUERY),$(call query_$(q),$(dep))) >> $(4) ;)
+ $(if $(filter-out query-deps,$(1)),,\
+ $(verbose) set -e; for dep in $(3) ; do \
+ if grep -qs ^$$$$dep$$$$ $(ERLANG_MK_TMP)/query.log; then \
+ :; \
+ else \
+ echo $$$$dep >> $(ERLANG_MK_TMP)/query.log; \
+ $(MAKE) -C $(DEPS_DIR)/$$$$dep $$@ QUERY="$(QUERY)" IS_DEP=1 || true; \
+ fi \
+ done)
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) touch $(4)
+ $(verbose) cat $(4)
+endif
+endef
+
+clean-tmp-query.log:
+ifeq ($(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_TMP)/query.log
+endif
+
+$(eval $(call query_target,query-deps,$(ERLANG_MK_RECURSIVE_DEPS_LIST),$(BUILD_DEPS) $(DEPS),$(ERLANG_MK_QUERY_DEPS_FILE)))
+$(eval $(call query_target,query-doc-deps,$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST),$(DOC_DEPS),$(ERLANG_MK_QUERY_DOC_DEPS_FILE)))
+$(eval $(call query_target,query-rel-deps,$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST),$(REL_DEPS),$(ERLANG_MK_QUERY_REL_DEPS_FILE)))
+$(eval $(call query_target,query-test-deps,$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST),$(TEST_DEPS),$(ERLANG_MK_QUERY_TEST_DEPS_FILE)))
+$(eval $(call query_target,query-shell-deps,$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST),$(SHELL_DEPS),$(ERLANG_MK_QUERY_SHELL_DEPS_FILE)))
diff --git a/deps/rabbitmq_tracing/priv/www/js/tmpl/traces.ejs b/deps/rabbitmq_tracing/priv/www/js/tmpl/traces.ejs
new file mode 100644
index 0000000000..4c1244c6dd
--- /dev/null
+++ b/deps/rabbitmq_tracing/priv/www/js/tmpl/traces.ejs
@@ -0,0 +1,174 @@
+<h1>Traces: <b><%= node.name %></b></h1>
+<p>
+ Node:
+ <select id="traces-node">
+ <% for (var i = 0; i < nodes.length; i++) { %>
+ <option name="#/traces/<%= fmt_string(nodes[i].name) %>"<% if (nodes[i].name == node.name) { %>selected="selected"<% } %>><%= nodes[i].name %></option>
+ <% } %>
+ </select>
+</p>
+
+<div class="section">
+ <h2>All traces</h2>
+ <div class="hider updatable">
+ <table class="two-col-layout">
+ <tr>
+ <td>
+ <h3>Currently running traces</h3>
+ <% if (traces.length > 0) { %>
+ <table class="list">
+ <thead>
+ <tr>
+ <% if (vhosts_interesting) { %>
+ <th>Virtual host</th>
+ <% } %>
+ <th>Name</th>
+ <th>Pattern</th>
+ <th>Format</th>
+ <th>Payload limit</th>
+ <th>Rate</th>
+ <th>Queued</th>
+ <th>Tracer connection username</th>
+ <th></th>
+ </tr>
+ </thead>
+ <tbody>
+ <%
+ for (var i = 0; i < traces.length; i++) {
+ var trace = traces[i];
+ %>
+ <tr<%= alt_rows(i)%>>
+ <% if (vhosts_interesting) { %>
+ <td><%= fmt_string(trace.vhost) %></td>
+ <% } %>
+ <td><%= fmt_string(trace.name) %></td>
+ <td><%= fmt_string(trace.pattern) %></td>
+ <td><%= fmt_string(trace.format) %></td>
+ <td class="c"><%= fmt_string(trace.max_payload_bytes, 'Unlimited') %></td>
+ <% if (trace.queue) { %>
+ <td class="r">
+ <%= fmt_detail_rate(trace.queue.message_stats, 'deliver_no_ack') %>
+ </td>
+ <td class="r">
+ <%= trace.queue.messages %>
+ <sub><%= link_trace_queue(trace) %></sub>
+ </td>
+ <% } else { %>
+ <td colspan="2">
+ <div class="status-red"><acronym title="The trace failed to start - check the server logs for details.">FAILED</acronym></div>
+ </td>
+ <% } %>
+ <td><%= fmt_string(trace.tracer_connection_username) %></td>
+ <td>
+ <form action="#/traces/node/<%= node.name %>" method="delete">
+ <input type="hidden" name="vhost" value="<%= fmt_string(trace.vhost) %>"/>
+ <input type="hidden" name="name" value="<%= fmt_string(trace.name) %>"/>
+ <input type="submit" value="Stop"/>
+ </form>
+ </td>
+ </tr>
+ <% } %>
+ </tbody>
+ </table>
+ <% } else { %>
+ <p>... no traces running ...</p>
+ <% } %>
+ </td>
+ <td>
+ <h3>Trace log files</h3>
+ <% if (files.length > 0) { %>
+ <table class="list">
+ <thead>
+ <tr>
+ <th>Name</th>
+ <th>Size</th>
+ <th></th>
+ </tr>
+ </thead>
+ <tbody>
+ <%
+ for (var i = 0; i < files.length; i++) {
+ var file = files[i];
+ %>
+ <tr<%= alt_rows(i)%>>
+ <td><%= link_trace(node.name, file.name) %></td>
+ <td class="r"><%= fmt_bytes(file.size) %></td>
+ <td>
+ <form action="#/trace-files/node/<%= node.name %>" method="delete" class="inline-form">
+ <input type="hidden" name="name" value="<%= fmt_string(file.name) %>"/>
+ <input type="submit" value="Delete" />
+ </form>
+ </td>
+ </tr>
+ <% } %>
+ </tbody>
+ </table>
+ <% } else { %>
+ <p>... no files ...</p>
+ <% } %>
+ </td>
+ </tr>
+ </table>
+ </div>
+</div>
+
+<div class="section">
+ <h2>Add a new trace</h2>
+ <div class="hider">
+ <form action="#/traces/node/<%= node.name %>" method="put">
+ <table class="form">
+<% if (vhosts_interesting) { %>
+ <tr>
+ <th><label>Virtual host:</label></th>
+ <td>
+ <select name="vhost">
+ <% for (var i = 0; i < vhosts.length; i++) { %>
+ <option value="<%= fmt_string(vhosts[i].name) %>"><%= fmt_string(vhosts[i].name) %></option>
+ <% } %>
+ </select>
+ </td>
+ </tr>
+<% } else { %>
+ <tr><td><input type="hidden" name="vhost" value="<%= fmt_string(vhosts[0].name) %>"/></td></tr>
+<% } %>
+ <tr>
+ <th><label>Name:</label></th>
+ <td><input type="text" name="name"/><span class="mand">*</span></td>
+ </tr>
+ <tr>
+ <th><label>Format:</label></th>
+ <td>
+ <select name="format">
+ <option value="text">Text</option>
+ <option value="json">JSON</option>
+ </select>
+ </td>
+ </tr>
+ <tr>
+ <th><label>Tracer connection username:</label></th>
+ <td><input type="text" name="tracer_connection_username"/></td>
+ <td><label>Tracer connection password:</label></td>
+ <td>
+ <div id="password-div">
+ <input type="password" name="tracer_connection_password"/>
+ </div>
+ </td>
+ </tr>
+ <tr>
+ <th><label>Max payload bytes: <span class="help" id="tracing-max-payload"></span></label></th>
+ <td>
+ <input type="text" name="max_payload_bytes" value=""/>
+ </td>
+ </tr>
+ <tr>
+ <th><label>Pattern:</label></th>
+ <td>
+ <input type="text" name="pattern" value="#"/>
+          <sub>Examples: #, publish.#, deliver.#, #.amq.direct, #.myqueue</sub>
+ </td>
+ </tr>
+ </table>
+ <input type="submit" value="Add trace"/>
+ </form>
+ </div>
+</div>
diff --git a/deps/rabbitmq_tracing/priv/www/js/tracing.js b/deps/rabbitmq_tracing/priv/www/js/tracing.js
new file mode 100644
index 0000000000..0e7fbdb47f
--- /dev/null
+++ b/deps/rabbitmq_tracing/priv/www/js/tracing.js
@@ -0,0 +1,60 @@
+dispatcher_add(function(sammy) {
+ sammy.get('#/traces', function() {
+ var nodes = JSON.parse(sync_get('/nodes'));
+ go_to('#/traces/' + nodes[0].name);
+ });
+ sammy.get('#/traces/:node', function() {
+ render({'traces': '/traces/node/' + esc(this.params['node']),
+ 'vhosts': '/vhosts',
+ 'node': '/nodes/' + esc(this.params['node']),
+ 'nodes': '/nodes',
+ 'files': '/trace-files/node/' + esc(this.params['node'])},
+ 'traces', '#/traces');
+ });
+ sammy.get('#/traces/node/:node/:vhost/:name', function() {
+ var path = '/traces/node/' + esc(this.params['node']) + '/' + esc(this.params['vhost']) + '/' + esc(this.params['name']);
+ render({'trace': path},
+ 'trace', '#/traces');
+ });
+ sammy.put('#/traces/node/:node', function() {
+ if (this.params['max_payload_bytes'] === '') {
+ delete this.params['max_payload_bytes'];
+ }
+ else {
+ this.params['max_payload_bytes'] =
+            parseInt(this.params['max_payload_bytes'], 10);
+ }
+ if (sync_put(this, '/traces/node/' + esc(this.params['node']) + '/:vhost/:name'))
+ update();
+ return false;
+ });
+ sammy.del('#/traces/node/:node', function() {
+ if (sync_delete(this, '/traces/node/' + esc(this.params['node'])
+ + '/:vhost/:name'))
+ partial_update();
+ return false;
+ });
+ sammy.del('#/trace-files/node/:node', function() {
+ if (sync_delete(this, '/trace-files/node/' + esc(this.params['node']) + '/:name'))
+ partial_update();
+ return false;
+ });
+});
+
+NAVIGATION['Admin'][0]['Tracing'] = ['#/traces', 'administrator'];
+
+HELP['tracing-max-payload'] =
+ 'Maximum size of payload to log, in bytes. Payloads larger than this limit will be truncated. Leave blank to prevent truncation. Set to 0 to prevent logging of payload altogether.';
+
+$(document).on('change', 'select#traces-node', function() {
+ var url='#/traces/' + $(this).val();
+ go_to(url);
+});
+
+function link_trace(node, name) {
+ return _link_to(name, 'api/trace-files/node/' + esc(node) + '/' + esc(name));
+}
+
+function link_trace_queue(trace) {
+ return _link_to('(queue)', '#/queues/' + esc(trace.vhost) + '/' + esc(trace.queue.name));
+}
diff --git a/deps/rabbitmq_tracing/rabbitmq-components.mk b/deps/rabbitmq_tracing/rabbitmq-components.mk
new file mode 100644
index 0000000000..b2a3be8b35
--- /dev/null
+++ b/deps/rabbitmq_tracing/rabbitmq-components.mk
@@ -0,0 +1,359 @@
+ifeq ($(.DEFAULT_GOAL),)
+# Set the default goal to `all` because this file defines some targets
+# before erlang.mk is included, which would otherwise cause the wrong
+# target to become the default.
+.DEFAULT_GOAL = all
+endif
+
+# PROJECT_VERSION defaults to:
+# 1. the version exported by rabbitmq-server-release;
+# 2. the version stored in `git-revisions.txt`, if it exists;
+# 3. a version based on git-describe(1), if it is a Git clone;
+# 4. 0.0.0
+
+PROJECT_VERSION := $(RABBITMQ_VERSION)
+
+ifeq ($(PROJECT_VERSION),)
+PROJECT_VERSION := $(shell \
+if test -f git-revisions.txt; then \
+ head -n1 git-revisions.txt | \
+ awk '{print $$$(words $(PROJECT_DESCRIPTION) version);}'; \
+else \
+ (git describe --dirty --abbrev=7 --tags --always --first-parent \
+ 2>/dev/null || echo rabbitmq_v0_0_0) | \
+ sed -e 's/^rabbitmq_v//' -e 's/^v//' -e 's/_/./g' -e 's/-/+/' \
+ -e 's/-/./g'; \
+fi)
+endif
+
+# --------------------------------------------------------------------
+# RabbitMQ components.
+# --------------------------------------------------------------------
+
+# For RabbitMQ repositories, we want to check out branches which match
+# the parent project. For instance, if the parent project is on a
+# release tag, dependencies must be on the same release tag. If the
+# parent project is on a topic branch, dependencies must be on the same
+# topic branch or fallback to `stable` or `master` whichever was the
+# base of the topic branch.
+
+dep_amqp_client = git_rmq rabbitmq-erlang-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_amqp10_client = git_rmq rabbitmq-amqp1.0-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_amqp10_common = git_rmq rabbitmq-amqp1.0-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbit = git_rmq rabbitmq-server $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbit_common = git_rmq rabbitmq-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_amqp1_0 = git_rmq rabbitmq-amqp1.0 $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_amqp = git_rmq rabbitmq-auth-backend-amqp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_cache = git_rmq rabbitmq-auth-backend-cache $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_http = git_rmq rabbitmq-auth-backend-http $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_ldap = git_rmq rabbitmq-auth-backend-ldap $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_oauth2 = git_rmq rabbitmq-auth-backend-oauth2 $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_mechanism_ssl = git_rmq rabbitmq-auth-mechanism-ssl $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_aws = git_rmq rabbitmq-aws $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_boot_steps_visualiser = git_rmq rabbitmq-boot-steps-visualiser $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_cli = git_rmq rabbitmq-cli $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_codegen = git_rmq rabbitmq-codegen $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_consistent_hash_exchange = git_rmq rabbitmq-consistent-hash-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_ct_client_helpers = git_rmq rabbitmq-ct-client-helpers $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_ct_helpers = git_rmq rabbitmq-ct-helpers $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_delayed_message_exchange = git_rmq rabbitmq-delayed-message-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_dotnet_client = git_rmq rabbitmq-dotnet-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_event_exchange = git_rmq rabbitmq-event-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_federation = git_rmq rabbitmq-federation $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_federation_management = git_rmq rabbitmq-federation-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_java_client = git_rmq rabbitmq-java-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_client = git_rmq rabbitmq-jms-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_cts = git_rmq rabbitmq-jms-cts $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_topic_exchange = git_rmq rabbitmq-jms-topic-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_lvc_exchange = git_rmq rabbitmq-lvc-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management = git_rmq rabbitmq-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_agent = git_rmq rabbitmq-management-agent $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_exchange = git_rmq rabbitmq-management-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_themes = git_rmq rabbitmq-management-themes $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_message_timestamp = git_rmq rabbitmq-message-timestamp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_metronome = git_rmq rabbitmq-metronome $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_mqtt = git_rmq rabbitmq-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_objc_client = git_rmq rabbitmq-objc-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_aws = git_rmq rabbitmq-peer-discovery-aws $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_common = git_rmq rabbitmq-peer-discovery-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_consul = git_rmq rabbitmq-peer-discovery-consul $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_etcd = git_rmq rabbitmq-peer-discovery-etcd $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_k8s = git_rmq rabbitmq-peer-discovery-k8s $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_prometheus = git_rmq rabbitmq-prometheus $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_random_exchange = git_rmq rabbitmq-random-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_recent_history_exchange = git_rmq rabbitmq-recent-history-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_routing_node_stamp = git_rmq rabbitmq-routing-node-stamp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_rtopic_exchange = git_rmq rabbitmq-rtopic-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_server_release = git_rmq rabbitmq-server-release $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_sharding = git_rmq rabbitmq-sharding $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_shovel = git_rmq rabbitmq-shovel $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_shovel_management = git_rmq rabbitmq-shovel-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_stomp = git_rmq rabbitmq-stomp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_stream = git_rmq rabbitmq-stream $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_toke = git_rmq rabbitmq-toke $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_top = git_rmq rabbitmq-top $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_tracing = git_rmq rabbitmq-tracing $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_trust_store = git_rmq rabbitmq-trust-store $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_test = git_rmq rabbitmq-test $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_dispatch = git_rmq rabbitmq-web-dispatch $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_stomp = git_rmq rabbitmq-web-stomp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_stomp_examples = git_rmq rabbitmq-web-stomp-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt = git_rmq rabbitmq-web-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt_examples = git_rmq rabbitmq-web-mqtt-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_website = git_rmq rabbitmq-website $(current_rmq_ref) $(base_rmq_ref) live master
+dep_toke = git_rmq toke $(current_rmq_ref) $(base_rmq_ref) master
+
+dep_rabbitmq_public_umbrella = git_rmq rabbitmq-public-umbrella $(current_rmq_ref) $(base_rmq_ref) master
+
+# Third-party dependencies version pinning.
+#
+# We do that in this file, which is copied into all projects, to ensure
+# all projects use the same versions. It avoids conflicts and makes it
+# possible to work with rabbitmq-public-umbrella.
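+#
+# A project can override a pin by redefining the variable after this
+# file is included (and before erlang.mk), eg. (illustrative):
+#   dep_cowboy = hex 2.9.0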
+
+dep_accept = hex 0.3.5
+dep_cowboy = hex 2.8.0
+dep_cowlib = hex 2.9.1
+dep_jsx = hex 2.11.0
+dep_lager = hex 3.8.0
+dep_prometheus = git https://github.com/deadtrickster/prometheus.erl.git master
+dep_ra = git https://github.com/rabbitmq/ra.git master
+dep_ranch = hex 1.7.1
+dep_recon = hex 2.5.1
+dep_observer_cli = hex 1.5.4
+dep_stdout_formatter = hex 0.2.4
+dep_sysmon_handler = hex 1.3.0
+
+RABBITMQ_COMPONENTS = amqp_client \
+ amqp10_common \
+ amqp10_client \
+ rabbit \
+ rabbit_common \
+ rabbitmq_amqp1_0 \
+ rabbitmq_auth_backend_amqp \
+ rabbitmq_auth_backend_cache \
+ rabbitmq_auth_backend_http \
+ rabbitmq_auth_backend_ldap \
+ rabbitmq_auth_backend_oauth2 \
+ rabbitmq_auth_mechanism_ssl \
+ rabbitmq_aws \
+ rabbitmq_boot_steps_visualiser \
+ rabbitmq_cli \
+ rabbitmq_codegen \
+ rabbitmq_consistent_hash_exchange \
+ rabbitmq_ct_client_helpers \
+ rabbitmq_ct_helpers \
+ rabbitmq_delayed_message_exchange \
+ rabbitmq_dotnet_client \
+ rabbitmq_event_exchange \
+ rabbitmq_federation \
+ rabbitmq_federation_management \
+ rabbitmq_java_client \
+ rabbitmq_jms_client \
+ rabbitmq_jms_cts \
+ rabbitmq_jms_topic_exchange \
+ rabbitmq_lvc_exchange \
+ rabbitmq_management \
+ rabbitmq_management_agent \
+ rabbitmq_management_exchange \
+ rabbitmq_management_themes \
+ rabbitmq_message_timestamp \
+ rabbitmq_metronome \
+ rabbitmq_mqtt \
+ rabbitmq_objc_client \
+ rabbitmq_peer_discovery_aws \
+ rabbitmq_peer_discovery_common \
+ rabbitmq_peer_discovery_consul \
+ rabbitmq_peer_discovery_etcd \
+ rabbitmq_peer_discovery_k8s \
+ rabbitmq_prometheus \
+ rabbitmq_random_exchange \
+ rabbitmq_recent_history_exchange \
+ rabbitmq_routing_node_stamp \
+ rabbitmq_rtopic_exchange \
+ rabbitmq_server_release \
+ rabbitmq_sharding \
+ rabbitmq_shovel \
+ rabbitmq_shovel_management \
+ rabbitmq_stomp \
+ rabbitmq_stream \
+ rabbitmq_toke \
+ rabbitmq_top \
+ rabbitmq_tracing \
+ rabbitmq_trust_store \
+ rabbitmq_web_dispatch \
+ rabbitmq_web_mqtt \
+ rabbitmq_web_mqtt_examples \
+ rabbitmq_web_stomp \
+ rabbitmq_web_stomp_examples \
+ rabbitmq_website
+
+# Erlang.mk does not rebuild dependencies once they have been
+# compiled, except for those listed in the `$(FORCE_REBUILD)`
+# variable.
+#
+# We want all RabbitMQ components to always be rebuilt: this eases
+# the work on several components at the same time.
+
+FORCE_REBUILD = $(RABBITMQ_COMPONENTS)
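+
+# A one-off run that skips the forced rebuilds can be requested on the
+# command line, since command-line variables override this assignment,
+# eg. (illustrative):
+#   make FORCE_REBUILD=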
+
+# Several components have a custom erlang.mk/build.config, mainly
+# to disable eunit. Therefore, we can't use the top-level project's
+# erlang.mk copy.
+NO_AUTOPATCH += $(RABBITMQ_COMPONENTS)
+
+ifeq ($(origin current_rmq_ref),undefined)
+ifneq ($(wildcard .git),)
+current_rmq_ref := $(shell (\
+ ref=$$(LANG=C git branch --list | awk '/^\* \(.*detached / {ref=$$0; sub(/.*detached [^ ]+ /, "", ref); sub(/\)$$/, "", ref); print ref; exit;} /^\* / {ref=$$0; sub(/^\* /, "", ref); print ref; exit}');\
+ if test "$$(git rev-parse --short HEAD)" != "$$ref"; then echo "$$ref"; fi))
+else
+current_rmq_ref := master
+endif
+endif
+export current_rmq_ref
+
+ifeq ($(origin base_rmq_ref),undefined)
+ifneq ($(wildcard .git),)
+possible_base_rmq_ref := master
+ifeq ($(possible_base_rmq_ref),$(current_rmq_ref))
+base_rmq_ref := $(current_rmq_ref)
+else
+base_rmq_ref := $(shell \
+ (git rev-parse --verify -q master >/dev/null && \
+ git rev-parse --verify -q $(possible_base_rmq_ref) >/dev/null && \
+ git merge-base --is-ancestor $$(git merge-base master HEAD) $(possible_base_rmq_ref) && \
+ echo $(possible_base_rmq_ref)) || \
+ echo master)
+endif
+else
+base_rmq_ref := master
+endif
+endif
+export base_rmq_ref
+
+# Repository URL selection.
+#
+# First, we infer other components' location from the current project
+# repository URL, if it's a Git repository:
+# - We take the "origin" remote URL as the base
+# - The current project name and repository name are replaced by the
+#   target's properties:
+# eg. rabbitmq-common is replaced by rabbitmq-codegen
+# eg. rabbit_common is replaced by rabbitmq_codegen
+#
+# If cloning from this computed location fails, we fall back to the
+# RabbitMQ upstream on GitHub.
+
+# Macro to transform eg. "rabbit_common" to "rabbitmq-common".
+rmq_cmp_repo_name = $(word 2,$(dep_$(1)))
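+# eg. $(call rmq_cmp_repo_name,rabbitmq_web_stomp) -> rabbitmq-web-stomp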
+
+# Upstream URL for the current project.
+RABBITMQ_COMPONENT_REPO_NAME := $(call rmq_cmp_repo_name,$(PROJECT))
+RABBITMQ_UPSTREAM_FETCH_URL ?= https://github.com/rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git
+RABBITMQ_UPSTREAM_PUSH_URL ?= git@github.com:rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git
+
+# Current URL for the current project. If this is not a Git clone,
+# default to the upstream Git repository.
+ifneq ($(wildcard .git),)
+git_origin_fetch_url := $(shell git config remote.origin.url)
+git_origin_push_url := $(shell git config remote.origin.pushurl || git config remote.origin.url)
+RABBITMQ_CURRENT_FETCH_URL ?= $(git_origin_fetch_url)
+RABBITMQ_CURRENT_PUSH_URL ?= $(git_origin_push_url)
+else
+RABBITMQ_CURRENT_FETCH_URL ?= $(RABBITMQ_UPSTREAM_FETCH_URL)
+RABBITMQ_CURRENT_PUSH_URL ?= $(RABBITMQ_UPSTREAM_PUSH_URL)
+endif
+
+# Macro to replace the following pattern:
+# 1. /foo.git -> /bar.git
+# 2. /foo -> /bar
+# 3. /foo/ -> /bar/
+subst_repo_name = $(patsubst %/$(1)/%,%/$(2)/%,$(patsubst %/$(1),%/$(2),$(patsubst %/$(1).git,%/$(2).git,$(3))))
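+# eg. $(call subst_repo_name,rabbitmq-server,rabbitmq-codegen,
+#       https://github.com/rabbitmq/rabbitmq-server.git)
+#     -> https://github.com/rabbitmq/rabbitmq-codegen.git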
+
+# Macro to replace both the project's name (eg. "rabbit_common") and
+# repository name (eg. "rabbitmq-common") by the target's equivalent.
+#
+# This macro is kept on one line because we don't want whitespace in
+# the returned value, as it's used in $(dep_fetch_git_rmq) in a shell
+# single-quoted string.
+dep_rmq_repo = $(if $(dep_$(2)),$(call subst_repo_name,$(PROJECT),$(2),$(call subst_repo_name,$(RABBITMQ_COMPONENT_REPO_NAME),$(call rmq_cmp_repo_name,$(2)),$(1))),$(pkg_$(1)_repo))
+
+dep_rmq_commits = $(if $(dep_$(1)), \
+ $(wordlist 3,$(words $(dep_$(1))),$(dep_$(1))), \
+ $(pkg_$(1)_commit))
+
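+# dep_fetch_git_rmq drives the "git_rmq" fetch method declared above:
+# it first clones from the URL derived from this clone's "origin"
+# remote, falls back to the upstream GitHub URL, checks out the first
+# valid ref among $(dep_rmq_commits), and points the push URL at the
+# matching remote.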
+define dep_fetch_git_rmq
+ fetch_url1='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_FETCH_URL),$(1))'; \
+ fetch_url2='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_FETCH_URL),$(1))'; \
+ if test "$$$$fetch_url1" != '$(RABBITMQ_CURRENT_FETCH_URL)' && \
+ git clone -q -n -- "$$$$fetch_url1" $(DEPS_DIR)/$(call dep_name,$(1)); then \
+ fetch_url="$$$$fetch_url1"; \
+ push_url='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_PUSH_URL),$(1))'; \
+ elif git clone -q -n -- "$$$$fetch_url2" $(DEPS_DIR)/$(call dep_name,$(1)); then \
+ fetch_url="$$$$fetch_url2"; \
+ push_url='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_PUSH_URL),$(1))'; \
+ fi; \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && ( \
+ $(foreach ref,$(call dep_rmq_commits,$(1)), \
+ git checkout -q $(ref) >/dev/null 2>&1 || \
+ ) \
+ (echo "error: no valid pathspec among: $(call dep_rmq_commits,$(1))" \
+ 1>&2 && false) ) && \
+ (test "$$$$fetch_url" = "$$$$push_url" || \
+ git remote set-url --push origin "$$$$push_url")
+endef
+
+# --------------------------------------------------------------------
+# Component distribution.
+# --------------------------------------------------------------------
+
+list-dist-deps::
+ @:
+
+prepare-dist::
+ @:
+
+# --------------------------------------------------------------------
+# Umbrella-specific settings.
+# --------------------------------------------------------------------
+
+# If the top-level project is a RabbitMQ component, we override
+# $(DEPS_DIR) for this project to point to the top-level's one.
+#
+# We also verify that the guessed DEPS_DIR is actually named `deps`,
+# to rule out any situation where it is a coincidence that we found a
+# `rabbitmq-components.mk` in upper directories.
+
+possible_deps_dir_1 = $(abspath ..)
+possible_deps_dir_2 = $(abspath ../../..)
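+
+# Layouts detected (illustrative):
+#   umbrella/deps/<project>            -> DEPS_DIR = umbrella/deps
+#   umbrella/deps/<project>/deps/<dep> -> DEPS_DIR = umbrella/deps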
+
+ifeq ($(notdir $(possible_deps_dir_1)),deps)
+ifneq ($(wildcard $(possible_deps_dir_1)/../rabbitmq-components.mk),)
+deps_dir_overriden = 1
+DEPS_DIR ?= $(possible_deps_dir_1)
+DISABLE_DISTCLEAN = 1
+endif
+endif
+
+ifeq ($(deps_dir_overriden),)
+ifeq ($(notdir $(possible_deps_dir_2)),deps)
+ifneq ($(wildcard $(possible_deps_dir_2)/../rabbitmq-components.mk),)
+deps_dir_overriden = 1
+DEPS_DIR ?= $(possible_deps_dir_2)
+DISABLE_DISTCLEAN = 1
+endif
+endif
+endif
+
+ifneq ($(wildcard UMBRELLA.md),)
+DISABLE_DISTCLEAN = 1
+endif
+
+# We disable `make distclean` so $(DEPS_DIR) is not accidentally removed.
+
+ifeq ($(DISABLE_DISTCLEAN),1)
+ifneq ($(filter distclean distclean-deps,$(MAKECMDGOALS)),)
+SKIP_DEPS = 1
+endif
+endif
diff --git a/deps/rabbitmq_tracing/src/rabbit_tracing_app.erl b/deps/rabbitmq_tracing/src/rabbit_tracing_app.erl
new file mode 100644
index 0000000000..394f44a9d0
--- /dev/null
+++ b/deps/rabbitmq_tracing/src/rabbit_tracing_app.erl
@@ -0,0 +1,17 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_tracing_app).
+
+-behaviour(application).
+-export([start/2, stop/1]).
+
+start(_Type, _StartArgs) ->
+ rabbit_tracing_sup:start_link().
+
+stop(_State) ->
+ ok.
diff --git a/deps/rabbitmq_tracing/src/rabbit_tracing_consumer.erl b/deps/rabbitmq_tracing/src/rabbit_tracing_consumer.erl
new file mode 100644
index 0000000000..c610790086
--- /dev/null
+++ b/deps/rabbitmq_tracing/src/rabbit_tracing_consumer.erl
@@ -0,0 +1,247 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_tracing_consumer).
+
+-behaviour(gen_server).
+
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-import(rabbit_misc, [pget/2, pget/3, table_lookup/2]).
+
+-record(state, {conn, ch, vhost, queue, file, filename, format, buf, buf_cnt,
+ max_payload}).
+-record(log_record, {timestamp, type, exchange, queue, node, connection,
+ vhost, username, channel, routing_keys, routed_queues,
+ properties, payload}).
+
+-define(DEFAULT_USERNAME, <<"guest">>).
+-define(DEFAULT_PASSWORD, <<"guest">>).
+
+-define(X, <<"amq.rabbitmq.trace">>).
+-define(MAX_BUF, 100).
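+
+%% Log records are buffered in the process state and flushed to disk
+%% once ?MAX_BUF entries accumulate, or on the zero-length timeout set
+%% after each delivery (see handle_info/2, maybe_flush/1 and flush/1).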
+
+-export([start_link/1, info_all/1]).
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
+ code_change/3]).
+
+start_link(Args) ->
+ gen_server:start_link(?MODULE, Args, []).
+
+info_all(Pid) ->
+ gen_server:call(Pid, info_all, infinity).
+
+%%----------------------------------------------------------------------------
+
+init(Args0) ->
+ process_flag(trap_exit, true),
+ Args = filter_optional_user_pass(Args0),
+ Name = pget(name, Args),
+ VHost = pget(vhost, Args),
+    Username0 = pget(tracer_connection_username, Args,
+                     rabbit_misc:get_env(rabbitmq_tracing, username, ?DEFAULT_USERNAME)),
+    Password0 = pget(tracer_connection_password, Args,
+                     rabbit_misc:get_env(rabbitmq_tracing, password, ?DEFAULT_PASSWORD)),
+    %% Coerce configured values (which may be strings) to binaries.
+    %% Matching the result back against the already-bound variables
+    %% would crash with badmatch whenever coercion changes the value.
+    Username = rabbit_tracing_util:coerce_env_value(username, Username0),
+    Password = rabbit_tracing_util:coerce_env_value(password, Password0),
+ MaxPayload = pget(max_payload_bytes, Args, unlimited),
+ {ok, Conn} = amqp_connection:start(
+ #amqp_params_direct{virtual_host = VHost,
+ username = Username,
+ password = Password}),
+ link(Conn),
+ {ok, Ch} = amqp_connection:open_channel(Conn),
+ link(Ch),
+ #'queue.declare_ok'{queue = Q} =
+ amqp_channel:call(Ch, #'queue.declare'{durable = false,
+ exclusive = true}),
+ #'queue.bind_ok'{} =
+ amqp_channel:call(
+ Ch, #'queue.bind'{exchange = ?X, queue = Q,
+ routing_key = pget(pattern, Args)}),
+ amqp_channel:enable_delivery_flow_control(Ch),
+ #'basic.consume_ok'{} =
+ amqp_channel:subscribe(Ch, #'basic.consume'{queue = Q,
+ no_ack = true}, self()),
+ {ok, Dir} = application:get_env(directory),
+ Filename = Dir ++ "/" ++ binary_to_list(Name) ++ ".log",
+ case filelib:ensure_dir(Filename) of
+ ok ->
+ case prim_file:open(Filename, [append]) of
+ {ok, F} ->
+ rabbit_tracing_traces:announce(VHost, Name, self()),
+ Format = list_to_atom(binary_to_list(pget(format, Args))),
+ rabbit_log:info("Tracer opened log file ~p with "
+ "format ~p~n", [Filename, Format]),
+ {ok, #state{conn = Conn, ch = Ch, vhost = VHost, queue = Q,
+ file = F, filename = Filename,
+ format = Format, buf = [], buf_cnt = 0,
+ max_payload = MaxPayload}};
+ {error, E} ->
+ {stop, {could_not_open, Filename, E}}
+ end;
+ {error, E} ->
+ {stop, {could_not_create_dir, Dir, E}}
+ end.
+
+handle_call(info_all, _From, State = #state{vhost = V, queue = Q}) ->
+ [QInfo] = rabbit_mgmt_db:augment_queues(
+ [rabbit_mgmt_wm_queue:queue(V, Q)],
+ rabbit_mgmt_util:no_range(), basic),
+ {reply, [{queue, rabbit_mgmt_format:strip_pids(QInfo)}], State};
+
+handle_call(_Req, _From, State) ->
+ {reply, unknown_request, State}.
+
+handle_cast(_C, State) ->
+ {noreply, State}.
+
+handle_info({BasicDeliver, Msg, DeliveryCtx},
+ State = #state{format = Format}) ->
+ amqp_channel:notify_received(DeliveryCtx),
+ {noreply, log(Format, delivery_to_log_record({BasicDeliver, Msg}, State),
+ State),
+ 0};
+
+handle_info(timeout, State) ->
+ {noreply, flush(State)};
+
+handle_info(_I, State) ->
+ {noreply, State}.
+
+terminate(shutdown, State = #state{conn = Conn, ch = Ch,
+ file = F, filename = Filename}) ->
+ flush(State),
+ catch amqp_channel:close(Ch),
+ catch amqp_connection:close(Conn),
+ catch prim_file:close(F),
+ rabbit_log:info("Tracer closed log file ~p~n", [Filename]),
+ ok;
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_, State, _) -> {ok, State}.
+
+%%----------------------------------------------------------------------------
+
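+%% The firehose publishes to the amq.rabbitmq.trace exchange with a
+%% routing key of "publish.<exchange>" or "deliver.<queue>"; the prefix
+%% decides whether this is a published or received record.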
+delivery_to_log_record({#'basic.deliver'{routing_key = Key},
+ #amqp_msg{props = #'P_basic'{headers = H},
+ payload = Payload}}, State) ->
+ {Type, Q, RQs} = case Key of
+ <<"publish.", _Rest/binary>> ->
+ {array, Qs} = table_lookup(H, <<"routed_queues">>),
+ {published, none, [Q || {_, Q} <- Qs]};
+ <<"deliver.", Rest/binary>> ->
+ {received, Rest, none}
+ end,
+ {longstr, Node} = table_lookup(H, <<"node">>),
+ {longstr, X} = table_lookup(H, <<"exchange_name">>),
+ {array, Keys} = table_lookup(H, <<"routing_keys">>),
+ {table, Props} = table_lookup(H, <<"properties">>),
+ {longstr, Conn} = table_lookup(H, <<"connection">>),
+ {longstr, VHost} = table_lookup(H, <<"vhost">>),
+ {longstr, User} = table_lookup(H, <<"user">>),
+ {signedint, Chan} = table_lookup(H, <<"channel">>),
+ #log_record{timestamp = rabbit_mgmt_format:now_to_str_ms(
+ os:system_time(milli_seconds)),
+ type = Type,
+ exchange = X,
+ queue = Q,
+ node = Node,
+ connection = Conn,
+ vhost = VHost,
+ username = User,
+ channel = Chan,
+ routing_keys = [K || {_, K} <- Keys],
+ routed_queues= RQs,
+ properties = Props,
+ payload = truncate(Payload, State)}.
+
+log(text, Record, State) ->
+ Fmt = "~n========================================"
+ "========================================~n~s: Message ~s~n~n"
+ "Node: ~s~nConnection: ~s~n"
+ "Virtual host: ~s~nUser: ~s~n"
+ "Channel: ~p~nExchange: ~s~n"
+ "Routing keys: ~p~n" ++
+ case Record#log_record.queue of
+ none -> "";
+ _ -> "Queue: ~s~n"
+ end ++
+ case Record#log_record.routed_queues of
+ none -> "";
+ _ -> "Routed queues: ~p~n"
+ end ++
+ "Properties: ~p~nPayload: ~n~s~n",
+ Args =
+ [Record#log_record.timestamp,
+ Record#log_record.type,
+ Record#log_record.node, Record#log_record.connection,
+ Record#log_record.vhost, Record#log_record.username,
+ Record#log_record.channel, Record#log_record.exchange,
+ Record#log_record.routing_keys] ++
+ case Record#log_record.queue of
+ none -> [];
+ Q -> [Q]
+ end ++
+ case Record#log_record.routed_queues of
+ none -> [];
+ RQs -> [RQs]
+ end ++
+ [Record#log_record.properties, Record#log_record.payload],
+ print_log(io_lib:format(Fmt, Args), State);
+
+log(json, Record, State) ->
+ print_log([rabbit_json:encode(
+ #{timestamp => Record#log_record.timestamp,
+ type => Record#log_record.type,
+ node => Record#log_record.node,
+ connection => Record#log_record.connection,
+ vhost => Record#log_record.vhost,
+ user => Record#log_record.username,
+ channel => Record#log_record.channel,
+ exchange => Record#log_record.exchange,
+ queue => Record#log_record.queue,
+ routed_queues => Record#log_record.routed_queues,
+ routing_keys => Record#log_record.routing_keys,
+ properties => rabbit_mgmt_format:amqp_table(
+ Record#log_record.properties),
+ payload => base64:encode(Record#log_record.payload)}),
+ "\n"],
+ State).
+
+print_log(LogMsg, State = #state{buf = Buf, buf_cnt = BufCnt}) ->
+ maybe_flush(State#state{buf = [LogMsg | Buf], buf_cnt = BufCnt + 1}).
+
+maybe_flush(State = #state{buf_cnt = ?MAX_BUF}) ->
+ flush(State);
+maybe_flush(State) ->
+ State.
+
+flush(State = #state{file = F, buf = Buf}) ->
+ prim_file:write(F, lists:reverse(Buf)),
+ State#state{buf = [], buf_cnt = 0}.
+
+truncate(Payload, #state{max_payload = Max}) ->
+ case Max =:= unlimited orelse size(Payload) =< Max of
+ true -> Payload;
+ false -> <<Trunc:Max/binary, _/binary>> = Payload,
+ Trunc
+ end.
+
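+%% An empty tracer_connection_username means "use the configured
+%% defaults", so both credential keys are dropped and the pget/3
+%% defaults in init/1 apply instead.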
+filter_optional_user_pass(Args) ->
+ case lists:member({tracer_connection_username,<<>>}, Args) of
+ true ->
+ [{K, V} || {K, V} <- Args,
+ not lists:member(K, [tracer_connection_username,
+ tracer_connection_password,
+ <<"tracer_connection_username">>,
+ <<"tracer_connection_password">>])];
+ _ ->
+ Args
+ end.
diff --git a/deps/rabbitmq_tracing/src/rabbit_tracing_consumer_sup.erl b/deps/rabbitmq_tracing/src/rabbit_tracing_consumer_sup.erl
new file mode 100644
index 0000000000..b651a75a99
--- /dev/null
+++ b/deps/rabbitmq_tracing/src/rabbit_tracing_consumer_sup.erl
@@ -0,0 +1,25 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_tracing_consumer_sup).
+
+-behaviour(supervisor).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-export([start_link/1]).
+-export([init/1]).
+
+start_link(Args) -> supervisor2:start_link(?MODULE, Args).
+
+%%----------------------------------------------------------------------------
+
+init(Args) ->
+ {ok, {{one_for_one, 3, 10},
+ [{consumer, {rabbit_tracing_consumer, start_link, [Args]},
+ transient, ?WORKER_WAIT, worker,
+ [rabbit_tracing_consumer]}]}}.
diff --git a/deps/rabbitmq_tracing/src/rabbit_tracing_files.erl b/deps/rabbitmq_tracing/src/rabbit_tracing_files.erl
new file mode 100644
index 0000000000..dc3a70b7cc
--- /dev/null
+++ b/deps/rabbitmq_tracing/src/rabbit_tracing_files.erl
@@ -0,0 +1,48 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_tracing_files).
+
+-include_lib("kernel/include/file.hrl").
+
+-export([list/0, exists/1, delete/1, full_path/1]).
+
+%%--------------------------------------------------------------------
+
+list() ->
+ {ok, Dir} = application:get_env(rabbitmq_tracing, directory),
+ ok = filelib:ensure_dir(Dir ++ "/a"),
+ {ok, Names} = file:list_dir(Dir),
+ [file_info(Name) || Name <- Names].
+
+exists(Name) ->
+ filelib:is_regular(full_path(Name)).
+
+delete(Name) ->
+ ok = file:delete(full_path(Name)).
+
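+%% safe_relative_path/1 returns `undefined' for names that would escape
+%% the configured directory (eg. containing ".."), in which case we bail
+%% out instead of serving an arbitrary file.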
+full_path(Name0) when is_binary(Name0) ->
+ full_path(binary_to_list(Name0));
+full_path(Name0) ->
+ {ok, Dir} = application:get_env(rabbitmq_tracing, directory),
+ case rabbit_http_util:safe_relative_path(Name0) of
+ undefined -> exit(how_rude);
+ Name -> Dir ++ "/" ++ Name
+ end.
+
+%%--------------------------------------------------------------------
+
+file_info(Name) ->
+ Size = case file:read_file_info(full_path(Name)) of
+ {ok, Info} ->
+ Info#file_info.size;
+ {error, Error} ->
+ rabbit_log:warning("error getting file info for ~s: ~p",
+ [Name, Error]),
+ 0
+ end,
+ [{name, list_to_binary(Name)}, {size, Size}].
diff --git a/deps/rabbitmq_tracing/src/rabbit_tracing_mgmt.erl b/deps/rabbitmq_tracing/src/rabbit_tracing_mgmt.erl
new file mode 100644
index 0000000000..c408150939
--- /dev/null
+++ b/deps/rabbitmq_tracing/src/rabbit_tracing_mgmt.erl
@@ -0,0 +1,25 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_tracing_mgmt).
+
+-behaviour(rabbit_mgmt_extension).
+
+-export([dispatcher/0, web_ui/0]).
+
+dispatcher() -> [{"/traces", rabbit_tracing_wm_traces, []},
+ {"/traces/node/:node", rabbit_tracing_wm_traces, []},
+ {"/traces/:vhost", rabbit_tracing_wm_traces, []},
+ {"/traces/node/:node/:vhost", rabbit_tracing_wm_traces, []},
+ {"/traces/:vhost/:name", rabbit_tracing_wm_trace, []},
+ {"/traces/node/:node/:vhost/:name", rabbit_tracing_wm_trace, []},
+ {"/trace-files", rabbit_tracing_wm_files, []},
+ {"/trace-files/node/:node", rabbit_tracing_wm_files, []},
+ {"/trace-files/:name", rabbit_tracing_wm_file, []},
+ {"/trace-files/node/:node/:name", rabbit_tracing_wm_file, []}].
+
+web_ui() -> [{javascript, <<"tracing.js">>}].
diff --git a/deps/rabbitmq_tracing/src/rabbit_tracing_sup.erl b/deps/rabbitmq_tracing/src/rabbit_tracing_sup.erl
new file mode 100644
index 0000000000..d8fdd94d81
--- /dev/null
+++ b/deps/rabbitmq_tracing/src/rabbit_tracing_sup.erl
@@ -0,0 +1,50 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at https://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_tracing_sup).
+
+-behaviour(supervisor).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-define(SUPERVISOR, ?MODULE).
+
+-export([start_link/0, start_child/2, stop_child/1]).
+-export([init/1]).
+
+%%----------------------------------------------------------------------------
+
+start_link() ->
+ supervisor:start_link({local, ?SUPERVISOR}, ?MODULE, []).
+
+start_child(Id, Args) ->
+ supervisor:start_child(
+ ?SUPERVISOR,
+ {Id, {rabbit_tracing_consumer_sup, start_link, [Args]},
+ temporary, ?SUPERVISOR_WAIT, supervisor,
+ [rabbit_tracing_consumer_sup]}).
+
+stop_child(Id) ->
+ supervisor:terminate_child(?SUPERVISOR, Id),
+ supervisor:delete_child(?SUPERVISOR, Id),
+ ok.
+
+%%----------------------------------------------------------------------------
+
+init([]) -> {ok, {{one_for_one, 3, 10},
+ [{traces, {rabbit_tracing_traces, start_link, []},
+ transient, ?WORKER_WAIT, worker,
+ [rabbit_tracing_traces]}]}}.
diff --git a/deps/rabbitmq_tracing/src/rabbit_tracing_traces.erl b/deps/rabbitmq_tracing/src/rabbit_tracing_traces.erl
new file mode 100644
index 0000000000..fc2d2b1520
--- /dev/null
+++ b/deps/rabbitmq_tracing/src/rabbit_tracing_traces.erl
@@ -0,0 +1,116 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_tracing_traces).
+
+-behaviour(gen_server).
+
+-import(rabbit_misc, [pget/2]).
+
+-export([list/0, lookup/2, create/3, stop/2, announce/3]).
+
+-export([start_link/0]).
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
+ code_change/3]).
+
+-define(SERVER, ?MODULE).
+
+-record(state, { table }).
+
+%%--------------------------------------------------------------------
+
+start_link() ->
+ gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
+
+list() ->
+ gen_server:call(?MODULE, list, infinity).
+
+lookup(VHost, Name) ->
+ gen_server:call(?MODULE, {lookup, VHost, Name}, infinity).
+
+create(VHost, Name, Trace) ->
+ gen_server:call(?MODULE, {create, VHost, Name, Trace}, infinity).
+
+stop(VHost, Name) ->
+ gen_server:call(?MODULE, {stop, VHost, Name}, infinity).
+
+announce(VHost, Name, Pid) ->
+ gen_server:cast(?MODULE, {announce, {VHost, Name}, Pid}).
+
+%%--------------------------------------------------------------------
+
+init([]) ->
+ {ok, #state{table = ets:new(anon, [private])}}.
+
+handle_call(list, _From, State = #state{table = Table}) ->
+ {reply, [augment(Trace) || {_K, Trace} <- ets:tab2list(Table)], State};
+
+handle_call({lookup, VHost, Name}, _From, State = #state{table = Table}) ->
+ {reply, case ets:lookup(Table, {VHost, Name}) of
+ [] -> not_found;
+ [{_K, Trace}] -> augment(Trace)
+ end, State};
+
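+%% Creating the first trace for a vhost turns on the firehose for that
+%% vhost via rabbit_trace:start/1; stopping the last one turns it off
+%% again (see the stop clause below).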
+handle_call({create, VHost, Name, Trace0}, _From,
+ State = #state{table = Table}) ->
+ Already = vhost_tracing(VHost, Table),
+ Trace = pset(vhost, VHost, pset(name, Name, Trace0)),
+ true = ets:insert(Table, {{VHost, Name}, Trace}),
+ case Already of
+ true -> ok;
+ false -> rabbit_trace:start(VHost)
+ end,
+ {reply, rabbit_tracing_sup:start_child({VHost, Name}, Trace), State};
+
+handle_call({stop, VHost, Name}, _From, State = #state{table = Table}) ->
+ true = ets:delete(Table, {VHost, Name}),
+ case vhost_tracing(VHost, Table) of
+ true -> ok;
+ false -> rabbit_trace:stop(VHost)
+ end,
+ rabbit_tracing_sup:stop_child({VHost, Name}),
+ {reply, ok, State};
+
+handle_call(_Req, _From, State) ->
+ {reply, unknown_request, State}.
+
+handle_cast({announce, Key, Pid}, State = #state{table = Table}) ->
+ case ets:lookup(Table, Key) of
+ [] -> ok;
+ [{_, Trace}] -> ets:insert(Table, {Key, pset(pid, Pid, Trace)})
+ end,
+ {noreply, State};
+
+handle_cast(_C, State) ->
+ {noreply, State}.
+
+handle_info(_I, State) ->
+ {noreply, State}.
+
+terminate(_, _) -> ok.
+
+code_change(_, State, _) -> {ok, State}.
+
+%%--------------------------------------------------------------------
+
+pset(Key, Value, List) -> [{Key, Value} | proplists:delete(Key, List)].
+
+vhost_tracing(VHost, Table) ->
+ case [true || {{V, _}, _} <- ets:tab2list(Table), V =:= VHost] of
+ [] -> false;
+ _ -> true
+ end.
+
+augment(Trace) ->
+ Pid = pget(pid, Trace),
+ Trace1 = lists:keydelete(tracer_connection_password, 1,
+ lists:keydelete(<<"tracer_connection_password">>, 1,
+ lists:keydelete(pid, 1, Trace))),
+ case Pid of
+ undefined -> Trace1;
+ _ -> rabbit_tracing_consumer:info_all(Pid) ++ Trace1
+ end.
diff --git a/deps/rabbitmq_tracing/src/rabbit_tracing_util.erl b/deps/rabbitmq_tracing/src/rabbit_tracing_util.erl
new file mode 100644
index 0000000000..93cb1dcb20
--- /dev/null
+++ b/deps/rabbitmq_tracing/src/rabbit_tracing_util.erl
@@ -0,0 +1,32 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_tracing_util).
+
+-export([coerce_env_value/2]).
+-export([apply_on_node/5]).
+
+coerce_env_value(username, Val) -> rabbit_data_coercion:to_binary(Val);
+coerce_env_value(password, Val) -> rabbit_data_coercion:to_binary(Val);
+coerce_env_value(_, Val) -> Val.
+
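+%% Run Mod:Fun(Args) locally, or on the node named by the ":node"
+%% binding in the request path via rpc:call/4; RPC failures become an
+%% HTTP 400 response.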
+apply_on_node(ReqData, Context, Mod, Fun, Args) ->
+ case rabbit_mgmt_util:id(node, ReqData) of
+ none ->
+ apply(Mod, Fun, Args);
+ Node0 ->
+ Node = binary_to_atom(Node0, utf8),
+ case rpc:call(Node, Mod, Fun, Args) of
+ {badrpc, _} = Error ->
+ Msg = io_lib:format("Node ~p could not be contacted: ~p",
+ [Node, Error]),
+ rabbit_log:warning(Msg, []),
+ rabbit_mgmt_util:bad_request(list_to_binary(Msg), ReqData, Context);
+ Any ->
+ Any
+ end
+ end.
diff --git a/deps/rabbitmq_tracing/src/rabbit_tracing_wm_file.erl b/deps/rabbitmq_tracing/src/rabbit_tracing_wm_file.erl
new file mode 100644
index 0000000000..17c225cba6
--- /dev/null
+++ b/deps/rabbitmq_tracing/src/rabbit_tracing_wm_file.erl
@@ -0,0 +1,52 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+-module(rabbit_tracing_wm_file).
+
+-export([init/2, resource_exists/2, serve/2, content_types_provided/2,
+ is_authorized/2, allowed_methods/2, delete_resource/2]).
+-export([serve/1]).
+
+-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl").
+
+%%--------------------------------------------------------------------
+init(Req, _State) ->
+ {cowboy_rest, rabbit_mgmt_cors:set_headers(Req, ?MODULE), #context{}}.
+
+
+content_types_provided(ReqData, Context) ->
+ {[{<<"text/plain">>, serve}], ReqData, Context}.
+
+allowed_methods(ReqData, Context) ->
+ {[<<"HEAD">>, <<"GET">>, <<"DELETE">>], ReqData, Context}.
+
+resource_exists(ReqData, Context) ->
+ Name = rabbit_mgmt_util:id(name, ReqData),
+ Exists = rabbit_tracing_util:apply_on_node(ReqData, Context, rabbit_tracing_files,
+ exists, [Name]),
+ {Exists, ReqData, Context}.
+
+serve(ReqData, Context) ->
+ Name = rabbit_mgmt_util:id(name, ReqData),
+ Content = rabbit_tracing_util:apply_on_node(ReqData, Context,
+ rabbit_tracing_wm_file,
+ serve, [Name]),
+ {Content, ReqData, Context}.
+
+serve(Name) ->
+ Path = rabbit_tracing_files:full_path(Name),
+ {ok, Content} = file:read_file(Path),
+ Content.
+
+delete_resource(ReqData, Context) ->
+ Name = rabbit_mgmt_util:id(name, ReqData),
+ ok = rabbit_tracing_util:apply_on_node(ReqData, Context, rabbit_tracing_files,
+ delete, [Name]),
+ {true, ReqData, Context}.
+
+is_authorized(ReqData, Context) ->
+ rabbit_mgmt_util:is_authorized_admin(ReqData, Context).
+
diff --git a/deps/rabbitmq_tracing/src/rabbit_tracing_wm_files.erl b/deps/rabbitmq_tracing/src/rabbit_tracing_wm_files.erl
new file mode 100644
index 0000000000..74dc527524
--- /dev/null
+++ b/deps/rabbitmq_tracing/src/rabbit_tracing_wm_files.erl
@@ -0,0 +1,30 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_tracing_wm_files).
+
+-export([init/2, to_json/2, content_types_provided/2, is_authorized/2]).
+
+-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl").
+
+%%--------------------------------------------------------------------
+init(Req, _State) ->
+ {cowboy_rest, rabbit_mgmt_cors:set_headers(Req, ?MODULE), #context{}}.
+
+
+content_types_provided(ReqData, Context) ->
+ {[{<<"application/json">>, to_json}], ReqData, Context}.
+
+to_json(ReqData, Context) ->
+ List = rabbit_tracing_util:apply_on_node(ReqData, Context,
+ rabbit_tracing_files, list, []),
+ rabbit_mgmt_util:reply(List, ReqData, Context).
+
+is_authorized(ReqData, Context) ->
+ rabbit_mgmt_util:is_authorized_admin(ReqData, Context).
+
+%%--------------------------------------------------------------------
diff --git a/deps/rabbitmq_tracing/src/rabbit_tracing_wm_trace.erl b/deps/rabbitmq_tracing/src/rabbit_tracing_wm_trace.erl
new file mode 100644
index 0000000000..92474be77e
--- /dev/null
+++ b/deps/rabbitmq_tracing/src/rabbit_tracing_wm_trace.erl
@@ -0,0 +1,105 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+-module(rabbit_tracing_wm_trace).
+
+-export([init/2, resource_exists/2, to_json/2,
+ content_types_provided/2, content_types_accepted/2,
+ is_authorized/2, allowed_methods/2, accept_content/2,
+ delete_resource/2]).
+
+-define(ERR, <<"Something went wrong trying to start the trace - check the "
+ "logs.">>).
+
+-import(rabbit_misc, [pget/2, pget/3]).
+
+-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl").
+
+%%--------------------------------------------------------------------
+init(Req, _State) ->
+ {cowboy_rest, rabbit_mgmt_cors:set_headers(Req, ?MODULE), #context{}}.
+
+
+content_types_provided(ReqData, Context) ->
+ {[{<<"application/json">>, to_json}], ReqData, Context}.
+
+content_types_accepted(ReqData, Context) ->
+ {[{<<"application/json">>, accept_content}], ReqData, Context}.
+
+allowed_methods(ReqData, Context) ->
+ {[<<"HEAD">>, <<"GET">>, <<"PUT">>, <<"DELETE">>], ReqData, Context}.
+
+resource_exists(ReqData, Context) ->
+ {case trace(ReqData, Context) of
+ not_found -> false;
+ _ -> true
+ end, ReqData, Context}.
+
+to_json(ReqData, Context) ->
+ rabbit_mgmt_util:reply(trace(ReqData, Context), ReqData, Context).
+
+accept_content(ReqData0, Ctx) ->
+ case rabbit_mgmt_util:vhost(ReqData0) of
+ not_found ->
+ not_found;
+ VHost ->
+ Name = rabbit_mgmt_util:id(name, ReqData0),
+ rabbit_mgmt_util:with_decode(
+ [format, pattern], ReqData0, Ctx,
+ fun([_, _], Trace, ReqData) ->
+ Fs = [fun val_payload_bytes/5, fun val_format/5,
+ fun val_create/5],
+ case lists:foldl(fun (F, ok) -> F(ReqData, Ctx, VHost,
+ Name, Trace);
+ (_F, Err) -> Err
+ end, ok, Fs) of
+ ok -> {true, ReqData, Ctx};
+ Err -> rabbit_mgmt_util:bad_request(Err,
+ ReqData,
+ Ctx)
+ end
+ end)
+ end.
+
+delete_resource(ReqData, Context) ->
+ VHost = rabbit_mgmt_util:vhost(ReqData),
+ rabbit_tracing_util:apply_on_node(ReqData, Context, rabbit_tracing_traces, stop,
+ [VHost, rabbit_mgmt_util:id(name, ReqData)]),
+ {true, ReqData, Context}.
+
+is_authorized(ReqData, Context) ->
+ rabbit_mgmt_util:is_authorized_admin(ReqData, Context).
+
+%%--------------------------------------------------------------------
+
+trace(ReqData, Context) ->
+ case rabbit_mgmt_util:vhost(ReqData) of
+ not_found -> not_found;
+ VHost ->
+ Name = rabbit_mgmt_util:id(name, ReqData),
+ rabbit_tracing_util:apply_on_node(ReqData, Context, rabbit_tracing_traces,
+ lookup, [VHost, Name])
+ end.
+
+val_payload_bytes(_ReqData, _Context, _VHost, _Name, Trace) ->
+ case is_integer(maps:get(max_payload_bytes, Trace, 0)) of
+ false -> <<"max_payload_bytes not integer">>;
+ true -> ok
+ end.
+
+val_format(_ReqData, _Context, _VHost, _Name, Trace) ->
+ case lists:member(maps:get(format, Trace), [<<"json">>, <<"text">>]) of
+ false -> <<"format not json or text">>;
+ true -> ok
+ end.
+
+val_create(ReqData, Context, VHost, Name, Trace) ->
+ case rabbit_tracing_util:apply_on_node(
+ ReqData, Context, rabbit_tracing_traces, create,
+ [VHost, Name, maps:to_list(Trace)]) of
+ {ok, _} -> ok;
+ _ -> ?ERR
+ end.
diff --git a/deps/rabbitmq_tracing/src/rabbit_tracing_wm_traces.erl b/deps/rabbitmq_tracing/src/rabbit_tracing_wm_traces.erl
new file mode 100644
index 0000000000..f41dc76336
--- /dev/null
+++ b/deps/rabbitmq_tracing/src/rabbit_tracing_wm_traces.erl
@@ -0,0 +1,39 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at https://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_tracing_wm_traces).
+
+-export([init/2, to_json/2, content_types_provided/2, is_authorized/2]).
+
+-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl").
+
+%%--------------------------------------------------------------------
+init(Req, _State) ->
+ {cowboy_rest, rabbit_mgmt_cors:set_headers(Req, ?MODULE), #context{}}.
+
+
+content_types_provided(ReqData, Context) ->
+ {[{<<"application/json">>, to_json}], ReqData, Context}.
+
+to_json(ReqData, Context) ->
+ List = rabbit_tracing_util:apply_on_node(ReqData, Context,
+ rabbit_tracing_traces, list, []),
+ rabbit_mgmt_util:reply(List, ReqData, Context).
+
+is_authorized(ReqData, Context) ->
+ rabbit_mgmt_util:is_authorized_admin(ReqData, Context).
+
+%%--------------------------------------------------------------------
diff --git a/deps/rabbitmq_tracing/test/rabbit_tracing_SUITE.erl b/deps/rabbitmq_tracing/test/rabbit_tracing_SUITE.erl
new file mode 100644
index 0000000000..9c34ca6605
--- /dev/null
+++ b/deps/rabbitmq_tracing/test/rabbit_tracing_SUITE.erl
@@ -0,0 +1,235 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_tracing_SUITE).
+
+-compile(export_all).
+
+-define(LOG_DIR, "/var/tmp/rabbitmq-tracing/").
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include_lib("rabbitmq_ct_helpers/include/rabbit_mgmt_test.hrl").
+
+all() ->
+ [
+ {group, non_parallel_tests}
+ ].
+
+groups() ->
+ [
+ {non_parallel_tests, [], [
+ tracing_test,
+ tracing_validation_test
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+    rabbit_ct_helpers:log_environment(),
+    %% initializes httpc
+    inets:start(),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, ?MODULE}
+ ]),
+ rabbit_ct_helpers:run_setup_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_group(_, Config) ->
+ Config.
+
+end_per_group(_, Config) ->
+ Config.
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+
+tracing_test(Config) ->
+ case filelib:is_dir(?LOG_DIR) of
+ true -> {ok, Files} = file:list_dir(?LOG_DIR),
+ [ok = file:delete(?LOG_DIR ++ F) || F <- Files];
+ _ -> ok
+ end,
+
+ [] = http_get(Config, "/traces/%2f/"),
+ [] = http_get(Config, "/trace-files/"),
+
+ Args = #{format => <<"json">>,
+ pattern => <<"#">>},
+ http_put(Config, "/traces/%2f/test", Args, ?CREATED),
+ assert_list([#{name => <<"test">>,
+ format => <<"json">>,
+ pattern => <<"#">>}], http_get(Config, "/traces/%2f/")),
+ assert_item(#{name => <<"test">>,
+ format => <<"json">>,
+ pattern => <<"#">>}, http_get(Config, "/traces/%2f/test")),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config),
+ amqp_channel:cast(Ch, #'basic.publish'{ exchange = <<"amq.topic">>,
+ routing_key = <<"key">> },
+ #amqp_msg{props = #'P_basic'{},
+ payload = <<"Hello world">>}),
+
+ rabbit_ct_client_helpers:close_channel(Ch),
+
+ timer:sleep(100),
+
+ http_delete(Config, "/traces/%2f/test", ?NO_CONTENT),
+ [] = http_get(Config, "/traces/%2f/"),
+ assert_list([#{name => <<"test.log">>}], http_get(Config, "/trace-files/")),
+    %% This is a bit cheeky, as the log is actually one JSON doc per
+    %% line and we only check the first line
+ RawLog = http_get_raw(Config, "/trace-files/test.log"),
+ FirstLogLine = hd(string:tokens(RawLog, "\n")),
+ FirstLogItem = decode(?OK, [], FirstLogLine),
+ assert_item(#{type => <<"published">>,
+ exchange => <<"amq.topic">>,
+ routing_keys => [<<"key">>],
+ payload => base64:encode(<<"Hello world">>)},
+ FirstLogItem),
+ http_delete(Config, "/trace-files/test.log", ?NO_CONTENT),
+
+ passed.
+
+tracing_validation_test(Config) ->
+ Path = "/traces/%2f/test",
+ http_put(Config, Path, #{pattern => <<"#">>}, ?BAD_REQUEST),
+ http_put(Config, Path, #{format => <<"json">>}, ?BAD_REQUEST),
+ http_put(Config, Path, #{format => <<"ebcdic">>,
+ pattern => <<"#">>}, ?BAD_REQUEST),
+ http_put(Config, Path, #{format => <<"text">>,
+ pattern => <<"#">>,
+ max_payload_bytes => <<"abc">>}, ?BAD_REQUEST),
+ http_put(Config, Path, #{format => <<"json">>,
+ pattern => <<"#">>,
+ max_payload_bytes => 1000}, ?CREATED),
+ http_delete(Config, Path, ?NO_CONTENT),
+ ok.
+
+%%---------------------------------------------------------------------------
+%% TODO: Below is copied from rabbit_mgmt_test_http,
+%% should be moved to use rabbit_mgmt_test_util once rabbitmq_management
+%% is moved to Common Test
+
+http_get(Config, Path) ->
+ http_get(Config, Path, ?OK).
+
+http_get(Config, Path, CodeExp) ->
+ http_get(Config, Path, "guest", "guest", CodeExp).
+
+http_get(Config, Path, User, Pass, CodeExp) ->
+ {ok, {{_HTTP, CodeAct, _}, Headers, ResBody}} =
+ req(Config, get, Path, [auth_header(User, Pass)]),
+ assert_code(CodeExp, CodeAct, "GET", Path, ResBody),
+ decode(CodeExp, Headers, ResBody).
+
+http_get_raw(Config, Path) ->
+ http_get_raw(Config, Path, "guest", "guest", ?OK).
+
+http_get_raw(Config, Path, User, Pass, CodeExp) ->
+ {ok, {{_HTTP, CodeAct, _}, _Headers, ResBody}} =
+ req(Config, get, Path, [auth_header(User, Pass)]),
+ assert_code(CodeExp, CodeAct, "GET", Path, ResBody),
+ ResBody.
+
+http_put(Config, Path, List, CodeExp) ->
+ http_put_raw(Config, Path, format_for_upload(List), CodeExp).
+
+format_for_upload(Map0) ->
+ Map = maps:fold(fun(K, V, Acc) ->
+ Acc#{atom_to_binary(K, latin1) => V}
+ end, #{}, Map0),
+ iolist_to_binary(rabbit_json:encode(Map)).
+
+http_put_raw(Config, Path, Body, CodeExp) ->
+ http_upload_raw(Config, put, Path, Body, "guest", "guest", CodeExp).
+
+http_upload_raw(Config, Type, Path, Body, User, Pass, CodeExp) ->
+ {ok, {{_HTTP, CodeAct, _}, Headers, ResBody}} =
+ req(Config, Type, Path, [auth_header(User, Pass)], Body),
+ assert_code(CodeExp, CodeAct, Type, Path, ResBody),
+ decode(CodeExp, Headers, ResBody).
+
+http_delete(Config, Path, CodeExp) ->
+ http_delete(Config, Path, "guest", "guest", CodeExp).
+
+http_delete(Config, Path, User, Pass, CodeExp) ->
+ {ok, {{_HTTP, CodeAct, _}, Headers, ResBody}} =
+ req(Config, delete, Path, [auth_header(User, Pass)]),
+ assert_code(CodeExp, CodeAct, "DELETE", Path, ResBody),
+ decode(CodeExp, Headers, ResBody).
+
+assert_code(CodeExp, CodeAct, Type, Path, Body) ->
+ case CodeExp of
+ CodeAct -> ok;
+ _ -> error({expected, CodeExp, got, CodeAct, type, Type,
+ path, Path, body, Body})
+ end.
+
+mgmt_port(Config) ->
+ config_port(Config, tcp_port_mgmt).
+
+config_port(Config, PortKey) ->
+ rabbit_ct_broker_helpers:get_node_config(Config, 0, PortKey).
+
+uri_base_from(Config) ->
+ binary_to_list(
+ rabbit_mgmt_format:print(
+ "http://localhost:~w/api",
+ [mgmt_port(Config)])).
+
+req(Config, Type, Path, Headers) ->
+ httpc:request(Type, {uri_base_from(Config) ++ Path, Headers}, ?HTTPC_OPTS, []).
+
+req(Config, Type, Path, Headers, Body) ->
+ httpc:request(Type, {uri_base_from(Config) ++ Path, Headers, "application/json", Body},
+ ?HTTPC_OPTS, []).
+
+decode(?OK, _Headers, ResBody) -> cleanup(rabbit_json:decode(rabbit_data_coercion:to_binary(ResBody)));
+decode(_, Headers, _ResBody) -> Headers.
+
+cleanup(L) when is_list(L) ->
+ [cleanup(I) || I <- L];
+cleanup(M) when is_map(M) ->
+ maps:fold(fun(K, V, Acc) ->
+ Acc#{binary_to_atom(K, latin1) => cleanup(V)}
+ end, #{}, M);
+cleanup(I) ->
+ I.
+
+auth_header(Username, Password) ->
+ {"Authorization",
+ "Basic " ++ binary_to_list(base64:encode(Username ++ ":" ++ Password))}.
+
+%%---------------------------------------------------------------------------
+
+assert_list(Exp, Act) ->
+ _ = [assert_item(ExpI, ActI) || {ExpI, ActI} <- lists:zip(Exp, Act)],
+ ok.
+
+assert_item(ExpI, ActI) ->
+ ExpI = maps:with(maps:keys(ExpI), ActI),
+ ok.
diff --git a/deps/rabbitmq_trust_store/.gitignore b/deps/rabbitmq_trust_store/.gitignore
new file mode 100644
index 0000000000..0a8b81e945
--- /dev/null
+++ b/deps/rabbitmq_trust_store/.gitignore
@@ -0,0 +1,21 @@
+.sw?
+*.orig
+.*.sw?
+*.beam
+/.erlang.mk/
+/cover/
+/debug/
+/deps/
+/doc/
+/ebin/
+/escript/
+/escript.lock
+/logs/
+/plugins/
+/plugins.lock
+/sbin/
+/sbin.lock
+
+rabbitmq_trust_store.d
+
+test/config_schema_SUITE_data/schema/
diff --git a/deps/rabbitmq_trust_store/.travis.yml b/deps/rabbitmq_trust_store/.travis.yml
new file mode 100644
index 0000000000..b2645e1981
--- /dev/null
+++ b/deps/rabbitmq_trust_store/.travis.yml
@@ -0,0 +1,61 @@
+# vim:sw=2:et:
+
+os: linux
+dist: xenial
+language: elixir
+notifications:
+ email:
+ recipients:
+ - alerts@rabbitmq.com
+ on_success: never
+ on_failure: always
+addons:
+ apt:
+ packages:
+ - awscli
+cache:
+ apt: true
+env:
+ global:
+ - secure: bl0nZFgpmbTpi6xP+e8upEyDXvvczug1lBO3Nvg+PHGnPernQ290Z6THZFQRtR8a9ZQKOAOFHukl21a4bKaZBPceBK2hajbd7gwaB13EfQNNbR0qF9bVQVHvS1ZevRJYnxvRORkZKTdaxB23/ccVS1KA8lEFu7dRzvC6lZgN0DmjcOtYyq7JhOUDNFSie0gTf+kdxGTiZq04V63S1bL1V7fL3/HPIv61r2+VE5nRS5R9J5fK5vOZVsXeZTFZA70oKrnJIW7t3qs4m4nexTqGloU8M0Oz/EUdNbvRb6uxcClb8wAEm5r4HlPcs9bILUO746FiePCYFnKWyU6KHxPNZx1U7rMzRdS7wln9CqutM+uu+CE9Fh74XzAfok766GW/vfH6Yf6izhp1suXiqRl5kRweKMNouUt5vImaxBmfSEgK+UBFWMXCwv/1Nd1jFoQTU/2WC0qZFJfiJYLCRitVT3ak70gA57EmGoaf7WsYGkJgDOuIUb5XzTu4N5/E/kflx0Jm0gy47I7a9t8tURhTsle2eH57ZdAEss8mDxfvyzeb5z7+OnAfo42RirdL5AuIAFiDeGTpzsyxOlsbWOtBUBjCuhI0I324fYWJ8p6CEsrUdnCge6Na5SzM2YE7Ujg/9HcNu+x20m76rfWLUrqEqhMZBJzphpeszueYH7N+4BI=
+ - secure: osb2C+YAXlVO5ltbH/o4y28/4xT593kOSy7QrHfBeUSCZPPmVVh0cRtpMkekeuxMcJFxyJrInBIgYcIcpwPk6gipOu55Bj/beIwuhlTkVR89KhG8VYGPSF9umkQNRldtAgRfWYlpiMIBYU8hNX5NflANBbFmshXY/Bi8RMUJ0aSfWR64WVRoOsdsLwQyhJF9ny1M7WmwszQZ4fyN12xx8fb/36cv6nO8Bm/9/hT5xjJ1+xRb7QueuIoDYhsclPgHrxByozTeb094sfejpIfnzeiyyxBPGlOTg6MtkVD/pDANqS8XFppLqQMLQvCMTvfrVfQZoORWk2LZUuCBz480eFM7wJlsfpr/HoHCnWRhzsZA4CbQcWbYMQ2pXAmG+gOQvO+VKobnwr+tEmIGKe9vob4k1qvdnQhEwtOZVPblfmHrzlHxYERntYsDnF75kME4DGKD/39qd2DuvKuODg6SfZweHkUX9smpk76g1E16Q7hEoyQGhA+z7OycC3oH4LSaSKxIukLUdBemt1W5jgFVSLiVqxCt1xnKMjbPki//5xMJocFPT5iXpW9EOaZugpRAWGlJxRQtIDIKvMMHiZ9sodGY9xDse6Zdh6UbpKXE3+OZVTG+uSFhP3itY22kzoznRbxvwiF4RWo7XaP8S5+4iOupxVw5bcxcttFrEy2MCRY=
+
+ # $base_rmq_ref is used by rabbitmq-components.mk to select the
+ # appropriate branch for dependencies.
+ - base_rmq_ref=master
+
+elixir:
+ - '1.9'
+otp_release:
+ - '21.3'
+ - '22.2'
+
+install:
+ # This project being an Erlang one (we just set language to Elixir
+ # to ensure it is installed), we don't want Travis to run mix(1)
+ # automatically as it will break.
+ skip
+
+script:
+ # $current_rmq_ref is also used by rabbitmq-components.mk to select
+ # the appropriate branch for dependencies.
+ - make check-rabbitmq-components.mk
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+ - make xref
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+ - make tests
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+
+after_failure:
+ - |
+ cd "$TRAVIS_BUILD_DIR"
+ if test -d logs && test "$AWS_ACCESS_KEY_ID" && test "$AWS_SECRET_ACCESS_KEY"; then
+ archive_name="$(basename "$TRAVIS_REPO_SLUG")-$TRAVIS_JOB_NUMBER"
+
+ tar -c --transform "s/^logs/${archive_name}/" -f - logs | \
+ xz > "${archive_name}.tar.xz"
+
+ aws s3 cp "${archive_name}.tar.xz" s3://server-release-pipeline/travis-ci-logs/ \
+ --region eu-west-1 \
+ --acl public-read
+ fi
diff --git a/deps/rabbitmq_trust_store/CODE_OF_CONDUCT.md b/deps/rabbitmq_trust_store/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000000..08697906fd
--- /dev/null
+++ b/deps/rabbitmq_trust_store/CODE_OF_CONDUCT.md
@@ -0,0 +1,44 @@
+# Contributor Code of Conduct
+
+As contributors and maintainers of this project, and in the interest of fostering an open
+and welcoming community, we pledge to respect all people who contribute through reporting
+issues, posting feature requests, updating documentation, submitting pull requests or
+patches, and other activities.
+
+We are committed to making participation in this project a harassment-free experience for
+everyone, regardless of level of experience, gender, gender identity and expression,
+sexual orientation, disability, personal appearance, body size, race, ethnicity, age,
+religion, or nationality.
+
+Examples of unacceptable behavior by participants include:
+
+ * The use of sexualized language or imagery
+ * Personal attacks
+ * Trolling or insulting/derogatory comments
+ * Public or private harassment
+ * Publishing others' private information, such as physical or electronic addresses,
+ without explicit permission
+ * Other unethical or unprofessional conduct
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments,
+commits, code, wiki edits, issues, and other contributions that are not aligned to this
+Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors
+that they deem inappropriate, threatening, offensive, or harmful.
+
+By adopting this Code of Conduct, project maintainers commit themselves to fairly and
+consistently applying these principles to every aspect of managing this project. Project
+maintainers who do not follow or enforce the Code of Conduct may be permanently removed
+from the project team.
+
+This Code of Conduct applies both within project spaces and in public spaces when an
+individual is representing the project or its community.
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by
+contacting a project maintainer at [info@rabbitmq.com](mailto:info@rabbitmq.com). All complaints will
+be reviewed and investigated and will result in a response that is deemed necessary and
+appropriate to the circumstances. Maintainers are obligated to maintain confidentiality
+with regard to the reporter of an incident.
+
+This Code of Conduct is adapted from the
+[Contributor Covenant](https://contributor-covenant.org), version 1.3.0, available at
+[contributor-covenant.org/version/1/3/0/](https://contributor-covenant.org/version/1/3/0/)
diff --git a/deps/rabbitmq_trust_store/CONTRIBUTING.md b/deps/rabbitmq_trust_store/CONTRIBUTING.md
new file mode 100644
index 0000000000..23a92fef9c
--- /dev/null
+++ b/deps/rabbitmq_trust_store/CONTRIBUTING.md
@@ -0,0 +1,38 @@
+## Overview
+
+RabbitMQ projects use pull requests to discuss, collaborate on and accept code contributions.
+Pull requests are the primary place for discussing code changes.
+
+## How to Contribute
+
+The process is fairly standard:
+
+ * Fork the repository or repositories you plan on contributing to
+ * Clone [RabbitMQ umbrella repository](https://github.com/rabbitmq/rabbitmq-public-umbrella)
+ * `cd umbrella`, `make co`
+ * Create a branch with a descriptive name in the relevant repositories
+ * Make your changes, run tests, commit with a [descriptive message](https://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html), push to your fork
+ * Submit pull requests with an explanation of what has been changed and **why**
+ * Submit a filled out and signed [Contributor Agreement](https://github.com/rabbitmq/ca#how-to-submit) if needed (see below)
+ * Be patient. We will get to your pull request eventually
+
+If what you are going to work on is a substantial change, please first ask the core team
+for their opinion on the [RabbitMQ mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
+
+
+## Code of Conduct
+
+See [CODE_OF_CONDUCT.md](./CODE_OF_CONDUCT.md).
+
+
+## Contributor Agreement
+
+If you want to contribute a non-trivial change, please submit a signed copy of our
+[Contributor Agreement](https://github.com/rabbitmq/ca#how-to-submit) around the time
+you submit your pull request. This will make it much easier (in some cases, possible)
+for the RabbitMQ team at Pivotal to merge your contribution.
+
+
+## Where to Ask Questions
+
+If something isn't clear, feel free to ask on our [mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
diff --git a/deps/rabbitmq_trust_store/LICENSE b/deps/rabbitmq_trust_store/LICENSE
new file mode 100644
index 0000000000..dae131ee2b
--- /dev/null
+++ b/deps/rabbitmq_trust_store/LICENSE
@@ -0,0 +1,5 @@
+This package, the RabbitMQ Trust Store plugin, is licensed under the MPL 2.0. For the
+MPL 2.0, please see LICENSE-MPL-RabbitMQ.
+
+If you have any questions regarding licensing, please contact us at
+info@rabbitmq.com.
diff --git a/deps/rabbitmq_trust_store/LICENSE-MPL-RabbitMQ b/deps/rabbitmq_trust_store/LICENSE-MPL-RabbitMQ
new file mode 100644
index 0000000000..14e2f777f6
--- /dev/null
+++ b/deps/rabbitmq_trust_store/LICENSE-MPL-RabbitMQ
@@ -0,0 +1,373 @@
+Mozilla Public License Version 2.0
+==================================
+
+1. Definitions
+--------------
+
+1.1. "Contributor"
+ means each individual or legal entity that creates, contributes to
+ the creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+ means the combination of the Contributions of others (if any) used
+ by a Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+ means Source Code Form to which the initial Contributor has attached
+ the notice in Exhibit A, the Executable Form of such Source Code
+ Form, and Modifications of such Source Code Form, in each case
+ including portions thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ (a) that the initial Contributor has attached the notice described
+ in Exhibit B to the Covered Software; or
+
+ (b) that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the
+ terms of a Secondary License.
+
+1.6. "Executable Form"
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+ means a work that combines Covered Software with other material, in
+ a separate file or files, that is not Covered Software.
+
+1.8. "License"
+ means this document.
+
+1.9. "Licensable"
+ means having the right to grant, to the maximum extent possible,
+ whether at the time of the initial grant or subsequently, any and
+ all of the rights conveyed by this License.
+
+1.10. "Modifications"
+ means any of the following:
+
+ (a) any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered
+ Software; or
+
+ (b) any new file in Source Code Form that contains any Covered
+ Software.
+
+1.11. "Patent Claims" of a Contributor
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the
+ License, by the making, using, selling, offering for sale, having
+ made, import, or transfer of either its Contributions or its
+ Contributor Version.
+
+1.12. "Secondary License"
+ means either the GNU General Public License, Version 2.0, the GNU
+ Lesser General Public License, Version 2.1, the GNU Affero General
+ Public License, Version 3.0, or any later versions of those
+ licenses.
+
+1.13. "Source Code Form"
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that
+ controls, is controlled by, or is under common control with You. For
+ purposes of this definition, "control" means (a) the power, direct
+ or indirect, to cause the direction or management of such entity,
+ whether by contract or otherwise, or (b) ownership of more than
+ fifty percent (50%) of the outstanding shares or beneficial
+ ownership of such entity.
+
+2. License Grants and Conditions
+--------------------------------
+
+2.1. Grants
+
+Each Contributor hereby grants You a world-wide, royalty-free,
+non-exclusive license:
+
+(a) under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+(b) under Patent Claims of such Contributor to make, use, sell, offer
+ for sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+The licenses granted in Section 2.1 with respect to any Contribution
+become effective for each Contribution on the date the Contributor first
+distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+The licenses granted in this Section 2 are the only rights granted under
+this License. No additional rights or licenses will be implied from the
+distribution or licensing of Covered Software under this License.
+Notwithstanding Section 2.1(b) above, no patent license is granted by a
+Contributor:
+
+(a) for any code that a Contributor has removed from Covered Software;
+ or
+
+(b) for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+(c) under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+This License does not grant any rights in the trademarks, service marks,
+or logos of any Contributor (except as may be necessary to comply with
+the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+No Contributor makes additional grants as a result of Your choice to
+distribute the Covered Software under a subsequent version of this
+License (see Section 10.2) or under the terms of a Secondary License (if
+permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+Each Contributor represents that the Contributor believes its
+Contributions are its original creation(s) or it has sufficient rights
+to grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+This License is not intended to limit any rights You have under
+applicable copyright doctrines of fair use, fair dealing, or other
+equivalents.
+
+2.7. Conditions
+
+Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
+in Section 2.1.
+
+3. Responsibilities
+-------------------
+
+3.1. Distribution of Source Form
+
+All distribution of Covered Software in Source Code Form, including any
+Modifications that You create or to which You contribute, must be under
+the terms of this License. You must inform recipients that the Source
+Code Form of the Covered Software is governed by the terms of this
+License, and how they can obtain a copy of this License. You may not
+attempt to alter or restrict the recipients' rights in the Source Code
+Form.
+
+3.2. Distribution of Executable Form
+
+If You distribute Covered Software in Executable Form then:
+
+(a) such Covered Software must also be made available in Source Code
+ Form, as described in Section 3.1, and You must inform recipients of
+ the Executable Form how they can obtain a copy of such Source Code
+ Form by reasonable means in a timely manner, at a charge no more
+ than the cost of distribution to the recipient; and
+
+(b) You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter
+ the recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+You may create and distribute a Larger Work under terms of Your choice,
+provided that You also comply with the requirements of this License for
+the Covered Software. If the Larger Work is a combination of Covered
+Software with a work governed by one or more Secondary Licenses, and the
+Covered Software is not Incompatible With Secondary Licenses, this
+License permits You to additionally distribute such Covered Software
+under the terms of such Secondary License(s), so that the recipient of
+the Larger Work may, at their option, further distribute the Covered
+Software under the terms of either this License or such Secondary
+License(s).
+
+3.4. Notices
+
+You may not remove or alter the substance of any license notices
+(including copyright notices, patent notices, disclaimers of warranty,
+or limitations of liability) contained within the Source Code Form of
+the Covered Software, except that You may alter any license notices to
+the extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+You may choose to offer, and to charge a fee for, warranty, support,
+indemnity or liability obligations to one or more recipients of Covered
+Software. However, You may do so only on Your own behalf, and not on
+behalf of any Contributor. You must make it absolutely clear that any
+such warranty, support, indemnity, or liability obligation is offered by
+You alone, and You hereby agree to indemnify every Contributor for any
+liability incurred by such Contributor as a result of warranty, support,
+indemnity or liability terms You offer. You may include additional
+disclaimers of warranty and limitations of liability specific to any
+jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+---------------------------------------------------
+
+If it is impossible for You to comply with any of the terms of this
+License with respect to some or all of the Covered Software due to
+statute, judicial order, or regulation then You must: (a) comply with
+the terms of this License to the maximum extent possible; and (b)
+describe the limitations and the code they affect. Such description must
+be placed in a text file included with all distributions of the Covered
+Software under this License. Except to the extent prohibited by statute
+or regulation, such description must be sufficiently detailed for a
+recipient of ordinary skill to be able to understand it.
+
+5. Termination
+--------------
+
+5.1. The rights granted under this License will terminate automatically
+if You fail to comply with any of its terms. However, if You become
+compliant, then the rights granted under this License from a particular
+Contributor are reinstated (a) provisionally, unless and until such
+Contributor explicitly and finally terminates Your grants, and (b) on an
+ongoing basis, if such Contributor fails to notify You of the
+non-compliance by some reasonable means prior to 60 days after You have
+come back into compliance. Moreover, Your grants from a particular
+Contributor are reinstated on an ongoing basis if such Contributor
+notifies You of the non-compliance by some reasonable means, this is the
+first time You have received notice of non-compliance with this License
+from such Contributor, and You become compliant prior to 30 days after
+Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+infringement claim (excluding declaratory judgment actions,
+counter-claims, and cross-claims) alleging that a Contributor Version
+directly or indirectly infringes any patent, then the rights granted to
+You by any and all Contributors for the Covered Software under Section
+2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all
+end user license agreements (excluding distributors and resellers) which
+have been validly granted by You or Your distributors under this License
+prior to termination shall survive termination.
+
+************************************************************************
+* *
+* 6. Disclaimer of Warranty *
+* ------------------------- *
+* *
+* Covered Software is provided under this License on an "as is" *
+* basis, without warranty of any kind, either expressed, implied, or *
+* statutory, including, without limitation, warranties that the *
+* Covered Software is free of defects, merchantable, fit for a *
+* particular purpose or non-infringing. The entire risk as to the *
+* quality and performance of the Covered Software is with You. *
+* Should any Covered Software prove defective in any respect, You *
+* (not any Contributor) assume the cost of any necessary servicing, *
+* repair, or correction. This disclaimer of warranty constitutes an *
+* essential part of this License. No use of any Covered Software is *
+* authorized under this License except under this disclaimer. *
+* *
+************************************************************************
+
+************************************************************************
+* *
+* 7. Limitation of Liability *
+* -------------------------- *
+* *
+* Under no circumstances and under no legal theory, whether tort *
+* (including negligence), contract, or otherwise, shall any *
+* Contributor, or anyone who distributes Covered Software as *
+* permitted above, be liable to You for any direct, indirect, *
+* special, incidental, or consequential damages of any character *
+* including, without limitation, damages for lost profits, loss of *
+* goodwill, work stoppage, computer failure or malfunction, or any *
+* and all other commercial damages or losses, even if such party *
+* shall have been informed of the possibility of such damages. This *
+* limitation of liability shall not apply to liability for death or *
+* personal injury resulting from such party's negligence to the *
+* extent applicable law prohibits such limitation. Some *
+* jurisdictions do not allow the exclusion or limitation of *
+* incidental or consequential damages, so this exclusion and *
+* limitation may not apply to You. *
+* *
+************************************************************************
+
+8. Litigation
+-------------
+
+Any litigation relating to this License may be brought only in the
+courts of a jurisdiction where the defendant maintains its principal
+place of business and such litigation shall be governed by laws of that
+jurisdiction, without reference to its conflict-of-law provisions.
+Nothing in this Section shall prevent a party's ability to bring
+cross-claims or counter-claims.
+
+9. Miscellaneous
+----------------
+
+This License represents the complete agreement concerning the subject
+matter hereof. If any provision of this License is held to be
+unenforceable, such provision shall be reformed only to the extent
+necessary to make it enforceable. Any law or regulation which provides
+that the language of a contract shall be construed against the drafter
+shall not be used to construe this License against a Contributor.
+
+10. Versions of the License
+---------------------------
+
+10.1. New Versions
+
+Mozilla Foundation is the license steward. Except as provided in Section
+10.3, no one other than the license steward has the right to modify or
+publish new versions of this License. Each version will be given a
+distinguishing version number.
+
+10.2. Effect of New Versions
+
+You may distribute the Covered Software under the terms of the version
+of the License under which You originally received the Covered Software,
+or under the terms of any subsequent version published by the license
+steward.
+
+10.3. Modified Versions
+
+If you create software not governed by this License, and you want to
+create a new license for such software, you may create and use a
+modified version of this License if you rename the license and remove
+any references to the name of the license steward (except to note that
+such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+Licenses
+
+If You choose to distribute Source Code Form that is Incompatible With
+Secondary Licenses under the terms of this version of the License, the
+notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+-------------------------------------------
+
+ This Source Code Form is subject to the terms of the Mozilla Public
+ License, v. 2.0. If a copy of the MPL was not distributed with this
+ file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular
+file, then You may include the notice in a location (such as a LICENSE
+file in a relevant directory) where a recipient would be likely to look
+for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+---------------------------------------------------------
+
+ This Source Code Form is "Incompatible With Secondary Licenses", as
+ defined by the Mozilla Public License, v. 2.0.
diff --git a/deps/rabbitmq_trust_store/Makefile b/deps/rabbitmq_trust_store/Makefile
new file mode 100644
index 0000000000..d9629187de
--- /dev/null
+++ b/deps/rabbitmq_trust_store/Makefile
@@ -0,0 +1,29 @@
+PROJECT = rabbitmq_trust_store
+PROJECT_DESCRIPTION = Client X.509 certificates trust store
+PROJECT_MOD = rabbit_trust_store_app
+
+define PROJECT_ENV
+[
+ {default_refresh_interval, 30},
+ {providers, [rabbit_trust_store_file_provider]}
+ ]
+endef
+
+DEPS = rabbit_common rabbit
+LOCAL_DEPS += ssl crypto public_key
+## We need Cowboy's test utilities
+TEST_DEPS = rabbitmq_ct_helpers rabbitmq_ct_client_helpers amqp_client ct_helper trust_store_http
+dep_ct_helper = git https://github.com/extend/ct_helper.git master
+dep_trust_store_http = git https://github.com/rabbitmq/trust_store_http.git master
+
+DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-plugin.mk
+DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk
+
+# FIXME: Use erlang.mk patched for RabbitMQ, while waiting for PRs to be
+# reviewed and merged.
+
+ERLANG_MK_REPO = https://github.com/rabbitmq/erlang.mk.git
+ERLANG_MK_COMMIT = rabbitmq-tmp
+
+include rabbitmq-components.mk
+include erlang.mk
diff --git a/deps/rabbitmq_trust_store/README.md b/deps/rabbitmq_trust_store/README.md
new file mode 100644
index 0000000000..3a1d608bf1
--- /dev/null
+++ b/deps/rabbitmq_trust_store/README.md
@@ -0,0 +1,210 @@
+# RabbitMQ Certificate Trust Store
+
+This plugin provides an alternative [TLS (X.509) certificate verification](https://www.rabbitmq.com/ssl.html#peer-verification)
+strategy. Instead of the traditional PKI chain traversal mechanism,
+this strategy simply checks the leaf certificate presented by a client
+against an approved ("whitelisted") set of certificates.
+
+
+## Rationale
+
+When RabbitMQ is configured to use TLS for client connections, by default it will
+use the standard [PKI certificate chain traversal](https://www.rabbitmq.com/ssl.html#peer-verification) process.
+What certificates are trusted is ultimately controlled by adjusting the set of trusted CA certificates.
+
+This configuration is standard for data services and works well for many use cases. However,
+it is largely static: a change in the set of trusted CA certificates requires a cluster
+reconfiguration (redeployment).
+There is no convenient way to change the set in real time, and in particular
+no way to do so without affecting existing client connections.
+
+This plugin offers an alternative. It maintains a set (list) of trusted PEM-formatted TLS (X.509) certificates,
+refreshed at configurable intervals. Those certificates are then used
+to verify inbound TLS-enabled client connections across the entire RabbitMQ node (all plugins and protocols are affected).
+The set is node-local.
+
+Certificates can be loaded into the trusted list from different sources. Sources are accessed via "providers".
+Two providers ship with the plugin: the local filesystem and an HTTPS endpoint.
+
+New providers can be added by implementing the `rabbit_trust_store_certificate_provider` behaviour.
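+
+As an illustration only, here is a minimal sketch of a hypothetical provider. The
+callback names and return shapes below (`list_certs/1`, `list_certs/2`,
+`load_cert/3`) are assumptions modelled on the built-in providers; consult the
+`rabbit_trust_store_certificate_provider` module for the authoritative behaviour
+definition.
+
+``` erlang
+%% A hypothetical provider sketch, not shipped with the plugin. The
+%% callback set is an assumption modelled on the built-in providers.
+-module(rabbit_trust_store_static_provider).
+
+-behaviour(rabbit_trust_store_certificate_provider).
+
+-export([list_certs/1, list_certs/2, load_cert/3]).
+
+%% First refresh: return certificate ids with change-detection
+%% attributes, plus an opaque provider state.
+list_certs(_Config) ->
+    {ok, [{static_cert, [{checksum, 1}]}], nostate}.
+
+%% Subsequent refreshes receive the previous state; returning
+%% 'no_change' keeps the current whitelist as-is.
+list_certs(_Config, _PreviousState) ->
+    no_change.
+
+%% Load one certificate by id. A real provider would return the
+%% DER-encoded certificate bytes; an empty binary stands in here.
+load_cert(static_cert, _Attributes, _Config) ->
+    {ok, <<>>}.
+```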
+
+The default provider is `rabbit_trust_store_file_provider`, which will load certificates
+from a configured local filesystem directory.
+
+
+## Installation
+
+This plugin ships with modern RabbitMQ versions. Like all [plugins](https://www.rabbitmq.com/plugins.html),
+it has to be enabled before it can be used:
+
+``` sh
+rabbitmq-plugins enable rabbitmq_trust_store
+```
+
+## Usage
+
+### Filesystem provider
+
+Configure the trust store with a directory of whitelisted certificates
+and a refresh interval:
+
+``` ini
+## trusted certificate directory path
+trust_store.directory = $HOME/rabbit/whitelist
+trust_store.refresh_interval = 30
+```
+
+Setting `refresh_interval` to `0` seconds will disable automatic refresh.
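+
+With automatic refresh disabled, the whitelist can still be refreshed on demand
+(the same command is mentioned under How it Works below):
+
+``` sh
+# Trigger an immediate refresh of the trust store whitelist.
+rabbitmqctl eval 'rabbit_trust_store:refresh().'
+```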
+
+Certificates are identified and distinguished by their **filenames**, file modification time and
+a hash value of file contents.
+
+#### Installing a Certificate
+
+Write a PEM-formatted certificate file to the configured directory
+to whitelist it. The file contains all the necessary information to
+authorize a client that presents the very same certificate to the
+server.
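+
+For example, with the directory configured above, whitelisting a certificate is a
+single copy (the source path and target filename here are illustrative):
+
+``` sh
+# Any filename works: certificates are tracked by filename,
+# modification time and a hash of the file contents.
+cp /path/to/client_certificate.pem "$HOME/rabbit/whitelist/alice.pem"
+```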
+
+#### Removing a Certificate
+
+Delete the certificate file from the configured directory to remove it
+from the whitelist.
+
+> Note: TLS session caching bypasses the trust store certificate validation and can
+> make it seem as if a removed certificate is still active. If timely certificate removal
+> is important, disable session caching in the broker by setting the `reuse_sessions`
+> SSL option to `false`.
+
+### HTTP provider
+
+The HTTP provider loads certificates via HTTP(S) from a remote server.
+
+The server should expose the following API:
+
+- `GET <root>` - list certificates in JSON format: `{"certificates": [{"id": <id>, "path": <url>}, ...]}`
+- `GET <root>/<path>` - download a PEM-encoded certificate.
+
+Here `<root>` is the configured certificate URL, `<id>` is a unique certificate identifier,
+and `<path>` is the relative path from which the certificate can be downloaded.
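+
+For example, a listing response might look like this (the identifiers and paths
+are invented for illustration):
+
+```
+{"certificates": [
+  {"id": "alice:2016-05-24", "path": "/certs/alice.pem"},
+  {"id": "bob:2016-05-24",   "path": "/certs/bob.pem"}
+]}
+```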
+
+Configuration of the HTTP provider:
+
+``` ini
+trust_store.providers.1 = http
+trust_store.url = https://example.cert.url/path
+trust_store.refresh_interval = 30
+```
+
+The example above uses the alias `http` for `rabbit_trust_store_http_provider`.
+Available aliases are:
+
+- `file` - `rabbit_trust_store_file_provider`
+- `http` - `rabbit_trust_store_http_provider`
+
+In the Erlang terms format:
+
+``` erlang
+{rabbitmq_trust_store,
+ [{providers, [rabbit_trust_store_http_provider]},
+ {url, "https://example.cert.url/path"},
+ {refresh_interval, {seconds, 30}}
+ ]}.
+```
+
+You can specify TLS options if you use HTTPS:
+
+``` ini
+trust_store.providers.1 = http
+trust_store.url = https://example.secure.cert.url/path
+trust_store.refresh_interval = 30
+trust_store.ssl_options.certfile = /client/cert.pem
+trust_store.ssl_options.keyfile = /client/key.pem
+trust_store.ssl_options.cacertfile = /ca/cert.pem
+```
+
+In the Erlang terms format:
+
+``` erlang
+{rabbitmq_trust_store,
+ [{providers, [rabbit_trust_store_http_provider]},
+ {url, "https://example.secure.cert.url/path"},
+ {refresh_interval, {seconds, 30}},
+ {ssl_options, [{certfile, "/client/cert.pem"},
+ {keyfile, "/client/key.pem"},
+ {cacertfile, "/ca/cert.pem"}
+ ]}
+ ]}.
+```
+
+The HTTP provider sends an `If-Modified-Since` header with its list requests to avoid
+re-downloading an unchanged list of certificates.
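+
+This is plain conditional HTTP. A refresh against an unchanged list might look like
+this on the wire (the timestamp is illustrative):
+
+```
+GET /path HTTP/1.1
+If-Modified-Since: Tue, 24 May 2016 15:28:25 GMT
+
+HTTP/1.1 304 Not Modified
+```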
+
+You can additionally specify HTTP headers (e.g. for authorization) using the Erlang terms format:
+
+``` erlang
+{rabbitmq_trust_store,
+ [{providers, [rabbit_trust_store_http_provider]},
+ {url, "https://example.cert.url/path"},
+ {headers, [{"Authorization", "Bearer token"}]},
+ {refresh_interval, {seconds, 30}}
+ ]}.
+```
+
+#### Example
+
+`examples/rabbitmq_trust_store_django` is an example Django application, which serves
+certificates from a directory.
+
+
+### Listing certificates
+
+To list the currently loaded certificates, use the `rabbitmqctl` utility as follows:
+
+```
+ rabbitmqctl eval 'io:format(rabbit_trust_store:list()).'
+```
+
+This will output a formatted list of certificates similar to:
+
+```
+ Name: cert.pem
+ Serial: 1 | 0x1
+ Subject: O=client,CN=snowman.local
+ Issuer: L=87613,CN=MyTestRootCA
+ Validity: "2016-05-24T15:28:25Z - 2026-05-22T15:28:25Z"
+```
+
+Note that this command reads each certificate from disk in order to extract
+all the relevant information. If there are a large number of certificates in the
+trust store, use this command sparingly.
+
+
+## How it Works
+
+When the trust store starts, it configures the TLS listening sockets and
+whitelists the certificates in the given directory; accepting sockets can
+then query the trust store with the certificate presented by a client. The
+trust store refreshes the whitelist to reflect changes in the directory's
+contents, installing and removing certificate details, after each refresh
+interval or upon a manual refresh (by invoking `rabbitmqctl eval
+'rabbit_trust_store:refresh().'` from the command line).
+
+
+## Building from Source
+
+See [Plugin Development guide](https://www.rabbitmq.com/plugin-development.html).
+
+TL;DR: running
+
+ make dist
+
+will build the plugin and put build artifacts under the `./plugins` directory.
+
+
+## Copyright and License
+
+(c) 2007-2020 VMware, Inc. or its affiliates.
+
+Released under the MPL, the same license as RabbitMQ.
diff --git a/deps/rabbitmq_trust_store/erlang.mk b/deps/rabbitmq_trust_store/erlang.mk
new file mode 100644
index 0000000000..fce4be0b0a
--- /dev/null
+++ b/deps/rabbitmq_trust_store/erlang.mk
@@ -0,0 +1,7808 @@
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+.PHONY: all app apps deps search rel relup docs install-docs check tests clean distclean help erlang-mk
+
+ERLANG_MK_FILENAME := $(realpath $(lastword $(MAKEFILE_LIST)))
+export ERLANG_MK_FILENAME
+
+ERLANG_MK_VERSION = 2019.07.01-40-geb3e4b0
+ERLANG_MK_WITHOUT =
+
+# Make 3.81 and 3.82 are deprecated.
+
+ifeq ($(MAKELEVEL)$(MAKE_VERSION),03.81)
+$(warning Please upgrade to GNU Make 4 or later: https://erlang.mk/guide/installation.html)
+endif
+
+ifeq ($(MAKELEVEL)$(MAKE_VERSION),03.82)
+$(warning Please upgrade to GNU Make 4 or later: https://erlang.mk/guide/installation.html)
+endif
+
+# Core configuration.
+
+PROJECT ?= $(notdir $(CURDIR))
+PROJECT := $(strip $(PROJECT))
+
+PROJECT_VERSION ?= rolling
+PROJECT_MOD ?= $(PROJECT)_app
+PROJECT_ENV ?= []
+
+# Verbosity.
+
+V ?= 0
+
+verbose_0 = @
+verbose_2 = set -x;
+verbose = $(verbose_$(V))
+
+ifeq ($(V),3)
+SHELL := $(SHELL) -x
+endif
+
+gen_verbose_0 = @echo " GEN " $@;
+gen_verbose_2 = set -x;
+gen_verbose = $(gen_verbose_$(V))
+
+gen_verbose_esc_0 = @echo " GEN " $$@;
+gen_verbose_esc_2 = set -x;
+gen_verbose_esc = $(gen_verbose_esc_$(V))
+
+# Temporary files directory.
+
+ERLANG_MK_TMP ?= $(CURDIR)/.erlang.mk
+export ERLANG_MK_TMP
+
+# "erl" command.
+
+ERL = erl +A1 -noinput -boot no_dot_erlang
+
+# Platform detection.
+
+ifeq ($(PLATFORM),)
+UNAME_S := $(shell uname -s)
+
+ifeq ($(UNAME_S),Linux)
+PLATFORM = linux
+else ifeq ($(UNAME_S),Darwin)
+PLATFORM = darwin
+else ifeq ($(UNAME_S),SunOS)
+PLATFORM = solaris
+else ifeq ($(UNAME_S),GNU)
+PLATFORM = gnu
+else ifeq ($(UNAME_S),FreeBSD)
+PLATFORM = freebsd
+else ifeq ($(UNAME_S),NetBSD)
+PLATFORM = netbsd
+else ifeq ($(UNAME_S),OpenBSD)
+PLATFORM = openbsd
+else ifeq ($(UNAME_S),DragonFly)
+PLATFORM = dragonfly
+else ifeq ($(shell uname -o),Msys)
+PLATFORM = msys2
+else
+$(error Unable to detect platform. Please open a ticket with the output of uname -a.)
+endif
+
+export PLATFORM
+endif
+
+# Core targets.
+
+all:: deps app rel
+
+# Noop to avoid a Make warning when there's nothing to do.
+rel::
+ $(verbose) :
+
+relup:: deps app
+
+check:: tests
+
+clean:: clean-crashdump
+
+clean-crashdump:
+ifneq ($(wildcard erl_crash.dump),)
+ $(gen_verbose) rm -f erl_crash.dump
+endif
+
+distclean:: clean distclean-tmp
+
+$(ERLANG_MK_TMP):
+ $(verbose) mkdir -p $(ERLANG_MK_TMP)
+
+distclean-tmp:
+ $(gen_verbose) rm -rf $(ERLANG_MK_TMP)
+
+help::
+ $(verbose) printf "%s\n" \
+ "erlang.mk (version $(ERLANG_MK_VERSION)) is distributed under the terms of the ISC License." \
+ "Copyright (c) 2013-2016 Loïc Hoguin <essen@ninenines.eu>" \
+ "" \
+ "Usage: [V=1] $(MAKE) [target]..." \
+ "" \
+ "Core targets:" \
+ " all Run deps, app and rel targets in that order" \
+ " app Compile the project" \
+ " deps Fetch dependencies (if needed) and compile them" \
+ " fetch-deps Fetch dependencies recursively (if needed) without compiling them" \
+ " list-deps List dependencies recursively on stdout" \
+ " search q=... Search for a package in the built-in index" \
+ " rel Build a release for this project, if applicable" \
+ " docs Build the documentation for this project" \
+ " install-docs Install the man pages for this project" \
+ " check Compile and run all tests and analysis for this project" \
+ " tests Run the tests for this project" \
+ " clean Delete temporary and output files from most targets" \
+ " distclean Delete all temporary and output files" \
+ " help Display this help and exit" \
+ " erlang-mk Update erlang.mk to the latest version"
+
+# Core functions.
+
+empty :=
+space := $(empty) $(empty)
+tab := $(empty) $(empty)
+comma := ,
+
+define newline
+
+
+endef
+
+define comma_list
+$(subst $(space),$(comma),$(strip $(1)))
+endef
+
+define escape_dquotes
+$(subst ",\",$1)
+endef
+
+# Adding erlang.mk to make Erlang scripts that call init:get_plain_arguments() happy.
+define erlang
+$(ERL) $2 -pz $(ERLANG_MK_TMP)/rebar/ebin -eval "$(subst $(newline),,$(call escape_dquotes,$1))" -- erlang.mk
+endef
+
+ifeq ($(PLATFORM),msys2)
+core_native_path = $(shell cygpath -m $1)
+else
+core_native_path = $1
+endif
+
+core_http_get = curl -Lf$(if $(filter-out 0,$(V)),,s)o $(call core_native_path,$1) $2
+
+core_eq = $(and $(findstring $(1),$(2)),$(findstring $(2),$(1)))
+
+# We skip files that contain spaces because they end up causing issues.
+core_find = $(if $(wildcard $1),$(shell find $(1:%/=%) \( -type l -o -type f \) -name $(subst *,\*,$2) | grep -v " "))
+
+core_lc = $(subst A,a,$(subst B,b,$(subst C,c,$(subst D,d,$(subst E,e,$(subst F,f,$(subst G,g,$(subst H,h,$(subst I,i,$(subst J,j,$(subst K,k,$(subst L,l,$(subst M,m,$(subst N,n,$(subst O,o,$(subst P,p,$(subst Q,q,$(subst R,r,$(subst S,s,$(subst T,t,$(subst U,u,$(subst V,v,$(subst W,w,$(subst X,x,$(subst Y,y,$(subst Z,z,$(1)))))))))))))))))))))))))))
+
+core_ls = $(filter-out $(1),$(shell echo $(1)))
+
+# @todo Use a solution that does not require using perl.
+core_relpath = $(shell perl -e 'use File::Spec; print File::Spec->abs2rel(@ARGV) . "\n"' $1 $2)
+
+define core_render
+ printf -- '$(subst $(newline),\n,$(subst %,%%,$(subst ','\'',$(subst $(tab),$(WS),$(call $(1))))))\n' > $(2)
+endef
+
+# Automated update.
+
+ERLANG_MK_REPO ?= https://github.com/ninenines/erlang.mk
+ERLANG_MK_COMMIT ?=
+ERLANG_MK_BUILD_CONFIG ?= build.config
+ERLANG_MK_BUILD_DIR ?= .erlang.mk.build
+
+erlang-mk: WITHOUT ?= $(ERLANG_MK_WITHOUT)
+erlang-mk:
+ifdef ERLANG_MK_COMMIT
+ $(verbose) git clone $(ERLANG_MK_REPO) $(ERLANG_MK_BUILD_DIR)
+ $(verbose) cd $(ERLANG_MK_BUILD_DIR) && git checkout $(ERLANG_MK_COMMIT)
+else
+ $(verbose) git clone --depth 1 $(ERLANG_MK_REPO) $(ERLANG_MK_BUILD_DIR)
+endif
+ $(verbose) if [ -f $(ERLANG_MK_BUILD_CONFIG) ]; then cp $(ERLANG_MK_BUILD_CONFIG) $(ERLANG_MK_BUILD_DIR)/build.config; fi
+ $(gen_verbose) $(MAKE) --no-print-directory -C $(ERLANG_MK_BUILD_DIR) WITHOUT='$(strip $(WITHOUT))' UPGRADE=1
+ $(verbose) cp $(ERLANG_MK_BUILD_DIR)/erlang.mk ./erlang.mk
+ $(verbose) rm -rf $(ERLANG_MK_BUILD_DIR)
+ $(verbose) rm -rf $(ERLANG_MK_TMP)
+
+# The erlang.mk package index is bundled in the default erlang.mk build.
+# Search for the string "copyright" to skip to the rest of the code.
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-kerl
+
+KERL_INSTALL_DIR ?= $(HOME)/erlang
+
+ifeq ($(strip $(KERL)),)
+KERL := $(ERLANG_MK_TMP)/kerl/kerl
+endif
+
+KERL_DIR = $(ERLANG_MK_TMP)/kerl
+
+export KERL
+
+KERL_GIT ?= https://github.com/kerl/kerl
+KERL_COMMIT ?= master
+
+KERL_MAKEFLAGS ?=
+
+OTP_GIT ?= https://github.com/erlang/otp
+
+define kerl_otp_target
+$(KERL_INSTALL_DIR)/$(1): $(KERL)
+ $(verbose) if [ ! -d $$@ ]; then \
+ MAKEFLAGS="$(KERL_MAKEFLAGS)" $(KERL) build git $(OTP_GIT) $(1) $(1); \
+ $(KERL) install $(1) $(KERL_INSTALL_DIR)/$(1); \
+ fi
+endef
+
+define kerl_hipe_target
+$(KERL_INSTALL_DIR)/$1-native: $(KERL)
+ $(verbose) if [ ! -d $$@ ]; then \
+ KERL_CONFIGURE_OPTIONS=--enable-native-libs \
+ MAKEFLAGS="$(KERL_MAKEFLAGS)" $(KERL) build git $(OTP_GIT) $1 $1-native; \
+ $(KERL) install $1-native $(KERL_INSTALL_DIR)/$1-native; \
+ fi
+endef
+
+$(KERL): $(KERL_DIR)
+
+$(KERL_DIR): | $(ERLANG_MK_TMP)
+ $(gen_verbose) git clone --depth 1 $(KERL_GIT) $(ERLANG_MK_TMP)/kerl
+ $(verbose) cd $(ERLANG_MK_TMP)/kerl && git checkout $(KERL_COMMIT)
+ $(verbose) chmod +x $(KERL)
+
+distclean:: distclean-kerl
+
+distclean-kerl:
+ $(gen_verbose) rm -rf $(KERL_DIR)
+
+# Allow users to select which version of Erlang/OTP to use for a project.
+
+ifneq ($(strip $(LATEST_ERLANG_OTP)),)
+# In some environments it is necessary to filter out master.
+ERLANG_OTP := $(notdir $(lastword $(sort\
+ $(filter-out $(KERL_INSTALL_DIR)/master $(KERL_INSTALL_DIR)/OTP_R%,\
+ $(filter-out %-rc1 %-rc2 %-rc3,$(wildcard $(KERL_INSTALL_DIR)/*[^-native]))))))
+endif
+
+ERLANG_OTP ?=
+ERLANG_HIPE ?=
+
+# Use kerl to enforce a specific Erlang/OTP version for a project.
+ifneq ($(strip $(ERLANG_OTP)),)
+export PATH := $(KERL_INSTALL_DIR)/$(ERLANG_OTP)/bin:$(PATH)
+SHELL := env PATH=$(PATH) $(SHELL)
+$(eval $(call kerl_otp_target,$(ERLANG_OTP)))
+
+# Build Erlang/OTP only if it doesn't already exist.
+ifeq ($(wildcard $(KERL_INSTALL_DIR)/$(ERLANG_OTP))$(BUILD_ERLANG_OTP),)
+$(info Building Erlang/OTP $(ERLANG_OTP)... Please wait...)
+$(shell $(MAKE) $(KERL_INSTALL_DIR)/$(ERLANG_OTP) ERLANG_OTP=$(ERLANG_OTP) BUILD_ERLANG_OTP=1 >&2)
+endif
+
+else
+# Same for a HiPE enabled VM.
+ifneq ($(strip $(ERLANG_HIPE)),)
+export PATH := $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native/bin:$(PATH)
+SHELL := env PATH=$(PATH) $(SHELL)
+$(eval $(call kerl_hipe_target,$(ERLANG_HIPE)))
+
+# Build Erlang/OTP only if it doesn't already exist.
+ifeq ($(wildcard $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native)$(BUILD_ERLANG_OTP),)
+$(info Building HiPE-enabled Erlang/OTP $(ERLANG_HIPE)... Please wait...)
+$(shell $(MAKE) $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native ERLANG_HIPE=$(ERLANG_HIPE) BUILD_ERLANG_OTP=1 >&2)
+endif
+
+endif
+endif
+
+PACKAGES += aberth
+pkg_aberth_name = aberth
+pkg_aberth_description = Generic BERT-RPC server in Erlang
+pkg_aberth_homepage = https://github.com/a13x/aberth
+pkg_aberth_fetch = git
+pkg_aberth_repo = https://github.com/a13x/aberth
+pkg_aberth_commit = master
+
+PACKAGES += active
+pkg_active_name = active
+pkg_active_description = Active development for Erlang: rebuild and reload source/binary files while the VM is running
+pkg_active_homepage = https://github.com/proger/active
+pkg_active_fetch = git
+pkg_active_repo = https://github.com/proger/active
+pkg_active_commit = master
+
+PACKAGES += actordb_core
+pkg_actordb_core_name = actordb_core
+pkg_actordb_core_description = ActorDB main source
+pkg_actordb_core_homepage = http://www.actordb.com/
+pkg_actordb_core_fetch = git
+pkg_actordb_core_repo = https://github.com/biokoda/actordb_core
+pkg_actordb_core_commit = master
+
+PACKAGES += actordb_thrift
+pkg_actordb_thrift_name = actordb_thrift
+pkg_actordb_thrift_description = Thrift API for ActorDB
+pkg_actordb_thrift_homepage = http://www.actordb.com/
+pkg_actordb_thrift_fetch = git
+pkg_actordb_thrift_repo = https://github.com/biokoda/actordb_thrift
+pkg_actordb_thrift_commit = master
+
+PACKAGES += aleppo
+pkg_aleppo_name = aleppo
+pkg_aleppo_description = Alternative Erlang Pre-Processor
+pkg_aleppo_homepage = https://github.com/ErlyORM/aleppo
+pkg_aleppo_fetch = git
+pkg_aleppo_repo = https://github.com/ErlyORM/aleppo
+pkg_aleppo_commit = master
+
+PACKAGES += alog
+pkg_alog_name = alog
+pkg_alog_description = Simply the best logging framework for Erlang
+pkg_alog_homepage = https://github.com/siberian-fast-food/alogger
+pkg_alog_fetch = git
+pkg_alog_repo = https://github.com/siberian-fast-food/alogger
+pkg_alog_commit = master
+
+PACKAGES += amqp_client
+pkg_amqp_client_name = amqp_client
+pkg_amqp_client_description = RabbitMQ Erlang AMQP client
+pkg_amqp_client_homepage = https://www.rabbitmq.com/erlang-client-user-guide.html
+pkg_amqp_client_fetch = git
+pkg_amqp_client_repo = https://github.com/rabbitmq/rabbitmq-erlang-client.git
+pkg_amqp_client_commit = master
+
+PACKAGES += annotations
+pkg_annotations_name = annotations
+pkg_annotations_description = Simple code instrumentation utilities
+pkg_annotations_homepage = https://github.com/hyperthunk/annotations
+pkg_annotations_fetch = git
+pkg_annotations_repo = https://github.com/hyperthunk/annotations
+pkg_annotations_commit = master
+
+PACKAGES += antidote
+pkg_antidote_name = antidote
+pkg_antidote_description = Large-scale computation without synchronisation
+pkg_antidote_homepage = https://syncfree.lip6.fr/
+pkg_antidote_fetch = git
+pkg_antidote_repo = https://github.com/SyncFree/antidote
+pkg_antidote_commit = master
+
+PACKAGES += apns
+pkg_apns_name = apns
+pkg_apns_description = Apple Push Notification Server for Erlang
+pkg_apns_homepage = http://inaka.github.com/apns4erl
+pkg_apns_fetch = git
+pkg_apns_repo = https://github.com/inaka/apns4erl
+pkg_apns_commit = master
+
+PACKAGES += asciideck
+pkg_asciideck_name = asciideck
+pkg_asciideck_description = Asciidoc for Erlang.
+pkg_asciideck_homepage = https://ninenines.eu
+pkg_asciideck_fetch = git
+pkg_asciideck_repo = https://github.com/ninenines/asciideck
+pkg_asciideck_commit = master
+
+PACKAGES += azdht
+pkg_azdht_name = azdht
+pkg_azdht_description = Azureus Distributed Hash Table (DHT) in Erlang
+pkg_azdht_homepage = https://github.com/arcusfelis/azdht
+pkg_azdht_fetch = git
+pkg_azdht_repo = https://github.com/arcusfelis/azdht
+pkg_azdht_commit = master
+
+PACKAGES += backoff
+pkg_backoff_name = backoff
+pkg_backoff_description = Simple exponential backoffs in Erlang
+pkg_backoff_homepage = https://github.com/ferd/backoff
+pkg_backoff_fetch = git
+pkg_backoff_repo = https://github.com/ferd/backoff
+pkg_backoff_commit = master
+
+PACKAGES += barrel_tcp
+pkg_barrel_tcp_name = barrel_tcp
+pkg_barrel_tcp_description = barrel is a generic TCP acceptor pool with low latency in Erlang.
+pkg_barrel_tcp_homepage = https://github.com/benoitc-attic/barrel_tcp
+pkg_barrel_tcp_fetch = git
+pkg_barrel_tcp_repo = https://github.com/benoitc-attic/barrel_tcp
+pkg_barrel_tcp_commit = master
+
+PACKAGES += basho_bench
+pkg_basho_bench_name = basho_bench
+pkg_basho_bench_description = A load-generation and testing tool for basically whatever you can write a returning Erlang function for.
+pkg_basho_bench_homepage = https://github.com/basho/basho_bench
+pkg_basho_bench_fetch = git
+pkg_basho_bench_repo = https://github.com/basho/basho_bench
+pkg_basho_bench_commit = master
+
+PACKAGES += bcrypt
+pkg_bcrypt_name = bcrypt
+pkg_bcrypt_description = Bcrypt Erlang / C library
+pkg_bcrypt_homepage = https://github.com/erlangpack/bcrypt
+pkg_bcrypt_fetch = git
+pkg_bcrypt_repo = https://github.com/erlangpack/bcrypt.git
+pkg_bcrypt_commit = master
+
+PACKAGES += beam
+pkg_beam_name = beam
+pkg_beam_description = BEAM emulator written in Erlang
+pkg_beam_homepage = https://github.com/tonyrog/beam
+pkg_beam_fetch = git
+pkg_beam_repo = https://github.com/tonyrog/beam
+pkg_beam_commit = master
+
+PACKAGES += beanstalk
+pkg_beanstalk_name = beanstalk
+pkg_beanstalk_description = An Erlang client for beanstalkd
+pkg_beanstalk_homepage = https://github.com/tim/erlang-beanstalk
+pkg_beanstalk_fetch = git
+pkg_beanstalk_repo = https://github.com/tim/erlang-beanstalk
+pkg_beanstalk_commit = master
+
+PACKAGES += bear
+pkg_bear_name = bear
+pkg_bear_description = a set of statistics functions for erlang
+pkg_bear_homepage = https://github.com/boundary/bear
+pkg_bear_fetch = git
+pkg_bear_repo = https://github.com/boundary/bear
+pkg_bear_commit = master
+
+PACKAGES += bertconf
+pkg_bertconf_name = bertconf
+pkg_bertconf_description = Make ETS tables out of static BERT files that are auto-reloaded
+pkg_bertconf_homepage = https://github.com/ferd/bertconf
+pkg_bertconf_fetch = git
+pkg_bertconf_repo = https://github.com/ferd/bertconf
+pkg_bertconf_commit = master
+
+PACKAGES += bifrost
+pkg_bifrost_name = bifrost
+pkg_bifrost_description = Erlang FTP Server Framework
+pkg_bifrost_homepage = https://github.com/thorstadt/bifrost
+pkg_bifrost_fetch = git
+pkg_bifrost_repo = https://github.com/thorstadt/bifrost
+pkg_bifrost_commit = master
+
+PACKAGES += binpp
+pkg_binpp_name = binpp
+pkg_binpp_description = Erlang Binary Pretty Printer
+pkg_binpp_homepage = https://github.com/jtendo/binpp
+pkg_binpp_fetch = git
+pkg_binpp_repo = https://github.com/jtendo/binpp
+pkg_binpp_commit = master
+
+PACKAGES += bisect
+pkg_bisect_name = bisect
+pkg_bisect_description = Ordered fixed-size binary dictionary in Erlang
+pkg_bisect_homepage = https://github.com/knutin/bisect
+pkg_bisect_fetch = git
+pkg_bisect_repo = https://github.com/knutin/bisect
+pkg_bisect_commit = master
+
+PACKAGES += bitcask
+pkg_bitcask_name = bitcask
+pkg_bitcask_description = because you need another a key/value storage engine
+pkg_bitcask_homepage = https://github.com/basho/bitcask
+pkg_bitcask_fetch = git
+pkg_bitcask_repo = https://github.com/basho/bitcask
+pkg_bitcask_commit = develop
+
+PACKAGES += bitstore
+pkg_bitstore_name = bitstore
+pkg_bitstore_description = A document based ontology development environment
+pkg_bitstore_homepage = https://github.com/bdionne/bitstore
+pkg_bitstore_fetch = git
+pkg_bitstore_repo = https://github.com/bdionne/bitstore
+pkg_bitstore_commit = master
+
+PACKAGES += bootstrap
+pkg_bootstrap_name = bootstrap
+pkg_bootstrap_description = A simple, yet powerful Erlang cluster bootstrapping application.
+pkg_bootstrap_homepage = https://github.com/schlagert/bootstrap
+pkg_bootstrap_fetch = git
+pkg_bootstrap_repo = https://github.com/schlagert/bootstrap
+pkg_bootstrap_commit = master
+
+PACKAGES += boss
+pkg_boss_name = boss
+pkg_boss_description = Erlang web MVC, now featuring Comet
+pkg_boss_homepage = https://github.com/ChicagoBoss/ChicagoBoss
+pkg_boss_fetch = git
+pkg_boss_repo = https://github.com/ChicagoBoss/ChicagoBoss
+pkg_boss_commit = master
+
+PACKAGES += boss_db
+pkg_boss_db_name = boss_db
+pkg_boss_db_description = BossDB: a sharded, caching, pooling, evented ORM for Erlang
+pkg_boss_db_homepage = https://github.com/ErlyORM/boss_db
+pkg_boss_db_fetch = git
+pkg_boss_db_repo = https://github.com/ErlyORM/boss_db
+pkg_boss_db_commit = master
+
+PACKAGES += brod
+pkg_brod_name = brod
+pkg_brod_description = Kafka client in Erlang
+pkg_brod_homepage = https://github.com/klarna/brod
+pkg_brod_fetch = git
+pkg_brod_repo = https://github.com/klarna/brod.git
+pkg_brod_commit = master
+
+PACKAGES += bson
+pkg_bson_name = bson
+pkg_bson_description = BSON documents in Erlang, see bsonspec.org
+pkg_bson_homepage = https://github.com/comtihon/bson-erlang
+pkg_bson_fetch = git
+pkg_bson_repo = https://github.com/comtihon/bson-erlang
+pkg_bson_commit = master
+
+PACKAGES += bullet
+pkg_bullet_name = bullet
+pkg_bullet_description = Simple, reliable, efficient streaming for Cowboy.
+pkg_bullet_homepage = http://ninenines.eu
+pkg_bullet_fetch = git
+pkg_bullet_repo = https://github.com/ninenines/bullet
+pkg_bullet_commit = master
+
+PACKAGES += cache
+pkg_cache_name = cache
+pkg_cache_description = Erlang in-memory cache
+pkg_cache_homepage = https://github.com/fogfish/cache
+pkg_cache_fetch = git
+pkg_cache_repo = https://github.com/fogfish/cache
+pkg_cache_commit = master
+
+PACKAGES += cake
+pkg_cake_name = cake
+pkg_cake_description = Really simple terminal colorization
+pkg_cake_homepage = https://github.com/darach/cake-erl
+pkg_cake_fetch = git
+pkg_cake_repo = https://github.com/darach/cake-erl
+pkg_cake_commit = master
+
+PACKAGES += carotene
+pkg_carotene_name = carotene
+pkg_carotene_description = Real-time server
+pkg_carotene_homepage = https://github.com/carotene/carotene
+pkg_carotene_fetch = git
+pkg_carotene_repo = https://github.com/carotene/carotene
+pkg_carotene_commit = master
+
+PACKAGES += cberl
+pkg_cberl_name = cberl
+pkg_cberl_description = NIF based Erlang bindings for Couchbase
+pkg_cberl_homepage = https://github.com/chitika/cberl
+pkg_cberl_fetch = git
+pkg_cberl_repo = https://github.com/chitika/cberl
+pkg_cberl_commit = master
+
+PACKAGES += cecho
+pkg_cecho_name = cecho
+pkg_cecho_description = An ncurses library for Erlang
+pkg_cecho_homepage = https://github.com/mazenharake/cecho
+pkg_cecho_fetch = git
+pkg_cecho_repo = https://github.com/mazenharake/cecho
+pkg_cecho_commit = master
+
+PACKAGES += cferl
+pkg_cferl_name = cferl
+pkg_cferl_description = Rackspace / Open Stack Cloud Files Erlang Client
+pkg_cferl_homepage = https://github.com/ddossot/cferl
+pkg_cferl_fetch = git
+pkg_cferl_repo = https://github.com/ddossot/cferl
+pkg_cferl_commit = master
+
+PACKAGES += chaos_monkey
+pkg_chaos_monkey_name = chaos_monkey
+pkg_chaos_monkey_description = This is The CHAOS MONKEY. It will kill your processes.
+pkg_chaos_monkey_homepage = https://github.com/dLuna/chaos_monkey
+pkg_chaos_monkey_fetch = git
+pkg_chaos_monkey_repo = https://github.com/dLuna/chaos_monkey
+pkg_chaos_monkey_commit = master
+
+PACKAGES += check_node
+pkg_check_node_name = check_node
+pkg_check_node_description = Nagios Scripts for monitoring Riak
+pkg_check_node_homepage = https://github.com/basho-labs/riak_nagios
+pkg_check_node_fetch = git
+pkg_check_node_repo = https://github.com/basho-labs/riak_nagios
+pkg_check_node_commit = master
+
+PACKAGES += chronos
+pkg_chronos_name = chronos
+pkg_chronos_description = Timer module for Erlang that makes it easy to abstract time out of the tests.
+pkg_chronos_homepage = https://github.com/lehoff/chronos
+pkg_chronos_fetch = git
+pkg_chronos_repo = https://github.com/lehoff/chronos
+pkg_chronos_commit = master
+
+PACKAGES += chumak
+pkg_chumak_name = chumak
+pkg_chumak_description = Pure Erlang implementation of ZeroMQ Message Transport Protocol.
+pkg_chumak_homepage = http://choven.ca
+pkg_chumak_fetch = git
+pkg_chumak_repo = https://github.com/chovencorp/chumak
+pkg_chumak_commit = master
+
+PACKAGES += cl
+pkg_cl_name = cl
+pkg_cl_description = OpenCL binding for Erlang
+pkg_cl_homepage = https://github.com/tonyrog/cl
+pkg_cl_fetch = git
+pkg_cl_repo = https://github.com/tonyrog/cl
+pkg_cl_commit = master
+
+PACKAGES += clique
+pkg_clique_name = clique
+pkg_clique_description = CLI Framework for Erlang
+pkg_clique_homepage = https://github.com/basho/clique
+pkg_clique_fetch = git
+pkg_clique_repo = https://github.com/basho/clique
+pkg_clique_commit = develop
+
+PACKAGES += cloudi_core
+pkg_cloudi_core_name = cloudi_core
+pkg_cloudi_core_description = CloudI internal service runtime
+pkg_cloudi_core_homepage = http://cloudi.org/
+pkg_cloudi_core_fetch = git
+pkg_cloudi_core_repo = https://github.com/CloudI/cloudi_core
+pkg_cloudi_core_commit = master
+
+PACKAGES += cloudi_service_api_requests
+pkg_cloudi_service_api_requests_name = cloudi_service_api_requests
+pkg_cloudi_service_api_requests_description = CloudI Service API requests (JSON-RPC/Erlang-term support)
+pkg_cloudi_service_api_requests_homepage = http://cloudi.org/
+pkg_cloudi_service_api_requests_fetch = git
+pkg_cloudi_service_api_requests_repo = https://github.com/CloudI/cloudi_service_api_requests
+pkg_cloudi_service_api_requests_commit = master
+
+PACKAGES += cloudi_service_db
+pkg_cloudi_service_db_name = cloudi_service_db
+pkg_cloudi_service_db_description = CloudI Database (in-memory/testing/generic)
+pkg_cloudi_service_db_homepage = http://cloudi.org/
+pkg_cloudi_service_db_fetch = git
+pkg_cloudi_service_db_repo = https://github.com/CloudI/cloudi_service_db
+pkg_cloudi_service_db_commit = master
+
+PACKAGES += cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_name = cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_description = Cassandra CloudI Service
+pkg_cloudi_service_db_cassandra_homepage = http://cloudi.org/
+pkg_cloudi_service_db_cassandra_fetch = git
+pkg_cloudi_service_db_cassandra_repo = https://github.com/CloudI/cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_commit = master
+
+PACKAGES += cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_name = cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_description = Cassandra CQL CloudI Service
+pkg_cloudi_service_db_cassandra_cql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_cassandra_cql_fetch = git
+pkg_cloudi_service_db_cassandra_cql_repo = https://github.com/CloudI/cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_commit = master
+
+PACKAGES += cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_name = cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_description = CouchDB CloudI Service
+pkg_cloudi_service_db_couchdb_homepage = http://cloudi.org/
+pkg_cloudi_service_db_couchdb_fetch = git
+pkg_cloudi_service_db_couchdb_repo = https://github.com/CloudI/cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_commit = master
+
+PACKAGES += cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_name = cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_description = elasticsearch CloudI Service
+pkg_cloudi_service_db_elasticsearch_homepage = http://cloudi.org/
+pkg_cloudi_service_db_elasticsearch_fetch = git
+pkg_cloudi_service_db_elasticsearch_repo = https://github.com/CloudI/cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_commit = master
+
+PACKAGES += cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_name = cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_description = memcached CloudI Service
+pkg_cloudi_service_db_memcached_homepage = http://cloudi.org/
+pkg_cloudi_service_db_memcached_fetch = git
+pkg_cloudi_service_db_memcached_repo = https://github.com/CloudI/cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_commit = master
+
+PACKAGES += cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_name = cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_description = MySQL CloudI Service
+pkg_cloudi_service_db_mysql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_mysql_fetch = git
+pkg_cloudi_service_db_mysql_repo = https://github.com/CloudI/cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_commit = master
+
+PACKAGES += cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_name = cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_description = PostgreSQL CloudI Service
+pkg_cloudi_service_db_pgsql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_pgsql_fetch = git
+pkg_cloudi_service_db_pgsql_repo = https://github.com/CloudI/cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_commit = master
+
+PACKAGES += cloudi_service_db_riak
+pkg_cloudi_service_db_riak_name = cloudi_service_db_riak
+pkg_cloudi_service_db_riak_description = Riak CloudI Service
+pkg_cloudi_service_db_riak_homepage = http://cloudi.org/
+pkg_cloudi_service_db_riak_fetch = git
+pkg_cloudi_service_db_riak_repo = https://github.com/CloudI/cloudi_service_db_riak
+pkg_cloudi_service_db_riak_commit = master
+
+PACKAGES += cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_name = cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_description = Tokyo Tyrant CloudI Service
+pkg_cloudi_service_db_tokyotyrant_homepage = http://cloudi.org/
+pkg_cloudi_service_db_tokyotyrant_fetch = git
+pkg_cloudi_service_db_tokyotyrant_repo = https://github.com/CloudI/cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_commit = master
+
+PACKAGES += cloudi_service_filesystem
+pkg_cloudi_service_filesystem_name = cloudi_service_filesystem
+pkg_cloudi_service_filesystem_description = Filesystem CloudI Service
+pkg_cloudi_service_filesystem_homepage = http://cloudi.org/
+pkg_cloudi_service_filesystem_fetch = git
+pkg_cloudi_service_filesystem_repo = https://github.com/CloudI/cloudi_service_filesystem
+pkg_cloudi_service_filesystem_commit = master
+
+PACKAGES += cloudi_service_http_client
+pkg_cloudi_service_http_client_name = cloudi_service_http_client
+pkg_cloudi_service_http_client_description = HTTP client CloudI Service
+pkg_cloudi_service_http_client_homepage = http://cloudi.org/
+pkg_cloudi_service_http_client_fetch = git
+pkg_cloudi_service_http_client_repo = https://github.com/CloudI/cloudi_service_http_client
+pkg_cloudi_service_http_client_commit = master
+
+PACKAGES += cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_name = cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_description = cowboy HTTP/HTTPS CloudI Service
+pkg_cloudi_service_http_cowboy_homepage = http://cloudi.org/
+pkg_cloudi_service_http_cowboy_fetch = git
+pkg_cloudi_service_http_cowboy_repo = https://github.com/CloudI/cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_commit = master
+
+PACKAGES += cloudi_service_http_elli
+pkg_cloudi_service_http_elli_name = cloudi_service_http_elli
+pkg_cloudi_service_http_elli_description = elli HTTP CloudI Service
+pkg_cloudi_service_http_elli_homepage = http://cloudi.org/
+pkg_cloudi_service_http_elli_fetch = git
+pkg_cloudi_service_http_elli_repo = https://github.com/CloudI/cloudi_service_http_elli
+pkg_cloudi_service_http_elli_commit = master
+
+PACKAGES += cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_name = cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_description = Map/Reduce CloudI Service
+pkg_cloudi_service_map_reduce_homepage = http://cloudi.org/
+pkg_cloudi_service_map_reduce_fetch = git
+pkg_cloudi_service_map_reduce_repo = https://github.com/CloudI/cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_commit = master
+
+PACKAGES += cloudi_service_oauth1
+pkg_cloudi_service_oauth1_name = cloudi_service_oauth1
+pkg_cloudi_service_oauth1_description = OAuth v1.0 CloudI Service
+pkg_cloudi_service_oauth1_homepage = http://cloudi.org/
+pkg_cloudi_service_oauth1_fetch = git
+pkg_cloudi_service_oauth1_repo = https://github.com/CloudI/cloudi_service_oauth1
+pkg_cloudi_service_oauth1_commit = master
+
+PACKAGES += cloudi_service_queue
+pkg_cloudi_service_queue_name = cloudi_service_queue
+pkg_cloudi_service_queue_description = Persistent Queue Service
+pkg_cloudi_service_queue_homepage = http://cloudi.org/
+pkg_cloudi_service_queue_fetch = git
+pkg_cloudi_service_queue_repo = https://github.com/CloudI/cloudi_service_queue
+pkg_cloudi_service_queue_commit = master
+
+PACKAGES += cloudi_service_quorum
+pkg_cloudi_service_quorum_name = cloudi_service_quorum
+pkg_cloudi_service_quorum_description = CloudI Quorum Service
+pkg_cloudi_service_quorum_homepage = http://cloudi.org/
+pkg_cloudi_service_quorum_fetch = git
+pkg_cloudi_service_quorum_repo = https://github.com/CloudI/cloudi_service_quorum
+pkg_cloudi_service_quorum_commit = master
+
+PACKAGES += cloudi_service_router
+pkg_cloudi_service_router_name = cloudi_service_router
+pkg_cloudi_service_router_description = CloudI Router Service
+pkg_cloudi_service_router_homepage = http://cloudi.org/
+pkg_cloudi_service_router_fetch = git
+pkg_cloudi_service_router_repo = https://github.com/CloudI/cloudi_service_router
+pkg_cloudi_service_router_commit = master
+
+PACKAGES += cloudi_service_tcp
+pkg_cloudi_service_tcp_name = cloudi_service_tcp
+pkg_cloudi_service_tcp_description = TCP CloudI Service
+pkg_cloudi_service_tcp_homepage = http://cloudi.org/
+pkg_cloudi_service_tcp_fetch = git
+pkg_cloudi_service_tcp_repo = https://github.com/CloudI/cloudi_service_tcp
+pkg_cloudi_service_tcp_commit = master
+
+PACKAGES += cloudi_service_timers
+pkg_cloudi_service_timers_name = cloudi_service_timers
+pkg_cloudi_service_timers_description = Timers CloudI Service
+pkg_cloudi_service_timers_homepage = http://cloudi.org/
+pkg_cloudi_service_timers_fetch = git
+pkg_cloudi_service_timers_repo = https://github.com/CloudI/cloudi_service_timers
+pkg_cloudi_service_timers_commit = master
+
+PACKAGES += cloudi_service_udp
+pkg_cloudi_service_udp_name = cloudi_service_udp
+pkg_cloudi_service_udp_description = UDP CloudI Service
+pkg_cloudi_service_udp_homepage = http://cloudi.org/
+pkg_cloudi_service_udp_fetch = git
+pkg_cloudi_service_udp_repo = https://github.com/CloudI/cloudi_service_udp
+pkg_cloudi_service_udp_commit = master
+
+PACKAGES += cloudi_service_validate
+pkg_cloudi_service_validate_name = cloudi_service_validate
+pkg_cloudi_service_validate_description = CloudI Validate Service
+pkg_cloudi_service_validate_homepage = http://cloudi.org/
+pkg_cloudi_service_validate_fetch = git
+pkg_cloudi_service_validate_repo = https://github.com/CloudI/cloudi_service_validate
+pkg_cloudi_service_validate_commit = master
+
+PACKAGES += cloudi_service_zeromq
+pkg_cloudi_service_zeromq_name = cloudi_service_zeromq
+pkg_cloudi_service_zeromq_description = ZeroMQ CloudI Service
+pkg_cloudi_service_zeromq_homepage = http://cloudi.org/
+pkg_cloudi_service_zeromq_fetch = git
+pkg_cloudi_service_zeromq_repo = https://github.com/CloudI/cloudi_service_zeromq
+pkg_cloudi_service_zeromq_commit = master
+
+PACKAGES += cluster_info
+pkg_cluster_info_name = cluster_info
+pkg_cluster_info_description = Fork of Hibari's nifty cluster_info OTP app
+pkg_cluster_info_homepage = https://github.com/basho/cluster_info
+pkg_cluster_info_fetch = git
+pkg_cluster_info_repo = https://github.com/basho/cluster_info
+pkg_cluster_info_commit = master
+
+PACKAGES += color
+pkg_color_name = color
+pkg_color_description = ANSI colors for your Erlang
+pkg_color_homepage = https://github.com/julianduque/erlang-color
+pkg_color_fetch = git
+pkg_color_repo = https://github.com/julianduque/erlang-color
+pkg_color_commit = master
+
+PACKAGES += confetti
+pkg_confetti_name = confetti
+pkg_confetti_description = Erlang configuration provider / application:get_env/2 on steroids
+pkg_confetti_homepage = https://github.com/jtendo/confetti
+pkg_confetti_fetch = git
+pkg_confetti_repo = https://github.com/jtendo/confetti
+pkg_confetti_commit = master
+
+PACKAGES += couchbeam
+pkg_couchbeam_name = couchbeam
+pkg_couchbeam_description = Apache CouchDB client in Erlang
+pkg_couchbeam_homepage = https://github.com/benoitc/couchbeam
+pkg_couchbeam_fetch = git
+pkg_couchbeam_repo = https://github.com/benoitc/couchbeam
+pkg_couchbeam_commit = master
+
+PACKAGES += covertool
+pkg_covertool_name = covertool
+pkg_covertool_description = Tool to convert Erlang cover data files into Cobertura XML reports
+pkg_covertool_homepage = https://github.com/idubrov/covertool
+pkg_covertool_fetch = git
+pkg_covertool_repo = https://github.com/idubrov/covertool
+pkg_covertool_commit = master
+
+PACKAGES += cowboy
+pkg_cowboy_name = cowboy
+pkg_cowboy_description = Small, fast and modular HTTP server.
+pkg_cowboy_homepage = http://ninenines.eu
+pkg_cowboy_fetch = git
+pkg_cowboy_repo = https://github.com/ninenines/cowboy
+pkg_cowboy_commit = 1.0.4
+
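
Most entries in this index track master, but cowboy is pinned to the 1.0.4 release tag (as is cowlib, further down, to 1.0.2). A project can override any pinned default without editing the index, since per-dependency dep_* variables take precedence over the pkg_* ones; a sketch, using cowboy's real 1.1.2 tag purely as an illustration:

    # Hypothetical override of the version pinned in the index.
    PROJECT = my_app
    DEPS = cowboy
    dep_cowboy_commit = 1.1.2   # any tag, branch or commit sha works here
    include erlang.mk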
+PACKAGES += cowdb
+pkg_cowdb_name = cowdb
+pkg_cowdb_description = Pure Key/Value database library for Erlang Applications
+pkg_cowdb_homepage = https://github.com/refuge/cowdb
+pkg_cowdb_fetch = git
+pkg_cowdb_repo = https://github.com/refuge/cowdb
+pkg_cowdb_commit = master
+
+PACKAGES += cowlib
+pkg_cowlib_name = cowlib
+pkg_cowlib_description = Support library for manipulating Web protocols.
+pkg_cowlib_homepage = http://ninenines.eu
+pkg_cowlib_fetch = git
+pkg_cowlib_repo = https://github.com/ninenines/cowlib
+pkg_cowlib_commit = 1.0.2
+
+PACKAGES += cpg
+pkg_cpg_name = cpg
+pkg_cpg_description = CloudI Process Groups
+pkg_cpg_homepage = https://github.com/okeuday/cpg
+pkg_cpg_fetch = git
+pkg_cpg_repo = https://github.com/okeuday/cpg
+pkg_cpg_commit = master
+
+PACKAGES += cqerl
+pkg_cqerl_name = cqerl
+pkg_cqerl_description = Native Erlang CQL client for Cassandra
+pkg_cqerl_homepage = https://matehat.github.io/cqerl/
+pkg_cqerl_fetch = git
+pkg_cqerl_repo = https://github.com/matehat/cqerl
+pkg_cqerl_commit = master
+
+PACKAGES += cr
+pkg_cr_name = cr
+pkg_cr_description = Chain Replication
+pkg_cr_homepage = https://synrc.com/apps/cr/doc/cr.htm
+pkg_cr_fetch = git
+pkg_cr_repo = https://github.com/spawnproc/cr
+pkg_cr_commit = master
+
+PACKAGES += cuttlefish
+pkg_cuttlefish_name = cuttlefish
+pkg_cuttlefish_description = never lose your childlike sense of wonder baby cuttlefish, promise me?
+pkg_cuttlefish_homepage = https://github.com/basho/cuttlefish
+pkg_cuttlefish_fetch = git
+pkg_cuttlefish_repo = https://github.com/basho/cuttlefish
+pkg_cuttlefish_commit = master
+
+PACKAGES += damocles
+pkg_damocles_name = damocles
+pkg_damocles_description = Erlang library for generating adversarial network conditions for QAing distributed applications/systems on a single Linux box.
+pkg_damocles_homepage = https://github.com/lostcolony/damocles
+pkg_damocles_fetch = git
+pkg_damocles_repo = https://github.com/lostcolony/damocles
+pkg_damocles_commit = master
+
+PACKAGES += debbie
+pkg_debbie_name = debbie
+pkg_debbie_description = .DEB Built In Erlang
+pkg_debbie_homepage = https://github.com/crownedgrouse/debbie
+pkg_debbie_fetch = git
+pkg_debbie_repo = https://github.com/crownedgrouse/debbie
+pkg_debbie_commit = master
+
+PACKAGES += decimal
+pkg_decimal_name = decimal
+pkg_decimal_description = An Erlang decimal arithmetic library
+pkg_decimal_homepage = https://github.com/tim/erlang-decimal
+pkg_decimal_fetch = git
+pkg_decimal_repo = https://github.com/tim/erlang-decimal
+pkg_decimal_commit = master
+
+PACKAGES += detergent
+pkg_detergent_name = detergent
+pkg_detergent_description = An emulsifying Erlang SOAP library
+pkg_detergent_homepage = https://github.com/devinus/detergent
+pkg_detergent_fetch = git
+pkg_detergent_repo = https://github.com/devinus/detergent
+pkg_detergent_commit = master
+
+PACKAGES += detest
+pkg_detest_name = detest
+pkg_detest_description = Tool for running tests on a cluster of erlang nodes
+pkg_detest_homepage = https://github.com/biokoda/detest
+pkg_detest_fetch = git
+pkg_detest_repo = https://github.com/biokoda/detest
+pkg_detest_commit = master
+
+PACKAGES += dh_date
+pkg_dh_date_name = dh_date
+pkg_dh_date_description = Date formatting / parsing library for erlang
+pkg_dh_date_homepage = https://github.com/daleharvey/dh_date
+pkg_dh_date_fetch = git
+pkg_dh_date_repo = https://github.com/daleharvey/dh_date
+pkg_dh_date_commit = master
+
+PACKAGES += dirbusterl
+pkg_dirbusterl_name = dirbusterl
+pkg_dirbusterl_description = DirBuster successor in Erlang
+pkg_dirbusterl_homepage = https://github.com/silentsignal/DirBustErl
+pkg_dirbusterl_fetch = git
+pkg_dirbusterl_repo = https://github.com/silentsignal/DirBustErl
+pkg_dirbusterl_commit = master
+
+PACKAGES += dispcount
+pkg_dispcount_name = dispcount
+pkg_dispcount_description = Erlang task dispatcher based on ETS counters.
+pkg_dispcount_homepage = https://github.com/ferd/dispcount
+pkg_dispcount_fetch = git
+pkg_dispcount_repo = https://github.com/ferd/dispcount
+pkg_dispcount_commit = master
+
+PACKAGES += dlhttpc
+pkg_dlhttpc_name = dlhttpc
+pkg_dlhttpc_description = dispcount-based lhttpc fork for massive amounts of requests to limited endpoints
+pkg_dlhttpc_homepage = https://github.com/ferd/dlhttpc
+pkg_dlhttpc_fetch = git
+pkg_dlhttpc_repo = https://github.com/ferd/dlhttpc
+pkg_dlhttpc_commit = master
+
+PACKAGES += dns
+pkg_dns_name = dns
+pkg_dns_description = Erlang DNS library
+pkg_dns_homepage = https://github.com/aetrion/dns_erlang
+pkg_dns_fetch = git
+pkg_dns_repo = https://github.com/aetrion/dns_erlang
+pkg_dns_commit = master
+
+PACKAGES += dnssd
+pkg_dnssd_name = dnssd
+pkg_dnssd_description = Erlang interface to Apple's Bonjour DNS Service Discovery implementation
+pkg_dnssd_homepage = https://github.com/benoitc/dnssd_erlang
+pkg_dnssd_fetch = git
+pkg_dnssd_repo = https://github.com/benoitc/dnssd_erlang
+pkg_dnssd_commit = master
+
+PACKAGES += dynamic_compile
+pkg_dynamic_compile_name = dynamic_compile
+pkg_dynamic_compile_description = compile and load erlang modules from string input
+pkg_dynamic_compile_homepage = https://github.com/jkvor/dynamic_compile
+pkg_dynamic_compile_fetch = git
+pkg_dynamic_compile_repo = https://github.com/jkvor/dynamic_compile
+pkg_dynamic_compile_commit = master
+
+PACKAGES += e2
+pkg_e2_name = e2
+pkg_e2_description = Library to simplify writing correct OTP applications.
+pkg_e2_homepage = http://e2project.org
+pkg_e2_fetch = git
+pkg_e2_repo = https://github.com/gar1t/e2
+pkg_e2_commit = master
+
+PACKAGES += eamf
+pkg_eamf_name = eamf
+pkg_eamf_description = eAMF provides Action Message Format (AMF) support for Erlang
+pkg_eamf_homepage = https://github.com/mrinalwadhwa/eamf
+pkg_eamf_fetch = git
+pkg_eamf_repo = https://github.com/mrinalwadhwa/eamf
+pkg_eamf_commit = master
+
+PACKAGES += eavro
+pkg_eavro_name = eavro
+pkg_eavro_description = Apache Avro encoder/decoder
+pkg_eavro_homepage = https://github.com/SIfoxDevTeam/eavro
+pkg_eavro_fetch = git
+pkg_eavro_repo = https://github.com/SIfoxDevTeam/eavro
+pkg_eavro_commit = master
+
+PACKAGES += ecapnp
+pkg_ecapnp_name = ecapnp
+pkg_ecapnp_description = Cap'n Proto library for Erlang
+pkg_ecapnp_homepage = https://github.com/kaos/ecapnp
+pkg_ecapnp_fetch = git
+pkg_ecapnp_repo = https://github.com/kaos/ecapnp
+pkg_ecapnp_commit = master
+
+PACKAGES += econfig
+pkg_econfig_name = econfig
+pkg_econfig_description = simple Erlang config handler using INI files
+pkg_econfig_homepage = https://github.com/benoitc/econfig
+pkg_econfig_fetch = git
+pkg_econfig_repo = https://github.com/benoitc/econfig
+pkg_econfig_commit = master
+
+PACKAGES += edate
+pkg_edate_name = edate
+pkg_edate_description = date manipulation library for erlang
+pkg_edate_homepage = https://github.com/dweldon/edate
+pkg_edate_fetch = git
+pkg_edate_repo = https://github.com/dweldon/edate
+pkg_edate_commit = master
+
+PACKAGES += edgar
+pkg_edgar_name = edgar
+pkg_edgar_description = Erlang Does GNU AR
+pkg_edgar_homepage = https://github.com/crownedgrouse/edgar
+pkg_edgar_fetch = git
+pkg_edgar_repo = https://github.com/crownedgrouse/edgar
+pkg_edgar_commit = master
+
+PACKAGES += edis
+pkg_edis_name = edis
+pkg_edis_description = An Erlang implementation of Redis KV Store
+pkg_edis_homepage = http://inaka.github.com/edis/
+pkg_edis_fetch = git
+pkg_edis_repo = https://github.com/inaka/edis
+pkg_edis_commit = master
+
+PACKAGES += edns
+pkg_edns_name = edns
+pkg_edns_description = Erlang/OTP DNS server
+pkg_edns_homepage = https://github.com/hcvst/erlang-dns
+pkg_edns_fetch = git
+pkg_edns_repo = https://github.com/hcvst/erlang-dns
+pkg_edns_commit = master
+
+PACKAGES += edown
+pkg_edown_name = edown
+pkg_edown_description = EDoc extension for generating Github-flavored Markdown
+pkg_edown_homepage = https://github.com/uwiger/edown
+pkg_edown_fetch = git
+pkg_edown_repo = https://github.com/uwiger/edown
+pkg_edown_commit = master
+
+PACKAGES += eep
+pkg_eep_name = eep
+pkg_eep_description = Erlang Easy Profiling (eep) application provides a way to analyze application performance and call hierarchy
+pkg_eep_homepage = https://github.com/virtan/eep
+pkg_eep_fetch = git
+pkg_eep_repo = https://github.com/virtan/eep
+pkg_eep_commit = master
+
+PACKAGES += eep_app
+pkg_eep_app_name = eep_app
+pkg_eep_app_description = Embedded Event Processing
+pkg_eep_app_homepage = https://github.com/darach/eep-erl
+pkg_eep_app_fetch = git
+pkg_eep_app_repo = https://github.com/darach/eep-erl
+pkg_eep_app_commit = master
+
+PACKAGES += efene
+pkg_efene_name = efene
+pkg_efene_description = Alternative syntax for the Erlang Programming Language focusing on simplicity, ease of use and programmer UX
+pkg_efene_homepage = https://github.com/efene/efene
+pkg_efene_fetch = git
+pkg_efene_repo = https://github.com/efene/efene
+pkg_efene_commit = master
+
+PACKAGES += egeoip
+pkg_egeoip_name = egeoip
+pkg_egeoip_description = Erlang IP Geolocation module, currently supporting the MaxMind GeoLite City Database.
+pkg_egeoip_homepage = https://github.com/mochi/egeoip
+pkg_egeoip_fetch = git
+pkg_egeoip_repo = https://github.com/mochi/egeoip
+pkg_egeoip_commit = master
+
+PACKAGES += ehsa
+pkg_ehsa_name = ehsa
+pkg_ehsa_description = Erlang HTTP server basic and digest authentication modules
+pkg_ehsa_homepage = https://bitbucket.org/a12n/ehsa
+pkg_ehsa_fetch = hg
+pkg_ehsa_repo = https://bitbucket.org/a12n/ehsa
+pkg_ehsa_commit = default
+
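
ehsa is one of the few entries fetched with Mercurial rather than git, and its commit is default, Mercurial's default branch; the _fetch field selects the fetch method per package. The same fetch/repo/commit triplet can also be supplied inline for a dependency that is not in the index at all, via the dep_NAME form erlang.mk accepts. A sketch (my_private_lib and its URL are hypothetical):

    # Inline dependency spec: <fetch method> <repository> <commit>.
    PROJECT = my_app
    DEPS = ehsa my_private_lib
    dep_my_private_lib = git https://example.com/my_private_lib.git master
    include erlang.mk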
+PACKAGES += ej
+pkg_ej_name = ej
+pkg_ej_description = Helper module for working with Erlang terms representing JSON
+pkg_ej_homepage = https://github.com/seth/ej
+pkg_ej_fetch = git
+pkg_ej_repo = https://github.com/seth/ej
+pkg_ej_commit = master
+
+PACKAGES += ejabberd
+pkg_ejabberd_name = ejabberd
+pkg_ejabberd_description = Robust, ubiquitous and massively scalable Jabber / XMPP Instant Messaging platform
+pkg_ejabberd_homepage = https://github.com/processone/ejabberd
+pkg_ejabberd_fetch = git
+pkg_ejabberd_repo = https://github.com/processone/ejabberd
+pkg_ejabberd_commit = master
+
+PACKAGES += ejwt
+pkg_ejwt_name = ejwt
+pkg_ejwt_description = Erlang library for JSON Web Tokens
+pkg_ejwt_homepage = https://github.com/artefactop/ejwt
+pkg_ejwt_fetch = git
+pkg_ejwt_repo = https://github.com/artefactop/ejwt
+pkg_ejwt_commit = master
+
+PACKAGES += ekaf
+pkg_ekaf_name = ekaf
+pkg_ekaf_description = A minimal, high-performance Kafka client in Erlang.
+pkg_ekaf_homepage = https://github.com/helpshift/ekaf
+pkg_ekaf_fetch = git
+pkg_ekaf_repo = https://github.com/helpshift/ekaf
+pkg_ekaf_commit = master
+
+PACKAGES += elarm
+pkg_elarm_name = elarm
+pkg_elarm_description = Alarm Manager for Erlang.
+pkg_elarm_homepage = https://github.com/esl/elarm
+pkg_elarm_fetch = git
+pkg_elarm_repo = https://github.com/esl/elarm
+pkg_elarm_commit = master
+
+PACKAGES += eleveldb
+pkg_eleveldb_name = eleveldb
+pkg_eleveldb_description = Erlang LevelDB API
+pkg_eleveldb_homepage = https://github.com/basho/eleveldb
+pkg_eleveldb_fetch = git
+pkg_eleveldb_repo = https://github.com/basho/eleveldb
+pkg_eleveldb_commit = master
+
+PACKAGES += elixir
+pkg_elixir_name = elixir
+pkg_elixir_description = Elixir is a dynamic, functional language designed for building scalable and maintainable applications
+pkg_elixir_homepage = https://elixir-lang.org/
+pkg_elixir_fetch = git
+pkg_elixir_repo = https://github.com/elixir-lang/elixir
+pkg_elixir_commit = master
+
+PACKAGES += elli
+pkg_elli_name = elli
+pkg_elli_description = Simple, robust and performant Erlang web server
+pkg_elli_homepage = https://github.com/elli-lib/elli
+pkg_elli_fetch = git
+pkg_elli_repo = https://github.com/elli-lib/elli
+pkg_elli_commit = master
+
+PACKAGES += elvis
+pkg_elvis_name = elvis
+pkg_elvis_description = Erlang Style Reviewer
+pkg_elvis_homepage = https://github.com/inaka/elvis
+pkg_elvis_fetch = git
+pkg_elvis_repo = https://github.com/inaka/elvis
+pkg_elvis_commit = master
+
+PACKAGES += emagick
+pkg_emagick_name = emagick
+pkg_emagick_description = Wrapper for Graphics/ImageMagick command line tool.
+pkg_emagick_homepage = https://github.com/kivra/emagick
+pkg_emagick_fetch = git
+pkg_emagick_repo = https://github.com/kivra/emagick
+pkg_emagick_commit = master
+
+PACKAGES += emysql
+pkg_emysql_name = emysql
+pkg_emysql_description = Stable, pure Erlang MySQL driver.
+pkg_emysql_homepage = https://github.com/Eonblast/Emysql
+pkg_emysql_fetch = git
+pkg_emysql_repo = https://github.com/Eonblast/Emysql
+pkg_emysql_commit = master
+
+PACKAGES += enm
+pkg_enm_name = enm
+pkg_enm_description = Erlang driver for nanomsg
+pkg_enm_homepage = https://github.com/basho/enm
+pkg_enm_fetch = git
+pkg_enm_repo = https://github.com/basho/enm
+pkg_enm_commit = master
+
+PACKAGES += entop
+pkg_entop_name = entop
+pkg_entop_description = A top-like tool for monitoring an Erlang node
+pkg_entop_homepage = https://github.com/mazenharake/entop
+pkg_entop_fetch = git
+pkg_entop_repo = https://github.com/mazenharake/entop
+pkg_entop_commit = master
+
+PACKAGES += epcap
+pkg_epcap_name = epcap
+pkg_epcap_description = Erlang packet capture interface using pcap
+pkg_epcap_homepage = https://github.com/msantos/epcap
+pkg_epcap_fetch = git
+pkg_epcap_repo = https://github.com/msantos/epcap
+pkg_epcap_commit = master
+
+PACKAGES += eper
+pkg_eper_name = eper
+pkg_eper_description = Erlang performance and debugging tools.
+pkg_eper_homepage = https://github.com/massemanet/eper
+pkg_eper_fetch = git
+pkg_eper_repo = https://github.com/massemanet/eper
+pkg_eper_commit = master
+
+PACKAGES += epgsql
+pkg_epgsql_name = epgsql
+pkg_epgsql_description = Erlang PostgreSQL client library.
+pkg_epgsql_homepage = https://github.com/epgsql/epgsql
+pkg_epgsql_fetch = git
+pkg_epgsql_repo = https://github.com/epgsql/epgsql
+pkg_epgsql_commit = master
+
+PACKAGES += episcina
+pkg_episcina_name = episcina
+pkg_episcina_description = A simple, non-intrusive resource pool for connections
+pkg_episcina_homepage = https://github.com/erlware/episcina
+pkg_episcina_fetch = git
+pkg_episcina_repo = https://github.com/erlware/episcina
+pkg_episcina_commit = master
+
+PACKAGES += eplot
+pkg_eplot_name = eplot
+pkg_eplot_description = A plot engine written in erlang.
+pkg_eplot_homepage = https://github.com/psyeugenic/eplot
+pkg_eplot_fetch = git
+pkg_eplot_repo = https://github.com/psyeugenic/eplot
+pkg_eplot_commit = master
+
+PACKAGES += epocxy
+pkg_epocxy_name = epocxy
+pkg_epocxy_description = Erlang Patterns of Concurrency
+pkg_epocxy_homepage = https://github.com/duomark/epocxy
+pkg_epocxy_fetch = git
+pkg_epocxy_repo = https://github.com/duomark/epocxy
+pkg_epocxy_commit = master
+
+PACKAGES += epubnub
+pkg_epubnub_name = epubnub
+pkg_epubnub_description = Erlang PubNub API
+pkg_epubnub_homepage = https://github.com/tsloughter/epubnub
+pkg_epubnub_fetch = git
+pkg_epubnub_repo = https://github.com/tsloughter/epubnub
+pkg_epubnub_commit = master
+
+PACKAGES += eqm
+pkg_eqm_name = eqm
+pkg_eqm_description = Erlang pub sub with supply-demand channels
+pkg_eqm_homepage = https://github.com/loucash/eqm
+pkg_eqm_fetch = git
+pkg_eqm_repo = https://github.com/loucash/eqm
+pkg_eqm_commit = master
+
+PACKAGES += eredis
+pkg_eredis_name = eredis
+pkg_eredis_description = Erlang Redis client
+pkg_eredis_homepage = https://github.com/wooga/eredis
+pkg_eredis_fetch = git
+pkg_eredis_repo = https://github.com/wooga/eredis
+pkg_eredis_commit = master
+
+PACKAGES += eredis_pool
+pkg_eredis_pool_name = eredis_pool
+pkg_eredis_pool_description = eredis_pool is a pool of Redis clients, using eredis and poolboy.
+pkg_eredis_pool_homepage = https://github.com/hiroeorz/eredis_pool
+pkg_eredis_pool_fetch = git
+pkg_eredis_pool_repo = https://github.com/hiroeorz/eredis_pool
+pkg_eredis_pool_commit = master
+
+PACKAGES += erl_streams
+pkg_erl_streams_name = erl_streams
+pkg_erl_streams_description = Streams in Erlang
+pkg_erl_streams_homepage = https://github.com/epappas/erl_streams
+pkg_erl_streams_fetch = git
+pkg_erl_streams_repo = https://github.com/epappas/erl_streams
+pkg_erl_streams_commit = master
+
+PACKAGES += erlang_cep
+pkg_erlang_cep_name = erlang_cep
+pkg_erlang_cep_description = A basic CEP package written in erlang
+pkg_erlang_cep_homepage = https://github.com/danmacklin/erlang_cep
+pkg_erlang_cep_fetch = git
+pkg_erlang_cep_repo = https://github.com/danmacklin/erlang_cep
+pkg_erlang_cep_commit = master
+
+PACKAGES += erlang_js
+pkg_erlang_js_name = erlang_js
+pkg_erlang_js_description = A linked-in driver for Erlang to Mozilla's Spidermonkey Javascript runtime.
+pkg_erlang_js_homepage = https://github.com/basho/erlang_js
+pkg_erlang_js_fetch = git
+pkg_erlang_js_repo = https://github.com/basho/erlang_js
+pkg_erlang_js_commit = master
+
+PACKAGES += erlang_localtime
+pkg_erlang_localtime_name = erlang_localtime
+pkg_erlang_localtime_description = Erlang library for conversion from one local time to another
+pkg_erlang_localtime_homepage = https://github.com/dmitryme/erlang_localtime
+pkg_erlang_localtime_fetch = git
+pkg_erlang_localtime_repo = https://github.com/dmitryme/erlang_localtime
+pkg_erlang_localtime_commit = master
+
+PACKAGES += erlang_smtp
+pkg_erlang_smtp_name = erlang_smtp
+pkg_erlang_smtp_description = Erlang SMTP and POP3 server code.
+pkg_erlang_smtp_homepage = https://github.com/tonyg/erlang-smtp
+pkg_erlang_smtp_fetch = git
+pkg_erlang_smtp_repo = https://github.com/tonyg/erlang-smtp
+pkg_erlang_smtp_commit = master
+
+PACKAGES += erlang_term
+pkg_erlang_term_name = erlang_term
+pkg_erlang_term_description = Erlang Term Info
+pkg_erlang_term_homepage = https://github.com/okeuday/erlang_term
+pkg_erlang_term_fetch = git
+pkg_erlang_term_repo = https://github.com/okeuday/erlang_term
+pkg_erlang_term_commit = master
+
+PACKAGES += erlastic_search
+pkg_erlastic_search_name = erlastic_search
+pkg_erlastic_search_description = An Erlang app for communicating with Elasticsearch's REST interface.
+pkg_erlastic_search_homepage = https://github.com/tsloughter/erlastic_search
+pkg_erlastic_search_fetch = git
+pkg_erlastic_search_repo = https://github.com/tsloughter/erlastic_search
+pkg_erlastic_search_commit = master
+
+PACKAGES += erlasticsearch
+pkg_erlasticsearch_name = erlasticsearch
+pkg_erlasticsearch_description = Erlang Thrift interface to Elasticsearch
+pkg_erlasticsearch_homepage = https://github.com/dieswaytoofast/erlasticsearch
+pkg_erlasticsearch_fetch = git
+pkg_erlasticsearch_repo = https://github.com/dieswaytoofast/erlasticsearch
+pkg_erlasticsearch_commit = master
+
+PACKAGES += erlbrake
+pkg_erlbrake_name = erlbrake
+pkg_erlbrake_description = Erlang Airbrake notification client
+pkg_erlbrake_homepage = https://github.com/kenpratt/erlbrake
+pkg_erlbrake_fetch = git
+pkg_erlbrake_repo = https://github.com/kenpratt/erlbrake
+pkg_erlbrake_commit = master
+
+PACKAGES += erlcloud
+pkg_erlcloud_name = erlcloud
+pkg_erlcloud_description = Cloud Computing library for erlang (Amazon EC2, S3, SQS, SimpleDB, Mechanical Turk, ELB)
+pkg_erlcloud_homepage = https://github.com/gleber/erlcloud
+pkg_erlcloud_fetch = git
+pkg_erlcloud_repo = https://github.com/gleber/erlcloud
+pkg_erlcloud_commit = master
+
+PACKAGES += erlcron
+pkg_erlcron_name = erlcron
+pkg_erlcron_description = Erlang cronish system
+pkg_erlcron_homepage = https://github.com/erlware/erlcron
+pkg_erlcron_fetch = git
+pkg_erlcron_repo = https://github.com/erlware/erlcron
+pkg_erlcron_commit = master
+
+PACKAGES += erldb
+pkg_erldb_name = erldb
+pkg_erldb_description = ORM (Object-relational mapping) application implemented in Erlang
+pkg_erldb_homepage = http://erldb.org
+pkg_erldb_fetch = git
+pkg_erldb_repo = https://github.com/erldb/erldb
+pkg_erldb_commit = master
+
+PACKAGES += erldis
+pkg_erldis_name = erldis
+pkg_erldis_description = redis erlang client library
+pkg_erldis_homepage = https://github.com/cstar/erldis
+pkg_erldis_fetch = git
+pkg_erldis_repo = https://github.com/cstar/erldis
+pkg_erldis_commit = master
+
+PACKAGES += erldns
+pkg_erldns_name = erldns
+pkg_erldns_description = DNS server, in erlang.
+pkg_erldns_homepage = https://github.com/aetrion/erl-dns
+pkg_erldns_fetch = git
+pkg_erldns_repo = https://github.com/aetrion/erl-dns
+pkg_erldns_commit = master
+
+PACKAGES += erldocker
+pkg_erldocker_name = erldocker
+pkg_erldocker_description = Docker Remote API client for Erlang
+pkg_erldocker_homepage = https://github.com/proger/erldocker
+pkg_erldocker_fetch = git
+pkg_erldocker_repo = https://github.com/proger/erldocker
+pkg_erldocker_commit = master
+
+PACKAGES += erlfsmon
+pkg_erlfsmon_name = erlfsmon
+pkg_erlfsmon_description = Erlang filesystem event watcher for Linux and OSX
+pkg_erlfsmon_homepage = https://github.com/proger/erlfsmon
+pkg_erlfsmon_fetch = git
+pkg_erlfsmon_repo = https://github.com/proger/erlfsmon
+pkg_erlfsmon_commit = master
+
+PACKAGES += erlgit
+pkg_erlgit_name = erlgit
+pkg_erlgit_description = Erlang convenience wrapper around git executable
+pkg_erlgit_homepage = https://github.com/gleber/erlgit
+pkg_erlgit_fetch = git
+pkg_erlgit_repo = https://github.com/gleber/erlgit
+pkg_erlgit_commit = master
+
+PACKAGES += erlguten
+pkg_erlguten_name = erlguten
+pkg_erlguten_description = ErlGuten is a system for high-quality typesetting, written purely in Erlang.
+pkg_erlguten_homepage = https://github.com/richcarl/erlguten
+pkg_erlguten_fetch = git
+pkg_erlguten_repo = https://github.com/richcarl/erlguten
+pkg_erlguten_commit = master
+
+PACKAGES += erlmc
+pkg_erlmc_name = erlmc
+pkg_erlmc_description = Erlang memcached binary protocol client
+pkg_erlmc_homepage = https://github.com/jkvor/erlmc
+pkg_erlmc_fetch = git
+pkg_erlmc_repo = https://github.com/jkvor/erlmc
+pkg_erlmc_commit = master
+
+PACKAGES += erlmongo
+pkg_erlmongo_name = erlmongo
+pkg_erlmongo_description = Record based Erlang driver for MongoDB with gridfs support
+pkg_erlmongo_homepage = https://github.com/SergejJurecko/erlmongo
+pkg_erlmongo_fetch = git
+pkg_erlmongo_repo = https://github.com/SergejJurecko/erlmongo
+pkg_erlmongo_commit = master
+
+PACKAGES += erlog
+pkg_erlog_name = erlog
+pkg_erlog_description = Prolog interpreter in and for Erlang
+pkg_erlog_homepage = https://github.com/rvirding/erlog
+pkg_erlog_fetch = git
+pkg_erlog_repo = https://github.com/rvirding/erlog
+pkg_erlog_commit = master
+
+PACKAGES += erlpass
+pkg_erlpass_name = erlpass
+pkg_erlpass_description = A library to handle password hashing and changing in a safe manner, independent from any kind of storage whatsoever.
+pkg_erlpass_homepage = https://github.com/ferd/erlpass
+pkg_erlpass_fetch = git
+pkg_erlpass_repo = https://github.com/ferd/erlpass
+pkg_erlpass_commit = master
+
+PACKAGES += erlport
+pkg_erlport_name = erlport
+pkg_erlport_description = ErlPort - connect Erlang to other languages
+pkg_erlport_homepage = https://github.com/hdima/erlport
+pkg_erlport_fetch = git
+pkg_erlport_repo = https://github.com/hdima/erlport
+pkg_erlport_commit = master
+
+PACKAGES += erlsh
+pkg_erlsh_name = erlsh
+pkg_erlsh_description = Erlang shell tools
+pkg_erlsh_homepage = https://github.com/proger/erlsh
+pkg_erlsh_fetch = git
+pkg_erlsh_repo = https://github.com/proger/erlsh
+pkg_erlsh_commit = master
+
+PACKAGES += erlsha2
+pkg_erlsha2_name = erlsha2
+pkg_erlsha2_description = SHA-224, SHA-256, SHA-384, SHA-512 implemented in Erlang NIFs.
+pkg_erlsha2_homepage = https://github.com/vinoski/erlsha2
+pkg_erlsha2_fetch = git
+pkg_erlsha2_repo = https://github.com/vinoski/erlsha2
+pkg_erlsha2_commit = master
+
+PACKAGES += erlsom
+pkg_erlsom_name = erlsom
+pkg_erlsom_description = XML parser for Erlang
+pkg_erlsom_homepage = https://github.com/willemdj/erlsom
+pkg_erlsom_fetch = git
+pkg_erlsom_repo = https://github.com/willemdj/erlsom
+pkg_erlsom_commit = master
+
+PACKAGES += erlubi
+pkg_erlubi_name = erlubi
+pkg_erlubi_description = Ubigraph Erlang Client (and Process Visualizer)
+pkg_erlubi_homepage = https://github.com/krestenkrab/erlubi
+pkg_erlubi_fetch = git
+pkg_erlubi_repo = https://github.com/krestenkrab/erlubi
+pkg_erlubi_commit = master
+
+PACKAGES += erlvolt
+pkg_erlvolt_name = erlvolt
+pkg_erlvolt_description = VoltDB Erlang Client Driver
+pkg_erlvolt_homepage = https://github.com/VoltDB/voltdb-client-erlang
+pkg_erlvolt_fetch = git
+pkg_erlvolt_repo = https://github.com/VoltDB/voltdb-client-erlang
+pkg_erlvolt_commit = master
+
+PACKAGES += erlware_commons
+pkg_erlware_commons_name = erlware_commons
+pkg_erlware_commons_description = Erlware Commons is an Erlware project focused on all aspects of reusable Erlang components.
+pkg_erlware_commons_homepage = https://github.com/erlware/erlware_commons
+pkg_erlware_commons_fetch = git
+pkg_erlware_commons_repo = https://github.com/erlware/erlware_commons
+pkg_erlware_commons_commit = master
+
+PACKAGES += erlydtl
+pkg_erlydtl_name = erlydtl
+pkg_erlydtl_description = Django Template Language for Erlang.
+pkg_erlydtl_homepage = https://github.com/erlydtl/erlydtl
+pkg_erlydtl_fetch = git
+pkg_erlydtl_repo = https://github.com/erlydtl/erlydtl
+pkg_erlydtl_commit = master
+
+PACKAGES += errd
+pkg_errd_name = errd
+pkg_errd_description = Erlang RRDTool library
+pkg_errd_homepage = https://github.com/archaelus/errd
+pkg_errd_fetch = git
+pkg_errd_repo = https://github.com/archaelus/errd
+pkg_errd_commit = master
+
+PACKAGES += erserve
+pkg_erserve_name = erserve
+pkg_erserve_description = Erlang/Rserve communication interface
+pkg_erserve_homepage = https://github.com/del/erserve
+pkg_erserve_fetch = git
+pkg_erserve_repo = https://github.com/del/erserve
+pkg_erserve_commit = master
+
+PACKAGES += erwa
+pkg_erwa_name = erwa
+pkg_erwa_description = A WAMP router and client written in Erlang.
+pkg_erwa_homepage = https://github.com/bwegh/erwa
+pkg_erwa_fetch = git
+pkg_erwa_repo = https://github.com/bwegh/erwa
+pkg_erwa_commit = master
+
+PACKAGES += escalus
+pkg_escalus_name = escalus
+pkg_escalus_description = An XMPP client library in Erlang for conveniently testing XMPP servers
+pkg_escalus_homepage = https://github.com/esl/escalus
+pkg_escalus_fetch = git
+pkg_escalus_repo = https://github.com/esl/escalus
+pkg_escalus_commit = master
+
+PACKAGES += esh_mk
+pkg_esh_mk_name = esh_mk
+pkg_esh_mk_description = esh template engine plugin for erlang.mk
+pkg_esh_mk_homepage = https://github.com/crownedgrouse/esh.mk
+pkg_esh_mk_fetch = git
+pkg_esh_mk_repo = https://github.com/crownedgrouse/esh.mk.git
+pkg_esh_mk_commit = master
+
+PACKAGES += espec
+pkg_espec_name = espec
+pkg_espec_description = ESpec: Behaviour driven development framework for Erlang
+pkg_espec_homepage = https://github.com/lucaspiller/espec
+pkg_espec_fetch = git
+pkg_espec_repo = https://github.com/lucaspiller/espec
+pkg_espec_commit = master
+
+PACKAGES += estatsd
+pkg_estatsd_name = estatsd
+pkg_estatsd_description = Erlang stats aggregation app that periodically flushes data to graphite
+pkg_estatsd_homepage = https://github.com/RJ/estatsd
+pkg_estatsd_fetch = git
+pkg_estatsd_repo = https://github.com/RJ/estatsd
+pkg_estatsd_commit = master
+
+PACKAGES += etap
+pkg_etap_name = etap
+pkg_etap_description = etap is a simple erlang testing library that provides TAP compliant output.
+pkg_etap_homepage = https://github.com/ngerakines/etap
+pkg_etap_fetch = git
+pkg_etap_repo = https://github.com/ngerakines/etap
+pkg_etap_commit = master
+
+PACKAGES += etest
+pkg_etest_name = etest
+pkg_etest_description = A lightweight, convention over configuration test framework for Erlang
+pkg_etest_homepage = https://github.com/wooga/etest
+pkg_etest_fetch = git
+pkg_etest_repo = https://github.com/wooga/etest
+pkg_etest_commit = master
+
+PACKAGES += etest_http
+pkg_etest_http_name = etest_http
+pkg_etest_http_description = etest Assertions around HTTP (client-side)
+pkg_etest_http_homepage = https://github.com/wooga/etest_http
+pkg_etest_http_fetch = git
+pkg_etest_http_repo = https://github.com/wooga/etest_http
+pkg_etest_http_commit = master
+
+PACKAGES += etoml
+pkg_etoml_name = etoml
+pkg_etoml_description = TOML parser for Erlang
+pkg_etoml_homepage = https://github.com/kalta/etoml
+pkg_etoml_fetch = git
+pkg_etoml_repo = https://github.com/kalta/etoml
+pkg_etoml_commit = master
+
+PACKAGES += eunit
+pkg_eunit_name = eunit
+pkg_eunit_description = The EUnit lightweight unit testing framework for Erlang - this is the canonical development repository.
+pkg_eunit_homepage = https://github.com/richcarl/eunit
+pkg_eunit_fetch = git
+pkg_eunit_repo = https://github.com/richcarl/eunit
+pkg_eunit_commit = master
+
+PACKAGES += eunit_formatters
+pkg_eunit_formatters_name = eunit_formatters
+pkg_eunit_formatters_description = Because eunit's output sucks. Let's make it better.
+pkg_eunit_formatters_homepage = https://github.com/seancribbs/eunit_formatters
+pkg_eunit_formatters_fetch = git
+pkg_eunit_formatters_repo = https://github.com/seancribbs/eunit_formatters
+pkg_eunit_formatters_commit = master
+
+PACKAGES += euthanasia
+pkg_euthanasia_name = euthanasia
+pkg_euthanasia_description = Merciful killer for your Erlang processes
+pkg_euthanasia_homepage = https://github.com/doubleyou/euthanasia
+pkg_euthanasia_fetch = git
+pkg_euthanasia_repo = https://github.com/doubleyou/euthanasia
+pkg_euthanasia_commit = master
+
+PACKAGES += evum
+pkg_evum_name = evum
+pkg_evum_description = Spawn Linux VMs as Erlang processes in the Erlang VM
+pkg_evum_homepage = https://github.com/msantos/evum
+pkg_evum_fetch = git
+pkg_evum_repo = https://github.com/msantos/evum
+pkg_evum_commit = master
+
+PACKAGES += exec
+pkg_exec_name = erlexec
+pkg_exec_description = Execute and control OS processes from Erlang/OTP.
+pkg_exec_homepage = http://saleyn.github.com/erlexec
+pkg_exec_fetch = git
+pkg_exec_repo = https://github.com/saleyn/erlexec
+pkg_exec_commit = master
+
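
The index key and the application it fetches need not share a name: this package is registered as exec, while its _name (and the repository, saleyn/erlexec) is erlexec. Projects refer to the index key in DEPS; a sketch:

    # "exec" is the index key; erlang.mk fetches saleyn/erlexec per the defaults above.
    PROJECT = my_app
    DEPS = exec
    include erlang.mk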
+PACKAGES += exml
+pkg_exml_name = exml
+pkg_exml_description = XML parsing library in Erlang
+pkg_exml_homepage = https://github.com/paulgray/exml
+pkg_exml_fetch = git
+pkg_exml_repo = https://github.com/paulgray/exml
+pkg_exml_commit = master
+
+PACKAGES += exometer
+pkg_exometer_name = exometer
+pkg_exometer_description = Basic measurement objects and probe behavior
+pkg_exometer_homepage = https://github.com/Feuerlabs/exometer
+pkg_exometer_fetch = git
+pkg_exometer_repo = https://github.com/Feuerlabs/exometer
+pkg_exometer_commit = master
+
+PACKAGES += exs1024
+pkg_exs1024_name = exs1024
+pkg_exs1024_description = Xorshift1024star pseudo random number generator for Erlang.
+pkg_exs1024_homepage = https://github.com/jj1bdx/exs1024
+pkg_exs1024_fetch = git
+pkg_exs1024_repo = https://github.com/jj1bdx/exs1024
+pkg_exs1024_commit = master
+
+PACKAGES += exs64
+pkg_exs64_name = exs64
+pkg_exs64_description = Xorshift64star pseudo random number generator for Erlang.
+pkg_exs64_homepage = https://github.com/jj1bdx/exs64
+pkg_exs64_fetch = git
+pkg_exs64_repo = https://github.com/jj1bdx/exs64
+pkg_exs64_commit = master
+
+PACKAGES += exsplus116
+pkg_exsplus116_name = exsplus116
+pkg_exsplus116_description = Xorshift116plus for Erlang
+pkg_exsplus116_homepage = https://github.com/jj1bdx/exsplus116
+pkg_exsplus116_fetch = git
+pkg_exsplus116_repo = https://github.com/jj1bdx/exsplus116
+pkg_exsplus116_commit = master
+
+PACKAGES += exsplus128
+pkg_exsplus128_name = exsplus128
+pkg_exsplus128_description = Xorshift128plus pseudo random number generator for Erlang.
+pkg_exsplus128_homepage = https://github.com/jj1bdx/exsplus128
+pkg_exsplus128_fetch = git
+pkg_exsplus128_repo = https://github.com/jj1bdx/exsplus128
+pkg_exsplus128_commit = master
+
+PACKAGES += ezmq
+pkg_ezmq_name = ezmq
+pkg_ezmq_description = ZeroMQ implemented in Erlang
+pkg_ezmq_homepage = https://github.com/RoadRunnr/ezmq
+pkg_ezmq_fetch = git
+pkg_ezmq_repo = https://github.com/RoadRunnr/ezmq
+pkg_ezmq_commit = master
+
+PACKAGES += ezmtp
+pkg_ezmtp_name = ezmtp
+pkg_ezmtp_description = ZMTP protocol in pure Erlang.
+pkg_ezmtp_homepage = https://github.com/a13x/ezmtp
+pkg_ezmtp_fetch = git
+pkg_ezmtp_repo = https://github.com/a13x/ezmtp
+pkg_ezmtp_commit = master
+
+PACKAGES += fast_disk_log
+pkg_fast_disk_log_name = fast_disk_log
+pkg_fast_disk_log_description = Pool-based asynchronous Erlang disk logger
+pkg_fast_disk_log_homepage = https://github.com/lpgauth/fast_disk_log
+pkg_fast_disk_log_fetch = git
+pkg_fast_disk_log_repo = https://github.com/lpgauth/fast_disk_log
+pkg_fast_disk_log_commit = master
+
+PACKAGES += feeder
+pkg_feeder_name = feeder
+pkg_feeder_description = Stream parse RSS and Atom formatted XML feeds.
+pkg_feeder_homepage = https://github.com/michaelnisi/feeder
+pkg_feeder_fetch = git
+pkg_feeder_repo = https://github.com/michaelnisi/feeder
+pkg_feeder_commit = master
+
+PACKAGES += find_crate
+pkg_find_crate_name = find_crate
+pkg_find_crate_description = Find Rust libs and exes in Erlang application priv directory
+pkg_find_crate_homepage = https://github.com/goertzenator/find_crate
+pkg_find_crate_fetch = git
+pkg_find_crate_repo = https://github.com/goertzenator/find_crate
+pkg_find_crate_commit = master
+
+PACKAGES += fix
+pkg_fix_name = fix
+pkg_fix_description = FIX protocol (http://fixprotocol.org/) implementation.
+pkg_fix_homepage = https://github.com/maxlapshin/fix
+pkg_fix_fetch = git
+pkg_fix_repo = https://github.com/maxlapshin/fix
+pkg_fix_commit = master
+
+PACKAGES += flower
+pkg_flower_name = flower
+pkg_flower_description = FlowER - an Erlang OpenFlow development platform
+pkg_flower_homepage = https://github.com/travelping/flower
+pkg_flower_fetch = git
+pkg_flower_repo = https://github.com/travelping/flower
+pkg_flower_commit = master
+
+PACKAGES += fn
+pkg_fn_name = fn
+pkg_fn_description = Function utilities for Erlang
+pkg_fn_homepage = https://github.com/reiddraper/fn
+pkg_fn_fetch = git
+pkg_fn_repo = https://github.com/reiddraper/fn
+pkg_fn_commit = master
+
+PACKAGES += folsom
+pkg_folsom_name = folsom
+pkg_folsom_description = Expose Erlang Events and Metrics
+pkg_folsom_homepage = https://github.com/boundary/folsom
+pkg_folsom_fetch = git
+pkg_folsom_repo = https://github.com/boundary/folsom
+pkg_folsom_commit = master
+
+PACKAGES += folsom_cowboy
+pkg_folsom_cowboy_name = folsom_cowboy
+pkg_folsom_cowboy_description = A Cowboy based Folsom HTTP Wrapper.
+pkg_folsom_cowboy_homepage = https://github.com/boundary/folsom_cowboy
+pkg_folsom_cowboy_fetch = git
+pkg_folsom_cowboy_repo = https://github.com/boundary/folsom_cowboy
+pkg_folsom_cowboy_commit = master
+
+PACKAGES += folsomite
+pkg_folsomite_name = folsomite
+pkg_folsomite_description = blow up your graphite / riemann server with folsom metrics
+pkg_folsomite_homepage = https://github.com/campanja/folsomite
+pkg_folsomite_fetch = git
+pkg_folsomite_repo = https://github.com/campanja/folsomite
+pkg_folsomite_commit = master
+
+PACKAGES += fs
+pkg_fs_name = fs
+pkg_fs_description = Erlang FileSystem Listener
+pkg_fs_homepage = https://github.com/synrc/fs
+pkg_fs_fetch = git
+pkg_fs_repo = https://github.com/synrc/fs
+pkg_fs_commit = master
+
+PACKAGES += fuse
+pkg_fuse_name = fuse
+pkg_fuse_description = A Circuit Breaker for Erlang
+pkg_fuse_homepage = https://github.com/jlouis/fuse
+pkg_fuse_fetch = git
+pkg_fuse_repo = https://github.com/jlouis/fuse
+pkg_fuse_commit = master
+
+PACKAGES += gcm
+pkg_gcm_name = gcm
+pkg_gcm_description = An Erlang application for Google Cloud Messaging
+pkg_gcm_homepage = https://github.com/pdincau/gcm-erlang
+pkg_gcm_fetch = git
+pkg_gcm_repo = https://github.com/pdincau/gcm-erlang
+pkg_gcm_commit = master
+
+PACKAGES += gcprof
+pkg_gcprof_name = gcprof
+pkg_gcprof_description = Garbage Collection profiler for Erlang
+pkg_gcprof_homepage = https://github.com/knutin/gcprof
+pkg_gcprof_fetch = git
+pkg_gcprof_repo = https://github.com/knutin/gcprof
+pkg_gcprof_commit = master
+
+PACKAGES += geas
+pkg_geas_name = geas
+pkg_geas_description = Guess Erlang Application Scattering
+pkg_geas_homepage = https://github.com/crownedgrouse/geas
+pkg_geas_fetch = git
+pkg_geas_repo = https://github.com/crownedgrouse/geas
+pkg_geas_commit = master
+
+PACKAGES += geef
+pkg_geef_name = geef
+pkg_geef_description = Git NEEEEF (Erlang NIF)
+pkg_geef_homepage = https://github.com/carlosmn/geef
+pkg_geef_fetch = git
+pkg_geef_repo = https://github.com/carlosmn/geef
+pkg_geef_commit = master
+
+PACKAGES += gen_coap
+pkg_gen_coap_name = gen_coap
+pkg_gen_coap_description = Generic Erlang CoAP Client/Server
+pkg_gen_coap_homepage = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_fetch = git
+pkg_gen_coap_repo = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_commit = master
+
+PACKAGES += gen_cycle
+pkg_gen_cycle_name = gen_cycle
+pkg_gen_cycle_description = Simple, generic OTP behaviour for recurring tasks
+pkg_gen_cycle_homepage = https://github.com/aerosol/gen_cycle
+pkg_gen_cycle_fetch = git
+pkg_gen_cycle_repo = https://github.com/aerosol/gen_cycle
+pkg_gen_cycle_commit = develop
+
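
gen_cycle is pinned to its develop branch rather than master, a reminder that the _commit field can be any git ref. Branch refs move over time, so builds against these defaults are only reproducible if the consuming project pins an exact ref itself; a sketch with a hypothetical sha:

    # Pin a moving branch to an exact commit for reproducible builds.
    PROJECT = my_app
    DEPS = gen_cycle
    dep_gen_cycle_commit = 0123456789abcdef0123456789abcdef01234567   # hypothetical sha
    include erlang.mk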
+PACKAGES += gen_icmp
+pkg_gen_icmp_name = gen_icmp
+pkg_gen_icmp_description = Erlang interface to ICMP sockets
+pkg_gen_icmp_homepage = https://github.com/msantos/gen_icmp
+pkg_gen_icmp_fetch = git
+pkg_gen_icmp_repo = https://github.com/msantos/gen_icmp
+pkg_gen_icmp_commit = master
+
+PACKAGES += gen_leader
+pkg_gen_leader_name = gen_leader
+pkg_gen_leader_description = leader election behavior
+pkg_gen_leader_homepage = https://github.com/garret-smith/gen_leader_revival
+pkg_gen_leader_fetch = git
+pkg_gen_leader_repo = https://github.com/garret-smith/gen_leader_revival
+pkg_gen_leader_commit = master
+
+PACKAGES += gen_nb_server
+pkg_gen_nb_server_name = gen_nb_server
+pkg_gen_nb_server_description = OTP behavior for writing non-blocking servers
+pkg_gen_nb_server_homepage = https://github.com/kevsmith/gen_nb_server
+pkg_gen_nb_server_fetch = git
+pkg_gen_nb_server_repo = https://github.com/kevsmith/gen_nb_server
+pkg_gen_nb_server_commit = master
+
+PACKAGES += gen_paxos
+pkg_gen_paxos_name = gen_paxos
+pkg_gen_paxos_description = An Erlang/OTP-style implementation of the PAXOS distributed consensus protocol
+pkg_gen_paxos_homepage = https://github.com/gburd/gen_paxos
+pkg_gen_paxos_fetch = git
+pkg_gen_paxos_repo = https://github.com/gburd/gen_paxos
+pkg_gen_paxos_commit = master
+
+PACKAGES += gen_rpc
+pkg_gen_rpc_name = gen_rpc
+pkg_gen_rpc_description = A scalable RPC library for Erlang-VM based languages
+pkg_gen_rpc_homepage = https://github.com/priestjim/gen_rpc
+pkg_gen_rpc_fetch = git
+pkg_gen_rpc_repo = https://github.com/priestjim/gen_rpc.git
+pkg_gen_rpc_commit = master
+
+PACKAGES += gen_smtp
+pkg_gen_smtp_name = gen_smtp
+pkg_gen_smtp_description = A generic Erlang SMTP server and client that can be extended via callback modules
+pkg_gen_smtp_homepage = https://github.com/Vagabond/gen_smtp
+pkg_gen_smtp_fetch = git
+pkg_gen_smtp_repo = https://github.com/Vagabond/gen_smtp
+pkg_gen_smtp_commit = master
+
+PACKAGES += gen_tracker
+pkg_gen_tracker_name = gen_tracker
+pkg_gen_tracker_description = supervisor with ets handling of children and their metadata
+pkg_gen_tracker_homepage = https://github.com/erlyvideo/gen_tracker
+pkg_gen_tracker_fetch = git
+pkg_gen_tracker_repo = https://github.com/erlyvideo/gen_tracker
+pkg_gen_tracker_commit = master
+
+PACKAGES += gen_unix
+pkg_gen_unix_name = gen_unix
+pkg_gen_unix_description = Erlang Unix socket interface
+pkg_gen_unix_homepage = https://github.com/msantos/gen_unix
+pkg_gen_unix_fetch = git
+pkg_gen_unix_repo = https://github.com/msantos/gen_unix
+pkg_gen_unix_commit = master
+
+PACKAGES += geode
+pkg_geode_name = geode
+pkg_geode_description = geohash/proximity lookup in pure, uncut erlang.
+pkg_geode_homepage = https://github.com/bradfordw/geode
+pkg_geode_fetch = git
+pkg_geode_repo = https://github.com/bradfordw/geode
+pkg_geode_commit = master
+
+PACKAGES += getopt
+pkg_getopt_name = getopt
+pkg_getopt_description = Module to parse command line arguments using the GNU getopt syntax
+pkg_getopt_homepage = https://github.com/jcomellas/getopt
+pkg_getopt_fetch = git
+pkg_getopt_repo = https://github.com/jcomellas/getopt
+pkg_getopt_commit = master
+
+PACKAGES += gettext
+pkg_gettext_name = gettext
+pkg_gettext_description = Erlang internationalization library.
+pkg_gettext_homepage = https://github.com/etnt/gettext
+pkg_gettext_fetch = git
+pkg_gettext_repo = https://github.com/etnt/gettext
+pkg_gettext_commit = master
+
+PACKAGES += giallo
+pkg_giallo_name = giallo
+pkg_giallo_description = Small and flexible web framework on top of Cowboy
+pkg_giallo_homepage = https://github.com/kivra/giallo
+pkg_giallo_fetch = git
+pkg_giallo_repo = https://github.com/kivra/giallo
+pkg_giallo_commit = master
+
+PACKAGES += gin
+pkg_gin_name = gin
+pkg_gin_description = Guard expression helpers for Erlang, implemented as a parse_transform
+pkg_gin_homepage = https://github.com/mad-cocktail/gin
+pkg_gin_fetch = git
+pkg_gin_repo = https://github.com/mad-cocktail/gin
+pkg_gin_commit = master
+
+PACKAGES += gitty
+pkg_gitty_name = gitty
+pkg_gitty_description = Git access in erlang
+pkg_gitty_homepage = https://github.com/maxlapshin/gitty
+pkg_gitty_fetch = git
+pkg_gitty_repo = https://github.com/maxlapshin/gitty
+pkg_gitty_commit = master
+
+PACKAGES += gold_fever
+pkg_gold_fever_name = gold_fever
+pkg_gold_fever_description = A Treasure Hunt for Erlangers
+pkg_gold_fever_homepage = https://github.com/inaka/gold_fever
+pkg_gold_fever_fetch = git
+pkg_gold_fever_repo = https://github.com/inaka/gold_fever
+pkg_gold_fever_commit = master
+
+PACKAGES += gpb
+pkg_gpb_name = gpb
+pkg_gpb_description = A Google Protobuf implementation for Erlang
+pkg_gpb_homepage = https://github.com/tomas-abrahamsson/gpb
+pkg_gpb_fetch = git
+pkg_gpb_repo = https://github.com/tomas-abrahamsson/gpb
+pkg_gpb_commit = master
+
+PACKAGES += gproc
+pkg_gproc_name = gproc
+pkg_gproc_description = Extended process registry for Erlang
+pkg_gproc_homepage = https://github.com/uwiger/gproc
+pkg_gproc_fetch = git
+pkg_gproc_repo = https://github.com/uwiger/gproc
+pkg_gproc_commit = master
+
+PACKAGES += grapherl
+pkg_grapherl_name = grapherl
+pkg_grapherl_description = Create graphs of Erlang systems and programs
+pkg_grapherl_homepage = https://github.com/eproxus/grapherl
+pkg_grapherl_fetch = git
+pkg_grapherl_repo = https://github.com/eproxus/grapherl
+pkg_grapherl_commit = master
+
+PACKAGES += grpc
+pkg_grpc_name = grpc
+pkg_grpc_description = gRPC server in Erlang
+pkg_grpc_homepage = https://github.com/Bluehouse-Technology/grpc
+pkg_grpc_fetch = git
+pkg_grpc_repo = https://github.com/Bluehouse-Technology/grpc
+pkg_grpc_commit = master
+
+PACKAGES += grpc_client
+pkg_grpc_client_name = grpc_client
+pkg_grpc_client_description = gRPC client in Erlang
+pkg_grpc_client_homepage = https://github.com/Bluehouse-Technology/grpc_client
+pkg_grpc_client_fetch = git
+pkg_grpc_client_repo = https://github.com/Bluehouse-Technology/grpc_client
+pkg_grpc_client_commit = master
+
+PACKAGES += gun
+pkg_gun_name = gun
+pkg_gun_description = Asynchronous SPDY, HTTP and Websocket client written in Erlang.
+pkg_gun_homepage = http://ninenines.eu
+pkg_gun_fetch = git
+pkg_gun_repo = https://github.com/ninenines/gun
+pkg_gun_commit = master
+
+PACKAGES += gut
+pkg_gut_name = gut
+pkg_gut_description = gut is a template printing, aka scaffolding, tool for Erlang. Like rails generate or yeoman
+pkg_gut_homepage = https://github.com/unbalancedparentheses/gut
+pkg_gut_fetch = git
+pkg_gut_repo = https://github.com/unbalancedparentheses/gut
+pkg_gut_commit = master
+
+PACKAGES += hackney
+pkg_hackney_name = hackney
+pkg_hackney_description = simple HTTP client in Erlang
+pkg_hackney_homepage = https://github.com/benoitc/hackney
+pkg_hackney_fetch = git
+pkg_hackney_repo = https://github.com/benoitc/hackney
+pkg_hackney_commit = master
+
+PACKAGES += hamcrest
+pkg_hamcrest_name = hamcrest
+pkg_hamcrest_description = Erlang port of Hamcrest
+pkg_hamcrest_homepage = https://github.com/hyperthunk/hamcrest-erlang
+pkg_hamcrest_fetch = git
+pkg_hamcrest_repo = https://github.com/hyperthunk/hamcrest-erlang
+pkg_hamcrest_commit = master
+
+PACKAGES += hanoidb
+pkg_hanoidb_name = hanoidb
+pkg_hanoidb_description = Erlang LSM BTree Storage
+pkg_hanoidb_homepage = https://github.com/krestenkrab/hanoidb
+pkg_hanoidb_fetch = git
+pkg_hanoidb_repo = https://github.com/krestenkrab/hanoidb
+pkg_hanoidb_commit = master
+
+PACKAGES += hottub
+pkg_hottub_name = hottub
+pkg_hottub_description = Permanent Erlang Worker Pool
+pkg_hottub_homepage = https://github.com/bfrog/hottub
+pkg_hottub_fetch = git
+pkg_hottub_repo = https://github.com/bfrog/hottub
+pkg_hottub_commit = master
+
+PACKAGES += hpack
+pkg_hpack_name = hpack
+pkg_hpack_description = HPACK Implementation for Erlang
+pkg_hpack_homepage = https://github.com/joedevivo/hpack
+pkg_hpack_fetch = git
+pkg_hpack_repo = https://github.com/joedevivo/hpack
+pkg_hpack_commit = master
+
+PACKAGES += hyper
+pkg_hyper_name = hyper
+pkg_hyper_description = Erlang implementation of HyperLogLog
+pkg_hyper_homepage = https://github.com/GameAnalytics/hyper
+pkg_hyper_fetch = git
+pkg_hyper_repo = https://github.com/GameAnalytics/hyper
+pkg_hyper_commit = master
+
+PACKAGES += i18n
+pkg_i18n_name = i18n
+pkg_i18n_description = International components for unicode from Erlang (unicode, date, string, number, format, locale, localization, transliteration, icu4e)
+pkg_i18n_homepage = https://github.com/erlang-unicode/i18n
+pkg_i18n_fetch = git
+pkg_i18n_repo = https://github.com/erlang-unicode/i18n
+pkg_i18n_commit = master
+
+PACKAGES += ibrowse
+pkg_ibrowse_name = ibrowse
+pkg_ibrowse_description = Erlang HTTP client
+pkg_ibrowse_homepage = https://github.com/cmullaparthi/ibrowse
+pkg_ibrowse_fetch = git
+pkg_ibrowse_repo = https://github.com/cmullaparthi/ibrowse
+pkg_ibrowse_commit = master
+
+PACKAGES += idna
+pkg_idna_name = idna
+pkg_idna_description = Erlang IDNA lib
+pkg_idna_homepage = https://github.com/benoitc/erlang-idna
+pkg_idna_fetch = git
+pkg_idna_repo = https://github.com/benoitc/erlang-idna
+pkg_idna_commit = master
+
+PACKAGES += ierlang
+pkg_ierlang_name = ierlang
+pkg_ierlang_description = An Erlang language kernel for IPython.
+pkg_ierlang_homepage = https://github.com/robbielynch/ierlang
+pkg_ierlang_fetch = git
+pkg_ierlang_repo = https://github.com/robbielynch/ierlang
+pkg_ierlang_commit = master
+
+PACKAGES += iota
+pkg_iota_name = iota
+pkg_iota_description = iota (Inter-dependency Objective Testing Apparatus) - a tool to enforce clean separation of responsibilities in Erlang code
+pkg_iota_homepage = https://github.com/jpgneves/iota
+pkg_iota_fetch = git
+pkg_iota_repo = https://github.com/jpgneves/iota
+pkg_iota_commit = master
+
+PACKAGES += irc_lib
+pkg_irc_lib_name = irc_lib
+pkg_irc_lib_description = Erlang irc client library
+pkg_irc_lib_homepage = https://github.com/OtpChatBot/irc_lib
+pkg_irc_lib_fetch = git
+pkg_irc_lib_repo = https://github.com/OtpChatBot/irc_lib
+pkg_irc_lib_commit = master
+
+PACKAGES += ircd
+pkg_ircd_name = ircd
+pkg_ircd_description = A pluggable IRC daemon application/library for Erlang.
+pkg_ircd_homepage = https://github.com/tonyg/erlang-ircd
+pkg_ircd_fetch = git
+pkg_ircd_repo = https://github.com/tonyg/erlang-ircd
+pkg_ircd_commit = master
+
+PACKAGES += iris
+pkg_iris_name = iris
+pkg_iris_description = Iris Erlang binding
+pkg_iris_homepage = https://github.com/project-iris/iris-erl
+pkg_iris_fetch = git
+pkg_iris_repo = https://github.com/project-iris/iris-erl
+pkg_iris_commit = master
+
+PACKAGES += iso8601
+pkg_iso8601_name = iso8601
+pkg_iso8601_description = Erlang ISO 8601 date formatter/parser
+pkg_iso8601_homepage = https://github.com/seansawyer/erlang_iso8601
+pkg_iso8601_fetch = git
+pkg_iso8601_repo = https://github.com/seansawyer/erlang_iso8601
+pkg_iso8601_commit = master
+
+PACKAGES += jamdb_sybase
+pkg_jamdb_sybase_name = jamdb_sybase
+pkg_jamdb_sybase_description = Erlang driver for SAP Sybase ASE
+pkg_jamdb_sybase_homepage = https://github.com/erlangbureau/jamdb_sybase
+pkg_jamdb_sybase_fetch = git
+pkg_jamdb_sybase_repo = https://github.com/erlangbureau/jamdb_sybase
+pkg_jamdb_sybase_commit = master
+
+PACKAGES += jerg
+pkg_jerg_name = jerg
+pkg_jerg_description = JSON Schema to Erlang Records Generator
+pkg_jerg_homepage = https://github.com/ddossot/jerg
+pkg_jerg_fetch = git
+pkg_jerg_repo = https://github.com/ddossot/jerg
+pkg_jerg_commit = master
+
+PACKAGES += jesse
+pkg_jesse_name = jesse
+pkg_jesse_description = jesse (JSON Schema Erlang) is an implementation of a JSON Schema validator for Erlang.
+pkg_jesse_homepage = https://github.com/for-GET/jesse
+pkg_jesse_fetch = git
+pkg_jesse_repo = https://github.com/for-GET/jesse
+pkg_jesse_commit = master
+
+PACKAGES += jiffy
+pkg_jiffy_name = jiffy
+pkg_jiffy_description = JSON NIFs for Erlang.
+pkg_jiffy_homepage = https://github.com/davisp/jiffy
+pkg_jiffy_fetch = git
+pkg_jiffy_repo = https://github.com/davisp/jiffy
+pkg_jiffy_commit = master
+
+PACKAGES += jiffy_v
+pkg_jiffy_v_name = jiffy_v
+pkg_jiffy_v_description = JSON validation utility
+pkg_jiffy_v_homepage = https://github.com/shizzard/jiffy-v
+pkg_jiffy_v_fetch = git
+pkg_jiffy_v_repo = https://github.com/shizzard/jiffy-v
+pkg_jiffy_v_commit = master
+
+PACKAGES += jobs
+pkg_jobs_name = jobs
+pkg_jobs_description = a Job scheduler for load regulation
+pkg_jobs_homepage = https://github.com/esl/jobs
+pkg_jobs_fetch = git
+pkg_jobs_repo = https://github.com/esl/jobs
+pkg_jobs_commit = master
+
+PACKAGES += joxa
+pkg_joxa_name = joxa
+pkg_joxa_description = A Modern Lisp for the Erlang VM
+pkg_joxa_homepage = https://github.com/joxa/joxa
+pkg_joxa_fetch = git
+pkg_joxa_repo = https://github.com/joxa/joxa
+pkg_joxa_commit = master
+
+PACKAGES += json
+pkg_json_name = json
+pkg_json_description = a high level json library for erlang (17.0+)
+pkg_json_homepage = https://github.com/talentdeficit/json
+pkg_json_fetch = git
+pkg_json_repo = https://github.com/talentdeficit/json
+pkg_json_commit = master
+
+PACKAGES += json_rec
+pkg_json_rec_name = json_rec
+pkg_json_rec_description = JSON to erlang record
+pkg_json_rec_homepage = https://github.com/justinkirby/json_rec
+pkg_json_rec_fetch = git
+pkg_json_rec_repo = https://github.com/justinkirby/json_rec
+pkg_json_rec_commit = master
+
+PACKAGES += jsone
+pkg_jsone_name = jsone
+pkg_jsone_description = An Erlang library for encoding and decoding JSON data.
+pkg_jsone_homepage = https://github.com/sile/jsone
+pkg_jsone_fetch = git
+pkg_jsone_repo = https://github.com/sile/jsone.git
+pkg_jsone_commit = master
+
+PACKAGES += jsonerl
+pkg_jsonerl_name = jsonerl
+pkg_jsonerl_description = yet another but slightly different erlang <-> json encoder/decoder
+pkg_jsonerl_homepage = https://github.com/lambder/jsonerl
+pkg_jsonerl_fetch = git
+pkg_jsonerl_repo = https://github.com/lambder/jsonerl
+pkg_jsonerl_commit = master
+
+PACKAGES += jsonpath
+pkg_jsonpath_name = jsonpath
+pkg_jsonpath_description = Fast Erlang JSON data retrieval and updates via javascript-like notation
+pkg_jsonpath_homepage = https://github.com/GeneStevens/jsonpath
+pkg_jsonpath_fetch = git
+pkg_jsonpath_repo = https://github.com/GeneStevens/jsonpath
+pkg_jsonpath_commit = master
+
+PACKAGES += jsonx
+pkg_jsonx_name = jsonx
+pkg_jsonx_description = JSONX is an Erlang library for efficiently decoding and encoding JSON, written in C.
+pkg_jsonx_homepage = https://github.com/iskra/jsonx
+pkg_jsonx_fetch = git
+pkg_jsonx_repo = https://github.com/iskra/jsonx
+pkg_jsonx_commit = master
+
+PACKAGES += jsx
+pkg_jsx_name = jsx
+pkg_jsx_description = An Erlang application for consuming, producing and manipulating JSON.
+pkg_jsx_homepage = https://github.com/talentdeficit/jsx
+pkg_jsx_fetch = git
+pkg_jsx_repo = https://github.com/talentdeficit/jsx
+pkg_jsx_commit = master
+
+PACKAGES += kafka
+pkg_kafka_name = kafka
+pkg_kafka_description = Kafka consumer and producer in Erlang
+pkg_kafka_homepage = https://github.com/wooga/kafka-erlang
+pkg_kafka_fetch = git
+pkg_kafka_repo = https://github.com/wooga/kafka-erlang
+pkg_kafka_commit = master
+
+PACKAGES += kafka_protocol
+pkg_kafka_protocol_name = kafka_protocol
+pkg_kafka_protocol_description = Kafka protocol Erlang library
+pkg_kafka_protocol_homepage = https://github.com/klarna/kafka_protocol
+pkg_kafka_protocol_fetch = git
+pkg_kafka_protocol_repo = https://github.com/klarna/kafka_protocol.git
+pkg_kafka_protocol_commit = master
+
+PACKAGES += kai
+pkg_kai_name = kai
+pkg_kai_description = DHT storage by Takeshi Inoue
+pkg_kai_homepage = https://github.com/synrc/kai
+pkg_kai_fetch = git
+pkg_kai_repo = https://github.com/synrc/kai
+pkg_kai_commit = master
+
+PACKAGES += katja
+pkg_katja_name = katja
+pkg_katja_description = A simple Riemann client written in Erlang.
+pkg_katja_homepage = https://github.com/nifoc/katja
+pkg_katja_fetch = git
+pkg_katja_repo = https://github.com/nifoc/katja
+pkg_katja_commit = master
+
+PACKAGES += kdht
+pkg_kdht_name = kdht
+pkg_kdht_description = kdht is an erlang DHT implementation
+pkg_kdht_homepage = https://github.com/kevinlynx/kdht
+pkg_kdht_fetch = git
+pkg_kdht_repo = https://github.com/kevinlynx/kdht
+pkg_kdht_commit = master
+
+PACKAGES += key2value
+pkg_key2value_name = key2value
+pkg_key2value_description = Erlang 2-way map
+pkg_key2value_homepage = https://github.com/okeuday/key2value
+pkg_key2value_fetch = git
+pkg_key2value_repo = https://github.com/okeuday/key2value
+pkg_key2value_commit = master
+
+PACKAGES += keys1value
+pkg_keys1value_name = keys1value
+pkg_keys1value_description = Erlang set associative map for key lists
+pkg_keys1value_homepage = https://github.com/okeuday/keys1value
+pkg_keys1value_fetch = git
+pkg_keys1value_repo = https://github.com/okeuday/keys1value
+pkg_keys1value_commit = master
+
+PACKAGES += kinetic
+pkg_kinetic_name = kinetic
+pkg_kinetic_description = Erlang Kinesis Client
+pkg_kinetic_homepage = https://github.com/AdRoll/kinetic
+pkg_kinetic_fetch = git
+pkg_kinetic_repo = https://github.com/AdRoll/kinetic
+pkg_kinetic_commit = master
+
+PACKAGES += kjell
+pkg_kjell_name = kjell
+pkg_kjell_description = Erlang Shell
+pkg_kjell_homepage = https://github.com/karlll/kjell
+pkg_kjell_fetch = git
+pkg_kjell_repo = https://github.com/karlll/kjell
+pkg_kjell_commit = master
+
+PACKAGES += kraken
+pkg_kraken_name = kraken
+pkg_kraken_description = Distributed Pubsub Server for Realtime Apps
+pkg_kraken_homepage = https://github.com/Asana/kraken
+pkg_kraken_fetch = git
+pkg_kraken_repo = https://github.com/Asana/kraken
+pkg_kraken_commit = master
+
+PACKAGES += kucumberl
+pkg_kucumberl_name = kucumberl
+pkg_kucumberl_description = A pure-Erlang, open-source implementation of Cucumber
+pkg_kucumberl_homepage = https://github.com/openshine/kucumberl
+pkg_kucumberl_fetch = git
+pkg_kucumberl_repo = https://github.com/openshine/kucumberl
+pkg_kucumberl_commit = master
+
+PACKAGES += kvc
+pkg_kvc_name = kvc
+pkg_kvc_description = KVC - Key Value Coding for Erlang data structures
+pkg_kvc_homepage = https://github.com/etrepum/kvc
+pkg_kvc_fetch = git
+pkg_kvc_repo = https://github.com/etrepum/kvc
+pkg_kvc_commit = master
+
+PACKAGES += kvlists
+pkg_kvlists_name = kvlists
+pkg_kvlists_description = Lists of key-value pairs (decoded JSON) in Erlang
+pkg_kvlists_homepage = https://github.com/jcomellas/kvlists
+pkg_kvlists_fetch = git
+pkg_kvlists_repo = https://github.com/jcomellas/kvlists
+pkg_kvlists_commit = master
+
+PACKAGES += kvs
+pkg_kvs_name = kvs
+pkg_kvs_description = Container and Iterator
+pkg_kvs_homepage = https://github.com/synrc/kvs
+pkg_kvs_fetch = git
+pkg_kvs_repo = https://github.com/synrc/kvs
+pkg_kvs_commit = master
+
+PACKAGES += lager
+pkg_lager_name = lager
+pkg_lager_description = A logging framework for Erlang/OTP.
+pkg_lager_homepage = https://github.com/erlang-lager/lager
+pkg_lager_fetch = git
+pkg_lager_repo = https://github.com/erlang-lager/lager
+pkg_lager_commit = master
+
+PACKAGES += lager_amqp_backend
+pkg_lager_amqp_backend_name = lager_amqp_backend
+pkg_lager_amqp_backend_description = AMQP RabbitMQ Lager backend
+pkg_lager_amqp_backend_homepage = https://github.com/jbrisbin/lager_amqp_backend
+pkg_lager_amqp_backend_fetch = git
+pkg_lager_amqp_backend_repo = https://github.com/jbrisbin/lager_amqp_backend
+pkg_lager_amqp_backend_commit = master
+
+PACKAGES += lager_syslog
+pkg_lager_syslog_name = lager_syslog
+pkg_lager_syslog_description = Syslog backend for lager
+pkg_lager_syslog_homepage = https://github.com/erlang-lager/lager_syslog
+pkg_lager_syslog_fetch = git
+pkg_lager_syslog_repo = https://github.com/erlang-lager/lager_syslog
+pkg_lager_syslog_commit = master
+
+PACKAGES += lambdapad
+pkg_lambdapad_name = lambdapad
+pkg_lambdapad_description = Static site generator using Erlang. Yes, Erlang.
+pkg_lambdapad_homepage = https://github.com/gar1t/lambdapad
+pkg_lambdapad_fetch = git
+pkg_lambdapad_repo = https://github.com/gar1t/lambdapad
+pkg_lambdapad_commit = master
+
+PACKAGES += lasp
+pkg_lasp_name = lasp
+pkg_lasp_description = A Language for Distributed, Eventually Consistent Computations
+pkg_lasp_homepage = http://lasp-lang.org/
+pkg_lasp_fetch = git
+pkg_lasp_repo = https://github.com/lasp-lang/lasp
+pkg_lasp_commit = master
+
+PACKAGES += lasse
+pkg_lasse_name = lasse
+pkg_lasse_description = SSE handler for Cowboy
+pkg_lasse_homepage = https://github.com/inaka/lasse
+pkg_lasse_fetch = git
+pkg_lasse_repo = https://github.com/inaka/lasse
+pkg_lasse_commit = master
+
+PACKAGES += ldap
+pkg_ldap_name = ldap
+pkg_ldap_description = LDAP server written in Erlang
+pkg_ldap_homepage = https://github.com/spawnproc/ldap
+pkg_ldap_fetch = git
+pkg_ldap_repo = https://github.com/spawnproc/ldap
+pkg_ldap_commit = master
+
+PACKAGES += lethink
+pkg_lethink_name = lethink
+pkg_lethink_description = Erlang driver for RethinkDB
+pkg_lethink_homepage = https://github.com/taybin/lethink
+pkg_lethink_fetch = git
+pkg_lethink_repo = https://github.com/taybin/lethink
+pkg_lethink_commit = master
+
+PACKAGES += lfe
+pkg_lfe_name = lfe
+pkg_lfe_description = Lisp Flavoured Erlang (LFE)
+pkg_lfe_homepage = https://github.com/rvirding/lfe
+pkg_lfe_fetch = git
+pkg_lfe_repo = https://github.com/rvirding/lfe
+pkg_lfe_commit = master
+
+PACKAGES += ling
+pkg_ling_name = ling
+pkg_ling_description = Erlang on Xen
+pkg_ling_homepage = https://github.com/cloudozer/ling
+pkg_ling_fetch = git
+pkg_ling_repo = https://github.com/cloudozer/ling
+pkg_ling_commit = master
+
+PACKAGES += live
+pkg_live_name = live
+pkg_live_description = Automated module and configuration reloader.
+pkg_live_homepage = http://ninenines.eu
+pkg_live_fetch = git
+pkg_live_repo = https://github.com/ninenines/live
+pkg_live_commit = master
+
+PACKAGES += lmq
+pkg_lmq_name = lmq
+pkg_lmq_description = Lightweight Message Queue
+pkg_lmq_homepage = https://github.com/iij/lmq
+pkg_lmq_fetch = git
+pkg_lmq_repo = https://github.com/iij/lmq
+pkg_lmq_commit = master
+
+PACKAGES += locker
+pkg_locker_name = locker
+pkg_locker_description = Atomic distributed 'check and set' for short-lived keys
+pkg_locker_homepage = https://github.com/wooga/locker
+pkg_locker_fetch = git
+pkg_locker_repo = https://github.com/wooga/locker
+pkg_locker_commit = master
+
+PACKAGES += locks
+pkg_locks_name = locks
+pkg_locks_description = A scalable, deadlock-resolving resource locker
+pkg_locks_homepage = https://github.com/uwiger/locks
+pkg_locks_fetch = git
+pkg_locks_repo = https://github.com/uwiger/locks
+pkg_locks_commit = master
+
+PACKAGES += log4erl
+pkg_log4erl_name = log4erl
+pkg_log4erl_description = A logger for Erlang in the spirit of Log4J.
+pkg_log4erl_homepage = https://github.com/ahmednawras/log4erl
+pkg_log4erl_fetch = git
+pkg_log4erl_repo = https://github.com/ahmednawras/log4erl
+pkg_log4erl_commit = master
+
+PACKAGES += lol
+pkg_lol_name = lol
+pkg_lol_description = Lisp on erLang, and programming is fun again
+pkg_lol_homepage = https://github.com/b0oh/lol
+pkg_lol_fetch = git
+pkg_lol_repo = https://github.com/b0oh/lol
+pkg_lol_commit = master
+
+PACKAGES += lucid
+pkg_lucid_name = lucid
+pkg_lucid_description = HTTP/2 server written in Erlang
+pkg_lucid_homepage = https://github.com/tatsuhiro-t/lucid
+pkg_lucid_fetch = git
+pkg_lucid_repo = https://github.com/tatsuhiro-t/lucid
+pkg_lucid_commit = master
+
+PACKAGES += luerl
+pkg_luerl_name = luerl
+pkg_luerl_description = Lua in Erlang
+pkg_luerl_homepage = https://github.com/rvirding/luerl
+pkg_luerl_fetch = git
+pkg_luerl_repo = https://github.com/rvirding/luerl
+pkg_luerl_commit = develop
+
+PACKAGES += luwak
+pkg_luwak_name = luwak
+pkg_luwak_description = Large-object storage interface for Riak
+pkg_luwak_homepage = https://github.com/basho/luwak
+pkg_luwak_fetch = git
+pkg_luwak_repo = https://github.com/basho/luwak
+pkg_luwak_commit = master
+
+PACKAGES += lux
+pkg_lux_name = lux
+pkg_lux_description = Lux (LUcid eXpect scripting) simplifies test automation and provides an Expect-style execution of commands
+pkg_lux_homepage = https://github.com/hawk/lux
+pkg_lux_fetch = git
+pkg_lux_repo = https://github.com/hawk/lux
+pkg_lux_commit = master
+
+PACKAGES += machi
+pkg_machi_name = machi
+pkg_machi_description = Machi file store
+pkg_machi_homepage = https://github.com/basho/machi
+pkg_machi_fetch = git
+pkg_machi_repo = https://github.com/basho/machi
+pkg_machi_commit = master
+
+PACKAGES += mad
+pkg_mad_name = mad
+pkg_mad_description = Small and Fast Rebar Replacement
+pkg_mad_homepage = https://github.com/synrc/mad
+pkg_mad_fetch = git
+pkg_mad_repo = https://github.com/synrc/mad
+pkg_mad_commit = master
+
+PACKAGES += marina
+pkg_marina_name = marina
+pkg_marina_description = Non-blocking Erlang Cassandra CQL3 client
+pkg_marina_homepage = https://github.com/lpgauth/marina
+pkg_marina_fetch = git
+pkg_marina_repo = https://github.com/lpgauth/marina
+pkg_marina_commit = master
+
+PACKAGES += mavg
+pkg_mavg_name = mavg
+pkg_mavg_description = Erlang :: Exponential moving average library
+pkg_mavg_homepage = https://github.com/EchoTeam/mavg
+pkg_mavg_fetch = git
+pkg_mavg_repo = https://github.com/EchoTeam/mavg
+pkg_mavg_commit = master
+
+PACKAGES += mc_erl
+pkg_mc_erl_name = mc_erl
+pkg_mc_erl_description = mc-erl is a server for Minecraft 1.4.7 written in Erlang.
+pkg_mc_erl_homepage = https://github.com/clonejo/mc-erl
+pkg_mc_erl_fetch = git
+pkg_mc_erl_repo = https://github.com/clonejo/mc-erl
+pkg_mc_erl_commit = master
+
+PACKAGES += mcd
+pkg_mcd_name = mcd
+pkg_mcd_description = Fast memcached protocol client in pure Erlang
+pkg_mcd_homepage = https://github.com/EchoTeam/mcd
+pkg_mcd_fetch = git
+pkg_mcd_repo = https://github.com/EchoTeam/mcd
+pkg_mcd_commit = master
+
+PACKAGES += mcerlang
+pkg_mcerlang_name = mcerlang
+pkg_mcerlang_description = The McErlang model checker for Erlang
+pkg_mcerlang_homepage = https://github.com/fredlund/McErlang
+pkg_mcerlang_fetch = git
+pkg_mcerlang_repo = https://github.com/fredlund/McErlang
+pkg_mcerlang_commit = master
+
+PACKAGES += meck
+pkg_meck_name = meck
+pkg_meck_description = A mocking library for Erlang
+pkg_meck_homepage = https://github.com/eproxus/meck
+pkg_meck_fetch = git
+pkg_meck_repo = https://github.com/eproxus/meck
+pkg_meck_commit = master
+
+PACKAGES += mekao
+pkg_mekao_name = mekao
+pkg_mekao_description = SQL constructor
+pkg_mekao_homepage = https://github.com/ddosia/mekao
+pkg_mekao_fetch = git
+pkg_mekao_repo = https://github.com/ddosia/mekao
+pkg_mekao_commit = master
+
+PACKAGES += memo
+pkg_memo_name = memo
+pkg_memo_description = Erlang memoization server
+pkg_memo_homepage = https://github.com/tuncer/memo
+pkg_memo_fetch = git
+pkg_memo_repo = https://github.com/tuncer/memo
+pkg_memo_commit = master
+
+PACKAGES += merge_index
+pkg_merge_index_name = merge_index
+pkg_merge_index_description = MergeIndex is an Erlang library for storing ordered sets on disk. It is very similar to an SSTable (in Google's Bigtable) or an HFile (in Hadoop).
+pkg_merge_index_homepage = https://github.com/basho/merge_index
+pkg_merge_index_fetch = git
+pkg_merge_index_repo = https://github.com/basho/merge_index
+pkg_merge_index_commit = master
+
+PACKAGES += merl
+pkg_merl_name = merl
+pkg_merl_description = Metaprogramming in Erlang
+pkg_merl_homepage = https://github.com/richcarl/merl
+pkg_merl_fetch = git
+pkg_merl_repo = https://github.com/richcarl/merl
+pkg_merl_commit = master
+
+PACKAGES += mimerl
+pkg_mimerl_name = mimerl
+pkg_mimerl_description = Library to handle MIME types
+pkg_mimerl_homepage = https://github.com/benoitc/mimerl
+pkg_mimerl_fetch = git
+pkg_mimerl_repo = https://github.com/benoitc/mimerl
+pkg_mimerl_commit = master
+
+PACKAGES += mimetypes
+pkg_mimetypes_name = mimetypes
+pkg_mimetypes_description = Erlang MIME types library
+pkg_mimetypes_homepage = https://github.com/spawngrid/mimetypes
+pkg_mimetypes_fetch = git
+pkg_mimetypes_repo = https://github.com/spawngrid/mimetypes
+pkg_mimetypes_commit = master
+
+PACKAGES += mixer
+pkg_mixer_name = mixer
+pkg_mixer_description = Mix in functions from other modules
+pkg_mixer_homepage = https://github.com/chef/mixer
+pkg_mixer_fetch = git
+pkg_mixer_repo = https://github.com/chef/mixer
+pkg_mixer_commit = master
+
+PACKAGES += mochiweb
+pkg_mochiweb_name = mochiweb
+pkg_mochiweb_description = MochiWeb is an Erlang library for building lightweight HTTP servers.
+pkg_mochiweb_homepage = https://github.com/mochi/mochiweb
+pkg_mochiweb_fetch = git
+pkg_mochiweb_repo = https://github.com/mochi/mochiweb
+pkg_mochiweb_commit = master
+
+PACKAGES += mochiweb_xpath
+pkg_mochiweb_xpath_name = mochiweb_xpath
+pkg_mochiweb_xpath_description = XPath support for mochiweb's HTML parser
+pkg_mochiweb_xpath_homepage = https://github.com/retnuh/mochiweb_xpath
+pkg_mochiweb_xpath_fetch = git
+pkg_mochiweb_xpath_repo = https://github.com/retnuh/mochiweb_xpath
+pkg_mochiweb_xpath_commit = master
+
+PACKAGES += mockgyver
+pkg_mockgyver_name = mockgyver
+pkg_mockgyver_description = A mocking library for Erlang
+pkg_mockgyver_homepage = https://github.com/klajo/mockgyver
+pkg_mockgyver_fetch = git
+pkg_mockgyver_repo = https://github.com/klajo/mockgyver
+pkg_mockgyver_commit = master
+
+PACKAGES += modlib
+pkg_modlib_name = modlib
+pkg_modlib_description = Web framework based on Erlang's inets httpd
+pkg_modlib_homepage = https://github.com/gar1t/modlib
+pkg_modlib_fetch = git
+pkg_modlib_repo = https://github.com/gar1t/modlib
+pkg_modlib_commit = master
+
+PACKAGES += mongodb
+pkg_mongodb_name = mongodb
+pkg_mongodb_description = MongoDB driver for Erlang
+pkg_mongodb_homepage = https://github.com/comtihon/mongodb-erlang
+pkg_mongodb_fetch = git
+pkg_mongodb_repo = https://github.com/comtihon/mongodb-erlang
+pkg_mongodb_commit = master
+
+PACKAGES += mongooseim
+pkg_mongooseim_name = mongooseim
+pkg_mongooseim_description = Jabber / XMPP server with focus on performance and scalability, by Erlang Solutions
+pkg_mongooseim_homepage = https://www.erlang-solutions.com/products/mongooseim-massively-scalable-ejabberd-platform
+pkg_mongooseim_fetch = git
+pkg_mongooseim_repo = https://github.com/esl/MongooseIM
+pkg_mongooseim_commit = master
+
+PACKAGES += moyo
+pkg_moyo_name = moyo
+pkg_moyo_description = Erlang utility functions library
+pkg_moyo_homepage = https://github.com/dwango/moyo
+pkg_moyo_fetch = git
+pkg_moyo_repo = https://github.com/dwango/moyo
+pkg_moyo_commit = master
+
+PACKAGES += msgpack
+pkg_msgpack_name = msgpack
+pkg_msgpack_description = MessagePack (de)serializer implementation for Erlang
+pkg_msgpack_homepage = https://github.com/msgpack/msgpack-erlang
+pkg_msgpack_fetch = git
+pkg_msgpack_repo = https://github.com/msgpack/msgpack-erlang
+pkg_msgpack_commit = master
+
+PACKAGES += mu2
+pkg_mu2_name = mu2
+pkg_mu2_description = Erlang mutation testing tool
+pkg_mu2_homepage = https://github.com/ramsay-t/mu2
+pkg_mu2_fetch = git
+pkg_mu2_repo = https://github.com/ramsay-t/mu2
+pkg_mu2_commit = master
+
+PACKAGES += mustache
+pkg_mustache_name = mustache
+pkg_mustache_description = Mustache template engine for Erlang.
+pkg_mustache_homepage = https://github.com/mojombo/mustache.erl
+pkg_mustache_fetch = git
+pkg_mustache_repo = https://github.com/mojombo/mustache.erl
+pkg_mustache_commit = master
+
+PACKAGES += myproto
+pkg_myproto_name = myproto
+pkg_myproto_description = MySQL Server Protocol in Erlang
+pkg_myproto_homepage = https://github.com/altenwald/myproto
+pkg_myproto_fetch = git
+pkg_myproto_repo = https://github.com/altenwald/myproto
+pkg_myproto_commit = master
+
+PACKAGES += mysql
+pkg_mysql_name = mysql
+pkg_mysql_description = MySQL client library for Erlang/OTP
+pkg_mysql_homepage = https://github.com/mysql-otp/mysql-otp
+pkg_mysql_fetch = git
+pkg_mysql_repo = https://github.com/mysql-otp/mysql-otp
+pkg_mysql_commit = 1.5.1
+
+PACKAGES += n2o
+pkg_n2o_name = n2o
+pkg_n2o_description = WebSocket Application Server
+pkg_n2o_homepage = https://github.com/5HT/n2o
+pkg_n2o_fetch = git
+pkg_n2o_repo = https://github.com/5HT/n2o
+pkg_n2o_commit = master
+
+PACKAGES += nat_upnp
+pkg_nat_upnp_name = nat_upnp
+pkg_nat_upnp_description = Erlang library to map your internal port to an external one using UPnP IGD
+pkg_nat_upnp_homepage = https://github.com/benoitc/nat_upnp
+pkg_nat_upnp_fetch = git
+pkg_nat_upnp_repo = https://github.com/benoitc/nat_upnp
+pkg_nat_upnp_commit = master
+
+PACKAGES += neo4j
+pkg_neo4j_name = neo4j
+pkg_neo4j_description = Erlang client library for Neo4J.
+pkg_neo4j_homepage = https://github.com/dmitriid/neo4j-erlang
+pkg_neo4j_fetch = git
+pkg_neo4j_repo = https://github.com/dmitriid/neo4j-erlang
+pkg_neo4j_commit = master
+
+PACKAGES += neotoma
+pkg_neotoma_name = neotoma
+pkg_neotoma_description = Erlang library and packrat parser-generator for parsing expression grammars.
+pkg_neotoma_homepage = https://github.com/seancribbs/neotoma
+pkg_neotoma_fetch = git
+pkg_neotoma_repo = https://github.com/seancribbs/neotoma
+pkg_neotoma_commit = master
+
+PACKAGES += newrelic
+pkg_newrelic_name = newrelic
+pkg_newrelic_description = Erlang library for sending metrics to New Relic
+pkg_newrelic_homepage = https://github.com/wooga/newrelic-erlang
+pkg_newrelic_fetch = git
+pkg_newrelic_repo = https://github.com/wooga/newrelic-erlang
+pkg_newrelic_commit = master
+
+PACKAGES += nifty
+pkg_nifty_name = nifty
+pkg_nifty_description = Erlang NIF wrapper generator
+pkg_nifty_homepage = https://github.com/parapluu/nifty
+pkg_nifty_fetch = git
+pkg_nifty_repo = https://github.com/parapluu/nifty
+pkg_nifty_commit = master
+
+PACKAGES += nitrogen_core
+pkg_nitrogen_core_name = nitrogen_core
+pkg_nitrogen_core_description = The core Nitrogen library.
+pkg_nitrogen_core_homepage = http://nitrogenproject.com/
+pkg_nitrogen_core_fetch = git
+pkg_nitrogen_core_repo = https://github.com/nitrogen/nitrogen_core
+pkg_nitrogen_core_commit = master
+
+PACKAGES += nkbase
+pkg_nkbase_name = nkbase
+pkg_nkbase_description = NkBASE distributed database
+pkg_nkbase_homepage = https://github.com/Nekso/nkbase
+pkg_nkbase_fetch = git
+pkg_nkbase_repo = https://github.com/Nekso/nkbase
+pkg_nkbase_commit = develop
+
+PACKAGES += nkdocker
+pkg_nkdocker_name = nkdocker
+pkg_nkdocker_description = Erlang Docker client
+pkg_nkdocker_homepage = https://github.com/Nekso/nkdocker
+pkg_nkdocker_fetch = git
+pkg_nkdocker_repo = https://github.com/Nekso/nkdocker
+pkg_nkdocker_commit = master
+
+PACKAGES += nkpacket
+pkg_nkpacket_name = nkpacket
+pkg_nkpacket_description = Generic Erlang transport layer
+pkg_nkpacket_homepage = https://github.com/Nekso/nkpacket
+pkg_nkpacket_fetch = git
+pkg_nkpacket_repo = https://github.com/Nekso/nkpacket
+pkg_nkpacket_commit = master
+
+PACKAGES += nksip
+pkg_nksip_name = nksip
+pkg_nksip_description = Erlang SIP application server
+pkg_nksip_homepage = https://github.com/kalta/nksip
+pkg_nksip_fetch = git
+pkg_nksip_repo = https://github.com/kalta/nksip
+pkg_nksip_commit = master
+
+PACKAGES += nodefinder
+pkg_nodefinder_name = nodefinder
+pkg_nodefinder_description = Automatic node discovery via UDP multicast
+pkg_nodefinder_homepage = https://github.com/erlanger/nodefinder
+pkg_nodefinder_fetch = git
+pkg_nodefinder_repo = https://github.com/okeuday/nodefinder
+pkg_nodefinder_commit = master
+
+PACKAGES += nprocreg
+pkg_nprocreg_name = nprocreg
+pkg_nprocreg_description = Minimal Distributed Erlang Process Registry
+pkg_nprocreg_homepage = http://nitrogenproject.com/
+pkg_nprocreg_fetch = git
+pkg_nprocreg_repo = https://github.com/nitrogen/nprocreg
+pkg_nprocreg_commit = master
+
+PACKAGES += oauth
+pkg_oauth_name = oauth
+pkg_oauth_description = An Erlang OAuth 1.0 implementation
+pkg_oauth_homepage = https://github.com/tim/erlang-oauth
+pkg_oauth_fetch = git
+pkg_oauth_repo = https://github.com/tim/erlang-oauth
+pkg_oauth_commit = master
+
+PACKAGES += oauth2
+pkg_oauth2_name = oauth2
+pkg_oauth2_description = Erlang OAuth2 implementation
+pkg_oauth2_homepage = https://github.com/kivra/oauth2
+pkg_oauth2_fetch = git
+pkg_oauth2_repo = https://github.com/kivra/oauth2
+pkg_oauth2_commit = master
+
+PACKAGES += observer_cli
+pkg_observer_cli_name = observer_cli
+pkg_observer_cli_description = Visualize Erlang/Elixir Nodes On The Command Line
+pkg_observer_cli_homepage = http://zhongwencool.github.io/observer_cli
+pkg_observer_cli_fetch = git
+pkg_observer_cli_repo = https://github.com/zhongwencool/observer_cli
+pkg_observer_cli_commit = master
+
+PACKAGES += octopus
+pkg_octopus_name = octopus
+pkg_octopus_description = Small and flexible pool manager written in Erlang
+pkg_octopus_homepage = https://github.com/erlangbureau/octopus
+pkg_octopus_fetch = git
+pkg_octopus_repo = https://github.com/erlangbureau/octopus
+pkg_octopus_commit = master
+
+PACKAGES += of_protocol
+pkg_of_protocol_name = of_protocol
+pkg_of_protocol_description = OpenFlow Protocol Library for Erlang
+pkg_of_protocol_homepage = https://github.com/FlowForwarding/of_protocol
+pkg_of_protocol_fetch = git
+pkg_of_protocol_repo = https://github.com/FlowForwarding/of_protocol
+pkg_of_protocol_commit = master
+
+PACKAGES += opencouch
+pkg_opencouch_name = couch
+pkg_opencouch_description = An embeddable document-oriented database compatible with Apache CouchDB
+pkg_opencouch_homepage = https://github.com/benoitc/opencouch
+pkg_opencouch_fetch = git
+pkg_opencouch_repo = https://github.com/benoitc/opencouch
+pkg_opencouch_commit = master
+
+PACKAGES += openflow
+pkg_openflow_name = openflow
+pkg_openflow_description = An OpenFlow controller written in pure Erlang
+pkg_openflow_homepage = https://github.com/renatoaguiar/erlang-openflow
+pkg_openflow_fetch = git
+pkg_openflow_repo = https://github.com/renatoaguiar/erlang-openflow
+pkg_openflow_commit = master
+
+PACKAGES += openid
+pkg_openid_name = openid
+pkg_openid_description = Erlang OpenID
+pkg_openid_homepage = https://github.com/brendonh/erl_openid
+pkg_openid_fetch = git
+pkg_openid_repo = https://github.com/brendonh/erl_openid
+pkg_openid_commit = master
+
+PACKAGES += openpoker
+pkg_openpoker_name = openpoker
+pkg_openpoker_description = Genesis Texas hold'em Game Server
+pkg_openpoker_homepage = https://github.com/hpyhacking/openpoker
+pkg_openpoker_fetch = git
+pkg_openpoker_repo = https://github.com/hpyhacking/openpoker
+pkg_openpoker_commit = master
+
+PACKAGES += otpbp
+pkg_otpbp_name = otpbp
+pkg_otpbp_description = Parse transform for using new OTP functions in old Erlang/OTP releases (R15, R16, 17, 18, 19)
+pkg_otpbp_homepage = https://github.com/Ledest/otpbp
+pkg_otpbp_fetch = git
+pkg_otpbp_repo = https://github.com/Ledest/otpbp
+pkg_otpbp_commit = master
+
+PACKAGES += pal
+pkg_pal_name = pal
+pkg_pal_description = Pragmatic Authentication Library
+pkg_pal_homepage = https://github.com/manifest/pal
+pkg_pal_fetch = git
+pkg_pal_repo = https://github.com/manifest/pal
+pkg_pal_commit = master
+
+PACKAGES += parse_trans
+pkg_parse_trans_name = parse_trans
+pkg_parse_trans_description = Parse transform utilities for Erlang
+pkg_parse_trans_homepage = https://github.com/uwiger/parse_trans
+pkg_parse_trans_fetch = git
+pkg_parse_trans_repo = https://github.com/uwiger/parse_trans
+pkg_parse_trans_commit = master
+
+PACKAGES += parsexml
+pkg_parsexml_name = parsexml
+pkg_parsexml_description = Simple DOM XML parser with convenient and very simple API
+pkg_parsexml_homepage = https://github.com/maxlapshin/parsexml
+pkg_parsexml_fetch = git
+pkg_parsexml_repo = https://github.com/maxlapshin/parsexml
+pkg_parsexml_commit = master
+
+PACKAGES += partisan
+pkg_partisan_name = partisan
+pkg_partisan_description = High-performance, high-scalability distributed computing with Erlang and Elixir.
+pkg_partisan_homepage = http://partisan.cloud
+pkg_partisan_fetch = git
+pkg_partisan_repo = https://github.com/lasp-lang/partisan
+pkg_partisan_commit = master
+
+PACKAGES += pegjs
+pkg_pegjs_name = pegjs
+pkg_pegjs_description = An implementation of PEG.js grammar for Erlang.
+pkg_pegjs_homepage = https://github.com/dmitriid/pegjs
+pkg_pegjs_fetch = git
+pkg_pegjs_repo = https://github.com/dmitriid/pegjs
+pkg_pegjs_commit = master
+
+PACKAGES += percept2
+pkg_percept2_name = percept2
+pkg_percept2_description = Concurrent profiling tool for Erlang
+pkg_percept2_homepage = https://github.com/huiqing/percept2
+pkg_percept2_fetch = git
+pkg_percept2_repo = https://github.com/huiqing/percept2
+pkg_percept2_commit = master
+
+PACKAGES += pgo
+pkg_pgo_name = pgo
+pkg_pgo_description = Erlang Postgres client and connection pool
+pkg_pgo_homepage = https://github.com/erleans/pgo.git
+pkg_pgo_fetch = git
+pkg_pgo_repo = https://github.com/erleans/pgo.git
+pkg_pgo_commit = master
+
+PACKAGES += pgsql
+pkg_pgsql_name = pgsql
+pkg_pgsql_description = Erlang PostgreSQL driver
+pkg_pgsql_homepage = https://github.com/semiocast/pgsql
+pkg_pgsql_fetch = git
+pkg_pgsql_repo = https://github.com/semiocast/pgsql
+pkg_pgsql_commit = master
+
+PACKAGES += pkgx
+pkg_pkgx_name = pkgx
+pkg_pkgx_description = Build .deb packages from Erlang releases
+pkg_pkgx_homepage = https://github.com/arjan/pkgx
+pkg_pkgx_fetch = git
+pkg_pkgx_repo = https://github.com/arjan/pkgx
+pkg_pkgx_commit = master
+
+PACKAGES += pkt
+pkg_pkt_name = pkt
+pkg_pkt_description = Erlang network protocol library
+pkg_pkt_homepage = https://github.com/msantos/pkt
+pkg_pkt_fetch = git
+pkg_pkt_repo = https://github.com/msantos/pkt
+pkg_pkt_commit = master
+
+PACKAGES += plain_fsm
+pkg_plain_fsm_name = plain_fsm
+pkg_plain_fsm_description = A behaviour/support library for writing plain Erlang FSMs.
+pkg_plain_fsm_homepage = https://github.com/uwiger/plain_fsm
+pkg_plain_fsm_fetch = git
+pkg_plain_fsm_repo = https://github.com/uwiger/plain_fsm
+pkg_plain_fsm_commit = master
+
+PACKAGES += plumtree
+pkg_plumtree_name = plumtree
+pkg_plumtree_description = Epidemic Broadcast Trees
+pkg_plumtree_homepage = https://github.com/helium/plumtree
+pkg_plumtree_fetch = git
+pkg_plumtree_repo = https://github.com/helium/plumtree
+pkg_plumtree_commit = master
+
+PACKAGES += pmod_transform
+pkg_pmod_transform_name = pmod_transform
+pkg_pmod_transform_description = Parse transform for parameterized modules
+pkg_pmod_transform_homepage = https://github.com/erlang/pmod_transform
+pkg_pmod_transform_fetch = git
+pkg_pmod_transform_repo = https://github.com/erlang/pmod_transform
+pkg_pmod_transform_commit = master
+
+PACKAGES += pobox
+pkg_pobox_name = pobox
+pkg_pobox_description = External buffer processes to protect against mailbox overflow in Erlang
+pkg_pobox_homepage = https://github.com/ferd/pobox
+pkg_pobox_fetch = git
+pkg_pobox_repo = https://github.com/ferd/pobox
+pkg_pobox_commit = master
+
+PACKAGES += ponos
+pkg_ponos_name = ponos
+pkg_ponos_description = ponos is a simple yet powerful load generator written in Erlang
+pkg_ponos_homepage = https://github.com/klarna/ponos
+pkg_ponos_fetch = git
+pkg_ponos_repo = https://github.com/klarna/ponos
+pkg_ponos_commit = master
+
+PACKAGES += poolboy
+pkg_poolboy_name = poolboy
+pkg_poolboy_description = A hunky Erlang worker pool factory
+pkg_poolboy_homepage = https://github.com/devinus/poolboy
+pkg_poolboy_fetch = git
+pkg_poolboy_repo = https://github.com/devinus/poolboy
+pkg_poolboy_commit = master
+
+PACKAGES += pooler
+pkg_pooler_name = pooler
+pkg_pooler_description = An OTP Process Pool Application
+pkg_pooler_homepage = https://github.com/seth/pooler
+pkg_pooler_fetch = git
+pkg_pooler_repo = https://github.com/seth/pooler
+pkg_pooler_commit = master
+
+PACKAGES += pqueue
+pkg_pqueue_name = pqueue
+pkg_pqueue_description = Erlang Priority Queues
+pkg_pqueue_homepage = https://github.com/okeuday/pqueue
+pkg_pqueue_fetch = git
+pkg_pqueue_repo = https://github.com/okeuday/pqueue
+pkg_pqueue_commit = master
+
+PACKAGES += procket
+pkg_procket_name = procket
+pkg_procket_description = Erlang interface to low level socket operations
+pkg_procket_homepage = http://blog.listincomprehension.com/search/label/procket
+pkg_procket_fetch = git
+pkg_procket_repo = https://github.com/msantos/procket
+pkg_procket_commit = master
+
+PACKAGES += prometheus
+pkg_prometheus_name = prometheus
+pkg_prometheus_description = Prometheus.io client in Erlang
+pkg_prometheus_homepage = https://github.com/deadtrickster/prometheus.erl
+pkg_prometheus_fetch = git
+pkg_prometheus_repo = https://github.com/deadtrickster/prometheus.erl
+pkg_prometheus_commit = master
+
+PACKAGES += prop
+pkg_prop_name = prop
+pkg_prop_description = An Erlang code scaffolding and generator system.
+pkg_prop_homepage = https://github.com/nuex/prop
+pkg_prop_fetch = git
+pkg_prop_repo = https://github.com/nuex/prop
+pkg_prop_commit = master
+
+PACKAGES += proper
+pkg_proper_name = proper
+pkg_proper_description = PropEr: a QuickCheck-inspired property-based testing tool for Erlang.
+pkg_proper_homepage = http://proper.softlab.ntua.gr
+pkg_proper_fetch = git
+pkg_proper_repo = https://github.com/manopapad/proper
+pkg_proper_commit = master
+
+PACKAGES += props
+pkg_props_name = props
+pkg_props_description = Property structure library
+pkg_props_homepage = https://github.com/greyarea/props
+pkg_props_fetch = git
+pkg_props_repo = https://github.com/greyarea/props
+pkg_props_commit = master
+
+PACKAGES += protobuffs
+pkg_protobuffs_name = protobuffs
+pkg_protobuffs_description = An implementation of Google's Protocol Buffers for Erlang, based on ngerakines/erlang_protobuffs.
+pkg_protobuffs_homepage = https://github.com/basho/erlang_protobuffs
+pkg_protobuffs_fetch = git
+pkg_protobuffs_repo = https://github.com/basho/erlang_protobuffs
+pkg_protobuffs_commit = master
+
+PACKAGES += psycho
+pkg_psycho_name = psycho
+pkg_psycho_description = HTTP server that provides a WSGI-like interface for applications and middleware.
+pkg_psycho_homepage = https://github.com/gar1t/psycho
+pkg_psycho_fetch = git
+pkg_psycho_repo = https://github.com/gar1t/psycho
+pkg_psycho_commit = master
+
+PACKAGES += purity
+pkg_purity_name = purity
+pkg_purity_description = A side-effect analyzer for Erlang
+pkg_purity_homepage = https://github.com/mpitid/purity
+pkg_purity_fetch = git
+pkg_purity_repo = https://github.com/mpitid/purity
+pkg_purity_commit = master
+
+PACKAGES += push_service
+pkg_push_service_name = push_service
+pkg_push_service_description = Push service
+pkg_push_service_homepage = https://github.com/hairyhum/push_service
+pkg_push_service_fetch = git
+pkg_push_service_repo = https://github.com/hairyhum/push_service
+pkg_push_service_commit = master
+
+PACKAGES += qdate
+pkg_qdate_name = qdate
+pkg_qdate_description = Date, time, and timezone parsing, formatting, and conversion for Erlang.
+pkg_qdate_homepage = https://github.com/choptastic/qdate
+pkg_qdate_fetch = git
+pkg_qdate_repo = https://github.com/choptastic/qdate
+pkg_qdate_commit = master
+
+PACKAGES += qrcode
+pkg_qrcode_name = qrcode
+pkg_qrcode_description = QR Code encoder in Erlang
+pkg_qrcode_homepage = https://github.com/komone/qrcode
+pkg_qrcode_fetch = git
+pkg_qrcode_repo = https://github.com/komone/qrcode
+pkg_qrcode_commit = master
+
+PACKAGES += quest
+pkg_quest_name = quest
+pkg_quest_description = Learn Erlang through this set of challenges. An interactive system for getting to know Erlang.
+pkg_quest_homepage = https://github.com/eriksoe/ErlangQuest
+pkg_quest_fetch = git
+pkg_quest_repo = https://github.com/eriksoe/ErlangQuest
+pkg_quest_commit = master
+
+PACKAGES += quickrand
+pkg_quickrand_name = quickrand
+pkg_quickrand_description = Quick Erlang Random Number Generation
+pkg_quickrand_homepage = https://github.com/okeuday/quickrand
+pkg_quickrand_fetch = git
+pkg_quickrand_repo = https://github.com/okeuday/quickrand
+pkg_quickrand_commit = master
+
+PACKAGES += rabbit
+pkg_rabbit_name = rabbit
+pkg_rabbit_description = RabbitMQ Server
+pkg_rabbit_homepage = https://www.rabbitmq.com/
+pkg_rabbit_fetch = git
+pkg_rabbit_repo = https://github.com/rabbitmq/rabbitmq-server.git
+pkg_rabbit_commit = master
+
+PACKAGES += rabbit_exchange_type_riak
+pkg_rabbit_exchange_type_riak_name = rabbit_exchange_type_riak
+pkg_rabbit_exchange_type_riak_description = Custom RabbitMQ exchange type for sticking messages in Riak
+pkg_rabbit_exchange_type_riak_homepage = https://github.com/jbrisbin/riak-exchange
+pkg_rabbit_exchange_type_riak_fetch = git
+pkg_rabbit_exchange_type_riak_repo = https://github.com/jbrisbin/riak-exchange
+pkg_rabbit_exchange_type_riak_commit = master
+
+PACKAGES += rack
+pkg_rack_name = rack
+pkg_rack_description = Rack handler for Erlang
+pkg_rack_homepage = https://github.com/erlyvideo/rack
+pkg_rack_fetch = git
+pkg_rack_repo = https://github.com/erlyvideo/rack
+pkg_rack_commit = master
+
+PACKAGES += radierl
+pkg_radierl_name = radierl
+pkg_radierl_description = RADIUS protocol stack implemented in Erlang.
+pkg_radierl_homepage = https://github.com/vances/radierl
+pkg_radierl_fetch = git
+pkg_radierl_repo = https://github.com/vances/radierl
+pkg_radierl_commit = master
+
+PACKAGES += rafter
+pkg_rafter_name = rafter
+pkg_rafter_description = An Erlang library application which implements the Raft consensus protocol
+pkg_rafter_homepage = https://github.com/andrewjstone/rafter
+pkg_rafter_fetch = git
+pkg_rafter_repo = https://github.com/andrewjstone/rafter
+pkg_rafter_commit = master
+
+PACKAGES += ranch
+pkg_ranch_name = ranch
+pkg_ranch_description = Socket acceptor pool for TCP protocols.
+pkg_ranch_homepage = http://ninenines.eu
+pkg_ranch_fetch = git
+pkg_ranch_repo = https://github.com/ninenines/ranch
+pkg_ranch_commit = 1.2.1
+
+PACKAGES += rbeacon
+pkg_rbeacon_name = rbeacon
+pkg_rbeacon_description = LAN discovery and presence in Erlang.
+pkg_rbeacon_homepage = https://github.com/refuge/rbeacon
+pkg_rbeacon_fetch = git
+pkg_rbeacon_repo = https://github.com/refuge/rbeacon
+pkg_rbeacon_commit = master
+
+PACKAGES += rebar
+pkg_rebar_name = rebar
+pkg_rebar_description = Erlang build tool that makes it easy to compile and test Erlang applications, port drivers and releases.
+pkg_rebar_homepage = http://www.rebar3.org
+pkg_rebar_fetch = git
+pkg_rebar_repo = https://github.com/rebar/rebar3
+pkg_rebar_commit = master
+
+PACKAGES += rebus
+pkg_rebus_name = rebus
+pkg_rebus_description = A stupid simple, internal pub/sub event bus written in and for Erlang.
+pkg_rebus_homepage = https://github.com/olle/rebus
+pkg_rebus_fetch = git
+pkg_rebus_repo = https://github.com/olle/rebus
+pkg_rebus_commit = master
+
+PACKAGES += rec2json
+pkg_rec2json_name = rec2json
+pkg_rec2json_description = Compile Erlang record definitions into modules to convert them to/from JSON easily.
+pkg_rec2json_homepage = https://github.com/lordnull/rec2json
+pkg_rec2json_fetch = git
+pkg_rec2json_repo = https://github.com/lordnull/rec2json
+pkg_rec2json_commit = master
+
+PACKAGES += recon
+pkg_recon_name = recon
+pkg_recon_description = Collection of functions and scripts to debug Erlang in production.
+pkg_recon_homepage = https://github.com/ferd/recon
+pkg_recon_fetch = git
+pkg_recon_repo = https://github.com/ferd/recon
+pkg_recon_commit = master
+
+PACKAGES += record_info
+pkg_record_info_name = record_info
+pkg_record_info_description = Convert between record and proplist
+pkg_record_info_homepage = https://github.com/bipthelin/erlang-record_info
+pkg_record_info_fetch = git
+pkg_record_info_repo = https://github.com/bipthelin/erlang-record_info
+pkg_record_info_commit = master
+
+PACKAGES += redgrid
+pkg_redgrid_name = redgrid
+pkg_redgrid_description = Automatic Erlang node discovery via Redis
+pkg_redgrid_homepage = https://github.com/jkvor/redgrid
+pkg_redgrid_fetch = git
+pkg_redgrid_repo = https://github.com/jkvor/redgrid
+pkg_redgrid_commit = master
+
+PACKAGES += redo
+pkg_redo_name = redo
+pkg_redo_description = Pipelined Erlang Redis client
+pkg_redo_homepage = https://github.com/jkvor/redo
+pkg_redo_fetch = git
+pkg_redo_repo = https://github.com/jkvor/redo
+pkg_redo_commit = master
+
+PACKAGES += reload_mk
+pkg_reload_mk_name = reload_mk
+pkg_reload_mk_description = Live reload plugin for erlang.mk.
+pkg_reload_mk_homepage = https://github.com/bullno1/reload.mk
+pkg_reload_mk_fetch = git
+pkg_reload_mk_repo = https://github.com/bullno1/reload.mk
+pkg_reload_mk_commit = master
+
+PACKAGES += reltool_util
+pkg_reltool_util_name = reltool_util
+pkg_reltool_util_description = Erlang reltool utility functionality application
+pkg_reltool_util_homepage = https://github.com/okeuday/reltool_util
+pkg_reltool_util_fetch = git
+pkg_reltool_util_repo = https://github.com/okeuday/reltool_util
+pkg_reltool_util_commit = master
+
+PACKAGES += relx
+pkg_relx_name = relx
+pkg_relx_description = Sane, simple release creation for Erlang
+pkg_relx_homepage = https://github.com/erlware/relx
+pkg_relx_fetch = git
+pkg_relx_repo = https://github.com/erlware/relx
+pkg_relx_commit = master
+
+PACKAGES += resource_discovery
+pkg_resource_discovery_name = resource_discovery
+pkg_resource_discovery_description = An application used to dynamically discover resources present in an Erlang node cluster.
+pkg_resource_discovery_homepage = http://erlware.org/
+pkg_resource_discovery_fetch = git
+pkg_resource_discovery_repo = https://github.com/erlware/resource_discovery
+pkg_resource_discovery_commit = master
+
+PACKAGES += restc
+pkg_restc_name = restc
+pkg_restc_description = Erlang REST client
+pkg_restc_homepage = https://github.com/kivra/restclient
+pkg_restc_fetch = git
+pkg_restc_repo = https://github.com/kivra/restclient
+pkg_restc_commit = master
+
+PACKAGES += rfc4627_jsonrpc
+pkg_rfc4627_jsonrpc_name = rfc4627_jsonrpc
+pkg_rfc4627_jsonrpc_description = Erlang RFC4627 (JSON) codec and JSON-RPC server implementation.
+pkg_rfc4627_jsonrpc_homepage = https://github.com/tonyg/erlang-rfc4627
+pkg_rfc4627_jsonrpc_fetch = git
+pkg_rfc4627_jsonrpc_repo = https://github.com/tonyg/erlang-rfc4627
+pkg_rfc4627_jsonrpc_commit = master
+
+PACKAGES += riak_control
+pkg_riak_control_name = riak_control
+pkg_riak_control_description = Webmachine-based administration interface for Riak.
+pkg_riak_control_homepage = https://github.com/basho/riak_control
+pkg_riak_control_fetch = git
+pkg_riak_control_repo = https://github.com/basho/riak_control
+pkg_riak_control_commit = master
+
+PACKAGES += riak_core
+pkg_riak_core_name = riak_core
+pkg_riak_core_description = Distributed systems infrastructure used by Riak.
+pkg_riak_core_homepage = https://github.com/basho/riak_core
+pkg_riak_core_fetch = git
+pkg_riak_core_repo = https://github.com/basho/riak_core
+pkg_riak_core_commit = master
+
+PACKAGES += riak_dt
+pkg_riak_dt_name = riak_dt
+pkg_riak_dt_description = Convergent replicated datatypes in Erlang
+pkg_riak_dt_homepage = https://github.com/basho/riak_dt
+pkg_riak_dt_fetch = git
+pkg_riak_dt_repo = https://github.com/basho/riak_dt
+pkg_riak_dt_commit = master
+
+PACKAGES += riak_ensemble
+pkg_riak_ensemble_name = riak_ensemble
+pkg_riak_ensemble_description = Multi-Paxos framework in Erlang
+pkg_riak_ensemble_homepage = https://github.com/basho/riak_ensemble
+pkg_riak_ensemble_fetch = git
+pkg_riak_ensemble_repo = https://github.com/basho/riak_ensemble
+pkg_riak_ensemble_commit = master
+
+PACKAGES += riak_kv
+pkg_riak_kv_name = riak_kv
+pkg_riak_kv_description = Riak Key/Value Store
+pkg_riak_kv_homepage = https://github.com/basho/riak_kv
+pkg_riak_kv_fetch = git
+pkg_riak_kv_repo = https://github.com/basho/riak_kv
+pkg_riak_kv_commit = master
+
+PACKAGES += riak_pg
+pkg_riak_pg_name = riak_pg
+pkg_riak_pg_description = Distributed process groups with riak_core.
+pkg_riak_pg_homepage = https://github.com/cmeiklejohn/riak_pg
+pkg_riak_pg_fetch = git
+pkg_riak_pg_repo = https://github.com/cmeiklejohn/riak_pg
+pkg_riak_pg_commit = master
+
+PACKAGES += riak_pipe
+pkg_riak_pipe_name = riak_pipe
+pkg_riak_pipe_description = Riak Pipelines
+pkg_riak_pipe_homepage = https://github.com/basho/riak_pipe
+pkg_riak_pipe_fetch = git
+pkg_riak_pipe_repo = https://github.com/basho/riak_pipe
+pkg_riak_pipe_commit = master
+
+PACKAGES += riak_sysmon
+pkg_riak_sysmon_name = riak_sysmon
+pkg_riak_sysmon_description = Simple OTP app for managing Erlang VM system_monitor event messages
+pkg_riak_sysmon_homepage = https://github.com/basho/riak_sysmon
+pkg_riak_sysmon_fetch = git
+pkg_riak_sysmon_repo = https://github.com/basho/riak_sysmon
+pkg_riak_sysmon_commit = master
+
+PACKAGES += riak_test
+pkg_riak_test_name = riak_test
+pkg_riak_test_description = I'm in your cluster, testing your riaks
+pkg_riak_test_homepage = https://github.com/basho/riak_test
+pkg_riak_test_fetch = git
+pkg_riak_test_repo = https://github.com/basho/riak_test
+pkg_riak_test_commit = master
+
+PACKAGES += riakc
+pkg_riakc_name = riakc
+pkg_riakc_description = Erlang clients for Riak.
+pkg_riakc_homepage = https://github.com/basho/riak-erlang-client
+pkg_riakc_fetch = git
+pkg_riakc_repo = https://github.com/basho/riak-erlang-client
+pkg_riakc_commit = master
+
+PACKAGES += riakhttpc
+pkg_riakhttpc_name = riakhttpc
+pkg_riakhttpc_description = Riak Erlang client using the HTTP interface
+pkg_riakhttpc_homepage = https://github.com/basho/riak-erlang-http-client
+pkg_riakhttpc_fetch = git
+pkg_riakhttpc_repo = https://github.com/basho/riak-erlang-http-client
+pkg_riakhttpc_commit = master
+
+PACKAGES += riaknostic
+pkg_riaknostic_name = riaknostic
+pkg_riaknostic_description = A diagnostic tool for Riak installations, to find common errors asap
+pkg_riaknostic_homepage = https://github.com/basho/riaknostic
+pkg_riaknostic_fetch = git
+pkg_riaknostic_repo = https://github.com/basho/riaknostic
+pkg_riaknostic_commit = master
+
+PACKAGES += riakpool
+pkg_riakpool_name = riakpool
+pkg_riakpool_description = Erlang Riak client pool
+pkg_riakpool_homepage = https://github.com/dweldon/riakpool
+pkg_riakpool_fetch = git
+pkg_riakpool_repo = https://github.com/dweldon/riakpool
+pkg_riakpool_commit = master
+
+PACKAGES += rivus_cep
+pkg_rivus_cep_name = rivus_cep
+pkg_rivus_cep_description = Complex event processing in Erlang
+pkg_rivus_cep_homepage = https://github.com/vascokk/rivus_cep
+pkg_rivus_cep_fetch = git
+pkg_rivus_cep_repo = https://github.com/vascokk/rivus_cep
+pkg_rivus_cep_commit = master
+
+PACKAGES += rlimit
+pkg_rlimit_name = rlimit
+pkg_rlimit_description = Magnus Klaar's rate limiter code from etorrent
+pkg_rlimit_homepage = https://github.com/jlouis/rlimit
+pkg_rlimit_fetch = git
+pkg_rlimit_repo = https://github.com/jlouis/rlimit
+pkg_rlimit_commit = master
+
+PACKAGES += rust_mk
+pkg_rust_mk_name = rust_mk
+pkg_rust_mk_description = Build Rust crates in an Erlang application
+pkg_rust_mk_homepage = https://github.com/goertzenator/rust.mk
+pkg_rust_mk_fetch = git
+pkg_rust_mk_repo = https://github.com/goertzenator/rust.mk
+pkg_rust_mk_commit = master
+
+PACKAGES += safetyvalve
+pkg_safetyvalve_name = safetyvalve
+pkg_safetyvalve_description = A safety valve for your Erlang node
+pkg_safetyvalve_homepage = https://github.com/jlouis/safetyvalve
+pkg_safetyvalve_fetch = git
+pkg_safetyvalve_repo = https://github.com/jlouis/safetyvalve
+pkg_safetyvalve_commit = master
+
+PACKAGES += seestar
+pkg_seestar_name = seestar
+pkg_seestar_description = The Erlang client for Cassandra 1.2+ binary protocol
+pkg_seestar_homepage = https://github.com/iamaleksey/seestar
+pkg_seestar_fetch = git
+pkg_seestar_repo = https://github.com/iamaleksey/seestar
+pkg_seestar_commit = master
+
+PACKAGES += service
+pkg_service_name = service
+pkg_service_description = A minimal Erlang behavior for creating CloudI internal services
+pkg_service_homepage = http://cloudi.org/
+pkg_service_fetch = git
+pkg_service_repo = https://github.com/CloudI/service
+pkg_service_commit = master
+
+PACKAGES += setup
+pkg_setup_name = setup
+pkg_setup_description = Generic setup utility for Erlang-based systems
+pkg_setup_homepage = https://github.com/uwiger/setup
+pkg_setup_fetch = git
+pkg_setup_repo = https://github.com/uwiger/setup
+pkg_setup_commit = master
+
+PACKAGES += sext
+pkg_sext_name = sext
+pkg_sext_description = Sortable Erlang Term Serialization
+pkg_sext_homepage = https://github.com/uwiger/sext
+pkg_sext_fetch = git
+pkg_sext_repo = https://github.com/uwiger/sext
+pkg_sext_commit = master
+
+PACKAGES += sfmt
+pkg_sfmt_name = sfmt
+pkg_sfmt_description = SFMT pseudo random number generator for Erlang.
+pkg_sfmt_homepage = https://github.com/jj1bdx/sfmt-erlang
+pkg_sfmt_fetch = git
+pkg_sfmt_repo = https://github.com/jj1bdx/sfmt-erlang
+pkg_sfmt_commit = master
+
+PACKAGES += sgte
+pkg_sgte_name = sgte
+pkg_sgte_description = A simple Erlang Template Engine
+pkg_sgte_homepage = https://github.com/filippo/sgte
+pkg_sgte_fetch = git
+pkg_sgte_repo = https://github.com/filippo/sgte
+pkg_sgte_commit = master
+
+PACKAGES += sheriff
+pkg_sheriff_name = sheriff
+pkg_sheriff_description = Parse transform for type based validation.
+pkg_sheriff_homepage = http://ninenines.eu
+pkg_sheriff_fetch = git
+pkg_sheriff_repo = https://github.com/extend/sheriff
+pkg_sheriff_commit = master
+
+PACKAGES += shotgun
+pkg_shotgun_name = shotgun
+pkg_shotgun_description = better than just a gun
+pkg_shotgun_homepage = https://github.com/inaka/shotgun
+pkg_shotgun_fetch = git
+pkg_shotgun_repo = https://github.com/inaka/shotgun
+pkg_shotgun_commit = master
+
+PACKAGES += sidejob
+pkg_sidejob_name = sidejob
+pkg_sidejob_description = Parallel worker and capacity limiting library for Erlang
+pkg_sidejob_homepage = https://github.com/basho/sidejob
+pkg_sidejob_fetch = git
+pkg_sidejob_repo = https://github.com/basho/sidejob
+pkg_sidejob_commit = master
+
+PACKAGES += sieve
+pkg_sieve_name = sieve
+pkg_sieve_description = sieve is a simple TCP routing proxy (layer 7) in Erlang
+pkg_sieve_homepage = https://github.com/benoitc/sieve
+pkg_sieve_fetch = git
+pkg_sieve_repo = https://github.com/benoitc/sieve
+pkg_sieve_commit = master
+
+PACKAGES += sighandler
+pkg_sighandler_name = sighandler
+pkg_sighandler_description = Handle UNIX signals in Erlang
+pkg_sighandler_homepage = https://github.com/jkingsbery/sighandler
+pkg_sighandler_fetch = git
+pkg_sighandler_repo = https://github.com/jkingsbery/sighandler
+pkg_sighandler_commit = master
+
+PACKAGES += simhash
+pkg_simhash_name = simhash
+pkg_simhash_description = Simhashing for Erlang -- hashing algorithm to find near-duplicates in binary data.
+pkg_simhash_homepage = https://github.com/ferd/simhash
+pkg_simhash_fetch = git
+pkg_simhash_repo = https://github.com/ferd/simhash
+pkg_simhash_commit = master
+
+PACKAGES += simple_bridge
+pkg_simple_bridge_name = simple_bridge
+pkg_simple_bridge_description = A simple, standardized interface library to Erlang HTTP Servers.
+pkg_simple_bridge_homepage = https://github.com/nitrogen/simple_bridge
+pkg_simple_bridge_fetch = git
+pkg_simple_bridge_repo = https://github.com/nitrogen/simple_bridge
+pkg_simple_bridge_commit = master
+
+PACKAGES += simple_oauth2
+pkg_simple_oauth2_name = simple_oauth2
+pkg_simple_oauth2_description = Simple Erlang OAuth2 client module for any HTTP server framework (Google, Facebook, Yandex, Vkontakte are preconfigured)
+pkg_simple_oauth2_homepage = https://github.com/virtan/simple_oauth2
+pkg_simple_oauth2_fetch = git
+pkg_simple_oauth2_repo = https://github.com/virtan/simple_oauth2
+pkg_simple_oauth2_commit = master
+
+PACKAGES += skel
+pkg_skel_name = skel
+pkg_skel_description = A Streaming Process-based Skeleton Library for Erlang
+pkg_skel_homepage = https://github.com/ParaPhrase/skel
+pkg_skel_fetch = git
+pkg_skel_repo = https://github.com/ParaPhrase/skel
+pkg_skel_commit = master
+
+PACKAGES += slack
+pkg_slack_name = slack
+pkg_slack_description = Minimal slack notification OTP library.
+pkg_slack_homepage = https://github.com/DonBranson/slack
+pkg_slack_fetch = git
+pkg_slack_repo = https://github.com/DonBranson/slack.git
+pkg_slack_commit = master
+
+PACKAGES += smother
+pkg_smother_name = smother
+pkg_smother_description = Extended code coverage metrics for Erlang.
+pkg_smother_homepage = https://ramsay-t.github.io/Smother/
+pkg_smother_fetch = git
+pkg_smother_repo = https://github.com/ramsay-t/Smother
+pkg_smother_commit = master
+
+PACKAGES += snappyer
+pkg_snappyer_name = snappyer
+pkg_snappyer_description = Snappy as a NIF for Erlang
+pkg_snappyer_homepage = https://github.com/zmstone/snappyer
+pkg_snappyer_fetch = git
+pkg_snappyer_repo = https://github.com/zmstone/snappyer.git
+pkg_snappyer_commit = master
+
+PACKAGES += social
+pkg_social_name = social
+pkg_social_description = Cowboy handler for social login via OAuth2 providers
+pkg_social_homepage = https://github.com/dvv/social
+pkg_social_fetch = git
+pkg_social_repo = https://github.com/dvv/social
+pkg_social_commit = master
+
+PACKAGES += spapi_router
+pkg_spapi_router_name = spapi_router
+pkg_spapi_router_description = Partially-connected Erlang clustering
+pkg_spapi_router_homepage = https://github.com/spilgames/spapi-router
+pkg_spapi_router_fetch = git
+pkg_spapi_router_repo = https://github.com/spilgames/spapi-router
+pkg_spapi_router_commit = master
+
+PACKAGES += sqerl
+pkg_sqerl_name = sqerl
+pkg_sqerl_description = An Erlang-flavoured SQL DSL
+pkg_sqerl_homepage = https://github.com/hairyhum/sqerl
+pkg_sqerl_fetch = git
+pkg_sqerl_repo = https://github.com/hairyhum/sqerl
+pkg_sqerl_commit = master
+
+PACKAGES += srly
+pkg_srly_name = srly
+pkg_srly_description = Native Erlang Unix serial interface
+pkg_srly_homepage = https://github.com/msantos/srly
+pkg_srly_fetch = git
+pkg_srly_repo = https://github.com/msantos/srly
+pkg_srly_commit = master
+
+PACKAGES += sshrpc
+pkg_sshrpc_name = sshrpc
+pkg_sshrpc_description = Erlang SSH RPC module (experimental)
+pkg_sshrpc_homepage = https://github.com/jj1bdx/sshrpc
+pkg_sshrpc_fetch = git
+pkg_sshrpc_repo = https://github.com/jj1bdx/sshrpc
+pkg_sshrpc_commit = master
+
+PACKAGES += stable
+pkg_stable_name = stable
+pkg_stable_description = Library of assorted helpers for Cowboy web server.
+pkg_stable_homepage = https://github.com/dvv/stable
+pkg_stable_fetch = git
+pkg_stable_repo = https://github.com/dvv/stable
+pkg_stable_commit = master
+
+PACKAGES += statebox
+pkg_statebox_name = statebox
+pkg_statebox_description = Erlang state monad with merge/conflict-resolution capabilities. Useful for Riak.
+pkg_statebox_homepage = https://github.com/mochi/statebox
+pkg_statebox_fetch = git
+pkg_statebox_repo = https://github.com/mochi/statebox
+pkg_statebox_commit = master
+
+PACKAGES += statebox_riak
+pkg_statebox_riak_name = statebox_riak
+pkg_statebox_riak_description = Convenience library that makes it easier to use statebox with riak, extracted from best practices in our production code at Mochi Media.
+pkg_statebox_riak_homepage = https://github.com/mochi/statebox_riak
+pkg_statebox_riak_fetch = git
+pkg_statebox_riak_repo = https://github.com/mochi/statebox_riak
+pkg_statebox_riak_commit = master
+
+PACKAGES += statman
+pkg_statman_name = statman
+pkg_statman_description = Efficiently collect massive volumes of metrics inside the Erlang VM
+pkg_statman_homepage = https://github.com/knutin/statman
+pkg_statman_fetch = git
+pkg_statman_repo = https://github.com/knutin/statman
+pkg_statman_commit = master
+
+PACKAGES += statsderl
+pkg_statsderl_name = statsderl
+pkg_statsderl_description = StatsD client (Erlang)
+pkg_statsderl_homepage = https://github.com/lpgauth/statsderl
+pkg_statsderl_fetch = git
+pkg_statsderl_repo = https://github.com/lpgauth/statsderl
+pkg_statsderl_commit = master
+
+PACKAGES += stdinout_pool
+pkg_stdinout_pool_name = stdinout_pool
+pkg_stdinout_pool_description = stdinout_pool : stuff goes in, stuff goes out. there's never any miscommunication.
+pkg_stdinout_pool_homepage = https://github.com/mattsta/erlang-stdinout-pool
+pkg_stdinout_pool_fetch = git
+pkg_stdinout_pool_repo = https://github.com/mattsta/erlang-stdinout-pool
+pkg_stdinout_pool_commit = master
+
+PACKAGES += stockdb
+pkg_stockdb_name = stockdb
+pkg_stockdb_description = Database for storing stock exchange quotes in Erlang
+pkg_stockdb_homepage = https://github.com/maxlapshin/stockdb
+pkg_stockdb_fetch = git
+pkg_stockdb_repo = https://github.com/maxlapshin/stockdb
+pkg_stockdb_commit = master
+
+PACKAGES += stripe
+pkg_stripe_name = stripe
+pkg_stripe_description = Erlang interface to the stripe.com API
+pkg_stripe_homepage = https://github.com/mattsta/stripe-erlang
+pkg_stripe_fetch = git
+pkg_stripe_repo = https://github.com/mattsta/stripe-erlang
+pkg_stripe_commit = v1
+
+PACKAGES += subproc
+pkg_subproc_name = subproc
+pkg_subproc_description = unix subprocess manager with {active,once|false} modes
+pkg_subproc_homepage = http://dozzie.jarowit.net/trac/wiki/subproc
+pkg_subproc_fetch = git
+pkg_subproc_repo = https://github.com/dozzie/subproc
+pkg_subproc_commit = v0.1.0
+
+PACKAGES += supervisor3
+pkg_supervisor3_name = supervisor3
+pkg_supervisor3_description = OTP supervisor with additional strategies
+pkg_supervisor3_homepage = https://github.com/klarna/supervisor3
+pkg_supervisor3_fetch = git
+pkg_supervisor3_repo = https://github.com/klarna/supervisor3.git
+pkg_supervisor3_commit = master
+
+PACKAGES += surrogate
+pkg_surrogate_name = surrogate
+pkg_surrogate_description = Proxy server written in Erlang. Supports reverse proxy load balancing and forward proxy with HTTP (including CONNECT), SOCKS4, SOCKS5, and transparent proxy modes.
+pkg_surrogate_homepage = https://github.com/skruger/Surrogate
+pkg_surrogate_fetch = git
+pkg_surrogate_repo = https://github.com/skruger/Surrogate
+pkg_surrogate_commit = master
+
+PACKAGES += swab
+pkg_swab_name = swab
+pkg_swab_description = General purpose buffer handling module
+pkg_swab_homepage = https://github.com/crownedgrouse/swab
+pkg_swab_fetch = git
+pkg_swab_repo = https://github.com/crownedgrouse/swab
+pkg_swab_commit = master
+
+PACKAGES += swarm
+pkg_swarm_name = swarm
+pkg_swarm_description = Fast and simple acceptor pool for Erlang
+pkg_swarm_homepage = https://github.com/jeremey/swarm
+pkg_swarm_fetch = git
+pkg_swarm_repo = https://github.com/jeremey/swarm
+pkg_swarm_commit = master
+
+PACKAGES += switchboard
+pkg_switchboard_name = switchboard
+pkg_switchboard_description = A framework for processing email using worker plugins.
+pkg_switchboard_homepage = https://github.com/thusfresh/switchboard
+pkg_switchboard_fetch = git
+pkg_switchboard_repo = https://github.com/thusfresh/switchboard
+pkg_switchboard_commit = master
+
+PACKAGES += syn
+pkg_syn_name = syn
+pkg_syn_description = A global Process Registry and Process Group manager for Erlang.
+pkg_syn_homepage = https://github.com/ostinelli/syn
+pkg_syn_fetch = git
+pkg_syn_repo = https://github.com/ostinelli/syn
+pkg_syn_commit = master
+
+PACKAGES += sync
+pkg_sync_name = sync
+pkg_sync_description = On-the-fly recompiling and reloading in Erlang.
+pkg_sync_homepage = https://github.com/rustyio/sync
+pkg_sync_fetch = git
+pkg_sync_repo = https://github.com/rustyio/sync
+pkg_sync_commit = master
+
+PACKAGES += syntaxerl
+pkg_syntaxerl_name = syntaxerl
+pkg_syntaxerl_description = Syntax checker for Erlang
+pkg_syntaxerl_homepage = https://github.com/ten0s/syntaxerl
+pkg_syntaxerl_fetch = git
+pkg_syntaxerl_repo = https://github.com/ten0s/syntaxerl
+pkg_syntaxerl_commit = master
+
+PACKAGES += syslog
+pkg_syslog_name = syslog
+pkg_syslog_description = Erlang port driver for interacting with syslog via syslog(3)
+pkg_syslog_homepage = https://github.com/Vagabond/erlang-syslog
+pkg_syslog_fetch = git
+pkg_syslog_repo = https://github.com/Vagabond/erlang-syslog
+pkg_syslog_commit = master
+
+PACKAGES += taskforce
+pkg_taskforce_name = taskforce
+pkg_taskforce_description = Erlang worker pools for controlled parallelisation of arbitrary tasks.
+pkg_taskforce_homepage = https://github.com/g-andrade/taskforce
+pkg_taskforce_fetch = git
+pkg_taskforce_repo = https://github.com/g-andrade/taskforce
+pkg_taskforce_commit = master
+
+PACKAGES += tddreloader
+pkg_tddreloader_name = tddreloader
+pkg_tddreloader_description = Shell utility for recompiling, reloading, and testing code as it changes
+pkg_tddreloader_homepage = https://github.com/version2beta/tddreloader
+pkg_tddreloader_fetch = git
+pkg_tddreloader_repo = https://github.com/version2beta/tddreloader
+pkg_tddreloader_commit = master
+
+PACKAGES += tempo
+pkg_tempo_name = tempo
+pkg_tempo_description = NIF-based date and time parsing and formatting for Erlang.
+pkg_tempo_homepage = https://github.com/selectel/tempo
+pkg_tempo_fetch = git
+pkg_tempo_repo = https://github.com/selectel/tempo
+pkg_tempo_commit = master
+
+PACKAGES += ticktick
+pkg_ticktick_name = ticktick
+pkg_ticktick_description = Ticktick is an ID generator for message services.
+pkg_ticktick_homepage = https://github.com/ericliang/ticktick
+pkg_ticktick_fetch = git
+pkg_ticktick_repo = https://github.com/ericliang/ticktick
+pkg_ticktick_commit = master
+
+PACKAGES += tinymq
+pkg_tinymq_name = tinymq
+pkg_tinymq_description = TinyMQ - a diminutive, in-memory message queue
+pkg_tinymq_homepage = https://github.com/ChicagoBoss/tinymq
+pkg_tinymq_fetch = git
+pkg_tinymq_repo = https://github.com/ChicagoBoss/tinymq
+pkg_tinymq_commit = master
+
+PACKAGES += tinymt
+pkg_tinymt_name = tinymt
+pkg_tinymt_description = TinyMT pseudo random number generator for Erlang.
+pkg_tinymt_homepage = https://github.com/jj1bdx/tinymt-erlang
+pkg_tinymt_fetch = git
+pkg_tinymt_repo = https://github.com/jj1bdx/tinymt-erlang
+pkg_tinymt_commit = master
+
+PACKAGES += tirerl
+pkg_tirerl_name = tirerl
+pkg_tirerl_description = Erlang interface to Elasticsearch
+pkg_tirerl_homepage = https://github.com/inaka/tirerl
+pkg_tirerl_fetch = git
+pkg_tirerl_repo = https://github.com/inaka/tirerl
+pkg_tirerl_commit = master
+
+PACKAGES += toml
+pkg_toml_name = toml
+pkg_toml_description = TOML (0.4.0) config parser
+pkg_toml_homepage = http://dozzie.jarowit.net/trac/wiki/TOML
+pkg_toml_fetch = git
+pkg_toml_repo = https://github.com/dozzie/toml
+pkg_toml_commit = v0.2.0
+
+PACKAGES += traffic_tools
+pkg_traffic_tools_name = traffic_tools
+pkg_traffic_tools_description = Simple traffic limiting library
+pkg_traffic_tools_homepage = https://github.com/systra/traffic_tools
+pkg_traffic_tools_fetch = git
+pkg_traffic_tools_repo = https://github.com/systra/traffic_tools
+pkg_traffic_tools_commit = master
+
+PACKAGES += trails
+pkg_trails_name = trails
+pkg_trails_description = A couple of improvements over Cowboy Routes
+pkg_trails_homepage = http://inaka.github.io/cowboy-trails/
+pkg_trails_fetch = git
+pkg_trails_repo = https://github.com/inaka/cowboy-trails
+pkg_trails_commit = master
+
+PACKAGES += trane
+pkg_trane_name = trane
+pkg_trane_description = SAX style broken HTML parser in Erlang
+pkg_trane_homepage = https://github.com/massemanet/trane
+pkg_trane_fetch = git
+pkg_trane_repo = https://github.com/massemanet/trane
+pkg_trane_commit = master
+
+PACKAGES += transit
+pkg_transit_name = transit
+pkg_transit_description = Transit format for Erlang
+pkg_transit_homepage = https://github.com/isaiah/transit-erlang
+pkg_transit_fetch = git
+pkg_transit_repo = https://github.com/isaiah/transit-erlang
+pkg_transit_commit = master
+
+PACKAGES += trie
+pkg_trie_name = trie
+pkg_trie_description = Erlang Trie Implementation
+pkg_trie_homepage = https://github.com/okeuday/trie
+pkg_trie_fetch = git
+pkg_trie_repo = https://github.com/okeuday/trie
+pkg_trie_commit = master
+
+PACKAGES += triq
+pkg_triq_name = triq
+pkg_triq_description = Trifork QuickCheck
+pkg_triq_homepage = https://triq.gitlab.io
+pkg_triq_fetch = git
+pkg_triq_repo = https://gitlab.com/triq/triq.git
+pkg_triq_commit = master
+
+PACKAGES += tunctl
+pkg_tunctl_name = tunctl
+pkg_tunctl_description = Erlang TUN/TAP interface
+pkg_tunctl_homepage = https://github.com/msantos/tunctl
+pkg_tunctl_fetch = git
+pkg_tunctl_repo = https://github.com/msantos/tunctl
+pkg_tunctl_commit = master
+
+PACKAGES += twerl
+pkg_twerl_name = twerl
+pkg_twerl_description = Erlang client for the Twitter Streaming API
+pkg_twerl_homepage = https://github.com/lucaspiller/twerl
+pkg_twerl_fetch = git
+pkg_twerl_repo = https://github.com/lucaspiller/twerl
+pkg_twerl_commit = oauth
+
+PACKAGES += twitter_erlang
+pkg_twitter_erlang_name = twitter_erlang
+pkg_twitter_erlang_description = An Erlang Twitter client
+pkg_twitter_erlang_homepage = https://github.com/ngerakines/erlang_twitter
+pkg_twitter_erlang_fetch = git
+pkg_twitter_erlang_repo = https://github.com/ngerakines/erlang_twitter
+pkg_twitter_erlang_commit = master
+
+PACKAGES += ucol_nif
+pkg_ucol_nif_name = ucol_nif
+pkg_ucol_nif_description = ICU based collation Erlang module
+pkg_ucol_nif_homepage = https://github.com/refuge/ucol_nif
+pkg_ucol_nif_fetch = git
+pkg_ucol_nif_repo = https://github.com/refuge/ucol_nif
+pkg_ucol_nif_commit = master
+
+PACKAGES += unicorn
+pkg_unicorn_name = unicorn
+pkg_unicorn_description = Generic configuration server
+pkg_unicorn_homepage = https://github.com/shizzard/unicorn
+pkg_unicorn_fetch = git
+pkg_unicorn_repo = https://github.com/shizzard/unicorn
+pkg_unicorn_commit = master
+
+PACKAGES += unsplit
+pkg_unsplit_name = unsplit
+pkg_unsplit_description = Resolves conflicts in Mnesia after network splits
+pkg_unsplit_homepage = https://github.com/uwiger/unsplit
+pkg_unsplit_fetch = git
+pkg_unsplit_repo = https://github.com/uwiger/unsplit
+pkg_unsplit_commit = master
+
+PACKAGES += uuid
+pkg_uuid_name = uuid
+pkg_uuid_description = Erlang UUID Implementation
+pkg_uuid_homepage = https://github.com/okeuday/uuid
+pkg_uuid_fetch = git
+pkg_uuid_repo = https://github.com/okeuday/uuid
+pkg_uuid_commit = master
+
+PACKAGES += ux
+pkg_ux_name = ux
+pkg_ux_description = Unicode eXtension for Erlang (Strings, Collation)
+pkg_ux_homepage = https://github.com/erlang-unicode/ux
+pkg_ux_fetch = git
+pkg_ux_repo = https://github.com/erlang-unicode/ux
+pkg_ux_commit = master
+
+PACKAGES += vert
+pkg_vert_name = vert
+pkg_vert_description = Erlang binding to libvirt virtualization API
+pkg_vert_homepage = https://github.com/msantos/erlang-libvirt
+pkg_vert_fetch = git
+pkg_vert_repo = https://github.com/msantos/erlang-libvirt
+pkg_vert_commit = master
+
+PACKAGES += verx
+pkg_verx_name = verx
+pkg_verx_description = Erlang implementation of the libvirtd remote protocol
+pkg_verx_homepage = https://github.com/msantos/verx
+pkg_verx_fetch = git
+pkg_verx_repo = https://github.com/msantos/verx
+pkg_verx_commit = master
+
+PACKAGES += vmq_acl
+pkg_vmq_acl_name = vmq_acl
+pkg_vmq_acl_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_acl_homepage = https://verne.mq/
+pkg_vmq_acl_fetch = git
+pkg_vmq_acl_repo = https://github.com/erlio/vmq_acl
+pkg_vmq_acl_commit = master
+
+PACKAGES += vmq_bridge
+pkg_vmq_bridge_name = vmq_bridge
+pkg_vmq_bridge_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_bridge_homepage = https://verne.mq/
+pkg_vmq_bridge_fetch = git
+pkg_vmq_bridge_repo = https://github.com/erlio/vmq_bridge
+pkg_vmq_bridge_commit = master
+
+PACKAGES += vmq_graphite
+pkg_vmq_graphite_name = vmq_graphite
+pkg_vmq_graphite_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_graphite_homepage = https://verne.mq/
+pkg_vmq_graphite_fetch = git
+pkg_vmq_graphite_repo = https://github.com/erlio/vmq_graphite
+pkg_vmq_graphite_commit = master
+
+PACKAGES += vmq_passwd
+pkg_vmq_passwd_name = vmq_passwd
+pkg_vmq_passwd_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_passwd_homepage = https://verne.mq/
+pkg_vmq_passwd_fetch = git
+pkg_vmq_passwd_repo = https://github.com/erlio/vmq_passwd
+pkg_vmq_passwd_commit = master
+
+PACKAGES += vmq_server
+pkg_vmq_server_name = vmq_server
+pkg_vmq_server_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_server_homepage = https://verne.mq/
+pkg_vmq_server_fetch = git
+pkg_vmq_server_repo = https://github.com/erlio/vmq_server
+pkg_vmq_server_commit = master
+
+PACKAGES += vmq_snmp
+pkg_vmq_snmp_name = vmq_snmp
+pkg_vmq_snmp_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_snmp_homepage = https://verne.mq/
+pkg_vmq_snmp_fetch = git
+pkg_vmq_snmp_repo = https://github.com/erlio/vmq_snmp
+pkg_vmq_snmp_commit = master
+
+PACKAGES += vmq_systree
+pkg_vmq_systree_name = vmq_systree
+pkg_vmq_systree_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_systree_homepage = https://verne.mq/
+pkg_vmq_systree_fetch = git
+pkg_vmq_systree_repo = https://github.com/erlio/vmq_systree
+pkg_vmq_systree_commit = master
+
+PACKAGES += vmstats
+pkg_vmstats_name = vmstats
+pkg_vmstats_description = Tiny Erlang app that works in conjunction with statsderl to generate Erlang VM metrics for graphite.
+pkg_vmstats_homepage = https://github.com/ferd/vmstats
+pkg_vmstats_fetch = git
+pkg_vmstats_repo = https://github.com/ferd/vmstats
+pkg_vmstats_commit = master
+
+PACKAGES += walrus
+pkg_walrus_name = walrus
+pkg_walrus_description = Walrus - Mustache-like Templating
+pkg_walrus_homepage = https://github.com/devinus/walrus
+pkg_walrus_fetch = git
+pkg_walrus_repo = https://github.com/devinus/walrus
+pkg_walrus_commit = master
+
+PACKAGES += webmachine
+pkg_webmachine_name = webmachine
+pkg_webmachine_description = A REST-based system for building web applications.
+pkg_webmachine_homepage = https://github.com/basho/webmachine
+pkg_webmachine_fetch = git
+pkg_webmachine_repo = https://github.com/basho/webmachine
+pkg_webmachine_commit = master
+
+PACKAGES += websocket_client
+pkg_websocket_client_name = websocket_client
+pkg_websocket_client_description = Erlang websocket client (ws and wss supported)
+pkg_websocket_client_homepage = https://github.com/jeremyong/websocket_client
+pkg_websocket_client_fetch = git
+pkg_websocket_client_repo = https://github.com/jeremyong/websocket_client
+pkg_websocket_client_commit = master
+
+PACKAGES += worker_pool
+pkg_worker_pool_name = worker_pool
+pkg_worker_pool_description = A simple Erlang worker pool
+pkg_worker_pool_homepage = https://github.com/inaka/worker_pool
+pkg_worker_pool_fetch = git
+pkg_worker_pool_repo = https://github.com/inaka/worker_pool
+pkg_worker_pool_commit = master
+
+PACKAGES += wrangler
+pkg_wrangler_name = wrangler
+pkg_wrangler_description = Import of the Wrangler svn repository.
+pkg_wrangler_homepage = http://www.cs.kent.ac.uk/projects/wrangler/Home.html
+pkg_wrangler_fetch = git
+pkg_wrangler_repo = https://github.com/RefactoringTools/wrangler
+pkg_wrangler_commit = master
+
+PACKAGES += wsock
+pkg_wsock_name = wsock
+pkg_wsock_description = Erlang library to build WebSocket clients and servers
+pkg_wsock_homepage = https://github.com/madtrick/wsock
+pkg_wsock_fetch = git
+pkg_wsock_repo = https://github.com/madtrick/wsock
+pkg_wsock_commit = master
+
+PACKAGES += xhttpc
+pkg_xhttpc_name = xhttpc
+pkg_xhttpc_description = Extensible HTTP Client for Erlang
+pkg_xhttpc_homepage = https://github.com/seriyps/xhttpc
+pkg_xhttpc_fetch = git
+pkg_xhttpc_repo = https://github.com/seriyps/xhttpc
+pkg_xhttpc_commit = master
+
+PACKAGES += xref_runner
+pkg_xref_runner_name = xref_runner
+pkg_xref_runner_description = Erlang Xref Runner (inspired by rebar xref)
+pkg_xref_runner_homepage = https://github.com/inaka/xref_runner
+pkg_xref_runner_fetch = git
+pkg_xref_runner_repo = https://github.com/inaka/xref_runner
+pkg_xref_runner_commit = master
+
+PACKAGES += yamerl
+pkg_yamerl_name = yamerl
+pkg_yamerl_description = YAML 1.2 parser in pure Erlang
+pkg_yamerl_homepage = https://github.com/yakaz/yamerl
+pkg_yamerl_fetch = git
+pkg_yamerl_repo = https://github.com/yakaz/yamerl
+pkg_yamerl_commit = master
+
+PACKAGES += yamler
+pkg_yamler_name = yamler
+pkg_yamler_description = libyaml-based YAML loader for Erlang
+pkg_yamler_homepage = https://github.com/goertzenator/yamler
+pkg_yamler_fetch = git
+pkg_yamler_repo = https://github.com/goertzenator/yamler
+pkg_yamler_commit = master
+
+PACKAGES += yaws
+pkg_yaws_name = yaws
+pkg_yaws_description = Yaws webserver
+pkg_yaws_homepage = http://yaws.hyber.org
+pkg_yaws_fetch = git
+pkg_yaws_repo = https://github.com/klacke/yaws
+pkg_yaws_commit = master
+
+PACKAGES += zab_engine
+pkg_zab_engine_name = zab_engine
+pkg_zab_engine_description = Zab protocol implemented in Erlang
+pkg_zab_engine_homepage = https://github.com/xinmingyao/zab_engine
+pkg_zab_engine_fetch = git
+pkg_zab_engine_repo = https://github.com/xinmingyao/zab_engine
+pkg_zab_engine_commit = master
+
+PACKAGES += zabbix_sender
+pkg_zabbix_sender_name = zabbix_sender
+pkg_zabbix_sender_description = Zabbix trapper for sending data to Zabbix in pure Erlang
+pkg_zabbix_sender_homepage = https://github.com/stalkermn/zabbix_sender
+pkg_zabbix_sender_fetch = git
+pkg_zabbix_sender_repo = https://github.com/stalkermn/zabbix_sender.git
+pkg_zabbix_sender_commit = master
+
+PACKAGES += zeta
+pkg_zeta_name = zeta
+pkg_zeta_description = HTTP access log parser in Erlang
+pkg_zeta_homepage = https://github.com/s1n4/zeta
+pkg_zeta_fetch = git
+pkg_zeta_repo = https://github.com/s1n4/zeta
+pkg_zeta_commit = master
+
+PACKAGES += zippers
+pkg_zippers_name = zippers
+pkg_zippers_description = A library for functional zipper data structures in Erlang
+pkg_zippers_homepage = https://github.com/ferd/zippers
+pkg_zippers_fetch = git
+pkg_zippers_repo = https://github.com/ferd/zippers
+pkg_zippers_commit = master
+
+PACKAGES += zlists
+pkg_zlists_name = zlists
+pkg_zlists_description = Erlang lazy lists library.
+pkg_zlists_homepage = https://github.com/vjache/erlang-zlists
+pkg_zlists_fetch = git
+pkg_zlists_repo = https://github.com/vjache/erlang-zlists
+pkg_zlists_commit = master
+
+PACKAGES += zraft_lib
+pkg_zraft_lib_name = zraft_lib
+pkg_zraft_lib_description = Erlang raft consensus protocol implementation
+pkg_zraft_lib_homepage = https://github.com/dreyk/zraft_lib
+pkg_zraft_lib_fetch = git
+pkg_zraft_lib_repo = https://github.com/dreyk/zraft_lib
+pkg_zraft_lib_commit = master
+
+PACKAGES += zucchini
+pkg_zucchini_name = zucchini
+pkg_zucchini_description = An Erlang INI parser
+pkg_zucchini_homepage = https://github.com/devinus/zucchini
+pkg_zucchini_fetch = git
+pkg_zucchini_repo = https://github.com/devinus/zucchini
+pkg_zucchini_commit = master
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: search
+
+define pkg_print
+ $(verbose) printf "%s\n" \
+ $(if $(call core_eq,$(1),$(pkg_$(1)_name)),,"Pkg name: $(1)") \
+ "App name: $(pkg_$(1)_name)" \
+ "Description: $(pkg_$(1)_description)" \
+ "Home page: $(pkg_$(1)_homepage)" \
+ "Fetch with: $(pkg_$(1)_fetch)" \
+ "Repository: $(pkg_$(1)_repo)" \
+ "Commit: $(pkg_$(1)_commit)" \
+ ""
+
+endef
+
+search:
+ifdef q
+ $(foreach p,$(PACKAGES), \
+ $(if $(findstring $(call core_lc,$(q)),$(call core_lc,$(pkg_$(p)_name) $(pkg_$(p)_description))), \
+ $(call pkg_print,$(p))))
+else
+ $(foreach p,$(PACKAGES),$(call pkg_print,$(p)))
+endif
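+
+# Example usage (illustrative): `make search q=uuid` prints every package
+# whose name or description contains "uuid"; `make search` alone lists them all.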
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-deps clean-tmp-deps.log
+
+# Configuration.
+
+ifdef OTP_DEPS
+$(warning The variable OTP_DEPS is deprecated in favor of LOCAL_DEPS.)
+endif
+
+IGNORE_DEPS ?=
+export IGNORE_DEPS
+
+APPS_DIR ?= $(CURDIR)/apps
+export APPS_DIR
+
+DEPS_DIR ?= $(CURDIR)/deps
+export DEPS_DIR
+
+REBAR_DEPS_DIR = $(DEPS_DIR)
+export REBAR_DEPS_DIR
+
+REBAR_GIT ?= https://github.com/rebar/rebar
+REBAR_COMMIT ?= 576e12171ab8d69b048b827b92aa65d067deea01
+
+# External "early" plugins (see core/plugins.mk for regular plugins).
+# They both use the core_dep_plugin macro.
+
+define core_dep_plugin
+ifeq ($(2),$(PROJECT))
+-include $$(patsubst $(PROJECT)/%,%,$(1))
+else
+-include $(DEPS_DIR)/$(1)
+
+$(DEPS_DIR)/$(1): $(DEPS_DIR)/$(2) ;
+endif
+endef
+
+DEP_EARLY_PLUGINS ?=
+
+$(foreach p,$(DEP_EARLY_PLUGINS),\
+ $(eval $(if $(findstring /,$p),\
+ $(call core_dep_plugin,$p,$(firstword $(subst /, ,$p))),\
+ $(call core_dep_plugin,$p/early-plugins.mk,$p))))
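+
+# Illustrative usage (dependency names hypothetical): entries either name a
+# dependency, to load its early-plugins.mk, or point at a specific plugin
+# file within one:
+#   DEP_EARLY_PLUGINS = some_dep other_dep/mk/early.mk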
+
+# Query functions.
+
+query_fetch_method = $(if $(dep_$(1)),$(call _qfm_dep,$(word 1,$(dep_$(1)))),$(call _qfm_pkg,$(1)))
+_qfm_dep = $(if $(dep_fetch_$(1)),$(1),$(if $(IS_DEP),legacy,fail))
+_qfm_pkg = $(if $(pkg_$(1)_fetch),$(pkg_$(1)_fetch),fail)
+
+query_name = $(if $(dep_$(1)),$(1),$(if $(pkg_$(1)_name),$(pkg_$(1)_name),$(1)))
+
+query_repo = $(call _qr,$(1),$(call query_fetch_method,$(1)))
+_qr = $(if $(query_repo_$(2)),$(call query_repo_$(2),$(1)),$(call dep_repo,$(1)))
+
+query_repo_default = $(if $(dep_$(1)),$(word 2,$(dep_$(1))),$(pkg_$(1)_repo))
+query_repo_git = $(patsubst git://github.com/%,https://github.com/%,$(call query_repo_default,$(1)))
+query_repo_git-subfolder = $(call query_repo_git,$(1))
+query_repo_git-submodule = -
+query_repo_hg = $(call query_repo_default,$(1))
+query_repo_svn = $(call query_repo_default,$(1))
+query_repo_cp = $(call query_repo_default,$(1))
+query_repo_ln = $(call query_repo_default,$(1))
+query_repo_hex = https://hex.pm/packages/$(if $(word 3,$(dep_$(1))),$(word 3,$(dep_$(1))),$(1))
+query_repo_fail = -
+query_repo_legacy = -
+
+query_version = $(call _qv,$(1),$(call query_fetch_method,$(1)))
+_qv = $(if $(query_version_$(2)),$(call query_version_$(2),$(1)),$(call dep_commit,$(1)))
+
+query_version_default = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(word 3,$(dep_$(1))),$(pkg_$(1)_commit)))
+query_version_git = $(call query_version_default,$(1))
+query_version_git-subfolder = $(call query_version_git,$(1))
+query_version_git-submodule = -
+query_version_hg = $(call query_version_default,$(1))
+query_version_svn = -
+query_version_cp = -
+query_version_ln = -
+query_version_hex = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(word 2,$(dep_$(1))),$(pkg_$(1)_commit)))
+query_version_fail = -
+query_version_legacy = -
+
+query_extra = $(call _qe,$(1),$(call query_fetch_method,$(1)))
+_qe = $(if $(query_extra_$(2)),$(call query_extra_$(2),$(1)),-)
+
+query_extra_git = -
+query_extra_git-subfolder = $(if $(dep_$(1)),subfolder=$(word 4,$(dep_$(1))),-)
+query_extra_git-submodule = -
+query_extra_hg = -
+query_extra_svn = -
+query_extra_cp = -
+query_extra_ln = -
+query_extra_hex = $(if $(dep_$(1)),package-name=$(word 3,$(dep_$(1))),-)
+query_extra_fail = -
+query_extra_legacy = -
+
+query_absolute_path = $(addprefix $(DEPS_DIR)/,$(call query_name,$(1)))
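+
+# For example (dependency, URL and commit hypothetical), given
+#   dep_cowboy = git https://github.com/ninenines/cowboy 2.9.0
+# query_fetch_method yields git, query_repo the repository URL and
+# query_version the commit 2.9.0.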
+
+# Deprecated legacy query functions.
+dep_fetch = $(call query_fetch_method,$(1))
+dep_name = $(call query_name,$(1))
+dep_repo = $(call query_repo_git,$(1))
+dep_commit = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(if $(filter hex,$(word 1,$(dep_$(1)))),$(word 2,$(dep_$(1))),$(word 3,$(dep_$(1)))),$(pkg_$(1)_commit)))
+
+LOCAL_DEPS_DIRS = $(foreach a,$(LOCAL_DEPS),$(if $(wildcard $(APPS_DIR)/$(a)),$(APPS_DIR)/$(a)))
+ALL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(foreach dep,$(filter-out $(IGNORE_DEPS),$(BUILD_DEPS) $(DEPS)),$(call dep_name,$(dep))))
+
+# When calling an app directly we don't want to include it here;
+# otherwise it would be treated as both an app and a top-level project.
+ALL_APPS_DIRS = $(if $(wildcard $(APPS_DIR)/),$(filter-out $(APPS_DIR),$(shell find $(APPS_DIR) -maxdepth 1 -type d)))
+ifdef ROOT_DIR
+ifndef IS_APP
+ALL_APPS_DIRS := $(filter-out $(APPS_DIR)/$(notdir $(CURDIR)),$(ALL_APPS_DIRS))
+endif
+endif
+
+ifeq ($(filter $(APPS_DIR) $(DEPS_DIR),$(subst :, ,$(ERL_LIBS))),)
+ifeq ($(ERL_LIBS),)
+ ERL_LIBS = $(APPS_DIR):$(DEPS_DIR)
+else
+ ERL_LIBS := $(ERL_LIBS):$(APPS_DIR):$(DEPS_DIR)
+endif
+endif
+export ERL_LIBS
+
+export NO_AUTOPATCH
+
+# Verbosity.
+
+dep_verbose_0 = @echo " DEP $1 ($(call dep_commit,$1))";
+dep_verbose_2 = set -x;
+dep_verbose = $(dep_verbose_$(V))
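+
+# Illustrative: with the default V=0 a short "  DEP name (commit)" line is
+# printed; `make V=2 deps` additionally traces every shell command (set -x).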
+
+# Optimization: don't recompile deps unless truly necessary.
+
+ifndef IS_DEP
+ifneq ($(MAKELEVEL),0)
+$(shell rm -f ebin/dep_built)
+endif
+endif
+
+# Core targets.
+
+ALL_APPS_DIRS_TO_BUILD = $(if $(LOCAL_DEPS_DIRS)$(IS_APP),$(LOCAL_DEPS_DIRS),$(ALL_APPS_DIRS))
+
+apps:: $(ALL_APPS_DIRS) clean-tmp-deps.log | $(ERLANG_MK_TMP)
+# Create an ebin directory for every app to make sure Erlang recognizes them
+# as proper OTP applications when using -include_lib. This is a temporary
+# fix; a proper fix would be to compile apps/* in the right order.
+ifndef IS_APP
+ifneq ($(ALL_APPS_DIRS),)
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ mkdir -p $$dep/ebin; \
+ done
+endif
+endif
+# At the toplevel: if LOCAL_DEPS is defined with at least one local app, only
+# compile that list of apps. Otherwise, compile everything.
+# Within an app: compile all LOCAL_DEPS that are (uncompiled) local apps.
+ifneq ($(ALL_APPS_DIRS_TO_BUILD),)
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS_TO_BUILD); do \
+ if grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/apps.log; then \
+ :; \
+ else \
+ echo $$dep >> $(ERLANG_MK_TMP)/apps.log; \
+ $(MAKE) -C $$dep $(if $(IS_TEST),test-build-app) IS_APP=1; \
+ fi \
+ done
+endif
+
+clean-tmp-deps.log:
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_TMP)/apps.log $(ERLANG_MK_TMP)/deps.log
+endif
+
+# Erlang.mk does not rebuild dependencies once they have been compiled.
+# Developers working on the top-level project and some of its dependencies
+# at the same time may want to change this behavior.
+# There are two solutions (see the example further below):
+# 1. Set `FULL=1` so that all dependencies are visited and
+#    recursively recompiled if necessary.
+# 2. Set `FORCE_REBUILD=` to the specific list of dependencies that
+#    should be recompiled (instead of the whole set).
+
+FORCE_REBUILD ?=
+
+ifeq ($(origin FULL),undefined)
+ifneq ($(strip $(force_rebuild_dep)$(FORCE_REBUILD)),)
+define force_rebuild_dep
+echo "$(FORCE_REBUILD)" | grep -qw "$$(basename "$1")"
+endef
+endif
+endif
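+
+# For example (dependency names hypothetical): `make FULL=1` revisits every
+# dependency, while `make FORCE_REBUILD="cowlib ranch"` forces only those
+# two to be rebuilt.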
+
+ifneq ($(SKIP_DEPS),)
+deps::
+else
+deps:: $(ALL_DEPS_DIRS) apps clean-tmp-deps.log | $(ERLANG_MK_TMP)
+ifneq ($(ALL_DEPS_DIRS),)
+ $(verbose) set -e; for dep in $(ALL_DEPS_DIRS); do \
+ if grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/deps.log; then \
+ :; \
+ else \
+ echo $$dep >> $(ERLANG_MK_TMP)/deps.log; \
+ if [ -z "$(strip $(FULL))" ] $(if $(force_rebuild_dep),&& ! ($(call force_rebuild_dep,$$dep)),) && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ elif [ -f $$dep/GNUmakefile ] || [ -f $$dep/makefile ] || [ -f $$dep/Makefile ]; then \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ else \
+ echo "Error: No Makefile to build dependency $$dep." >&2; \
+ exit 2; \
+ fi \
+ fi \
+ done
+endif
+endif
+
+# Deps related targets.
+
+# @todo Rename GNUmakefile and makefile to Makefile first, if they exist.
+# While the Makefile could also be named GNUmakefile or makefile,
+# in practice only Makefile has been needed so far.
+define dep_autopatch
+ if [ -f $(DEPS_DIR)/$(1)/erlang.mk ]; then \
+ rm -rf $(DEPS_DIR)/$1/ebin/; \
+ $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+ $(call dep_autopatch_erlang_mk,$(1)); \
+ elif [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+ if [ -f $(DEPS_DIR)/$1/rebar.lock ]; then \
+ $(call dep_autopatch2,$1); \
+ elif [ 0 != `grep -c "include ../\w*\.mk" $(DEPS_DIR)/$(1)/Makefile` ]; then \
+ $(call dep_autopatch2,$(1)); \
+ elif [ 0 != `grep -ci "^[^#].*rebar" $(DEPS_DIR)/$(1)/Makefile` ]; then \
+ $(call dep_autopatch2,$(1)); \
+ elif [ -n "`find $(DEPS_DIR)/$(1)/ -type f -name \*.mk -not -name erlang.mk -exec grep -i "^[^#].*rebar" '{}' \;`" ]; then \
+ $(call dep_autopatch2,$(1)); \
+ fi \
+ else \
+ if [ ! -d $(DEPS_DIR)/$(1)/src/ ]; then \
+ $(call dep_autopatch_noop,$(1)); \
+ else \
+ $(call dep_autopatch2,$(1)); \
+ fi \
+ fi
+endef
+
+define dep_autopatch2
+ ! test -f $(DEPS_DIR)/$1/ebin/$1.app || \
+ mv -n $(DEPS_DIR)/$1/ebin/$1.app $(DEPS_DIR)/$1/src/$1.app.src; \
+ rm -f $(DEPS_DIR)/$1/ebin/$1.app; \
+ if [ -f $(DEPS_DIR)/$1/src/$1.app.src.script ]; then \
+ $(call erlang,$(call dep_autopatch_appsrc_script.erl,$(1))); \
+ fi; \
+ $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+ if [ -f $(DEPS_DIR)/$(1)/rebar -o -f $(DEPS_DIR)/$(1)/rebar.config -o -f $(DEPS_DIR)/$(1)/rebar.config.script -o -f $(DEPS_DIR)/$1/rebar.lock ]; then \
+ $(call dep_autopatch_fetch_rebar); \
+ $(call dep_autopatch_rebar,$(1)); \
+ else \
+ $(call dep_autopatch_gen,$(1)); \
+ fi
+endef
+
+define dep_autopatch_noop
+ printf "noop:\n" > $(DEPS_DIR)/$(1)/Makefile
+endef
+
+# Replace "include erlang.mk" with a line that will load the parent Erlang.mk
+# if given. Do it for all 3 possible Makefile file names.
+ifeq ($(NO_AUTOPATCH_ERLANG_MK),)
+define dep_autopatch_erlang_mk
+ for f in Makefile makefile GNUmakefile; do \
+ if [ -f $(DEPS_DIR)/$1/$$f ]; then \
+ sed -i.bak s/'include *erlang.mk'/'include $$(if $$(ERLANG_MK_FILENAME),$$(ERLANG_MK_FILENAME),erlang.mk)'/ $(DEPS_DIR)/$1/$$f; \
+ fi \
+ done
+endef
+else
+define dep_autopatch_erlang_mk
+ :
+endef
+endif
+
+define dep_autopatch_gen
+ printf "%s\n" \
+ "ERLC_OPTS = +debug_info" \
+ "include ../../erlang.mk" > $(DEPS_DIR)/$(1)/Makefile
+endef
+
+# We use flock/lockf when available to avoid concurrency issues.
+define dep_autopatch_fetch_rebar
+ if command -v flock >/dev/null; then \
+ flock $(ERLANG_MK_TMP)/rebar.lock sh -c "$(call dep_autopatch_fetch_rebar2)"; \
+ elif command -v lockf >/dev/null; then \
+ lockf $(ERLANG_MK_TMP)/rebar.lock sh -c "$(call dep_autopatch_fetch_rebar2)"; \
+ else \
+ $(call dep_autopatch_fetch_rebar2); \
+ fi
+endef
+
+define dep_autopatch_fetch_rebar2
+ if [ ! -d $(ERLANG_MK_TMP)/rebar ]; then \
+ git clone -q -n -- $(REBAR_GIT) $(ERLANG_MK_TMP)/rebar; \
+ cd $(ERLANG_MK_TMP)/rebar; \
+ git checkout -q $(REBAR_COMMIT); \
+ ./bootstrap; \
+ cd -; \
+ fi
+endef
+
+define dep_autopatch_rebar
+ if [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+ mv $(DEPS_DIR)/$(1)/Makefile $(DEPS_DIR)/$(1)/Makefile.orig.mk; \
+ fi; \
+ $(call erlang,$(call dep_autopatch_rebar.erl,$(1))); \
+ rm -f $(DEPS_DIR)/$(1)/ebin/$(1).app
+endef
+
+define dep_autopatch_rebar.erl
+ application:load(rebar),
+ application:set_env(rebar, log_level, debug),
+ rmemo:start(),
+ Conf1 = case file:consult("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config)") of
+ {ok, Conf0} -> Conf0;
+ _ -> []
+ end,
+ {Conf, OsEnv} = fun() ->
+ case filelib:is_file("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)") of
+ false -> {Conf1, []};
+ true ->
+ Bindings0 = erl_eval:new_bindings(),
+ Bindings1 = erl_eval:add_binding('CONFIG', Conf1, Bindings0),
+ Bindings = erl_eval:add_binding('SCRIPT', "$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings1),
+ Before = os:getenv(),
+ {ok, Conf2} = file:script("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings),
+ {Conf2, lists:foldl(fun(E, Acc) -> lists:delete(E, Acc) end, os:getenv(), Before)}
+ end
+ end(),
+ Write = fun (Text) ->
+ file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/Makefile)", Text, [append])
+ end,
+ Escape = fun (Text) ->
+ re:replace(Text, "\\\\$$", "\$$$$", [global, {return, list}])
+ end,
+ Write("IGNORE_DEPS += edown eper eunit_formatters meck node_package "
+ "rebar_lock_deps_plugin rebar_vsn_plugin reltool_util\n"),
+ Write("C_SRC_DIR = /path/do/not/exist\n"),
+ Write("C_SRC_TYPE = rebar\n"),
+ Write("DRV_CFLAGS = -fPIC\nexport DRV_CFLAGS\n"),
+ Write(["ERLANG_ARCH = ", rebar_utils:wordsize(), "\nexport ERLANG_ARCH\n"]),
+ ToList = fun
+ (V) when is_atom(V) -> atom_to_list(V);
+ (V) when is_list(V) -> "'\\"" ++ V ++ "\\"'"
+ end,
+ fun() ->
+ Write("ERLC_OPTS = +debug_info\nexport ERLC_OPTS\n"),
+ case lists:keyfind(erl_opts, 1, Conf) of
+ false -> ok;
+ {_, ErlOpts} ->
+ lists:foreach(fun
+ ({d, D}) ->
+ Write("ERLC_OPTS += -D" ++ ToList(D) ++ "=1\n");
+ ({d, DKey, DVal}) ->
+ Write("ERLC_OPTS += -D" ++ ToList(DKey) ++ "=" ++ ToList(DVal) ++ "\n");
+ ({i, I}) ->
+ Write(["ERLC_OPTS += -I ", I, "\n"]);
+ ({platform_define, Regex, D}) ->
+ case rebar_utils:is_arch(Regex) of
+ true -> Write("ERLC_OPTS += -D" ++ ToList(D) ++ "=1\n");
+ false -> ok
+ end;
+ ({parse_transform, PT}) ->
+ Write("ERLC_OPTS += +'{parse_transform, " ++ ToList(PT) ++ "}'\n");
+ (_) -> ok
+ end, ErlOpts)
+ end,
+ Write("\n")
+ end(),
+ GetHexVsn = fun(N, NP) ->
+ case file:consult("$(call core_native_path,$(DEPS_DIR)/$1/rebar.lock)") of
+ {ok, Lock} ->
+ io:format("~p~n", [Lock]),
+ case lists:keyfind("1.1.0", 1, Lock) of
+ {_, LockPkgs} ->
+ io:format("~p~n", [LockPkgs]),
+ case lists:keyfind(atom_to_binary(N, latin1), 1, LockPkgs) of
+ {_, {pkg, _, Vsn}, _} ->
+ io:format("~p~n", [Vsn]),
+ {N, {hex, NP, binary_to_list(Vsn)}};
+ _ ->
+ false
+ end;
+ _ ->
+ false
+ end;
+ _ ->
+ false
+ end
+ end,
+ SemVsn = fun
+ ("~>" ++ S0) ->
+ S = case S0 of
+ " " ++ S1 -> S1;
+ _ -> S0
+ end,
+ case length([ok || $$. <- S]) of
+ 0 -> S ++ ".0.0";
+ 1 -> S ++ ".0";
+ _ -> S
+ end;
+ (S) -> S
+ end,
+ fun() ->
+ File = case lists:keyfind(deps, 1, Conf) of
+ false -> [];
+ {_, Deps} ->
+ [begin case case Dep of
+ N when is_atom(N) -> GetHexVsn(N, N);
+ {N, S} when is_atom(N), is_list(S) -> {N, {hex, N, SemVsn(S)}};
+ {N, {pkg, NP}} when is_atom(N) -> GetHexVsn(N, NP);
+ {N, S, {pkg, NP}} -> {N, {hex, NP, S}};
+ {N, S} when is_tuple(S) -> {N, S};
+ {N, _, S} -> {N, S};
+ {N, _, S, _} -> {N, S};
+ _ -> false
+ end of
+ false -> ok;
+ {Name, Source} ->
+ {Method, Repo, Commit} = case Source of
+ {hex, NPV, V} -> {hex, V, NPV};
+ {git, R} -> {git, R, master};
+ {M, R, {branch, C}} -> {M, R, C};
+ {M, R, {ref, C}} -> {M, R, C};
+ {M, R, {tag, C}} -> {M, R, C};
+ {M, R, C} -> {M, R, C}
+ end,
+ Write(io_lib:format("DEPS += ~s\ndep_~s = ~s ~s ~s~n", [Name, Name, Method, Repo, Commit]))
+ end end || Dep <- Deps]
+ end
+ end(),
+ fun() ->
+ case lists:keyfind(erl_first_files, 1, Conf) of
+ false -> ok;
+ {_, Files} ->
+ Names = [[" ", case lists:reverse(F) of
+ "lre." ++ Elif -> lists:reverse(Elif);
+ "lrx." ++ Elif -> lists:reverse(Elif);
+ "lry." ++ Elif -> lists:reverse(Elif);
+ Elif -> lists:reverse(Elif)
+ end] || "src/" ++ F <- Files],
+ Write(io_lib:format("COMPILE_FIRST +=~s\n", [Names]))
+ end
+ end(),
+ Write("\n\nrebar_dep: preprocess pre-deps deps pre-app app\n"),
+ Write("\npreprocess::\n"),
+ Write("\npre-deps::\n"),
+ Write("\npre-app::\n"),
+ PatchHook = fun(Cmd) ->
+ Cmd2 = re:replace(Cmd, "^([g]?make)(.*)( -C.*)", "\\\\1\\\\3\\\\2", [{return, list}]),
+ case Cmd2 of
+ "make -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1);
+ "gmake -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1);
+ "make " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1);
+ "gmake " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1);
+ _ -> Escape(Cmd)
+ end
+ end,
+ fun() ->
+ case lists:keyfind(pre_hooks, 1, Conf) of
+ false -> ok;
+ {_, Hooks} ->
+ [case H of
+ {'get-deps', Cmd} ->
+ Write("\npre-deps::\n\t" ++ PatchHook(Cmd) ++ "\n");
+ {compile, Cmd} ->
+ Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n");
+ {Regex, compile, Cmd} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n");
+ false -> ok
+ end;
+ _ -> ok
+ end || H <- Hooks]
+ end
+ end(),
+ ShellToMk = fun(V0) ->
+ V1 = re:replace(V0, "[$$][(]", "$$\(shell ", [global]),
+ V = re:replace(V1, "([$$])(?![(])(\\\\w*)", "\\\\1(\\\\2)", [global]),
+ re:replace(V, "-Werror\\\\b", "", [{return, list}, global])
+ end,
+ PortSpecs = fun() ->
+ case lists:keyfind(port_specs, 1, Conf) of
+ false ->
+ case filelib:is_dir("$(call core_native_path,$(DEPS_DIR)/$1/c_src)") of
+ false -> [];
+ true ->
+ [{"priv/" ++ proplists:get_value(so_name, Conf, "$(1)_drv.so"),
+ proplists:get_value(port_sources, Conf, ["c_src/*.c"]), []}]
+ end;
+ {_, Specs} ->
+ lists:flatten([case S of
+ {Output, Input} -> {ShellToMk(Output), Input, []};
+ {Regex, Output, Input} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {ShellToMk(Output), Input, []};
+ false -> []
+ end;
+ {Regex, Output, Input, [{env, Env}]} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {ShellToMk(Output), Input, Env};
+ false -> []
+ end
+ end || S <- Specs])
+ end
+ end(),
+ PortSpecWrite = fun (Text) ->
+ file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/c_src/Makefile.erlang.mk)", Text, [append])
+ end,
+ case PortSpecs of
+ [] -> ok;
+ _ ->
+ Write("\npre-app::\n\t@$$\(MAKE) --no-print-directory -f c_src/Makefile.erlang.mk\n"),
+ PortSpecWrite(io_lib:format("ERL_CFLAGS ?= -finline-functions -Wall -fPIC -I \\"~s/erts-~s/include\\" -I \\"~s\\"\n",
+ [code:root_dir(), erlang:system_info(version), code:lib_dir(erl_interface, include)])),
+ PortSpecWrite(io_lib:format("ERL_LDFLAGS ?= -L \\"~s\\" -lei\n",
+ [code:lib_dir(erl_interface, lib)])),
+ [PortSpecWrite(["\n", E, "\n"]) || E <- OsEnv],
+ FilterEnv = fun(Env) ->
+ lists:flatten([case E of
+ {_, _} -> E;
+ {Regex, K, V} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {K, V};
+ false -> []
+ end
+ end || E <- Env])
+ end,
+ MergeEnv = fun(Env) ->
+ lists:foldl(fun ({K, V}, Acc) ->
+ case lists:keyfind(K, 1, Acc) of
+ false -> [{K, rebar_utils:expand_env_variable(V, K, "")}|Acc];
+ {_, V0} -> [{K, rebar_utils:expand_env_variable(V, K, V0)}|Acc]
+ end
+ end, [], Env)
+ end,
+ PortEnv = case lists:keyfind(port_env, 1, Conf) of
+ false -> [];
+ {_, PortEnv0} -> FilterEnv(PortEnv0)
+ end,
+ PortSpec = fun ({Output, Input0, Env}) ->
+ filelib:ensure_dir("$(call core_native_path,$(DEPS_DIR)/$1/)" ++ Output),
+ Input = [[" ", I] || I <- Input0],
+ PortSpecWrite([
+ [["\n", K, " = ", ShellToMk(V)] || {K, V} <- lists:reverse(MergeEnv(PortEnv))],
+ case $(PLATFORM) of
+ darwin -> "\n\nLDFLAGS += -flat_namespace -undefined suppress";
+ _ -> ""
+ end,
+ "\n\nall:: ", Output, "\n\t@:\n\n",
+ "%.o: %.c\n\t$$\(CC) -c -o $$\@ $$\< $$\(CFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.C\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.cc\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.cpp\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ [[Output, ": ", K, " += ", ShellToMk(V), "\n"] || {K, V} <- lists:reverse(MergeEnv(FilterEnv(Env)))],
+ Output, ": $$\(foreach ext,.c .C .cc .cpp,",
+ "$$\(patsubst %$$\(ext),%.o,$$\(filter %$$\(ext),$$\(wildcard", Input, "))))\n",
+ "\t$$\(CC) -o $$\@ $$\? $$\(LDFLAGS) $$\(ERL_LDFLAGS) $$\(DRV_LDFLAGS) $$\(EXE_LDFLAGS)",
+ case {filename:extension(Output), $(PLATFORM)} of
+ {[], _} -> "\n";
+ {_, darwin} -> "\n";
+ _ -> " -shared\n"
+ end])
+ end,
+ [PortSpec(S) || S <- PortSpecs]
+ end,
+ fun() ->
+ case lists:keyfind(plugins, 1, Conf) of
+ false -> ok;
+ {_, Plugins0} ->
+ Plugins = [P || P <- Plugins0, is_tuple(P)],
+ case lists:keyfind('lfe-compile', 1, Plugins) of
+ false -> ok;
+ _ -> Write("\nBUILD_DEPS = lfe lfe.mk\ndep_lfe.mk = git https://github.com/ninenines/lfe.mk master\nDEP_PLUGINS = lfe.mk\n")
+ end
+ end
+ end(),
+ Write("\ninclude $$\(if $$\(ERLANG_MK_FILENAME),$$\(ERLANG_MK_FILENAME),erlang.mk)"),
+ RunPlugin = fun(Plugin, Step) ->
+ case erlang:function_exported(Plugin, Step, 2) of
+ false -> ok;
+ true ->
+ c:cd("$(call core_native_path,$(DEPS_DIR)/$1/)"),
+ Ret = Plugin:Step({config, "", Conf, dict:new(), dict:new(), dict:new(),
+ dict:store(base_dir, "", dict:new())}, undefined),
+ io:format("rebar plugin ~p step ~p ret ~p~n", [Plugin, Step, Ret])
+ end
+ end,
+ fun() ->
+ case lists:keyfind(plugins, 1, Conf) of
+ false -> ok;
+ {_, Plugins0} ->
+ Plugins = [P || P <- Plugins0, is_atom(P)],
+ [begin
+ case lists:keyfind(deps, 1, Conf) of
+ false -> ok;
+ {_, Deps} ->
+ case lists:keyfind(P, 1, Deps) of
+ false -> ok;
+ _ ->
+ Path = "$(call core_native_path,$(DEPS_DIR)/)" ++ atom_to_list(P),
+ io:format("~s", [os:cmd("$(MAKE) -C $(call core_native_path,$(DEPS_DIR)/$1) " ++ Path)]),
+ io:format("~s", [os:cmd("$(MAKE) -C " ++ Path ++ " IS_DEP=1")]),
+ code:add_patha(Path ++ "/ebin")
+ end
+ end
+ end || P <- Plugins],
+ [case code:load_file(P) of
+ {module, P} -> ok;
+ _ ->
+ case lists:keyfind(plugin_dir, 1, Conf) of
+ false -> ok;
+ {_, PluginsDir} ->
+ ErlFile = "$(call core_native_path,$(DEPS_DIR)/$1/)" ++ PluginsDir ++ "/" ++ atom_to_list(P) ++ ".erl",
+ {ok, P, Bin} = compile:file(ErlFile, [binary]),
+ {module, P} = code:load_binary(P, ErlFile, Bin)
+ end
+ end || P <- Plugins],
+ [RunPlugin(P, preprocess) || P <- Plugins],
+ [RunPlugin(P, pre_compile) || P <- Plugins],
+ [RunPlugin(P, compile) || P <- Plugins]
+ end
+ end(),
+ halt()
+endef
+
+define dep_autopatch_appsrc_script.erl
+ AppSrc = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+ AppSrcScript = AppSrc ++ ".script",
+ {ok, Conf0} = file:consult(AppSrc),
+ Bindings0 = erl_eval:new_bindings(),
+ Bindings1 = erl_eval:add_binding('CONFIG', Conf0, Bindings0),
+ Bindings = erl_eval:add_binding('SCRIPT', AppSrcScript, Bindings1),
+ Conf = case file:script(AppSrcScript, Bindings) of
+ {ok, [C]} -> C;
+ {ok, C} -> C
+ end,
+ ok = file:write_file(AppSrc, io_lib:format("~p.~n", [Conf])),
+ halt()
+endef
+
+define dep_autopatch_appsrc.erl
+ AppSrcOut = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+ AppSrcIn = case filelib:is_regular(AppSrcOut) of false -> "$(call core_native_path,$(DEPS_DIR)/$1/ebin/$1.app)"; true -> AppSrcOut end,
+ case filelib:is_regular(AppSrcIn) of
+ false -> ok;
+ true ->
+ {ok, [{application, $(1), L0}]} = file:consult(AppSrcIn),
+ L1 = lists:keystore(modules, 1, L0, {modules, []}),
+ L2 = case lists:keyfind(vsn, 1, L1) of
+ {_, git} -> lists:keyreplace(vsn, 1, L1, {vsn, lists:droplast(os:cmd("git -C $(DEPS_DIR)/$1 describe --dirty --tags --always"))});
+ {_, {cmd, _}} -> lists:keyreplace(vsn, 1, L1, {vsn, "cmd"});
+ _ -> L1
+ end,
+ L3 = case lists:keyfind(registered, 1, L2) of false -> [{registered, []}|L2]; _ -> L2 end,
+ ok = file:write_file(AppSrcOut, io_lib:format("~p.~n", [{application, $(1), L3}])),
+ case AppSrcOut of AppSrcIn -> ok; _ -> ok = file:delete(AppSrcIn) end
+ end,
+ halt()
+endef
+
+define dep_fetch_git
+ git clone -q -n -- $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && git checkout -q $(call dep_commit,$(1));
+endef
+
+define dep_fetch_git-subfolder
+ mkdir -p $(ERLANG_MK_TMP)/git-subfolder; \
+ git clone -q -n -- $(call dep_repo,$1) \
+ $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1); \
+ cd $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1) \
+ && git checkout -q $(call dep_commit,$1); \
+ ln -s $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1)/$(word 4,$(dep_$(1))) \
+ $(DEPS_DIR)/$(call dep_name,$1);
+endef
+
+define dep_fetch_git-submodule
+ git submodule update --init -- $(DEPS_DIR)/$1;
+endef
+
+define dep_fetch_hg
+ hg clone -q -U $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && hg update -q $(call dep_commit,$(1));
+endef
+
+define dep_fetch_svn
+ svn checkout -q $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+define dep_fetch_cp
+ cp -R $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+define dep_fetch_ln
+ ln -s $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+# Hex only has a package version. No need to look in the Erlang.mk packages.
+define dep_fetch_hex
+ mkdir -p $(ERLANG_MK_TMP)/hex $(DEPS_DIR)/$1; \
+ $(call core_http_get,$(ERLANG_MK_TMP)/hex/$1.tar,\
+ https://repo.hex.pm/tarballs/$(if $(word 3,$(dep_$1)),$(word 3,$(dep_$1)),$1)-$(strip $(word 2,$(dep_$1))).tar); \
+ tar -xOf $(ERLANG_MK_TMP)/hex/$1.tar contents.tar.gz | tar -C $(DEPS_DIR)/$1 -xzf -;
+endef
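+
+# Illustrative Hex dependency (name and version hypothetical):
+#   DEPS += jsx
+#   dep_jsx = hex 3.1.0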
+
+define dep_fetch_fail
+ echo "Error: Unknown or invalid dependency: $(1)." >&2; \
+ exit 78;
+endef
+
+# Kept for compatibility with older Erlang.mk configurations.
+define dep_fetch_legacy
+ $(warning WARNING: '$(1)' dependency configuration uses deprecated format.) \
+ git clone -q -n -- $(word 1,$(dep_$(1))) $(DEPS_DIR)/$(1); \
+ cd $(DEPS_DIR)/$(1) && git checkout -q $(if $(word 2,$(dep_$(1))),$(word 2,$(dep_$(1))),master);
+endef
+
+define dep_target
+$(DEPS_DIR)/$(call dep_name,$1): | $(ERLANG_MK_TMP)
+ $(eval DEP_NAME := $(call dep_name,$1))
+ $(eval DEP_STR := $(if $(filter $1,$(DEP_NAME)),$1,"$1 ($(DEP_NAME))"))
+ $(verbose) if test -d $(APPS_DIR)/$(DEP_NAME); then \
+ echo "Error: Dependency" $(DEP_STR) "conflicts with application found in $(APPS_DIR)/$(DEP_NAME)." >&2; \
+ exit 17; \
+ fi
+ $(verbose) mkdir -p $(DEPS_DIR)
+ $(dep_verbose) $(call dep_fetch_$(strip $(call dep_fetch,$(1))),$(1))
+ $(verbose) if [ -f $(DEPS_DIR)/$(1)/configure.ac -o -f $(DEPS_DIR)/$(1)/configure.in ] \
+ && [ ! -f $(DEPS_DIR)/$(1)/configure ]; then \
+ echo " AUTO " $(DEP_STR); \
+ cd $(DEPS_DIR)/$(1) && autoreconf -Wall -vif -I m4; \
+ fi
+ - $(verbose) if [ -f $(DEPS_DIR)/$(DEP_NAME)/configure ]; then \
+ echo " CONF " $(DEP_STR); \
+ cd $(DEPS_DIR)/$(DEP_NAME) && ./configure; \
+ fi
+ifeq ($(filter $(1),$(NO_AUTOPATCH)),)
+ $(verbose) $$(MAKE) --no-print-directory autopatch-$(DEP_NAME)
+endif
+
+.PHONY: autopatch-$(call dep_name,$1)
+
+autopatch-$(call dep_name,$1)::
+ $(verbose) if [ "$(1)" = "amqp_client" -a "$(RABBITMQ_CLIENT_PATCH)" ]; then \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \
+ echo " PATCH Downloading rabbitmq-codegen"; \
+ git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \
+ fi; \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-server ]; then \
+ echo " PATCH Downloading rabbitmq-server"; \
+ git clone https://github.com/rabbitmq/rabbitmq-server.git $(DEPS_DIR)/rabbitmq-server; \
+ fi; \
+ ln -s $(DEPS_DIR)/amqp_client/deps/rabbit_common-0.0.0 $(DEPS_DIR)/rabbit_common; \
+ elif [ "$(1)" = "rabbit" -a "$(RABBITMQ_SERVER_PATCH)" ]; then \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \
+ echo " PATCH Downloading rabbitmq-codegen"; \
+ git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \
+ fi \
+ elif [ "$1" = "elixir" -a "$(ELIXIR_PATCH)" ]; then \
+ ln -s lib/elixir/ebin $(DEPS_DIR)/elixir/; \
+ else \
+ $$(call dep_autopatch,$(call dep_name,$1)) \
+ fi
+endef
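+# Illustrative: autopatching can be skipped for a dependency (name
+# hypothetical) by listing it in NO_AUTOPATCH:
+#   NO_AUTOPATCH = some_c_dep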
+
+$(foreach dep,$(BUILD_DEPS) $(DEPS),$(eval $(call dep_target,$(dep))))
+
+ifndef IS_APP
+clean:: clean-apps
+
+clean-apps:
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ $(MAKE) -C $$dep clean IS_APP=1; \
+ done
+
+distclean:: distclean-apps
+
+distclean-apps:
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ $(MAKE) -C $$dep distclean IS_APP=1; \
+ done
+endif
+
+ifndef SKIP_DEPS
+distclean:: distclean-deps
+
+distclean-deps:
+ $(gen_verbose) rm -rf $(DEPS_DIR)
+endif
+
+# Forward-declare variables used in core/deps-tools.mk. This is required
+# in case plugins use them.
+
+ERLANG_MK_RECURSIVE_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-deps-list.log
+ERLANG_MK_RECURSIVE_DOC_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-doc-deps-list.log
+ERLANG_MK_RECURSIVE_REL_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-rel-deps-list.log
+ERLANG_MK_RECURSIVE_TEST_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-test-deps-list.log
+ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-shell-deps-list.log
+
+ERLANG_MK_QUERY_DEPS_FILE = $(ERLANG_MK_TMP)/query-deps.log
+ERLANG_MK_QUERY_DOC_DEPS_FILE = $(ERLANG_MK_TMP)/query-doc-deps.log
+ERLANG_MK_QUERY_REL_DEPS_FILE = $(ERLANG_MK_TMP)/query-rel-deps.log
+ERLANG_MK_QUERY_TEST_DEPS_FILE = $(ERLANG_MK_TMP)/query-test-deps.log
+ERLANG_MK_QUERY_SHELL_DEPS_FILE = $(ERLANG_MK_TMP)/query-shell-deps.log
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: clean-app
+
+# Configuration.
+
+ERLC_OPTS ?= -Werror +debug_info +warn_export_vars +warn_shadow_vars \
+ +warn_obsolete_guard # +bin_opt_info +warn_export_all +warn_missing_spec
+COMPILE_FIRST ?=
+COMPILE_FIRST_PATHS = $(addprefix src/,$(addsuffix .erl,$(COMPILE_FIRST)))
+ERLC_EXCLUDE ?=
+ERLC_EXCLUDE_PATHS = $(addprefix src/,$(addsuffix .erl,$(ERLC_EXCLUDE)))
+
+ERLC_ASN1_OPTS ?=
+
+ERLC_MIB_OPTS ?=
+COMPILE_MIB_FIRST ?=
+COMPILE_MIB_FIRST_PATHS = $(addprefix mibs/,$(addsuffix .mib,$(COMPILE_MIB_FIRST)))
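+
+# A project Makefile can override any of the above, e.g. (illustrative):
+#   ERLC_OPTS = +debug_info +warn_missing_spec
+#   COMPILE_FIRST = my_behaviour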
+
+# Verbosity.
+
+app_verbose_0 = @echo " APP " $(PROJECT);
+app_verbose_2 = set -x;
+app_verbose = $(app_verbose_$(V))
+
+appsrc_verbose_0 = @echo " APP " $(PROJECT).app.src;
+appsrc_verbose_2 = set -x;
+appsrc_verbose = $(appsrc_verbose_$(V))
+
+makedep_verbose_0 = @echo " DEPEND" $(PROJECT).d;
+makedep_verbose_2 = set -x;
+makedep_verbose = $(makedep_verbose_$(V))
+
+erlc_verbose_0 = @echo " ERLC " $(filter-out $(patsubst %,%.erl,$(ERLC_EXCLUDE)),\
+ $(filter %.erl %.core,$(?F)));
+erlc_verbose_2 = set -x;
+erlc_verbose = $(erlc_verbose_$(V))
+
+xyrl_verbose_0 = @echo " XYRL " $(filter %.xrl %.yrl,$(?F));
+xyrl_verbose_2 = set -x;
+xyrl_verbose = $(xyrl_verbose_$(V))
+
+asn1_verbose_0 = @echo " ASN1 " $(filter %.asn1,$(?F));
+asn1_verbose_2 = set -x;
+asn1_verbose = $(asn1_verbose_$(V))
+
+mib_verbose_0 = @echo " MIB " $(filter %.bin %.mib,$(?F));
+mib_verbose_2 = set -x;
+mib_verbose = $(mib_verbose_$(V))
+
+ifneq ($(wildcard src/),)
+
+# Targets.
+
+app:: $(if $(wildcard ebin/test),clean) deps
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d
+ $(verbose) $(MAKE) --no-print-directory app-build
+
+ifeq ($(wildcard src/$(PROJECT_MOD).erl),)
+define app_file
+{application, '$(PROJECT)', [
+ {description, "$(PROJECT_DESCRIPTION)"},
+ {vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP),
+ {id$(comma)$(space)"$(1)"}$(comma))
+ {modules, [$(call comma_list,$(2))]},
+ {registered, []},
+ {applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(foreach dep,$(DEPS),$(call dep_name,$(dep))))]},
+ {env, $(subst \,\\,$(PROJECT_ENV))}$(if $(findstring {,$(PROJECT_APP_EXTRA_KEYS)),$(comma)$(newline)$(tab)$(subst \,\\,$(PROJECT_APP_EXTRA_KEYS)),)
+]}.
+endef
+else
+define app_file
+{application, '$(PROJECT)', [
+ {description, "$(PROJECT_DESCRIPTION)"},
+ {vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP),
+ {id$(comma)$(space)"$(1)"}$(comma))
+ {modules, [$(call comma_list,$(2))]},
+ {registered, [$(call comma_list,$(PROJECT)_sup $(PROJECT_REGISTERED))]},
+ {applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(foreach dep,$(DEPS),$(call dep_name,$(dep))))]},
+ {mod, {$(PROJECT_MOD), []}},
+ {env, $(subst \,\\,$(PROJECT_ENV))}$(if $(findstring {,$(PROJECT_APP_EXTRA_KEYS)),$(comma)$(newline)$(tab)$(subst \,\\,$(PROJECT_APP_EXTRA_KEYS)),)
+]}.
+endef
+endif
+
+app-build: ebin/$(PROJECT).app
+ $(verbose) :
+
+# Source files.
+
+ALL_SRC_FILES := $(sort $(call core_find,src/,*))
+
+ERL_FILES := $(filter %.erl,$(ALL_SRC_FILES))
+CORE_FILES := $(filter %.core,$(ALL_SRC_FILES))
+
+# ASN.1 files.
+
+ifneq ($(wildcard asn1/),)
+ASN1_FILES = $(sort $(call core_find,asn1/,*.asn1))
+ERL_FILES += $(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES))))
+
+define compile_asn1
+ $(verbose) mkdir -p include/
+ $(asn1_verbose) erlc -v -I include/ -o asn1/ +noobj $(ERLC_ASN1_OPTS) $(1)
+ $(verbose) mv asn1/*.erl src/
+ -$(verbose) mv asn1/*.hrl include/
+ $(verbose) mv asn1/*.asn1db include/
+endef
+
+$(PROJECT).d:: $(ASN1_FILES)
+ $(if $(strip $?),$(call compile_asn1,$?))
+endif
+
+# SNMP MIB files.
+
+ifneq ($(wildcard mibs/),)
+MIB_FILES = $(sort $(call core_find,mibs/,*.mib))
+
+$(PROJECT).d:: $(COMPILE_MIB_FIRST_PATHS) $(MIB_FILES)
+ $(verbose) mkdir -p include/ priv/mibs/
+ $(mib_verbose) erlc -v $(ERLC_MIB_OPTS) -o priv/mibs/ -I priv/mibs/ $?
+ $(mib_verbose) erlc -o include/ -- $(addprefix priv/mibs/,$(patsubst %.mib,%.bin,$(notdir $?)))
+endif
+
+# Leex and Yecc files.
+
+XRL_FILES := $(filter %.xrl,$(ALL_SRC_FILES))
+XRL_ERL_FILES = $(addprefix src/,$(patsubst %.xrl,%.erl,$(notdir $(XRL_FILES))))
+ERL_FILES += $(XRL_ERL_FILES)
+
+YRL_FILES := $(filter %.yrl,$(ALL_SRC_FILES))
+YRL_ERL_FILES = $(addprefix src/,$(patsubst %.yrl,%.erl,$(notdir $(YRL_FILES))))
+ERL_FILES += $(YRL_ERL_FILES)
+
+$(PROJECT).d:: $(XRL_FILES) $(YRL_FILES)
+ $(if $(strip $?),$(xyrl_verbose) erlc -v -o src/ $(YRL_ERLC_OPTS) $?)
+
+# Erlang and Core Erlang files.
+
+define makedep.erl
+ E = ets:new(makedep, [bag]),
+ G = digraph:new([acyclic]),
+ ErlFiles = lists:usort(string:tokens("$(ERL_FILES)", " ")),
+ DepsDir = "$(call core_native_path,$(DEPS_DIR))",
+ AppsDir = "$(call core_native_path,$(APPS_DIR))",
+ DepsDirsSrc = "$(if $(wildcard $(DEPS_DIR)/*/src), $(call core_native_path,$(wildcard $(DEPS_DIR)/*/src)))",
+ DepsDirsInc = "$(if $(wildcard $(DEPS_DIR)/*/include), $(call core_native_path,$(wildcard $(DEPS_DIR)/*/include)))",
+ AppsDirsSrc = "$(if $(wildcard $(APPS_DIR)/*/src), $(call core_native_path,$(wildcard $(APPS_DIR)/*/src)))",
+ AppsDirsInc = "$(if $(wildcard $(APPS_DIR)/*/include), $(call core_native_path,$(wildcard $(APPS_DIR)/*/include)))",
+ DepsDirs = lists:usort(string:tokens(DepsDirsSrc++DepsDirsInc, " ")),
+ AppsDirs = lists:usort(string:tokens(AppsDirsSrc++AppsDirsInc, " ")),
+ Modules = [{list_to_atom(filename:basename(F, ".erl")), F} || F <- ErlFiles],
+ Add = fun (Mod, Dep) ->
+ case lists:keyfind(Dep, 1, Modules) of
+ false -> ok;
+ {_, DepFile} ->
+ {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+ ets:insert(E, {ModFile, DepFile}),
+ digraph:add_vertex(G, Mod),
+ digraph:add_vertex(G, Dep),
+ digraph:add_edge(G, Mod, Dep)
+ end
+ end,
+ AddHd = fun (F, Mod, DepFile) ->
+ case file:open(DepFile, [read]) of
+ {error, enoent} ->
+ ok;
+ {ok, Fd} ->
+ {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+ case ets:match(E, {ModFile, DepFile}) of
+ [] ->
+ ets:insert(E, {ModFile, DepFile}),
+				F(F, Fd, Mod, 0);
+ _ -> ok
+ end
+ end
+ end,
+ SearchHrl = fun
+ F(_Hrl, []) -> {error,enoent};
+ F(Hrl, [Dir|Dirs]) ->
+ HrlF = filename:join([Dir,Hrl]),
+ case filelib:is_file(HrlF) of
+ true ->
+ {ok, HrlF};
+ false -> F(Hrl,Dirs)
+ end
+ end,
+ Attr = fun
+ (_F, Mod, behavior, Dep) ->
+ Add(Mod, Dep);
+ (_F, Mod, behaviour, Dep) ->
+ Add(Mod, Dep);
+ (_F, Mod, compile, {parse_transform, Dep}) ->
+ Add(Mod, Dep);
+ (_F, Mod, compile, Opts) when is_list(Opts) ->
+ case proplists:get_value(parse_transform, Opts) of
+ undefined -> ok;
+ Dep -> Add(Mod, Dep)
+ end;
+ (F, Mod, include, Hrl) ->
+ case SearchHrl(Hrl, ["src", "include",AppsDir,DepsDir]++AppsDirs++DepsDirs) of
+ {ok, FoundHrl} -> AddHd(F, Mod, FoundHrl);
+ {error, _} -> false
+ end;
+ (F, Mod, include_lib, Hrl) ->
+ case SearchHrl(Hrl, ["src", "include",AppsDir,DepsDir]++AppsDirs++DepsDirs) of
+ {ok, FoundHrl} -> AddHd(F, Mod, FoundHrl);
+ {error, _} -> false
+ end;
+ (F, Mod, import, {Imp, _}) ->
+ IsFile =
+ case lists:keyfind(Imp, 1, Modules) of
+ false -> false;
+ {_, FilePath} -> filelib:is_file(FilePath)
+ end,
+ case IsFile of
+ false -> ok;
+ true -> Add(Mod, Imp)
+ end;
+ (_, _, _, _) -> ok
+ end,
+ MakeDepend = fun
+ (F, Fd, Mod, StartLocation) ->
+ {ok, Filename} = file:pid2name(Fd),
+ case io:parse_erl_form(Fd, undefined, StartLocation) of
+ {ok, AbsData, EndLocation} ->
+ case AbsData of
+ {attribute, _, Key, Value} ->
+ Attr(F, Mod, Key, Value),
+ F(F, Fd, Mod, EndLocation);
+ _ -> F(F, Fd, Mod, EndLocation)
+ end;
+ {eof, _ } -> file:close(Fd);
+ {error, ErrorDescription } ->
+ file:close(Fd);
+ {error, ErrorInfo, ErrorLocation} ->
+ F(F, Fd, Mod, ErrorLocation)
+ end,
+ ok
+ end,
+ [begin
+ Mod = list_to_atom(filename:basename(F, ".erl")),
+ case file:open(F, [read]) of
+		{ok, Fd} -> MakeDepend(MakeDepend, Fd, Mod, 0);
+ {error, enoent} -> ok
+ end
+ end || F <- ErlFiles],
+ Depend = sofs:to_external(sofs:relation_to_family(sofs:relation(ets:tab2list(E)))),
+ CompileFirst = [X || X <- lists:reverse(digraph_utils:topsort(G)), [] =/= digraph:in_neighbours(G, X)],
+ TargetPath = fun(Target) ->
+ case lists:keyfind(Target, 1, Modules) of
+ false -> "";
+ {_, DepFile} ->
+ DirSubname = tl(string:tokens(filename:dirname(DepFile), "/")),
+ string:join(DirSubname ++ [atom_to_list(Target)], "/")
+ end
+ end,
+ Output0 = [
+ "# Generated by Erlang.mk. Edit at your own risk!\n\n",
+ [[F, "::", [[" ", D] || D <- Deps], "; @touch \$$@\n"] || {F, Deps} <- Depend],
+ "\nCOMPILE_FIRST +=", [[" ", TargetPath(CF)] || CF <- CompileFirst], "\n"
+ ],
+ Output = case "é" of
+ [233] -> unicode:characters_to_binary(Output0);
+ _ -> Output0
+ end,
+ ok = file:write_file("$(1)", Output),
+ halt()
+endef
+
+ifeq ($(if $(NO_MAKEDEP),$(wildcard $(PROJECT).d),),)
+$(PROJECT).d:: $(ERL_FILES) $(call core_find,include/,*.hrl) $(MAKEFILE_LIST)
+ $(makedep_verbose) $(call erlang,$(call makedep.erl,$@))
+endif
+
+ifeq ($(IS_APP)$(IS_DEP),)
+ifneq ($(words $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES)),0)
+# Rebuild everything when the Makefile changes.
+$(ERLANG_MK_TMP)/last-makefile-change: $(MAKEFILE_LIST) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES); \
+ touch -c $(PROJECT).d; \
+ fi
+ $(verbose) touch $@
+
+$(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES):: $(ERLANG_MK_TMP)/last-makefile-change
+ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change
+endif
+endif
+
+$(PROJECT).d::
+ $(verbose) :
+
+include $(wildcard $(PROJECT).d)
+
+ebin/$(PROJECT).app:: ebin/
+
+ebin/:
+ $(verbose) mkdir -p ebin/
+
+define compile_erl
+ $(erlc_verbose) erlc -v $(if $(IS_DEP),$(filter-out -Werror,$(ERLC_OPTS)),$(ERLC_OPTS)) -o ebin/ \
+ -pa ebin/ -I include/ $(filter-out $(ERLC_EXCLUDE_PATHS),$(COMPILE_FIRST_PATHS) $(1))
+endef
+
+define validate_app_file
+ case file:consult("ebin/$(PROJECT).app") of
+ {ok, _} -> halt();
+ _ -> halt(1)
+ end
+endef
+
+ebin/$(PROJECT).app:: $(ERL_FILES) $(CORE_FILES) $(wildcard src/$(PROJECT).app.src)
+ $(eval FILES_TO_COMPILE := $(filter-out src/$(PROJECT).app.src,$?))
+ $(if $(strip $(FILES_TO_COMPILE)),$(call compile_erl,$(FILES_TO_COMPILE)))
+# Older git versions do not have the --first-parent flag. Do without in that case.
+ $(eval GITDESCRIBE := $(shell git describe --dirty --abbrev=7 --tags --always --first-parent 2>/dev/null \
+ || git describe --dirty --abbrev=7 --tags --always 2>/dev/null || true))
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(filter-out $(ERLC_EXCLUDE_PATHS),$(ERL_FILES) $(CORE_FILES) $(BEAM_FILES)))))))
+ifeq ($(wildcard src/$(PROJECT).app.src),)
+ $(app_verbose) printf '$(subst %,%%,$(subst $(newline),\n,$(subst ','\'',$(call app_file,$(GITDESCRIBE),$(MODULES)))))' \
+ > ebin/$(PROJECT).app
+ $(verbose) if ! $(call erlang,$(call validate_app_file)); then \
+ echo "The .app file produced is invalid. Please verify the value of PROJECT_ENV." >&2; \
+ exit 1; \
+ fi
+else
+ $(verbose) if [ -z "$$(grep -e '^[^%]*{\s*modules\s*,' src/$(PROJECT).app.src)" ]; then \
+ echo "Empty modules entry not found in $(PROJECT).app.src. Please consult the erlang.mk documentation for instructions." >&2; \
+ exit 1; \
+ fi
+ $(appsrc_verbose) cat src/$(PROJECT).app.src \
+ | sed "s/{[[:space:]]*modules[[:space:]]*,[[:space:]]*\[\]}/{modules, \[$(call comma_list,$(MODULES))\]}/" \
+ | sed "s/{id,[[:space:]]*\"git\"}/{id, \"$(subst /,\/,$(GITDESCRIBE))\"}/" \
+ > ebin/$(PROJECT).app
+endif
+ifneq ($(wildcard src/$(PROJECT).appup),)
+ $(verbose) cp src/$(PROJECT).appup ebin/
+endif
+
+clean:: clean-app
+
+clean-app:
+ $(gen_verbose) rm -rf $(PROJECT).d ebin/ priv/mibs/ $(XRL_ERL_FILES) $(YRL_ERL_FILES) \
+ $(addprefix include/,$(patsubst %.mib,%.hrl,$(notdir $(MIB_FILES)))) \
+ $(addprefix include/,$(patsubst %.asn1,%.hrl,$(notdir $(ASN1_FILES)))) \
+ $(addprefix include/,$(patsubst %.asn1,%.asn1db,$(notdir $(ASN1_FILES)))) \
+ $(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES))))
+
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Viktor Söderqvist <viktor@zuiderkwast.se>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: docs-deps
+
+# Configuration.
+
+ALL_DOC_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(DOC_DEPS))
+
+# Targets.
+
+$(foreach dep,$(DOC_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+doc-deps:
+else
+doc-deps: $(ALL_DOC_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_DOC_DEPS_DIRS) ; do $(MAKE) -C $$dep IS_DEP=1; done
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: rel-deps
+
+# Configuration.
+
+ALL_REL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(REL_DEPS))
+
+# Targets.
+
+$(foreach dep,$(REL_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+rel-deps:
+else
+rel-deps: $(ALL_REL_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_REL_DEPS_DIRS) ; do $(MAKE) -C $$dep; done
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: test-deps test-dir test-build clean-test-dir
+
+# Configuration.
+
+TEST_DIR ?= $(CURDIR)/test
+
+ALL_TEST_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(TEST_DEPS))
+
+TEST_ERLC_OPTS ?= +debug_info +warn_export_vars +warn_shadow_vars +warn_obsolete_guard
+TEST_ERLC_OPTS += -DTEST=1
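+
+# Since TEST is defined during test builds, sources can guard test-only
+# code (illustrative Erlang snippet):
+#   -ifdef(TEST).
+#   %% test-only helpers here
+#   -endif.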
+
+# Targets.
+
+$(foreach dep,$(TEST_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+test-deps:
+else
+test-deps: $(ALL_TEST_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_TEST_DEPS_DIRS) ; do \
+ if [ -z "$(strip $(FULL))" ] && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ else \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ fi \
+ done
+endif
+
+ifneq ($(wildcard $(TEST_DIR)),)
+test-dir: $(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build
+ @:
+
+test_erlc_verbose_0 = @echo " ERLC " $(filter-out $(patsubst %,%.erl,$(ERLC_EXCLUDE)),\
+ $(filter %.erl %.core,$(notdir $(FILES_TO_COMPILE))));
+test_erlc_verbose_2 = set -x;
+test_erlc_verbose = $(test_erlc_verbose_$(V))
+
+define compile_test_erl
+ $(test_erlc_verbose) erlc -v $(TEST_ERLC_OPTS) -o $(TEST_DIR) \
+ -pa ebin/ -I include/ $(1)
+endef
+
+ERL_TEST_FILES = $(call core_find,$(TEST_DIR)/,*.erl)
+$(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build: $(ERL_TEST_FILES) $(MAKEFILE_LIST)
+ $(eval FILES_TO_COMPILE := $(if $(filter $(MAKEFILE_LIST),$?),$(filter $(ERL_TEST_FILES),$^),$?))
+ $(if $(strip $(FILES_TO_COMPILE)),$(call compile_test_erl,$(FILES_TO_COMPILE)) && touch $@)
+endif
+
+test-build:: IS_TEST=1
+test-build:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build:: $(if $(wildcard src),$(if $(wildcard ebin/test),,clean)) $(if $(IS_APP),,deps test-deps)
+# We already compiled everything when IS_APP=1.
+ifndef IS_APP
+ifneq ($(wildcard src),)
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(verbose) $(MAKE) --no-print-directory app-build ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(gen_verbose) touch ebin/test
+endif
+ifneq ($(wildcard $(TEST_DIR)),)
+ $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+endif
+endif
+
+# Roughly the same as test-build, but when IS_APP=1.
+# We only care about compiling the current application.
+ifdef IS_APP
+test-build-app:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build-app:: deps test-deps
+ifneq ($(wildcard src),)
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(verbose) $(MAKE) --no-print-directory app-build ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(gen_verbose) touch ebin/test
+endif
+ifneq ($(wildcard $(TEST_DIR)),)
+ $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+endif
+endif
+
+clean:: clean-test-dir
+
+clean-test-dir:
+ifneq ($(wildcard $(TEST_DIR)/*.beam),)
+ $(gen_verbose) rm -f $(TEST_DIR)/*.beam $(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: rebar.config
+
+# We strip out -Werror because we don't want to fail due to
+# warnings when used as a dependency.
+
+compat_prepare_erlc_opts = $(shell echo "$1" | sed 's/, */,/g')
+
+define compat_convert_erlc_opts
+$(if $(filter-out -Werror,$1),\
+ $(if $(findstring +,$1),\
+ $(shell echo $1 | cut -b 2-)))
+endef
+
+define compat_erlc_opts_to_list
+[$(call comma_list,$(foreach o,$(call compat_prepare_erlc_opts,$1),$(call compat_convert_erlc_opts,$o)))]
+endef
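+
+# For example (illustrative): ERLC_OPTS = -Werror +debug_info +warn_export_vars
+# converts to [debug_info,warn_export_vars]; -Werror is dropped and the
+# leading + is stripped from each remaining option.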
+
+define compat_rebar_config
+{deps, [
+$(call comma_list,$(foreach d,$(DEPS),\
+ $(if $(filter hex,$(call dep_fetch,$d)),\
+ {$(call dep_name,$d)$(comma)"$(call dep_repo,$d)"},\
+ {$(call dep_name,$d)$(comma)".*"$(comma){git,"$(call dep_repo,$d)"$(comma)"$(call dep_commit,$d)"}})))
+]}.
+{erl_opts, $(call compat_erlc_opts_to_list,$(ERLC_OPTS))}.
+endef
+
+rebar.config:
+ $(gen_verbose) $(call core_render,compat_rebar_config,rebar.config)
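+
+# Running `make rebar.config` renders the template above; a git dependency
+# (name and commit hypothetical) comes out roughly as:
+#   {deps, [{cowboy,".*",{git,"https://github.com/ninenines/cowboy","2.9.0"}}]}.
+#   {erl_opts, [debug_info]}.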
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter asciideck,$(DEPS) $(DOC_DEPS)),asciideck)
+
+.PHONY: asciidoc asciidoc-guide asciidoc-manual install-asciidoc distclean-asciidoc-guide distclean-asciidoc-manual
+
+# Core targets.
+
+docs:: asciidoc
+
+distclean:: distclean-asciidoc-guide distclean-asciidoc-manual
+
+# Plugin-specific targets.
+
+asciidoc: asciidoc-guide asciidoc-manual
+
+# User guide.
+
+ifeq ($(wildcard doc/src/guide/book.asciidoc),)
+asciidoc-guide:
+else
+asciidoc-guide: distclean-asciidoc-guide doc-deps
+ a2x -v -f pdf doc/src/guide/book.asciidoc && mv doc/src/guide/book.pdf doc/guide.pdf
+ a2x -v -f chunked doc/src/guide/book.asciidoc && mv doc/src/guide/book.chunked/ doc/html/
+
+distclean-asciidoc-guide:
+ $(gen_verbose) rm -rf doc/html/ doc/guide.pdf
+endif
+
+# Man pages.
+
+ASCIIDOC_MANUAL_FILES := $(wildcard doc/src/manual/*.asciidoc)
+
+ifeq ($(ASCIIDOC_MANUAL_FILES),)
+asciidoc-manual:
+else
+
+# Configuration.
+
+MAN_INSTALL_PATH ?= /usr/local/share/man
+MAN_SECTIONS ?= 3 7
+MAN_PROJECT ?= $(shell echo $(PROJECT) | sed 's/^./\U&\E/')
+MAN_VERSION ?= $(PROJECT_VERSION)
+
+# Plugin-specific targets.
+
+define asciidoc2man.erl
+try
+ [begin
+ io:format(" ADOC ~s~n", [F]),
+ ok = asciideck:to_manpage(asciideck:parse_file(F), #{
+ compress => gzip,
+ outdir => filename:dirname(F),
+ extra2 => "$(MAN_PROJECT) $(MAN_VERSION)",
+ extra3 => "$(MAN_PROJECT) Function Reference"
+ })
+ end || F <- [$(shell echo $(addprefix $(comma)\",$(addsuffix \",$1)) | sed 's/^.//')]],
+ halt(0)
+catch C:E ->
+ io:format("Exception ~p:~p~nStacktrace: ~p~n", [C, E, erlang:get_stacktrace()]),
+ halt(1)
+end.
+endef
+
+asciidoc-manual:: doc-deps
+
+asciidoc-manual:: $(ASCIIDOC_MANUAL_FILES)
+ $(gen_verbose) $(call erlang,$(call asciidoc2man.erl,$?))
+ $(verbose) $(foreach s,$(MAN_SECTIONS),mkdir -p doc/man$s/ && mv doc/src/manual/*.$s.gz doc/man$s/;)
+
+install-docs:: install-asciidoc
+
+install-asciidoc: asciidoc-manual
+ $(foreach s,$(MAN_SECTIONS),\
+ mkdir -p $(MAN_INSTALL_PATH)/man$s/ && \
+ install -g `id -g` -o `id -u` -m 0644 doc/man$s/*.gz $(MAN_INSTALL_PATH)/man$s/;)
+
+distclean-asciidoc-manual:
+ $(gen_verbose) rm -rf $(addprefix doc/man,$(MAN_SECTIONS))
+endif
+endif
+
+# Copyright (c) 2014-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: bootstrap bootstrap-lib bootstrap-rel new list-templates
+
+# Core targets.
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Bootstrap targets:" \
+ " bootstrap Generate a skeleton of an OTP application" \
+ " bootstrap-lib Generate a skeleton of an OTP library" \
+ " bootstrap-rel Generate the files needed to build a release" \
+ " new-app in=NAME Create a new local OTP application NAME" \
+ " new-lib in=NAME Create a new local OTP library NAME" \
+ " new t=TPL n=NAME Generate a module NAME based on the template TPL" \
+		"  new t=T n=N in=APP  Generate a module N based on the template T in the application APP" \
+ " list-templates List available templates"
+
+# Bootstrap templates.
+
+define bs_appsrc
+{application, $p, [
+ {description, ""},
+ {vsn, "0.1.0"},
+ {id, "git"},
+ {modules, []},
+ {registered, []},
+ {applications, [
+ kernel,
+ stdlib
+ ]},
+ {mod, {$p_app, []}},
+ {env, []}
+]}.
+endef
+
+define bs_appsrc_lib
+{application, $p, [
+ {description, ""},
+ {vsn, "0.1.0"},
+ {id, "git"},
+ {modules, []},
+ {registered, []},
+ {applications, [
+ kernel,
+ stdlib
+ ]}
+]}.
+endef
+
+# To prevent autocompletion issues with ZSH, we add "include erlang.mk"
+# separately during the actual bootstrap.
+define bs_Makefile
+PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.1.0
+$(if $(SP),
+# Whitespace to be used when creating files from templates.
+SP = $(SP)
+)
+endef
+
+define bs_apps_Makefile
+PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.1.0
+$(if $(SP),
+# Whitespace to be used when creating files from templates.
+SP = $(SP)
+)
+# Make sure we know where the applications are located.
+ROOT_DIR ?= $(call core_relpath,$(dir $(ERLANG_MK_FILENAME)),$(APPS_DIR)/app)
+APPS_DIR ?= ..
+DEPS_DIR ?= $(call core_relpath,$(DEPS_DIR),$(APPS_DIR)/app)
+
+include $$(ROOT_DIR)/erlang.mk
+endef
+
+define bs_app
+-module($p_app).
+-behaviour(application).
+
+-export([start/2]).
+-export([stop/1]).
+
+start(_Type, _Args) ->
+ $p_sup:start_link().
+
+stop(_State) ->
+ ok.
+endef
+
+define bs_relx_config
+{release, {$p_release, "1"}, [$p, sasl, runtime_tools]}.
+{extended_start_script, true}.
+{sys_config, "config/sys.config"}.
+{vm_args, "config/vm.args"}.
+endef
+
+define bs_sys_config
+[
+].
+endef
+
+define bs_vm_args
+-name $p@127.0.0.1
+-setcookie $p
+-heart
+endef
+
+# Normal templates.
+
+define tpl_supervisor
+-module($(n)).
+-behaviour(supervisor).
+
+-export([start_link/0]).
+-export([init/1]).
+
+start_link() ->
+ supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+init([]) ->
+ Procs = [],
+ {ok, {{one_for_one, 1, 5}, Procs}}.
+endef
+
+define tpl_gen_server
+-module($(n)).
+-behaviour(gen_server).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_server.
+-export([init/1]).
+-export([handle_call/3]).
+-export([handle_cast/2]).
+-export([handle_info/2]).
+-export([terminate/2]).
+-export([code_change/3]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_server:start_link(?MODULE, [], []).
+
+%% gen_server.
+
+init([]) ->
+ {ok, #state{}}.
+
+handle_call(_Request, _From, State) ->
+ {reply, ignored, State}.
+
+handle_cast(_Msg, State) ->
+ {noreply, State}.
+
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+endef
+
+define tpl_module
+-module($(n)).
+-export([]).
+endef
+
+define tpl_cowboy_http
+-module($(n)).
+-behaviour(cowboy_http_handler).
+
+-export([init/3]).
+-export([handle/2]).
+-export([terminate/3]).
+
+-record(state, {
+}).
+
+init(_, Req, _Opts) ->
+ {ok, Req, #state{}}.
+
+handle(Req, State=#state{}) ->
+ {ok, Req2} = cowboy_req:reply(200, Req),
+ {ok, Req2, State}.
+
+terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_gen_fsm
+-module($(n)).
+-behaviour(gen_fsm).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_fsm.
+-export([init/1]).
+-export([state_name/2]).
+-export([handle_event/3]).
+-export([state_name/3]).
+-export([handle_sync_event/4]).
+-export([handle_info/3]).
+-export([terminate/3]).
+-export([code_change/4]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_fsm:start_link(?MODULE, [], []).
+
+%% gen_fsm.
+
+init([]) ->
+ {ok, state_name, #state{}}.
+
+state_name(_Event, StateData) ->
+ {next_state, state_name, StateData}.
+
+handle_event(_Event, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+state_name(_Event, _From, StateData) ->
+ {reply, ignored, state_name, StateData}.
+
+handle_sync_event(_Event, _From, StateName, StateData) ->
+ {reply, ignored, StateName, StateData}.
+
+handle_info(_Info, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+terminate(_Reason, _StateName, _StateData) ->
+ ok.
+
+code_change(_OldVsn, StateName, StateData, _Extra) ->
+ {ok, StateName, StateData}.
+endef
+
+define tpl_gen_statem
+-module($(n)).
+-behaviour(gen_statem).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_statem.
+-export([callback_mode/0]).
+-export([init/1]).
+-export([state_name/3]).
+-export([handle_event/4]).
+-export([terminate/3]).
+-export([code_change/4]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_statem:start_link(?MODULE, [], []).
+
+%% gen_statem.
+
+callback_mode() ->
+ state_functions.
+
+init([]) ->
+ {ok, state_name, #state{}}.
+
+state_name(_EventType, _EventData, StateData) ->
+ {next_state, state_name, StateData}.
+
+handle_event(_EventType, _EventData, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+terminate(_Reason, _StateName, _StateData) ->
+ ok.
+
+code_change(_OldVsn, StateName, StateData, _Extra) ->
+ {ok, StateName, StateData}.
+endef
+
+define tpl_cowboy_loop
+-module($(n)).
+-behaviour(cowboy_loop_handler).
+
+-export([init/3]).
+-export([info/3]).
+-export([terminate/3]).
+
+-record(state, {
+}).
+
+init(_, Req, _Opts) ->
+ {loop, Req, #state{}, 5000, hibernate}.
+
+info(_Info, Req, State) ->
+ {loop, Req, State, hibernate}.
+
+terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_cowboy_rest
+-module($(n)).
+
+-export([init/3]).
+-export([content_types_provided/2]).
+-export([get_html/2]).
+
+init(_, _Req, _Opts) ->
+ {upgrade, protocol, cowboy_rest}.
+
+content_types_provided(Req, State) ->
+ {[{{<<"text">>, <<"html">>, '*'}, get_html}], Req, State}.
+
+get_html(Req, State) ->
+ {<<"<html><body>This is REST!</body></html>">>, Req, State}.
+endef
+
+define tpl_cowboy_ws
+-module($(n)).
+-behaviour(cowboy_websocket_handler).
+
+-export([init/3]).
+-export([websocket_init/3]).
+-export([websocket_handle/3]).
+-export([websocket_info/3]).
+-export([websocket_terminate/3]).
+
+-record(state, {
+}).
+
+init(_, _, _) ->
+ {upgrade, protocol, cowboy_websocket}.
+
+websocket_init(_, Req, _Opts) ->
+ Req2 = cowboy_req:compact(Req),
+ {ok, Req2, #state{}}.
+
+websocket_handle({text, Data}, Req, State) ->
+ {reply, {text, Data}, Req, State};
+websocket_handle({binary, Data}, Req, State) ->
+ {reply, {binary, Data}, Req, State};
+websocket_handle(_Frame, Req, State) ->
+ {ok, Req, State}.
+
+websocket_info(_Info, Req, State) ->
+ {ok, Req, State}.
+
+websocket_terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_ranch_protocol
+-module($(n)).
+-behaviour(ranch_protocol).
+
+-export([start_link/4]).
+-export([init/4]).
+
+-type opts() :: [].
+-export_type([opts/0]).
+
+-record(state, {
+ socket :: inet:socket(),
+ transport :: module()
+}).
+
+start_link(Ref, Socket, Transport, Opts) ->
+ Pid = spawn_link(?MODULE, init, [Ref, Socket, Transport, Opts]),
+ {ok, Pid}.
+
+-spec init(ranch:ref(), inet:socket(), module(), opts()) -> ok.
+init(Ref, Socket, Transport, _Opts) ->
+ ok = ranch:accept_ack(Ref),
+ loop(#state{socket=Socket, transport=Transport}).
+
+loop(State) ->
+ loop(State).
+endef
+
+# Plugin-specific targets.
+
+ifndef WS
+ifdef SP
+WS = $(subst a,,a $(wordlist 1,$(SP),a a a a a a a a a a a a a a a a a a a a))
+else
+WS = $(tab)
+endif
+endif
+
+bootstrap:
+ifneq ($(wildcard src/),)
+ $(error Error: src/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(eval n := $(PROJECT)_sup)
+ $(verbose) $(call core_render,bs_Makefile,Makefile)
+ $(verbose) echo "include erlang.mk" >> Makefile
+ $(verbose) mkdir src/
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc,src/$(PROJECT).app.src)
+endif
+ $(verbose) $(call core_render,bs_app,src/$(PROJECT)_app.erl)
+ $(verbose) $(call core_render,tpl_supervisor,src/$(PROJECT)_sup.erl)
+
+bootstrap-lib:
+ifneq ($(wildcard src/),)
+ $(error Error: src/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(verbose) $(call core_render,bs_Makefile,Makefile)
+ $(verbose) echo "include erlang.mk" >> Makefile
+ $(verbose) mkdir src/
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc_lib,src/$(PROJECT).app.src)
+endif
+
+bootstrap-rel:
+ifneq ($(wildcard relx.config),)
+ $(error Error: relx.config already exists)
+endif
+ifneq ($(wildcard config/),)
+ $(error Error: config/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(verbose) $(call core_render,bs_relx_config,relx.config)
+ $(verbose) mkdir config/
+ $(verbose) $(call core_render,bs_sys_config,config/sys.config)
+ $(verbose) $(call core_render,bs_vm_args,config/vm.args)
+
+new-app:
+ifndef in
+ $(error Usage: $(MAKE) new-app in=APP)
+endif
+ifneq ($(wildcard $(APPS_DIR)/$in),)
+ $(error Error: Application $in already exists)
+endif
+ $(eval p := $(in))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(eval n := $(in)_sup)
+ $(verbose) mkdir -p $(APPS_DIR)/$p/src/
+ $(verbose) $(call core_render,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile)
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc,$(APPS_DIR)/$p/src/$p.app.src)
+endif
+ $(verbose) $(call core_render,bs_app,$(APPS_DIR)/$p/src/$p_app.erl)
+ $(verbose) $(call core_render,tpl_supervisor,$(APPS_DIR)/$p/src/$p_sup.erl)
+
+new-lib:
+ifndef in
+ $(error Usage: $(MAKE) new-lib in=APP)
+endif
+ifneq ($(wildcard $(APPS_DIR)/$in),)
+ $(error Error: Application $in already exists)
+endif
+ $(eval p := $(in))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(verbose) mkdir -p $(APPS_DIR)/$p/src/
+ $(verbose) $(call core_render,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile)
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc_lib,$(APPS_DIR)/$p/src/$p.app.src)
+endif
+
+new:
+ifeq ($(wildcard src/)$(in),)
+ $(error Error: src/ directory does not exist)
+endif
+ifndef t
+ $(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP])
+endif
+ifndef n
+ $(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP])
+endif
+ifdef in
+ $(verbose) $(call core_render,tpl_$(t),$(APPS_DIR)/$(in)/src/$(n).erl)
+else
+ $(verbose) $(call core_render,tpl_$(t),src/$(n).erl)
+endif
+
+list-templates:
+ $(verbose) @echo Available templates:
+ $(verbose) printf " %s\n" $(sort $(patsubst tpl_%,%,$(filter tpl_%,$(.VARIABLES))))
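+
+# Usage sketch (hypothetical names): scaffold an application, generate a
+# gen_server from the bundled template, then add release files:
+#
+#   $ make -f erlang.mk bootstrap
+#   $ make new t=gen_server n=my_worker
+#   $ make bootstrap-rel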
+
+# Copyright (c) 2014-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: clean-c_src distclean-c_src-env
+
+# Configuration.
+
+C_SRC_DIR ?= $(CURDIR)/c_src
+C_SRC_ENV ?= $(C_SRC_DIR)/env.mk
+C_SRC_OUTPUT ?= $(CURDIR)/priv/$(PROJECT)
+C_SRC_TYPE ?= shared
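+
+# A minimal sketch (hypothetical project Makefile): build an executable
+# port program instead of the default shared library:
+#
+#   C_SRC_TYPE = executable
+#   C_SRC_OUTPUT = $(CURDIR)/priv/my_port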
+
+# System type and C compiler/flags.
+
+ifeq ($(PLATFORM),msys2)
+ C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?= .exe
+ C_SRC_OUTPUT_SHARED_EXTENSION ?= .dll
+else
+ C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?=
+ C_SRC_OUTPUT_SHARED_EXTENSION ?= .so
+endif
+
+ifeq ($(C_SRC_TYPE),shared)
+ C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_SHARED_EXTENSION)
+else
+ C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_EXECUTABLE_EXTENSION)
+endif
+
+ifeq ($(PLATFORM),msys2)
+# We hardcode the compiler used on MSYS2. The default CC=cc does
+# not produce working code. The "gcc" MSYS2 package also doesn't.
+ CC = /mingw64/bin/gcc
+ export CC
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),darwin)
+ CC ?= cc
+ CFLAGS ?= -O3 -std=c99 -arch x86_64 -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -arch x86_64 -Wall
+ LDFLAGS ?= -arch x86_64 -flat_namespace -undefined suppress
+else ifeq ($(PLATFORM),freebsd)
+ CC ?= cc
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),linux)
+ CC ?= gcc
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+endif
+
+ifneq ($(PLATFORM),msys2)
+ CFLAGS += -fPIC
+ CXXFLAGS += -fPIC
+endif
+
+CFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
+CXXFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
+
+LDLIBS += -L"$(ERL_INTERFACE_LIB_DIR)" -lei
+
+# Verbosity.
+
+c_verbose_0 = @echo " C " $(filter-out $(notdir $(MAKEFILE_LIST) $(C_SRC_ENV)),$(^F));
+c_verbose = $(c_verbose_$(V))
+
+cpp_verbose_0 = @echo " CPP " $(filter-out $(notdir $(MAKEFILE_LIST) $(C_SRC_ENV)),$(^F));
+cpp_verbose = $(cpp_verbose_$(V))
+
+link_verbose_0 = @echo " LD " $(@F);
+link_verbose = $(link_verbose_$(V))
+
+# Targets.
+
+ifeq ($(wildcard $(C_SRC_DIR)),)
+else ifneq ($(wildcard $(C_SRC_DIR)/Makefile),)
+app:: app-c_src
+
+test-build:: app-c_src
+
+app-c_src:
+ $(MAKE) -C $(C_SRC_DIR)
+
+clean::
+ $(MAKE) -C $(C_SRC_DIR) clean
+
+else
+
+ifeq ($(SOURCES),)
+SOURCES := $(sort $(foreach pat,*.c *.C *.cc *.cpp,$(call core_find,$(C_SRC_DIR)/,$(pat))))
+endif
+OBJECTS = $(addsuffix .o, $(basename $(SOURCES)))
+
+COMPILE_C = $(c_verbose) $(CC) $(CFLAGS) $(CPPFLAGS) -c
+COMPILE_CPP = $(cpp_verbose) $(CXX) $(CXXFLAGS) $(CPPFLAGS) -c
+
+app:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
+
+test-build:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
+
+$(C_SRC_OUTPUT_FILE): $(OBJECTS)
+ $(verbose) mkdir -p $(dir $@)
+ $(link_verbose) $(CC) $(OBJECTS) \
+ $(LDFLAGS) $(if $(filter $(C_SRC_TYPE),shared),-shared) $(LDLIBS) \
+ -o $(C_SRC_OUTPUT_FILE)
+
+$(OBJECTS): $(MAKEFILE_LIST) $(C_SRC_ENV)
+
+%.o: %.c
+ $(COMPILE_C) $(OUTPUT_OPTION) $<
+
+%.o: %.cc
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+%.o: %.C
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+%.o: %.cpp
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+clean:: clean-c_src
+
+clean-c_src:
+ $(gen_verbose) rm -f $(C_SRC_OUTPUT_FILE) $(OBJECTS)
+
+endif
+
+ifneq ($(wildcard $(C_SRC_DIR)),)
+ERL_ERTS_DIR = $(shell $(ERL) -eval 'io:format("~s~n", [code:lib_dir(erts)]), halt().')
+
+$(C_SRC_ENV):
+ $(verbose) $(ERL) -eval "file:write_file(\"$(call core_native_path,$(C_SRC_ENV))\", \
+ io_lib:format( \
+ \"# Generated by Erlang.mk. Edit at your own risk!~n~n\" \
+ \"ERTS_INCLUDE_DIR ?= ~s/erts-~s/include/~n\" \
+ \"ERL_INTERFACE_INCLUDE_DIR ?= ~s~n\" \
+ \"ERL_INTERFACE_LIB_DIR ?= ~s~n\" \
+ \"ERTS_DIR ?= $(ERL_ERTS_DIR)~n\", \
+ [code:root_dir(), erlang:system_info(version), \
+ code:lib_dir(erl_interface, include), \
+ code:lib_dir(erl_interface, lib)])), \
+ halt()."
+
+distclean:: distclean-c_src-env
+
+distclean-c_src-env:
+ $(gen_verbose) rm -f $(C_SRC_ENV)
+
+-include $(C_SRC_ENV)
+
+ifneq ($(ERL_ERTS_DIR),$(ERTS_DIR))
+$(shell rm -f $(C_SRC_ENV))
+endif
+endif
+
+# Templates.
+
+define bs_c_nif
+#include "erl_nif.h"
+
+static int loads = 0;
+
+static int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
+{
+ /* Initialize private data. */
+ *priv_data = NULL;
+
+ loads++;
+
+ return 0;
+}
+
+static int upgrade(ErlNifEnv* env, void** priv_data, void** old_priv_data, ERL_NIF_TERM load_info)
+{
+ /* Convert the private data to the new version. */
+ *priv_data = *old_priv_data;
+
+ loads++;
+
+ return 0;
+}
+
+static void unload(ErlNifEnv* env, void* priv_data)
+{
+ if (loads == 1) {
+ /* Destroy the private data. */
+ }
+
+ loads--;
+}
+
+static ERL_NIF_TERM hello(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ if (enif_is_atom(env, argv[0])) {
+ return enif_make_tuple2(env,
+ enif_make_atom(env, "hello"),
+ argv[0]);
+ }
+
+ return enif_make_tuple2(env,
+ enif_make_atom(env, "error"),
+ enif_make_atom(env, "badarg"));
+}
+
+static ErlNifFunc nif_funcs[] = {
+ {"hello", 1, hello}
+};
+
+ERL_NIF_INIT($n, nif_funcs, load, NULL, upgrade, unload)
+endef
+
+define bs_erl_nif
+-module($n).
+
+-export([hello/1]).
+
+-on_load(on_load/0).
+on_load() ->
+ PrivDir = case code:priv_dir(?MODULE) of
+ {error, _} ->
+ AppPath = filename:dirname(filename:dirname(code:which(?MODULE))),
+ filename:join(AppPath, "priv");
+ Path ->
+ Path
+ end,
+ erlang:load_nif(filename:join(PrivDir, atom_to_list(?MODULE)), 0).
+
+hello(_) ->
+ erlang:nif_error({not_loaded, ?MODULE}).
+endef
+
+new-nif:
+ifneq ($(wildcard $(C_SRC_DIR)/$n.c),)
+ $(error Error: $(C_SRC_DIR)/$n.c already exists)
+endif
+ifneq ($(wildcard src/$n.erl),)
+ $(error Error: src/$n.erl already exists)
+endif
+ifndef n
+ $(error Usage: $(MAKE) new-nif n=NAME [in=APP])
+endif
+ifdef in
+ $(verbose) $(MAKE) -C $(APPS_DIR)/$(in)/ new-nif n=$n in=
+else
+ $(verbose) mkdir -p $(C_SRC_DIR) src/
+ $(verbose) $(call core_render,bs_c_nif,$(C_SRC_DIR)/$n.c)
+ $(verbose) $(call core_render,bs_erl_nif,src/$n.erl)
+endif
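+
+# Usage sketch (hypothetical name): generate matching C and Erlang stubs
+# for a NIF, then compile everything:
+#
+#   $ make new-nif n=hello_nif
+#   $ make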
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: ci ci-prepare ci-setup
+
+CI_OTP ?=
+CI_HIPE ?=
+CI_ERLLVM ?=
+
+ifeq ($(CI_VM),native)
+ERLC_OPTS += +native
+TEST_ERLC_OPTS += +native
+else ifeq ($(CI_VM),erllvm)
+ERLC_OPTS += +native +'{hipe, [to_llvm]}'
+TEST_ERLC_OPTS += +native +'{hipe, [to_llvm]}'
+endif
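+
+# A minimal sketch (hypothetical versions): run the full test suite
+# against several kerl-built OTP releases:
+#
+#   CI_OTP = OTP-21.3 OTP-22.2
+#
+#   $ make ci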
+
+ifeq ($(strip $(CI_OTP) $(CI_HIPE) $(CI_ERLLVM)),)
+ci::
+else
+
+ci:: $(addprefix ci-,$(CI_OTP) $(addsuffix -native,$(CI_HIPE)) $(addsuffix -erllvm,$(CI_ERLLVM)))
+
+ci-prepare: $(addprefix $(KERL_INSTALL_DIR)/,$(CI_OTP) $(addsuffix -native,$(CI_HIPE)))
+
+ci-setup::
+ $(verbose) :
+
+ci-extra::
+ $(verbose) :
+
+ci_verbose_0 = @echo " CI " $(1);
+ci_verbose = $(ci_verbose_$(V))
+
+define ci_target
+ci-$1: $(KERL_INSTALL_DIR)/$2
+ $(verbose) $(MAKE) --no-print-directory clean
+ $(ci_verbose) \
+ PATH="$(KERL_INSTALL_DIR)/$2/bin:$(PATH)" \
+ CI_OTP_RELEASE="$1" \
+ CT_OPTS="-label $1" \
+ CI_VM="$3" \
+ $(MAKE) ci-setup tests
+ $(verbose) $(MAKE) --no-print-directory ci-extra
+endef
+
+$(foreach otp,$(CI_OTP),$(eval $(call ci_target,$(otp),$(otp),otp)))
+$(foreach otp,$(CI_HIPE),$(eval $(call ci_target,$(otp)-native,$(otp)-native,native)))
+$(foreach otp,$(CI_ERLLVM),$(eval $(call ci_target,$(otp)-erllvm,$(otp)-native,erllvm)))
+
+$(foreach otp,$(filter-out $(ERLANG_OTP),$(CI_OTP)),$(eval $(call kerl_otp_target,$(otp))))
+$(foreach otp,$(filter-out $(ERLANG_HIPE),$(sort $(CI_HIPE) $(CI_ERLLVM))),$(eval $(call kerl_hipe_target,$(otp))))
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Continuous Integration targets:" \
+ " ci Run '$(MAKE) tests' on all configured Erlang versions." \
+ "" \
+ "The CI_OTP variable must be defined with the Erlang versions" \
+ "that must be tested. For example: CI_OTP = OTP-17.3.4 OTP-17.5.3"
+
+endif
+
+# Copyright (c) 2020, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifdef CONCUERROR_TESTS
+
+.PHONY: concuerror distclean-concuerror
+
+# Configuration
+
+CONCUERROR_LOGS_DIR ?= $(CURDIR)/logs
+CONCUERROR_OPTS ?=
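+
+# A minimal sketch (hypothetical module and test names): each entry is a
+# module:function pair, and a concuerror-MODULE-FUNCTION target is
+# generated for each of them:
+#
+#   CONCUERROR_TESTS = my_mod:interleaving_test
+#
+#   $ make concuerror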
+
+# Core targets.
+
+check:: concuerror
+
+ifndef KEEP_LOGS
+distclean:: distclean-concuerror
+endif
+
+# Plugin-specific targets.
+
+$(ERLANG_MK_TMP)/Concuerror/bin/concuerror: | $(ERLANG_MK_TMP)
+ $(verbose) git clone https://github.com/parapluu/Concuerror $(ERLANG_MK_TMP)/Concuerror
+ $(verbose) $(MAKE) -C $(ERLANG_MK_TMP)/Concuerror
+
+$(CONCUERROR_LOGS_DIR):
+ $(verbose) mkdir -p $(CONCUERROR_LOGS_DIR)
+
+define concuerror_html_report
+<!DOCTYPE html>
+<html lang="en">
+<head>
+<meta charset="utf-8">
+<title>Concuerror HTML report</title>
+</head>
+<body>
+<h1>Concuerror HTML report</h1>
+<p>Generated on $(concuerror_date)</p>
+<ul>
+$(foreach t,$(concuerror_targets),<li><a href="$(t).txt">$(t)</a></li>)
+</ul>
+</body>
+</html>
+endef
+
+concuerror: $(addprefix concuerror-,$(subst :,-,$(CONCUERROR_TESTS)))
+ $(eval concuerror_date := $(shell date))
+ $(eval concuerror_targets := $^)
+ $(verbose) $(call core_render,concuerror_html_report,$(CONCUERROR_LOGS_DIR)/concuerror.html)
+
+define concuerror_target
+.PHONY: concuerror-$1-$2
+
+concuerror-$1-$2: test-build | $(ERLANG_MK_TMP)/Concuerror/bin/concuerror $(CONCUERROR_LOGS_DIR)
+ $(ERLANG_MK_TMP)/Concuerror/bin/concuerror \
+ --pa $(CURDIR)/ebin --pa $(TEST_DIR) \
+ -o $(CONCUERROR_LOGS_DIR)/concuerror-$1-$2.txt \
+ $$(CONCUERROR_OPTS) -m $1 -t $2
+endef
+
+$(foreach test,$(CONCUERROR_TESTS),$(eval $(call concuerror_target,$(firstword $(subst :, ,$(test))),$(lastword $(subst :, ,$(test))))))
+
+distclean-concuerror:
+ $(gen_verbose) rm -rf $(CONCUERROR_LOGS_DIR)
+
+endif
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: ct apps-ct distclean-ct
+
+# Configuration.
+
+CT_OPTS ?=
+
+ifneq ($(wildcard $(TEST_DIR)),)
+ifndef CT_SUITES
+CT_SUITES := $(sort $(subst _SUITE.erl,,$(notdir $(call core_find,$(TEST_DIR)/,*_SUITE.erl))))
+endif
+endif
+CT_SUITES ?=
+CT_LOGS_DIR ?= $(CURDIR)/logs
+
+# Core targets.
+
+tests:: ct
+
+ifndef KEEP_LOGS
+distclean:: distclean-ct
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Common_test targets:" \
+ " ct Run all the common_test suites for this project" \
+ "" \
+		"Each common_test suite has an associated target." \
+		"A suite named http_SUITE can be run using the ct-http target."
+
+# Plugin-specific targets.
+
+CT_RUN = ct_run \
+ -no_auto_compile \
+ -noinput \
+ -pa $(CURDIR)/ebin $(TEST_DIR) \
+ -dir $(TEST_DIR) \
+ -logdir $(CT_LOGS_DIR)
+
+ifeq ($(CT_SUITES),)
+ct: $(if $(IS_APP)$(ROOT_DIR),,apps-ct)
+else
+# We do not run tests if we are inside an apps/* application with no test directory.
+ifneq ($(IS_APP)$(wildcard $(TEST_DIR)),1)
+ct: test-build $(if $(IS_APP)$(ROOT_DIR),,apps-ct)
+ $(verbose) mkdir -p $(CT_LOGS_DIR)
+ $(gen_verbose) $(CT_RUN) -sname ct_$(PROJECT) -suite $(addsuffix _SUITE,$(CT_SUITES)) $(CT_OPTS)
+endif
+endif
+
+ifneq ($(ALL_APPS_DIRS),)
+define ct_app_target
+apps-ct-$1: test-build
+ $$(MAKE) -C $1 ct IS_APP=1
+endef
+
+$(foreach app,$(ALL_APPS_DIRS),$(eval $(call ct_app_target,$(app))))
+
+apps-ct: $(addprefix apps-ct-,$(ALL_APPS_DIRS))
+endif
+
+ifdef t
+ifeq (,$(findstring :,$t))
+CT_EXTRA = -group $t
+else
+t_words = $(subst :, ,$t)
+CT_EXTRA = -group $(firstword $(t_words)) -case $(lastword $(t_words))
+endif
+else
+ifdef c
+CT_EXTRA = -case $c
+else
+CT_EXTRA =
+endif
+endif
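+
+# Usage sketch (hypothetical suite, group and case names):
+#
+#   $ make ct-http                 # run the whole suite
+#   $ make ct-http t=admins        # run one group
+#   $ make ct-http t=admins:login  # run one case within a group
+#   $ make ct-http c=login         # run one case, no group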
+
+define ct_suite_target
+ct-$(1): test-build
+ $(verbose) mkdir -p $(CT_LOGS_DIR)
+ $(gen_verbose_esc) $(CT_RUN) -sname ct_$(PROJECT) -suite $(addsuffix _SUITE,$(1)) $(CT_EXTRA) $(CT_OPTS)
+endef
+
+$(foreach test,$(CT_SUITES),$(eval $(call ct_suite_target,$(test))))
+
+distclean-ct:
+ $(gen_verbose) rm -rf $(CT_LOGS_DIR)
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: plt distclean-plt dialyze
+
+# Configuration.
+
+DIALYZER_PLT ?= $(CURDIR)/.$(PROJECT).plt
+export DIALYZER_PLT
+
+PLT_APPS ?=
+DIALYZER_DIRS ?= --src -r $(wildcard src) $(ALL_APPS_DIRS)
+DIALYZER_OPTS ?= -Werror_handling -Wrace_conditions -Wunmatched_returns # -Wunderspecs
+DIALYZER_PLT_OPTS ?=
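+
+# A minimal sketch (hypothetical applications): add extra OTP applications
+# to the PLT and adjust the warning set:
+#
+#   PLT_APPS = crypto public_key ssl
+#   DIALYZER_OPTS = -Werror_handling -Wunmatched_returns
+#
+#   $ make plt dialyze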
+
+# Core targets.
+
+check:: dialyze
+
+distclean:: distclean-plt
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Dialyzer targets:" \
+ " plt Build a PLT file for this project" \
+ " dialyze Analyze the project using Dialyzer"
+
+# Plugin-specific targets.
+
+define filter_opts.erl
+ Opts = init:get_plain_arguments(),
+ {Filtered, _} = lists:foldl(fun
+ (O, {Os, true}) -> {[O|Os], false};
+ (O = "-D", {Os, _}) -> {[O|Os], true};
+ (O = [\\$$-, \\$$D, _ | _], {Os, _}) -> {[O|Os], false};
+ (O = "-I", {Os, _}) -> {[O|Os], true};
+ (O = [\\$$-, \\$$I, _ | _], {Os, _}) -> {[O|Os], false};
+ (O = "-pa", {Os, _}) -> {[O|Os], true};
+ (_, Acc) -> Acc
+ end, {[], false}, Opts),
+ io:format("~s~n", [string:join(lists:reverse(Filtered), " ")]),
+ halt().
+endef
+
+# DIALYZER_PLT is a variable understood directly by Dialyzer.
+#
+# We append the path to erts at the end of the PLT. This works
+# because the PLT file is in the external term format and the
+# function binary_to_term/1 ignores any trailing data.
+$(DIALYZER_PLT): deps app
+ $(eval DEPS_LOG := $(shell test -f $(ERLANG_MK_TMP)/deps.log && \
+ while read p; do test -d $$p/ebin && echo $$p/ebin; done <$(ERLANG_MK_TMP)/deps.log))
+ $(verbose) dialyzer --build_plt $(DIALYZER_PLT_OPTS) --apps \
+ erts kernel stdlib $(PLT_APPS) $(OTP_DEPS) $(LOCAL_DEPS) $(DEPS_LOG) || test $$? -eq 2
+ $(verbose) $(ERL) -eval 'io:format("~n~s~n", [code:lib_dir(erts)]), halt().' >> $@
+
+plt: $(DIALYZER_PLT)
+
+distclean-plt:
+ $(gen_verbose) rm -f $(DIALYZER_PLT)
+
+ifneq ($(wildcard $(DIALYZER_PLT)),)
+dialyze: $(if $(filter --src,$(DIALYZER_DIRS)),,deps app)
+ $(verbose) if ! tail -n1 $(DIALYZER_PLT) | \
+ grep -q "^`$(ERL) -eval 'io:format("~s", [code:lib_dir(erts)]), halt().'`$$"; then \
+ rm $(DIALYZER_PLT); \
+ $(MAKE) plt; \
+ fi
+else
+dialyze: $(DIALYZER_PLT)
+endif
+ $(verbose) dialyzer --no_native `$(ERL) \
+ -eval "$(subst $(newline),,$(call escape_dquotes,$(call filter_opts.erl)))" \
+ -extra $(ERLC_OPTS)` $(DIALYZER_DIRS) $(DIALYZER_OPTS) $(if $(wildcard ebin/),-pa ebin/)
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-edoc edoc
+
+# Configuration.
+
+EDOC_OPTS ?=
+EDOC_SRC_DIRS ?=
+EDOC_OUTPUT ?= doc
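+
+# A minimal sketch (hypothetical options): document private functions and
+# write the generated HTML somewhere other than doc/:
+#
+#   EDOC_OPTS = {private, true}
+#   EDOC_OUTPUT = doc/api
+#
+#   $ make edoc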
+
+define edoc.erl
+ SrcPaths = lists:foldl(fun(P, Acc) ->
+ filelib:wildcard(atom_to_list(P) ++ "/{src,c_src}") ++ Acc
+ end, [], [$(call comma_list,$(patsubst %,'%',$(call core_native_path,$(EDOC_SRC_DIRS))))]),
+ DefaultOpts = [{dir, "$(EDOC_OUTPUT)"}, {source_path, SrcPaths}, {subpackages, false}],
+ edoc:application($(1), ".", [$(2)] ++ DefaultOpts),
+ halt(0).
+endef
+
+# Core targets.
+
+ifneq ($(strip $(EDOC_SRC_DIRS)$(wildcard doc/overview.edoc)),)
+docs:: edoc
+endif
+
+distclean:: distclean-edoc
+
+# Plugin-specific targets.
+
+edoc: distclean-edoc doc-deps
+ $(gen_verbose) $(call erlang,$(call edoc.erl,$(PROJECT),$(EDOC_OPTS)))
+
+distclean-edoc:
+ $(gen_verbose) rm -f $(EDOC_OUTPUT)/*.css $(EDOC_OUTPUT)/*.html $(EDOC_OUTPUT)/*.png $(EDOC_OUTPUT)/edoc-info
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Configuration.
+
+DTL_FULL_PATH ?=
+DTL_PATH ?= templates/
+DTL_PREFIX ?=
+DTL_SUFFIX ?= _dtl
+DTL_OPTS ?=
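+
+# A minimal sketch (hypothetical layout): with DTL_FULL_PATH set, a
+# template templates/users/list.dtl compiles to a module named
+# users_list_dtl instead of list_dtl:
+#
+#   DTL_FULL_PATH = 1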
+
+# Verbosity.
+
+dtl_verbose_0 = @echo " DTL " $(filter %.dtl,$(?F));
+dtl_verbose = $(dtl_verbose_$(V))
+
+# Core targets.
+
+DTL_PATH := $(abspath $(DTL_PATH))
+DTL_FILES := $(sort $(call core_find,$(DTL_PATH),*.dtl))
+
+ifneq ($(DTL_FILES),)
+
+DTL_NAMES = $(addprefix $(DTL_PREFIX),$(addsuffix $(DTL_SUFFIX),$(DTL_FILES:$(DTL_PATH)/%.dtl=%)))
+DTL_MODULES = $(if $(DTL_FULL_PATH),$(subst /,_,$(DTL_NAMES)),$(notdir $(DTL_NAMES)))
+BEAM_FILES += $(addsuffix .beam,$(addprefix ebin/,$(DTL_MODULES)))
+
+ifneq ($(words $(DTL_FILES)),0)
+# Rebuild templates when the Makefile changes.
+$(ERLANG_MK_TMP)/last-makefile-change-erlydtl: $(MAKEFILE_LIST) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(DTL_FILES); \
+ fi
+ $(verbose) touch $@
+
+ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change-erlydtl
+endif
+
+define erlydtl_compile.erl
+ [begin
+ Module0 = case "$(strip $(DTL_FULL_PATH))" of
+ "" ->
+ filename:basename(F, ".dtl");
+ _ ->
+ "$(call core_native_path,$(DTL_PATH))/" ++ F2 = filename:rootname(F, ".dtl"),
+ re:replace(F2, "/", "_", [{return, list}, global])
+ end,
+ Module = list_to_atom("$(DTL_PREFIX)" ++ string:to_lower(Module0) ++ "$(DTL_SUFFIX)"),
+ case erlydtl:compile(F, Module, [$(DTL_OPTS)] ++ [{out_dir, "ebin/"}, return_errors]) of
+ ok -> ok;
+ {ok, _} -> ok
+ end
+ end || F <- string:tokens("$(1)", " ")],
+ halt().
+endef
+
+ebin/$(PROJECT).app:: $(DTL_FILES) | ebin/
+ $(if $(strip $?),\
+ $(dtl_verbose) $(call erlang,$(call erlydtl_compile.erl,$(call core_native_path,$?)),\
+ -pa ebin/))
+
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, Dave Cottlehuber <dch@skunkwerks.at>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-escript escript escript-zip
+
+# Configuration.
+
+ESCRIPT_NAME ?= $(PROJECT)
+ESCRIPT_FILE ?= $(ESCRIPT_NAME)
+
+ESCRIPT_SHEBANG ?= /usr/bin/env escript
+ESCRIPT_COMMENT ?= This is an -*- erlang -*- file
+ESCRIPT_EMU_ARGS ?= -escript main $(ESCRIPT_NAME)
+
+ESCRIPT_ZIP ?= 7z a -tzip -mx=9 -mtc=off $(if $(filter-out 0,$(V)),,> /dev/null)
+ESCRIPT_ZIP_FILE ?= $(ERLANG_MK_TMP)/escript.zip
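+
+# Usage sketch (hypothetical project name): build a self-contained
+# executable archive; note that the zip step shells out to 7z:
+#
+#   $ make escript
+#   $ ./my_project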
+
+# Core targets.
+
+distclean:: distclean-escript
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Escript targets:" \
+ " escript Build an executable escript archive" \
+
+# Plugin-specific targets.
+
+escript-zip:: FULL=1
+escript-zip:: deps app
+ $(verbose) mkdir -p $(dir $(ESCRIPT_ZIP))
+ $(verbose) rm -f $(ESCRIPT_ZIP_FILE)
+ $(gen_verbose) cd .. && $(ESCRIPT_ZIP) $(ESCRIPT_ZIP_FILE) $(PROJECT)/ebin/*
+ifneq ($(DEPS),)
+ $(verbose) cd $(DEPS_DIR) && $(ESCRIPT_ZIP) $(ESCRIPT_ZIP_FILE) \
+ $(subst $(DEPS_DIR)/,,$(addsuffix /*,$(wildcard \
+ $(addsuffix /ebin,$(shell cat $(ERLANG_MK_TMP)/deps.log)))))
+endif
+
+escript:: escript-zip
+ $(gen_verbose) printf "%s\n" \
+ "#!$(ESCRIPT_SHEBANG)" \
+ "%% $(ESCRIPT_COMMENT)" \
+ "%%! $(ESCRIPT_EMU_ARGS)" > $(ESCRIPT_FILE)
+ $(verbose) cat $(ESCRIPT_ZIP_FILE) >> $(ESCRIPT_FILE)
+ $(verbose) chmod +x $(ESCRIPT_FILE)
+
+distclean-escript:
+ $(gen_verbose) rm -f $(ESCRIPT_FILE)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, Enrique Fernandez <enrique.fernandez@erlang-solutions.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: eunit apps-eunit
+
+# Configuration
+
+EUNIT_OPTS ?=
+EUNIT_ERL_OPTS ?=
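+
+# Usage sketch (hypothetical module and generator names): run everything,
+# a single module, or a single test function:
+#
+#   $ make eunit
+#   $ make eunit t=my_mod
+#   $ make eunit t=my_mod:squares_test_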
+
+# Core targets.
+
+tests:: eunit
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "EUnit targets:" \
+ " eunit Run all the EUnit tests for this project"
+
+# Plugin-specific targets.
+
+define eunit.erl
+ $(call cover.erl)
+ CoverSetup(),
+ case eunit:test($1, [$(EUNIT_OPTS)]) of
+ ok -> ok;
+ error -> halt(2)
+ end,
+ CoverExport("$(call core_native_path,$(COVER_DATA_DIR))/eunit.coverdata"),
+ halt()
+endef
+
+EUNIT_ERL_OPTS += -pa $(TEST_DIR) $(CURDIR)/ebin
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+eunit: test-build cover-data-dir
+ $(gen_verbose) $(call erlang,$(call eunit.erl,['$(t)']),$(EUNIT_ERL_OPTS))
+else
+eunit: test-build cover-data-dir
+ $(gen_verbose) $(call erlang,$(call eunit.erl,fun $(t)/0),$(EUNIT_ERL_OPTS))
+endif
+else
+EUNIT_EBIN_MODS = $(notdir $(basename $(ERL_FILES) $(BEAM_FILES)))
+EUNIT_TEST_MODS = $(notdir $(basename $(call core_find,$(TEST_DIR)/,*.erl)))
+
+EUNIT_MODS = $(foreach mod,$(EUNIT_EBIN_MODS) $(filter-out \
+ $(patsubst %,%_tests,$(EUNIT_EBIN_MODS)),$(EUNIT_TEST_MODS)),'$(mod)')
+
+eunit: test-build $(if $(IS_APP)$(ROOT_DIR),,apps-eunit) cover-data-dir
+ifneq ($(wildcard src/ $(TEST_DIR)),)
+ $(gen_verbose) $(call erlang,$(call eunit.erl,[$(call comma_list,$(EUNIT_MODS))]),$(EUNIT_ERL_OPTS))
+endif
+
+ifneq ($(ALL_APPS_DIRS),)
+apps-eunit: test-build
+ $(verbose) eunit_retcode=0 ; for app in $(ALL_APPS_DIRS); do $(MAKE) -C $$app eunit IS_APP=1; \
+ [ $$? -ne 0 ] && eunit_retcode=1 ; done ; \
+ exit $$eunit_retcode
+endif
+endif
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter proper,$(DEPS) $(TEST_DEPS)),proper)
+.PHONY: proper
+
+# Targets.
+
+tests:: proper
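+
+# Usage sketch (hypothetical names): check every exported prop_* function,
+# one module, or a single property:
+#
+#   $ make proper
+#   $ make proper t=prop_my_mod
+#   $ make proper t=prop_my_mod:prop_roundtrip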
+
+define proper_check.erl
+ $(call cover.erl)
+ code:add_pathsa([
+ "$(call core_native_path,$(CURDIR)/ebin)",
+ "$(call core_native_path,$(DEPS_DIR)/*/ebin)",
+ "$(call core_native_path,$(TEST_DIR))"]),
+ Module = fun(M) ->
+ [true] =:= lists:usort([
+ case atom_to_list(F) of
+ "prop_" ++ _ ->
+ io:format("Testing ~p:~p/0~n", [M, F]),
+ proper:quickcheck(M:F(), nocolors);
+ _ ->
+ true
+ end
+ || {F, 0} <- M:module_info(exports)])
+ end,
+ try begin
+ CoverSetup(),
+ Res = case $(1) of
+ all -> [true] =:= lists:usort([Module(M) || M <- [$(call comma_list,$(3))]]);
+ module -> Module($(2));
+ function -> proper:quickcheck($(2), nocolors)
+ end,
+ CoverExport("$(COVER_DATA_DIR)/proper.coverdata"),
+ Res
+ end of
+ true -> halt(0);
+ _ -> halt(1)
+ catch error:undef ->
+ io:format("Undefined property or module?~n~p~n", [erlang:get_stacktrace()]),
+ halt(0)
+ end.
+endef
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+proper: test-build cover-data-dir
+ $(verbose) $(call erlang,$(call proper_check.erl,module,$(t)))
+else
+proper: test-build cover-data-dir
+ $(verbose) echo Testing $(t)/0
+ $(verbose) $(call erlang,$(call proper_check.erl,function,$(t)()))
+endif
+else
+proper: test-build cover-data-dir
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(wildcard ebin/*.beam) $(call core_find,$(TEST_DIR)/,*.beam))))))
+ $(gen_verbose) $(call erlang,$(call proper_check.erl,all,undefined,$(MODULES)))
+endif
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Verbosity.
+
+proto_verbose_0 = @echo " PROTO " $(filter %.proto,$(?F));
+proto_verbose = $(proto_verbose_$(V))
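+
+# A minimal sketch (hypothetical project Makefile): put .proto files under
+# src/ and declare one of the supported compilers as a build dependency;
+# the generated *_pb.erl sources are then built with the rest of the app:
+#
+#   BUILD_DEPS = gpb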
+
+# Core targets.
+
+ifneq ($(wildcard src/),)
+ifneq ($(filter gpb protobuffs,$(BUILD_DEPS) $(DEPS)),)
+PROTO_FILES := $(filter %.proto,$(ALL_SRC_FILES))
+ERL_FILES += $(addprefix src/,$(patsubst %.proto,%_pb.erl,$(notdir $(PROTO_FILES))))
+
+ifeq ($(PROTO_FILES),)
+$(ERLANG_MK_TMP)/last-makefile-change-protobuffs:
+ $(verbose) :
+else
+# Rebuild proto files when the Makefile changes.
+# We exclude $(PROJECT).d to avoid a circular dependency.
+$(ERLANG_MK_TMP)/last-makefile-change-protobuffs: $(filter-out $(PROJECT).d,$(MAKEFILE_LIST)) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(PROTO_FILES); \
+ fi
+ $(verbose) touch $@
+
+$(PROJECT).d:: $(ERLANG_MK_TMP)/last-makefile-change-protobuffs
+endif
+
+ifeq ($(filter gpb,$(BUILD_DEPS) $(DEPS)),)
+define compile_proto.erl
+ [begin
+ protobuffs_compile:generate_source(F, [
+ {output_include_dir, "./include"},
+ {output_src_dir, "./src"}])
+ end || F <- string:tokens("$1", " ")],
+ halt().
+endef
+else
+define compile_proto.erl
+ [begin
+ gpb_compile:file(F, [
+ {include_as_lib, true},
+ {module_name_suffix, "_pb"},
+ {o_hrl, "./include"},
+ {o_erl, "./src"}])
+ end || F <- string:tokens("$1", " ")],
+ halt().
+endef
+endif
+
+ifneq ($(PROTO_FILES),)
+$(PROJECT).d:: $(PROTO_FILES)
+ $(verbose) mkdir -p ebin/ include/
+ $(if $(strip $?),$(proto_verbose) $(call erlang,$(call compile_proto.erl,$?)))
+endif
+endif
+endif
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: relx-rel relx-relup distclean-relx-rel run
+
+# Configuration.
+
+RELX ?= $(ERLANG_MK_TMP)/relx
+RELX_CONFIG ?= $(CURDIR)/relx.config
+
+RELX_URL ?= https://erlang.mk/res/relx-v3.27.0
+RELX_OPTS ?=
+RELX_OUTPUT_DIR ?= _rel
+RELX_REL_EXT ?=
+RELX_TAR ?= 1
+
+ifdef SFX
+ RELX_TAR = 1
+endif
+
+ifeq ($(firstword $(RELX_OPTS)),-o)
+ RELX_OUTPUT_DIR = $(word 2,$(RELX_OPTS))
+else
+ RELX_OPTS += -o $(RELX_OUTPUT_DIR)
+endif
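+
+# Usage sketch: with a relx.config at the project root, build the release,
+# run it, or hot-load changed modules into a running node:
+#
+#   $ make rel
+#   $ make run
+#   $ RELOAD=1 make rel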
+
+# Core targets.
+
+ifeq ($(IS_DEP),)
+ifneq ($(wildcard $(RELX_CONFIG)),)
+rel:: relx-rel
+
+relup:: relx-relup
+endif
+endif
+
+distclean:: distclean-relx-rel
+
+# Plugin-specific targets.
+
+$(RELX): | $(ERLANG_MK_TMP)
+ $(gen_verbose) $(call core_http_get,$(RELX),$(RELX_URL))
+ $(verbose) chmod +x $(RELX)
+
+relx-rel: $(RELX) rel-deps app
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) release
+ $(verbose) $(MAKE) relx-post-rel
+ifeq ($(RELX_TAR),1)
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) tar
+endif
+
+relx-relup: $(RELX) rel-deps app
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) release
+ $(MAKE) relx-post-rel
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) relup $(if $(filter 1,$(RELX_TAR)),tar)
+
+distclean-relx-rel:
+ $(gen_verbose) rm -rf $(RELX_OUTPUT_DIR)
+
+# Default hooks.
+relx-post-rel::
+ $(verbose) :
+
+# Run target.
+
+ifeq ($(wildcard $(RELX_CONFIG)),)
+run::
+else
+
+define get_relx_release.erl
+ {ok, Config} = file:consult("$(call core_native_path,$(RELX_CONFIG))"),
+ {release, {Name, Vsn0}, _} = lists:keyfind(release, 1, Config),
+ Vsn = case Vsn0 of
+ {cmd, Cmd} -> os:cmd(Cmd);
+ semver -> "";
+ {semver, _} -> "";
+ VsnStr -> Vsn0
+ end,
+ Extended = case lists:keyfind(extended_start_script, 1, Config) of
+ {_, true} -> "1";
+ _ -> ""
+ end,
+ io:format("~s ~s ~s", [Name, Vsn, Extended]),
+ halt(0).
+endef
+
+RELX_REL := $(shell $(call erlang,$(get_relx_release.erl)))
+RELX_REL_NAME := $(word 1,$(RELX_REL))
+RELX_REL_VSN := $(word 2,$(RELX_REL))
+RELX_REL_CMD := $(if $(word 3,$(RELX_REL)),console)
+
+ifeq ($(PLATFORM),msys2)
+RELX_REL_EXT := .cmd
+endif
+
+run:: all
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) $(RELX_REL_CMD)
+
+ifdef RELOAD
+rel::
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) ping
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) \
+ eval "io:format(\"~p~n\", [c:lm()])"
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Relx targets:" \
+ " run Compile the project, build the release and run it"
+
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, M Robert Martin <rob@version2beta.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: shell
+
+# Configuration.
+
+SHELL_ERL ?= erl
+SHELL_PATHS ?= $(CURDIR)/ebin $(TEST_DIR)
+SHELL_OPTS ?=
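+
+# A minimal sketch (hypothetical application name): start a named shell
+# with the project application booted:
+#
+#   SHELL_OPTS = -sname dev -s my_app
+#
+#   $ make shell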
+
+ALL_SHELL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(SHELL_DEPS))
+
+# Core targets
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Shell targets:" \
+		"  shell              Run an Erlang shell with SHELL_OPTS or a reasonable default"
+
+# Plugin-specific targets.
+
+$(foreach dep,$(SHELL_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+build-shell-deps:
+else
+build-shell-deps: $(ALL_SHELL_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_SHELL_DEPS_DIRS) ; do \
+ if [ -z "$(strip $(FULL))" ] && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ else \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ fi \
+ done
+endif
+
+shell:: build-shell-deps
+ $(gen_verbose) $(SHELL_ERL) -pa $(SHELL_PATHS) $(SHELL_OPTS)
+
+# Copyright 2017, Stanislaw Klekot <dozzie@jarowit.net>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-sphinx sphinx
+
+# Configuration.
+
+SPHINX_BUILD ?= sphinx-build
+SPHINX_SOURCE ?= doc
+SPHINX_CONFDIR ?=
+SPHINX_FORMATS ?= html
+SPHINX_DOCTREES ?= $(ERLANG_MK_TMP)/sphinx.doctrees
+SPHINX_OPTS ?=
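+
+# A minimal sketch: build both HTML and man pages and redirect the HTML
+# output (assumes Sphinx sources with a conf.py under doc/):
+#
+#   SPHINX_FORMATS = html man
+#   sphinx_html_output = output/html
+#
+#   $ make sphinx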
+
+#sphinx_html_opts =
+#sphinx_html_output = html
+#sphinx_man_opts =
+#sphinx_man_output = man
+#sphinx_latex_opts =
+#sphinx_latex_output = latex
+
+# Helpers.
+
+sphinx_build_0 = @echo " SPHINX" $1; $(SPHINX_BUILD) -N -q
+sphinx_build_1 = $(SPHINX_BUILD) -N
+sphinx_build_2 = set -x; $(SPHINX_BUILD)
+sphinx_build = $(sphinx_build_$(V))
+
+define sphinx.build
+$(call sphinx_build,$1) -b $1 -d $(SPHINX_DOCTREES) $(if $(SPHINX_CONFDIR),-c $(SPHINX_CONFDIR)) $(SPHINX_OPTS) $(sphinx_$1_opts) -- $(SPHINX_SOURCE) $(call sphinx.output,$1)
+
+endef
+
+define sphinx.output
+$(if $(sphinx_$1_output),$(sphinx_$1_output),$1)
+endef
+
+# Targets.
+
+ifneq ($(wildcard $(if $(SPHINX_CONFDIR),$(SPHINX_CONFDIR),$(SPHINX_SOURCE))/conf.py),)
+docs:: sphinx
+distclean:: distclean-sphinx
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Sphinx targets:" \
+ " sphinx Generate Sphinx documentation." \
+ "" \
+		"ReST sources and the 'conf.py' file are expected in the directory pointed" \
+		"to by SPHINX_SOURCE ('doc' by default). SPHINX_FORMATS lists the formats to" \
+		"build (only the 'html' format is generated by default); the target directory" \
+		'can be specified by setting sphinx_$${format}_output, for example: sphinx_html_output = output/html' \
+ "Additional Sphinx options can be set in SPHINX_OPTS."
+
+# Plugin-specific targets.
+
+sphinx:
+ $(foreach F,$(SPHINX_FORMATS),$(call sphinx.build,$F))
+
+distclean-sphinx:
+ $(gen_verbose) rm -rf $(filter-out $(SPHINX_SOURCE),$(foreach F,$(SPHINX_FORMATS),$(call sphinx.output,$F)))
+
+# Copyright (c) 2017, Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: show-ERL_LIBS show-ERLC_OPTS show-TEST_ERLC_OPTS
+
+show-ERL_LIBS:
+ @echo $(ERL_LIBS)
+
+show-ERLC_OPTS:
+ @$(foreach opt,$(ERLC_OPTS) -pa ebin -I include,echo "$(opt)";)
+
+show-TEST_ERLC_OPTS:
+ @$(foreach opt,$(TEST_ERLC_OPTS) -pa ebin -I include,echo "$(opt)";)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter triq,$(DEPS) $(TEST_DEPS)),triq)
+.PHONY: triq
+
+# Targets.
+
+tests:: triq
+
+define triq_check.erl
+ $(call cover.erl)
+ code:add_pathsa([
+ "$(call core_native_path,$(CURDIR)/ebin)",
+ "$(call core_native_path,$(DEPS_DIR)/*/ebin)",
+ "$(call core_native_path,$(TEST_DIR))"]),
+ try begin
+ CoverSetup(),
+ Res = case $(1) of
+ all -> [true] =:= lists:usort([triq:check(M) || M <- [$(call comma_list,$(3))]]);
+ module -> triq:check($(2));
+ function -> triq:check($(2))
+ end,
+ CoverExport("$(COVER_DATA_DIR)/triq.coverdata"),
+ Res
+ end of
+ true -> halt(0);
+ _ -> halt(1)
+ catch error:undef ->
+ io:format("Undefined property or module?~n~p~n", [erlang:get_stacktrace()]),
+ halt(0)
+ end.
+endef
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+triq: test-build cover-data-dir
+ $(verbose) $(call erlang,$(call triq_check.erl,module,$(t)))
+else
+triq: test-build cover-data-dir
+ $(verbose) echo Testing $(t)/0
+ $(verbose) $(call erlang,$(call triq_check.erl,function,$(t)()))
+endif
+else
+triq: test-build cover-data-dir
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(wildcard ebin/*.beam) $(call core_find,$(TEST_DIR)/,*.beam))))))
+ $(gen_verbose) $(call erlang,$(call triq_check.erl,all,undefined,$(MODULES)))
+endif
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Erlang Solutions Ltd.
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: xref distclean-xref
+
+# Configuration.
+
+ifeq ($(XREF_CONFIG),)
+ XREFR_ARGS :=
+else
+ XREFR_ARGS := -c $(XREF_CONFIG)
+endif
+
+XREFR ?= $(CURDIR)/xrefr
+export XREFR
+
+XREFR_URL ?= https://github.com/inaka/xref_runner/releases/download/1.1.0/xrefr
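+
+# A minimal sketch (hypothetical path): run with a custom configuration
+# file understood by xref_runner:
+#
+#   XREF_CONFIG = $(CURDIR)/xref.config
+#
+#   $ make xref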
+
+# Core targets.
+
+help::
+ $(verbose) printf '%s\n' '' \
+ 'Xref targets:' \
+ ' xref Run Xrefr using $$XREF_CONFIG as config file if defined'
+
+distclean:: distclean-xref
+
+# Plugin-specific targets.
+
+$(XREFR):
+ $(gen_verbose) $(call core_http_get,$(XREFR),$(XREFR_URL))
+ $(verbose) chmod +x $(XREFR)
+
+xref: deps app $(XREFR)
+ $(gen_verbose) $(XREFR) $(XREFR_ARGS)
+
+distclean-xref:
+ $(gen_verbose) rm -rf $(XREFR)
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Viktor Söderqvist <viktor@zuiderkwast.se>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+COVER_REPORT_DIR ?= cover
+COVER_DATA_DIR ?= $(COVER_REPORT_DIR)
+
+ifdef COVER
+COVER_APPS ?= $(notdir $(ALL_APPS_DIRS))
+COVER_DEPS ?=
+endif
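+
+# A minimal sketch (hypothetical dependency name): collect coverage for
+# the project, its apps and one dependency, then let the tests target
+# build the HTML report:
+#
+#   $ COVER=1 COVER_DEPS=cowlib make tests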
+
+# Code coverage for Common Test.
+
+ifdef COVER
+ifdef CT_RUN
+ifneq ($(wildcard $(TEST_DIR)),)
+test-build:: $(TEST_DIR)/ct.cover.spec
+
+$(TEST_DIR)/ct.cover.spec: cover-data-dir
+ $(gen_verbose) printf "%s\n" \
+ "{incl_app, '$(PROJECT)', details}." \
+ "{incl_dirs, '$(PROJECT)', [\"$(call core_native_path,$(CURDIR)/ebin)\" \
+ $(foreach a,$(COVER_APPS),$(comma) \"$(call core_native_path,$(APPS_DIR)/$a/ebin)\") \
+ $(foreach d,$(COVER_DEPS),$(comma) \"$(call core_native_path,$(DEPS_DIR)/$d/ebin)\")]}." \
+ '{export,"$(call core_native_path,$(abspath $(COVER_DATA_DIR))/ct.coverdata)"}.' > $@
+
+CT_RUN += -cover $(TEST_DIR)/ct.cover.spec
+endif
+endif
+endif
+
+# Code coverage for other tools.
+
+ifdef COVER
+define cover.erl
+ CoverSetup = fun() ->
+ Dirs = ["$(call core_native_path,$(CURDIR)/ebin)"
+ $(foreach a,$(COVER_APPS),$(comma) "$(call core_native_path,$(APPS_DIR)/$a/ebin)")
+ $(foreach d,$(COVER_DEPS),$(comma) "$(call core_native_path,$(DEPS_DIR)/$d/ebin)")],
+ [begin
+ case filelib:is_dir(Dir) of
+ false -> false;
+ true ->
+ case cover:compile_beam_directory(Dir) of
+ {error, _} -> halt(1);
+ _ -> true
+ end
+ end
+ end || Dir <- Dirs]
+ end,
+ CoverExport = fun(Filename) -> cover:export(Filename) end,
+endef
+else
+define cover.erl
+ CoverSetup = fun() -> ok end,
+ CoverExport = fun(_) -> ok end,
+endef
+endif
+
+# Core targets
+
+ifdef COVER
+ifneq ($(COVER_REPORT_DIR),)
+tests::
+ $(verbose) $(MAKE) --no-print-directory cover-report
+endif
+
+cover-data-dir: | $(COVER_DATA_DIR)
+
+$(COVER_DATA_DIR):
+ $(verbose) mkdir -p $(COVER_DATA_DIR)
+else
+cover-data-dir:
+endif
+
+clean:: coverdata-clean
+
+ifneq ($(COVER_REPORT_DIR),)
+distclean:: cover-report-clean
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Cover targets:" \
+ " cover-report Generate a HTML coverage report from previously collected" \
+ " cover data." \
+ " all.coverdata Merge all coverdata files into all.coverdata." \
+ "" \
+ "If COVER=1 is set, coverage data is generated by the targets eunit and ct. The" \
+ "target tests additionally generates a HTML coverage report from the combined" \
+ "coverdata files from each of these testing tools. HTML reports can be disabled" \
+ "by setting COVER_REPORT_DIR to empty."
+
+# Plugin specific targets
+
+COVERDATA = $(filter-out $(COVER_DATA_DIR)/all.coverdata,$(wildcard $(COVER_DATA_DIR)/*.coverdata))
+
+.PHONY: coverdata-clean
+coverdata-clean:
+ $(gen_verbose) rm -f $(COVER_DATA_DIR)/*.coverdata $(TEST_DIR)/ct.cover.spec
+
+# Merge all coverdata files into one.
+define cover_export.erl
+ $(foreach f,$(COVERDATA),cover:import("$(f)") == ok orelse halt(1),)
+ cover:export("$(COVER_DATA_DIR)/$@"), halt(0).
+endef
+
+all.coverdata: $(COVERDATA) cover-data-dir
+ $(gen_verbose) $(call erlang,$(cover_export.erl))
+
+# These are only defined if COVER_REPORT_DIR is non-empty. Set COVER_REPORT_DIR to
+# empty if you want the coverdata files but not the HTML report.
+ifneq ($(COVER_REPORT_DIR),)
+
+.PHONY: cover-report-clean cover-report
+
+cover-report-clean:
+ $(gen_verbose) rm -rf $(COVER_REPORT_DIR)
+ifneq ($(COVER_REPORT_DIR),$(COVER_DATA_DIR))
+ $(if $(shell ls -A $(COVER_DATA_DIR)/),,$(verbose) rmdir $(COVER_DATA_DIR))
+endif
+
+ifeq ($(COVERDATA),)
+cover-report:
+else
+
+# Modules which include eunit.hrl always contain one line without coverage
+# because eunit defines test/0 which is never called. We compensate for this.
+EUNIT_HRL_MODS = $(subst $(space),$(comma),$(shell \
+ grep -H -e '^\s*-include.*include/eunit\.hrl"' src/*.erl \
+ | sed "s/^src\/\(.*\)\.erl:.*/'\1'/" | uniq))
+
+define cover_report.erl
+ $(foreach f,$(COVERDATA),cover:import("$(f)") == ok orelse halt(1),)
+ Ms = cover:imported_modules(),
+ [cover:analyse_to_file(M, "$(COVER_REPORT_DIR)/" ++ atom_to_list(M)
+ ++ ".COVER.html", [html]) || M <- Ms],
+ Report = [begin {ok, R} = cover:analyse(M, module), R end || M <- Ms],
+ EunitHrlMods = [$(EUNIT_HRL_MODS)],
+ Report1 = [{M, {Y, case lists:member(M, EunitHrlMods) of
+ true -> N - 1; false -> N end}} || {M, {Y, N}} <- Report],
+ TotalY = lists:sum([Y || {_, {Y, _}} <- Report1]),
+ TotalN = lists:sum([N || {_, {_, N}} <- Report1]),
+ Perc = fun(Y, N) -> case Y + N of 0 -> 100; S -> round(100 * Y / S) end end,
+ TotalPerc = Perc(TotalY, TotalN),
+ {ok, F} = file:open("$(COVER_REPORT_DIR)/index.html", [write]),
+ io:format(F, "<!DOCTYPE html><html>~n"
+ "<head><meta charset=\"UTF-8\">~n"
+ "<title>Coverage report</title></head>~n"
+ "<body>~n", []),
+ io:format(F, "<h1>Coverage</h1>~n<p>Total: ~p%</p>~n", [TotalPerc]),
+ io:format(F, "<table><tr><th>Module</th><th>Coverage</th></tr>~n", []),
+ [io:format(F, "<tr><td><a href=\"~p.COVER.html\">~p</a></td>"
+ "<td>~p%</td></tr>~n",
+ [M, M, Perc(Y, N)]) || {M, {Y, N}} <- Report1],
+ How = "$(subst $(space),$(comma)$(space),$(basename $(COVERDATA)))",
+ Date = "$(shell date -u "+%Y-%m-%dT%H:%M:%SZ")",
+ io:format(F, "</table>~n"
+ "<p>Generated using ~s and erlang.mk on ~s.</p>~n"
+ "</body></html>", [How, Date]),
+ halt().
+endef
+
+cover-report:
+ $(verbose) mkdir -p $(COVER_REPORT_DIR)
+ $(gen_verbose) $(call erlang,$(cover_report.erl))
+
+endif
+endif # ifneq ($(COVER_REPORT_DIR),)
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: sfx
+
+ifdef RELX_REL
+ifdef SFX
+
+# Configuration.
+
+SFX_ARCHIVE ?= $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/$(RELX_REL_NAME)-$(RELX_REL_VSN).tar.gz
+SFX_OUTPUT_FILE ?= $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME).run
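+
+# Usage sketch (hypothetical release name my_app, default output dir):
+# build a self-extracting archive alongside the regular release:
+#
+#   $ SFX=1 make rel
+#   $ ./_rel/my_app.run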
+
+# Core targets.
+
+rel:: sfx
+
+# Plugin-specific targets.
+
+define sfx_stub
+#!/bin/sh
+
+TMPDIR=`mktemp -d`
+ARCHIVE=`awk '/^__ARCHIVE_BELOW__$$/ {print NR + 1; exit 0;}' $$0`
+FILENAME=$$(basename $$0)
+REL=$${FILENAME%.*}
+
+tail -n+$$ARCHIVE $$0 | tar -xzf - -C $$TMPDIR
+
+$$TMPDIR/bin/$$REL console
+RET=$$?
+
+rm -rf $$TMPDIR
+
+exit $$RET
+
+__ARCHIVE_BELOW__
+endef
+
+sfx:
+ $(verbose) $(call core_render,sfx_stub,$(SFX_OUTPUT_FILE))
+ $(gen_verbose) cat $(SFX_ARCHIVE) >> $(SFX_OUTPUT_FILE)
+ $(verbose) chmod +x $(SFX_OUTPUT_FILE)
+
+endif
+endif
+
+# Copyright (c) 2013-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# External plugins.
+
+DEP_PLUGINS ?=
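+
+# A minimal sketch (hypothetical dependency and path): load every plugin a
+# dependency ships in its plugins.mk, or a single file within it:
+#
+#   DEP_PLUGINS = cowboy
+#   DEP_PLUGINS = cowboy/mk/dist.mk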
+
+$(foreach p,$(DEP_PLUGINS),\
+ $(eval $(if $(findstring /,$p),\
+ $(call core_dep_plugin,$p,$(firstword $(subst /, ,$p))),\
+ $(call core_dep_plugin,$p/plugins.mk,$p))))
+
+help:: help-plugins
+
+help-plugins::
+ $(verbose) :
+
+# Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015-2016, Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Fetch dependencies recursively (without building them).
+
+.PHONY: fetch-deps fetch-doc-deps fetch-rel-deps fetch-test-deps \
+ fetch-shell-deps
+
+.PHONY: $(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+fetch-deps: $(ERLANG_MK_RECURSIVE_DEPS_LIST)
+fetch-doc-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST)
+fetch-rel-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST)
+fetch-test-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST)
+fetch-shell-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+ifneq ($(SKIP_DEPS),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST):
+ $(verbose) :> $@
+else
+# By default, we fetch "normal" dependencies. They are also included
+# regardless of the type of dependencies requested.
+#
+# $(ALL_DEPS_DIRS) includes $(BUILD_DEPS).
+
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_DOC_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_REL_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_TEST_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_SHELL_DEPS_DIRS)
+
+# Allow to use fetch-deps and $(DEP_TYPES) to fetch multiple types of
+# dependencies with a single target.
+ifneq ($(filter doc,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_DOC_DEPS_DIRS)
+endif
+ifneq ($(filter rel,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_REL_DEPS_DIRS)
+endif
+ifneq ($(filter test,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_TEST_DEPS_DIRS)
+endif
+ifneq ($(filter shell,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_SHELL_DEPS_DIRS)
+endif
+
+ERLANG_MK_RECURSIVE_TMP_LIST := $(abspath $(ERLANG_MK_TMP)/recursive-tmp-deps-$(shell echo $$PPID).log)
+
+$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): | $(ERLANG_MK_TMP)
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_RECURSIVE_TMP_LIST)
+endif
+ $(verbose) touch $(ERLANG_MK_RECURSIVE_TMP_LIST)
+ $(verbose) set -e; for dep in $^ ; do \
+ if ! grep -qs ^$$dep$$ $(ERLANG_MK_RECURSIVE_TMP_LIST); then \
+ echo $$dep >> $(ERLANG_MK_RECURSIVE_TMP_LIST); \
+ if grep -qs -E "^[[:blank:]]*include[[:blank:]]+(erlang\.mk|.*/erlang\.mk|.*ERLANG_MK_FILENAME.*)$$" \
+ $$dep/GNUmakefile $$dep/makefile $$dep/Makefile; then \
+ $(MAKE) -C $$dep fetch-deps \
+ IS_DEP=1 \
+ ERLANG_MK_RECURSIVE_TMP_LIST=$(ERLANG_MK_RECURSIVE_TMP_LIST); \
+ fi \
+ fi \
+ done
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) sort < $(ERLANG_MK_RECURSIVE_TMP_LIST) | \
+ uniq > $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted
+ $(verbose) cmp -s $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted $@ \
+ || mv $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted $@
+ $(verbose) rm -f $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted
+ $(verbose) rm $(ERLANG_MK_RECURSIVE_TMP_LIST)
+endif
+endif # ifneq ($(SKIP_DEPS),)
+
+# List dependencies recursively.
+
+.PHONY: list-deps list-doc-deps list-rel-deps list-test-deps \
+ list-shell-deps
+
+list-deps: $(ERLANG_MK_RECURSIVE_DEPS_LIST)
+list-doc-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST)
+list-rel-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST)
+list-test-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST)
+list-shell-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+list-deps list-doc-deps list-rel-deps list-test-deps list-shell-deps:
+ $(verbose) cat $^
+
+# Query dependencies recursively.
+
+.PHONY: query-deps query-doc-deps query-rel-deps query-test-deps \
+ query-shell-deps
+
+QUERY ?= name fetch_method repo version
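+
+# Usage sketch: print the selected attributes for every dependency,
+# recursively:
+#
+#   $ make query-deps
+#   $ make query-deps QUERY="name version"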
+
+define query_target
+$(1): $(2) clean-tmp-query.log
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(4)
+endif
+ $(verbose) $(foreach dep,$(3),\
+ echo $(PROJECT): $(foreach q,$(QUERY),$(call query_$(q),$(dep))) >> $(4) ;)
+ $(if $(filter-out query-deps,$(1)),,\
+ $(verbose) set -e; for dep in $(3) ; do \
+ if grep -qs ^$$$$dep$$$$ $(ERLANG_MK_TMP)/query.log; then \
+ :; \
+ else \
+ echo $$$$dep >> $(ERLANG_MK_TMP)/query.log; \
+ $(MAKE) -C $(DEPS_DIR)/$$$$dep $$@ QUERY="$(QUERY)" IS_DEP=1 || true; \
+ fi \
+ done)
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) touch $(4)
+ $(verbose) cat $(4)
+endif
+endef
+
+clean-tmp-query.log:
+ifeq ($(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_TMP)/query.log
+endif
+
+$(eval $(call query_target,query-deps,$(ERLANG_MK_RECURSIVE_DEPS_LIST),$(BUILD_DEPS) $(DEPS),$(ERLANG_MK_QUERY_DEPS_FILE)))
+$(eval $(call query_target,query-doc-deps,$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST),$(DOC_DEPS),$(ERLANG_MK_QUERY_DOC_DEPS_FILE)))
+$(eval $(call query_target,query-rel-deps,$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST),$(REL_DEPS),$(ERLANG_MK_QUERY_REL_DEPS_FILE)))
+$(eval $(call query_target,query-test-deps,$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST),$(TEST_DEPS),$(ERLANG_MK_QUERY_TEST_DEPS_FILE)))
+$(eval $(call query_target,query-shell-deps,$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST),$(SHELL_DEPS),$(ERLANG_MK_QUERY_SHELL_DEPS_FILE)))
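The fetch/list/query targets above are meant to be driven from a project that includes erlang.mk. A minimal usage sketch (the `DEP_TYPES` and `QUERY` values here are illustrative):

```
make fetch-deps DEP_TYPES='test'    # fetch normal plus test dependencies
make list-deps                      # print the recursive dependency list
make query-deps QUERY='name repo'   # print selected fields per dependency
```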
diff --git a/deps/rabbitmq_trust_store/examples/rabbit_trust_store_opera_com_provider.erl b/deps/rabbitmq_trust_store/examples/rabbit_trust_store_opera_com_provider.erl
new file mode 100644
index 0000000000..9a11479a2f
--- /dev/null
+++ b/deps/rabbitmq_trust_store/examples/rabbit_trust_store_opera_com_provider.erl
@@ -0,0 +1,69 @@
+-module(rabbit_trust_store_opera_com_provider).
+
+-behaviour(rabbit_trust_store_certificate_provider).
+
+-export([list_certs/1, list_certs/2, load_cert/3]).
+
+%% This is an example implementation of the
+%% rabbit_trust_store_certificate_provider behaviour.
+%% The module uses https://certs.opera.com/02/roots/ as a source of
+%% CA certificates.
+%% It can be used as a reference when
+%% implementing a certificate provider for the trust store plugin.
+
+
+%% This function retrieves the list of available certificates.
+list_certs(_Config) ->
+ inets:start(),
+ case httpc:request(get, {"https://certs.opera.com/02/roots/", []},
+ [], [{body_format, binary}]) of
+ {ok, {{_,Code,_}, _Headers, Body}} when Code div 100 == 2 ->
+ %% First link in directory listing is a parent dir link.
+ {match, [_ParentDirLink | CertMatches]} =
+ re:run(Body, "<td><a href=\"([^\"]*)\">",
+ [global, {capture, all_but_first, binary}]),
+
+ CertNames = lists:append(CertMatches),
+      %% certs.opera.com uses thumbprints for certificate file names,
+      %% so they should be unique (there is no need to add a change time).
+ {ok,
+ [{CertName,
+ [{name, CertName},
+          %% The URL can be formed from CertName, so this attribute is
+          %% not strictly necessary.
+          %% We include it as an example for providers where CertName
+          %% and the URL differ.
+ {url, <<"https://certs.opera.com/02/roots/", CertName/binary>>}]}
+ || CertName <- CertNames],
+ nostate};
+ Other -> {error, {http_error, Other}}
+ end.
+
+%% Since the provider keeps no state,
+%% we delegate to the stateless version of this function.
+list_certs(Config, _) -> list_certs(Config).
+
+%% This function loads certificate data using the certificate ID and attributes.
+%% We use the url attribute here.
+%% Some providers may ignore the attributes and use CertId instead.
+load_cert(_CertId, Attributes, _Config) ->
+ Url = proplists:get_value(url, Attributes),
+ case httpc:request(get, {rabbit_data_coercion:to_list(Url), []},
+ [], [{body_format, binary}]) of
+ {ok, {{_,Code,_}, _Headers, Body}} when Code div 100 == 2 ->
+ %% We assume that there is only one certificate per file.
+ BodySingleLine = binary:replace(Body, <<"\n">>, <<>>, [global]),
+ {match, [CertEncoded|_]} =
+ re:run(BodySingleLine,
+ "<certificate-data>(.*)</certificate-data>",
+ [{capture, all_but_first, binary}, ungreedy]),
+ [{'Certificate', Cert, not_encrypted}] =
+ public_key:pem_decode(<<"-----BEGIN CERTIFICATE-----\n",
+ CertEncoded/binary,
+ "\n-----END CERTIFICATE-----\n">>),
+ {ok, Cert};
+ Other -> {error, {http_error, Other}}
+ end.
+
+
+
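A hedged sketch of enabling this example provider; the shape mirrors the `rabbit_trust_store_http_provider` examples later in this commit, and the refresh interval is illustrative:

```
{rabbitmq_trust_store,
 [{providers, [rabbit_trust_store_opera_com_provider]},
  {refresh_interval, {seconds, 30}}
 ]}.
```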
diff --git a/deps/rabbitmq_trust_store/examples/rabbitmq_trust_store_django/.gitignore b/deps/rabbitmq_trust_store/examples/rabbitmq_trust_store_django/.gitignore
new file mode 100644
index 0000000000..6dbe16d613
--- /dev/null
+++ b/deps/rabbitmq_trust_store/examples/rabbitmq_trust_store_django/.gitignore
@@ -0,0 +1,4 @@
+*.pyc
+*.sqlite3
+certs/
+venv/
diff --git a/deps/rabbitmq_trust_store/examples/rabbitmq_trust_store_django/README.md b/deps/rabbitmq_trust_store/examples/rabbitmq_trust_store_django/README.md
new file mode 100644
index 0000000000..0bd536869a
--- /dev/null
+++ b/deps/rabbitmq_trust_store/examples/rabbitmq_trust_store_django/README.md
@@ -0,0 +1,49 @@
+# RabbitMQ trust store HTTP server example
+
+`rabbitmq_trust_store_django` is a very minimalistic [Django](https://www.djangoproject.com/) 1.10+ application
+that rabbitmq-trust-store's `rabbit_trust_store_http_provider` can use as a source of certificates.
+The project serves PEM-encoded CA certificate files from the `certs` directory.
+It is not designed to be anything other than an example.
+
+
+## Running the Example
+
+1. Put certificates that should be trusted in PEM format into the `certs` directory.
+
+2. Run `python manage.py runserver` to launch it after [installing Django](https://docs.djangoproject.com/en/1.10/topics/install/).
+
+3. Configure RabbitMQ trust store to use `rabbit_trust_store_http_provider`.
+
+Below is a very minimalistic configuration example that assumes the application
+is served on the local machine over HTTP:
+
+```
+{rabbitmq_trust_store,
+ [{providers, [rabbit_trust_store_http_provider]},
+ {url, "http://127.0.0.1:8000/"},
+ {refresh_interval, {seconds, 30}}
+ ]}.
+```
+
+You can specify TLS options if you use HTTPS:
+
+```
+{rabbitmq_trust_store,
+ [{providers, [rabbit_trust_store_http_provider]},
+ {url, "https://secure.store.example.local:8000/"},
+ {refresh_interval, {seconds, 30}},
+ {ssl_options, [{certfile, "/client/cert.pem"},
+ {keyfile, "/client/key.pem"},
+ {cacertfile, "/ca/cert.pem"}
+ ]}
+ ]}.
+```
+
+
+## HTTP API Endpoints
+
+This project serves static files from the `certs` directory and
+lists them in the JSON format described in [rabbitmq-trust-store](https://github.com/rabbitmq/rabbitmq-trust-store/).
+
+If you're not familiar with Django, `urls.py` and `trust_store/views.py` may be
+the most illuminating files.
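For reference, the listing produced by `index` in `trust_store/views.py` below has this shape (a sketch; the file name and mtime are illustrative):

```
{
  "certificates": [
    {"id": "cacert.pem:1479468000", "path": "/certs/cacert.pem"}
  ]
}
```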
diff --git a/deps/rabbitmq_trust_store/examples/rabbitmq_trust_store_django/manage.py b/deps/rabbitmq_trust_store/examples/rabbitmq_trust_store_django/manage.py
new file mode 100755
index 0000000000..469f277bd4
--- /dev/null
+++ b/deps/rabbitmq_trust_store/examples/rabbitmq_trust_store_django/manage.py
@@ -0,0 +1,10 @@
+#!/usr/bin/env python
+import os
+import sys
+
+if __name__ == "__main__":
+ os.environ.setdefault("DJANGO_SETTINGS_MODULE", "rabbitmq_trust_store_django.settings")
+
+ from django.core.management import execute_from_command_line
+
+ execute_from_command_line(sys.argv)
diff --git a/deps/rabbitmq_trust_store/examples/rabbitmq_trust_store_django/rabbitmq_trust_store_django/__init__.py b/deps/rabbitmq_trust_store/examples/rabbitmq_trust_store_django/rabbitmq_trust_store_django/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/deps/rabbitmq_trust_store/examples/rabbitmq_trust_store_django/rabbitmq_trust_store_django/__init__.py
diff --git a/deps/rabbitmq_trust_store/examples/rabbitmq_trust_store_django/rabbitmq_trust_store_django/settings.py b/deps/rabbitmq_trust_store/examples/rabbitmq_trust_store_django/rabbitmq_trust_store_django/settings.py
new file mode 100644
index 0000000000..79962be02f
--- /dev/null
+++ b/deps/rabbitmq_trust_store/examples/rabbitmq_trust_store_django/rabbitmq_trust_store_django/settings.py
@@ -0,0 +1,156 @@
+"""
+Django settings for rabbitmq_trust_store_django project.
+
+Generated by 'django-admin startproject' using Django 1.9.4.
+
+For more information on this file, see
+https://docs.djangoproject.com/en/1.9/topics/settings/
+
+For the full list of settings and their values, see
+https://docs.djangoproject.com/en/1.9/ref/settings/
+"""
+
+import logging.config
+import os
+
+# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
+BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+
+
+# Quick-start development settings - unsuitable for production
+# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
+
+# SECURITY WARNING: keep the secret key used in production secret!
+SECRET_KEY = 'y3ws-)+baz%a4^p(2@+ell%&bj-q2q^f2&do)c2-feo#a4*x0t'
+
+# SECURITY WARNING: don't run with debug turned on in production!
+DEBUG = True
+
+ALLOWED_HOSTS = []
+
+logging.config.dictConfig({
+ 'version': 1,
+ 'disable_existing_loggers': False,
+ 'formatters': {
+ 'verbose': {
+ 'format': '{levelname} {asctime} {module} {process:d} {thread:d} {message}',
+ 'style': '{',
+ },
+ 'simple': {
+ 'format': '{levelname} {message}',
+ 'style': '{',
+ },
+ },
+ 'handlers': {
+ 'console': {
+ 'class': 'logging.StreamHandler',
+ 'formatter': 'verbose'
+ },
+ },
+ 'loggers': {
+ 'django': {
+ 'handlers': ['console'],
+ 'level': os.getenv('DJANGO_LOG_LEVEL', 'DEBUG'),
+ },
+ 'rabbitmq_trust_store_django': {
+ 'handlers': ['console'],
+ 'level': os.getenv('DJANGO_LOG_LEVEL', 'DEBUG'),
+ }
+ },
+})
+
+# Application definition
+
+INSTALLED_APPS = [
+ 'django.contrib.admin',
+ 'django.contrib.auth',
+ 'django.contrib.contenttypes',
+ 'django.contrib.sessions',
+ 'django.contrib.messages',
+ 'django.contrib.staticfiles',
+]
+
+MIDDLEWARE_CLASSES = [
+ 'django.middleware.security.SecurityMiddleware',
+ 'django.contrib.sessions.middleware.SessionMiddleware',
+ 'django.middleware.common.CommonMiddleware',
+ 'django.middleware.csrf.CsrfViewMiddleware',
+ 'django.contrib.auth.middleware.AuthenticationMiddleware',
+ 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
+ 'django.contrib.messages.middleware.MessageMiddleware',
+ 'django.middleware.clickjacking.XFrameOptionsMiddleware',
+]
+
+ROOT_URLCONF = 'rabbitmq_trust_store_django.urls'
+
+TEMPLATES = [
+ {
+ 'BACKEND': 'django.template.backends.django.DjangoTemplates',
+ 'DIRS': [],
+ 'APP_DIRS': True,
+ 'OPTIONS': {
+ 'context_processors': [
+ 'django.template.context_processors.debug',
+ 'django.template.context_processors.request',
+ 'django.contrib.auth.context_processors.auth',
+ 'django.contrib.messages.context_processors.messages',
+ ],
+ },
+ },
+]
+
+WSGI_APPLICATION = 'rabbitmq_trust_store_django.wsgi.application'
+
+
+# Database
+# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
+
+DATABASES = {
+ 'default': {
+ 'ENGINE': 'django.db.backends.sqlite3',
+ 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
+ }
+}
+
+
+# Password validation
+# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
+
+AUTH_PASSWORD_VALIDATORS = [
+ {
+ 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
+ },
+ {
+ 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
+ },
+ {
+ 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
+ },
+ {
+ 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
+ },
+]
+
+
+# Internationalization
+# https://docs.djangoproject.com/en/1.9/topics/i18n/
+
+LANGUAGE_CODE = 'en-us'
+
+TIME_ZONE = 'UTC'
+
+USE_I18N = True
+
+USE_L10N = True
+
+USE_TZ = True
+
+
+# Static files (CSS, JavaScript, Images)
+# https://docs.djangoproject.com/en/1.9/howto/static-files/
+
+STATIC_URL = "/certs/"
+
+STATICFILES_DIRS = [
+ os.path.join(BASE_DIR, "certs")
+]
diff --git a/deps/rabbitmq_trust_store/examples/rabbitmq_trust_store_django/rabbitmq_trust_store_django/trust_store/__init__.py b/deps/rabbitmq_trust_store/examples/rabbitmq_trust_store_django/rabbitmq_trust_store_django/trust_store/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/deps/rabbitmq_trust_store/examples/rabbitmq_trust_store_django/rabbitmq_trust_store_django/trust_store/__init__.py
diff --git a/deps/rabbitmq_trust_store/examples/rabbitmq_trust_store_django/rabbitmq_trust_store_django/trust_store/apps.py b/deps/rabbitmq_trust_store/examples/rabbitmq_trust_store_django/rabbitmq_trust_store_django/trust_store/apps.py
new file mode 100644
index 0000000000..b7eb12104b
--- /dev/null
+++ b/deps/rabbitmq_trust_store/examples/rabbitmq_trust_store_django/rabbitmq_trust_store_django/trust_store/apps.py
@@ -0,0 +1,7 @@
+from __future__ import unicode_literals
+
+from django.apps import AppConfig
+
+
+class TrustStoreConfig(AppConfig):
+ name = 'trust_store'
diff --git a/deps/rabbitmq_trust_store/examples/rabbitmq_trust_store_django/rabbitmq_trust_store_django/trust_store/tests.py b/deps/rabbitmq_trust_store/examples/rabbitmq_trust_store_django/rabbitmq_trust_store_django/trust_store/tests.py
new file mode 100644
index 0000000000..7ce503c2dd
--- /dev/null
+++ b/deps/rabbitmq_trust_store/examples/rabbitmq_trust_store_django/rabbitmq_trust_store_django/trust_store/tests.py
@@ -0,0 +1,3 @@
+from django.test import TestCase
+
+# Create your tests here.
diff --git a/deps/rabbitmq_trust_store/examples/rabbitmq_trust_store_django/rabbitmq_trust_store_django/trust_store/urls.py b/deps/rabbitmq_trust_store/examples/rabbitmq_trust_store_django/rabbitmq_trust_store_django/trust_store/urls.py
new file mode 100644
index 0000000000..67d18f88ee
--- /dev/null
+++ b/deps/rabbitmq_trust_store/examples/rabbitmq_trust_store_django/rabbitmq_trust_store_django/trust_store/urls.py
@@ -0,0 +1,7 @@
+from django.conf.urls import url
+
+from . import views
+
+urlpatterns = [
+ url(r'^$', views.index, name='index'),
+]
\ No newline at end of file
diff --git a/deps/rabbitmq_trust_store/examples/rabbitmq_trust_store_django/rabbitmq_trust_store_django/trust_store/views.py b/deps/rabbitmq_trust_store/examples/rabbitmq_trust_store_django/rabbitmq_trust_store_django/trust_store/views.py
new file mode 100644
index 0000000000..2e69e63cf5
--- /dev/null
+++ b/deps/rabbitmq_trust_store/examples/rabbitmq_trust_store_django/rabbitmq_trust_store_django/trust_store/views.py
@@ -0,0 +1,46 @@
+import logging
+import os
+
+from django.shortcuts import render
+from django.http import HttpResponse, JsonResponse
+from django.conf import settings
+from django.views.decorators.http import last_modified
+from datetime import datetime
+
+logger = logging.getLogger(__name__)
+
+def latest_dir_change(request):
+ timestamp = os.stat(cert_directory()).st_mtime
+ dt = datetime.fromtimestamp(timestamp)
+ logger.debug('latest_dir_change: %s', dt)
+ return dt
+
+@last_modified(latest_dir_change)
+def index(request):
+ directory = cert_directory()
+ certs = {'certificates': [file_object(file) for file in pem_files(directory)]}
+ return JsonResponse(certs)
+
+def cert_directory():
+ return os.path.join(settings.BASE_DIR, "certs")
+
+def pem_files(directory):
+ files = os.listdir(directory)
+ return [os.path.join(directory, file) for file in files if is_pem(file)]
+
+def is_pem(file):
+ return 'pem' == os.path.splitext(file)[1][1:]
+
+def file_object(file):
+ return {'id': file_id(file), 'path': path_for_file(file)}
+
+
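+# The ID embeds the file's mtime, so it changes whenever the file is
+# replaced, and the trust store then treats it as a new certificate.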
+def file_id(file):
+ mtime = str(int(os.stat(file).st_mtime))
+ basename = os.path.basename(file)
+ return basename + ':' + mtime
+
+def path_for_file(file):
+ basename = os.path.basename(file)
+ return "/certs/" + basename
diff --git a/deps/rabbitmq_trust_store/examples/rabbitmq_trust_store_django/rabbitmq_trust_store_django/urls.py b/deps/rabbitmq_trust_store/examples/rabbitmq_trust_store_django/rabbitmq_trust_store_django/urls.py
new file mode 100644
index 0000000000..eb5263f3ea
--- /dev/null
+++ b/deps/rabbitmq_trust_store/examples/rabbitmq_trust_store_django/rabbitmq_trust_store_django/urls.py
@@ -0,0 +1,21 @@
+"""rabbitmq_trust_store_django URL Configuration
+
+The `urlpatterns` list routes URLs to views. For more information please see:
+ https://docs.djangoproject.com/en/1.9/topics/http/urls/
+Examples:
+Function views
+ 1. Add an import: from my_app import views
+ 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
+Class-based views
+ 1. Add an import: from other_app.views import Home
+ 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
+Including another URLconf
+ 1. Import the include() function: from django.conf.urls import url, include
+ 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
+"""
+from django.conf.urls import include, url
+from django.contrib import admin
+
+urlpatterns = [
+ url(r'', include('rabbitmq_trust_store_django.trust_store.urls')),
+]
diff --git a/deps/rabbitmq_trust_store/examples/rabbitmq_trust_store_django/rabbitmq_trust_store_django/wsgi.py b/deps/rabbitmq_trust_store/examples/rabbitmq_trust_store_django/rabbitmq_trust_store_django/wsgi.py
new file mode 100644
index 0000000000..6e40d3b8d9
--- /dev/null
+++ b/deps/rabbitmq_trust_store/examples/rabbitmq_trust_store_django/rabbitmq_trust_store_django/wsgi.py
@@ -0,0 +1,16 @@
+"""
+WSGI config for rabbitmq_trust_store_django project.
+
+It exposes the WSGI callable as a module-level variable named ``application``.
+
+For more information on this file, see
+https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
+"""
+
+import os
+
+from django.core.wsgi import get_wsgi_application
+
+os.environ.setdefault("DJANGO_SETTINGS_MODULE", "rabbitmq_trust_store_django.settings")
+
+application = get_wsgi_application()
diff --git a/deps/rabbitmq_trust_store/priv/schema/rabbitmq_trust_store.schema b/deps/rabbitmq_trust_store/priv/schema/rabbitmq_trust_store.schema
new file mode 100644
index 0000000000..85fffbe706
--- /dev/null
+++ b/deps/rabbitmq_trust_store/priv/schema/rabbitmq_trust_store.schema
@@ -0,0 +1,145 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+{mapping, "trust_store.providers.$name", "rabbitmq_trust_store.providers", [
+ {datatype, atom}
+]}.
+
+{translation, "rabbitmq_trust_store.providers",
+fun(Conf) ->
+ Settings = cuttlefish_variable:filter_by_prefix("trust_store.providers", Conf),
+ ProviderModule = fun
+ (file) -> rabbit_trust_store_file_provider;
+ (http) -> rabbit_trust_store_http_provider;
+ (Other) when is_atom(Other) -> Other;
+ (_) -> cuttlefish:invalid("Unknown/unsupported trust store provider")
+ end,
+
+ [ ProviderModule(V) || {_, V} <- Settings ]
+end}.
+
+
+{mapping, "trust_store.refresh_interval", "rabbitmq_trust_store.refresh_interval", [
+ {datatype, integer}
+]}.
+
+{mapping, "trust_store.directory", "rabbitmq_trust_store.directory", [
+ {datatype, string}
+]}.
+
+{mapping, "trust_store.url", "rabbitmq_trust_store.url", [
+ {datatype, string}
+]}.
+
+{mapping, "trust_store.ssl_options", "rabbitmq_trust_store.ssl_options", [
+ {datatype, {enum, [none]}}
+]}.
+
+{translation, "rabbitmq_trust_store.ssl_options",
+fun(Conf) ->
+ case cuttlefish:conf_get("trust_store.ssl_options", Conf, undefined) of
+ none -> [];
+ _ -> cuttlefish:invalid("Invalid ssl_options")
+ end
+end}.
+
+{mapping, "trust_store.ssl_options.verify", "rabbitmq_trust_store.ssl_options.verify", [
+ {datatype, {enum, [verify_peer, verify_none]}}]}.
+
+{mapping, "trust_store.ssl_options.fail_if_no_peer_cert", "rabbitmq_trust_store.ssl_options.fail_if_no_peer_cert", [
+ {datatype, {enum, [true, false]}}]}.
+
+{mapping, "trust_store.ssl_options.cacertfile", "rabbitmq_trust_store.ssl_options.cacertfile",
+ [{datatype, string}, {validators, ["file_accessible"]}]}.
+
+{mapping, "trust_store.ssl_options.certfile", "rabbitmq_trust_store.ssl_options.certfile",
+ [{datatype, string}, {validators, ["file_accessible"]}]}.
+
+{mapping, "trust_store.ssl_options.cacerts.$name", "rabbitmq_trust_store.ssl_options.cacerts",
+ [{datatype, string}]}.
+
+{translation, "rabbitmq_trust_store.ssl_options.cacerts",
+fun(Conf) ->
+ Settings = cuttlefish_variable:filter_by_prefix("trust_store.ssl_options.cacerts", Conf),
+ [ list_to_binary(V) || {_, V} <- Settings ]
+end}.
+
+{mapping, "trust_store.ssl_options.cert", "rabbitmq_trust_store.ssl_options.cert",
+ [{datatype, string}]}.
+
+{translation, "rabbitmq_trust_store.ssl_options.cert",
+fun(Conf) ->
+ list_to_binary(cuttlefish:conf_get("trust_store.ssl_options.cert", Conf))
+end}.
+
+{mapping, "trust_store.ssl_options.client_renegotiation", "rabbitmq_trust_store.ssl_options.client_renegotiation",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "trust_store.ssl_options.crl_check", "rabbitmq_trust_store.ssl_options.crl_check",
+ [{datatype, [{enum, [true, false, peer, best_effort]}]}]}.
+
+{mapping, "trust_store.ssl_options.depth", "rabbitmq_trust_store.ssl_options.depth",
+ [{datatype, integer}, {validators, ["byte"]}]}.
+
+{mapping, "trust_store.ssl_options.dh", "rabbitmq_trust_store.ssl_options.dh",
+ [{datatype, string}]}.
+
+{translation, "rabbitmq_trust_store.ssl_options.dh",
+fun(Conf) ->
+ list_to_binary(cuttlefish:conf_get("trust_store.ssl_options.dh", Conf))
+end}.
+
+{mapping, "trust_store.ssl_options.dhfile", "rabbitmq_trust_store.ssl_options.dhfile",
+ [{datatype, string}, {validators, ["file_accessible"]}]}.
+
+{mapping, "trust_store.ssl_options.honor_cipher_order", "rabbitmq_trust_store.ssl_options.honor_cipher_order",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "trust_store.ssl_options.key.RSAPrivateKey", "rabbitmq_trust_store.ssl_options.key",
+ [{datatype, string}]}.
+
+{mapping, "trust_store.ssl_options.key.DSAPrivateKey", "rabbitmq_trust_store.ssl_options.key",
+ [{datatype, string}]}.
+
+{mapping, "trust_store.ssl_options.key.PrivateKeyInfo", "rabbitmq_trust_store.ssl_options.key",
+ [{datatype, string}]}.
+
+{translation, "rabbitmq_trust_store.ssl_options.key",
+fun(Conf) ->
+ case cuttlefish_variable:filter_by_prefix("trust_store.ssl_options.key", Conf) of
+ [{[_,_,Key], Val}|_] -> {list_to_atom(Key), list_to_binary(Val)};
+ _ -> undefined
+ end
+end}.
+
+{mapping, "trust_store.ssl_options.keyfile", "rabbitmq_trust_store.ssl_options.keyfile",
+ [{datatype, string}, {validators, ["file_accessible"]}]}.
+
+{mapping, "trust_store.ssl_options.log_alert", "rabbitmq_trust_store.ssl_options.log_alert",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "trust_store.ssl_options.password", "rabbitmq_trust_store.ssl_options.password",
+ [{datatype, string}]}.
+
+{mapping, "trust_store.ssl_options.psk_identity", "rabbitmq_trust_store.ssl_options.psk_identity",
+ [{datatype, string}]}.
+
+{mapping, "trust_store.ssl_options.reuse_sessions", "rabbitmq_trust_store.ssl_options.reuse_sessions",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "trust_store.ssl_options.secure_renegotiate", "rabbitmq_trust_store.ssl_options.secure_renegotiate",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "trust_store.ssl_options.versions.$version", "rabbitmq_trust_store.ssl_options.versions",
+ [{datatype, atom}]}.
+
+{translation, "rabbitmq_trust_store.ssl_options.versions",
+fun(Conf) ->
+ Settings = cuttlefish_variable:filter_by_prefix("trust_store.ssl_options.versions", Conf),
+ [ V || {_, V} <- Settings ]
+end}.
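A hedged sketch of the new-style configuration these mappings accept (the path and values are illustrative):

```
trust_store.providers.1      = file
trust_store.directory        = /etc/rabbitmq/trust_store
trust_store.refresh_interval = 30
```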
diff --git a/deps/rabbitmq_trust_store/rabbitmq-components.mk b/deps/rabbitmq_trust_store/rabbitmq-components.mk
new file mode 100644
index 0000000000..b2a3be8b35
--- /dev/null
+++ b/deps/rabbitmq_trust_store/rabbitmq-components.mk
@@ -0,0 +1,359 @@
+ifeq ($(.DEFAULT_GOAL),)
+# Define the default goal as `all` because this file defines some targets
+# before the inclusion of erlang.mk, which would otherwise cause the wrong
+# target to become the default.
+.DEFAULT_GOAL = all
+endif
+
+# PROJECT_VERSION defaults to:
+# 1. the version exported by rabbitmq-server-release;
+# 2. the version stored in `git-revisions.txt`, if it exists;
+# 3. a version based on git-describe(1), if it is a Git clone;
+# 4. 0.0.0
+
+PROJECT_VERSION := $(RABBITMQ_VERSION)
+
+ifeq ($(PROJECT_VERSION),)
+PROJECT_VERSION := $(shell \
+if test -f git-revisions.txt; then \
+ head -n1 git-revisions.txt | \
+ awk '{print $$$(words $(PROJECT_DESCRIPTION) version);}'; \
+else \
+ (git describe --dirty --abbrev=7 --tags --always --first-parent \
+ 2>/dev/null || echo rabbitmq_v0_0_0) | \
+ sed -e 's/^rabbitmq_v//' -e 's/^v//' -e 's/_/./g' -e 's/-/+/' \
+ -e 's/-/./g'; \
+fi)
+endif
+
+# --------------------------------------------------------------------
+# RabbitMQ components.
+# --------------------------------------------------------------------
+
+# For RabbitMQ repositories, we want to checkout branches which match
+# the parent project. For instance, if the parent project is on a
+# release tag, dependencies must be on the same release tag. If the
+# parent project is on a topic branch, dependencies must be on the same
+# topic branch or fallback to `stable` or `master` whichever was the
+# base of the topic branch.
+
+dep_amqp_client = git_rmq rabbitmq-erlang-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_amqp10_client = git_rmq rabbitmq-amqp1.0-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_amqp10_common = git_rmq rabbitmq-amqp1.0-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbit = git_rmq rabbitmq-server $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbit_common = git_rmq rabbitmq-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_amqp1_0 = git_rmq rabbitmq-amqp1.0 $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_amqp = git_rmq rabbitmq-auth-backend-amqp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_cache = git_rmq rabbitmq-auth-backend-cache $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_http = git_rmq rabbitmq-auth-backend-http $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_ldap = git_rmq rabbitmq-auth-backend-ldap $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_oauth2 = git_rmq rabbitmq-auth-backend-oauth2 $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_mechanism_ssl = git_rmq rabbitmq-auth-mechanism-ssl $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_aws = git_rmq rabbitmq-aws $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_boot_steps_visualiser = git_rmq rabbitmq-boot-steps-visualiser $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_cli = git_rmq rabbitmq-cli $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_codegen = git_rmq rabbitmq-codegen $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_consistent_hash_exchange = git_rmq rabbitmq-consistent-hash-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_ct_client_helpers = git_rmq rabbitmq-ct-client-helpers $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_ct_helpers = git_rmq rabbitmq-ct-helpers $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_delayed_message_exchange = git_rmq rabbitmq-delayed-message-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_dotnet_client = git_rmq rabbitmq-dotnet-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_event_exchange = git_rmq rabbitmq-event-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_federation = git_rmq rabbitmq-federation $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_federation_management = git_rmq rabbitmq-federation-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_java_client = git_rmq rabbitmq-java-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_client = git_rmq rabbitmq-jms-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_cts = git_rmq rabbitmq-jms-cts $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_topic_exchange = git_rmq rabbitmq-jms-topic-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_lvc_exchange = git_rmq rabbitmq-lvc-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management = git_rmq rabbitmq-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_agent = git_rmq rabbitmq-management-agent $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_exchange = git_rmq rabbitmq-management-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_themes = git_rmq rabbitmq-management-themes $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_message_timestamp = git_rmq rabbitmq-message-timestamp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_metronome = git_rmq rabbitmq-metronome $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_mqtt = git_rmq rabbitmq-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_objc_client = git_rmq rabbitmq-objc-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_aws = git_rmq rabbitmq-peer-discovery-aws $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_common = git_rmq rabbitmq-peer-discovery-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_consul = git_rmq rabbitmq-peer-discovery-consul $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_etcd = git_rmq rabbitmq-peer-discovery-etcd $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_k8s = git_rmq rabbitmq-peer-discovery-k8s $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_prometheus = git_rmq rabbitmq-prometheus $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_random_exchange = git_rmq rabbitmq-random-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_recent_history_exchange = git_rmq rabbitmq-recent-history-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_routing_node_stamp = git_rmq rabbitmq-routing-node-stamp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_rtopic_exchange = git_rmq rabbitmq-rtopic-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_server_release = git_rmq rabbitmq-server-release $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_sharding = git_rmq rabbitmq-sharding $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_shovel = git_rmq rabbitmq-shovel $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_shovel_management = git_rmq rabbitmq-shovel-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_stomp = git_rmq rabbitmq-stomp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_stream = git_rmq rabbitmq-stream $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_toke = git_rmq rabbitmq-toke $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_top = git_rmq rabbitmq-top $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_tracing = git_rmq rabbitmq-tracing $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_trust_store = git_rmq rabbitmq-trust-store $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_test = git_rmq rabbitmq-test $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_dispatch = git_rmq rabbitmq-web-dispatch $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_stomp = git_rmq rabbitmq-web-stomp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_stomp_examples = git_rmq rabbitmq-web-stomp-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt = git_rmq rabbitmq-web-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt_examples = git_rmq rabbitmq-web-mqtt-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_website = git_rmq rabbitmq-website $(current_rmq_ref) $(base_rmq_ref) live master
+dep_toke = git_rmq toke $(current_rmq_ref) $(base_rmq_ref) master
+
+dep_rabbitmq_public_umbrella = git_rmq rabbitmq-public-umbrella $(current_rmq_ref) $(base_rmq_ref) master
+
+# Third-party dependencies version pinning.
+#
+# We do that in this file, which is copied in all projects, to ensure
+# all projects use the same versions. It avoids conflicts and makes it
+# possible to work with rabbitmq-public-umbrella.
+
+dep_accept = hex 0.3.5
+dep_cowboy = hex 2.8.0
+dep_cowlib = hex 2.9.1
+dep_jsx = hex 2.11.0
+dep_lager = hex 3.8.0
+dep_prometheus = git https://github.com/deadtrickster/prometheus.erl.git master
+dep_ra = git https://github.com/rabbitmq/ra.git master
+dep_ranch = hex 1.7.1
+dep_recon = hex 2.5.1
+dep_observer_cli = hex 1.5.4
+dep_stdout_formatter = hex 0.2.4
+dep_sysmon_handler = hex 1.3.0
+
+RABBITMQ_COMPONENTS = amqp_client \
+ amqp10_common \
+ amqp10_client \
+ rabbit \
+ rabbit_common \
+ rabbitmq_amqp1_0 \
+ rabbitmq_auth_backend_amqp \
+ rabbitmq_auth_backend_cache \
+ rabbitmq_auth_backend_http \
+ rabbitmq_auth_backend_ldap \
+ rabbitmq_auth_backend_oauth2 \
+ rabbitmq_auth_mechanism_ssl \
+ rabbitmq_aws \
+ rabbitmq_boot_steps_visualiser \
+ rabbitmq_cli \
+ rabbitmq_codegen \
+ rabbitmq_consistent_hash_exchange \
+ rabbitmq_ct_client_helpers \
+ rabbitmq_ct_helpers \
+ rabbitmq_delayed_message_exchange \
+ rabbitmq_dotnet_client \
+ rabbitmq_event_exchange \
+ rabbitmq_federation \
+ rabbitmq_federation_management \
+ rabbitmq_java_client \
+ rabbitmq_jms_client \
+ rabbitmq_jms_cts \
+ rabbitmq_jms_topic_exchange \
+ rabbitmq_lvc_exchange \
+ rabbitmq_management \
+ rabbitmq_management_agent \
+ rabbitmq_management_exchange \
+ rabbitmq_management_themes \
+ rabbitmq_message_timestamp \
+ rabbitmq_metronome \
+ rabbitmq_mqtt \
+ rabbitmq_objc_client \
+ rabbitmq_peer_discovery_aws \
+ rabbitmq_peer_discovery_common \
+ rabbitmq_peer_discovery_consul \
+ rabbitmq_peer_discovery_etcd \
+ rabbitmq_peer_discovery_k8s \
+ rabbitmq_prometheus \
+ rabbitmq_random_exchange \
+ rabbitmq_recent_history_exchange \
+ rabbitmq_routing_node_stamp \
+ rabbitmq_rtopic_exchange \
+ rabbitmq_server_release \
+ rabbitmq_sharding \
+ rabbitmq_shovel \
+ rabbitmq_shovel_management \
+ rabbitmq_stomp \
+ rabbitmq_stream \
+ rabbitmq_toke \
+ rabbitmq_top \
+ rabbitmq_tracing \
+ rabbitmq_trust_store \
+ rabbitmq_web_dispatch \
+ rabbitmq_web_mqtt \
+ rabbitmq_web_mqtt_examples \
+ rabbitmq_web_stomp \
+ rabbitmq_web_stomp_examples \
+ rabbitmq_website
+
+# Erlang.mk does not rebuild dependencies by default once they have been
+# compiled, except for those listed in the `$(FORCE_REBUILD)`
+# variable.
+#
+# We want all RabbitMQ components to always be rebuilt: this eases
+# the work on several components at the same time.
+
+FORCE_REBUILD = $(RABBITMQ_COMPONENTS)
+
+# Several components have a custom erlang.mk/build.config, mainly
+# to disable eunit. Therefore, we can't use the top-level project's
+# erlang.mk copy.
+NO_AUTOPATCH += $(RABBITMQ_COMPONENTS)
+
+ifeq ($(origin current_rmq_ref),undefined)
+ifneq ($(wildcard .git),)
+current_rmq_ref := $(shell (\
+ ref=$$(LANG=C git branch --list | awk '/^\* \(.*detached / {ref=$$0; sub(/.*detached [^ ]+ /, "", ref); sub(/\)$$/, "", ref); print ref; exit;} /^\* / {ref=$$0; sub(/^\* /, "", ref); print ref; exit}');\
+ if test "$$(git rev-parse --short HEAD)" != "$$ref"; then echo "$$ref"; fi))
+else
+current_rmq_ref := master
+endif
+endif
+export current_rmq_ref
+
+ifeq ($(origin base_rmq_ref),undefined)
+ifneq ($(wildcard .git),)
+possible_base_rmq_ref := master
+ifeq ($(possible_base_rmq_ref),$(current_rmq_ref))
+base_rmq_ref := $(current_rmq_ref)
+else
+base_rmq_ref := $(shell \
+ (git rev-parse --verify -q master >/dev/null && \
+ git rev-parse --verify -q $(possible_base_rmq_ref) >/dev/null && \
+ git merge-base --is-ancestor $$(git merge-base master HEAD) $(possible_base_rmq_ref) && \
+ echo $(possible_base_rmq_ref)) || \
+ echo master)
+endif
+else
+base_rmq_ref := master
+endif
+endif
+export base_rmq_ref
+
+# Repository URL selection.
+#
+# First, we infer other components' location from the current project
+# repository URL, if it's a Git repository:
+# - We take the "origin" remote URL as the base
+# - The current project name and repository name are replaced by the
+#   target's properties:
+# eg. rabbitmq-common is replaced by rabbitmq-codegen
+# eg. rabbit_common is replaced by rabbitmq_codegen
+#
+# If cloning from this computed location fails, we fallback to RabbitMQ
+# upstream which is GitHub.
+
+# Macro to transform eg. "rabbit_common" to "rabbitmq-common".
+rmq_cmp_repo_name = $(word 2,$(dep_$(1)))
+
+# Upstream URL for the current project.
+RABBITMQ_COMPONENT_REPO_NAME := $(call rmq_cmp_repo_name,$(PROJECT))
+RABBITMQ_UPSTREAM_FETCH_URL ?= https://github.com/rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git
+RABBITMQ_UPSTREAM_PUSH_URL ?= git@github.com:rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git
+
+# Current URL for the current project. If this is not a Git clone,
+# default to the upstream Git repository.
+ifneq ($(wildcard .git),)
+git_origin_fetch_url := $(shell git config remote.origin.url)
+git_origin_push_url := $(shell git config remote.origin.pushurl || git config remote.origin.url)
+RABBITMQ_CURRENT_FETCH_URL ?= $(git_origin_fetch_url)
+RABBITMQ_CURRENT_PUSH_URL ?= $(git_origin_push_url)
+else
+RABBITMQ_CURRENT_FETCH_URL ?= $(RABBITMQ_UPSTREAM_FETCH_URL)
+RABBITMQ_CURRENT_PUSH_URL ?= $(RABBITMQ_UPSTREAM_PUSH_URL)
+endif
+
+# Macro to replace the following pattern:
+# 1. /foo.git -> /bar.git
+# 2. /foo -> /bar
+# 3. /foo/ -> /bar/
+subst_repo_name = $(patsubst %/$(1)/%,%/$(2)/%,$(patsubst %/$(1),%/$(2),$(patsubst %/$(1).git,%/$(2).git,$(3))))
+
+# Macro to replace both the project's name (eg. "rabbit_common") and
+# repository name (eg. "rabbitmq-common") by the target's equivalent.
+#
+# This macro is kept on one line because we don't want whitespaces in
+# the returned value, as it's used in $(dep_fetch_git_rmq) in a shell
+# single-quoted string.
+dep_rmq_repo = $(if $(dep_$(2)),$(call subst_repo_name,$(PROJECT),$(2),$(call subst_repo_name,$(RABBITMQ_COMPONENT_REPO_NAME),$(call rmq_cmp_repo_name,$(2)),$(1))),$(pkg_$(1)_repo))
+
+dep_rmq_commits = $(if $(dep_$(1)), \
+ $(wordlist 3,$(words $(dep_$(1))),$(dep_$(1))), \
+ $(pkg_$(1)_commit))
+
+define dep_fetch_git_rmq
+ fetch_url1='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_FETCH_URL),$(1))'; \
+ fetch_url2='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_FETCH_URL),$(1))'; \
+ if test "$$$$fetch_url1" != '$(RABBITMQ_CURRENT_FETCH_URL)' && \
+ git clone -q -n -- "$$$$fetch_url1" $(DEPS_DIR)/$(call dep_name,$(1)); then \
+ fetch_url="$$$$fetch_url1"; \
+ push_url='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_PUSH_URL),$(1))'; \
+ elif git clone -q -n -- "$$$$fetch_url2" $(DEPS_DIR)/$(call dep_name,$(1)); then \
+ fetch_url="$$$$fetch_url2"; \
+ push_url='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_PUSH_URL),$(1))'; \
+ fi; \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && ( \
+ $(foreach ref,$(call dep_rmq_commits,$(1)), \
+ git checkout -q $(ref) >/dev/null 2>&1 || \
+ ) \
+ (echo "error: no valid pathspec among: $(call dep_rmq_commits,$(1))" \
+ 1>&2 && false) ) && \
+ (test "$$$$fetch_url" = "$$$$push_url" || \
+ git remote set-url --push origin "$$$$push_url")
+endef
+
+# --------------------------------------------------------------------
+# Component distribution.
+# --------------------------------------------------------------------
+
+list-dist-deps::
+ @:
+
+prepare-dist::
+ @:
+
+# --------------------------------------------------------------------
+# Umbrella-specific settings.
+# --------------------------------------------------------------------
+
+# If the top-level project is a RabbitMQ component, we override
+# $(DEPS_DIR) for this project to point to the top-level's one.
+#
+# We also verify that the guessed DEPS_DIR is actually named `deps`,
+# to rule out any situation where it is a coincidence that we found a
+# `rabbitmq-components.mk` in upper directories.
+
+possible_deps_dir_1 = $(abspath ..)
+possible_deps_dir_2 = $(abspath ../../..)
+
+ifeq ($(notdir $(possible_deps_dir_1)),deps)
+ifneq ($(wildcard $(possible_deps_dir_1)/../rabbitmq-components.mk),)
+deps_dir_overriden = 1
+DEPS_DIR ?= $(possible_deps_dir_1)
+DISABLE_DISTCLEAN = 1
+endif
+endif
+
+ifeq ($(deps_dir_overriden),)
+ifeq ($(notdir $(possible_deps_dir_2)),deps)
+ifneq ($(wildcard $(possible_deps_dir_2)/../rabbitmq-components.mk),)
+deps_dir_overriden = 1
+DEPS_DIR ?= $(possible_deps_dir_2)
+DISABLE_DISTCLEAN = 1
+endif
+endif
+endif
+
+ifneq ($(wildcard UMBRELLA.md),)
+DISABLE_DISTCLEAN = 1
+endif
+
+# We disable `make distclean` so $(DEPS_DIR) is not accidentally removed.
+
+ifeq ($(DISABLE_DISTCLEAN),1)
+ifneq ($(filter distclean distclean-deps,$(MAKECMDGOALS)),)
+SKIP_DEPS = 1
+endif
+endif
diff --git a/deps/rabbitmq_trust_store/src/rabbit_trust_store.erl b/deps/rabbitmq_trust_store/src/rabbit_trust_store.erl
new file mode 100644
index 0000000000..6ebe2dc2ac
--- /dev/null
+++ b/deps/rabbitmq_trust_store/src/rabbit_trust_store.erl
@@ -0,0 +1,352 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_trust_store).
+
+%% Transitional step until we can require Erlang/OTP 21 and
+%% use the now recommended try/catch syntax for obtaining the stack trace.
+-compile(nowarn_deprecated_function).
+
+-behaviour(gen_server).
+
+-export([mode/0, refresh/0, list/0]). %% Console Interface.
+-export([whitelisted/3, is_whitelisted/1]). %% Client-side Interface.
+-export([start_link/0]).
+-export([init/1, terminate/2,
+ handle_call/3, handle_cast/2,
+ handle_info/2,
+ code_change/3]).
+
+-include_lib("stdlib/include/ms_transform.hrl").
+-include_lib("kernel/include/file.hrl").
+-include_lib("public_key/include/public_key.hrl").
+
+-type certificate() :: #'OTPCertificate'{}.
+-type event() :: valid_peer
+ | valid
+ | {bad_cert, Other :: atom()
+ | unknown_ca
+ | selfsigned_peer}
+ | {extension, #'Extension'{}}.
+-type state() :: confirmed | continue.
+-type outcome() :: {valid, state()}
+ | {fail, Reason :: term()}
+ | {unknown, state()}.
+
+-record(state, {
+ providers_state :: [{module(), term()}],
+ refresh_interval :: integer()
+}).
+-record(entry, {
+ name :: string(),
+ cert_id :: term(),
+ provider :: module(),
+ issuer_id :: tuple(),
+ certificate :: public_key:der_encoded()
+}).
+
+
+%% OTP Supervision
+
+start_link() ->
+ gen_server:start_link({local, trust_store}, ?MODULE, [], []).
+
+
+%% Console Interface
+
+-spec mode() -> 'automatic' | 'manual'.
+mode() ->
+ gen_server:call(trust_store, mode).
+
+-spec refresh() -> ok.
+refresh() ->
+ gen_server:call(trust_store, refresh).
+
+-spec list() -> string().
+list() ->
+ Formatted = lists:map(
+ fun(#entry{
+ name = N,
+ cert_id = CertId,
+ certificate = Cert,
+ issuer_id = {_, Serial}}) ->
+ %% Use the certificate unique identifier as a default for the name.
+ Name = case N of
+ undefined -> io_lib:format("~p", [CertId]);
+ _ -> N
+ end,
+ Validity = rabbit_ssl:peer_cert_validity(Cert),
+ Subject = rabbit_ssl:peer_cert_subject(Cert),
+ Issuer = rabbit_ssl:peer_cert_issuer(Cert),
+ Text = io_lib:format("Name: ~s~nSerial: ~p | 0x~.16.0B~n"
+ "Subject: ~s~nIssuer: ~s~nValidity: ~p~n",
+ [Name, Serial, Serial,
+ Subject, Issuer, Validity]),
+ lists:flatten(Text)
+ end,
+ ets:tab2list(table_name())),
+ string:join(Formatted, "~n~n").
+
+%% Client (SSL Socket) Interface
+
+-spec whitelisted(certificate(), event(), state()) -> outcome().
+whitelisted(_, {bad_cert, unknown_ca}, confirmed) ->
+ {valid, confirmed};
+whitelisted(#'OTPCertificate'{}=C, {bad_cert, unknown_ca}, continue) ->
+ case is_whitelisted(C) of
+ true ->
+ {valid, confirmed};
+ false ->
+ {fail, "CA not known AND certificate not whitelisted"}
+ end;
+whitelisted(#'OTPCertificate'{}=C, {bad_cert, selfsigned_peer}, continue) ->
+ case is_whitelisted(C) of
+ true ->
+ {valid, confirmed};
+ false ->
+ {fail, "certificate not whitelisted"}
+ end;
+whitelisted(_, {bad_cert, _} = Reason, _) ->
+ {fail, Reason};
+whitelisted(_, valid, St) ->
+ {valid, St};
+whitelisted(#'OTPCertificate'{}=_, valid_peer, St) ->
+ {valid, St};
+whitelisted(_, {extension, _}, St) ->
+ {unknown, St}.
+
+-spec is_whitelisted(certificate()) -> boolean().
+is_whitelisted(#'OTPCertificate'{}=C) ->
+ Id = extract_issuer_id(C),
+ ets:member(table_name(), Id).
+
+%% Generic Server Callbacks
+
+init([]) ->
+ erlang:process_flag(trap_exit, true),
+ ets:new(table_name(), table_options()),
+ Config = application:get_all_env(rabbitmq_trust_store),
+ ProvidersState = refresh_certs(Config, []),
+ Interval = refresh_interval(Config),
+ if
+ Interval =:= 0 ->
+ ok;
+ Interval > 0 ->
+ erlang:send_after(Interval, erlang:self(), refresh)
+ end,
+ State = #state{
+ providers_state = ProvidersState,
+ refresh_interval = Interval},
+ {ok, State}.
+
+handle_call(mode, _, St) ->
+ {reply, mode(St), St};
+handle_call(refresh, _, #state{providers_state = ProvidersState} = St) ->
+ Config = application:get_all_env(rabbitmq_trust_store),
+ NewProvidersState = refresh_certs(Config, ProvidersState),
+ {reply, ok, St#state{providers_state = NewProvidersState}};
+handle_call(_, _, St) ->
+ {noreply, St}.
+
+handle_cast(_, St) ->
+ {noreply, St}.
+
+handle_info(refresh, #state{refresh_interval = Interval,
+ providers_state = ProvidersState} = St) ->
+ Config = application:get_all_env(rabbitmq_trust_store),
+ try
+ rabbit_log:debug("Trust store will attempt to refresh certificates..."),
+ NewProvidersState = refresh_certs(Config, ProvidersState),
+ {noreply, St#state{providers_state = NewProvidersState}}
+ catch
+ _:Error ->
+ rabbit_log:error("Failed to refresh certificates: ~p", [Error]),
+ {noreply, St#state{providers_state = ProvidersState}}
+ after
+ erlang:send_after(Interval, erlang:self(), refresh)
+ end;
+handle_info(_, St) ->
+ {noreply, St}.
+
+terminate(shutdown, _St) ->
+ true = ets:delete(table_name()).
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+
+%% Ancillary & Constants
+
+mode(#state{refresh_interval = I}) ->
+ if
+ I =:= 0 -> 'manual';
+ I > 0 -> 'automatic'
+ end.
+
+refresh_interval(Config) ->
+ Seconds = case proplists:get_value(refresh_interval, Config) of
+ undefined ->
+ default_refresh_interval();
+ S when is_integer(S), S >= 0 ->
+ S;
+ {seconds, S} when is_integer(S), S >= 0 ->
+ S
+ end,
+ timer:seconds(Seconds).
+
+default_refresh_interval() ->
+ {ok, I} = application:get_env(rabbitmq_trust_store, default_refresh_interval),
+ I.
+
+
+%% =================================
+
+-spec refresh_certs(Config, State) -> State
+ when State :: [{module(), term()}],
+ Config :: list().
+refresh_certs(Config, State) ->
+ Providers = providers(Config),
+ clean_deleted_providers(Providers),
+ lists:foldl(
+ fun(Provider, NewStates) ->
+ ProviderState = proplists:get_value(Provider, State, nostate),
+ RefreshedState = refresh_provider_certs(Provider, Config, ProviderState),
+ [{Provider, RefreshedState} | NewStates]
+ end,
+ [],
+ Providers).
+
+-spec refresh_provider_certs(Provider, Config, ProviderState) -> ProviderState
+ when Provider :: module(),
+ Config :: list(),
+ ProviderState :: term().
+refresh_provider_certs(Provider, Config, ProviderState) ->
+ case list_certs(Provider, Config, ProviderState) of
+ no_change ->
+ rabbit_log:debug("Trust store provider reported no certificate changes"),
+ ProviderState;
+ ok ->
+ rabbit_log:debug("Trust store provider reported no certificate changes"),
+ ProviderState;
+ {ok, CertsList, NewProviderState} ->
+ rabbit_log:debug("Trust store listed certificates: ~p", [CertsList]),
+ update_certs(CertsList, Provider, Config),
+ NewProviderState;
+ {error, Reason} ->
+ rabbit_log:error("Unable to load certificate list for provider ~p,"
+ " reason: ~p~n",
+ [Provider, Reason]),
+ ProviderState
+ end.
+
+list_certs(Provider, Config, nostate) ->
+ Provider:list_certs(Config);
+list_certs(Provider, Config, ProviderState) ->
+ Provider:list_certs(Config, ProviderState).
+
+update_certs(CertsList, Provider, Config) ->
+ rabbit_log:debug("Updating ~p fetched trust store certificates", [length(CertsList)]),
+ OldCertIds = get_old_cert_ids(Provider),
+ {NewCertIds, _} = lists:unzip(CertsList),
+
+ lists:foreach(
+ fun(CertId) ->
+ Attributes = proplists:get_value(CertId, CertsList),
+ Name = proplists:get_value(name, Attributes, undefined),
+ case load_and_decode_cert(Provider, CertId, Attributes, Config) of
+ {ok, Cert, IssuerId} ->
+ save_cert(CertId, Provider, IssuerId, Cert, Name);
+ {error, Reason} ->
+ rabbit_log:error("Unable to load CA certificate ~p"
+ " with provider ~p,"
+ " reason: ~p",
+ [CertId, Provider, Reason])
+ end
+ end,
+ NewCertIds -- OldCertIds),
+ lists:foreach(
+ fun(CertId) ->
+ delete_cert(CertId, Provider)
+ end,
+ OldCertIds -- NewCertIds),
+ ok.
+
+load_and_decode_cert(Provider, CertId, Attributes, Config) ->
+ try
+ case Provider:load_cert(CertId, Attributes, Config) of
+ {ok, Cert} ->
+ DecodedCert = public_key:pkix_decode_cert(Cert, otp),
+ Id = extract_issuer_id(DecodedCert),
+ {ok, Cert, Id};
+ {error, Reason} -> {error, Reason}
+ end
+ catch _:Error:Stacktrace ->
+ {error, {Error, Stacktrace}}
+ end.
+
+delete_cert(CertId, Provider) ->
+ MS = ets:fun2ms(fun(#entry{cert_id = CId, provider = P})
+ when P == Provider, CId == CertId ->
+ true
+ end),
+ ets:select_delete(table_name(), MS).
+
+save_cert(CertId, Provider, Id, Cert, Name) ->
+ ets:insert(table_name(), #entry{cert_id = CertId,
+ provider = Provider,
+ issuer_id = Id,
+ certificate = Cert,
+ name = Name}).
+
+get_old_cert_ids(Provider) ->
+ MS = ets:fun2ms(fun(#entry{provider = P, cert_id = CId})
+ when P == Provider ->
+ CId
+ end),
+ ets:select(table_name(), MS).
+
+providers(Config) ->
+ Providers = proplists:get_value(providers, Config, []),
+ lists:filter(
+ fun(Provider) ->
+ case code:ensure_loaded(Provider) of
+ {module, Provider} -> true;
+ {error, Error} ->
+ rabbit_log:warning("Unable to load trust store certificates"
+ " with provider module ~p. Reason: ~p~n",
+ [Provider, Error]),
+ false
+ end
+ end,
+ Providers).
+
+table_name() ->
+ trust_store_whitelist.
+
+table_options() ->
+ [protected,
+ named_table,
+ set,
+ {keypos, #entry.issuer_id},
+ {heir, none}].
+
+extract_issuer_id(#'OTPCertificate'{} = C) ->
+ {Serial, Issuer} = case public_key:pkix_issuer_id(C, other) of
+ {error, _Reason} ->
+ {ok, Identifier} = public_key:pkix_issuer_id(C, self),
+ Identifier;
+ {ok, Identifier} ->
+ Identifier
+ end,
+ {Issuer, Serial}.
+
+clean_deleted_providers(Providers) ->
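+    %% Reuse the match head generated by fun2ms, but swap in conditions
+    %% that select entries whose provider ('$1') is not among the given
+    %% Providers; those stale entries are then deleted.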
+ [{EntryMatch, _, [true]}] =
+ ets:fun2ms(fun(#entry{provider = P})-> true end),
+ Condition = [ {'=/=', '$1', Provider} || Provider <- Providers ],
+ ets:select_delete(table_name(), [{EntryMatch, Condition, [true]}]).
+
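The console interface exported above can be exercised on a running node, for example via `rabbitmqctl eval` (a sketch):

```
rabbit_trust_store:mode().    %% 'automatic' or 'manual', per refresh_interval
rabbit_trust_store:refresh(). %% force a re-read of all configured providers
rabbit_trust_store:list().    %% formatted list of whitelisted certificates
```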
diff --git a/deps/rabbitmq_trust_store/src/rabbit_trust_store_app.erl b/deps/rabbitmq_trust_store/src/rabbit_trust_store_app.erl
new file mode 100644
index 0000000000..5048eb60e5
--- /dev/null
+++ b/deps/rabbitmq_trust_store/src/rabbit_trust_store_app.erl
@@ -0,0 +1,78 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_trust_store_app).
+-behaviour(application).
+-export([change_SSL_options/0]).
+-export([revert_SSL_options/0]).
+-export([start/2, stop/1]).
+
+-rabbit_boot_step({rabbit_trust_store, [
+    {description, "Overrides TLS options to take the RabbitMQ trust store into account"},
+ {mfa, {?MODULE, change_SSL_options, []}},
+ {cleanup, {?MODULE, revert_SSL_options, []}},
+ %% {requires, ...},
+ {enables, networking}]}).
+
+change_SSL_options() ->
+ After = case application:get_env(rabbit, ssl_options) of
+ undefined ->
+ Before = [],
+ edit(Before);
+ {ok, Before} when is_list(Before) ->
+ ok = application:set_env(rabbit, initial_SSL_options, Before),
+ edit(Before)
+ end,
+ ok = application:set_env(rabbit,
+ ssl_options, After).
+
+revert_SSL_options() ->
+ {ok, Cfg} = application:get_env(rabbit, initial_SSL_options),
+ ok = application:set_env(rabbit, ssl_options, Cfg).
+
+start(normal, _) ->
+ rabbit_trust_store_sup:start_link().
+
+stop(_) ->
+ ok.
+
+
+%% Ancillary & Constants
+
+edit(Options) ->
+ case proplists:get_value(verify_fun, Options) of
+ undefined ->
+ ok;
+ Val ->
+ rabbit_log:warning("RabbitMQ trust store plugin is used "
+ "and the verify_fun TLS option is set: ~p. "
+ "It will be overwritten by the plugin.~n", [Val]),
+ ok
+ end,
+    %% Only add the options necessary for this application.
+ lists:keymerge(1, required_options(),
+ [{verify_fun, {delegate(), continue}},
+ {partial_chain, fun partial_chain/1} | Options]).
+
+delegate() -> fun rabbit_trust_store:whitelisted/3.
+
+partial_chain(Chain) ->
+ % special handling of clients that present a chain rather than just a peer cert.
+ case lists:reverse(Chain) of
+ [PeerDer, Ca | _] ->
+ Peer = public_key:pkix_decode_cert(PeerDer, otp),
+            % If the peer is whitelisted, make its immediate authority a trusted one.
+ % This means the peer will automatically be validated.
+ case rabbit_trust_store:is_whitelisted(Peer) of
+ true -> {trusted_ca, Ca};
+ false -> unknown_ca
+ end;
+ _ -> unknown_ca
+ end.
+
+required_options() ->
+ [{verify, verify_peer}, {fail_if_no_peer_cert, true}].
diff --git a/deps/rabbitmq_trust_store/src/rabbit_trust_store_certificate_provider.erl b/deps/rabbitmq_trust_store/src/rabbit_trust_store_certificate_provider.erl
new file mode 100644
index 0000000000..10e3367ec5
--- /dev/null
+++ b/deps/rabbitmq_trust_store/src/rabbit_trust_store_certificate_provider.erl
@@ -0,0 +1,31 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_trust_store_certificate_provider).
+
+-include_lib("public_key/include/public_key.hrl").
+
+-callback list_certs(Config)
+ -> no_change | {ok, [{CertId, Attributes}], ProviderState}
+ when Config :: list(),
+ ProviderState :: term(),
+ CertId :: term(),
+ Attributes :: list().
+
+-callback list_certs(Config, ProviderState)
+ -> no_change | {ok, [{CertId, Attributes}], ProviderState}
+ when Config :: list(),
+ ProviderState :: term(),
+ CertId :: term(),
+ Attributes :: list().
+
+-callback load_cert(CertId, Attributes, Config)
+ -> {ok, Cert} | {error, term()}
+ when CertId :: term(),
+ Attributes :: list(),
+ Config :: list(),
+         Cert :: public_key:der_encoded().
\ No newline at end of file
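A minimal conforming provider might look like the sketch below; it is not part of the plugin, and the module name and certificate path are hypothetical:

```
-module(my_static_provider).
-behaviour(rabbit_trust_store_certificate_provider).

-export([list_certs/1, list_certs/2, load_cert/3]).

%% Advertise a single, fixed certificate ID with a name attribute.
list_certs(_Config) ->
    {ok, [{my_cert, [{name, "my_cert"}]}], nostate}.

%% No provider state is kept, so delegate to the stateless clause.
list_certs(Config, _State) ->
    list_certs(Config).

%% Return the DER-encoded certificate for the given ID.
load_cert(my_cert, _Attributes, _Config) ->
    file:read_file("/path/to/cert.der").
```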
diff --git a/deps/rabbitmq_trust_store/src/rabbit_trust_store_file_provider.erl b/deps/rabbitmq_trust_store/src/rabbit_trust_store_file_provider.erl
new file mode 100644
index 0000000000..820ad550dd
--- /dev/null
+++ b/deps/rabbitmq_trust_store/src/rabbit_trust_store_file_provider.erl
@@ -0,0 +1,110 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_trust_store_file_provider).
+
+-include_lib("kernel/include/file.hrl").
+
+-behaviour(rabbit_trust_store_certificate_provider).
+
+-export([list_certs/1, list_certs/2, load_cert/3]).
+
+-define(DIRECTORY_OR_FILE_NAME_EXISTS, eexist).
+
+-type cert_id() :: {FileName :: string(),
+ ChangeTime :: integer(),
+ Hash :: integer()}.
+
+-spec list_certs(Config :: list())
+ -> no_change | {ok, [{cert_id(), list()}], State}
+ when State :: nostate.
+list_certs(Config) ->
+ Path = directory_path(Config),
+ Certs = list_certs_0(Path),
+ {ok, Certs, nostate}.
+
+-spec list_certs(Config :: list(), State)
+ -> no_change | {ok, [{cert_id(), list()}], State}
+ when State :: nostate.
+list_certs(Config, _) ->
+ list_certs(Config).
+
+-spec load_cert(cert_id(), list(), Config :: list())
+ -> {ok, Cert :: public_key:der_encoded()}.
+load_cert({FileName, _, _}, _, Config) ->
+ Path = directory_path(Config),
+ Cert = extract_cert(Path, FileName),
+ rabbit_log:info(
+ "trust store: loading certificate '~s'", [FileName]),
+ {ok, Cert}.
+
+extract_cert(Path, FileName) ->
+ Absolute = filename:join(Path, FileName),
+ scan_then_parse(Absolute).
+
+scan_then_parse(Filename) when is_list(Filename) ->
+ {ok, Bin} = file:read_file(Filename),
+ [{'Certificate', Data, not_encrypted}] = public_key:pem_decode(Bin),
+ Data.
+
+list_certs_0(Path) ->
+ {ok, FileNames} = file:list_dir(Path),
+ lists:map(
+ fun(FileName) ->
+ AbsName = filename:absname(FileName, Path),
+ CertId = {FileName,
+ modification_time(AbsName),
+ file_content_hash(AbsName)},
+ {CertId, [{name, FileName}]}
+ end,
+ FileNames).
+
+modification_time(Path) ->
+ {ok, Info} = file:read_file_info(Path, [{time, posix}]),
+ Info#file_info.mtime.
+
+file_content_hash(Path) ->
+ {ok, Data} = file:read_file(Path),
+ erlang:phash2(Data).
+
+directory_path(Config) ->
+ directory_path(Config, default_directory()).
+
+directory_path(Config, Default) ->
+ Path = case proplists:get_value(directory, Config) of
+ undefined ->
+ Default;
+ V when is_binary(V) ->
+ binary_to_list(V);
+ V when is_list(V) ->
+ V
+ end,
+ ok = ensure_directory(Path),
+ Path.
+
+default_directory() ->
+    %% Derive the default whitelist directory from the Mnesia directory:
+    %% drop the last two path components to get the node directory, then
+    %% append `trust_store/whitelist`; `Full` holds the resulting path.
+ Table = filename:split(rabbit_mnesia:dir()),
+ Node = lists:sublist(Table, length(Table) - 2),
+ Full = Node ++ ["trust_store", "whitelist"],
+ filename:join(Full).
+
+ensure_directory(Path) ->
+ ok = ensure_parent_directories(Path),
+ case file:make_dir(Path) of
+ {error, ?DIRECTORY_OR_FILE_NAME_EXISTS} ->
+ true = filelib:is_dir(Path),
+ ok;
+ ok ->
+ ok
+ end.
+
+ensure_parent_directories(Path) ->
+ filelib:ensure_dir(Path).
+
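
As a usage sketch (the directory value below is hypothetical), the file provider
resolves its directory from the proplist configuration, lists one entry per file
keyed by name, modification time and content hash, and loads each certificate on
demand:

    Config = [{directory, "/var/lib/rabbitmq/trust_store/whitelist"}],
    {ok, Certs, nostate} = rabbit_trust_store_file_provider:list_certs(Config),
    %% Each CertId is {FileName, ChangeTime, Hash}; load the first entry.
    [{CertId, [{name, _FileName}]} | _] = Certs,
    {ok, DerCert} = rabbit_trust_store_file_provider:load_cert(CertId, [], Config).

Note that directory_path/1 creates the directory if it does not exist, so the
calling process needs write access to the parent directory.
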
diff --git a/deps/rabbitmq_trust_store/src/rabbit_trust_store_http_provider.erl b/deps/rabbitmq_trust_store/src/rabbit_trust_store_http_provider.erl
new file mode 100644
index 0000000000..5b05e5a986
--- /dev/null
+++ b/deps/rabbitmq_trust_store/src/rabbit_trust_store_http_provider.erl
@@ -0,0 +1,111 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_trust_store_http_provider).
+
+-include_lib("public_key/include/public_key.hrl").
+
+-behaviour(rabbit_trust_store_certificate_provider).
+
+-define(PROFILE, ?MODULE).
+
+-export([list_certs/1, list_certs/2, load_cert/3]).
+
+-record(http_state,{
+ url :: string(),
+ http_options :: list(),
+ headers :: httpc:headers()
+}).
+
+list_certs(Config) ->
+ init(Config),
+ State = init_state(Config),
+ list_certs(Config, State).
+
+list_certs(_, #http_state{url = Url,
+ http_options = HttpOptions,
+ headers = Headers} = State) ->
+ case (httpc:request(get, {Url, Headers}, HttpOptions, [{body_format, binary}], ?PROFILE)) of
+ {ok, {{_, 200, _}, RespHeaders, Body}} ->
+ rabbit_log:debug("Trust store HTTP[S] provider responded with 200 OK"),
+ Certs = decode_cert_list(Body),
+ NewState = new_state(RespHeaders, State),
+ {ok, Certs, NewState};
+ {ok, {{_,304, _}, _, _}} -> no_change;
+ {ok, {{_,Code,_}, _, Body}} -> {error, {http_error, Code, Body}};
+ {error, Reason} ->
+ rabbit_log:error("Trust store HTTP[S] provider request failed: ~p", [Reason]),
+ {error, Reason}
+ end.
+
+load_cert(_, Attributes, Config) ->
+ CertPath = proplists:get_value(path, Attributes),
+ #http_state{url = BaseUrl,
+ http_options = HttpOptions,
+ headers = Headers} = init_state(Config),
+ Url = join_url(BaseUrl, CertPath),
+ Res = httpc:request(get,
+ {Url, Headers},
+ HttpOptions,
+ [{body_format, binary}, {full_result, false}],
+ ?PROFILE),
+ case Res of
+ {ok, {200, Body}} ->
+ [{'Certificate', Cert, not_encrypted}] = public_key:pem_decode(Body),
+ {ok, Cert};
+ {ok, {Code, Body}} -> {error, {http_error, Code, Body}};
+ {error, Reason} -> {error, Reason}
+ end.
+
+join_url(BaseUrl, CertPath) ->
+ string:strip(rabbit_data_coercion:to_list(BaseUrl), right, $/)
+ ++ "/" ++
+ string:strip(rabbit_data_coercion:to_list(CertPath), left, $/).
+
+init(Config) ->
+ inets:start(httpc, [{profile, ?PROFILE}]),
+ application:ensure_all_started(ssl),
+ Options = proplists:get_value(proxy_options, Config, []),
+ httpc:set_options(Options, ?PROFILE).
+
+init_state(Config) ->
+ Url = proplists:get_value(url, Config),
+ Headers = proplists:get_value(http_headers, Config, []),
+ HttpOptions = case proplists:get_value(ssl_options, Config) of
+ undefined -> [];
+ SslOpts -> [{ssl, SslOpts}]
+ end,
+ #http_state{url = Url, http_options = HttpOptions, headers = [{"connection", "close"} | Headers]}.
+
+decode_cert_list(Body) ->
+ try
+ Struct = rabbit_json:decode(Body),
+ #{<<"certificates">> := Certs} = Struct,
+ lists:map(
+ fun(Cert) ->
+ Path = maps:get(<<"path">>, Cert),
+ CertId = maps:get(<<"id">>, Cert),
+ {CertId, [{path, Path}]}
+ end, Certs)
+ catch _:badarg ->
+ rabbit_log:error("Trust store failed to decode an HTTP[S] response: JSON parser failed"),
+ [];
+ _:Error ->
+ rabbit_log:error("Trust store failed to decode an HTTP[S] response: ~p", [Error]),
+ []
+ end.
+
+new_state(RespHeaders, #http_state{headers = Headers0} = State) ->
+ LastModified0 = proplists:get_value("last-modified", RespHeaders),
+ LastModified1 = proplists:get_value("Last-Modified", RespHeaders, LastModified0),
+ case LastModified1 of
+ undefined -> State;
+ Value ->
+ Headers1 = lists:ukeysort(1, Headers0),
+ NewHeaders = lists:ukeymerge(1, [{"If-Modified-Since", Value}], Headers1),
+ State#http_state{headers = NewHeaders}
+ end.
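
To illustrate the document shape decode_cert_list/1 expects (all identifiers and
paths below are made up), the endpoint serves a JSON object with a "certificates"
array whose entries carry an "id" and a "path"; load_cert/3 later joins that path
onto the configured base URL:

    %% Hypothetical response body from a trust store HTTP endpoint.
    Body = <<"{\"certificates\":["
             "{\"id\":\"alice\",\"path\":\"/certs/alice.pem\"},"
             "{\"id\":\"bob\",\"path\":\"/certs/bob.pem\"}]}">>,
    %% Entries is the list decode_cert_list/1 folds over; it reduces
    %% such a body to:
    %%   [{<<"alice">>, [{path, <<"/certs/alice.pem">>}]},
    %%    {<<"bob">>,   [{path, <<"/certs/bob.pem">>}]}]
    #{<<"certificates">> := Entries} = rabbit_json:decode(Body).

The provider also remembers the response's Last-Modified header and replays it as
If-Modified-Since on subsequent polls, so an unchanged certificate list yields a
304 response and the no_change return seen in list_certs/2.
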
diff --git a/deps/rabbitmq_trust_store/src/rabbit_trust_store_sup.erl b/deps/rabbitmq_trust_store/src/rabbit_trust_store_sup.erl
new file mode 100644
index 0000000000..f587d1e000
--- /dev/null
+++ b/deps/rabbitmq_trust_store/src/rabbit_trust_store_sup.erl
@@ -0,0 +1,39 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_trust_store_sup).
+-behaviour(supervisor).
+-export([start_link/0]).
+-export([init/1]).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+
+%% ...
+
+start_link() ->
+ supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+
+%% ...
+
+init([]) ->
+ Flags = #{strategy => one_for_one,
+ intensity => 10,
+ period => 1},
+ ChildSpecs = [
+ #{
+ id => trust_store,
+ start => {rabbit_trust_store, start_link, []},
+ restart => permanent,
+ shutdown => timer:seconds(15),
+ type => worker,
+ modules => [rabbit_trust_store]
+ }
+ ],
+
+ {ok, {Flags, ChildSpecs}}.
diff --git a/deps/rabbitmq_trust_store/test/config_schema_SUITE.erl b/deps/rabbitmq_trust_store/test/config_schema_SUITE.erl
new file mode 100644
index 0000000000..a2f0e01a81
--- /dev/null
+++ b/deps/rabbitmq_trust_store/test/config_schema_SUITE.erl
@@ -0,0 +1,55 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(config_schema_SUITE).
+
+-compile(export_all).
+
+all() ->
+ [
+ run_snippets
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ Config1 = rabbit_ct_helpers:run_setup_steps(Config),
+ rabbit_ct_config_schema:init_schemas(rabbitmq_trust_store, Config1).
+
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, Testcase}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_testcase(Testcase, Config) ->
+ Config1 = rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()),
+ rabbit_ct_helpers:testcase_finished(Config1, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+run_snippets(Config) ->
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, run_snippets1, [Config]).
+
+run_snippets1(Config) ->
+ rabbit_ct_config_schema:run_snippets(Config).
+
diff --git a/deps/rabbitmq_trust_store/test/config_schema_SUITE_data/certs/cacert.pem b/deps/rabbitmq_trust_store/test/config_schema_SUITE_data/certs/cacert.pem
new file mode 100644
index 0000000000..eaf6b67806
--- /dev/null
+++ b/deps/rabbitmq_trust_store/test/config_schema_SUITE_data/certs/cacert.pem
@@ -0,0 +1 @@
+I'm not a certificate
diff --git a/deps/rabbitmq_trust_store/test/config_schema_SUITE_data/certs/cert.pem b/deps/rabbitmq_trust_store/test/config_schema_SUITE_data/certs/cert.pem
new file mode 100644
index 0000000000..eaf6b67806
--- /dev/null
+++ b/deps/rabbitmq_trust_store/test/config_schema_SUITE_data/certs/cert.pem
@@ -0,0 +1 @@
+I'm not a certificate
diff --git a/deps/rabbitmq_trust_store/test/config_schema_SUITE_data/certs/key.pem b/deps/rabbitmq_trust_store/test/config_schema_SUITE_data/certs/key.pem
new file mode 100644
index 0000000000..eaf6b67806
--- /dev/null
+++ b/deps/rabbitmq_trust_store/test/config_schema_SUITE_data/certs/key.pem
@@ -0,0 +1 @@
+I'm not a certificate
diff --git a/deps/rabbitmq_trust_store/test/config_schema_SUITE_data/rabbitmq_trust_store.snippets b/deps/rabbitmq_trust_store/test/config_schema_SUITE_data/rabbitmq_trust_store.snippets
new file mode 100644
index 0000000000..d45f48ecef
--- /dev/null
+++ b/deps/rabbitmq_trust_store/test/config_schema_SUITE_data/rabbitmq_trust_store.snippets
@@ -0,0 +1,28 @@
+[{trust_store,"trust_store.refresh_interval = 30",
+ [{rabbitmq_trust_store,[{refresh_interval,30}]}],
+ [rabbitmq_trust_store]},
+ {trust_store_file,"trust_store.directory = /tmp/foo/bar",
+ [{rabbitmq_trust_store,[{directory,"/tmp/foo/bar"}]}],
+ [rabbitmq_trust_store]},
+ {trust_store_providers,
+ "trust_store.providers.1 = file
+ trust_store.providers.2 = http
+ trust_store.providers.3 = rabbit_trust_store_opera_com_provider",
+ [{rabbitmq_trust_store,
+ [{providers,
+ [rabbit_trust_store_file_provider,
+ rabbit_trust_store_http_provider,
+ rabbit_trust_store_opera_com_provider]}]}],
+ [rabbitmq_trust_store]},
+ {trust_store_http,
+ "trust_store.providers.1 = http
+ trust_store.url = https://example.com
+ trust_store.ssl_options.certfile = test/config_schema_SUITE_data/certs/cert.pem
+ trust_store.ssl_options.password = i_am_password",
+ [{rabbitmq_trust_store,
+ [{providers,[rabbit_trust_store_http_provider]},
+ {url,"https://example.com"},
+ {ssl_options,
+ [{certfile,"test/config_schema_SUITE_data/certs/cert.pem"},
+ {password,"i_am_password"}]}]}],
+ [rabbitmq_trust_store]}].
diff --git a/deps/rabbitmq_trust_store/test/system_SUITE.erl b/deps/rabbitmq_trust_store/test/system_SUITE.erl
new file mode 100644
index 0000000000..78e74f3b57
--- /dev/null
+++ b/deps/rabbitmq_trust_store/test/system_SUITE.erl
@@ -0,0 +1,956 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(system_SUITE).
+-compile([export_all]).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-define(SERVER_REJECT_CLIENT, {tls_alert, "unknown ca"}).
+-define(SERVER_REJECT_CLIENT_NEW, {tls_alert, {unknown_ca, _}}).
+-define(SERVER_REJECT_CLIENT_ERLANG24,
+ {tls_alert,
+ {handshake_failure,
+ "TLS client: In state cipher received SERVER ALERT: Fatal - "
+ "Handshake Failure\n"}}).
+-define(SERVER_REJECT_CONNECTION_ERLANG23,
+ {{socket_error,
+ {tls_alert,
+ {handshake_failure,
+ "TLS client: In state connection received SERVER ALERT: Fatal - "
+ "Handshake Failure\n"}}},
+ {expecting,'connection.start'}}).
+
+all() ->
+ [
+ {group, http_provider_tests},
+ {group, file_provider_tests}
+ ].
+
+groups() ->
+ CommonTests = [
+ validate_chain,
+ validate_longer_chain,
+ validate_chain_without_whitelisted,
+ whitelisted_certificate_accepted_from_AMQP_client_regardless_of_validation_to_root,
+ removed_certificate_denied_from_AMQP_client,
+ installed_certificate_accepted_from_AMQP_client,
+ whitelist_directory_DELTA,
+ replaced_whitelisted_certificate_should_be_accepted,
+ ensure_configuration_using_binary_strings_is_handled,
+ ignore_corrupt_cert,
+ ignore_same_cert_with_different_name,
+ list],
+ [
+ {file_provider_tests, [], [
+ library,
+ invasive_SSL_option_change,
+ validation_success_for_AMQP_client,
+ validation_failure_for_AMQP_client,
+ disabled_provider_removes_certificates,
+        enabled_provider_adds_certificates |
+ CommonTests
+ ]},
+ {http_provider_tests, [], CommonTests}
+ ].
+
+suite() ->
+ [{timetrap, {seconds, 180}}].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+set_up_node(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, ?MODULE},
+ {rmq_extra_tcp_ports, [tcp_port_amqp_tls_extra]}
+ ]),
+ rabbit_ct_helpers:run_setup_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+tear_down_node(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_group(file_provider_tests, Config) ->
+ case set_up_node(Config) of
+ {skip, _} = Error -> Error;
+ Config1 ->
+ WhitelistDir = filename:join([?config(rmq_certsdir, Config1),
+ "trust_store", "file_provider_tests"]),
+ Config2 = init_whitelist_dir(Config1, WhitelistDir),
+ ok = rabbit_ct_broker_helpers:rpc(Config2, 0,
+ ?MODULE, change_configuration,
+ [rabbitmq_trust_store, [{directory, WhitelistDir},
+ {refresh_interval, interval()},
+ {providers, [rabbit_trust_store_file_provider]}]]),
+ Config2
+ end;
+
+init_per_group(http_provider_tests, Config) ->
+ case set_up_node(Config) of
+ {skip, _} = Error -> Error;
+ Config1 ->
+ WhitelistDir = filename:join([?config(rmq_certsdir, Config1),
+ "trust_store", "http_provider_tests"]),
+ Config2 = init_whitelist_dir(Config1, WhitelistDir),
+ Config3 = init_provider_server(Config2, WhitelistDir),
+ Url = ?config(trust_store_server_url, Config3),
+
+ ok = rabbit_ct_broker_helpers:rpc(Config3, 0,
+ ?MODULE, change_configuration,
+ [rabbitmq_trust_store, [{url, Url},
+ {refresh_interval, interval()},
+ {providers, [rabbit_trust_store_http_provider]}]]),
+ Config3
+ end.
+
+init_provider_server(Config, WhitelistDir) ->
+ %% Assume we don't have more than 100 ports allocated for tests
+ PortBase = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_ports_base),
+
+ CertServerPort = PortBase + 100,
+ Url = "http://127.0.0.1:" ++ integer_to_list(CertServerPort) ++ "/",
+ application:load(trust_store_http),
+ ok = application:set_env(trust_store_http, directory, WhitelistDir),
+ ok = application:set_env(trust_store_http, port, CertServerPort),
+ ok = application:unset_env(trust_store_http, ssl_options),
+ application:ensure_all_started(trust_store_http),
+ rabbit_ct_helpers:set_config(Config, [{trust_store_server_port, CertServerPort},
+ {trust_store_server_url, Url}]).
+
+end_per_group(file_provider_tests, Config) ->
+ Config1 = tear_down_node(Config),
+ tear_down_whitelist_dir(Config1),
+ Config;
+end_per_group(http_provider_tests, Config) ->
+ Config1 = tear_down_node(Config),
+ application:stop(trust_store_http),
+ Config1;
+end_per_group(_, Config) ->
+ tear_down_node(Config).
+
+init_whitelist_dir(Config, WhitelistDir) ->
+ ok = filelib:ensure_dir(WhitelistDir),
+ ok = file:make_dir(WhitelistDir),
+ rabbit_ct_helpers:set_config(Config, {whitelist_dir, WhitelistDir}).
+
+tear_down_whitelist_dir(Config) ->
+ WhitelistDir = ?config(whitelist_dir, Config),
+ ok = rabbit_file:recursive_delete([WhitelistDir]).
+
+init_per_testcase(Testcase, Config) ->
+ WhitelistDir = ?config(whitelist_dir, Config),
+ ok = rabbit_file:recursive_delete([WhitelistDir]),
+ ok = file:make_dir(WhitelistDir),
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, change_configuration,
+ [rabbitmq_trust_store, []]),
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+
+%% -------------------------------------------------------------------
+%% Testsuite cases
+%% -------------------------------------------------------------------
+
+library(_) ->
+ %% Given: Makefile.
+ {_Root, _Certificate, _Key} = ct_helper:make_certs(),
+ ok.
+
+invasive_SSL_option_change(Config) ->
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, invasive_SSL_option_change1, []).
+
+invasive_SSL_option_change1() ->
+ %% Given: Rabbit is started with the boot-steps in the
+ %% Trust-Store's OTP Application file.
+
+ %% When: we get Rabbit's SSL options.
+ {ok, Options} = application:get_env(rabbit, ssl_options),
+
+ %% Then: all necessary settings are correct.
+ verify_peer = proplists:get_value(verify, Options),
+ true = proplists:get_value(fail_if_no_peer_cert, Options),
+ {Verifyfun, _UserState} = proplists:get_value(verify_fun, Options),
+
+ {module, rabbit_trust_store} = erlang:fun_info(Verifyfun, module),
+ ok.
+
+validation_success_for_AMQP_client(Config) ->
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, validation_success_for_AMQP_client1, [Config]).
+
+validation_success_for_AMQP_client1(Config) ->
+    %% This test intentionally doesn't whitelist any certificates.
+    %% Both the client and the server use certificate/key pairs signed by
+    %% the same root CA. This exercises a verify_fun clause that no other tests hit.
+    %% Note that when this test is executed together with the HTTP provider group,
+    %% it runs into unexpected interference and fails, even if the TLS application's
+    %% PEM cache is forcibly cleared. That's why each group was originally made to
+    %% use a separate node.
+ AuthorityInfo = {Root, _AuthorityKey} = erl_make_certs:make_cert([]),
+ {Certificate, Key} = chain(AuthorityInfo),
+ {Certificate2, Key2} = chain(AuthorityInfo),
+ Port = port(Config),
+ Host = rabbit_ct_helpers:get_config(Config, rmq_hostname),
+ %% When: Rabbit accepts just this one authority's certificate
+ %% (i.e. these are options that'd be in the configuration
+ %% file).
+ catch rabbit_networking:stop_tcp_listener(Port),
+ ok = rabbit_networking:start_ssl_listener(Port, [{cacerts, [Root]},
+ {cert, Certificate2},
+ {key, Key2} | cfg()], 1),
+
+    %% Then: a client presenting a certificate rooted at the same
+ %% authority connects successfully.
+ {ok, Con} = amqp_connection:start(#amqp_params_network{host = Host,
+ port = Port,
+ ssl_options = [{verify, verify_none},
+ {cert, Certificate},
+ {key, Key}]}),
+
+ %% Clean: client & server TLS/TCP.
+ ok = amqp_connection:close(Con),
+ ok = rabbit_networking:stop_tcp_listener(Port).
+
+
+validation_failure_for_AMQP_client(Config) ->
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, validation_failure_for_AMQP_client1, [Config]).
+
+validation_failure_for_AMQP_client1(Config) ->
+ %% Given: a root certificate and a certificate rooted with another
+ %% authority.
+ {Root, Cert, Key} = ct_helper:make_certs(),
+ {_, CertOther, KeyOther} = ct_helper:make_certs(),
+
+ Port = port(Config),
+ Host = rabbit_ct_helpers:get_config(Config, rmq_hostname),
+
+ %% When: Rabbit accepts certificates rooted with just one
+ %% particular authority.
+ catch rabbit_networking:stop_tcp_listener(Port),
+ ok = rabbit_networking:start_ssl_listener(Port, [{cacerts, [Root]},
+ {cert, Cert},
+ {key, Key} | cfg()], 1),
+
+ %% Then: a client presenting a certificate rooted with another
+ %% authority is REJECTED.
+ {error, Error} = amqp_connection:start(
+ #amqp_params_network{host = Host,
+ port = Port,
+ ssl_options = [{verify, verify_none},
+ {cert, CertOther},
+ {key, KeyOther}]}),
+ case Error of
+ %% Expected error from amqp_client.
+ ?SERVER_REJECT_CLIENT -> ok;
+ ?SERVER_REJECT_CLIENT_NEW -> ok;
+ ?SERVER_REJECT_CLIENT_ERLANG24 -> ok;
+ ?SERVER_REJECT_CONNECTION_ERLANG23 -> ok;
+
+ %% With Erlang 18.3, there is a regression which causes the SSL
+ %% connection to crash with the following exception:
+ %% ** {badarg,[{ets,update_counter,[1507362,#Ref<0.0.3.9>,-1],[]},
+ %% {ssl_pkix_db,ref_count,3,...
+ %%
+ %% When this exception reaches the connection process before the
+ %% expected TLS error, amqp_connection:start() returns {error,
+ %% closed} instead.
+ closed -> expected_erlang_18_ssl_regression;
+
+ %% ssl:setopts/2 hangs indefinitely on occasion
+        {timeout, {gen_server,call,[_,post_init|_]}} -> ssl_setopts_hangs_occasionally
+ end,
+
+ %% Clean: server TLS/TCP.
+ ok = rabbit_networking:stop_tcp_listener(Port).
+
+validate_chain(Config) ->
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, validate_chain1, [Config]).
+
+validate_chain1(Config) ->
+ %% Given: a whitelisted certificate `CertTrusted` AND a CA `RootTrusted`
+ {Root, Cert, Key} = ct_helper:make_certs(),
+ {RootTrusted, CertTrusted, KeyTrusted} = ct_helper:make_certs(),
+
+ Port = port(Config),
+ Host = rabbit_ct_helpers:get_config(Config, rmq_hostname),
+
+ ok = whitelist(Config, "alice", CertTrusted, KeyTrusted),
+ rabbit_trust_store:refresh(),
+
+ catch rabbit_networking:stop_tcp_listener(Port),
+ ok = rabbit_networking:start_ssl_listener(Port, [{cacerts, [Root]},
+ {cert, Cert},
+ {key, Key} | cfg()], 1),
+
+    %% When: a client connects and presents `RootTrusted` as well as `CertTrusted`
+ %% Then: the connection is successful.
+ {ok, Con} = amqp_connection:start(#amqp_params_network{host = Host,
+ port = Port,
+ ssl_options = [{cacerts, [RootTrusted]},
+ {cert, CertTrusted},
+ {key, KeyTrusted},
+ {verify, verify_none},
+ {server_name_indication, disable}]}),
+ %% Clean: client & server TLS/TCP
+ ok = amqp_connection:close(Con),
+ ok = rabbit_networking:stop_tcp_listener(Port).
+
+validate_longer_chain(Config) ->
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, validate_longer_chain1, [Config]).
+
+validate_longer_chain1(Config) ->
+
+ {Root, Cert, Key} = ct_helper:make_certs(),
+
+ %% Given: a whitelisted certificate `CertTrusted`
+    %% AND a certificate `CertUntrusted` that is not whitelisted, issued by the same intermediate as `CertTrusted`
+    %% AND an intermediate CA `CertInter`
+    %% AND a root CA `RootCA`
+ AuthorityInfo = {RootCA, _AuthorityKey} = erl_make_certs:make_cert([]),
+ Inter = {CertInter, {KindInter, KeyDataInter, _}} = erl_make_certs:make_cert([{issuer, AuthorityInfo}]),
+ KeyInter = {KindInter, KeyDataInter},
+ {CertUntrusted, {KindUntrusted, KeyDataUntrusted, _}} = erl_make_certs:make_cert([{issuer, Inter}]),
+ KeyUntrusted = {KindUntrusted, KeyDataUntrusted},
+ {CertTrusted, {Kind, KeyData, _}} = erl_make_certs:make_cert([{issuer, Inter}]),
+ KeyTrusted = {Kind, KeyData},
+
+ Port = port(Config),
+ Host = rabbit_ct_helpers:get_config(Config, rmq_hostname),
+
+ ok = whitelist(Config, "alice", CertTrusted, KeyTrusted),
+ rabbit_trust_store:refresh(),
+
+ catch rabbit_networking:stop_tcp_listener(Port),
+ ok = rabbit_networking:start_ssl_listener(Port, [{cacerts, [Root]},
+ {cert, Cert},
+ {key, Key} | cfg()], 1),
+
+    %% When: a client connects and presents `CertInter` as well as `CertTrusted`
+ %% Then: the connection is successful.
+ {ok, Con} = amqp_connection:start(#amqp_params_network{host = Host,
+ port = Port,
+ ssl_options = [{cacerts, [CertInter]},
+ {cert, CertTrusted},
+ {key, KeyTrusted},
+ {verify, verify_none}]}),
+
+    %% When: a client connects and presents `RootCA` and `CertInter` as well as `CertTrusted`
+ %% Then: the connection is successful.
+ {ok, Con2} = amqp_connection:start(#amqp_params_network{host = Host,
+ port = Port,
+ ssl_options = [{cacerts, [RootCA, CertInter]},
+ {cert, CertTrusted},
+ {key, KeyTrusted},
+ {verify, verify_none}]}),
+
+    %% When: a client connects and presents `CertInter` and `RootCA` as well as `CertTrusted`
+ %% Then: the connection is successful.
+ {ok, Con3} = amqp_connection:start(#amqp_params_network{host = Host,
+ port = Port,
+ ssl_options = [{cacerts, [CertInter, RootCA]},
+ {cert, CertTrusted},
+ {key, KeyTrusted},
+ {verify, verify_none}]}),
+
+    %% When: a client connects and presents `CertInter` and `RootCA` but NOT `CertTrusted`
+    %% Then: the connection is not successful
+ {error, Error1} = amqp_connection:start(
+ #amqp_params_network{host = Host,
+ port = Port,
+ ssl_options = [{cacerts, [RootCA]},
+ {cert, CertInter},
+ {key, KeyInter},
+ {verify, verify_none}]}),
+ case Error1 of
+ %% Expected error from amqp_client.
+ ?SERVER_REJECT_CLIENT -> ok;
+ ?SERVER_REJECT_CLIENT_NEW -> ok;
+ ?SERVER_REJECT_CLIENT_ERLANG24 -> ok;
+ ?SERVER_REJECT_CONNECTION_ERLANG23 -> ok;
+
+ %% See previous comment in validation_failure_for_AMQP_client1/1.
+ closed -> expected_erlang_18_ssl_regression;
+
+ %% ssl:setopts/2 hangs indefinitely on occasion
+        {timeout, {gen_server,call,[_,post_init|_]}} -> ssl_setopts_hangs_occasionally
+ end,
+
+    %% When: a client connects and presents `CertUntrusted` and `RootCA` and `CertInter`
+    %% Then: the connection is not successful
+    %% TODO: for some reason this returns `bad certificate` rather than `unknown ca`
+ {error, Error2} = amqp_connection:start(
+ #amqp_params_network{host = Host,
+ port = Port,
+ ssl_options = [{cacerts, [RootCA, CertInter]},
+ {cert, CertUntrusted},
+ {key, KeyUntrusted},
+ {verify, verify_none}]}),
+ case Error2 of
+ %% Expected error from amqp_client.
+ {tls_alert, "bad certificate"} -> ok;
+ {tls_alert, {bad_certificate, _}} -> ok;
+ ?SERVER_REJECT_CLIENT_ERLANG24 -> ok;
+ ?SERVER_REJECT_CONNECTION_ERLANG23 -> ok;
+
+ %% See previous comment in validation_failure_for_AMQP_client1/1.
+ closed -> expected_erlang_18_ssl_regression;
+
+ %% ssl:setopts/2 hangs indefinitely on occasion
+        {timeout, {gen_server,call,[_,post_init|_]}} -> ssl_setopts_hangs_occasionally
+ end,
+
+ %% Clean: client & server TLS/TCP
+ ok = amqp_connection:close(Con),
+ ok = amqp_connection:close(Con2),
+ ok = amqp_connection:close(Con3),
+ ok = rabbit_networking:stop_tcp_listener(Port).
+
+validate_chain_without_whitelisted(Config) ->
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, validate_chain_without_whitelisted1, [Config]).
+
+validate_chain_without_whitelisted1(Config) ->
+ %% Given: a certificate `CertUntrusted` that is NOT whitelisted.
+ {Root, Cert, Key} = ct_helper:make_certs(),
+ {RootUntrusted, CertUntrusted, KeyUntrusted} = ct_helper:make_certs(),
+
+ Port = port(Config),
+ Host = rabbit_ct_helpers:get_config(Config, rmq_hostname),
+ rabbit_trust_store:refresh(),
+
+ catch rabbit_networking:stop_tcp_listener(Port),
+ ok = rabbit_networking:start_ssl_listener(Port, [{cacerts, [Root]},
+ {cert, Cert},
+ {key, Key} | cfg()], 1),
+
+ %% When: Rabbit validates paths
+ %% Then: a client presenting the non-whitelisted certificate `CertUntrusted` and `RootUntrusted`
+ %% is rejected
+ {error, Error} = amqp_connection:start(
+ #amqp_params_network{host = Host,
+ port = Port,
+ ssl_options = [{cacerts, [RootUntrusted]},
+ {cert, CertUntrusted},
+ {key, KeyUntrusted},
+ {verify, verify_none}]}),
+ case Error of
+ %% Expected error from amqp_client.
+ ?SERVER_REJECT_CLIENT -> ok;
+ ?SERVER_REJECT_CLIENT_NEW -> ok;
+ ?SERVER_REJECT_CLIENT_ERLANG24 -> ok;
+ ?SERVER_REJECT_CONNECTION_ERLANG23 -> ok;
+
+ %% See previous comment in validation_failure_for_AMQP_client1/1.
+ closed -> expected_erlang_18_ssl_regression;
+
+ %% ssl:setopts/2 hangs indefinitely on occasion
+        {timeout, {gen_server,call,[_,post_init|_]}} -> ssl_setopts_hangs_occasionally
+ end,
+
+ ok = rabbit_networking:stop_tcp_listener(Port).
+
+whitelisted_certificate_accepted_from_AMQP_client_regardless_of_validation_to_root(Config) ->
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, whitelisted_certificate_accepted_from_AMQP_client_regardless_of_validation_to_root1, [Config]).
+
+whitelisted_certificate_accepted_from_AMQP_client_regardless_of_validation_to_root1(Config) ->
+ %% Given: a certificate `CertTrusted` AND that it is whitelisted.
+ {Root, Cert, Key} = ct_helper:make_certs(),
+ {_, CertTrusted, KeyTrusted} = ct_helper:make_certs(),
+
+ Port = port(Config),
+ Host = rabbit_ct_helpers:get_config(Config, rmq_hostname),
+
+ ok = whitelist(Config, "alice", CertTrusted, KeyTrusted),
+ rabbit_trust_store:refresh(),
+
+    %% When: Rabbit validates paths against a root (`Root`) different
+    %% from that of the certificate `CertTrusted`.
+ catch rabbit_networking:stop_tcp_listener(Port),
+ ok = rabbit_networking:start_ssl_listener(Port, [{cacerts, [Root]},
+ {cert, Cert},
+ {key, Key} | cfg()], 1),
+
+    %% Then: a client presenting the whitelisted certificate `CertTrusted`
+    %% is allowed.
+ {ok, Con} = amqp_connection:start(#amqp_params_network{host = Host,
+ port = Port,
+ ssl_options = [{cert, CertTrusted},
+ {key, KeyTrusted},
+ {verify, verify_none}]}),
+ %% Clean: client & server TLS/TCP
+ ok = amqp_connection:close(Con),
+ ok = rabbit_networking:stop_tcp_listener(Port).
+
+
+removed_certificate_denied_from_AMQP_client(Config) ->
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, removed_certificate_denied_from_AMQP_client1, [Config]).
+
+removed_certificate_denied_from_AMQP_client1(Config) ->
+ %% Given: a certificate `CertOther` AND that it is whitelisted.
+ {Root, Cert, Key} = ct_helper:make_certs(),
+ {_, CertOther, KeyOther} = ct_helper:make_certs(),
+
+ Port = port(Config),
+ Host = rabbit_ct_helpers:get_config(Config, rmq_hostname),
+ ok = whitelist(Config, "bob", CertOther, KeyOther),
+ rabbit_trust_store:refresh(),
+
+ %% When: we wait for at least one second (the accuracy of the
+ %% file system's time), remove the whitelisted certificate,
+ %% then wait for the trust-store to refresh the whitelist.
+ catch rabbit_networking:stop_tcp_listener(Port),
+ ok = rabbit_networking:start_ssl_listener(Port, [{cacerts, [Root]},
+ {cert, Cert},
+ {key, Key} | cfg()], 1),
+
+ wait_for_file_system_time(),
+ ok = delete("bob.pem", Config),
+ wait_for_trust_store_refresh(),
+
+ %% Then: a client presenting the removed whitelisted
+ %% certificate `CertOther` is denied.
+ {error, Error} = amqp_connection:start(
+ #amqp_params_network{host = Host,
+ port = Port,
+ ssl_options = [{cert, CertOther},
+ {key, KeyOther},
+ {verify, verify_none}]}),
+ case Error of
+ %% Expected error from amqp_client.
+ ?SERVER_REJECT_CLIENT -> ok;
+ ?SERVER_REJECT_CLIENT_NEW -> ok;
+ ?SERVER_REJECT_CLIENT_ERLANG24 -> ok;
+ ?SERVER_REJECT_CONNECTION_ERLANG23 -> ok;
+
+ %% See previous comment in validation_failure_for_AMQP_client1/1.
+ closed -> expected_erlang_18_ssl_regression;
+
+ %% ssl:setopts/2 hangs indefinitely on occasion
+        {timeout, {gen_server,call,[_,post_init|_]}} -> ssl_setopts_hangs_occasionally
+ end,
+
+ %% Clean: server TLS/TCP
+ ok = rabbit_networking:stop_tcp_listener(Port).
+
+
+installed_certificate_accepted_from_AMQP_client(Config) ->
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, installed_certificate_accepted_from_AMQP_client1, [Config]).
+
+installed_certificate_accepted_from_AMQP_client1(Config) ->
+ %% Given: a certificate `CertOther` which is NOT yet whitelisted.
+ {Root, Cert, Key} = ct_helper:make_certs(),
+ {_, CertOther, KeyOther} = ct_helper:make_certs(),
+
+ Port = port(Config),
+ Host = rabbit_ct_helpers:get_config(Config, rmq_hostname),
+ rabbit_trust_store:refresh(),
+
+ %% When: we wait for at least one second (the accuracy of the
+ %% file system's time), add a certificate to the directory,
+ %% then wait for the trust-store to refresh the whitelist.
+ catch rabbit_networking:stop_tcp_listener(Port),
+ ok = rabbit_networking:start_ssl_listener(Port, [{cacerts, [Root]},
+ {cert, Cert},
+ {key, Key} | cfg()], 1),
+
+ wait_for_file_system_time(),
+ ok = whitelist(Config, "charlie", CertOther, KeyOther),
+ wait_for_trust_store_refresh(),
+
+ %% Then: a client presenting the whitelisted certificate `CertOther`
+ %% is allowed.
+ {ok, Con} = amqp_connection:start(#amqp_params_network{host = Host,
+ port = Port,
+ ssl_options = [{cert, CertOther},
+ {key, KeyOther},
+ {verify, verify_none}]}),
+
+ %% Clean: Client & server TLS/TCP
+ ok = amqp_connection:close(Con),
+ ok = rabbit_networking:stop_tcp_listener(Port).
+
+
+whitelist_directory_DELTA(Config) ->
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, whitelist_directory_DELTA1, [Config]).
+
+whitelist_directory_DELTA1(Config) ->
+ %% Given: a certificate `Root` which Rabbit can use as a
+    %% root certificate to validate against AND three
+ %% certificates which clients can present (the first two
+ %% of which are whitelisted).
+ Port = port(Config),
+ Host = rabbit_ct_helpers:get_config(Config, rmq_hostname),
+ {Root, Cert, Key} = ct_helper:make_certs(),
+
+ {_, CertListed1, KeyListed1} = ct_helper:make_certs(),
+ {_, CertRevoked, KeyRevoked} = ct_helper:make_certs(),
+ {_, CertListed2, KeyListed2} = ct_helper:make_certs(),
+
+ ok = whitelist(Config, "foo", CertListed1, KeyListed1),
+ ok = whitelist(Config, "bar", CertRevoked, KeyRevoked),
+ rabbit_trust_store:refresh(),
+
+ %% When: we wait for at least one second (the accuracy
+    %% of the file system's time), delete one certificate and
+    %% add another to the directory, then wait for the
+ %% trust-store to refresh the whitelist.
+ catch rabbit_networking:stop_tcp_listener(Port),
+ ok = rabbit_networking:start_ssl_listener(Port, [{cacerts, [Root]},
+ {cert, Cert},
+ {key, Key} | cfg()], 1),
+
+ wait_for_file_system_time(),
+ ok = delete("bar.pem", Config),
+ ok = whitelist(Config, "baz", CertListed2, KeyListed2),
+ wait_for_trust_store_refresh(),
+
+ %% Then: connectivity to Rabbit is as it should be.
+ {ok, Conn1} = amqp_connection:start(#amqp_params_network{host = Host,
+ port = Port,
+ ssl_options = [{cert, CertListed1},
+ {key, KeyListed1},
+ {verify, verify_none}]}),
+ {error, Error} = amqp_connection:start(
+ #amqp_params_network{host = Host,
+ port = Port,
+ ssl_options = [{cert, CertRevoked},
+ {key, KeyRevoked},
+ {verify, verify_none}]}),
+ case Error of
+ %% Expected error from amqp_client.
+ ?SERVER_REJECT_CLIENT -> ok;
+ ?SERVER_REJECT_CLIENT_NEW -> ok;
+ ?SERVER_REJECT_CLIENT_ERLANG24 -> ok;
+ ?SERVER_REJECT_CONNECTION_ERLANG23 -> ok;
+
+ %% See previous comment in validation_failure_for_AMQP_client1/1.
+ closed -> expected_erlang_18_ssl_regression;
+
+ %% ssl:setopts/2 hangs indefinitely on occasion
+        {timeout, {gen_server,call,[_,post_init|_]}} -> ssl_setopts_hangs_occasionally
+ end,
+
+ {ok, Conn2} = amqp_connection:start(#amqp_params_network{host = Host,
+ port = Port,
+ ssl_options = [{cert, CertListed2},
+ {key, KeyListed2},
+ {verify, verify_none}]}),
+ %% Clean: delete certificate file, close client & server
+ %% TLS/TCP
+ ok = amqp_connection:close(Conn1),
+ ok = amqp_connection:close(Conn2),
+
+ ok = rabbit_networking:stop_tcp_listener(Port).
+
+replaced_whitelisted_certificate_should_be_accepted(Config) ->
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, replaced_whitelisted_certificate_should_be_accepted1, [Config]).
+
+replaced_whitelisted_certificate_should_be_accepted1(Config) ->
+    %% Given: a root certificate and two other certificates
+ {Root, Cert, Key} = ct_helper:make_certs(),
+ {_, CertFirst, KeyFirst} = ct_helper:make_certs(),
+ {_, CertUpdated, KeyUpdated} = ct_helper:make_certs(),
+
+ Port = port(Config),
+ Host = rabbit_ct_helpers:get_config(Config, rmq_hostname),
+
+ catch rabbit_networking:stop_tcp_listener(Port),
+ ok = rabbit_networking:start_ssl_listener(Port, [{cacerts, [Root]},
+ {cert, Cert},
+ {key, Key} | cfg()], 1),
+ %% And: the first certificate has been whitelisted
+ ok = whitelist(Config, "bart", CertFirst, KeyFirst),
+ rabbit_trust_store:refresh(),
+
+ wait_for_trust_store_refresh(),
+
+ %% verify that the first cert can be used to connect
+ {ok, Con} =
+ amqp_connection:start(#amqp_params_network{host = Host,
+ port = Port,
+ ssl_options = [{cert, CertFirst},
+ {key, KeyFirst},
+ {verify, verify_none}]}),
+ %% verify the other certificate is not accepted
+ {error, Error1} = amqp_connection:start(
+ #amqp_params_network{host = Host,
+ port = Port,
+ ssl_options = [{cert, CertUpdated},
+ {key, KeyUpdated},
+ {verify, verify_none}]}),
+ case Error1 of
+ %% Expected error from amqp_client.
+ ?SERVER_REJECT_CLIENT -> ok;
+ ?SERVER_REJECT_CLIENT_NEW -> ok;
+ ?SERVER_REJECT_CLIENT_ERLANG24 -> ok;
+ ?SERVER_REJECT_CONNECTION_ERLANG23 -> ok;
+
+ %% See previous comment in validation_failure_for_AMQP_client1/1.
+ closed -> expected_erlang_18_ssl_regression;
+
+ %% ssl:setopts/2 hangs indefinitely on occasion
+        {timeout, {gen_server,call,[_,post_init|_]}} -> ssl_setopts_hangs_occasionally
+ end,
+ ok = amqp_connection:close(Con),
+
+    %% When: a whitelisted certificate is replaced with one with the same name
+ ok = whitelist(Config, "bart", CertUpdated, KeyUpdated),
+
+ wait_for_trust_store_refresh(),
+
+ %% Then: the first certificate should be rejected
+ {error, Error2} = amqp_connection:start(
+ #amqp_params_network{host = Host,
+ port = Port,
+ ssl_options = [{cert, CertFirst},
+ %% disable ssl session caching
+ %% as this ensures the cert
+ %% will be re-verified by the
+ %% server
+ {reuse_sessions, false},
+ {key, KeyFirst},
+ {verify, verify_none}]}),
+ case Error2 of
+ %% Expected error from amqp_client.
+ ?SERVER_REJECT_CLIENT -> ok;
+ ?SERVER_REJECT_CLIENT_NEW -> ok;
+ ?SERVER_REJECT_CLIENT_ERLANG24 -> ok;
+ ?SERVER_REJECT_CONNECTION_ERLANG23 -> ok;
+
+ %% See previous comment in validation_failure_for_AMQP_client1/1.
+ closed -> expected_erlang_18_ssl_regression
+ end,
+
+ %% And: the updated certificate should allow the user to connect
+ {ok, Con2} =
+ amqp_connection:start(#amqp_params_network{host = Host,
+ port = Port,
+ ssl_options = [{cert, CertUpdated},
+ {reuse_sessions, false},
+ {key, KeyUpdated},
+ {verify, verify_none}]}),
+ ok = amqp_connection:close(Con2),
+ %% Clean: server TLS/TCP.
+ ok = rabbit_networking:stop_tcp_listener(Port).
+
+
+ensure_configuration_using_binary_strings_is_handled(Config) ->
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, ensure_configuration_using_binary_strings_is_handled1, [Config]).
+
+ensure_configuration_using_binary_strings_is_handled1(Config) ->
+ ok = change_configuration(rabbitmq_trust_store, [{directory, list_to_binary(whitelist_dir(Config))},
+ {refresh_interval,
+ {seconds, interval()}}]).
+
+ignore_corrupt_cert(Config) ->
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, ignore_corrupt_cert1, [Config]).
+
+ignore_corrupt_cert1(Config) ->
+ %% Given: a certificate `CertTrusted` AND that it is whitelisted.
+ %% Given: a corrupt certificate.
+
+ Port = port(Config),
+ Host = rabbit_ct_helpers:get_config(Config, rmq_hostname),
+ {Root, Cert, Key} = ct_helper:make_certs(),
+ {_, CertTrusted, KeyTrusted} = ct_helper:make_certs(),
+
+ rabbit_trust_store:refresh(),
+ ok = whitelist(Config, "alice", CertTrusted, KeyTrusted),
+
+ %% When: Rabbit tries to whitelist the corrupt certificate.
+ ok = whitelist(Config, "corrupt", <<48>>, KeyTrusted),
+ rabbit_trust_store:refresh(),
+
+ catch rabbit_networking:stop_tcp_listener(Port),
+ ok = rabbit_networking:start_ssl_listener(Port, [{cacerts, [Root]},
+ {cert, Cert},
+ {key, Key} | cfg()], 1),
+
+ %% Then: the trust store should keep functioning
+ %% And: a client presenting the whitelisted certificate `CertTrusted`
+ %% is allowed.
+ {ok, Con} = amqp_connection:start(#amqp_params_network{host = Host,
+ port = Port,
+ ssl_options = [{cert, CertTrusted},
+ {key, KeyTrusted},
+ {verify, verify_none}]}),
+ %% Clean: client & server TLS/TCP
+ ok = amqp_connection:close(Con),
+ ok = rabbit_networking:stop_tcp_listener(Port).
+
+ignore_same_cert_with_different_name(Config) ->
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, ignore_same_cert_with_different_name1, [Config]).
+
+ignore_same_cert_with_different_name1(Config) ->
+ %% Given: a certificate `CertTrusted` AND that it is whitelisted.
+ %% Given: the same certificate saved with a different filename.
+
+ Host = rabbit_ct_helpers:get_config(Config, rmq_hostname),
+ Port = port(Config),
+ {Root, Cert, Key} = ct_helper:make_certs(),
+ {_, CertTrusted, KeyTrusted} = ct_helper:make_certs(),
+
+ rabbit_trust_store:refresh(),
+ ok = whitelist(Config, "alice", CertTrusted, KeyTrusted),
+ %% When: Rabbit tries to insert the duplicate certificate
+ ok = whitelist(Config, "malice", CertTrusted, KeyTrusted),
+ rabbit_trust_store:refresh(),
+
+ catch rabbit_networking:stop_tcp_listener(Port),
+ ok = rabbit_networking:start_ssl_listener(Port, [{cacerts, [Root]},
+ {cert, Cert},
+ {key, Key} | cfg()], 1),
+
+ %% Then: the trust store should keep functioning.
+ %% And: a client presenting the whitelisted certificate `CertTrusted`
+ %% is allowed.
+ {ok, Con} = amqp_connection:start(#amqp_params_network{host = Host,
+ port = Port,
+ ssl_options = [{cert, CertTrusted},
+ {key, KeyTrusted},
+ {verify, verify_none}]}),
+ %% Clean: client & server TLS/TCP
+ ok = amqp_connection:close(Con),
+ ok = rabbit_networking:stop_tcp_listener(Port).
+
+list(Config) ->
+ %% FIXME: The file provider calls stat(2) on the certificate
+ %% directory to detect any new certificates. Unfortunately, the
+ %% modification time has a resolution of one second. Thus, it can
+ %% miss certificates added within the same second after a refresh.
+    %% To work around this, we force a refresh, wait for two seconds,
+ %% write the test certificate and call refresh again.
+ %%
+ %% Once this is fixed, the two lines below can be removed.
+ %%
+ %% See rabbitmq/rabbitmq-trust-store#58.
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_trust_store, refresh, []),
+ timer:sleep(2000),
+
+ {_Root, Cert, Key} = ct_helper:make_certs(),
+ ok = whitelist(Config, "alice", Cert, Key),
+ % wait_for_trust_store_refresh(),
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_trust_store, refresh, []),
+ Certs = rabbit_ct_broker_helpers:rpc(Config, 0,
+ rabbit_trust_store, list, []),
+    %% This only really tests that listing isn't totally broken.
+ {match, _} = re:run(Certs, ".*alice\.pem.*").
+
+disabled_provider_removes_certificates(Config) ->
+ {_Root, Cert, Key} = ct_helper:make_certs(),
+ ok = whitelist(Config, "alice", Cert, Key),
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_trust_store, refresh, []),
+
+ %% Certificate is there
+ Certs = rabbit_ct_broker_helpers:rpc(Config, 0,
+ rabbit_trust_store, list, []),
+ {match, _} = re:run(Certs, ".*alice\.pem.*"),
+
+
+ rabbit_ct_broker_helpers:rpc(Config, 0, application, set_env,
+ [rabbitmq_trust_store, providers, []]),
+ wait_for_trust_store_refresh(),
+
+ %% Certificate is not there anymore
+ CertsAfterDelete = rabbit_ct_broker_helpers:rpc(Config, 0,
+ rabbit_trust_store, list, []),
+ nomatch = re:run(CertsAfterDelete, ".*alice\.pem.*").
+
+enabled_provider_adds_certificates(Config) ->
+ {_Root, Cert, Key} = ct_helper:make_certs(),
+ ok = whitelist(Config, "alice", Cert, Key),
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, change_configuration,
+ [rabbitmq_trust_store, [{directory, whitelist_dir(Config)},
+ {providers, []}]]),
+
+ %% Certificate is not there yet
+ Certs = rabbit_ct_broker_helpers:rpc(Config, 0,
+ rabbit_trust_store, list, []),
+ nomatch = re:run(Certs, ".*alice\.pem.*"),
+
+
+ rabbit_ct_broker_helpers:rpc(Config, 0, application, set_env,
+ [rabbitmq_trust_store, providers, [rabbit_trust_store_file_provider]]),
+ wait_for_trust_store_refresh(),
+
+ %% Certificate is there
+ CertsAfterAdd = rabbit_ct_broker_helpers:rpc(Config, 0,
+ rabbit_trust_store, list, []),
+ {match, _} = re:run(CertsAfterAdd, ".*alice\.pem.*").
+
+
+%% Test Constants
+
+port(Config) ->
+ rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp_tls_extra).
+
+whitelist_dir(Config) ->
+ ?config(whitelist_dir, Config).
+
+interval() ->
+ 1.
+
+wait_for_file_system_time() ->
+ timer:sleep(timer:seconds(1)).
+
+wait_for_trust_store_refresh() ->
+ timer:sleep(5 * timer:seconds(interval())).
+
+cfg() ->
+ {ok, Cfg} = application:get_env(rabbit, ssl_options),
+ Cfg.
+
+%% Ancillary
+
+chain(Issuer) ->
+    %% These are DER encoded.
+ {Certificate, {Kind, Key, _}} = erl_make_certs:make_cert([{issuer, Issuer}]),
+ {Certificate, {Kind, Key}}.
+
+change_configuration(App, Props) ->
+ ok = application:stop(App),
+ ok = change_cfg(App, Props),
+ application:start(App).
+
+change_cfg(_, []) ->
+ ok;
+change_cfg(App, [{Name,Value}|Rest]) ->
+ ok = application:set_env(App, Name, Value),
+ change_cfg(App, Rest).
+
+whitelist(Config, Filename, Certificate, {A, B} = _Key) ->
+ Path = whitelist_dir(Config),
+ ok = erl_make_certs:write_pem(Path, Filename, {Certificate, {A, B, not_encrypted}}),
+ [file:delete(filename:join(Path, K)) || K <- filelib:wildcard("*_key.pem", Path)],
+ ok.
+
+delete(Name, Config) ->
+ F = filename:join([whitelist_dir(Config), Name]),
+ file:delete(F).
diff --git a/deps/rabbitmq_web_dispatch/.gitignore b/deps/rabbitmq_web_dispatch/.gitignore
new file mode 100644
index 0000000000..ff4f97e1a4
--- /dev/null
+++ b/deps/rabbitmq_web_dispatch/.gitignore
@@ -0,0 +1,17 @@
+.sw?
+.*.sw?
+*.beam
+/.erlang.mk/
+/cover/
+/deps/
+/doc/
+/ebin/
+/escript/
+/escript.lock
+/logs/
+/plugins/
+/plugins.lock
+/sbin/
+/sbin.lock
+
+rabbitmq_web_dispatch.d
diff --git a/deps/rabbitmq_web_dispatch/.travis.yml b/deps/rabbitmq_web_dispatch/.travis.yml
new file mode 100644
index 0000000000..7c7f0f487d
--- /dev/null
+++ b/deps/rabbitmq_web_dispatch/.travis.yml
@@ -0,0 +1,61 @@
+# vim:sw=2:et:
+
+os: linux
+dist: xenial
+language: elixir
+notifications:
+ email:
+ recipients:
+ - alerts@rabbitmq.com
+ on_success: never
+ on_failure: always
+addons:
+ apt:
+ packages:
+ - awscli
+cache:
+ apt: true
+env:
+ global:
+ - secure: VadllhHC3w7y4PixeY6yLPd4yrJFCgGenbypB3wbsCwTANpbLk6UADlxRQiANy1Pi/B3IorX2WlWKkrxy8GsJjew0YOxaTL2TP4Gor2O6EcnEqpaZMjKXNwwNrl3oZAo1QtaHr+N0yYy0ob9NoUxk3qKruFbMDVz1tph1TUAwt8=
+ - secure: QGz2fHyxOFXiunH4KLbfHbYhMcWI1Ex8c8d/oA4nXb48Hn2HaLVJcxBLa76mMke1k4e7UDYcPrSmqpqVi9N+OAWKwVZTLpdbTTChlPE+gUTHwloH2KGSm99Gj7+9q9dbg32h635S5PVIJE5BEp/5KnrDHE5HEoBaf3ggdRhr+3U=
+
+ # $base_rmq_ref is used by rabbitmq-components.mk to select the
+ # appropriate branch for dependencies.
+ - base_rmq_ref=master
+
+elixir:
+ - '1.9'
+otp_release:
+ - '21.3'
+ - '22.2'
+
+install:
+ # This project being an Erlang one (we just set language to Elixir
+ # to ensure it is installed), we don't want Travis to run mix(1)
+ # automatically as it will break.
+ skip
+
+script:
+ # $current_rmq_ref is also used by rabbitmq-components.mk to select
+ # the appropriate branch for dependencies.
+  - make check-rabbitmq-components.mk
+      current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+  - make xref
+      current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+  - make tests
+      current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+
+after_failure:
+ - |
+ cd "$TRAVIS_BUILD_DIR"
+ if test -d logs && test "$AWS_ACCESS_KEY_ID" && test "$AWS_SECRET_ACCESS_KEY"; then
+ archive_name="$(basename "$TRAVIS_REPO_SLUG")-$TRAVIS_JOB_NUMBER"
+
+ tar -c --transform "s/^logs/${archive_name}/" -f - logs | \
+ xz > "${archive_name}.tar.xz"
+
+ aws s3 cp "${archive_name}.tar.xz" s3://server-release-pipeline/travis-ci-logs/ \
+ --region eu-west-1 \
+ --acl public-read
+ fi
diff --git a/deps/rabbitmq_web_dispatch/CODE_OF_CONDUCT.md b/deps/rabbitmq_web_dispatch/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000000..08697906fd
--- /dev/null
+++ b/deps/rabbitmq_web_dispatch/CODE_OF_CONDUCT.md
@@ -0,0 +1,44 @@
+# Contributor Code of Conduct
+
+As contributors and maintainers of this project, and in the interest of fostering an open
+and welcoming community, we pledge to respect all people who contribute through reporting
+issues, posting feature requests, updating documentation, submitting pull requests or
+patches, and other activities.
+
+We are committed to making participation in this project a harassment-free experience for
+everyone, regardless of level of experience, gender, gender identity and expression,
+sexual orientation, disability, personal appearance, body size, race, ethnicity, age,
+religion, or nationality.
+
+Examples of unacceptable behavior by participants include:
+
+ * The use of sexualized language or imagery
+ * Personal attacks
+ * Trolling or insulting/derogatory comments
+ * Public or private harassment
+  * Publishing others' private information, such as physical or electronic addresses,
+ without explicit permission
+ * Other unethical or unprofessional conduct
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments,
+commits, code, wiki edits, issues, and other contributions that are not aligned to this
+Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors
+that they deem inappropriate, threatening, offensive, or harmful.
+
+By adopting this Code of Conduct, project maintainers commit themselves to fairly and
+consistently applying these principles to every aspect of managing this project. Project
+maintainers who do not follow or enforce the Code of Conduct may be permanently removed
+from the project team.
+
+This Code of Conduct applies both within project spaces and in public spaces when an
+individual is representing the project or its community.
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by
+contacting a project maintainer at [info@rabbitmq.com](mailto:info@rabbitmq.com). All complaints will
+be reviewed and investigated and will result in a response that is deemed necessary and
+appropriate to the circumstances. Maintainers are obligated to maintain confidentiality
+with regard to the reporter of an incident.
+
+This Code of Conduct is adapted from the
+[Contributor Covenant](https://contributor-covenant.org), version 1.3.0, available at
+[contributor-covenant.org/version/1/3/0/](https://contributor-covenant.org/version/1/3/0/)
diff --git a/deps/rabbitmq_web_dispatch/CONTRIBUTING.md b/deps/rabbitmq_web_dispatch/CONTRIBUTING.md
new file mode 100644
index 0000000000..23a92fef9c
--- /dev/null
+++ b/deps/rabbitmq_web_dispatch/CONTRIBUTING.md
@@ -0,0 +1,38 @@
+## Overview
+
+RabbitMQ projects use pull requests to discuss, collaborate on and accept code contributions.
+Pull requests are the primary place for discussing code changes.
+
+## How to Contribute
+
+The process is fairly standard:
+
+ * Fork the repository or repositories you plan on contributing to
+ * Clone [RabbitMQ umbrella repository](https://github.com/rabbitmq/rabbitmq-public-umbrella)
+ * `cd umbrella`, `make co`
+ * Create a branch with a descriptive name in the relevant repositories
+ * Make your changes, run tests, commit with a [descriptive message](https://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html), push to your fork
+ * Submit pull requests with an explanation of what has been changed and **why**
+ * Submit a filled out and signed [Contributor Agreement](https://github.com/rabbitmq/ca#how-to-submit) if needed (see below)
+ * Be patient. We will get to your pull request eventually
+
+If what you are going to work on is a substantial change, please first ask the core team
+for their opinion on the [RabbitMQ mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
+
+
+## Code of Conduct
+
+See [CODE_OF_CONDUCT.md](./CODE_OF_CONDUCT.md).
+
+
+## Contributor Agreement
+
+If you want to contribute a non-trivial change, please submit a signed copy of our
+[Contributor Agreement](https://github.com/rabbitmq/ca#how-to-submit) around the time
+you submit your pull request. This will make it much easier (in some cases, possible)
+for the RabbitMQ team at Pivotal to merge your contribution.
+
+
+## Where to Ask Questions
+
+If something isn't clear, feel free to ask on our [mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
diff --git a/deps/rabbitmq_web_dispatch/LICENSE b/deps/rabbitmq_web_dispatch/LICENSE
new file mode 100644
index 0000000000..aeed2782e3
--- /dev/null
+++ b/deps/rabbitmq_web_dispatch/LICENSE
@@ -0,0 +1,5 @@
+This package, the RabbitMQ HTTP dispatcher library, is licensed under the MPL 2.0. For the
+MPL 2.0, please see LICENSE-MPL-RabbitMQ.
+
+If you have any questions regarding licensing, please contact us at
+info@rabbitmq.com.
diff --git a/deps/rabbitmq_web_dispatch/LICENSE-MPL-RabbitMQ b/deps/rabbitmq_web_dispatch/LICENSE-MPL-RabbitMQ
new file mode 100644
index 0000000000..14e2f777f6
--- /dev/null
+++ b/deps/rabbitmq_web_dispatch/LICENSE-MPL-RabbitMQ
@@ -0,0 +1,373 @@
+Mozilla Public License Version 2.0
+==================================
+
+1. Definitions
+--------------
+
+1.1. "Contributor"
+ means each individual or legal entity that creates, contributes to
+ the creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+ means the combination of the Contributions of others (if any) used
+ by a Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+ means Source Code Form to which the initial Contributor has attached
+ the notice in Exhibit A, the Executable Form of such Source Code
+ Form, and Modifications of such Source Code Form, in each case
+ including portions thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ (a) that the initial Contributor has attached the notice described
+ in Exhibit B to the Covered Software; or
+
+ (b) that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the
+ terms of a Secondary License.
+
+1.6. "Executable Form"
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+ means a work that combines Covered Software with other material, in
+ a separate file or files, that is not Covered Software.
+
+1.8. "License"
+ means this document.
+
+1.9. "Licensable"
+ means having the right to grant, to the maximum extent possible,
+ whether at the time of the initial grant or subsequently, any and
+ all of the rights conveyed by this License.
+
+1.10. "Modifications"
+ means any of the following:
+
+ (a) any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered
+ Software; or
+
+ (b) any new file in Source Code Form that contains any Covered
+ Software.
+
+1.11. "Patent Claims" of a Contributor
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the
+ License, by the making, using, selling, offering for sale, having
+ made, import, or transfer of either its Contributions or its
+ Contributor Version.
+
+1.12. "Secondary License"
+ means either the GNU General Public License, Version 2.0, the GNU
+ Lesser General Public License, Version 2.1, the GNU Affero General
+ Public License, Version 3.0, or any later versions of those
+ licenses.
+
+1.13. "Source Code Form"
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that
+ controls, is controlled by, or is under common control with You. For
+ purposes of this definition, "control" means (a) the power, direct
+ or indirect, to cause the direction or management of such entity,
+ whether by contract or otherwise, or (b) ownership of more than
+ fifty percent (50%) of the outstanding shares or beneficial
+ ownership of such entity.
+
+2. License Grants and Conditions
+--------------------------------
+
+2.1. Grants
+
+Each Contributor hereby grants You a world-wide, royalty-free,
+non-exclusive license:
+
+(a) under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+(b) under Patent Claims of such Contributor to make, use, sell, offer
+ for sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+The licenses granted in Section 2.1 with respect to any Contribution
+become effective for each Contribution on the date the Contributor first
+distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+The licenses granted in this Section 2 are the only rights granted under
+this License. No additional rights or licenses will be implied from the
+distribution or licensing of Covered Software under this License.
+Notwithstanding Section 2.1(b) above, no patent license is granted by a
+Contributor:
+
+(a) for any code that a Contributor has removed from Covered Software;
+ or
+
+(b) for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+(c) under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+This License does not grant any rights in the trademarks, service marks,
+or logos of any Contributor (except as may be necessary to comply with
+the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+No Contributor makes additional grants as a result of Your choice to
+distribute the Covered Software under a subsequent version of this
+License (see Section 10.2) or under the terms of a Secondary License (if
+permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+Each Contributor represents that the Contributor believes its
+Contributions are its original creation(s) or it has sufficient rights
+to grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+This License is not intended to limit any rights You have under
+applicable copyright doctrines of fair use, fair dealing, or other
+equivalents.
+
+2.7. Conditions
+
+Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
+in Section 2.1.
+
+3. Responsibilities
+-------------------
+
+3.1. Distribution of Source Form
+
+All distribution of Covered Software in Source Code Form, including any
+Modifications that You create or to which You contribute, must be under
+the terms of this License. You must inform recipients that the Source
+Code Form of the Covered Software is governed by the terms of this
+License, and how they can obtain a copy of this License. You may not
+attempt to alter or restrict the recipients' rights in the Source Code
+Form.
+
+3.2. Distribution of Executable Form
+
+If You distribute Covered Software in Executable Form then:
+
+(a) such Covered Software must also be made available in Source Code
+ Form, as described in Section 3.1, and You must inform recipients of
+ the Executable Form how they can obtain a copy of such Source Code
+ Form by reasonable means in a timely manner, at a charge no more
+ than the cost of distribution to the recipient; and
+
+(b) You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter
+ the recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+You may create and distribute a Larger Work under terms of Your choice,
+provided that You also comply with the requirements of this License for
+the Covered Software. If the Larger Work is a combination of Covered
+Software with a work governed by one or more Secondary Licenses, and the
+Covered Software is not Incompatible With Secondary Licenses, this
+License permits You to additionally distribute such Covered Software
+under the terms of such Secondary License(s), so that the recipient of
+the Larger Work may, at their option, further distribute the Covered
+Software under the terms of either this License or such Secondary
+License(s).
+
+3.4. Notices
+
+You may not remove or alter the substance of any license notices
+(including copyright notices, patent notices, disclaimers of warranty,
+or limitations of liability) contained within the Source Code Form of
+the Covered Software, except that You may alter any license notices to
+the extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+You may choose to offer, and to charge a fee for, warranty, support,
+indemnity or liability obligations to one or more recipients of Covered
+Software. However, You may do so only on Your own behalf, and not on
+behalf of any Contributor. You must make it absolutely clear that any
+such warranty, support, indemnity, or liability obligation is offered by
+You alone, and You hereby agree to indemnify every Contributor for any
+liability incurred by such Contributor as a result of warranty, support,
+indemnity or liability terms You offer. You may include additional
+disclaimers of warranty and limitations of liability specific to any
+jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+---------------------------------------------------
+
+If it is impossible for You to comply with any of the terms of this
+License with respect to some or all of the Covered Software due to
+statute, judicial order, or regulation then You must: (a) comply with
+the terms of this License to the maximum extent possible; and (b)
+describe the limitations and the code they affect. Such description must
+be placed in a text file included with all distributions of the Covered
+Software under this License. Except to the extent prohibited by statute
+or regulation, such description must be sufficiently detailed for a
+recipient of ordinary skill to be able to understand it.
+
+5. Termination
+--------------
+
+5.1. The rights granted under this License will terminate automatically
+if You fail to comply with any of its terms. However, if You become
+compliant, then the rights granted under this License from a particular
+Contributor are reinstated (a) provisionally, unless and until such
+Contributor explicitly and finally terminates Your grants, and (b) on an
+ongoing basis, if such Contributor fails to notify You of the
+non-compliance by some reasonable means prior to 60 days after You have
+come back into compliance. Moreover, Your grants from a particular
+Contributor are reinstated on an ongoing basis if such Contributor
+notifies You of the non-compliance by some reasonable means, this is the
+first time You have received notice of non-compliance with this License
+from such Contributor, and You become compliant prior to 30 days after
+Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+infringement claim (excluding declaratory judgment actions,
+counter-claims, and cross-claims) alleging that a Contributor Version
+directly or indirectly infringes any patent, then the rights granted to
+You by any and all Contributors for the Covered Software under Section
+2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all
+end user license agreements (excluding distributors and resellers) which
+have been validly granted by You or Your distributors under this License
+prior to termination shall survive termination.
+
+************************************************************************
+* *
+* 6. Disclaimer of Warranty *
+* ------------------------- *
+* *
+* Covered Software is provided under this License on an "as is" *
+* basis, without warranty of any kind, either expressed, implied, or *
+* statutory, including, without limitation, warranties that the *
+* Covered Software is free of defects, merchantable, fit for a *
+* particular purpose or non-infringing. The entire risk as to the *
+* quality and performance of the Covered Software is with You. *
+* Should any Covered Software prove defective in any respect, You *
+* (not any Contributor) assume the cost of any necessary servicing, *
+* repair, or correction. This disclaimer of warranty constitutes an *
+* essential part of this License. No use of any Covered Software is *
+* authorized under this License except under this disclaimer. *
+* *
+************************************************************************
+
+************************************************************************
+* *
+* 7. Limitation of Liability *
+* -------------------------- *
+* *
+* Under no circumstances and under no legal theory, whether tort *
+* (including negligence), contract, or otherwise, shall any *
+* Contributor, or anyone who distributes Covered Software as *
+* permitted above, be liable to You for any direct, indirect, *
+* special, incidental, or consequential damages of any character *
+* including, without limitation, damages for lost profits, loss of *
+* goodwill, work stoppage, computer failure or malfunction, or any *
+* and all other commercial damages or losses, even if such party *
+* shall have been informed of the possibility of such damages. This *
+* limitation of liability shall not apply to liability for death or *
+* personal injury resulting from such party's negligence to the *
+* extent applicable law prohibits such limitation. Some *
+* jurisdictions do not allow the exclusion or limitation of *
+* incidental or consequential damages, so this exclusion and *
+* limitation may not apply to You. *
+* *
+************************************************************************
+
+8. Litigation
+-------------
+
+Any litigation relating to this License may be brought only in the
+courts of a jurisdiction where the defendant maintains its principal
+place of business and such litigation shall be governed by laws of that
+jurisdiction, without reference to its conflict-of-law provisions.
+Nothing in this Section shall prevent a party's ability to bring
+cross-claims or counter-claims.
+
+9. Miscellaneous
+----------------
+
+This License represents the complete agreement concerning the subject
+matter hereof. If any provision of this License is held to be
+unenforceable, such provision shall be reformed only to the extent
+necessary to make it enforceable. Any law or regulation which provides
+that the language of a contract shall be construed against the drafter
+shall not be used to construe this License against a Contributor.
+
+10. Versions of the License
+---------------------------
+
+10.1. New Versions
+
+Mozilla Foundation is the license steward. Except as provided in Section
+10.3, no one other than the license steward has the right to modify or
+publish new versions of this License. Each version will be given a
+distinguishing version number.
+
+10.2. Effect of New Versions
+
+You may distribute the Covered Software under the terms of the version
+of the License under which You originally received the Covered Software,
+or under the terms of any subsequent version published by the license
+steward.
+
+10.3. Modified Versions
+
+If you create software not governed by this License, and you want to
+create a new license for such software, you may create and use a
+modified version of this License if you rename the license and remove
+any references to the name of the license steward (except to note that
+such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+Licenses
+
+If You choose to distribute Source Code Form that is Incompatible With
+Secondary Licenses under the terms of this version of the License, the
+notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+-------------------------------------------
+
+ This Source Code Form is subject to the terms of the Mozilla Public
+ License, v. 2.0. If a copy of the MPL was not distributed with this
+ file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular
+file, then You may include the notice in a location (such as a LICENSE
+file in a relevant directory) where a recipient would be likely to look
+for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+---------------------------------------------------------
+
+ This Source Code Form is "Incompatible With Secondary Licenses", as
+ defined by the Mozilla Public License, v. 2.0.
diff --git a/deps/rabbitmq_web_dispatch/Makefile b/deps/rabbitmq_web_dispatch/Makefile
new file mode 100644
index 0000000000..a451bf2979
--- /dev/null
+++ b/deps/rabbitmq_web_dispatch/Makefile
@@ -0,0 +1,26 @@
+PROJECT = rabbitmq_web_dispatch
+PROJECT_DESCRIPTION = RabbitMQ Web Dispatcher
+PROJECT_MOD = rabbit_web_dispatch_app
+
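+# Extra keys merged into the generated .app file by erlang.mk;
+# broker_version_requirements is consumed by RabbitMQ's plugin machinery.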
+define PROJECT_APP_EXTRA_KEYS
+ {broker_version_requirements, []}
+endef
+
+LOCAL_DEPS = inets
+DEPS = rabbit_common rabbit cowboy
+TEST_DEPS = rabbitmq_ct_helpers rabbitmq_ct_client_helpers
+
+dep_cowboy = hex 2.0.0
+
+DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-plugin.mk
+DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk
+
+# FIXME: Use erlang.mk patched for RabbitMQ, while waiting for PRs to be
+# reviewed and merged.
+
+ERLANG_MK_REPO = https://github.com/rabbitmq/erlang.mk.git
+ERLANG_MK_COMMIT = rabbitmq-tmp
+
+include rabbitmq-components.mk
+TEST_DEPS := $(filter-out rabbitmq_test,$(TEST_DEPS))
+include erlang.mk
diff --git a/deps/rabbitmq_web_dispatch/README.md b/deps/rabbitmq_web_dispatch/README.md
new file mode 100644
index 0000000000..74807d8467
--- /dev/null
+++ b/deps/rabbitmq_web_dispatch/README.md
@@ -0,0 +1,14 @@
+# RabbitMQ Web Dispatch
+
+rabbitmq-web-dispatch is a thin veneer around Cowboy that allows
+multiple applications to co-exist on the same Cowboy listeners.
+Applications can register static document roots or dynamic handlers,
+dispatched by URL path prefix.
+
+See
+
+ * [Management plugin guide](https://www.rabbitmq.com/management.html)
+ * [Web STOMP guide](https://www.rabbitmq.com/web-stomp.html)
+ * [Web MQTT guide](https://www.rabbitmq.com/web-mqtt.html)
+
+for information on configuring plugins that expose an HTTP or WebSocket interface.
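+
+As a quick illustration of the registration idea, here is a hypothetical
+sketch. The function name, arity and argument shapes are assumptions made
+for illustration only; check `rabbit_web_dispatch.erl` for the actual API.
+
+```erlang
+%% Hypothetical sketch only: expose a Cowboy handler under the "demo"
+%% path prefix on a shared web-dispatch listener. The port and all
+%% names below are made up for the example.
+-module(my_demo_app).
+-export([start_demo/0]).
+
+start_demo() ->
+    Listener = [{port, 15670}],
+    %% Standard Cowboy 2.x routing; my_demo_handler is a module the
+    %% plugin author would provide.
+    Dispatch = cowboy_router:compile(
+                 [{'_', [{"/demo/[...]", my_demo_handler, []}]}]),
+    rabbit_web_dispatch:register_context_handler(
+      demo_context, Listener, "demo", Dispatch, "Demo handler").
+```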
diff --git a/deps/rabbitmq_web_dispatch/erlang.mk b/deps/rabbitmq_web_dispatch/erlang.mk
new file mode 100644
index 0000000000..fce4be0b0a
--- /dev/null
+++ b/deps/rabbitmq_web_dispatch/erlang.mk
@@ -0,0 +1,7808 @@
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+.PHONY: all app apps deps search rel relup docs install-docs check tests clean distclean help erlang-mk
+
+ERLANG_MK_FILENAME := $(realpath $(lastword $(MAKEFILE_LIST)))
+export ERLANG_MK_FILENAME
+
+ERLANG_MK_VERSION = 2019.07.01-40-geb3e4b0
+ERLANG_MK_WITHOUT =
+
+# Make 3.81 and 3.82 are deprecated.
+
+ifeq ($(MAKELEVEL)$(MAKE_VERSION),03.81)
+$(warning Please upgrade to GNU Make 4 or later: https://erlang.mk/guide/installation.html)
+endif
+
+ifeq ($(MAKELEVEL)$(MAKE_VERSION),03.82)
+$(warning Please upgrade to GNU Make 4 or later: https://erlang.mk/guide/installation.html)
+endif
+
+# Core configuration.
+
+PROJECT ?= $(notdir $(CURDIR))
+PROJECT := $(strip $(PROJECT))
+
+PROJECT_VERSION ?= rolling
+PROJECT_MOD ?= $(PROJECT)_app
+PROJECT_ENV ?= []
+
+# Verbosity.
+
+V ?= 0
+
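+# V=0 (default) silences recipe commands, V=1 lets make echo them,
+# V=2 runs them under "set -x", and V=3 additionally traces the shell.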
+verbose_0 = @
+verbose_2 = set -x;
+verbose = $(verbose_$(V))
+
+ifeq ($(V),3)
+SHELL := $(SHELL) -x
+endif
+
+gen_verbose_0 = @echo " GEN " $@;
+gen_verbose_2 = set -x;
+gen_verbose = $(gen_verbose_$(V))
+
+gen_verbose_esc_0 = @echo " GEN " $$@;
+gen_verbose_esc_2 = set -x;
+gen_verbose_esc = $(gen_verbose_esc_$(V))
+
+# Temporary files directory.
+
+ERLANG_MK_TMP ?= $(CURDIR)/.erlang.mk
+export ERLANG_MK_TMP
+
+# "erl" command.
+
+ERL = erl +A1 -noinput -boot no_dot_erlang
+
+# Platform detection.
+
+ifeq ($(PLATFORM),)
+UNAME_S := $(shell uname -s)
+
+ifeq ($(UNAME_S),Linux)
+PLATFORM = linux
+else ifeq ($(UNAME_S),Darwin)
+PLATFORM = darwin
+else ifeq ($(UNAME_S),SunOS)
+PLATFORM = solaris
+else ifeq ($(UNAME_S),GNU)
+PLATFORM = gnu
+else ifeq ($(UNAME_S),FreeBSD)
+PLATFORM = freebsd
+else ifeq ($(UNAME_S),NetBSD)
+PLATFORM = netbsd
+else ifeq ($(UNAME_S),OpenBSD)
+PLATFORM = openbsd
+else ifeq ($(UNAME_S),DragonFly)
+PLATFORM = dragonfly
+else ifeq ($(shell uname -o),Msys)
+PLATFORM = msys2
+else
+$(error Unable to detect platform. Please open a ticket with the output of uname -a.)
+endif
+
+export PLATFORM
+endif
+
+# Core targets.
+
+all:: deps app rel
+
+# Noop to avoid a Make warning when there's nothing to do.
+rel::
+ $(verbose) :
+
+relup:: deps app
+
+check:: tests
+
+clean:: clean-crashdump
+
+clean-crashdump:
+ifneq ($(wildcard erl_crash.dump),)
+ $(gen_verbose) rm -f erl_crash.dump
+endif
+
+distclean:: clean distclean-tmp
+
+$(ERLANG_MK_TMP):
+ $(verbose) mkdir -p $(ERLANG_MK_TMP)
+
+distclean-tmp:
+ $(gen_verbose) rm -rf $(ERLANG_MK_TMP)
+
+help::
+ $(verbose) printf "%s\n" \
+ "erlang.mk (version $(ERLANG_MK_VERSION)) is distributed under the terms of the ISC License." \
+ "Copyright (c) 2013-2016 Loïc Hoguin <essen@ninenines.eu>" \
+ "" \
+ "Usage: [V=1] $(MAKE) [target]..." \
+ "" \
+ "Core targets:" \
+ " all Run deps, app and rel targets in that order" \
+ " app Compile the project" \
+ " deps Fetch dependencies (if needed) and compile them" \
+ " fetch-deps Fetch dependencies recursively (if needed) without compiling them" \
+ " list-deps List dependencies recursively on stdout" \
+ " search q=... Search for a package in the built-in index" \
+ " rel Build a release for this project, if applicable" \
+ " docs Build the documentation for this project" \
+ " install-docs Install the man pages for this project" \
+ " check Compile and run all tests and analysis for this project" \
+ " tests Run the tests for this project" \
+ " clean Delete temporary and output files from most targets" \
+ " distclean Delete all temporary and output files" \
+ " help Display this help and exit" \
+ " erlang-mk Update erlang.mk to the latest version"
+
+# Core functions.
+
+empty :=
+space := $(empty) $(empty)
+tab := $(empty) $(empty)
+comma := ,
+
+define newline
+
+
+endef
+
+define comma_list
+$(subst $(space),$(comma),$(strip $(1)))
+endef
+
+define escape_dquotes
+$(subst ",\",$1)
+endef
+
+# The trailing "-- erlang.mk" keeps Erlang scripts that call init:get_plain_arguments() happy.
+define erlang
+$(ERL) $2 -pz $(ERLANG_MK_TMP)/rebar/ebin -eval "$(subst $(newline),,$(call escape_dquotes,$1))" -- erlang.mk
+endef
+
+ifeq ($(PLATFORM),msys2)
+core_native_path = $(shell cygpath -m $1)
+else
+core_native_path = $1
+endif
+
+core_http_get = curl -Lf$(if $(filter-out 0,$(V)),,s)o $(call core_native_path,$1) $2
+
+core_eq = $(and $(findstring $(1),$(2)),$(findstring $(2),$(1)))
+
+# We skip files that contain spaces because they end up causing issues.
+core_find = $(if $(wildcard $1),$(shell find $(1:%/=%) \( -type l -o -type f \) -name $(subst *,\*,$2) | grep -v " "))
+
+core_lc = $(subst A,a,$(subst B,b,$(subst C,c,$(subst D,d,$(subst E,e,$(subst F,f,$(subst G,g,$(subst H,h,$(subst I,i,$(subst J,j,$(subst K,k,$(subst L,l,$(subst M,m,$(subst N,n,$(subst O,o,$(subst P,p,$(subst Q,q,$(subst R,r,$(subst S,s,$(subst T,t,$(subst U,u,$(subst V,v,$(subst W,w,$(subst X,x,$(subst Y,y,$(subst Z,z,$(1)))))))))))))))))))))))))))
+
+core_ls = $(filter-out $(1),$(shell echo $(1)))
+
+# @todo Use a solution that does not require using perl.
+core_relpath = $(shell perl -e 'use File::Spec; print File::Spec->abs2rel(@ARGV) . "\n"' $1 $2)
+
+define core_render
+ printf -- '$(subst $(newline),\n,$(subst %,%%,$(subst ','\'',$(subst $(tab),$(WS),$(call $(1))))))\n' > $(2)
+endef
+
+# Automated update.
+
+ERLANG_MK_REPO ?= https://github.com/ninenines/erlang.mk
+ERLANG_MK_COMMIT ?=
+ERLANG_MK_BUILD_CONFIG ?= build.config
+ERLANG_MK_BUILD_DIR ?= .erlang.mk.build
+
+erlang-mk: WITHOUT ?= $(ERLANG_MK_WITHOUT)
+erlang-mk:
+ifdef ERLANG_MK_COMMIT
+ $(verbose) git clone $(ERLANG_MK_REPO) $(ERLANG_MK_BUILD_DIR)
+ $(verbose) cd $(ERLANG_MK_BUILD_DIR) && git checkout $(ERLANG_MK_COMMIT)
+else
+ $(verbose) git clone --depth 1 $(ERLANG_MK_REPO) $(ERLANG_MK_BUILD_DIR)
+endif
+ $(verbose) if [ -f $(ERLANG_MK_BUILD_CONFIG) ]; then cp $(ERLANG_MK_BUILD_CONFIG) $(ERLANG_MK_BUILD_DIR)/build.config; fi
+ $(gen_verbose) $(MAKE) --no-print-directory -C $(ERLANG_MK_BUILD_DIR) WITHOUT='$(strip $(WITHOUT))' UPGRADE=1
+ $(verbose) cp $(ERLANG_MK_BUILD_DIR)/erlang.mk ./erlang.mk
+ $(verbose) rm -rf $(ERLANG_MK_BUILD_DIR)
+ $(verbose) rm -rf $(ERLANG_MK_TMP)
+
+# The erlang.mk package index is bundled in the default erlang.mk build.
+# Search for the string "copyright" to skip to the rest of the code.
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-kerl
+
+KERL_INSTALL_DIR ?= $(HOME)/erlang
+
+ifeq ($(strip $(KERL)),)
+KERL := $(ERLANG_MK_TMP)/kerl/kerl
+endif
+
+KERL_DIR = $(ERLANG_MK_TMP)/kerl
+
+export KERL
+
+KERL_GIT ?= https://github.com/kerl/kerl
+KERL_COMMIT ?= master
+
+KERL_MAKEFLAGS ?=
+
+OTP_GIT ?= https://github.com/erlang/otp
+
+define kerl_otp_target
+$(KERL_INSTALL_DIR)/$(1): $(KERL)
+ $(verbose) if [ ! -d $$@ ]; then \
+ MAKEFLAGS="$(KERL_MAKEFLAGS)" $(KERL) build git $(OTP_GIT) $(1) $(1); \
+ $(KERL) install $(1) $(KERL_INSTALL_DIR)/$(1); \
+ fi
+endef
+
+define kerl_hipe_target
+$(KERL_INSTALL_DIR)/$1-native: $(KERL)
+ $(verbose) if [ ! -d $$@ ]; then \
+ KERL_CONFIGURE_OPTIONS=--enable-native-libs \
+ MAKEFLAGS="$(KERL_MAKEFLAGS)" $(KERL) build git $(OTP_GIT) $1 $1-native; \
+ $(KERL) install $1-native $(KERL_INSTALL_DIR)/$1-native; \
+ fi
+endef
+
+$(KERL): $(KERL_DIR)
+
+$(KERL_DIR): | $(ERLANG_MK_TMP)
+ $(gen_verbose) git clone --depth 1 $(KERL_GIT) $(ERLANG_MK_TMP)/kerl
+ $(verbose) cd $(ERLANG_MK_TMP)/kerl && git checkout $(KERL_COMMIT)
+ $(verbose) chmod +x $(KERL)
+
+distclean:: distclean-kerl
+
+distclean-kerl:
+ $(gen_verbose) rm -rf $(KERL_DIR)
+
+# Allow users to select which version of Erlang/OTP to use for a project.
+
+ifneq ($(strip $(LATEST_ERLANG_OTP)),)
+# In some environments it is necessary to filter out master.
+ERLANG_OTP := $(notdir $(lastword $(sort\
+ $(filter-out $(KERL_INSTALL_DIR)/master $(KERL_INSTALL_DIR)/OTP_R%,\
+ $(filter-out %-rc1 %-rc2 %-rc3,$(wildcard $(KERL_INSTALL_DIR)/*[^-native]))))))
+endif
+
+ERLANG_OTP ?=
+ERLANG_HIPE ?=
+
+# Use kerl to enforce a specific Erlang/OTP version for a project.
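+# For example (version name illustrative), "make ERLANG_OTP=OTP-22.3 tests"
+# builds that OTP via kerl on first use and puts its bin directory on PATH.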
+ifneq ($(strip $(ERLANG_OTP)),)
+export PATH := $(KERL_INSTALL_DIR)/$(ERLANG_OTP)/bin:$(PATH)
+SHELL := env PATH=$(PATH) $(SHELL)
+$(eval $(call kerl_otp_target,$(ERLANG_OTP)))
+
+# Build Erlang/OTP only if it doesn't already exist.
+ifeq ($(wildcard $(KERL_INSTALL_DIR)/$(ERLANG_OTP))$(BUILD_ERLANG_OTP),)
+$(info Building Erlang/OTP $(ERLANG_OTP)... Please wait...)
+$(shell $(MAKE) $(KERL_INSTALL_DIR)/$(ERLANG_OTP) ERLANG_OTP=$(ERLANG_OTP) BUILD_ERLANG_OTP=1 >&2)
+endif
+
+else
+# Same for a HiPE enabled VM.
+ifneq ($(strip $(ERLANG_HIPE)),)
+export PATH := $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native/bin:$(PATH)
+SHELL := env PATH=$(PATH) $(SHELL)
+$(eval $(call kerl_hipe_target,$(ERLANG_HIPE)))
+
+# Build Erlang/OTP only if it doesn't already exist.
+ifeq ($(wildcard $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native)$(BUILD_ERLANG_OTP),)
+$(info Building HiPE-enabled Erlang/OTP $(ERLANG_HIPE)... Please wait...)
+$(shell $(MAKE) $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native ERLANG_HIPE=$(ERLANG_HIPE) BUILD_ERLANG_OTP=1 >&2)
+endif
+
+endif
+endif
+
+PACKAGES += aberth
+pkg_aberth_name = aberth
+pkg_aberth_description = Generic BERT-RPC server in Erlang
+pkg_aberth_homepage = https://github.com/a13x/aberth
+pkg_aberth_fetch = git
+pkg_aberth_repo = https://github.com/a13x/aberth
+pkg_aberth_commit = master
+
+PACKAGES += active
+pkg_active_name = active
+pkg_active_description = Active development for Erlang: rebuild and reload source/binary files while the VM is running
+pkg_active_homepage = https://github.com/proger/active
+pkg_active_fetch = git
+pkg_active_repo = https://github.com/proger/active
+pkg_active_commit = master
+
+PACKAGES += actordb_core
+pkg_actordb_core_name = actordb_core
+pkg_actordb_core_description = ActorDB main source
+pkg_actordb_core_homepage = http://www.actordb.com/
+pkg_actordb_core_fetch = git
+pkg_actordb_core_repo = https://github.com/biokoda/actordb_core
+pkg_actordb_core_commit = master
+
+PACKAGES += actordb_thrift
+pkg_actordb_thrift_name = actordb_thrift
+pkg_actordb_thrift_description = Thrift API for ActorDB
+pkg_actordb_thrift_homepage = http://www.actordb.com/
+pkg_actordb_thrift_fetch = git
+pkg_actordb_thrift_repo = https://github.com/biokoda/actordb_thrift
+pkg_actordb_thrift_commit = master
+
+PACKAGES += aleppo
+pkg_aleppo_name = aleppo
+pkg_aleppo_description = Alternative Erlang Pre-Processor
+pkg_aleppo_homepage = https://github.com/ErlyORM/aleppo
+pkg_aleppo_fetch = git
+pkg_aleppo_repo = https://github.com/ErlyORM/aleppo
+pkg_aleppo_commit = master
+
+PACKAGES += alog
+pkg_alog_name = alog
+pkg_alog_description = Simply the best logging framework for Erlang
+pkg_alog_homepage = https://github.com/siberian-fast-food/alogger
+pkg_alog_fetch = git
+pkg_alog_repo = https://github.com/siberian-fast-food/alogger
+pkg_alog_commit = master
+
+PACKAGES += amqp_client
+pkg_amqp_client_name = amqp_client
+pkg_amqp_client_description = RabbitMQ Erlang AMQP client
+pkg_amqp_client_homepage = https://www.rabbitmq.com/erlang-client-user-guide.html
+pkg_amqp_client_fetch = git
+pkg_amqp_client_repo = https://github.com/rabbitmq/rabbitmq-erlang-client.git
+pkg_amqp_client_commit = master
+
+PACKAGES += annotations
+pkg_annotations_name = annotations
+pkg_annotations_description = Simple code instrumentation utilities
+pkg_annotations_homepage = https://github.com/hyperthunk/annotations
+pkg_annotations_fetch = git
+pkg_annotations_repo = https://github.com/hyperthunk/annotations
+pkg_annotations_commit = master
+
+PACKAGES += antidote
+pkg_antidote_name = antidote
+pkg_antidote_description = Large-scale computation without synchronisation
+pkg_antidote_homepage = https://syncfree.lip6.fr/
+pkg_antidote_fetch = git
+pkg_antidote_repo = https://github.com/SyncFree/antidote
+pkg_antidote_commit = master
+
+PACKAGES += apns
+pkg_apns_name = apns
+pkg_apns_description = Apple Push Notification Server for Erlang
+pkg_apns_homepage = http://inaka.github.com/apns4erl
+pkg_apns_fetch = git
+pkg_apns_repo = https://github.com/inaka/apns4erl
+pkg_apns_commit = master
+
+PACKAGES += asciideck
+pkg_asciideck_name = asciideck
+pkg_asciideck_description = Asciidoc for Erlang.
+pkg_asciideck_homepage = https://ninenines.eu
+pkg_asciideck_fetch = git
+pkg_asciideck_repo = https://github.com/ninenines/asciideck
+pkg_asciideck_commit = master
+
+PACKAGES += azdht
+pkg_azdht_name = azdht
+pkg_azdht_description = Azureus Distributed Hash Table (DHT) in Erlang
+pkg_azdht_homepage = https://github.com/arcusfelis/azdht
+pkg_azdht_fetch = git
+pkg_azdht_repo = https://github.com/arcusfelis/azdht
+pkg_azdht_commit = master
+
+PACKAGES += backoff
+pkg_backoff_name = backoff
+pkg_backoff_description = Simple exponential backoffs in Erlang
+pkg_backoff_homepage = https://github.com/ferd/backoff
+pkg_backoff_fetch = git
+pkg_backoff_repo = https://github.com/ferd/backoff
+pkg_backoff_commit = master
+
+PACKAGES += barrel_tcp
+pkg_barrel_tcp_name = barrel_tcp
+pkg_barrel_tcp_description = barrel is a generic TCP acceptor pool with low latency in Erlang.
+pkg_barrel_tcp_homepage = https://github.com/benoitc-attic/barrel_tcp
+pkg_barrel_tcp_fetch = git
+pkg_barrel_tcp_repo = https://github.com/benoitc-attic/barrel_tcp
+pkg_barrel_tcp_commit = master
+
+PACKAGES += basho_bench
+pkg_basho_bench_name = basho_bench
+pkg_basho_bench_description = A load-generation and testing tool for basically whatever you can write a returning Erlang function for.
+pkg_basho_bench_homepage = https://github.com/basho/basho_bench
+pkg_basho_bench_fetch = git
+pkg_basho_bench_repo = https://github.com/basho/basho_bench
+pkg_basho_bench_commit = master
+
+PACKAGES += bcrypt
+pkg_bcrypt_name = bcrypt
+pkg_bcrypt_description = Bcrypt Erlang / C library
+pkg_bcrypt_homepage = https://github.com/erlangpack/bcrypt
+pkg_bcrypt_fetch = git
+pkg_bcrypt_repo = https://github.com/erlangpack/bcrypt.git
+pkg_bcrypt_commit = master
+
+PACKAGES += beam
+pkg_beam_name = beam
+pkg_beam_description = BEAM emulator written in Erlang
+pkg_beam_homepage = https://github.com/tonyrog/beam
+pkg_beam_fetch = git
+pkg_beam_repo = https://github.com/tonyrog/beam
+pkg_beam_commit = master
+
+PACKAGES += beanstalk
+pkg_beanstalk_name = beanstalk
+pkg_beanstalk_description = An Erlang client for beanstalkd
+pkg_beanstalk_homepage = https://github.com/tim/erlang-beanstalk
+pkg_beanstalk_fetch = git
+pkg_beanstalk_repo = https://github.com/tim/erlang-beanstalk
+pkg_beanstalk_commit = master
+
+PACKAGES += bear
+pkg_bear_name = bear
+pkg_bear_description = a set of statistics functions for erlang
+pkg_bear_homepage = https://github.com/boundary/bear
+pkg_bear_fetch = git
+pkg_bear_repo = https://github.com/boundary/bear
+pkg_bear_commit = master
+
+PACKAGES += bertconf
+pkg_bertconf_name = bertconf
+pkg_bertconf_description = Make ETS tables out of static BERT files that are auto-reloaded
+pkg_bertconf_homepage = https://github.com/ferd/bertconf
+pkg_bertconf_fetch = git
+pkg_bertconf_repo = https://github.com/ferd/bertconf
+pkg_bertconf_commit = master
+
+PACKAGES += bifrost
+pkg_bifrost_name = bifrost
+pkg_bifrost_description = Erlang FTP Server Framework
+pkg_bifrost_homepage = https://github.com/thorstadt/bifrost
+pkg_bifrost_fetch = git
+pkg_bifrost_repo = https://github.com/thorstadt/bifrost
+pkg_bifrost_commit = master
+
+PACKAGES += binpp
+pkg_binpp_name = binpp
+pkg_binpp_description = Erlang Binary Pretty Printer
+pkg_binpp_homepage = https://github.com/jtendo/binpp
+pkg_binpp_fetch = git
+pkg_binpp_repo = https://github.com/jtendo/binpp
+pkg_binpp_commit = master
+
+PACKAGES += bisect
+pkg_bisect_name = bisect
+pkg_bisect_description = Ordered fixed-size binary dictionary in Erlang
+pkg_bisect_homepage = https://github.com/knutin/bisect
+pkg_bisect_fetch = git
+pkg_bisect_repo = https://github.com/knutin/bisect
+pkg_bisect_commit = master
+
+PACKAGES += bitcask
+pkg_bitcask_name = bitcask
+pkg_bitcask_description = because you need another key/value storage engine
+pkg_bitcask_homepage = https://github.com/basho/bitcask
+pkg_bitcask_fetch = git
+pkg_bitcask_repo = https://github.com/basho/bitcask
+pkg_bitcask_commit = develop
+
+PACKAGES += bitstore
+pkg_bitstore_name = bitstore
+pkg_bitstore_description = A document based ontology development environment
+pkg_bitstore_homepage = https://github.com/bdionne/bitstore
+pkg_bitstore_fetch = git
+pkg_bitstore_repo = https://github.com/bdionne/bitstore
+pkg_bitstore_commit = master
+
+PACKAGES += bootstrap
+pkg_bootstrap_name = bootstrap
+pkg_bootstrap_description = A simple, yet powerful Erlang cluster bootstrapping application.
+pkg_bootstrap_homepage = https://github.com/schlagert/bootstrap
+pkg_bootstrap_fetch = git
+pkg_bootstrap_repo = https://github.com/schlagert/bootstrap
+pkg_bootstrap_commit = master
+
+PACKAGES += boss
+pkg_boss_name = boss
+pkg_boss_description = Erlang web MVC, now featuring Comet
+pkg_boss_homepage = https://github.com/ChicagoBoss/ChicagoBoss
+pkg_boss_fetch = git
+pkg_boss_repo = https://github.com/ChicagoBoss/ChicagoBoss
+pkg_boss_commit = master
+
+PACKAGES += boss_db
+pkg_boss_db_name = boss_db
+pkg_boss_db_description = BossDB: a sharded, caching, pooling, evented ORM for Erlang
+pkg_boss_db_homepage = https://github.com/ErlyORM/boss_db
+pkg_boss_db_fetch = git
+pkg_boss_db_repo = https://github.com/ErlyORM/boss_db
+pkg_boss_db_commit = master
+
+PACKAGES += brod
+pkg_brod_name = brod
+pkg_brod_description = Kafka client in Erlang
+pkg_brod_homepage = https://github.com/klarna/brod
+pkg_brod_fetch = git
+pkg_brod_repo = https://github.com/klarna/brod.git
+pkg_brod_commit = master
+
+PACKAGES += bson
+pkg_bson_name = bson
+pkg_bson_description = BSON documents in Erlang, see bsonspec.org
+pkg_bson_homepage = https://github.com/comtihon/bson-erlang
+pkg_bson_fetch = git
+pkg_bson_repo = https://github.com/comtihon/bson-erlang
+pkg_bson_commit = master
+
+PACKAGES += bullet
+pkg_bullet_name = bullet
+pkg_bullet_description = Simple, reliable, efficient streaming for Cowboy.
+pkg_bullet_homepage = http://ninenines.eu
+pkg_bullet_fetch = git
+pkg_bullet_repo = https://github.com/ninenines/bullet
+pkg_bullet_commit = master
+
+PACKAGES += cache
+pkg_cache_name = cache
+pkg_cache_description = Erlang in-memory cache
+pkg_cache_homepage = https://github.com/fogfish/cache
+pkg_cache_fetch = git
+pkg_cache_repo = https://github.com/fogfish/cache
+pkg_cache_commit = master
+
+PACKAGES += cake
+pkg_cake_name = cake
+pkg_cake_description = Really simple terminal colorization
+pkg_cake_homepage = https://github.com/darach/cake-erl
+pkg_cake_fetch = git
+pkg_cake_repo = https://github.com/darach/cake-erl
+pkg_cake_commit = master
+
+PACKAGES += carotene
+pkg_carotene_name = carotene
+pkg_carotene_description = Real-time server
+pkg_carotene_homepage = https://github.com/carotene/carotene
+pkg_carotene_fetch = git
+pkg_carotene_repo = https://github.com/carotene/carotene
+pkg_carotene_commit = master
+
+PACKAGES += cberl
+pkg_cberl_name = cberl
+pkg_cberl_description = NIF based Erlang bindings for Couchbase
+pkg_cberl_homepage = https://github.com/chitika/cberl
+pkg_cberl_fetch = git
+pkg_cberl_repo = https://github.com/chitika/cberl
+pkg_cberl_commit = master
+
+PACKAGES += cecho
+pkg_cecho_name = cecho
+pkg_cecho_description = An ncurses library for Erlang
+pkg_cecho_homepage = https://github.com/mazenharake/cecho
+pkg_cecho_fetch = git
+pkg_cecho_repo = https://github.com/mazenharake/cecho
+pkg_cecho_commit = master
+
+PACKAGES += cferl
+pkg_cferl_name = cferl
+pkg_cferl_description = Rackspace / Open Stack Cloud Files Erlang Client
+pkg_cferl_homepage = https://github.com/ddossot/cferl
+pkg_cferl_fetch = git
+pkg_cferl_repo = https://github.com/ddossot/cferl
+pkg_cferl_commit = master
+
+PACKAGES += chaos_monkey
+pkg_chaos_monkey_name = chaos_monkey
+pkg_chaos_monkey_description = This is The CHAOS MONKEY. It will kill your processes.
+pkg_chaos_monkey_homepage = https://github.com/dLuna/chaos_monkey
+pkg_chaos_monkey_fetch = git
+pkg_chaos_monkey_repo = https://github.com/dLuna/chaos_monkey
+pkg_chaos_monkey_commit = master
+
+PACKAGES += check_node
+pkg_check_node_name = check_node
+pkg_check_node_description = Nagios Scripts for monitoring Riak
+pkg_check_node_homepage = https://github.com/basho-labs/riak_nagios
+pkg_check_node_fetch = git
+pkg_check_node_repo = https://github.com/basho-labs/riak_nagios
+pkg_check_node_commit = master
+
+PACKAGES += chronos
+pkg_chronos_name = chronos
+pkg_chronos_description = Timer module for Erlang that makes it easy to abstract time out of the tests.
+pkg_chronos_homepage = https://github.com/lehoff/chronos
+pkg_chronos_fetch = git
+pkg_chronos_repo = https://github.com/lehoff/chronos
+pkg_chronos_commit = master
+
+PACKAGES += chumak
+pkg_chumak_name = chumak
+pkg_chumak_description = Pure Erlang implementation of ZeroMQ Message Transport Protocol.
+pkg_chumak_homepage = http://choven.ca
+pkg_chumak_fetch = git
+pkg_chumak_repo = https://github.com/chovencorp/chumak
+pkg_chumak_commit = master
+
+PACKAGES += cl
+pkg_cl_name = cl
+pkg_cl_description = OpenCL binding for Erlang
+pkg_cl_homepage = https://github.com/tonyrog/cl
+pkg_cl_fetch = git
+pkg_cl_repo = https://github.com/tonyrog/cl
+pkg_cl_commit = master
+
+PACKAGES += clique
+pkg_clique_name = clique
+pkg_clique_description = CLI Framework for Erlang
+pkg_clique_homepage = https://github.com/basho/clique
+pkg_clique_fetch = git
+pkg_clique_repo = https://github.com/basho/clique
+pkg_clique_commit = develop
+
+PACKAGES += cloudi_core
+pkg_cloudi_core_name = cloudi_core
+pkg_cloudi_core_description = CloudI internal service runtime
+pkg_cloudi_core_homepage = http://cloudi.org/
+pkg_cloudi_core_fetch = git
+pkg_cloudi_core_repo = https://github.com/CloudI/cloudi_core
+pkg_cloudi_core_commit = master
+
+PACKAGES += cloudi_service_api_requests
+pkg_cloudi_service_api_requests_name = cloudi_service_api_requests
+pkg_cloudi_service_api_requests_description = CloudI Service API requests (JSON-RPC/Erlang-term support)
+pkg_cloudi_service_api_requests_homepage = http://cloudi.org/
+pkg_cloudi_service_api_requests_fetch = git
+pkg_cloudi_service_api_requests_repo = https://github.com/CloudI/cloudi_service_api_requests
+pkg_cloudi_service_api_requests_commit = master
+
+PACKAGES += cloudi_service_db
+pkg_cloudi_service_db_name = cloudi_service_db
+pkg_cloudi_service_db_description = CloudI Database (in-memory/testing/generic)
+pkg_cloudi_service_db_homepage = http://cloudi.org/
+pkg_cloudi_service_db_fetch = git
+pkg_cloudi_service_db_repo = https://github.com/CloudI/cloudi_service_db
+pkg_cloudi_service_db_commit = master
+
+PACKAGES += cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_name = cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_description = Cassandra CloudI Service
+pkg_cloudi_service_db_cassandra_homepage = http://cloudi.org/
+pkg_cloudi_service_db_cassandra_fetch = git
+pkg_cloudi_service_db_cassandra_repo = https://github.com/CloudI/cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_commit = master
+
+PACKAGES += cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_name = cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_description = Cassandra CQL CloudI Service
+pkg_cloudi_service_db_cassandra_cql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_cassandra_cql_fetch = git
+pkg_cloudi_service_db_cassandra_cql_repo = https://github.com/CloudI/cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_commit = master
+
+PACKAGES += cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_name = cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_description = CouchDB CloudI Service
+pkg_cloudi_service_db_couchdb_homepage = http://cloudi.org/
+pkg_cloudi_service_db_couchdb_fetch = git
+pkg_cloudi_service_db_couchdb_repo = https://github.com/CloudI/cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_commit = master
+
+PACKAGES += cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_name = cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_description = elasticsearch CloudI Service
+pkg_cloudi_service_db_elasticsearch_homepage = http://cloudi.org/
+pkg_cloudi_service_db_elasticsearch_fetch = git
+pkg_cloudi_service_db_elasticsearch_repo = https://github.com/CloudI/cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_commit = master
+
+PACKAGES += cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_name = cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_description = memcached CloudI Service
+pkg_cloudi_service_db_memcached_homepage = http://cloudi.org/
+pkg_cloudi_service_db_memcached_fetch = git
+pkg_cloudi_service_db_memcached_repo = https://github.com/CloudI/cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_commit = master
+
+PACKAGES += cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_name = cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_description = MySQL CloudI Service
+pkg_cloudi_service_db_mysql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_mysql_fetch = git
+pkg_cloudi_service_db_mysql_repo = https://github.com/CloudI/cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_commit = master
+
+PACKAGES += cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_name = cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_description = PostgreSQL CloudI Service
+pkg_cloudi_service_db_pgsql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_pgsql_fetch = git
+pkg_cloudi_service_db_pgsql_repo = https://github.com/CloudI/cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_commit = master
+
+PACKAGES += cloudi_service_db_riak
+pkg_cloudi_service_db_riak_name = cloudi_service_db_riak
+pkg_cloudi_service_db_riak_description = Riak CloudI Service
+pkg_cloudi_service_db_riak_homepage = http://cloudi.org/
+pkg_cloudi_service_db_riak_fetch = git
+pkg_cloudi_service_db_riak_repo = https://github.com/CloudI/cloudi_service_db_riak
+pkg_cloudi_service_db_riak_commit = master
+
+PACKAGES += cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_name = cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_description = Tokyo Tyrant CloudI Service
+pkg_cloudi_service_db_tokyotyrant_homepage = http://cloudi.org/
+pkg_cloudi_service_db_tokyotyrant_fetch = git
+pkg_cloudi_service_db_tokyotyrant_repo = https://github.com/CloudI/cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_commit = master
+
+PACKAGES += cloudi_service_filesystem
+pkg_cloudi_service_filesystem_name = cloudi_service_filesystem
+pkg_cloudi_service_filesystem_description = Filesystem CloudI Service
+pkg_cloudi_service_filesystem_homepage = http://cloudi.org/
+pkg_cloudi_service_filesystem_fetch = git
+pkg_cloudi_service_filesystem_repo = https://github.com/CloudI/cloudi_service_filesystem
+pkg_cloudi_service_filesystem_commit = master
+
+PACKAGES += cloudi_service_http_client
+pkg_cloudi_service_http_client_name = cloudi_service_http_client
+pkg_cloudi_service_http_client_description = HTTP client CloudI Service
+pkg_cloudi_service_http_client_homepage = http://cloudi.org/
+pkg_cloudi_service_http_client_fetch = git
+pkg_cloudi_service_http_client_repo = https://github.com/CloudI/cloudi_service_http_client
+pkg_cloudi_service_http_client_commit = master
+
+PACKAGES += cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_name = cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_description = cowboy HTTP/HTTPS CloudI Service
+pkg_cloudi_service_http_cowboy_homepage = http://cloudi.org/
+pkg_cloudi_service_http_cowboy_fetch = git
+pkg_cloudi_service_http_cowboy_repo = https://github.com/CloudI/cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_commit = master
+
+PACKAGES += cloudi_service_http_elli
+pkg_cloudi_service_http_elli_name = cloudi_service_http_elli
+pkg_cloudi_service_http_elli_description = elli HTTP CloudI Service
+pkg_cloudi_service_http_elli_homepage = http://cloudi.org/
+pkg_cloudi_service_http_elli_fetch = git
+pkg_cloudi_service_http_elli_repo = https://github.com/CloudI/cloudi_service_http_elli
+pkg_cloudi_service_http_elli_commit = master
+
+PACKAGES += cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_name = cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_description = Map/Reduce CloudI Service
+pkg_cloudi_service_map_reduce_homepage = http://cloudi.org/
+pkg_cloudi_service_map_reduce_fetch = git
+pkg_cloudi_service_map_reduce_repo = https://github.com/CloudI/cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_commit = master
+
+PACKAGES += cloudi_service_oauth1
+pkg_cloudi_service_oauth1_name = cloudi_service_oauth1
+pkg_cloudi_service_oauth1_description = OAuth v1.0 CloudI Service
+pkg_cloudi_service_oauth1_homepage = http://cloudi.org/
+pkg_cloudi_service_oauth1_fetch = git
+pkg_cloudi_service_oauth1_repo = https://github.com/CloudI/cloudi_service_oauth1
+pkg_cloudi_service_oauth1_commit = master
+
+PACKAGES += cloudi_service_queue
+pkg_cloudi_service_queue_name = cloudi_service_queue
+pkg_cloudi_service_queue_description = Persistent Queue Service
+pkg_cloudi_service_queue_homepage = http://cloudi.org/
+pkg_cloudi_service_queue_fetch = git
+pkg_cloudi_service_queue_repo = https://github.com/CloudI/cloudi_service_queue
+pkg_cloudi_service_queue_commit = master
+
+PACKAGES += cloudi_service_quorum
+pkg_cloudi_service_quorum_name = cloudi_service_quorum
+pkg_cloudi_service_quorum_description = CloudI Quorum Service
+pkg_cloudi_service_quorum_homepage = http://cloudi.org/
+pkg_cloudi_service_quorum_fetch = git
+pkg_cloudi_service_quorum_repo = https://github.com/CloudI/cloudi_service_quorum
+pkg_cloudi_service_quorum_commit = master
+
+PACKAGES += cloudi_service_router
+pkg_cloudi_service_router_name = cloudi_service_router
+pkg_cloudi_service_router_description = CloudI Router Service
+pkg_cloudi_service_router_homepage = http://cloudi.org/
+pkg_cloudi_service_router_fetch = git
+pkg_cloudi_service_router_repo = https://github.com/CloudI/cloudi_service_router
+pkg_cloudi_service_router_commit = master
+
+PACKAGES += cloudi_service_tcp
+pkg_cloudi_service_tcp_name = cloudi_service_tcp
+pkg_cloudi_service_tcp_description = TCP CloudI Service
+pkg_cloudi_service_tcp_homepage = http://cloudi.org/
+pkg_cloudi_service_tcp_fetch = git
+pkg_cloudi_service_tcp_repo = https://github.com/CloudI/cloudi_service_tcp
+pkg_cloudi_service_tcp_commit = master
+
+PACKAGES += cloudi_service_timers
+pkg_cloudi_service_timers_name = cloudi_service_timers
+pkg_cloudi_service_timers_description = Timers CloudI Service
+pkg_cloudi_service_timers_homepage = http://cloudi.org/
+pkg_cloudi_service_timers_fetch = git
+pkg_cloudi_service_timers_repo = https://github.com/CloudI/cloudi_service_timers
+pkg_cloudi_service_timers_commit = master
+
+PACKAGES += cloudi_service_udp
+pkg_cloudi_service_udp_name = cloudi_service_udp
+pkg_cloudi_service_udp_description = UDP CloudI Service
+pkg_cloudi_service_udp_homepage = http://cloudi.org/
+pkg_cloudi_service_udp_fetch = git
+pkg_cloudi_service_udp_repo = https://github.com/CloudI/cloudi_service_udp
+pkg_cloudi_service_udp_commit = master
+
+PACKAGES += cloudi_service_validate
+pkg_cloudi_service_validate_name = cloudi_service_validate
+pkg_cloudi_service_validate_description = CloudI Validate Service
+pkg_cloudi_service_validate_homepage = http://cloudi.org/
+pkg_cloudi_service_validate_fetch = git
+pkg_cloudi_service_validate_repo = https://github.com/CloudI/cloudi_service_validate
+pkg_cloudi_service_validate_commit = master
+
+PACKAGES += cloudi_service_zeromq
+pkg_cloudi_service_zeromq_name = cloudi_service_zeromq
+pkg_cloudi_service_zeromq_description = ZeroMQ CloudI Service
+pkg_cloudi_service_zeromq_homepage = http://cloudi.org/
+pkg_cloudi_service_zeromq_fetch = git
+pkg_cloudi_service_zeromq_repo = https://github.com/CloudI/cloudi_service_zeromq
+pkg_cloudi_service_zeromq_commit = master
+
+PACKAGES += cluster_info
+pkg_cluster_info_name = cluster_info
+pkg_cluster_info_description = Fork of Hibari's nifty cluster_info OTP app
+pkg_cluster_info_homepage = https://github.com/basho/cluster_info
+pkg_cluster_info_fetch = git
+pkg_cluster_info_repo = https://github.com/basho/cluster_info
+pkg_cluster_info_commit = master
+
+PACKAGES += color
+pkg_color_name = color
+pkg_color_description = ANSI colors for your Erlang
+pkg_color_homepage = https://github.com/julianduque/erlang-color
+pkg_color_fetch = git
+pkg_color_repo = https://github.com/julianduque/erlang-color
+pkg_color_commit = master
+
+PACKAGES += confetti
+pkg_confetti_name = confetti
+pkg_confetti_description = Erlang configuration provider / application:get_env/2 on steroids
+pkg_confetti_homepage = https://github.com/jtendo/confetti
+pkg_confetti_fetch = git
+pkg_confetti_repo = https://github.com/jtendo/confetti
+pkg_confetti_commit = master
+
+PACKAGES += couchbeam
+pkg_couchbeam_name = couchbeam
+pkg_couchbeam_description = Apache CouchDB client in Erlang
+pkg_couchbeam_homepage = https://github.com/benoitc/couchbeam
+pkg_couchbeam_fetch = git
+pkg_couchbeam_repo = https://github.com/benoitc/couchbeam
+pkg_couchbeam_commit = master
+
+PACKAGES += covertool
+pkg_covertool_name = covertool
+pkg_covertool_description = Tool to convert Erlang cover data files into Cobertura XML reports
+pkg_covertool_homepage = https://github.com/idubrov/covertool
+pkg_covertool_fetch = git
+pkg_covertool_repo = https://github.com/idubrov/covertool
+pkg_covertool_commit = master
+
+PACKAGES += cowboy
+pkg_cowboy_name = cowboy
+pkg_cowboy_description = Small, fast and modular HTTP server.
+pkg_cowboy_homepage = http://ninenines.eu
+pkg_cowboy_fetch = git
+pkg_cowboy_repo = https://github.com/ninenines/cowboy
+pkg_cowboy_commit = 1.0.4
+
+PACKAGES += cowdb
+pkg_cowdb_name = cowdb
+pkg_cowdb_description = Pure Key/Value database library for Erlang Applications
+pkg_cowdb_homepage = https://github.com/refuge/cowdb
+pkg_cowdb_fetch = git
+pkg_cowdb_repo = https://github.com/refuge/cowdb
+pkg_cowdb_commit = master
+
+PACKAGES += cowlib
+pkg_cowlib_name = cowlib
+pkg_cowlib_description = Support library for manipulating Web protocols.
+pkg_cowlib_homepage = http://ninenines.eu
+pkg_cowlib_fetch = git
+pkg_cowlib_repo = https://github.com/ninenines/cowlib
+pkg_cowlib_commit = 1.0.2
+
+PACKAGES += cpg
+pkg_cpg_name = cpg
+pkg_cpg_description = CloudI Process Groups
+pkg_cpg_homepage = https://github.com/okeuday/cpg
+pkg_cpg_fetch = git
+pkg_cpg_repo = https://github.com/okeuday/cpg
+pkg_cpg_commit = master
+
+PACKAGES += cqerl
+pkg_cqerl_name = cqerl
+pkg_cqerl_description = Native Erlang CQL client for Cassandra
+pkg_cqerl_homepage = https://matehat.github.io/cqerl/
+pkg_cqerl_fetch = git
+pkg_cqerl_repo = https://github.com/matehat/cqerl
+pkg_cqerl_commit = master
+
+PACKAGES += cr
+pkg_cr_name = cr
+pkg_cr_description = Chain Replication
+pkg_cr_homepage = https://synrc.com/apps/cr/doc/cr.htm
+pkg_cr_fetch = git
+pkg_cr_repo = https://github.com/spawnproc/cr
+pkg_cr_commit = master
+
+PACKAGES += cuttlefish
+pkg_cuttlefish_name = cuttlefish
+pkg_cuttlefish_description = never lose your childlike sense of wonder baby cuttlefish, promise me?
+pkg_cuttlefish_homepage = https://github.com/basho/cuttlefish
+pkg_cuttlefish_fetch = git
+pkg_cuttlefish_repo = https://github.com/basho/cuttlefish
+pkg_cuttlefish_commit = master
+
+PACKAGES += damocles
+pkg_damocles_name = damocles
+pkg_damocles_description = Erlang library for generating adversarial network conditions for QAing distributed applications/systems on a single Linux box.
+pkg_damocles_homepage = https://github.com/lostcolony/damocles
+pkg_damocles_fetch = git
+pkg_damocles_repo = https://github.com/lostcolony/damocles
+pkg_damocles_commit = master
+
+PACKAGES += debbie
+pkg_debbie_name = debbie
+pkg_debbie_description = .DEB Built In Erlang
+pkg_debbie_homepage = https://github.com/crownedgrouse/debbie
+pkg_debbie_fetch = git
+pkg_debbie_repo = https://github.com/crownedgrouse/debbie
+pkg_debbie_commit = master
+
+PACKAGES += decimal
+pkg_decimal_name = decimal
+pkg_decimal_description = An Erlang decimal arithmetic library
+pkg_decimal_homepage = https://github.com/tim/erlang-decimal
+pkg_decimal_fetch = git
+pkg_decimal_repo = https://github.com/tim/erlang-decimal
+pkg_decimal_commit = master
+
+PACKAGES += detergent
+pkg_detergent_name = detergent
+pkg_detergent_description = An emulsifying Erlang SOAP library
+pkg_detergent_homepage = https://github.com/devinus/detergent
+pkg_detergent_fetch = git
+pkg_detergent_repo = https://github.com/devinus/detergent
+pkg_detergent_commit = master
+
+PACKAGES += detest
+pkg_detest_name = detest
+pkg_detest_description = Tool for running tests on a cluster of erlang nodes
+pkg_detest_homepage = https://github.com/biokoda/detest
+pkg_detest_fetch = git
+pkg_detest_repo = https://github.com/biokoda/detest
+pkg_detest_commit = master
+
+PACKAGES += dh_date
+pkg_dh_date_name = dh_date
+pkg_dh_date_description = Date formatting / parsing library for erlang
+pkg_dh_date_homepage = https://github.com/daleharvey/dh_date
+pkg_dh_date_fetch = git
+pkg_dh_date_repo = https://github.com/daleharvey/dh_date
+pkg_dh_date_commit = master
+
+PACKAGES += dirbusterl
+pkg_dirbusterl_name = dirbusterl
+pkg_dirbusterl_description = DirBuster successor in Erlang
+pkg_dirbusterl_homepage = https://github.com/silentsignal/DirBustErl
+pkg_dirbusterl_fetch = git
+pkg_dirbusterl_repo = https://github.com/silentsignal/DirBustErl
+pkg_dirbusterl_commit = master
+
+PACKAGES += dispcount
+pkg_dispcount_name = dispcount
+pkg_dispcount_description = Erlang task dispatcher based on ETS counters.
+pkg_dispcount_homepage = https://github.com/ferd/dispcount
+pkg_dispcount_fetch = git
+pkg_dispcount_repo = https://github.com/ferd/dispcount
+pkg_dispcount_commit = master
+
+PACKAGES += dlhttpc
+pkg_dlhttpc_name = dlhttpc
+pkg_dlhttpc_description = dispcount-based lhttpc fork for massive amounts of requests to limited endpoints
+pkg_dlhttpc_homepage = https://github.com/ferd/dlhttpc
+pkg_dlhttpc_fetch = git
+pkg_dlhttpc_repo = https://github.com/ferd/dlhttpc
+pkg_dlhttpc_commit = master
+
+PACKAGES += dns
+pkg_dns_name = dns
+pkg_dns_description = Erlang DNS library
+pkg_dns_homepage = https://github.com/aetrion/dns_erlang
+pkg_dns_fetch = git
+pkg_dns_repo = https://github.com/aetrion/dns_erlang
+pkg_dns_commit = master
+
+PACKAGES += dnssd
+pkg_dnssd_name = dnssd
+pkg_dnssd_description = Erlang interface to Apple's Bonjour DNS Service Discovery implementation
+pkg_dnssd_homepage = https://github.com/benoitc/dnssd_erlang
+pkg_dnssd_fetch = git
+pkg_dnssd_repo = https://github.com/benoitc/dnssd_erlang
+pkg_dnssd_commit = master
+
+PACKAGES += dynamic_compile
+pkg_dynamic_compile_name = dynamic_compile
+pkg_dynamic_compile_description = compile and load erlang modules from string input
+pkg_dynamic_compile_homepage = https://github.com/jkvor/dynamic_compile
+pkg_dynamic_compile_fetch = git
+pkg_dynamic_compile_repo = https://github.com/jkvor/dynamic_compile
+pkg_dynamic_compile_commit = master
+
+PACKAGES += e2
+pkg_e2_name = e2
+pkg_e2_description = Library to simplify writing correct OTP applications.
+pkg_e2_homepage = http://e2project.org
+pkg_e2_fetch = git
+pkg_e2_repo = https://github.com/gar1t/e2
+pkg_e2_commit = master
+
+PACKAGES += eamf
+pkg_eamf_name = eamf
+pkg_eamf_description = eAMF provides Action Message Format (AMF) support for Erlang
+pkg_eamf_homepage = https://github.com/mrinalwadhwa/eamf
+pkg_eamf_fetch = git
+pkg_eamf_repo = https://github.com/mrinalwadhwa/eamf
+pkg_eamf_commit = master
+
+PACKAGES += eavro
+pkg_eavro_name = eavro
+pkg_eavro_description = Apache Avro encoder/decoder
+pkg_eavro_homepage = https://github.com/SIfoxDevTeam/eavro
+pkg_eavro_fetch = git
+pkg_eavro_repo = https://github.com/SIfoxDevTeam/eavro
+pkg_eavro_commit = master
+
+PACKAGES += ecapnp
+pkg_ecapnp_name = ecapnp
+pkg_ecapnp_description = Cap'n Proto library for Erlang
+pkg_ecapnp_homepage = https://github.com/kaos/ecapnp
+pkg_ecapnp_fetch = git
+pkg_ecapnp_repo = https://github.com/kaos/ecapnp
+pkg_ecapnp_commit = master
+
+PACKAGES += econfig
+pkg_econfig_name = econfig
+pkg_econfig_description = simple Erlang config handler using INI files
+pkg_econfig_homepage = https://github.com/benoitc/econfig
+pkg_econfig_fetch = git
+pkg_econfig_repo = https://github.com/benoitc/econfig
+pkg_econfig_commit = master
+
+PACKAGES += edate
+pkg_edate_name = edate
+pkg_edate_description = date manipulation library for erlang
+pkg_edate_homepage = https://github.com/dweldon/edate
+pkg_edate_fetch = git
+pkg_edate_repo = https://github.com/dweldon/edate
+pkg_edate_commit = master
+
+PACKAGES += edgar
+pkg_edgar_name = edgar
+pkg_edgar_description = Erlang Does GNU AR
+pkg_edgar_homepage = https://github.com/crownedgrouse/edgar
+pkg_edgar_fetch = git
+pkg_edgar_repo = https://github.com/crownedgrouse/edgar
+pkg_edgar_commit = master
+
+PACKAGES += edis
+pkg_edis_name = edis
+pkg_edis_description = An Erlang implementation of Redis KV Store
+pkg_edis_homepage = http://inaka.github.com/edis/
+pkg_edis_fetch = git
+pkg_edis_repo = https://github.com/inaka/edis
+pkg_edis_commit = master
+
+PACKAGES += edns
+pkg_edns_name = edns
+pkg_edns_description = Erlang/OTP DNS server
+pkg_edns_homepage = https://github.com/hcvst/erlang-dns
+pkg_edns_fetch = git
+pkg_edns_repo = https://github.com/hcvst/erlang-dns
+pkg_edns_commit = master
+
+PACKAGES += edown
+pkg_edown_name = edown
+pkg_edown_description = EDoc extension for generating Github-flavored Markdown
+pkg_edown_homepage = https://github.com/uwiger/edown
+pkg_edown_fetch = git
+pkg_edown_repo = https://github.com/uwiger/edown
+pkg_edown_commit = master
+
+PACKAGES += eep
+pkg_eep_name = eep
+pkg_eep_description = Erlang Easy Profiling (eep) application provides a way to analyze application performance and call hierarchy
+pkg_eep_homepage = https://github.com/virtan/eep
+pkg_eep_fetch = git
+pkg_eep_repo = https://github.com/virtan/eep
+pkg_eep_commit = master
+
+PACKAGES += eep_app
+pkg_eep_app_name = eep_app
+pkg_eep_app_description = Embedded Event Processing
+pkg_eep_app_homepage = https://github.com/darach/eep-erl
+pkg_eep_app_fetch = git
+pkg_eep_app_repo = https://github.com/darach/eep-erl
+pkg_eep_app_commit = master
+
+PACKAGES += efene
+pkg_efene_name = efene
+pkg_efene_description = Alternative syntax for the Erlang Programming Language focusing on simplicity, ease of use and programmer UX
+pkg_efene_homepage = https://github.com/efene/efene
+pkg_efene_fetch = git
+pkg_efene_repo = https://github.com/efene/efene
+pkg_efene_commit = master
+
+PACKAGES += egeoip
+pkg_egeoip_name = egeoip
+pkg_egeoip_description = Erlang IP Geolocation module, currently supporting the MaxMind GeoLite City Database.
+pkg_egeoip_homepage = https://github.com/mochi/egeoip
+pkg_egeoip_fetch = git
+pkg_egeoip_repo = https://github.com/mochi/egeoip
+pkg_egeoip_commit = master
+
+PACKAGES += ehsa
+pkg_ehsa_name = ehsa
+pkg_ehsa_description = Erlang HTTP server basic and digest authentication modules
+pkg_ehsa_homepage = https://bitbucket.org/a12n/ehsa
+pkg_ehsa_fetch = hg
+pkg_ehsa_repo = https://bitbucket.org/a12n/ehsa
+pkg_ehsa_commit = default
+
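+# The fetch method is not always git: the ehsa entry above is fetched
+# with Mercurial, so its default commit names the hg "default" branch
+# rather than a git "master". A user-level dep_* override uses the same
+# <fetch method> <repo> <commit> shape; a hypothetical example:
+#
+#     dep_ehsa = hg https://bitbucket.org/a12n/ehsa default
+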
+PACKAGES += ej
+pkg_ej_name = ej
+pkg_ej_description = Helper module for working with Erlang terms representing JSON
+pkg_ej_homepage = https://github.com/seth/ej
+pkg_ej_fetch = git
+pkg_ej_repo = https://github.com/seth/ej
+pkg_ej_commit = master
+
+PACKAGES += ejabberd
+pkg_ejabberd_name = ejabberd
+pkg_ejabberd_description = Robust, ubiquitous and massively scalable Jabber / XMPP Instant Messaging platform
+pkg_ejabberd_homepage = https://github.com/processone/ejabberd
+pkg_ejabberd_fetch = git
+pkg_ejabberd_repo = https://github.com/processone/ejabberd
+pkg_ejabberd_commit = master
+
+PACKAGES += ejwt
+pkg_ejwt_name = ejwt
+pkg_ejwt_description = erlang library for JSON Web Token
+pkg_ejwt_homepage = https://github.com/artefactop/ejwt
+pkg_ejwt_fetch = git
+pkg_ejwt_repo = https://github.com/artefactop/ejwt
+pkg_ejwt_commit = master
+
+PACKAGES += ekaf
+pkg_ekaf_name = ekaf
+pkg_ekaf_description = A minimal, high-performance Kafka client in Erlang.
+pkg_ekaf_homepage = https://github.com/helpshift/ekaf
+pkg_ekaf_fetch = git
+pkg_ekaf_repo = https://github.com/helpshift/ekaf
+pkg_ekaf_commit = master
+
+PACKAGES += elarm
+pkg_elarm_name = elarm
+pkg_elarm_description = Alarm Manager for Erlang.
+pkg_elarm_homepage = https://github.com/esl/elarm
+pkg_elarm_fetch = git
+pkg_elarm_repo = https://github.com/esl/elarm
+pkg_elarm_commit = master
+
+PACKAGES += eleveldb
+pkg_eleveldb_name = eleveldb
+pkg_eleveldb_description = Erlang LevelDB API
+pkg_eleveldb_homepage = https://github.com/basho/eleveldb
+pkg_eleveldb_fetch = git
+pkg_eleveldb_repo = https://github.com/basho/eleveldb
+pkg_eleveldb_commit = master
+
+PACKAGES += elixir
+pkg_elixir_name = elixir
+pkg_elixir_description = Elixir is a dynamic, functional language designed for building scalable and maintainable applications
+pkg_elixir_homepage = https://elixir-lang.org/
+pkg_elixir_fetch = git
+pkg_elixir_repo = https://github.com/elixir-lang/elixir
+pkg_elixir_commit = master
+
+PACKAGES += elli
+pkg_elli_name = elli
+pkg_elli_description = Simple, robust and performant Erlang web server
+pkg_elli_homepage = https://github.com/elli-lib/elli
+pkg_elli_fetch = git
+pkg_elli_repo = https://github.com/elli-lib/elli
+pkg_elli_commit = master
+
+PACKAGES += elvis
+pkg_elvis_name = elvis
+pkg_elvis_description = Erlang Style Reviewer
+pkg_elvis_homepage = https://github.com/inaka/elvis
+pkg_elvis_fetch = git
+pkg_elvis_repo = https://github.com/inaka/elvis
+pkg_elvis_commit = master
+
+PACKAGES += emagick
+pkg_emagick_name = emagick
+pkg_emagick_description = Wrapper for Graphics/ImageMagick command line tool.
+pkg_emagick_homepage = https://github.com/kivra/emagick
+pkg_emagick_fetch = git
+pkg_emagick_repo = https://github.com/kivra/emagick
+pkg_emagick_commit = master
+
+PACKAGES += emysql
+pkg_emysql_name = emysql
+pkg_emysql_description = Stable, pure Erlang MySQL driver.
+pkg_emysql_homepage = https://github.com/Eonblast/Emysql
+pkg_emysql_fetch = git
+pkg_emysql_repo = https://github.com/Eonblast/Emysql
+pkg_emysql_commit = master
+
+PACKAGES += enm
+pkg_enm_name = enm
+pkg_enm_description = Erlang driver for nanomsg
+pkg_enm_homepage = https://github.com/basho/enm
+pkg_enm_fetch = git
+pkg_enm_repo = https://github.com/basho/enm
+pkg_enm_commit = master
+
+PACKAGES += entop
+pkg_entop_name = entop
+pkg_entop_description = A top-like tool for monitoring an Erlang node
+pkg_entop_homepage = https://github.com/mazenharake/entop
+pkg_entop_fetch = git
+pkg_entop_repo = https://github.com/mazenharake/entop
+pkg_entop_commit = master
+
+PACKAGES += epcap
+pkg_epcap_name = epcap
+pkg_epcap_description = Erlang packet capture interface using pcap
+pkg_epcap_homepage = https://github.com/msantos/epcap
+pkg_epcap_fetch = git
+pkg_epcap_repo = https://github.com/msantos/epcap
+pkg_epcap_commit = master
+
+PACKAGES += eper
+pkg_eper_name = eper
+pkg_eper_description = Erlang performance and debugging tools.
+pkg_eper_homepage = https://github.com/massemanet/eper
+pkg_eper_fetch = git
+pkg_eper_repo = https://github.com/massemanet/eper
+pkg_eper_commit = master
+
+PACKAGES += epgsql
+pkg_epgsql_name = epgsql
+pkg_epgsql_description = Erlang PostgreSQL client library.
+pkg_epgsql_homepage = https://github.com/epgsql/epgsql
+pkg_epgsql_fetch = git
+pkg_epgsql_repo = https://github.com/epgsql/epgsql
+pkg_epgsql_commit = master
+
+PACKAGES += episcina
+pkg_episcina_name = episcina
+pkg_episcina_description = A simple non intrusive resource pool for connections
+pkg_episcina_homepage = https://github.com/erlware/episcina
+pkg_episcina_fetch = git
+pkg_episcina_repo = https://github.com/erlware/episcina
+pkg_episcina_commit = master
+
+PACKAGES += eplot
+pkg_eplot_name = eplot
+pkg_eplot_description = A plot engine written in erlang.
+pkg_eplot_homepage = https://github.com/psyeugenic/eplot
+pkg_eplot_fetch = git
+pkg_eplot_repo = https://github.com/psyeugenic/eplot
+pkg_eplot_commit = master
+
+PACKAGES += epocxy
+pkg_epocxy_name = epocxy
+pkg_epocxy_description = Erlang Patterns of Concurrency
+pkg_epocxy_homepage = https://github.com/duomark/epocxy
+pkg_epocxy_fetch = git
+pkg_epocxy_repo = https://github.com/duomark/epocxy
+pkg_epocxy_commit = master
+
+PACKAGES += epubnub
+pkg_epubnub_name = epubnub
+pkg_epubnub_description = Erlang PubNub API
+pkg_epubnub_homepage = https://github.com/tsloughter/epubnub
+pkg_epubnub_fetch = git
+pkg_epubnub_repo = https://github.com/tsloughter/epubnub
+pkg_epubnub_commit = master
+
+PACKAGES += eqm
+pkg_eqm_name = eqm
+pkg_eqm_description = Erlang pub sub with supply-demand channels
+pkg_eqm_homepage = https://github.com/loucash/eqm
+pkg_eqm_fetch = git
+pkg_eqm_repo = https://github.com/loucash/eqm
+pkg_eqm_commit = master
+
+PACKAGES += eredis
+pkg_eredis_name = eredis
+pkg_eredis_description = Erlang Redis client
+pkg_eredis_homepage = https://github.com/wooga/eredis
+pkg_eredis_fetch = git
+pkg_eredis_repo = https://github.com/wooga/eredis
+pkg_eredis_commit = master
+
+PACKAGES += eredis_pool
+pkg_eredis_pool_name = eredis_pool
+pkg_eredis_pool_description = eredis_pool is a pool of Redis clients, using eredis and poolboy.
+pkg_eredis_pool_homepage = https://github.com/hiroeorz/eredis_pool
+pkg_eredis_pool_fetch = git
+pkg_eredis_pool_repo = https://github.com/hiroeorz/eredis_pool
+pkg_eredis_pool_commit = master
+
+PACKAGES += erl_streams
+pkg_erl_streams_name = erl_streams
+pkg_erl_streams_description = Streams in Erlang
+pkg_erl_streams_homepage = https://github.com/epappas/erl_streams
+pkg_erl_streams_fetch = git
+pkg_erl_streams_repo = https://github.com/epappas/erl_streams
+pkg_erl_streams_commit = master
+
+PACKAGES += erlang_cep
+pkg_erlang_cep_name = erlang_cep
+pkg_erlang_cep_description = A basic CEP package written in erlang
+pkg_erlang_cep_homepage = https://github.com/danmacklin/erlang_cep
+pkg_erlang_cep_fetch = git
+pkg_erlang_cep_repo = https://github.com/danmacklin/erlang_cep
+pkg_erlang_cep_commit = master
+
+PACKAGES += erlang_js
+pkg_erlang_js_name = erlang_js
+pkg_erlang_js_description = A linked-in driver for Erlang to Mozilla's Spidermonkey Javascript runtime.
+pkg_erlang_js_homepage = https://github.com/basho/erlang_js
+pkg_erlang_js_fetch = git
+pkg_erlang_js_repo = https://github.com/basho/erlang_js
+pkg_erlang_js_commit = master
+
+PACKAGES += erlang_localtime
+pkg_erlang_localtime_name = erlang_localtime
+pkg_erlang_localtime_description = Erlang library for conversion from one local time to another
+pkg_erlang_localtime_homepage = https://github.com/dmitryme/erlang_localtime
+pkg_erlang_localtime_fetch = git
+pkg_erlang_localtime_repo = https://github.com/dmitryme/erlang_localtime
+pkg_erlang_localtime_commit = master
+
+PACKAGES += erlang_smtp
+pkg_erlang_smtp_name = erlang_smtp
+pkg_erlang_smtp_description = Erlang SMTP and POP3 server code.
+pkg_erlang_smtp_homepage = https://github.com/tonyg/erlang-smtp
+pkg_erlang_smtp_fetch = git
+pkg_erlang_smtp_repo = https://github.com/tonyg/erlang-smtp
+pkg_erlang_smtp_commit = master
+
+PACKAGES += erlang_term
+pkg_erlang_term_name = erlang_term
+pkg_erlang_term_description = Erlang Term Info
+pkg_erlang_term_homepage = https://github.com/okeuday/erlang_term
+pkg_erlang_term_fetch = git
+pkg_erlang_term_repo = https://github.com/okeuday/erlang_term
+pkg_erlang_term_commit = master
+
+PACKAGES += erlastic_search
+pkg_erlastic_search_name = erlastic_search
+pkg_erlastic_search_description = An Erlang app for communicating with Elastic Search's rest interface.
+pkg_erlastic_search_homepage = https://github.com/tsloughter/erlastic_search
+pkg_erlastic_search_fetch = git
+pkg_erlastic_search_repo = https://github.com/tsloughter/erlastic_search
+pkg_erlastic_search_commit = master
+
+PACKAGES += erlasticsearch
+pkg_erlasticsearch_name = erlasticsearch
+pkg_erlasticsearch_description = Erlang thrift interface to elastic_search
+pkg_erlasticsearch_homepage = https://github.com/dieswaytoofast/erlasticsearch
+pkg_erlasticsearch_fetch = git
+pkg_erlasticsearch_repo = https://github.com/dieswaytoofast/erlasticsearch
+pkg_erlasticsearch_commit = master
+
+PACKAGES += erlbrake
+pkg_erlbrake_name = erlbrake
+pkg_erlbrake_description = Erlang Airbrake notification client
+pkg_erlbrake_homepage = https://github.com/kenpratt/erlbrake
+pkg_erlbrake_fetch = git
+pkg_erlbrake_repo = https://github.com/kenpratt/erlbrake
+pkg_erlbrake_commit = master
+
+PACKAGES += erlcloud
+pkg_erlcloud_name = erlcloud
+pkg_erlcloud_description = Cloud Computing library for erlang (Amazon EC2, S3, SQS, SimpleDB, Mechanical Turk, ELB)
+pkg_erlcloud_homepage = https://github.com/gleber/erlcloud
+pkg_erlcloud_fetch = git
+pkg_erlcloud_repo = https://github.com/gleber/erlcloud
+pkg_erlcloud_commit = master
+
+PACKAGES += erlcron
+pkg_erlcron_name = erlcron
+pkg_erlcron_description = Erlang cronish system
+pkg_erlcron_homepage = https://github.com/erlware/erlcron
+pkg_erlcron_fetch = git
+pkg_erlcron_repo = https://github.com/erlware/erlcron
+pkg_erlcron_commit = master
+
+PACKAGES += erldb
+pkg_erldb_name = erldb
+pkg_erldb_description = ORM (Object-relational mapping) application implemented in Erlang
+pkg_erldb_homepage = http://erldb.org
+pkg_erldb_fetch = git
+pkg_erldb_repo = https://github.com/erldb/erldb
+pkg_erldb_commit = master
+
+PACKAGES += erldis
+pkg_erldis_name = erldis
+pkg_erldis_description = redis erlang client library
+pkg_erldis_homepage = https://github.com/cstar/erldis
+pkg_erldis_fetch = git
+pkg_erldis_repo = https://github.com/cstar/erldis
+pkg_erldis_commit = master
+
+PACKAGES += erldns
+pkg_erldns_name = erldns
+pkg_erldns_description = DNS server, in erlang.
+pkg_erldns_homepage = https://github.com/aetrion/erl-dns
+pkg_erldns_fetch = git
+pkg_erldns_repo = https://github.com/aetrion/erl-dns
+pkg_erldns_commit = master
+
+PACKAGES += erldocker
+pkg_erldocker_name = erldocker
+pkg_erldocker_description = Docker Remote API client for Erlang
+pkg_erldocker_homepage = https://github.com/proger/erldocker
+pkg_erldocker_fetch = git
+pkg_erldocker_repo = https://github.com/proger/erldocker
+pkg_erldocker_commit = master
+
+PACKAGES += erlfsmon
+pkg_erlfsmon_name = erlfsmon
+pkg_erlfsmon_description = Erlang filesystem event watcher for Linux and OSX
+pkg_erlfsmon_homepage = https://github.com/proger/erlfsmon
+pkg_erlfsmon_fetch = git
+pkg_erlfsmon_repo = https://github.com/proger/erlfsmon
+pkg_erlfsmon_commit = master
+
+PACKAGES += erlgit
+pkg_erlgit_name = erlgit
+pkg_erlgit_description = Erlang convenience wrapper around git executable
+pkg_erlgit_homepage = https://github.com/gleber/erlgit
+pkg_erlgit_fetch = git
+pkg_erlgit_repo = https://github.com/gleber/erlgit
+pkg_erlgit_commit = master
+
+PACKAGES += erlguten
+pkg_erlguten_name = erlguten
+pkg_erlguten_description = ErlGuten is a system for high-quality typesetting, written purely in Erlang.
+pkg_erlguten_homepage = https://github.com/richcarl/erlguten
+pkg_erlguten_fetch = git
+pkg_erlguten_repo = https://github.com/richcarl/erlguten
+pkg_erlguten_commit = master
+
+PACKAGES += erlmc
+pkg_erlmc_name = erlmc
+pkg_erlmc_description = Erlang memcached binary protocol client
+pkg_erlmc_homepage = https://github.com/jkvor/erlmc
+pkg_erlmc_fetch = git
+pkg_erlmc_repo = https://github.com/jkvor/erlmc
+pkg_erlmc_commit = master
+
+PACKAGES += erlmongo
+pkg_erlmongo_name = erlmongo
+pkg_erlmongo_description = Record based Erlang driver for MongoDB with gridfs support
+pkg_erlmongo_homepage = https://github.com/SergejJurecko/erlmongo
+pkg_erlmongo_fetch = git
+pkg_erlmongo_repo = https://github.com/SergejJurecko/erlmongo
+pkg_erlmongo_commit = master
+
+PACKAGES += erlog
+pkg_erlog_name = erlog
+pkg_erlog_description = Prolog interpreter in and for Erlang
+pkg_erlog_homepage = https://github.com/rvirding/erlog
+pkg_erlog_fetch = git
+pkg_erlog_repo = https://github.com/rvirding/erlog
+pkg_erlog_commit = master
+
+PACKAGES += erlpass
+pkg_erlpass_name = erlpass
+pkg_erlpass_description = A library to handle password hashing and changing in a safe manner, independent from any kind of storage whatsoever.
+pkg_erlpass_homepage = https://github.com/ferd/erlpass
+pkg_erlpass_fetch = git
+pkg_erlpass_repo = https://github.com/ferd/erlpass
+pkg_erlpass_commit = master
+
+PACKAGES += erlport
+pkg_erlport_name = erlport
+pkg_erlport_description = ErlPort - connect Erlang to other languages
+pkg_erlport_homepage = https://github.com/hdima/erlport
+pkg_erlport_fetch = git
+pkg_erlport_repo = https://github.com/hdima/erlport
+pkg_erlport_commit = master
+
+PACKAGES += erlsh
+pkg_erlsh_name = erlsh
+pkg_erlsh_description = Erlang shell tools
+pkg_erlsh_homepage = https://github.com/proger/erlsh
+pkg_erlsh_fetch = git
+pkg_erlsh_repo = https://github.com/proger/erlsh
+pkg_erlsh_commit = master
+
+PACKAGES += erlsha2
+pkg_erlsha2_name = erlsha2
+pkg_erlsha2_description = SHA-224, SHA-256, SHA-384, SHA-512 implemented in Erlang NIFs.
+pkg_erlsha2_homepage = https://github.com/vinoski/erlsha2
+pkg_erlsha2_fetch = git
+pkg_erlsha2_repo = https://github.com/vinoski/erlsha2
+pkg_erlsha2_commit = master
+
+PACKAGES += erlsom
+pkg_erlsom_name = erlsom
+pkg_erlsom_description = XML parser for Erlang
+pkg_erlsom_homepage = https://github.com/willemdj/erlsom
+pkg_erlsom_fetch = git
+pkg_erlsom_repo = https://github.com/willemdj/erlsom
+pkg_erlsom_commit = master
+
+PACKAGES += erlubi
+pkg_erlubi_name = erlubi
+pkg_erlubi_description = Ubigraph Erlang Client (and Process Visualizer)
+pkg_erlubi_homepage = https://github.com/krestenkrab/erlubi
+pkg_erlubi_fetch = git
+pkg_erlubi_repo = https://github.com/krestenkrab/erlubi
+pkg_erlubi_commit = master
+
+PACKAGES += erlvolt
+pkg_erlvolt_name = erlvolt
+pkg_erlvolt_description = VoltDB Erlang Client Driver
+pkg_erlvolt_homepage = https://github.com/VoltDB/voltdb-client-erlang
+pkg_erlvolt_fetch = git
+pkg_erlvolt_repo = https://github.com/VoltDB/voltdb-client-erlang
+pkg_erlvolt_commit = master
+
+PACKAGES += erlware_commons
+pkg_erlware_commons_name = erlware_commons
+pkg_erlware_commons_description = Erlware Commons is an Erlware project focused on all aspects of reusable Erlang components.
+pkg_erlware_commons_homepage = https://github.com/erlware/erlware_commons
+pkg_erlware_commons_fetch = git
+pkg_erlware_commons_repo = https://github.com/erlware/erlware_commons
+pkg_erlware_commons_commit = master
+
+PACKAGES += erlydtl
+pkg_erlydtl_name = erlydtl
+pkg_erlydtl_description = Django Template Language for Erlang.
+pkg_erlydtl_homepage = https://github.com/erlydtl/erlydtl
+pkg_erlydtl_fetch = git
+pkg_erlydtl_repo = https://github.com/erlydtl/erlydtl
+pkg_erlydtl_commit = master
+
+PACKAGES += errd
+pkg_errd_name = errd
+pkg_errd_description = Erlang RRDTool library
+pkg_errd_homepage = https://github.com/archaelus/errd
+pkg_errd_fetch = git
+pkg_errd_repo = https://github.com/archaelus/errd
+pkg_errd_commit = master
+
+PACKAGES += erserve
+pkg_erserve_name = erserve
+pkg_erserve_description = Erlang/Rserve communication interface
+pkg_erserve_homepage = https://github.com/del/erserve
+pkg_erserve_fetch = git
+pkg_erserve_repo = https://github.com/del/erserve
+pkg_erserve_commit = master
+
+PACKAGES += erwa
+pkg_erwa_name = erwa
+pkg_erwa_description = A WAMP router and client written in Erlang.
+pkg_erwa_homepage = https://github.com/bwegh/erwa
+pkg_erwa_fetch = git
+pkg_erwa_repo = https://github.com/bwegh/erwa
+pkg_erwa_commit = master
+
+PACKAGES += escalus
+pkg_escalus_name = escalus
+pkg_escalus_description = An XMPP client library in Erlang for conveniently testing XMPP servers
+pkg_escalus_homepage = https://github.com/esl/escalus
+pkg_escalus_fetch = git
+pkg_escalus_repo = https://github.com/esl/escalus
+pkg_escalus_commit = master
+
+PACKAGES += esh_mk
+pkg_esh_mk_name = esh_mk
+pkg_esh_mk_description = esh template engine plugin for erlang.mk
+pkg_esh_mk_homepage = https://github.com/crownedgrouse/esh.mk
+pkg_esh_mk_fetch = git
+pkg_esh_mk_repo = https://github.com/crownedgrouse/esh.mk.git
+pkg_esh_mk_commit = master
+
+PACKAGES += espec
+pkg_espec_name = espec
+pkg_espec_description = ESpec: Behaviour driven development framework for Erlang
+pkg_espec_homepage = https://github.com/lucaspiller/espec
+pkg_espec_fetch = git
+pkg_espec_repo = https://github.com/lucaspiller/espec
+pkg_espec_commit = master
+
+PACKAGES += estatsd
+pkg_estatsd_name = estatsd
+pkg_estatsd_description = Erlang stats aggregation app that periodically flushes data to graphite
+pkg_estatsd_homepage = https://github.com/RJ/estatsd
+pkg_estatsd_fetch = git
+pkg_estatsd_repo = https://github.com/RJ/estatsd
+pkg_estatsd_commit = master
+
+PACKAGES += etap
+pkg_etap_name = etap
+pkg_etap_description = etap is a simple erlang testing library that provides TAP compliant output.
+pkg_etap_homepage = https://github.com/ngerakines/etap
+pkg_etap_fetch = git
+pkg_etap_repo = https://github.com/ngerakines/etap
+pkg_etap_commit = master
+
+PACKAGES += etest
+pkg_etest_name = etest
+pkg_etest_description = A lightweight, convention over configuration test framework for Erlang
+pkg_etest_homepage = https://github.com/wooga/etest
+pkg_etest_fetch = git
+pkg_etest_repo = https://github.com/wooga/etest
+pkg_etest_commit = master
+
+PACKAGES += etest_http
+pkg_etest_http_name = etest_http
+pkg_etest_http_description = etest Assertions around HTTP (client-side)
+pkg_etest_http_homepage = https://github.com/wooga/etest_http
+pkg_etest_http_fetch = git
+pkg_etest_http_repo = https://github.com/wooga/etest_http
+pkg_etest_http_commit = master
+
+PACKAGES += etoml
+pkg_etoml_name = etoml
+pkg_etoml_description = Erlang parser for the TOML language
+pkg_etoml_homepage = https://github.com/kalta/etoml
+pkg_etoml_fetch = git
+pkg_etoml_repo = https://github.com/kalta/etoml
+pkg_etoml_commit = master
+
+PACKAGES += eunit
+pkg_eunit_name = eunit
+pkg_eunit_description = The EUnit lightweight unit testing framework for Erlang - this is the canonical development repository.
+pkg_eunit_homepage = https://github.com/richcarl/eunit
+pkg_eunit_fetch = git
+pkg_eunit_repo = https://github.com/richcarl/eunit
+pkg_eunit_commit = master
+
+PACKAGES += eunit_formatters
+pkg_eunit_formatters_name = eunit_formatters
+pkg_eunit_formatters_description = Because eunit's output sucks. Let's make it better.
+pkg_eunit_formatters_homepage = https://github.com/seancribbs/eunit_formatters
+pkg_eunit_formatters_fetch = git
+pkg_eunit_formatters_repo = https://github.com/seancribbs/eunit_formatters
+pkg_eunit_formatters_commit = master
+
+PACKAGES += euthanasia
+pkg_euthanasia_name = euthanasia
+pkg_euthanasia_description = Merciful killer for your Erlang processes
+pkg_euthanasia_homepage = https://github.com/doubleyou/euthanasia
+pkg_euthanasia_fetch = git
+pkg_euthanasia_repo = https://github.com/doubleyou/euthanasia
+pkg_euthanasia_commit = master
+
+PACKAGES += evum
+pkg_evum_name = evum
+pkg_evum_description = Spawn Linux VMs as Erlang processes in the Erlang VM
+pkg_evum_homepage = https://github.com/msantos/evum
+pkg_evum_fetch = git
+pkg_evum_repo = https://github.com/msantos/evum
+pkg_evum_commit = master
+
+PACKAGES += exec
+pkg_exec_name = erlexec
+pkg_exec_description = Execute and control OS processes from Erlang/OTP.
+pkg_exec_homepage = http://saleyn.github.com/erlexec
+pkg_exec_fetch = git
+pkg_exec_repo = https://github.com/saleyn/erlexec
+pkg_exec_commit = master
+
+PACKAGES += exml
+pkg_exml_name = exml
+pkg_exml_description = XML parsing library in Erlang
+pkg_exml_homepage = https://github.com/paulgray/exml
+pkg_exml_fetch = git
+pkg_exml_repo = https://github.com/paulgray/exml
+pkg_exml_commit = master
+
+PACKAGES += exometer
+pkg_exometer_name = exometer
+pkg_exometer_description = Basic measurement objects and probe behavior
+pkg_exometer_homepage = https://github.com/Feuerlabs/exometer
+pkg_exometer_fetch = git
+pkg_exometer_repo = https://github.com/Feuerlabs/exometer
+pkg_exometer_commit = master
+
+PACKAGES += exs1024
+pkg_exs1024_name = exs1024
+pkg_exs1024_description = Xorshift1024star pseudo random number generator for Erlang.
+pkg_exs1024_homepage = https://github.com/jj1bdx/exs1024
+pkg_exs1024_fetch = git
+pkg_exs1024_repo = https://github.com/jj1bdx/exs1024
+pkg_exs1024_commit = master
+
+PACKAGES += exs64
+pkg_exs64_name = exs64
+pkg_exs64_description = Xorshift64star pseudo random number generator for Erlang.
+pkg_exs64_homepage = https://github.com/jj1bdx/exs64
+pkg_exs64_fetch = git
+pkg_exs64_repo = https://github.com/jj1bdx/exs64
+pkg_exs64_commit = master
+
+PACKAGES += exsplus116
+pkg_exsplus116_name = exsplus116
+pkg_exsplus116_description = Xorshift116plus for Erlang
+pkg_exsplus116_homepage = https://github.com/jj1bdx/exsplus116
+pkg_exsplus116_fetch = git
+pkg_exsplus116_repo = https://github.com/jj1bdx/exsplus116
+pkg_exsplus116_commit = master
+
+PACKAGES += exsplus128
+pkg_exsplus128_name = exsplus128
+pkg_exsplus128_description = Xorshift128plus pseudo random number generator for Erlang.
+pkg_exsplus128_homepage = https://github.com/jj1bdx/exsplus128
+pkg_exsplus128_fetch = git
+pkg_exsplus128_repo = https://github.com/jj1bdx/exsplus128
+pkg_exsplus128_commit = master
+
+PACKAGES += ezmq
+pkg_ezmq_name = ezmq
+pkg_ezmq_description = zMQ implemented in Erlang
+pkg_ezmq_homepage = https://github.com/RoadRunnr/ezmq
+pkg_ezmq_fetch = git
+pkg_ezmq_repo = https://github.com/RoadRunnr/ezmq
+pkg_ezmq_commit = master
+
+PACKAGES += ezmtp
+pkg_ezmtp_name = ezmtp
+pkg_ezmtp_description = ZMTP protocol in pure Erlang.
+pkg_ezmtp_homepage = https://github.com/a13x/ezmtp
+pkg_ezmtp_fetch = git
+pkg_ezmtp_repo = https://github.com/a13x/ezmtp
+pkg_ezmtp_commit = master
+
+PACKAGES += fast_disk_log
+pkg_fast_disk_log_name = fast_disk_log
+pkg_fast_disk_log_description = Pool-based asynchronous Erlang disk logger
+pkg_fast_disk_log_homepage = https://github.com/lpgauth/fast_disk_log
+pkg_fast_disk_log_fetch = git
+pkg_fast_disk_log_repo = https://github.com/lpgauth/fast_disk_log
+pkg_fast_disk_log_commit = master
+
+PACKAGES += feeder
+pkg_feeder_name = feeder
+pkg_feeder_description = Stream parse RSS and Atom formatted XML feeds.
+pkg_feeder_homepage = https://github.com/michaelnisi/feeder
+pkg_feeder_fetch = git
+pkg_feeder_repo = https://github.com/michaelnisi/feeder
+pkg_feeder_commit = master
+
+PACKAGES += find_crate
+pkg_find_crate_name = find_crate
+pkg_find_crate_description = Find Rust libs and exes in Erlang application priv directory
+pkg_find_crate_homepage = https://github.com/goertzenator/find_crate
+pkg_find_crate_fetch = git
+pkg_find_crate_repo = https://github.com/goertzenator/find_crate
+pkg_find_crate_commit = master
+
+PACKAGES += fix
+pkg_fix_name = fix
+pkg_fix_description = FIX protocol (http://fixprotocol.org/) implementation.
+pkg_fix_homepage = https://github.com/maxlapshin/fix
+pkg_fix_fetch = git
+pkg_fix_repo = https://github.com/maxlapshin/fix
+pkg_fix_commit = master
+
+PACKAGES += flower
+pkg_flower_name = flower
+pkg_flower_description = FlowER - an Erlang OpenFlow development platform
+pkg_flower_homepage = https://github.com/travelping/flower
+pkg_flower_fetch = git
+pkg_flower_repo = https://github.com/travelping/flower
+pkg_flower_commit = master
+
+PACKAGES += fn
+pkg_fn_name = fn
+pkg_fn_description = Function utilities for Erlang
+pkg_fn_homepage = https://github.com/reiddraper/fn
+pkg_fn_fetch = git
+pkg_fn_repo = https://github.com/reiddraper/fn
+pkg_fn_commit = master
+
+PACKAGES += folsom
+pkg_folsom_name = folsom
+pkg_folsom_description = Expose Erlang Events and Metrics
+pkg_folsom_homepage = https://github.com/boundary/folsom
+pkg_folsom_fetch = git
+pkg_folsom_repo = https://github.com/boundary/folsom
+pkg_folsom_commit = master
+
+PACKAGES += folsom_cowboy
+pkg_folsom_cowboy_name = folsom_cowboy
+pkg_folsom_cowboy_description = A Cowboy based Folsom HTTP Wrapper.
+pkg_folsom_cowboy_homepage = https://github.com/boundary/folsom_cowboy
+pkg_folsom_cowboy_fetch = git
+pkg_folsom_cowboy_repo = https://github.com/boundary/folsom_cowboy
+pkg_folsom_cowboy_commit = master
+
+PACKAGES += folsomite
+pkg_folsomite_name = folsomite
+pkg_folsomite_description = blow up your graphite / riemann server with folsom metrics
+pkg_folsomite_homepage = https://github.com/campanja/folsomite
+pkg_folsomite_fetch = git
+pkg_folsomite_repo = https://github.com/campanja/folsomite
+pkg_folsomite_commit = master
+
+PACKAGES += fs
+pkg_fs_name = fs
+pkg_fs_description = Erlang FileSystem Listener
+pkg_fs_homepage = https://github.com/synrc/fs
+pkg_fs_fetch = git
+pkg_fs_repo = https://github.com/synrc/fs
+pkg_fs_commit = master
+
+PACKAGES += fuse
+pkg_fuse_name = fuse
+pkg_fuse_description = A Circuit Breaker for Erlang
+pkg_fuse_homepage = https://github.com/jlouis/fuse
+pkg_fuse_fetch = git
+pkg_fuse_repo = https://github.com/jlouis/fuse
+pkg_fuse_commit = master
+
+PACKAGES += gcm
+pkg_gcm_name = gcm
+pkg_gcm_description = An Erlang application for Google Cloud Messaging
+pkg_gcm_homepage = https://github.com/pdincau/gcm-erlang
+pkg_gcm_fetch = git
+pkg_gcm_repo = https://github.com/pdincau/gcm-erlang
+pkg_gcm_commit = master
+
+PACKAGES += gcprof
+pkg_gcprof_name = gcprof
+pkg_gcprof_description = Garbage Collection profiler for Erlang
+pkg_gcprof_homepage = https://github.com/knutin/gcprof
+pkg_gcprof_fetch = git
+pkg_gcprof_repo = https://github.com/knutin/gcprof
+pkg_gcprof_commit = master
+
+PACKAGES += geas
+pkg_geas_name = geas
+pkg_geas_description = Guess Erlang Application Scattering
+pkg_geas_homepage = https://github.com/crownedgrouse/geas
+pkg_geas_fetch = git
+pkg_geas_repo = https://github.com/crownedgrouse/geas
+pkg_geas_commit = master
+
+PACKAGES += geef
+pkg_geef_name = geef
+pkg_geef_description = Git NEEEEF (Erlang NIF)
+pkg_geef_homepage = https://github.com/carlosmn/geef
+pkg_geef_fetch = git
+pkg_geef_repo = https://github.com/carlosmn/geef
+pkg_geef_commit = master
+
+PACKAGES += gen_coap
+pkg_gen_coap_name = gen_coap
+pkg_gen_coap_description = Generic Erlang CoAP Client/Server
+pkg_gen_coap_homepage = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_fetch = git
+pkg_gen_coap_repo = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_commit = master
+
+PACKAGES += gen_cycle
+pkg_gen_cycle_name = gen_cycle
+pkg_gen_cycle_description = Simple, generic OTP behaviour for recurring tasks
+pkg_gen_cycle_homepage = https://github.com/aerosol/gen_cycle
+pkg_gen_cycle_fetch = git
+pkg_gen_cycle_repo = https://github.com/aerosol/gen_cycle
+pkg_gen_cycle_commit = develop
+
+PACKAGES += gen_icmp
+pkg_gen_icmp_name = gen_icmp
+pkg_gen_icmp_description = Erlang interface to ICMP sockets
+pkg_gen_icmp_homepage = https://github.com/msantos/gen_icmp
+pkg_gen_icmp_fetch = git
+pkg_gen_icmp_repo = https://github.com/msantos/gen_icmp
+pkg_gen_icmp_commit = master
+
+PACKAGES += gen_leader
+pkg_gen_leader_name = gen_leader
+pkg_gen_leader_description = leader election behavior
+pkg_gen_leader_homepage = https://github.com/garret-smith/gen_leader_revival
+pkg_gen_leader_fetch = git
+pkg_gen_leader_repo = https://github.com/garret-smith/gen_leader_revival
+pkg_gen_leader_commit = master
+
+PACKAGES += gen_nb_server
+pkg_gen_nb_server_name = gen_nb_server
+pkg_gen_nb_server_description = OTP behavior for writing non-blocking servers
+pkg_gen_nb_server_homepage = https://github.com/kevsmith/gen_nb_server
+pkg_gen_nb_server_fetch = git
+pkg_gen_nb_server_repo = https://github.com/kevsmith/gen_nb_server
+pkg_gen_nb_server_commit = master
+
+PACKAGES += gen_paxos
+pkg_gen_paxos_name = gen_paxos
+pkg_gen_paxos_description = An Erlang/OTP-style implementation of the PAXOS distributed consensus protocol
+pkg_gen_paxos_homepage = https://github.com/gburd/gen_paxos
+pkg_gen_paxos_fetch = git
+pkg_gen_paxos_repo = https://github.com/gburd/gen_paxos
+pkg_gen_paxos_commit = master
+
+PACKAGES += gen_rpc
+pkg_gen_rpc_name = gen_rpc
+pkg_gen_rpc_description = A scalable RPC library for Erlang-VM based languages
+pkg_gen_rpc_homepage = https://github.com/priestjim/gen_rpc
+pkg_gen_rpc_fetch = git
+pkg_gen_rpc_repo = https://github.com/priestjim/gen_rpc.git
+pkg_gen_rpc_commit = master
+
+PACKAGES += gen_smtp
+pkg_gen_smtp_name = gen_smtp
+pkg_gen_smtp_description = A generic Erlang SMTP server and client that can be extended via callback modules
+pkg_gen_smtp_homepage = https://github.com/Vagabond/gen_smtp
+pkg_gen_smtp_fetch = git
+pkg_gen_smtp_repo = https://github.com/Vagabond/gen_smtp
+pkg_gen_smtp_commit = master
+
+PACKAGES += gen_tracker
+pkg_gen_tracker_name = gen_tracker
+pkg_gen_tracker_description = supervisor with ets handling of children and their metadata
+pkg_gen_tracker_homepage = https://github.com/erlyvideo/gen_tracker
+pkg_gen_tracker_fetch = git
+pkg_gen_tracker_repo = https://github.com/erlyvideo/gen_tracker
+pkg_gen_tracker_commit = master
+
+PACKAGES += gen_unix
+pkg_gen_unix_name = gen_unix
+pkg_gen_unix_description = Erlang Unix socket interface
+pkg_gen_unix_homepage = https://github.com/msantos/gen_unix
+pkg_gen_unix_fetch = git
+pkg_gen_unix_repo = https://github.com/msantos/gen_unix
+pkg_gen_unix_commit = master
+
+PACKAGES += geode
+pkg_geode_name = geode
+pkg_geode_description = geohash/proximity lookup in pure, uncut erlang.
+pkg_geode_homepage = https://github.com/bradfordw/geode
+pkg_geode_fetch = git
+pkg_geode_repo = https://github.com/bradfordw/geode
+pkg_geode_commit = master
+
+PACKAGES += getopt
+pkg_getopt_name = getopt
+pkg_getopt_description = Module to parse command line arguments using the GNU getopt syntax
+pkg_getopt_homepage = https://github.com/jcomellas/getopt
+pkg_getopt_fetch = git
+pkg_getopt_repo = https://github.com/jcomellas/getopt
+pkg_getopt_commit = master
+
+PACKAGES += gettext
+pkg_gettext_name = gettext
+pkg_gettext_description = Erlang internationalization library.
+pkg_gettext_homepage = https://github.com/etnt/gettext
+pkg_gettext_fetch = git
+pkg_gettext_repo = https://github.com/etnt/gettext
+pkg_gettext_commit = master
+
+PACKAGES += giallo
+pkg_giallo_name = giallo
+pkg_giallo_description = Small and flexible web framework on top of Cowboy
+pkg_giallo_homepage = https://github.com/kivra/giallo
+pkg_giallo_fetch = git
+pkg_giallo_repo = https://github.com/kivra/giallo
+pkg_giallo_commit = master
+
+PACKAGES += gin
+pkg_gin_name = gin
+pkg_gin_description = The guards and for Erlang parse_transform
+pkg_gin_homepage = https://github.com/mad-cocktail/gin
+pkg_gin_fetch = git
+pkg_gin_repo = https://github.com/mad-cocktail/gin
+pkg_gin_commit = master
+
+PACKAGES += gitty
+pkg_gitty_name = gitty
+pkg_gitty_description = Git access in erlang
+pkg_gitty_homepage = https://github.com/maxlapshin/gitty
+pkg_gitty_fetch = git
+pkg_gitty_repo = https://github.com/maxlapshin/gitty
+pkg_gitty_commit = master
+
+PACKAGES += gold_fever
+pkg_gold_fever_name = gold_fever
+pkg_gold_fever_description = A Treasure Hunt for Erlangers
+pkg_gold_fever_homepage = https://github.com/inaka/gold_fever
+pkg_gold_fever_fetch = git
+pkg_gold_fever_repo = https://github.com/inaka/gold_fever
+pkg_gold_fever_commit = master
+
+PACKAGES += gpb
+pkg_gpb_name = gpb
+pkg_gpb_description = A Google Protobuf implementation for Erlang
+pkg_gpb_homepage = https://github.com/tomas-abrahamsson/gpb
+pkg_gpb_fetch = git
+pkg_gpb_repo = https://github.com/tomas-abrahamsson/gpb
+pkg_gpb_commit = master
+
+PACKAGES += gproc
+pkg_gproc_name = gproc
+pkg_gproc_description = Extended process registry for Erlang
+pkg_gproc_homepage = https://github.com/uwiger/gproc
+pkg_gproc_fetch = git
+pkg_gproc_repo = https://github.com/uwiger/gproc
+pkg_gproc_commit = master
+
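+# Index entries only record a default commit (master for gproc above).
+# A project can pin its own revision without redefining the whole
+# entry. A minimal sketch, assuming a project Makefile that lists gproc
+# in DEPS; the tag is a placeholder, not a recommendation:
+#
+#     DEPS = gproc
+#     dep_gproc_commit = 0.8.0
+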
+PACKAGES += grapherl
+pkg_grapherl_name = grapherl
+pkg_grapherl_description = Create graphs of Erlang systems and programs
+pkg_grapherl_homepage = https://github.com/eproxus/grapherl
+pkg_grapherl_fetch = git
+pkg_grapherl_repo = https://github.com/eproxus/grapherl
+pkg_grapherl_commit = master
+
+PACKAGES += grpc
+pkg_grpc_name = grpc
+pkg_grpc_description = gRPC server in Erlang
+pkg_grpc_homepage = https://github.com/Bluehouse-Technology/grpc
+pkg_grpc_fetch = git
+pkg_grpc_repo = https://github.com/Bluehouse-Technology/grpc
+pkg_grpc_commit = master
+
+PACKAGES += grpc_client
+pkg_grpc_client_name = grpc_client
+pkg_grpc_client_description = gRPC client in Erlang
+pkg_grpc_client_homepage = https://github.com/Bluehouse-Technology/grpc_client
+pkg_grpc_client_fetch = git
+pkg_grpc_client_repo = https://github.com/Bluehouse-Technology/grpc_client
+pkg_grpc_client_commit = master
+
+PACKAGES += gun
+pkg_gun_name = gun
+pkg_gun_description = Asynchronous SPDY, HTTP and Websocket client written in Erlang.
+pkg_gun_homepage = http://ninenines.eu
+pkg_gun_fetch = git
+pkg_gun_repo = https://github.com/ninenines/gun
+pkg_gun_commit = master
+
+PACKAGES += gut
+pkg_gut_name = gut
+pkg_gut_description = gut is a template printing, aka scaffolding, tool for Erlang. Like rails generate or yeoman
+pkg_gut_homepage = https://github.com/unbalancedparentheses/gut
+pkg_gut_fetch = git
+pkg_gut_repo = https://github.com/unbalancedparentheses/gut
+pkg_gut_commit = master
+
+PACKAGES += hackney
+pkg_hackney_name = hackney
+pkg_hackney_description = simple HTTP client in Erlang
+pkg_hackney_homepage = https://github.com/benoitc/hackney
+pkg_hackney_fetch = git
+pkg_hackney_repo = https://github.com/benoitc/hackney
+pkg_hackney_commit = master
+
+PACKAGES += hamcrest
+pkg_hamcrest_name = hamcrest
+pkg_hamcrest_description = Erlang port of Hamcrest
+pkg_hamcrest_homepage = https://github.com/hyperthunk/hamcrest-erlang
+pkg_hamcrest_fetch = git
+pkg_hamcrest_repo = https://github.com/hyperthunk/hamcrest-erlang
+pkg_hamcrest_commit = master
+
+PACKAGES += hanoidb
+pkg_hanoidb_name = hanoidb
+pkg_hanoidb_description = Erlang LSM BTree Storage
+pkg_hanoidb_homepage = https://github.com/krestenkrab/hanoidb
+pkg_hanoidb_fetch = git
+pkg_hanoidb_repo = https://github.com/krestenkrab/hanoidb
+pkg_hanoidb_commit = master
+
+PACKAGES += hottub
+pkg_hottub_name = hottub
+pkg_hottub_description = Permanent Erlang Worker Pool
+pkg_hottub_homepage = https://github.com/bfrog/hottub
+pkg_hottub_fetch = git
+pkg_hottub_repo = https://github.com/bfrog/hottub
+pkg_hottub_commit = master
+
+PACKAGES += hpack
+pkg_hpack_name = hpack
+pkg_hpack_description = HPACK Implementation for Erlang
+pkg_hpack_homepage = https://github.com/joedevivo/hpack
+pkg_hpack_fetch = git
+pkg_hpack_repo = https://github.com/joedevivo/hpack
+pkg_hpack_commit = master
+
+PACKAGES += hyper
+pkg_hyper_name = hyper
+pkg_hyper_description = Erlang implementation of HyperLogLog
+pkg_hyper_homepage = https://github.com/GameAnalytics/hyper
+pkg_hyper_fetch = git
+pkg_hyper_repo = https://github.com/GameAnalytics/hyper
+pkg_hyper_commit = master
+
+PACKAGES += i18n
+pkg_i18n_name = i18n
+pkg_i18n_description = International components for unicode from Erlang (unicode, date, string, number, format, locale, localization, transliteration, icu4e)
+pkg_i18n_homepage = https://github.com/erlang-unicode/i18n
+pkg_i18n_fetch = git
+pkg_i18n_repo = https://github.com/erlang-unicode/i18n
+pkg_i18n_commit = master
+
+PACKAGES += ibrowse
+pkg_ibrowse_name = ibrowse
+pkg_ibrowse_description = Erlang HTTP client
+pkg_ibrowse_homepage = https://github.com/cmullaparthi/ibrowse
+pkg_ibrowse_fetch = git
+pkg_ibrowse_repo = https://github.com/cmullaparthi/ibrowse
+pkg_ibrowse_commit = master
+
+PACKAGES += idna
+pkg_idna_name = idna
+pkg_idna_description = Erlang IDNA lib
+pkg_idna_homepage = https://github.com/benoitc/erlang-idna
+pkg_idna_fetch = git
+pkg_idna_repo = https://github.com/benoitc/erlang-idna
+pkg_idna_commit = master
+
+PACKAGES += ierlang
+pkg_ierlang_name = ierlang
+pkg_ierlang_description = An Erlang language kernel for IPython.
+pkg_ierlang_homepage = https://github.com/robbielynch/ierlang
+pkg_ierlang_fetch = git
+pkg_ierlang_repo = https://github.com/robbielynch/ierlang
+pkg_ierlang_commit = master
+
+PACKAGES += iota
+pkg_iota_name = iota
+pkg_iota_description = iota (Inter-dependency Objective Testing Apparatus) - a tool to enforce clean separation of responsibilities in Erlang code
+pkg_iota_homepage = https://github.com/jpgneves/iota
+pkg_iota_fetch = git
+pkg_iota_repo = https://github.com/jpgneves/iota
+pkg_iota_commit = master
+
+PACKAGES += irc_lib
+pkg_irc_lib_name = irc_lib
+pkg_irc_lib_description = Erlang irc client library
+pkg_irc_lib_homepage = https://github.com/OtpChatBot/irc_lib
+pkg_irc_lib_fetch = git
+pkg_irc_lib_repo = https://github.com/OtpChatBot/irc_lib
+pkg_irc_lib_commit = master
+
+PACKAGES += ircd
+pkg_ircd_name = ircd
+pkg_ircd_description = A pluggable IRC daemon application/library for Erlang.
+pkg_ircd_homepage = https://github.com/tonyg/erlang-ircd
+pkg_ircd_fetch = git
+pkg_ircd_repo = https://github.com/tonyg/erlang-ircd
+pkg_ircd_commit = master
+
+PACKAGES += iris
+pkg_iris_name = iris
+pkg_iris_description = Iris Erlang binding
+pkg_iris_homepage = https://github.com/project-iris/iris-erl
+pkg_iris_fetch = git
+pkg_iris_repo = https://github.com/project-iris/iris-erl
+pkg_iris_commit = master
+
+PACKAGES += iso8601
+pkg_iso8601_name = iso8601
+pkg_iso8601_description = Erlang ISO 8601 date formatter/parser
+pkg_iso8601_homepage = https://github.com/seansawyer/erlang_iso8601
+pkg_iso8601_fetch = git
+pkg_iso8601_repo = https://github.com/seansawyer/erlang_iso8601
+pkg_iso8601_commit = master
+
+PACKAGES += jamdb_sybase
+pkg_jamdb_sybase_name = jamdb_sybase
+pkg_jamdb_sybase_description = Erlang driver for SAP Sybase ASE
+pkg_jamdb_sybase_homepage = https://github.com/erlangbureau/jamdb_sybase
+pkg_jamdb_sybase_fetch = git
+pkg_jamdb_sybase_repo = https://github.com/erlangbureau/jamdb_sybase
+pkg_jamdb_sybase_commit = master
+
+PACKAGES += jerg
+pkg_jerg_name = jerg
+pkg_jerg_description = JSON Schema to Erlang Records Generator
+pkg_jerg_homepage = https://github.com/ddossot/jerg
+pkg_jerg_fetch = git
+pkg_jerg_repo = https://github.com/ddossot/jerg
+pkg_jerg_commit = master
+
+PACKAGES += jesse
+pkg_jesse_name = jesse
+pkg_jesse_description = jesse (JSon Schema Erlang) is an implementation of a json schema validator for Erlang.
+pkg_jesse_homepage = https://github.com/for-GET/jesse
+pkg_jesse_fetch = git
+pkg_jesse_repo = https://github.com/for-GET/jesse
+pkg_jesse_commit = master
+
+PACKAGES += jiffy
+pkg_jiffy_name = jiffy
+pkg_jiffy_description = JSON NIFs for Erlang.
+pkg_jiffy_homepage = https://github.com/davisp/jiffy
+pkg_jiffy_fetch = git
+pkg_jiffy_repo = https://github.com/davisp/jiffy
+pkg_jiffy_commit = master
+
+PACKAGES += jiffy_v
+pkg_jiffy_v_name = jiffy_v
+pkg_jiffy_v_description = JSON validation utility
+pkg_jiffy_v_homepage = https://github.com/shizzard/jiffy-v
+pkg_jiffy_v_fetch = git
+pkg_jiffy_v_repo = https://github.com/shizzard/jiffy-v
+pkg_jiffy_v_commit = master
+
+PACKAGES += jobs
+pkg_jobs_name = jobs
+pkg_jobs_description = A job scheduler for load regulation
+pkg_jobs_homepage = https://github.com/esl/jobs
+pkg_jobs_fetch = git
+pkg_jobs_repo = https://github.com/esl/jobs
+pkg_jobs_commit = master
+
+PACKAGES += joxa
+pkg_joxa_name = joxa
+pkg_joxa_description = A Modern Lisp for the Erlang VM
+pkg_joxa_homepage = https://github.com/joxa/joxa
+pkg_joxa_fetch = git
+pkg_joxa_repo = https://github.com/joxa/joxa
+pkg_joxa_commit = master
+
+PACKAGES += json
+pkg_json_name = json
+pkg_json_description = a high level json library for erlang (17.0+)
+pkg_json_homepage = https://github.com/talentdeficit/json
+pkg_json_fetch = git
+pkg_json_repo = https://github.com/talentdeficit/json
+pkg_json_commit = master
+
+PACKAGES += json_rec
+pkg_json_rec_name = json_rec
+pkg_json_rec_description = JSON to erlang record
+pkg_json_rec_homepage = https://github.com/justinkirby/json_rec
+pkg_json_rec_fetch = git
+pkg_json_rec_repo = https://github.com/justinkirby/json_rec
+pkg_json_rec_commit = master
+
+PACKAGES += jsone
+pkg_jsone_name = jsone
+pkg_jsone_description = An Erlang library for encoding, decoding JSON data.
+pkg_jsone_homepage = https://github.com/sile/jsone
+pkg_jsone_fetch = git
+pkg_jsone_repo = https://github.com/sile/jsone.git
+pkg_jsone_commit = master
+
+PACKAGES += jsonerl
+pkg_jsonerl_name = jsonerl
+pkg_jsonerl_description = yet another but slightly different erlang <-> json encoder/decoder
+pkg_jsonerl_homepage = https://github.com/lambder/jsonerl
+pkg_jsonerl_fetch = git
+pkg_jsonerl_repo = https://github.com/lambder/jsonerl
+pkg_jsonerl_commit = master
+
+PACKAGES += jsonpath
+pkg_jsonpath_name = jsonpath
+pkg_jsonpath_description = Fast Erlang JSON data retrieval and updates via javascript-like notation
+pkg_jsonpath_homepage = https://github.com/GeneStevens/jsonpath
+pkg_jsonpath_fetch = git
+pkg_jsonpath_repo = https://github.com/GeneStevens/jsonpath
+pkg_jsonpath_commit = master
+
+PACKAGES += jsonx
+pkg_jsonx_name = jsonx
+pkg_jsonx_description = JSONX is an Erlang library, written in C, for efficiently decoding and encoding JSON.
+pkg_jsonx_homepage = https://github.com/iskra/jsonx
+pkg_jsonx_fetch = git
+pkg_jsonx_repo = https://github.com/iskra/jsonx
+pkg_jsonx_commit = master
+
+PACKAGES += jsx
+pkg_jsx_name = jsx
+pkg_jsx_description = An Erlang application for consuming, producing and manipulating JSON.
+pkg_jsx_homepage = https://github.com/talentdeficit/jsx
+pkg_jsx_fetch = git
+pkg_jsx_repo = https://github.com/talentdeficit/jsx
+pkg_jsx_commit = master
+
+PACKAGES += kafka
+pkg_kafka_name = kafka
+pkg_kafka_description = Kafka consumer and producer in Erlang
+pkg_kafka_homepage = https://github.com/wooga/kafka-erlang
+pkg_kafka_fetch = git
+pkg_kafka_repo = https://github.com/wooga/kafka-erlang
+pkg_kafka_commit = master
+
+PACKAGES += kafka_protocol
+pkg_kafka_protocol_name = kafka_protocol
+pkg_kafka_protocol_description = Kafka protocol Erlang library
+pkg_kafka_protocol_homepage = https://github.com/klarna/kafka_protocol
+pkg_kafka_protocol_fetch = git
+pkg_kafka_protocol_repo = https://github.com/klarna/kafka_protocol.git
+pkg_kafka_protocol_commit = master
+
+PACKAGES += kai
+pkg_kai_name = kai
+pkg_kai_description = DHT storage by Takeshi Inoue
+pkg_kai_homepage = https://github.com/synrc/kai
+pkg_kai_fetch = git
+pkg_kai_repo = https://github.com/synrc/kai
+pkg_kai_commit = master
+
+PACKAGES += katja
+pkg_katja_name = katja
+pkg_katja_description = A simple Riemann client written in Erlang.
+pkg_katja_homepage = https://github.com/nifoc/katja
+pkg_katja_fetch = git
+pkg_katja_repo = https://github.com/nifoc/katja
+pkg_katja_commit = master
+
+PACKAGES += kdht
+pkg_kdht_name = kdht
+pkg_kdht_description = kdht is an erlang DHT implementation
+pkg_kdht_homepage = https://github.com/kevinlynx/kdht
+pkg_kdht_fetch = git
+pkg_kdht_repo = https://github.com/kevinlynx/kdht
+pkg_kdht_commit = master
+
+PACKAGES += key2value
+pkg_key2value_name = key2value
+pkg_key2value_description = Erlang 2-way map
+pkg_key2value_homepage = https://github.com/okeuday/key2value
+pkg_key2value_fetch = git
+pkg_key2value_repo = https://github.com/okeuday/key2value
+pkg_key2value_commit = master
+
+PACKAGES += keys1value
+pkg_keys1value_name = keys1value
+pkg_keys1value_description = Erlang set associative map for key lists
+pkg_keys1value_homepage = https://github.com/okeuday/keys1value
+pkg_keys1value_fetch = git
+pkg_keys1value_repo = https://github.com/okeuday/keys1value
+pkg_keys1value_commit = master
+
+PACKAGES += kinetic
+pkg_kinetic_name = kinetic
+pkg_kinetic_description = Erlang Kinesis Client
+pkg_kinetic_homepage = https://github.com/AdRoll/kinetic
+pkg_kinetic_fetch = git
+pkg_kinetic_repo = https://github.com/AdRoll/kinetic
+pkg_kinetic_commit = master
+
+PACKAGES += kjell
+pkg_kjell_name = kjell
+pkg_kjell_description = Erlang Shell
+pkg_kjell_homepage = https://github.com/karlll/kjell
+pkg_kjell_fetch = git
+pkg_kjell_repo = https://github.com/karlll/kjell
+pkg_kjell_commit = master
+
+PACKAGES += kraken
+pkg_kraken_name = kraken
+pkg_kraken_description = Distributed Pubsub Server for Realtime Apps
+pkg_kraken_homepage = https://github.com/Asana/kraken
+pkg_kraken_fetch = git
+pkg_kraken_repo = https://github.com/Asana/kraken
+pkg_kraken_commit = master
+
+PACKAGES += kucumberl
+pkg_kucumberl_name = kucumberl
+pkg_kucumberl_description = A pure-Erlang, open-source implementation of Cucumber
+pkg_kucumberl_homepage = https://github.com/openshine/kucumberl
+pkg_kucumberl_fetch = git
+pkg_kucumberl_repo = https://github.com/openshine/kucumberl
+pkg_kucumberl_commit = master
+
+PACKAGES += kvc
+pkg_kvc_name = kvc
+pkg_kvc_description = KVC - Key Value Coding for Erlang data structures
+pkg_kvc_homepage = https://github.com/etrepum/kvc
+pkg_kvc_fetch = git
+pkg_kvc_repo = https://github.com/etrepum/kvc
+pkg_kvc_commit = master
+
+PACKAGES += kvlists
+pkg_kvlists_name = kvlists
+pkg_kvlists_description = Lists of key-value pairs (decoded JSON) in Erlang
+pkg_kvlists_homepage = https://github.com/jcomellas/kvlists
+pkg_kvlists_fetch = git
+pkg_kvlists_repo = https://github.com/jcomellas/kvlists
+pkg_kvlists_commit = master
+
+PACKAGES += kvs
+pkg_kvs_name = kvs
+pkg_kvs_description = Container and Iterator
+pkg_kvs_homepage = https://github.com/synrc/kvs
+pkg_kvs_fetch = git
+pkg_kvs_repo = https://github.com/synrc/kvs
+pkg_kvs_commit = master
+
+PACKAGES += lager
+pkg_lager_name = lager
+pkg_lager_description = A logging framework for Erlang/OTP.
+pkg_lager_homepage = https://github.com/erlang-lager/lager
+pkg_lager_fetch = git
+pkg_lager_repo = https://github.com/erlang-lager/lager
+pkg_lager_commit = master
+
+PACKAGES += lager_amqp_backend
+pkg_lager_amqp_backend_name = lager_amqp_backend
+pkg_lager_amqp_backend_description = AMQP RabbitMQ Lager backend
+pkg_lager_amqp_backend_homepage = https://github.com/jbrisbin/lager_amqp_backend
+pkg_lager_amqp_backend_fetch = git
+pkg_lager_amqp_backend_repo = https://github.com/jbrisbin/lager_amqp_backend
+pkg_lager_amqp_backend_commit = master
+
+PACKAGES += lager_syslog
+pkg_lager_syslog_name = lager_syslog
+pkg_lager_syslog_description = Syslog backend for lager
+pkg_lager_syslog_homepage = https://github.com/erlang-lager/lager_syslog
+pkg_lager_syslog_fetch = git
+pkg_lager_syslog_repo = https://github.com/erlang-lager/lager_syslog
+pkg_lager_syslog_commit = master
+
+PACKAGES += lambdapad
+pkg_lambdapad_name = lambdapad
+pkg_lambdapad_description = Static site generator using Erlang. Yes, Erlang.
+pkg_lambdapad_homepage = https://github.com/gar1t/lambdapad
+pkg_lambdapad_fetch = git
+pkg_lambdapad_repo = https://github.com/gar1t/lambdapad
+pkg_lambdapad_commit = master
+
+PACKAGES += lasp
+pkg_lasp_name = lasp
+pkg_lasp_description = A Language for Distributed, Eventually Consistent Computations
+pkg_lasp_homepage = http://lasp-lang.org/
+pkg_lasp_fetch = git
+pkg_lasp_repo = https://github.com/lasp-lang/lasp
+pkg_lasp_commit = master
+
+PACKAGES += lasse
+pkg_lasse_name = lasse
+pkg_lasse_description = SSE handler for Cowboy
+pkg_lasse_homepage = https://github.com/inaka/lasse
+pkg_lasse_fetch = git
+pkg_lasse_repo = https://github.com/inaka/lasse
+pkg_lasse_commit = master
+
+PACKAGES += ldap
+pkg_ldap_name = ldap
+pkg_ldap_description = LDAP server written in Erlang
+pkg_ldap_homepage = https://github.com/spawnproc/ldap
+pkg_ldap_fetch = git
+pkg_ldap_repo = https://github.com/spawnproc/ldap
+pkg_ldap_commit = master
+
+PACKAGES += lethink
+pkg_lethink_name = lethink
+pkg_lethink_description = erlang driver for rethinkdb
+pkg_lethink_homepage = https://github.com/taybin/lethink
+pkg_lethink_fetch = git
+pkg_lethink_repo = https://github.com/taybin/lethink
+pkg_lethink_commit = master
+
+PACKAGES += lfe
+pkg_lfe_name = lfe
+pkg_lfe_description = Lisp Flavoured Erlang (LFE)
+pkg_lfe_homepage = https://github.com/rvirding/lfe
+pkg_lfe_fetch = git
+pkg_lfe_repo = https://github.com/rvirding/lfe
+pkg_lfe_commit = master
+
+PACKAGES += ling
+pkg_ling_name = ling
+pkg_ling_description = Erlang on Xen
+pkg_ling_homepage = https://github.com/cloudozer/ling
+pkg_ling_fetch = git
+pkg_ling_repo = https://github.com/cloudozer/ling
+pkg_ling_commit = master
+
+PACKAGES += live
+pkg_live_name = live
+pkg_live_description = Automated module and configuration reloader.
+pkg_live_homepage = http://ninenines.eu
+pkg_live_fetch = git
+pkg_live_repo = https://github.com/ninenines/live
+pkg_live_commit = master
+
+PACKAGES += lmq
+pkg_lmq_name = lmq
+pkg_lmq_description = Lightweight Message Queue
+pkg_lmq_homepage = https://github.com/iij/lmq
+pkg_lmq_fetch = git
+pkg_lmq_repo = https://github.com/iij/lmq
+pkg_lmq_commit = master
+
+PACKAGES += locker
+pkg_locker_name = locker
+pkg_locker_description = Atomic distributed 'check and set' for short-lived keys
+pkg_locker_homepage = https://github.com/wooga/locker
+pkg_locker_fetch = git
+pkg_locker_repo = https://github.com/wooga/locker
+pkg_locker_commit = master
+
+PACKAGES += locks
+pkg_locks_name = locks
+pkg_locks_description = A scalable, deadlock-resolving resource locker
+pkg_locks_homepage = https://github.com/uwiger/locks
+pkg_locks_fetch = git
+pkg_locks_repo = https://github.com/uwiger/locks
+pkg_locks_commit = master
+
+PACKAGES += log4erl
+pkg_log4erl_name = log4erl
+pkg_log4erl_description = A logger for erlang in the spirit of Log4J.
+pkg_log4erl_homepage = https://github.com/ahmednawras/log4erl
+pkg_log4erl_fetch = git
+pkg_log4erl_repo = https://github.com/ahmednawras/log4erl
+pkg_log4erl_commit = master
+
+PACKAGES += lol
+pkg_lol_name = lol
+pkg_lol_description = Lisp on erLang, and programming is fun again
+pkg_lol_homepage = https://github.com/b0oh/lol
+pkg_lol_fetch = git
+pkg_lol_repo = https://github.com/b0oh/lol
+pkg_lol_commit = master
+
+PACKAGES += lucid
+pkg_lucid_name = lucid
+pkg_lucid_description = HTTP/2 server written in Erlang
+pkg_lucid_homepage = https://github.com/tatsuhiro-t/lucid
+pkg_lucid_fetch = git
+pkg_lucid_repo = https://github.com/tatsuhiro-t/lucid
+pkg_lucid_commit = master
+
+PACKAGES += luerl
+pkg_luerl_name = luerl
+pkg_luerl_description = Lua in Erlang
+pkg_luerl_homepage = https://github.com/rvirding/luerl
+pkg_luerl_fetch = git
+pkg_luerl_repo = https://github.com/rvirding/luerl
+pkg_luerl_commit = develop
+
+PACKAGES += luwak
+pkg_luwak_name = luwak
+pkg_luwak_description = Large-object storage interface for Riak
+pkg_luwak_homepage = https://github.com/basho/luwak
+pkg_luwak_fetch = git
+pkg_luwak_repo = https://github.com/basho/luwak
+pkg_luwak_commit = master
+
+PACKAGES += lux
+pkg_lux_name = lux
+pkg_lux_description = Lux (LUcid eXpect scripting) simplifies test automation and provides an Expect-style execution of commands
+pkg_lux_homepage = https://github.com/hawk/lux
+pkg_lux_fetch = git
+pkg_lux_repo = https://github.com/hawk/lux
+pkg_lux_commit = master
+
+PACKAGES += machi
+pkg_machi_name = machi
+pkg_machi_description = Machi file store
+pkg_machi_homepage = https://github.com/basho/machi
+pkg_machi_fetch = git
+pkg_machi_repo = https://github.com/basho/machi
+pkg_machi_commit = master
+
+PACKAGES += mad
+pkg_mad_name = mad
+pkg_mad_description = Small and Fast Rebar Replacement
+pkg_mad_homepage = https://github.com/synrc/mad
+pkg_mad_fetch = git
+pkg_mad_repo = https://github.com/synrc/mad
+pkg_mad_commit = master
+
+PACKAGES += marina
+pkg_marina_name = marina
+pkg_marina_description = Non-blocking Erlang Cassandra CQL3 client
+pkg_marina_homepage = https://github.com/lpgauth/marina
+pkg_marina_fetch = git
+pkg_marina_repo = https://github.com/lpgauth/marina
+pkg_marina_commit = master
+
+PACKAGES += mavg
+pkg_mavg_name = mavg
+pkg_mavg_description = Erlang :: Exponential moving average library
+pkg_mavg_homepage = https://github.com/EchoTeam/mavg
+pkg_mavg_fetch = git
+pkg_mavg_repo = https://github.com/EchoTeam/mavg
+pkg_mavg_commit = master
+
+PACKAGES += mc_erl
+pkg_mc_erl_name = mc_erl
+pkg_mc_erl_description = mc-erl is a server for Minecraft 1.4.7 written in Erlang.
+pkg_mc_erl_homepage = https://github.com/clonejo/mc-erl
+pkg_mc_erl_fetch = git
+pkg_mc_erl_repo = https://github.com/clonejo/mc-erl
+pkg_mc_erl_commit = master
+
+PACKAGES += mcd
+pkg_mcd_name = mcd
+pkg_mcd_description = Fast memcached protocol client in pure Erlang
+pkg_mcd_homepage = https://github.com/EchoTeam/mcd
+pkg_mcd_fetch = git
+pkg_mcd_repo = https://github.com/EchoTeam/mcd
+pkg_mcd_commit = master
+
+PACKAGES += mcerlang
+pkg_mcerlang_name = mcerlang
+pkg_mcerlang_description = The McErlang model checker for Erlang
+pkg_mcerlang_homepage = https://github.com/fredlund/McErlang
+pkg_mcerlang_fetch = git
+pkg_mcerlang_repo = https://github.com/fredlund/McErlang
+pkg_mcerlang_commit = master
+
+PACKAGES += meck
+pkg_meck_name = meck
+pkg_meck_description = A mocking library for Erlang
+pkg_meck_homepage = https://github.com/eproxus/meck
+pkg_meck_fetch = git
+pkg_meck_repo = https://github.com/eproxus/meck
+pkg_meck_commit = master
+
+PACKAGES += mekao
+pkg_mekao_name = mekao
+pkg_mekao_description = SQL constructor
+pkg_mekao_homepage = https://github.com/ddosia/mekao
+pkg_mekao_fetch = git
+pkg_mekao_repo = https://github.com/ddosia/mekao
+pkg_mekao_commit = master
+
+PACKAGES += memo
+pkg_memo_name = memo
+pkg_memo_description = Erlang memoization server
+pkg_memo_homepage = https://github.com/tuncer/memo
+pkg_memo_fetch = git
+pkg_memo_repo = https://github.com/tuncer/memo
+pkg_memo_commit = master
+
+PACKAGES += merge_index
+pkg_merge_index_name = merge_index
+pkg_merge_index_description = MergeIndex is an Erlang library for storing ordered sets on disk. It is very similar to an SSTable (in Google's Bigtable) or an HFile (in Hadoop).
+pkg_merge_index_homepage = https://github.com/basho/merge_index
+pkg_merge_index_fetch = git
+pkg_merge_index_repo = https://github.com/basho/merge_index
+pkg_merge_index_commit = master
+
+PACKAGES += merl
+pkg_merl_name = merl
+pkg_merl_description = Metaprogramming in Erlang
+pkg_merl_homepage = https://github.com/richcarl/merl
+pkg_merl_fetch = git
+pkg_merl_repo = https://github.com/richcarl/merl
+pkg_merl_commit = master
+
+PACKAGES += mimerl
+pkg_mimerl_name = mimerl
+pkg_mimerl_description = library to handle mimetypes
+pkg_mimerl_homepage = https://github.com/benoitc/mimerl
+pkg_mimerl_fetch = git
+pkg_mimerl_repo = https://github.com/benoitc/mimerl
+pkg_mimerl_commit = master
+
+PACKAGES += mimetypes
+pkg_mimetypes_name = mimetypes
+pkg_mimetypes_description = Erlang MIME types library
+pkg_mimetypes_homepage = https://github.com/spawngrid/mimetypes
+pkg_mimetypes_fetch = git
+pkg_mimetypes_repo = https://github.com/spawngrid/mimetypes
+pkg_mimetypes_commit = master
+
+PACKAGES += mixer
+pkg_mixer_name = mixer
+pkg_mixer_description = Mix in functions from other modules
+pkg_mixer_homepage = https://github.com/chef/mixer
+pkg_mixer_fetch = git
+pkg_mixer_repo = https://github.com/chef/mixer
+pkg_mixer_commit = master
+
+PACKAGES += mochiweb
+pkg_mochiweb_name = mochiweb
+pkg_mochiweb_description = MochiWeb is an Erlang library for building lightweight HTTP servers.
+pkg_mochiweb_homepage = https://github.com/mochi/mochiweb
+pkg_mochiweb_fetch = git
+pkg_mochiweb_repo = https://github.com/mochi/mochiweb
+pkg_mochiweb_commit = master
+
+PACKAGES += mochiweb_xpath
+pkg_mochiweb_xpath_name = mochiweb_xpath
+pkg_mochiweb_xpath_description = XPath support for mochiweb's html parser
+pkg_mochiweb_xpath_homepage = https://github.com/retnuh/mochiweb_xpath
+pkg_mochiweb_xpath_fetch = git
+pkg_mochiweb_xpath_repo = https://github.com/retnuh/mochiweb_xpath
+pkg_mochiweb_xpath_commit = master
+
+PACKAGES += mockgyver
+pkg_mockgyver_name = mockgyver
+pkg_mockgyver_description = A mocking library for Erlang
+pkg_mockgyver_homepage = https://github.com/klajo/mockgyver
+pkg_mockgyver_fetch = git
+pkg_mockgyver_repo = https://github.com/klajo/mockgyver
+pkg_mockgyver_commit = master
+
+PACKAGES += modlib
+pkg_modlib_name = modlib
+pkg_modlib_description = Web framework based on Erlang's inets httpd
+pkg_modlib_homepage = https://github.com/gar1t/modlib
+pkg_modlib_fetch = git
+pkg_modlib_repo = https://github.com/gar1t/modlib
+pkg_modlib_commit = master
+
+PACKAGES += mongodb
+pkg_mongodb_name = mongodb
+pkg_mongodb_description = MongoDB driver for Erlang
+pkg_mongodb_homepage = https://github.com/comtihon/mongodb-erlang
+pkg_mongodb_fetch = git
+pkg_mongodb_repo = https://github.com/comtihon/mongodb-erlang
+pkg_mongodb_commit = master
+
+PACKAGES += mongooseim
+pkg_mongooseim_name = mongooseim
+pkg_mongooseim_description = Jabber / XMPP server with focus on performance and scalability, by Erlang Solutions
+pkg_mongooseim_homepage = https://www.erlang-solutions.com/products/mongooseim-massively-scalable-ejabberd-platform
+pkg_mongooseim_fetch = git
+pkg_mongooseim_repo = https://github.com/esl/MongooseIM
+pkg_mongooseim_commit = master
+
+PACKAGES += moyo
+pkg_moyo_name = moyo
+pkg_moyo_description = Erlang utility functions library
+pkg_moyo_homepage = https://github.com/dwango/moyo
+pkg_moyo_fetch = git
+pkg_moyo_repo = https://github.com/dwango/moyo
+pkg_moyo_commit = master
+
+PACKAGES += msgpack
+pkg_msgpack_name = msgpack
+pkg_msgpack_description = MessagePack (de)serializer implementation for Erlang
+pkg_msgpack_homepage = https://github.com/msgpack/msgpack-erlang
+pkg_msgpack_fetch = git
+pkg_msgpack_repo = https://github.com/msgpack/msgpack-erlang
+pkg_msgpack_commit = master
+
+PACKAGES += mu2
+pkg_mu2_name = mu2
+pkg_mu2_description = Erlang mutation testing tool
+pkg_mu2_homepage = https://github.com/ramsay-t/mu2
+pkg_mu2_fetch = git
+pkg_mu2_repo = https://github.com/ramsay-t/mu2
+pkg_mu2_commit = master
+
+PACKAGES += mustache
+pkg_mustache_name = mustache
+pkg_mustache_description = Mustache template engine for Erlang.
+pkg_mustache_homepage = https://github.com/mojombo/mustache.erl
+pkg_mustache_fetch = git
+pkg_mustache_repo = https://github.com/mojombo/mustache.erl
+pkg_mustache_commit = master
+
+PACKAGES += myproto
+pkg_myproto_name = myproto
+pkg_myproto_description = MySQL Server Protocol in Erlang
+pkg_myproto_homepage = https://github.com/altenwald/myproto
+pkg_myproto_fetch = git
+pkg_myproto_repo = https://github.com/altenwald/myproto
+pkg_myproto_commit = master
+
+PACKAGES += mysql
+pkg_mysql_name = mysql
+pkg_mysql_description = MySQL client library for Erlang/OTP
+pkg_mysql_homepage = https://github.com/mysql-otp/mysql-otp
+pkg_mysql_fetch = git
+pkg_mysql_repo = https://github.com/mysql-otp/mysql-otp
+pkg_mysql_commit = 1.5.1
+
+PACKAGES += n2o
+pkg_n2o_name = n2o
+pkg_n2o_description = WebSocket Application Server
+pkg_n2o_homepage = https://github.com/5HT/n2o
+pkg_n2o_fetch = git
+pkg_n2o_repo = https://github.com/5HT/n2o
+pkg_n2o_commit = master
+
+PACKAGES += nat_upnp
+pkg_nat_upnp_name = nat_upnp
+pkg_nat_upnp_description = Erlang library to map an internal port to an external one using UPnP IGD
+pkg_nat_upnp_homepage = https://github.com/benoitc/nat_upnp
+pkg_nat_upnp_fetch = git
+pkg_nat_upnp_repo = https://github.com/benoitc/nat_upnp
+pkg_nat_upnp_commit = master
+
+PACKAGES += neo4j
+pkg_neo4j_name = neo4j
+pkg_neo4j_description = Erlang client library for Neo4J.
+pkg_neo4j_homepage = https://github.com/dmitriid/neo4j-erlang
+pkg_neo4j_fetch = git
+pkg_neo4j_repo = https://github.com/dmitriid/neo4j-erlang
+pkg_neo4j_commit = master
+
+PACKAGES += neotoma
+pkg_neotoma_name = neotoma
+pkg_neotoma_description = Erlang library and packrat parser-generator for parsing expression grammars.
+pkg_neotoma_homepage = https://github.com/seancribbs/neotoma
+pkg_neotoma_fetch = git
+pkg_neotoma_repo = https://github.com/seancribbs/neotoma
+pkg_neotoma_commit = master
+
+PACKAGES += newrelic
+pkg_newrelic_name = newrelic
+pkg_newrelic_description = Erlang library for sending metrics to New Relic
+pkg_newrelic_homepage = https://github.com/wooga/newrelic-erlang
+pkg_newrelic_fetch = git
+pkg_newrelic_repo = https://github.com/wooga/newrelic-erlang
+pkg_newrelic_commit = master
+
+PACKAGES += nifty
+pkg_nifty_name = nifty
+pkg_nifty_description = Erlang NIF wrapper generator
+pkg_nifty_homepage = https://github.com/parapluu/nifty
+pkg_nifty_fetch = git
+pkg_nifty_repo = https://github.com/parapluu/nifty
+pkg_nifty_commit = master
+
+PACKAGES += nitrogen_core
+pkg_nitrogen_core_name = nitrogen_core
+pkg_nitrogen_core_description = The core Nitrogen library.
+pkg_nitrogen_core_homepage = http://nitrogenproject.com/
+pkg_nitrogen_core_fetch = git
+pkg_nitrogen_core_repo = https://github.com/nitrogen/nitrogen_core
+pkg_nitrogen_core_commit = master
+
+PACKAGES += nkbase
+pkg_nkbase_name = nkbase
+pkg_nkbase_description = NkBASE distributed database
+pkg_nkbase_homepage = https://github.com/Nekso/nkbase
+pkg_nkbase_fetch = git
+pkg_nkbase_repo = https://github.com/Nekso/nkbase
+pkg_nkbase_commit = develop
+
+PACKAGES += nkdocker
+pkg_nkdocker_name = nkdocker
+pkg_nkdocker_description = Erlang Docker client
+pkg_nkdocker_homepage = https://github.com/Nekso/nkdocker
+pkg_nkdocker_fetch = git
+pkg_nkdocker_repo = https://github.com/Nekso/nkdocker
+pkg_nkdocker_commit = master
+
+PACKAGES += nkpacket
+pkg_nkpacket_name = nkpacket
+pkg_nkpacket_description = Generic Erlang transport layer
+pkg_nkpacket_homepage = https://github.com/Nekso/nkpacket
+pkg_nkpacket_fetch = git
+pkg_nkpacket_repo = https://github.com/Nekso/nkpacket
+pkg_nkpacket_commit = master
+
+PACKAGES += nksip
+pkg_nksip_name = nksip
+pkg_nksip_description = Erlang SIP application server
+pkg_nksip_homepage = https://github.com/kalta/nksip
+pkg_nksip_fetch = git
+pkg_nksip_repo = https://github.com/kalta/nksip
+pkg_nksip_commit = master
+
+PACKAGES += nodefinder
+pkg_nodefinder_name = nodefinder
+pkg_nodefinder_description = automatic node discovery via UDP multicast
+pkg_nodefinder_homepage = https://github.com/erlanger/nodefinder
+pkg_nodefinder_fetch = git
+pkg_nodefinder_repo = https://github.com/okeuday/nodefinder
+pkg_nodefinder_commit = master
+
+PACKAGES += nprocreg
+pkg_nprocreg_name = nprocreg
+pkg_nprocreg_description = Minimal Distributed Erlang Process Registry
+pkg_nprocreg_homepage = http://nitrogenproject.com/
+pkg_nprocreg_fetch = git
+pkg_nprocreg_repo = https://github.com/nitrogen/nprocreg
+pkg_nprocreg_commit = master
+
+PACKAGES += oauth
+pkg_oauth_name = oauth
+pkg_oauth_description = An Erlang OAuth 1.0 implementation
+pkg_oauth_homepage = https://github.com/tim/erlang-oauth
+pkg_oauth_fetch = git
+pkg_oauth_repo = https://github.com/tim/erlang-oauth
+pkg_oauth_commit = master
+
+PACKAGES += oauth2
+pkg_oauth2_name = oauth2
+pkg_oauth2_description = Erlang OAuth2 implementation
+pkg_oauth2_homepage = https://github.com/kivra/oauth2
+pkg_oauth2_fetch = git
+pkg_oauth2_repo = https://github.com/kivra/oauth2
+pkg_oauth2_commit = master
+
+PACKAGES += observer_cli
+pkg_observer_cli_name = observer_cli
+pkg_observer_cli_description = Visualize Erlang/Elixir Nodes On The Command Line
+pkg_observer_cli_homepage = http://zhongwencool.github.io/observer_cli
+pkg_observer_cli_fetch = git
+pkg_observer_cli_repo = https://github.com/zhongwencool/observer_cli
+pkg_observer_cli_commit = master
+
+PACKAGES += octopus
+pkg_octopus_name = octopus
+pkg_octopus_description = Small and flexible pool manager written in Erlang
+pkg_octopus_homepage = https://github.com/erlangbureau/octopus
+pkg_octopus_fetch = git
+pkg_octopus_repo = https://github.com/erlangbureau/octopus
+pkg_octopus_commit = master
+
+PACKAGES += of_protocol
+pkg_of_protocol_name = of_protocol
+pkg_of_protocol_description = OpenFlow Protocol Library for Erlang
+pkg_of_protocol_homepage = https://github.com/FlowForwarding/of_protocol
+pkg_of_protocol_fetch = git
+pkg_of_protocol_repo = https://github.com/FlowForwarding/of_protocol
+pkg_of_protocol_commit = master
+
+PACKAGES += opencouch
+pkg_opencouch_name = couch
+pkg_opencouch_description = An embeddable document-oriented database compatible with Apache CouchDB
+pkg_opencouch_homepage = https://github.com/benoitc/opencouch
+pkg_opencouch_fetch = git
+pkg_opencouch_repo = https://github.com/benoitc/opencouch
+pkg_opencouch_commit = master
+
+PACKAGES += openflow
+pkg_openflow_name = openflow
+pkg_openflow_description = An OpenFlow controller written in pure erlang
+pkg_openflow_homepage = https://github.com/renatoaguiar/erlang-openflow
+pkg_openflow_fetch = git
+pkg_openflow_repo = https://github.com/renatoaguiar/erlang-openflow
+pkg_openflow_commit = master
+
+PACKAGES += openid
+pkg_openid_name = openid
+pkg_openid_description = Erlang OpenID
+pkg_openid_homepage = https://github.com/brendonh/erl_openid
+pkg_openid_fetch = git
+pkg_openid_repo = https://github.com/brendonh/erl_openid
+pkg_openid_commit = master
+
+PACKAGES += openpoker
+pkg_openpoker_name = openpoker
+pkg_openpoker_description = Genesis Texas hold'em Game Server
+pkg_openpoker_homepage = https://github.com/hpyhacking/openpoker
+pkg_openpoker_fetch = git
+pkg_openpoker_repo = https://github.com/hpyhacking/openpoker
+pkg_openpoker_commit = master
+
+PACKAGES += otpbp
+pkg_otpbp_name = otpbp
+pkg_otpbp_description = Parse transform for using new OTP functions in old Erlang/OTP releases (R15, R16, 17, 18, 19)
+pkg_otpbp_homepage = https://github.com/Ledest/otpbp
+pkg_otpbp_fetch = git
+pkg_otpbp_repo = https://github.com/Ledest/otpbp
+pkg_otpbp_commit = master
+
+PACKAGES += pal
+pkg_pal_name = pal
+pkg_pal_description = Pragmatic Authentication Library
+pkg_pal_homepage = https://github.com/manifest/pal
+pkg_pal_fetch = git
+pkg_pal_repo = https://github.com/manifest/pal
+pkg_pal_commit = master
+
+PACKAGES += parse_trans
+pkg_parse_trans_name = parse_trans
+pkg_parse_trans_description = Parse transform utilities for Erlang
+pkg_parse_trans_homepage = https://github.com/uwiger/parse_trans
+pkg_parse_trans_fetch = git
+pkg_parse_trans_repo = https://github.com/uwiger/parse_trans
+pkg_parse_trans_commit = master
+
+PACKAGES += parsexml
+pkg_parsexml_name = parsexml
+pkg_parsexml_description = Simple DOM XML parser with convenient and very simple API
+pkg_parsexml_homepage = https://github.com/maxlapshin/parsexml
+pkg_parsexml_fetch = git
+pkg_parsexml_repo = https://github.com/maxlapshin/parsexml
+pkg_parsexml_commit = master
+
+PACKAGES += partisan
+pkg_partisan_name = partisan
+pkg_partisan_description = High-performance, high-scalability distributed computing with Erlang and Elixir.
+pkg_partisan_homepage = http://partisan.cloud
+pkg_partisan_fetch = git
+pkg_partisan_repo = https://github.com/lasp-lang/partisan
+pkg_partisan_commit = master
+
+PACKAGES += pegjs
+pkg_pegjs_name = pegjs
+pkg_pegjs_description = An implementation of PEG.js grammar for Erlang.
+pkg_pegjs_homepage = https://github.com/dmitriid/pegjs
+pkg_pegjs_fetch = git
+pkg_pegjs_repo = https://github.com/dmitriid/pegjs
+pkg_pegjs_commit = master
+
+PACKAGES += percept2
+pkg_percept2_name = percept2
+pkg_percept2_description = Concurrent profiling tool for Erlang
+pkg_percept2_homepage = https://github.com/huiqing/percept2
+pkg_percept2_fetch = git
+pkg_percept2_repo = https://github.com/huiqing/percept2
+pkg_percept2_commit = master
+
+PACKAGES += pgo
+pkg_pgo_name = pgo
+pkg_pgo_description = Erlang Postgres client and connection pool
+pkg_pgo_homepage = https://github.com/erleans/pgo.git
+pkg_pgo_fetch = git
+pkg_pgo_repo = https://github.com/erleans/pgo.git
+pkg_pgo_commit = master
+
+PACKAGES += pgsql
+pkg_pgsql_name = pgsql
+pkg_pgsql_description = Erlang PostgreSQL driver
+pkg_pgsql_homepage = https://github.com/semiocast/pgsql
+pkg_pgsql_fetch = git
+pkg_pgsql_repo = https://github.com/semiocast/pgsql
+pkg_pgsql_commit = master
+
+PACKAGES += pkgx
+pkg_pkgx_name = pkgx
+pkg_pkgx_description = Build .deb packages from Erlang releases
+pkg_pkgx_homepage = https://github.com/arjan/pkgx
+pkg_pkgx_fetch = git
+pkg_pkgx_repo = https://github.com/arjan/pkgx
+pkg_pkgx_commit = master
+
+PACKAGES += pkt
+pkg_pkt_name = pkt
+pkg_pkt_description = Erlang network protocol library
+pkg_pkt_homepage = https://github.com/msantos/pkt
+pkg_pkt_fetch = git
+pkg_pkt_repo = https://github.com/msantos/pkt
+pkg_pkt_commit = master
+
+PACKAGES += plain_fsm
+pkg_plain_fsm_name = plain_fsm
+pkg_plain_fsm_description = A behaviour/support library for writing plain Erlang FSMs.
+pkg_plain_fsm_homepage = https://github.com/uwiger/plain_fsm
+pkg_plain_fsm_fetch = git
+pkg_plain_fsm_repo = https://github.com/uwiger/plain_fsm
+pkg_plain_fsm_commit = master
+
+PACKAGES += plumtree
+pkg_plumtree_name = plumtree
+pkg_plumtree_description = Epidemic Broadcast Trees
+pkg_plumtree_homepage = https://github.com/helium/plumtree
+pkg_plumtree_fetch = git
+pkg_plumtree_repo = https://github.com/helium/plumtree
+pkg_plumtree_commit = master
+
+PACKAGES += pmod_transform
+pkg_pmod_transform_name = pmod_transform
+pkg_pmod_transform_description = Parse transform for parameterized modules
+pkg_pmod_transform_homepage = https://github.com/erlang/pmod_transform
+pkg_pmod_transform_fetch = git
+pkg_pmod_transform_repo = https://github.com/erlang/pmod_transform
+pkg_pmod_transform_commit = master
+
+PACKAGES += pobox
+pkg_pobox_name = pobox
+pkg_pobox_description = External buffer processes to protect against mailbox overflow in Erlang
+pkg_pobox_homepage = https://github.com/ferd/pobox
+pkg_pobox_fetch = git
+pkg_pobox_repo = https://github.com/ferd/pobox
+pkg_pobox_commit = master
+
+PACKAGES += ponos
+pkg_ponos_name = ponos
+pkg_ponos_description = ponos is a simple yet powerful load generator written in erlang
+pkg_ponos_homepage = https://github.com/klarna/ponos
+pkg_ponos_fetch = git
+pkg_ponos_repo = https://github.com/klarna/ponos
+pkg_ponos_commit = master
+
+PACKAGES += poolboy
+pkg_poolboy_name = poolboy
+pkg_poolboy_description = A hunky Erlang worker pool factory
+pkg_poolboy_homepage = https://github.com/devinus/poolboy
+pkg_poolboy_fetch = git
+pkg_poolboy_repo = https://github.com/devinus/poolboy
+pkg_poolboy_commit = master
+
+PACKAGES += pooler
+pkg_pooler_name = pooler
+pkg_pooler_description = An OTP Process Pool Application
+pkg_pooler_homepage = https://github.com/seth/pooler
+pkg_pooler_fetch = git
+pkg_pooler_repo = https://github.com/seth/pooler
+pkg_pooler_commit = master
+
+PACKAGES += pqueue
+pkg_pqueue_name = pqueue
+pkg_pqueue_description = Erlang Priority Queues
+pkg_pqueue_homepage = https://github.com/okeuday/pqueue
+pkg_pqueue_fetch = git
+pkg_pqueue_repo = https://github.com/okeuday/pqueue
+pkg_pqueue_commit = master
+
+PACKAGES += procket
+pkg_procket_name = procket
+pkg_procket_description = Erlang interface to low level socket operations
+pkg_procket_homepage = http://blog.listincomprehension.com/search/label/procket
+pkg_procket_fetch = git
+pkg_procket_repo = https://github.com/msantos/procket
+pkg_procket_commit = master
+
+PACKAGES += prometheus
+pkg_prometheus_name = prometheus
+pkg_prometheus_description = Prometheus.io client in Erlang
+pkg_prometheus_homepage = https://github.com/deadtrickster/prometheus.erl
+pkg_prometheus_fetch = git
+pkg_prometheus_repo = https://github.com/deadtrickster/prometheus.erl
+pkg_prometheus_commit = master
+
+PACKAGES += prop
+pkg_prop_name = prop
+pkg_prop_description = An Erlang code scaffolding and generator system.
+pkg_prop_homepage = https://github.com/nuex/prop
+pkg_prop_fetch = git
+pkg_prop_repo = https://github.com/nuex/prop
+pkg_prop_commit = master
+
+PACKAGES += proper
+pkg_proper_name = proper
+pkg_proper_description = PropEr: a QuickCheck-inspired property-based testing tool for Erlang.
+pkg_proper_homepage = http://proper.softlab.ntua.gr
+pkg_proper_fetch = git
+pkg_proper_repo = https://github.com/manopapad/proper
+pkg_proper_commit = master
+
+PACKAGES += props
+pkg_props_name = props
+pkg_props_description = Property structure library
+pkg_props_homepage = https://github.com/greyarea/props
+pkg_props_fetch = git
+pkg_props_repo = https://github.com/greyarea/props
+pkg_props_commit = master
+
+PACKAGES += protobuffs
+pkg_protobuffs_name = protobuffs
+pkg_protobuffs_description = An implementation of Google's Protocol Buffers for Erlang, based on ngerakines/erlang_protobuffs.
+pkg_protobuffs_homepage = https://github.com/basho/erlang_protobuffs
+pkg_protobuffs_fetch = git
+pkg_protobuffs_repo = https://github.com/basho/erlang_protobuffs
+pkg_protobuffs_commit = master
+
+PACKAGES += psycho
+pkg_psycho_name = psycho
+pkg_psycho_description = HTTP server that provides a WSGI-like interface for applications and middleware.
+pkg_psycho_homepage = https://github.com/gar1t/psycho
+pkg_psycho_fetch = git
+pkg_psycho_repo = https://github.com/gar1t/psycho
+pkg_psycho_commit = master
+
+PACKAGES += purity
+pkg_purity_name = purity
+pkg_purity_description = A side-effect analyzer for Erlang
+pkg_purity_homepage = https://github.com/mpitid/purity
+pkg_purity_fetch = git
+pkg_purity_repo = https://github.com/mpitid/purity
+pkg_purity_commit = master
+
+PACKAGES += push_service
+pkg_push_service_name = push_service
+pkg_push_service_description = Push service
+pkg_push_service_homepage = https://github.com/hairyhum/push_service
+pkg_push_service_fetch = git
+pkg_push_service_repo = https://github.com/hairyhum/push_service
+pkg_push_service_commit = master
+
+PACKAGES += qdate
+pkg_qdate_name = qdate
+pkg_qdate_description = Date, time, and timezone parsing, formatting, and conversion for Erlang.
+pkg_qdate_homepage = https://github.com/choptastic/qdate
+pkg_qdate_fetch = git
+pkg_qdate_repo = https://github.com/choptastic/qdate
+pkg_qdate_commit = master
+
+PACKAGES += qrcode
+pkg_qrcode_name = qrcode
+pkg_qrcode_description = QR Code encoder in Erlang
+pkg_qrcode_homepage = https://github.com/komone/qrcode
+pkg_qrcode_fetch = git
+pkg_qrcode_repo = https://github.com/komone/qrcode
+pkg_qrcode_commit = master
+
+PACKAGES += quest
+pkg_quest_name = quest
+pkg_quest_description = Learn Erlang through this set of challenges. An interactive system for getting to know Erlang.
+pkg_quest_homepage = https://github.com/eriksoe/ErlangQuest
+pkg_quest_fetch = git
+pkg_quest_repo = https://github.com/eriksoe/ErlangQuest
+pkg_quest_commit = master
+
+PACKAGES += quickrand
+pkg_quickrand_name = quickrand
+pkg_quickrand_description = Quick Erlang Random Number Generation
+pkg_quickrand_homepage = https://github.com/okeuday/quickrand
+pkg_quickrand_fetch = git
+pkg_quickrand_repo = https://github.com/okeuday/quickrand
+pkg_quickrand_commit = master
+
+PACKAGES += rabbit
+pkg_rabbit_name = rabbit
+pkg_rabbit_description = RabbitMQ Server
+pkg_rabbit_homepage = https://www.rabbitmq.com/
+pkg_rabbit_fetch = git
+pkg_rabbit_repo = https://github.com/rabbitmq/rabbitmq-server.git
+pkg_rabbit_commit = master
+
+PACKAGES += rabbit_exchange_type_riak
+pkg_rabbit_exchange_type_riak_name = rabbit_exchange_type_riak
+pkg_rabbit_exchange_type_riak_description = Custom RabbitMQ exchange type for sticking messages in Riak
+pkg_rabbit_exchange_type_riak_homepage = https://github.com/jbrisbin/riak-exchange
+pkg_rabbit_exchange_type_riak_fetch = git
+pkg_rabbit_exchange_type_riak_repo = https://github.com/jbrisbin/riak-exchange
+pkg_rabbit_exchange_type_riak_commit = master
+
+PACKAGES += rack
+pkg_rack_name = rack
+pkg_rack_description = Rack handler for erlang
+pkg_rack_homepage = https://github.com/erlyvideo/rack
+pkg_rack_fetch = git
+pkg_rack_repo = https://github.com/erlyvideo/rack
+pkg_rack_commit = master
+
+PACKAGES += radierl
+pkg_radierl_name = radierl
+pkg_radierl_description = RADIUS protocol stack implemented in Erlang.
+pkg_radierl_homepage = https://github.com/vances/radierl
+pkg_radierl_fetch = git
+pkg_radierl_repo = https://github.com/vances/radierl
+pkg_radierl_commit = master
+
+PACKAGES += rafter
+pkg_rafter_name = rafter
+pkg_rafter_description = An Erlang library application which implements the Raft consensus protocol
+pkg_rafter_homepage = https://github.com/andrewjstone/rafter
+pkg_rafter_fetch = git
+pkg_rafter_repo = https://github.com/andrewjstone/rafter
+pkg_rafter_commit = master
+
+PACKAGES += ranch
+pkg_ranch_name = ranch
+pkg_ranch_description = Socket acceptor pool for TCP protocols.
+pkg_ranch_homepage = http://ninenines.eu
+pkg_ranch_fetch = git
+pkg_ranch_repo = https://github.com/ninenines/ranch
+pkg_ranch_commit = 1.2.1
+
+PACKAGES += rbeacon
+pkg_rbeacon_name = rbeacon
+pkg_rbeacon_description = LAN discovery and presence in Erlang.
+pkg_rbeacon_homepage = https://github.com/refuge/rbeacon
+pkg_rbeacon_fetch = git
+pkg_rbeacon_repo = https://github.com/refuge/rbeacon
+pkg_rbeacon_commit = master
+
+PACKAGES += rebar
+pkg_rebar_name = rebar
+pkg_rebar_description = Erlang build tool that makes it easy to compile and test Erlang applications, port drivers and releases.
+pkg_rebar_homepage = http://www.rebar3.org
+pkg_rebar_fetch = git
+pkg_rebar_repo = https://github.com/rebar/rebar3
+pkg_rebar_commit = master
+
+PACKAGES += rebus
+pkg_rebus_name = rebus
+pkg_rebus_description = A stupid simple, internal, pub/sub event bus written in and for Erlang.
+pkg_rebus_homepage = https://github.com/olle/rebus
+pkg_rebus_fetch = git
+pkg_rebus_repo = https://github.com/olle/rebus
+pkg_rebus_commit = master
+
+PACKAGES += rec2json
+pkg_rec2json_name = rec2json
+pkg_rec2json_description = Compile erlang record definitions into modules to convert them to/from json easily.
+pkg_rec2json_homepage = https://github.com/lordnull/rec2json
+pkg_rec2json_fetch = git
+pkg_rec2json_repo = https://github.com/lordnull/rec2json
+pkg_rec2json_commit = master
+
+PACKAGES += recon
+pkg_recon_name = recon
+pkg_recon_description = Collection of functions and scripts to debug Erlang in production.
+pkg_recon_homepage = https://github.com/ferd/recon
+pkg_recon_fetch = git
+pkg_recon_repo = https://github.com/ferd/recon
+pkg_recon_commit = master
+
+PACKAGES += record_info
+pkg_record_info_name = record_info
+pkg_record_info_description = Convert between record and proplist
+pkg_record_info_homepage = https://github.com/bipthelin/erlang-record_info
+pkg_record_info_fetch = git
+pkg_record_info_repo = https://github.com/bipthelin/erlang-record_info
+pkg_record_info_commit = master
+
+PACKAGES += redgrid
+pkg_redgrid_name = redgrid
+pkg_redgrid_description = automatic Erlang node discovery via redis
+pkg_redgrid_homepage = https://github.com/jkvor/redgrid
+pkg_redgrid_fetch = git
+pkg_redgrid_repo = https://github.com/jkvor/redgrid
+pkg_redgrid_commit = master
+
+PACKAGES += redo
+pkg_redo_name = redo
+pkg_redo_description = pipelined erlang redis client
+pkg_redo_homepage = https://github.com/jkvor/redo
+pkg_redo_fetch = git
+pkg_redo_repo = https://github.com/jkvor/redo
+pkg_redo_commit = master
+
+PACKAGES += reload_mk
+pkg_reload_mk_name = reload_mk
+pkg_reload_mk_description = Live reload plugin for erlang.mk.
+pkg_reload_mk_homepage = https://github.com/bullno1/reload.mk
+pkg_reload_mk_fetch = git
+pkg_reload_mk_repo = https://github.com/bullno1/reload.mk
+pkg_reload_mk_commit = master
+
+PACKAGES += reltool_util
+pkg_reltool_util_name = reltool_util
+pkg_reltool_util_description = Erlang reltool utility functionality application
+pkg_reltool_util_homepage = https://github.com/okeuday/reltool_util
+pkg_reltool_util_fetch = git
+pkg_reltool_util_repo = https://github.com/okeuday/reltool_util
+pkg_reltool_util_commit = master
+
+PACKAGES += relx
+pkg_relx_name = relx
+pkg_relx_description = Sane, simple release creation for Erlang
+pkg_relx_homepage = https://github.com/erlware/relx
+pkg_relx_fetch = git
+pkg_relx_repo = https://github.com/erlware/relx
+pkg_relx_commit = master
+
+PACKAGES += resource_discovery
+pkg_resource_discovery_name = resource_discovery
+pkg_resource_discovery_description = An application used to dynamically discover resources present in an Erlang node cluster.
+pkg_resource_discovery_homepage = http://erlware.org/
+pkg_resource_discovery_fetch = git
+pkg_resource_discovery_repo = https://github.com/erlware/resource_discovery
+pkg_resource_discovery_commit = master
+
+PACKAGES += restc
+pkg_restc_name = restc
+pkg_restc_description = Erlang Rest Client
+pkg_restc_homepage = https://github.com/kivra/restclient
+pkg_restc_fetch = git
+pkg_restc_repo = https://github.com/kivra/restclient
+pkg_restc_commit = master
+
+PACKAGES += rfc4627_jsonrpc
+pkg_rfc4627_jsonrpc_name = rfc4627_jsonrpc
+pkg_rfc4627_jsonrpc_description = Erlang RFC4627 (JSON) codec and JSON-RPC server implementation.
+pkg_rfc4627_jsonrpc_homepage = https://github.com/tonyg/erlang-rfc4627
+pkg_rfc4627_jsonrpc_fetch = git
+pkg_rfc4627_jsonrpc_repo = https://github.com/tonyg/erlang-rfc4627
+pkg_rfc4627_jsonrpc_commit = master
+
+PACKAGES += riak_control
+pkg_riak_control_name = riak_control
+pkg_riak_control_description = Webmachine-based administration interface for Riak.
+pkg_riak_control_homepage = https://github.com/basho/riak_control
+pkg_riak_control_fetch = git
+pkg_riak_control_repo = https://github.com/basho/riak_control
+pkg_riak_control_commit = master
+
+PACKAGES += riak_core
+pkg_riak_core_name = riak_core
+pkg_riak_core_description = Distributed systems infrastructure used by Riak.
+pkg_riak_core_homepage = https://github.com/basho/riak_core
+pkg_riak_core_fetch = git
+pkg_riak_core_repo = https://github.com/basho/riak_core
+pkg_riak_core_commit = master
+
+PACKAGES += riak_dt
+pkg_riak_dt_name = riak_dt
+pkg_riak_dt_description = Convergent replicated datatypes in Erlang
+pkg_riak_dt_homepage = https://github.com/basho/riak_dt
+pkg_riak_dt_fetch = git
+pkg_riak_dt_repo = https://github.com/basho/riak_dt
+pkg_riak_dt_commit = master
+
+PACKAGES += riak_ensemble
+pkg_riak_ensemble_name = riak_ensemble
+pkg_riak_ensemble_description = Multi-Paxos framework in Erlang
+pkg_riak_ensemble_homepage = https://github.com/basho/riak_ensemble
+pkg_riak_ensemble_fetch = git
+pkg_riak_ensemble_repo = https://github.com/basho/riak_ensemble
+pkg_riak_ensemble_commit = master
+
+PACKAGES += riak_kv
+pkg_riak_kv_name = riak_kv
+pkg_riak_kv_description = Riak Key/Value Store
+pkg_riak_kv_homepage = https://github.com/basho/riak_kv
+pkg_riak_kv_fetch = git
+pkg_riak_kv_repo = https://github.com/basho/riak_kv
+pkg_riak_kv_commit = master
+
+PACKAGES += riak_pg
+pkg_riak_pg_name = riak_pg
+pkg_riak_pg_description = Distributed process groups with riak_core.
+pkg_riak_pg_homepage = https://github.com/cmeiklejohn/riak_pg
+pkg_riak_pg_fetch = git
+pkg_riak_pg_repo = https://github.com/cmeiklejohn/riak_pg
+pkg_riak_pg_commit = master
+
+PACKAGES += riak_pipe
+pkg_riak_pipe_name = riak_pipe
+pkg_riak_pipe_description = Riak Pipelines
+pkg_riak_pipe_homepage = https://github.com/basho/riak_pipe
+pkg_riak_pipe_fetch = git
+pkg_riak_pipe_repo = https://github.com/basho/riak_pipe
+pkg_riak_pipe_commit = master
+
+PACKAGES += riak_sysmon
+pkg_riak_sysmon_name = riak_sysmon
+pkg_riak_sysmon_description = Simple OTP app for managing Erlang VM system_monitor event messages
+pkg_riak_sysmon_homepage = https://github.com/basho/riak_sysmon
+pkg_riak_sysmon_fetch = git
+pkg_riak_sysmon_repo = https://github.com/basho/riak_sysmon
+pkg_riak_sysmon_commit = master
+
+PACKAGES += riak_test
+pkg_riak_test_name = riak_test
+pkg_riak_test_description = I'm in your cluster, testing your riaks
+pkg_riak_test_homepage = https://github.com/basho/riak_test
+pkg_riak_test_fetch = git
+pkg_riak_test_repo = https://github.com/basho/riak_test
+pkg_riak_test_commit = master
+
+PACKAGES += riakc
+pkg_riakc_name = riakc
+pkg_riakc_description = Erlang clients for Riak.
+pkg_riakc_homepage = https://github.com/basho/riak-erlang-client
+pkg_riakc_fetch = git
+pkg_riakc_repo = https://github.com/basho/riak-erlang-client
+pkg_riakc_commit = master
+
+PACKAGES += riakhttpc
+pkg_riakhttpc_name = riakhttpc
+pkg_riakhttpc_description = Riak Erlang client using the HTTP interface
+pkg_riakhttpc_homepage = https://github.com/basho/riak-erlang-http-client
+pkg_riakhttpc_fetch = git
+pkg_riakhttpc_repo = https://github.com/basho/riak-erlang-http-client
+pkg_riakhttpc_commit = master
+
+PACKAGES += riaknostic
+pkg_riaknostic_name = riaknostic
+pkg_riaknostic_description = A diagnostic tool for Riak installations, to find common errors asap
+pkg_riaknostic_homepage = https://github.com/basho/riaknostic
+pkg_riaknostic_fetch = git
+pkg_riaknostic_repo = https://github.com/basho/riaknostic
+pkg_riaknostic_commit = master
+
+PACKAGES += riakpool
+pkg_riakpool_name = riakpool
+pkg_riakpool_description = erlang riak client pool
+pkg_riakpool_homepage = https://github.com/dweldon/riakpool
+pkg_riakpool_fetch = git
+pkg_riakpool_repo = https://github.com/dweldon/riakpool
+pkg_riakpool_commit = master
+
+PACKAGES += rivus_cep
+pkg_rivus_cep_name = rivus_cep
+pkg_rivus_cep_description = Complex event processing in Erlang
+pkg_rivus_cep_homepage = https://github.com/vascokk/rivus_cep
+pkg_rivus_cep_fetch = git
+pkg_rivus_cep_repo = https://github.com/vascokk/rivus_cep
+pkg_rivus_cep_commit = master
+
+PACKAGES += rlimit
+pkg_rlimit_name = rlimit
+pkg_rlimit_description = Magnus Klaar's rate limiter code from etorrent
+pkg_rlimit_homepage = https://github.com/jlouis/rlimit
+pkg_rlimit_fetch = git
+pkg_rlimit_repo = https://github.com/jlouis/rlimit
+pkg_rlimit_commit = master
+
+PACKAGES += rust_mk
+pkg_rust_mk_name = rust_mk
+pkg_rust_mk_description = Build Rust crates in an Erlang application
+pkg_rust_mk_homepage = https://github.com/goertzenator/rust.mk
+pkg_rust_mk_fetch = git
+pkg_rust_mk_repo = https://github.com/goertzenator/rust.mk
+pkg_rust_mk_commit = master
+
+PACKAGES += safetyvalve
+pkg_safetyvalve_name = safetyvalve
+pkg_safetyvalve_description = A safety valve for your erlang node
+pkg_safetyvalve_homepage = https://github.com/jlouis/safetyvalve
+pkg_safetyvalve_fetch = git
+pkg_safetyvalve_repo = https://github.com/jlouis/safetyvalve
+pkg_safetyvalve_commit = master
+
+PACKAGES += seestar
+pkg_seestar_name = seestar
+pkg_seestar_description = The Erlang client for Cassandra 1.2+ binary protocol
+pkg_seestar_homepage = https://github.com/iamaleksey/seestar
+pkg_seestar_fetch = git
+pkg_seestar_repo = https://github.com/iamaleksey/seestar
+pkg_seestar_commit = master
+
+PACKAGES += service
+pkg_service_name = service
+pkg_service_description = A minimal Erlang behavior for creating CloudI internal services
+pkg_service_homepage = http://cloudi.org/
+pkg_service_fetch = git
+pkg_service_repo = https://github.com/CloudI/service
+pkg_service_commit = master
+
+PACKAGES += setup
+pkg_setup_name = setup
+pkg_setup_description = Generic setup utility for Erlang-based systems
+pkg_setup_homepage = https://github.com/uwiger/setup
+pkg_setup_fetch = git
+pkg_setup_repo = https://github.com/uwiger/setup
+pkg_setup_commit = master
+
+PACKAGES += sext
+pkg_sext_name = sext
+pkg_sext_description = Sortable Erlang Term Serialization
+pkg_sext_homepage = https://github.com/uwiger/sext
+pkg_sext_fetch = git
+pkg_sext_repo = https://github.com/uwiger/sext
+pkg_sext_commit = master
+
+PACKAGES += sfmt
+pkg_sfmt_name = sfmt
+pkg_sfmt_description = SFMT pseudo random number generator for Erlang.
+pkg_sfmt_homepage = https://github.com/jj1bdx/sfmt-erlang
+pkg_sfmt_fetch = git
+pkg_sfmt_repo = https://github.com/jj1bdx/sfmt-erlang
+pkg_sfmt_commit = master
+
+PACKAGES += sgte
+pkg_sgte_name = sgte
+pkg_sgte_description = A simple Erlang Template Engine
+pkg_sgte_homepage = https://github.com/filippo/sgte
+pkg_sgte_fetch = git
+pkg_sgte_repo = https://github.com/filippo/sgte
+pkg_sgte_commit = master
+
+PACKAGES += sheriff
+pkg_sheriff_name = sheriff
+pkg_sheriff_description = Parse transform for type based validation.
+pkg_sheriff_homepage = http://ninenines.eu
+pkg_sheriff_fetch = git
+pkg_sheriff_repo = https://github.com/extend/sheriff
+pkg_sheriff_commit = master
+
+PACKAGES += shotgun
+pkg_shotgun_name = shotgun
+pkg_shotgun_description = better than just a gun
+pkg_shotgun_homepage = https://github.com/inaka/shotgun
+pkg_shotgun_fetch = git
+pkg_shotgun_repo = https://github.com/inaka/shotgun
+pkg_shotgun_commit = master
+
+PACKAGES += sidejob
+pkg_sidejob_name = sidejob
+pkg_sidejob_description = Parallel worker and capacity limiting library for Erlang
+pkg_sidejob_homepage = https://github.com/basho/sidejob
+pkg_sidejob_fetch = git
+pkg_sidejob_repo = https://github.com/basho/sidejob
+pkg_sidejob_commit = master
+
+PACKAGES += sieve
+pkg_sieve_name = sieve
+pkg_sieve_description = sieve is a simple TCP routing proxy (layer 7) in erlang
+pkg_sieve_homepage = https://github.com/benoitc/sieve
+pkg_sieve_fetch = git
+pkg_sieve_repo = https://github.com/benoitc/sieve
+pkg_sieve_commit = master
+
+PACKAGES += sighandler
+pkg_sighandler_name = sighandler
+pkg_sighandler_description = Handle UNIX signals in Erlang
+pkg_sighandler_homepage = https://github.com/jkingsbery/sighandler
+pkg_sighandler_fetch = git
+pkg_sighandler_repo = https://github.com/jkingsbery/sighandler
+pkg_sighandler_commit = master
+
+PACKAGES += simhash
+pkg_simhash_name = simhash
+pkg_simhash_description = Simhashing for Erlang -- hashing algorithm to find near-duplicates in binary data.
+pkg_simhash_homepage = https://github.com/ferd/simhash
+pkg_simhash_fetch = git
+pkg_simhash_repo = https://github.com/ferd/simhash
+pkg_simhash_commit = master
+
+PACKAGES += simple_bridge
+pkg_simple_bridge_name = simple_bridge
+pkg_simple_bridge_description = A simple, standardized interface library to Erlang HTTP Servers.
+pkg_simple_bridge_homepage = https://github.com/nitrogen/simple_bridge
+pkg_simple_bridge_fetch = git
+pkg_simple_bridge_repo = https://github.com/nitrogen/simple_bridge
+pkg_simple_bridge_commit = master
+
+PACKAGES += simple_oauth2
+pkg_simple_oauth2_name = simple_oauth2
+pkg_simple_oauth2_description = Simple erlang OAuth2 client module for any http server framework (Google, Facebook, Yandex, Vkontakte are preconfigured)
+pkg_simple_oauth2_homepage = https://github.com/virtan/simple_oauth2
+pkg_simple_oauth2_fetch = git
+pkg_simple_oauth2_repo = https://github.com/virtan/simple_oauth2
+pkg_simple_oauth2_commit = master
+
+PACKAGES += skel
+pkg_skel_name = skel
+pkg_skel_description = A Streaming Process-based Skeleton Library for Erlang
+pkg_skel_homepage = https://github.com/ParaPhrase/skel
+pkg_skel_fetch = git
+pkg_skel_repo = https://github.com/ParaPhrase/skel
+pkg_skel_commit = master
+
+PACKAGES += slack
+pkg_slack_name = slack
+pkg_slack_description = Minimal slack notification OTP library.
+pkg_slack_homepage = https://github.com/DonBranson/slack
+pkg_slack_fetch = git
+pkg_slack_repo = https://github.com/DonBranson/slack.git
+pkg_slack_commit = master
+
+PACKAGES += smother
+pkg_smother_name = smother
+pkg_smother_description = Extended code coverage metrics for Erlang.
+pkg_smother_homepage = https://ramsay-t.github.io/Smother/
+pkg_smother_fetch = git
+pkg_smother_repo = https://github.com/ramsay-t/Smother
+pkg_smother_commit = master
+
+PACKAGES += snappyer
+pkg_snappyer_name = snappyer
+pkg_snappyer_description = Snappy as a NIF for Erlang
+pkg_snappyer_homepage = https://github.com/zmstone/snappyer
+pkg_snappyer_fetch = git
+pkg_snappyer_repo = https://github.com/zmstone/snappyer.git
+pkg_snappyer_commit = master
+
+PACKAGES += social
+pkg_social_name = social
+pkg_social_description = Cowboy handler for social login via OAuth2 providers
+pkg_social_homepage = https://github.com/dvv/social
+pkg_social_fetch = git
+pkg_social_repo = https://github.com/dvv/social
+pkg_social_commit = master
+
+PACKAGES += spapi_router
+pkg_spapi_router_name = spapi_router
+pkg_spapi_router_description = Partially-connected Erlang clustering
+pkg_spapi_router_homepage = https://github.com/spilgames/spapi-router
+pkg_spapi_router_fetch = git
+pkg_spapi_router_repo = https://github.com/spilgames/spapi-router
+pkg_spapi_router_commit = master
+
+PACKAGES += sqerl
+pkg_sqerl_name = sqerl
+pkg_sqerl_description = An Erlang-flavoured SQL DSL
+pkg_sqerl_homepage = https://github.com/hairyhum/sqerl
+pkg_sqerl_fetch = git
+pkg_sqerl_repo = https://github.com/hairyhum/sqerl
+pkg_sqerl_commit = master
+
+PACKAGES += srly
+pkg_srly_name = srly
+pkg_srly_description = Native Erlang Unix serial interface
+pkg_srly_homepage = https://github.com/msantos/srly
+pkg_srly_fetch = git
+pkg_srly_repo = https://github.com/msantos/srly
+pkg_srly_commit = master
+
+PACKAGES += sshrpc
+pkg_sshrpc_name = sshrpc
+pkg_sshrpc_description = Erlang SSH RPC module (experimental)
+pkg_sshrpc_homepage = https://github.com/jj1bdx/sshrpc
+pkg_sshrpc_fetch = git
+pkg_sshrpc_repo = https://github.com/jj1bdx/sshrpc
+pkg_sshrpc_commit = master
+
+PACKAGES += stable
+pkg_stable_name = stable
+pkg_stable_description = Library of assorted helpers for Cowboy web server.
+pkg_stable_homepage = https://github.com/dvv/stable
+pkg_stable_fetch = git
+pkg_stable_repo = https://github.com/dvv/stable
+pkg_stable_commit = master
+
+PACKAGES += statebox
+pkg_statebox_name = statebox
+pkg_statebox_description = Erlang state monad with merge/conflict-resolution capabilities. Useful for Riak.
+pkg_statebox_homepage = https://github.com/mochi/statebox
+pkg_statebox_fetch = git
+pkg_statebox_repo = https://github.com/mochi/statebox
+pkg_statebox_commit = master
+
+PACKAGES += statebox_riak
+pkg_statebox_riak_name = statebox_riak
+pkg_statebox_riak_description = Convenience library that makes it easier to use statebox with riak, extracted from best practices in our production code at Mochi Media.
+pkg_statebox_riak_homepage = https://github.com/mochi/statebox_riak
+pkg_statebox_riak_fetch = git
+pkg_statebox_riak_repo = https://github.com/mochi/statebox_riak
+pkg_statebox_riak_commit = master
+
+PACKAGES += statman
+pkg_statman_name = statman
+pkg_statman_description = Efficiently collect massive volumes of metrics inside the Erlang VM
+pkg_statman_homepage = https://github.com/knutin/statman
+pkg_statman_fetch = git
+pkg_statman_repo = https://github.com/knutin/statman
+pkg_statman_commit = master
+
+PACKAGES += statsderl
+pkg_statsderl_name = statsderl
+pkg_statsderl_description = StatsD client (erlang)
+pkg_statsderl_homepage = https://github.com/lpgauth/statsderl
+pkg_statsderl_fetch = git
+pkg_statsderl_repo = https://github.com/lpgauth/statsderl
+pkg_statsderl_commit = master
+
+PACKAGES += stdinout_pool
+pkg_stdinout_pool_name = stdinout_pool
+pkg_stdinout_pool_description = stdinout_pool : stuff goes in, stuff goes out. there's never any miscommunication.
+pkg_stdinout_pool_homepage = https://github.com/mattsta/erlang-stdinout-pool
+pkg_stdinout_pool_fetch = git
+pkg_stdinout_pool_repo = https://github.com/mattsta/erlang-stdinout-pool
+pkg_stdinout_pool_commit = master
+
+PACKAGES += stockdb
+pkg_stockdb_name = stockdb
+pkg_stockdb_description = Database for storing Stock Exchange quotes in erlang
+pkg_stockdb_homepage = https://github.com/maxlapshin/stockdb
+pkg_stockdb_fetch = git
+pkg_stockdb_repo = https://github.com/maxlapshin/stockdb
+pkg_stockdb_commit = master
+
+PACKAGES += stripe
+pkg_stripe_name = stripe
+pkg_stripe_description = Erlang interface to the stripe.com API
+pkg_stripe_homepage = https://github.com/mattsta/stripe-erlang
+pkg_stripe_fetch = git
+pkg_stripe_repo = https://github.com/mattsta/stripe-erlang
+pkg_stripe_commit = v1
+
+PACKAGES += subproc
+pkg_subproc_name = subproc
+pkg_subproc_description = UNIX subprocess manager with {active,once|false} modes
+pkg_subproc_homepage = http://dozzie.jarowit.net/trac/wiki/subproc
+pkg_subproc_fetch = git
+pkg_subproc_repo = https://github.com/dozzie/subproc
+pkg_subproc_commit = v0.1.0
+
+PACKAGES += supervisor3
+pkg_supervisor3_name = supervisor3
+pkg_supervisor3_description = OTP supervisor with additional strategies
+pkg_supervisor3_homepage = https://github.com/klarna/supervisor3
+pkg_supervisor3_fetch = git
+pkg_supervisor3_repo = https://github.com/klarna/supervisor3.git
+pkg_supervisor3_commit = master
+
+PACKAGES += surrogate
+pkg_surrogate_name = surrogate
+pkg_surrogate_description = Proxy server written in erlang. Supports reverse proxy load balancing and forward proxy with http (including CONNECT), socks4, socks5, and transparent proxy modes.
+pkg_surrogate_homepage = https://github.com/skruger/Surrogate
+pkg_surrogate_fetch = git
+pkg_surrogate_repo = https://github.com/skruger/Surrogate
+pkg_surrogate_commit = master
+
+PACKAGES += swab
+pkg_swab_name = swab
+pkg_swab_description = General purpose buffer handling module
+pkg_swab_homepage = https://github.com/crownedgrouse/swab
+pkg_swab_fetch = git
+pkg_swab_repo = https://github.com/crownedgrouse/swab
+pkg_swab_commit = master
+
+PACKAGES += swarm
+pkg_swarm_name = swarm
+pkg_swarm_description = Fast and simple acceptor pool for Erlang
+pkg_swarm_homepage = https://github.com/jeremey/swarm
+pkg_swarm_fetch = git
+pkg_swarm_repo = https://github.com/jeremey/swarm
+pkg_swarm_commit = master
+
+PACKAGES += switchboard
+pkg_switchboard_name = switchboard
+pkg_switchboard_description = A framework for processing email using worker plugins.
+pkg_switchboard_homepage = https://github.com/thusfresh/switchboard
+pkg_switchboard_fetch = git
+pkg_switchboard_repo = https://github.com/thusfresh/switchboard
+pkg_switchboard_commit = master
+
+PACKAGES += syn
+pkg_syn_name = syn
+pkg_syn_description = A global Process Registry and Process Group manager for Erlang.
+pkg_syn_homepage = https://github.com/ostinelli/syn
+pkg_syn_fetch = git
+pkg_syn_repo = https://github.com/ostinelli/syn
+pkg_syn_commit = master
+
+PACKAGES += sync
+pkg_sync_name = sync
+pkg_sync_description = On-the-fly recompiling and reloading in Erlang.
+pkg_sync_homepage = https://github.com/rustyio/sync
+pkg_sync_fetch = git
+pkg_sync_repo = https://github.com/rustyio/sync
+pkg_sync_commit = master
+
+PACKAGES += syntaxerl
+pkg_syntaxerl_name = syntaxerl
+pkg_syntaxerl_description = Syntax checker for Erlang
+pkg_syntaxerl_homepage = https://github.com/ten0s/syntaxerl
+pkg_syntaxerl_fetch = git
+pkg_syntaxerl_repo = https://github.com/ten0s/syntaxerl
+pkg_syntaxerl_commit = master
+
+PACKAGES += syslog
+pkg_syslog_name = syslog
+pkg_syslog_description = Erlang port driver for interacting with syslog via syslog(3)
+pkg_syslog_homepage = https://github.com/Vagabond/erlang-syslog
+pkg_syslog_fetch = git
+pkg_syslog_repo = https://github.com/Vagabond/erlang-syslog
+pkg_syslog_commit = master
+
+PACKAGES += taskforce
+pkg_taskforce_name = taskforce
+pkg_taskforce_description = Erlang worker pools for controlled parallelisation of arbitrary tasks.
+pkg_taskforce_homepage = https://github.com/g-andrade/taskforce
+pkg_taskforce_fetch = git
+pkg_taskforce_repo = https://github.com/g-andrade/taskforce
+pkg_taskforce_commit = master
+
+PACKAGES += tddreloader
+pkg_tddreloader_name = tddreloader
+pkg_tddreloader_description = Shell utility for recompiling, reloading, and testing code as it changes
+pkg_tddreloader_homepage = https://github.com/version2beta/tddreloader
+pkg_tddreloader_fetch = git
+pkg_tddreloader_repo = https://github.com/version2beta/tddreloader
+pkg_tddreloader_commit = master
+
+PACKAGES += tempo
+pkg_tempo_name = tempo
+pkg_tempo_description = NIF-based date and time parsing and formatting for Erlang.
+pkg_tempo_homepage = https://github.com/selectel/tempo
+pkg_tempo_fetch = git
+pkg_tempo_repo = https://github.com/selectel/tempo
+pkg_tempo_commit = master
+
+PACKAGES += ticktick
+pkg_ticktick_name = ticktick
+pkg_ticktick_description = Ticktick is an ID generator for message services.
+pkg_ticktick_homepage = https://github.com/ericliang/ticktick
+pkg_ticktick_fetch = git
+pkg_ticktick_repo = https://github.com/ericliang/ticktick
+pkg_ticktick_commit = master
+
+PACKAGES += tinymq
+pkg_tinymq_name = tinymq
+pkg_tinymq_description = TinyMQ - a diminutive, in-memory message queue
+pkg_tinymq_homepage = https://github.com/ChicagoBoss/tinymq
+pkg_tinymq_fetch = git
+pkg_tinymq_repo = https://github.com/ChicagoBoss/tinymq
+pkg_tinymq_commit = master
+
+PACKAGES += tinymt
+pkg_tinymt_name = tinymt
+pkg_tinymt_description = TinyMT pseudo random number generator for Erlang.
+pkg_tinymt_homepage = https://github.com/jj1bdx/tinymt-erlang
+pkg_tinymt_fetch = git
+pkg_tinymt_repo = https://github.com/jj1bdx/tinymt-erlang
+pkg_tinymt_commit = master
+
+PACKAGES += tirerl
+pkg_tirerl_name = tirerl
+pkg_tirerl_description = Erlang interface to Elastic Search
+pkg_tirerl_homepage = https://github.com/inaka/tirerl
+pkg_tirerl_fetch = git
+pkg_tirerl_repo = https://github.com/inaka/tirerl
+pkg_tirerl_commit = master
+
+PACKAGES += toml
+pkg_toml_name = toml
+pkg_toml_description = TOML (0.4.0) config parser
+pkg_toml_homepage = http://dozzie.jarowit.net/trac/wiki/TOML
+pkg_toml_fetch = git
+pkg_toml_repo = https://github.com/dozzie/toml
+pkg_toml_commit = v0.2.0
+
+PACKAGES += traffic_tools
+pkg_traffic_tools_name = traffic_tools
+pkg_traffic_tools_description = Simple traffic limiting library
+pkg_traffic_tools_homepage = https://github.com/systra/traffic_tools
+pkg_traffic_tools_fetch = git
+pkg_traffic_tools_repo = https://github.com/systra/traffic_tools
+pkg_traffic_tools_commit = master
+
+PACKAGES += trails
+pkg_trails_name = trails
+pkg_trails_description = A couple of improvements over Cowboy Routes
+pkg_trails_homepage = http://inaka.github.io/cowboy-trails/
+pkg_trails_fetch = git
+pkg_trails_repo = https://github.com/inaka/cowboy-trails
+pkg_trails_commit = master
+
+PACKAGES += trane
+pkg_trane_name = trane
+pkg_trane_description = SAX-style parser for broken HTML, in Erlang
+pkg_trane_homepage = https://github.com/massemanet/trane
+pkg_trane_fetch = git
+pkg_trane_repo = https://github.com/massemanet/trane
+pkg_trane_commit = master
+
+PACKAGES += transit
+pkg_transit_name = transit
+pkg_transit_description = transit format for erlang
+pkg_transit_homepage = https://github.com/isaiah/transit-erlang
+pkg_transit_fetch = git
+pkg_transit_repo = https://github.com/isaiah/transit-erlang
+pkg_transit_commit = master
+
+PACKAGES += trie
+pkg_trie_name = trie
+pkg_trie_description = Erlang Trie Implementation
+pkg_trie_homepage = https://github.com/okeuday/trie
+pkg_trie_fetch = git
+pkg_trie_repo = https://github.com/okeuday/trie
+pkg_trie_commit = master
+
+PACKAGES += triq
+pkg_triq_name = triq
+pkg_triq_description = Trifork QuickCheck
+pkg_triq_homepage = https://triq.gitlab.io
+pkg_triq_fetch = git
+pkg_triq_repo = https://gitlab.com/triq/triq.git
+pkg_triq_commit = master
+
+PACKAGES += tunctl
+pkg_tunctl_name = tunctl
+pkg_tunctl_description = Erlang TUN/TAP interface
+pkg_tunctl_homepage = https://github.com/msantos/tunctl
+pkg_tunctl_fetch = git
+pkg_tunctl_repo = https://github.com/msantos/tunctl
+pkg_tunctl_commit = master
+
+PACKAGES += twerl
+pkg_twerl_name = twerl
+pkg_twerl_description = Erlang client for the Twitter Streaming API
+pkg_twerl_homepage = https://github.com/lucaspiller/twerl
+pkg_twerl_fetch = git
+pkg_twerl_repo = https://github.com/lucaspiller/twerl
+pkg_twerl_commit = oauth
+
+PACKAGES += twitter_erlang
+pkg_twitter_erlang_name = twitter_erlang
+pkg_twitter_erlang_description = An Erlang twitter client
+pkg_twitter_erlang_homepage = https://github.com/ngerakines/erlang_twitter
+pkg_twitter_erlang_fetch = git
+pkg_twitter_erlang_repo = https://github.com/ngerakines/erlang_twitter
+pkg_twitter_erlang_commit = master
+
+PACKAGES += ucol_nif
+pkg_ucol_nif_name = ucol_nif
+pkg_ucol_nif_description = ICU based collation Erlang module
+pkg_ucol_nif_homepage = https://github.com/refuge/ucol_nif
+pkg_ucol_nif_fetch = git
+pkg_ucol_nif_repo = https://github.com/refuge/ucol_nif
+pkg_ucol_nif_commit = master
+
+PACKAGES += unicorn
+pkg_unicorn_name = unicorn
+pkg_unicorn_description = Generic configuration server
+pkg_unicorn_homepage = https://github.com/shizzard/unicorn
+pkg_unicorn_fetch = git
+pkg_unicorn_repo = https://github.com/shizzard/unicorn
+pkg_unicorn_commit = master
+
+PACKAGES += unsplit
+pkg_unsplit_name = unsplit
+pkg_unsplit_description = Resolves conflicts in Mnesia after network splits
+pkg_unsplit_homepage = https://github.com/uwiger/unsplit
+pkg_unsplit_fetch = git
+pkg_unsplit_repo = https://github.com/uwiger/unsplit
+pkg_unsplit_commit = master
+
+PACKAGES += uuid
+pkg_uuid_name = uuid
+pkg_uuid_description = Erlang UUID Implementation
+pkg_uuid_homepage = https://github.com/okeuday/uuid
+pkg_uuid_fetch = git
+pkg_uuid_repo = https://github.com/okeuday/uuid
+pkg_uuid_commit = master
+
+PACKAGES += ux
+pkg_ux_name = ux
+pkg_ux_description = Unicode eXtension for Erlang (Strings, Collation)
+pkg_ux_homepage = https://github.com/erlang-unicode/ux
+pkg_ux_fetch = git
+pkg_ux_repo = https://github.com/erlang-unicode/ux
+pkg_ux_commit = master
+
+PACKAGES += vert
+pkg_vert_name = vert
+pkg_vert_description = Erlang binding to libvirt virtualization API
+pkg_vert_homepage = https://github.com/msantos/erlang-libvirt
+pkg_vert_fetch = git
+pkg_vert_repo = https://github.com/msantos/erlang-libvirt
+pkg_vert_commit = master
+
+PACKAGES += verx
+pkg_verx_name = verx
+pkg_verx_description = Erlang implementation of the libvirtd remote protocol
+pkg_verx_homepage = https://github.com/msantos/verx
+pkg_verx_fetch = git
+pkg_verx_repo = https://github.com/msantos/verx
+pkg_verx_commit = master
+
+PACKAGES += vmq_acl
+pkg_vmq_acl_name = vmq_acl
+pkg_vmq_acl_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_acl_homepage = https://verne.mq/
+pkg_vmq_acl_fetch = git
+pkg_vmq_acl_repo = https://github.com/erlio/vmq_acl
+pkg_vmq_acl_commit = master
+
+PACKAGES += vmq_bridge
+pkg_vmq_bridge_name = vmq_bridge
+pkg_vmq_bridge_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_bridge_homepage = https://verne.mq/
+pkg_vmq_bridge_fetch = git
+pkg_vmq_bridge_repo = https://github.com/erlio/vmq_bridge
+pkg_vmq_bridge_commit = master
+
+PACKAGES += vmq_graphite
+pkg_vmq_graphite_name = vmq_graphite
+pkg_vmq_graphite_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_graphite_homepage = https://verne.mq/
+pkg_vmq_graphite_fetch = git
+pkg_vmq_graphite_repo = https://github.com/erlio/vmq_graphite
+pkg_vmq_graphite_commit = master
+
+PACKAGES += vmq_passwd
+pkg_vmq_passwd_name = vmq_passwd
+pkg_vmq_passwd_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_passwd_homepage = https://verne.mq/
+pkg_vmq_passwd_fetch = git
+pkg_vmq_passwd_repo = https://github.com/erlio/vmq_passwd
+pkg_vmq_passwd_commit = master
+
+PACKAGES += vmq_server
+pkg_vmq_server_name = vmq_server
+pkg_vmq_server_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_server_homepage = https://verne.mq/
+pkg_vmq_server_fetch = git
+pkg_vmq_server_repo = https://github.com/erlio/vmq_server
+pkg_vmq_server_commit = master
+
+PACKAGES += vmq_snmp
+pkg_vmq_snmp_name = vmq_snmp
+pkg_vmq_snmp_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_snmp_homepage = https://verne.mq/
+pkg_vmq_snmp_fetch = git
+pkg_vmq_snmp_repo = https://github.com/erlio/vmq_snmp
+pkg_vmq_snmp_commit = master
+
+PACKAGES += vmq_systree
+pkg_vmq_systree_name = vmq_systree
+pkg_vmq_systree_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_systree_homepage = https://verne.mq/
+pkg_vmq_systree_fetch = git
+pkg_vmq_systree_repo = https://github.com/erlio/vmq_systree
+pkg_vmq_systree_commit = master
+
+PACKAGES += vmstats
+pkg_vmstats_name = vmstats
+pkg_vmstats_description = tiny Erlang app that works in conjunction with statsderl in order to generate information on the Erlang VM for graphite logs.
+pkg_vmstats_homepage = https://github.com/ferd/vmstats
+pkg_vmstats_fetch = git
+pkg_vmstats_repo = https://github.com/ferd/vmstats
+pkg_vmstats_commit = master
+
+PACKAGES += walrus
+pkg_walrus_name = walrus
+pkg_walrus_description = Walrus - Mustache-like Templating
+pkg_walrus_homepage = https://github.com/devinus/walrus
+pkg_walrus_fetch = git
+pkg_walrus_repo = https://github.com/devinus/walrus
+pkg_walrus_commit = master
+
+PACKAGES += webmachine
+pkg_webmachine_name = webmachine
+pkg_webmachine_description = A REST-based system for building web applications.
+pkg_webmachine_homepage = https://github.com/basho/webmachine
+pkg_webmachine_fetch = git
+pkg_webmachine_repo = https://github.com/basho/webmachine
+pkg_webmachine_commit = master
+
+PACKAGES += websocket_client
+pkg_websocket_client_name = websocket_client
+pkg_websocket_client_description = Erlang websocket client (ws and wss supported)
+pkg_websocket_client_homepage = https://github.com/jeremyong/websocket_client
+pkg_websocket_client_fetch = git
+pkg_websocket_client_repo = https://github.com/jeremyong/websocket_client
+pkg_websocket_client_commit = master
+
+PACKAGES += worker_pool
+pkg_worker_pool_name = worker_pool
+pkg_worker_pool_description = a simple erlang worker pool
+pkg_worker_pool_homepage = https://github.com/inaka/worker_pool
+pkg_worker_pool_fetch = git
+pkg_worker_pool_repo = https://github.com/inaka/worker_pool
+pkg_worker_pool_commit = master
+
+PACKAGES += wrangler
+pkg_wrangler_name = wrangler
+pkg_wrangler_description = Import of the Wrangler svn repository.
+pkg_wrangler_homepage = http://www.cs.kent.ac.uk/projects/wrangler/Home.html
+pkg_wrangler_fetch = git
+pkg_wrangler_repo = https://github.com/RefactoringTools/wrangler
+pkg_wrangler_commit = master
+
+PACKAGES += wsock
+pkg_wsock_name = wsock
+pkg_wsock_description = Erlang library to build WebSocket clients and servers
+pkg_wsock_homepage = https://github.com/madtrick/wsock
+pkg_wsock_fetch = git
+pkg_wsock_repo = https://github.com/madtrick/wsock
+pkg_wsock_commit = master
+
+PACKAGES += xhttpc
+pkg_xhttpc_name = xhttpc
+pkg_xhttpc_description = Extensible HTTP Client for Erlang
+pkg_xhttpc_homepage = https://github.com/seriyps/xhttpc
+pkg_xhttpc_fetch = git
+pkg_xhttpc_repo = https://github.com/seriyps/xhttpc
+pkg_xhttpc_commit = master
+
+PACKAGES += xref_runner
+pkg_xref_runner_name = xref_runner
+pkg_xref_runner_description = Erlang Xref Runner (inspired by rebar xref)
+pkg_xref_runner_homepage = https://github.com/inaka/xref_runner
+pkg_xref_runner_fetch = git
+pkg_xref_runner_repo = https://github.com/inaka/xref_runner
+pkg_xref_runner_commit = master
+
+PACKAGES += yamerl
+pkg_yamerl_name = yamerl
+pkg_yamerl_description = YAML 1.2 parser in pure Erlang
+pkg_yamerl_homepage = https://github.com/yakaz/yamerl
+pkg_yamerl_fetch = git
+pkg_yamerl_repo = https://github.com/yakaz/yamerl
+pkg_yamerl_commit = master
+
+PACKAGES += yamler
+pkg_yamler_name = yamler
+pkg_yamler_description = libyaml-based yaml loader for Erlang
+pkg_yamler_homepage = https://github.com/goertzenator/yamler
+pkg_yamler_fetch = git
+pkg_yamler_repo = https://github.com/goertzenator/yamler
+pkg_yamler_commit = master
+
+PACKAGES += yaws
+pkg_yaws_name = yaws
+pkg_yaws_description = Yaws webserver
+pkg_yaws_homepage = http://yaws.hyber.org
+pkg_yaws_fetch = git
+pkg_yaws_repo = https://github.com/klacke/yaws
+pkg_yaws_commit = master
+
+PACKAGES += zab_engine
+pkg_zab_engine_name = zab_engine
+pkg_zab_engine_description = ZAB protocol implementation in Erlang
+pkg_zab_engine_homepage = https://github.com/xinmingyao/zab_engine
+pkg_zab_engine_fetch = git
+pkg_zab_engine_repo = https://github.com/xinmingyao/zab_engine
+pkg_zab_engine_commit = master
+
+PACKAGES += zabbix_sender
+pkg_zabbix_sender_name = zabbix_sender
+pkg_zabbix_sender_description = Zabbix trapper for sending data to Zabbix in pure Erlang
+pkg_zabbix_sender_homepage = https://github.com/stalkermn/zabbix_sender
+pkg_zabbix_sender_fetch = git
+pkg_zabbix_sender_repo = https://github.com/stalkermn/zabbix_sender.git
+pkg_zabbix_sender_commit = master
+
+PACKAGES += zeta
+pkg_zeta_name = zeta
+pkg_zeta_description = HTTP access log parser in Erlang
+pkg_zeta_homepage = https://github.com/s1n4/zeta
+pkg_zeta_fetch = git
+pkg_zeta_repo = https://github.com/s1n4/zeta
+pkg_zeta_commit = master
+
+PACKAGES += zippers
+pkg_zippers_name = zippers
+pkg_zippers_description = A library for functional zipper data structures in Erlang.
+pkg_zippers_homepage = https://github.com/ferd/zippers
+pkg_zippers_fetch = git
+pkg_zippers_repo = https://github.com/ferd/zippers
+pkg_zippers_commit = master
+
+PACKAGES += zlists
+pkg_zlists_name = zlists
+pkg_zlists_description = Erlang lazy lists library.
+pkg_zlists_homepage = https://github.com/vjache/erlang-zlists
+pkg_zlists_fetch = git
+pkg_zlists_repo = https://github.com/vjache/erlang-zlists
+pkg_zlists_commit = master
+
+PACKAGES += zraft_lib
+pkg_zraft_lib_name = zraft_lib
+pkg_zraft_lib_description = Erlang Raft consensus protocol implementation
+pkg_zraft_lib_homepage = https://github.com/dreyk/zraft_lib
+pkg_zraft_lib_fetch = git
+pkg_zraft_lib_repo = https://github.com/dreyk/zraft_lib
+pkg_zraft_lib_commit = master
+
+PACKAGES += zucchini
+pkg_zucchini_name = zucchini
+pkg_zucchini_description = An Erlang INI parser
+pkg_zucchini_homepage = https://github.com/devinus/zucchini
+pkg_zucchini_fetch = git
+pkg_zucchini_repo = https://github.com/devinus/zucchini
+pkg_zucchini_commit = master
+
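+# A package from this index is used by simply listing its name in DEPS;
+# the pkg_* variables above then supply the fetch method, repository and
+# commit. For example ("yamerl" is just an illustration):
+#
+#   DEPS = yamerl
+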
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: search
+
+define pkg_print
+ $(verbose) printf "%s\n" \
+ $(if $(call core_eq,$(1),$(pkg_$(1)_name)),,"Pkg name: $(1)") \
+ "App name: $(pkg_$(1)_name)" \
+ "Description: $(pkg_$(1)_description)" \
+ "Home page: $(pkg_$(1)_homepage)" \
+ "Fetch with: $(pkg_$(1)_fetch)" \
+ "Repository: $(pkg_$(1)_repo)" \
+ "Commit: $(pkg_$(1)_commit)" \
+ ""
+
+endef
+
+search:
+ifdef q
+ $(foreach p,$(PACKAGES), \
+ $(if $(findstring $(call core_lc,$(q)),$(call core_lc,$(pkg_$(p)_name) $(pkg_$(p)_description))), \
+ $(call pkg_print,$(p))))
+else
+ $(foreach p,$(PACKAGES),$(call pkg_print,$(p)))
+endif
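+
+# For example, to list every package whose name or description mentions
+# "yaml" (the match is case-insensitive):
+#
+#   make search q=yaml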
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-deps clean-tmp-deps.log
+
+# Configuration.
+
+ifdef OTP_DEPS
+$(warning The variable OTP_DEPS is deprecated in favor of LOCAL_DEPS.)
+endif
+
+IGNORE_DEPS ?=
+export IGNORE_DEPS
+
+APPS_DIR ?= $(CURDIR)/apps
+export APPS_DIR
+
+DEPS_DIR ?= $(CURDIR)/deps
+export DEPS_DIR
+
+REBAR_DEPS_DIR = $(DEPS_DIR)
+export REBAR_DEPS_DIR
+
+REBAR_GIT ?= https://github.com/rebar/rebar
+REBAR_COMMIT ?= 576e12171ab8d69b048b827b92aa65d067deea01
+
+# External "early" plugins (see core/plugins.mk for regular plugins).
+# They both use the core_dep_plugin macro.
+
+define core_dep_plugin
+ifeq ($(2),$(PROJECT))
+-include $$(patsubst $(PROJECT)/%,%,$(1))
+else
+-include $(DEPS_DIR)/$(1)
+
+$(DEPS_DIR)/$(1): $(DEPS_DIR)/$(2) ;
+endif
+endef
+
+DEP_EARLY_PLUGINS ?=
+
+$(foreach p,$(DEP_EARLY_PLUGINS),\
+ $(eval $(if $(findstring /,$p),\
+ $(call core_dep_plugin,$p,$(firstword $(subst /, ,$p))),\
+ $(call core_dep_plugin,$p/early-plugins.mk,$p))))
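+
+# For example (dependency name and path are illustrative):
+# DEP_EARLY_PLUGINS = mydep includes $(DEPS_DIR)/mydep/early-plugins.mk,
+# while DEP_EARLY_PLUGINS = mydep/mk/early.mk includes
+# $(DEPS_DIR)/mydep/mk/early.mk.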
+
+# Query functions.
+
+query_fetch_method = $(if $(dep_$(1)),$(call _qfm_dep,$(word 1,$(dep_$(1)))),$(call _qfm_pkg,$(1)))
+_qfm_dep = $(if $(dep_fetch_$(1)),$(1),$(if $(IS_DEP),legacy,fail))
+_qfm_pkg = $(if $(pkg_$(1)_fetch),$(pkg_$(1)_fetch),fail)
+
+query_name = $(if $(dep_$(1)),$(1),$(if $(pkg_$(1)_name),$(pkg_$(1)_name),$(1)))
+
+query_repo = $(call _qr,$(1),$(call query_fetch_method,$(1)))
+_qr = $(if $(query_repo_$(2)),$(call query_repo_$(2),$(1)),$(call dep_repo,$(1)))
+
+query_repo_default = $(if $(dep_$(1)),$(word 2,$(dep_$(1))),$(pkg_$(1)_repo))
+query_repo_git = $(patsubst git://github.com/%,https://github.com/%,$(call query_repo_default,$(1)))
+query_repo_git-subfolder = $(call query_repo_git,$(1))
+query_repo_git-submodule = -
+query_repo_hg = $(call query_repo_default,$(1))
+query_repo_svn = $(call query_repo_default,$(1))
+query_repo_cp = $(call query_repo_default,$(1))
+query_repo_ln = $(call query_repo_default,$(1))
+query_repo_hex = https://hex.pm/packages/$(if $(word 3,$(dep_$(1))),$(word 3,$(dep_$(1))),$(1))
+query_repo_fail = -
+query_repo_legacy = -
+
+query_version = $(call _qv,$(1),$(call query_fetch_method,$(1)))
+_qv = $(if $(query_version_$(2)),$(call query_version_$(2),$(1)),$(call dep_commit,$(1)))
+
+query_version_default = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(word 3,$(dep_$(1))),$(pkg_$(1)_commit)))
+query_version_git = $(call query_version_default,$(1))
+query_version_git-subfolder = $(call query_version_git,$(1))
+query_version_git-submodule = -
+query_version_hg = $(call query_version_default,$(1))
+query_version_svn = -
+query_version_cp = -
+query_version_ln = -
+query_version_hex = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(word 2,$(dep_$(1))),$(pkg_$(1)_commit)))
+query_version_fail = -
+query_version_legacy = -
+
+query_extra = $(call _qe,$(1),$(call query_fetch_method,$(1)))
+_qe = $(if $(query_extra_$(2)),$(call query_extra_$(2),$(1)),-)
+
+query_extra_git = -
+query_extra_git-subfolder = $(if $(dep_$(1)),subfolder=$(word 4,$(dep_$(1))),-)
+query_extra_git-submodule = -
+query_extra_hg = -
+query_extra_svn = -
+query_extra_cp = -
+query_extra_ln = -
+query_extra_hex = $(if $(dep_$(1)),package-name=$(word 3,$(dep_$(1))),-)
+query_extra_fail = -
+query_extra_legacy = -
+
+query_absolute_path = $(addprefix $(DEPS_DIR)/,$(call query_name,$(1)))
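+
+# For example, given a dependency declared as (name, URL and version are
+# illustrative):
+#
+#   dep_cowboy = git https://github.com/ninenines/cowboy 2.9.0
+#
+# query_fetch_method returns "git", query_repo returns the repository URL
+# and query_version returns "2.9.0".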
+
+# Deprecated legacy query functions.
+dep_fetch = $(call query_fetch_method,$(1))
+dep_name = $(call query_name,$(1))
+dep_repo = $(call query_repo_git,$(1))
+dep_commit = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(if $(filter hex,$(word 1,$(dep_$(1)))),$(word 2,$(dep_$(1))),$(word 3,$(dep_$(1)))),$(pkg_$(1)_commit)))
+
+LOCAL_DEPS_DIRS = $(foreach a,$(LOCAL_DEPS),$(if $(wildcard $(APPS_DIR)/$(a)),$(APPS_DIR)/$(a)))
+ALL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(foreach dep,$(filter-out $(IGNORE_DEPS),$(BUILD_DEPS) $(DEPS)),$(call dep_name,$(dep))))
+
+# When we are calling an app directly we don't want to include it here;
+# otherwise it would be treated both as an app and as a top-level project.
+ALL_APPS_DIRS = $(if $(wildcard $(APPS_DIR)/),$(filter-out $(APPS_DIR),$(shell find $(APPS_DIR) -maxdepth 1 -type d)))
+ifdef ROOT_DIR
+ifndef IS_APP
+ALL_APPS_DIRS := $(filter-out $(APPS_DIR)/$(notdir $(CURDIR)),$(ALL_APPS_DIRS))
+endif
+endif
+
+ifeq ($(filter $(APPS_DIR) $(DEPS_DIR),$(subst :, ,$(ERL_LIBS))),)
+ifeq ($(ERL_LIBS),)
+ ERL_LIBS = $(APPS_DIR):$(DEPS_DIR)
+else
+ ERL_LIBS := $(ERL_LIBS):$(APPS_DIR):$(DEPS_DIR)
+endif
+endif
+export ERL_LIBS
+
+export NO_AUTOPATCH
+
+# Verbosity.
+
+dep_verbose_0 = @echo " DEP $1 ($(call dep_commit,$1))";
+dep_verbose_2 = set -x;
+dep_verbose = $(dep_verbose_$(V))
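+
+# With V=0 each fetched dependency prints a short " DEP " line; with V=2
+# the underlying shell commands are echoed instead, e.g.: make V=2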
+
+# Optimization: don't recompile deps unless truly necessary.
+
+ifndef IS_DEP
+ifneq ($(MAKELEVEL),0)
+$(shell rm -f ebin/dep_built)
+endif
+endif
+
+# Core targets.
+
+ALL_APPS_DIRS_TO_BUILD = $(if $(LOCAL_DEPS_DIRS)$(IS_APP),$(LOCAL_DEPS_DIRS),$(ALL_APPS_DIRS))
+
+apps:: $(ALL_APPS_DIRS) clean-tmp-deps.log | $(ERLANG_MK_TMP)
+# Create the ebin directory for all apps to make sure Erlang recognizes them
+# as proper OTP applications when using -include_lib. This is a temporary
+# fix; a proper fix would be to compile apps/* in the right order.
+ifndef IS_APP
+ifneq ($(ALL_APPS_DIRS),)
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ mkdir -p $$dep/ebin; \
+ done
+endif
+endif
+# At the toplevel: if LOCAL_DEPS is defined with at least one local app, only
+# compile that list of apps. Otherwise, compile everything.
+# Within an app: compile all LOCAL_DEPS that are (uncompiled) local apps.
+ifneq ($(ALL_APPS_DIRS_TO_BUILD),)
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS_TO_BUILD); do \
+ if grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/apps.log; then \
+ :; \
+ else \
+ echo $$dep >> $(ERLANG_MK_TMP)/apps.log; \
+ $(MAKE) -C $$dep $(if $(IS_TEST),test-build-app) IS_APP=1; \
+ fi \
+ done
+endif
+
+clean-tmp-deps.log:
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_TMP)/apps.log $(ERLANG_MK_TMP)/deps.log
+endif
+
+# Erlang.mk does not rebuild dependencies after they have been compiled
+# once. If a developer is working on the top-level project and on some
+# dependencies at the same time, they may want to change this behavior.
+# There are two solutions, illustrated below:
+#     1. Set `FULL=1` so that all dependencies are visited and
+#        recursively recompiled if necessary.
+#     2. Set `FORCE_REBUILD=` to the specific list of dependencies that
+#        should be recompiled (instead of the whole set).
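+#
+# For example (dependency names are illustrative):
+#
+#   make FULL=1
+#   make FORCE_REBUILD="cowboy cowlib"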
+
+FORCE_REBUILD ?=
+
+ifeq ($(origin FULL),undefined)
+ifneq ($(strip $(force_rebuild_dep)$(FORCE_REBUILD)),)
+define force_rebuild_dep
+echo "$(FORCE_REBUILD)" | grep -qw "$$(basename "$1")"
+endef
+endif
+endif
+
+ifneq ($(SKIP_DEPS),)
+deps::
+else
+deps:: $(ALL_DEPS_DIRS) apps clean-tmp-deps.log | $(ERLANG_MK_TMP)
+ifneq ($(ALL_DEPS_DIRS),)
+ $(verbose) set -e; for dep in $(ALL_DEPS_DIRS); do \
+ if grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/deps.log; then \
+ :; \
+ else \
+ echo $$dep >> $(ERLANG_MK_TMP)/deps.log; \
+ if [ -z "$(strip $(FULL))" ] $(if $(force_rebuild_dep),&& ! ($(call force_rebuild_dep,$$dep)),) && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ elif [ -f $$dep/GNUmakefile ] || [ -f $$dep/makefile ] || [ -f $$dep/Makefile ]; then \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ else \
+ echo "Error: No Makefile to build dependency $$dep." >&2; \
+ exit 2; \
+ fi \
+ fi \
+ done
+endif
+endif
+
+# Deps related targets.
+
+# @todo rename GNUmakefile and makefile into Makefile first, if they exist
+# While the Makefile could also be named GNUmakefile or makefile,
+# in practice only Makefile has been needed so far.
+define dep_autopatch
+ if [ -f $(DEPS_DIR)/$(1)/erlang.mk ]; then \
+ rm -rf $(DEPS_DIR)/$1/ebin/; \
+ $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+ $(call dep_autopatch_erlang_mk,$(1)); \
+ elif [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+ if [ -f $(DEPS_DIR)/$1/rebar.lock ]; then \
+ $(call dep_autopatch2,$1); \
+ elif [ 0 != `grep -c "include ../\w*\.mk" $(DEPS_DIR)/$(1)/Makefile` ]; then \
+ $(call dep_autopatch2,$(1)); \
+ elif [ 0 != `grep -ci "^[^#].*rebar" $(DEPS_DIR)/$(1)/Makefile` ]; then \
+ $(call dep_autopatch2,$(1)); \
+ elif [ -n "`find $(DEPS_DIR)/$(1)/ -type f -name \*.mk -not -name erlang.mk -exec grep -i "^[^#].*rebar" '{}' \;`" ]; then \
+ $(call dep_autopatch2,$(1)); \
+ fi \
+ else \
+ if [ ! -d $(DEPS_DIR)/$(1)/src/ ]; then \
+ $(call dep_autopatch_noop,$(1)); \
+ else \
+ $(call dep_autopatch2,$(1)); \
+ fi \
+ fi
+endef
+
+define dep_autopatch2
+ ! test -f $(DEPS_DIR)/$1/ebin/$1.app || \
+ mv -n $(DEPS_DIR)/$1/ebin/$1.app $(DEPS_DIR)/$1/src/$1.app.src; \
+ rm -f $(DEPS_DIR)/$1/ebin/$1.app; \
+ if [ -f $(DEPS_DIR)/$1/src/$1.app.src.script ]; then \
+ $(call erlang,$(call dep_autopatch_appsrc_script.erl,$(1))); \
+ fi; \
+ $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+ if [ -f $(DEPS_DIR)/$(1)/rebar -o -f $(DEPS_DIR)/$(1)/rebar.config -o -f $(DEPS_DIR)/$(1)/rebar.config.script -o -f $(DEPS_DIR)/$1/rebar.lock ]; then \
+ $(call dep_autopatch_fetch_rebar); \
+ $(call dep_autopatch_rebar,$(1)); \
+ else \
+ $(call dep_autopatch_gen,$(1)); \
+ fi
+endef
+
+define dep_autopatch_noop
+ printf "noop:\n" > $(DEPS_DIR)/$(1)/Makefile
+endef
+
+# Replace "include erlang.mk" with a line that will load the parent Erlang.mk
+# if given. Do it for all 3 possible Makefile file names.
+ifeq ($(NO_AUTOPATCH_ERLANG_MK),)
+define dep_autopatch_erlang_mk
+ for f in Makefile makefile GNUmakefile; do \
+ if [ -f $(DEPS_DIR)/$1/$$f ]; then \
+ sed -i.bak s/'include *erlang.mk'/'include $$(if $$(ERLANG_MK_FILENAME),$$(ERLANG_MK_FILENAME),erlang.mk)'/ $(DEPS_DIR)/$1/$$f; \
+ fi \
+ done
+endef
+else
+define dep_autopatch_erlang_mk
+ :
+endef
+endif
+
+define dep_autopatch_gen
+ printf "%s\n" \
+ "ERLC_OPTS = +debug_info" \
+ "include ../../erlang.mk" > $(DEPS_DIR)/$(1)/Makefile
+endef
+
+# We use flock/lockf when available to avoid concurrency issues.
+define dep_autopatch_fetch_rebar
+ if command -v flock >/dev/null; then \
+ flock $(ERLANG_MK_TMP)/rebar.lock sh -c "$(call dep_autopatch_fetch_rebar2)"; \
+ elif command -v lockf >/dev/null; then \
+ lockf $(ERLANG_MK_TMP)/rebar.lock sh -c "$(call dep_autopatch_fetch_rebar2)"; \
+ else \
+ $(call dep_autopatch_fetch_rebar2); \
+ fi
+endef
+
+define dep_autopatch_fetch_rebar2
+ if [ ! -d $(ERLANG_MK_TMP)/rebar ]; then \
+ git clone -q -n -- $(REBAR_GIT) $(ERLANG_MK_TMP)/rebar; \
+ cd $(ERLANG_MK_TMP)/rebar; \
+ git checkout -q $(REBAR_COMMIT); \
+ ./bootstrap; \
+ cd -; \
+ fi
+endef
+
+define dep_autopatch_rebar
+ if [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+ mv $(DEPS_DIR)/$(1)/Makefile $(DEPS_DIR)/$(1)/Makefile.orig.mk; \
+ fi; \
+ $(call erlang,$(call dep_autopatch_rebar.erl,$(1))); \
+ rm -f $(DEPS_DIR)/$(1)/ebin/$(1).app
+endef
+
+define dep_autopatch_rebar.erl
+ application:load(rebar),
+ application:set_env(rebar, log_level, debug),
+ rmemo:start(),
+ Conf1 = case file:consult("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config)") of
+ {ok, Conf0} -> Conf0;
+ _ -> []
+ end,
+ {Conf, OsEnv} = fun() ->
+ case filelib:is_file("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)") of
+ false -> {Conf1, []};
+ true ->
+ Bindings0 = erl_eval:new_bindings(),
+ Bindings1 = erl_eval:add_binding('CONFIG', Conf1, Bindings0),
+ Bindings = erl_eval:add_binding('SCRIPT', "$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings1),
+ Before = os:getenv(),
+ {ok, Conf2} = file:script("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings),
+ {Conf2, lists:foldl(fun(E, Acc) -> lists:delete(E, Acc) end, os:getenv(), Before)}
+ end
+ end(),
+ Write = fun (Text) ->
+ file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/Makefile)", Text, [append])
+ end,
+ Escape = fun (Text) ->
+ re:replace(Text, "\\\\$$", "\$$$$", [global, {return, list}])
+ end,
+ Write("IGNORE_DEPS += edown eper eunit_formatters meck node_package "
+ "rebar_lock_deps_plugin rebar_vsn_plugin reltool_util\n"),
+ Write("C_SRC_DIR = /path/do/not/exist\n"),
+ Write("C_SRC_TYPE = rebar\n"),
+ Write("DRV_CFLAGS = -fPIC\nexport DRV_CFLAGS\n"),
+ Write(["ERLANG_ARCH = ", rebar_utils:wordsize(), "\nexport ERLANG_ARCH\n"]),
+ ToList = fun
+ (V) when is_atom(V) -> atom_to_list(V);
+ (V) when is_list(V) -> "'\\"" ++ V ++ "\\"'"
+ end,
+ fun() ->
+ Write("ERLC_OPTS = +debug_info\nexport ERLC_OPTS\n"),
+ case lists:keyfind(erl_opts, 1, Conf) of
+ false -> ok;
+ {_, ErlOpts} ->
+ lists:foreach(fun
+ ({d, D}) ->
+ Write("ERLC_OPTS += -D" ++ ToList(D) ++ "=1\n");
+ ({d, DKey, DVal}) ->
+ Write("ERLC_OPTS += -D" ++ ToList(DKey) ++ "=" ++ ToList(DVal) ++ "\n");
+ ({i, I}) ->
+ Write(["ERLC_OPTS += -I ", I, "\n"]);
+ ({platform_define, Regex, D}) ->
+ case rebar_utils:is_arch(Regex) of
+ true -> Write("ERLC_OPTS += -D" ++ ToList(D) ++ "=1\n");
+ false -> ok
+ end;
+ ({parse_transform, PT}) ->
+ Write("ERLC_OPTS += +'{parse_transform, " ++ ToList(PT) ++ "}'\n");
+ (_) -> ok
+ end, ErlOpts)
+ end,
+ Write("\n")
+ end(),
+ GetHexVsn = fun(N, NP) ->
+ case file:consult("$(call core_native_path,$(DEPS_DIR)/$1/rebar.lock)") of
+ {ok, Lock} ->
+ io:format("~p~n", [Lock]),
+ case lists:keyfind("1.1.0", 1, Lock) of
+ {_, LockPkgs} ->
+ io:format("~p~n", [LockPkgs]),
+ case lists:keyfind(atom_to_binary(N, latin1), 1, LockPkgs) of
+ {_, {pkg, _, Vsn}, _} ->
+ io:format("~p~n", [Vsn]),
+ {N, {hex, NP, binary_to_list(Vsn)}};
+ _ ->
+ false
+ end;
+ _ ->
+ false
+ end;
+ _ ->
+ false
+ end
+ end,
+ SemVsn = fun
+ ("~>" ++ S0) ->
+ S = case S0 of
+ " " ++ S1 -> S1;
+ _ -> S0
+ end,
+ case length([ok || $$. <- S]) of
+ 0 -> S ++ ".0.0";
+ 1 -> S ++ ".0";
+ _ -> S
+ end;
+ (S) -> S
+ end,
+ fun() ->
+ File = case lists:keyfind(deps, 1, Conf) of
+ false -> [];
+ {_, Deps} ->
+ [begin case case Dep of
+ N when is_atom(N) -> GetHexVsn(N, N);
+ {N, S} when is_atom(N), is_list(S) -> {N, {hex, N, SemVsn(S)}};
+ {N, {pkg, NP}} when is_atom(N) -> GetHexVsn(N, NP);
+ {N, S, {pkg, NP}} -> {N, {hex, NP, S}};
+ {N, S} when is_tuple(S) -> {N, S};
+ {N, _, S} -> {N, S};
+ {N, _, S, _} -> {N, S};
+ _ -> false
+ end of
+ false -> ok;
+ {Name, Source} ->
+ {Method, Repo, Commit} = case Source of
+ {hex, NPV, V} -> {hex, V, NPV};
+ {git, R} -> {git, R, master};
+ {M, R, {branch, C}} -> {M, R, C};
+ {M, R, {ref, C}} -> {M, R, C};
+ {M, R, {tag, C}} -> {M, R, C};
+ {M, R, C} -> {M, R, C}
+ end,
+ Write(io_lib:format("DEPS += ~s\ndep_~s = ~s ~s ~s~n", [Name, Name, Method, Repo, Commit]))
+ end end || Dep <- Deps]
+ end
+ end(),
+ fun() ->
+ case lists:keyfind(erl_first_files, 1, Conf) of
+ false -> ok;
+ {_, Files} ->
+ Names = [[" ", case lists:reverse(F) of
+ "lre." ++ Elif -> lists:reverse(Elif);
+ "lrx." ++ Elif -> lists:reverse(Elif);
+ "lry." ++ Elif -> lists:reverse(Elif);
+ Elif -> lists:reverse(Elif)
+ end] || "src/" ++ F <- Files],
+ Write(io_lib:format("COMPILE_FIRST +=~s\n", [Names]))
+ end
+ end(),
+ Write("\n\nrebar_dep: preprocess pre-deps deps pre-app app\n"),
+ Write("\npreprocess::\n"),
+ Write("\npre-deps::\n"),
+ Write("\npre-app::\n"),
+ PatchHook = fun(Cmd) ->
+ Cmd2 = re:replace(Cmd, "^([g]?make)(.*)( -C.*)", "\\\\1\\\\3\\\\2", [{return, list}]),
+ case Cmd2 of
+ "make -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1);
+ "gmake -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1);
+ "make " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1);
+ "gmake " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1);
+ _ -> Escape(Cmd)
+ end
+ end,
+ fun() ->
+ case lists:keyfind(pre_hooks, 1, Conf) of
+ false -> ok;
+ {_, Hooks} ->
+ [case H of
+ {'get-deps', Cmd} ->
+ Write("\npre-deps::\n\t" ++ PatchHook(Cmd) ++ "\n");
+ {compile, Cmd} ->
+ Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n");
+ {Regex, compile, Cmd} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n");
+ false -> ok
+ end;
+ _ -> ok
+ end || H <- Hooks]
+ end
+ end(),
+ ShellToMk = fun(V0) ->
+ V1 = re:replace(V0, "[$$][(]", "$$\(shell ", [global]),
+ V = re:replace(V1, "([$$])(?![(])(\\\\w*)", "\\\\1(\\\\2)", [global]),
+ re:replace(V, "-Werror\\\\b", "", [{return, list}, global])
+ end,
+ PortSpecs = fun() ->
+ case lists:keyfind(port_specs, 1, Conf) of
+ false ->
+ case filelib:is_dir("$(call core_native_path,$(DEPS_DIR)/$1/c_src)") of
+ false -> [];
+ true ->
+ [{"priv/" ++ proplists:get_value(so_name, Conf, "$(1)_drv.so"),
+ proplists:get_value(port_sources, Conf, ["c_src/*.c"]), []}]
+ end;
+ {_, Specs} ->
+ lists:flatten([case S of
+ {Output, Input} -> {ShellToMk(Output), Input, []};
+ {Regex, Output, Input} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {ShellToMk(Output), Input, []};
+ false -> []
+ end;
+ {Regex, Output, Input, [{env, Env}]} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {ShellToMk(Output), Input, Env};
+ false -> []
+ end
+ end || S <- Specs])
+ end
+ end(),
+ PortSpecWrite = fun (Text) ->
+ file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/c_src/Makefile.erlang.mk)", Text, [append])
+ end,
+ case PortSpecs of
+ [] -> ok;
+ _ ->
+ Write("\npre-app::\n\t@$$\(MAKE) --no-print-directory -f c_src/Makefile.erlang.mk\n"),
+ PortSpecWrite(io_lib:format("ERL_CFLAGS ?= -finline-functions -Wall -fPIC -I \\"~s/erts-~s/include\\" -I \\"~s\\"\n",
+ [code:root_dir(), erlang:system_info(version), code:lib_dir(erl_interface, include)])),
+ PortSpecWrite(io_lib:format("ERL_LDFLAGS ?= -L \\"~s\\" -lei\n",
+ [code:lib_dir(erl_interface, lib)])),
+ [PortSpecWrite(["\n", E, "\n"]) || E <- OsEnv],
+ FilterEnv = fun(Env) ->
+ lists:flatten([case E of
+ {_, _} -> E;
+ {Regex, K, V} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {K, V};
+ false -> []
+ end
+ end || E <- Env])
+ end,
+ MergeEnv = fun(Env) ->
+ lists:foldl(fun ({K, V}, Acc) ->
+ case lists:keyfind(K, 1, Acc) of
+ false -> [{K, rebar_utils:expand_env_variable(V, K, "")}|Acc];
+ {_, V0} -> [{K, rebar_utils:expand_env_variable(V, K, V0)}|Acc]
+ end
+ end, [], Env)
+ end,
+ PortEnv = case lists:keyfind(port_env, 1, Conf) of
+ false -> [];
+ {_, PortEnv0} -> FilterEnv(PortEnv0)
+ end,
+ PortSpec = fun ({Output, Input0, Env}) ->
+ filelib:ensure_dir("$(call core_native_path,$(DEPS_DIR)/$1/)" ++ Output),
+ Input = [[" ", I] || I <- Input0],
+ PortSpecWrite([
+ [["\n", K, " = ", ShellToMk(V)] || {K, V} <- lists:reverse(MergeEnv(PortEnv))],
+ case $(PLATFORM) of
+ darwin -> "\n\nLDFLAGS += -flat_namespace -undefined suppress";
+ _ -> ""
+ end,
+ "\n\nall:: ", Output, "\n\t@:\n\n",
+ "%.o: %.c\n\t$$\(CC) -c -o $$\@ $$\< $$\(CFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.C\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.cc\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.cpp\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ [[Output, ": ", K, " += ", ShellToMk(V), "\n"] || {K, V} <- lists:reverse(MergeEnv(FilterEnv(Env)))],
+ Output, ": $$\(foreach ext,.c .C .cc .cpp,",
+ "$$\(patsubst %$$\(ext),%.o,$$\(filter %$$\(ext),$$\(wildcard", Input, "))))\n",
+ "\t$$\(CC) -o $$\@ $$\? $$\(LDFLAGS) $$\(ERL_LDFLAGS) $$\(DRV_LDFLAGS) $$\(EXE_LDFLAGS)",
+ case {filename:extension(Output), $(PLATFORM)} of
+ {[], _} -> "\n";
+ {_, darwin} -> "\n";
+ _ -> " -shared\n"
+ end])
+ end,
+ [PortSpec(S) || S <- PortSpecs]
+ end,
+ fun() ->
+ case lists:keyfind(plugins, 1, Conf) of
+ false -> ok;
+ {_, Plugins0} ->
+ Plugins = [P || P <- Plugins0, is_tuple(P)],
+ case lists:keyfind('lfe-compile', 1, Plugins) of
+ false -> ok;
+ _ -> Write("\nBUILD_DEPS = lfe lfe.mk\ndep_lfe.mk = git https://github.com/ninenines/lfe.mk master\nDEP_PLUGINS = lfe.mk\n")
+ end
+ end
+ end(),
+ Write("\ninclude $$\(if $$\(ERLANG_MK_FILENAME),$$\(ERLANG_MK_FILENAME),erlang.mk)"),
+ RunPlugin = fun(Plugin, Step) ->
+ case erlang:function_exported(Plugin, Step, 2) of
+ false -> ok;
+ true ->
+ c:cd("$(call core_native_path,$(DEPS_DIR)/$1/)"),
+ Ret = Plugin:Step({config, "", Conf, dict:new(), dict:new(), dict:new(),
+ dict:store(base_dir, "", dict:new())}, undefined),
+ io:format("rebar plugin ~p step ~p ret ~p~n", [Plugin, Step, Ret])
+ end
+ end,
+ fun() ->
+ case lists:keyfind(plugins, 1, Conf) of
+ false -> ok;
+ {_, Plugins0} ->
+ Plugins = [P || P <- Plugins0, is_atom(P)],
+ [begin
+ case lists:keyfind(deps, 1, Conf) of
+ false -> ok;
+ {_, Deps} ->
+ case lists:keyfind(P, 1, Deps) of
+ false -> ok;
+ _ ->
+ Path = "$(call core_native_path,$(DEPS_DIR)/)" ++ atom_to_list(P),
+ io:format("~s", [os:cmd("$(MAKE) -C $(call core_native_path,$(DEPS_DIR)/$1) " ++ Path)]),
+ io:format("~s", [os:cmd("$(MAKE) -C " ++ Path ++ " IS_DEP=1")]),
+ code:add_patha(Path ++ "/ebin")
+ end
+ end
+ end || P <- Plugins],
+ [case code:load_file(P) of
+ {module, P} -> ok;
+ _ ->
+ case lists:keyfind(plugin_dir, 1, Conf) of
+ false -> ok;
+ {_, PluginsDir} ->
+ ErlFile = "$(call core_native_path,$(DEPS_DIR)/$1/)" ++ PluginsDir ++ "/" ++ atom_to_list(P) ++ ".erl",
+ {ok, P, Bin} = compile:file(ErlFile, [binary]),
+ {module, P} = code:load_binary(P, ErlFile, Bin)
+ end
+ end || P <- Plugins],
+ [RunPlugin(P, preprocess) || P <- Plugins],
+ [RunPlugin(P, pre_compile) || P <- Plugins],
+ [RunPlugin(P, compile) || P <- Plugins]
+ end
+ end(),
+ halt()
+endef
+
+define dep_autopatch_appsrc_script.erl
+ AppSrc = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+ AppSrcScript = AppSrc ++ ".script",
+ {ok, Conf0} = file:consult(AppSrc),
+ Bindings0 = erl_eval:new_bindings(),
+ Bindings1 = erl_eval:add_binding('CONFIG', Conf0, Bindings0),
+ Bindings = erl_eval:add_binding('SCRIPT', AppSrcScript, Bindings1),
+ Conf = case file:script(AppSrcScript, Bindings) of
+ {ok, [C]} -> C;
+ {ok, C} -> C
+ end,
+ ok = file:write_file(AppSrc, io_lib:format("~p.~n", [Conf])),
+ halt()
+endef
+
+define dep_autopatch_appsrc.erl
+ AppSrcOut = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+ AppSrcIn = case filelib:is_regular(AppSrcOut) of false -> "$(call core_native_path,$(DEPS_DIR)/$1/ebin/$1.app)"; true -> AppSrcOut end,
+ case filelib:is_regular(AppSrcIn) of
+ false -> ok;
+ true ->
+ {ok, [{application, $(1), L0}]} = file:consult(AppSrcIn),
+ L1 = lists:keystore(modules, 1, L0, {modules, []}),
+ L2 = case lists:keyfind(vsn, 1, L1) of
+ {_, git} -> lists:keyreplace(vsn, 1, L1, {vsn, lists:droplast(os:cmd("git -C $(DEPS_DIR)/$1 describe --dirty --tags --always"))});
+ {_, {cmd, _}} -> lists:keyreplace(vsn, 1, L1, {vsn, "cmd"});
+ _ -> L1
+ end,
+ L3 = case lists:keyfind(registered, 1, L2) of false -> [{registered, []}|L2]; _ -> L2 end,
+ ok = file:write_file(AppSrcOut, io_lib:format("~p.~n", [{application, $(1), L3}])),
+ case AppSrcOut of AppSrcIn -> ok; _ -> ok = file:delete(AppSrcIn) end
+ end,
+ halt()
+endef
+
+define dep_fetch_git
+ git clone -q -n -- $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && git checkout -q $(call dep_commit,$(1));
+endef
+
+define dep_fetch_git-subfolder
+ mkdir -p $(ERLANG_MK_TMP)/git-subfolder; \
+ git clone -q -n -- $(call dep_repo,$1) \
+ $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1); \
+ cd $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1) \
+ && git checkout -q $(call dep_commit,$1); \
+ ln -s $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1)/$(word 4,$(dep_$(1))) \
+ $(DEPS_DIR)/$(call dep_name,$1);
+endef
+
+define dep_fetch_git-submodule
+ git submodule update --init -- $(DEPS_DIR)/$1;
+endef
+
+define dep_fetch_hg
+ hg clone -q -U $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && hg update -q $(call dep_commit,$(1));
+endef
+
+define dep_fetch_svn
+ svn checkout -q $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+define dep_fetch_cp
+ cp -R $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+define dep_fetch_ln
+ ln -s $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+# Hex only has a package version. No need to look in the Erlang.mk packages.
+define dep_fetch_hex
+ mkdir -p $(ERLANG_MK_TMP)/hex $(DEPS_DIR)/$1; \
+ $(call core_http_get,$(ERLANG_MK_TMP)/hex/$1.tar,\
+ https://repo.hex.pm/tarballs/$(if $(word 3,$(dep_$1)),$(word 3,$(dep_$1)),$1)-$(strip $(word 2,$(dep_$1))).tar); \
+ tar -xOf $(ERLANG_MK_TMP)/hex/$1.tar contents.tar.gz | tar -C $(DEPS_DIR)/$1 -xzf -;
+endef
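+
+# For example (package name and version are illustrative):
+#
+#   dep_jsx = hex 2.9.0
+#
+# An optional third word names the Hex package when it differs from the
+# Erlang.mk dependency name.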
+
+define dep_fetch_fail
+ echo "Error: Unknown or invalid dependency: $(1)." >&2; \
+ exit 78;
+endef
+
+# Kept for compatibility purposes with older Erlang.mk configuration.
+define dep_fetch_legacy
+ $(warning WARNING: '$(1)' dependency configuration uses deprecated format.) \
+ git clone -q -n -- $(word 1,$(dep_$(1))) $(DEPS_DIR)/$(1); \
+ cd $(DEPS_DIR)/$(1) && git checkout -q $(if $(word 2,$(dep_$(1))),$(word 2,$(dep_$(1))),master);
+endef
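+
+# In the legacy format the fetch method is omitted: the first word is the
+# repository and the optional second word is the commit, for example
+# (illustrative): dep_cowboy = https://github.com/ninenines/cowboy master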
+
+define dep_target
+$(DEPS_DIR)/$(call dep_name,$1): | $(ERLANG_MK_TMP)
+ $(eval DEP_NAME := $(call dep_name,$1))
+ $(eval DEP_STR := $(if $(filter $1,$(DEP_NAME)),$1,"$1 ($(DEP_NAME))"))
+ $(verbose) if test -d $(APPS_DIR)/$(DEP_NAME); then \
+ echo "Error: Dependency" $(DEP_STR) "conflicts with application found in $(APPS_DIR)/$(DEP_NAME)." >&2; \
+ exit 17; \
+ fi
+ $(verbose) mkdir -p $(DEPS_DIR)
+ $(dep_verbose) $(call dep_fetch_$(strip $(call dep_fetch,$(1))),$(1))
+ $(verbose) if [ -f $(DEPS_DIR)/$(1)/configure.ac -o -f $(DEPS_DIR)/$(1)/configure.in ] \
+ && [ ! -f $(DEPS_DIR)/$(1)/configure ]; then \
+ echo " AUTO " $(DEP_STR); \
+ cd $(DEPS_DIR)/$(1) && autoreconf -Wall -vif -I m4; \
+ fi
+ - $(verbose) if [ -f $(DEPS_DIR)/$(DEP_NAME)/configure ]; then \
+ echo " CONF " $(DEP_STR); \
+ cd $(DEPS_DIR)/$(DEP_NAME) && ./configure; \
+ fi
+ifeq ($(filter $(1),$(NO_AUTOPATCH)),)
+ $(verbose) $$(MAKE) --no-print-directory autopatch-$(DEP_NAME)
+endif
+
+.PHONY: autopatch-$(call dep_name,$1)
+
+autopatch-$(call dep_name,$1)::
+ $(verbose) if [ "$(1)" = "amqp_client" -a "$(RABBITMQ_CLIENT_PATCH)" ]; then \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \
+ echo " PATCH Downloading rabbitmq-codegen"; \
+ git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \
+ fi; \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-server ]; then \
+ echo " PATCH Downloading rabbitmq-server"; \
+ git clone https://github.com/rabbitmq/rabbitmq-server.git $(DEPS_DIR)/rabbitmq-server; \
+ fi; \
+ ln -s $(DEPS_DIR)/amqp_client/deps/rabbit_common-0.0.0 $(DEPS_DIR)/rabbit_common; \
+ elif [ "$(1)" = "rabbit" -a "$(RABBITMQ_SERVER_PATCH)" ]; then \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \
+ echo " PATCH Downloading rabbitmq-codegen"; \
+ git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \
+ fi \
+ elif [ "$1" = "elixir" -a "$(ELIXIR_PATCH)" ]; then \
+ ln -s lib/elixir/ebin $(DEPS_DIR)/elixir/; \
+ else \
+ $$(call dep_autopatch,$(call dep_name,$1)) \
+ fi
+endef
+
+$(foreach dep,$(BUILD_DEPS) $(DEPS),$(eval $(call dep_target,$(dep))))
+
+ifndef IS_APP
+clean:: clean-apps
+
+clean-apps:
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ $(MAKE) -C $$dep clean IS_APP=1; \
+ done
+
+distclean:: distclean-apps
+
+distclean-apps:
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ $(MAKE) -C $$dep distclean IS_APP=1; \
+ done
+endif
+
+ifndef SKIP_DEPS
+distclean:: distclean-deps
+
+distclean-deps:
+ $(gen_verbose) rm -rf $(DEPS_DIR)
+endif
+
+# Forward-declare variables used in core/deps-tools.mk. This is required
+# in case plugins use them.
+
+ERLANG_MK_RECURSIVE_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-deps-list.log
+ERLANG_MK_RECURSIVE_DOC_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-doc-deps-list.log
+ERLANG_MK_RECURSIVE_REL_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-rel-deps-list.log
+ERLANG_MK_RECURSIVE_TEST_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-test-deps-list.log
+ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-shell-deps-list.log
+
+ERLANG_MK_QUERY_DEPS_FILE = $(ERLANG_MK_TMP)/query-deps.log
+ERLANG_MK_QUERY_DOC_DEPS_FILE = $(ERLANG_MK_TMP)/query-doc-deps.log
+ERLANG_MK_QUERY_REL_DEPS_FILE = $(ERLANG_MK_TMP)/query-rel-deps.log
+ERLANG_MK_QUERY_TEST_DEPS_FILE = $(ERLANG_MK_TMP)/query-test-deps.log
+ERLANG_MK_QUERY_SHELL_DEPS_FILE = $(ERLANG_MK_TMP)/query-shell-deps.log
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: clean-app
+
+# Configuration.
+
+ERLC_OPTS ?= -Werror +debug_info +warn_export_vars +warn_shadow_vars \
+ +warn_obsolete_guard # +bin_opt_info +warn_export_all +warn_missing_spec
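+
+# Projects can override these defaults in their own Makefile, for example
+# (options shown are illustrative):
+#
+#   ERLC_OPTS = +debug_info +warn_missing_spec
+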
+COMPILE_FIRST ?=
+COMPILE_FIRST_PATHS = $(addprefix src/,$(addsuffix .erl,$(COMPILE_FIRST)))
+ERLC_EXCLUDE ?=
+ERLC_EXCLUDE_PATHS = $(addprefix src/,$(addsuffix .erl,$(ERLC_EXCLUDE)))
+
+ERLC_ASN1_OPTS ?=
+
+ERLC_MIB_OPTS ?=
+COMPILE_MIB_FIRST ?=
+COMPILE_MIB_FIRST_PATHS = $(addprefix mibs/,$(addsuffix .mib,$(COMPILE_MIB_FIRST)))
+
+# Verbosity.
+
+app_verbose_0 = @echo " APP " $(PROJECT);
+app_verbose_2 = set -x;
+app_verbose = $(app_verbose_$(V))
+
+appsrc_verbose_0 = @echo " APP " $(PROJECT).app.src;
+appsrc_verbose_2 = set -x;
+appsrc_verbose = $(appsrc_verbose_$(V))
+
+makedep_verbose_0 = @echo " DEPEND" $(PROJECT).d;
+makedep_verbose_2 = set -x;
+makedep_verbose = $(makedep_verbose_$(V))
+
+erlc_verbose_0 = @echo " ERLC " $(filter-out $(patsubst %,%.erl,$(ERLC_EXCLUDE)),\
+ $(filter %.erl %.core,$(?F)));
+erlc_verbose_2 = set -x;
+erlc_verbose = $(erlc_verbose_$(V))
+
+xyrl_verbose_0 = @echo " XYRL " $(filter %.xrl %.yrl,$(?F));
+xyrl_verbose_2 = set -x;
+xyrl_verbose = $(xyrl_verbose_$(V))
+
+asn1_verbose_0 = @echo " ASN1 " $(filter %.asn1,$(?F));
+asn1_verbose_2 = set -x;
+asn1_verbose = $(asn1_verbose_$(V))
+
+mib_verbose_0 = @echo " MIB " $(filter %.bin %.mib,$(?F));
+mib_verbose_2 = set -x;
+mib_verbose = $(mib_verbose_$(V))
+
+ifneq ($(wildcard src/),)
+
+# Targets.
+
+app:: $(if $(wildcard ebin/test),clean) deps
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d
+ $(verbose) $(MAKE) --no-print-directory app-build
+
+ifeq ($(wildcard src/$(PROJECT_MOD).erl),)
+define app_file
+{application, '$(PROJECT)', [
+ {description, "$(PROJECT_DESCRIPTION)"},
+ {vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP),
+ {id$(comma)$(space)"$(1)"}$(comma))
+ {modules, [$(call comma_list,$(2))]},
+ {registered, []},
+ {applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(foreach dep,$(DEPS),$(call dep_name,$(dep))))]},
+ {env, $(subst \,\\,$(PROJECT_ENV))}$(if $(findstring {,$(PROJECT_APP_EXTRA_KEYS)),$(comma)$(newline)$(tab)$(subst \,\\,$(PROJECT_APP_EXTRA_KEYS)),)
+]}.
+endef
+else
+define app_file
+{application, '$(PROJECT)', [
+ {description, "$(PROJECT_DESCRIPTION)"},
+ {vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP),
+ {id$(comma)$(space)"$(1)"}$(comma))
+ {modules, [$(call comma_list,$(2))]},
+ {registered, [$(call comma_list,$(PROJECT)_sup $(PROJECT_REGISTERED))]},
+ {applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(foreach dep,$(DEPS),$(call dep_name,$(dep))))]},
+ {mod, {$(PROJECT_MOD), []}},
+ {env, $(subst \,\\,$(PROJECT_ENV))}$(if $(findstring {,$(PROJECT_APP_EXTRA_KEYS)),$(comma)$(newline)$(tab)$(subst \,\\,$(PROJECT_APP_EXTRA_KEYS)),)
+]}.
+endef
+endif
+
+app-build: ebin/$(PROJECT).app
+ $(verbose) :
+
+# Source files.
+
+ALL_SRC_FILES := $(sort $(call core_find,src/,*))
+
+ERL_FILES := $(filter %.erl,$(ALL_SRC_FILES))
+CORE_FILES := $(filter %.core,$(ALL_SRC_FILES))
+
+# ASN.1 files.
+
+ifneq ($(wildcard asn1/),)
+ASN1_FILES = $(sort $(call core_find,asn1/,*.asn1))
+ERL_FILES += $(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES))))
+
+define compile_asn1
+ $(verbose) mkdir -p include/
+ $(asn1_verbose) erlc -v -I include/ -o asn1/ +noobj $(ERLC_ASN1_OPTS) $(1)
+ $(verbose) mv asn1/*.erl src/
+ -$(verbose) mv asn1/*.hrl include/
+ $(verbose) mv asn1/*.asn1db include/
+endef
+
+$(PROJECT).d:: $(ASN1_FILES)
+ $(if $(strip $?),$(call compile_asn1,$?))
+endif
+
+# SNMP MIB files.
+
+ifneq ($(wildcard mibs/),)
+MIB_FILES = $(sort $(call core_find,mibs/,*.mib))
+
+$(PROJECT).d:: $(COMPILE_MIB_FIRST_PATHS) $(MIB_FILES)
+ $(verbose) mkdir -p include/ priv/mibs/
+ $(mib_verbose) erlc -v $(ERLC_MIB_OPTS) -o priv/mibs/ -I priv/mibs/ $?
+ $(mib_verbose) erlc -o include/ -- $(addprefix priv/mibs/,$(patsubst %.mib,%.bin,$(notdir $?)))
+endif
+
+# Leex and Yecc files.
+
+XRL_FILES := $(filter %.xrl,$(ALL_SRC_FILES))
+XRL_ERL_FILES = $(addprefix src/,$(patsubst %.xrl,%.erl,$(notdir $(XRL_FILES))))
+ERL_FILES += $(XRL_ERL_FILES)
+
+YRL_FILES := $(filter %.yrl,$(ALL_SRC_FILES))
+YRL_ERL_FILES = $(addprefix src/,$(patsubst %.yrl,%.erl,$(notdir $(YRL_FILES))))
+ERL_FILES += $(YRL_ERL_FILES)
+
+$(PROJECT).d:: $(XRL_FILES) $(YRL_FILES)
+ $(if $(strip $?),$(xyrl_verbose) erlc -v -o src/ $(YRL_ERLC_OPTS) $?)
+
+# Erlang and Core Erlang files.
+
+define makedep.erl
+ E = ets:new(makedep, [bag]),
+ G = digraph:new([acyclic]),
+ ErlFiles = lists:usort(string:tokens("$(ERL_FILES)", " ")),
+ DepsDir = "$(call core_native_path,$(DEPS_DIR))",
+ AppsDir = "$(call core_native_path,$(APPS_DIR))",
+ DepsDirsSrc = "$(if $(wildcard $(DEPS_DIR)/*/src), $(call core_native_path,$(wildcard $(DEPS_DIR)/*/src)))",
+ DepsDirsInc = "$(if $(wildcard $(DEPS_DIR)/*/include), $(call core_native_path,$(wildcard $(DEPS_DIR)/*/include)))",
+ AppsDirsSrc = "$(if $(wildcard $(APPS_DIR)/*/src), $(call core_native_path,$(wildcard $(APPS_DIR)/*/src)))",
+ AppsDirsInc = "$(if $(wildcard $(APPS_DIR)/*/include), $(call core_native_path,$(wildcard $(APPS_DIR)/*/include)))",
+ DepsDirs = lists:usort(string:tokens(DepsDirsSrc++DepsDirsInc, " ")),
+ AppsDirs = lists:usort(string:tokens(AppsDirsSrc++AppsDirsInc, " ")),
+ Modules = [{list_to_atom(filename:basename(F, ".erl")), F} || F <- ErlFiles],
+ Add = fun (Mod, Dep) ->
+ case lists:keyfind(Dep, 1, Modules) of
+ false -> ok;
+ {_, DepFile} ->
+ {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+ ets:insert(E, {ModFile, DepFile}),
+ digraph:add_vertex(G, Mod),
+ digraph:add_vertex(G, Dep),
+ digraph:add_edge(G, Mod, Dep)
+ end
+ end,
+ AddHd = fun (F, Mod, DepFile) ->
+ case file:open(DepFile, [read]) of
+ {error, enoent} ->
+ ok;
+ {ok, Fd} ->
+ {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+ case ets:match(E, {ModFile, DepFile}) of
+ [] ->
+ ets:insert(E, {ModFile, DepFile}),
+ F(F, Fd, Mod,0);
+ _ -> ok
+ end
+ end
+ end,
+ SearchHrl = fun
+ F(_Hrl, []) -> {error,enoent};
+ F(Hrl, [Dir|Dirs]) ->
+ HrlF = filename:join([Dir,Hrl]),
+ case filelib:is_file(HrlF) of
+ true ->
+ {ok, HrlF};
+ false -> F(Hrl,Dirs)
+ end
+ end,
+ Attr = fun
+ (_F, Mod, behavior, Dep) ->
+ Add(Mod, Dep);
+ (_F, Mod, behaviour, Dep) ->
+ Add(Mod, Dep);
+ (_F, Mod, compile, {parse_transform, Dep}) ->
+ Add(Mod, Dep);
+ (_F, Mod, compile, Opts) when is_list(Opts) ->
+ case proplists:get_value(parse_transform, Opts) of
+ undefined -> ok;
+ Dep -> Add(Mod, Dep)
+ end;
+ (F, Mod, include, Hrl) ->
+ case SearchHrl(Hrl, ["src", "include",AppsDir,DepsDir]++AppsDirs++DepsDirs) of
+ {ok, FoundHrl} -> AddHd(F, Mod, FoundHrl);
+ {error, _} -> false
+ end;
+ (F, Mod, include_lib, Hrl) ->
+ case SearchHrl(Hrl, ["src", "include",AppsDir,DepsDir]++AppsDirs++DepsDirs) of
+ {ok, FoundHrl} -> AddHd(F, Mod, FoundHrl);
+ {error, _} -> false
+ end;
+ (F, Mod, import, {Imp, _}) ->
+ IsFile =
+ case lists:keyfind(Imp, 1, Modules) of
+ false -> false;
+ {_, FilePath} -> filelib:is_file(FilePath)
+ end,
+ case IsFile of
+ false -> ok;
+ true -> Add(Mod, Imp)
+ end;
+ (_, _, _, _) -> ok
+ end,
+ MakeDepend = fun
+ (F, Fd, Mod, StartLocation) ->
+ {ok, Filename} = file:pid2name(Fd),
+ case io:parse_erl_form(Fd, undefined, StartLocation) of
+ {ok, AbsData, EndLocation} ->
+ case AbsData of
+ {attribute, _, Key, Value} ->
+ Attr(F, Mod, Key, Value),
+ F(F, Fd, Mod, EndLocation);
+ _ -> F(F, Fd, Mod, EndLocation)
+ end;
+ {eof, _ } -> file:close(Fd);
+ {error, ErrorDescription } ->
+ file:close(Fd);
+ {error, ErrorInfo, ErrorLocation} ->
+ F(F, Fd, Mod, ErrorLocation)
+ end,
+ ok
+ end,
+ [begin
+ Mod = list_to_atom(filename:basename(F, ".erl")),
+ case file:open(F, [read]) of
+ {ok, Fd} -> MakeDepend(MakeDepend, Fd, Mod,0);
+ {error, enoent} -> ok
+ end
+ end || F <- ErlFiles],
+ Depend = sofs:to_external(sofs:relation_to_family(sofs:relation(ets:tab2list(E)))),
+ CompileFirst = [X || X <- lists:reverse(digraph_utils:topsort(G)), [] =/= digraph:in_neighbours(G, X)],
+ TargetPath = fun(Target) ->
+ case lists:keyfind(Target, 1, Modules) of
+ false -> "";
+ {_, DepFile} ->
+ DirSubname = tl(string:tokens(filename:dirname(DepFile), "/")),
+ string:join(DirSubname ++ [atom_to_list(Target)], "/")
+ end
+ end,
+ Output0 = [
+ "# Generated by Erlang.mk. Edit at your own risk!\n\n",
+ [[F, "::", [[" ", D] || D <- Deps], "; @touch \$$@\n"] || {F, Deps} <- Depend],
+ "\nCOMPILE_FIRST +=", [[" ", TargetPath(CF)] || CF <- CompileFirst], "\n"
+ ],
+ Output = case "é" of
+ [233] -> unicode:characters_to_binary(Output0);
+ _ -> Output0
+ end,
+ ok = file:write_file("$(1)", Output),
+ halt()
+endef
+
+ifeq ($(if $(NO_MAKEDEP),$(wildcard $(PROJECT).d),),)
+$(PROJECT).d:: $(ERL_FILES) $(call core_find,include/,*.hrl) $(MAKEFILE_LIST)
+ $(makedep_verbose) $(call erlang,$(call makedep.erl,$@))
+endif
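+
+# Setting NO_MAKEDEP=1 skips regenerating $(PROJECT).d when it already
+# exists, e.g.: make NO_MAKEDEP=1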
+
+ifeq ($(IS_APP)$(IS_DEP),)
+ifneq ($(words $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES)),0)
+# Rebuild everything when the Makefile changes.
+$(ERLANG_MK_TMP)/last-makefile-change: $(MAKEFILE_LIST) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES); \
+ touch -c $(PROJECT).d; \
+ fi
+ $(verbose) touch $@
+
+$(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES):: $(ERLANG_MK_TMP)/last-makefile-change
+ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change
+endif
+endif
+
+$(PROJECT).d::
+ $(verbose) :
+
+include $(wildcard $(PROJECT).d)
+
+ebin/$(PROJECT).app:: ebin/
+
+ebin/:
+ $(verbose) mkdir -p ebin/
+
+define compile_erl
+ $(erlc_verbose) erlc -v $(if $(IS_DEP),$(filter-out -Werror,$(ERLC_OPTS)),$(ERLC_OPTS)) -o ebin/ \
+ -pa ebin/ -I include/ $(filter-out $(ERLC_EXCLUDE_PATHS),$(COMPILE_FIRST_PATHS) $(1))
+endef
+
+define validate_app_file
+ case file:consult("ebin/$(PROJECT).app") of
+ {ok, _} -> halt();
+ _ -> halt(1)
+ end
+endef
+
+ebin/$(PROJECT).app:: $(ERL_FILES) $(CORE_FILES) $(wildcard src/$(PROJECT).app.src)
+ $(eval FILES_TO_COMPILE := $(filter-out src/$(PROJECT).app.src,$?))
+ $(if $(strip $(FILES_TO_COMPILE)),$(call compile_erl,$(FILES_TO_COMPILE)))
+# Older git versions do not have the --first-parent flag. Do without in that case.
+ $(eval GITDESCRIBE := $(shell git describe --dirty --abbrev=7 --tags --always --first-parent 2>/dev/null \
+ || git describe --dirty --abbrev=7 --tags --always 2>/dev/null || true))
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(filter-out $(ERLC_EXCLUDE_PATHS),$(ERL_FILES) $(CORE_FILES) $(BEAM_FILES)))))))
+ifeq ($(wildcard src/$(PROJECT).app.src),)
+ $(app_verbose) printf '$(subst %,%%,$(subst $(newline),\n,$(subst ','\'',$(call app_file,$(GITDESCRIBE),$(MODULES)))))' \
+ > ebin/$(PROJECT).app
+ $(verbose) if ! $(call erlang,$(call validate_app_file)); then \
+ echo "The .app file produced is invalid. Please verify the value of PROJECT_ENV." >&2; \
+ exit 1; \
+ fi
+else
+ $(verbose) if [ -z "$$(grep -e '^[^%]*{\s*modules\s*,' src/$(PROJECT).app.src)" ]; then \
+ echo "Empty modules entry not found in $(PROJECT).app.src. Please consult the erlang.mk documentation for instructions." >&2; \
+ exit 1; \
+ fi
+ $(appsrc_verbose) cat src/$(PROJECT).app.src \
+ | sed "s/{[[:space:]]*modules[[:space:]]*,[[:space:]]*\[\]}/{modules, \[$(call comma_list,$(MODULES))\]}/" \
+ | sed "s/{id,[[:space:]]*\"git\"}/{id, \"$(subst /,\/,$(GITDESCRIBE))\"}/" \
+ > ebin/$(PROJECT).app
+endif
+ifneq ($(wildcard src/$(PROJECT).appup),)
+ $(verbose) cp src/$(PROJECT).appup ebin/
+endif
+
+clean:: clean-app
+
+clean-app:
+ $(gen_verbose) rm -rf $(PROJECT).d ebin/ priv/mibs/ $(XRL_ERL_FILES) $(YRL_ERL_FILES) \
+ $(addprefix include/,$(patsubst %.mib,%.hrl,$(notdir $(MIB_FILES)))) \
+ $(addprefix include/,$(patsubst %.asn1,%.hrl,$(notdir $(ASN1_FILES)))) \
+ $(addprefix include/,$(patsubst %.asn1,%.asn1db,$(notdir $(ASN1_FILES)))) \
+ $(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES))))
+
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Viktor Söderqvist <viktor@zuiderkwast.se>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: docs-deps
+
+# Configuration.
+
+ALL_DOC_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(DOC_DEPS))
+
+# Targets.
+
+$(foreach dep,$(DOC_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+doc-deps:
+else
+doc-deps: $(ALL_DOC_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_DOC_DEPS_DIRS) ; do $(MAKE) -C $$dep IS_DEP=1; done
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: rel-deps
+
+# Configuration.
+
+ALL_REL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(REL_DEPS))
+
+# Targets.
+
+$(foreach dep,$(REL_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+rel-deps:
+else
+rel-deps: $(ALL_REL_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_REL_DEPS_DIRS) ; do $(MAKE) -C $$dep; done
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: test-deps test-dir test-build clean-test-dir
+
+# Configuration.
+
+TEST_DIR ?= $(CURDIR)/test
+
+ALL_TEST_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(TEST_DEPS))
+
+TEST_ERLC_OPTS ?= +debug_info +warn_export_vars +warn_shadow_vars +warn_obsolete_guard
+TEST_ERLC_OPTS += -DTEST=1
+
+# Targets.
+
+$(foreach dep,$(TEST_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+test-deps:
+else
+test-deps: $(ALL_TEST_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_TEST_DEPS_DIRS) ; do \
+ if [ -z "$(strip $(FULL))" ] && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ else \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ fi \
+ done
+endif
+
+ifneq ($(wildcard $(TEST_DIR)),)
+test-dir: $(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build
+ @:
+
+test_erlc_verbose_0 = @echo " ERLC " $(filter-out $(patsubst %,%.erl,$(ERLC_EXCLUDE)),\
+ $(filter %.erl %.core,$(notdir $(FILES_TO_COMPILE))));
+test_erlc_verbose_2 = set -x;
+test_erlc_verbose = $(test_erlc_verbose_$(V))
+
+define compile_test_erl
+ $(test_erlc_verbose) erlc -v $(TEST_ERLC_OPTS) -o $(TEST_DIR) \
+ -pa ebin/ -I include/ $(1)
+endef
+
+ERL_TEST_FILES = $(call core_find,$(TEST_DIR)/,*.erl)
+$(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build: $(ERL_TEST_FILES) $(MAKEFILE_LIST)
+ $(eval FILES_TO_COMPILE := $(if $(filter $(MAKEFILE_LIST),$?),$(filter $(ERL_TEST_FILES),$^),$?))
+ $(if $(strip $(FILES_TO_COMPILE)),$(call compile_test_erl,$(FILES_TO_COMPILE)) && touch $@)
+endif
+
+test-build:: IS_TEST=1
+test-build:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build:: $(if $(wildcard src),$(if $(wildcard ebin/test),,clean)) $(if $(IS_APP),,deps test-deps)
+# We already compiled everything when IS_APP=1.
+ifndef IS_APP
+ifneq ($(wildcard src),)
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(verbose) $(MAKE) --no-print-directory app-build ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(gen_verbose) touch ebin/test
+endif
+ifneq ($(wildcard $(TEST_DIR)),)
+ $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+endif
+endif
+
+# Roughly the same as test-build, but when IS_APP=1.
+# We only care about compiling the current application.
+ifdef IS_APP
+test-build-app:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build-app:: deps test-deps
+ifneq ($(wildcard src),)
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(verbose) $(MAKE) --no-print-directory app-build ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(gen_verbose) touch ebin/test
+endif
+ifneq ($(wildcard $(TEST_DIR)),)
+ $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+endif
+endif
+
+clean:: clean-test-dir
+
+clean-test-dir:
+ifneq ($(wildcard $(TEST_DIR)/*.beam),)
+ $(gen_verbose) rm -f $(TEST_DIR)/*.beam $(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: rebar.config
+
+# We strip out -Werror because we don't want to fail due to
+# warnings when used as a dependency.
+
+compat_prepare_erlc_opts = $(shell echo "$1" | sed 's/, */,/g')
+
+define compat_convert_erlc_opts
+$(if $(filter-out -Werror,$1),\
+ $(if $(findstring +,$1),\
+ $(shell echo $1 | cut -b 2-)))
+endef
+
+define compat_erlc_opts_to_list
+[$(call comma_list,$(foreach o,$(call compat_prepare_erlc_opts,$1),$(call compat_convert_erlc_opts,$o)))]
+endef
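+
+# For example, ERLC_OPTS = -Werror +debug_info +warn_export_vars
+# converts to [debug_info,warn_export_vars].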
+
+define compat_rebar_config
+{deps, [
+$(call comma_list,$(foreach d,$(DEPS),\
+ $(if $(filter hex,$(call dep_fetch,$d)),\
+ {$(call dep_name,$d)$(comma)"$(call dep_repo,$d)"},\
+ {$(call dep_name,$d)$(comma)".*"$(comma){git,"$(call dep_repo,$d)"$(comma)"$(call dep_commit,$d)"}})))
+]}.
+{erl_opts, $(call compat_erlc_opts_to_list,$(ERLC_OPTS))}.
+endef
+
+rebar.config:
+ $(gen_verbose) $(call core_render,compat_rebar_config,rebar.config)
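+
+# For example, running "make rebar.config" in a project renders the
+# template above so the project can be consumed as a rebar dependency.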
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter asciideck,$(DEPS) $(DOC_DEPS)),asciideck)
+
+.PHONY: asciidoc asciidoc-guide asciidoc-manual install-asciidoc distclean-asciidoc-guide distclean-asciidoc-manual
+
+# Core targets.
+
+docs:: asciidoc
+
+distclean:: distclean-asciidoc-guide distclean-asciidoc-manual
+
+# Plugin-specific targets.
+
+asciidoc: asciidoc-guide asciidoc-manual
+
+# User guide.
+
+ifeq ($(wildcard doc/src/guide/book.asciidoc),)
+asciidoc-guide:
+else
+asciidoc-guide: distclean-asciidoc-guide doc-deps
+ a2x -v -f pdf doc/src/guide/book.asciidoc && mv doc/src/guide/book.pdf doc/guide.pdf
+ a2x -v -f chunked doc/src/guide/book.asciidoc && mv doc/src/guide/book.chunked/ doc/html/
+
+distclean-asciidoc-guide:
+ $(gen_verbose) rm -rf doc/html/ doc/guide.pdf
+endif
+
+# Man pages.
+
+ASCIIDOC_MANUAL_FILES := $(wildcard doc/src/manual/*.asciidoc)
+
+ifeq ($(ASCIIDOC_MANUAL_FILES),)
+asciidoc-manual:
+else
+
+# Configuration.
+
+MAN_INSTALL_PATH ?= /usr/local/share/man
+MAN_SECTIONS ?= 3 7
+MAN_PROJECT ?= $(shell echo $(PROJECT) | sed 's/^./\U&\E/')
+MAN_VERSION ?= $(PROJECT_VERSION)
+
+# Plugin-specific targets.
+
+define asciidoc2man.erl
+try
+ [begin
+ io:format(" ADOC ~s~n", [F]),
+ ok = asciideck:to_manpage(asciideck:parse_file(F), #{
+ compress => gzip,
+ outdir => filename:dirname(F),
+ extra2 => "$(MAN_PROJECT) $(MAN_VERSION)",
+ extra3 => "$(MAN_PROJECT) Function Reference"
+ })
+ end || F <- [$(shell echo $(addprefix $(comma)\",$(addsuffix \",$1)) | sed 's/^.//')]],
+ halt(0)
+catch C:E ->
+ io:format("Exception ~p:~p~nStacktrace: ~p~n", [C, E, erlang:get_stacktrace()]),
+ halt(1)
+end.
+endef
+
+asciidoc-manual:: doc-deps
+
+asciidoc-manual:: $(ASCIIDOC_MANUAL_FILES)
+ $(gen_verbose) $(call erlang,$(call asciidoc2man.erl,$?))
+ $(verbose) $(foreach s,$(MAN_SECTIONS),mkdir -p doc/man$s/ && mv doc/src/manual/*.$s.gz doc/man$s/;)
+
+install-docs:: install-asciidoc
+
+install-asciidoc: asciidoc-manual
+ $(foreach s,$(MAN_SECTIONS),\
+ mkdir -p $(MAN_INSTALL_PATH)/man$s/ && \
+ install -g `id -g` -o `id -u` -m 0644 doc/man$s/*.gz $(MAN_INSTALL_PATH)/man$s/;)
+
+distclean-asciidoc-manual:
+ $(gen_verbose) rm -rf $(addprefix doc/man,$(MAN_SECTIONS))
+endif
+endif
+
+# Copyright (c) 2014-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: bootstrap bootstrap-lib bootstrap-rel new list-templates
+
+# Core targets.
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Bootstrap targets:" \
+ " bootstrap Generate a skeleton of an OTP application" \
+ " bootstrap-lib Generate a skeleton of an OTP library" \
+ " bootstrap-rel Generate the files needed to build a release" \
+ " new-app in=NAME Create a new local OTP application NAME" \
+ " new-lib in=NAME Create a new local OTP library NAME" \
+ " new t=TPL n=NAME Generate a module NAME based on the template TPL" \
+ " new t=T n=N in=APP Generate a module NAME based on the template TPL in APP" \
+ " list-templates List available templates"
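+
+# For example (module name is illustrative), the following generates a
+# gen_server skeleton in src/my_server.erl:
+#
+#   make new t=gen_server n=my_server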
+
+# Bootstrap templates.
+
+define bs_appsrc
+{application, $p, [
+ {description, ""},
+ {vsn, "0.1.0"},
+ {id, "git"},
+ {modules, []},
+ {registered, []},
+ {applications, [
+ kernel,
+ stdlib
+ ]},
+ {mod, {$p_app, []}},
+ {env, []}
+]}.
+endef
+
+define bs_appsrc_lib
+{application, $p, [
+ {description, ""},
+ {vsn, "0.1.0"},
+ {id, "git"},
+ {modules, []},
+ {registered, []},
+ {applications, [
+ kernel,
+ stdlib
+ ]}
+]}.
+endef
+
+# To prevent autocompletion issues with ZSH, we add "include erlang.mk"
+# separately during the actual bootstrap.
+define bs_Makefile
+PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.1.0
+$(if $(SP),
+# Whitespace to be used when creating files from templates.
+SP = $(SP)
+)
+endef
+
+define bs_apps_Makefile
+PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.1.0
+$(if $(SP),
+# Whitespace to be used when creating files from templates.
+SP = $(SP)
+)
+# Make sure we know where the applications are located.
+ROOT_DIR ?= $(call core_relpath,$(dir $(ERLANG_MK_FILENAME)),$(APPS_DIR)/app)
+APPS_DIR ?= ..
+DEPS_DIR ?= $(call core_relpath,$(DEPS_DIR),$(APPS_DIR)/app)
+
+include $$(ROOT_DIR)/erlang.mk
+endef
+
+define bs_app
+-module($p_app).
+-behaviour(application).
+
+-export([start/2]).
+-export([stop/1]).
+
+start(_Type, _Args) ->
+ $p_sup:start_link().
+
+stop(_State) ->
+ ok.
+endef
+
+define bs_relx_config
+{release, {$p_release, "1"}, [$p, sasl, runtime_tools]}.
+{extended_start_script, true}.
+{sys_config, "config/sys.config"}.
+{vm_args, "config/vm.args"}.
+endef
+
+define bs_sys_config
+[
+].
+endef
+
+define bs_vm_args
+-name $p@127.0.0.1
+-setcookie $p
+-heart
+endef
+
+# Normal templates.
+
+define tpl_supervisor
+-module($(n)).
+-behaviour(supervisor).
+
+-export([start_link/0]).
+-export([init/1]).
+
+start_link() ->
+ supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+init([]) ->
+ Procs = [],
+ {ok, {{one_for_one, 1, 5}, Procs}}.
+endef
+
+define tpl_gen_server
+-module($(n)).
+-behaviour(gen_server).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_server.
+-export([init/1]).
+-export([handle_call/3]).
+-export([handle_cast/2]).
+-export([handle_info/2]).
+-export([terminate/2]).
+-export([code_change/3]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_server:start_link(?MODULE, [], []).
+
+%% gen_server.
+
+init([]) ->
+ {ok, #state{}}.
+
+handle_call(_Request, _From, State) ->
+ {reply, ignored, State}.
+
+handle_cast(_Msg, State) ->
+ {noreply, State}.
+
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+endef
+
+define tpl_module
+-module($(n)).
+-export([]).
+endef
+
+define tpl_cowboy_http
+-module($(n)).
+-behaviour(cowboy_http_handler).
+
+-export([init/3]).
+-export([handle/2]).
+-export([terminate/3]).
+
+-record(state, {
+}).
+
+init(_, Req, _Opts) ->
+ {ok, Req, #state{}}.
+
+handle(Req, State=#state{}) ->
+ {ok, Req2} = cowboy_req:reply(200, Req),
+ {ok, Req2, State}.
+
+terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_gen_fsm
+-module($(n)).
+-behaviour(gen_fsm).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_fsm.
+-export([init/1]).
+-export([state_name/2]).
+-export([handle_event/3]).
+-export([state_name/3]).
+-export([handle_sync_event/4]).
+-export([handle_info/3]).
+-export([terminate/3]).
+-export([code_change/4]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_fsm:start_link(?MODULE, [], []).
+
+%% gen_fsm.
+
+init([]) ->
+ {ok, state_name, #state{}}.
+
+state_name(_Event, StateData) ->
+ {next_state, state_name, StateData}.
+
+handle_event(_Event, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+state_name(_Event, _From, StateData) ->
+ {reply, ignored, state_name, StateData}.
+
+handle_sync_event(_Event, _From, StateName, StateData) ->
+ {reply, ignored, StateName, StateData}.
+
+handle_info(_Info, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+terminate(_Reason, _StateName, _StateData) ->
+ ok.
+
+code_change(_OldVsn, StateName, StateData, _Extra) ->
+ {ok, StateName, StateData}.
+endef
+
+define tpl_gen_statem
+-module($(n)).
+-behaviour(gen_statem).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_statem.
+-export([callback_mode/0]).
+-export([init/1]).
+-export([state_name/3]).
+-export([handle_event/4]).
+-export([terminate/3]).
+-export([code_change/4]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_statem:start_link(?MODULE, [], []).
+
+%% gen_statem.
+
+callback_mode() ->
+ state_functions.
+
+init([]) ->
+ {ok, state_name, #state{}}.
+
+state_name(_EventType, _EventData, StateData) ->
+ {next_state, state_name, StateData}.
+
+handle_event(_EventType, _EventData, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+terminate(_Reason, _StateName, _StateData) ->
+ ok.
+
+code_change(_OldVsn, StateName, StateData, _Extra) ->
+ {ok, StateName, StateData}.
+endef
+
+define tpl_cowboy_loop
+-module($(n)).
+-behaviour(cowboy_loop_handler).
+
+-export([init/3]).
+-export([info/3]).
+-export([terminate/3]).
+
+-record(state, {
+}).
+
+init(_, Req, _Opts) ->
+ {loop, Req, #state{}, 5000, hibernate}.
+
+info(_Info, Req, State) ->
+ {loop, Req, State, hibernate}.
+
+terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_cowboy_rest
+-module($(n)).
+
+-export([init/3]).
+-export([content_types_provided/2]).
+-export([get_html/2]).
+
+init(_, _Req, _Opts) ->
+ {upgrade, protocol, cowboy_rest}.
+
+content_types_provided(Req, State) ->
+ {[{{<<"text">>, <<"html">>, '*'}, get_html}], Req, State}.
+
+get_html(Req, State) ->
+ {<<"<html><body>This is REST!</body></html>">>, Req, State}.
+endef
+
+define tpl_cowboy_ws
+-module($(n)).
+-behaviour(cowboy_websocket_handler).
+
+-export([init/3]).
+-export([websocket_init/3]).
+-export([websocket_handle/3]).
+-export([websocket_info/3]).
+-export([websocket_terminate/3]).
+
+-record(state, {
+}).
+
+init(_, _, _) ->
+ {upgrade, protocol, cowboy_websocket}.
+
+websocket_init(_, Req, _Opts) ->
+ Req2 = cowboy_req:compact(Req),
+ {ok, Req2, #state{}}.
+
+websocket_handle({text, Data}, Req, State) ->
+ {reply, {text, Data}, Req, State};
+websocket_handle({binary, Data}, Req, State) ->
+ {reply, {binary, Data}, Req, State};
+websocket_handle(_Frame, Req, State) ->
+ {ok, Req, State}.
+
+websocket_info(_Info, Req, State) ->
+ {ok, Req, State}.
+
+websocket_terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_ranch_protocol
+-module($(n)).
+-behaviour(ranch_protocol).
+
+-export([start_link/4]).
+-export([init/4]).
+
+-type opts() :: [].
+-export_type([opts/0]).
+
+-record(state, {
+ socket :: inet:socket(),
+ transport :: module()
+}).
+
+start_link(Ref, Socket, Transport, Opts) ->
+ Pid = spawn_link(?MODULE, init, [Ref, Socket, Transport, Opts]),
+ {ok, Pid}.
+
+-spec init(ranch:ref(), inet:socket(), module(), opts()) -> ok.
+init(Ref, Socket, Transport, _Opts) ->
+ ok = ranch:accept_ack(Ref),
+ loop(#state{socket=Socket, transport=Transport}).
+
+loop(State) ->
+ loop(State).
+endef
+
+# Plugin-specific targets.
+
+ifndef WS
+ifdef SP
+WS = $(subst a,,a $(wordlist 1,$(SP),a a a a a a a a a a a a a a a a a a a a))
+else
+WS = $(tab)
+endif
+endif
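+
+# WS is the whitespace unit used when rendering the project templates
+# (defined earlier in this file); for example (hypothetical invocation):
+#
+#   make -f erlang.mk bootstrap SP=2
+#
+# would make WS two spaces instead of the default tab.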
+
+bootstrap:
+ifneq ($(wildcard src/),)
+ $(error Error: src/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(eval n := $(PROJECT)_sup)
+ $(verbose) $(call core_render,bs_Makefile,Makefile)
+ $(verbose) echo "include erlang.mk" >> Makefile
+ $(verbose) mkdir src/
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc,src/$(PROJECT).app.src)
+endif
+ $(verbose) $(call core_render,bs_app,src/$(PROJECT)_app.erl)
+ $(verbose) $(call core_render,tpl_supervisor,src/$(PROJECT)_sup.erl)
+
+bootstrap-lib:
+ifneq ($(wildcard src/),)
+ $(error Error: src/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(verbose) $(call core_render,bs_Makefile,Makefile)
+ $(verbose) echo "include erlang.mk" >> Makefile
+ $(verbose) mkdir src/
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc_lib,src/$(PROJECT).app.src)
+endif
+
+bootstrap-rel:
+ifneq ($(wildcard relx.config),)
+ $(error Error: relx.config already exists)
+endif
+ifneq ($(wildcard config/),)
+ $(error Error: config/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(verbose) $(call core_render,bs_relx_config,relx.config)
+ $(verbose) mkdir config/
+ $(verbose) $(call core_render,bs_sys_config,config/sys.config)
+ $(verbose) $(call core_render,bs_vm_args,config/vm.args)
+
+new-app:
+ifndef in
+ $(error Usage: $(MAKE) new-app in=APP)
+endif
+ifneq ($(wildcard $(APPS_DIR)/$in),)
+ $(error Error: Application $in already exists)
+endif
+ $(eval p := $(in))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(eval n := $(in)_sup)
+ $(verbose) mkdir -p $(APPS_DIR)/$p/src/
+ $(verbose) $(call core_render,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile)
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc,$(APPS_DIR)/$p/src/$p.app.src)
+endif
+ $(verbose) $(call core_render,bs_app,$(APPS_DIR)/$p/src/$p_app.erl)
+ $(verbose) $(call core_render,tpl_supervisor,$(APPS_DIR)/$p/src/$p_sup.erl)
+
+new-lib:
+ifndef in
+ $(error Usage: $(MAKE) new-lib in=APP)
+endif
+ifneq ($(wildcard $(APPS_DIR)/$in),)
+ $(error Error: Application $in already exists)
+endif
+ $(eval p := $(in))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(verbose) mkdir -p $(APPS_DIR)/$p/src/
+ $(verbose) $(call core_render,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile)
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc_lib,$(APPS_DIR)/$p/src/$p.app.src)
+endif
+
+new:
+ifeq ($(wildcard src/)$(in),)
+ $(error Error: src/ directory does not exist)
+endif
+ifndef t
+ $(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP])
+endif
+ifndef n
+ $(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP])
+endif
+ifdef in
+ $(verbose) $(call core_render,tpl_$(t),$(APPS_DIR)/$(in)/src/$(n).erl)
+else
+ $(verbose) $(call core_render,tpl_$(t),src/$(n).erl)
+endif
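+
+# For example (hypothetical name): "make new t=gen_statem n=my_statem"
+# renders the tpl_gen_statem template above into src/my_statem.erl.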
+
+list-templates:
+ $(verbose) echo Available templates:
+ $(verbose) printf " %s\n" $(sort $(patsubst tpl_%,%,$(filter tpl_%,$(.VARIABLES))))
+
+# Copyright (c) 2014-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: clean-c_src distclean-c_src-env
+
+# Configuration.
+
+C_SRC_DIR ?= $(CURDIR)/c_src
+C_SRC_ENV ?= $(C_SRC_DIR)/env.mk
+C_SRC_OUTPUT ?= $(CURDIR)/priv/$(PROJECT)
+C_SRC_TYPE ?= shared
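+
+# For example (hypothetical project): a NIF project could set, before
+# including erlang.mk:
+#
+#   C_SRC_OUTPUT = $(CURDIR)/priv/my_nif
+#   C_SRC_TYPE = shared
+#
+# which produces priv/my_nif.so on Linux (see the extension logic below).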
+
+# System type and C compiler/flags.
+
+ifeq ($(PLATFORM),msys2)
+ C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?= .exe
+ C_SRC_OUTPUT_SHARED_EXTENSION ?= .dll
+else
+ C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?=
+ C_SRC_OUTPUT_SHARED_EXTENSION ?= .so
+endif
+
+ifeq ($(C_SRC_TYPE),shared)
+ C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_SHARED_EXTENSION)
+else
+ C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_EXECUTABLE_EXTENSION)
+endif
+
+ifeq ($(PLATFORM),msys2)
+# We hardcode the compiler used on MSYS2. The default CC=cc does
+# not produce working code. Neither does the "gcc" MSYS2 package.
+ CC = /mingw64/bin/gcc
+ export CC
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),darwin)
+ CC ?= cc
+ CFLAGS ?= -O3 -std=c99 -arch x86_64 -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -arch x86_64 -Wall
+ LDFLAGS ?= -arch x86_64 -flat_namespace -undefined suppress
+else ifeq ($(PLATFORM),freebsd)
+ CC ?= cc
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),linux)
+ CC ?= gcc
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+endif
+
+ifneq ($(PLATFORM),msys2)
+ CFLAGS += -fPIC
+ CXXFLAGS += -fPIC
+endif
+
+CFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
+CXXFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
+
+LDLIBS += -L"$(ERL_INTERFACE_LIB_DIR)" -lei
+
+# Verbosity.
+
+c_verbose_0 = @echo " C " $(filter-out $(notdir $(MAKEFILE_LIST) $(C_SRC_ENV)),$(^F));
+c_verbose = $(c_verbose_$(V))
+
+cpp_verbose_0 = @echo " CPP " $(filter-out $(notdir $(MAKEFILE_LIST) $(C_SRC_ENV)),$(^F));
+cpp_verbose = $(cpp_verbose_$(V))
+
+link_verbose_0 = @echo " LD " $(@F);
+link_verbose = $(link_verbose_$(V))
+
+# Targets.
+
+ifeq ($(wildcard $(C_SRC_DIR)),)
+else ifneq ($(wildcard $(C_SRC_DIR)/Makefile),)
+app:: app-c_src
+
+test-build:: app-c_src
+
+app-c_src:
+ $(MAKE) -C $(C_SRC_DIR)
+
+clean::
+ $(MAKE) -C $(C_SRC_DIR) clean
+
+else
+
+ifeq ($(SOURCES),)
+SOURCES := $(sort $(foreach pat,*.c *.C *.cc *.cpp,$(call core_find,$(C_SRC_DIR)/,$(pat))))
+endif
+OBJECTS = $(addsuffix .o, $(basename $(SOURCES)))
+
+COMPILE_C = $(c_verbose) $(CC) $(CFLAGS) $(CPPFLAGS) -c
+COMPILE_CPP = $(cpp_verbose) $(CXX) $(CXXFLAGS) $(CPPFLAGS) -c
+
+app:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
+
+test-build:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
+
+$(C_SRC_OUTPUT_FILE): $(OBJECTS)
+ $(verbose) mkdir -p $(dir $@)
+ $(link_verbose) $(CC) $(OBJECTS) \
+ $(LDFLAGS) $(if $(filter $(C_SRC_TYPE),shared),-shared) $(LDLIBS) \
+ -o $(C_SRC_OUTPUT_FILE)
+
+$(OBJECTS): $(MAKEFILE_LIST) $(C_SRC_ENV)
+
+%.o: %.c
+ $(COMPILE_C) $(OUTPUT_OPTION) $<
+
+%.o: %.cc
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+%.o: %.C
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+%.o: %.cpp
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+clean:: clean-c_src
+
+clean-c_src:
+ $(gen_verbose) rm -f $(C_SRC_OUTPUT_FILE) $(OBJECTS)
+
+endif
+
+ifneq ($(wildcard $(C_SRC_DIR)),)
+ERL_ERTS_DIR = $(shell $(ERL) -eval 'io:format("~s~n", [code:lib_dir(erts)]), halt().')
+
+$(C_SRC_ENV):
+ $(verbose) $(ERL) -eval "file:write_file(\"$(call core_native_path,$(C_SRC_ENV))\", \
+ io_lib:format( \
+ \"# Generated by Erlang.mk. Edit at your own risk!~n~n\" \
+ \"ERTS_INCLUDE_DIR ?= ~s/erts-~s/include/~n\" \
+ \"ERL_INTERFACE_INCLUDE_DIR ?= ~s~n\" \
+ \"ERL_INTERFACE_LIB_DIR ?= ~s~n\" \
+ \"ERTS_DIR ?= $(ERL_ERTS_DIR)~n\", \
+ [code:root_dir(), erlang:system_info(version), \
+ code:lib_dir(erl_interface, include), \
+ code:lib_dir(erl_interface, lib)])), \
+ halt()."
+
+distclean:: distclean-c_src-env
+
+distclean-c_src-env:
+ $(gen_verbose) rm -f $(C_SRC_ENV)
+
+-include $(C_SRC_ENV)
+
+ifneq ($(ERL_ERTS_DIR),$(ERTS_DIR))
+$(shell rm -f $(C_SRC_ENV))
+endif
+endif
+
+# Templates.
+
+define bs_c_nif
+#include "erl_nif.h"
+
+static int loads = 0;
+
+static int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
+{
+ /* Initialize private data. */
+ *priv_data = NULL;
+
+ loads++;
+
+ return 0;
+}
+
+static int upgrade(ErlNifEnv* env, void** priv_data, void** old_priv_data, ERL_NIF_TERM load_info)
+{
+ /* Convert the private data to the new version. */
+ *priv_data = *old_priv_data;
+
+ loads++;
+
+ return 0;
+}
+
+static void unload(ErlNifEnv* env, void* priv_data)
+{
+ if (loads == 1) {
+ /* Destroy the private data. */
+ }
+
+ loads--;
+}
+
+static ERL_NIF_TERM hello(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ if (enif_is_atom(env, argv[0])) {
+ return enif_make_tuple2(env,
+ enif_make_atom(env, "hello"),
+ argv[0]);
+ }
+
+ return enif_make_tuple2(env,
+ enif_make_atom(env, "error"),
+ enif_make_atom(env, "badarg"));
+}
+
+static ErlNifFunc nif_funcs[] = {
+ {"hello", 1, hello}
+};
+
+ERL_NIF_INIT($n, nif_funcs, load, NULL, upgrade, unload)
+endef
+
+define bs_erl_nif
+-module($n).
+
+-export([hello/1]).
+
+-on_load(on_load/0).
+on_load() ->
+ PrivDir = case code:priv_dir(?MODULE) of
+ {error, _} ->
+ AppPath = filename:dirname(filename:dirname(code:which(?MODULE))),
+ filename:join(AppPath, "priv");
+ Path ->
+ Path
+ end,
+ erlang:load_nif(filename:join(PrivDir, atom_to_list(?MODULE)), 0).
+
+hello(_) ->
+ erlang:nif_error({not_loaded, ?MODULE}).
+endef
+
+new-nif:
+ifneq ($(wildcard $(C_SRC_DIR)/$n.c),)
+ $(error Error: $(C_SRC_DIR)/$n.c already exists)
+endif
+ifneq ($(wildcard src/$n.erl),)
+ $(error Error: src/$n.erl already exists)
+endif
+ifndef n
+ $(error Usage: $(MAKE) new-nif n=NAME [in=APP])
+endif
+ifdef in
+ $(verbose) $(MAKE) -C $(APPS_DIR)/$(in)/ new-nif n=$n in=
+else
+ $(verbose) mkdir -p $(C_SRC_DIR) src/
+ $(verbose) $(call core_render,bs_c_nif,$(C_SRC_DIR)/$n.c)
+ $(verbose) $(call core_render,bs_erl_nif,src/$n.erl)
+endif
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: ci ci-prepare ci-setup
+
+CI_OTP ?=
+CI_HIPE ?=
+CI_ERLLVM ?=
+
+ifeq ($(CI_VM),native)
+ERLC_OPTS += +native
+TEST_ERLC_OPTS += +native
+else ifeq ($(CI_VM),erllvm)
+ERLC_OPTS += +native +'{hipe, [to_llvm]}'
+TEST_ERLC_OPTS += +native +'{hipe, [to_llvm]}'
+endif
+
+ifeq ($(strip $(CI_OTP) $(CI_HIPE) $(CI_ERLLVM)),)
+ci::
+else
+
+ci:: $(addprefix ci-,$(CI_OTP) $(addsuffix -native,$(CI_HIPE)) $(addsuffix -erllvm,$(CI_ERLLVM)))
+
+ci-prepare: $(addprefix $(KERL_INSTALL_DIR)/,$(CI_OTP) $(addsuffix -native,$(CI_HIPE)))
+
+ci-setup::
+ $(verbose) :
+
+ci-extra::
+ $(verbose) :
+
+ci_verbose_0 = @echo " CI " $(1);
+ci_verbose = $(ci_verbose_$(V))
+
+define ci_target
+ci-$1: $(KERL_INSTALL_DIR)/$2
+ $(verbose) $(MAKE) --no-print-directory clean
+ $(ci_verbose) \
+ PATH="$(KERL_INSTALL_DIR)/$2/bin:$(PATH)" \
+ CI_OTP_RELEASE="$1" \
+ CT_OPTS="-label $1" \
+ CI_VM="$3" \
+ $(MAKE) ci-setup tests
+ $(verbose) $(MAKE) --no-print-directory ci-extra
+endef
+
+$(foreach otp,$(CI_OTP),$(eval $(call ci_target,$(otp),$(otp),otp)))
+$(foreach otp,$(CI_HIPE),$(eval $(call ci_target,$(otp)-native,$(otp)-native,native)))
+$(foreach otp,$(CI_ERLLVM),$(eval $(call ci_target,$(otp)-erllvm,$(otp)-native,erllvm)))
+
+$(foreach otp,$(filter-out $(ERLANG_OTP),$(CI_OTP)),$(eval $(call kerl_otp_target,$(otp))))
+$(foreach otp,$(filter-out $(ERLANG_HIPE),$(sort $(CI_HIPE) $(CI_ERLLVM))),$(eval $(call kerl_hipe_target,$(otp))))
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Continuous Integration targets:" \
+ " ci Run '$(MAKE) tests' on all configured Erlang versions." \
+ "" \
+ "The CI_OTP variable must be defined with the Erlang versions" \
+ "that must be tested. For example: CI_OTP = OTP-17.3.4 OTP-17.5.3"
+
+endif
+
+# Copyright (c) 2020, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifdef CONCUERROR_TESTS
+
+.PHONY: concuerror distclean-concuerror
+
+# Configuration
+
+CONCUERROR_LOGS_DIR ?= $(CURDIR)/logs
+CONCUERROR_OPTS ?=
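+
+# CONCUERROR_TESTS is a list of module:function pairs, for example
+# (hypothetical names):
+#
+#   CONCUERROR_TESTS = my_mod:my_test other_mod:another_test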
+
+# Core targets.
+
+check:: concuerror
+
+ifndef KEEP_LOGS
+distclean:: distclean-concuerror
+endif
+
+# Plugin-specific targets.
+
+$(ERLANG_MK_TMP)/Concuerror/bin/concuerror: | $(ERLANG_MK_TMP)
+ $(verbose) git clone https://github.com/parapluu/Concuerror $(ERLANG_MK_TMP)/Concuerror
+ $(verbose) $(MAKE) -C $(ERLANG_MK_TMP)/Concuerror
+
+$(CONCUERROR_LOGS_DIR):
+ $(verbose) mkdir -p $(CONCUERROR_LOGS_DIR)
+
+define concuerror_html_report
+<!DOCTYPE html>
+<html lang="en">
+<head>
+<meta charset="utf-8">
+<title>Concuerror HTML report</title>
+</head>
+<body>
+<h1>Concuerror HTML report</h1>
+<p>Generated on $(concuerror_date)</p>
+<ul>
+$(foreach t,$(concuerror_targets),<li><a href="$(t).txt">$(t)</a></li>)
+</ul>
+</body>
+</html>
+endef
+
+concuerror: $(addprefix concuerror-,$(subst :,-,$(CONCUERROR_TESTS)))
+ $(eval concuerror_date := $(shell date))
+ $(eval concuerror_targets := $^)
+ $(verbose) $(call core_render,concuerror_html_report,$(CONCUERROR_LOGS_DIR)/concuerror.html)
+
+define concuerror_target
+.PHONY: concuerror-$1-$2
+
+concuerror-$1-$2: test-build | $(ERLANG_MK_TMP)/Concuerror/bin/concuerror $(CONCUERROR_LOGS_DIR)
+ $(ERLANG_MK_TMP)/Concuerror/bin/concuerror \
+ --pa $(CURDIR)/ebin --pa $(TEST_DIR) \
+ -o $(CONCUERROR_LOGS_DIR)/concuerror-$1-$2.txt \
+ $$(CONCUERROR_OPTS) -m $1 -t $2
+endef
+
+$(foreach test,$(CONCUERROR_TESTS),$(eval $(call concuerror_target,$(firstword $(subst :, ,$(test))),$(lastword $(subst :, ,$(test))))))
+
+distclean-concuerror:
+ $(gen_verbose) rm -rf $(CONCUERROR_LOGS_DIR)
+
+endif
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: ct apps-ct distclean-ct
+
+# Configuration.
+
+CT_OPTS ?=
+
+ifneq ($(wildcard $(TEST_DIR)),)
+ifndef CT_SUITES
+CT_SUITES := $(sort $(subst _SUITE.erl,,$(notdir $(call core_find,$(TEST_DIR)/,*_SUITE.erl))))
+endif
+endif
+CT_SUITES ?=
+CT_LOGS_DIR ?= $(CURDIR)/logs
+
+# Core targets.
+
+tests:: ct
+
+ifndef KEEP_LOGS
+distclean:: distclean-ct
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Common_test targets:" \
+ " ct Run all the common_test suites for this project" \
+ "" \
+ "Each of your common_test suites has an associated target." \
+ "A suite named http_SUITE can be run using the ct-http target."
+
+# Plugin-specific targets.
+
+CT_RUN = ct_run \
+ -no_auto_compile \
+ -noinput \
+ -pa $(CURDIR)/ebin $(TEST_DIR) \
+ -dir $(TEST_DIR) \
+ -logdir $(CT_LOGS_DIR)
+
+ifeq ($(CT_SUITES),)
+ct: $(if $(IS_APP)$(ROOT_DIR),,apps-ct)
+else
+# We do not run tests if we are in an apps/* application with no test directory.
+ifneq ($(IS_APP)$(wildcard $(TEST_DIR)),1)
+ct: test-build $(if $(IS_APP)$(ROOT_DIR),,apps-ct)
+ $(verbose) mkdir -p $(CT_LOGS_DIR)
+ $(gen_verbose) $(CT_RUN) -sname ct_$(PROJECT) -suite $(addsuffix _SUITE,$(CT_SUITES)) $(CT_OPTS)
+endif
+endif
+
+ifneq ($(ALL_APPS_DIRS),)
+define ct_app_target
+apps-ct-$1: test-build
+ $$(MAKE) -C $1 ct IS_APP=1
+endef
+
+$(foreach app,$(ALL_APPS_DIRS),$(eval $(call ct_app_target,$(app))))
+
+apps-ct: $(addprefix apps-ct-,$(ALL_APPS_DIRS))
+endif
+
+ifdef t
+ifeq (,$(findstring :,$t))
+CT_EXTRA = -group $t
+else
+t_words = $(subst :, ,$t)
+CT_EXTRA = -group $(firstword $(t_words)) -case $(lastword $(t_words))
+endif
+else
+ifdef c
+CT_EXTRA = -case $c
+else
+CT_EXTRA =
+endif
+endif
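+
+# For example (hypothetical group/case names), for a suite named http_SUITE:
+#
+#   make ct-http t=my_group            # run one group
+#   make ct-http t=my_group:my_case    # run one case within a group
+#   make ct-http c=my_case             # run one case directly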
+
+define ct_suite_target
+ct-$(1): test-build
+ $(verbose) mkdir -p $(CT_LOGS_DIR)
+ $(gen_verbose_esc) $(CT_RUN) -sname ct_$(PROJECT) -suite $(addsuffix _SUITE,$(1)) $(CT_EXTRA) $(CT_OPTS)
+endef
+
+$(foreach test,$(CT_SUITES),$(eval $(call ct_suite_target,$(test))))
+
+distclean-ct:
+ $(gen_verbose) rm -rf $(CT_LOGS_DIR)
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: plt distclean-plt dialyze
+
+# Configuration.
+
+DIALYZER_PLT ?= $(CURDIR)/.$(PROJECT).plt
+export DIALYZER_PLT
+
+PLT_APPS ?=
+DIALYZER_DIRS ?= --src -r $(wildcard src) $(ALL_APPS_DIRS)
+DIALYZER_OPTS ?= -Werror_handling -Wrace_conditions -Wunmatched_returns # -Wunderspecs
+DIALYZER_PLT_OPTS ?=
+
+# Core targets.
+
+check:: dialyze
+
+distclean:: distclean-plt
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Dialyzer targets:" \
+ " plt Build a PLT file for this project" \
+ " dialyze Analyze the project using Dialyzer"
+
+# Plugin-specific targets.
+
+define filter_opts.erl
+ Opts = init:get_plain_arguments(),
+ {Filtered, _} = lists:foldl(fun
+ (O, {Os, true}) -> {[O|Os], false};
+ (O = "-D", {Os, _}) -> {[O|Os], true};
+ (O = [\\$$-, \\$$D, _ | _], {Os, _}) -> {[O|Os], false};
+ (O = "-I", {Os, _}) -> {[O|Os], true};
+ (O = [\\$$-, \\$$I, _ | _], {Os, _}) -> {[O|Os], false};
+ (O = "-pa", {Os, _}) -> {[O|Os], true};
+ (_, Acc) -> Acc
+ end, {[], false}, Opts),
+ io:format("~s~n", [string:join(lists:reverse(Filtered), " ")]),
+ halt().
+endef
+
+# DIALYZER_PLT is a variable understood directly by Dialyzer.
+#
+# We append the path to erts at the end of the PLT. This works
+# because the PLT file is in the external term format and the
+# function binary_to_term/1 ignores any trailing data.
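+#
+# For example, the last line of the PLT might read (hypothetical path):
+#
+#   /usr/lib/erlang/lib/erts-11.1
+#
+# The dialyze target below compares that line against the current erts
+# location and rebuilds the PLT when they differ.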
+$(DIALYZER_PLT): deps app
+ $(eval DEPS_LOG := $(shell test -f $(ERLANG_MK_TMP)/deps.log && \
+ while read p; do test -d $$p/ebin && echo $$p/ebin; done <$(ERLANG_MK_TMP)/deps.log))
+ $(verbose) dialyzer --build_plt $(DIALYZER_PLT_OPTS) --apps \
+ erts kernel stdlib $(PLT_APPS) $(OTP_DEPS) $(LOCAL_DEPS) $(DEPS_LOG) || test $$? -eq 2
+ $(verbose) $(ERL) -eval 'io:format("~n~s~n", [code:lib_dir(erts)]), halt().' >> $@
+
+plt: $(DIALYZER_PLT)
+
+distclean-plt:
+ $(gen_verbose) rm -f $(DIALYZER_PLT)
+
+ifneq ($(wildcard $(DIALYZER_PLT)),)
+dialyze: $(if $(filter --src,$(DIALYZER_DIRS)),,deps app)
+ $(verbose) if ! tail -n1 $(DIALYZER_PLT) | \
+ grep -q "^`$(ERL) -eval 'io:format("~s", [code:lib_dir(erts)]), halt().'`$$"; then \
+ rm $(DIALYZER_PLT); \
+ $(MAKE) plt; \
+ fi
+else
+dialyze: $(DIALYZER_PLT)
+endif
+ $(verbose) dialyzer --no_native `$(ERL) \
+ -eval "$(subst $(newline),,$(call escape_dquotes,$(call filter_opts.erl)))" \
+ -extra $(ERLC_OPTS)` $(DIALYZER_DIRS) $(DIALYZER_OPTS) $(if $(wildcard ebin/),-pa ebin/)
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-edoc edoc
+
+# Configuration.
+
+EDOC_OPTS ?=
+EDOC_SRC_DIRS ?=
+EDOC_OUTPUT ?= doc
+
+define edoc.erl
+ SrcPaths = lists:foldl(fun(P, Acc) ->
+ filelib:wildcard(atom_to_list(P) ++ "/{src,c_src}") ++ Acc
+ end, [], [$(call comma_list,$(patsubst %,'%',$(call core_native_path,$(EDOC_SRC_DIRS))))]),
+ DefaultOpts = [{dir, "$(EDOC_OUTPUT)"}, {source_path, SrcPaths}, {subpackages, false}],
+ edoc:application($(1), ".", [$(2)] ++ DefaultOpts),
+ halt(0).
+endef
+
+# Core targets.
+
+ifneq ($(strip $(EDOC_SRC_DIRS)$(wildcard doc/overview.edoc)),)
+docs:: edoc
+endif
+
+distclean:: distclean-edoc
+
+# Plugin-specific targets.
+
+edoc: distclean-edoc doc-deps
+ $(gen_verbose) $(call erlang,$(call edoc.erl,$(PROJECT),$(EDOC_OPTS)))
+
+distclean-edoc:
+ $(gen_verbose) rm -f $(EDOC_OUTPUT)/*.css $(EDOC_OUTPUT)/*.html $(EDOC_OUTPUT)/*.png $(EDOC_OUTPUT)/edoc-info
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Configuration.
+
+DTL_FULL_PATH ?=
+DTL_PATH ?= templates/
+DTL_PREFIX ?=
+DTL_SUFFIX ?= _dtl
+DTL_OPTS ?=
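+
+# For example (hypothetical file names): with the defaults above,
+# templates/my_page.dtl compiles to the module my_page_dtl; with
+# DTL_FULL_PATH set, templates/admin/my_page.dtl would become
+# admin_my_page_dtl rather than my_page_dtl.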
+
+# Verbosity.
+
+dtl_verbose_0 = @echo " DTL " $(filter %.dtl,$(?F));
+dtl_verbose = $(dtl_verbose_$(V))
+
+# Core targets.
+
+DTL_PATH := $(abspath $(DTL_PATH))
+DTL_FILES := $(sort $(call core_find,$(DTL_PATH),*.dtl))
+
+ifneq ($(DTL_FILES),)
+
+DTL_NAMES = $(addprefix $(DTL_PREFIX),$(addsuffix $(DTL_SUFFIX),$(DTL_FILES:$(DTL_PATH)/%.dtl=%)))
+DTL_MODULES = $(if $(DTL_FULL_PATH),$(subst /,_,$(DTL_NAMES)),$(notdir $(DTL_NAMES)))
+BEAM_FILES += $(addsuffix .beam,$(addprefix ebin/,$(DTL_MODULES)))
+
+ifneq ($(words $(DTL_FILES)),0)
+# Rebuild templates when the Makefile changes.
+$(ERLANG_MK_TMP)/last-makefile-change-erlydtl: $(MAKEFILE_LIST) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(DTL_FILES); \
+ fi
+ $(verbose) touch $@
+
+ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change-erlydtl
+endif
+
+define erlydtl_compile.erl
+ [begin
+ Module0 = case "$(strip $(DTL_FULL_PATH))" of
+ "" ->
+ filename:basename(F, ".dtl");
+ _ ->
+ "$(call core_native_path,$(DTL_PATH))/" ++ F2 = filename:rootname(F, ".dtl"),
+ re:replace(F2, "/", "_", [{return, list}, global])
+ end,
+ Module = list_to_atom("$(DTL_PREFIX)" ++ string:to_lower(Module0) ++ "$(DTL_SUFFIX)"),
+ case erlydtl:compile(F, Module, [$(DTL_OPTS)] ++ [{out_dir, "ebin/"}, return_errors]) of
+ ok -> ok;
+ {ok, _} -> ok
+ end
+ end || F <- string:tokens("$(1)", " ")],
+ halt().
+endef
+
+ebin/$(PROJECT).app:: $(DTL_FILES) | ebin/
+ $(if $(strip $?),\
+ $(dtl_verbose) $(call erlang,$(call erlydtl_compile.erl,$(call core_native_path,$?)),\
+ -pa ebin/))
+
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, Dave Cottlehuber <dch@skunkwerks.at>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-escript escript escript-zip
+
+# Configuration.
+
+ESCRIPT_NAME ?= $(PROJECT)
+ESCRIPT_FILE ?= $(ESCRIPT_NAME)
+
+ESCRIPT_SHEBANG ?= /usr/bin/env escript
+ESCRIPT_COMMENT ?= This is an -*- erlang -*- file
+ESCRIPT_EMU_ARGS ?= -escript main $(ESCRIPT_NAME)
+
+ESCRIPT_ZIP ?= 7z a -tzip -mx=9 -mtc=off $(if $(filter-out 0,$(V)),,> /dev/null)
+ESCRIPT_ZIP_FILE ?= $(ERLANG_MK_TMP)/escript.zip
+
+# Core targets.
+
+distclean:: distclean-escript
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Escript targets:" \
+ " escript Build an executable escript archive"
+
+# Plugin-specific targets.
+
+escript-zip:: FULL=1
+escript-zip:: deps app
+ $(verbose) mkdir -p $(dir $(ESCRIPT_ZIP))
+ $(verbose) rm -f $(ESCRIPT_ZIP_FILE)
+ $(gen_verbose) cd .. && $(ESCRIPT_ZIP) $(ESCRIPT_ZIP_FILE) $(PROJECT)/ebin/*
+ifneq ($(DEPS),)
+ $(verbose) cd $(DEPS_DIR) && $(ESCRIPT_ZIP) $(ESCRIPT_ZIP_FILE) \
+ $(subst $(DEPS_DIR)/,,$(addsuffix /*,$(wildcard \
+ $(addsuffix /ebin,$(shell cat $(ERLANG_MK_TMP)/deps.log)))))
+endif
+
+escript:: escript-zip
+ $(gen_verbose) printf "%s\n" \
+ "#!$(ESCRIPT_SHEBANG)" \
+ "%% $(ESCRIPT_COMMENT)" \
+ "%%! $(ESCRIPT_EMU_ARGS)" > $(ESCRIPT_FILE)
+ $(verbose) cat $(ESCRIPT_ZIP_FILE) >> $(ESCRIPT_FILE)
+ $(verbose) chmod +x $(ESCRIPT_FILE)
+
+distclean-escript:
+ $(gen_verbose) rm -f $(ESCRIPT_FILE)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, Enrique Fernandez <enrique.fernandez@erlang-solutions.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: eunit apps-eunit
+
+# Configuration
+
+EUNIT_OPTS ?=
+EUNIT_ERL_OPTS ?=
+
+# Core targets.
+
+tests:: eunit
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "EUnit targets:" \
+ " eunit Run all the EUnit tests for this project"
+
+# Plugin-specific targets.
+
+define eunit.erl
+ $(call cover.erl)
+ CoverSetup(),
+ case eunit:test($1, [$(EUNIT_OPTS)]) of
+ ok -> ok;
+ error -> halt(2)
+ end,
+ CoverExport("$(call core_native_path,$(COVER_DATA_DIR))/eunit.coverdata"),
+ halt()
+endef
+
+EUNIT_ERL_OPTS += -pa $(TEST_DIR) $(CURDIR)/ebin
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+eunit: test-build cover-data-dir
+ $(gen_verbose) $(call erlang,$(call eunit.erl,['$(t)']),$(EUNIT_ERL_OPTS))
+else
+eunit: test-build cover-data-dir
+ $(gen_verbose) $(call erlang,$(call eunit.erl,fun $(t)/0),$(EUNIT_ERL_OPTS))
+endif
+else
+EUNIT_EBIN_MODS = $(notdir $(basename $(ERL_FILES) $(BEAM_FILES)))
+EUNIT_TEST_MODS = $(notdir $(basename $(call core_find,$(TEST_DIR)/,*.erl)))
+
+EUNIT_MODS = $(foreach mod,$(EUNIT_EBIN_MODS) $(filter-out \
+ $(patsubst %,%_tests,$(EUNIT_EBIN_MODS)),$(EUNIT_TEST_MODS)),'$(mod)')
+
+eunit: test-build $(if $(IS_APP)$(ROOT_DIR),,apps-eunit) cover-data-dir
+ifneq ($(wildcard src/ $(TEST_DIR)),)
+ $(gen_verbose) $(call erlang,$(call eunit.erl,[$(call comma_list,$(EUNIT_MODS))]),$(EUNIT_ERL_OPTS))
+endif
+
+ifneq ($(ALL_APPS_DIRS),)
+apps-eunit: test-build
+ $(verbose) eunit_retcode=0 ; for app in $(ALL_APPS_DIRS); do $(MAKE) -C $$app eunit IS_APP=1; \
+ [ $$? -ne 0 ] && eunit_retcode=1 ; done ; \
+ exit $$eunit_retcode
+endif
+endif
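+
+# For example (hypothetical module/function names):
+#
+#   make eunit t=my_module          # run the EUnit tests of one module
+#   make eunit t=my_module:my_test  # run a single test function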
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter proper,$(DEPS) $(TEST_DEPS)),proper)
+.PHONY: proper
+
+# Targets.
+
+tests:: proper
+
+define proper_check.erl
+ $(call cover.erl)
+ code:add_pathsa([
+ "$(call core_native_path,$(CURDIR)/ebin)",
+ "$(call core_native_path,$(DEPS_DIR)/*/ebin)",
+ "$(call core_native_path,$(TEST_DIR))"]),
+ Module = fun(M) ->
+ [true] =:= lists:usort([
+ case atom_to_list(F) of
+ "prop_" ++ _ ->
+ io:format("Testing ~p:~p/0~n", [M, F]),
+ proper:quickcheck(M:F(), nocolors);
+ _ ->
+ true
+ end
+ || {F, 0} <- M:module_info(exports)])
+ end,
+ try begin
+ CoverSetup(),
+ Res = case $(1) of
+ all -> [true] =:= lists:usort([Module(M) || M <- [$(call comma_list,$(3))]]);
+ module -> Module($(2));
+ function -> proper:quickcheck($(2), nocolors)
+ end,
+ CoverExport("$(COVER_DATA_DIR)/proper.coverdata"),
+ Res
+ end of
+ true -> halt(0);
+ _ -> halt(1)
+ catch error:undef ->
+ io:format("Undefined property or module?~n~p~n", [erlang:get_stacktrace()]),
+ halt(0)
+ end.
+endef
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+proper: test-build cover-data-dir
+ $(verbose) $(call erlang,$(call proper_check.erl,module,$(t)))
+else
+proper: test-build cover-data-dir
+ $(verbose) echo Testing $(t)/0
+ $(verbose) $(call erlang,$(call proper_check.erl,function,$(t)()))
+endif
+else
+proper: test-build cover-data-dir
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(wildcard ebin/*.beam) $(call core_find,$(TEST_DIR)/,*.beam))))))
+ $(gen_verbose) $(call erlang,$(call proper_check.erl,all,undefined,$(MODULES)))
+endif
+endif
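+
+# For example (hypothetical names): "make proper t=prop_mod" checks every
+# prop_* property exported by prop_mod, while
+# "make proper t=prop_mod:prop_foo" quickchecks that single property.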
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Verbosity.
+
+proto_verbose_0 = @echo " PROTO " $(filter %.proto,$(?F));
+proto_verbose = $(proto_verbose_$(V))
+
+# Core targets.
+
+ifneq ($(wildcard src/),)
+ifneq ($(filter gpb protobuffs,$(BUILD_DEPS) $(DEPS)),)
+PROTO_FILES := $(filter %.proto,$(ALL_SRC_FILES))
+ERL_FILES += $(addprefix src/,$(patsubst %.proto,%_pb.erl,$(notdir $(PROTO_FILES))))
+
+ifeq ($(PROTO_FILES),)
+$(ERLANG_MK_TMP)/last-makefile-change-protobuffs:
+ $(verbose) :
+else
+# Rebuild proto files when the Makefile changes.
+# We exclude $(PROJECT).d to avoid a circular dependency.
+$(ERLANG_MK_TMP)/last-makefile-change-protobuffs: $(filter-out $(PROJECT).d,$(MAKEFILE_LIST)) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(PROTO_FILES); \
+ fi
+ $(verbose) touch $@
+
+$(PROJECT).d:: $(ERLANG_MK_TMP)/last-makefile-change-protobuffs
+endif
+
+ifeq ($(filter gpb,$(BUILD_DEPS) $(DEPS)),)
+define compile_proto.erl
+ [begin
+ protobuffs_compile:generate_source(F, [
+ {output_include_dir, "./include"},
+ {output_src_dir, "./src"}])
+ end || F <- string:tokens("$1", " ")],
+ halt().
+endef
+else
+define compile_proto.erl
+ [begin
+ gpb_compile:file(F, [
+ {include_as_lib, true},
+ {module_name_suffix, "_pb"},
+ {o_hrl, "./include"},
+ {o_erl, "./src"}])
+ end || F <- string:tokens("$1", " ")],
+ halt().
+endef
+endif
+
+ifneq ($(PROTO_FILES),)
+$(PROJECT).d:: $(PROTO_FILES)
+ $(verbose) mkdir -p ebin/ include/
+ $(if $(strip $?),$(proto_verbose) $(call erlang,$(call compile_proto.erl,$?)))
+endif
+endif
+endif
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: relx-rel relx-relup distclean-relx-rel run
+
+# Configuration.
+
+RELX ?= $(ERLANG_MK_TMP)/relx
+RELX_CONFIG ?= $(CURDIR)/relx.config
+
+RELX_URL ?= https://erlang.mk/res/relx-v3.27.0
+RELX_OPTS ?=
+RELX_OUTPUT_DIR ?= _rel
+RELX_REL_EXT ?=
+RELX_TAR ?= 1
+
+ifdef SFX
+ RELX_TAR = 1
+endif
+
+ifeq ($(firstword $(RELX_OPTS)),-o)
+ RELX_OUTPUT_DIR = $(word 2,$(RELX_OPTS))
+else
+ RELX_OPTS += -o $(RELX_OUTPUT_DIR)
+endif
+
+# Core targets.
+
+ifeq ($(IS_DEP),)
+ifneq ($(wildcard $(RELX_CONFIG)),)
+rel:: relx-rel
+
+relup:: relx-relup
+endif
+endif
+
+distclean:: distclean-relx-rel
+
+# Plugin-specific targets.
+
+$(RELX): | $(ERLANG_MK_TMP)
+ $(gen_verbose) $(call core_http_get,$(RELX),$(RELX_URL))
+ $(verbose) chmod +x $(RELX)
+
+relx-rel: $(RELX) rel-deps app
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) release
+ $(verbose) $(MAKE) relx-post-rel
+ifeq ($(RELX_TAR),1)
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) tar
+endif
+
+relx-relup: $(RELX) rel-deps app
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) release
+ $(MAKE) relx-post-rel
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) relup $(if $(filter 1,$(RELX_TAR)),tar)
+
+distclean-relx-rel:
+ $(gen_verbose) rm -rf $(RELX_OUTPUT_DIR)
+
+# Default hooks.
+relx-post-rel::
+ $(verbose) :
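+
+# Being a double-colon rule, relx-post-rel can be extended by projects;
+# for example (hypothetical file name):
+#
+#   relx-post-rel::
+#           cp extra.config $(RELX_OUTPUT_DIR)/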
+
+# Run target.
+
+ifeq ($(wildcard $(RELX_CONFIG)),)
+run::
+else
+
+define get_relx_release.erl
+ {ok, Config} = file:consult("$(call core_native_path,$(RELX_CONFIG))"),
+ {release, {Name, Vsn0}, _} = lists:keyfind(release, 1, Config),
+ Vsn = case Vsn0 of
+ {cmd, Cmd} -> os:cmd(Cmd);
+ semver -> "";
+ {semver, _} -> "";
+ VsnStr -> Vsn0
+ end,
+ Extended = case lists:keyfind(extended_start_script, 1, Config) of
+ {_, true} -> "1";
+ _ -> ""
+ end,
+ io:format("~s ~s ~s", [Name, Vsn, Extended]),
+ halt(0).
+endef
+
+RELX_REL := $(shell $(call erlang,$(get_relx_release.erl)))
+RELX_REL_NAME := $(word 1,$(RELX_REL))
+RELX_REL_VSN := $(word 2,$(RELX_REL))
+RELX_REL_CMD := $(if $(word 3,$(RELX_REL)),console)
+
+ifeq ($(PLATFORM),msys2)
+RELX_REL_EXT := .cmd
+endif
+
+run:: all
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) $(RELX_REL_CMD)
+
+ifdef RELOAD
+rel::
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) ping
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) \
+ eval "io:format(\"~p~n\", [c:lm()])"
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Relx targets:" \
+ " run Compile the project, build the release and run it"
+
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, M Robert Martin <rob@version2beta.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: shell
+
+# Configuration.
+
+SHELL_ERL ?= erl
+SHELL_PATHS ?= $(CURDIR)/ebin $(TEST_DIR)
+SHELL_OPTS ?=
+
+ALL_SHELL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(SHELL_DEPS))
+
+# Core targets
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Shell targets:" \
+ " shell Run an Erlang shell with SHELL_OPTS or a reasonable default"
+
+# Plugin-specific targets.
+
+$(foreach dep,$(SHELL_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+build-shell-deps:
+else
+build-shell-deps: $(ALL_SHELL_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_SHELL_DEPS_DIRS) ; do \
+ if [ -z "$(strip $(FULL))" ] && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ else \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ fi \
+ done
+endif
+
+shell:: build-shell-deps
+ $(gen_verbose) $(SHELL_ERL) -pa $(SHELL_PATHS) $(SHELL_OPTS)
+
+# Copyright 2017, Stanislaw Klekot <dozzie@jarowit.net>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-sphinx sphinx
+
+# Configuration.
+
+SPHINX_BUILD ?= sphinx-build
+SPHINX_SOURCE ?= doc
+SPHINX_CONFDIR ?=
+SPHINX_FORMATS ?= html
+SPHINX_DOCTREES ?= $(ERLANG_MK_TMP)/sphinx.doctrees
+SPHINX_OPTS ?=
+
+#sphinx_html_opts =
+#sphinx_html_output = html
+#sphinx_man_opts =
+#sphinx_man_output = man
+#sphinx_latex_opts =
+#sphinx_latex_output = latex
+
+# Helpers.
+
+sphinx_build_0 = @echo " SPHINX" $1; $(SPHINX_BUILD) -N -q
+sphinx_build_1 = $(SPHINX_BUILD) -N
+sphinx_build_2 = set -x; $(SPHINX_BUILD)
+sphinx_build = $(sphinx_build_$(V))
+
+define sphinx.build
+$(call sphinx_build,$1) -b $1 -d $(SPHINX_DOCTREES) $(if $(SPHINX_CONFDIR),-c $(SPHINX_CONFDIR)) $(SPHINX_OPTS) $(sphinx_$1_opts) -- $(SPHINX_SOURCE) $(call sphinx.output,$1)
+
+endef
+
+define sphinx.output
+$(if $(sphinx_$1_output),$(sphinx_$1_output),$1)
+endef
+
+# Targets.
+
+ifneq ($(wildcard $(if $(SPHINX_CONFDIR),$(SPHINX_CONFDIR),$(SPHINX_SOURCE))/conf.py),)
+docs:: sphinx
+distclean:: distclean-sphinx
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Sphinx targets:" \
+ " sphinx Generate Sphinx documentation." \
+ "" \
+ "ReST sources and the 'conf.py' file are expected in the directory pointed to by" \
+ "SPHINX_SOURCE ('doc' by default). SPHINX_FORMATS lists the formats to build (only" \
+ "the 'html' format is generated by default); the target directory can be specified by" \
+ 'setting sphinx_$${format}_output, for example: sphinx_html_output = output/html' \
+ "Additional Sphinx options can be set in SPHINX_OPTS."
+
+# Plugin-specific targets.
+
+sphinx:
+ $(foreach F,$(SPHINX_FORMATS),$(call sphinx.build,$F))
+
+distclean-sphinx:
+ $(gen_verbose) rm -rf $(filter-out $(SPHINX_SOURCE),$(foreach F,$(SPHINX_FORMATS),$(call sphinx.output,$F)))
+
+# Copyright (c) 2017, Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: show-ERL_LIBS show-ERLC_OPTS show-TEST_ERLC_OPTS
+
+show-ERL_LIBS:
+ @echo $(ERL_LIBS)
+
+show-ERLC_OPTS:
+ @$(foreach opt,$(ERLC_OPTS) -pa ebin -I include,echo "$(opt)";)
+
+show-TEST_ERLC_OPTS:
+ @$(foreach opt,$(TEST_ERLC_OPTS) -pa ebin -I include,echo "$(opt)";)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter triq,$(DEPS) $(TEST_DEPS)),triq)
+.PHONY: triq
+
+# Targets.
+
+tests:: triq
+
+define triq_check.erl
+ $(call cover.erl)
+ code:add_pathsa([
+ "$(call core_native_path,$(CURDIR)/ebin)",
+ "$(call core_native_path,$(DEPS_DIR)/*/ebin)",
+ "$(call core_native_path,$(TEST_DIR))"]),
+ try begin
+ CoverSetup(),
+ Res = case $(1) of
+ all -> [true] =:= lists:usort([triq:check(M) || M <- [$(call comma_list,$(3))]]);
+ module -> triq:check($(2));
+ function -> triq:check($(2))
+ end,
+ CoverExport("$(COVER_DATA_DIR)/triq.coverdata"),
+ Res
+ end of
+ true -> halt(0);
+ _ -> halt(1)
+ catch error:undef ->
+ io:format("Undefined property or module?~n~p~n", [erlang:get_stacktrace()]),
+ halt(0)
+ end.
+endef
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+triq: test-build cover-data-dir
+ $(verbose) $(call erlang,$(call triq_check.erl,module,$(t)))
+else
+triq: test-build cover-data-dir
+ $(verbose) echo Testing $(t)/0
+ $(verbose) $(call erlang,$(call triq_check.erl,function,$(t)()))
+endif
+else
+triq: test-build cover-data-dir
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(wildcard ebin/*.beam) $(call core_find,$(TEST_DIR)/,*.beam))))))
+ $(gen_verbose) $(call erlang,$(call triq_check.erl,all,undefined,$(MODULES)))
+endif
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Erlang Solutions Ltd.
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: xref distclean-xref
+
+# Configuration.
+
+ifeq ($(XREF_CONFIG),)
+ XREFR_ARGS :=
+else
+ XREFR_ARGS := -c $(XREF_CONFIG)
+endif
+
+XREFR ?= $(CURDIR)/xrefr
+export XREFR
+
+XREFR_URL ?= https://github.com/inaka/xref_runner/releases/download/1.1.0/xrefr
+
+# Core targets.
+
+help::
+ $(verbose) printf '%s\n' '' \
+ 'Xref targets:' \
+ ' xref Run Xrefr using $$XREF_CONFIG as the config file, if defined'
+
+distclean:: distclean-xref
+
+# Plugin-specific targets.
+
+$(XREFR):
+ $(gen_verbose) $(call core_http_get,$(XREFR),$(XREFR_URL))
+ $(verbose) chmod +x $(XREFR)
+
+xref: deps app $(XREFR)
+ $(gen_verbose) $(XREFR) $(XREFR_ARGS)
+
+distclean-xref:
+ $(gen_verbose) rm -rf $(XREFR)
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Viktor Söderqvist <viktor@zuiderkwast.se>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+COVER_REPORT_DIR ?= cover
+COVER_DATA_DIR ?= $(COVER_REPORT_DIR)
+
+ifdef COVER
+COVER_APPS ?= $(notdir $(ALL_APPS_DIRS))
+COVER_DEPS ?=
+endif
+
+# Code coverage for Common Test.
+
+ifdef COVER
+ifdef CT_RUN
+ifneq ($(wildcard $(TEST_DIR)),)
+test-build:: $(TEST_DIR)/ct.cover.spec
+
+$(TEST_DIR)/ct.cover.spec: cover-data-dir
+ $(gen_verbose) printf "%s\n" \
+ "{incl_app, '$(PROJECT)', details}." \
+ "{incl_dirs, '$(PROJECT)', [\"$(call core_native_path,$(CURDIR)/ebin)\" \
+ $(foreach a,$(COVER_APPS),$(comma) \"$(call core_native_path,$(APPS_DIR)/$a/ebin)\") \
+ $(foreach d,$(COVER_DEPS),$(comma) \"$(call core_native_path,$(DEPS_DIR)/$d/ebin)\")]}." \
+ '{export,"$(call core_native_path,$(abspath $(COVER_DATA_DIR))/ct.coverdata)"}.' > $@
+
+CT_RUN += -cover $(TEST_DIR)/ct.cover.spec
+endif
+endif
+endif
+
+# Code coverage for other tools.
+
+ifdef COVER
+define cover.erl
+ CoverSetup = fun() ->
+ Dirs = ["$(call core_native_path,$(CURDIR)/ebin)"
+ $(foreach a,$(COVER_APPS),$(comma) "$(call core_native_path,$(APPS_DIR)/$a/ebin)")
+ $(foreach d,$(COVER_DEPS),$(comma) "$(call core_native_path,$(DEPS_DIR)/$d/ebin)")],
+ [begin
+ case filelib:is_dir(Dir) of
+ false -> false;
+ true ->
+ case cover:compile_beam_directory(Dir) of
+ {error, _} -> halt(1);
+ _ -> true
+ end
+ end
+ end || Dir <- Dirs]
+ end,
+ CoverExport = fun(Filename) -> cover:export(Filename) end,
+endef
+else
+define cover.erl
+ CoverSetup = fun() -> ok end,
+ CoverExport = fun(_) -> ok end,
+endef
+endif
+
+# Core targets
+
+ifdef COVER
+ifneq ($(COVER_REPORT_DIR),)
+tests::
+ $(verbose) $(MAKE) --no-print-directory cover-report
+endif
+
+cover-data-dir: | $(COVER_DATA_DIR)
+
+$(COVER_DATA_DIR):
+ $(verbose) mkdir -p $(COVER_DATA_DIR)
+else
+cover-data-dir:
+endif
+
+clean:: coverdata-clean
+
+ifneq ($(COVER_REPORT_DIR),)
+distclean:: cover-report-clean
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Cover targets:" \
+ " cover-report Generate an HTML coverage report from previously collected" \
+ " cover data." \
+ " all.coverdata Merge all coverdata files into all.coverdata." \
+ "" \
+ "If COVER=1 is set, coverage data is generated by the targets eunit and ct. The" \
+ "target tests additionally generates an HTML coverage report from the combined" \
+ "coverdata files from each of these testing tools. HTML reports can be disabled" \
+ "by setting COVER_REPORT_DIR to empty."
+
+# Plugin specific targets
+
+COVERDATA = $(filter-out $(COVER_DATA_DIR)/all.coverdata,$(wildcard $(COVER_DATA_DIR)/*.coverdata))
+
+.PHONY: coverdata-clean
+coverdata-clean:
+ $(gen_verbose) rm -f $(COVER_DATA_DIR)/*.coverdata $(TEST_DIR)/ct.cover.spec
+
+# Merge all coverdata files into one.
+define cover_export.erl
+ $(foreach f,$(COVERDATA),cover:import("$(f)") == ok orelse halt(1),)
+ cover:export("$(COVER_DATA_DIR)/$@"), halt(0).
+endef
+
+all.coverdata: $(COVERDATA) cover-data-dir
+ $(gen_verbose) $(call erlang,$(cover_export.erl))
+
+# These are only defined if COVER_REPORT_DIR is non-empty. Set COVER_REPORT_DIR to
+# empty if you want the coverdata files but not the HTML report.
+ifneq ($(COVER_REPORT_DIR),)
+
+.PHONY: cover-report-clean cover-report
+
+cover-report-clean:
+ $(gen_verbose) rm -rf $(COVER_REPORT_DIR)
+ifneq ($(COVER_REPORT_DIR),$(COVER_DATA_DIR))
+ $(if $(shell ls -A $(COVER_DATA_DIR)/),,$(verbose) rmdir $(COVER_DATA_DIR))
+endif
+
+ifeq ($(COVERDATA),)
+cover-report:
+else
+
+# Modules which include eunit.hrl always contain one line without coverage
+# because eunit defines test/0 which is never called. We compensate for this.
+EUNIT_HRL_MODS = $(subst $(space),$(comma),$(shell \
+ grep -H -e '^\s*-include.*include/eunit\.hrl"' src/*.erl \
+ | sed "s/^src\/\(.*\)\.erl:.*/'\1'/" | uniq))
+
+define cover_report.erl
+ $(foreach f,$(COVERDATA),cover:import("$(f)") == ok orelse halt(1),)
+ Ms = cover:imported_modules(),
+ [cover:analyse_to_file(M, "$(COVER_REPORT_DIR)/" ++ atom_to_list(M)
+ ++ ".COVER.html", [html]) || M <- Ms],
+ Report = [begin {ok, R} = cover:analyse(M, module), R end || M <- Ms],
+ EunitHrlMods = [$(EUNIT_HRL_MODS)],
+ Report1 = [{M, {Y, case lists:member(M, EunitHrlMods) of
+ true -> N - 1; false -> N end}} || {M, {Y, N}} <- Report],
+ TotalY = lists:sum([Y || {_, {Y, _}} <- Report1]),
+ TotalN = lists:sum([N || {_, {_, N}} <- Report1]),
+ Perc = fun(Y, N) -> case Y + N of 0 -> 100; S -> round(100 * Y / S) end end,
+ TotalPerc = Perc(TotalY, TotalN),
+ {ok, F} = file:open("$(COVER_REPORT_DIR)/index.html", [write]),
+ io:format(F, "<!DOCTYPE html><html>~n"
+ "<head><meta charset=\"UTF-8\">~n"
+ "<title>Coverage report</title></head>~n"
+ "<body>~n", []),
+ io:format(F, "<h1>Coverage</h1>~n<p>Total: ~p%</p>~n", [TotalPerc]),
+ io:format(F, "<table><tr><th>Module</th><th>Coverage</th></tr>~n", []),
+ [io:format(F, "<tr><td><a href=\"~p.COVER.html\">~p</a></td>"
+ "<td>~p%</td></tr>~n",
+ [M, M, Perc(Y, N)]) || {M, {Y, N}} <- Report1],
+ How = "$(subst $(space),$(comma)$(space),$(basename $(COVERDATA)))",
+ Date = "$(shell date -u "+%Y-%m-%dT%H:%M:%SZ")",
+ io:format(F, "</table>~n"
+ "<p>Generated using ~s and erlang.mk on ~s.</p>~n"
+ "</body></html>", [How, Date]),
+ halt().
+endef
+
+cover-report:
+ $(verbose) mkdir -p $(COVER_REPORT_DIR)
+ $(gen_verbose) $(call erlang,$(cover_report.erl))
+
+endif
+endif # ifneq ($(COVER_REPORT_DIR),)
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: sfx
+
+ifdef RELX_REL
+ifdef SFX
+
+# Configuration.
+
+SFX_ARCHIVE ?= $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/$(RELX_REL_NAME)-$(RELX_REL_VSN).tar.gz
+SFX_OUTPUT_FILE ?= $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME).run
+
+# Core targets.
+
+rel:: sfx
+
+# Plugin-specific targets.
+
+define sfx_stub
+#!/bin/sh
+
+TMPDIR=`mktemp -d`
+ARCHIVE=`awk '/^__ARCHIVE_BELOW__$$/ {print NR + 1; exit 0;}' $$0`
+FILENAME=$$(basename $$0)
+REL=$${FILENAME%.*}
+
+tail -n+$$ARCHIVE $$0 | tar -xzf - -C $$TMPDIR
+
+$$TMPDIR/bin/$$REL console
+RET=$$?
+
+rm -rf $$TMPDIR
+
+exit $$RET
+
+__ARCHIVE_BELOW__
+endef
+
+sfx:
+ $(verbose) $(call core_render,sfx_stub,$(SFX_OUTPUT_FILE))
+ $(gen_verbose) cat $(SFX_ARCHIVE) >> $(SFX_OUTPUT_FILE)
+ $(verbose) chmod +x $(SFX_OUTPUT_FILE)
+
+endif
+endif
+
+# Copyright (c) 2013-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# External plugins.
+
+DEP_PLUGINS ?=
+
+$(foreach p,$(DEP_PLUGINS),\
+ $(eval $(if $(findstring /,$p),\
+ $(call core_dep_plugin,$p,$(firstword $(subst /, ,$p))),\
+ $(call core_dep_plugin,$p/plugins.mk,$p))))
+
+help:: help-plugins
+
+help-plugins::
+ $(verbose) :
+
+# Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015-2016, Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Fetch dependencies recursively (without building them).
+
+.PHONY: fetch-deps fetch-doc-deps fetch-rel-deps fetch-test-deps \
+ fetch-shell-deps
+
+.PHONY: $(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+fetch-deps: $(ERLANG_MK_RECURSIVE_DEPS_LIST)
+fetch-doc-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST)
+fetch-rel-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST)
+fetch-test-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST)
+fetch-shell-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+ifneq ($(SKIP_DEPS),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST):
+ $(verbose) :> $@
+else
+# By default, we fetch "normal" dependencies. They are also included
+# regardless of which type of dependencies is requested.
+#
+# $(ALL_DEPS_DIRS) includes $(BUILD_DEPS).
+
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_DOC_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_REL_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_TEST_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_SHELL_DEPS_DIRS)
+
+# Allow fetch-deps to be combined with $(DEP_TYPES) to fetch multiple
+# types of dependencies with a single target.
+ifneq ($(filter doc,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_DOC_DEPS_DIRS)
+endif
+ifneq ($(filter rel,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_REL_DEPS_DIRS)
+endif
+ifneq ($(filter test,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_TEST_DEPS_DIRS)
+endif
+ifneq ($(filter shell,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_SHELL_DEPS_DIRS)
+endif
+
+ERLANG_MK_RECURSIVE_TMP_LIST := $(abspath $(ERLANG_MK_TMP)/recursive-tmp-deps-$(shell echo $$PPID).log)
+
+$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): | $(ERLANG_MK_TMP)
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_RECURSIVE_TMP_LIST)
+endif
+ $(verbose) touch $(ERLANG_MK_RECURSIVE_TMP_LIST)
+ $(verbose) set -e; for dep in $^ ; do \
+ if ! grep -qs ^$$dep$$ $(ERLANG_MK_RECURSIVE_TMP_LIST); then \
+ echo $$dep >> $(ERLANG_MK_RECURSIVE_TMP_LIST); \
+ if grep -qs -E "^[[:blank:]]*include[[:blank:]]+(erlang\.mk|.*/erlang\.mk|.*ERLANG_MK_FILENAME.*)$$" \
+ $$dep/GNUmakefile $$dep/makefile $$dep/Makefile; then \
+ $(MAKE) -C $$dep fetch-deps \
+ IS_DEP=1 \
+ ERLANG_MK_RECURSIVE_TMP_LIST=$(ERLANG_MK_RECURSIVE_TMP_LIST); \
+ fi \
+ fi \
+ done
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) sort < $(ERLANG_MK_RECURSIVE_TMP_LIST) | \
+ uniq > $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted
+ $(verbose) cmp -s $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted $@ \
+ || mv $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted $@
+ $(verbose) rm -f $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted
+ $(verbose) rm $(ERLANG_MK_RECURSIVE_TMP_LIST)
+endif
+endif # ifneq ($(SKIP_DEPS),)
+
+# List dependencies recursively.
+
+.PHONY: list-deps list-doc-deps list-rel-deps list-test-deps \
+ list-shell-deps
+
+list-deps: $(ERLANG_MK_RECURSIVE_DEPS_LIST)
+list-doc-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST)
+list-rel-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST)
+list-test-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST)
+list-shell-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+list-deps list-doc-deps list-rel-deps list-test-deps list-shell-deps:
+ $(verbose) cat $^
+
+# Query dependencies recursively.
+
+.PHONY: query-deps query-doc-deps query-rel-deps query-test-deps \
+ query-shell-deps
+
+QUERY ?= name fetch_method repo version
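+
+# For example: "make query-deps QUERY='name version'" prints, for each
+# dependency, a line of the form "<project>: <name> <version>".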
+
+define query_target
+$(1): $(2) clean-tmp-query.log
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(4)
+endif
+ $(verbose) $(foreach dep,$(3),\
+ echo $(PROJECT): $(foreach q,$(QUERY),$(call query_$(q),$(dep))) >> $(4) ;)
+ $(if $(filter-out query-deps,$(1)),,\
+ $(verbose) set -e; for dep in $(3) ; do \
+ if grep -qs ^$$$$dep$$$$ $(ERLANG_MK_TMP)/query.log; then \
+ :; \
+ else \
+ echo $$$$dep >> $(ERLANG_MK_TMP)/query.log; \
+ $(MAKE) -C $(DEPS_DIR)/$$$$dep $$@ QUERY="$(QUERY)" IS_DEP=1 || true; \
+ fi \
+ done)
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) touch $(4)
+ $(verbose) cat $(4)
+endif
+endef
+
+clean-tmp-query.log:
+ifeq ($(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_TMP)/query.log
+endif
+
+$(eval $(call query_target,query-deps,$(ERLANG_MK_RECURSIVE_DEPS_LIST),$(BUILD_DEPS) $(DEPS),$(ERLANG_MK_QUERY_DEPS_FILE)))
+$(eval $(call query_target,query-doc-deps,$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST),$(DOC_DEPS),$(ERLANG_MK_QUERY_DOC_DEPS_FILE)))
+$(eval $(call query_target,query-rel-deps,$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST),$(REL_DEPS),$(ERLANG_MK_QUERY_REL_DEPS_FILE)))
+$(eval $(call query_target,query-test-deps,$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST),$(TEST_DEPS),$(ERLANG_MK_QUERY_TEST_DEPS_FILE)))
+$(eval $(call query_target,query-shell-deps,$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST),$(SHELL_DEPS),$(ERLANG_MK_QUERY_SHELL_DEPS_FILE)))
diff --git a/deps/rabbitmq_web_dispatch/rabbitmq-components.mk b/deps/rabbitmq_web_dispatch/rabbitmq-components.mk
new file mode 100644
index 0000000000..b2a3be8b35
--- /dev/null
+++ b/deps/rabbitmq_web_dispatch/rabbitmq-components.mk
@@ -0,0 +1,359 @@
+ifeq ($(.DEFAULT_GOAL),)
+# Set the default goal to `all` because this file defines some targets
+# before erlang.mk is included, which would otherwise cause the wrong
+# target to become the default.
+.DEFAULT_GOAL = all
+endif
+
+# PROJECT_VERSION defaults to:
+# 1. the version exported by rabbitmq-server-release;
+# 2. the version stored in `git-revisions.txt`, if it exists;
+# 3. a version based on git-describe(1), if it is a Git clone;
+# 4. 0.0.0
+
+PROJECT_VERSION := $(RABBITMQ_VERSION)
+
+ifeq ($(PROJECT_VERSION),)
+PROJECT_VERSION := $(shell \
+if test -f git-revisions.txt; then \
+ head -n1 git-revisions.txt | \
+ awk '{print $$$(words $(PROJECT_DESCRIPTION) version);}'; \
+else \
+ (git describe --dirty --abbrev=7 --tags --always --first-parent \
+ 2>/dev/null || echo rabbitmq_v0_0_0) | \
+ sed -e 's/^rabbitmq_v//' -e 's/^v//' -e 's/_/./g' -e 's/-/+/' \
+ -e 's/-/./g'; \
+fi)
+endif
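+
+# For example, a git-describe(1) output of "v3.8.9-10-gabc1234"
+# (hypothetical) would be rewritten by the sed expressions above into
+# the version "3.8.9+10.gabc1234".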
+
+# --------------------------------------------------------------------
+# RabbitMQ components.
+# --------------------------------------------------------------------
+
+# For RabbitMQ repositories, we want to check out branches which match
+# the parent project. For instance, if the parent project is on a
+# release tag, dependencies must be on the same release tag. If the
+# parent project is on a topic branch, dependencies must be on the same
+# topic branch, or fall back to `stable` or `master`, whichever was the
+# base of the topic branch.
+
+dep_amqp_client = git_rmq rabbitmq-erlang-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_amqp10_client = git_rmq rabbitmq-amqp1.0-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_amqp10_common = git_rmq rabbitmq-amqp1.0-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbit = git_rmq rabbitmq-server $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbit_common = git_rmq rabbitmq-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_amqp1_0 = git_rmq rabbitmq-amqp1.0 $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_amqp = git_rmq rabbitmq-auth-backend-amqp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_cache = git_rmq rabbitmq-auth-backend-cache $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_http = git_rmq rabbitmq-auth-backend-http $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_ldap = git_rmq rabbitmq-auth-backend-ldap $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_oauth2 = git_rmq rabbitmq-auth-backend-oauth2 $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_mechanism_ssl = git_rmq rabbitmq-auth-mechanism-ssl $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_aws = git_rmq rabbitmq-aws $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_boot_steps_visualiser = git_rmq rabbitmq-boot-steps-visualiser $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_cli = git_rmq rabbitmq-cli $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_codegen = git_rmq rabbitmq-codegen $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_consistent_hash_exchange = git_rmq rabbitmq-consistent-hash-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_ct_client_helpers = git_rmq rabbitmq-ct-client-helpers $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_ct_helpers = git_rmq rabbitmq-ct-helpers $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_delayed_message_exchange = git_rmq rabbitmq-delayed-message-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_dotnet_client = git_rmq rabbitmq-dotnet-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_event_exchange = git_rmq rabbitmq-event-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_federation = git_rmq rabbitmq-federation $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_federation_management = git_rmq rabbitmq-federation-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_java_client = git_rmq rabbitmq-java-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_client = git_rmq rabbitmq-jms-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_cts = git_rmq rabbitmq-jms-cts $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_topic_exchange = git_rmq rabbitmq-jms-topic-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_lvc_exchange = git_rmq rabbitmq-lvc-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management = git_rmq rabbitmq-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_agent = git_rmq rabbitmq-management-agent $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_exchange = git_rmq rabbitmq-management-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_themes = git_rmq rabbitmq-management-themes $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_message_timestamp = git_rmq rabbitmq-message-timestamp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_metronome = git_rmq rabbitmq-metronome $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_mqtt = git_rmq rabbitmq-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_objc_client = git_rmq rabbitmq-objc-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_aws = git_rmq rabbitmq-peer-discovery-aws $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_common = git_rmq rabbitmq-peer-discovery-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_consul = git_rmq rabbitmq-peer-discovery-consul $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_etcd = git_rmq rabbitmq-peer-discovery-etcd $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_k8s = git_rmq rabbitmq-peer-discovery-k8s $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_prometheus = git_rmq rabbitmq-prometheus $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_random_exchange = git_rmq rabbitmq-random-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_recent_history_exchange = git_rmq rabbitmq-recent-history-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_routing_node_stamp = git_rmq rabbitmq-routing-node-stamp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_rtopic_exchange = git_rmq rabbitmq-rtopic-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_server_release = git_rmq rabbitmq-server-release $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_sharding = git_rmq rabbitmq-sharding $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_shovel = git_rmq rabbitmq-shovel $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_shovel_management = git_rmq rabbitmq-shovel-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_stomp = git_rmq rabbitmq-stomp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_stream = git_rmq rabbitmq-stream $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_toke = git_rmq rabbitmq-toke $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_top = git_rmq rabbitmq-top $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_tracing = git_rmq rabbitmq-tracing $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_trust_store = git_rmq rabbitmq-trust-store $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_test = git_rmq rabbitmq-test $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_dispatch = git_rmq rabbitmq-web-dispatch $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_stomp = git_rmq rabbitmq-web-stomp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_stomp_examples = git_rmq rabbitmq-web-stomp-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt = git_rmq rabbitmq-web-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt_examples = git_rmq rabbitmq-web-mqtt-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_website = git_rmq rabbitmq-website $(current_rmq_ref) $(base_rmq_ref) live master
+dep_toke = git_rmq toke $(current_rmq_ref) $(base_rmq_ref) master
+
+dep_rabbitmq_public_umbrella = git_rmq rabbitmq-public-umbrella $(current_rmq_ref) $(base_rmq_ref) master
+
+# Third-party dependency version pinning.
+#
+# We do that in this file, which is copied into all projects, to ensure
+# all projects use the same versions. This avoids conflicts and makes it
+# possible to work with rabbitmq-public-umbrella.
+
+dep_accept = hex 0.3.5
+dep_cowboy = hex 2.8.0
+dep_cowlib = hex 2.9.1
+dep_jsx = hex 2.11.0
+dep_lager = hex 3.8.0
+dep_prometheus = git https://github.com/deadtrickster/prometheus.erl.git master
+dep_ra = git https://github.com/rabbitmq/ra.git master
+dep_ranch = hex 1.7.1
+dep_recon = hex 2.5.1
+dep_observer_cli = hex 1.5.4
+dep_stdout_formatter = hex 0.2.4
+dep_sysmon_handler = hex 1.3.0
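+
+# Example (illustrative, not part of the original file): a plugin Makefile
+# that declares `DEPS = cowboy` picks up the exact version pinned above,
+# because erlang.mk resolves each dependency through its `dep_*` variable.
+# `my_plugin` is a hypothetical project name:
+#
+#     PROJECT = my_plugin
+#     DEPS = cowboy
+#     include rabbitmq-components.mk
+#     include erlang.mk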
+
+RABBITMQ_COMPONENTS = amqp_client \
+ amqp10_common \
+ amqp10_client \
+ rabbit \
+ rabbit_common \
+ rabbitmq_amqp1_0 \
+ rabbitmq_auth_backend_amqp \
+ rabbitmq_auth_backend_cache \
+ rabbitmq_auth_backend_http \
+ rabbitmq_auth_backend_ldap \
+ rabbitmq_auth_backend_oauth2 \
+ rabbitmq_auth_mechanism_ssl \
+ rabbitmq_aws \
+ rabbitmq_boot_steps_visualiser \
+ rabbitmq_cli \
+ rabbitmq_codegen \
+ rabbitmq_consistent_hash_exchange \
+ rabbitmq_ct_client_helpers \
+ rabbitmq_ct_helpers \
+ rabbitmq_delayed_message_exchange \
+ rabbitmq_dotnet_client \
+ rabbitmq_event_exchange \
+ rabbitmq_federation \
+ rabbitmq_federation_management \
+ rabbitmq_java_client \
+ rabbitmq_jms_client \
+ rabbitmq_jms_cts \
+ rabbitmq_jms_topic_exchange \
+ rabbitmq_lvc_exchange \
+ rabbitmq_management \
+ rabbitmq_management_agent \
+ rabbitmq_management_exchange \
+ rabbitmq_management_themes \
+ rabbitmq_message_timestamp \
+ rabbitmq_metronome \
+ rabbitmq_mqtt \
+ rabbitmq_objc_client \
+ rabbitmq_peer_discovery_aws \
+ rabbitmq_peer_discovery_common \
+ rabbitmq_peer_discovery_consul \
+ rabbitmq_peer_discovery_etcd \
+ rabbitmq_peer_discovery_k8s \
+ rabbitmq_prometheus \
+ rabbitmq_random_exchange \
+ rabbitmq_recent_history_exchange \
+ rabbitmq_routing_node_stamp \
+ rabbitmq_rtopic_exchange \
+ rabbitmq_server_release \
+ rabbitmq_sharding \
+ rabbitmq_shovel \
+ rabbitmq_shovel_management \
+ rabbitmq_stomp \
+ rabbitmq_stream \
+ rabbitmq_toke \
+ rabbitmq_top \
+ rabbitmq_tracing \
+ rabbitmq_trust_store \
+ rabbitmq_web_dispatch \
+ rabbitmq_web_mqtt \
+ rabbitmq_web_mqtt_examples \
+ rabbitmq_web_stomp \
+ rabbitmq_web_stomp_examples \
+ rabbitmq_website
+
+# Erlang.mk does not rebuild dependencies by default once they have been
+# compiled, except for those listed in the `$(FORCE_REBUILD)`
+# variable.
+#
+# We want all RabbitMQ components to always be rebuilt: this eases
+# working on several components at the same time.
+
+FORCE_REBUILD = $(RABBITMQ_COMPONENTS)
+
+# Several components have a custom erlang.mk/build.config, mainly
+# to disable eunit. Therefore, we can't use the top-level project's
+# erlang.mk copy.
+NO_AUTOPATCH += $(RABBITMQ_COMPONENTS)
+
+ifeq ($(origin current_rmq_ref),undefined)
+ifneq ($(wildcard .git),)
+current_rmq_ref := $(shell (\
+ ref=$$(LANG=C git branch --list | awk '/^\* \(.*detached / {ref=$$0; sub(/.*detached [^ ]+ /, "", ref); sub(/\)$$/, "", ref); print ref; exit;} /^\* / {ref=$$0; sub(/^\* /, "", ref); print ref; exit}');\
+ if test "$$(git rev-parse --short HEAD)" != "$$ref"; then echo "$$ref"; fi))
+else
+current_rmq_ref := master
+endif
+endif
+export current_rmq_ref
+
+ifeq ($(origin base_rmq_ref),undefined)
+ifneq ($(wildcard .git),)
+possible_base_rmq_ref := master
+ifeq ($(possible_base_rmq_ref),$(current_rmq_ref))
+base_rmq_ref := $(current_rmq_ref)
+else
+base_rmq_ref := $(shell \
+ (git rev-parse --verify -q master >/dev/null && \
+ git rev-parse --verify -q $(possible_base_rmq_ref) >/dev/null && \
+ git merge-base --is-ancestor $$(git merge-base master HEAD) $(possible_base_rmq_ref) && \
+ echo $(possible_base_rmq_ref)) || \
+ echo master)
+endif
+else
+base_rmq_ref := master
+endif
+endif
+export base_rmq_ref
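+
+# Illustrative example (hypothetical branch name): on a working copy of a
+# feature branch "my-feature", current_rmq_ref becomes "my-feature" and
+# base_rmq_ref stays "master"; each git_rmq dependency is then checked out
+# at "my-feature" first, falling back to "master".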
+
+# Repository URL selection.
+#
+# First, we infer other components' locations from the current project
+# repository URL, if it's a Git repository:
+# - We take the "origin" remote URL as the base
+# - The current project name and repository name are replaced by the
+#   target's properties:
+#   e.g. rabbitmq-common is replaced by rabbitmq-codegen
+#   e.g. rabbit_common is replaced by rabbitmq_codegen
+#
+# If cloning from this computed location fails, we fall back to the
+# RabbitMQ upstream, which is on GitHub.
+
+# Macro to transform e.g. "rabbit_common" to "rabbitmq-common".
+rmq_cmp_repo_name = $(word 2,$(dep_$(1)))
+
+# Upstream URL for the current project.
+RABBITMQ_COMPONENT_REPO_NAME := $(call rmq_cmp_repo_name,$(PROJECT))
+RABBITMQ_UPSTREAM_FETCH_URL ?= https://github.com/rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git
+RABBITMQ_UPSTREAM_PUSH_URL ?= git@github.com:rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git
+
+# Current URL for the current project. If this is not a Git clone,
+# default to the upstream Git repository.
+ifneq ($(wildcard .git),)
+git_origin_fetch_url := $(shell git config remote.origin.url)
+git_origin_push_url := $(shell git config remote.origin.pushurl || git config remote.origin.url)
+RABBITMQ_CURRENT_FETCH_URL ?= $(git_origin_fetch_url)
+RABBITMQ_CURRENT_PUSH_URL ?= $(git_origin_push_url)
+else
+RABBITMQ_CURRENT_FETCH_URL ?= $(RABBITMQ_UPSTREAM_FETCH_URL)
+RABBITMQ_CURRENT_PUSH_URL ?= $(RABBITMQ_UPSTREAM_PUSH_URL)
+endif
+
+# Macro to replace the following patterns:
+# 1. /foo.git -> /bar.git
+# 2. /foo -> /bar
+# 3. /foo/ -> /bar/
+subst_repo_name = $(patsubst %/$(1)/%,%/$(2)/%,$(patsubst %/$(1),%/$(2),$(patsubst %/$(1).git,%/$(2).git,$(3))))
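+
+# For instance (illustrative):
+#   $(call subst_repo_name,rabbitmq-common,rabbitmq-codegen,https://github.com/rabbitmq/rabbitmq-common.git)
+# yields https://github.com/rabbitmq/rabbitmq-codegen.git.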
+
+# Macro to replace both the project's name (e.g. "rabbit_common") and
+# repository name (e.g. "rabbitmq-common") with the target's equivalents.
+#
+# This macro is kept on one line because we don't want whitespace in
+# the returned value, as it's used in $(dep_fetch_git_rmq) in a shell
+# single-quoted string.
+dep_rmq_repo = $(if $(dep_$(2)),$(call subst_repo_name,$(PROJECT),$(2),$(call subst_repo_name,$(RABBITMQ_COMPONENT_REPO_NAME),$(call rmq_cmp_repo_name,$(2)),$(1))),$(pkg_$(1)_repo))
+
+dep_rmq_commits = $(if $(dep_$(1)), \
+ $(wordlist 3,$(words $(dep_$(1))),$(dep_$(1))), \
+ $(pkg_$(1)_commit))
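+
+# For example (illustrative): given the declaration
+#   dep_rabbitmq_management = git_rmq rabbitmq-management $(current_rmq_ref) $(base_rmq_ref) master
+# $(call dep_rmq_commits,rabbitmq_management) evaluates to the word list
+# "$(current_rmq_ref) $(base_rmq_ref) master" (words 3 and onwards).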
+
+define dep_fetch_git_rmq
+ fetch_url1='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_FETCH_URL),$(1))'; \
+ fetch_url2='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_FETCH_URL),$(1))'; \
+ if test "$$$$fetch_url1" != '$(RABBITMQ_CURRENT_FETCH_URL)' && \
+ git clone -q -n -- "$$$$fetch_url1" $(DEPS_DIR)/$(call dep_name,$(1)); then \
+ fetch_url="$$$$fetch_url1"; \
+ push_url='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_PUSH_URL),$(1))'; \
+ elif git clone -q -n -- "$$$$fetch_url2" $(DEPS_DIR)/$(call dep_name,$(1)); then \
+ fetch_url="$$$$fetch_url2"; \
+ push_url='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_PUSH_URL),$(1))'; \
+ fi; \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && ( \
+ $(foreach ref,$(call dep_rmq_commits,$(1)), \
+ git checkout -q $(ref) >/dev/null 2>&1 || \
+ ) \
+ (echo "error: no valid pathspec among: $(call dep_rmq_commits,$(1))" \
+ 1>&2 && false) ) && \
+ (test "$$$$fetch_url" = "$$$$push_url" || \
+ git remote set-url --push origin "$$$$push_url")
+endef
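+
+# The checkout loop in dep_fetch_git_rmq expands (roughly, for
+# illustration) to:
+#   git checkout -q <current_rmq_ref> >/dev/null 2>&1 || \
+#   git checkout -q <base_rmq_ref> >/dev/null 2>&1 || \
+#   git checkout -q master >/dev/null 2>&1 || \
+#   (echo "error: no valid pathspec among: ..." 1>&2 && false)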
+
+# --------------------------------------------------------------------
+# Component distribution.
+# --------------------------------------------------------------------
+
+list-dist-deps::
+ @:
+
+prepare-dist::
+ @:
+
+# --------------------------------------------------------------------
+# Umbrella-specific settings.
+# --------------------------------------------------------------------
+
+# If the top-level project is a RabbitMQ component, we override
+# $(DEPS_DIR) for this project to point to the top-level's one.
+#
+# We also verify that the guessed DEPS_DIR is actually named `deps`,
+# to rule out any situation where it is a mere coincidence that a
+# `rabbitmq-components.mk` was found in upper directories.
+
+possible_deps_dir_1 = $(abspath ..)
+possible_deps_dir_2 = $(abspath ../../..)
+
+ifeq ($(notdir $(possible_deps_dir_1)),deps)
+ifneq ($(wildcard $(possible_deps_dir_1)/../rabbitmq-components.mk),)
+deps_dir_overridden = 1
+DEPS_DIR ?= $(possible_deps_dir_1)
+DISABLE_DISTCLEAN = 1
+endif
+endif
+
+ifeq ($(deps_dir_overridden),)
+ifeq ($(notdir $(possible_deps_dir_2)),deps)
+ifneq ($(wildcard $(possible_deps_dir_2)/../rabbitmq-components.mk),)
+deps_dir_overridden = 1
+DEPS_DIR ?= $(possible_deps_dir_2)
+DISABLE_DISTCLEAN = 1
+endif
+endif
+endif
+
+ifneq ($(wildcard UMBRELLA.md),)
+DISABLE_DISTCLEAN = 1
+endif
+
+# We disable `make distclean` so $(DEPS_DIR) is not accidentally removed.
+
+ifeq ($(DISABLE_DISTCLEAN),1)
+ifneq ($(filter distclean distclean-deps,$(MAKECMDGOALS)),)
+SKIP_DEPS = 1
+endif
+endif
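+
+# Net effect (illustrative): running `make distclean` from within an
+# umbrella member sets SKIP_DEPS, so the shared $(DEPS_DIR) is left
+# intact.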
diff --git a/deps/rabbitmq_web_dispatch/src/rabbit_cowboy_middleware.erl b/deps/rabbitmq_web_dispatch/src/rabbit_cowboy_middleware.erl
new file mode 100644
index 0000000000..c8186619f6
--- /dev/null
+++ b/deps/rabbitmq_web_dispatch/src/rabbit_cowboy_middleware.erl
@@ -0,0 +1,24 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_cowboy_middleware).
+-behavior(cowboy_middleware).
+
+-export([execute/2]).
+
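+%% Illustrative wiring (assumed; consistent with rabbit_web_dispatch_sup in
+%% this same diff): this middleware is placed ahead of the router, e.g.
+%%   middlewares => [rabbit_cowboy_middleware, cowboy_router, cowboy_handler]
+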
+execute(Req, Env) ->
+ %% Find the correct dispatch list for this path.
+ Listener = maps:get(rabbit_listener, Env),
+ case rabbit_web_dispatch_registry:lookup(Listener, Req) of
+ {ok, Dispatch} ->
+ {ok, Req, maps:put(dispatch, Dispatch, Env)};
+ {error, Reason} ->
+ Req2 = cowboy_req:reply(500,
+ #{<<"content-type">> => <<"text/plain">>},
+ "Registry Error: " ++ io_lib:format("~p", [Reason]), Req),
+ {stop, Req2}
+ end.
diff --git a/deps/rabbitmq_web_dispatch/src/rabbit_cowboy_redirect.erl b/deps/rabbitmq_web_dispatch/src/rabbit_cowboy_redirect.erl
new file mode 100644
index 0000000000..9271567d91
--- /dev/null
+++ b/deps/rabbitmq_web_dispatch/src/rabbit_cowboy_redirect.erl
@@ -0,0 +1,15 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_cowboy_redirect).
+
+-export([init/2]).
+
+init(Req0, RedirectPort) ->
+ URI = cowboy_req:uri(Req0, #{port => RedirectPort}),
+ Req = cowboy_req:reply(301, #{<<"location">> => URI}, Req0),
+ {ok, Req, RedirectPort}.
diff --git a/deps/rabbitmq_web_dispatch/src/rabbit_cowboy_stream_h.erl b/deps/rabbitmq_web_dispatch/src/rabbit_cowboy_stream_h.erl
new file mode 100644
index 0000000000..d51086a69d
--- /dev/null
+++ b/deps/rabbitmq_web_dispatch/src/rabbit_cowboy_stream_h.erl
@@ -0,0 +1,63 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_cowboy_stream_h).
+-behavior(cowboy_stream).
+
+-export([init/3]).
+-export([data/4]).
+-export([info/3]).
+-export([terminate/3]).
+-export([early_error/5]).
+
+-record(state, {
+ next :: any(),
+ req :: cowboy_req:req()
+}).
+
+init(StreamId, Req, Opts) ->
+ {Commands, Next} = cowboy_stream:init(StreamId, Req, Opts),
+ {Commands, #state{next = Next, req = Req}}.
+
+data(StreamId, IsFin, Data, State = #state{next = Next}) ->
+ {Commands, Next1} = cowboy_stream:data(StreamId, IsFin, Data, Next),
+ {Commands, State#state{next = Next1}}.
+
+info(StreamId, Response, State = #state{next = Next, req = Req}) ->
+ Response1 = case Response of
+ {response, 404, Headers0, <<>>} ->
+ %% TODO: should we log the actual response?
+ log_response(Response, Req),
+ Json = rabbit_json:encode(#{
+ error => list_to_binary(httpd_util:reason_phrase(404)),
+ reason => <<"Not Found">>}),
+ Headers1 = maps:put(<<"content-length">>, integer_to_list(iolist_size(Json)), Headers0),
+ Headers = maps:put(<<"content-type">>, <<"application/json">>, Headers1),
+ {response, 404, Headers, Json};
+ {response, _, _, _} ->
+ log_response(Response, Req),
+ Response;
+ {headers, _, _} ->
+ log_stream_response(Response, Req),
+ Response;
+ _ ->
+ Response
+ end,
+ {Commands, Next1} = cowboy_stream:info(StreamId, Response1, Next),
+ {Commands, State#state{next = Next1}}.
+
+terminate(StreamId, Reason, #state{next = Next}) ->
+ cowboy_stream:terminate(StreamId, Reason, Next).
+
+early_error(StreamId, Reason, PartialReq, Resp, Opts) ->
+ cowboy_stream:early_error(StreamId, Reason, PartialReq, Resp, Opts).
+
+log_response({response, Status, _Headers, Body}, Req) ->
+ webmachine_log:log_access({Status, Body, Req}).
+
+log_stream_response({headers, Status, _Headers}, Req) ->
+ webmachine_log:log_access({Status, <<>>, Req}).
diff --git a/deps/rabbitmq_web_dispatch/src/rabbit_web_dispatch.erl b/deps/rabbitmq_web_dispatch/src/rabbit_web_dispatch.erl
new file mode 100644
index 0000000000..24a81a986a
--- /dev/null
+++ b/deps/rabbitmq_web_dispatch/src/rabbit_web_dispatch.erl
@@ -0,0 +1,88 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_web_dispatch).
+
+-export([register_context_handler/5, register_static_context/6]).
+-export([register_port_redirect/4]).
+-export([unregister_context/1]).
+
+%% Handler Registration
+
+%% Registers a dynamic selector and handler combination, with a link
+%% to display in lists.
+register_handler(Name, Listener, Selector, Handler, Link) ->
+ rabbit_web_dispatch_registry:add(Name, Listener, Selector, Handler, Link).
+
+%% Methods for standard use cases
+
+%% Registers a dynamic handler under a fixed context path, with link
+%% to display in the global context.
+register_context_handler(Name, Listener, Prefix, Handler, LinkText) ->
+ register_handler(
+ Name, Listener, context_selector(Prefix), Handler, {Prefix, LinkText}),
+ {ok, Prefix}.
+
+%% Convenience function registering a fully static context to serve
+%% content from a module-relative directory, with link to display in
+%% the global context.
+register_static_context(Name, Listener, Prefix, Module, FSPath, LinkText) ->
+ register_handler(Name, Listener,
+ context_selector(Prefix),
+ static_context_handler(Prefix, Module, FSPath),
+ {Prefix, LinkText}),
+ {ok, Prefix}.
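+
+%% Example use (hypothetical values):
+%%   rabbit_web_dispatch:register_static_context(docs, [{port, 15672}],
+%%       "docs", ?MODULE, "priv/www", "Documentation")
+%% would serve priv/www/index.html at /docs and the rest of priv/www
+%% under /docs/.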
+
+%% A context which just redirects the request to a different port.
+register_port_redirect(Name, Listener, Prefix, RedirectPort) ->
+ register_context_handler(
+ Name, Listener, Prefix,
+ cowboy_router:compile([{'_', [{'_', rabbit_cowboy_redirect, RedirectPort}]}]),
+ rabbit_misc:format("Redirect to port ~B", [RedirectPort])).
+
+context_selector("") ->
+ fun(_Req) -> true end;
+context_selector(Prefix) ->
+ Prefix1 = list_to_binary("/" ++ Prefix),
+ fun(Req) ->
+ Path = cowboy_req:path(Req),
+ (Path == Prefix1) orelse (binary:match(Path, << Prefix1/binary, $/ >>) =/= nomatch)
+ end.
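+
+%% For example (illustrative), context_selector("api") matches the paths
+%% <<"/api">> and <<"/api/queues">>, but not <<"/apiary">>.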
+
+%% Produces a handler for use with register_handler that serves up
+%% static content from a directory specified relative to the application
+%% (owning the module) priv directory, or to the directory containing
+%% the ebin directory containing the named module's beam file.
+static_context_handler(Prefix, Module, FSPath) ->
+ FSPathInPriv = re:replace(FSPath, "^priv/?", "", [{return, list}]),
+ case application:get_application(Module) of
+ {ok, App} when FSPathInPriv =/= FSPath ->
+ %% The caller indicated a file in the application's `priv`
+ %% dir.
+ cowboy_router:compile([{'_', [
+ {"/" ++ Prefix, cowboy_static,
+ {priv_file, App, filename:join(FSPathInPriv, "index.html")}},
+ {"/" ++ Prefix ++ "/[...]", cowboy_static,
+ {priv_dir, App, FSPathInPriv}}
+ ]}]);
+ _ ->
+ %% The caller indicated a file we should access directly.
+ {file, Here} = code:is_loaded(Module),
+ ModuleRoot = filename:dirname(filename:dirname(Here)),
+ LocalPath = filename:join(ModuleRoot, FSPath),
+ cowboy_router:compile([{'_', [
+ {"/" ++ Prefix, cowboy_static,
+ {file, LocalPath ++ "/index.html"}},
+ {"/" ++ Prefix ++ "/[...]", cowboy_static,
+ {dir, LocalPath}}
+ ]}])
+ end.
+
+%% The opposite of all those register_* functions.
+unregister_context(Name) ->
+ rabbit_web_dispatch_registry:remove(Name).
+
diff --git a/deps/rabbitmq_web_dispatch/src/rabbit_web_dispatch_app.erl b/deps/rabbitmq_web_dispatch/src/rabbit_web_dispatch_app.erl
new file mode 100644
index 0000000000..1de05c77d7
--- /dev/null
+++ b/deps/rabbitmq_web_dispatch/src/rabbit_web_dispatch_app.erl
@@ -0,0 +1,21 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_web_dispatch_app).
+
+-behaviour(application).
+-export([start/2,stop/1]).
+
+%% @spec start(_Type, _StartArgs) -> ServerRet
+%% @doc application start callback for rabbit_web_dispatch.
+start(_Type, _StartArgs) ->
+ rabbit_web_dispatch_sup:start_link().
+
+%% @spec stop(_State) -> ServerRet
+%% @doc application stop callback for rabbit_web_dispatch.
+stop(_State) ->
+ ok.
diff --git a/deps/rabbitmq_web_dispatch/src/rabbit_web_dispatch_listing_handler.erl b/deps/rabbitmq_web_dispatch/src/rabbit_web_dispatch_listing_handler.erl
new file mode 100644
index 0000000000..93ee349af1
--- /dev/null
+++ b/deps/rabbitmq_web_dispatch/src/rabbit_web_dispatch_listing_handler.erl
@@ -0,0 +1,27 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_web_dispatch_listing_handler).
+
+-export([init/2]).
+
+init(Req0, Listener) ->
+ HTMLPrefix =
+ "<html xmlns=\"http://www.w3.org/1999/xhtml\" xml:lang=\"en\">"
+ "<head><title>RabbitMQ Web Server</title></head>"
+ "<body><h1>RabbitMQ Web Server</h1><p>Contexts available:</p><ul>",
+ HTMLSuffix = "</ul></body></html>",
+ List =
+ case rabbit_web_dispatch_registry:list(Listener) of
+ [] ->
+ "<li>No contexts installed</li>";
+ Contexts ->
+ [["<li><a href=\"/", Path, "/\">", Desc, "</a></li>"]
+ || {Path, Desc} <- Contexts]
+ end,
+ Req = cowboy_req:reply(200, #{}, [HTMLPrefix, List, HTMLSuffix], Req0),
+ {ok, Req, Listener}.
diff --git a/deps/rabbitmq_web_dispatch/src/rabbit_web_dispatch_registry.erl b/deps/rabbitmq_web_dispatch/src/rabbit_web_dispatch_registry.erl
new file mode 100644
index 0000000000..8d933ac668
--- /dev/null
+++ b/deps/rabbitmq_web_dispatch/src/rabbit_web_dispatch_registry.erl
@@ -0,0 +1,208 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_web_dispatch_registry).
+
+-behaviour(gen_server).
+
+-export([start_link/0]).
+-export([add/5, remove/1, set_fallback/2, lookup/2, list_all/0]).
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
+ code_change/3]).
+-export([list/1]).
+
+-import(rabbit_misc, [pget/2]).
+
+-define(ETS, rabbitmq_web_dispatch).
+
+%% This gen_server is merely to serialise modifications to the dispatch
+%% table for listeners.
+
+start_link() ->
+ gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
+
+add(Name, Listener, Selector, Handler, Link) ->
+ gen_server:call(?MODULE, {add, Name, Listener, Selector, Handler, Link},
+ infinity).
+
+remove(Name) ->
+ gen_server:call(?MODULE, {remove, Name}, infinity).
+
+%% @todo This needs to be dispatch instead of a fun too.
+%% But I'm not sure what code is using this.
+set_fallback(Listener, FallbackHandler) ->
+ gen_server:call(?MODULE, {set_fallback, Listener, FallbackHandler},
+ infinity).
+
+lookup(Listener, Req) ->
+ case lookup_dispatch(Listener) of
+ {ok, {Selectors, Fallback}} ->
+ case catch match_request(Selectors, Req) of
+ {'EXIT', Reason} -> {error, {lookup_failure, Reason}};
+ not_found -> {ok, Fallback};
+ Dispatch -> {ok, Dispatch}
+ end;
+ Err ->
+ Err
+ end.
+
+%% This is called in a somewhat obfuscated manner in
+%% rabbit_mgmt_external_stats:rabbit_web_dispatch_registry_list_all()
+list_all() ->
+ gen_server:call(?MODULE, list_all, infinity).
+
+%% Callback Methods
+
+init([]) ->
+ ?ETS = ets:new(?ETS, [named_table, public]),
+ {ok, undefined}.
+
+handle_call({add, Name, Listener, Selector, Handler, Link = {_, Desc}}, _From,
+ undefined) ->
+ Continue = case rabbit_web_dispatch_sup:ensure_listener(Listener) of
+ new -> set_dispatch(
+ Listener, [],
+ listing_fallback_handler(Listener)),
+ listener_started(Listener),
+ true;
+ existing -> true;
+ ignore -> false
+ end,
+ case Continue of
+ true -> case lookup_dispatch(Listener) of
+ {ok, {Selectors, Fallback}} ->
+ Selector2 = lists:keystore(
+ Name, 1, Selectors,
+ {Name, Selector, Handler, Link}),
+ set_dispatch(Listener, Selector2, Fallback);
+ {error, {different, Desc2, Listener2}} ->
+ exit({incompatible_listeners,
+ {Desc, Listener}, {Desc2, Listener2}})
+ end;
+ false -> ok
+ end,
+ {reply, ok, undefined};
+
+handle_call({remove, Name}, _From,
+ undefined) ->
+ case listener_by_name(Name) of
+ {error, not_found} ->
+ rabbit_log:warning("HTTP listener registry could not find context ~p",
+ [Name]),
+ {reply, ok, undefined};
+ {ok, Listener} ->
+ {ok, {Selectors, Fallback}} = lookup_dispatch(Listener),
+ Selectors1 = lists:keydelete(Name, 1, Selectors),
+ set_dispatch(Listener, Selectors1, Fallback),
+ case Selectors1 of
+ [] -> rabbit_web_dispatch_sup:stop_listener(Listener),
+ listener_stopped(Listener);
+ _ -> ok
+ end,
+ {reply, ok, undefined}
+ end;
+
+handle_call({set_fallback, Listener, FallbackHandler}, _From,
+ undefined) ->
+ {ok, {Selectors, _OldFallback}} = lookup_dispatch(Listener),
+ set_dispatch(Listener, Selectors, FallbackHandler),
+ {reply, ok, undefined};
+
+handle_call(list_all, _From, undefined) ->
+ {reply, list(), undefined};
+
+handle_call(Req, _From, State) ->
+ rabbit_log:error("Unexpected call to ~p: ~p~n", [?MODULE, Req]),
+ {stop, unknown_request, State}.
+
+handle_cast(_, State) ->
+ {noreply, State}.
+
+handle_info(_, State) ->
+ {noreply, State}.
+
+terminate(_, _) ->
+ true = ets:delete(?ETS),
+ ok.
+
+code_change(_, State, _) ->
+ {ok, State}.
+
+%%---------------------------------------------------------------------------
+
+%% Internal Methods
+
+listener_started(Listener) ->
+ [rabbit_networking:tcp_listener_started(Protocol, Listener, IPAddress, Port)
+ || {Protocol, IPAddress, Port} <- listener_info(Listener)],
+ ok.
+
+listener_stopped(Listener) ->
+ [rabbit_networking:tcp_listener_stopped(Protocol, Listener, IPAddress, Port)
+ || {Protocol, IPAddress, Port} <- listener_info(Listener)],
+ ok.
+
+listener_info(Listener) ->
+ Protocol = case pget(protocol, Listener) of
+ undefined ->
+ case pget(ssl, Listener) of
+ true -> https;
+ _ -> http
+ end;
+ P ->
+ P
+ end,
+ Port = pget(port, Listener),
+ [{Protocol, IPAddress, Port}
+ || {IPAddress, _Port, _Family}
+ <- rabbit_networking:tcp_listener_addresses(Port)].
+
+lookup_dispatch(Lsnr) ->
+ case ets:lookup(?ETS, pget(port, Lsnr)) of
+ [{_, Lsnr, S, F}] -> {ok, {S, F}};
+ [{_, Lsnr2, S, _F}] -> {error, {different, first_desc(S), Lsnr2}};
+ [] -> {error, {no_record_for_listener, Lsnr}}
+ end.
+
+first_desc([{_N, _S, _H, {_, Desc}} | _]) -> Desc.
+
+set_dispatch(Listener, Selectors, Fallback) ->
+ ets:insert(?ETS, {pget(port, Listener), Listener, Selectors, Fallback}).
+
+match_request([], _) ->
+ not_found;
+match_request([{_Name, Selector, Dispatch, _Link}|Rest], Req) ->
+ case Selector(Req) of
+ true -> Dispatch;
+ false -> match_request(Rest, Req)
+ end.
+
+list() ->
+ [{Path, Desc, Listener} ||
+ {_P, Listener, Selectors, _F} <- ets:tab2list(?ETS),
+ {_N, _S, _H, {Path, Desc}} <- Selectors].
+
+-spec listener_by_name(atom()) -> {ok, term()} | {error, not_found}.
+listener_by_name(Name) ->
+ case [L || {_P, L, S, _F} <- ets:tab2list(?ETS), contains_name(Name, S)] of
+ [Listener] -> {ok, Listener};
+ [] -> {error, not_found}
+ end.
+
+contains_name(Name, Selectors) ->
+ lists:member(Name, [N || {N, _S, _H, _L} <- Selectors]).
+
+list(Listener) ->
+ {ok, {Selectors, _Fallback}} = lookup_dispatch(Listener),
+ [{Path, Desc} || {_N, _S, _H, {Path, Desc}} <- Selectors].
+
+%%---------------------------------------------------------------------------
+
+listing_fallback_handler(Listener) ->
+ cowboy_router:compile([{'_', [
+ {"/", rabbit_web_dispatch_listing_handler, Listener}
+ ]}]).
diff --git a/deps/rabbitmq_web_dispatch/src/rabbit_web_dispatch_sup.erl b/deps/rabbitmq_web_dispatch/src/rabbit_web_dispatch_sup.erl
new file mode 100644
index 0000000000..1d270550c0
--- /dev/null
+++ b/deps/rabbitmq_web_dispatch/src/rabbit_web_dispatch_sup.erl
@@ -0,0 +1,131 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_web_dispatch_sup).
+
+-behaviour(supervisor).
+
+-define(SUP, ?MODULE).
+
+%% External exports
+-export([start_link/0, ensure_listener/1, stop_listener/1]).
+
+%% supervisor callbacks
+-export([init/1]).
+
+%% @spec start_link() -> ServerRet
+%% @doc API for starting the supervisor.
+start_link() ->
+ supervisor:start_link({local, ?SUP}, ?MODULE, []).
+
+ensure_listener(Listener) ->
+ case proplists:get_value(port, Listener) of
+ undefined ->
+ {error, {no_port_given, Listener}};
+ _ ->
+ {Transport, TransportOpts, ProtoOpts} = preprocess_config(Listener),
+ ProtoOptsMap = maps:from_list(ProtoOpts),
+ StreamHandlers = stream_handlers_config(ProtoOpts),
+ rabbit_log:debug("Starting HTTP[S] listener with transport ~s, options ~p and protocol options ~p, stream handlers ~p",
+ [Transport, TransportOpts, ProtoOptsMap, StreamHandlers]),
+ CowboyOptsMap =
+ maps:merge(#{env =>
+ #{rabbit_listener => Listener},
+ middlewares =>
+ [rabbit_cowboy_middleware, cowboy_router, cowboy_handler],
+ stream_handlers => StreamHandlers},
+ ProtoOptsMap),
+ Child = ranch:child_spec(rabbit_networking:ranch_ref(Listener), 100,
+ Transport, TransportOpts,
+ cowboy_clear, CowboyOptsMap),
+ case supervisor:start_child(?SUP, Child) of
+ {ok, _} -> new;
+ {error, {already_started, _}} -> existing;
+ {error, {E, _}} -> check_error(Listener, E)
+ end
+ end.
+
+stop_listener(Listener) ->
+ Name = rabbit_networking:ranch_ref(Listener),
+ ok = supervisor:terminate_child(?SUP, {ranch_listener_sup, Name}),
+ ok = supervisor:delete_child(?SUP, {ranch_listener_sup, Name}).
+
+%% @spec init([[instance()]]) -> SupervisorTree
+%% @doc supervisor callback.
+init([]) ->
+ Registry = {rabbit_web_dispatch_registry,
+ {rabbit_web_dispatch_registry, start_link, []},
+ transient, 5000, worker, dynamic},
+ Log = {rabbit_mgmt_access_logger, {gen_event, start_link,
+ [{local, webmachine_log_event}]},
+ permanent, 5000, worker, [dynamic]},
+ {ok, {{one_for_one, 10, 10}, [Registry, Log]}}.
+
+%%
+%% Implementation
+%%
+
+preprocess_config(Options) ->
+ case proplists:get_value(ssl, Options) of
+ true -> _ = rabbit_networking:ensure_ssl(),
+ case rabbit_networking:poodle_check('HTTP') of
+ ok -> case proplists:get_value(ssl_opts, Options) of
+ undefined -> auto_ssl(Options);
+ _ -> fix_ssl(Options)
+ end;
+ danger -> {ranch_tcp, transport_config(Options), protocol_config(Options)}
+ end;
+ _ -> {ranch_tcp, transport_config(Options), protocol_config(Options)}
+ end.
+
+auto_ssl(Options) ->
+ {ok, ServerOpts} = application:get_env(rabbit, ssl_options),
+ Remove = [verify, fail_if_no_peer_cert],
+ SSLOpts = [{K, V} || {K, V} <- ServerOpts,
+ not lists:member(K, Remove)],
+ fix_ssl([{ssl_opts, SSLOpts} | Options]).
+
+fix_ssl(Options) ->
+ SSLOpts = proplists:get_value(ssl_opts, Options),
+ {ranch_ssl,
+ transport_config(Options ++ rabbit_networking:fix_ssl_options(SSLOpts)),
+ protocol_config(Options)}.
+
+transport_config(Options0) ->
+ Options = proplists:delete(protocol,
+ proplists:delete(ssl,
+ proplists:delete(ssl_opts,
+ proplists:delete(cowboy_opts,
+ Options0)))),
+ case proplists:get_value(ip, Options) of
+ undefined ->
+ Options;
+ IP when is_tuple(IP) ->
+ Options;
+ IP when is_list(IP) ->
+ {ok, ParsedIP} = inet_parse:address(IP),
+ [{ip, ParsedIP}|proplists:delete(ip, Options)]
+ end.
+
+protocol_config(Options) ->
+ proplists:get_value(cowboy_opts, Options, []).
+
+stream_handlers_config(Options) ->
+ case lists:keyfind(compress, 1, Options) of
+ {compress, false} -> [rabbit_cowboy_stream_h, cowboy_stream_h];
+ %% Compress by default. Since Cowboy 2.0 the compress option
+ %% has been replaced by the cowboy_compress_h stream handler.
+ %% Compression is not applied to responses smaller than 300 bytes.
+ _ -> [rabbit_cowboy_stream_h, cowboy_compress_h, cowboy_stream_h]
+ end.
+
+check_error(Listener, Error) ->
+ Ignore = proplists:get_value(ignore_in_use, Listener, false),
+ case {Error, Ignore} of
+ {eaddrinuse, true} -> ignore;
+ _ -> exit({could_not_start_listener, Listener, Error})
+ end.
diff --git a/deps/rabbitmq_web_dispatch/src/rabbit_web_dispatch_util.erl b/deps/rabbitmq_web_dispatch/src/rabbit_web_dispatch_util.erl
new file mode 100644
index 0000000000..8aca673a48
--- /dev/null
+++ b/deps/rabbitmq_web_dispatch/src/rabbit_web_dispatch_util.erl
@@ -0,0 +1,55 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_web_dispatch_util).
+
+-export([parse_auth_header/1]).
+-export([relativise/2, unrelativise/2]).
+
+%% @todo remove
+parse_auth_header(Header) ->
+ case Header of
+ "Basic " ++ Base64 ->
+ Str = base64:mime_decode_to_string(Base64),
+ case string:chr(Str, $:) of
+ 0 -> invalid;
+ N -> [list_to_binary(string:sub_string(Str, 1, N - 1)),
+ list_to_binary(string:sub_string(Str, N + 1))]
+ end;
+ _ ->
+ invalid
+ end.
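+
+%% For example (illustrative; "Z3Vlc3Q6Z3Vlc3Q=" is the Base64 encoding of
+%% "guest:guest"):
+%%   parse_auth_header("Basic Z3Vlc3Q6Z3Vlc3Q=") -> [<<"guest">>, <<"guest">>]
+%% Any other scheme, or a payload without a colon, yields 'invalid'.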
+
+relativise("/" ++ F, "/" ++ T) ->
+ From = string:tokens(F, "/"),
+ To = string:tokens(T, "/"),
+ string:join(relativise0(From, To), "/").
+
+relativise0([H], [H|_] = To) ->
+ To;
+relativise0([H|From], [H|To]) ->
+ relativise0(From, To);
+relativise0(From, []) ->
+ lists:duplicate(length(From), "..");
+relativise0([_|From], To) ->
+ lists:duplicate(length(From), "..") ++ To;
+relativise0([], To) ->
+ To.
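+
+%% Examples (mirroring the unit test suite later in this diff):
+%%   relativise("/foo/bar/bash", "/foo/bar/baz") -> "baz"
+%%   relativise("/foo/bar/bash", "/foo/bax/baz") -> "../bax/baz"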
+
+unrelativise(_, "/" ++ T) -> "/" ++ T;
+unrelativise(F, "./" ++ T) -> unrelativise(F, T);
+unrelativise(F, "../" ++ T) -> unrelativise(strip_tail(F), T);
+unrelativise(F, T) -> case string:str(F, "/") of
+ 0 -> T;
+ _ -> strip_tail(F) ++ "/" ++ T
+ end.
+
+strip_tail("") -> exit(not_enough_to_strip);
+strip_tail(S) -> case string:rstr(S, "/") of
+ 0 -> "";
+ I -> string:left(S, I - 1)
+ end.
diff --git a/deps/rabbitmq_web_dispatch/src/webmachine_log.erl b/deps/rabbitmq_web_dispatch/src/webmachine_log.erl
new file mode 100644
index 0000000000..dd4da91924
--- /dev/null
+++ b/deps/rabbitmq_web_dispatch/src/webmachine_log.erl
@@ -0,0 +1,239 @@
+%% Copyright (c) 2011-2012 Basho Technologies, Inc. All Rights Reserved.
+%%
+%% This file is provided to you under the Apache License,
+%% Version 2.0 (the "License"); you may not use this file
+%% except in compliance with the License. You may obtain
+%% a copy of the License at
+%%
+%% https://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing,
+%% software distributed under the License is distributed on an
+%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+%% KIND, either express or implied. See the License for the
+%% specific language governing permissions and limitations
+%% under the License.
+
+%% @doc Helper functions for webmachine's default log handlers
+
+-module(webmachine_log).
+
+-include("webmachine_logger.hrl").
+
+-export([add_handler/2,
+ call/2,
+ call/3,
+ datehour/0,
+ datehour/1,
+ defer_refresh/1,
+ delete_handler/1,
+ fix_log/2,
+ fmt_ip/1,
+ fmtnow/0,
+ log_access/1,
+ log_close/3,
+ log_open/1,
+ log_open/2,
+ log_write/2,
+ maybe_rotate/3,
+ month/1,
+ refresh/2,
+ suffix/1,
+ zeropad/2,
+ zone/0]).
+
+-record(state, {hourstamp :: non_neg_integer(),
+ filename :: string(),
+ handle :: file:io_device()}).
+
+%% @doc Add a handler to receive log events
+-type add_handler_result() :: ok | {'EXIT', term()} | term().
+-spec add_handler(atom() | {atom(), term()}, term()) -> add_handler_result().
+add_handler(Mod, Args) ->
+ gen_event:add_handler(?EVENT_LOGGER, Mod, Args).
+
+%% @doc Make a synchronous call directly to a specific event handler
+%% module
+-type error() :: {error, bad_module} | {'EXIT', term()} | term().
+-spec call(atom(), term()) -> term() | error().
+call(Mod, Msg) ->
+ gen_event:call(?EVENT_LOGGER, Mod, Msg).
+
+%% @doc Make a synchronous call directly to a specific event handler
+%% module
+-spec call(atom(), term(), timeout()) -> term() | error().
+call(Mod, Msg, Timeout) ->
+ gen_event:call(?EVENT_LOGGER, Mod, Msg, Timeout).
+
+%% @doc Return a four-tuple containing year, month, day, and hour
+%% of the current time.
+-type datehour() :: {calendar:year(), calendar:month(), calendar:day(), calendar:hour()}.
+-spec datehour() -> datehour().
+datehour() ->
+ datehour(os:timestamp()).
+
+%% @doc Return a four-tuple containing year, month, day, and hour
+%% of the specified time.
+-spec datehour(erlang:timestamp()) -> datehour().
+datehour(TS) ->
+ {{Y, M, D}, {H, _, _}} = calendar:now_to_universal_time(TS),
+ {Y, M, D, H}.
+
+%% @doc Defer the refresh of a log file.
+-spec defer_refresh(atom()) -> {ok, timer:tref()} | {error, term()}.
+defer_refresh(Mod) ->
+ {_, {_, M, S}} = calendar:universal_time(),
+ Time = 1000 * (3600 - ((M * 60) + S)),
+ timer:apply_after(Time, ?MODULE, refresh, [Mod, os:timestamp()]).
+
+%% @doc Remove a log handler
+-type delete_handler_result() :: term() | {error, module_not_found} | {'EXIT', term()}.
+-spec delete_handler(atom() | {atom(), term()}) -> delete_handler_result().
+delete_handler(Mod) ->
+ gen_event:delete_handler(?EVENT_LOGGER, Mod, []).
+
+%% Seek backwards to the last valid log entry
+-spec fix_log(file:io_device(), non_neg_integer()) -> ok.
+fix_log(_FD, 0) ->
+ ok;
+fix_log(FD, 1) ->
+ {ok, 0} = file:position(FD, 0),
+ ok;
+fix_log(FD, Location) ->
+ case file:pread(FD, Location - 1, 1) of
+ {ok, [$\n | _]} ->
+ ok;
+ {ok, _} ->
+ fix_log(FD, Location - 1)
+ end.
+
+%% @doc Format an IP address or host name
+-spec fmt_ip(undefined | string() | inet:ip4_address() | inet:ip6_address()) -> string().
+fmt_ip(IP) when is_tuple(IP) ->
+ inet_parse:ntoa(IP);
+fmt_ip(undefined) ->
+ "0.0.0.0";
+fmt_ip(HostName) ->
+ HostName.
+
+%% @doc Format the current time into a string
+-spec fmtnow() -> string().
+fmtnow() ->
+ {{Year, Month, Date}, {Hour, Min, Sec}} = calendar:local_time(),
+ io_lib:format("[~2..0w/~s/~4..0w:~2..0w:~2..0w:~2..0w ~s]",
+ [Date,month(Month),Year, Hour, Min, Sec, zone()]).
+
+%% @doc Notify registered log event handler of an access event.
+-spec log_access(tuple()) -> ok.
+log_access({_, _, _}=LogData) ->
+ gen_event:sync_notify(?EVENT_LOGGER, {log_access, LogData}).
+
+%% @doc Close a log file.
+-spec log_close(atom(), string(), file:io_device()) -> ok | {error, term()}.
+log_close(Mod, Name, FD) ->
+ error_logger:info_msg("~p: closing log file: ~p~n", [Mod, Name]),
+ file:close(FD).
+
+%% @doc Open a new log file for writing
+-spec log_open(string()) -> {file:io_device(), non_neg_integer()}.
+log_open(FileName) ->
+ DateHour = datehour(),
+ {log_open(FileName, DateHour), DateHour}.
+
+%% @doc Open a new log file for writing
+-spec log_open(string(), non_neg_integer()) -> file:io_device().
+log_open(FileName, DateHour) ->
+ LogName = FileName ++ suffix(DateHour),
+ error_logger:info_msg("opening log file: ~p~n", [LogName]),
+ filelib:ensure_dir(LogName),
+ {ok, FD} = file:open(LogName, [read, write, raw]),
+ {ok, Location} = file:position(FD, eof),
+ fix_log(FD, Location),
+ file:truncate(FD),
+ FD.
+
+-spec log_write(file:io_device(), iolist()) -> ok | {error, term()}.
+log_write(FD, IoData) ->
+ file:write(FD, lists:flatten(IoData)).
+
+%% @doc Rotate a log file if the hour it represents
+%% has passed.
+-spec maybe_rotate(atom(), erlang:timestamp(), #state{}) -> #state{}.
+maybe_rotate(Mod, Time, State) ->
+ ThisHour = datehour(Time),
+ if ThisHour == State#state.hourstamp ->
+ State;
+ true ->
+ defer_refresh(Mod),
+ log_close(Mod, State#state.filename, State#state.handle),
+ Handle = log_open(State#state.filename, ThisHour),
+ State#state{hourstamp=ThisHour, handle=Handle}
+ end.
+
+%% @doc Convert numeric month value to the abbreviation
+-spec month(1..12) -> string().
+month(1) ->
+ "Jan";
+month(2) ->
+ "Feb";
+month(3) ->
+ "Mar";
+month(4) ->
+ "Apr";
+month(5) ->
+ "May";
+month(6) ->
+ "Jun";
+month(7) ->
+ "Jul";
+month(8) ->
+ "Aug";
+month(9) ->
+ "Sep";
+month(10) ->
+ "Oct";
+month(11) ->
+ "Nov";
+month(12) ->
+ "Dec".
+
+%% @doc Make a synchronous call to instruct a log handler to refresh
+%% itself.
+-spec refresh(atom(), erlang:timestamp()) -> ok | {error, term()}.
+refresh(Mod, Time) ->
+ call(Mod, {refresh, Time}, infinity).
+
+-spec suffix(datehour()) -> string().
+suffix({Y, M, D, H}) ->
+ YS = zeropad(Y, 4),
+ MS = zeropad(M, 2),
+ DS = zeropad(D, 2),
+ HS = zeropad(H, 2),
+ lists:flatten([$., YS, $_, MS, $_, DS, $_, HS]).
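+
+%% For example (illustrative): suffix({2020, 11, 18, 14}) returns
+%% ".2020_11_18_14", which is appended to the base log file name.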
+
+-spec zeropad(integer(), integer()) -> string().
+zeropad(Num, MinLength) ->
+ NumStr = integer_to_list(Num),
+ zeropad_str(NumStr, MinLength - length(NumStr)).
+
+-spec zeropad_str(string(), integer()) -> string().
+zeropad_str(NumStr, Zeros) when Zeros > 0 ->
+ zeropad_str([$0 | NumStr], Zeros - 1);
+zeropad_str(NumStr, _) ->
+ NumStr.
+
+-spec zone() -> string().
+zone() ->
+ Time = erlang:universaltime(),
+ LocalTime = calendar:universal_time_to_local_time(Time),
+ DiffSecs = calendar:datetime_to_gregorian_seconds(LocalTime) -
+ calendar:datetime_to_gregorian_seconds(Time),
+ zone((DiffSecs/3600)*100).
+
+%% Ugly reformatting code to get times like +0000 and -1300
+
+-spec zone(integer()) -> string().
+zone(Val) when Val < 0 ->
+ io_lib:format("-~4..0w", [trunc(abs(Val))]);
+zone(Val) when Val >= 0 ->
+ io_lib:format("+~4..0w", [trunc(abs(Val))]).
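+
+%% For example (illustrative): a local offset of UTC+01:00 yields "+0100",
+%% and UTC-13:00 yields "-1300".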
diff --git a/deps/rabbitmq_web_dispatch/src/webmachine_log_handler.erl b/deps/rabbitmq_web_dispatch/src/webmachine_log_handler.erl
new file mode 100644
index 0000000000..e19b247d73
--- /dev/null
+++ b/deps/rabbitmq_web_dispatch/src/webmachine_log_handler.erl
@@ -0,0 +1,128 @@
+%% Copyright (c) 2011-2013 Basho Technologies, Inc. All Rights Reserved.
+%%
+%% This file is provided to you under the Apache License,
+%% Version 2.0 (the "License"); you may not use this file
+%% except in compliance with the License. You may obtain
+%% a copy of the License at
+%%
+%% https://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing,
+%% software distributed under the License is distributed on an
+%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+%% KIND, either express or implied. See the License for the
+%% specific language governing permissions and limitations
+%% under the License.
+
+%% @doc Default log handler for webmachine
+
+-module(webmachine_log_handler).
+
+-behaviour(gen_event).
+
+%% gen_event callbacks
+-export([init/1,
+ handle_call/2,
+ handle_event/2,
+ handle_info/2,
+ terminate/2,
+ code_change/3]).
+
+-include("webmachine_logger.hrl").
+
+-ifdef(TEST).
+-include_lib("eunit/include/eunit.hrl").
+-endif.
+
+-record(state, {hourstamp, filename, handle}).
+
+-define(FILENAME, "access.log").
+
+%% ===================================================================
+%% gen_event callbacks
+%% ===================================================================
+
+%% @private
+init([BaseDir]) ->
+ webmachine_log:defer_refresh(?MODULE),
+ FileName = filename:join(BaseDir, ?FILENAME),
+ {Handle, DateHour} = webmachine_log:log_open(FileName),
+ {ok, #state{filename=FileName, handle=Handle, hourstamp=DateHour}}.
+
+%% @private
+handle_call({_Label, MRef, get_modules}, State) ->
+ {ok, {MRef, [?MODULE]}, State};
+handle_call({refresh, Time}, State) ->
+ {ok, ok, webmachine_log:maybe_rotate(?MODULE, Time, State)};
+handle_call(_Request, State) ->
+ {ok, ok, State}.
+
+%% @private
+handle_event({log_access, LogData}, State) ->
+ NewState = webmachine_log:maybe_rotate(?MODULE, os:timestamp(), State),
+ Msg = format_req(LogData),
+ webmachine_log:log_write(NewState#state.handle, Msg),
+ {ok, NewState};
+handle_event(_Event, State) ->
+ {ok, State}.
+
+%% @private
+handle_info(_Info, State) ->
+ {ok, State}.
+
+%% @private
+terminate(_Reason, _State) ->
+ ok.
+
+%% @private
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+%% ===================================================================
+%% Internal functions
+%% ===================================================================
+
+%% We currently keep most of the Webmachine logging facility. But
+%% since we are now using Cowboy, a few small parts had to change.
+%% This is one such part. The code is, however, equivalent to Webmachine's.
+
+format_req({Status0, Body, Req}) ->
+ User = user_from_req(Req),
+ Time = webmachine_log:fmtnow(),
+ Status = integer_to_list(Status0),
+ Length1 = case Body of
+ {sendfile, _, Length0, _} -> Length0;
+ _ -> iolist_size(Body)
+ end,
+ Length = integer_to_list(Length1),
+ Method = cowboy_req:method(Req),
+ Path = cowboy_req:path(Req),
+ Peer = case cowboy_req:peer(Req) of
+ {Peer0, _Port} -> Peer0;
+ Other -> Other
+ end,
+ Version = cowboy_req:version(Req),
+ Referer = cowboy_req:header(<<"referer">>, Req, <<>>),
+ UserAgent = cowboy_req:header(<<"user-agent">>, Req, <<>>),
+ fmt_alog(Time, Peer, User, Method, Path, Version,
+ Status, Length, Referer, UserAgent).
+
+fmt_alog(Time, Ip, User, Method, Path, Version,
+ Status, Length, Referrer, UserAgent) ->
+ [webmachine_log:fmt_ip(Ip), " - ", User, [$\s], Time, [$\s, $"], Method, " ", Path,
+ " ", atom_to_list(Version), [$",$\s],
+ Status, [$\s], Length, [$\s,$"], Referrer,
+ [$",$\s,$"], UserAgent, [$",$\n]].
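+
+%% A formatted line looks like this (illustrative values):
+%%   127.0.0.1 - guest [18/Nov/2020:14:27:41 +0000] "GET /api/overview HTTP/1.1" 200 1234 "" "curl/7.64.1"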
+
+user_from_req(Req) ->
+ try cowboy_req:parse_header(<<"authorization">>, Req) of
+ {basic, Username, _} ->
+ Username;
+ {bearer, _} ->
+ rabbit_data_coercion:to_binary(
+ application:get_env(rabbitmq_management, uaa_client_id, ""));
+ _ ->
+ "-"
+ catch _:_ ->
+ "-"
+ end.
diff --git a/deps/rabbitmq_web_dispatch/src/webmachine_logger.hrl b/deps/rabbitmq_web_dispatch/src/webmachine_logger.hrl
new file mode 100644
index 0000000000..c07068ae6a
--- /dev/null
+++ b/deps/rabbitmq_web_dispatch/src/webmachine_logger.hrl
@@ -0,0 +1,16 @@
+-record(wm_log_data,
+ {resource_module :: atom(),
+ start_time :: tuple(),
+ method :: atom(),
+ headers,
+ peer,
+ path :: string(),
+ version,
+ response_code,
+ response_length,
+ end_time :: tuple(),
+ finish_time :: tuple(),
+ notes}).
+-type wm_log_data() :: #wm_log_data{}.
+
+-define(EVENT_LOGGER, webmachine_log_event).
diff --git a/deps/rabbitmq_web_dispatch/test/priv/www/index.html b/deps/rabbitmq_web_dispatch/test/priv/www/index.html
new file mode 100644
index 0000000000..b9f7cd4694
--- /dev/null
+++ b/deps/rabbitmq_web_dispatch/test/priv/www/index.html
@@ -0,0 +1,7 @@
+<html>
+ <head>
+ <title>RabbitMQ HTTP Server Test Page</title>
+ </head>
+ <body>
+ </body>
+</html>
diff --git a/deps/rabbitmq_web_dispatch/test/rabbit_web_dispatch_SUITE.erl b/deps/rabbitmq_web_dispatch/test/rabbit_web_dispatch_SUITE.erl
new file mode 100644
index 0000000000..5975272e1a
--- /dev/null
+++ b/deps/rabbitmq_web_dispatch/test/rabbit_web_dispatch_SUITE.erl
@@ -0,0 +1,197 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_web_dispatch_SUITE).
+
+
+-compile(export_all).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+all() ->
+ [
+ {group, non_parallel_tests}
+ ].
+
+groups() ->
+ [
+ {non_parallel_tests, [], [
+ query_static_resource_test,
+ add_idempotence_test,
+ log_source_address_test,
+ parse_ip_test
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Test suite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, ?MODULE},
+ {rmq_extra_tcp_ports, [tcp_port_http_extra]}
+ ]),
+ rabbit_ct_helpers:run_setup_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps()).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config,
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_group(_, Config) ->
+ Config.
+
+end_per_group(_, Config) ->
+ Config.
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+%% -------------------------------------------------------------------
+%% Test cases.
+%% -------------------------------------------------------------------
+
+query_static_resource_test(Config) ->
+ Host = rabbit_ct_helpers:get_config(Config, rmq_hostname),
+ Port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_http_extra),
+ rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, query_static_resource_test1, [Host, Port]).
+query_static_resource_test1(Host, Port) ->
+ inets:start(),
+ %% TODO this is a fairly rubbish test, but not as bad as it was
+ rabbit_web_dispatch:register_static_context(test, [{port, Port}],
+ "rabbit_web_dispatch_test",
+ ?MODULE, "test/priv/www", "Test"),
+ {ok, {_Status, _Headers, Body}} =
+ httpc:request(format("http://~s:~w/rabbit_web_dispatch_test/index.html", [Host, Port])),
+ ?assert(string:str(Body, "RabbitMQ HTTP Server Test Page") /= 0),
+
+ passed.
+
+add_idempotence_test(Config) ->
+ Port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_http_extra),
+ rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, add_idempotence_test1, [Port]).
+add_idempotence_test1(Port) ->
+ inets:start(),
+ F = fun(_Req) -> ok end,
+ L = {"/foo", "Foo"},
+ rabbit_web_dispatch_registry:add(foo, [{port, Port}], F, F, L),
+ rabbit_web_dispatch_registry:add(foo, [{port, Port}], F, F, L),
+ ?assertEqual(
+ 1, length([ok || {"/foo", _, _} <-
+ rabbit_web_dispatch_registry:list_all()])),
+ passed.
+
+parse_ip_test(Config) ->
+ Port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_http_extra),
+ %% Use Port + 1 here to get a free port that is not already used by a listener.
+ rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, parse_ip_test1, [Port + 1]).
+parse_ip_test1(Port) ->
+ F = fun(_Req) -> ok end,
+ L = {"/parse_ip", "ParseIP"},
+ rabbit_web_dispatch_registry:add(parse_ip, [{port, Port}, {ip, "127.0.0.1"}], F, F, L),
+ ?assertEqual(
+ 1, length([ok || {"/parse_ip", _, _} <-
+ rabbit_web_dispatch_registry:list_all()])),
+ passed.
+
+
+log_source_address_test(Config) ->
+ Port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_http_extra),
+ rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, log_source_address_test1, [Port]).
+
+log_source_address_test1(Port) ->
+ inets:start(),
+ %% Given: everything in place to issue AND log an HTTP response.
+ {ok, _} = rabbit_web_dispatch:register_context_handler(log_response_test,
+ [{port, Port}], path(), table(), description()),
+ ok = webmachine_log:add_handler(webmachine_log_handler, [log_directory()]),
+
+ %% When: when a client makes a request.
+ {ok, _Response} = httpc:request(get, {"http://127.0.0.1:" ++
+ string(Port) ++ "/" ++ string(path()), []}, [],
+ [{ip, string(source())}]),
+
+ %% Then: log written WITH source IP address AND path.
+ true = logged(source()),
+ true = logged(path()),
+ true = logged(binary(status())),
+
+ passed.
+
+%% Ancillary procedures for log test
+
+%% Resource for testing with.
+path() -> <<"wonderland">>.
+
+%% HTTP server port.
+port() -> 4096.
+
+%% Log files will be written here.
+log_directory() ->
+ os:getenv("RABBITMQ_LOG_BASE") ++ "/".
+
+%% Source IP address of request.
+source() -> <<"127.0.0.1">>.
+
+description() -> "Test that source IP address is logged upon HTTP response.".
+
+status() -> 500.
+
+reason() -> <<"Testing, testing... 1, 2, 3.">>.
+
+%% HTTP server forwarding table.
+table() ->
+ cowboy_router:compile([{source(), [{"/" ++ string(path()), ?MODULE, []}]}]).
+
+%% Cowboy handler callbacks.
+init(Req, State) ->
+ cowboy_req:reply(
+ status(), #{<<"content-type">> => <<"text/plain">>}, reason(), Req),
+ {ok, Req, State}.
+
+terminate(_, _, _) ->
+ ok.
+
+%% Predicate: true if the given `Text` appears in the log file.
+logged(Text) ->
+ {ok, Handle} = file:open(log_directory() ++
+ "access.log" ++ webmachine_log:suffix(webmachine_log:datehour()),
+ [read, binary]),
+ logged(Handle, Text).
+
+logged(Handle, Text) ->
+ case io:get_line(Handle, "") of
+ eof ->
+ file:close(Handle),
+ false;
+ Line when is_binary(Line) ->
+ case binary:matches(Line, Text) of
+ [] ->
+ logged(Handle, Text);
+ [{N,_}] when is_integer(N), N >= 0 ->
+ true
+ end
+ end.
+
+%% Convenience procedures.
+string(N) when is_integer(N) ->
+ erlang:integer_to_list(N);
+string(B) when is_binary(B) ->
+ erlang:binary_to_list(B).
+
+binary(N) when is_integer(N) ->
+ erlang:integer_to_binary(N).
+
+format(Fmt, Val) ->
+ lists:flatten(io_lib:format(Fmt, Val)).
diff --git a/deps/rabbitmq_web_dispatch/test/rabbit_web_dispatch_unit_SUITE.erl b/deps/rabbitmq_web_dispatch/test/rabbit_web_dispatch_unit_SUITE.erl
new file mode 100644
index 0000000000..a71dabe014
--- /dev/null
+++ b/deps/rabbitmq_web_dispatch/test/rabbit_web_dispatch_unit_SUITE.erl
@@ -0,0 +1,73 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_web_dispatch_unit_SUITE).
+
+-compile(export_all).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+all() ->
+ [
+ {group, parallel_tests}
+ ].
+
+groups() ->
+ [
+ {parallel_tests, [parallel], [
+ relativise_test,
+ unrelativise_test
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Test suite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ Config.
+
+end_per_suite(Config) ->
+ Config.
+
+init_per_group(_, Config) ->
+ Config.
+
+end_per_group(_, Config) ->
+ Config.
+
+init_per_testcase(_Testcase, Config) ->
+ Config.
+
+end_per_testcase(_Testcase, Config) ->
+ Config.
+
+%% -------------------------------------------------------------------
+%% Test cases.
+%% -------------------------------------------------------------------
+
+relativise_test(_Config) ->
+ Rel = fun rabbit_web_dispatch_util:relativise/2,
+ ?assertEqual("baz", Rel("/foo/bar/bash", "/foo/bar/baz")),
+ ?assertEqual("../bax/baz", Rel("/foo/bar/bash", "/foo/bax/baz")),
+ ?assertEqual("../bax/baz", Rel("/bar/bash", "/bax/baz")),
+ ?assertEqual("..", Rel("/foo/bar/bash", "/foo/bar")),
+ ?assertEqual("../..", Rel("/foo/bar/bash", "/foo")),
+ ?assertEqual("bar/baz", Rel("/foo/bar", "/foo/bar/baz")),
+ ?assertEqual("foo", Rel("/", "/foo")),
+
+ passed.
+
+unrelativise_test(_Config) ->
+ Un = fun rabbit_web_dispatch_util:unrelativise/2,
+ ?assertEqual("/foo/bar", Un("/foo/foo", "bar")),
+ ?assertEqual("/foo/bar", Un("/foo/foo", "./bar")),
+ ?assertEqual("bar", Un("foo", "bar")),
+ ?assertEqual("/baz/bar", Un("/foo/foo", "../baz/bar")),
+
+ passed.
diff --git a/deps/rabbitmq_web_mqtt/.gitignore b/deps/rabbitmq_web_mqtt/.gitignore
new file mode 100644
index 0000000000..05dc56dccf
--- /dev/null
+++ b/deps/rabbitmq_web_mqtt/.gitignore
@@ -0,0 +1,17 @@
+*.swp
+/.erlang.mk/
+/cover/
+/deps/
+/ebin/
+/escript/
+/escript.lock
+/logs/
+/plugins/
+/plugins.lock
+/sbin/
+/sbin.lock
+/test/*.beam
+
+/rabbitmq_web_mqtt.d
+
+test/config_schema_SUITE_data/schema/
diff --git a/deps/rabbitmq_web_mqtt/.travis.yml b/deps/rabbitmq_web_mqtt/.travis.yml
new file mode 100644
index 0000000000..731588a655
--- /dev/null
+++ b/deps/rabbitmq_web_mqtt/.travis.yml
@@ -0,0 +1,61 @@
+# vim:sw=2:et:
+
+os: linux
+dist: xenial
+language: elixir
+notifications:
+ email:
+ recipients:
+ - alerts@rabbitmq.com
+ on_success: never
+ on_failure: always
+addons:
+ apt:
+ packages:
+ - awscli
+cache:
+ apt: true
+env:
+ global:
+ - secure: bbHDJAounLsXozDTEMgtZzR3opieYtJ3tth44JKZ1CgVDLWlLTRvNIJgzH77bDTph4GhMiULF2M3LR0EL7nNGfWUN7B5EoXvtLdGLvaKjI8dt+UIkAVZfsOhYpI2DLbtA3GcspmKpslcUE7k3bC63zHJR05nwFMChtbaMVeRL4LOxgrcQXf78dGSVCihVcm4mmNNg2ruBhYl6Q1I4JWO1udaHIxAZz7u2+JT4pk0UX7gERl5WpaxgPni2ucrVMN/ByzSqNODI1XX2VimJcmbcmUaW7YP5YsfCQHMEN1nxnGboUAue29ZhdJzX6KiVR4gpH5rIPnCMvL2b6DY2srQgwO2SQWjtGOaEVX3B2VG8lccnew7CM10PDl8COyRWN8GtBVoQf8sizlSWiB+lS5gK4wCljBduGF1Df0ntpFJHim6r6/5dcRJAK7myEh63ZGajSINqYN3XFek0jucQL7hCg61RqBofaj8+C0BhoYKb4ImwX7cyqpV16K1tN0VO6PwQfzW7LdR9ydC5lg9EGt+DJnLiSqiX+H8kH2J0tUVR2ubp2qBnW5jqOZCz6f/5wIBwpOtHGk/tJTCgLnhyYadkPh4+TRS7iZet/34eIcvAFfYEI5MmrfsYjyp1J8wAEd6ZoxpEiWA5Qotxc6cnCU7GCJR1/11Ah+zQiIEO9LpU2s=
+ - secure: IyAiNhAl9bLW3Txp+EydYwAsd03npbjZnMOLKXeLCSl68+HU/2SsH1xCqFf/bHL4SXoCI/Icgn36WFQ+U7HMqJ1EDjpmkb94UY+cP4090rChiowlEmVFRIw2lTaguWO3b4JYmqE8Zu1L2eS/L+gyt5tRGC3kzVwkJ8qPkLwcS8RsAJJ15kHPV/lH9IlqYk8G667L7YhAZ5RsVFqBmWpn9+vpH3Xpx9TOtLMk171GQFTGxSIAAn5fZ2oyY0zXD03AwL4O4am9B0kCLvwkSfWmGsXY6wITGSOOVjyYq6MD7wa+G7yYWzzEv1ogyvAu1U7ut0+HkSUBx/cTghYsE5UAK0a84oPCKaY83p8dVKXFfxnP24sQiHdCzFyiZms6ohLzyPeDMH8Repy77dTgT2MMHVv4xVt/9qCFB5EguSmJUgldelPNpBOlC/9kVm5yM67NbufZYEr+5pIs+dwk3R1ypau5as3Tyk7jOJy4ykoQZdzDPygouXjq0CCmOjtAaS6j9rv5OTq+1pr0JvgSR9Kfo5/2p12g8Sn8GugdKMX11PPgguQSB1xeLq1X3QT9bPU6HUFG3oV3KHtuD064x/j1a1AzS4HHUF53JV+CfKk4BvrFEbd6qf4VVghjexLTXssBS2MJq1IN2gItVWT6DZ1HESj3XI9WgrdaPno3uH1a3zA=
+
+ # $base_rmq_ref is used by rabbitmq-components.mk to select the
+ # appropriate branch for dependencies.
+ - base_rmq_ref=master
+
+elixir:
+ - '1.9'
+otp_release:
+ - '21.3'
+ - '22.2'
+
+install:
+ # This project being an Erlang one (we just set language to Elixir
+ # to ensure it is installed), we don't want Travis to run mix(1)
+ # automatically as it will break.
+ skip
+
+script:
+ # $current_rmq_ref is also used by rabbitmq-components.mk to select
+ # the appropriate branch for dependencies.
+ - make check-rabbitmq-components.mk
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+ - make xref
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+ - make tests
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+
+after_failure:
+ - |
+ cd "$TRAVIS_BUILD_DIR"
+ if test -d logs && test "$AWS_ACCESS_KEY_ID" && test "$AWS_SECRET_ACCESS_KEY"; then
+ archive_name="$(basename "$TRAVIS_REPO_SLUG")-$TRAVIS_JOB_NUMBER"
+
+ tar -c --transform "s/^logs/${archive_name}/" -f - logs | \
+ xz > "${archive_name}.tar.xz"
+
+ aws s3 cp "${archive_name}.tar.xz" s3://server-release-pipeline/travis-ci-logs/ \
+ --region eu-west-1 \
+ --acl public-read
+ fi
diff --git a/deps/rabbitmq_web_mqtt/CODE_OF_CONDUCT.md b/deps/rabbitmq_web_mqtt/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000000..08697906fd
--- /dev/null
+++ b/deps/rabbitmq_web_mqtt/CODE_OF_CONDUCT.md
@@ -0,0 +1,44 @@
+# Contributor Code of Conduct
+
+As contributors and maintainers of this project, and in the interest of fostering an open
+and welcoming community, we pledge to respect all people who contribute through reporting
+issues, posting feature requests, updating documentation, submitting pull requests or
+patches, and other activities.
+
+We are committed to making participation in this project a harassment-free experience for
+everyone, regardless of level of experience, gender, gender identity and expression,
+sexual orientation, disability, personal appearance, body size, race, ethnicity, age,
+religion, or nationality.
+
+Examples of unacceptable behavior by participants include:
+
+ * The use of sexualized language or imagery
+ * Personal attacks
+ * Trolling or insulting/derogatory comments
+ * Public or private harassment
+ * Publishing others' private information, such as physical or electronic addresses,
+ without explicit permission
+ * Other unethical or unprofessional conduct
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments,
+commits, code, wiki edits, issues, and other contributions that are not aligned to this
+Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors
+that they deem inappropriate, threatening, offensive, or harmful.
+
+By adopting this Code of Conduct, project maintainers commit themselves to fairly and
+consistently applying these principles to every aspect of managing this project. Project
+maintainers who do not follow or enforce the Code of Conduct may be permanently removed
+from the project team.
+
+This Code of Conduct applies both within project spaces and in public spaces when an
+individual is representing the project or its community.
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by
+contacting a project maintainer at [info@rabbitmq.com](mailto:info@rabbitmq.com). All complaints will
+be reviewed and investigated and will result in a response that is deemed necessary and
+appropriate to the circumstances. Maintainers are obligated to maintain confidentiality
+with regard to the reporter of an incident.
+
+This Code of Conduct is adapted from the
+[Contributor Covenant](https://contributor-covenant.org), version 1.3.0, available at
+[contributor-covenant.org/version/1/3/0/](https://contributor-covenant.org/version/1/3/0/)
diff --git a/deps/rabbitmq_web_mqtt/CONTRIBUTING.md b/deps/rabbitmq_web_mqtt/CONTRIBUTING.md
new file mode 100644
index 0000000000..592e7ced57
--- /dev/null
+++ b/deps/rabbitmq_web_mqtt/CONTRIBUTING.md
@@ -0,0 +1,99 @@
+Thank you for using RabbitMQ and for taking the time to contribute to the project.
+This document has two main parts:
+
+ * when and how to file GitHub issues for RabbitMQ projects
+ * how to submit pull requests
+
+They are intended to save you and RabbitMQ maintainers some time, so please
+take a moment to read through them.
+
+## Overview
+
+### GitHub issues
+
+The RabbitMQ team uses GitHub issues for _specific actionable items_ that
+engineers can work on. This assumes the following:
+
+* GitHub issues are not used for questions, investigations, root cause
+  analysis, discussions of potential issues, etc. (as defined by this team)
+* Enough information is provided by the reporter for maintainers to work with
+
+The team receives many questions through various venues every single
+day. Frequently, these questions do not include the necessary details
+the team needs to begin useful work. GitHub issues can very quickly
+turn into something impossible to navigate and make sense
+of. Because of this, questions, investigations, root cause analysis,
+and discussions of potential features are all considered to be
+[mailing list][rmq-users] material. If you are unsure where to begin,
+the [RabbitMQ users mailing list][rmq-users] is the right place.
+
+Getting all the details necessary to reproduce an issue, make a
+conclusion or even form a hypothesis about what's happening can take a
+fair amount of time. Please help others help you by providing a way to
+reproduce the behavior you're observing, or at least sharing as much
+relevant information as possible on the [RabbitMQ users mailing
+list][rmq-users].
+
+Please provide versions of the software used:
+
+ * RabbitMQ server
+ * Erlang
+ * Operating system version (and distribution, if applicable)
+ * All client libraries used
+ * RabbitMQ plugins (if applicable)
+
+The following information greatly helps in investigating and reproducing issues:
+
+ * RabbitMQ server logs
+ * A code example or terminal transcript that can be used to reproduce the issue
+ * Full exception stack traces (a single line message is not enough!)
+ * `rabbitmqctl report` and `rabbitmqctl environment` output
+ * Other relevant details about the environment and workload, e.g. a traffic capture
+ * Feel free to edit out hostnames and other potentially sensitive information.
+
+To make collecting much of this and other environment information easier, use
+the [`rabbitmq-collect-env`][rmq-collect-env] script. It will produce an archive with
+server logs, operating system logs, output of certain diagnostics commands and so on.
+Please note that **no effort is made to scrub any information that may be sensitive**.
+
+### Pull Requests
+
+RabbitMQ projects use pull requests to discuss, collaborate on and accept code contributions.
+Pull requests are the primary place for discussing code changes.
+
+Here's the recommended workflow:
+
+ * [Fork the repository][github-fork] or repositories you plan on contributing to. If multiple
+ repositories are involved in addressing the same issue, please use the same branch name
+ in each repository
+ * Create a branch with a descriptive name in the relevant repositories
+ * Make your changes, run tests (usually with `make tests`), commit with a
+ [descriptive message][git-commit-msgs], push to your fork
+ * Submit pull requests with an explanation of what has been changed and **why**
+ * Submit a filled out and signed [Contributor Agreement][ca-agreement] if needed (see below)
+ * Be patient. We will get to your pull request eventually
+
+If what you are going to work on is a substantial change, please first
+ask the core team for their opinion on the [RabbitMQ users mailing list][rmq-users].
+
+## Code of Conduct
+
+See [CODE_OF_CONDUCT.md](./CODE_OF_CONDUCT.md).
+
+## Contributor Agreement
+
+If you want to contribute a non-trivial change, please submit a signed
+copy of our [Contributor Agreement][ca-agreement] around the time you
+submit your pull request. This will make it much easier (in some
+cases, possible) for the RabbitMQ team at Pivotal to merge your
+contribution.
+
+## Where to Ask Questions
+
+If something isn't clear, feel free to ask on our [mailing list][rmq-users].
+
+[rmq-collect-env]: https://github.com/rabbitmq/support-tools/blob/master/scripts/rabbitmq-collect-env
+[git-commit-msgs]: https://chris.beams.io/posts/git-commit/
+[rmq-users]: https://groups.google.com/forum/#!forum/rabbitmq-users
+[ca-agreement]: https://cla.pivotal.io/sign/rabbitmq
+[github-fork]: https://help.github.com/articles/fork-a-repo/
diff --git a/deps/rabbitmq_web_mqtt/LICENSE b/deps/rabbitmq_web_mqtt/LICENSE
new file mode 100644
index 0000000000..ff53248d65
--- /dev/null
+++ b/deps/rabbitmq_web_mqtt/LICENSE
@@ -0,0 +1,5 @@
+This package, the RabbitMQ MQTT-over-WebSockets plugin, is licensed under the MPL 2.0. For the
+MPL 2.0, please see LICENSE-MPL-RabbitMQ.
+
+If you have any questions regarding licensing, please contact us at
+info@rabbitmq.com.
diff --git a/deps/rabbitmq_web_mqtt/LICENSE-MPL-RabbitMQ b/deps/rabbitmq_web_mqtt/LICENSE-MPL-RabbitMQ
new file mode 100644
index 0000000000..14e2f777f6
--- /dev/null
+++ b/deps/rabbitmq_web_mqtt/LICENSE-MPL-RabbitMQ
@@ -0,0 +1,373 @@
+Mozilla Public License Version 2.0
+==================================
+
+1. Definitions
+--------------
+
+1.1. "Contributor"
+ means each individual or legal entity that creates, contributes to
+ the creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+ means the combination of the Contributions of others (if any) used
+ by a Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+ means Source Code Form to which the initial Contributor has attached
+ the notice in Exhibit A, the Executable Form of such Source Code
+ Form, and Modifications of such Source Code Form, in each case
+ including portions thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ (a) that the initial Contributor has attached the notice described
+ in Exhibit B to the Covered Software; or
+
+ (b) that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the
+ terms of a Secondary License.
+
+1.6. "Executable Form"
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+ means a work that combines Covered Software with other material, in
+ a separate file or files, that is not Covered Software.
+
+1.8. "License"
+ means this document.
+
+1.9. "Licensable"
+ means having the right to grant, to the maximum extent possible,
+ whether at the time of the initial grant or subsequently, any and
+ all of the rights conveyed by this License.
+
+1.10. "Modifications"
+ means any of the following:
+
+ (a) any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered
+ Software; or
+
+ (b) any new file in Source Code Form that contains any Covered
+ Software.
+
+1.11. "Patent Claims" of a Contributor
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the
+ License, by the making, using, selling, offering for sale, having
+ made, import, or transfer of either its Contributions or its
+ Contributor Version.
+
+1.12. "Secondary License"
+ means either the GNU General Public License, Version 2.0, the GNU
+ Lesser General Public License, Version 2.1, the GNU Affero General
+ Public License, Version 3.0, or any later versions of those
+ licenses.
+
+1.13. "Source Code Form"
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that
+ controls, is controlled by, or is under common control with You. For
+ purposes of this definition, "control" means (a) the power, direct
+ or indirect, to cause the direction or management of such entity,
+ whether by contract or otherwise, or (b) ownership of more than
+ fifty percent (50%) of the outstanding shares or beneficial
+ ownership of such entity.
+
+2. License Grants and Conditions
+--------------------------------
+
+2.1. Grants
+
+Each Contributor hereby grants You a world-wide, royalty-free,
+non-exclusive license:
+
+(a) under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+(b) under Patent Claims of such Contributor to make, use, sell, offer
+ for sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+The licenses granted in Section 2.1 with respect to any Contribution
+become effective for each Contribution on the date the Contributor first
+distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+The licenses granted in this Section 2 are the only rights granted under
+this License. No additional rights or licenses will be implied from the
+distribution or licensing of Covered Software under this License.
+Notwithstanding Section 2.1(b) above, no patent license is granted by a
+Contributor:
+
+(a) for any code that a Contributor has removed from Covered Software;
+ or
+
+(b) for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+(c) under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+This License does not grant any rights in the trademarks, service marks,
+or logos of any Contributor (except as may be necessary to comply with
+the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+No Contributor makes additional grants as a result of Your choice to
+distribute the Covered Software under a subsequent version of this
+License (see Section 10.2) or under the terms of a Secondary License (if
+permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+Each Contributor represents that the Contributor believes its
+Contributions are its original creation(s) or it has sufficient rights
+to grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+This License is not intended to limit any rights You have under
+applicable copyright doctrines of fair use, fair dealing, or other
+equivalents.
+
+2.7. Conditions
+
+Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
+in Section 2.1.
+
+3. Responsibilities
+-------------------
+
+3.1. Distribution of Source Form
+
+All distribution of Covered Software in Source Code Form, including any
+Modifications that You create or to which You contribute, must be under
+the terms of this License. You must inform recipients that the Source
+Code Form of the Covered Software is governed by the terms of this
+License, and how they can obtain a copy of this License. You may not
+attempt to alter or restrict the recipients' rights in the Source Code
+Form.
+
+3.2. Distribution of Executable Form
+
+If You distribute Covered Software in Executable Form then:
+
+(a) such Covered Software must also be made available in Source Code
+ Form, as described in Section 3.1, and You must inform recipients of
+ the Executable Form how they can obtain a copy of such Source Code
+ Form by reasonable means in a timely manner, at a charge no more
+ than the cost of distribution to the recipient; and
+
+(b) You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter
+ the recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+You may create and distribute a Larger Work under terms of Your choice,
+provided that You also comply with the requirements of this License for
+the Covered Software. If the Larger Work is a combination of Covered
+Software with a work governed by one or more Secondary Licenses, and the
+Covered Software is not Incompatible With Secondary Licenses, this
+License permits You to additionally distribute such Covered Software
+under the terms of such Secondary License(s), so that the recipient of
+the Larger Work may, at their option, further distribute the Covered
+Software under the terms of either this License or such Secondary
+License(s).
+
+3.4. Notices
+
+You may not remove or alter the substance of any license notices
+(including copyright notices, patent notices, disclaimers of warranty,
+or limitations of liability) contained within the Source Code Form of
+the Covered Software, except that You may alter any license notices to
+the extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+You may choose to offer, and to charge a fee for, warranty, support,
+indemnity or liability obligations to one or more recipients of Covered
+Software. However, You may do so only on Your own behalf, and not on
+behalf of any Contributor. You must make it absolutely clear that any
+such warranty, support, indemnity, or liability obligation is offered by
+You alone, and You hereby agree to indemnify every Contributor for any
+liability incurred by such Contributor as a result of warranty, support,
+indemnity or liability terms You offer. You may include additional
+disclaimers of warranty and limitations of liability specific to any
+jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+---------------------------------------------------
+
+If it is impossible for You to comply with any of the terms of this
+License with respect to some or all of the Covered Software due to
+statute, judicial order, or regulation then You must: (a) comply with
+the terms of this License to the maximum extent possible; and (b)
+describe the limitations and the code they affect. Such description must
+be placed in a text file included with all distributions of the Covered
+Software under this License. Except to the extent prohibited by statute
+or regulation, such description must be sufficiently detailed for a
+recipient of ordinary skill to be able to understand it.
+
+5. Termination
+--------------
+
+5.1. The rights granted under this License will terminate automatically
+if You fail to comply with any of its terms. However, if You become
+compliant, then the rights granted under this License from a particular
+Contributor are reinstated (a) provisionally, unless and until such
+Contributor explicitly and finally terminates Your grants, and (b) on an
+ongoing basis, if such Contributor fails to notify You of the
+non-compliance by some reasonable means prior to 60 days after You have
+come back into compliance. Moreover, Your grants from a particular
+Contributor are reinstated on an ongoing basis if such Contributor
+notifies You of the non-compliance by some reasonable means, this is the
+first time You have received notice of non-compliance with this License
+from such Contributor, and You become compliant prior to 30 days after
+Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+infringement claim (excluding declaratory judgment actions,
+counter-claims, and cross-claims) alleging that a Contributor Version
+directly or indirectly infringes any patent, then the rights granted to
+You by any and all Contributors for the Covered Software under Section
+2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all
+end user license agreements (excluding distributors and resellers) which
+have been validly granted by You or Your distributors under this License
+prior to termination shall survive termination.
+
+************************************************************************
+* *
+* 6. Disclaimer of Warranty *
+* ------------------------- *
+* *
+* Covered Software is provided under this License on an "as is" *
+* basis, without warranty of any kind, either expressed, implied, or *
+* statutory, including, without limitation, warranties that the *
+* Covered Software is free of defects, merchantable, fit for a *
+* particular purpose or non-infringing. The entire risk as to the *
+* quality and performance of the Covered Software is with You. *
+* Should any Covered Software prove defective in any respect, You *
+* (not any Contributor) assume the cost of any necessary servicing, *
+* repair, or correction. This disclaimer of warranty constitutes an *
+* essential part of this License. No use of any Covered Software is *
+* authorized under this License except under this disclaimer. *
+* *
+************************************************************************
+
+************************************************************************
+* *
+* 7. Limitation of Liability *
+* -------------------------- *
+* *
+* Under no circumstances and under no legal theory, whether tort *
+* (including negligence), contract, or otherwise, shall any *
+* Contributor, or anyone who distributes Covered Software as *
+* permitted above, be liable to You for any direct, indirect, *
+* special, incidental, or consequential damages of any character *
+* including, without limitation, damages for lost profits, loss of *
+* goodwill, work stoppage, computer failure or malfunction, or any *
+* and all other commercial damages or losses, even if such party *
+* shall have been informed of the possibility of such damages. This *
+* limitation of liability shall not apply to liability for death or *
+* personal injury resulting from such party's negligence to the *
+* extent applicable law prohibits such limitation. Some *
+* jurisdictions do not allow the exclusion or limitation of *
+* incidental or consequential damages, so this exclusion and *
+* limitation may not apply to You. *
+* *
+************************************************************************
+
+8. Litigation
+-------------
+
+Any litigation relating to this License may be brought only in the
+courts of a jurisdiction where the defendant maintains its principal
+place of business and such litigation shall be governed by laws of that
+jurisdiction, without reference to its conflict-of-law provisions.
+Nothing in this Section shall prevent a party's ability to bring
+cross-claims or counter-claims.
+
+9. Miscellaneous
+----------------
+
+This License represents the complete agreement concerning the subject
+matter hereof. If any provision of this License is held to be
+unenforceable, such provision shall be reformed only to the extent
+necessary to make it enforceable. Any law or regulation which provides
+that the language of a contract shall be construed against the drafter
+shall not be used to construe this License against a Contributor.
+
+10. Versions of the License
+---------------------------
+
+10.1. New Versions
+
+Mozilla Foundation is the license steward. Except as provided in Section
+10.3, no one other than the license steward has the right to modify or
+publish new versions of this License. Each version will be given a
+distinguishing version number.
+
+10.2. Effect of New Versions
+
+You may distribute the Covered Software under the terms of the version
+of the License under which You originally received the Covered Software,
+or under the terms of any subsequent version published by the license
+steward.
+
+10.3. Modified Versions
+
+If you create software not governed by this License, and you want to
+create a new license for such software, you may create and use a
+modified version of this License if you rename the license and remove
+any references to the name of the license steward (except to note that
+such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+Licenses
+
+If You choose to distribute Source Code Form that is Incompatible With
+Secondary Licenses under the terms of this version of the License, the
+notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+-------------------------------------------
+
+ This Source Code Form is subject to the terms of the Mozilla Public
+ License, v. 2.0. If a copy of the MPL was not distributed with this
+ file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular
+file, then You may include the notice in a location (such as a LICENSE
+file in a relevant directory) where a recipient would be likely to look
+for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+---------------------------------------------------------
+
+ This Source Code Form is "Incompatible With Secondary Licenses", as
+ defined by the Mozilla Public License, v. 2.0.
diff --git a/deps/rabbitmq_web_mqtt/Makefile b/deps/rabbitmq_web_mqtt/Makefile
new file mode 100644
index 0000000000..6d38319759
--- /dev/null
+++ b/deps/rabbitmq_web_mqtt/Makefile
@@ -0,0 +1,33 @@
+PROJECT = rabbitmq_web_mqtt
+PROJECT_DESCRIPTION = RabbitMQ MQTT-over-WebSockets adapter
+PROJECT_MOD = rabbit_web_mqtt_app
+
+define PROJECT_ENV
+[
+ {tcp_config, [{port, 15675}]},
+ {ssl_config, []},
+ {num_tcp_acceptors, 10},
+ {num_ssl_acceptors, 10},
+ {cowboy_opts, []},
+ {proxy_protocol, false}
+ ]
+endef
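+
+# The env above supplies the plugin's defaults: a WebSocket (TCP) listener
+# on port 15675 with 10 acceptors, and an empty ssl_config (presumably no
+# TLS listener is started until it is populated).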
+
+DEPS = rabbit_common rabbit cowboy rabbitmq_mqtt
+TEST_DEPS = rabbitmq_ct_helpers rabbitmq_ct_client_helpers
+
+# FIXME: Add Ranch as a BUILD_DEPS to be sure the correct version is picked.
+# See rabbitmq-components.mk.
+BUILD_DEPS += ranch
+
+DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-plugin.mk
+DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk
+
+# FIXME: Use erlang.mk patched for RabbitMQ, while waiting for PRs to be
+# reviewed and merged.
+
+ERLANG_MK_REPO = https://github.com/rabbitmq/erlang.mk.git
+ERLANG_MK_COMMIT = rabbitmq-tmp
+
+include rabbitmq-components.mk
+include erlang.mk
diff --git a/deps/rabbitmq_web_mqtt/README.md b/deps/rabbitmq_web_mqtt/README.md
new file mode 100644
index 0000000000..accf14206a
--- /dev/null
+++ b/deps/rabbitmq_web_mqtt/README.md
@@ -0,0 +1,33 @@
+# RabbitMQ Web MQTT plugin
+
+This plugin provides support for MQTT-over-WebSockets to RabbitMQ.
+
+## Installation
+
+This plugin ships with modern versions of RabbitMQ.
+Like all plugins, it [must be enabled](https://www.rabbitmq.com/plugins.html) before it can be used:
+
+``` bash
+# this might require sudo
+rabbitmq-plugins enable rabbitmq_web_mqtt
+```
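+
+By default the plugin listens on port 15675 (see the `tcp_config` entry in
+this plugin's Makefile). A minimal sketch, assuming the standard
+`advanced.config` application-environment mechanism, of moving the listener
+to another port:
+
+``` erlang
+%% Sketch only: overrides the tcp_config key declared in this plugin's
+%% Makefile; 15676 is an arbitrary example port.
+[
+  {rabbitmq_web_mqtt, [
+    {tcp_config, [{port, 15676}]}
+  ]}
+].
+```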
+
+## Documentation
+
+Please refer to the [RabbitMQ Web MQTT guide](https://www.rabbitmq.com/web-mqtt.html).
+
+
+## Building From Source
+
+ * [Generic plugin build instructions](https://www.rabbitmq.com/plugin-development.html)
+ * Instructions on [how to install a plugin into RabbitMQ broker](https://www.rabbitmq.com/plugins.html#installing-plugins)
+
+Note that release branches (`stable` vs. `master`) and the target RabbitMQ version need to be taken into account
+when building plugins from source.
+
+
+## Copyright and License
+
+(c) 2007-2020 VMware, Inc. or its affiliates.
+
+Released under the same license as RabbitMQ. See LICENSE for details.
diff --git a/deps/rabbitmq_web_mqtt/erlang.mk b/deps/rabbitmq_web_mqtt/erlang.mk
new file mode 100644
index 0000000000..fce4be0b0a
--- /dev/null
+++ b/deps/rabbitmq_web_mqtt/erlang.mk
@@ -0,0 +1,7808 @@
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+.PHONY: all app apps deps search rel relup docs install-docs check tests clean distclean help erlang-mk
+
+ERLANG_MK_FILENAME := $(realpath $(lastword $(MAKEFILE_LIST)))
+export ERLANG_MK_FILENAME
+
+ERLANG_MK_VERSION = 2019.07.01-40-geb3e4b0
+ERLANG_MK_WITHOUT =
+
+# Make 3.81 and 3.82 are deprecated.
+
+ifeq ($(MAKELEVEL)$(MAKE_VERSION),03.81)
+$(warning Please upgrade to GNU Make 4 or later: https://erlang.mk/guide/installation.html)
+endif
+
+ifeq ($(MAKELEVEL)$(MAKE_VERSION),03.82)
+$(warning Please upgrade to GNU Make 4 or later: https://erlang.mk/guide/installation.html)
+endif
+
+# Core configuration.
+
+PROJECT ?= $(notdir $(CURDIR))
+PROJECT := $(strip $(PROJECT))
+
+PROJECT_VERSION ?= rolling
+PROJECT_MOD ?= $(PROJECT)_app
+PROJECT_ENV ?= []
+
+# Verbosity.
+
+V ?= 0
+
+verbose_0 = @
+verbose_2 = set -x;
+verbose = $(verbose_$(V))
+
+ifeq ($(V),3)
+SHELL := $(SHELL) -x
+endif
+
+gen_verbose_0 = @echo " GEN " $@;
+gen_verbose_2 = set -x;
+gen_verbose = $(gen_verbose_$(V))
+
+gen_verbose_esc_0 = @echo " GEN " $$@;
+gen_verbose_esc_2 = set -x;
+gen_verbose_esc = $(gen_verbose_esc_$(V))
+
+# Temporary files directory.
+
+ERLANG_MK_TMP ?= $(CURDIR)/.erlang.mk
+export ERLANG_MK_TMP
+
+# "erl" command.
+
+ERL = erl +A1 -noinput -boot no_dot_erlang
+
+# Platform detection.
+
+ifeq ($(PLATFORM),)
+UNAME_S := $(shell uname -s)
+
+ifeq ($(UNAME_S),Linux)
+PLATFORM = linux
+else ifeq ($(UNAME_S),Darwin)
+PLATFORM = darwin
+else ifeq ($(UNAME_S),SunOS)
+PLATFORM = solaris
+else ifeq ($(UNAME_S),GNU)
+PLATFORM = gnu
+else ifeq ($(UNAME_S),FreeBSD)
+PLATFORM = freebsd
+else ifeq ($(UNAME_S),NetBSD)
+PLATFORM = netbsd
+else ifeq ($(UNAME_S),OpenBSD)
+PLATFORM = openbsd
+else ifeq ($(UNAME_S),DragonFly)
+PLATFORM = dragonfly
+else ifeq ($(shell uname -o),Msys)
+PLATFORM = msys2
+else
+$(error Unable to detect platform. Please open a ticket with the output of uname -a.)
+endif
+
+export PLATFORM
+endif
+
+# Core targets.
+
+all:: deps app rel
+
+# Noop to avoid a Make warning when there's nothing to do.
+rel::
+ $(verbose) :
+
+relup:: deps app
+
+check:: tests
+
+clean:: clean-crashdump
+
+clean-crashdump:
+ifneq ($(wildcard erl_crash.dump),)
+ $(gen_verbose) rm -f erl_crash.dump
+endif
+
+distclean:: clean distclean-tmp
+
+$(ERLANG_MK_TMP):
+ $(verbose) mkdir -p $(ERLANG_MK_TMP)
+
+distclean-tmp:
+ $(gen_verbose) rm -rf $(ERLANG_MK_TMP)
+
+help::
+ $(verbose) printf "%s\n" \
+ "erlang.mk (version $(ERLANG_MK_VERSION)) is distributed under the terms of the ISC License." \
+ "Copyright (c) 2013-2016 Loïc Hoguin <essen@ninenines.eu>" \
+ "" \
+ "Usage: [V=1] $(MAKE) [target]..." \
+ "" \
+ "Core targets:" \
+ " all Run deps, app and rel targets in that order" \
+ " app Compile the project" \
+ " deps Fetch dependencies (if needed) and compile them" \
+ " fetch-deps Fetch dependencies recursively (if needed) without compiling them" \
+ " list-deps List dependencies recursively on stdout" \
+ " search q=... Search for a package in the built-in index" \
+ " rel Build a release for this project, if applicable" \
+ " docs Build the documentation for this project" \
+ " install-docs Install the man pages for this project" \
+ " check Compile and run all tests and analysis for this project" \
+ " tests Run the tests for this project" \
+ " clean Delete temporary and output files from most targets" \
+ " distclean Delete all temporary and output files" \
+ " help Display this help and exit" \
+ " erlang-mk Update erlang.mk to the latest version"
+
+# Core functions.
+
+empty :=
+space := $(empty) $(empty)
+tab := $(empty) $(empty)
+comma := ,
+
+define newline
+
+
+endef
+
+define comma_list
+$(subst $(space),$(comma),$(strip $(1)))
+endef
+
+define escape_dquotes
+$(subst ",\",$1)
+endef
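+
+# Examples (sketches): $(call comma_list,a b c) expands to a,b,c and
+# $(call escape_dquotes,say "hi") expands to say \"hi\".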
+
+# Adding erlang.mk to make Erlang scripts that call init:get_plain_arguments() happy.
+define erlang
+$(ERL) $2 -pz $(ERLANG_MK_TMP)/rebar/ebin -eval "$(subst $(newline),,$(call escape_dquotes,$1))" -- erlang.mk
+endef
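+
+# Usage sketch: $(call erlang,halt(0)) expands to $(ERL) with
+# -eval "halt(0)" -- erlang.mk; the trailing "erlang.mk" argument keeps
+# init:get_plain_arguments() non-empty.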
+
+ifeq ($(PLATFORM),msys2)
+core_native_path = $(shell cygpath -m $1)
+else
+core_native_path = $1
+endif
+
+core_http_get = curl -Lf$(if $(filter-out 0,$(V)),,s)o $(call core_native_path,$1) $2
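+# e.g. (sketch) $(call core_http_get,out.bin,https://example.com/f) yields
+# "curl -Lfso out.bin https://example.com/f" when V=0, and -Lfo otherwise.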
+
+core_eq = $(and $(findstring $(1),$(2)),$(findstring $(2),$(1)))
+
+# We skip files that contain spaces because they end up causing issues.
+core_find = $(if $(wildcard $1),$(shell find $(1:%/=%) \( -type l -o -type f \) -name $(subst *,\*,$2) | grep -v " "))
+
+core_lc = $(subst A,a,$(subst B,b,$(subst C,c,$(subst D,d,$(subst E,e,$(subst F,f,$(subst G,g,$(subst H,h,$(subst I,i,$(subst J,j,$(subst K,k,$(subst L,l,$(subst M,m,$(subst N,n,$(subst O,o,$(subst P,p,$(subst Q,q,$(subst R,r,$(subst S,s,$(subst T,t,$(subst U,u,$(subst V,v,$(subst W,w,$(subst X,x,$(subst Y,y,$(subst Z,z,$(1)))))))))))))))))))))))))))
+
+core_ls = $(filter-out $(1),$(shell echo $(1)))
+
+# @todo Use a solution that does not require using perl.
+core_relpath = $(shell perl -e 'use File::Spec; print File::Spec->abs2rel(@ARGV) . "\n"' $1 $2)
+
+define core_render
+ printf -- '$(subst $(newline),\n,$(subst %,%%,$(subst ','\'',$(subst $(tab),$(WS),$(call $(1))))))\n' > $(2)
+endef
+
+# Automated update.
+
+ERLANG_MK_REPO ?= https://github.com/ninenines/erlang.mk
+ERLANG_MK_COMMIT ?=
+ERLANG_MK_BUILD_CONFIG ?= build.config
+ERLANG_MK_BUILD_DIR ?= .erlang.mk.build
+
+erlang-mk: WITHOUT ?= $(ERLANG_MK_WITHOUT)
+erlang-mk:
+ifdef ERLANG_MK_COMMIT
+ $(verbose) git clone $(ERLANG_MK_REPO) $(ERLANG_MK_BUILD_DIR)
+ $(verbose) cd $(ERLANG_MK_BUILD_DIR) && git checkout $(ERLANG_MK_COMMIT)
+else
+ $(verbose) git clone --depth 1 $(ERLANG_MK_REPO) $(ERLANG_MK_BUILD_DIR)
+endif
+ $(verbose) if [ -f $(ERLANG_MK_BUILD_CONFIG) ]; then cp $(ERLANG_MK_BUILD_CONFIG) $(ERLANG_MK_BUILD_DIR)/build.config; fi
+ $(gen_verbose) $(MAKE) --no-print-directory -C $(ERLANG_MK_BUILD_DIR) WITHOUT='$(strip $(WITHOUT))' UPGRADE=1
+ $(verbose) cp $(ERLANG_MK_BUILD_DIR)/erlang.mk ./erlang.mk
+ $(verbose) rm -rf $(ERLANG_MK_BUILD_DIR)
+ $(verbose) rm -rf $(ERLANG_MK_TMP)
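+
+# Usage sketch: "make erlang-mk" regenerates ./erlang.mk from $(ERLANG_MK_REPO);
+# a specific revision can be pinned with "make erlang-mk ERLANG_MK_COMMIT=<ref>".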
+
+# The erlang.mk package index is bundled in the default erlang.mk build.
+# Search for the string "copyright" to skip to the rest of the code.
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-kerl
+
+KERL_INSTALL_DIR ?= $(HOME)/erlang
+
+ifeq ($(strip $(KERL)),)
+KERL := $(ERLANG_MK_TMP)/kerl/kerl
+endif
+
+KERL_DIR = $(ERLANG_MK_TMP)/kerl
+
+export KERL
+
+KERL_GIT ?= https://github.com/kerl/kerl
+KERL_COMMIT ?= master
+
+KERL_MAKEFLAGS ?=
+
+OTP_GIT ?= https://github.com/erlang/otp
+
+define kerl_otp_target
+$(KERL_INSTALL_DIR)/$(1): $(KERL)
+ $(verbose) if [ ! -d $$@ ]; then \
+ MAKEFLAGS="$(KERL_MAKEFLAGS)" $(KERL) build git $(OTP_GIT) $(1) $(1); \
+ $(KERL) install $(1) $(KERL_INSTALL_DIR)/$(1); \
+ fi
+endef
+
+define kerl_hipe_target
+$(KERL_INSTALL_DIR)/$1-native: $(KERL)
+ $(verbose) if [ ! -d $$@ ]; then \
+ KERL_CONFIGURE_OPTIONS=--enable-native-libs \
+ MAKEFLAGS="$(KERL_MAKEFLAGS)" $(KERL) build git $(OTP_GIT) $1 $1-native; \
+ $(KERL) install $1-native $(KERL_INSTALL_DIR)/$1-native; \
+ fi
+endef
+
+$(KERL): $(KERL_DIR)
+
+$(KERL_DIR): | $(ERLANG_MK_TMP)
+ $(gen_verbose) git clone --depth 1 $(KERL_GIT) $(ERLANG_MK_TMP)/kerl
+ $(verbose) cd $(ERLANG_MK_TMP)/kerl && git checkout $(KERL_COMMIT)
+ $(verbose) chmod +x $(KERL)
+
+distclean:: distclean-kerl
+
+distclean-kerl:
+ $(gen_verbose) rm -rf $(KERL_DIR)
+
+# Allow users to select which version of Erlang/OTP to use for a project.
+
+ifneq ($(strip $(LATEST_ERLANG_OTP)),)
+# In some environments it is necessary to filter out master.
+ERLANG_OTP := $(notdir $(lastword $(sort\
+ $(filter-out $(KERL_INSTALL_DIR)/master $(KERL_INSTALL_DIR)/OTP_R%,\
+ $(filter-out %-rc1 %-rc2 %-rc3,$(wildcard $(KERL_INSTALL_DIR)/*[^-native]))))))
+endif
+
+ERLANG_OTP ?=
+ERLANG_HIPE ?=
+
+# Use kerl to enforce a specific Erlang/OTP version for a project.
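+# Sketch: "make ERLANG_OTP=OTP-22.0 tests" would build that tag from
+# $(OTP_GIT) via kerl (if not already installed) and prepend its bin/ to PATH.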
+ifneq ($(strip $(ERLANG_OTP)),)
+export PATH := $(KERL_INSTALL_DIR)/$(ERLANG_OTP)/bin:$(PATH)
+SHELL := env PATH=$(PATH) $(SHELL)
+$(eval $(call kerl_otp_target,$(ERLANG_OTP)))
+
+# Build Erlang/OTP only if it doesn't already exist.
+ifeq ($(wildcard $(KERL_INSTALL_DIR)/$(ERLANG_OTP))$(BUILD_ERLANG_OTP),)
+$(info Building Erlang/OTP $(ERLANG_OTP)... Please wait...)
+$(shell $(MAKE) $(KERL_INSTALL_DIR)/$(ERLANG_OTP) ERLANG_OTP=$(ERLANG_OTP) BUILD_ERLANG_OTP=1 >&2)
+endif
+
+else
+# Same for a HiPE enabled VM.
+ifneq ($(strip $(ERLANG_HIPE)),)
+export PATH := $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native/bin:$(PATH)
+SHELL := env PATH=$(PATH) $(SHELL)
+$(eval $(call kerl_hipe_target,$(ERLANG_HIPE)))
+
+# Build Erlang/OTP only if it doesn't already exist.
+ifeq ($(wildcard $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native)$(BUILD_ERLANG_OTP),)
+$(info Building HiPE-enabled Erlang/OTP $(ERLANG_HIPE)... Please wait...)
+$(shell $(MAKE) $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native ERLANG_HIPE=$(ERLANG_HIPE) BUILD_ERLANG_OTP=1 >&2)
+endif
+
+endif
+endif
+
+PACKAGES += aberth
+pkg_aberth_name = aberth
+pkg_aberth_description = Generic BERT-RPC server in Erlang
+pkg_aberth_homepage = https://github.com/a13x/aberth
+pkg_aberth_fetch = git
+pkg_aberth_repo = https://github.com/a13x/aberth
+pkg_aberth_commit = master
+
+PACKAGES += active
+pkg_active_name = active
+pkg_active_description = Active development for Erlang: rebuild and reload source/binary files while the VM is running
+pkg_active_homepage = https://github.com/proger/active
+pkg_active_fetch = git
+pkg_active_repo = https://github.com/proger/active
+pkg_active_commit = master
+
+PACKAGES += actordb_core
+pkg_actordb_core_name = actordb_core
+pkg_actordb_core_description = ActorDB main source
+pkg_actordb_core_homepage = http://www.actordb.com/
+pkg_actordb_core_fetch = git
+pkg_actordb_core_repo = https://github.com/biokoda/actordb_core
+pkg_actordb_core_commit = master
+
+PACKAGES += actordb_thrift
+pkg_actordb_thrift_name = actordb_thrift
+pkg_actordb_thrift_description = Thrift API for ActorDB
+pkg_actordb_thrift_homepage = http://www.actordb.com/
+pkg_actordb_thrift_fetch = git
+pkg_actordb_thrift_repo = https://github.com/biokoda/actordb_thrift
+pkg_actordb_thrift_commit = master
+
+PACKAGES += aleppo
+pkg_aleppo_name = aleppo
+pkg_aleppo_description = Alternative Erlang Pre-Processor
+pkg_aleppo_homepage = https://github.com/ErlyORM/aleppo
+pkg_aleppo_fetch = git
+pkg_aleppo_repo = https://github.com/ErlyORM/aleppo
+pkg_aleppo_commit = master
+
+PACKAGES += alog
+pkg_alog_name = alog
+pkg_alog_description = Simply the best logging framework for Erlang
+pkg_alog_homepage = https://github.com/siberian-fast-food/alogger
+pkg_alog_fetch = git
+pkg_alog_repo = https://github.com/siberian-fast-food/alogger
+pkg_alog_commit = master
+
+PACKAGES += amqp_client
+pkg_amqp_client_name = amqp_client
+pkg_amqp_client_description = RabbitMQ Erlang AMQP client
+pkg_amqp_client_homepage = https://www.rabbitmq.com/erlang-client-user-guide.html
+pkg_amqp_client_fetch = git
+pkg_amqp_client_repo = https://github.com/rabbitmq/rabbitmq-erlang-client.git
+pkg_amqp_client_commit = master
+
+PACKAGES += annotations
+pkg_annotations_name = annotations
+pkg_annotations_description = Simple code instrumentation utilities
+pkg_annotations_homepage = https://github.com/hyperthunk/annotations
+pkg_annotations_fetch = git
+pkg_annotations_repo = https://github.com/hyperthunk/annotations
+pkg_annotations_commit = master
+
+PACKAGES += antidote
+pkg_antidote_name = antidote
+pkg_antidote_description = Large-scale computation without synchronisation
+pkg_antidote_homepage = https://syncfree.lip6.fr/
+pkg_antidote_fetch = git
+pkg_antidote_repo = https://github.com/SyncFree/antidote
+pkg_antidote_commit = master
+
+PACKAGES += apns
+pkg_apns_name = apns
+pkg_apns_description = Apple Push Notification Server for Erlang
+pkg_apns_homepage = http://inaka.github.com/apns4erl
+pkg_apns_fetch = git
+pkg_apns_repo = https://github.com/inaka/apns4erl
+pkg_apns_commit = master
+
+PACKAGES += asciideck
+pkg_asciideck_name = asciideck
+pkg_asciideck_description = Asciidoc for Erlang.
+pkg_asciideck_homepage = https://ninenines.eu
+pkg_asciideck_fetch = git
+pkg_asciideck_repo = https://github.com/ninenines/asciideck
+pkg_asciideck_commit = master
+
+PACKAGES += azdht
+pkg_azdht_name = azdht
+pkg_azdht_description = Azureus Distributed Hash Table (DHT) in Erlang
+pkg_azdht_homepage = https://github.com/arcusfelis/azdht
+pkg_azdht_fetch = git
+pkg_azdht_repo = https://github.com/arcusfelis/azdht
+pkg_azdht_commit = master
+
+PACKAGES += backoff
+pkg_backoff_name = backoff
+pkg_backoff_description = Simple exponential backoffs in Erlang
+pkg_backoff_homepage = https://github.com/ferd/backoff
+pkg_backoff_fetch = git
+pkg_backoff_repo = https://github.com/ferd/backoff
+pkg_backoff_commit = master
+
+PACKAGES += barrel_tcp
+pkg_barrel_tcp_name = barrel_tcp
+pkg_barrel_tcp_description = barrel is a generic TCP acceptor pool with low latency in Erlang.
+pkg_barrel_tcp_homepage = https://github.com/benoitc-attic/barrel_tcp
+pkg_barrel_tcp_fetch = git
+pkg_barrel_tcp_repo = https://github.com/benoitc-attic/barrel_tcp
+pkg_barrel_tcp_commit = master
+
+PACKAGES += basho_bench
+pkg_basho_bench_name = basho_bench
+pkg_basho_bench_description = A load-generation and testing tool for basically whatever you can write a returning Erlang function for.
+pkg_basho_bench_homepage = https://github.com/basho/basho_bench
+pkg_basho_bench_fetch = git
+pkg_basho_bench_repo = https://github.com/basho/basho_bench
+pkg_basho_bench_commit = master
+
+PACKAGES += bcrypt
+pkg_bcrypt_name = bcrypt
+pkg_bcrypt_description = Bcrypt Erlang / C library
+pkg_bcrypt_homepage = https://github.com/erlangpack/bcrypt
+pkg_bcrypt_fetch = git
+pkg_bcrypt_repo = https://github.com/erlangpack/bcrypt.git
+pkg_bcrypt_commit = master
+
+PACKAGES += beam
+pkg_beam_name = beam
+pkg_beam_description = BEAM emulator written in Erlang
+pkg_beam_homepage = https://github.com/tonyrog/beam
+pkg_beam_fetch = git
+pkg_beam_repo = https://github.com/tonyrog/beam
+pkg_beam_commit = master
+
+PACKAGES += beanstalk
+pkg_beanstalk_name = beanstalk
+pkg_beanstalk_description = An Erlang client for beanstalkd
+pkg_beanstalk_homepage = https://github.com/tim/erlang-beanstalk
+pkg_beanstalk_fetch = git
+pkg_beanstalk_repo = https://github.com/tim/erlang-beanstalk
+pkg_beanstalk_commit = master
+
+PACKAGES += bear
+pkg_bear_name = bear
+pkg_bear_description = a set of statistics functions for erlang
+pkg_bear_homepage = https://github.com/boundary/bear
+pkg_bear_fetch = git
+pkg_bear_repo = https://github.com/boundary/bear
+pkg_bear_commit = master
+
+PACKAGES += bertconf
+pkg_bertconf_name = bertconf
+pkg_bertconf_description = Make ETS tables out of static BERT files that are auto-reloaded
+pkg_bertconf_homepage = https://github.com/ferd/bertconf
+pkg_bertconf_fetch = git
+pkg_bertconf_repo = https://github.com/ferd/bertconf
+pkg_bertconf_commit = master
+
+PACKAGES += bifrost
+pkg_bifrost_name = bifrost
+pkg_bifrost_description = Erlang FTP Server Framework
+pkg_bifrost_homepage = https://github.com/thorstadt/bifrost
+pkg_bifrost_fetch = git
+pkg_bifrost_repo = https://github.com/thorstadt/bifrost
+pkg_bifrost_commit = master
+
+PACKAGES += binpp
+pkg_binpp_name = binpp
+pkg_binpp_description = Erlang Binary Pretty Printer
+pkg_binpp_homepage = https://github.com/jtendo/binpp
+pkg_binpp_fetch = git
+pkg_binpp_repo = https://github.com/jtendo/binpp
+pkg_binpp_commit = master
+
+PACKAGES += bisect
+pkg_bisect_name = bisect
+pkg_bisect_description = Ordered fixed-size binary dictionary in Erlang
+pkg_bisect_homepage = https://github.com/knutin/bisect
+pkg_bisect_fetch = git
+pkg_bisect_repo = https://github.com/knutin/bisect
+pkg_bisect_commit = master
+
+PACKAGES += bitcask
+pkg_bitcask_name = bitcask
+pkg_bitcask_description = because you need another key/value storage engine
+pkg_bitcask_homepage = https://github.com/basho/bitcask
+pkg_bitcask_fetch = git
+pkg_bitcask_repo = https://github.com/basho/bitcask
+pkg_bitcask_commit = develop
+
+PACKAGES += bitstore
+pkg_bitstore_name = bitstore
+pkg_bitstore_description = A document based ontology development environment
+pkg_bitstore_homepage = https://github.com/bdionne/bitstore
+pkg_bitstore_fetch = git
+pkg_bitstore_repo = https://github.com/bdionne/bitstore
+pkg_bitstore_commit = master
+
+PACKAGES += bootstrap
+pkg_bootstrap_name = bootstrap
+pkg_bootstrap_description = A simple, yet powerful Erlang cluster bootstrapping application.
+pkg_bootstrap_homepage = https://github.com/schlagert/bootstrap
+pkg_bootstrap_fetch = git
+pkg_bootstrap_repo = https://github.com/schlagert/bootstrap
+pkg_bootstrap_commit = master
+
+PACKAGES += boss
+pkg_boss_name = boss
+pkg_boss_description = Erlang web MVC, now featuring Comet
+pkg_boss_homepage = https://github.com/ChicagoBoss/ChicagoBoss
+pkg_boss_fetch = git
+pkg_boss_repo = https://github.com/ChicagoBoss/ChicagoBoss
+pkg_boss_commit = master
+
+PACKAGES += boss_db
+pkg_boss_db_name = boss_db
+pkg_boss_db_description = BossDB: a sharded, caching, pooling, evented ORM for Erlang
+pkg_boss_db_homepage = https://github.com/ErlyORM/boss_db
+pkg_boss_db_fetch = git
+pkg_boss_db_repo = https://github.com/ErlyORM/boss_db
+pkg_boss_db_commit = master
+
+PACKAGES += brod
+pkg_brod_name = brod
+pkg_brod_description = Kafka client in Erlang
+pkg_brod_homepage = https://github.com/klarna/brod
+pkg_brod_fetch = git
+pkg_brod_repo = https://github.com/klarna/brod.git
+pkg_brod_commit = master
+
+PACKAGES += bson
+pkg_bson_name = bson
+pkg_bson_description = BSON documents in Erlang, see bsonspec.org
+pkg_bson_homepage = https://github.com/comtihon/bson-erlang
+pkg_bson_fetch = git
+pkg_bson_repo = https://github.com/comtihon/bson-erlang
+pkg_bson_commit = master
+
+PACKAGES += bullet
+pkg_bullet_name = bullet
+pkg_bullet_description = Simple, reliable, efficient streaming for Cowboy.
+pkg_bullet_homepage = http://ninenines.eu
+pkg_bullet_fetch = git
+pkg_bullet_repo = https://github.com/ninenines/bullet
+pkg_bullet_commit = master
+
+PACKAGES += cache
+pkg_cache_name = cache
+pkg_cache_description = Erlang in-memory cache
+pkg_cache_homepage = https://github.com/fogfish/cache
+pkg_cache_fetch = git
+pkg_cache_repo = https://github.com/fogfish/cache
+pkg_cache_commit = master
+
+PACKAGES += cake
+pkg_cake_name = cake
+pkg_cake_description = Really simple terminal colorization
+pkg_cake_homepage = https://github.com/darach/cake-erl
+pkg_cake_fetch = git
+pkg_cake_repo = https://github.com/darach/cake-erl
+pkg_cake_commit = master
+
+PACKAGES += carotene
+pkg_carotene_name = carotene
+pkg_carotene_description = Real-time server
+pkg_carotene_homepage = https://github.com/carotene/carotene
+pkg_carotene_fetch = git
+pkg_carotene_repo = https://github.com/carotene/carotene
+pkg_carotene_commit = master
+
+PACKAGES += cberl
+pkg_cberl_name = cberl
+pkg_cberl_description = NIF based Erlang bindings for Couchbase
+pkg_cberl_homepage = https://github.com/chitika/cberl
+pkg_cberl_fetch = git
+pkg_cberl_repo = https://github.com/chitika/cberl
+pkg_cberl_commit = master
+
+PACKAGES += cecho
+pkg_cecho_name = cecho
+pkg_cecho_description = An ncurses library for Erlang
+pkg_cecho_homepage = https://github.com/mazenharake/cecho
+pkg_cecho_fetch = git
+pkg_cecho_repo = https://github.com/mazenharake/cecho
+pkg_cecho_commit = master
+
+PACKAGES += cferl
+pkg_cferl_name = cferl
+pkg_cferl_description = Rackspace / Open Stack Cloud Files Erlang Client
+pkg_cferl_homepage = https://github.com/ddossot/cferl
+pkg_cferl_fetch = git
+pkg_cferl_repo = https://github.com/ddossot/cferl
+pkg_cferl_commit = master
+
+PACKAGES += chaos_monkey
+pkg_chaos_monkey_name = chaos_monkey
+pkg_chaos_monkey_description = This is The CHAOS MONKEY. It will kill your processes.
+pkg_chaos_monkey_homepage = https://github.com/dLuna/chaos_monkey
+pkg_chaos_monkey_fetch = git
+pkg_chaos_monkey_repo = https://github.com/dLuna/chaos_monkey
+pkg_chaos_monkey_commit = master
+
+PACKAGES += check_node
+pkg_check_node_name = check_node
+pkg_check_node_description = Nagios Scripts for monitoring Riak
+pkg_check_node_homepage = https://github.com/basho-labs/riak_nagios
+pkg_check_node_fetch = git
+pkg_check_node_repo = https://github.com/basho-labs/riak_nagios
+pkg_check_node_commit = master
+
+PACKAGES += chronos
+pkg_chronos_name = chronos
+pkg_chronos_description = Timer module for Erlang that makes it easy to abstract time out of the tests.
+pkg_chronos_homepage = https://github.com/lehoff/chronos
+pkg_chronos_fetch = git
+pkg_chronos_repo = https://github.com/lehoff/chronos
+pkg_chronos_commit = master
+
+PACKAGES += chumak
+pkg_chumak_name = chumak
+pkg_chumak_description = Pure Erlang implementation of ZeroMQ Message Transport Protocol.
+pkg_chumak_homepage = http://choven.ca
+pkg_chumak_fetch = git
+pkg_chumak_repo = https://github.com/chovencorp/chumak
+pkg_chumak_commit = master
+
+PACKAGES += cl
+pkg_cl_name = cl
+pkg_cl_description = OpenCL binding for Erlang
+pkg_cl_homepage = https://github.com/tonyrog/cl
+pkg_cl_fetch = git
+pkg_cl_repo = https://github.com/tonyrog/cl
+pkg_cl_commit = master
+
+PACKAGES += clique
+pkg_clique_name = clique
+pkg_clique_description = CLI Framework for Erlang
+pkg_clique_homepage = https://github.com/basho/clique
+pkg_clique_fetch = git
+pkg_clique_repo = https://github.com/basho/clique
+pkg_clique_commit = develop
+
+PACKAGES += cloudi_core
+pkg_cloudi_core_name = cloudi_core
+pkg_cloudi_core_description = CloudI internal service runtime
+pkg_cloudi_core_homepage = http://cloudi.org/
+pkg_cloudi_core_fetch = git
+pkg_cloudi_core_repo = https://github.com/CloudI/cloudi_core
+pkg_cloudi_core_commit = master
+
+PACKAGES += cloudi_service_api_requests
+pkg_cloudi_service_api_requests_name = cloudi_service_api_requests
+pkg_cloudi_service_api_requests_description = CloudI Service API requests (JSON-RPC/Erlang-term support)
+pkg_cloudi_service_api_requests_homepage = http://cloudi.org/
+pkg_cloudi_service_api_requests_fetch = git
+pkg_cloudi_service_api_requests_repo = https://github.com/CloudI/cloudi_service_api_requests
+pkg_cloudi_service_api_requests_commit = master
+
+PACKAGES += cloudi_service_db
+pkg_cloudi_service_db_name = cloudi_service_db
+pkg_cloudi_service_db_description = CloudI Database (in-memory/testing/generic)
+pkg_cloudi_service_db_homepage = http://cloudi.org/
+pkg_cloudi_service_db_fetch = git
+pkg_cloudi_service_db_repo = https://github.com/CloudI/cloudi_service_db
+pkg_cloudi_service_db_commit = master
+
+PACKAGES += cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_name = cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_description = Cassandra CloudI Service
+pkg_cloudi_service_db_cassandra_homepage = http://cloudi.org/
+pkg_cloudi_service_db_cassandra_fetch = git
+pkg_cloudi_service_db_cassandra_repo = https://github.com/CloudI/cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_commit = master
+
+PACKAGES += cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_name = cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_description = Cassandra CQL CloudI Service
+pkg_cloudi_service_db_cassandra_cql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_cassandra_cql_fetch = git
+pkg_cloudi_service_db_cassandra_cql_repo = https://github.com/CloudI/cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_commit = master
+
+PACKAGES += cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_name = cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_description = CouchDB CloudI Service
+pkg_cloudi_service_db_couchdb_homepage = http://cloudi.org/
+pkg_cloudi_service_db_couchdb_fetch = git
+pkg_cloudi_service_db_couchdb_repo = https://github.com/CloudI/cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_commit = master
+
+PACKAGES += cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_name = cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_description = elasticsearch CloudI Service
+pkg_cloudi_service_db_elasticsearch_homepage = http://cloudi.org/
+pkg_cloudi_service_db_elasticsearch_fetch = git
+pkg_cloudi_service_db_elasticsearch_repo = https://github.com/CloudI/cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_commit = master
+
+PACKAGES += cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_name = cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_description = memcached CloudI Service
+pkg_cloudi_service_db_memcached_homepage = http://cloudi.org/
+pkg_cloudi_service_db_memcached_fetch = git
+pkg_cloudi_service_db_memcached_repo = https://github.com/CloudI/cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_commit = master
+
+PACKAGES += cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_name = cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_description = MySQL CloudI Service
+pkg_cloudi_service_db_mysql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_mysql_fetch = git
+pkg_cloudi_service_db_mysql_repo = https://github.com/CloudI/cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_commit = master
+
+PACKAGES += cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_name = cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_description = PostgreSQL CloudI Service
+pkg_cloudi_service_db_pgsql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_pgsql_fetch = git
+pkg_cloudi_service_db_pgsql_repo = https://github.com/CloudI/cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_commit = master
+
+PACKAGES += cloudi_service_db_riak
+pkg_cloudi_service_db_riak_name = cloudi_service_db_riak
+pkg_cloudi_service_db_riak_description = Riak CloudI Service
+pkg_cloudi_service_db_riak_homepage = http://cloudi.org/
+pkg_cloudi_service_db_riak_fetch = git
+pkg_cloudi_service_db_riak_repo = https://github.com/CloudI/cloudi_service_db_riak
+pkg_cloudi_service_db_riak_commit = master
+
+PACKAGES += cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_name = cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_description = Tokyo Tyrant CloudI Service
+pkg_cloudi_service_db_tokyotyrant_homepage = http://cloudi.org/
+pkg_cloudi_service_db_tokyotyrant_fetch = git
+pkg_cloudi_service_db_tokyotyrant_repo = https://github.com/CloudI/cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_commit = master
+
+PACKAGES += cloudi_service_filesystem
+pkg_cloudi_service_filesystem_name = cloudi_service_filesystem
+pkg_cloudi_service_filesystem_description = Filesystem CloudI Service
+pkg_cloudi_service_filesystem_homepage = http://cloudi.org/
+pkg_cloudi_service_filesystem_fetch = git
+pkg_cloudi_service_filesystem_repo = https://github.com/CloudI/cloudi_service_filesystem
+pkg_cloudi_service_filesystem_commit = master
+
+PACKAGES += cloudi_service_http_client
+pkg_cloudi_service_http_client_name = cloudi_service_http_client
+pkg_cloudi_service_http_client_description = HTTP client CloudI Service
+pkg_cloudi_service_http_client_homepage = http://cloudi.org/
+pkg_cloudi_service_http_client_fetch = git
+pkg_cloudi_service_http_client_repo = https://github.com/CloudI/cloudi_service_http_client
+pkg_cloudi_service_http_client_commit = master
+
+PACKAGES += cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_name = cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_description = cowboy HTTP/HTTPS CloudI Service
+pkg_cloudi_service_http_cowboy_homepage = http://cloudi.org/
+pkg_cloudi_service_http_cowboy_fetch = git
+pkg_cloudi_service_http_cowboy_repo = https://github.com/CloudI/cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_commit = master
+
+PACKAGES += cloudi_service_http_elli
+pkg_cloudi_service_http_elli_name = cloudi_service_http_elli
+pkg_cloudi_service_http_elli_description = elli HTTP CloudI Service
+pkg_cloudi_service_http_elli_homepage = http://cloudi.org/
+pkg_cloudi_service_http_elli_fetch = git
+pkg_cloudi_service_http_elli_repo = https://github.com/CloudI/cloudi_service_http_elli
+pkg_cloudi_service_http_elli_commit = master
+
+PACKAGES += cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_name = cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_description = Map/Reduce CloudI Service
+pkg_cloudi_service_map_reduce_homepage = http://cloudi.org/
+pkg_cloudi_service_map_reduce_fetch = git
+pkg_cloudi_service_map_reduce_repo = https://github.com/CloudI/cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_commit = master
+
+PACKAGES += cloudi_service_oauth1
+pkg_cloudi_service_oauth1_name = cloudi_service_oauth1
+pkg_cloudi_service_oauth1_description = OAuth v1.0 CloudI Service
+pkg_cloudi_service_oauth1_homepage = http://cloudi.org/
+pkg_cloudi_service_oauth1_fetch = git
+pkg_cloudi_service_oauth1_repo = https://github.com/CloudI/cloudi_service_oauth1
+pkg_cloudi_service_oauth1_commit = master
+
+PACKAGES += cloudi_service_queue
+pkg_cloudi_service_queue_name = cloudi_service_queue
+pkg_cloudi_service_queue_description = Persistent Queue Service
+pkg_cloudi_service_queue_homepage = http://cloudi.org/
+pkg_cloudi_service_queue_fetch = git
+pkg_cloudi_service_queue_repo = https://github.com/CloudI/cloudi_service_queue
+pkg_cloudi_service_queue_commit = master
+
+PACKAGES += cloudi_service_quorum
+pkg_cloudi_service_quorum_name = cloudi_service_quorum
+pkg_cloudi_service_quorum_description = CloudI Quorum Service
+pkg_cloudi_service_quorum_homepage = http://cloudi.org/
+pkg_cloudi_service_quorum_fetch = git
+pkg_cloudi_service_quorum_repo = https://github.com/CloudI/cloudi_service_quorum
+pkg_cloudi_service_quorum_commit = master
+
+PACKAGES += cloudi_service_router
+pkg_cloudi_service_router_name = cloudi_service_router
+pkg_cloudi_service_router_description = CloudI Router Service
+pkg_cloudi_service_router_homepage = http://cloudi.org/
+pkg_cloudi_service_router_fetch = git
+pkg_cloudi_service_router_repo = https://github.com/CloudI/cloudi_service_router
+pkg_cloudi_service_router_commit = master
+
+PACKAGES += cloudi_service_tcp
+pkg_cloudi_service_tcp_name = cloudi_service_tcp
+pkg_cloudi_service_tcp_description = TCP CloudI Service
+pkg_cloudi_service_tcp_homepage = http://cloudi.org/
+pkg_cloudi_service_tcp_fetch = git
+pkg_cloudi_service_tcp_repo = https://github.com/CloudI/cloudi_service_tcp
+pkg_cloudi_service_tcp_commit = master
+
+PACKAGES += cloudi_service_timers
+pkg_cloudi_service_timers_name = cloudi_service_timers
+pkg_cloudi_service_timers_description = Timers CloudI Service
+pkg_cloudi_service_timers_homepage = http://cloudi.org/
+pkg_cloudi_service_timers_fetch = git
+pkg_cloudi_service_timers_repo = https://github.com/CloudI/cloudi_service_timers
+pkg_cloudi_service_timers_commit = master
+
+PACKAGES += cloudi_service_udp
+pkg_cloudi_service_udp_name = cloudi_service_udp
+pkg_cloudi_service_udp_description = UDP CloudI Service
+pkg_cloudi_service_udp_homepage = http://cloudi.org/
+pkg_cloudi_service_udp_fetch = git
+pkg_cloudi_service_udp_repo = https://github.com/CloudI/cloudi_service_udp
+pkg_cloudi_service_udp_commit = master
+
+PACKAGES += cloudi_service_validate
+pkg_cloudi_service_validate_name = cloudi_service_validate
+pkg_cloudi_service_validate_description = CloudI Validate Service
+pkg_cloudi_service_validate_homepage = http://cloudi.org/
+pkg_cloudi_service_validate_fetch = git
+pkg_cloudi_service_validate_repo = https://github.com/CloudI/cloudi_service_validate
+pkg_cloudi_service_validate_commit = master
+
+PACKAGES += cloudi_service_zeromq
+pkg_cloudi_service_zeromq_name = cloudi_service_zeromq
+pkg_cloudi_service_zeromq_description = ZeroMQ CloudI Service
+pkg_cloudi_service_zeromq_homepage = http://cloudi.org/
+pkg_cloudi_service_zeromq_fetch = git
+pkg_cloudi_service_zeromq_repo = https://github.com/CloudI/cloudi_service_zeromq
+pkg_cloudi_service_zeromq_commit = master
+
+PACKAGES += cluster_info
+pkg_cluster_info_name = cluster_info
+pkg_cluster_info_description = Fork of Hibari's nifty cluster_info OTP app
+pkg_cluster_info_homepage = https://github.com/basho/cluster_info
+pkg_cluster_info_fetch = git
+pkg_cluster_info_repo = https://github.com/basho/cluster_info
+pkg_cluster_info_commit = master
+
+PACKAGES += color
+pkg_color_name = color
+pkg_color_description = ANSI colors for your Erlang
+pkg_color_homepage = https://github.com/julianduque/erlang-color
+pkg_color_fetch = git
+pkg_color_repo = https://github.com/julianduque/erlang-color
+pkg_color_commit = master
+
+PACKAGES += confetti
+pkg_confetti_name = confetti
+pkg_confetti_description = Erlang configuration provider / application:get_env/2 on steroids
+pkg_confetti_homepage = https://github.com/jtendo/confetti
+pkg_confetti_fetch = git
+pkg_confetti_repo = https://github.com/jtendo/confetti
+pkg_confetti_commit = master
+
+PACKAGES += couchbeam
+pkg_couchbeam_name = couchbeam
+pkg_couchbeam_description = Apache CouchDB client in Erlang
+pkg_couchbeam_homepage = https://github.com/benoitc/couchbeam
+pkg_couchbeam_fetch = git
+pkg_couchbeam_repo = https://github.com/benoitc/couchbeam
+pkg_couchbeam_commit = master
+
+PACKAGES += covertool
+pkg_covertool_name = covertool
+pkg_covertool_description = Tool to convert Erlang cover data files into Cobertura XML reports
+pkg_covertool_homepage = https://github.com/idubrov/covertool
+pkg_covertool_fetch = git
+pkg_covertool_repo = https://github.com/idubrov/covertool
+pkg_covertool_commit = master
+
+PACKAGES += cowboy
+pkg_cowboy_name = cowboy
+pkg_cowboy_description = Small, fast and modular HTTP server.
+pkg_cowboy_homepage = http://ninenines.eu
+pkg_cowboy_fetch = git
+pkg_cowboy_repo = https://github.com/ninenines/cowboy
+pkg_cowboy_commit = 1.0.4
+
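+# A minimal sketch of depending on an index package from a user project
+# Makefile (hypothetical; my_app and the tag pin are illustrative, and
+# dep_cowboy_commit simply overrides pkg_cowboy_commit above):
+#
+#     PROJECT = my_app
+#     DEPS = cowboy
+#     dep_cowboy_commit = 1.0.4
+#
+#     include erlang.mk
+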
+PACKAGES += cowdb
+pkg_cowdb_name = cowdb
+pkg_cowdb_description = Pure key/value database library for Erlang applications
+pkg_cowdb_homepage = https://github.com/refuge/cowdb
+pkg_cowdb_fetch = git
+pkg_cowdb_repo = https://github.com/refuge/cowdb
+pkg_cowdb_commit = master
+
+PACKAGES += cowlib
+pkg_cowlib_name = cowlib
+pkg_cowlib_description = Support library for manipulating Web protocols.
+pkg_cowlib_homepage = http://ninenines.eu
+pkg_cowlib_fetch = git
+pkg_cowlib_repo = https://github.com/ninenines/cowlib
+pkg_cowlib_commit = 1.0.2
+
+PACKAGES += cpg
+pkg_cpg_name = cpg
+pkg_cpg_description = CloudI Process Groups
+pkg_cpg_homepage = https://github.com/okeuday/cpg
+pkg_cpg_fetch = git
+pkg_cpg_repo = https://github.com/okeuday/cpg
+pkg_cpg_commit = master
+
+PACKAGES += cqerl
+pkg_cqerl_name = cqerl
+pkg_cqerl_description = Native Erlang CQL client for Cassandra
+pkg_cqerl_homepage = https://matehat.github.io/cqerl/
+pkg_cqerl_fetch = git
+pkg_cqerl_repo = https://github.com/matehat/cqerl
+pkg_cqerl_commit = master
+
+PACKAGES += cr
+pkg_cr_name = cr
+pkg_cr_description = Chain Replication
+pkg_cr_homepage = https://synrc.com/apps/cr/doc/cr.htm
+pkg_cr_fetch = git
+pkg_cr_repo = https://github.com/spawnproc/cr
+pkg_cr_commit = master
+
+PACKAGES += cuttlefish
+pkg_cuttlefish_name = cuttlefish
+pkg_cuttlefish_description = never lose your childlike sense of wonder baby cuttlefish, promise me?
+pkg_cuttlefish_homepage = https://github.com/basho/cuttlefish
+pkg_cuttlefish_fetch = git
+pkg_cuttlefish_repo = https://github.com/basho/cuttlefish
+pkg_cuttlefish_commit = master
+
+PACKAGES += damocles
+pkg_damocles_name = damocles
+pkg_damocles_description = Erlang library for generating adversarial network conditions for QAing distributed applications/systems on a single Linux box.
+pkg_damocles_homepage = https://github.com/lostcolony/damocles
+pkg_damocles_fetch = git
+pkg_damocles_repo = https://github.com/lostcolony/damocles
+pkg_damocles_commit = master
+
+PACKAGES += debbie
+pkg_debbie_name = debbie
+pkg_debbie_description = .DEB Built In Erlang
+pkg_debbie_homepage = https://github.com/crownedgrouse/debbie
+pkg_debbie_fetch = git
+pkg_debbie_repo = https://github.com/crownedgrouse/debbie
+pkg_debbie_commit = master
+
+PACKAGES += decimal
+pkg_decimal_name = decimal
+pkg_decimal_description = An Erlang decimal arithmetic library
+pkg_decimal_homepage = https://github.com/tim/erlang-decimal
+pkg_decimal_fetch = git
+pkg_decimal_repo = https://github.com/tim/erlang-decimal
+pkg_decimal_commit = master
+
+PACKAGES += detergent
+pkg_detergent_name = detergent
+pkg_detergent_description = An emulsifying Erlang SOAP library
+pkg_detergent_homepage = https://github.com/devinus/detergent
+pkg_detergent_fetch = git
+pkg_detergent_repo = https://github.com/devinus/detergent
+pkg_detergent_commit = master
+
+PACKAGES += detest
+pkg_detest_name = detest
+pkg_detest_description = Tool for running tests on a cluster of Erlang nodes
+pkg_detest_homepage = https://github.com/biokoda/detest
+pkg_detest_fetch = git
+pkg_detest_repo = https://github.com/biokoda/detest
+pkg_detest_commit = master
+
+PACKAGES += dh_date
+pkg_dh_date_name = dh_date
+pkg_dh_date_description = Date formatting / parsing library for Erlang
+pkg_dh_date_homepage = https://github.com/daleharvey/dh_date
+pkg_dh_date_fetch = git
+pkg_dh_date_repo = https://github.com/daleharvey/dh_date
+pkg_dh_date_commit = master
+
+PACKAGES += dirbusterl
+pkg_dirbusterl_name = dirbusterl
+pkg_dirbusterl_description = DirBuster successor in Erlang
+pkg_dirbusterl_homepage = https://github.com/silentsignal/DirBustErl
+pkg_dirbusterl_fetch = git
+pkg_dirbusterl_repo = https://github.com/silentsignal/DirBustErl
+pkg_dirbusterl_commit = master
+
+PACKAGES += dispcount
+pkg_dispcount_name = dispcount
+pkg_dispcount_description = Erlang task dispatcher based on ETS counters.
+pkg_dispcount_homepage = https://github.com/ferd/dispcount
+pkg_dispcount_fetch = git
+pkg_dispcount_repo = https://github.com/ferd/dispcount
+pkg_dispcount_commit = master
+
+PACKAGES += dlhttpc
+pkg_dlhttpc_name = dlhttpc
+pkg_dlhttpc_description = dispcount-based lhttpc fork for massive amounts of requests to limited endpoints
+pkg_dlhttpc_homepage = https://github.com/ferd/dlhttpc
+pkg_dlhttpc_fetch = git
+pkg_dlhttpc_repo = https://github.com/ferd/dlhttpc
+pkg_dlhttpc_commit = master
+
+PACKAGES += dns
+pkg_dns_name = dns
+pkg_dns_description = Erlang DNS library
+pkg_dns_homepage = https://github.com/aetrion/dns_erlang
+pkg_dns_fetch = git
+pkg_dns_repo = https://github.com/aetrion/dns_erlang
+pkg_dns_commit = master
+
+PACKAGES += dnssd
+pkg_dnssd_name = dnssd
+pkg_dnssd_description = Erlang interface to Apple's Bonjour DNS Service Discovery implementation
+pkg_dnssd_homepage = https://github.com/benoitc/dnssd_erlang
+pkg_dnssd_fetch = git
+pkg_dnssd_repo = https://github.com/benoitc/dnssd_erlang
+pkg_dnssd_commit = master
+
+PACKAGES += dynamic_compile
+pkg_dynamic_compile_name = dynamic_compile
+pkg_dynamic_compile_description = Compile and load Erlang modules from string input
+pkg_dynamic_compile_homepage = https://github.com/jkvor/dynamic_compile
+pkg_dynamic_compile_fetch = git
+pkg_dynamic_compile_repo = https://github.com/jkvor/dynamic_compile
+pkg_dynamic_compile_commit = master
+
+PACKAGES += e2
+pkg_e2_name = e2
+pkg_e2_description = Library to simplify writing correct OTP applications.
+pkg_e2_homepage = http://e2project.org
+pkg_e2_fetch = git
+pkg_e2_repo = https://github.com/gar1t/e2
+pkg_e2_commit = master
+
+PACKAGES += eamf
+pkg_eamf_name = eamf
+pkg_eamf_description = eAMF provides Action Message Format (AMF) support for Erlang
+pkg_eamf_homepage = https://github.com/mrinalwadhwa/eamf
+pkg_eamf_fetch = git
+pkg_eamf_repo = https://github.com/mrinalwadhwa/eamf
+pkg_eamf_commit = master
+
+PACKAGES += eavro
+pkg_eavro_name = eavro
+pkg_eavro_description = Apache Avro encoder/decoder
+pkg_eavro_homepage = https://github.com/SIfoxDevTeam/eavro
+pkg_eavro_fetch = git
+pkg_eavro_repo = https://github.com/SIfoxDevTeam/eavro
+pkg_eavro_commit = master
+
+PACKAGES += ecapnp
+pkg_ecapnp_name = ecapnp
+pkg_ecapnp_description = Cap'n Proto library for Erlang
+pkg_ecapnp_homepage = https://github.com/kaos/ecapnp
+pkg_ecapnp_fetch = git
+pkg_ecapnp_repo = https://github.com/kaos/ecapnp
+pkg_ecapnp_commit = master
+
+PACKAGES += econfig
+pkg_econfig_name = econfig
+pkg_econfig_description = Simple Erlang config handler using INI files
+pkg_econfig_homepage = https://github.com/benoitc/econfig
+pkg_econfig_fetch = git
+pkg_econfig_repo = https://github.com/benoitc/econfig
+pkg_econfig_commit = master
+
+PACKAGES += edate
+pkg_edate_name = edate
+pkg_edate_description = Date manipulation library for Erlang
+pkg_edate_homepage = https://github.com/dweldon/edate
+pkg_edate_fetch = git
+pkg_edate_repo = https://github.com/dweldon/edate
+pkg_edate_commit = master
+
+PACKAGES += edgar
+pkg_edgar_name = edgar
+pkg_edgar_description = Erlang Does GNU AR
+pkg_edgar_homepage = https://github.com/crownedgrouse/edgar
+pkg_edgar_fetch = git
+pkg_edgar_repo = https://github.com/crownedgrouse/edgar
+pkg_edgar_commit = master
+
+PACKAGES += edis
+pkg_edis_name = edis
+pkg_edis_description = An Erlang implementation of Redis KV Store
+pkg_edis_homepage = http://inaka.github.com/edis/
+pkg_edis_fetch = git
+pkg_edis_repo = https://github.com/inaka/edis
+pkg_edis_commit = master
+
+PACKAGES += edns
+pkg_edns_name = edns
+pkg_edns_description = Erlang/OTP DNS server
+pkg_edns_homepage = https://github.com/hcvst/erlang-dns
+pkg_edns_fetch = git
+pkg_edns_repo = https://github.com/hcvst/erlang-dns
+pkg_edns_commit = master
+
+PACKAGES += edown
+pkg_edown_name = edown
+pkg_edown_description = EDoc extension for generating Github-flavored Markdown
+pkg_edown_homepage = https://github.com/uwiger/edown
+pkg_edown_fetch = git
+pkg_edown_repo = https://github.com/uwiger/edown
+pkg_edown_commit = master
+
+PACKAGES += eep
+pkg_eep_name = eep
+pkg_eep_description = The Erlang Easy Profiling (eep) application provides a way to analyze application performance and call hierarchy
+pkg_eep_homepage = https://github.com/virtan/eep
+pkg_eep_fetch = git
+pkg_eep_repo = https://github.com/virtan/eep
+pkg_eep_commit = master
+
+PACKAGES += eep_app
+pkg_eep_app_name = eep_app
+pkg_eep_app_description = Embedded Event Processing
+pkg_eep_app_homepage = https://github.com/darach/eep-erl
+pkg_eep_app_fetch = git
+pkg_eep_app_repo = https://github.com/darach/eep-erl
+pkg_eep_app_commit = master
+
+PACKAGES += efene
+pkg_efene_name = efene
+pkg_efene_description = Alternative syntax for the Erlang Programming Language focusing on simplicity, ease of use and programmer UX
+pkg_efene_homepage = https://github.com/efene/efene
+pkg_efene_fetch = git
+pkg_efene_repo = https://github.com/efene/efene
+pkg_efene_commit = master
+
+PACKAGES += egeoip
+pkg_egeoip_name = egeoip
+pkg_egeoip_description = Erlang IP Geolocation module, currently supporting the MaxMind GeoLite City Database.
+pkg_egeoip_homepage = https://github.com/mochi/egeoip
+pkg_egeoip_fetch = git
+pkg_egeoip_repo = https://github.com/mochi/egeoip
+pkg_egeoip_commit = master
+
+PACKAGES += ehsa
+pkg_ehsa_name = ehsa
+pkg_ehsa_description = Erlang HTTP server basic and digest authentication modules
+pkg_ehsa_homepage = https://bitbucket.org/a12n/ehsa
+pkg_ehsa_fetch = hg
+pkg_ehsa_repo = https://bitbucket.org/a12n/ehsa
+pkg_ehsa_commit = default
+
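+# Note the ehsa entry above fetches via hg rather than git. A project can
+# also spell such a dependency out in full, bypassing the index defaults
+# (a sketch; the method/repo/commit triple mirrors the entry above):
+#
+#     DEPS = ehsa
+#     dep_ehsa = hg https://bitbucket.org/a12n/ehsa default
+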
+PACKAGES += ej
+pkg_ej_name = ej
+pkg_ej_description = Helper module for working with Erlang terms representing JSON
+pkg_ej_homepage = https://github.com/seth/ej
+pkg_ej_fetch = git
+pkg_ej_repo = https://github.com/seth/ej
+pkg_ej_commit = master
+
+PACKAGES += ejabberd
+pkg_ejabberd_name = ejabberd
+pkg_ejabberd_description = Robust, ubiquitous and massively scalable Jabber / XMPP Instant Messaging platform
+pkg_ejabberd_homepage = https://github.com/processone/ejabberd
+pkg_ejabberd_fetch = git
+pkg_ejabberd_repo = https://github.com/processone/ejabberd
+pkg_ejabberd_commit = master
+
+PACKAGES += ejwt
+pkg_ejwt_name = ejwt
+pkg_ejwt_description = Erlang library for JSON Web Tokens
+pkg_ejwt_homepage = https://github.com/artefactop/ejwt
+pkg_ejwt_fetch = git
+pkg_ejwt_repo = https://github.com/artefactop/ejwt
+pkg_ejwt_commit = master
+
+PACKAGES += ekaf
+pkg_ekaf_name = ekaf
+pkg_ekaf_description = A minimal, high-performance Kafka client in Erlang.
+pkg_ekaf_homepage = https://github.com/helpshift/ekaf
+pkg_ekaf_fetch = git
+pkg_ekaf_repo = https://github.com/helpshift/ekaf
+pkg_ekaf_commit = master
+
+PACKAGES += elarm
+pkg_elarm_name = elarm
+pkg_elarm_description = Alarm Manager for Erlang.
+pkg_elarm_homepage = https://github.com/esl/elarm
+pkg_elarm_fetch = git
+pkg_elarm_repo = https://github.com/esl/elarm
+pkg_elarm_commit = master
+
+PACKAGES += eleveldb
+pkg_eleveldb_name = eleveldb
+pkg_eleveldb_description = Erlang LevelDB API
+pkg_eleveldb_homepage = https://github.com/basho/eleveldb
+pkg_eleveldb_fetch = git
+pkg_eleveldb_repo = https://github.com/basho/eleveldb
+pkg_eleveldb_commit = master
+
+PACKAGES += elixir
+pkg_elixir_name = elixir
+pkg_elixir_description = Elixir is a dynamic, functional language designed for building scalable and maintainable applications
+pkg_elixir_homepage = https://elixir-lang.org/
+pkg_elixir_fetch = git
+pkg_elixir_repo = https://github.com/elixir-lang/elixir
+pkg_elixir_commit = master
+
+PACKAGES += elli
+pkg_elli_name = elli
+pkg_elli_description = Simple, robust and performant Erlang web server
+pkg_elli_homepage = https://github.com/elli-lib/elli
+pkg_elli_fetch = git
+pkg_elli_repo = https://github.com/elli-lib/elli
+pkg_elli_commit = master
+
+PACKAGES += elvis
+pkg_elvis_name = elvis
+pkg_elvis_description = Erlang Style Reviewer
+pkg_elvis_homepage = https://github.com/inaka/elvis
+pkg_elvis_fetch = git
+pkg_elvis_repo = https://github.com/inaka/elvis
+pkg_elvis_commit = master
+
+PACKAGES += emagick
+pkg_emagick_name = emagick
+pkg_emagick_description = Wrapper for Graphics/ImageMagick command line tool.
+pkg_emagick_homepage = https://github.com/kivra/emagick
+pkg_emagick_fetch = git
+pkg_emagick_repo = https://github.com/kivra/emagick
+pkg_emagick_commit = master
+
+PACKAGES += emysql
+pkg_emysql_name = emysql
+pkg_emysql_description = Stable, pure Erlang MySQL driver.
+pkg_emysql_homepage = https://github.com/Eonblast/Emysql
+pkg_emysql_fetch = git
+pkg_emysql_repo = https://github.com/Eonblast/Emysql
+pkg_emysql_commit = master
+
+PACKAGES += enm
+pkg_enm_name = enm
+pkg_enm_description = Erlang driver for nanomsg
+pkg_enm_homepage = https://github.com/basho/enm
+pkg_enm_fetch = git
+pkg_enm_repo = https://github.com/basho/enm
+pkg_enm_commit = master
+
+PACKAGES += entop
+pkg_entop_name = entop
+pkg_entop_description = A top-like tool for monitoring an Erlang node
+pkg_entop_homepage = https://github.com/mazenharake/entop
+pkg_entop_fetch = git
+pkg_entop_repo = https://github.com/mazenharake/entop
+pkg_entop_commit = master
+
+PACKAGES += epcap
+pkg_epcap_name = epcap
+pkg_epcap_description = Erlang packet capture interface using pcap
+pkg_epcap_homepage = https://github.com/msantos/epcap
+pkg_epcap_fetch = git
+pkg_epcap_repo = https://github.com/msantos/epcap
+pkg_epcap_commit = master
+
+PACKAGES += eper
+pkg_eper_name = eper
+pkg_eper_description = Erlang performance and debugging tools.
+pkg_eper_homepage = https://github.com/massemanet/eper
+pkg_eper_fetch = git
+pkg_eper_repo = https://github.com/massemanet/eper
+pkg_eper_commit = master
+
+PACKAGES += epgsql
+pkg_epgsql_name = epgsql
+pkg_epgsql_description = Erlang PostgreSQL client library.
+pkg_epgsql_homepage = https://github.com/epgsql/epgsql
+pkg_epgsql_fetch = git
+pkg_epgsql_repo = https://github.com/epgsql/epgsql
+pkg_epgsql_commit = master
+
+PACKAGES += episcina
+pkg_episcina_name = episcina
+pkg_episcina_description = A simple, non-intrusive resource pool for connections
+pkg_episcina_homepage = https://github.com/erlware/episcina
+pkg_episcina_fetch = git
+pkg_episcina_repo = https://github.com/erlware/episcina
+pkg_episcina_commit = master
+
+PACKAGES += eplot
+pkg_eplot_name = eplot
+pkg_eplot_description = A plot engine written in Erlang.
+pkg_eplot_homepage = https://github.com/psyeugenic/eplot
+pkg_eplot_fetch = git
+pkg_eplot_repo = https://github.com/psyeugenic/eplot
+pkg_eplot_commit = master
+
+PACKAGES += epocxy
+pkg_epocxy_name = epocxy
+pkg_epocxy_description = Erlang Patterns of Concurrency
+pkg_epocxy_homepage = https://github.com/duomark/epocxy
+pkg_epocxy_fetch = git
+pkg_epocxy_repo = https://github.com/duomark/epocxy
+pkg_epocxy_commit = master
+
+PACKAGES += epubnub
+pkg_epubnub_name = epubnub
+pkg_epubnub_description = Erlang PubNub API
+pkg_epubnub_homepage = https://github.com/tsloughter/epubnub
+pkg_epubnub_fetch = git
+pkg_epubnub_repo = https://github.com/tsloughter/epubnub
+pkg_epubnub_commit = master
+
+PACKAGES += eqm
+pkg_eqm_name = eqm
+pkg_eqm_description = Erlang pub sub with supply-demand channels
+pkg_eqm_homepage = https://github.com/loucash/eqm
+pkg_eqm_fetch = git
+pkg_eqm_repo = https://github.com/loucash/eqm
+pkg_eqm_commit = master
+
+PACKAGES += eredis
+pkg_eredis_name = eredis
+pkg_eredis_description = Erlang Redis client
+pkg_eredis_homepage = https://github.com/wooga/eredis
+pkg_eredis_fetch = git
+pkg_eredis_repo = https://github.com/wooga/eredis
+pkg_eredis_commit = master
+
+PACKAGES += eredis_pool
+pkg_eredis_pool_name = eredis_pool
+pkg_eredis_pool_description = eredis_pool is a pool of Redis clients, using eredis and poolboy.
+pkg_eredis_pool_homepage = https://github.com/hiroeorz/eredis_pool
+pkg_eredis_pool_fetch = git
+pkg_eredis_pool_repo = https://github.com/hiroeorz/eredis_pool
+pkg_eredis_pool_commit = master
+
+PACKAGES += erl_streams
+pkg_erl_streams_name = erl_streams
+pkg_erl_streams_description = Streams in Erlang
+pkg_erl_streams_homepage = https://github.com/epappas/erl_streams
+pkg_erl_streams_fetch = git
+pkg_erl_streams_repo = https://github.com/epappas/erl_streams
+pkg_erl_streams_commit = master
+
+PACKAGES += erlang_cep
+pkg_erlang_cep_name = erlang_cep
+pkg_erlang_cep_description = A basic CEP package written in Erlang
+pkg_erlang_cep_homepage = https://github.com/danmacklin/erlang_cep
+pkg_erlang_cep_fetch = git
+pkg_erlang_cep_repo = https://github.com/danmacklin/erlang_cep
+pkg_erlang_cep_commit = master
+
+PACKAGES += erlang_js
+pkg_erlang_js_name = erlang_js
+pkg_erlang_js_description = A linked-in driver for Erlang to Mozilla's SpiderMonkey JavaScript runtime.
+pkg_erlang_js_homepage = https://github.com/basho/erlang_js
+pkg_erlang_js_fetch = git
+pkg_erlang_js_repo = https://github.com/basho/erlang_js
+pkg_erlang_js_commit = master
+
+PACKAGES += erlang_localtime
+pkg_erlang_localtime_name = erlang_localtime
+pkg_erlang_localtime_description = Erlang library for conversion from one local time to another
+pkg_erlang_localtime_homepage = https://github.com/dmitryme/erlang_localtime
+pkg_erlang_localtime_fetch = git
+pkg_erlang_localtime_repo = https://github.com/dmitryme/erlang_localtime
+pkg_erlang_localtime_commit = master
+
+PACKAGES += erlang_smtp
+pkg_erlang_smtp_name = erlang_smtp
+pkg_erlang_smtp_description = Erlang SMTP and POP3 server code.
+pkg_erlang_smtp_homepage = https://github.com/tonyg/erlang-smtp
+pkg_erlang_smtp_fetch = git
+pkg_erlang_smtp_repo = https://github.com/tonyg/erlang-smtp
+pkg_erlang_smtp_commit = master
+
+PACKAGES += erlang_term
+pkg_erlang_term_name = erlang_term
+pkg_erlang_term_description = Erlang Term Info
+pkg_erlang_term_homepage = https://github.com/okeuday/erlang_term
+pkg_erlang_term_fetch = git
+pkg_erlang_term_repo = https://github.com/okeuday/erlang_term
+pkg_erlang_term_commit = master
+
+PACKAGES += erlastic_search
+pkg_erlastic_search_name = erlastic_search
+pkg_erlastic_search_description = An Erlang app for communicating with Elasticsearch's REST interface.
+pkg_erlastic_search_homepage = https://github.com/tsloughter/erlastic_search
+pkg_erlastic_search_fetch = git
+pkg_erlastic_search_repo = https://github.com/tsloughter/erlastic_search
+pkg_erlastic_search_commit = master
+
+PACKAGES += erlasticsearch
+pkg_erlasticsearch_name = erlasticsearch
+pkg_erlasticsearch_description = Erlang Thrift interface to Elasticsearch
+pkg_erlasticsearch_homepage = https://github.com/dieswaytoofast/erlasticsearch
+pkg_erlasticsearch_fetch = git
+pkg_erlasticsearch_repo = https://github.com/dieswaytoofast/erlasticsearch
+pkg_erlasticsearch_commit = master
+
+PACKAGES += erlbrake
+pkg_erlbrake_name = erlbrake
+pkg_erlbrake_description = Erlang Airbrake notification client
+pkg_erlbrake_homepage = https://github.com/kenpratt/erlbrake
+pkg_erlbrake_fetch = git
+pkg_erlbrake_repo = https://github.com/kenpratt/erlbrake
+pkg_erlbrake_commit = master
+
+PACKAGES += erlcloud
+pkg_erlcloud_name = erlcloud
+pkg_erlcloud_description = Cloud Computing library for erlang (Amazon EC2, S3, SQS, SimpleDB, Mechanical Turk, ELB)
+pkg_erlcloud_homepage = https://github.com/gleber/erlcloud
+pkg_erlcloud_fetch = git
+pkg_erlcloud_repo = https://github.com/gleber/erlcloud
+pkg_erlcloud_commit = master
+
+PACKAGES += erlcron
+pkg_erlcron_name = erlcron
+pkg_erlcron_description = Erlang cronish system
+pkg_erlcron_homepage = https://github.com/erlware/erlcron
+pkg_erlcron_fetch = git
+pkg_erlcron_repo = https://github.com/erlware/erlcron
+pkg_erlcron_commit = master
+
+PACKAGES += erldb
+pkg_erldb_name = erldb
+pkg_erldb_description = ORM (Object-relational mapping) application implemented in Erlang
+pkg_erldb_homepage = http://erldb.org
+pkg_erldb_fetch = git
+pkg_erldb_repo = https://github.com/erldb/erldb
+pkg_erldb_commit = master
+
+PACKAGES += erldis
+pkg_erldis_name = erldis
+pkg_erldis_description = Redis Erlang client library
+pkg_erldis_homepage = https://github.com/cstar/erldis
+pkg_erldis_fetch = git
+pkg_erldis_repo = https://github.com/cstar/erldis
+pkg_erldis_commit = master
+
+PACKAGES += erldns
+pkg_erldns_name = erldns
+pkg_erldns_description = DNS server, in Erlang.
+pkg_erldns_homepage = https://github.com/aetrion/erl-dns
+pkg_erldns_fetch = git
+pkg_erldns_repo = https://github.com/aetrion/erl-dns
+pkg_erldns_commit = master
+
+PACKAGES += erldocker
+pkg_erldocker_name = erldocker
+pkg_erldocker_description = Docker Remote API client for Erlang
+pkg_erldocker_homepage = https://github.com/proger/erldocker
+pkg_erldocker_fetch = git
+pkg_erldocker_repo = https://github.com/proger/erldocker
+pkg_erldocker_commit = master
+
+PACKAGES += erlfsmon
+pkg_erlfsmon_name = erlfsmon
+pkg_erlfsmon_description = Erlang filesystem event watcher for Linux and OSX
+pkg_erlfsmon_homepage = https://github.com/proger/erlfsmon
+pkg_erlfsmon_fetch = git
+pkg_erlfsmon_repo = https://github.com/proger/erlfsmon
+pkg_erlfsmon_commit = master
+
+PACKAGES += erlgit
+pkg_erlgit_name = erlgit
+pkg_erlgit_description = Erlang convenience wrapper around git executable
+pkg_erlgit_homepage = https://github.com/gleber/erlgit
+pkg_erlgit_fetch = git
+pkg_erlgit_repo = https://github.com/gleber/erlgit
+pkg_erlgit_commit = master
+
+PACKAGES += erlguten
+pkg_erlguten_name = erlguten
+pkg_erlguten_description = ErlGuten is a system for high-quality typesetting, written purely in Erlang.
+pkg_erlguten_homepage = https://github.com/richcarl/erlguten
+pkg_erlguten_fetch = git
+pkg_erlguten_repo = https://github.com/richcarl/erlguten
+pkg_erlguten_commit = master
+
+PACKAGES += erlmc
+pkg_erlmc_name = erlmc
+pkg_erlmc_description = Erlang memcached binary protocol client
+pkg_erlmc_homepage = https://github.com/jkvor/erlmc
+pkg_erlmc_fetch = git
+pkg_erlmc_repo = https://github.com/jkvor/erlmc
+pkg_erlmc_commit = master
+
+PACKAGES += erlmongo
+pkg_erlmongo_name = erlmongo
+pkg_erlmongo_description = Record-based Erlang driver for MongoDB with GridFS support
+pkg_erlmongo_homepage = https://github.com/SergejJurecko/erlmongo
+pkg_erlmongo_fetch = git
+pkg_erlmongo_repo = https://github.com/SergejJurecko/erlmongo
+pkg_erlmongo_commit = master
+
+PACKAGES += erlog
+pkg_erlog_name = erlog
+pkg_erlog_description = Prolog interpreter in and for Erlang
+pkg_erlog_homepage = https://github.com/rvirding/erlog
+pkg_erlog_fetch = git
+pkg_erlog_repo = https://github.com/rvirding/erlog
+pkg_erlog_commit = master
+
+PACKAGES += erlpass
+pkg_erlpass_name = erlpass
+pkg_erlpass_description = A library to handle password hashing and changing in a safe manner, independent from any kind of storage whatsoever.
+pkg_erlpass_homepage = https://github.com/ferd/erlpass
+pkg_erlpass_fetch = git
+pkg_erlpass_repo = https://github.com/ferd/erlpass
+pkg_erlpass_commit = master
+
+PACKAGES += erlport
+pkg_erlport_name = erlport
+pkg_erlport_description = ErlPort - connect Erlang to other languages
+pkg_erlport_homepage = https://github.com/hdima/erlport
+pkg_erlport_fetch = git
+pkg_erlport_repo = https://github.com/hdima/erlport
+pkg_erlport_commit = master
+
+PACKAGES += erlsh
+pkg_erlsh_name = erlsh
+pkg_erlsh_description = Erlang shell tools
+pkg_erlsh_homepage = https://github.com/proger/erlsh
+pkg_erlsh_fetch = git
+pkg_erlsh_repo = https://github.com/proger/erlsh
+pkg_erlsh_commit = master
+
+PACKAGES += erlsha2
+pkg_erlsha2_name = erlsha2
+pkg_erlsha2_description = SHA-224, SHA-256, SHA-384, SHA-512 implemented in Erlang NIFs.
+pkg_erlsha2_homepage = https://github.com/vinoski/erlsha2
+pkg_erlsha2_fetch = git
+pkg_erlsha2_repo = https://github.com/vinoski/erlsha2
+pkg_erlsha2_commit = master
+
+PACKAGES += erlsom
+pkg_erlsom_name = erlsom
+pkg_erlsom_description = XML parser for Erlang
+pkg_erlsom_homepage = https://github.com/willemdj/erlsom
+pkg_erlsom_fetch = git
+pkg_erlsom_repo = https://github.com/willemdj/erlsom
+pkg_erlsom_commit = master
+
+PACKAGES += erlubi
+pkg_erlubi_name = erlubi
+pkg_erlubi_description = Ubigraph Erlang Client (and Process Visualizer)
+pkg_erlubi_homepage = https://github.com/krestenkrab/erlubi
+pkg_erlubi_fetch = git
+pkg_erlubi_repo = https://github.com/krestenkrab/erlubi
+pkg_erlubi_commit = master
+
+PACKAGES += erlvolt
+pkg_erlvolt_name = erlvolt
+pkg_erlvolt_description = VoltDB Erlang Client Driver
+pkg_erlvolt_homepage = https://github.com/VoltDB/voltdb-client-erlang
+pkg_erlvolt_fetch = git
+pkg_erlvolt_repo = https://github.com/VoltDB/voltdb-client-erlang
+pkg_erlvolt_commit = master
+
+PACKAGES += erlware_commons
+pkg_erlware_commons_name = erlware_commons
+pkg_erlware_commons_description = Erlware Commons is an Erlware project focused on all aspects of reusable Erlang components.
+pkg_erlware_commons_homepage = https://github.com/erlware/erlware_commons
+pkg_erlware_commons_fetch = git
+pkg_erlware_commons_repo = https://github.com/erlware/erlware_commons
+pkg_erlware_commons_commit = master
+
+PACKAGES += erlydtl
+pkg_erlydtl_name = erlydtl
+pkg_erlydtl_description = Django Template Language for Erlang.
+pkg_erlydtl_homepage = https://github.com/erlydtl/erlydtl
+pkg_erlydtl_fetch = git
+pkg_erlydtl_repo = https://github.com/erlydtl/erlydtl
+pkg_erlydtl_commit = master
+
+PACKAGES += errd
+pkg_errd_name = errd
+pkg_errd_description = Erlang RRDTool library
+pkg_errd_homepage = https://github.com/archaelus/errd
+pkg_errd_fetch = git
+pkg_errd_repo = https://github.com/archaelus/errd
+pkg_errd_commit = master
+
+PACKAGES += erserve
+pkg_erserve_name = erserve
+pkg_erserve_description = Erlang/Rserve communication interface
+pkg_erserve_homepage = https://github.com/del/erserve
+pkg_erserve_fetch = git
+pkg_erserve_repo = https://github.com/del/erserve
+pkg_erserve_commit = master
+
+PACKAGES += erwa
+pkg_erwa_name = erwa
+pkg_erwa_description = A WAMP router and client written in Erlang.
+pkg_erwa_homepage = https://github.com/bwegh/erwa
+pkg_erwa_fetch = git
+pkg_erwa_repo = https://github.com/bwegh/erwa
+pkg_erwa_commit = master
+
+PACKAGES += escalus
+pkg_escalus_name = escalus
+pkg_escalus_description = An XMPP client library in Erlang for conveniently testing XMPP servers
+pkg_escalus_homepage = https://github.com/esl/escalus
+pkg_escalus_fetch = git
+pkg_escalus_repo = https://github.com/esl/escalus
+pkg_escalus_commit = master
+
+PACKAGES += esh_mk
+pkg_esh_mk_name = esh_mk
+pkg_esh_mk_description = esh template engine plugin for erlang.mk
+pkg_esh_mk_homepage = https://github.com/crownedgrouse/esh.mk
+pkg_esh_mk_fetch = git
+pkg_esh_mk_repo = https://github.com/crownedgrouse/esh.mk.git
+pkg_esh_mk_commit = master
+
+PACKAGES += espec
+pkg_espec_name = espec
+pkg_espec_description = ESpec: Behaviour driven development framework for Erlang
+pkg_espec_homepage = https://github.com/lucaspiller/espec
+pkg_espec_fetch = git
+pkg_espec_repo = https://github.com/lucaspiller/espec
+pkg_espec_commit = master
+
+PACKAGES += estatsd
+pkg_estatsd_name = estatsd
+pkg_estatsd_description = Erlang stats aggregation app that periodically flushes data to graphite
+pkg_estatsd_homepage = https://github.com/RJ/estatsd
+pkg_estatsd_fetch = git
+pkg_estatsd_repo = https://github.com/RJ/estatsd
+pkg_estatsd_commit = master
+
+PACKAGES += etap
+pkg_etap_name = etap
+pkg_etap_description = etap is a simple Erlang testing library that provides TAP-compliant output.
+pkg_etap_homepage = https://github.com/ngerakines/etap
+pkg_etap_fetch = git
+pkg_etap_repo = https://github.com/ngerakines/etap
+pkg_etap_commit = master
+
+PACKAGES += etest
+pkg_etest_name = etest
+pkg_etest_description = A lightweight, convention over configuration test framework for Erlang
+pkg_etest_homepage = https://github.com/wooga/etest
+pkg_etest_fetch = git
+pkg_etest_repo = https://github.com/wooga/etest
+pkg_etest_commit = master
+
+PACKAGES += etest_http
+pkg_etest_http_name = etest_http
+pkg_etest_http_description = etest Assertions around HTTP (client-side)
+pkg_etest_http_homepage = https://github.com/wooga/etest_http
+pkg_etest_http_fetch = git
+pkg_etest_http_repo = https://github.com/wooga/etest_http
+pkg_etest_http_commit = master
+
+PACKAGES += etoml
+pkg_etoml_name = etoml
+pkg_etoml_description = TOML language parser for Erlang
+pkg_etoml_homepage = https://github.com/kalta/etoml
+pkg_etoml_fetch = git
+pkg_etoml_repo = https://github.com/kalta/etoml
+pkg_etoml_commit = master
+
+PACKAGES += eunit
+pkg_eunit_name = eunit
+pkg_eunit_description = The EUnit lightweight unit testing framework for Erlang - this is the canonical development repository.
+pkg_eunit_homepage = https://github.com/richcarl/eunit
+pkg_eunit_fetch = git
+pkg_eunit_repo = https://github.com/richcarl/eunit
+pkg_eunit_commit = master
+
+PACKAGES += eunit_formatters
+pkg_eunit_formatters_name = eunit_formatters
+pkg_eunit_formatters_description = Because eunit's output sucks. Let's make it better.
+pkg_eunit_formatters_homepage = https://github.com/seancribbs/eunit_formatters
+pkg_eunit_formatters_fetch = git
+pkg_eunit_formatters_repo = https://github.com/seancribbs/eunit_formatters
+pkg_eunit_formatters_commit = master
+
+PACKAGES += euthanasia
+pkg_euthanasia_name = euthanasia
+pkg_euthanasia_description = Merciful killer for your Erlang processes
+pkg_euthanasia_homepage = https://github.com/doubleyou/euthanasia
+pkg_euthanasia_fetch = git
+pkg_euthanasia_repo = https://github.com/doubleyou/euthanasia
+pkg_euthanasia_commit = master
+
+PACKAGES += evum
+pkg_evum_name = evum
+pkg_evum_description = Spawn Linux VMs as Erlang processes in the Erlang VM
+pkg_evum_homepage = https://github.com/msantos/evum
+pkg_evum_fetch = git
+pkg_evum_repo = https://github.com/msantos/evum
+pkg_evum_commit = master
+
+PACKAGES += exec
+pkg_exec_name = erlexec
+pkg_exec_description = Execute and control OS processes from Erlang/OTP.
+pkg_exec_homepage = http://saleyn.github.com/erlexec
+pkg_exec_fetch = git
+pkg_exec_repo = https://github.com/saleyn/erlexec
+pkg_exec_commit = master
+
+PACKAGES += exml
+pkg_exml_name = exml
+pkg_exml_description = XML parsing library in Erlang
+pkg_exml_homepage = https://github.com/paulgray/exml
+pkg_exml_fetch = git
+pkg_exml_repo = https://github.com/paulgray/exml
+pkg_exml_commit = master
+
+PACKAGES += exometer
+pkg_exometer_name = exometer
+pkg_exometer_description = Basic measurement objects and probe behavior
+pkg_exometer_homepage = https://github.com/Feuerlabs/exometer
+pkg_exometer_fetch = git
+pkg_exometer_repo = https://github.com/Feuerlabs/exometer
+pkg_exometer_commit = master
+
+PACKAGES += exs1024
+pkg_exs1024_name = exs1024
+pkg_exs1024_description = Xorshift1024star pseudo random number generator for Erlang.
+pkg_exs1024_homepage = https://github.com/jj1bdx/exs1024
+pkg_exs1024_fetch = git
+pkg_exs1024_repo = https://github.com/jj1bdx/exs1024
+pkg_exs1024_commit = master
+
+PACKAGES += exs64
+pkg_exs64_name = exs64
+pkg_exs64_description = Xorshift64star pseudo random number generator for Erlang.
+pkg_exs64_homepage = https://github.com/jj1bdx/exs64
+pkg_exs64_fetch = git
+pkg_exs64_repo = https://github.com/jj1bdx/exs64
+pkg_exs64_commit = master
+
+PACKAGES += exsplus116
+pkg_exsplus116_name = exsplus116
+pkg_exsplus116_description = Xorshift116plus for Erlang
+pkg_exsplus116_homepage = https://github.com/jj1bdx/exsplus116
+pkg_exsplus116_fetch = git
+pkg_exsplus116_repo = https://github.com/jj1bdx/exsplus116
+pkg_exsplus116_commit = master
+
+PACKAGES += exsplus128
+pkg_exsplus128_name = exsplus128
+pkg_exsplus128_description = Xorshift128plus pseudo random number generator for Erlang.
+pkg_exsplus128_homepage = https://github.com/jj1bdx/exsplus128
+pkg_exsplus128_fetch = git
+pkg_exsplus128_repo = https://github.com/jj1bdx/exsplus128
+pkg_exsplus128_commit = master
+
+PACKAGES += ezmq
+pkg_ezmq_name = ezmq
+pkg_ezmq_description = ZeroMQ implemented in Erlang
+pkg_ezmq_homepage = https://github.com/RoadRunnr/ezmq
+pkg_ezmq_fetch = git
+pkg_ezmq_repo = https://github.com/RoadRunnr/ezmq
+pkg_ezmq_commit = master
+
+PACKAGES += ezmtp
+pkg_ezmtp_name = ezmtp
+pkg_ezmtp_description = ZMTP protocol in pure Erlang.
+pkg_ezmtp_homepage = https://github.com/a13x/ezmtp
+pkg_ezmtp_fetch = git
+pkg_ezmtp_repo = https://github.com/a13x/ezmtp
+pkg_ezmtp_commit = master
+
+PACKAGES += fast_disk_log
+pkg_fast_disk_log_name = fast_disk_log
+pkg_fast_disk_log_description = Pool-based asynchronous Erlang disk logger
+pkg_fast_disk_log_homepage = https://github.com/lpgauth/fast_disk_log
+pkg_fast_disk_log_fetch = git
+pkg_fast_disk_log_repo = https://github.com/lpgauth/fast_disk_log
+pkg_fast_disk_log_commit = master
+
+PACKAGES += feeder
+pkg_feeder_name = feeder
+pkg_feeder_description = Stream parse RSS and Atom formatted XML feeds.
+pkg_feeder_homepage = https://github.com/michaelnisi/feeder
+pkg_feeder_fetch = git
+pkg_feeder_repo = https://github.com/michaelnisi/feeder
+pkg_feeder_commit = master
+
+PACKAGES += find_crate
+pkg_find_crate_name = find_crate
+pkg_find_crate_description = Find Rust libs and exes in Erlang application priv directory
+pkg_find_crate_homepage = https://github.com/goertzenator/find_crate
+pkg_find_crate_fetch = git
+pkg_find_crate_repo = https://github.com/goertzenator/find_crate
+pkg_find_crate_commit = master
+
+PACKAGES += fix
+pkg_fix_name = fix
+pkg_fix_description = FIX protocol (http://fixprotocol.org/) implementation.
+pkg_fix_homepage = https://github.com/maxlapshin/fix
+pkg_fix_fetch = git
+pkg_fix_repo = https://github.com/maxlapshin/fix
+pkg_fix_commit = master
+
+PACKAGES += flower
+pkg_flower_name = flower
+pkg_flower_description = FlowER - an Erlang OpenFlow development platform
+pkg_flower_homepage = https://github.com/travelping/flower
+pkg_flower_fetch = git
+pkg_flower_repo = https://github.com/travelping/flower
+pkg_flower_commit = master
+
+PACKAGES += fn
+pkg_fn_name = fn
+pkg_fn_description = Function utilities for Erlang
+pkg_fn_homepage = https://github.com/reiddraper/fn
+pkg_fn_fetch = git
+pkg_fn_repo = https://github.com/reiddraper/fn
+pkg_fn_commit = master
+
+PACKAGES += folsom
+pkg_folsom_name = folsom
+pkg_folsom_description = Expose Erlang Events and Metrics
+pkg_folsom_homepage = https://github.com/boundary/folsom
+pkg_folsom_fetch = git
+pkg_folsom_repo = https://github.com/boundary/folsom
+pkg_folsom_commit = master
+
+PACKAGES += folsom_cowboy
+pkg_folsom_cowboy_name = folsom_cowboy
+pkg_folsom_cowboy_description = A Cowboy-based Folsom HTTP wrapper.
+pkg_folsom_cowboy_homepage = https://github.com/boundary/folsom_cowboy
+pkg_folsom_cowboy_fetch = git
+pkg_folsom_cowboy_repo = https://github.com/boundary/folsom_cowboy
+pkg_folsom_cowboy_commit = master
+
+PACKAGES += folsomite
+pkg_folsomite_name = folsomite
+pkg_folsomite_description = blow up your graphite / riemann server with folsom metrics
+pkg_folsomite_homepage = https://github.com/campanja/folsomite
+pkg_folsomite_fetch = git
+pkg_folsomite_repo = https://github.com/campanja/folsomite
+pkg_folsomite_commit = master
+
+PACKAGES += fs
+pkg_fs_name = fs
+pkg_fs_description = Erlang FileSystem Listener
+pkg_fs_homepage = https://github.com/synrc/fs
+pkg_fs_fetch = git
+pkg_fs_repo = https://github.com/synrc/fs
+pkg_fs_commit = master
+
+PACKAGES += fuse
+pkg_fuse_name = fuse
+pkg_fuse_description = A Circuit Breaker for Erlang
+pkg_fuse_homepage = https://github.com/jlouis/fuse
+pkg_fuse_fetch = git
+pkg_fuse_repo = https://github.com/jlouis/fuse
+pkg_fuse_commit = master
+
+PACKAGES += gcm
+pkg_gcm_name = gcm
+pkg_gcm_description = An Erlang application for Google Cloud Messaging
+pkg_gcm_homepage = https://github.com/pdincau/gcm-erlang
+pkg_gcm_fetch = git
+pkg_gcm_repo = https://github.com/pdincau/gcm-erlang
+pkg_gcm_commit = master
+
+PACKAGES += gcprof
+pkg_gcprof_name = gcprof
+pkg_gcprof_description = Garbage Collection profiler for Erlang
+pkg_gcprof_homepage = https://github.com/knutin/gcprof
+pkg_gcprof_fetch = git
+pkg_gcprof_repo = https://github.com/knutin/gcprof
+pkg_gcprof_commit = master
+
+PACKAGES += geas
+pkg_geas_name = geas
+pkg_geas_description = Guess Erlang Application Scattering
+pkg_geas_homepage = https://github.com/crownedgrouse/geas
+pkg_geas_fetch = git
+pkg_geas_repo = https://github.com/crownedgrouse/geas
+pkg_geas_commit = master
+
+PACKAGES += geef
+pkg_geef_name = geef
+pkg_geef_description = Git NEEEEF (Erlang NIF)
+pkg_geef_homepage = https://github.com/carlosmn/geef
+pkg_geef_fetch = git
+pkg_geef_repo = https://github.com/carlosmn/geef
+pkg_geef_commit = master
+
+PACKAGES += gen_coap
+pkg_gen_coap_name = gen_coap
+pkg_gen_coap_description = Generic Erlang CoAP Client/Server
+pkg_gen_coap_homepage = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_fetch = git
+pkg_gen_coap_repo = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_commit = master
+
+PACKAGES += gen_cycle
+pkg_gen_cycle_name = gen_cycle
+pkg_gen_cycle_description = Simple, generic OTP behaviour for recurring tasks
+pkg_gen_cycle_homepage = https://github.com/aerosol/gen_cycle
+pkg_gen_cycle_fetch = git
+pkg_gen_cycle_repo = https://github.com/aerosol/gen_cycle
+pkg_gen_cycle_commit = develop
+
+PACKAGES += gen_icmp
+pkg_gen_icmp_name = gen_icmp
+pkg_gen_icmp_description = Erlang interface to ICMP sockets
+pkg_gen_icmp_homepage = https://github.com/msantos/gen_icmp
+pkg_gen_icmp_fetch = git
+pkg_gen_icmp_repo = https://github.com/msantos/gen_icmp
+pkg_gen_icmp_commit = master
+
+PACKAGES += gen_leader
+pkg_gen_leader_name = gen_leader
+pkg_gen_leader_description = leader election behavior
+pkg_gen_leader_homepage = https://github.com/garret-smith/gen_leader_revival
+pkg_gen_leader_fetch = git
+pkg_gen_leader_repo = https://github.com/garret-smith/gen_leader_revival
+pkg_gen_leader_commit = master
+
+PACKAGES += gen_nb_server
+pkg_gen_nb_server_name = gen_nb_server
+pkg_gen_nb_server_description = OTP behavior for writing non-blocking servers
+pkg_gen_nb_server_homepage = https://github.com/kevsmith/gen_nb_server
+pkg_gen_nb_server_fetch = git
+pkg_gen_nb_server_repo = https://github.com/kevsmith/gen_nb_server
+pkg_gen_nb_server_commit = master
+
+PACKAGES += gen_paxos
+pkg_gen_paxos_name = gen_paxos
+pkg_gen_paxos_description = An Erlang/OTP-style implementation of the PAXOS distributed consensus protocol
+pkg_gen_paxos_homepage = https://github.com/gburd/gen_paxos
+pkg_gen_paxos_fetch = git
+pkg_gen_paxos_repo = https://github.com/gburd/gen_paxos
+pkg_gen_paxos_commit = master
+
+PACKAGES += gen_rpc
+pkg_gen_rpc_name = gen_rpc
+pkg_gen_rpc_description = A scalable RPC library for Erlang-VM based languages
+pkg_gen_rpc_homepage = https://github.com/priestjim/gen_rpc
+pkg_gen_rpc_fetch = git
+pkg_gen_rpc_repo = https://github.com/priestjim/gen_rpc.git
+pkg_gen_rpc_commit = master
+
+PACKAGES += gen_smtp
+pkg_gen_smtp_name = gen_smtp
+pkg_gen_smtp_description = A generic Erlang SMTP server and client that can be extended via callback modules
+pkg_gen_smtp_homepage = https://github.com/Vagabond/gen_smtp
+pkg_gen_smtp_fetch = git
+pkg_gen_smtp_repo = https://github.com/Vagabond/gen_smtp
+pkg_gen_smtp_commit = master
+
+PACKAGES += gen_tracker
+pkg_gen_tracker_name = gen_tracker
+pkg_gen_tracker_description = Supervisor with ETS handling of children and their metadata
+pkg_gen_tracker_homepage = https://github.com/erlyvideo/gen_tracker
+pkg_gen_tracker_fetch = git
+pkg_gen_tracker_repo = https://github.com/erlyvideo/gen_tracker
+pkg_gen_tracker_commit = master
+
+PACKAGES += gen_unix
+pkg_gen_unix_name = gen_unix
+pkg_gen_unix_description = Erlang Unix socket interface
+pkg_gen_unix_homepage = https://github.com/msantos/gen_unix
+pkg_gen_unix_fetch = git
+pkg_gen_unix_repo = https://github.com/msantos/gen_unix
+pkg_gen_unix_commit = master
+
+PACKAGES += geode
+pkg_geode_name = geode
+pkg_geode_description = geohash/proximity lookup in pure, uncut Erlang.
+pkg_geode_homepage = https://github.com/bradfordw/geode
+pkg_geode_fetch = git
+pkg_geode_repo = https://github.com/bradfordw/geode
+pkg_geode_commit = master
+
+PACKAGES += getopt
+pkg_getopt_name = getopt
+pkg_getopt_description = Module to parse command line arguments using the GNU getopt syntax
+pkg_getopt_homepage = https://github.com/jcomellas/getopt
+pkg_getopt_fetch = git
+pkg_getopt_repo = https://github.com/jcomellas/getopt
+pkg_getopt_commit = master
+
+PACKAGES += gettext
+pkg_gettext_name = gettext
+pkg_gettext_description = Erlang internationalization library.
+pkg_gettext_homepage = https://github.com/etnt/gettext
+pkg_gettext_fetch = git
+pkg_gettext_repo = https://github.com/etnt/gettext
+pkg_gettext_commit = master
+
+PACKAGES += giallo
+pkg_giallo_name = giallo
+pkg_giallo_description = Small and flexible web framework on top of Cowboy
+pkg_giallo_homepage = https://github.com/kivra/giallo
+pkg_giallo_fetch = git
+pkg_giallo_repo = https://github.com/kivra/giallo
+pkg_giallo_commit = master
+
+PACKAGES += gin
+pkg_gin_name = gin
+pkg_gin_description = Guard helpers for Erlang, implemented as a parse_transform
+pkg_gin_homepage = https://github.com/mad-cocktail/gin
+pkg_gin_fetch = git
+pkg_gin_repo = https://github.com/mad-cocktail/gin
+pkg_gin_commit = master
+
+PACKAGES += gitty
+pkg_gitty_name = gitty
+pkg_gitty_description = Git access in Erlang
+pkg_gitty_homepage = https://github.com/maxlapshin/gitty
+pkg_gitty_fetch = git
+pkg_gitty_repo = https://github.com/maxlapshin/gitty
+pkg_gitty_commit = master
+
+PACKAGES += gold_fever
+pkg_gold_fever_name = gold_fever
+pkg_gold_fever_description = A Treasure Hunt for Erlangers
+pkg_gold_fever_homepage = https://github.com/inaka/gold_fever
+pkg_gold_fever_fetch = git
+pkg_gold_fever_repo = https://github.com/inaka/gold_fever
+pkg_gold_fever_commit = master
+
+PACKAGES += gpb
+pkg_gpb_name = gpb
+pkg_gpb_description = A Google Protobuf implementation for Erlang
+pkg_gpb_homepage = https://github.com/tomas-abrahamsson/gpb
+pkg_gpb_fetch = git
+pkg_gpb_repo = https://github.com/tomas-abrahamsson/gpb
+pkg_gpb_commit = master
+
+PACKAGES += gproc
+pkg_gproc_name = gproc
+pkg_gproc_description = Extended process registry for Erlang
+pkg_gproc_homepage = https://github.com/uwiger/gproc
+pkg_gproc_fetch = git
+pkg_gproc_repo = https://github.com/uwiger/gproc
+pkg_gproc_commit = master
+
+PACKAGES += grapherl
+pkg_grapherl_name = grapherl
+pkg_grapherl_description = Create graphs of Erlang systems and programs
+pkg_grapherl_homepage = https://github.com/eproxus/grapherl
+pkg_grapherl_fetch = git
+pkg_grapherl_repo = https://github.com/eproxus/grapherl
+pkg_grapherl_commit = master
+
+PACKAGES += grpc
+pkg_grpc_name = grpc
+pkg_grpc_description = gRPC server in Erlang
+pkg_grpc_homepage = https://github.com/Bluehouse-Technology/grpc
+pkg_grpc_fetch = git
+pkg_grpc_repo = https://github.com/Bluehouse-Technology/grpc
+pkg_grpc_commit = master
+
+PACKAGES += grpc_client
+pkg_grpc_client_name = grpc_client
+pkg_grpc_client_description = gRPC client in Erlang
+pkg_grpc_client_homepage = https://github.com/Bluehouse-Technology/grpc_client
+pkg_grpc_client_fetch = git
+pkg_grpc_client_repo = https://github.com/Bluehouse-Technology/grpc_client
+pkg_grpc_client_commit = master
+
+PACKAGES += gun
+pkg_gun_name = gun
+pkg_gun_description = Asynchronous SPDY, HTTP and Websocket client written in Erlang.
+pkg_gun_homepage = http://ninenines.eu
+pkg_gun_fetch = git
+pkg_gun_repo = https://github.com/ninenines/gun
+pkg_gun_commit = master
+
+PACKAGES += gut
+pkg_gut_name = gut
+pkg_gut_description = gut is a template printing (aka scaffolding) tool for Erlang, like rails generate or yeoman.
+pkg_gut_homepage = https://github.com/unbalancedparentheses/gut
+pkg_gut_fetch = git
+pkg_gut_repo = https://github.com/unbalancedparentheses/gut
+pkg_gut_commit = master
+
+PACKAGES += hackney
+pkg_hackney_name = hackney
+pkg_hackney_description = simple HTTP client in Erlang
+pkg_hackney_homepage = https://github.com/benoitc/hackney
+pkg_hackney_fetch = git
+pkg_hackney_repo = https://github.com/benoitc/hackney
+pkg_hackney_commit = master
+
+PACKAGES += hamcrest
+pkg_hamcrest_name = hamcrest
+pkg_hamcrest_description = Erlang port of Hamcrest
+pkg_hamcrest_homepage = https://github.com/hyperthunk/hamcrest-erlang
+pkg_hamcrest_fetch = git
+pkg_hamcrest_repo = https://github.com/hyperthunk/hamcrest-erlang
+pkg_hamcrest_commit = master
+
+PACKAGES += hanoidb
+pkg_hanoidb_name = hanoidb
+pkg_hanoidb_description = Erlang LSM BTree Storage
+pkg_hanoidb_homepage = https://github.com/krestenkrab/hanoidb
+pkg_hanoidb_fetch = git
+pkg_hanoidb_repo = https://github.com/krestenkrab/hanoidb
+pkg_hanoidb_commit = master
+
+PACKAGES += hottub
+pkg_hottub_name = hottub
+pkg_hottub_description = Permanent Erlang Worker Pool
+pkg_hottub_homepage = https://github.com/bfrog/hottub
+pkg_hottub_fetch = git
+pkg_hottub_repo = https://github.com/bfrog/hottub
+pkg_hottub_commit = master
+
+PACKAGES += hpack
+pkg_hpack_name = hpack
+pkg_hpack_description = HPACK Implementation for Erlang
+pkg_hpack_homepage = https://github.com/joedevivo/hpack
+pkg_hpack_fetch = git
+pkg_hpack_repo = https://github.com/joedevivo/hpack
+pkg_hpack_commit = master
+
+PACKAGES += hyper
+pkg_hyper_name = hyper
+pkg_hyper_description = Erlang implementation of HyperLogLog
+pkg_hyper_homepage = https://github.com/GameAnalytics/hyper
+pkg_hyper_fetch = git
+pkg_hyper_repo = https://github.com/GameAnalytics/hyper
+pkg_hyper_commit = master
+
+PACKAGES += i18n
+pkg_i18n_name = i18n
+pkg_i18n_description = International components for unicode from Erlang (unicode, date, string, number, format, locale, localization, transliteration, icu4e)
+pkg_i18n_homepage = https://github.com/erlang-unicode/i18n
+pkg_i18n_fetch = git
+pkg_i18n_repo = https://github.com/erlang-unicode/i18n
+pkg_i18n_commit = master
+
+PACKAGES += ibrowse
+pkg_ibrowse_name = ibrowse
+pkg_ibrowse_description = Erlang HTTP client
+pkg_ibrowse_homepage = https://github.com/cmullaparthi/ibrowse
+pkg_ibrowse_fetch = git
+pkg_ibrowse_repo = https://github.com/cmullaparthi/ibrowse
+pkg_ibrowse_commit = master
+
+PACKAGES += idna
+pkg_idna_name = idna
+pkg_idna_description = Erlang IDNA lib
+pkg_idna_homepage = https://github.com/benoitc/erlang-idna
+pkg_idna_fetch = git
+pkg_idna_repo = https://github.com/benoitc/erlang-idna
+pkg_idna_commit = master
+
+PACKAGES += ierlang
+pkg_ierlang_name = ierlang
+pkg_ierlang_description = An Erlang language kernel for IPython.
+pkg_ierlang_homepage = https://github.com/robbielynch/ierlang
+pkg_ierlang_fetch = git
+pkg_ierlang_repo = https://github.com/robbielynch/ierlang
+pkg_ierlang_commit = master
+
+PACKAGES += iota
+pkg_iota_name = iota
+pkg_iota_description = iota (Inter-dependency Objective Testing Apparatus) - a tool to enforce clean separation of responsibilities in Erlang code
+pkg_iota_homepage = https://github.com/jpgneves/iota
+pkg_iota_fetch = git
+pkg_iota_repo = https://github.com/jpgneves/iota
+pkg_iota_commit = master
+
+PACKAGES += irc_lib
+pkg_irc_lib_name = irc_lib
+pkg_irc_lib_description = Erlang IRC client library
+pkg_irc_lib_homepage = https://github.com/OtpChatBot/irc_lib
+pkg_irc_lib_fetch = git
+pkg_irc_lib_repo = https://github.com/OtpChatBot/irc_lib
+pkg_irc_lib_commit = master
+
+PACKAGES += ircd
+pkg_ircd_name = ircd
+pkg_ircd_description = A pluggable IRC daemon application/library for Erlang.
+pkg_ircd_homepage = https://github.com/tonyg/erlang-ircd
+pkg_ircd_fetch = git
+pkg_ircd_repo = https://github.com/tonyg/erlang-ircd
+pkg_ircd_commit = master
+
+PACKAGES += iris
+pkg_iris_name = iris
+pkg_iris_description = Iris Erlang binding
+pkg_iris_homepage = https://github.com/project-iris/iris-erl
+pkg_iris_fetch = git
+pkg_iris_repo = https://github.com/project-iris/iris-erl
+pkg_iris_commit = master
+
+PACKAGES += iso8601
+pkg_iso8601_name = iso8601
+pkg_iso8601_description = Erlang ISO 8601 date formatter/parser
+pkg_iso8601_homepage = https://github.com/seansawyer/erlang_iso8601
+pkg_iso8601_fetch = git
+pkg_iso8601_repo = https://github.com/seansawyer/erlang_iso8601
+pkg_iso8601_commit = master
+
+PACKAGES += jamdb_sybase
+pkg_jamdb_sybase_name = jamdb_sybase
+pkg_jamdb_sybase_description = Erlang driver for SAP Sybase ASE
+pkg_jamdb_sybase_homepage = https://github.com/erlangbureau/jamdb_sybase
+pkg_jamdb_sybase_fetch = git
+pkg_jamdb_sybase_repo = https://github.com/erlangbureau/jamdb_sybase
+pkg_jamdb_sybase_commit = master
+
+PACKAGES += jerg
+pkg_jerg_name = jerg
+pkg_jerg_description = JSON Schema to Erlang Records Generator
+pkg_jerg_homepage = https://github.com/ddossot/jerg
+pkg_jerg_fetch = git
+pkg_jerg_repo = https://github.com/ddossot/jerg
+pkg_jerg_commit = master
+
+PACKAGES += jesse
+pkg_jesse_name = jesse
+pkg_jesse_description = jesse (JSON Schema Erlang) is an implementation of a JSON Schema validator for Erlang.
+pkg_jesse_homepage = https://github.com/for-GET/jesse
+pkg_jesse_fetch = git
+pkg_jesse_repo = https://github.com/for-GET/jesse
+pkg_jesse_commit = master
+
+PACKAGES += jiffy
+pkg_jiffy_name = jiffy
+pkg_jiffy_description = JSON NIFs for Erlang.
+pkg_jiffy_homepage = https://github.com/davisp/jiffy
+pkg_jiffy_fetch = git
+pkg_jiffy_repo = https://github.com/davisp/jiffy
+pkg_jiffy_commit = master
+
+PACKAGES += jiffy_v
+pkg_jiffy_v_name = jiffy_v
+pkg_jiffy_v_description = JSON validation utility
+pkg_jiffy_v_homepage = https://github.com/shizzard/jiffy-v
+pkg_jiffy_v_fetch = git
+pkg_jiffy_v_repo = https://github.com/shizzard/jiffy-v
+pkg_jiffy_v_commit = master
+
+PACKAGES += jobs
+pkg_jobs_name = jobs
+pkg_jobs_description = A job scheduler for load regulation
+pkg_jobs_homepage = https://github.com/esl/jobs
+pkg_jobs_fetch = git
+pkg_jobs_repo = https://github.com/esl/jobs
+pkg_jobs_commit = master
+
+PACKAGES += joxa
+pkg_joxa_name = joxa
+pkg_joxa_description = A Modern Lisp for the Erlang VM
+pkg_joxa_homepage = https://github.com/joxa/joxa
+pkg_joxa_fetch = git
+pkg_joxa_repo = https://github.com/joxa/joxa
+pkg_joxa_commit = master
+
+PACKAGES += json
+pkg_json_name = json
+pkg_json_description = A high-level JSON library for Erlang (17.0+)
+pkg_json_homepage = https://github.com/talentdeficit/json
+pkg_json_fetch = git
+pkg_json_repo = https://github.com/talentdeficit/json
+pkg_json_commit = master
+
+PACKAGES += json_rec
+pkg_json_rec_name = json_rec
+pkg_json_rec_description = JSON to Erlang record
+pkg_json_rec_homepage = https://github.com/justinkirby/json_rec
+pkg_json_rec_fetch = git
+pkg_json_rec_repo = https://github.com/justinkirby/json_rec
+pkg_json_rec_commit = master
+
+PACKAGES += jsone
+pkg_jsone_name = jsone
+pkg_jsone_description = An Erlang library for encoding and decoding JSON data.
+pkg_jsone_homepage = https://github.com/sile/jsone
+pkg_jsone_fetch = git
+pkg_jsone_repo = https://github.com/sile/jsone.git
+pkg_jsone_commit = master
+
+PACKAGES += jsonerl
+pkg_jsonerl_name = jsonerl
+pkg_jsonerl_description = Yet another, but slightly different, Erlang <-> JSON encoder/decoder
+pkg_jsonerl_homepage = https://github.com/lambder/jsonerl
+pkg_jsonerl_fetch = git
+pkg_jsonerl_repo = https://github.com/lambder/jsonerl
+pkg_jsonerl_commit = master
+
+PACKAGES += jsonpath
+pkg_jsonpath_name = jsonpath
+pkg_jsonpath_description = Fast Erlang JSON data retrieval and updates via JavaScript-like notation
+pkg_jsonpath_homepage = https://github.com/GeneStevens/jsonpath
+pkg_jsonpath_fetch = git
+pkg_jsonpath_repo = https://github.com/GeneStevens/jsonpath
+pkg_jsonpath_commit = master
+
+PACKAGES += jsonx
+pkg_jsonx_name = jsonx
+pkg_jsonx_description = JSONX is an Erlang library for efficiently decoding and encoding JSON, written in C.
+pkg_jsonx_homepage = https://github.com/iskra/jsonx
+pkg_jsonx_fetch = git
+pkg_jsonx_repo = https://github.com/iskra/jsonx
+pkg_jsonx_commit = master
+
+PACKAGES += jsx
+pkg_jsx_name = jsx
+pkg_jsx_description = An Erlang application for consuming, producing and manipulating JSON.
+pkg_jsx_homepage = https://github.com/talentdeficit/jsx
+pkg_jsx_fetch = git
+pkg_jsx_repo = https://github.com/talentdeficit/jsx
+pkg_jsx_commit = master
+
+PACKAGES += kafka
+pkg_kafka_name = kafka
+pkg_kafka_description = Kafka consumer and producer in Erlang
+pkg_kafka_homepage = https://github.com/wooga/kafka-erlang
+pkg_kafka_fetch = git
+pkg_kafka_repo = https://github.com/wooga/kafka-erlang
+pkg_kafka_commit = master
+
+PACKAGES += kafka_protocol
+pkg_kafka_protocol_name = kafka_protocol
+pkg_kafka_protocol_description = Kafka protocol Erlang library
+pkg_kafka_protocol_homepage = https://github.com/klarna/kafka_protocol
+pkg_kafka_protocol_fetch = git
+pkg_kafka_protocol_repo = https://github.com/klarna/kafka_protocol.git
+pkg_kafka_protocol_commit = master
+
+PACKAGES += kai
+pkg_kai_name = kai
+pkg_kai_description = DHT storage by Takeshi Inoue
+pkg_kai_homepage = https://github.com/synrc/kai
+pkg_kai_fetch = git
+pkg_kai_repo = https://github.com/synrc/kai
+pkg_kai_commit = master
+
+PACKAGES += katja
+pkg_katja_name = katja
+pkg_katja_description = A simple Riemann client written in Erlang.
+pkg_katja_homepage = https://github.com/nifoc/katja
+pkg_katja_fetch = git
+pkg_katja_repo = https://github.com/nifoc/katja
+pkg_katja_commit = master
+
+PACKAGES += kdht
+pkg_kdht_name = kdht
+pkg_kdht_description = kdht is an erlang DHT implementation
+pkg_kdht_homepage = https://github.com/kevinlynx/kdht
+pkg_kdht_fetch = git
+pkg_kdht_repo = https://github.com/kevinlynx/kdht
+pkg_kdht_commit = master
+
+PACKAGES += key2value
+pkg_key2value_name = key2value
+pkg_key2value_description = Erlang 2-way map
+pkg_key2value_homepage = https://github.com/okeuday/key2value
+pkg_key2value_fetch = git
+pkg_key2value_repo = https://github.com/okeuday/key2value
+pkg_key2value_commit = master
+
+PACKAGES += keys1value
+pkg_keys1value_name = keys1value
+pkg_keys1value_description = Erlang set associative map for key lists
+pkg_keys1value_homepage = https://github.com/okeuday/keys1value
+pkg_keys1value_fetch = git
+pkg_keys1value_repo = https://github.com/okeuday/keys1value
+pkg_keys1value_commit = master
+
+PACKAGES += kinetic
+pkg_kinetic_name = kinetic
+pkg_kinetic_description = Erlang Kinesis Client
+pkg_kinetic_homepage = https://github.com/AdRoll/kinetic
+pkg_kinetic_fetch = git
+pkg_kinetic_repo = https://github.com/AdRoll/kinetic
+pkg_kinetic_commit = master
+
+PACKAGES += kjell
+pkg_kjell_name = kjell
+pkg_kjell_description = Erlang Shell
+pkg_kjell_homepage = https://github.com/karlll/kjell
+pkg_kjell_fetch = git
+pkg_kjell_repo = https://github.com/karlll/kjell
+pkg_kjell_commit = master
+
+PACKAGES += kraken
+pkg_kraken_name = kraken
+pkg_kraken_description = Distributed Pubsub Server for Realtime Apps
+pkg_kraken_homepage = https://github.com/Asana/kraken
+pkg_kraken_fetch = git
+pkg_kraken_repo = https://github.com/Asana/kraken
+pkg_kraken_commit = master
+
+PACKAGES += kucumberl
+pkg_kucumberl_name = kucumberl
+pkg_kucumberl_description = A pure-erlang, open-source, implementation of Cucumber
+pkg_kucumberl_homepage = https://github.com/openshine/kucumberl
+pkg_kucumberl_fetch = git
+pkg_kucumberl_repo = https://github.com/openshine/kucumberl
+pkg_kucumberl_commit = master
+
+PACKAGES += kvc
+pkg_kvc_name = kvc
+pkg_kvc_description = KVC - Key Value Coding for Erlang data structures
+pkg_kvc_homepage = https://github.com/etrepum/kvc
+pkg_kvc_fetch = git
+pkg_kvc_repo = https://github.com/etrepum/kvc
+pkg_kvc_commit = master
+
+PACKAGES += kvlists
+pkg_kvlists_name = kvlists
+pkg_kvlists_description = Lists of key-value pairs (decoded JSON) in Erlang
+pkg_kvlists_homepage = https://github.com/jcomellas/kvlists
+pkg_kvlists_fetch = git
+pkg_kvlists_repo = https://github.com/jcomellas/kvlists
+pkg_kvlists_commit = master
+
+PACKAGES += kvs
+pkg_kvs_name = kvs
+pkg_kvs_description = Container and Iterator
+pkg_kvs_homepage = https://github.com/synrc/kvs
+pkg_kvs_fetch = git
+pkg_kvs_repo = https://github.com/synrc/kvs
+pkg_kvs_commit = master
+
+PACKAGES += lager
+pkg_lager_name = lager
+pkg_lager_description = A logging framework for Erlang/OTP.
+pkg_lager_homepage = https://github.com/erlang-lager/lager
+pkg_lager_fetch = git
+pkg_lager_repo = https://github.com/erlang-lager/lager
+pkg_lager_commit = master
+
+PACKAGES += lager_amqp_backend
+pkg_lager_amqp_backend_name = lager_amqp_backend
+pkg_lager_amqp_backend_description = AMQP RabbitMQ Lager backend
+pkg_lager_amqp_backend_homepage = https://github.com/jbrisbin/lager_amqp_backend
+pkg_lager_amqp_backend_fetch = git
+pkg_lager_amqp_backend_repo = https://github.com/jbrisbin/lager_amqp_backend
+pkg_lager_amqp_backend_commit = master
+
+PACKAGES += lager_syslog
+pkg_lager_syslog_name = lager_syslog
+pkg_lager_syslog_description = Syslog backend for lager
+pkg_lager_syslog_homepage = https://github.com/erlang-lager/lager_syslog
+pkg_lager_syslog_fetch = git
+pkg_lager_syslog_repo = https://github.com/erlang-lager/lager_syslog
+pkg_lager_syslog_commit = master
+
+PACKAGES += lambdapad
+pkg_lambdapad_name = lambdapad
+pkg_lambdapad_description = Static site generator using Erlang. Yes, Erlang.
+pkg_lambdapad_homepage = https://github.com/gar1t/lambdapad
+pkg_lambdapad_fetch = git
+pkg_lambdapad_repo = https://github.com/gar1t/lambdapad
+pkg_lambdapad_commit = master
+
+PACKAGES += lasp
+pkg_lasp_name = lasp
+pkg_lasp_description = A Language for Distributed, Eventually Consistent Computations
+pkg_lasp_homepage = http://lasp-lang.org/
+pkg_lasp_fetch = git
+pkg_lasp_repo = https://github.com/lasp-lang/lasp
+pkg_lasp_commit = master
+
+PACKAGES += lasse
+pkg_lasse_name = lasse
+pkg_lasse_description = SSE handler for Cowboy
+pkg_lasse_homepage = https://github.com/inaka/lasse
+pkg_lasse_fetch = git
+pkg_lasse_repo = https://github.com/inaka/lasse
+pkg_lasse_commit = master
+
+PACKAGES += ldap
+pkg_ldap_name = ldap
+pkg_ldap_description = LDAP server written in Erlang
+pkg_ldap_homepage = https://github.com/spawnproc/ldap
+pkg_ldap_fetch = git
+pkg_ldap_repo = https://github.com/spawnproc/ldap
+pkg_ldap_commit = master
+
+PACKAGES += lethink
+pkg_lethink_name = lethink
+pkg_lethink_description = erlang driver for rethinkdb
+pkg_lethink_homepage = https://github.com/taybin/lethink
+pkg_lethink_fetch = git
+pkg_lethink_repo = https://github.com/taybin/lethink
+pkg_lethink_commit = master
+
+PACKAGES += lfe
+pkg_lfe_name = lfe
+pkg_lfe_description = Lisp Flavoured Erlang (LFE)
+pkg_lfe_homepage = https://github.com/rvirding/lfe
+pkg_lfe_fetch = git
+pkg_lfe_repo = https://github.com/rvirding/lfe
+pkg_lfe_commit = master
+
+PACKAGES += ling
+pkg_ling_name = ling
+pkg_ling_description = Erlang on Xen
+pkg_ling_homepage = https://github.com/cloudozer/ling
+pkg_ling_fetch = git
+pkg_ling_repo = https://github.com/cloudozer/ling
+pkg_ling_commit = master
+
+PACKAGES += live
+pkg_live_name = live
+pkg_live_description = Automated module and configuration reloader.
+pkg_live_homepage = http://ninenines.eu
+pkg_live_fetch = git
+pkg_live_repo = https://github.com/ninenines/live
+pkg_live_commit = master
+
+PACKAGES += lmq
+pkg_lmq_name = lmq
+pkg_lmq_description = Lightweight Message Queue
+pkg_lmq_homepage = https://github.com/iij/lmq
+pkg_lmq_fetch = git
+pkg_lmq_repo = https://github.com/iij/lmq
+pkg_lmq_commit = master
+
+PACKAGES += locker
+pkg_locker_name = locker
+pkg_locker_description = Atomic distributed 'check and set' for short-lived keys
+pkg_locker_homepage = https://github.com/wooga/locker
+pkg_locker_fetch = git
+pkg_locker_repo = https://github.com/wooga/locker
+pkg_locker_commit = master
+
+PACKAGES += locks
+pkg_locks_name = locks
+pkg_locks_description = A scalable, deadlock-resolving resource locker
+pkg_locks_homepage = https://github.com/uwiger/locks
+pkg_locks_fetch = git
+pkg_locks_repo = https://github.com/uwiger/locks
+pkg_locks_commit = master
+
+PACKAGES += log4erl
+pkg_log4erl_name = log4erl
+pkg_log4erl_description = A logger for erlang in the spirit of Log4J.
+pkg_log4erl_homepage = https://github.com/ahmednawras/log4erl
+pkg_log4erl_fetch = git
+pkg_log4erl_repo = https://github.com/ahmednawras/log4erl
+pkg_log4erl_commit = master
+
+PACKAGES += lol
+pkg_lol_name = lol
+pkg_lol_description = Lisp on erLang, and programming is fun again
+pkg_lol_homepage = https://github.com/b0oh/lol
+pkg_lol_fetch = git
+pkg_lol_repo = https://github.com/b0oh/lol
+pkg_lol_commit = master
+
+PACKAGES += lucid
+pkg_lucid_name = lucid
+pkg_lucid_description = HTTP/2 server written in Erlang
+pkg_lucid_homepage = https://github.com/tatsuhiro-t/lucid
+pkg_lucid_fetch = git
+pkg_lucid_repo = https://github.com/tatsuhiro-t/lucid
+pkg_lucid_commit = master
+
+PACKAGES += luerl
+pkg_luerl_name = luerl
+pkg_luerl_description = Lua in Erlang
+pkg_luerl_homepage = https://github.com/rvirding/luerl
+pkg_luerl_fetch = git
+pkg_luerl_repo = https://github.com/rvirding/luerl
+pkg_luerl_commit = develop
+
+PACKAGES += luwak
+pkg_luwak_name = luwak
+pkg_luwak_description = Large-object storage interface for Riak
+pkg_luwak_homepage = https://github.com/basho/luwak
+pkg_luwak_fetch = git
+pkg_luwak_repo = https://github.com/basho/luwak
+pkg_luwak_commit = master
+
+PACKAGES += lux
+pkg_lux_name = lux
+pkg_lux_description = Lux (LUcid eXpect scripting) simplifies test automation and provides an Expect-style execution of commands
+pkg_lux_homepage = https://github.com/hawk/lux
+pkg_lux_fetch = git
+pkg_lux_repo = https://github.com/hawk/lux
+pkg_lux_commit = master
+
+PACKAGES += machi
+pkg_machi_name = machi
+pkg_machi_description = Machi file store
+pkg_machi_homepage = https://github.com/basho/machi
+pkg_machi_fetch = git
+pkg_machi_repo = https://github.com/basho/machi
+pkg_machi_commit = master
+
+PACKAGES += mad
+pkg_mad_name = mad
+pkg_mad_description = Small and Fast Rebar Replacement
+pkg_mad_homepage = https://github.com/synrc/mad
+pkg_mad_fetch = git
+pkg_mad_repo = https://github.com/synrc/mad
+pkg_mad_commit = master
+
+PACKAGES += marina
+pkg_marina_name = marina
+pkg_marina_description = Non-blocking Erlang Cassandra CQL3 client
+pkg_marina_homepage = https://github.com/lpgauth/marina
+pkg_marina_fetch = git
+pkg_marina_repo = https://github.com/lpgauth/marina
+pkg_marina_commit = master
+
+PACKAGES += mavg
+pkg_mavg_name = mavg
+pkg_mavg_description = Erlang :: Exponential moving average library
+pkg_mavg_homepage = https://github.com/EchoTeam/mavg
+pkg_mavg_fetch = git
+pkg_mavg_repo = https://github.com/EchoTeam/mavg
+pkg_mavg_commit = master
+
+PACKAGES += mc_erl
+pkg_mc_erl_name = mc_erl
+pkg_mc_erl_description = mc-erl is a server for Minecraft 1.4.7 written in Erlang.
+pkg_mc_erl_homepage = https://github.com/clonejo/mc-erl
+pkg_mc_erl_fetch = git
+pkg_mc_erl_repo = https://github.com/clonejo/mc-erl
+pkg_mc_erl_commit = master
+
+PACKAGES += mcd
+pkg_mcd_name = mcd
+pkg_mcd_description = Fast memcached protocol client in pure Erlang
+pkg_mcd_homepage = https://github.com/EchoTeam/mcd
+pkg_mcd_fetch = git
+pkg_mcd_repo = https://github.com/EchoTeam/mcd
+pkg_mcd_commit = master
+
+PACKAGES += mcerlang
+pkg_mcerlang_name = mcerlang
+pkg_mcerlang_description = The McErlang model checker for Erlang
+pkg_mcerlang_homepage = https://github.com/fredlund/McErlang
+pkg_mcerlang_fetch = git
+pkg_mcerlang_repo = https://github.com/fredlund/McErlang
+pkg_mcerlang_commit = master
+
+PACKAGES += meck
+pkg_meck_name = meck
+pkg_meck_description = A mocking library for Erlang
+pkg_meck_homepage = https://github.com/eproxus/meck
+pkg_meck_fetch = git
+pkg_meck_repo = https://github.com/eproxus/meck
+pkg_meck_commit = master
+
+PACKAGES += mekao
+pkg_mekao_name = mekao
+pkg_mekao_description = SQL constructor
+pkg_mekao_homepage = https://github.com/ddosia/mekao
+pkg_mekao_fetch = git
+pkg_mekao_repo = https://github.com/ddosia/mekao
+pkg_mekao_commit = master
+
+PACKAGES += memo
+pkg_memo_name = memo
+pkg_memo_description = Erlang memoization server
+pkg_memo_homepage = https://github.com/tuncer/memo
+pkg_memo_fetch = git
+pkg_memo_repo = https://github.com/tuncer/memo
+pkg_memo_commit = master
+
+PACKAGES += merge_index
+pkg_merge_index_name = merge_index
+pkg_merge_index_description = MergeIndex is an Erlang library for storing ordered sets on disk. It is very similar to an SSTable (in Google's Bigtable) or an HFile (in Hadoop).
+pkg_merge_index_homepage = https://github.com/basho/merge_index
+pkg_merge_index_fetch = git
+pkg_merge_index_repo = https://github.com/basho/merge_index
+pkg_merge_index_commit = master
+
+PACKAGES += merl
+pkg_merl_name = merl
+pkg_merl_description = Metaprogramming in Erlang
+pkg_merl_homepage = https://github.com/richcarl/merl
+pkg_merl_fetch = git
+pkg_merl_repo = https://github.com/richcarl/merl
+pkg_merl_commit = master
+
+PACKAGES += mimerl
+pkg_mimerl_name = mimerl
+pkg_mimerl_description = library to handle mimetypes
+pkg_mimerl_homepage = https://github.com/benoitc/mimerl
+pkg_mimerl_fetch = git
+pkg_mimerl_repo = https://github.com/benoitc/mimerl
+pkg_mimerl_commit = master
+
+PACKAGES += mimetypes
+pkg_mimetypes_name = mimetypes
+pkg_mimetypes_description = Erlang MIME types library
+pkg_mimetypes_homepage = https://github.com/spawngrid/mimetypes
+pkg_mimetypes_fetch = git
+pkg_mimetypes_repo = https://github.com/spawngrid/mimetypes
+pkg_mimetypes_commit = master
+
+PACKAGES += mixer
+pkg_mixer_name = mixer
+pkg_mixer_description = Mix in functions from other modules
+pkg_mixer_homepage = https://github.com/chef/mixer
+pkg_mixer_fetch = git
+pkg_mixer_repo = https://github.com/chef/mixer
+pkg_mixer_commit = master
+
+PACKAGES += mochiweb
+pkg_mochiweb_name = mochiweb
+pkg_mochiweb_description = MochiWeb is an Erlang library for building lightweight HTTP servers.
+pkg_mochiweb_homepage = https://github.com/mochi/mochiweb
+pkg_mochiweb_fetch = git
+pkg_mochiweb_repo = https://github.com/mochi/mochiweb
+pkg_mochiweb_commit = master
+
+PACKAGES += mochiweb_xpath
+pkg_mochiweb_xpath_name = mochiweb_xpath
+pkg_mochiweb_xpath_description = XPath support for mochiweb's html parser
+pkg_mochiweb_xpath_homepage = https://github.com/retnuh/mochiweb_xpath
+pkg_mochiweb_xpath_fetch = git
+pkg_mochiweb_xpath_repo = https://github.com/retnuh/mochiweb_xpath
+pkg_mochiweb_xpath_commit = master
+
+PACKAGES += mockgyver
+pkg_mockgyver_name = mockgyver
+pkg_mockgyver_description = A mocking library for Erlang
+pkg_mockgyver_homepage = https://github.com/klajo/mockgyver
+pkg_mockgyver_fetch = git
+pkg_mockgyver_repo = https://github.com/klajo/mockgyver
+pkg_mockgyver_commit = master
+
+PACKAGES += modlib
+pkg_modlib_name = modlib
+pkg_modlib_description = Web framework based on Erlang's inets httpd
+pkg_modlib_homepage = https://github.com/gar1t/modlib
+pkg_modlib_fetch = git
+pkg_modlib_repo = https://github.com/gar1t/modlib
+pkg_modlib_commit = master
+
+PACKAGES += mongodb
+pkg_mongodb_name = mongodb
+pkg_mongodb_description = MongoDB driver for Erlang
+pkg_mongodb_homepage = https://github.com/comtihon/mongodb-erlang
+pkg_mongodb_fetch = git
+pkg_mongodb_repo = https://github.com/comtihon/mongodb-erlang
+pkg_mongodb_commit = master
+
+PACKAGES += mongooseim
+pkg_mongooseim_name = mongooseim
+pkg_mongooseim_description = Jabber / XMPP server with focus on performance and scalability, by Erlang Solutions
+pkg_mongooseim_homepage = https://www.erlang-solutions.com/products/mongooseim-massively-scalable-ejabberd-platform
+pkg_mongooseim_fetch = git
+pkg_mongooseim_repo = https://github.com/esl/MongooseIM
+pkg_mongooseim_commit = master
+
+PACKAGES += moyo
+pkg_moyo_name = moyo
+pkg_moyo_description = Erlang utility functions library
+pkg_moyo_homepage = https://github.com/dwango/moyo
+pkg_moyo_fetch = git
+pkg_moyo_repo = https://github.com/dwango/moyo
+pkg_moyo_commit = master
+
+PACKAGES += msgpack
+pkg_msgpack_name = msgpack
+pkg_msgpack_description = MessagePack (de)serializer implementation for Erlang
+pkg_msgpack_homepage = https://github.com/msgpack/msgpack-erlang
+pkg_msgpack_fetch = git
+pkg_msgpack_repo = https://github.com/msgpack/msgpack-erlang
+pkg_msgpack_commit = master
+
+PACKAGES += mu2
+pkg_mu2_name = mu2
+pkg_mu2_description = Erlang mutation testing tool
+pkg_mu2_homepage = https://github.com/ramsay-t/mu2
+pkg_mu2_fetch = git
+pkg_mu2_repo = https://github.com/ramsay-t/mu2
+pkg_mu2_commit = master
+
+PACKAGES += mustache
+pkg_mustache_name = mustache
+pkg_mustache_description = Mustache template engine for Erlang.
+pkg_mustache_homepage = https://github.com/mojombo/mustache.erl
+pkg_mustache_fetch = git
+pkg_mustache_repo = https://github.com/mojombo/mustache.erl
+pkg_mustache_commit = master
+
+PACKAGES += myproto
+pkg_myproto_name = myproto
+pkg_myproto_description = MySQL Server Protocol in Erlang
+pkg_myproto_homepage = https://github.com/altenwald/myproto
+pkg_myproto_fetch = git
+pkg_myproto_repo = https://github.com/altenwald/myproto
+pkg_myproto_commit = master
+
+PACKAGES += mysql
+pkg_mysql_name = mysql
+pkg_mysql_description = MySQL client library for Erlang/OTP
+pkg_mysql_homepage = https://github.com/mysql-otp/mysql-otp
+pkg_mysql_fetch = git
+pkg_mysql_repo = https://github.com/mysql-otp/mysql-otp
+pkg_mysql_commit = 1.5.1
+
+PACKAGES += n2o
+pkg_n2o_name = n2o
+pkg_n2o_description = WebSocket Application Server
+pkg_n2o_homepage = https://github.com/5HT/n2o
+pkg_n2o_fetch = git
+pkg_n2o_repo = https://github.com/5HT/n2o
+pkg_n2o_commit = master
+
+PACKAGES += nat_upnp
+pkg_nat_upnp_name = nat_upnp
+pkg_nat_upnp_description = Erlang library to map your internal port to an external one using UPnP IGD
+pkg_nat_upnp_homepage = https://github.com/benoitc/nat_upnp
+pkg_nat_upnp_fetch = git
+pkg_nat_upnp_repo = https://github.com/benoitc/nat_upnp
+pkg_nat_upnp_commit = master
+
+PACKAGES += neo4j
+pkg_neo4j_name = neo4j
+pkg_neo4j_description = Erlang client library for Neo4J.
+pkg_neo4j_homepage = https://github.com/dmitriid/neo4j-erlang
+pkg_neo4j_fetch = git
+pkg_neo4j_repo = https://github.com/dmitriid/neo4j-erlang
+pkg_neo4j_commit = master
+
+PACKAGES += neotoma
+pkg_neotoma_name = neotoma
+pkg_neotoma_description = Erlang library and packrat parser-generator for parsing expression grammars.
+pkg_neotoma_homepage = https://github.com/seancribbs/neotoma
+pkg_neotoma_fetch = git
+pkg_neotoma_repo = https://github.com/seancribbs/neotoma
+pkg_neotoma_commit = master
+
+PACKAGES += newrelic
+pkg_newrelic_name = newrelic
+pkg_newrelic_description = Erlang library for sending metrics to New Relic
+pkg_newrelic_homepage = https://github.com/wooga/newrelic-erlang
+pkg_newrelic_fetch = git
+pkg_newrelic_repo = https://github.com/wooga/newrelic-erlang
+pkg_newrelic_commit = master
+
+PACKAGES += nifty
+pkg_nifty_name = nifty
+pkg_nifty_description = Erlang NIF wrapper generator
+pkg_nifty_homepage = https://github.com/parapluu/nifty
+pkg_nifty_fetch = git
+pkg_nifty_repo = https://github.com/parapluu/nifty
+pkg_nifty_commit = master
+
+PACKAGES += nitrogen_core
+pkg_nitrogen_core_name = nitrogen_core
+pkg_nitrogen_core_description = The core Nitrogen library.
+pkg_nitrogen_core_homepage = http://nitrogenproject.com/
+pkg_nitrogen_core_fetch = git
+pkg_nitrogen_core_repo = https://github.com/nitrogen/nitrogen_core
+pkg_nitrogen_core_commit = master
+
+PACKAGES += nkbase
+pkg_nkbase_name = nkbase
+pkg_nkbase_description = NkBASE distributed database
+pkg_nkbase_homepage = https://github.com/Nekso/nkbase
+pkg_nkbase_fetch = git
+pkg_nkbase_repo = https://github.com/Nekso/nkbase
+pkg_nkbase_commit = develop
+
+PACKAGES += nkdocker
+pkg_nkdocker_name = nkdocker
+pkg_nkdocker_description = Erlang Docker client
+pkg_nkdocker_homepage = https://github.com/Nekso/nkdocker
+pkg_nkdocker_fetch = git
+pkg_nkdocker_repo = https://github.com/Nekso/nkdocker
+pkg_nkdocker_commit = master
+
+PACKAGES += nkpacket
+pkg_nkpacket_name = nkpacket
+pkg_nkpacket_description = Generic Erlang transport layer
+pkg_nkpacket_homepage = https://github.com/Nekso/nkpacket
+pkg_nkpacket_fetch = git
+pkg_nkpacket_repo = https://github.com/Nekso/nkpacket
+pkg_nkpacket_commit = master
+
+PACKAGES += nksip
+pkg_nksip_name = nksip
+pkg_nksip_description = Erlang SIP application server
+pkg_nksip_homepage = https://github.com/kalta/nksip
+pkg_nksip_fetch = git
+pkg_nksip_repo = https://github.com/kalta/nksip
+pkg_nksip_commit = master
+
+PACKAGES += nodefinder
+pkg_nodefinder_name = nodefinder
+pkg_nodefinder_description = automatic node discovery via UDP multicast
+pkg_nodefinder_homepage = https://github.com/erlanger/nodefinder
+pkg_nodefinder_fetch = git
+pkg_nodefinder_repo = https://github.com/okeuday/nodefinder
+pkg_nodefinder_commit = master
+
+PACKAGES += nprocreg
+pkg_nprocreg_name = nprocreg
+pkg_nprocreg_description = Minimal Distributed Erlang Process Registry
+pkg_nprocreg_homepage = http://nitrogenproject.com/
+pkg_nprocreg_fetch = git
+pkg_nprocreg_repo = https://github.com/nitrogen/nprocreg
+pkg_nprocreg_commit = master
+
+PACKAGES += oauth
+pkg_oauth_name = oauth
+pkg_oauth_description = An Erlang OAuth 1.0 implementation
+pkg_oauth_homepage = https://github.com/tim/erlang-oauth
+pkg_oauth_fetch = git
+pkg_oauth_repo = https://github.com/tim/erlang-oauth
+pkg_oauth_commit = master
+
+PACKAGES += oauth2
+pkg_oauth2_name = oauth2
+pkg_oauth2_description = Erlang OAuth2 implementation
+pkg_oauth2_homepage = https://github.com/kivra/oauth2
+pkg_oauth2_fetch = git
+pkg_oauth2_repo = https://github.com/kivra/oauth2
+pkg_oauth2_commit = master
+
+PACKAGES += observer_cli
+pkg_observer_cli_name = observer_cli
+pkg_observer_cli_description = Visualize Erlang/Elixir Nodes On The Command Line
+pkg_observer_cli_homepage = http://zhongwencool.github.io/observer_cli
+pkg_observer_cli_fetch = git
+pkg_observer_cli_repo = https://github.com/zhongwencool/observer_cli
+pkg_observer_cli_commit = master
+
+PACKAGES += octopus
+pkg_octopus_name = octopus
+pkg_octopus_description = Small and flexible pool manager written in Erlang
+pkg_octopus_homepage = https://github.com/erlangbureau/octopus
+pkg_octopus_fetch = git
+pkg_octopus_repo = https://github.com/erlangbureau/octopus
+pkg_octopus_commit = master
+
+PACKAGES += of_protocol
+pkg_of_protocol_name = of_protocol
+pkg_of_protocol_description = OpenFlow Protocol Library for Erlang
+pkg_of_protocol_homepage = https://github.com/FlowForwarding/of_protocol
+pkg_of_protocol_fetch = git
+pkg_of_protocol_repo = https://github.com/FlowForwarding/of_protocol
+pkg_of_protocol_commit = master
+
+PACKAGES += opencouch
+pkg_opencouch_name = couch
+pkg_opencouch_description = An embeddable document-oriented database compatible with Apache CouchDB
+pkg_opencouch_homepage = https://github.com/benoitc/opencouch
+pkg_opencouch_fetch = git
+pkg_opencouch_repo = https://github.com/benoitc/opencouch
+pkg_opencouch_commit = master
+
+PACKAGES += openflow
+pkg_openflow_name = openflow
+pkg_openflow_description = An OpenFlow controller written in pure erlang
+pkg_openflow_homepage = https://github.com/renatoaguiar/erlang-openflow
+pkg_openflow_fetch = git
+pkg_openflow_repo = https://github.com/renatoaguiar/erlang-openflow
+pkg_openflow_commit = master
+
+PACKAGES += openid
+pkg_openid_name = openid
+pkg_openid_description = Erlang OpenID
+pkg_openid_homepage = https://github.com/brendonh/erl_openid
+pkg_openid_fetch = git
+pkg_openid_repo = https://github.com/brendonh/erl_openid
+pkg_openid_commit = master
+
+PACKAGES += openpoker
+pkg_openpoker_name = openpoker
+pkg_openpoker_description = Genesis Texas hold'em Game Server
+pkg_openpoker_homepage = https://github.com/hpyhacking/openpoker
+pkg_openpoker_fetch = git
+pkg_openpoker_repo = https://github.com/hpyhacking/openpoker
+pkg_openpoker_commit = master
+
+PACKAGES += otpbp
+pkg_otpbp_name = otpbp
+pkg_otpbp_description = Parse transform for using new OTP functions in old Erlang/OTP releases (R15, R16, 17, 18, 19)
+pkg_otpbp_homepage = https://github.com/Ledest/otpbp
+pkg_otpbp_fetch = git
+pkg_otpbp_repo = https://github.com/Ledest/otpbp
+pkg_otpbp_commit = master
+
+PACKAGES += pal
+pkg_pal_name = pal
+pkg_pal_description = Pragmatic Authentication Library
+pkg_pal_homepage = https://github.com/manifest/pal
+pkg_pal_fetch = git
+pkg_pal_repo = https://github.com/manifest/pal
+pkg_pal_commit = master
+
+PACKAGES += parse_trans
+pkg_parse_trans_name = parse_trans
+pkg_parse_trans_description = Parse transform utilities for Erlang
+pkg_parse_trans_homepage = https://github.com/uwiger/parse_trans
+pkg_parse_trans_fetch = git
+pkg_parse_trans_repo = https://github.com/uwiger/parse_trans
+pkg_parse_trans_commit = master
+
+PACKAGES += parsexml
+pkg_parsexml_name = parsexml
+pkg_parsexml_description = Simple DOM XML parser with convenient and very simple API
+pkg_parsexml_homepage = https://github.com/maxlapshin/parsexml
+pkg_parsexml_fetch = git
+pkg_parsexml_repo = https://github.com/maxlapshin/parsexml
+pkg_parsexml_commit = master
+
+PACKAGES += partisan
+pkg_partisan_name = partisan
+pkg_partisan_description = High-performance, high-scalability distributed computing with Erlang and Elixir.
+pkg_partisan_homepage = http://partisan.cloud
+pkg_partisan_fetch = git
+pkg_partisan_repo = https://github.com/lasp-lang/partisan
+pkg_partisan_commit = master
+
+PACKAGES += pegjs
+pkg_pegjs_name = pegjs
+pkg_pegjs_description = An implementation of PEG.js grammar for Erlang.
+pkg_pegjs_homepage = https://github.com/dmitriid/pegjs
+pkg_pegjs_fetch = git
+pkg_pegjs_repo = https://github.com/dmitriid/pegjs
+pkg_pegjs_commit = master
+
+PACKAGES += percept2
+pkg_percept2_name = percept2
+pkg_percept2_description = Concurrent profiling tool for Erlang
+pkg_percept2_homepage = https://github.com/huiqing/percept2
+pkg_percept2_fetch = git
+pkg_percept2_repo = https://github.com/huiqing/percept2
+pkg_percept2_commit = master
+
+PACKAGES += pgo
+pkg_pgo_name = pgo
+pkg_pgo_description = Erlang Postgres client and connection pool
+pkg_pgo_homepage = https://github.com/erleans/pgo.git
+pkg_pgo_fetch = git
+pkg_pgo_repo = https://github.com/erleans/pgo.git
+pkg_pgo_commit = master
+
+PACKAGES += pgsql
+pkg_pgsql_name = pgsql
+pkg_pgsql_description = Erlang PostgreSQL driver
+pkg_pgsql_homepage = https://github.com/semiocast/pgsql
+pkg_pgsql_fetch = git
+pkg_pgsql_repo = https://github.com/semiocast/pgsql
+pkg_pgsql_commit = master
+
+PACKAGES += pkgx
+pkg_pkgx_name = pkgx
+pkg_pkgx_description = Build .deb packages from Erlang releases
+pkg_pkgx_homepage = https://github.com/arjan/pkgx
+pkg_pkgx_fetch = git
+pkg_pkgx_repo = https://github.com/arjan/pkgx
+pkg_pkgx_commit = master
+
+PACKAGES += pkt
+pkg_pkt_name = pkt
+pkg_pkt_description = Erlang network protocol library
+pkg_pkt_homepage = https://github.com/msantos/pkt
+pkg_pkt_fetch = git
+pkg_pkt_repo = https://github.com/msantos/pkt
+pkg_pkt_commit = master
+
+PACKAGES += plain_fsm
+pkg_plain_fsm_name = plain_fsm
+pkg_plain_fsm_description = A behaviour/support library for writing plain Erlang FSMs.
+pkg_plain_fsm_homepage = https://github.com/uwiger/plain_fsm
+pkg_plain_fsm_fetch = git
+pkg_plain_fsm_repo = https://github.com/uwiger/plain_fsm
+pkg_plain_fsm_commit = master
+
+PACKAGES += plumtree
+pkg_plumtree_name = plumtree
+pkg_plumtree_description = Epidemic Broadcast Trees
+pkg_plumtree_homepage = https://github.com/helium/plumtree
+pkg_plumtree_fetch = git
+pkg_plumtree_repo = https://github.com/helium/plumtree
+pkg_plumtree_commit = master
+
+PACKAGES += pmod_transform
+pkg_pmod_transform_name = pmod_transform
+pkg_pmod_transform_description = Parse transform for parameterized modules
+pkg_pmod_transform_homepage = https://github.com/erlang/pmod_transform
+pkg_pmod_transform_fetch = git
+pkg_pmod_transform_repo = https://github.com/erlang/pmod_transform
+pkg_pmod_transform_commit = master
+
+PACKAGES += pobox
+pkg_pobox_name = pobox
+pkg_pobox_description = External buffer processes to protect against mailbox overflow in Erlang
+pkg_pobox_homepage = https://github.com/ferd/pobox
+pkg_pobox_fetch = git
+pkg_pobox_repo = https://github.com/ferd/pobox
+pkg_pobox_commit = master
+
+PACKAGES += ponos
+pkg_ponos_name = ponos
+pkg_ponos_description = ponos is a simple yet powerful load generator written in erlang
+pkg_ponos_homepage = https://github.com/klarna/ponos
+pkg_ponos_fetch = git
+pkg_ponos_repo = https://github.com/klarna/ponos
+pkg_ponos_commit = master
+
+PACKAGES += poolboy
+pkg_poolboy_name = poolboy
+pkg_poolboy_description = A hunky Erlang worker pool factory
+pkg_poolboy_homepage = https://github.com/devinus/poolboy
+pkg_poolboy_fetch = git
+pkg_poolboy_repo = https://github.com/devinus/poolboy
+pkg_poolboy_commit = master
+
+PACKAGES += pooler
+pkg_pooler_name = pooler
+pkg_pooler_description = An OTP Process Pool Application
+pkg_pooler_homepage = https://github.com/seth/pooler
+pkg_pooler_fetch = git
+pkg_pooler_repo = https://github.com/seth/pooler
+pkg_pooler_commit = master
+
+PACKAGES += pqueue
+pkg_pqueue_name = pqueue
+pkg_pqueue_description = Erlang Priority Queues
+pkg_pqueue_homepage = https://github.com/okeuday/pqueue
+pkg_pqueue_fetch = git
+pkg_pqueue_repo = https://github.com/okeuday/pqueue
+pkg_pqueue_commit = master
+
+PACKAGES += procket
+pkg_procket_name = procket
+pkg_procket_description = Erlang interface to low level socket operations
+pkg_procket_homepage = http://blog.listincomprehension.com/search/label/procket
+pkg_procket_fetch = git
+pkg_procket_repo = https://github.com/msantos/procket
+pkg_procket_commit = master
+
+PACKAGES += prometheus
+pkg_prometheus_name = prometheus
+pkg_prometheus_description = Prometheus.io client in Erlang
+pkg_prometheus_homepage = https://github.com/deadtrickster/prometheus.erl
+pkg_prometheus_fetch = git
+pkg_prometheus_repo = https://github.com/deadtrickster/prometheus.erl
+pkg_prometheus_commit = master
+
+PACKAGES += prop
+pkg_prop_name = prop
+pkg_prop_description = An Erlang code scaffolding and generator system.
+pkg_prop_homepage = https://github.com/nuex/prop
+pkg_prop_fetch = git
+pkg_prop_repo = https://github.com/nuex/prop
+pkg_prop_commit = master
+
+PACKAGES += proper
+pkg_proper_name = proper
+pkg_proper_description = PropEr: a QuickCheck-inspired property-based testing tool for Erlang.
+pkg_proper_homepage = http://proper.softlab.ntua.gr
+pkg_proper_fetch = git
+pkg_proper_repo = https://github.com/manopapad/proper
+pkg_proper_commit = master
+
+PACKAGES += props
+pkg_props_name = props
+pkg_props_description = Property structure library
+pkg_props_homepage = https://github.com/greyarea/props
+pkg_props_fetch = git
+pkg_props_repo = https://github.com/greyarea/props
+pkg_props_commit = master
+
+PACKAGES += protobuffs
+pkg_protobuffs_name = protobuffs
+pkg_protobuffs_description = An implementation of Google's Protocol Buffers for Erlang, based on ngerakines/erlang_protobuffs.
+pkg_protobuffs_homepage = https://github.com/basho/erlang_protobuffs
+pkg_protobuffs_fetch = git
+pkg_protobuffs_repo = https://github.com/basho/erlang_protobuffs
+pkg_protobuffs_commit = master
+
+PACKAGES += psycho
+pkg_psycho_name = psycho
+pkg_psycho_description = HTTP server that provides a WSGI-like interface for applications and middleware.
+pkg_psycho_homepage = https://github.com/gar1t/psycho
+pkg_psycho_fetch = git
+pkg_psycho_repo = https://github.com/gar1t/psycho
+pkg_psycho_commit = master
+
+PACKAGES += purity
+pkg_purity_name = purity
+pkg_purity_description = A side-effect analyzer for Erlang
+pkg_purity_homepage = https://github.com/mpitid/purity
+pkg_purity_fetch = git
+pkg_purity_repo = https://github.com/mpitid/purity
+pkg_purity_commit = master
+
+PACKAGES += push_service
+pkg_push_service_name = push_service
+pkg_push_service_description = Push service
+pkg_push_service_homepage = https://github.com/hairyhum/push_service
+pkg_push_service_fetch = git
+pkg_push_service_repo = https://github.com/hairyhum/push_service
+pkg_push_service_commit = master
+
+PACKAGES += qdate
+pkg_qdate_name = qdate
+pkg_qdate_description = Date, time, and timezone parsing, formatting, and conversion for Erlang.
+pkg_qdate_homepage = https://github.com/choptastic/qdate
+pkg_qdate_fetch = git
+pkg_qdate_repo = https://github.com/choptastic/qdate
+pkg_qdate_commit = master
+
+PACKAGES += qrcode
+pkg_qrcode_name = qrcode
+pkg_qrcode_description = QR Code encoder in Erlang
+pkg_qrcode_homepage = https://github.com/komone/qrcode
+pkg_qrcode_fetch = git
+pkg_qrcode_repo = https://github.com/komone/qrcode
+pkg_qrcode_commit = master
+
+PACKAGES += quest
+pkg_quest_name = quest
+pkg_quest_description = Learn Erlang through this set of challenges. An interactive system for getting to know Erlang.
+pkg_quest_homepage = https://github.com/eriksoe/ErlangQuest
+pkg_quest_fetch = git
+pkg_quest_repo = https://github.com/eriksoe/ErlangQuest
+pkg_quest_commit = master
+
+PACKAGES += quickrand
+pkg_quickrand_name = quickrand
+pkg_quickrand_description = Quick Erlang Random Number Generation
+pkg_quickrand_homepage = https://github.com/okeuday/quickrand
+pkg_quickrand_fetch = git
+pkg_quickrand_repo = https://github.com/okeuday/quickrand
+pkg_quickrand_commit = master
+
+PACKAGES += rabbit
+pkg_rabbit_name = rabbit
+pkg_rabbit_description = RabbitMQ Server
+pkg_rabbit_homepage = https://www.rabbitmq.com/
+pkg_rabbit_fetch = git
+pkg_rabbit_repo = https://github.com/rabbitmq/rabbitmq-server.git
+pkg_rabbit_commit = master
+
+PACKAGES += rabbit_exchange_type_riak
+pkg_rabbit_exchange_type_riak_name = rabbit_exchange_type_riak
+pkg_rabbit_exchange_type_riak_description = Custom RabbitMQ exchange type for sticking messages in Riak
+pkg_rabbit_exchange_type_riak_homepage = https://github.com/jbrisbin/riak-exchange
+pkg_rabbit_exchange_type_riak_fetch = git
+pkg_rabbit_exchange_type_riak_repo = https://github.com/jbrisbin/riak-exchange
+pkg_rabbit_exchange_type_riak_commit = master
+
+PACKAGES += rack
+pkg_rack_name = rack
+pkg_rack_description = Rack handler for erlang
+pkg_rack_homepage = https://github.com/erlyvideo/rack
+pkg_rack_fetch = git
+pkg_rack_repo = https://github.com/erlyvideo/rack
+pkg_rack_commit = master
+
+PACKAGES += radierl
+pkg_radierl_name = radierl
+pkg_radierl_description = RADIUS protocol stack implemented in Erlang.
+pkg_radierl_homepage = https://github.com/vances/radierl
+pkg_radierl_fetch = git
+pkg_radierl_repo = https://github.com/vances/radierl
+pkg_radierl_commit = master
+
+PACKAGES += rafter
+pkg_rafter_name = rafter
+pkg_rafter_description = An Erlang library application which implements the Raft consensus protocol
+pkg_rafter_homepage = https://github.com/andrewjstone/rafter
+pkg_rafter_fetch = git
+pkg_rafter_repo = https://github.com/andrewjstone/rafter
+pkg_rafter_commit = master
+
+PACKAGES += ranch
+pkg_ranch_name = ranch
+pkg_ranch_description = Socket acceptor pool for TCP protocols.
+pkg_ranch_homepage = http://ninenines.eu
+pkg_ranch_fetch = git
+pkg_ranch_repo = https://github.com/ninenines/ranch
+pkg_ranch_commit = 1.2.1
+
+PACKAGES += rbeacon
+pkg_rbeacon_name = rbeacon
+pkg_rbeacon_description = LAN discovery and presence in Erlang.
+pkg_rbeacon_homepage = https://github.com/refuge/rbeacon
+pkg_rbeacon_fetch = git
+pkg_rbeacon_repo = https://github.com/refuge/rbeacon
+pkg_rbeacon_commit = master
+
+PACKAGES += rebar
+pkg_rebar_name = rebar
+pkg_rebar_description = Erlang build tool that makes it easy to compile and test Erlang applications, port drivers and releases.
+pkg_rebar_homepage = http://www.rebar3.org
+pkg_rebar_fetch = git
+pkg_rebar_repo = https://github.com/rebar/rebar3
+pkg_rebar_commit = master
+
+PACKAGES += rebus
+pkg_rebus_name = rebus
+pkg_rebus_description = A stupid-simple, internal pub/sub event bus written in and for Erlang.
+pkg_rebus_homepage = https://github.com/olle/rebus
+pkg_rebus_fetch = git
+pkg_rebus_repo = https://github.com/olle/rebus
+pkg_rebus_commit = master
+
+PACKAGES += rec2json
+pkg_rec2json_name = rec2json
+pkg_rec2json_description = Compile erlang record definitions into modules to convert them to/from json easily.
+pkg_rec2json_homepage = https://github.com/lordnull/rec2json
+pkg_rec2json_fetch = git
+pkg_rec2json_repo = https://github.com/lordnull/rec2json
+pkg_rec2json_commit = master
+
+PACKAGES += recon
+pkg_recon_name = recon
+pkg_recon_description = Collection of functions and scripts to debug Erlang in production.
+pkg_recon_homepage = https://github.com/ferd/recon
+pkg_recon_fetch = git
+pkg_recon_repo = https://github.com/ferd/recon
+pkg_recon_commit = master
+
+PACKAGES += record_info
+pkg_record_info_name = record_info
+pkg_record_info_description = Convert between record and proplist
+pkg_record_info_homepage = https://github.com/bipthelin/erlang-record_info
+pkg_record_info_fetch = git
+pkg_record_info_repo = https://github.com/bipthelin/erlang-record_info
+pkg_record_info_commit = master
+
+PACKAGES += redgrid
+pkg_redgrid_name = redgrid
+pkg_redgrid_description = automatic Erlang node discovery via redis
+pkg_redgrid_homepage = https://github.com/jkvor/redgrid
+pkg_redgrid_fetch = git
+pkg_redgrid_repo = https://github.com/jkvor/redgrid
+pkg_redgrid_commit = master
+
+PACKAGES += redo
+pkg_redo_name = redo
+pkg_redo_description = pipelined erlang redis client
+pkg_redo_homepage = https://github.com/jkvor/redo
+pkg_redo_fetch = git
+pkg_redo_repo = https://github.com/jkvor/redo
+pkg_redo_commit = master
+
+PACKAGES += reload_mk
+pkg_reload_mk_name = reload_mk
+pkg_reload_mk_description = Live reload plugin for erlang.mk.
+pkg_reload_mk_homepage = https://github.com/bullno1/reload.mk
+pkg_reload_mk_fetch = git
+pkg_reload_mk_repo = https://github.com/bullno1/reload.mk
+pkg_reload_mk_commit = master
+
+PACKAGES += reltool_util
+pkg_reltool_util_name = reltool_util
+pkg_reltool_util_description = Erlang reltool utility functionality application
+pkg_reltool_util_homepage = https://github.com/okeuday/reltool_util
+pkg_reltool_util_fetch = git
+pkg_reltool_util_repo = https://github.com/okeuday/reltool_util
+pkg_reltool_util_commit = master
+
+PACKAGES += relx
+pkg_relx_name = relx
+pkg_relx_description = Sane, simple release creation for Erlang
+pkg_relx_homepage = https://github.com/erlware/relx
+pkg_relx_fetch = git
+pkg_relx_repo = https://github.com/erlware/relx
+pkg_relx_commit = master
+
+PACKAGES += resource_discovery
+pkg_resource_discovery_name = resource_discovery
+pkg_resource_discovery_description = An application used to dynamically discover resources present in an Erlang node cluster.
+pkg_resource_discovery_homepage = http://erlware.org/
+pkg_resource_discovery_fetch = git
+pkg_resource_discovery_repo = https://github.com/erlware/resource_discovery
+pkg_resource_discovery_commit = master
+
+PACKAGES += restc
+pkg_restc_name = restc
+pkg_restc_description = Erlang REST client
+pkg_restc_homepage = https://github.com/kivra/restclient
+pkg_restc_fetch = git
+pkg_restc_repo = https://github.com/kivra/restclient
+pkg_restc_commit = master
+
+PACKAGES += rfc4627_jsonrpc
+pkg_rfc4627_jsonrpc_name = rfc4627_jsonrpc
+pkg_rfc4627_jsonrpc_description = Erlang RFC4627 (JSON) codec and JSON-RPC server implementation.
+pkg_rfc4627_jsonrpc_homepage = https://github.com/tonyg/erlang-rfc4627
+pkg_rfc4627_jsonrpc_fetch = git
+pkg_rfc4627_jsonrpc_repo = https://github.com/tonyg/erlang-rfc4627
+pkg_rfc4627_jsonrpc_commit = master
+
+PACKAGES += riak_control
+pkg_riak_control_name = riak_control
+pkg_riak_control_description = Webmachine-based administration interface for Riak.
+pkg_riak_control_homepage = https://github.com/basho/riak_control
+pkg_riak_control_fetch = git
+pkg_riak_control_repo = https://github.com/basho/riak_control
+pkg_riak_control_commit = master
+
+PACKAGES += riak_core
+pkg_riak_core_name = riak_core
+pkg_riak_core_description = Distributed systems infrastructure used by Riak.
+pkg_riak_core_homepage = https://github.com/basho/riak_core
+pkg_riak_core_fetch = git
+pkg_riak_core_repo = https://github.com/basho/riak_core
+pkg_riak_core_commit = master
+
+PACKAGES += riak_dt
+pkg_riak_dt_name = riak_dt
+pkg_riak_dt_description = Convergent replicated datatypes in Erlang
+pkg_riak_dt_homepage = https://github.com/basho/riak_dt
+pkg_riak_dt_fetch = git
+pkg_riak_dt_repo = https://github.com/basho/riak_dt
+pkg_riak_dt_commit = master
+
+PACKAGES += riak_ensemble
+pkg_riak_ensemble_name = riak_ensemble
+pkg_riak_ensemble_description = Multi-Paxos framework in Erlang
+pkg_riak_ensemble_homepage = https://github.com/basho/riak_ensemble
+pkg_riak_ensemble_fetch = git
+pkg_riak_ensemble_repo = https://github.com/basho/riak_ensemble
+pkg_riak_ensemble_commit = master
+
+PACKAGES += riak_kv
+pkg_riak_kv_name = riak_kv
+pkg_riak_kv_description = Riak Key/Value Store
+pkg_riak_kv_homepage = https://github.com/basho/riak_kv
+pkg_riak_kv_fetch = git
+pkg_riak_kv_repo = https://github.com/basho/riak_kv
+pkg_riak_kv_commit = master
+
+PACKAGES += riak_pg
+pkg_riak_pg_name = riak_pg
+pkg_riak_pg_description = Distributed process groups with riak_core.
+pkg_riak_pg_homepage = https://github.com/cmeiklejohn/riak_pg
+pkg_riak_pg_fetch = git
+pkg_riak_pg_repo = https://github.com/cmeiklejohn/riak_pg
+pkg_riak_pg_commit = master
+
+PACKAGES += riak_pipe
+pkg_riak_pipe_name = riak_pipe
+pkg_riak_pipe_description = Riak Pipelines
+pkg_riak_pipe_homepage = https://github.com/basho/riak_pipe
+pkg_riak_pipe_fetch = git
+pkg_riak_pipe_repo = https://github.com/basho/riak_pipe
+pkg_riak_pipe_commit = master
+
+PACKAGES += riak_sysmon
+pkg_riak_sysmon_name = riak_sysmon
+pkg_riak_sysmon_description = Simple OTP app for managing Erlang VM system_monitor event messages
+pkg_riak_sysmon_homepage = https://github.com/basho/riak_sysmon
+pkg_riak_sysmon_fetch = git
+pkg_riak_sysmon_repo = https://github.com/basho/riak_sysmon
+pkg_riak_sysmon_commit = master
+
+PACKAGES += riak_test
+pkg_riak_test_name = riak_test
+pkg_riak_test_description = I'm in your cluster, testing your riaks
+pkg_riak_test_homepage = https://github.com/basho/riak_test
+pkg_riak_test_fetch = git
+pkg_riak_test_repo = https://github.com/basho/riak_test
+pkg_riak_test_commit = master
+
+PACKAGES += riakc
+pkg_riakc_name = riakc
+pkg_riakc_description = Erlang clients for Riak.
+pkg_riakc_homepage = https://github.com/basho/riak-erlang-client
+pkg_riakc_fetch = git
+pkg_riakc_repo = https://github.com/basho/riak-erlang-client
+pkg_riakc_commit = master
+
+PACKAGES += riakhttpc
+pkg_riakhttpc_name = riakhttpc
+pkg_riakhttpc_description = Riak Erlang client using the HTTP interface
+pkg_riakhttpc_homepage = https://github.com/basho/riak-erlang-http-client
+pkg_riakhttpc_fetch = git
+pkg_riakhttpc_repo = https://github.com/basho/riak-erlang-http-client
+pkg_riakhttpc_commit = master
+
+PACKAGES += riaknostic
+pkg_riaknostic_name = riaknostic
+pkg_riaknostic_description = A diagnostic tool for Riak installations, to find common errors asap
+pkg_riaknostic_homepage = https://github.com/basho/riaknostic
+pkg_riaknostic_fetch = git
+pkg_riaknostic_repo = https://github.com/basho/riaknostic
+pkg_riaknostic_commit = master
+
+PACKAGES += riakpool
+pkg_riakpool_name = riakpool
+pkg_riakpool_description = erlang riak client pool
+pkg_riakpool_homepage = https://github.com/dweldon/riakpool
+pkg_riakpool_fetch = git
+pkg_riakpool_repo = https://github.com/dweldon/riakpool
+pkg_riakpool_commit = master
+
+PACKAGES += rivus_cep
+pkg_rivus_cep_name = rivus_cep
+pkg_rivus_cep_description = Complex event processing in Erlang
+pkg_rivus_cep_homepage = https://github.com/vascokk/rivus_cep
+pkg_rivus_cep_fetch = git
+pkg_rivus_cep_repo = https://github.com/vascokk/rivus_cep
+pkg_rivus_cep_commit = master
+
+PACKAGES += rlimit
+pkg_rlimit_name = rlimit
+pkg_rlimit_description = Magnus Klaar's rate limiter code from etorrent
+pkg_rlimit_homepage = https://github.com/jlouis/rlimit
+pkg_rlimit_fetch = git
+pkg_rlimit_repo = https://github.com/jlouis/rlimit
+pkg_rlimit_commit = master
+
+PACKAGES += rust_mk
+pkg_rust_mk_name = rust_mk
+pkg_rust_mk_description = Build Rust crates in an Erlang application
+pkg_rust_mk_homepage = https://github.com/goertzenator/rust.mk
+pkg_rust_mk_fetch = git
+pkg_rust_mk_repo = https://github.com/goertzenator/rust.mk
+pkg_rust_mk_commit = master
+
+PACKAGES += safetyvalve
+pkg_safetyvalve_name = safetyvalve
+pkg_safetyvalve_description = A safety valve for your erlang node
+pkg_safetyvalve_homepage = https://github.com/jlouis/safetyvalve
+pkg_safetyvalve_fetch = git
+pkg_safetyvalve_repo = https://github.com/jlouis/safetyvalve
+pkg_safetyvalve_commit = master
+
+PACKAGES += seestar
+pkg_seestar_name = seestar
+pkg_seestar_description = The Erlang client for Cassandra 1.2+ binary protocol
+pkg_seestar_homepage = https://github.com/iamaleksey/seestar
+pkg_seestar_fetch = git
+pkg_seestar_repo = https://github.com/iamaleksey/seestar
+pkg_seestar_commit = master
+
+PACKAGES += service
+pkg_service_name = service
+pkg_service_description = A minimal Erlang behavior for creating CloudI internal services
+pkg_service_homepage = http://cloudi.org/
+pkg_service_fetch = git
+pkg_service_repo = https://github.com/CloudI/service
+pkg_service_commit = master
+
+PACKAGES += setup
+pkg_setup_name = setup
+pkg_setup_description = Generic setup utility for Erlang-based systems
+pkg_setup_homepage = https://github.com/uwiger/setup
+pkg_setup_fetch = git
+pkg_setup_repo = https://github.com/uwiger/setup
+pkg_setup_commit = master
+
+PACKAGES += sext
+pkg_sext_name = sext
+pkg_sext_description = Sortable Erlang Term Serialization
+pkg_sext_homepage = https://github.com/uwiger/sext
+pkg_sext_fetch = git
+pkg_sext_repo = https://github.com/uwiger/sext
+pkg_sext_commit = master
+
+PACKAGES += sfmt
+pkg_sfmt_name = sfmt
+pkg_sfmt_description = SFMT pseudo random number generator for Erlang.
+pkg_sfmt_homepage = https://github.com/jj1bdx/sfmt-erlang
+pkg_sfmt_fetch = git
+pkg_sfmt_repo = https://github.com/jj1bdx/sfmt-erlang
+pkg_sfmt_commit = master
+
+PACKAGES += sgte
+pkg_sgte_name = sgte
+pkg_sgte_description = A simple Erlang Template Engine
+pkg_sgte_homepage = https://github.com/filippo/sgte
+pkg_sgte_fetch = git
+pkg_sgte_repo = https://github.com/filippo/sgte
+pkg_sgte_commit = master
+
+PACKAGES += sheriff
+pkg_sheriff_name = sheriff
+pkg_sheriff_description = Parse transform for type based validation.
+pkg_sheriff_homepage = http://ninenines.eu
+pkg_sheriff_fetch = git
+pkg_sheriff_repo = https://github.com/extend/sheriff
+pkg_sheriff_commit = master
+
+PACKAGES += shotgun
+pkg_shotgun_name = shotgun
+pkg_shotgun_description = better than just a gun
+pkg_shotgun_homepage = https://github.com/inaka/shotgun
+pkg_shotgun_fetch = git
+pkg_shotgun_repo = https://github.com/inaka/shotgun
+pkg_shotgun_commit = master
+
+PACKAGES += sidejob
+pkg_sidejob_name = sidejob
+pkg_sidejob_description = Parallel worker and capacity limiting library for Erlang
+pkg_sidejob_homepage = https://github.com/basho/sidejob
+pkg_sidejob_fetch = git
+pkg_sidejob_repo = https://github.com/basho/sidejob
+pkg_sidejob_commit = master
+
+PACKAGES += sieve
+pkg_sieve_name = sieve
+pkg_sieve_description = sieve is a simple TCP routing proxy (layer 7) in erlang
+pkg_sieve_homepage = https://github.com/benoitc/sieve
+pkg_sieve_fetch = git
+pkg_sieve_repo = https://github.com/benoitc/sieve
+pkg_sieve_commit = master
+
+PACKAGES += sighandler
+pkg_sighandler_name = sighandler
+pkg_sighandler_description = Handle UNIX signals in Erlang
+pkg_sighandler_homepage = https://github.com/jkingsbery/sighandler
+pkg_sighandler_fetch = git
+pkg_sighandler_repo = https://github.com/jkingsbery/sighandler
+pkg_sighandler_commit = master
+
+PACKAGES += simhash
+pkg_simhash_name = simhash
+pkg_simhash_description = Simhashing for Erlang -- hashing algorithm to find near-duplicates in binary data.
+pkg_simhash_homepage = https://github.com/ferd/simhash
+pkg_simhash_fetch = git
+pkg_simhash_repo = https://github.com/ferd/simhash
+pkg_simhash_commit = master
+
+PACKAGES += simple_bridge
+pkg_simple_bridge_name = simple_bridge
+pkg_simple_bridge_description = A simple, standardized interface library to Erlang HTTP Servers.
+pkg_simple_bridge_homepage = https://github.com/nitrogen/simple_bridge
+pkg_simple_bridge_fetch = git
+pkg_simple_bridge_repo = https://github.com/nitrogen/simple_bridge
+pkg_simple_bridge_commit = master
+
+PACKAGES += simple_oauth2
+pkg_simple_oauth2_name = simple_oauth2
+pkg_simple_oauth2_description = Simple erlang OAuth2 client module for any http server framework (Google, Facebook, Yandex, Vkontakte are preconfigured)
+pkg_simple_oauth2_homepage = https://github.com/virtan/simple_oauth2
+pkg_simple_oauth2_fetch = git
+pkg_simple_oauth2_repo = https://github.com/virtan/simple_oauth2
+pkg_simple_oauth2_commit = master
+
+PACKAGES += skel
+pkg_skel_name = skel
+pkg_skel_description = A Streaming Process-based Skeleton Library for Erlang
+pkg_skel_homepage = https://github.com/ParaPhrase/skel
+pkg_skel_fetch = git
+pkg_skel_repo = https://github.com/ParaPhrase/skel
+pkg_skel_commit = master
+
+PACKAGES += slack
+pkg_slack_name = slack
+pkg_slack_description = Minimal slack notification OTP library.
+pkg_slack_homepage = https://github.com/DonBranson/slack
+pkg_slack_fetch = git
+pkg_slack_repo = https://github.com/DonBranson/slack.git
+pkg_slack_commit = master
+
+PACKAGES += smother
+pkg_smother_name = smother
+pkg_smother_description = Extended code coverage metrics for Erlang.
+pkg_smother_homepage = https://ramsay-t.github.io/Smother/
+pkg_smother_fetch = git
+pkg_smother_repo = https://github.com/ramsay-t/Smother
+pkg_smother_commit = master
+
+PACKAGES += snappyer
+pkg_snappyer_name = snappyer
+pkg_snappyer_description = Snappy as a NIF for Erlang
+pkg_snappyer_homepage = https://github.com/zmstone/snappyer
+pkg_snappyer_fetch = git
+pkg_snappyer_repo = https://github.com/zmstone/snappyer.git
+pkg_snappyer_commit = master
+
+PACKAGES += social
+pkg_social_name = social
+pkg_social_description = Cowboy handler for social login via OAuth2 providers
+pkg_social_homepage = https://github.com/dvv/social
+pkg_social_fetch = git
+pkg_social_repo = https://github.com/dvv/social
+pkg_social_commit = master
+
+PACKAGES += spapi_router
+pkg_spapi_router_name = spapi_router
+pkg_spapi_router_description = Partially-connected Erlang clustering
+pkg_spapi_router_homepage = https://github.com/spilgames/spapi-router
+pkg_spapi_router_fetch = git
+pkg_spapi_router_repo = https://github.com/spilgames/spapi-router
+pkg_spapi_router_commit = master
+
+PACKAGES += sqerl
+pkg_sqerl_name = sqerl
+pkg_sqerl_description = An Erlang-flavoured SQL DSL
+pkg_sqerl_homepage = https://github.com/hairyhum/sqerl
+pkg_sqerl_fetch = git
+pkg_sqerl_repo = https://github.com/hairyhum/sqerl
+pkg_sqerl_commit = master
+
+PACKAGES += srly
+pkg_srly_name = srly
+pkg_srly_description = Native Erlang Unix serial interface
+pkg_srly_homepage = https://github.com/msantos/srly
+pkg_srly_fetch = git
+pkg_srly_repo = https://github.com/msantos/srly
+pkg_srly_commit = master
+
+PACKAGES += sshrpc
+pkg_sshrpc_name = sshrpc
+pkg_sshrpc_description = Erlang SSH RPC module (experimental)
+pkg_sshrpc_homepage = https://github.com/jj1bdx/sshrpc
+pkg_sshrpc_fetch = git
+pkg_sshrpc_repo = https://github.com/jj1bdx/sshrpc
+pkg_sshrpc_commit = master
+
+PACKAGES += stable
+pkg_stable_name = stable
+pkg_stable_description = Library of assorted helpers for Cowboy web server.
+pkg_stable_homepage = https://github.com/dvv/stable
+pkg_stable_fetch = git
+pkg_stable_repo = https://github.com/dvv/stable
+pkg_stable_commit = master
+
+PACKAGES += statebox
+pkg_statebox_name = statebox
+pkg_statebox_description = Erlang state monad with merge/conflict-resolution capabilities. Useful for Riak.
+pkg_statebox_homepage = https://github.com/mochi/statebox
+pkg_statebox_fetch = git
+pkg_statebox_repo = https://github.com/mochi/statebox
+pkg_statebox_commit = master
+
+PACKAGES += statebox_riak
+pkg_statebox_riak_name = statebox_riak
+pkg_statebox_riak_description = Convenience library that makes it easier to use statebox with riak, extracted from best practices in our production code at Mochi Media.
+pkg_statebox_riak_homepage = https://github.com/mochi/statebox_riak
+pkg_statebox_riak_fetch = git
+pkg_statebox_riak_repo = https://github.com/mochi/statebox_riak
+pkg_statebox_riak_commit = master
+
+PACKAGES += statman
+pkg_statman_name = statman
+pkg_statman_description = Efficiently collect massive volumes of metrics inside the Erlang VM
+pkg_statman_homepage = https://github.com/knutin/statman
+pkg_statman_fetch = git
+pkg_statman_repo = https://github.com/knutin/statman
+pkg_statman_commit = master
+
+PACKAGES += statsderl
+pkg_statsderl_name = statsderl
+pkg_statsderl_description = StatsD client (erlang)
+pkg_statsderl_homepage = https://github.com/lpgauth/statsderl
+pkg_statsderl_fetch = git
+pkg_statsderl_repo = https://github.com/lpgauth/statsderl
+pkg_statsderl_commit = master
+
+PACKAGES += stdinout_pool
+pkg_stdinout_pool_name = stdinout_pool
+pkg_stdinout_pool_description = stdinout_pool : stuff goes in, stuff goes out. there's never any miscommunication.
+pkg_stdinout_pool_homepage = https://github.com/mattsta/erlang-stdinout-pool
+pkg_stdinout_pool_fetch = git
+pkg_stdinout_pool_repo = https://github.com/mattsta/erlang-stdinout-pool
+pkg_stdinout_pool_commit = master
+
+PACKAGES += stockdb
+pkg_stockdb_name = stockdb
+pkg_stockdb_description = Database for storing Stock Exchange quotes in erlang
+pkg_stockdb_homepage = https://github.com/maxlapshin/stockdb
+pkg_stockdb_fetch = git
+pkg_stockdb_repo = https://github.com/maxlapshin/stockdb
+pkg_stockdb_commit = master
+
+PACKAGES += stripe
+pkg_stripe_name = stripe
+pkg_stripe_description = Erlang interface to the stripe.com API
+pkg_stripe_homepage = https://github.com/mattsta/stripe-erlang
+pkg_stripe_fetch = git
+pkg_stripe_repo = https://github.com/mattsta/stripe-erlang
+pkg_stripe_commit = v1
+
+PACKAGES += subproc
+pkg_subproc_name = subproc
+pkg_subproc_description = unix subprocess manager with {active,once|false} modes
+pkg_subproc_homepage = http://dozzie.jarowit.net/trac/wiki/subproc
+pkg_subproc_fetch = git
+pkg_subproc_repo = https://github.com/dozzie/subproc
+pkg_subproc_commit = v0.1.0
+
+PACKAGES += supervisor3
+pkg_supervisor3_name = supervisor3
+pkg_supervisor3_description = OTP supervisor with additional strategies
+pkg_supervisor3_homepage = https://github.com/klarna/supervisor3
+pkg_supervisor3_fetch = git
+pkg_supervisor3_repo = https://github.com/klarna/supervisor3.git
+pkg_supervisor3_commit = master
+
+PACKAGES += surrogate
+pkg_surrogate_name = surrogate
+pkg_surrogate_description = Proxy server written in erlang. Supports reverse proxy load balancing and forward proxy with http (including CONNECT), socks4, socks5, and transparent proxy modes.
+pkg_surrogate_homepage = https://github.com/skruger/Surrogate
+pkg_surrogate_fetch = git
+pkg_surrogate_repo = https://github.com/skruger/Surrogate
+pkg_surrogate_commit = master
+
+PACKAGES += swab
+pkg_swab_name = swab
+pkg_swab_description = General purpose buffer handling module
+pkg_swab_homepage = https://github.com/crownedgrouse/swab
+pkg_swab_fetch = git
+pkg_swab_repo = https://github.com/crownedgrouse/swab
+pkg_swab_commit = master
+
+PACKAGES += swarm
+pkg_swarm_name = swarm
+pkg_swarm_description = Fast and simple acceptor pool for Erlang
+pkg_swarm_homepage = https://github.com/jeremey/swarm
+pkg_swarm_fetch = git
+pkg_swarm_repo = https://github.com/jeremey/swarm
+pkg_swarm_commit = master
+
+PACKAGES += switchboard
+pkg_switchboard_name = switchboard
+pkg_switchboard_description = A framework for processing email using worker plugins.
+pkg_switchboard_homepage = https://github.com/thusfresh/switchboard
+pkg_switchboard_fetch = git
+pkg_switchboard_repo = https://github.com/thusfresh/switchboard
+pkg_switchboard_commit = master
+
+PACKAGES += syn
+pkg_syn_name = syn
+pkg_syn_description = A global Process Registry and Process Group manager for Erlang.
+pkg_syn_homepage = https://github.com/ostinelli/syn
+pkg_syn_fetch = git
+pkg_syn_repo = https://github.com/ostinelli/syn
+pkg_syn_commit = master
+
+PACKAGES += sync
+pkg_sync_name = sync
+pkg_sync_description = On-the-fly recompiling and reloading in Erlang.
+pkg_sync_homepage = https://github.com/rustyio/sync
+pkg_sync_fetch = git
+pkg_sync_repo = https://github.com/rustyio/sync
+pkg_sync_commit = master
+
+PACKAGES += syntaxerl
+pkg_syntaxerl_name = syntaxerl
+pkg_syntaxerl_description = Syntax checker for Erlang
+pkg_syntaxerl_homepage = https://github.com/ten0s/syntaxerl
+pkg_syntaxerl_fetch = git
+pkg_syntaxerl_repo = https://github.com/ten0s/syntaxerl
+pkg_syntaxerl_commit = master
+
+PACKAGES += syslog
+pkg_syslog_name = syslog
+pkg_syslog_description = Erlang port driver for interacting with syslog via syslog(3)
+pkg_syslog_homepage = https://github.com/Vagabond/erlang-syslog
+pkg_syslog_fetch = git
+pkg_syslog_repo = https://github.com/Vagabond/erlang-syslog
+pkg_syslog_commit = master
+
+PACKAGES += taskforce
+pkg_taskforce_name = taskforce
+pkg_taskforce_description = Erlang worker pools for controlled parallelisation of arbitrary tasks.
+pkg_taskforce_homepage = https://github.com/g-andrade/taskforce
+pkg_taskforce_fetch = git
+pkg_taskforce_repo = https://github.com/g-andrade/taskforce
+pkg_taskforce_commit = master
+
+PACKAGES += tddreloader
+pkg_tddreloader_name = tddreloader
+pkg_tddreloader_description = Shell utility for recompiling, reloading, and testing code as it changes
+pkg_tddreloader_homepage = https://github.com/version2beta/tddreloader
+pkg_tddreloader_fetch = git
+pkg_tddreloader_repo = https://github.com/version2beta/tddreloader
+pkg_tddreloader_commit = master
+
+PACKAGES += tempo
+pkg_tempo_name = tempo
+pkg_tempo_description = NIF-based date and time parsing and formatting for Erlang.
+pkg_tempo_homepage = https://github.com/selectel/tempo
+pkg_tempo_fetch = git
+pkg_tempo_repo = https://github.com/selectel/tempo
+pkg_tempo_commit = master
+
+PACKAGES += ticktick
+pkg_ticktick_name = ticktick
+pkg_ticktick_description = Ticktick is an ID generator for message services.
+pkg_ticktick_homepage = https://github.com/ericliang/ticktick
+pkg_ticktick_fetch = git
+pkg_ticktick_repo = https://github.com/ericliang/ticktick
+pkg_ticktick_commit = master
+
+PACKAGES += tinymq
+pkg_tinymq_name = tinymq
+pkg_tinymq_description = TinyMQ - a diminutive, in-memory message queue
+pkg_tinymq_homepage = https://github.com/ChicagoBoss/tinymq
+pkg_tinymq_fetch = git
+pkg_tinymq_repo = https://github.com/ChicagoBoss/tinymq
+pkg_tinymq_commit = master
+
+PACKAGES += tinymt
+pkg_tinymt_name = tinymt
+pkg_tinymt_description = TinyMT pseudo random number generator for Erlang.
+pkg_tinymt_homepage = https://github.com/jj1bdx/tinymt-erlang
+pkg_tinymt_fetch = git
+pkg_tinymt_repo = https://github.com/jj1bdx/tinymt-erlang
+pkg_tinymt_commit = master
+
+PACKAGES += tirerl
+pkg_tirerl_name = tirerl
+pkg_tirerl_description = Erlang interface to Elasticsearch
+pkg_tirerl_homepage = https://github.com/inaka/tirerl
+pkg_tirerl_fetch = git
+pkg_tirerl_repo = https://github.com/inaka/tirerl
+pkg_tirerl_commit = master
+
+PACKAGES += toml
+pkg_toml_name = toml
+pkg_toml_description = TOML (0.4.0) config parser
+pkg_toml_homepage = http://dozzie.jarowit.net/trac/wiki/TOML
+pkg_toml_fetch = git
+pkg_toml_repo = https://github.com/dozzie/toml
+pkg_toml_commit = v0.2.0
+
+PACKAGES += traffic_tools
+pkg_traffic_tools_name = traffic_tools
+pkg_traffic_tools_description = Simple traffic limiting library
+pkg_traffic_tools_homepage = https://github.com/systra/traffic_tools
+pkg_traffic_tools_fetch = git
+pkg_traffic_tools_repo = https://github.com/systra/traffic_tools
+pkg_traffic_tools_commit = master
+
+PACKAGES += trails
+pkg_trails_name = trails
+pkg_trails_description = A couple of improvements over Cowboy Routes
+pkg_trails_homepage = http://inaka.github.io/cowboy-trails/
+pkg_trails_fetch = git
+pkg_trails_repo = https://github.com/inaka/cowboy-trails
+pkg_trails_commit = master
+
+PACKAGES += trane
+pkg_trane_name = trane
+pkg_trane_description = SAX-style parser for broken HTML, written in Erlang
+pkg_trane_homepage = https://github.com/massemanet/trane
+pkg_trane_fetch = git
+pkg_trane_repo = https://github.com/massemanet/trane
+pkg_trane_commit = master
+
+PACKAGES += transit
+pkg_transit_name = transit
+pkg_transit_description = Transit format for Erlang
+pkg_transit_homepage = https://github.com/isaiah/transit-erlang
+pkg_transit_fetch = git
+pkg_transit_repo = https://github.com/isaiah/transit-erlang
+pkg_transit_commit = master
+
+PACKAGES += trie
+pkg_trie_name = trie
+pkg_trie_description = Erlang Trie Implementation
+pkg_trie_homepage = https://github.com/okeuday/trie
+pkg_trie_fetch = git
+pkg_trie_repo = https://github.com/okeuday/trie
+pkg_trie_commit = master
+
+PACKAGES += triq
+pkg_triq_name = triq
+pkg_triq_description = Trifork QuickCheck
+pkg_triq_homepage = https://triq.gitlab.io
+pkg_triq_fetch = git
+pkg_triq_repo = https://gitlab.com/triq/triq.git
+pkg_triq_commit = master
+
+PACKAGES += tunctl
+pkg_tunctl_name = tunctl
+pkg_tunctl_description = Erlang TUN/TAP interface
+pkg_tunctl_homepage = https://github.com/msantos/tunctl
+pkg_tunctl_fetch = git
+pkg_tunctl_repo = https://github.com/msantos/tunctl
+pkg_tunctl_commit = master
+
+PACKAGES += twerl
+pkg_twerl_name = twerl
+pkg_twerl_description = Erlang client for the Twitter Streaming API
+pkg_twerl_homepage = https://github.com/lucaspiller/twerl
+pkg_twerl_fetch = git
+pkg_twerl_repo = https://github.com/lucaspiller/twerl
+pkg_twerl_commit = oauth
+
+PACKAGES += twitter_erlang
+pkg_twitter_erlang_name = twitter_erlang
+pkg_twitter_erlang_description = An Erlang Twitter client
+pkg_twitter_erlang_homepage = https://github.com/ngerakines/erlang_twitter
+pkg_twitter_erlang_fetch = git
+pkg_twitter_erlang_repo = https://github.com/ngerakines/erlang_twitter
+pkg_twitter_erlang_commit = master
+
+PACKAGES += ucol_nif
+pkg_ucol_nif_name = ucol_nif
+pkg_ucol_nif_description = ICU-based collation module for Erlang
+pkg_ucol_nif_homepage = https://github.com/refuge/ucol_nif
+pkg_ucol_nif_fetch = git
+pkg_ucol_nif_repo = https://github.com/refuge/ucol_nif
+pkg_ucol_nif_commit = master
+
+PACKAGES += unicorn
+pkg_unicorn_name = unicorn
+pkg_unicorn_description = Generic configuration server
+pkg_unicorn_homepage = https://github.com/shizzard/unicorn
+pkg_unicorn_fetch = git
+pkg_unicorn_repo = https://github.com/shizzard/unicorn
+pkg_unicorn_commit = master
+
+PACKAGES += unsplit
+pkg_unsplit_name = unsplit
+pkg_unsplit_description = Resolves conflicts in Mnesia after network splits
+pkg_unsplit_homepage = https://github.com/uwiger/unsplit
+pkg_unsplit_fetch = git
+pkg_unsplit_repo = https://github.com/uwiger/unsplit
+pkg_unsplit_commit = master
+
+PACKAGES += uuid
+pkg_uuid_name = uuid
+pkg_uuid_description = Erlang UUID Implementation
+pkg_uuid_homepage = https://github.com/okeuday/uuid
+pkg_uuid_fetch = git
+pkg_uuid_repo = https://github.com/okeuday/uuid
+pkg_uuid_commit = master
+
+PACKAGES += ux
+pkg_ux_name = ux
+pkg_ux_description = Unicode eXtension for Erlang (Strings, Collation)
+pkg_ux_homepage = https://github.com/erlang-unicode/ux
+pkg_ux_fetch = git
+pkg_ux_repo = https://github.com/erlang-unicode/ux
+pkg_ux_commit = master
+
+PACKAGES += vert
+pkg_vert_name = vert
+pkg_vert_description = Erlang binding to libvirt virtualization API
+pkg_vert_homepage = https://github.com/msantos/erlang-libvirt
+pkg_vert_fetch = git
+pkg_vert_repo = https://github.com/msantos/erlang-libvirt
+pkg_vert_commit = master
+
+PACKAGES += verx
+pkg_verx_name = verx
+pkg_verx_description = Erlang implementation of the libvirtd remote protocol
+pkg_verx_homepage = https://github.com/msantos/verx
+pkg_verx_fetch = git
+pkg_verx_repo = https://github.com/msantos/verx
+pkg_verx_commit = master
+
+PACKAGES += vmq_acl
+pkg_vmq_acl_name = vmq_acl
+pkg_vmq_acl_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_acl_homepage = https://verne.mq/
+pkg_vmq_acl_fetch = git
+pkg_vmq_acl_repo = https://github.com/erlio/vmq_acl
+pkg_vmq_acl_commit = master
+
+PACKAGES += vmq_bridge
+pkg_vmq_bridge_name = vmq_bridge
+pkg_vmq_bridge_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_bridge_homepage = https://verne.mq/
+pkg_vmq_bridge_fetch = git
+pkg_vmq_bridge_repo = https://github.com/erlio/vmq_bridge
+pkg_vmq_bridge_commit = master
+
+PACKAGES += vmq_graphite
+pkg_vmq_graphite_name = vmq_graphite
+pkg_vmq_graphite_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_graphite_homepage = https://verne.mq/
+pkg_vmq_graphite_fetch = git
+pkg_vmq_graphite_repo = https://github.com/erlio/vmq_graphite
+pkg_vmq_graphite_commit = master
+
+PACKAGES += vmq_passwd
+pkg_vmq_passwd_name = vmq_passwd
+pkg_vmq_passwd_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_passwd_homepage = https://verne.mq/
+pkg_vmq_passwd_fetch = git
+pkg_vmq_passwd_repo = https://github.com/erlio/vmq_passwd
+pkg_vmq_passwd_commit = master
+
+PACKAGES += vmq_server
+pkg_vmq_server_name = vmq_server
+pkg_vmq_server_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_server_homepage = https://verne.mq/
+pkg_vmq_server_fetch = git
+pkg_vmq_server_repo = https://github.com/erlio/vmq_server
+pkg_vmq_server_commit = master
+
+PACKAGES += vmq_snmp
+pkg_vmq_snmp_name = vmq_snmp
+pkg_vmq_snmp_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_snmp_homepage = https://verne.mq/
+pkg_vmq_snmp_fetch = git
+pkg_vmq_snmp_repo = https://github.com/erlio/vmq_snmp
+pkg_vmq_snmp_commit = master
+
+PACKAGES += vmq_systree
+pkg_vmq_systree_name = vmq_systree
+pkg_vmq_systree_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_systree_homepage = https://verne.mq/
+pkg_vmq_systree_fetch = git
+pkg_vmq_systree_repo = https://github.com/erlio/vmq_systree
+pkg_vmq_systree_commit = master
+
+PACKAGES += vmstats
+pkg_vmstats_name = vmstats
+pkg_vmstats_description = Tiny Erlang app that works in conjunction with statsderl to generate Erlang VM metrics for graphite logs.
+pkg_vmstats_homepage = https://github.com/ferd/vmstats
+pkg_vmstats_fetch = git
+pkg_vmstats_repo = https://github.com/ferd/vmstats
+pkg_vmstats_commit = master
+
+PACKAGES += walrus
+pkg_walrus_name = walrus
+pkg_walrus_description = Walrus - Mustache-like Templating
+pkg_walrus_homepage = https://github.com/devinus/walrus
+pkg_walrus_fetch = git
+pkg_walrus_repo = https://github.com/devinus/walrus
+pkg_walrus_commit = master
+
+PACKAGES += webmachine
+pkg_webmachine_name = webmachine
+pkg_webmachine_description = A REST-based system for building web applications.
+pkg_webmachine_homepage = https://github.com/basho/webmachine
+pkg_webmachine_fetch = git
+pkg_webmachine_repo = https://github.com/basho/webmachine
+pkg_webmachine_commit = master
+
+PACKAGES += websocket_client
+pkg_websocket_client_name = websocket_client
+pkg_websocket_client_description = Erlang websocket client (ws and wss supported)
+pkg_websocket_client_homepage = https://github.com/jeremyong/websocket_client
+pkg_websocket_client_fetch = git
+pkg_websocket_client_repo = https://github.com/jeremyong/websocket_client
+pkg_websocket_client_commit = master
+
+PACKAGES += worker_pool
+pkg_worker_pool_name = worker_pool
+pkg_worker_pool_description = A simple Erlang worker pool
+pkg_worker_pool_homepage = https://github.com/inaka/worker_pool
+pkg_worker_pool_fetch = git
+pkg_worker_pool_repo = https://github.com/inaka/worker_pool
+pkg_worker_pool_commit = master
+
+PACKAGES += wrangler
+pkg_wrangler_name = wrangler
+pkg_wrangler_description = Import of the Wrangler svn repository.
+pkg_wrangler_homepage = http://www.cs.kent.ac.uk/projects/wrangler/Home.html
+pkg_wrangler_fetch = git
+pkg_wrangler_repo = https://github.com/RefactoringTools/wrangler
+pkg_wrangler_commit = master
+
+PACKAGES += wsock
+pkg_wsock_name = wsock
+pkg_wsock_description = Erlang library to build WebSocket clients and servers
+pkg_wsock_homepage = https://github.com/madtrick/wsock
+pkg_wsock_fetch = git
+pkg_wsock_repo = https://github.com/madtrick/wsock
+pkg_wsock_commit = master
+
+PACKAGES += xhttpc
+pkg_xhttpc_name = xhttpc
+pkg_xhttpc_description = Extensible HTTP Client for Erlang
+pkg_xhttpc_homepage = https://github.com/seriyps/xhttpc
+pkg_xhttpc_fetch = git
+pkg_xhttpc_repo = https://github.com/seriyps/xhttpc
+pkg_xhttpc_commit = master
+
+PACKAGES += xref_runner
+pkg_xref_runner_name = xref_runner
+pkg_xref_runner_description = Erlang Xref Runner (inspired by rebar xref)
+pkg_xref_runner_homepage = https://github.com/inaka/xref_runner
+pkg_xref_runner_fetch = git
+pkg_xref_runner_repo = https://github.com/inaka/xref_runner
+pkg_xref_runner_commit = master
+
+PACKAGES += yamerl
+pkg_yamerl_name = yamerl
+pkg_yamerl_description = YAML 1.2 parser in pure Erlang
+pkg_yamerl_homepage = https://github.com/yakaz/yamerl
+pkg_yamerl_fetch = git
+pkg_yamerl_repo = https://github.com/yakaz/yamerl
+pkg_yamerl_commit = master
+
+PACKAGES += yamler
+pkg_yamler_name = yamler
+pkg_yamler_description = libyaml-based yaml loader for Erlang
+pkg_yamler_homepage = https://github.com/goertzenator/yamler
+pkg_yamler_fetch = git
+pkg_yamler_repo = https://github.com/goertzenator/yamler
+pkg_yamler_commit = master
+
+PACKAGES += yaws
+pkg_yaws_name = yaws
+pkg_yaws_description = Yaws webserver
+pkg_yaws_homepage = http://yaws.hyber.org
+pkg_yaws_fetch = git
+pkg_yaws_repo = https://github.com/klacke/yaws
+pkg_yaws_commit = master
+
+PACKAGES += zab_engine
+pkg_zab_engine_name = zab_engine
+pkg_zab_engine_description = ZAB protocol implemented in Erlang
+pkg_zab_engine_homepage = https://github.com/xinmingyao/zab_engine
+pkg_zab_engine_fetch = git
+pkg_zab_engine_repo = https://github.com/xinmingyao/zab_engine
+pkg_zab_engine_commit = master
+
+PACKAGES += zabbix_sender
+pkg_zabbix_sender_name = zabbix_sender
+pkg_zabbix_sender_description = Zabbix trapper for sending data to Zabbix in pure Erlang
+pkg_zabbix_sender_homepage = https://github.com/stalkermn/zabbix_sender
+pkg_zabbix_sender_fetch = git
+pkg_zabbix_sender_repo = https://github.com/stalkermn/zabbix_sender.git
+pkg_zabbix_sender_commit = master
+
+PACKAGES += zeta
+pkg_zeta_name = zeta
+pkg_zeta_description = HTTP access log parser in Erlang
+pkg_zeta_homepage = https://github.com/s1n4/zeta
+pkg_zeta_fetch = git
+pkg_zeta_repo = https://github.com/s1n4/zeta
+pkg_zeta_commit = master
+
+PACKAGES += zippers
+pkg_zippers_name = zippers
+pkg_zippers_description = A library for functional zipper data structures in Erlang.
+pkg_zippers_homepage = https://github.com/ferd/zippers
+pkg_zippers_fetch = git
+pkg_zippers_repo = https://github.com/ferd/zippers
+pkg_zippers_commit = master
+
+PACKAGES += zlists
+pkg_zlists_name = zlists
+pkg_zlists_description = Erlang lazy lists library.
+pkg_zlists_homepage = https://github.com/vjache/erlang-zlists
+pkg_zlists_fetch = git
+pkg_zlists_repo = https://github.com/vjache/erlang-zlists
+pkg_zlists_commit = master
+
+PACKAGES += zraft_lib
+pkg_zraft_lib_name = zraft_lib
+pkg_zraft_lib_description = Erlang Raft consensus protocol implementation
+pkg_zraft_lib_homepage = https://github.com/dreyk/zraft_lib
+pkg_zraft_lib_fetch = git
+pkg_zraft_lib_repo = https://github.com/dreyk/zraft_lib
+pkg_zraft_lib_commit = master
+
+PACKAGES += zucchini
+pkg_zucchini_name = zucchini
+pkg_zucchini_description = An Erlang INI parser
+pkg_zucchini_homepage = https://github.com/devinus/zucchini
+pkg_zucchini_fetch = git
+pkg_zucchini_repo = https://github.com/devinus/zucchini
+pkg_zucchini_commit = master
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: search
+
+define pkg_print
+ $(verbose) printf "%s\n" \
+ $(if $(call core_eq,$(1),$(pkg_$(1)_name)),,"Pkg name: $(1)") \
+ "App name: $(pkg_$(1)_name)" \
+ "Description: $(pkg_$(1)_description)" \
+ "Home page: $(pkg_$(1)_homepage)" \
+ "Fetch with: $(pkg_$(1)_fetch)" \
+ "Repository: $(pkg_$(1)_repo)" \
+ "Commit: $(pkg_$(1)_commit)" \
+ ""
+
+endef
+
+search:
+ifdef q
+ $(foreach p,$(PACKAGES), \
+ $(if $(findstring $(call core_lc,$(q)),$(call core_lc,$(pkg_$(p)_name) $(pkg_$(p)_description))), \
+ $(call pkg_print,$(p))))
+else
+ $(foreach p,$(PACKAGES),$(call pkg_print,$(p)))
+endif
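+
+# Example usage of the search target above (a sketch; package names illustrative):
+#   $ make search q=websocket   # match name or description, case-insensitive
+#   $ make search               # print the whole package index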
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-deps clean-tmp-deps.log
+
+# Configuration.
+
+ifdef OTP_DEPS
+$(warning The variable OTP_DEPS is deprecated in favor of LOCAL_DEPS.)
+endif
+
+IGNORE_DEPS ?=
+export IGNORE_DEPS
+
+APPS_DIR ?= $(CURDIR)/apps
+export APPS_DIR
+
+DEPS_DIR ?= $(CURDIR)/deps
+export DEPS_DIR
+
+REBAR_DEPS_DIR = $(DEPS_DIR)
+export REBAR_DEPS_DIR
+
+REBAR_GIT ?= https://github.com/rebar/rebar
+REBAR_COMMIT ?= 576e12171ab8d69b048b827b92aa65d067deea01
+
+# External "early" plugins (see core/plugins.mk for regular plugins).
+# They both use the core_dep_plugin macro.
+
+define core_dep_plugin
+ifeq ($(2),$(PROJECT))
+-include $$(patsubst $(PROJECT)/%,%,$(1))
+else
+-include $(DEPS_DIR)/$(1)
+
+$(DEPS_DIR)/$(1): $(DEPS_DIR)/$(2) ;
+endif
+endef
+
+DEP_EARLY_PLUGINS ?=
+
+$(foreach p,$(DEP_EARLY_PLUGINS),\
+ $(eval $(if $(findstring /,$p),\
+ $(call core_dep_plugin,$p,$(firstword $(subst /, ,$p))),\
+ $(call core_dep_plugin,$p/early-plugins.mk,$p))))
+
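+# Example (dependency names illustrative): load a dependency's default
+# early-plugins.mk, or point at an explicit file inside the dependency:
+#   DEP_EARLY_PLUGINS = my_dep
+#   DEP_EARLY_PLUGINS = my_dep/mk/early-plugins.mk
+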
+# Query functions.
+
+query_fetch_method = $(if $(dep_$(1)),$(call _qfm_dep,$(word 1,$(dep_$(1)))),$(call _qfm_pkg,$(1)))
+_qfm_dep = $(if $(dep_fetch_$(1)),$(1),$(if $(IS_DEP),legacy,fail))
+_qfm_pkg = $(if $(pkg_$(1)_fetch),$(pkg_$(1)_fetch),fail)
+
+query_name = $(if $(dep_$(1)),$(1),$(if $(pkg_$(1)_name),$(pkg_$(1)_name),$(1)))
+
+query_repo = $(call _qr,$(1),$(call query_fetch_method,$(1)))
+_qr = $(if $(query_repo_$(2)),$(call query_repo_$(2),$(1)),$(call dep_repo,$(1)))
+
+query_repo_default = $(if $(dep_$(1)),$(word 2,$(dep_$(1))),$(pkg_$(1)_repo))
+query_repo_git = $(patsubst git://github.com/%,https://github.com/%,$(call query_repo_default,$(1)))
+query_repo_git-subfolder = $(call query_repo_git,$(1))
+query_repo_git-submodule = -
+query_repo_hg = $(call query_repo_default,$(1))
+query_repo_svn = $(call query_repo_default,$(1))
+query_repo_cp = $(call query_repo_default,$(1))
+query_repo_ln = $(call query_repo_default,$(1))
+query_repo_hex = https://hex.pm/packages/$(if $(word 3,$(dep_$(1))),$(word 3,$(dep_$(1))),$(1))
+query_repo_fail = -
+query_repo_legacy = -
+
+query_version = $(call _qv,$(1),$(call query_fetch_method,$(1)))
+_qv = $(if $(query_version_$(2)),$(call query_version_$(2),$(1)),$(call dep_commit,$(1)))
+
+query_version_default = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(word 3,$(dep_$(1))),$(pkg_$(1)_commit)))
+query_version_git = $(call query_version_default,$(1))
+query_version_git-subfolder = $(call query_version_git,$(1))
+query_version_git-submodule = -
+query_version_hg = $(call query_version_default,$(1))
+query_version_svn = -
+query_version_cp = -
+query_version_ln = -
+query_version_hex = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(word 2,$(dep_$(1))),$(pkg_$(1)_commit)))
+query_version_fail = -
+query_version_legacy = -
+
+query_extra = $(call _qe,$(1),$(call query_fetch_method,$(1)))
+_qe = $(if $(query_extra_$(2)),$(call query_extra_$(2),$(1)),-)
+
+query_extra_git = -
+query_extra_git-subfolder = $(if $(dep_$(1)),subfolder=$(word 4,$(dep_$(1))),-)
+query_extra_git-submodule = -
+query_extra_hg = -
+query_extra_svn = -
+query_extra_cp = -
+query_extra_ln = -
+query_extra_hex = $(if $(dep_$(1)),package-name=$(word 3,$(dep_$(1))),-)
+query_extra_fail = -
+query_extra_legacy = -
+
+query_absolute_path = $(addprefix $(DEPS_DIR)/,$(call query_name,$(1)))
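+
+# For instance, with a dependency declared as (illustrative)
+#   dep_cowboy = git https://github.com/ninenines/cowboy 2.9.0
+# query_fetch_method yields "git", query_repo the URL, query_version "2.9.0".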
+
+# Deprecated legacy query functions.
+dep_fetch = $(call query_fetch_method,$(1))
+dep_name = $(call query_name,$(1))
+dep_repo = $(call query_repo_git,$(1))
+dep_commit = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(if $(filter hex,$(word 1,$(dep_$(1)))),$(word 2,$(dep_$(1))),$(word 3,$(dep_$(1)))),$(pkg_$(1)_commit)))
+
+LOCAL_DEPS_DIRS = $(foreach a,$(LOCAL_DEPS),$(if $(wildcard $(APPS_DIR)/$(a)),$(APPS_DIR)/$(a)))
+ALL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(foreach dep,$(filter-out $(IGNORE_DEPS),$(BUILD_DEPS) $(DEPS)),$(call dep_name,$(dep))))
+
+# When we are calling an app directly we don't want to include it here,
+# otherwise it would be treated both as an app and as the top-level project.
+ALL_APPS_DIRS = $(if $(wildcard $(APPS_DIR)/),$(filter-out $(APPS_DIR),$(shell find $(APPS_DIR) -maxdepth 1 -type d)))
+ifdef ROOT_DIR
+ifndef IS_APP
+ALL_APPS_DIRS := $(filter-out $(APPS_DIR)/$(notdir $(CURDIR)),$(ALL_APPS_DIRS))
+endif
+endif
+
+ifeq ($(filter $(APPS_DIR) $(DEPS_DIR),$(subst :, ,$(ERL_LIBS))),)
+ifeq ($(ERL_LIBS),)
+ ERL_LIBS = $(APPS_DIR):$(DEPS_DIR)
+else
+ ERL_LIBS := $(ERL_LIBS):$(APPS_DIR):$(DEPS_DIR)
+endif
+endif
+export ERL_LIBS
+
+export NO_AUTOPATCH
+
+# Verbosity.
+
+dep_verbose_0 = @echo " DEP $1 ($(call dep_commit,$1))";
+dep_verbose_2 = set -x;
+dep_verbose = $(dep_verbose_$(V))
+
+# Optimization: don't recompile deps unless truly necessary.
+
+ifndef IS_DEP
+ifneq ($(MAKELEVEL),0)
+$(shell rm -f ebin/dep_built)
+endif
+endif
+
+# Core targets.
+
+ALL_APPS_DIRS_TO_BUILD = $(if $(LOCAL_DEPS_DIRS)$(IS_APP),$(LOCAL_DEPS_DIRS),$(ALL_APPS_DIRS))
+
+apps:: $(ALL_APPS_DIRS) clean-tmp-deps.log | $(ERLANG_MK_TMP)
+# Create the ebin directory for all apps to make sure Erlang recognizes them
+# as proper OTP applications when using -include_lib. This is a temporary
+# fix; a proper fix would be to compile apps/* in the right order.
+ifndef IS_APP
+ifneq ($(ALL_APPS_DIRS),)
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ mkdir -p $$dep/ebin; \
+ done
+endif
+endif
+# At the toplevel: if LOCAL_DEPS is defined with at least one local app, only
+# compile that list of apps. Otherwise, compile everything.
+# Within an app: compile all LOCAL_DEPS that are (uncompiled) local apps.
+ifneq ($(ALL_APPS_DIRS_TO_BUILD),)
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS_TO_BUILD); do \
+ if grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/apps.log; then \
+ :; \
+ else \
+ echo $$dep >> $(ERLANG_MK_TMP)/apps.log; \
+ $(MAKE) -C $$dep $(if $(IS_TEST),test-build-app) IS_APP=1; \
+ fi \
+ done
+endif
+
+clean-tmp-deps.log:
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_TMP)/apps.log $(ERLANG_MK_TMP)/deps.log
+endif
+
+# Erlang.mk does not rebuild dependencies after they have been compiled
+# once. Developers working on the top-level project and on some of its
+# dependencies at the same time may want to change this behavior.
+# There are two solutions:
+# 1. Set `FULL=1` so that all dependencies are visited and
+# recursively recompiled if necessary.
+# 2. Set `FORCE_REBUILD=` to the specific list of dependencies that
+# should be recompiled (instead of the whole set).
+
+FORCE_REBUILD ?=
+
+ifeq ($(origin FULL),undefined)
+ifneq ($(strip $(force_rebuild_dep)$(FORCE_REBUILD)),)
+define force_rebuild_dep
+echo "$(FORCE_REBUILD)" | grep -qw "$$(basename "$1")"
+endef
+endif
+endif
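+
+# For example (dependency names illustrative):
+#   $ make FORCE_REBUILD="cowlib ranch"   # always recompile these two deps
+#   $ make FULL=1                         # revisit and recompile all deps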
+
+ifneq ($(SKIP_DEPS),)
+deps::
+else
+deps:: $(ALL_DEPS_DIRS) apps clean-tmp-deps.log | $(ERLANG_MK_TMP)
+ifneq ($(ALL_DEPS_DIRS),)
+ $(verbose) set -e; for dep in $(ALL_DEPS_DIRS); do \
+ if grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/deps.log; then \
+ :; \
+ else \
+ echo $$dep >> $(ERLANG_MK_TMP)/deps.log; \
+ if [ -z "$(strip $(FULL))" ] $(if $(force_rebuild_dep),&& ! ($(call force_rebuild_dep,$$dep)),) && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ elif [ -f $$dep/GNUmakefile ] || [ -f $$dep/makefile ] || [ -f $$dep/Makefile ]; then \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ else \
+ echo "Error: No Makefile to build dependency $$dep." >&2; \
+ exit 2; \
+ fi \
+ fi \
+ done
+endif
+endif
+
+# Deps related targets.
+
+# @todo rename GNUmakefile and makefile into Makefile first, if they exist
+# While the Makefile could also be named GNUmakefile or makefile,
+# in practice only Makefile has been needed so far.
+define dep_autopatch
+ if [ -f $(DEPS_DIR)/$(1)/erlang.mk ]; then \
+ rm -rf $(DEPS_DIR)/$1/ebin/; \
+ $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+ $(call dep_autopatch_erlang_mk,$(1)); \
+ elif [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+ if [ -f $(DEPS_DIR)/$1/rebar.lock ]; then \
+ $(call dep_autopatch2,$1); \
+ elif [ 0 != `grep -c "include ../\w*\.mk" $(DEPS_DIR)/$(1)/Makefile` ]; then \
+ $(call dep_autopatch2,$(1)); \
+ elif [ 0 != `grep -ci "^[^#].*rebar" $(DEPS_DIR)/$(1)/Makefile` ]; then \
+ $(call dep_autopatch2,$(1)); \
+ elif [ -n "`find $(DEPS_DIR)/$(1)/ -type f -name \*.mk -not -name erlang.mk -exec grep -i "^[^#].*rebar" '{}' \;`" ]; then \
+ $(call dep_autopatch2,$(1)); \
+ fi \
+ else \
+ if [ ! -d $(DEPS_DIR)/$(1)/src/ ]; then \
+ $(call dep_autopatch_noop,$(1)); \
+ else \
+ $(call dep_autopatch2,$(1)); \
+ fi \
+ fi
+endef
+
+define dep_autopatch2
+ ! test -f $(DEPS_DIR)/$1/ebin/$1.app || \
+ mv -n $(DEPS_DIR)/$1/ebin/$1.app $(DEPS_DIR)/$1/src/$1.app.src; \
+ rm -f $(DEPS_DIR)/$1/ebin/$1.app; \
+ if [ -f $(DEPS_DIR)/$1/src/$1.app.src.script ]; then \
+ $(call erlang,$(call dep_autopatch_appsrc_script.erl,$(1))); \
+ fi; \
+ $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+ if [ -f $(DEPS_DIR)/$(1)/rebar -o -f $(DEPS_DIR)/$(1)/rebar.config -o -f $(DEPS_DIR)/$(1)/rebar.config.script -o -f $(DEPS_DIR)/$1/rebar.lock ]; then \
+ $(call dep_autopatch_fetch_rebar); \
+ $(call dep_autopatch_rebar,$(1)); \
+ else \
+ $(call dep_autopatch_gen,$(1)); \
+ fi
+endef
+
+define dep_autopatch_noop
+ printf "noop:\n" > $(DEPS_DIR)/$(1)/Makefile
+endef
+
+# Replace "include erlang.mk" with a line that will load the parent Erlang.mk
+# if given. Do it for all 3 possible Makefile file names.
+ifeq ($(NO_AUTOPATCH_ERLANG_MK),)
+define dep_autopatch_erlang_mk
+ for f in Makefile makefile GNUmakefile; do \
+ if [ -f $(DEPS_DIR)/$1/$$f ]; then \
+ sed -i.bak s/'include *erlang.mk'/'include $$(if $$(ERLANG_MK_FILENAME),$$(ERLANG_MK_FILENAME),erlang.mk)'/ $(DEPS_DIR)/$1/$$f; \
+ fi \
+ done
+endef
+else
+define dep_autopatch_erlang_mk
+ :
+endef
+endif
+
+define dep_autopatch_gen
+ printf "%s\n" \
+ "ERLC_OPTS = +debug_info" \
+ "include ../../erlang.mk" > $(DEPS_DIR)/$(1)/Makefile
+endef
+
+# We use flock/lockf when available to avoid concurrency issues.
+define dep_autopatch_fetch_rebar
+ if command -v flock >/dev/null; then \
+ flock $(ERLANG_MK_TMP)/rebar.lock sh -c "$(call dep_autopatch_fetch_rebar2)"; \
+ elif command -v lockf >/dev/null; then \
+ lockf $(ERLANG_MK_TMP)/rebar.lock sh -c "$(call dep_autopatch_fetch_rebar2)"; \
+ else \
+ $(call dep_autopatch_fetch_rebar2); \
+ fi
+endef
+
+define dep_autopatch_fetch_rebar2
+ if [ ! -d $(ERLANG_MK_TMP)/rebar ]; then \
+ git clone -q -n -- $(REBAR_GIT) $(ERLANG_MK_TMP)/rebar; \
+ cd $(ERLANG_MK_TMP)/rebar; \
+ git checkout -q $(REBAR_COMMIT); \
+ ./bootstrap; \
+ cd -; \
+ fi
+endef
+
+define dep_autopatch_rebar
+ if [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+ mv $(DEPS_DIR)/$(1)/Makefile $(DEPS_DIR)/$(1)/Makefile.orig.mk; \
+ fi; \
+ $(call erlang,$(call dep_autopatch_rebar.erl,$(1))); \
+ rm -f $(DEPS_DIR)/$(1)/ebin/$(1).app
+endef
+
+define dep_autopatch_rebar.erl
+ application:load(rebar),
+ application:set_env(rebar, log_level, debug),
+ rmemo:start(),
+ Conf1 = case file:consult("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config)") of
+ {ok, Conf0} -> Conf0;
+ _ -> []
+ end,
+ {Conf, OsEnv} = fun() ->
+ case filelib:is_file("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)") of
+ false -> {Conf1, []};
+ true ->
+ Bindings0 = erl_eval:new_bindings(),
+ Bindings1 = erl_eval:add_binding('CONFIG', Conf1, Bindings0),
+ Bindings = erl_eval:add_binding('SCRIPT', "$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings1),
+ Before = os:getenv(),
+ {ok, Conf2} = file:script("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings),
+ {Conf2, lists:foldl(fun(E, Acc) -> lists:delete(E, Acc) end, os:getenv(), Before)}
+ end
+ end(),
+ Write = fun (Text) ->
+ file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/Makefile)", Text, [append])
+ end,
+ Escape = fun (Text) ->
+ re:replace(Text, "\\\\$$", "\$$$$", [global, {return, list}])
+ end,
+ Write("IGNORE_DEPS += edown eper eunit_formatters meck node_package "
+ "rebar_lock_deps_plugin rebar_vsn_plugin reltool_util\n"),
+ Write("C_SRC_DIR = /path/do/not/exist\n"),
+ Write("C_SRC_TYPE = rebar\n"),
+ Write("DRV_CFLAGS = -fPIC\nexport DRV_CFLAGS\n"),
+ Write(["ERLANG_ARCH = ", rebar_utils:wordsize(), "\nexport ERLANG_ARCH\n"]),
+ ToList = fun
+ (V) when is_atom(V) -> atom_to_list(V);
+ (V) when is_list(V) -> "'\\"" ++ V ++ "\\"'"
+ end,
+ fun() ->
+ Write("ERLC_OPTS = +debug_info\nexport ERLC_OPTS\n"),
+ case lists:keyfind(erl_opts, 1, Conf) of
+ false -> ok;
+ {_, ErlOpts} ->
+ lists:foreach(fun
+ ({d, D}) ->
+ Write("ERLC_OPTS += -D" ++ ToList(D) ++ "=1\n");
+ ({d, DKey, DVal}) ->
+ Write("ERLC_OPTS += -D" ++ ToList(DKey) ++ "=" ++ ToList(DVal) ++ "\n");
+ ({i, I}) ->
+ Write(["ERLC_OPTS += -I ", I, "\n"]);
+ ({platform_define, Regex, D}) ->
+ case rebar_utils:is_arch(Regex) of
+ true -> Write("ERLC_OPTS += -D" ++ ToList(D) ++ "=1\n");
+ false -> ok
+ end;
+ ({parse_transform, PT}) ->
+ Write("ERLC_OPTS += +'{parse_transform, " ++ ToList(PT) ++ "}'\n");
+ (_) -> ok
+ end, ErlOpts)
+ end,
+ Write("\n")
+ end(),
+ GetHexVsn = fun(N, NP) ->
+ case file:consult("$(call core_native_path,$(DEPS_DIR)/$1/rebar.lock)") of
+ {ok, Lock} ->
+ io:format("~p~n", [Lock]),
+ case lists:keyfind("1.1.0", 1, Lock) of
+ {_, LockPkgs} ->
+ io:format("~p~n", [LockPkgs]),
+ case lists:keyfind(atom_to_binary(N, latin1), 1, LockPkgs) of
+ {_, {pkg, _, Vsn}, _} ->
+ io:format("~p~n", [Vsn]),
+ {N, {hex, NP, binary_to_list(Vsn)}};
+ _ ->
+ false
+ end;
+ _ ->
+ false
+ end;
+ _ ->
+ false
+ end
+ end,
+ SemVsn = fun
+ ("~>" ++ S0) ->
+ S = case S0 of
+ " " ++ S1 -> S1;
+ _ -> S0
+ end,
+ case length([ok || $$. <- S]) of
+ 0 -> S ++ ".0.0";
+ 1 -> S ++ ".0";
+ _ -> S
+ end;
+ (S) -> S
+ end,
+ fun() ->
+ File = case lists:keyfind(deps, 1, Conf) of
+ false -> [];
+ {_, Deps} ->
+ [begin case case Dep of
+ N when is_atom(N) -> GetHexVsn(N, N);
+ {N, S} when is_atom(N), is_list(S) -> {N, {hex, N, SemVsn(S)}};
+ {N, {pkg, NP}} when is_atom(N) -> GetHexVsn(N, NP);
+ {N, S, {pkg, NP}} -> {N, {hex, NP, S}};
+ {N, S} when is_tuple(S) -> {N, S};
+ {N, _, S} -> {N, S};
+ {N, _, S, _} -> {N, S};
+ _ -> false
+ end of
+ false -> ok;
+ {Name, Source} ->
+ {Method, Repo, Commit} = case Source of
+ {hex, NPV, V} -> {hex, V, NPV};
+ {git, R} -> {git, R, master};
+ {M, R, {branch, C}} -> {M, R, C};
+ {M, R, {ref, C}} -> {M, R, C};
+ {M, R, {tag, C}} -> {M, R, C};
+ {M, R, C} -> {M, R, C}
+ end,
+ Write(io_lib:format("DEPS += ~s\ndep_~s = ~s ~s ~s~n", [Name, Name, Method, Repo, Commit]))
+ end end || Dep <- Deps]
+ end
+ end(),
+ fun() ->
+ case lists:keyfind(erl_first_files, 1, Conf) of
+ false -> ok;
+ {_, Files} ->
+ Names = [[" ", case lists:reverse(F) of
+ "lre." ++ Elif -> lists:reverse(Elif);
+ "lrx." ++ Elif -> lists:reverse(Elif);
+ "lry." ++ Elif -> lists:reverse(Elif);
+ Elif -> lists:reverse(Elif)
+ end] || "src/" ++ F <- Files],
+ Write(io_lib:format("COMPILE_FIRST +=~s\n", [Names]))
+ end
+ end(),
+ Write("\n\nrebar_dep: preprocess pre-deps deps pre-app app\n"),
+ Write("\npreprocess::\n"),
+ Write("\npre-deps::\n"),
+ Write("\npre-app::\n"),
+ PatchHook = fun(Cmd) ->
+ Cmd2 = re:replace(Cmd, "^([g]?make)(.*)( -C.*)", "\\\\1\\\\3\\\\2", [{return, list}]),
+ case Cmd2 of
+ "make -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1);
+ "gmake -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1);
+ "make " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1);
+ "gmake " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1);
+ _ -> Escape(Cmd)
+ end
+ end,
+ fun() ->
+ case lists:keyfind(pre_hooks, 1, Conf) of
+ false -> ok;
+ {_, Hooks} ->
+ [case H of
+ {'get-deps', Cmd} ->
+ Write("\npre-deps::\n\t" ++ PatchHook(Cmd) ++ "\n");
+ {compile, Cmd} ->
+ Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n");
+ {Regex, compile, Cmd} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n");
+ false -> ok
+ end;
+ _ -> ok
+ end || H <- Hooks]
+ end
+ end(),
+ ShellToMk = fun(V0) ->
+ V1 = re:replace(V0, "[$$][(]", "$$\(shell ", [global]),
+ V = re:replace(V1, "([$$])(?![(])(\\\\w*)", "\\\\1(\\\\2)", [global]),
+ re:replace(V, "-Werror\\\\b", "", [{return, list}, global])
+ end,
+ PortSpecs = fun() ->
+ case lists:keyfind(port_specs, 1, Conf) of
+ false ->
+ case filelib:is_dir("$(call core_native_path,$(DEPS_DIR)/$1/c_src)") of
+ false -> [];
+ true ->
+ [{"priv/" ++ proplists:get_value(so_name, Conf, "$(1)_drv.so"),
+ proplists:get_value(port_sources, Conf, ["c_src/*.c"]), []}]
+ end;
+ {_, Specs} ->
+ lists:flatten([case S of
+ {Output, Input} -> {ShellToMk(Output), Input, []};
+ {Regex, Output, Input} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {ShellToMk(Output), Input, []};
+ false -> []
+ end;
+ {Regex, Output, Input, [{env, Env}]} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {ShellToMk(Output), Input, Env};
+ false -> []
+ end
+ end || S <- Specs])
+ end
+ end(),
+ PortSpecWrite = fun (Text) ->
+ file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/c_src/Makefile.erlang.mk)", Text, [append])
+ end,
+ case PortSpecs of
+ [] -> ok;
+ _ ->
+ Write("\npre-app::\n\t@$$\(MAKE) --no-print-directory -f c_src/Makefile.erlang.mk\n"),
+ PortSpecWrite(io_lib:format("ERL_CFLAGS ?= -finline-functions -Wall -fPIC -I \\"~s/erts-~s/include\\" -I \\"~s\\"\n",
+ [code:root_dir(), erlang:system_info(version), code:lib_dir(erl_interface, include)])),
+ PortSpecWrite(io_lib:format("ERL_LDFLAGS ?= -L \\"~s\\" -lei\n",
+ [code:lib_dir(erl_interface, lib)])),
+ [PortSpecWrite(["\n", E, "\n"]) || E <- OsEnv],
+ FilterEnv = fun(Env) ->
+ lists:flatten([case E of
+ {_, _} -> E;
+ {Regex, K, V} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {K, V};
+ false -> []
+ end
+ end || E <- Env])
+ end,
+ MergeEnv = fun(Env) ->
+ lists:foldl(fun ({K, V}, Acc) ->
+ case lists:keyfind(K, 1, Acc) of
+ false -> [{K, rebar_utils:expand_env_variable(V, K, "")}|Acc];
+ {_, V0} -> [{K, rebar_utils:expand_env_variable(V, K, V0)}|Acc]
+ end
+ end, [], Env)
+ end,
+ PortEnv = case lists:keyfind(port_env, 1, Conf) of
+ false -> [];
+ {_, PortEnv0} -> FilterEnv(PortEnv0)
+ end,
+ PortSpec = fun ({Output, Input0, Env}) ->
+ filelib:ensure_dir("$(call core_native_path,$(DEPS_DIR)/$1/)" ++ Output),
+ Input = [[" ", I] || I <- Input0],
+ PortSpecWrite([
+ [["\n", K, " = ", ShellToMk(V)] || {K, V} <- lists:reverse(MergeEnv(PortEnv))],
+ case $(PLATFORM) of
+ darwin -> "\n\nLDFLAGS += -flat_namespace -undefined suppress";
+ _ -> ""
+ end,
+ "\n\nall:: ", Output, "\n\t@:\n\n",
+ "%.o: %.c\n\t$$\(CC) -c -o $$\@ $$\< $$\(CFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.C\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.cc\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.cpp\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ [[Output, ": ", K, " += ", ShellToMk(V), "\n"] || {K, V} <- lists:reverse(MergeEnv(FilterEnv(Env)))],
+ Output, ": $$\(foreach ext,.c .C .cc .cpp,",
+ "$$\(patsubst %$$\(ext),%.o,$$\(filter %$$\(ext),$$\(wildcard", Input, "))))\n",
+ "\t$$\(CC) -o $$\@ $$\? $$\(LDFLAGS) $$\(ERL_LDFLAGS) $$\(DRV_LDFLAGS) $$\(EXE_LDFLAGS)",
+ case {filename:extension(Output), $(PLATFORM)} of
+ {[], _} -> "\n";
+ {_, darwin} -> "\n";
+ _ -> " -shared\n"
+ end])
+ end,
+ [PortSpec(S) || S <- PortSpecs]
+ end,
+ fun() ->
+ case lists:keyfind(plugins, 1, Conf) of
+ false -> ok;
+ {_, Plugins0} ->
+ Plugins = [P || P <- Plugins0, is_tuple(P)],
+ case lists:keyfind('lfe-compile', 1, Plugins) of
+ false -> ok;
+ _ -> Write("\nBUILD_DEPS = lfe lfe.mk\ndep_lfe.mk = git https://github.com/ninenines/lfe.mk master\nDEP_PLUGINS = lfe.mk\n")
+ end
+ end
+ end(),
+ Write("\ninclude $$\(if $$\(ERLANG_MK_FILENAME),$$\(ERLANG_MK_FILENAME),erlang.mk)"),
+ RunPlugin = fun(Plugin, Step) ->
+ case erlang:function_exported(Plugin, Step, 2) of
+ false -> ok;
+ true ->
+ c:cd("$(call core_native_path,$(DEPS_DIR)/$1/)"),
+ Ret = Plugin:Step({config, "", Conf, dict:new(), dict:new(), dict:new(),
+ dict:store(base_dir, "", dict:new())}, undefined),
+ io:format("rebar plugin ~p step ~p ret ~p~n", [Plugin, Step, Ret])
+ end
+ end,
+ fun() ->
+ case lists:keyfind(plugins, 1, Conf) of
+ false -> ok;
+ {_, Plugins0} ->
+ Plugins = [P || P <- Plugins0, is_atom(P)],
+ [begin
+ case lists:keyfind(deps, 1, Conf) of
+ false -> ok;
+ {_, Deps} ->
+ case lists:keyfind(P, 1, Deps) of
+ false -> ok;
+ _ ->
+ Path = "$(call core_native_path,$(DEPS_DIR)/)" ++ atom_to_list(P),
+ io:format("~s", [os:cmd("$(MAKE) -C $(call core_native_path,$(DEPS_DIR)/$1) " ++ Path)]),
+ io:format("~s", [os:cmd("$(MAKE) -C " ++ Path ++ " IS_DEP=1")]),
+ code:add_patha(Path ++ "/ebin")
+ end
+ end
+ end || P <- Plugins],
+ [case code:load_file(P) of
+ {module, P} -> ok;
+ _ ->
+ case lists:keyfind(plugin_dir, 1, Conf) of
+ false -> ok;
+ {_, PluginsDir} ->
+ ErlFile = "$(call core_native_path,$(DEPS_DIR)/$1/)" ++ PluginsDir ++ "/" ++ atom_to_list(P) ++ ".erl",
+ {ok, P, Bin} = compile:file(ErlFile, [binary]),
+ {module, P} = code:load_binary(P, ErlFile, Bin)
+ end
+ end || P <- Plugins],
+ [RunPlugin(P, preprocess) || P <- Plugins],
+ [RunPlugin(P, pre_compile) || P <- Plugins],
+ [RunPlugin(P, compile) || P <- Plugins]
+ end
+ end(),
+ halt()
+endef
+
+define dep_autopatch_appsrc_script.erl
+ AppSrc = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+ AppSrcScript = AppSrc ++ ".script",
+ {ok, Conf0} = file:consult(AppSrc),
+ Bindings0 = erl_eval:new_bindings(),
+ Bindings1 = erl_eval:add_binding('CONFIG', Conf0, Bindings0),
+ Bindings = erl_eval:add_binding('SCRIPT', AppSrcScript, Bindings1),
+ Conf = case file:script(AppSrcScript, Bindings) of
+ {ok, [C]} -> C;
+ {ok, C} -> C
+ end,
+ ok = file:write_file(AppSrc, io_lib:format("~p.~n", [Conf])),
+ halt()
+endef
+
+define dep_autopatch_appsrc.erl
+ AppSrcOut = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+ AppSrcIn = case filelib:is_regular(AppSrcOut) of false -> "$(call core_native_path,$(DEPS_DIR)/$1/ebin/$1.app)"; true -> AppSrcOut end,
+ case filelib:is_regular(AppSrcIn) of
+ false -> ok;
+ true ->
+ {ok, [{application, $(1), L0}]} = file:consult(AppSrcIn),
+ L1 = lists:keystore(modules, 1, L0, {modules, []}),
+ L2 = case lists:keyfind(vsn, 1, L1) of
+ {_, git} -> lists:keyreplace(vsn, 1, L1, {vsn, lists:droplast(os:cmd("git -C $(DEPS_DIR)/$1 describe --dirty --tags --always"))});
+ {_, {cmd, _}} -> lists:keyreplace(vsn, 1, L1, {vsn, "cmd"});
+ _ -> L1
+ end,
+ L3 = case lists:keyfind(registered, 1, L2) of false -> [{registered, []}|L2]; _ -> L2 end,
+ ok = file:write_file(AppSrcOut, io_lib:format("~p.~n", [{application, $(1), L3}])),
+ case AppSrcOut of AppSrcIn -> ok; _ -> ok = file:delete(AppSrcIn) end
+ end,
+ halt()
+endef
+
+define dep_fetch_git
+ git clone -q -n -- $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && git checkout -q $(call dep_commit,$(1));
+endef
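+
+# A git dependency is declared as "dep_<name> = git <repository> <ref>",
+# for example (illustrative):
+#   DEPS += cowboy
+#   dep_cowboy = git https://github.com/ninenines/cowboy 2.9.0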
+
+define dep_fetch_git-subfolder
+ mkdir -p $(ERLANG_MK_TMP)/git-subfolder; \
+ git clone -q -n -- $(call dep_repo,$1) \
+ $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1); \
+ cd $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1) \
+ && git checkout -q $(call dep_commit,$1); \
+ ln -s $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1)/$(word 4,$(dep_$(1))) \
+ $(DEPS_DIR)/$(call dep_name,$1);
+endef
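+
+# Declared as "dep_<name> = git-subfolder <repository> <ref> <path>"; the
+# fourth word selects the subfolder linked into DEPS_DIR (illustrative):
+#   dep_my_app = git-subfolder https://github.com/user/monorepo master apps/my_app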
+
+define dep_fetch_git-submodule
+ git submodule update --init -- $(DEPS_DIR)/$1;
+endef
+
+define dep_fetch_hg
+ hg clone -q -U $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && hg update -q $(call dep_commit,$(1));
+endef
+
+define dep_fetch_svn
+ svn checkout -q $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+define dep_fetch_cp
+ cp -R $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+define dep_fetch_ln
+ ln -s $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+# Hex dependencies only carry a package version, so there is no need to look
+# in the Erlang.mk package index.
+define dep_fetch_hex
+ mkdir -p $(ERLANG_MK_TMP)/hex $(DEPS_DIR)/$1; \
+ $(call core_http_get,$(ERLANG_MK_TMP)/hex/$1.tar,\
+ https://repo.hex.pm/tarballs/$(if $(word 3,$(dep_$1)),$(word 3,$(dep_$1)),$1)-$(strip $(word 2,$(dep_$1))).tar); \
+ tar -xOf $(ERLANG_MK_TMP)/hex/$1.tar contents.tar.gz | tar -C $(DEPS_DIR)/$1 -xzf -;
+endef
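+
+# A hex dependency is declared as "dep_<name> = hex <version> [<pkg-name>]",
+# where the optional third word overrides the Hex package name (illustrative):
+#   DEPS += jsx
+#   dep_jsx = hex 2.9.0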
+
+define dep_fetch_fail
+ echo "Error: Unknown or invalid dependency: $(1)." >&2; \
+ exit 78;
+endef
+
+# Kept for compatibility with older Erlang.mk configurations.
+define dep_fetch_legacy
+ $(warning WARNING: '$(1)' dependency configuration uses deprecated format.) \
+ git clone -q -n -- $(word 1,$(dep_$(1))) $(DEPS_DIR)/$(1); \
+ cd $(DEPS_DIR)/$(1) && git checkout -q $(if $(word 2,$(dep_$(1))),$(word 2,$(dep_$(1))),master);
+endef
+
+define dep_target
+$(DEPS_DIR)/$(call dep_name,$1): | $(ERLANG_MK_TMP)
+ $(eval DEP_NAME := $(call dep_name,$1))
+ $(eval DEP_STR := $(if $(filter $1,$(DEP_NAME)),$1,"$1 ($(DEP_NAME))"))
+ $(verbose) if test -d $(APPS_DIR)/$(DEP_NAME); then \
+ echo "Error: Dependency" $(DEP_STR) "conflicts with application found in $(APPS_DIR)/$(DEP_NAME)." >&2; \
+ exit 17; \
+ fi
+ $(verbose) mkdir -p $(DEPS_DIR)
+ $(dep_verbose) $(call dep_fetch_$(strip $(call dep_fetch,$(1))),$(1))
+ $(verbose) if [ -f $(DEPS_DIR)/$(1)/configure.ac -o -f $(DEPS_DIR)/$(1)/configure.in ] \
+ && [ ! -f $(DEPS_DIR)/$(1)/configure ]; then \
+ echo " AUTO " $(DEP_STR); \
+ cd $(DEPS_DIR)/$(1) && autoreconf -Wall -vif -I m4; \
+ fi
+ - $(verbose) if [ -f $(DEPS_DIR)/$(DEP_NAME)/configure ]; then \
+ echo " CONF " $(DEP_STR); \
+ cd $(DEPS_DIR)/$(DEP_NAME) && ./configure; \
+ fi
+ifeq ($(filter $(1),$(NO_AUTOPATCH)),)
+ $(verbose) $$(MAKE) --no-print-directory autopatch-$(DEP_NAME)
+endif
+
+.PHONY: autopatch-$(call dep_name,$1)
+
+autopatch-$(call dep_name,$1)::
+ $(verbose) if [ "$(1)" = "amqp_client" -a "$(RABBITMQ_CLIENT_PATCH)" ]; then \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \
+ echo " PATCH Downloading rabbitmq-codegen"; \
+ git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \
+ fi; \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-server ]; then \
+ echo " PATCH Downloading rabbitmq-server"; \
+ git clone https://github.com/rabbitmq/rabbitmq-server.git $(DEPS_DIR)/rabbitmq-server; \
+ fi; \
+ ln -s $(DEPS_DIR)/amqp_client/deps/rabbit_common-0.0.0 $(DEPS_DIR)/rabbit_common; \
+ elif [ "$(1)" = "rabbit" -a "$(RABBITMQ_SERVER_PATCH)" ]; then \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \
+ echo " PATCH Downloading rabbitmq-codegen"; \
+ git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \
+ fi \
+ elif [ "$1" = "elixir" -a "$(ELIXIR_PATCH)" ]; then \
+ ln -s lib/elixir/ebin $(DEPS_DIR)/elixir/; \
+ else \
+ $$(call dep_autopatch,$(call dep_name,$1)) \
+ fi
+endef
+
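+# Autopatching can be skipped for specific dependencies by listing them in
+# NO_AUTOPATCH, for example (illustrative): NO_AUTOPATCH = cowboy
+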
+$(foreach dep,$(BUILD_DEPS) $(DEPS),$(eval $(call dep_target,$(dep))))
+
+ifndef IS_APP
+clean:: clean-apps
+
+clean-apps:
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ $(MAKE) -C $$dep clean IS_APP=1; \
+ done
+
+distclean:: distclean-apps
+
+distclean-apps:
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ $(MAKE) -C $$dep distclean IS_APP=1; \
+ done
+endif
+
+ifndef SKIP_DEPS
+distclean:: distclean-deps
+
+distclean-deps:
+ $(gen_verbose) rm -rf $(DEPS_DIR)
+endif
+
+# Forward-declare variables used in core/deps-tools.mk. This is required
+# in case plugins use them.
+
+ERLANG_MK_RECURSIVE_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-deps-list.log
+ERLANG_MK_RECURSIVE_DOC_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-doc-deps-list.log
+ERLANG_MK_RECURSIVE_REL_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-rel-deps-list.log
+ERLANG_MK_RECURSIVE_TEST_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-test-deps-list.log
+ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-shell-deps-list.log
+
+ERLANG_MK_QUERY_DEPS_FILE = $(ERLANG_MK_TMP)/query-deps.log
+ERLANG_MK_QUERY_DOC_DEPS_FILE = $(ERLANG_MK_TMP)/query-doc-deps.log
+ERLANG_MK_QUERY_REL_DEPS_FILE = $(ERLANG_MK_TMP)/query-rel-deps.log
+ERLANG_MK_QUERY_TEST_DEPS_FILE = $(ERLANG_MK_TMP)/query-test-deps.log
+ERLANG_MK_QUERY_SHELL_DEPS_FILE = $(ERLANG_MK_TMP)/query-shell-deps.log
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: clean-app
+
+# Configuration.
+
+ERLC_OPTS ?= -Werror +debug_info +warn_export_vars +warn_shadow_vars \
+ +warn_obsolete_guard # +bin_opt_info +warn_export_all +warn_missing_spec
+COMPILE_FIRST ?=
+COMPILE_FIRST_PATHS = $(addprefix src/,$(addsuffix .erl,$(COMPILE_FIRST)))
+ERLC_EXCLUDE ?=
+ERLC_EXCLUDE_PATHS = $(addprefix src/,$(addsuffix .erl,$(ERLC_EXCLUDE)))
+
+ERLC_ASN1_OPTS ?=
+
+ERLC_MIB_OPTS ?=
+COMPILE_MIB_FIRST ?=
+COMPILE_MIB_FIRST_PATHS = $(addprefix mibs/,$(addsuffix .mib,$(COMPILE_MIB_FIRST)))
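+
+# Projects typically override these before including erlang.mk, for example
+# (values illustrative):
+#   ERLC_OPTS += +warn_missing_spec
+#   COMPILE_FIRST = my_behaviour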
+
+# Verbosity.
+
+app_verbose_0 = @echo " APP " $(PROJECT);
+app_verbose_2 = set -x;
+app_verbose = $(app_verbose_$(V))
+
+appsrc_verbose_0 = @echo " APP " $(PROJECT).app.src;
+appsrc_verbose_2 = set -x;
+appsrc_verbose = $(appsrc_verbose_$(V))
+
+makedep_verbose_0 = @echo " DEPEND" $(PROJECT).d;
+makedep_verbose_2 = set -x;
+makedep_verbose = $(makedep_verbose_$(V))
+
+erlc_verbose_0 = @echo " ERLC " $(filter-out $(patsubst %,%.erl,$(ERLC_EXCLUDE)),\
+ $(filter %.erl %.core,$(?F)));
+erlc_verbose_2 = set -x;
+erlc_verbose = $(erlc_verbose_$(V))
+
+xyrl_verbose_0 = @echo " XYRL " $(filter %.xrl %.yrl,$(?F));
+xyrl_verbose_2 = set -x;
+xyrl_verbose = $(xyrl_verbose_$(V))
+
+asn1_verbose_0 = @echo " ASN1 " $(filter %.asn1,$(?F));
+asn1_verbose_2 = set -x;
+asn1_verbose = $(asn1_verbose_$(V))
+
+mib_verbose_0 = @echo " MIB " $(filter %.bin %.mib,$(?F));
+mib_verbose_2 = set -x;
+mib_verbose = $(mib_verbose_$(V))
+
+ifneq ($(wildcard src/),)
+
+# Targets.
+
+app:: $(if $(wildcard ebin/test),clean) deps
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d
+ $(verbose) $(MAKE) --no-print-directory app-build
+
+ifeq ($(wildcard src/$(PROJECT_MOD).erl),)
+define app_file
+{application, '$(PROJECT)', [
+ {description, "$(PROJECT_DESCRIPTION)"},
+ {vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP),
+ {id$(comma)$(space)"$(1)"}$(comma))
+ {modules, [$(call comma_list,$(2))]},
+ {registered, []},
+ {applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(foreach dep,$(DEPS),$(call dep_name,$(dep))))]},
+ {env, $(subst \,\\,$(PROJECT_ENV))}$(if $(findstring {,$(PROJECT_APP_EXTRA_KEYS)),$(comma)$(newline)$(tab)$(subst \,\\,$(PROJECT_APP_EXTRA_KEYS)),)
+]}.
+endef
+else
+define app_file
+{application, '$(PROJECT)', [
+ {description, "$(PROJECT_DESCRIPTION)"},
+ {vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP),
+ {id$(comma)$(space)"$(1)"}$(comma))
+ {modules, [$(call comma_list,$(2))]},
+ {registered, [$(call comma_list,$(PROJECT)_sup $(PROJECT_REGISTERED))]},
+ {applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(foreach dep,$(DEPS),$(call dep_name,$(dep))))]},
+ {mod, {$(PROJECT_MOD), []}},
+ {env, $(subst \,\\,$(PROJECT_ENV))}$(if $(findstring {,$(PROJECT_APP_EXTRA_KEYS)),$(comma)$(newline)$(tab)$(subst \,\\,$(PROJECT_APP_EXTRA_KEYS)),)
+]}.
+endef
+endif
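+
+# With PROJECT = my_app and PROJECT_VERSION = 1.0.0 (and no PROJECT_MOD),
+# the first template above renders roughly as (a sketch):
+#   {application, 'my_app', [
+#       {description, ""},
+#       {vsn, "1.0.0"},
+#       {modules, ['my_app_mod']},
+#       {registered, []},
+#       {applications, [kernel,stdlib]},
+#       {env, []}
+#   ]}.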
+
+app-build: ebin/$(PROJECT).app
+ $(verbose) :
+
+# Source files.
+
+ALL_SRC_FILES := $(sort $(call core_find,src/,*))
+
+ERL_FILES := $(filter %.erl,$(ALL_SRC_FILES))
+CORE_FILES := $(filter %.core,$(ALL_SRC_FILES))
+
+# ASN.1 files.
+
+ifneq ($(wildcard asn1/),)
+ASN1_FILES = $(sort $(call core_find,asn1/,*.asn1))
+ERL_FILES += $(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES))))
+
+define compile_asn1
+ $(verbose) mkdir -p include/
+ $(asn1_verbose) erlc -v -I include/ -o asn1/ +noobj $(ERLC_ASN1_OPTS) $(1)
+ $(verbose) mv asn1/*.erl src/
+ -$(verbose) mv asn1/*.hrl include/
+ $(verbose) mv asn1/*.asn1db include/
+endef
+
+$(PROJECT).d:: $(ASN1_FILES)
+ $(if $(strip $?),$(call compile_asn1,$?))
+endif
+
+# SNMP MIB files.
+
+ifneq ($(wildcard mibs/),)
+MIB_FILES = $(sort $(call core_find,mibs/,*.mib))
+
+$(PROJECT).d:: $(COMPILE_MIB_FIRST_PATHS) $(MIB_FILES)
+ $(verbose) mkdir -p include/ priv/mibs/
+ $(mib_verbose) erlc -v $(ERLC_MIB_OPTS) -o priv/mibs/ -I priv/mibs/ $?
+ $(mib_verbose) erlc -o include/ -- $(addprefix priv/mibs/,$(patsubst %.mib,%.bin,$(notdir $?)))
+endif
+
+# Leex and Yecc files.
+
+XRL_FILES := $(filter %.xrl,$(ALL_SRC_FILES))
+XRL_ERL_FILES = $(addprefix src/,$(patsubst %.xrl,%.erl,$(notdir $(XRL_FILES))))
+ERL_FILES += $(XRL_ERL_FILES)
+
+YRL_FILES := $(filter %.yrl,$(ALL_SRC_FILES))
+YRL_ERL_FILES = $(addprefix src/,$(patsubst %.yrl,%.erl,$(notdir $(YRL_FILES))))
+ERL_FILES += $(YRL_ERL_FILES)
+
+$(PROJECT).d:: $(XRL_FILES) $(YRL_FILES)
+ $(if $(strip $?),$(xyrl_verbose) erlc -v -o src/ $(YRL_ERLC_OPTS) $?)
+
+# Erlang and Core Erlang files.
+
+define makedep.erl
+ E = ets:new(makedep, [bag]),
+ G = digraph:new([acyclic]),
+ ErlFiles = lists:usort(string:tokens("$(ERL_FILES)", " ")),
+ DepsDir = "$(call core_native_path,$(DEPS_DIR))",
+ AppsDir = "$(call core_native_path,$(APPS_DIR))",
+ DepsDirsSrc = "$(if $(wildcard $(DEPS_DIR)/*/src), $(call core_native_path,$(wildcard $(DEPS_DIR)/*/src)))",
+ DepsDirsInc = "$(if $(wildcard $(DEPS_DIR)/*/include), $(call core_native_path,$(wildcard $(DEPS_DIR)/*/include)))",
+ AppsDirsSrc = "$(if $(wildcard $(APPS_DIR)/*/src), $(call core_native_path,$(wildcard $(APPS_DIR)/*/src)))",
+ AppsDirsInc = "$(if $(wildcard $(APPS_DIR)/*/include), $(call core_native_path,$(wildcard $(APPS_DIR)/*/include)))",
+ DepsDirs = lists:usort(string:tokens(DepsDirsSrc++DepsDirsInc, " ")),
+ AppsDirs = lists:usort(string:tokens(AppsDirsSrc++AppsDirsInc, " ")),
+ Modules = [{list_to_atom(filename:basename(F, ".erl")), F} || F <- ErlFiles],
+ Add = fun (Mod, Dep) ->
+ case lists:keyfind(Dep, 1, Modules) of
+ false -> ok;
+ {_, DepFile} ->
+ {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+ ets:insert(E, {ModFile, DepFile}),
+ digraph:add_vertex(G, Mod),
+ digraph:add_vertex(G, Dep),
+ digraph:add_edge(G, Mod, Dep)
+ end
+ end,
+ AddHd = fun (F, Mod, DepFile) ->
+ case file:open(DepFile, [read]) of
+ {error, enoent} ->
+ ok;
+ {ok, Fd} ->
+ {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+ case ets:match(E, {ModFile, DepFile}) of
+ [] ->
+ ets:insert(E, {ModFile, DepFile}),
+ F(F, Fd, Mod,0);
+ _ -> ok
+ end
+ end
+ end,
+ SearchHrl = fun
+ F(_Hrl, []) -> {error,enoent};
+ F(Hrl, [Dir|Dirs]) ->
+ HrlF = filename:join([Dir,Hrl]),
+ case filelib:is_file(HrlF) of
+ true ->
+ {ok, HrlF};
+ false -> F(Hrl,Dirs)
+ end
+ end,
+ Attr = fun
+ (_F, Mod, behavior, Dep) ->
+ Add(Mod, Dep);
+ (_F, Mod, behaviour, Dep) ->
+ Add(Mod, Dep);
+ (_F, Mod, compile, {parse_transform, Dep}) ->
+ Add(Mod, Dep);
+ (_F, Mod, compile, Opts) when is_list(Opts) ->
+ case proplists:get_value(parse_transform, Opts) of
+ undefined -> ok;
+ Dep -> Add(Mod, Dep)
+ end;
+ (F, Mod, include, Hrl) ->
+ case SearchHrl(Hrl, ["src", "include",AppsDir,DepsDir]++AppsDirs++DepsDirs) of
+ {ok, FoundHrl} -> AddHd(F, Mod, FoundHrl);
+ {error, _} -> false
+ end;
+ (F, Mod, include_lib, Hrl) ->
+ case SearchHrl(Hrl, ["src", "include",AppsDir,DepsDir]++AppsDirs++DepsDirs) of
+ {ok, FoundHrl} -> AddHd(F, Mod, FoundHrl);
+ {error, _} -> false
+ end;
+ (F, Mod, import, {Imp, _}) ->
+ IsFile =
+ case lists:keyfind(Imp, 1, Modules) of
+ false -> false;
+ {_, FilePath} -> filelib:is_file(FilePath)
+ end,
+ case IsFile of
+ false -> ok;
+ true -> Add(Mod, Imp)
+ end;
+ (_, _, _, _) -> ok
+ end,
+ MakeDepend = fun
+ (F, Fd, Mod, StartLocation) ->
+ {ok, Filename} = file:pid2name(Fd),
+ case io:parse_erl_form(Fd, undefined, StartLocation) of
+ {ok, AbsData, EndLocation} ->
+ case AbsData of
+ {attribute, _, Key, Value} ->
+ Attr(F, Mod, Key, Value),
+ F(F, Fd, Mod, EndLocation);
+ _ -> F(F, Fd, Mod, EndLocation)
+ end;
+ {eof, _ } -> file:close(Fd);
+ {error, ErrorDescription } ->
+ file:close(Fd);
+ {error, ErrorInfo, ErrorLocation} ->
+ F(F, Fd, Mod, ErrorLocation)
+ end,
+ ok
+ end,
+ [begin
+ Mod = list_to_atom(filename:basename(F, ".erl")),
+ case file:open(F, [read]) of
+ {ok, Fd} -> MakeDepend(MakeDepend, Fd, Mod,0);
+ {error, enoent} -> ok
+ end
+ end || F <- ErlFiles],
+ Depend = sofs:to_external(sofs:relation_to_family(sofs:relation(ets:tab2list(E)))),
+ CompileFirst = [X || X <- lists:reverse(digraph_utils:topsort(G)), [] =/= digraph:in_neighbours(G, X)],
+ TargetPath = fun(Target) ->
+ case lists:keyfind(Target, 1, Modules) of
+ false -> "";
+ {_, DepFile} ->
+ DirSubname = tl(string:tokens(filename:dirname(DepFile), "/")),
+ string:join(DirSubname ++ [atom_to_list(Target)], "/")
+ end
+ end,
+ Output0 = [
+ "# Generated by Erlang.mk. Edit at your own risk!\n\n",
+ [[F, "::", [[" ", D] || D <- Deps], "; @touch \$$@\n"] || {F, Deps} <- Depend],
+ "\nCOMPILE_FIRST +=", [[" ", TargetPath(CF)] || CF <- CompileFirst], "\n"
+ ],
+ Output = case "é" of
+ [233] -> unicode:characters_to_binary(Output0);
+ _ -> Output0
+ end,
+ ok = file:write_file("$(1)", Output),
+ halt()
+endef
+
+ifeq ($(if $(NO_MAKEDEP),$(wildcard $(PROJECT).d),),)
+$(PROJECT).d:: $(ERL_FILES) $(call core_find,include/,*.hrl) $(MAKEFILE_LIST)
+ $(makedep_verbose) $(call erlang,$(call makedep.erl,$@))
+endif
+
+ifeq ($(IS_APP)$(IS_DEP),)
+ifneq ($(words $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES)),0)
+# Rebuild everything when the Makefile changes.
+$(ERLANG_MK_TMP)/last-makefile-change: $(MAKEFILE_LIST) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES); \
+ touch -c $(PROJECT).d; \
+ fi
+ $(verbose) touch $@
+
+$(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES):: $(ERLANG_MK_TMP)/last-makefile-change
+ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change
+endif
+endif
+
+$(PROJECT).d::
+ $(verbose) :
+
+include $(wildcard $(PROJECT).d)
+
+ebin/$(PROJECT).app:: ebin/
+
+ebin/:
+ $(verbose) mkdir -p ebin/
+
+define compile_erl
+ $(erlc_verbose) erlc -v $(if $(IS_DEP),$(filter-out -Werror,$(ERLC_OPTS)),$(ERLC_OPTS)) -o ebin/ \
+ -pa ebin/ -I include/ $(filter-out $(ERLC_EXCLUDE_PATHS),$(COMPILE_FIRST_PATHS) $(1))
+endef
+
+define validate_app_file
+ case file:consult("ebin/$(PROJECT).app") of
+ {ok, _} -> halt();
+ _ -> halt(1)
+ end
+endef
+
+ebin/$(PROJECT).app:: $(ERL_FILES) $(CORE_FILES) $(wildcard src/$(PROJECT).app.src)
+ $(eval FILES_TO_COMPILE := $(filter-out src/$(PROJECT).app.src,$?))
+ $(if $(strip $(FILES_TO_COMPILE)),$(call compile_erl,$(FILES_TO_COMPILE)))
+# Older git versions do not have the --first-parent flag. Do without in that case.
+ $(eval GITDESCRIBE := $(shell git describe --dirty --abbrev=7 --tags --always --first-parent 2>/dev/null \
+ || git describe --dirty --abbrev=7 --tags --always 2>/dev/null || true))
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(filter-out $(ERLC_EXCLUDE_PATHS),$(ERL_FILES) $(CORE_FILES) $(BEAM_FILES)))))))
+ifeq ($(wildcard src/$(PROJECT).app.src),)
+ $(app_verbose) printf '$(subst %,%%,$(subst $(newline),\n,$(subst ','\'',$(call app_file,$(GITDESCRIBE),$(MODULES)))))' \
+ > ebin/$(PROJECT).app
+ $(verbose) if ! $(call erlang,$(call validate_app_file)); then \
+ echo "The .app file produced is invalid. Please verify the value of PROJECT_ENV." >&2; \
+ exit 1; \
+ fi
+else
+ $(verbose) if [ -z "$$(grep -e '^[^%]*{\s*modules\s*,' src/$(PROJECT).app.src)" ]; then \
+ echo "Empty modules entry not found in $(PROJECT).app.src. Please consult the erlang.mk documentation for instructions." >&2; \
+ exit 1; \
+ fi
+ $(appsrc_verbose) cat src/$(PROJECT).app.src \
+ | sed "s/{[[:space:]]*modules[[:space:]]*,[[:space:]]*\[\]}/{modules, \[$(call comma_list,$(MODULES))\]}/" \
+ | sed "s/{id,[[:space:]]*\"git\"}/{id, \"$(subst /,\/,$(GITDESCRIBE))\"}/" \
+ > ebin/$(PROJECT).app
+endif
+ifneq ($(wildcard src/$(PROJECT).appup),)
+ $(verbose) cp src/$(PROJECT).appup ebin/
+endif
+
+clean:: clean-app
+
+clean-app:
+ $(gen_verbose) rm -rf $(PROJECT).d ebin/ priv/mibs/ $(XRL_ERL_FILES) $(YRL_ERL_FILES) \
+ $(addprefix include/,$(patsubst %.mib,%.hrl,$(notdir $(MIB_FILES)))) \
+ $(addprefix include/,$(patsubst %.asn1,%.hrl,$(notdir $(ASN1_FILES)))) \
+ $(addprefix include/,$(patsubst %.asn1,%.asn1db,$(notdir $(ASN1_FILES)))) \
+ $(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES))))
+
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Viktor Söderqvist <viktor@zuiderkwast.se>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: docs-deps
+
+# Configuration.
+
+ALL_DOC_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(DOC_DEPS))
+
+# Targets.
+
+$(foreach dep,$(DOC_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+doc-deps:
+else
+doc-deps: $(ALL_DOC_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_DOC_DEPS_DIRS) ; do $(MAKE) -C $$dep IS_DEP=1; done
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: rel-deps
+
+# Configuration.
+
+ALL_REL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(REL_DEPS))
+
+# Targets.
+
+$(foreach dep,$(REL_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+rel-deps:
+else
+rel-deps: $(ALL_REL_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_REL_DEPS_DIRS) ; do $(MAKE) -C $$dep; done
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: test-deps test-dir test-build clean-test-dir
+
+# Configuration.
+
+TEST_DIR ?= $(CURDIR)/test
+
+ALL_TEST_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(TEST_DEPS))
+
+TEST_ERLC_OPTS ?= +debug_info +warn_export_vars +warn_shadow_vars +warn_obsolete_guard
+TEST_ERLC_OPTS += -DTEST=1
+
+# Targets.
+
+$(foreach dep,$(TEST_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+test-deps:
+else
+test-deps: $(ALL_TEST_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_TEST_DEPS_DIRS) ; do \
+ if [ -z "$(strip $(FULL))" ] && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ else \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ fi \
+ done
+endif
+
+ifneq ($(wildcard $(TEST_DIR)),)
+test-dir: $(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build
+ @:
+
+test_erlc_verbose_0 = @echo " ERLC " $(filter-out $(patsubst %,%.erl,$(ERLC_EXCLUDE)),\
+ $(filter %.erl %.core,$(notdir $(FILES_TO_COMPILE))));
+test_erlc_verbose_2 = set -x;
+test_erlc_verbose = $(test_erlc_verbose_$(V))
+
+define compile_test_erl
+ $(test_erlc_verbose) erlc -v $(TEST_ERLC_OPTS) -o $(TEST_DIR) \
+ -pa ebin/ -I include/ $(1)
+endef
+
+ERL_TEST_FILES = $(call core_find,$(TEST_DIR)/,*.erl)
+$(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build: $(ERL_TEST_FILES) $(MAKEFILE_LIST)
+ $(eval FILES_TO_COMPILE := $(if $(filter $(MAKEFILE_LIST),$?),$(filter $(ERL_TEST_FILES),$^),$?))
+ $(if $(strip $(FILES_TO_COMPILE)),$(call compile_test_erl,$(FILES_TO_COMPILE)) && touch $@)
+endif
+
+test-build:: IS_TEST=1
+test-build:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build:: $(if $(wildcard src),$(if $(wildcard ebin/test),,clean)) $(if $(IS_APP),,deps test-deps)
+# We already compiled everything when IS_APP=1.
+ifndef IS_APP
+ifneq ($(wildcard src),)
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(verbose) $(MAKE) --no-print-directory app-build ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(gen_verbose) touch ebin/test
+endif
+ifneq ($(wildcard $(TEST_DIR)),)
+ $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+endif
+endif
+
+# Roughly the same as test-build, but when IS_APP=1.
+# We only care about compiling the current application.
+ifdef IS_APP
+test-build-app:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build-app:: deps test-deps
+ifneq ($(wildcard src),)
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(verbose) $(MAKE) --no-print-directory app-build ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(gen_verbose) touch ebin/test
+endif
+ifneq ($(wildcard $(TEST_DIR)),)
+ $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+endif
+endif
+
+clean:: clean-test-dir
+
+clean-test-dir:
+ifneq ($(wildcard $(TEST_DIR)/*.beam),)
+ $(gen_verbose) rm -f $(TEST_DIR)/*.beam $(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: rebar.config
+
+# We strip out -Werror because we don't want to fail due to
+# warnings when used as a dependency.
+
+compat_prepare_erlc_opts = $(shell echo "$1" | sed 's/, */,/g')
+
+define compat_convert_erlc_opts
+$(if $(filter-out -Werror,$1),\
+ $(if $(findstring +,$1),\
+ $(shell echo $1 | cut -b 2-)))
+endef
+
+define compat_erlc_opts_to_list
+[$(call comma_list,$(foreach o,$(call compat_prepare_erlc_opts,$1),$(call compat_convert_erlc_opts,$o)))]
+endef
+
+define compat_rebar_config
+{deps, [
+$(call comma_list,$(foreach d,$(DEPS),\
+ $(if $(filter hex,$(call dep_fetch,$d)),\
+ {$(call dep_name,$d)$(comma)"$(call dep_repo,$d)"},\
+ {$(call dep_name,$d)$(comma)".*"$(comma){git,"$(call dep_repo,$d)"$(comma)"$(call dep_commit,$d)"}})))
+]}.
+{erl_opts, $(call compat_erlc_opts_to_list,$(ERLC_OPTS))}.
+endef
+
+rebar.config:
+ $(gen_verbose) $(call core_render,compat_rebar_config,rebar.config)
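+
+# For illustration only (dependency name, URL and version are hypothetical):
+# a project with DEPS = cowlib and ERLC_OPTS = -Werror +debug_info could
+# produce a rebar.config along these lines:
+#
+#   {deps, [
+#   {cowlib,".*",{git,"https://github.com/ninenines/cowlib","master"}}
+#   ]}.
+#   {erl_opts, [debug_info]}.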
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter asciideck,$(DEPS) $(DOC_DEPS)),asciideck)
+
+.PHONY: asciidoc asciidoc-guide asciidoc-manual install-asciidoc distclean-asciidoc-guide distclean-asciidoc-manual
+
+# Core targets.
+
+docs:: asciidoc
+
+distclean:: distclean-asciidoc-guide distclean-asciidoc-manual
+
+# Plugin-specific targets.
+
+asciidoc: asciidoc-guide asciidoc-manual
+
+# User guide.
+
+ifeq ($(wildcard doc/src/guide/book.asciidoc),)
+asciidoc-guide:
+else
+asciidoc-guide: distclean-asciidoc-guide doc-deps
+ a2x -v -f pdf doc/src/guide/book.asciidoc && mv doc/src/guide/book.pdf doc/guide.pdf
+ a2x -v -f chunked doc/src/guide/book.asciidoc && mv doc/src/guide/book.chunked/ doc/html/
+
+distclean-asciidoc-guide:
+ $(gen_verbose) rm -rf doc/html/ doc/guide.pdf
+endif
+
+# Man pages.
+
+ASCIIDOC_MANUAL_FILES := $(wildcard doc/src/manual/*.asciidoc)
+
+ifeq ($(ASCIIDOC_MANUAL_FILES),)
+asciidoc-manual:
+else
+
+# Configuration.
+
+MAN_INSTALL_PATH ?= /usr/local/share/man
+MAN_SECTIONS ?= 3 7
+MAN_PROJECT ?= $(shell echo $(PROJECT) | sed 's/^./\U&\E/')
+MAN_VERSION ?= $(PROJECT_VERSION)
+
+# Plugin-specific targets.
+
+define asciidoc2man.erl
+try
+ [begin
+ io:format(" ADOC ~s~n", [F]),
+ ok = asciideck:to_manpage(asciideck:parse_file(F), #{
+ compress => gzip,
+ outdir => filename:dirname(F),
+ extra2 => "$(MAN_PROJECT) $(MAN_VERSION)",
+ extra3 => "$(MAN_PROJECT) Function Reference"
+ })
+ end || F <- [$(shell echo $(addprefix $(comma)\",$(addsuffix \",$1)) | sed 's/^.//')]],
+ halt(0)
+catch C:E ->
+ io:format("Exception ~p:~p~nStacktrace: ~p~n", [C, E, erlang:get_stacktrace()]),
+ halt(1)
+end.
+endef
+
+asciidoc-manual:: doc-deps
+
+asciidoc-manual:: $(ASCIIDOC_MANUAL_FILES)
+ $(gen_verbose) $(call erlang,$(call asciidoc2man.erl,$?))
+ $(verbose) $(foreach s,$(MAN_SECTIONS),mkdir -p doc/man$s/ && mv doc/src/manual/*.$s.gz doc/man$s/;)
+
+install-docs:: install-asciidoc
+
+install-asciidoc: asciidoc-manual
+ $(foreach s,$(MAN_SECTIONS),\
+ mkdir -p $(MAN_INSTALL_PATH)/man$s/ && \
+ install -g `id -g` -o `id -u` -m 0644 doc/man$s/*.gz $(MAN_INSTALL_PATH)/man$s/;)
+
+distclean-asciidoc-manual:
+ $(gen_verbose) rm -rf $(addprefix doc/man,$(MAN_SECTIONS))
+endif
+endif
+
+# Copyright (c) 2014-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: bootstrap bootstrap-lib bootstrap-rel new list-templates
+
+# Core targets.
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Bootstrap targets:" \
+ " bootstrap Generate a skeleton of an OTP application" \
+ " bootstrap-lib Generate a skeleton of an OTP library" \
+ " bootstrap-rel Generate the files needed to build a release" \
+ " new-app in=NAME Create a new local OTP application NAME" \
+ " new-lib in=NAME Create a new local OTP library NAME" \
+ " new t=TPL n=NAME Generate a module NAME based on the template TPL" \
+		" new t=T n=N in=APP Generate a module N based on the template T in APP" \
+ " list-templates List available templates"
+
+# Bootstrap templates.
+
+define bs_appsrc
+{application, $p, [
+ {description, ""},
+ {vsn, "0.1.0"},
+ {id, "git"},
+ {modules, []},
+ {registered, []},
+ {applications, [
+ kernel,
+ stdlib
+ ]},
+ {mod, {$p_app, []}},
+ {env, []}
+]}.
+endef
+
+define bs_appsrc_lib
+{application, $p, [
+ {description, ""},
+ {vsn, "0.1.0"},
+ {id, "git"},
+ {modules, []},
+ {registered, []},
+ {applications, [
+ kernel,
+ stdlib
+ ]}
+]}.
+endef
+
+# To prevent autocompletion issues with ZSH, we add "include erlang.mk"
+# separately during the actual bootstrap.
+define bs_Makefile
+PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.1.0
+$(if $(SP),
+# Whitespace to be used when creating files from templates.
+SP = $(SP)
+)
+endef
+
+define bs_apps_Makefile
+PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.1.0
+$(if $(SP),
+# Whitespace to be used when creating files from templates.
+SP = $(SP)
+)
+# Make sure we know where the applications are located.
+ROOT_DIR ?= $(call core_relpath,$(dir $(ERLANG_MK_FILENAME)),$(APPS_DIR)/app)
+APPS_DIR ?= ..
+DEPS_DIR ?= $(call core_relpath,$(DEPS_DIR),$(APPS_DIR)/app)
+
+include $$(ROOT_DIR)/erlang.mk
+endef
+
+define bs_app
+-module($p_app).
+-behaviour(application).
+
+-export([start/2]).
+-export([stop/1]).
+
+start(_Type, _Args) ->
+ $p_sup:start_link().
+
+stop(_State) ->
+ ok.
+endef
+
+define bs_relx_config
+{release, {$p_release, "1"}, [$p, sasl, runtime_tools]}.
+{extended_start_script, true}.
+{sys_config, "config/sys.config"}.
+{vm_args, "config/vm.args"}.
+endef
+
+define bs_sys_config
+[
+].
+endef
+
+define bs_vm_args
+-name $p@127.0.0.1
+-setcookie $p
+-heart
+endef
+
+# Normal templates.
+
+define tpl_supervisor
+-module($(n)).
+-behaviour(supervisor).
+
+-export([start_link/0]).
+-export([init/1]).
+
+start_link() ->
+ supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+init([]) ->
+ Procs = [],
+ {ok, {{one_for_one, 1, 5}, Procs}}.
+endef
+
+define tpl_gen_server
+-module($(n)).
+-behaviour(gen_server).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_server.
+-export([init/1]).
+-export([handle_call/3]).
+-export([handle_cast/2]).
+-export([handle_info/2]).
+-export([terminate/2]).
+-export([code_change/3]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_server:start_link(?MODULE, [], []).
+
+%% gen_server.
+
+init([]) ->
+ {ok, #state{}}.
+
+handle_call(_Request, _From, State) ->
+ {reply, ignored, State}.
+
+handle_cast(_Msg, State) ->
+ {noreply, State}.
+
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+endef
+
+define tpl_module
+-module($(n)).
+-export([]).
+endef
+
+define tpl_cowboy_http
+-module($(n)).
+-behaviour(cowboy_http_handler).
+
+-export([init/3]).
+-export([handle/2]).
+-export([terminate/3]).
+
+-record(state, {
+}).
+
+init(_, Req, _Opts) ->
+ {ok, Req, #state{}}.
+
+handle(Req, State=#state{}) ->
+ {ok, Req2} = cowboy_req:reply(200, Req),
+ {ok, Req2, State}.
+
+terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_gen_fsm
+-module($(n)).
+-behaviour(gen_fsm).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_fsm.
+-export([init/1]).
+-export([state_name/2]).
+-export([handle_event/3]).
+-export([state_name/3]).
+-export([handle_sync_event/4]).
+-export([handle_info/3]).
+-export([terminate/3]).
+-export([code_change/4]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_fsm:start_link(?MODULE, [], []).
+
+%% gen_fsm.
+
+init([]) ->
+ {ok, state_name, #state{}}.
+
+state_name(_Event, StateData) ->
+ {next_state, state_name, StateData}.
+
+handle_event(_Event, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+state_name(_Event, _From, StateData) ->
+ {reply, ignored, state_name, StateData}.
+
+handle_sync_event(_Event, _From, StateName, StateData) ->
+ {reply, ignored, StateName, StateData}.
+
+handle_info(_Info, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+terminate(_Reason, _StateName, _StateData) ->
+ ok.
+
+code_change(_OldVsn, StateName, StateData, _Extra) ->
+ {ok, StateName, StateData}.
+endef
+
+define tpl_gen_statem
+-module($(n)).
+-behaviour(gen_statem).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_statem.
+-export([callback_mode/0]).
+-export([init/1]).
+-export([state_name/3]).
+-export([handle_event/4]).
+-export([terminate/3]).
+-export([code_change/4]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_statem:start_link(?MODULE, [], []).
+
+%% gen_statem.
+
+callback_mode() ->
+ state_functions.
+
+init([]) ->
+ {ok, state_name, #state{}}.
+
+state_name(_EventType, _EventData, StateData) ->
+ {next_state, state_name, StateData}.
+
+handle_event(_EventType, _EventData, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+terminate(_Reason, _StateName, _StateData) ->
+ ok.
+
+code_change(_OldVsn, StateName, StateData, _Extra) ->
+ {ok, StateName, StateData}.
+endef
+
+define tpl_cowboy_loop
+-module($(n)).
+-behaviour(cowboy_loop_handler).
+
+-export([init/3]).
+-export([info/3]).
+-export([terminate/3]).
+
+-record(state, {
+}).
+
+init(_, Req, _Opts) ->
+ {loop, Req, #state{}, 5000, hibernate}.
+
+info(_Info, Req, State) ->
+ {loop, Req, State, hibernate}.
+
+terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_cowboy_rest
+-module($(n)).
+
+-export([init/3]).
+-export([content_types_provided/2]).
+-export([get_html/2]).
+
+init(_, _Req, _Opts) ->
+ {upgrade, protocol, cowboy_rest}.
+
+content_types_provided(Req, State) ->
+ {[{{<<"text">>, <<"html">>, '*'}, get_html}], Req, State}.
+
+get_html(Req, State) ->
+ {<<"<html><body>This is REST!</body></html>">>, Req, State}.
+endef
+
+define tpl_cowboy_ws
+-module($(n)).
+-behaviour(cowboy_websocket_handler).
+
+-export([init/3]).
+-export([websocket_init/3]).
+-export([websocket_handle/3]).
+-export([websocket_info/3]).
+-export([websocket_terminate/3]).
+
+-record(state, {
+}).
+
+init(_, _, _) ->
+ {upgrade, protocol, cowboy_websocket}.
+
+websocket_init(_, Req, _Opts) ->
+ Req2 = cowboy_req:compact(Req),
+ {ok, Req2, #state{}}.
+
+websocket_handle({text, Data}, Req, State) ->
+ {reply, {text, Data}, Req, State};
+websocket_handle({binary, Data}, Req, State) ->
+ {reply, {binary, Data}, Req, State};
+websocket_handle(_Frame, Req, State) ->
+ {ok, Req, State}.
+
+websocket_info(_Info, Req, State) ->
+ {ok, Req, State}.
+
+websocket_terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_ranch_protocol
+-module($(n)).
+-behaviour(ranch_protocol).
+
+-export([start_link/4]).
+-export([init/4]).
+
+-type opts() :: [].
+-export_type([opts/0]).
+
+-record(state, {
+ socket :: inet:socket(),
+ transport :: module()
+}).
+
+start_link(Ref, Socket, Transport, Opts) ->
+ Pid = spawn_link(?MODULE, init, [Ref, Socket, Transport, Opts]),
+ {ok, Pid}.
+
+-spec init(ranch:ref(), inet:socket(), module(), opts()) -> ok.
+init(Ref, Socket, Transport, _Opts) ->
+ ok = ranch:accept_ack(Ref),
+ loop(#state{socket=Socket, transport=Transport}).
+
+loop(State) ->
+ loop(State).
+endef
+
+# Plugin-specific targets.
+
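+# A note on the template whitespace below: when SP is set, the $(subst)
+# removes the "a" markers from a generated word list, leaving exactly SP
+# spaces; otherwise WS falls back to a tab.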
+ifndef WS
+ifdef SP
+WS = $(subst a,,a $(wordlist 1,$(SP),a a a a a a a a a a a a a a a a a a a a))
+else
+WS = $(tab)
+endif
+endif
+
+bootstrap:
+ifneq ($(wildcard src/),)
+ $(error Error: src/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(eval n := $(PROJECT)_sup)
+ $(verbose) $(call core_render,bs_Makefile,Makefile)
+ $(verbose) echo "include erlang.mk" >> Makefile
+ $(verbose) mkdir src/
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc,src/$(PROJECT).app.src)
+endif
+ $(verbose) $(call core_render,bs_app,src/$(PROJECT)_app.erl)
+ $(verbose) $(call core_render,tpl_supervisor,src/$(PROJECT)_sup.erl)
+
+bootstrap-lib:
+ifneq ($(wildcard src/),)
+ $(error Error: src/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(verbose) $(call core_render,bs_Makefile,Makefile)
+ $(verbose) echo "include erlang.mk" >> Makefile
+ $(verbose) mkdir src/
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc_lib,src/$(PROJECT).app.src)
+endif
+
+bootstrap-rel:
+ifneq ($(wildcard relx.config),)
+ $(error Error: relx.config already exists)
+endif
+ifneq ($(wildcard config/),)
+ $(error Error: config/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(verbose) $(call core_render,bs_relx_config,relx.config)
+ $(verbose) mkdir config/
+ $(verbose) $(call core_render,bs_sys_config,config/sys.config)
+ $(verbose) $(call core_render,bs_vm_args,config/vm.args)
+
+new-app:
+ifndef in
+ $(error Usage: $(MAKE) new-app in=APP)
+endif
+ifneq ($(wildcard $(APPS_DIR)/$in),)
+ $(error Error: Application $in already exists)
+endif
+ $(eval p := $(in))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(eval n := $(in)_sup)
+ $(verbose) mkdir -p $(APPS_DIR)/$p/src/
+ $(verbose) $(call core_render,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile)
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc,$(APPS_DIR)/$p/src/$p.app.src)
+endif
+ $(verbose) $(call core_render,bs_app,$(APPS_DIR)/$p/src/$p_app.erl)
+ $(verbose) $(call core_render,tpl_supervisor,$(APPS_DIR)/$p/src/$p_sup.erl)
+
+new-lib:
+ifndef in
+ $(error Usage: $(MAKE) new-lib in=APP)
+endif
+ifneq ($(wildcard $(APPS_DIR)/$in),)
+ $(error Error: Application $in already exists)
+endif
+ $(eval p := $(in))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(verbose) mkdir -p $(APPS_DIR)/$p/src/
+ $(verbose) $(call core_render,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile)
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc_lib,$(APPS_DIR)/$p/src/$p.app.src)
+endif
+
+new:
+ifeq ($(wildcard src/)$(in),)
+ $(error Error: src/ directory does not exist)
+endif
+ifndef t
+ $(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP])
+endif
+ifndef n
+ $(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP])
+endif
+ifdef in
+ $(verbose) $(call core_render,tpl_$(t),$(APPS_DIR)/$(in)/src/$(n).erl)
+else
+ $(verbose) $(call core_render,tpl_$(t),src/$(n).erl)
+endif
+
+list-templates:
+	$(verbose) echo Available templates:
+ $(verbose) printf " %s\n" $(sort $(patsubst tpl_%,%,$(filter tpl_%,$(.VARIABLES))))
+
+# Copyright (c) 2014-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: clean-c_src distclean-c_src-env
+
+# Configuration.
+
+C_SRC_DIR ?= $(CURDIR)/c_src
+C_SRC_ENV ?= $(C_SRC_DIR)/env.mk
+C_SRC_OUTPUT ?= $(CURDIR)/priv/$(PROJECT)
+C_SRC_TYPE ?= shared
+
+# System type and C compiler/flags.
+
+ifeq ($(PLATFORM),msys2)
+ C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?= .exe
+ C_SRC_OUTPUT_SHARED_EXTENSION ?= .dll
+else
+ C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?=
+ C_SRC_OUTPUT_SHARED_EXTENSION ?= .so
+endif
+
+ifeq ($(C_SRC_TYPE),shared)
+ C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_SHARED_EXTENSION)
+else
+ C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_EXECUTABLE_EXTENSION)
+endif
+
+ifeq ($(PLATFORM),msys2)
+# We hardcode the compiler used on MSYS2. The default CC=cc does
+# not produce working code. The "gcc" MSYS2 package also doesn't.
+ CC = /mingw64/bin/gcc
+ export CC
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),darwin)
+ CC ?= cc
+ CFLAGS ?= -O3 -std=c99 -arch x86_64 -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -arch x86_64 -Wall
+ LDFLAGS ?= -arch x86_64 -flat_namespace -undefined suppress
+else ifeq ($(PLATFORM),freebsd)
+ CC ?= cc
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),linux)
+ CC ?= gcc
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+endif
+
+ifneq ($(PLATFORM),msys2)
+ CFLAGS += -fPIC
+ CXXFLAGS += -fPIC
+endif
+
+CFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
+CXXFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
+
+LDLIBS += -L"$(ERL_INTERFACE_LIB_DIR)" -lei
+
+# Verbosity.
+
+c_verbose_0 = @echo " C " $(filter-out $(notdir $(MAKEFILE_LIST) $(C_SRC_ENV)),$(^F));
+c_verbose = $(c_verbose_$(V))
+
+cpp_verbose_0 = @echo " CPP " $(filter-out $(notdir $(MAKEFILE_LIST) $(C_SRC_ENV)),$(^F));
+cpp_verbose = $(cpp_verbose_$(V))
+
+link_verbose_0 = @echo " LD " $(@F);
+link_verbose = $(link_verbose_$(V))
+
+# Targets.
+
+ifeq ($(wildcard $(C_SRC_DIR)),)
+else ifneq ($(wildcard $(C_SRC_DIR)/Makefile),)
+app:: app-c_src
+
+test-build:: app-c_src
+
+app-c_src:
+ $(MAKE) -C $(C_SRC_DIR)
+
+clean::
+ $(MAKE) -C $(C_SRC_DIR) clean
+
+else
+
+ifeq ($(SOURCES),)
+SOURCES := $(sort $(foreach pat,*.c *.C *.cc *.cpp,$(call core_find,$(C_SRC_DIR)/,$(pat))))
+endif
+OBJECTS = $(addsuffix .o, $(basename $(SOURCES)))
+
+COMPILE_C = $(c_verbose) $(CC) $(CFLAGS) $(CPPFLAGS) -c
+COMPILE_CPP = $(cpp_verbose) $(CXX) $(CXXFLAGS) $(CPPFLAGS) -c
+
+app:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
+
+test-build:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
+
+$(C_SRC_OUTPUT_FILE): $(OBJECTS)
+ $(verbose) mkdir -p $(dir $@)
+ $(link_verbose) $(CC) $(OBJECTS) \
+ $(LDFLAGS) $(if $(filter $(C_SRC_TYPE),shared),-shared) $(LDLIBS) \
+ -o $(C_SRC_OUTPUT_FILE)
+
+$(OBJECTS): $(MAKEFILE_LIST) $(C_SRC_ENV)
+
+%.o: %.c
+ $(COMPILE_C) $(OUTPUT_OPTION) $<
+
+%.o: %.cc
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+%.o: %.C
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+%.o: %.cpp
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+clean:: clean-c_src
+
+clean-c_src:
+ $(gen_verbose) rm -f $(C_SRC_OUTPUT_FILE) $(OBJECTS)
+
+endif
+
+ifneq ($(wildcard $(C_SRC_DIR)),)
+ERL_ERTS_DIR = $(shell $(ERL) -eval 'io:format("~s~n", [code:lib_dir(erts)]), halt().')
+
+$(C_SRC_ENV):
+ $(verbose) $(ERL) -eval "file:write_file(\"$(call core_native_path,$(C_SRC_ENV))\", \
+ io_lib:format( \
+ \"# Generated by Erlang.mk. Edit at your own risk!~n~n\" \
+ \"ERTS_INCLUDE_DIR ?= ~s/erts-~s/include/~n\" \
+ \"ERL_INTERFACE_INCLUDE_DIR ?= ~s~n\" \
+ \"ERL_INTERFACE_LIB_DIR ?= ~s~n\" \
+ \"ERTS_DIR ?= $(ERL_ERTS_DIR)~n\", \
+ [code:root_dir(), erlang:system_info(version), \
+ code:lib_dir(erl_interface, include), \
+ code:lib_dir(erl_interface, lib)])), \
+ halt()."
+
+distclean:: distclean-c_src-env
+
+distclean-c_src-env:
+ $(gen_verbose) rm -f $(C_SRC_ENV)
+
+-include $(C_SRC_ENV)
+
+ifneq ($(ERL_ERTS_DIR),$(ERTS_DIR))
+$(shell rm -f $(C_SRC_ENV))
+endif
+endif
+
+# Templates.
+
+define bs_c_nif
+#include "erl_nif.h"
+
+static int loads = 0;
+
+static int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
+{
+ /* Initialize private data. */
+ *priv_data = NULL;
+
+ loads++;
+
+ return 0;
+}
+
+static int upgrade(ErlNifEnv* env, void** priv_data, void** old_priv_data, ERL_NIF_TERM load_info)
+{
+ /* Convert the private data to the new version. */
+ *priv_data = *old_priv_data;
+
+ loads++;
+
+ return 0;
+}
+
+static void unload(ErlNifEnv* env, void* priv_data)
+{
+ if (loads == 1) {
+ /* Destroy the private data. */
+ }
+
+ loads--;
+}
+
+static ERL_NIF_TERM hello(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ if (enif_is_atom(env, argv[0])) {
+ return enif_make_tuple2(env,
+ enif_make_atom(env, "hello"),
+ argv[0]);
+ }
+
+ return enif_make_tuple2(env,
+ enif_make_atom(env, "error"),
+ enif_make_atom(env, "badarg"));
+}
+
+static ErlNifFunc nif_funcs[] = {
+ {"hello", 1, hello}
+};
+
+ERL_NIF_INIT($n, nif_funcs, load, NULL, upgrade, unload)
+endef
+
+define bs_erl_nif
+-module($n).
+
+-export([hello/1]).
+
+-on_load(on_load/0).
+on_load() ->
+ PrivDir = case code:priv_dir(?MODULE) of
+ {error, _} ->
+ AppPath = filename:dirname(filename:dirname(code:which(?MODULE))),
+ filename:join(AppPath, "priv");
+ Path ->
+ Path
+ end,
+ erlang:load_nif(filename:join(PrivDir, atom_to_list(?MODULE)), 0).
+
+hello(_) ->
+ erlang:nif_error({not_loaded, ?MODULE}).
+endef
+
+new-nif:
+ifneq ($(wildcard $(C_SRC_DIR)/$n.c),)
+ $(error Error: $(C_SRC_DIR)/$n.c already exists)
+endif
+ifneq ($(wildcard src/$n.erl),)
+ $(error Error: src/$n.erl already exists)
+endif
+ifndef n
+ $(error Usage: $(MAKE) new-nif n=NAME [in=APP])
+endif
+ifdef in
+ $(verbose) $(MAKE) -C $(APPS_DIR)/$(in)/ new-nif n=$n in=
+else
+ $(verbose) mkdir -p $(C_SRC_DIR) src/
+ $(verbose) $(call core_render,bs_c_nif,$(C_SRC_DIR)/$n.c)
+ $(verbose) $(call core_render,bs_erl_nif,src/$n.erl)
+endif
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: ci ci-prepare ci-setup
+
+CI_OTP ?=
+CI_HIPE ?=
+CI_ERLLVM ?=
+
+ifeq ($(CI_VM),native)
+ERLC_OPTS += +native
+TEST_ERLC_OPTS += +native
+else ifeq ($(CI_VM),erllvm)
+ERLC_OPTS += +native +'{hipe, [to_llvm]}'
+TEST_ERLC_OPTS += +native +'{hipe, [to_llvm]}'
+endif
+
+ifeq ($(strip $(CI_OTP) $(CI_HIPE) $(CI_ERLLVM)),)
+ci::
+else
+
+ci:: $(addprefix ci-,$(CI_OTP) $(addsuffix -native,$(CI_HIPE)) $(addsuffix -erllvm,$(CI_ERLLVM)))
+
+ci-prepare: $(addprefix $(KERL_INSTALL_DIR)/,$(CI_OTP) $(addsuffix -native,$(CI_HIPE)))
+
+ci-setup::
+ $(verbose) :
+
+ci-extra::
+ $(verbose) :
+
+ci_verbose_0 = @echo " CI " $(1);
+ci_verbose = $(ci_verbose_$(V))
+
+define ci_target
+ci-$1: $(KERL_INSTALL_DIR)/$2
+ $(verbose) $(MAKE) --no-print-directory clean
+ $(ci_verbose) \
+ PATH="$(KERL_INSTALL_DIR)/$2/bin:$(PATH)" \
+ CI_OTP_RELEASE="$1" \
+ CT_OPTS="-label $1" \
+ CI_VM="$3" \
+ $(MAKE) ci-setup tests
+ $(verbose) $(MAKE) --no-print-directory ci-extra
+endef
+
+$(foreach otp,$(CI_OTP),$(eval $(call ci_target,$(otp),$(otp),otp)))
+$(foreach otp,$(CI_HIPE),$(eval $(call ci_target,$(otp)-native,$(otp)-native,native)))
+$(foreach otp,$(CI_ERLLVM),$(eval $(call ci_target,$(otp)-erllvm,$(otp)-native,erllvm)))
+
+$(foreach otp,$(filter-out $(ERLANG_OTP),$(CI_OTP)),$(eval $(call kerl_otp_target,$(otp))))
+$(foreach otp,$(filter-out $(ERLANG_HIPE),$(sort $(CI_HIPE) $(CI_ERLLVM))),$(eval $(call kerl_hipe_target,$(otp))))
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Continuous Integration targets:" \
+ " ci Run '$(MAKE) tests' on all configured Erlang versions." \
+ "" \
+ "The CI_OTP variable must be defined with the Erlang versions" \
+ "that must be tested. For example: CI_OTP = OTP-17.3.4 OTP-17.5.3"
+
+endif
+
+# Copyright (c) 2020, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifdef CONCUERROR_TESTS
+
+.PHONY: concuerror distclean-concuerror
+
+# Configuration
+
+CONCUERROR_LOGS_DIR ?= $(CURDIR)/logs
+CONCUERROR_OPTS ?=
+
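+# CONCUERROR_TESTS takes module:function pairs, for example (illustrative
+# names): CONCUERROR_TESTS = my_mod:test_race other_mod:test_deadlock
+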
+# Core targets.
+
+check:: concuerror
+
+ifndef KEEP_LOGS
+distclean:: distclean-concuerror
+endif
+
+# Plugin-specific targets.
+
+$(ERLANG_MK_TMP)/Concuerror/bin/concuerror: | $(ERLANG_MK_TMP)
+ $(verbose) git clone https://github.com/parapluu/Concuerror $(ERLANG_MK_TMP)/Concuerror
+ $(verbose) $(MAKE) -C $(ERLANG_MK_TMP)/Concuerror
+
+$(CONCUERROR_LOGS_DIR):
+ $(verbose) mkdir -p $(CONCUERROR_LOGS_DIR)
+
+define concuerror_html_report
+<!DOCTYPE html>
+<html lang="en">
+<head>
+<meta charset="utf-8">
+<title>Concuerror HTML report</title>
+</head>
+<body>
+<h1>Concuerror HTML report</h1>
+<p>Generated on $(concuerror_date)</p>
+<ul>
+$(foreach t,$(concuerror_targets),<li><a href="$(t).txt">$(t)</a></li>)
+</ul>
+</body>
+</html>
+endef
+
+concuerror: $(addprefix concuerror-,$(subst :,-,$(CONCUERROR_TESTS)))
+ $(eval concuerror_date := $(shell date))
+ $(eval concuerror_targets := $^)
+ $(verbose) $(call core_render,concuerror_html_report,$(CONCUERROR_LOGS_DIR)/concuerror.html)
+
+define concuerror_target
+.PHONY: concuerror-$1-$2
+
+concuerror-$1-$2: test-build | $(ERLANG_MK_TMP)/Concuerror/bin/concuerror $(CONCUERROR_LOGS_DIR)
+ $(ERLANG_MK_TMP)/Concuerror/bin/concuerror \
+ --pa $(CURDIR)/ebin --pa $(TEST_DIR) \
+ -o $(CONCUERROR_LOGS_DIR)/concuerror-$1-$2.txt \
+ $$(CONCUERROR_OPTS) -m $1 -t $2
+endef
+
+$(foreach test,$(CONCUERROR_TESTS),$(eval $(call concuerror_target,$(firstword $(subst :, ,$(test))),$(lastword $(subst :, ,$(test))))))
+
+distclean-concuerror:
+ $(gen_verbose) rm -rf $(CONCUERROR_LOGS_DIR)
+
+endif
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: ct apps-ct distclean-ct
+
+# Configuration.
+
+CT_OPTS ?=
+
+ifneq ($(wildcard $(TEST_DIR)),)
+ifndef CT_SUITES
+CT_SUITES := $(sort $(subst _SUITE.erl,,$(notdir $(call core_find,$(TEST_DIR)/,*_SUITE.erl))))
+endif
+endif
+CT_SUITES ?=
+CT_LOGS_DIR ?= $(CURDIR)/logs
+
+# Core targets.
+
+tests:: ct
+
+ifndef KEEP_LOGS
+distclean:: distclean-ct
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Common_test targets:" \
+ " ct Run all the common_test suites for this project" \
+ "" \
+ "All your common_test suites have their associated targets." \
+		"A suite named http_SUITE can be run using the ct-http target."
+
+# Plugin-specific targets.
+
+CT_RUN = ct_run \
+ -no_auto_compile \
+ -noinput \
+ -pa $(CURDIR)/ebin $(TEST_DIR) \
+ -dir $(TEST_DIR) \
+ -logdir $(CT_LOGS_DIR)
+
+ifeq ($(CT_SUITES),)
+ct: $(if $(IS_APP)$(ROOT_DIR),,apps-ct)
+else
+# We do not run tests if we are in an apps/* with no test directory.
+ifneq ($(IS_APP)$(wildcard $(TEST_DIR)),1)
+ct: test-build $(if $(IS_APP)$(ROOT_DIR),,apps-ct)
+ $(verbose) mkdir -p $(CT_LOGS_DIR)
+ $(gen_verbose) $(CT_RUN) -sname ct_$(PROJECT) -suite $(addsuffix _SUITE,$(CT_SUITES)) $(CT_OPTS)
+endif
+endif
+
+ifneq ($(ALL_APPS_DIRS),)
+define ct_app_target
+apps-ct-$1: test-build
+ $$(MAKE) -C $1 ct IS_APP=1
+endef
+
+$(foreach app,$(ALL_APPS_DIRS),$(eval $(call ct_app_target,$(app))))
+
+apps-ct: $(addprefix apps-ct-,$(ALL_APPS_DIRS))
+endif
+
+ifdef t
+ifeq (,$(findstring :,$t))
+CT_EXTRA = -group $t
+else
+t_words = $(subst :, ,$t)
+CT_EXTRA = -group $(firstword $(t_words)) -case $(lastword $(t_words))
+endif
+else
+ifdef c
+CT_EXTRA = -case $c
+else
+CT_EXTRA =
+endif
+endif
+
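+# For example (illustrative; assumes a suite named http_SUITE with a group g
+# and a case c):
+#   make ct-http t=g      # run only group g
+#   make ct-http t=g:c    # run only case c within group g
+#   make ct-http c=c      # run only case c
+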
+define ct_suite_target
+ct-$(1): test-build
+ $(verbose) mkdir -p $(CT_LOGS_DIR)
+ $(gen_verbose_esc) $(CT_RUN) -sname ct_$(PROJECT) -suite $(addsuffix _SUITE,$(1)) $(CT_EXTRA) $(CT_OPTS)
+endef
+
+$(foreach test,$(CT_SUITES),$(eval $(call ct_suite_target,$(test))))
+
+distclean-ct:
+ $(gen_verbose) rm -rf $(CT_LOGS_DIR)
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: plt distclean-plt dialyze
+
+# Configuration.
+
+DIALYZER_PLT ?= $(CURDIR)/.$(PROJECT).plt
+export DIALYZER_PLT
+
+PLT_APPS ?=
+DIALYZER_DIRS ?= --src -r $(wildcard src) $(ALL_APPS_DIRS)
+DIALYZER_OPTS ?= -Werror_handling -Wrace_conditions -Wunmatched_returns # -Wunderspecs
+DIALYZER_PLT_OPTS ?=
+
+# Core targets.
+
+check:: dialyze
+
+distclean:: distclean-plt
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Dialyzer targets:" \
+ " plt Build a PLT file for this project" \
+ " dialyze Analyze the project using Dialyzer"
+
+# Plugin-specific targets.
+
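+# filter_opts.erl keeps only the -D, -I and -pa options (each together with
+# the value that follows it) from the plain arguments, so that just those
+# flags from ERLC_OPTS are passed on to Dialyzer.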
+define filter_opts.erl
+ Opts = init:get_plain_arguments(),
+ {Filtered, _} = lists:foldl(fun
+ (O, {Os, true}) -> {[O|Os], false};
+ (O = "-D", {Os, _}) -> {[O|Os], true};
+ (O = [\\$$-, \\$$D, _ | _], {Os, _}) -> {[O|Os], false};
+ (O = "-I", {Os, _}) -> {[O|Os], true};
+ (O = [\\$$-, \\$$I, _ | _], {Os, _}) -> {[O|Os], false};
+ (O = "-pa", {Os, _}) -> {[O|Os], true};
+ (_, Acc) -> Acc
+ end, {[], false}, Opts),
+ io:format("~s~n", [string:join(lists:reverse(Filtered), " ")]),
+ halt().
+endef
+
+# DIALYZER_PLT is a variable understood directly by Dialyzer.
+#
+# We append the path to erts at the end of the PLT. This works
+# because the PLT file is in the external term format and the
+# function binary_to_term/1 ignores any trailing data.
+$(DIALYZER_PLT): deps app
+ $(eval DEPS_LOG := $(shell test -f $(ERLANG_MK_TMP)/deps.log && \
+ while read p; do test -d $$p/ebin && echo $$p/ebin; done <$(ERLANG_MK_TMP)/deps.log))
+ $(verbose) dialyzer --build_plt $(DIALYZER_PLT_OPTS) --apps \
+ erts kernel stdlib $(PLT_APPS) $(OTP_DEPS) $(LOCAL_DEPS) $(DEPS_LOG) || test $$? -eq 2
+ $(verbose) $(ERL) -eval 'io:format("~n~s~n", [code:lib_dir(erts)]), halt().' >> $@
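+
+# Illustrative only: the trailing-data property described above can be
+# checked from an Erlang shell (hypothetical session):
+#   1> Bin = term_to_binary(plt_stub).
+#   2> binary_to_term(<<Bin/binary, "/path/to/erts">>).
+#   plt_stub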
+
+plt: $(DIALYZER_PLT)
+
+distclean-plt:
+ $(gen_verbose) rm -f $(DIALYZER_PLT)
+
+ifneq ($(wildcard $(DIALYZER_PLT)),)
+dialyze: $(if $(filter --src,$(DIALYZER_DIRS)),,deps app)
+ $(verbose) if ! tail -n1 $(DIALYZER_PLT) | \
+ grep -q "^`$(ERL) -eval 'io:format("~s", [code:lib_dir(erts)]), halt().'`$$"; then \
+ rm $(DIALYZER_PLT); \
+ $(MAKE) plt; \
+ fi
+else
+dialyze: $(DIALYZER_PLT)
+endif
+ $(verbose) dialyzer --no_native `$(ERL) \
+ -eval "$(subst $(newline),,$(call escape_dquotes,$(call filter_opts.erl)))" \
+ -extra $(ERLC_OPTS)` $(DIALYZER_DIRS) $(DIALYZER_OPTS) $(if $(wildcard ebin/),-pa ebin/)
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-edoc edoc
+
+# Configuration.
+
+EDOC_OPTS ?=
+EDOC_SRC_DIRS ?=
+EDOC_OUTPUT ?= doc
+
+define edoc.erl
+ SrcPaths = lists:foldl(fun(P, Acc) ->
+ filelib:wildcard(atom_to_list(P) ++ "/{src,c_src}") ++ Acc
+ end, [], [$(call comma_list,$(patsubst %,'%',$(call core_native_path,$(EDOC_SRC_DIRS))))]),
+ DefaultOpts = [{dir, "$(EDOC_OUTPUT)"}, {source_path, SrcPaths}, {subpackages, false}],
+ edoc:application($(1), ".", [$(2)] ++ DefaultOpts),
+ halt(0).
+endef
+
+# Core targets.
+
+ifneq ($(strip $(EDOC_SRC_DIRS)$(wildcard doc/overview.edoc)),)
+docs:: edoc
+endif
+
+distclean:: distclean-edoc
+
+# Plugin-specific targets.
+
+edoc: distclean-edoc doc-deps
+ $(gen_verbose) $(call erlang,$(call edoc.erl,$(PROJECT),$(EDOC_OPTS)))
+
+distclean-edoc:
+ $(gen_verbose) rm -f $(EDOC_OUTPUT)/*.css $(EDOC_OUTPUT)/*.html $(EDOC_OUTPUT)/*.png $(EDOC_OUTPUT)/edoc-info
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Configuration.
+
+DTL_FULL_PATH ?=
+DTL_PATH ?= templates/
+DTL_PREFIX ?=
+DTL_SUFFIX ?= _dtl
+DTL_OPTS ?=
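+
+# For example (illustrative): with the defaults, templates/users/show.dtl
+# compiles to the module show_dtl; with DTL_FULL_PATH set it becomes
+# users_show_dtl.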
+
+# Verbosity.
+
+dtl_verbose_0 = @echo " DTL " $(filter %.dtl,$(?F));
+dtl_verbose = $(dtl_verbose_$(V))
+
+# Core targets.
+
+DTL_PATH := $(abspath $(DTL_PATH))
+DTL_FILES := $(sort $(call core_find,$(DTL_PATH),*.dtl))
+
+ifneq ($(DTL_FILES),)
+
+DTL_NAMES = $(addprefix $(DTL_PREFIX),$(addsuffix $(DTL_SUFFIX),$(DTL_FILES:$(DTL_PATH)/%.dtl=%)))
+DTL_MODULES = $(if $(DTL_FULL_PATH),$(subst /,_,$(DTL_NAMES)),$(notdir $(DTL_NAMES)))
+BEAM_FILES += $(addsuffix .beam,$(addprefix ebin/,$(DTL_MODULES)))
+
+ifneq ($(words $(DTL_FILES)),0)
+# Rebuild templates when the Makefile changes.
+$(ERLANG_MK_TMP)/last-makefile-change-erlydtl: $(MAKEFILE_LIST) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(DTL_FILES); \
+ fi
+ $(verbose) touch $@
+
+ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change-erlydtl
+endif
+
+define erlydtl_compile.erl
+ [begin
+ Module0 = case "$(strip $(DTL_FULL_PATH))" of
+ "" ->
+ filename:basename(F, ".dtl");
+ _ ->
+ "$(call core_native_path,$(DTL_PATH))/" ++ F2 = filename:rootname(F, ".dtl"),
+ re:replace(F2, "/", "_", [{return, list}, global])
+ end,
+ Module = list_to_atom("$(DTL_PREFIX)" ++ string:to_lower(Module0) ++ "$(DTL_SUFFIX)"),
+ case erlydtl:compile(F, Module, [$(DTL_OPTS)] ++ [{out_dir, "ebin/"}, return_errors]) of
+ ok -> ok;
+ {ok, _} -> ok
+ end
+ end || F <- string:tokens("$(1)", " ")],
+ halt().
+endef
+
+ebin/$(PROJECT).app:: $(DTL_FILES) | ebin/
+ $(if $(strip $?),\
+ $(dtl_verbose) $(call erlang,$(call erlydtl_compile.erl,$(call core_native_path,$?)),\
+ -pa ebin/))
+
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, Dave Cottlehuber <dch@skunkwerks.at>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-escript escript escript-zip
+
+# Configuration.
+
+ESCRIPT_NAME ?= $(PROJECT)
+ESCRIPT_FILE ?= $(ESCRIPT_NAME)
+
+ESCRIPT_SHEBANG ?= /usr/bin/env escript
+ESCRIPT_COMMENT ?= This is an -*- erlang -*- file
+ESCRIPT_EMU_ARGS ?= -escript main $(ESCRIPT_NAME)
+
+ESCRIPT_ZIP ?= 7z a -tzip -mx=9 -mtc=off $(if $(filter-out 0,$(V)),,> /dev/null)
+ESCRIPT_ZIP_FILE ?= $(ERLANG_MK_TMP)/escript.zip
+
+# Core targets.
+
+distclean:: distclean-escript
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Escript targets:" \
+ " escript Build an executable escript archive" \
+
+# Plugin-specific targets.
+
+escript-zip:: FULL=1
+escript-zip:: deps app
+	$(verbose) mkdir -p $(dir $(ESCRIPT_ZIP_FILE))
+ $(verbose) rm -f $(ESCRIPT_ZIP_FILE)
+ $(gen_verbose) cd .. && $(ESCRIPT_ZIP) $(ESCRIPT_ZIP_FILE) $(PROJECT)/ebin/*
+ifneq ($(DEPS),)
+ $(verbose) cd $(DEPS_DIR) && $(ESCRIPT_ZIP) $(ESCRIPT_ZIP_FILE) \
+ $(subst $(DEPS_DIR)/,,$(addsuffix /*,$(wildcard \
+ $(addsuffix /ebin,$(shell cat $(ERLANG_MK_TMP)/deps.log)))))
+endif
+
+escript:: escript-zip
+ $(gen_verbose) printf "%s\n" \
+ "#!$(ESCRIPT_SHEBANG)" \
+ "%% $(ESCRIPT_COMMENT)" \
+ "%%! $(ESCRIPT_EMU_ARGS)" > $(ESCRIPT_FILE)
+ $(verbose) cat $(ESCRIPT_ZIP_FILE) >> $(ESCRIPT_FILE)
+ $(verbose) chmod +x $(ESCRIPT_FILE)
+
+distclean-escript:
+ $(gen_verbose) rm -f $(ESCRIPT_FILE)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, Enrique Fernandez <enrique.fernandez@erlang-solutions.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: eunit apps-eunit
+
+# Configuration
+
+EUNIT_OPTS ?=
+EUNIT_ERL_OPTS ?=
+
+# Core targets.
+
+tests:: eunit
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "EUnit targets:" \
+ " eunit Run all the EUnit tests for this project"
+
+# Plugin-specific targets.
+
+define eunit.erl
+ $(call cover.erl)
+ CoverSetup(),
+ case eunit:test($1, [$(EUNIT_OPTS)]) of
+ ok -> ok;
+ error -> halt(2)
+ end,
+ CoverExport("$(call core_native_path,$(COVER_DATA_DIR))/eunit.coverdata"),
+ halt()
+endef
+
+EUNIT_ERL_OPTS += -pa $(TEST_DIR) $(CURDIR)/ebin
+
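+# For example (illustrative): "make eunit t=my_mod" runs the tests of the
+# hypothetical module my_mod, while "make eunit t=my_mod:my_test" runs the
+# single test function my_mod:my_test/0.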
+ifdef t
+ifeq (,$(findstring :,$(t)))
+eunit: test-build cover-data-dir
+ $(gen_verbose) $(call erlang,$(call eunit.erl,['$(t)']),$(EUNIT_ERL_OPTS))
+else
+eunit: test-build cover-data-dir
+ $(gen_verbose) $(call erlang,$(call eunit.erl,fun $(t)/0),$(EUNIT_ERL_OPTS))
+endif
+else
+EUNIT_EBIN_MODS = $(notdir $(basename $(ERL_FILES) $(BEAM_FILES)))
+EUNIT_TEST_MODS = $(notdir $(basename $(call core_find,$(TEST_DIR)/,*.erl)))
+
+EUNIT_MODS = $(foreach mod,$(EUNIT_EBIN_MODS) $(filter-out \
+ $(patsubst %,%_tests,$(EUNIT_EBIN_MODS)),$(EUNIT_TEST_MODS)),'$(mod)')
+
+eunit: test-build $(if $(IS_APP)$(ROOT_DIR),,apps-eunit) cover-data-dir
+ifneq ($(wildcard src/ $(TEST_DIR)),)
+ $(gen_verbose) $(call erlang,$(call eunit.erl,[$(call comma_list,$(EUNIT_MODS))]),$(EUNIT_ERL_OPTS))
+endif
+
+ifneq ($(ALL_APPS_DIRS),)
+apps-eunit: test-build
+ $(verbose) eunit_retcode=0 ; for app in $(ALL_APPS_DIRS); do $(MAKE) -C $$app eunit IS_APP=1; \
+ [ $$? -ne 0 ] && eunit_retcode=1 ; done ; \
+ exit $$eunit_retcode
+endif
+endif
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter proper,$(DEPS) $(TEST_DEPS)),proper)
+.PHONY: proper
+
+# Targets.
+
+tests:: proper
+
+define proper_check.erl
+ $(call cover.erl)
+ code:add_pathsa([
+ "$(call core_native_path,$(CURDIR)/ebin)",
+ "$(call core_native_path,$(DEPS_DIR)/*/ebin)",
+ "$(call core_native_path,$(TEST_DIR))"]),
+ Module = fun(M) ->
+ [true] =:= lists:usort([
+ case atom_to_list(F) of
+ "prop_" ++ _ ->
+ io:format("Testing ~p:~p/0~n", [M, F]),
+ proper:quickcheck(M:F(), nocolors);
+ _ ->
+ true
+ end
+ || {F, 0} <- M:module_info(exports)])
+ end,
+ try begin
+ CoverSetup(),
+ Res = case $(1) of
+ all -> [true] =:= lists:usort([Module(M) || M <- [$(call comma_list,$(3))]]);
+ module -> Module($(2));
+ function -> proper:quickcheck($(2), nocolors)
+ end,
+ CoverExport("$(COVER_DATA_DIR)/proper.coverdata"),
+ Res
+ end of
+ true -> halt(0);
+ _ -> halt(1)
+ catch error:undef ->
+ io:format("Undefined property or module?~n~p~n", [erlang:get_stacktrace()]),
+ halt(0)
+ end.
+endef
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+proper: test-build cover-data-dir
+ $(verbose) $(call erlang,$(call proper_check.erl,module,$(t)))
+else
+proper: test-build cover-data-dir
+ $(verbose) echo Testing $(t)/0
+ $(verbose) $(call erlang,$(call proper_check.erl,function,$(t)()))
+endif
+else
+proper: test-build cover-data-dir
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(wildcard ebin/*.beam) $(call core_find,$(TEST_DIR)/,*.beam))))))
+ $(gen_verbose) $(call erlang,$(call proper_check.erl,all,undefined,$(MODULES)))
+endif
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Verbosity.
+
+proto_verbose_0 = @echo " PROTO " $(filter %.proto,$(?F));
+proto_verbose = $(proto_verbose_$(V))
+
+# Core targets.
+
+ifneq ($(wildcard src/),)
+ifneq ($(filter gpb protobuffs,$(BUILD_DEPS) $(DEPS)),)
+PROTO_FILES := $(filter %.proto,$(ALL_SRC_FILES))
+ERL_FILES += $(addprefix src/,$(patsubst %.proto,%_pb.erl,$(notdir $(PROTO_FILES))))
+
+ifeq ($(PROTO_FILES),)
+$(ERLANG_MK_TMP)/last-makefile-change-protobuffs:
+ $(verbose) :
+else
+# Rebuild proto files when the Makefile changes.
+# We exclude $(PROJECT).d to avoid a circular dependency.
+$(ERLANG_MK_TMP)/last-makefile-change-protobuffs: $(filter-out $(PROJECT).d,$(MAKEFILE_LIST)) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(PROTO_FILES); \
+ fi
+ $(verbose) touch $@
+
+$(PROJECT).d:: $(ERLANG_MK_TMP)/last-makefile-change-protobuffs
+endif
+
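+# Pick the generator: protobuffs_compile when gpb is absent from the
+# dependencies, gpb_compile otherwise.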
+ifeq ($(filter gpb,$(BUILD_DEPS) $(DEPS)),)
+define compile_proto.erl
+ [begin
+ protobuffs_compile:generate_source(F, [
+ {output_include_dir, "./include"},
+ {output_src_dir, "./src"}])
+ end || F <- string:tokens("$1", " ")],
+ halt().
+endef
+else
+define compile_proto.erl
+ [begin
+ gpb_compile:file(F, [
+ {include_as_lib, true},
+ {module_name_suffix, "_pb"},
+ {o_hrl, "./include"},
+ {o_erl, "./src"}])
+ end || F <- string:tokens("$1", " ")],
+ halt().
+endef
+endif
+
+ifneq ($(PROTO_FILES),)
+$(PROJECT).d:: $(PROTO_FILES)
+ $(verbose) mkdir -p ebin/ include/
+ $(if $(strip $?),$(proto_verbose) $(call erlang,$(call compile_proto.erl,$?)))
+endif
+endif
+endif
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: relx-rel relx-relup distclean-relx-rel run
+
+# Configuration.
+
+RELX ?= $(ERLANG_MK_TMP)/relx
+RELX_CONFIG ?= $(CURDIR)/relx.config
+
+RELX_URL ?= https://erlang.mk/res/relx-v3.27.0
+RELX_OPTS ?=
+RELX_OUTPUT_DIR ?= _rel
+RELX_REL_EXT ?=
+RELX_TAR ?= 1
+
+ifdef SFX
+ RELX_TAR = 1
+endif
+
+ifeq ($(firstword $(RELX_OPTS)),-o)
+ RELX_OUTPUT_DIR = $(word 2,$(RELX_OPTS))
+else
+ RELX_OPTS += -o $(RELX_OUTPUT_DIR)
+endif
+
+# Core targets.
+
+ifeq ($(IS_DEP),)
+ifneq ($(wildcard $(RELX_CONFIG)),)
+rel:: relx-rel
+
+relup:: relx-relup
+endif
+endif
+
+distclean:: distclean-relx-rel
+
+# Plugin-specific targets.
+
+$(RELX): | $(ERLANG_MK_TMP)
+ $(gen_verbose) $(call core_http_get,$(RELX),$(RELX_URL))
+ $(verbose) chmod +x $(RELX)
+
+relx-rel: $(RELX) rel-deps app
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) release
+ $(verbose) $(MAKE) relx-post-rel
+ifeq ($(RELX_TAR),1)
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) tar
+endif
+
+relx-relup: $(RELX) rel-deps app
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) release
+ $(MAKE) relx-post-rel
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) relup $(if $(filter 1,$(RELX_TAR)),tar)
+
+distclean-relx-rel:
+ $(gen_verbose) rm -rf $(RELX_OUTPUT_DIR)
+
+# Default hooks.
+relx-post-rel::
+ $(verbose) :
+
+# Run target.
+
+ifeq ($(wildcard $(RELX_CONFIG)),)
+run::
+else
+
+define get_relx_release.erl
+ {ok, Config} = file:consult("$(call core_native_path,$(RELX_CONFIG))"),
+ {release, {Name, Vsn0}, _} = lists:keyfind(release, 1, Config),
+ Vsn = case Vsn0 of
+ {cmd, Cmd} -> os:cmd(Cmd);
+ semver -> "";
+ {semver, _} -> "";
+ VsnStr -> Vsn0
+ end,
+ Extended = case lists:keyfind(extended_start_script, 1, Config) of
+ {_, true} -> "1";
+ _ -> ""
+ end,
+ io:format("~s ~s ~s", [Name, Vsn, Extended]),
+ halt(0).
+endef
+
+RELX_REL := $(shell $(call erlang,$(get_relx_release.erl)))
+RELX_REL_NAME := $(word 1,$(RELX_REL))
+RELX_REL_VSN := $(word 2,$(RELX_REL))
+RELX_REL_CMD := $(if $(word 3,$(RELX_REL)),console)
+
+ifeq ($(PLATFORM),msys2)
+RELX_REL_EXT := .cmd
+endif
+
+run:: all
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) $(RELX_REL_CMD)
+
+ifdef RELOAD
+rel::
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) ping
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) \
+ eval "io:format(\"~p~n\", [c:lm()])"
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Relx targets:" \
+ " run Compile the project, build the release and run it"
+
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, M Robert Martin <rob@version2beta.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: shell
+
+# Configuration.
+
+SHELL_ERL ?= erl
+SHELL_PATHS ?= $(CURDIR)/ebin $(TEST_DIR)
+SHELL_OPTS ?=
+
+ALL_SHELL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(SHELL_DEPS))
+
+# Core targets
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Shell targets:" \
+		" shell Run an Erlang shell with SHELL_OPTS or a reasonable default"
+
+# Plugin-specific targets.
+
+$(foreach dep,$(SHELL_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+build-shell-deps:
+else
+build-shell-deps: $(ALL_SHELL_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_SHELL_DEPS_DIRS) ; do \
+ if [ -z "$(strip $(FULL))" ] && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ else \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ fi \
+ done
+endif
+
+shell:: build-shell-deps
+ $(gen_verbose) $(SHELL_ERL) -pa $(SHELL_PATHS) $(SHELL_OPTS)
+
+# Copyright 2017, Stanislaw Klekot <dozzie@jarowit.net>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-sphinx sphinx
+
+# Configuration.
+
+SPHINX_BUILD ?= sphinx-build
+SPHINX_SOURCE ?= doc
+SPHINX_CONFDIR ?=
+SPHINX_FORMATS ?= html
+SPHINX_DOCTREES ?= $(ERLANG_MK_TMP)/sphinx.doctrees
+SPHINX_OPTS ?=
+
+#sphinx_html_opts =
+#sphinx_html_output = html
+#sphinx_man_opts =
+#sphinx_man_output = man
+#sphinx_latex_opts =
+#sphinx_latex_output = latex
+
+# Helpers.
+
+sphinx_build_0 = @echo " SPHINX" $1; $(SPHINX_BUILD) -N -q
+sphinx_build_1 = $(SPHINX_BUILD) -N
+sphinx_build_2 = set -x; $(SPHINX_BUILD)
+sphinx_build = $(sphinx_build_$(V))
+
+define sphinx.build
+$(call sphinx_build,$1) -b $1 -d $(SPHINX_DOCTREES) $(if $(SPHINX_CONFDIR),-c $(SPHINX_CONFDIR)) $(SPHINX_OPTS) $(sphinx_$1_opts) -- $(SPHINX_SOURCE) $(call sphinx.output,$1)
+
+endef
+
+define sphinx.output
+$(if $(sphinx_$1_output),$(sphinx_$1_output),$1)
+endef
+
+# Targets.
+
+ifneq ($(wildcard $(if $(SPHINX_CONFDIR),$(SPHINX_CONFDIR),$(SPHINX_SOURCE))/conf.py),)
+docs:: sphinx
+distclean:: distclean-sphinx
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Sphinx targets:" \
+ " sphinx Generate Sphinx documentation." \
+ "" \
+		"ReST sources and the 'conf.py' file are expected in the directory pointed" \
+		"to by SPHINX_SOURCE ('doc' by default). SPHINX_FORMATS lists the formats to" \
+		"build (only 'html' is generated by default); the target directory is chosen by" \
+ 'setting sphinx_$${format}_output, for example: sphinx_html_output = output/html' \
+ "Additional Sphinx options can be set in SPHINX_OPTS."
+
+# Plugin-specific targets.
+
+sphinx:
+ $(foreach F,$(SPHINX_FORMATS),$(call sphinx.build,$F))
+
+distclean-sphinx:
+ $(gen_verbose) rm -rf $(filter-out $(SPHINX_SOURCE),$(foreach F,$(SPHINX_FORMATS),$(call sphinx.output,$F)))
+
+# Copyright (c) 2017, Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: show-ERL_LIBS show-ERLC_OPTS show-TEST_ERLC_OPTS
+
+show-ERL_LIBS:
+ @echo $(ERL_LIBS)
+
+show-ERLC_OPTS:
+ @$(foreach opt,$(ERLC_OPTS) -pa ebin -I include,echo "$(opt)";)
+
+show-TEST_ERLC_OPTS:
+ @$(foreach opt,$(TEST_ERLC_OPTS) -pa ebin -I include,echo "$(opt)";)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter triq,$(DEPS) $(TEST_DEPS)),triq)
+.PHONY: triq
+
+# Targets.
+
+tests:: triq
+
+define triq_check.erl
+ $(call cover.erl)
+ code:add_pathsa([
+ "$(call core_native_path,$(CURDIR)/ebin)",
+ "$(call core_native_path,$(DEPS_DIR)/*/ebin)",
+ "$(call core_native_path,$(TEST_DIR))"]),
+ try begin
+ CoverSetup(),
+ Res = case $(1) of
+ all -> [true] =:= lists:usort([triq:check(M) || M <- [$(call comma_list,$(3))]]);
+ module -> triq:check($(2));
+ function -> triq:check($(2))
+ end,
+ CoverExport("$(COVER_DATA_DIR)/triq.coverdata"),
+ Res
+ end of
+ true -> halt(0);
+ _ -> halt(1)
+ catch error:undef ->
+ io:format("Undefined property or module?~n~p~n", [erlang:get_stacktrace()]),
+ halt(0)
+ end.
+endef
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+triq: test-build cover-data-dir
+ $(verbose) $(call erlang,$(call triq_check.erl,module,$(t)))
+else
+triq: test-build cover-data-dir
+ $(verbose) echo Testing $(t)/0
+ $(verbose) $(call erlang,$(call triq_check.erl,function,$(t)()))
+endif
+else
+triq: test-build cover-data-dir
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(wildcard ebin/*.beam) $(call core_find,$(TEST_DIR)/,*.beam))))))
+ $(gen_verbose) $(call erlang,$(call triq_check.erl,all,undefined,$(MODULES)))
+endif
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Erlang Solutions Ltd.
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: xref distclean-xref
+
+# Configuration.
+
+ifeq ($(XREF_CONFIG),)
+ XREFR_ARGS :=
+else
+ XREFR_ARGS := -c $(XREF_CONFIG)
+endif
+
+XREFR ?= $(CURDIR)/xrefr
+export XREFR
+
+XREFR_URL ?= https://github.com/inaka/xref_runner/releases/download/1.1.0/xrefr
+
+# Core targets.
+
+help::
+ $(verbose) printf '%s\n' '' \
+ 'Xref targets:' \
+		' xref Run Xrefr using $$XREF_CONFIG as the config file, if defined'
+
+distclean:: distclean-xref
+
+# Plugin-specific targets.
+
+$(XREFR):
+ $(gen_verbose) $(call core_http_get,$(XREFR),$(XREFR_URL))
+ $(verbose) chmod +x $(XREFR)
+
+xref: deps app $(XREFR)
+ $(gen_verbose) $(XREFR) $(XREFR_ARGS)
+
+distclean-xref:
+ $(gen_verbose) rm -rf $(XREFR)
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Viktor Söderqvist <viktor@zuiderkwast.se>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+COVER_REPORT_DIR ?= cover
+COVER_DATA_DIR ?= $(COVER_REPORT_DIR)
+
+ifdef COVER
+COVER_APPS ?= $(notdir $(ALL_APPS_DIRS))
+COVER_DEPS ?=
+endif
+
+# Code coverage for Common Test.
+
+ifdef COVER
+ifdef CT_RUN
+ifneq ($(wildcard $(TEST_DIR)),)
+test-build:: $(TEST_DIR)/ct.cover.spec
+
+$(TEST_DIR)/ct.cover.spec: cover-data-dir
+ $(gen_verbose) printf "%s\n" \
+ "{incl_app, '$(PROJECT)', details}." \
+ "{incl_dirs, '$(PROJECT)', [\"$(call core_native_path,$(CURDIR)/ebin)\" \
+ $(foreach a,$(COVER_APPS),$(comma) \"$(call core_native_path,$(APPS_DIR)/$a/ebin)\") \
+ $(foreach d,$(COVER_DEPS),$(comma) \"$(call core_native_path,$(DEPS_DIR)/$d/ebin)\")]}." \
+ '{export,"$(call core_native_path,$(abspath $(COVER_DATA_DIR))/ct.coverdata)"}.' > $@
+
+CT_RUN += -cover $(TEST_DIR)/ct.cover.spec
+endif
+endif
+endif
+
+# Code coverage for other tools.
+
+ifdef COVER
+define cover.erl
+ CoverSetup = fun() ->
+ Dirs = ["$(call core_native_path,$(CURDIR)/ebin)"
+ $(foreach a,$(COVER_APPS),$(comma) "$(call core_native_path,$(APPS_DIR)/$a/ebin)")
+ $(foreach d,$(COVER_DEPS),$(comma) "$(call core_native_path,$(DEPS_DIR)/$d/ebin)")],
+ [begin
+ case filelib:is_dir(Dir) of
+ false -> false;
+ true ->
+ case cover:compile_beam_directory(Dir) of
+ {error, _} -> halt(1);
+ _ -> true
+ end
+ end
+ end || Dir <- Dirs]
+ end,
+ CoverExport = fun(Filename) -> cover:export(Filename) end,
+endef
+else
+define cover.erl
+ CoverSetup = fun() -> ok end,
+ CoverExport = fun(_) -> ok end,
+endef
+endif
+
+# Core targets
+
+ifdef COVER
+ifneq ($(COVER_REPORT_DIR),)
+tests::
+ $(verbose) $(MAKE) --no-print-directory cover-report
+endif
+
+cover-data-dir: | $(COVER_DATA_DIR)
+
+$(COVER_DATA_DIR):
+ $(verbose) mkdir -p $(COVER_DATA_DIR)
+else
+cover-data-dir:
+endif
+
+clean:: coverdata-clean
+
+ifneq ($(COVER_REPORT_DIR),)
+distclean:: cover-report-clean
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Cover targets:" \
+		" cover-report Generate an HTML coverage report from previously collected" \
+ " cover data." \
+ " all.coverdata Merge all coverdata files into all.coverdata." \
+ "" \
+ "If COVER=1 is set, coverage data is generated by the targets eunit and ct. The" \
+		"target tests additionally generates an HTML coverage report from the combined" \
+ "coverdata files from each of these testing tools. HTML reports can be disabled" \
+ "by setting COVER_REPORT_DIR to empty."
+
+# Plugin specific targets
+
+COVERDATA = $(filter-out $(COVER_DATA_DIR)/all.coverdata,$(wildcard $(COVER_DATA_DIR)/*.coverdata))
+
+.PHONY: coverdata-clean
+coverdata-clean:
+ $(gen_verbose) rm -f $(COVER_DATA_DIR)/*.coverdata $(TEST_DIR)/ct.cover.spec
+
+# Merge all coverdata files into one.
+define cover_export.erl
+ $(foreach f,$(COVERDATA),cover:import("$(f)") == ok orelse halt(1),)
+ cover:export("$(COVER_DATA_DIR)/$@"), halt(0).
+endef
+
+all.coverdata: $(COVERDATA) cover-data-dir
+ $(gen_verbose) $(call erlang,$(cover_export.erl))
+
+# These are only defined if COVER_REPORT_DIR is non-empty. Set COVER_REPORT_DIR to
+# empty if you want the coverdata files but not the HTML report.
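+# For example (illustrative):
+#   make tests COVER=1                    # coverdata plus HTML report
+#   make tests COVER=1 COVER_REPORT_DIR=  # coverdata only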
+ifneq ($(COVER_REPORT_DIR),)
+
+.PHONY: cover-report-clean cover-report
+
+cover-report-clean:
+ $(gen_verbose) rm -rf $(COVER_REPORT_DIR)
+ifneq ($(COVER_REPORT_DIR),$(COVER_DATA_DIR))
+ $(if $(shell ls -A $(COVER_DATA_DIR)/),,$(verbose) rmdir $(COVER_DATA_DIR))
+endif
+
+ifeq ($(COVERDATA),)
+cover-report:
+else
+
+# Modules which include eunit.hrl always contain one line without coverage
+# because eunit defines test/0 which is never called. We compensate for this.
+EUNIT_HRL_MODS = $(subst $(space),$(comma),$(shell \
+ grep -H -e '^\s*-include.*include/eunit\.hrl"' src/*.erl \
+ | sed "s/^src\/\(.*\)\.erl:.*/'\1'/" | uniq))
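+
+# For example (illustrative): a module that includes eunit.hrl and has
+# 7 covered and 3 uncovered lines is reported as {M, {7, 2}} rather than
+# {M, {7, 3}}.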
+
+define cover_report.erl
+ $(foreach f,$(COVERDATA),cover:import("$(f)") == ok orelse halt(1),)
+ Ms = cover:imported_modules(),
+ [cover:analyse_to_file(M, "$(COVER_REPORT_DIR)/" ++ atom_to_list(M)
+ ++ ".COVER.html", [html]) || M <- Ms],
+ Report = [begin {ok, R} = cover:analyse(M, module), R end || M <- Ms],
+ EunitHrlMods = [$(EUNIT_HRL_MODS)],
+ Report1 = [{M, {Y, case lists:member(M, EunitHrlMods) of
+ true -> N - 1; false -> N end}} || {M, {Y, N}} <- Report],
+ TotalY = lists:sum([Y || {_, {Y, _}} <- Report1]),
+ TotalN = lists:sum([N || {_, {_, N}} <- Report1]),
+ Perc = fun(Y, N) -> case Y + N of 0 -> 100; S -> round(100 * Y / S) end end,
+ TotalPerc = Perc(TotalY, TotalN),
+ {ok, F} = file:open("$(COVER_REPORT_DIR)/index.html", [write]),
+ io:format(F, "<!DOCTYPE html><html>~n"
+ "<head><meta charset=\"UTF-8\">~n"
+ "<title>Coverage report</title></head>~n"
+ "<body>~n", []),
+ io:format(F, "<h1>Coverage</h1>~n<p>Total: ~p%</p>~n", [TotalPerc]),
+ io:format(F, "<table><tr><th>Module</th><th>Coverage</th></tr>~n", []),
+ [io:format(F, "<tr><td><a href=\"~p.COVER.html\">~p</a></td>"
+ "<td>~p%</td></tr>~n",
+ [M, M, Perc(Y, N)]) || {M, {Y, N}} <- Report1],
+ How = "$(subst $(space),$(comma)$(space),$(basename $(COVERDATA)))",
+ Date = "$(shell date -u "+%Y-%m-%dT%H:%M:%SZ")",
+ io:format(F, "</table>~n"
+ "<p>Generated using ~s and erlang.mk on ~s.</p>~n"
+ "</body></html>", [How, Date]),
+ halt().
+endef
+
+cover-report:
+ $(verbose) mkdir -p $(COVER_REPORT_DIR)
+ $(gen_verbose) $(call erlang,$(cover_report.erl))
+
+endif
+endif # ifneq ($(COVER_REPORT_DIR),)
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: sfx
+
+ifdef RELX_REL
+ifdef SFX
+
+# Configuration.
+
+SFX_ARCHIVE ?= $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/$(RELX_REL_NAME)-$(RELX_REL_VSN).tar.gz
+SFX_OUTPUT_FILE ?= $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME).run
+
+# Core targets.
+
+rel:: sfx
+
+# Plugin-specific targets.
+
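+# sfx_stub is the shell stub prepended to the release tarball: it extracts
+# everything below the __ARCHIVE_BELOW__ marker into a temporary directory,
+# runs the release console from there, then cleans up and exits with the
+# release's exit code.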
+define sfx_stub
+#!/bin/sh
+
+TMPDIR=`mktemp -d`
+ARCHIVE=`awk '/^__ARCHIVE_BELOW__$$/ {print NR + 1; exit 0;}' $$0`
+FILENAME=$$(basename $$0)
+REL=$${FILENAME%.*}
+
+tail -n+$$ARCHIVE $$0 | tar -xzf - -C $$TMPDIR
+
+$$TMPDIR/bin/$$REL console
+RET=$$?
+
+rm -rf $$TMPDIR
+
+exit $$RET
+
+__ARCHIVE_BELOW__
+endef
+
+sfx:
+ $(verbose) $(call core_render,sfx_stub,$(SFX_OUTPUT_FILE))
+ $(gen_verbose) cat $(SFX_ARCHIVE) >> $(SFX_OUTPUT_FILE)
+ $(verbose) chmod +x $(SFX_OUTPUT_FILE)
+
+endif
+endif
+
+# Copyright (c) 2013-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# External plugins.
+
+DEP_PLUGINS ?=
+
+$(foreach p,$(DEP_PLUGINS),\
+ $(eval $(if $(findstring /,$p),\
+ $(call core_dep_plugin,$p,$(firstword $(subst /, ,$p))),\
+ $(call core_dep_plugin,$p/plugins.mk,$p))))
+
+help:: help-plugins
+
+help-plugins::
+ $(verbose) :
+
+# Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015-2016, Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Fetch dependencies recursively (without building them).
+
+.PHONY: fetch-deps fetch-doc-deps fetch-rel-deps fetch-test-deps \
+ fetch-shell-deps
+
+.PHONY: $(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+fetch-deps: $(ERLANG_MK_RECURSIVE_DEPS_LIST)
+fetch-doc-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST)
+fetch-rel-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST)
+fetch-test-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST)
+fetch-shell-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+ifneq ($(SKIP_DEPS),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST):
+ $(verbose) :> $@
+else
+# By default, we fetch "normal" dependencies. They are also included
+# regardless of which type of dependencies is requested.
+#
+# $(ALL_DEPS_DIRS) includes $(BUILD_DEPS).
+
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_DOC_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_REL_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_TEST_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_SHELL_DEPS_DIRS)
+
+# Allow the use of fetch-deps together with $(DEP_TYPES) to fetch
+# multiple types of dependencies with a single target.
+ifneq ($(filter doc,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_DOC_DEPS_DIRS)
+endif
+ifneq ($(filter rel,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_REL_DEPS_DIRS)
+endif
+ifneq ($(filter test,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_TEST_DEPS_DIRS)
+endif
+ifneq ($(filter shell,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_SHELL_DEPS_DIRS)
+endif
+
+ERLANG_MK_RECURSIVE_TMP_LIST := $(abspath $(ERLANG_MK_TMP)/recursive-tmp-deps-$(shell echo $$PPID).log)
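+# The temporary file is keyed on the parent PID so concurrent
+# invocations do not clash; recursive $(MAKE) calls append to it and the
+# top-level invocation sorts and deduplicates it into the target file.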
+
+$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): | $(ERLANG_MK_TMP)
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_RECURSIVE_TMP_LIST)
+endif
+ $(verbose) touch $(ERLANG_MK_RECURSIVE_TMP_LIST)
+ $(verbose) set -e; for dep in $^ ; do \
+ if ! grep -qs ^$$dep$$ $(ERLANG_MK_RECURSIVE_TMP_LIST); then \
+ echo $$dep >> $(ERLANG_MK_RECURSIVE_TMP_LIST); \
+ if grep -qs -E "^[[:blank:]]*include[[:blank:]]+(erlang\.mk|.*/erlang\.mk|.*ERLANG_MK_FILENAME.*)$$" \
+ $$dep/GNUmakefile $$dep/makefile $$dep/Makefile; then \
+ $(MAKE) -C $$dep fetch-deps \
+ IS_DEP=1 \
+ ERLANG_MK_RECURSIVE_TMP_LIST=$(ERLANG_MK_RECURSIVE_TMP_LIST); \
+ fi \
+ fi \
+ done
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) sort < $(ERLANG_MK_RECURSIVE_TMP_LIST) | \
+ uniq > $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted
+ $(verbose) cmp -s $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted $@ \
+ || mv $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted $@
+ $(verbose) rm -f $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted
+ $(verbose) rm $(ERLANG_MK_RECURSIVE_TMP_LIST)
+endif
+endif # ifneq ($(SKIP_DEPS),)
+
+# List dependencies recursively.
+
+.PHONY: list-deps list-doc-deps list-rel-deps list-test-deps \
+ list-shell-deps
+
+list-deps: $(ERLANG_MK_RECURSIVE_DEPS_LIST)
+list-doc-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST)
+list-rel-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST)
+list-test-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST)
+list-shell-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+list-deps list-doc-deps list-rel-deps list-test-deps list-shell-deps:
+ $(verbose) cat $^
+
+# Query dependencies recursively.
+
+.PHONY: query-deps query-doc-deps query-rel-deps query-test-deps \
+ query-shell-deps
+
+QUERY ?= name fetch_method repo version
+
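+# Example: "make query-deps QUERY='name repo'" prints one line per
+# dependency with just those fields. query_target below generates one
+# such target per dependency type.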
+define query_target
+$(1): $(2) clean-tmp-query.log
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(4)
+endif
+ $(verbose) $(foreach dep,$(3),\
+ echo $(PROJECT): $(foreach q,$(QUERY),$(call query_$(q),$(dep))) >> $(4) ;)
+ $(if $(filter-out query-deps,$(1)),,\
+ $(verbose) set -e; for dep in $(3) ; do \
+ if grep -qs ^$$$$dep$$$$ $(ERLANG_MK_TMP)/query.log; then \
+ :; \
+ else \
+ echo $$$$dep >> $(ERLANG_MK_TMP)/query.log; \
+ $(MAKE) -C $(DEPS_DIR)/$$$$dep $$@ QUERY="$(QUERY)" IS_DEP=1 || true; \
+ fi \
+ done)
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) touch $(4)
+ $(verbose) cat $(4)
+endif
+endef
+
+clean-tmp-query.log:
+ifeq ($(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_TMP)/query.log
+endif
+
+$(eval $(call query_target,query-deps,$(ERLANG_MK_RECURSIVE_DEPS_LIST),$(BUILD_DEPS) $(DEPS),$(ERLANG_MK_QUERY_DEPS_FILE)))
+$(eval $(call query_target,query-doc-deps,$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST),$(DOC_DEPS),$(ERLANG_MK_QUERY_DOC_DEPS_FILE)))
+$(eval $(call query_target,query-rel-deps,$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST),$(REL_DEPS),$(ERLANG_MK_QUERY_REL_DEPS_FILE)))
+$(eval $(call query_target,query-test-deps,$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST),$(TEST_DEPS),$(ERLANG_MK_QUERY_TEST_DEPS_FILE)))
+$(eval $(call query_target,query-shell-deps,$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST),$(SHELL_DEPS),$(ERLANG_MK_QUERY_SHELL_DEPS_FILE)))
diff --git a/deps/rabbitmq_web_mqtt/priv/schema/rabbitmq_web_mqtt.schema b/deps/rabbitmq_web_mqtt/priv/schema/rabbitmq_web_mqtt.schema
new file mode 100644
index 0000000000..5c1f4e202d
--- /dev/null
+++ b/deps/rabbitmq_web_mqtt/priv/schema/rabbitmq_web_mqtt.schema
@@ -0,0 +1,183 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+{mapping, "web_mqtt.num_acceptors.tcp", "rabbitmq_web_mqtt.num_tcp_acceptors",
+ [{datatype, integer}]}.
+{mapping, "web_mqtt.num_acceptors.ssl", "rabbitmq_web_mqtt.num_ssl_acceptors",
+ [{datatype, integer}]}.
+{mapping, "web_mqtt.max_connections", "rabbitmq_web_mqtt.max_connections", [
+ {datatype, [{enum, [infinity]}, integer]}
+]}.
+
+{mapping, "web_mqtt.tcp.backlog", "rabbitmq_web_mqtt.tcp_config.backlog",
+ [{datatype, integer}]}.
+{mapping, "web_mqtt.tcp.listener", "rabbitmq_web_mqtt.tcp_config", [
+ {datatype, [{enum, [none]}, ip]}
+]}.
+{mapping, "web_mqtt.tcp.ip", "rabbitmq_web_mqtt.tcp_config.ip",
+ [{datatype, string}, {validators, ["is_ip"]}]}.
+{mapping, "web_mqtt.tcp.port", "rabbitmq_web_mqtt.tcp_config.port",
+ [{datatype, integer}]}.
+
+{mapping, "web_mqtt.ws_path", "rabbitmq_web_mqtt.ws_path",
+ [{datatype, string}]}.
+
+{translation,
+ "rabbitmq_web_mqtt.tcp_config",
+ fun(Conf) ->
+ Setting = cuttlefish:conf_get("web_mqtt.tcp.listener", Conf, undefined),
+ case Setting of
+ none -> [];
+ undefined -> [{port, 15675}];
+ {Ip, Port} when is_list(Ip), is_integer(Port) ->
+ [{ip, Ip}, {port, Port}]
+ end
+ end
+}.
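+
+%% For example, "web_mqtt.tcp.listener = 127.0.0.1:15675" maps to
+%% [{ip, "127.0.0.1"}, {port, 15675}]; "none" disables the plain
+%% listener, and leaving the key unset keeps the default port 15675.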
+
+{mapping, "web_mqtt.ssl.listener", "rabbitmq_web_mqtt.ssl_config", [
+ {datatype, [{enum, [none]}, ip]}
+]}.
+
+{mapping, "web_mqtt.ssl.backlog", "rabbitmq_web_mqtt.ssl_config.backlog",
+ [{datatype, integer}]}.
+{mapping, "web_mqtt.ssl.ip", "rabbitmq_web_mqtt.ssl_config.ip",
+ [{datatype, string}, {validators, ["is_ip"]}]}.
+{mapping, "web_mqtt.ssl.port", "rabbitmq_web_mqtt.ssl_config.port",
+ [{datatype, integer}]}.
+{mapping, "web_mqtt.ssl.certfile", "rabbitmq_web_mqtt.ssl_config.certfile",
+ [{datatype, string}, {validators, ["file_accessible"]}]}.
+{mapping, "web_mqtt.ssl.keyfile", "rabbitmq_web_mqtt.ssl_config.keyfile",
+ [{datatype, string}, {validators, ["file_accessible"]}]}.
+{mapping, "web_mqtt.ssl.cacertfile", "rabbitmq_web_mqtt.ssl_config.cacertfile",
+ [{datatype, string}, {validators, ["file_accessible"]}]}.
+{mapping, "web_mqtt.ssl.password", "rabbitmq_web_mqtt.ssl_config.password",
+ [{datatype, string}]}.
+
+{translation,
+ "rabbitmq_web_mqtt.ssl_config",
+ fun(Conf) ->
+ Setting = cuttlefish:conf_get("web_mqtt.ssl.listener", Conf, undefined),
+ case Setting of
+ none -> cuttlefish:unset();
+ undefined -> [];
+ {Ip, Port} when is_list(Ip), is_integer(Port) ->
+                %% we redo some of Cuttlefish's work here to
+                %% populate rabbitmq_web_mqtt.ssl_config
+ TLSConf = cuttlefish_variable:filter_by_prefix("web_mqtt.ssl", Conf),
+ %% preserve all web_mqtt.ssl.* keys in rabbitmq_web_mqtt.ssl_config
+ TLSConfM0 = maps:fold(fun(Path, V, Acc) ->
+ maps:put(list_to_atom(lists:last(Path)), V, Acc)
+ end, #{}, maps:from_list(TLSConf)),
+ TLSConfM = maps:remove(listener, TLSConfM0),
+ ListenerM = maps:from_list([{ip, Ip}, {port, Port}]),
+ lists:keysort(1, maps:to_list(maps:merge(TLSConfM, ListenerM)));
+
+ _ -> Setting
+ end
+ end
+}.
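+
+%% For example, with "web_mqtt.ssl.listener = 127.0.0.2:15671", all other
+%% web_mqtt.ssl.* settings are folded into rabbitmq_web_mqtt.ssl_config
+%% next to the ip and port; "none" unsets the TLS configuration entirely.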
+
+{mapping, "web_mqtt.ssl.honor_cipher_order", "rabbitmq_web_mqtt.ssl_config.honor_cipher_order",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "web_mqtt.ssl.honor_ecc_order", "rabbitmq_web_mqtt.ssl_config.honor_ecc_order",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "web_mqtt.ssl.reuse_sessions", "rabbitmq_web_mqtt.ssl_config.reuse_sessions",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "web_mqtt.ssl.secure_renegotiate", "rabbitmq_web_mqtt.ssl_config.secure_renegotiate",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "web_mqtt.ssl.client_renegotiation", "rabbitmq_web_mqtt.ssl_config.client_renegotiation",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "web_mqtt.ssl.crl_check", "rabbitmq_web_mqtt.ssl_config.crl_check",
+ [{datatype, [{enum, [true, false, peer, best_effort]}]}]}.
+
+{mapping, "web_mqtt.ssl.depth", "rabbitmq_web_mqtt.ssl_config.depth",
+ [{datatype, integer}, {validators, ["byte"]}]}.
+
+{mapping, "web_mqtt.ssl.versions.$version", "rabbitmq_web_mqtt.ssl_config.versions",
+ [{datatype, atom}]}.
+
+{translation, "rabbitmq_web_mqtt.ssl_config.versions",
+fun(Conf) ->
+ Settings = cuttlefish_variable:filter_by_prefix("web_mqtt.ssl.versions", Conf),
+ [V || {_, V} <- Settings]
+end}.
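+
+%% For example, "web_mqtt.ssl.versions.1 = tlsv1.2" and
+%% "web_mqtt.ssl.versions.2 = tlsv1.1" yield {versions, ['tlsv1.2','tlsv1.1']}.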
+
+{mapping, "web_mqtt.ssl.ciphers.$cipher", "rabbitmq_web_mqtt.ssl_config.ciphers",
+ [{datatype, string}]}.
+
+{translation, "rabbitmq_web_mqtt.ssl_config.ciphers",
+fun(Conf) ->
+ Settings = cuttlefish_variable:filter_by_prefix("web_mqtt.ssl.ciphers", Conf),
+ lists:reverse([V || {_, V} <- Settings])
+end}.
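+
+%% For example, "web_mqtt.ssl.ciphers.1 = ECDHE-ECDSA-AES256-GCM-SHA384"
+%% (and so on) collects the ciphers as a list of strings in declaration
+%% order.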
+
+
+%%
+%% Cowboy options
+%%
+
+{mapping, "web_mqtt.cowboy_opts.max_empty_lines", "rabbitmq_web_mqtt.cowboy_opts.max_empty_lines",
+ [{datatype, integer}]}.
+{mapping, "web_mqtt.cowboy_opts.max_header_name_length", "rabbitmq_web_mqtt.cowboy_opts.max_header_name_length",
+ [{datatype, integer}]}.
+{mapping, "web_mqtt.cowboy_opts.max_header_value_length", "rabbitmq_web_mqtt.cowboy_opts.max_header_value_length",
+ [{datatype, integer}]}.
+{mapping, "web_mqtt.cowboy_opts.max_headers", "rabbitmq_web_mqtt.cowboy_opts.max_headers",
+ [{datatype, integer}]}.
+{mapping, "web_mqtt.cowboy_opts.max_keepalive", "rabbitmq_web_mqtt.cowboy_opts.max_keepalive",
+ [{datatype, integer}]}.
+{mapping, "web_mqtt.cowboy_opts.max_request_line_length", "rabbitmq_web_mqtt.cowboy_opts.max_request_line_length",
+ [{datatype, integer}]}.
+{mapping, "web_mqtt.cowboy_opts.timeout", "rabbitmq_web_mqtt.cowboy_opts.idle_timeout",
+ [{datatype, integer}]}.
+
+%% backwards compatibility
+{mapping, "web_mqtt.cowboy_opts.timeout", "rabbitmq_web_mqtt.cowboy_opts.idle_timeout",
+ [{datatype, integer}, {validators, ["non_negative_integer"]}]
+}.
+%% recent Cowboy versions have several timeout settings
+{mapping, "web_mqtt.cowboy_opts.idle_timeout", "rabbitmq_web_mqtt.cowboy_opts.idle_timeout",
+ [{datatype, integer}]
+}.
+
+{translation,
+ "rabbitmq_web_mqtt.cowboy_opts.idle_timeout",
+ fun(Conf) ->
+ case cuttlefish:conf_get("web_mqtt.cowboy_opts.timeout", Conf, undefined) of
+ Value when is_integer(Value) ->
+ Value;
+ undefined ->
+ case cuttlefish:conf_get("web_mqtt.cowboy_opts.idle_timeout", Conf, undefined) of
+ undefined -> cuttlefish:unset();
+ Value -> Value
+ end
+ end
+ end
+}.
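+
+%% The legacy "web_mqtt.cowboy_opts.timeout" key, when set, takes
+%% precedence over "web_mqtt.cowboy_opts.idle_timeout".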
+
+{mapping, "web_mqtt.ws_opts.compress", "rabbitmq_web_mqtt.cowboy_ws_opts.compress",
+ [{datatype, {enum, [true, false]}}]
+}.
+{mapping, "web_mqtt.ws_opts.max_frame_size", "rabbitmq_web_mqtt.cowboy_ws_opts.max_frame_size",
+ [{datatype, integer}, {validators, ["non_negative_integer"]}]
+}.
+{mapping, "web_mqtt.ws_opts.idle_timeout", "rabbitmq_web_mqtt.cowboy_ws_opts.idle_timeout",
+ [{datatype, integer}, {validators, ["non_negative_integer"]}]
+}.
+
+%% Whether or not to enable proxy protocol support.
+%%
+%% {proxy_protocol, false}
+
+{mapping, "web_mqtt.proxy_protocol", "rabbitmq_web_mqtt.proxy_protocol",
+ [{datatype, {enum, [true, false]}}]}.
diff --git a/deps/rabbitmq_web_mqtt/rabbitmq-components.mk b/deps/rabbitmq_web_mqtt/rabbitmq-components.mk
new file mode 100644
index 0000000000..b2a3be8b35
--- /dev/null
+++ b/deps/rabbitmq_web_mqtt/rabbitmq-components.mk
@@ -0,0 +1,359 @@
+ifeq ($(.DEFAULT_GOAL),)
+# Define the default goal as `all` because this file defines some
+# targets before erlang.mk is included, which would otherwise make the
+# wrong target the default.
+.DEFAULT_GOAL = all
+endif
+
+# PROJECT_VERSION defaults to:
+# 1. the version exported by rabbitmq-server-release;
+# 2. the version stored in `git-revisions.txt`, if it exists;
+# 3. a version based on git-describe(1), if it is a Git clone;
+# 4. 0.0.0
+
+PROJECT_VERSION := $(RABBITMQ_VERSION)
+
+ifeq ($(PROJECT_VERSION),)
+PROJECT_VERSION := $(shell \
+if test -f git-revisions.txt; then \
+ head -n1 git-revisions.txt | \
+ awk '{print $$$(words $(PROJECT_DESCRIPTION) version);}'; \
+else \
+ (git describe --dirty --abbrev=7 --tags --always --first-parent \
+ 2>/dev/null || echo rabbitmq_v0_0_0) | \
+ sed -e 's/^rabbitmq_v//' -e 's/^v//' -e 's/_/./g' -e 's/-/+/' \
+ -e 's/-/./g'; \
+fi)
+endif
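+# For example, a git-describe(1) output of "v3.8.9-12-gabc1234"
+# (hypothetical tag) becomes "3.8.9+12.gabc1234" after the
+# substitutions above.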
+
+# --------------------------------------------------------------------
+# RabbitMQ components.
+# --------------------------------------------------------------------
+
+# For RabbitMQ repositories, we want to check out branches which match
+# the parent project. For instance, if the parent project is on a
+# release tag, dependencies must be on the same release tag. If the
+# parent project is on a topic branch, dependencies must be on the same
+# topic branch, or fall back to `stable` or `master`, whichever was the
+# base of the topic branch.
+
+dep_amqp_client = git_rmq rabbitmq-erlang-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_amqp10_client = git_rmq rabbitmq-amqp1.0-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_amqp10_common = git_rmq rabbitmq-amqp1.0-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbit = git_rmq rabbitmq-server $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbit_common = git_rmq rabbitmq-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_amqp1_0 = git_rmq rabbitmq-amqp1.0 $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_amqp = git_rmq rabbitmq-auth-backend-amqp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_cache = git_rmq rabbitmq-auth-backend-cache $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_http = git_rmq rabbitmq-auth-backend-http $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_ldap = git_rmq rabbitmq-auth-backend-ldap $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_oauth2 = git_rmq rabbitmq-auth-backend-oauth2 $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_mechanism_ssl = git_rmq rabbitmq-auth-mechanism-ssl $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_aws = git_rmq rabbitmq-aws $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_boot_steps_visualiser = git_rmq rabbitmq-boot-steps-visualiser $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_cli = git_rmq rabbitmq-cli $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_codegen = git_rmq rabbitmq-codegen $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_consistent_hash_exchange = git_rmq rabbitmq-consistent-hash-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_ct_client_helpers = git_rmq rabbitmq-ct-client-helpers $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_ct_helpers = git_rmq rabbitmq-ct-helpers $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_delayed_message_exchange = git_rmq rabbitmq-delayed-message-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_dotnet_client = git_rmq rabbitmq-dotnet-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_event_exchange = git_rmq rabbitmq-event-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_federation = git_rmq rabbitmq-federation $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_federation_management = git_rmq rabbitmq-federation-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_java_client = git_rmq rabbitmq-java-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_client = git_rmq rabbitmq-jms-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_cts = git_rmq rabbitmq-jms-cts $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_topic_exchange = git_rmq rabbitmq-jms-topic-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_lvc_exchange = git_rmq rabbitmq-lvc-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management = git_rmq rabbitmq-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_agent = git_rmq rabbitmq-management-agent $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_exchange = git_rmq rabbitmq-management-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_themes = git_rmq rabbitmq-management-themes $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_message_timestamp = git_rmq rabbitmq-message-timestamp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_metronome = git_rmq rabbitmq-metronome $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_mqtt = git_rmq rabbitmq-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_objc_client = git_rmq rabbitmq-objc-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_aws = git_rmq rabbitmq-peer-discovery-aws $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_common = git_rmq rabbitmq-peer-discovery-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_consul = git_rmq rabbitmq-peer-discovery-consul $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_etcd = git_rmq rabbitmq-peer-discovery-etcd $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_k8s = git_rmq rabbitmq-peer-discovery-k8s $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_prometheus = git_rmq rabbitmq-prometheus $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_random_exchange = git_rmq rabbitmq-random-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_recent_history_exchange = git_rmq rabbitmq-recent-history-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_routing_node_stamp = git_rmq rabbitmq-routing-node-stamp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_rtopic_exchange = git_rmq rabbitmq-rtopic-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_server_release = git_rmq rabbitmq-server-release $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_sharding = git_rmq rabbitmq-sharding $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_shovel = git_rmq rabbitmq-shovel $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_shovel_management = git_rmq rabbitmq-shovel-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_stomp = git_rmq rabbitmq-stomp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_stream = git_rmq rabbitmq-stream $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_toke = git_rmq rabbitmq-toke $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_top = git_rmq rabbitmq-top $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_tracing = git_rmq rabbitmq-tracing $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_trust_store = git_rmq rabbitmq-trust-store $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_test = git_rmq rabbitmq-test $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_dispatch = git_rmq rabbitmq-web-dispatch $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_stomp = git_rmq rabbitmq-web-stomp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_stomp_examples = git_rmq rabbitmq-web-stomp-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt = git_rmq rabbitmq-web-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt_examples = git_rmq rabbitmq-web-mqtt-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_website = git_rmq rabbitmq-website $(current_rmq_ref) $(base_rmq_ref) live master
+dep_toke = git_rmq toke $(current_rmq_ref) $(base_rmq_ref) master
+
+dep_rabbitmq_public_umbrella = git_rmq rabbitmq-public-umbrella $(current_rmq_ref) $(base_rmq_ref) master
+
+# Third-party dependencies version pinning.
+#
+# We pin them in this file, which is copied into all projects, to ensure
+# all projects use the same versions. This avoids conflicts and makes it
+# possible to work with rabbitmq-public-umbrella.
+
+dep_accept = hex 0.3.5
+dep_cowboy = hex 2.8.0
+dep_cowlib = hex 2.9.1
+dep_jsx = hex 2.11.0
+dep_lager = hex 3.8.0
+dep_prometheus = git https://github.com/deadtrickster/prometheus.erl.git master
+dep_ra = git https://github.com/rabbitmq/ra.git master
+dep_ranch = hex 1.7.1
+dep_recon = hex 2.5.1
+dep_observer_cli = hex 1.5.4
+dep_stdout_formatter = hex 0.2.4
+dep_sysmon_handler = hex 1.3.0
+
+RABBITMQ_COMPONENTS = amqp_client \
+ amqp10_common \
+ amqp10_client \
+ rabbit \
+ rabbit_common \
+ rabbitmq_amqp1_0 \
+ rabbitmq_auth_backend_amqp \
+ rabbitmq_auth_backend_cache \
+ rabbitmq_auth_backend_http \
+ rabbitmq_auth_backend_ldap \
+ rabbitmq_auth_backend_oauth2 \
+ rabbitmq_auth_mechanism_ssl \
+ rabbitmq_aws \
+ rabbitmq_boot_steps_visualiser \
+ rabbitmq_cli \
+ rabbitmq_codegen \
+ rabbitmq_consistent_hash_exchange \
+ rabbitmq_ct_client_helpers \
+ rabbitmq_ct_helpers \
+ rabbitmq_delayed_message_exchange \
+ rabbitmq_dotnet_client \
+ rabbitmq_event_exchange \
+ rabbitmq_federation \
+ rabbitmq_federation_management \
+ rabbitmq_java_client \
+ rabbitmq_jms_client \
+ rabbitmq_jms_cts \
+ rabbitmq_jms_topic_exchange \
+ rabbitmq_lvc_exchange \
+ rabbitmq_management \
+ rabbitmq_management_agent \
+ rabbitmq_management_exchange \
+ rabbitmq_management_themes \
+ rabbitmq_message_timestamp \
+ rabbitmq_metronome \
+ rabbitmq_mqtt \
+ rabbitmq_objc_client \
+ rabbitmq_peer_discovery_aws \
+ rabbitmq_peer_discovery_common \
+ rabbitmq_peer_discovery_consul \
+ rabbitmq_peer_discovery_etcd \
+ rabbitmq_peer_discovery_k8s \
+ rabbitmq_prometheus \
+ rabbitmq_random_exchange \
+ rabbitmq_recent_history_exchange \
+ rabbitmq_routing_node_stamp \
+ rabbitmq_rtopic_exchange \
+ rabbitmq_server_release \
+ rabbitmq_sharding \
+ rabbitmq_shovel \
+ rabbitmq_shovel_management \
+ rabbitmq_stomp \
+ rabbitmq_stream \
+ rabbitmq_toke \
+ rabbitmq_top \
+ rabbitmq_tracing \
+ rabbitmq_trust_store \
+ rabbitmq_web_dispatch \
+ rabbitmq_web_mqtt \
+ rabbitmq_web_mqtt_examples \
+ rabbitmq_web_stomp \
+ rabbitmq_web_stomp_examples \
+ rabbitmq_website
+
+# Erlang.mk does not rebuild dependencies once they have been compiled,
+# except for those listed in the `$(FORCE_REBUILD)` variable.
+#
+# We want all RabbitMQ components to always be rebuilt: this makes it
+# easier to work on several components at the same time.
+
+FORCE_REBUILD = $(RABBITMQ_COMPONENTS)
+
+# Several components have a custom erlang.mk/build.config, mainly
+# to disable eunit. Therefore, we can't use the top-level project's
+# erlang.mk copy.
+NO_AUTOPATCH += $(RABBITMQ_COMPONENTS)
+
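+# current_rmq_ref is the branch or tag currently checked out; when HEAD
+# is detached at a plain commit, the value is left empty.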
+ifeq ($(origin current_rmq_ref),undefined)
+ifneq ($(wildcard .git),)
+current_rmq_ref := $(shell (\
+ ref=$$(LANG=C git branch --list | awk '/^\* \(.*detached / {ref=$$0; sub(/.*detached [^ ]+ /, "", ref); sub(/\)$$/, "", ref); print ref; exit;} /^\* / {ref=$$0; sub(/^\* /, "", ref); print ref; exit}');\
+ if test "$$(git rev-parse --short HEAD)" != "$$ref"; then echo "$$ref"; fi))
+else
+current_rmq_ref := master
+endif
+endif
+export current_rmq_ref
+
+ifeq ($(origin base_rmq_ref),undefined)
+ifneq ($(wildcard .git),)
+possible_base_rmq_ref := master
+ifeq ($(possible_base_rmq_ref),$(current_rmq_ref))
+base_rmq_ref := $(current_rmq_ref)
+else
+base_rmq_ref := $(shell \
+ (git rev-parse --verify -q master >/dev/null && \
+ git rev-parse --verify -q $(possible_base_rmq_ref) >/dev/null && \
+ git merge-base --is-ancestor $$(git merge-base master HEAD) $(possible_base_rmq_ref) && \
+ echo $(possible_base_rmq_ref)) || \
+ echo master)
+endif
+else
+base_rmq_ref := master
+endif
+endif
+export base_rmq_ref
+
+# Repository URL selection.
+#
+# First, we infer other components' location from the current project
+# repository URL, if it's a Git repository:
+# - We take the "origin" remote URL as the base
+# - The current project name and repository name are replaced by the
+# target's properties:
+# eg. rabbitmq-common is replaced by rabbitmq-codegen
+# eg. rabbit_common is replaced by rabbitmq_codegen
+#
+# If cloning from this computed location fails, we fall back to the
+# RabbitMQ upstream on GitHub.
+
+# Macro to transform eg. "rabbit_common" to "rabbitmq-common".
+rmq_cmp_repo_name = $(word 2,$(dep_$(1)))
+
+# Upstream URL for the current project.
+RABBITMQ_COMPONENT_REPO_NAME := $(call rmq_cmp_repo_name,$(PROJECT))
+RABBITMQ_UPSTREAM_FETCH_URL ?= https://github.com/rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git
+RABBITMQ_UPSTREAM_PUSH_URL ?= git@github.com:rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git
+
+# Current URL for the current project. If this is not a Git clone,
+# default to the upstream Git repository.
+ifneq ($(wildcard .git),)
+git_origin_fetch_url := $(shell git config remote.origin.url)
+git_origin_push_url := $(shell git config remote.origin.pushurl || git config remote.origin.url)
+RABBITMQ_CURRENT_FETCH_URL ?= $(git_origin_fetch_url)
+RABBITMQ_CURRENT_PUSH_URL ?= $(git_origin_push_url)
+else
+RABBITMQ_CURRENT_FETCH_URL ?= $(RABBITMQ_UPSTREAM_FETCH_URL)
+RABBITMQ_CURRENT_PUSH_URL ?= $(RABBITMQ_UPSTREAM_PUSH_URL)
+endif
+
+# Macro to replace the following pattern:
+# 1. /foo.git -> /bar.git
+# 2. /foo -> /bar
+# 3. /foo/ -> /bar/
+subst_repo_name = $(patsubst %/$(1)/%,%/$(2)/%,$(patsubst %/$(1),%/$(2),$(patsubst %/$(1).git,%/$(2).git,$(3))))
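+# e.g. $(call subst_repo_name,rabbitmq-common,rabbitmq-codegen,
+#      https://github.com/rabbitmq/rabbitmq-common.git) yields
+# https://github.com/rabbitmq/rabbitmq-codegen.git.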
+
+# Macro to replace both the project's name (eg. "rabbit_common") and
+# repository name (eg. "rabbitmq-common") by the target's equivalent.
+#
+# This macro is kept on one line because we don't want whitespace in
+# the returned value, as it's used in $(dep_fetch_git_rmq) in a shell
+# single-quoted string.
+dep_rmq_repo = $(if $(dep_$(2)),$(call subst_repo_name,$(PROJECT),$(2),$(call subst_repo_name,$(RABBITMQ_COMPONENT_REPO_NAME),$(call rmq_cmp_repo_name,$(2)),$(1))),$(pkg_$(1)_repo))
+
+dep_rmq_commits = $(if $(dep_$(1)), \
+ $(wordlist 3,$(words $(dep_$(1))),$(dep_$(1))), \
+ $(pkg_$(1)_commit))
+
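+# Fetch method backing the git_rmq scheme above: try the URL derived
+# from this clone's "origin" remote first, fall back to the GitHub
+# upstream, then check out the first candidate ref that exists.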
+define dep_fetch_git_rmq
+ fetch_url1='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_FETCH_URL),$(1))'; \
+ fetch_url2='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_FETCH_URL),$(1))'; \
+ if test "$$$$fetch_url1" != '$(RABBITMQ_CURRENT_FETCH_URL)' && \
+ git clone -q -n -- "$$$$fetch_url1" $(DEPS_DIR)/$(call dep_name,$(1)); then \
+ fetch_url="$$$$fetch_url1"; \
+ push_url='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_PUSH_URL),$(1))'; \
+ elif git clone -q -n -- "$$$$fetch_url2" $(DEPS_DIR)/$(call dep_name,$(1)); then \
+ fetch_url="$$$$fetch_url2"; \
+ push_url='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_PUSH_URL),$(1))'; \
+ fi; \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && ( \
+ $(foreach ref,$(call dep_rmq_commits,$(1)), \
+ git checkout -q $(ref) >/dev/null 2>&1 || \
+ ) \
+ (echo "error: no valid pathspec among: $(call dep_rmq_commits,$(1))" \
+ 1>&2 && false) ) && \
+ (test "$$$$fetch_url" = "$$$$push_url" || \
+ git remote set-url --push origin "$$$$push_url")
+endef
+
+# --------------------------------------------------------------------
+# Component distribution.
+# --------------------------------------------------------------------
+
+list-dist-deps::
+ @:
+
+prepare-dist::
+ @:
+
+# --------------------------------------------------------------------
+# Umbrella-specific settings.
+# --------------------------------------------------------------------
+
+# If the top-level project is a RabbitMQ component, we override
+# $(DEPS_DIR) for this project to point to the top-level project's one.
+#
+# We also verify that the guessed DEPS_DIR is actually named `deps`, to
+# rule out any situation where finding a `rabbitmq-components.mk` in an
+# upper directory was a coincidence.
+
+possible_deps_dir_1 = $(abspath ..)
+possible_deps_dir_2 = $(abspath ../../..)
+
+ifeq ($(notdir $(possible_deps_dir_1)),deps)
+ifneq ($(wildcard $(possible_deps_dir_1)/../rabbitmq-components.mk),)
+deps_dir_overriden = 1
+DEPS_DIR ?= $(possible_deps_dir_1)
+DISABLE_DISTCLEAN = 1
+endif
+endif
+
+ifeq ($(deps_dir_overriden),)
+ifeq ($(notdir $(possible_deps_dir_2)),deps)
+ifneq ($(wildcard $(possible_deps_dir_2)/../rabbitmq-components.mk),)
+deps_dir_overriden = 1
+DEPS_DIR ?= $(possible_deps_dir_2)
+DISABLE_DISTCLEAN = 1
+endif
+endif
+endif
+
+ifneq ($(wildcard UMBRELLA.md),)
+DISABLE_DISTCLEAN = 1
+endif
+
+# We disable `make distclean` so $(DEPS_DIR) is not accidentally removed.
+
+ifeq ($(DISABLE_DISTCLEAN),1)
+ifneq ($(filter distclean distclean-deps,$(MAKECMDGOALS)),)
+SKIP_DEPS = 1
+endif
+endif
diff --git a/deps/rabbitmq_web_mqtt/src/rabbit_web_mqtt_app.erl b/deps/rabbitmq_web_mqtt/src/rabbit_web_mqtt_app.erl
new file mode 100644
index 0000000000..00b80bf562
--- /dev/null
+++ b/deps/rabbitmq_web_mqtt/src/rabbit_web_mqtt_app.erl
@@ -0,0 +1,192 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_web_mqtt_app).
+
+-behaviour(application).
+-export([
+ start/2,
+ prep_stop/1,
+ stop/1,
+ list_connections/0,
+ close_all_client_connections/1
+]).
+
+%% Dummy supervisor - see Ulf Wiger's comment at
+%% http://erlang.2086793.n4.nabble.com/initializing-library-applications-without-processes-td2094473.html
+-behaviour(supervisor).
+-export([init/1]).
+
+-import(rabbit_misc, [pget/2]).
+
+-define(TCP_PROTOCOL, 'http/web-mqtt').
+-define(TLS_PROTOCOL, 'https/web-mqtt').
+
+%%
+%% API
+%%
+
+-spec start(_, _) -> {ok, pid()}.
+start(_Type, _StartArgs) ->
+ mqtt_init(),
+ supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+-spec prep_stop(term()) -> term().
+prep_stop(State) ->
+ State.
+
+-spec stop(_) -> ok.
+stop(_State) ->
+ rabbit_networking:stop_ranch_listener_of_protocol(?TCP_PROTOCOL),
+ rabbit_networking:stop_ranch_listener_of_protocol(?TLS_PROTOCOL),
+ ok.
+
+init([]) -> {ok, {{one_for_one, 1, 5}, []}}.
+
+-spec list_connections() -> [pid()].
+list_connections() ->
+ PlainPids = connection_pids_of_protocol(?TCP_PROTOCOL),
+ TLSPids = connection_pids_of_protocol(?TLS_PROTOCOL),
+
+ PlainPids ++ TLSPids.
+
+-spec close_all_client_connections(string()) -> {'ok', non_neg_integer()}.
+close_all_client_connections(Reason) ->
+ Connections = list_connections(),
+ [rabbit_web_mqtt_handler:close_connection(Pid, Reason) || Pid <- Connections],
+ {ok, length(Connections)}.
+
+%%
+%% Implementation
+%%
+
+connection_pids_of_protocol(Protocol) ->
+ case rabbit_networking:ranch_ref_of_protocol(Protocol) of
+ undefined -> [];
+ AcceptorRef ->
+ lists:map(fun cowboy_ws_connection_pid/1, ranch:procs(AcceptorRef, connections))
+ end.
+
+-spec cowboy_ws_connection_pid(pid()) -> pid().
+cowboy_ws_connection_pid(RanchConnPid) ->
+ Children = supervisor:which_children(RanchConnPid),
+ {cowboy_clear, Pid, _, _} = lists:keyfind(cowboy_clear, 1, Children),
+ Pid.
+
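+%% Reads the plugin's application environment and starts the plain
+%% and/or TLS Cowboy listeners; a listener is started only when its
+%% tcp_config / ssl_config is non-empty.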
+mqtt_init() ->
+ CowboyOpts0 = maps:from_list(get_env(cowboy_opts, [])),
+ CowboyWsOpts = maps:from_list(get_env(cowboy_ws_opts, [])),
+
+ Routes = cowboy_router:compile([{'_', [
+ {get_env(ws_path, "/ws"), rabbit_web_mqtt_handler, [{ws_opts, CowboyWsOpts}]}
+ ]}]),
+ CowboyOpts = CowboyOpts0#{env => #{dispatch => Routes},
+ middlewares => [cowboy_router, rabbit_web_mqtt_middleware, cowboy_handler],
+ proxy_header => get_env(proxy_protocol, false)},
+ case get_env(tcp_config, []) of
+ [] -> ok;
+ TCPConf0 -> start_tcp_listener(TCPConf0, CowboyOpts)
+ end,
+ case get_env(ssl_config, []) of
+ [] -> ok;
+ TLSConf0 -> start_tls_listener(TLSConf0, CowboyOpts)
+ end,
+ ok.
+
+start_tcp_listener(TCPConf0, CowboyOpts) ->
+ {TCPConf, IpStr, Port} = get_tcp_conf(TCPConf0),
+ RanchTransportOpts = #{
+ socket_opts => TCPConf,
+ connection_type => supervisor,
+ max_connections => get_max_connections(),
+ num_acceptors => get_env(num_tcp_acceptors, 10)
+ },
+ case ranch:start_listener(rabbit_networking:ranch_ref(TCPConf),
+ ranch_tcp,
+ RanchTransportOpts,
+ rabbit_web_mqtt_connection_sup,
+ CowboyOpts) of
+ {ok, _} -> ok;
+ {error, {already_started, _}} -> ok;
+ {error, ErrTCP} ->
+ rabbit_log_connection:error(
+ "Failed to start a WebSocket (HTTP) listener. Error: ~p,"
+ " listener settings: ~p~n",
+ [ErrTCP, TCPConf]),
+ throw(ErrTCP)
+ end,
+ listener_started(?TCP_PROTOCOL, TCPConf),
+ rabbit_log:info("rabbit_web_mqtt: listening for HTTP connections on ~s:~w~n",
+ [IpStr, Port]).
+
+start_tls_listener(TLSConf0, CowboyOpts) ->
+ rabbit_networking:ensure_ssl(),
+ {TLSConf, TLSIpStr, TLSPort} = get_tls_conf(TLSConf0),
+ RanchTransportOpts = #{
+ socket_opts => TLSConf,
+ connection_type => supervisor,
+ max_connections => get_max_connections(),
+ num_acceptors => get_env(num_ssl_acceptors, 10)
+ },
+ case ranch:start_listener(rabbit_networking:ranch_ref(TLSConf),
+ ranch_ssl,
+ RanchTransportOpts,
+ rabbit_web_mqtt_connection_sup,
+ CowboyOpts) of
+ {ok, _} -> ok;
+ {error, {already_started, _}} -> ok;
+ {error, ErrTLS} ->
+ rabbit_log_connection:error(
+ "Failed to start a TLS WebSocket (HTTPS) listener. Error: ~p,"
+ " listener settings: ~p~n",
+ [ErrTLS, TLSConf]),
+ throw(ErrTLS)
+ end,
+ listener_started(?TLS_PROTOCOL, TLSConf),
+ rabbit_log:info("rabbit_web_mqtt: listening for HTTPS connections on ~s:~w~n",
+ [TLSIpStr, TLSPort]).
+
+listener_started(Protocol, Listener) ->
+ Port = rabbit_misc:pget(port, Listener),
+ [rabbit_networking:tcp_listener_started(Protocol, Listener,
+ IPAddress, Port)
+ || {IPAddress, _Port, _Family}
+ <- rabbit_networking:tcp_listener_addresses(Port)],
+ ok.
+
+get_tcp_conf(TCPConf0) ->
+ TCPConf1 = case proplists:get_value(port, TCPConf0) of
+ undefined -> [{port, 15675}|TCPConf0];
+ _ -> TCPConf0
+ end,
+ get_ip_port(TCPConf1).
+
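+%% Note: when no port is configured, the TLS listener falls back to the
+%% same default port as the plain listener (15675), so web_mqtt.ssl.port
+%% should be set explicitly when both listeners are enabled.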
+get_tls_conf(TLSConf0) ->
+ TLSConf1 = case proplists:get_value(port, TLSConf0) of
+ undefined -> [{port, 15675}|proplists:delete(port, TLSConf0)];
+ _ -> TLSConf0
+ end,
+ get_ip_port(TLSConf1).
+
+get_ip_port(Conf0) ->
+ IpStr = proplists:get_value(ip, Conf0),
+ Ip = normalize_ip(IpStr),
+ Conf1 = lists:keyreplace(ip, 1, Conf0, {ip, Ip}),
+ Port = proplists:get_value(port, Conf1),
+ {Conf1, IpStr, Port}.
+
+normalize_ip(IpStr) when is_list(IpStr) ->
+ {ok, Ip} = inet:parse_address(IpStr),
+ Ip;
+normalize_ip(Ip) ->
+ Ip.
+
+get_max_connections() ->
+ get_env(max_connections, infinity).
+
+get_env(Key, Default) ->
+ rabbit_misc:get_env(rabbitmq_web_mqtt, Key, Default).
diff --git a/deps/rabbitmq_web_mqtt/src/rabbit_web_mqtt_connection_info.erl b/deps/rabbitmq_web_mqtt/src/rabbit_web_mqtt_connection_info.erl
new file mode 100644
index 0000000000..cb71ac6b76
--- /dev/null
+++ b/deps/rabbitmq_web_mqtt/src/rabbit_web_mqtt_connection_info.erl
@@ -0,0 +1,26 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_web_mqtt_connection_info).
+
+%% Module to add the MQTT client ID to authentication properties
+
+%% API
+-export([additional_authn_params/4]).
+
+additional_authn_params(_Creds, _VHost, _Pid, Infos) ->
+ case proplists:get_value(variable_map, Infos, undefined) of
+ VariableMap when is_map(VariableMap) ->
+ case maps:get(<<"client_id">>, VariableMap, []) of
+            ClientId when is_binary(ClientId) ->
+ [{client_id, ClientId}];
+ [] ->
+ []
+ end;
+ _ ->
+ []
+    end.
\ No newline at end of file
diff --git a/deps/rabbitmq_web_mqtt/src/rabbit_web_mqtt_connection_sup.erl b/deps/rabbitmq_web_mqtt/src/rabbit_web_mqtt_connection_sup.erl
new file mode 100644
index 0000000000..9ac87a4260
--- /dev/null
+++ b/deps/rabbitmq_web_mqtt/src/rabbit_web_mqtt_connection_sup.erl
@@ -0,0 +1,53 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_web_mqtt_connection_sup).
+
+-behaviour(supervisor2).
+-behaviour(ranch_protocol).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-export([start_link/4, start_keepalive_link/0]).
+
+-export([init/1]).
+
+%%----------------------------------------------------------------------------
+
+start_link(Ref, Sock, Transport, CowboyOpts0) ->
+ {ok, SupPid} = supervisor2:start_link(?MODULE, []),
+ {ok, KeepaliveSup} = supervisor2:start_child(
+ SupPid,
+ {rabbit_web_mqtt_keepalive_sup,
+ {?MODULE, start_keepalive_link, []},
+ intrinsic, infinity, supervisor, [rabbit_keepalive_sup]}),
+
+    %% In order for the WebSocket handler to receive the KeepaliveSup
+ %% variable, we need to pass it first through the environment and
+ %% then have the middleware rabbit_web_mqtt_middleware place it
+ %% in the initial handler state.
+ Env = maps:get(env, CowboyOpts0),
+ CowboyOpts = CowboyOpts0#{env => Env#{keepalive_sup => KeepaliveSup,
+ socket => Sock}},
+ Protocol = case Transport of
+ ranch_tcp -> cowboy_clear;
+ ranch_ssl -> cowboy_tls
+ end,
+ {ok, ReaderPid} = supervisor2:start_child(
+ SupPid,
+ {Protocol,
+ {Protocol, start_link, [Ref, Sock, Transport, CowboyOpts]},
+ intrinsic, ?WORKER_WAIT, worker, [Protocol]}),
+ {ok, SupPid, ReaderPid}.
+
+start_keepalive_link() ->
+ supervisor2:start_link(?MODULE, []).
+
+%%----------------------------------------------------------------------------
+
+init([]) ->
+ {ok, {{one_for_all, 0, 1}, []}}.
diff --git a/deps/rabbitmq_web_mqtt/src/rabbit_web_mqtt_handler.erl b/deps/rabbitmq_web_mqtt/src/rabbit_web_mqtt_handler.erl
new file mode 100644
index 0000000000..d7d1a3b499
--- /dev/null
+++ b/deps/rabbitmq_web_mqtt/src/rabbit_web_mqtt_handler.erl
@@ -0,0 +1,281 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_web_mqtt_handler).
+-behaviour(cowboy_websocket).
+
+-export([
+ init/2,
+ websocket_init/1,
+ websocket_handle/2,
+ websocket_info/2,
+ terminate/3
+]).
+-export([close_connection/2]).
+
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-record(state, {
+ conn_name,
+ keepalive,
+ keepalive_sup,
+ parse_state,
+ proc_state,
+ state,
+ conserve_resources,
+ socket,
+ peername,
+ stats_timer,
+ connection
+}).
+
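+%% Cowboy handler callback: upgrades the HTTP request to a WebSocket
+%% connection. When a PROXY protocol header is present, the socket is
+%% wrapped so the original client address is reported downstream.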
+init(Req, Opts) ->
+ {PeerAddr, _PeerPort} = maps:get(peer, Req),
+ {_, KeepaliveSup} = lists:keyfind(keepalive_sup, 1, Opts),
+ {_, Sock0} = lists:keyfind(socket, 1, Opts),
+ Sock = case maps:get(proxy_header, Req, undefined) of
+ undefined ->
+ Sock0;
+ ProxyInfo ->
+ {rabbit_proxy_socket, Sock0, ProxyInfo}
+ end,
+ case rabbit_net:connection_string(Sock, inbound) of
+ {ok, ConnStr} ->
+ Req2 = case cowboy_req:header(<<"sec-websocket-protocol">>, Req) of
+ undefined -> Req;
+ SecWsProtocol ->
+ cowboy_req:set_resp_header(<<"sec-websocket-protocol">>, SecWsProtocol, Req)
+ end,
+ WsOpts0 = proplists:get_value(ws_opts, Opts, #{}),
+ WsOpts = maps:merge(#{compress => true}, WsOpts0),
+ {cowboy_websocket, Req2, #state{
+ conn_name = ConnStr,
+ keepalive = {none, none},
+ keepalive_sup = KeepaliveSup,
+ parse_state = rabbit_mqtt_frame:initial_state(),
+ state = running,
+ conserve_resources = false,
+ socket = Sock,
+ peername = PeerAddr
+ }, WsOpts};
+ _ ->
+ {stop, Req}
+ end.
+
+websocket_init(State = #state{conn_name = ConnStr, socket = Sock, peername = PeerAddr}) ->
+ rabbit_log_connection:info("accepting Web MQTT connection ~p (~s)~n", [self(), ConnStr]),
+ AdapterInfo = amqp_connection:socket_adapter_info(Sock, {'Web MQTT', "N/A"}),
+ RealSocket = rabbit_net:unwrap_socket(Sock),
+ ProcessorState = rabbit_mqtt_processor:initial_state(Sock,
+ rabbit_mqtt_reader:ssl_login_name(RealSocket),
+ AdapterInfo,
+ fun send_reply/2,
+ PeerAddr),
+ process_flag(trap_exit, true),
+ {ok,
+ rabbit_event:init_stats_timer(
+ State#state{proc_state = ProcessorState},
+ #state.stats_timer),
+ hibernate}.
+
+-spec close_connection(pid(), string()) -> 'ok'.
+close_connection(Pid, Reason) ->
+ rabbit_log_connection:info("Web MQTT: will terminate connection process ~p, reason: ~s",
+ [Pid, Reason]),
+ sys:terminate(Pid, Reason),
+ ok.
+
+websocket_handle({binary, Data}, State) ->
+ handle_data(Data, State);
+%% Silently ignore ping and pong frames.
+websocket_handle({Ping, _}, State) when Ping =:= ping; Ping =:= pong ->
+ {ok, State, hibernate};
+websocket_handle(Ping, State) when Ping =:= ping; Ping =:= pong ->
+ {ok, State, hibernate};
+%% Log any other unexpected frames.
+websocket_handle(Frame, State) ->
+ rabbit_log_connection:info("Web MQTT: unexpected WebSocket frame ~p~n",
+ [Frame]),
+ {ok, State, hibernate}.
+
+websocket_info({conserve_resources, Conserve}, State) ->
+ NewState = State#state{conserve_resources = Conserve},
+ handle_credits(control_throttle(NewState));
+websocket_info({bump_credit, Msg}, State) ->
+ credit_flow:handle_bump_msg(Msg),
+ handle_credits(control_throttle(State));
+websocket_info({#'basic.deliver'{}, #amqp_msg{}, _DeliveryCtx} = Delivery,
+ State = #state{ proc_state = ProcState0 }) ->
+ case rabbit_mqtt_processor:amqp_callback(Delivery, ProcState0) of
+ {ok, ProcState} ->
+ {ok, State #state { proc_state = ProcState }, hibernate};
+ {error, _, _} ->
+ stop(State)
+ end;
+websocket_info(#'basic.ack'{} = Ack, State = #state{ proc_state = ProcState0 }) ->
+ case rabbit_mqtt_processor:amqp_callback(Ack, ProcState0) of
+ {ok, ProcState} ->
+ {ok, State #state { proc_state = ProcState }, hibernate};
+ {error, _, _} ->
+ stop(State)
+ end;
+websocket_info(#'basic.consume_ok'{}, State) ->
+ {ok, State, hibernate};
+websocket_info(#'basic.cancel'{}, State) ->
+ stop(State);
+websocket_info({reply, Data}, State) ->
+ {reply, {binary, Data}, State, hibernate};
+websocket_info({'EXIT', _, _}, State) ->
+ stop(State);
+websocket_info({'$gen_cast', duplicate_id}, State = #state{ proc_state = ProcState,
+ conn_name = ConnName }) ->
+ rabbit_log_connection:warning("Web MQTT disconnecting a client with duplicate ID '~s' (~p)~n",
+ [rabbit_mqtt_processor:info(client_id, ProcState), ConnName]),
+ stop(State);
+websocket_info({'$gen_cast', {close_connection, Reason}}, State = #state{ proc_state = ProcState,
+ conn_name = ConnName }) ->
+ rabbit_log_connection:warning("Web MQTT disconnecting client with ID '~s' (~p), reason: ~s~n",
+ [rabbit_mqtt_processor:info(client_id, ProcState), ConnName, Reason]),
+ stop(State);
+websocket_info({start_keepalives, Keepalive},
+ State = #state{ socket = Sock, keepalive_sup = KeepaliveSup }) ->
+ %% Only the client has the responsibility for sending keepalives
+ SendFun = fun() -> ok end,
+ Parent = self(),
+ ReceiveFun = fun() -> Parent ! keepalive_timeout end,
+ Heartbeater = rabbit_heartbeat:start(
+ KeepaliveSup, Sock, 0, SendFun, Keepalive, ReceiveFun),
+ {ok, State #state { keepalive = Heartbeater }, hibernate};
+websocket_info(keepalive_timeout, State = #state{conn_name = ConnStr}) ->
+ rabbit_log_connection:error("closing Web MQTT connection ~p (keepalive timeout)~n", [ConnStr]),
+ stop(State);
+websocket_info(emit_stats, State) ->
+ {ok, emit_stats(State), hibernate};
+websocket_info({ra_event, _, _}, State) ->
+ {ok, State, hibernate};
+websocket_info(Msg, State) ->
+ rabbit_log_connection:info("Web MQTT: unexpected message ~p~n",
+ [Msg]),
+ {ok, State, hibernate}.
+
+terminate(_, _, #state{ proc_state = undefined }) ->
+ ok;
+terminate(_, _, State) ->
+ stop_rabbit_mqtt_processor(State),
+ ok.
+
+%% Internal.
+
+handle_data(Data, State0 = #state{conn_name = ConnStr}) ->
+ case handle_data1(Data, State0) of
+ {ok, State1 = #state{state = blocked}, hibernate} ->
+ {[{active, false}], State1, hibernate};
+ {error, Error} ->
+ stop_with_framing_error(State0, Error, ConnStr);
+ Other ->
+ Other
+ end.
+
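+%% Feeds the buffer to the MQTT frame parser; each complete frame is
+%% handed to rabbit_mqtt_processor:process_frame/2 and parsing resumes
+%% on the remaining bytes.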
+handle_data1(<<>>, State) ->
+ {ok, ensure_stats_timer(control_throttle(State)), hibernate};
+handle_data1(Data, State = #state{ parse_state = ParseState,
+ proc_state = ProcState,
+ conn_name = ConnStr }) ->
+ case rabbit_mqtt_frame:parse(Data, ParseState) of
+ {more, ParseState1} ->
+ {ok, ensure_stats_timer(control_throttle(
+ State #state{ parse_state = ParseState1 })), hibernate};
+ {ok, Frame, Rest} ->
+ case rabbit_mqtt_processor:process_frame(Frame, ProcState) of
+ {ok, ProcState1, ConnPid} ->
+ PS = rabbit_mqtt_frame:initial_state(),
+ handle_data1(
+ Rest,
+ State #state{ parse_state = PS,
+ proc_state = ProcState1,
+ connection = ConnPid });
+ {error, Reason, _} ->
+ rabbit_log_connection:info("MQTT protocol error ~p for connection ~p~n",
+ [Reason, ConnStr]),
+ stop(State, 1002, Reason);
+ {error, Error} ->
+ stop_with_framing_error(State, Error, ConnStr);
+ {stop, _} ->
+ stop(State)
+ end;
+ Other ->
+ Other
+ end.
+
+stop(State) ->
+ stop(State, 1000, "MQTT died").
+
+stop(State, CloseCode, Error0) ->
+ stop_rabbit_mqtt_processor(State),
+ Error1 = rabbit_data_coercion:to_binary(Error0),
+ {[{close, CloseCode, Error1}], State}.
+
+stop_with_framing_error(State, Error0, ConnStr) ->
+ Error1 = rabbit_misc:format("~p", [Error0]),
+ rabbit_log_connection:error("MQTT detected framing error '~s' for connection ~p~n",
+ [Error1, ConnStr]),
+ stop(State, 1007, Error1).
+
+stop_rabbit_mqtt_processor(State = #state{state = running,
+ proc_state = ProcState,
+ conn_name = ConnName}) ->
+ maybe_emit_stats(State),
+ rabbit_log_connection:info("closing Web MQTT connection ~p (~s)~n", [self(), ConnName]),
+ rabbit_mqtt_processor:send_will(ProcState),
+ rabbit_mqtt_processor:close_connection(ProcState).
+
+handle_credits(State0) ->
+ case control_throttle(State0) of
+ State = #state{state = running} ->
+ {[{active, true}], State};
+ State ->
+ {ok, State}
+ end.
+
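+%% Pauses the keepalive monitor while the connection is blocked by a
+%% resource alarm or credit-flow backlog, and resumes it once credit is
+%% granted again.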
+control_throttle(State = #state{ state = CS,
+ conserve_resources = Mem }) ->
+ case {CS, Mem orelse credit_flow:blocked()} of
+ {running, true} -> ok = rabbit_heartbeat:pause_monitor(
+ State#state.keepalive),
+ State #state{ state = blocked };
+ {blocked, false} -> ok = rabbit_heartbeat:resume_monitor(
+ State#state.keepalive),
+ State #state{ state = running };
+ {_, _} -> State
+ end.
+
+send_reply(Frame, _) ->
+ self() ! {reply, rabbit_mqtt_frame:serialise(Frame)}.
+
+ensure_stats_timer(State) ->
+ rabbit_event:ensure_stats_timer(State, #state.stats_timer, emit_stats).
+
+maybe_emit_stats(State) ->
+ rabbit_event:if_enabled(State, #state.stats_timer,
+ fun() -> emit_stats(State) end).
+
+emit_stats(State=#state{connection = C}) when C == none; C == undefined ->
+    %% Avoid emitting stats on terminate when the connection has not yet been
+    %% established, as this would create orphan entries in the stats database.
+ State1 = rabbit_event:reset_stats_timer(State, #state.stats_timer),
+ State1;
+emit_stats(State=#state{socket=Sock, state=RunningState, connection=Conn}) ->
+ SockInfos = case rabbit_net:getstat(Sock,
+ [recv_oct, recv_cnt, send_oct, send_cnt, send_pend]) of
+ {ok, SI} -> SI;
+ {error, _} -> []
+ end,
+ Infos = [{pid, Conn}, {state, RunningState}|SockInfos],
+ rabbit_core_metrics:connection_stats(Conn, Infos),
+ rabbit_event:notify(connection_stats, Infos),
+ State1 = rabbit_event:reset_stats_timer(State, #state.stats_timer),
+ State1.
diff --git a/deps/rabbitmq_web_mqtt/src/rabbit_web_mqtt_middleware.erl b/deps/rabbitmq_web_mqtt/src/rabbit_web_mqtt_middleware.erl
new file mode 100644
index 0000000000..7bc92099c6
--- /dev/null
+++ b/deps/rabbitmq_web_mqtt/src/rabbit_web_mqtt_middleware.erl
@@ -0,0 +1,22 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_web_mqtt_middleware).
+-behavior(cowboy_middleware).
+
+-export([execute/2]).
+
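+%% Copies keepalive_sup and the socket from the Cowboy environment into
+%% the handler options so rabbit_web_mqtt_handler:init/2 can read them.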
+execute(Req, Env) ->
+ #{keepalive_sup := KeepaliveSup} = Env,
+ Sock = maps:get(socket, Env),
+ case maps:get(handler_opts, Env, undefined) of
+ undefined -> {ok, Req, Env};
+ Opts when is_list(Opts) ->
+ {ok, Req, Env#{handler_opts => [{keepalive_sup, KeepaliveSup},
+ {socket, Sock}
+ |Opts]}}
+ end.
diff --git a/deps/rabbitmq_web_mqtt/test/config_schema_SUITE.erl b/deps/rabbitmq_web_mqtt/test/config_schema_SUITE.erl
new file mode 100644
index 0000000000..7d4211797c
--- /dev/null
+++ b/deps/rabbitmq_web_mqtt/test/config_schema_SUITE.erl
@@ -0,0 +1,55 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(config_schema_SUITE).
+
+-compile(export_all).
+
+all() ->
+ [
+ run_snippets
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ Config1 = rabbit_ct_helpers:run_setup_steps(Config),
+ rabbit_ct_config_schema:init_schemas(rabbitmq_web_mqtt, Config1).
+
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, Testcase}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_testcase(Testcase, Config) ->
+ Config1 = rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()),
+ rabbit_ct_helpers:testcase_finished(Config1, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+run_snippets(Config) ->
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, run_snippets1, [Config]).
+
+run_snippets1(Config) ->
+ rabbit_ct_config_schema:run_snippets(Config).
+
diff --git a/deps/rabbitmq_web_mqtt/test/config_schema_SUITE_data/certs/cacert.pem b/deps/rabbitmq_web_mqtt/test/config_schema_SUITE_data/certs/cacert.pem
new file mode 100644
index 0000000000..eaf6b67806
--- /dev/null
+++ b/deps/rabbitmq_web_mqtt/test/config_schema_SUITE_data/certs/cacert.pem
@@ -0,0 +1 @@
+I'm not a certificate
diff --git a/deps/rabbitmq_web_mqtt/test/config_schema_SUITE_data/certs/cert.pem b/deps/rabbitmq_web_mqtt/test/config_schema_SUITE_data/certs/cert.pem
new file mode 100644
index 0000000000..eaf6b67806
--- /dev/null
+++ b/deps/rabbitmq_web_mqtt/test/config_schema_SUITE_data/certs/cert.pem
@@ -0,0 +1 @@
+I'm not a certificate
diff --git a/deps/rabbitmq_web_mqtt/test/config_schema_SUITE_data/certs/key.pem b/deps/rabbitmq_web_mqtt/test/config_schema_SUITE_data/certs/key.pem
new file mode 100644
index 0000000000..eaf6b67806
--- /dev/null
+++ b/deps/rabbitmq_web_mqtt/test/config_schema_SUITE_data/certs/key.pem
@@ -0,0 +1 @@
+I'm not a certificate
diff --git a/deps/rabbitmq_web_mqtt/test/config_schema_SUITE_data/rabbitmq_web_mqtt.snippets b/deps/rabbitmq_web_mqtt/test/config_schema_SUITE_data/rabbitmq_web_mqtt.snippets
new file mode 100644
index 0000000000..7aaae956ca
--- /dev/null
+++ b/deps/rabbitmq_web_mqtt/test/config_schema_SUITE_data/rabbitmq_web_mqtt.snippets
@@ -0,0 +1,220 @@
+[{listener,
+ "web_mqtt.tcp.listener = 127.0.0.1:12345",
+ [{rabbitmq_web_mqtt,
+ [{tcp_config, [{ip,"127.0.0.1"},{port,12345}]}]}],
+ [rabbitmq_web_mqtt]},
+ {tcp_listener_none,
+ "web_mqtt.tcp.listener = none",
+ [{rabbitmq_web_mqtt,
+ [{tcp_config, []}]}],
+ [rabbitmq_web_mqtt]},
+ {tcp_config,
+ "web_mqtt.tcp.ip = 127.0.0.3
+ web_mqtt.tcp.port = 11122",
+ [{rabbitmq_web_mqtt,
+ [{tcp_config, [{ip,"127.0.0.3"},{port,11122}]}]}],
+ [rabbitmq_web_mqtt]},
+  {tcp_config_default,
+ "web_mqtt.tcp.backlog = 2048",
+ [{rabbitmq_web_mqtt,
+ [{tcp_config, [{backlog, 2048}]}]}],
+ [rabbitmq_web_mqtt]},
+ {tcp_config_ip,
+ "web_mqtt.tcp.ip = 127.0.0.3",
+ [{rabbitmq_web_mqtt,
+ [{tcp_config, [{ip,"127.0.0.3"}]}]}],
+ [rabbitmq_web_mqtt]},
+ {tcp_config_port,
+ "web_mqtt.tcp.port = 34567",
+ [{rabbitmq_web_mqtt,
+ [{tcp_config, [{port,34567}]}]}],
+ [rabbitmq_web_mqtt]},
+
+ {num_acceptors_tcp,
+ "web_mqtt.num_acceptors.tcp = 20",
+ [{rabbitmq_web_mqtt,
+ [{num_tcp_acceptors, 20}]}],
+ [rabbitmq_web_mqtt]},
+
+ {num_acceptors_tls,
+ "web_mqtt.num_acceptors.ssl = 20",
+ [{rabbitmq_web_mqtt,
+ [{num_ssl_acceptors, 20}]}],
+ [rabbitmq_web_mqtt]},
+
+ {max_connections,
+ "web_mqtt.max_connections = 5000",
+ [{rabbitmq_web_mqtt,
+ [{max_connections, 5000}]}],
+ [rabbitmq_web_mqtt]},
+
+ {ssl_listener,
+ "web_mqtt.ssl.listener = 127.0.0.4:15672",
+ [{rabbitmq_web_mqtt,
+ [{ssl_config, [{ip,"127.0.0.4"},{port,15672}]}]}],
+ [rabbitmq_web_mqtt]},
+ {ssl_listener_none,
+ "web_mqtt.ssl.listener = none",
+ [],
+ [rabbitmq_web_mqtt]},
+
+ {ssl_with_listener,
+ "web_mqtt.ssl.listener = 127.0.0.2:15671
+ web_mqtt.ssl.backlog = 1024
+ web_mqtt.ssl.certfile = test/config_schema_SUITE_data/certs/cert.pem
+ web_mqtt.ssl.keyfile = test/config_schema_SUITE_data/certs/key.pem
+ web_mqtt.ssl.cacertfile = test/config_schema_SUITE_data/certs/cacert.pem
+ web_mqtt.ssl.password = changeme",
+ [{rabbitmq_web_mqtt,
+ [{ssl_config,
+ [{ip,"127.0.0.2"},
+ {port,15671},
+ {backlog,1024},
+ {certfile,"test/config_schema_SUITE_data/certs/cert.pem"},
+ {keyfile,"test/config_schema_SUITE_data/certs/key.pem"},
+ {cacertfile,"test/config_schema_SUITE_data/certs/cacert.pem"},
+ {password,"changeme"}]}]}],
+ [rabbitmq_web_mqtt]},
+
+ {ssl,
+ "web_mqtt.ssl.ip = 127.0.0.2
+ web_mqtt.ssl.port = 15671
+ web_mqtt.ssl.backlog = 1024
+ web_mqtt.ssl.certfile = test/config_schema_SUITE_data/certs/cert.pem
+ web_mqtt.ssl.keyfile = test/config_schema_SUITE_data/certs/key.pem
+ web_mqtt.ssl.cacertfile = test/config_schema_SUITE_data/certs/cacert.pem
+ web_mqtt.ssl.password = changeme
+
+ web_mqtt.ssl.versions.tls1_2 = tlsv1.2
+ web_mqtt.ssl.versions.tls1_1 = tlsv1.1",
+ [{rabbitmq_web_mqtt,
+ [{ssl_config,
+ [
+ {ip,"127.0.0.2"},
+ {port,15671},
+ {backlog,1024},
+ {certfile,"test/config_schema_SUITE_data/certs/cert.pem"},
+ {keyfile,"test/config_schema_SUITE_data/certs/key.pem"},
+ {cacertfile,"test/config_schema_SUITE_data/certs/cacert.pem"},
+ {password,"changeme"},
+
+ {versions,['tlsv1.2','tlsv1.1']}
+ ]}]}],
+ [rabbitmq_web_mqtt]},
+
+ {ssl_ciphers,
+ "web_mqtt.ssl.port = 15671
+ web_mqtt.ssl.backlog = 1024
+ web_mqtt.ssl.certfile = test/config_schema_SUITE_data/certs/cert.pem
+ web_mqtt.ssl.keyfile = test/config_schema_SUITE_data/certs/key.pem
+ web_mqtt.ssl.cacertfile = test/config_schema_SUITE_data/certs/cacert.pem
+ web_mqtt.ssl.password = changeme
+
+ web_mqtt.ssl.honor_cipher_order = true
+ web_mqtt.ssl.honor_ecc_order = true
+ web_mqtt.ssl.client_renegotiation = false
+ web_mqtt.ssl.secure_renegotiate = true
+
+ web_mqtt.ssl.versions.1 = tlsv1.2
+ web_mqtt.ssl.versions.2 = tlsv1.1
+ web_mqtt.ssl.ciphers.1 = ECDHE-ECDSA-AES256-GCM-SHA384
+ web_mqtt.ssl.ciphers.2 = ECDHE-RSA-AES256-GCM-SHA384
+ web_mqtt.ssl.ciphers.3 = ECDHE-ECDSA-AES256-SHA384
+ web_mqtt.ssl.ciphers.4 = ECDHE-RSA-AES256-SHA384
+ web_mqtt.ssl.ciphers.5 = ECDH-ECDSA-AES256-GCM-SHA384
+ web_mqtt.ssl.ciphers.6 = ECDH-RSA-AES256-GCM-SHA384
+ web_mqtt.ssl.ciphers.7 = ECDH-ECDSA-AES256-SHA384
+ web_mqtt.ssl.ciphers.8 = ECDH-RSA-AES256-SHA384
+ web_mqtt.ssl.ciphers.9 = DHE-RSA-AES256-GCM-SHA384",
+ [{rabbitmq_web_mqtt,
+ [{ssl_config,
+ [{port,15671},
+ {backlog,1024},
+ {certfile,"test/config_schema_SUITE_data/certs/cert.pem"},
+ {keyfile,"test/config_schema_SUITE_data/certs/key.pem"},
+ {cacertfile,"test/config_schema_SUITE_data/certs/cacert.pem"},
+ {password,"changeme"},
+
+ {honor_cipher_order, true},
+ {honor_ecc_order, true},
+ {client_renegotiation, false},
+ {secure_renegotiate, true},
+
+ {versions,['tlsv1.2','tlsv1.1']},
+ {ciphers, [
+ "ECDHE-ECDSA-AES256-GCM-SHA384",
+ "ECDHE-RSA-AES256-GCM-SHA384",
+ "ECDHE-ECDSA-AES256-SHA384",
+ "ECDHE-RSA-AES256-SHA384",
+ "ECDH-ECDSA-AES256-GCM-SHA384",
+ "ECDH-RSA-AES256-GCM-SHA384",
+ "ECDH-ECDSA-AES256-SHA384",
+ "ECDH-RSA-AES256-SHA384",
+ "DHE-RSA-AES256-GCM-SHA384"
+ ]}
+ ]}]}],
+ [rabbitmq_web_mqtt]},
+
+ {websocket_endpoint,
+ "web_mqtt.ws_path = /rmq/ws",
+ [{rabbitmq_web_mqtt,
+ [{ws_path, "/rmq/ws"}]}],
+ [rabbitmq_web_mqtt]},
+
+ %%
+ %% Cowboy options
+ %%
+
+ {cowboy_max_keepalive,
+ "web_mqtt.cowboy_opts.max_keepalive = 10",
+ [{rabbitmq_web_mqtt,[{cowboy_opts,[{max_keepalive, 10}]}]}],
+ [rabbitmq_web_mqtt]},
+
+ {cowboy_timeout,
+ "web_mqtt.cowboy_opts.timeout = 10000",
+ [{rabbitmq_web_mqtt,[{cowboy_opts,[{idle_timeout, 10000}]}]}],
+ [rabbitmq_web_mqtt]},
+
+ {cowboy_idle_timeout,
+ "web_mqtt.cowboy_opts.idle_timeout = 10000",
+ [{rabbitmq_web_mqtt,[{cowboy_opts,[{idle_timeout, 10000}]}]}],
+ [rabbitmq_web_mqtt]},
+
+ %%
+ %% Cowboy WebSocket options
+ %%
+
+  {ws_opts_compress_true,
+ "web_mqtt.ws_opts.compress = true",
+ [{rabbitmq_web_mqtt,[{cowboy_ws_opts,[{compress, true}]}]}],
+ [rabbitmq_web_mqtt]},
+
+  {ws_opts_compress_false,
+ "web_mqtt.ws_opts.compress = false",
+ [{rabbitmq_web_mqtt,[{cowboy_ws_opts,[{compress, false}]}]}],
+ [rabbitmq_web_mqtt]},
+
+  {ws_opts_max_frame_size,
+ "web_mqtt.ws_opts.max_frame_size = 8000",
+ [{rabbitmq_web_mqtt,[{cowboy_ws_opts,[{max_frame_size, 8000}]}]}],
+ [rabbitmq_web_mqtt]},
+
+  {ws_idle_timeout,
+ "web_mqtt.ws_opts.idle_timeout = 10000",
+ [{rabbitmq_web_mqtt,[{cowboy_ws_opts,[{idle_timeout, 10000}]}]}],
+ [rabbitmq_web_mqtt]},
+
+ %%
+ %% Proxy Protocol
+ %%
+
+  {proxy_protocol_disabled,
+ "web_mqtt.proxy_protocol = false",
+ [{rabbitmq_web_mqtt,[{proxy_protocol,false}]}],
+ [rabbitmq_web_mqtt]},
+
+  {proxy_protocol_enabled,
+ "web_mqtt.proxy_protocol = true",
+ [{rabbitmq_web_mqtt,[{proxy_protocol,true}]}],
+ [rabbitmq_web_mqtt]}
+].
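
Each case above pairs a web_mqtt.* cuttlefish key with the proplist it must
produce under the rabbitmq_web_mqtt application environment. A minimal
sketch (not part of this commit; the module name and fallback values are
illustrative assumptions) of reading the translated settings back at
runtime:

    -module(web_mqtt_env_example).
    -export([ssl_port/0, ws_path/0]).

    %% The ssl cases above collect TLS listener settings under ssl_config;
    %% 15671 is an assumed fallback here, not a documented default.
    ssl_port() ->
        SslConfig = application:get_env(rabbitmq_web_mqtt, ssl_config, []),
        proplists:get_value(port, SslConfig, 15671).

    %% The websocket_endpoint case maps web_mqtt.ws_path to ws_path.
    ws_path() ->
        application:get_env(rabbitmq_web_mqtt, ws_path, "/ws").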
diff --git a/deps/rabbitmq_web_mqtt/test/proxy_protocol_SUITE.erl b/deps/rabbitmq_web_mqtt/test/proxy_protocol_SUITE.erl
new file mode 100644
index 0000000000..9c6b9355e4
--- /dev/null
+++ b/deps/rabbitmq_web_mqtt/test/proxy_protocol_SUITE.erl
@@ -0,0 +1,105 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(proxy_protocol_SUITE).
+
+
+-compile(export_all).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include("src/emqttc_packet.hrl").
+
+suite() ->
+ [
+ %% If a test hangs, no need to wait for 30 minutes.
+ {timetrap, {minutes, 2}}
+ ].
+
+all() ->
+ [{group, http_tests}].%,
+% {group, https_tests}].
+
+groups() ->
+ Tests = [
+ proxy_protocol
+ ],
+ [%{https_tests, [], Tests},
+ {http_tests, [], Tests}].
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ Config.
+
+end_per_suite(Config) ->
+ Config.
+
+init_per_group(Group, Config) ->
+ Protocol = case Group of
+ http_tests -> "ws";
+ https_tests -> "wss"
+ end,
+ Config1 = rabbit_ct_helpers:set_config(Config,
+ [{rmq_nodename_suffix, ?MODULE},
+ {protocol, Protocol},
+ {rabbitmq_ct_tls_verify, verify_none},
+ {rabbitmq_ct_tls_fail_if_no_peer_cert, false}]),
+
+ rabbit_ct_helpers:run_setup_steps(
+ Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++ [
+ fun configure_proxy_protocol/1,
+ fun configure_ssl/1
+ ]).
+
+configure_proxy_protocol(Config) ->
+ rabbit_ws_test_util:update_app_env(Config, proxy_protocol, true),
+ Config.
+
+configure_ssl(Config) ->
+ ErlangConfig = proplists:get_value(erlang_node_config, Config, []),
+ RabbitAppConfig = proplists:get_value(rabbit, ErlangConfig, []),
+ RabbitSslConfig = proplists:get_value(ssl_options, RabbitAppConfig, []),
+ Port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_web_mqtt_tls),
+ rabbit_ws_test_util:update_app_env(Config, ssl_config, [{port, Port} | lists:keydelete(port, 1, RabbitSslConfig)]),
+ Config.
+
+end_per_group(_Group, Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config,
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+proxy_protocol(Config) ->
+ PortStr = rabbit_ws_test_util:get_web_mqtt_port_str(Config),
+
+ Protocol = ?config(protocol, Config),
+ WS = rfc6455_client:new(Protocol ++ "://127.0.0.1:" ++ PortStr ++ "/ws", self(),
+ undefined, [], "PROXY TCP4 192.168.1.1 192.168.1.2 80 81\r\n"),
+ {ok, _} = rfc6455_client:open(WS),
+ Frame = emqttc_serialiser:serialise(
+ ?CONNECT_PACKET(#mqtt_packet_connect{
+ client_id = <<"web-mqtt-tests-proxy-protocol">>,
+ username = <<"guest">>,
+ password = <<"guest">>})),
+ rfc6455_client:send_binary(WS, Frame),
+ {binary, _P} = rfc6455_client:recv(WS),
+ ConnectionName = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, connection_name, []),
+ match = re:run(ConnectionName, <<"^192.168.1.1:80 ">>, [{capture, none}]),
+ {close, _} = rfc6455_client:close(WS),
+ ok.
+
+connection_name() ->
+ Connections = ets:tab2list(connection_created),
+ {_Key, Values} = lists:nth(1, Connections),
+ {_, Name} = lists:keyfind(name, 1, Values),
+ Name.
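
The suite above exercises proxy protocol support by prepending a PROXY
protocol v1 preface to the raw TCP stream before the WebSocket handshake
(rfc6455_client sends the preface verbatim). A hypothetical helper, not
part of this commit, that builds such a preface for arbitrary IPv4
endpoints:

    %% Returns iodata like "PROXY TCP4 192.168.1.1 192.168.1.2 80 81\r\n",
    %% the exact preface the test above hard-codes.
    proxy_v1_preface(SrcIp, DstIp, SrcPort, DstPort) ->
        io_lib:format("PROXY TCP4 ~s ~s ~b ~b\r\n",
                      [SrcIp, DstIp, SrcPort, DstPort]).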
diff --git a/deps/rabbitmq_web_mqtt/test/src/emqttc_packet.hrl b/deps/rabbitmq_web_mqtt/test/src/emqttc_packet.hrl
new file mode 100644
index 0000000000..f1063fdcff
--- /dev/null
+++ b/deps/rabbitmq_web_mqtt/test/src/emqttc_packet.hrl
@@ -0,0 +1,240 @@
+%%%-----------------------------------------------------------------------------
+%%% @Copyright (C) 2012-2015, Feng Lee <feng@emqtt.io>
+%%%
+%%% Permission is hereby granted, free of charge, to any person obtaining a copy
+%%% of this software and associated documentation files (the "Software"), to deal
+%%% in the Software without restriction, including without limitation the rights
+%%% to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+%%% copies of the Software, and to permit persons to whom the Software is
+%%% furnished to do so, subject to the following conditions:
+%%%
+%%% The above copyright notice and this permission notice shall be included in all
+%%% copies or substantial portions of the Software.
+%%%
+%%% THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+%%% IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+%%% FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+%%% AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+%%% LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+%%% OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+%%% SOFTWARE.
+%%%-----------------------------------------------------------------------------
+%%% @doc
+%%% emqttc packet header.
+%%% @end
+%%%-----------------------------------------------------------------------------
+
+%%------------------------------------------------------------------------------
+%% MQTT Protocol Version and Levels
+%%------------------------------------------------------------------------------
+-define(MQTT_PROTO_V31, 3).
+-define(MQTT_PROTO_V311, 4).
+
+-define(PROTOCOL_NAMES, [
+ {?MQTT_PROTO_V31, <<"MQIsdp">>},
+ {?MQTT_PROTO_V311, <<"MQTT">>}]).
+
+-type mqtt_vsn() :: ?MQTT_PROTO_V31 | ?MQTT_PROTO_V311.
+
+%%------------------------------------------------------------------------------
+%% QoS Levels
+%%------------------------------------------------------------------------------
+
+-define(QOS_0, 0).
+-define(QOS_1, 1).
+-define(QOS_2, 2).
+
+-define(IS_QOS(I), (I >= ?QOS_0 andalso I =< ?QOS_2)).
+
+-type mqtt_qos() :: ?QOS_0 | ?QOS_1 | ?QOS_2.
+
+%%------------------------------------------------------------------------------
+%% Default Keepalive Timeout(secs)
+%%------------------------------------------------------------------------------
+-define(KEEPALIVE, 90).
+
+%%------------------------------------------------------------------------------
+%% Max ClientId Length. Why 1024? You know why!
+%%------------------------------------------------------------------------------
+-define(MAX_CLIENTID_LEN, 1024).
+
+%%------------------------------------------------------------------------------
+%% MQTT Control Packet Types
+%%------------------------------------------------------------------------------
+-define(RESERVED, 0). %% Reserved
+-define(CONNECT, 1). %% Client request to connect to Server
+-define(CONNACK, 2). %% Server to Client: Connect acknowledgment
+-define(PUBLISH, 3). %% Publish message
+-define(PUBACK, 4). %% Publish acknowledgment
+-define(PUBREC, 5). %% Publish received (assured delivery part 1)
+-define(PUBREL, 6). %% Publish release (assured delivery part 2)
+-define(PUBCOMP, 7). %% Publish complete (assured delivery part 3)
+-define(SUBSCRIBE, 8). %% Client subscribe request
+-define(SUBACK, 9). %% Server Subscribe acknowledgment
+-define(UNSUBSCRIBE, 10). %% Unsubscribe request
+-define(UNSUBACK, 11). %% Unsubscribe acknowledgment
+-define(PINGREQ, 12). %% PING request
+-define(PINGRESP, 13). %% PING response
+-define(DISCONNECT, 14). %% Client is disconnecting
+
+-define(TYPE_NAMES, [
+ 'CONNECT',
+ 'CONNACK',
+ 'PUBLISH',
+ 'PUBACK',
+ 'PUBREC',
+ 'PUBREL',
+ 'PUBCOMP',
+ 'SUBSCRIBE',
+ 'SUBACK',
+ 'UNSUBSCRIBE',
+ 'UNSUBACK',
+ 'PINGREQ',
+ 'PINGRESP',
+ 'DISCONNECT']).
+
+-type mqtt_packet_type() :: ?RESERVED..?DISCONNECT.
+
+%%------------------------------------------------------------------------------
+%% MQTT Connect Return Codes
+%%------------------------------------------------------------------------------
+-define(CONNACK_ACCEPT, 0). %% Connection accepted
+-define(CONNACK_PROTO_VER, 1). %% Unacceptable protocol version
+-define(CONNACK_INVALID_ID, 2). %% Client Identifier is correct UTF-8 but not allowed by the Server
+-define(CONNACK_SERVER, 3). %% Server unavailable
+-define(CONNACK_CREDENTIALS, 4). %% Username or password is malformed
+-define(CONNACK_AUTH, 5). %% Client is not authorized to connect
+
+-type mqtt_connack() :: ?CONNACK_ACCEPT..?CONNACK_AUTH.
+
+%%------------------------------------------------------------------------------
+%% MQTT Parser and Serialiser
+%%------------------------------------------------------------------------------
+-define(MAX_LEN, 16#fffffff).
+-define(HIGHBIT, 2#10000000).
+-define(LOWBITS, 2#01111111).
+
+%%------------------------------------------------------------------------------
+%% MQTT Packet Fixed Header
+%%------------------------------------------------------------------------------
+-record(mqtt_packet_header, {
+ type = ?RESERVED :: mqtt_packet_type(),
+ dup = false :: boolean(),
+ qos = ?QOS_0 :: mqtt_qos(),
+ retain = false :: boolean()}).
+
+%%------------------------------------------------------------------------------
+%% MQTT Packets
+%%------------------------------------------------------------------------------
+-type mqtt_packet_id() :: 1..16#ffff | undefined.
+
+-record(mqtt_packet_connect, {
+ client_id = <<>> :: binary(),
+ proto_ver = ?MQTT_PROTO_V311 :: mqtt_vsn(),
+ proto_name = <<"MQTT">> :: binary(),
+ will_retain = false :: boolean(),
+ will_qos = ?QOS_0 :: mqtt_qos(),
+ will_flag = false :: boolean(),
+ clean_sess = false :: boolean(),
+ keep_alive = 60 :: non_neg_integer(),
+ will_topic = undefined :: undefined | binary(),
+ will_msg = undefined :: undefined | binary(),
+ username = undefined :: undefined | binary(),
+ password = undefined :: undefined | binary()}).
+
+-record(mqtt_packet_connack, {
+ ack_flags = ?RESERVED :: 0 | 1,
+ return_code :: mqtt_connack() }).
+
+-record(mqtt_packet_publish, {
+ topic_name :: binary(),
+ packet_id :: mqtt_packet_id() }).
+
+-record(mqtt_packet_puback, {
+ packet_id :: mqtt_packet_id() }).
+
+-record(mqtt_packet_subscribe, {
+ packet_id :: mqtt_packet_id(),
+ topic_table :: list({binary(), mqtt_qos()}) }).
+
+-record(mqtt_packet_unsubscribe, {
+ packet_id :: mqtt_packet_id(),
+ topics :: list(binary()) }).
+
+-record(mqtt_packet_suback, {
+ packet_id :: mqtt_packet_id(),
+ qos_table :: list(mqtt_qos() | 128) }).
+
+-record(mqtt_packet_unsuback, {
+ packet_id :: mqtt_packet_id() }).
+
+%%------------------------------------------------------------------------------
+%% MQTT Control Packet
+%%------------------------------------------------------------------------------
+-record(mqtt_packet, {
+ header :: #mqtt_packet_header{},
+ variable :: #mqtt_packet_connect{} | #mqtt_packet_connack{}
+ | #mqtt_packet_publish{} | #mqtt_packet_puback{}
+ | #mqtt_packet_subscribe{} | #mqtt_packet_suback{}
+ | #mqtt_packet_unsubscribe{} | #mqtt_packet_unsuback{}
+ | mqtt_packet_id() | undefined,
+ payload :: binary() | undefined }).
+
+-type mqtt_packet() :: #mqtt_packet{}.
+
+%%------------------------------------------------------------------------------
+%% MQTT Packet Match
+%%------------------------------------------------------------------------------
+-define(CONNECT_PACKET(Packet),
+ #mqtt_packet{header = #mqtt_packet_header{type = ?CONNECT}, variable = Packet}).
+
+-define(CONNACK_PACKET(ReturnCode),
+ #mqtt_packet{header = #mqtt_packet_header{type = ?CONNACK},
+ variable = #mqtt_packet_connack{return_code = ReturnCode}}).
+
+-define(PUBLISH_PACKET(Qos, Topic, PacketId, Payload),
+ #mqtt_packet{header = #mqtt_packet_header{type = ?PUBLISH,
+ qos = Qos},
+ variable = #mqtt_packet_publish{topic_name = Topic,
+ packet_id = PacketId},
+ payload = Payload}).
+
+-define(PUBACK_PACKET(Type, PacketId),
+ #mqtt_packet{header = #mqtt_packet_header{type = Type},
+ variable = #mqtt_packet_puback{packet_id = PacketId}}).
+
+-define(PUBREL_PACKET(PacketId),
+ #mqtt_packet{header = #mqtt_packet_header{type = ?PUBREL, qos = ?QOS_1},
+ variable = #mqtt_packet_puback{packet_id = PacketId}}).
+
+-define(SUBSCRIBE_PACKET(PacketId, TopicTable),
+ #mqtt_packet{header = #mqtt_packet_header{type = ?SUBSCRIBE, qos = ?QOS_1},
+ variable = #mqtt_packet_subscribe{packet_id = PacketId,
+ topic_table = TopicTable}}).
+-define(SUBACK_PACKET(PacketId, QosTable),
+ #mqtt_packet{header = #mqtt_packet_header{type = ?SUBACK},
+ variable = #mqtt_packet_suback{packet_id = PacketId,
+ qos_table = QosTable}}).
+-define(UNSUBSCRIBE_PACKET(PacketId, Topics),
+ #mqtt_packet{header = #mqtt_packet_header{type = ?UNSUBSCRIBE, qos = ?QOS_1},
+ variable = #mqtt_packet_unsubscribe{packet_id = PacketId,
+ topics = Topics}}).
+-define(UNSUBACK_PACKET(PacketId),
+ #mqtt_packet{header = #mqtt_packet_header{type = ?UNSUBACK},
+ variable = #mqtt_packet_unsuback{packet_id = PacketId}}).
+
+-define(PACKET(Type),
+ #mqtt_packet{header = #mqtt_packet_header{type = Type}}).
+
+%%------------------------------------------------------------------------------
+%% MQTT Message
+%%------------------------------------------------------------------------------
+-record(mqtt_message, {
+ qos = ?QOS_0 :: mqtt_qos(),
+ retain = false :: boolean(),
+ dup = false :: boolean(),
+ msgid :: mqtt_packet_id(),
+ topic :: binary(),
+ payload :: binary()}).
+
+-type mqtt_message() :: #mqtt_message{}.
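
The packet-match macros above double as constructors. A short sketch (the
function name is illustrative) of how the test suites in this commit build
a CONNECT frame from this header:

    -include("emqttc_packet.hrl").

    %% Returns a #mqtt_packet{} ready for emqttc_serialiser:serialise/1.
    connect_packet(ClientId) when is_binary(ClientId) ->
        ?CONNECT_PACKET(#mqtt_packet_connect{client_id  = ClientId,
                                             clean_sess = true,
                                             username   = <<"guest">>,
                                             password   = <<"guest">>}).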
diff --git a/deps/rabbitmq_web_mqtt/test/src/emqttc_parser.erl b/deps/rabbitmq_web_mqtt/test/src/emqttc_parser.erl
new file mode 100644
index 0000000000..ff800c1e1f
--- /dev/null
+++ b/deps/rabbitmq_web_mqtt/test/src/emqttc_parser.erl
@@ -0,0 +1,215 @@
+%%%-----------------------------------------------------------------------------
+%%% Copyright (c) 2012-2015 eMQTT.IO, All Rights Reserved.
+%%%
+%%% Permission is hereby granted, free of charge, to any person obtaining a copy
+%%% of this software and associated documentation files (the "Software"), to deal
+%%% in the Software without restriction, including without limitation the rights
+%%% to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+%%% copies of the Software, and to permit persons to whom the Software is
+%%% furnished to do so, subject to the following conditions:
+%%%
+%%% The above copyright notice and this permission notice shall be included in all
+%%% copies or substantial portions of the Software.
+%%%
+%%% THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+%%% IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+%%% FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+%%% AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+%%% LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+%%% OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+%%% SOFTWARE.
+%%%-----------------------------------------------------------------------------
+%%% @doc
+%%% emqttc received packet parser.
+%%%
+%%% @end
+%%%-----------------------------------------------------------------------------
+-module(emqttc_parser).
+
+-author("feng@emqtt.io").
+
+-include("emqttc_packet.hrl").
+
+%% API
+-export([new/0, parse/2]).
+
+
+%%%-----------------------------------------------------------------------------
+%% @doc Initialize a parser.
+%% @end
+%%%-----------------------------------------------------------------------------
+-spec new() -> none.
+new() -> none.
+
+%%%-----------------------------------------------------------------------------
+%% @doc Parse MQTT Packet.
+%% @end
+%%%-----------------------------------------------------------------------------
+-spec parse(binary(), none | fun()) -> {ok, mqtt_packet(), binary()} | {error, any()} | {more, fun()}.
+parse(<<>>, none) ->
+ {more, fun(Bin) -> parse(Bin, none) end};
+parse(<<PacketType:4, Dup:1, QoS:2, Retain:1, Rest/binary>>, none) ->
+ parse_remaining_len(Rest, #mqtt_packet_header{type = PacketType,
+ dup = bool(Dup),
+ qos = QoS,
+ retain = bool(Retain)});
+parse(Bin, Cont) -> Cont(Bin).
+
+parse_remaining_len(<<>>, Header) ->
+ {more, fun(Bin) -> parse_remaining_len(Bin, Header) end};
+parse_remaining_len(Rest, Header) ->
+ parse_remaining_len(Rest, Header, 1, 0).
+
+parse_remaining_len(_Bin, _Header, _Multiplier, Length)
+ when Length > ?MAX_LEN ->
+ {error, invalid_mqtt_frame_len};
+parse_remaining_len(<<>>, Header, Multiplier, Length) ->
+ {more, fun(Bin) -> parse_remaining_len(Bin, Header, Multiplier, Length) end};
+parse_remaining_len(<<1:1, Len:7, Rest/binary>>, Header, Multiplier, Value) ->
+ parse_remaining_len(Rest, Header, Multiplier * ?HIGHBIT, Value + Len * Multiplier);
+parse_remaining_len(<<0:1, Len:7, Rest/binary>>, Header, Multiplier, Value) ->
+ parse_frame(Rest, Header, Value + Len * Multiplier).
+
+parse_frame(Bin, #mqtt_packet_header{type = Type,
+ qos = Qos} = Header, Length) ->
+ case {Type, Bin} of
+ %{?CONNECT, <<FrameBin:Length/binary, Rest/binary>>} ->
+ % {ProtoName, Rest1} = parse_utf(FrameBin),
+ % <<ProtoVersion : 8, Rest2/binary>> = Rest1,
+ % <<UsernameFlag : 1,
+ % PasswordFlag : 1,
+ % WillRetain : 1,
+ % WillQos : 2,
+ % WillFlag : 1,
+ % CleanSession : 1,
+ % _Reserved : 1,
+ % KeepAlive : 16/big,
+ % Rest3/binary>> = Rest2,
+ % {ClientId, Rest4} = parse_utf(Rest3),
+ % {WillTopic, Rest5} = parse_utf(Rest4, WillFlag),
+ % {WillMsg, Rest6} = parse_msg(Rest5, WillFlag),
+ % {UserName, Rest7} = parse_utf(Rest6, UsernameFlag),
+    %       {Password, <<>>} = parse_utf(Rest7, PasswordFlag),
+ % case protocol_name_approved(ProtoVersion, ProtoName) of
+ % true ->
+ % wrap(Header,
+ % #mqtt_packet_connect{
+ % proto_ver = ProtoVersion,
+ % proto_name = ProtoName,
+ % will_retain = bool(WillRetain),
+ % will_qos = WillQos,
+ % will_flag = bool(WillFlag),
+ % clean_sess = bool(CleanSession),
+ % keep_alive = KeepAlive,
+ % client_id = ClientId,
+ % will_topic = WillTopic,
+ % will_msg = WillMsg,
+ % username = UserName,
+    %                        password    = Password}, Rest);
+ % false ->
+ % {error, protocol_header_corrupt}
+ % end;
+ {?CONNACK, <<FrameBin:Length/binary, Rest/binary>>} ->
+ <<_Reserved:7, SP:1, ReturnCode:8>> = FrameBin,
+ wrap(Header, #mqtt_packet_connack{ack_flags = SP,
+ return_code = ReturnCode }, Rest);
+ {?PUBLISH, <<FrameBin:Length/binary, Rest/binary>>} ->
+ {TopicName, Rest1} = parse_utf(FrameBin),
+ {PacketId, Payload} = case Qos of
+ 0 -> {undefined, Rest1};
+ _ -> <<Id:16/big, R/binary>> = Rest1,
+ {Id, R}
+ end,
+ wrap(Header, #mqtt_packet_publish{topic_name = TopicName,
+ packet_id = PacketId},
+ Payload, Rest);
+ {?PUBACK, <<FrameBin:Length/binary, Rest/binary>>} ->
+ <<PacketId:16/big>> = FrameBin,
+ wrap(Header, #mqtt_packet_puback{packet_id = PacketId}, Rest);
+ {?PUBREC, <<FrameBin:Length/binary, Rest/binary>>} ->
+ <<PacketId:16/big>> = FrameBin,
+ wrap(Header, #mqtt_packet_puback{packet_id = PacketId}, Rest);
+ {?PUBREL, <<FrameBin:Length/binary, Rest/binary>>} ->
+ 1 = Qos,
+ <<PacketId:16/big>> = FrameBin,
+ wrap(Header, #mqtt_packet_puback{packet_id = PacketId}, Rest);
+ {?PUBCOMP, <<FrameBin:Length/binary, Rest/binary>>} ->
+ <<PacketId:16/big>> = FrameBin,
+ wrap(Header, #mqtt_packet_puback{packet_id = PacketId}, Rest);
+ %{?SUBSCRIBE, <<FrameBin:Length/binary, Rest/binary>>} ->
+ % 1 = Qos,
+ % <<PacketId:16/big, Rest1/binary>> = FrameBin,
+ % TopicTable = parse_topics(?SUBSCRIBE, Rest1, []),
+ % wrap(Header, #mqtt_packet_subscribe{packet_id = PacketId,
+ % topic_table = TopicTable}, Rest);
+ {?SUBACK, <<FrameBin:Length/binary, Rest/binary>>} ->
+ <<PacketId:16/big, Rest1/binary>> = FrameBin,
+ wrap(Header, #mqtt_packet_suback{packet_id = PacketId,
+ qos_table = parse_qos(Rest1, [])}, Rest);
+ %{?UNSUBSCRIBE, <<FrameBin:Length/binary, Rest/binary>>} ->
+ % 1 = Qos,
+ % <<PacketId:16/big, Rest1/binary>> = FrameBin,
+ % Topics = parse_topics(?UNSUBSCRIBE, Rest1, []),
+ % wrap(Header, #mqtt_packet_unsubscribe{packet_id = PacketId,
+ % topics = Topics}, Rest);
+ {?UNSUBACK, <<FrameBin:Length/binary, Rest/binary>>} ->
+ <<PacketId:16/big>> = FrameBin,
+ wrap(Header, #mqtt_packet_unsuback{packet_id = PacketId}, Rest);
+ %{?PINGREQ, Rest} ->
+ % Length = 0,
+ % wrap(Header, Rest);
+ {?PINGRESP, Rest} ->
+ Length = 0,
+ wrap(Header, Rest);
+ %{?DISCONNECT, Rest} ->
+ % Length = 0,
+ % wrap(Header, Rest);
+ {_, TooShortBin} ->
+ {more, fun(BinMore) ->
+ parse_frame(<<TooShortBin/binary, BinMore/binary>>,
+ Header, Length)
+ end}
+ end.
+
+wrap(Header, Variable, Payload, Rest) ->
+ {ok, #mqtt_packet{header = Header, variable = Variable, payload = Payload}, Rest}.
+wrap(Header, Variable, Rest) ->
+ {ok, #mqtt_packet{header = Header, variable = Variable}, Rest}.
+wrap(Header, Rest) ->
+ {ok, #mqtt_packet{header = Header}, Rest}.
+
+parse_qos(<<>>, Acc) ->
+ lists:reverse(Acc);
+parse_qos(<<QoS:8/unsigned, Rest/binary>>, Acc) ->
+ parse_qos(Rest, [QoS | Acc]).
+
+% server function
+%parse_topics(_, <<>>, Topics) ->
+% Topics;
+%parse_topics(?SUBSCRIBE = Sub, Bin, Topics) ->
+% {Name, <<_:6, QoS:2, Rest/binary>>} = parse_utf(Bin),
+% parse_topics(Sub, Rest, [{Name, QoS}| Topics]);
+%parse_topics(?UNSUBSCRIBE = Sub, Bin, Topics) ->
+% {Name, <<Rest/binary>>} = parse_utf(Bin),
+% parse_topics(Sub, Rest, [Name | Topics]).
+
+%parse_utf(Bin, 0) ->
+% {undefined, Bin};
+%parse_utf(Bin, _) ->
+% parse_utf(Bin).
+
+parse_utf(<<Len:16/big, Str:Len/binary, Rest/binary>>) ->
+ {Str, Rest}.
+
+% server function
+%parse_msg(Bin, 0) ->
+% {undefined, Bin};
+%parse_msg(<<Len:16/big, Msg:Len/binary, Rest/binary>>, _) ->
+% {Msg, Rest}.
+
+bool(0) -> false;
+bool(1) -> true.
+
+%protocol_name_approved(Ver, Name) ->
+% lists:member({Ver, Name}, ?PROTOCOL_NAMES).
+
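The parser is continuation-based: a partial frame yields {more, Fun} and the
caller feeds the next chunk of bytes into Fun. A minimal driving loop,
assuming a passive-mode gen_tcp socket (the socket handling is illustrative,
not part of this commit):

    recv_packet(Socket, ParseState) ->
        {ok, Bin} = gen_tcp:recv(Socket, 0),
        case emqttc_parser:parse(Bin, ParseState) of
            {more, Cont}        -> recv_packet(Socket, Cont);
            {ok, Packet, _Rest} -> {ok, Packet};
            {error, Reason}     -> {error, Reason}
        end.

Start the loop with recv_packet(Socket, emqttc_parser:new()); raw_recv/2 in
system_SUITE below applies the same pattern to single WebSocket frames.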
diff --git a/deps/rabbitmq_web_mqtt/test/src/emqttc_serialiser.erl b/deps/rabbitmq_web_mqtt/test/src/emqttc_serialiser.erl
new file mode 100644
index 0000000000..5654de2212
--- /dev/null
+++ b/deps/rabbitmq_web_mqtt/test/src/emqttc_serialiser.erl
@@ -0,0 +1,157 @@
+%%%-----------------------------------------------------------------------------
+%%% Copyright (c) 2012-2015 eMQTT.IO, All Rights Reserved.
+%%%
+%%% Permission is hereby granted, free of charge, to any person obtaining a copy
+%%% of this software and associated documentation files (the "Software"), to deal
+%%% in the Software without restriction, including without limitation the rights
+%%% to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+%%% copies of the Software, and to permit persons to whom the Software is
+%%% furnished to do so, subject to the following conditions:
+%%%
+%%% The above copyright notice and this permission notice shall be included in all
+%%% copies or substantial portions of the Software.
+%%%
+%%% THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+%%% IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+%%% FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+%%% AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+%%% LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+%%% OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+%%% SOFTWARE.
+%%%-----------------------------------------------------------------------------
+%%% @doc
+%%% emqttc packet serialiser.
+%%%
+%%% @end
+%%%-----------------------------------------------------------------------------
+-module(emqttc_serialiser).
+
+-author("feng@emqtt.io").
+
+-include("emqttc_packet.hrl").
+
+%% API
+-export([serialise/1]).
+
+
+serialise(#mqtt_packet{header = Header = #mqtt_packet_header{type = Type},
+ variable = Variable,
+ payload = Payload}) ->
+ serialise_header(Header,
+ serialise_variable(Type, Variable,
+ serialise_payload(Payload))).
+
+serialise_header(#mqtt_packet_header{type = Type,
+ dup = Dup,
+ qos = Qos,
+ retain = Retain},
+ {VariableBin, PayloadBin})
+ when is_integer(Type) andalso ?CONNECT =< Type andalso Type =< ?DISCONNECT ->
+ Len = size(VariableBin) + size(PayloadBin),
+ true = (Len =< ?MAX_LEN),
+ LenBin = serialise_len(Len),
+ <<Type:4, (opt(Dup)):1, (opt(Qos)):2, (opt(Retain)):1,
+ LenBin/binary, VariableBin/binary, PayloadBin/binary>>.
+
+serialise_variable(?CONNECT, #mqtt_packet_connect{client_id = ClientId,
+ proto_ver = ProtoVer,
+ proto_name = ProtoName,
+ will_retain = WillRetain,
+ will_qos = WillQos,
+ will_flag = WillFlag,
+ clean_sess = CleanSess,
+ keep_alive = KeepAlive,
+ will_topic = WillTopic,
+ will_msg = WillMsg,
+ username = Username,
+ password = Password }, undefined) ->
+ VariableBin = <<(size(ProtoName)):16/big-unsigned-integer,
+ ProtoName/binary,
+ ProtoVer:8,
+ (opt(Username)):1,
+ (opt(Password)):1,
+ (opt(WillRetain)):1,
+ WillQos:2,
+ (opt(WillFlag)):1,
+ (opt(CleanSess)):1,
+ 0:1,
+ KeepAlive:16/big-unsigned-integer>>,
+ PayloadBin = serialise_utf(ClientId),
+ PayloadBin1 = case WillFlag of
+ true -> <<PayloadBin/binary,
+ (serialise_utf(WillTopic))/binary,
+ (size(WillMsg)):16/big-unsigned-integer,
+ WillMsg/binary>>;
+ false -> PayloadBin
+ end,
+ UserPasswd = << <<(serialise_utf(B))/binary>> || B <- [Username, Password], B =/= undefined >>,
+ {VariableBin, <<PayloadBin1/binary, UserPasswd/binary>>};
+
+serialise_variable(?CONNACK, #mqtt_packet_connack{ack_flags = AckFlags,
+ return_code = ReturnCode},
+ undefined) ->
+ {<<AckFlags:8, ReturnCode:8>>, <<>>};
+
+serialise_variable(?SUBSCRIBE, #mqtt_packet_subscribe{packet_id = PacketId,
+ topic_table = Topics }, undefined) ->
+ {<<PacketId:16/big>>, serialise_topics(Topics)};
+
+serialise_variable(?SUBACK, #mqtt_packet_suback {packet_id = PacketId,
+ qos_table = QosTable},
+ undefined) ->
+ {<<PacketId:16/big>>, << <<Q:8>> || Q <- QosTable >>};
+
+serialise_variable(?UNSUBSCRIBE, #mqtt_packet_unsubscribe{
+ packet_id = PacketId, topics = Topics }, undefined) ->
+ {<<PacketId:16/big>>, serialise_topics(Topics)};
+
+serialise_variable(?UNSUBACK, #mqtt_packet_unsuback{packet_id = PacketId},
+ undefined) ->
+ {<<PacketId:16/big>>, <<>>};
+
+serialise_variable(?PUBLISH, #mqtt_packet_publish { topic_name = TopicName,
+ packet_id = PacketId }, PayloadBin) ->
+ TopicBin = serialise_utf(TopicName),
+ PacketIdBin = if
+ PacketId =:= undefined -> <<>>;
+ true -> <<PacketId:16/big>>
+ end,
+ {<<TopicBin/binary, PacketIdBin/binary>>, PayloadBin};
+
+serialise_variable(PubAck, #mqtt_packet_puback { packet_id = PacketId }, _Payload)
+ when PubAck =:= ?PUBACK; PubAck =:= ?PUBREC; PubAck =:= ?PUBREL; PubAck =:= ?PUBCOMP ->
+ {<<PacketId:16/big>>, <<>>};
+
+serialise_variable(?PINGREQ, undefined, undefined) ->
+ {<<>>, <<>>};
+
+serialise_variable(?DISCONNECT, undefined, undefined) ->
+ {<<>>, <<>>}.
+
+serialise_payload(undefined) ->
+ undefined;
+serialise_payload(Bin) when is_binary(Bin) ->
+ Bin.
+
+serialise_topics([{_Topic, _Qos}|_] = Topics) ->
+ << <<(serialise_utf(Topic))/binary, ?RESERVED:6, Qos:2>> || {Topic, Qos} <- Topics >>;
+
+serialise_topics([H|_] = Topics) when is_binary(H) ->
+ << <<(serialise_utf(Topic))/binary>> || Topic <- Topics >>.
+
+serialise_utf(String) ->
+ StringBin = unicode:characters_to_binary(String),
+ Len = size(StringBin),
+ true = (Len =< 16#ffff),
+ <<Len:16/big, StringBin/binary>>.
+
+serialise_len(N) when N =< ?LOWBITS ->
+ <<0:1, N:7>>;
+serialise_len(N) ->
+ <<1:1, (N rem ?HIGHBIT):7, (serialise_len(N div ?HIGHBIT))/binary>>.
+
+opt(undefined) -> ?RESERVED;
+opt(false) -> 0;
+opt(true) -> 1;
+opt(X) when is_integer(X) -> X;
+opt(B) when is_binary(B) -> 1.
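
serialise_len/1 above implements MQTT's variable-length "remaining length"
encoding: seven payload bits per byte, with the high bit set while more
bytes follow. A worked check (serialise_len/1 is not exported, so this is
arithmetic rather than a call): for 321, 321 rem 128 = 65 yields a first
byte of 2#11000001 = 193 with the continuation bit set, and 321 div 128 = 2
yields a final byte of 2, so serialise_len(321) produces <<193,2>>.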
diff --git a/deps/rabbitmq_web_mqtt/test/src/rabbit_ws_test_util.erl b/deps/rabbitmq_web_mqtt/test/src/rabbit_ws_test_util.erl
new file mode 100644
index 0000000000..fcc91f6e13
--- /dev/null
+++ b/deps/rabbitmq_web_mqtt/test/src/rabbit_ws_test_util.erl
@@ -0,0 +1,30 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_ws_test_util).
+
+-export([update_app_env/3, get_web_mqtt_port_str/1]).
+
+update_app_env(Config, Key, Val) ->
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+ application, set_env,
+ [rabbitmq_web_mqtt, Key, Val]),
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+ application, stop,
+ [rabbitmq_web_mqtt]),
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+ application, start,
+ [rabbitmq_web_mqtt]).
+
+get_web_mqtt_port_str(Config) ->
+ Port = case rabbit_ct_helpers:get_config(Config, protocol) of
+ "ws" ->
+ rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_web_mqtt);
+ "wss" ->
+ rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_web_mqtt_tls)
+ end,
+ integer_to_list(Port).
diff --git a/deps/rabbitmq_web_mqtt/test/src/rfc6455_client.erl b/deps/rabbitmq_web_mqtt/test/src/rfc6455_client.erl
new file mode 100644
index 0000000000..f9650fd4a6
--- /dev/null
+++ b/deps/rabbitmq_web_mqtt/test/src/rfc6455_client.erl
@@ -0,0 +1,306 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rfc6455_client).
+
+-export([new/2, new/3, new/4, new/5, open/1, recv/1, recv/2, send/2, send_binary/2, close/1, close/2]).
+
+-record(state, {host, port, addr, path, ppid, socket, data, phase, transport}).
+
+%% --------------------------------------------------------------------------
+
+new(WsUrl, PPid) ->
+ new(WsUrl, PPid, undefined, [], <<>>).
+
+new(WsUrl, PPid, AuthInfo) ->
+ new(WsUrl, PPid, AuthInfo, [], <<>>).
+
+new(WsUrl, PPid, AuthInfo, Protocols) ->
+ new(WsUrl, PPid, AuthInfo, Protocols, <<>>).
+
+new(WsUrl, PPid, AuthInfo, Protocols, TcpPreface) ->
+ crypto:start(),
+ application:ensure_all_started(ssl),
+ {Transport, Url} = case WsUrl of
+ "ws://" ++ Rest -> {gen_tcp, Rest};
+ "wss://" ++ SslRest -> {ssl, SslRest}
+ end,
+ [Addr, Path] = split("/", Url, 1),
+ [Host0, MaybePort] = split(":", Addr, 1, empty),
+ Host = case inet:parse_ipv4_address(Host0) of
+ {ok, IP} -> IP;
+ _ -> Host0
+ end,
+ Port = case MaybePort of
+ empty -> 80;
+ V -> {I, ""} = string:to_integer(V), I
+ end,
+ State = #state{host = Host,
+ port = Port,
+ addr = Addr,
+ path = "/" ++ Path,
+ ppid = PPid,
+ transport = Transport},
+ spawn_link(fun () ->
+ start_conn(State, AuthInfo, Protocols, TcpPreface)
+ end).
+
+open(WS) ->
+ receive
+ {rfc6455, open, WS, Opts} ->
+ {ok, Opts};
+ {rfc6455, close, WS, R} ->
+ {close, R}
+ end.
+
+recv(WS) ->
+ receive
+ {rfc6455, recv, WS, Payload} ->
+ {ok, Payload};
+ {rfc6455, recv_binary, WS, Payload} ->
+ {binary, Payload};
+ {rfc6455, close, WS, R} ->
+ {close, R}
+ end.
+
+recv(WS, Timeout) ->
+ receive
+ {rfc6455, recv, WS, Payload} ->
+ {ok, Payload};
+ {rfc6455, recv_binary, WS, Payload} ->
+ {binary, Payload};
+ {rfc6455, close, WS, R} ->
+ {close, R}
+ after Timeout ->
+ {error, timeout}
+ end.
+
+send(WS, IoData) ->
+ WS ! {send, IoData},
+ ok.
+
+send_binary(WS, IoData) ->
+ WS ! {send_binary, IoData},
+ ok.
+
+close(WS) ->
+ close(WS, {1000, ""}).
+
+close(WS, WsReason) ->
+ WS ! {close, WsReason},
+ receive
+ {rfc6455, close, WS, R} ->
+ {close, R}
+ end.
+
+
+%% --------------------------------------------------------------------------
+
+start_conn(State = #state{transport = Transport}, AuthInfo, Protocols, TcpPreface) ->
+ {ok, Socket} = case TcpPreface of
+ <<>> ->
+ Transport:connect(State#state.host, State#state.port,
+ [binary,
+ {packet, 0}]);
+ _ ->
+ {ok, Socket0} = gen_tcp:connect(State#state.host, State#state.port,
+ [binary,
+ {packet, 0}]),
+ gen_tcp:send(Socket0, TcpPreface),
+ case Transport of
+ gen_tcp -> {ok, Socket0};
+ ssl -> Transport:connect(Socket0, [])
+ end
+ end,
+
+ AuthHd = case AuthInfo of
+ undefined -> "";
+ _ ->
+ Login = proplists:get_value(login, AuthInfo),
+ Passcode = proplists:get_value(passcode, AuthInfo),
+ "Authorization: Basic "
+ ++ base64:encode_to_string(Login ++ ":" ++ Passcode)
+ ++ "\r\n"
+ end,
+
+ ProtocolHd = case Protocols of
+ [] -> "";
+                     _ -> "Sec-WebSocket-Protocol: " ++ string:join(Protocols, ", ") ++ "\r\n"
+ end,
+
+ Key = base64:encode_to_string(crypto:strong_rand_bytes(16)),
+ Transport:send(Socket,
+ "GET " ++ State#state.path ++ " HTTP/1.1\r\n" ++
+ "Host: " ++ State#state.addr ++ "\r\n" ++
+ "Upgrade: websocket\r\n" ++
+ "Connection: Upgrade\r\n" ++
+ AuthHd ++
+ ProtocolHd ++
+ "Sec-WebSocket-Key: " ++ Key ++ "\r\n" ++
+ "Origin: null\r\n" ++
+ "Sec-WebSocket-Version: 13\r\n\r\n"),
+
+ loop(State#state{socket = Socket,
+ data = <<>>,
+ phase = opening}).
+
+do_recv(State = #state{phase = opening, ppid = PPid, data = Data}) ->
+ case split("\r\n\r\n", binary_to_list(Data), 1, empty) of
+ [_Http, empty] -> State;
+ [Http, Data1] ->
+ %% TODO: don't ignore http response data, verify key
+ PPid ! {rfc6455, open, self(), [{http_response, Http}]},
+ State#state{phase = open,
+ data = Data1}
+ end;
+do_recv(State = #state{phase = Phase, data = Data, socket = Socket, transport = Transport, ppid = PPid})
+ when Phase =:= open orelse Phase =:= closing ->
+ R = case Data of
+ <<F:1, _:3, O:4, 0:1, L:7, Payload:L/binary, Rest/binary>>
+ when L < 126 ->
+ {F, O, Payload, Rest};
+
+ <<F:1, _:3, O:4, 0:1, 126:7, L2:16, Payload:L2/binary, Rest/binary>> ->
+ {F, O, Payload, Rest};
+
+ <<F:1, _:3, O:4, 0:1, 127:7, L2:64, Payload:L2/binary, Rest/binary>> ->
+ {F, O, Payload, Rest};
+
+ <<_:1, _:3, _:4, 1:1, _/binary>> ->
+                %% According to RFC 6455 section 5.1, the server must not mask any frames.
+ die(Socket, Transport, PPid, {1006, "Protocol error"}, normal);
+ _ ->
+ moredata
+ end,
+ case R of
+ moredata ->
+ State;
+ _ -> do_recv2(State, R)
+ end.
+
+do_recv2(State = #state{phase = Phase, socket = Socket, ppid = PPid, transport = Transport}, R) ->
+ case R of
+ {1, 1, Payload, Rest} ->
+ PPid ! {rfc6455, recv, self(), Payload},
+ State#state{data = Rest};
+ {1, 2, Payload, Rest} ->
+ PPid ! {rfc6455, recv_binary, self(), Payload},
+ State#state{data = Rest};
+ {1, 8, Payload, _Rest} ->
+ WsReason = case Payload of
+ <<WC:16, WR/binary>> -> {WC, WR};
+ <<>> -> {1005, "No status received"}
+ end,
+ case Phase of
+ open -> %% echo
+ do_close(State, WsReason),
+ Transport:close(Socket);
+ closing ->
+ ok
+ end,
+ die(Socket, Transport, PPid, WsReason, normal);
+ {_, _, _, _Rest} ->
+ io:format("Unknown frame type~n"),
+ die(Socket, Transport, PPid, {1006, "Unknown frame type"}, normal)
+ end.
+
+encode_frame(F, O, Payload) ->
+ Mask = crypto:strong_rand_bytes(4),
+ MaskedPayload = apply_mask(Mask, iolist_to_binary(Payload)),
+
+ L = byte_size(MaskedPayload),
+ IoData = case L of
+ _ when L < 126 ->
+ [<<F:1, 0:3, O:4, 1:1, L:7>>, Mask, MaskedPayload];
+ _ when L < 65536 ->
+ [<<F:1, 0:3, O:4, 1:1, 126:7, L:16>>, Mask, MaskedPayload];
+ _ ->
+ [<<F:1, 0:3, O:4, 1:1, 127:7, L:64>>, Mask, MaskedPayload]
+ end,
+ iolist_to_binary(IoData).
+
+do_send(State = #state{socket = Socket, transport = Transport}, Payload) ->
+ Transport:send(Socket, encode_frame(1, 1, Payload)),
+ State.
+
+do_send_binary(State = #state{socket = Socket, transport = Transport}, Payload) ->
+ Transport:send(Socket, encode_frame(1, 2, Payload)),
+ State.
+
+do_close(State = #state{socket = Socket, transport = Transport}, {Code, Reason}) ->
+ Payload = iolist_to_binary([<<Code:16>>, Reason]),
+ Transport:send(Socket, encode_frame(1, 8, Payload)),
+ State#state{phase = closing}.
+
+
+loop(State = #state{socket = Socket, transport = Transport, ppid = PPid, data = Data,
+ phase = Phase}) ->
+ receive
+ {In, Socket, Bin} when In =:= tcp; In =:= ssl ->
+ State1 = State#state{data = iolist_to_binary([Data, Bin])},
+ loop(do_recv(State1));
+ {send, Payload} when Phase == open ->
+ loop(do_send(State, Payload));
+ {send_binary, Payload} when Phase == open ->
+ loop(do_send_binary(State, Payload));
+ {Closed, Socket} when Closed =:= tcp_closed; Closed =:= ssl_closed ->
+ die(Socket, Transport, PPid, {1006, "Connection closed abnormally"}, normal);
+ {close, WsReason} when Phase == open ->
+ loop(do_close(State, WsReason));
+ {Error, Socket, Reason} when Error =:= tcp_error; Error =:= ssl_error ->
+ die(Socket, Transport, PPid, {1006, "Connection closed abnormally"}, Reason);
+ Other ->
+ error({unknown_message, Other, Socket})
+ end.
+
+
+die(Socket, Transport, PPid, WsReason, Reason) ->
+ Transport:shutdown(Socket, read_write),
+ PPid ! {rfc6455, close, self(), WsReason},
+ exit(Reason).
+
+
+%% --------------------------------------------------------------------------
+
+split(SubStr, Str, Limit) ->
+ split(SubStr, Str, Limit, "").
+
+split(SubStr, Str, Limit, Default) ->
+ Acc = split(SubStr, Str, Limit, [], Default),
+ lists:reverse(Acc).
+split(_SubStr, Str, 0, Acc, _Default) -> [Str | Acc];
+split(SubStr, Str, Limit, Acc, Default) ->
+ {L, R} = case string:str(Str, SubStr) of
+ 0 -> {Str, Default};
+ I -> {string:substr(Str, 1, I-1),
+ string:substr(Str, I+length(SubStr))}
+ end,
+ split(SubStr, R, Limit-1, [L | Acc], Default).
+
+
+apply_mask(Mask, Data) when is_number(Mask) ->
+ apply_mask(<<Mask:32>>, Data);
+
+apply_mask(<<0:32>>, Data) ->
+ Data;
+apply_mask(Mask, Data) ->
+ iolist_to_binary(lists:reverse(apply_mask2(Mask, Data, []))).
+
+apply_mask2(M = <<Mask:32>>, <<Data:32, Rest/binary>>, Acc) ->
+ T = Data bxor Mask,
+ apply_mask2(M, Rest, [<<T:32>> | Acc]);
+apply_mask2(<<Mask:24, _:8>>, <<Data:24>>, Acc) ->
+ T = Data bxor Mask,
+ [<<T:24>> | Acc];
+apply_mask2(<<Mask:16, _:16>>, <<Data:16>>, Acc) ->
+ T = Data bxor Mask,
+ [<<T:16>> | Acc];
+apply_mask2(<<Mask:8, _:24>>, <<Data:8>>, Acc) ->
+ T = Data bxor Mask,
+ [<<T:8>> | Acc];
+apply_mask2(_, <<>>, Acc) ->
+ Acc.
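
A minimal usage sketch for this client; the endpoint and payload are
illustrative, and 15675 is assumed to be a Web MQTT listener port:

    example() ->
        WS = rfc6455_client:new("ws://127.0.0.1:15675/ws", self()),
        {ok, _Opts} = rfc6455_client:open(WS),
        ok = rfc6455_client:send_binary(WS, <<"ping">>),
        Reply = rfc6455_client:recv(WS, 5000),
        {close, _} = rfc6455_client:close(WS),
        Reply.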
diff --git a/deps/rabbitmq_web_mqtt/test/src/system_SUITE.erl b/deps/rabbitmq_web_mqtt/test/src/system_SUITE.erl
new file mode 100644
index 0000000000..e91efda12c
--- /dev/null
+++ b/deps/rabbitmq_web_mqtt/test/src/system_SUITE.erl
@@ -0,0 +1,244 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(system_SUITE).
+
+-include_lib("eunit/include/eunit.hrl").
+-include("emqttc_packet.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, non_parallel_tests}
+ ].
+
+groups() ->
+ [
+ {non_parallel_tests, [],
+ [connection
+ , pubsub_shared_connection
+ , pubsub_separate_connections
+ , last_will_enabled
+ , last_will_disabled
+ , disconnect
+ ]}
+ ].
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, ?MODULE}
+ ]),
+ rabbit_ct_helpers:run_setup_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_group(_, Config) ->
+ Config.
+
+end_per_group(_, Config) ->
+ Config.
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+-define(DEFAULT_TIMEOUT, 15000).
+
+
+connection(Config) ->
+ Port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_web_mqtt),
+ PortStr = integer_to_list(Port),
+ WS = rfc6455_client:new("ws://127.0.0.1:" ++ PortStr ++ "/ws", self()),
+ {ok, _} = rfc6455_client:open(WS),
+ {close, _} = rfc6455_client:close(WS),
+ ok.
+
+pubsub_shared_connection(Config) ->
+ Port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_web_mqtt),
+ PortStr = integer_to_list(Port),
+ WS = rfc6455_client:new("ws://127.0.0.1:" ++ PortStr ++ "/ws", self()),
+ {ok, _} = rfc6455_client:open(WS),
+ ok = raw_send(WS,
+ ?CONNECT_PACKET(#mqtt_packet_connect{
+ clean_sess = true,
+ client_id = <<"web-mqtt-tests-pubsub">>,
+ username = <<"guest">>,
+ password = <<"guest">>})),
+
+ {ok, ?CONNACK_PACKET(?CONNACK_ACCEPT), _} = raw_recv(WS),
+
+ Dst = <<"/topic/test-web-mqtt">>,
+
+ ok = raw_send(WS, ?SUBSCRIBE_PACKET(1, [{Dst, ?QOS_1}])),
+ {ok, ?SUBACK_PACKET(_, _), _} = raw_recv(WS),
+
+ Payload = <<"a\x00a">>,
+
+ ok = raw_send(WS, ?PUBLISH_PACKET(?QOS_1, Dst, 2, Payload)),
+ {ok, ?PUBLISH_PACKET(_, Dst, _, Payload), _} = raw_recv(WS),
+
+ {close, _} = rfc6455_client:close(WS),
+ ok.
+
+pubsub_separate_connections(Config) ->
+ Port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_web_mqtt),
+ PortStr = integer_to_list(Port),
+ WS1 = rfc6455_client:new("ws://127.0.0.1:" ++ PortStr ++ "/ws", self()),
+ {ok, _} = rfc6455_client:open(WS1),
+ ok = raw_send(WS1,
+ ?CONNECT_PACKET(#mqtt_packet_connect{
+ clean_sess = true,
+ client_id = <<"web-mqtt-tests-publisher">>,
+ username = <<"guest">>,
+ password = <<"guest">>})),
+ {ok, ?CONNACK_PACKET(?CONNACK_ACCEPT), _} = raw_recv(WS1),
+
+ WS2 = rfc6455_client:new("ws://127.0.0.1:" ++ PortStr ++ "/ws", self()),
+ {ok, _} = rfc6455_client:open(WS2),
+ ok = raw_send(WS2,
+ ?CONNECT_PACKET(#mqtt_packet_connect{
+ clean_sess = true,
+ client_id = <<"web-mqtt-tests-consumer">>,
+ username = <<"guest">>,
+ password = <<"guest">>})),
+ {ok, ?CONNACK_PACKET(?CONNACK_ACCEPT), _} = raw_recv(WS2),
+
+ Dst = <<"/topic/test-web-mqtt">>,
+ ok = raw_send(WS2, ?SUBSCRIBE_PACKET(1, [{Dst, ?QOS_1}])),
+ {ok, ?SUBACK_PACKET(_, _), _} = raw_recv(WS2),
+
+ Payload = <<"a\x00a">>,
+ ok = raw_send(WS1, ?PUBLISH_PACKET(?QOS_1, Dst, 2, Payload)),
+ {ok, ?PUBLISH_PACKET(_, Dst, _, Payload), _} = raw_recv(WS2),
+
+ {close, _} = rfc6455_client:close(WS1),
+ {close, _} = rfc6455_client:close(WS2),
+ ok.
+
+last_will_enabled(Config) ->
+ Port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_web_mqtt),
+ PortStr = integer_to_list(Port),
+
+ LastWillDst = <<"/topic/web-mqtt-tests-ws1-last-will">>,
+ LastWillMsg = <<"a last will and testament message">>,
+
+ WS1 = rfc6455_client:new("ws://127.0.0.1:" ++ PortStr ++ "/ws", self()),
+ {ok, _} = rfc6455_client:open(WS1),
+ ok = raw_send(WS1,
+ ?CONNECT_PACKET(#mqtt_packet_connect{
+ clean_sess = true,
+ client_id = <<"web-mqtt-tests-last-will-ws1">>,
+ will_flag = true,
+ will_qos = ?QOS_1,
+ will_topic = LastWillDst,
+ will_msg = LastWillMsg,
+ username = <<"guest">>,
+ password = <<"guest">>})),
+ {ok, ?CONNACK_PACKET(?CONNACK_ACCEPT), _} = raw_recv(WS1),
+
+ WS2 = rfc6455_client:new("ws://127.0.0.1:" ++ PortStr ++ "/ws", self()),
+ {ok, _} = rfc6455_client:open(WS2),
+ ok = raw_send(WS2,
+ ?CONNECT_PACKET(#mqtt_packet_connect{
+ clean_sess = true,
+ client_id = <<"web-mqtt-tests-last-will-ws2">>,
+ username = <<"guest">>,
+ password = <<"guest">>})),
+ {ok, ?CONNACK_PACKET(?CONNACK_ACCEPT), _} = raw_recv(WS2),
+
+ ok = raw_send(WS2, ?SUBSCRIBE_PACKET(1, [{LastWillDst, ?QOS_1}])),
+ {ok, ?SUBACK_PACKET(_, _), _} = raw_recv(WS2),
+
+ {close, _} = rfc6455_client:close(WS1),
+ ?assertMatch({ok, ?PUBLISH_PACKET(_, LastWillDst, _, LastWillMsg), _}, raw_recv(WS2, 5000)),
+
+ {close, _} = rfc6455_client:close(WS2),
+ ok.
+
+last_will_disabled(Config) ->
+ Port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_web_mqtt),
+ PortStr = integer_to_list(Port),
+
+ LastWillDst = <<"/topic/web-mqtt-tests-ws1-last-will-disabled">>,
+ LastWillMsg = <<"a last will and testament message">>,
+
+ WS1 = rfc6455_client:new("ws://127.0.0.1:" ++ PortStr ++ "/ws", self()),
+ {ok, _} = rfc6455_client:open(WS1),
+ ok = raw_send(WS1,
+ ?CONNECT_PACKET(#mqtt_packet_connect{
+ clean_sess = true,
+ client_id = <<"web-mqtt-tests-last-will-ws1-disabled">>,
+ will_flag = false,
+ will_qos = ?QOS_1,
+ will_topic = LastWillDst,
+ will_msg = LastWillMsg,
+ username = <<"guest">>,
+ password = <<"guest">>})),
+ {ok, ?CONNACK_PACKET(?CONNACK_ACCEPT), _} = raw_recv(WS1),
+
+ WS2 = rfc6455_client:new("ws://127.0.0.1:" ++ PortStr ++ "/ws", self()),
+ {ok, _} = rfc6455_client:open(WS2),
+ ok = raw_send(WS2,
+ ?CONNECT_PACKET(#mqtt_packet_connect{
+ clean_sess = true,
+ client_id = <<"web-mqtt-tests-last-will-ws2-disabled">>,
+ username = <<"guest">>,
+ password = <<"guest">>})),
+ {ok, ?CONNACK_PACKET(?CONNACK_ACCEPT), _} = raw_recv(WS2),
+
+ ok = raw_send(WS2, ?SUBSCRIBE_PACKET(1, [{LastWillDst, ?QOS_1}])),
+ ?assertMatch({ok, ?SUBACK_PACKET(_, _), _}, raw_recv(WS2)),
+
+ {close, _} = rfc6455_client:close(WS1),
+ ?assertEqual({error, timeout}, raw_recv(WS2, 3000)),
+
+ {close, _} = rfc6455_client:close(WS2),
+ ok.
+
+disconnect(Config) ->
+ Port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_web_mqtt),
+ PortStr = integer_to_list(Port),
+ WS = rfc6455_client:new("ws://127.0.0.1:" ++ PortStr ++ "/ws", self()),
+ {ok, _} = rfc6455_client:open(WS),
+ ok = raw_send(WS,
+ ?CONNECT_PACKET(#mqtt_packet_connect{
+ clean_sess = true,
+ client_id = <<"web-mqtt-tests-disconnect">>,
+ username = <<"guest">>,
+ password = <<"guest">>})),
+
+ {ok, ?CONNACK_PACKET(?CONNACK_ACCEPT), _} = raw_recv(WS),
+
+ ok = raw_send(WS, ?PACKET(?DISCONNECT)),
+ {close, {1000, _}} = rfc6455_client:recv(WS),
+
+ ok.
+
+
+raw_send(WS, Packet) ->
+ Frame = emqttc_serialiser:serialise(Packet),
+ rfc6455_client:send_binary(WS, Frame).
+
+raw_recv(WS) ->
+ raw_recv(WS, ?DEFAULT_TIMEOUT).
+
+raw_recv(WS, Timeout) ->
+ case rfc6455_client:recv(WS, Timeout) of
+ {binary, P} ->
+ emqttc_parser:parse(P, emqttc_parser:new());
+ {error, timeout} ->
+ {error, timeout}
+ end.
diff --git a/deps/rabbitmq_web_mqtt_examples/.gitignore b/deps/rabbitmq_web_mqtt_examples/.gitignore
new file mode 100644
index 0000000000..6804c4c69c
--- /dev/null
+++ b/deps/rabbitmq_web_mqtt_examples/.gitignore
@@ -0,0 +1,17 @@
+.sw?
+.*.sw?
+*.beam
+/.erlang.mk/
+/cover/
+/deps/
+/doc/
+/ebin/
+/escript/
+/escript.lock
+/logs/
+/plugins/
+/plugins.lock
+/sbin/
+/sbin.lock
+
+/rabbitmq_web_mqtt_examples.d
diff --git a/deps/rabbitmq_web_mqtt_examples/CODE_OF_CONDUCT.md b/deps/rabbitmq_web_mqtt_examples/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000000..08697906fd
--- /dev/null
+++ b/deps/rabbitmq_web_mqtt_examples/CODE_OF_CONDUCT.md
@@ -0,0 +1,44 @@
+# Contributor Code of Conduct
+
+As contributors and maintainers of this project, and in the interest of fostering an open
+and welcoming community, we pledge to respect all people who contribute through reporting
+issues, posting feature requests, updating documentation, submitting pull requests or
+patches, and other activities.
+
+We are committed to making participation in this project a harassment-free experience for
+everyone, regardless of level of experience, gender, gender identity and expression,
+sexual orientation, disability, personal appearance, body size, race, ethnicity, age,
+religion, or nationality.
+
+Examples of unacceptable behavior by participants include:
+
+ * The use of sexualized language or imagery
+ * Personal attacks
+ * Trolling or insulting/derogatory comments
+ * Public or private harassment
+ * Publishing others' private information, such as physical or electronic addresses,
+ without explicit permission
+ * Other unethical or unprofessional conduct
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments,
+commits, code, wiki edits, issues, and other contributions that are not aligned to this
+Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors
+that they deem inappropriate, threatening, offensive, or harmful.
+
+By adopting this Code of Conduct, project maintainers commit themselves to fairly and
+consistently applying these principles to every aspect of managing this project. Project
+maintainers who do not follow or enforce the Code of Conduct may be permanently removed
+from the project team.
+
+This Code of Conduct applies both within project spaces and in public spaces when an
+individual is representing the project or its community.
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by
+contacting a project maintainer at [info@rabbitmq.com](mailto:info@rabbitmq.com). All complaints will
+be reviewed and investigated and will result in a response that is deemed necessary and
+appropriate to the circumstances. Maintainers are obligated to maintain confidentiality
+with regard to the reporter of an incident.
+
+This Code of Conduct is adapted from the
+[Contributor Covenant](https://contributor-covenant.org), version 1.3.0, available at
+[contributor-covenant.org/version/1/3/0/](https://contributor-covenant.org/version/1/3/0/)
diff --git a/deps/rabbitmq_web_mqtt_examples/CONTRIBUTING.md b/deps/rabbitmq_web_mqtt_examples/CONTRIBUTING.md
new file mode 100644
index 0000000000..23a92fef9c
--- /dev/null
+++ b/deps/rabbitmq_web_mqtt_examples/CONTRIBUTING.md
@@ -0,0 +1,38 @@
+## Overview
+
+RabbitMQ projects use pull requests to discuss, collaborate on, and accept code contributions.
+Pull requests are the primary place for discussing code changes.
+
+## How to Contribute
+
+The process is fairly standard:
+
+ * Fork the repository or repositories you plan on contributing to
+ * Clone [RabbitMQ umbrella repository](https://github.com/rabbitmq/rabbitmq-public-umbrella)
+ * `cd umbrella`, `make co`
+ * Create a branch with a descriptive name in the relevant repositories
+ * Make your changes, run tests, commit with a [descriptive message](https://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html), push to your fork
+ * Submit pull requests with an explanation what has been changed and **why**
+ * Submit a filled out and signed [Contributor Agreement](https://github.com/rabbitmq/ca#how-to-submit) if needed (see below)
+ * Be patient. We will get to your pull request eventually
+
+If what you are going to work on is a substantial change, please first ask the core team
+for its opinion on the [RabbitMQ mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
+
+
+## Code of Conduct
+
+See [CODE_OF_CONDUCT.md](./CODE_OF_CONDUCT.md).
+
+
+## Contributor Agreement
+
+If you want to contribute a non-trivial change, please submit a signed copy of our
+[Contributor Agreement](https://github.com/rabbitmq/ca#how-to-submit) around the time
+you submit your pull request. This will make it much easier (in some cases, possible)
+for the RabbitMQ team at Pivotal to merge your contribution.
+
+
+## Where to Ask Questions
+
+If something isn't clear, feel free to ask on our [mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
diff --git a/deps/rabbitmq_web_mqtt_examples/LICENSE b/deps/rabbitmq_web_mqtt_examples/LICENSE
new file mode 100644
index 0000000000..65aca654fc
--- /dev/null
+++ b/deps/rabbitmq_web_mqtt_examples/LICENSE
@@ -0,0 +1,9 @@
+This package, the rabbitmq-web-mqtt-examples, is licensed under the
+MPL 2.0. For the MPL, please see LICENSE-MPL-RabbitMQ.
+
+priv/mqttws31.js is a part of the Paho project
+(https://eclipse.org/paho/clients/js/) and is released under
+the EPL and the EDL.
+
+If you have any questions regarding licensing, please contact us at
+info@rabbitmq.com.
diff --git a/deps/rabbitmq_web_mqtt_examples/LICENSE-MPL-RabbitMQ b/deps/rabbitmq_web_mqtt_examples/LICENSE-MPL-RabbitMQ
new file mode 100644
index 0000000000..14e2f777f6
--- /dev/null
+++ b/deps/rabbitmq_web_mqtt_examples/LICENSE-MPL-RabbitMQ
@@ -0,0 +1,373 @@
+Mozilla Public License Version 2.0
+==================================
+
+1. Definitions
+--------------
+
+1.1. "Contributor"
+ means each individual or legal entity that creates, contributes to
+ the creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+ means the combination of the Contributions of others (if any) used
+ by a Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+ means Source Code Form to which the initial Contributor has attached
+ the notice in Exhibit A, the Executable Form of such Source Code
+ Form, and Modifications of such Source Code Form, in each case
+ including portions thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ (a) that the initial Contributor has attached the notice described
+ in Exhibit B to the Covered Software; or
+
+ (b) that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the
+ terms of a Secondary License.
+
+1.6. "Executable Form"
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+ means a work that combines Covered Software with other material, in
+ a separate file or files, that is not Covered Software.
+
+1.8. "License"
+ means this document.
+
+1.9. "Licensable"
+ means having the right to grant, to the maximum extent possible,
+ whether at the time of the initial grant or subsequently, any and
+ all of the rights conveyed by this License.
+
+1.10. "Modifications"
+ means any of the following:
+
+ (a) any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered
+ Software; or
+
+ (b) any new file in Source Code Form that contains any Covered
+ Software.
+
+1.11. "Patent Claims" of a Contributor
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the
+ License, by the making, using, selling, offering for sale, having
+ made, import, or transfer of either its Contributions or its
+ Contributor Version.
+
+1.12. "Secondary License"
+ means either the GNU General Public License, Version 2.0, the GNU
+ Lesser General Public License, Version 2.1, the GNU Affero General
+ Public License, Version 3.0, or any later versions of those
+ licenses.
+
+1.13. "Source Code Form"
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that
+ controls, is controlled by, or is under common control with You. For
+ purposes of this definition, "control" means (a) the power, direct
+ or indirect, to cause the direction or management of such entity,
+ whether by contract or otherwise, or (b) ownership of more than
+ fifty percent (50%) of the outstanding shares or beneficial
+ ownership of such entity.
+
+2. License Grants and Conditions
+--------------------------------
+
+2.1. Grants
+
+Each Contributor hereby grants You a world-wide, royalty-free,
+non-exclusive license:
+
+(a) under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+(b) under Patent Claims of such Contributor to make, use, sell, offer
+ for sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+The licenses granted in Section 2.1 with respect to any Contribution
+become effective for each Contribution on the date the Contributor first
+distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+The licenses granted in this Section 2 are the only rights granted under
+this License. No additional rights or licenses will be implied from the
+distribution or licensing of Covered Software under this License.
+Notwithstanding Section 2.1(b) above, no patent license is granted by a
+Contributor:
+
+(a) for any code that a Contributor has removed from Covered Software;
+ or
+
+(b) for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+(c) under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+This License does not grant any rights in the trademarks, service marks,
+or logos of any Contributor (except as may be necessary to comply with
+the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+No Contributor makes additional grants as a result of Your choice to
+distribute the Covered Software under a subsequent version of this
+License (see Section 10.2) or under the terms of a Secondary License (if
+permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+Each Contributor represents that the Contributor believes its
+Contributions are its original creation(s) or it has sufficient rights
+to grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+This License is not intended to limit any rights You have under
+applicable copyright doctrines of fair use, fair dealing, or other
+equivalents.
+
+2.7. Conditions
+
+Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
+in Section 2.1.
+
+3. Responsibilities
+-------------------
+
+3.1. Distribution of Source Form
+
+All distribution of Covered Software in Source Code Form, including any
+Modifications that You create or to which You contribute, must be under
+the terms of this License. You must inform recipients that the Source
+Code Form of the Covered Software is governed by the terms of this
+License, and how they can obtain a copy of this License. You may not
+attempt to alter or restrict the recipients' rights in the Source Code
+Form.
+
+3.2. Distribution of Executable Form
+
+If You distribute Covered Software in Executable Form then:
+
+(a) such Covered Software must also be made available in Source Code
+ Form, as described in Section 3.1, and You must inform recipients of
+ the Executable Form how they can obtain a copy of such Source Code
+ Form by reasonable means in a timely manner, at a charge no more
+ than the cost of distribution to the recipient; and
+
+(b) You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter
+ the recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+You may create and distribute a Larger Work under terms of Your choice,
+provided that You also comply with the requirements of this License for
+the Covered Software. If the Larger Work is a combination of Covered
+Software with a work governed by one or more Secondary Licenses, and the
+Covered Software is not Incompatible With Secondary Licenses, this
+License permits You to additionally distribute such Covered Software
+under the terms of such Secondary License(s), so that the recipient of
+the Larger Work may, at their option, further distribute the Covered
+Software under the terms of either this License or such Secondary
+License(s).
+
+3.4. Notices
+
+You may not remove or alter the substance of any license notices
+(including copyright notices, patent notices, disclaimers of warranty,
+or limitations of liability) contained within the Source Code Form of
+the Covered Software, except that You may alter any license notices to
+the extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+You may choose to offer, and to charge a fee for, warranty, support,
+indemnity or liability obligations to one or more recipients of Covered
+Software. However, You may do so only on Your own behalf, and not on
+behalf of any Contributor. You must make it absolutely clear that any
+such warranty, support, indemnity, or liability obligation is offered by
+You alone, and You hereby agree to indemnify every Contributor for any
+liability incurred by such Contributor as a result of warranty, support,
+indemnity or liability terms You offer. You may include additional
+disclaimers of warranty and limitations of liability specific to any
+jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+---------------------------------------------------
+
+If it is impossible for You to comply with any of the terms of this
+License with respect to some or all of the Covered Software due to
+statute, judicial order, or regulation then You must: (a) comply with
+the terms of this License to the maximum extent possible; and (b)
+describe the limitations and the code they affect. Such description must
+be placed in a text file included with all distributions of the Covered
+Software under this License. Except to the extent prohibited by statute
+or regulation, such description must be sufficiently detailed for a
+recipient of ordinary skill to be able to understand it.
+
+5. Termination
+--------------
+
+5.1. The rights granted under this License will terminate automatically
+if You fail to comply with any of its terms. However, if You become
+compliant, then the rights granted under this License from a particular
+Contributor are reinstated (a) provisionally, unless and until such
+Contributor explicitly and finally terminates Your grants, and (b) on an
+ongoing basis, if such Contributor fails to notify You of the
+non-compliance by some reasonable means prior to 60 days after You have
+come back into compliance. Moreover, Your grants from a particular
+Contributor are reinstated on an ongoing basis if such Contributor
+notifies You of the non-compliance by some reasonable means, this is the
+first time You have received notice of non-compliance with this License
+from such Contributor, and You become compliant prior to 30 days after
+Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+infringement claim (excluding declaratory judgment actions,
+counter-claims, and cross-claims) alleging that a Contributor Version
+directly or indirectly infringes any patent, then the rights granted to
+You by any and all Contributors for the Covered Software under Section
+2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all
+end user license agreements (excluding distributors and resellers) which
+have been validly granted by You or Your distributors under this License
+prior to termination shall survive termination.
+
+************************************************************************
+* *
+* 6. Disclaimer of Warranty *
+* ------------------------- *
+* *
+* Covered Software is provided under this License on an "as is" *
+* basis, without warranty of any kind, either expressed, implied, or *
+* statutory, including, without limitation, warranties that the *
+* Covered Software is free of defects, merchantable, fit for a *
+* particular purpose or non-infringing. The entire risk as to the *
+* quality and performance of the Covered Software is with You. *
+* Should any Covered Software prove defective in any respect, You *
+* (not any Contributor) assume the cost of any necessary servicing, *
+* repair, or correction. This disclaimer of warranty constitutes an *
+* essential part of this License. No use of any Covered Software is *
+* authorized under this License except under this disclaimer. *
+* *
+************************************************************************
+
+************************************************************************
+* *
+* 7. Limitation of Liability *
+* -------------------------- *
+* *
+* Under no circumstances and under no legal theory, whether tort *
+* (including negligence), contract, or otherwise, shall any *
+* Contributor, or anyone who distributes Covered Software as *
+* permitted above, be liable to You for any direct, indirect, *
+* special, incidental, or consequential damages of any character *
+* including, without limitation, damages for lost profits, loss of *
+* goodwill, work stoppage, computer failure or malfunction, or any *
+* and all other commercial damages or losses, even if such party *
+* shall have been informed of the possibility of such damages. This *
+* limitation of liability shall not apply to liability for death or *
+* personal injury resulting from such party's negligence to the *
+* extent applicable law prohibits such limitation. Some *
+* jurisdictions do not allow the exclusion or limitation of *
+* incidental or consequential damages, so this exclusion and *
+* limitation may not apply to You. *
+* *
+************************************************************************
+
+8. Litigation
+-------------
+
+Any litigation relating to this License may be brought only in the
+courts of a jurisdiction where the defendant maintains its principal
+place of business and such litigation shall be governed by laws of that
+jurisdiction, without reference to its conflict-of-law provisions.
+Nothing in this Section shall prevent a party's ability to bring
+cross-claims or counter-claims.
+
+9. Miscellaneous
+----------------
+
+This License represents the complete agreement concerning the subject
+matter hereof. If any provision of this License is held to be
+unenforceable, such provision shall be reformed only to the extent
+necessary to make it enforceable. Any law or regulation which provides
+that the language of a contract shall be construed against the drafter
+shall not be used to construe this License against a Contributor.
+
+10. Versions of the License
+---------------------------
+
+10.1. New Versions
+
+Mozilla Foundation is the license steward. Except as provided in Section
+10.3, no one other than the license steward has the right to modify or
+publish new versions of this License. Each version will be given a
+distinguishing version number.
+
+10.2. Effect of New Versions
+
+You may distribute the Covered Software under the terms of the version
+of the License under which You originally received the Covered Software,
+or under the terms of any subsequent version published by the license
+steward.
+
+10.3. Modified Versions
+
+If you create software not governed by this License, and you want to
+create a new license for such software, you may create and use a
+modified version of this License if you rename the license and remove
+any references to the name of the license steward (except to note that
+such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+Licenses
+
+If You choose to distribute Source Code Form that is Incompatible With
+Secondary Licenses under the terms of this version of the License, the
+notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+-------------------------------------------
+
+ This Source Code Form is subject to the terms of the Mozilla Public
+ License, v. 2.0. If a copy of the MPL was not distributed with this
+ file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular
+file, then You may include the notice in a location (such as a LICENSE
+file in a relevant directory) where a recipient would be likely to look
+for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+---------------------------------------------------------
+
+ This Source Code Form is "Incompatible With Secondary Licenses", as
+ defined by the Mozilla Public License, v. 2.0.
diff --git a/deps/rabbitmq_web_mqtt_examples/Makefile b/deps/rabbitmq_web_mqtt_examples/Makefile
new file mode 100644
index 0000000000..8333d61a63
--- /dev/null
+++ b/deps/rabbitmq_web_mqtt_examples/Makefile
@@ -0,0 +1,23 @@
+PROJECT = rabbitmq_web_mqtt_examples
+PROJECT_DESCRIPTION = Rabbit WEB-MQTT - examples
+PROJECT_MOD = rabbit_web_mqtt_examples_app
+
+define PROJECT_ENV
+[
+ {listener, [{port, 15670}]}
+ ]
+endef
+
+DEPS = rabbit_common rabbit rabbitmq_web_dispatch rabbitmq_web_mqtt
+
+DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-plugin.mk
+DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk
+
+# FIXME: Use erlang.mk patched for RabbitMQ, while waiting for PRs to be
+# reviewed and merged.
+
+ERLANG_MK_REPO = https://github.com/rabbitmq/erlang.mk.git
+ERLANG_MK_COMMIT = rabbitmq-tmp
+
+include rabbitmq-components.mk
+include erlang.mk
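
The `PROJECT_ENV` block above ends up as the `env` section of the generated
`.app` file, so the listener settings are readable through the standard
application environment at runtime. A minimal sketch, assuming the plugin
application is loaded (the 15670 default comes from the Makefile; the
variable names are illustrative):

    %% Illustrative: read the listener settings defined by PROJECT_ENV.
    {ok, Listener} = application:get_env(rabbitmq_web_mqtt_examples, listener),
    Port = proplists:get_value(port, Listener, 15670).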
diff --git a/deps/rabbitmq_web_mqtt_examples/README.md b/deps/rabbitmq_web_mqtt_examples/README.md
new file mode 100644
index 0000000000..4fde10815e
--- /dev/null
+++ b/deps/rabbitmq_web_mqtt_examples/README.md
@@ -0,0 +1,15 @@
+# RabbitMQ Web MQTT Examples
+
+This RabbitMQ plugin contains a few basic examples of
+[RabbitMQ Web MQTT plugin](https://github.com/rabbitmq/rabbitmq-web-mqtt) usage.
+
+It starts a server that binds to port 15670 and serves a few static
+HTML files (e.g. [http://127.0.0.1:15670](http://127.0.0.1:15670/)).
+Note that the Web STOMP examples use the same port, so the two example
+plugins cannot be enabled at the same time unless they are configured
+to use different ports.
+
+## Installation
+
+This plugin ships with RabbitMQ. Enable it using [CLI tools](https://www.rabbitmq.com/cli.html):
+
+ rabbitmq-plugins enable rabbitmq_web_mqtt_examples
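
Because the listener settings live in the plugin's application environment
(see the `PROJECT_ENV` block in the Makefile above), one way to resolve the
port clash with the Web STOMP examples is an `advanced.config` entry. A
sketch, assuming the plugin honours the overridden environment and using an
arbitrary free port such as 15680:

    %% advanced.config (illustrative)
    [
      {rabbitmq_web_mqtt_examples, [
        {listener, [{port, 15680}]}
      ]}
    ].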
diff --git a/deps/rabbitmq_web_mqtt_examples/erlang.mk b/deps/rabbitmq_web_mqtt_examples/erlang.mk
new file mode 100644
index 0000000000..fce4be0b0a
--- /dev/null
+++ b/deps/rabbitmq_web_mqtt_examples/erlang.mk
@@ -0,0 +1,7808 @@
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+.PHONY: all app apps deps search rel relup docs install-docs check tests clean distclean help erlang-mk
+
+ERLANG_MK_FILENAME := $(realpath $(lastword $(MAKEFILE_LIST)))
+export ERLANG_MK_FILENAME
+
+ERLANG_MK_VERSION = 2019.07.01-40-geb3e4b0
+ERLANG_MK_WITHOUT =
+
+# Make 3.81 and 3.82 are deprecated.
+
+ifeq ($(MAKELEVEL)$(MAKE_VERSION),03.81)
+$(warning Please upgrade to GNU Make 4 or later: https://erlang.mk/guide/installation.html)
+endif
+
+ifeq ($(MAKELEVEL)$(MAKE_VERSION),03.82)
+$(warning Please upgrade to GNU Make 4 or later: https://erlang.mk/guide/installation.html)
+endif
+
+# Core configuration.
+
+PROJECT ?= $(notdir $(CURDIR))
+PROJECT := $(strip $(PROJECT))
+
+PROJECT_VERSION ?= rolling
+PROJECT_MOD ?= $(PROJECT)_app
+PROJECT_ENV ?= []
+
+# Verbosity.
+
+V ?= 0
+
+verbose_0 = @
+verbose_2 = set -x;
+verbose = $(verbose_$(V))
+
+ifeq ($(V),3)
+SHELL := $(SHELL) -x
+endif
+
+gen_verbose_0 = @echo " GEN " $@;
+gen_verbose_2 = set -x;
+gen_verbose = $(gen_verbose_$(V))
+
+gen_verbose_esc_0 = @echo " GEN " $$@;
+gen_verbose_esc_2 = set -x;
+gen_verbose_esc = $(gen_verbose_esc_$(V))
+
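+# Note (illustrative): the verbose_$(V) indirection above picks a recipe
+# prefix per verbosity level: verbose_0 hides commands behind "@",
+# verbose_1 is undefined and expands to nothing (recipes echoed as usual),
+# and verbose_2 traces every command with "set -x". For example:
+#
+#     make            # V=0, silent recipes
+#     make V=1        # recipes echoed
+#     make V=2        # recipes traced with set -x
+#     make V=3        # the shell itself is invoked with -x
+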
+# Temporary files directory.
+
+ERLANG_MK_TMP ?= $(CURDIR)/.erlang.mk
+export ERLANG_MK_TMP
+
+# "erl" command.
+
+ERL = erl +A1 -noinput -boot no_dot_erlang
+
+# Platform detection.
+
+ifeq ($(PLATFORM),)
+UNAME_S := $(shell uname -s)
+
+ifeq ($(UNAME_S),Linux)
+PLATFORM = linux
+else ifeq ($(UNAME_S),Darwin)
+PLATFORM = darwin
+else ifeq ($(UNAME_S),SunOS)
+PLATFORM = solaris
+else ifeq ($(UNAME_S),GNU)
+PLATFORM = gnu
+else ifeq ($(UNAME_S),FreeBSD)
+PLATFORM = freebsd
+else ifeq ($(UNAME_S),NetBSD)
+PLATFORM = netbsd
+else ifeq ($(UNAME_S),OpenBSD)
+PLATFORM = openbsd
+else ifeq ($(UNAME_S),DragonFly)
+PLATFORM = dragonfly
+else ifeq ($(shell uname -o),Msys)
+PLATFORM = msys2
+else
+$(error Unable to detect platform. Please open a ticket with the output of uname -a.)
+endif
+
+export PLATFORM
+endif
+
+# Core targets.
+
+all:: deps app rel
+
+# Noop to avoid a Make warning when there's nothing to do.
+rel::
+ $(verbose) :
+
+relup:: deps app
+
+check:: tests
+
+clean:: clean-crashdump
+
+clean-crashdump:
+ifneq ($(wildcard erl_crash.dump),)
+ $(gen_verbose) rm -f erl_crash.dump
+endif
+
+distclean:: clean distclean-tmp
+
+$(ERLANG_MK_TMP):
+ $(verbose) mkdir -p $(ERLANG_MK_TMP)
+
+distclean-tmp:
+ $(gen_verbose) rm -rf $(ERLANG_MK_TMP)
+
+help::
+ $(verbose) printf "%s\n" \
+ "erlang.mk (version $(ERLANG_MK_VERSION)) is distributed under the terms of the ISC License." \
+ "Copyright (c) 2013-2016 Loïc Hoguin <essen@ninenines.eu>" \
+ "" \
+ "Usage: [V=1] $(MAKE) [target]..." \
+ "" \
+ "Core targets:" \
+ " all Run deps, app and rel targets in that order" \
+ " app Compile the project" \
+ " deps Fetch dependencies (if needed) and compile them" \
+ " fetch-deps Fetch dependencies recursively (if needed) without compiling them" \
+ " list-deps List dependencies recursively on stdout" \
+ " search q=... Search for a package in the built-in index" \
+ " rel Build a release for this project, if applicable" \
+ " docs Build the documentation for this project" \
+ " install-docs Install the man pages for this project" \
+ " check Compile and run all tests and analysis for this project" \
+ " tests Run the tests for this project" \
+ " clean Delete temporary and output files from most targets" \
+ " distclean Delete all temporary and output files" \
+ " help Display this help and exit" \
+ " erlang-mk Update erlang.mk to the latest version"
+
+# Core functions.
+
+empty :=
+space := $(empty) $(empty)
+tab := $(empty) $(empty)
+comma := ,
+
+define newline
+
+
+endef
+
+define comma_list
+$(subst $(space),$(comma),$(strip $(1)))
+endef
+
+define escape_dquotes
+$(subst ",\",$1)
+endef
+
+# Adding erlang.mk to make Erlang scripts that call init:get_plain_arguments() happy.
+define erlang
+$(ERL) $2 -pz $(ERLANG_MK_TMP)/rebar/ebin -eval "$(subst $(newline),,$(call escape_dquotes,$1))" -- erlang.mk
+endef
+
+ifeq ($(PLATFORM),msys2)
+core_native_path = $(shell cygpath -m $1)
+else
+core_native_path = $1
+endif
+
+core_http_get = curl -Lf$(if $(filter-out 0,$(V)),,s)o $(call core_native_path,$1) $2
+
+core_eq = $(and $(findstring $(1),$(2)),$(findstring $(2),$(1)))
+
+# We skip files that contain spaces because they end up causing issues.
+core_find = $(if $(wildcard $1),$(shell find $(1:%/=%) \( -type l -o -type f \) -name $(subst *,\*,$2) | grep -v " "))
+
+core_lc = $(subst A,a,$(subst B,b,$(subst C,c,$(subst D,d,$(subst E,e,$(subst F,f,$(subst G,g,$(subst H,h,$(subst I,i,$(subst J,j,$(subst K,k,$(subst L,l,$(subst M,m,$(subst N,n,$(subst O,o,$(subst P,p,$(subst Q,q,$(subst R,r,$(subst S,s,$(subst T,t,$(subst U,u,$(subst V,v,$(subst W,w,$(subst X,x,$(subst Y,y,$(subst Z,z,$(1)))))))))))))))))))))))))))
+
+core_ls = $(filter-out $(1),$(shell echo $(1)))
+
+# @todo Use a solution that does not require using perl.
+core_relpath = $(shell perl -e 'use File::Spec; print File::Spec->abs2rel(@ARGV) . "\n"' $1 $2)
+
+define core_render
+ printf -- '$(subst $(newline),\n,$(subst %,%%,$(subst ','\'',$(subst $(tab),$(WS),$(call $(1))))))\n' > $(2)
+endef
+
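+# Worked expansions of a few of the helpers above (illustrative):
+#
+#     $(call comma_list,a b c)   ->  a,b,c
+#     $(call core_lc,FooBar)     ->  foobar
+#     $(call core_eq,foo,foo)    ->  foo     (non-empty means equal)
+#     $(call core_eq,foo,bar)    ->  (empty)
+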
+# Automated update.
+
+ERLANG_MK_REPO ?= https://github.com/ninenines/erlang.mk
+ERLANG_MK_COMMIT ?=
+ERLANG_MK_BUILD_CONFIG ?= build.config
+ERLANG_MK_BUILD_DIR ?= .erlang.mk.build
+
+erlang-mk: WITHOUT ?= $(ERLANG_MK_WITHOUT)
+erlang-mk:
+ifdef ERLANG_MK_COMMIT
+ $(verbose) git clone $(ERLANG_MK_REPO) $(ERLANG_MK_BUILD_DIR)
+ $(verbose) cd $(ERLANG_MK_BUILD_DIR) && git checkout $(ERLANG_MK_COMMIT)
+else
+ $(verbose) git clone --depth 1 $(ERLANG_MK_REPO) $(ERLANG_MK_BUILD_DIR)
+endif
+ $(verbose) if [ -f $(ERLANG_MK_BUILD_CONFIG) ]; then cp $(ERLANG_MK_BUILD_CONFIG) $(ERLANG_MK_BUILD_DIR)/build.config; fi
+ $(gen_verbose) $(MAKE) --no-print-directory -C $(ERLANG_MK_BUILD_DIR) WITHOUT='$(strip $(WITHOUT))' UPGRADE=1
+ $(verbose) cp $(ERLANG_MK_BUILD_DIR)/erlang.mk ./erlang.mk
+ $(verbose) rm -rf $(ERLANG_MK_BUILD_DIR)
+ $(verbose) rm -rf $(ERLANG_MK_TMP)
+
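+# Illustrative use of the self-update target above; the plugin Makefile
+# earlier in this diff pins exactly these variables:
+#
+#     make erlang-mk ERLANG_MK_REPO=https://github.com/rabbitmq/erlang.mk.git \
+#                    ERLANG_MK_COMMIT=rabbitmq-tmp
+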
+# The erlang.mk package index is bundled in the default erlang.mk build.
+# Search for the string "copyright" to skip to the rest of the code.
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-kerl
+
+KERL_INSTALL_DIR ?= $(HOME)/erlang
+
+ifeq ($(strip $(KERL)),)
+KERL := $(ERLANG_MK_TMP)/kerl/kerl
+endif
+
+KERL_DIR = $(ERLANG_MK_TMP)/kerl
+
+export KERL
+
+KERL_GIT ?= https://github.com/kerl/kerl
+KERL_COMMIT ?= master
+
+KERL_MAKEFLAGS ?=
+
+OTP_GIT ?= https://github.com/erlang/otp
+
+define kerl_otp_target
+$(KERL_INSTALL_DIR)/$(1): $(KERL)
+ $(verbose) if [ ! -d $$@ ]; then \
+ MAKEFLAGS="$(KERL_MAKEFLAGS)" $(KERL) build git $(OTP_GIT) $(1) $(1); \
+ $(KERL) install $(1) $(KERL_INSTALL_DIR)/$(1); \
+ fi
+endef
+
+define kerl_hipe_target
+$(KERL_INSTALL_DIR)/$1-native: $(KERL)
+ $(verbose) if [ ! -d $$@ ]; then \
+ KERL_CONFIGURE_OPTIONS=--enable-native-libs \
+ MAKEFLAGS="$(KERL_MAKEFLAGS)" $(KERL) build git $(OTP_GIT) $1 $1-native; \
+ $(KERL) install $1-native $(KERL_INSTALL_DIR)/$1-native; \
+ fi
+endef
+
+$(KERL): $(KERL_DIR)
+
+$(KERL_DIR): | $(ERLANG_MK_TMP)
+ $(gen_verbose) git clone --depth 1 $(KERL_GIT) $(ERLANG_MK_TMP)/kerl
+ $(verbose) cd $(ERLANG_MK_TMP)/kerl && git checkout $(KERL_COMMIT)
+ $(verbose) chmod +x $(KERL)
+
+distclean:: distclean-kerl
+
+distclean-kerl:
+ $(gen_verbose) rm -rf $(KERL_DIR)
+
+# Allow users to select which version of Erlang/OTP to use for a project.
+
+ifneq ($(strip $(LATEST_ERLANG_OTP)),)
+# In some environments it is necessary to filter out master.
+ERLANG_OTP := $(notdir $(lastword $(sort\
+ $(filter-out $(KERL_INSTALL_DIR)/master $(KERL_INSTALL_DIR)/OTP_R%,\
+ $(filter-out %-rc1 %-rc2 %-rc3,$(wildcard $(KERL_INSTALL_DIR)/*[^-native]))))))
+endif
+
+ERLANG_OTP ?=
+ERLANG_HIPE ?=
+
+# Use kerl to enforce a specific Erlang/OTP version for a project.
+ifneq ($(strip $(ERLANG_OTP)),)
+export PATH := $(KERL_INSTALL_DIR)/$(ERLANG_OTP)/bin:$(PATH)
+SHELL := env PATH=$(PATH) $(SHELL)
+$(eval $(call kerl_otp_target,$(ERLANG_OTP)))
+
+# Build Erlang/OTP only if it doesn't already exist.
+ifeq ($(wildcard $(KERL_INSTALL_DIR)/$(ERLANG_OTP))$(BUILD_ERLANG_OTP),)
+$(info Building Erlang/OTP $(ERLANG_OTP)... Please wait...)
+$(shell $(MAKE) $(KERL_INSTALL_DIR)/$(ERLANG_OTP) ERLANG_OTP=$(ERLANG_OTP) BUILD_ERLANG_OTP=1 >&2)
+endif
+
+else
+# Same for a HiPE enabled VM.
+ifneq ($(strip $(ERLANG_HIPE)),)
+export PATH := $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native/bin:$(PATH)
+SHELL := env PATH=$(PATH) $(SHELL)
+$(eval $(call kerl_hipe_target,$(ERLANG_HIPE)))
+
+# Build Erlang/OTP only if it doesn't already exist.
+ifeq ($(wildcard $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native)$(BUILD_ERLANG_OTP),)
+$(info Building HiPE-enabled Erlang/OTP $(ERLANG_HIPE)... Please wait...)
+$(shell $(MAKE) $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native ERLANG_HIPE=$(ERLANG_HIPE) BUILD_ERLANG_OTP=1 >&2)
+endif
+
+endif
+endif
+
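+# Illustrative usage of the version-selection knobs above. ERLANG_OTP takes
+# a kerl build name ("22.3" below is only an example); kerl builds and
+# installs it under KERL_INSTALL_DIR on first use:
+#
+#     make ERLANG_OTP=22.3
+#
+# LATEST_ERLANG_OTP=1 instead picks the newest non-rc, non-master version
+# already installed under KERL_INSTALL_DIR:
+#
+#     make LATEST_ERLANG_OTP=1
+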
+PACKAGES += aberth
+pkg_aberth_name = aberth
+pkg_aberth_description = Generic BERT-RPC server in Erlang
+pkg_aberth_homepage = https://github.com/a13x/aberth
+pkg_aberth_fetch = git
+pkg_aberth_repo = https://github.com/a13x/aberth
+pkg_aberth_commit = master
+
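+# Each index entry below has the same shape: name, description, homepage,
+# fetch method, repository and default commit. A project Makefile consumes
+# an entry simply by naming it; an illustrative sketch:
+#
+#     DEPS = cowboy              # resolved via the pkg_cowboy_* defaults
+#     dep_cowboy_commit = master # optional: override only the pinned commit
+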
+PACKAGES += active
+pkg_active_name = active
+pkg_active_description = Active development for Erlang: rebuild and reload source/binary files while the VM is running
+pkg_active_homepage = https://github.com/proger/active
+pkg_active_fetch = git
+pkg_active_repo = https://github.com/proger/active
+pkg_active_commit = master
+
+PACKAGES += actordb_core
+pkg_actordb_core_name = actordb_core
+pkg_actordb_core_description = ActorDB main source
+pkg_actordb_core_homepage = http://www.actordb.com/
+pkg_actordb_core_fetch = git
+pkg_actordb_core_repo = https://github.com/biokoda/actordb_core
+pkg_actordb_core_commit = master
+
+PACKAGES += actordb_thrift
+pkg_actordb_thrift_name = actordb_thrift
+pkg_actordb_thrift_description = Thrift API for ActorDB
+pkg_actordb_thrift_homepage = http://www.actordb.com/
+pkg_actordb_thrift_fetch = git
+pkg_actordb_thrift_repo = https://github.com/biokoda/actordb_thrift
+pkg_actordb_thrift_commit = master
+
+PACKAGES += aleppo
+pkg_aleppo_name = aleppo
+pkg_aleppo_description = Alternative Erlang Pre-Processor
+pkg_aleppo_homepage = https://github.com/ErlyORM/aleppo
+pkg_aleppo_fetch = git
+pkg_aleppo_repo = https://github.com/ErlyORM/aleppo
+pkg_aleppo_commit = master
+
+PACKAGES += alog
+pkg_alog_name = alog
+pkg_alog_description = Simply the best logging framework for Erlang
+pkg_alog_homepage = https://github.com/siberian-fast-food/alogger
+pkg_alog_fetch = git
+pkg_alog_repo = https://github.com/siberian-fast-food/alogger
+pkg_alog_commit = master
+
+PACKAGES += amqp_client
+pkg_amqp_client_name = amqp_client
+pkg_amqp_client_description = RabbitMQ Erlang AMQP client
+pkg_amqp_client_homepage = https://www.rabbitmq.com/erlang-client-user-guide.html
+pkg_amqp_client_fetch = git
+pkg_amqp_client_repo = https://github.com/rabbitmq/rabbitmq-erlang-client.git
+pkg_amqp_client_commit = master
+
+PACKAGES += annotations
+pkg_annotations_name = annotations
+pkg_annotations_description = Simple code instrumentation utilities
+pkg_annotations_homepage = https://github.com/hyperthunk/annotations
+pkg_annotations_fetch = git
+pkg_annotations_repo = https://github.com/hyperthunk/annotations
+pkg_annotations_commit = master
+
+PACKAGES += antidote
+pkg_antidote_name = antidote
+pkg_antidote_description = Large-scale computation without synchronisation
+pkg_antidote_homepage = https://syncfree.lip6.fr/
+pkg_antidote_fetch = git
+pkg_antidote_repo = https://github.com/SyncFree/antidote
+pkg_antidote_commit = master
+
+PACKAGES += apns
+pkg_apns_name = apns
+pkg_apns_description = Apple Push Notification Server for Erlang
+pkg_apns_homepage = http://inaka.github.com/apns4erl
+pkg_apns_fetch = git
+pkg_apns_repo = https://github.com/inaka/apns4erl
+pkg_apns_commit = master
+
+PACKAGES += asciideck
+pkg_asciideck_name = asciideck
+pkg_asciideck_description = Asciidoc for Erlang.
+pkg_asciideck_homepage = https://ninenines.eu
+pkg_asciideck_fetch = git
+pkg_asciideck_repo = https://github.com/ninenines/asciideck
+pkg_asciideck_commit = master
+
+PACKAGES += azdht
+pkg_azdht_name = azdht
+pkg_azdht_description = Azureus Distributed Hash Table (DHT) in Erlang
+pkg_azdht_homepage = https://github.com/arcusfelis/azdht
+pkg_azdht_fetch = git
+pkg_azdht_repo = https://github.com/arcusfelis/azdht
+pkg_azdht_commit = master
+
+PACKAGES += backoff
+pkg_backoff_name = backoff
+pkg_backoff_description = Simple exponential backoffs in Erlang
+pkg_backoff_homepage = https://github.com/ferd/backoff
+pkg_backoff_fetch = git
+pkg_backoff_repo = https://github.com/ferd/backoff
+pkg_backoff_commit = master
+
+PACKAGES += barrel_tcp
+pkg_barrel_tcp_name = barrel_tcp
+pkg_barrel_tcp_description = barrel is a generic TCP acceptor pool with low latency in Erlang.
+pkg_barrel_tcp_homepage = https://github.com/benoitc-attic/barrel_tcp
+pkg_barrel_tcp_fetch = git
+pkg_barrel_tcp_repo = https://github.com/benoitc-attic/barrel_tcp
+pkg_barrel_tcp_commit = master
+
+PACKAGES += basho_bench
+pkg_basho_bench_name = basho_bench
+pkg_basho_bench_description = A load-generation and testing tool for basically whatever you can write a returning Erlang function for.
+pkg_basho_bench_homepage = https://github.com/basho/basho_bench
+pkg_basho_bench_fetch = git
+pkg_basho_bench_repo = https://github.com/basho/basho_bench
+pkg_basho_bench_commit = master
+
+PACKAGES += bcrypt
+pkg_bcrypt_name = bcrypt
+pkg_bcrypt_description = Bcrypt Erlang / C library
+pkg_bcrypt_homepage = https://github.com/erlangpack/bcrypt
+pkg_bcrypt_fetch = git
+pkg_bcrypt_repo = https://github.com/erlangpack/bcrypt.git
+pkg_bcrypt_commit = master
+
+PACKAGES += beam
+pkg_beam_name = beam
+pkg_beam_description = BEAM emulator written in Erlang
+pkg_beam_homepage = https://github.com/tonyrog/beam
+pkg_beam_fetch = git
+pkg_beam_repo = https://github.com/tonyrog/beam
+pkg_beam_commit = master
+
+PACKAGES += beanstalk
+pkg_beanstalk_name = beanstalk
+pkg_beanstalk_description = An Erlang client for beanstalkd
+pkg_beanstalk_homepage = https://github.com/tim/erlang-beanstalk
+pkg_beanstalk_fetch = git
+pkg_beanstalk_repo = https://github.com/tim/erlang-beanstalk
+pkg_beanstalk_commit = master
+
+PACKAGES += bear
+pkg_bear_name = bear
+pkg_bear_description = a set of statistics functions for erlang
+pkg_bear_homepage = https://github.com/boundary/bear
+pkg_bear_fetch = git
+pkg_bear_repo = https://github.com/boundary/bear
+pkg_bear_commit = master
+
+PACKAGES += bertconf
+pkg_bertconf_name = bertconf
+pkg_bertconf_description = Make ETS tables out of static BERT files that are auto-reloaded
+pkg_bertconf_homepage = https://github.com/ferd/bertconf
+pkg_bertconf_fetch = git
+pkg_bertconf_repo = https://github.com/ferd/bertconf
+pkg_bertconf_commit = master
+
+PACKAGES += bifrost
+pkg_bifrost_name = bifrost
+pkg_bifrost_description = Erlang FTP Server Framework
+pkg_bifrost_homepage = https://github.com/thorstadt/bifrost
+pkg_bifrost_fetch = git
+pkg_bifrost_repo = https://github.com/thorstadt/bifrost
+pkg_bifrost_commit = master
+
+PACKAGES += binpp
+pkg_binpp_name = binpp
+pkg_binpp_description = Erlang Binary Pretty Printer
+pkg_binpp_homepage = https://github.com/jtendo/binpp
+pkg_binpp_fetch = git
+pkg_binpp_repo = https://github.com/jtendo/binpp
+pkg_binpp_commit = master
+
+PACKAGES += bisect
+pkg_bisect_name = bisect
+pkg_bisect_description = Ordered fixed-size binary dictionary in Erlang
+pkg_bisect_homepage = https://github.com/knutin/bisect
+pkg_bisect_fetch = git
+pkg_bisect_repo = https://github.com/knutin/bisect
+pkg_bisect_commit = master
+
+PACKAGES += bitcask
+pkg_bitcask_name = bitcask
+pkg_bitcask_description = because you need another key/value storage engine
+pkg_bitcask_homepage = https://github.com/basho/bitcask
+pkg_bitcask_fetch = git
+pkg_bitcask_repo = https://github.com/basho/bitcask
+pkg_bitcask_commit = develop
+
+PACKAGES += bitstore
+pkg_bitstore_name = bitstore
+pkg_bitstore_description = A document based ontology development environment
+pkg_bitstore_homepage = https://github.com/bdionne/bitstore
+pkg_bitstore_fetch = git
+pkg_bitstore_repo = https://github.com/bdionne/bitstore
+pkg_bitstore_commit = master
+
+PACKAGES += bootstrap
+pkg_bootstrap_name = bootstrap
+pkg_bootstrap_description = A simple, yet powerful Erlang cluster bootstrapping application.
+pkg_bootstrap_homepage = https://github.com/schlagert/bootstrap
+pkg_bootstrap_fetch = git
+pkg_bootstrap_repo = https://github.com/schlagert/bootstrap
+pkg_bootstrap_commit = master
+
+PACKAGES += boss
+pkg_boss_name = boss
+pkg_boss_description = Erlang web MVC, now featuring Comet
+pkg_boss_homepage = https://github.com/ChicagoBoss/ChicagoBoss
+pkg_boss_fetch = git
+pkg_boss_repo = https://github.com/ChicagoBoss/ChicagoBoss
+pkg_boss_commit = master
+
+PACKAGES += boss_db
+pkg_boss_db_name = boss_db
+pkg_boss_db_description = BossDB: a sharded, caching, pooling, evented ORM for Erlang
+pkg_boss_db_homepage = https://github.com/ErlyORM/boss_db
+pkg_boss_db_fetch = git
+pkg_boss_db_repo = https://github.com/ErlyORM/boss_db
+pkg_boss_db_commit = master
+
+PACKAGES += brod
+pkg_brod_name = brod
+pkg_brod_description = Kafka client in Erlang
+pkg_brod_homepage = https://github.com/klarna/brod
+pkg_brod_fetch = git
+pkg_brod_repo = https://github.com/klarna/brod.git
+pkg_brod_commit = master
+
+PACKAGES += bson
+pkg_bson_name = bson
+pkg_bson_description = BSON documents in Erlang, see bsonspec.org
+pkg_bson_homepage = https://github.com/comtihon/bson-erlang
+pkg_bson_fetch = git
+pkg_bson_repo = https://github.com/comtihon/bson-erlang
+pkg_bson_commit = master
+
+PACKAGES += bullet
+pkg_bullet_name = bullet
+pkg_bullet_description = Simple, reliable, efficient streaming for Cowboy.
+pkg_bullet_homepage = http://ninenines.eu
+pkg_bullet_fetch = git
+pkg_bullet_repo = https://github.com/ninenines/bullet
+pkg_bullet_commit = master
+
+PACKAGES += cache
+pkg_cache_name = cache
+pkg_cache_description = Erlang in-memory cache
+pkg_cache_homepage = https://github.com/fogfish/cache
+pkg_cache_fetch = git
+pkg_cache_repo = https://github.com/fogfish/cache
+pkg_cache_commit = master
+
+PACKAGES += cake
+pkg_cake_name = cake
+pkg_cake_description = Really simple terminal colorization
+pkg_cake_homepage = https://github.com/darach/cake-erl
+pkg_cake_fetch = git
+pkg_cake_repo = https://github.com/darach/cake-erl
+pkg_cake_commit = master
+
+PACKAGES += carotene
+pkg_carotene_name = carotene
+pkg_carotene_description = Real-time server
+pkg_carotene_homepage = https://github.com/carotene/carotene
+pkg_carotene_fetch = git
+pkg_carotene_repo = https://github.com/carotene/carotene
+pkg_carotene_commit = master
+
+PACKAGES += cberl
+pkg_cberl_name = cberl
+pkg_cberl_description = NIF based Erlang bindings for Couchbase
+pkg_cberl_homepage = https://github.com/chitika/cberl
+pkg_cberl_fetch = git
+pkg_cberl_repo = https://github.com/chitika/cberl
+pkg_cberl_commit = master
+
+PACKAGES += cecho
+pkg_cecho_name = cecho
+pkg_cecho_description = An ncurses library for Erlang
+pkg_cecho_homepage = https://github.com/mazenharake/cecho
+pkg_cecho_fetch = git
+pkg_cecho_repo = https://github.com/mazenharake/cecho
+pkg_cecho_commit = master
+
+PACKAGES += cferl
+pkg_cferl_name = cferl
+pkg_cferl_description = Rackspace / Open Stack Cloud Files Erlang Client
+pkg_cferl_homepage = https://github.com/ddossot/cferl
+pkg_cferl_fetch = git
+pkg_cferl_repo = https://github.com/ddossot/cferl
+pkg_cferl_commit = master
+
+PACKAGES += chaos_monkey
+pkg_chaos_monkey_name = chaos_monkey
+pkg_chaos_monkey_description = This is The CHAOS MONKEY. It will kill your processes.
+pkg_chaos_monkey_homepage = https://github.com/dLuna/chaos_monkey
+pkg_chaos_monkey_fetch = git
+pkg_chaos_monkey_repo = https://github.com/dLuna/chaos_monkey
+pkg_chaos_monkey_commit = master
+
+PACKAGES += check_node
+pkg_check_node_name = check_node
+pkg_check_node_description = Nagios Scripts for monitoring Riak
+pkg_check_node_homepage = https://github.com/basho-labs/riak_nagios
+pkg_check_node_fetch = git
+pkg_check_node_repo = https://github.com/basho-labs/riak_nagios
+pkg_check_node_commit = master
+
+PACKAGES += chronos
+pkg_chronos_name = chronos
+pkg_chronos_description = Timer module for Erlang that makes it easy to abstract time out of the tests.
+pkg_chronos_homepage = https://github.com/lehoff/chronos
+pkg_chronos_fetch = git
+pkg_chronos_repo = https://github.com/lehoff/chronos
+pkg_chronos_commit = master
+
+PACKAGES += chumak
+pkg_chumak_name = chumak
+pkg_chumak_description = Pure Erlang implementation of ZeroMQ Message Transport Protocol.
+pkg_chumak_homepage = http://choven.ca
+pkg_chumak_fetch = git
+pkg_chumak_repo = https://github.com/chovencorp/chumak
+pkg_chumak_commit = master
+
+PACKAGES += cl
+pkg_cl_name = cl
+pkg_cl_description = OpenCL binding for Erlang
+pkg_cl_homepage = https://github.com/tonyrog/cl
+pkg_cl_fetch = git
+pkg_cl_repo = https://github.com/tonyrog/cl
+pkg_cl_commit = master
+
+PACKAGES += clique
+pkg_clique_name = clique
+pkg_clique_description = CLI Framework for Erlang
+pkg_clique_homepage = https://github.com/basho/clique
+pkg_clique_fetch = git
+pkg_clique_repo = https://github.com/basho/clique
+pkg_clique_commit = develop
+
+PACKAGES += cloudi_core
+pkg_cloudi_core_name = cloudi_core
+pkg_cloudi_core_description = CloudI internal service runtime
+pkg_cloudi_core_homepage = http://cloudi.org/
+pkg_cloudi_core_fetch = git
+pkg_cloudi_core_repo = https://github.com/CloudI/cloudi_core
+pkg_cloudi_core_commit = master
+
+PACKAGES += cloudi_service_api_requests
+pkg_cloudi_service_api_requests_name = cloudi_service_api_requests
+pkg_cloudi_service_api_requests_description = CloudI Service API requests (JSON-RPC/Erlang-term support)
+pkg_cloudi_service_api_requests_homepage = http://cloudi.org/
+pkg_cloudi_service_api_requests_fetch = git
+pkg_cloudi_service_api_requests_repo = https://github.com/CloudI/cloudi_service_api_requests
+pkg_cloudi_service_api_requests_commit = master
+
+PACKAGES += cloudi_service_db
+pkg_cloudi_service_db_name = cloudi_service_db
+pkg_cloudi_service_db_description = CloudI Database (in-memory/testing/generic)
+pkg_cloudi_service_db_homepage = http://cloudi.org/
+pkg_cloudi_service_db_fetch = git
+pkg_cloudi_service_db_repo = https://github.com/CloudI/cloudi_service_db
+pkg_cloudi_service_db_commit = master
+
+PACKAGES += cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_name = cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_description = Cassandra CloudI Service
+pkg_cloudi_service_db_cassandra_homepage = http://cloudi.org/
+pkg_cloudi_service_db_cassandra_fetch = git
+pkg_cloudi_service_db_cassandra_repo = https://github.com/CloudI/cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_commit = master
+
+PACKAGES += cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_name = cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_description = Cassandra CQL CloudI Service
+pkg_cloudi_service_db_cassandra_cql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_cassandra_cql_fetch = git
+pkg_cloudi_service_db_cassandra_cql_repo = https://github.com/CloudI/cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_commit = master
+
+PACKAGES += cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_name = cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_description = CouchDB CloudI Service
+pkg_cloudi_service_db_couchdb_homepage = http://cloudi.org/
+pkg_cloudi_service_db_couchdb_fetch = git
+pkg_cloudi_service_db_couchdb_repo = https://github.com/CloudI/cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_commit = master
+
+PACKAGES += cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_name = cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_description = elasticsearch CloudI Service
+pkg_cloudi_service_db_elasticsearch_homepage = http://cloudi.org/
+pkg_cloudi_service_db_elasticsearch_fetch = git
+pkg_cloudi_service_db_elasticsearch_repo = https://github.com/CloudI/cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_commit = master
+
+PACKAGES += cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_name = cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_description = memcached CloudI Service
+pkg_cloudi_service_db_memcached_homepage = http://cloudi.org/
+pkg_cloudi_service_db_memcached_fetch = git
+pkg_cloudi_service_db_memcached_repo = https://github.com/CloudI/cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_commit = master
+
+PACKAGES += cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_name = cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_description = MySQL CloudI Service
+pkg_cloudi_service_db_mysql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_mysql_fetch = git
+pkg_cloudi_service_db_mysql_repo = https://github.com/CloudI/cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_commit = master
+
+PACKAGES += cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_name = cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_description = PostgreSQL CloudI Service
+pkg_cloudi_service_db_pgsql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_pgsql_fetch = git
+pkg_cloudi_service_db_pgsql_repo = https://github.com/CloudI/cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_commit = master
+
+PACKAGES += cloudi_service_db_riak
+pkg_cloudi_service_db_riak_name = cloudi_service_db_riak
+pkg_cloudi_service_db_riak_description = Riak CloudI Service
+pkg_cloudi_service_db_riak_homepage = http://cloudi.org/
+pkg_cloudi_service_db_riak_fetch = git
+pkg_cloudi_service_db_riak_repo = https://github.com/CloudI/cloudi_service_db_riak
+pkg_cloudi_service_db_riak_commit = master
+
+PACKAGES += cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_name = cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_description = Tokyo Tyrant CloudI Service
+pkg_cloudi_service_db_tokyotyrant_homepage = http://cloudi.org/
+pkg_cloudi_service_db_tokyotyrant_fetch = git
+pkg_cloudi_service_db_tokyotyrant_repo = https://github.com/CloudI/cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_commit = master
+
+PACKAGES += cloudi_service_filesystem
+pkg_cloudi_service_filesystem_name = cloudi_service_filesystem
+pkg_cloudi_service_filesystem_description = Filesystem CloudI Service
+pkg_cloudi_service_filesystem_homepage = http://cloudi.org/
+pkg_cloudi_service_filesystem_fetch = git
+pkg_cloudi_service_filesystem_repo = https://github.com/CloudI/cloudi_service_filesystem
+pkg_cloudi_service_filesystem_commit = master
+
+PACKAGES += cloudi_service_http_client
+pkg_cloudi_service_http_client_name = cloudi_service_http_client
+pkg_cloudi_service_http_client_description = HTTP client CloudI Service
+pkg_cloudi_service_http_client_homepage = http://cloudi.org/
+pkg_cloudi_service_http_client_fetch = git
+pkg_cloudi_service_http_client_repo = https://github.com/CloudI/cloudi_service_http_client
+pkg_cloudi_service_http_client_commit = master
+
+PACKAGES += cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_name = cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_description = cowboy HTTP/HTTPS CloudI Service
+pkg_cloudi_service_http_cowboy_homepage = http://cloudi.org/
+pkg_cloudi_service_http_cowboy_fetch = git
+pkg_cloudi_service_http_cowboy_repo = https://github.com/CloudI/cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_commit = master
+
+PACKAGES += cloudi_service_http_elli
+pkg_cloudi_service_http_elli_name = cloudi_service_http_elli
+pkg_cloudi_service_http_elli_description = elli HTTP CloudI Service
+pkg_cloudi_service_http_elli_homepage = http://cloudi.org/
+pkg_cloudi_service_http_elli_fetch = git
+pkg_cloudi_service_http_elli_repo = https://github.com/CloudI/cloudi_service_http_elli
+pkg_cloudi_service_http_elli_commit = master
+
+PACKAGES += cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_name = cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_description = Map/Reduce CloudI Service
+pkg_cloudi_service_map_reduce_homepage = http://cloudi.org/
+pkg_cloudi_service_map_reduce_fetch = git
+pkg_cloudi_service_map_reduce_repo = https://github.com/CloudI/cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_commit = master
+
+PACKAGES += cloudi_service_oauth1
+pkg_cloudi_service_oauth1_name = cloudi_service_oauth1
+pkg_cloudi_service_oauth1_description = OAuth v1.0 CloudI Service
+pkg_cloudi_service_oauth1_homepage = http://cloudi.org/
+pkg_cloudi_service_oauth1_fetch = git
+pkg_cloudi_service_oauth1_repo = https://github.com/CloudI/cloudi_service_oauth1
+pkg_cloudi_service_oauth1_commit = master
+
+PACKAGES += cloudi_service_queue
+pkg_cloudi_service_queue_name = cloudi_service_queue
+pkg_cloudi_service_queue_description = Persistent Queue Service
+pkg_cloudi_service_queue_homepage = http://cloudi.org/
+pkg_cloudi_service_queue_fetch = git
+pkg_cloudi_service_queue_repo = https://github.com/CloudI/cloudi_service_queue
+pkg_cloudi_service_queue_commit = master
+
+PACKAGES += cloudi_service_quorum
+pkg_cloudi_service_quorum_name = cloudi_service_quorum
+pkg_cloudi_service_quorum_description = CloudI Quorum Service
+pkg_cloudi_service_quorum_homepage = http://cloudi.org/
+pkg_cloudi_service_quorum_fetch = git
+pkg_cloudi_service_quorum_repo = https://github.com/CloudI/cloudi_service_quorum
+pkg_cloudi_service_quorum_commit = master
+
+PACKAGES += cloudi_service_router
+pkg_cloudi_service_router_name = cloudi_service_router
+pkg_cloudi_service_router_description = CloudI Router Service
+pkg_cloudi_service_router_homepage = http://cloudi.org/
+pkg_cloudi_service_router_fetch = git
+pkg_cloudi_service_router_repo = https://github.com/CloudI/cloudi_service_router
+pkg_cloudi_service_router_commit = master
+
+PACKAGES += cloudi_service_tcp
+pkg_cloudi_service_tcp_name = cloudi_service_tcp
+pkg_cloudi_service_tcp_description = TCP CloudI Service
+pkg_cloudi_service_tcp_homepage = http://cloudi.org/
+pkg_cloudi_service_tcp_fetch = git
+pkg_cloudi_service_tcp_repo = https://github.com/CloudI/cloudi_service_tcp
+pkg_cloudi_service_tcp_commit = master
+
+PACKAGES += cloudi_service_timers
+pkg_cloudi_service_timers_name = cloudi_service_timers
+pkg_cloudi_service_timers_description = Timers CloudI Service
+pkg_cloudi_service_timers_homepage = http://cloudi.org/
+pkg_cloudi_service_timers_fetch = git
+pkg_cloudi_service_timers_repo = https://github.com/CloudI/cloudi_service_timers
+pkg_cloudi_service_timers_commit = master
+
+PACKAGES += cloudi_service_udp
+pkg_cloudi_service_udp_name = cloudi_service_udp
+pkg_cloudi_service_udp_description = UDP CloudI Service
+pkg_cloudi_service_udp_homepage = http://cloudi.org/
+pkg_cloudi_service_udp_fetch = git
+pkg_cloudi_service_udp_repo = https://github.com/CloudI/cloudi_service_udp
+pkg_cloudi_service_udp_commit = master
+
+PACKAGES += cloudi_service_validate
+pkg_cloudi_service_validate_name = cloudi_service_validate
+pkg_cloudi_service_validate_description = CloudI Validate Service
+pkg_cloudi_service_validate_homepage = http://cloudi.org/
+pkg_cloudi_service_validate_fetch = git
+pkg_cloudi_service_validate_repo = https://github.com/CloudI/cloudi_service_validate
+pkg_cloudi_service_validate_commit = master
+
+PACKAGES += cloudi_service_zeromq
+pkg_cloudi_service_zeromq_name = cloudi_service_zeromq
+pkg_cloudi_service_zeromq_description = ZeroMQ CloudI Service
+pkg_cloudi_service_zeromq_homepage = http://cloudi.org/
+pkg_cloudi_service_zeromq_fetch = git
+pkg_cloudi_service_zeromq_repo = https://github.com/CloudI/cloudi_service_zeromq
+pkg_cloudi_service_zeromq_commit = master
+
+PACKAGES += cluster_info
+pkg_cluster_info_name = cluster_info
+pkg_cluster_info_description = Fork of Hibari's nifty cluster_info OTP app
+pkg_cluster_info_homepage = https://github.com/basho/cluster_info
+pkg_cluster_info_fetch = git
+pkg_cluster_info_repo = https://github.com/basho/cluster_info
+pkg_cluster_info_commit = master
+
+PACKAGES += color
+pkg_color_name = color
+pkg_color_description = ANSI colors for your Erlang
+pkg_color_homepage = https://github.com/julianduque/erlang-color
+pkg_color_fetch = git
+pkg_color_repo = https://github.com/julianduque/erlang-color
+pkg_color_commit = master
+
+PACKAGES += confetti
+pkg_confetti_name = confetti
+pkg_confetti_description = Erlang configuration provider / application:get_env/2 on steroids
+pkg_confetti_homepage = https://github.com/jtendo/confetti
+pkg_confetti_fetch = git
+pkg_confetti_repo = https://github.com/jtendo/confetti
+pkg_confetti_commit = master
+
+PACKAGES += couchbeam
+pkg_couchbeam_name = couchbeam
+pkg_couchbeam_description = Apache CouchDB client in Erlang
+pkg_couchbeam_homepage = https://github.com/benoitc/couchbeam
+pkg_couchbeam_fetch = git
+pkg_couchbeam_repo = https://github.com/benoitc/couchbeam
+pkg_couchbeam_commit = master
+
+PACKAGES += covertool
+pkg_covertool_name = covertool
+pkg_covertool_description = Tool to convert Erlang cover data files into Cobertura XML reports
+pkg_covertool_homepage = https://github.com/idubrov/covertool
+pkg_covertool_fetch = git
+pkg_covertool_repo = https://github.com/idubrov/covertool
+pkg_covertool_commit = master
+
+PACKAGES += cowboy
+pkg_cowboy_name = cowboy
+pkg_cowboy_description = Small, fast and modular HTTP server.
+pkg_cowboy_homepage = http://ninenines.eu
+pkg_cowboy_fetch = git
+pkg_cowboy_repo = https://github.com/ninenines/cowboy
+pkg_cowboy_commit = 1.0.4
+
+PACKAGES += cowdb
+pkg_cowdb_name = cowdb
+pkg_cowdb_description = Pure Key/Value database library for Erlang Applications
+pkg_cowdb_homepage = https://github.com/refuge/cowdb
+pkg_cowdb_fetch = git
+pkg_cowdb_repo = https://github.com/refuge/cowdb
+pkg_cowdb_commit = master
+
+PACKAGES += cowlib
+pkg_cowlib_name = cowlib
+pkg_cowlib_description = Support library for manipulating Web protocols.
+pkg_cowlib_homepage = http://ninenines.eu
+pkg_cowlib_fetch = git
+pkg_cowlib_repo = https://github.com/ninenines/cowlib
+pkg_cowlib_commit = 1.0.2
+
+PACKAGES += cpg
+pkg_cpg_name = cpg
+pkg_cpg_description = CloudI Process Groups
+pkg_cpg_homepage = https://github.com/okeuday/cpg
+pkg_cpg_fetch = git
+pkg_cpg_repo = https://github.com/okeuday/cpg
+pkg_cpg_commit = master
+
+PACKAGES += cqerl
+pkg_cqerl_name = cqerl
+pkg_cqerl_description = Native Erlang CQL client for Cassandra
+pkg_cqerl_homepage = https://matehat.github.io/cqerl/
+pkg_cqerl_fetch = git
+pkg_cqerl_repo = https://github.com/matehat/cqerl
+pkg_cqerl_commit = master
+
+PACKAGES += cr
+pkg_cr_name = cr
+pkg_cr_description = Chain Replication
+pkg_cr_homepage = https://synrc.com/apps/cr/doc/cr.htm
+pkg_cr_fetch = git
+pkg_cr_repo = https://github.com/spawnproc/cr
+pkg_cr_commit = master
+
+PACKAGES += cuttlefish
+pkg_cuttlefish_name = cuttlefish
+pkg_cuttlefish_description = never lose your childlike sense of wonder baby cuttlefish, promise me?
+pkg_cuttlefish_homepage = https://github.com/basho/cuttlefish
+pkg_cuttlefish_fetch = git
+pkg_cuttlefish_repo = https://github.com/basho/cuttlefish
+pkg_cuttlefish_commit = master
+
+PACKAGES += damocles
+pkg_damocles_name = damocles
+pkg_damocles_description = Erlang library for generating adversarial network conditions for QAing distributed applications/systems on a single Linux box.
+pkg_damocles_homepage = https://github.com/lostcolony/damocles
+pkg_damocles_fetch = git
+pkg_damocles_repo = https://github.com/lostcolony/damocles
+pkg_damocles_commit = master
+
+PACKAGES += debbie
+pkg_debbie_name = debbie
+pkg_debbie_description = .DEB Built In Erlang
+pkg_debbie_homepage = https://github.com/crownedgrouse/debbie
+pkg_debbie_fetch = git
+pkg_debbie_repo = https://github.com/crownedgrouse/debbie
+pkg_debbie_commit = master
+
+PACKAGES += decimal
+pkg_decimal_name = decimal
+pkg_decimal_description = An Erlang decimal arithmetic library
+pkg_decimal_homepage = https://github.com/tim/erlang-decimal
+pkg_decimal_fetch = git
+pkg_decimal_repo = https://github.com/tim/erlang-decimal
+pkg_decimal_commit = master
+
+PACKAGES += detergent
+pkg_detergent_name = detergent
+pkg_detergent_description = An emulsifying Erlang SOAP library
+pkg_detergent_homepage = https://github.com/devinus/detergent
+pkg_detergent_fetch = git
+pkg_detergent_repo = https://github.com/devinus/detergent
+pkg_detergent_commit = master
+
+PACKAGES += detest
+pkg_detest_name = detest
+pkg_detest_description = Tool for running tests on a cluster of erlang nodes
+pkg_detest_homepage = https://github.com/biokoda/detest
+pkg_detest_fetch = git
+pkg_detest_repo = https://github.com/biokoda/detest
+pkg_detest_commit = master
+
+PACKAGES += dh_date
+pkg_dh_date_name = dh_date
+pkg_dh_date_description = Date formatting / parsing library for erlang
+pkg_dh_date_homepage = https://github.com/daleharvey/dh_date
+pkg_dh_date_fetch = git
+pkg_dh_date_repo = https://github.com/daleharvey/dh_date
+pkg_dh_date_commit = master
+
+PACKAGES += dirbusterl
+pkg_dirbusterl_name = dirbusterl
+pkg_dirbusterl_description = DirBuster successor in Erlang
+pkg_dirbusterl_homepage = https://github.com/silentsignal/DirBustErl
+pkg_dirbusterl_fetch = git
+pkg_dirbusterl_repo = https://github.com/silentsignal/DirBustErl
+pkg_dirbusterl_commit = master
+
+PACKAGES += dispcount
+pkg_dispcount_name = dispcount
+pkg_dispcount_description = Erlang task dispatcher based on ETS counters.
+pkg_dispcount_homepage = https://github.com/ferd/dispcount
+pkg_dispcount_fetch = git
+pkg_dispcount_repo = https://github.com/ferd/dispcount
+pkg_dispcount_commit = master
+
+PACKAGES += dlhttpc
+pkg_dlhttpc_name = dlhttpc
+pkg_dlhttpc_description = dispcount-based lhttpc fork for massive amounts of requests to limited endpoints
+pkg_dlhttpc_homepage = https://github.com/ferd/dlhttpc
+pkg_dlhttpc_fetch = git
+pkg_dlhttpc_repo = https://github.com/ferd/dlhttpc
+pkg_dlhttpc_commit = master
+
+PACKAGES += dns
+pkg_dns_name = dns
+pkg_dns_description = Erlang DNS library
+pkg_dns_homepage = https://github.com/aetrion/dns_erlang
+pkg_dns_fetch = git
+pkg_dns_repo = https://github.com/aetrion/dns_erlang
+pkg_dns_commit = master
+
+PACKAGES += dnssd
+pkg_dnssd_name = dnssd
+pkg_dnssd_description = Erlang interface to Apple's Bonjour DNS Service Discovery implementation
+pkg_dnssd_homepage = https://github.com/benoitc/dnssd_erlang
+pkg_dnssd_fetch = git
+pkg_dnssd_repo = https://github.com/benoitc/dnssd_erlang
+pkg_dnssd_commit = master
+
+PACKAGES += dynamic_compile
+pkg_dynamic_compile_name = dynamic_compile
+pkg_dynamic_compile_description = compile and load erlang modules from string input
+pkg_dynamic_compile_homepage = https://github.com/jkvor/dynamic_compile
+pkg_dynamic_compile_fetch = git
+pkg_dynamic_compile_repo = https://github.com/jkvor/dynamic_compile
+pkg_dynamic_compile_commit = master
+
+PACKAGES += e2
+pkg_e2_name = e2
+pkg_e2_description = Library to simplify writing correct OTP applications.
+pkg_e2_homepage = http://e2project.org
+pkg_e2_fetch = git
+pkg_e2_repo = https://github.com/gar1t/e2
+pkg_e2_commit = master
+
+PACKAGES += eamf
+pkg_eamf_name = eamf
+pkg_eamf_description = eAMF provides Action Message Format (AMF) support for Erlang
+pkg_eamf_homepage = https://github.com/mrinalwadhwa/eamf
+pkg_eamf_fetch = git
+pkg_eamf_repo = https://github.com/mrinalwadhwa/eamf
+pkg_eamf_commit = master
+
+PACKAGES += eavro
+pkg_eavro_name = eavro
+pkg_eavro_description = Apache Avro encoder/decoder
+pkg_eavro_homepage = https://github.com/SIfoxDevTeam/eavro
+pkg_eavro_fetch = git
+pkg_eavro_repo = https://github.com/SIfoxDevTeam/eavro
+pkg_eavro_commit = master
+
+PACKAGES += ecapnp
+pkg_ecapnp_name = ecapnp
+pkg_ecapnp_description = Cap'n Proto library for Erlang
+pkg_ecapnp_homepage = https://github.com/kaos/ecapnp
+pkg_ecapnp_fetch = git
+pkg_ecapnp_repo = https://github.com/kaos/ecapnp
+pkg_ecapnp_commit = master
+
+PACKAGES += econfig
+pkg_econfig_name = econfig
+pkg_econfig_description = simple Erlang config handler using INI files
+pkg_econfig_homepage = https://github.com/benoitc/econfig
+pkg_econfig_fetch = git
+pkg_econfig_repo = https://github.com/benoitc/econfig
+pkg_econfig_commit = master
+
+PACKAGES += edate
+pkg_edate_name = edate
+pkg_edate_description = date manipulation library for erlang
+pkg_edate_homepage = https://github.com/dweldon/edate
+pkg_edate_fetch = git
+pkg_edate_repo = https://github.com/dweldon/edate
+pkg_edate_commit = master
+
+PACKAGES += edgar
+pkg_edgar_name = edgar
+pkg_edgar_description = Erlang Does GNU AR
+pkg_edgar_homepage = https://github.com/crownedgrouse/edgar
+pkg_edgar_fetch = git
+pkg_edgar_repo = https://github.com/crownedgrouse/edgar
+pkg_edgar_commit = master
+
+PACKAGES += edis
+pkg_edis_name = edis
+pkg_edis_description = An Erlang implementation of Redis KV Store
+pkg_edis_homepage = http://inaka.github.com/edis/
+pkg_edis_fetch = git
+pkg_edis_repo = https://github.com/inaka/edis
+pkg_edis_commit = master
+
+PACKAGES += edns
+pkg_edns_name = edns
+pkg_edns_description = Erlang/OTP DNS server
+pkg_edns_homepage = https://github.com/hcvst/erlang-dns
+pkg_edns_fetch = git
+pkg_edns_repo = https://github.com/hcvst/erlang-dns
+pkg_edns_commit = master
+
+PACKAGES += edown
+pkg_edown_name = edown
+pkg_edown_description = EDoc extension for generating Github-flavored Markdown
+pkg_edown_homepage = https://github.com/uwiger/edown
+pkg_edown_fetch = git
+pkg_edown_repo = https://github.com/uwiger/edown
+pkg_edown_commit = master
+
+PACKAGES += eep
+pkg_eep_name = eep
+pkg_eep_description = Erlang Easy Profiling (eep) application provides a way to analyze application performance and call hierarchy
+pkg_eep_homepage = https://github.com/virtan/eep
+pkg_eep_fetch = git
+pkg_eep_repo = https://github.com/virtan/eep
+pkg_eep_commit = master
+
+PACKAGES += eep_app
+pkg_eep_app_name = eep_app
+pkg_eep_app_description = Embedded Event Processing
+pkg_eep_app_homepage = https://github.com/darach/eep-erl
+pkg_eep_app_fetch = git
+pkg_eep_app_repo = https://github.com/darach/eep-erl
+pkg_eep_app_commit = master
+
+PACKAGES += efene
+pkg_efene_name = efene
+pkg_efene_description = Alternative syntax for the Erlang Programming Language focusing on simplicity, ease of use and programmer UX
+pkg_efene_homepage = https://github.com/efene/efene
+pkg_efene_fetch = git
+pkg_efene_repo = https://github.com/efene/efene
+pkg_efene_commit = master
+
+PACKAGES += egeoip
+pkg_egeoip_name = egeoip
+pkg_egeoip_description = Erlang IP Geolocation module, currently supporting the MaxMind GeoLite City Database.
+pkg_egeoip_homepage = https://github.com/mochi/egeoip
+pkg_egeoip_fetch = git
+pkg_egeoip_repo = https://github.com/mochi/egeoip
+pkg_egeoip_commit = master
+
+PACKAGES += ehsa
+pkg_ehsa_name = ehsa
+pkg_ehsa_description = Erlang HTTP server basic and digest authentication modules
+pkg_ehsa_homepage = https://bitbucket.org/a12n/ehsa
+pkg_ehsa_fetch = hg
+pkg_ehsa_repo = https://bitbucket.org/a12n/ehsa
+pkg_ehsa_commit = default
+
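+# Note: unlike most entries here, ehsa is fetched with Mercurial (fetch = hg),
+# so its commit field names hg's default branch ("default") rather than git's
+# "master".
+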
+PACKAGES += ej
+pkg_ej_name = ej
+pkg_ej_description = Helper module for working with Erlang terms representing JSON
+pkg_ej_homepage = https://github.com/seth/ej
+pkg_ej_fetch = git
+pkg_ej_repo = https://github.com/seth/ej
+pkg_ej_commit = master
+
+PACKAGES += ejabberd
+pkg_ejabberd_name = ejabberd
+pkg_ejabberd_description = Robust, ubiquitous and massively scalable Jabber / XMPP Instant Messaging platform
+pkg_ejabberd_homepage = https://github.com/processone/ejabberd
+pkg_ejabberd_fetch = git
+pkg_ejabberd_repo = https://github.com/processone/ejabberd
+pkg_ejabberd_commit = master
+
+PACKAGES += ejwt
+pkg_ejwt_name = ejwt
+pkg_ejwt_description = erlang library for JSON Web Token
+pkg_ejwt_homepage = https://github.com/artefactop/ejwt
+pkg_ejwt_fetch = git
+pkg_ejwt_repo = https://github.com/artefactop/ejwt
+pkg_ejwt_commit = master
+
+PACKAGES += ekaf
+pkg_ekaf_name = ekaf
+pkg_ekaf_description = A minimal, high-performance Kafka client in Erlang.
+pkg_ekaf_homepage = https://github.com/helpshift/ekaf
+pkg_ekaf_fetch = git
+pkg_ekaf_repo = https://github.com/helpshift/ekaf
+pkg_ekaf_commit = master
+
+PACKAGES += elarm
+pkg_elarm_name = elarm
+pkg_elarm_description = Alarm Manager for Erlang.
+pkg_elarm_homepage = https://github.com/esl/elarm
+pkg_elarm_fetch = git
+pkg_elarm_repo = https://github.com/esl/elarm
+pkg_elarm_commit = master
+
+PACKAGES += eleveldb
+pkg_eleveldb_name = eleveldb
+pkg_eleveldb_description = Erlang LevelDB API
+pkg_eleveldb_homepage = https://github.com/basho/eleveldb
+pkg_eleveldb_fetch = git
+pkg_eleveldb_repo = https://github.com/basho/eleveldb
+pkg_eleveldb_commit = master
+
+PACKAGES += elixir
+pkg_elixir_name = elixir
+pkg_elixir_description = Elixir is a dynamic, functional language designed for building scalable and maintainable applications
+pkg_elixir_homepage = https://elixir-lang.org/
+pkg_elixir_fetch = git
+pkg_elixir_repo = https://github.com/elixir-lang/elixir
+pkg_elixir_commit = master
+
+PACKAGES += elli
+pkg_elli_name = elli
+pkg_elli_description = Simple, robust and performant Erlang web server
+pkg_elli_homepage = https://github.com/elli-lib/elli
+pkg_elli_fetch = git
+pkg_elli_repo = https://github.com/elli-lib/elli
+pkg_elli_commit = master
+
+PACKAGES += elvis
+pkg_elvis_name = elvis
+pkg_elvis_description = Erlang Style Reviewer
+pkg_elvis_homepage = https://github.com/inaka/elvis
+pkg_elvis_fetch = git
+pkg_elvis_repo = https://github.com/inaka/elvis
+pkg_elvis_commit = master
+
+PACKAGES += emagick
+pkg_emagick_name = emagick
+pkg_emagick_description = Wrapper for Graphics/ImageMagick command line tool.
+pkg_emagick_homepage = https://github.com/kivra/emagick
+pkg_emagick_fetch = git
+pkg_emagick_repo = https://github.com/kivra/emagick
+pkg_emagick_commit = master
+
+PACKAGES += emysql
+pkg_emysql_name = emysql
+pkg_emysql_description = Stable, pure Erlang MySQL driver.
+pkg_emysql_homepage = https://github.com/Eonblast/Emysql
+pkg_emysql_fetch = git
+pkg_emysql_repo = https://github.com/Eonblast/Emysql
+pkg_emysql_commit = master
+
+PACKAGES += enm
+pkg_enm_name = enm
+pkg_enm_description = Erlang driver for nanomsg
+pkg_enm_homepage = https://github.com/basho/enm
+pkg_enm_fetch = git
+pkg_enm_repo = https://github.com/basho/enm
+pkg_enm_commit = master
+
+PACKAGES += entop
+pkg_entop_name = entop
+pkg_entop_description = A top-like tool for monitoring an Erlang node
+pkg_entop_homepage = https://github.com/mazenharake/entop
+pkg_entop_fetch = git
+pkg_entop_repo = https://github.com/mazenharake/entop
+pkg_entop_commit = master
+
+PACKAGES += epcap
+pkg_epcap_name = epcap
+pkg_epcap_description = Erlang packet capture interface using pcap
+pkg_epcap_homepage = https://github.com/msantos/epcap
+pkg_epcap_fetch = git
+pkg_epcap_repo = https://github.com/msantos/epcap
+pkg_epcap_commit = master
+
+PACKAGES += eper
+pkg_eper_name = eper
+pkg_eper_description = Erlang performance and debugging tools.
+pkg_eper_homepage = https://github.com/massemanet/eper
+pkg_eper_fetch = git
+pkg_eper_repo = https://github.com/massemanet/eper
+pkg_eper_commit = master
+
+PACKAGES += epgsql
+pkg_epgsql_name = epgsql
+pkg_epgsql_description = Erlang PostgreSQL client library.
+pkg_epgsql_homepage = https://github.com/epgsql/epgsql
+pkg_epgsql_fetch = git
+pkg_epgsql_repo = https://github.com/epgsql/epgsql
+pkg_epgsql_commit = master
+
+PACKAGES += episcina
+pkg_episcina_name = episcina
+pkg_episcina_description = A simple, non-intrusive resource pool for connections
+pkg_episcina_homepage = https://github.com/erlware/episcina
+pkg_episcina_fetch = git
+pkg_episcina_repo = https://github.com/erlware/episcina
+pkg_episcina_commit = master
+
+PACKAGES += eplot
+pkg_eplot_name = eplot
+pkg_eplot_description = A plot engine written in erlang.
+pkg_eplot_homepage = https://github.com/psyeugenic/eplot
+pkg_eplot_fetch = git
+pkg_eplot_repo = https://github.com/psyeugenic/eplot
+pkg_eplot_commit = master
+
+PACKAGES += epocxy
+pkg_epocxy_name = epocxy
+pkg_epocxy_description = Erlang Patterns of Concurrency
+pkg_epocxy_homepage = https://github.com/duomark/epocxy
+pkg_epocxy_fetch = git
+pkg_epocxy_repo = https://github.com/duomark/epocxy
+pkg_epocxy_commit = master
+
+PACKAGES += epubnub
+pkg_epubnub_name = epubnub
+pkg_epubnub_description = Erlang PubNub API
+pkg_epubnub_homepage = https://github.com/tsloughter/epubnub
+pkg_epubnub_fetch = git
+pkg_epubnub_repo = https://github.com/tsloughter/epubnub
+pkg_epubnub_commit = master
+
+PACKAGES += eqm
+pkg_eqm_name = eqm
+pkg_eqm_description = Erlang pub sub with supply-demand channels
+pkg_eqm_homepage = https://github.com/loucash/eqm
+pkg_eqm_fetch = git
+pkg_eqm_repo = https://github.com/loucash/eqm
+pkg_eqm_commit = master
+
+PACKAGES += eredis
+pkg_eredis_name = eredis
+pkg_eredis_description = Erlang Redis client
+pkg_eredis_homepage = https://github.com/wooga/eredis
+pkg_eredis_fetch = git
+pkg_eredis_repo = https://github.com/wooga/eredis
+pkg_eredis_commit = master
+
+PACKAGES += eredis_pool
+pkg_eredis_pool_name = eredis_pool
+pkg_eredis_pool_description = eredis_pool is a pool of Redis clients, using eredis and poolboy.
+pkg_eredis_pool_homepage = https://github.com/hiroeorz/eredis_pool
+pkg_eredis_pool_fetch = git
+pkg_eredis_pool_repo = https://github.com/hiroeorz/eredis_pool
+pkg_eredis_pool_commit = master
+
+PACKAGES += erl_streams
+pkg_erl_streams_name = erl_streams
+pkg_erl_streams_description = Streams in Erlang
+pkg_erl_streams_homepage = https://github.com/epappas/erl_streams
+pkg_erl_streams_fetch = git
+pkg_erl_streams_repo = https://github.com/epappas/erl_streams
+pkg_erl_streams_commit = master
+
+PACKAGES += erlang_cep
+pkg_erlang_cep_name = erlang_cep
+pkg_erlang_cep_description = A basic CEP package written in erlang
+pkg_erlang_cep_homepage = https://github.com/danmacklin/erlang_cep
+pkg_erlang_cep_fetch = git
+pkg_erlang_cep_repo = https://github.com/danmacklin/erlang_cep
+pkg_erlang_cep_commit = master
+
+PACKAGES += erlang_js
+pkg_erlang_js_name = erlang_js
+pkg_erlang_js_description = A linked-in driver for Erlang to Mozilla's Spidermonkey Javascript runtime.
+pkg_erlang_js_homepage = https://github.com/basho/erlang_js
+pkg_erlang_js_fetch = git
+pkg_erlang_js_repo = https://github.com/basho/erlang_js
+pkg_erlang_js_commit = master
+
+PACKAGES += erlang_localtime
+pkg_erlang_localtime_name = erlang_localtime
+pkg_erlang_localtime_description = Erlang library for conversion from one local time to another
+pkg_erlang_localtime_homepage = https://github.com/dmitryme/erlang_localtime
+pkg_erlang_localtime_fetch = git
+pkg_erlang_localtime_repo = https://github.com/dmitryme/erlang_localtime
+pkg_erlang_localtime_commit = master
+
+PACKAGES += erlang_smtp
+pkg_erlang_smtp_name = erlang_smtp
+pkg_erlang_smtp_description = Erlang SMTP and POP3 server code.
+pkg_erlang_smtp_homepage = https://github.com/tonyg/erlang-smtp
+pkg_erlang_smtp_fetch = git
+pkg_erlang_smtp_repo = https://github.com/tonyg/erlang-smtp
+pkg_erlang_smtp_commit = master
+
+PACKAGES += erlang_term
+pkg_erlang_term_name = erlang_term
+pkg_erlang_term_description = Erlang Term Info
+pkg_erlang_term_homepage = https://github.com/okeuday/erlang_term
+pkg_erlang_term_fetch = git
+pkg_erlang_term_repo = https://github.com/okeuday/erlang_term
+pkg_erlang_term_commit = master
+
+PACKAGES += erlastic_search
+pkg_erlastic_search_name = erlastic_search
+pkg_erlastic_search_description = An Erlang app for communicating with Elasticsearch's REST interface.
+pkg_erlastic_search_homepage = https://github.com/tsloughter/erlastic_search
+pkg_erlastic_search_fetch = git
+pkg_erlastic_search_repo = https://github.com/tsloughter/erlastic_search
+pkg_erlastic_search_commit = master
+
+PACKAGES += erlasticsearch
+pkg_erlasticsearch_name = erlasticsearch
+pkg_erlasticsearch_description = Erlang thrift interface to elastic_search
+pkg_erlasticsearch_homepage = https://github.com/dieswaytoofast/erlasticsearch
+pkg_erlasticsearch_fetch = git
+pkg_erlasticsearch_repo = https://github.com/dieswaytoofast/erlasticsearch
+pkg_erlasticsearch_commit = master
+
+PACKAGES += erlbrake
+pkg_erlbrake_name = erlbrake
+pkg_erlbrake_description = Erlang Airbrake notification client
+pkg_erlbrake_homepage = https://github.com/kenpratt/erlbrake
+pkg_erlbrake_fetch = git
+pkg_erlbrake_repo = https://github.com/kenpratt/erlbrake
+pkg_erlbrake_commit = master
+
+PACKAGES += erlcloud
+pkg_erlcloud_name = erlcloud
+pkg_erlcloud_description = Cloud Computing library for erlang (Amazon EC2, S3, SQS, SimpleDB, Mechanical Turk, ELB)
+pkg_erlcloud_homepage = https://github.com/gleber/erlcloud
+pkg_erlcloud_fetch = git
+pkg_erlcloud_repo = https://github.com/gleber/erlcloud
+pkg_erlcloud_commit = master
+
+PACKAGES += erlcron
+pkg_erlcron_name = erlcron
+pkg_erlcron_description = Erlang cronish system
+pkg_erlcron_homepage = https://github.com/erlware/erlcron
+pkg_erlcron_fetch = git
+pkg_erlcron_repo = https://github.com/erlware/erlcron
+pkg_erlcron_commit = master
+
+PACKAGES += erldb
+pkg_erldb_name = erldb
+pkg_erldb_description = ORM (Object-relational mapping) application implemented in Erlang
+pkg_erldb_homepage = http://erldb.org
+pkg_erldb_fetch = git
+pkg_erldb_repo = https://github.com/erldb/erldb
+pkg_erldb_commit = master
+
+PACKAGES += erldis
+pkg_erldis_name = erldis
+pkg_erldis_description = redis erlang client library
+pkg_erldis_homepage = https://github.com/cstar/erldis
+pkg_erldis_fetch = git
+pkg_erldis_repo = https://github.com/cstar/erldis
+pkg_erldis_commit = master
+
+PACKAGES += erldns
+pkg_erldns_name = erldns
+pkg_erldns_description = DNS server, in erlang.
+pkg_erldns_homepage = https://github.com/aetrion/erl-dns
+pkg_erldns_fetch = git
+pkg_erldns_repo = https://github.com/aetrion/erl-dns
+pkg_erldns_commit = master
+
+PACKAGES += erldocker
+pkg_erldocker_name = erldocker
+pkg_erldocker_description = Docker Remote API client for Erlang
+pkg_erldocker_homepage = https://github.com/proger/erldocker
+pkg_erldocker_fetch = git
+pkg_erldocker_repo = https://github.com/proger/erldocker
+pkg_erldocker_commit = master
+
+PACKAGES += erlfsmon
+pkg_erlfsmon_name = erlfsmon
+pkg_erlfsmon_description = Erlang filesystem event watcher for Linux and OSX
+pkg_erlfsmon_homepage = https://github.com/proger/erlfsmon
+pkg_erlfsmon_fetch = git
+pkg_erlfsmon_repo = https://github.com/proger/erlfsmon
+pkg_erlfsmon_commit = master
+
+PACKAGES += erlgit
+pkg_erlgit_name = erlgit
+pkg_erlgit_description = Erlang convenience wrapper around git executable
+pkg_erlgit_homepage = https://github.com/gleber/erlgit
+pkg_erlgit_fetch = git
+pkg_erlgit_repo = https://github.com/gleber/erlgit
+pkg_erlgit_commit = master
+
+PACKAGES += erlguten
+pkg_erlguten_name = erlguten
+pkg_erlguten_description = ErlGuten is a system for high-quality typesetting, written purely in Erlang.
+pkg_erlguten_homepage = https://github.com/richcarl/erlguten
+pkg_erlguten_fetch = git
+pkg_erlguten_repo = https://github.com/richcarl/erlguten
+pkg_erlguten_commit = master
+
+PACKAGES += erlmc
+pkg_erlmc_name = erlmc
+pkg_erlmc_description = Erlang memcached binary protocol client
+pkg_erlmc_homepage = https://github.com/jkvor/erlmc
+pkg_erlmc_fetch = git
+pkg_erlmc_repo = https://github.com/jkvor/erlmc
+pkg_erlmc_commit = master
+
+PACKAGES += erlmongo
+pkg_erlmongo_name = erlmongo
+pkg_erlmongo_description = Record based Erlang driver for MongoDB with gridfs support
+pkg_erlmongo_homepage = https://github.com/SergejJurecko/erlmongo
+pkg_erlmongo_fetch = git
+pkg_erlmongo_repo = https://github.com/SergejJurecko/erlmongo
+pkg_erlmongo_commit = master
+
+PACKAGES += erlog
+pkg_erlog_name = erlog
+pkg_erlog_description = Prolog interpreter in and for Erlang
+pkg_erlog_homepage = https://github.com/rvirding/erlog
+pkg_erlog_fetch = git
+pkg_erlog_repo = https://github.com/rvirding/erlog
+pkg_erlog_commit = master
+
+PACKAGES += erlpass
+pkg_erlpass_name = erlpass
+pkg_erlpass_description = A library to handle password hashing and changing in a safe manner, independent from any kind of storage whatsoever.
+pkg_erlpass_homepage = https://github.com/ferd/erlpass
+pkg_erlpass_fetch = git
+pkg_erlpass_repo = https://github.com/ferd/erlpass
+pkg_erlpass_commit = master
+
+PACKAGES += erlport
+pkg_erlport_name = erlport
+pkg_erlport_description = ErlPort - connect Erlang to other languages
+pkg_erlport_homepage = https://github.com/hdima/erlport
+pkg_erlport_fetch = git
+pkg_erlport_repo = https://github.com/hdima/erlport
+pkg_erlport_commit = master
+
+PACKAGES += erlsh
+pkg_erlsh_name = erlsh
+pkg_erlsh_description = Erlang shell tools
+pkg_erlsh_homepage = https://github.com/proger/erlsh
+pkg_erlsh_fetch = git
+pkg_erlsh_repo = https://github.com/proger/erlsh
+pkg_erlsh_commit = master
+
+PACKAGES += erlsha2
+pkg_erlsha2_name = erlsha2
+pkg_erlsha2_description = SHA-224, SHA-256, SHA-384, SHA-512 implemented in Erlang NIFs.
+pkg_erlsha2_homepage = https://github.com/vinoski/erlsha2
+pkg_erlsha2_fetch = git
+pkg_erlsha2_repo = https://github.com/vinoski/erlsha2
+pkg_erlsha2_commit = master
+
+PACKAGES += erlsom
+pkg_erlsom_name = erlsom
+pkg_erlsom_description = XML parser for Erlang
+pkg_erlsom_homepage = https://github.com/willemdj/erlsom
+pkg_erlsom_fetch = git
+pkg_erlsom_repo = https://github.com/willemdj/erlsom
+pkg_erlsom_commit = master
+
+PACKAGES += erlubi
+pkg_erlubi_name = erlubi
+pkg_erlubi_description = Ubigraph Erlang Client (and Process Visualizer)
+pkg_erlubi_homepage = https://github.com/krestenkrab/erlubi
+pkg_erlubi_fetch = git
+pkg_erlubi_repo = https://github.com/krestenkrab/erlubi
+pkg_erlubi_commit = master
+
+PACKAGES += erlvolt
+pkg_erlvolt_name = erlvolt
+pkg_erlvolt_description = VoltDB Erlang Client Driver
+pkg_erlvolt_homepage = https://github.com/VoltDB/voltdb-client-erlang
+pkg_erlvolt_fetch = git
+pkg_erlvolt_repo = https://github.com/VoltDB/voltdb-client-erlang
+pkg_erlvolt_commit = master
+
+PACKAGES += erlware_commons
+pkg_erlware_commons_name = erlware_commons
+pkg_erlware_commons_description = Erlware Commons is an Erlware project focused on all aspects of reusable Erlang components.
+pkg_erlware_commons_homepage = https://github.com/erlware/erlware_commons
+pkg_erlware_commons_fetch = git
+pkg_erlware_commons_repo = https://github.com/erlware/erlware_commons
+pkg_erlware_commons_commit = master
+
+PACKAGES += erlydtl
+pkg_erlydtl_name = erlydtl
+pkg_erlydtl_description = Django Template Language for Erlang.
+pkg_erlydtl_homepage = https://github.com/erlydtl/erlydtl
+pkg_erlydtl_fetch = git
+pkg_erlydtl_repo = https://github.com/erlydtl/erlydtl
+pkg_erlydtl_commit = master
+
+PACKAGES += errd
+pkg_errd_name = errd
+pkg_errd_description = Erlang RRDTool library
+pkg_errd_homepage = https://github.com/archaelus/errd
+pkg_errd_fetch = git
+pkg_errd_repo = https://github.com/archaelus/errd
+pkg_errd_commit = master
+
+PACKAGES += erserve
+pkg_erserve_name = erserve
+pkg_erserve_description = Erlang/Rserve communication interface
+pkg_erserve_homepage = https://github.com/del/erserve
+pkg_erserve_fetch = git
+pkg_erserve_repo = https://github.com/del/erserve
+pkg_erserve_commit = master
+
+PACKAGES += erwa
+pkg_erwa_name = erwa
+pkg_erwa_description = A WAMP router and client written in Erlang.
+pkg_erwa_homepage = https://github.com/bwegh/erwa
+pkg_erwa_fetch = git
+pkg_erwa_repo = https://github.com/bwegh/erwa
+pkg_erwa_commit = master
+
+PACKAGES += escalus
+pkg_escalus_name = escalus
+pkg_escalus_description = An XMPP client library in Erlang for conveniently testing XMPP servers
+pkg_escalus_homepage = https://github.com/esl/escalus
+pkg_escalus_fetch = git
+pkg_escalus_repo = https://github.com/esl/escalus
+pkg_escalus_commit = master
+
+PACKAGES += esh_mk
+pkg_esh_mk_name = esh_mk
+pkg_esh_mk_description = esh template engine plugin for erlang.mk
+pkg_esh_mk_homepage = https://github.com/crownedgrouse/esh.mk
+pkg_esh_mk_fetch = git
+pkg_esh_mk_repo = https://github.com/crownedgrouse/esh.mk.git
+pkg_esh_mk_commit = master
+
+PACKAGES += espec
+pkg_espec_name = espec
+pkg_espec_description = ESpec: Behaviour driven development framework for Erlang
+pkg_espec_homepage = https://github.com/lucaspiller/espec
+pkg_espec_fetch = git
+pkg_espec_repo = https://github.com/lucaspiller/espec
+pkg_espec_commit = master
+
+PACKAGES += estatsd
+pkg_estatsd_name = estatsd
+pkg_estatsd_description = Erlang stats aggregation app that periodically flushes data to graphite
+pkg_estatsd_homepage = https://github.com/RJ/estatsd
+pkg_estatsd_fetch = git
+pkg_estatsd_repo = https://github.com/RJ/estatsd
+pkg_estatsd_commit = master
+
+PACKAGES += etap
+pkg_etap_name = etap
+pkg_etap_description = etap is a simple erlang testing library that provides TAP compliant output.
+pkg_etap_homepage = https://github.com/ngerakines/etap
+pkg_etap_fetch = git
+pkg_etap_repo = https://github.com/ngerakines/etap
+pkg_etap_commit = master
+
+PACKAGES += etest
+pkg_etest_name = etest
+pkg_etest_description = A lightweight, convention over configuration test framework for Erlang
+pkg_etest_homepage = https://github.com/wooga/etest
+pkg_etest_fetch = git
+pkg_etest_repo = https://github.com/wooga/etest
+pkg_etest_commit = master
+
+PACKAGES += etest_http
+pkg_etest_http_name = etest_http
+pkg_etest_http_description = etest Assertions around HTTP (client-side)
+pkg_etest_http_homepage = https://github.com/wooga/etest_http
+pkg_etest_http_fetch = git
+pkg_etest_http_repo = https://github.com/wooga/etest_http
+pkg_etest_http_commit = master
+
+PACKAGES += etoml
+pkg_etoml_name = etoml
+pkg_etoml_description = TOML language erlang parser
+pkg_etoml_homepage = https://github.com/kalta/etoml
+pkg_etoml_fetch = git
+pkg_etoml_repo = https://github.com/kalta/etoml
+pkg_etoml_commit = master
+
+PACKAGES += eunit
+pkg_eunit_name = eunit
+pkg_eunit_description = The EUnit lightweight unit testing framework for Erlang - this is the canonical development repository.
+pkg_eunit_homepage = https://github.com/richcarl/eunit
+pkg_eunit_fetch = git
+pkg_eunit_repo = https://github.com/richcarl/eunit
+pkg_eunit_commit = master
+
+PACKAGES += eunit_formatters
+pkg_eunit_formatters_name = eunit_formatters
+pkg_eunit_formatters_description = Because eunit's output sucks. Let's make it better.
+pkg_eunit_formatters_homepage = https://github.com/seancribbs/eunit_formatters
+pkg_eunit_formatters_fetch = git
+pkg_eunit_formatters_repo = https://github.com/seancribbs/eunit_formatters
+pkg_eunit_formatters_commit = master
+
+PACKAGES += euthanasia
+pkg_euthanasia_name = euthanasia
+pkg_euthanasia_description = Merciful killer for your Erlang processes
+pkg_euthanasia_homepage = https://github.com/doubleyou/euthanasia
+pkg_euthanasia_fetch = git
+pkg_euthanasia_repo = https://github.com/doubleyou/euthanasia
+pkg_euthanasia_commit = master
+
+PACKAGES += evum
+pkg_evum_name = evum
+pkg_evum_description = Spawn Linux VMs as Erlang processes in the Erlang VM
+pkg_evum_homepage = https://github.com/msantos/evum
+pkg_evum_fetch = git
+pkg_evum_repo = https://github.com/msantos/evum
+pkg_evum_commit = master
+
+PACKAGES += exec
+pkg_exec_name = erlexec
+pkg_exec_description = Execute and control OS processes from Erlang/OTP.
+pkg_exec_homepage = http://saleyn.github.com/erlexec
+pkg_exec_fetch = git
+pkg_exec_repo = https://github.com/saleyn/erlexec
+pkg_exec_commit = master
+
+PACKAGES += exml
+pkg_exml_name = exml
+pkg_exml_description = XML parsing library in Erlang
+pkg_exml_homepage = https://github.com/paulgray/exml
+pkg_exml_fetch = git
+pkg_exml_repo = https://github.com/paulgray/exml
+pkg_exml_commit = master
+
+PACKAGES += exometer
+pkg_exometer_name = exometer
+pkg_exometer_description = Basic measurement objects and probe behavior
+pkg_exometer_homepage = https://github.com/Feuerlabs/exometer
+pkg_exometer_fetch = git
+pkg_exometer_repo = https://github.com/Feuerlabs/exometer
+pkg_exometer_commit = master
+
+PACKAGES += exs1024
+pkg_exs1024_name = exs1024
+pkg_exs1024_description = Xorshift1024star pseudo random number generator for Erlang.
+pkg_exs1024_homepage = https://github.com/jj1bdx/exs1024
+pkg_exs1024_fetch = git
+pkg_exs1024_repo = https://github.com/jj1bdx/exs1024
+pkg_exs1024_commit = master
+
+PACKAGES += exs64
+pkg_exs64_name = exs64
+pkg_exs64_description = Xorshift64star pseudo random number generator for Erlang.
+pkg_exs64_homepage = https://github.com/jj1bdx/exs64
+pkg_exs64_fetch = git
+pkg_exs64_repo = https://github.com/jj1bdx/exs64
+pkg_exs64_commit = master
+
+PACKAGES += exsplus116
+pkg_exsplus116_name = exsplus116
+pkg_exsplus116_description = Xorshift116plus for Erlang
+pkg_exsplus116_homepage = https://github.com/jj1bdx/exsplus116
+pkg_exsplus116_fetch = git
+pkg_exsplus116_repo = https://github.com/jj1bdx/exsplus116
+pkg_exsplus116_commit = master
+
+PACKAGES += exsplus128
+pkg_exsplus128_name = exsplus128
+pkg_exsplus128_description = Xorshift128plus pseudo random number generator for Erlang.
+pkg_exsplus128_homepage = https://github.com/jj1bdx/exsplus128
+pkg_exsplus128_fetch = git
+pkg_exsplus128_repo = https://github.com/jj1bdx/exsplus128
+pkg_exsplus128_commit = master
+
+PACKAGES += ezmq
+pkg_ezmq_name = ezmq
+pkg_ezmq_description = zMQ implemented in Erlang
+pkg_ezmq_homepage = https://github.com/RoadRunnr/ezmq
+pkg_ezmq_fetch = git
+pkg_ezmq_repo = https://github.com/RoadRunnr/ezmq
+pkg_ezmq_commit = master
+
+PACKAGES += ezmtp
+pkg_ezmtp_name = ezmtp
+pkg_ezmtp_description = ZMTP protocol in pure Erlang.
+pkg_ezmtp_homepage = https://github.com/a13x/ezmtp
+pkg_ezmtp_fetch = git
+pkg_ezmtp_repo = https://github.com/a13x/ezmtp
+pkg_ezmtp_commit = master
+
+PACKAGES += fast_disk_log
+pkg_fast_disk_log_name = fast_disk_log
+pkg_fast_disk_log_description = Pool-based asynchronous Erlang disk logger
+pkg_fast_disk_log_homepage = https://github.com/lpgauth/fast_disk_log
+pkg_fast_disk_log_fetch = git
+pkg_fast_disk_log_repo = https://github.com/lpgauth/fast_disk_log
+pkg_fast_disk_log_commit = master
+
+PACKAGES += feeder
+pkg_feeder_name = feeder
+pkg_feeder_description = Stream parse RSS and Atom formatted XML feeds.
+pkg_feeder_homepage = https://github.com/michaelnisi/feeder
+pkg_feeder_fetch = git
+pkg_feeder_repo = https://github.com/michaelnisi/feeder
+pkg_feeder_commit = master
+
+PACKAGES += find_crate
+pkg_find_crate_name = find_crate
+pkg_find_crate_description = Find Rust libs and exes in Erlang application priv directory
+pkg_find_crate_homepage = https://github.com/goertzenator/find_crate
+pkg_find_crate_fetch = git
+pkg_find_crate_repo = https://github.com/goertzenator/find_crate
+pkg_find_crate_commit = master
+
+PACKAGES += fix
+pkg_fix_name = fix
+pkg_fix_description = FIX protocol (http://fixprotocol.org/) implementation.
+pkg_fix_homepage = https://github.com/maxlapshin/fix
+pkg_fix_fetch = git
+pkg_fix_repo = https://github.com/maxlapshin/fix
+pkg_fix_commit = master
+
+PACKAGES += flower
+pkg_flower_name = flower
+pkg_flower_description = FlowER - an Erlang OpenFlow development platform
+pkg_flower_homepage = https://github.com/travelping/flower
+pkg_flower_fetch = git
+pkg_flower_repo = https://github.com/travelping/flower
+pkg_flower_commit = master
+
+PACKAGES += fn
+pkg_fn_name = fn
+pkg_fn_description = Function utilities for Erlang
+pkg_fn_homepage = https://github.com/reiddraper/fn
+pkg_fn_fetch = git
+pkg_fn_repo = https://github.com/reiddraper/fn
+pkg_fn_commit = master
+
+PACKAGES += folsom
+pkg_folsom_name = folsom
+pkg_folsom_description = Expose Erlang Events and Metrics
+pkg_folsom_homepage = https://github.com/boundary/folsom
+pkg_folsom_fetch = git
+pkg_folsom_repo = https://github.com/boundary/folsom
+pkg_folsom_commit = master
+
+PACKAGES += folsom_cowboy
+pkg_folsom_cowboy_name = folsom_cowboy
+pkg_folsom_cowboy_description = A Cowboy based Folsom HTTP Wrapper.
+pkg_folsom_cowboy_homepage = https://github.com/boundary/folsom_cowboy
+pkg_folsom_cowboy_fetch = git
+pkg_folsom_cowboy_repo = https://github.com/boundary/folsom_cowboy
+pkg_folsom_cowboy_commit = master
+
+PACKAGES += folsomite
+pkg_folsomite_name = folsomite
+pkg_folsomite_description = blow up your graphite / riemann server with folsom metrics
+pkg_folsomite_homepage = https://github.com/campanja/folsomite
+pkg_folsomite_fetch = git
+pkg_folsomite_repo = https://github.com/campanja/folsomite
+pkg_folsomite_commit = master
+
+PACKAGES += fs
+pkg_fs_name = fs
+pkg_fs_description = Erlang FileSystem Listener
+pkg_fs_homepage = https://github.com/synrc/fs
+pkg_fs_fetch = git
+pkg_fs_repo = https://github.com/synrc/fs
+pkg_fs_commit = master
+
+PACKAGES += fuse
+pkg_fuse_name = fuse
+pkg_fuse_description = A Circuit Breaker for Erlang
+pkg_fuse_homepage = https://github.com/jlouis/fuse
+pkg_fuse_fetch = git
+pkg_fuse_repo = https://github.com/jlouis/fuse
+pkg_fuse_commit = master
+
+PACKAGES += gcm
+pkg_gcm_name = gcm
+pkg_gcm_description = An Erlang application for Google Cloud Messaging
+pkg_gcm_homepage = https://github.com/pdincau/gcm-erlang
+pkg_gcm_fetch = git
+pkg_gcm_repo = https://github.com/pdincau/gcm-erlang
+pkg_gcm_commit = master
+
+PACKAGES += gcprof
+pkg_gcprof_name = gcprof
+pkg_gcprof_description = Garbage Collection profiler for Erlang
+pkg_gcprof_homepage = https://github.com/knutin/gcprof
+pkg_gcprof_fetch = git
+pkg_gcprof_repo = https://github.com/knutin/gcprof
+pkg_gcprof_commit = master
+
+PACKAGES += geas
+pkg_geas_name = geas
+pkg_geas_description = Guess Erlang Application Scattering
+pkg_geas_homepage = https://github.com/crownedgrouse/geas
+pkg_geas_fetch = git
+pkg_geas_repo = https://github.com/crownedgrouse/geas
+pkg_geas_commit = master
+
+PACKAGES += geef
+pkg_geef_name = geef
+pkg_geef_description = Git NEEEEF (Erlang NIF)
+pkg_geef_homepage = https://github.com/carlosmn/geef
+pkg_geef_fetch = git
+pkg_geef_repo = https://github.com/carlosmn/geef
+pkg_geef_commit = master
+
+PACKAGES += gen_coap
+pkg_gen_coap_name = gen_coap
+pkg_gen_coap_description = Generic Erlang CoAP Client/Server
+pkg_gen_coap_homepage = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_fetch = git
+pkg_gen_coap_repo = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_commit = master
+
+PACKAGES += gen_cycle
+pkg_gen_cycle_name = gen_cycle
+pkg_gen_cycle_description = Simple, generic OTP behaviour for recurring tasks
+pkg_gen_cycle_homepage = https://github.com/aerosol/gen_cycle
+pkg_gen_cycle_fetch = git
+pkg_gen_cycle_repo = https://github.com/aerosol/gen_cycle
+pkg_gen_cycle_commit = develop
+
+PACKAGES += gen_icmp
+pkg_gen_icmp_name = gen_icmp
+pkg_gen_icmp_description = Erlang interface to ICMP sockets
+pkg_gen_icmp_homepage = https://github.com/msantos/gen_icmp
+pkg_gen_icmp_fetch = git
+pkg_gen_icmp_repo = https://github.com/msantos/gen_icmp
+pkg_gen_icmp_commit = master
+
+PACKAGES += gen_leader
+pkg_gen_leader_name = gen_leader
+pkg_gen_leader_description = leader election behavior
+pkg_gen_leader_homepage = https://github.com/garret-smith/gen_leader_revival
+pkg_gen_leader_fetch = git
+pkg_gen_leader_repo = https://github.com/garret-smith/gen_leader_revival
+pkg_gen_leader_commit = master
+
+PACKAGES += gen_nb_server
+pkg_gen_nb_server_name = gen_nb_server
+pkg_gen_nb_server_description = OTP behavior for writing non-blocking servers
+pkg_gen_nb_server_homepage = https://github.com/kevsmith/gen_nb_server
+pkg_gen_nb_server_fetch = git
+pkg_gen_nb_server_repo = https://github.com/kevsmith/gen_nb_server
+pkg_gen_nb_server_commit = master
+
+PACKAGES += gen_paxos
+pkg_gen_paxos_name = gen_paxos
+pkg_gen_paxos_description = An Erlang/OTP-style implementation of the PAXOS distributed consensus protocol
+pkg_gen_paxos_homepage = https://github.com/gburd/gen_paxos
+pkg_gen_paxos_fetch = git
+pkg_gen_paxos_repo = https://github.com/gburd/gen_paxos
+pkg_gen_paxos_commit = master
+
+PACKAGES += gen_rpc
+pkg_gen_rpc_name = gen_rpc
+pkg_gen_rpc_description = A scalable RPC library for Erlang-VM based languages
+pkg_gen_rpc_homepage = https://github.com/priestjim/gen_rpc
+pkg_gen_rpc_fetch = git
+pkg_gen_rpc_repo = https://github.com/priestjim/gen_rpc.git
+pkg_gen_rpc_commit = master
+
+PACKAGES += gen_smtp
+pkg_gen_smtp_name = gen_smtp
+pkg_gen_smtp_description = A generic Erlang SMTP server and client that can be extended via callback modules
+pkg_gen_smtp_homepage = https://github.com/Vagabond/gen_smtp
+pkg_gen_smtp_fetch = git
+pkg_gen_smtp_repo = https://github.com/Vagabond/gen_smtp
+pkg_gen_smtp_commit = master
+
+PACKAGES += gen_tracker
+pkg_gen_tracker_name = gen_tracker
+pkg_gen_tracker_description = supervisor with ets handling of children and their metadata
+pkg_gen_tracker_homepage = https://github.com/erlyvideo/gen_tracker
+pkg_gen_tracker_fetch = git
+pkg_gen_tracker_repo = https://github.com/erlyvideo/gen_tracker
+pkg_gen_tracker_commit = master
+
+PACKAGES += gen_unix
+pkg_gen_unix_name = gen_unix
+pkg_gen_unix_description = Erlang Unix socket interface
+pkg_gen_unix_homepage = https://github.com/msantos/gen_unix
+pkg_gen_unix_fetch = git
+pkg_gen_unix_repo = https://github.com/msantos/gen_unix
+pkg_gen_unix_commit = master
+
+PACKAGES += geode
+pkg_geode_name = geode
+pkg_geode_description = geohash/proximity lookup in pure, uncut erlang.
+pkg_geode_homepage = https://github.com/bradfordw/geode
+pkg_geode_fetch = git
+pkg_geode_repo = https://github.com/bradfordw/geode
+pkg_geode_commit = master
+
+PACKAGES += getopt
+pkg_getopt_name = getopt
+pkg_getopt_description = Module to parse command line arguments using the GNU getopt syntax
+pkg_getopt_homepage = https://github.com/jcomellas/getopt
+pkg_getopt_fetch = git
+pkg_getopt_repo = https://github.com/jcomellas/getopt
+pkg_getopt_commit = master
+
+PACKAGES += gettext
+pkg_gettext_name = gettext
+pkg_gettext_description = Erlang internationalization library.
+pkg_gettext_homepage = https://github.com/etnt/gettext
+pkg_gettext_fetch = git
+pkg_gettext_repo = https://github.com/etnt/gettext
+pkg_gettext_commit = master
+
+PACKAGES += giallo
+pkg_giallo_name = giallo
+pkg_giallo_description = Small and flexible web framework on top of Cowboy
+pkg_giallo_homepage = https://github.com/kivra/giallo
+pkg_giallo_fetch = git
+pkg_giallo_repo = https://github.com/kivra/giallo
+pkg_giallo_commit = master
+
+PACKAGES += gin
+pkg_gin_name = gin
+pkg_gin_description = Guard helpers for Erlang, implemented as a parse_transform
+pkg_gin_homepage = https://github.com/mad-cocktail/gin
+pkg_gin_fetch = git
+pkg_gin_repo = https://github.com/mad-cocktail/gin
+pkg_gin_commit = master
+
+PACKAGES += gitty
+pkg_gitty_name = gitty
+pkg_gitty_description = Git access in erlang
+pkg_gitty_homepage = https://github.com/maxlapshin/gitty
+pkg_gitty_fetch = git
+pkg_gitty_repo = https://github.com/maxlapshin/gitty
+pkg_gitty_commit = master
+
+PACKAGES += gold_fever
+pkg_gold_fever_name = gold_fever
+pkg_gold_fever_description = A Treasure Hunt for Erlangers
+pkg_gold_fever_homepage = https://github.com/inaka/gold_fever
+pkg_gold_fever_fetch = git
+pkg_gold_fever_repo = https://github.com/inaka/gold_fever
+pkg_gold_fever_commit = master
+
+PACKAGES += gpb
+pkg_gpb_name = gpb
+pkg_gpb_description = A Google Protobuf implementation for Erlang
+pkg_gpb_homepage = https://github.com/tomas-abrahamsson/gpb
+pkg_gpb_fetch = git
+pkg_gpb_repo = https://github.com/tomas-abrahamsson/gpb
+pkg_gpb_commit = master
+
+PACKAGES += gproc
+pkg_gproc_name = gproc
+pkg_gproc_description = Extended process registry for Erlang
+pkg_gproc_homepage = https://github.com/uwiger/gproc
+pkg_gproc_fetch = git
+pkg_gproc_repo = https://github.com/uwiger/gproc
+pkg_gproc_commit = master
+
+PACKAGES += grapherl
+pkg_grapherl_name = grapherl
+pkg_grapherl_description = Create graphs of Erlang systems and programs
+pkg_grapherl_homepage = https://github.com/eproxus/grapherl
+pkg_grapherl_fetch = git
+pkg_grapherl_repo = https://github.com/eproxus/grapherl
+pkg_grapherl_commit = master
+
+PACKAGES += grpc
+pkg_grpc_name = grpc
+pkg_grpc_description = gRPC server in Erlang
+pkg_grpc_homepage = https://github.com/Bluehouse-Technology/grpc
+pkg_grpc_fetch = git
+pkg_grpc_repo = https://github.com/Bluehouse-Technology/grpc
+pkg_grpc_commit = master
+
+PACKAGES += grpc_client
+pkg_grpc_client_name = grpc_client
+pkg_grpc_client_description = gRPC client in Erlang
+pkg_grpc_client_homepage = https://github.com/Bluehouse-Technology/grpc_client
+pkg_grpc_client_fetch = git
+pkg_grpc_client_repo = https://github.com/Bluehouse-Technology/grpc_client
+pkg_grpc_client_commit = master
+
+PACKAGES += gun
+pkg_gun_name = gun
+pkg_gun_description = Asynchronous SPDY, HTTP and Websocket client written in Erlang.
+pkg_gun_homepage = http://ninenines.eu
+pkg_gun_fetch = git
+pkg_gun_repo = https://github.com/ninenines/gun
+pkg_gun_commit = master
+
+PACKAGES += gut
+pkg_gut_name = gut
+pkg_gut_description = gut is a template printing, aka scaffolding, tool for Erlang. Like rails generate or yeoman
+pkg_gut_homepage = https://github.com/unbalancedparentheses/gut
+pkg_gut_fetch = git
+pkg_gut_repo = https://github.com/unbalancedparentheses/gut
+pkg_gut_commit = master
+
+PACKAGES += hackney
+pkg_hackney_name = hackney
+pkg_hackney_description = simple HTTP client in Erlang
+pkg_hackney_homepage = https://github.com/benoitc/hackney
+pkg_hackney_fetch = git
+pkg_hackney_repo = https://github.com/benoitc/hackney
+pkg_hackney_commit = master
+
+PACKAGES += hamcrest
+pkg_hamcrest_name = hamcrest
+pkg_hamcrest_description = Erlang port of Hamcrest
+pkg_hamcrest_homepage = https://github.com/hyperthunk/hamcrest-erlang
+pkg_hamcrest_fetch = git
+pkg_hamcrest_repo = https://github.com/hyperthunk/hamcrest-erlang
+pkg_hamcrest_commit = master
+
+PACKAGES += hanoidb
+pkg_hanoidb_name = hanoidb
+pkg_hanoidb_description = Erlang LSM BTree Storage
+pkg_hanoidb_homepage = https://github.com/krestenkrab/hanoidb
+pkg_hanoidb_fetch = git
+pkg_hanoidb_repo = https://github.com/krestenkrab/hanoidb
+pkg_hanoidb_commit = master
+
+PACKAGES += hottub
+pkg_hottub_name = hottub
+pkg_hottub_description = Permanent Erlang Worker Pool
+pkg_hottub_homepage = https://github.com/bfrog/hottub
+pkg_hottub_fetch = git
+pkg_hottub_repo = https://github.com/bfrog/hottub
+pkg_hottub_commit = master
+
+PACKAGES += hpack
+pkg_hpack_name = hpack
+pkg_hpack_description = HPACK Implementation for Erlang
+pkg_hpack_homepage = https://github.com/joedevivo/hpack
+pkg_hpack_fetch = git
+pkg_hpack_repo = https://github.com/joedevivo/hpack
+pkg_hpack_commit = master
+
+PACKAGES += hyper
+pkg_hyper_name = hyper
+pkg_hyper_description = Erlang implementation of HyperLogLog
+pkg_hyper_homepage = https://github.com/GameAnalytics/hyper
+pkg_hyper_fetch = git
+pkg_hyper_repo = https://github.com/GameAnalytics/hyper
+pkg_hyper_commit = master
+
+PACKAGES += i18n
+pkg_i18n_name = i18n
+pkg_i18n_description = International components for unicode from Erlang (unicode, date, string, number, format, locale, localization, transliteration, icu4e)
+pkg_i18n_homepage = https://github.com/erlang-unicode/i18n
+pkg_i18n_fetch = git
+pkg_i18n_repo = https://github.com/erlang-unicode/i18n
+pkg_i18n_commit = master
+
+PACKAGES += ibrowse
+pkg_ibrowse_name = ibrowse
+pkg_ibrowse_description = Erlang HTTP client
+pkg_ibrowse_homepage = https://github.com/cmullaparthi/ibrowse
+pkg_ibrowse_fetch = git
+pkg_ibrowse_repo = https://github.com/cmullaparthi/ibrowse
+pkg_ibrowse_commit = master
+
+PACKAGES += idna
+pkg_idna_name = idna
+pkg_idna_description = Erlang IDNA lib
+pkg_idna_homepage = https://github.com/benoitc/erlang-idna
+pkg_idna_fetch = git
+pkg_idna_repo = https://github.com/benoitc/erlang-idna
+pkg_idna_commit = master
+
+PACKAGES += ierlang
+pkg_ierlang_name = ierlang
+pkg_ierlang_description = An Erlang language kernel for IPython.
+pkg_ierlang_homepage = https://github.com/robbielynch/ierlang
+pkg_ierlang_fetch = git
+pkg_ierlang_repo = https://github.com/robbielynch/ierlang
+pkg_ierlang_commit = master
+
+PACKAGES += iota
+pkg_iota_name = iota
+pkg_iota_description = iota (Inter-dependency Objective Testing Apparatus) - a tool to enforce clean separation of responsibilities in Erlang code
+pkg_iota_homepage = https://github.com/jpgneves/iota
+pkg_iota_fetch = git
+pkg_iota_repo = https://github.com/jpgneves/iota
+pkg_iota_commit = master
+
+PACKAGES += irc_lib
+pkg_irc_lib_name = irc_lib
+pkg_irc_lib_description = Erlang irc client library
+pkg_irc_lib_homepage = https://github.com/OtpChatBot/irc_lib
+pkg_irc_lib_fetch = git
+pkg_irc_lib_repo = https://github.com/OtpChatBot/irc_lib
+pkg_irc_lib_commit = master
+
+PACKAGES += ircd
+pkg_ircd_name = ircd
+pkg_ircd_description = A pluggable IRC daemon application/library for Erlang.
+pkg_ircd_homepage = https://github.com/tonyg/erlang-ircd
+pkg_ircd_fetch = git
+pkg_ircd_repo = https://github.com/tonyg/erlang-ircd
+pkg_ircd_commit = master
+
+PACKAGES += iris
+pkg_iris_name = iris
+pkg_iris_description = Iris Erlang binding
+pkg_iris_homepage = https://github.com/project-iris/iris-erl
+pkg_iris_fetch = git
+pkg_iris_repo = https://github.com/project-iris/iris-erl
+pkg_iris_commit = master
+
+PACKAGES += iso8601
+pkg_iso8601_name = iso8601
+pkg_iso8601_description = Erlang ISO 8601 date formatter/parser
+pkg_iso8601_homepage = https://github.com/seansawyer/erlang_iso8601
+pkg_iso8601_fetch = git
+pkg_iso8601_repo = https://github.com/seansawyer/erlang_iso8601
+pkg_iso8601_commit = master
+
+PACKAGES += jamdb_sybase
+pkg_jamdb_sybase_name = jamdb_sybase
+pkg_jamdb_sybase_description = Erlang driver for SAP Sybase ASE
+pkg_jamdb_sybase_homepage = https://github.com/erlangbureau/jamdb_sybase
+pkg_jamdb_sybase_fetch = git
+pkg_jamdb_sybase_repo = https://github.com/erlangbureau/jamdb_sybase
+pkg_jamdb_sybase_commit = master
+
+PACKAGES += jerg
+pkg_jerg_name = jerg
+pkg_jerg_description = JSON Schema to Erlang Records Generator
+pkg_jerg_homepage = https://github.com/ddossot/jerg
+pkg_jerg_fetch = git
+pkg_jerg_repo = https://github.com/ddossot/jerg
+pkg_jerg_commit = master
+
+PACKAGES += jesse
+pkg_jesse_name = jesse
+pkg_jesse_description = jesse (JSon Schema Erlang) is an implementation of a json schema validator for Erlang.
+pkg_jesse_homepage = https://github.com/for-GET/jesse
+pkg_jesse_fetch = git
+pkg_jesse_repo = https://github.com/for-GET/jesse
+pkg_jesse_commit = master
+
+PACKAGES += jiffy
+pkg_jiffy_name = jiffy
+pkg_jiffy_description = JSON NIFs for Erlang.
+pkg_jiffy_homepage = https://github.com/davisp/jiffy
+pkg_jiffy_fetch = git
+pkg_jiffy_repo = https://github.com/davisp/jiffy
+pkg_jiffy_commit = master
+
+PACKAGES += jiffy_v
+pkg_jiffy_v_name = jiffy_v
+pkg_jiffy_v_description = JSON validation utility
+pkg_jiffy_v_homepage = https://github.com/shizzard/jiffy-v
+pkg_jiffy_v_fetch = git
+pkg_jiffy_v_repo = https://github.com/shizzard/jiffy-v
+pkg_jiffy_v_commit = master
+
+PACKAGES += jobs
+pkg_jobs_name = jobs
+pkg_jobs_description = a Job scheduler for load regulation
+pkg_jobs_homepage = https://github.com/esl/jobs
+pkg_jobs_fetch = git
+pkg_jobs_repo = https://github.com/esl/jobs
+pkg_jobs_commit = master
+
+PACKAGES += joxa
+pkg_joxa_name = joxa
+pkg_joxa_description = A Modern Lisp for the Erlang VM
+pkg_joxa_homepage = https://github.com/joxa/joxa
+pkg_joxa_fetch = git
+pkg_joxa_repo = https://github.com/joxa/joxa
+pkg_joxa_commit = master
+
+PACKAGES += json
+pkg_json_name = json
+pkg_json_description = a high level json library for erlang (17.0+)
+pkg_json_homepage = https://github.com/talentdeficit/json
+pkg_json_fetch = git
+pkg_json_repo = https://github.com/talentdeficit/json
+pkg_json_commit = master
+
+PACKAGES += json_rec
+pkg_json_rec_name = json_rec
+pkg_json_rec_description = JSON to erlang record
+pkg_json_rec_homepage = https://github.com/justinkirby/json_rec
+pkg_json_rec_fetch = git
+pkg_json_rec_repo = https://github.com/justinkirby/json_rec
+pkg_json_rec_commit = master
+
+PACKAGES += jsone
+pkg_jsone_name = jsone
+pkg_jsone_description = An Erlang library for encoding and decoding JSON data.
+pkg_jsone_homepage = https://github.com/sile/jsone
+pkg_jsone_fetch = git
+pkg_jsone_repo = https://github.com/sile/jsone.git
+pkg_jsone_commit = master
+
+PACKAGES += jsonerl
+pkg_jsonerl_name = jsonerl
+pkg_jsonerl_description = yet another but slightly different erlang <-> json encoder/decoder
+pkg_jsonerl_homepage = https://github.com/lambder/jsonerl
+pkg_jsonerl_fetch = git
+pkg_jsonerl_repo = https://github.com/lambder/jsonerl
+pkg_jsonerl_commit = master
+
+PACKAGES += jsonpath
+pkg_jsonpath_name = jsonpath
+pkg_jsonpath_description = Fast Erlang JSON data retrieval and updates via javascript-like notation
+pkg_jsonpath_homepage = https://github.com/GeneStevens/jsonpath
+pkg_jsonpath_fetch = git
+pkg_jsonpath_repo = https://github.com/GeneStevens/jsonpath
+pkg_jsonpath_commit = master
+
+PACKAGES += jsonx
+pkg_jsonx_name = jsonx
+pkg_jsonx_description = JSONX is an Erlang library for efficiently decoding and encoding JSON, written in C.
+pkg_jsonx_homepage = https://github.com/iskra/jsonx
+pkg_jsonx_fetch = git
+pkg_jsonx_repo = https://github.com/iskra/jsonx
+pkg_jsonx_commit = master
+
+PACKAGES += jsx
+pkg_jsx_name = jsx
+pkg_jsx_description = An Erlang application for consuming, producing and manipulating JSON.
+pkg_jsx_homepage = https://github.com/talentdeficit/jsx
+pkg_jsx_fetch = git
+pkg_jsx_repo = https://github.com/talentdeficit/jsx
+pkg_jsx_commit = master
+
+PACKAGES += kafka
+pkg_kafka_name = kafka
+pkg_kafka_description = Kafka consumer and producer in Erlang
+pkg_kafka_homepage = https://github.com/wooga/kafka-erlang
+pkg_kafka_fetch = git
+pkg_kafka_repo = https://github.com/wooga/kafka-erlang
+pkg_kafka_commit = master
+
+PACKAGES += kafka_protocol
+pkg_kafka_protocol_name = kafka_protocol
+pkg_kafka_protocol_description = Kafka protocol Erlang library
+pkg_kafka_protocol_homepage = https://github.com/klarna/kafka_protocol
+pkg_kafka_protocol_fetch = git
+pkg_kafka_protocol_repo = https://github.com/klarna/kafka_protocol.git
+pkg_kafka_protocol_commit = master
+
+PACKAGES += kai
+pkg_kai_name = kai
+pkg_kai_description = DHT storage by Takeshi Inoue
+pkg_kai_homepage = https://github.com/synrc/kai
+pkg_kai_fetch = git
+pkg_kai_repo = https://github.com/synrc/kai
+pkg_kai_commit = master
+
+PACKAGES += katja
+pkg_katja_name = katja
+pkg_katja_description = A simple Riemann client written in Erlang.
+pkg_katja_homepage = https://github.com/nifoc/katja
+pkg_katja_fetch = git
+pkg_katja_repo = https://github.com/nifoc/katja
+pkg_katja_commit = master
+
+PACKAGES += kdht
+pkg_kdht_name = kdht
+pkg_kdht_description = kdht is an erlang DHT implementation
+pkg_kdht_homepage = https://github.com/kevinlynx/kdht
+pkg_kdht_fetch = git
+pkg_kdht_repo = https://github.com/kevinlynx/kdht
+pkg_kdht_commit = master
+
+PACKAGES += key2value
+pkg_key2value_name = key2value
+pkg_key2value_description = Erlang 2-way map
+pkg_key2value_homepage = https://github.com/okeuday/key2value
+pkg_key2value_fetch = git
+pkg_key2value_repo = https://github.com/okeuday/key2value
+pkg_key2value_commit = master
+
+PACKAGES += keys1value
+pkg_keys1value_name = keys1value
+pkg_keys1value_description = Erlang set associative map for key lists
+pkg_keys1value_homepage = https://github.com/okeuday/keys1value
+pkg_keys1value_fetch = git
+pkg_keys1value_repo = https://github.com/okeuday/keys1value
+pkg_keys1value_commit = master
+
+PACKAGES += kinetic
+pkg_kinetic_name = kinetic
+pkg_kinetic_description = Erlang Kinesis Client
+pkg_kinetic_homepage = https://github.com/AdRoll/kinetic
+pkg_kinetic_fetch = git
+pkg_kinetic_repo = https://github.com/AdRoll/kinetic
+pkg_kinetic_commit = master
+
+PACKAGES += kjell
+pkg_kjell_name = kjell
+pkg_kjell_description = Erlang Shell
+pkg_kjell_homepage = https://github.com/karlll/kjell
+pkg_kjell_fetch = git
+pkg_kjell_repo = https://github.com/karlll/kjell
+pkg_kjell_commit = master
+
+PACKAGES += kraken
+pkg_kraken_name = kraken
+pkg_kraken_description = Distributed Pubsub Server for Realtime Apps
+pkg_kraken_homepage = https://github.com/Asana/kraken
+pkg_kraken_fetch = git
+pkg_kraken_repo = https://github.com/Asana/kraken
+pkg_kraken_commit = master
+
+PACKAGES += kucumberl
+pkg_kucumberl_name = kucumberl
+pkg_kucumberl_description = A pure-erlang, open-source, implementation of Cucumber
+pkg_kucumberl_homepage = https://github.com/openshine/kucumberl
+pkg_kucumberl_fetch = git
+pkg_kucumberl_repo = https://github.com/openshine/kucumberl
+pkg_kucumberl_commit = master
+
+PACKAGES += kvc
+pkg_kvc_name = kvc
+pkg_kvc_description = KVC - Key Value Coding for Erlang data structures
+pkg_kvc_homepage = https://github.com/etrepum/kvc
+pkg_kvc_fetch = git
+pkg_kvc_repo = https://github.com/etrepum/kvc
+pkg_kvc_commit = master
+
+PACKAGES += kvlists
+pkg_kvlists_name = kvlists
+pkg_kvlists_description = Lists of key-value pairs (decoded JSON) in Erlang
+pkg_kvlists_homepage = https://github.com/jcomellas/kvlists
+pkg_kvlists_fetch = git
+pkg_kvlists_repo = https://github.com/jcomellas/kvlists
+pkg_kvlists_commit = master
+
+PACKAGES += kvs
+pkg_kvs_name = kvs
+pkg_kvs_description = Container and Iterator
+pkg_kvs_homepage = https://github.com/synrc/kvs
+pkg_kvs_fetch = git
+pkg_kvs_repo = https://github.com/synrc/kvs
+pkg_kvs_commit = master
+
+PACKAGES += lager
+pkg_lager_name = lager
+pkg_lager_description = A logging framework for Erlang/OTP.
+pkg_lager_homepage = https://github.com/erlang-lager/lager
+pkg_lager_fetch = git
+pkg_lager_repo = https://github.com/erlang-lager/lager
+pkg_lager_commit = master
+
+PACKAGES += lager_amqp_backend
+pkg_lager_amqp_backend_name = lager_amqp_backend
+pkg_lager_amqp_backend_description = AMQP RabbitMQ Lager backend
+pkg_lager_amqp_backend_homepage = https://github.com/jbrisbin/lager_amqp_backend
+pkg_lager_amqp_backend_fetch = git
+pkg_lager_amqp_backend_repo = https://github.com/jbrisbin/lager_amqp_backend
+pkg_lager_amqp_backend_commit = master
+
+PACKAGES += lager_syslog
+pkg_lager_syslog_name = lager_syslog
+pkg_lager_syslog_description = Syslog backend for lager
+pkg_lager_syslog_homepage = https://github.com/erlang-lager/lager_syslog
+pkg_lager_syslog_fetch = git
+pkg_lager_syslog_repo = https://github.com/erlang-lager/lager_syslog
+pkg_lager_syslog_commit = master
+
+PACKAGES += lambdapad
+pkg_lambdapad_name = lambdapad
+pkg_lambdapad_description = Static site generator using Erlang. Yes, Erlang.
+pkg_lambdapad_homepage = https://github.com/gar1t/lambdapad
+pkg_lambdapad_fetch = git
+pkg_lambdapad_repo = https://github.com/gar1t/lambdapad
+pkg_lambdapad_commit = master
+
+PACKAGES += lasp
+pkg_lasp_name = lasp
+pkg_lasp_description = A Language for Distributed, Eventually Consistent Computations
+pkg_lasp_homepage = http://lasp-lang.org/
+pkg_lasp_fetch = git
+pkg_lasp_repo = https://github.com/lasp-lang/lasp
+pkg_lasp_commit = master
+
+PACKAGES += lasse
+pkg_lasse_name = lasse
+pkg_lasse_description = SSE handler for Cowboy
+pkg_lasse_homepage = https://github.com/inaka/lasse
+pkg_lasse_fetch = git
+pkg_lasse_repo = https://github.com/inaka/lasse
+pkg_lasse_commit = master
+
+PACKAGES += ldap
+pkg_ldap_name = ldap
+pkg_ldap_description = LDAP server written in Erlang
+pkg_ldap_homepage = https://github.com/spawnproc/ldap
+pkg_ldap_fetch = git
+pkg_ldap_repo = https://github.com/spawnproc/ldap
+pkg_ldap_commit = master
+
+PACKAGES += lethink
+pkg_lethink_name = lethink
+pkg_lethink_description = erlang driver for rethinkdb
+pkg_lethink_homepage = https://github.com/taybin/lethink
+pkg_lethink_fetch = git
+pkg_lethink_repo = https://github.com/taybin/lethink
+pkg_lethink_commit = master
+
+PACKAGES += lfe
+pkg_lfe_name = lfe
+pkg_lfe_description = Lisp Flavoured Erlang (LFE)
+pkg_lfe_homepage = https://github.com/rvirding/lfe
+pkg_lfe_fetch = git
+pkg_lfe_repo = https://github.com/rvirding/lfe
+pkg_lfe_commit = master
+
+PACKAGES += ling
+pkg_ling_name = ling
+pkg_ling_description = Erlang on Xen
+pkg_ling_homepage = https://github.com/cloudozer/ling
+pkg_ling_fetch = git
+pkg_ling_repo = https://github.com/cloudozer/ling
+pkg_ling_commit = master
+
+PACKAGES += live
+pkg_live_name = live
+pkg_live_description = Automated module and configuration reloader.
+pkg_live_homepage = http://ninenines.eu
+pkg_live_fetch = git
+pkg_live_repo = https://github.com/ninenines/live
+pkg_live_commit = master
+
+PACKAGES += lmq
+pkg_lmq_name = lmq
+pkg_lmq_description = Lightweight Message Queue
+pkg_lmq_homepage = https://github.com/iij/lmq
+pkg_lmq_fetch = git
+pkg_lmq_repo = https://github.com/iij/lmq
+pkg_lmq_commit = master
+
+PACKAGES += locker
+pkg_locker_name = locker
+pkg_locker_description = Atomic distributed 'check and set' for short-lived keys
+pkg_locker_homepage = https://github.com/wooga/locker
+pkg_locker_fetch = git
+pkg_locker_repo = https://github.com/wooga/locker
+pkg_locker_commit = master
+
+PACKAGES += locks
+pkg_locks_name = locks
+pkg_locks_description = A scalable, deadlock-resolving resource locker
+pkg_locks_homepage = https://github.com/uwiger/locks
+pkg_locks_fetch = git
+pkg_locks_repo = https://github.com/uwiger/locks
+pkg_locks_commit = master
+
+PACKAGES += log4erl
+pkg_log4erl_name = log4erl
+pkg_log4erl_description = A logger for erlang in the spirit of Log4J.
+pkg_log4erl_homepage = https://github.com/ahmednawras/log4erl
+pkg_log4erl_fetch = git
+pkg_log4erl_repo = https://github.com/ahmednawras/log4erl
+pkg_log4erl_commit = master
+
+PACKAGES += lol
+pkg_lol_name = lol
+pkg_lol_description = Lisp on erLang, and programming is fun again
+pkg_lol_homepage = https://github.com/b0oh/lol
+pkg_lol_fetch = git
+pkg_lol_repo = https://github.com/b0oh/lol
+pkg_lol_commit = master
+
+PACKAGES += lucid
+pkg_lucid_name = lucid
+pkg_lucid_description = HTTP/2 server written in Erlang
+pkg_lucid_homepage = https://github.com/tatsuhiro-t/lucid
+pkg_lucid_fetch = git
+pkg_lucid_repo = https://github.com/tatsuhiro-t/lucid
+pkg_lucid_commit = master
+
+PACKAGES += luerl
+pkg_luerl_name = luerl
+pkg_luerl_description = Lua in Erlang
+pkg_luerl_homepage = https://github.com/rvirding/luerl
+pkg_luerl_fetch = git
+pkg_luerl_repo = https://github.com/rvirding/luerl
+pkg_luerl_commit = develop
+
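+# Most entries above pin pkg_*_commit = master, while a few (gen_cycle, luerl)
+# pin another branch. A consuming project can override the pin; a sketch,
+# assuming erlang.mk's usual dep_* override variables:
+#
+#   DEPS = luerl
+#   dep_luerl_commit = $(SOME_TAG_OR_SHA)   # hypothetical placeholder
+#
+# or replace the source entirely: dep_luerl = git https://github.com/rvirding/luerl <ref>
+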
+PACKAGES += luwak
+pkg_luwak_name = luwak
+pkg_luwak_description = Large-object storage interface for Riak
+pkg_luwak_homepage = https://github.com/basho/luwak
+pkg_luwak_fetch = git
+pkg_luwak_repo = https://github.com/basho/luwak
+pkg_luwak_commit = master
+
+PACKAGES += lux
+pkg_lux_name = lux
+pkg_lux_description = Lux (LUcid eXpect scripting) simplifies test automation and provides an Expect-style execution of commands
+pkg_lux_homepage = https://github.com/hawk/lux
+pkg_lux_fetch = git
+pkg_lux_repo = https://github.com/hawk/lux
+pkg_lux_commit = master
+
+PACKAGES += machi
+pkg_machi_name = machi
+pkg_machi_description = Machi file store
+pkg_machi_homepage = https://github.com/basho/machi
+pkg_machi_fetch = git
+pkg_machi_repo = https://github.com/basho/machi
+pkg_machi_commit = master
+
+PACKAGES += mad
+pkg_mad_name = mad
+pkg_mad_description = Small and Fast Rebar Replacement
+pkg_mad_homepage = https://github.com/synrc/mad
+pkg_mad_fetch = git
+pkg_mad_repo = https://github.com/synrc/mad
+pkg_mad_commit = master
+
+PACKAGES += marina
+pkg_marina_name = marina
+pkg_marina_description = Non-blocking Erlang Cassandra CQL3 client
+pkg_marina_homepage = https://github.com/lpgauth/marina
+pkg_marina_fetch = git
+pkg_marina_repo = https://github.com/lpgauth/marina
+pkg_marina_commit = master
+
+PACKAGES += mavg
+pkg_mavg_name = mavg
+pkg_mavg_description = Erlang :: Exponential moving average library
+pkg_mavg_homepage = https://github.com/EchoTeam/mavg
+pkg_mavg_fetch = git
+pkg_mavg_repo = https://github.com/EchoTeam/mavg
+pkg_mavg_commit = master
+
+PACKAGES += mc_erl
+pkg_mc_erl_name = mc_erl
+pkg_mc_erl_description = mc-erl is a server for Minecraft 1.4.7 written in Erlang.
+pkg_mc_erl_homepage = https://github.com/clonejo/mc-erl
+pkg_mc_erl_fetch = git
+pkg_mc_erl_repo = https://github.com/clonejo/mc-erl
+pkg_mc_erl_commit = master
+
+PACKAGES += mcd
+pkg_mcd_name = mcd
+pkg_mcd_description = Fast memcached protocol client in pure Erlang
+pkg_mcd_homepage = https://github.com/EchoTeam/mcd
+pkg_mcd_fetch = git
+pkg_mcd_repo = https://github.com/EchoTeam/mcd
+pkg_mcd_commit = master
+
+PACKAGES += mcerlang
+pkg_mcerlang_name = mcerlang
+pkg_mcerlang_description = The McErlang model checker for Erlang
+pkg_mcerlang_homepage = https://github.com/fredlund/McErlang
+pkg_mcerlang_fetch = git
+pkg_mcerlang_repo = https://github.com/fredlund/McErlang
+pkg_mcerlang_commit = master
+
+PACKAGES += meck
+pkg_meck_name = meck
+pkg_meck_description = A mocking library for Erlang
+pkg_meck_homepage = https://github.com/eproxus/meck
+pkg_meck_fetch = git
+pkg_meck_repo = https://github.com/eproxus/meck
+pkg_meck_commit = master
+
+PACKAGES += mekao
+pkg_mekao_name = mekao
+pkg_mekao_description = SQL constructor
+pkg_mekao_homepage = https://github.com/ddosia/mekao
+pkg_mekao_fetch = git
+pkg_mekao_repo = https://github.com/ddosia/mekao
+pkg_mekao_commit = master
+
+PACKAGES += memo
+pkg_memo_name = memo
+pkg_memo_description = Erlang memoization server
+pkg_memo_homepage = https://github.com/tuncer/memo
+pkg_memo_fetch = git
+pkg_memo_repo = https://github.com/tuncer/memo
+pkg_memo_commit = master
+
+PACKAGES += merge_index
+pkg_merge_index_name = merge_index
+pkg_merge_index_description = MergeIndex is an Erlang library for storing ordered sets on disk. It is very similar to an SSTable (in Google's Bigtable) or an HFile (in Hadoop).
+pkg_merge_index_homepage = https://github.com/basho/merge_index
+pkg_merge_index_fetch = git
+pkg_merge_index_repo = https://github.com/basho/merge_index
+pkg_merge_index_commit = master
+
+PACKAGES += merl
+pkg_merl_name = merl
+pkg_merl_description = Metaprogramming in Erlang
+pkg_merl_homepage = https://github.com/richcarl/merl
+pkg_merl_fetch = git
+pkg_merl_repo = https://github.com/richcarl/merl
+pkg_merl_commit = master
+
+PACKAGES += mimerl
+pkg_mimerl_name = mimerl
+pkg_mimerl_description = library to handle mimetypes
+pkg_mimerl_homepage = https://github.com/benoitc/mimerl
+pkg_mimerl_fetch = git
+pkg_mimerl_repo = https://github.com/benoitc/mimerl
+pkg_mimerl_commit = master
+
+PACKAGES += mimetypes
+pkg_mimetypes_name = mimetypes
+pkg_mimetypes_description = Erlang MIME types library
+pkg_mimetypes_homepage = https://github.com/spawngrid/mimetypes
+pkg_mimetypes_fetch = git
+pkg_mimetypes_repo = https://github.com/spawngrid/mimetypes
+pkg_mimetypes_commit = master
+
+PACKAGES += mixer
+pkg_mixer_name = mixer
+pkg_mixer_description = Mix in functions from other modules
+pkg_mixer_homepage = https://github.com/chef/mixer
+pkg_mixer_fetch = git
+pkg_mixer_repo = https://github.com/chef/mixer
+pkg_mixer_commit = master
+
+PACKAGES += mochiweb
+pkg_mochiweb_name = mochiweb
+pkg_mochiweb_description = MochiWeb is an Erlang library for building lightweight HTTP servers.
+pkg_mochiweb_homepage = https://github.com/mochi/mochiweb
+pkg_mochiweb_fetch = git
+pkg_mochiweb_repo = https://github.com/mochi/mochiweb
+pkg_mochiweb_commit = master
+
+PACKAGES += mochiweb_xpath
+pkg_mochiweb_xpath_name = mochiweb_xpath
+pkg_mochiweb_xpath_description = XPath support for mochiweb's html parser
+pkg_mochiweb_xpath_homepage = https://github.com/retnuh/mochiweb_xpath
+pkg_mochiweb_xpath_fetch = git
+pkg_mochiweb_xpath_repo = https://github.com/retnuh/mochiweb_xpath
+pkg_mochiweb_xpath_commit = master
+
+PACKAGES += mockgyver
+pkg_mockgyver_name = mockgyver
+pkg_mockgyver_description = A mocking library for Erlang
+pkg_mockgyver_homepage = https://github.com/klajo/mockgyver
+pkg_mockgyver_fetch = git
+pkg_mockgyver_repo = https://github.com/klajo/mockgyver
+pkg_mockgyver_commit = master
+
+PACKAGES += modlib
+pkg_modlib_name = modlib
+pkg_modlib_description = Web framework based on Erlang's inets httpd
+pkg_modlib_homepage = https://github.com/gar1t/modlib
+pkg_modlib_fetch = git
+pkg_modlib_repo = https://github.com/gar1t/modlib
+pkg_modlib_commit = master
+
+PACKAGES += mongodb
+pkg_mongodb_name = mongodb
+pkg_mongodb_description = MongoDB driver for Erlang
+pkg_mongodb_homepage = https://github.com/comtihon/mongodb-erlang
+pkg_mongodb_fetch = git
+pkg_mongodb_repo = https://github.com/comtihon/mongodb-erlang
+pkg_mongodb_commit = master
+
+PACKAGES += mongooseim
+pkg_mongooseim_name = mongooseim
+pkg_mongooseim_description = Jabber / XMPP server with focus on performance and scalability, by Erlang Solutions
+pkg_mongooseim_homepage = https://www.erlang-solutions.com/products/mongooseim-massively-scalable-ejabberd-platform
+pkg_mongooseim_fetch = git
+pkg_mongooseim_repo = https://github.com/esl/MongooseIM
+pkg_mongooseim_commit = master
+
+PACKAGES += moyo
+pkg_moyo_name = moyo
+pkg_moyo_description = Erlang utility functions library
+pkg_moyo_homepage = https://github.com/dwango/moyo
+pkg_moyo_fetch = git
+pkg_moyo_repo = https://github.com/dwango/moyo
+pkg_moyo_commit = master
+
+PACKAGES += msgpack
+pkg_msgpack_name = msgpack
+pkg_msgpack_description = MessagePack (de)serializer implementation for Erlang
+pkg_msgpack_homepage = https://github.com/msgpack/msgpack-erlang
+pkg_msgpack_fetch = git
+pkg_msgpack_repo = https://github.com/msgpack/msgpack-erlang
+pkg_msgpack_commit = master
+
+PACKAGES += mu2
+pkg_mu2_name = mu2
+pkg_mu2_description = Erlang mutation testing tool
+pkg_mu2_homepage = https://github.com/ramsay-t/mu2
+pkg_mu2_fetch = git
+pkg_mu2_repo = https://github.com/ramsay-t/mu2
+pkg_mu2_commit = master
+
+PACKAGES += mustache
+pkg_mustache_name = mustache
+pkg_mustache_description = Mustache template engine for Erlang.
+pkg_mustache_homepage = https://github.com/mojombo/mustache.erl
+pkg_mustache_fetch = git
+pkg_mustache_repo = https://github.com/mojombo/mustache.erl
+pkg_mustache_commit = master
+
+PACKAGES += myproto
+pkg_myproto_name = myproto
+pkg_myproto_description = MySQL Server Protocol in Erlang
+pkg_myproto_homepage = https://github.com/altenwald/myproto
+pkg_myproto_fetch = git
+pkg_myproto_repo = https://github.com/altenwald/myproto
+pkg_myproto_commit = master
+
+PACKAGES += mysql
+pkg_mysql_name = mysql
+pkg_mysql_description = MySQL client library for Erlang/OTP
+pkg_mysql_homepage = https://github.com/mysql-otp/mysql-otp
+pkg_mysql_fetch = git
+pkg_mysql_repo = https://github.com/mysql-otp/mysql-otp
+pkg_mysql_commit = 1.5.1
+
+PACKAGES += n2o
+pkg_n2o_name = n2o
+pkg_n2o_description = WebSocket Application Server
+pkg_n2o_homepage = https://github.com/5HT/n2o
+pkg_n2o_fetch = git
+pkg_n2o_repo = https://github.com/5HT/n2o
+pkg_n2o_commit = master
+
+PACKAGES += nat_upnp
+pkg_nat_upnp_name = nat_upnp
+pkg_nat_upnp_description = Erlang library to map your internal port to an external one using UPnP IGD
+pkg_nat_upnp_homepage = https://github.com/benoitc/nat_upnp
+pkg_nat_upnp_fetch = git
+pkg_nat_upnp_repo = https://github.com/benoitc/nat_upnp
+pkg_nat_upnp_commit = master
+
+PACKAGES += neo4j
+pkg_neo4j_name = neo4j
+pkg_neo4j_description = Erlang client library for Neo4J.
+pkg_neo4j_homepage = https://github.com/dmitriid/neo4j-erlang
+pkg_neo4j_fetch = git
+pkg_neo4j_repo = https://github.com/dmitriid/neo4j-erlang
+pkg_neo4j_commit = master
+
+PACKAGES += neotoma
+pkg_neotoma_name = neotoma
+pkg_neotoma_description = Erlang library and packrat parser-generator for parsing expression grammars.
+pkg_neotoma_homepage = https://github.com/seancribbs/neotoma
+pkg_neotoma_fetch = git
+pkg_neotoma_repo = https://github.com/seancribbs/neotoma
+pkg_neotoma_commit = master
+
+PACKAGES += newrelic
+pkg_newrelic_name = newrelic
+pkg_newrelic_description = Erlang library for sending metrics to New Relic
+pkg_newrelic_homepage = https://github.com/wooga/newrelic-erlang
+pkg_newrelic_fetch = git
+pkg_newrelic_repo = https://github.com/wooga/newrelic-erlang
+pkg_newrelic_commit = master
+
+PACKAGES += nifty
+pkg_nifty_name = nifty
+pkg_nifty_description = Erlang NIF wrapper generator
+pkg_nifty_homepage = https://github.com/parapluu/nifty
+pkg_nifty_fetch = git
+pkg_nifty_repo = https://github.com/parapluu/nifty
+pkg_nifty_commit = master
+
+PACKAGES += nitrogen_core
+pkg_nitrogen_core_name = nitrogen_core
+pkg_nitrogen_core_description = The core Nitrogen library.
+pkg_nitrogen_core_homepage = http://nitrogenproject.com/
+pkg_nitrogen_core_fetch = git
+pkg_nitrogen_core_repo = https://github.com/nitrogen/nitrogen_core
+pkg_nitrogen_core_commit = master
+
+PACKAGES += nkbase
+pkg_nkbase_name = nkbase
+pkg_nkbase_description = NkBASE distributed database
+pkg_nkbase_homepage = https://github.com/Nekso/nkbase
+pkg_nkbase_fetch = git
+pkg_nkbase_repo = https://github.com/Nekso/nkbase
+pkg_nkbase_commit = develop
+
+PACKAGES += nkdocker
+pkg_nkdocker_name = nkdocker
+pkg_nkdocker_description = Erlang Docker client
+pkg_nkdocker_homepage = https://github.com/Nekso/nkdocker
+pkg_nkdocker_fetch = git
+pkg_nkdocker_repo = https://github.com/Nekso/nkdocker
+pkg_nkdocker_commit = master
+
+PACKAGES += nkpacket
+pkg_nkpacket_name = nkpacket
+pkg_nkpacket_description = Generic Erlang transport layer
+pkg_nkpacket_homepage = https://github.com/Nekso/nkpacket
+pkg_nkpacket_fetch = git
+pkg_nkpacket_repo = https://github.com/Nekso/nkpacket
+pkg_nkpacket_commit = master
+
+PACKAGES += nksip
+pkg_nksip_name = nksip
+pkg_nksip_description = Erlang SIP application server
+pkg_nksip_homepage = https://github.com/kalta/nksip
+pkg_nksip_fetch = git
+pkg_nksip_repo = https://github.com/kalta/nksip
+pkg_nksip_commit = master
+
+PACKAGES += nodefinder
+pkg_nodefinder_name = nodefinder
+pkg_nodefinder_description = automatic node discovery via UDP multicast
+pkg_nodefinder_homepage = https://github.com/erlanger/nodefinder
+pkg_nodefinder_fetch = git
+pkg_nodefinder_repo = https://github.com/okeuday/nodefinder
+pkg_nodefinder_commit = master
+
+PACKAGES += nprocreg
+pkg_nprocreg_name = nprocreg
+pkg_nprocreg_description = Minimal Distributed Erlang Process Registry
+pkg_nprocreg_homepage = http://nitrogenproject.com/
+pkg_nprocreg_fetch = git
+pkg_nprocreg_repo = https://github.com/nitrogen/nprocreg
+pkg_nprocreg_commit = master
+
+PACKAGES += oauth
+pkg_oauth_name = oauth
+pkg_oauth_description = An Erlang OAuth 1.0 implementation
+pkg_oauth_homepage = https://github.com/tim/erlang-oauth
+pkg_oauth_fetch = git
+pkg_oauth_repo = https://github.com/tim/erlang-oauth
+pkg_oauth_commit = master
+
+PACKAGES += oauth2
+pkg_oauth2_name = oauth2
+pkg_oauth2_description = Erlang OAuth2 implementation
+pkg_oauth2_homepage = https://github.com/kivra/oauth2
+pkg_oauth2_fetch = git
+pkg_oauth2_repo = https://github.com/kivra/oauth2
+pkg_oauth2_commit = master
+
+PACKAGES += observer_cli
+pkg_observer_cli_name = observer_cli
+pkg_observer_cli_description = Visualize Erlang/Elixir Nodes On The Command Line
+pkg_observer_cli_homepage = http://zhongwencool.github.io/observer_cli
+pkg_observer_cli_fetch = git
+pkg_observer_cli_repo = https://github.com/zhongwencool/observer_cli
+pkg_observer_cli_commit = master
+
+PACKAGES += octopus
+pkg_octopus_name = octopus
+pkg_octopus_description = Small and flexible pool manager written in Erlang
+pkg_octopus_homepage = https://github.com/erlangbureau/octopus
+pkg_octopus_fetch = git
+pkg_octopus_repo = https://github.com/erlangbureau/octopus
+pkg_octopus_commit = master
+
+PACKAGES += of_protocol
+pkg_of_protocol_name = of_protocol
+pkg_of_protocol_description = OpenFlow Protocol Library for Erlang
+pkg_of_protocol_homepage = https://github.com/FlowForwarding/of_protocol
+pkg_of_protocol_fetch = git
+pkg_of_protocol_repo = https://github.com/FlowForwarding/of_protocol
+pkg_of_protocol_commit = master
+
+PACKAGES += opencouch
+pkg_opencouch_name = couch
+pkg_opencouch_description = An embeddable document-oriented database compatible with Apache CouchDB
+pkg_opencouch_homepage = https://github.com/benoitc/opencouch
+pkg_opencouch_fetch = git
+pkg_opencouch_repo = https://github.com/benoitc/opencouch
+pkg_opencouch_commit = master
+
+PACKAGES += openflow
+pkg_openflow_name = openflow
+pkg_openflow_description = An OpenFlow controller written in pure erlang
+pkg_openflow_homepage = https://github.com/renatoaguiar/erlang-openflow
+pkg_openflow_fetch = git
+pkg_openflow_repo = https://github.com/renatoaguiar/erlang-openflow
+pkg_openflow_commit = master
+
+PACKAGES += openid
+pkg_openid_name = openid
+pkg_openid_description = Erlang OpenID
+pkg_openid_homepage = https://github.com/brendonh/erl_openid
+pkg_openid_fetch = git
+pkg_openid_repo = https://github.com/brendonh/erl_openid
+pkg_openid_commit = master
+
+PACKAGES += openpoker
+pkg_openpoker_name = openpoker
+pkg_openpoker_description = Genesis Texas hold'em Game Server
+pkg_openpoker_homepage = https://github.com/hpyhacking/openpoker
+pkg_openpoker_fetch = git
+pkg_openpoker_repo = https://github.com/hpyhacking/openpoker
+pkg_openpoker_commit = master
+
+PACKAGES += otpbp
+pkg_otpbp_name = otpbp
+pkg_otpbp_description = Parse transform for using new OTP functions in old Erlang/OTP releases (R15, R16, 17, 18, 19)
+pkg_otpbp_homepage = https://github.com/Ledest/otpbp
+pkg_otpbp_fetch = git
+pkg_otpbp_repo = https://github.com/Ledest/otpbp
+pkg_otpbp_commit = master
+
+PACKAGES += pal
+pkg_pal_name = pal
+pkg_pal_description = Pragmatic Authentication Library
+pkg_pal_homepage = https://github.com/manifest/pal
+pkg_pal_fetch = git
+pkg_pal_repo = https://github.com/manifest/pal
+pkg_pal_commit = master
+
+PACKAGES += parse_trans
+pkg_parse_trans_name = parse_trans
+pkg_parse_trans_description = Parse transform utilities for Erlang
+pkg_parse_trans_homepage = https://github.com/uwiger/parse_trans
+pkg_parse_trans_fetch = git
+pkg_parse_trans_repo = https://github.com/uwiger/parse_trans
+pkg_parse_trans_commit = master
+
+PACKAGES += parsexml
+pkg_parsexml_name = parsexml
+pkg_parsexml_description = Simple DOM XML parser with convenient and very simple API
+pkg_parsexml_homepage = https://github.com/maxlapshin/parsexml
+pkg_parsexml_fetch = git
+pkg_parsexml_repo = https://github.com/maxlapshin/parsexml
+pkg_parsexml_commit = master
+
+PACKAGES += partisan
+pkg_partisan_name = partisan
+pkg_partisan_description = High-performance, high-scalability distributed computing with Erlang and Elixir.
+pkg_partisan_homepage = http://partisan.cloud
+pkg_partisan_fetch = git
+pkg_partisan_repo = https://github.com/lasp-lang/partisan
+pkg_partisan_commit = master
+
+PACKAGES += pegjs
+pkg_pegjs_name = pegjs
+pkg_pegjs_description = An implementation of PEG.js grammar for Erlang.
+pkg_pegjs_homepage = https://github.com/dmitriid/pegjs
+pkg_pegjs_fetch = git
+pkg_pegjs_repo = https://github.com/dmitriid/pegjs
+pkg_pegjs_commit = master
+
+PACKAGES += percept2
+pkg_percept2_name = percept2
+pkg_percept2_description = Concurrent profiling tool for Erlang
+pkg_percept2_homepage = https://github.com/huiqing/percept2
+pkg_percept2_fetch = git
+pkg_percept2_repo = https://github.com/huiqing/percept2
+pkg_percept2_commit = master
+
+PACKAGES += pgo
+pkg_pgo_name = pgo
+pkg_pgo_description = Erlang Postgres client and connection pool
+pkg_pgo_homepage = https://github.com/erleans/pgo.git
+pkg_pgo_fetch = git
+pkg_pgo_repo = https://github.com/erleans/pgo.git
+pkg_pgo_commit = master
+
+PACKAGES += pgsql
+pkg_pgsql_name = pgsql
+pkg_pgsql_description = Erlang PostgreSQL driver
+pkg_pgsql_homepage = https://github.com/semiocast/pgsql
+pkg_pgsql_fetch = git
+pkg_pgsql_repo = https://github.com/semiocast/pgsql
+pkg_pgsql_commit = master
+
+PACKAGES += pkgx
+pkg_pkgx_name = pkgx
+pkg_pkgx_description = Build .deb packages from Erlang releases
+pkg_pkgx_homepage = https://github.com/arjan/pkgx
+pkg_pkgx_fetch = git
+pkg_pkgx_repo = https://github.com/arjan/pkgx
+pkg_pkgx_commit = master
+
+PACKAGES += pkt
+pkg_pkt_name = pkt
+pkg_pkt_description = Erlang network protocol library
+pkg_pkt_homepage = https://github.com/msantos/pkt
+pkg_pkt_fetch = git
+pkg_pkt_repo = https://github.com/msantos/pkt
+pkg_pkt_commit = master
+
+PACKAGES += plain_fsm
+pkg_plain_fsm_name = plain_fsm
+pkg_plain_fsm_description = A behaviour/support library for writing plain Erlang FSMs.
+pkg_plain_fsm_homepage = https://github.com/uwiger/plain_fsm
+pkg_plain_fsm_fetch = git
+pkg_plain_fsm_repo = https://github.com/uwiger/plain_fsm
+pkg_plain_fsm_commit = master
+
+PACKAGES += plumtree
+pkg_plumtree_name = plumtree
+pkg_plumtree_description = Epidemic Broadcast Trees
+pkg_plumtree_homepage = https://github.com/helium/plumtree
+pkg_plumtree_fetch = git
+pkg_plumtree_repo = https://github.com/helium/plumtree
+pkg_plumtree_commit = master
+
+PACKAGES += pmod_transform
+pkg_pmod_transform_name = pmod_transform
+pkg_pmod_transform_description = Parse transform for parameterized modules
+pkg_pmod_transform_homepage = https://github.com/erlang/pmod_transform
+pkg_pmod_transform_fetch = git
+pkg_pmod_transform_repo = https://github.com/erlang/pmod_transform
+pkg_pmod_transform_commit = master
+
+PACKAGES += pobox
+pkg_pobox_name = pobox
+pkg_pobox_description = External buffer processes to protect against mailbox overflow in Erlang
+pkg_pobox_homepage = https://github.com/ferd/pobox
+pkg_pobox_fetch = git
+pkg_pobox_repo = https://github.com/ferd/pobox
+pkg_pobox_commit = master
+
+PACKAGES += ponos
+pkg_ponos_name = ponos
+pkg_ponos_description = ponos is a simple yet powerful load generator written in erlang
+pkg_ponos_homepage = https://github.com/klarna/ponos
+pkg_ponos_fetch = git
+pkg_ponos_repo = https://github.com/klarna/ponos
+pkg_ponos_commit = master
+
+PACKAGES += poolboy
+pkg_poolboy_name = poolboy
+pkg_poolboy_description = A hunky Erlang worker pool factory
+pkg_poolboy_homepage = https://github.com/devinus/poolboy
+pkg_poolboy_fetch = git
+pkg_poolboy_repo = https://github.com/devinus/poolboy
+pkg_poolboy_commit = master
+
+PACKAGES += pooler
+pkg_pooler_name = pooler
+pkg_pooler_description = An OTP Process Pool Application
+pkg_pooler_homepage = https://github.com/seth/pooler
+pkg_pooler_fetch = git
+pkg_pooler_repo = https://github.com/seth/pooler
+pkg_pooler_commit = master
+
+PACKAGES += pqueue
+pkg_pqueue_name = pqueue
+pkg_pqueue_description = Erlang Priority Queues
+pkg_pqueue_homepage = https://github.com/okeuday/pqueue
+pkg_pqueue_fetch = git
+pkg_pqueue_repo = https://github.com/okeuday/pqueue
+pkg_pqueue_commit = master
+
+PACKAGES += procket
+pkg_procket_name = procket
+pkg_procket_description = Erlang interface to low level socket operations
+pkg_procket_homepage = http://blog.listincomprehension.com/search/label/procket
+pkg_procket_fetch = git
+pkg_procket_repo = https://github.com/msantos/procket
+pkg_procket_commit = master
+
+PACKAGES += prometheus
+pkg_prometheus_name = prometheus
+pkg_prometheus_description = Prometheus.io client in Erlang
+pkg_prometheus_homepage = https://github.com/deadtrickster/prometheus.erl
+pkg_prometheus_fetch = git
+pkg_prometheus_repo = https://github.com/deadtrickster/prometheus.erl
+pkg_prometheus_commit = master
+
+PACKAGES += prop
+pkg_prop_name = prop
+pkg_prop_description = An Erlang code scaffolding and generator system.
+pkg_prop_homepage = https://github.com/nuex/prop
+pkg_prop_fetch = git
+pkg_prop_repo = https://github.com/nuex/prop
+pkg_prop_commit = master
+
+PACKAGES += proper
+pkg_proper_name = proper
+pkg_proper_description = PropEr: a QuickCheck-inspired property-based testing tool for Erlang.
+pkg_proper_homepage = http://proper.softlab.ntua.gr
+pkg_proper_fetch = git
+pkg_proper_repo = https://github.com/manopapad/proper
+pkg_proper_commit = master
+
+PACKAGES += props
+pkg_props_name = props
+pkg_props_description = Property structure library
+pkg_props_homepage = https://github.com/greyarea/props
+pkg_props_fetch = git
+pkg_props_repo = https://github.com/greyarea/props
+pkg_props_commit = master
+
+PACKAGES += protobuffs
+pkg_protobuffs_name = protobuffs
+pkg_protobuffs_description = An implementation of Google's Protocol Buffers for Erlang, based on ngerakines/erlang_protobuffs.
+pkg_protobuffs_homepage = https://github.com/basho/erlang_protobuffs
+pkg_protobuffs_fetch = git
+pkg_protobuffs_repo = https://github.com/basho/erlang_protobuffs
+pkg_protobuffs_commit = master
+
+PACKAGES += psycho
+pkg_psycho_name = psycho
+pkg_psycho_description = HTTP server that provides a WSGI-like interface for applications and middleware.
+pkg_psycho_homepage = https://github.com/gar1t/psycho
+pkg_psycho_fetch = git
+pkg_psycho_repo = https://github.com/gar1t/psycho
+pkg_psycho_commit = master
+
+PACKAGES += purity
+pkg_purity_name = purity
+pkg_purity_description = A side-effect analyzer for Erlang
+pkg_purity_homepage = https://github.com/mpitid/purity
+pkg_purity_fetch = git
+pkg_purity_repo = https://github.com/mpitid/purity
+pkg_purity_commit = master
+
+PACKAGES += push_service
+pkg_push_service_name = push_service
+pkg_push_service_description = Push service
+pkg_push_service_homepage = https://github.com/hairyhum/push_service
+pkg_push_service_fetch = git
+pkg_push_service_repo = https://github.com/hairyhum/push_service
+pkg_push_service_commit = master
+
+PACKAGES += qdate
+pkg_qdate_name = qdate
+pkg_qdate_description = Date, time, and timezone parsing, formatting, and conversion for Erlang.
+pkg_qdate_homepage = https://github.com/choptastic/qdate
+pkg_qdate_fetch = git
+pkg_qdate_repo = https://github.com/choptastic/qdate
+pkg_qdate_commit = master
+
+PACKAGES += qrcode
+pkg_qrcode_name = qrcode
+pkg_qrcode_description = QR Code encoder in Erlang
+pkg_qrcode_homepage = https://github.com/komone/qrcode
+pkg_qrcode_fetch = git
+pkg_qrcode_repo = https://github.com/komone/qrcode
+pkg_qrcode_commit = master
+
+PACKAGES += quest
+pkg_quest_name = quest
+pkg_quest_description = Learn Erlang through this set of challenges. An interactive system for getting to know Erlang.
+pkg_quest_homepage = https://github.com/eriksoe/ErlangQuest
+pkg_quest_fetch = git
+pkg_quest_repo = https://github.com/eriksoe/ErlangQuest
+pkg_quest_commit = master
+
+PACKAGES += quickrand
+pkg_quickrand_name = quickrand
+pkg_quickrand_description = Quick Erlang Random Number Generation
+pkg_quickrand_homepage = https://github.com/okeuday/quickrand
+pkg_quickrand_fetch = git
+pkg_quickrand_repo = https://github.com/okeuday/quickrand
+pkg_quickrand_commit = master
+
+PACKAGES += rabbit
+pkg_rabbit_name = rabbit
+pkg_rabbit_description = RabbitMQ Server
+pkg_rabbit_homepage = https://www.rabbitmq.com/
+pkg_rabbit_fetch = git
+pkg_rabbit_repo = https://github.com/rabbitmq/rabbitmq-server.git
+pkg_rabbit_commit = master
+
+PACKAGES += rabbit_exchange_type_riak
+pkg_rabbit_exchange_type_riak_name = rabbit_exchange_type_riak
+pkg_rabbit_exchange_type_riak_description = Custom RabbitMQ exchange type for sticking messages in Riak
+pkg_rabbit_exchange_type_riak_homepage = https://github.com/jbrisbin/riak-exchange
+pkg_rabbit_exchange_type_riak_fetch = git
+pkg_rabbit_exchange_type_riak_repo = https://github.com/jbrisbin/riak-exchange
+pkg_rabbit_exchange_type_riak_commit = master
+
+PACKAGES += rack
+pkg_rack_name = rack
+pkg_rack_description = Rack handler for erlang
+pkg_rack_homepage = https://github.com/erlyvideo/rack
+pkg_rack_fetch = git
+pkg_rack_repo = https://github.com/erlyvideo/rack
+pkg_rack_commit = master
+
+PACKAGES += radierl
+pkg_radierl_name = radierl
+pkg_radierl_description = RADIUS protocol stack implemented in Erlang.
+pkg_radierl_homepage = https://github.com/vances/radierl
+pkg_radierl_fetch = git
+pkg_radierl_repo = https://github.com/vances/radierl
+pkg_radierl_commit = master
+
+PACKAGES += rafter
+pkg_rafter_name = rafter
+pkg_rafter_description = An Erlang library application which implements the Raft consensus protocol
+pkg_rafter_homepage = https://github.com/andrewjstone/rafter
+pkg_rafter_fetch = git
+pkg_rafter_repo = https://github.com/andrewjstone/rafter
+pkg_rafter_commit = master
+
+PACKAGES += ranch
+pkg_ranch_name = ranch
+pkg_ranch_description = Socket acceptor pool for TCP protocols.
+pkg_ranch_homepage = http://ninenines.eu
+pkg_ranch_fetch = git
+pkg_ranch_repo = https://github.com/ninenines/ranch
+pkg_ranch_commit = 1.2.1
+
+PACKAGES += rbeacon
+pkg_rbeacon_name = rbeacon
+pkg_rbeacon_description = LAN discovery and presence in Erlang.
+pkg_rbeacon_homepage = https://github.com/refuge/rbeacon
+pkg_rbeacon_fetch = git
+pkg_rbeacon_repo = https://github.com/refuge/rbeacon
+pkg_rbeacon_commit = master
+
+PACKAGES += rebar
+pkg_rebar_name = rebar
+pkg_rebar_description = Erlang build tool that makes it easy to compile and test Erlang applications, port drivers and releases.
+pkg_rebar_homepage = http://www.rebar3.org
+pkg_rebar_fetch = git
+pkg_rebar_repo = https://github.com/rebar/rebar3
+pkg_rebar_commit = master
+
+PACKAGES += rebus
+pkg_rebus_name = rebus
+pkg_rebus_description = A stupid simple, internal, pub/sub event bus written in and for Erlang.
+pkg_rebus_homepage = https://github.com/olle/rebus
+pkg_rebus_fetch = git
+pkg_rebus_repo = https://github.com/olle/rebus
+pkg_rebus_commit = master
+
+PACKAGES += rec2json
+pkg_rec2json_name = rec2json
+pkg_rec2json_description = Compile erlang record definitions into modules to convert them to/from json easily.
+pkg_rec2json_homepage = https://github.com/lordnull/rec2json
+pkg_rec2json_fetch = git
+pkg_rec2json_repo = https://github.com/lordnull/rec2json
+pkg_rec2json_commit = master
+
+PACKAGES += recon
+pkg_recon_name = recon
+pkg_recon_description = Collection of functions and scripts to debug Erlang in production.
+pkg_recon_homepage = https://github.com/ferd/recon
+pkg_recon_fetch = git
+pkg_recon_repo = https://github.com/ferd/recon
+pkg_recon_commit = master
+
+PACKAGES += record_info
+pkg_record_info_name = record_info
+pkg_record_info_description = Convert between record and proplist
+pkg_record_info_homepage = https://github.com/bipthelin/erlang-record_info
+pkg_record_info_fetch = git
+pkg_record_info_repo = https://github.com/bipthelin/erlang-record_info
+pkg_record_info_commit = master
+
+PACKAGES += redgrid
+pkg_redgrid_name = redgrid
+pkg_redgrid_description = automatic Erlang node discovery via redis
+pkg_redgrid_homepage = https://github.com/jkvor/redgrid
+pkg_redgrid_fetch = git
+pkg_redgrid_repo = https://github.com/jkvor/redgrid
+pkg_redgrid_commit = master
+
+PACKAGES += redo
+pkg_redo_name = redo
+pkg_redo_description = pipelined erlang redis client
+pkg_redo_homepage = https://github.com/jkvor/redo
+pkg_redo_fetch = git
+pkg_redo_repo = https://github.com/jkvor/redo
+pkg_redo_commit = master
+
+PACKAGES += reload_mk
+pkg_reload_mk_name = reload_mk
+pkg_reload_mk_description = Live reload plugin for erlang.mk.
+pkg_reload_mk_homepage = https://github.com/bullno1/reload.mk
+pkg_reload_mk_fetch = git
+pkg_reload_mk_repo = https://github.com/bullno1/reload.mk
+pkg_reload_mk_commit = master
+
+PACKAGES += reltool_util
+pkg_reltool_util_name = reltool_util
+pkg_reltool_util_description = Erlang reltool utility functionality application
+pkg_reltool_util_homepage = https://github.com/okeuday/reltool_util
+pkg_reltool_util_fetch = git
+pkg_reltool_util_repo = https://github.com/okeuday/reltool_util
+pkg_reltool_util_commit = master
+
+PACKAGES += relx
+pkg_relx_name = relx
+pkg_relx_description = Sane, simple release creation for Erlang
+pkg_relx_homepage = https://github.com/erlware/relx
+pkg_relx_fetch = git
+pkg_relx_repo = https://github.com/erlware/relx
+pkg_relx_commit = master
+
+PACKAGES += resource_discovery
+pkg_resource_discovery_name = resource_discovery
+pkg_resource_discovery_description = An application used to dynamically discover resources present in an Erlang node cluster.
+pkg_resource_discovery_homepage = http://erlware.org/
+pkg_resource_discovery_fetch = git
+pkg_resource_discovery_repo = https://github.com/erlware/resource_discovery
+pkg_resource_discovery_commit = master
+
+PACKAGES += restc
+pkg_restc_name = restc
+pkg_restc_description = Erlang Rest Client
+pkg_restc_homepage = https://github.com/kivra/restclient
+pkg_restc_fetch = git
+pkg_restc_repo = https://github.com/kivra/restclient
+pkg_restc_commit = master
+
+PACKAGES += rfc4627_jsonrpc
+pkg_rfc4627_jsonrpc_name = rfc4627_jsonrpc
+pkg_rfc4627_jsonrpc_description = Erlang RFC4627 (JSON) codec and JSON-RPC server implementation.
+pkg_rfc4627_jsonrpc_homepage = https://github.com/tonyg/erlang-rfc4627
+pkg_rfc4627_jsonrpc_fetch = git
+pkg_rfc4627_jsonrpc_repo = https://github.com/tonyg/erlang-rfc4627
+pkg_rfc4627_jsonrpc_commit = master
+
+PACKAGES += riak_control
+pkg_riak_control_name = riak_control
+pkg_riak_control_description = Webmachine-based administration interface for Riak.
+pkg_riak_control_homepage = https://github.com/basho/riak_control
+pkg_riak_control_fetch = git
+pkg_riak_control_repo = https://github.com/basho/riak_control
+pkg_riak_control_commit = master
+
+PACKAGES += riak_core
+pkg_riak_core_name = riak_core
+pkg_riak_core_description = Distributed systems infrastructure used by Riak.
+pkg_riak_core_homepage = https://github.com/basho/riak_core
+pkg_riak_core_fetch = git
+pkg_riak_core_repo = https://github.com/basho/riak_core
+pkg_riak_core_commit = master
+
+PACKAGES += riak_dt
+pkg_riak_dt_name = riak_dt
+pkg_riak_dt_description = Convergent replicated datatypes in Erlang
+pkg_riak_dt_homepage = https://github.com/basho/riak_dt
+pkg_riak_dt_fetch = git
+pkg_riak_dt_repo = https://github.com/basho/riak_dt
+pkg_riak_dt_commit = master
+
+PACKAGES += riak_ensemble
+pkg_riak_ensemble_name = riak_ensemble
+pkg_riak_ensemble_description = Multi-Paxos framework in Erlang
+pkg_riak_ensemble_homepage = https://github.com/basho/riak_ensemble
+pkg_riak_ensemble_fetch = git
+pkg_riak_ensemble_repo = https://github.com/basho/riak_ensemble
+pkg_riak_ensemble_commit = master
+
+PACKAGES += riak_kv
+pkg_riak_kv_name = riak_kv
+pkg_riak_kv_description = Riak Key/Value Store
+pkg_riak_kv_homepage = https://github.com/basho/riak_kv
+pkg_riak_kv_fetch = git
+pkg_riak_kv_repo = https://github.com/basho/riak_kv
+pkg_riak_kv_commit = master
+
+PACKAGES += riak_pg
+pkg_riak_pg_name = riak_pg
+pkg_riak_pg_description = Distributed process groups with riak_core.
+pkg_riak_pg_homepage = https://github.com/cmeiklejohn/riak_pg
+pkg_riak_pg_fetch = git
+pkg_riak_pg_repo = https://github.com/cmeiklejohn/riak_pg
+pkg_riak_pg_commit = master
+
+PACKAGES += riak_pipe
+pkg_riak_pipe_name = riak_pipe
+pkg_riak_pipe_description = Riak Pipelines
+pkg_riak_pipe_homepage = https://github.com/basho/riak_pipe
+pkg_riak_pipe_fetch = git
+pkg_riak_pipe_repo = https://github.com/basho/riak_pipe
+pkg_riak_pipe_commit = master
+
+PACKAGES += riak_sysmon
+pkg_riak_sysmon_name = riak_sysmon
+pkg_riak_sysmon_description = Simple OTP app for managing Erlang VM system_monitor event messages
+pkg_riak_sysmon_homepage = https://github.com/basho/riak_sysmon
+pkg_riak_sysmon_fetch = git
+pkg_riak_sysmon_repo = https://github.com/basho/riak_sysmon
+pkg_riak_sysmon_commit = master
+
+PACKAGES += riak_test
+pkg_riak_test_name = riak_test
+pkg_riak_test_description = I'm in your cluster, testing your riaks
+pkg_riak_test_homepage = https://github.com/basho/riak_test
+pkg_riak_test_fetch = git
+pkg_riak_test_repo = https://github.com/basho/riak_test
+pkg_riak_test_commit = master
+
+PACKAGES += riakc
+pkg_riakc_name = riakc
+pkg_riakc_description = Erlang clients for Riak.
+pkg_riakc_homepage = https://github.com/basho/riak-erlang-client
+pkg_riakc_fetch = git
+pkg_riakc_repo = https://github.com/basho/riak-erlang-client
+pkg_riakc_commit = master
+
+PACKAGES += riakhttpc
+pkg_riakhttpc_name = riakhttpc
+pkg_riakhttpc_description = Riak Erlang client using the HTTP interface
+pkg_riakhttpc_homepage = https://github.com/basho/riak-erlang-http-client
+pkg_riakhttpc_fetch = git
+pkg_riakhttpc_repo = https://github.com/basho/riak-erlang-http-client
+pkg_riakhttpc_commit = master
+
+PACKAGES += riaknostic
+pkg_riaknostic_name = riaknostic
+pkg_riaknostic_description = A diagnostic tool for Riak installations, to find common errors asap
+pkg_riaknostic_homepage = https://github.com/basho/riaknostic
+pkg_riaknostic_fetch = git
+pkg_riaknostic_repo = https://github.com/basho/riaknostic
+pkg_riaknostic_commit = master
+
+PACKAGES += riakpool
+pkg_riakpool_name = riakpool
+pkg_riakpool_description = erlang riak client pool
+pkg_riakpool_homepage = https://github.com/dweldon/riakpool
+pkg_riakpool_fetch = git
+pkg_riakpool_repo = https://github.com/dweldon/riakpool
+pkg_riakpool_commit = master
+
+PACKAGES += rivus_cep
+pkg_rivus_cep_name = rivus_cep
+pkg_rivus_cep_description = Complex event processing in Erlang
+pkg_rivus_cep_homepage = https://github.com/vascokk/rivus_cep
+pkg_rivus_cep_fetch = git
+pkg_rivus_cep_repo = https://github.com/vascokk/rivus_cep
+pkg_rivus_cep_commit = master
+
+PACKAGES += rlimit
+pkg_rlimit_name = rlimit
+pkg_rlimit_description = Magnus Klaar's rate limiter code from etorrent
+pkg_rlimit_homepage = https://github.com/jlouis/rlimit
+pkg_rlimit_fetch = git
+pkg_rlimit_repo = https://github.com/jlouis/rlimit
+pkg_rlimit_commit = master
+
+PACKAGES += rust_mk
+pkg_rust_mk_name = rust_mk
+pkg_rust_mk_description = Build Rust crates in an Erlang application
+pkg_rust_mk_homepage = https://github.com/goertzenator/rust.mk
+pkg_rust_mk_fetch = git
+pkg_rust_mk_repo = https://github.com/goertzenator/rust.mk
+pkg_rust_mk_commit = master
+
+PACKAGES += safetyvalve
+pkg_safetyvalve_name = safetyvalve
+pkg_safetyvalve_description = A safety valve for your erlang node
+pkg_safetyvalve_homepage = https://github.com/jlouis/safetyvalve
+pkg_safetyvalve_fetch = git
+pkg_safetyvalve_repo = https://github.com/jlouis/safetyvalve
+pkg_safetyvalve_commit = master
+
+PACKAGES += seestar
+pkg_seestar_name = seestar
+pkg_seestar_description = The Erlang client for Cassandra 1.2+ binary protocol
+pkg_seestar_homepage = https://github.com/iamaleksey/seestar
+pkg_seestar_fetch = git
+pkg_seestar_repo = https://github.com/iamaleksey/seestar
+pkg_seestar_commit = master
+
+PACKAGES += service
+pkg_service_name = service
+pkg_service_description = A minimal Erlang behavior for creating CloudI internal services
+pkg_service_homepage = http://cloudi.org/
+pkg_service_fetch = git
+pkg_service_repo = https://github.com/CloudI/service
+pkg_service_commit = master
+
+PACKAGES += setup
+pkg_setup_name = setup
+pkg_setup_description = Generic setup utility for Erlang-based systems
+pkg_setup_homepage = https://github.com/uwiger/setup
+pkg_setup_fetch = git
+pkg_setup_repo = https://github.com/uwiger/setup
+pkg_setup_commit = master
+
+PACKAGES += sext
+pkg_sext_name = sext
+pkg_sext_description = Sortable Erlang Term Serialization
+pkg_sext_homepage = https://github.com/uwiger/sext
+pkg_sext_fetch = git
+pkg_sext_repo = https://github.com/uwiger/sext
+pkg_sext_commit = master
+
+PACKAGES += sfmt
+pkg_sfmt_name = sfmt
+pkg_sfmt_description = SFMT pseudo random number generator for Erlang.
+pkg_sfmt_homepage = https://github.com/jj1bdx/sfmt-erlang
+pkg_sfmt_fetch = git
+pkg_sfmt_repo = https://github.com/jj1bdx/sfmt-erlang
+pkg_sfmt_commit = master
+
+PACKAGES += sgte
+pkg_sgte_name = sgte
+pkg_sgte_description = A simple Erlang Template Engine
+pkg_sgte_homepage = https://github.com/filippo/sgte
+pkg_sgte_fetch = git
+pkg_sgte_repo = https://github.com/filippo/sgte
+pkg_sgte_commit = master
+
+PACKAGES += sheriff
+pkg_sheriff_name = sheriff
+pkg_sheriff_description = Parse transform for type based validation.
+pkg_sheriff_homepage = http://ninenines.eu
+pkg_sheriff_fetch = git
+pkg_sheriff_repo = https://github.com/extend/sheriff
+pkg_sheriff_commit = master
+
+PACKAGES += shotgun
+pkg_shotgun_name = shotgun
+pkg_shotgun_description = better than just a gun
+pkg_shotgun_homepage = https://github.com/inaka/shotgun
+pkg_shotgun_fetch = git
+pkg_shotgun_repo = https://github.com/inaka/shotgun
+pkg_shotgun_commit = master
+
+PACKAGES += sidejob
+pkg_sidejob_name = sidejob
+pkg_sidejob_description = Parallel worker and capacity limiting library for Erlang
+pkg_sidejob_homepage = https://github.com/basho/sidejob
+pkg_sidejob_fetch = git
+pkg_sidejob_repo = https://github.com/basho/sidejob
+pkg_sidejob_commit = master
+
+PACKAGES += sieve
+pkg_sieve_name = sieve
+pkg_sieve_description = sieve is a simple TCP routing proxy (layer 7) in erlang
+pkg_sieve_homepage = https://github.com/benoitc/sieve
+pkg_sieve_fetch = git
+pkg_sieve_repo = https://github.com/benoitc/sieve
+pkg_sieve_commit = master
+
+PACKAGES += sighandler
+pkg_sighandler_name = sighandler
+pkg_sighandler_description = Handle UNIX signals in Erlang
+pkg_sighandler_homepage = https://github.com/jkingsbery/sighandler
+pkg_sighandler_fetch = git
+pkg_sighandler_repo = https://github.com/jkingsbery/sighandler
+pkg_sighandler_commit = master
+
+PACKAGES += simhash
+pkg_simhash_name = simhash
+pkg_simhash_description = Simhashing for Erlang -- hashing algorithm to find near-duplicates in binary data.
+pkg_simhash_homepage = https://github.com/ferd/simhash
+pkg_simhash_fetch = git
+pkg_simhash_repo = https://github.com/ferd/simhash
+pkg_simhash_commit = master
+
+PACKAGES += simple_bridge
+pkg_simple_bridge_name = simple_bridge
+pkg_simple_bridge_description = A simple, standardized interface library to Erlang HTTP Servers.
+pkg_simple_bridge_homepage = https://github.com/nitrogen/simple_bridge
+pkg_simple_bridge_fetch = git
+pkg_simple_bridge_repo = https://github.com/nitrogen/simple_bridge
+pkg_simple_bridge_commit = master
+
+PACKAGES += simple_oauth2
+pkg_simple_oauth2_name = simple_oauth2
+pkg_simple_oauth2_description = Simple erlang OAuth2 client module for any http server framework (Google, Facebook, Yandex, Vkontakte are preconfigured)
+pkg_simple_oauth2_homepage = https://github.com/virtan/simple_oauth2
+pkg_simple_oauth2_fetch = git
+pkg_simple_oauth2_repo = https://github.com/virtan/simple_oauth2
+pkg_simple_oauth2_commit = master
+
+PACKAGES += skel
+pkg_skel_name = skel
+pkg_skel_description = A Streaming Process-based Skeleton Library for Erlang
+pkg_skel_homepage = https://github.com/ParaPhrase/skel
+pkg_skel_fetch = git
+pkg_skel_repo = https://github.com/ParaPhrase/skel
+pkg_skel_commit = master
+
+PACKAGES += slack
+pkg_slack_name = slack
+pkg_slack_description = Minimal slack notification OTP library.
+pkg_slack_homepage = https://github.com/DonBranson/slack
+pkg_slack_fetch = git
+pkg_slack_repo = https://github.com/DonBranson/slack.git
+pkg_slack_commit = master
+
+PACKAGES += smother
+pkg_smother_name = smother
+pkg_smother_description = Extended code coverage metrics for Erlang.
+pkg_smother_homepage = https://ramsay-t.github.io/Smother/
+pkg_smother_fetch = git
+pkg_smother_repo = https://github.com/ramsay-t/Smother
+pkg_smother_commit = master
+
+PACKAGES += snappyer
+pkg_snappyer_name = snappyer
+pkg_snappyer_description = Snappy as a NIF for Erlang
+pkg_snappyer_homepage = https://github.com/zmstone/snappyer
+pkg_snappyer_fetch = git
+pkg_snappyer_repo = https://github.com/zmstone/snappyer.git
+pkg_snappyer_commit = master
+
+PACKAGES += social
+pkg_social_name = social
+pkg_social_description = Cowboy handler for social login via OAuth2 providers
+pkg_social_homepage = https://github.com/dvv/social
+pkg_social_fetch = git
+pkg_social_repo = https://github.com/dvv/social
+pkg_social_commit = master
+
+PACKAGES += spapi_router
+pkg_spapi_router_name = spapi_router
+pkg_spapi_router_description = Partially-connected Erlang clustering
+pkg_spapi_router_homepage = https://github.com/spilgames/spapi-router
+pkg_spapi_router_fetch = git
+pkg_spapi_router_repo = https://github.com/spilgames/spapi-router
+pkg_spapi_router_commit = master
+
+PACKAGES += sqerl
+pkg_sqerl_name = sqerl
+pkg_sqerl_description = An Erlang-flavoured SQL DSL
+pkg_sqerl_homepage = https://github.com/hairyhum/sqerl
+pkg_sqerl_fetch = git
+pkg_sqerl_repo = https://github.com/hairyhum/sqerl
+pkg_sqerl_commit = master
+
+PACKAGES += srly
+pkg_srly_name = srly
+pkg_srly_description = Native Erlang Unix serial interface
+pkg_srly_homepage = https://github.com/msantos/srly
+pkg_srly_fetch = git
+pkg_srly_repo = https://github.com/msantos/srly
+pkg_srly_commit = master
+
+PACKAGES += sshrpc
+pkg_sshrpc_name = sshrpc
+pkg_sshrpc_description = Erlang SSH RPC module (experimental)
+pkg_sshrpc_homepage = https://github.com/jj1bdx/sshrpc
+pkg_sshrpc_fetch = git
+pkg_sshrpc_repo = https://github.com/jj1bdx/sshrpc
+pkg_sshrpc_commit = master
+
+PACKAGES += stable
+pkg_stable_name = stable
+pkg_stable_description = Library of assorted helpers for Cowboy web server.
+pkg_stable_homepage = https://github.com/dvv/stable
+pkg_stable_fetch = git
+pkg_stable_repo = https://github.com/dvv/stable
+pkg_stable_commit = master
+
+PACKAGES += statebox
+pkg_statebox_name = statebox
+pkg_statebox_description = Erlang state monad with merge/conflict-resolution capabilities. Useful for Riak.
+pkg_statebox_homepage = https://github.com/mochi/statebox
+pkg_statebox_fetch = git
+pkg_statebox_repo = https://github.com/mochi/statebox
+pkg_statebox_commit = master
+
+PACKAGES += statebox_riak
+pkg_statebox_riak_name = statebox_riak
+pkg_statebox_riak_description = Convenience library that makes it easier to use statebox with riak, extracted from best practices in our production code at Mochi Media.
+pkg_statebox_riak_homepage = https://github.com/mochi/statebox_riak
+pkg_statebox_riak_fetch = git
+pkg_statebox_riak_repo = https://github.com/mochi/statebox_riak
+pkg_statebox_riak_commit = master
+
+PACKAGES += statman
+pkg_statman_name = statman
+pkg_statman_description = Efficiently collect massive volumes of metrics inside the Erlang VM
+pkg_statman_homepage = https://github.com/knutin/statman
+pkg_statman_fetch = git
+pkg_statman_repo = https://github.com/knutin/statman
+pkg_statman_commit = master
+
+PACKAGES += statsderl
+pkg_statsderl_name = statsderl
+pkg_statsderl_description = StatsD client (erlang)
+pkg_statsderl_homepage = https://github.com/lpgauth/statsderl
+pkg_statsderl_fetch = git
+pkg_statsderl_repo = https://github.com/lpgauth/statsderl
+pkg_statsderl_commit = master
+
+PACKAGES += stdinout_pool
+pkg_stdinout_pool_name = stdinout_pool
+pkg_stdinout_pool_description = stdinout_pool : stuff goes in, stuff goes out. there's never any miscommunication.
+pkg_stdinout_pool_homepage = https://github.com/mattsta/erlang-stdinout-pool
+pkg_stdinout_pool_fetch = git
+pkg_stdinout_pool_repo = https://github.com/mattsta/erlang-stdinout-pool
+pkg_stdinout_pool_commit = master
+
+PACKAGES += stockdb
+pkg_stockdb_name = stockdb
+pkg_stockdb_description = Database for storing Stock Exchange quotes in erlang
+pkg_stockdb_homepage = https://github.com/maxlapshin/stockdb
+pkg_stockdb_fetch = git
+pkg_stockdb_repo = https://github.com/maxlapshin/stockdb
+pkg_stockdb_commit = master
+
+PACKAGES += stripe
+pkg_stripe_name = stripe
+pkg_stripe_description = Erlang interface to the stripe.com API
+pkg_stripe_homepage = https://github.com/mattsta/stripe-erlang
+pkg_stripe_fetch = git
+pkg_stripe_repo = https://github.com/mattsta/stripe-erlang
+pkg_stripe_commit = v1
+
+PACKAGES += subproc
+pkg_subproc_name = subproc
+pkg_subproc_description = unix subprocess manager with {active,once|false} modes
+pkg_subproc_homepage = http://dozzie.jarowit.net/trac/wiki/subproc
+pkg_subproc_fetch = git
+pkg_subproc_repo = https://github.com/dozzie/subproc
+pkg_subproc_commit = v0.1.0
+
+PACKAGES += supervisor3
+pkg_supervisor3_name = supervisor3
+pkg_supervisor3_description = OTP supervisor with additional strategies
+pkg_supervisor3_homepage = https://github.com/klarna/supervisor3
+pkg_supervisor3_fetch = git
+pkg_supervisor3_repo = https://github.com/klarna/supervisor3.git
+pkg_supervisor3_commit = master
+
+PACKAGES += surrogate
+pkg_surrogate_name = surrogate
+pkg_surrogate_description = Proxy server written in erlang. Supports reverse proxy load balancing and forward proxy with http (including CONNECT), socks4, socks5, and transparent proxy modes.
+pkg_surrogate_homepage = https://github.com/skruger/Surrogate
+pkg_surrogate_fetch = git
+pkg_surrogate_repo = https://github.com/skruger/Surrogate
+pkg_surrogate_commit = master
+
+PACKAGES += swab
+pkg_swab_name = swab
+pkg_swab_description = General purpose buffer handling module
+pkg_swab_homepage = https://github.com/crownedgrouse/swab
+pkg_swab_fetch = git
+pkg_swab_repo = https://github.com/crownedgrouse/swab
+pkg_swab_commit = master
+
+PACKAGES += swarm
+pkg_swarm_name = swarm
+pkg_swarm_description = Fast and simple acceptor pool for Erlang
+pkg_swarm_homepage = https://github.com/jeremey/swarm
+pkg_swarm_fetch = git
+pkg_swarm_repo = https://github.com/jeremey/swarm
+pkg_swarm_commit = master
+
+PACKAGES += switchboard
+pkg_switchboard_name = switchboard
+pkg_switchboard_description = A framework for processing email using worker plugins.
+pkg_switchboard_homepage = https://github.com/thusfresh/switchboard
+pkg_switchboard_fetch = git
+pkg_switchboard_repo = https://github.com/thusfresh/switchboard
+pkg_switchboard_commit = master
+
+PACKAGES += syn
+pkg_syn_name = syn
+pkg_syn_description = A global Process Registry and Process Group manager for Erlang.
+pkg_syn_homepage = https://github.com/ostinelli/syn
+pkg_syn_fetch = git
+pkg_syn_repo = https://github.com/ostinelli/syn
+pkg_syn_commit = master
+
+PACKAGES += sync
+pkg_sync_name = sync
+pkg_sync_description = On-the-fly recompiling and reloading in Erlang.
+pkg_sync_homepage = https://github.com/rustyio/sync
+pkg_sync_fetch = git
+pkg_sync_repo = https://github.com/rustyio/sync
+pkg_sync_commit = master
+
+PACKAGES += syntaxerl
+pkg_syntaxerl_name = syntaxerl
+pkg_syntaxerl_description = Syntax checker for Erlang
+pkg_syntaxerl_homepage = https://github.com/ten0s/syntaxerl
+pkg_syntaxerl_fetch = git
+pkg_syntaxerl_repo = https://github.com/ten0s/syntaxerl
+pkg_syntaxerl_commit = master
+
+PACKAGES += syslog
+pkg_syslog_name = syslog
+pkg_syslog_description = Erlang port driver for interacting with syslog via syslog(3)
+pkg_syslog_homepage = https://github.com/Vagabond/erlang-syslog
+pkg_syslog_fetch = git
+pkg_syslog_repo = https://github.com/Vagabond/erlang-syslog
+pkg_syslog_commit = master
+
+PACKAGES += taskforce
+pkg_taskforce_name = taskforce
+pkg_taskforce_description = Erlang worker pools for controlled parallelisation of arbitrary tasks.
+pkg_taskforce_homepage = https://github.com/g-andrade/taskforce
+pkg_taskforce_fetch = git
+pkg_taskforce_repo = https://github.com/g-andrade/taskforce
+pkg_taskforce_commit = master
+
+PACKAGES += tddreloader
+pkg_tddreloader_name = tddreloader
+pkg_tddreloader_description = Shell utility for recompiling, reloading, and testing code as it changes
+pkg_tddreloader_homepage = https://github.com/version2beta/tddreloader
+pkg_tddreloader_fetch = git
+pkg_tddreloader_repo = https://github.com/version2beta/tddreloader
+pkg_tddreloader_commit = master
+
+PACKAGES += tempo
+pkg_tempo_name = tempo
+pkg_tempo_description = NIF-based date and time parsing and formatting for Erlang.
+pkg_tempo_homepage = https://github.com/selectel/tempo
+pkg_tempo_fetch = git
+pkg_tempo_repo = https://github.com/selectel/tempo
+pkg_tempo_commit = master
+
+PACKAGES += ticktick
+pkg_ticktick_name = ticktick
+pkg_ticktick_description = Ticktick is an ID generator for message services.
+pkg_ticktick_homepage = https://github.com/ericliang/ticktick
+pkg_ticktick_fetch = git
+pkg_ticktick_repo = https://github.com/ericliang/ticktick
+pkg_ticktick_commit = master
+
+PACKAGES += tinymq
+pkg_tinymq_name = tinymq
+pkg_tinymq_description = TinyMQ - a diminutive, in-memory message queue
+pkg_tinymq_homepage = https://github.com/ChicagoBoss/tinymq
+pkg_tinymq_fetch = git
+pkg_tinymq_repo = https://github.com/ChicagoBoss/tinymq
+pkg_tinymq_commit = master
+
+PACKAGES += tinymt
+pkg_tinymt_name = tinymt
+pkg_tinymt_description = TinyMT pseudo random number generator for Erlang.
+pkg_tinymt_homepage = https://github.com/jj1bdx/tinymt-erlang
+pkg_tinymt_fetch = git
+pkg_tinymt_repo = https://github.com/jj1bdx/tinymt-erlang
+pkg_tinymt_commit = master
+
+PACKAGES += tirerl
+pkg_tirerl_name = tirerl
+pkg_tirerl_description = Erlang interface to Elastic Search
+pkg_tirerl_homepage = https://github.com/inaka/tirerl
+pkg_tirerl_fetch = git
+pkg_tirerl_repo = https://github.com/inaka/tirerl
+pkg_tirerl_commit = master
+
+PACKAGES += toml
+pkg_toml_name = toml
+pkg_toml_description = TOML (0.4.0) config parser
+pkg_toml_homepage = http://dozzie.jarowit.net/trac/wiki/TOML
+pkg_toml_fetch = git
+pkg_toml_repo = https://github.com/dozzie/toml
+pkg_toml_commit = v0.2.0
+
+PACKAGES += traffic_tools
+pkg_traffic_tools_name = traffic_tools
+pkg_traffic_tools_description = Simple traffic limiting library
+pkg_traffic_tools_homepage = https://github.com/systra/traffic_tools
+pkg_traffic_tools_fetch = git
+pkg_traffic_tools_repo = https://github.com/systra/traffic_tools
+pkg_traffic_tools_commit = master
+
+PACKAGES += trails
+pkg_trails_name = trails
+pkg_trails_description = A couple of improvements over Cowboy Routes
+pkg_trails_homepage = http://inaka.github.io/cowboy-trails/
+pkg_trails_fetch = git
+pkg_trails_repo = https://github.com/inaka/cowboy-trails
+pkg_trails_commit = master
+
+PACKAGES += trane
+pkg_trane_name = trane
+pkg_trane_description = SAX style broken HTML parser in Erlang
+pkg_trane_homepage = https://github.com/massemanet/trane
+pkg_trane_fetch = git
+pkg_trane_repo = https://github.com/massemanet/trane
+pkg_trane_commit = master
+
+PACKAGES += transit
+pkg_transit_name = transit
+pkg_transit_description = transit format for erlang
+pkg_transit_homepage = https://github.com/isaiah/transit-erlang
+pkg_transit_fetch = git
+pkg_transit_repo = https://github.com/isaiah/transit-erlang
+pkg_transit_commit = master
+
+PACKAGES += trie
+pkg_trie_name = trie
+pkg_trie_description = Erlang Trie Implementation
+pkg_trie_homepage = https://github.com/okeuday/trie
+pkg_trie_fetch = git
+pkg_trie_repo = https://github.com/okeuday/trie
+pkg_trie_commit = master
+
+PACKAGES += triq
+pkg_triq_name = triq
+pkg_triq_description = Trifork QuickCheck
+pkg_triq_homepage = https://triq.gitlab.io
+pkg_triq_fetch = git
+pkg_triq_repo = https://gitlab.com/triq/triq.git
+pkg_triq_commit = master
+
+PACKAGES += tunctl
+pkg_tunctl_name = tunctl
+pkg_tunctl_description = Erlang TUN/TAP interface
+pkg_tunctl_homepage = https://github.com/msantos/tunctl
+pkg_tunctl_fetch = git
+pkg_tunctl_repo = https://github.com/msantos/tunctl
+pkg_tunctl_commit = master
+
+PACKAGES += twerl
+pkg_twerl_name = twerl
+pkg_twerl_description = Erlang client for the Twitter Streaming API
+pkg_twerl_homepage = https://github.com/lucaspiller/twerl
+pkg_twerl_fetch = git
+pkg_twerl_repo = https://github.com/lucaspiller/twerl
+pkg_twerl_commit = oauth
+
+PACKAGES += twitter_erlang
+pkg_twitter_erlang_name = twitter_erlang
+pkg_twitter_erlang_description = An Erlang twitter client
+pkg_twitter_erlang_homepage = https://github.com/ngerakines/erlang_twitter
+pkg_twitter_erlang_fetch = git
+pkg_twitter_erlang_repo = https://github.com/ngerakines/erlang_twitter
+pkg_twitter_erlang_commit = master
+
+PACKAGES += ucol_nif
+pkg_ucol_nif_name = ucol_nif
+pkg_ucol_nif_description = ICU based collation Erlang module
+pkg_ucol_nif_homepage = https://github.com/refuge/ucol_nif
+pkg_ucol_nif_fetch = git
+pkg_ucol_nif_repo = https://github.com/refuge/ucol_nif
+pkg_ucol_nif_commit = master
+
+PACKAGES += unicorn
+pkg_unicorn_name = unicorn
+pkg_unicorn_description = Generic configuration server
+pkg_unicorn_homepage = https://github.com/shizzard/unicorn
+pkg_unicorn_fetch = git
+pkg_unicorn_repo = https://github.com/shizzard/unicorn
+pkg_unicorn_commit = master
+
+PACKAGES += unsplit
+pkg_unsplit_name = unsplit
+pkg_unsplit_description = Resolves conflicts in Mnesia after network splits
+pkg_unsplit_homepage = https://github.com/uwiger/unsplit
+pkg_unsplit_fetch = git
+pkg_unsplit_repo = https://github.com/uwiger/unsplit
+pkg_unsplit_commit = master
+
+PACKAGES += uuid
+pkg_uuid_name = uuid
+pkg_uuid_description = Erlang UUID Implementation
+pkg_uuid_homepage = https://github.com/okeuday/uuid
+pkg_uuid_fetch = git
+pkg_uuid_repo = https://github.com/okeuday/uuid
+pkg_uuid_commit = master
+
+PACKAGES += ux
+pkg_ux_name = ux
+pkg_ux_description = Unicode eXtension for Erlang (Strings, Collation)
+pkg_ux_homepage = https://github.com/erlang-unicode/ux
+pkg_ux_fetch = git
+pkg_ux_repo = https://github.com/erlang-unicode/ux
+pkg_ux_commit = master
+
+PACKAGES += vert
+pkg_vert_name = vert
+pkg_vert_description = Erlang binding to libvirt virtualization API
+pkg_vert_homepage = https://github.com/msantos/erlang-libvirt
+pkg_vert_fetch = git
+pkg_vert_repo = https://github.com/msantos/erlang-libvirt
+pkg_vert_commit = master
+
+PACKAGES += verx
+pkg_verx_name = verx
+pkg_verx_description = Erlang implementation of the libvirtd remote protocol
+pkg_verx_homepage = https://github.com/msantos/verx
+pkg_verx_fetch = git
+pkg_verx_repo = https://github.com/msantos/verx
+pkg_verx_commit = master
+
+PACKAGES += vmq_acl
+pkg_vmq_acl_name = vmq_acl
+pkg_vmq_acl_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_acl_homepage = https://verne.mq/
+pkg_vmq_acl_fetch = git
+pkg_vmq_acl_repo = https://github.com/erlio/vmq_acl
+pkg_vmq_acl_commit = master
+
+PACKAGES += vmq_bridge
+pkg_vmq_bridge_name = vmq_bridge
+pkg_vmq_bridge_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_bridge_homepage = https://verne.mq/
+pkg_vmq_bridge_fetch = git
+pkg_vmq_bridge_repo = https://github.com/erlio/vmq_bridge
+pkg_vmq_bridge_commit = master
+
+PACKAGES += vmq_graphite
+pkg_vmq_graphite_name = vmq_graphite
+pkg_vmq_graphite_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_graphite_homepage = https://verne.mq/
+pkg_vmq_graphite_fetch = git
+pkg_vmq_graphite_repo = https://github.com/erlio/vmq_graphite
+pkg_vmq_graphite_commit = master
+
+PACKAGES += vmq_passwd
+pkg_vmq_passwd_name = vmq_passwd
+pkg_vmq_passwd_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_passwd_homepage = https://verne.mq/
+pkg_vmq_passwd_fetch = git
+pkg_vmq_passwd_repo = https://github.com/erlio/vmq_passwd
+pkg_vmq_passwd_commit = master
+
+PACKAGES += vmq_server
+pkg_vmq_server_name = vmq_server
+pkg_vmq_server_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_server_homepage = https://verne.mq/
+pkg_vmq_server_fetch = git
+pkg_vmq_server_repo = https://github.com/erlio/vmq_server
+pkg_vmq_server_commit = master
+
+PACKAGES += vmq_snmp
+pkg_vmq_snmp_name = vmq_snmp
+pkg_vmq_snmp_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_snmp_homepage = https://verne.mq/
+pkg_vmq_snmp_fetch = git
+pkg_vmq_snmp_repo = https://github.com/erlio/vmq_snmp
+pkg_vmq_snmp_commit = master
+
+PACKAGES += vmq_systree
+pkg_vmq_systree_name = vmq_systree
+pkg_vmq_systree_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_systree_homepage = https://verne.mq/
+pkg_vmq_systree_fetch = git
+pkg_vmq_systree_repo = https://github.com/erlio/vmq_systree
+pkg_vmq_systree_commit = master
+
+PACKAGES += vmstats
+pkg_vmstats_name = vmstats
+pkg_vmstats_description = tiny Erlang app that works in conjunction with statsderl in order to generate information on the Erlang VM for graphite logs.
+pkg_vmstats_homepage = https://github.com/ferd/vmstats
+pkg_vmstats_fetch = git
+pkg_vmstats_repo = https://github.com/ferd/vmstats
+pkg_vmstats_commit = master
+
+PACKAGES += walrus
+pkg_walrus_name = walrus
+pkg_walrus_description = Walrus - Mustache-like Templating
+pkg_walrus_homepage = https://github.com/devinus/walrus
+pkg_walrus_fetch = git
+pkg_walrus_repo = https://github.com/devinus/walrus
+pkg_walrus_commit = master
+
+PACKAGES += webmachine
+pkg_webmachine_name = webmachine
+pkg_webmachine_description = A REST-based system for building web applications.
+pkg_webmachine_homepage = https://github.com/basho/webmachine
+pkg_webmachine_fetch = git
+pkg_webmachine_repo = https://github.com/basho/webmachine
+pkg_webmachine_commit = master
+
+PACKAGES += websocket_client
+pkg_websocket_client_name = websocket_client
+pkg_websocket_client_description = Erlang websocket client (ws and wss supported)
+pkg_websocket_client_homepage = https://github.com/jeremyong/websocket_client
+pkg_websocket_client_fetch = git
+pkg_websocket_client_repo = https://github.com/jeremyong/websocket_client
+pkg_websocket_client_commit = master
+
+PACKAGES += worker_pool
+pkg_worker_pool_name = worker_pool
+pkg_worker_pool_description = a simple erlang worker pool
+pkg_worker_pool_homepage = https://github.com/inaka/worker_pool
+pkg_worker_pool_fetch = git
+pkg_worker_pool_repo = https://github.com/inaka/worker_pool
+pkg_worker_pool_commit = master
+
+PACKAGES += wrangler
+pkg_wrangler_name = wrangler
+pkg_wrangler_description = Import of the Wrangler svn repository.
+pkg_wrangler_homepage = http://www.cs.kent.ac.uk/projects/wrangler/Home.html
+pkg_wrangler_fetch = git
+pkg_wrangler_repo = https://github.com/RefactoringTools/wrangler
+pkg_wrangler_commit = master
+
+PACKAGES += wsock
+pkg_wsock_name = wsock
+pkg_wsock_description = Erlang library to build WebSocket clients and servers
+pkg_wsock_homepage = https://github.com/madtrick/wsock
+pkg_wsock_fetch = git
+pkg_wsock_repo = https://github.com/madtrick/wsock
+pkg_wsock_commit = master
+
+PACKAGES += xhttpc
+pkg_xhttpc_name = xhttpc
+pkg_xhttpc_description = Extensible HTTP Client for Erlang
+pkg_xhttpc_homepage = https://github.com/seriyps/xhttpc
+pkg_xhttpc_fetch = git
+pkg_xhttpc_repo = https://github.com/seriyps/xhttpc
+pkg_xhttpc_commit = master
+
+PACKAGES += xref_runner
+pkg_xref_runner_name = xref_runner
+pkg_xref_runner_description = Erlang Xref Runner (inspired by rebar xref)
+pkg_xref_runner_homepage = https://github.com/inaka/xref_runner
+pkg_xref_runner_fetch = git
+pkg_xref_runner_repo = https://github.com/inaka/xref_runner
+pkg_xref_runner_commit = master
+
+PACKAGES += yamerl
+pkg_yamerl_name = yamerl
+pkg_yamerl_description = YAML 1.2 parser in pure Erlang
+pkg_yamerl_homepage = https://github.com/yakaz/yamerl
+pkg_yamerl_fetch = git
+pkg_yamerl_repo = https://github.com/yakaz/yamerl
+pkg_yamerl_commit = master
+
+PACKAGES += yamler
+pkg_yamler_name = yamler
+pkg_yamler_description = libyaml-based yaml loader for Erlang
+pkg_yamler_homepage = https://github.com/goertzenator/yamler
+pkg_yamler_fetch = git
+pkg_yamler_repo = https://github.com/goertzenator/yamler
+pkg_yamler_commit = master
+
+PACKAGES += yaws
+pkg_yaws_name = yaws
+pkg_yaws_description = Yaws webserver
+pkg_yaws_homepage = http://yaws.hyber.org
+pkg_yaws_fetch = git
+pkg_yaws_repo = https://github.com/klacke/yaws
+pkg_yaws_commit = master
+
+PACKAGES += zab_engine
+pkg_zab_engine_name = zab_engine
+pkg_zab_engine_description = Zab protocol implementation in Erlang
+pkg_zab_engine_homepage = https://github.com/xinmingyao/zab_engine
+pkg_zab_engine_fetch = git
+pkg_zab_engine_repo = https://github.com/xinmingyao/zab_engine
+pkg_zab_engine_commit = master
+
+PACKAGES += zabbix_sender
+pkg_zabbix_sender_name = zabbix_sender
+pkg_zabbix_sender_description = Zabbix trapper for sending data to Zabbix in pure Erlang
+pkg_zabbix_sender_homepage = https://github.com/stalkermn/zabbix_sender
+pkg_zabbix_sender_fetch = git
+pkg_zabbix_sender_repo = https://github.com/stalkermn/zabbix_sender.git
+pkg_zabbix_sender_commit = master
+
+PACKAGES += zeta
+pkg_zeta_name = zeta
+pkg_zeta_description = HTTP access log parser in Erlang
+pkg_zeta_homepage = https://github.com/s1n4/zeta
+pkg_zeta_fetch = git
+pkg_zeta_repo = https://github.com/s1n4/zeta
+pkg_zeta_commit = master
+
+PACKAGES += zippers
+pkg_zippers_name = zippers
+pkg_zippers_description = A library for functional zipper data structures in Erlang.
+pkg_zippers_homepage = https://github.com/ferd/zippers
+pkg_zippers_fetch = git
+pkg_zippers_repo = https://github.com/ferd/zippers
+pkg_zippers_commit = master
+
+PACKAGES += zlists
+pkg_zlists_name = zlists
+pkg_zlists_description = Erlang lazy lists library.
+pkg_zlists_homepage = https://github.com/vjache/erlang-zlists
+pkg_zlists_fetch = git
+pkg_zlists_repo = https://github.com/vjache/erlang-zlists
+pkg_zlists_commit = master
+
+PACKAGES += zraft_lib
+pkg_zraft_lib_name = zraft_lib
+pkg_zraft_lib_description = Erlang Raft consensus protocol implementation
+pkg_zraft_lib_homepage = https://github.com/dreyk/zraft_lib
+pkg_zraft_lib_fetch = git
+pkg_zraft_lib_repo = https://github.com/dreyk/zraft_lib
+pkg_zraft_lib_commit = master
+
+PACKAGES += zucchini
+pkg_zucchini_name = zucchini
+pkg_zucchini_description = An Erlang INI parser
+pkg_zucchini_homepage = https://github.com/devinus/zucchini
+pkg_zucchini_fetch = git
+pkg_zucchini_repo = https://github.com/devinus/zucchini
+pkg_zucchini_commit = master
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: search
+
+define pkg_print
+ $(verbose) printf "%s\n" \
+ $(if $(call core_eq,$(1),$(pkg_$(1)_name)),,"Pkg name: $(1)") \
+ "App name: $(pkg_$(1)_name)" \
+ "Description: $(pkg_$(1)_description)" \
+ "Home page: $(pkg_$(1)_homepage)" \
+ "Fetch with: $(pkg_$(1)_fetch)" \
+ "Repository: $(pkg_$(1)_repo)" \
+ "Commit: $(pkg_$(1)_commit)" \
+ ""
+
+endef
+
+search:
+ifdef q
+ $(foreach p,$(PACKAGES), \
+ $(if $(findstring $(call core_lc,$(q)),$(call core_lc,$(pkg_$(p)_name) $(pkg_$(p)_description))), \
+ $(call pkg_print,$(p))))
+else
+ $(foreach p,$(PACKAGES),$(call pkg_print,$(p)))
+endif
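+
+# Usage sketch, based on the target above: `q` is matched
+# case-insensitively against each package's name and description.
+#
+#     $ make search q=websocket    # list packages mentioning "websocket"
+#     $ make search                # without q: print every known package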
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-deps clean-tmp-deps.log
+
+# Configuration.
+
+ifdef OTP_DEPS
+$(warning The variable OTP_DEPS is deprecated in favor of LOCAL_DEPS.)
+endif
+
+IGNORE_DEPS ?=
+export IGNORE_DEPS
+
+APPS_DIR ?= $(CURDIR)/apps
+export APPS_DIR
+
+DEPS_DIR ?= $(CURDIR)/deps
+export DEPS_DIR
+
+REBAR_DEPS_DIR = $(DEPS_DIR)
+export REBAR_DEPS_DIR
+
+REBAR_GIT ?= https://github.com/rebar/rebar
+REBAR_COMMIT ?= 576e12171ab8d69b048b827b92aa65d067deea01
+
+# External "early" plugins (see core/plugins.mk for regular plugins).
+# Both kinds of plugins use the core_dep_plugin macro below.
+
+define core_dep_plugin
+ifeq ($(2),$(PROJECT))
+-include $$(patsubst $(PROJECT)/%,%,$(1))
+else
+-include $(DEPS_DIR)/$(1)
+
+$(DEPS_DIR)/$(1): $(DEPS_DIR)/$(2) ;
+endif
+endef
+
+DEP_EARLY_PLUGINS ?=
+
+$(foreach p,$(DEP_EARLY_PLUGINS),\
+ $(eval $(if $(findstring /,$p),\
+ $(call core_dep_plugin,$p,$(firstword $(subst /, ,$p))),\
+ $(call core_dep_plugin,$p/early-plugins.mk,$p))))
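+
+# Illustration with a hypothetical dependency name: a bare name loads that
+# dependency's early-plugins.mk, while a path selects a specific file
+# inside the dependency named by the first path component.
+#
+#     DEP_EARLY_PLUGINS = my_dep              # $(DEPS_DIR)/my_dep/early-plugins.mk
+#     DEP_EARLY_PLUGINS = my_dep/mk/early.mk  # $(DEPS_DIR)/my_dep/mk/early.mk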
+
+# Query functions.
+
+query_fetch_method = $(if $(dep_$(1)),$(call _qfm_dep,$(word 1,$(dep_$(1)))),$(call _qfm_pkg,$(1)))
+_qfm_dep = $(if $(dep_fetch_$(1)),$(1),$(if $(IS_DEP),legacy,fail))
+_qfm_pkg = $(if $(pkg_$(1)_fetch),$(pkg_$(1)_fetch),fail)
+
+query_name = $(if $(dep_$(1)),$(1),$(if $(pkg_$(1)_name),$(pkg_$(1)_name),$(1)))
+
+query_repo = $(call _qr,$(1),$(call query_fetch_method,$(1)))
+_qr = $(if $(query_repo_$(2)),$(call query_repo_$(2),$(1)),$(call dep_repo,$(1)))
+
+query_repo_default = $(if $(dep_$(1)),$(word 2,$(dep_$(1))),$(pkg_$(1)_repo))
+query_repo_git = $(patsubst git://github.com/%,https://github.com/%,$(call query_repo_default,$(1)))
+query_repo_git-subfolder = $(call query_repo_git,$(1))
+query_repo_git-submodule = -
+query_repo_hg = $(call query_repo_default,$(1))
+query_repo_svn = $(call query_repo_default,$(1))
+query_repo_cp = $(call query_repo_default,$(1))
+query_repo_ln = $(call query_repo_default,$(1))
+query_repo_hex = https://hex.pm/packages/$(if $(word 3,$(dep_$(1))),$(word 3,$(dep_$(1))),$(1))
+query_repo_fail = -
+query_repo_legacy = -
+
+query_version = $(call _qv,$(1),$(call query_fetch_method,$(1)))
+_qv = $(if $(query_version_$(2)),$(call query_version_$(2),$(1)),$(call dep_commit,$(1)))
+
+query_version_default = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(word 3,$(dep_$(1))),$(pkg_$(1)_commit)))
+query_version_git = $(call query_version_default,$(1))
+query_version_git-subfolder = $(call query_version_git,$(1))
+query_version_git-submodule = -
+query_version_hg = $(call query_version_default,$(1))
+query_version_svn = -
+query_version_cp = -
+query_version_ln = -
+query_version_hex = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(word 2,$(dep_$(1))),$(pkg_$(1)_commit)))
+query_version_fail = -
+query_version_legacy = -
+
+query_extra = $(call _qe,$(1),$(call query_fetch_method,$(1)))
+_qe = $(if $(query_extra_$(2)),$(call query_extra_$(2),$(1)),-)
+
+query_extra_git = -
+query_extra_git-subfolder = $(if $(dep_$(1)),subfolder=$(word 4,$(dep_$(1))),-)
+query_extra_git-submodule = -
+query_extra_hg = -
+query_extra_svn = -
+query_extra_cp = -
+query_extra_ln = -
+query_extra_hex = $(if $(dep_$(1)),package-name=$(word 3,$(dep_$(1))),-)
+query_extra_fail = -
+query_extra_legacy = -
+
+query_absolute_path = $(addprefix $(DEPS_DIR)/,$(call query_name,$(1)))
+
+# Deprecated legacy query functions.
+dep_fetch = $(call query_fetch_method,$(1))
+dep_name = $(call query_name,$(1))
+dep_repo = $(call query_repo_git,$(1))
+dep_commit = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(if $(filter hex,$(word 1,$(dep_$(1)))),$(word 2,$(dep_$(1))),$(word 3,$(dep_$(1)))),$(pkg_$(1)_commit)))
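+
+# Example of the dependency specification these functions parse, shown
+# with a hypothetical declaration: the words are the fetch method, the
+# repository and the version/commit, in that order.
+#
+#     dep_cowboy = git https://github.com/ninenines/cowboy 2.9.0
+#
+# query_fetch_method, query_repo and query_version would then answer
+# `git`, `https://github.com/ninenines/cowboy` and `2.9.0` respectively.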
+
+LOCAL_DEPS_DIRS = $(foreach a,$(LOCAL_DEPS),$(if $(wildcard $(APPS_DIR)/$(a)),$(APPS_DIR)/$(a)))
+ALL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(foreach dep,$(filter-out $(IGNORE_DEPS),$(BUILD_DEPS) $(DEPS)),$(call dep_name,$(dep))))
+
+# When we are calling an app directly we don't want to include it here;
+# otherwise it would be treated both as an app and as the top-level project.
+ALL_APPS_DIRS = $(if $(wildcard $(APPS_DIR)/),$(filter-out $(APPS_DIR),$(shell find $(APPS_DIR) -maxdepth 1 -type d)))
+ifdef ROOT_DIR
+ifndef IS_APP
+ALL_APPS_DIRS := $(filter-out $(APPS_DIR)/$(notdir $(CURDIR)),$(ALL_APPS_DIRS))
+endif
+endif
+
+ifeq ($(filter $(APPS_DIR) $(DEPS_DIR),$(subst :, ,$(ERL_LIBS))),)
+ifeq ($(ERL_LIBS),)
+ ERL_LIBS = $(APPS_DIR):$(DEPS_DIR)
+else
+ ERL_LIBS := $(ERL_LIBS):$(APPS_DIR):$(DEPS_DIR)
+endif
+endif
+export ERL_LIBS
+
+export NO_AUTOPATCH
+
+# Verbosity.
+
+dep_verbose_0 = @echo " DEP $1 ($(call dep_commit,$1))";
+dep_verbose_2 = set -x;
+dep_verbose = $(dep_verbose_$(V))
+
+# Optimization: don't recompile deps unless truly necessary.
+
+ifndef IS_DEP
+ifneq ($(MAKELEVEL),0)
+$(shell rm -f ebin/dep_built)
+endif
+endif
+
+# Core targets.
+
+ALL_APPS_DIRS_TO_BUILD = $(if $(LOCAL_DEPS_DIRS)$(IS_APP),$(LOCAL_DEPS_DIRS),$(ALL_APPS_DIRS))
+
+apps:: $(ALL_APPS_DIRS) clean-tmp-deps.log | $(ERLANG_MK_TMP)
+# Create the ebin directory for all apps to make sure Erlang recognizes them
+# as proper OTP applications when using -include_lib. This is a temporary
+# fix; a proper fix would be to compile apps/* in the right order.
+ifndef IS_APP
+ifneq ($(ALL_APPS_DIRS),)
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ mkdir -p $$dep/ebin; \
+ done
+endif
+endif
+# At the top level: if LOCAL_DEPS is defined with at least one local app,
+# only compile that list of apps. Otherwise, compile everything.
+# Within an app: compile all LOCAL_DEPS that are (uncompiled) local apps.
+ifneq ($(ALL_APPS_DIRS_TO_BUILD),)
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS_TO_BUILD); do \
+ if grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/apps.log; then \
+ :; \
+ else \
+ echo $$dep >> $(ERLANG_MK_TMP)/apps.log; \
+ $(MAKE) -C $$dep $(if $(IS_TEST),test-build-app) IS_APP=1; \
+ fi \
+ done
+endif
+
+clean-tmp-deps.log:
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_TMP)/apps.log $(ERLANG_MK_TMP)/deps.log
+endif
+
+# Erlang.mk does not rebuild dependencies once they have been compiled.
+# Developers working on the top-level project and on some of its
+# dependencies at the same time may want to change this behavior.
+# There are two solutions:
+# 1. Set `FULL=1` so that all dependencies are visited and
+#    recursively recompiled if necessary.
+# 2. Set `FORCE_REBUILD=` to the specific list of dependencies that
+#    should always be recompiled (instead of the whole set).
+
+FORCE_REBUILD ?=
+
+ifeq ($(origin FULL),undefined)
+ifneq ($(strip $(force_rebuild_dep)$(FORCE_REBUILD)),)
+define force_rebuild_dep
+echo "$(FORCE_REBUILD)" | grep -qw "$$(basename "$1")"
+endef
+endif
+endif
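+
+# For instance (illustrative invocations only):
+#
+#     $ make FULL=1                          # revisit all dependencies
+#     $ make FORCE_REBUILD="cowlib ranch"    # revisit only these two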
+
+ifneq ($(SKIP_DEPS),)
+deps::
+else
+deps:: $(ALL_DEPS_DIRS) apps clean-tmp-deps.log | $(ERLANG_MK_TMP)
+ifneq ($(ALL_DEPS_DIRS),)
+ $(verbose) set -e; for dep in $(ALL_DEPS_DIRS); do \
+ if grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/deps.log; then \
+ :; \
+ else \
+ echo $$dep >> $(ERLANG_MK_TMP)/deps.log; \
+ if [ -z "$(strip $(FULL))" ] $(if $(force_rebuild_dep),&& ! ($(call force_rebuild_dep,$$dep)),) && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ elif [ -f $$dep/GNUmakefile ] || [ -f $$dep/makefile ] || [ -f $$dep/Makefile ]; then \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ else \
+ echo "Error: No Makefile to build dependency $$dep." >&2; \
+ exit 2; \
+ fi \
+ fi \
+ done
+endif
+endif
+
+# Deps related targets.
+
+# @todo rename GNUmakefile and makefile into Makefile first, if they exist
+# While the Makefile could also be named GNUmakefile or makefile,
+# in practice only the name Makefile has been needed so far.
+define dep_autopatch
+ if [ -f $(DEPS_DIR)/$(1)/erlang.mk ]; then \
+ rm -rf $(DEPS_DIR)/$1/ebin/; \
+ $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+ $(call dep_autopatch_erlang_mk,$(1)); \
+ elif [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+ if [ -f $(DEPS_DIR)/$1/rebar.lock ]; then \
+ $(call dep_autopatch2,$1); \
+ elif [ 0 != `grep -c "include ../\w*\.mk" $(DEPS_DIR)/$(1)/Makefile` ]; then \
+ $(call dep_autopatch2,$(1)); \
+ elif [ 0 != `grep -ci "^[^#].*rebar" $(DEPS_DIR)/$(1)/Makefile` ]; then \
+ $(call dep_autopatch2,$(1)); \
+ elif [ -n "`find $(DEPS_DIR)/$(1)/ -type f -name \*.mk -not -name erlang.mk -exec grep -i "^[^#].*rebar" '{}' \;`" ]; then \
+ $(call dep_autopatch2,$(1)); \
+ fi \
+ else \
+ if [ ! -d $(DEPS_DIR)/$(1)/src/ ]; then \
+ $(call dep_autopatch_noop,$(1)); \
+ else \
+ $(call dep_autopatch2,$(1)); \
+ fi \
+ fi
+endef
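+
+# In short: a dependency shipping its own erlang.mk is redirected to the
+# parent Erlang.mk; a dependency with a Makefile goes through
+# dep_autopatch2 when it appears to use rebar (a rebar.lock, or rebar
+# mentions in the Makefile or in other .mk files) or includes ../*.mk;
+# without a Makefile, a project lacking src/ gets a no-op Makefile and
+# anything else is converted with dep_autopatch2.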
+
+define dep_autopatch2
+ ! test -f $(DEPS_DIR)/$1/ebin/$1.app || \
+ mv -n $(DEPS_DIR)/$1/ebin/$1.app $(DEPS_DIR)/$1/src/$1.app.src; \
+ rm -f $(DEPS_DIR)/$1/ebin/$1.app; \
+ if [ -f $(DEPS_DIR)/$1/src/$1.app.src.script ]; then \
+ $(call erlang,$(call dep_autopatch_appsrc_script.erl,$(1))); \
+ fi; \
+ $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+ if [ -f $(DEPS_DIR)/$(1)/rebar -o -f $(DEPS_DIR)/$(1)/rebar.config -o -f $(DEPS_DIR)/$(1)/rebar.config.script -o -f $(DEPS_DIR)/$1/rebar.lock ]; then \
+ $(call dep_autopatch_fetch_rebar); \
+ $(call dep_autopatch_rebar,$(1)); \
+ else \
+ $(call dep_autopatch_gen,$(1)); \
+ fi
+endef
+
+define dep_autopatch_noop
+ printf "noop:\n" > $(DEPS_DIR)/$(1)/Makefile
+endef
+
+# Replace "include erlang.mk" with a line that loads the parent Erlang.mk
+# when one is given. Do this for all 3 possible Makefile file names.
+ifeq ($(NO_AUTOPATCH_ERLANG_MK),)
+define dep_autopatch_erlang_mk
+ for f in Makefile makefile GNUmakefile; do \
+ if [ -f $(DEPS_DIR)/$1/$$f ]; then \
+ sed -i.bak s/'include *erlang.mk'/'include $$(if $$(ERLANG_MK_FILENAME),$$(ERLANG_MK_FILENAME),erlang.mk)'/ $(DEPS_DIR)/$1/$$f; \
+ fi \
+ done
+endef
+else
+define dep_autopatch_erlang_mk
+ :
+endef
+endif
+
+define dep_autopatch_gen
+ printf "%s\n" \
+ "ERLC_OPTS = +debug_info" \
+ "include ../../erlang.mk" > $(DEPS_DIR)/$(1)/Makefile
+endef
+
+# We use flock/lockf when available to avoid concurrency issues.
+define dep_autopatch_fetch_rebar
+ if command -v flock >/dev/null; then \
+ flock $(ERLANG_MK_TMP)/rebar.lock sh -c "$(call dep_autopatch_fetch_rebar2)"; \
+ elif command -v lockf >/dev/null; then \
+ lockf $(ERLANG_MK_TMP)/rebar.lock sh -c "$(call dep_autopatch_fetch_rebar2)"; \
+ else \
+ $(call dep_autopatch_fetch_rebar2); \
+ fi
+endef
+
+define dep_autopatch_fetch_rebar2
+ if [ ! -d $(ERLANG_MK_TMP)/rebar ]; then \
+ git clone -q -n -- $(REBAR_GIT) $(ERLANG_MK_TMP)/rebar; \
+ cd $(ERLANG_MK_TMP)/rebar; \
+ git checkout -q $(REBAR_COMMIT); \
+ ./bootstrap; \
+ cd -; \
+ fi
+endef
+
+define dep_autopatch_rebar
+ if [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+ mv $(DEPS_DIR)/$(1)/Makefile $(DEPS_DIR)/$(1)/Makefile.orig.mk; \
+ fi; \
+ $(call erlang,$(call dep_autopatch_rebar.erl,$(1))); \
+ rm -f $(DEPS_DIR)/$(1)/ebin/$(1).app
+endef
+
+define dep_autopatch_rebar.erl
+ application:load(rebar),
+ application:set_env(rebar, log_level, debug),
+ rmemo:start(),
+ Conf1 = case file:consult("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config)") of
+ {ok, Conf0} -> Conf0;
+ _ -> []
+ end,
+ {Conf, OsEnv} = fun() ->
+ case filelib:is_file("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)") of
+ false -> {Conf1, []};
+ true ->
+ Bindings0 = erl_eval:new_bindings(),
+ Bindings1 = erl_eval:add_binding('CONFIG', Conf1, Bindings0),
+ Bindings = erl_eval:add_binding('SCRIPT', "$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings1),
+ Before = os:getenv(),
+ {ok, Conf2} = file:script("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings),
+ {Conf2, lists:foldl(fun(E, Acc) -> lists:delete(E, Acc) end, os:getenv(), Before)}
+ end
+ end(),
+ Write = fun (Text) ->
+ file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/Makefile)", Text, [append])
+ end,
+ Escape = fun (Text) ->
+ re:replace(Text, "\\\\$$", "\$$$$", [global, {return, list}])
+ end,
+ Write("IGNORE_DEPS += edown eper eunit_formatters meck node_package "
+ "rebar_lock_deps_plugin rebar_vsn_plugin reltool_util\n"),
+ Write("C_SRC_DIR = /path/do/not/exist\n"),
+ Write("C_SRC_TYPE = rebar\n"),
+ Write("DRV_CFLAGS = -fPIC\nexport DRV_CFLAGS\n"),
+ Write(["ERLANG_ARCH = ", rebar_utils:wordsize(), "\nexport ERLANG_ARCH\n"]),
+ ToList = fun
+ (V) when is_atom(V) -> atom_to_list(V);
+ (V) when is_list(V) -> "'\\"" ++ V ++ "\\"'"
+ end,
+ fun() ->
+ Write("ERLC_OPTS = +debug_info\nexport ERLC_OPTS\n"),
+ case lists:keyfind(erl_opts, 1, Conf) of
+ false -> ok;
+ {_, ErlOpts} ->
+ lists:foreach(fun
+ ({d, D}) ->
+ Write("ERLC_OPTS += -D" ++ ToList(D) ++ "=1\n");
+ ({d, DKey, DVal}) ->
+ Write("ERLC_OPTS += -D" ++ ToList(DKey) ++ "=" ++ ToList(DVal) ++ "\n");
+ ({i, I}) ->
+ Write(["ERLC_OPTS += -I ", I, "\n"]);
+ ({platform_define, Regex, D}) ->
+ case rebar_utils:is_arch(Regex) of
+ true -> Write("ERLC_OPTS += -D" ++ ToList(D) ++ "=1\n");
+ false -> ok
+ end;
+ ({parse_transform, PT}) ->
+ Write("ERLC_OPTS += +'{parse_transform, " ++ ToList(PT) ++ "}'\n");
+ (_) -> ok
+ end, ErlOpts)
+ end,
+ Write("\n")
+ end(),
+ GetHexVsn = fun(N, NP) ->
+ case file:consult("$(call core_native_path,$(DEPS_DIR)/$1/rebar.lock)") of
+ {ok, Lock} ->
+ io:format("~p~n", [Lock]),
+ case lists:keyfind("1.1.0", 1, Lock) of
+ {_, LockPkgs} ->
+ io:format("~p~n", [LockPkgs]),
+ case lists:keyfind(atom_to_binary(N, latin1), 1, LockPkgs) of
+ {_, {pkg, _, Vsn}, _} ->
+ io:format("~p~n", [Vsn]),
+ {N, {hex, NP, binary_to_list(Vsn)}};
+ _ ->
+ false
+ end;
+ _ ->
+ false
+ end;
+ _ ->
+ false
+ end
+ end,
+ SemVsn = fun
+ ("~>" ++ S0) ->
+ S = case S0 of
+ " " ++ S1 -> S1;
+ _ -> S0
+ end,
+ case length([ok || $$. <- S]) of
+ 0 -> S ++ ".0.0";
+ 1 -> S ++ ".0";
+ _ -> S
+ end;
+ (S) -> S
+ end,
+ fun() ->
+ File = case lists:keyfind(deps, 1, Conf) of
+ false -> [];
+ {_, Deps} ->
+ [begin case case Dep of
+ N when is_atom(N) -> GetHexVsn(N, N);
+ {N, S} when is_atom(N), is_list(S) -> {N, {hex, N, SemVsn(S)}};
+ {N, {pkg, NP}} when is_atom(N) -> GetHexVsn(N, NP);
+ {N, S, {pkg, NP}} -> {N, {hex, NP, S}};
+ {N, S} when is_tuple(S) -> {N, S};
+ {N, _, S} -> {N, S};
+ {N, _, S, _} -> {N, S};
+ _ -> false
+ end of
+ false -> ok;
+ {Name, Source} ->
+ {Method, Repo, Commit} = case Source of
+ {hex, NPV, V} -> {hex, V, NPV};
+ {git, R} -> {git, R, master};
+ {M, R, {branch, C}} -> {M, R, C};
+ {M, R, {ref, C}} -> {M, R, C};
+ {M, R, {tag, C}} -> {M, R, C};
+ {M, R, C} -> {M, R, C}
+ end,
+ Write(io_lib:format("DEPS += ~s\ndep_~s = ~s ~s ~s~n", [Name, Name, Method, Repo, Commit]))
+ end end || Dep <- Deps]
+ end
+ end(),
+ fun() ->
+ case lists:keyfind(erl_first_files, 1, Conf) of
+ false -> ok;
+ {_, Files} ->
+ Names = [[" ", case lists:reverse(F) of
+ "lre." ++ Elif -> lists:reverse(Elif);
+ "lrx." ++ Elif -> lists:reverse(Elif);
+ "lry." ++ Elif -> lists:reverse(Elif);
+ Elif -> lists:reverse(Elif)
+ end] || "src/" ++ F <- Files],
+ Write(io_lib:format("COMPILE_FIRST +=~s\n", [Names]))
+ end
+ end(),
+ Write("\n\nrebar_dep: preprocess pre-deps deps pre-app app\n"),
+ Write("\npreprocess::\n"),
+ Write("\npre-deps::\n"),
+ Write("\npre-app::\n"),
+ PatchHook = fun(Cmd) ->
+ Cmd2 = re:replace(Cmd, "^([g]?make)(.*)( -C.*)", "\\\\1\\\\3\\\\2", [{return, list}]),
+ case Cmd2 of
+ "make -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1);
+ "gmake -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1);
+ "make " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1);
+ "gmake " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1);
+ _ -> Escape(Cmd)
+ end
+ end,
+ fun() ->
+ case lists:keyfind(pre_hooks, 1, Conf) of
+ false -> ok;
+ {_, Hooks} ->
+ [case H of
+ {'get-deps', Cmd} ->
+ Write("\npre-deps::\n\t" ++ PatchHook(Cmd) ++ "\n");
+ {compile, Cmd} ->
+ Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n");
+ {Regex, compile, Cmd} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n");
+ false -> ok
+ end;
+ _ -> ok
+ end || H <- Hooks]
+ end
+ end(),
+ ShellToMk = fun(V0) ->
+ V1 = re:replace(V0, "[$$][(]", "$$\(shell ", [global]),
+ V = re:replace(V1, "([$$])(?![(])(\\\\w*)", "\\\\1(\\\\2)", [global]),
+ re:replace(V, "-Werror\\\\b", "", [{return, list}, global])
+ end,
+ PortSpecs = fun() ->
+ case lists:keyfind(port_specs, 1, Conf) of
+ false ->
+ case filelib:is_dir("$(call core_native_path,$(DEPS_DIR)/$1/c_src)") of
+ false -> [];
+ true ->
+ [{"priv/" ++ proplists:get_value(so_name, Conf, "$(1)_drv.so"),
+ proplists:get_value(port_sources, Conf, ["c_src/*.c"]), []}]
+ end;
+ {_, Specs} ->
+ lists:flatten([case S of
+ {Output, Input} -> {ShellToMk(Output), Input, []};
+ {Regex, Output, Input} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {ShellToMk(Output), Input, []};
+ false -> []
+ end;
+ {Regex, Output, Input, [{env, Env}]} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {ShellToMk(Output), Input, Env};
+ false -> []
+ end
+ end || S <- Specs])
+ end
+ end(),
+ PortSpecWrite = fun (Text) ->
+ file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/c_src/Makefile.erlang.mk)", Text, [append])
+ end,
+ case PortSpecs of
+ [] -> ok;
+ _ ->
+ Write("\npre-app::\n\t@$$\(MAKE) --no-print-directory -f c_src/Makefile.erlang.mk\n"),
+ PortSpecWrite(io_lib:format("ERL_CFLAGS ?= -finline-functions -Wall -fPIC -I \\"~s/erts-~s/include\\" -I \\"~s\\"\n",
+ [code:root_dir(), erlang:system_info(version), code:lib_dir(erl_interface, include)])),
+ PortSpecWrite(io_lib:format("ERL_LDFLAGS ?= -L \\"~s\\" -lei\n",
+ [code:lib_dir(erl_interface, lib)])),
+ [PortSpecWrite(["\n", E, "\n"]) || E <- OsEnv],
+ FilterEnv = fun(Env) ->
+ lists:flatten([case E of
+ {_, _} -> E;
+ {Regex, K, V} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {K, V};
+ false -> []
+ end
+ end || E <- Env])
+ end,
+ MergeEnv = fun(Env) ->
+ lists:foldl(fun ({K, V}, Acc) ->
+ case lists:keyfind(K, 1, Acc) of
+ false -> [{K, rebar_utils:expand_env_variable(V, K, "")}|Acc];
+ {_, V0} -> [{K, rebar_utils:expand_env_variable(V, K, V0)}|Acc]
+ end
+ end, [], Env)
+ end,
+ PortEnv = case lists:keyfind(port_env, 1, Conf) of
+ false -> [];
+ {_, PortEnv0} -> FilterEnv(PortEnv0)
+ end,
+ PortSpec = fun ({Output, Input0, Env}) ->
+ filelib:ensure_dir("$(call core_native_path,$(DEPS_DIR)/$1/)" ++ Output),
+ Input = [[" ", I] || I <- Input0],
+ PortSpecWrite([
+ [["\n", K, " = ", ShellToMk(V)] || {K, V} <- lists:reverse(MergeEnv(PortEnv))],
+ case $(PLATFORM) of
+ darwin -> "\n\nLDFLAGS += -flat_namespace -undefined suppress";
+ _ -> ""
+ end,
+ "\n\nall:: ", Output, "\n\t@:\n\n",
+ "%.o: %.c\n\t$$\(CC) -c -o $$\@ $$\< $$\(CFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.C\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.cc\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.cpp\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ [[Output, ": ", K, " += ", ShellToMk(V), "\n"] || {K, V} <- lists:reverse(MergeEnv(FilterEnv(Env)))],
+ Output, ": $$\(foreach ext,.c .C .cc .cpp,",
+ "$$\(patsubst %$$\(ext),%.o,$$\(filter %$$\(ext),$$\(wildcard", Input, "))))\n",
+ "\t$$\(CC) -o $$\@ $$\? $$\(LDFLAGS) $$\(ERL_LDFLAGS) $$\(DRV_LDFLAGS) $$\(EXE_LDFLAGS)",
+ case {filename:extension(Output), $(PLATFORM)} of
+ {[], _} -> "\n";
+ {_, darwin} -> "\n";
+ _ -> " -shared\n"
+ end])
+ end,
+ [PortSpec(S) || S <- PortSpecs]
+ end,
+ fun() ->
+ case lists:keyfind(plugins, 1, Conf) of
+ false -> ok;
+ {_, Plugins0} ->
+ Plugins = [P || P <- Plugins0, is_tuple(P)],
+ case lists:keyfind('lfe-compile', 1, Plugins) of
+ false -> ok;
+ _ -> Write("\nBUILD_DEPS = lfe lfe.mk\ndep_lfe.mk = git https://github.com/ninenines/lfe.mk master\nDEP_PLUGINS = lfe.mk\n")
+ end
+ end
+ end(),
+ Write("\ninclude $$\(if $$\(ERLANG_MK_FILENAME),$$\(ERLANG_MK_FILENAME),erlang.mk)"),
+ RunPlugin = fun(Plugin, Step) ->
+ case erlang:function_exported(Plugin, Step, 2) of
+ false -> ok;
+ true ->
+ c:cd("$(call core_native_path,$(DEPS_DIR)/$1/)"),
+ Ret = Plugin:Step({config, "", Conf, dict:new(), dict:new(), dict:new(),
+ dict:store(base_dir, "", dict:new())}, undefined),
+ io:format("rebar plugin ~p step ~p ret ~p~n", [Plugin, Step, Ret])
+ end
+ end,
+ fun() ->
+ case lists:keyfind(plugins, 1, Conf) of
+ false -> ok;
+ {_, Plugins0} ->
+ Plugins = [P || P <- Plugins0, is_atom(P)],
+ [begin
+ case lists:keyfind(deps, 1, Conf) of
+ false -> ok;
+ {_, Deps} ->
+ case lists:keyfind(P, 1, Deps) of
+ false -> ok;
+ _ ->
+ Path = "$(call core_native_path,$(DEPS_DIR)/)" ++ atom_to_list(P),
+ io:format("~s", [os:cmd("$(MAKE) -C $(call core_native_path,$(DEPS_DIR)/$1) " ++ Path)]),
+ io:format("~s", [os:cmd("$(MAKE) -C " ++ Path ++ " IS_DEP=1")]),
+ code:add_patha(Path ++ "/ebin")
+ end
+ end
+ end || P <- Plugins],
+ [case code:load_file(P) of
+ {module, P} -> ok;
+ _ ->
+ case lists:keyfind(plugin_dir, 1, Conf) of
+ false -> ok;
+ {_, PluginsDir} ->
+ ErlFile = "$(call core_native_path,$(DEPS_DIR)/$1/)" ++ PluginsDir ++ "/" ++ atom_to_list(P) ++ ".erl",
+ {ok, P, Bin} = compile:file(ErlFile, [binary]),
+ {module, P} = code:load_binary(P, ErlFile, Bin)
+ end
+ end || P <- Plugins],
+ [RunPlugin(P, preprocess) || P <- Plugins],
+ [RunPlugin(P, pre_compile) || P <- Plugins],
+ [RunPlugin(P, compile) || P <- Plugins]
+ end
+ end(),
+ halt()
+endef
+
+define dep_autopatch_appsrc_script.erl
+ AppSrc = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+ AppSrcScript = AppSrc ++ ".script",
+ {ok, Conf0} = file:consult(AppSrc),
+ Bindings0 = erl_eval:new_bindings(),
+ Bindings1 = erl_eval:add_binding('CONFIG', Conf0, Bindings0),
+ Bindings = erl_eval:add_binding('SCRIPT', AppSrcScript, Bindings1),
+ Conf = case file:script(AppSrcScript, Bindings) of
+ {ok, [C]} -> C;
+ {ok, C} -> C
+ end,
+ ok = file:write_file(AppSrc, io_lib:format("~p.~n", [Conf])),
+ halt()
+endef
+
+define dep_autopatch_appsrc.erl
+ AppSrcOut = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+ AppSrcIn = case filelib:is_regular(AppSrcOut) of false -> "$(call core_native_path,$(DEPS_DIR)/$1/ebin/$1.app)"; true -> AppSrcOut end,
+ case filelib:is_regular(AppSrcIn) of
+ false -> ok;
+ true ->
+ {ok, [{application, $(1), L0}]} = file:consult(AppSrcIn),
+ L1 = lists:keystore(modules, 1, L0, {modules, []}),
+ L2 = case lists:keyfind(vsn, 1, L1) of
+ {_, git} -> lists:keyreplace(vsn, 1, L1, {vsn, lists:droplast(os:cmd("git -C $(DEPS_DIR)/$1 describe --dirty --tags --always"))});
+ {_, {cmd, _}} -> lists:keyreplace(vsn, 1, L1, {vsn, "cmd"});
+ _ -> L1
+ end,
+ L3 = case lists:keyfind(registered, 1, L2) of false -> [{registered, []}|L2]; _ -> L2 end,
+ ok = file:write_file(AppSrcOut, io_lib:format("~p.~n", [{application, $(1), L3}])),
+ case AppSrcOut of AppSrcIn -> ok; _ -> ok = file:delete(AppSrcIn) end
+ end,
+ halt()
+endef
+
+define dep_fetch_git
+ git clone -q -n -- $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && git checkout -q $(call dep_commit,$(1));
+endef
+
+define dep_fetch_git-subfolder
+ mkdir -p $(ERLANG_MK_TMP)/git-subfolder; \
+ git clone -q -n -- $(call dep_repo,$1) \
+ $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1); \
+ cd $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1) \
+ && git checkout -q $(call dep_commit,$1); \
+ ln -s $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1)/$(word 4,$(dep_$(1))) \
+ $(DEPS_DIR)/$(call dep_name,$1);
+endef
+
+define dep_fetch_git-submodule
+ git submodule update --init -- $(DEPS_DIR)/$1;
+endef
+
+define dep_fetch_hg
+ hg clone -q -U $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && hg update -q $(call dep_commit,$(1));
+endef
+
+define dep_fetch_svn
+ svn checkout -q $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+define dep_fetch_cp
+ cp -R $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+define dep_fetch_ln
+ ln -s $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+# Hex dependencies only carry a package version; there is no need to look
+# them up in the Erlang.mk package index.
+define dep_fetch_hex
+ mkdir -p $(ERLANG_MK_TMP)/hex $(DEPS_DIR)/$1; \
+ $(call core_http_get,$(ERLANG_MK_TMP)/hex/$1.tar,\
+ https://repo.hex.pm/tarballs/$(if $(word 3,$(dep_$1)),$(word 3,$(dep_$1)),$1)-$(strip $(word 2,$(dep_$1))).tar); \
+ tar -xOf $(ERLANG_MK_TMP)/hex/$1.tar contents.tar.gz | tar -C $(DEPS_DIR)/$1 -xzf -;
+endef
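+
+# Hypothetical declarations: the second word is the Hex version and the
+# optional third word is the Hex package name, used when it differs from
+# the application name.
+#
+#     dep_jsx = hex 3.1.0
+#     dep_myapp = hex 1.0.0 hex_pkg_name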
+
+define dep_fetch_fail
+ echo "Error: Unknown or invalid dependency: $(1)." >&2; \
+ exit 78;
+endef
+
+# Kept for compatibility with older Erlang.mk configurations.
+define dep_fetch_legacy
+ $(warning WARNING: '$(1)' dependency configuration uses deprecated format.) \
+ git clone -q -n -- $(word 1,$(dep_$(1))) $(DEPS_DIR)/$(1); \
+ cd $(DEPS_DIR)/$(1) && git checkout -q $(if $(word 2,$(dep_$(1))),$(word 2,$(dep_$(1))),master);
+endef
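+
+# Shape of the deprecated specification, shown with a made-up dependency:
+# the first word is the repository and the optional second word is the
+# commit (defaulting to master).
+#
+#     dep_my_dep = https://github.com/example/my_dep.git some-branch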
+
+define dep_target
+$(DEPS_DIR)/$(call dep_name,$1): | $(ERLANG_MK_TMP)
+ $(eval DEP_NAME := $(call dep_name,$1))
+ $(eval DEP_STR := $(if $(filter $1,$(DEP_NAME)),$1,"$1 ($(DEP_NAME))"))
+ $(verbose) if test -d $(APPS_DIR)/$(DEP_NAME); then \
+ echo "Error: Dependency" $(DEP_STR) "conflicts with application found in $(APPS_DIR)/$(DEP_NAME)." >&2; \
+ exit 17; \
+ fi
+ $(verbose) mkdir -p $(DEPS_DIR)
+ $(dep_verbose) $(call dep_fetch_$(strip $(call dep_fetch,$(1))),$(1))
+ $(verbose) if [ -f $(DEPS_DIR)/$(1)/configure.ac -o -f $(DEPS_DIR)/$(1)/configure.in ] \
+ && [ ! -f $(DEPS_DIR)/$(1)/configure ]; then \
+ echo " AUTO " $(DEP_STR); \
+ cd $(DEPS_DIR)/$(1) && autoreconf -Wall -vif -I m4; \
+ fi
+ - $(verbose) if [ -f $(DEPS_DIR)/$(DEP_NAME)/configure ]; then \
+ echo " CONF " $(DEP_STR); \
+ cd $(DEPS_DIR)/$(DEP_NAME) && ./configure; \
+ fi
+ifeq ($(filter $(1),$(NO_AUTOPATCH)),)
+ $(verbose) $$(MAKE) --no-print-directory autopatch-$(DEP_NAME)
+endif
+
+.PHONY: autopatch-$(call dep_name,$1)
+
+autopatch-$(call dep_name,$1)::
+ $(verbose) if [ "$(1)" = "amqp_client" -a "$(RABBITMQ_CLIENT_PATCH)" ]; then \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \
+ echo " PATCH Downloading rabbitmq-codegen"; \
+ git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \
+ fi; \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-server ]; then \
+ echo " PATCH Downloading rabbitmq-server"; \
+ git clone https://github.com/rabbitmq/rabbitmq-server.git $(DEPS_DIR)/rabbitmq-server; \
+ fi; \
+ ln -s $(DEPS_DIR)/amqp_client/deps/rabbit_common-0.0.0 $(DEPS_DIR)/rabbit_common; \
+ elif [ "$(1)" = "rabbit" -a "$(RABBITMQ_SERVER_PATCH)" ]; then \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \
+ echo " PATCH Downloading rabbitmq-codegen"; \
+ git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \
+ fi \
+ elif [ "$1" = "elixir" -a "$(ELIXIR_PATCH)" ]; then \
+ ln -s lib/elixir/ebin $(DEPS_DIR)/elixir/; \
+ else \
+ $$(call dep_autopatch,$(call dep_name,$1)) \
+ fi
+endef
+
+$(foreach dep,$(BUILD_DEPS) $(DEPS),$(eval $(call dep_target,$(dep))))
+
+ifndef IS_APP
+clean:: clean-apps
+
+clean-apps:
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ $(MAKE) -C $$dep clean IS_APP=1; \
+ done
+
+distclean:: distclean-apps
+
+distclean-apps:
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ $(MAKE) -C $$dep distclean IS_APP=1; \
+ done
+endif
+
+ifndef SKIP_DEPS
+distclean:: distclean-deps
+
+distclean-deps:
+ $(gen_verbose) rm -rf $(DEPS_DIR)
+endif
+
+# Forward-declare variables used in core/deps-tools.mk. This is required
+# in case plugins use them.
+
+ERLANG_MK_RECURSIVE_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-deps-list.log
+ERLANG_MK_RECURSIVE_DOC_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-doc-deps-list.log
+ERLANG_MK_RECURSIVE_REL_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-rel-deps-list.log
+ERLANG_MK_RECURSIVE_TEST_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-test-deps-list.log
+ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-shell-deps-list.log
+
+ERLANG_MK_QUERY_DEPS_FILE = $(ERLANG_MK_TMP)/query-deps.log
+ERLANG_MK_QUERY_DOC_DEPS_FILE = $(ERLANG_MK_TMP)/query-doc-deps.log
+ERLANG_MK_QUERY_REL_DEPS_FILE = $(ERLANG_MK_TMP)/query-rel-deps.log
+ERLANG_MK_QUERY_TEST_DEPS_FILE = $(ERLANG_MK_TMP)/query-test-deps.log
+ERLANG_MK_QUERY_SHELL_DEPS_FILE = $(ERLANG_MK_TMP)/query-shell-deps.log
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: clean-app
+
+# Configuration.
+
+ERLC_OPTS ?= -Werror +debug_info +warn_export_vars +warn_shadow_vars \
+ +warn_obsolete_guard # +bin_opt_info +warn_export_all +warn_missing_spec
+COMPILE_FIRST ?=
+COMPILE_FIRST_PATHS = $(addprefix src/,$(addsuffix .erl,$(COMPILE_FIRST)))
+ERLC_EXCLUDE ?=
+ERLC_EXCLUDE_PATHS = $(addprefix src/,$(addsuffix .erl,$(ERLC_EXCLUDE)))
+
+ERLC_ASN1_OPTS ?=
+
+ERLC_MIB_OPTS ?=
+COMPILE_MIB_FIRST ?=
+COMPILE_MIB_FIRST_PATHS = $(addprefix mibs/,$(addsuffix .mib,$(COMPILE_MIB_FIRST)))
+
+# Verbosity.
+
+app_verbose_0 = @echo " APP " $(PROJECT);
+app_verbose_2 = set -x;
+app_verbose = $(app_verbose_$(V))
+
+appsrc_verbose_0 = @echo " APP " $(PROJECT).app.src;
+appsrc_verbose_2 = set -x;
+appsrc_verbose = $(appsrc_verbose_$(V))
+
+makedep_verbose_0 = @echo " DEPEND" $(PROJECT).d;
+makedep_verbose_2 = set -x;
+makedep_verbose = $(makedep_verbose_$(V))
+
+erlc_verbose_0 = @echo " ERLC " $(filter-out $(patsubst %,%.erl,$(ERLC_EXCLUDE)),\
+ $(filter %.erl %.core,$(?F)));
+erlc_verbose_2 = set -x;
+erlc_verbose = $(erlc_verbose_$(V))
+
+xyrl_verbose_0 = @echo " XYRL " $(filter %.xrl %.yrl,$(?F));
+xyrl_verbose_2 = set -x;
+xyrl_verbose = $(xyrl_verbose_$(V))
+
+asn1_verbose_0 = @echo " ASN1 " $(filter %.asn1,$(?F));
+asn1_verbose_2 = set -x;
+asn1_verbose = $(asn1_verbose_$(V))
+
+mib_verbose_0 = @echo " MIB " $(filter %.bin %.mib,$(?F));
+mib_verbose_2 = set -x;
+mib_verbose = $(mib_verbose_$(V))
+
+ifneq ($(wildcard src/),)
+
+# Targets.
+
+app:: $(if $(wildcard ebin/test),clean) deps
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d
+ $(verbose) $(MAKE) --no-print-directory app-build
+
+ifeq ($(wildcard src/$(PROJECT_MOD).erl),)
+define app_file
+{application, '$(PROJECT)', [
+ {description, "$(PROJECT_DESCRIPTION)"},
+ {vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP),
+ {id$(comma)$(space)"$(1)"}$(comma))
+ {modules, [$(call comma_list,$(2))]},
+ {registered, []},
+ {applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(foreach dep,$(DEPS),$(call dep_name,$(dep))))]},
+ {env, $(subst \,\\,$(PROJECT_ENV))}$(if $(findstring {,$(PROJECT_APP_EXTRA_KEYS)),$(comma)$(newline)$(tab)$(subst \,\\,$(PROJECT_APP_EXTRA_KEYS)),)
+]}.
+endef
+else
+define app_file
+{application, '$(PROJECT)', [
+ {description, "$(PROJECT_DESCRIPTION)"},
+ {vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP),
+ {id$(comma)$(space)"$(1)"}$(comma))
+ {modules, [$(call comma_list,$(2))]},
+ {registered, [$(call comma_list,$(PROJECT)_sup $(PROJECT_REGISTERED))]},
+ {applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(foreach dep,$(DEPS),$(call dep_name,$(dep))))]},
+ {mod, {$(PROJECT_MOD), []}},
+ {env, $(subst \,\\,$(PROJECT_ENV))}$(if $(findstring {,$(PROJECT_APP_EXTRA_KEYS)),$(comma)$(newline)$(tab)$(subst \,\\,$(PROJECT_APP_EXTRA_KEYS)),)
+]}.
+endef
+endif
+
+app-build: ebin/$(PROJECT).app
+ $(verbose) :
+
+# Source files.
+
+ALL_SRC_FILES := $(sort $(call core_find,src/,*))
+
+ERL_FILES := $(filter %.erl,$(ALL_SRC_FILES))
+CORE_FILES := $(filter %.core,$(ALL_SRC_FILES))
+
+# ASN.1 files.
+
+ifneq ($(wildcard asn1/),)
+ASN1_FILES = $(sort $(call core_find,asn1/,*.asn1))
+ERL_FILES += $(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES))))
+
+define compile_asn1
+ $(verbose) mkdir -p include/
+ $(asn1_verbose) erlc -v -I include/ -o asn1/ +noobj $(ERLC_ASN1_OPTS) $(1)
+ $(verbose) mv asn1/*.erl src/
+ -$(verbose) mv asn1/*.hrl include/
+ $(verbose) mv asn1/*.asn1db include/
+endef
+
+$(PROJECT).d:: $(ASN1_FILES)
+ $(if $(strip $?),$(call compile_asn1,$?))
+endif
+
+# SNMP MIB files.
+
+ifneq ($(wildcard mibs/),)
+MIB_FILES = $(sort $(call core_find,mibs/,*.mib))
+
+$(PROJECT).d:: $(COMPILE_MIB_FIRST_PATHS) $(MIB_FILES)
+ $(verbose) mkdir -p include/ priv/mibs/
+ $(mib_verbose) erlc -v $(ERLC_MIB_OPTS) -o priv/mibs/ -I priv/mibs/ $?
+ $(mib_verbose) erlc -o include/ -- $(addprefix priv/mibs/,$(patsubst %.mib,%.bin,$(notdir $?)))
+endif
+
+# Leex and Yecc files.
+
+XRL_FILES := $(filter %.xrl,$(ALL_SRC_FILES))
+XRL_ERL_FILES = $(addprefix src/,$(patsubst %.xrl,%.erl,$(notdir $(XRL_FILES))))
+ERL_FILES += $(XRL_ERL_FILES)
+
+YRL_FILES := $(filter %.yrl,$(ALL_SRC_FILES))
+YRL_ERL_FILES = $(addprefix src/,$(patsubst %.yrl,%.erl,$(notdir $(YRL_FILES))))
+ERL_FILES += $(YRL_ERL_FILES)
+
+$(PROJECT).d:: $(XRL_FILES) $(YRL_FILES)
+ $(if $(strip $?),$(xyrl_verbose) erlc -v -o src/ $(YRL_ERLC_OPTS) $?)
+
+# Erlang and Core Erlang files.
+
+define makedep.erl
+ E = ets:new(makedep, [bag]),
+ G = digraph:new([acyclic]),
+ ErlFiles = lists:usort(string:tokens("$(ERL_FILES)", " ")),
+ DepsDir = "$(call core_native_path,$(DEPS_DIR))",
+ AppsDir = "$(call core_native_path,$(APPS_DIR))",
+ DepsDirsSrc = "$(if $(wildcard $(DEPS_DIR)/*/src), $(call core_native_path,$(wildcard $(DEPS_DIR)/*/src)))",
+ DepsDirsInc = "$(if $(wildcard $(DEPS_DIR)/*/include), $(call core_native_path,$(wildcard $(DEPS_DIR)/*/include)))",
+ AppsDirsSrc = "$(if $(wildcard $(APPS_DIR)/*/src), $(call core_native_path,$(wildcard $(APPS_DIR)/*/src)))",
+ AppsDirsInc = "$(if $(wildcard $(APPS_DIR)/*/include), $(call core_native_path,$(wildcard $(APPS_DIR)/*/include)))",
+ DepsDirs = lists:usort(string:tokens(DepsDirsSrc++DepsDirsInc, " ")),
+ AppsDirs = lists:usort(string:tokens(AppsDirsSrc++AppsDirsInc, " ")),
+ Modules = [{list_to_atom(filename:basename(F, ".erl")), F} || F <- ErlFiles],
+ Add = fun (Mod, Dep) ->
+ case lists:keyfind(Dep, 1, Modules) of
+ false -> ok;
+ {_, DepFile} ->
+ {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+ ets:insert(E, {ModFile, DepFile}),
+ digraph:add_vertex(G, Mod),
+ digraph:add_vertex(G, Dep),
+ digraph:add_edge(G, Mod, Dep)
+ end
+ end,
+ AddHd = fun (F, Mod, DepFile) ->
+ case file:open(DepFile, [read]) of
+ {error, enoent} ->
+ ok;
+ {ok, Fd} ->
+ {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+ case ets:match(E, {ModFile, DepFile}) of
+ [] ->
+ ets:insert(E, {ModFile, DepFile}),
+ F(F, Fd, Mod,0);
+ _ -> ok
+ end
+ end
+ end,
+ SearchHrl = fun
+ F(_Hrl, []) -> {error,enoent};
+ F(Hrl, [Dir|Dirs]) ->
+ HrlF = filename:join([Dir,Hrl]),
+ case filelib:is_file(HrlF) of
+ true ->
+ {ok, HrlF};
+ false -> F(Hrl,Dirs)
+ end
+ end,
+ Attr = fun
+ (_F, Mod, behavior, Dep) ->
+ Add(Mod, Dep);
+ (_F, Mod, behaviour, Dep) ->
+ Add(Mod, Dep);
+ (_F, Mod, compile, {parse_transform, Dep}) ->
+ Add(Mod, Dep);
+ (_F, Mod, compile, Opts) when is_list(Opts) ->
+ case proplists:get_value(parse_transform, Opts) of
+ undefined -> ok;
+ Dep -> Add(Mod, Dep)
+ end;
+ (F, Mod, include, Hrl) ->
+ case SearchHrl(Hrl, ["src", "include",AppsDir,DepsDir]++AppsDirs++DepsDirs) of
+ {ok, FoundHrl} -> AddHd(F, Mod, FoundHrl);
+ {error, _} -> false
+ end;
+ (F, Mod, include_lib, Hrl) ->
+ case SearchHrl(Hrl, ["src", "include",AppsDir,DepsDir]++AppsDirs++DepsDirs) of
+ {ok, FoundHrl} -> AddHd(F, Mod, FoundHrl);
+ {error, _} -> false
+ end;
+ (F, Mod, import, {Imp, _}) ->
+ IsFile =
+ case lists:keyfind(Imp, 1, Modules) of
+ false -> false;
+ {_, FilePath} -> filelib:is_file(FilePath)
+ end,
+ case IsFile of
+ false -> ok;
+ true -> Add(Mod, Imp)
+ end;
+ (_, _, _, _) -> ok
+ end,
+ MakeDepend = fun
+ (F, Fd, Mod, StartLocation) ->
+ {ok, Filename} = file:pid2name(Fd),
+ case io:parse_erl_form(Fd, undefined, StartLocation) of
+ {ok, AbsData, EndLocation} ->
+ case AbsData of
+ {attribute, _, Key, Value} ->
+ Attr(F, Mod, Key, Value),
+ F(F, Fd, Mod, EndLocation);
+ _ -> F(F, Fd, Mod, EndLocation)
+ end;
+ {eof, _ } -> file:close(Fd);
+ {error, ErrorDescription } ->
+ file:close(Fd);
+ {error, ErrorInfo, ErrorLocation} ->
+ F(F, Fd, Mod, ErrorLocation)
+ end,
+ ok
+ end,
+ [begin
+ Mod = list_to_atom(filename:basename(F, ".erl")),
+ case file:open(F, [read]) of
+ {ok, Fd} -> MakeDepend(MakeDepend, Fd, Mod,0);
+ {error, enoent} -> ok
+ end
+ end || F <- ErlFiles],
+ Depend = sofs:to_external(sofs:relation_to_family(sofs:relation(ets:tab2list(E)))),
+ CompileFirst = [X || X <- lists:reverse(digraph_utils:topsort(G)), [] =/= digraph:in_neighbours(G, X)],
+ TargetPath = fun(Target) ->
+ case lists:keyfind(Target, 1, Modules) of
+ false -> "";
+ {_, DepFile} ->
+ DirSubname = tl(string:tokens(filename:dirname(DepFile), "/")),
+ string:join(DirSubname ++ [atom_to_list(Target)], "/")
+ end
+ end,
+ Output0 = [
+ "# Generated by Erlang.mk. Edit at your own risk!\n\n",
+ [[F, "::", [[" ", D] || D <- Deps], "; @touch \$$@\n"] || {F, Deps} <- Depend],
+ "\nCOMPILE_FIRST +=", [[" ", TargetPath(CF)] || CF <- CompileFirst], "\n"
+ ],
+ Output = case "é" of
+ [233] -> unicode:characters_to_binary(Output0);
+ _ -> Output0
+ end,
+ ok = file:write_file("$(1)", Output),
+ halt()
+endef
+
+ifeq ($(if $(NO_MAKEDEP),$(wildcard $(PROJECT).d),),)
+$(PROJECT).d:: $(ERL_FILES) $(call core_find,include/,*.hrl) $(MAKEFILE_LIST)
+ $(makedep_verbose) $(call erlang,$(call makedep.erl,$@))
+endif
+
+ifeq ($(IS_APP)$(IS_DEP),)
+ifneq ($(words $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES)),0)
+# Rebuild everything when the Makefile changes.
+$(ERLANG_MK_TMP)/last-makefile-change: $(MAKEFILE_LIST) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES); \
+ touch -c $(PROJECT).d; \
+ fi
+ $(verbose) touch $@
+
+$(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES):: $(ERLANG_MK_TMP)/last-makefile-change
+ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change
+endif
+endif
+
+$(PROJECT).d::
+ $(verbose) :
+
+include $(wildcard $(PROJECT).d)
+
+ebin/$(PROJECT).app:: ebin/
+
+ebin/:
+ $(verbose) mkdir -p ebin/
+
+define compile_erl
+ $(erlc_verbose) erlc -v $(if $(IS_DEP),$(filter-out -Werror,$(ERLC_OPTS)),$(ERLC_OPTS)) -o ebin/ \
+ -pa ebin/ -I include/ $(filter-out $(ERLC_EXCLUDE_PATHS),$(COMPILE_FIRST_PATHS) $(1))
+endef
+
+define validate_app_file
+ case file:consult("ebin/$(PROJECT).app") of
+ {ok, _} -> halt();
+ _ -> halt(1)
+ end
+endef
+
+ebin/$(PROJECT).app:: $(ERL_FILES) $(CORE_FILES) $(wildcard src/$(PROJECT).app.src)
+ $(eval FILES_TO_COMPILE := $(filter-out src/$(PROJECT).app.src,$?))
+ $(if $(strip $(FILES_TO_COMPILE)),$(call compile_erl,$(FILES_TO_COMPILE)))
+# Older git versions do not have the --first-parent flag. Do without in that case.
+ $(eval GITDESCRIBE := $(shell git describe --dirty --abbrev=7 --tags --always --first-parent 2>/dev/null \
+ || git describe --dirty --abbrev=7 --tags --always 2>/dev/null || true))
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(filter-out $(ERLC_EXCLUDE_PATHS),$(ERL_FILES) $(CORE_FILES) $(BEAM_FILES)))))))
+ifeq ($(wildcard src/$(PROJECT).app.src),)
+ $(app_verbose) printf '$(subst %,%%,$(subst $(newline),\n,$(subst ','\'',$(call app_file,$(GITDESCRIBE),$(MODULES)))))' \
+ > ebin/$(PROJECT).app
+ $(verbose) if ! $(call erlang,$(call validate_app_file)); then \
+ echo "The .app file produced is invalid. Please verify the value of PROJECT_ENV." >&2; \
+ exit 1; \
+ fi
+else
+ $(verbose) if [ -z "$$(grep -e '^[^%]*{\s*modules\s*,' src/$(PROJECT).app.src)" ]; then \
+ echo "Empty modules entry not found in $(PROJECT).app.src. Please consult the erlang.mk documentation for instructions." >&2; \
+ exit 1; \
+ fi
+ $(appsrc_verbose) cat src/$(PROJECT).app.src \
+ | sed "s/{[[:space:]]*modules[[:space:]]*,[[:space:]]*\[\]}/{modules, \[$(call comma_list,$(MODULES))\]}/" \
+ | sed "s/{id,[[:space:]]*\"git\"}/{id, \"$(subst /,\/,$(GITDESCRIBE))\"}/" \
+ > ebin/$(PROJECT).app
+endif
+ifneq ($(wildcard src/$(PROJECT).appup),)
+ $(verbose) cp src/$(PROJECT).appup ebin/
+endif
+
+clean:: clean-app
+
+clean-app:
+ $(gen_verbose) rm -rf $(PROJECT).d ebin/ priv/mibs/ $(XRL_ERL_FILES) $(YRL_ERL_FILES) \
+ $(addprefix include/,$(patsubst %.mib,%.hrl,$(notdir $(MIB_FILES)))) \
+ $(addprefix include/,$(patsubst %.asn1,%.hrl,$(notdir $(ASN1_FILES)))) \
+ $(addprefix include/,$(patsubst %.asn1,%.asn1db,$(notdir $(ASN1_FILES)))) \
+ $(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES))))
+
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Viktor Söderqvist <viktor@zuiderkwast.se>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: docs-deps
+
+# Configuration.
+
+ALL_DOC_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(DOC_DEPS))
+
+# Targets.
+
+$(foreach dep,$(DOC_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+doc-deps:
+else
+doc-deps: $(ALL_DOC_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_DOC_DEPS_DIRS) ; do $(MAKE) -C $$dep IS_DEP=1; done
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: rel-deps
+
+# Configuration.
+
+ALL_REL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(REL_DEPS))
+
+# Targets.
+
+$(foreach dep,$(REL_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+rel-deps:
+else
+rel-deps: $(ALL_REL_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_REL_DEPS_DIRS) ; do $(MAKE) -C $$dep; done
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: test-deps test-dir test-build clean-test-dir
+
+# Configuration.
+
+TEST_DIR ?= $(CURDIR)/test
+
+ALL_TEST_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(TEST_DEPS))
+
+TEST_ERLC_OPTS ?= +debug_info +warn_export_vars +warn_shadow_vars +warn_obsolete_guard
+TEST_ERLC_OPTS += -DTEST=1
+
+# Targets.
+
+$(foreach dep,$(TEST_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+test-deps:
+else
+test-deps: $(ALL_TEST_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_TEST_DEPS_DIRS) ; do \
+ if [ -z "$(strip $(FULL))" ] && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ else \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ fi \
+ done
+endif
+
+ifneq ($(wildcard $(TEST_DIR)),)
+test-dir: $(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build
+ @:
+
+test_erlc_verbose_0 = @echo " ERLC " $(filter-out $(patsubst %,%.erl,$(ERLC_EXCLUDE)),\
+ $(filter %.erl %.core,$(notdir $(FILES_TO_COMPILE))));
+test_erlc_verbose_2 = set -x;
+test_erlc_verbose = $(test_erlc_verbose_$(V))
+
+define compile_test_erl
+ $(test_erlc_verbose) erlc -v $(TEST_ERLC_OPTS) -o $(TEST_DIR) \
+ -pa ebin/ -I include/ $(1)
+endef
+
+ERL_TEST_FILES = $(call core_find,$(TEST_DIR)/,*.erl)
+$(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build: $(ERL_TEST_FILES) $(MAKEFILE_LIST)
+ $(eval FILES_TO_COMPILE := $(if $(filter $(MAKEFILE_LIST),$?),$(filter $(ERL_TEST_FILES),$^),$?))
+ $(if $(strip $(FILES_TO_COMPILE)),$(call compile_test_erl,$(FILES_TO_COMPILE)) && touch $@)
+endif
+
+test-build:: IS_TEST=1
+test-build:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build:: $(if $(wildcard src),$(if $(wildcard ebin/test),,clean)) $(if $(IS_APP),,deps test-deps)
+# We already compiled everything when IS_APP=1.
+ifndef IS_APP
+ifneq ($(wildcard src),)
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(verbose) $(MAKE) --no-print-directory app-build ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(gen_verbose) touch ebin/test
+endif
+ifneq ($(wildcard $(TEST_DIR)),)
+ $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+endif
+endif
+
+# Roughly the same as test-build, but when IS_APP=1.
+# We only care about compiling the current application.
+ifdef IS_APP
+test-build-app:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build-app:: deps test-deps
+ifneq ($(wildcard src),)
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(verbose) $(MAKE) --no-print-directory app-build ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(gen_verbose) touch ebin/test
+endif
+ifneq ($(wildcard $(TEST_DIR)),)
+ $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+endif
+endif
+
+clean:: clean-test-dir
+
+clean-test-dir:
+ifneq ($(wildcard $(TEST_DIR)/*.beam),)
+ $(gen_verbose) rm -f $(TEST_DIR)/*.beam $(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: rebar.config
+
+# We strip out -Werror because we don't want to fail due to
+# warnings when used as a dependency.
+
+compat_prepare_erlc_opts = $(shell echo "$1" | sed 's/, */,/g')
+
+define compat_convert_erlc_opts
+$(if $(filter-out -Werror,$1),\
+ $(if $(findstring +,$1),\
+ $(shell echo $1 | cut -b 2-)))
+endef
+
+define compat_erlc_opts_to_list
+[$(call comma_list,$(foreach o,$(call compat_prepare_erlc_opts,$1),$(call compat_convert_erlc_opts,$o)))]
+endef
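+
+# Sketch of the conversion performed above: -Werror is dropped, options
+# that do not start with + are ignored, and every +option loses its
+# leading +, so for example
+#
+#     ERLC_OPTS = -Werror +debug_info +warn_export_vars
+#
+# is rendered as {erl_opts, [debug_info,warn_export_vars]}. by the
+# template below.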
+
+define compat_rebar_config
+{deps, [
+$(call comma_list,$(foreach d,$(DEPS),\
+ $(if $(filter hex,$(call dep_fetch,$d)),\
+ {$(call dep_name,$d)$(comma)"$(call dep_repo,$d)"},\
+ {$(call dep_name,$d)$(comma)".*"$(comma){git,"$(call dep_repo,$d)"$(comma)"$(call dep_commit,$d)"}})))
+]}.
+{erl_opts, $(call compat_erlc_opts_to_list,$(ERLC_OPTS))}.
+endef
+
+rebar.config:
+ $(gen_verbose) $(call core_render,compat_rebar_config,rebar.config)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter asciideck,$(DEPS) $(DOC_DEPS)),asciideck)
+
+.PHONY: asciidoc asciidoc-guide asciidoc-manual install-asciidoc distclean-asciidoc-guide distclean-asciidoc-manual
+
+# Core targets.
+
+docs:: asciidoc
+
+distclean:: distclean-asciidoc-guide distclean-asciidoc-manual
+
+# Plugin-specific targets.
+
+asciidoc: asciidoc-guide asciidoc-manual
+
+# User guide.
+
+ifeq ($(wildcard doc/src/guide/book.asciidoc),)
+asciidoc-guide:
+else
+asciidoc-guide: distclean-asciidoc-guide doc-deps
+ a2x -v -f pdf doc/src/guide/book.asciidoc && mv doc/src/guide/book.pdf doc/guide.pdf
+ a2x -v -f chunked doc/src/guide/book.asciidoc && mv doc/src/guide/book.chunked/ doc/html/
+
+distclean-asciidoc-guide:
+ $(gen_verbose) rm -rf doc/html/ doc/guide.pdf
+endif
+
+# Man pages.
+
+ASCIIDOC_MANUAL_FILES := $(wildcard doc/src/manual/*.asciidoc)
+
+ifeq ($(ASCIIDOC_MANUAL_FILES),)
+asciidoc-manual:
+else
+
+# Configuration.
+
+MAN_INSTALL_PATH ?= /usr/local/share/man
+MAN_SECTIONS ?= 3 7
+MAN_PROJECT ?= $(shell echo $(PROJECT) | sed 's/^./\U&\E/')
+MAN_VERSION ?= $(PROJECT_VERSION)
+
+# Plugin-specific targets.
+
+define asciidoc2man.erl
+try
+ [begin
+ io:format(" ADOC ~s~n", [F]),
+ ok = asciideck:to_manpage(asciideck:parse_file(F), #{
+ compress => gzip,
+ outdir => filename:dirname(F),
+ extra2 => "$(MAN_PROJECT) $(MAN_VERSION)",
+ extra3 => "$(MAN_PROJECT) Function Reference"
+ })
+ end || F <- [$(shell echo $(addprefix $(comma)\",$(addsuffix \",$1)) | sed 's/^.//')]],
+ halt(0)
+catch C:E ->
+ io:format("Exception ~p:~p~nStacktrace: ~p~n", [C, E, erlang:get_stacktrace()]),
+ halt(1)
+end.
+endef
+
+asciidoc-manual:: doc-deps
+
+asciidoc-manual:: $(ASCIIDOC_MANUAL_FILES)
+ $(gen_verbose) $(call erlang,$(call asciidoc2man.erl,$?))
+ $(verbose) $(foreach s,$(MAN_SECTIONS),mkdir -p doc/man$s/ && mv doc/src/manual/*.$s.gz doc/man$s/;)
+
+install-docs:: install-asciidoc
+
+install-asciidoc: asciidoc-manual
+ $(foreach s,$(MAN_SECTIONS),\
+ mkdir -p $(MAN_INSTALL_PATH)/man$s/ && \
+ install -g `id -g` -o `id -u` -m 0644 doc/man$s/*.gz $(MAN_INSTALL_PATH)/man$s/;)
+
+distclean-asciidoc-manual:
+ $(gen_verbose) rm -rf $(addprefix doc/man,$(MAN_SECTIONS))
+endif
+endif
+
+# Copyright (c) 2014-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: bootstrap bootstrap-lib bootstrap-rel new list-templates
+
+# Core targets.
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Bootstrap targets:" \
+ " bootstrap Generate a skeleton of an OTP application" \
+ " bootstrap-lib Generate a skeleton of an OTP library" \
+ " bootstrap-rel Generate the files needed to build a release" \
+ " new-app in=NAME Create a new local OTP application NAME" \
+ " new-lib in=NAME Create a new local OTP library NAME" \
+ " new t=TPL n=NAME Generate a module NAME based on the template TPL" \
+ " new t=T n=N in=APP Generate a module NAME based on the template TPL in APP" \
+ " list-templates List available templates"
+
+# Bootstrap templates.
+
+define bs_appsrc
+{application, $p, [
+ {description, ""},
+ {vsn, "0.1.0"},
+ {id, "git"},
+ {modules, []},
+ {registered, []},
+ {applications, [
+ kernel,
+ stdlib
+ ]},
+ {mod, {$p_app, []}},
+ {env, []}
+]}.
+endef
+
+define bs_appsrc_lib
+{application, $p, [
+ {description, ""},
+ {vsn, "0.1.0"},
+ {id, "git"},
+ {modules, []},
+ {registered, []},
+ {applications, [
+ kernel,
+ stdlib
+ ]}
+]}.
+endef
+
+# To prevent autocompletion issues with ZSH, we add "include erlang.mk"
+# separately during the actual bootstrap.
+define bs_Makefile
+PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.1.0
+$(if $(SP),
+# Whitespace to be used when creating files from templates.
+SP = $(SP)
+)
+endef
+
+define bs_apps_Makefile
+PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.1.0
+$(if $(SP),
+# Whitespace to be used when creating files from templates.
+SP = $(SP)
+)
+# Make sure we know where the applications are located.
+ROOT_DIR ?= $(call core_relpath,$(dir $(ERLANG_MK_FILENAME)),$(APPS_DIR)/app)
+APPS_DIR ?= ..
+DEPS_DIR ?= $(call core_relpath,$(DEPS_DIR),$(APPS_DIR)/app)
+
+include $$(ROOT_DIR)/erlang.mk
+endef
+
+define bs_app
+-module($p_app).
+-behaviour(application).
+
+-export([start/2]).
+-export([stop/1]).
+
+start(_Type, _Args) ->
+ $p_sup:start_link().
+
+stop(_State) ->
+ ok.
+endef
+
+define bs_relx_config
+{release, {$p_release, "1"}, [$p, sasl, runtime_tools]}.
+{extended_start_script, true}.
+{sys_config, "config/sys.config"}.
+{vm_args, "config/vm.args"}.
+endef
+
+define bs_sys_config
+[
+].
+endef
+
+define bs_vm_args
+-name $p@127.0.0.1
+-setcookie $p
+-heart
+endef
+
+# Normal templates.
+
+define tpl_supervisor
+-module($(n)).
+-behaviour(supervisor).
+
+-export([start_link/0]).
+-export([init/1]).
+
+start_link() ->
+ supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+init([]) ->
+ Procs = [],
+ {ok, {{one_for_one, 1, 5}, Procs}}.
+endef
+
+define tpl_gen_server
+-module($(n)).
+-behaviour(gen_server).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_server.
+-export([init/1]).
+-export([handle_call/3]).
+-export([handle_cast/2]).
+-export([handle_info/2]).
+-export([terminate/2]).
+-export([code_change/3]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_server:start_link(?MODULE, [], []).
+
+%% gen_server.
+
+init([]) ->
+ {ok, #state{}}.
+
+handle_call(_Request, _From, State) ->
+ {reply, ignored, State}.
+
+handle_cast(_Msg, State) ->
+ {noreply, State}.
+
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+endef
+
+define tpl_module
+-module($(n)).
+-export([]).
+endef
+
+define tpl_cowboy_http
+-module($(n)).
+-behaviour(cowboy_http_handler).
+
+-export([init/3]).
+-export([handle/2]).
+-export([terminate/3]).
+
+-record(state, {
+}).
+
+init(_, Req, _Opts) ->
+ {ok, Req, #state{}}.
+
+handle(Req, State=#state{}) ->
+ {ok, Req2} = cowboy_req:reply(200, Req),
+ {ok, Req2, State}.
+
+terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_gen_fsm
+-module($(n)).
+-behaviour(gen_fsm).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_fsm.
+-export([init/1]).
+-export([state_name/2]).
+-export([handle_event/3]).
+-export([state_name/3]).
+-export([handle_sync_event/4]).
+-export([handle_info/3]).
+-export([terminate/3]).
+-export([code_change/4]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_fsm:start_link(?MODULE, [], []).
+
+%% gen_fsm.
+
+init([]) ->
+ {ok, state_name, #state{}}.
+
+state_name(_Event, StateData) ->
+ {next_state, state_name, StateData}.
+
+handle_event(_Event, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+state_name(_Event, _From, StateData) ->
+ {reply, ignored, state_name, StateData}.
+
+handle_sync_event(_Event, _From, StateName, StateData) ->
+ {reply, ignored, StateName, StateData}.
+
+handle_info(_Info, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+terminate(_Reason, _StateName, _StateData) ->
+ ok.
+
+code_change(_OldVsn, StateName, StateData, _Extra) ->
+ {ok, StateName, StateData}.
+endef
+
+define tpl_gen_statem
+-module($(n)).
+-behaviour(gen_statem).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_statem.
+-export([callback_mode/0]).
+-export([init/1]).
+-export([state_name/3]).
+-export([handle_event/4]).
+-export([terminate/3]).
+-export([code_change/4]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_statem:start_link(?MODULE, [], []).
+
+%% gen_statem.
+
+callback_mode() ->
+ state_functions.
+
+init([]) ->
+ {ok, state_name, #state{}}.
+
+state_name(_EventType, _EventData, StateData) ->
+ {next_state, state_name, StateData}.
+
+handle_event(_EventType, _EventData, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+terminate(_Reason, _StateName, _StateData) ->
+ ok.
+
+code_change(_OldVsn, StateName, StateData, _Extra) ->
+ {ok, StateName, StateData}.
+endef
+
+define tpl_cowboy_loop
+-module($(n)).
+-behaviour(cowboy_loop_handler).
+
+-export([init/3]).
+-export([info/3]).
+-export([terminate/3]).
+
+-record(state, {
+}).
+
+init(_, Req, _Opts) ->
+ {loop, Req, #state{}, 5000, hibernate}.
+
+info(_Info, Req, State) ->
+ {loop, Req, State, hibernate}.
+
+terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_cowboy_rest
+-module($(n)).
+
+-export([init/3]).
+-export([content_types_provided/2]).
+-export([get_html/2]).
+
+init(_, _Req, _Opts) ->
+ {upgrade, protocol, cowboy_rest}.
+
+content_types_provided(Req, State) ->
+ {[{{<<"text">>, <<"html">>, '*'}, get_html}], Req, State}.
+
+get_html(Req, State) ->
+ {<<"<html><body>This is REST!</body></html>">>, Req, State}.
+endef
+
+define tpl_cowboy_ws
+-module($(n)).
+-behaviour(cowboy_websocket_handler).
+
+-export([init/3]).
+-export([websocket_init/3]).
+-export([websocket_handle/3]).
+-export([websocket_info/3]).
+-export([websocket_terminate/3]).
+
+-record(state, {
+}).
+
+init(_, _, _) ->
+ {upgrade, protocol, cowboy_websocket}.
+
+websocket_init(_, Req, _Opts) ->
+ Req2 = cowboy_req:compact(Req),
+ {ok, Req2, #state{}}.
+
+websocket_handle({text, Data}, Req, State) ->
+ {reply, {text, Data}, Req, State};
+websocket_handle({binary, Data}, Req, State) ->
+ {reply, {binary, Data}, Req, State};
+websocket_handle(_Frame, Req, State) ->
+ {ok, Req, State}.
+
+websocket_info(_Info, Req, State) ->
+ {ok, Req, State}.
+
+websocket_terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_ranch_protocol
+-module($(n)).
+-behaviour(ranch_protocol).
+
+-export([start_link/4]).
+-export([init/4]).
+
+-type opts() :: [].
+-export_type([opts/0]).
+
+-record(state, {
+ socket :: inet:socket(),
+ transport :: module()
+}).
+
+start_link(Ref, Socket, Transport, Opts) ->
+ Pid = spawn_link(?MODULE, init, [Ref, Socket, Transport, Opts]),
+ {ok, Pid}.
+
+-spec init(ranch:ref(), inet:socket(), module(), opts()) -> ok.
+init(Ref, Socket, Transport, _Opts) ->
+ ok = ranch:accept_ack(Ref),
+ loop(#state{socket=Socket, transport=Transport}).
+
+loop(State) ->
+ loop(State).
+endef
+
+# Plugin-specific targets.
+
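+# WS is the whitespace rendered into generated files by the targets
+# below: a tab by default or, when SP is set, a string of SP spaces
+# (capped at 20).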
+ifndef WS
+ifdef SP
+WS = $(subst a,,a $(wordlist 1,$(SP),a a a a a a a a a a a a a a a a a a a a))
+else
+WS = $(tab)
+endif
+endif
+
+bootstrap:
+ifneq ($(wildcard src/),)
+ $(error Error: src/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(eval n := $(PROJECT)_sup)
+ $(verbose) $(call core_render,bs_Makefile,Makefile)
+ $(verbose) echo "include erlang.mk" >> Makefile
+ $(verbose) mkdir src/
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc,src/$(PROJECT).app.src)
+endif
+ $(verbose) $(call core_render,bs_app,src/$(PROJECT)_app.erl)
+ $(verbose) $(call core_render,tpl_supervisor,src/$(PROJECT)_sup.erl)
+
+bootstrap-lib:
+ifneq ($(wildcard src/),)
+ $(error Error: src/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(verbose) $(call core_render,bs_Makefile,Makefile)
+ $(verbose) echo "include erlang.mk" >> Makefile
+ $(verbose) mkdir src/
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc_lib,src/$(PROJECT).app.src)
+endif
+
+bootstrap-rel:
+ifneq ($(wildcard relx.config),)
+ $(error Error: relx.config already exists)
+endif
+ifneq ($(wildcard config/),)
+ $(error Error: config/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(verbose) $(call core_render,bs_relx_config,relx.config)
+ $(verbose) mkdir config/
+ $(verbose) $(call core_render,bs_sys_config,config/sys.config)
+ $(verbose) $(call core_render,bs_vm_args,config/vm.args)
+
+new-app:
+ifndef in
+ $(error Usage: $(MAKE) new-app in=APP)
+endif
+ifneq ($(wildcard $(APPS_DIR)/$(in)),)
+	$(error Error: Application $(in) already exists)
+endif
+ $(eval p := $(in))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(eval n := $(in)_sup)
+ $(verbose) mkdir -p $(APPS_DIR)/$p/src/
+ $(verbose) $(call core_render,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile)
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc,$(APPS_DIR)/$p/src/$p.app.src)
+endif
+ $(verbose) $(call core_render,bs_app,$(APPS_DIR)/$p/src/$p_app.erl)
+ $(verbose) $(call core_render,tpl_supervisor,$(APPS_DIR)/$p/src/$p_sup.erl)
+
+new-lib:
+ifndef in
+ $(error Usage: $(MAKE) new-lib in=APP)
+endif
+ifneq ($(wildcard $(APPS_DIR)/$(in)),)
+	$(error Error: Application $(in) already exists)
+endif
+ $(eval p := $(in))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(verbose) mkdir -p $(APPS_DIR)/$p/src/
+ $(verbose) $(call core_render,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile)
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc_lib,$(APPS_DIR)/$p/src/$p.app.src)
+endif
+
+new:
+ifeq ($(wildcard src/)$(in),)
+ $(error Error: src/ directory does not exist)
+endif
+ifndef t
+ $(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP])
+endif
+ifndef n
+ $(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP])
+endif
+ifdef in
+ $(verbose) $(call core_render,tpl_$(t),$(APPS_DIR)/$(in)/src/$(n).erl)
+else
+ $(verbose) $(call core_render,tpl_$(t),src/$(n).erl)
+endif
+
+list-templates:
+	$(verbose) echo Available templates:
+ $(verbose) printf " %s\n" $(sort $(patsubst tpl_%,%,$(filter tpl_%,$(.VARIABLES))))
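+
+# For example (illustrative module and application names):
+#   $ make list-templates
+#   $ make new t=gen_server n=my_server
+#   $ make new t=cowboy_http n=my_handler in=my_app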
+
+# Copyright (c) 2014-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: clean-c_src distclean-c_src-env
+
+# Configuration.
+
+C_SRC_DIR ?= $(CURDIR)/c_src
+C_SRC_ENV ?= $(C_SRC_DIR)/env.mk
+C_SRC_OUTPUT ?= $(CURDIR)/priv/$(PROJECT)
+C_SRC_TYPE ?= shared
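+# Setting C_SRC_TYPE to anything other than "shared" (for example the
+# illustrative value "executable") links a standalone executable instead
+# of a shared library; see the extension and link flags selection below.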
+
+# System type and C compiler/flags.
+
+ifeq ($(PLATFORM),msys2)
+ C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?= .exe
+ C_SRC_OUTPUT_SHARED_EXTENSION ?= .dll
+else
+ C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?=
+ C_SRC_OUTPUT_SHARED_EXTENSION ?= .so
+endif
+
+ifeq ($(C_SRC_TYPE),shared)
+ C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_SHARED_EXTENSION)
+else
+ C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_EXECUTABLE_EXTENSION)
+endif
+
+ifeq ($(PLATFORM),msys2)
+# We hardcode the compiler used on MSYS2. The default CC=cc does not
+# produce working code, and neither does the "gcc" MSYS2 package.
+ CC = /mingw64/bin/gcc
+ export CC
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),darwin)
+ CC ?= cc
+ CFLAGS ?= -O3 -std=c99 -arch x86_64 -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -arch x86_64 -Wall
+ LDFLAGS ?= -arch x86_64 -flat_namespace -undefined suppress
+else ifeq ($(PLATFORM),freebsd)
+ CC ?= cc
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),linux)
+ CC ?= gcc
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+endif
+
+ifneq ($(PLATFORM),msys2)
+ CFLAGS += -fPIC
+ CXXFLAGS += -fPIC
+endif
+
+CFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
+CXXFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
+
+LDLIBS += -L"$(ERL_INTERFACE_LIB_DIR)" -lei
+
+# Verbosity.
+
+c_verbose_0 = @echo " C " $(filter-out $(notdir $(MAKEFILE_LIST) $(C_SRC_ENV)),$(^F));
+c_verbose = $(c_verbose_$(V))
+
+cpp_verbose_0 = @echo " CPP " $(filter-out $(notdir $(MAKEFILE_LIST) $(C_SRC_ENV)),$(^F));
+cpp_verbose = $(cpp_verbose_$(V))
+
+link_verbose_0 = @echo " LD " $(@F);
+link_verbose = $(link_verbose_$(V))
+
+# Targets.
+
+ifeq ($(wildcard $(C_SRC_DIR)),)
+else ifneq ($(wildcard $(C_SRC_DIR)/Makefile),)
+app:: app-c_src
+
+test-build:: app-c_src
+
+app-c_src:
+ $(MAKE) -C $(C_SRC_DIR)
+
+clean::
+ $(MAKE) -C $(C_SRC_DIR) clean
+
+else
+
+ifeq ($(SOURCES),)
+SOURCES := $(sort $(foreach pat,*.c *.C *.cc *.cpp,$(call core_find,$(C_SRC_DIR)/,$(pat))))
+endif
+OBJECTS = $(addsuffix .o, $(basename $(SOURCES)))
+
+COMPILE_C = $(c_verbose) $(CC) $(CFLAGS) $(CPPFLAGS) -c
+COMPILE_CPP = $(cpp_verbose) $(CXX) $(CXXFLAGS) $(CPPFLAGS) -c
+
+app:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
+
+test-build:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
+
+$(C_SRC_OUTPUT_FILE): $(OBJECTS)
+ $(verbose) mkdir -p $(dir $@)
+ $(link_verbose) $(CC) $(OBJECTS) \
+ $(LDFLAGS) $(if $(filter $(C_SRC_TYPE),shared),-shared) $(LDLIBS) \
+ -o $(C_SRC_OUTPUT_FILE)
+
+$(OBJECTS): $(MAKEFILE_LIST) $(C_SRC_ENV)
+
+%.o: %.c
+ $(COMPILE_C) $(OUTPUT_OPTION) $<
+
+%.o: %.cc
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+%.o: %.C
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+%.o: %.cpp
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+clean:: clean-c_src
+
+clean-c_src:
+ $(gen_verbose) rm -f $(C_SRC_OUTPUT_FILE) $(OBJECTS)
+
+endif
+
+ifneq ($(wildcard $(C_SRC_DIR)),)
+ERL_ERTS_DIR = $(shell $(ERL) -eval 'io:format("~s~n", [code:lib_dir(erts)]), halt().')
+
+$(C_SRC_ENV):
+ $(verbose) $(ERL) -eval "file:write_file(\"$(call core_native_path,$(C_SRC_ENV))\", \
+ io_lib:format( \
+ \"# Generated by Erlang.mk. Edit at your own risk!~n~n\" \
+ \"ERTS_INCLUDE_DIR ?= ~s/erts-~s/include/~n\" \
+ \"ERL_INTERFACE_INCLUDE_DIR ?= ~s~n\" \
+ \"ERL_INTERFACE_LIB_DIR ?= ~s~n\" \
+ \"ERTS_DIR ?= $(ERL_ERTS_DIR)~n\", \
+ [code:root_dir(), erlang:system_info(version), \
+ code:lib_dir(erl_interface, include), \
+ code:lib_dir(erl_interface, lib)])), \
+ halt()."
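+
+# The generated env.mk looks roughly like this (the paths below are
+# illustrative, taken from a hypothetical OTP install):
+#
+#   ERTS_INCLUDE_DIR ?= /usr/lib/erlang/erts-11.1/include/
+#   ERL_INTERFACE_INCLUDE_DIR ?= /usr/lib/erlang/lib/erl_interface-4.0.1/include
+#   ERL_INTERFACE_LIB_DIR ?= /usr/lib/erlang/lib/erl_interface-4.0.1/lib
+#   ERTS_DIR ?= /usr/lib/erlang/erts-11.1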
+
+distclean:: distclean-c_src-env
+
+distclean-c_src-env:
+ $(gen_verbose) rm -f $(C_SRC_ENV)
+
+-include $(C_SRC_ENV)
+
+ifneq ($(ERL_ERTS_DIR),$(ERTS_DIR))
+$(shell rm -f $(C_SRC_ENV))
+endif
+endif
+
+# Templates.
+
+define bs_c_nif
+#include "erl_nif.h"
+
+static int loads = 0;
+
+static int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
+{
+ /* Initialize private data. */
+ *priv_data = NULL;
+
+ loads++;
+
+ return 0;
+}
+
+static int upgrade(ErlNifEnv* env, void** priv_data, void** old_priv_data, ERL_NIF_TERM load_info)
+{
+ /* Convert the private data to the new version. */
+ *priv_data = *old_priv_data;
+
+ loads++;
+
+ return 0;
+}
+
+static void unload(ErlNifEnv* env, void* priv_data)
+{
+ if (loads == 1) {
+ /* Destroy the private data. */
+ }
+
+ loads--;
+}
+
+static ERL_NIF_TERM hello(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ if (enif_is_atom(env, argv[0])) {
+ return enif_make_tuple2(env,
+ enif_make_atom(env, "hello"),
+ argv[0]);
+ }
+
+ return enif_make_tuple2(env,
+ enif_make_atom(env, "error"),
+ enif_make_atom(env, "badarg"));
+}
+
+static ErlNifFunc nif_funcs[] = {
+ {"hello", 1, hello}
+};
+
+ERL_NIF_INIT($n, nif_funcs, load, NULL, upgrade, unload)
+endef
+
+define bs_erl_nif
+-module($n).
+
+-export([hello/1]).
+
+-on_load(on_load/0).
+on_load() ->
+ PrivDir = case code:priv_dir(?MODULE) of
+ {error, _} ->
+ AppPath = filename:dirname(filename:dirname(code:which(?MODULE))),
+ filename:join(AppPath, "priv");
+ Path ->
+ Path
+ end,
+ erlang:load_nif(filename:join(PrivDir, atom_to_list(?MODULE)), 0).
+
+hello(_) ->
+ erlang:nif_error({not_loaded, ?MODULE}).
+endef
+
+new-nif:
+ifndef n
+	$(error Usage: $(MAKE) new-nif n=NAME [in=APP])
+endif
+ifneq ($(wildcard $(C_SRC_DIR)/$n.c),)
+	$(error Error: $(C_SRC_DIR)/$n.c already exists)
+endif
+ifneq ($(wildcard src/$n.erl),)
+	$(error Error: src/$n.erl already exists)
+endif
+ifdef in
+ $(verbose) $(MAKE) -C $(APPS_DIR)/$(in)/ new-nif n=$n in=
+else
+ $(verbose) mkdir -p $(C_SRC_DIR) src/
+ $(verbose) $(call core_render,bs_c_nif,$(C_SRC_DIR)/$n.c)
+ $(verbose) $(call core_render,bs_erl_nif,src/$n.erl)
+endif
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: ci ci-prepare ci-setup
+
+CI_OTP ?=
+CI_HIPE ?=
+CI_ERLLVM ?=
+
+ifeq ($(CI_VM),native)
+ERLC_OPTS += +native
+TEST_ERLC_OPTS += +native
+else ifeq ($(CI_VM),erllvm)
+ERLC_OPTS += +native +'{hipe, [to_llvm]}'
+TEST_ERLC_OPTS += +native +'{hipe, [to_llvm]}'
+endif
+
+ifeq ($(strip $(CI_OTP) $(CI_HIPE) $(CI_ERLLVM)),)
+ci::
+else
+
+ci:: $(addprefix ci-,$(CI_OTP) $(addsuffix -native,$(CI_HIPE)) $(addsuffix -erllvm,$(CI_ERLLVM)))
+
+ci-prepare: $(addprefix $(KERL_INSTALL_DIR)/,$(CI_OTP) $(addsuffix -native,$(CI_HIPE)))
+
+ci-setup::
+ $(verbose) :
+
+ci-extra::
+ $(verbose) :
+
+ci_verbose_0 = @echo " CI " $(1);
+ci_verbose = $(ci_verbose_$(V))
+
+define ci_target
+ci-$1: $(KERL_INSTALL_DIR)/$2
+ $(verbose) $(MAKE) --no-print-directory clean
+ $(ci_verbose) \
+ PATH="$(KERL_INSTALL_DIR)/$2/bin:$(PATH)" \
+ CI_OTP_RELEASE="$1" \
+ CT_OPTS="-label $1" \
+ CI_VM="$3" \
+ $(MAKE) ci-setup tests
+ $(verbose) $(MAKE) --no-print-directory ci-extra
+endef
+
+$(foreach otp,$(CI_OTP),$(eval $(call ci_target,$(otp),$(otp),otp)))
+$(foreach otp,$(CI_HIPE),$(eval $(call ci_target,$(otp)-native,$(otp)-native,native)))
+$(foreach otp,$(CI_ERLLVM),$(eval $(call ci_target,$(otp)-erllvm,$(otp)-native,erllvm)))
+
+$(foreach otp,$(filter-out $(ERLANG_OTP),$(CI_OTP)),$(eval $(call kerl_otp_target,$(otp))))
+$(foreach otp,$(filter-out $(ERLANG_HIPE),$(sort $(CI_HIPE) $(CI_ERLLVM))),$(eval $(call kerl_hipe_target,$(otp))))
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Continuous Integration targets:" \
+ " ci Run '$(MAKE) tests' on all configured Erlang versions." \
+ "" \
+		"The CI_OTP variable must be defined with the Erlang versions" \
+		"that should be tested. For example: CI_OTP = OTP-17.3.4 OTP-17.5.3"
+
+endif
+
+# Copyright (c) 2020, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifdef CONCUERROR_TESTS
+
+.PHONY: concuerror distclean-concuerror
+
+# Configuration
+
+CONCUERROR_LOGS_DIR ?= $(CURDIR)/logs
+CONCUERROR_OPTS ?=
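+
+# CONCUERROR_TESTS lists module:function pairs to check, for example
+# (hypothetical names):
+#   CONCUERROR_TESTS = my_tests:test_mutex my_tests:test_deadlock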
+
+# Core targets.
+
+check:: concuerror
+
+ifndef KEEP_LOGS
+distclean:: distclean-concuerror
+endif
+
+# Plugin-specific targets.
+
+$(ERLANG_MK_TMP)/Concuerror/bin/concuerror: | $(ERLANG_MK_TMP)
+ $(verbose) git clone https://github.com/parapluu/Concuerror $(ERLANG_MK_TMP)/Concuerror
+ $(verbose) $(MAKE) -C $(ERLANG_MK_TMP)/Concuerror
+
+$(CONCUERROR_LOGS_DIR):
+ $(verbose) mkdir -p $(CONCUERROR_LOGS_DIR)
+
+define concuerror_html_report
+<!DOCTYPE html>
+<html lang="en">
+<head>
+<meta charset="utf-8">
+<title>Concuerror HTML report</title>
+</head>
+<body>
+<h1>Concuerror HTML report</h1>
+<p>Generated on $(concuerror_date)</p>
+<ul>
+$(foreach t,$(concuerror_targets),<li><a href="$(t).txt">$(t)</a></li>)
+</ul>
+</body>
+</html>
+endef
+
+concuerror: $(addprefix concuerror-,$(subst :,-,$(CONCUERROR_TESTS)))
+ $(eval concuerror_date := $(shell date))
+ $(eval concuerror_targets := $^)
+ $(verbose) $(call core_render,concuerror_html_report,$(CONCUERROR_LOGS_DIR)/concuerror.html)
+
+define concuerror_target
+.PHONY: concuerror-$1-$2
+
+concuerror-$1-$2: test-build | $(ERLANG_MK_TMP)/Concuerror/bin/concuerror $(CONCUERROR_LOGS_DIR)
+ $(ERLANG_MK_TMP)/Concuerror/bin/concuerror \
+ --pa $(CURDIR)/ebin --pa $(TEST_DIR) \
+ -o $(CONCUERROR_LOGS_DIR)/concuerror-$1-$2.txt \
+ $$(CONCUERROR_OPTS) -m $1 -t $2
+endef
+
+$(foreach test,$(CONCUERROR_TESTS),$(eval $(call concuerror_target,$(firstword $(subst :, ,$(test))),$(lastword $(subst :, ,$(test))))))
+
+distclean-concuerror:
+ $(gen_verbose) rm -rf $(CONCUERROR_LOGS_DIR)
+
+endif
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: ct apps-ct distclean-ct
+
+# Configuration.
+
+CT_OPTS ?=
+
+ifneq ($(wildcard $(TEST_DIR)),)
+ifndef CT_SUITES
+CT_SUITES := $(sort $(subst _SUITE.erl,,$(notdir $(call core_find,$(TEST_DIR)/,*_SUITE.erl))))
+endif
+endif
+CT_SUITES ?=
+CT_LOGS_DIR ?= $(CURDIR)/logs
+
+# Core targets.
+
+tests:: ct
+
+ifndef KEEP_LOGS
+distclean:: distclean-ct
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Common_test targets:" \
+ " ct Run all the common_test suites for this project" \
+ "" \
+		"All your common_test suites each have an associated target." \
+		"A suite named http_SUITE can be run using the ct-http target."
+
+# Plugin-specific targets.
+
+CT_RUN = ct_run \
+ -no_auto_compile \
+ -noinput \
+ -pa $(CURDIR)/ebin $(TEST_DIR) \
+ -dir $(TEST_DIR) \
+ -logdir $(CT_LOGS_DIR)
+
+ifeq ($(CT_SUITES),)
+ct: $(if $(IS_APP)$(ROOT_DIR),,apps-ct)
+else
+# We do not run tests if we are in an apps/* with no test directory.
+ifneq ($(IS_APP)$(wildcard $(TEST_DIR)),1)
+ct: test-build $(if $(IS_APP)$(ROOT_DIR),,apps-ct)
+ $(verbose) mkdir -p $(CT_LOGS_DIR)
+ $(gen_verbose) $(CT_RUN) -sname ct_$(PROJECT) -suite $(addsuffix _SUITE,$(CT_SUITES)) $(CT_OPTS)
+endif
+endif
+
+ifneq ($(ALL_APPS_DIRS),)
+define ct_app_target
+apps-ct-$1: test-build
+ $$(MAKE) -C $1 ct IS_APP=1
+endef
+
+$(foreach app,$(ALL_APPS_DIRS),$(eval $(call ct_app_target,$(app))))
+
+apps-ct: $(addprefix apps-ct-,$(ALL_APPS_DIRS))
+endif
+
+ifdef t
+ifeq (,$(findstring :,$t))
+CT_EXTRA = -group $t
+else
+t_words = $(subst :, ,$t)
+CT_EXTRA = -group $(firstword $(t_words)) -case $(lastword $(t_words))
+endif
+else
+ifdef c
+CT_EXTRA = -case $c
+else
+CT_EXTRA =
+endif
+endif
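+
+# For example (illustrative suite, group and case names):
+#   $ make ct-http t=admins             # run one group of http_SUITE
+#   $ make ct-http t=admins:basic_auth  # run one case in that group
+#   $ make ct-http c=basic_auth         # run one case regardless of group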
+
+define ct_suite_target
+ct-$(1): test-build
+ $(verbose) mkdir -p $(CT_LOGS_DIR)
+ $(gen_verbose_esc) $(CT_RUN) -sname ct_$(PROJECT) -suite $(addsuffix _SUITE,$(1)) $(CT_EXTRA) $(CT_OPTS)
+endef
+
+$(foreach test,$(CT_SUITES),$(eval $(call ct_suite_target,$(test))))
+
+distclean-ct:
+ $(gen_verbose) rm -rf $(CT_LOGS_DIR)
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: plt distclean-plt dialyze
+
+# Configuration.
+
+DIALYZER_PLT ?= $(CURDIR)/.$(PROJECT).plt
+export DIALYZER_PLT
+
+PLT_APPS ?=
+DIALYZER_DIRS ?= --src -r $(wildcard src) $(ALL_APPS_DIRS)
+DIALYZER_OPTS ?= -Werror_handling -Wrace_conditions -Wunmatched_returns # -Wunderspecs
+DIALYZER_PLT_OPTS ?=
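+
+# For example (illustrative): extra OTP applications can be added to the
+# PLT with
+#   PLT_APPS = crypto public_key ssl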
+
+# Core targets.
+
+check:: dialyze
+
+distclean:: distclean-plt
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Dialyzer targets:" \
+ " plt Build a PLT file for this project" \
+ " dialyze Analyze the project using Dialyzer"
+
+# Plugin-specific targets.
+
+define filter_opts.erl
+ Opts = init:get_plain_arguments(),
+ {Filtered, _} = lists:foldl(fun
+ (O, {Os, true}) -> {[O|Os], false};
+ (O = "-D", {Os, _}) -> {[O|Os], true};
+ (O = [\\$$-, \\$$D, _ | _], {Os, _}) -> {[O|Os], false};
+ (O = "-I", {Os, _}) -> {[O|Os], true};
+ (O = [\\$$-, \\$$I, _ | _], {Os, _}) -> {[O|Os], false};
+ (O = "-pa", {Os, _}) -> {[O|Os], true};
+ (_, Acc) -> Acc
+ end, {[], false}, Opts),
+ io:format("~s~n", [string:join(lists:reverse(Filtered), " ")]),
+ halt().
+endef
+
+# DIALYZER_PLT is a variable understood directly by Dialyzer.
+#
+# We append the path to erts at the end of the PLT. This works
+# because the PLT file is in the external term format and the
+# function binary_to_term/1 ignores any trailing data.
+$(DIALYZER_PLT): deps app
+ $(eval DEPS_LOG := $(shell test -f $(ERLANG_MK_TMP)/deps.log && \
+ while read p; do test -d $$p/ebin && echo $$p/ebin; done <$(ERLANG_MK_TMP)/deps.log))
+ $(verbose) dialyzer --build_plt $(DIALYZER_PLT_OPTS) --apps \
+ erts kernel stdlib $(PLT_APPS) $(OTP_DEPS) $(LOCAL_DEPS) $(DEPS_LOG) || test $$? -eq 2
+ $(verbose) $(ERL) -eval 'io:format("~n~s~n", [code:lib_dir(erts)]), halt().' >> $@
+
+plt: $(DIALYZER_PLT)
+
+distclean-plt:
+ $(gen_verbose) rm -f $(DIALYZER_PLT)
+
+ifneq ($(wildcard $(DIALYZER_PLT)),)
+dialyze: $(if $(filter --src,$(DIALYZER_DIRS)),,deps app)
+ $(verbose) if ! tail -n1 $(DIALYZER_PLT) | \
+ grep -q "^`$(ERL) -eval 'io:format("~s", [code:lib_dir(erts)]), halt().'`$$"; then \
+ rm $(DIALYZER_PLT); \
+ $(MAKE) plt; \
+ fi
+else
+dialyze: $(DIALYZER_PLT)
+endif
+ $(verbose) dialyzer --no_native `$(ERL) \
+ -eval "$(subst $(newline),,$(call escape_dquotes,$(call filter_opts.erl)))" \
+ -extra $(ERLC_OPTS)` $(DIALYZER_DIRS) $(DIALYZER_OPTS) $(if $(wildcard ebin/),-pa ebin/)
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-edoc edoc
+
+# Configuration.
+
+EDOC_OPTS ?=
+EDOC_SRC_DIRS ?=
+EDOC_OUTPUT ?= doc
+
+define edoc.erl
+ SrcPaths = lists:foldl(fun(P, Acc) ->
+ filelib:wildcard(atom_to_list(P) ++ "/{src,c_src}") ++ Acc
+ end, [], [$(call comma_list,$(patsubst %,'%',$(call core_native_path,$(EDOC_SRC_DIRS))))]),
+ DefaultOpts = [{dir, "$(EDOC_OUTPUT)"}, {source_path, SrcPaths}, {subpackages, false}],
+ edoc:application($(1), ".", [$(2)] ++ DefaultOpts),
+ halt(0).
+endef
+
+# Core targets.
+
+ifneq ($(strip $(EDOC_SRC_DIRS)$(wildcard doc/overview.edoc)),)
+docs:: edoc
+endif
+
+distclean:: distclean-edoc
+
+# Plugin-specific targets.
+
+edoc: distclean-edoc doc-deps
+ $(gen_verbose) $(call erlang,$(call edoc.erl,$(PROJECT),$(EDOC_OPTS)))
+
+distclean-edoc:
+ $(gen_verbose) rm -f $(EDOC_OUTPUT)/*.css $(EDOC_OUTPUT)/*.html $(EDOC_OUTPUT)/*.png $(EDOC_OUTPUT)/edoc-info
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Configuration.
+
+DTL_FULL_PATH ?=
+DTL_PATH ?= templates/
+DTL_PREFIX ?=
+DTL_SUFFIX ?= _dtl
+DTL_OPTS ?=
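+
+# With the defaults, templates/foo.dtl compiles to the module foo_dtl
+# ($(DTL_PREFIX) + name + $(DTL_SUFFIX)). When DTL_FULL_PATH is set,
+# directories become part of the module name, e.g. (illustrative)
+# templates/users/list.dtl -> users_list_dtl.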
+
+# Verbosity.
+
+dtl_verbose_0 = @echo " DTL " $(filter %.dtl,$(?F));
+dtl_verbose = $(dtl_verbose_$(V))
+
+# Core targets.
+
+DTL_PATH := $(abspath $(DTL_PATH))
+DTL_FILES := $(sort $(call core_find,$(DTL_PATH),*.dtl))
+
+ifneq ($(DTL_FILES),)
+
+DTL_NAMES = $(addprefix $(DTL_PREFIX),$(addsuffix $(DTL_SUFFIX),$(DTL_FILES:$(DTL_PATH)/%.dtl=%)))
+DTL_MODULES = $(if $(DTL_FULL_PATH),$(subst /,_,$(DTL_NAMES)),$(notdir $(DTL_NAMES)))
+BEAM_FILES += $(addsuffix .beam,$(addprefix ebin/,$(DTL_MODULES)))
+
+ifneq ($(words $(DTL_FILES)),0)
+# Rebuild templates when the Makefile changes.
+$(ERLANG_MK_TMP)/last-makefile-change-erlydtl: $(MAKEFILE_LIST) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(DTL_FILES); \
+ fi
+ $(verbose) touch $@
+
+ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change-erlydtl
+endif
+
+define erlydtl_compile.erl
+ [begin
+ Module0 = case "$(strip $(DTL_FULL_PATH))" of
+ "" ->
+ filename:basename(F, ".dtl");
+ _ ->
+ "$(call core_native_path,$(DTL_PATH))/" ++ F2 = filename:rootname(F, ".dtl"),
+ re:replace(F2, "/", "_", [{return, list}, global])
+ end,
+ Module = list_to_atom("$(DTL_PREFIX)" ++ string:to_lower(Module0) ++ "$(DTL_SUFFIX)"),
+ case erlydtl:compile(F, Module, [$(DTL_OPTS)] ++ [{out_dir, "ebin/"}, return_errors]) of
+ ok -> ok;
+ {ok, _} -> ok
+ end
+ end || F <- string:tokens("$(1)", " ")],
+ halt().
+endef
+
+ebin/$(PROJECT).app:: $(DTL_FILES) | ebin/
+ $(if $(strip $?),\
+ $(dtl_verbose) $(call erlang,$(call erlydtl_compile.erl,$(call core_native_path,$?)),\
+ -pa ebin/))
+
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, Dave Cottlehuber <dch@skunkwerks.at>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-escript escript escript-zip
+
+# Configuration.
+
+ESCRIPT_NAME ?= $(PROJECT)
+ESCRIPT_FILE ?= $(ESCRIPT_NAME)
+
+ESCRIPT_SHEBANG ?= /usr/bin/env escript
+ESCRIPT_COMMENT ?= This is an -*- erlang -*- file
+ESCRIPT_EMU_ARGS ?= -escript main $(ESCRIPT_NAME)
+
+ESCRIPT_ZIP ?= 7z a -tzip -mx=9 -mtc=off $(if $(filter-out 0,$(V)),,> /dev/null)
+ESCRIPT_ZIP_FILE ?= $(ERLANG_MK_TMP)/escript.zip
+
+# Core targets.
+
+distclean:: distclean-escript
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Escript targets:" \
+		"  escript     Build an executable escript archive"
+
+# Plugin-specific targets.
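+
+# The escript is assembled by concatenating a three-line header (shebang,
+# comment, emulator arguments) with a zip archive of the ebin/ directories
+# of the project and all of its dependencies.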
+
+escript-zip:: FULL=1
+escript-zip:: deps app
+ $(verbose) mkdir -p $(dir $(ESCRIPT_ZIP))
+ $(verbose) rm -f $(ESCRIPT_ZIP_FILE)
+ $(gen_verbose) cd .. && $(ESCRIPT_ZIP) $(ESCRIPT_ZIP_FILE) $(PROJECT)/ebin/*
+ifneq ($(DEPS),)
+ $(verbose) cd $(DEPS_DIR) && $(ESCRIPT_ZIP) $(ESCRIPT_ZIP_FILE) \
+ $(subst $(DEPS_DIR)/,,$(addsuffix /*,$(wildcard \
+ $(addsuffix /ebin,$(shell cat $(ERLANG_MK_TMP)/deps.log)))))
+endif
+
+escript:: escript-zip
+ $(gen_verbose) printf "%s\n" \
+ "#!$(ESCRIPT_SHEBANG)" \
+ "%% $(ESCRIPT_COMMENT)" \
+ "%%! $(ESCRIPT_EMU_ARGS)" > $(ESCRIPT_FILE)
+ $(verbose) cat $(ESCRIPT_ZIP_FILE) >> $(ESCRIPT_FILE)
+ $(verbose) chmod +x $(ESCRIPT_FILE)
+
+distclean-escript:
+ $(gen_verbose) rm -f $(ESCRIPT_FILE)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, Enrique Fernandez <enrique.fernandez@erlang-solutions.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: eunit apps-eunit
+
+# Configuration
+
+EUNIT_OPTS ?=
+EUNIT_ERL_OPTS ?=
+
+# Core targets.
+
+tests:: eunit
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "EUnit targets:" \
+ " eunit Run all the EUnit tests for this project"
+
+# Plugin-specific targets.
+
+define eunit.erl
+ $(call cover.erl)
+ CoverSetup(),
+ case eunit:test($1, [$(EUNIT_OPTS)]) of
+ ok -> ok;
+ error -> halt(2)
+ end,
+ CoverExport("$(call core_native_path,$(COVER_DATA_DIR))/eunit.coverdata"),
+ halt()
+endef
+
+EUNIT_ERL_OPTS += -pa $(TEST_DIR) $(CURDIR)/ebin
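+
+# For example (illustrative module and function names):
+#   $ make eunit t=my_module          # run one module's tests
+#   $ make eunit t=my_module:my_test  # run the my_test/0 test function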
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+eunit: test-build cover-data-dir
+ $(gen_verbose) $(call erlang,$(call eunit.erl,['$(t)']),$(EUNIT_ERL_OPTS))
+else
+eunit: test-build cover-data-dir
+ $(gen_verbose) $(call erlang,$(call eunit.erl,fun $(t)/0),$(EUNIT_ERL_OPTS))
+endif
+else
+EUNIT_EBIN_MODS = $(notdir $(basename $(ERL_FILES) $(BEAM_FILES)))
+EUNIT_TEST_MODS = $(notdir $(basename $(call core_find,$(TEST_DIR)/,*.erl)))
+
+EUNIT_MODS = $(foreach mod,$(EUNIT_EBIN_MODS) $(filter-out \
+ $(patsubst %,%_tests,$(EUNIT_EBIN_MODS)),$(EUNIT_TEST_MODS)),'$(mod)')
+
+eunit: test-build $(if $(IS_APP)$(ROOT_DIR),,apps-eunit) cover-data-dir
+ifneq ($(wildcard src/ $(TEST_DIR)),)
+ $(gen_verbose) $(call erlang,$(call eunit.erl,[$(call comma_list,$(EUNIT_MODS))]),$(EUNIT_ERL_OPTS))
+endif
+
+ifneq ($(ALL_APPS_DIRS),)
+apps-eunit: test-build
+ $(verbose) eunit_retcode=0 ; for app in $(ALL_APPS_DIRS); do $(MAKE) -C $$app eunit IS_APP=1; \
+ [ $$? -ne 0 ] && eunit_retcode=1 ; done ; \
+ exit $$eunit_retcode
+endif
+endif
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter proper,$(DEPS) $(TEST_DEPS)),proper)
+.PHONY: proper
+
+# Targets.
+
+tests:: proper
+
+define proper_check.erl
+ $(call cover.erl)
+ code:add_pathsa([
+ "$(call core_native_path,$(CURDIR)/ebin)",
+ "$(call core_native_path,$(DEPS_DIR)/*/ebin)",
+ "$(call core_native_path,$(TEST_DIR))"]),
+ Module = fun(M) ->
+ [true] =:= lists:usort([
+ case atom_to_list(F) of
+ "prop_" ++ _ ->
+ io:format("Testing ~p:~p/0~n", [M, F]),
+ proper:quickcheck(M:F(), nocolors);
+ _ ->
+ true
+ end
+ || {F, 0} <- M:module_info(exports)])
+ end,
+ try begin
+ CoverSetup(),
+ Res = case $(1) of
+ all -> [true] =:= lists:usort([Module(M) || M <- [$(call comma_list,$(3))]]);
+ module -> Module($(2));
+ function -> proper:quickcheck($(2), nocolors)
+ end,
+ CoverExport("$(COVER_DATA_DIR)/proper.coverdata"),
+ Res
+ end of
+ true -> halt(0);
+ _ -> halt(1)
+ catch error:undef ->
+ io:format("Undefined property or module?~n~p~n", [erlang:get_stacktrace()]),
+ halt(0)
+ end.
+endef
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+proper: test-build cover-data-dir
+ $(verbose) $(call erlang,$(call proper_check.erl,module,$(t)))
+else
+proper: test-build cover-data-dir
+ $(verbose) echo Testing $(t)/0
+ $(verbose) $(call erlang,$(call proper_check.erl,function,$(t)()))
+endif
+else
+proper: test-build cover-data-dir
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(wildcard ebin/*.beam) $(call core_find,$(TEST_DIR)/,*.beam))))))
+ $(gen_verbose) $(call erlang,$(call proper_check.erl,all,undefined,$(MODULES)))
+endif
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Verbosity.
+
+proto_verbose_0 = @echo " PROTO " $(filter %.proto,$(?F));
+proto_verbose = $(proto_verbose_$(V))
+
+# Core targets.
+
+ifneq ($(wildcard src/),)
+ifneq ($(filter gpb protobuffs,$(BUILD_DEPS) $(DEPS)),)
+PROTO_FILES := $(filter %.proto,$(ALL_SRC_FILES))
+ERL_FILES += $(addprefix src/,$(patsubst %.proto,%_pb.erl,$(notdir $(PROTO_FILES))))
+
+ifeq ($(PROTO_FILES),)
+$(ERLANG_MK_TMP)/last-makefile-change-protobuffs:
+ $(verbose) :
+else
+# Rebuild proto files when the Makefile changes.
+# We exclude $(PROJECT).d to avoid a circular dependency.
+$(ERLANG_MK_TMP)/last-makefile-change-protobuffs: $(filter-out $(PROJECT).d,$(MAKEFILE_LIST)) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(PROTO_FILES); \
+ fi
+ $(verbose) touch $@
+
+$(PROJECT).d:: $(ERLANG_MK_TMP)/last-makefile-change-protobuffs
+endif
+
+ifeq ($(filter gpb,$(BUILD_DEPS) $(DEPS)),)
+define compile_proto.erl
+ [begin
+ protobuffs_compile:generate_source(F, [
+ {output_include_dir, "./include"},
+ {output_src_dir, "./src"}])
+ end || F <- string:tokens("$1", " ")],
+ halt().
+endef
+else
+define compile_proto.erl
+ [begin
+ gpb_compile:file(F, [
+ {include_as_lib, true},
+ {module_name_suffix, "_pb"},
+ {o_hrl, "./include"},
+ {o_erl, "./src"}])
+ end || F <- string:tokens("$1", " ")],
+ halt().
+endef
+endif
+
+ifneq ($(PROTO_FILES),)
+$(PROJECT).d:: $(PROTO_FILES)
+ $(verbose) mkdir -p ebin/ include/
+ $(if $(strip $?),$(proto_verbose) $(call erlang,$(call compile_proto.erl,$?)))
+endif
+endif
+endif
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: relx-rel relx-relup distclean-relx-rel run
+
+# Configuration.
+
+RELX ?= $(ERLANG_MK_TMP)/relx
+RELX_CONFIG ?= $(CURDIR)/relx.config
+
+RELX_URL ?= https://erlang.mk/res/relx-v3.27.0
+RELX_OPTS ?=
+RELX_OUTPUT_DIR ?= _rel
+RELX_REL_EXT ?=
+RELX_TAR ?= 1
+
+ifdef SFX
+ RELX_TAR = 1
+endif
+
+ifeq ($(firstword $(RELX_OPTS)),-o)
+ RELX_OUTPUT_DIR = $(word 2,$(RELX_OPTS))
+else
+ RELX_OPTS += -o $(RELX_OUTPUT_DIR)
+endif
+
+# Core targets.
+
+ifeq ($(IS_DEP),)
+ifneq ($(wildcard $(RELX_CONFIG)),)
+rel:: relx-rel
+
+relup:: relx-relup
+endif
+endif
+
+distclean:: distclean-relx-rel
+
+# Plugin-specific targets.
+
+$(RELX): | $(ERLANG_MK_TMP)
+ $(gen_verbose) $(call core_http_get,$(RELX),$(RELX_URL))
+ $(verbose) chmod +x $(RELX)
+
+relx-rel: $(RELX) rel-deps app
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) release
+ $(verbose) $(MAKE) relx-post-rel
+ifeq ($(RELX_TAR),1)
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) tar
+endif
+
+relx-relup: $(RELX) rel-deps app
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) release
+ $(MAKE) relx-post-rel
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) relup $(if $(filter 1,$(RELX_TAR)),tar)
+
+distclean-relx-rel:
+ $(gen_verbose) rm -rf $(RELX_OUTPUT_DIR)
+
+# Default hooks.
+relx-post-rel::
+ $(verbose) :
+
+# Run target.
+
+ifeq ($(wildcard $(RELX_CONFIG)),)
+run::
+else
+
+define get_relx_release.erl
+ {ok, Config} = file:consult("$(call core_native_path,$(RELX_CONFIG))"),
+ {release, {Name, Vsn0}, _} = lists:keyfind(release, 1, Config),
+ Vsn = case Vsn0 of
+ {cmd, Cmd} -> os:cmd(Cmd);
+ semver -> "";
+ {semver, _} -> "";
+		_ -> Vsn0
+ end,
+ Extended = case lists:keyfind(extended_start_script, 1, Config) of
+ {_, true} -> "1";
+ _ -> ""
+ end,
+ io:format("~s ~s ~s", [Name, Vsn, Extended]),
+ halt(0).
+endef
+
+RELX_REL := $(shell $(call erlang,$(get_relx_release.erl)))
+RELX_REL_NAME := $(word 1,$(RELX_REL))
+RELX_REL_VSN := $(word 2,$(RELX_REL))
+RELX_REL_CMD := $(if $(word 3,$(RELX_REL)),console)
+
+ifeq ($(PLATFORM),msys2)
+RELX_REL_EXT := .cmd
+endif
+
+run:: all
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) $(RELX_REL_CMD)
+
+ifdef RELOAD
+rel::
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) ping
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) \
+ eval "io:format(\"~p~n\", [c:lm()])"
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Relx targets:" \
+ " run Compile the project, build the release and run it"
+
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, M Robert Martin <rob@version2beta.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: shell
+
+# Configuration.
+
+SHELL_ERL ?= erl
+SHELL_PATHS ?= $(CURDIR)/ebin $(TEST_DIR)
+SHELL_OPTS ?=
+
+ALL_SHELL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(SHELL_DEPS))
+
+# Core targets
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Shell targets:" \
+		"  shell              Run an Erlang shell with SHELL_OPTS or a reasonable default"
+
+# Plugin-specific targets.
+
+$(foreach dep,$(SHELL_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+build-shell-deps:
+else
+build-shell-deps: $(ALL_SHELL_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_SHELL_DEPS_DIRS) ; do \
+ if [ -z "$(strip $(FULL))" ] && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ else \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ fi \
+ done
+endif
+
+shell:: build-shell-deps
+ $(gen_verbose) $(SHELL_ERL) -pa $(SHELL_PATHS) $(SHELL_OPTS)
+
+# Copyright 2017, Stanislaw Klekot <dozzie@jarowit.net>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-sphinx sphinx
+
+# Configuration.
+
+SPHINX_BUILD ?= sphinx-build
+SPHINX_SOURCE ?= doc
+SPHINX_CONFDIR ?=
+SPHINX_FORMATS ?= html
+SPHINX_DOCTREES ?= $(ERLANG_MK_TMP)/sphinx.doctrees
+SPHINX_OPTS ?=
+
+#sphinx_html_opts =
+#sphinx_html_output = html
+#sphinx_man_opts =
+#sphinx_man_output = man
+#sphinx_latex_opts =
+#sphinx_latex_output = latex
+
+# Helpers.
+
+sphinx_build_0 = @echo " SPHINX" $1; $(SPHINX_BUILD) -N -q
+sphinx_build_1 = $(SPHINX_BUILD) -N
+sphinx_build_2 = set -x; $(SPHINX_BUILD)
+sphinx_build = $(sphinx_build_$(V))
+
+define sphinx.build
+$(call sphinx_build,$1) -b $1 -d $(SPHINX_DOCTREES) $(if $(SPHINX_CONFDIR),-c $(SPHINX_CONFDIR)) $(SPHINX_OPTS) $(sphinx_$1_opts) -- $(SPHINX_SOURCE) $(call sphinx.output,$1)
+
+endef
+
+define sphinx.output
+$(if $(sphinx_$1_output),$(sphinx_$1_output),$1)
+endef
+
+# Targets.
+
+ifneq ($(wildcard $(if $(SPHINX_CONFDIR),$(SPHINX_CONFDIR),$(SPHINX_SOURCE))/conf.py),)
+docs:: sphinx
+distclean:: distclean-sphinx
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Sphinx targets:" \
+ " sphinx Generate Sphinx documentation." \
+ "" \
+		"ReST sources and the 'conf.py' file are expected in the directory pointed" \
+		"to by SPHINX_SOURCE ('doc' by default). SPHINX_FORMATS lists the formats to" \
+		"build (only 'html' is generated by default); the target directory can be" \
+		'specified by setting sphinx_$${format}_output, e.g.: sphinx_html_output = output/html.' \
+		"Additional Sphinx options can be set in SPHINX_OPTS."
+
+# Plugin-specific targets.
+
+sphinx:
+ $(foreach F,$(SPHINX_FORMATS),$(call sphinx.build,$F))
+
+distclean-sphinx:
+ $(gen_verbose) rm -rf $(filter-out $(SPHINX_SOURCE),$(foreach F,$(SPHINX_FORMATS),$(call sphinx.output,$F)))
+
+# Copyright (c) 2017, Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: show-ERL_LIBS show-ERLC_OPTS show-TEST_ERLC_OPTS
+
+show-ERL_LIBS:
+ @echo $(ERL_LIBS)
+
+show-ERLC_OPTS:
+ @$(foreach opt,$(ERLC_OPTS) -pa ebin -I include,echo "$(opt)";)
+
+show-TEST_ERLC_OPTS:
+ @$(foreach opt,$(TEST_ERLC_OPTS) -pa ebin -I include,echo "$(opt)";)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter triq,$(DEPS) $(TEST_DEPS)),triq)
+.PHONY: triq
+
+# Targets.
+
+tests:: triq
+
+define triq_check.erl
+ $(call cover.erl)
+ code:add_pathsa([
+ "$(call core_native_path,$(CURDIR)/ebin)",
+ "$(call core_native_path,$(DEPS_DIR)/*/ebin)",
+ "$(call core_native_path,$(TEST_DIR))"]),
+ try begin
+ CoverSetup(),
+ Res = case $(1) of
+ all -> [true] =:= lists:usort([triq:check(M) || M <- [$(call comma_list,$(3))]]);
+ module -> triq:check($(2));
+ function -> triq:check($(2))
+ end,
+ CoverExport("$(COVER_DATA_DIR)/triq.coverdata"),
+ Res
+ end of
+ true -> halt(0);
+ _ -> halt(1)
+ catch error:undef ->
+ io:format("Undefined property or module?~n~p~n", [erlang:get_stacktrace()]),
+ halt(0)
+ end.
+endef
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+triq: test-build cover-data-dir
+ $(verbose) $(call erlang,$(call triq_check.erl,module,$(t)))
+else
+triq: test-build cover-data-dir
+ $(verbose) echo Testing $(t)/0
+ $(verbose) $(call erlang,$(call triq_check.erl,function,$(t)()))
+endif
+else
+triq: test-build cover-data-dir
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(wildcard ebin/*.beam) $(call core_find,$(TEST_DIR)/,*.beam))))))
+ $(gen_verbose) $(call erlang,$(call triq_check.erl,all,undefined,$(MODULES)))
+endif
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Erlang Solutions Ltd.
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: xref distclean-xref
+
+# Configuration.
+
+ifeq ($(XREF_CONFIG),)
+ XREFR_ARGS :=
+else
+ XREFR_ARGS := -c $(XREF_CONFIG)
+endif
+
+XREFR ?= $(CURDIR)/xrefr
+export XREFR
+
+XREFR_URL ?= https://github.com/inaka/xref_runner/releases/download/1.1.0/xrefr
+
+# Core targets.
+
+help::
+ $(verbose) printf '%s\n' '' \
+ 'Xref targets:' \
+		'  xref        Run Xrefr using $$XREF_CONFIG as the config file, if defined'
+
+distclean:: distclean-xref
+
+# Plugin-specific targets.
+
+$(XREFR):
+ $(gen_verbose) $(call core_http_get,$(XREFR),$(XREFR_URL))
+ $(verbose) chmod +x $(XREFR)
+
+xref: deps app $(XREFR)
+ $(gen_verbose) $(XREFR) $(XREFR_ARGS)
+
+distclean-xref:
+ $(gen_verbose) rm -rf $(XREFR)
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Viktor Söderqvist <viktor@zuiderkwast.se>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+COVER_REPORT_DIR ?= cover
+COVER_DATA_DIR ?= $(COVER_REPORT_DIR)
+
+ifdef COVER
+COVER_APPS ?= $(notdir $(ALL_APPS_DIRS))
+COVER_DEPS ?=
+endif
+
+# Code coverage for Common Test.
+
+ifdef COVER
+ifdef CT_RUN
+ifneq ($(wildcard $(TEST_DIR)),)
+test-build:: $(TEST_DIR)/ct.cover.spec
+
+$(TEST_DIR)/ct.cover.spec: cover-data-dir
+ $(gen_verbose) printf "%s\n" \
+ "{incl_app, '$(PROJECT)', details}." \
+ "{incl_dirs, '$(PROJECT)', [\"$(call core_native_path,$(CURDIR)/ebin)\" \
+ $(foreach a,$(COVER_APPS),$(comma) \"$(call core_native_path,$(APPS_DIR)/$a/ebin)\") \
+ $(foreach d,$(COVER_DEPS),$(comma) \"$(call core_native_path,$(DEPS_DIR)/$d/ebin)\")]}." \
+ '{export,"$(call core_native_path,$(abspath $(COVER_DATA_DIR))/ct.coverdata)"}.' > $@
+
+CT_RUN += -cover $(TEST_DIR)/ct.cover.spec
+endif
+endif
+endif
+
+# Code coverage for other tools.
+
+ifdef COVER
+define cover.erl
+ CoverSetup = fun() ->
+ Dirs = ["$(call core_native_path,$(CURDIR)/ebin)"
+ $(foreach a,$(COVER_APPS),$(comma) "$(call core_native_path,$(APPS_DIR)/$a/ebin)")
+ $(foreach d,$(COVER_DEPS),$(comma) "$(call core_native_path,$(DEPS_DIR)/$d/ebin)")],
+ [begin
+ case filelib:is_dir(Dir) of
+ false -> false;
+ true ->
+ case cover:compile_beam_directory(Dir) of
+ {error, _} -> halt(1);
+ _ -> true
+ end
+ end
+ end || Dir <- Dirs]
+ end,
+ CoverExport = fun(Filename) -> cover:export(Filename) end,
+endef
+else
+define cover.erl
+ CoverSetup = fun() -> ok end,
+ CoverExport = fun(_) -> ok end,
+endef
+endif
+
+# Core targets
+
+ifdef COVER
+ifneq ($(COVER_REPORT_DIR),)
+tests::
+ $(verbose) $(MAKE) --no-print-directory cover-report
+endif
+
+cover-data-dir: | $(COVER_DATA_DIR)
+
+$(COVER_DATA_DIR):
+ $(verbose) mkdir -p $(COVER_DATA_DIR)
+else
+cover-data-dir:
+endif
+
+clean:: coverdata-clean
+
+ifneq ($(COVER_REPORT_DIR),)
+distclean:: cover-report-clean
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Cover targets:" \
+		"  cover-report  Generate an HTML coverage report from previously collected" \
+ " cover data." \
+ " all.coverdata Merge all coverdata files into all.coverdata." \
+ "" \
+ "If COVER=1 is set, coverage data is generated by the targets eunit and ct. The" \
+		"target tests additionally generates an HTML coverage report from the combined" \
+ "coverdata files from each of these testing tools. HTML reports can be disabled" \
+ "by setting COVER_REPORT_DIR to empty."
+
+# Plugin specific targets
+
+COVERDATA = $(filter-out $(COVER_DATA_DIR)/all.coverdata,$(wildcard $(COVER_DATA_DIR)/*.coverdata))
+
+.PHONY: coverdata-clean
+coverdata-clean:
+ $(gen_verbose) rm -f $(COVER_DATA_DIR)/*.coverdata $(TEST_DIR)/ct.cover.spec
+
+# Merge all coverdata files into one.
+define cover_export.erl
+ $(foreach f,$(COVERDATA),cover:import("$(f)") == ok orelse halt(1),)
+ cover:export("$(COVER_DATA_DIR)/$@"), halt(0).
+endef
+
+all.coverdata: $(COVERDATA) cover-data-dir
+ $(gen_verbose) $(call erlang,$(cover_export.erl))
+
+# These are only defined if COVER_REPORT_DIR is non-empty. Set COVER_REPORT_DIR to
+# empty if you want the coverdata files but not the HTML report.
+ifneq ($(COVER_REPORT_DIR),)
+
+.PHONY: cover-report-clean cover-report
+
+cover-report-clean:
+ $(gen_verbose) rm -rf $(COVER_REPORT_DIR)
+ifneq ($(COVER_REPORT_DIR),$(COVER_DATA_DIR))
+ $(if $(shell ls -A $(COVER_DATA_DIR)/),,$(verbose) rmdir $(COVER_DATA_DIR))
+endif
+
+ifeq ($(COVERDATA),)
+cover-report:
+else
+
+# Modules which include eunit.hrl always contain one line without coverage
+# because eunit defines test/0 which is never called. We compensate for this.
+EUNIT_HRL_MODS = $(subst $(space),$(comma),$(shell \
+ grep -H -e '^\s*-include.*include/eunit\.hrl"' src/*.erl \
+ | sed "s/^src\/\(.*\)\.erl:.*/'\1'/" | uniq))
+
+define cover_report.erl
+ $(foreach f,$(COVERDATA),cover:import("$(f)") == ok orelse halt(1),)
+ Ms = cover:imported_modules(),
+ [cover:analyse_to_file(M, "$(COVER_REPORT_DIR)/" ++ atom_to_list(M)
+ ++ ".COVER.html", [html]) || M <- Ms],
+ Report = [begin {ok, R} = cover:analyse(M, module), R end || M <- Ms],
+ EunitHrlMods = [$(EUNIT_HRL_MODS)],
+ Report1 = [{M, {Y, case lists:member(M, EunitHrlMods) of
+ true -> N - 1; false -> N end}} || {M, {Y, N}} <- Report],
+ TotalY = lists:sum([Y || {_, {Y, _}} <- Report1]),
+ TotalN = lists:sum([N || {_, {_, N}} <- Report1]),
+ Perc = fun(Y, N) -> case Y + N of 0 -> 100; S -> round(100 * Y / S) end end,
+ TotalPerc = Perc(TotalY, TotalN),
+ {ok, F} = file:open("$(COVER_REPORT_DIR)/index.html", [write]),
+ io:format(F, "<!DOCTYPE html><html>~n"
+ "<head><meta charset=\"UTF-8\">~n"
+ "<title>Coverage report</title></head>~n"
+ "<body>~n", []),
+ io:format(F, "<h1>Coverage</h1>~n<p>Total: ~p%</p>~n", [TotalPerc]),
+ io:format(F, "<table><tr><th>Module</th><th>Coverage</th></tr>~n", []),
+ [io:format(F, "<tr><td><a href=\"~p.COVER.html\">~p</a></td>"
+ "<td>~p%</td></tr>~n",
+ [M, M, Perc(Y, N)]) || {M, {Y, N}} <- Report1],
+ How = "$(subst $(space),$(comma)$(space),$(basename $(COVERDATA)))",
+ Date = "$(shell date -u "+%Y-%m-%dT%H:%M:%SZ")",
+ io:format(F, "</table>~n"
+ "<p>Generated using ~s and erlang.mk on ~s.</p>~n"
+ "</body></html>", [How, Date]),
+ halt().
+endef
+
+cover-report:
+ $(verbose) mkdir -p $(COVER_REPORT_DIR)
+ $(gen_verbose) $(call erlang,$(cover_report.erl))
+
+endif
+endif # ifneq ($(COVER_REPORT_DIR),)
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: sfx
+
+ifdef RELX_REL
+ifdef SFX
+
+# Configuration.
+
+SFX_ARCHIVE ?= $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/$(RELX_REL_NAME)-$(RELX_REL_VSN).tar.gz
+SFX_OUTPUT_FILE ?= $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME).run
+
+# Core targets.
+
+rel:: sfx
+
+# Plugin-specific targets.
+
+define sfx_stub
+#!/bin/sh
+
+TMPDIR=`mktemp -d`
+ARCHIVE=`awk '/^__ARCHIVE_BELOW__$$/ {print NR + 1; exit 0;}' $$0`
+FILENAME=$$(basename $$0)
+REL=$${FILENAME%.*}
+
+tail -n+$$ARCHIVE $$0 | tar -xzf - -C $$TMPDIR
+
+$$TMPDIR/bin/$$REL console
+RET=$$?
+
+rm -rf $$TMPDIR
+
+exit $$RET
+
+__ARCHIVE_BELOW__
+endef
+
+sfx:
+ $(verbose) $(call core_render,sfx_stub,$(SFX_OUTPUT_FILE))
+ $(gen_verbose) cat $(SFX_ARCHIVE) >> $(SFX_OUTPUT_FILE)
+ $(verbose) chmod +x $(SFX_OUTPUT_FILE)
+
+endif
+endif
+
+# Copyright (c) 2013-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# External plugins.
+
+DEP_PLUGINS ?=
+
+$(foreach p,$(DEP_PLUGINS),\
+ $(eval $(if $(findstring /,$p),\
+ $(call core_dep_plugin,$p,$(firstword $(subst /, ,$p))),\
+ $(call core_dep_plugin,$p/plugins.mk,$p))))
+
+help:: help-plugins
+
+help-plugins::
+ $(verbose) :
+
+# Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015-2016, Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Fetch dependencies recursively (without building them).
+
+.PHONY: fetch-deps fetch-doc-deps fetch-rel-deps fetch-test-deps \
+ fetch-shell-deps
+
+.PHONY: $(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+fetch-deps: $(ERLANG_MK_RECURSIVE_DEPS_LIST)
+fetch-doc-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST)
+fetch-rel-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST)
+fetch-test-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST)
+fetch-shell-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+ifneq ($(SKIP_DEPS),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST):
+ $(verbose) :> $@
+else
+# By default, we fetch "normal" dependencies. They are always included,
+# regardless of the type of dependencies requested.
+#
+# $(ALL_DEPS_DIRS) includes $(BUILD_DEPS).
+
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_DOC_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_REL_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_TEST_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_SHELL_DEPS_DIRS)
+
+# Allow fetch-deps to be used with $(DEP_TYPES) to fetch multiple types
+# of dependencies with a single target.
+ifneq ($(filter doc,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_DOC_DEPS_DIRS)
+endif
+ifneq ($(filter rel,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_REL_DEPS_DIRS)
+endif
+ifneq ($(filter test,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_TEST_DEPS_DIRS)
+endif
+ifneq ($(filter shell,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_SHELL_DEPS_DIRS)
+endif
+
+ERLANG_MK_RECURSIVE_TMP_LIST := $(abspath $(ERLANG_MK_TMP)/recursive-tmp-deps-$(shell echo $$PPID).log)
+
+$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): | $(ERLANG_MK_TMP)
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_RECURSIVE_TMP_LIST)
+endif
+ $(verbose) touch $(ERLANG_MK_RECURSIVE_TMP_LIST)
+ $(verbose) set -e; for dep in $^ ; do \
+ if ! grep -qs ^$$dep$$ $(ERLANG_MK_RECURSIVE_TMP_LIST); then \
+ echo $$dep >> $(ERLANG_MK_RECURSIVE_TMP_LIST); \
+ if grep -qs -E "^[[:blank:]]*include[[:blank:]]+(erlang\.mk|.*/erlang\.mk|.*ERLANG_MK_FILENAME.*)$$" \
+ $$dep/GNUmakefile $$dep/makefile $$dep/Makefile; then \
+ $(MAKE) -C $$dep fetch-deps \
+ IS_DEP=1 \
+ ERLANG_MK_RECURSIVE_TMP_LIST=$(ERLANG_MK_RECURSIVE_TMP_LIST); \
+ fi \
+ fi \
+ done
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) sort < $(ERLANG_MK_RECURSIVE_TMP_LIST) | \
+ uniq > $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted
+ $(verbose) cmp -s $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted $@ \
+ || mv $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted $@
+ $(verbose) rm -f $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted
+ $(verbose) rm $(ERLANG_MK_RECURSIVE_TMP_LIST)
+endif
+endif # ifneq ($(SKIP_DEPS),)
+
+# List dependencies recursively.
+
+.PHONY: list-deps list-doc-deps list-rel-deps list-test-deps \
+ list-shell-deps
+
+list-deps: $(ERLANG_MK_RECURSIVE_DEPS_LIST)
+list-doc-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST)
+list-rel-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST)
+list-test-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST)
+list-shell-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+list-deps list-doc-deps list-rel-deps list-test-deps list-shell-deps:
+ $(verbose) cat $^
+
+# Query dependencies recursively.
+
+.PHONY: query-deps query-doc-deps query-rel-deps query-test-deps \
+ query-shell-deps
+
+QUERY ?= name fetch_method repo version
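+
+# For example (illustrative): only report each dependency's name and
+# version:
+#   $ make query-deps QUERY="name version"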
+
+define query_target
+$(1): $(2) clean-tmp-query.log
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(4)
+endif
+ $(verbose) $(foreach dep,$(3),\
+ echo $(PROJECT): $(foreach q,$(QUERY),$(call query_$(q),$(dep))) >> $(4) ;)
+ $(if $(filter-out query-deps,$(1)),,\
+ $(verbose) set -e; for dep in $(3) ; do \
+ if grep -qs ^$$$$dep$$$$ $(ERLANG_MK_TMP)/query.log; then \
+ :; \
+ else \
+ echo $$$$dep >> $(ERLANG_MK_TMP)/query.log; \
+ $(MAKE) -C $(DEPS_DIR)/$$$$dep $$@ QUERY="$(QUERY)" IS_DEP=1 || true; \
+ fi \
+ done)
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) touch $(4)
+ $(verbose) cat $(4)
+endif
+endef
+
+clean-tmp-query.log:
+ifeq ($(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_TMP)/query.log
+endif
+
+$(eval $(call query_target,query-deps,$(ERLANG_MK_RECURSIVE_DEPS_LIST),$(BUILD_DEPS) $(DEPS),$(ERLANG_MK_QUERY_DEPS_FILE)))
+$(eval $(call query_target,query-doc-deps,$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST),$(DOC_DEPS),$(ERLANG_MK_QUERY_DOC_DEPS_FILE)))
+$(eval $(call query_target,query-rel-deps,$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST),$(REL_DEPS),$(ERLANG_MK_QUERY_REL_DEPS_FILE)))
+$(eval $(call query_target,query-test-deps,$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST),$(TEST_DEPS),$(ERLANG_MK_QUERY_TEST_DEPS_FILE)))
+$(eval $(call query_target,query-shell-deps,$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST),$(SHELL_DEPS),$(ERLANG_MK_QUERY_SHELL_DEPS_FILE)))
diff --git a/deps/rabbitmq_web_mqtt_examples/priv/bunny.html b/deps/rabbitmq_web_mqtt_examples/priv/bunny.html
new file mode 100644
index 0000000000..3ff155b9d8
--- /dev/null
+++ b/deps/rabbitmq_web_mqtt_examples/priv/bunny.html
@@ -0,0 +1,169 @@
+<!doctype html>
+<html><head>
+ <script src="https://code.jquery.com/jquery-3.4.1.min.js"></script>
+ <script src="mqttws31.js" type="text/javascript"></script>
+
+ <style>
+ #cnvs {
+ border: none;
+ -moz-border-radius: 4px;
+ cursor: url(pencil.cur),crosshair;
+ position: absolute;
+ overflow: hidden;
+ width: 100%;
+ height: 100%;
+ }
+ #cnvs:active {
+ cursor: url(pencil.cur),crosshair;
+ }
+ body {
+ overflow: hidden;
+ }
+ </style>
+ <title>RabbitMQ Web MQTT Examples: Bunny Drawing</title>
+ <link href="main.css" rel="stylesheet" type="text/css"/>
+</head><body lang="en">
+  <h1><a href="index.html">RabbitMQ Web MQTT Examples</a> &gt; Bunny Drawing</h1>
+ <canvas id="cnvs"></canvas>
+<script>
+var send; var draw;
+send = draw = function(){};
+
+var lines = [];
+
+var canvas = document.getElementById('cnvs');
+
+if (canvas.getContext) {
+ var ctx = canvas.getContext('2d');
+
+ var img = new Image();
+ img.onload = function() {
+ ctx.drawImage(img, 230, 160);
+ };
+ img.src = 'bunny.png';
+
+ draw = function(p) {
+ ctx.beginPath();
+ ctx.moveTo(p.x1, p.y1);
+ ctx.lineTo(p.x2, p.y2);
+ ctx.stroke();
+ ctx.drawImage(img, 230, 160);
+ };
+
+ var do_resize = function() {
+ canvas.width = window.innerWidth;
+ canvas.height = window.innerHeight;
+
+ ctx.font = "bold 20px sans-serif";
+ ctx.fillStyle = "#444";
+ ctx.fillText("Draw wings on the bunny!", 260, 100);
+ ctx.font = "normal 16px sans-serif";
+ ctx.fillStyle = "#888";
+ ctx.fillText("(For more fun open a second browser)", 255, 130);
+
+ ctx.drawImage(img, 230, 160);
+
+ ctx.strokeStyle = "#fa0";
+ ctx.lineWidth = "10";
+ ctx.lineCap = "round";
+
+ $.map(lines, function (p) {
+ draw(p);
+ });
+ };
+
+ $(window).resize(do_resize);
+ $(do_resize);
+
+
+ var pos = $('#cnvs').position();
+ var prev = null;
+ $('#cnvs').mousedown(function(evt) {
+ evt.preventDefault();
+ evt.stopPropagation();
+ $('#cnvs').bind('mousemove', function(e) {
+ var curr = {x:e.pageX-pos.left, y:e.pageY-pos.top};
+ if (!prev) {
+ prev = curr;
+ return;
+ }
+ if (Math.sqrt(Math.pow(prev.x - curr.x, 2) +
+ Math.pow(prev.y - curr.y, 2)) > 8) {
+        var p = {x1:prev.x, y1:prev.y, x2:curr.x, y2:curr.y};
+ lines.push(p);
+ draw(p);
+ send(JSON.stringify(p));
+ prev = curr;
+ }
+ });
+ });
+ $('html').mouseup(function() {
+ prev = null;
+ $('#cnvs').unbind('mousemove');
+ });
+}
+else {
+ document.write("Sorry - this demo requires a browser with canvas tag support.");
+}
+
+// MQTT boilerplate
+
+var debug = function(){
+ if (window.console && console.log && console.log.apply) {
+ console.log.apply(console, arguments);
+ }
+};
+
+var wsbroker = location.hostname; // MQTT-over-WebSockets broker address
+var wsport = 15675; // RabbitMQ Web MQTT plugin default port
+
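+// Paho.MQTT.Client(host, port, path, clientId): the client connects over
+// WebSocket to ws(s)://host:port/ws; the random suffix keeps client ids
+// unique across browser tabs.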
+var client = new Paho.MQTT.Client(wsbroker, wsport, "/ws",
+    "myclientid_" + Math.floor(Math.random() * 100));
+
+client.onConnectionLost = function (responseObject) {
+ debug("CONNECTION LOST - " + responseObject.errorMessage);
+};
+
+client.onMessageArrived = function (message) {
+ debug("RECEIVE ON " + message.destinationName + " PAYLOAD " + message.payloadString);
+
+ var p = JSON.parse(message.payloadString);
+ lines.push(p);
+ draw(p, true);
+};
+
+var options = {
+ timeout: 3,
+ keepAliveInterval: 30,
+ onSuccess: function () {
+ debug("CONNECTION SUCCESS");
+ client.subscribe("bunny", {qos: 1});
+ },
+ onFailure: function (message) {
+ debug("CONNECTION FAILURE - " + message.errorMessage);
+ }
+};
+
+if (location.protocol == "https:") {
+ options.useSSL = true;
+}
+
+debug("CONNECT TO " + wsbroker + ":" + wsport);
+client.connect(options);
+
+send = function(data) {
+  var message = new Paho.MQTT.Message(data);
+ message.destinationName = "bunny";
+ debug("SEND ON " + message.destinationName + " PAYLOAD " + data);
+ client.send(message);
+};
+
+</script>
+</body></html>
diff --git a/deps/rabbitmq_web_mqtt_examples/priv/bunny.png b/deps/rabbitmq_web_mqtt_examples/priv/bunny.png
new file mode 100644
index 0000000000..6c2284ba84
--- /dev/null
+++ b/deps/rabbitmq_web_mqtt_examples/priv/bunny.png
Binary files differ
diff --git a/deps/rabbitmq_web_mqtt_examples/priv/echo.html b/deps/rabbitmq_web_mqtt_examples/priv/echo.html
new file mode 100644
index 0000000000..18ccb324ae
--- /dev/null
+++ b/deps/rabbitmq_web_mqtt_examples/priv/echo.html
@@ -0,0 +1,135 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <meta http-equiv="Content-Type" content="text/html;charset=utf-8"/>
+ <title>RabbitMQ Web MQTT Example</title>
+ <script src="https://code.jquery.com/jquery-3.4.1.min.js"></script>
+ <script src="mqttws31.js" type="text/javascript"></script>
+ <style>
+ .box {
+ width: 440px;
+ float: left;
+ margin: 0 20px 0 20px;
+ }
+
+ .box div, .box input {
+ border: 1px solid;
+ -moz-border-radius: 4px;
+ border-radius: 4px;
+ width: 100%;
+ padding: 5px;
+ margin: 3px 0 10px 0;
+ }
+
+ .box div {
+ border-color: grey;
+ height: 300px;
+ overflow: auto;
+ }
+
+ div code {
+ display: block;
+ }
+
+ #first div code {
+ -moz-border-radius: 2px;
+ border-radius: 2px;
+ border: 1px solid #eee;
+ margin-bottom: 5px;
+ }
+
+ #second div {
+ font-size: 0.8em;
+ }
+ </style>
+ <link href="main.css" rel="stylesheet" type="text/css"/>
+ </head>
+ <body lang="en">
+ <h1>RabbitMQ Web MQTT Example</h1>
+
+ <div id="first" class="box">
+ <h2>Received</h2>
+ <div></div>
+      <form><input autocomplete="off" value="Type here..."></form>
+ </div>
+
+ <div id="second" class="box">
+ <h2>Logs</h2>
+ <div></div>
+ </div>
+
+ <script>
+ var has_had_focus = false;
+ var pipe = function(el_name, send) {
+ var div = $(el_name + ' div');
+ var inp = $(el_name + ' input');
+ var form = $(el_name + ' form');
+
+ var print = function(m, p) {
+ p = (p === undefined) ? '' : JSON.stringify(p);
+ div.append($("<code>").text(m + ' ' + p));
+ div.scrollTop(div.scrollTop() + 10000);
+ };
+
+ if (send) {
+ form.submit(function() {
+ send(inp.val());
+ inp.val('');
+ return false;
+ });
+ }
+ return print;
+ };
+
+ var print_first = pipe('#first', function(data) {
+        var message = new Paho.MQTT.Message(data);
+ message.destinationName = "test";
+ debug("SEND ON " + message.destinationName + " PAYLOAD " + data);
+ client.send(message);
+ });
+
+ var debug = pipe('#second');
+
+      var wsbroker = location.hostname; // MQTT-over-WebSockets broker address
+      var wsport = 15675; // RabbitMQ Web MQTT plugin default port
+
+      var client = new Paho.MQTT.Client(wsbroker, wsport, "/ws",
+          "myclientid_" + Math.floor(Math.random() * 100));
+
+ client.onConnectionLost = function (responseObject) {
+ debug("CONNECTION LOST - " + responseObject.errorMessage);
+ };
+
+ client.onMessageArrived = function (message) {
+ debug("RECEIVE ON " + message.destinationName + " PAYLOAD " + message.payloadString);
+ print_first(message.payloadString);
+ };
+
+ var options = {
+ timeout: 3,
+ keepAliveInterval: 30,
+ onSuccess: function () {
+ debug("CONNECTION SUCCESS");
+ client.subscribe("test", {qos: 1});
+ },
+ onFailure: function (message) {
+ debug("CONNECTION FAILURE - " + message.errorMessage);
+ }
+ };
+
+ if (location.protocol == "https:") {
+ options.useSSL = true;
+ }
+
+ debug("CONNECT TO " + wsbroker + ":" + wsport);
+ client.connect(options);
+
+ $('#first input').focus(function() {
+ if (!has_had_focus) {
+ has_had_focus = true;
+ $(this).val("");
+ }
+ });
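+
+    // Illustrative sketch only (not used by this example): Paho also supports
+    // registering a last-will message at connect time, which the broker
+    // publishes if this client disconnects abnormally. Assuming the "test"
+    // topic used above:
+    //
+    //   var will = new Paho.MQTT.Message("echo client lost");
+    //   will.destinationName = "test";
+    //   options.willMessage = will;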
+ </script>
+ </body>
+</html>
diff --git a/deps/rabbitmq_web_mqtt_examples/priv/index.html b/deps/rabbitmq_web_mqtt_examples/priv/index.html
new file mode 100644
index 0000000000..4a39f12389
--- /dev/null
+++ b/deps/rabbitmq_web_mqtt_examples/priv/index.html
@@ -0,0 +1,15 @@
+<!doctype html>
+<html lang="en">
+ <head>
+ <meta charset="utf-8">
+ <title>RabbitMQ Web MQTT Examples</title>
+ <link href="main.css" rel="stylesheet" type="text/css"/>
+ </head>
+ <body>
+ <h1>RabbitMQ Web MQTT Examples</h1>
+ <ul class="menu">
+ <li><a href="echo.html">Simple Echo Server</a></li>
+ <li><a href="bunny.html">Bunny Drawing</a></li>
+ </ul>
+ </body>
+</html>
diff --git a/deps/rabbitmq_web_mqtt_examples/priv/main.css b/deps/rabbitmq_web_mqtt_examples/priv/main.css
new file mode 100644
index 0000000000..a5cdcda152
--- /dev/null
+++ b/deps/rabbitmq_web_mqtt_examples/priv/main.css
@@ -0,0 +1,38 @@
+body {
+ font-family: "Arial";
+ color: #444;
+}
+
+h1, h2 {
+ color: #f60;
+ font-weight: normal;
+}
+
+h1 {
+ font-size: 1.5em;
+}
+
+h2 {
+ font-size: 1.2em;
+ margin: 0;
+}
+
+a {
+ color: #f60;
+ border: 1px solid #fda;
+ background: #fff0e0;
+ border-radius: 3px; -moz-border-radius: 3px;
+ padding: 2px;
+ text-decoration: none;
+ /* font-weight: bold; */
+}
+
+ul.menu {
+ list-style-type: none;
+ padding: 0;
+ margin: 0;
+}
+
+ul.menu li {
+ padding: 5px 0;
+}
diff --git a/deps/rabbitmq_web_mqtt_examples/priv/mqttws31.js b/deps/rabbitmq_web_mqtt_examples/priv/mqttws31.js
new file mode 100644
index 0000000000..4456646e6e
--- /dev/null
+++ b/deps/rabbitmq_web_mqtt_examples/priv/mqttws31.js
@@ -0,0 +1,2143 @@
+/*******************************************************************************
+ * Copyright (c) 2013 IBM Corp.
+ *
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Eclipse Public License v1.0
+ * and Eclipse Distribution License v1.0 which accompany this distribution.
+ *
+ * The Eclipse Public License is available at
+ * https://www.eclipse.org/legal/epl-v10.html
+ * and the Eclipse Distribution License is available at
+ * https://www.eclipse.org/org/documents/edl-v10.php.
+ *
+ * Contributors:
+ * Andrew Banks - initial API and implementation and initial documentation
+ *******************************************************************************/
+
+
+// Only expose a single object name in the global namespace.
+// Everything must go through this module. Global Paho.MQTT module
+// only has a single public function, client, which returns
+// a Paho.MQTT client object given connection details.
+
+/**
+ * Send and receive messages using web browsers.
+ * <p>
+ * This programming interface lets a JavaScript client application use the MQTT V3.1 or
+ * V3.1.1 protocol to connect to an MQTT-supporting messaging server.
+ *
+ * The functionality supported includes:
+ * <ol>
+ * <li>Connecting to and disconnecting from a server. The server is identified by its host name and port number.
+ * <li>Specifying options that relate to the communications link with the server,
+ * for example the frequency of keep-alive heartbeats, and whether SSL/TLS is required.
+ * <li>Subscribing to and receiving messages from MQTT Topics.
+ * <li>Publishing messages to MQTT Topics.
+ * </ol>
+ * <p>
+ * The API consists of two main objects:
+ * <dl>
+ * <dt><b>{@link Paho.MQTT.Client}</b></dt>
+ * <dd>This contains methods that provide the functionality of the API,
+ * including provision of callbacks that notify the application when a message
+ * arrives from or is delivered to the messaging server,
+ * or when the status of its connection to the messaging server changes.</dd>
+ * <dt><b>{@link Paho.MQTT.Message}</b></dt>
+ * <dd>This encapsulates the payload of the message along with various attributes
+ * associated with its delivery, in particular the destination to which it has
+ * been (or is about to be) sent.</dd>
+ * </dl>
+ * <p>
+ * The programming interface validates parameters passed to it, and will throw
+ * an Error containing an error message intended for developer use, if it detects
+ * an error with any parameter.
+ * <p>
+ * Example:
+ *
+ * <code><pre>
+client = new Paho.MQTT.Client(location.hostname, Number(location.port), "clientId");
+client.onConnectionLost = onConnectionLost;
+client.onMessageArrived = onMessageArrived;
+client.connect({onSuccess:onConnect});
+
+function onConnect() {
+ // Once a connection has been made, make a subscription and send a message.
+ console.log("onConnect");
+ client.subscribe("/World");
+ message = new Paho.MQTT.Message("Hello");
+ message.destinationName = "/World";
+ client.send(message);
+};
+function onConnectionLost(responseObject) {
+ if (responseObject.errorCode !== 0)
+ console.log("onConnectionLost:"+responseObject.errorMessage);
+};
+function onMessageArrived(message) {
+ console.log("onMessageArrived:"+message.payloadString);
+ client.disconnect();
+};
+ * </pre></code>
+ * @namespace Paho.MQTT
+ */
+
+if (typeof Paho === "undefined") {
+ Paho = {};
+}
+
+Paho.MQTT = (function (global) {
+
+ // Private variables below, these are only visible inside the function closure
+ // which is used to define the module.
+
+ var version = "@VERSION@";
+ var buildLevel = "@BUILDLEVEL@";
+
+ /**
+	 * Unique message type identifiers, with their
+	 * associated integer values.
+ * @private
+ */
+ var MESSAGE_TYPE = {
+ CONNECT: 1,
+ CONNACK: 2,
+ PUBLISH: 3,
+ PUBACK: 4,
+ PUBREC: 5,
+ PUBREL: 6,
+ PUBCOMP: 7,
+ SUBSCRIBE: 8,
+ SUBACK: 9,
+ UNSUBSCRIBE: 10,
+ UNSUBACK: 11,
+ PINGREQ: 12,
+ PINGRESP: 13,
+ DISCONNECT: 14
+ };
+
+ // Collection of utility methods used to simplify module code
+ // and promote the DRY pattern.
+
+ /**
+	 * Validate an object's parameter names to ensure they
+	 * match a list of expected variable names for this option
+	 * type. Used to ensure option objects passed into the API don't
+	 * contain erroneous parameters.
+ * @param {Object} obj - User options object
+ * @param {Object} keys - valid keys and types that may exist in obj.
+ * @throws {Error} Invalid option parameter found.
+ * @private
+ */
+ var validate = function(obj, keys) {
+ for (var key in obj) {
+ if (obj.hasOwnProperty(key)) {
+ if (keys.hasOwnProperty(key)) {
+ if (typeof obj[key] !== keys[key])
+ throw new Error(format(ERROR.INVALID_TYPE, [typeof obj[key], key]));
+ } else {
+ var errorStr = "Unknown property, " + key + ". Valid properties are:";
+ for (var key in keys)
+ if (keys.hasOwnProperty(key))
+ errorStr = errorStr+" "+key;
+ throw new Error(errorStr);
+ }
+ }
+ }
+ };
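+
+	// For example (illustrative): validate({timeout: "3"}, {timeout: "number"})
+	// throws "AMQJS0012E Invalid type string for timeout.", while an unknown
+	// key such as {timeOut: 3} throws the "Unknown property" error instead.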
+
+ /**
+ * Return a new function which runs the user function bound
+ * to a fixed scope.
+	 * @param {function} f - the user function.
+	 * @param {object} scope - the scope to bind it to.
+	 * @return {function} the user function bound to the given scope.
+ * @private
+ */
+ var scope = function (f, scope) {
+ return function () {
+ return f.apply(scope, arguments);
+ };
+ };
+
+ /**
+	 * Unique error codes, with their
+	 * associated error message texts.
+ * @private
+ */
+ var ERROR = {
+ OK: {code:0, text:"AMQJSC0000I OK."},
+ CONNECT_TIMEOUT: {code:1, text:"AMQJSC0001E Connect timed out."},
+ SUBSCRIBE_TIMEOUT: {code:2, text:"AMQJS0002E Subscribe timed out."},
+ UNSUBSCRIBE_TIMEOUT: {code:3, text:"AMQJS0003E Unsubscribe timed out."},
+ PING_TIMEOUT: {code:4, text:"AMQJS0004E Ping timed out."},
+ INTERNAL_ERROR: {code:5, text:"AMQJS0005E Internal error. Error Message: {0}, Stack trace: {1}"},
+ CONNACK_RETURNCODE: {code:6, text:"AMQJS0006E Bad Connack return code:{0} {1}."},
+ SOCKET_ERROR: {code:7, text:"AMQJS0007E Socket error:{0}."},
+ SOCKET_CLOSE: {code:8, text:"AMQJS0008I Socket closed."},
+ MALFORMED_UTF: {code:9, text:"AMQJS0009E Malformed UTF data:{0} {1} {2}."},
+ UNSUPPORTED: {code:10, text:"AMQJS0010E {0} is not supported by this browser."},
+ INVALID_STATE: {code:11, text:"AMQJS0011E Invalid state {0}."},
+ INVALID_TYPE: {code:12, text:"AMQJS0012E Invalid type {0} for {1}."},
+ INVALID_ARGUMENT: {code:13, text:"AMQJS0013E Invalid argument {0} for {1}."},
+ UNSUPPORTED_OPERATION: {code:14, text:"AMQJS0014E Unsupported operation."},
+ INVALID_STORED_DATA: {code:15, text:"AMQJS0015E Invalid data in local storage key={0} value={1}."},
+ INVALID_MQTT_MESSAGE_TYPE: {code:16, text:"AMQJS0016E Invalid MQTT message type {0}."},
+ MALFORMED_UNICODE: {code:17, text:"AMQJS0017E Malformed Unicode string:{0} {1}."},
+ };
+
+ /** CONNACK RC Meaning. */
+ var CONNACK_RC = {
+ 0:"Connection Accepted",
+ 1:"Connection Refused: unacceptable protocol version",
+ 2:"Connection Refused: identifier rejected",
+ 3:"Connection Refused: server unavailable",
+ 4:"Connection Refused: bad user name or password",
+ 5:"Connection Refused: not authorized"
+ };
+
+ /**
+ * Format an error message text.
+ * @private
+ * @param {error} ERROR.KEY value above.
+ * @param {substitutions} [array] substituted into the text.
+ * @return the text with the substitutions made.
+ */
+ var format = function(error, substitutions) {
+ var text = error.text;
+ if (substitutions) {
+ var field,start;
+ for (var i=0; i<substitutions.length; i++) {
+ field = "{"+i+"}";
+ start = text.indexOf(field);
+				if(start >= 0) {
+ var part1 = text.substring(0,start);
+ var part2 = text.substring(start+field.length);
+ text = part1+substitutions[i]+part2;
+ }
+ }
+ }
+ return text;
+ };
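+
+	// For example (illustrative): format(ERROR.CONNACK_RETURNCODE, [5, CONNACK_RC[5]])
+	// returns "AMQJS0006E Bad Connack return code:5 Connection Refused: not authorized.".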
+
+	// MQTT v3.1 protocol header: length-prefixed name "MQIsdp" (length 6), protocol level 3.
+	var MqttProtoIdentifierv3 = [0x00,0x06,0x4d,0x51,0x49,0x73,0x64,0x70,0x03];
+	// MQTT v3.1.1 protocol header: length-prefixed name "MQTT" (length 4), protocol level 4.
+	var MqttProtoIdentifierv4 = [0x00,0x04,0x4d,0x51,0x54,0x54,0x04];
+
+ /**
+ * Construct an MQTT wire protocol message.
+ * @param type MQTT packet type.
+ * @param options optional wire message attributes.
+ *
+ * Optional properties
+ *
+ * messageIdentifier: message ID in the range [0..65535]
+ * payloadMessage: Application Message - PUBLISH only
+ * connectStrings: array of 0 or more Strings to be put into the CONNECT payload
+ * topics: array of strings (SUBSCRIBE, UNSUBSCRIBE)
+ * requestQoS: array of QoS values [0..2]
+ *
+ * "Flag" properties
+ * cleanSession: true if present / false if absent (CONNECT)
+ * willMessage: true if present / false if absent (CONNECT)
+ * isRetained: true if present / false if absent (CONNECT)
+ * userName: true if present / false if absent (CONNECT)
+ * password: true if present / false if absent (CONNECT)
+ * keepAliveInterval: integer [0..65535] (CONNECT)
+ *
+ * @private
+ * @ignore
+ */
+ var WireMessage = function (type, options) {
+ this.type = type;
+ for (var name in options) {
+ if (options.hasOwnProperty(name)) {
+ this[name] = options[name];
+ }
+ }
+ };
+
+ WireMessage.prototype.encode = function() {
+ // Compute the first byte of the fixed header
+ var first = ((this.type & 0x0f) << 4);
+
+ /*
+ * Now calculate the length of the variable header + payload by adding up the lengths
+ * of all the component parts
+ */
+
+ var remLength = 0;
+ var topicStrLength = new Array();
+ var destinationNameLength = 0;
+
+ // if the message contains a messageIdentifier then we need two bytes for that
+ if (this.messageIdentifier != undefined)
+ remLength += 2;
+
+ switch(this.type) {
+		// If this is a CONNECT then we need to include its protocol header plus 3 bytes (connect flags and keepalive)
+ case MESSAGE_TYPE.CONNECT:
+ switch(this.mqttVersion) {
+ case 3:
+ remLength += MqttProtoIdentifierv3.length + 3;
+ break;
+ case 4:
+ remLength += MqttProtoIdentifierv4.length + 3;
+ break;
+ }
+
+ remLength += UTF8Length(this.clientId) + 2;
+ if (this.willMessage != undefined) {
+ remLength += UTF8Length(this.willMessage.destinationName) + 2;
+ // Will message is always a string, sent as UTF-8 characters with a preceding length.
+ var willMessagePayloadBytes = this.willMessage.payloadBytes;
+ if (!(willMessagePayloadBytes instanceof Uint8Array))
+				willMessagePayloadBytes = new Uint8Array(willMessagePayloadBytes);
+				remLength += willMessagePayloadBytes.byteLength + 2;
+ }
+ if (this.userName != undefined)
+ remLength += UTF8Length(this.userName) + 2;
+ if (this.password != undefined)
+ remLength += UTF8Length(this.password) + 2;
+ break;
+
+ // Subscribe, Unsubscribe can both contain topic strings
+ case MESSAGE_TYPE.SUBSCRIBE:
+ first |= 0x02; // Qos = 1;
+ for ( var i = 0; i < this.topics.length; i++) {
+ topicStrLength[i] = UTF8Length(this.topics[i]);
+ remLength += topicStrLength[i] + 2;
+ }
+ remLength += this.requestedQos.length; // 1 byte for each topic's Qos
+ // QoS on Subscribe only
+ break;
+
+ case MESSAGE_TYPE.UNSUBSCRIBE:
+ first |= 0x02; // Qos = 1;
+ for ( var i = 0; i < this.topics.length; i++) {
+ topicStrLength[i] = UTF8Length(this.topics[i]);
+ remLength += topicStrLength[i] + 2;
+ }
+ break;
+
+ case MESSAGE_TYPE.PUBREL:
+ first |= 0x02; // Qos = 1;
+ break;
+
+ case MESSAGE_TYPE.PUBLISH:
+ if (this.payloadMessage.duplicate) first |= 0x08;
+			first |= (this.payloadMessage.qos << 1);
+ if (this.payloadMessage.retained) first |= 0x01;
+ destinationNameLength = UTF8Length(this.payloadMessage.destinationName);
+ remLength += destinationNameLength + 2;
+ var payloadBytes = this.payloadMessage.payloadBytes;
+ remLength += payloadBytes.byteLength;
+ if (payloadBytes instanceof ArrayBuffer)
+ payloadBytes = new Uint8Array(payloadBytes);
+ else if (!(payloadBytes instanceof Uint8Array))
+ payloadBytes = new Uint8Array(payloadBytes.buffer);
+ break;
+
+ case MESSAGE_TYPE.DISCONNECT:
+ break;
+
+ default:
+ ;
+ }
+
+ // Now we can allocate a buffer for the message
+
+ var mbi = encodeMBI(remLength); // Convert the length to MQTT MBI format
+ var pos = mbi.length + 1; // Offset of start of variable header
+ var buffer = new ArrayBuffer(remLength + pos);
+ var byteStream = new Uint8Array(buffer); // view it as a sequence of bytes
+
+ //Write the fixed header into the buffer
+ byteStream[0] = first;
+ byteStream.set(mbi,1);
+
+ // If this is a PUBLISH then the variable header starts with a topic
+ if (this.type == MESSAGE_TYPE.PUBLISH)
+ pos = writeString(this.payloadMessage.destinationName, destinationNameLength, byteStream, pos);
+ // If this is a CONNECT then the variable header contains the protocol name/version, flags and keepalive time
+
+ else if (this.type == MESSAGE_TYPE.CONNECT) {
+ switch (this.mqttVersion) {
+ case 3:
+ byteStream.set(MqttProtoIdentifierv3, pos);
+ pos += MqttProtoIdentifierv3.length;
+ break;
+ case 4:
+ byteStream.set(MqttProtoIdentifierv4, pos);
+ pos += MqttProtoIdentifierv4.length;
+ break;
+ }
+ var connectFlags = 0;
+ if (this.cleanSession)
+ connectFlags = 0x02;
+ if (this.willMessage != undefined ) {
+ connectFlags |= 0x04;
+ connectFlags |= (this.willMessage.qos<<3);
+ if (this.willMessage.retained) {
+ connectFlags |= 0x20;
+ }
+ }
+ if (this.userName != undefined)
+ connectFlags |= 0x80;
+ if (this.password != undefined)
+ connectFlags |= 0x40;
+ byteStream[pos++] = connectFlags;
+ pos = writeUint16 (this.keepAliveInterval, byteStream, pos);
+ }
+
+ // Output the messageIdentifier - if there is one
+ if (this.messageIdentifier != undefined)
+ pos = writeUint16 (this.messageIdentifier, byteStream, pos);
+
+ switch(this.type) {
+ case MESSAGE_TYPE.CONNECT:
+ pos = writeString(this.clientId, UTF8Length(this.clientId), byteStream, pos);
+ if (this.willMessage != undefined) {
+ pos = writeString(this.willMessage.destinationName, UTF8Length(this.willMessage.destinationName), byteStream, pos);
+ pos = writeUint16(willMessagePayloadBytes.byteLength, byteStream, pos);
+ byteStream.set(willMessagePayloadBytes, pos);
+ pos += willMessagePayloadBytes.byteLength;
+
+ }
+ if (this.userName != undefined)
+ pos = writeString(this.userName, UTF8Length(this.userName), byteStream, pos);
+ if (this.password != undefined)
+ pos = writeString(this.password, UTF8Length(this.password), byteStream, pos);
+ break;
+
+ case MESSAGE_TYPE.PUBLISH:
+			// PUBLISH has a text or binary payload; if text, no 2-byte length field is added, just the UTF-8 characters.
+ byteStream.set(payloadBytes, pos);
+
+ break;
+
+// case MESSAGE_TYPE.PUBREC:
+// case MESSAGE_TYPE.PUBREL:
+// case MESSAGE_TYPE.PUBCOMP:
+// break;
+
+ case MESSAGE_TYPE.SUBSCRIBE:
+ // SUBSCRIBE has a list of topic strings and request QoS
+ for (var i=0; i<this.topics.length; i++) {
+ pos = writeString(this.topics[i], topicStrLength[i], byteStream, pos);
+ byteStream[pos++] = this.requestedQos[i];
+ }
+ break;
+
+ case MESSAGE_TYPE.UNSUBSCRIBE:
+ // UNSUBSCRIBE has a list of topic strings
+ for (var i=0; i<this.topics.length; i++)
+ pos = writeString(this.topics[i], topicStrLength[i], byteStream, pos);
+ break;
+
+ default:
+ // Do nothing.
+ }
+
+ return buffer;
+	};
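+
+	// For example (illustrative): a SUBSCRIBE packet encodes its fixed-header
+	// byte as (MESSAGE_TYPE.SUBSCRIBE << 4) | 0x02 = 0x82, and a QoS 1 retained
+	// PUBLISH as (3 << 4) | (1 << 1) | 0x01 = 0x33.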
+
+ function decodeMessage(input,pos) {
+ var startingPos = pos;
+ var first = input[pos];
+ var type = first >> 4;
+ var messageInfo = first &= 0x0f;
+ pos += 1;
+
+
+ // Decode the remaining length (MBI format)
+
+ var digit;
+ var remLength = 0;
+ var multiplier = 1;
+ do {
+ if (pos == input.length) {
+ return [null,startingPos];
+ }
+ digit = input[pos++];
+ remLength += ((digit & 0x7F) * multiplier);
+ multiplier *= 128;
+ } while ((digit & 0x80) != 0);
+
+ var endPos = pos+remLength;
+ if (endPos > input.length) {
+ return [null,startingPos];
+ }
+
+ var wireMessage = new WireMessage(type);
+ switch(type) {
+ case MESSAGE_TYPE.CONNACK:
+ var connectAcknowledgeFlags = input[pos++];
+ if (connectAcknowledgeFlags & 0x01)
+ wireMessage.sessionPresent = true;
+ wireMessage.returnCode = input[pos++];
+ break;
+
+ case MESSAGE_TYPE.PUBLISH:
+ var qos = (messageInfo >> 1) & 0x03;
+
+ var len = readUint16(input, pos);
+ pos += 2;
+ var topicName = parseUTF8(input, pos, len);
+ pos += len;
+ // If QoS 1 or 2 there will be a messageIdentifier
+ if (qos > 0) {
+ wireMessage.messageIdentifier = readUint16(input, pos);
+ pos += 2;
+ }
+
+ var message = new Paho.MQTT.Message(input.subarray(pos, endPos));
+ if ((messageInfo & 0x01) == 0x01)
+ message.retained = true;
+ if ((messageInfo & 0x08) == 0x08)
+ message.duplicate = true;
+ message.qos = qos;
+ message.destinationName = topicName;
+ wireMessage.payloadMessage = message;
+ break;
+
+ case MESSAGE_TYPE.PUBACK:
+ case MESSAGE_TYPE.PUBREC:
+ case MESSAGE_TYPE.PUBREL:
+ case MESSAGE_TYPE.PUBCOMP:
+ case MESSAGE_TYPE.UNSUBACK:
+ wireMessage.messageIdentifier = readUint16(input, pos);
+ break;
+
+ case MESSAGE_TYPE.SUBACK:
+ wireMessage.messageIdentifier = readUint16(input, pos);
+ pos += 2;
+ wireMessage.returnCode = input.subarray(pos, endPos);
+ break;
+
+ default:
+ ;
+ }
+
+ return [wireMessage,endPos];
+ }
+
+ function writeUint16(input, buffer, offset) {
+ buffer[offset++] = input >> 8; //MSB
+ buffer[offset++] = input % 256; //LSB
+ return offset;
+ }
+
+ function writeString(input, utf8Length, buffer, offset) {
+ offset = writeUint16(utf8Length, buffer, offset);
+ stringToUTF8(input, buffer, offset);
+ return offset + utf8Length;
+ }
+
+ function readUint16(buffer, offset) {
+ return 256*buffer[offset] + buffer[offset+1];
+ }
+
+ /**
+ * Encodes an MQTT Multi-Byte Integer
+ * @private
+ */
+ function encodeMBI(number) {
+ var output = new Array(1);
+ var numBytes = 0;
+
+ do {
+ var digit = number % 128;
+ number = number >> 7;
+ if (number > 0) {
+ digit |= 0x80;
+ }
+ output[numBytes++] = digit;
+ } while ( (number > 0) && (numBytes<4) );
+
+ return output;
+ }
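+
+	// Worked example: encodeMBI(321) returns [0xC1, 0x02], because
+	// 321 = 65 + 2*128; the low byte 65 (0x41) carries the 0x80
+	// continuation bit, and the next byte holds 2.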
+
+ /**
+ * Takes a String and calculates its length in bytes when encoded in UTF8.
+ * @private
+ */
+ function UTF8Length(input) {
+ var output = 0;
+ for (var i = 0; i<input.length; i++)
+ {
+ var charCode = input.charCodeAt(i);
+ if (charCode > 0x7FF)
+ {
+			// Surrogate pair means it's a 4-byte character
+ if (0xD800 <= charCode && charCode <= 0xDBFF)
+ {
+ i++;
+ output++;
+ }
+ output +=3;
+ }
+ else if (charCode > 0x7F)
+ output +=2;
+ else
+ output++;
+ }
+ return output;
+ }
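+
+	// For example: UTF8Length("A") is 1, UTF8Length("\u00e9") is 2,
+	// UTF8Length("\u20ac") is 3, and a surrogate pair such as
+	// UTF8Length("\ud834\udd1e") is 4.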
+
+ /**
+ * Takes a String and writes it into an array as UTF8 encoded bytes.
+ * @private
+ */
+ function stringToUTF8(input, output, start) {
+ var pos = start;
+ for (var i = 0; i<input.length; i++) {
+ var charCode = input.charCodeAt(i);
+
+ // Check for a surrogate pair.
+ if (0xD800 <= charCode && charCode <= 0xDBFF) {
+ var lowCharCode = input.charCodeAt(++i);
+ if (isNaN(lowCharCode)) {
+ throw new Error(format(ERROR.MALFORMED_UNICODE, [charCode, lowCharCode]));
+ }
+ charCode = ((charCode - 0xD800)<<10) + (lowCharCode - 0xDC00) + 0x10000;
+
+ }
+
+ if (charCode <= 0x7F) {
+ output[pos++] = charCode;
+ } else if (charCode <= 0x7FF) {
+ output[pos++] = charCode>>6 & 0x1F | 0xC0;
+ output[pos++] = charCode & 0x3F | 0x80;
+ } else if (charCode <= 0xFFFF) {
+ output[pos++] = charCode>>12 & 0x0F | 0xE0;
+ output[pos++] = charCode>>6 & 0x3F | 0x80;
+ output[pos++] = charCode & 0x3F | 0x80;
+ } else {
+ output[pos++] = charCode>>18 & 0x07 | 0xF0;
+ output[pos++] = charCode>>12 & 0x3F | 0x80;
+ output[pos++] = charCode>>6 & 0x3F | 0x80;
+ output[pos++] = charCode & 0x3F | 0x80;
+ };
+ }
+ return output;
+ }
+
+ function parseUTF8(input, offset, length) {
+ var output = "";
+ var utf16;
+ var pos = offset;
+
+ while (pos < offset+length)
+ {
+ var byte1 = input[pos++];
+ if (byte1 < 128)
+ utf16 = byte1;
+ else
+ {
+ var byte2 = input[pos++]-128;
+ if (byte2 < 0)
+ throw new Error(format(ERROR.MALFORMED_UTF, [byte1.toString(16), byte2.toString(16),""]));
+ if (byte1 < 0xE0) // 2 byte character
+ utf16 = 64*(byte1-0xC0) + byte2;
+ else
+ {
+ var byte3 = input[pos++]-128;
+ if (byte3 < 0)
+ throw new Error(format(ERROR.MALFORMED_UTF, [byte1.toString(16), byte2.toString(16), byte3.toString(16)]));
+ if (byte1 < 0xF0) // 3 byte character
+ utf16 = 4096*(byte1-0xE0) + 64*byte2 + byte3;
+ else
+ {
+ var byte4 = input[pos++]-128;
+ if (byte4 < 0)
+ throw new Error(format(ERROR.MALFORMED_UTF, [byte1.toString(16), byte2.toString(16), byte3.toString(16), byte4.toString(16)]));
+ if (byte1 < 0xF8) // 4 byte character
+ utf16 = 262144*(byte1-0xF0) + 4096*byte2 + 64*byte3 + byte4;
+ else // longer encodings are not supported
+ throw new Error(format(ERROR.MALFORMED_UTF, [byte1.toString(16), byte2.toString(16), byte3.toString(16), byte4.toString(16)]));
+ }
+ }
+ }
+
+ if (utf16 > 0xFFFF) // 4 byte character - express as a surrogate pair
+ {
+ utf16 -= 0x10000;
+ output += String.fromCharCode(0xD800 + (utf16 >> 10)); // lead character
+ utf16 = 0xDC00 + (utf16 & 0x3FF); // trail character
+ }
+ output += String.fromCharCode(utf16);
+ }
+ return output;
+ }
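+
+	// Worked example: parseUTF8([0xE2,0x82,0xAC], 0, 3) returns "\u20ac",
+	// since 4096*(0xE2-0xE0) + 64*(0x82-0x80) + (0xAC-0x80) = 0x20AC.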
+
+ /**
+ * Repeat keepalive requests, monitor responses.
+ * @ignore
+ */
+ var Pinger = function(client, window, keepAliveInterval) {
+ this._client = client;
+ this._window = window;
+ this._keepAliveInterval = keepAliveInterval*1000;
+ this.isReset = false;
+
+ var pingReq = new WireMessage(MESSAGE_TYPE.PINGREQ).encode();
+
+ var doTimeout = function (pinger) {
+ return function () {
+ return doPing.apply(pinger);
+ };
+ };
+
+ /** @ignore */
+ var doPing = function() {
+ if (!this.isReset) {
+ this._client._trace("Pinger.doPing", "Timed out");
+ this._client._disconnected( ERROR.PING_TIMEOUT.code , format(ERROR.PING_TIMEOUT));
+ } else {
+ this.isReset = false;
+ this._client._trace("Pinger.doPing", "send PINGREQ");
+ this._client.socket.send(pingReq);
+ this.timeout = this._window.setTimeout(doTimeout(this), this._keepAliveInterval);
+ }
+ }
+
+ this.reset = function() {
+ this.isReset = true;
+ this._window.clearTimeout(this.timeout);
+ if (this._keepAliveInterval > 0)
+ this.timeout = setTimeout(doTimeout(this), this._keepAliveInterval);
+ }
+
+ this.cancel = function() {
+ this._window.clearTimeout(this.timeout);
+ }
+ };
+
+ /**
+ * Monitor request completion.
+ * @ignore
+ */
+ var Timeout = function(client, window, timeoutSeconds, action, args) {
+ this._window = window;
+ if (!timeoutSeconds)
+ timeoutSeconds = 30;
+
+ var doTimeout = function (action, client, args) {
+ return function () {
+ return action.apply(client, args);
+ };
+ };
+ this.timeout = setTimeout(doTimeout(action, client, args), timeoutSeconds * 1000);
+
+ this.cancel = function() {
+ this._window.clearTimeout(this.timeout);
+ }
+ };
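+
+	// For example (illustrative): new Timeout(client, window, 10, client._disconnected,
+	// [ERROR.CONNECT_TIMEOUT.code, format(ERROR.CONNECT_TIMEOUT)]) fires the
+	// action after 10 seconds unless cancel() is called first.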
+
+ /*
+ * Internal implementation of the Websockets MQTT V3.1 client.
+ *
+ * @name Paho.MQTT.ClientImpl @constructor
+	 * @param {String} host the DNS name of the WebSocket host.
+	 * @param {Number} port the port number for that host.
+	 * @param {String} clientId the MQTT client identifier.
+ */
+ var ClientImpl = function (uri, host, port, path, clientId) {
+ // Check dependencies are satisfied in this browser.
+ if (!("WebSocket" in global && global["WebSocket"] !== null)) {
+ throw new Error(format(ERROR.UNSUPPORTED, ["WebSocket"]));
+ }
+ if (!("localStorage" in global && global["localStorage"] !== null)) {
+ throw new Error(format(ERROR.UNSUPPORTED, ["localStorage"]));
+ }
+ if (!("ArrayBuffer" in global && global["ArrayBuffer"] !== null)) {
+ throw new Error(format(ERROR.UNSUPPORTED, ["ArrayBuffer"]));
+ }
+ this._trace("Paho.MQTT.Client", uri, host, port, path, clientId);
+
+ this.host = host;
+ this.port = port;
+ this.path = path;
+ this.uri = uri;
+ this.clientId = clientId;
+
+		// Local storage keys are qualified with the following string.
+		// The conditional inclusion of path in the key is for backward
+		// compatibility with versions where the path was not configurable
+		// and was assumed to be /mqtt.
+ this._localKey=host+":"+port+(path!="/mqtt"?":"+path:"")+":"+clientId+":";
+
+ // Create private instance-only message queue
+ // Internal queue of messages to be sent, in sending order.
+ this._msg_queue = [];
+
+ // Messages we have sent and are expecting a response for, indexed by their respective message ids.
+ this._sentMessages = {};
+
+		// Messages we have received and acknowledged and are expecting a confirm message for
+ // indexed by their respective message ids.
+ this._receivedMessages = {};
+
+		// Internal list of callbacks to be executed when messages
+		// have been successfully sent over the web socket, e.g. a disconnect
+		// that doesn't need to wait for an ACK: the callback runs as soon as the message is dispatched.
+ this._notify_msg_sent = {};
+
+ // Unique identifier for SEND messages, incrementing
+ // counter as messages are sent.
+ this._message_identifier = 1;
+
+ // Used to determine the transmission sequence of stored sent messages.
+ this._sequence = 0;
+
+
+ // Load the local state, if any, from the saved version, only restore state relevant to this client.
+ for (var key in localStorage)
+ if ( key.indexOf("Sent:"+this._localKey) == 0
+ || key.indexOf("Received:"+this._localKey) == 0)
+ this.restore(key);
+ };
+
+ // Messaging Client public instance members.
+ ClientImpl.prototype.host;
+ ClientImpl.prototype.port;
+ ClientImpl.prototype.path;
+ ClientImpl.prototype.uri;
+ ClientImpl.prototype.clientId;
+
+ // Messaging Client private instance members.
+ ClientImpl.prototype.socket;
+ /* true once we have received an acknowledgement to a CONNECT packet. */
+ ClientImpl.prototype.connected = false;
+	/* The largest message identifier allowed; it may not be larger than 2**16,
+	 * and setting it smaller reduces the maximum number of outbound messages allowed.
+	 */
+ ClientImpl.prototype.maxMessageIdentifier = 65536;
+ ClientImpl.prototype.connectOptions;
+ ClientImpl.prototype.hostIndex;
+ ClientImpl.prototype.onConnectionLost;
+ ClientImpl.prototype.onMessageDelivered;
+ ClientImpl.prototype.onMessageArrived;
+ ClientImpl.prototype.traceFunction;
+ ClientImpl.prototype._msg_queue = null;
+ ClientImpl.prototype._connectTimeout;
+ /* The sendPinger monitors how long we allow before we send data to prove to the server that we are alive. */
+ ClientImpl.prototype.sendPinger = null;
+ /* The receivePinger monitors how long we allow before we require evidence that the server is alive. */
+ ClientImpl.prototype.receivePinger = null;
+
+ ClientImpl.prototype.receiveBuffer = null;
+
+ ClientImpl.prototype._traceBuffer = null;
+ ClientImpl.prototype._MAX_TRACE_ENTRIES = 100;
+
+ ClientImpl.prototype.connect = function (connectOptions) {
+ var connectOptionsMasked = this._traceMask(connectOptions, "password");
+ this._trace("Client.connect", connectOptionsMasked, this.socket, this.connected);
+
+ if (this.connected)
+ throw new Error(format(ERROR.INVALID_STATE, ["already connected"]));
+ if (this.socket)
+ throw new Error(format(ERROR.INVALID_STATE, ["already connected"]));
+
+ this.connectOptions = connectOptions;
+
+ if (connectOptions.uris) {
+ this.hostIndex = 0;
+ this._doConnect(connectOptions.uris[0]);
+ } else {
+ this._doConnect(this.uri);
+ }
+
+ };
+
+ ClientImpl.prototype.subscribe = function (filter, subscribeOptions) {
+ this._trace("Client.subscribe", filter, subscribeOptions);
+
+ if (!this.connected)
+ throw new Error(format(ERROR.INVALID_STATE, ["not connected"]));
+
+ var wireMessage = new WireMessage(MESSAGE_TYPE.SUBSCRIBE);
+ wireMessage.topics=[filter];
+ if (subscribeOptions.qos != undefined)
+ wireMessage.requestedQos = [subscribeOptions.qos];
+ else
+ wireMessage.requestedQos = [0];
+
+ if (subscribeOptions.onSuccess) {
+ wireMessage.onSuccess = function(grantedQos) {subscribeOptions.onSuccess({invocationContext:subscribeOptions.invocationContext,grantedQos:grantedQos});};
+ }
+
+ if (subscribeOptions.onFailure) {
+ wireMessage.onFailure = function(errorCode) {subscribeOptions.onFailure({invocationContext:subscribeOptions.invocationContext,errorCode:errorCode});};
+ }
+
+ if (subscribeOptions.timeout) {
+ wireMessage.timeOut = new Timeout(this, window, subscribeOptions.timeout, subscribeOptions.onFailure
+ , [{invocationContext:subscribeOptions.invocationContext,
+ errorCode:ERROR.SUBSCRIBE_TIMEOUT.code,
+ errorMessage:format(ERROR.SUBSCRIBE_TIMEOUT)}]);
+ }
+
+ // All subscriptions return a SUBACK.
+ this._requires_ack(wireMessage);
+ this._schedule_message(wireMessage);
+ };
+
+ /** @ignore */
+ ClientImpl.prototype.unsubscribe = function(filter, unsubscribeOptions) {
+ this._trace("Client.unsubscribe", filter, unsubscribeOptions);
+
+ if (!this.connected)
+ throw new Error(format(ERROR.INVALID_STATE, ["not connected"]));
+
+ var wireMessage = new WireMessage(MESSAGE_TYPE.UNSUBSCRIBE);
+ wireMessage.topics = [filter];
+
+ if (unsubscribeOptions.onSuccess) {
+ wireMessage.callback = function() {unsubscribeOptions.onSuccess({invocationContext:unsubscribeOptions.invocationContext});};
+ }
+ if (unsubscribeOptions.timeout) {
+ wireMessage.timeOut = new Timeout(this, window, unsubscribeOptions.timeout, unsubscribeOptions.onFailure
+ , [{invocationContext:unsubscribeOptions.invocationContext,
+ errorCode:ERROR.UNSUBSCRIBE_TIMEOUT.code,
+ errorMessage:format(ERROR.UNSUBSCRIBE_TIMEOUT)}]);
+ }
+
+		// All unsubscribes return an UNSUBACK.
+ this._requires_ack(wireMessage);
+ this._schedule_message(wireMessage);
+ };
+
+ ClientImpl.prototype.send = function (message) {
+ this._trace("Client.send", message);
+
+ if (!this.connected)
+ throw new Error(format(ERROR.INVALID_STATE, ["not connected"]));
+
+		var wireMessage = new WireMessage(MESSAGE_TYPE.PUBLISH);
+ wireMessage.payloadMessage = message;
+
+ if (message.qos > 0)
+ this._requires_ack(wireMessage);
+ else if (this.onMessageDelivered)
+ this._notify_msg_sent[wireMessage] = this.onMessageDelivered(wireMessage.payloadMessage);
+ this._schedule_message(wireMessage);
+ };
+
+ ClientImpl.prototype.disconnect = function () {
+ this._trace("Client.disconnect");
+
+ if (!this.socket)
+ throw new Error(format(ERROR.INVALID_STATE, ["not connecting or connected"]));
+
+		var wireMessage = new WireMessage(MESSAGE_TYPE.DISCONNECT);
+
+		// Run the disconnected callback as soon as the message has been sent,
+		// in case of a failure later on in the disconnect processing.
+		// As a consequence, the _disconnected callback may be run several times.
+ this._notify_msg_sent[wireMessage] = scope(this._disconnected, this);
+
+ this._schedule_message(wireMessage);
+ };
+
+ ClientImpl.prototype.getTraceLog = function () {
+ if ( this._traceBuffer !== null ) {
+ this._trace("Client.getTraceLog", new Date());
+			this._trace("Client.getTraceLog in flight messages", Object.keys(this._sentMessages).length);
+ for (var key in this._sentMessages)
+ this._trace("_sentMessages ",key, this._sentMessages[key]);
+ for (var key in this._receivedMessages)
+ this._trace("_receivedMessages ",key, this._receivedMessages[key]);
+
+ return this._traceBuffer;
+ }
+ };
+
+ ClientImpl.prototype.startTrace = function () {
+ if ( this._traceBuffer === null ) {
+ this._traceBuffer = [];
+ }
+ this._trace("Client.startTrace", new Date(), version);
+ };
+
+ ClientImpl.prototype.stopTrace = function () {
+ delete this._traceBuffer;
+ };
+
+ ClientImpl.prototype._doConnect = function (wsurl) {
+ // When the socket is open, this client will send the CONNECT WireMessage using the saved parameters.
+ if (this.connectOptions.useSSL) {
+ var uriParts = wsurl.split(":");
+ uriParts[0] = "wss";
+ wsurl = uriParts.join(":");
+ }
+ this.connected = false;
+ if (this.connectOptions.mqttVersion < 4) {
+ this.socket = new WebSocket(wsurl, ["mqttv3.1"]);
+ } else {
+ this.socket = new WebSocket(wsurl, ["mqtt"]);
+ }
+ this.socket.binaryType = 'arraybuffer';
+
+ this.socket.onopen = scope(this._on_socket_open, this);
+ this.socket.onmessage = scope(this._on_socket_message, this);
+ this.socket.onerror = scope(this._on_socket_error, this);
+ this.socket.onclose = scope(this._on_socket_close, this);
+
+ this.sendPinger = new Pinger(this, window, this.connectOptions.keepAliveInterval);
+ this.receivePinger = new Pinger(this, window, this.connectOptions.keepAliveInterval);
+
+ this._connectTimeout = new Timeout(this, window, this.connectOptions.timeout, this._disconnected, [ERROR.CONNECT_TIMEOUT.code, format(ERROR.CONNECT_TIMEOUT)]);
+ };
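+
+	// For example (illustrative): with useSSL set, a uri of
+	// "ws://broker.example:15675/ws" is rewritten to
+	// "wss://broker.example:15675/ws" before the WebSocket is opened.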
+
+
+	// Schedule a new message to be sent over the WebSockets
+	// connection. CONNECT messages cause the WebSocket connection
+	// to be started. All other messages are queued internally
+	// until this has happened. When the WS connection starts, process
+	// all outstanding messages.
+ ClientImpl.prototype._schedule_message = function (message) {
+ this._msg_queue.push(message);
+ // Process outstanding messages in the queue if we have an open socket, and have received CONNACK.
+ if (this.connected) {
+ this._process_queue();
+ }
+ };
+
+ ClientImpl.prototype.store = function(prefix, wireMessage) {
+ var storedMessage = {type:wireMessage.type, messageIdentifier:wireMessage.messageIdentifier, version:1};
+
+ switch(wireMessage.type) {
+ case MESSAGE_TYPE.PUBLISH:
+ if(wireMessage.pubRecReceived)
+ storedMessage.pubRecReceived = true;
+
+ // Convert the payload to a hex string.
+ storedMessage.payloadMessage = {};
+ var hex = "";
+ var messageBytes = wireMessage.payloadMessage.payloadBytes;
+ for (var i=0; i<messageBytes.length; i++) {
+ if (messageBytes[i] <= 0xF)
+ hex = hex+"0"+messageBytes[i].toString(16);
+ else
+ hex = hex+messageBytes[i].toString(16);
+ }
+ storedMessage.payloadMessage.payloadHex = hex;
+
+ storedMessage.payloadMessage.qos = wireMessage.payloadMessage.qos;
+ storedMessage.payloadMessage.destinationName = wireMessage.payloadMessage.destinationName;
+ if (wireMessage.payloadMessage.duplicate)
+ storedMessage.payloadMessage.duplicate = true;
+ if (wireMessage.payloadMessage.retained)
+ storedMessage.payloadMessage.retained = true;
+
+ // Add a sequence number to sent messages.
+ if ( prefix.indexOf("Sent:") == 0 ) {
+ if ( wireMessage.sequence === undefined )
+ wireMessage.sequence = ++this._sequence;
+ storedMessage.sequence = wireMessage.sequence;
+ }
+ break;
+
+ default:
+ throw Error(format(ERROR.INVALID_STORED_DATA, [key, storedMessage]));
+ }
+ localStorage.setItem(prefix+this._localKey+wireMessage.messageIdentifier, JSON.stringify(storedMessage));
+ };
+
+ ClientImpl.prototype.restore = function(key) {
+ var value = localStorage.getItem(key);
+ var storedMessage = JSON.parse(value);
+
+ var wireMessage = new WireMessage(storedMessage.type, storedMessage);
+
+ switch(storedMessage.type) {
+ case MESSAGE_TYPE.PUBLISH:
+ // Replace the payload message with a Message object.
+ var hex = storedMessage.payloadMessage.payloadHex;
+ var buffer = new ArrayBuffer((hex.length)/2);
+ var byteStream = new Uint8Array(buffer);
+ var i = 0;
+ while (hex.length >= 2) {
+ var x = parseInt(hex.substring(0, 2), 16);
+ hex = hex.substring(2, hex.length);
+ byteStream[i++] = x;
+ }
+ var payloadMessage = new Paho.MQTT.Message(byteStream);
+
+ payloadMessage.qos = storedMessage.payloadMessage.qos;
+ payloadMessage.destinationName = storedMessage.payloadMessage.destinationName;
+ if (storedMessage.payloadMessage.duplicate)
+ payloadMessage.duplicate = true;
+ if (storedMessage.payloadMessage.retained)
+ payloadMessage.retained = true;
+ wireMessage.payloadMessage = payloadMessage;
+
+ break;
+
+ default:
+ throw Error(format(ERROR.INVALID_STORED_DATA, [key, value]));
+ }
+
+ if (key.indexOf("Sent:"+this._localKey) == 0) {
+ wireMessage.payloadMessage.duplicate = true;
+ this._sentMessages[wireMessage.messageIdentifier] = wireMessage;
+ } else if (key.indexOf("Received:"+this._localKey) == 0) {
+ this._receivedMessages[wireMessage.messageIdentifier] = wireMessage;
+ }
+ };
+
+ ClientImpl.prototype._process_queue = function () {
+ var message = null;
+ // Process messages in order they were added
+ var fifo = this._msg_queue.reverse();
+
+ // Send all queued messages down socket connection
+ while ((message = fifo.pop())) {
+ this._socket_send(message);
+ // Notify listeners that message was successfully sent
+ if (this._notify_msg_sent[message]) {
+ this._notify_msg_sent[message]();
+ delete this._notify_msg_sent[message];
+ }
+ }
+ };
+
+ /**
+ * Expect an ACK response for this message. Add message to the set of in progress
+ * messages and set an unused identifier in this message.
+ * @ignore
+ */
+ ClientImpl.prototype._requires_ack = function (wireMessage) {
+ var messageCount = Object.keys(this._sentMessages).length;
+ if (messageCount > this.maxMessageIdentifier)
+			throw Error("Too many messages:"+messageCount);
+
+ while(this._sentMessages[this._message_identifier] !== undefined) {
+ this._message_identifier++;
+ }
+ wireMessage.messageIdentifier = this._message_identifier;
+ this._sentMessages[wireMessage.messageIdentifier] = wireMessage;
+ if (wireMessage.type === MESSAGE_TYPE.PUBLISH) {
+ this.store("Sent:", wireMessage);
+ }
+ if (this._message_identifier === this.maxMessageIdentifier) {
+ this._message_identifier = 1;
+ }
+ };
+
+ /**
+ * Called when the underlying websocket has been opened.
+ * @ignore
+ */
+ ClientImpl.prototype._on_socket_open = function () {
+ // Create the CONNECT message object.
+ var wireMessage = new WireMessage(MESSAGE_TYPE.CONNECT, this.connectOptions);
+ wireMessage.clientId = this.clientId;
+ this._socket_send(wireMessage);
+ };
+
+ /**
+ * Called when the underlying websocket has received a complete packet.
+ * @ignore
+ */
+ ClientImpl.prototype._on_socket_message = function (event) {
+ this._trace("Client._on_socket_message", event.data);
+ // Reset the receive ping timer, we now have evidence the server is alive.
+ this.receivePinger.reset();
+ var messages = this._deframeMessages(event.data);
+ for (var i = 0; i < messages.length; i+=1) {
+ this._handleMessage(messages[i]);
+ }
+ }
+
+ ClientImpl.prototype._deframeMessages = function(data) {
+ var byteArray = new Uint8Array(data);
+ if (this.receiveBuffer) {
+ var newData = new Uint8Array(this.receiveBuffer.length+byteArray.length);
+ newData.set(this.receiveBuffer);
+ newData.set(byteArray,this.receiveBuffer.length);
+ byteArray = newData;
+ delete this.receiveBuffer;
+ }
+ try {
+ var offset = 0;
+ var messages = [];
+ while(offset < byteArray.length) {
+ var result = decodeMessage(byteArray,offset);
+ var wireMessage = result[0];
+ offset = result[1];
+ if (wireMessage !== null) {
+ messages.push(wireMessage);
+ } else {
+ break;
+ }
+ }
+ if (offset < byteArray.length) {
+ this.receiveBuffer = byteArray.subarray(offset);
+ }
+ } catch (error) {
+ this._disconnected(ERROR.INTERNAL_ERROR.code , format(ERROR.INTERNAL_ERROR, [error.message,error.stack.toString()]));
+ return;
+ }
+ return messages;
+ }
+
+ ClientImpl.prototype._handleMessage = function(wireMessage) {
+
+ this._trace("Client._handleMessage", wireMessage);
+
+ try {
+ switch(wireMessage.type) {
+ case MESSAGE_TYPE.CONNACK:
+ this._connectTimeout.cancel();
+
+ // If we have started using clean session then clear up the local state.
+ if (this.connectOptions.cleanSession) {
+ for (var key in this._sentMessages) {
+ var sentMessage = this._sentMessages[key];
+ localStorage.removeItem("Sent:"+this._localKey+sentMessage.messageIdentifier);
+ }
+ this._sentMessages = {};
+
+ for (var key in this._receivedMessages) {
+ var receivedMessage = this._receivedMessages[key];
+ localStorage.removeItem("Received:"+this._localKey+receivedMessage.messageIdentifier);
+ }
+ this._receivedMessages = {};
+ }
+ // Client connected and ready for business.
+ if (wireMessage.returnCode === 0) {
+ this.connected = true;
+ // Jump to the end of the list of uris and stop looking for a good host.
+ if (this.connectOptions.uris)
+ this.hostIndex = this.connectOptions.uris.length;
+ } else {
+ this._disconnected(ERROR.CONNACK_RETURNCODE.code , format(ERROR.CONNACK_RETURNCODE, [wireMessage.returnCode, CONNACK_RC[wireMessage.returnCode]]));
+ break;
+ }
+
+ // Resend messages.
+ var sequencedMessages = new Array();
+ for (var msgId in this._sentMessages) {
+ if (this._sentMessages.hasOwnProperty(msgId))
+ sequencedMessages.push(this._sentMessages[msgId]);
+ }
+
+ // Sort sentMessages into the original sent order.
+					sequencedMessages = sequencedMessages.sort(function(a,b) {return a.sequence - b.sequence;});
+ for (var i=0, len=sequencedMessages.length; i<len; i++) {
+ var sentMessage = sequencedMessages[i];
+ if (sentMessage.type == MESSAGE_TYPE.PUBLISH && sentMessage.pubRecReceived) {
+ var pubRelMessage = new WireMessage(MESSAGE_TYPE.PUBREL, {messageIdentifier:sentMessage.messageIdentifier});
+ this._schedule_message(pubRelMessage);
+ } else {
+ this._schedule_message(sentMessage);
+ };
+ }
+
+ // Execute the connectOptions.onSuccess callback if there is one.
+ if (this.connectOptions.onSuccess) {
+ this.connectOptions.onSuccess({invocationContext:this.connectOptions.invocationContext});
+ }
+
+ // Process all queued messages now that the connection is established.
+ this._process_queue();
+ break;
+
+ case MESSAGE_TYPE.PUBLISH:
+ this._receivePublish(wireMessage);
+ break;
+
+ case MESSAGE_TYPE.PUBACK:
+ var sentMessage = this._sentMessages[wireMessage.messageIdentifier];
+				// If this is a re-flow of a PUBACK after a restart, the sentMessage will not exist.
+ if (sentMessage) {
+ delete this._sentMessages[wireMessage.messageIdentifier];
+ localStorage.removeItem("Sent:"+this._localKey+wireMessage.messageIdentifier);
+ if (this.onMessageDelivered)
+ this.onMessageDelivered(sentMessage.payloadMessage);
+ }
+ break;
+
+ case MESSAGE_TYPE.PUBREC:
+ var sentMessage = this._sentMessages[wireMessage.messageIdentifier];
+				// If this is a re-flow of a PUBREC after a restart, the sentMessage will not exist.
+ if (sentMessage) {
+ sentMessage.pubRecReceived = true;
+ var pubRelMessage = new WireMessage(MESSAGE_TYPE.PUBREL, {messageIdentifier:wireMessage.messageIdentifier});
+ this.store("Sent:", sentMessage);
+ this._schedule_message(pubRelMessage);
+ }
+ break;
+
+ case MESSAGE_TYPE.PUBREL:
+ var receivedMessage = this._receivedMessages[wireMessage.messageIdentifier];
+ localStorage.removeItem("Received:"+this._localKey+wireMessage.messageIdentifier);
+				// If this is a re-flow of a PUBREL after a restart, receivedMessage will not exist.
+ if (receivedMessage) {
+ this._receiveMessage(receivedMessage);
+ delete this._receivedMessages[wireMessage.messageIdentifier];
+ }
+ // Always flow PubComp, we may have previously flowed PubComp but the server lost it and restarted.
+ var pubCompMessage = new WireMessage(MESSAGE_TYPE.PUBCOMP, {messageIdentifier:wireMessage.messageIdentifier});
+ this._schedule_message(pubCompMessage);
+ break;
+
+ case MESSAGE_TYPE.PUBCOMP:
+ var sentMessage = this._sentMessages[wireMessage.messageIdentifier];
+ delete this._sentMessages[wireMessage.messageIdentifier];
+ localStorage.removeItem("Sent:"+this._localKey+wireMessage.messageIdentifier);
+ if (this.onMessageDelivered)
+ this.onMessageDelivered(sentMessage.payloadMessage);
+ break;
+
+ case MESSAGE_TYPE.SUBACK:
+ var sentMessage = this._sentMessages[wireMessage.messageIdentifier];
+ if (sentMessage) {
+ if(sentMessage.timeOut)
+ sentMessage.timeOut.cancel();
+ // This will need to be fixed when we add multiple topic support
+ if (wireMessage.returnCode[0] === 0x80) {
+ if (sentMessage.onFailure) {
+ sentMessage.onFailure(wireMessage.returnCode);
+ }
+ } else if (sentMessage.onSuccess) {
+ sentMessage.onSuccess(wireMessage.returnCode);
+ }
+ delete this._sentMessages[wireMessage.messageIdentifier];
+ }
+ break;
+
+ case MESSAGE_TYPE.UNSUBACK:
+ var sentMessage = this._sentMessages[wireMessage.messageIdentifier];
+ if (sentMessage) {
+ if (sentMessage.timeOut)
+ sentMessage.timeOut.cancel();
+ if (sentMessage.callback) {
+ sentMessage.callback();
+ }
+ delete this._sentMessages[wireMessage.messageIdentifier];
+ }
+
+ break;
+
+ case MESSAGE_TYPE.PINGRESP:
+ /* The sendPinger or receivePinger may have sent a ping, the receivePinger has already been reset. */
+ this.sendPinger.reset();
+ break;
+
+ case MESSAGE_TYPE.DISCONNECT:
+ // Clients do not expect to receive disconnect packets.
+ this._disconnected(ERROR.INVALID_MQTT_MESSAGE_TYPE.code , format(ERROR.INVALID_MQTT_MESSAGE_TYPE, [wireMessage.type]));
+ break;
+
+ default:
+ this._disconnected(ERROR.INVALID_MQTT_MESSAGE_TYPE.code , format(ERROR.INVALID_MQTT_MESSAGE_TYPE, [wireMessage.type]));
+ };
+ } catch (error) {
+ this._disconnected(ERROR.INTERNAL_ERROR.code , format(ERROR.INTERNAL_ERROR, [error.message,error.stack.toString()]));
+ return;
+ }
+ };
+
+ /** @ignore */
+ ClientImpl.prototype._on_socket_error = function (error) {
+ this._disconnected(ERROR.SOCKET_ERROR.code , format(ERROR.SOCKET_ERROR, [error.data]));
+ };
+
+ /** @ignore */
+ ClientImpl.prototype._on_socket_close = function () {
+ this._disconnected(ERROR.SOCKET_CLOSE.code , format(ERROR.SOCKET_CLOSE));
+ };
+
+ /** @ignore */
+ ClientImpl.prototype._socket_send = function (wireMessage) {
+
+ if (wireMessage.type == 1) {
+ var wireMessageMasked = this._traceMask(wireMessage, "password");
+ this._trace("Client._socket_send", wireMessageMasked);
+ }
+ else this._trace("Client._socket_send", wireMessage);
+
+ this.socket.send(wireMessage.encode());
+ /* We have proved to the server we are alive. */
+ this.sendPinger.reset();
+ };
+
+ /** @ignore */
+ ClientImpl.prototype._receivePublish = function (wireMessage) {
+ switch(wireMessage.payloadMessage.qos) {
+ case "undefined":
+ case 0:
+ this._receiveMessage(wireMessage);
+ break;
+
+ case 1:
+ var pubAckMessage = new WireMessage(MESSAGE_TYPE.PUBACK, {messageIdentifier:wireMessage.messageIdentifier});
+ this._schedule_message(pubAckMessage);
+ this._receiveMessage(wireMessage);
+ break;
+
+ case 2:
+ this._receivedMessages[wireMessage.messageIdentifier] = wireMessage;
+ this.store("Received:", wireMessage);
+ var pubRecMessage = new WireMessage(MESSAGE_TYPE.PUBREC, {messageIdentifier:wireMessage.messageIdentifier});
+ this._schedule_message(pubRecMessage);
+
+ break;
+
+ default:
+			throw Error("Invalid qos="+wireMessage.payloadMessage.qos);
+ };
+ };
+
+ /** @ignore */
+ ClientImpl.prototype._receiveMessage = function (wireMessage) {
+ if (this.onMessageArrived) {
+ this.onMessageArrived(wireMessage.payloadMessage);
+ }
+ };
+
+ /**
+ * Client has disconnected either at its own request or because the server
+ * or network disconnected it. Remove all non-durable state.
+ * @param {errorCode} [number] the error number.
+ * @param {errorText} [string] the error text.
+ * @ignore
+ */
+ ClientImpl.prototype._disconnected = function (errorCode, errorText) {
+ this._trace("Client._disconnected", errorCode, errorText);
+
+ this.sendPinger.cancel();
+ this.receivePinger.cancel();
+ if (this._connectTimeout)
+ this._connectTimeout.cancel();
+ // Clear message buffers.
+ this._msg_queue = [];
+ this._notify_msg_sent = {};
+
+ if (this.socket) {
+ // Cancel all socket callbacks so that they cannot be driven again by this socket.
+ this.socket.onopen = null;
+ this.socket.onmessage = null;
+ this.socket.onerror = null;
+ this.socket.onclose = null;
+ if (this.socket.readyState === 1)
+ this.socket.close();
+ delete this.socket;
+ }
+
+ if (this.connectOptions.uris && this.hostIndex < this.connectOptions.uris.length-1) {
+ // Try the next host.
+ this.hostIndex++;
+ this._doConnect(this.connectOptions.uris[this.hostIndex]);
+
+ } else {
+
+ if (errorCode === undefined) {
+ errorCode = ERROR.OK.code;
+ errorText = format(ERROR.OK);
+ }
+
+ // Run any application callbacks last as they may attempt to reconnect and hence create a new socket.
+ if (this.connected) {
+ this.connected = false;
+ // Execute the connectionLostCallback if there is one, and we were connected.
+ if (this.onConnectionLost)
+ this.onConnectionLost({errorCode:errorCode, errorMessage:errorText});
+ } else {
+ // Otherwise we never had a connection, so indicate that the connect has failed.
+ if (this.connectOptions.mqttVersion === 4 && this.connectOptions.mqttVersionExplicit === false) {
+					this._trace("Failed to connect V4, dropping back to V3");
+ this.connectOptions.mqttVersion = 3;
+ if (this.connectOptions.uris) {
+ this.hostIndex = 0;
+ this._doConnect(this.connectOptions.uris[0]);
+ } else {
+ this._doConnect(this.uri);
+ }
+ } else if(this.connectOptions.onFailure) {
+ this.connectOptions.onFailure({invocationContext:this.connectOptions.invocationContext, errorCode:errorCode, errorMessage:errorText});
+ }
+ }
+ }
+ };
+
+ /** @ignore */
+ ClientImpl.prototype._trace = function () {
+ // Pass trace message back to client's callback function
+ if (this.traceFunction) {
+ for (var i in arguments)
+ {
+ if (typeof arguments[i] !== "undefined")
+ arguments[i] = JSON.stringify(arguments[i]);
+ }
+ var record = Array.prototype.slice.call(arguments).join("");
+ this.traceFunction ({severity: "Debug", message: record });
+ }
+
+ //buffer style trace
+ if ( this._traceBuffer !== null ) {
+ for (var i = 0, max = arguments.length; i < max; i++) {
+ if ( this._traceBuffer.length == this._MAX_TRACE_ENTRIES ) {
+ this._traceBuffer.shift();
+ }
+ if (i === 0) this._traceBuffer.push(arguments[i]);
+ else if (typeof arguments[i] === "undefined" ) this._traceBuffer.push(arguments[i]);
+ else this._traceBuffer.push(" "+JSON.stringify(arguments[i]));
+ };
+ };
+ };
+
+ /** @ignore */
+ ClientImpl.prototype._traceMask = function (traceObject, masked) {
+ var traceObjectMasked = {};
+ for (var attr in traceObject) {
+ if (traceObject.hasOwnProperty(attr)) {
+ if (attr == masked)
+ traceObjectMasked[attr] = "******";
+ else
+ traceObjectMasked[attr] = traceObject[attr];
+ }
+ }
+ return traceObjectMasked;
+ };
+
+ // ------------------------------------------------------------------------
+ // Public Programming interface.
+ // ------------------------------------------------------------------------
+
+ /**
+ * The JavaScript application communicates to the server using a {@link Paho.MQTT.Client} object.
+ * <p>
+ * Most applications will create just one Client object and then call its connect() method,
+ * however applications can create more than one Client object if they wish.
+ * In this case the combination of host, port and clientId attributes must be different for each Client object.
+ * <p>
+ * The send, subscribe and unsubscribe methods are implemented as asynchronous JavaScript methods
+ * (even though the underlying protocol exchange might be synchronous in nature).
+ * This means they signal their completion by calling back to the application,
+ * via Success or Failure callback functions provided by the application on the method in question.
+ * Such callbacks are called at most once per method invocation and do not persist beyond the lifetime
+ * of the script that made the invocation.
+ * <p>
+ * In contrast there are some callback functions, most notably <i>onMessageArrived</i>,
+ * that are defined on the {@link Paho.MQTT.Client} object.
+ * These may get called multiple times, and aren't directly related to specific method invocations made by the client.
+ *
+ * @name Paho.MQTT.Client
+ *
+ * @constructor
+ *
+ * @param {string} host - the address of the messaging server, as a fully qualified WebSocket URI, as a DNS name or dotted decimal IP address.
+ * @param {number} port - the port number to connect to - only required if host is not a URI
+ * @param {string} path - the path on the host to connect to - only used if host is not a URI. Default: '/mqtt'.
+ * @param {string} clientId - the Messaging client identifier, between 1 and 23 characters in length.
+ *
+ * @property {string} host - <i>read only</i> the server's DNS hostname or dotted decimal IP address.
+ * @property {number} port - <i>read only</i> the server's port.
+ * @property {string} path - <i>read only</i> the server's path.
+ * @property {string} clientId - <i>read only</i> used when connecting to the server.
+ * @property {function} onConnectionLost - called when a connection has been lost.
+ * after a connect() method has succeeded.
+ * Establish the call back used when a connection has been lost. The connection may be
+ * lost because the client initiates a disconnect or because the server or network
+ * cause the client to be disconnected. The disconnect call back may be called without
+ * the connectionComplete call back being invoked if, for example the client fails to
+ * connect.
+ * A single response object parameter is passed to the onConnectionLost callback containing the following fields:
+ * <ol>
+ * <li>errorCode
+ * <li>errorMessage
+ * </ol>
+ * @property {function} onMessageDelivered called when a message has been delivered.
+ * All processing that this Client will ever do has been completed. So, for example,
+ * in the case of a Qos=2 message sent by this client, the PubComp flow has been received from the server
+ * and the message has been removed from persistent storage before this callback is invoked.
+ * Parameters passed to the onMessageDelivered callback are:
+ * <ol>
+ * <li>{@link Paho.MQTT.Message} that was delivered.
+ * </ol>
+ * @property {function} onMessageArrived called when a message has arrived in this Paho.MQTT.client.
+ * Parameters passed to the onMessageArrived callback are:
+ * <ol>
+ * <li>{@link Paho.MQTT.Message} that has arrived.
+ * </ol>
+ */
+ var Client = function (host, port, path, clientId) {
+
+ var uri;
+
+ if (typeof host !== "string")
+ throw new Error(format(ERROR.INVALID_TYPE, [typeof host, "host"]));
+
+ if (arguments.length == 2) {
+ // host: must be full ws:// uri
+ // port: clientId
+ clientId = port;
+ uri = host;
+ var match = uri.match(/^(wss?):\/\/((\[(.+)\])|([^\/]+?))(:(\d+))?(\/.*)$/);
+ if (match) {
+ host = match[4]||match[2];
+ port = parseInt(match[7]);
+ path = match[8];
+ } else {
+ throw new Error(format(ERROR.INVALID_ARGUMENT,[host,"host"]));
+ }
+ } else {
+ if (arguments.length == 3) {
+ clientId = path;
+ path = "/mqtt";
+ }
+ if (typeof port !== "number" || port < 0)
+ throw new Error(format(ERROR.INVALID_TYPE, [typeof port, "port"]));
+ if (typeof path !== "string")
+ throw new Error(format(ERROR.INVALID_TYPE, [typeof path, "path"]));
+
+ var ipv6AddSBracket = (host.indexOf(":") != -1 && host.slice(0,1) != "[" && host.slice(-1) != "]");
+ uri = "ws://"+(ipv6AddSBracket?"["+host+"]":host)+":"+port+path;
+ }
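+
+		// For example (illustrative): new Paho.MQTT.Client("::1", 1883, "c1")
+		// yields the uri "ws://[::1]:1883/mqtt", the brackets being added
+		// because the host contains ":" and is not already bracketed.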
+
+ var clientIdLength = 0;
+ for (var i = 0; i<clientId.length; i++) {
+ var charCode = clientId.charCodeAt(i);
+ if (0xD800 <= charCode && charCode <= 0xDBFF) {
+ i++; // Surrogate pair.
+ }
+ clientIdLength++;
+ }
+ if (typeof clientId !== "string" || clientIdLength > 65535)
+ throw new Error(format(ERROR.INVALID_ARGUMENT, [clientId, "clientId"]));
+
+ var client = new ClientImpl(uri, host, port, path, clientId);
+ this._getHost = function() { return host; };
+ this._setHost = function() { throw new Error(format(ERROR.UNSUPPORTED_OPERATION)); };
+
+ this._getPort = function() { return port; };
+ this._setPort = function() { throw new Error(format(ERROR.UNSUPPORTED_OPERATION)); };
+
+ this._getPath = function() { return path; };
+ this._setPath = function() { throw new Error(format(ERROR.UNSUPPORTED_OPERATION)); };
+
+ this._getURI = function() { return uri; };
+ this._setURI = function() { throw new Error(format(ERROR.UNSUPPORTED_OPERATION)); };
+
+ this._getClientId = function() { return client.clientId; };
+ this._setClientId = function() { throw new Error(format(ERROR.UNSUPPORTED_OPERATION)); };
+
+ this._getOnConnectionLost = function() { return client.onConnectionLost; };
+ this._setOnConnectionLost = function(newOnConnectionLost) {
+ if (typeof newOnConnectionLost === "function")
+ client.onConnectionLost = newOnConnectionLost;
+ else
+ throw new Error(format(ERROR.INVALID_TYPE, [typeof newOnConnectionLost, "onConnectionLost"]));
+ };
+
+ this._getOnMessageDelivered = function() { return client.onMessageDelivered; };
+ this._setOnMessageDelivered = function(newOnMessageDelivered) {
+ if (typeof newOnMessageDelivered === "function")
+ client.onMessageDelivered = newOnMessageDelivered;
+ else
+ throw new Error(format(ERROR.INVALID_TYPE, [typeof newOnMessageDelivered, "onMessageDelivered"]));
+ };
+
+ this._getOnMessageArrived = function() { return client.onMessageArrived; };
+ this._setOnMessageArrived = function(newOnMessageArrived) {
+ if (typeof newOnMessageArrived === "function")
+ client.onMessageArrived = newOnMessageArrived;
+ else
+ throw new Error(format(ERROR.INVALID_TYPE, [typeof newOnMessageArrived, "onMessageArrived"]));
+ };
+
+ this._getTrace = function() { return client.traceFunction; };
+ this._setTrace = function(trace) {
+ if(typeof trace === "function"){
+ client.traceFunction = trace;
+ }else{
+ throw new Error(format(ERROR.INVALID_TYPE, [typeof trace, "onTrace"]));
+ }
+ };
+
+ /**
+ * Connect this Messaging client to its server.
+ *
+ * @name Paho.MQTT.Client#connect
+ * @function
+ * @param {Object} connectOptions - attributes used with the connection.
+ * @param {number} connectOptions.timeout - If the connect has not succeeded within this
+ * number of seconds, it is deemed to have failed.
+ * The default is 30 seconds.
+ * @param {string} connectOptions.userName - Authentication username for this connection.
+ * @param {string} connectOptions.password - Authentication password for this connection.
+ * @param {Paho.MQTT.Message} connectOptions.willMessage - sent by the server when the client
+ * disconnects abnormally.
+ * @param {Number} connectOptions.keepAliveInterval - the server disconnects this client if
+ * there is no activity for this number of seconds.
+ * The default value of 60 seconds is assumed if not set.
+ * @param {boolean} connectOptions.cleanSession - if true(default) the client and server
+ * persistent state is deleted on successful connect.
+ * @param {boolean} connectOptions.useSSL - if present and true, use an SSL Websocket connection.
+ * @param {object} connectOptions.invocationContext - passed to the onSuccess callback or onFailure callback.
+ * @param {function} connectOptions.onSuccess - called when the connect acknowledgement
+ * has been received from the server.
+ * A single response object parameter is passed to the onSuccess callback containing the following fields:
+ * <ol>
+ * <li>invocationContext as passed in to the onSuccess method in the connectOptions.
+ * </ol>
+ * @param {function} connectOptions.onFailure - called when the connect request has failed or timed out.
+ * A single response object parameter is passed to the onFailure callback containing the following fields:
+ * <ol>
+ * <li>invocationContext as passed in to the onFailure method in the connectOptions.
+ * <li>errorCode a number indicating the nature of the error.
+ * <li>errorMessage text describing the error.
+ * </ol>
+ * @param {Array} connectOptions.hosts - If present, this contains either a set of hostnames or fully qualified
+ * WebSocket URIs (ws://example.com:1883/mqtt) that are tried in order in place
+ * of the host and port parameter on the constructor. The hosts are tried one at a time in order until
+ * one of them succeeds.
+ * @param {Array} connectOptions.ports - If present, the set of ports matching the hosts. If hosts contains URIs, this property
+ * is not used.
+ * @throws {InvalidState} if the client is not in disconnected state. The client must have received connectionLost
+ * or disconnected before calling connect for a second or subsequent time.
+ */
+ this.connect = function (connectOptions) {
+ connectOptions = connectOptions || {} ;
+ validate(connectOptions, {timeout:"number",
+ userName:"string",
+ password:"string",
+ willMessage:"object",
+ keepAliveInterval:"number",
+ cleanSession:"boolean",
+ useSSL:"boolean",
+ invocationContext:"object",
+ onSuccess:"function",
+ onFailure:"function",
+ hosts:"object",
+ ports:"object",
+ mqttVersion:"number"});
+
+ // If no keep alive interval is set, assume 60 seconds.
+ if (connectOptions.keepAliveInterval === undefined)
+ connectOptions.keepAliveInterval = 60;
+
+ if (connectOptions.mqttVersion > 4 || connectOptions.mqttVersion < 3) {
+ throw new Error(format(ERROR.INVALID_ARGUMENT, [connectOptions.mqttVersion, "connectOptions.mqttVersion"]));
+ }
+
+ if (connectOptions.mqttVersion === undefined) {
+ connectOptions.mqttVersionExplicit = false;
+ connectOptions.mqttVersion = 4;
+ } else {
+ connectOptions.mqttVersionExplicit = true;
+ }
+
+ //Check that if a userName is set, so is a password: this client requires
+ //a password whenever a userName is supplied.
+ if (connectOptions.password === undefined && connectOptions.userName !== undefined)
+ throw new Error(format(ERROR.INVALID_ARGUMENT, [connectOptions.password, "connectOptions.password"]));
+
+ if (connectOptions.willMessage) {
+ if (!(connectOptions.willMessage instanceof Message))
+ throw new Error(format(ERROR.INVALID_TYPE, [connectOptions.willMessage, "connectOptions.willMessage"]));
+ // The will message must have a payload that can be represented as a string.
+ // Evaluating the payloadString getter causes an exception if this is not the case.
+ connectOptions.willMessage.payloadString;
+
+ if (typeof connectOptions.willMessage.destinationName === "undefined")
+ throw new Error(format(ERROR.INVALID_TYPE, [typeof connectOptions.willMessage.destinationName, "connectOptions.willMessage.destinationName"]));
+ }
+ if (typeof connectOptions.cleanSession === "undefined")
+ connectOptions.cleanSession = true;
+ if (connectOptions.hosts) {
+
+ if (!(connectOptions.hosts instanceof Array) )
+ throw new Error(format(ERROR.INVALID_ARGUMENT, [connectOptions.hosts, "connectOptions.hosts"]));
+ if (connectOptions.hosts.length <1 )
+ throw new Error(format(ERROR.INVALID_ARGUMENT, [connectOptions.hosts, "connectOptions.hosts"]));
+
+ var usingURIs = false;
+ for (var i = 0; i<connectOptions.hosts.length; i++) {
+ if (typeof connectOptions.hosts[i] !== "string")
+ throw new Error(format(ERROR.INVALID_TYPE, [typeof connectOptions.hosts[i], "connectOptions.hosts["+i+"]"]));
+ if (/^(wss?):\/\/((\[(.+)\])|([^\/]+?))(:(\d+))?(\/.*)$/.test(connectOptions.hosts[i])) {
+ if (i == 0) {
+ usingURIs = true;
+ } else if (!usingURIs) {
+ throw new Error(format(ERROR.INVALID_ARGUMENT, [connectOptions.hosts[i], "connectOptions.hosts["+i+"]"]));
+ }
+ } else if (usingURIs) {
+ throw new Error(format(ERROR.INVALID_ARGUMENT, [connectOptions.hosts[i], "connectOptions.hosts["+i+"]"]));
+ }
+ }
+
+ if (!usingURIs) {
+ if (!connectOptions.ports)
+ throw new Error(format(ERROR.INVALID_ARGUMENT, [connectOptions.ports, "connectOptions.ports"]));
+ if (!(connectOptions.ports instanceof Array) )
+ throw new Error(format(ERROR.INVALID_ARGUMENT, [connectOptions.ports, "connectOptions.ports"]));
+ if (connectOptions.hosts.length != connectOptions.ports.length)
+ throw new Error(format(ERROR.INVALID_ARGUMENT, [connectOptions.ports, "connectOptions.ports"]));
+
+ connectOptions.uris = [];
+
+ for (var i = 0; i<connectOptions.hosts.length; i++) {
+ if (typeof connectOptions.ports[i] !== "number" || connectOptions.ports[i] < 0)
+ throw new Error(format(ERROR.INVALID_TYPE, [typeof connectOptions.ports[i], "connectOptions.ports["+i+"]"]));
+ var host = connectOptions.hosts[i];
+ var port = connectOptions.ports[i];
+
+ var ipv6 = (host.indexOf(":") != -1);
+ uri = "ws://"+(ipv6?"["+host+"]":host)+":"+port+path;
+ connectOptions.uris.push(uri);
+ }
+ } else {
+ connectOptions.uris = connectOptions.hosts;
+ }
+ }
+
+ client.connect(connectOptions);
+ };
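+
+ /*
+ * Illustrative sketch (not part of the library): a typical connect call.
+ * The credentials and handlers shown here are placeholders.
+ *
+ *   client.connect({
+ *       userName: "guest",
+ *       password: "guest",
+ *       keepAliveInterval: 30,
+ *       onSuccess: function () { console.log("connected"); },
+ *       onFailure: function (res) { console.log(res.errorCode, res.errorMessage); }
+ *   });
+ */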
+
+ /**
+ * Subscribe for messages: request receipt of a copy of messages sent to the destinations described by the filter.
+ *
+ * @name Paho.MQTT.Client#subscribe
+ * @function
+ * @param {string} filter - describing the destinations to receive messages from.
+ * <br>
+ * @param {object} subscribeOptions - used to control the subscription.
+ *
+ * @param {number} subscribeOptions.qos - the maximum qos of any publications sent
+ * as a result of making this subscription.
+ * @param {object} subscribeOptions.invocationContext - passed to the onSuccess callback
+ * or onFailure callback.
+ * @param {function} subscribeOptions.onSuccess - called when the subscribe acknowledgement
+ * has been received from the server.
+ * A single response object parameter is passed to the onSuccess callback containing the following fields:
+ * <ol>
+ * <li>invocationContext if set in the subscribeOptions.
+ * </ol>
+ * @param {function} subscribeOptions.onFailure - called when the subscribe request has failed or timed out.
+ * A single response object parameter is passed to the onFailure callback containing the following fields:
+ * <ol>
+ * <li>invocationContext - if set in the subscribeOptions.
+ * <li>errorCode - a number indicating the nature of the error.
+ * <li>errorMessage - text describing the error.
+ * </ol>
+ * @param {number} subscribeOptions.timeout - which, if present, determines the number of
+ * seconds after which the onFailure callback is called.
+ * The presence of a timeout does not prevent the onSuccess
+ * callback from being called when the subscribe completes.
+ * @throws {InvalidState} if the client is not in connected state.
+ */
+ this.subscribe = function (filter, subscribeOptions) {
+ if (typeof filter !== "string")
+ throw new Error("Invalid argument:"+filter);
+ subscribeOptions = subscribeOptions || {} ;
+ validate(subscribeOptions, {qos:"number",
+ invocationContext:"object",
+ onSuccess:"function",
+ onFailure:"function",
+ timeout:"number"
+ });
+ if (subscribeOptions.timeout && !subscribeOptions.onFailure)
+ throw new Error("subscribeOptions.timeout specified with no onFailure callback.");
+ if (typeof subscribeOptions.qos !== "undefined"
+ && !(subscribeOptions.qos === 0 || subscribeOptions.qos === 1 || subscribeOptions.qos === 2 ))
+ throw new Error(format(ERROR.INVALID_ARGUMENT, [subscribeOptions.qos, "subscribeOptions.qos"]));
+ client.subscribe(filter, subscribeOptions);
+ };
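+
+ /*
+ * Illustrative sketch (not part of the library): subscribing to a topic
+ * filter at QoS 1. The filter string is a placeholder. Note that a timeout
+ * is only valid together with an onFailure callback.
+ *
+ *   client.subscribe("sensors/+/temperature", {
+ *       qos: 1,
+ *       timeout: 10,
+ *       onSuccess: function () { console.log("subscribed"); },
+ *       onFailure: function (res) { console.log(res.errorCode, res.errorMessage); }
+ *   });
+ */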
+
+ /**
+ * Unsubscribe for messages: stop receiving messages sent to destinations described by the filter.
+ *
+ * @name Paho.MQTT.Client#unsubscribe
+ * @function
+ * @param {string} filter - describing the destinations to receive messages from.
+ * @param {object} unsubscribeOptions - used to control the unsubscribe operation.
+ * @param {object} unsubscribeOptions.invocationContext - passed to the onSuccess callback
+ * or onFailure callback.
+ * @param {function} unsubscribeOptions.onSuccess - called when the unsubscribe acknowledgement has been received from the server.
+ * A single response object parameter is passed to the
+ * onSuccess callback containing the following fields:
+ * <ol>
+ * <li>invocationContext - if set in the unsubscribeOptions.
+ * </ol>
+ * @param {function} unsubscribeOptions.onFailure - called when the unsubscribe request has failed or timed out.
+ * A single response object parameter is passed to the onFailure callback containing the following fields:
+ * <ol>
+ * <li>invocationContext - if set in the unsubscribeOptions.
+ * <li>errorCode - a number indicating the nature of the error.
+ * <li>errorMessage - text describing the error.
+ * </ol>
+ * @param {number} unsubscribeOptions.timeout - which, if present, determines the number of seconds
+ * after which the onFailure callback is called. The presence of
+ * a timeout does not prevent the onSuccess callback from being
+ * called when the unsubscribe completes.
+ * @throws {InvalidState} if the client is not in connected state.
+ */
+ this.unsubscribe = function (filter, unsubscribeOptions) {
+ if (typeof filter !== "string")
+ throw new Error("Invalid argument:"+filter);
+ unsubscribeOptions = unsubscribeOptions || {} ;
+ validate(unsubscribeOptions, {invocationContext:"object",
+ onSuccess:"function",
+ onFailure:"function",
+ timeout:"number"
+ });
+ if (unsubscribeOptions.timeout && !unsubscribeOptions.onFailure)
+ throw new Error("unsubscribeOptions.timeout specified with no onFailure callback.");
+ client.unsubscribe(filter, unsubscribeOptions);
+ };
+
+ /**
+ * Send a message to the consumers of the destination in the Message.
+ *
+ * @name Paho.MQTT.Client#send
+ * @function
+ * @param {string|Paho.MQTT.Message} topic - <b>mandatory</b> The name of the destination to which the message is to be sent,
+ * 					   or, if it is the only parameter, a Paho.MQTT.Message object to send as-is.
+ * @param {String|ArrayBuffer} payload - The message data to be sent.
+ * @param {number} qos The Quality of Service used to deliver the message.
+ * <dl>
+ * <dt>0 Best effort (default).
+ * <dt>1 At least once.
+ * <dt>2 Exactly once.
+ * </dl>
+ * @param {Boolean} retained If true, the message is to be retained by the server and delivered
+ * to both current and future subscriptions.
+ * If false, the server only delivers the message to current subscribers; this is the default for new Messages.
+ * A received message has the retained boolean set to true if the message was published
+ * with the retained boolean set to true
+ * and the subscription was made after the message has been published.
+ * @throws {InvalidState} if the client is not connected.
+ */
+ this.send = function (topic,payload,qos,retained) {
+ var message ;
+
+ if(arguments.length == 0){
+ throw new Error("Invalid argument: send() requires at least one argument.");
+
+ }else if(arguments.length == 1) {
+
+ if (!(topic instanceof Message) && (typeof topic !== "string"))
+ throw new Error("Invalid argument:"+ typeof topic);
+
+ message = topic;
+ if (typeof message.destinationName === "undefined")
+ throw new Error(format(ERROR.INVALID_ARGUMENT,[message.destinationName,"Message.destinationName"]));
+ client.send(message);
+
+ }else {
+ //parameter checking in Message object
+ message = new Message(payload);
+ message.destinationName = topic;
+ if(arguments.length >= 3)
+ message.qos = qos;
+ if(arguments.length >= 4)
+ message.retained = retained;
+ client.send(message);
+ }
+ };
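+
+ /*
+ * Illustrative sketch (not part of the library): the two calling forms of
+ * send(). Topic and payload values are placeholders.
+ *
+ *   // Form 1: topic, payload, qos and retained given separately.
+ *   client.send("sensors/1/temperature", "21.5", 1, false);
+ *
+ *   // Form 2: a single pre-built message with destinationName set.
+ *   var msg = new Paho.MQTT.Message("21.5");
+ *   msg.destinationName = "sensors/1/temperature";
+ *   client.send(msg);
+ */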
+
+ /**
+ * Normal disconnect of this Messaging client from its server.
+ *
+ * @name Paho.MQTT.Client#disconnect
+ * @function
+ * @throws {InvalidState} if the client is already disconnected.
+ */
+ this.disconnect = function () {
+ client.disconnect();
+ };
+
+ /**
+ * Get the contents of the trace log.
+ *
+ * @name Paho.MQTT.Client#getTraceLog
+ * @function
+ * @return {Object[]} tracebuffer containing the time ordered trace records.
+ */
+ this.getTraceLog = function () {
+ return client.getTraceLog();
+ };
+
+ /**
+ * Start tracing.
+ *
+ * @name Paho.MQTT.Client#startTrace
+ * @function
+ */
+ this.startTrace = function () {
+ client.startTrace();
+ };
+
+ /**
+ * Stop tracing.
+ *
+ * @name Paho.MQTT.Client#stopTrace
+ * @function
+ */
+ this.stopTrace = function () {
+ client.stopTrace();
+ };
+
+ this.isConnected = function() {
+ return client.connected;
+ };
+ };
+
+ Client.prototype = {
+ get host() { return this._getHost(); },
+ set host(newHost) { this._setHost(newHost); },
+
+ get port() { return this._getPort(); },
+ set port(newPort) { this._setPort(newPort); },
+
+ get path() { return this._getPath(); },
+ set path(newPath) { this._setPath(newPath); },
+
+ get clientId() { return this._getClientId(); },
+ set clientId(newClientId) { this._setClientId(newClientId); },
+
+ get onConnectionLost() { return this._getOnConnectionLost(); },
+ set onConnectionLost(newOnConnectionLost) { this._setOnConnectionLost(newOnConnectionLost); },
+
+ get onMessageDelivered() { return this._getOnMessageDelivered(); },
+ set onMessageDelivered(newOnMessageDelivered) { this._setOnMessageDelivered(newOnMessageDelivered); },
+
+ get onMessageArrived() { return this._getOnMessageArrived(); },
+ set onMessageArrived(newOnMessageArrived) { this._setOnMessageArrived(newOnMessageArrived); },
+
+ get trace() { return this._getTrace(); },
+ set trace(newTraceFunction) { this._setTrace(newTraceFunction); }
+
+ };
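+
+ /*
+ * Illustrative sketch (not part of the library): the two supported
+ * constructor forms. Host, port, path and client id values are placeholders.
+ *
+ *   // Form 1: host, port, path and clientId given separately.
+ *   var c1 = new Paho.MQTT.Client("broker.example.com", 15675, "/ws", "client-1");
+ *
+ *   // Form 2: a full WebSocket URI followed by the clientId.
+ *   var c2 = new Paho.MQTT.Client("ws://broker.example.com:15675/ws", "client-2");
+ */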
+
+ /**
+ * An application message, sent or received.
+ * <p>
+ * All attributes may be null, which implies the default values.
+ *
+ * @name Paho.MQTT.Message
+ * @constructor
+ * @param {String|ArrayBuffer} payload The message data to be sent.
+ * <p>
+ * @property {string} payloadString <i>read only</i> The payload as a string if the payload consists of valid UTF-8 characters.
+ * @property {ArrayBuffer} payloadBytes <i>read only</i> The payload as an ArrayBuffer.
+ * <p>
+ * @property {string} destinationName <b>mandatory</b> The name of the destination to which the message is to be sent
+ * (for messages about to be sent) or the name of the destination from which the message has been received
+ * (for messages received by the onMessage function).
+ * <p>
+ * @property {number} qos The Quality of Service used to deliver the message.
+ * <dl>
+ * <dt>0 Best effort (default).
+ * <dt>1 At least once.
+ * <dt>2 Exactly once.
+ * </dl>
+ * <p>
+ * @property {Boolean} retained If true, the message is to be retained by the server and delivered
+ * to both current and future subscriptions.
+ * If false, the server only delivers the message to current subscribers; this is the default for new Messages.
+ * A received message has the retained boolean set to true if the message was published
+ * with the retained boolean set to true
+ * and the subscription was made after the message has been published.
+ * <p>
+ * @property {Boolean} duplicate <i>read only</i> If true, this message might be a duplicate of one which has already been received.
+ * This is only set on messages received from the server.
+ *
+ */
+ var Message = function (newPayload) {
+ var payload;
+ if ( typeof newPayload === "string"
+ || newPayload instanceof ArrayBuffer
+ || newPayload instanceof Int8Array
+ || newPayload instanceof Uint8Array
+ || newPayload instanceof Int16Array
+ || newPayload instanceof Uint16Array
+ || newPayload instanceof Int32Array
+ || newPayload instanceof Uint32Array
+ || newPayload instanceof Float32Array
+ || newPayload instanceof Float64Array
+ ) {
+ payload = newPayload;
+ } else {
+ throw (format(ERROR.INVALID_ARGUMENT, [newPayload, "newPayload"]));
+ }
+
+ this._getPayloadString = function () {
+ if (typeof payload === "string")
+ return payload;
+ else
+ return parseUTF8(payload, 0, payload.length);
+ };
+
+ this._getPayloadBytes = function() {
+ if (typeof payload === "string") {
+ var buffer = new ArrayBuffer(UTF8Length(payload));
+ var byteStream = new Uint8Array(buffer);
+ stringToUTF8(payload, byteStream, 0);
+
+ return byteStream;
+ } else {
+ return payload;
+ }
+ };
+
+ var destinationName = undefined;
+ this._getDestinationName = function() { return destinationName; };
+ this._setDestinationName = function(newDestinationName) {
+ if (typeof newDestinationName === "string")
+ destinationName = newDestinationName;
+ else
+ throw new Error(format(ERROR.INVALID_ARGUMENT, [newDestinationName, "newDestinationName"]));
+ };
+
+ var qos = 0;
+ this._getQos = function() { return qos; };
+ this._setQos = function(newQos) {
+ if (newQos === 0 || newQos === 1 || newQos === 2 )
+ qos = newQos;
+ else
+ throw new Error("Invalid argument:"+newQos);
+ };
+
+ var retained = false;
+ this._getRetained = function() { return retained; };
+ this._setRetained = function(newRetained) {
+ if (typeof newRetained === "boolean")
+ retained = newRetained;
+ else
+ throw new Error(format(ERROR.INVALID_ARGUMENT, [newRetained, "newRetained"]));
+ };
+
+ var duplicate = false;
+ this._getDuplicate = function() { return duplicate; };
+ this._setDuplicate = function(newDuplicate) { duplicate = newDuplicate; };
+ };
+
+ Message.prototype = {
+ get payloadString() { return this._getPayloadString(); },
+ get payloadBytes() { return this._getPayloadBytes(); },
+
+ get destinationName() { return this._getDestinationName(); },
+ set destinationName(newDestinationName) { this._setDestinationName(newDestinationName); },
+
+ get qos() { return this._getQos(); },
+ set qos(newQos) { this._setQos(newQos); },
+
+ get retained() { return this._getRetained(); },
+ set retained(newRetained) { this._setRetained(newRetained); },
+
+ get duplicate() { return this._getDuplicate(); },
+ set duplicate(newDuplicate) { this._setDuplicate(newDuplicate); }
+ };
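+
+ /*
+ * Illustrative sketch (not part of the library): building a message and
+ * reading its payload back. The payload text is a placeholder.
+ *
+ *   var m = new Paho.MQTT.Message("hello");
+ *   m.qos = 1;
+ *   m.retained = false;
+ *   var text = m.payloadString;   // "hello"
+ *   var bytes = m.payloadBytes;   // Uint8Array with the UTF-8 encoding
+ */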
+
+ // Module contents.
+ return {
+ Client: Client,
+ Message: Message
+ };
+})(window);
diff --git a/deps/rabbitmq_web_mqtt_examples/priv/pencil.cur b/deps/rabbitmq_web_mqtt_examples/priv/pencil.cur
new file mode 100755
index 0000000000..a3e3598bf0
--- /dev/null
+++ b/deps/rabbitmq_web_mqtt_examples/priv/pencil.cur
Binary files differ
diff --git a/deps/rabbitmq_web_mqtt_examples/rabbitmq-components.mk b/deps/rabbitmq_web_mqtt_examples/rabbitmq-components.mk
new file mode 100644
index 0000000000..b2a3be8b35
--- /dev/null
+++ b/deps/rabbitmq_web_mqtt_examples/rabbitmq-components.mk
@@ -0,0 +1,359 @@
+ifeq ($(.DEFAULT_GOAL),)
+# Define default goal to `all` because this file defines some targets
+# before the inclusion of erlang.mk leading to the wrong target becoming
+# the default.
+.DEFAULT_GOAL = all
+endif
+
+# PROJECT_VERSION defaults to:
+# 1. the version exported by rabbitmq-server-release;
+# 2. the version stored in `git-revisions.txt`, if it exists;
+# 3. a version based on git-describe(1), if it is a Git clone;
+# 4. 0.0.0
+
+PROJECT_VERSION := $(RABBITMQ_VERSION)
+
+ifeq ($(PROJECT_VERSION),)
+PROJECT_VERSION := $(shell \
+if test -f git-revisions.txt; then \
+ head -n1 git-revisions.txt | \
+ awk '{print $$$(words $(PROJECT_DESCRIPTION) version);}'; \
+else \
+ (git describe --dirty --abbrev=7 --tags --always --first-parent \
+ 2>/dev/null || echo rabbitmq_v0_0_0) | \
+ sed -e 's/^rabbitmq_v//' -e 's/^v//' -e 's/_/./g' -e 's/-/+/' \
+ -e 's/-/./g'; \
+fi)
+endif
+
+# --------------------------------------------------------------------
+# RabbitMQ components.
+# --------------------------------------------------------------------
+
+# For RabbitMQ repositories, we want to check out branches which match
+# the parent project. For instance, if the parent project is on a
+# release tag, dependencies must be on the same release tag. If the
+# parent project is on a topic branch, dependencies must be on the same
+# topic branch or fall back to `stable` or `master`, whichever was the
+# base of the topic branch.
+
+dep_amqp_client = git_rmq rabbitmq-erlang-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_amqp10_client = git_rmq rabbitmq-amqp1.0-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_amqp10_common = git_rmq rabbitmq-amqp1.0-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbit = git_rmq rabbitmq-server $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbit_common = git_rmq rabbitmq-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_amqp1_0 = git_rmq rabbitmq-amqp1.0 $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_amqp = git_rmq rabbitmq-auth-backend-amqp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_cache = git_rmq rabbitmq-auth-backend-cache $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_http = git_rmq rabbitmq-auth-backend-http $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_ldap = git_rmq rabbitmq-auth-backend-ldap $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_oauth2 = git_rmq rabbitmq-auth-backend-oauth2 $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_mechanism_ssl = git_rmq rabbitmq-auth-mechanism-ssl $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_aws = git_rmq rabbitmq-aws $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_boot_steps_visualiser = git_rmq rabbitmq-boot-steps-visualiser $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_cli = git_rmq rabbitmq-cli $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_codegen = git_rmq rabbitmq-codegen $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_consistent_hash_exchange = git_rmq rabbitmq-consistent-hash-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_ct_client_helpers = git_rmq rabbitmq-ct-client-helpers $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_ct_helpers = git_rmq rabbitmq-ct-helpers $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_delayed_message_exchange = git_rmq rabbitmq-delayed-message-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_dotnet_client = git_rmq rabbitmq-dotnet-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_event_exchange = git_rmq rabbitmq-event-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_federation = git_rmq rabbitmq-federation $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_federation_management = git_rmq rabbitmq-federation-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_java_client = git_rmq rabbitmq-java-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_client = git_rmq rabbitmq-jms-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_cts = git_rmq rabbitmq-jms-cts $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_topic_exchange = git_rmq rabbitmq-jms-topic-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_lvc_exchange = git_rmq rabbitmq-lvc-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management = git_rmq rabbitmq-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_agent = git_rmq rabbitmq-management-agent $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_exchange = git_rmq rabbitmq-management-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_themes = git_rmq rabbitmq-management-themes $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_message_timestamp = git_rmq rabbitmq-message-timestamp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_metronome = git_rmq rabbitmq-metronome $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_mqtt = git_rmq rabbitmq-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_objc_client = git_rmq rabbitmq-objc-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_aws = git_rmq rabbitmq-peer-discovery-aws $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_common = git_rmq rabbitmq-peer-discovery-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_consul = git_rmq rabbitmq-peer-discovery-consul $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_etcd = git_rmq rabbitmq-peer-discovery-etcd $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_k8s = git_rmq rabbitmq-peer-discovery-k8s $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_prometheus = git_rmq rabbitmq-prometheus $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_random_exchange = git_rmq rabbitmq-random-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_recent_history_exchange = git_rmq rabbitmq-recent-history-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_routing_node_stamp = git_rmq rabbitmq-routing-node-stamp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_rtopic_exchange = git_rmq rabbitmq-rtopic-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_server_release = git_rmq rabbitmq-server-release $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_sharding = git_rmq rabbitmq-sharding $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_shovel = git_rmq rabbitmq-shovel $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_shovel_management = git_rmq rabbitmq-shovel-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_stomp = git_rmq rabbitmq-stomp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_stream = git_rmq rabbitmq-stream $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_toke = git_rmq rabbitmq-toke $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_top = git_rmq rabbitmq-top $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_tracing = git_rmq rabbitmq-tracing $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_trust_store = git_rmq rabbitmq-trust-store $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_test = git_rmq rabbitmq-test $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_dispatch = git_rmq rabbitmq-web-dispatch $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_stomp = git_rmq rabbitmq-web-stomp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_stomp_examples = git_rmq rabbitmq-web-stomp-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt = git_rmq rabbitmq-web-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt_examples = git_rmq rabbitmq-web-mqtt-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_website = git_rmq rabbitmq-website $(current_rmq_ref) $(base_rmq_ref) live master
+dep_toke = git_rmq toke $(current_rmq_ref) $(base_rmq_ref) master
+
+dep_rabbitmq_public_umbrella = git_rmq rabbitmq-public-umbrella $(current_rmq_ref) $(base_rmq_ref) master
+
+# Third-party dependencies version pinning.
+#
+# We do that in this file, which is copied in all projects, to ensure
+# all projects use the same versions. It avoids conflicts and makes it
+# possible to work with rabbitmq-public-umbrella.
+
+dep_accept = hex 0.3.5
+dep_cowboy = hex 2.8.0
+dep_cowlib = hex 2.9.1
+dep_jsx = hex 2.11.0
+dep_lager = hex 3.8.0
+dep_prometheus = git https://github.com/deadtrickster/prometheus.erl.git master
+dep_ra = git https://github.com/rabbitmq/ra.git master
+dep_ranch = hex 1.7.1
+dep_recon = hex 2.5.1
+dep_observer_cli = hex 1.5.4
+dep_stdout_formatter = hex 0.2.4
+dep_sysmon_handler = hex 1.3.0
+
+RABBITMQ_COMPONENTS = amqp_client \
+ amqp10_common \
+ amqp10_client \
+ rabbit \
+ rabbit_common \
+ rabbitmq_amqp1_0 \
+ rabbitmq_auth_backend_amqp \
+ rabbitmq_auth_backend_cache \
+ rabbitmq_auth_backend_http \
+ rabbitmq_auth_backend_ldap \
+ rabbitmq_auth_backend_oauth2 \
+ rabbitmq_auth_mechanism_ssl \
+ rabbitmq_aws \
+ rabbitmq_boot_steps_visualiser \
+ rabbitmq_cli \
+ rabbitmq_codegen \
+ rabbitmq_consistent_hash_exchange \
+ rabbitmq_ct_client_helpers \
+ rabbitmq_ct_helpers \
+ rabbitmq_delayed_message_exchange \
+ rabbitmq_dotnet_client \
+ rabbitmq_event_exchange \
+ rabbitmq_federation \
+ rabbitmq_federation_management \
+ rabbitmq_java_client \
+ rabbitmq_jms_client \
+ rabbitmq_jms_cts \
+ rabbitmq_jms_topic_exchange \
+ rabbitmq_lvc_exchange \
+ rabbitmq_management \
+ rabbitmq_management_agent \
+ rabbitmq_management_exchange \
+ rabbitmq_management_themes \
+ rabbitmq_message_timestamp \
+ rabbitmq_metronome \
+ rabbitmq_mqtt \
+ rabbitmq_objc_client \
+ rabbitmq_peer_discovery_aws \
+ rabbitmq_peer_discovery_common \
+ rabbitmq_peer_discovery_consul \
+ rabbitmq_peer_discovery_etcd \
+ rabbitmq_peer_discovery_k8s \
+ rabbitmq_prometheus \
+ rabbitmq_random_exchange \
+ rabbitmq_recent_history_exchange \
+ rabbitmq_routing_node_stamp \
+ rabbitmq_rtopic_exchange \
+ rabbitmq_server_release \
+ rabbitmq_sharding \
+ rabbitmq_shovel \
+ rabbitmq_shovel_management \
+ rabbitmq_stomp \
+ rabbitmq_stream \
+ rabbitmq_toke \
+ rabbitmq_top \
+ rabbitmq_tracing \
+ rabbitmq_trust_store \
+ rabbitmq_web_dispatch \
+ rabbitmq_web_mqtt \
+ rabbitmq_web_mqtt_examples \
+ rabbitmq_web_stomp \
+ rabbitmq_web_stomp_examples \
+ rabbitmq_website
+
+# Erlang.mk does not rebuild dependencies by default once they have
+# been compiled, except for those listed in the `$(FORCE_REBUILD)`
+# variable.
+#
+# We want all RabbitMQ components to always be rebuilt: this eases
+# the work on several components at the same time.
+
+FORCE_REBUILD = $(RABBITMQ_COMPONENTS)
+
+# Several components have a custom erlang.mk/build.config, mainly
+# to disable eunit. Therefore, we can't use the top-level project's
+# erlang.mk copy.
+NO_AUTOPATCH += $(RABBITMQ_COMPONENTS)
+
+ifeq ($(origin current_rmq_ref),undefined)
+ifneq ($(wildcard .git),)
+current_rmq_ref := $(shell (\
+ ref=$$(LANG=C git branch --list | awk '/^\* \(.*detached / {ref=$$0; sub(/.*detached [^ ]+ /, "", ref); sub(/\)$$/, "", ref); print ref; exit;} /^\* / {ref=$$0; sub(/^\* /, "", ref); print ref; exit}');\
+ if test "$$(git rev-parse --short HEAD)" != "$$ref"; then echo "$$ref"; fi))
+else
+current_rmq_ref := master
+endif
+endif
+export current_rmq_ref
+
+ifeq ($(origin base_rmq_ref),undefined)
+ifneq ($(wildcard .git),)
+possible_base_rmq_ref := master
+ifeq ($(possible_base_rmq_ref),$(current_rmq_ref))
+base_rmq_ref := $(current_rmq_ref)
+else
+base_rmq_ref := $(shell \
+ (git rev-parse --verify -q master >/dev/null && \
+ git rev-parse --verify -q $(possible_base_rmq_ref) >/dev/null && \
+ git merge-base --is-ancestor $$(git merge-base master HEAD) $(possible_base_rmq_ref) && \
+ echo $(possible_base_rmq_ref)) || \
+ echo master)
+endif
+else
+base_rmq_ref := master
+endif
+endif
+export base_rmq_ref
+
+# Repository URL selection.
+#
+# First, we infer other components' location from the current project
+# repository URL, if it's a Git repository:
+# - We take the "origin" remote URL as the base
+# - The current project name and repository name are replaced by the
+#   target's properties:
+# eg. rabbitmq-common is replaced by rabbitmq-codegen
+# eg. rabbit_common is replaced by rabbitmq_codegen
+#
+# If cloning from this computed location fails, we fallback to RabbitMQ
+# upstream which is GitHub.
+
+# Macro to transform eg. "rabbit_common" to "rabbitmq-common".
+rmq_cmp_repo_name = $(word 2,$(dep_$(1)))
+
+# Upstream URL for the current project.
+RABBITMQ_COMPONENT_REPO_NAME := $(call rmq_cmp_repo_name,$(PROJECT))
+RABBITMQ_UPSTREAM_FETCH_URL ?= https://github.com/rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git
+RABBITMQ_UPSTREAM_PUSH_URL ?= git@github.com:rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git
+
+# Current URL for the current project. If this is not a Git clone,
+# default to the upstream Git repository.
+ifneq ($(wildcard .git),)
+git_origin_fetch_url := $(shell git config remote.origin.url)
+git_origin_push_url := $(shell git config remote.origin.pushurl || git config remote.origin.url)
+RABBITMQ_CURRENT_FETCH_URL ?= $(git_origin_fetch_url)
+RABBITMQ_CURRENT_PUSH_URL ?= $(git_origin_push_url)
+else
+RABBITMQ_CURRENT_FETCH_URL ?= $(RABBITMQ_UPSTREAM_FETCH_URL)
+RABBITMQ_CURRENT_PUSH_URL ?= $(RABBITMQ_UPSTREAM_PUSH_URL)
+endif
+
+# Macro to replace the following pattern:
+# 1. /foo.git -> /bar.git
+# 2. /foo -> /bar
+# 3. /foo/ -> /bar/
+subst_repo_name = $(patsubst %/$(1)/%,%/$(2)/%,$(patsubst %/$(1),%/$(2),$(patsubst %/$(1).git,%/$(2).git,$(3))))
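+
+# Illustrative example (not part of the build logic): given the fetch URL
+# https://github.com/rabbitmq/rabbitmq-server.git,
+# $(call subst_repo_name,rabbitmq-server,rabbitmq-codegen,<that URL>)
+# yields https://github.com/rabbitmq/rabbitmq-codegen.git.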
+
+# Macro to replace both the project's name (eg. "rabbit_common") and
+# repository name (eg. "rabbitmq-common") by the target's equivalent.
+#
+# This macro is kept on one line because we don't want whitespaces in
+# the returned value, as it's used in $(dep_fetch_git_rmq) in a shell
+# single-quoted string.
+dep_rmq_repo = $(if $(dep_$(2)),$(call subst_repo_name,$(PROJECT),$(2),$(call subst_repo_name,$(RABBITMQ_COMPONENT_REPO_NAME),$(call rmq_cmp_repo_name,$(2)),$(1))),$(pkg_$(1)_repo))
+
+dep_rmq_commits = $(if $(dep_$(1)), \
+ $(wordlist 3,$(words $(dep_$(1))),$(dep_$(1))), \
+ $(pkg_$(1)_commit))
+
+define dep_fetch_git_rmq
+ fetch_url1='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_FETCH_URL),$(1))'; \
+ fetch_url2='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_FETCH_URL),$(1))'; \
+ if test "$$$$fetch_url1" != '$(RABBITMQ_CURRENT_FETCH_URL)' && \
+ git clone -q -n -- "$$$$fetch_url1" $(DEPS_DIR)/$(call dep_name,$(1)); then \
+ fetch_url="$$$$fetch_url1"; \
+ push_url='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_PUSH_URL),$(1))'; \
+ elif git clone -q -n -- "$$$$fetch_url2" $(DEPS_DIR)/$(call dep_name,$(1)); then \
+ fetch_url="$$$$fetch_url2"; \
+ push_url='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_PUSH_URL),$(1))'; \
+ fi; \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && ( \
+ $(foreach ref,$(call dep_rmq_commits,$(1)), \
+ git checkout -q $(ref) >/dev/null 2>&1 || \
+ ) \
+ (echo "error: no valid pathspec among: $(call dep_rmq_commits,$(1))" \
+ 1>&2 && false) ) && \
+ (test "$$$$fetch_url" = "$$$$push_url" || \
+ git remote set-url --push origin "$$$$push_url")
+endef
+
+# --------------------------------------------------------------------
+# Component distribution.
+# --------------------------------------------------------------------
+
+list-dist-deps::
+ @:
+
+prepare-dist::
+ @:
+
+# --------------------------------------------------------------------
+# Umbrella-specific settings.
+# --------------------------------------------------------------------
+
+# If the top-level project is a RabbitMQ component, we override
+# $(DEPS_DIR) for this project to point to the top-level's one.
+#
+# We also verify that the guessed DEPS_DIR is actually named `deps`,
+# to rule out any situation where it is a coincidence that we found a
+# `rabbitmq-components.mk` in upper directories.
+
+possible_deps_dir_1 = $(abspath ..)
+possible_deps_dir_2 = $(abspath ../../..)
+
+ifeq ($(notdir $(possible_deps_dir_1)),deps)
+ifneq ($(wildcard $(possible_deps_dir_1)/../rabbitmq-components.mk),)
+deps_dir_overriden = 1
+DEPS_DIR ?= $(possible_deps_dir_1)
+DISABLE_DISTCLEAN = 1
+endif
+endif
+
+ifeq ($(deps_dir_overriden),)
+ifeq ($(notdir $(possible_deps_dir_2)),deps)
+ifneq ($(wildcard $(possible_deps_dir_2)/../rabbitmq-components.mk),)
+deps_dir_overriden = 1
+DEPS_DIR ?= $(possible_deps_dir_2)
+DISABLE_DISTCLEAN = 1
+endif
+endif
+endif
+
+ifneq ($(wildcard UMBRELLA.md),)
+DISABLE_DISTCLEAN = 1
+endif
+
+# We disable `make distclean` so $(DEPS_DIR) is not accidentally removed.
+
+ifeq ($(DISABLE_DISTCLEAN),1)
+ifneq ($(filter distclean distclean-deps,$(MAKECMDGOALS)),)
+SKIP_DEPS = 1
+endif
+endif
diff --git a/deps/rabbitmq_web_mqtt_examples/src/rabbit_web_mqtt_examples_app.erl b/deps/rabbitmq_web_mqtt_examples/src/rabbit_web_mqtt_examples_app.erl
new file mode 100644
index 0000000000..d0e676fd60
--- /dev/null
+++ b/deps/rabbitmq_web_mqtt_examples/src/rabbit_web_mqtt_examples_app.erl
@@ -0,0 +1,29 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_web_mqtt_examples_app).
+
+-behaviour(application).
+-export([start/2,stop/1]).
+
+%% Dummy supervisor - see Ulf Wiger's comment at
+%% http://erlang.2086793.n4.nabble.com/initializing-library-applications-without-processes-td2094473.html
+-behaviour(supervisor).
+-export([init/1]).
+
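+%% Illustrative sketch (assumed values): this application reads a `listener'
+%% proplist from its environment, for example via an advanced.config entry
+%% such as:
+%%   [{rabbitmq_web_mqtt_examples, [{listener, [{port, 15670}]}]}].
+%% The port shown is a placeholder.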
+start(_Type, _StartArgs) ->
+ {ok, Listener} = application:get_env(rabbitmq_web_mqtt_examples, listener),
+ {ok, _} = rabbit_web_dispatch:register_static_context(
+ web_mqtt_examples, Listener, "web-mqtt-examples", ?MODULE,
+ "priv", "WEB-MQTT: examples"),
+ supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+stop(_State) ->
+ rabbit_web_dispatch:unregister_context(web_mqtt_examples),
+ ok.
+
+init([]) -> {ok, {{one_for_one, 3, 10}, []}}.
diff --git a/deps/rabbitmq_web_stomp/.gitignore b/deps/rabbitmq_web_stomp/.gitignore
new file mode 100644
index 0000000000..00665f2044
--- /dev/null
+++ b/deps/rabbitmq_web_stomp/.gitignore
@@ -0,0 +1,19 @@
+.sw?
+.*.sw?
+*.beam
+/.erlang.mk/
+/cover/
+/deps/
+/doc/
+/ebin/
+/escript/
+/escript.lock
+/logs/
+/plugins/
+/plugins.lock
+/sbin/
+/sbin.lock
+
+rabbitmq_web_stomp.d
+
+test/config_schema_SUITE_data/schema/
diff --git a/deps/rabbitmq_web_stomp/.travis.yml b/deps/rabbitmq_web_stomp/.travis.yml
new file mode 100644
index 0000000000..fa1e2ae2f0
--- /dev/null
+++ b/deps/rabbitmq_web_stomp/.travis.yml
@@ -0,0 +1,61 @@
+# vim:sw=2:et:
+
+os: linux
+dist: xenial
+language: elixir
+notifications:
+ email:
+ recipients:
+ - alerts@rabbitmq.com
+ on_success: never
+ on_failure: always
+addons:
+ apt:
+ packages:
+ - awscli
+cache:
+ apt: true
+env:
+ global:
+ - secure: OcDUSwDDsVyyU4rNVGZMyBQsVHTqaOBJzhHMiNF8zkQQZmduVQz2maruBfJcdJq7TeMRTo6FPpHG2HTCLYGqxjVxWALo/oyEvekXqp1iK3SzKoN/xrYIsm+X5kZ3+DeNXzthhMN+tSXGWF1+KddCiI5vQq72s2jdlQHZwTdIWLw=
+ - secure: T3CYCsExInWzqQ1BdZaPZUVj3FnoYlk9QLXImCu82k2eVVjUWBQhQNEmgfuNCinTed97BVsYd5tCEOUPvenfsoW+FBZQElLzumlgUHVdkWwsmY33xmevJbBmFyhsXwRY5FH6hfQ5FKlHAS41Rn1q8UOZYPffsC4TsiBSGqKc1Z4=
+
+ # $base_rmq_ref is used by rabbitmq-components.mk to select the
+ # appropriate branch for dependencies.
+ - base_rmq_ref=master
+
+elixir:
+ - '1.9'
+otp_release:
+ - '21.3'
+ - '22.2'
+
+install:
+ # This project is an Erlang one (we only set the language to Elixir
+ # to ensure Elixir is installed), so we don't want Travis to run mix(1)
+ # automatically, as it would break.
+ skip
+
+script:
+ # $current_rmq_ref is also used by rabbitmq-components.mk to select
+ # the appropriate branch for dependencies.
+ - make check-rabbitmq-components.mk
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+ - make xref
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+ - make tests
+ current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+
+after_failure:
+ - |
+ cd "$TRAVIS_BUILD_DIR"
+ if test -d logs && test "$AWS_ACCESS_KEY_ID" && test "$AWS_SECRET_ACCESS_KEY"; then
+ archive_name="$(basename "$TRAVIS_REPO_SLUG")-$TRAVIS_JOB_NUMBER"
+
+ tar -c --transform "s/^logs/${archive_name}/" -f - logs | \
+ xz > "${archive_name}.tar.xz"
+
+ aws s3 cp "${archive_name}.tar.xz" s3://server-release-pipeline/travis-ci-logs/ \
+ --region eu-west-1 \
+ --acl public-read
+ fi
diff --git a/deps/rabbitmq_web_stomp/CODE_OF_CONDUCT.md b/deps/rabbitmq_web_stomp/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000000..08697906fd
--- /dev/null
+++ b/deps/rabbitmq_web_stomp/CODE_OF_CONDUCT.md
@@ -0,0 +1,44 @@
+# Contributor Code of Conduct
+
+As contributors and maintainers of this project, and in the interest of fostering an open
+and welcoming community, we pledge to respect all people who contribute through reporting
+issues, posting feature requests, updating documentation, submitting pull requests or
+patches, and other activities.
+
+We are committed to making participation in this project a harassment-free experience for
+everyone, regardless of level of experience, gender, gender identity and expression,
+sexual orientation, disability, personal appearance, body size, race, ethnicity, age,
+religion, or nationality.
+
+Examples of unacceptable behavior by participants include:
+
+ * The use of sexualized language or imagery
+ * Personal attacks
+ * Trolling or insulting/derogatory comments
+ * Public or private harassment
+ * Publishing others' private information, such as physical or electronic addresses,
+ without explicit permission
+ * Other unethical or unprofessional conduct
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments,
+commits, code, wiki edits, issues, and other contributions that are not aligned to this
+Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors
+that they deem inappropriate, threatening, offensive, or harmful.
+
+By adopting this Code of Conduct, project maintainers commit themselves to fairly and
+consistently applying these principles to every aspect of managing this project. Project
+maintainers who do not follow or enforce the Code of Conduct may be permanently removed
+from the project team.
+
+This Code of Conduct applies both within project spaces and in public spaces when an
+individual is representing the project or its community.
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by
+contacting a project maintainer at [info@rabbitmq.com](mailto:info@rabbitmq.com). All complaints will
+be reviewed and investigated and will result in a response that is deemed necessary and
+appropriate to the circumstances. Maintainers are obligated to maintain confidentiality
+with regard to the reporter of an incident.
+
+This Code of Conduct is adapted from the
+[Contributor Covenant](https://contributor-covenant.org), version 1.3.0, available at
+[contributor-covenant.org/version/1/3/0/](https://contributor-covenant.org/version/1/3/0/)
diff --git a/deps/rabbitmq_web_stomp/CONTRIBUTING.md b/deps/rabbitmq_web_stomp/CONTRIBUTING.md
new file mode 100644
index 0000000000..23a92fef9c
--- /dev/null
+++ b/deps/rabbitmq_web_stomp/CONTRIBUTING.md
@@ -0,0 +1,38 @@
+## Overview
+
+RabbitMQ projects use pull requests to discuss, collaborate on and accept code contributions.
+Pull requests are the primary place for discussing code changes.
+
+## How to Contribute
+
+The process is fairly standard:
+
+ * Fork the repository or repositories you plan on contributing to
+ * Clone [RabbitMQ umbrella repository](https://github.com/rabbitmq/rabbitmq-public-umbrella)
+ * `cd umbrella`, `make co`
+ * Create a branch with a descriptive name in the relevant repositories
+ * Make your changes, run tests, commit with a [descriptive message](https://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html), push to your fork
+ * Submit pull requests with an explanation what has been changed and **why**
+ * Submit a filled out and signed [Contributor Agreement](https://github.com/rabbitmq/ca#how-to-submit) if needed (see below)
+ * Be patient. We will get to your pull request eventually
+
+If what you are going to work on is a substantial change, please first ask the core team
+for their opinion on the [RabbitMQ mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
+
+
+## Code of Conduct
+
+See [CODE_OF_CONDUCT.md](./CODE_OF_CONDUCT.md).
+
+
+## Contributor Agreement
+
+If you want to contribute a non-trivial change, please submit a signed copy of our
+[Contributor Agreement](https://github.com/rabbitmq/ca#how-to-submit) around the time
+you submit your pull request. This will make it much easier (in some cases, possible)
+for the RabbitMQ team at Pivotal to merge your contribution.
+
+
+## Where to Ask Questions
+
+If something isn't clear, feel free to ask on our [mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
diff --git a/deps/rabbitmq_web_stomp/LICENSE b/deps/rabbitmq_web_stomp/LICENSE
new file mode 100644
index 0000000000..6155281969
--- /dev/null
+++ b/deps/rabbitmq_web_stomp/LICENSE
@@ -0,0 +1,5 @@
+This package, the RabbitMQ STOMP-over-WebSockets plugin, is licensed under the MPL 2.0. For
+the MPL, please see LICENSE-MPL-RabbitMQ.
+
+If you have any questions regarding licensing, please contact us at
+info@rabbitmq.com.
diff --git a/deps/rabbitmq_web_stomp/LICENSE-MPL-RabbitMQ b/deps/rabbitmq_web_stomp/LICENSE-MPL-RabbitMQ
new file mode 100644
index 0000000000..14e2f777f6
--- /dev/null
+++ b/deps/rabbitmq_web_stomp/LICENSE-MPL-RabbitMQ
@@ -0,0 +1,373 @@
+Mozilla Public License Version 2.0
+==================================
+
+1. Definitions
+--------------
+
+1.1. "Contributor"
+ means each individual or legal entity that creates, contributes to
+ the creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+ means the combination of the Contributions of others (if any) used
+ by a Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+ means Source Code Form to which the initial Contributor has attached
+ the notice in Exhibit A, the Executable Form of such Source Code
+ Form, and Modifications of such Source Code Form, in each case
+ including portions thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ (a) that the initial Contributor has attached the notice described
+ in Exhibit B to the Covered Software; or
+
+ (b) that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the
+ terms of a Secondary License.
+
+1.6. "Executable Form"
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+ means a work that combines Covered Software with other material, in
+ a separate file or files, that is not Covered Software.
+
+1.8. "License"
+ means this document.
+
+1.9. "Licensable"
+ means having the right to grant, to the maximum extent possible,
+ whether at the time of the initial grant or subsequently, any and
+ all of the rights conveyed by this License.
+
+1.10. "Modifications"
+ means any of the following:
+
+ (a) any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered
+ Software; or
+
+ (b) any new file in Source Code Form that contains any Covered
+ Software.
+
+1.11. "Patent Claims" of a Contributor
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the
+ License, by the making, using, selling, offering for sale, having
+ made, import, or transfer of either its Contributions or its
+ Contributor Version.
+
+1.12. "Secondary License"
+ means either the GNU General Public License, Version 2.0, the GNU
+ Lesser General Public License, Version 2.1, the GNU Affero General
+ Public License, Version 3.0, or any later versions of those
+ licenses.
+
+1.13. "Source Code Form"
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that
+ controls, is controlled by, or is under common control with You. For
+ purposes of this definition, "control" means (a) the power, direct
+ or indirect, to cause the direction or management of such entity,
+ whether by contract or otherwise, or (b) ownership of more than
+ fifty percent (50%) of the outstanding shares or beneficial
+ ownership of such entity.
+
+2. License Grants and Conditions
+--------------------------------
+
+2.1. Grants
+
+Each Contributor hereby grants You a world-wide, royalty-free,
+non-exclusive license:
+
+(a) under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+(b) under Patent Claims of such Contributor to make, use, sell, offer
+ for sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+The licenses granted in Section 2.1 with respect to any Contribution
+become effective for each Contribution on the date the Contributor first
+distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+The licenses granted in this Section 2 are the only rights granted under
+this License. No additional rights or licenses will be implied from the
+distribution or licensing of Covered Software under this License.
+Notwithstanding Section 2.1(b) above, no patent license is granted by a
+Contributor:
+
+(a) for any code that a Contributor has removed from Covered Software;
+ or
+
+(b) for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+(c) under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+This License does not grant any rights in the trademarks, service marks,
+or logos of any Contributor (except as may be necessary to comply with
+the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+No Contributor makes additional grants as a result of Your choice to
+distribute the Covered Software under a subsequent version of this
+License (see Section 10.2) or under the terms of a Secondary License (if
+permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+Each Contributor represents that the Contributor believes its
+Contributions are its original creation(s) or it has sufficient rights
+to grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+This License is not intended to limit any rights You have under
+applicable copyright doctrines of fair use, fair dealing, or other
+equivalents.
+
+2.7. Conditions
+
+Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
+in Section 2.1.
+
+3. Responsibilities
+-------------------
+
+3.1. Distribution of Source Form
+
+All distribution of Covered Software in Source Code Form, including any
+Modifications that You create or to which You contribute, must be under
+the terms of this License. You must inform recipients that the Source
+Code Form of the Covered Software is governed by the terms of this
+License, and how they can obtain a copy of this License. You may not
+attempt to alter or restrict the recipients' rights in the Source Code
+Form.
+
+3.2. Distribution of Executable Form
+
+If You distribute Covered Software in Executable Form then:
+
+(a) such Covered Software must also be made available in Source Code
+ Form, as described in Section 3.1, and You must inform recipients of
+ the Executable Form how they can obtain a copy of such Source Code
+ Form by reasonable means in a timely manner, at a charge no more
+ than the cost of distribution to the recipient; and
+
+(b) You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter
+ the recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+You may create and distribute a Larger Work under terms of Your choice,
+provided that You also comply with the requirements of this License for
+the Covered Software. If the Larger Work is a combination of Covered
+Software with a work governed by one or more Secondary Licenses, and the
+Covered Software is not Incompatible With Secondary Licenses, this
+License permits You to additionally distribute such Covered Software
+under the terms of such Secondary License(s), so that the recipient of
+the Larger Work may, at their option, further distribute the Covered
+Software under the terms of either this License or such Secondary
+License(s).
+
+3.4. Notices
+
+You may not remove or alter the substance of any license notices
+(including copyright notices, patent notices, disclaimers of warranty,
+or limitations of liability) contained within the Source Code Form of
+the Covered Software, except that You may alter any license notices to
+the extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+You may choose to offer, and to charge a fee for, warranty, support,
+indemnity or liability obligations to one or more recipients of Covered
+Software. However, You may do so only on Your own behalf, and not on
+behalf of any Contributor. You must make it absolutely clear that any
+such warranty, support, indemnity, or liability obligation is offered by
+You alone, and You hereby agree to indemnify every Contributor for any
+liability incurred by such Contributor as a result of warranty, support,
+indemnity or liability terms You offer. You may include additional
+disclaimers of warranty and limitations of liability specific to any
+jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+---------------------------------------------------
+
+If it is impossible for You to comply with any of the terms of this
+License with respect to some or all of the Covered Software due to
+statute, judicial order, or regulation then You must: (a) comply with
+the terms of this License to the maximum extent possible; and (b)
+describe the limitations and the code they affect. Such description must
+be placed in a text file included with all distributions of the Covered
+Software under this License. Except to the extent prohibited by statute
+or regulation, such description must be sufficiently detailed for a
+recipient of ordinary skill to be able to understand it.
+
+5. Termination
+--------------
+
+5.1. The rights granted under this License will terminate automatically
+if You fail to comply with any of its terms. However, if You become
+compliant, then the rights granted under this License from a particular
+Contributor are reinstated (a) provisionally, unless and until such
+Contributor explicitly and finally terminates Your grants, and (b) on an
+ongoing basis, if such Contributor fails to notify You of the
+non-compliance by some reasonable means prior to 60 days after You have
+come back into compliance. Moreover, Your grants from a particular
+Contributor are reinstated on an ongoing basis if such Contributor
+notifies You of the non-compliance by some reasonable means, this is the
+first time You have received notice of non-compliance with this License
+from such Contributor, and You become compliant prior to 30 days after
+Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+infringement claim (excluding declaratory judgment actions,
+counter-claims, and cross-claims) alleging that a Contributor Version
+directly or indirectly infringes any patent, then the rights granted to
+You by any and all Contributors for the Covered Software under Section
+2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all
+end user license agreements (excluding distributors and resellers) which
+have been validly granted by You or Your distributors under this License
+prior to termination shall survive termination.
+
+************************************************************************
+* *
+* 6. Disclaimer of Warranty *
+* ------------------------- *
+* *
+* Covered Software is provided under this License on an "as is" *
+* basis, without warranty of any kind, either expressed, implied, or *
+* statutory, including, without limitation, warranties that the *
+* Covered Software is free of defects, merchantable, fit for a *
+* particular purpose or non-infringing. The entire risk as to the *
+* quality and performance of the Covered Software is with You. *
+* Should any Covered Software prove defective in any respect, You *
+* (not any Contributor) assume the cost of any necessary servicing, *
+* repair, or correction. This disclaimer of warranty constitutes an *
+* essential part of this License. No use of any Covered Software is *
+* authorized under this License except under this disclaimer. *
+* *
+************************************************************************
+
+************************************************************************
+* *
+* 7. Limitation of Liability *
+* -------------------------- *
+* *
+* Under no circumstances and under no legal theory, whether tort *
+* (including negligence), contract, or otherwise, shall any *
+* Contributor, or anyone who distributes Covered Software as *
+* permitted above, be liable to You for any direct, indirect, *
+* special, incidental, or consequential damages of any character *
+* including, without limitation, damages for lost profits, loss of *
+* goodwill, work stoppage, computer failure or malfunction, or any *
+* and all other commercial damages or losses, even if such party *
+* shall have been informed of the possibility of such damages. This *
+* limitation of liability shall not apply to liability for death or *
+* personal injury resulting from such party's negligence to the *
+* extent applicable law prohibits such limitation. Some *
+* jurisdictions do not allow the exclusion or limitation of *
+* incidental or consequential damages, so this exclusion and *
+* limitation may not apply to You. *
+* *
+************************************************************************
+
+8. Litigation
+-------------
+
+Any litigation relating to this License may be brought only in the
+courts of a jurisdiction where the defendant maintains its principal
+place of business and such litigation shall be governed by laws of that
+jurisdiction, without reference to its conflict-of-law provisions.
+Nothing in this Section shall prevent a party's ability to bring
+cross-claims or counter-claims.
+
+9. Miscellaneous
+----------------
+
+This License represents the complete agreement concerning the subject
+matter hereof. If any provision of this License is held to be
+unenforceable, such provision shall be reformed only to the extent
+necessary to make it enforceable. Any law or regulation which provides
+that the language of a contract shall be construed against the drafter
+shall not be used to construe this License against a Contributor.
+
+10. Versions of the License
+---------------------------
+
+10.1. New Versions
+
+Mozilla Foundation is the license steward. Except as provided in Section
+10.3, no one other than the license steward has the right to modify or
+publish new versions of this License. Each version will be given a
+distinguishing version number.
+
+10.2. Effect of New Versions
+
+You may distribute the Covered Software under the terms of the version
+of the License under which You originally received the Covered Software,
+or under the terms of any subsequent version published by the license
+steward.
+
+10.3. Modified Versions
+
+If you create software not governed by this License, and you want to
+create a new license for such software, you may create and use a
+modified version of this License if you rename the license and remove
+any references to the name of the license steward (except to note that
+such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+Licenses
+
+If You choose to distribute Source Code Form that is Incompatible With
+Secondary Licenses under the terms of this version of the License, the
+notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+-------------------------------------------
+
+ This Source Code Form is subject to the terms of the Mozilla Public
+ License, v. 2.0. If a copy of the MPL was not distributed with this
+ file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular
+file, then You may include the notice in a location (such as a LICENSE
+file in a relevant directory) where a recipient would be likely to look
+for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+---------------------------------------------------------
+
+ This Source Code Form is "Incompatible With Secondary Licenses", as
+ defined by the Mozilla Public License, v. 2.0.
diff --git a/deps/rabbitmq_web_stomp/Makefile b/deps/rabbitmq_web_stomp/Makefile
new file mode 100644
index 0000000000..90724d9442
--- /dev/null
+++ b/deps/rabbitmq_web_stomp/Makefile
@@ -0,0 +1,39 @@
+PROJECT = rabbitmq_web_stomp
+PROJECT_DESCRIPTION = RabbitMQ STOMP-over-WebSockets support
+PROJECT_MOD = rabbit_web_stomp_app
+
+define PROJECT_ENV
+[
+ {tcp_config, [{port, 15674}]},
+ {ssl_config, []},
+ {num_tcp_acceptors, 10},
+ {num_ssl_acceptors, 10},
+ {cowboy_opts, []},
+ {proxy_protocol, false},
+ {ws_frame, text},
+ {use_http_auth, false}
+ ]
+endef
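+
+# Config sketch (an illustrative assumption, not part of this Makefile): the
+# env defaults above become the plugin's application environment, so a broker
+# could override them in Erlang-term config, e.g.
+#   [{rabbitmq_web_stomp, [{tcp_config, [{port, 15674}]}]}].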
+
+define PROJECT_APP_EXTRA_KEYS
+ {broker_version_requirements, []}
+endef
+
+DEPS = cowboy rabbit_common rabbit rabbitmq_stomp
+TEST_DEPS = rabbitmq_ct_helpers rabbitmq_ct_client_helpers
+
+# FIXME: Add Ranch to BUILD_DEPS to be sure the correct version is picked.
+# See rabbitmq-components.mk.
+BUILD_DEPS += ranch
+
+DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-plugin.mk
+DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk
+
+# FIXME: Use erlang.mk patched for RabbitMQ, while waiting for PRs to be
+# reviewed and merged.
+
+ERLANG_MK_REPO = https://github.com/rabbitmq/erlang.mk.git
+ERLANG_MK_COMMIT = rabbitmq-tmp
+
+include rabbitmq-components.mk
+include erlang.mk
diff --git a/deps/rabbitmq_web_stomp/README.md b/deps/rabbitmq_web_stomp/README.md
new file mode 100644
index 0000000000..e0370abc0e
--- /dev/null
+++ b/deps/rabbitmq_web_stomp/README.md
@@ -0,0 +1,34 @@
+# RabbitMQ Web STOMP plugin
+
+This plugin provides support for STOMP-over-WebSockets to RabbitMQ.
+
+## Installation
+
+This plugin ships with modern versions of RabbitMQ.
+Like all plugins, it [must be enabled](https://www.rabbitmq.com/plugins.html) before it can be used:
+
+``` bash
+# this might require sudo
+rabbitmq-plugins enable rabbitmq_web_stomp
+```
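+
+To confirm the plugin was actually enabled, something like the following can
+be used (a hedged sketch; the exact listing format varies across RabbitMQ
+versions):
+
+``` bash
+# rabbitmq_web_stomp should appear among the enabled plugins
+rabbitmq-plugins list | grep web_stomp
+```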
+
+## Documentation
+
+Please refer to the [RabbitMQ Web STOMP guide](https://www.rabbitmq.com/web-stomp.html).
+
+## Building from Source
+
+See the [Plugin Development guide](https://www.rabbitmq.com/plugin-development.html).
+
+TL;DR: running
+
+ make dist
+
+will build the plugin and put build artifacts under the `./plugins` directory.
+
+
+## Copyright and License
+
+(c) 2007-2020 VMware, Inc. or its affiliates.
+
+Released under the MPL, the same license as RabbitMQ.
diff --git a/deps/rabbitmq_web_stomp/erlang.mk b/deps/rabbitmq_web_stomp/erlang.mk
new file mode 100644
index 0000000000..fce4be0b0a
--- /dev/null
+++ b/deps/rabbitmq_web_stomp/erlang.mk
@@ -0,0 +1,7808 @@
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+.PHONY: all app apps deps search rel relup docs install-docs check tests clean distclean help erlang-mk
+
+ERLANG_MK_FILENAME := $(realpath $(lastword $(MAKEFILE_LIST)))
+export ERLANG_MK_FILENAME
+
+ERLANG_MK_VERSION = 2019.07.01-40-geb3e4b0
+ERLANG_MK_WITHOUT =
+
+# Make 3.81 and 3.82 are deprecated.
+
+ifeq ($(MAKELEVEL)$(MAKE_VERSION),03.81)
+$(warning Please upgrade to GNU Make 4 or later: https://erlang.mk/guide/installation.html)
+endif
+
+ifeq ($(MAKELEVEL)$(MAKE_VERSION),03.82)
+$(warning Please upgrade to GNU Make 4 or later: https://erlang.mk/guide/installation.html)
+endif
+
+# Core configuration.
+
+PROJECT ?= $(notdir $(CURDIR))
+PROJECT := $(strip $(PROJECT))
+
+PROJECT_VERSION ?= rolling
+PROJECT_MOD ?= $(PROJECT)_app
+PROJECT_ENV ?= []
+
+# Verbosity.
+
+V ?= 0
+
+verbose_0 = @
+verbose_2 = set -x;
+verbose = $(verbose_$(V))
+
+ifeq ($(V),3)
+SHELL := $(SHELL) -x
+endif
+
+gen_verbose_0 = @echo " GEN " $@;
+gen_verbose_2 = set -x;
+gen_verbose = $(gen_verbose_$(V))
+
+gen_verbose_esc_0 = @echo " GEN " $$@;
+gen_verbose_esc_2 = set -x;
+gen_verbose_esc = $(gen_verbose_esc_$(V))
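+
+# Usage sketch (illustrative, not upstream documentation): V=0 hides recipe
+# commands behind terse " GEN " lines, V=1 lets make echo commands as usual,
+# and V=2/V=3 additionally trace execution with "set -x", e.g.:
+#   make V=2 app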
+
+# Temporary files directory.
+
+ERLANG_MK_TMP ?= $(CURDIR)/.erlang.mk
+export ERLANG_MK_TMP
+
+# "erl" command.
+
+ERL = erl +A1 -noinput -boot no_dot_erlang
+
+# Platform detection.
+
+ifeq ($(PLATFORM),)
+UNAME_S := $(shell uname -s)
+
+ifeq ($(UNAME_S),Linux)
+PLATFORM = linux
+else ifeq ($(UNAME_S),Darwin)
+PLATFORM = darwin
+else ifeq ($(UNAME_S),SunOS)
+PLATFORM = solaris
+else ifeq ($(UNAME_S),GNU)
+PLATFORM = gnu
+else ifeq ($(UNAME_S),FreeBSD)
+PLATFORM = freebsd
+else ifeq ($(UNAME_S),NetBSD)
+PLATFORM = netbsd
+else ifeq ($(UNAME_S),OpenBSD)
+PLATFORM = openbsd
+else ifeq ($(UNAME_S),DragonFly)
+PLATFORM = dragonfly
+else ifeq ($(shell uname -o),Msys)
+PLATFORM = msys2
+else
+$(error Unable to detect platform. Please open a ticket with the output of uname -a.)
+endif
+
+export PLATFORM
+endif
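+
+# Note (illustrative): since detection only runs when PLATFORM is unset, it
+# can be bypassed explicitly, e.g.:
+#   PLATFORM=linux make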
+
+# Core targets.
+
+all:: deps app rel
+
+# Noop to avoid a Make warning when there's nothing to do.
+rel::
+ $(verbose) :
+
+relup:: deps app
+
+check:: tests
+
+clean:: clean-crashdump
+
+clean-crashdump:
+ifneq ($(wildcard erl_crash.dump),)
+ $(gen_verbose) rm -f erl_crash.dump
+endif
+
+distclean:: clean distclean-tmp
+
+$(ERLANG_MK_TMP):
+ $(verbose) mkdir -p $(ERLANG_MK_TMP)
+
+distclean-tmp:
+ $(gen_verbose) rm -rf $(ERLANG_MK_TMP)
+
+help::
+ $(verbose) printf "%s\n" \
+ "erlang.mk (version $(ERLANG_MK_VERSION)) is distributed under the terms of the ISC License." \
+ "Copyright (c) 2013-2016 Loïc Hoguin <essen@ninenines.eu>" \
+ "" \
+ "Usage: [V=1] $(MAKE) [target]..." \
+ "" \
+ "Core targets:" \
+ " all Run deps, app and rel targets in that order" \
+ " app Compile the project" \
+ " deps Fetch dependencies (if needed) and compile them" \
+ " fetch-deps Fetch dependencies recursively (if needed) without compiling them" \
+ " list-deps List dependencies recursively on stdout" \
+ " search q=... Search for a package in the built-in index" \
+ " rel Build a release for this project, if applicable" \
+ " docs Build the documentation for this project" \
+ " install-docs Install the man pages for this project" \
+ " check Compile and run all tests and analysis for this project" \
+ " tests Run the tests for this project" \
+ " clean Delete temporary and output files from most targets" \
+ " distclean Delete all temporary and output files" \
+ " help Display this help and exit" \
+ " erlang-mk Update erlang.mk to the latest version"
+
+# Core functions.
+
+empty :=
+space := $(empty) $(empty)
+tab := $(empty)	$(empty)
+comma := ,
+
+define newline
+
+
+endef
+
+define comma_list
+$(subst $(space),$(comma),$(strip $(1)))
+endef
+
+define escape_dquotes
+$(subst ",\",$1)
+endef
+
+# Adding erlang.mk to make Erlang scripts that call init:get_plain_arguments() happy.
+define erlang
+$(ERL) $2 -pz $(ERLANG_MK_TMP)/rebar/ebin -eval "$(subst $(newline),,$(call escape_dquotes,$1))" -- erlang.mk
+endef
+
+ifeq ($(PLATFORM),msys2)
+core_native_path = $(shell cygpath -m $1)
+else
+core_native_path = $1
+endif
+
+core_http_get = curl -Lf$(if $(filter-out 0,$(V)),,s)o $(call core_native_path,$1) $2
+
+core_eq = $(and $(findstring $(1),$(2)),$(findstring $(2),$(1)))
+
+# We skip files that contain spaces because they end up causing issues.
+core_find = $(if $(wildcard $1),$(shell find $(1:%/=%) \( -type l -o -type f \) -name $(subst *,\*,$2) | grep -v " "))
+
+core_lc = $(subst A,a,$(subst B,b,$(subst C,c,$(subst D,d,$(subst E,e,$(subst F,f,$(subst G,g,$(subst H,h,$(subst I,i,$(subst J,j,$(subst K,k,$(subst L,l,$(subst M,m,$(subst N,n,$(subst O,o,$(subst P,p,$(subst Q,q,$(subst R,r,$(subst S,s,$(subst T,t,$(subst U,u,$(subst V,v,$(subst W,w,$(subst X,x,$(subst Y,y,$(subst Z,z,$(1)))))))))))))))))))))))))))
+
+core_ls = $(filter-out $(1),$(shell echo $(1)))
+
+# @todo Use a solution that does not require using perl.
+core_relpath = $(shell perl -e 'use File::Spec; print File::Spec->abs2rel(@ARGV) . "\n"' $1 $2)
+
+define core_render
+ printf -- '$(subst $(newline),\n,$(subst %,%%,$(subst ','\'',$(subst $(tab),$(WS),$(call $(1))))))\n' > $(2)
+endef
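+
+# Illustrative calls (assumptions, shown for clarity only):
+#   $(call comma_list,a b c)                 -> a,b,c
+#   $(call core_http_get,out.tgz,https://example.com/out.tgz)
+#     downloads the URL to out.tgz via curl -Lf.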
+
+# Automated update.
+
+ERLANG_MK_REPO ?= https://github.com/ninenines/erlang.mk
+ERLANG_MK_COMMIT ?=
+ERLANG_MK_BUILD_CONFIG ?= build.config
+ERLANG_MK_BUILD_DIR ?= .erlang.mk.build
+
+erlang-mk: WITHOUT ?= $(ERLANG_MK_WITHOUT)
+erlang-mk:
+ifdef ERLANG_MK_COMMIT
+ $(verbose) git clone $(ERLANG_MK_REPO) $(ERLANG_MK_BUILD_DIR)
+ $(verbose) cd $(ERLANG_MK_BUILD_DIR) && git checkout $(ERLANG_MK_COMMIT)
+else
+ $(verbose) git clone --depth 1 $(ERLANG_MK_REPO) $(ERLANG_MK_BUILD_DIR)
+endif
+ $(verbose) if [ -f $(ERLANG_MK_BUILD_CONFIG) ]; then cp $(ERLANG_MK_BUILD_CONFIG) $(ERLANG_MK_BUILD_DIR)/build.config; fi
+ $(gen_verbose) $(MAKE) --no-print-directory -C $(ERLANG_MK_BUILD_DIR) WITHOUT='$(strip $(WITHOUT))' UPGRADE=1
+ $(verbose) cp $(ERLANG_MK_BUILD_DIR)/erlang.mk ./erlang.mk
+ $(verbose) rm -rf $(ERLANG_MK_BUILD_DIR)
+ $(verbose) rm -rf $(ERLANG_MK_TMP)
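+
+# Usage sketch (not upstream documentation): "make erlang-mk" refreshes this
+# file from ERLANG_MK_REPO; a specific revision can be pinned, e.g.:
+#   make erlang-mk ERLANG_MK_COMMIT=<some-sha>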
+
+# The erlang.mk package index is bundled in the default erlang.mk build.
+# Search for the string "copyright" to skip to the rest of the code.
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-kerl
+
+KERL_INSTALL_DIR ?= $(HOME)/erlang
+
+ifeq ($(strip $(KERL)),)
+KERL := $(ERLANG_MK_TMP)/kerl/kerl
+endif
+
+KERL_DIR = $(ERLANG_MK_TMP)/kerl
+
+export KERL
+
+KERL_GIT ?= https://github.com/kerl/kerl
+KERL_COMMIT ?= master
+
+KERL_MAKEFLAGS ?=
+
+OTP_GIT ?= https://github.com/erlang/otp
+
+define kerl_otp_target
+$(KERL_INSTALL_DIR)/$(1): $(KERL)
+ $(verbose) if [ ! -d $$@ ]; then \
+ MAKEFLAGS="$(KERL_MAKEFLAGS)" $(KERL) build git $(OTP_GIT) $(1) $(1); \
+ $(KERL) install $(1) $(KERL_INSTALL_DIR)/$(1); \
+ fi
+endef
+
+define kerl_hipe_target
+$(KERL_INSTALL_DIR)/$1-native: $(KERL)
+ $(verbose) if [ ! -d $$@ ]; then \
+ KERL_CONFIGURE_OPTIONS=--enable-native-libs \
+ MAKEFLAGS="$(KERL_MAKEFLAGS)" $(KERL) build git $(OTP_GIT) $1 $1-native; \
+ $(KERL) install $1-native $(KERL_INSTALL_DIR)/$1-native; \
+ fi
+endef
+
+$(KERL): $(KERL_DIR)
+
+$(KERL_DIR): | $(ERLANG_MK_TMP)
+ $(gen_verbose) git clone --depth 1 $(KERL_GIT) $(ERLANG_MK_TMP)/kerl
+ $(verbose) cd $(ERLANG_MK_TMP)/kerl && git checkout $(KERL_COMMIT)
+ $(verbose) chmod +x $(KERL)
+
+distclean:: distclean-kerl
+
+distclean-kerl:
+ $(gen_verbose) rm -rf $(KERL_DIR)
+
+# Allow users to select which version of Erlang/OTP to use for a project.
+
+ifneq ($(strip $(LATEST_ERLANG_OTP)),)
+# In some environments it is necessary to filter out master.
+ERLANG_OTP := $(notdir $(lastword $(sort\
+ $(filter-out $(KERL_INSTALL_DIR)/master $(KERL_INSTALL_DIR)/OTP_R%,\
+ $(filter-out %-rc1 %-rc2 %-rc3,$(wildcard $(KERL_INSTALL_DIR)/*[^-native]))))))
+endif
+
+ERLANG_OTP ?=
+ERLANG_HIPE ?=
+
+# Use kerl to enforce a specific Erlang/OTP version for a project.
+ifneq ($(strip $(ERLANG_OTP)),)
+export PATH := $(KERL_INSTALL_DIR)/$(ERLANG_OTP)/bin:$(PATH)
+SHELL := env PATH=$(PATH) $(SHELL)
+$(eval $(call kerl_otp_target,$(ERLANG_OTP)))
+
+# Build Erlang/OTP only if it doesn't already exist.
+ifeq ($(wildcard $(KERL_INSTALL_DIR)/$(ERLANG_OTP))$(BUILD_ERLANG_OTP),)
+$(info Building Erlang/OTP $(ERLANG_OTP)... Please wait...)
+$(shell $(MAKE) $(KERL_INSTALL_DIR)/$(ERLANG_OTP) ERLANG_OTP=$(ERLANG_OTP) BUILD_ERLANG_OTP=1 >&2)
+endif
+
+else
+# Same for a HiPE-enabled VM.
+ifneq ($(strip $(ERLANG_HIPE)),)
+export PATH := $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native/bin:$(PATH)
+SHELL := env PATH=$(PATH) $(SHELL)
+$(eval $(call kerl_hipe_target,$(ERLANG_HIPE)))
+
+# Build Erlang/OTP only if it doesn't already exist.
+ifeq ($(wildcard $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native)$(BUILD_ERLANG_OTP),)
+$(info Building HiPE-enabled Erlang/OTP $(ERLANG_HIPE)... Please wait...)
+$(shell $(MAKE) $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native ERLANG_HIPE=$(ERLANG_HIPE) BUILD_ERLANG_OTP=1 >&2)
+endif
+
+endif
+endif
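+
+# Usage sketch (illustrative): pinning the project to a kerl-built release,
+# assuming the name matches an existing kerl build name, e.g.:
+#   make ERLANG_OTP=OTP-22.3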
+
+PACKAGES += aberth
+pkg_aberth_name = aberth
+pkg_aberth_description = Generic BERT-RPC server in Erlang
+pkg_aberth_homepage = https://github.com/a13x/aberth
+pkg_aberth_fetch = git
+pkg_aberth_repo = https://github.com/a13x/aberth
+pkg_aberth_commit = master
+
+PACKAGES += active
+pkg_active_name = active
+pkg_active_description = Active development for Erlang: rebuild and reload source/binary files while the VM is running
+pkg_active_homepage = https://github.com/proger/active
+pkg_active_fetch = git
+pkg_active_repo = https://github.com/proger/active
+pkg_active_commit = master
+
+PACKAGES += actordb_core
+pkg_actordb_core_name = actordb_core
+pkg_actordb_core_description = ActorDB main source
+pkg_actordb_core_homepage = http://www.actordb.com/
+pkg_actordb_core_fetch = git
+pkg_actordb_core_repo = https://github.com/biokoda/actordb_core
+pkg_actordb_core_commit = master
+
+PACKAGES += actordb_thrift
+pkg_actordb_thrift_name = actordb_thrift
+pkg_actordb_thrift_description = Thrift API for ActorDB
+pkg_actordb_thrift_homepage = http://www.actordb.com/
+pkg_actordb_thrift_fetch = git
+pkg_actordb_thrift_repo = https://github.com/biokoda/actordb_thrift
+pkg_actordb_thrift_commit = master
+
+PACKAGES += aleppo
+pkg_aleppo_name = aleppo
+pkg_aleppo_description = Alternative Erlang Pre-Processor
+pkg_aleppo_homepage = https://github.com/ErlyORM/aleppo
+pkg_aleppo_fetch = git
+pkg_aleppo_repo = https://github.com/ErlyORM/aleppo
+pkg_aleppo_commit = master
+
+PACKAGES += alog
+pkg_alog_name = alog
+pkg_alog_description = Simply the best logging framework for Erlang
+pkg_alog_homepage = https://github.com/siberian-fast-food/alogger
+pkg_alog_fetch = git
+pkg_alog_repo = https://github.com/siberian-fast-food/alogger
+pkg_alog_commit = master
+
+PACKAGES += amqp_client
+pkg_amqp_client_name = amqp_client
+pkg_amqp_client_description = RabbitMQ Erlang AMQP client
+pkg_amqp_client_homepage = https://www.rabbitmq.com/erlang-client-user-guide.html
+pkg_amqp_client_fetch = git
+pkg_amqp_client_repo = https://github.com/rabbitmq/rabbitmq-erlang-client.git
+pkg_amqp_client_commit = master
+
+PACKAGES += annotations
+pkg_annotations_name = annotations
+pkg_annotations_description = Simple code instrumentation utilities
+pkg_annotations_homepage = https://github.com/hyperthunk/annotations
+pkg_annotations_fetch = git
+pkg_annotations_repo = https://github.com/hyperthunk/annotations
+pkg_annotations_commit = master
+
+PACKAGES += antidote
+pkg_antidote_name = antidote
+pkg_antidote_description = Large-scale computation without synchronisation
+pkg_antidote_homepage = https://syncfree.lip6.fr/
+pkg_antidote_fetch = git
+pkg_antidote_repo = https://github.com/SyncFree/antidote
+pkg_antidote_commit = master
+
+PACKAGES += apns
+pkg_apns_name = apns
+pkg_apns_description = Apple Push Notification Server for Erlang
+pkg_apns_homepage = http://inaka.github.com/apns4erl
+pkg_apns_fetch = git
+pkg_apns_repo = https://github.com/inaka/apns4erl
+pkg_apns_commit = master
+
+PACKAGES += asciideck
+pkg_asciideck_name = asciideck
+pkg_asciideck_description = Asciidoc for Erlang.
+pkg_asciideck_homepage = https://ninenines.eu
+pkg_asciideck_fetch = git
+pkg_asciideck_repo = https://github.com/ninenines/asciideck
+pkg_asciideck_commit = master
+
+PACKAGES += azdht
+pkg_azdht_name = azdht
+pkg_azdht_description = Azureus Distributed Hash Table (DHT) in Erlang
+pkg_azdht_homepage = https://github.com/arcusfelis/azdht
+pkg_azdht_fetch = git
+pkg_azdht_repo = https://github.com/arcusfelis/azdht
+pkg_azdht_commit = master
+
+PACKAGES += backoff
+pkg_backoff_name = backoff
+pkg_backoff_description = Simple exponential backoffs in Erlang
+pkg_backoff_homepage = https://github.com/ferd/backoff
+pkg_backoff_fetch = git
+pkg_backoff_repo = https://github.com/ferd/backoff
+pkg_backoff_commit = master
+
+PACKAGES += barrel_tcp
+pkg_barrel_tcp_name = barrel_tcp
+pkg_barrel_tcp_description = barrel is a generic TCP acceptor pool with low latency in Erlang.
+pkg_barrel_tcp_homepage = https://github.com/benoitc-attic/barrel_tcp
+pkg_barrel_tcp_fetch = git
+pkg_barrel_tcp_repo = https://github.com/benoitc-attic/barrel_tcp
+pkg_barrel_tcp_commit = master
+
+PACKAGES += basho_bench
+pkg_basho_bench_name = basho_bench
+pkg_basho_bench_description = A load-generation and testing tool for basically whatever you can write a returning Erlang function for.
+pkg_basho_bench_homepage = https://github.com/basho/basho_bench
+pkg_basho_bench_fetch = git
+pkg_basho_bench_repo = https://github.com/basho/basho_bench
+pkg_basho_bench_commit = master
+
+PACKAGES += bcrypt
+pkg_bcrypt_name = bcrypt
+pkg_bcrypt_description = Bcrypt Erlang / C library
+pkg_bcrypt_homepage = https://github.com/erlangpack/bcrypt
+pkg_bcrypt_fetch = git
+pkg_bcrypt_repo = https://github.com/erlangpack/bcrypt.git
+pkg_bcrypt_commit = master
+
+PACKAGES += beam
+pkg_beam_name = beam
+pkg_beam_description = BEAM emulator written in Erlang
+pkg_beam_homepage = https://github.com/tonyrog/beam
+pkg_beam_fetch = git
+pkg_beam_repo = https://github.com/tonyrog/beam
+pkg_beam_commit = master
+
+PACKAGES += beanstalk
+pkg_beanstalk_name = beanstalk
+pkg_beanstalk_description = An Erlang client for beanstalkd
+pkg_beanstalk_homepage = https://github.com/tim/erlang-beanstalk
+pkg_beanstalk_fetch = git
+pkg_beanstalk_repo = https://github.com/tim/erlang-beanstalk
+pkg_beanstalk_commit = master
+
+PACKAGES += bear
+pkg_bear_name = bear
+pkg_bear_description = a set of statistics functions for erlang
+pkg_bear_homepage = https://github.com/boundary/bear
+pkg_bear_fetch = git
+pkg_bear_repo = https://github.com/boundary/bear
+pkg_bear_commit = master
+
+PACKAGES += bertconf
+pkg_bertconf_name = bertconf
+pkg_bertconf_description = Make ETS tables out of static BERT files that are auto-reloaded
+pkg_bertconf_homepage = https://github.com/ferd/bertconf
+pkg_bertconf_fetch = git
+pkg_bertconf_repo = https://github.com/ferd/bertconf
+pkg_bertconf_commit = master
+
+PACKAGES += bifrost
+pkg_bifrost_name = bifrost
+pkg_bifrost_description = Erlang FTP Server Framework
+pkg_bifrost_homepage = https://github.com/thorstadt/bifrost
+pkg_bifrost_fetch = git
+pkg_bifrost_repo = https://github.com/thorstadt/bifrost
+pkg_bifrost_commit = master
+
+PACKAGES += binpp
+pkg_binpp_name = binpp
+pkg_binpp_description = Erlang Binary Pretty Printer
+pkg_binpp_homepage = https://github.com/jtendo/binpp
+pkg_binpp_fetch = git
+pkg_binpp_repo = https://github.com/jtendo/binpp
+pkg_binpp_commit = master
+
+PACKAGES += bisect
+pkg_bisect_name = bisect
+pkg_bisect_description = Ordered fixed-size binary dictionary in Erlang
+pkg_bisect_homepage = https://github.com/knutin/bisect
+pkg_bisect_fetch = git
+pkg_bisect_repo = https://github.com/knutin/bisect
+pkg_bisect_commit = master
+
+PACKAGES += bitcask
+pkg_bitcask_name = bitcask
+pkg_bitcask_description = because you need another key/value storage engine
+pkg_bitcask_homepage = https://github.com/basho/bitcask
+pkg_bitcask_fetch = git
+pkg_bitcask_repo = https://github.com/basho/bitcask
+pkg_bitcask_commit = develop
+
+PACKAGES += bitstore
+pkg_bitstore_name = bitstore
+pkg_bitstore_description = A document based ontology development environment
+pkg_bitstore_homepage = https://github.com/bdionne/bitstore
+pkg_bitstore_fetch = git
+pkg_bitstore_repo = https://github.com/bdionne/bitstore
+pkg_bitstore_commit = master
+
+PACKAGES += bootstrap
+pkg_bootstrap_name = bootstrap
+pkg_bootstrap_description = A simple, yet powerful Erlang cluster bootstrapping application.
+pkg_bootstrap_homepage = https://github.com/schlagert/bootstrap
+pkg_bootstrap_fetch = git
+pkg_bootstrap_repo = https://github.com/schlagert/bootstrap
+pkg_bootstrap_commit = master
+
+PACKAGES += boss
+pkg_boss_name = boss
+pkg_boss_description = Erlang web MVC, now featuring Comet
+pkg_boss_homepage = https://github.com/ChicagoBoss/ChicagoBoss
+pkg_boss_fetch = git
+pkg_boss_repo = https://github.com/ChicagoBoss/ChicagoBoss
+pkg_boss_commit = master
+
+PACKAGES += boss_db
+pkg_boss_db_name = boss_db
+pkg_boss_db_description = BossDB: a sharded, caching, pooling, evented ORM for Erlang
+pkg_boss_db_homepage = https://github.com/ErlyORM/boss_db
+pkg_boss_db_fetch = git
+pkg_boss_db_repo = https://github.com/ErlyORM/boss_db
+pkg_boss_db_commit = master
+
+PACKAGES += brod
+pkg_brod_name = brod
+pkg_brod_description = Kafka client in Erlang
+pkg_brod_homepage = https://github.com/klarna/brod
+pkg_brod_fetch = git
+pkg_brod_repo = https://github.com/klarna/brod.git
+pkg_brod_commit = master
+
+PACKAGES += bson
+pkg_bson_name = bson
+pkg_bson_description = BSON documents in Erlang, see bsonspec.org
+pkg_bson_homepage = https://github.com/comtihon/bson-erlang
+pkg_bson_fetch = git
+pkg_bson_repo = https://github.com/comtihon/bson-erlang
+pkg_bson_commit = master
+
+PACKAGES += bullet
+pkg_bullet_name = bullet
+pkg_bullet_description = Simple, reliable, efficient streaming for Cowboy.
+pkg_bullet_homepage = http://ninenines.eu
+pkg_bullet_fetch = git
+pkg_bullet_repo = https://github.com/ninenines/bullet
+pkg_bullet_commit = master
+
+PACKAGES += cache
+pkg_cache_name = cache
+pkg_cache_description = Erlang in-memory cache
+pkg_cache_homepage = https://github.com/fogfish/cache
+pkg_cache_fetch = git
+pkg_cache_repo = https://github.com/fogfish/cache
+pkg_cache_commit = master
+
+PACKAGES += cake
+pkg_cake_name = cake
+pkg_cake_description = Really simple terminal colorization
+pkg_cake_homepage = https://github.com/darach/cake-erl
+pkg_cake_fetch = git
+pkg_cake_repo = https://github.com/darach/cake-erl
+pkg_cake_commit = master
+
+PACKAGES += carotene
+pkg_carotene_name = carotene
+pkg_carotene_description = Real-time server
+pkg_carotene_homepage = https://github.com/carotene/carotene
+pkg_carotene_fetch = git
+pkg_carotene_repo = https://github.com/carotene/carotene
+pkg_carotene_commit = master
+
+PACKAGES += cberl
+pkg_cberl_name = cberl
+pkg_cberl_description = NIF based Erlang bindings for Couchbase
+pkg_cberl_homepage = https://github.com/chitika/cberl
+pkg_cberl_fetch = git
+pkg_cberl_repo = https://github.com/chitika/cberl
+pkg_cberl_commit = master
+
+PACKAGES += cecho
+pkg_cecho_name = cecho
+pkg_cecho_description = An ncurses library for Erlang
+pkg_cecho_homepage = https://github.com/mazenharake/cecho
+pkg_cecho_fetch = git
+pkg_cecho_repo = https://github.com/mazenharake/cecho
+pkg_cecho_commit = master
+
+PACKAGES += cferl
+pkg_cferl_name = cferl
+pkg_cferl_description = Rackspace / OpenStack Cloud Files Erlang Client
+pkg_cferl_homepage = https://github.com/ddossot/cferl
+pkg_cferl_fetch = git
+pkg_cferl_repo = https://github.com/ddossot/cferl
+pkg_cferl_commit = master
+
+PACKAGES += chaos_monkey
+pkg_chaos_monkey_name = chaos_monkey
+pkg_chaos_monkey_description = This is The CHAOS MONKEY. It will kill your processes.
+pkg_chaos_monkey_homepage = https://github.com/dLuna/chaos_monkey
+pkg_chaos_monkey_fetch = git
+pkg_chaos_monkey_repo = https://github.com/dLuna/chaos_monkey
+pkg_chaos_monkey_commit = master
+
+PACKAGES += check_node
+pkg_check_node_name = check_node
+pkg_check_node_description = Nagios Scripts for monitoring Riak
+pkg_check_node_homepage = https://github.com/basho-labs/riak_nagios
+pkg_check_node_fetch = git
+pkg_check_node_repo = https://github.com/basho-labs/riak_nagios
+pkg_check_node_commit = master
+
+PACKAGES += chronos
+pkg_chronos_name = chronos
+pkg_chronos_description = Timer module for Erlang that makes it easy to abstract time out of the tests.
+pkg_chronos_homepage = https://github.com/lehoff/chronos
+pkg_chronos_fetch = git
+pkg_chronos_repo = https://github.com/lehoff/chronos
+pkg_chronos_commit = master
+
+PACKAGES += chumak
+pkg_chumak_name = chumak
+pkg_chumak_description = Pure Erlang implementation of ZeroMQ Message Transport Protocol.
+pkg_chumak_homepage = http://choven.ca
+pkg_chumak_fetch = git
+pkg_chumak_repo = https://github.com/chovencorp/chumak
+pkg_chumak_commit = master
+
+PACKAGES += cl
+pkg_cl_name = cl
+pkg_cl_description = OpenCL binding for Erlang
+pkg_cl_homepage = https://github.com/tonyrog/cl
+pkg_cl_fetch = git
+pkg_cl_repo = https://github.com/tonyrog/cl
+pkg_cl_commit = master
+
+PACKAGES += clique
+pkg_clique_name = clique
+pkg_clique_description = CLI Framework for Erlang
+pkg_clique_homepage = https://github.com/basho/clique
+pkg_clique_fetch = git
+pkg_clique_repo = https://github.com/basho/clique
+pkg_clique_commit = develop
+
+PACKAGES += cloudi_core
+pkg_cloudi_core_name = cloudi_core
+pkg_cloudi_core_description = CloudI internal service runtime
+pkg_cloudi_core_homepage = http://cloudi.org/
+pkg_cloudi_core_fetch = git
+pkg_cloudi_core_repo = https://github.com/CloudI/cloudi_core
+pkg_cloudi_core_commit = master
+
+PACKAGES += cloudi_service_api_requests
+pkg_cloudi_service_api_requests_name = cloudi_service_api_requests
+pkg_cloudi_service_api_requests_description = CloudI Service API requests (JSON-RPC/Erlang-term support)
+pkg_cloudi_service_api_requests_homepage = http://cloudi.org/
+pkg_cloudi_service_api_requests_fetch = git
+pkg_cloudi_service_api_requests_repo = https://github.com/CloudI/cloudi_service_api_requests
+pkg_cloudi_service_api_requests_commit = master
+
+PACKAGES += cloudi_service_db
+pkg_cloudi_service_db_name = cloudi_service_db
+pkg_cloudi_service_db_description = CloudI Database (in-memory/testing/generic)
+pkg_cloudi_service_db_homepage = http://cloudi.org/
+pkg_cloudi_service_db_fetch = git
+pkg_cloudi_service_db_repo = https://github.com/CloudI/cloudi_service_db
+pkg_cloudi_service_db_commit = master
+
+PACKAGES += cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_name = cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_description = Cassandra CloudI Service
+pkg_cloudi_service_db_cassandra_homepage = http://cloudi.org/
+pkg_cloudi_service_db_cassandra_fetch = git
+pkg_cloudi_service_db_cassandra_repo = https://github.com/CloudI/cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_commit = master
+
+PACKAGES += cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_name = cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_description = Cassandra CQL CloudI Service
+pkg_cloudi_service_db_cassandra_cql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_cassandra_cql_fetch = git
+pkg_cloudi_service_db_cassandra_cql_repo = https://github.com/CloudI/cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_commit = master
+
+PACKAGES += cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_name = cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_description = CouchDB CloudI Service
+pkg_cloudi_service_db_couchdb_homepage = http://cloudi.org/
+pkg_cloudi_service_db_couchdb_fetch = git
+pkg_cloudi_service_db_couchdb_repo = https://github.com/CloudI/cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_commit = master
+
+PACKAGES += cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_name = cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_description = elasticsearch CloudI Service
+pkg_cloudi_service_db_elasticsearch_homepage = http://cloudi.org/
+pkg_cloudi_service_db_elasticsearch_fetch = git
+pkg_cloudi_service_db_elasticsearch_repo = https://github.com/CloudI/cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_commit = master
+
+PACKAGES += cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_name = cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_description = memcached CloudI Service
+pkg_cloudi_service_db_memcached_homepage = http://cloudi.org/
+pkg_cloudi_service_db_memcached_fetch = git
+pkg_cloudi_service_db_memcached_repo = https://github.com/CloudI/cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_commit = master
+
+PACKAGES += cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_name = cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_description = MySQL CloudI Service
+pkg_cloudi_service_db_mysql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_mysql_fetch = git
+pkg_cloudi_service_db_mysql_repo = https://github.com/CloudI/cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_commit = master
+
+PACKAGES += cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_name = cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_description = PostgreSQL CloudI Service
+pkg_cloudi_service_db_pgsql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_pgsql_fetch = git
+pkg_cloudi_service_db_pgsql_repo = https://github.com/CloudI/cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_commit = master
+
+PACKAGES += cloudi_service_db_riak
+pkg_cloudi_service_db_riak_name = cloudi_service_db_riak
+pkg_cloudi_service_db_riak_description = Riak CloudI Service
+pkg_cloudi_service_db_riak_homepage = http://cloudi.org/
+pkg_cloudi_service_db_riak_fetch = git
+pkg_cloudi_service_db_riak_repo = https://github.com/CloudI/cloudi_service_db_riak
+pkg_cloudi_service_db_riak_commit = master
+
+PACKAGES += cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_name = cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_description = Tokyo Tyrant CloudI Service
+pkg_cloudi_service_db_tokyotyrant_homepage = http://cloudi.org/
+pkg_cloudi_service_db_tokyotyrant_fetch = git
+pkg_cloudi_service_db_tokyotyrant_repo = https://github.com/CloudI/cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_commit = master
+
+PACKAGES += cloudi_service_filesystem
+pkg_cloudi_service_filesystem_name = cloudi_service_filesystem
+pkg_cloudi_service_filesystem_description = Filesystem CloudI Service
+pkg_cloudi_service_filesystem_homepage = http://cloudi.org/
+pkg_cloudi_service_filesystem_fetch = git
+pkg_cloudi_service_filesystem_repo = https://github.com/CloudI/cloudi_service_filesystem
+pkg_cloudi_service_filesystem_commit = master
+
+PACKAGES += cloudi_service_http_client
+pkg_cloudi_service_http_client_name = cloudi_service_http_client
+pkg_cloudi_service_http_client_description = HTTP client CloudI Service
+pkg_cloudi_service_http_client_homepage = http://cloudi.org/
+pkg_cloudi_service_http_client_fetch = git
+pkg_cloudi_service_http_client_repo = https://github.com/CloudI/cloudi_service_http_client
+pkg_cloudi_service_http_client_commit = master
+
+PACKAGES += cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_name = cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_description = cowboy HTTP/HTTPS CloudI Service
+pkg_cloudi_service_http_cowboy_homepage = http://cloudi.org/
+pkg_cloudi_service_http_cowboy_fetch = git
+pkg_cloudi_service_http_cowboy_repo = https://github.com/CloudI/cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_commit = master
+
+PACKAGES += cloudi_service_http_elli
+pkg_cloudi_service_http_elli_name = cloudi_service_http_elli
+pkg_cloudi_service_http_elli_description = elli HTTP CloudI Service
+pkg_cloudi_service_http_elli_homepage = http://cloudi.org/
+pkg_cloudi_service_http_elli_fetch = git
+pkg_cloudi_service_http_elli_repo = https://github.com/CloudI/cloudi_service_http_elli
+pkg_cloudi_service_http_elli_commit = master
+
+PACKAGES += cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_name = cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_description = Map/Reduce CloudI Service
+pkg_cloudi_service_map_reduce_homepage = http://cloudi.org/
+pkg_cloudi_service_map_reduce_fetch = git
+pkg_cloudi_service_map_reduce_repo = https://github.com/CloudI/cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_commit = master
+
+PACKAGES += cloudi_service_oauth1
+pkg_cloudi_service_oauth1_name = cloudi_service_oauth1
+pkg_cloudi_service_oauth1_description = OAuth v1.0 CloudI Service
+pkg_cloudi_service_oauth1_homepage = http://cloudi.org/
+pkg_cloudi_service_oauth1_fetch = git
+pkg_cloudi_service_oauth1_repo = https://github.com/CloudI/cloudi_service_oauth1
+pkg_cloudi_service_oauth1_commit = master
+
+PACKAGES += cloudi_service_queue
+pkg_cloudi_service_queue_name = cloudi_service_queue
+pkg_cloudi_service_queue_description = Persistent Queue Service
+pkg_cloudi_service_queue_homepage = http://cloudi.org/
+pkg_cloudi_service_queue_fetch = git
+pkg_cloudi_service_queue_repo = https://github.com/CloudI/cloudi_service_queue
+pkg_cloudi_service_queue_commit = master
+
+PACKAGES += cloudi_service_quorum
+pkg_cloudi_service_quorum_name = cloudi_service_quorum
+pkg_cloudi_service_quorum_description = CloudI Quorum Service
+pkg_cloudi_service_quorum_homepage = http://cloudi.org/
+pkg_cloudi_service_quorum_fetch = git
+pkg_cloudi_service_quorum_repo = https://github.com/CloudI/cloudi_service_quorum
+pkg_cloudi_service_quorum_commit = master
+
+PACKAGES += cloudi_service_router
+pkg_cloudi_service_router_name = cloudi_service_router
+pkg_cloudi_service_router_description = CloudI Router Service
+pkg_cloudi_service_router_homepage = http://cloudi.org/
+pkg_cloudi_service_router_fetch = git
+pkg_cloudi_service_router_repo = https://github.com/CloudI/cloudi_service_router
+pkg_cloudi_service_router_commit = master
+
+PACKAGES += cloudi_service_tcp
+pkg_cloudi_service_tcp_name = cloudi_service_tcp
+pkg_cloudi_service_tcp_description = TCP CloudI Service
+pkg_cloudi_service_tcp_homepage = http://cloudi.org/
+pkg_cloudi_service_tcp_fetch = git
+pkg_cloudi_service_tcp_repo = https://github.com/CloudI/cloudi_service_tcp
+pkg_cloudi_service_tcp_commit = master
+
+PACKAGES += cloudi_service_timers
+pkg_cloudi_service_timers_name = cloudi_service_timers
+pkg_cloudi_service_timers_description = Timers CloudI Service
+pkg_cloudi_service_timers_homepage = http://cloudi.org/
+pkg_cloudi_service_timers_fetch = git
+pkg_cloudi_service_timers_repo = https://github.com/CloudI/cloudi_service_timers
+pkg_cloudi_service_timers_commit = master
+
+PACKAGES += cloudi_service_udp
+pkg_cloudi_service_udp_name = cloudi_service_udp
+pkg_cloudi_service_udp_description = UDP CloudI Service
+pkg_cloudi_service_udp_homepage = http://cloudi.org/
+pkg_cloudi_service_udp_fetch = git
+pkg_cloudi_service_udp_repo = https://github.com/CloudI/cloudi_service_udp
+pkg_cloudi_service_udp_commit = master
+
+PACKAGES += cloudi_service_validate
+pkg_cloudi_service_validate_name = cloudi_service_validate
+pkg_cloudi_service_validate_description = CloudI Validate Service
+pkg_cloudi_service_validate_homepage = http://cloudi.org/
+pkg_cloudi_service_validate_fetch = git
+pkg_cloudi_service_validate_repo = https://github.com/CloudI/cloudi_service_validate
+pkg_cloudi_service_validate_commit = master
+
+PACKAGES += cloudi_service_zeromq
+pkg_cloudi_service_zeromq_name = cloudi_service_zeromq
+pkg_cloudi_service_zeromq_description = ZeroMQ CloudI Service
+pkg_cloudi_service_zeromq_homepage = http://cloudi.org/
+pkg_cloudi_service_zeromq_fetch = git
+pkg_cloudi_service_zeromq_repo = https://github.com/CloudI/cloudi_service_zeromq
+pkg_cloudi_service_zeromq_commit = master
+
+PACKAGES += cluster_info
+pkg_cluster_info_name = cluster_info
+pkg_cluster_info_description = Fork of Hibari's nifty cluster_info OTP app
+pkg_cluster_info_homepage = https://github.com/basho/cluster_info
+pkg_cluster_info_fetch = git
+pkg_cluster_info_repo = https://github.com/basho/cluster_info
+pkg_cluster_info_commit = master
+
+PACKAGES += color
+pkg_color_name = color
+pkg_color_description = ANSI colors for your Erlang
+pkg_color_homepage = https://github.com/julianduque/erlang-color
+pkg_color_fetch = git
+pkg_color_repo = https://github.com/julianduque/erlang-color
+pkg_color_commit = master
+
+PACKAGES += confetti
+pkg_confetti_name = confetti
+pkg_confetti_description = Erlang configuration provider / application:get_env/2 on steroids
+pkg_confetti_homepage = https://github.com/jtendo/confetti
+pkg_confetti_fetch = git
+pkg_confetti_repo = https://github.com/jtendo/confetti
+pkg_confetti_commit = master
+
+PACKAGES += couchbeam
+pkg_couchbeam_name = couchbeam
+pkg_couchbeam_description = Apache CouchDB client in Erlang
+pkg_couchbeam_homepage = https://github.com/benoitc/couchbeam
+pkg_couchbeam_fetch = git
+pkg_couchbeam_repo = https://github.com/benoitc/couchbeam
+pkg_couchbeam_commit = master
+
+PACKAGES += covertool
+pkg_covertool_name = covertool
+pkg_covertool_description = Tool to convert Erlang cover data files into Cobertura XML reports
+pkg_covertool_homepage = https://github.com/idubrov/covertool
+pkg_covertool_fetch = git
+pkg_covertool_repo = https://github.com/idubrov/covertool
+pkg_covertool_commit = master
+
+PACKAGES += cowboy
+pkg_cowboy_name = cowboy
+pkg_cowboy_description = Small, fast and modular HTTP server.
+pkg_cowboy_homepage = http://ninenines.eu
+pkg_cowboy_fetch = git
+pkg_cowboy_repo = https://github.com/ninenines/cowboy
+pkg_cowboy_commit = 1.0.4
+
+PACKAGES += cowdb
+pkg_cowdb_name = cowdb
+pkg_cowdb_description = Pure Key/Value database library for Erlang Applications
+pkg_cowdb_homepage = https://github.com/refuge/cowdb
+pkg_cowdb_fetch = git
+pkg_cowdb_repo = https://github.com/refuge/cowdb
+pkg_cowdb_commit = master
+
+PACKAGES += cowlib
+pkg_cowlib_name = cowlib
+pkg_cowlib_description = Support library for manipulating Web protocols.
+pkg_cowlib_homepage = http://ninenines.eu
+pkg_cowlib_fetch = git
+pkg_cowlib_repo = https://github.com/ninenines/cowlib
+pkg_cowlib_commit = 1.0.2
+
+PACKAGES += cpg
+pkg_cpg_name = cpg
+pkg_cpg_description = CloudI Process Groups
+pkg_cpg_homepage = https://github.com/okeuday/cpg
+pkg_cpg_fetch = git
+pkg_cpg_repo = https://github.com/okeuday/cpg
+pkg_cpg_commit = master
+
+PACKAGES += cqerl
+pkg_cqerl_name = cqerl
+pkg_cqerl_description = Native Erlang CQL client for Cassandra
+pkg_cqerl_homepage = https://matehat.github.io/cqerl/
+pkg_cqerl_fetch = git
+pkg_cqerl_repo = https://github.com/matehat/cqerl
+pkg_cqerl_commit = master
+
+PACKAGES += cr
+pkg_cr_name = cr
+pkg_cr_description = Chain Replication
+pkg_cr_homepage = https://synrc.com/apps/cr/doc/cr.htm
+pkg_cr_fetch = git
+pkg_cr_repo = https://github.com/spawnproc/cr
+pkg_cr_commit = master
+
+PACKAGES += cuttlefish
+pkg_cuttlefish_name = cuttlefish
+pkg_cuttlefish_description = never lose your childlike sense of wonder baby cuttlefish, promise me?
+pkg_cuttlefish_homepage = https://github.com/basho/cuttlefish
+pkg_cuttlefish_fetch = git
+pkg_cuttlefish_repo = https://github.com/basho/cuttlefish
+pkg_cuttlefish_commit = master
+
+PACKAGES += damocles
+pkg_damocles_name = damocles
+pkg_damocles_description = Erlang library for generating adversarial network conditions for QAing distributed applications/systems on a single Linux box.
+pkg_damocles_homepage = https://github.com/lostcolony/damocles
+pkg_damocles_fetch = git
+pkg_damocles_repo = https://github.com/lostcolony/damocles
+pkg_damocles_commit = master
+
+PACKAGES += debbie
+pkg_debbie_name = debbie
+pkg_debbie_description = .DEB Built In Erlang
+pkg_debbie_homepage = https://github.com/crownedgrouse/debbie
+pkg_debbie_fetch = git
+pkg_debbie_repo = https://github.com/crownedgrouse/debbie
+pkg_debbie_commit = master
+
+PACKAGES += decimal
+pkg_decimal_name = decimal
+pkg_decimal_description = An Erlang decimal arithmetic library
+pkg_decimal_homepage = https://github.com/tim/erlang-decimal
+pkg_decimal_fetch = git
+pkg_decimal_repo = https://github.com/tim/erlang-decimal
+pkg_decimal_commit = master
+
+PACKAGES += detergent
+pkg_detergent_name = detergent
+pkg_detergent_description = An emulsifying Erlang SOAP library
+pkg_detergent_homepage = https://github.com/devinus/detergent
+pkg_detergent_fetch = git
+pkg_detergent_repo = https://github.com/devinus/detergent
+pkg_detergent_commit = master
+
+PACKAGES += detest
+pkg_detest_name = detest
+pkg_detest_description = Tool for running tests on a cluster of erlang nodes
+pkg_detest_homepage = https://github.com/biokoda/detest
+pkg_detest_fetch = git
+pkg_detest_repo = https://github.com/biokoda/detest
+pkg_detest_commit = master
+
+PACKAGES += dh_date
+pkg_dh_date_name = dh_date
+pkg_dh_date_description = Date formatting / parsing library for erlang
+pkg_dh_date_homepage = https://github.com/daleharvey/dh_date
+pkg_dh_date_fetch = git
+pkg_dh_date_repo = https://github.com/daleharvey/dh_date
+pkg_dh_date_commit = master
+
+PACKAGES += dirbusterl
+pkg_dirbusterl_name = dirbusterl
+pkg_dirbusterl_description = DirBuster successor in Erlang
+pkg_dirbusterl_homepage = https://github.com/silentsignal/DirBustErl
+pkg_dirbusterl_fetch = git
+pkg_dirbusterl_repo = https://github.com/silentsignal/DirBustErl
+pkg_dirbusterl_commit = master
+
+PACKAGES += dispcount
+pkg_dispcount_name = dispcount
+pkg_dispcount_description = Erlang task dispatcher based on ETS counters.
+pkg_dispcount_homepage = https://github.com/ferd/dispcount
+pkg_dispcount_fetch = git
+pkg_dispcount_repo = https://github.com/ferd/dispcount
+pkg_dispcount_commit = master
+
+PACKAGES += dlhttpc
+pkg_dlhttpc_name = dlhttpc
+pkg_dlhttpc_description = dispcount-based lhttpc fork for massive amounts of requests to limited endpoints
+pkg_dlhttpc_homepage = https://github.com/ferd/dlhttpc
+pkg_dlhttpc_fetch = git
+pkg_dlhttpc_repo = https://github.com/ferd/dlhttpc
+pkg_dlhttpc_commit = master
+
+PACKAGES += dns
+pkg_dns_name = dns
+pkg_dns_description = Erlang DNS library
+pkg_dns_homepage = https://github.com/aetrion/dns_erlang
+pkg_dns_fetch = git
+pkg_dns_repo = https://github.com/aetrion/dns_erlang
+pkg_dns_commit = master
+
+PACKAGES += dnssd
+pkg_dnssd_name = dnssd
+pkg_dnssd_description = Erlang interface to Apple's Bonjour DNS Service Discovery implementation
+pkg_dnssd_homepage = https://github.com/benoitc/dnssd_erlang
+pkg_dnssd_fetch = git
+pkg_dnssd_repo = https://github.com/benoitc/dnssd_erlang
+pkg_dnssd_commit = master
+
+PACKAGES += dynamic_compile
+pkg_dynamic_compile_name = dynamic_compile
+pkg_dynamic_compile_description = compile and load erlang modules from string input
+pkg_dynamic_compile_homepage = https://github.com/jkvor/dynamic_compile
+pkg_dynamic_compile_fetch = git
+pkg_dynamic_compile_repo = https://github.com/jkvor/dynamic_compile
+pkg_dynamic_compile_commit = master
+
+PACKAGES += e2
+pkg_e2_name = e2
+pkg_e2_description = Library to simplify writing correct OTP applications.
+pkg_e2_homepage = http://e2project.org
+pkg_e2_fetch = git
+pkg_e2_repo = https://github.com/gar1t/e2
+pkg_e2_commit = master
+
+PACKAGES += eamf
+pkg_eamf_name = eamf
+pkg_eamf_description = eAMF provides Action Message Format (AMF) support for Erlang
+pkg_eamf_homepage = https://github.com/mrinalwadhwa/eamf
+pkg_eamf_fetch = git
+pkg_eamf_repo = https://github.com/mrinalwadhwa/eamf
+pkg_eamf_commit = master
+
+PACKAGES += eavro
+pkg_eavro_name = eavro
+pkg_eavro_description = Apache Avro encoder/decoder
+pkg_eavro_homepage = https://github.com/SIfoxDevTeam/eavro
+pkg_eavro_fetch = git
+pkg_eavro_repo = https://github.com/SIfoxDevTeam/eavro
+pkg_eavro_commit = master
+
+PACKAGES += ecapnp
+pkg_ecapnp_name = ecapnp
+pkg_ecapnp_description = Cap'n Proto library for Erlang
+pkg_ecapnp_homepage = https://github.com/kaos/ecapnp
+pkg_ecapnp_fetch = git
+pkg_ecapnp_repo = https://github.com/kaos/ecapnp
+pkg_ecapnp_commit = master
+
+PACKAGES += econfig
+pkg_econfig_name = econfig
+pkg_econfig_description = simple Erlang config handler using INI files
+pkg_econfig_homepage = https://github.com/benoitc/econfig
+pkg_econfig_fetch = git
+pkg_econfig_repo = https://github.com/benoitc/econfig
+pkg_econfig_commit = master
+
+PACKAGES += edate
+pkg_edate_name = edate
+pkg_edate_description = date manipulation library for erlang
+pkg_edate_homepage = https://github.com/dweldon/edate
+pkg_edate_fetch = git
+pkg_edate_repo = https://github.com/dweldon/edate
+pkg_edate_commit = master
+
+PACKAGES += edgar
+pkg_edgar_name = edgar
+pkg_edgar_description = Erlang Does GNU AR
+pkg_edgar_homepage = https://github.com/crownedgrouse/edgar
+pkg_edgar_fetch = git
+pkg_edgar_repo = https://github.com/crownedgrouse/edgar
+pkg_edgar_commit = master
+
+PACKAGES += edis
+pkg_edis_name = edis
+pkg_edis_description = An Erlang implementation of Redis KV Store
+pkg_edis_homepage = http://inaka.github.com/edis/
+pkg_edis_fetch = git
+pkg_edis_repo = https://github.com/inaka/edis
+pkg_edis_commit = master
+
+PACKAGES += edns
+pkg_edns_name = edns
+pkg_edns_description = Erlang/OTP DNS server
+pkg_edns_homepage = https://github.com/hcvst/erlang-dns
+pkg_edns_fetch = git
+pkg_edns_repo = https://github.com/hcvst/erlang-dns
+pkg_edns_commit = master
+
+PACKAGES += edown
+pkg_edown_name = edown
+pkg_edown_description = EDoc extension for generating Github-flavored Markdown
+pkg_edown_homepage = https://github.com/uwiger/edown
+pkg_edown_fetch = git
+pkg_edown_repo = https://github.com/uwiger/edown
+pkg_edown_commit = master
+
+PACKAGES += eep
+pkg_eep_name = eep
+pkg_eep_description = Erlang Easy Profiling (eep) application provides a way to analyze application performance and call hierarchy
+pkg_eep_homepage = https://github.com/virtan/eep
+pkg_eep_fetch = git
+pkg_eep_repo = https://github.com/virtan/eep
+pkg_eep_commit = master
+
+PACKAGES += eep_app
+pkg_eep_app_name = eep_app
+pkg_eep_app_description = Embedded Event Processing
+pkg_eep_app_homepage = https://github.com/darach/eep-erl
+pkg_eep_app_fetch = git
+pkg_eep_app_repo = https://github.com/darach/eep-erl
+pkg_eep_app_commit = master
+
+PACKAGES += efene
+pkg_efene_name = efene
+pkg_efene_description = Alternative syntax for the Erlang Programming Language focusing on simplicity, ease of use and programmer UX
+pkg_efene_homepage = https://github.com/efene/efene
+pkg_efene_fetch = git
+pkg_efene_repo = https://github.com/efene/efene
+pkg_efene_commit = master
+
+PACKAGES += egeoip
+pkg_egeoip_name = egeoip
+pkg_egeoip_description = Erlang IP Geolocation module, currently supporting the MaxMind GeoLite City Database.
+pkg_egeoip_homepage = https://github.com/mochi/egeoip
+pkg_egeoip_fetch = git
+pkg_egeoip_repo = https://github.com/mochi/egeoip
+pkg_egeoip_commit = master
+
+PACKAGES += ehsa
+pkg_ehsa_name = ehsa
+pkg_ehsa_description = Erlang HTTP server basic and digest authentication modules
+pkg_ehsa_homepage = https://bitbucket.org/a12n/ehsa
+pkg_ehsa_fetch = hg
+pkg_ehsa_repo = https://bitbucket.org/a12n/ehsa
+pkg_ehsa_commit = default
+
+PACKAGES += ej
+pkg_ej_name = ej
+pkg_ej_description = Helper module for working with Erlang terms representing JSON
+pkg_ej_homepage = https://github.com/seth/ej
+pkg_ej_fetch = git
+pkg_ej_repo = https://github.com/seth/ej
+pkg_ej_commit = master
+
+PACKAGES += ejabberd
+pkg_ejabberd_name = ejabberd
+pkg_ejabberd_description = Robust, ubiquitous and massively scalable Jabber / XMPP Instant Messaging platform
+pkg_ejabberd_homepage = https://github.com/processone/ejabberd
+pkg_ejabberd_fetch = git
+pkg_ejabberd_repo = https://github.com/processone/ejabberd
+pkg_ejabberd_commit = master
+
+PACKAGES += ejwt
+pkg_ejwt_name = ejwt
+pkg_ejwt_description = erlang library for JSON Web Token
+pkg_ejwt_homepage = https://github.com/artefactop/ejwt
+pkg_ejwt_fetch = git
+pkg_ejwt_repo = https://github.com/artefactop/ejwt
+pkg_ejwt_commit = master
+
+PACKAGES += ekaf
+pkg_ekaf_name = ekaf
+pkg_ekaf_description = A minimal, high-performance Kafka client in Erlang.
+pkg_ekaf_homepage = https://github.com/helpshift/ekaf
+pkg_ekaf_fetch = git
+pkg_ekaf_repo = https://github.com/helpshift/ekaf
+pkg_ekaf_commit = master
+
+PACKAGES += elarm
+pkg_elarm_name = elarm
+pkg_elarm_description = Alarm Manager for Erlang.
+pkg_elarm_homepage = https://github.com/esl/elarm
+pkg_elarm_fetch = git
+pkg_elarm_repo = https://github.com/esl/elarm
+pkg_elarm_commit = master
+
+PACKAGES += eleveldb
+pkg_eleveldb_name = eleveldb
+pkg_eleveldb_description = Erlang LevelDB API
+pkg_eleveldb_homepage = https://github.com/basho/eleveldb
+pkg_eleveldb_fetch = git
+pkg_eleveldb_repo = https://github.com/basho/eleveldb
+pkg_eleveldb_commit = master
+
+PACKAGES += elixir
+pkg_elixir_name = elixir
+pkg_elixir_description = Elixir is a dynamic, functional language designed for building scalable and maintainable applications
+pkg_elixir_homepage = https://elixir-lang.org/
+pkg_elixir_fetch = git
+pkg_elixir_repo = https://github.com/elixir-lang/elixir
+pkg_elixir_commit = master
+
+PACKAGES += elli
+pkg_elli_name = elli
+pkg_elli_description = Simple, robust and performant Erlang web server
+pkg_elli_homepage = https://github.com/elli-lib/elli
+pkg_elli_fetch = git
+pkg_elli_repo = https://github.com/elli-lib/elli
+pkg_elli_commit = master
+
+PACKAGES += elvis
+pkg_elvis_name = elvis
+pkg_elvis_description = Erlang Style Reviewer
+pkg_elvis_homepage = https://github.com/inaka/elvis
+pkg_elvis_fetch = git
+pkg_elvis_repo = https://github.com/inaka/elvis
+pkg_elvis_commit = master
+
+PACKAGES += emagick
+pkg_emagick_name = emagick
+pkg_emagick_description = Wrapper for Graphics/ImageMagick command line tool.
+pkg_emagick_homepage = https://github.com/kivra/emagick
+pkg_emagick_fetch = git
+pkg_emagick_repo = https://github.com/kivra/emagick
+pkg_emagick_commit = master
+
+PACKAGES += emysql
+pkg_emysql_name = emysql
+pkg_emysql_description = Stable, pure Erlang MySQL driver.
+pkg_emysql_homepage = https://github.com/Eonblast/Emysql
+pkg_emysql_fetch = git
+pkg_emysql_repo = https://github.com/Eonblast/Emysql
+pkg_emysql_commit = master
+
+PACKAGES += enm
+pkg_enm_name = enm
+pkg_enm_description = Erlang driver for nanomsg
+pkg_enm_homepage = https://github.com/basho/enm
+pkg_enm_fetch = git
+pkg_enm_repo = https://github.com/basho/enm
+pkg_enm_commit = master
+
+PACKAGES += entop
+pkg_entop_name = entop
+pkg_entop_description = A top-like tool for monitoring an Erlang node
+pkg_entop_homepage = https://github.com/mazenharake/entop
+pkg_entop_fetch = git
+pkg_entop_repo = https://github.com/mazenharake/entop
+pkg_entop_commit = master
+
+PACKAGES += epcap
+pkg_epcap_name = epcap
+pkg_epcap_description = Erlang packet capture interface using pcap
+pkg_epcap_homepage = https://github.com/msantos/epcap
+pkg_epcap_fetch = git
+pkg_epcap_repo = https://github.com/msantos/epcap
+pkg_epcap_commit = master
+
+PACKAGES += eper
+pkg_eper_name = eper
+pkg_eper_description = Erlang performance and debugging tools.
+pkg_eper_homepage = https://github.com/massemanet/eper
+pkg_eper_fetch = git
+pkg_eper_repo = https://github.com/massemanet/eper
+pkg_eper_commit = master
+
+PACKAGES += epgsql
+pkg_epgsql_name = epgsql
+pkg_epgsql_description = Erlang PostgreSQL client library.
+pkg_epgsql_homepage = https://github.com/epgsql/epgsql
+pkg_epgsql_fetch = git
+pkg_epgsql_repo = https://github.com/epgsql/epgsql
+pkg_epgsql_commit = master
+
+PACKAGES += episcina
+pkg_episcina_name = episcina
+pkg_episcina_description = A simple, non-intrusive resource pool for connections
+pkg_episcina_homepage = https://github.com/erlware/episcina
+pkg_episcina_fetch = git
+pkg_episcina_repo = https://github.com/erlware/episcina
+pkg_episcina_commit = master
+
+PACKAGES += eplot
+pkg_eplot_name = eplot
+pkg_eplot_description = A plot engine written in erlang.
+pkg_eplot_homepage = https://github.com/psyeugenic/eplot
+pkg_eplot_fetch = git
+pkg_eplot_repo = https://github.com/psyeugenic/eplot
+pkg_eplot_commit = master
+
+PACKAGES += epocxy
+pkg_epocxy_name = epocxy
+pkg_epocxy_description = Erlang Patterns of Concurrency
+pkg_epocxy_homepage = https://github.com/duomark/epocxy
+pkg_epocxy_fetch = git
+pkg_epocxy_repo = https://github.com/duomark/epocxy
+pkg_epocxy_commit = master
+
+PACKAGES += epubnub
+pkg_epubnub_name = epubnub
+pkg_epubnub_description = Erlang PubNub API
+pkg_epubnub_homepage = https://github.com/tsloughter/epubnub
+pkg_epubnub_fetch = git
+pkg_epubnub_repo = https://github.com/tsloughter/epubnub
+pkg_epubnub_commit = master
+
+PACKAGES += eqm
+pkg_eqm_name = eqm
+pkg_eqm_description = Erlang pub sub with supply-demand channels
+pkg_eqm_homepage = https://github.com/loucash/eqm
+pkg_eqm_fetch = git
+pkg_eqm_repo = https://github.com/loucash/eqm
+pkg_eqm_commit = master
+
+PACKAGES += eredis
+pkg_eredis_name = eredis
+pkg_eredis_description = Erlang Redis client
+pkg_eredis_homepage = https://github.com/wooga/eredis
+pkg_eredis_fetch = git
+pkg_eredis_repo = https://github.com/wooga/eredis
+pkg_eredis_commit = master
+
+PACKAGES += eredis_pool
+pkg_eredis_pool_name = eredis_pool
+pkg_eredis_pool_description = eredis_pool is a pool of Redis clients, using eredis and poolboy.
+pkg_eredis_pool_homepage = https://github.com/hiroeorz/eredis_pool
+pkg_eredis_pool_fetch = git
+pkg_eredis_pool_repo = https://github.com/hiroeorz/eredis_pool
+pkg_eredis_pool_commit = master
+
+PACKAGES += erl_streams
+pkg_erl_streams_name = erl_streams
+pkg_erl_streams_description = Streams in Erlang
+pkg_erl_streams_homepage = https://github.com/epappas/erl_streams
+pkg_erl_streams_fetch = git
+pkg_erl_streams_repo = https://github.com/epappas/erl_streams
+pkg_erl_streams_commit = master
+
+PACKAGES += erlang_cep
+pkg_erlang_cep_name = erlang_cep
+pkg_erlang_cep_description = A basic CEP package written in erlang
+pkg_erlang_cep_homepage = https://github.com/danmacklin/erlang_cep
+pkg_erlang_cep_fetch = git
+pkg_erlang_cep_repo = https://github.com/danmacklin/erlang_cep
+pkg_erlang_cep_commit = master
+
+PACKAGES += erlang_js
+pkg_erlang_js_name = erlang_js
+pkg_erlang_js_description = A linked-in driver for Erlang to Mozilla's SpiderMonkey JavaScript runtime.
+pkg_erlang_js_homepage = https://github.com/basho/erlang_js
+pkg_erlang_js_fetch = git
+pkg_erlang_js_repo = https://github.com/basho/erlang_js
+pkg_erlang_js_commit = master
+
+PACKAGES += erlang_localtime
+pkg_erlang_localtime_name = erlang_localtime
+pkg_erlang_localtime_description = Erlang library for conversion from one local time to another
+pkg_erlang_localtime_homepage = https://github.com/dmitryme/erlang_localtime
+pkg_erlang_localtime_fetch = git
+pkg_erlang_localtime_repo = https://github.com/dmitryme/erlang_localtime
+pkg_erlang_localtime_commit = master
+
+PACKAGES += erlang_smtp
+pkg_erlang_smtp_name = erlang_smtp
+pkg_erlang_smtp_description = Erlang SMTP and POP3 server code.
+pkg_erlang_smtp_homepage = https://github.com/tonyg/erlang-smtp
+pkg_erlang_smtp_fetch = git
+pkg_erlang_smtp_repo = https://github.com/tonyg/erlang-smtp
+pkg_erlang_smtp_commit = master
+
+PACKAGES += erlang_term
+pkg_erlang_term_name = erlang_term
+pkg_erlang_term_description = Erlang Term Info
+pkg_erlang_term_homepage = https://github.com/okeuday/erlang_term
+pkg_erlang_term_fetch = git
+pkg_erlang_term_repo = https://github.com/okeuday/erlang_term
+pkg_erlang_term_commit = master
+
+PACKAGES += erlastic_search
+pkg_erlastic_search_name = erlastic_search
+pkg_erlastic_search_description = An Erlang app for communicating with Elasticsearch's REST interface.
+pkg_erlastic_search_homepage = https://github.com/tsloughter/erlastic_search
+pkg_erlastic_search_fetch = git
+pkg_erlastic_search_repo = https://github.com/tsloughter/erlastic_search
+pkg_erlastic_search_commit = master
+
+PACKAGES += erlasticsearch
+pkg_erlasticsearch_name = erlasticsearch
+pkg_erlasticsearch_description = Erlang thrift interface to elastic_search
+pkg_erlasticsearch_homepage = https://github.com/dieswaytoofast/erlasticsearch
+pkg_erlasticsearch_fetch = git
+pkg_erlasticsearch_repo = https://github.com/dieswaytoofast/erlasticsearch
+pkg_erlasticsearch_commit = master
+
+PACKAGES += erlbrake
+pkg_erlbrake_name = erlbrake
+pkg_erlbrake_description = Erlang Airbrake notification client
+pkg_erlbrake_homepage = https://github.com/kenpratt/erlbrake
+pkg_erlbrake_fetch = git
+pkg_erlbrake_repo = https://github.com/kenpratt/erlbrake
+pkg_erlbrake_commit = master
+
+PACKAGES += erlcloud
+pkg_erlcloud_name = erlcloud
+pkg_erlcloud_description = Cloud Computing library for erlang (Amazon EC2, S3, SQS, SimpleDB, Mechanical Turk, ELB)
+pkg_erlcloud_homepage = https://github.com/gleber/erlcloud
+pkg_erlcloud_fetch = git
+pkg_erlcloud_repo = https://github.com/gleber/erlcloud
+pkg_erlcloud_commit = master
+
+PACKAGES += erlcron
+pkg_erlcron_name = erlcron
+pkg_erlcron_description = Erlang cronish system
+pkg_erlcron_homepage = https://github.com/erlware/erlcron
+pkg_erlcron_fetch = git
+pkg_erlcron_repo = https://github.com/erlware/erlcron
+pkg_erlcron_commit = master
+
+PACKAGES += erldb
+pkg_erldb_name = erldb
+pkg_erldb_description = ORM (Object-relational mapping) application implemented in Erlang
+pkg_erldb_homepage = http://erldb.org
+pkg_erldb_fetch = git
+pkg_erldb_repo = https://github.com/erldb/erldb
+pkg_erldb_commit = master
+
+PACKAGES += erldis
+pkg_erldis_name = erldis
+pkg_erldis_description = redis erlang client library
+pkg_erldis_homepage = https://github.com/cstar/erldis
+pkg_erldis_fetch = git
+pkg_erldis_repo = https://github.com/cstar/erldis
+pkg_erldis_commit = master
+
+PACKAGES += erldns
+pkg_erldns_name = erldns
+pkg_erldns_description = DNS server, in erlang.
+pkg_erldns_homepage = https://github.com/aetrion/erl-dns
+pkg_erldns_fetch = git
+pkg_erldns_repo = https://github.com/aetrion/erl-dns
+pkg_erldns_commit = master
+
+PACKAGES += erldocker
+pkg_erldocker_name = erldocker
+pkg_erldocker_description = Docker Remote API client for Erlang
+pkg_erldocker_homepage = https://github.com/proger/erldocker
+pkg_erldocker_fetch = git
+pkg_erldocker_repo = https://github.com/proger/erldocker
+pkg_erldocker_commit = master
+
+PACKAGES += erlfsmon
+pkg_erlfsmon_name = erlfsmon
+pkg_erlfsmon_description = Erlang filesystem event watcher for Linux and OSX
+pkg_erlfsmon_homepage = https://github.com/proger/erlfsmon
+pkg_erlfsmon_fetch = git
+pkg_erlfsmon_repo = https://github.com/proger/erlfsmon
+pkg_erlfsmon_commit = master
+
+PACKAGES += erlgit
+pkg_erlgit_name = erlgit
+pkg_erlgit_description = Erlang convenience wrapper around git executable
+pkg_erlgit_homepage = https://github.com/gleber/erlgit
+pkg_erlgit_fetch = git
+pkg_erlgit_repo = https://github.com/gleber/erlgit
+pkg_erlgit_commit = master
+
+PACKAGES += erlguten
+pkg_erlguten_name = erlguten
+pkg_erlguten_description = ErlGuten is a system for high-quality typesetting, written purely in Erlang.
+pkg_erlguten_homepage = https://github.com/richcarl/erlguten
+pkg_erlguten_fetch = git
+pkg_erlguten_repo = https://github.com/richcarl/erlguten
+pkg_erlguten_commit = master
+
+PACKAGES += erlmc
+pkg_erlmc_name = erlmc
+pkg_erlmc_description = Erlang memcached binary protocol client
+pkg_erlmc_homepage = https://github.com/jkvor/erlmc
+pkg_erlmc_fetch = git
+pkg_erlmc_repo = https://github.com/jkvor/erlmc
+pkg_erlmc_commit = master
+
+PACKAGES += erlmongo
+pkg_erlmongo_name = erlmongo
+pkg_erlmongo_description = Record based Erlang driver for MongoDB with gridfs support
+pkg_erlmongo_homepage = https://github.com/SergejJurecko/erlmongo
+pkg_erlmongo_fetch = git
+pkg_erlmongo_repo = https://github.com/SergejJurecko/erlmongo
+pkg_erlmongo_commit = master
+
+PACKAGES += erlog
+pkg_erlog_name = erlog
+pkg_erlog_description = Prolog interpreter in and for Erlang
+pkg_erlog_homepage = https://github.com/rvirding/erlog
+pkg_erlog_fetch = git
+pkg_erlog_repo = https://github.com/rvirding/erlog
+pkg_erlog_commit = master
+
+PACKAGES += erlpass
+pkg_erlpass_name = erlpass
+pkg_erlpass_description = A library to handle password hashing and changing in a safe manner, independent from any kind of storage whatsoever.
+pkg_erlpass_homepage = https://github.com/ferd/erlpass
+pkg_erlpass_fetch = git
+pkg_erlpass_repo = https://github.com/ferd/erlpass
+pkg_erlpass_commit = master
+
+PACKAGES += erlport
+pkg_erlport_name = erlport
+pkg_erlport_description = ErlPort - connect Erlang to other languages
+pkg_erlport_homepage = https://github.com/hdima/erlport
+pkg_erlport_fetch = git
+pkg_erlport_repo = https://github.com/hdima/erlport
+pkg_erlport_commit = master
+
+PACKAGES += erlsh
+pkg_erlsh_name = erlsh
+pkg_erlsh_description = Erlang shell tools
+pkg_erlsh_homepage = https://github.com/proger/erlsh
+pkg_erlsh_fetch = git
+pkg_erlsh_repo = https://github.com/proger/erlsh
+pkg_erlsh_commit = master
+
+PACKAGES += erlsha2
+pkg_erlsha2_name = erlsha2
+pkg_erlsha2_description = SHA-224, SHA-256, SHA-384, SHA-512 implemented in Erlang NIFs.
+pkg_erlsha2_homepage = https://github.com/vinoski/erlsha2
+pkg_erlsha2_fetch = git
+pkg_erlsha2_repo = https://github.com/vinoski/erlsha2
+pkg_erlsha2_commit = master
+
+PACKAGES += erlsom
+pkg_erlsom_name = erlsom
+pkg_erlsom_description = XML parser for Erlang
+pkg_erlsom_homepage = https://github.com/willemdj/erlsom
+pkg_erlsom_fetch = git
+pkg_erlsom_repo = https://github.com/willemdj/erlsom
+pkg_erlsom_commit = master
+
+PACKAGES += erlubi
+pkg_erlubi_name = erlubi
+pkg_erlubi_description = Ubigraph Erlang Client (and Process Visualizer)
+pkg_erlubi_homepage = https://github.com/krestenkrab/erlubi
+pkg_erlubi_fetch = git
+pkg_erlubi_repo = https://github.com/krestenkrab/erlubi
+pkg_erlubi_commit = master
+
+PACKAGES += erlvolt
+pkg_erlvolt_name = erlvolt
+pkg_erlvolt_description = VoltDB Erlang Client Driver
+pkg_erlvolt_homepage = https://github.com/VoltDB/voltdb-client-erlang
+pkg_erlvolt_fetch = git
+pkg_erlvolt_repo = https://github.com/VoltDB/voltdb-client-erlang
+pkg_erlvolt_commit = master
+
+PACKAGES += erlware_commons
+pkg_erlware_commons_name = erlware_commons
+pkg_erlware_commons_description = Erlware Commons is an Erlware project focused on all aspects of reusable Erlang components.
+pkg_erlware_commons_homepage = https://github.com/erlware/erlware_commons
+pkg_erlware_commons_fetch = git
+pkg_erlware_commons_repo = https://github.com/erlware/erlware_commons
+pkg_erlware_commons_commit = master
+
+PACKAGES += erlydtl
+pkg_erlydtl_name = erlydtl
+pkg_erlydtl_description = Django Template Language for Erlang.
+pkg_erlydtl_homepage = https://github.com/erlydtl/erlydtl
+pkg_erlydtl_fetch = git
+pkg_erlydtl_repo = https://github.com/erlydtl/erlydtl
+pkg_erlydtl_commit = master
+
+PACKAGES += errd
+pkg_errd_name = errd
+pkg_errd_description = Erlang RRDTool library
+pkg_errd_homepage = https://github.com/archaelus/errd
+pkg_errd_fetch = git
+pkg_errd_repo = https://github.com/archaelus/errd
+pkg_errd_commit = master
+
+PACKAGES += erserve
+pkg_erserve_name = erserve
+pkg_erserve_description = Erlang/Rserve communication interface
+pkg_erserve_homepage = https://github.com/del/erserve
+pkg_erserve_fetch = git
+pkg_erserve_repo = https://github.com/del/erserve
+pkg_erserve_commit = master
+
+PACKAGES += erwa
+pkg_erwa_name = erwa
+pkg_erwa_description = A WAMP router and client written in Erlang.
+pkg_erwa_homepage = https://github.com/bwegh/erwa
+pkg_erwa_fetch = git
+pkg_erwa_repo = https://github.com/bwegh/erwa
+pkg_erwa_commit = master
+
+PACKAGES += escalus
+pkg_escalus_name = escalus
+pkg_escalus_description = An XMPP client library in Erlang for conveniently testing XMPP servers
+pkg_escalus_homepage = https://github.com/esl/escalus
+pkg_escalus_fetch = git
+pkg_escalus_repo = https://github.com/esl/escalus
+pkg_escalus_commit = master
+
+PACKAGES += esh_mk
+pkg_esh_mk_name = esh_mk
+pkg_esh_mk_description = esh template engine plugin for erlang.mk
+pkg_esh_mk_homepage = https://github.com/crownedgrouse/esh.mk
+pkg_esh_mk_fetch = git
+pkg_esh_mk_repo = https://github.com/crownedgrouse/esh.mk.git
+pkg_esh_mk_commit = master
+
+PACKAGES += espec
+pkg_espec_name = espec
+pkg_espec_description = ESpec: Behaviour driven development framework for Erlang
+pkg_espec_homepage = https://github.com/lucaspiller/espec
+pkg_espec_fetch = git
+pkg_espec_repo = https://github.com/lucaspiller/espec
+pkg_espec_commit = master
+
+PACKAGES += estatsd
+pkg_estatsd_name = estatsd
+pkg_estatsd_description = Erlang stats aggregation app that periodically flushes data to graphite
+pkg_estatsd_homepage = https://github.com/RJ/estatsd
+pkg_estatsd_fetch = git
+pkg_estatsd_repo = https://github.com/RJ/estatsd
+pkg_estatsd_commit = master
+
+PACKAGES += etap
+pkg_etap_name = etap
+pkg_etap_description = etap is a simple erlang testing library that provides TAP compliant output.
+pkg_etap_homepage = https://github.com/ngerakines/etap
+pkg_etap_fetch = git
+pkg_etap_repo = https://github.com/ngerakines/etap
+pkg_etap_commit = master
+
+PACKAGES += etest
+pkg_etest_name = etest
+pkg_etest_description = A lightweight, convention over configuration test framework for Erlang
+pkg_etest_homepage = https://github.com/wooga/etest
+pkg_etest_fetch = git
+pkg_etest_repo = https://github.com/wooga/etest
+pkg_etest_commit = master
+
+PACKAGES += etest_http
+pkg_etest_http_name = etest_http
+pkg_etest_http_description = etest Assertions around HTTP (client-side)
+pkg_etest_http_homepage = https://github.com/wooga/etest_http
+pkg_etest_http_fetch = git
+pkg_etest_http_repo = https://github.com/wooga/etest_http
+pkg_etest_http_commit = master
+
+PACKAGES += etoml
+pkg_etoml_name = etoml
+pkg_etoml_description = TOML language erlang parser
+pkg_etoml_homepage = https://github.com/kalta/etoml
+pkg_etoml_fetch = git
+pkg_etoml_repo = https://github.com/kalta/etoml
+pkg_etoml_commit = master
+
+PACKAGES += eunit
+pkg_eunit_name = eunit
+pkg_eunit_description = The EUnit lightweight unit testing framework for Erlang - this is the canonical development repository.
+pkg_eunit_homepage = https://github.com/richcarl/eunit
+pkg_eunit_fetch = git
+pkg_eunit_repo = https://github.com/richcarl/eunit
+pkg_eunit_commit = master
+
+PACKAGES += eunit_formatters
+pkg_eunit_formatters_name = eunit_formatters
+pkg_eunit_formatters_description = Because eunit's output sucks. Let's make it better.
+pkg_eunit_formatters_homepage = https://github.com/seancribbs/eunit_formatters
+pkg_eunit_formatters_fetch = git
+pkg_eunit_formatters_repo = https://github.com/seancribbs/eunit_formatters
+pkg_eunit_formatters_commit = master
+
+PACKAGES += euthanasia
+pkg_euthanasia_name = euthanasia
+pkg_euthanasia_description = Merciful killer for your Erlang processes
+pkg_euthanasia_homepage = https://github.com/doubleyou/euthanasia
+pkg_euthanasia_fetch = git
+pkg_euthanasia_repo = https://github.com/doubleyou/euthanasia
+pkg_euthanasia_commit = master
+
+PACKAGES += evum
+pkg_evum_name = evum
+pkg_evum_description = Spawn Linux VMs as Erlang processes in the Erlang VM
+pkg_evum_homepage = https://github.com/msantos/evum
+pkg_evum_fetch = git
+pkg_evum_repo = https://github.com/msantos/evum
+pkg_evum_commit = master
+
+PACKAGES += exec
+pkg_exec_name = erlexec
+pkg_exec_description = Execute and control OS processes from Erlang/OTP.
+pkg_exec_homepage = http://saleyn.github.com/erlexec
+pkg_exec_fetch = git
+pkg_exec_repo = https://github.com/saleyn/erlexec
+pkg_exec_commit = master
+
+PACKAGES += exml
+pkg_exml_name = exml
+pkg_exml_description = XML parsing library in Erlang
+pkg_exml_homepage = https://github.com/paulgray/exml
+pkg_exml_fetch = git
+pkg_exml_repo = https://github.com/paulgray/exml
+pkg_exml_commit = master
+
+PACKAGES += exometer
+pkg_exometer_name = exometer
+pkg_exometer_description = Basic measurement objects and probe behavior
+pkg_exometer_homepage = https://github.com/Feuerlabs/exometer
+pkg_exometer_fetch = git
+pkg_exometer_repo = https://github.com/Feuerlabs/exometer
+pkg_exometer_commit = master
+
+PACKAGES += exs1024
+pkg_exs1024_name = exs1024
+pkg_exs1024_description = Xorshift1024star pseudo random number generator for Erlang.
+pkg_exs1024_homepage = https://github.com/jj1bdx/exs1024
+pkg_exs1024_fetch = git
+pkg_exs1024_repo = https://github.com/jj1bdx/exs1024
+pkg_exs1024_commit = master
+
+PACKAGES += exs64
+pkg_exs64_name = exs64
+pkg_exs64_description = Xorshift64star pseudo random number generator for Erlang.
+pkg_exs64_homepage = https://github.com/jj1bdx/exs64
+pkg_exs64_fetch = git
+pkg_exs64_repo = https://github.com/jj1bdx/exs64
+pkg_exs64_commit = master
+
+PACKAGES += exsplus116
+pkg_exsplus116_name = exsplus116
+pkg_exsplus116_description = Xorshift116plus for Erlang
+pkg_exsplus116_homepage = https://github.com/jj1bdx/exsplus116
+pkg_exsplus116_fetch = git
+pkg_exsplus116_repo = https://github.com/jj1bdx/exsplus116
+pkg_exsplus116_commit = master
+
+PACKAGES += exsplus128
+pkg_exsplus128_name = exsplus128
+pkg_exsplus128_description = Xorshift128plus pseudo random number generator for Erlang.
+pkg_exsplus128_homepage = https://github.com/jj1bdx/exsplus128
+pkg_exsplus128_fetch = git
+pkg_exsplus128_repo = https://github.com/jj1bdx/exsplus128
+pkg_exsplus128_commit = master
+
+PACKAGES += ezmq
+pkg_ezmq_name = ezmq
+pkg_ezmq_description = zMQ implemented in Erlang
+pkg_ezmq_homepage = https://github.com/RoadRunnr/ezmq
+pkg_ezmq_fetch = git
+pkg_ezmq_repo = https://github.com/RoadRunnr/ezmq
+pkg_ezmq_commit = master
+
+PACKAGES += ezmtp
+pkg_ezmtp_name = ezmtp
+pkg_ezmtp_description = ZMTP protocol in pure Erlang.
+pkg_ezmtp_homepage = https://github.com/a13x/ezmtp
+pkg_ezmtp_fetch = git
+pkg_ezmtp_repo = https://github.com/a13x/ezmtp
+pkg_ezmtp_commit = master
+
+PACKAGES += fast_disk_log
+pkg_fast_disk_log_name = fast_disk_log
+pkg_fast_disk_log_description = Pool-based asynchronous Erlang disk logger
+pkg_fast_disk_log_homepage = https://github.com/lpgauth/fast_disk_log
+pkg_fast_disk_log_fetch = git
+pkg_fast_disk_log_repo = https://github.com/lpgauth/fast_disk_log
+pkg_fast_disk_log_commit = master
+
+PACKAGES += feeder
+pkg_feeder_name = feeder
+pkg_feeder_description = Stream parse RSS and Atom formatted XML feeds.
+pkg_feeder_homepage = https://github.com/michaelnisi/feeder
+pkg_feeder_fetch = git
+pkg_feeder_repo = https://github.com/michaelnisi/feeder
+pkg_feeder_commit = master
+
+PACKAGES += find_crate
+pkg_find_crate_name = find_crate
+pkg_find_crate_description = Find Rust libs and exes in Erlang application priv directory
+pkg_find_crate_homepage = https://github.com/goertzenator/find_crate
+pkg_find_crate_fetch = git
+pkg_find_crate_repo = https://github.com/goertzenator/find_crate
+pkg_find_crate_commit = master
+
+PACKAGES += fix
+pkg_fix_name = fix
+pkg_fix_description = http://fixprotocol.org/ implementation.
+pkg_fix_homepage = https://github.com/maxlapshin/fix
+pkg_fix_fetch = git
+pkg_fix_repo = https://github.com/maxlapshin/fix
+pkg_fix_commit = master
+
+PACKAGES += flower
+pkg_flower_name = flower
+pkg_flower_description = FlowER - an Erlang OpenFlow development platform
+pkg_flower_homepage = https://github.com/travelping/flower
+pkg_flower_fetch = git
+pkg_flower_repo = https://github.com/travelping/flower
+pkg_flower_commit = master
+
+PACKAGES += fn
+pkg_fn_name = fn
+pkg_fn_description = Function utilities for Erlang
+pkg_fn_homepage = https://github.com/reiddraper/fn
+pkg_fn_fetch = git
+pkg_fn_repo = https://github.com/reiddraper/fn
+pkg_fn_commit = master
+
+PACKAGES += folsom
+pkg_folsom_name = folsom
+pkg_folsom_description = Expose Erlang Events and Metrics
+pkg_folsom_homepage = https://github.com/boundary/folsom
+pkg_folsom_fetch = git
+pkg_folsom_repo = https://github.com/boundary/folsom
+pkg_folsom_commit = master
+
+PACKAGES += folsom_cowboy
+pkg_folsom_cowboy_name = folsom_cowboy
+pkg_folsom_cowboy_description = A Cowboy based Folsom HTTP Wrapper.
+pkg_folsom_cowboy_homepage = https://github.com/boundary/folsom_cowboy
+pkg_folsom_cowboy_fetch = git
+pkg_folsom_cowboy_repo = https://github.com/boundary/folsom_cowboy
+pkg_folsom_cowboy_commit = master
+
+PACKAGES += folsomite
+pkg_folsomite_name = folsomite
+pkg_folsomite_description = blow up your graphite / riemann server with folsom metrics
+pkg_folsomite_homepage = https://github.com/campanja/folsomite
+pkg_folsomite_fetch = git
+pkg_folsomite_repo = https://github.com/campanja/folsomite
+pkg_folsomite_commit = master
+
+PACKAGES += fs
+pkg_fs_name = fs
+pkg_fs_description = Erlang FileSystem Listener
+pkg_fs_homepage = https://github.com/synrc/fs
+pkg_fs_fetch = git
+pkg_fs_repo = https://github.com/synrc/fs
+pkg_fs_commit = master
+
+PACKAGES += fuse
+pkg_fuse_name = fuse
+pkg_fuse_description = A Circuit Breaker for Erlang
+pkg_fuse_homepage = https://github.com/jlouis/fuse
+pkg_fuse_fetch = git
+pkg_fuse_repo = https://github.com/jlouis/fuse
+pkg_fuse_commit = master
+
+PACKAGES += gcm
+pkg_gcm_name = gcm
+pkg_gcm_description = An Erlang application for Google Cloud Messaging
+pkg_gcm_homepage = https://github.com/pdincau/gcm-erlang
+pkg_gcm_fetch = git
+pkg_gcm_repo = https://github.com/pdincau/gcm-erlang
+pkg_gcm_commit = master
+
+PACKAGES += gcprof
+pkg_gcprof_name = gcprof
+pkg_gcprof_description = Garbage Collection profiler for Erlang
+pkg_gcprof_homepage = https://github.com/knutin/gcprof
+pkg_gcprof_fetch = git
+pkg_gcprof_repo = https://github.com/knutin/gcprof
+pkg_gcprof_commit = master
+
+PACKAGES += geas
+pkg_geas_name = geas
+pkg_geas_description = Guess Erlang Application Scattering
+pkg_geas_homepage = https://github.com/crownedgrouse/geas
+pkg_geas_fetch = git
+pkg_geas_repo = https://github.com/crownedgrouse/geas
+pkg_geas_commit = master
+
+PACKAGES += geef
+pkg_geef_name = geef
+pkg_geef_description = Git NEEEEF (Erlang NIF)
+pkg_geef_homepage = https://github.com/carlosmn/geef
+pkg_geef_fetch = git
+pkg_geef_repo = https://github.com/carlosmn/geef
+pkg_geef_commit = master
+
+PACKAGES += gen_coap
+pkg_gen_coap_name = gen_coap
+pkg_gen_coap_description = Generic Erlang CoAP Client/Server
+pkg_gen_coap_homepage = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_fetch = git
+pkg_gen_coap_repo = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_commit = master
+
+PACKAGES += gen_cycle
+pkg_gen_cycle_name = gen_cycle
+pkg_gen_cycle_description = Simple, generic OTP behaviour for recurring tasks
+pkg_gen_cycle_homepage = https://github.com/aerosol/gen_cycle
+pkg_gen_cycle_fetch = git
+pkg_gen_cycle_repo = https://github.com/aerosol/gen_cycle
+pkg_gen_cycle_commit = develop
+
+PACKAGES += gen_icmp
+pkg_gen_icmp_name = gen_icmp
+pkg_gen_icmp_description = Erlang interface to ICMP sockets
+pkg_gen_icmp_homepage = https://github.com/msantos/gen_icmp
+pkg_gen_icmp_fetch = git
+pkg_gen_icmp_repo = https://github.com/msantos/gen_icmp
+pkg_gen_icmp_commit = master
+
+PACKAGES += gen_leader
+pkg_gen_leader_name = gen_leader
+pkg_gen_leader_description = leader election behavior
+pkg_gen_leader_homepage = https://github.com/garret-smith/gen_leader_revival
+pkg_gen_leader_fetch = git
+pkg_gen_leader_repo = https://github.com/garret-smith/gen_leader_revival
+pkg_gen_leader_commit = master
+
+PACKAGES += gen_nb_server
+pkg_gen_nb_server_name = gen_nb_server
+pkg_gen_nb_server_description = OTP behavior for writing non-blocking servers
+pkg_gen_nb_server_homepage = https://github.com/kevsmith/gen_nb_server
+pkg_gen_nb_server_fetch = git
+pkg_gen_nb_server_repo = https://github.com/kevsmith/gen_nb_server
+pkg_gen_nb_server_commit = master
+
+PACKAGES += gen_paxos
+pkg_gen_paxos_name = gen_paxos
+pkg_gen_paxos_description = An Erlang/OTP-style implementation of the PAXOS distributed consensus protocol
+pkg_gen_paxos_homepage = https://github.com/gburd/gen_paxos
+pkg_gen_paxos_fetch = git
+pkg_gen_paxos_repo = https://github.com/gburd/gen_paxos
+pkg_gen_paxos_commit = master
+
+PACKAGES += gen_rpc
+pkg_gen_rpc_name = gen_rpc
+pkg_gen_rpc_description = A scalable RPC library for Erlang-VM based languages
+pkg_gen_rpc_homepage = https://github.com/priestjim/gen_rpc
+pkg_gen_rpc_fetch = git
+pkg_gen_rpc_repo = https://github.com/priestjim/gen_rpc.git
+pkg_gen_rpc_commit = master
+
+PACKAGES += gen_smtp
+pkg_gen_smtp_name = gen_smtp
+pkg_gen_smtp_description = A generic Erlang SMTP server and client that can be extended via callback modules
+pkg_gen_smtp_homepage = https://github.com/Vagabond/gen_smtp
+pkg_gen_smtp_fetch = git
+pkg_gen_smtp_repo = https://github.com/Vagabond/gen_smtp
+pkg_gen_smtp_commit = master
+
+PACKAGES += gen_tracker
+pkg_gen_tracker_name = gen_tracker
+pkg_gen_tracker_description = supervisor with ets handling of children and their metadata
+pkg_gen_tracker_homepage = https://github.com/erlyvideo/gen_tracker
+pkg_gen_tracker_fetch = git
+pkg_gen_tracker_repo = https://github.com/erlyvideo/gen_tracker
+pkg_gen_tracker_commit = master
+
+PACKAGES += gen_unix
+pkg_gen_unix_name = gen_unix
+pkg_gen_unix_description = Erlang Unix socket interface
+pkg_gen_unix_homepage = https://github.com/msantos/gen_unix
+pkg_gen_unix_fetch = git
+pkg_gen_unix_repo = https://github.com/msantos/gen_unix
+pkg_gen_unix_commit = master
+
+PACKAGES += geode
+pkg_geode_name = geode
+pkg_geode_description = geohash/proximity lookup in pure, uncut erlang.
+pkg_geode_homepage = https://github.com/bradfordw/geode
+pkg_geode_fetch = git
+pkg_geode_repo = https://github.com/bradfordw/geode
+pkg_geode_commit = master
+
+PACKAGES += getopt
+pkg_getopt_name = getopt
+pkg_getopt_description = Module to parse command line arguments using the GNU getopt syntax
+pkg_getopt_homepage = https://github.com/jcomellas/getopt
+pkg_getopt_fetch = git
+pkg_getopt_repo = https://github.com/jcomellas/getopt
+pkg_getopt_commit = master
+
+PACKAGES += gettext
+pkg_gettext_name = gettext
+pkg_gettext_description = Erlang internationalization library.
+pkg_gettext_homepage = https://github.com/etnt/gettext
+pkg_gettext_fetch = git
+pkg_gettext_repo = https://github.com/etnt/gettext
+pkg_gettext_commit = master
+
+PACKAGES += giallo
+pkg_giallo_name = giallo
+pkg_giallo_description = Small and flexible web framework on top of Cowboy
+pkg_giallo_homepage = https://github.com/kivra/giallo
+pkg_giallo_fetch = git
+pkg_giallo_repo = https://github.com/kivra/giallo
+pkg_giallo_commit = master
+
+PACKAGES += gin
+pkg_gin_name = gin
+pkg_gin_description = Guard macros for Erlang implemented as a parse_transform
+pkg_gin_homepage = https://github.com/mad-cocktail/gin
+pkg_gin_fetch = git
+pkg_gin_repo = https://github.com/mad-cocktail/gin
+pkg_gin_commit = master
+
+PACKAGES += gitty
+pkg_gitty_name = gitty
+pkg_gitty_description = Git access in erlang
+pkg_gitty_homepage = https://github.com/maxlapshin/gitty
+pkg_gitty_fetch = git
+pkg_gitty_repo = https://github.com/maxlapshin/gitty
+pkg_gitty_commit = master
+
+PACKAGES += gold_fever
+pkg_gold_fever_name = gold_fever
+pkg_gold_fever_description = A Treasure Hunt for Erlangers
+pkg_gold_fever_homepage = https://github.com/inaka/gold_fever
+pkg_gold_fever_fetch = git
+pkg_gold_fever_repo = https://github.com/inaka/gold_fever
+pkg_gold_fever_commit = master
+
+PACKAGES += gpb
+pkg_gpb_name = gpb
+pkg_gpb_description = A Google Protobuf implementation for Erlang
+pkg_gpb_homepage = https://github.com/tomas-abrahamsson/gpb
+pkg_gpb_fetch = git
+pkg_gpb_repo = https://github.com/tomas-abrahamsson/gpb
+pkg_gpb_commit = master
+
+PACKAGES += gproc
+pkg_gproc_name = gproc
+pkg_gproc_description = Extended process registry for Erlang
+pkg_gproc_homepage = https://github.com/uwiger/gproc
+pkg_gproc_fetch = git
+pkg_gproc_repo = https://github.com/uwiger/gproc
+pkg_gproc_commit = master
+
+PACKAGES += grapherl
+pkg_grapherl_name = grapherl
+pkg_grapherl_description = Create graphs of Erlang systems and programs
+pkg_grapherl_homepage = https://github.com/eproxus/grapherl
+pkg_grapherl_fetch = git
+pkg_grapherl_repo = https://github.com/eproxus/grapherl
+pkg_grapherl_commit = master
+
+PACKAGES += grpc
+pkg_grpc_name = grpc
+pkg_grpc_description = gRPC server in Erlang
+pkg_grpc_homepage = https://github.com/Bluehouse-Technology/grpc
+pkg_grpc_fetch = git
+pkg_grpc_repo = https://github.com/Bluehouse-Technology/grpc
+pkg_grpc_commit = master
+
+PACKAGES += grpc_client
+pkg_grpc_client_name = grpc_client
+pkg_grpc_client_description = gRPC client in Erlang
+pkg_grpc_client_homepage = https://github.com/Bluehouse-Technology/grpc_client
+pkg_grpc_client_fetch = git
+pkg_grpc_client_repo = https://github.com/Bluehouse-Technology/grpc_client
+pkg_grpc_client_commit = master
+
+PACKAGES += gun
+pkg_gun_name = gun
+pkg_gun_description = Asynchronous SPDY, HTTP and Websocket client written in Erlang.
+pkg_gun_homepage = http://ninenines.eu
+pkg_gun_fetch = git
+pkg_gun_repo = https://github.com/ninenines/gun
+pkg_gun_commit = master
+
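+# Usage sketch: a project Makefile can pull any package above by name alone,
+# or override the index defaults with a dep_<name> line of the form
+# "<fetch method> <repo> <version>". For example, to depend on gun but pin
+# a tag instead of master (the tag shown is illustrative):
+#
+#   DEPS = gun
+#   dep_gun = git https://github.com/ninenines/gun 1.3.0
+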
+PACKAGES += gut
+pkg_gut_name = gut
+pkg_gut_description = gut is a template printing, aka scaffolding, tool for Erlang. Like rails generate or yeoman
+pkg_gut_homepage = https://github.com/unbalancedparentheses/gut
+pkg_gut_fetch = git
+pkg_gut_repo = https://github.com/unbalancedparentheses/gut
+pkg_gut_commit = master
+
+PACKAGES += hackney
+pkg_hackney_name = hackney
+pkg_hackney_description = simple HTTP client in Erlang
+pkg_hackney_homepage = https://github.com/benoitc/hackney
+pkg_hackney_fetch = git
+pkg_hackney_repo = https://github.com/benoitc/hackney
+pkg_hackney_commit = master
+
+PACKAGES += hamcrest
+pkg_hamcrest_name = hamcrest
+pkg_hamcrest_description = Erlang port of Hamcrest
+pkg_hamcrest_homepage = https://github.com/hyperthunk/hamcrest-erlang
+pkg_hamcrest_fetch = git
+pkg_hamcrest_repo = https://github.com/hyperthunk/hamcrest-erlang
+pkg_hamcrest_commit = master
+
+PACKAGES += hanoidb
+pkg_hanoidb_name = hanoidb
+pkg_hanoidb_description = Erlang LSM BTree Storage
+pkg_hanoidb_homepage = https://github.com/krestenkrab/hanoidb
+pkg_hanoidb_fetch = git
+pkg_hanoidb_repo = https://github.com/krestenkrab/hanoidb
+pkg_hanoidb_commit = master
+
+PACKAGES += hottub
+pkg_hottub_name = hottub
+pkg_hottub_description = Permanent Erlang Worker Pool
+pkg_hottub_homepage = https://github.com/bfrog/hottub
+pkg_hottub_fetch = git
+pkg_hottub_repo = https://github.com/bfrog/hottub
+pkg_hottub_commit = master
+
+PACKAGES += hpack
+pkg_hpack_name = hpack
+pkg_hpack_description = HPACK Implementation for Erlang
+pkg_hpack_homepage = https://github.com/joedevivo/hpack
+pkg_hpack_fetch = git
+pkg_hpack_repo = https://github.com/joedevivo/hpack
+pkg_hpack_commit = master
+
+PACKAGES += hyper
+pkg_hyper_name = hyper
+pkg_hyper_description = Erlang implementation of HyperLogLog
+pkg_hyper_homepage = https://github.com/GameAnalytics/hyper
+pkg_hyper_fetch = git
+pkg_hyper_repo = https://github.com/GameAnalytics/hyper
+pkg_hyper_commit = master
+
+PACKAGES += i18n
+pkg_i18n_name = i18n
+pkg_i18n_description = International components for unicode from Erlang (unicode, date, string, number, format, locale, localization, transliteration, icu4e)
+pkg_i18n_homepage = https://github.com/erlang-unicode/i18n
+pkg_i18n_fetch = git
+pkg_i18n_repo = https://github.com/erlang-unicode/i18n
+pkg_i18n_commit = master
+
+PACKAGES += ibrowse
+pkg_ibrowse_name = ibrowse
+pkg_ibrowse_description = Erlang HTTP client
+pkg_ibrowse_homepage = https://github.com/cmullaparthi/ibrowse
+pkg_ibrowse_fetch = git
+pkg_ibrowse_repo = https://github.com/cmullaparthi/ibrowse
+pkg_ibrowse_commit = master
+
+PACKAGES += idna
+pkg_idna_name = idna
+pkg_idna_description = Erlang IDNA lib
+pkg_idna_homepage = https://github.com/benoitc/erlang-idna
+pkg_idna_fetch = git
+pkg_idna_repo = https://github.com/benoitc/erlang-idna
+pkg_idna_commit = master
+
+PACKAGES += ierlang
+pkg_ierlang_name = ierlang
+pkg_ierlang_description = An Erlang language kernel for IPython.
+pkg_ierlang_homepage = https://github.com/robbielynch/ierlang
+pkg_ierlang_fetch = git
+pkg_ierlang_repo = https://github.com/robbielynch/ierlang
+pkg_ierlang_commit = master
+
+PACKAGES += iota
+pkg_iota_name = iota
+pkg_iota_description = iota (Inter-dependency Objective Testing Apparatus) - a tool to enforce clean separation of responsibilities in Erlang code
+pkg_iota_homepage = https://github.com/jpgneves/iota
+pkg_iota_fetch = git
+pkg_iota_repo = https://github.com/jpgneves/iota
+pkg_iota_commit = master
+
+PACKAGES += irc_lib
+pkg_irc_lib_name = irc_lib
+pkg_irc_lib_description = Erlang irc client library
+pkg_irc_lib_homepage = https://github.com/OtpChatBot/irc_lib
+pkg_irc_lib_fetch = git
+pkg_irc_lib_repo = https://github.com/OtpChatBot/irc_lib
+pkg_irc_lib_commit = master
+
+PACKAGES += ircd
+pkg_ircd_name = ircd
+pkg_ircd_description = A pluggable IRC daemon application/library for Erlang.
+pkg_ircd_homepage = https://github.com/tonyg/erlang-ircd
+pkg_ircd_fetch = git
+pkg_ircd_repo = https://github.com/tonyg/erlang-ircd
+pkg_ircd_commit = master
+
+PACKAGES += iris
+pkg_iris_name = iris
+pkg_iris_description = Iris Erlang binding
+pkg_iris_homepage = https://github.com/project-iris/iris-erl
+pkg_iris_fetch = git
+pkg_iris_repo = https://github.com/project-iris/iris-erl
+pkg_iris_commit = master
+
+PACKAGES += iso8601
+pkg_iso8601_name = iso8601
+pkg_iso8601_description = Erlang ISO 8601 date formatter/parser
+pkg_iso8601_homepage = https://github.com/seansawyer/erlang_iso8601
+pkg_iso8601_fetch = git
+pkg_iso8601_repo = https://github.com/seansawyer/erlang_iso8601
+pkg_iso8601_commit = master
+
+PACKAGES += jamdb_sybase
+pkg_jamdb_sybase_name = jamdb_sybase
+pkg_jamdb_sybase_description = Erlang driver for SAP Sybase ASE
+pkg_jamdb_sybase_homepage = https://github.com/erlangbureau/jamdb_sybase
+pkg_jamdb_sybase_fetch = git
+pkg_jamdb_sybase_repo = https://github.com/erlangbureau/jamdb_sybase
+pkg_jamdb_sybase_commit = master
+
+PACKAGES += jerg
+pkg_jerg_name = jerg
+pkg_jerg_description = JSON Schema to Erlang Records Generator
+pkg_jerg_homepage = https://github.com/ddossot/jerg
+pkg_jerg_fetch = git
+pkg_jerg_repo = https://github.com/ddossot/jerg
+pkg_jerg_commit = master
+
+PACKAGES += jesse
+pkg_jesse_name = jesse
+pkg_jesse_description = jesse (JSon Schema Erlang) is an implementation of a json schema validator for Erlang.
+pkg_jesse_homepage = https://github.com/for-GET/jesse
+pkg_jesse_fetch = git
+pkg_jesse_repo = https://github.com/for-GET/jesse
+pkg_jesse_commit = master
+
+PACKAGES += jiffy
+pkg_jiffy_name = jiffy
+pkg_jiffy_description = JSON NIFs for Erlang.
+pkg_jiffy_homepage = https://github.com/davisp/jiffy
+pkg_jiffy_fetch = git
+pkg_jiffy_repo = https://github.com/davisp/jiffy
+pkg_jiffy_commit = master
+
+PACKAGES += jiffy_v
+pkg_jiffy_v_name = jiffy_v
+pkg_jiffy_v_description = JSON validation utility
+pkg_jiffy_v_homepage = https://github.com/shizzard/jiffy-v
+pkg_jiffy_v_fetch = git
+pkg_jiffy_v_repo = https://github.com/shizzard/jiffy-v
+pkg_jiffy_v_commit = master
+
+PACKAGES += jobs
+pkg_jobs_name = jobs
+pkg_jobs_description = a Job scheduler for load regulation
+pkg_jobs_homepage = https://github.com/esl/jobs
+pkg_jobs_fetch = git
+pkg_jobs_repo = https://github.com/esl/jobs
+pkg_jobs_commit = master
+
+PACKAGES += joxa
+pkg_joxa_name = joxa
+pkg_joxa_description = A Modern Lisp for the Erlang VM
+pkg_joxa_homepage = https://github.com/joxa/joxa
+pkg_joxa_fetch = git
+pkg_joxa_repo = https://github.com/joxa/joxa
+pkg_joxa_commit = master
+
+PACKAGES += json
+pkg_json_name = json
+pkg_json_description = a high level json library for erlang (17.0+)
+pkg_json_homepage = https://github.com/talentdeficit/json
+pkg_json_fetch = git
+pkg_json_repo = https://github.com/talentdeficit/json
+pkg_json_commit = master
+
+PACKAGES += json_rec
+pkg_json_rec_name = json_rec
+pkg_json_rec_description = JSON to erlang record
+pkg_json_rec_homepage = https://github.com/justinkirby/json_rec
+pkg_json_rec_fetch = git
+pkg_json_rec_repo = https://github.com/justinkirby/json_rec
+pkg_json_rec_commit = master
+
+PACKAGES += jsone
+pkg_jsone_name = jsone
+pkg_jsone_description = An Erlang library for encoding and decoding JSON data.
+pkg_jsone_homepage = https://github.com/sile/jsone
+pkg_jsone_fetch = git
+pkg_jsone_repo = https://github.com/sile/jsone.git
+pkg_jsone_commit = master
+
+PACKAGES += jsonerl
+pkg_jsonerl_name = jsonerl
+pkg_jsonerl_description = yet another but slightly different erlang <-> json encoder/decoder
+pkg_jsonerl_homepage = https://github.com/lambder/jsonerl
+pkg_jsonerl_fetch = git
+pkg_jsonerl_repo = https://github.com/lambder/jsonerl
+pkg_jsonerl_commit = master
+
+PACKAGES += jsonpath
+pkg_jsonpath_name = jsonpath
+pkg_jsonpath_description = Fast Erlang JSON data retrieval and updates via javascript-like notation
+pkg_jsonpath_homepage = https://github.com/GeneStevens/jsonpath
+pkg_jsonpath_fetch = git
+pkg_jsonpath_repo = https://github.com/GeneStevens/jsonpath
+pkg_jsonpath_commit = master
+
+PACKAGES += jsonx
+pkg_jsonx_name = jsonx
+pkg_jsonx_description = JSONX is an Erlang library for efficiently decoding and encoding JSON, written in C.
+pkg_jsonx_homepage = https://github.com/iskra/jsonx
+pkg_jsonx_fetch = git
+pkg_jsonx_repo = https://github.com/iskra/jsonx
+pkg_jsonx_commit = master
+
+PACKAGES += jsx
+pkg_jsx_name = jsx
+pkg_jsx_description = An Erlang application for consuming, producing and manipulating JSON.
+pkg_jsx_homepage = https://github.com/talentdeficit/jsx
+pkg_jsx_fetch = git
+pkg_jsx_repo = https://github.com/talentdeficit/jsx
+pkg_jsx_commit = master
+
+PACKAGES += kafka
+pkg_kafka_name = kafka
+pkg_kafka_description = Kafka consumer and producer in Erlang
+pkg_kafka_homepage = https://github.com/wooga/kafka-erlang
+pkg_kafka_fetch = git
+pkg_kafka_repo = https://github.com/wooga/kafka-erlang
+pkg_kafka_commit = master
+
+PACKAGES += kafka_protocol
+pkg_kafka_protocol_name = kafka_protocol
+pkg_kafka_protocol_description = Kafka protocol Erlang library
+pkg_kafka_protocol_homepage = https://github.com/klarna/kafka_protocol
+pkg_kafka_protocol_fetch = git
+pkg_kafka_protocol_repo = https://github.com/klarna/kafka_protocol.git
+pkg_kafka_protocol_commit = master
+
+PACKAGES += kai
+pkg_kai_name = kai
+pkg_kai_description = DHT storage by Takeshi Inoue
+pkg_kai_homepage = https://github.com/synrc/kai
+pkg_kai_fetch = git
+pkg_kai_repo = https://github.com/synrc/kai
+pkg_kai_commit = master
+
+PACKAGES += katja
+pkg_katja_name = katja
+pkg_katja_description = A simple Riemann client written in Erlang.
+pkg_katja_homepage = https://github.com/nifoc/katja
+pkg_katja_fetch = git
+pkg_katja_repo = https://github.com/nifoc/katja
+pkg_katja_commit = master
+
+PACKAGES += kdht
+pkg_kdht_name = kdht
+pkg_kdht_description = kdht is an erlang DHT implementation
+pkg_kdht_homepage = https://github.com/kevinlynx/kdht
+pkg_kdht_fetch = git
+pkg_kdht_repo = https://github.com/kevinlynx/kdht
+pkg_kdht_commit = master
+
+PACKAGES += key2value
+pkg_key2value_name = key2value
+pkg_key2value_description = Erlang 2-way map
+pkg_key2value_homepage = https://github.com/okeuday/key2value
+pkg_key2value_fetch = git
+pkg_key2value_repo = https://github.com/okeuday/key2value
+pkg_key2value_commit = master
+
+PACKAGES += keys1value
+pkg_keys1value_name = keys1value
+pkg_keys1value_description = Erlang set associative map for key lists
+pkg_keys1value_homepage = https://github.com/okeuday/keys1value
+pkg_keys1value_fetch = git
+pkg_keys1value_repo = https://github.com/okeuday/keys1value
+pkg_keys1value_commit = master
+
+PACKAGES += kinetic
+pkg_kinetic_name = kinetic
+pkg_kinetic_description = Erlang Kinesis Client
+pkg_kinetic_homepage = https://github.com/AdRoll/kinetic
+pkg_kinetic_fetch = git
+pkg_kinetic_repo = https://github.com/AdRoll/kinetic
+pkg_kinetic_commit = master
+
+PACKAGES += kjell
+pkg_kjell_name = kjell
+pkg_kjell_description = Erlang Shell
+pkg_kjell_homepage = https://github.com/karlll/kjell
+pkg_kjell_fetch = git
+pkg_kjell_repo = https://github.com/karlll/kjell
+pkg_kjell_commit = master
+
+PACKAGES += kraken
+pkg_kraken_name = kraken
+pkg_kraken_description = Distributed Pubsub Server for Realtime Apps
+pkg_kraken_homepage = https://github.com/Asana/kraken
+pkg_kraken_fetch = git
+pkg_kraken_repo = https://github.com/Asana/kraken
+pkg_kraken_commit = master
+
+PACKAGES += kucumberl
+pkg_kucumberl_name = kucumberl
+pkg_kucumberl_description = A pure-Erlang, open-source implementation of Cucumber
+pkg_kucumberl_homepage = https://github.com/openshine/kucumberl
+pkg_kucumberl_fetch = git
+pkg_kucumberl_repo = https://github.com/openshine/kucumberl
+pkg_kucumberl_commit = master
+
+PACKAGES += kvc
+pkg_kvc_name = kvc
+pkg_kvc_description = KVC - Key Value Coding for Erlang data structures
+pkg_kvc_homepage = https://github.com/etrepum/kvc
+pkg_kvc_fetch = git
+pkg_kvc_repo = https://github.com/etrepum/kvc
+pkg_kvc_commit = master
+
+PACKAGES += kvlists
+pkg_kvlists_name = kvlists
+pkg_kvlists_description = Lists of key-value pairs (decoded JSON) in Erlang
+pkg_kvlists_homepage = https://github.com/jcomellas/kvlists
+pkg_kvlists_fetch = git
+pkg_kvlists_repo = https://github.com/jcomellas/kvlists
+pkg_kvlists_commit = master
+
+PACKAGES += kvs
+pkg_kvs_name = kvs
+pkg_kvs_description = Container and Iterator
+pkg_kvs_homepage = https://github.com/synrc/kvs
+pkg_kvs_fetch = git
+pkg_kvs_repo = https://github.com/synrc/kvs
+pkg_kvs_commit = master
+
+PACKAGES += lager
+pkg_lager_name = lager
+pkg_lager_description = A logging framework for Erlang/OTP.
+pkg_lager_homepage = https://github.com/erlang-lager/lager
+pkg_lager_fetch = git
+pkg_lager_repo = https://github.com/erlang-lager/lager
+pkg_lager_commit = master
+
+PACKAGES += lager_amqp_backend
+pkg_lager_amqp_backend_name = lager_amqp_backend
+pkg_lager_amqp_backend_description = AMQP RabbitMQ Lager backend
+pkg_lager_amqp_backend_homepage = https://github.com/jbrisbin/lager_amqp_backend
+pkg_lager_amqp_backend_fetch = git
+pkg_lager_amqp_backend_repo = https://github.com/jbrisbin/lager_amqp_backend
+pkg_lager_amqp_backend_commit = master
+
+PACKAGES += lager_syslog
+pkg_lager_syslog_name = lager_syslog
+pkg_lager_syslog_description = Syslog backend for lager
+pkg_lager_syslog_homepage = https://github.com/erlang-lager/lager_syslog
+pkg_lager_syslog_fetch = git
+pkg_lager_syslog_repo = https://github.com/erlang-lager/lager_syslog
+pkg_lager_syslog_commit = master
+
+PACKAGES += lambdapad
+pkg_lambdapad_name = lambdapad
+pkg_lambdapad_description = Static site generator using Erlang. Yes, Erlang.
+pkg_lambdapad_homepage = https://github.com/gar1t/lambdapad
+pkg_lambdapad_fetch = git
+pkg_lambdapad_repo = https://github.com/gar1t/lambdapad
+pkg_lambdapad_commit = master
+
+PACKAGES += lasp
+pkg_lasp_name = lasp
+pkg_lasp_description = A Language for Distributed, Eventually Consistent Computations
+pkg_lasp_homepage = http://lasp-lang.org/
+pkg_lasp_fetch = git
+pkg_lasp_repo = https://github.com/lasp-lang/lasp
+pkg_lasp_commit = master
+
+PACKAGES += lasse
+pkg_lasse_name = lasse
+pkg_lasse_description = SSE handler for Cowboy
+pkg_lasse_homepage = https://github.com/inaka/lasse
+pkg_lasse_fetch = git
+pkg_lasse_repo = https://github.com/inaka/lasse
+pkg_lasse_commit = master
+
+PACKAGES += ldap
+pkg_ldap_name = ldap
+pkg_ldap_description = LDAP server written in Erlang
+pkg_ldap_homepage = https://github.com/spawnproc/ldap
+pkg_ldap_fetch = git
+pkg_ldap_repo = https://github.com/spawnproc/ldap
+pkg_ldap_commit = master
+
+PACKAGES += lethink
+pkg_lethink_name = lethink
+pkg_lethink_description = erlang driver for rethinkdb
+pkg_lethink_homepage = https://github.com/taybin/lethink
+pkg_lethink_fetch = git
+pkg_lethink_repo = https://github.com/taybin/lethink
+pkg_lethink_commit = master
+
+PACKAGES += lfe
+pkg_lfe_name = lfe
+pkg_lfe_description = Lisp Flavoured Erlang (LFE)
+pkg_lfe_homepage = https://github.com/rvirding/lfe
+pkg_lfe_fetch = git
+pkg_lfe_repo = https://github.com/rvirding/lfe
+pkg_lfe_commit = master
+
+PACKAGES += ling
+pkg_ling_name = ling
+pkg_ling_description = Erlang on Xen
+pkg_ling_homepage = https://github.com/cloudozer/ling
+pkg_ling_fetch = git
+pkg_ling_repo = https://github.com/cloudozer/ling
+pkg_ling_commit = master
+
+PACKAGES += live
+pkg_live_name = live
+pkg_live_description = Automated module and configuration reloader.
+pkg_live_homepage = http://ninenines.eu
+pkg_live_fetch = git
+pkg_live_repo = https://github.com/ninenines/live
+pkg_live_commit = master
+
+PACKAGES += lmq
+pkg_lmq_name = lmq
+pkg_lmq_description = Lightweight Message Queue
+pkg_lmq_homepage = https://github.com/iij/lmq
+pkg_lmq_fetch = git
+pkg_lmq_repo = https://github.com/iij/lmq
+pkg_lmq_commit = master
+
+PACKAGES += locker
+pkg_locker_name = locker
+pkg_locker_description = Atomic distributed 'check and set' for short-lived keys
+pkg_locker_homepage = https://github.com/wooga/locker
+pkg_locker_fetch = git
+pkg_locker_repo = https://github.com/wooga/locker
+pkg_locker_commit = master
+
+PACKAGES += locks
+pkg_locks_name = locks
+pkg_locks_description = A scalable, deadlock-resolving resource locker
+pkg_locks_homepage = https://github.com/uwiger/locks
+pkg_locks_fetch = git
+pkg_locks_repo = https://github.com/uwiger/locks
+pkg_locks_commit = master
+
+PACKAGES += log4erl
+pkg_log4erl_name = log4erl
+pkg_log4erl_description = A logger for erlang in the spirit of Log4J.
+pkg_log4erl_homepage = https://github.com/ahmednawras/log4erl
+pkg_log4erl_fetch = git
+pkg_log4erl_repo = https://github.com/ahmednawras/log4erl
+pkg_log4erl_commit = master
+
+PACKAGES += lol
+pkg_lol_name = lol
+pkg_lol_description = Lisp on erLang, and programming is fun again
+pkg_lol_homepage = https://github.com/b0oh/lol
+pkg_lol_fetch = git
+pkg_lol_repo = https://github.com/b0oh/lol
+pkg_lol_commit = master
+
+PACKAGES += lucid
+pkg_lucid_name = lucid
+pkg_lucid_description = HTTP/2 server written in Erlang
+pkg_lucid_homepage = https://github.com/tatsuhiro-t/lucid
+pkg_lucid_fetch = git
+pkg_lucid_repo = https://github.com/tatsuhiro-t/lucid
+pkg_lucid_commit = master
+
+PACKAGES += luerl
+pkg_luerl_name = luerl
+pkg_luerl_description = Lua in Erlang
+pkg_luerl_homepage = https://github.com/rvirding/luerl
+pkg_luerl_fetch = git
+pkg_luerl_repo = https://github.com/rvirding/luerl
+pkg_luerl_commit = develop
+
+PACKAGES += luwak
+pkg_luwak_name = luwak
+pkg_luwak_description = Large-object storage interface for Riak
+pkg_luwak_homepage = https://github.com/basho/luwak
+pkg_luwak_fetch = git
+pkg_luwak_repo = https://github.com/basho/luwak
+pkg_luwak_commit = master
+
+PACKAGES += lux
+pkg_lux_name = lux
+pkg_lux_description = Lux (LUcid eXpect scripting) simplifies test automation and provides an Expect-style execution of commands
+pkg_lux_homepage = https://github.com/hawk/lux
+pkg_lux_fetch = git
+pkg_lux_repo = https://github.com/hawk/lux
+pkg_lux_commit = master
+
+PACKAGES += machi
+pkg_machi_name = machi
+pkg_machi_description = Machi file store
+pkg_machi_homepage = https://github.com/basho/machi
+pkg_machi_fetch = git
+pkg_machi_repo = https://github.com/basho/machi
+pkg_machi_commit = master
+
+PACKAGES += mad
+pkg_mad_name = mad
+pkg_mad_description = Small and Fast Rebar Replacement
+pkg_mad_homepage = https://github.com/synrc/mad
+pkg_mad_fetch = git
+pkg_mad_repo = https://github.com/synrc/mad
+pkg_mad_commit = master
+
+PACKAGES += marina
+pkg_marina_name = marina
+pkg_marina_description = Non-blocking Erlang Cassandra CQL3 client
+pkg_marina_homepage = https://github.com/lpgauth/marina
+pkg_marina_fetch = git
+pkg_marina_repo = https://github.com/lpgauth/marina
+pkg_marina_commit = master
+
+PACKAGES += mavg
+pkg_mavg_name = mavg
+pkg_mavg_description = Erlang :: Exponential moving average library
+pkg_mavg_homepage = https://github.com/EchoTeam/mavg
+pkg_mavg_fetch = git
+pkg_mavg_repo = https://github.com/EchoTeam/mavg
+pkg_mavg_commit = master
+
+PACKAGES += mc_erl
+pkg_mc_erl_name = mc_erl
+pkg_mc_erl_description = mc-erl is a server for Minecraft 1.4.7 written in Erlang.
+pkg_mc_erl_homepage = https://github.com/clonejo/mc-erl
+pkg_mc_erl_fetch = git
+pkg_mc_erl_repo = https://github.com/clonejo/mc-erl
+pkg_mc_erl_commit = master
+
+PACKAGES += mcd
+pkg_mcd_name = mcd
+pkg_mcd_description = Fast memcached protocol client in pure Erlang
+pkg_mcd_homepage = https://github.com/EchoTeam/mcd
+pkg_mcd_fetch = git
+pkg_mcd_repo = https://github.com/EchoTeam/mcd
+pkg_mcd_commit = master
+
+PACKAGES += mcerlang
+pkg_mcerlang_name = mcerlang
+pkg_mcerlang_description = The McErlang model checker for Erlang
+pkg_mcerlang_homepage = https://github.com/fredlund/McErlang
+pkg_mcerlang_fetch = git
+pkg_mcerlang_repo = https://github.com/fredlund/McErlang
+pkg_mcerlang_commit = master
+
+PACKAGES += meck
+pkg_meck_name = meck
+pkg_meck_description = A mocking library for Erlang
+pkg_meck_homepage = https://github.com/eproxus/meck
+pkg_meck_fetch = git
+pkg_meck_repo = https://github.com/eproxus/meck
+pkg_meck_commit = master
+
+PACKAGES += mekao
+pkg_mekao_name = mekao
+pkg_mekao_description = SQL constructor
+pkg_mekao_homepage = https://github.com/ddosia/mekao
+pkg_mekao_fetch = git
+pkg_mekao_repo = https://github.com/ddosia/mekao
+pkg_mekao_commit = master
+
+PACKAGES += memo
+pkg_memo_name = memo
+pkg_memo_description = Erlang memoization server
+pkg_memo_homepage = https://github.com/tuncer/memo
+pkg_memo_fetch = git
+pkg_memo_repo = https://github.com/tuncer/memo
+pkg_memo_commit = master
+
+PACKAGES += merge_index
+pkg_merge_index_name = merge_index
+pkg_merge_index_description = MergeIndex is an Erlang library for storing ordered sets on disk. It is very similar to an SSTable (in Google's Bigtable) or an HFile (in Hadoop).
+pkg_merge_index_homepage = https://github.com/basho/merge_index
+pkg_merge_index_fetch = git
+pkg_merge_index_repo = https://github.com/basho/merge_index
+pkg_merge_index_commit = master
+
+PACKAGES += merl
+pkg_merl_name = merl
+pkg_merl_description = Metaprogramming in Erlang
+pkg_merl_homepage = https://github.com/richcarl/merl
+pkg_merl_fetch = git
+pkg_merl_repo = https://github.com/richcarl/merl
+pkg_merl_commit = master
+
+PACKAGES += mimerl
+pkg_mimerl_name = mimerl
+pkg_mimerl_description = library to handle mimetypes
+pkg_mimerl_homepage = https://github.com/benoitc/mimerl
+pkg_mimerl_fetch = git
+pkg_mimerl_repo = https://github.com/benoitc/mimerl
+pkg_mimerl_commit = master
+
+PACKAGES += mimetypes
+pkg_mimetypes_name = mimetypes
+pkg_mimetypes_description = Erlang MIME types library
+pkg_mimetypes_homepage = https://github.com/spawngrid/mimetypes
+pkg_mimetypes_fetch = git
+pkg_mimetypes_repo = https://github.com/spawngrid/mimetypes
+pkg_mimetypes_commit = master
+
+PACKAGES += mixer
+pkg_mixer_name = mixer
+pkg_mixer_description = Mix in functions from other modules
+pkg_mixer_homepage = https://github.com/chef/mixer
+pkg_mixer_fetch = git
+pkg_mixer_repo = https://github.com/chef/mixer
+pkg_mixer_commit = master
+
+PACKAGES += mochiweb
+pkg_mochiweb_name = mochiweb
+pkg_mochiweb_description = MochiWeb is an Erlang library for building lightweight HTTP servers.
+pkg_mochiweb_homepage = https://github.com/mochi/mochiweb
+pkg_mochiweb_fetch = git
+pkg_mochiweb_repo = https://github.com/mochi/mochiweb
+pkg_mochiweb_commit = master
+
+PACKAGES += mochiweb_xpath
+pkg_mochiweb_xpath_name = mochiweb_xpath
+pkg_mochiweb_xpath_description = XPath support for mochiweb's html parser
+pkg_mochiweb_xpath_homepage = https://github.com/retnuh/mochiweb_xpath
+pkg_mochiweb_xpath_fetch = git
+pkg_mochiweb_xpath_repo = https://github.com/retnuh/mochiweb_xpath
+pkg_mochiweb_xpath_commit = master
+
+PACKAGES += mockgyver
+pkg_mockgyver_name = mockgyver
+pkg_mockgyver_description = A mocking library for Erlang
+pkg_mockgyver_homepage = https://github.com/klajo/mockgyver
+pkg_mockgyver_fetch = git
+pkg_mockgyver_repo = https://github.com/klajo/mockgyver
+pkg_mockgyver_commit = master
+
+PACKAGES += modlib
+pkg_modlib_name = modlib
+pkg_modlib_description = Web framework based on Erlang's inets httpd
+pkg_modlib_homepage = https://github.com/gar1t/modlib
+pkg_modlib_fetch = git
+pkg_modlib_repo = https://github.com/gar1t/modlib
+pkg_modlib_commit = master
+
+PACKAGES += mongodb
+pkg_mongodb_name = mongodb
+pkg_mongodb_description = MongoDB driver for Erlang
+pkg_mongodb_homepage = https://github.com/comtihon/mongodb-erlang
+pkg_mongodb_fetch = git
+pkg_mongodb_repo = https://github.com/comtihon/mongodb-erlang
+pkg_mongodb_commit = master
+
+PACKAGES += mongooseim
+pkg_mongooseim_name = mongooseim
+pkg_mongooseim_description = Jabber / XMPP server with focus on performance and scalability, by Erlang Solutions
+pkg_mongooseim_homepage = https://www.erlang-solutions.com/products/mongooseim-massively-scalable-ejabberd-platform
+pkg_mongooseim_fetch = git
+pkg_mongooseim_repo = https://github.com/esl/MongooseIM
+pkg_mongooseim_commit = master
+
+PACKAGES += moyo
+pkg_moyo_name = moyo
+pkg_moyo_description = Erlang utility functions library
+pkg_moyo_homepage = https://github.com/dwango/moyo
+pkg_moyo_fetch = git
+pkg_moyo_repo = https://github.com/dwango/moyo
+pkg_moyo_commit = master
+
+PACKAGES += msgpack
+pkg_msgpack_name = msgpack
+pkg_msgpack_description = MessagePack (de)serializer implementation for Erlang
+pkg_msgpack_homepage = https://github.com/msgpack/msgpack-erlang
+pkg_msgpack_fetch = git
+pkg_msgpack_repo = https://github.com/msgpack/msgpack-erlang
+pkg_msgpack_commit = master
+
+PACKAGES += mu2
+pkg_mu2_name = mu2
+pkg_mu2_description = Erlang mutation testing tool
+pkg_mu2_homepage = https://github.com/ramsay-t/mu2
+pkg_mu2_fetch = git
+pkg_mu2_repo = https://github.com/ramsay-t/mu2
+pkg_mu2_commit = master
+
+PACKAGES += mustache
+pkg_mustache_name = mustache
+pkg_mustache_description = Mustache template engine for Erlang.
+pkg_mustache_homepage = https://github.com/mojombo/mustache.erl
+pkg_mustache_fetch = git
+pkg_mustache_repo = https://github.com/mojombo/mustache.erl
+pkg_mustache_commit = master
+
+PACKAGES += myproto
+pkg_myproto_name = myproto
+pkg_myproto_description = MySQL Server Protocol in Erlang
+pkg_myproto_homepage = https://github.com/altenwald/myproto
+pkg_myproto_fetch = git
+pkg_myproto_repo = https://github.com/altenwald/myproto
+pkg_myproto_commit = master
+
+PACKAGES += mysql
+pkg_mysql_name = mysql
+pkg_mysql_description = MySQL client library for Erlang/OTP
+pkg_mysql_homepage = https://github.com/mysql-otp/mysql-otp
+pkg_mysql_fetch = git
+pkg_mysql_repo = https://github.com/mysql-otp/mysql-otp
+pkg_mysql_commit = 1.5.1
+
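+# Unlike most entries, mysql pins a release tag (1.5.1) instead of master,
+# so fetches resolve to a fixed version by default. Any package can be
+# pinned the same way from a project Makefile with a dep_<name> override,
+# for example (the tag shown is illustrative):
+#
+#   dep_jsx = git https://github.com/talentdeficit/jsx 2.9.0
+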
+PACKAGES += n2o
+pkg_n2o_name = n2o
+pkg_n2o_description = WebSocket Application Server
+pkg_n2o_homepage = https://github.com/5HT/n2o
+pkg_n2o_fetch = git
+pkg_n2o_repo = https://github.com/5HT/n2o
+pkg_n2o_commit = master
+
+PACKAGES += nat_upnp
+pkg_nat_upnp_name = nat_upnp
+pkg_nat_upnp_description = Erlang library to map your internal port to an external one using UPnP IGD
+pkg_nat_upnp_homepage = https://github.com/benoitc/nat_upnp
+pkg_nat_upnp_fetch = git
+pkg_nat_upnp_repo = https://github.com/benoitc/nat_upnp
+pkg_nat_upnp_commit = master
+
+PACKAGES += neo4j
+pkg_neo4j_name = neo4j
+pkg_neo4j_description = Erlang client library for Neo4J.
+pkg_neo4j_homepage = https://github.com/dmitriid/neo4j-erlang
+pkg_neo4j_fetch = git
+pkg_neo4j_repo = https://github.com/dmitriid/neo4j-erlang
+pkg_neo4j_commit = master
+
+PACKAGES += neotoma
+pkg_neotoma_name = neotoma
+pkg_neotoma_description = Erlang library and packrat parser-generator for parsing expression grammars.
+pkg_neotoma_homepage = https://github.com/seancribbs/neotoma
+pkg_neotoma_fetch = git
+pkg_neotoma_repo = https://github.com/seancribbs/neotoma
+pkg_neotoma_commit = master
+
+PACKAGES += newrelic
+pkg_newrelic_name = newrelic
+pkg_newrelic_description = Erlang library for sending metrics to New Relic
+pkg_newrelic_homepage = https://github.com/wooga/newrelic-erlang
+pkg_newrelic_fetch = git
+pkg_newrelic_repo = https://github.com/wooga/newrelic-erlang
+pkg_newrelic_commit = master
+
+PACKAGES += nifty
+pkg_nifty_name = nifty
+pkg_nifty_description = Erlang NIF wrapper generator
+pkg_nifty_homepage = https://github.com/parapluu/nifty
+pkg_nifty_fetch = git
+pkg_nifty_repo = https://github.com/parapluu/nifty
+pkg_nifty_commit = master
+
+PACKAGES += nitrogen_core
+pkg_nitrogen_core_name = nitrogen_core
+pkg_nitrogen_core_description = The core Nitrogen library.
+pkg_nitrogen_core_homepage = http://nitrogenproject.com/
+pkg_nitrogen_core_fetch = git
+pkg_nitrogen_core_repo = https://github.com/nitrogen/nitrogen_core
+pkg_nitrogen_core_commit = master
+
+PACKAGES += nkbase
+pkg_nkbase_name = nkbase
+pkg_nkbase_description = NkBASE distributed database
+pkg_nkbase_homepage = https://github.com/Nekso/nkbase
+pkg_nkbase_fetch = git
+pkg_nkbase_repo = https://github.com/Nekso/nkbase
+pkg_nkbase_commit = develop
+
+PACKAGES += nkdocker
+pkg_nkdocker_name = nkdocker
+pkg_nkdocker_description = Erlang Docker client
+pkg_nkdocker_homepage = https://github.com/Nekso/nkdocker
+pkg_nkdocker_fetch = git
+pkg_nkdocker_repo = https://github.com/Nekso/nkdocker
+pkg_nkdocker_commit = master
+
+PACKAGES += nkpacket
+pkg_nkpacket_name = nkpacket
+pkg_nkpacket_description = Generic Erlang transport layer
+pkg_nkpacket_homepage = https://github.com/Nekso/nkpacket
+pkg_nkpacket_fetch = git
+pkg_nkpacket_repo = https://github.com/Nekso/nkpacket
+pkg_nkpacket_commit = master
+
+PACKAGES += nksip
+pkg_nksip_name = nksip
+pkg_nksip_description = Erlang SIP application server
+pkg_nksip_homepage = https://github.com/kalta/nksip
+pkg_nksip_fetch = git
+pkg_nksip_repo = https://github.com/kalta/nksip
+pkg_nksip_commit = master
+
+PACKAGES += nodefinder
+pkg_nodefinder_name = nodefinder
+pkg_nodefinder_description = automatic node discovery via UDP multicast
+pkg_nodefinder_homepage = https://github.com/erlanger/nodefinder
+pkg_nodefinder_fetch = git
+pkg_nodefinder_repo = https://github.com/okeuday/nodefinder
+pkg_nodefinder_commit = master
+
+PACKAGES += nprocreg
+pkg_nprocreg_name = nprocreg
+pkg_nprocreg_description = Minimal Distributed Erlang Process Registry
+pkg_nprocreg_homepage = http://nitrogenproject.com/
+pkg_nprocreg_fetch = git
+pkg_nprocreg_repo = https://github.com/nitrogen/nprocreg
+pkg_nprocreg_commit = master
+
+PACKAGES += oauth
+pkg_oauth_name = oauth
+pkg_oauth_description = An Erlang OAuth 1.0 implementation
+pkg_oauth_homepage = https://github.com/tim/erlang-oauth
+pkg_oauth_fetch = git
+pkg_oauth_repo = https://github.com/tim/erlang-oauth
+pkg_oauth_commit = master
+
+PACKAGES += oauth2
+pkg_oauth2_name = oauth2
+pkg_oauth2_description = Erlang OAuth2 implementation
+pkg_oauth2_homepage = https://github.com/kivra/oauth2
+pkg_oauth2_fetch = git
+pkg_oauth2_repo = https://github.com/kivra/oauth2
+pkg_oauth2_commit = master
+
+PACKAGES += observer_cli
+pkg_observer_cli_name = observer_cli
+pkg_observer_cli_description = Visualize Erlang/Elixir Nodes On The Command Line
+pkg_observer_cli_homepage = http://zhongwencool.github.io/observer_cli
+pkg_observer_cli_fetch = git
+pkg_observer_cli_repo = https://github.com/zhongwencool/observer_cli
+pkg_observer_cli_commit = master
+
+PACKAGES += octopus
+pkg_octopus_name = octopus
+pkg_octopus_description = Small and flexible pool manager written in Erlang
+pkg_octopus_homepage = https://github.com/erlangbureau/octopus
+pkg_octopus_fetch = git
+pkg_octopus_repo = https://github.com/erlangbureau/octopus
+pkg_octopus_commit = master
+
+PACKAGES += of_protocol
+pkg_of_protocol_name = of_protocol
+pkg_of_protocol_description = OpenFlow Protocol Library for Erlang
+pkg_of_protocol_homepage = https://github.com/FlowForwarding/of_protocol
+pkg_of_protocol_fetch = git
+pkg_of_protocol_repo = https://github.com/FlowForwarding/of_protocol
+pkg_of_protocol_commit = master
+
+PACKAGES += opencouch
+pkg_opencouch_name = couch
+pkg_opencouch_description = An embeddable document-oriented database compatible with Apache CouchDB
+pkg_opencouch_homepage = https://github.com/benoitc/opencouch
+pkg_opencouch_fetch = git
+pkg_opencouch_repo = https://github.com/benoitc/opencouch
+pkg_opencouch_commit = master
+
+PACKAGES += openflow
+pkg_openflow_name = openflow
+pkg_openflow_description = An OpenFlow controller written in pure erlang
+pkg_openflow_homepage = https://github.com/renatoaguiar/erlang-openflow
+pkg_openflow_fetch = git
+pkg_openflow_repo = https://github.com/renatoaguiar/erlang-openflow
+pkg_openflow_commit = master
+
+PACKAGES += openid
+pkg_openid_name = openid
+pkg_openid_description = Erlang OpenID
+pkg_openid_homepage = https://github.com/brendonh/erl_openid
+pkg_openid_fetch = git
+pkg_openid_repo = https://github.com/brendonh/erl_openid
+pkg_openid_commit = master
+
+PACKAGES += openpoker
+pkg_openpoker_name = openpoker
+pkg_openpoker_description = Genesis Texas hold'em Game Server
+pkg_openpoker_homepage = https://github.com/hpyhacking/openpoker
+pkg_openpoker_fetch = git
+pkg_openpoker_repo = https://github.com/hpyhacking/openpoker
+pkg_openpoker_commit = master
+
+PACKAGES += otpbp
+pkg_otpbp_name = otpbp
+pkg_otpbp_description = Parse transform for using new OTP functions in old Erlang/OTP releases (R15, R16, 17, 18, 19)
+pkg_otpbp_homepage = https://github.com/Ledest/otpbp
+pkg_otpbp_fetch = git
+pkg_otpbp_repo = https://github.com/Ledest/otpbp
+pkg_otpbp_commit = master
+
+PACKAGES += pal
+pkg_pal_name = pal
+pkg_pal_description = Pragmatic Authentication Library
+pkg_pal_homepage = https://github.com/manifest/pal
+pkg_pal_fetch = git
+pkg_pal_repo = https://github.com/manifest/pal
+pkg_pal_commit = master
+
+PACKAGES += parse_trans
+pkg_parse_trans_name = parse_trans
+pkg_parse_trans_description = Parse transform utilities for Erlang
+pkg_parse_trans_homepage = https://github.com/uwiger/parse_trans
+pkg_parse_trans_fetch = git
+pkg_parse_trans_repo = https://github.com/uwiger/parse_trans
+pkg_parse_trans_commit = master
+
+PACKAGES += parsexml
+pkg_parsexml_name = parsexml
+pkg_parsexml_description = Simple DOM XML parser with convenient and very simple API
+pkg_parsexml_homepage = https://github.com/maxlapshin/parsexml
+pkg_parsexml_fetch = git
+pkg_parsexml_repo = https://github.com/maxlapshin/parsexml
+pkg_parsexml_commit = master
+
+PACKAGES += partisan
+pkg_partisan_name = partisan
+pkg_partisan_description = High-performance, high-scalability distributed computing with Erlang and Elixir.
+pkg_partisan_homepage = http://partisan.cloud
+pkg_partisan_fetch = git
+pkg_partisan_repo = https://github.com/lasp-lang/partisan
+pkg_partisan_commit = master
+
+PACKAGES += pegjs
+pkg_pegjs_name = pegjs
+pkg_pegjs_description = An implementation of PEG.js grammar for Erlang.
+pkg_pegjs_homepage = https://github.com/dmitriid/pegjs
+pkg_pegjs_fetch = git
+pkg_pegjs_repo = https://github.com/dmitriid/pegjs
+pkg_pegjs_commit = master
+
+PACKAGES += percept2
+pkg_percept2_name = percept2
+pkg_percept2_description = Concurrent profiling tool for Erlang
+pkg_percept2_homepage = https://github.com/huiqing/percept2
+pkg_percept2_fetch = git
+pkg_percept2_repo = https://github.com/huiqing/percept2
+pkg_percept2_commit = master
+
+PACKAGES += pgo
+pkg_pgo_name = pgo
+pkg_pgo_description = Erlang Postgres client and connection pool
+pkg_pgo_homepage = https://github.com/erleans/pgo.git
+pkg_pgo_fetch = git
+pkg_pgo_repo = https://github.com/erleans/pgo.git
+pkg_pgo_commit = master
+
+PACKAGES += pgsql
+pkg_pgsql_name = pgsql
+pkg_pgsql_description = Erlang PostgreSQL driver
+pkg_pgsql_homepage = https://github.com/semiocast/pgsql
+pkg_pgsql_fetch = git
+pkg_pgsql_repo = https://github.com/semiocast/pgsql
+pkg_pgsql_commit = master
+
+PACKAGES += pkgx
+pkg_pkgx_name = pkgx
+pkg_pkgx_description = Build .deb packages from Erlang releases
+pkg_pkgx_homepage = https://github.com/arjan/pkgx
+pkg_pkgx_fetch = git
+pkg_pkgx_repo = https://github.com/arjan/pkgx
+pkg_pkgx_commit = master
+
+PACKAGES += pkt
+pkg_pkt_name = pkt
+pkg_pkt_description = Erlang network protocol library
+pkg_pkt_homepage = https://github.com/msantos/pkt
+pkg_pkt_fetch = git
+pkg_pkt_repo = https://github.com/msantos/pkt
+pkg_pkt_commit = master
+
+PACKAGES += plain_fsm
+pkg_plain_fsm_name = plain_fsm
+pkg_plain_fsm_description = A behaviour/support library for writing plain Erlang FSMs.
+pkg_plain_fsm_homepage = https://github.com/uwiger/plain_fsm
+pkg_plain_fsm_fetch = git
+pkg_plain_fsm_repo = https://github.com/uwiger/plain_fsm
+pkg_plain_fsm_commit = master
+
+PACKAGES += plumtree
+pkg_plumtree_name = plumtree
+pkg_plumtree_description = Epidemic Broadcast Trees
+pkg_plumtree_homepage = https://github.com/helium/plumtree
+pkg_plumtree_fetch = git
+pkg_plumtree_repo = https://github.com/helium/plumtree
+pkg_plumtree_commit = master
+
+PACKAGES += pmod_transform
+pkg_pmod_transform_name = pmod_transform
+pkg_pmod_transform_description = Parse transform for parameterized modules
+pkg_pmod_transform_homepage = https://github.com/erlang/pmod_transform
+pkg_pmod_transform_fetch = git
+pkg_pmod_transform_repo = https://github.com/erlang/pmod_transform
+pkg_pmod_transform_commit = master
+
+PACKAGES += pobox
+pkg_pobox_name = pobox
+pkg_pobox_description = External buffer processes to protect against mailbox overflow in Erlang
+pkg_pobox_homepage = https://github.com/ferd/pobox
+pkg_pobox_fetch = git
+pkg_pobox_repo = https://github.com/ferd/pobox
+pkg_pobox_commit = master
+
+PACKAGES += ponos
+pkg_ponos_name = ponos
+pkg_ponos_description = ponos is a simple yet powerful load generator written in erlang
+pkg_ponos_homepage = https://github.com/klarna/ponos
+pkg_ponos_fetch = git
+pkg_ponos_repo = https://github.com/klarna/ponos
+pkg_ponos_commit = master
+
+PACKAGES += poolboy
+pkg_poolboy_name = poolboy
+pkg_poolboy_description = A hunky Erlang worker pool factory
+pkg_poolboy_homepage = https://github.com/devinus/poolboy
+pkg_poolboy_fetch = git
+pkg_poolboy_repo = https://github.com/devinus/poolboy
+pkg_poolboy_commit = master
+
+PACKAGES += pooler
+pkg_pooler_name = pooler
+pkg_pooler_description = An OTP Process Pool Application
+pkg_pooler_homepage = https://github.com/seth/pooler
+pkg_pooler_fetch = git
+pkg_pooler_repo = https://github.com/seth/pooler
+pkg_pooler_commit = master
+
+PACKAGES += pqueue
+pkg_pqueue_name = pqueue
+pkg_pqueue_description = Erlang Priority Queues
+pkg_pqueue_homepage = https://github.com/okeuday/pqueue
+pkg_pqueue_fetch = git
+pkg_pqueue_repo = https://github.com/okeuday/pqueue
+pkg_pqueue_commit = master
+
+PACKAGES += procket
+pkg_procket_name = procket
+pkg_procket_description = Erlang interface to low level socket operations
+pkg_procket_homepage = http://blog.listincomprehension.com/search/label/procket
+pkg_procket_fetch = git
+pkg_procket_repo = https://github.com/msantos/procket
+pkg_procket_commit = master
+
+PACKAGES += prometheus
+pkg_prometheus_name = prometheus
+pkg_prometheus_description = Prometheus.io client in Erlang
+pkg_prometheus_homepage = https://github.com/deadtrickster/prometheus.erl
+pkg_prometheus_fetch = git
+pkg_prometheus_repo = https://github.com/deadtrickster/prometheus.erl
+pkg_prometheus_commit = master
+
+PACKAGES += prop
+pkg_prop_name = prop
+pkg_prop_description = An Erlang code scaffolding and generator system.
+pkg_prop_homepage = https://github.com/nuex/prop
+pkg_prop_fetch = git
+pkg_prop_repo = https://github.com/nuex/prop
+pkg_prop_commit = master
+
+PACKAGES += proper
+pkg_proper_name = proper
+pkg_proper_description = PropEr: a QuickCheck-inspired property-based testing tool for Erlang.
+pkg_proper_homepage = http://proper.softlab.ntua.gr
+pkg_proper_fetch = git
+pkg_proper_repo = https://github.com/manopapad/proper
+pkg_proper_commit = master
+
+PACKAGES += props
+pkg_props_name = props
+pkg_props_description = Property structure library
+pkg_props_homepage = https://github.com/greyarea/props
+pkg_props_fetch = git
+pkg_props_repo = https://github.com/greyarea/props
+pkg_props_commit = master
+
+PACKAGES += protobuffs
+pkg_protobuffs_name = protobuffs
+pkg_protobuffs_description = An implementation of Google's Protocol Buffers for Erlang, based on ngerakines/erlang_protobuffs.
+pkg_protobuffs_homepage = https://github.com/basho/erlang_protobuffs
+pkg_protobuffs_fetch = git
+pkg_protobuffs_repo = https://github.com/basho/erlang_protobuffs
+pkg_protobuffs_commit = master
+
+PACKAGES += psycho
+pkg_psycho_name = psycho
+pkg_psycho_description = HTTP server that provides a WSGI-like interface for applications and middleware.
+pkg_psycho_homepage = https://github.com/gar1t/psycho
+pkg_psycho_fetch = git
+pkg_psycho_repo = https://github.com/gar1t/psycho
+pkg_psycho_commit = master
+
+PACKAGES += purity
+pkg_purity_name = purity
+pkg_purity_description = A side-effect analyzer for Erlang
+pkg_purity_homepage = https://github.com/mpitid/purity
+pkg_purity_fetch = git
+pkg_purity_repo = https://github.com/mpitid/purity
+pkg_purity_commit = master
+
+PACKAGES += push_service
+pkg_push_service_name = push_service
+pkg_push_service_description = Push service
+pkg_push_service_homepage = https://github.com/hairyhum/push_service
+pkg_push_service_fetch = git
+pkg_push_service_repo = https://github.com/hairyhum/push_service
+pkg_push_service_commit = master
+
+PACKAGES += qdate
+pkg_qdate_name = qdate
+pkg_qdate_description = Date, time, and timezone parsing, formatting, and conversion for Erlang.
+pkg_qdate_homepage = https://github.com/choptastic/qdate
+pkg_qdate_fetch = git
+pkg_qdate_repo = https://github.com/choptastic/qdate
+pkg_qdate_commit = master
+
+PACKAGES += qrcode
+pkg_qrcode_name = qrcode
+pkg_qrcode_description = QR Code encoder in Erlang
+pkg_qrcode_homepage = https://github.com/komone/qrcode
+pkg_qrcode_fetch = git
+pkg_qrcode_repo = https://github.com/komone/qrcode
+pkg_qrcode_commit = master
+
+PACKAGES += quest
+pkg_quest_name = quest
+pkg_quest_description = Learn Erlang through this set of challenges. An interactive system for getting to know Erlang.
+pkg_quest_homepage = https://github.com/eriksoe/ErlangQuest
+pkg_quest_fetch = git
+pkg_quest_repo = https://github.com/eriksoe/ErlangQuest
+pkg_quest_commit = master
+
+PACKAGES += quickrand
+pkg_quickrand_name = quickrand
+pkg_quickrand_description = Quick Erlang Random Number Generation
+pkg_quickrand_homepage = https://github.com/okeuday/quickrand
+pkg_quickrand_fetch = git
+pkg_quickrand_repo = https://github.com/okeuday/quickrand
+pkg_quickrand_commit = master
+
+PACKAGES += rabbit
+pkg_rabbit_name = rabbit
+pkg_rabbit_description = RabbitMQ Server
+pkg_rabbit_homepage = https://www.rabbitmq.com/
+pkg_rabbit_fetch = git
+pkg_rabbit_repo = https://github.com/rabbitmq/rabbitmq-server.git
+pkg_rabbit_commit = master
+
+PACKAGES += rabbit_exchange_type_riak
+pkg_rabbit_exchange_type_riak_name = rabbit_exchange_type_riak
+pkg_rabbit_exchange_type_riak_description = Custom RabbitMQ exchange type for sticking messages in Riak
+pkg_rabbit_exchange_type_riak_homepage = https://github.com/jbrisbin/riak-exchange
+pkg_rabbit_exchange_type_riak_fetch = git
+pkg_rabbit_exchange_type_riak_repo = https://github.com/jbrisbin/riak-exchange
+pkg_rabbit_exchange_type_riak_commit = master
+
+PACKAGES += rack
+pkg_rack_name = rack
+pkg_rack_description = Rack handler for erlang
+pkg_rack_homepage = https://github.com/erlyvideo/rack
+pkg_rack_fetch = git
+pkg_rack_repo = https://github.com/erlyvideo/rack
+pkg_rack_commit = master
+
+PACKAGES += radierl
+pkg_radierl_name = radierl
+pkg_radierl_description = RADIUS protocol stack implemented in Erlang.
+pkg_radierl_homepage = https://github.com/vances/radierl
+pkg_radierl_fetch = git
+pkg_radierl_repo = https://github.com/vances/radierl
+pkg_radierl_commit = master
+
+PACKAGES += rafter
+pkg_rafter_name = rafter
+pkg_rafter_description = An Erlang library application which implements the Raft consensus protocol
+pkg_rafter_homepage = https://github.com/andrewjstone/rafter
+pkg_rafter_fetch = git
+pkg_rafter_repo = https://github.com/andrewjstone/rafter
+pkg_rafter_commit = master
+
+PACKAGES += ranch
+pkg_ranch_name = ranch
+pkg_ranch_description = Socket acceptor pool for TCP protocols.
+pkg_ranch_homepage = http://ninenines.eu
+pkg_ranch_fetch = git
+pkg_ranch_repo = https://github.com/ninenines/ranch
+pkg_ranch_commit = 1.2.1
+
+PACKAGES += rbeacon
+pkg_rbeacon_name = rbeacon
+pkg_rbeacon_description = LAN discovery and presence in Erlang.
+pkg_rbeacon_homepage = https://github.com/refuge/rbeacon
+pkg_rbeacon_fetch = git
+pkg_rbeacon_repo = https://github.com/refuge/rbeacon
+pkg_rbeacon_commit = master
+
+PACKAGES += rebar
+pkg_rebar_name = rebar
+pkg_rebar_description = Erlang build tool that makes it easy to compile and test Erlang applications, port drivers and releases.
+pkg_rebar_homepage = http://www.rebar3.org
+pkg_rebar_fetch = git
+pkg_rebar_repo = https://github.com/rebar/rebar3
+pkg_rebar_commit = master
+
+PACKAGES += rebus
+pkg_rebus_name = rebus
+pkg_rebus_description = A stupid simple, internal pub/sub event bus written in and for Erlang.
+pkg_rebus_homepage = https://github.com/olle/rebus
+pkg_rebus_fetch = git
+pkg_rebus_repo = https://github.com/olle/rebus
+pkg_rebus_commit = master
+
+PACKAGES += rec2json
+pkg_rec2json_name = rec2json
+pkg_rec2json_description = Compile erlang record definitions into modules to convert them to/from json easily.
+pkg_rec2json_homepage = https://github.com/lordnull/rec2json
+pkg_rec2json_fetch = git
+pkg_rec2json_repo = https://github.com/lordnull/rec2json
+pkg_rec2json_commit = master
+
+PACKAGES += recon
+pkg_recon_name = recon
+pkg_recon_description = Collection of functions and scripts to debug Erlang in production.
+pkg_recon_homepage = https://github.com/ferd/recon
+pkg_recon_fetch = git
+pkg_recon_repo = https://github.com/ferd/recon
+pkg_recon_commit = master
+
+PACKAGES += record_info
+pkg_record_info_name = record_info
+pkg_record_info_description = Convert between record and proplist
+pkg_record_info_homepage = https://github.com/bipthelin/erlang-record_info
+pkg_record_info_fetch = git
+pkg_record_info_repo = https://github.com/bipthelin/erlang-record_info
+pkg_record_info_commit = master
+
+PACKAGES += redgrid
+pkg_redgrid_name = redgrid
+pkg_redgrid_description = automatic Erlang node discovery via redis
+pkg_redgrid_homepage = https://github.com/jkvor/redgrid
+pkg_redgrid_fetch = git
+pkg_redgrid_repo = https://github.com/jkvor/redgrid
+pkg_redgrid_commit = master
+
+PACKAGES += redo
+pkg_redo_name = redo
+pkg_redo_description = pipelined erlang redis client
+pkg_redo_homepage = https://github.com/jkvor/redo
+pkg_redo_fetch = git
+pkg_redo_repo = https://github.com/jkvor/redo
+pkg_redo_commit = master
+
+PACKAGES += reload_mk
+pkg_reload_mk_name = reload_mk
+pkg_reload_mk_description = Live reload plugin for erlang.mk.
+pkg_reload_mk_homepage = https://github.com/bullno1/reload.mk
+pkg_reload_mk_fetch = git
+pkg_reload_mk_repo = https://github.com/bullno1/reload.mk
+pkg_reload_mk_commit = master
+
+PACKAGES += reltool_util
+pkg_reltool_util_name = reltool_util
+pkg_reltool_util_description = Erlang reltool utility functionality application
+pkg_reltool_util_homepage = https://github.com/okeuday/reltool_util
+pkg_reltool_util_fetch = git
+pkg_reltool_util_repo = https://github.com/okeuday/reltool_util
+pkg_reltool_util_commit = master
+
+PACKAGES += relx
+pkg_relx_name = relx
+pkg_relx_description = Sane, simple release creation for Erlang
+pkg_relx_homepage = https://github.com/erlware/relx
+pkg_relx_fetch = git
+pkg_relx_repo = https://github.com/erlware/relx
+pkg_relx_commit = master
+
+PACKAGES += resource_discovery
+pkg_resource_discovery_name = resource_discovery
+pkg_resource_discovery_description = An application used to dynamically discover resources present in an Erlang node cluster.
+pkg_resource_discovery_homepage = http://erlware.org/
+pkg_resource_discovery_fetch = git
+pkg_resource_discovery_repo = https://github.com/erlware/resource_discovery
+pkg_resource_discovery_commit = master
+
+PACKAGES += restc
+pkg_restc_name = restc
+pkg_restc_description = Erlang REST client
+pkg_restc_homepage = https://github.com/kivra/restclient
+pkg_restc_fetch = git
+pkg_restc_repo = https://github.com/kivra/restclient
+pkg_restc_commit = master
+
+PACKAGES += rfc4627_jsonrpc
+pkg_rfc4627_jsonrpc_name = rfc4627_jsonrpc
+pkg_rfc4627_jsonrpc_description = Erlang RFC4627 (JSON) codec and JSON-RPC server implementation.
+pkg_rfc4627_jsonrpc_homepage = https://github.com/tonyg/erlang-rfc4627
+pkg_rfc4627_jsonrpc_fetch = git
+pkg_rfc4627_jsonrpc_repo = https://github.com/tonyg/erlang-rfc4627
+pkg_rfc4627_jsonrpc_commit = master
+
+PACKAGES += riak_control
+pkg_riak_control_name = riak_control
+pkg_riak_control_description = Webmachine-based administration interface for Riak.
+pkg_riak_control_homepage = https://github.com/basho/riak_control
+pkg_riak_control_fetch = git
+pkg_riak_control_repo = https://github.com/basho/riak_control
+pkg_riak_control_commit = master
+
+PACKAGES += riak_core
+pkg_riak_core_name = riak_core
+pkg_riak_core_description = Distributed systems infrastructure used by Riak.
+pkg_riak_core_homepage = https://github.com/basho/riak_core
+pkg_riak_core_fetch = git
+pkg_riak_core_repo = https://github.com/basho/riak_core
+pkg_riak_core_commit = master
+
+PACKAGES += riak_dt
+pkg_riak_dt_name = riak_dt
+pkg_riak_dt_description = Convergent replicated datatypes in Erlang
+pkg_riak_dt_homepage = https://github.com/basho/riak_dt
+pkg_riak_dt_fetch = git
+pkg_riak_dt_repo = https://github.com/basho/riak_dt
+pkg_riak_dt_commit = master
+
+PACKAGES += riak_ensemble
+pkg_riak_ensemble_name = riak_ensemble
+pkg_riak_ensemble_description = Multi-Paxos framework in Erlang
+pkg_riak_ensemble_homepage = https://github.com/basho/riak_ensemble
+pkg_riak_ensemble_fetch = git
+pkg_riak_ensemble_repo = https://github.com/basho/riak_ensemble
+pkg_riak_ensemble_commit = master
+
+PACKAGES += riak_kv
+pkg_riak_kv_name = riak_kv
+pkg_riak_kv_description = Riak Key/Value Store
+pkg_riak_kv_homepage = https://github.com/basho/riak_kv
+pkg_riak_kv_fetch = git
+pkg_riak_kv_repo = https://github.com/basho/riak_kv
+pkg_riak_kv_commit = master
+
+PACKAGES += riak_pg
+pkg_riak_pg_name = riak_pg
+pkg_riak_pg_description = Distributed process groups with riak_core.
+pkg_riak_pg_homepage = https://github.com/cmeiklejohn/riak_pg
+pkg_riak_pg_fetch = git
+pkg_riak_pg_repo = https://github.com/cmeiklejohn/riak_pg
+pkg_riak_pg_commit = master
+
+PACKAGES += riak_pipe
+pkg_riak_pipe_name = riak_pipe
+pkg_riak_pipe_description = Riak Pipelines
+pkg_riak_pipe_homepage = https://github.com/basho/riak_pipe
+pkg_riak_pipe_fetch = git
+pkg_riak_pipe_repo = https://github.com/basho/riak_pipe
+pkg_riak_pipe_commit = master
+
+PACKAGES += riak_sysmon
+pkg_riak_sysmon_name = riak_sysmon
+pkg_riak_sysmon_description = Simple OTP app for managing Erlang VM system_monitor event messages
+pkg_riak_sysmon_homepage = https://github.com/basho/riak_sysmon
+pkg_riak_sysmon_fetch = git
+pkg_riak_sysmon_repo = https://github.com/basho/riak_sysmon
+pkg_riak_sysmon_commit = master
+
+PACKAGES += riak_test
+pkg_riak_test_name = riak_test
+pkg_riak_test_description = I'm in your cluster, testing your riaks
+pkg_riak_test_homepage = https://github.com/basho/riak_test
+pkg_riak_test_fetch = git
+pkg_riak_test_repo = https://github.com/basho/riak_test
+pkg_riak_test_commit = master
+
+PACKAGES += riakc
+pkg_riakc_name = riakc
+pkg_riakc_description = Erlang clients for Riak.
+pkg_riakc_homepage = https://github.com/basho/riak-erlang-client
+pkg_riakc_fetch = git
+pkg_riakc_repo = https://github.com/basho/riak-erlang-client
+pkg_riakc_commit = master
+
+PACKAGES += riakhttpc
+pkg_riakhttpc_name = riakhttpc
+pkg_riakhttpc_description = Riak Erlang client using the HTTP interface
+pkg_riakhttpc_homepage = https://github.com/basho/riak-erlang-http-client
+pkg_riakhttpc_fetch = git
+pkg_riakhttpc_repo = https://github.com/basho/riak-erlang-http-client
+pkg_riakhttpc_commit = master
+
+PACKAGES += riaknostic
+pkg_riaknostic_name = riaknostic
+pkg_riaknostic_description = A diagnostic tool for Riak installations, to find common errors asap
+pkg_riaknostic_homepage = https://github.com/basho/riaknostic
+pkg_riaknostic_fetch = git
+pkg_riaknostic_repo = https://github.com/basho/riaknostic
+pkg_riaknostic_commit = master
+
+PACKAGES += riakpool
+pkg_riakpool_name = riakpool
+pkg_riakpool_description = erlang riak client pool
+pkg_riakpool_homepage = https://github.com/dweldon/riakpool
+pkg_riakpool_fetch = git
+pkg_riakpool_repo = https://github.com/dweldon/riakpool
+pkg_riakpool_commit = master
+
+PACKAGES += rivus_cep
+pkg_rivus_cep_name = rivus_cep
+pkg_rivus_cep_description = Complex event processing in Erlang
+pkg_rivus_cep_homepage = https://github.com/vascokk/rivus_cep
+pkg_rivus_cep_fetch = git
+pkg_rivus_cep_repo = https://github.com/vascokk/rivus_cep
+pkg_rivus_cep_commit = master
+
+PACKAGES += rlimit
+pkg_rlimit_name = rlimit
+pkg_rlimit_description = Magnus Klaar's rate limiter code from etorrent
+pkg_rlimit_homepage = https://github.com/jlouis/rlimit
+pkg_rlimit_fetch = git
+pkg_rlimit_repo = https://github.com/jlouis/rlimit
+pkg_rlimit_commit = master
+
+PACKAGES += rust_mk
+pkg_rust_mk_name = rust_mk
+pkg_rust_mk_description = Build Rust crates in an Erlang application
+pkg_rust_mk_homepage = https://github.com/goertzenator/rust.mk
+pkg_rust_mk_fetch = git
+pkg_rust_mk_repo = https://github.com/goertzenator/rust.mk
+pkg_rust_mk_commit = master
+
+PACKAGES += safetyvalve
+pkg_safetyvalve_name = safetyvalve
+pkg_safetyvalve_description = A safety valve for your erlang node
+pkg_safetyvalve_homepage = https://github.com/jlouis/safetyvalve
+pkg_safetyvalve_fetch = git
+pkg_safetyvalve_repo = https://github.com/jlouis/safetyvalve
+pkg_safetyvalve_commit = master
+
+PACKAGES += seestar
+pkg_seestar_name = seestar
+pkg_seestar_description = The Erlang client for Cassandra 1.2+ binary protocol
+pkg_seestar_homepage = https://github.com/iamaleksey/seestar
+pkg_seestar_fetch = git
+pkg_seestar_repo = https://github.com/iamaleksey/seestar
+pkg_seestar_commit = master
+
+PACKAGES += service
+pkg_service_name = service
+pkg_service_description = A minimal Erlang behavior for creating CloudI internal services
+pkg_service_homepage = http://cloudi.org/
+pkg_service_fetch = git
+pkg_service_repo = https://github.com/CloudI/service
+pkg_service_commit = master
+
+PACKAGES += setup
+pkg_setup_name = setup
+pkg_setup_description = Generic setup utility for Erlang-based systems
+pkg_setup_homepage = https://github.com/uwiger/setup
+pkg_setup_fetch = git
+pkg_setup_repo = https://github.com/uwiger/setup
+pkg_setup_commit = master
+
+PACKAGES += sext
+pkg_sext_name = sext
+pkg_sext_description = Sortable Erlang Term Serialization
+pkg_sext_homepage = https://github.com/uwiger/sext
+pkg_sext_fetch = git
+pkg_sext_repo = https://github.com/uwiger/sext
+pkg_sext_commit = master
+
+PACKAGES += sfmt
+pkg_sfmt_name = sfmt
+pkg_sfmt_description = SFMT pseudo random number generator for Erlang.
+pkg_sfmt_homepage = https://github.com/jj1bdx/sfmt-erlang
+pkg_sfmt_fetch = git
+pkg_sfmt_repo = https://github.com/jj1bdx/sfmt-erlang
+pkg_sfmt_commit = master
+
+PACKAGES += sgte
+pkg_sgte_name = sgte
+pkg_sgte_description = A simple Erlang Template Engine
+pkg_sgte_homepage = https://github.com/filippo/sgte
+pkg_sgte_fetch = git
+pkg_sgte_repo = https://github.com/filippo/sgte
+pkg_sgte_commit = master
+
+PACKAGES += sheriff
+pkg_sheriff_name = sheriff
+pkg_sheriff_description = Parse transform for type based validation.
+pkg_sheriff_homepage = http://ninenines.eu
+pkg_sheriff_fetch = git
+pkg_sheriff_repo = https://github.com/extend/sheriff
+pkg_sheriff_commit = master
+
+PACKAGES += shotgun
+pkg_shotgun_name = shotgun
+pkg_shotgun_description = better than just a gun
+pkg_shotgun_homepage = https://github.com/inaka/shotgun
+pkg_shotgun_fetch = git
+pkg_shotgun_repo = https://github.com/inaka/shotgun
+pkg_shotgun_commit = master
+
+PACKAGES += sidejob
+pkg_sidejob_name = sidejob
+pkg_sidejob_description = Parallel worker and capacity limiting library for Erlang
+pkg_sidejob_homepage = https://github.com/basho/sidejob
+pkg_sidejob_fetch = git
+pkg_sidejob_repo = https://github.com/basho/sidejob
+pkg_sidejob_commit = master
+
+PACKAGES += sieve
+pkg_sieve_name = sieve
+pkg_sieve_description = sieve is a simple TCP routing proxy (layer 7) in erlang
+pkg_sieve_homepage = https://github.com/benoitc/sieve
+pkg_sieve_fetch = git
+pkg_sieve_repo = https://github.com/benoitc/sieve
+pkg_sieve_commit = master
+
+PACKAGES += sighandler
+pkg_sighandler_name = sighandler
+pkg_sighandler_description = Handle UNIX signals in Erlang
+pkg_sighandler_homepage = https://github.com/jkingsbery/sighandler
+pkg_sighandler_fetch = git
+pkg_sighandler_repo = https://github.com/jkingsbery/sighandler
+pkg_sighandler_commit = master
+
+PACKAGES += simhash
+pkg_simhash_name = simhash
+pkg_simhash_description = Simhashing for Erlang -- hashing algorithm to find near-duplicates in binary data.
+pkg_simhash_homepage = https://github.com/ferd/simhash
+pkg_simhash_fetch = git
+pkg_simhash_repo = https://github.com/ferd/simhash
+pkg_simhash_commit = master
+
+PACKAGES += simple_bridge
+pkg_simple_bridge_name = simple_bridge
+pkg_simple_bridge_description = A simple, standardized interface library to Erlang HTTP Servers.
+pkg_simple_bridge_homepage = https://github.com/nitrogen/simple_bridge
+pkg_simple_bridge_fetch = git
+pkg_simple_bridge_repo = https://github.com/nitrogen/simple_bridge
+pkg_simple_bridge_commit = master
+
+PACKAGES += simple_oauth2
+pkg_simple_oauth2_name = simple_oauth2
+pkg_simple_oauth2_description = Simple erlang OAuth2 client module for any http server framework (Google, Facebook, Yandex, Vkontakte are preconfigured)
+pkg_simple_oauth2_homepage = https://github.com/virtan/simple_oauth2
+pkg_simple_oauth2_fetch = git
+pkg_simple_oauth2_repo = https://github.com/virtan/simple_oauth2
+pkg_simple_oauth2_commit = master
+
+PACKAGES += skel
+pkg_skel_name = skel
+pkg_skel_description = A Streaming Process-based Skeleton Library for Erlang
+pkg_skel_homepage = https://github.com/ParaPhrase/skel
+pkg_skel_fetch = git
+pkg_skel_repo = https://github.com/ParaPhrase/skel
+pkg_skel_commit = master
+
+PACKAGES += slack
+pkg_slack_name = slack
+pkg_slack_description = Minimal slack notification OTP library.
+pkg_slack_homepage = https://github.com/DonBranson/slack
+pkg_slack_fetch = git
+pkg_slack_repo = https://github.com/DonBranson/slack.git
+pkg_slack_commit = master
+
+PACKAGES += smother
+pkg_smother_name = smother
+pkg_smother_description = Extended code coverage metrics for Erlang.
+pkg_smother_homepage = https://ramsay-t.github.io/Smother/
+pkg_smother_fetch = git
+pkg_smother_repo = https://github.com/ramsay-t/Smother
+pkg_smother_commit = master
+
+PACKAGES += snappyer
+pkg_snappyer_name = snappyer
+pkg_snappyer_description = Snappy as a NIF for Erlang
+pkg_snappyer_homepage = https://github.com/zmstone/snappyer
+pkg_snappyer_fetch = git
+pkg_snappyer_repo = https://github.com/zmstone/snappyer.git
+pkg_snappyer_commit = master
+
+PACKAGES += social
+pkg_social_name = social
+pkg_social_description = Cowboy handler for social login via OAuth2 providers
+pkg_social_homepage = https://github.com/dvv/social
+pkg_social_fetch = git
+pkg_social_repo = https://github.com/dvv/social
+pkg_social_commit = master
+
+PACKAGES += spapi_router
+pkg_spapi_router_name = spapi_router
+pkg_spapi_router_description = Partially-connected Erlang clustering
+pkg_spapi_router_homepage = https://github.com/spilgames/spapi-router
+pkg_spapi_router_fetch = git
+pkg_spapi_router_repo = https://github.com/spilgames/spapi-router
+pkg_spapi_router_commit = master
+
+PACKAGES += sqerl
+pkg_sqerl_name = sqerl
+pkg_sqerl_description = An Erlang-flavoured SQL DSL
+pkg_sqerl_homepage = https://github.com/hairyhum/sqerl
+pkg_sqerl_fetch = git
+pkg_sqerl_repo = https://github.com/hairyhum/sqerl
+pkg_sqerl_commit = master
+
+PACKAGES += srly
+pkg_srly_name = srly
+pkg_srly_description = Native Erlang Unix serial interface
+pkg_srly_homepage = https://github.com/msantos/srly
+pkg_srly_fetch = git
+pkg_srly_repo = https://github.com/msantos/srly
+pkg_srly_commit = master
+
+PACKAGES += sshrpc
+pkg_sshrpc_name = sshrpc
+pkg_sshrpc_description = Erlang SSH RPC module (experimental)
+pkg_sshrpc_homepage = https://github.com/jj1bdx/sshrpc
+pkg_sshrpc_fetch = git
+pkg_sshrpc_repo = https://github.com/jj1bdx/sshrpc
+pkg_sshrpc_commit = master
+
+PACKAGES += stable
+pkg_stable_name = stable
+pkg_stable_description = Library of assorted helpers for Cowboy web server.
+pkg_stable_homepage = https://github.com/dvv/stable
+pkg_stable_fetch = git
+pkg_stable_repo = https://github.com/dvv/stable
+pkg_stable_commit = master
+
+PACKAGES += statebox
+pkg_statebox_name = statebox
+pkg_statebox_description = Erlang state monad with merge/conflict-resolution capabilities. Useful for Riak.
+pkg_statebox_homepage = https://github.com/mochi/statebox
+pkg_statebox_fetch = git
+pkg_statebox_repo = https://github.com/mochi/statebox
+pkg_statebox_commit = master
+
+PACKAGES += statebox_riak
+pkg_statebox_riak_name = statebox_riak
+pkg_statebox_riak_description = Convenience library that makes it easier to use statebox with riak, extracted from best practices in our production code at Mochi Media.
+pkg_statebox_riak_homepage = https://github.com/mochi/statebox_riak
+pkg_statebox_riak_fetch = git
+pkg_statebox_riak_repo = https://github.com/mochi/statebox_riak
+pkg_statebox_riak_commit = master
+
+PACKAGES += statman
+pkg_statman_name = statman
+pkg_statman_description = Efficiently collect massive volumes of metrics inside the Erlang VM
+pkg_statman_homepage = https://github.com/knutin/statman
+pkg_statman_fetch = git
+pkg_statman_repo = https://github.com/knutin/statman
+pkg_statman_commit = master
+
+PACKAGES += statsderl
+pkg_statsderl_name = statsderl
+pkg_statsderl_description = StatsD client (erlang)
+pkg_statsderl_homepage = https://github.com/lpgauth/statsderl
+pkg_statsderl_fetch = git
+pkg_statsderl_repo = https://github.com/lpgauth/statsderl
+pkg_statsderl_commit = master
+
+PACKAGES += stdinout_pool
+pkg_stdinout_pool_name = stdinout_pool
+pkg_stdinout_pool_description = stdinout_pool : stuff goes in, stuff goes out. there's never any miscommunication.
+pkg_stdinout_pool_homepage = https://github.com/mattsta/erlang-stdinout-pool
+pkg_stdinout_pool_fetch = git
+pkg_stdinout_pool_repo = https://github.com/mattsta/erlang-stdinout-pool
+pkg_stdinout_pool_commit = master
+
+PACKAGES += stockdb
+pkg_stockdb_name = stockdb
+pkg_stockdb_description = Database for storing Stock Exchange quotes in erlang
+pkg_stockdb_homepage = https://github.com/maxlapshin/stockdb
+pkg_stockdb_fetch = git
+pkg_stockdb_repo = https://github.com/maxlapshin/stockdb
+pkg_stockdb_commit = master
+
+PACKAGES += stripe
+pkg_stripe_name = stripe
+pkg_stripe_description = Erlang interface to the stripe.com API
+pkg_stripe_homepage = https://github.com/mattsta/stripe-erlang
+pkg_stripe_fetch = git
+pkg_stripe_repo = https://github.com/mattsta/stripe-erlang
+pkg_stripe_commit = v1
+
+PACKAGES += subproc
+pkg_subproc_name = subproc
+pkg_subproc_description = unix subprocess manager with {active,once|false} modes
+pkg_subproc_homepage = http://dozzie.jarowit.net/trac/wiki/subproc
+pkg_subproc_fetch = git
+pkg_subproc_repo = https://github.com/dozzie/subproc
+pkg_subproc_commit = v0.1.0
+
+PACKAGES += supervisor3
+pkg_supervisor3_name = supervisor3
+pkg_supervisor3_description = OTP supervisor with additional strategies
+pkg_supervisor3_homepage = https://github.com/klarna/supervisor3
+pkg_supervisor3_fetch = git
+pkg_supervisor3_repo = https://github.com/klarna/supervisor3.git
+pkg_supervisor3_commit = master
+
+PACKAGES += surrogate
+pkg_surrogate_name = surrogate
+pkg_surrogate_description = Proxy server written in erlang. Supports reverse proxy load balancing and forward proxy with http (including CONNECT), socks4, socks5, and transparent proxy modes.
+pkg_surrogate_homepage = https://github.com/skruger/Surrogate
+pkg_surrogate_fetch = git
+pkg_surrogate_repo = https://github.com/skruger/Surrogate
+pkg_surrogate_commit = master
+
+PACKAGES += swab
+pkg_swab_name = swab
+pkg_swab_description = General purpose buffer handling module
+pkg_swab_homepage = https://github.com/crownedgrouse/swab
+pkg_swab_fetch = git
+pkg_swab_repo = https://github.com/crownedgrouse/swab
+pkg_swab_commit = master
+
+PACKAGES += swarm
+pkg_swarm_name = swarm
+pkg_swarm_description = Fast and simple acceptor pool for Erlang
+pkg_swarm_homepage = https://github.com/jeremey/swarm
+pkg_swarm_fetch = git
+pkg_swarm_repo = https://github.com/jeremey/swarm
+pkg_swarm_commit = master
+
+PACKAGES += switchboard
+pkg_switchboard_name = switchboard
+pkg_switchboard_description = A framework for processing email using worker plugins.
+pkg_switchboard_homepage = https://github.com/thusfresh/switchboard
+pkg_switchboard_fetch = git
+pkg_switchboard_repo = https://github.com/thusfresh/switchboard
+pkg_switchboard_commit = master
+
+PACKAGES += syn
+pkg_syn_name = syn
+pkg_syn_description = A global Process Registry and Process Group manager for Erlang.
+pkg_syn_homepage = https://github.com/ostinelli/syn
+pkg_syn_fetch = git
+pkg_syn_repo = https://github.com/ostinelli/syn
+pkg_syn_commit = master
+
+PACKAGES += sync
+pkg_sync_name = sync
+pkg_sync_description = On-the-fly recompiling and reloading in Erlang.
+pkg_sync_homepage = https://github.com/rustyio/sync
+pkg_sync_fetch = git
+pkg_sync_repo = https://github.com/rustyio/sync
+pkg_sync_commit = master
+
+PACKAGES += syntaxerl
+pkg_syntaxerl_name = syntaxerl
+pkg_syntaxerl_description = Syntax checker for Erlang
+pkg_syntaxerl_homepage = https://github.com/ten0s/syntaxerl
+pkg_syntaxerl_fetch = git
+pkg_syntaxerl_repo = https://github.com/ten0s/syntaxerl
+pkg_syntaxerl_commit = master
+
+PACKAGES += syslog
+pkg_syslog_name = syslog
+pkg_syslog_description = Erlang port driver for interacting with syslog via syslog(3)
+pkg_syslog_homepage = https://github.com/Vagabond/erlang-syslog
+pkg_syslog_fetch = git
+pkg_syslog_repo = https://github.com/Vagabond/erlang-syslog
+pkg_syslog_commit = master
+
+PACKAGES += taskforce
+pkg_taskforce_name = taskforce
+pkg_taskforce_description = Erlang worker pools for controlled parallelisation of arbitrary tasks.
+pkg_taskforce_homepage = https://github.com/g-andrade/taskforce
+pkg_taskforce_fetch = git
+pkg_taskforce_repo = https://github.com/g-andrade/taskforce
+pkg_taskforce_commit = master
+
+PACKAGES += tddreloader
+pkg_tddreloader_name = tddreloader
+pkg_tddreloader_description = Shell utility for recompiling, reloading, and testing code as it changes
+pkg_tddreloader_homepage = https://github.com/version2beta/tddreloader
+pkg_tddreloader_fetch = git
+pkg_tddreloader_repo = https://github.com/version2beta/tddreloader
+pkg_tddreloader_commit = master
+
+PACKAGES += tempo
+pkg_tempo_name = tempo
+pkg_tempo_description = NIF-based date and time parsing and formatting for Erlang.
+pkg_tempo_homepage = https://github.com/selectel/tempo
+pkg_tempo_fetch = git
+pkg_tempo_repo = https://github.com/selectel/tempo
+pkg_tempo_commit = master
+
+PACKAGES += ticktick
+pkg_ticktick_name = ticktick
+pkg_ticktick_description = Ticktick is an ID generator for message services.
+pkg_ticktick_homepage = https://github.com/ericliang/ticktick
+pkg_ticktick_fetch = git
+pkg_ticktick_repo = https://github.com/ericliang/ticktick
+pkg_ticktick_commit = master
+
+PACKAGES += tinymq
+pkg_tinymq_name = tinymq
+pkg_tinymq_description = TinyMQ - a diminutive, in-memory message queue
+pkg_tinymq_homepage = https://github.com/ChicagoBoss/tinymq
+pkg_tinymq_fetch = git
+pkg_tinymq_repo = https://github.com/ChicagoBoss/tinymq
+pkg_tinymq_commit = master
+
+PACKAGES += tinymt
+pkg_tinymt_name = tinymt
+pkg_tinymt_description = TinyMT pseudo random number generator for Erlang.
+pkg_tinymt_homepage = https://github.com/jj1bdx/tinymt-erlang
+pkg_tinymt_fetch = git
+pkg_tinymt_repo = https://github.com/jj1bdx/tinymt-erlang
+pkg_tinymt_commit = master
+
+PACKAGES += tirerl
+pkg_tirerl_name = tirerl
+pkg_tirerl_description = Erlang interface to Elasticsearch
+pkg_tirerl_homepage = https://github.com/inaka/tirerl
+pkg_tirerl_fetch = git
+pkg_tirerl_repo = https://github.com/inaka/tirerl
+pkg_tirerl_commit = master
+
+PACKAGES += toml
+pkg_toml_name = toml
+pkg_toml_description = TOML (0.4.0) config parser
+pkg_toml_homepage = http://dozzie.jarowit.net/trac/wiki/TOML
+pkg_toml_fetch = git
+pkg_toml_repo = https://github.com/dozzie/toml
+pkg_toml_commit = v0.2.0
+
+PACKAGES += traffic_tools
+pkg_traffic_tools_name = traffic_tools
+pkg_traffic_tools_description = Simple traffic limiting library
+pkg_traffic_tools_homepage = https://github.com/systra/traffic_tools
+pkg_traffic_tools_fetch = git
+pkg_traffic_tools_repo = https://github.com/systra/traffic_tools
+pkg_traffic_tools_commit = master
+
+PACKAGES += trails
+pkg_trails_name = trails
+pkg_trails_description = A couple of improvements over Cowboy Routes
+pkg_trails_homepage = http://inaka.github.io/cowboy-trails/
+pkg_trails_fetch = git
+pkg_trails_repo = https://github.com/inaka/cowboy-trails
+pkg_trails_commit = master
+
+PACKAGES += trane
+pkg_trane_name = trane
+pkg_trane_description = SAX-style parser for broken HTML, in Erlang
+pkg_trane_homepage = https://github.com/massemanet/trane
+pkg_trane_fetch = git
+pkg_trane_repo = https://github.com/massemanet/trane
+pkg_trane_commit = master
+
+PACKAGES += transit
+pkg_transit_name = transit
+pkg_transit_description = transit format for erlang
+pkg_transit_homepage = https://github.com/isaiah/transit-erlang
+pkg_transit_fetch = git
+pkg_transit_repo = https://github.com/isaiah/transit-erlang
+pkg_transit_commit = master
+
+PACKAGES += trie
+pkg_trie_name = trie
+pkg_trie_description = Erlang Trie Implementation
+pkg_trie_homepage = https://github.com/okeuday/trie
+pkg_trie_fetch = git
+pkg_trie_repo = https://github.com/okeuday/trie
+pkg_trie_commit = master
+
+PACKAGES += triq
+pkg_triq_name = triq
+pkg_triq_description = Trifork QuickCheck
+pkg_triq_homepage = https://triq.gitlab.io
+pkg_triq_fetch = git
+pkg_triq_repo = https://gitlab.com/triq/triq.git
+pkg_triq_commit = master
+
+PACKAGES += tunctl
+pkg_tunctl_name = tunctl
+pkg_tunctl_description = Erlang TUN/TAP interface
+pkg_tunctl_homepage = https://github.com/msantos/tunctl
+pkg_tunctl_fetch = git
+pkg_tunctl_repo = https://github.com/msantos/tunctl
+pkg_tunctl_commit = master
+
+PACKAGES += twerl
+pkg_twerl_name = twerl
+pkg_twerl_description = Erlang client for the Twitter Streaming API
+pkg_twerl_homepage = https://github.com/lucaspiller/twerl
+pkg_twerl_fetch = git
+pkg_twerl_repo = https://github.com/lucaspiller/twerl
+pkg_twerl_commit = oauth
+
+PACKAGES += twitter_erlang
+pkg_twitter_erlang_name = twitter_erlang
+pkg_twitter_erlang_description = An Erlang twitter client
+pkg_twitter_erlang_homepage = https://github.com/ngerakines/erlang_twitter
+pkg_twitter_erlang_fetch = git
+pkg_twitter_erlang_repo = https://github.com/ngerakines/erlang_twitter
+pkg_twitter_erlang_commit = master
+
+PACKAGES += ucol_nif
+pkg_ucol_nif_name = ucol_nif
+pkg_ucol_nif_description = ICU based collation Erlang module
+pkg_ucol_nif_homepage = https://github.com/refuge/ucol_nif
+pkg_ucol_nif_fetch = git
+pkg_ucol_nif_repo = https://github.com/refuge/ucol_nif
+pkg_ucol_nif_commit = master
+
+PACKAGES += unicorn
+pkg_unicorn_name = unicorn
+pkg_unicorn_description = Generic configuration server
+pkg_unicorn_homepage = https://github.com/shizzard/unicorn
+pkg_unicorn_fetch = git
+pkg_unicorn_repo = https://github.com/shizzard/unicorn
+pkg_unicorn_commit = master
+
+PACKAGES += unsplit
+pkg_unsplit_name = unsplit
+pkg_unsplit_description = Resolves conflicts in Mnesia after network splits
+pkg_unsplit_homepage = https://github.com/uwiger/unsplit
+pkg_unsplit_fetch = git
+pkg_unsplit_repo = https://github.com/uwiger/unsplit
+pkg_unsplit_commit = master
+
+PACKAGES += uuid
+pkg_uuid_name = uuid
+pkg_uuid_description = Erlang UUID Implementation
+pkg_uuid_homepage = https://github.com/okeuday/uuid
+pkg_uuid_fetch = git
+pkg_uuid_repo = https://github.com/okeuday/uuid
+pkg_uuid_commit = master
+
+PACKAGES += ux
+pkg_ux_name = ux
+pkg_ux_description = Unicode eXtension for Erlang (Strings, Collation)
+pkg_ux_homepage = https://github.com/erlang-unicode/ux
+pkg_ux_fetch = git
+pkg_ux_repo = https://github.com/erlang-unicode/ux
+pkg_ux_commit = master
+
+PACKAGES += vert
+pkg_vert_name = vert
+pkg_vert_description = Erlang binding to libvirt virtualization API
+pkg_vert_homepage = https://github.com/msantos/erlang-libvirt
+pkg_vert_fetch = git
+pkg_vert_repo = https://github.com/msantos/erlang-libvirt
+pkg_vert_commit = master
+
+PACKAGES += verx
+pkg_verx_name = verx
+pkg_verx_description = Erlang implementation of the libvirtd remote protocol
+pkg_verx_homepage = https://github.com/msantos/verx
+pkg_verx_fetch = git
+pkg_verx_repo = https://github.com/msantos/verx
+pkg_verx_commit = master
+
+PACKAGES += vmq_acl
+pkg_vmq_acl_name = vmq_acl
+pkg_vmq_acl_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_acl_homepage = https://verne.mq/
+pkg_vmq_acl_fetch = git
+pkg_vmq_acl_repo = https://github.com/erlio/vmq_acl
+pkg_vmq_acl_commit = master
+
+PACKAGES += vmq_bridge
+pkg_vmq_bridge_name = vmq_bridge
+pkg_vmq_bridge_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_bridge_homepage = https://verne.mq/
+pkg_vmq_bridge_fetch = git
+pkg_vmq_bridge_repo = https://github.com/erlio/vmq_bridge
+pkg_vmq_bridge_commit = master
+
+PACKAGES += vmq_graphite
+pkg_vmq_graphite_name = vmq_graphite
+pkg_vmq_graphite_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_graphite_homepage = https://verne.mq/
+pkg_vmq_graphite_fetch = git
+pkg_vmq_graphite_repo = https://github.com/erlio/vmq_graphite
+pkg_vmq_graphite_commit = master
+
+PACKAGES += vmq_passwd
+pkg_vmq_passwd_name = vmq_passwd
+pkg_vmq_passwd_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_passwd_homepage = https://verne.mq/
+pkg_vmq_passwd_fetch = git
+pkg_vmq_passwd_repo = https://github.com/erlio/vmq_passwd
+pkg_vmq_passwd_commit = master
+
+PACKAGES += vmq_server
+pkg_vmq_server_name = vmq_server
+pkg_vmq_server_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_server_homepage = https://verne.mq/
+pkg_vmq_server_fetch = git
+pkg_vmq_server_repo = https://github.com/erlio/vmq_server
+pkg_vmq_server_commit = master
+
+PACKAGES += vmq_snmp
+pkg_vmq_snmp_name = vmq_snmp
+pkg_vmq_snmp_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_snmp_homepage = https://verne.mq/
+pkg_vmq_snmp_fetch = git
+pkg_vmq_snmp_repo = https://github.com/erlio/vmq_snmp
+pkg_vmq_snmp_commit = master
+
+PACKAGES += vmq_systree
+pkg_vmq_systree_name = vmq_systree
+pkg_vmq_systree_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_systree_homepage = https://verne.mq/
+pkg_vmq_systree_fetch = git
+pkg_vmq_systree_repo = https://github.com/erlio/vmq_systree
+pkg_vmq_systree_commit = master
+
+PACKAGES += vmstats
+pkg_vmstats_name = vmstats
+pkg_vmstats_description = tiny Erlang app that works in conjunction with statsderl in order to generate information on the Erlang VM for graphite logs.
+pkg_vmstats_homepage = https://github.com/ferd/vmstats
+pkg_vmstats_fetch = git
+pkg_vmstats_repo = https://github.com/ferd/vmstats
+pkg_vmstats_commit = master
+
+PACKAGES += walrus
+pkg_walrus_name = walrus
+pkg_walrus_description = Walrus - Mustache-like Templating
+pkg_walrus_homepage = https://github.com/devinus/walrus
+pkg_walrus_fetch = git
+pkg_walrus_repo = https://github.com/devinus/walrus
+pkg_walrus_commit = master
+
+PACKAGES += webmachine
+pkg_webmachine_name = webmachine
+pkg_webmachine_description = A REST-based system for building web applications.
+pkg_webmachine_homepage = https://github.com/basho/webmachine
+pkg_webmachine_fetch = git
+pkg_webmachine_repo = https://github.com/basho/webmachine
+pkg_webmachine_commit = master
+
+PACKAGES += websocket_client
+pkg_websocket_client_name = websocket_client
+pkg_websocket_client_description = Erlang websocket client (ws and wss supported)
+pkg_websocket_client_homepage = https://github.com/jeremyong/websocket_client
+pkg_websocket_client_fetch = git
+pkg_websocket_client_repo = https://github.com/jeremyong/websocket_client
+pkg_websocket_client_commit = master
+
+PACKAGES += worker_pool
+pkg_worker_pool_name = worker_pool
+pkg_worker_pool_description = a simple erlang worker pool
+pkg_worker_pool_homepage = https://github.com/inaka/worker_pool
+pkg_worker_pool_fetch = git
+pkg_worker_pool_repo = https://github.com/inaka/worker_pool
+pkg_worker_pool_commit = master
+
+PACKAGES += wrangler
+pkg_wrangler_name = wrangler
+pkg_wrangler_description = Import of the Wrangler svn repository.
+pkg_wrangler_homepage = http://www.cs.kent.ac.uk/projects/wrangler/Home.html
+pkg_wrangler_fetch = git
+pkg_wrangler_repo = https://github.com/RefactoringTools/wrangler
+pkg_wrangler_commit = master
+
+PACKAGES += wsock
+pkg_wsock_name = wsock
+pkg_wsock_description = Erlang library to build WebSocket clients and servers
+pkg_wsock_homepage = https://github.com/madtrick/wsock
+pkg_wsock_fetch = git
+pkg_wsock_repo = https://github.com/madtrick/wsock
+pkg_wsock_commit = master
+
+PACKAGES += xhttpc
+pkg_xhttpc_name = xhttpc
+pkg_xhttpc_description = Extensible HTTP Client for Erlang
+pkg_xhttpc_homepage = https://github.com/seriyps/xhttpc
+pkg_xhttpc_fetch = git
+pkg_xhttpc_repo = https://github.com/seriyps/xhttpc
+pkg_xhttpc_commit = master
+
+PACKAGES += xref_runner
+pkg_xref_runner_name = xref_runner
+pkg_xref_runner_description = Erlang Xref Runner (inspired by rebar xref)
+pkg_xref_runner_homepage = https://github.com/inaka/xref_runner
+pkg_xref_runner_fetch = git
+pkg_xref_runner_repo = https://github.com/inaka/xref_runner
+pkg_xref_runner_commit = master
+
+PACKAGES += yamerl
+pkg_yamerl_name = yamerl
+pkg_yamerl_description = YAML 1.2 parser in pure Erlang
+pkg_yamerl_homepage = https://github.com/yakaz/yamerl
+pkg_yamerl_fetch = git
+pkg_yamerl_repo = https://github.com/yakaz/yamerl
+pkg_yamerl_commit = master
+
+PACKAGES += yamler
+pkg_yamler_name = yamler
+pkg_yamler_description = libyaml-based yaml loader for Erlang
+pkg_yamler_homepage = https://github.com/goertzenator/yamler
+pkg_yamler_fetch = git
+pkg_yamler_repo = https://github.com/goertzenator/yamler
+pkg_yamler_commit = master
+
+PACKAGES += yaws
+pkg_yaws_name = yaws
+pkg_yaws_description = Yaws webserver
+pkg_yaws_homepage = http://yaws.hyber.org
+pkg_yaws_fetch = git
+pkg_yaws_repo = https://github.com/klacke/yaws
+pkg_yaws_commit = master
+
+PACKAGES += zab_engine
+pkg_zab_engine_name = zab_engine
+pkg_zab_engine_description = ZAB protocol implementation in Erlang
+pkg_zab_engine_homepage = https://github.com/xinmingyao/zab_engine
+pkg_zab_engine_fetch = git
+pkg_zab_engine_repo = https://github.com/xinmingyao/zab_engine
+pkg_zab_engine_commit = master
+
+PACKAGES += zabbix_sender
+pkg_zabbix_sender_name = zabbix_sender
+pkg_zabbix_sender_description = Zabbix trapper for sending data to Zabbix in pure Erlang
+pkg_zabbix_sender_homepage = https://github.com/stalkermn/zabbix_sender
+pkg_zabbix_sender_fetch = git
+pkg_zabbix_sender_repo = https://github.com/stalkermn/zabbix_sender.git
+pkg_zabbix_sender_commit = master
+
+PACKAGES += zeta
+pkg_zeta_name = zeta
+pkg_zeta_description = HTTP access log parser in Erlang
+pkg_zeta_homepage = https://github.com/s1n4/zeta
+pkg_zeta_fetch = git
+pkg_zeta_repo = https://github.com/s1n4/zeta
+pkg_zeta_commit = master
+
+PACKAGES += zippers
+pkg_zippers_name = zippers
+pkg_zippers_description = A library for functional zipper data structures in Erlang.
+pkg_zippers_homepage = https://github.com/ferd/zippers
+pkg_zippers_fetch = git
+pkg_zippers_repo = https://github.com/ferd/zippers
+pkg_zippers_commit = master
+
+PACKAGES += zlists
+pkg_zlists_name = zlists
+pkg_zlists_description = Erlang lazy lists library.
+pkg_zlists_homepage = https://github.com/vjache/erlang-zlists
+pkg_zlists_fetch = git
+pkg_zlists_repo = https://github.com/vjache/erlang-zlists
+pkg_zlists_commit = master
+
+PACKAGES += zraft_lib
+pkg_zraft_lib_name = zraft_lib
+pkg_zraft_lib_description = Erlang Raft consensus protocol implementation
+pkg_zraft_lib_homepage = https://github.com/dreyk/zraft_lib
+pkg_zraft_lib_fetch = git
+pkg_zraft_lib_repo = https://github.com/dreyk/zraft_lib
+pkg_zraft_lib_commit = master
+
+PACKAGES += zucchini
+pkg_zucchini_name = zucchini
+pkg_zucchini_description = An Erlang INI parser
+pkg_zucchini_homepage = https://github.com/devinus/zucchini
+pkg_zucchini_fetch = git
+pkg_zucchini_repo = https://github.com/devinus/zucchini
+pkg_zucchini_commit = master
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: search
+
+define pkg_print
+ $(verbose) printf "%s\n" \
+ $(if $(call core_eq,$(1),$(pkg_$(1)_name)),,"Pkg name: $(1)") \
+ "App name: $(pkg_$(1)_name)" \
+ "Description: $(pkg_$(1)_description)" \
+ "Home page: $(pkg_$(1)_homepage)" \
+ "Fetch with: $(pkg_$(1)_fetch)" \
+ "Repository: $(pkg_$(1)_repo)" \
+ "Commit: $(pkg_$(1)_commit)" \
+ ""
+
+endef
+
+search:
+ifdef q
+ $(foreach p,$(PACKAGES), \
+ $(if $(findstring $(call core_lc,$(q)),$(call core_lc,$(pkg_$(p)_name) $(pkg_$(p)_description))), \
+ $(call pkg_print,$(p))))
+else
+ $(foreach p,$(PACKAGES),$(call pkg_print,$(p)))
+endif
+
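+# Usage sketch (illustrative, not part of the targets above): q is matched
+# case-insensitively against each package's name and description.
+#
+#   make search q=pool   # every entry whose name or description contains "pool"
+#   make search          # print the whole package index
+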
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-deps clean-tmp-deps.log
+
+# Configuration.
+
+ifdef OTP_DEPS
+$(warning The variable OTP_DEPS is deprecated in favor of LOCAL_DEPS.)
+endif
+
+IGNORE_DEPS ?=
+export IGNORE_DEPS
+
+APPS_DIR ?= $(CURDIR)/apps
+export APPS_DIR
+
+DEPS_DIR ?= $(CURDIR)/deps
+export DEPS_DIR
+
+REBAR_DEPS_DIR = $(DEPS_DIR)
+export REBAR_DEPS_DIR
+
+REBAR_GIT ?= https://github.com/rebar/rebar
+REBAR_COMMIT ?= 576e12171ab8d69b048b827b92aa65d067deea01
+
+# External "early" plugins (see core/plugins.mk for regular plugins).
+# They both use the core_dep_plugin macro.
+
+define core_dep_plugin
+ifeq ($(2),$(PROJECT))
+-include $$(patsubst $(PROJECT)/%,%,$(1))
+else
+-include $(DEPS_DIR)/$(1)
+
+$(DEPS_DIR)/$(1): $(DEPS_DIR)/$(2) ;
+endif
+endef
+
+DEP_EARLY_PLUGINS ?=
+
+$(foreach p,$(DEP_EARLY_PLUGINS),\
+ $(eval $(if $(findstring /,$p),\
+ $(call core_dep_plugin,$p,$(firstword $(subst /, ,$p))),\
+ $(call core_dep_plugin,$p/early-plugins.mk,$p))))
+
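+# Example (hypothetical dependency name): to load early plugins from a dep
+# called my_dep, either form below works; the first includes
+# $(DEPS_DIR)/my_dep/early-plugins.mk, the second an explicit file:
+#
+#   DEP_EARLY_PLUGINS = my_dep
+#   DEP_EARLY_PLUGINS = my_dep/mk/early-plugins.mk
+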
+# Query functions.
+
+query_fetch_method = $(if $(dep_$(1)),$(call _qfm_dep,$(word 1,$(dep_$(1)))),$(call _qfm_pkg,$(1)))
+_qfm_dep = $(if $(dep_fetch_$(1)),$(1),$(if $(IS_DEP),legacy,fail))
+_qfm_pkg = $(if $(pkg_$(1)_fetch),$(pkg_$(1)_fetch),fail)
+
+query_name = $(if $(dep_$(1)),$(1),$(if $(pkg_$(1)_name),$(pkg_$(1)_name),$(1)))
+
+query_repo = $(call _qr,$(1),$(call query_fetch_method,$(1)))
+_qr = $(if $(query_repo_$(2)),$(call query_repo_$(2),$(1)),$(call dep_repo,$(1)))
+
+query_repo_default = $(if $(dep_$(1)),$(word 2,$(dep_$(1))),$(pkg_$(1)_repo))
+query_repo_git = $(patsubst git://github.com/%,https://github.com/%,$(call query_repo_default,$(1)))
+query_repo_git-subfolder = $(call query_repo_git,$(1))
+query_repo_git-submodule = -
+query_repo_hg = $(call query_repo_default,$(1))
+query_repo_svn = $(call query_repo_default,$(1))
+query_repo_cp = $(call query_repo_default,$(1))
+query_repo_ln = $(call query_repo_default,$(1))
+query_repo_hex = https://hex.pm/packages/$(if $(word 3,$(dep_$(1))),$(word 3,$(dep_$(1))),$(1))
+query_repo_fail = -
+query_repo_legacy = -
+
+query_version = $(call _qv,$(1),$(call query_fetch_method,$(1)))
+_qv = $(if $(query_version_$(2)),$(call query_version_$(2),$(1)),$(call dep_commit,$(1)))
+
+query_version_default = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(word 3,$(dep_$(1))),$(pkg_$(1)_commit)))
+query_version_git = $(call query_version_default,$(1))
+query_version_git-subfolder = $(call query_version_git,$(1))
+query_version_git-submodule = -
+query_version_hg = $(call query_version_default,$(1))
+query_version_svn = -
+query_version_cp = -
+query_version_ln = -
+query_version_hex = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(word 2,$(dep_$(1))),$(pkg_$(1)_commit)))
+query_version_fail = -
+query_version_legacy = -
+
+query_extra = $(call _qe,$(1),$(call query_fetch_method,$(1)))
+_qe = $(if $(query_extra_$(2)),$(call query_extra_$(2),$(1)),-)
+
+query_extra_git = -
+query_extra_git-subfolder = $(if $(dep_$(1)),subfolder=$(word 4,$(dep_$(1))),-)
+query_extra_git-submodule = -
+query_extra_hg = -
+query_extra_svn = -
+query_extra_cp = -
+query_extra_ln = -
+query_extra_hex = $(if $(dep_$(1)),package-name=$(word 3,$(dep_$(1))),-)
+query_extra_fail = -
+query_extra_legacy = -
+
+query_absolute_path = $(addprefix $(DEPS_DIR)/,$(call query_name,$(1)))
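+
+# Illustrative example, assuming a hypothetical dependency declared as
+#     dep_cowlib = git https://github.com/ninenines/cowlib 2.9.1
+# query_fetch_method returns "git", query_repo the URL (git:// GitHub
+# URLs are rewritten to https://), query_version "2.9.1", and
+# query_absolute_path $(DEPS_DIR)/cowlib.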
+
+# Deprecated legacy query functions.
+dep_fetch = $(call query_fetch_method,$(1))
+dep_name = $(call query_name,$(1))
+dep_repo = $(call query_repo_git,$(1))
+dep_commit = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(if $(filter hex,$(word 1,$(dep_$(1)))),$(word 2,$(dep_$(1))),$(word 3,$(dep_$(1)))),$(pkg_$(1)_commit)))
+
+LOCAL_DEPS_DIRS = $(foreach a,$(LOCAL_DEPS),$(if $(wildcard $(APPS_DIR)/$(a)),$(APPS_DIR)/$(a)))
+ALL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(foreach dep,$(filter-out $(IGNORE_DEPS),$(BUILD_DEPS) $(DEPS)),$(call dep_name,$(dep))))
+
+# When we are calling an app directly we don't want to include it here;
+# otherwise it would be treated as both an app and a top-level project.
+ALL_APPS_DIRS = $(if $(wildcard $(APPS_DIR)/),$(filter-out $(APPS_DIR),$(shell find $(APPS_DIR) -maxdepth 1 -type d)))
+ifdef ROOT_DIR
+ifndef IS_APP
+ALL_APPS_DIRS := $(filter-out $(APPS_DIR)/$(notdir $(CURDIR)),$(ALL_APPS_DIRS))
+endif
+endif
+
+ifeq ($(filter $(APPS_DIR) $(DEPS_DIR),$(subst :, ,$(ERL_LIBS))),)
+ifeq ($(ERL_LIBS),)
+ ERL_LIBS = $(APPS_DIR):$(DEPS_DIR)
+else
+ ERL_LIBS := $(ERL_LIBS):$(APPS_DIR):$(DEPS_DIR)
+endif
+endif
+export ERL_LIBS
+
+export NO_AUTOPATCH
+
+# Verbosity.
+
+dep_verbose_0 = @echo " DEP $1 ($(call dep_commit,$1))";
+dep_verbose_2 = set -x;
+dep_verbose = $(dep_verbose_$(V))
+
+# Optimization: don't recompile deps unless truly necessary.
+
+ifndef IS_DEP
+ifneq ($(MAKELEVEL),0)
+$(shell rm -f ebin/dep_built)
+endif
+endif
+
+# Core targets.
+
+ALL_APPS_DIRS_TO_BUILD = $(if $(LOCAL_DEPS_DIRS)$(IS_APP),$(LOCAL_DEPS_DIRS),$(ALL_APPS_DIRS))
+
+apps:: $(ALL_APPS_DIRS) clean-tmp-deps.log | $(ERLANG_MK_TMP)
+# Create ebin directory for all apps to make sure Erlang recognizes them
+# as proper OTP applications when using -include_lib. This is a temporary
+# fix; a proper fix would be to compile apps/* in the right order.
+ifndef IS_APP
+ifneq ($(ALL_APPS_DIRS),)
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ mkdir -p $$dep/ebin; \
+ done
+endif
+endif
+# At the top level: if LOCAL_DEPS is defined with at least one local app,
+# only compile that list of apps. Otherwise, compile everything.
+# Within an app: compile all LOCAL_DEPS that are (uncompiled) local apps.
+ifneq ($(ALL_APPS_DIRS_TO_BUILD),)
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS_TO_BUILD); do \
+ if grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/apps.log; then \
+ :; \
+ else \
+ echo $$dep >> $(ERLANG_MK_TMP)/apps.log; \
+ $(MAKE) -C $$dep $(if $(IS_TEST),test-build-app) IS_APP=1; \
+ fi \
+ done
+endif
+
+clean-tmp-deps.log:
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_TMP)/apps.log $(ERLANG_MK_TMP)/deps.log
+endif
+
+# Erlang.mk does not rebuild dependencies after they have been compiled
+# once. Developers working on the top-level project and some of its
+# dependencies at the same time may want to change this behavior.
+# There are two solutions (see the example invocations below):
+# 1. Set `FULL=1` so that all dependencies are visited and
+#    recursively recompiled if necessary.
+# 2. Set `FORCE_REBUILD=` to the specific list of dependencies that
+#    should be recompiled (instead of the whole set).
+
+FORCE_REBUILD ?=
+
+ifeq ($(origin FULL),undefined)
+ifneq ($(strip $(force_rebuild_dep)$(FORCE_REBUILD)),)
+define force_rebuild_dep
+echo "$(FORCE_REBUILD)" | grep -qw "$$(basename "$1")"
+endef
+endif
+endif
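+
+# Example invocations (hypothetical dependency names):
+#
+#     make FULL=1                        # visit and rebuild all deps
+#     make FORCE_REBUILD="cowlib ranch"  # rebuild only these two deps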
+
+ifneq ($(SKIP_DEPS),)
+deps::
+else
+deps:: $(ALL_DEPS_DIRS) apps clean-tmp-deps.log | $(ERLANG_MK_TMP)
+ifneq ($(ALL_DEPS_DIRS),)
+ $(verbose) set -e; for dep in $(ALL_DEPS_DIRS); do \
+ if grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/deps.log; then \
+ :; \
+ else \
+ echo $$dep >> $(ERLANG_MK_TMP)/deps.log; \
+ if [ -z "$(strip $(FULL))" ] $(if $(force_rebuild_dep),&& ! ($(call force_rebuild_dep,$$dep)),) && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ elif [ -f $$dep/GNUmakefile ] || [ -f $$dep/makefile ] || [ -f $$dep/Makefile ]; then \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ else \
+ echo "Error: No Makefile to build dependency $$dep." >&2; \
+ exit 2; \
+ fi \
+ fi \
+ done
+endif
+endif
+
+# Deps related targets.
+
+# @todo Rename GNUmakefile and makefile to Makefile first, if they exist.
+# While the Makefile could also be named GNUmakefile or makefile,
+# in practice only Makefile has been needed so far.
+define dep_autopatch
+ if [ -f $(DEPS_DIR)/$(1)/erlang.mk ]; then \
+ rm -rf $(DEPS_DIR)/$1/ebin/; \
+ $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+ $(call dep_autopatch_erlang_mk,$(1)); \
+ elif [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+ if [ -f $(DEPS_DIR)/$1/rebar.lock ]; then \
+ $(call dep_autopatch2,$1); \
+ elif [ 0 != `grep -c "include ../\w*\.mk" $(DEPS_DIR)/$(1)/Makefile` ]; then \
+ $(call dep_autopatch2,$(1)); \
+ elif [ 0 != `grep -ci "^[^#].*rebar" $(DEPS_DIR)/$(1)/Makefile` ]; then \
+ $(call dep_autopatch2,$(1)); \
+ elif [ -n "`find $(DEPS_DIR)/$(1)/ -type f -name \*.mk -not -name erlang.mk -exec grep -i "^[^#].*rebar" '{}' \;`" ]; then \
+ $(call dep_autopatch2,$(1)); \
+ fi \
+ else \
+ if [ ! -d $(DEPS_DIR)/$(1)/src/ ]; then \
+ $(call dep_autopatch_noop,$(1)); \
+ else \
+ $(call dep_autopatch2,$(1)); \
+ fi \
+ fi
+endef
+
+define dep_autopatch2
+ ! test -f $(DEPS_DIR)/$1/ebin/$1.app || \
+ mv -n $(DEPS_DIR)/$1/ebin/$1.app $(DEPS_DIR)/$1/src/$1.app.src; \
+ rm -f $(DEPS_DIR)/$1/ebin/$1.app; \
+ if [ -f $(DEPS_DIR)/$1/src/$1.app.src.script ]; then \
+ $(call erlang,$(call dep_autopatch_appsrc_script.erl,$(1))); \
+ fi; \
+ $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+ if [ -f $(DEPS_DIR)/$(1)/rebar -o -f $(DEPS_DIR)/$(1)/rebar.config -o -f $(DEPS_DIR)/$(1)/rebar.config.script -o -f $(DEPS_DIR)/$1/rebar.lock ]; then \
+ $(call dep_autopatch_fetch_rebar); \
+ $(call dep_autopatch_rebar,$(1)); \
+ else \
+ $(call dep_autopatch_gen,$(1)); \
+ fi
+endef
+
+define dep_autopatch_noop
+ printf "noop:\n" > $(DEPS_DIR)/$(1)/Makefile
+endef
+
+# Replace "include erlang.mk" with a line that will load the parent Erlang.mk
+# if given. Do it for all 3 possible Makefile file names.
+ifeq ($(NO_AUTOPATCH_ERLANG_MK),)
+define dep_autopatch_erlang_mk
+ for f in Makefile makefile GNUmakefile; do \
+ if [ -f $(DEPS_DIR)/$1/$$f ]; then \
+ sed -i.bak s/'include *erlang.mk'/'include $$(if $$(ERLANG_MK_FILENAME),$$(ERLANG_MK_FILENAME),erlang.mk)'/ $(DEPS_DIR)/$1/$$f; \
+ fi \
+ done
+endef
+else
+define dep_autopatch_erlang_mk
+ :
+endef
+endif
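+
+# For example, a dependency Makefile line such as
+#     include erlang.mk
+# is rewritten by the sed expression above into
+#     include $(if $(ERLANG_MK_FILENAME),$(ERLANG_MK_FILENAME),erlang.mk)
+# so the dependency reuses the parent project's Erlang.mk when available.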
+
+define dep_autopatch_gen
+ printf "%s\n" \
+ "ERLC_OPTS = +debug_info" \
+ "include ../../erlang.mk" > $(DEPS_DIR)/$(1)/Makefile
+endef
+
+# We use flock/lockf when available to avoid concurrency issues.
+define dep_autopatch_fetch_rebar
+ if command -v flock >/dev/null; then \
+ flock $(ERLANG_MK_TMP)/rebar.lock sh -c "$(call dep_autopatch_fetch_rebar2)"; \
+ elif command -v lockf >/dev/null; then \
+ lockf $(ERLANG_MK_TMP)/rebar.lock sh -c "$(call dep_autopatch_fetch_rebar2)"; \
+ else \
+ $(call dep_autopatch_fetch_rebar2); \
+ fi
+endef
+
+define dep_autopatch_fetch_rebar2
+ if [ ! -d $(ERLANG_MK_TMP)/rebar ]; then \
+ git clone -q -n -- $(REBAR_GIT) $(ERLANG_MK_TMP)/rebar; \
+ cd $(ERLANG_MK_TMP)/rebar; \
+ git checkout -q $(REBAR_COMMIT); \
+ ./bootstrap; \
+ cd -; \
+ fi
+endef
+
+define dep_autopatch_rebar
+ if [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+ mv $(DEPS_DIR)/$(1)/Makefile $(DEPS_DIR)/$(1)/Makefile.orig.mk; \
+ fi; \
+ $(call erlang,$(call dep_autopatch_rebar.erl,$(1))); \
+ rm -f $(DEPS_DIR)/$(1)/ebin/$(1).app
+endef
+
+define dep_autopatch_rebar.erl
+ application:load(rebar),
+ application:set_env(rebar, log_level, debug),
+ rmemo:start(),
+ Conf1 = case file:consult("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config)") of
+ {ok, Conf0} -> Conf0;
+ _ -> []
+ end,
+ {Conf, OsEnv} = fun() ->
+ case filelib:is_file("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)") of
+ false -> {Conf1, []};
+ true ->
+ Bindings0 = erl_eval:new_bindings(),
+ Bindings1 = erl_eval:add_binding('CONFIG', Conf1, Bindings0),
+ Bindings = erl_eval:add_binding('SCRIPT', "$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings1),
+ Before = os:getenv(),
+ {ok, Conf2} = file:script("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings),
+ {Conf2, lists:foldl(fun(E, Acc) -> lists:delete(E, Acc) end, os:getenv(), Before)}
+ end
+ end(),
+ Write = fun (Text) ->
+ file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/Makefile)", Text, [append])
+ end,
+ Escape = fun (Text) ->
+ re:replace(Text, "\\\\$$", "\$$$$", [global, {return, list}])
+ end,
+ Write("IGNORE_DEPS += edown eper eunit_formatters meck node_package "
+ "rebar_lock_deps_plugin rebar_vsn_plugin reltool_util\n"),
+ Write("C_SRC_DIR = /path/do/not/exist\n"),
+ Write("C_SRC_TYPE = rebar\n"),
+ Write("DRV_CFLAGS = -fPIC\nexport DRV_CFLAGS\n"),
+ Write(["ERLANG_ARCH = ", rebar_utils:wordsize(), "\nexport ERLANG_ARCH\n"]),
+ ToList = fun
+ (V) when is_atom(V) -> atom_to_list(V);
+ (V) when is_list(V) -> "'\\"" ++ V ++ "\\"'"
+ end,
+ fun() ->
+ Write("ERLC_OPTS = +debug_info\nexport ERLC_OPTS\n"),
+ case lists:keyfind(erl_opts, 1, Conf) of
+ false -> ok;
+ {_, ErlOpts} ->
+ lists:foreach(fun
+ ({d, D}) ->
+ Write("ERLC_OPTS += -D" ++ ToList(D) ++ "=1\n");
+ ({d, DKey, DVal}) ->
+ Write("ERLC_OPTS += -D" ++ ToList(DKey) ++ "=" ++ ToList(DVal) ++ "\n");
+ ({i, I}) ->
+ Write(["ERLC_OPTS += -I ", I, "\n"]);
+ ({platform_define, Regex, D}) ->
+ case rebar_utils:is_arch(Regex) of
+ true -> Write("ERLC_OPTS += -D" ++ ToList(D) ++ "=1\n");
+ false -> ok
+ end;
+ ({parse_transform, PT}) ->
+ Write("ERLC_OPTS += +'{parse_transform, " ++ ToList(PT) ++ "}'\n");
+ (_) -> ok
+ end, ErlOpts)
+ end,
+ Write("\n")
+ end(),
+ GetHexVsn = fun(N, NP) ->
+ case file:consult("$(call core_native_path,$(DEPS_DIR)/$1/rebar.lock)") of
+ {ok, Lock} ->
+ io:format("~p~n", [Lock]),
+ case lists:keyfind("1.1.0", 1, Lock) of
+ {_, LockPkgs} ->
+ io:format("~p~n", [LockPkgs]),
+ case lists:keyfind(atom_to_binary(N, latin1), 1, LockPkgs) of
+ {_, {pkg, _, Vsn}, _} ->
+ io:format("~p~n", [Vsn]),
+ {N, {hex, NP, binary_to_list(Vsn)}};
+ _ ->
+ false
+ end;
+ _ ->
+ false
+ end;
+ _ ->
+ false
+ end
+ end,
+ SemVsn = fun
+ ("~>" ++ S0) ->
+ S = case S0 of
+ " " ++ S1 -> S1;
+ _ -> S0
+ end,
+ case length([ok || $$. <- S]) of
+ 0 -> S ++ ".0.0";
+ 1 -> S ++ ".0";
+ _ -> S
+ end;
+ (S) -> S
+ end,
+ fun() ->
+ File = case lists:keyfind(deps, 1, Conf) of
+ false -> [];
+ {_, Deps} ->
+ [begin case case Dep of
+ N when is_atom(N) -> GetHexVsn(N, N);
+ {N, S} when is_atom(N), is_list(S) -> {N, {hex, N, SemVsn(S)}};
+ {N, {pkg, NP}} when is_atom(N) -> GetHexVsn(N, NP);
+ {N, S, {pkg, NP}} -> {N, {hex, NP, S}};
+ {N, S} when is_tuple(S) -> {N, S};
+ {N, _, S} -> {N, S};
+ {N, _, S, _} -> {N, S};
+ _ -> false
+ end of
+ false -> ok;
+ {Name, Source} ->
+ {Method, Repo, Commit} = case Source of
+ {hex, NPV, V} -> {hex, V, NPV};
+ {git, R} -> {git, R, master};
+ {M, R, {branch, C}} -> {M, R, C};
+ {M, R, {ref, C}} -> {M, R, C};
+ {M, R, {tag, C}} -> {M, R, C};
+ {M, R, C} -> {M, R, C}
+ end,
+ Write(io_lib:format("DEPS += ~s\ndep_~s = ~s ~s ~s~n", [Name, Name, Method, Repo, Commit]))
+ end end || Dep <- Deps]
+ end
+ end(),
+ fun() ->
+ case lists:keyfind(erl_first_files, 1, Conf) of
+ false -> ok;
+ {_, Files} ->
+ Names = [[" ", case lists:reverse(F) of
+ "lre." ++ Elif -> lists:reverse(Elif);
+ "lrx." ++ Elif -> lists:reverse(Elif);
+ "lry." ++ Elif -> lists:reverse(Elif);
+ Elif -> lists:reverse(Elif)
+ end] || "src/" ++ F <- Files],
+ Write(io_lib:format("COMPILE_FIRST +=~s\n", [Names]))
+ end
+ end(),
+ Write("\n\nrebar_dep: preprocess pre-deps deps pre-app app\n"),
+ Write("\npreprocess::\n"),
+ Write("\npre-deps::\n"),
+ Write("\npre-app::\n"),
+ PatchHook = fun(Cmd) ->
+ Cmd2 = re:replace(Cmd, "^([g]?make)(.*)( -C.*)", "\\\\1\\\\3\\\\2", [{return, list}]),
+ case Cmd2 of
+ "make -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1);
+ "gmake -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1);
+ "make " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1);
+ "gmake " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1);
+ _ -> Escape(Cmd)
+ end
+ end,
+ fun() ->
+ case lists:keyfind(pre_hooks, 1, Conf) of
+ false -> ok;
+ {_, Hooks} ->
+ [case H of
+ {'get-deps', Cmd} ->
+ Write("\npre-deps::\n\t" ++ PatchHook(Cmd) ++ "\n");
+ {compile, Cmd} ->
+ Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n");
+ {Regex, compile, Cmd} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n");
+ false -> ok
+ end;
+ _ -> ok
+ end || H <- Hooks]
+ end
+ end(),
+ ShellToMk = fun(V0) ->
+ V1 = re:replace(V0, "[$$][(]", "$$\(shell ", [global]),
+ V = re:replace(V1, "([$$])(?![(])(\\\\w*)", "\\\\1(\\\\2)", [global]),
+ re:replace(V, "-Werror\\\\b", "", [{return, list}, global])
+ end,
+ PortSpecs = fun() ->
+ case lists:keyfind(port_specs, 1, Conf) of
+ false ->
+ case filelib:is_dir("$(call core_native_path,$(DEPS_DIR)/$1/c_src)") of
+ false -> [];
+ true ->
+ [{"priv/" ++ proplists:get_value(so_name, Conf, "$(1)_drv.so"),
+ proplists:get_value(port_sources, Conf, ["c_src/*.c"]), []}]
+ end;
+ {_, Specs} ->
+ lists:flatten([case S of
+ {Output, Input} -> {ShellToMk(Output), Input, []};
+ {Regex, Output, Input} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {ShellToMk(Output), Input, []};
+ false -> []
+ end;
+ {Regex, Output, Input, [{env, Env}]} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {ShellToMk(Output), Input, Env};
+ false -> []
+ end
+ end || S <- Specs])
+ end
+ end(),
+ PortSpecWrite = fun (Text) ->
+ file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/c_src/Makefile.erlang.mk)", Text, [append])
+ end,
+ case PortSpecs of
+ [] -> ok;
+ _ ->
+ Write("\npre-app::\n\t@$$\(MAKE) --no-print-directory -f c_src/Makefile.erlang.mk\n"),
+ PortSpecWrite(io_lib:format("ERL_CFLAGS ?= -finline-functions -Wall -fPIC -I \\"~s/erts-~s/include\\" -I \\"~s\\"\n",
+ [code:root_dir(), erlang:system_info(version), code:lib_dir(erl_interface, include)])),
+ PortSpecWrite(io_lib:format("ERL_LDFLAGS ?= -L \\"~s\\" -lei\n",
+ [code:lib_dir(erl_interface, lib)])),
+ [PortSpecWrite(["\n", E, "\n"]) || E <- OsEnv],
+ FilterEnv = fun(Env) ->
+ lists:flatten([case E of
+ {_, _} -> E;
+ {Regex, K, V} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {K, V};
+ false -> []
+ end
+ end || E <- Env])
+ end,
+ MergeEnv = fun(Env) ->
+ lists:foldl(fun ({K, V}, Acc) ->
+ case lists:keyfind(K, 1, Acc) of
+ false -> [{K, rebar_utils:expand_env_variable(V, K, "")}|Acc];
+ {_, V0} -> [{K, rebar_utils:expand_env_variable(V, K, V0)}|Acc]
+ end
+ end, [], Env)
+ end,
+ PortEnv = case lists:keyfind(port_env, 1, Conf) of
+ false -> [];
+ {_, PortEnv0} -> FilterEnv(PortEnv0)
+ end,
+ PortSpec = fun ({Output, Input0, Env}) ->
+ filelib:ensure_dir("$(call core_native_path,$(DEPS_DIR)/$1/)" ++ Output),
+ Input = [[" ", I] || I <- Input0],
+ PortSpecWrite([
+ [["\n", K, " = ", ShellToMk(V)] || {K, V} <- lists:reverse(MergeEnv(PortEnv))],
+ case $(PLATFORM) of
+ darwin -> "\n\nLDFLAGS += -flat_namespace -undefined suppress";
+ _ -> ""
+ end,
+ "\n\nall:: ", Output, "\n\t@:\n\n",
+ "%.o: %.c\n\t$$\(CC) -c -o $$\@ $$\< $$\(CFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.C\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.cc\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.cpp\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ [[Output, ": ", K, " += ", ShellToMk(V), "\n"] || {K, V} <- lists:reverse(MergeEnv(FilterEnv(Env)))],
+ Output, ": $$\(foreach ext,.c .C .cc .cpp,",
+ "$$\(patsubst %$$\(ext),%.o,$$\(filter %$$\(ext),$$\(wildcard", Input, "))))\n",
+ "\t$$\(CC) -o $$\@ $$\? $$\(LDFLAGS) $$\(ERL_LDFLAGS) $$\(DRV_LDFLAGS) $$\(EXE_LDFLAGS)",
+ case {filename:extension(Output), $(PLATFORM)} of
+ {[], _} -> "\n";
+ {_, darwin} -> "\n";
+ _ -> " -shared\n"
+ end])
+ end,
+ [PortSpec(S) || S <- PortSpecs]
+ end,
+ fun() ->
+ case lists:keyfind(plugins, 1, Conf) of
+ false -> ok;
+ {_, Plugins0} ->
+ Plugins = [P || P <- Plugins0, is_tuple(P)],
+ case lists:keyfind('lfe-compile', 1, Plugins) of
+ false -> ok;
+ _ -> Write("\nBUILD_DEPS = lfe lfe.mk\ndep_lfe.mk = git https://github.com/ninenines/lfe.mk master\nDEP_PLUGINS = lfe.mk\n")
+ end
+ end
+ end(),
+ Write("\ninclude $$\(if $$\(ERLANG_MK_FILENAME),$$\(ERLANG_MK_FILENAME),erlang.mk)"),
+ RunPlugin = fun(Plugin, Step) ->
+ case erlang:function_exported(Plugin, Step, 2) of
+ false -> ok;
+ true ->
+ c:cd("$(call core_native_path,$(DEPS_DIR)/$1/)"),
+ Ret = Plugin:Step({config, "", Conf, dict:new(), dict:new(), dict:new(),
+ dict:store(base_dir, "", dict:new())}, undefined),
+ io:format("rebar plugin ~p step ~p ret ~p~n", [Plugin, Step, Ret])
+ end
+ end,
+ fun() ->
+ case lists:keyfind(plugins, 1, Conf) of
+ false -> ok;
+ {_, Plugins0} ->
+ Plugins = [P || P <- Plugins0, is_atom(P)],
+ [begin
+ case lists:keyfind(deps, 1, Conf) of
+ false -> ok;
+ {_, Deps} ->
+ case lists:keyfind(P, 1, Deps) of
+ false -> ok;
+ _ ->
+ Path = "$(call core_native_path,$(DEPS_DIR)/)" ++ atom_to_list(P),
+ io:format("~s", [os:cmd("$(MAKE) -C $(call core_native_path,$(DEPS_DIR)/$1) " ++ Path)]),
+ io:format("~s", [os:cmd("$(MAKE) -C " ++ Path ++ " IS_DEP=1")]),
+ code:add_patha(Path ++ "/ebin")
+ end
+ end
+ end || P <- Plugins],
+ [case code:load_file(P) of
+ {module, P} -> ok;
+ _ ->
+ case lists:keyfind(plugin_dir, 1, Conf) of
+ false -> ok;
+ {_, PluginsDir} ->
+ ErlFile = "$(call core_native_path,$(DEPS_DIR)/$1/)" ++ PluginsDir ++ "/" ++ atom_to_list(P) ++ ".erl",
+ {ok, P, Bin} = compile:file(ErlFile, [binary]),
+ {module, P} = code:load_binary(P, ErlFile, Bin)
+ end
+ end || P <- Plugins],
+ [RunPlugin(P, preprocess) || P <- Plugins],
+ [RunPlugin(P, pre_compile) || P <- Plugins],
+ [RunPlugin(P, compile) || P <- Plugins]
+ end
+ end(),
+ halt()
+endef
+
+define dep_autopatch_appsrc_script.erl
+ AppSrc = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+ AppSrcScript = AppSrc ++ ".script",
+ {ok, Conf0} = file:consult(AppSrc),
+ Bindings0 = erl_eval:new_bindings(),
+ Bindings1 = erl_eval:add_binding('CONFIG', Conf0, Bindings0),
+ Bindings = erl_eval:add_binding('SCRIPT', AppSrcScript, Bindings1),
+ Conf = case file:script(AppSrcScript, Bindings) of
+ {ok, [C]} -> C;
+ {ok, C} -> C
+ end,
+ ok = file:write_file(AppSrc, io_lib:format("~p.~n", [Conf])),
+ halt()
+endef
+
+define dep_autopatch_appsrc.erl
+ AppSrcOut = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+ AppSrcIn = case filelib:is_regular(AppSrcOut) of false -> "$(call core_native_path,$(DEPS_DIR)/$1/ebin/$1.app)"; true -> AppSrcOut end,
+ case filelib:is_regular(AppSrcIn) of
+ false -> ok;
+ true ->
+ {ok, [{application, $(1), L0}]} = file:consult(AppSrcIn),
+ L1 = lists:keystore(modules, 1, L0, {modules, []}),
+ L2 = case lists:keyfind(vsn, 1, L1) of
+ {_, git} -> lists:keyreplace(vsn, 1, L1, {vsn, lists:droplast(os:cmd("git -C $(DEPS_DIR)/$1 describe --dirty --tags --always"))});
+ {_, {cmd, _}} -> lists:keyreplace(vsn, 1, L1, {vsn, "cmd"});
+ _ -> L1
+ end,
+ L3 = case lists:keyfind(registered, 1, L2) of false -> [{registered, []}|L2]; _ -> L2 end,
+ ok = file:write_file(AppSrcOut, io_lib:format("~p.~n", [{application, $(1), L3}])),
+ case AppSrcOut of AppSrcIn -> ok; _ -> ok = file:delete(AppSrcIn) end
+ end,
+ halt()
+endef
+
+define dep_fetch_git
+ git clone -q -n -- $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && git checkout -q $(call dep_commit,$(1));
+endef
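+
+# A git dependency is declared as "dep_NAME = git REPOSITORY COMMIT",
+# where COMMIT may be a tag, branch or SHA, e.g. (hypothetical):
+#
+#     DEPS = cowlib
+#     dep_cowlib = git https://github.com/ninenines/cowlib 2.9.1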
+
+define dep_fetch_git-subfolder
+ mkdir -p $(ERLANG_MK_TMP)/git-subfolder; \
+ git clone -q -n -- $(call dep_repo,$1) \
+ $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1); \
+ cd $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1) \
+ && git checkout -q $(call dep_commit,$1); \
+ ln -s $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1)/$(word 4,$(dep_$(1))) \
+ $(DEPS_DIR)/$(call dep_name,$1);
+endef
+
+define dep_fetch_git-submodule
+ git submodule update --init -- $(DEPS_DIR)/$1;
+endef
+
+define dep_fetch_hg
+ hg clone -q -U $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && hg update -q $(call dep_commit,$(1));
+endef
+
+define dep_fetch_svn
+ svn checkout -q $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+define dep_fetch_cp
+ cp -R $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+define dep_fetch_ln
+ ln -s $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+# Hex packages only carry a version, so there is no need to look them up
+# in the Erlang.mk package index.
+define dep_fetch_hex
+ mkdir -p $(ERLANG_MK_TMP)/hex $(DEPS_DIR)/$1; \
+ $(call core_http_get,$(ERLANG_MK_TMP)/hex/$1.tar,\
+ https://repo.hex.pm/tarballs/$(if $(word 3,$(dep_$1)),$(word 3,$(dep_$1)),$1)-$(strip $(word 2,$(dep_$1))).tar); \
+ tar -xOf $(ERLANG_MK_TMP)/hex/$1.tar contents.tar.gz | tar -C $(DEPS_DIR)/$1 -xzf -;
+endef
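+
+# A hex dependency only needs a version; an optional third word names a
+# hex package that differs from the application name, e.g. (hypothetical):
+#
+#     dep_jsx = hex 3.1.0
+#     dep_uuid = hex 2.0.4 uuid_erl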
+
+define dep_fetch_fail
+ echo "Error: Unknown or invalid dependency: $(1)." >&2; \
+ exit 78;
+endef
+
+# Kept for compatibility with older Erlang.mk configurations.
+define dep_fetch_legacy
+ $(warning WARNING: '$(1)' dependency configuration uses deprecated format.) \
+ git clone -q -n -- $(word 1,$(dep_$(1))) $(DEPS_DIR)/$(1); \
+ cd $(DEPS_DIR)/$(1) && git checkout -q $(if $(word 2,$(dep_$(1))),$(word 2,$(dep_$(1))),master);
+endef
+
+define dep_target
+$(DEPS_DIR)/$(call dep_name,$1): | $(ERLANG_MK_TMP)
+ $(eval DEP_NAME := $(call dep_name,$1))
+ $(eval DEP_STR := $(if $(filter $1,$(DEP_NAME)),$1,"$1 ($(DEP_NAME))"))
+ $(verbose) if test -d $(APPS_DIR)/$(DEP_NAME); then \
+ echo "Error: Dependency" $(DEP_STR) "conflicts with application found in $(APPS_DIR)/$(DEP_NAME)." >&2; \
+ exit 17; \
+ fi
+ $(verbose) mkdir -p $(DEPS_DIR)
+ $(dep_verbose) $(call dep_fetch_$(strip $(call dep_fetch,$(1))),$(1))
+ $(verbose) if [ -f $(DEPS_DIR)/$(1)/configure.ac -o -f $(DEPS_DIR)/$(1)/configure.in ] \
+ && [ ! -f $(DEPS_DIR)/$(1)/configure ]; then \
+ echo " AUTO " $(DEP_STR); \
+ cd $(DEPS_DIR)/$(1) && autoreconf -Wall -vif -I m4; \
+ fi
+ - $(verbose) if [ -f $(DEPS_DIR)/$(DEP_NAME)/configure ]; then \
+ echo " CONF " $(DEP_STR); \
+ cd $(DEPS_DIR)/$(DEP_NAME) && ./configure; \
+ fi
+ifeq ($(filter $(1),$(NO_AUTOPATCH)),)
+ $(verbose) $$(MAKE) --no-print-directory autopatch-$(DEP_NAME)
+endif
+
+.PHONY: autopatch-$(call dep_name,$1)
+
+autopatch-$(call dep_name,$1)::
+ $(verbose) if [ "$(1)" = "amqp_client" -a "$(RABBITMQ_CLIENT_PATCH)" ]; then \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \
+ echo " PATCH Downloading rabbitmq-codegen"; \
+ git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \
+ fi; \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-server ]; then \
+ echo " PATCH Downloading rabbitmq-server"; \
+ git clone https://github.com/rabbitmq/rabbitmq-server.git $(DEPS_DIR)/rabbitmq-server; \
+ fi; \
+ ln -s $(DEPS_DIR)/amqp_client/deps/rabbit_common-0.0.0 $(DEPS_DIR)/rabbit_common; \
+ elif [ "$(1)" = "rabbit" -a "$(RABBITMQ_SERVER_PATCH)" ]; then \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \
+ echo " PATCH Downloading rabbitmq-codegen"; \
+ git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \
+ fi \
+ elif [ "$1" = "elixir" -a "$(ELIXIR_PATCH)" ]; then \
+ ln -s lib/elixir/ebin $(DEPS_DIR)/elixir/; \
+ else \
+ $$(call dep_autopatch,$(call dep_name,$1)) \
+ fi
+endef
+
+$(foreach dep,$(BUILD_DEPS) $(DEPS),$(eval $(call dep_target,$(dep))))
+
+ifndef IS_APP
+clean:: clean-apps
+
+clean-apps:
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ $(MAKE) -C $$dep clean IS_APP=1; \
+ done
+
+distclean:: distclean-apps
+
+distclean-apps:
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ $(MAKE) -C $$dep distclean IS_APP=1; \
+ done
+endif
+
+ifndef SKIP_DEPS
+distclean:: distclean-deps
+
+distclean-deps:
+ $(gen_verbose) rm -rf $(DEPS_DIR)
+endif
+
+# Forward-declare variables used in core/deps-tools.mk. This is required
+# in case plugins use them.
+
+ERLANG_MK_RECURSIVE_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-deps-list.log
+ERLANG_MK_RECURSIVE_DOC_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-doc-deps-list.log
+ERLANG_MK_RECURSIVE_REL_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-rel-deps-list.log
+ERLANG_MK_RECURSIVE_TEST_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-test-deps-list.log
+ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-shell-deps-list.log
+
+ERLANG_MK_QUERY_DEPS_FILE = $(ERLANG_MK_TMP)/query-deps.log
+ERLANG_MK_QUERY_DOC_DEPS_FILE = $(ERLANG_MK_TMP)/query-doc-deps.log
+ERLANG_MK_QUERY_REL_DEPS_FILE = $(ERLANG_MK_TMP)/query-rel-deps.log
+ERLANG_MK_QUERY_TEST_DEPS_FILE = $(ERLANG_MK_TMP)/query-test-deps.log
+ERLANG_MK_QUERY_SHELL_DEPS_FILE = $(ERLANG_MK_TMP)/query-shell-deps.log
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: clean-app
+
+# Configuration.
+
+ERLC_OPTS ?= -Werror +debug_info +warn_export_vars +warn_shadow_vars \
+ +warn_obsolete_guard # +bin_opt_info +warn_export_all +warn_missing_spec
+COMPILE_FIRST ?=
+COMPILE_FIRST_PATHS = $(addprefix src/,$(addsuffix .erl,$(COMPILE_FIRST)))
+ERLC_EXCLUDE ?=
+ERLC_EXCLUDE_PATHS = $(addprefix src/,$(addsuffix .erl,$(ERLC_EXCLUDE)))
+
+ERLC_ASN1_OPTS ?=
+
+ERLC_MIB_OPTS ?=
+COMPILE_MIB_FIRST ?=
+COMPILE_MIB_FIRST_PATHS = $(addprefix mibs/,$(addsuffix .mib,$(COMPILE_MIB_FIRST)))
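+
+# These knobs are meant to be overridden in the project Makefile before
+# erlang.mk is included, e.g. (hypothetical values):
+#
+#     ERLC_OPTS = +debug_info +warn_missing_spec
+#     COMPILE_FIRST = my_behaviour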
+
+# Verbosity.
+
+app_verbose_0 = @echo " APP " $(PROJECT);
+app_verbose_2 = set -x;
+app_verbose = $(app_verbose_$(V))
+
+appsrc_verbose_0 = @echo " APP " $(PROJECT).app.src;
+appsrc_verbose_2 = set -x;
+appsrc_verbose = $(appsrc_verbose_$(V))
+
+makedep_verbose_0 = @echo " DEPEND" $(PROJECT).d;
+makedep_verbose_2 = set -x;
+makedep_verbose = $(makedep_verbose_$(V))
+
+erlc_verbose_0 = @echo " ERLC " $(filter-out $(patsubst %,%.erl,$(ERLC_EXCLUDE)),\
+ $(filter %.erl %.core,$(?F)));
+erlc_verbose_2 = set -x;
+erlc_verbose = $(erlc_verbose_$(V))
+
+xyrl_verbose_0 = @echo " XYRL " $(filter %.xrl %.yrl,$(?F));
+xyrl_verbose_2 = set -x;
+xyrl_verbose = $(xyrl_verbose_$(V))
+
+asn1_verbose_0 = @echo " ASN1 " $(filter %.asn1,$(?F));
+asn1_verbose_2 = set -x;
+asn1_verbose = $(asn1_verbose_$(V))
+
+mib_verbose_0 = @echo " MIB " $(filter %.bin %.mib,$(?F));
+mib_verbose_2 = set -x;
+mib_verbose = $(mib_verbose_$(V))
+
+ifneq ($(wildcard src/),)
+
+# Targets.
+
+app:: $(if $(wildcard ebin/test),clean) deps
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d
+ $(verbose) $(MAKE) --no-print-directory app-build
+
+ifeq ($(wildcard src/$(PROJECT_MOD).erl),)
+define app_file
+{application, '$(PROJECT)', [
+ {description, "$(PROJECT_DESCRIPTION)"},
+ {vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP),
+ {id$(comma)$(space)"$(1)"}$(comma))
+ {modules, [$(call comma_list,$(2))]},
+ {registered, []},
+ {applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(foreach dep,$(DEPS),$(call dep_name,$(dep))))]},
+ {env, $(subst \,\\,$(PROJECT_ENV))}$(if $(findstring {,$(PROJECT_APP_EXTRA_KEYS)),$(comma)$(newline)$(tab)$(subst \,\\,$(PROJECT_APP_EXTRA_KEYS)),)
+]}.
+endef
+else
+define app_file
+{application, '$(PROJECT)', [
+ {description, "$(PROJECT_DESCRIPTION)"},
+ {vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP),
+ {id$(comma)$(space)"$(1)"}$(comma))
+ {modules, [$(call comma_list,$(2))]},
+ {registered, [$(call comma_list,$(PROJECT)_sup $(PROJECT_REGISTERED))]},
+ {applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(foreach dep,$(DEPS),$(call dep_name,$(dep))))]},
+ {mod, {$(PROJECT_MOD), []}},
+ {env, $(subst \,\\,$(PROJECT_ENV))}$(if $(findstring {,$(PROJECT_APP_EXTRA_KEYS)),$(comma)$(newline)$(tab)$(subst \,\\,$(PROJECT_APP_EXTRA_KEYS)),)
+]}.
+endef
+endif
+
+app-build: ebin/$(PROJECT).app
+ $(verbose) :
+
+# Source files.
+
+ALL_SRC_FILES := $(sort $(call core_find,src/,*))
+
+ERL_FILES := $(filter %.erl,$(ALL_SRC_FILES))
+CORE_FILES := $(filter %.core,$(ALL_SRC_FILES))
+
+# ASN.1 files.
+
+ifneq ($(wildcard asn1/),)
+ASN1_FILES = $(sort $(call core_find,asn1/,*.asn1))
+ERL_FILES += $(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES))))
+
+define compile_asn1
+ $(verbose) mkdir -p include/
+ $(asn1_verbose) erlc -v -I include/ -o asn1/ +noobj $(ERLC_ASN1_OPTS) $(1)
+ $(verbose) mv asn1/*.erl src/
+ -$(verbose) mv asn1/*.hrl include/
+ $(verbose) mv asn1/*.asn1db include/
+endef
+
+$(PROJECT).d:: $(ASN1_FILES)
+ $(if $(strip $?),$(call compile_asn1,$?))
+endif
+
+# SNMP MIB files.
+
+ifneq ($(wildcard mibs/),)
+MIB_FILES = $(sort $(call core_find,mibs/,*.mib))
+
+$(PROJECT).d:: $(COMPILE_MIB_FIRST_PATHS) $(MIB_FILES)
+ $(verbose) mkdir -p include/ priv/mibs/
+ $(mib_verbose) erlc -v $(ERLC_MIB_OPTS) -o priv/mibs/ -I priv/mibs/ $?
+ $(mib_verbose) erlc -o include/ -- $(addprefix priv/mibs/,$(patsubst %.mib,%.bin,$(notdir $?)))
+endif
+
+# Leex and Yecc files.
+
+XRL_FILES := $(filter %.xrl,$(ALL_SRC_FILES))
+XRL_ERL_FILES = $(addprefix src/,$(patsubst %.xrl,%.erl,$(notdir $(XRL_FILES))))
+ERL_FILES += $(XRL_ERL_FILES)
+
+YRL_FILES := $(filter %.yrl,$(ALL_SRC_FILES))
+YRL_ERL_FILES = $(addprefix src/,$(patsubst %.yrl,%.erl,$(notdir $(YRL_FILES))))
+ERL_FILES += $(YRL_ERL_FILES)
+
+$(PROJECT).d:: $(XRL_FILES) $(YRL_FILES)
+ $(if $(strip $?),$(xyrl_verbose) erlc -v -o src/ $(YRL_ERLC_OPTS) $?)
+
+# Erlang and Core Erlang files.
+
+define makedep.erl
+ E = ets:new(makedep, [bag]),
+ G = digraph:new([acyclic]),
+ ErlFiles = lists:usort(string:tokens("$(ERL_FILES)", " ")),
+ DepsDir = "$(call core_native_path,$(DEPS_DIR))",
+ AppsDir = "$(call core_native_path,$(APPS_DIR))",
+ DepsDirsSrc = "$(if $(wildcard $(DEPS_DIR)/*/src), $(call core_native_path,$(wildcard $(DEPS_DIR)/*/src)))",
+ DepsDirsInc = "$(if $(wildcard $(DEPS_DIR)/*/include), $(call core_native_path,$(wildcard $(DEPS_DIR)/*/include)))",
+ AppsDirsSrc = "$(if $(wildcard $(APPS_DIR)/*/src), $(call core_native_path,$(wildcard $(APPS_DIR)/*/src)))",
+ AppsDirsInc = "$(if $(wildcard $(APPS_DIR)/*/include), $(call core_native_path,$(wildcard $(APPS_DIR)/*/include)))",
+ DepsDirs = lists:usort(string:tokens(DepsDirsSrc++DepsDirsInc, " ")),
+ AppsDirs = lists:usort(string:tokens(AppsDirsSrc++AppsDirsInc, " ")),
+ Modules = [{list_to_atom(filename:basename(F, ".erl")), F} || F <- ErlFiles],
+ Add = fun (Mod, Dep) ->
+ case lists:keyfind(Dep, 1, Modules) of
+ false -> ok;
+ {_, DepFile} ->
+ {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+ ets:insert(E, {ModFile, DepFile}),
+ digraph:add_vertex(G, Mod),
+ digraph:add_vertex(G, Dep),
+ digraph:add_edge(G, Mod, Dep)
+ end
+ end,
+ AddHd = fun (F, Mod, DepFile) ->
+ case file:open(DepFile, [read]) of
+ {error, enoent} ->
+ ok;
+ {ok, Fd} ->
+ {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+ case ets:match(E, {ModFile, DepFile}) of
+ [] ->
+ ets:insert(E, {ModFile, DepFile}),
+					F(F, Fd, Mod, 0);
+ _ -> ok
+ end
+ end
+ end,
+	SearchHrl = fun
+		F(_Hrl, []) -> {error, enoent};
+		F(Hrl, [Dir|Dirs]) ->
+			HrlF = filename:join([Dir, Hrl]),
+			case filelib:is_file(HrlF) of
+				true -> {ok, HrlF};
+				false -> F(Hrl, Dirs)
+			end
+	end,
+ Attr = fun
+ (_F, Mod, behavior, Dep) ->
+ Add(Mod, Dep);
+ (_F, Mod, behaviour, Dep) ->
+ Add(Mod, Dep);
+ (_F, Mod, compile, {parse_transform, Dep}) ->
+ Add(Mod, Dep);
+ (_F, Mod, compile, Opts) when is_list(Opts) ->
+ case proplists:get_value(parse_transform, Opts) of
+ undefined -> ok;
+ Dep -> Add(Mod, Dep)
+ end;
+ (F, Mod, include, Hrl) ->
+			case SearchHrl(Hrl, ["src", "include", AppsDir, DepsDir] ++ AppsDirs ++ DepsDirs) of
+ {ok, FoundHrl} -> AddHd(F, Mod, FoundHrl);
+ {error, _} -> false
+ end;
+ (F, Mod, include_lib, Hrl) ->
+			case SearchHrl(Hrl, ["src", "include", AppsDir, DepsDir] ++ AppsDirs ++ DepsDirs) of
+ {ok, FoundHrl} -> AddHd(F, Mod, FoundHrl);
+ {error, _} -> false
+ end;
+ (F, Mod, import, {Imp, _}) ->
+ IsFile =
+ case lists:keyfind(Imp, 1, Modules) of
+ false -> false;
+ {_, FilePath} -> filelib:is_file(FilePath)
+ end,
+ case IsFile of
+ false -> ok;
+ true -> Add(Mod, Imp)
+ end;
+ (_, _, _, _) -> ok
+ end,
+ MakeDepend = fun
+ (F, Fd, Mod, StartLocation) ->
+ {ok, Filename} = file:pid2name(Fd),
+ case io:parse_erl_form(Fd, undefined, StartLocation) of
+ {ok, AbsData, EndLocation} ->
+ case AbsData of
+ {attribute, _, Key, Value} ->
+ Attr(F, Mod, Key, Value),
+ F(F, Fd, Mod, EndLocation);
+ _ -> F(F, Fd, Mod, EndLocation)
+ end;
+			{eof, _} -> file:close(Fd);
+			{error, _ErrorDescription} ->
+				file:close(Fd);
+			{error, _ErrorInfo, ErrorLocation} ->
+ F(F, Fd, Mod, ErrorLocation)
+ end,
+ ok
+ end,
+ [begin
+ Mod = list_to_atom(filename:basename(F, ".erl")),
+ case file:open(F, [read]) of
+			{ok, Fd} -> MakeDepend(MakeDepend, Fd, Mod, 0);
+ {error, enoent} -> ok
+ end
+ end || F <- ErlFiles],
+ Depend = sofs:to_external(sofs:relation_to_family(sofs:relation(ets:tab2list(E)))),
+ CompileFirst = [X || X <- lists:reverse(digraph_utils:topsort(G)), [] =/= digraph:in_neighbours(G, X)],
+ TargetPath = fun(Target) ->
+ case lists:keyfind(Target, 1, Modules) of
+ false -> "";
+ {_, DepFile} ->
+ DirSubname = tl(string:tokens(filename:dirname(DepFile), "/")),
+ string:join(DirSubname ++ [atom_to_list(Target)], "/")
+ end
+ end,
+ Output0 = [
+ "# Generated by Erlang.mk. Edit at your own risk!\n\n",
+ [[F, "::", [[" ", D] || D <- Deps], "; @touch \$$@\n"] || {F, Deps} <- Depend],
+ "\nCOMPILE_FIRST +=", [[" ", TargetPath(CF)] || CF <- CompileFirst], "\n"
+ ],
+ Output = case "é" of
+ [233] -> unicode:characters_to_binary(Output0);
+ _ -> Output0
+ end,
+ ok = file:write_file("$(1)", Output),
+ halt()
+endef
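+
+# The generated $(PROJECT).d records, for each module, the headers,
+# behaviours and parse transforms it depends on, plus a compile order,
+# e.g. (illustrative):
+#
+#     src/my_server.erl:: include/my.hrl; @touch $@
+#     COMPILE_FIRST += my_behaviour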
+
+ifeq ($(if $(NO_MAKEDEP),$(wildcard $(PROJECT).d),),)
+$(PROJECT).d:: $(ERL_FILES) $(call core_find,include/,*.hrl) $(MAKEFILE_LIST)
+ $(makedep_verbose) $(call erlang,$(call makedep.erl,$@))
+endif
+
+ifeq ($(IS_APP)$(IS_DEP),)
+ifneq ($(words $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES)),0)
+# Rebuild everything when the Makefile changes.
+$(ERLANG_MK_TMP)/last-makefile-change: $(MAKEFILE_LIST) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES); \
+ touch -c $(PROJECT).d; \
+ fi
+ $(verbose) touch $@
+
+$(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES):: $(ERLANG_MK_TMP)/last-makefile-change
+ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change
+endif
+endif
+
+$(PROJECT).d::
+ $(verbose) :
+
+include $(wildcard $(PROJECT).d)
+
+ebin/$(PROJECT).app:: ebin/
+
+ebin/:
+ $(verbose) mkdir -p ebin/
+
+define compile_erl
+ $(erlc_verbose) erlc -v $(if $(IS_DEP),$(filter-out -Werror,$(ERLC_OPTS)),$(ERLC_OPTS)) -o ebin/ \
+ -pa ebin/ -I include/ $(filter-out $(ERLC_EXCLUDE_PATHS),$(COMPILE_FIRST_PATHS) $(1))
+endef
+
+define validate_app_file
+ case file:consult("ebin/$(PROJECT).app") of
+ {ok, _} -> halt();
+ _ -> halt(1)
+ end
+endef
+
+ebin/$(PROJECT).app:: $(ERL_FILES) $(CORE_FILES) $(wildcard src/$(PROJECT).app.src)
+ $(eval FILES_TO_COMPILE := $(filter-out src/$(PROJECT).app.src,$?))
+ $(if $(strip $(FILES_TO_COMPILE)),$(call compile_erl,$(FILES_TO_COMPILE)))
+# Older git versions do not have the --first-parent flag. Do without in that case.
+ $(eval GITDESCRIBE := $(shell git describe --dirty --abbrev=7 --tags --always --first-parent 2>/dev/null \
+ || git describe --dirty --abbrev=7 --tags --always 2>/dev/null || true))
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(filter-out $(ERLC_EXCLUDE_PATHS),$(ERL_FILES) $(CORE_FILES) $(BEAM_FILES)))))))
+ifeq ($(wildcard src/$(PROJECT).app.src),)
+ $(app_verbose) printf '$(subst %,%%,$(subst $(newline),\n,$(subst ','\'',$(call app_file,$(GITDESCRIBE),$(MODULES)))))' \
+ > ebin/$(PROJECT).app
+ $(verbose) if ! $(call erlang,$(call validate_app_file)); then \
+ echo "The .app file produced is invalid. Please verify the value of PROJECT_ENV." >&2; \
+ exit 1; \
+ fi
+else
+ $(verbose) if [ -z "$$(grep -e '^[^%]*{\s*modules\s*,' src/$(PROJECT).app.src)" ]; then \
+ echo "Empty modules entry not found in $(PROJECT).app.src. Please consult the erlang.mk documentation for instructions." >&2; \
+ exit 1; \
+ fi
+ $(appsrc_verbose) cat src/$(PROJECT).app.src \
+ | sed "s/{[[:space:]]*modules[[:space:]]*,[[:space:]]*\[\]}/{modules, \[$(call comma_list,$(MODULES))\]}/" \
+ | sed "s/{id,[[:space:]]*\"git\"}/{id, \"$(subst /,\/,$(GITDESCRIBE))\"}/" \
+ > ebin/$(PROJECT).app
+endif
+ifneq ($(wildcard src/$(PROJECT).appup),)
+ $(verbose) cp src/$(PROJECT).appup ebin/
+endif
+
+clean:: clean-app
+
+clean-app:
+ $(gen_verbose) rm -rf $(PROJECT).d ebin/ priv/mibs/ $(XRL_ERL_FILES) $(YRL_ERL_FILES) \
+ $(addprefix include/,$(patsubst %.mib,%.hrl,$(notdir $(MIB_FILES)))) \
+ $(addprefix include/,$(patsubst %.asn1,%.hrl,$(notdir $(ASN1_FILES)))) \
+ $(addprefix include/,$(patsubst %.asn1,%.asn1db,$(notdir $(ASN1_FILES)))) \
+ $(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES))))
+
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Viktor Söderqvist <viktor@zuiderkwast.se>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: docs-deps
+
+# Configuration.
+
+ALL_DOC_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(DOC_DEPS))
+
+# Targets.
+
+$(foreach dep,$(DOC_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+doc-deps:
+else
+doc-deps: $(ALL_DOC_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_DOC_DEPS_DIRS) ; do $(MAKE) -C $$dep IS_DEP=1; done
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: rel-deps
+
+# Configuration.
+
+ALL_REL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(REL_DEPS))
+
+# Targets.
+
+$(foreach dep,$(REL_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+rel-deps:
+else
+rel-deps: $(ALL_REL_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_REL_DEPS_DIRS) ; do $(MAKE) -C $$dep; done
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: test-deps test-dir test-build clean-test-dir
+
+# Configuration.
+
+TEST_DIR ?= $(CURDIR)/test
+
+ALL_TEST_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(TEST_DEPS))
+
+TEST_ERLC_OPTS ?= +debug_info +warn_export_vars +warn_shadow_vars +warn_obsolete_guard
+TEST_ERLC_OPTS += -DTEST=1
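+
+# The TEST macro lets modules carry test-only code that the regular
+# build leaves out, e.g.:
+#
+#     -ifdef(TEST).
+#     -include_lib("eunit/include/eunit.hrl").
+#     -endif.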
+
+# Targets.
+
+$(foreach dep,$(TEST_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+test-deps:
+else
+test-deps: $(ALL_TEST_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_TEST_DEPS_DIRS) ; do \
+ if [ -z "$(strip $(FULL))" ] && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ else \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ fi \
+ done
+endif
+
+ifneq ($(wildcard $(TEST_DIR)),)
+test-dir: $(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build
+ @:
+
+test_erlc_verbose_0 = @echo " ERLC " $(filter-out $(patsubst %,%.erl,$(ERLC_EXCLUDE)),\
+ $(filter %.erl %.core,$(notdir $(FILES_TO_COMPILE))));
+test_erlc_verbose_2 = set -x;
+test_erlc_verbose = $(test_erlc_verbose_$(V))
+
+define compile_test_erl
+ $(test_erlc_verbose) erlc -v $(TEST_ERLC_OPTS) -o $(TEST_DIR) \
+ -pa ebin/ -I include/ $(1)
+endef
+
+ERL_TEST_FILES = $(call core_find,$(TEST_DIR)/,*.erl)
+$(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build: $(ERL_TEST_FILES) $(MAKEFILE_LIST)
+ $(eval FILES_TO_COMPILE := $(if $(filter $(MAKEFILE_LIST),$?),$(filter $(ERL_TEST_FILES),$^),$?))
+ $(if $(strip $(FILES_TO_COMPILE)),$(call compile_test_erl,$(FILES_TO_COMPILE)) && touch $@)
+endif
+
+test-build:: IS_TEST=1
+test-build:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build:: $(if $(wildcard src),$(if $(wildcard ebin/test),,clean)) $(if $(IS_APP),,deps test-deps)
+# We already compiled everything when IS_APP=1.
+ifndef IS_APP
+ifneq ($(wildcard src),)
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(verbose) $(MAKE) --no-print-directory app-build ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(gen_verbose) touch ebin/test
+endif
+ifneq ($(wildcard $(TEST_DIR)),)
+ $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+endif
+endif
+
+# Roughly the same as test-build, but when IS_APP=1.
+# We only care about compiling the current application.
+ifdef IS_APP
+test-build-app:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build-app:: deps test-deps
+ifneq ($(wildcard src),)
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(verbose) $(MAKE) --no-print-directory app-build ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(gen_verbose) touch ebin/test
+endif
+ifneq ($(wildcard $(TEST_DIR)),)
+ $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+endif
+endif
+
+clean:: clean-test-dir
+
+clean-test-dir:
+ifneq ($(wildcard $(TEST_DIR)/*.beam),)
+ $(gen_verbose) rm -f $(TEST_DIR)/*.beam $(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: rebar.config
+
+# We strip out -Werror because we don't want to fail due to
+# warnings when used as a dependency.
+
+compat_prepare_erlc_opts = $(shell echo "$1" | sed 's/, */,/g')
+
+define compat_convert_erlc_opts
+$(if $(filter-out -Werror,$1),\
+ $(if $(findstring +,$1),\
+ $(shell echo $1 | cut -b 2-)))
+endef
+
+define compat_erlc_opts_to_list
+[$(call comma_list,$(foreach o,$(call compat_prepare_erlc_opts,$1),$(call compat_convert_erlc_opts,$o)))]
+endef
+
+define compat_rebar_config
+{deps, [
+$(call comma_list,$(foreach d,$(DEPS),\
+ $(if $(filter hex,$(call dep_fetch,$d)),\
+ {$(call dep_name,$d)$(comma)"$(call dep_repo,$d)"},\
+ {$(call dep_name,$d)$(comma)".*"$(comma){git,"$(call dep_repo,$d)"$(comma)"$(call dep_commit,$d)"}})))
+]}.
+{erl_opts, $(call compat_erlc_opts_to_list,$(ERLC_OPTS))}.
+endef
+
+rebar.config:
+ $(gen_verbose) $(call core_render,compat_rebar_config,rebar.config)
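+
+# Example: generate a rebar-compatible configuration from the DEPS and
+# ERLC_OPTS of the current project:
+#
+#     make rebar.config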
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter asciideck,$(DEPS) $(DOC_DEPS)),asciideck)
+
+.PHONY: asciidoc asciidoc-guide asciidoc-manual install-asciidoc distclean-asciidoc-guide distclean-asciidoc-manual
+
+# Core targets.
+
+docs:: asciidoc
+
+distclean:: distclean-asciidoc-guide distclean-asciidoc-manual
+
+# Plugin-specific targets.
+
+asciidoc: asciidoc-guide asciidoc-manual
+
+# User guide.
+
+ifeq ($(wildcard doc/src/guide/book.asciidoc),)
+asciidoc-guide:
+else
+asciidoc-guide: distclean-asciidoc-guide doc-deps
+ a2x -v -f pdf doc/src/guide/book.asciidoc && mv doc/src/guide/book.pdf doc/guide.pdf
+ a2x -v -f chunked doc/src/guide/book.asciidoc && mv doc/src/guide/book.chunked/ doc/html/
+
+distclean-asciidoc-guide:
+ $(gen_verbose) rm -rf doc/html/ doc/guide.pdf
+endif
+
+# Man pages.
+
+ASCIIDOC_MANUAL_FILES := $(wildcard doc/src/manual/*.asciidoc)
+
+ifeq ($(ASCIIDOC_MANUAL_FILES),)
+asciidoc-manual:
+else
+
+# Configuration.
+
+MAN_INSTALL_PATH ?= /usr/local/share/man
+MAN_SECTIONS ?= 3 7
+MAN_PROJECT ?= $(shell echo $(PROJECT) | sed 's/^./\U&\E/')
+MAN_VERSION ?= $(PROJECT_VERSION)
+
+# Plugin-specific targets.
+
+define asciidoc2man.erl
+try
+ [begin
+ io:format(" ADOC ~s~n", [F]),
+ ok = asciideck:to_manpage(asciideck:parse_file(F), #{
+ compress => gzip,
+ outdir => filename:dirname(F),
+ extra2 => "$(MAN_PROJECT) $(MAN_VERSION)",
+ extra3 => "$(MAN_PROJECT) Function Reference"
+ })
+ end || F <- [$(shell echo $(addprefix $(comma)\",$(addsuffix \",$1)) | sed 's/^.//')]],
+ halt(0)
+catch C:E ->
+ io:format("Exception ~p:~p~nStacktrace: ~p~n", [C, E, erlang:get_stacktrace()]),
+ halt(1)
+end.
+endef
+
+asciidoc-manual:: doc-deps
+
+asciidoc-manual:: $(ASCIIDOC_MANUAL_FILES)
+ $(gen_verbose) $(call erlang,$(call asciidoc2man.erl,$?))
+ $(verbose) $(foreach s,$(MAN_SECTIONS),mkdir -p doc/man$s/ && mv doc/src/manual/*.$s.gz doc/man$s/;)
+
+install-docs:: install-asciidoc
+
+install-asciidoc: asciidoc-manual
+ $(foreach s,$(MAN_SECTIONS),\
+ mkdir -p $(MAN_INSTALL_PATH)/man$s/ && \
+ install -g `id -g` -o `id -u` -m 0644 doc/man$s/*.gz $(MAN_INSTALL_PATH)/man$s/;)
+
+distclean-asciidoc-manual:
+ $(gen_verbose) rm -rf $(addprefix doc/man,$(MAN_SECTIONS))
+endif
+endif
+
+# Copyright (c) 2014-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: bootstrap bootstrap-lib bootstrap-rel new list-templates
+
+# Core targets.
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Bootstrap targets:" \
+ " bootstrap Generate a skeleton of an OTP application" \
+ " bootstrap-lib Generate a skeleton of an OTP library" \
+ " bootstrap-rel Generate the files needed to build a release" \
+ " new-app in=NAME Create a new local OTP application NAME" \
+ " new-lib in=NAME Create a new local OTP library NAME" \
+ " new t=TPL n=NAME Generate a module NAME based on the template TPL" \
+		"  new t=T n=N in=APP  Generate a module N based on the template T in APP" \
+ " list-templates List available templates"
+
+# Bootstrap templates.
+
+define bs_appsrc
+{application, $p, [
+ {description, ""},
+ {vsn, "0.1.0"},
+ {id, "git"},
+ {modules, []},
+ {registered, []},
+ {applications, [
+ kernel,
+ stdlib
+ ]},
+ {mod, {$p_app, []}},
+ {env, []}
+]}.
+endef
+
+define bs_appsrc_lib
+{application, $p, [
+ {description, ""},
+ {vsn, "0.1.0"},
+ {id, "git"},
+ {modules, []},
+ {registered, []},
+ {applications, [
+ kernel,
+ stdlib
+ ]}
+]}.
+endef
+
+# To prevent autocompletion issues with ZSH, we add "include erlang.mk"
+# separately during the actual bootstrap.
+define bs_Makefile
+PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.1.0
+$(if $(SP),
+# Whitespace to be used when creating files from templates.
+SP = $(SP)
+)
+endef
+
+define bs_apps_Makefile
+PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.1.0
+$(if $(SP),
+# Whitespace to be used when creating files from templates.
+SP = $(SP)
+)
+# Make sure we know where the applications are located.
+ROOT_DIR ?= $(call core_relpath,$(dir $(ERLANG_MK_FILENAME)),$(APPS_DIR)/app)
+APPS_DIR ?= ..
+DEPS_DIR ?= $(call core_relpath,$(DEPS_DIR),$(APPS_DIR)/app)
+
+include $$(ROOT_DIR)/erlang.mk
+endef
+
+define bs_app
+-module($p_app).
+-behaviour(application).
+
+-export([start/2]).
+-export([stop/1]).
+
+start(_Type, _Args) ->
+ $p_sup:start_link().
+
+stop(_State) ->
+ ok.
+endef
+
+define bs_relx_config
+{release, {$p_release, "1"}, [$p, sasl, runtime_tools]}.
+{extended_start_script, true}.
+{sys_config, "config/sys.config"}.
+{vm_args, "config/vm.args"}.
+endef
+
+define bs_sys_config
+[
+].
+endef
+
+define bs_vm_args
+-name $p@127.0.0.1
+-setcookie $p
+-heart
+endef
+
+# Normal templates.
+
+define tpl_supervisor
+-module($(n)).
+-behaviour(supervisor).
+
+-export([start_link/0]).
+-export([init/1]).
+
+start_link() ->
+ supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+init([]) ->
+ Procs = [],
+ {ok, {{one_for_one, 1, 5}, Procs}}.
+endef
+
+define tpl_gen_server
+-module($(n)).
+-behaviour(gen_server).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_server.
+-export([init/1]).
+-export([handle_call/3]).
+-export([handle_cast/2]).
+-export([handle_info/2]).
+-export([terminate/2]).
+-export([code_change/3]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_server:start_link(?MODULE, [], []).
+
+%% gen_server.
+
+init([]) ->
+ {ok, #state{}}.
+
+handle_call(_Request, _From, State) ->
+ {reply, ignored, State}.
+
+handle_cast(_Msg, State) ->
+ {noreply, State}.
+
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+endef
+
+define tpl_module
+-module($(n)).
+-export([]).
+endef
+
+define tpl_cowboy_http
+-module($(n)).
+-behaviour(cowboy_http_handler).
+
+-export([init/3]).
+-export([handle/2]).
+-export([terminate/3]).
+
+-record(state, {
+}).
+
+init(_, Req, _Opts) ->
+ {ok, Req, #state{}}.
+
+handle(Req, State=#state{}) ->
+ {ok, Req2} = cowboy_req:reply(200, Req),
+ {ok, Req2, State}.
+
+terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_gen_fsm
+-module($(n)).
+-behaviour(gen_fsm).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_fsm.
+-export([init/1]).
+-export([state_name/2]).
+-export([handle_event/3]).
+-export([state_name/3]).
+-export([handle_sync_event/4]).
+-export([handle_info/3]).
+-export([terminate/3]).
+-export([code_change/4]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_fsm:start_link(?MODULE, [], []).
+
+%% gen_fsm.
+
+init([]) ->
+ {ok, state_name, #state{}}.
+
+state_name(_Event, StateData) ->
+ {next_state, state_name, StateData}.
+
+handle_event(_Event, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+state_name(_Event, _From, StateData) ->
+ {reply, ignored, state_name, StateData}.
+
+handle_sync_event(_Event, _From, StateName, StateData) ->
+ {reply, ignored, StateName, StateData}.
+
+handle_info(_Info, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+terminate(_Reason, _StateName, _StateData) ->
+ ok.
+
+code_change(_OldVsn, StateName, StateData, _Extra) ->
+ {ok, StateName, StateData}.
+endef
+
+define tpl_gen_statem
+-module($(n)).
+-behaviour(gen_statem).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_statem.
+-export([callback_mode/0]).
+-export([init/1]).
+-export([state_name/3]).
+-export([handle_event/4]).
+-export([terminate/3]).
+-export([code_change/4]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_statem:start_link(?MODULE, [], []).
+
+%% gen_statem.
+
+callback_mode() ->
+ state_functions.
+
+init([]) ->
+ {ok, state_name, #state{}}.
+
+state_name(_EventType, _EventData, StateData) ->
+ {next_state, state_name, StateData}.
+
+handle_event(_EventType, _EventData, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+terminate(_Reason, _StateName, _StateData) ->
+ ok.
+
+code_change(_OldVsn, StateName, StateData, _Extra) ->
+ {ok, StateName, StateData}.
+endef
+
+define tpl_cowboy_loop
+-module($(n)).
+-behaviour(cowboy_loop_handler).
+
+-export([init/3]).
+-export([info/3]).
+-export([terminate/3]).
+
+-record(state, {
+}).
+
+init(_, Req, _Opts) ->
+ {loop, Req, #state{}, 5000, hibernate}.
+
+info(_Info, Req, State) ->
+ {loop, Req, State, hibernate}.
+
+terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_cowboy_rest
+-module($(n)).
+
+-export([init/3]).
+-export([content_types_provided/2]).
+-export([get_html/2]).
+
+init(_, _Req, _Opts) ->
+ {upgrade, protocol, cowboy_rest}.
+
+content_types_provided(Req, State) ->
+ {[{{<<"text">>, <<"html">>, '*'}, get_html}], Req, State}.
+
+get_html(Req, State) ->
+ {<<"<html><body>This is REST!</body></html>">>, Req, State}.
+endef
+
+define tpl_cowboy_ws
+-module($(n)).
+-behaviour(cowboy_websocket_handler).
+
+-export([init/3]).
+-export([websocket_init/3]).
+-export([websocket_handle/3]).
+-export([websocket_info/3]).
+-export([websocket_terminate/3]).
+
+-record(state, {
+}).
+
+init(_, _, _) ->
+ {upgrade, protocol, cowboy_websocket}.
+
+websocket_init(_, Req, _Opts) ->
+ Req2 = cowboy_req:compact(Req),
+ {ok, Req2, #state{}}.
+
+websocket_handle({text, Data}, Req, State) ->
+ {reply, {text, Data}, Req, State};
+websocket_handle({binary, Data}, Req, State) ->
+ {reply, {binary, Data}, Req, State};
+websocket_handle(_Frame, Req, State) ->
+ {ok, Req, State}.
+
+websocket_info(_Info, Req, State) ->
+ {ok, Req, State}.
+
+websocket_terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_ranch_protocol
+-module($(n)).
+-behaviour(ranch_protocol).
+
+-export([start_link/4]).
+-export([init/4]).
+
+-type opts() :: [].
+-export_type([opts/0]).
+
+-record(state, {
+ socket :: inet:socket(),
+ transport :: module()
+}).
+
+start_link(Ref, Socket, Transport, Opts) ->
+ Pid = spawn_link(?MODULE, init, [Ref, Socket, Transport, Opts]),
+ {ok, Pid}.
+
+-spec init(ranch:ref(), inet:socket(), module(), opts()) -> ok.
+init(Ref, Socket, Transport, _Opts) ->
+ ok = ranch:accept_ack(Ref),
+ loop(#state{socket=Socket, transport=Transport}).
+
+loop(State) ->
+ loop(State).
+endef
+
+# Plugin-specific targets.
+
+ifndef WS
+ifdef SP
+WS = $(subst a,,a $(wordlist 1,$(SP),a a a a a a a a a a a a a a a a a a a a))
+else
+WS = $(tab)
+endif
+endif
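+
+# SP selects the number of spaces used for indentation in generated
+# files; when unset, templates are indented with tabs. For example:
+#
+#     make bootstrap SP=4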
+
+bootstrap:
+ifneq ($(wildcard src/),)
+ $(error Error: src/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(eval n := $(PROJECT)_sup)
+ $(verbose) $(call core_render,bs_Makefile,Makefile)
+ $(verbose) echo "include erlang.mk" >> Makefile
+ $(verbose) mkdir src/
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc,src/$(PROJECT).app.src)
+endif
+ $(verbose) $(call core_render,bs_app,src/$(PROJECT)_app.erl)
+ $(verbose) $(call core_render,tpl_supervisor,src/$(PROJECT)_sup.erl)
+
+bootstrap-lib:
+ifneq ($(wildcard src/),)
+ $(error Error: src/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(verbose) $(call core_render,bs_Makefile,Makefile)
+ $(verbose) echo "include erlang.mk" >> Makefile
+ $(verbose) mkdir src/
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc_lib,src/$(PROJECT).app.src)
+endif
+
+bootstrap-rel:
+ifneq ($(wildcard relx.config),)
+ $(error Error: relx.config already exists)
+endif
+ifneq ($(wildcard config/),)
+ $(error Error: config/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(verbose) $(call core_render,bs_relx_config,relx.config)
+ $(verbose) mkdir config/
+ $(verbose) $(call core_render,bs_sys_config,config/sys.config)
+ $(verbose) $(call core_render,bs_vm_args,config/vm.args)
+
+new-app:
+ifndef in
+ $(error Usage: $(MAKE) new-app in=APP)
+endif
+ifneq ($(wildcard $(APPS_DIR)/$in),)
+ $(error Error: Application $in already exists)
+endif
+ $(eval p := $(in))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(eval n := $(in)_sup)
+ $(verbose) mkdir -p $(APPS_DIR)/$p/src/
+ $(verbose) $(call core_render,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile)
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc,$(APPS_DIR)/$p/src/$p.app.src)
+endif
+ $(verbose) $(call core_render,bs_app,$(APPS_DIR)/$p/src/$p_app.erl)
+ $(verbose) $(call core_render,tpl_supervisor,$(APPS_DIR)/$p/src/$p_sup.erl)
+
+new-lib:
+ifndef in
+ $(error Usage: $(MAKE) new-lib in=APP)
+endif
+ifneq ($(wildcard $(APPS_DIR)/$in),)
+ $(error Error: Application $in already exists)
+endif
+ $(eval p := $(in))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(verbose) mkdir -p $(APPS_DIR)/$p/src/
+ $(verbose) $(call core_render,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile)
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc_lib,$(APPS_DIR)/$p/src/$p.app.src)
+endif
+
+new:
+ifeq ($(wildcard src/)$(in),)
+ $(error Error: src/ directory does not exist)
+endif
+ifndef t
+ $(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP])
+endif
+ifndef n
+ $(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP])
+endif
+ifdef in
+ $(verbose) $(call core_render,tpl_$(t),$(APPS_DIR)/$(in)/src/$(n).erl)
+else
+ $(verbose) $(call core_render,tpl_$(t),src/$(n).erl)
+endif
+
+list-templates:
+	$(verbose) echo Available templates:
+ $(verbose) printf " %s\n" $(sort $(patsubst tpl_%,%,$(filter tpl_%,$(.VARIABLES))))
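+
+# Example usage (hypothetical names):
+#
+#     make new t=gen_server n=my_server
+#     make new t=supervisor n=my_sup in=my_app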
+
+# Copyright (c) 2014-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: clean-c_src distclean-c_src-env
+
+# Configuration.
+
+C_SRC_DIR ?= $(CURDIR)/c_src
+C_SRC_ENV ?= $(C_SRC_DIR)/env.mk
+C_SRC_OUTPUT ?= $(CURDIR)/priv/$(PROJECT)
+C_SRC_TYPE ?= shared
+
+# System type and C compiler/flags.
+
+ifeq ($(PLATFORM),msys2)
+ C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?= .exe
+ C_SRC_OUTPUT_SHARED_EXTENSION ?= .dll
+else
+ C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?=
+ C_SRC_OUTPUT_SHARED_EXTENSION ?= .so
+endif
+
+ifeq ($(C_SRC_TYPE),shared)
+ C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_SHARED_EXTENSION)
+else
+ C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_EXECUTABLE_EXTENSION)
+endif
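+
+# For example (assuming PROJECT = my_nif), the defaults on Linux give
+# C_SRC_OUTPUT_FILE = $(CURDIR)/priv/my_nif.so; with C_SRC_TYPE set to
+# anything other than "shared", the extension is dropped and an
+# executable is linked instead of a shared object.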
+
+ifeq ($(PLATFORM),msys2)
+# We hardcode the compiler used on MSYS2. The default CC=cc does
+# not produce working code. The "gcc" MSYS2 package also doesn't.
+ CC = /mingw64/bin/gcc
+ export CC
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),darwin)
+ CC ?= cc
+ CFLAGS ?= -O3 -std=c99 -arch x86_64 -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -arch x86_64 -Wall
+ LDFLAGS ?= -arch x86_64 -flat_namespace -undefined suppress
+else ifeq ($(PLATFORM),freebsd)
+ CC ?= cc
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),linux)
+ CC ?= gcc
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+endif
+
+ifneq ($(PLATFORM),msys2)
+ CFLAGS += -fPIC
+ CXXFLAGS += -fPIC
+endif
+
+CFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
+CXXFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
+
+LDLIBS += -L"$(ERL_INTERFACE_LIB_DIR)" -lei
+
+# Verbosity.
+
+c_verbose_0 = @echo " C " $(filter-out $(notdir $(MAKEFILE_LIST) $(C_SRC_ENV)),$(^F));
+c_verbose = $(c_verbose_$(V))
+
+cpp_verbose_0 = @echo " CPP " $(filter-out $(notdir $(MAKEFILE_LIST) $(C_SRC_ENV)),$(^F));
+cpp_verbose = $(cpp_verbose_$(V))
+
+link_verbose_0 = @echo " LD " $(@F);
+link_verbose = $(link_verbose_$(V))
+
+# Targets.
+
+ifeq ($(wildcard $(C_SRC_DIR)),)
+else ifneq ($(wildcard $(C_SRC_DIR)/Makefile),)
+app:: app-c_src
+
+test-build:: app-c_src
+
+app-c_src:
+ $(MAKE) -C $(C_SRC_DIR)
+
+clean::
+ $(MAKE) -C $(C_SRC_DIR) clean
+
+else
+
+ifeq ($(SOURCES),)
+SOURCES := $(sort $(foreach pat,*.c *.C *.cc *.cpp,$(call core_find,$(C_SRC_DIR)/,$(pat))))
+endif
+OBJECTS = $(addsuffix .o, $(basename $(SOURCES)))
+
+COMPILE_C = $(c_verbose) $(CC) $(CFLAGS) $(CPPFLAGS) -c
+COMPILE_CPP = $(cpp_verbose) $(CXX) $(CXXFLAGS) $(CPPFLAGS) -c
+
+app:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
+
+test-build:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
+
+$(C_SRC_OUTPUT_FILE): $(OBJECTS)
+ $(verbose) mkdir -p $(dir $@)
+ $(link_verbose) $(CC) $(OBJECTS) \
+ $(LDFLAGS) $(if $(filter $(C_SRC_TYPE),shared),-shared) $(LDLIBS) \
+ -o $(C_SRC_OUTPUT_FILE)
+
+$(OBJECTS): $(MAKEFILE_LIST) $(C_SRC_ENV)
+
+%.o: %.c
+ $(COMPILE_C) $(OUTPUT_OPTION) $<
+
+%.o: %.cc
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+%.o: %.C
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+%.o: %.cpp
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+clean:: clean-c_src
+
+clean-c_src:
+ $(gen_verbose) rm -f $(C_SRC_OUTPUT_FILE) $(OBJECTS)
+
+endif
+
+ifneq ($(wildcard $(C_SRC_DIR)),)
+ERL_ERTS_DIR = $(shell $(ERL) -eval 'io:format("~s~n", [code:lib_dir(erts)]), halt().')
+
+$(C_SRC_ENV):
+ $(verbose) $(ERL) -eval "file:write_file(\"$(call core_native_path,$(C_SRC_ENV))\", \
+ io_lib:format( \
+ \"# Generated by Erlang.mk. Edit at your own risk!~n~n\" \
+ \"ERTS_INCLUDE_DIR ?= ~s/erts-~s/include/~n\" \
+ \"ERL_INTERFACE_INCLUDE_DIR ?= ~s~n\" \
+ \"ERL_INTERFACE_LIB_DIR ?= ~s~n\" \
+ \"ERTS_DIR ?= $(ERL_ERTS_DIR)~n\", \
+ [code:root_dir(), erlang:system_info(version), \
+ code:lib_dir(erl_interface, include), \
+ code:lib_dir(erl_interface, lib)])), \
+ halt()."
+
+distclean:: distclean-c_src-env
+
+distclean-c_src-env:
+ $(gen_verbose) rm -f $(C_SRC_ENV)
+
+-include $(C_SRC_ENV)
+
+ifneq ($(ERL_ERTS_DIR),$(ERTS_DIR))
+$(shell rm -f $(C_SRC_ENV))
+endif
+endif
+
+# Templates.
+
+define bs_c_nif
+#include "erl_nif.h"
+
+static int loads = 0;
+
+static int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
+{
+ /* Initialize private data. */
+ *priv_data = NULL;
+
+ loads++;
+
+ return 0;
+}
+
+static int upgrade(ErlNifEnv* env, void** priv_data, void** old_priv_data, ERL_NIF_TERM load_info)
+{
+ /* Convert the private data to the new version. */
+ *priv_data = *old_priv_data;
+
+ loads++;
+
+ return 0;
+}
+
+static void unload(ErlNifEnv* env, void* priv_data)
+{
+ if (loads == 1) {
+ /* Destroy the private data. */
+ }
+
+ loads--;
+}
+
+static ERL_NIF_TERM hello(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ if (enif_is_atom(env, argv[0])) {
+ return enif_make_tuple2(env,
+ enif_make_atom(env, "hello"),
+ argv[0]);
+ }
+
+ return enif_make_tuple2(env,
+ enif_make_atom(env, "error"),
+ enif_make_atom(env, "badarg"));
+}
+
+static ErlNifFunc nif_funcs[] = {
+ {"hello", 1, hello}
+};
+
+ERL_NIF_INIT($n, nif_funcs, load, NULL, upgrade, unload)
+endef
+
+define bs_erl_nif
+-module($n).
+
+-export([hello/1]).
+
+-on_load(on_load/0).
+on_load() ->
+ PrivDir = case code:priv_dir(?MODULE) of
+ {error, _} ->
+ AppPath = filename:dirname(filename:dirname(code:which(?MODULE))),
+ filename:join(AppPath, "priv");
+ Path ->
+ Path
+ end,
+ erlang:load_nif(filename:join(PrivDir, atom_to_list(?MODULE)), 0).
+
+hello(_) ->
+ erlang:nif_error({not_loaded, ?MODULE}).
+endef
+
+new-nif:
+ifneq ($(wildcard $(C_SRC_DIR)/$n.c),)
+ $(error Error: $(C_SRC_DIR)/$n.c already exists)
+endif
+ifneq ($(wildcard src/$n.erl),)
+ $(error Error: src/$n.erl already exists)
+endif
+ifndef n
+ $(error Usage: $(MAKE) new-nif n=NAME [in=APP])
+endif
+ifdef in
+ $(verbose) $(MAKE) -C $(APPS_DIR)/$(in)/ new-nif n=$n in=
+else
+ $(verbose) mkdir -p $(C_SRC_DIR) src/
+ $(verbose) $(call core_render,bs_c_nif,$(C_SRC_DIR)/$n.c)
+ $(verbose) $(call core_render,bs_erl_nif,src/$n.erl)
+endif
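+
+# Typical usage (assumed name): `make new-nif n=hello` renders the two
+# templates above into c_src/hello.c and src/hello.erl; a subsequent
+# `make` then compiles c_src/ into $(C_SRC_OUTPUT_FILE) through the
+# rules earlier in this file.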
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: ci ci-prepare ci-setup
+
+CI_OTP ?=
+CI_HIPE ?=
+CI_ERLLVM ?=
+
+ifeq ($(CI_VM),native)
+ERLC_OPTS += +native
+TEST_ERLC_OPTS += +native
+else ifeq ($(CI_VM),erllvm)
+ERLC_OPTS += +native +'{hipe, [to_llvm]}'
+TEST_ERLC_OPTS += +native +'{hipe, [to_llvm]}'
+endif
+
+ifeq ($(strip $(CI_OTP) $(CI_HIPE) $(CI_ERLLVM)),)
+ci::
+else
+
+ci:: $(addprefix ci-,$(CI_OTP) $(addsuffix -native,$(CI_HIPE)) $(addsuffix -erllvm,$(CI_ERLLVM)))
+
+ci-prepare: $(addprefix $(KERL_INSTALL_DIR)/,$(CI_OTP) $(addsuffix -native,$(CI_HIPE)))
+
+ci-setup::
+ $(verbose) :
+
+ci-extra::
+ $(verbose) :
+
+ci_verbose_0 = @echo " CI " $(1);
+ci_verbose = $(ci_verbose_$(V))
+
+define ci_target
+ci-$1: $(KERL_INSTALL_DIR)/$2
+ $(verbose) $(MAKE) --no-print-directory clean
+ $(ci_verbose) \
+ PATH="$(KERL_INSTALL_DIR)/$2/bin:$(PATH)" \
+ CI_OTP_RELEASE="$1" \
+ CT_OPTS="-label $1" \
+ CI_VM="$3" \
+ $(MAKE) ci-setup tests
+ $(verbose) $(MAKE) --no-print-directory ci-extra
+endef
+
+$(foreach otp,$(CI_OTP),$(eval $(call ci_target,$(otp),$(otp),otp)))
+$(foreach otp,$(CI_HIPE),$(eval $(call ci_target,$(otp)-native,$(otp)-native,native)))
+$(foreach otp,$(CI_ERLLVM),$(eval $(call ci_target,$(otp)-erllvm,$(otp)-native,erllvm)))
+
+$(foreach otp,$(filter-out $(ERLANG_OTP),$(CI_OTP)),$(eval $(call kerl_otp_target,$(otp))))
+$(foreach otp,$(filter-out $(ERLANG_HIPE),$(sort $(CI_HIPE) $(CI_ERLLVM))),$(eval $(call kerl_hipe_target,$(otp))))
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Continuous Integration targets:" \
+ " ci Run '$(MAKE) tests' on all configured Erlang versions." \
+ "" \
+		"The CI_OTP variable must be defined with the Erlang versions" \
+		"to test. For example: CI_OTP = OTP-17.3.4 OTP-17.5.3"
+
+endif
+
+# Copyright (c) 2020, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifdef CONCUERROR_TESTS
+
+.PHONY: concuerror distclean-concuerror
+
+# Configuration
+
+CONCUERROR_LOGS_DIR ?= $(CURDIR)/logs
+CONCUERROR_OPTS ?=
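+
+# CONCUERROR_TESTS is a list of module:function pairs, for example
+# (assumed names): CONCUERROR_TESTS = my_mod:test1 my_mod:test2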
+
+# Core targets.
+
+check:: concuerror
+
+ifndef KEEP_LOGS
+distclean:: distclean-concuerror
+endif
+
+# Plugin-specific targets.
+
+$(ERLANG_MK_TMP)/Concuerror/bin/concuerror: | $(ERLANG_MK_TMP)
+ $(verbose) git clone https://github.com/parapluu/Concuerror $(ERLANG_MK_TMP)/Concuerror
+ $(verbose) $(MAKE) -C $(ERLANG_MK_TMP)/Concuerror
+
+$(CONCUERROR_LOGS_DIR):
+ $(verbose) mkdir -p $(CONCUERROR_LOGS_DIR)
+
+define concuerror_html_report
+<!DOCTYPE html>
+<html lang="en">
+<head>
+<meta charset="utf-8">
+<title>Concuerror HTML report</title>
+</head>
+<body>
+<h1>Concuerror HTML report</h1>
+<p>Generated on $(concuerror_date)</p>
+<ul>
+$(foreach t,$(concuerror_targets),<li><a href="$(t).txt">$(t)</a></li>)
+</ul>
+</body>
+</html>
+endef
+
+concuerror: $(addprefix concuerror-,$(subst :,-,$(CONCUERROR_TESTS)))
+ $(eval concuerror_date := $(shell date))
+ $(eval concuerror_targets := $^)
+ $(verbose) $(call core_render,concuerror_html_report,$(CONCUERROR_LOGS_DIR)/concuerror.html)
+
+define concuerror_target
+.PHONY: concuerror-$1-$2
+
+concuerror-$1-$2: test-build | $(ERLANG_MK_TMP)/Concuerror/bin/concuerror $(CONCUERROR_LOGS_DIR)
+ $(ERLANG_MK_TMP)/Concuerror/bin/concuerror \
+ --pa $(CURDIR)/ebin --pa $(TEST_DIR) \
+ -o $(CONCUERROR_LOGS_DIR)/concuerror-$1-$2.txt \
+ $$(CONCUERROR_OPTS) -m $1 -t $2
+endef
+
+$(foreach test,$(CONCUERROR_TESTS),$(eval $(call concuerror_target,$(firstword $(subst :, ,$(test))),$(lastword $(subst :, ,$(test))))))
+
+distclean-concuerror:
+ $(gen_verbose) rm -rf $(CONCUERROR_LOGS_DIR)
+
+endif
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: ct apps-ct distclean-ct
+
+# Configuration.
+
+CT_OPTS ?=
+
+ifneq ($(wildcard $(TEST_DIR)),)
+ifndef CT_SUITES
+CT_SUITES := $(sort $(subst _SUITE.erl,,$(notdir $(call core_find,$(TEST_DIR)/,*_SUITE.erl))))
+endif
+endif
+CT_SUITES ?=
+CT_LOGS_DIR ?= $(CURDIR)/logs
+
+# Core targets.
+
+tests:: ct
+
+ifndef KEEP_LOGS
+distclean:: distclean-ct
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Common_test targets:" \
+ " ct Run all the common_test suites for this project" \
+ "" \
+		"Each of your common_test suites has an associated target." \
+		"A suite named http_SUITE can be run using the ct-http target."
+
+# Plugin-specific targets.
+
+CT_RUN = ct_run \
+ -no_auto_compile \
+ -noinput \
+ -pa $(CURDIR)/ebin $(TEST_DIR) \
+ -dir $(TEST_DIR) \
+ -logdir $(CT_LOGS_DIR)
+
+ifeq ($(CT_SUITES),)
+ct: $(if $(IS_APP)$(ROOT_DIR),,apps-ct)
+else
+# We do not run tests if we are in an apps/* with no test directory.
+ifneq ($(IS_APP)$(wildcard $(TEST_DIR)),1)
+ct: test-build $(if $(IS_APP)$(ROOT_DIR),,apps-ct)
+ $(verbose) mkdir -p $(CT_LOGS_DIR)
+ $(gen_verbose) $(CT_RUN) -sname ct_$(PROJECT) -suite $(addsuffix _SUITE,$(CT_SUITES)) $(CT_OPTS)
+endif
+endif
+
+ifneq ($(ALL_APPS_DIRS),)
+define ct_app_target
+apps-ct-$1: test-build
+ $$(MAKE) -C $1 ct IS_APP=1
+endef
+
+$(foreach app,$(ALL_APPS_DIRS),$(eval $(call ct_app_target,$(app))))
+
+apps-ct: $(addprefix apps-ct-,$(ALL_APPS_DIRS))
+endif
+
+ifdef t
+ifeq (,$(findstring :,$t))
+CT_EXTRA = -group $t
+else
+t_words = $(subst :, ,$t)
+CT_EXTRA = -group $(firstword $(t_words)) -case $(lastword $(t_words))
+endif
+else
+ifdef c
+CT_EXTRA = -case $c
+else
+CT_EXTRA =
+endif
+endif
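+
+# For example (assumed groups and cases): `make ct-http t=chunked` adds
+# "-group chunked"; `make ct-http t=chunked:crlf` adds
+# "-group chunked -case crlf"; `make ct-http c=crlf` adds "-case crlf".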
+
+define ct_suite_target
+ct-$(1): test-build
+ $(verbose) mkdir -p $(CT_LOGS_DIR)
+ $(gen_verbose_esc) $(CT_RUN) -sname ct_$(PROJECT) -suite $(addsuffix _SUITE,$(1)) $(CT_EXTRA) $(CT_OPTS)
+endef
+
+$(foreach test,$(CT_SUITES),$(eval $(call ct_suite_target,$(test))))
+
+distclean-ct:
+ $(gen_verbose) rm -rf $(CT_LOGS_DIR)
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: plt distclean-plt dialyze
+
+# Configuration.
+
+DIALYZER_PLT ?= $(CURDIR)/.$(PROJECT).plt
+export DIALYZER_PLT
+
+PLT_APPS ?=
+DIALYZER_DIRS ?= --src -r $(wildcard src) $(ALL_APPS_DIRS)
+DIALYZER_OPTS ?= -Werror_handling -Wrace_conditions -Wunmatched_returns # -Wunderspecs
+DIALYZER_PLT_OPTS ?=
+
+# Core targets.
+
+check:: dialyze
+
+distclean:: distclean-plt
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Dialyzer targets:" \
+ " plt Build a PLT file for this project" \
+ " dialyze Analyze the project using Dialyzer"
+
+# Plugin-specific targets.
+
+define filter_opts.erl
+ Opts = init:get_plain_arguments(),
+ {Filtered, _} = lists:foldl(fun
+ (O, {Os, true}) -> {[O|Os], false};
+ (O = "-D", {Os, _}) -> {[O|Os], true};
+ (O = [\\$$-, \\$$D, _ | _], {Os, _}) -> {[O|Os], false};
+ (O = "-I", {Os, _}) -> {[O|Os], true};
+ (O = [\\$$-, \\$$I, _ | _], {Os, _}) -> {[O|Os], false};
+ (O = "-pa", {Os, _}) -> {[O|Os], true};
+ (_, Acc) -> Acc
+ end, {[], false}, Opts),
+ io:format("~s~n", [string:join(lists:reverse(Filtered), " ")]),
+ halt().
+endef
+
+# DIALYZER_PLT is a variable understood directly by Dialyzer.
+#
+# We append the path to erts at the end of the PLT. This works
+# because the PLT file is in the external term format and the
+# function binary_to_term/1 ignores any trailing data.
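+#
+# A minimal sketch of that property, in an assumed Erlang shell session
+# (not part of erlang.mk):
+#
+#     1> Bin = term_to_binary({dialyzer, plt}).
+#     2> binary_to_term(<<Bin/binary, "/usr/lib/erlang/lib/erts-11.0">>).
+#     {dialyzer,plt}
+#
+# The trailing path is ignored when the PLT is read, yet `tail -n1` can
+# still recover it, which is what the dialyze target below relies on.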
+$(DIALYZER_PLT): deps app
+ $(eval DEPS_LOG := $(shell test -f $(ERLANG_MK_TMP)/deps.log && \
+ while read p; do test -d $$p/ebin && echo $$p/ebin; done <$(ERLANG_MK_TMP)/deps.log))
+ $(verbose) dialyzer --build_plt $(DIALYZER_PLT_OPTS) --apps \
+ erts kernel stdlib $(PLT_APPS) $(OTP_DEPS) $(LOCAL_DEPS) $(DEPS_LOG) || test $$? -eq 2
+ $(verbose) $(ERL) -eval 'io:format("~n~s~n", [code:lib_dir(erts)]), halt().' >> $@
+
+plt: $(DIALYZER_PLT)
+
+distclean-plt:
+ $(gen_verbose) rm -f $(DIALYZER_PLT)
+
+ifneq ($(wildcard $(DIALYZER_PLT)),)
+dialyze: $(if $(filter --src,$(DIALYZER_DIRS)),,deps app)
+ $(verbose) if ! tail -n1 $(DIALYZER_PLT) | \
+ grep -q "^`$(ERL) -eval 'io:format("~s", [code:lib_dir(erts)]), halt().'`$$"; then \
+ rm $(DIALYZER_PLT); \
+ $(MAKE) plt; \
+ fi
+else
+dialyze: $(DIALYZER_PLT)
+endif
+ $(verbose) dialyzer --no_native `$(ERL) \
+ -eval "$(subst $(newline),,$(call escape_dquotes,$(call filter_opts.erl)))" \
+ -extra $(ERLC_OPTS)` $(DIALYZER_DIRS) $(DIALYZER_OPTS) $(if $(wildcard ebin/),-pa ebin/)
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-edoc edoc
+
+# Configuration.
+
+EDOC_OPTS ?=
+EDOC_SRC_DIRS ?=
+EDOC_OUTPUT ?= doc
+
+define edoc.erl
+ SrcPaths = lists:foldl(fun(P, Acc) ->
+ filelib:wildcard(atom_to_list(P) ++ "/{src,c_src}") ++ Acc
+ end, [], [$(call comma_list,$(patsubst %,'%',$(call core_native_path,$(EDOC_SRC_DIRS))))]),
+ DefaultOpts = [{dir, "$(EDOC_OUTPUT)"}, {source_path, SrcPaths}, {subpackages, false}],
+ edoc:application($(1), ".", [$(2)] ++ DefaultOpts),
+ halt(0).
+endef
+
+# Core targets.
+
+ifneq ($(strip $(EDOC_SRC_DIRS)$(wildcard doc/overview.edoc)),)
+docs:: edoc
+endif
+
+distclean:: distclean-edoc
+
+# Plugin-specific targets.
+
+edoc: distclean-edoc doc-deps
+ $(gen_verbose) $(call erlang,$(call edoc.erl,$(PROJECT),$(EDOC_OPTS)))
+
+distclean-edoc:
+ $(gen_verbose) rm -f $(EDOC_OUTPUT)/*.css $(EDOC_OUTPUT)/*.html $(EDOC_OUTPUT)/*.png $(EDOC_OUTPUT)/edoc-info
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Configuration.
+
+DTL_FULL_PATH ?=
+DTL_PATH ?= templates/
+DTL_PREFIX ?=
+DTL_SUFFIX ?= _dtl
+DTL_OPTS ?=
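+
+# Module naming example (assumed template templates/users/index.dtl):
+# with DTL_FULL_PATH unset, the generated module is index_dtl; with
+# DTL_FULL_PATH set, it becomes users_index_dtl. DTL_PREFIX, when set,
+# is prepended to either name.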
+
+# Verbosity.
+
+dtl_verbose_0 = @echo " DTL " $(filter %.dtl,$(?F));
+dtl_verbose = $(dtl_verbose_$(V))
+
+# Core targets.
+
+DTL_PATH := $(abspath $(DTL_PATH))
+DTL_FILES := $(sort $(call core_find,$(DTL_PATH),*.dtl))
+
+ifneq ($(DTL_FILES),)
+
+DTL_NAMES = $(addprefix $(DTL_PREFIX),$(addsuffix $(DTL_SUFFIX),$(DTL_FILES:$(DTL_PATH)/%.dtl=%)))
+DTL_MODULES = $(if $(DTL_FULL_PATH),$(subst /,_,$(DTL_NAMES)),$(notdir $(DTL_NAMES)))
+BEAM_FILES += $(addsuffix .beam,$(addprefix ebin/,$(DTL_MODULES)))
+
+ifneq ($(words $(DTL_FILES)),0)
+# Rebuild templates when the Makefile changes.
+$(ERLANG_MK_TMP)/last-makefile-change-erlydtl: $(MAKEFILE_LIST) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(DTL_FILES); \
+ fi
+ $(verbose) touch $@
+
+ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change-erlydtl
+endif
+
+define erlydtl_compile.erl
+ [begin
+ Module0 = case "$(strip $(DTL_FULL_PATH))" of
+ "" ->
+ filename:basename(F, ".dtl");
+ _ ->
+ "$(call core_native_path,$(DTL_PATH))/" ++ F2 = filename:rootname(F, ".dtl"),
+ re:replace(F2, "/", "_", [{return, list}, global])
+ end,
+ Module = list_to_atom("$(DTL_PREFIX)" ++ string:to_lower(Module0) ++ "$(DTL_SUFFIX)"),
+ case erlydtl:compile(F, Module, [$(DTL_OPTS)] ++ [{out_dir, "ebin/"}, return_errors]) of
+ ok -> ok;
+ {ok, _} -> ok
+ end
+ end || F <- string:tokens("$(1)", " ")],
+ halt().
+endef
+
+ebin/$(PROJECT).app:: $(DTL_FILES) | ebin/
+ $(if $(strip $?),\
+ $(dtl_verbose) $(call erlang,$(call erlydtl_compile.erl,$(call core_native_path,$?)),\
+ -pa ebin/))
+
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, Dave Cottlehuber <dch@skunkwerks.at>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-escript escript escript-zip
+
+# Configuration.
+
+ESCRIPT_NAME ?= $(PROJECT)
+ESCRIPT_FILE ?= $(ESCRIPT_NAME)
+
+ESCRIPT_SHEBANG ?= /usr/bin/env escript
+ESCRIPT_COMMENT ?= This is an -*- erlang -*- file
+ESCRIPT_EMU_ARGS ?= -escript main $(ESCRIPT_NAME)
+
+ESCRIPT_ZIP ?= 7z a -tzip -mx=9 -mtc=off $(if $(filter-out 0,$(V)),,> /dev/null)
+ESCRIPT_ZIP_FILE ?= $(ERLANG_MK_TMP)/escript.zip
+
+# Core targets.
+
+distclean:: distclean-escript
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Escript targets:" \
+ " escript Build an executable escript archive" \
+		" escript Build an executable escript archive"
+# Plugin-specific targets.
+
+escript-zip:: FULL=1
+escript-zip:: deps app
+ $(verbose) mkdir -p $(dir $(ESCRIPT_ZIP))
+ $(verbose) rm -f $(ESCRIPT_ZIP_FILE)
+ $(gen_verbose) cd .. && $(ESCRIPT_ZIP) $(ESCRIPT_ZIP_FILE) $(PROJECT)/ebin/*
+ifneq ($(DEPS),)
+ $(verbose) cd $(DEPS_DIR) && $(ESCRIPT_ZIP) $(ESCRIPT_ZIP_FILE) \
+ $(subst $(DEPS_DIR)/,,$(addsuffix /*,$(wildcard \
+ $(addsuffix /ebin,$(shell cat $(ERLANG_MK_TMP)/deps.log)))))
+endif
+
+escript:: escript-zip
+ $(gen_verbose) printf "%s\n" \
+ "#!$(ESCRIPT_SHEBANG)" \
+ "%% $(ESCRIPT_COMMENT)" \
+ "%%! $(ESCRIPT_EMU_ARGS)" > $(ESCRIPT_FILE)
+ $(verbose) cat $(ESCRIPT_ZIP_FILE) >> $(ESCRIPT_FILE)
+ $(verbose) chmod +x $(ESCRIPT_FILE)
+
+distclean-escript:
+ $(gen_verbose) rm -f $(ESCRIPT_FILE)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, Enrique Fernandez <enrique.fernandez@erlang-solutions.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: eunit apps-eunit
+
+# Configuration
+
+EUNIT_OPTS ?=
+EUNIT_ERL_OPTS ?=
+
+# Core targets.
+
+tests:: eunit
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "EUnit targets:" \
+ " eunit Run all the EUnit tests for this project"
+
+# Plugin-specific targets.
+
+define eunit.erl
+ $(call cover.erl)
+ CoverSetup(),
+ case eunit:test($1, [$(EUNIT_OPTS)]) of
+ ok -> ok;
+ error -> halt(2)
+ end,
+ CoverExport("$(call core_native_path,$(COVER_DATA_DIR))/eunit.coverdata"),
+ halt()
+endef
+
+EUNIT_ERL_OPTS += -pa $(TEST_DIR) $(CURDIR)/ebin
+
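+# For example (assumed module and test names): `make eunit t=my_mod`
+# runs eunit:test(['my_mod']), while `make eunit t=my_mod:some_test`
+# runs eunit:test(fun my_mod:some_test/0).
+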
+ifdef t
+ifeq (,$(findstring :,$(t)))
+eunit: test-build cover-data-dir
+ $(gen_verbose) $(call erlang,$(call eunit.erl,['$(t)']),$(EUNIT_ERL_OPTS))
+else
+eunit: test-build cover-data-dir
+ $(gen_verbose) $(call erlang,$(call eunit.erl,fun $(t)/0),$(EUNIT_ERL_OPTS))
+endif
+else
+EUNIT_EBIN_MODS = $(notdir $(basename $(ERL_FILES) $(BEAM_FILES)))
+EUNIT_TEST_MODS = $(notdir $(basename $(call core_find,$(TEST_DIR)/,*.erl)))
+
+EUNIT_MODS = $(foreach mod,$(EUNIT_EBIN_MODS) $(filter-out \
+ $(patsubst %,%_tests,$(EUNIT_EBIN_MODS)),$(EUNIT_TEST_MODS)),'$(mod)')
+
+eunit: test-build $(if $(IS_APP)$(ROOT_DIR),,apps-eunit) cover-data-dir
+ifneq ($(wildcard src/ $(TEST_DIR)),)
+ $(gen_verbose) $(call erlang,$(call eunit.erl,[$(call comma_list,$(EUNIT_MODS))]),$(EUNIT_ERL_OPTS))
+endif
+
+ifneq ($(ALL_APPS_DIRS),)
+apps-eunit: test-build
+ $(verbose) eunit_retcode=0 ; for app in $(ALL_APPS_DIRS); do $(MAKE) -C $$app eunit IS_APP=1; \
+ [ $$? -ne 0 ] && eunit_retcode=1 ; done ; \
+ exit $$eunit_retcode
+endif
+endif
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter proper,$(DEPS) $(TEST_DEPS)),proper)
+.PHONY: proper
+
+# Targets.
+
+tests:: proper
+
+define proper_check.erl
+ $(call cover.erl)
+ code:add_pathsa([
+ "$(call core_native_path,$(CURDIR)/ebin)",
+ "$(call core_native_path,$(DEPS_DIR)/*/ebin)",
+ "$(call core_native_path,$(TEST_DIR))"]),
+ Module = fun(M) ->
+ [true] =:= lists:usort([
+ case atom_to_list(F) of
+ "prop_" ++ _ ->
+ io:format("Testing ~p:~p/0~n", [M, F]),
+ proper:quickcheck(M:F(), nocolors);
+ _ ->
+ true
+ end
+ || {F, 0} <- M:module_info(exports)])
+ end,
+ try begin
+ CoverSetup(),
+ Res = case $(1) of
+ all -> [true] =:= lists:usort([Module(M) || M <- [$(call comma_list,$(3))]]);
+ module -> Module($(2));
+ function -> proper:quickcheck($(2), nocolors)
+ end,
+ CoverExport("$(COVER_DATA_DIR)/proper.coverdata"),
+ Res
+ end of
+ true -> halt(0);
+ _ -> halt(1)
+ catch error:undef ->
+ io:format("Undefined property or module?~n~p~n", [erlang:get_stacktrace()]),
+ halt(0)
+ end.
+endef
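+
+# A property module picked up by the target below might look like this
+# (an assumed example, not part of erlang.mk):
+#
+#     -module(my_props).
+#     -include_lib("proper/include/proper.hrl").
+#     -export([prop_reverse_twice/0]).
+#
+#     prop_reverse_twice() ->
+#         ?FORALL(L, list(integer()),
+#                 lists:reverse(lists:reverse(L)) =:= L).
+#
+# Every exported zero-arity function whose name starts with "prop_" is
+# run through proper:quickcheck/2.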
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+proper: test-build cover-data-dir
+ $(verbose) $(call erlang,$(call proper_check.erl,module,$(t)))
+else
+proper: test-build cover-data-dir
+ $(verbose) echo Testing $(t)/0
+ $(verbose) $(call erlang,$(call proper_check.erl,function,$(t)()))
+endif
+else
+proper: test-build cover-data-dir
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(wildcard ebin/*.beam) $(call core_find,$(TEST_DIR)/,*.beam))))))
+ $(gen_verbose) $(call erlang,$(call proper_check.erl,all,undefined,$(MODULES)))
+endif
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Verbosity.
+
+proto_verbose_0 = @echo " PROTO " $(filter %.proto,$(?F));
+proto_verbose = $(proto_verbose_$(V))
+
+# Core targets.
+
+ifneq ($(wildcard src/),)
+ifneq ($(filter gpb protobuffs,$(BUILD_DEPS) $(DEPS)),)
+PROTO_FILES := $(filter %.proto,$(ALL_SRC_FILES))
+ERL_FILES += $(addprefix src/,$(patsubst %.proto,%_pb.erl,$(notdir $(PROTO_FILES))))
+
+ifeq ($(PROTO_FILES),)
+$(ERLANG_MK_TMP)/last-makefile-change-protobuffs:
+ $(verbose) :
+else
+# Rebuild proto files when the Makefile changes.
+# We exclude $(PROJECT).d to avoid a circular dependency.
+$(ERLANG_MK_TMP)/last-makefile-change-protobuffs: $(filter-out $(PROJECT).d,$(MAKEFILE_LIST)) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(PROTO_FILES); \
+ fi
+ $(verbose) touch $@
+
+$(PROJECT).d:: $(ERLANG_MK_TMP)/last-makefile-change-protobuffs
+endif
+
+ifeq ($(filter gpb,$(BUILD_DEPS) $(DEPS)),)
+define compile_proto.erl
+ [begin
+ protobuffs_compile:generate_source(F, [
+ {output_include_dir, "./include"},
+ {output_src_dir, "./src"}])
+ end || F <- string:tokens("$1", " ")],
+ halt().
+endef
+else
+define compile_proto.erl
+ [begin
+ gpb_compile:file(F, [
+ {include_as_lib, true},
+ {module_name_suffix, "_pb"},
+ {o_hrl, "./include"},
+ {o_erl, "./src"}])
+ end || F <- string:tokens("$1", " ")],
+ halt().
+endef
+endif
+
+ifneq ($(PROTO_FILES),)
+$(PROJECT).d:: $(PROTO_FILES)
+ $(verbose) mkdir -p ebin/ include/
+ $(if $(strip $?),$(proto_verbose) $(call erlang,$(call compile_proto.erl,$?)))
+endif
+endif
+endif
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: relx-rel relx-relup distclean-relx-rel run
+
+# Configuration.
+
+RELX ?= $(ERLANG_MK_TMP)/relx
+RELX_CONFIG ?= $(CURDIR)/relx.config
+
+RELX_URL ?= https://erlang.mk/res/relx-v3.27.0
+RELX_OPTS ?=
+RELX_OUTPUT_DIR ?= _rel
+RELX_REL_EXT ?=
+RELX_TAR ?= 1
+
+ifdef SFX
+ RELX_TAR = 1
+endif
+
+ifeq ($(firstword $(RELX_OPTS)),-o)
+ RELX_OUTPUT_DIR = $(word 2,$(RELX_OPTS))
+else
+ RELX_OPTS += -o $(RELX_OUTPUT_DIR)
+endif
+
+# Core targets.
+
+ifeq ($(IS_DEP),)
+ifneq ($(wildcard $(RELX_CONFIG)),)
+rel:: relx-rel
+
+relup:: relx-relup
+endif
+endif
+
+distclean:: distclean-relx-rel
+
+# Plugin-specific targets.
+
+$(RELX): | $(ERLANG_MK_TMP)
+ $(gen_verbose) $(call core_http_get,$(RELX),$(RELX_URL))
+ $(verbose) chmod +x $(RELX)
+
+relx-rel: $(RELX) rel-deps app
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) release
+ $(verbose) $(MAKE) relx-post-rel
+ifeq ($(RELX_TAR),1)
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) tar
+endif
+
+relx-relup: $(RELX) rel-deps app
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) release
+ $(MAKE) relx-post-rel
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) relup $(if $(filter 1,$(RELX_TAR)),tar)
+
+distclean-relx-rel:
+ $(gen_verbose) rm -rf $(RELX_OUTPUT_DIR)
+
+# Default hooks.
+relx-post-rel::
+ $(verbose) :
+
+# Run target.
+
+ifeq ($(wildcard $(RELX_CONFIG)),)
+run::
+else
+
+define get_relx_release.erl
+ {ok, Config} = file:consult("$(call core_native_path,$(RELX_CONFIG))"),
+ {release, {Name, Vsn0}, _} = lists:keyfind(release, 1, Config),
+ Vsn = case Vsn0 of
+ {cmd, Cmd} -> os:cmd(Cmd);
+ semver -> "";
+ {semver, _} -> "";
+ VsnStr -> Vsn0
+ end,
+ Extended = case lists:keyfind(extended_start_script, 1, Config) of
+ {_, true} -> "1";
+ _ -> ""
+ end,
+ io:format("~s ~s ~s", [Name, Vsn, Extended]),
+ halt(0).
+endef
+
+RELX_REL := $(shell $(call erlang,$(get_relx_release.erl)))
+RELX_REL_NAME := $(word 1,$(RELX_REL))
+RELX_REL_VSN := $(word 2,$(RELX_REL))
+RELX_REL_CMD := $(if $(word 3,$(RELX_REL)),console)
+
+ifeq ($(PLATFORM),msys2)
+RELX_REL_EXT := .cmd
+endif
+
+run:: all
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) $(RELX_REL_CMD)
+
+ifdef RELOAD
+rel::
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) ping
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) \
+ eval "io:format(\"~p~n\", [c:lm()])"
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Relx targets:" \
+ " run Compile the project, build the release and run it"
+
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, M Robert Martin <rob@version2beta.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: shell
+
+# Configuration.
+
+SHELL_ERL ?= erl
+SHELL_PATHS ?= $(CURDIR)/ebin $(TEST_DIR)
+SHELL_OPTS ?=
+
+ALL_SHELL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(SHELL_DEPS))
+
+# Core targets
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Shell targets:" \
+		" shell Run an Erlang shell with SHELL_OPTS or a reasonable default"
+
+# Plugin-specific targets.
+
+$(foreach dep,$(SHELL_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+build-shell-deps:
+else
+build-shell-deps: $(ALL_SHELL_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_SHELL_DEPS_DIRS) ; do \
+ if [ -z "$(strip $(FULL))" ] && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ else \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ fi \
+ done
+endif
+
+shell:: build-shell-deps
+ $(gen_verbose) $(SHELL_ERL) -pa $(SHELL_PATHS) $(SHELL_OPTS)
+
+# Copyright 2017, Stanislaw Klekot <dozzie@jarowit.net>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-sphinx sphinx
+
+# Configuration.
+
+SPHINX_BUILD ?= sphinx-build
+SPHINX_SOURCE ?= doc
+SPHINX_CONFDIR ?=
+SPHINX_FORMATS ?= html
+SPHINX_DOCTREES ?= $(ERLANG_MK_TMP)/sphinx.doctrees
+SPHINX_OPTS ?=
+
+#sphinx_html_opts =
+#sphinx_html_output = html
+#sphinx_man_opts =
+#sphinx_man_output = man
+#sphinx_latex_opts =
+#sphinx_latex_output = latex
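+
+# For example (assumed formats): SPHINX_FORMATS = html man builds both,
+# writing to ./html and ./man unless one of the sphinx_*_output
+# variables above overrides the destination.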
+
+# Helpers.
+
+sphinx_build_0 = @echo " SPHINX" $1; $(SPHINX_BUILD) -N -q
+sphinx_build_1 = $(SPHINX_BUILD) -N
+sphinx_build_2 = set -x; $(SPHINX_BUILD)
+sphinx_build = $(sphinx_build_$(V))
+
+define sphinx.build
+$(call sphinx_build,$1) -b $1 -d $(SPHINX_DOCTREES) $(if $(SPHINX_CONFDIR),-c $(SPHINX_CONFDIR)) $(SPHINX_OPTS) $(sphinx_$1_opts) -- $(SPHINX_SOURCE) $(call sphinx.output,$1)
+
+endef
+
+define sphinx.output
+$(if $(sphinx_$1_output),$(sphinx_$1_output),$1)
+endef
+
+# Targets.
+
+ifneq ($(wildcard $(if $(SPHINX_CONFDIR),$(SPHINX_CONFDIR),$(SPHINX_SOURCE))/conf.py),)
+docs:: sphinx
+distclean:: distclean-sphinx
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Sphinx targets:" \
+ " sphinx Generate Sphinx documentation." \
+ "" \
+		"ReST sources and the 'conf.py' file are expected in the directory" \
+		"pointed to by SPHINX_SOURCE ('doc' by default). SPHINX_FORMATS lists" \
+		"the formats to build (only the 'html' format is generated by default);" \
+		'the target directory can be set via sphinx_$${format}_output, e.g.:' \
+		'sphinx_html_output = output/html' \
+		"Additional Sphinx options can be set in SPHINX_OPTS."
+
+# Plugin-specific targets.
+
+sphinx:
+ $(foreach F,$(SPHINX_FORMATS),$(call sphinx.build,$F))
+
+distclean-sphinx:
+ $(gen_verbose) rm -rf $(filter-out $(SPHINX_SOURCE),$(foreach F,$(SPHINX_FORMATS),$(call sphinx.output,$F)))
+
+# Copyright (c) 2017, Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: show-ERL_LIBS show-ERLC_OPTS show-TEST_ERLC_OPTS
+
+show-ERL_LIBS:
+ @echo $(ERL_LIBS)
+
+show-ERLC_OPTS:
+ @$(foreach opt,$(ERLC_OPTS) -pa ebin -I include,echo "$(opt)";)
+
+show-TEST_ERLC_OPTS:
+ @$(foreach opt,$(TEST_ERLC_OPTS) -pa ebin -I include,echo "$(opt)";)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter triq,$(DEPS) $(TEST_DEPS)),triq)
+.PHONY: triq
+
+# Targets.
+
+tests:: triq
+
+define triq_check.erl
+ $(call cover.erl)
+ code:add_pathsa([
+ "$(call core_native_path,$(CURDIR)/ebin)",
+ "$(call core_native_path,$(DEPS_DIR)/*/ebin)",
+ "$(call core_native_path,$(TEST_DIR))"]),
+ try begin
+ CoverSetup(),
+ Res = case $(1) of
+ all -> [true] =:= lists:usort([triq:check(M) || M <- [$(call comma_list,$(3))]]);
+ module -> triq:check($(2));
+ function -> triq:check($(2))
+ end,
+ CoverExport("$(COVER_DATA_DIR)/triq.coverdata"),
+ Res
+ end of
+ true -> halt(0);
+ _ -> halt(1)
+ catch error:undef ->
+ io:format("Undefined property or module?~n~p~n", [erlang:get_stacktrace()]),
+ halt(0)
+ end.
+endef
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+triq: test-build cover-data-dir
+ $(verbose) $(call erlang,$(call triq_check.erl,module,$(t)))
+else
+triq: test-build cover-data-dir
+ $(verbose) echo Testing $(t)/0
+ $(verbose) $(call erlang,$(call triq_check.erl,function,$(t)()))
+endif
+else
+triq: test-build cover-data-dir
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(wildcard ebin/*.beam) $(call core_find,$(TEST_DIR)/,*.beam))))))
+ $(gen_verbose) $(call erlang,$(call triq_check.erl,all,undefined,$(MODULES)))
+endif
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Erlang Solutions Ltd.
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: xref distclean-xref
+
+# Configuration.
+
+ifeq ($(XREF_CONFIG),)
+ XREFR_ARGS :=
+else
+ XREFR_ARGS := -c $(XREF_CONFIG)
+endif
+
+XREFR ?= $(CURDIR)/xrefr
+export XREFR
+
+XREFR_URL ?= https://github.com/inaka/xref_runner/releases/download/1.1.0/xrefr
+
+# Core targets.
+
+help::
+ $(verbose) printf '%s\n' '' \
+ 'Xref targets:' \
+		' xref Run Xrefr using $$XREF_CONFIG as the config file if defined'
+
+distclean:: distclean-xref
+
+# Plugin-specific targets.
+
+$(XREFR):
+ $(gen_verbose) $(call core_http_get,$(XREFR),$(XREFR_URL))
+ $(verbose) chmod +x $(XREFR)
+
+xref: deps app $(XREFR)
+ $(gen_verbose) $(XREFR) $(XREFR_ARGS)
+
+distclean-xref:
+ $(gen_verbose) rm -rf $(XREFR)
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Viktor Söderqvist <viktor@zuiderkwast.se>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+COVER_REPORT_DIR ?= cover
+COVER_DATA_DIR ?= $(COVER_REPORT_DIR)
+
+ifdef COVER
+COVER_APPS ?= $(notdir $(ALL_APPS_DIRS))
+COVER_DEPS ?=
+endif
+
+# Code coverage for Common Test.
+
+ifdef COVER
+ifdef CT_RUN
+ifneq ($(wildcard $(TEST_DIR)),)
+test-build:: $(TEST_DIR)/ct.cover.spec
+
+$(TEST_DIR)/ct.cover.spec: cover-data-dir
+ $(gen_verbose) printf "%s\n" \
+ "{incl_app, '$(PROJECT)', details}." \
+ "{incl_dirs, '$(PROJECT)', [\"$(call core_native_path,$(CURDIR)/ebin)\" \
+ $(foreach a,$(COVER_APPS),$(comma) \"$(call core_native_path,$(APPS_DIR)/$a/ebin)\") \
+ $(foreach d,$(COVER_DEPS),$(comma) \"$(call core_native_path,$(DEPS_DIR)/$d/ebin)\")]}." \
+ '{export,"$(call core_native_path,$(abspath $(COVER_DATA_DIR))/ct.coverdata)"}.' > $@
+
+CT_RUN += -cover $(TEST_DIR)/ct.cover.spec
+endif
+endif
+endif
+
+# Code coverage for other tools.
+
+ifdef COVER
+define cover.erl
+ CoverSetup = fun() ->
+ Dirs = ["$(call core_native_path,$(CURDIR)/ebin)"
+ $(foreach a,$(COVER_APPS),$(comma) "$(call core_native_path,$(APPS_DIR)/$a/ebin)")
+ $(foreach d,$(COVER_DEPS),$(comma) "$(call core_native_path,$(DEPS_DIR)/$d/ebin)")],
+ [begin
+ case filelib:is_dir(Dir) of
+ false -> false;
+ true ->
+ case cover:compile_beam_directory(Dir) of
+ {error, _} -> halt(1);
+ _ -> true
+ end
+ end
+ end || Dir <- Dirs]
+ end,
+ CoverExport = fun(Filename) -> cover:export(Filename) end,
+endef
+else
+define cover.erl
+ CoverSetup = fun() -> ok end,
+ CoverExport = fun(_) -> ok end,
+endef
+endif
+
+# Core targets
+
+ifdef COVER
+ifneq ($(COVER_REPORT_DIR),)
+tests::
+ $(verbose) $(MAKE) --no-print-directory cover-report
+endif
+
+cover-data-dir: | $(COVER_DATA_DIR)
+
+$(COVER_DATA_DIR):
+ $(verbose) mkdir -p $(COVER_DATA_DIR)
+else
+cover-data-dir:
+endif
+
+clean:: coverdata-clean
+
+ifneq ($(COVER_REPORT_DIR),)
+distclean:: cover-report-clean
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Cover targets:" \
+		" cover-report Generate an HTML coverage report from previously collected" \
+ " cover data." \
+ " all.coverdata Merge all coverdata files into all.coverdata." \
+ "" \
+ "If COVER=1 is set, coverage data is generated by the targets eunit and ct. The" \
+		"target tests additionally generates an HTML coverage report from the combined" \
+ "coverdata files from each of these testing tools. HTML reports can be disabled" \
+ "by setting COVER_REPORT_DIR to empty."
+
+# Plugin specific targets
+
+COVERDATA = $(filter-out $(COVER_DATA_DIR)/all.coverdata,$(wildcard $(COVER_DATA_DIR)/*.coverdata))
+
+.PHONY: coverdata-clean
+coverdata-clean:
+ $(gen_verbose) rm -f $(COVER_DATA_DIR)/*.coverdata $(TEST_DIR)/ct.cover.spec
+
+# Merge all coverdata files into one.
+define cover_export.erl
+ $(foreach f,$(COVERDATA),cover:import("$(f)") == ok orelse halt(1),)
+ cover:export("$(COVER_DATA_DIR)/$@"), halt(0).
+endef
+
+all.coverdata: $(COVERDATA) cover-data-dir
+ $(gen_verbose) $(call erlang,$(cover_export.erl))
+
+# These are only defined if COVER_REPORT_DIR is non-empty. Set COVER_REPORT_DIR to
+# empty if you want the coverdata files but not the HTML report.
+ifneq ($(COVER_REPORT_DIR),)
+
+.PHONY: cover-report-clean cover-report
+
+cover-report-clean:
+ $(gen_verbose) rm -rf $(COVER_REPORT_DIR)
+ifneq ($(COVER_REPORT_DIR),$(COVER_DATA_DIR))
+ $(if $(shell ls -A $(COVER_DATA_DIR)/),,$(verbose) rmdir $(COVER_DATA_DIR))
+endif
+
+ifeq ($(COVERDATA),)
+cover-report:
+else
+
+# Modules which include eunit.hrl always contain one line without coverage
+# because eunit defines test/0 which is never called. We compensate for this.
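+#
+# Concretely (an assumed module): any module containing
+#
+#     -include_lib("eunit/include/eunit.hrl").
+#
+# gets an auto-exported test/0 wrapper from eunit; that single
+# never-executed line is subtracted from its uncovered count below.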
+EUNIT_HRL_MODS = $(subst $(space),$(comma),$(shell \
+ grep -H -e '^\s*-include.*include/eunit\.hrl"' src/*.erl \
+ | sed "s/^src\/\(.*\)\.erl:.*/'\1'/" | uniq))
+
+define cover_report.erl
+ $(foreach f,$(COVERDATA),cover:import("$(f)") == ok orelse halt(1),)
+ Ms = cover:imported_modules(),
+ [cover:analyse_to_file(M, "$(COVER_REPORT_DIR)/" ++ atom_to_list(M)
+ ++ ".COVER.html", [html]) || M <- Ms],
+ Report = [begin {ok, R} = cover:analyse(M, module), R end || M <- Ms],
+ EunitHrlMods = [$(EUNIT_HRL_MODS)],
+ Report1 = [{M, {Y, case lists:member(M, EunitHrlMods) of
+ true -> N - 1; false -> N end}} || {M, {Y, N}} <- Report],
+ TotalY = lists:sum([Y || {_, {Y, _}} <- Report1]),
+ TotalN = lists:sum([N || {_, {_, N}} <- Report1]),
+ Perc = fun(Y, N) -> case Y + N of 0 -> 100; S -> round(100 * Y / S) end end,
+ TotalPerc = Perc(TotalY, TotalN),
+ {ok, F} = file:open("$(COVER_REPORT_DIR)/index.html", [write]),
+ io:format(F, "<!DOCTYPE html><html>~n"
+ "<head><meta charset=\"UTF-8\">~n"
+ "<title>Coverage report</title></head>~n"
+ "<body>~n", []),
+ io:format(F, "<h1>Coverage</h1>~n<p>Total: ~p%</p>~n", [TotalPerc]),
+ io:format(F, "<table><tr><th>Module</th><th>Coverage</th></tr>~n", []),
+ [io:format(F, "<tr><td><a href=\"~p.COVER.html\">~p</a></td>"
+ "<td>~p%</td></tr>~n",
+ [M, M, Perc(Y, N)]) || {M, {Y, N}} <- Report1],
+ How = "$(subst $(space),$(comma)$(space),$(basename $(COVERDATA)))",
+ Date = "$(shell date -u "+%Y-%m-%dT%H:%M:%SZ")",
+ io:format(F, "</table>~n"
+ "<p>Generated using ~s and erlang.mk on ~s.</p>~n"
+ "</body></html>", [How, Date]),
+ halt().
+endef
+
+cover-report:
+ $(verbose) mkdir -p $(COVER_REPORT_DIR)
+ $(gen_verbose) $(call erlang,$(cover_report.erl))
+
+endif
+endif # ifneq ($(COVER_REPORT_DIR),)
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: sfx
+
+ifdef RELX_REL
+ifdef SFX
+
+# Configuration.
+
+SFX_ARCHIVE ?= $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/$(RELX_REL_NAME)-$(RELX_REL_VSN).tar.gz
+SFX_OUTPUT_FILE ?= $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME).run
+
+# Core targets.
+
+rel:: sfx
+
+# Plugin-specific targets.
+
+define sfx_stub
+#!/bin/sh
+
+TMPDIR=`mktemp -d`
+ARCHIVE=`awk '/^__ARCHIVE_BELOW__$$/ {print NR + 1; exit 0;}' $$0`
+FILENAME=$$(basename $$0)
+REL=$${FILENAME%.*}
+
+tail -n+$$ARCHIVE $$0 | tar -xzf - -C $$TMPDIR
+
+$$TMPDIR/bin/$$REL console
+RET=$$?
+
+rm -rf $$TMPDIR
+
+exit $$RET
+
+__ARCHIVE_BELOW__
+endef
+
+sfx:
+ $(verbose) $(call core_render,sfx_stub,$(SFX_OUTPUT_FILE))
+ $(gen_verbose) cat $(SFX_ARCHIVE) >> $(SFX_OUTPUT_FILE)
+ $(verbose) chmod +x $(SFX_OUTPUT_FILE)
+
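+
+# The result is a single self-extracting file (assumed release name):
+#   $ make SFX=1 rel && ./_rel/my_rel.run
+# It unpacks the embedded tarball into a temporary directory, boots the
+# release console from there, and removes the directory on exit.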
+endif
+endif
+
+# Copyright (c) 2013-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# External plugins.
+
+DEP_PLUGINS ?=
+
+$(foreach p,$(DEP_PLUGINS),\
+ $(eval $(if $(findstring /,$p),\
+ $(call core_dep_plugin,$p,$(firstword $(subst /, ,$p))),\
+ $(call core_dep_plugin,$p/plugins.mk,$p))))
+
+help:: help-plugins
+
+help-plugins::
+ $(verbose) :
+
+# Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015-2016, Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Fetch dependencies recursively (without building them).
+
+.PHONY: fetch-deps fetch-doc-deps fetch-rel-deps fetch-test-deps \
+ fetch-shell-deps
+
+.PHONY: $(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+fetch-deps: $(ERLANG_MK_RECURSIVE_DEPS_LIST)
+fetch-doc-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST)
+fetch-rel-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST)
+fetch-test-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST)
+fetch-shell-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+ifneq ($(SKIP_DEPS),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST):
+ $(verbose) :> $@
+else
+# By default, we fetch "normal" dependencies. They are always included,
+# whatever type of dependencies is requested.
+#
+# $(ALL_DEPS_DIRS) includes $(BUILD_DEPS).
+
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_DOC_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_REL_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_TEST_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_SHELL_DEPS_DIRS)
+
+# Allow fetch-deps to be used with $(DEP_TYPES) to fetch multiple types
+# of dependencies with a single target.
+ifneq ($(filter doc,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_DOC_DEPS_DIRS)
+endif
+ifneq ($(filter rel,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_REL_DEPS_DIRS)
+endif
+ifneq ($(filter test,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_TEST_DEPS_DIRS)
+endif
+ifneq ($(filter shell,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_SHELL_DEPS_DIRS)
+endif
+
+ERLANG_MK_RECURSIVE_TMP_LIST := $(abspath $(ERLANG_MK_TMP)/recursive-tmp-deps-$(shell echo $$PPID).log)
+
+$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): | $(ERLANG_MK_TMP)
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_RECURSIVE_TMP_LIST)
+endif
+ $(verbose) touch $(ERLANG_MK_RECURSIVE_TMP_LIST)
+ $(verbose) set -e; for dep in $^ ; do \
+ if ! grep -qs ^$$dep$$ $(ERLANG_MK_RECURSIVE_TMP_LIST); then \
+ echo $$dep >> $(ERLANG_MK_RECURSIVE_TMP_LIST); \
+ if grep -qs -E "^[[:blank:]]*include[[:blank:]]+(erlang\.mk|.*/erlang\.mk|.*ERLANG_MK_FILENAME.*)$$" \
+ $$dep/GNUmakefile $$dep/makefile $$dep/Makefile; then \
+ $(MAKE) -C $$dep fetch-deps \
+ IS_DEP=1 \
+ ERLANG_MK_RECURSIVE_TMP_LIST=$(ERLANG_MK_RECURSIVE_TMP_LIST); \
+ fi \
+ fi \
+ done
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) sort < $(ERLANG_MK_RECURSIVE_TMP_LIST) | \
+ uniq > $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted
+ $(verbose) cmp -s $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted $@ \
+ || mv $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted $@
+ $(verbose) rm -f $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted
+ $(verbose) rm $(ERLANG_MK_RECURSIVE_TMP_LIST)
+endif
+endif # ifneq ($(SKIP_DEPS),)
+
+# List dependencies recursively.
+
+.PHONY: list-deps list-doc-deps list-rel-deps list-test-deps \
+ list-shell-deps
+
+list-deps: $(ERLANG_MK_RECURSIVE_DEPS_LIST)
+list-doc-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST)
+list-rel-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST)
+list-test-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST)
+list-shell-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+list-deps list-doc-deps list-rel-deps list-test-deps list-shell-deps:
+ $(verbose) cat $^
+
+# Query dependencies recursively.
+
+.PHONY: query-deps query-doc-deps query-rel-deps query-test-deps \
+ query-shell-deps
+
+QUERY ?= name fetch_method repo version
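+
+# QUERY selects the fields printed for each dependency, for example
+# (assumed invocation): make query-deps QUERY="name repo"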
+
+define query_target
+$(1): $(2) clean-tmp-query.log
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(4)
+endif
+ $(verbose) $(foreach dep,$(3),\
+ echo $(PROJECT): $(foreach q,$(QUERY),$(call query_$(q),$(dep))) >> $(4) ;)
+ $(if $(filter-out query-deps,$(1)),,\
+ $(verbose) set -e; for dep in $(3) ; do \
+ if grep -qs ^$$$$dep$$$$ $(ERLANG_MK_TMP)/query.log; then \
+ :; \
+ else \
+ echo $$$$dep >> $(ERLANG_MK_TMP)/query.log; \
+ $(MAKE) -C $(DEPS_DIR)/$$$$dep $$@ QUERY="$(QUERY)" IS_DEP=1 || true; \
+ fi \
+ done)
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) touch $(4)
+ $(verbose) cat $(4)
+endif
+endef
+
+clean-tmp-query.log:
+ifeq ($(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_TMP)/query.log
+endif
+
+$(eval $(call query_target,query-deps,$(ERLANG_MK_RECURSIVE_DEPS_LIST),$(BUILD_DEPS) $(DEPS),$(ERLANG_MK_QUERY_DEPS_FILE)))
+$(eval $(call query_target,query-doc-deps,$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST),$(DOC_DEPS),$(ERLANG_MK_QUERY_DOC_DEPS_FILE)))
+$(eval $(call query_target,query-rel-deps,$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST),$(REL_DEPS),$(ERLANG_MK_QUERY_REL_DEPS_FILE)))
+$(eval $(call query_target,query-test-deps,$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST),$(TEST_DEPS),$(ERLANG_MK_QUERY_TEST_DEPS_FILE)))
+$(eval $(call query_target,query-shell-deps,$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST),$(SHELL_DEPS),$(ERLANG_MK_QUERY_SHELL_DEPS_FILE)))
diff --git a/deps/rabbitmq_web_stomp/priv/schema/rabbitmq_web_stomp.schema b/deps/rabbitmq_web_stomp/priv/schema/rabbitmq_web_stomp.schema
new file mode 100644
index 0000000000..590d9a1049
--- /dev/null
+++ b/deps/rabbitmq_web_stomp/priv/schema/rabbitmq_web_stomp.schema
@@ -0,0 +1,191 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+{mapping, "web_stomp.port", "rabbitmq_web_stomp.port",
+ [{datatype, integer}]}.
+
+{mapping, "web_stomp.ws_frame", "rabbitmq_web_stomp.ws_frame",
+ [{datatype, {enum, [binary, text]}}]}.
+
+{mapping, "web_stomp.num_acceptors.tcp", "rabbitmq_web_stomp.num_tcp_acceptors",
+ [{datatype, integer}]}.
+{mapping, "web_stomp.num_acceptors.ssl", "rabbitmq_web_stomp.num_ssl_acceptors",
+ [{datatype, integer}]}.
+{mapping, "web_stomp.max_connections", "rabbitmq_web_stomp.max_connections", [
+ {datatype, [{enum, [infinity]}, integer]}
+]}.
+
+{mapping, "web_stomp.tcp.port", "rabbitmq_web_stomp.tcp_config.port",
+ [{datatype, integer}]}.
+{mapping, "web_stomp.tcp.backlog", "rabbitmq_web_stomp.tcp_config.backlog",
+ [{datatype, integer}]}.
+{mapping, "web_stomp.tcp.ip", "rabbitmq_web_stomp.tcp_config.ip",
+ [{datatype, string}, {validators, ["is_ip"]}]}.
+{mapping, "web_stomp.tcp.listener", "rabbitmq_web_stomp.tcp_config", [
+ {datatype, [{enum, [none]}, ip]}
+]}.
+
+{mapping, "web_stomp.ws_path", "rabbitmq_web_stomp.ws_path",
+ [{datatype, string}]}.
+
+{mapping, "web_stomp.use_http_auth", "rabbitmq_web_stomp.use_http_auth",
+ [{datatype, {enum, [true, false]}}]}.
+
+{translation,
+ "rabbitmq_web_stomp.tcp_config",
+ fun(Conf) ->
+ Setting = cuttlefish:conf_get("web_stomp.tcp.listener", Conf, undefined),
+ case Setting of
+ none -> [];
+ undefined -> [{port, 15674}];
+ {Ip, Port} when is_list(Ip), is_integer(Port) ->
+ [{ip, Ip}, {port, Port}]
+ end
+ end
+}.
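+
+%% Example (an assumed rabbitmq.conf snippet):
+%%
+%%   web_stomp.tcp.listener = 127.0.0.1:12345
+%%
+%% translates to [{ip, "127.0.0.1"}, {port, 12345}] in
+%% rabbitmq_web_stomp.tcp_config; leaving the key unset falls back to
+%% [{port, 15674}], and `none` yields an empty TCP listener config.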
+
+{mapping, "web_stomp.ssl.listener", "rabbitmq_web_stomp.ssl_config", [
+ {datatype, [{enum, [none]}, ip]}
+]}.
+
+{mapping, "web_stomp.ssl.port", "rabbitmq_web_stomp.ssl_config.port",
+ [{datatype, integer}]}.
+{mapping, "web_stomp.ssl.backlog", "rabbitmq_web_stomp.ssl_config.backlog",
+ [{datatype, integer}]}.
+{mapping, "web_stomp.ssl.ip", "rabbitmq_web_stomp.ssl_config.ip",
+ [{datatype, string}, {validators, ["is_ip"]}]}.
+{mapping, "web_stomp.ssl.certfile", "rabbitmq_web_stomp.ssl_config.certfile",
+ [{datatype, string}, {validators, ["file_accessible"]}]}.
+{mapping, "web_stomp.ssl.keyfile", "rabbitmq_web_stomp.ssl_config.keyfile",
+ [{datatype, string}, {validators, ["file_accessible"]}]}.
+{mapping, "web_stomp.ssl.cacertfile", "rabbitmq_web_stomp.ssl_config.cacertfile",
+ [{datatype, string}, {validators, ["file_accessible"]}]}.
+{mapping, "web_stomp.ssl.password", "rabbitmq_web_stomp.ssl_config.password",
+ [{datatype, string}]}.
+
+{translation,
+ "rabbitmq_web_stomp.ssl_config",
+ fun(Conf) ->
+ Setting = cuttlefish:conf_get("web_stomp.ssl.listener", Conf, undefined),
+ case Setting of
+ none -> cuttlefish:unset();
+ undefined -> [];
+ {Ip, Port} when is_list(Ip), is_integer(Port) ->
+            %% we redo some of Cuttlefish's work here to
+            %% populate rabbitmq_web_stomp.ssl_config
+ TLSConf = cuttlefish_variable:filter_by_prefix("web_stomp.ssl", Conf),
+ %% preserve all web_stomp.ssl.* keys in rabbitmq_web_stomp.ssl_config
+ TLSConfM0 = maps:fold(fun(Path, V, Acc) ->
+ maps:put(list_to_atom(lists:last(Path)), V, Acc)
+ end, #{}, maps:from_list(TLSConf)),
+ TLSConfM = maps:remove(listener, TLSConfM0),
+ ListenerM = maps:from_list([{ip, Ip}, {port, Port}]),
+ lists:keysort(1, maps:to_list(maps:merge(TLSConfM, ListenerM)));
+
+ _ -> Setting
+ end
+ end
+}.
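+
+%% Example (an assumed rabbitmq.conf snippet):
+%%
+%%   web_stomp.ssl.listener = 127.0.0.1:15673
+%%   web_stomp.ssl.certfile = /path/to/cert.pem
+%%
+%% yields an ssl_config with certfile, ip and port entries; any other
+%% web_stomp.ssl.* keys set in the file are merged in the same way.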
+
+{mapping, "web_stomp.ssl.honor_cipher_order", "rabbitmq_web_stomp.ssl_config.honor_cipher_order",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "web_stomp.ssl.honor_ecc_order", "rabbitmq_web_stomp.ssl_config.honor_ecc_order",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "web_stomp.ssl.reuse_sessions", "rabbitmq_web_stomp.ssl_config.reuse_sessions",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "web_stomp.ssl.secure_renegotiate", "rabbitmq_web_stomp.ssl_config.secure_renegotiate",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "web_stomp.ssl.client_renegotiation", "rabbitmq_web_stomp.ssl_config.client_renegotiation",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "web_stomp.ssl.crl_check", "rabbitmq_web_stomp.ssl_config.crl_check",
+ [{datatype, [{enum, [true, false, peer, best_effort]}]}]}.
+
+{mapping, "web_stomp.ssl.depth", "rabbitmq_web_stomp.ssl_config.depth",
+ [{datatype, integer}, {validators, ["byte"]}]}.
+
+{mapping, "web_stomp.ssl.versions.$version", "rabbitmq_web_stomp.ssl_config.versions",
+ [{datatype, atom}]}.
+
+{translation, "rabbitmq_web_stomp.ssl_config.versions",
+fun(Conf) ->
+ Settings = cuttlefish_variable:filter_by_prefix("web_stomp.ssl.versions", Conf),
+ [V || {_, V} <- Settings]
+end}.
+
+{mapping, "web_stomp.ssl.ciphers.$cipher", "rabbitmq_web_stomp.ssl_config.ciphers",
+ [{datatype, string}]}.
+
+{translation, "rabbitmq_web_stomp.ssl_config.ciphers",
+fun(Conf) ->
+ Settings = cuttlefish_variable:filter_by_prefix("web_stomp.ssl.ciphers", Conf),
+ lists:reverse([V || {_, V} <- Settings])
+end}.
+
+
+%%
+%% Cowboy options
+%%
+
+{mapping, "web_stomp.cowboy_opts.max_empty_lines", "rabbitmq_web_stomp.cowboy_opts.max_empty_lines",
+ [{datatype, integer}]}.
+{mapping, "web_stomp.cowboy_opts.max_header_name_length", "rabbitmq_web_stomp.cowboy_opts.max_header_name_length",
+ [{datatype, integer}]}.
+{mapping, "web_stomp.cowboy_opts.max_header_value_length", "rabbitmq_web_stomp.cowboy_opts.max_header_value_length",
+ [{datatype, integer}]}.
+{mapping, "web_stomp.cowboy_opts.max_headers", "rabbitmq_web_stomp.cowboy_opts.max_headers",
+ [{datatype, integer}]}.
+{mapping, "web_stomp.cowboy_opts.max_keepalive", "rabbitmq_web_stomp.cowboy_opts.max_keepalive",
+ [{datatype, integer}]}.
+{mapping, "web_stomp.cowboy_opts.max_request_line_length", "rabbitmq_web_stomp.cowboy_opts.max_request_line_length",
+ [{datatype, integer}]}.
+
+%% Backwards compatibility: map the legacy Cowboy "timeout" key to idle_timeout.
+{mapping, "web_stomp.cowboy_opts.timeout", "rabbitmq_web_stomp.cowboy_opts.idle_timeout",
+ [{datatype, integer}, {validators, ["non_negative_integer"]}]
+}.
+%% Recent Cowboy versions have several timeout settings.
+{mapping, "web_stomp.cowboy_opts.idle_timeout", "rabbitmq_web_stomp.cowboy_opts.idle_timeout",
+ [{datatype, integer}]
+}.
+
+{translation,
+ "rabbitmq_web_stomp.cowboy_opts.idle_timeout",
+ fun(Conf) ->
+ case cuttlefish:conf_get("web_stomp.cowboy_opts.timeout", Conf, undefined) of
+ Value when is_integer(Value) ->
+ Value;
+ undefined ->
+ case cuttlefish:conf_get("web_stomp.cowboy_opts.idle_timeout", Conf, undefined) of
+ undefined -> cuttlefish:unset();
+ Value -> Value
+ end
+ end
+ end
+}.
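+
+%% Example: with both keys set (an assumed configuration),
+%%
+%%   web_stomp.cowboy_opts.timeout = 60000
+%%   web_stomp.cowboy_opts.idle_timeout = 30000
+%%
+%% the legacy `timeout` value wins and idle_timeout resolves to 60000.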
+
+{mapping, "web_stomp.ws_opts.compress", "rabbitmq_web_stomp.cowboy_ws_opts.compress",
+ [{datatype, {enum, [true, false]}}]
+}.
+{mapping, "web_stomp.ws_opts.max_frame_size", "rabbitmq_web_stomp.cowboy_ws_opts.max_frame_size",
+ [{datatype, integer}, {validators, ["non_negative_integer"]}]
+}.
+{mapping, "web_stomp.ws_opts.idle_timeout", "rabbitmq_web_stomp.cowboy_ws_opts.idle_timeout",
+ [{datatype, integer}, {validators, ["non_negative_integer"]}]
+}.
+
+
+%% Whether or not to enable Proxy protocol support.
+%%
+%% {proxy_protocol, false}
+
+{mapping, "web_stomp.proxy_protocol", "rabbitmq_web_stomp.proxy_protocol",
+ [{datatype, {enum, [true, false]}}]}.
diff --git a/deps/rabbitmq_web_stomp/rabbitmq-components.mk b/deps/rabbitmq_web_stomp/rabbitmq-components.mk
new file mode 100644
index 0000000000..b2a3be8b35
--- /dev/null
+++ b/deps/rabbitmq_web_stomp/rabbitmq-components.mk
@@ -0,0 +1,359 @@
+ifeq ($(.DEFAULT_GOAL),)
+# Define the default goal as `all` because this file defines some
+# targets before erlang.mk is included, which would otherwise cause the
+# wrong target to become the default.
+.DEFAULT_GOAL = all
+endif
+
+# PROJECT_VERSION defaults to:
+# 1. the version exported by rabbitmq-server-release;
+# 2. the version stored in `git-revisions.txt`, if it exists;
+# 3. a version based on git-describe(1), if it is a Git clone;
+# 4. 0.0.0
+
+PROJECT_VERSION := $(RABBITMQ_VERSION)
+
+ifeq ($(PROJECT_VERSION),)
+PROJECT_VERSION := $(shell \
+if test -f git-revisions.txt; then \
+ head -n1 git-revisions.txt | \
+ awk '{print $$$(words $(PROJECT_DESCRIPTION) version);}'; \
+else \
+ (git describe --dirty --abbrev=7 --tags --always --first-parent \
+ 2>/dev/null || echo rabbitmq_v0_0_0) | \
+ sed -e 's/^rabbitmq_v//' -e 's/^v//' -e 's/_/./g' -e 's/-/+/' \
+ -e 's/-/./g'; \
+fi)
+endif
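+
+# For example (an assumed tag), a git-describe output such as
+# rabbitmq_v3_8_9-12-gabcdef1 becomes 3.8.9+12.gabcdef1 after the sed
+# pipeline above.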
+
+# --------------------------------------------------------------------
+# RabbitMQ components.
+# --------------------------------------------------------------------
+
+# For RabbitMQ repositories, we want to check out branches which match
+# the parent project. For instance, if the parent project is on a
+# release tag, dependencies must be on the same release tag. If the
+# parent project is on a topic branch, dependencies must be on the same
+# topic branch, or fall back to `stable` or `master`, whichever was the
+# base of the topic branch.
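+#
+# For example (an assumed project Makefile): listing
+# `DEPS = rabbit_common` makes erlang.mk use the dep_rabbit_common line
+# below, trying $(current_rmq_ref) first, then $(base_rmq_ref), then
+# master.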
+
+dep_amqp_client = git_rmq rabbitmq-erlang-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_amqp10_client = git_rmq rabbitmq-amqp1.0-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_amqp10_common = git_rmq rabbitmq-amqp1.0-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbit = git_rmq rabbitmq-server $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbit_common = git_rmq rabbitmq-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_amqp1_0 = git_rmq rabbitmq-amqp1.0 $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_amqp = git_rmq rabbitmq-auth-backend-amqp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_cache = git_rmq rabbitmq-auth-backend-cache $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_http = git_rmq rabbitmq-auth-backend-http $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_ldap = git_rmq rabbitmq-auth-backend-ldap $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_oauth2 = git_rmq rabbitmq-auth-backend-oauth2 $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_mechanism_ssl = git_rmq rabbitmq-auth-mechanism-ssl $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_aws = git_rmq rabbitmq-aws $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_boot_steps_visualiser = git_rmq rabbitmq-boot-steps-visualiser $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_cli = git_rmq rabbitmq-cli $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_codegen = git_rmq rabbitmq-codegen $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_consistent_hash_exchange = git_rmq rabbitmq-consistent-hash-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_ct_client_helpers = git_rmq rabbitmq-ct-client-helpers $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_ct_helpers = git_rmq rabbitmq-ct-helpers $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_delayed_message_exchange = git_rmq rabbitmq-delayed-message-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_dotnet_client = git_rmq rabbitmq-dotnet-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_event_exchange = git_rmq rabbitmq-event-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_federation = git_rmq rabbitmq-federation $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_federation_management = git_rmq rabbitmq-federation-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_java_client = git_rmq rabbitmq-java-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_client = git_rmq rabbitmq-jms-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_cts = git_rmq rabbitmq-jms-cts $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_topic_exchange = git_rmq rabbitmq-jms-topic-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_lvc_exchange = git_rmq rabbitmq-lvc-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management = git_rmq rabbitmq-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_agent = git_rmq rabbitmq-management-agent $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_exchange = git_rmq rabbitmq-management-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_themes = git_rmq rabbitmq-management-themes $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_message_timestamp = git_rmq rabbitmq-message-timestamp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_metronome = git_rmq rabbitmq-metronome $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_mqtt = git_rmq rabbitmq-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_objc_client = git_rmq rabbitmq-objc-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_aws = git_rmq rabbitmq-peer-discovery-aws $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_common = git_rmq rabbitmq-peer-discovery-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_consul = git_rmq rabbitmq-peer-discovery-consul $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_etcd = git_rmq rabbitmq-peer-discovery-etcd $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_k8s = git_rmq rabbitmq-peer-discovery-k8s $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_prometheus = git_rmq rabbitmq-prometheus $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_random_exchange = git_rmq rabbitmq-random-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_recent_history_exchange = git_rmq rabbitmq-recent-history-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_routing_node_stamp = git_rmq rabbitmq-routing-node-stamp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_rtopic_exchange = git_rmq rabbitmq-rtopic-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_server_release = git_rmq rabbitmq-server-release $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_sharding = git_rmq rabbitmq-sharding $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_shovel = git_rmq rabbitmq-shovel $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_shovel_management = git_rmq rabbitmq-shovel-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_stomp = git_rmq rabbitmq-stomp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_stream = git_rmq rabbitmq-stream $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_toke = git_rmq rabbitmq-toke $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_top = git_rmq rabbitmq-top $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_tracing = git_rmq rabbitmq-tracing $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_trust_store = git_rmq rabbitmq-trust-store $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_test = git_rmq rabbitmq-test $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_dispatch = git_rmq rabbitmq-web-dispatch $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_stomp = git_rmq rabbitmq-web-stomp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_stomp_examples = git_rmq rabbitmq-web-stomp-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt = git_rmq rabbitmq-web-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt_examples = git_rmq rabbitmq-web-mqtt-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_website = git_rmq rabbitmq-website $(current_rmq_ref) $(base_rmq_ref) live master
+dep_toke = git_rmq toke $(current_rmq_ref) $(base_rmq_ref) master
+
+dep_rabbitmq_public_umbrella = git_rmq rabbitmq-public-umbrella $(current_rmq_ref) $(base_rmq_ref) master
+
+# Third-party dependencies version pinning.
+#
+# We do this in this file, which is copied into all projects, to ensure
+# they all use the same versions. This avoids conflicts and makes it
+# possible to work with rabbitmq-public-umbrella.
+
+dep_accept = hex 0.3.5
+dep_cowboy = hex 2.8.0
+dep_cowlib = hex 2.9.1
+dep_jsx = hex 2.11.0
+dep_lager = hex 3.8.0
+dep_prometheus = git https://github.com/deadtrickster/prometheus.erl.git master
+dep_ra = git https://github.com/rabbitmq/ra.git master
+dep_ranch = hex 1.7.1
+dep_recon = hex 2.5.1
+dep_observer_cli = hex 1.5.4
+dep_stdout_formatter = hex 0.2.4
+dep_sysmon_handler = hex 1.3.0
+
+RABBITMQ_COMPONENTS = amqp_client \
+ amqp10_common \
+ amqp10_client \
+ rabbit \
+ rabbit_common \
+ rabbitmq_amqp1_0 \
+ rabbitmq_auth_backend_amqp \
+ rabbitmq_auth_backend_cache \
+ rabbitmq_auth_backend_http \
+ rabbitmq_auth_backend_ldap \
+ rabbitmq_auth_backend_oauth2 \
+ rabbitmq_auth_mechanism_ssl \
+ rabbitmq_aws \
+ rabbitmq_boot_steps_visualiser \
+ rabbitmq_cli \
+ rabbitmq_codegen \
+ rabbitmq_consistent_hash_exchange \
+ rabbitmq_ct_client_helpers \
+ rabbitmq_ct_helpers \
+ rabbitmq_delayed_message_exchange \
+ rabbitmq_dotnet_client \
+ rabbitmq_event_exchange \
+ rabbitmq_federation \
+ rabbitmq_federation_management \
+ rabbitmq_java_client \
+ rabbitmq_jms_client \
+ rabbitmq_jms_cts \
+ rabbitmq_jms_topic_exchange \
+ rabbitmq_lvc_exchange \
+ rabbitmq_management \
+ rabbitmq_management_agent \
+ rabbitmq_management_exchange \
+ rabbitmq_management_themes \
+ rabbitmq_message_timestamp \
+ rabbitmq_metronome \
+ rabbitmq_mqtt \
+ rabbitmq_objc_client \
+ rabbitmq_peer_discovery_aws \
+ rabbitmq_peer_discovery_common \
+ rabbitmq_peer_discovery_consul \
+ rabbitmq_peer_discovery_etcd \
+ rabbitmq_peer_discovery_k8s \
+ rabbitmq_prometheus \
+ rabbitmq_random_exchange \
+ rabbitmq_recent_history_exchange \
+ rabbitmq_routing_node_stamp \
+ rabbitmq_rtopic_exchange \
+ rabbitmq_server_release \
+ rabbitmq_sharding \
+ rabbitmq_shovel \
+ rabbitmq_shovel_management \
+ rabbitmq_stomp \
+ rabbitmq_stream \
+ rabbitmq_toke \
+ rabbitmq_top \
+ rabbitmq_tracing \
+ rabbitmq_trust_store \
+ rabbitmq_web_dispatch \
+ rabbitmq_web_mqtt \
+ rabbitmq_web_mqtt_examples \
+ rabbitmq_web_stomp \
+ rabbitmq_web_stomp_examples \
+ rabbitmq_website
+
+# Erlang.mk does not rebuild dependencies after they have been compiled
+# once, except for those listed in the `$(FORCE_REBUILD)` variable.
+#
+# We want all RabbitMQ components to always be rebuilt: this eases
+# working on several components at the same time.
+
+FORCE_REBUILD = $(RABBITMQ_COMPONENTS)
+
+# Several components have a custom erlang.mk/build.config, mainly
+# to disable eunit. Therefore, we can't use the top-level project's
+# erlang.mk copy.
+NO_AUTOPATCH += $(RABBITMQ_COMPONENTS)
+
+ifeq ($(origin current_rmq_ref),undefined)
+ifneq ($(wildcard .git),)
+current_rmq_ref := $(shell (\
+ ref=$$(LANG=C git branch --list | awk '/^\* \(.*detached / {ref=$$0; sub(/.*detached [^ ]+ /, "", ref); sub(/\)$$/, "", ref); print ref; exit;} /^\* / {ref=$$0; sub(/^\* /, "", ref); print ref; exit}');\
+ if test "$$(git rev-parse --short HEAD)" != "$$ref"; then echo "$$ref"; fi))
+else
+current_rmq_ref := master
+endif
+endif
+export current_rmq_ref
+
+ifeq ($(origin base_rmq_ref),undefined)
+ifneq ($(wildcard .git),)
+possible_base_rmq_ref := master
+ifeq ($(possible_base_rmq_ref),$(current_rmq_ref))
+base_rmq_ref := $(current_rmq_ref)
+else
+base_rmq_ref := $(shell \
+ (git rev-parse --verify -q master >/dev/null && \
+ git rev-parse --verify -q $(possible_base_rmq_ref) >/dev/null && \
+ git merge-base --is-ancestor $$(git merge-base master HEAD) $(possible_base_rmq_ref) && \
+ echo $(possible_base_rmq_ref)) || \
+ echo master)
+endif
+else
+base_rmq_ref := master
+endif
+endif
+export base_rmq_ref
+
+# Repository URL selection.
+#
+# First, we infer other components' location from the current project
+# repository URL, if it's a Git repository:
+# - We take the "origin" remote URL as the base
+# - The current project name and repository name are replaced by the
+# target's properties:
+# eg. rabbitmq-common is replaced by rabbitmq-codegen
+# eg. rabbit_common is replaced by rabbitmq_codegen
+#
+# If cloning from this computed location fails, we fall back to the
+# RabbitMQ upstream, which is hosted on GitHub.
+
+# Macro to transform eg. "rabbit_common" to "rabbitmq-common".
+rmq_cmp_repo_name = $(word 2,$(dep_$(1)))
+
+# Upstream URL for the current project.
+RABBITMQ_COMPONENT_REPO_NAME := $(call rmq_cmp_repo_name,$(PROJECT))
+RABBITMQ_UPSTREAM_FETCH_URL ?= https://github.com/rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git
+RABBITMQ_UPSTREAM_PUSH_URL ?= git@github.com:rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git
+
+# Current URL for the current project. If this is not a Git clone,
+# default to the upstream Git repository.
+ifneq ($(wildcard .git),)
+git_origin_fetch_url := $(shell git config remote.origin.url)
+git_origin_push_url := $(shell git config remote.origin.pushurl || git config remote.origin.url)
+RABBITMQ_CURRENT_FETCH_URL ?= $(git_origin_fetch_url)
+RABBITMQ_CURRENT_PUSH_URL ?= $(git_origin_push_url)
+else
+RABBITMQ_CURRENT_FETCH_URL ?= $(RABBITMQ_UPSTREAM_FETCH_URL)
+RABBITMQ_CURRENT_PUSH_URL ?= $(RABBITMQ_UPSTREAM_PUSH_URL)
+endif
+
+# Macro to replace the following patterns:
+# 1. /foo.git -> /bar.git
+# 2. /foo -> /bar
+# 3. /foo/ -> /bar/
+subst_repo_name = $(patsubst %/$(1)/%,%/$(2)/%,$(patsubst %/$(1),%/$(2),$(patsubst %/$(1).git,%/$(2).git,$(3))))
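+
+# For example (illustrative values):
+#   $(call subst_repo_name,rabbitmq-common,rabbitmq-codegen,https://example.com/team/rabbitmq-common.git)
+#   => https://example.com/team/rabbitmq-codegen.git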
+
+# Macro to replace both the project's name (eg. "rabbit_common") and
+# repository name (eg. "rabbitmq-common") by the target's equivalent.
+#
+# This macro is kept on one line because we don't want whitespaces in
+# the returned value, as it's used in $(dep_fetch_git_rmq) in a shell
+# single-quoted string.
+dep_rmq_repo = $(if $(dep_$(2)),$(call subst_repo_name,$(PROJECT),$(2),$(call subst_repo_name,$(RABBITMQ_COMPONENT_REPO_NAME),$(call rmq_cmp_repo_name,$(2)),$(1))),$(pkg_$(1)_repo))
+
+dep_rmq_commits = $(if $(dep_$(1)), \
+ $(wordlist 3,$(words $(dep_$(1))),$(dep_$(1))), \
+ $(pkg_$(1)_commit))
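+
+# For example (illustrative only): `$(call dep_rmq_commits,rabbit)`
+# expands to the refs listed after the repository name in `dep_rabbit`
+# above, i.e. `$(current_rmq_ref) $(base_rmq_ref) master`, which is the
+# list of refs that $(dep_fetch_git_rmq) below tries to check out, in
+# order.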
+
+define dep_fetch_git_rmq
+ fetch_url1='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_FETCH_URL),$(1))'; \
+ fetch_url2='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_FETCH_URL),$(1))'; \
+ if test "$$$$fetch_url1" != '$(RABBITMQ_CURRENT_FETCH_URL)' && \
+ git clone -q -n -- "$$$$fetch_url1" $(DEPS_DIR)/$(call dep_name,$(1)); then \
+ fetch_url="$$$$fetch_url1"; \
+ push_url='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_PUSH_URL),$(1))'; \
+ elif git clone -q -n -- "$$$$fetch_url2" $(DEPS_DIR)/$(call dep_name,$(1)); then \
+ fetch_url="$$$$fetch_url2"; \
+ push_url='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_PUSH_URL),$(1))'; \
+ fi; \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && ( \
+ $(foreach ref,$(call dep_rmq_commits,$(1)), \
+ git checkout -q $(ref) >/dev/null 2>&1 || \
+ ) \
+ (echo "error: no valid pathspec among: $(call dep_rmq_commits,$(1))" \
+ 1>&2 && false) ) && \
+ (test "$$$$fetch_url" = "$$$$push_url" || \
+ git remote set-url --push origin "$$$$push_url")
+endef
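+
+# In other words (illustrative): a dependency is first cloned from the
+# URL derived from this clone's "origin" remote; if that fails, the
+# clone is retried from the GitHub upstream, and the push URL is set to
+# match whichever fetch URL succeeded.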
+
+# --------------------------------------------------------------------
+# Component distribution.
+# --------------------------------------------------------------------
+
+list-dist-deps::
+ @:
+
+prepare-dist::
+ @:
+
+# --------------------------------------------------------------------
+# Umbrella-specific settings.
+# --------------------------------------------------------------------
+
+# If the top-level project is a RabbitMQ component, we override
+# $(DEPS_DIR) for this project to point to the top-level's one.
+#
+# We also verify that the guessed DEPS_DIR is actually named `deps`,
+# to rule out any situation where finding a `rabbitmq-components.mk` in
+# an upper directory was a mere coincidence.
+
+possible_deps_dir_1 = $(abspath ..)
+possible_deps_dir_2 = $(abspath ../../..)
+
+ifeq ($(notdir $(possible_deps_dir_1)),deps)
+ifneq ($(wildcard $(possible_deps_dir_1)/../rabbitmq-components.mk),)
+deps_dir_overriden = 1
+DEPS_DIR ?= $(possible_deps_dir_1)
+DISABLE_DISTCLEAN = 1
+endif
+endif
+
+ifeq ($(deps_dir_overriden),)
+ifeq ($(notdir $(possible_deps_dir_2)),deps)
+ifneq ($(wildcard $(possible_deps_dir_2)/../rabbitmq-components.mk),)
+deps_dir_overriden = 1
+DEPS_DIR ?= $(possible_deps_dir_2)
+DISABLE_DISTCLEAN = 1
+endif
+endif
+endif
+
+ifneq ($(wildcard UMBRELLA.md),)
+DISABLE_DISTCLEAN = 1
+endif
+
+# We disable `make distclean` so $(DEPS_DIR) is not accidentally removed.
+
+ifeq ($(DISABLE_DISTCLEAN),1)
+ifneq ($(filter distclean distclean-deps,$(MAKECMDGOALS)),)
+SKIP_DEPS = 1
+endif
+endif
diff --git a/deps/rabbitmq_web_stomp/src/rabbit_web_stomp_app.erl b/deps/rabbitmq_web_stomp/src/rabbit_web_stomp_app.erl
new file mode 100644
index 0000000000..61441f077a
--- /dev/null
+++ b/deps/rabbitmq_web_stomp/src/rabbit_web_stomp_app.erl
@@ -0,0 +1,27 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_web_stomp_app).
+
+-behaviour(application).
+-export([start/2, stop/1]).
+
+%%----------------------------------------------------------------------------
+
+-spec start(_, _) -> {ok, pid()}.
+start(_Type, _StartArgs) ->
+ ok = rabbit_web_stomp_listener:init(),
+ EMPid = case rabbit_event:start_link() of
+ {ok, Pid} -> Pid;
+ {error, {already_started, Pid}} -> Pid
+ end,
+ gen_event:add_handler(EMPid, rabbit_web_stomp_internal_event_handler, []),
+ rabbit_web_stomp_sup:start_link().
+
+-spec stop(_) -> ok.
+stop(State) ->
+ rabbit_web_stomp_listener:stop(State).
diff --git a/deps/rabbitmq_web_stomp/src/rabbit_web_stomp_connection_sup.erl b/deps/rabbitmq_web_stomp/src/rabbit_web_stomp_connection_sup.erl
new file mode 100644
index 0000000000..0703681482
--- /dev/null
+++ b/deps/rabbitmq_web_stomp/src/rabbit_web_stomp_connection_sup.erl
@@ -0,0 +1,51 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_web_stomp_connection_sup).
+
+-behaviour(supervisor2).
+-behaviour(ranch_protocol).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-export([start_link/4, start_keepalive_link/0]).
+-export([init/1]).
+
+%%----------------------------------------------------------------------------
+
+start_link(Ref, Sock, Transport, CowboyOpts0) ->
+ {ok, SupPid} = supervisor2:start_link(?MODULE, []),
+ {ok, KeepaliveSup} = supervisor2:start_child(
+ SupPid,
+ {rabbit_web_stomp_keepalive_sup,
+ {?MODULE, start_keepalive_link, []},
+ intrinsic, infinity, supervisor, [rabbit_keepalive_sup]}),
+ %% In order for the Websocket handler to receive the KeepaliveSup
+ %% variable, we need to pass it first through the environment and
+    %% then have the middleware rabbit_web_stomp_middleware place it
+ %% in the initial handler state.
+ Env = maps:get(env, CowboyOpts0),
+ CowboyOpts = CowboyOpts0#{env => Env#{keepalive_sup => KeepaliveSup,
+ socket => Sock}},
+ Protocol = case Transport of
+ ranch_tcp -> cowboy_clear;
+ ranch_ssl -> cowboy_tls
+ end,
+ {ok, ReaderPid} = supervisor2:start_child(
+ SupPid,
+ {Protocol,
+ {Protocol, start_link, [Ref, Sock, Transport, CowboyOpts]},
+ intrinsic, ?WORKER_WAIT, worker, [Protocol]}),
+ {ok, SupPid, ReaderPid}.
+
+start_keepalive_link() ->
+ supervisor2:start_link(?MODULE, []).
+
+%%----------------------------------------------------------------------------
+
+init([]) ->
+ {ok, {{one_for_all, 0, 1}, []}}.
diff --git a/deps/rabbitmq_web_stomp/src/rabbit_web_stomp_handler.erl b/deps/rabbitmq_web_stomp/src/rabbit_web_stomp_handler.erl
new file mode 100644
index 0000000000..6f417f31a2
--- /dev/null
+++ b/deps/rabbitmq_web_stomp/src/rabbit_web_stomp_handler.erl
@@ -0,0 +1,339 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_web_stomp_handler).
+-behaviour(cowboy_websocket).
+
+-include_lib("rabbitmq_stomp/include/rabbit_stomp.hrl").
+-include_lib("rabbitmq_stomp/include/rabbit_stomp_frame.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+%% Websocket.
+-export([
+ init/2,
+ websocket_init/1,
+ websocket_handle/2,
+ websocket_info/2,
+ terminate/3
+]).
+-export([close_connection/2]).
+
+-record(state, {
+ frame_type,
+ heartbeat_mode,
+ heartbeat,
+ heartbeat_sup,
+ parse_state,
+ proc_state,
+ state,
+ conserve_resources,
+ socket,
+ peername,
+ auth_hd,
+ stats_timer,
+ connection
+}).
+
+%% Websocket.
+init(Req0, Opts) ->
+ {PeerAddr, _PeerPort} = maps:get(peer, Req0),
+ {_, KeepaliveSup} = lists:keyfind(keepalive_sup, 1, Opts),
+ {_, Sock0} = lists:keyfind(socket, 1, Opts),
+ Sock = case maps:get(proxy_header, Req0, undefined) of
+ undefined ->
+ Sock0;
+ ProxyInfo ->
+ {rabbit_proxy_socket, Sock0, ProxyInfo}
+ end,
+ Req = case cowboy_req:parse_header(<<"sec-websocket-protocol">>, Req0) of
+ undefined -> Req0;
+ Protocols ->
+ case filter_stomp_protocols(Protocols) of
+ [] -> Req0;
+ [StompProtocol|_] ->
+ cowboy_req:set_resp_header(<<"sec-websocket-protocol">>,
+ StompProtocol, Req0)
+ end
+ end,
+ WsOpts0 = proplists:get_value(ws_opts, Opts, #{}),
+ WsOpts = maps:merge(#{compress => true}, WsOpts0),
+ {cowboy_websocket, Req, #state{
+ frame_type = proplists:get_value(type, Opts, text),
+ heartbeat_sup = KeepaliveSup,
+ heartbeat = {none, none},
+ heartbeat_mode = heartbeat,
+ state = running,
+ conserve_resources = false,
+ socket = Sock,
+ peername = PeerAddr,
+ auth_hd = cowboy_req:header(<<"authorization">>, Req)
+ }, WsOpts}.
+
+websocket_init(State) ->
+ ok = file_handle_cache:obtain(),
+ process_flag(trap_exit, true),
+ {ok, ProcessorState} = init_processor_state(State),
+ {ok, rabbit_event:init_stats_timer(
+ State#state{proc_state = ProcessorState,
+ parse_state = rabbit_stomp_frame:initial_state()},
+ #state.stats_timer)}.
+
+-spec close_connection(pid(), string()) -> 'ok'.
+close_connection(Pid, Reason) ->
+ rabbit_log_connection:info("Web STOMP: will terminate connection process ~p, reason: ~s",
+ [Pid, Reason]),
+ sys:terminate(Pid, Reason),
+ ok.
+
+init_processor_state(#state{socket=Sock, peername=PeerAddr, auth_hd=AuthHd}) ->
+ Self = self(),
+ SendFun = fun (_Sync, Data) ->
+ Self ! {send, Data},
+ ok
+ end,
+
+ SSLLogin = application:get_env(rabbitmq_stomp, ssl_cert_login, false),
+ StompConfig0 = #stomp_configuration{ssl_cert_login = SSLLogin, implicit_connect = false},
+ UseHTTPAuth = application:get_env(rabbitmq_web_stomp, use_http_auth, false),
+ UserConfig = application:get_env(rabbitmq_stomp, default_user, undefined),
+ StompConfig1 = rabbit_stomp:parse_default_user(UserConfig, StompConfig0),
+ StompConfig2 = case UseHTTPAuth of
+ true ->
+ case AuthHd of
+ undefined ->
+ %% We fall back to the default STOMP credentials.
+ StompConfig1#stomp_configuration{force_default_creds = true};
+ _ ->
+ {basic, HTTPLogin, HTTPPassCode}
+ = cow_http_hd:parse_authorization(AuthHd),
+ StompConfig0#stomp_configuration{
+ default_login = HTTPLogin,
+ default_passcode = HTTPPassCode,
+ force_default_creds = true}
+ end;
+ false ->
+ StompConfig1
+ end,
+
+ AdapterInfo = amqp_connection:socket_adapter_info(Sock, {'Web STOMP', 0}),
+ RealSocket = rabbit_net:unwrap_socket(Sock),
+ LoginNameFromCertificate = rabbit_stomp_reader:ssl_login_name(RealSocket, StompConfig2),
+ ProcessorState = rabbit_stomp_processor:initial_state(
+ StompConfig2,
+ {SendFun, AdapterInfo, LoginNameFromCertificate, PeerAddr}),
+ {ok, ProcessorState}.
+
+websocket_handle({text, Data}, State) ->
+ handle_data(Data, State);
+websocket_handle({binary, Data}, State) ->
+ handle_data(Data, State);
+websocket_handle(_Frame, State) ->
+ {ok, State}.
+
+websocket_info({send, Msg}, State=#state{frame_type=FrameType}) ->
+ {reply, {FrameType, Msg}, State};
+
+websocket_info({conserve_resources, Conserve}, State) ->
+ NewState = State#state{conserve_resources = Conserve},
+ handle_credits(control_throttle(NewState));
+websocket_info({bump_credit, Msg}, State) ->
+ credit_flow:handle_bump_msg(Msg),
+ handle_credits(control_throttle(State));
+
+websocket_info(#'basic.consume_ok'{}, State) ->
+ {ok, State};
+websocket_info(#'basic.cancel_ok'{}, State) ->
+ {ok, State};
+websocket_info(#'basic.ack'{delivery_tag = Tag, multiple = IsMulti},
+ State=#state{ proc_state = ProcState0 }) ->
+ ProcState = rabbit_stomp_processor:flush_pending_receipts(Tag,
+ IsMulti,
+ ProcState0),
+ {ok, State#state{ proc_state = ProcState }};
+websocket_info({Delivery = #'basic.deliver'{},
+ #amqp_msg{props = Props, payload = Payload},
+ DeliveryCtx},
+ State=#state{ proc_state = ProcState0 }) ->
+ ProcState = rabbit_stomp_processor:send_delivery(Delivery,
+ Props,
+ Payload,
+ DeliveryCtx,
+ ProcState0),
+ {ok, State#state{ proc_state = ProcState }};
+websocket_info(#'basic.cancel'{consumer_tag = Ctag},
+ State=#state{ proc_state = ProcState0 }) ->
+ case rabbit_stomp_processor:cancel_consumer(Ctag, ProcState0) of
+ {ok, ProcState, _Connection} ->
+ {ok, State#state{ proc_state = ProcState }};
+ {stop, _Reason, ProcState} ->
+ stop(State#state{ proc_state = ProcState })
+ end;
+
+websocket_info({start_heartbeats, _},
+ State = #state{heartbeat_mode = no_heartbeat}) ->
+ {ok, State};
+
+websocket_info({start_heartbeats, {0, 0}}, State) ->
+ {ok, State};
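+%% A STOMP heart-beat is a single EOL byte: the SendFun below emits
+%% <<$\n>> at the negotiated send interval, while ReceiveFun fires when
+%% the peer has been silent for too long (handled as client_timeout).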
+websocket_info({start_heartbeats, {SendTimeout, ReceiveTimeout}},
+ State = #state{socket = Sock,
+ heartbeat_sup = SupPid,
+ heartbeat_mode = heartbeat}) ->
+ Self = self(),
+ SendFun = fun () -> Self ! {send, <<$\n>>}, ok end,
+ ReceiveFun = fun() -> Self ! client_timeout end,
+ Heartbeat = rabbit_heartbeat:start(SupPid, Sock, SendTimeout,
+ SendFun, ReceiveTimeout, ReceiveFun),
+ {ok, State#state{heartbeat = Heartbeat}};
+websocket_info(client_timeout, State) ->
+ stop(State);
+
+%%----------------------------------------------------------------------------
+websocket_info({'EXIT', From, Reason},
+ State=#state{ proc_state = ProcState0 }) ->
+ case rabbit_stomp_processor:handle_exit(From, Reason, ProcState0) of
+ {stop, _Reason, ProcState} ->
+ stop(State#state{ proc_state = ProcState });
+ unknown_exit ->
+ stop(State)
+ end;
+%%----------------------------------------------------------------------------
+
+websocket_info(emit_stats, State) ->
+ {ok, emit_stats(State)};
+
+websocket_info(Msg, State) ->
+ rabbit_log_connection:info("Web STOMP: unexpected message ~p~n",
+ [Msg]),
+ {ok, State}.
+
+terminate(_Reason, _Req, #state{proc_state = undefined}) ->
+ ok;
+terminate(_Reason, _Req, #state{proc_state = ProcState}) ->
+ rabbit_stomp_processor:flush_and_die(ProcState),
+ ok.
+
+%%----------------------------------------------------------------------------
+
+%% The protocols v10.stomp, v11.stomp and v12.stomp are registered
+%% with IANA: https://www.iana.org/assignments/websocket/websocket.xhtml
+
+filter_stomp_protocols(Protocols) ->
+ lists:reverse(lists:sort(lists:filter(
+ fun(<< "v1", C, ".stomp">>)
+ when C =:= $2; C =:= $1; C =:= $0 -> true;
+ (_) ->
+ false
+ end,
+ Protocols))).
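+
+%% For example (illustrative values):
+%%   filter_stomp_protocols([<<"v10.stomp">>, <<"v12.stomp">>, <<"chat">>])
+%%   => [<<"v12.stomp">>, <<"v10.stomp">>]
+%% Unknown subprotocols are dropped and the highest version sorts first.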
+
+%%----------------------------------------------------------------------------
+
+handle_data(Data, State0) ->
+ case handle_data1(Data, State0) of
+ {ok, State1 = #state{state = blocked}} ->
+ {[{active, false}], State1};
+ {error, Error0} ->
+ Error1 = rabbit_misc:format("~p", [Error0]),
+ rabbit_log_connection:error("STOMP detected framing error '~s'~n", [Error1]),
+ stop(State0, 1007, Error1);
+ Other ->
+ Other
+ end.
+
+handle_data1(<<>>, State) ->
+ {ok, ensure_stats_timer(State)};
+handle_data1(Bytes, State = #state{proc_state = ProcState,
+ parse_state = ParseState}) ->
+ case rabbit_stomp_frame:parse(Bytes, ParseState) of
+ {more, ParseState1} ->
+ {ok, ensure_stats_timer(State#state{ parse_state = ParseState1 })};
+ {ok, Frame, Rest} ->
+ case rabbit_stomp_processor:process_frame(Frame, ProcState) of
+ {ok, ProcState1, ConnPid} ->
+ ParseState1 = rabbit_stomp_frame:initial_state(),
+ State1 = maybe_block(State, Frame),
+ handle_data1(
+ Rest,
+ State1 #state{ parse_state = ParseState1,
+ proc_state = ProcState1,
+ connection = ConnPid });
+ {stop, _Reason, ProcState1} ->
+ stop(State#state{ proc_state = ProcState1 })
+ end;
+ Other ->
+ Other
+ end.
+
+maybe_block(State = #state{state = blocking, heartbeat = Heartbeat},
+ #stomp_frame{command = "SEND"}) ->
+ rabbit_heartbeat:pause_monitor(Heartbeat),
+ State#state{state = blocked};
+maybe_block(State, _) ->
+ State.
+
+stop(State) ->
+ stop(State, 1000, "STOMP died").
+
+stop(State = #state{proc_state = ProcState}, CloseCode, Error0) ->
+ maybe_emit_stats(State),
+ ok = file_handle_cache:release(),
+ rabbit_stomp_processor:flush_and_die(ProcState),
+ Error1 = rabbit_data_coercion:to_binary(Error0),
+ {[{close, CloseCode, Error1}], State}.
+
+%%----------------------------------------------------------------------------
+
+handle_credits(State0) ->
+ case control_throttle(State0) of
+ State = #state{state = running} ->
+ {[{active, true}], State};
+ State ->
+ {ok, State}
+ end.
+
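+%% Connection flow-control states: `running` reads from the socket,
+%% `blocking` is entered when resources are conserved or credit_flow
+%% blocks (the next SEND frame moves it to `blocked`, see maybe_block/2),
+%% and `blocked` keeps the websocket inactive until credit is returned.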
+control_throttle(State = #state{state = CS,
+ conserve_resources = Mem}) ->
+ case {CS, Mem orelse credit_flow:blocked()} of
+ {running, true} -> blocking(State);
+ {blocking, false} -> running(State);
+ {blocked, false} -> running(State);
+ {_, _} -> State
+ end.
+
+blocking(State) ->
+ State#state{state = blocking}.
+
+running(State = #state{heartbeat=Heartbeat}) ->
+ rabbit_heartbeat:resume_monitor(Heartbeat),
+ State#state{state = running}.
+
+%%----------------------------------------------------------------------------
+
+ensure_stats_timer(State) ->
+ rabbit_event:ensure_stats_timer(State, #state.stats_timer, emit_stats).
+
+maybe_emit_stats(State) ->
+ rabbit_event:if_enabled(State, #state.stats_timer,
+ fun() -> emit_stats(State) end).
+
+emit_stats(State=#state{connection = C}) when C == none; C == undefined ->
+    %% Avoid emitting stats on terminate when the connection has not yet been
+    %% established, as that would create orphan entries in the stats database.
+ State1 = rabbit_event:reset_stats_timer(State, #state.stats_timer),
+ State1;
+emit_stats(State=#state{socket=Sock, state=RunningState, connection=Conn}) ->
+ SockInfos = case rabbit_net:getstat(Sock,
+ [recv_oct, recv_cnt, send_oct, send_cnt, send_pend]) of
+ {ok, SI} -> SI;
+ {error, _} -> []
+ end,
+ Infos = [{pid, Conn}, {state, RunningState}|SockInfos],
+ rabbit_core_metrics:connection_stats(Conn, Infos),
+ rabbit_event:notify(connection_stats, Infos),
+ State1 = rabbit_event:reset_stats_timer(State, #state.stats_timer),
+ State1.
diff --git a/deps/rabbitmq_web_stomp/src/rabbit_web_stomp_internal_event_handler.erl b/deps/rabbitmq_web_stomp/src/rabbit_web_stomp_internal_event_handler.erl
new file mode 100644
index 0000000000..5e129de049
--- /dev/null
+++ b/deps/rabbitmq_web_stomp/src/rabbit_web_stomp_internal_event_handler.erl
@@ -0,0 +1,37 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_web_stomp_internal_event_handler).
+
+-behaviour(gen_event).
+
+-export([init/1, handle_event/2, handle_call/2, handle_info/2, terminate/2, code_change/3]).
+
+-import(rabbit_misc, [pget/2]).
+
+init([]) ->
+ {ok, []}.
+
+handle_event({event, maintenance_connections_closed, _Info, _, _}, State) ->
+ %% we should close our connections
+ {ok, NConnections} = rabbit_web_stomp_listener:close_all_client_connections("node is being put into maintenance mode"),
+ rabbit_log:alert("Closed ~b local Web STOMP client connections", [NConnections]),
+ {ok, State};
+handle_event(_Event, State) ->
+ {ok, State}.
+
+handle_call(_Request, State) ->
+ {ok, State}.
+
+handle_info(_Info, State) ->
+ {ok, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
diff --git a/deps/rabbitmq_web_stomp/src/rabbit_web_stomp_listener.erl b/deps/rabbitmq_web_stomp/src/rabbit_web_stomp_listener.erl
new file mode 100644
index 0000000000..b644329d1d
--- /dev/null
+++ b/deps/rabbitmq_web_stomp/src/rabbit_web_stomp_listener.erl
@@ -0,0 +1,212 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_web_stomp_listener).
+
+-export([
+ init/0,
+ stop/1,
+ list_connections/0,
+ close_all_client_connections/1
+]).
+
+%% for testing purposes
+-export([get_binding_address/1, get_tcp_port/1, get_tcp_conf/2]).
+
+-include_lib("rabbitmq_stomp/include/rabbit_stomp.hrl").
+
+-import(rabbit_misc, [pget/2]).
+
+-define(TCP_PROTOCOL, 'http/web-stomp').
+-define(TLS_PROTOCOL, 'https/web-stomp').
+
+%%
+%% API
+%%
+
+-spec init() -> ok.
+init() ->
+ WsFrame = get_env(ws_frame, text),
+ CowboyOpts0 = maps:from_list(get_env(cowboy_opts, [])),
+ CowboyOpts = CowboyOpts0#{proxy_header => get_env(proxy_protocol, false)},
+ CowboyWsOpts = maps:from_list(get_env(cowboy_ws_opts, [])),
+
+ VhostRoutes = [
+ {get_env(ws_path, "/ws"), rabbit_web_stomp_handler, [{type, WsFrame}, {ws_opts, CowboyWsOpts}]}
+ ],
+ Routes = cowboy_router:compile([{'_', VhostRoutes}]), % any vhost
+
+ case get_env(tcp_config, []) of
+ [] -> ok;
+ TCPConf0 -> start_tcp_listener(TCPConf0, CowboyOpts, Routes)
+ end,
+ case get_env(ssl_config, []) of
+ [] -> ok;
+ TLSConf0 -> start_tls_listener(TLSConf0, CowboyOpts, Routes)
+ end,
+ ok.
+
+stop(State) ->
+ rabbit_networking:stop_ranch_listener_of_protocol(?TCP_PROTOCOL),
+ rabbit_networking:stop_ranch_listener_of_protocol(?TLS_PROTOCOL),
+ State.
+
+-spec list_connections() -> [pid()].
+list_connections() ->
+ PlainPids = connection_pids_of_protocol(?TCP_PROTOCOL),
+ TLSPids = connection_pids_of_protocol(?TLS_PROTOCOL),
+
+ PlainPids ++ TLSPids.
+
+-spec close_all_client_connections(string()) -> {'ok', non_neg_integer()}.
+close_all_client_connections(Reason) ->
+ Connections = list_connections(),
+ [rabbit_web_stomp_handler:close_connection(Pid, Reason) || Pid <- Connections],
+ {ok, length(Connections)}.
+
+
+%%
+%% Implementation
+%%
+
+connection_pids_of_protocol(Protocol) ->
+ case rabbit_networking:ranch_ref_of_protocol(Protocol) of
+ undefined -> [];
+ AcceptorRef ->
+ lists:map(fun cowboy_ws_connection_pid/1, ranch:procs(AcceptorRef, connections))
+ end.
+
+-spec cowboy_ws_connection_pid(pid()) -> pid().
+cowboy_ws_connection_pid(RanchConnPid) ->
+ Children = supervisor:which_children(RanchConnPid),
+ {cowboy_clear, Pid, _, _} = lists:keyfind(cowboy_clear, 1, Children),
+ Pid.
+
+start_tcp_listener(TCPConf0, CowboyOpts0, Routes) ->
+ NumTcpAcceptors = case application:get_env(rabbitmq_web_stomp, num_tcp_acceptors) of
+ undefined -> get_env(num_acceptors, 10);
+ {ok, NumTcp} -> NumTcp
+ end,
+ Port = get_tcp_port(application:get_all_env(rabbitmq_web_stomp)),
+ TCPConf = get_tcp_conf(TCPConf0, Port),
+ RanchTransportOpts = #{
+ socket_opts => TCPConf,
+ connection_type => supervisor,
+ max_connections => get_max_connections(),
+ num_acceptors => NumTcpAcceptors
+ },
+ CowboyOpts = CowboyOpts0#{env => #{dispatch => Routes},
+ middlewares => [cowboy_router,
+ rabbit_web_stomp_middleware,
+ cowboy_handler]},
+ case ranch:start_listener(rabbit_networking:ranch_ref(TCPConf),
+ ranch_tcp,
+ RanchTransportOpts,
+ rabbit_web_stomp_connection_sup,
+ CowboyOpts) of
+ {ok, _} -> ok;
+ {error, {already_started, _}} -> ok;
+ {error, ErrTCP} ->
+ rabbit_log_connection:error(
+ "Failed to start a WebSocket (HTTP) listener. Error: ~p,"
+ " listener settings: ~p~n",
+ [ErrTCP, TCPConf]),
+ throw(ErrTCP)
+ end,
+ listener_started(?TCP_PROTOCOL, TCPConf),
+ rabbit_log_connection:info(
+ "rabbit_web_stomp: listening for HTTP connections on ~s:~w~n",
+ [get_binding_address(TCPConf), Port]).
+
+
+start_tls_listener(TLSConf0, CowboyOpts0, Routes) ->
+ rabbit_networking:ensure_ssl(),
+ NumSslAcceptors = case application:get_env(rabbitmq_web_stomp, num_ssl_acceptors) of
+ undefined -> get_env(num_acceptors, 10);
+ {ok, NumSsl} -> NumSsl
+ end,
+ TLSPort = proplists:get_value(port, TLSConf0),
+ TLSConf = maybe_parse_ip(TLSConf0),
+ RanchTransportOpts = #{
+ socket_opts => TLSConf,
+ connection_type => supervisor,
+ max_connections => get_max_connections(),
+ num_acceptors => NumSslAcceptors
+ },
+ CowboyOpts = CowboyOpts0#{env => #{dispatch => Routes},
+ middlewares => [cowboy_router,
+ rabbit_web_stomp_middleware,
+ cowboy_handler]},
+ case ranch:start_listener(rabbit_networking:ranch_ref(TLSConf),
+ ranch_ssl,
+ RanchTransportOpts,
+ rabbit_web_stomp_connection_sup,
+ CowboyOpts) of
+ {ok, _} -> ok;
+ {error, {already_started, _}} -> ok;
+ {error, ErrTLS} ->
+ rabbit_log_connection:error(
+ "Failed to start a TLS WebSocket (HTTPS) listener. Error: ~p,"
+ " listener settings: ~p~n",
+ [ErrTLS, TLSConf]),
+ throw(ErrTLS)
+ end,
+ listener_started(?TLS_PROTOCOL, TLSConf),
+ rabbit_log_connection:info(
+ "rabbit_web_stomp: listening for HTTPS connections on ~s:~w~n",
+ [get_binding_address(TLSConf), TLSPort]).
+
+listener_started(Protocol, Listener) ->
+ Port = rabbit_misc:pget(port, Listener),
+ [rabbit_networking:tcp_listener_started(Protocol, Listener,
+ IPAddress, Port)
+ || {IPAddress, _Port, _Family}
+ <- rabbit_networking:tcp_listener_addresses(Port)],
+ ok.
+
+get_env(Key, Default) ->
+ rabbit_misc:get_env(rabbitmq_web_stomp, Key, Default).
+
+get_tcp_port(Configuration) ->
+ %% The 'tcp_config' option may include the port, and we already have
+ %% a 'port' option. We prioritize the 'port' option in 'tcp_config' (if any)
+ %% over the one found at the root of the env proplist.
+ TcpConfiguration = proplists:get_value(tcp_config, Configuration, []),
+ case proplists:get_value(port, TcpConfiguration) of
+ undefined ->
+ proplists:get_value(port, Configuration, 15674);
+ Port ->
+ Port
+ end.
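+
+%% For example (illustrative values):
+%%   get_tcp_port([{port, 15674}, {tcp_config, [{port, 12345}]}]) => 12345
+%%   get_tcp_port([{port, 15674}])                                => 15674
+%%   get_tcp_port([])                                             => 15674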
+
+get_tcp_conf(TcpConfiguration, Port0) ->
+ Port = [{port, Port0} | proplists:delete(port, TcpConfiguration)],
+ maybe_parse_ip(Port).
+
+maybe_parse_ip(Configuration) ->
+ case proplists:get_value(ip, Configuration) of
+ undefined ->
+ Configuration;
+ IP when is_tuple(IP) ->
+ Configuration;
+ IP when is_list(IP) ->
+ {ok, ParsedIP} = inet_parse:address(IP),
+ [{ip, ParsedIP} | proplists:delete(ip, Configuration)]
+ end.
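+
+%% For example (illustrative values):
+%%   maybe_parse_ip([{ip, "127.0.0.4"}, {port, 15674}])
+%%   => [{ip, {127,0,0,4}}, {port, 15674}]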
+
+get_binding_address(Configuration) ->
+ case proplists:get_value(ip, Configuration) of
+ undefined ->
+ "0.0.0.0";
+ IP when is_tuple(IP) ->
+ inet:ntoa(IP);
+ IP when is_list(IP) ->
+ IP
+ end.
+
+get_max_connections() ->
+ rabbit_misc:get_env(rabbitmq_web_stomp, max_connections, infinity).
diff --git a/deps/rabbitmq_web_stomp/src/rabbit_web_stomp_middleware.erl b/deps/rabbitmq_web_stomp/src/rabbit_web_stomp_middleware.erl
new file mode 100644
index 0000000000..1a38493444
--- /dev/null
+++ b/deps/rabbitmq_web_stomp/src/rabbit_web_stomp_middleware.erl
@@ -0,0 +1,22 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_web_stomp_middleware).
+-behavior(cowboy_middleware).
+
+-export([execute/2]).
+
+execute(Req, Env) ->
+ #{keepalive_sup := KeepaliveSup} = Env,
+ Sock = maps:get(socket, Env),
+ case maps:get(handler_opts, Env, undefined) of
+ undefined -> {ok, Req, Env};
+ Opts when is_list(Opts) ->
+ {ok, Req, Env#{handler_opts => [{keepalive_sup, KeepaliveSup},
+ {socket, Sock}
+ |Opts]}}
+ end.
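+
+%% For example (illustrative values): with handler_opts set by the router
+%% to [{type, text}], the websocket handler's init/2 receives
+%% [{keepalive_sup, SupPid}, {socket, Sock}, {type, text}].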
diff --git a/deps/rabbitmq_web_stomp/src/rabbit_web_stomp_sup.erl b/deps/rabbitmq_web_stomp/src/rabbit_web_stomp_sup.erl
new file mode 100644
index 0000000000..97f1659806
--- /dev/null
+++ b/deps/rabbitmq_web_stomp/src/rabbit_web_stomp_sup.erl
@@ -0,0 +1,22 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_web_stomp_sup).
+-behaviour(supervisor2).
+
+-export([start_link/0, init/1]).
+
+-define(SUP_NAME, ?MODULE).
+
+%%----------------------------------------------------------------------------
+
+-spec start_link() -> ignore | {'ok', pid()} | {'error', any()}.
+start_link() ->
+ supervisor2:start_link({local, ?SUP_NAME}, ?MODULE, []).
+
+init([]) ->
+ {ok, {{one_for_one, 1, 5}, []}}.
diff --git a/deps/rabbitmq_web_stomp/test/amqp_stomp_SUITE.erl b/deps/rabbitmq_web_stomp/test/amqp_stomp_SUITE.erl
new file mode 100644
index 0000000000..695d993149
--- /dev/null
+++ b/deps/rabbitmq_web_stomp/test/amqp_stomp_SUITE.erl
@@ -0,0 +1,103 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+-module(amqp_stomp_SUITE).
+
+-compile(export_all).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-define(QUEUE, <<"TestQueue">>).
+-define(DESTINATION, "/amq/queue/TestQueue").
+
+all() ->
+ [
+ pubsub_amqp
+ ].
+
+init_per_suite(Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config,
+ [{rmq_nodename_suffix, ?MODULE},
+ {protocol, "ws"}]),
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps()).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config,
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_testcase(Testcase, Config) ->
+ Config1 = rabbit_ct_helpers:testcase_started(Config, Testcase),
+ {ok, Connection} = amqp_connection:start(#amqp_params_direct{
+ node = rabbit_ct_broker_helpers:get_node_config(Config1, 0, nodename)
+ }),
+ {ok, Channel} = amqp_connection:open_channel(Connection),
+ rabbit_ct_helpers:set_config(Config1, [
+ {amqp_connection, Connection},
+ {amqp_channel, Channel}
+ ]).
+
+end_per_testcase(Testcase, Config) ->
+ Connection = ?config(amqp_connection, Config),
+ Channel = ?config(amqp_channel, Config),
+ amqp_channel:close(Channel),
+ amqp_connection:close(Connection),
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+
+raw_send(WS, Command, Headers) ->
+ raw_send(WS, Command, Headers, <<>>).
+raw_send(WS, Command, Headers, Body) ->
+ Frame = stomp:marshal(Command, Headers, Body),
+ rfc6455_client:send(WS, Frame).
+
+raw_recv(WS) ->
+ {ok, P} = rfc6455_client:recv(WS),
+ stomp:unmarshal(P).
+
+
+pubsub_amqp(Config) ->
+ Ch = ?config(amqp_channel, Config),
+ #'queue.declare_ok'{} =
+ amqp_channel:call(Ch, #'queue.declare'{queue = ?QUEUE, auto_delete = true}),
+
+ PortStr = rabbit_ws_test_util:get_web_stomp_port_str(Config),
+ Protocol = ?config(protocol, Config),
+ WS = rfc6455_client:new(Protocol ++ "://127.0.0.1:" ++ PortStr ++ "/ws", self()),
+ {ok, _} = rfc6455_client:open(WS),
+ ok = raw_send(WS, "CONNECT", [{"login", "guest"}, {"passcode", "guest"}]),
+
+ {<<"CONNECTED">>, _, <<>>} = raw_recv(WS),
+
+ ok = raw_send(WS, "SUBSCRIBE", [{"destination", ?DESTINATION},
+ {"id", "pubsub_amqp"},
+ {"x-queue-name", ?QUEUE}]),
+
+ CHK1 = <<"x-custom-hdr-1">>,
+ CHV1 = <<"value1">>,
+ CH1 = {CHK1, longstr, CHV1},
+ CHK2 = <<"x-custom-hdr-2">>,
+ CHV2 = <<"value2">>,
+ CH2 = {CHK2, longstr, CHV2},
+ CHK3 = <<"custom-hdr-3">>,
+ CHV3 = <<"value3">>,
+    CH3 = {CHK3, longstr, CHV3},
+
+ Publish = #'basic.publish'{exchange = <<"">>, routing_key = ?QUEUE},
+ Props = #'P_basic'{headers = [CH1, CH2, CH3]},
+ amqp_channel:call(Ch, Publish, #amqp_msg{props = Props, payload = <<"a\x00a">>}),
+
+ {<<"MESSAGE">>, H, <<"a\x00a">>} = raw_recv(WS),
+
+ {close, _} = rfc6455_client:close(WS),
+
+ "/queue/TestQueue" = binary_to_list(proplists:get_value(<<"destination">>, H)),
+ {CHK1, CHV1} = {CHK1, proplists:get_value(CHK1, H)},
+ {CHK2, CHV2} = {CHK2, proplists:get_value(CHK2, H)},
+ {CHK3, CHV3} = {CHK3, proplists:get_value(CHK3, H)},
+ ok.
diff --git a/deps/rabbitmq_web_stomp/test/config_schema_SUITE.erl b/deps/rabbitmq_web_stomp/test/config_schema_SUITE.erl
new file mode 100644
index 0000000000..391b43dfb4
--- /dev/null
+++ b/deps/rabbitmq_web_stomp/test/config_schema_SUITE.erl
@@ -0,0 +1,55 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(config_schema_SUITE).
+
+-compile(export_all).
+
+all() ->
+ [
+ run_snippets
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ Config1 = rabbit_ct_helpers:run_setup_steps(Config),
+ rabbit_ct_config_schema:init_schemas(rabbitmq_web_stomp, Config1).
+
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, Testcase}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_testcase(Testcase, Config) ->
+ Config1 = rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()),
+ rabbit_ct_helpers:testcase_finished(Config1, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+run_snippets(Config) ->
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, run_snippets1, [Config]).
+
+run_snippets1(Config) ->
+ rabbit_ct_config_schema:run_snippets(Config).
+
diff --git a/deps/rabbitmq_web_stomp/test/config_schema_SUITE_data/certs/cacert.pem b/deps/rabbitmq_web_stomp/test/config_schema_SUITE_data/certs/cacert.pem
new file mode 100644
index 0000000000..eaf6b67806
--- /dev/null
+++ b/deps/rabbitmq_web_stomp/test/config_schema_SUITE_data/certs/cacert.pem
@@ -0,0 +1 @@
+I'm not a certificate
diff --git a/deps/rabbitmq_web_stomp/test/config_schema_SUITE_data/certs/cert.pem b/deps/rabbitmq_web_stomp/test/config_schema_SUITE_data/certs/cert.pem
new file mode 100644
index 0000000000..eaf6b67806
--- /dev/null
+++ b/deps/rabbitmq_web_stomp/test/config_schema_SUITE_data/certs/cert.pem
@@ -0,0 +1 @@
+I'm not a certificate
diff --git a/deps/rabbitmq_web_stomp/test/config_schema_SUITE_data/certs/key.pem b/deps/rabbitmq_web_stomp/test/config_schema_SUITE_data/certs/key.pem
new file mode 100644
index 0000000000..eaf6b67806
--- /dev/null
+++ b/deps/rabbitmq_web_stomp/test/config_schema_SUITE_data/certs/key.pem
@@ -0,0 +1 @@
+I'm not a certificate
diff --git a/deps/rabbitmq_web_stomp/test/config_schema_SUITE_data/rabbitmq_web_stomp.snippets b/deps/rabbitmq_web_stomp/test/config_schema_SUITE_data/rabbitmq_web_stomp.snippets
new file mode 100644
index 0000000000..2de8b9fcc3
--- /dev/null
+++ b/deps/rabbitmq_web_stomp/test/config_schema_SUITE_data/rabbitmq_web_stomp.snippets
@@ -0,0 +1,226 @@
+[{listener,
+ "web_stomp.tcp.listener = 127.0.0.1:12345",
+ [{rabbitmq_web_stomp,
+ [{tcp_config, [{ip,"127.0.0.1"},{port,12345}]}]}],
+ [rabbitmq_web_stomp]},
+ {tcp_listener_none,
+ "web_stomp.tcp.listener = none",
+ [{rabbitmq_web_stomp,
+ [{tcp_config, []}]}],
+ [rabbitmq_web_stomp]},
+ {tcp_config,
+ "web_stomp.tcp.ip = 127.0.0.4
+ web_stomp.tcp.port = 11123",
+ [{rabbitmq_web_stomp,
+ [{tcp_config, [{ip,"127.0.0.4"},{port,11123}]}]}],
+ [rabbitmq_web_stomp]},
+ {tcp_config_default,
+ "web_stomp.tcp.backlog = 2048",
+ [{rabbitmq_web_stomp,
+ [{tcp_config, [{backlog, 2048}]}]}],
+ [rabbitmq_web_stomp]},
+ {port,
+ "web_stomp.port = 12345",
+ [{rabbitmq_web_stomp,[{port,12345}]}],
+ [rabbitmq_web_stomp]},
+
+ {num_acceptors_tcp,
+ "web_stomp.num_acceptors.tcp = 20",
+ [{rabbitmq_web_stomp,
+ [{num_tcp_acceptors, 20}]}],
+ [rabbitmq_web_stomp]},
+
+ {num_acceptors_tls,
+ "web_stomp.num_acceptors.ssl = 20",
+ [{rabbitmq_web_stomp,
+ [{num_ssl_acceptors, 20}]}],
+ [rabbitmq_web_stomp]},
+
+ {max_connections,
+ "web_stomp.max_connections = 5000",
+ [{rabbitmq_web_stomp,
+ [{max_connections, 5000}]}],
+ [rabbitmq_web_stomp]},
+
+ {ssl_listener,
+ "web_stomp.ssl.listener = 127.0.0.4:15672",
+ [{rabbitmq_web_stomp,
+ [{ssl_config, [{ip,"127.0.0.4"},{port,15672}]}]}],
+ [rabbitmq_web_stomp]},
+ {ssl_listener_none,
+ "web_stomp.ssl.listener = none",
+ [],
+ [rabbitmq_web_stomp]},
+
+ {ssl_with_listener,
+ "web_stomp.ssl.listener = 127.0.0.2:15671
+ web_stomp.ssl.backlog = 1024
+ web_stomp.ssl.certfile = test/config_schema_SUITE_data/certs/cert.pem
+ web_stomp.ssl.keyfile = test/config_schema_SUITE_data/certs/key.pem
+ web_stomp.ssl.cacertfile = test/config_schema_SUITE_data/certs/cacert.pem
+ web_stomp.ssl.password = changeme",
+ [{rabbitmq_web_stomp,
+ [{ssl_config,
+ [{ip,"127.0.0.2"},
+ {port,15671},
+ {backlog,1024},
+ {certfile,"test/config_schema_SUITE_data/certs/cert.pem"},
+ {keyfile,"test/config_schema_SUITE_data/certs/key.pem"},
+ {cacertfile,"test/config_schema_SUITE_data/certs/cacert.pem"},
+ {password,"changeme"}]}]}],
+ [rabbitmq_web_stomp]},
+
+ {ssl,
+ "web_stomp.ssl.port = 15671
+ web_stomp.ssl.backlog = 1024
+ web_stomp.ssl.certfile = test/config_schema_SUITE_data/certs/cert.pem
+ web_stomp.ssl.keyfile = test/config_schema_SUITE_data/certs/key.pem
+ web_stomp.ssl.cacertfile = test/config_schema_SUITE_data/certs/cacert.pem
+ web_stomp.ssl.password = changeme
+
+ web_stomp.ssl.versions.tls1_2 = tlsv1.2
+ web_stomp.ssl.versions.tls1_1 = tlsv1.1",
+ [{rabbitmq_web_stomp,
+ [{ssl_config,
+ [{port,15671},
+ {backlog,1024},
+ {certfile,"test/config_schema_SUITE_data/certs/cert.pem"},
+ {keyfile,"test/config_schema_SUITE_data/certs/key.pem"},
+ {cacertfile,"test/config_schema_SUITE_data/certs/cacert.pem"},
+ {password,"changeme"},
+
+ {versions,['tlsv1.2','tlsv1.1']}
+ ]}]}],
+ [rabbitmq_web_stomp]},
+
+ {ssl_ciphers,
+ "web_stomp.ssl.port = 15671
+ web_stomp.ssl.backlog = 1024
+ web_stomp.ssl.certfile = test/config_schema_SUITE_data/certs/cert.pem
+ web_stomp.ssl.keyfile = test/config_schema_SUITE_data/certs/key.pem
+ web_stomp.ssl.cacertfile = test/config_schema_SUITE_data/certs/cacert.pem
+ web_stomp.ssl.password = changeme
+
+ web_stomp.ssl.honor_cipher_order = true
+ web_stomp.ssl.honor_ecc_order = true
+ web_stomp.ssl.client_renegotiation = false
+ web_stomp.ssl.secure_renegotiate = true
+
+ web_stomp.ssl.versions.1 = tlsv1.2
+ web_stomp.ssl.versions.2 = tlsv1.1
+ web_stomp.ssl.ciphers.1 = ECDHE-ECDSA-AES256-GCM-SHA384
+ web_stomp.ssl.ciphers.2 = ECDHE-RSA-AES256-GCM-SHA384
+ web_stomp.ssl.ciphers.3 = ECDHE-ECDSA-AES256-SHA384
+ web_stomp.ssl.ciphers.4 = ECDHE-RSA-AES256-SHA384
+ web_stomp.ssl.ciphers.5 = ECDH-ECDSA-AES256-GCM-SHA384
+ web_stomp.ssl.ciphers.6 = ECDH-RSA-AES256-GCM-SHA384
+ web_stomp.ssl.ciphers.7 = ECDH-ECDSA-AES256-SHA384
+ web_stomp.ssl.ciphers.8 = ECDH-RSA-AES256-SHA384
+ web_stomp.ssl.ciphers.9 = DHE-RSA-AES256-GCM-SHA384",
+ [{rabbitmq_web_stomp,
+ [{ssl_config,
+ [{port,15671},
+ {backlog,1024},
+ {certfile,"test/config_schema_SUITE_data/certs/cert.pem"},
+ {keyfile,"test/config_schema_SUITE_data/certs/key.pem"},
+ {cacertfile,"test/config_schema_SUITE_data/certs/cacert.pem"},
+ {password,"changeme"},
+
+ {honor_cipher_order, true},
+ {honor_ecc_order, true},
+ {client_renegotiation, false},
+ {secure_renegotiate, true},
+
+ {versions,['tlsv1.2','tlsv1.1']},
+ {ciphers, [
+ "ECDHE-ECDSA-AES256-GCM-SHA384",
+ "ECDHE-RSA-AES256-GCM-SHA384",
+ "ECDHE-ECDSA-AES256-SHA384",
+ "ECDHE-RSA-AES256-SHA384",
+ "ECDH-ECDSA-AES256-GCM-SHA384",
+ "ECDH-RSA-AES256-GCM-SHA384",
+ "ECDH-ECDSA-AES256-SHA384",
+ "ECDH-RSA-AES256-SHA384",
+ "DHE-RSA-AES256-GCM-SHA384"
+ ]}
+ ]}]}],
+ [rabbitmq_web_stomp]},
+
+ {websocket_endpoint,
+ "web_stomp.ws_path = /rmq/ws",
+ [{rabbitmq_web_stomp,
+ [{ws_path, "/rmq/ws"}]}],
+ [rabbitmq_web_stomp]},
+
+ {ws_frame,
+ "web_stomp.ws_frame = binary",
+ [{rabbitmq_web_stomp,[{ws_frame,binary}]}],
+ [rabbitmq_web_stomp]},
+
+ {use_http_auth,
+ "web_stomp.use_http_auth = true",
+ [{rabbitmq_web_stomp,[{use_http_auth, true}]}],
+ [rabbitmq_web_stomp]},
+
+ %%
+ %% Cowboy options
+ %%
+
+ {cowboy_max_keepalive,
+ "web_stomp.cowboy_opts.max_keepalive = 10",
+ [{rabbitmq_web_stomp,[{cowboy_opts,[{max_keepalive,10}]}]}],
+ [rabbitmq_web_stomp]},
+
+ {cowboy_timeout,
+ "web_stomp.cowboy_opts.timeout = 10000",
+ [{rabbitmq_web_stomp,[{cowboy_opts,[{idle_timeout, 10000}]}]}],
+ [rabbitmq_web_stomp]},
+
+ {cowboy_idle_timeout,
+ "web_stomp.cowboy_opts.idle_timeout = 10000",
+ [{rabbitmq_web_stomp,[{cowboy_opts,[{idle_timeout, 10000}]}]}],
+ [rabbitmq_web_stomp]},
+
+ %%
+ %% Cowboy WebSocket options
+ %%
+
+ {ws_opts_compress_true,
+ "web_stomp.ws_opts.compress = true",
+ [{rabbitmq_web_stomp,[{cowboy_ws_opts,[{compress, true}]}]}],
+ [rabbitmq_web_stomp]},
+
+ {ws_opts_compress_false,
+ "web_stomp.ws_opts.compress = false",
+ [{rabbitmq_web_stomp,[{cowboy_ws_opts,[{compress, false}]}]}],
+ [rabbitmq_web_stomp]},
+
+ {ws_opts_max_frame_size,
+ "web_stomp.ws_opts.max_frame_size = 8000",
+ [{rabbitmq_web_stomp,[{cowboy_ws_opts,[{max_frame_size, 8000}]}]}],
+ [rabbitmq_web_stomp]},
+
+ {ws_idle_timeout,
+ "web_stomp.ws_opts.idle_timeout = 10000",
+ [{rabbitmq_web_stomp,[{cowboy_ws_opts,[{idle_timeout, 10000}]}]}],
+ [rabbitmq_web_stomp]},
+
+ %%
+ %% Proxy protocol
+ %%
+
+ {ws_proxy_protocol_explicitly_disabled,
+ "web_stomp.proxy_protocol = false",
+ [{rabbitmq_web_stomp,[
+ {proxy_protocol, false}
+ ]}],
+ [rabbitmq_web_stomp]},
+
+ {ws_proxy_protocol_enabled,
+ "web_stomp.proxy_protocol = true",
+ [{rabbitmq_web_stomp,[
+ {proxy_protocol, true}
+ ]}],
+ [rabbitmq_web_stomp]}
+
+].
diff --git a/deps/rabbitmq_web_stomp/test/cowboy_websocket_SUITE.erl b/deps/rabbitmq_web_stomp/test/cowboy_websocket_SUITE.erl
new file mode 100644
index 0000000000..36d05849ac
--- /dev/null
+++ b/deps/rabbitmq_web_stomp/test/cowboy_websocket_SUITE.erl
@@ -0,0 +1,269 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(cowboy_websocket_SUITE).
+
+-compile(export_all).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+all() ->
+ [{group, integration},
+ {group, default_login_enabled},
+ {group, default_login_disabled}].
+
+groups() ->
+ [
+ {integration, [],
+ [
+ connection_succeeds,
+ connection_fails,
+ pubsub,
+ pubsub_binary,
+ disconnect,
+ http_auth
+ ]},
+ %% rabbitmq/rabbitmq-web-stomp#110
+ {default_login_enabled, [],
+ [
+ connection_with_explicitly_provided_correct_credentials,
+ connection_with_default_login_succeeds
+ ]},
+ %% rabbitmq/rabbitmq-web-stomp#110
+ {default_login_disabled, [],
+ [
+ connection_with_explicitly_provided_correct_credentials,
+ connection_without_credentials_fails
+ ]}
+ ].
+
+init_per_suite(Config) ->
+ Config1 = rabbit_ct_helpers:set_config(Config,
+ [{rmq_nodename_suffix, ?MODULE},
+ {protocol, "ws"}]),
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps()).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config,
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_testcase(pubsub_binary, Config) ->
+ rabbit_ws_test_util:update_app_env(Config, ws_frame, binary),
+ Config;
+init_per_testcase(http_auth, Config) ->
+ rabbit_ws_test_util:update_app_env(Config, use_http_auth, true),
+ Config;
+init_per_testcase(_, Config) -> Config.
+
+end_per_testcase(pubsub_binary, Config) ->
+ rabbit_ws_test_util:update_app_env(Config, ws_frame, text),
+ Config;
+end_per_testcase(http_auth, Config) ->
+ rabbit_ws_test_util:update_app_env(Config, use_http_auth, false),
+ Config;
+end_per_testcase(_, Config) -> Config.
+
+
+connection_succeeds(Config) ->
+ PortStr = rabbit_ws_test_util:get_web_stomp_port_str(Config),
+ Protocol = ?config(protocol, Config),
+ WS = rfc6455_client:new(Protocol ++ "://127.0.0.1:" ++ PortStr ++ "/ws", self()),
+ {ok, _} = rfc6455_client:open(WS),
+ {close, _} = rfc6455_client:close(WS),
+ ok.
+
+connection_fails(Config) ->
+ PortStr = rabbit_ws_test_util:get_web_stomp_port_str(Config),
+ Protocol = ?config(protocol, Config),
+ WS = rfc6455_client:new(Protocol ++ "://127.0.0.1:" ++ PortStr ++ "/ws", self()),
+ {ok, _} = rfc6455_client:open(WS),
+ ok = raw_send(WS, "CONNECT", [{"login", "uncorrect_$55"}, {"passcode", "uncorrect_$88"}]),
+ {<<"ERROR">>, _, <<"Access refused for user 'uncorrect_$55'\n">>} = raw_recv(WS),
+ {close, _} = rfc6455_client:close(WS),
+ ok.
+
+connection_with_explicitly_provided_correct_credentials(Config) ->
+ PortStr = rabbit_ws_test_util:get_web_stomp_port_str(Config),
+ Protocol = ?config(protocol, Config),
+ WS = rfc6455_client:new(Protocol ++ "://127.0.0.1:" ++ PortStr ++ "/ws", self()),
+ {ok, _} = rfc6455_client:open(WS),
+ ok = raw_send(WS, "CONNECT", [{"login", "guest"}, {"passcode", "guest"}]),
+
+ {<<"CONNECTED">>, _, <<>>} = raw_recv(WS),
+ {close, _} = rfc6455_client:close(WS),
+ ok.
+
+connection_with_default_login_succeeds(Config) ->
+ PortStr = rabbit_ws_test_util:get_web_stomp_port_str(Config),
+ Protocol = ?config(protocol, Config),
+ WS = rfc6455_client:new(Protocol ++ "://127.0.0.1:" ++ PortStr ++ "/ws", self()),
+ {ok, _} = rfc6455_client:open(WS),
+ ok = raw_send(WS, "CONNECT", []),
+
+ {<<"CONNECTED">>, _, <<>>} = raw_recv(WS),
+ {close, _} = rfc6455_client:close(WS),
+    ok.
+
+connection_without_credentials_fails(Config) ->
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+ application, set_env,
+ [rabbitmq_stomp, default_user, undefined]),
+
+ PortStr = rabbit_ws_test_util:get_web_stomp_port_str(Config),
+ Protocol = ?config(protocol, Config),
+ WS = rfc6455_client:new(Protocol ++ "://127.0.0.1:" ++ PortStr ++ "/ws", self()),
+
+ {ok, _} = rfc6455_client:open(WS),
+ ok = raw_send(WS, "CONNECT"),
+
+ {close, _} = raw_recv(WS),
+ ok.
+
+raw_send(WS, Command) ->
+ raw_send(WS, Command, [], <<>>).
+
+raw_send(WS, Command, Headers) ->
+ raw_send(WS, Command, Headers, <<>>).
+raw_send(WS, Command, Headers, Body) ->
+ Frame = stomp:marshal(Command, Headers, Body),
+ rfc6455_client:send(WS, Frame).
+
+raw_recv(WS) ->
+ case rfc6455_client:recv(WS) of
+ {ok, P} -> stomp:unmarshal(P);
+ Other -> Other
+ end.
+
+pubsub(Config) ->
+ PortStr = rabbit_ws_test_util:get_web_stomp_port_str(Config),
+ Protocol = ?config(protocol, Config),
+ WS = rfc6455_client:new(Protocol ++ "://127.0.0.1:" ++ PortStr ++ "/ws", self()),
+ {ok, _} = rfc6455_client:open(WS),
+ ok = raw_send(WS, "CONNECT", [{"login", "guest"}, {"passcode", "guest"}]),
+
+ {<<"CONNECTED">>, _, <<>>} = raw_recv(WS),
+
+ Dst = "/topic/test-" ++ stomp:list_to_hex(binary_to_list(crypto:strong_rand_bytes(8))),
+
+ ok = raw_send(WS, "SUBSCRIBE", [{"destination", Dst},
+ {"id", "s0"}]),
+
+ CustHdr1K = "x-custom-hdr-1",
+ CustHdr1 = {CustHdr1K, "value1"},
+ CustHdr2K = "x-custom-hdr-2",
+ CustHdr2 = {CustHdr2K, "value2"},
+ CustHdr3K = "custom-hdr-3",
+ CustHdr3 = {CustHdr3K, "value3"},
+ ok = raw_send(WS, "SEND", [{"destination", Dst}, {"content-length", "3"},
+ CustHdr1, CustHdr2, CustHdr3], <<"a\x00a">>),
+
+ {<<"MESSAGE">>, H, <<"a\x00a">>} = raw_recv(WS),
+
+ Dst = binary_to_list(proplists:get_value(<<"destination">>, H)),
+ CustHdr1 = {CustHdr1K, binary_to_list(proplists:get_value(list_to_binary(CustHdr1K), H))},
+ CustHdr2 = {CustHdr2K, binary_to_list(proplists:get_value(list_to_binary(CustHdr2K), H))},
+ CustHdr3 = {CustHdr3K, binary_to_list(proplists:get_value(list_to_binary(CustHdr3K), H))},
+
+ {close, _} = rfc6455_client:close(WS),
+ ok.
+
+
+raw_send_binary(WS, Command, Headers) ->
+ raw_send_binary(WS, Command, Headers, <<>>).
+raw_send_binary(WS, Command, Headers, Body) ->
+ Frame = stomp:marshal(Command, Headers, Body),
+ rfc6455_client:send_binary(WS, Frame).
+
+raw_recv_binary(WS) ->
+ {binary, P} = rfc6455_client:recv(WS),
+ stomp:unmarshal(P).
+
+
+pubsub_binary(Config) ->
+ PortStr = rabbit_ws_test_util:get_web_stomp_port_str(Config),
+ Protocol = ?config(protocol, Config),
+ WS = rfc6455_client:new(Protocol ++ "://127.0.0.1:" ++ PortStr ++ "/ws", self()),
+ {ok, _} = rfc6455_client:open(WS),
+ ok = raw_send(WS, "CONNECT", [{"login","guest"}, {"passcode", "guest"}]),
+
+ {<<"CONNECTED">>, _, <<>>} = raw_recv_binary(WS),
+
+ Dst = "/topic/test-" ++ stomp:list_to_hex(binary_to_list(crypto:strong_rand_bytes(8))),
+
+ ok = raw_send(WS, "SUBSCRIBE", [{"destination", Dst},
+ {"id", "s0"}]),
+
+ ok = raw_send(WS, "SEND", [{"destination", Dst},
+ {"content-length", "3"}], <<"a\x00a">>),
+
+ {<<"MESSAGE">>, H, <<"a\x00a">>} = raw_recv_binary(WS),
+ Dst = binary_to_list(proplists:get_value(<<"destination">>, H)),
+
+ {close, _} = rfc6455_client:close(WS).
+
+disconnect(Config) ->
+ PortStr = rabbit_ws_test_util:get_web_stomp_port_str(Config),
+ Protocol = ?config(protocol, Config),
+ WS = rfc6455_client:new(Protocol ++ "://127.0.0.1:" ++ PortStr ++ "/ws", self()),
+ {ok, _} = rfc6455_client:open(WS),
+ ok = raw_send(WS, "CONNECT", [{"login","guest"}, {"passcode", "guest"}]),
+
+ {<<"CONNECTED">>, _, <<>>} = raw_recv(WS),
+
+ ok = raw_send(WS, "DISCONNECT", []),
+ {close, {1000, _}} = rfc6455_client:recv(WS),
+
+ ok.
+
+http_auth(Config) ->
+ %% Intentionally put bad credentials in the CONNECT frame,
+ %% and good credentials in the Authorization header, to
+ %% confirm that the right credentials are picked.
+ PortStr = rabbit_ws_test_util:get_web_stomp_port_str(Config),
+ Protocol = ?config(protocol, Config),
+ WS = rfc6455_client:new(Protocol ++ "://127.0.0.1:" ++ PortStr ++ "/ws", self(),
+ [{login, "guest"}, {passcode, "guest"}]),
+ {ok, _} = rfc6455_client:open(WS),
+ ok = raw_send(WS, "CONNECT", [{"login", "bad"}, {"passcode", "bad"}]),
+ {<<"CONNECTED">>, _, <<>>} = raw_recv(WS),
+ {close, _} = rfc6455_client:close(WS),
+
+ %% Confirm that if no Authorization header is provided,
+ %% the default STOMP plugin credentials are used. We
+ %% expect an error because the default credentials
+ %% are invalid.
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+ application, set_env,
+ [rabbitmq_stomp, default_user,
+ [{login, "bad-default"}, {passcode, "bad-default"}]
+ ]),
+
+ WS2 = rfc6455_client:new(Protocol ++ "://127.0.0.1:" ++ PortStr ++ "/ws", self()),
+ {ok, _} = rfc6455_client:open(WS2),
+ ok = raw_send(WS2, "CONNECT", [{"login", "bad"}, {"passcode", "bad"}]),
+ {<<"ERROR">>, _, _} = raw_recv(WS2),
+ {close, _} = rfc6455_client:close(WS2),
+
+ %% Confirm that we can connect if the default STOMP
+ %% credentials are used.
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+ application, set_env,
+ [rabbitmq_stomp, default_user,
+ [{login, "guest"}, {passcode, "guest"}]
+ ]),
+
+ WS3 = rfc6455_client:new(Protocol ++ "://127.0.0.1:" ++ PortStr ++ "/ws", self()),
+ {ok, _} = rfc6455_client:open(WS3),
+ ok = raw_send(WS3, "CONNECT", [{"login", "bad"}, {"passcode", "bad"}]),
+ {<<"CONNECTED">>, _, <<>>} = raw_recv(WS3),
+ {close, _} = rfc6455_client:close(WS3),
+
+ ok.
diff --git a/deps/rabbitmq_web_stomp/test/proxy_protocol_SUITE.erl b/deps/rabbitmq_web_stomp/test/proxy_protocol_SUITE.erl
new file mode 100644
index 0000000000..4a153d37cc
--- /dev/null
+++ b/deps/rabbitmq_web_stomp/test/proxy_protocol_SUITE.erl
@@ -0,0 +1,106 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(proxy_protocol_SUITE).
+
+
+-compile(export_all).
+
+-include_lib("common_test/include/ct.hrl").
+
+-include_lib("eunit/include/eunit.hrl").
+
+suite() ->
+ [
+ %% If a test hangs, no need to wait for 30 minutes.
+ {timetrap, {minutes, 2}}
+ ].
+
+all() ->
+ [{group, http_tests},
+ {group, https_tests}].
+
+groups() ->
+ Tests = [
+ proxy_protocol
+ ],
+ [{https_tests, [], Tests},
+ {http_tests, [], Tests}].
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ Config.
+
+end_per_suite(Config) ->
+ Config.
+
+init_per_group(Group, Config) ->
+ Protocol = case Group of
+ http_tests -> "ws";
+ https_tests -> "wss"
+ end,
+ Config1 = rabbit_ct_helpers:set_config(Config,
+ [{rmq_nodename_suffix, ?MODULE},
+ {protocol, Protocol},
+ {rabbitmq_ct_tls_verify, verify_none},
+ {rabbitmq_ct_tls_fail_if_no_peer_cert, false}]),
+
+ rabbit_ct_helpers:run_setup_steps(
+ Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++ [
+ fun configure_proxy_protocol/1,
+ fun configure_ssl/1
+ ]).
+
+configure_proxy_protocol(Config) ->
+ rabbit_ws_test_util:update_app_env(Config, proxy_protocol, true),
+ Config.
+
+configure_ssl(Config) ->
+ ErlangConfig = proplists:get_value(erlang_node_config, Config, []),
+ RabbitAppConfig = proplists:get_value(rabbit, ErlangConfig, []),
+ RabbitSslConfig = proplists:get_value(ssl_options, RabbitAppConfig, []),
+ Port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_web_stomp_tls),
+ rabbit_ws_test_util:update_app_env(Config, ssl_config, [{port, Port} | lists:keydelete(port, 1, RabbitSslConfig)]),
+ Config.
+
+end_per_group(_Group, Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config,
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+proxy_protocol(Config) ->
+ PortStr = rabbit_ws_test_util:get_web_stomp_port_str(Config),
+
+ Protocol = ?config(protocol, Config),
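+ %% The TCP preface below is a PROXY protocol v1 header:
+ %% "PROXY TCP4 <src-ip> <dst-ip> <src-port> <dst-port>\r\n"; the addresses are made up.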
+ WS = rfc6455_client:new(Protocol ++ "://127.0.0.1:" ++ PortStr ++ "/ws", self(),
+ undefined, [], "PROXY TCP4 192.168.1.1 192.168.1.2 80 81\r\n"),
+ {ok, _} = rfc6455_client:open(WS),
+ Frame = stomp:marshal("CONNECT", [{"login","guest"}, {"passcode", "guest"}], <<>>),
+ rfc6455_client:send(WS, Frame),
+ {ok, _P} = rfc6455_client:recv(WS),
+ ConnectionName = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, connection_name, []),
+ match = re:run(ConnectionName, <<"^192\\.168\\.1\\.1:80 ">>, [{capture, none}]),
+ {close, _} = rfc6455_client:close(WS),
+ ok.
+
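+%% Returns the name recorded for the first entry in the connection_created
+%% stats table; with PROXY protocol enabled this should be the source
+%% address from the preface rather than the real peer address.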
+connection_name() ->
+ Connections = ets:tab2list(connection_created),
+ {_Key, Values} = lists:nth(1, Connections),
+ {_, Name} = lists:keyfind(name, 1, Values),
+ Name.
diff --git a/deps/rabbitmq_web_stomp/test/raw_websocket_SUITE.erl b/deps/rabbitmq_web_stomp/test/raw_websocket_SUITE.erl
new file mode 100644
index 0000000000..f05b87751c
--- /dev/null
+++ b/deps/rabbitmq_web_stomp/test/raw_websocket_SUITE.erl
@@ -0,0 +1,201 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(raw_websocket_SUITE).
+
+
+-compile(export_all).
+
+-include_lib("common_test/include/ct.hrl").
+
+-include_lib("eunit/include/eunit.hrl").
+
+suite() ->
+ [
+ %% If a test hangs, no need to wait for 30 minutes.
+ {timetrap, {minutes, 2}}
+ ].
+
+all() ->
+ [{group, http_tests},
+ {group, https_tests}].
+
+groups() ->
+ Tests = [
+ connection,
+ connection_with_protocols,
+ pubsub,
+ disconnect,
+ http_auth
+ ],
+ [{https_tests, [], Tests},
+ {http_tests, [], Tests}].
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ Config.
+
+end_per_suite(Config) ->
+ Config.
+
+init_per_group(Group, Config) ->
+ Protocol = case Group of
+ http_tests -> "ws";
+ https_tests -> "wss"
+ end,
+ Config1 = rabbit_ct_helpers:set_config(Config,
+ [{rmq_nodename_suffix, ?MODULE},
+ {protocol, Protocol},
+ {rabbitmq_ct_tls_verify, verify_none},
+ {rabbitmq_ct_tls_fail_if_no_peer_cert, false}]),
+
+ rabbit_ct_helpers:run_setup_steps(
+ Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++ [fun configure_ssl/1]).
+
+configure_ssl(Config) ->
+ ErlangConfig = proplists:get_value(erlang_node_config, Config, []),
+ RabbitAppConfig = proplists:get_value(rabbit, ErlangConfig, []),
+ RabbitSslConfig = proplists:get_value(ssl_options, RabbitAppConfig, []),
+ Port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_web_stomp_tls),
+ rabbit_ws_test_util:update_app_env(Config, ssl_config, [{port, Port} | lists:keydelete(port, 1, RabbitSslConfig)]),
+ Config.
+
+end_per_group(_Group, Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config,
+ rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_testcase(http_auth, Config) ->
+ rabbit_ws_test_util:update_app_env(Config, use_http_auth, true),
+ Config;
+init_per_testcase(_, Config) -> Config.
+
+end_per_testcase(http_auth, Config) ->
+ rabbit_ws_test_util:update_app_env(Config, use_http_auth, false),
+ Config;
+end_per_testcase(_, Config) -> Config.
+
+connection(Config) ->
+ PortStr = rabbit_ws_test_util:get_web_stomp_port_str(Config),
+ Protocol = ?config(protocol, Config),
+ WS = rfc6455_client:new(Protocol ++ "://127.0.0.1:" ++ PortStr ++ "/ws", self()),
+ {ok, _} = rfc6455_client:open(WS),
+ {close, _} = rfc6455_client:close(WS),
+ ok.
+
+connection_with_protocols(Config) ->
+ PortStr = rabbit_ws_test_util:get_web_stomp_port_str(Config),
+ Protocol = ?config(protocol, Config),
+ WS = rfc6455_client:new(Protocol ++ "://127.0.0.1:" ++ PortStr ++ "/ws", self(),
+ undefined, ["v11.stomp", "v10.stomp", "v12.stomp"]),
+ {ok, _} = rfc6455_client:open(WS),
+ {close, _} = rfc6455_client:close(WS),
+ ok.
+
+
+raw_send(WS, Command, Headers) ->
+ raw_send(WS, Command, Headers, <<>>).
+raw_send(WS, Command, Headers, Body) ->
+ Frame = stomp:marshal(Command, Headers, Body),
+ rfc6455_client:send(WS, Frame).
+
+raw_recv(WS) ->
+ {ok, P} = rfc6455_client:recv(WS),
+ stomp:unmarshal(P).
+
+
+pubsub(Config) ->
+ PortStr = rabbit_ws_test_util:get_web_stomp_port_str(Config),
+ Protocol = ?config(protocol, Config),
+ WS = rfc6455_client:new(Protocol ++ "://127.0.0.1:" ++ PortStr ++ "/ws", self()),
+ {ok, _} = rfc6455_client:open(WS),
+ ok = raw_send(WS, "CONNECT", [{"login","guest"}, {"passcode", "guest"}]),
+
+ {<<"CONNECTED">>, _, <<>>} = raw_recv(WS),
+
+ Dst = "/topic/test-" ++ stomp:list_to_hex(binary_to_list(crypto:strong_rand_bytes(8))),
+
+ ok = raw_send(WS, "SUBSCRIBE", [{"destination", Dst},
+ {"id", "s0"}]),
+
+ CustHdr1K = "x-custom-hdr-1",
+ CustHdr1 = {CustHdr1K, "value1"},
+ CustHdr2K = "x-custom-hdr-2",
+ CustHdr2 = {CustHdr2K, "value2"},
+ CustHdr3K = "custom-hdr-3",
+ CustHdr3 = {CustHdr3K, "value3"},
+ ok = raw_send(WS, "SEND", [{"destination", Dst}, {"content-length", "3"},
+ CustHdr1, CustHdr2, CustHdr3], <<"a\x00a">>),
+
+ {<<"MESSAGE">>, H, <<"a\x00a">>} = raw_recv(WS),
+
+ Dst = binary_to_list(proplists:get_value(<<"destination">>, H)),
+ CustHdr1 = {CustHdr1K, binary_to_list(proplists:get_value(list_to_binary(CustHdr1K), H))},
+ CustHdr2 = {CustHdr2K, binary_to_list(proplists:get_value(list_to_binary(CustHdr2K), H))},
+ CustHdr3 = {CustHdr3K, binary_to_list(proplists:get_value(list_to_binary(CustHdr3K), H))},
+
+ {close, _} = rfc6455_client:close(WS),
+ ok.
+
+
+disconnect(Config) ->
+ PortStr = rabbit_ws_test_util:get_web_stomp_port_str(Config),
+ Protocol = ?config(protocol, Config),
+ WS = rfc6455_client:new(Protocol ++ "://127.0.0.1:" ++ PortStr ++ "/ws", self()),
+ {ok, _} = rfc6455_client:open(WS),
+ ok = raw_send(WS, "CONNECT", [{"login","guest"}, {"passcode", "guest"}]),
+
+ {<<"CONNECTED">>, _, <<>>} = raw_recv(WS),
+
+ ok = raw_send(WS, "DISCONNECT", []),
+ {close, {1000, _}} = rfc6455_client:recv(WS),
+
+ ok.
+
+http_auth(Config) ->
+ %% Intentionally put bad credentials in the CONNECT frame,
+ %% and good credentials in the Authorization header, to
+ %% confirm that the right credentials are picked.
+ PortStr = rabbit_ws_test_util:get_web_stomp_port_str(Config),
+ Protocol = ?config(protocol, Config),
+ WS = rfc6455_client:new(Protocol ++ "://127.0.0.1:" ++ PortStr ++ "/ws", self(),
+ [{login, "guest"}, {passcode, "guest"}]),
+ {ok, _} = rfc6455_client:open(WS),
+ ok = raw_send(WS, "CONNECT", [{"login", "bad"}, {"passcode", "bad"}]),
+ {<<"CONNECTED">>, _, <<>>} = raw_recv(WS),
+ {close, _} = rfc6455_client:close(WS),
+
+ %% Confirm that if no Authorization header is provided,
+ %% the default STOMP plugin credentials are used. We
+ %% expect an error because the default credentials are invalid.
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+ application, set_env,
+ [rabbitmq_stomp, default_user,
+ [{login, "bad-default"}, {passcode, "bad-default"}]
+ ]),
+
+ WS2 = rfc6455_client:new(Protocol ++ "://127.0.0.1:" ++ PortStr ++ "/ws", self()),
+ {ok, _} = rfc6455_client:open(WS2),
+ ok = raw_send(WS2, "CONNECT", [{"login", "bad"}, {"passcode", "bad"}]),
+ {<<"ERROR">>, _, _} = raw_recv(WS2),
+ {close, _} = rfc6455_client:close(WS2),
+
+ %% Confirm that we can connect if the default STOMP
+ %% credentials are used.
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+ application, set_env,
+ [rabbitmq_stomp, default_user,
+ [{login, "guest"}, {passcode, "guest"}]
+ ]),
+
+ WS3 = rfc6455_client:new(Protocol ++ "://127.0.0.1:" ++ PortStr ++ "/ws", self()),
+ {ok, _} = rfc6455_client:open(WS3),
+ ok = raw_send(WS3, "CONNECT", [{"login", "bad"}, {"passcode", "bad"}]),
+ {<<"CONNECTED">>, _, <<>>} = raw_recv(WS3),
+ {close, _} = rfc6455_client:close(WS3),
+
+ ok.
diff --git a/deps/rabbitmq_web_stomp/test/src/rabbit_ws_test_util.erl b/deps/rabbitmq_web_stomp/test/src/rabbit_ws_test_util.erl
new file mode 100644
index 0000000000..822d9c27f1
--- /dev/null
+++ b/deps/rabbitmq_web_stomp/test/src/rabbit_ws_test_util.erl
@@ -0,0 +1,32 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_ws_test_util).
+
+-export([update_app_env/3, get_web_stomp_port_str/1]).
+
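+%% Sets a rabbitmq_web_stomp application env key on the broker node, then
+%% restarts the application so its listeners pick up the new value.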
+update_app_env(Config, Key, Val) ->
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+ application, set_env,
+ [rabbitmq_web_stomp, Key, Val]),
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+ application, stop,
+ [rabbitmq_web_stomp]),
+ ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+ application, start,
+ [rabbitmq_web_stomp]).
+
+get_web_stomp_port_str(Config) ->
+ Port = case rabbit_ct_helpers:get_config(Config, protocol) of
+ "ws" ->
+ rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_web_stomp);
+ "wss" ->
+ rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_web_stomp_tls)
+ end,
+ integer_to_list(Port).
diff --git a/deps/rabbitmq_web_stomp/test/src/rfc6455_client.erl b/deps/rabbitmq_web_stomp/test/src/rfc6455_client.erl
new file mode 100644
index 0000000000..5ef4c40099
--- /dev/null
+++ b/deps/rabbitmq_web_stomp/test/src/rfc6455_client.erl
@@ -0,0 +1,299 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rfc6455_client).
+
+-export([new/2, new/3, new/4, new/5, open/1, recv/1, send/2, close/1, close/2]).
+
+-record(state, {host, port, addr, path, ppid, socket, data, phase, transport}).
+
+%% --------------------------------------------------------------------------
+
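+%% Minimal usage sketch (the URL and port are illustrative):
+%%
+%%     WS = rfc6455_client:new("ws://127.0.0.1:15674/ws", self()),
+%%     {ok, _Opts} = rfc6455_client:open(WS),
+%%     ok = rfc6455_client:send(WS, <<"hello">>),
+%%     {ok, _Payload} = rfc6455_client:recv(WS),
+%%     {close, _} = rfc6455_client:close(WS).
+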
+new(WsUrl, PPid) ->
+ new(WsUrl, PPid, undefined, [], <<>>).
+
+new(WsUrl, PPid, AuthInfo) ->
+ new(WsUrl, PPid, AuthInfo, [], <<>>).
+
+new(WsUrl, PPid, AuthInfo, Protocols) ->
+ new(WsUrl, PPid, AuthInfo, Protocols, <<>>).
+
+new(WsUrl, PPid, AuthInfo, Protocols, TcpPreface) ->
+ crypto:start(),
+ application:ensure_all_started(ssl),
+ {Transport, Url} = case WsUrl of
+ "ws://" ++ Rest -> {gen_tcp, Rest};
+ "wss://" ++ SslRest -> {ssl, SslRest}
+ end,
+ [Addr, Path] = split("/", Url, 1),
+ [Host0, MaybePort] = split(":", Addr, 1, empty),
+ Host = case inet:parse_ipv4_address(Host0) of
+ {ok, IP} -> IP;
+ _ -> Host0
+ end,
+ Port = case MaybePort of
+ empty -> 80;
+ V -> {I, ""} = string:to_integer(V), I
+ end,
+ State = #state{host = Host,
+ port = Port,
+ addr = Addr,
+ path = "/" ++ Path,
+ ppid = PPid,
+ transport = Transport},
+ spawn_link(fun () ->
+ start_conn(State, AuthInfo, Protocols, TcpPreface)
+ end).
+
+open(WS) ->
+ receive
+ {rfc6455, open, WS, Opts} ->
+ {ok, Opts};
+ {rfc6455, close, WS, R} ->
+ {close, R}
+ end.
+
+recv(WS) ->
+ receive
+ {rfc6455, recv, WS, Payload} ->
+ {ok, Payload};
+ {rfc6455, recv_binary, WS, Payload} ->
+ {binary, Payload};
+ {rfc6455, close, WS, R} ->
+ {close, R}
+ end.
+
+send(WS, IoData) ->
+ WS ! {send, IoData},
+ ok.
+
+close(WS) ->
+ close(WS, {1000, ""}).
+
+close(WS, WsReason) ->
+ WS ! {close, WsReason},
+ receive
+ {rfc6455, close, WS, R} ->
+ {close, R}
+ end.
+
+
+%% --------------------------------------------------------------------------
+
+start_conn(State = #state{transport = Transport}, AuthInfo, Protocols, TcpPreface) ->
+ {ok, Socket} = case TcpPreface of
+ <<>> ->
+ Transport:connect(State#state.host, State#state.port,
+ [binary,
+ {packet, 0}]);
+ _ ->
+ {ok, Socket0} = gen_tcp:connect(State#state.host, State#state.port,
+ [binary,
+ {packet, 0}]),
+ gen_tcp:send(Socket0, TcpPreface),
+ case Transport of
+ gen_tcp -> {ok, Socket0};
+ ssl -> Transport:connect(Socket0, [])
+ end
+ end,
+
+ AuthHd = case AuthInfo of
+ undefined -> "";
+ _ ->
+ Login = proplists:get_value(login, AuthInfo),
+ Passcode = proplists:get_value(passcode, AuthInfo),
+ "Authorization: Basic "
+ ++ base64:encode_to_string(Login ++ ":" ++ Passcode)
+ ++ "\r\n"
+ end,
+
+ ProtocolHd = case Protocols of
+ [] -> "";
+ _ -> "Sec-Websocket-Protocol: " ++ string:join(Protocols, ", ") ++ "\r\n"
+ end,
+
+ Key = base64:encode_to_string(crypto:strong_rand_bytes(16)),
+ Transport:send(Socket,
+ "GET " ++ State#state.path ++ " HTTP/1.1\r\n" ++
+ "Host: " ++ State#state.addr ++ "\r\n" ++
+ "Upgrade: websocket\r\n" ++
+ "Connection: Upgrade\r\n" ++
+ AuthHd ++
+ ProtocolHd ++
+ "Sec-WebSocket-Key: " ++ Key ++ "\r\n" ++
+ "Origin: null\r\n" ++
+ "Sec-WebSocket-Version: 13\r\n\r\n"),
+
+ loop(State#state{socket = Socket,
+ data = <<>>,
+ phase = opening}).
+
+do_recv(State = #state{phase = opening, ppid = PPid, data = Data}) ->
+ case split("\r\n\r\n", binary_to_list(Data), 1, empty) of
+ [_Http, empty] -> State;
+ [Http, Data1] ->
+ %% TODO: don't ignore http response data, verify key
+ PPid ! {rfc6455, open, self(), [{http_response, Http}]},
+ State#state{phase = open,
+ data = Data1}
+ end;
+do_recv(State = #state{phase = Phase, data = Data, socket = Socket, transport = Transport, ppid = PPid})
+ when Phase =:= open orelse Phase =:= closing ->
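+ %% Server-to-client frames are unmasked (mask bit 0): FIN(1), RSV(3),
+ %% opcode(4), mask(1), len(7), with 16- or 64-bit extended lengths
+ %% when the 7-bit length is 126 or 127 (RFC 6455 section 5.2).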
+ R = case Data of
+ <<F:1, _:3, O:4, 0:1, L:7, Payload:L/binary, Rest/binary>>
+ when L < 126 ->
+ {F, O, Payload, Rest};
+
+ <<F:1, _:3, O:4, 0:1, 126:7, L2:16, Payload:L2/binary, Rest/binary>> ->
+ {F, O, Payload, Rest};
+
+ <<F:1, _:3, O:4, 0:1, 127:7, L2:64, Payload:L2/binary, Rest/binary>> ->
+ {F, O, Payload, Rest};
+
+ <<_:1, _:3, _:4, 1:1, _/binary>> ->
+ %% According to RFC 6455 section 5.1, the server must not mask any frames.
+ die(Socket, Transport, PPid, {1006, "Protocol error"}, normal);
+ _ ->
+ moredata
+ end,
+ case R of
+ moredata ->
+ State;
+ _ -> do_recv2(State, R)
+ end.
+
+do_recv2(State = #state{phase = Phase, socket = Socket, ppid = PPid, transport = Transport}, R) ->
+ case R of
+ {1, 1, Payload, Rest} ->
+ PPid ! {rfc6455, recv, self(), Payload},
+ State#state{data = Rest};
+ {1, 2, Payload, Rest} ->
+ PPid ! {rfc6455, recv_binary, self(), Payload},
+ State#state{data = Rest};
+ {1, 8, Payload, _Rest} ->
+ WsReason = case Payload of
+ <<WC:16, WR/binary>> -> {WC, WR};
+ <<>> -> {1005, "No status received"}
+ end,
+ case Phase of
+ open -> %% echo
+ do_close(State, WsReason),
+ Transport:close(Socket);
+ closing ->
+ ok
+ end,
+ die(Socket, Transport, PPid, WsReason, normal);
+ {_, _, _, _Rest} ->
+ io:format("Unknown frame type~n"),
+ die(Socket, Transport, PPid, {1006, "Unknown frame type"}, normal)
+ end.
+
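+%% Builds a client-to-server frame: clients must mask the payload with a
+%% fresh random 4-byte key (RFC 6455 section 5.3); F is FIN, O the opcode.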
+encode_frame(F, O, Payload) ->
+ Mask = crypto:strong_rand_bytes(4),
+ MaskedPayload = apply_mask(Mask, iolist_to_binary(Payload)),
+
+ L = byte_size(MaskedPayload),
+ IoData = case L of
+ _ when L < 126 ->
+ [<<F:1, 0:3, O:4, 1:1, L:7>>, Mask, MaskedPayload];
+ _ when L < 65536 ->
+ [<<F:1, 0:3, O:4, 1:1, 126:7, L:16>>, Mask, MaskedPayload];
+ _ ->
+ [<<F:1, 0:3, O:4, 1:1, 127:7, L:64>>, Mask, MaskedPayload]
+ end,
+ iolist_to_binary(IoData).
+
+do_send(State = #state{socket = Socket, transport = Transport}, Payload) ->
+ Transport:send(Socket, encode_frame(1, 1, Payload)),
+ State.
+
+do_close(State = #state{socket = Socket, transport = Transport}, {Code, Reason}) ->
+ Payload = iolist_to_binary([<<Code:16>>, Reason]),
+ Transport:send(Socket, encode_frame(1, 8, Payload)),
+ State#state{phase = closing}.
+
+
+loop(State = #state{socket = Socket, transport = Transport, ppid = PPid, data = Data,
+ phase = Phase}) ->
+ receive
+ {In, Socket, Bin} when In =:= tcp; In =:= ssl ->
+ State1 = State#state{data = iolist_to_binary([Data, Bin])},
+ loop(do_recv(State1));
+ {send, Payload} when Phase == open ->
+ loop(do_send(State, Payload));
+ {Closed, Socket} when Closed =:= tcp_closed; Closed =:= ssl_closed ->
+ die(Socket, Transport, PPid, {1006, "Connection closed abnormally"}, normal);
+ {close, WsReason} when Phase == open ->
+ loop(do_close(State, WsReason));
+ {Error, Socket, Reason} when Error =:= tcp_error; Error =:= ssl_error ->
+ die(Socket, Transport, PPid, {1006, "Connection closed abnormally"}, Reason);
+ Other ->
+ error({unknown_message, Other, Socket})
+ end.
+
+
+die(Socket, Transport, PPid, WsReason, Reason) ->
+ Transport:shutdown(Socket, read_write),
+ PPid ! {rfc6455, close, self(), WsReason},
+ exit(Reason).
+
+
+%% --------------------------------------------------------------------------
+
+split(SubStr, Str, Limit) ->
+ split(SubStr, Str, Limit, "").
+
+split(SubStr, Str, Limit, Default) ->
+ Acc = split(SubStr, Str, Limit, [], Default),
+ lists:reverse(Acc).
+split(_SubStr, Str, 0, Acc, _Default) -> [Str | Acc];
+split(SubStr, Str, Limit, Acc, Default) ->
+ {L, R} = case string:str(Str, SubStr) of
+ 0 -> {Str, Default};
+ I -> {string:substr(Str, 1, I-1),
+ string:substr(Str, I+length(SubStr))}
+ end,
+ split(SubStr, R, Limit-1, [L | Acc], Default).
+
+
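+%% XORs the payload with the 4-byte mask 32 bits at a time; the extra
+%% apply_mask2 clauses handle a 1-, 2- or 3-byte tail.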
+apply_mask(Mask, Data) when is_number(Mask) ->
+ apply_mask(<<Mask:32>>, Data);
+
+apply_mask(<<0:32>>, Data) ->
+ Data;
+apply_mask(Mask, Data) ->
+ iolist_to_binary(lists:reverse(apply_mask2(Mask, Data, []))).
+
+apply_mask2(M = <<Mask:32>>, <<Data:32, Rest/binary>>, Acc) ->
+ T = Data bxor Mask,
+ apply_mask2(M, Rest, [<<T:32>> | Acc]);
+apply_mask2(<<Mask:24, _:8>>, <<Data:24>>, Acc) ->
+ T = Data bxor Mask,
+ [<<T:24>> | Acc];
+apply_mask2(<<Mask:16, _:16>>, <<Data:16>>, Acc) ->
+ T = Data bxor Mask,
+ [<<T:16>> | Acc];
+apply_mask2(<<Mask:8, _:24>>, <<Data:8>>, Acc) ->
+ T = Data bxor Mask,
+ [<<T:8>> | Acc];
+apply_mask2(_, <<>>, Acc) ->
+ Acc.
diff --git a/deps/rabbitmq_web_stomp/test/src/stomp.erl b/deps/rabbitmq_web_stomp/test/src/stomp.erl
new file mode 100644
index 0000000000..5ee1a2c43a
--- /dev/null
+++ b/deps/rabbitmq_web_stomp/test/src/stomp.erl
@@ -0,0 +1,51 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(stomp).
+
+-export([marshal/2, marshal/3, unmarshal/1]).
+
+-export([list_to_hex/1]).
+
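+%% A marshalled frame is COMMAND, one "name:value" header per line, a
+%% blank line, the body and a terminating NUL. Note that iolist_join/2
+%% below places the separator *before* every element, so frames go out
+%% with a leading EOL, which the broker appears to tolerate.
+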
+marshal(Command, Headers) ->
+ marshal(Command, Headers, <<>>).
+marshal(Command, Headers, Body) ->
+ Lines = [Command] ++ [[K, ":", V] || {K, V} <- Headers] ++ [["\n", Body]],
+ iolist_to_binary([iolist_join(Lines, "\n"), "\x00"]).
+
+unmarshal(Frame) ->
+ [Head, Body] = binary:split(Frame, <<"\n\n">>),
+ [Command | HeaderLines] = binary:split(Head, <<"\n">>, [global]),
+ Headers = [list_to_tuple(binary:split(Line, <<":">>)) || Line <- HeaderLines],
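+ %% frames from the broker end with a NUL byte followed by a newline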
+ [Body1, <<>>] = binary:split(Body, [<<0, 10>>], [{scope, {byte_size(Body) - 2, 2}}]),
+ {Command, Headers, Body1}.
+
+%% ----------
+
+iolist_join(List, Separator) ->
+ lists:reverse(iolist_join2(List, Separator, [])).
+
+iolist_join2([], _Separator, Acc) ->
+ Acc;
+iolist_join2([E | List], Separator, Acc) ->
+ iolist_join2(List, Separator, [E, Separator | Acc]).
+
+
+list_to_hex(L) ->
+ lists:flatten(lists:map(fun(X) -> int_to_hex(X) end, L)).
+int_to_hex(N) when N < 256 ->
+ [hex(N div 16), hex(N rem 16)].
+hex(N) when N < 10 ->
+ $0+N;
+hex(N) when N >= 10, N < 16 ->
+ $a + (N-10).
diff --git a/deps/rabbitmq_web_stomp/test/unit_SUITE.erl b/deps/rabbitmq_web_stomp/test/unit_SUITE.erl
new file mode 100644
index 0000000000..4e7ad10861
--- /dev/null
+++ b/deps/rabbitmq_web_stomp/test/unit_SUITE.erl
@@ -0,0 +1,60 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(unit_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ get_tcp_conf,
+ get_tcp_port,
+ get_binding_address
+ ].
+
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_group(_, Config) ->
+ Config.
+
+end_per_group(_, Config) ->
+ Config.
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
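+%% get_tcp_conf/2 is expected to force the listener port to the supplied
+%% value while leaving any other TCP options in place.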
+get_tcp_conf(_Config) ->
+ [{ip, {127, 0, 0, 1}}, {port, 15674}] = lists:sort(rabbit_web_stomp_listener:get_tcp_conf(
+ [{ip, {127, 0, 0, 1}}, {port, 1245}], 15674
+ )),
+ [{ip, {127, 0, 0, 1}}, {port, 15674}] = lists:sort(rabbit_web_stomp_listener:get_tcp_conf(
+ [{ip, {127, 0, 0, 1}}], 15674
+ )),
+ [{port, 15674}] = lists:sort(rabbit_web_stomp_listener:get_tcp_conf(
+ [], 15674
+ )),
+ ok.
+
+get_tcp_port(_Config) ->
+ 15674 = rabbit_web_stomp_listener:get_tcp_port([{port, 15674}]),
+ 15674 = rabbit_web_stomp_listener:get_tcp_port([{port, 15674}, {tcp_config, []}]),
+ 12345 = rabbit_web_stomp_listener:get_tcp_port([{port, 15674}, {tcp_config, [{port, 12345}]}]),
+ ok.
+
+get_binding_address(_Config) ->
+ "0.0.0.0" = rabbit_web_stomp_listener:get_binding_address([]),
+ "192.168.1.10" = rabbit_web_stomp_listener:get_binding_address([{ip, "192.168.1.10"}]),
+ "192.168.1.10" = rabbit_web_stomp_listener:get_binding_address([{ip, {192, 168, 1, 10}}]),
+ ok.
diff --git a/deps/rabbitmq_web_stomp_examples/.gitignore b/deps/rabbitmq_web_stomp_examples/.gitignore
new file mode 100644
index 0000000000..8f28f69b4f
--- /dev/null
+++ b/deps/rabbitmq_web_stomp_examples/.gitignore
@@ -0,0 +1,17 @@
+.sw?
+.*.sw?
+*.beam
+/.erlang.mk/
+/cover/
+/deps/
+/doc/
+/ebin/
+/escript/
+/escript.lock
+/logs/
+/plugins/
+/plugins.lock
+/sbin/
+/sbin.lock
+
+/rabbitmq_web_stomp_examples.d
diff --git a/deps/rabbitmq_web_stomp_examples/CODE_OF_CONDUCT.md b/deps/rabbitmq_web_stomp_examples/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000000..08697906fd
--- /dev/null
+++ b/deps/rabbitmq_web_stomp_examples/CODE_OF_CONDUCT.md
@@ -0,0 +1,44 @@
+# Contributor Code of Conduct
+
+As contributors and maintainers of this project, and in the interest of fostering an open
+and welcoming community, we pledge to respect all people who contribute through reporting
+issues, posting feature requests, updating documentation, submitting pull requests or
+patches, and other activities.
+
+We are committed to making participation in this project a harassment-free experience for
+everyone, regardless of level of experience, gender, gender identity and expression,
+sexual orientation, disability, personal appearance, body size, race, ethnicity, age,
+religion, or nationality.
+
+Examples of unacceptable behavior by participants include:
+
+ * The use of sexualized language or imagery
+ * Personal attacks
+ * Trolling or insulting/derogatory comments
+ * Public or private harassment
+ * Publishing others' private information, such as physical or electronic addresses,
+ without explicit permission
+ * Other unethical or unprofessional conduct
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments,
+commits, code, wiki edits, issues, and other contributions that are not aligned to this
+Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors
+that they deem inappropriate, threatening, offensive, or harmful.
+
+By adopting this Code of Conduct, project maintainers commit themselves to fairly and
+consistently applying these principles to every aspect of managing this project. Project
+maintainers who do not follow or enforce the Code of Conduct may be permanently removed
+from the project team.
+
+This Code of Conduct applies both within project spaces and in public spaces when an
+individual is representing the project or its community.
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by
+contacting a project maintainer at [info@rabbitmq.com](mailto:info@rabbitmq.com). All complaints will
+be reviewed and investigated and will result in a response that is deemed necessary and
+appropriate to the circumstances. Maintainers are obligated to maintain confidentiality
+with regard to the reporter of an incident.
+
+This Code of Conduct is adapted from the
+[Contributor Covenant](https://contributor-covenant.org), version 1.3.0, available at
+[contributor-covenant.org/version/1/3/0/](https://contributor-covenant.org/version/1/3/0/)
diff --git a/deps/rabbitmq_web_stomp_examples/CONTRIBUTING.md b/deps/rabbitmq_web_stomp_examples/CONTRIBUTING.md
new file mode 100644
index 0000000000..592e7ced57
--- /dev/null
+++ b/deps/rabbitmq_web_stomp_examples/CONTRIBUTING.md
@@ -0,0 +1,99 @@
+Thank you for using RabbitMQ and for taking the time to contribute to the project.
+This document has two main parts:
+
+ * when and how to file GitHub issues for RabbitMQ projects
+ * how to submit pull requests
+
+They are intended to save you and RabbitMQ maintainers some time, so please
+take a moment to read through them.
+
+## Overview
+
+### GitHub issues
+
+The RabbitMQ team uses GitHub issues for _specific actionable items_ that
+engineers can work on. This assumes the following:
+
+* GitHub issues are not used for questions, investigations, root cause
+ analysis, discussions of potential issues, etc. (as defined by this team)
+* Enough information is provided by the reporter for maintainers to work with
+
+The team receives many questions through various venues every single
+day. Frequently, these questions do not include the necessary details
+the team needs to begin useful work. GitHub issues can very quickly
+turn into something impossible to navigate and make sense
+of. Because of this, questions, investigations, root cause analysis,
+and discussions of potential features are all considered to be
+[mailing list][rmq-users] material. If you are unsure where to begin,
+the [RabbitMQ users mailing list][rmq-users] is the right place.
+
+Getting all the details necessary to reproduce an issue, make a
+conclusion or even form a hypothesis about what's happening can take a
+fair amount of time. Please help others help you by providing a way to
+reproduce the behavior you're observing, or at least sharing as much
+relevant information as possible on the [RabbitMQ users mailing
+list][rmq-users].
+
+Please provide versions of the software used:
+
+ * RabbitMQ server
+ * Erlang
+ * Operating system version (and distribution, if applicable)
+ * All client libraries used
+ * RabbitMQ plugins (if applicable)
+
+The following information greatly helps in investigating and reproducing issues:
+
+ * RabbitMQ server logs
+ * A code example or terminal transcript that can be used to reproduce
+ * Full exception stack traces (a single line message is not enough!)
+ * `rabbitmqctl report` and `rabbitmqctl environment` output
+ * Other relevant details about the environment and workload, e.g. a traffic capture
+ * Feel free to edit out hostnames and other potentially sensitive information.
+
+To make collecting much of this and other environment information easier, use
+the [`rabbitmq-collect-env`][rmq-collect-env] script. It will produce an archive with
+server logs, operating system logs, output of certain diagnostics commands and so on.
+Please note that **no effort is made to scrub any information that may be sensitive**.
+
+### Pull Requests
+
+RabbitMQ projects use pull requests to discuss, collaborate on and accept code contributions.
+Pull requests are the primary place for discussing code changes.
+
+Here's the recommended workflow:
+
+ * [Fork the repository][github-fork] or repositories you plan on contributing to. If multiple
+ repositories are involved in addressing the same issue, please use the same branch name
+ in each repository
+ * Create a branch with a descriptive name in the relevant repositories
+ * Make your changes, run tests (usually with `make tests`), commit with a
+ [descriptive message][git-commit-msgs], push to your fork
+ * Submit pull requests with an explanation of what has been changed and **why**
+ * Submit a filled out and signed [Contributor Agreement][ca-agreement] if needed (see below)
+ * Be patient. We will get to your pull request eventually
+
+If what you are going to work on is a substantial change, please first
+ask the core team for their opinion on the [RabbitMQ users mailing list][rmq-users].
+
+## Code of Conduct
+
+See [CODE_OF_CONDUCT.md](./CODE_OF_CONDUCT.md).
+
+## Contributor Agreement
+
+If you want to contribute a non-trivial change, please submit a signed
+copy of our [Contributor Agreement][ca-agreement] around the time you
+submit your pull request. This will make it much easier (in some
+cases, possible) for the RabbitMQ team at Pivotal to merge your
+contribution.
+
+## Where to Ask Questions
+
+If something isn't clear, feel free to ask on our [mailing list][rmq-users].
+
+[rmq-collect-env]: https://github.com/rabbitmq/support-tools/blob/master/scripts/rabbitmq-collect-env
+[git-commit-msgs]: https://chris.beams.io/posts/git-commit/
+[rmq-users]: https://groups.google.com/forum/#!forum/rabbitmq-users
+[ca-agreement]: https://cla.pivotal.io/sign/rabbitmq
+[github-fork]: https://help.github.com/articles/fork-a-repo/
diff --git a/deps/rabbitmq_web_stomp_examples/LICENSE b/deps/rabbitmq_web_stomp_examples/LICENSE
new file mode 100644
index 0000000000..1df45da459
--- /dev/null
+++ b/deps/rabbitmq_web_stomp_examples/LICENSE
@@ -0,0 +1,9 @@
+This package, the rabbitmq-web-stomp-examples, is licensed under the
+MPL 2.0. For the MPL, please see LICENSE-MPL-RabbitMQ.
+
+priv/stomp.js is a part of stomp-websocket project
+(https://github.com/jmesnil/stomp-websocket) and is released under
+APL2. For the license see LICENSE-APL2-Stomp-Websocket.
+
+If you have any questions regarding licensing, please contact us at
+info@rabbitmq.com.
diff --git a/deps/rabbitmq_web_stomp_examples/LICENSE-APL2-Stomp-Websocket b/deps/rabbitmq_web_stomp_examples/LICENSE-APL2-Stomp-Websocket
new file mode 100644
index 0000000000..ef51da2b0e
--- /dev/null
+++ b/deps/rabbitmq_web_stomp_examples/LICENSE-APL2-Stomp-Websocket
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+Copyright [yyyy] [name of copyright owner]
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/deps/rabbitmq_web_stomp_examples/LICENSE-MPL-RabbitMQ b/deps/rabbitmq_web_stomp_examples/LICENSE-MPL-RabbitMQ
new file mode 100644
index 0000000000..14e2f777f6
--- /dev/null
+++ b/deps/rabbitmq_web_stomp_examples/LICENSE-MPL-RabbitMQ
@@ -0,0 +1,373 @@
+Mozilla Public License Version 2.0
+==================================
+
+1. Definitions
+--------------
+
+1.1. "Contributor"
+ means each individual or legal entity that creates, contributes to
+ the creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+ means the combination of the Contributions of others (if any) used
+ by a Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+ means Source Code Form to which the initial Contributor has attached
+ the notice in Exhibit A, the Executable Form of such Source Code
+ Form, and Modifications of such Source Code Form, in each case
+ including portions thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ (a) that the initial Contributor has attached the notice described
+ in Exhibit B to the Covered Software; or
+
+ (b) that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the
+ terms of a Secondary License.
+
+1.6. "Executable Form"
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+ means a work that combines Covered Software with other material, in
+ a separate file or files, that is not Covered Software.
+
+1.8. "License"
+ means this document.
+
+1.9. "Licensable"
+ means having the right to grant, to the maximum extent possible,
+ whether at the time of the initial grant or subsequently, any and
+ all of the rights conveyed by this License.
+
+1.10. "Modifications"
+ means any of the following:
+
+ (a) any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered
+ Software; or
+
+ (b) any new file in Source Code Form that contains any Covered
+ Software.
+
+1.11. "Patent Claims" of a Contributor
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the
+ License, by the making, using, selling, offering for sale, having
+ made, import, or transfer of either its Contributions or its
+ Contributor Version.
+
+1.12. "Secondary License"
+ means either the GNU General Public License, Version 2.0, the GNU
+ Lesser General Public License, Version 2.1, the GNU Affero General
+ Public License, Version 3.0, or any later versions of those
+ licenses.
+
+1.13. "Source Code Form"
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that
+ controls, is controlled by, or is under common control with You. For
+ purposes of this definition, "control" means (a) the power, direct
+ or indirect, to cause the direction or management of such entity,
+ whether by contract or otherwise, or (b) ownership of more than
+ fifty percent (50%) of the outstanding shares or beneficial
+ ownership of such entity.
+
+2. License Grants and Conditions
+--------------------------------
+
+2.1. Grants
+
+Each Contributor hereby grants You a world-wide, royalty-free,
+non-exclusive license:
+
+(a) under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+(b) under Patent Claims of such Contributor to make, use, sell, offer
+ for sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+The licenses granted in Section 2.1 with respect to any Contribution
+become effective for each Contribution on the date the Contributor first
+distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+The licenses granted in this Section 2 are the only rights granted under
+this License. No additional rights or licenses will be implied from the
+distribution or licensing of Covered Software under this License.
+Notwithstanding Section 2.1(b) above, no patent license is granted by a
+Contributor:
+
+(a) for any code that a Contributor has removed from Covered Software;
+ or
+
+(b) for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+(c) under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+This License does not grant any rights in the trademarks, service marks,
+or logos of any Contributor (except as may be necessary to comply with
+the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+No Contributor makes additional grants as a result of Your choice to
+distribute the Covered Software under a subsequent version of this
+License (see Section 10.2) or under the terms of a Secondary License (if
+permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+Each Contributor represents that the Contributor believes its
+Contributions are its original creation(s) or it has sufficient rights
+to grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+This License is not intended to limit any rights You have under
+applicable copyright doctrines of fair use, fair dealing, or other
+equivalents.
+
+2.7. Conditions
+
+Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
+in Section 2.1.
+
+3. Responsibilities
+-------------------
+
+3.1. Distribution of Source Form
+
+All distribution of Covered Software in Source Code Form, including any
+Modifications that You create or to which You contribute, must be under
+the terms of this License. You must inform recipients that the Source
+Code Form of the Covered Software is governed by the terms of this
+License, and how they can obtain a copy of this License. You may not
+attempt to alter or restrict the recipients' rights in the Source Code
+Form.
+
+3.2. Distribution of Executable Form
+
+If You distribute Covered Software in Executable Form then:
+
+(a) such Covered Software must also be made available in Source Code
+ Form, as described in Section 3.1, and You must inform recipients of
+ the Executable Form how they can obtain a copy of such Source Code
+ Form by reasonable means in a timely manner, at a charge no more
+ than the cost of distribution to the recipient; and
+
+(b) You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter
+ the recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+You may create and distribute a Larger Work under terms of Your choice,
+provided that You also comply with the requirements of this License for
+the Covered Software. If the Larger Work is a combination of Covered
+Software with a work governed by one or more Secondary Licenses, and the
+Covered Software is not Incompatible With Secondary Licenses, this
+License permits You to additionally distribute such Covered Software
+under the terms of such Secondary License(s), so that the recipient of
+the Larger Work may, at their option, further distribute the Covered
+Software under the terms of either this License or such Secondary
+License(s).
+
+3.4. Notices
+
+You may not remove or alter the substance of any license notices
+(including copyright notices, patent notices, disclaimers of warranty,
+or limitations of liability) contained within the Source Code Form of
+the Covered Software, except that You may alter any license notices to
+the extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+You may choose to offer, and to charge a fee for, warranty, support,
+indemnity or liability obligations to one or more recipients of Covered
+Software. However, You may do so only on Your own behalf, and not on
+behalf of any Contributor. You must make it absolutely clear that any
+such warranty, support, indemnity, or liability obligation is offered by
+You alone, and You hereby agree to indemnify every Contributor for any
+liability incurred by such Contributor as a result of warranty, support,
+indemnity or liability terms You offer. You may include additional
+disclaimers of warranty and limitations of liability specific to any
+jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+---------------------------------------------------
+
+If it is impossible for You to comply with any of the terms of this
+License with respect to some or all of the Covered Software due to
+statute, judicial order, or regulation then You must: (a) comply with
+the terms of this License to the maximum extent possible; and (b)
+describe the limitations and the code they affect. Such description must
+be placed in a text file included with all distributions of the Covered
+Software under this License. Except to the extent prohibited by statute
+or regulation, such description must be sufficiently detailed for a
+recipient of ordinary skill to be able to understand it.
+
+5. Termination
+--------------
+
+5.1. The rights granted under this License will terminate automatically
+if You fail to comply with any of its terms. However, if You become
+compliant, then the rights granted under this License from a particular
+Contributor are reinstated (a) provisionally, unless and until such
+Contributor explicitly and finally terminates Your grants, and (b) on an
+ongoing basis, if such Contributor fails to notify You of the
+non-compliance by some reasonable means prior to 60 days after You have
+come back into compliance. Moreover, Your grants from a particular
+Contributor are reinstated on an ongoing basis if such Contributor
+notifies You of the non-compliance by some reasonable means, this is the
+first time You have received notice of non-compliance with this License
+from such Contributor, and You become compliant prior to 30 days after
+Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+infringement claim (excluding declaratory judgment actions,
+counter-claims, and cross-claims) alleging that a Contributor Version
+directly or indirectly infringes any patent, then the rights granted to
+You by any and all Contributors for the Covered Software under Section
+2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all
+end user license agreements (excluding distributors and resellers) which
+have been validly granted by You or Your distributors under this License
+prior to termination shall survive termination.
+
+************************************************************************
+* *
+* 6. Disclaimer of Warranty *
+* ------------------------- *
+* *
+* Covered Software is provided under this License on an "as is" *
+* basis, without warranty of any kind, either expressed, implied, or *
+* statutory, including, without limitation, warranties that the *
+* Covered Software is free of defects, merchantable, fit for a *
+* particular purpose or non-infringing. The entire risk as to the *
+* quality and performance of the Covered Software is with You. *
+* Should any Covered Software prove defective in any respect, You *
+* (not any Contributor) assume the cost of any necessary servicing, *
+* repair, or correction. This disclaimer of warranty constitutes an *
+* essential part of this License. No use of any Covered Software is *
+* authorized under this License except under this disclaimer. *
+* *
+************************************************************************
+
+************************************************************************
+* *
+* 7. Limitation of Liability *
+* -------------------------- *
+* *
+* Under no circumstances and under no legal theory, whether tort *
+* (including negligence), contract, or otherwise, shall any *
+* Contributor, or anyone who distributes Covered Software as *
+* permitted above, be liable to You for any direct, indirect, *
+* special, incidental, or consequential damages of any character *
+* including, without limitation, damages for lost profits, loss of *
+* goodwill, work stoppage, computer failure or malfunction, or any *
+* and all other commercial damages or losses, even if such party *
+* shall have been informed of the possibility of such damages. This *
+* limitation of liability shall not apply to liability for death or *
+* personal injury resulting from such party's negligence to the *
+* extent applicable law prohibits such limitation. Some *
+* jurisdictions do not allow the exclusion or limitation of *
+* incidental or consequential damages, so this exclusion and *
+* limitation may not apply to You. *
+* *
+************************************************************************
+
+8. Litigation
+-------------
+
+Any litigation relating to this License may be brought only in the
+courts of a jurisdiction where the defendant maintains its principal
+place of business and such litigation shall be governed by laws of that
+jurisdiction, without reference to its conflict-of-law provisions.
+Nothing in this Section shall prevent a party's ability to bring
+cross-claims or counter-claims.
+
+9. Miscellaneous
+----------------
+
+This License represents the complete agreement concerning the subject
+matter hereof. If any provision of this License is held to be
+unenforceable, such provision shall be reformed only to the extent
+necessary to make it enforceable. Any law or regulation which provides
+that the language of a contract shall be construed against the drafter
+shall not be used to construe this License against a Contributor.
+
+10. Versions of the License
+---------------------------
+
+10.1. New Versions
+
+Mozilla Foundation is the license steward. Except as provided in Section
+10.3, no one other than the license steward has the right to modify or
+publish new versions of this License. Each version will be given a
+distinguishing version number.
+
+10.2. Effect of New Versions
+
+You may distribute the Covered Software under the terms of the version
+of the License under which You originally received the Covered Software,
+or under the terms of any subsequent version published by the license
+steward.
+
+10.3. Modified Versions
+
+If you create software not governed by this License, and you want to
+create a new license for such software, you may create and use a
+modified version of this License if you rename the license and remove
+any references to the name of the license steward (except to note that
+such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+Licenses
+
+If You choose to distribute Source Code Form that is Incompatible With
+Secondary Licenses under the terms of this version of the License, the
+notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+-------------------------------------------
+
+ This Source Code Form is subject to the terms of the Mozilla Public
+ License, v. 2.0. If a copy of the MPL was not distributed with this
+ file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular
+file, then You may include the notice in a location (such as a LICENSE
+file in a relevant directory) where a recipient would be likely to look
+for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+---------------------------------------------------------
+
+ This Source Code Form is "Incompatible With Secondary Licenses", as
+ defined by the Mozilla Public License, v. 2.0.
diff --git a/deps/rabbitmq_web_stomp_examples/Makefile b/deps/rabbitmq_web_stomp_examples/Makefile
new file mode 100644
index 0000000000..61b30b8707
--- /dev/null
+++ b/deps/rabbitmq_web_stomp_examples/Makefile
@@ -0,0 +1,23 @@
+PROJECT = rabbitmq_web_stomp_examples
+PROJECT_DESCRIPTION = Rabbit WEB-STOMP - examples
+PROJECT_MOD = rabbit_web_stomp_examples_app
+
+define PROJECT_ENV
+[
+ {listener, [{port, 15670}]}
+ ]
+endef
+
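+# Editor's note, for illustration only (not part of the stock
+# Makefile): to serve the examples on another port, change the listener
+# tuple in PROJECT_ENV above, e.g. {listener, [{port, 15680}]}.
+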
+DEPS = rabbit_common rabbit rabbitmq_web_dispatch rabbitmq_web_stomp
+
+DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-plugin.mk
+DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk
+
+# FIXME: Use erlang.mk patched for RabbitMQ, while waiting for PRs to be
+# reviewed and merged.
+
+ERLANG_MK_REPO = https://github.com/rabbitmq/erlang.mk.git
+ERLANG_MK_COMMIT = rabbitmq-tmp
+
+include rabbitmq-components.mk
+include erlang.mk
diff --git a/deps/rabbitmq_web_stomp_examples/README.md b/deps/rabbitmq_web_stomp_examples/README.md
new file mode 100644
index 0000000000..109e8c45ff
--- /dev/null
+++ b/deps/rabbitmq_web_stomp_examples/README.md
@@ -0,0 +1,13 @@
+# RabbitMQ Web STOMP Examples
+
+This project contains a few basic examples of [RabbitMQ Web STOMP plugin](https://github.com/rabbitmq/rabbitmq-web-stomp)
+usage.
+
+Once installed, the server will bind to port 15670 and serve a few
+static HTML files there (e.g. [http://127.0.0.1:15670](http://127.0.0.1:15670/)).
+
+## Installation
+
+This plugin ships with RabbitMQ. Enable it using [CLI tools](https://www.rabbitmq.com/cli.html):
+
+ rabbitmq-plugins enable rabbitmq_web_stomp_examples
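+
+To verify that it is enabled, list plugins with the same CLI tools:
+
+    rabbitmq-plugins list
+
+then open [http://127.0.0.1:15670](http://127.0.0.1:15670/) in a browser.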
diff --git a/deps/rabbitmq_web_stomp_examples/erlang.mk b/deps/rabbitmq_web_stomp_examples/erlang.mk
new file mode 100644
index 0000000000..fce4be0b0a
--- /dev/null
+++ b/deps/rabbitmq_web_stomp_examples/erlang.mk
@@ -0,0 +1,7808 @@
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+.PHONY: all app apps deps search rel relup docs install-docs check tests clean distclean help erlang-mk
+
+ERLANG_MK_FILENAME := $(realpath $(lastword $(MAKEFILE_LIST)))
+export ERLANG_MK_FILENAME
+
+ERLANG_MK_VERSION = 2019.07.01-40-geb3e4b0
+ERLANG_MK_WITHOUT =
+
+# Make 3.81 and 3.82 are deprecated.
+
+ifeq ($(MAKELEVEL)$(MAKE_VERSION),03.81)
+$(warning Please upgrade to GNU Make 4 or later: https://erlang.mk/guide/installation.html)
+endif
+
+ifeq ($(MAKELEVEL)$(MAKE_VERSION),03.82)
+$(warning Please upgrade to GNU Make 4 or later: https://erlang.mk/guide/installation.html)
+endif
+
+# Core configuration.
+
+PROJECT ?= $(notdir $(CURDIR))
+PROJECT := $(strip $(PROJECT))
+
+PROJECT_VERSION ?= rolling
+PROJECT_MOD ?= $(PROJECT)_app
+PROJECT_ENV ?= []
+
+# Verbosity.
+
+V ?= 0
+
+verbose_0 = @
+verbose_2 = set -x;
+verbose = $(verbose_$(V))
+
+ifeq ($(V),3)
+SHELL := $(SHELL) -x
+endif
+
+gen_verbose_0 = @echo " GEN " $@;
+gen_verbose_2 = set -x;
+gen_verbose = $(gen_verbose_$(V))
+
+gen_verbose_esc_0 = @echo " GEN " $$@;
+gen_verbose_esc_2 = set -x;
+gen_verbose_esc = $(gen_verbose_esc_$(V))
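+
+# For illustration (editor's note): with the default V=0 most commands
+# hide behind short " GEN " labels; "make V=1" lets make echo each
+# command, and "make V=2" additionally traces them with "set -x".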
+
+# Temporary files directory.
+
+ERLANG_MK_TMP ?= $(CURDIR)/.erlang.mk
+export ERLANG_MK_TMP
+
+# "erl" command.
+
+ERL = erl +A1 -noinput -boot no_dot_erlang
+
+# Platform detection.
+
+ifeq ($(PLATFORM),)
+UNAME_S := $(shell uname -s)
+
+ifeq ($(UNAME_S),Linux)
+PLATFORM = linux
+else ifeq ($(UNAME_S),Darwin)
+PLATFORM = darwin
+else ifeq ($(UNAME_S),SunOS)
+PLATFORM = solaris
+else ifeq ($(UNAME_S),GNU)
+PLATFORM = gnu
+else ifeq ($(UNAME_S),FreeBSD)
+PLATFORM = freebsd
+else ifeq ($(UNAME_S),NetBSD)
+PLATFORM = netbsd
+else ifeq ($(UNAME_S),OpenBSD)
+PLATFORM = openbsd
+else ifeq ($(UNAME_S),DragonFly)
+PLATFORM = dragonfly
+else ifeq ($(shell uname -o),Msys)
+PLATFORM = msys2
+else
+$(error Unable to detect platform. Please open a ticket with the output of uname -a.)
+endif
+
+export PLATFORM
+endif
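+
+# For illustration (editor's note): detection can be bypassed entirely
+# by setting PLATFORM up front, e.g. "make PLATFORM=linux".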
+
+# Core targets.
+
+all:: deps app rel
+
+# Noop to avoid a Make warning when there's nothing to do.
+rel::
+ $(verbose) :
+
+relup:: deps app
+
+check:: tests
+
+clean:: clean-crashdump
+
+clean-crashdump:
+ifneq ($(wildcard erl_crash.dump),)
+ $(gen_verbose) rm -f erl_crash.dump
+endif
+
+distclean:: clean distclean-tmp
+
+$(ERLANG_MK_TMP):
+ $(verbose) mkdir -p $(ERLANG_MK_TMP)
+
+distclean-tmp:
+ $(gen_verbose) rm -rf $(ERLANG_MK_TMP)
+
+help::
+ $(verbose) printf "%s\n" \
+ "erlang.mk (version $(ERLANG_MK_VERSION)) is distributed under the terms of the ISC License." \
+ "Copyright (c) 2013-2016 Loïc Hoguin <essen@ninenines.eu>" \
+ "" \
+ "Usage: [V=1] $(MAKE) [target]..." \
+ "" \
+ "Core targets:" \
+ " all Run deps, app and rel targets in that order" \
+ " app Compile the project" \
+ " deps Fetch dependencies (if needed) and compile them" \
+ " fetch-deps Fetch dependencies recursively (if needed) without compiling them" \
+ " list-deps List dependencies recursively on stdout" \
+ " search q=... Search for a package in the built-in index" \
+ " rel Build a release for this project, if applicable" \
+ " docs Build the documentation for this project" \
+ " install-docs Install the man pages for this project" \
+ " check Compile and run all tests and analysis for this project" \
+ " tests Run the tests for this project" \
+ " clean Delete temporary and output files from most targets" \
+ " distclean Delete all temporary and output files" \
+ " help Display this help and exit" \
+ " erlang-mk Update erlang.mk to the latest version"
+
+# Core functions.
+
+empty :=
+space := $(empty) $(empty)
+tab := $(empty) $(empty)
+comma := ,
+
+define newline
+
+
+endef
+
+define comma_list
+$(subst $(space),$(comma),$(strip $(1)))
+endef
+
+define escape_dquotes
+$(subst ",\",$1)
+endef
+
+# Adding erlang.mk to make Erlang scripts that call init:get_plain_arguments() happy.
+define erlang
+$(ERL) $2 -pz $(ERLANG_MK_TMP)/rebar/ebin -eval "$(subst $(newline),,$(call escape_dquotes,$1))" -- erlang.mk
+endef
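+
+# For illustration (editor's note): $(call erlang,halt(0).) starts a
+# fresh "erl" with the bundled rebar ebin directory on the code path;
+# because "erl" runs with -noinput, the evaluated script must itself
+# end with a halt() call.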
+
+ifeq ($(PLATFORM),msys2)
+core_native_path = $(shell cygpath -m $1)
+else
+core_native_path = $1
+endif
+
+core_http_get = curl -Lf$(if $(filter-out 0,$(V)),,s)o $(call core_native_path,$1) $2
+
+core_eq = $(and $(findstring $(1),$(2)),$(findstring $(2),$(1)))
+
+# We skip files that contain spaces because they end up causing issues.
+core_find = $(if $(wildcard $1),$(shell find $(1:%/=%) \( -type l -o -type f \) -name $(subst *,\*,$2) | grep -v " "))
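+# For illustration (editor's note): a call such as
+# $(call core_find,src/,*.erl) expands to every regular file or symlink
+# named "*.erl" under src/, dropping any path that contains a space.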
+
+core_lc = $(subst A,a,$(subst B,b,$(subst C,c,$(subst D,d,$(subst E,e,$(subst F,f,$(subst G,g,$(subst H,h,$(subst I,i,$(subst J,j,$(subst K,k,$(subst L,l,$(subst M,m,$(subst N,n,$(subst O,o,$(subst P,p,$(subst Q,q,$(subst R,r,$(subst S,s,$(subst T,t,$(subst U,u,$(subst V,v,$(subst W,w,$(subst X,x,$(subst Y,y,$(subst Z,z,$(1)))))))))))))))))))))))))))
+
+core_ls = $(filter-out $(1),$(shell echo $(1)))
+
+# @todo Use a solution that does not require using perl.
+core_relpath = $(shell perl -e 'use File::Spec; print File::Spec->abs2rel(@ARGV) . "\n"' $1 $2)
+
+define core_render
+ printf -- '$(subst $(newline),\n,$(subst %,%%,$(subst ','\'',$(subst $(tab),$(WS),$(call $(1))))))\n' > $(2)
+endef
+
+# Automated update.
+
+ERLANG_MK_REPO ?= https://github.com/ninenines/erlang.mk
+ERLANG_MK_COMMIT ?=
+ERLANG_MK_BUILD_CONFIG ?= build.config
+ERLANG_MK_BUILD_DIR ?= .erlang.mk.build
+
+erlang-mk: WITHOUT ?= $(ERLANG_MK_WITHOUT)
+erlang-mk:
+ifdef ERLANG_MK_COMMIT
+ $(verbose) git clone $(ERLANG_MK_REPO) $(ERLANG_MK_BUILD_DIR)
+ $(verbose) cd $(ERLANG_MK_BUILD_DIR) && git checkout $(ERLANG_MK_COMMIT)
+else
+ $(verbose) git clone --depth 1 $(ERLANG_MK_REPO) $(ERLANG_MK_BUILD_DIR)
+endif
+ $(verbose) if [ -f $(ERLANG_MK_BUILD_CONFIG) ]; then cp $(ERLANG_MK_BUILD_CONFIG) $(ERLANG_MK_BUILD_DIR)/build.config; fi
+ $(gen_verbose) $(MAKE) --no-print-directory -C $(ERLANG_MK_BUILD_DIR) WITHOUT='$(strip $(WITHOUT))' UPGRADE=1
+ $(verbose) cp $(ERLANG_MK_BUILD_DIR)/erlang.mk ./erlang.mk
+ $(verbose) rm -rf $(ERLANG_MK_BUILD_DIR)
+ $(verbose) rm -rf $(ERLANG_MK_TMP)
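+
+# For illustration (editor's note): a project pinning a fork of
+# erlang.mk, as the RabbitMQ Makefiles do, only needs to set two
+# variables before the include:
+#
+#     ERLANG_MK_REPO = https://github.com/rabbitmq/erlang.mk.git
+#     ERLANG_MK_COMMIT = rabbitmq-tmp
+#
+# and then run "make erlang-mk" to regenerate this file from that fork.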
+
+# The erlang.mk package index is bundled in the default erlang.mk build.
+# Search for the string "copyright" to skip to the rest of the code.
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-kerl
+
+KERL_INSTALL_DIR ?= $(HOME)/erlang
+
+ifeq ($(strip $(KERL)),)
+KERL := $(ERLANG_MK_TMP)/kerl/kerl
+endif
+
+KERL_DIR = $(ERLANG_MK_TMP)/kerl
+
+export KERL
+
+KERL_GIT ?= https://github.com/kerl/kerl
+KERL_COMMIT ?= master
+
+KERL_MAKEFLAGS ?=
+
+OTP_GIT ?= https://github.com/erlang/otp
+
+define kerl_otp_target
+$(KERL_INSTALL_DIR)/$(1): $(KERL)
+ $(verbose) if [ ! -d $$@ ]; then \
+ MAKEFLAGS="$(KERL_MAKEFLAGS)" $(KERL) build git $(OTP_GIT) $(1) $(1); \
+ $(KERL) install $(1) $(KERL_INSTALL_DIR)/$(1); \
+ fi
+endef
+
+define kerl_hipe_target
+$(KERL_INSTALL_DIR)/$1-native: $(KERL)
+ $(verbose) if [ ! -d $$@ ]; then \
+ KERL_CONFIGURE_OPTIONS=--enable-native-libs \
+ MAKEFLAGS="$(KERL_MAKEFLAGS)" $(KERL) build git $(OTP_GIT) $1 $1-native; \
+ $(KERL) install $1-native $(KERL_INSTALL_DIR)/$1-native; \
+ fi
+endef
+
+$(KERL): $(KERL_DIR)
+
+$(KERL_DIR): | $(ERLANG_MK_TMP)
+ $(gen_verbose) git clone --depth 1 $(KERL_GIT) $(ERLANG_MK_TMP)/kerl
+ $(verbose) cd $(ERLANG_MK_TMP)/kerl && git checkout $(KERL_COMMIT)
+ $(verbose) chmod +x $(KERL)
+
+distclean:: distclean-kerl
+
+distclean-kerl:
+ $(gen_verbose) rm -rf $(KERL_DIR)
+
+# Allow users to select which version of Erlang/OTP to use for a project.
+
+ifneq ($(strip $(LATEST_ERLANG_OTP)),)
+# In some environments it is necessary to filter out master.
+ERLANG_OTP := $(notdir $(lastword $(sort\
+ $(filter-out $(KERL_INSTALL_DIR)/master $(KERL_INSTALL_DIR)/OTP_R%,\
+ $(filter-out %-rc1 %-rc2 %-rc3,$(wildcard $(KERL_INSTALL_DIR)/*[^-native]))))))
+endif
+
+ERLANG_OTP ?=
+ERLANG_HIPE ?=
+
+# Use kerl to enforce a specific Erlang/OTP version for a project.
+ifneq ($(strip $(ERLANG_OTP)),)
+export PATH := $(KERL_INSTALL_DIR)/$(ERLANG_OTP)/bin:$(PATH)
+SHELL := env PATH=$(PATH) $(SHELL)
+$(eval $(call kerl_otp_target,$(ERLANG_OTP)))
+
+# Build Erlang/OTP only if it doesn't already exist.
+ifeq ($(wildcard $(KERL_INSTALL_DIR)/$(ERLANG_OTP))$(BUILD_ERLANG_OTP),)
+$(info Building Erlang/OTP $(ERLANG_OTP)... Please wait...)
+$(shell $(MAKE) $(KERL_INSTALL_DIR)/$(ERLANG_OTP) ERLANG_OTP=$(ERLANG_OTP) BUILD_ERLANG_OTP=1 >&2)
+endif
+
+else
+# Same for a HiPE enabled VM.
+ifneq ($(strip $(ERLANG_HIPE)),)
+export PATH := $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native/bin:$(PATH)
+SHELL := env PATH=$(PATH) $(SHELL)
+$(eval $(call kerl_hipe_target,$(ERLANG_HIPE)))
+
+# Build Erlang/OTP only if it doesn't already exist.
+ifeq ($(wildcard $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native)$(BUILD_ERLANG_OTP),)
+$(info Building HiPE-enabled Erlang/OTP $(ERLANG_HIPE)... Please wait...)
+$(shell $(MAKE) $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native ERLANG_HIPE=$(ERLANG_HIPE) BUILD_ERLANG_OTP=1 >&2)
+endif
+
+endif
+endif
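+
+# For illustration (editor's note): pinning a build to one Erlang/OTP
+# release via kerl could look like "make ERLANG_OTP=OTP-22.3", which
+# builds and installs that version under $(KERL_INSTALL_DIR) on first
+# use and prepends its bin/ directory to PATH.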
+
+PACKAGES += aberth
+pkg_aberth_name = aberth
+pkg_aberth_description = Generic BERT-RPC server in Erlang
+pkg_aberth_homepage = https://github.com/a13x/aberth
+pkg_aberth_fetch = git
+pkg_aberth_repo = https://github.com/a13x/aberth
+pkg_aberth_commit = master
+
+PACKAGES += active
+pkg_active_name = active
+pkg_active_description = Active development for Erlang: rebuild and reload source/binary files while the VM is running
+pkg_active_homepage = https://github.com/proger/active
+pkg_active_fetch = git
+pkg_active_repo = https://github.com/proger/active
+pkg_active_commit = master
+
+PACKAGES += actordb_core
+pkg_actordb_core_name = actordb_core
+pkg_actordb_core_description = ActorDB main source
+pkg_actordb_core_homepage = http://www.actordb.com/
+pkg_actordb_core_fetch = git
+pkg_actordb_core_repo = https://github.com/biokoda/actordb_core
+pkg_actordb_core_commit = master
+
+PACKAGES += actordb_thrift
+pkg_actordb_thrift_name = actordb_thrift
+pkg_actordb_thrift_description = Thrift API for ActorDB
+pkg_actordb_thrift_homepage = http://www.actordb.com/
+pkg_actordb_thrift_fetch = git
+pkg_actordb_thrift_repo = https://github.com/biokoda/actordb_thrift
+pkg_actordb_thrift_commit = master
+
+PACKAGES += aleppo
+pkg_aleppo_name = aleppo
+pkg_aleppo_description = Alternative Erlang Pre-Processor
+pkg_aleppo_homepage = https://github.com/ErlyORM/aleppo
+pkg_aleppo_fetch = git
+pkg_aleppo_repo = https://github.com/ErlyORM/aleppo
+pkg_aleppo_commit = master
+
+PACKAGES += alog
+pkg_alog_name = alog
+pkg_alog_description = Simply the best logging framework for Erlang
+pkg_alog_homepage = https://github.com/siberian-fast-food/alogger
+pkg_alog_fetch = git
+pkg_alog_repo = https://github.com/siberian-fast-food/alogger
+pkg_alog_commit = master
+
+PACKAGES += amqp_client
+pkg_amqp_client_name = amqp_client
+pkg_amqp_client_description = RabbitMQ Erlang AMQP client
+pkg_amqp_client_homepage = https://www.rabbitmq.com/erlang-client-user-guide.html
+pkg_amqp_client_fetch = git
+pkg_amqp_client_repo = https://github.com/rabbitmq/rabbitmq-erlang-client.git
+pkg_amqp_client_commit = master
+
+PACKAGES += annotations
+pkg_annotations_name = annotations
+pkg_annotations_description = Simple code instrumentation utilities
+pkg_annotations_homepage = https://github.com/hyperthunk/annotations
+pkg_annotations_fetch = git
+pkg_annotations_repo = https://github.com/hyperthunk/annotations
+pkg_annotations_commit = master
+
+PACKAGES += antidote
+pkg_antidote_name = antidote
+pkg_antidote_description = Large-scale computation without synchronisation
+pkg_antidote_homepage = https://syncfree.lip6.fr/
+pkg_antidote_fetch = git
+pkg_antidote_repo = https://github.com/SyncFree/antidote
+pkg_antidote_commit = master
+
+PACKAGES += apns
+pkg_apns_name = apns
+pkg_apns_description = Apple Push Notification Server for Erlang
+pkg_apns_homepage = http://inaka.github.com/apns4erl
+pkg_apns_fetch = git
+pkg_apns_repo = https://github.com/inaka/apns4erl
+pkg_apns_commit = master
+
+PACKAGES += asciideck
+pkg_asciideck_name = asciideck
+pkg_asciideck_description = Asciidoc for Erlang.
+pkg_asciideck_homepage = https://ninenines.eu
+pkg_asciideck_fetch = git
+pkg_asciideck_repo = https://github.com/ninenines/asciideck
+pkg_asciideck_commit = master
+
+PACKAGES += azdht
+pkg_azdht_name = azdht
+pkg_azdht_description = Azureus Distributed Hash Table (DHT) in Erlang
+pkg_azdht_homepage = https://github.com/arcusfelis/azdht
+pkg_azdht_fetch = git
+pkg_azdht_repo = https://github.com/arcusfelis/azdht
+pkg_azdht_commit = master
+
+PACKAGES += backoff
+pkg_backoff_name = backoff
+pkg_backoff_description = Simple exponential backoffs in Erlang
+pkg_backoff_homepage = https://github.com/ferd/backoff
+pkg_backoff_fetch = git
+pkg_backoff_repo = https://github.com/ferd/backoff
+pkg_backoff_commit = master
+
+PACKAGES += barrel_tcp
+pkg_barrel_tcp_name = barrel_tcp
+pkg_barrel_tcp_description = barrel is a generic TCP acceptor pool with low latency in Erlang.
+pkg_barrel_tcp_homepage = https://github.com/benoitc-attic/barrel_tcp
+pkg_barrel_tcp_fetch = git
+pkg_barrel_tcp_repo = https://github.com/benoitc-attic/barrel_tcp
+pkg_barrel_tcp_commit = master
+
+PACKAGES += basho_bench
+pkg_basho_bench_name = basho_bench
+pkg_basho_bench_description = A load-generation and testing tool for basically whatever you can write a returning Erlang function for.
+pkg_basho_bench_homepage = https://github.com/basho/basho_bench
+pkg_basho_bench_fetch = git
+pkg_basho_bench_repo = https://github.com/basho/basho_bench
+pkg_basho_bench_commit = master
+
+PACKAGES += bcrypt
+pkg_bcrypt_name = bcrypt
+pkg_bcrypt_description = Bcrypt Erlang / C library
+pkg_bcrypt_homepage = https://github.com/erlangpack/bcrypt
+pkg_bcrypt_fetch = git
+pkg_bcrypt_repo = https://github.com/erlangpack/bcrypt.git
+pkg_bcrypt_commit = master
+
+PACKAGES += beam
+pkg_beam_name = beam
+pkg_beam_description = BEAM emulator written in Erlang
+pkg_beam_homepage = https://github.com/tonyrog/beam
+pkg_beam_fetch = git
+pkg_beam_repo = https://github.com/tonyrog/beam
+pkg_beam_commit = master
+
+PACKAGES += beanstalk
+pkg_beanstalk_name = beanstalk
+pkg_beanstalk_description = An Erlang client for beanstalkd
+pkg_beanstalk_homepage = https://github.com/tim/erlang-beanstalk
+pkg_beanstalk_fetch = git
+pkg_beanstalk_repo = https://github.com/tim/erlang-beanstalk
+pkg_beanstalk_commit = master
+
+PACKAGES += bear
+pkg_bear_name = bear
+pkg_bear_description = a set of statistics functions for erlang
+pkg_bear_homepage = https://github.com/boundary/bear
+pkg_bear_fetch = git
+pkg_bear_repo = https://github.com/boundary/bear
+pkg_bear_commit = master
+
+PACKAGES += bertconf
+pkg_bertconf_name = bertconf
+pkg_bertconf_description = Make ETS tables out of static BERT files that are auto-reloaded
+pkg_bertconf_homepage = https://github.com/ferd/bertconf
+pkg_bertconf_fetch = git
+pkg_bertconf_repo = https://github.com/ferd/bertconf
+pkg_bertconf_commit = master
+
+PACKAGES += bifrost
+pkg_bifrost_name = bifrost
+pkg_bifrost_description = Erlang FTP Server Framework
+pkg_bifrost_homepage = https://github.com/thorstadt/bifrost
+pkg_bifrost_fetch = git
+pkg_bifrost_repo = https://github.com/thorstadt/bifrost
+pkg_bifrost_commit = master
+
+PACKAGES += binpp
+pkg_binpp_name = binpp
+pkg_binpp_description = Erlang Binary Pretty Printer
+pkg_binpp_homepage = https://github.com/jtendo/binpp
+pkg_binpp_fetch = git
+pkg_binpp_repo = https://github.com/jtendo/binpp
+pkg_binpp_commit = master
+
+PACKAGES += bisect
+pkg_bisect_name = bisect
+pkg_bisect_description = Ordered fixed-size binary dictionary in Erlang
+pkg_bisect_homepage = https://github.com/knutin/bisect
+pkg_bisect_fetch = git
+pkg_bisect_repo = https://github.com/knutin/bisect
+pkg_bisect_commit = master
+
+PACKAGES += bitcask
+pkg_bitcask_name = bitcask
+pkg_bitcask_description = because you need another key/value storage engine
+pkg_bitcask_homepage = https://github.com/basho/bitcask
+pkg_bitcask_fetch = git
+pkg_bitcask_repo = https://github.com/basho/bitcask
+pkg_bitcask_commit = develop
+
+PACKAGES += bitstore
+pkg_bitstore_name = bitstore
+pkg_bitstore_description = A document based ontology development environment
+pkg_bitstore_homepage = https://github.com/bdionne/bitstore
+pkg_bitstore_fetch = git
+pkg_bitstore_repo = https://github.com/bdionne/bitstore
+pkg_bitstore_commit = master
+
+PACKAGES += bootstrap
+pkg_bootstrap_name = bootstrap
+pkg_bootstrap_description = A simple, yet powerful Erlang cluster bootstrapping application.
+pkg_bootstrap_homepage = https://github.com/schlagert/bootstrap
+pkg_bootstrap_fetch = git
+pkg_bootstrap_repo = https://github.com/schlagert/bootstrap
+pkg_bootstrap_commit = master
+
+PACKAGES += boss
+pkg_boss_name = boss
+pkg_boss_description = Erlang web MVC, now featuring Comet
+pkg_boss_homepage = https://github.com/ChicagoBoss/ChicagoBoss
+pkg_boss_fetch = git
+pkg_boss_repo = https://github.com/ChicagoBoss/ChicagoBoss
+pkg_boss_commit = master
+
+PACKAGES += boss_db
+pkg_boss_db_name = boss_db
+pkg_boss_db_description = BossDB: a sharded, caching, pooling, evented ORM for Erlang
+pkg_boss_db_homepage = https://github.com/ErlyORM/boss_db
+pkg_boss_db_fetch = git
+pkg_boss_db_repo = https://github.com/ErlyORM/boss_db
+pkg_boss_db_commit = master
+
+PACKAGES += brod
+pkg_brod_name = brod
+pkg_brod_description = Kafka client in Erlang
+pkg_brod_homepage = https://github.com/klarna/brod
+pkg_brod_fetch = git
+pkg_brod_repo = https://github.com/klarna/brod.git
+pkg_brod_commit = master
+
+PACKAGES += bson
+pkg_bson_name = bson
+pkg_bson_description = BSON documents in Erlang, see bsonspec.org
+pkg_bson_homepage = https://github.com/comtihon/bson-erlang
+pkg_bson_fetch = git
+pkg_bson_repo = https://github.com/comtihon/bson-erlang
+pkg_bson_commit = master
+
+PACKAGES += bullet
+pkg_bullet_name = bullet
+pkg_bullet_description = Simple, reliable, efficient streaming for Cowboy.
+pkg_bullet_homepage = http://ninenines.eu
+pkg_bullet_fetch = git
+pkg_bullet_repo = https://github.com/ninenines/bullet
+pkg_bullet_commit = master
+
+PACKAGES += cache
+pkg_cache_name = cache
+pkg_cache_description = Erlang in-memory cache
+pkg_cache_homepage = https://github.com/fogfish/cache
+pkg_cache_fetch = git
+pkg_cache_repo = https://github.com/fogfish/cache
+pkg_cache_commit = master
+
+PACKAGES += cake
+pkg_cake_name = cake
+pkg_cake_description = Really simple terminal colorization
+pkg_cake_homepage = https://github.com/darach/cake-erl
+pkg_cake_fetch = git
+pkg_cake_repo = https://github.com/darach/cake-erl
+pkg_cake_commit = master
+
+PACKAGES += carotene
+pkg_carotene_name = carotene
+pkg_carotene_description = Real-time server
+pkg_carotene_homepage = https://github.com/carotene/carotene
+pkg_carotene_fetch = git
+pkg_carotene_repo = https://github.com/carotene/carotene
+pkg_carotene_commit = master
+
+PACKAGES += cberl
+pkg_cberl_name = cberl
+pkg_cberl_description = NIF based Erlang bindings for Couchbase
+pkg_cberl_homepage = https://github.com/chitika/cberl
+pkg_cberl_fetch = git
+pkg_cberl_repo = https://github.com/chitika/cberl
+pkg_cberl_commit = master
+
+PACKAGES += cecho
+pkg_cecho_name = cecho
+pkg_cecho_description = An ncurses library for Erlang
+pkg_cecho_homepage = https://github.com/mazenharake/cecho
+pkg_cecho_fetch = git
+pkg_cecho_repo = https://github.com/mazenharake/cecho
+pkg_cecho_commit = master
+
+PACKAGES += cferl
+pkg_cferl_name = cferl
+pkg_cferl_description = Rackspace / Open Stack Cloud Files Erlang Client
+pkg_cferl_homepage = https://github.com/ddossot/cferl
+pkg_cferl_fetch = git
+pkg_cferl_repo = https://github.com/ddossot/cferl
+pkg_cferl_commit = master
+
+PACKAGES += chaos_monkey
+pkg_chaos_monkey_name = chaos_monkey
+pkg_chaos_monkey_description = This is The CHAOS MONKEY. It will kill your processes.
+pkg_chaos_monkey_homepage = https://github.com/dLuna/chaos_monkey
+pkg_chaos_monkey_fetch = git
+pkg_chaos_monkey_repo = https://github.com/dLuna/chaos_monkey
+pkg_chaos_monkey_commit = master
+
+PACKAGES += check_node
+pkg_check_node_name = check_node
+pkg_check_node_description = Nagios Scripts for monitoring Riak
+pkg_check_node_homepage = https://github.com/basho-labs/riak_nagios
+pkg_check_node_fetch = git
+pkg_check_node_repo = https://github.com/basho-labs/riak_nagios
+pkg_check_node_commit = master
+
+PACKAGES += chronos
+pkg_chronos_name = chronos
+pkg_chronos_description = Timer module for Erlang that makes it easy to abstract time out of the tests.
+pkg_chronos_homepage = https://github.com/lehoff/chronos
+pkg_chronos_fetch = git
+pkg_chronos_repo = https://github.com/lehoff/chronos
+pkg_chronos_commit = master
+
+PACKAGES += chumak
+pkg_chumak_name = chumak
+pkg_chumak_description = Pure Erlang implementation of ZeroMQ Message Transport Protocol.
+pkg_chumak_homepage = http://choven.ca
+pkg_chumak_fetch = git
+pkg_chumak_repo = https://github.com/chovencorp/chumak
+pkg_chumak_commit = master
+
+PACKAGES += cl
+pkg_cl_name = cl
+pkg_cl_description = OpenCL binding for Erlang
+pkg_cl_homepage = https://github.com/tonyrog/cl
+pkg_cl_fetch = git
+pkg_cl_repo = https://github.com/tonyrog/cl
+pkg_cl_commit = master
+
+PACKAGES += clique
+pkg_clique_name = clique
+pkg_clique_description = CLI Framework for Erlang
+pkg_clique_homepage = https://github.com/basho/clique
+pkg_clique_fetch = git
+pkg_clique_repo = https://github.com/basho/clique
+pkg_clique_commit = develop
+
+PACKAGES += cloudi_core
+pkg_cloudi_core_name = cloudi_core
+pkg_cloudi_core_description = CloudI internal service runtime
+pkg_cloudi_core_homepage = http://cloudi.org/
+pkg_cloudi_core_fetch = git
+pkg_cloudi_core_repo = https://github.com/CloudI/cloudi_core
+pkg_cloudi_core_commit = master
+
+PACKAGES += cloudi_service_api_requests
+pkg_cloudi_service_api_requests_name = cloudi_service_api_requests
+pkg_cloudi_service_api_requests_description = CloudI Service API requests (JSON-RPC/Erlang-term support)
+pkg_cloudi_service_api_requests_homepage = http://cloudi.org/
+pkg_cloudi_service_api_requests_fetch = git
+pkg_cloudi_service_api_requests_repo = https://github.com/CloudI/cloudi_service_api_requests
+pkg_cloudi_service_api_requests_commit = master
+
+PACKAGES += cloudi_service_db
+pkg_cloudi_service_db_name = cloudi_service_db
+pkg_cloudi_service_db_description = CloudI Database (in-memory/testing/generic)
+pkg_cloudi_service_db_homepage = http://cloudi.org/
+pkg_cloudi_service_db_fetch = git
+pkg_cloudi_service_db_repo = https://github.com/CloudI/cloudi_service_db
+pkg_cloudi_service_db_commit = master
+
+PACKAGES += cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_name = cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_description = Cassandra CloudI Service
+pkg_cloudi_service_db_cassandra_homepage = http://cloudi.org/
+pkg_cloudi_service_db_cassandra_fetch = git
+pkg_cloudi_service_db_cassandra_repo = https://github.com/CloudI/cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_commit = master
+
+PACKAGES += cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_name = cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_description = Cassandra CQL CloudI Service
+pkg_cloudi_service_db_cassandra_cql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_cassandra_cql_fetch = git
+pkg_cloudi_service_db_cassandra_cql_repo = https://github.com/CloudI/cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_commit = master
+
+PACKAGES += cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_name = cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_description = CouchDB CloudI Service
+pkg_cloudi_service_db_couchdb_homepage = http://cloudi.org/
+pkg_cloudi_service_db_couchdb_fetch = git
+pkg_cloudi_service_db_couchdb_repo = https://github.com/CloudI/cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_commit = master
+
+PACKAGES += cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_name = cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_description = elasticsearch CloudI Service
+pkg_cloudi_service_db_elasticsearch_homepage = http://cloudi.org/
+pkg_cloudi_service_db_elasticsearch_fetch = git
+pkg_cloudi_service_db_elasticsearch_repo = https://github.com/CloudI/cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_commit = master
+
+PACKAGES += cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_name = cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_description = memcached CloudI Service
+pkg_cloudi_service_db_memcached_homepage = http://cloudi.org/
+pkg_cloudi_service_db_memcached_fetch = git
+pkg_cloudi_service_db_memcached_repo = https://github.com/CloudI/cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_commit = master
+
+PACKAGES += cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_name = cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_description = MySQL CloudI Service
+pkg_cloudi_service_db_mysql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_mysql_fetch = git
+pkg_cloudi_service_db_mysql_repo = https://github.com/CloudI/cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_commit = master
+
+PACKAGES += cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_name = cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_description = PostgreSQL CloudI Service
+pkg_cloudi_service_db_pgsql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_pgsql_fetch = git
+pkg_cloudi_service_db_pgsql_repo = https://github.com/CloudI/cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_commit = master
+
+PACKAGES += cloudi_service_db_riak
+pkg_cloudi_service_db_riak_name = cloudi_service_db_riak
+pkg_cloudi_service_db_riak_description = Riak CloudI Service
+pkg_cloudi_service_db_riak_homepage = http://cloudi.org/
+pkg_cloudi_service_db_riak_fetch = git
+pkg_cloudi_service_db_riak_repo = https://github.com/CloudI/cloudi_service_db_riak
+pkg_cloudi_service_db_riak_commit = master
+
+PACKAGES += cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_name = cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_description = Tokyo Tyrant CloudI Service
+pkg_cloudi_service_db_tokyotyrant_homepage = http://cloudi.org/
+pkg_cloudi_service_db_tokyotyrant_fetch = git
+pkg_cloudi_service_db_tokyotyrant_repo = https://github.com/CloudI/cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_commit = master
+
+PACKAGES += cloudi_service_filesystem
+pkg_cloudi_service_filesystem_name = cloudi_service_filesystem
+pkg_cloudi_service_filesystem_description = Filesystem CloudI Service
+pkg_cloudi_service_filesystem_homepage = http://cloudi.org/
+pkg_cloudi_service_filesystem_fetch = git
+pkg_cloudi_service_filesystem_repo = https://github.com/CloudI/cloudi_service_filesystem
+pkg_cloudi_service_filesystem_commit = master
+
+PACKAGES += cloudi_service_http_client
+pkg_cloudi_service_http_client_name = cloudi_service_http_client
+pkg_cloudi_service_http_client_description = HTTP client CloudI Service
+pkg_cloudi_service_http_client_homepage = http://cloudi.org/
+pkg_cloudi_service_http_client_fetch = git
+pkg_cloudi_service_http_client_repo = https://github.com/CloudI/cloudi_service_http_client
+pkg_cloudi_service_http_client_commit = master
+
+PACKAGES += cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_name = cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_description = cowboy HTTP/HTTPS CloudI Service
+pkg_cloudi_service_http_cowboy_homepage = http://cloudi.org/
+pkg_cloudi_service_http_cowboy_fetch = git
+pkg_cloudi_service_http_cowboy_repo = https://github.com/CloudI/cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_commit = master
+
+PACKAGES += cloudi_service_http_elli
+pkg_cloudi_service_http_elli_name = cloudi_service_http_elli
+pkg_cloudi_service_http_elli_description = elli HTTP CloudI Service
+pkg_cloudi_service_http_elli_homepage = http://cloudi.org/
+pkg_cloudi_service_http_elli_fetch = git
+pkg_cloudi_service_http_elli_repo = https://github.com/CloudI/cloudi_service_http_elli
+pkg_cloudi_service_http_elli_commit = master
+
+PACKAGES += cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_name = cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_description = Map/Reduce CloudI Service
+pkg_cloudi_service_map_reduce_homepage = http://cloudi.org/
+pkg_cloudi_service_map_reduce_fetch = git
+pkg_cloudi_service_map_reduce_repo = https://github.com/CloudI/cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_commit = master
+
+PACKAGES += cloudi_service_oauth1
+pkg_cloudi_service_oauth1_name = cloudi_service_oauth1
+pkg_cloudi_service_oauth1_description = OAuth v1.0 CloudI Service
+pkg_cloudi_service_oauth1_homepage = http://cloudi.org/
+pkg_cloudi_service_oauth1_fetch = git
+pkg_cloudi_service_oauth1_repo = https://github.com/CloudI/cloudi_service_oauth1
+pkg_cloudi_service_oauth1_commit = master
+
+PACKAGES += cloudi_service_queue
+pkg_cloudi_service_queue_name = cloudi_service_queue
+pkg_cloudi_service_queue_description = Persistent Queue Service
+pkg_cloudi_service_queue_homepage = http://cloudi.org/
+pkg_cloudi_service_queue_fetch = git
+pkg_cloudi_service_queue_repo = https://github.com/CloudI/cloudi_service_queue
+pkg_cloudi_service_queue_commit = master
+
+PACKAGES += cloudi_service_quorum
+pkg_cloudi_service_quorum_name = cloudi_service_quorum
+pkg_cloudi_service_quorum_description = CloudI Quorum Service
+pkg_cloudi_service_quorum_homepage = http://cloudi.org/
+pkg_cloudi_service_quorum_fetch = git
+pkg_cloudi_service_quorum_repo = https://github.com/CloudI/cloudi_service_quorum
+pkg_cloudi_service_quorum_commit = master
+
+PACKAGES += cloudi_service_router
+pkg_cloudi_service_router_name = cloudi_service_router
+pkg_cloudi_service_router_description = CloudI Router Service
+pkg_cloudi_service_router_homepage = http://cloudi.org/
+pkg_cloudi_service_router_fetch = git
+pkg_cloudi_service_router_repo = https://github.com/CloudI/cloudi_service_router
+pkg_cloudi_service_router_commit = master
+
+PACKAGES += cloudi_service_tcp
+pkg_cloudi_service_tcp_name = cloudi_service_tcp
+pkg_cloudi_service_tcp_description = TCP CloudI Service
+pkg_cloudi_service_tcp_homepage = http://cloudi.org/
+pkg_cloudi_service_tcp_fetch = git
+pkg_cloudi_service_tcp_repo = https://github.com/CloudI/cloudi_service_tcp
+pkg_cloudi_service_tcp_commit = master
+
+PACKAGES += cloudi_service_timers
+pkg_cloudi_service_timers_name = cloudi_service_timers
+pkg_cloudi_service_timers_description = Timers CloudI Service
+pkg_cloudi_service_timers_homepage = http://cloudi.org/
+pkg_cloudi_service_timers_fetch = git
+pkg_cloudi_service_timers_repo = https://github.com/CloudI/cloudi_service_timers
+pkg_cloudi_service_timers_commit = master
+
+PACKAGES += cloudi_service_udp
+pkg_cloudi_service_udp_name = cloudi_service_udp
+pkg_cloudi_service_udp_description = UDP CloudI Service
+pkg_cloudi_service_udp_homepage = http://cloudi.org/
+pkg_cloudi_service_udp_fetch = git
+pkg_cloudi_service_udp_repo = https://github.com/CloudI/cloudi_service_udp
+pkg_cloudi_service_udp_commit = master
+
+PACKAGES += cloudi_service_validate
+pkg_cloudi_service_validate_name = cloudi_service_validate
+pkg_cloudi_service_validate_description = CloudI Validate Service
+pkg_cloudi_service_validate_homepage = http://cloudi.org/
+pkg_cloudi_service_validate_fetch = git
+pkg_cloudi_service_validate_repo = https://github.com/CloudI/cloudi_service_validate
+pkg_cloudi_service_validate_commit = master
+
+PACKAGES += cloudi_service_zeromq
+pkg_cloudi_service_zeromq_name = cloudi_service_zeromq
+pkg_cloudi_service_zeromq_description = ZeroMQ CloudI Service
+pkg_cloudi_service_zeromq_homepage = http://cloudi.org/
+pkg_cloudi_service_zeromq_fetch = git
+pkg_cloudi_service_zeromq_repo = https://github.com/CloudI/cloudi_service_zeromq
+pkg_cloudi_service_zeromq_commit = master
+
+PACKAGES += cluster_info
+pkg_cluster_info_name = cluster_info
+pkg_cluster_info_description = Fork of Hibari's nifty cluster_info OTP app
+pkg_cluster_info_homepage = https://github.com/basho/cluster_info
+pkg_cluster_info_fetch = git
+pkg_cluster_info_repo = https://github.com/basho/cluster_info
+pkg_cluster_info_commit = master
+
+PACKAGES += color
+pkg_color_name = color
+pkg_color_description = ANSI colors for your Erlang
+pkg_color_homepage = https://github.com/julianduque/erlang-color
+pkg_color_fetch = git
+pkg_color_repo = https://github.com/julianduque/erlang-color
+pkg_color_commit = master
+
+PACKAGES += confetti
+pkg_confetti_name = confetti
+pkg_confetti_description = Erlang configuration provider / application:get_env/2 on steroids
+pkg_confetti_homepage = https://github.com/jtendo/confetti
+pkg_confetti_fetch = git
+pkg_confetti_repo = https://github.com/jtendo/confetti
+pkg_confetti_commit = master
+
+PACKAGES += couchbeam
+pkg_couchbeam_name = couchbeam
+pkg_couchbeam_description = Apache CouchDB client in Erlang
+pkg_couchbeam_homepage = https://github.com/benoitc/couchbeam
+pkg_couchbeam_fetch = git
+pkg_couchbeam_repo = https://github.com/benoitc/couchbeam
+pkg_couchbeam_commit = master
+
+PACKAGES += covertool
+pkg_covertool_name = covertool
+pkg_covertool_description = Tool to convert Erlang cover data files into Cobertura XML reports
+pkg_covertool_homepage = https://github.com/idubrov/covertool
+pkg_covertool_fetch = git
+pkg_covertool_repo = https://github.com/idubrov/covertool
+pkg_covertool_commit = master
+
+PACKAGES += cowboy
+pkg_cowboy_name = cowboy
+pkg_cowboy_description = Small, fast and modular HTTP server.
+pkg_cowboy_homepage = http://ninenines.eu
+pkg_cowboy_fetch = git
+pkg_cowboy_repo = https://github.com/ninenines/cowboy
+pkg_cowboy_commit = 1.0.4
+
+PACKAGES += cowdb
+pkg_cowdb_name = cowdb
+pkg_cowdb_description = Pure Key/Value database library for Erlang Applications
+pkg_cowdb_homepage = https://github.com/refuge/cowdb
+pkg_cowdb_fetch = git
+pkg_cowdb_repo = https://github.com/refuge/cowdb
+pkg_cowdb_commit = master
+
+PACKAGES += cowlib
+pkg_cowlib_name = cowlib
+pkg_cowlib_description = Support library for manipulating Web protocols.
+pkg_cowlib_homepage = http://ninenines.eu
+pkg_cowlib_fetch = git
+pkg_cowlib_repo = https://github.com/ninenines/cowlib
+pkg_cowlib_commit = 1.0.2
+
+PACKAGES += cpg
+pkg_cpg_name = cpg
+pkg_cpg_description = CloudI Process Groups
+pkg_cpg_homepage = https://github.com/okeuday/cpg
+pkg_cpg_fetch = git
+pkg_cpg_repo = https://github.com/okeuday/cpg
+pkg_cpg_commit = master
+
+PACKAGES += cqerl
+pkg_cqerl_name = cqerl
+pkg_cqerl_description = Native Erlang CQL client for Cassandra
+pkg_cqerl_homepage = https://matehat.github.io/cqerl/
+pkg_cqerl_fetch = git
+pkg_cqerl_repo = https://github.com/matehat/cqerl
+pkg_cqerl_commit = master
+
+PACKAGES += cr
+pkg_cr_name = cr
+pkg_cr_description = Chain Replication
+pkg_cr_homepage = https://synrc.com/apps/cr/doc/cr.htm
+pkg_cr_fetch = git
+pkg_cr_repo = https://github.com/spawnproc/cr
+pkg_cr_commit = master
+
+PACKAGES += cuttlefish
+pkg_cuttlefish_name = cuttlefish
+pkg_cuttlefish_description = never lose your childlike sense of wonder baby cuttlefish, promise me?
+pkg_cuttlefish_homepage = https://github.com/basho/cuttlefish
+pkg_cuttlefish_fetch = git
+pkg_cuttlefish_repo = https://github.com/basho/cuttlefish
+pkg_cuttlefish_commit = master
+
+PACKAGES += damocles
+pkg_damocles_name = damocles
+pkg_damocles_description = Erlang library for generating adversarial network conditions for QAing distributed applications/systems on a single Linux box.
+pkg_damocles_homepage = https://github.com/lostcolony/damocles
+pkg_damocles_fetch = git
+pkg_damocles_repo = https://github.com/lostcolony/damocles
+pkg_damocles_commit = master
+
+PACKAGES += debbie
+pkg_debbie_name = debbie
+pkg_debbie_description = .DEB Built In Erlang
+pkg_debbie_homepage = https://github.com/crownedgrouse/debbie
+pkg_debbie_fetch = git
+pkg_debbie_repo = https://github.com/crownedgrouse/debbie
+pkg_debbie_commit = master
+
+PACKAGES += decimal
+pkg_decimal_name = decimal
+pkg_decimal_description = An Erlang decimal arithmetic library
+pkg_decimal_homepage = https://github.com/tim/erlang-decimal
+pkg_decimal_fetch = git
+pkg_decimal_repo = https://github.com/tim/erlang-decimal
+pkg_decimal_commit = master
+
+PACKAGES += detergent
+pkg_detergent_name = detergent
+pkg_detergent_description = An emulsifying Erlang SOAP library
+pkg_detergent_homepage = https://github.com/devinus/detergent
+pkg_detergent_fetch = git
+pkg_detergent_repo = https://github.com/devinus/detergent
+pkg_detergent_commit = master
+
+PACKAGES += detest
+pkg_detest_name = detest
+pkg_detest_description = Tool for running tests on a cluster of erlang nodes
+pkg_detest_homepage = https://github.com/biokoda/detest
+pkg_detest_fetch = git
+pkg_detest_repo = https://github.com/biokoda/detest
+pkg_detest_commit = master
+
+PACKAGES += dh_date
+pkg_dh_date_name = dh_date
+pkg_dh_date_description = Date formatting / parsing library for erlang
+pkg_dh_date_homepage = https://github.com/daleharvey/dh_date
+pkg_dh_date_fetch = git
+pkg_dh_date_repo = https://github.com/daleharvey/dh_date
+pkg_dh_date_commit = master
+
+PACKAGES += dirbusterl
+pkg_dirbusterl_name = dirbusterl
+pkg_dirbusterl_description = DirBuster successor in Erlang
+pkg_dirbusterl_homepage = https://github.com/silentsignal/DirBustErl
+pkg_dirbusterl_fetch = git
+pkg_dirbusterl_repo = https://github.com/silentsignal/DirBustErl
+pkg_dirbusterl_commit = master
+
+PACKAGES += dispcount
+pkg_dispcount_name = dispcount
+pkg_dispcount_description = Erlang task dispatcher based on ETS counters.
+pkg_dispcount_homepage = https://github.com/ferd/dispcount
+pkg_dispcount_fetch = git
+pkg_dispcount_repo = https://github.com/ferd/dispcount
+pkg_dispcount_commit = master
+
+PACKAGES += dlhttpc
+pkg_dlhttpc_name = dlhttpc
+pkg_dlhttpc_description = dispcount-based lhttpc fork for massive amounts of requests to limited endpoints
+pkg_dlhttpc_homepage = https://github.com/ferd/dlhttpc
+pkg_dlhttpc_fetch = git
+pkg_dlhttpc_repo = https://github.com/ferd/dlhttpc
+pkg_dlhttpc_commit = master
+
+PACKAGES += dns
+pkg_dns_name = dns
+pkg_dns_description = Erlang DNS library
+pkg_dns_homepage = https://github.com/aetrion/dns_erlang
+pkg_dns_fetch = git
+pkg_dns_repo = https://github.com/aetrion/dns_erlang
+pkg_dns_commit = master
+
+PACKAGES += dnssd
+pkg_dnssd_name = dnssd
+pkg_dnssd_description = Erlang interface to Apple's Bonjour DNS Service Discovery implementation
+pkg_dnssd_homepage = https://github.com/benoitc/dnssd_erlang
+pkg_dnssd_fetch = git
+pkg_dnssd_repo = https://github.com/benoitc/dnssd_erlang
+pkg_dnssd_commit = master
+
+PACKAGES += dynamic_compile
+pkg_dynamic_compile_name = dynamic_compile
+pkg_dynamic_compile_description = compile and load erlang modules from string input
+pkg_dynamic_compile_homepage = https://github.com/jkvor/dynamic_compile
+pkg_dynamic_compile_fetch = git
+pkg_dynamic_compile_repo = https://github.com/jkvor/dynamic_compile
+pkg_dynamic_compile_commit = master
+
+PACKAGES += e2
+pkg_e2_name = e2
+pkg_e2_description = Library to simplify writing correct OTP applications.
+pkg_e2_homepage = http://e2project.org
+pkg_e2_fetch = git
+pkg_e2_repo = https://github.com/gar1t/e2
+pkg_e2_commit = master
+
+PACKAGES += eamf
+pkg_eamf_name = eamf
+pkg_eamf_description = eAMF provides Action Message Format (AMF) support for Erlang
+pkg_eamf_homepage = https://github.com/mrinalwadhwa/eamf
+pkg_eamf_fetch = git
+pkg_eamf_repo = https://github.com/mrinalwadhwa/eamf
+pkg_eamf_commit = master
+
+PACKAGES += eavro
+pkg_eavro_name = eavro
+pkg_eavro_description = Apache Avro encoder/decoder
+pkg_eavro_homepage = https://github.com/SIfoxDevTeam/eavro
+pkg_eavro_fetch = git
+pkg_eavro_repo = https://github.com/SIfoxDevTeam/eavro
+pkg_eavro_commit = master
+
+PACKAGES += ecapnp
+pkg_ecapnp_name = ecapnp
+pkg_ecapnp_description = Cap'n Proto library for Erlang
+pkg_ecapnp_homepage = https://github.com/kaos/ecapnp
+pkg_ecapnp_fetch = git
+pkg_ecapnp_repo = https://github.com/kaos/ecapnp
+pkg_ecapnp_commit = master
+
+PACKAGES += econfig
+pkg_econfig_name = econfig
+pkg_econfig_description = simple Erlang config handler using INI files
+pkg_econfig_homepage = https://github.com/benoitc/econfig
+pkg_econfig_fetch = git
+pkg_econfig_repo = https://github.com/benoitc/econfig
+pkg_econfig_commit = master
+
+PACKAGES += edate
+pkg_edate_name = edate
+pkg_edate_description = date manipulation library for erlang
+pkg_edate_homepage = https://github.com/dweldon/edate
+pkg_edate_fetch = git
+pkg_edate_repo = https://github.com/dweldon/edate
+pkg_edate_commit = master
+
+PACKAGES += edgar
+pkg_edgar_name = edgar
+pkg_edgar_description = Erlang Does GNU AR
+pkg_edgar_homepage = https://github.com/crownedgrouse/edgar
+pkg_edgar_fetch = git
+pkg_edgar_repo = https://github.com/crownedgrouse/edgar
+pkg_edgar_commit = master
+
+PACKAGES += edis
+pkg_edis_name = edis
+pkg_edis_description = An Erlang implementation of Redis KV Store
+pkg_edis_homepage = http://inaka.github.com/edis/
+pkg_edis_fetch = git
+pkg_edis_repo = https://github.com/inaka/edis
+pkg_edis_commit = master
+
+PACKAGES += edns
+pkg_edns_name = edns
+pkg_edns_description = Erlang/OTP DNS server
+pkg_edns_homepage = https://github.com/hcvst/erlang-dns
+pkg_edns_fetch = git
+pkg_edns_repo = https://github.com/hcvst/erlang-dns
+pkg_edns_commit = master
+
+PACKAGES += edown
+pkg_edown_name = edown
+pkg_edown_description = EDoc extension for generating Github-flavored Markdown
+pkg_edown_homepage = https://github.com/uwiger/edown
+pkg_edown_fetch = git
+pkg_edown_repo = https://github.com/uwiger/edown
+pkg_edown_commit = master
+
+PACKAGES += eep
+pkg_eep_name = eep
+pkg_eep_description = Erlang Easy Profiling (eep) application provides a way to analyze application performance and call hierarchy
+pkg_eep_homepage = https://github.com/virtan/eep
+pkg_eep_fetch = git
+pkg_eep_repo = https://github.com/virtan/eep
+pkg_eep_commit = master
+
+PACKAGES += eep_app
+pkg_eep_app_name = eep_app
+pkg_eep_app_description = Embedded Event Processing
+pkg_eep_app_homepage = https://github.com/darach/eep-erl
+pkg_eep_app_fetch = git
+pkg_eep_app_repo = https://github.com/darach/eep-erl
+pkg_eep_app_commit = master
+
+PACKAGES += efene
+pkg_efene_name = efene
+pkg_efene_description = Alternative syntax for the Erlang Programming Language focusing on simplicity, ease of use and programmer UX
+pkg_efene_homepage = https://github.com/efene/efene
+pkg_efene_fetch = git
+pkg_efene_repo = https://github.com/efene/efene
+pkg_efene_commit = master
+
+PACKAGES += egeoip
+pkg_egeoip_name = egeoip
+pkg_egeoip_description = Erlang IP Geolocation module, currently supporting the MaxMind GeoLite City Database.
+pkg_egeoip_homepage = https://github.com/mochi/egeoip
+pkg_egeoip_fetch = git
+pkg_egeoip_repo = https://github.com/mochi/egeoip
+pkg_egeoip_commit = master
+
+PACKAGES += ehsa
+pkg_ehsa_name = ehsa
+pkg_ehsa_description = Erlang HTTP server basic and digest authentication modules
+pkg_ehsa_homepage = https://bitbucket.org/a12n/ehsa
+pkg_ehsa_fetch = hg
+pkg_ehsa_repo = https://bitbucket.org/a12n/ehsa
+pkg_ehsa_commit = default
+
+PACKAGES += ej
+pkg_ej_name = ej
+pkg_ej_description = Helper module for working with Erlang terms representing JSON
+pkg_ej_homepage = https://github.com/seth/ej
+pkg_ej_fetch = git
+pkg_ej_repo = https://github.com/seth/ej
+pkg_ej_commit = master
+
+PACKAGES += ejabberd
+pkg_ejabberd_name = ejabberd
+pkg_ejabberd_description = Robust, ubiquitous and massively scalable Jabber / XMPP Instant Messaging platform
+pkg_ejabberd_homepage = https://github.com/processone/ejabberd
+pkg_ejabberd_fetch = git
+pkg_ejabberd_repo = https://github.com/processone/ejabberd
+pkg_ejabberd_commit = master
+
+PACKAGES += ejwt
+pkg_ejwt_name = ejwt
+pkg_ejwt_description = erlang library for JSON Web Token
+pkg_ejwt_homepage = https://github.com/artefactop/ejwt
+pkg_ejwt_fetch = git
+pkg_ejwt_repo = https://github.com/artefactop/ejwt
+pkg_ejwt_commit = master
+
+PACKAGES += ekaf
+pkg_ekaf_name = ekaf
+pkg_ekaf_description = A minimal, high-performance Kafka client in Erlang.
+pkg_ekaf_homepage = https://github.com/helpshift/ekaf
+pkg_ekaf_fetch = git
+pkg_ekaf_repo = https://github.com/helpshift/ekaf
+pkg_ekaf_commit = master
+
+PACKAGES += elarm
+pkg_elarm_name = elarm
+pkg_elarm_description = Alarm Manager for Erlang.
+pkg_elarm_homepage = https://github.com/esl/elarm
+pkg_elarm_fetch = git
+pkg_elarm_repo = https://github.com/esl/elarm
+pkg_elarm_commit = master
+
+PACKAGES += eleveldb
+pkg_eleveldb_name = eleveldb
+pkg_eleveldb_description = Erlang LevelDB API
+pkg_eleveldb_homepage = https://github.com/basho/eleveldb
+pkg_eleveldb_fetch = git
+pkg_eleveldb_repo = https://github.com/basho/eleveldb
+pkg_eleveldb_commit = master
+
+PACKAGES += elixir
+pkg_elixir_name = elixir
+pkg_elixir_description = Elixir is a dynamic, functional language designed for building scalable and maintainable applications
+pkg_elixir_homepage = https://elixir-lang.org/
+pkg_elixir_fetch = git
+pkg_elixir_repo = https://github.com/elixir-lang/elixir
+pkg_elixir_commit = master
+
+PACKAGES += elli
+pkg_elli_name = elli
+pkg_elli_description = Simple, robust and performant Erlang web server
+pkg_elli_homepage = https://github.com/elli-lib/elli
+pkg_elli_fetch = git
+pkg_elli_repo = https://github.com/elli-lib/elli
+pkg_elli_commit = master
+
+PACKAGES += elvis
+pkg_elvis_name = elvis
+pkg_elvis_description = Erlang Style Reviewer
+pkg_elvis_homepage = https://github.com/inaka/elvis
+pkg_elvis_fetch = git
+pkg_elvis_repo = https://github.com/inaka/elvis
+pkg_elvis_commit = master
+
+PACKAGES += emagick
+pkg_emagick_name = emagick
+pkg_emagick_description = Wrapper for Graphics/ImageMagick command line tool.
+pkg_emagick_homepage = https://github.com/kivra/emagick
+pkg_emagick_fetch = git
+pkg_emagick_repo = https://github.com/kivra/emagick
+pkg_emagick_commit = master
+
+PACKAGES += emysql
+pkg_emysql_name = emysql
+pkg_emysql_description = Stable, pure Erlang MySQL driver.
+pkg_emysql_homepage = https://github.com/Eonblast/Emysql
+pkg_emysql_fetch = git
+pkg_emysql_repo = https://github.com/Eonblast/Emysql
+pkg_emysql_commit = master
+
+PACKAGES += enm
+pkg_enm_name = enm
+pkg_enm_description = Erlang driver for nanomsg
+pkg_enm_homepage = https://github.com/basho/enm
+pkg_enm_fetch = git
+pkg_enm_repo = https://github.com/basho/enm
+pkg_enm_commit = master
+
+PACKAGES += entop
+pkg_entop_name = entop
+pkg_entop_description = A top-like tool for monitoring an Erlang node
+pkg_entop_homepage = https://github.com/mazenharake/entop
+pkg_entop_fetch = git
+pkg_entop_repo = https://github.com/mazenharake/entop
+pkg_entop_commit = master
+
+PACKAGES += epcap
+pkg_epcap_name = epcap
+pkg_epcap_description = Erlang packet capture interface using pcap
+pkg_epcap_homepage = https://github.com/msantos/epcap
+pkg_epcap_fetch = git
+pkg_epcap_repo = https://github.com/msantos/epcap
+pkg_epcap_commit = master
+
+PACKAGES += eper
+pkg_eper_name = eper
+pkg_eper_description = Erlang performance and debugging tools.
+pkg_eper_homepage = https://github.com/massemanet/eper
+pkg_eper_fetch = git
+pkg_eper_repo = https://github.com/massemanet/eper
+pkg_eper_commit = master
+
+PACKAGES += epgsql
+pkg_epgsql_name = epgsql
+pkg_epgsql_description = Erlang PostgreSQL client library.
+pkg_epgsql_homepage = https://github.com/epgsql/epgsql
+pkg_epgsql_fetch = git
+pkg_epgsql_repo = https://github.com/epgsql/epgsql
+pkg_epgsql_commit = master
+
+PACKAGES += episcina
+pkg_episcina_name = episcina
+pkg_episcina_description = A simple non-intrusive resource pool for connections
+pkg_episcina_homepage = https://github.com/erlware/episcina
+pkg_episcina_fetch = git
+pkg_episcina_repo = https://github.com/erlware/episcina
+pkg_episcina_commit = master
+
+PACKAGES += eplot
+pkg_eplot_name = eplot
+pkg_eplot_description = A plot engine written in erlang.
+pkg_eplot_homepage = https://github.com/psyeugenic/eplot
+pkg_eplot_fetch = git
+pkg_eplot_repo = https://github.com/psyeugenic/eplot
+pkg_eplot_commit = master
+
+PACKAGES += epocxy
+pkg_epocxy_name = epocxy
+pkg_epocxy_description = Erlang Patterns of Concurrency
+pkg_epocxy_homepage = https://github.com/duomark/epocxy
+pkg_epocxy_fetch = git
+pkg_epocxy_repo = https://github.com/duomark/epocxy
+pkg_epocxy_commit = master
+
+PACKAGES += epubnub
+pkg_epubnub_name = epubnub
+pkg_epubnub_description = Erlang PubNub API
+pkg_epubnub_homepage = https://github.com/tsloughter/epubnub
+pkg_epubnub_fetch = git
+pkg_epubnub_repo = https://github.com/tsloughter/epubnub
+pkg_epubnub_commit = master
+
+PACKAGES += eqm
+pkg_eqm_name = eqm
+pkg_eqm_description = Erlang pub sub with supply-demand channels
+pkg_eqm_homepage = https://github.com/loucash/eqm
+pkg_eqm_fetch = git
+pkg_eqm_repo = https://github.com/loucash/eqm
+pkg_eqm_commit = master
+
+PACKAGES += eredis
+pkg_eredis_name = eredis
+pkg_eredis_description = Erlang Redis client
+pkg_eredis_homepage = https://github.com/wooga/eredis
+pkg_eredis_fetch = git
+pkg_eredis_repo = https://github.com/wooga/eredis
+pkg_eredis_commit = master
+
+PACKAGES += eredis_pool
+pkg_eredis_pool_name = eredis_pool
+pkg_eredis_pool_description = eredis_pool is a pool of Redis clients, using eredis and poolboy.
+pkg_eredis_pool_homepage = https://github.com/hiroeorz/eredis_pool
+pkg_eredis_pool_fetch = git
+pkg_eredis_pool_repo = https://github.com/hiroeorz/eredis_pool
+pkg_eredis_pool_commit = master
+
+PACKAGES += erl_streams
+pkg_erl_streams_name = erl_streams
+pkg_erl_streams_description = Streams in Erlang
+pkg_erl_streams_homepage = https://github.com/epappas/erl_streams
+pkg_erl_streams_fetch = git
+pkg_erl_streams_repo = https://github.com/epappas/erl_streams
+pkg_erl_streams_commit = master
+
+PACKAGES += erlang_cep
+pkg_erlang_cep_name = erlang_cep
+pkg_erlang_cep_description = A basic CEP package written in Erlang
+pkg_erlang_cep_homepage = https://github.com/danmacklin/erlang_cep
+pkg_erlang_cep_fetch = git
+pkg_erlang_cep_repo = https://github.com/danmacklin/erlang_cep
+pkg_erlang_cep_commit = master
+
+PACKAGES += erlang_js
+pkg_erlang_js_name = erlang_js
+pkg_erlang_js_description = A linked-in driver for Erlang to Mozilla's SpiderMonkey JavaScript runtime.
+pkg_erlang_js_homepage = https://github.com/basho/erlang_js
+pkg_erlang_js_fetch = git
+pkg_erlang_js_repo = https://github.com/basho/erlang_js
+pkg_erlang_js_commit = master
+
+PACKAGES += erlang_localtime
+pkg_erlang_localtime_name = erlang_localtime
+pkg_erlang_localtime_description = Erlang library for conversion from one local time to another
+pkg_erlang_localtime_homepage = https://github.com/dmitryme/erlang_localtime
+pkg_erlang_localtime_fetch = git
+pkg_erlang_localtime_repo = https://github.com/dmitryme/erlang_localtime
+pkg_erlang_localtime_commit = master
+
+PACKAGES += erlang_smtp
+pkg_erlang_smtp_name = erlang_smtp
+pkg_erlang_smtp_description = Erlang SMTP and POP3 server code.
+pkg_erlang_smtp_homepage = https://github.com/tonyg/erlang-smtp
+pkg_erlang_smtp_fetch = git
+pkg_erlang_smtp_repo = https://github.com/tonyg/erlang-smtp
+pkg_erlang_smtp_commit = master
+
+PACKAGES += erlang_term
+pkg_erlang_term_name = erlang_term
+pkg_erlang_term_description = Erlang Term Info
+pkg_erlang_term_homepage = https://github.com/okeuday/erlang_term
+pkg_erlang_term_fetch = git
+pkg_erlang_term_repo = https://github.com/okeuday/erlang_term
+pkg_erlang_term_commit = master
+
+PACKAGES += erlastic_search
+pkg_erlastic_search_name = erlastic_search
+pkg_erlastic_search_description = An Erlang app for communicating with Elasticsearch's REST interface.
+pkg_erlastic_search_homepage = https://github.com/tsloughter/erlastic_search
+pkg_erlastic_search_fetch = git
+pkg_erlastic_search_repo = https://github.com/tsloughter/erlastic_search
+pkg_erlastic_search_commit = master
+
+PACKAGES += erlasticsearch
+pkg_erlasticsearch_name = erlasticsearch
+pkg_erlasticsearch_description = Erlang thrift interface to elastic_search
+pkg_erlasticsearch_homepage = https://github.com/dieswaytoofast/erlasticsearch
+pkg_erlasticsearch_fetch = git
+pkg_erlasticsearch_repo = https://github.com/dieswaytoofast/erlasticsearch
+pkg_erlasticsearch_commit = master
+
+PACKAGES += erlbrake
+pkg_erlbrake_name = erlbrake
+pkg_erlbrake_description = Erlang Airbrake notification client
+pkg_erlbrake_homepage = https://github.com/kenpratt/erlbrake
+pkg_erlbrake_fetch = git
+pkg_erlbrake_repo = https://github.com/kenpratt/erlbrake
+pkg_erlbrake_commit = master
+
+PACKAGES += erlcloud
+pkg_erlcloud_name = erlcloud
+pkg_erlcloud_description = Cloud Computing library for erlang (Amazon EC2, S3, SQS, SimpleDB, Mechanical Turk, ELB)
+pkg_erlcloud_homepage = https://github.com/gleber/erlcloud
+pkg_erlcloud_fetch = git
+pkg_erlcloud_repo = https://github.com/gleber/erlcloud
+pkg_erlcloud_commit = master
+
+PACKAGES += erlcron
+pkg_erlcron_name = erlcron
+pkg_erlcron_description = Erlang cronish system
+pkg_erlcron_homepage = https://github.com/erlware/erlcron
+pkg_erlcron_fetch = git
+pkg_erlcron_repo = https://github.com/erlware/erlcron
+pkg_erlcron_commit = master
+
+PACKAGES += erldb
+pkg_erldb_name = erldb
+pkg_erldb_description = ORM (Object-relational mapping) application implemented in Erlang
+pkg_erldb_homepage = http://erldb.org
+pkg_erldb_fetch = git
+pkg_erldb_repo = https://github.com/erldb/erldb
+pkg_erldb_commit = master
+
+PACKAGES += erldis
+pkg_erldis_name = erldis
+pkg_erldis_description = Redis Erlang client library
+pkg_erldis_homepage = https://github.com/cstar/erldis
+pkg_erldis_fetch = git
+pkg_erldis_repo = https://github.com/cstar/erldis
+pkg_erldis_commit = master
+
+PACKAGES += erldns
+pkg_erldns_name = erldns
+pkg_erldns_description = DNS server, in Erlang.
+pkg_erldns_homepage = https://github.com/aetrion/erl-dns
+pkg_erldns_fetch = git
+pkg_erldns_repo = https://github.com/aetrion/erl-dns
+pkg_erldns_commit = master
+
+PACKAGES += erldocker
+pkg_erldocker_name = erldocker
+pkg_erldocker_description = Docker Remote API client for Erlang
+pkg_erldocker_homepage = https://github.com/proger/erldocker
+pkg_erldocker_fetch = git
+pkg_erldocker_repo = https://github.com/proger/erldocker
+pkg_erldocker_commit = master
+
+PACKAGES += erlfsmon
+pkg_erlfsmon_name = erlfsmon
+pkg_erlfsmon_description = Erlang filesystem event watcher for Linux and OSX
+pkg_erlfsmon_homepage = https://github.com/proger/erlfsmon
+pkg_erlfsmon_fetch = git
+pkg_erlfsmon_repo = https://github.com/proger/erlfsmon
+pkg_erlfsmon_commit = master
+
+PACKAGES += erlgit
+pkg_erlgit_name = erlgit
+pkg_erlgit_description = Erlang convenience wrapper around git executable
+pkg_erlgit_homepage = https://github.com/gleber/erlgit
+pkg_erlgit_fetch = git
+pkg_erlgit_repo = https://github.com/gleber/erlgit
+pkg_erlgit_commit = master
+
+PACKAGES += erlguten
+pkg_erlguten_name = erlguten
+pkg_erlguten_description = ErlGuten is a system for high-quality typesetting, written purely in Erlang.
+pkg_erlguten_homepage = https://github.com/richcarl/erlguten
+pkg_erlguten_fetch = git
+pkg_erlguten_repo = https://github.com/richcarl/erlguten
+pkg_erlguten_commit = master
+
+PACKAGES += erlmc
+pkg_erlmc_name = erlmc
+pkg_erlmc_description = Erlang memcached binary protocol client
+pkg_erlmc_homepage = https://github.com/jkvor/erlmc
+pkg_erlmc_fetch = git
+pkg_erlmc_repo = https://github.com/jkvor/erlmc
+pkg_erlmc_commit = master
+
+PACKAGES += erlmongo
+pkg_erlmongo_name = erlmongo
+pkg_erlmongo_description = Record-based Erlang driver for MongoDB with GridFS support
+pkg_erlmongo_homepage = https://github.com/SergejJurecko/erlmongo
+pkg_erlmongo_fetch = git
+pkg_erlmongo_repo = https://github.com/SergejJurecko/erlmongo
+pkg_erlmongo_commit = master
+
+PACKAGES += erlog
+pkg_erlog_name = erlog
+pkg_erlog_description = Prolog interpreter in and for Erlang
+pkg_erlog_homepage = https://github.com/rvirding/erlog
+pkg_erlog_fetch = git
+pkg_erlog_repo = https://github.com/rvirding/erlog
+pkg_erlog_commit = master
+
+PACKAGES += erlpass
+pkg_erlpass_name = erlpass
+pkg_erlpass_description = A library to handle password hashing and changing in a safe manner, independent from any kind of storage whatsoever.
+pkg_erlpass_homepage = https://github.com/ferd/erlpass
+pkg_erlpass_fetch = git
+pkg_erlpass_repo = https://github.com/ferd/erlpass
+pkg_erlpass_commit = master
+
+PACKAGES += erlport
+pkg_erlport_name = erlport
+pkg_erlport_description = ErlPort - connect Erlang to other languages
+pkg_erlport_homepage = https://github.com/hdima/erlport
+pkg_erlport_fetch = git
+pkg_erlport_repo = https://github.com/hdima/erlport
+pkg_erlport_commit = master
+
+PACKAGES += erlsh
+pkg_erlsh_name = erlsh
+pkg_erlsh_description = Erlang shell tools
+pkg_erlsh_homepage = https://github.com/proger/erlsh
+pkg_erlsh_fetch = git
+pkg_erlsh_repo = https://github.com/proger/erlsh
+pkg_erlsh_commit = master
+
+PACKAGES += erlsha2
+pkg_erlsha2_name = erlsha2
+pkg_erlsha2_description = SHA-224, SHA-256, SHA-384, SHA-512 implemented in Erlang NIFs.
+pkg_erlsha2_homepage = https://github.com/vinoski/erlsha2
+pkg_erlsha2_fetch = git
+pkg_erlsha2_repo = https://github.com/vinoski/erlsha2
+pkg_erlsha2_commit = master
+
+PACKAGES += erlsom
+pkg_erlsom_name = erlsom
+pkg_erlsom_description = XML parser for Erlang
+pkg_erlsom_homepage = https://github.com/willemdj/erlsom
+pkg_erlsom_fetch = git
+pkg_erlsom_repo = https://github.com/willemdj/erlsom
+pkg_erlsom_commit = master
+
+PACKAGES += erlubi
+pkg_erlubi_name = erlubi
+pkg_erlubi_description = Ubigraph Erlang Client (and Process Visualizer)
+pkg_erlubi_homepage = https://github.com/krestenkrab/erlubi
+pkg_erlubi_fetch = git
+pkg_erlubi_repo = https://github.com/krestenkrab/erlubi
+pkg_erlubi_commit = master
+
+PACKAGES += erlvolt
+pkg_erlvolt_name = erlvolt
+pkg_erlvolt_description = VoltDB Erlang Client Driver
+pkg_erlvolt_homepage = https://github.com/VoltDB/voltdb-client-erlang
+pkg_erlvolt_fetch = git
+pkg_erlvolt_repo = https://github.com/VoltDB/voltdb-client-erlang
+pkg_erlvolt_commit = master
+
+PACKAGES += erlware_commons
+pkg_erlware_commons_name = erlware_commons
+pkg_erlware_commons_description = Erlware Commons is an Erlware project focused on all aspects of reusable Erlang components.
+pkg_erlware_commons_homepage = https://github.com/erlware/erlware_commons
+pkg_erlware_commons_fetch = git
+pkg_erlware_commons_repo = https://github.com/erlware/erlware_commons
+pkg_erlware_commons_commit = master
+
+PACKAGES += erlydtl
+pkg_erlydtl_name = erlydtl
+pkg_erlydtl_description = Django Template Language for Erlang.
+pkg_erlydtl_homepage = https://github.com/erlydtl/erlydtl
+pkg_erlydtl_fetch = git
+pkg_erlydtl_repo = https://github.com/erlydtl/erlydtl
+pkg_erlydtl_commit = master
+
+PACKAGES += errd
+pkg_errd_name = errd
+pkg_errd_description = Erlang RRDTool library
+pkg_errd_homepage = https://github.com/archaelus/errd
+pkg_errd_fetch = git
+pkg_errd_repo = https://github.com/archaelus/errd
+pkg_errd_commit = master
+
+PACKAGES += erserve
+pkg_erserve_name = erserve
+pkg_erserve_description = Erlang/Rserve communication interface
+pkg_erserve_homepage = https://github.com/del/erserve
+pkg_erserve_fetch = git
+pkg_erserve_repo = https://github.com/del/erserve
+pkg_erserve_commit = master
+
+PACKAGES += erwa
+pkg_erwa_name = erwa
+pkg_erwa_description = A WAMP router and client written in Erlang.
+pkg_erwa_homepage = https://github.com/bwegh/erwa
+pkg_erwa_fetch = git
+pkg_erwa_repo = https://github.com/bwegh/erwa
+pkg_erwa_commit = master
+
+PACKAGES += escalus
+pkg_escalus_name = escalus
+pkg_escalus_description = An XMPP client library in Erlang for conveniently testing XMPP servers
+pkg_escalus_homepage = https://github.com/esl/escalus
+pkg_escalus_fetch = git
+pkg_escalus_repo = https://github.com/esl/escalus
+pkg_escalus_commit = master
+
+PACKAGES += esh_mk
+pkg_esh_mk_name = esh_mk
+pkg_esh_mk_description = esh template engine plugin for erlang.mk
+pkg_esh_mk_homepage = https://github.com/crownedgrouse/esh.mk
+pkg_esh_mk_fetch = git
+pkg_esh_mk_repo = https://github.com/crownedgrouse/esh.mk.git
+pkg_esh_mk_commit = master
+
+PACKAGES += espec
+pkg_espec_name = espec
+pkg_espec_description = ESpec: Behaviour driven development framework for Erlang
+pkg_espec_homepage = https://github.com/lucaspiller/espec
+pkg_espec_fetch = git
+pkg_espec_repo = https://github.com/lucaspiller/espec
+pkg_espec_commit = master
+
+PACKAGES += estatsd
+pkg_estatsd_name = estatsd
+pkg_estatsd_description = Erlang stats aggregation app that periodically flushes data to Graphite
+pkg_estatsd_homepage = https://github.com/RJ/estatsd
+pkg_estatsd_fetch = git
+pkg_estatsd_repo = https://github.com/RJ/estatsd
+pkg_estatsd_commit = master
+
+PACKAGES += etap
+pkg_etap_name = etap
+pkg_etap_description = etap is a simple Erlang testing library that provides TAP-compliant output.
+pkg_etap_homepage = https://github.com/ngerakines/etap
+pkg_etap_fetch = git
+pkg_etap_repo = https://github.com/ngerakines/etap
+pkg_etap_commit = master
+
+PACKAGES += etest
+pkg_etest_name = etest
+pkg_etest_description = A lightweight, convention over configuration test framework for Erlang
+pkg_etest_homepage = https://github.com/wooga/etest
+pkg_etest_fetch = git
+pkg_etest_repo = https://github.com/wooga/etest
+pkg_etest_commit = master
+
+PACKAGES += etest_http
+pkg_etest_http_name = etest_http
+pkg_etest_http_description = etest Assertions around HTTP (client-side)
+pkg_etest_http_homepage = https://github.com/wooga/etest_http
+pkg_etest_http_fetch = git
+pkg_etest_http_repo = https://github.com/wooga/etest_http
+pkg_etest_http_commit = master
+
+PACKAGES += etoml
+pkg_etoml_name = etoml
+pkg_etoml_description = TOML language erlang parser
+pkg_etoml_homepage = https://github.com/kalta/etoml
+pkg_etoml_fetch = git
+pkg_etoml_repo = https://github.com/kalta/etoml
+pkg_etoml_commit = master
+
+PACKAGES += eunit
+pkg_eunit_name = eunit
+pkg_eunit_description = The EUnit lightweight unit testing framework for Erlang - this is the canonical development repository.
+pkg_eunit_homepage = https://github.com/richcarl/eunit
+pkg_eunit_fetch = git
+pkg_eunit_repo = https://github.com/richcarl/eunit
+pkg_eunit_commit = master
+
+PACKAGES += eunit_formatters
+pkg_eunit_formatters_name = eunit_formatters
+pkg_eunit_formatters_description = Because eunit's output sucks. Let's make it better.
+pkg_eunit_formatters_homepage = https://github.com/seancribbs/eunit_formatters
+pkg_eunit_formatters_fetch = git
+pkg_eunit_formatters_repo = https://github.com/seancribbs/eunit_formatters
+pkg_eunit_formatters_commit = master
+
+PACKAGES += euthanasia
+pkg_euthanasia_name = euthanasia
+pkg_euthanasia_description = Merciful killer for your Erlang processes
+pkg_euthanasia_homepage = https://github.com/doubleyou/euthanasia
+pkg_euthanasia_fetch = git
+pkg_euthanasia_repo = https://github.com/doubleyou/euthanasia
+pkg_euthanasia_commit = master
+
+PACKAGES += evum
+pkg_evum_name = evum
+pkg_evum_description = Spawn Linux VMs as Erlang processes in the Erlang VM
+pkg_evum_homepage = https://github.com/msantos/evum
+pkg_evum_fetch = git
+pkg_evum_repo = https://github.com/msantos/evum
+pkg_evum_commit = master
+
+PACKAGES += exec
+pkg_exec_name = erlexec
+pkg_exec_description = Execute and control OS processes from Erlang/OTP.
+pkg_exec_homepage = http://saleyn.github.io/erlexec
+pkg_exec_fetch = git
+pkg_exec_repo = https://github.com/saleyn/erlexec
+pkg_exec_commit = master
+
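+# Note: pkg_NAME_name may differ from the package key, as with "exec"
+# above, which is requested as exec but fetched and built as erlexec.
+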
+PACKAGES += exml
+pkg_exml_name = exml
+pkg_exml_description = XML parsing library in Erlang
+pkg_exml_homepage = https://github.com/paulgray/exml
+pkg_exml_fetch = git
+pkg_exml_repo = https://github.com/paulgray/exml
+pkg_exml_commit = master
+
+PACKAGES += exometer
+pkg_exometer_name = exometer
+pkg_exometer_description = Basic measurement objects and probe behavior
+pkg_exometer_homepage = https://github.com/Feuerlabs/exometer
+pkg_exometer_fetch = git
+pkg_exometer_repo = https://github.com/Feuerlabs/exometer
+pkg_exometer_commit = master
+
+PACKAGES += exs1024
+pkg_exs1024_name = exs1024
+pkg_exs1024_description = Xorshift1024star pseudo random number generator for Erlang.
+pkg_exs1024_homepage = https://github.com/jj1bdx/exs1024
+pkg_exs1024_fetch = git
+pkg_exs1024_repo = https://github.com/jj1bdx/exs1024
+pkg_exs1024_commit = master
+
+PACKAGES += exs64
+pkg_exs64_name = exs64
+pkg_exs64_description = Xorshift64star pseudo random number generator for Erlang.
+pkg_exs64_homepage = https://github.com/jj1bdx/exs64
+pkg_exs64_fetch = git
+pkg_exs64_repo = https://github.com/jj1bdx/exs64
+pkg_exs64_commit = master
+
+PACKAGES += exsplus116
+pkg_exsplus116_name = exsplus116
+pkg_exsplus116_description = Xorshift116plus for Erlang
+pkg_exsplus116_homepage = https://github.com/jj1bdx/exsplus116
+pkg_exsplus116_fetch = git
+pkg_exsplus116_repo = https://github.com/jj1bdx/exsplus116
+pkg_exsplus116_commit = master
+
+PACKAGES += exsplus128
+pkg_exsplus128_name = exsplus128
+pkg_exsplus128_description = Xorshift128plus pseudo random number generator for Erlang.
+pkg_exsplus128_homepage = https://github.com/jj1bdx/exsplus128
+pkg_exsplus128_fetch = git
+pkg_exsplus128_repo = https://github.com/jj1bdx/exsplus128
+pkg_exsplus128_commit = master
+
+PACKAGES += ezmq
+pkg_ezmq_name = ezmq
+pkg_ezmq_description = zMQ implemented in Erlang
+pkg_ezmq_homepage = https://github.com/RoadRunnr/ezmq
+pkg_ezmq_fetch = git
+pkg_ezmq_repo = https://github.com/RoadRunnr/ezmq
+pkg_ezmq_commit = master
+
+PACKAGES += ezmtp
+pkg_ezmtp_name = ezmtp
+pkg_ezmtp_description = ZMTP protocol in pure Erlang.
+pkg_ezmtp_homepage = https://github.com/a13x/ezmtp
+pkg_ezmtp_fetch = git
+pkg_ezmtp_repo = https://github.com/a13x/ezmtp
+pkg_ezmtp_commit = master
+
+PACKAGES += fast_disk_log
+pkg_fast_disk_log_name = fast_disk_log
+pkg_fast_disk_log_description = Pool-based asynchronous Erlang disk logger
+pkg_fast_disk_log_homepage = https://github.com/lpgauth/fast_disk_log
+pkg_fast_disk_log_fetch = git
+pkg_fast_disk_log_repo = https://github.com/lpgauth/fast_disk_log
+pkg_fast_disk_log_commit = master
+
+PACKAGES += feeder
+pkg_feeder_name = feeder
+pkg_feeder_description = Stream parse RSS and Atom formatted XML feeds.
+pkg_feeder_homepage = https://github.com/michaelnisi/feeder
+pkg_feeder_fetch = git
+pkg_feeder_repo = https://github.com/michaelnisi/feeder
+pkg_feeder_commit = master
+
+PACKAGES += find_crate
+pkg_find_crate_name = find_crate
+pkg_find_crate_description = Find Rust libs and exes in Erlang application priv directory
+pkg_find_crate_homepage = https://github.com/goertzenator/find_crate
+pkg_find_crate_fetch = git
+pkg_find_crate_repo = https://github.com/goertzenator/find_crate
+pkg_find_crate_commit = master
+
+PACKAGES += fix
+pkg_fix_name = fix
+pkg_fix_description = http://fixprotocol.org/ implementation.
+pkg_fix_homepage = https://github.com/maxlapshin/fix
+pkg_fix_fetch = git
+pkg_fix_repo = https://github.com/maxlapshin/fix
+pkg_fix_commit = master
+
+PACKAGES += flower
+pkg_flower_name = flower
+pkg_flower_description = FlowER - an Erlang OpenFlow development platform
+pkg_flower_homepage = https://github.com/travelping/flower
+pkg_flower_fetch = git
+pkg_flower_repo = https://github.com/travelping/flower
+pkg_flower_commit = master
+
+PACKAGES += fn
+pkg_fn_name = fn
+pkg_fn_description = Function utilities for Erlang
+pkg_fn_homepage = https://github.com/reiddraper/fn
+pkg_fn_fetch = git
+pkg_fn_repo = https://github.com/reiddraper/fn
+pkg_fn_commit = master
+
+PACKAGES += folsom
+pkg_folsom_name = folsom
+pkg_folsom_description = Expose Erlang Events and Metrics
+pkg_folsom_homepage = https://github.com/boundary/folsom
+pkg_folsom_fetch = git
+pkg_folsom_repo = https://github.com/boundary/folsom
+pkg_folsom_commit = master
+
+PACKAGES += folsom_cowboy
+pkg_folsom_cowboy_name = folsom_cowboy
+pkg_folsom_cowboy_description = A Cowboy based Folsom HTTP Wrapper.
+pkg_folsom_cowboy_homepage = https://github.com/boundary/folsom_cowboy
+pkg_folsom_cowboy_fetch = git
+pkg_folsom_cowboy_repo = https://github.com/boundary/folsom_cowboy
+pkg_folsom_cowboy_commit = master
+
+PACKAGES += folsomite
+pkg_folsomite_name = folsomite
+pkg_folsomite_description = blow up your graphite / riemann server with folsom metrics
+pkg_folsomite_homepage = https://github.com/campanja/folsomite
+pkg_folsomite_fetch = git
+pkg_folsomite_repo = https://github.com/campanja/folsomite
+pkg_folsomite_commit = master
+
+PACKAGES += fs
+pkg_fs_name = fs
+pkg_fs_description = Erlang FileSystem Listener
+pkg_fs_homepage = https://github.com/synrc/fs
+pkg_fs_fetch = git
+pkg_fs_repo = https://github.com/synrc/fs
+pkg_fs_commit = master
+
+PACKAGES += fuse
+pkg_fuse_name = fuse
+pkg_fuse_description = A Circuit Breaker for Erlang
+pkg_fuse_homepage = https://github.com/jlouis/fuse
+pkg_fuse_fetch = git
+pkg_fuse_repo = https://github.com/jlouis/fuse
+pkg_fuse_commit = master
+
+PACKAGES += gcm
+pkg_gcm_name = gcm
+pkg_gcm_description = An Erlang application for Google Cloud Messaging
+pkg_gcm_homepage = https://github.com/pdincau/gcm-erlang
+pkg_gcm_fetch = git
+pkg_gcm_repo = https://github.com/pdincau/gcm-erlang
+pkg_gcm_commit = master
+
+PACKAGES += gcprof
+pkg_gcprof_name = gcprof
+pkg_gcprof_description = Garbage Collection profiler for Erlang
+pkg_gcprof_homepage = https://github.com/knutin/gcprof
+pkg_gcprof_fetch = git
+pkg_gcprof_repo = https://github.com/knutin/gcprof
+pkg_gcprof_commit = master
+
+PACKAGES += geas
+pkg_geas_name = geas
+pkg_geas_description = Guess Erlang Application Scattering
+pkg_geas_homepage = https://github.com/crownedgrouse/geas
+pkg_geas_fetch = git
+pkg_geas_repo = https://github.com/crownedgrouse/geas
+pkg_geas_commit = master
+
+PACKAGES += geef
+pkg_geef_name = geef
+pkg_geef_description = Git NEEEEF (Erlang NIF)
+pkg_geef_homepage = https://github.com/carlosmn/geef
+pkg_geef_fetch = git
+pkg_geef_repo = https://github.com/carlosmn/geef
+pkg_geef_commit = master
+
+PACKAGES += gen_coap
+pkg_gen_coap_name = gen_coap
+pkg_gen_coap_description = Generic Erlang CoAP Client/Server
+pkg_gen_coap_homepage = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_fetch = git
+pkg_gen_coap_repo = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_commit = master
+
+PACKAGES += gen_cycle
+pkg_gen_cycle_name = gen_cycle
+pkg_gen_cycle_description = Simple, generic OTP behaviour for recurring tasks
+pkg_gen_cycle_homepage = https://github.com/aerosol/gen_cycle
+pkg_gen_cycle_fetch = git
+pkg_gen_cycle_repo = https://github.com/aerosol/gen_cycle
+pkg_gen_cycle_commit = develop
+
+PACKAGES += gen_icmp
+pkg_gen_icmp_name = gen_icmp
+pkg_gen_icmp_description = Erlang interface to ICMP sockets
+pkg_gen_icmp_homepage = https://github.com/msantos/gen_icmp
+pkg_gen_icmp_fetch = git
+pkg_gen_icmp_repo = https://github.com/msantos/gen_icmp
+pkg_gen_icmp_commit = master
+
+PACKAGES += gen_leader
+pkg_gen_leader_name = gen_leader
+pkg_gen_leader_description = leader election behavior
+pkg_gen_leader_homepage = https://github.com/garret-smith/gen_leader_revival
+pkg_gen_leader_fetch = git
+pkg_gen_leader_repo = https://github.com/garret-smith/gen_leader_revival
+pkg_gen_leader_commit = master
+
+PACKAGES += gen_nb_server
+pkg_gen_nb_server_name = gen_nb_server
+pkg_gen_nb_server_description = OTP behavior for writing non-blocking servers
+pkg_gen_nb_server_homepage = https://github.com/kevsmith/gen_nb_server
+pkg_gen_nb_server_fetch = git
+pkg_gen_nb_server_repo = https://github.com/kevsmith/gen_nb_server
+pkg_gen_nb_server_commit = master
+
+PACKAGES += gen_paxos
+pkg_gen_paxos_name = gen_paxos
+pkg_gen_paxos_description = An Erlang/OTP-style implementation of the PAXOS distributed consensus protocol
+pkg_gen_paxos_homepage = https://github.com/gburd/gen_paxos
+pkg_gen_paxos_fetch = git
+pkg_gen_paxos_repo = https://github.com/gburd/gen_paxos
+pkg_gen_paxos_commit = master
+
+PACKAGES += gen_rpc
+pkg_gen_rpc_name = gen_rpc
+pkg_gen_rpc_description = A scalable RPC library for Erlang-VM based languages
+pkg_gen_rpc_homepage = https://github.com/priestjim/gen_rpc
+pkg_gen_rpc_fetch = git
+pkg_gen_rpc_repo = https://github.com/priestjim/gen_rpc.git
+pkg_gen_rpc_commit = master
+
+PACKAGES += gen_smtp
+pkg_gen_smtp_name = gen_smtp
+pkg_gen_smtp_description = A generic Erlang SMTP server and client that can be extended via callback modules
+pkg_gen_smtp_homepage = https://github.com/Vagabond/gen_smtp
+pkg_gen_smtp_fetch = git
+pkg_gen_smtp_repo = https://github.com/Vagabond/gen_smtp
+pkg_gen_smtp_commit = master
+
+PACKAGES += gen_tracker
+pkg_gen_tracker_name = gen_tracker
+pkg_gen_tracker_description = supervisor with ets handling of children and their metadata
+pkg_gen_tracker_homepage = https://github.com/erlyvideo/gen_tracker
+pkg_gen_tracker_fetch = git
+pkg_gen_tracker_repo = https://github.com/erlyvideo/gen_tracker
+pkg_gen_tracker_commit = master
+
+PACKAGES += gen_unix
+pkg_gen_unix_name = gen_unix
+pkg_gen_unix_description = Erlang Unix socket interface
+pkg_gen_unix_homepage = https://github.com/msantos/gen_unix
+pkg_gen_unix_fetch = git
+pkg_gen_unix_repo = https://github.com/msantos/gen_unix
+pkg_gen_unix_commit = master
+
+PACKAGES += geode
+pkg_geode_name = geode
+pkg_geode_description = geohash/proximity lookup in pure, uncut Erlang.
+pkg_geode_homepage = https://github.com/bradfordw/geode
+pkg_geode_fetch = git
+pkg_geode_repo = https://github.com/bradfordw/geode
+pkg_geode_commit = master
+
+PACKAGES += getopt
+pkg_getopt_name = getopt
+pkg_getopt_description = Module to parse command line arguments using the GNU getopt syntax
+pkg_getopt_homepage = https://github.com/jcomellas/getopt
+pkg_getopt_fetch = git
+pkg_getopt_repo = https://github.com/jcomellas/getopt
+pkg_getopt_commit = master
+
+PACKAGES += gettext
+pkg_gettext_name = gettext
+pkg_gettext_description = Erlang internationalization library.
+pkg_gettext_homepage = https://github.com/etnt/gettext
+pkg_gettext_fetch = git
+pkg_gettext_repo = https://github.com/etnt/gettext
+pkg_gettext_commit = master
+
+PACKAGES += giallo
+pkg_giallo_name = giallo
+pkg_giallo_description = Small and flexible web framework on top of Cowboy
+pkg_giallo_homepage = https://github.com/kivra/giallo
+pkg_giallo_fetch = git
+pkg_giallo_repo = https://github.com/kivra/giallo
+pkg_giallo_commit = master
+
+PACKAGES += gin
+pkg_gin_name = gin
+pkg_gin_description = The guards and for Erlang parse_transform
+pkg_gin_homepage = https://github.com/mad-cocktail/gin
+pkg_gin_fetch = git
+pkg_gin_repo = https://github.com/mad-cocktail/gin
+pkg_gin_commit = master
+
+PACKAGES += gitty
+pkg_gitty_name = gitty
+pkg_gitty_description = Git access in Erlang
+pkg_gitty_homepage = https://github.com/maxlapshin/gitty
+pkg_gitty_fetch = git
+pkg_gitty_repo = https://github.com/maxlapshin/gitty
+pkg_gitty_commit = master
+
+PACKAGES += gold_fever
+pkg_gold_fever_name = gold_fever
+pkg_gold_fever_description = A Treasure Hunt for Erlangers
+pkg_gold_fever_homepage = https://github.com/inaka/gold_fever
+pkg_gold_fever_fetch = git
+pkg_gold_fever_repo = https://github.com/inaka/gold_fever
+pkg_gold_fever_commit = master
+
+PACKAGES += gpb
+pkg_gpb_name = gpb
+pkg_gpb_description = A Google Protobuf implementation for Erlang
+pkg_gpb_homepage = https://github.com/tomas-abrahamsson/gpb
+pkg_gpb_fetch = git
+pkg_gpb_repo = https://github.com/tomas-abrahamsson/gpb
+pkg_gpb_commit = master
+
+PACKAGES += gproc
+pkg_gproc_name = gproc
+pkg_gproc_description = Extended process registry for Erlang
+pkg_gproc_homepage = https://github.com/uwiger/gproc
+pkg_gproc_fetch = git
+pkg_gproc_repo = https://github.com/uwiger/gproc
+pkg_gproc_commit = master
+
+PACKAGES += grapherl
+pkg_grapherl_name = grapherl
+pkg_grapherl_description = Create graphs of Erlang systems and programs
+pkg_grapherl_homepage = https://github.com/eproxus/grapherl
+pkg_grapherl_fetch = git
+pkg_grapherl_repo = https://github.com/eproxus/grapherl
+pkg_grapherl_commit = master
+
+PACKAGES += grpc
+pkg_grpc_name = grpc
+pkg_grpc_description = gRPC server in Erlang
+pkg_grpc_homepage = https://github.com/Bluehouse-Technology/grpc
+pkg_grpc_fetch = git
+pkg_grpc_repo = https://github.com/Bluehouse-Technology/grpc
+pkg_grpc_commit = master
+
+PACKAGES += grpc_client
+pkg_grpc_client_name = grpc_client
+pkg_grpc_client_description = gRPC client in Erlang
+pkg_grpc_client_homepage = https://github.com/Bluehouse-Technology/grpc_client
+pkg_grpc_client_fetch = git
+pkg_grpc_client_repo = https://github.com/Bluehouse-Technology/grpc_client
+pkg_grpc_client_commit = master
+
+PACKAGES += gun
+pkg_gun_name = gun
+pkg_gun_description = Asynchronous SPDY, HTTP and Websocket client written in Erlang.
+pkg_gun_homepage = http://ninenines.eu
+pkg_gun_fetch = git
+pkg_gun_repo = https://github.com/ninenines/gun
+pkg_gun_commit = master
+
+PACKAGES += gut
+pkg_gut_name = gut
+pkg_gut_description = gut is a template printing (aka scaffolding) tool for Erlang, like rails generate or yeoman
+pkg_gut_homepage = https://github.com/unbalancedparentheses/gut
+pkg_gut_fetch = git
+pkg_gut_repo = https://github.com/unbalancedparentheses/gut
+pkg_gut_commit = master
+
+PACKAGES += hackney
+pkg_hackney_name = hackney
+pkg_hackney_description = simple HTTP client in Erlang
+pkg_hackney_homepage = https://github.com/benoitc/hackney
+pkg_hackney_fetch = git
+pkg_hackney_repo = https://github.com/benoitc/hackney
+pkg_hackney_commit = master
+
+PACKAGES += hamcrest
+pkg_hamcrest_name = hamcrest
+pkg_hamcrest_description = Erlang port of Hamcrest
+pkg_hamcrest_homepage = https://github.com/hyperthunk/hamcrest-erlang
+pkg_hamcrest_fetch = git
+pkg_hamcrest_repo = https://github.com/hyperthunk/hamcrest-erlang
+pkg_hamcrest_commit = master
+
+PACKAGES += hanoidb
+pkg_hanoidb_name = hanoidb
+pkg_hanoidb_description = Erlang LSM BTree Storage
+pkg_hanoidb_homepage = https://github.com/krestenkrab/hanoidb
+pkg_hanoidb_fetch = git
+pkg_hanoidb_repo = https://github.com/krestenkrab/hanoidb
+pkg_hanoidb_commit = master
+
+PACKAGES += hottub
+pkg_hottub_name = hottub
+pkg_hottub_description = Permanent Erlang Worker Pool
+pkg_hottub_homepage = https://github.com/bfrog/hottub
+pkg_hottub_fetch = git
+pkg_hottub_repo = https://github.com/bfrog/hottub
+pkg_hottub_commit = master
+
+PACKAGES += hpack
+pkg_hpack_name = hpack
+pkg_hpack_description = HPACK Implementation for Erlang
+pkg_hpack_homepage = https://github.com/joedevivo/hpack
+pkg_hpack_fetch = git
+pkg_hpack_repo = https://github.com/joedevivo/hpack
+pkg_hpack_commit = master
+
+PACKAGES += hyper
+pkg_hyper_name = hyper
+pkg_hyper_description = Erlang implementation of HyperLogLog
+pkg_hyper_homepage = https://github.com/GameAnalytics/hyper
+pkg_hyper_fetch = git
+pkg_hyper_repo = https://github.com/GameAnalytics/hyper
+pkg_hyper_commit = master
+
+PACKAGES += i18n
+pkg_i18n_name = i18n
+pkg_i18n_description = International components for unicode from Erlang (unicode, date, string, number, format, locale, localization, transliteration, icu4e)
+pkg_i18n_homepage = https://github.com/erlang-unicode/i18n
+pkg_i18n_fetch = git
+pkg_i18n_repo = https://github.com/erlang-unicode/i18n
+pkg_i18n_commit = master
+
+PACKAGES += ibrowse
+pkg_ibrowse_name = ibrowse
+pkg_ibrowse_description = Erlang HTTP client
+pkg_ibrowse_homepage = https://github.com/cmullaparthi/ibrowse
+pkg_ibrowse_fetch = git
+pkg_ibrowse_repo = https://github.com/cmullaparthi/ibrowse
+pkg_ibrowse_commit = master
+
+PACKAGES += idna
+pkg_idna_name = idna
+pkg_idna_description = Erlang IDNA lib
+pkg_idna_homepage = https://github.com/benoitc/erlang-idna
+pkg_idna_fetch = git
+pkg_idna_repo = https://github.com/benoitc/erlang-idna
+pkg_idna_commit = master
+
+PACKAGES += ierlang
+pkg_ierlang_name = ierlang
+pkg_ierlang_description = An Erlang language kernel for IPython.
+pkg_ierlang_homepage = https://github.com/robbielynch/ierlang
+pkg_ierlang_fetch = git
+pkg_ierlang_repo = https://github.com/robbielynch/ierlang
+pkg_ierlang_commit = master
+
+PACKAGES += iota
+pkg_iota_name = iota
+pkg_iota_description = iota (Inter-dependency Objective Testing Apparatus) - a tool to enforce clean separation of responsibilities in Erlang code
+pkg_iota_homepage = https://github.com/jpgneves/iota
+pkg_iota_fetch = git
+pkg_iota_repo = https://github.com/jpgneves/iota
+pkg_iota_commit = master
+
+PACKAGES += irc_lib
+pkg_irc_lib_name = irc_lib
+pkg_irc_lib_description = Erlang IRC client library
+pkg_irc_lib_homepage = https://github.com/OtpChatBot/irc_lib
+pkg_irc_lib_fetch = git
+pkg_irc_lib_repo = https://github.com/OtpChatBot/irc_lib
+pkg_irc_lib_commit = master
+
+PACKAGES += ircd
+pkg_ircd_name = ircd
+pkg_ircd_description = A pluggable IRC daemon application/library for Erlang.
+pkg_ircd_homepage = https://github.com/tonyg/erlang-ircd
+pkg_ircd_fetch = git
+pkg_ircd_repo = https://github.com/tonyg/erlang-ircd
+pkg_ircd_commit = master
+
+PACKAGES += iris
+pkg_iris_name = iris
+pkg_iris_description = Iris Erlang binding
+pkg_iris_homepage = https://github.com/project-iris/iris-erl
+pkg_iris_fetch = git
+pkg_iris_repo = https://github.com/project-iris/iris-erl
+pkg_iris_commit = master
+
+PACKAGES += iso8601
+pkg_iso8601_name = iso8601
+pkg_iso8601_description = Erlang ISO 8601 date formatter/parser
+pkg_iso8601_homepage = https://github.com/seansawyer/erlang_iso8601
+pkg_iso8601_fetch = git
+pkg_iso8601_repo = https://github.com/seansawyer/erlang_iso8601
+pkg_iso8601_commit = master
+
+PACKAGES += jamdb_sybase
+pkg_jamdb_sybase_name = jamdb_sybase
+pkg_jamdb_sybase_description = Erlang driver for SAP Sybase ASE
+pkg_jamdb_sybase_homepage = https://github.com/erlangbureau/jamdb_sybase
+pkg_jamdb_sybase_fetch = git
+pkg_jamdb_sybase_repo = https://github.com/erlangbureau/jamdb_sybase
+pkg_jamdb_sybase_commit = master
+
+PACKAGES += jerg
+pkg_jerg_name = jerg
+pkg_jerg_description = JSON Schema to Erlang Records Generator
+pkg_jerg_homepage = https://github.com/ddossot/jerg
+pkg_jerg_fetch = git
+pkg_jerg_repo = https://github.com/ddossot/jerg
+pkg_jerg_commit = master
+
+PACKAGES += jesse
+pkg_jesse_name = jesse
+pkg_jesse_description = jesse (JSon Schema Erlang) is an implementation of a JSON Schema validator for Erlang.
+pkg_jesse_homepage = https://github.com/for-GET/jesse
+pkg_jesse_fetch = git
+pkg_jesse_repo = https://github.com/for-GET/jesse
+pkg_jesse_commit = master
+
+PACKAGES += jiffy
+pkg_jiffy_name = jiffy
+pkg_jiffy_description = JSON NIFs for Erlang.
+pkg_jiffy_homepage = https://github.com/davisp/jiffy
+pkg_jiffy_fetch = git
+pkg_jiffy_repo = https://github.com/davisp/jiffy
+pkg_jiffy_commit = master
+
+PACKAGES += jiffy_v
+pkg_jiffy_v_name = jiffy_v
+pkg_jiffy_v_description = JSON validation utility
+pkg_jiffy_v_homepage = https://github.com/shizzard/jiffy-v
+pkg_jiffy_v_fetch = git
+pkg_jiffy_v_repo = https://github.com/shizzard/jiffy-v
+pkg_jiffy_v_commit = master
+
+PACKAGES += jobs
+pkg_jobs_name = jobs
+pkg_jobs_description = a job scheduler for load regulation
+pkg_jobs_homepage = https://github.com/esl/jobs
+pkg_jobs_fetch = git
+pkg_jobs_repo = https://github.com/esl/jobs
+pkg_jobs_commit = master
+
+PACKAGES += joxa
+pkg_joxa_name = joxa
+pkg_joxa_description = A Modern Lisp for the Erlang VM
+pkg_joxa_homepage = https://github.com/joxa/joxa
+pkg_joxa_fetch = git
+pkg_joxa_repo = https://github.com/joxa/joxa
+pkg_joxa_commit = master
+
+PACKAGES += json
+pkg_json_name = json
+pkg_json_description = a high-level JSON library for Erlang (17.0+)
+pkg_json_homepage = https://github.com/talentdeficit/json
+pkg_json_fetch = git
+pkg_json_repo = https://github.com/talentdeficit/json
+pkg_json_commit = master
+
+PACKAGES += json_rec
+pkg_json_rec_name = json_rec
+pkg_json_rec_description = JSON to erlang record
+pkg_json_rec_homepage = https://github.com/justinkirby/json_rec
+pkg_json_rec_fetch = git
+pkg_json_rec_repo = https://github.com/justinkirby/json_rec
+pkg_json_rec_commit = master
+
+PACKAGES += jsone
+pkg_jsone_name = jsone
+pkg_jsone_description = An Erlang library for encoding, decoding JSON data.
+pkg_jsone_homepage = https://github.com/sile/jsone
+pkg_jsone_fetch = git
+pkg_jsone_repo = https://github.com/sile/jsone.git
+pkg_jsone_commit = master
+
+PACKAGES += jsonerl
+pkg_jsonerl_name = jsonerl
+pkg_jsonerl_description = yet another but slightly different Erlang <-> JSON encoder/decoder
+pkg_jsonerl_homepage = https://github.com/lambder/jsonerl
+pkg_jsonerl_fetch = git
+pkg_jsonerl_repo = https://github.com/lambder/jsonerl
+pkg_jsonerl_commit = master
+
+PACKAGES += jsonpath
+pkg_jsonpath_name = jsonpath
+pkg_jsonpath_description = Fast Erlang JSON data retrieval and updates via JavaScript-like notation
+pkg_jsonpath_homepage = https://github.com/GeneStevens/jsonpath
+pkg_jsonpath_fetch = git
+pkg_jsonpath_repo = https://github.com/GeneStevens/jsonpath
+pkg_jsonpath_commit = master
+
+PACKAGES += jsonx
+pkg_jsonx_name = jsonx
+pkg_jsonx_description = JSONX is an Erlang library for efficiently decoding and encoding JSON, written in C.
+pkg_jsonx_homepage = https://github.com/iskra/jsonx
+pkg_jsonx_fetch = git
+pkg_jsonx_repo = https://github.com/iskra/jsonx
+pkg_jsonx_commit = master
+
+PACKAGES += jsx
+pkg_jsx_name = jsx
+pkg_jsx_description = An Erlang application for consuming, producing and manipulating JSON.
+pkg_jsx_homepage = https://github.com/talentdeficit/jsx
+pkg_jsx_fetch = git
+pkg_jsx_repo = https://github.com/talentdeficit/jsx
+pkg_jsx_commit = master
+
+PACKAGES += kafka
+pkg_kafka_name = kafka
+pkg_kafka_description = Kafka consumer and producer in Erlang
+pkg_kafka_homepage = https://github.com/wooga/kafka-erlang
+pkg_kafka_fetch = git
+pkg_kafka_repo = https://github.com/wooga/kafka-erlang
+pkg_kafka_commit = master
+
+PACKAGES += kafka_protocol
+pkg_kafka_protocol_name = kafka_protocol
+pkg_kafka_protocol_description = Kafka protocol Erlang library
+pkg_kafka_protocol_homepage = https://github.com/klarna/kafka_protocol
+pkg_kafka_protocol_fetch = git
+pkg_kafka_protocol_repo = https://github.com/klarna/kafka_protocol.git
+pkg_kafka_protocol_commit = master
+
+PACKAGES += kai
+pkg_kai_name = kai
+pkg_kai_description = DHT storage by Takeshi Inoue
+pkg_kai_homepage = https://github.com/synrc/kai
+pkg_kai_fetch = git
+pkg_kai_repo = https://github.com/synrc/kai
+pkg_kai_commit = master
+
+PACKAGES += katja
+pkg_katja_name = katja
+pkg_katja_description = A simple Riemann client written in Erlang.
+pkg_katja_homepage = https://github.com/nifoc/katja
+pkg_katja_fetch = git
+pkg_katja_repo = https://github.com/nifoc/katja
+pkg_katja_commit = master
+
+PACKAGES += kdht
+pkg_kdht_name = kdht
+pkg_kdht_description = kdht is an Erlang DHT implementation
+pkg_kdht_homepage = https://github.com/kevinlynx/kdht
+pkg_kdht_fetch = git
+pkg_kdht_repo = https://github.com/kevinlynx/kdht
+pkg_kdht_commit = master
+
+PACKAGES += key2value
+pkg_key2value_name = key2value
+pkg_key2value_description = Erlang 2-way map
+pkg_key2value_homepage = https://github.com/okeuday/key2value
+pkg_key2value_fetch = git
+pkg_key2value_repo = https://github.com/okeuday/key2value
+pkg_key2value_commit = master
+
+PACKAGES += keys1value
+pkg_keys1value_name = keys1value
+pkg_keys1value_description = Erlang set associative map for key lists
+pkg_keys1value_homepage = https://github.com/okeuday/keys1value
+pkg_keys1value_fetch = git
+pkg_keys1value_repo = https://github.com/okeuday/keys1value
+pkg_keys1value_commit = master
+
+PACKAGES += kinetic
+pkg_kinetic_name = kinetic
+pkg_kinetic_description = Erlang Kinesis Client
+pkg_kinetic_homepage = https://github.com/AdRoll/kinetic
+pkg_kinetic_fetch = git
+pkg_kinetic_repo = https://github.com/AdRoll/kinetic
+pkg_kinetic_commit = master
+
+PACKAGES += kjell
+pkg_kjell_name = kjell
+pkg_kjell_description = Erlang Shell
+pkg_kjell_homepage = https://github.com/karlll/kjell
+pkg_kjell_fetch = git
+pkg_kjell_repo = https://github.com/karlll/kjell
+pkg_kjell_commit = master
+
+PACKAGES += kraken
+pkg_kraken_name = kraken
+pkg_kraken_description = Distributed Pubsub Server for Realtime Apps
+pkg_kraken_homepage = https://github.com/Asana/kraken
+pkg_kraken_fetch = git
+pkg_kraken_repo = https://github.com/Asana/kraken
+pkg_kraken_commit = master
+
+PACKAGES += kucumberl
+pkg_kucumberl_name = kucumberl
+pkg_kucumberl_description = A pure-Erlang, open-source implementation of Cucumber
+pkg_kucumberl_homepage = https://github.com/openshine/kucumberl
+pkg_kucumberl_fetch = git
+pkg_kucumberl_repo = https://github.com/openshine/kucumberl
+pkg_kucumberl_commit = master
+
+PACKAGES += kvc
+pkg_kvc_name = kvc
+pkg_kvc_description = KVC - Key Value Coding for Erlang data structures
+pkg_kvc_homepage = https://github.com/etrepum/kvc
+pkg_kvc_fetch = git
+pkg_kvc_repo = https://github.com/etrepum/kvc
+pkg_kvc_commit = master
+
+PACKAGES += kvlists
+pkg_kvlists_name = kvlists
+pkg_kvlists_description = Lists of key-value pairs (decoded JSON) in Erlang
+pkg_kvlists_homepage = https://github.com/jcomellas/kvlists
+pkg_kvlists_fetch = git
+pkg_kvlists_repo = https://github.com/jcomellas/kvlists
+pkg_kvlists_commit = master
+
+PACKAGES += kvs
+pkg_kvs_name = kvs
+pkg_kvs_description = Container and Iterator
+pkg_kvs_homepage = https://github.com/synrc/kvs
+pkg_kvs_fetch = git
+pkg_kvs_repo = https://github.com/synrc/kvs
+pkg_kvs_commit = master
+
+PACKAGES += lager
+pkg_lager_name = lager
+pkg_lager_description = A logging framework for Erlang/OTP.
+pkg_lager_homepage = https://github.com/erlang-lager/lager
+pkg_lager_fetch = git
+pkg_lager_repo = https://github.com/erlang-lager/lager
+pkg_lager_commit = master
+
+PACKAGES += lager_amqp_backend
+pkg_lager_amqp_backend_name = lager_amqp_backend
+pkg_lager_amqp_backend_description = AMQP RabbitMQ Lager backend
+pkg_lager_amqp_backend_homepage = https://github.com/jbrisbin/lager_amqp_backend
+pkg_lager_amqp_backend_fetch = git
+pkg_lager_amqp_backend_repo = https://github.com/jbrisbin/lager_amqp_backend
+pkg_lager_amqp_backend_commit = master
+
+PACKAGES += lager_syslog
+pkg_lager_syslog_name = lager_syslog
+pkg_lager_syslog_description = Syslog backend for lager
+pkg_lager_syslog_homepage = https://github.com/erlang-lager/lager_syslog
+pkg_lager_syslog_fetch = git
+pkg_lager_syslog_repo = https://github.com/erlang-lager/lager_syslog
+pkg_lager_syslog_commit = master
+
+PACKAGES += lambdapad
+pkg_lambdapad_name = lambdapad
+pkg_lambdapad_description = Static site generator using Erlang. Yes, Erlang.
+pkg_lambdapad_homepage = https://github.com/gar1t/lambdapad
+pkg_lambdapad_fetch = git
+pkg_lambdapad_repo = https://github.com/gar1t/lambdapad
+pkg_lambdapad_commit = master
+
+PACKAGES += lasp
+pkg_lasp_name = lasp
+pkg_lasp_description = A Language for Distributed, Eventually Consistent Computations
+pkg_lasp_homepage = http://lasp-lang.org/
+pkg_lasp_fetch = git
+pkg_lasp_repo = https://github.com/lasp-lang/lasp
+pkg_lasp_commit = master
+
+PACKAGES += lasse
+pkg_lasse_name = lasse
+pkg_lasse_description = SSE handler for Cowboy
+pkg_lasse_homepage = https://github.com/inaka/lasse
+pkg_lasse_fetch = git
+pkg_lasse_repo = https://github.com/inaka/lasse
+pkg_lasse_commit = master
+
+PACKAGES += ldap
+pkg_ldap_name = ldap
+pkg_ldap_description = LDAP server written in Erlang
+pkg_ldap_homepage = https://github.com/spawnproc/ldap
+pkg_ldap_fetch = git
+pkg_ldap_repo = https://github.com/spawnproc/ldap
+pkg_ldap_commit = master
+
+PACKAGES += lethink
+pkg_lethink_name = lethink
+pkg_lethink_description = Erlang driver for RethinkDB
+pkg_lethink_homepage = https://github.com/taybin/lethink
+pkg_lethink_fetch = git
+pkg_lethink_repo = https://github.com/taybin/lethink
+pkg_lethink_commit = master
+
+PACKAGES += lfe
+pkg_lfe_name = lfe
+pkg_lfe_description = Lisp Flavoured Erlang (LFE)
+pkg_lfe_homepage = https://github.com/rvirding/lfe
+pkg_lfe_fetch = git
+pkg_lfe_repo = https://github.com/rvirding/lfe
+pkg_lfe_commit = master
+
+PACKAGES += ling
+pkg_ling_name = ling
+pkg_ling_description = Erlang on Xen
+pkg_ling_homepage = https://github.com/cloudozer/ling
+pkg_ling_fetch = git
+pkg_ling_repo = https://github.com/cloudozer/ling
+pkg_ling_commit = master
+
+PACKAGES += live
+pkg_live_name = live
+pkg_live_description = Automated module and configuration reloader.
+pkg_live_homepage = http://ninenines.eu
+pkg_live_fetch = git
+pkg_live_repo = https://github.com/ninenines/live
+pkg_live_commit = master
+
+PACKAGES += lmq
+pkg_lmq_name = lmq
+pkg_lmq_description = Lightweight Message Queue
+pkg_lmq_homepage = https://github.com/iij/lmq
+pkg_lmq_fetch = git
+pkg_lmq_repo = https://github.com/iij/lmq
+pkg_lmq_commit = master
+
+PACKAGES += locker
+pkg_locker_name = locker
+pkg_locker_description = Atomic distributed 'check and set' for short-lived keys
+pkg_locker_homepage = https://github.com/wooga/locker
+pkg_locker_fetch = git
+pkg_locker_repo = https://github.com/wooga/locker
+pkg_locker_commit = master
+
+PACKAGES += locks
+pkg_locks_name = locks
+pkg_locks_description = A scalable, deadlock-resolving resource locker
+pkg_locks_homepage = https://github.com/uwiger/locks
+pkg_locks_fetch = git
+pkg_locks_repo = https://github.com/uwiger/locks
+pkg_locks_commit = master
+
+PACKAGES += log4erl
+pkg_log4erl_name = log4erl
+pkg_log4erl_description = A logger for erlang in the spirit of Log4J.
+pkg_log4erl_homepage = https://github.com/ahmednawras/log4erl
+pkg_log4erl_fetch = git
+pkg_log4erl_repo = https://github.com/ahmednawras/log4erl
+pkg_log4erl_commit = master
+
+PACKAGES += lol
+pkg_lol_name = lol
+pkg_lol_description = Lisp on erLang, and programming is fun again
+pkg_lol_homepage = https://github.com/b0oh/lol
+pkg_lol_fetch = git
+pkg_lol_repo = https://github.com/b0oh/lol
+pkg_lol_commit = master
+
+PACKAGES += lucid
+pkg_lucid_name = lucid
+pkg_lucid_description = HTTP/2 server written in Erlang
+pkg_lucid_homepage = https://github.com/tatsuhiro-t/lucid
+pkg_lucid_fetch = git
+pkg_lucid_repo = https://github.com/tatsuhiro-t/lucid
+pkg_lucid_commit = master
+
+PACKAGES += luerl
+pkg_luerl_name = luerl
+pkg_luerl_description = Lua in Erlang
+pkg_luerl_homepage = https://github.com/rvirding/luerl
+pkg_luerl_fetch = git
+pkg_luerl_repo = https://github.com/rvirding/luerl
+pkg_luerl_commit = develop
+
+PACKAGES += luwak
+pkg_luwak_name = luwak
+pkg_luwak_description = Large-object storage interface for Riak
+pkg_luwak_homepage = https://github.com/basho/luwak
+pkg_luwak_fetch = git
+pkg_luwak_repo = https://github.com/basho/luwak
+pkg_luwak_commit = master
+
+PACKAGES += lux
+pkg_lux_name = lux
+pkg_lux_description = Lux (LUcid eXpect scripting) simplifies test automation and provides an Expect-style execution of commands
+pkg_lux_homepage = https://github.com/hawk/lux
+pkg_lux_fetch = git
+pkg_lux_repo = https://github.com/hawk/lux
+pkg_lux_commit = master
+
+PACKAGES += machi
+pkg_machi_name = machi
+pkg_machi_description = Machi file store
+pkg_machi_homepage = https://github.com/basho/machi
+pkg_machi_fetch = git
+pkg_machi_repo = https://github.com/basho/machi
+pkg_machi_commit = master
+
+PACKAGES += mad
+pkg_mad_name = mad
+pkg_mad_description = Small and Fast Rebar Replacement
+pkg_mad_homepage = https://github.com/synrc/mad
+pkg_mad_fetch = git
+pkg_mad_repo = https://github.com/synrc/mad
+pkg_mad_commit = master
+
+PACKAGES += marina
+pkg_marina_name = marina
+pkg_marina_description = Non-blocking Erlang Cassandra CQL3 client
+pkg_marina_homepage = https://github.com/lpgauth/marina
+pkg_marina_fetch = git
+pkg_marina_repo = https://github.com/lpgauth/marina
+pkg_marina_commit = master
+
+PACKAGES += mavg
+pkg_mavg_name = mavg
+pkg_mavg_description = Erlang :: Exponential moving average library
+pkg_mavg_homepage = https://github.com/EchoTeam/mavg
+pkg_mavg_fetch = git
+pkg_mavg_repo = https://github.com/EchoTeam/mavg
+pkg_mavg_commit = master
+
+PACKAGES += mc_erl
+pkg_mc_erl_name = mc_erl
+pkg_mc_erl_description = mc-erl is a server for Minecraft 1.4.7 written in Erlang.
+pkg_mc_erl_homepage = https://github.com/clonejo/mc-erl
+pkg_mc_erl_fetch = git
+pkg_mc_erl_repo = https://github.com/clonejo/mc-erl
+pkg_mc_erl_commit = master
+
+PACKAGES += mcd
+pkg_mcd_name = mcd
+pkg_mcd_description = Fast memcached protocol client in pure Erlang
+pkg_mcd_homepage = https://github.com/EchoTeam/mcd
+pkg_mcd_fetch = git
+pkg_mcd_repo = https://github.com/EchoTeam/mcd
+pkg_mcd_commit = master
+
+PACKAGES += mcerlang
+pkg_mcerlang_name = mcerlang
+pkg_mcerlang_description = The McErlang model checker for Erlang
+pkg_mcerlang_homepage = https://github.com/fredlund/McErlang
+pkg_mcerlang_fetch = git
+pkg_mcerlang_repo = https://github.com/fredlund/McErlang
+pkg_mcerlang_commit = master
+
+PACKAGES += meck
+pkg_meck_name = meck
+pkg_meck_description = A mocking library for Erlang
+pkg_meck_homepage = https://github.com/eproxus/meck
+pkg_meck_fetch = git
+pkg_meck_repo = https://github.com/eproxus/meck
+pkg_meck_commit = master
+
+PACKAGES += mekao
+pkg_mekao_name = mekao
+pkg_mekao_description = SQL constructor
+pkg_mekao_homepage = https://github.com/ddosia/mekao
+pkg_mekao_fetch = git
+pkg_mekao_repo = https://github.com/ddosia/mekao
+pkg_mekao_commit = master
+
+PACKAGES += memo
+pkg_memo_name = memo
+pkg_memo_description = Erlang memoization server
+pkg_memo_homepage = https://github.com/tuncer/memo
+pkg_memo_fetch = git
+pkg_memo_repo = https://github.com/tuncer/memo
+pkg_memo_commit = master
+
+PACKAGES += merge_index
+pkg_merge_index_name = merge_index
+pkg_merge_index_description = MergeIndex is an Erlang library for storing ordered sets on disk. It is very similar to an SSTable (in Google's Bigtable) or an HFile (in Hadoop).
+pkg_merge_index_homepage = https://github.com/basho/merge_index
+pkg_merge_index_fetch = git
+pkg_merge_index_repo = https://github.com/basho/merge_index
+pkg_merge_index_commit = master
+
+PACKAGES += merl
+pkg_merl_name = merl
+pkg_merl_description = Metaprogramming in Erlang
+pkg_merl_homepage = https://github.com/richcarl/merl
+pkg_merl_fetch = git
+pkg_merl_repo = https://github.com/richcarl/merl
+pkg_merl_commit = master
+
+PACKAGES += mimerl
+pkg_mimerl_name = mimerl
+pkg_mimerl_description = library to handle mimetypes
+pkg_mimerl_homepage = https://github.com/benoitc/mimerl
+pkg_mimerl_fetch = git
+pkg_mimerl_repo = https://github.com/benoitc/mimerl
+pkg_mimerl_commit = master
+
+PACKAGES += mimetypes
+pkg_mimetypes_name = mimetypes
+pkg_mimetypes_description = Erlang MIME types library
+pkg_mimetypes_homepage = https://github.com/spawngrid/mimetypes
+pkg_mimetypes_fetch = git
+pkg_mimetypes_repo = https://github.com/spawngrid/mimetypes
+pkg_mimetypes_commit = master
+
+PACKAGES += mixer
+pkg_mixer_name = mixer
+pkg_mixer_description = Mix in functions from other modules
+pkg_mixer_homepage = https://github.com/chef/mixer
+pkg_mixer_fetch = git
+pkg_mixer_repo = https://github.com/chef/mixer
+pkg_mixer_commit = master
+
+PACKAGES += mochiweb
+pkg_mochiweb_name = mochiweb
+pkg_mochiweb_description = MochiWeb is an Erlang library for building lightweight HTTP servers.
+pkg_mochiweb_homepage = https://github.com/mochi/mochiweb
+pkg_mochiweb_fetch = git
+pkg_mochiweb_repo = https://github.com/mochi/mochiweb
+pkg_mochiweb_commit = master
+
+PACKAGES += mochiweb_xpath
+pkg_mochiweb_xpath_name = mochiweb_xpath
+pkg_mochiweb_xpath_description = XPath support for mochiweb's HTML parser
+pkg_mochiweb_xpath_homepage = https://github.com/retnuh/mochiweb_xpath
+pkg_mochiweb_xpath_fetch = git
+pkg_mochiweb_xpath_repo = https://github.com/retnuh/mochiweb_xpath
+pkg_mochiweb_xpath_commit = master
+
+PACKAGES += mockgyver
+pkg_mockgyver_name = mockgyver
+pkg_mockgyver_description = A mocking library for Erlang
+pkg_mockgyver_homepage = https://github.com/klajo/mockgyver
+pkg_mockgyver_fetch = git
+pkg_mockgyver_repo = https://github.com/klajo/mockgyver
+pkg_mockgyver_commit = master
+
+PACKAGES += modlib
+pkg_modlib_name = modlib
+pkg_modlib_description = Web framework based on Erlang's inets httpd
+pkg_modlib_homepage = https://github.com/gar1t/modlib
+pkg_modlib_fetch = git
+pkg_modlib_repo = https://github.com/gar1t/modlib
+pkg_modlib_commit = master
+
+PACKAGES += mongodb
+pkg_mongodb_name = mongodb
+pkg_mongodb_description = MongoDB driver for Erlang
+pkg_mongodb_homepage = https://github.com/comtihon/mongodb-erlang
+pkg_mongodb_fetch = git
+pkg_mongodb_repo = https://github.com/comtihon/mongodb-erlang
+pkg_mongodb_commit = master
+
+PACKAGES += mongooseim
+pkg_mongooseim_name = mongooseim
+pkg_mongooseim_description = Jabber / XMPP server with focus on performance and scalability, by Erlang Solutions
+pkg_mongooseim_homepage = https://www.erlang-solutions.com/products/mongooseim-massively-scalable-ejabberd-platform
+pkg_mongooseim_fetch = git
+pkg_mongooseim_repo = https://github.com/esl/MongooseIM
+pkg_mongooseim_commit = master
+
+PACKAGES += moyo
+pkg_moyo_name = moyo
+pkg_moyo_description = Erlang utility functions library
+pkg_moyo_homepage = https://github.com/dwango/moyo
+pkg_moyo_fetch = git
+pkg_moyo_repo = https://github.com/dwango/moyo
+pkg_moyo_commit = master
+
+PACKAGES += msgpack
+pkg_msgpack_name = msgpack
+pkg_msgpack_description = MessagePack (de)serializer implementation for Erlang
+pkg_msgpack_homepage = https://github.com/msgpack/msgpack-erlang
+pkg_msgpack_fetch = git
+pkg_msgpack_repo = https://github.com/msgpack/msgpack-erlang
+pkg_msgpack_commit = master
+
+PACKAGES += mu2
+pkg_mu2_name = mu2
+pkg_mu2_description = Erlang mutation testing tool
+pkg_mu2_homepage = https://github.com/ramsay-t/mu2
+pkg_mu2_fetch = git
+pkg_mu2_repo = https://github.com/ramsay-t/mu2
+pkg_mu2_commit = master
+
+PACKAGES += mustache
+pkg_mustache_name = mustache
+pkg_mustache_description = Mustache template engine for Erlang.
+pkg_mustache_homepage = https://github.com/mojombo/mustache.erl
+pkg_mustache_fetch = git
+pkg_mustache_repo = https://github.com/mojombo/mustache.erl
+pkg_mustache_commit = master
+
+PACKAGES += myproto
+pkg_myproto_name = myproto
+pkg_myproto_description = MySQL Server Protocol in Erlang
+pkg_myproto_homepage = https://github.com/altenwald/myproto
+pkg_myproto_fetch = git
+pkg_myproto_repo = https://github.com/altenwald/myproto
+pkg_myproto_commit = master
+
+PACKAGES += mysql
+pkg_mysql_name = mysql
+pkg_mysql_description = MySQL client library for Erlang/OTP
+pkg_mysql_homepage = https://github.com/mysql-otp/mysql-otp
+pkg_mysql_fetch = git
+pkg_mysql_repo = https://github.com/mysql-otp/mysql-otp
+pkg_mysql_commit = 1.5.1
+
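+# Note: pkg_NAME_commit accepts any git reference. Most entries track a
+# branch such as master or develop; a few, like mysql above, pin a release
+# tag (1.5.1). A project can select a different ref via dep_NAME_commit.
+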
+PACKAGES += n2o
+pkg_n2o_name = n2o
+pkg_n2o_description = WebSocket Application Server
+pkg_n2o_homepage = https://github.com/5HT/n2o
+pkg_n2o_fetch = git
+pkg_n2o_repo = https://github.com/5HT/n2o
+pkg_n2o_commit = master
+
+PACKAGES += nat_upnp
+pkg_nat_upnp_name = nat_upnp
+pkg_nat_upnp_description = Erlang library to map your internal port to an external one using UPnP IGD
+pkg_nat_upnp_homepage = https://github.com/benoitc/nat_upnp
+pkg_nat_upnp_fetch = git
+pkg_nat_upnp_repo = https://github.com/benoitc/nat_upnp
+pkg_nat_upnp_commit = master
+
+PACKAGES += neo4j
+pkg_neo4j_name = neo4j
+pkg_neo4j_description = Erlang client library for Neo4J.
+pkg_neo4j_homepage = https://github.com/dmitriid/neo4j-erlang
+pkg_neo4j_fetch = git
+pkg_neo4j_repo = https://github.com/dmitriid/neo4j-erlang
+pkg_neo4j_commit = master
+
+PACKAGES += neotoma
+pkg_neotoma_name = neotoma
+pkg_neotoma_description = Erlang library and packrat parser-generator for parsing expression grammars.
+pkg_neotoma_homepage = https://github.com/seancribbs/neotoma
+pkg_neotoma_fetch = git
+pkg_neotoma_repo = https://github.com/seancribbs/neotoma
+pkg_neotoma_commit = master
+
+PACKAGES += newrelic
+pkg_newrelic_name = newrelic
+pkg_newrelic_description = Erlang library for sending metrics to New Relic
+pkg_newrelic_homepage = https://github.com/wooga/newrelic-erlang
+pkg_newrelic_fetch = git
+pkg_newrelic_repo = https://github.com/wooga/newrelic-erlang
+pkg_newrelic_commit = master
+
+PACKAGES += nifty
+pkg_nifty_name = nifty
+pkg_nifty_description = Erlang NIF wrapper generator
+pkg_nifty_homepage = https://github.com/parapluu/nifty
+pkg_nifty_fetch = git
+pkg_nifty_repo = https://github.com/parapluu/nifty
+pkg_nifty_commit = master
+
+PACKAGES += nitrogen_core
+pkg_nitrogen_core_name = nitrogen_core
+pkg_nitrogen_core_description = The core Nitrogen library.
+pkg_nitrogen_core_homepage = http://nitrogenproject.com/
+pkg_nitrogen_core_fetch = git
+pkg_nitrogen_core_repo = https://github.com/nitrogen/nitrogen_core
+pkg_nitrogen_core_commit = master
+
+PACKAGES += nkbase
+pkg_nkbase_name = nkbase
+pkg_nkbase_description = NkBASE distributed database
+pkg_nkbase_homepage = https://github.com/Nekso/nkbase
+pkg_nkbase_fetch = git
+pkg_nkbase_repo = https://github.com/Nekso/nkbase
+pkg_nkbase_commit = develop
+
+PACKAGES += nkdocker
+pkg_nkdocker_name = nkdocker
+pkg_nkdocker_description = Erlang Docker client
+pkg_nkdocker_homepage = https://github.com/Nekso/nkdocker
+pkg_nkdocker_fetch = git
+pkg_nkdocker_repo = https://github.com/Nekso/nkdocker
+pkg_nkdocker_commit = master
+
+PACKAGES += nkpacket
+pkg_nkpacket_name = nkpacket
+pkg_nkpacket_description = Generic Erlang transport layer
+pkg_nkpacket_homepage = https://github.com/Nekso/nkpacket
+pkg_nkpacket_fetch = git
+pkg_nkpacket_repo = https://github.com/Nekso/nkpacket
+pkg_nkpacket_commit = master
+
+PACKAGES += nksip
+pkg_nksip_name = nksip
+pkg_nksip_description = Erlang SIP application server
+pkg_nksip_homepage = https://github.com/kalta/nksip
+pkg_nksip_fetch = git
+pkg_nksip_repo = https://github.com/kalta/nksip
+pkg_nksip_commit = master
+
+PACKAGES += nodefinder
+pkg_nodefinder_name = nodefinder
+pkg_nodefinder_description = automatic node discovery via UDP multicast
+pkg_nodefinder_homepage = https://github.com/erlanger/nodefinder
+pkg_nodefinder_fetch = git
+pkg_nodefinder_repo = https://github.com/okeuday/nodefinder
+pkg_nodefinder_commit = master
+
+PACKAGES += nprocreg
+pkg_nprocreg_name = nprocreg
+pkg_nprocreg_description = Minimal Distributed Erlang Process Registry
+pkg_nprocreg_homepage = http://nitrogenproject.com/
+pkg_nprocreg_fetch = git
+pkg_nprocreg_repo = https://github.com/nitrogen/nprocreg
+pkg_nprocreg_commit = master
+
+PACKAGES += oauth
+pkg_oauth_name = oauth
+pkg_oauth_description = An Erlang OAuth 1.0 implementation
+pkg_oauth_homepage = https://github.com/tim/erlang-oauth
+pkg_oauth_fetch = git
+pkg_oauth_repo = https://github.com/tim/erlang-oauth
+pkg_oauth_commit = master
+
+PACKAGES += oauth2
+pkg_oauth2_name = oauth2
+pkg_oauth2_description = Erlang OAuth2 implementation
+pkg_oauth2_homepage = https://github.com/kivra/oauth2
+pkg_oauth2_fetch = git
+pkg_oauth2_repo = https://github.com/kivra/oauth2
+pkg_oauth2_commit = master
+
+PACKAGES += observer_cli
+pkg_observer_cli_name = observer_cli
+pkg_observer_cli_description = Visualize Erlang/Elixir Nodes On The Command Line
+pkg_observer_cli_homepage = http://zhongwencool.github.io/observer_cli
+pkg_observer_cli_fetch = git
+pkg_observer_cli_repo = https://github.com/zhongwencool/observer_cli
+pkg_observer_cli_commit = master
+
+PACKAGES += octopus
+pkg_octopus_name = octopus
+pkg_octopus_description = Small and flexible pool manager written in Erlang
+pkg_octopus_homepage = https://github.com/erlangbureau/octopus
+pkg_octopus_fetch = git
+pkg_octopus_repo = https://github.com/erlangbureau/octopus
+pkg_octopus_commit = master
+
+PACKAGES += of_protocol
+pkg_of_protocol_name = of_protocol
+pkg_of_protocol_description = OpenFlow Protocol Library for Erlang
+pkg_of_protocol_homepage = https://github.com/FlowForwarding/of_protocol
+pkg_of_protocol_fetch = git
+pkg_of_protocol_repo = https://github.com/FlowForwarding/of_protocol
+pkg_of_protocol_commit = master
+
+PACKAGES += opencouch
+pkg_opencouch_name = couch
+pkg_opencouch_description = An embeddable document-oriented database compatible with Apache CouchDB
+pkg_opencouch_homepage = https://github.com/benoitc/opencouch
+pkg_opencouch_fetch = git
+pkg_opencouch_repo = https://github.com/benoitc/opencouch
+pkg_opencouch_commit = master
+
+PACKAGES += openflow
+pkg_openflow_name = openflow
+pkg_openflow_description = An OpenFlow controller written in pure erlang
+pkg_openflow_homepage = https://github.com/renatoaguiar/erlang-openflow
+pkg_openflow_fetch = git
+pkg_openflow_repo = https://github.com/renatoaguiar/erlang-openflow
+pkg_openflow_commit = master
+
+PACKAGES += openid
+pkg_openid_name = openid
+pkg_openid_description = Erlang OpenID
+pkg_openid_homepage = https://github.com/brendonh/erl_openid
+pkg_openid_fetch = git
+pkg_openid_repo = https://github.com/brendonh/erl_openid
+pkg_openid_commit = master
+
+PACKAGES += openpoker
+pkg_openpoker_name = openpoker
+pkg_openpoker_description = Genesis Texas hold'em Game Server
+pkg_openpoker_homepage = https://github.com/hpyhacking/openpoker
+pkg_openpoker_fetch = git
+pkg_openpoker_repo = https://github.com/hpyhacking/openpoker
+pkg_openpoker_commit = master
+
+PACKAGES += otpbp
+pkg_otpbp_name = otpbp
+pkg_otpbp_description = Parse transform for using new OTP functions in old Erlang/OTP releases (R15, R16, 17, 18, 19)
+pkg_otpbp_homepage = https://github.com/Ledest/otpbp
+pkg_otpbp_fetch = git
+pkg_otpbp_repo = https://github.com/Ledest/otpbp
+pkg_otpbp_commit = master
+
+PACKAGES += pal
+pkg_pal_name = pal
+pkg_pal_description = Pragmatic Authentication Library
+pkg_pal_homepage = https://github.com/manifest/pal
+pkg_pal_fetch = git
+pkg_pal_repo = https://github.com/manifest/pal
+pkg_pal_commit = master
+
+PACKAGES += parse_trans
+pkg_parse_trans_name = parse_trans
+pkg_parse_trans_description = Parse transform utilities for Erlang
+pkg_parse_trans_homepage = https://github.com/uwiger/parse_trans
+pkg_parse_trans_fetch = git
+pkg_parse_trans_repo = https://github.com/uwiger/parse_trans
+pkg_parse_trans_commit = master
+
+PACKAGES += parsexml
+pkg_parsexml_name = parsexml
+pkg_parsexml_description = Simple DOM XML parser with a convenient and very simple API
+pkg_parsexml_homepage = https://github.com/maxlapshin/parsexml
+pkg_parsexml_fetch = git
+pkg_parsexml_repo = https://github.com/maxlapshin/parsexml
+pkg_parsexml_commit = master
+
+PACKAGES += partisan
+pkg_partisan_name = partisan
+pkg_partisan_description = High-performance, high-scalability distributed computing with Erlang and Elixir.
+pkg_partisan_homepage = http://partisan.cloud
+pkg_partisan_fetch = git
+pkg_partisan_repo = https://github.com/lasp-lang/partisan
+pkg_partisan_commit = master
+
+PACKAGES += pegjs
+pkg_pegjs_name = pegjs
+pkg_pegjs_description = An implementation of PEG.js grammar for Erlang.
+pkg_pegjs_homepage = https://github.com/dmitriid/pegjs
+pkg_pegjs_fetch = git
+pkg_pegjs_repo = https://github.com/dmitriid/pegjs
+pkg_pegjs_commit = master
+
+PACKAGES += percept2
+pkg_percept2_name = percept2
+pkg_percept2_description = Concurrent profiling tool for Erlang
+pkg_percept2_homepage = https://github.com/huiqing/percept2
+pkg_percept2_fetch = git
+pkg_percept2_repo = https://github.com/huiqing/percept2
+pkg_percept2_commit = master
+
+PACKAGES += pgo
+pkg_pgo_name = pgo
+pkg_pgo_description = Erlang Postgres client and connection pool
+pkg_pgo_homepage = https://github.com/erleans/pgo
+pkg_pgo_fetch = git
+pkg_pgo_repo = https://github.com/erleans/pgo.git
+pkg_pgo_commit = master
+
+PACKAGES += pgsql
+pkg_pgsql_name = pgsql
+pkg_pgsql_description = Erlang PostgreSQL driver
+pkg_pgsql_homepage = https://github.com/semiocast/pgsql
+pkg_pgsql_fetch = git
+pkg_pgsql_repo = https://github.com/semiocast/pgsql
+pkg_pgsql_commit = master
+
+PACKAGES += pkgx
+pkg_pkgx_name = pkgx
+pkg_pkgx_description = Build .deb packages from Erlang releases
+pkg_pkgx_homepage = https://github.com/arjan/pkgx
+pkg_pkgx_fetch = git
+pkg_pkgx_repo = https://github.com/arjan/pkgx
+pkg_pkgx_commit = master
+
+PACKAGES += pkt
+pkg_pkt_name = pkt
+pkg_pkt_description = Erlang network protocol library
+pkg_pkt_homepage = https://github.com/msantos/pkt
+pkg_pkt_fetch = git
+pkg_pkt_repo = https://github.com/msantos/pkt
+pkg_pkt_commit = master
+
+PACKAGES += plain_fsm
+pkg_plain_fsm_name = plain_fsm
+pkg_plain_fsm_description = A behaviour/support library for writing plain Erlang FSMs.
+pkg_plain_fsm_homepage = https://github.com/uwiger/plain_fsm
+pkg_plain_fsm_fetch = git
+pkg_plain_fsm_repo = https://github.com/uwiger/plain_fsm
+pkg_plain_fsm_commit = master
+
+PACKAGES += plumtree
+pkg_plumtree_name = plumtree
+pkg_plumtree_description = Epidemic Broadcast Trees
+pkg_plumtree_homepage = https://github.com/helium/plumtree
+pkg_plumtree_fetch = git
+pkg_plumtree_repo = https://github.com/helium/plumtree
+pkg_plumtree_commit = master
+
+PACKAGES += pmod_transform
+pkg_pmod_transform_name = pmod_transform
+pkg_pmod_transform_description = Parse transform for parameterized modules
+pkg_pmod_transform_homepage = https://github.com/erlang/pmod_transform
+pkg_pmod_transform_fetch = git
+pkg_pmod_transform_repo = https://github.com/erlang/pmod_transform
+pkg_pmod_transform_commit = master
+
+PACKAGES += pobox
+pkg_pobox_name = pobox
+pkg_pobox_description = External buffer processes to protect against mailbox overflow in Erlang
+pkg_pobox_homepage = https://github.com/ferd/pobox
+pkg_pobox_fetch = git
+pkg_pobox_repo = https://github.com/ferd/pobox
+pkg_pobox_commit = master
+
+PACKAGES += ponos
+pkg_ponos_name = ponos
+pkg_ponos_description = ponos is a simple yet powerful load generator written in erlang
+pkg_ponos_homepage = https://github.com/klarna/ponos
+pkg_ponos_fetch = git
+pkg_ponos_repo = https://github.com/klarna/ponos
+pkg_ponos_commit = master
+
+PACKAGES += poolboy
+pkg_poolboy_name = poolboy
+pkg_poolboy_description = A hunky Erlang worker pool factory
+pkg_poolboy_homepage = https://github.com/devinus/poolboy
+pkg_poolboy_fetch = git
+pkg_poolboy_repo = https://github.com/devinus/poolboy
+pkg_poolboy_commit = master
+
+PACKAGES += pooler
+pkg_pooler_name = pooler
+pkg_pooler_description = An OTP Process Pool Application
+pkg_pooler_homepage = https://github.com/seth/pooler
+pkg_pooler_fetch = git
+pkg_pooler_repo = https://github.com/seth/pooler
+pkg_pooler_commit = master
+
+PACKAGES += pqueue
+pkg_pqueue_name = pqueue
+pkg_pqueue_description = Erlang Priority Queues
+pkg_pqueue_homepage = https://github.com/okeuday/pqueue
+pkg_pqueue_fetch = git
+pkg_pqueue_repo = https://github.com/okeuday/pqueue
+pkg_pqueue_commit = master
+
+PACKAGES += procket
+pkg_procket_name = procket
+pkg_procket_description = Erlang interface to low level socket operations
+pkg_procket_homepage = http://blog.listincomprehension.com/search/label/procket
+pkg_procket_fetch = git
+pkg_procket_repo = https://github.com/msantos/procket
+pkg_procket_commit = master
+
+PACKAGES += prometheus
+pkg_prometheus_name = prometheus
+pkg_prometheus_description = Prometheus.io client in Erlang
+pkg_prometheus_homepage = https://github.com/deadtrickster/prometheus.erl
+pkg_prometheus_fetch = git
+pkg_prometheus_repo = https://github.com/deadtrickster/prometheus.erl
+pkg_prometheus_commit = master
+
+PACKAGES += prop
+pkg_prop_name = prop
+pkg_prop_description = An Erlang code scaffolding and generator system.
+pkg_prop_homepage = https://github.com/nuex/prop
+pkg_prop_fetch = git
+pkg_prop_repo = https://github.com/nuex/prop
+pkg_prop_commit = master
+
+PACKAGES += proper
+pkg_proper_name = proper
+pkg_proper_description = PropEr: a QuickCheck-inspired property-based testing tool for Erlang.
+pkg_proper_homepage = http://proper.softlab.ntua.gr
+pkg_proper_fetch = git
+pkg_proper_repo = https://github.com/manopapad/proper
+pkg_proper_commit = master
+
+PACKAGES += props
+pkg_props_name = props
+pkg_props_description = Property structure library
+pkg_props_homepage = https://github.com/greyarea/props
+pkg_props_fetch = git
+pkg_props_repo = https://github.com/greyarea/props
+pkg_props_commit = master
+
+PACKAGES += protobuffs
+pkg_protobuffs_name = protobuffs
+pkg_protobuffs_description = An implementation of Google's Protocol Buffers for Erlang, based on ngerakines/erlang_protobuffs.
+pkg_protobuffs_homepage = https://github.com/basho/erlang_protobuffs
+pkg_protobuffs_fetch = git
+pkg_protobuffs_repo = https://github.com/basho/erlang_protobuffs
+pkg_protobuffs_commit = master
+
+PACKAGES += psycho
+pkg_psycho_name = psycho
+pkg_psycho_description = HTTP server that provides a WSGI-like interface for applications and middleware.
+pkg_psycho_homepage = https://github.com/gar1t/psycho
+pkg_psycho_fetch = git
+pkg_psycho_repo = https://github.com/gar1t/psycho
+pkg_psycho_commit = master
+
+PACKAGES += purity
+pkg_purity_name = purity
+pkg_purity_description = A side-effect analyzer for Erlang
+pkg_purity_homepage = https://github.com/mpitid/purity
+pkg_purity_fetch = git
+pkg_purity_repo = https://github.com/mpitid/purity
+pkg_purity_commit = master
+
+PACKAGES += push_service
+pkg_push_service_name = push_service
+pkg_push_service_description = Push service
+pkg_push_service_homepage = https://github.com/hairyhum/push_service
+pkg_push_service_fetch = git
+pkg_push_service_repo = https://github.com/hairyhum/push_service
+pkg_push_service_commit = master
+
+PACKAGES += qdate
+pkg_qdate_name = qdate
+pkg_qdate_description = Date, time, and timezone parsing, formatting, and conversion for Erlang.
+pkg_qdate_homepage = https://github.com/choptastic/qdate
+pkg_qdate_fetch = git
+pkg_qdate_repo = https://github.com/choptastic/qdate
+pkg_qdate_commit = master
+
+PACKAGES += qrcode
+pkg_qrcode_name = qrcode
+pkg_qrcode_description = QR Code encoder in Erlang
+pkg_qrcode_homepage = https://github.com/komone/qrcode
+pkg_qrcode_fetch = git
+pkg_qrcode_repo = https://github.com/komone/qrcode
+pkg_qrcode_commit = master
+
+PACKAGES += quest
+pkg_quest_name = quest
+pkg_quest_description = Learn Erlang through this set of challenges. An interactive system for getting to know Erlang.
+pkg_quest_homepage = https://github.com/eriksoe/ErlangQuest
+pkg_quest_fetch = git
+pkg_quest_repo = https://github.com/eriksoe/ErlangQuest
+pkg_quest_commit = master
+
+PACKAGES += quickrand
+pkg_quickrand_name = quickrand
+pkg_quickrand_description = Quick Erlang Random Number Generation
+pkg_quickrand_homepage = https://github.com/okeuday/quickrand
+pkg_quickrand_fetch = git
+pkg_quickrand_repo = https://github.com/okeuday/quickrand
+pkg_quickrand_commit = master
+
+PACKAGES += rabbit
+pkg_rabbit_name = rabbit
+pkg_rabbit_description = RabbitMQ Server
+pkg_rabbit_homepage = https://www.rabbitmq.com/
+pkg_rabbit_fetch = git
+pkg_rabbit_repo = https://github.com/rabbitmq/rabbitmq-server.git
+pkg_rabbit_commit = master
+
+PACKAGES += rabbit_exchange_type_riak
+pkg_rabbit_exchange_type_riak_name = rabbit_exchange_type_riak
+pkg_rabbit_exchange_type_riak_description = Custom RabbitMQ exchange type for sticking messages in Riak
+pkg_rabbit_exchange_type_riak_homepage = https://github.com/jbrisbin/riak-exchange
+pkg_rabbit_exchange_type_riak_fetch = git
+pkg_rabbit_exchange_type_riak_repo = https://github.com/jbrisbin/riak-exchange
+pkg_rabbit_exchange_type_riak_commit = master
+
+PACKAGES += rack
+pkg_rack_name = rack
+pkg_rack_description = Rack handler for erlang
+pkg_rack_homepage = https://github.com/erlyvideo/rack
+pkg_rack_fetch = git
+pkg_rack_repo = https://github.com/erlyvideo/rack
+pkg_rack_commit = master
+
+PACKAGES += radierl
+pkg_radierl_name = radierl
+pkg_radierl_description = RADIUS protocol stack implemented in Erlang.
+pkg_radierl_homepage = https://github.com/vances/radierl
+pkg_radierl_fetch = git
+pkg_radierl_repo = https://github.com/vances/radierl
+pkg_radierl_commit = master
+
+PACKAGES += rafter
+pkg_rafter_name = rafter
+pkg_rafter_description = An Erlang library application which implements the Raft consensus protocol
+pkg_rafter_homepage = https://github.com/andrewjstone/rafter
+pkg_rafter_fetch = git
+pkg_rafter_repo = https://github.com/andrewjstone/rafter
+pkg_rafter_commit = master
+
+PACKAGES += ranch
+pkg_ranch_name = ranch
+pkg_ranch_description = Socket acceptor pool for TCP protocols.
+pkg_ranch_homepage = http://ninenines.eu
+pkg_ranch_fetch = git
+pkg_ranch_repo = https://github.com/ninenines/ranch
+pkg_ranch_commit = 1.2.1
+
+PACKAGES += rbeacon
+pkg_rbeacon_name = rbeacon
+pkg_rbeacon_description = LAN discovery and presence in Erlang.
+pkg_rbeacon_homepage = https://github.com/refuge/rbeacon
+pkg_rbeacon_fetch = git
+pkg_rbeacon_repo = https://github.com/refuge/rbeacon
+pkg_rbeacon_commit = master
+
+PACKAGES += rebar
+pkg_rebar_name = rebar
+pkg_rebar_description = Erlang build tool that makes it easy to compile and test Erlang applications, port drivers and releases.
+pkg_rebar_homepage = http://www.rebar3.org
+pkg_rebar_fetch = git
+pkg_rebar_repo = https://github.com/rebar/rebar3
+pkg_rebar_commit = master
+
+PACKAGES += rebus
+pkg_rebus_name = rebus
+pkg_rebus_description = A stupid simple, internal, pub/sub event bus written in and for Erlang.
+pkg_rebus_homepage = https://github.com/olle/rebus
+pkg_rebus_fetch = git
+pkg_rebus_repo = https://github.com/olle/rebus
+pkg_rebus_commit = master
+
+PACKAGES += rec2json
+pkg_rec2json_name = rec2json
+pkg_rec2json_description = Compile erlang record definitions into modules to convert them to/from json easily.
+pkg_rec2json_homepage = https://github.com/lordnull/rec2json
+pkg_rec2json_fetch = git
+pkg_rec2json_repo = https://github.com/lordnull/rec2json
+pkg_rec2json_commit = master
+
+PACKAGES += recon
+pkg_recon_name = recon
+pkg_recon_description = Collection of functions and scripts to debug Erlang in production.
+pkg_recon_homepage = https://github.com/ferd/recon
+pkg_recon_fetch = git
+pkg_recon_repo = https://github.com/ferd/recon
+pkg_recon_commit = master
+
+PACKAGES += record_info
+pkg_record_info_name = record_info
+pkg_record_info_description = Convert between record and proplist
+pkg_record_info_homepage = https://github.com/bipthelin/erlang-record_info
+pkg_record_info_fetch = git
+pkg_record_info_repo = https://github.com/bipthelin/erlang-record_info
+pkg_record_info_commit = master
+
+PACKAGES += redgrid
+pkg_redgrid_name = redgrid
+pkg_redgrid_description = automatic Erlang node discovery via redis
+pkg_redgrid_homepage = https://github.com/jkvor/redgrid
+pkg_redgrid_fetch = git
+pkg_redgrid_repo = https://github.com/jkvor/redgrid
+pkg_redgrid_commit = master
+
+PACKAGES += redo
+pkg_redo_name = redo
+pkg_redo_description = pipelined erlang redis client
+pkg_redo_homepage = https://github.com/jkvor/redo
+pkg_redo_fetch = git
+pkg_redo_repo = https://github.com/jkvor/redo
+pkg_redo_commit = master
+
+PACKAGES += reload_mk
+pkg_reload_mk_name = reload_mk
+pkg_reload_mk_description = Live reload plugin for erlang.mk.
+pkg_reload_mk_homepage = https://github.com/bullno1/reload.mk
+pkg_reload_mk_fetch = git
+pkg_reload_mk_repo = https://github.com/bullno1/reload.mk
+pkg_reload_mk_commit = master
+
+PACKAGES += reltool_util
+pkg_reltool_util_name = reltool_util
+pkg_reltool_util_description = Erlang reltool utility functionality application
+pkg_reltool_util_homepage = https://github.com/okeuday/reltool_util
+pkg_reltool_util_fetch = git
+pkg_reltool_util_repo = https://github.com/okeuday/reltool_util
+pkg_reltool_util_commit = master
+
+PACKAGES += relx
+pkg_relx_name = relx
+pkg_relx_description = Sane, simple release creation for Erlang
+pkg_relx_homepage = https://github.com/erlware/relx
+pkg_relx_fetch = git
+pkg_relx_repo = https://github.com/erlware/relx
+pkg_relx_commit = master
+
+PACKAGES += resource_discovery
+pkg_resource_discovery_name = resource_discovery
+pkg_resource_discovery_description = An application used to dynamically discover resources present in an Erlang node cluster.
+pkg_resource_discovery_homepage = http://erlware.org/
+pkg_resource_discovery_fetch = git
+pkg_resource_discovery_repo = https://github.com/erlware/resource_discovery
+pkg_resource_discovery_commit = master
+
+PACKAGES += restc
+pkg_restc_name = restc
+pkg_restc_description = Erlang REST client
+pkg_restc_homepage = https://github.com/kivra/restclient
+pkg_restc_fetch = git
+pkg_restc_repo = https://github.com/kivra/restclient
+pkg_restc_commit = master
+
+PACKAGES += rfc4627_jsonrpc
+pkg_rfc4627_jsonrpc_name = rfc4627_jsonrpc
+pkg_rfc4627_jsonrpc_description = Erlang RFC4627 (JSON) codec and JSON-RPC server implementation.
+pkg_rfc4627_jsonrpc_homepage = https://github.com/tonyg/erlang-rfc4627
+pkg_rfc4627_jsonrpc_fetch = git
+pkg_rfc4627_jsonrpc_repo = https://github.com/tonyg/erlang-rfc4627
+pkg_rfc4627_jsonrpc_commit = master
+
+PACKAGES += riak_control
+pkg_riak_control_name = riak_control
+pkg_riak_control_description = Webmachine-based administration interface for Riak.
+pkg_riak_control_homepage = https://github.com/basho/riak_control
+pkg_riak_control_fetch = git
+pkg_riak_control_repo = https://github.com/basho/riak_control
+pkg_riak_control_commit = master
+
+PACKAGES += riak_core
+pkg_riak_core_name = riak_core
+pkg_riak_core_description = Distributed systems infrastructure used by Riak.
+pkg_riak_core_homepage = https://github.com/basho/riak_core
+pkg_riak_core_fetch = git
+pkg_riak_core_repo = https://github.com/basho/riak_core
+pkg_riak_core_commit = master
+
+PACKAGES += riak_dt
+pkg_riak_dt_name = riak_dt
+pkg_riak_dt_description = Convergent replicated datatypes in Erlang
+pkg_riak_dt_homepage = https://github.com/basho/riak_dt
+pkg_riak_dt_fetch = git
+pkg_riak_dt_repo = https://github.com/basho/riak_dt
+pkg_riak_dt_commit = master
+
+PACKAGES += riak_ensemble
+pkg_riak_ensemble_name = riak_ensemble
+pkg_riak_ensemble_description = Multi-Paxos framework in Erlang
+pkg_riak_ensemble_homepage = https://github.com/basho/riak_ensemble
+pkg_riak_ensemble_fetch = git
+pkg_riak_ensemble_repo = https://github.com/basho/riak_ensemble
+pkg_riak_ensemble_commit = master
+
+PACKAGES += riak_kv
+pkg_riak_kv_name = riak_kv
+pkg_riak_kv_description = Riak Key/Value Store
+pkg_riak_kv_homepage = https://github.com/basho/riak_kv
+pkg_riak_kv_fetch = git
+pkg_riak_kv_repo = https://github.com/basho/riak_kv
+pkg_riak_kv_commit = master
+
+PACKAGES += riak_pg
+pkg_riak_pg_name = riak_pg
+pkg_riak_pg_description = Distributed process groups with riak_core.
+pkg_riak_pg_homepage = https://github.com/cmeiklejohn/riak_pg
+pkg_riak_pg_fetch = git
+pkg_riak_pg_repo = https://github.com/cmeiklejohn/riak_pg
+pkg_riak_pg_commit = master
+
+PACKAGES += riak_pipe
+pkg_riak_pipe_name = riak_pipe
+pkg_riak_pipe_description = Riak Pipelines
+pkg_riak_pipe_homepage = https://github.com/basho/riak_pipe
+pkg_riak_pipe_fetch = git
+pkg_riak_pipe_repo = https://github.com/basho/riak_pipe
+pkg_riak_pipe_commit = master
+
+PACKAGES += riak_sysmon
+pkg_riak_sysmon_name = riak_sysmon
+pkg_riak_sysmon_description = Simple OTP app for managing Erlang VM system_monitor event messages
+pkg_riak_sysmon_homepage = https://github.com/basho/riak_sysmon
+pkg_riak_sysmon_fetch = git
+pkg_riak_sysmon_repo = https://github.com/basho/riak_sysmon
+pkg_riak_sysmon_commit = master
+
+PACKAGES += riak_test
+pkg_riak_test_name = riak_test
+pkg_riak_test_description = I'm in your cluster, testing your riaks
+pkg_riak_test_homepage = https://github.com/basho/riak_test
+pkg_riak_test_fetch = git
+pkg_riak_test_repo = https://github.com/basho/riak_test
+pkg_riak_test_commit = master
+
+PACKAGES += riakc
+pkg_riakc_name = riakc
+pkg_riakc_description = Erlang clients for Riak.
+pkg_riakc_homepage = https://github.com/basho/riak-erlang-client
+pkg_riakc_fetch = git
+pkg_riakc_repo = https://github.com/basho/riak-erlang-client
+pkg_riakc_commit = master
+
+PACKAGES += riakhttpc
+pkg_riakhttpc_name = riakhttpc
+pkg_riakhttpc_description = Riak Erlang client using the HTTP interface
+pkg_riakhttpc_homepage = https://github.com/basho/riak-erlang-http-client
+pkg_riakhttpc_fetch = git
+pkg_riakhttpc_repo = https://github.com/basho/riak-erlang-http-client
+pkg_riakhttpc_commit = master
+
+PACKAGES += riaknostic
+pkg_riaknostic_name = riaknostic
+pkg_riaknostic_description = A diagnostic tool for Riak installations, to find common errors asap
+pkg_riaknostic_homepage = https://github.com/basho/riaknostic
+pkg_riaknostic_fetch = git
+pkg_riaknostic_repo = https://github.com/basho/riaknostic
+pkg_riaknostic_commit = master
+
+PACKAGES += riakpool
+pkg_riakpool_name = riakpool
+pkg_riakpool_description = erlang riak client pool
+pkg_riakpool_homepage = https://github.com/dweldon/riakpool
+pkg_riakpool_fetch = git
+pkg_riakpool_repo = https://github.com/dweldon/riakpool
+pkg_riakpool_commit = master
+
+PACKAGES += rivus_cep
+pkg_rivus_cep_name = rivus_cep
+pkg_rivus_cep_description = Complex event processing in Erlang
+pkg_rivus_cep_homepage = https://github.com/vascokk/rivus_cep
+pkg_rivus_cep_fetch = git
+pkg_rivus_cep_repo = https://github.com/vascokk/rivus_cep
+pkg_rivus_cep_commit = master
+
+PACKAGES += rlimit
+pkg_rlimit_name = rlimit
+pkg_rlimit_description = Magnus Klaar's rate limiter code from etorrent
+pkg_rlimit_homepage = https://github.com/jlouis/rlimit
+pkg_rlimit_fetch = git
+pkg_rlimit_repo = https://github.com/jlouis/rlimit
+pkg_rlimit_commit = master
+
+PACKAGES += rust_mk
+pkg_rust_mk_name = rust_mk
+pkg_rust_mk_description = Build Rust crates in an Erlang application
+pkg_rust_mk_homepage = https://github.com/goertzenator/rust.mk
+pkg_rust_mk_fetch = git
+pkg_rust_mk_repo = https://github.com/goertzenator/rust.mk
+pkg_rust_mk_commit = master
+
+PACKAGES += safetyvalve
+pkg_safetyvalve_name = safetyvalve
+pkg_safetyvalve_description = A safety valve for your erlang node
+pkg_safetyvalve_homepage = https://github.com/jlouis/safetyvalve
+pkg_safetyvalve_fetch = git
+pkg_safetyvalve_repo = https://github.com/jlouis/safetyvalve
+pkg_safetyvalve_commit = master
+
+PACKAGES += seestar
+pkg_seestar_name = seestar
+pkg_seestar_description = The Erlang client for Cassandra 1.2+ binary protocol
+pkg_seestar_homepage = https://github.com/iamaleksey/seestar
+pkg_seestar_fetch = git
+pkg_seestar_repo = https://github.com/iamaleksey/seestar
+pkg_seestar_commit = master
+
+PACKAGES += service
+pkg_service_name = service
+pkg_service_description = A minimal Erlang behavior for creating CloudI internal services
+pkg_service_homepage = http://cloudi.org/
+pkg_service_fetch = git
+pkg_service_repo = https://github.com/CloudI/service
+pkg_service_commit = master
+
+PACKAGES += setup
+pkg_setup_name = setup
+pkg_setup_description = Generic setup utility for Erlang-based systems
+pkg_setup_homepage = https://github.com/uwiger/setup
+pkg_setup_fetch = git
+pkg_setup_repo = https://github.com/uwiger/setup
+pkg_setup_commit = master
+
+PACKAGES += sext
+pkg_sext_name = sext
+pkg_sext_description = Sortable Erlang Term Serialization
+pkg_sext_homepage = https://github.com/uwiger/sext
+pkg_sext_fetch = git
+pkg_sext_repo = https://github.com/uwiger/sext
+pkg_sext_commit = master
+
+PACKAGES += sfmt
+pkg_sfmt_name = sfmt
+pkg_sfmt_description = SFMT pseudo random number generator for Erlang.
+pkg_sfmt_homepage = https://github.com/jj1bdx/sfmt-erlang
+pkg_sfmt_fetch = git
+pkg_sfmt_repo = https://github.com/jj1bdx/sfmt-erlang
+pkg_sfmt_commit = master
+
+PACKAGES += sgte
+pkg_sgte_name = sgte
+pkg_sgte_description = A simple Erlang Template Engine
+pkg_sgte_homepage = https://github.com/filippo/sgte
+pkg_sgte_fetch = git
+pkg_sgte_repo = https://github.com/filippo/sgte
+pkg_sgte_commit = master
+
+PACKAGES += sheriff
+pkg_sheriff_name = sheriff
+pkg_sheriff_description = Parse transform for type based validation.
+pkg_sheriff_homepage = http://ninenines.eu
+pkg_sheriff_fetch = git
+pkg_sheriff_repo = https://github.com/extend/sheriff
+pkg_sheriff_commit = master
+
+PACKAGES += shotgun
+pkg_shotgun_name = shotgun
+pkg_shotgun_description = better than just a gun
+pkg_shotgun_homepage = https://github.com/inaka/shotgun
+pkg_shotgun_fetch = git
+pkg_shotgun_repo = https://github.com/inaka/shotgun
+pkg_shotgun_commit = master
+
+PACKAGES += sidejob
+pkg_sidejob_name = sidejob
+pkg_sidejob_description = Parallel worker and capacity limiting library for Erlang
+pkg_sidejob_homepage = https://github.com/basho/sidejob
+pkg_sidejob_fetch = git
+pkg_sidejob_repo = https://github.com/basho/sidejob
+pkg_sidejob_commit = master
+
+PACKAGES += sieve
+pkg_sieve_name = sieve
+pkg_sieve_description = sieve is a simple TCP routing proxy (layer 7) in erlang
+pkg_sieve_homepage = https://github.com/benoitc/sieve
+pkg_sieve_fetch = git
+pkg_sieve_repo = https://github.com/benoitc/sieve
+pkg_sieve_commit = master
+
+PACKAGES += sighandler
+pkg_sighandler_name = sighandler
+pkg_sighandler_description = Handle UNIX signals in Erlang
+pkg_sighandler_homepage = https://github.com/jkingsbery/sighandler
+pkg_sighandler_fetch = git
+pkg_sighandler_repo = https://github.com/jkingsbery/sighandler
+pkg_sighandler_commit = master
+
+PACKAGES += simhash
+pkg_simhash_name = simhash
+pkg_simhash_description = Simhashing for Erlang -- hashing algorithm to find near-duplicates in binary data.
+pkg_simhash_homepage = https://github.com/ferd/simhash
+pkg_simhash_fetch = git
+pkg_simhash_repo = https://github.com/ferd/simhash
+pkg_simhash_commit = master
+
+PACKAGES += simple_bridge
+pkg_simple_bridge_name = simple_bridge
+pkg_simple_bridge_description = A simple, standardized interface library to Erlang HTTP Servers.
+pkg_simple_bridge_homepage = https://github.com/nitrogen/simple_bridge
+pkg_simple_bridge_fetch = git
+pkg_simple_bridge_repo = https://github.com/nitrogen/simple_bridge
+pkg_simple_bridge_commit = master
+
+PACKAGES += simple_oauth2
+pkg_simple_oauth2_name = simple_oauth2
+pkg_simple_oauth2_description = Simple erlang OAuth2 client module for any http server framework (Google, Facebook, Yandex, Vkontakte are preconfigured)
+pkg_simple_oauth2_homepage = https://github.com/virtan/simple_oauth2
+pkg_simple_oauth2_fetch = git
+pkg_simple_oauth2_repo = https://github.com/virtan/simple_oauth2
+pkg_simple_oauth2_commit = master
+
+PACKAGES += skel
+pkg_skel_name = skel
+pkg_skel_description = A Streaming Process-based Skeleton Library for Erlang
+pkg_skel_homepage = https://github.com/ParaPhrase/skel
+pkg_skel_fetch = git
+pkg_skel_repo = https://github.com/ParaPhrase/skel
+pkg_skel_commit = master
+
+PACKAGES += slack
+pkg_slack_name = slack
+pkg_slack_description = Minimal slack notification OTP library.
+pkg_slack_homepage = https://github.com/DonBranson/slack
+pkg_slack_fetch = git
+pkg_slack_repo = https://github.com/DonBranson/slack.git
+pkg_slack_commit = master
+
+PACKAGES += smother
+pkg_smother_name = smother
+pkg_smother_description = Extended code coverage metrics for Erlang.
+pkg_smother_homepage = https://ramsay-t.github.io/Smother/
+pkg_smother_fetch = git
+pkg_smother_repo = https://github.com/ramsay-t/Smother
+pkg_smother_commit = master
+
+PACKAGES += snappyer
+pkg_snappyer_name = snappyer
+pkg_snappyer_description = Snappy as nif for Erlang
+pkg_snappyer_homepage = https://github.com/zmstone/snappyer
+pkg_snappyer_fetch = git
+pkg_snappyer_repo = https://github.com/zmstone/snappyer.git
+pkg_snappyer_commit = master
+
+PACKAGES += social
+pkg_social_name = social
+pkg_social_description = Cowboy handler for social login via OAuth2 providers
+pkg_social_homepage = https://github.com/dvv/social
+pkg_social_fetch = git
+pkg_social_repo = https://github.com/dvv/social
+pkg_social_commit = master
+
+PACKAGES += spapi_router
+pkg_spapi_router_name = spapi_router
+pkg_spapi_router_description = Partially-connected Erlang clustering
+pkg_spapi_router_homepage = https://github.com/spilgames/spapi-router
+pkg_spapi_router_fetch = git
+pkg_spapi_router_repo = https://github.com/spilgames/spapi-router
+pkg_spapi_router_commit = master
+
+PACKAGES += sqerl
+pkg_sqerl_name = sqerl
+pkg_sqerl_description = An Erlang-flavoured SQL DSL
+pkg_sqerl_homepage = https://github.com/hairyhum/sqerl
+pkg_sqerl_fetch = git
+pkg_sqerl_repo = https://github.com/hairyhum/sqerl
+pkg_sqerl_commit = master
+
+PACKAGES += srly
+pkg_srly_name = srly
+pkg_srly_description = Native Erlang Unix serial interface
+pkg_srly_homepage = https://github.com/msantos/srly
+pkg_srly_fetch = git
+pkg_srly_repo = https://github.com/msantos/srly
+pkg_srly_commit = master
+
+PACKAGES += sshrpc
+pkg_sshrpc_name = sshrpc
+pkg_sshrpc_description = Erlang SSH RPC module (experimental)
+pkg_sshrpc_homepage = https://github.com/jj1bdx/sshrpc
+pkg_sshrpc_fetch = git
+pkg_sshrpc_repo = https://github.com/jj1bdx/sshrpc
+pkg_sshrpc_commit = master
+
+PACKAGES += stable
+pkg_stable_name = stable
+pkg_stable_description = Library of assorted helpers for Cowboy web server.
+pkg_stable_homepage = https://github.com/dvv/stable
+pkg_stable_fetch = git
+pkg_stable_repo = https://github.com/dvv/stable
+pkg_stable_commit = master
+
+PACKAGES += statebox
+pkg_statebox_name = statebox
+pkg_statebox_description = Erlang state monad with merge/conflict-resolution capabilities. Useful for Riak.
+pkg_statebox_homepage = https://github.com/mochi/statebox
+pkg_statebox_fetch = git
+pkg_statebox_repo = https://github.com/mochi/statebox
+pkg_statebox_commit = master
+
+PACKAGES += statebox_riak
+pkg_statebox_riak_name = statebox_riak
+pkg_statebox_riak_description = Convenience library that makes it easier to use statebox with riak, extracted from best practices in our production code at Mochi Media.
+pkg_statebox_riak_homepage = https://github.com/mochi/statebox_riak
+pkg_statebox_riak_fetch = git
+pkg_statebox_riak_repo = https://github.com/mochi/statebox_riak
+pkg_statebox_riak_commit = master
+
+PACKAGES += statman
+pkg_statman_name = statman
+pkg_statman_description = Efficiently collect massive volumes of metrics inside the Erlang VM
+pkg_statman_homepage = https://github.com/knutin/statman
+pkg_statman_fetch = git
+pkg_statman_repo = https://github.com/knutin/statman
+pkg_statman_commit = master
+
+PACKAGES += statsderl
+pkg_statsderl_name = statsderl
+pkg_statsderl_description = StatsD client (erlang)
+pkg_statsderl_homepage = https://github.com/lpgauth/statsderl
+pkg_statsderl_fetch = git
+pkg_statsderl_repo = https://github.com/lpgauth/statsderl
+pkg_statsderl_commit = master
+
+PACKAGES += stdinout_pool
+pkg_stdinout_pool_name = stdinout_pool
+pkg_stdinout_pool_description = stdinout_pool : stuff goes in, stuff goes out. there's never any miscommunication.
+pkg_stdinout_pool_homepage = https://github.com/mattsta/erlang-stdinout-pool
+pkg_stdinout_pool_fetch = git
+pkg_stdinout_pool_repo = https://github.com/mattsta/erlang-stdinout-pool
+pkg_stdinout_pool_commit = master
+
+PACKAGES += stockdb
+pkg_stockdb_name = stockdb
+pkg_stockdb_description = Database for storing Stock Exchange quotes in erlang
+pkg_stockdb_homepage = https://github.com/maxlapshin/stockdb
+pkg_stockdb_fetch = git
+pkg_stockdb_repo = https://github.com/maxlapshin/stockdb
+pkg_stockdb_commit = master
+
+PACKAGES += stripe
+pkg_stripe_name = stripe
+pkg_stripe_description = Erlang interface to the stripe.com API
+pkg_stripe_homepage = https://github.com/mattsta/stripe-erlang
+pkg_stripe_fetch = git
+pkg_stripe_repo = https://github.com/mattsta/stripe-erlang
+pkg_stripe_commit = v1
+
+PACKAGES += subproc
+pkg_subproc_name = subproc
+pkg_subproc_description = unix subprocess manager with {active,once|false} modes
+pkg_subproc_homepage = http://dozzie.jarowit.net/trac/wiki/subproc
+pkg_subproc_fetch = git
+pkg_subproc_repo = https://github.com/dozzie/subproc
+pkg_subproc_commit = v0.1.0
+
+PACKAGES += supervisor3
+pkg_supervisor3_name = supervisor3
+pkg_supervisor3_description = OTP supervisor with additional strategies
+pkg_supervisor3_homepage = https://github.com/klarna/supervisor3
+pkg_supervisor3_fetch = git
+pkg_supervisor3_repo = https://github.com/klarna/supervisor3.git
+pkg_supervisor3_commit = master
+
+PACKAGES += surrogate
+pkg_surrogate_name = surrogate
+pkg_surrogate_description = Proxy server written in erlang. Supports reverse proxy load balancing and forward proxy with http (including CONNECT), socks4, socks5, and transparent proxy modes.
+pkg_surrogate_homepage = https://github.com/skruger/Surrogate
+pkg_surrogate_fetch = git
+pkg_surrogate_repo = https://github.com/skruger/Surrogate
+pkg_surrogate_commit = master
+
+PACKAGES += swab
+pkg_swab_name = swab
+pkg_swab_description = General purpose buffer handling module
+pkg_swab_homepage = https://github.com/crownedgrouse/swab
+pkg_swab_fetch = git
+pkg_swab_repo = https://github.com/crownedgrouse/swab
+pkg_swab_commit = master
+
+PACKAGES += swarm
+pkg_swarm_name = swarm
+pkg_swarm_description = Fast and simple acceptor pool for Erlang
+pkg_swarm_homepage = https://github.com/jeremey/swarm
+pkg_swarm_fetch = git
+pkg_swarm_repo = https://github.com/jeremey/swarm
+pkg_swarm_commit = master
+
+PACKAGES += switchboard
+pkg_switchboard_name = switchboard
+pkg_switchboard_description = A framework for processing email using worker plugins.
+pkg_switchboard_homepage = https://github.com/thusfresh/switchboard
+pkg_switchboard_fetch = git
+pkg_switchboard_repo = https://github.com/thusfresh/switchboard
+pkg_switchboard_commit = master
+
+PACKAGES += syn
+pkg_syn_name = syn
+pkg_syn_description = A global Process Registry and Process Group manager for Erlang.
+pkg_syn_homepage = https://github.com/ostinelli/syn
+pkg_syn_fetch = git
+pkg_syn_repo = https://github.com/ostinelli/syn
+pkg_syn_commit = master
+
+PACKAGES += sync
+pkg_sync_name = sync
+pkg_sync_description = On-the-fly recompiling and reloading in Erlang.
+pkg_sync_homepage = https://github.com/rustyio/sync
+pkg_sync_fetch = git
+pkg_sync_repo = https://github.com/rustyio/sync
+pkg_sync_commit = master
+
+PACKAGES += syntaxerl
+pkg_syntaxerl_name = syntaxerl
+pkg_syntaxerl_description = Syntax checker for Erlang
+pkg_syntaxerl_homepage = https://github.com/ten0s/syntaxerl
+pkg_syntaxerl_fetch = git
+pkg_syntaxerl_repo = https://github.com/ten0s/syntaxerl
+pkg_syntaxerl_commit = master
+
+PACKAGES += syslog
+pkg_syslog_name = syslog
+pkg_syslog_description = Erlang port driver for interacting with syslog via syslog(3)
+pkg_syslog_homepage = https://github.com/Vagabond/erlang-syslog
+pkg_syslog_fetch = git
+pkg_syslog_repo = https://github.com/Vagabond/erlang-syslog
+pkg_syslog_commit = master
+
+PACKAGES += taskforce
+pkg_taskforce_name = taskforce
+pkg_taskforce_description = Erlang worker pools for controlled parallelisation of arbitrary tasks.
+pkg_taskforce_homepage = https://github.com/g-andrade/taskforce
+pkg_taskforce_fetch = git
+pkg_taskforce_repo = https://github.com/g-andrade/taskforce
+pkg_taskforce_commit = master
+
+PACKAGES += tddreloader
+pkg_tddreloader_name = tddreloader
+pkg_tddreloader_description = Shell utility for recompiling, reloading, and testing code as it changes
+pkg_tddreloader_homepage = https://github.com/version2beta/tddreloader
+pkg_tddreloader_fetch = git
+pkg_tddreloader_repo = https://github.com/version2beta/tddreloader
+pkg_tddreloader_commit = master
+
+PACKAGES += tempo
+pkg_tempo_name = tempo
+pkg_tempo_description = NIF-based date and time parsing and formatting for Erlang.
+pkg_tempo_homepage = https://github.com/selectel/tempo
+pkg_tempo_fetch = git
+pkg_tempo_repo = https://github.com/selectel/tempo
+pkg_tempo_commit = master
+
+PACKAGES += ticktick
+pkg_ticktick_name = ticktick
+pkg_ticktick_description = Ticktick is an ID generator for message services.
+pkg_ticktick_homepage = https://github.com/ericliang/ticktick
+pkg_ticktick_fetch = git
+pkg_ticktick_repo = https://github.com/ericliang/ticktick
+pkg_ticktick_commit = master
+
+PACKAGES += tinymq
+pkg_tinymq_name = tinymq
+pkg_tinymq_description = TinyMQ - a diminutive, in-memory message queue
+pkg_tinymq_homepage = https://github.com/ChicagoBoss/tinymq
+pkg_tinymq_fetch = git
+pkg_tinymq_repo = https://github.com/ChicagoBoss/tinymq
+pkg_tinymq_commit = master
+
+PACKAGES += tinymt
+pkg_tinymt_name = tinymt
+pkg_tinymt_description = TinyMT pseudo random number generator for Erlang.
+pkg_tinymt_homepage = https://github.com/jj1bdx/tinymt-erlang
+pkg_tinymt_fetch = git
+pkg_tinymt_repo = https://github.com/jj1bdx/tinymt-erlang
+pkg_tinymt_commit = master
+
+PACKAGES += tirerl
+pkg_tirerl_name = tirerl
+pkg_tirerl_description = Erlang interface to Elastic Search
+pkg_tirerl_homepage = https://github.com/inaka/tirerl
+pkg_tirerl_fetch = git
+pkg_tirerl_repo = https://github.com/inaka/tirerl
+pkg_tirerl_commit = master
+
+PACKAGES += toml
+pkg_toml_name = toml
+pkg_toml_description = TOML (0.4.0) config parser
+pkg_toml_homepage = http://dozzie.jarowit.net/trac/wiki/TOML
+pkg_toml_fetch = git
+pkg_toml_repo = https://github.com/dozzie/toml
+pkg_toml_commit = v0.2.0
+
+PACKAGES += traffic_tools
+pkg_traffic_tools_name = traffic_tools
+pkg_traffic_tools_description = Simple traffic limiting library
+pkg_traffic_tools_homepage = https://github.com/systra/traffic_tools
+pkg_traffic_tools_fetch = git
+pkg_traffic_tools_repo = https://github.com/systra/traffic_tools
+pkg_traffic_tools_commit = master
+
+PACKAGES += trails
+pkg_trails_name = trails
+pkg_trails_description = A couple of improvements over Cowboy Routes
+pkg_trails_homepage = http://inaka.github.io/cowboy-trails/
+pkg_trails_fetch = git
+pkg_trails_repo = https://github.com/inaka/cowboy-trails
+pkg_trails_commit = master
+
+PACKAGES += trane
+pkg_trane_name = trane
+pkg_trane_description = SAX style broken HTML parser in Erlang
+pkg_trane_homepage = https://github.com/massemanet/trane
+pkg_trane_fetch = git
+pkg_trane_repo = https://github.com/massemanet/trane
+pkg_trane_commit = master
+
+PACKAGES += transit
+pkg_transit_name = transit
+pkg_transit_description = Transit format for Erlang
+pkg_transit_homepage = https://github.com/isaiah/transit-erlang
+pkg_transit_fetch = git
+pkg_transit_repo = https://github.com/isaiah/transit-erlang
+pkg_transit_commit = master
+
+PACKAGES += trie
+pkg_trie_name = trie
+pkg_trie_description = Erlang Trie Implementation
+pkg_trie_homepage = https://github.com/okeuday/trie
+pkg_trie_fetch = git
+pkg_trie_repo = https://github.com/okeuday/trie
+pkg_trie_commit = master
+
+PACKAGES += triq
+pkg_triq_name = triq
+pkg_triq_description = Trifork QuickCheck
+pkg_triq_homepage = https://triq.gitlab.io
+pkg_triq_fetch = git
+pkg_triq_repo = https://gitlab.com/triq/triq.git
+pkg_triq_commit = master
+
+PACKAGES += tunctl
+pkg_tunctl_name = tunctl
+pkg_tunctl_description = Erlang TUN/TAP interface
+pkg_tunctl_homepage = https://github.com/msantos/tunctl
+pkg_tunctl_fetch = git
+pkg_tunctl_repo = https://github.com/msantos/tunctl
+pkg_tunctl_commit = master
+
+PACKAGES += twerl
+pkg_twerl_name = twerl
+pkg_twerl_description = Erlang client for the Twitter Streaming API
+pkg_twerl_homepage = https://github.com/lucaspiller/twerl
+pkg_twerl_fetch = git
+pkg_twerl_repo = https://github.com/lucaspiller/twerl
+pkg_twerl_commit = oauth
+
+PACKAGES += twitter_erlang
+pkg_twitter_erlang_name = twitter_erlang
+pkg_twitter_erlang_description = An Erlang twitter client
+pkg_twitter_erlang_homepage = https://github.com/ngerakines/erlang_twitter
+pkg_twitter_erlang_fetch = git
+pkg_twitter_erlang_repo = https://github.com/ngerakines/erlang_twitter
+pkg_twitter_erlang_commit = master
+
+PACKAGES += ucol_nif
+pkg_ucol_nif_name = ucol_nif
+pkg_ucol_nif_description = ICU based collation Erlang module
+pkg_ucol_nif_homepage = https://github.com/refuge/ucol_nif
+pkg_ucol_nif_fetch = git
+pkg_ucol_nif_repo = https://github.com/refuge/ucol_nif
+pkg_ucol_nif_commit = master
+
+PACKAGES += unicorn
+pkg_unicorn_name = unicorn
+pkg_unicorn_description = Generic configuration server
+pkg_unicorn_homepage = https://github.com/shizzard/unicorn
+pkg_unicorn_fetch = git
+pkg_unicorn_repo = https://github.com/shizzard/unicorn
+pkg_unicorn_commit = master
+
+PACKAGES += unsplit
+pkg_unsplit_name = unsplit
+pkg_unsplit_description = Resolves conflicts in Mnesia after network splits
+pkg_unsplit_homepage = https://github.com/uwiger/unsplit
+pkg_unsplit_fetch = git
+pkg_unsplit_repo = https://github.com/uwiger/unsplit
+pkg_unsplit_commit = master
+
+PACKAGES += uuid
+pkg_uuid_name = uuid
+pkg_uuid_description = Erlang UUID Implementation
+pkg_uuid_homepage = https://github.com/okeuday/uuid
+pkg_uuid_fetch = git
+pkg_uuid_repo = https://github.com/okeuday/uuid
+pkg_uuid_commit = master
+
+PACKAGES += ux
+pkg_ux_name = ux
+pkg_ux_description = Unicode eXtension for Erlang (Strings, Collation)
+pkg_ux_homepage = https://github.com/erlang-unicode/ux
+pkg_ux_fetch = git
+pkg_ux_repo = https://github.com/erlang-unicode/ux
+pkg_ux_commit = master
+
+PACKAGES += vert
+pkg_vert_name = vert
+pkg_vert_description = Erlang binding to libvirt virtualization API
+pkg_vert_homepage = https://github.com/msantos/erlang-libvirt
+pkg_vert_fetch = git
+pkg_vert_repo = https://github.com/msantos/erlang-libvirt
+pkg_vert_commit = master
+
+PACKAGES += verx
+pkg_verx_name = verx
+pkg_verx_description = Erlang implementation of the libvirtd remote protocol
+pkg_verx_homepage = https://github.com/msantos/verx
+pkg_verx_fetch = git
+pkg_verx_repo = https://github.com/msantos/verx
+pkg_verx_commit = master
+
+PACKAGES += vmq_acl
+pkg_vmq_acl_name = vmq_acl
+pkg_vmq_acl_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_acl_homepage = https://verne.mq/
+pkg_vmq_acl_fetch = git
+pkg_vmq_acl_repo = https://github.com/erlio/vmq_acl
+pkg_vmq_acl_commit = master
+
+PACKAGES += vmq_bridge
+pkg_vmq_bridge_name = vmq_bridge
+pkg_vmq_bridge_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_bridge_homepage = https://verne.mq/
+pkg_vmq_bridge_fetch = git
+pkg_vmq_bridge_repo = https://github.com/erlio/vmq_bridge
+pkg_vmq_bridge_commit = master
+
+PACKAGES += vmq_graphite
+pkg_vmq_graphite_name = vmq_graphite
+pkg_vmq_graphite_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_graphite_homepage = https://verne.mq/
+pkg_vmq_graphite_fetch = git
+pkg_vmq_graphite_repo = https://github.com/erlio/vmq_graphite
+pkg_vmq_graphite_commit = master
+
+PACKAGES += vmq_passwd
+pkg_vmq_passwd_name = vmq_passwd
+pkg_vmq_passwd_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_passwd_homepage = https://verne.mq/
+pkg_vmq_passwd_fetch = git
+pkg_vmq_passwd_repo = https://github.com/erlio/vmq_passwd
+pkg_vmq_passwd_commit = master
+
+PACKAGES += vmq_server
+pkg_vmq_server_name = vmq_server
+pkg_vmq_server_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_server_homepage = https://verne.mq/
+pkg_vmq_server_fetch = git
+pkg_vmq_server_repo = https://github.com/erlio/vmq_server
+pkg_vmq_server_commit = master
+
+PACKAGES += vmq_snmp
+pkg_vmq_snmp_name = vmq_snmp
+pkg_vmq_snmp_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_snmp_homepage = https://verne.mq/
+pkg_vmq_snmp_fetch = git
+pkg_vmq_snmp_repo = https://github.com/erlio/vmq_snmp
+pkg_vmq_snmp_commit = master
+
+PACKAGES += vmq_systree
+pkg_vmq_systree_name = vmq_systree
+pkg_vmq_systree_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_systree_homepage = https://verne.mq/
+pkg_vmq_systree_fetch = git
+pkg_vmq_systree_repo = https://github.com/erlio/vmq_systree
+pkg_vmq_systree_commit = master
+
+PACKAGES += vmstats
+pkg_vmstats_name = vmstats
+pkg_vmstats_description = tiny Erlang app that works in conjunction with statsderl in order to generate information on the Erlang VM for graphite logs.
+pkg_vmstats_homepage = https://github.com/ferd/vmstats
+pkg_vmstats_fetch = git
+pkg_vmstats_repo = https://github.com/ferd/vmstats
+pkg_vmstats_commit = master
+
+PACKAGES += walrus
+pkg_walrus_name = walrus
+pkg_walrus_description = Walrus - Mustache-like Templating
+pkg_walrus_homepage = https://github.com/devinus/walrus
+pkg_walrus_fetch = git
+pkg_walrus_repo = https://github.com/devinus/walrus
+pkg_walrus_commit = master
+
+PACKAGES += webmachine
+pkg_webmachine_name = webmachine
+pkg_webmachine_description = A REST-based system for building web applications.
+pkg_webmachine_homepage = https://github.com/basho/webmachine
+pkg_webmachine_fetch = git
+pkg_webmachine_repo = https://github.com/basho/webmachine
+pkg_webmachine_commit = master
+
+PACKAGES += websocket_client
+pkg_websocket_client_name = websocket_client
+pkg_websocket_client_description = Erlang WebSocket client (ws and wss supported)
+pkg_websocket_client_homepage = https://github.com/jeremyong/websocket_client
+pkg_websocket_client_fetch = git
+pkg_websocket_client_repo = https://github.com/jeremyong/websocket_client
+pkg_websocket_client_commit = master
+
+PACKAGES += worker_pool
+pkg_worker_pool_name = worker_pool
+pkg_worker_pool_description = a simple erlang worker pool
+pkg_worker_pool_homepage = https://github.com/inaka/worker_pool
+pkg_worker_pool_fetch = git
+pkg_worker_pool_repo = https://github.com/inaka/worker_pool
+pkg_worker_pool_commit = master
+
+PACKAGES += wrangler
+pkg_wrangler_name = wrangler
+pkg_wrangler_description = Import of the Wrangler svn repository.
+pkg_wrangler_homepage = http://www.cs.kent.ac.uk/projects/wrangler/Home.html
+pkg_wrangler_fetch = git
+pkg_wrangler_repo = https://github.com/RefactoringTools/wrangler
+pkg_wrangler_commit = master
+
+PACKAGES += wsock
+pkg_wsock_name = wsock
+pkg_wsock_description = Erlang library to build WebSocket clients and servers
+pkg_wsock_homepage = https://github.com/madtrick/wsock
+pkg_wsock_fetch = git
+pkg_wsock_repo = https://github.com/madtrick/wsock
+pkg_wsock_commit = master
+
+PACKAGES += xhttpc
+pkg_xhttpc_name = xhttpc
+pkg_xhttpc_description = Extensible HTTP Client for Erlang
+pkg_xhttpc_homepage = https://github.com/seriyps/xhttpc
+pkg_xhttpc_fetch = git
+pkg_xhttpc_repo = https://github.com/seriyps/xhttpc
+pkg_xhttpc_commit = master
+
+PACKAGES += xref_runner
+pkg_xref_runner_name = xref_runner
+pkg_xref_runner_description = Erlang Xref Runner (inspired by rebar xref)
+pkg_xref_runner_homepage = https://github.com/inaka/xref_runner
+pkg_xref_runner_fetch = git
+pkg_xref_runner_repo = https://github.com/inaka/xref_runner
+pkg_xref_runner_commit = master
+
+PACKAGES += yamerl
+pkg_yamerl_name = yamerl
+pkg_yamerl_description = YAML 1.2 parser in pure Erlang
+pkg_yamerl_homepage = https://github.com/yakaz/yamerl
+pkg_yamerl_fetch = git
+pkg_yamerl_repo = https://github.com/yakaz/yamerl
+pkg_yamerl_commit = master
+
+PACKAGES += yamler
+pkg_yamler_name = yamler
+pkg_yamler_description = libyaml-based yaml loader for Erlang
+pkg_yamler_homepage = https://github.com/goertzenator/yamler
+pkg_yamler_fetch = git
+pkg_yamler_repo = https://github.com/goertzenator/yamler
+pkg_yamler_commit = master
+
+PACKAGES += yaws
+pkg_yaws_name = yaws
+pkg_yaws_description = Yaws webserver
+pkg_yaws_homepage = http://yaws.hyber.org
+pkg_yaws_fetch = git
+pkg_yaws_repo = https://github.com/klacke/yaws
+pkg_yaws_commit = master
+
+PACKAGES += zab_engine
+pkg_zab_engine_name = zab_engine
+pkg_zab_engine_description = ZAB protocol implemented in Erlang
+pkg_zab_engine_homepage = https://github.com/xinmingyao/zab_engine
+pkg_zab_engine_fetch = git
+pkg_zab_engine_repo = https://github.com/xinmingyao/zab_engine
+pkg_zab_engine_commit = master
+
+PACKAGES += zabbix_sender
+pkg_zabbix_sender_name = zabbix_sender
+pkg_zabbix_sender_description = Zabbix trapper for sending data to Zabbix in pure Erlang
+pkg_zabbix_sender_homepage = https://github.com/stalkermn/zabbix_sender
+pkg_zabbix_sender_fetch = git
+pkg_zabbix_sender_repo = https://github.com/stalkermn/zabbix_sender.git
+pkg_zabbix_sender_commit = master
+
+PACKAGES += zeta
+pkg_zeta_name = zeta
+pkg_zeta_description = HTTP access log parser in Erlang
+pkg_zeta_homepage = https://github.com/s1n4/zeta
+pkg_zeta_fetch = git
+pkg_zeta_repo = https://github.com/s1n4/zeta
+pkg_zeta_commit = master
+
+PACKAGES += zippers
+pkg_zippers_name = zippers
+pkg_zippers_description = A library for functional zipper data structures in Erlang.
+pkg_zippers_homepage = https://github.com/ferd/zippers
+pkg_zippers_fetch = git
+pkg_zippers_repo = https://github.com/ferd/zippers
+pkg_zippers_commit = master
+
+PACKAGES += zlists
+pkg_zlists_name = zlists
+pkg_zlists_description = Erlang lazy lists library.
+pkg_zlists_homepage = https://github.com/vjache/erlang-zlists
+pkg_zlists_fetch = git
+pkg_zlists_repo = https://github.com/vjache/erlang-zlists
+pkg_zlists_commit = master
+
+PACKAGES += zraft_lib
+pkg_zraft_lib_name = zraft_lib
+pkg_zraft_lib_description = Erlang Raft consensus protocol implementation
+pkg_zraft_lib_homepage = https://github.com/dreyk/zraft_lib
+pkg_zraft_lib_fetch = git
+pkg_zraft_lib_repo = https://github.com/dreyk/zraft_lib
+pkg_zraft_lib_commit = master
+
+PACKAGES += zucchini
+pkg_zucchini_name = zucchini
+pkg_zucchini_description = An Erlang INI parser
+pkg_zucchini_homepage = https://github.com/devinus/zucchini
+pkg_zucchini_fetch = git
+pkg_zucchini_repo = https://github.com/devinus/zucchini
+pkg_zucchini_commit = master
+
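+# Usage sketch (an illustration, not part of the index itself): a project
+# Makefile picks packages from this index simply by naming them, e.g.
+#
+#   DEPS = ranch poolboy
+#
+# and may pin a commit other than the index default with
+#
+#   dep_ranch_commit = 1.7.1
+#
+# since dep_*_commit takes precedence over pkg_*_commit in the query
+# functions defined later in this file.
+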
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: search
+
+define pkg_print
+ $(verbose) printf "%s\n" \
+ $(if $(call core_eq,$(1),$(pkg_$(1)_name)),,"Pkg name: $(1)") \
+ "App name: $(pkg_$(1)_name)" \
+ "Description: $(pkg_$(1)_description)" \
+ "Home page: $(pkg_$(1)_homepage)" \
+ "Fetch with: $(pkg_$(1)_fetch)" \
+ "Repository: $(pkg_$(1)_repo)" \
+ "Commit: $(pkg_$(1)_commit)" \
+ ""
+
+endef
+
+search:
+ifdef q
+ $(foreach p,$(PACKAGES), \
+ $(if $(findstring $(call core_lc,$(q)),$(call core_lc,$(pkg_$(p)_name) $(pkg_$(p)_description))), \
+ $(call pkg_print,$(p))))
+else
+ $(foreach p,$(PACKAGES),$(call pkg_print,$(p)))
+endif
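+
+# Example invocations (a sketch of expected behavior, assuming a standard
+# erlang.mk project Makefile):
+#
+#   make search           # print every package in the index
+#   make search q=pool    # case-insensitive match on names and descriptions,
+#                         # e.g. poolboy, pooler, octopus, riakpool
+#
+# Matching only covers pkg_*_name and pkg_*_description (lowercased via
+# core_lc), not the repository URL.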
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-deps clean-tmp-deps.log
+
+# Configuration.
+
+ifdef OTP_DEPS
+$(warning The variable OTP_DEPS is deprecated in favor of LOCAL_DEPS.)
+endif
+
+IGNORE_DEPS ?=
+export IGNORE_DEPS
+
+APPS_DIR ?= $(CURDIR)/apps
+export APPS_DIR
+
+DEPS_DIR ?= $(CURDIR)/deps
+export DEPS_DIR
+
+REBAR_DEPS_DIR = $(DEPS_DIR)
+export REBAR_DEPS_DIR
+
+REBAR_GIT ?= https://github.com/rebar/rebar
+REBAR_COMMIT ?= 576e12171ab8d69b048b827b92aa65d067deea01
+
+# External "early" plugins (see core/plugins.mk for regular plugins).
+# They both use the core_dep_plugin macro.
+
+define core_dep_plugin
+ifeq ($(2),$(PROJECT))
+-include $$(patsubst $(PROJECT)/%,%,$(1))
+else
+-include $(DEPS_DIR)/$(1)
+
+$(DEPS_DIR)/$(1): $(DEPS_DIR)/$(2) ;
+endif
+endef
+
+DEP_EARLY_PLUGINS ?=
+
+$(foreach p,$(DEP_EARLY_PLUGINS),\
+ $(eval $(if $(findstring /,$p),\
+ $(call core_dep_plugin,$p,$(firstword $(subst /, ,$p))),\
+ $(call core_dep_plugin,$p/early-plugins.mk,$p))))
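+
+# Illustrative sketch (assumed usage, with a hypothetical dependency named
+# "mydep"): an entry without a slash loads that dependency's default
+# early-plugins.mk, while an entry with a slash names an exact file.
+#
+#   DEP_EARLY_PLUGINS = mydep              # -> $(DEPS_DIR)/mydep/early-plugins.mk
+#   DEP_EARLY_PLUGINS = mydep/mk/early.mk  # -> $(DEPS_DIR)/mydep/mk/early.mk
+#
+# When the entry belongs to $(PROJECT) itself, core_dep_plugin includes the
+# file relative to the project instead of $(DEPS_DIR).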
+
+# Query functions.
+
+query_fetch_method = $(if $(dep_$(1)),$(call _qfm_dep,$(word 1,$(dep_$(1)))),$(call _qfm_pkg,$(1)))
+_qfm_dep = $(if $(dep_fetch_$(1)),$(1),$(if $(IS_DEP),legacy,fail))
+_qfm_pkg = $(if $(pkg_$(1)_fetch),$(pkg_$(1)_fetch),fail)
+
+query_name = $(if $(dep_$(1)),$(1),$(if $(pkg_$(1)_name),$(pkg_$(1)_name),$(1)))
+
+query_repo = $(call _qr,$(1),$(call query_fetch_method,$(1)))
+_qr = $(if $(query_repo_$(2)),$(call query_repo_$(2),$(1)),$(call dep_repo,$(1)))
+
+query_repo_default = $(if $(dep_$(1)),$(word 2,$(dep_$(1))),$(pkg_$(1)_repo))
+query_repo_git = $(patsubst git://github.com/%,https://github.com/%,$(call query_repo_default,$(1)))
+query_repo_git-subfolder = $(call query_repo_git,$(1))
+query_repo_git-submodule = -
+query_repo_hg = $(call query_repo_default,$(1))
+query_repo_svn = $(call query_repo_default,$(1))
+query_repo_cp = $(call query_repo_default,$(1))
+query_repo_ln = $(call query_repo_default,$(1))
+query_repo_hex = https://hex.pm/packages/$(if $(word 3,$(dep_$(1))),$(word 3,$(dep_$(1))),$(1))
+query_repo_fail = -
+query_repo_legacy = -
+
+query_version = $(call _qv,$(1),$(call query_fetch_method,$(1)))
+_qv = $(if $(query_version_$(2)),$(call query_version_$(2),$(1)),$(call dep_commit,$(1)))
+
+query_version_default = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(word 3,$(dep_$(1))),$(pkg_$(1)_commit)))
+query_version_git = $(call query_version_default,$(1))
+query_version_git-subfolder = $(call query_version_git,$(1))
+query_version_git-submodule = -
+query_version_hg = $(call query_version_default,$(1))
+query_version_svn = -
+query_version_cp = -
+query_version_ln = -
+query_version_hex = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(word 2,$(dep_$(1))),$(pkg_$(1)_commit)))
+query_version_fail = -
+query_version_legacy = -
+
+query_extra = $(call _qe,$(1),$(call query_fetch_method,$(1)))
+_qe = $(if $(query_extra_$(2)),$(call query_extra_$(2),$(1)),-)
+
+query_extra_git = -
+query_extra_git-subfolder = $(if $(dep_$(1)),subfolder=$(word 4,$(dep_$(1))),-)
+query_extra_git-submodule = -
+query_extra_hg = -
+query_extra_svn = -
+query_extra_cp = -
+query_extra_ln = -
+query_extra_hex = $(if $(dep_$(1)),package-name=$(word 3,$(dep_$(1))),-)
+query_extra_fail = -
+query_extra_legacy = -
+
+query_absolute_path = $(addprefix $(DEPS_DIR)/,$(call query_name,$(1)))
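+
+# Illustrative sketch (hypothetical declaration): given
+#
+#   DEPS = cowlib
+#   dep_cowlib = git https://github.com/ninenines/cowlib 2.0.0
+#
+# the query functions would resolve along these lines:
+#
+#   $(call query_fetch_method,cowlib)  -> git
+#   $(call query_repo,cowlib)          -> https://github.com/ninenines/cowlib
+#   $(call query_version,cowlib)       -> 2.0.0
+#   $(call query_absolute_path,cowlib) -> $(DEPS_DIR)/cowlib
+#
+# Packages from the index above resolve the same way, with the values coming
+# from pkg_*_fetch/pkg_*_repo/pkg_*_commit instead of dep_*.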
+
+# Deprecated legacy query functions.
+dep_fetch = $(call query_fetch_method,$(1))
+dep_name = $(call query_name,$(1))
+dep_repo = $(call query_repo_git,$(1))
+dep_commit = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(if $(filter hex,$(word 1,$(dep_$(1)))),$(word 2,$(dep_$(1))),$(word 3,$(dep_$(1)))),$(pkg_$(1)_commit)))
+
+LOCAL_DEPS_DIRS = $(foreach a,$(LOCAL_DEPS),$(if $(wildcard $(APPS_DIR)/$(a)),$(APPS_DIR)/$(a)))
+ALL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(foreach dep,$(filter-out $(IGNORE_DEPS),$(BUILD_DEPS) $(DEPS)),$(call dep_name,$(dep))))
+
+# When we are calling an app directly we don't want to include it here;
+# otherwise it'll be treated both as an app and as a top-level project.
+ALL_APPS_DIRS = $(if $(wildcard $(APPS_DIR)/),$(filter-out $(APPS_DIR),$(shell find $(APPS_DIR) -maxdepth 1 -type d)))
+ifdef ROOT_DIR
+ifndef IS_APP
+ALL_APPS_DIRS := $(filter-out $(APPS_DIR)/$(notdir $(CURDIR)),$(ALL_APPS_DIRS))
+endif
+endif
+
+ifeq ($(filter $(APPS_DIR) $(DEPS_DIR),$(subst :, ,$(ERL_LIBS))),)
+ifeq ($(ERL_LIBS),)
+ ERL_LIBS = $(APPS_DIR):$(DEPS_DIR)
+else
+ ERL_LIBS := $(ERL_LIBS):$(APPS_DIR):$(DEPS_DIR)
+endif
+endif
+export ERL_LIBS
+
+export NO_AUTOPATCH
+
+# Verbosity.
+
+dep_verbose_0 = @echo " DEP $1 ($(call dep_commit,$1))";
+dep_verbose_2 = set -x;
+dep_verbose = $(dep_verbose_$(V))
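+# With V=0 a terse " DEP name (commit)" line is printed per dependency;
+# with V=2 the fetch commands are traced with "set -x" instead.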
+
+# Optimization: don't recompile deps unless truly necessary.
+
+ifndef IS_DEP
+ifneq ($(MAKELEVEL),0)
+$(shell rm -f ebin/dep_built)
+endif
+endif
+
+# Core targets.
+
+ALL_APPS_DIRS_TO_BUILD = $(if $(LOCAL_DEPS_DIRS)$(IS_APP),$(LOCAL_DEPS_DIRS),$(ALL_APPS_DIRS))
+
+apps:: $(ALL_APPS_DIRS) clean-tmp-deps.log | $(ERLANG_MK_TMP)
+# Create an ebin directory for each app to make sure Erlang recognizes them
+# as proper OTP applications when using -include_lib. This is a temporary
+# fix; a proper fix would be to compile apps/* in the right order.
+ifndef IS_APP
+ifneq ($(ALL_APPS_DIRS),)
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ mkdir -p $$dep/ebin; \
+ done
+endif
+endif
+# At the toplevel: if LOCAL_DEPS is defined with at least one local app, only
+# compile that list of apps. Otherwise, compile everything.
+# Within an app: compile all LOCAL_DEPS that are (uncompiled) local apps.
+ifneq ($(ALL_APPS_DIRS_TO_BUILD),)
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS_TO_BUILD); do \
+ if grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/apps.log; then \
+ :; \
+ else \
+ echo $$dep >> $(ERLANG_MK_TMP)/apps.log; \
+ $(MAKE) -C $$dep $(if $(IS_TEST),test-build-app) IS_APP=1; \
+ fi \
+ done
+endif
+
+clean-tmp-deps.log:
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_TMP)/apps.log $(ERLANG_MK_TMP)/deps.log
+endif
+
+# Erlang.mk does not rebuild dependencies after they have been compiled
+# once. If a developer is working on the top-level project and on some
+# dependencies at the same time, they may want to change this behavior.
+# There are two solutions:
+# 1. Set `FULL=1` so that all dependencies are visited and
+# recursively recompiled if necessary.
+# 2. Set `FORCE_REBUILD=` to the specific list of dependencies that
+# should be recompiled (instead of the whole set).
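+#
+# Illustrative invocations (dependency names are hypothetical):
+#   make FULL=1
+#   make FORCE_REBUILD="cowlib ranch"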
+
+FORCE_REBUILD ?=
+
+ifeq ($(origin FULL),undefined)
+ifneq ($(strip $(force_rebuild_dep)$(FORCE_REBUILD)),)
+define force_rebuild_dep
+echo "$(FORCE_REBUILD)" | grep -qw "$$(basename "$1")"
+endef
+endif
+endif
+
+ifneq ($(SKIP_DEPS),)
+deps::
+else
+deps:: $(ALL_DEPS_DIRS) apps clean-tmp-deps.log | $(ERLANG_MK_TMP)
+ifneq ($(ALL_DEPS_DIRS),)
+ $(verbose) set -e; for dep in $(ALL_DEPS_DIRS); do \
+ if grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/deps.log; then \
+ :; \
+ else \
+ echo $$dep >> $(ERLANG_MK_TMP)/deps.log; \
+ if [ -z "$(strip $(FULL))" ] $(if $(force_rebuild_dep),&& ! ($(call force_rebuild_dep,$$dep)),) && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ elif [ -f $$dep/GNUmakefile ] || [ -f $$dep/makefile ] || [ -f $$dep/Makefile ]; then \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ else \
+ echo "Error: No Makefile to build dependency $$dep." >&2; \
+ exit 2; \
+ fi \
+ fi \
+ done
+endif
+endif
+
+# Deps related targets.
+
+# @todo rename GNUmakefile and makefile into Makefile first, if they exist
+# While the Makefile could also be named GNUmakefile or makefile,
+# in practice only Makefile is needed so far.
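+#
+# dep_autopatch picks a strategy per dependency: projects that ship their own
+# erlang.mk are pointed at the parent Erlang.mk; Makefile projects that
+# reference rebar are converted through dep_autopatch2; projects without a
+# src/ directory get a noop Makefile.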
+define dep_autopatch
+ if [ -f $(DEPS_DIR)/$(1)/erlang.mk ]; then \
+ rm -rf $(DEPS_DIR)/$1/ebin/; \
+ $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+ $(call dep_autopatch_erlang_mk,$(1)); \
+ elif [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+ if [ -f $(DEPS_DIR)/$1/rebar.lock ]; then \
+ $(call dep_autopatch2,$1); \
+ elif [ 0 != `grep -c "include ../\w*\.mk" $(DEPS_DIR)/$(1)/Makefile` ]; then \
+ $(call dep_autopatch2,$(1)); \
+ elif [ 0 != `grep -ci "^[^#].*rebar" $(DEPS_DIR)/$(1)/Makefile` ]; then \
+ $(call dep_autopatch2,$(1)); \
+ elif [ -n "`find $(DEPS_DIR)/$(1)/ -type f -name \*.mk -not -name erlang.mk -exec grep -i "^[^#].*rebar" '{}' \;`" ]; then \
+ $(call dep_autopatch2,$(1)); \
+ fi \
+ else \
+ if [ ! -d $(DEPS_DIR)/$(1)/src/ ]; then \
+ $(call dep_autopatch_noop,$(1)); \
+ else \
+ $(call dep_autopatch2,$(1)); \
+ fi \
+ fi
+endef
+
+define dep_autopatch2
+ ! test -f $(DEPS_DIR)/$1/ebin/$1.app || \
+ mv -n $(DEPS_DIR)/$1/ebin/$1.app $(DEPS_DIR)/$1/src/$1.app.src; \
+ rm -f $(DEPS_DIR)/$1/ebin/$1.app; \
+ if [ -f $(DEPS_DIR)/$1/src/$1.app.src.script ]; then \
+ $(call erlang,$(call dep_autopatch_appsrc_script.erl,$(1))); \
+ fi; \
+ $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+ if [ -f $(DEPS_DIR)/$(1)/rebar -o -f $(DEPS_DIR)/$(1)/rebar.config -o -f $(DEPS_DIR)/$(1)/rebar.config.script -o -f $(DEPS_DIR)/$1/rebar.lock ]; then \
+ $(call dep_autopatch_fetch_rebar); \
+ $(call dep_autopatch_rebar,$(1)); \
+ else \
+ $(call dep_autopatch_gen,$(1)); \
+ fi
+endef
+
+define dep_autopatch_noop
+ printf "noop:\n" > $(DEPS_DIR)/$(1)/Makefile
+endef
+
+# Replace "include erlang.mk" with a line that will load the parent Erlang.mk
+# if given. Do it for all 3 possible Makefile file names.
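+# For example, a dependency's "include erlang.mk" line is rewritten to
+# "include $(if $(ERLANG_MK_FILENAME),$(ERLANG_MK_FILENAME),erlang.mk)".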
+ifeq ($(NO_AUTOPATCH_ERLANG_MK),)
+define dep_autopatch_erlang_mk
+ for f in Makefile makefile GNUmakefile; do \
+ if [ -f $(DEPS_DIR)/$1/$$f ]; then \
+ sed -i.bak s/'include *erlang.mk'/'include $$(if $$(ERLANG_MK_FILENAME),$$(ERLANG_MK_FILENAME),erlang.mk)'/ $(DEPS_DIR)/$1/$$f; \
+ fi \
+ done
+endef
+else
+define dep_autopatch_erlang_mk
+ :
+endef
+endif
+
+define dep_autopatch_gen
+ printf "%s\n" \
+ "ERLC_OPTS = +debug_info" \
+ "include ../../erlang.mk" > $(DEPS_DIR)/$(1)/Makefile
+endef
+
+# We use flock/lockf when available to avoid concurrency issues when
+# several dependency builds try to fetch and bootstrap rebar at once.
+define dep_autopatch_fetch_rebar
+ if command -v flock >/dev/null; then \
+ flock $(ERLANG_MK_TMP)/rebar.lock sh -c "$(call dep_autopatch_fetch_rebar2)"; \
+ elif command -v lockf >/dev/null; then \
+ lockf $(ERLANG_MK_TMP)/rebar.lock sh -c "$(call dep_autopatch_fetch_rebar2)"; \
+ else \
+ $(call dep_autopatch_fetch_rebar2); \
+ fi
+endef
+
+define dep_autopatch_fetch_rebar2
+ if [ ! -d $(ERLANG_MK_TMP)/rebar ]; then \
+ git clone -q -n -- $(REBAR_GIT) $(ERLANG_MK_TMP)/rebar; \
+ cd $(ERLANG_MK_TMP)/rebar; \
+ git checkout -q $(REBAR_COMMIT); \
+ ./bootstrap; \
+ cd -; \
+ fi
+endef
+
+define dep_autopatch_rebar
+ if [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+ mv $(DEPS_DIR)/$(1)/Makefile $(DEPS_DIR)/$(1)/Makefile.orig.mk; \
+ fi; \
+ $(call erlang,$(call dep_autopatch_rebar.erl,$(1))); \
+ rm -f $(DEPS_DIR)/$(1)/ebin/$(1).app
+endef
+
+define dep_autopatch_rebar.erl
+ application:load(rebar),
+ application:set_env(rebar, log_level, debug),
+ rmemo:start(),
+ Conf1 = case file:consult("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config)") of
+ {ok, Conf0} -> Conf0;
+ _ -> []
+ end,
+ {Conf, OsEnv} = fun() ->
+ case filelib:is_file("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)") of
+ false -> {Conf1, []};
+ true ->
+ Bindings0 = erl_eval:new_bindings(),
+ Bindings1 = erl_eval:add_binding('CONFIG', Conf1, Bindings0),
+ Bindings = erl_eval:add_binding('SCRIPT', "$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings1),
+ Before = os:getenv(),
+ {ok, Conf2} = file:script("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings),
+ {Conf2, lists:foldl(fun(E, Acc) -> lists:delete(E, Acc) end, os:getenv(), Before)}
+ end
+ end(),
+ Write = fun (Text) ->
+ file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/Makefile)", Text, [append])
+ end,
+ Escape = fun (Text) ->
+ re:replace(Text, "\\\\$$", "\$$$$", [global, {return, list}])
+ end,
+ Write("IGNORE_DEPS += edown eper eunit_formatters meck node_package "
+ "rebar_lock_deps_plugin rebar_vsn_plugin reltool_util\n"),
+ Write("C_SRC_DIR = /path/do/not/exist\n"),
+ Write("C_SRC_TYPE = rebar\n"),
+ Write("DRV_CFLAGS = -fPIC\nexport DRV_CFLAGS\n"),
+ Write(["ERLANG_ARCH = ", rebar_utils:wordsize(), "\nexport ERLANG_ARCH\n"]),
+ ToList = fun
+ (V) when is_atom(V) -> atom_to_list(V);
+ (V) when is_list(V) -> "'\\"" ++ V ++ "\\"'"
+ end,
+ fun() ->
+ Write("ERLC_OPTS = +debug_info\nexport ERLC_OPTS\n"),
+ case lists:keyfind(erl_opts, 1, Conf) of
+ false -> ok;
+ {_, ErlOpts} ->
+ lists:foreach(fun
+ ({d, D}) ->
+ Write("ERLC_OPTS += -D" ++ ToList(D) ++ "=1\n");
+ ({d, DKey, DVal}) ->
+ Write("ERLC_OPTS += -D" ++ ToList(DKey) ++ "=" ++ ToList(DVal) ++ "\n");
+ ({i, I}) ->
+ Write(["ERLC_OPTS += -I ", I, "\n"]);
+ ({platform_define, Regex, D}) ->
+ case rebar_utils:is_arch(Regex) of
+ true -> Write("ERLC_OPTS += -D" ++ ToList(D) ++ "=1\n");
+ false -> ok
+ end;
+ ({parse_transform, PT}) ->
+ Write("ERLC_OPTS += +'{parse_transform, " ++ ToList(PT) ++ "}'\n");
+ (_) -> ok
+ end, ErlOpts)
+ end,
+ Write("\n")
+ end(),
+ GetHexVsn = fun(N, NP) ->
+ case file:consult("$(call core_native_path,$(DEPS_DIR)/$1/rebar.lock)") of
+ {ok, Lock} ->
+ io:format("~p~n", [Lock]),
+ case lists:keyfind("1.1.0", 1, Lock) of
+ {_, LockPkgs} ->
+ io:format("~p~n", [LockPkgs]),
+ case lists:keyfind(atom_to_binary(N, latin1), 1, LockPkgs) of
+ {_, {pkg, _, Vsn}, _} ->
+ io:format("~p~n", [Vsn]),
+ {N, {hex, NP, binary_to_list(Vsn)}};
+ _ ->
+ false
+ end;
+ _ ->
+ false
+ end;
+ _ ->
+ false
+ end
+ end,
+ SemVsn = fun
+ ("~>" ++ S0) ->
+ S = case S0 of
+ " " ++ S1 -> S1;
+ _ -> S0
+ end,
+ case length([ok || $$. <- S]) of
+ 0 -> S ++ ".0.0";
+ 1 -> S ++ ".0";
+ _ -> S
+ end;
+ (S) -> S
+ end,
+ fun() ->
+ File = case lists:keyfind(deps, 1, Conf) of
+ false -> [];
+ {_, Deps} ->
+ [begin case case Dep of
+ N when is_atom(N) -> GetHexVsn(N, N);
+ {N, S} when is_atom(N), is_list(S) -> {N, {hex, N, SemVsn(S)}};
+ {N, {pkg, NP}} when is_atom(N) -> GetHexVsn(N, NP);
+ {N, S, {pkg, NP}} -> {N, {hex, NP, S}};
+ {N, S} when is_tuple(S) -> {N, S};
+ {N, _, S} -> {N, S};
+ {N, _, S, _} -> {N, S};
+ _ -> false
+ end of
+ false -> ok;
+ {Name, Source} ->
+ {Method, Repo, Commit} = case Source of
+ {hex, NPV, V} -> {hex, V, NPV};
+ {git, R} -> {git, R, master};
+ {M, R, {branch, C}} -> {M, R, C};
+ {M, R, {ref, C}} -> {M, R, C};
+ {M, R, {tag, C}} -> {M, R, C};
+ {M, R, C} -> {M, R, C}
+ end,
+ Write(io_lib:format("DEPS += ~s\ndep_~s = ~s ~s ~s~n", [Name, Name, Method, Repo, Commit]))
+ end end || Dep <- Deps]
+ end
+ end(),
+ fun() ->
+ case lists:keyfind(erl_first_files, 1, Conf) of
+ false -> ok;
+ {_, Files} ->
+ Names = [[" ", case lists:reverse(F) of
+ "lre." ++ Elif -> lists:reverse(Elif);
+ "lrx." ++ Elif -> lists:reverse(Elif);
+ "lry." ++ Elif -> lists:reverse(Elif);
+ Elif -> lists:reverse(Elif)
+ end] || "src/" ++ F <- Files],
+ Write(io_lib:format("COMPILE_FIRST +=~s\n", [Names]))
+ end
+ end(),
+ Write("\n\nrebar_dep: preprocess pre-deps deps pre-app app\n"),
+ Write("\npreprocess::\n"),
+ Write("\npre-deps::\n"),
+ Write("\npre-app::\n"),
+ PatchHook = fun(Cmd) ->
+ Cmd2 = re:replace(Cmd, "^([g]?make)(.*)( -C.*)", "\\\\1\\\\3\\\\2", [{return, list}]),
+ case Cmd2 of
+ "make -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1);
+ "gmake -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1);
+ "make " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1);
+ "gmake " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1);
+ _ -> Escape(Cmd)
+ end
+ end,
+ fun() ->
+ case lists:keyfind(pre_hooks, 1, Conf) of
+ false -> ok;
+ {_, Hooks} ->
+ [case H of
+ {'get-deps', Cmd} ->
+ Write("\npre-deps::\n\t" ++ PatchHook(Cmd) ++ "\n");
+ {compile, Cmd} ->
+ Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n");
+ {Regex, compile, Cmd} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n");
+ false -> ok
+ end;
+ _ -> ok
+ end || H <- Hooks]
+ end
+ end(),
+ ShellToMk = fun(V0) ->
+ V1 = re:replace(V0, "[$$][(]", "$$\(shell ", [global]),
+ V = re:replace(V1, "([$$])(?![(])(\\\\w*)", "\\\\1(\\\\2)", [global]),
+ re:replace(V, "-Werror\\\\b", "", [{return, list}, global])
+ end,
+ PortSpecs = fun() ->
+ case lists:keyfind(port_specs, 1, Conf) of
+ false ->
+ case filelib:is_dir("$(call core_native_path,$(DEPS_DIR)/$1/c_src)") of
+ false -> [];
+ true ->
+ [{"priv/" ++ proplists:get_value(so_name, Conf, "$(1)_drv.so"),
+ proplists:get_value(port_sources, Conf, ["c_src/*.c"]), []}]
+ end;
+ {_, Specs} ->
+ lists:flatten([case S of
+ {Output, Input} -> {ShellToMk(Output), Input, []};
+ {Regex, Output, Input} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {ShellToMk(Output), Input, []};
+ false -> []
+ end;
+ {Regex, Output, Input, [{env, Env}]} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {ShellToMk(Output), Input, Env};
+ false -> []
+ end
+ end || S <- Specs])
+ end
+ end(),
+ PortSpecWrite = fun (Text) ->
+ file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/c_src/Makefile.erlang.mk)", Text, [append])
+ end,
+ case PortSpecs of
+ [] -> ok;
+ _ ->
+ Write("\npre-app::\n\t@$$\(MAKE) --no-print-directory -f c_src/Makefile.erlang.mk\n"),
+ PortSpecWrite(io_lib:format("ERL_CFLAGS ?= -finline-functions -Wall -fPIC -I \\"~s/erts-~s/include\\" -I \\"~s\\"\n",
+ [code:root_dir(), erlang:system_info(version), code:lib_dir(erl_interface, include)])),
+ PortSpecWrite(io_lib:format("ERL_LDFLAGS ?= -L \\"~s\\" -lei\n",
+ [code:lib_dir(erl_interface, lib)])),
+ [PortSpecWrite(["\n", E, "\n"]) || E <- OsEnv],
+ FilterEnv = fun(Env) ->
+ lists:flatten([case E of
+ {_, _} -> E;
+ {Regex, K, V} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {K, V};
+ false -> []
+ end
+ end || E <- Env])
+ end,
+ MergeEnv = fun(Env) ->
+ lists:foldl(fun ({K, V}, Acc) ->
+ case lists:keyfind(K, 1, Acc) of
+ false -> [{K, rebar_utils:expand_env_variable(V, K, "")}|Acc];
+ {_, V0} -> [{K, rebar_utils:expand_env_variable(V, K, V0)}|Acc]
+ end
+ end, [], Env)
+ end,
+ PortEnv = case lists:keyfind(port_env, 1, Conf) of
+ false -> [];
+ {_, PortEnv0} -> FilterEnv(PortEnv0)
+ end,
+ PortSpec = fun ({Output, Input0, Env}) ->
+ filelib:ensure_dir("$(call core_native_path,$(DEPS_DIR)/$1/)" ++ Output),
+ Input = [[" ", I] || I <- Input0],
+ PortSpecWrite([
+ [["\n", K, " = ", ShellToMk(V)] || {K, V} <- lists:reverse(MergeEnv(PortEnv))],
+ case $(PLATFORM) of
+ darwin -> "\n\nLDFLAGS += -flat_namespace -undefined suppress";
+ _ -> ""
+ end,
+ "\n\nall:: ", Output, "\n\t@:\n\n",
+ "%.o: %.c\n\t$$\(CC) -c -o $$\@ $$\< $$\(CFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.C\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.cc\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.cpp\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ [[Output, ": ", K, " += ", ShellToMk(V), "\n"] || {K, V} <- lists:reverse(MergeEnv(FilterEnv(Env)))],
+ Output, ": $$\(foreach ext,.c .C .cc .cpp,",
+ "$$\(patsubst %$$\(ext),%.o,$$\(filter %$$\(ext),$$\(wildcard", Input, "))))\n",
+ "\t$$\(CC) -o $$\@ $$\? $$\(LDFLAGS) $$\(ERL_LDFLAGS) $$\(DRV_LDFLAGS) $$\(EXE_LDFLAGS)",
+ case {filename:extension(Output), $(PLATFORM)} of
+ {[], _} -> "\n";
+ {_, darwin} -> "\n";
+ _ -> " -shared\n"
+ end])
+ end,
+ [PortSpec(S) || S <- PortSpecs]
+ end,
+ fun() ->
+ case lists:keyfind(plugins, 1, Conf) of
+ false -> ok;
+ {_, Plugins0} ->
+ Plugins = [P || P <- Plugins0, is_tuple(P)],
+ case lists:keyfind('lfe-compile', 1, Plugins) of
+ false -> ok;
+ _ -> Write("\nBUILD_DEPS = lfe lfe.mk\ndep_lfe.mk = git https://github.com/ninenines/lfe.mk master\nDEP_PLUGINS = lfe.mk\n")
+ end
+ end
+ end(),
+ Write("\ninclude $$\(if $$\(ERLANG_MK_FILENAME),$$\(ERLANG_MK_FILENAME),erlang.mk)"),
+ RunPlugin = fun(Plugin, Step) ->
+ case erlang:function_exported(Plugin, Step, 2) of
+ false -> ok;
+ true ->
+ c:cd("$(call core_native_path,$(DEPS_DIR)/$1/)"),
+ Ret = Plugin:Step({config, "", Conf, dict:new(), dict:new(), dict:new(),
+ dict:store(base_dir, "", dict:new())}, undefined),
+ io:format("rebar plugin ~p step ~p ret ~p~n", [Plugin, Step, Ret])
+ end
+ end,
+ fun() ->
+ case lists:keyfind(plugins, 1, Conf) of
+ false -> ok;
+ {_, Plugins0} ->
+ Plugins = [P || P <- Plugins0, is_atom(P)],
+ [begin
+ case lists:keyfind(deps, 1, Conf) of
+ false -> ok;
+ {_, Deps} ->
+ case lists:keyfind(P, 1, Deps) of
+ false -> ok;
+ _ ->
+ Path = "$(call core_native_path,$(DEPS_DIR)/)" ++ atom_to_list(P),
+ io:format("~s", [os:cmd("$(MAKE) -C $(call core_native_path,$(DEPS_DIR)/$1) " ++ Path)]),
+ io:format("~s", [os:cmd("$(MAKE) -C " ++ Path ++ " IS_DEP=1")]),
+ code:add_patha(Path ++ "/ebin")
+ end
+ end
+ end || P <- Plugins],
+ [case code:load_file(P) of
+ {module, P} -> ok;
+ _ ->
+ case lists:keyfind(plugin_dir, 1, Conf) of
+ false -> ok;
+ {_, PluginsDir} ->
+ ErlFile = "$(call core_native_path,$(DEPS_DIR)/$1/)" ++ PluginsDir ++ "/" ++ atom_to_list(P) ++ ".erl",
+ {ok, P, Bin} = compile:file(ErlFile, [binary]),
+ {module, P} = code:load_binary(P, ErlFile, Bin)
+ end
+ end || P <- Plugins],
+ [RunPlugin(P, preprocess) || P <- Plugins],
+ [RunPlugin(P, pre_compile) || P <- Plugins],
+ [RunPlugin(P, compile) || P <- Plugins]
+ end
+ end(),
+ halt()
+endef
+
+define dep_autopatch_appsrc_script.erl
+ AppSrc = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+ AppSrcScript = AppSrc ++ ".script",
+ {ok, Conf0} = file:consult(AppSrc),
+ Bindings0 = erl_eval:new_bindings(),
+ Bindings1 = erl_eval:add_binding('CONFIG', Conf0, Bindings0),
+ Bindings = erl_eval:add_binding('SCRIPT', AppSrcScript, Bindings1),
+ Conf = case file:script(AppSrcScript, Bindings) of
+ {ok, [C]} -> C;
+ {ok, C} -> C
+ end,
+ ok = file:write_file(AppSrc, io_lib:format("~p.~n", [Conf])),
+ halt()
+endef
+
+define dep_autopatch_appsrc.erl
+ AppSrcOut = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+ AppSrcIn = case filelib:is_regular(AppSrcOut) of false -> "$(call core_native_path,$(DEPS_DIR)/$1/ebin/$1.app)"; true -> AppSrcOut end,
+ case filelib:is_regular(AppSrcIn) of
+ false -> ok;
+ true ->
+ {ok, [{application, $(1), L0}]} = file:consult(AppSrcIn),
+ L1 = lists:keystore(modules, 1, L0, {modules, []}),
+ L2 = case lists:keyfind(vsn, 1, L1) of
+ {_, git} -> lists:keyreplace(vsn, 1, L1, {vsn, lists:droplast(os:cmd("git -C $(DEPS_DIR)/$1 describe --dirty --tags --always"))});
+ {_, {cmd, _}} -> lists:keyreplace(vsn, 1, L1, {vsn, "cmd"});
+ _ -> L1
+ end,
+ L3 = case lists:keyfind(registered, 1, L2) of false -> [{registered, []}|L2]; _ -> L2 end,
+ ok = file:write_file(AppSrcOut, io_lib:format("~p.~n", [{application, $(1), L3}])),
+ case AppSrcOut of AppSrcIn -> ok; _ -> ok = file:delete(AppSrcIn) end
+ end,
+ halt()
+endef
+
+define dep_fetch_git
+ git clone -q -n -- $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && git checkout -q $(call dep_commit,$(1));
+endef
+
+define dep_fetch_git-subfolder
+ mkdir -p $(ERLANG_MK_TMP)/git-subfolder; \
+ git clone -q -n -- $(call dep_repo,$1) \
+ $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1); \
+ cd $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1) \
+ && git checkout -q $(call dep_commit,$1); \
+ ln -s $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1)/$(word 4,$(dep_$(1))) \
+ $(DEPS_DIR)/$(call dep_name,$1);
+endef
+
+define dep_fetch_git-submodule
+ git submodule update --init -- $(DEPS_DIR)/$1;
+endef
+
+define dep_fetch_hg
+ hg clone -q -U $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && hg update -q $(call dep_commit,$(1));
+endef
+
+define dep_fetch_svn
+ svn checkout -q $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+define dep_fetch_cp
+ cp -R $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+define dep_fetch_ln
+ ln -s $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+# Hex only has a package version. No need to look in the Erlang.mk packages.
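+# Example (illustrative): "dep_jsx = hex 3.1.0" downloads
+# https://repo.hex.pm/tarballs/jsx-3.1.0.tar; an optional third word
+# overrides the package name on hex.pm.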
+define dep_fetch_hex
+ mkdir -p $(ERLANG_MK_TMP)/hex $(DEPS_DIR)/$1; \
+ $(call core_http_get,$(ERLANG_MK_TMP)/hex/$1.tar,\
+ https://repo.hex.pm/tarballs/$(if $(word 3,$(dep_$1)),$(word 3,$(dep_$1)),$1)-$(strip $(word 2,$(dep_$1))).tar); \
+ tar -xOf $(ERLANG_MK_TMP)/hex/$1.tar contents.tar.gz | tar -C $(DEPS_DIR)/$1 -xzf -;
+endef
+
+define dep_fetch_fail
+ echo "Error: Unknown or invalid dependency: $(1)." >&2; \
+ exit 78;
+endef
+
+# Kept for compatibility purposes with older Erlang.mk configuration.
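+# Legacy format example (illustrative): "dep_foo = https://github.com/user/foo master",
+# i.e. only a Git URL and an optional commit, with no fetch method given.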
+define dep_fetch_legacy
+ $(warning WARNING: '$(1)' dependency configuration uses deprecated format.) \
+ git clone -q -n -- $(word 1,$(dep_$(1))) $(DEPS_DIR)/$(1); \
+ cd $(DEPS_DIR)/$(1) && git checkout -q $(if $(word 2,$(dep_$(1))),$(word 2,$(dep_$(1))),master);
+endef
+
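+# dep_target generates the rule that fetches a single dependency into
+# $(DEPS_DIR), runs autoreconf/configure when an autotools setup is
+# detected, then autopatches the result unless the dependency is listed
+# in NO_AUTOPATCH.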
+define dep_target
+$(DEPS_DIR)/$(call dep_name,$1): | $(ERLANG_MK_TMP)
+ $(eval DEP_NAME := $(call dep_name,$1))
+ $(eval DEP_STR := $(if $(filter $1,$(DEP_NAME)),$1,"$1 ($(DEP_NAME))"))
+ $(verbose) if test -d $(APPS_DIR)/$(DEP_NAME); then \
+ echo "Error: Dependency" $(DEP_STR) "conflicts with application found in $(APPS_DIR)/$(DEP_NAME)." >&2; \
+ exit 17; \
+ fi
+ $(verbose) mkdir -p $(DEPS_DIR)
+ $(dep_verbose) $(call dep_fetch_$(strip $(call dep_fetch,$(1))),$(1))
+ $(verbose) if [ -f $(DEPS_DIR)/$(1)/configure.ac -o -f $(DEPS_DIR)/$(1)/configure.in ] \
+ && [ ! -f $(DEPS_DIR)/$(1)/configure ]; then \
+ echo " AUTO " $(DEP_STR); \
+ cd $(DEPS_DIR)/$(1) && autoreconf -Wall -vif -I m4; \
+ fi
+ - $(verbose) if [ -f $(DEPS_DIR)/$(DEP_NAME)/configure ]; then \
+ echo " CONF " $(DEP_STR); \
+ cd $(DEPS_DIR)/$(DEP_NAME) && ./configure; \
+ fi
+ifeq ($(filter $(1),$(NO_AUTOPATCH)),)
+ $(verbose) $$(MAKE) --no-print-directory autopatch-$(DEP_NAME)
+endif
+
+.PHONY: autopatch-$(call dep_name,$1)
+
+autopatch-$(call dep_name,$1)::
+ $(verbose) if [ "$(1)" = "amqp_client" -a "$(RABBITMQ_CLIENT_PATCH)" ]; then \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \
+ echo " PATCH Downloading rabbitmq-codegen"; \
+ git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \
+ fi; \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-server ]; then \
+ echo " PATCH Downloading rabbitmq-server"; \
+ git clone https://github.com/rabbitmq/rabbitmq-server.git $(DEPS_DIR)/rabbitmq-server; \
+ fi; \
+ ln -s $(DEPS_DIR)/amqp_client/deps/rabbit_common-0.0.0 $(DEPS_DIR)/rabbit_common; \
+ elif [ "$(1)" = "rabbit" -a "$(RABBITMQ_SERVER_PATCH)" ]; then \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \
+ echo " PATCH Downloading rabbitmq-codegen"; \
+ git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \
+ fi \
+ elif [ "$1" = "elixir" -a "$(ELIXIR_PATCH)" ]; then \
+ ln -s lib/elixir/ebin $(DEPS_DIR)/elixir/; \
+ else \
+ $$(call dep_autopatch,$(call dep_name,$1)) \
+ fi
+endef
+
+$(foreach dep,$(BUILD_DEPS) $(DEPS),$(eval $(call dep_target,$(dep))))
+
+ifndef IS_APP
+clean:: clean-apps
+
+clean-apps:
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ $(MAKE) -C $$dep clean IS_APP=1; \
+ done
+
+distclean:: distclean-apps
+
+distclean-apps:
+ $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+ $(MAKE) -C $$dep distclean IS_APP=1; \
+ done
+endif
+
+ifndef SKIP_DEPS
+distclean:: distclean-deps
+
+distclean-deps:
+ $(gen_verbose) rm -rf $(DEPS_DIR)
+endif
+
+# Forward-declare variables used in core/deps-tools.mk. This is required
+# in case plugins use them.
+
+ERLANG_MK_RECURSIVE_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-deps-list.log
+ERLANG_MK_RECURSIVE_DOC_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-doc-deps-list.log
+ERLANG_MK_RECURSIVE_REL_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-rel-deps-list.log
+ERLANG_MK_RECURSIVE_TEST_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-test-deps-list.log
+ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-shell-deps-list.log
+
+ERLANG_MK_QUERY_DEPS_FILE = $(ERLANG_MK_TMP)/query-deps.log
+ERLANG_MK_QUERY_DOC_DEPS_FILE = $(ERLANG_MK_TMP)/query-doc-deps.log
+ERLANG_MK_QUERY_REL_DEPS_FILE = $(ERLANG_MK_TMP)/query-rel-deps.log
+ERLANG_MK_QUERY_TEST_DEPS_FILE = $(ERLANG_MK_TMP)/query-test-deps.log
+ERLANG_MK_QUERY_SHELL_DEPS_FILE = $(ERLANG_MK_TMP)/query-shell-deps.log
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: clean-app
+
+# Configuration.
+
+ERLC_OPTS ?= -Werror +debug_info +warn_export_vars +warn_shadow_vars \
+ +warn_obsolete_guard # +bin_opt_info +warn_export_all +warn_missing_spec
+COMPILE_FIRST ?=
+COMPILE_FIRST_PATHS = $(addprefix src/,$(addsuffix .erl,$(COMPILE_FIRST)))
+ERLC_EXCLUDE ?=
+ERLC_EXCLUDE_PATHS = $(addprefix src/,$(addsuffix .erl,$(ERLC_EXCLUDE)))
+
+ERLC_ASN1_OPTS ?=
+
+ERLC_MIB_OPTS ?=
+COMPILE_MIB_FIRST ?=
+COMPILE_MIB_FIRST_PATHS = $(addprefix mibs/,$(addsuffix .mib,$(COMPILE_MIB_FIRST)))
+
+# Verbosity.
+
+app_verbose_0 = @echo " APP " $(PROJECT);
+app_verbose_2 = set -x;
+app_verbose = $(app_verbose_$(V))
+
+appsrc_verbose_0 = @echo " APP " $(PROJECT).app.src;
+appsrc_verbose_2 = set -x;
+appsrc_verbose = $(appsrc_verbose_$(V))
+
+makedep_verbose_0 = @echo " DEPEND" $(PROJECT).d;
+makedep_verbose_2 = set -x;
+makedep_verbose = $(makedep_verbose_$(V))
+
+erlc_verbose_0 = @echo " ERLC " $(filter-out $(patsubst %,%.erl,$(ERLC_EXCLUDE)),\
+ $(filter %.erl %.core,$(?F)));
+erlc_verbose_2 = set -x;
+erlc_verbose = $(erlc_verbose_$(V))
+
+xyrl_verbose_0 = @echo " XYRL " $(filter %.xrl %.yrl,$(?F));
+xyrl_verbose_2 = set -x;
+xyrl_verbose = $(xyrl_verbose_$(V))
+
+asn1_verbose_0 = @echo " ASN1 " $(filter %.asn1,$(?F));
+asn1_verbose_2 = set -x;
+asn1_verbose = $(asn1_verbose_$(V))
+
+mib_verbose_0 = @echo " MIB " $(filter %.bin %.mib,$(?F));
+mib_verbose_2 = set -x;
+mib_verbose = $(mib_verbose_$(V))
+
+ifneq ($(wildcard src/),)
+
+# Targets.
+
+app:: $(if $(wildcard ebin/test),clean) deps
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d
+ $(verbose) $(MAKE) --no-print-directory app-build
+
+ifeq ($(wildcard src/$(PROJECT_MOD).erl),)
+define app_file
+{application, '$(PROJECT)', [
+ {description, "$(PROJECT_DESCRIPTION)"},
+ {vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP),
+ {id$(comma)$(space)"$(1)"}$(comma))
+ {modules, [$(call comma_list,$(2))]},
+ {registered, []},
+ {applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(foreach dep,$(DEPS),$(call dep_name,$(dep))))]},
+ {env, $(subst \,\\,$(PROJECT_ENV))}$(if $(findstring {,$(PROJECT_APP_EXTRA_KEYS)),$(comma)$(newline)$(tab)$(subst \,\\,$(PROJECT_APP_EXTRA_KEYS)),)
+]}.
+endef
+else
+define app_file
+{application, '$(PROJECT)', [
+ {description, "$(PROJECT_DESCRIPTION)"},
+ {vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP),
+ {id$(comma)$(space)"$(1)"}$(comma))
+ {modules, [$(call comma_list,$(2))]},
+ {registered, [$(call comma_list,$(PROJECT)_sup $(PROJECT_REGISTERED))]},
+ {applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(foreach dep,$(DEPS),$(call dep_name,$(dep))))]},
+ {mod, {$(PROJECT_MOD), []}},
+ {env, $(subst \,\\,$(PROJECT_ENV))}$(if $(findstring {,$(PROJECT_APP_EXTRA_KEYS)),$(comma)$(newline)$(tab)$(subst \,\\,$(PROJECT_APP_EXTRA_KEYS)),)
+]}.
+endef
+endif
+
+app-build: ebin/$(PROJECT).app
+ $(verbose) :
+
+# Source files.
+
+ALL_SRC_FILES := $(sort $(call core_find,src/,*))
+
+ERL_FILES := $(filter %.erl,$(ALL_SRC_FILES))
+CORE_FILES := $(filter %.core,$(ALL_SRC_FILES))
+
+# ASN.1 files.
+
+ifneq ($(wildcard asn1/),)
+ASN1_FILES = $(sort $(call core_find,asn1/,*.asn1))
+ERL_FILES += $(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES))))
+
+define compile_asn1
+ $(verbose) mkdir -p include/
+ $(asn1_verbose) erlc -v -I include/ -o asn1/ +noobj $(ERLC_ASN1_OPTS) $(1)
+ $(verbose) mv asn1/*.erl src/
+ -$(verbose) mv asn1/*.hrl include/
+ $(verbose) mv asn1/*.asn1db include/
+endef
+
+$(PROJECT).d:: $(ASN1_FILES)
+ $(if $(strip $?),$(call compile_asn1,$?))
+endif
+
+# SNMP MIB files.
+
+ifneq ($(wildcard mibs/),)
+MIB_FILES = $(sort $(call core_find,mibs/,*.mib))
+
+$(PROJECT).d:: $(COMPILE_MIB_FIRST_PATHS) $(MIB_FILES)
+ $(verbose) mkdir -p include/ priv/mibs/
+ $(mib_verbose) erlc -v $(ERLC_MIB_OPTS) -o priv/mibs/ -I priv/mibs/ $?
+ $(mib_verbose) erlc -o include/ -- $(addprefix priv/mibs/,$(patsubst %.mib,%.bin,$(notdir $?)))
+endif
+
+# Leex and Yecc files.
+
+XRL_FILES := $(filter %.xrl,$(ALL_SRC_FILES))
+XRL_ERL_FILES = $(addprefix src/,$(patsubst %.xrl,%.erl,$(notdir $(XRL_FILES))))
+ERL_FILES += $(XRL_ERL_FILES)
+
+YRL_FILES := $(filter %.yrl,$(ALL_SRC_FILES))
+YRL_ERL_FILES = $(addprefix src/,$(patsubst %.yrl,%.erl,$(notdir $(YRL_FILES))))
+ERL_FILES += $(YRL_ERL_FILES)
+
+$(PROJECT).d:: $(XRL_FILES) $(YRL_FILES)
+ $(if $(strip $?),$(xyrl_verbose) erlc -v -o src/ $(YRL_ERLC_OPTS) $?)
+
+# Erlang and Core Erlang files.
+
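+# makedep.erl scans each source file for -include, -include_lib, behaviour,
+# parse_transform and -import attributes, then writes $(PROJECT).d: one
+# "file:: deps" rule per module plus a COMPILE_FIRST list derived from a
+# topological sort of the resulting dependency graph.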
+define makedep.erl
+ E = ets:new(makedep, [bag]),
+ G = digraph:new([acyclic]),
+ ErlFiles = lists:usort(string:tokens("$(ERL_FILES)", " ")),
+ DepsDir = "$(call core_native_path,$(DEPS_DIR))",
+ AppsDir = "$(call core_native_path,$(APPS_DIR))",
+ DepsDirsSrc = "$(if $(wildcard $(DEPS_DIR)/*/src), $(call core_native_path,$(wildcard $(DEPS_DIR)/*/src)))",
+ DepsDirsInc = "$(if $(wildcard $(DEPS_DIR)/*/include), $(call core_native_path,$(wildcard $(DEPS_DIR)/*/include)))",
+ AppsDirsSrc = "$(if $(wildcard $(APPS_DIR)/*/src), $(call core_native_path,$(wildcard $(APPS_DIR)/*/src)))",
+ AppsDirsInc = "$(if $(wildcard $(APPS_DIR)/*/include), $(call core_native_path,$(wildcard $(APPS_DIR)/*/include)))",
+ DepsDirs = lists:usort(string:tokens(DepsDirsSrc++DepsDirsInc, " ")),
+ AppsDirs = lists:usort(string:tokens(AppsDirsSrc++AppsDirsInc, " ")),
+ Modules = [{list_to_atom(filename:basename(F, ".erl")), F} || F <- ErlFiles],
+ Add = fun (Mod, Dep) ->
+ case lists:keyfind(Dep, 1, Modules) of
+ false -> ok;
+ {_, DepFile} ->
+ {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+ ets:insert(E, {ModFile, DepFile}),
+ digraph:add_vertex(G, Mod),
+ digraph:add_vertex(G, Dep),
+ digraph:add_edge(G, Mod, Dep)
+ end
+ end,
+ AddHd = fun (F, Mod, DepFile) ->
+ case file:open(DepFile, [read]) of
+ {error, enoent} ->
+ ok;
+ {ok, Fd} ->
+ {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+ case ets:match(E, {ModFile, DepFile}) of
+ [] ->
+ ets:insert(E, {ModFile, DepFile}),
+          F(F, Fd, Mod, 0);
+ _ -> ok
+ end
+ end
+ end,
+  SearchHrl = fun
+    F(_Hrl, []) -> {error, enoent};
+    F(Hrl, [Dir|Dirs]) ->
+      HrlF = filename:join([Dir, Hrl]),
+      case filelib:is_file(HrlF) of
+        true -> {ok, HrlF};
+        false -> F(Hrl, Dirs)
+      end
+  end,
+ Attr = fun
+ (_F, Mod, behavior, Dep) ->
+ Add(Mod, Dep);
+ (_F, Mod, behaviour, Dep) ->
+ Add(Mod, Dep);
+ (_F, Mod, compile, {parse_transform, Dep}) ->
+ Add(Mod, Dep);
+ (_F, Mod, compile, Opts) when is_list(Opts) ->
+ case proplists:get_value(parse_transform, Opts) of
+ undefined -> ok;
+ Dep -> Add(Mod, Dep)
+ end;
+ (F, Mod, include, Hrl) ->
+      case SearchHrl(Hrl, ["src", "include", AppsDir, DepsDir] ++ AppsDirs ++ DepsDirs) of
+ {ok, FoundHrl} -> AddHd(F, Mod, FoundHrl);
+ {error, _} -> false
+ end;
+ (F, Mod, include_lib, Hrl) ->
+      case SearchHrl(Hrl, ["src", "include", AppsDir, DepsDir] ++ AppsDirs ++ DepsDirs) of
+ {ok, FoundHrl} -> AddHd(F, Mod, FoundHrl);
+ {error, _} -> false
+ end;
+ (F, Mod, import, {Imp, _}) ->
+ IsFile =
+ case lists:keyfind(Imp, 1, Modules) of
+ false -> false;
+ {_, FilePath} -> filelib:is_file(FilePath)
+ end,
+ case IsFile of
+ false -> ok;
+ true -> Add(Mod, Imp)
+ end;
+ (_, _, _, _) -> ok
+ end,
+ MakeDepend = fun
+ (F, Fd, Mod, StartLocation) ->
+ {ok, Filename} = file:pid2name(Fd),
+ case io:parse_erl_form(Fd, undefined, StartLocation) of
+ {ok, AbsData, EndLocation} ->
+ case AbsData of
+ {attribute, _, Key, Value} ->
+ Attr(F, Mod, Key, Value),
+ F(F, Fd, Mod, EndLocation);
+ _ -> F(F, Fd, Mod, EndLocation)
+ end;
+        {eof, _} -> file:close(Fd);
+        {error, _ErrorDescription} ->
+          file:close(Fd);
+        {error, _ErrorInfo, ErrorLocation} ->
+          F(F, Fd, Mod, ErrorLocation)
+ end,
+ ok
+ end,
+ [begin
+ Mod = list_to_atom(filename:basename(F, ".erl")),
+ case file:open(F, [read]) of
+    {ok, Fd} -> MakeDepend(MakeDepend, Fd, Mod, 0);
+ {error, enoent} -> ok
+ end
+ end || F <- ErlFiles],
+ Depend = sofs:to_external(sofs:relation_to_family(sofs:relation(ets:tab2list(E)))),
+ CompileFirst = [X || X <- lists:reverse(digraph_utils:topsort(G)), [] =/= digraph:in_neighbours(G, X)],
+ TargetPath = fun(Target) ->
+ case lists:keyfind(Target, 1, Modules) of
+ false -> "";
+ {_, DepFile} ->
+ DirSubname = tl(string:tokens(filename:dirname(DepFile), "/")),
+ string:join(DirSubname ++ [atom_to_list(Target)], "/")
+ end
+ end,
+ Output0 = [
+ "# Generated by Erlang.mk. Edit at your own risk!\n\n",
+ [[F, "::", [[" ", D] || D <- Deps], "; @touch \$$@\n"] || {F, Deps} <- Depend],
+ "\nCOMPILE_FIRST +=", [[" ", TargetPath(CF)] || CF <- CompileFirst], "\n"
+ ],
+ Output = case "é" of
+ [233] -> unicode:characters_to_binary(Output0);
+ _ -> Output0
+ end,
+ ok = file:write_file("$(1)", Output),
+ halt()
+endef
+
+ifeq ($(if $(NO_MAKEDEP),$(wildcard $(PROJECT).d),),)
+$(PROJECT).d:: $(ERL_FILES) $(call core_find,include/,*.hrl) $(MAKEFILE_LIST)
+ $(makedep_verbose) $(call erlang,$(call makedep.erl,$@))
+endif
+
+ifeq ($(IS_APP)$(IS_DEP),)
+ifneq ($(words $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES)),0)
+# Rebuild everything when the Makefile changes.
+$(ERLANG_MK_TMP)/last-makefile-change: $(MAKEFILE_LIST) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES); \
+ touch -c $(PROJECT).d; \
+ fi
+ $(verbose) touch $@
+
+$(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES):: $(ERLANG_MK_TMP)/last-makefile-change
+ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change
+endif
+endif
+
+$(PROJECT).d::
+ $(verbose) :
+
+include $(wildcard $(PROJECT).d)
+
+ebin/$(PROJECT).app:: ebin/
+
+ebin/:
+ $(verbose) mkdir -p ebin/
+
+define compile_erl
+ $(erlc_verbose) erlc -v $(if $(IS_DEP),$(filter-out -Werror,$(ERLC_OPTS)),$(ERLC_OPTS)) -o ebin/ \
+ -pa ebin/ -I include/ $(filter-out $(ERLC_EXCLUDE_PATHS),$(COMPILE_FIRST_PATHS) $(1))
+endef
+
+define validate_app_file
+ case file:consult("ebin/$(PROJECT).app") of
+ {ok, _} -> halt();
+ _ -> halt(1)
+ end
+endef
+
+ebin/$(PROJECT).app:: $(ERL_FILES) $(CORE_FILES) $(wildcard src/$(PROJECT).app.src)
+ $(eval FILES_TO_COMPILE := $(filter-out src/$(PROJECT).app.src,$?))
+ $(if $(strip $(FILES_TO_COMPILE)),$(call compile_erl,$(FILES_TO_COMPILE)))
+# Older git versions do not have the --first-parent flag. Do without in that case.
+ $(eval GITDESCRIBE := $(shell git describe --dirty --abbrev=7 --tags --always --first-parent 2>/dev/null \
+ || git describe --dirty --abbrev=7 --tags --always 2>/dev/null || true))
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(filter-out $(ERLC_EXCLUDE_PATHS),$(ERL_FILES) $(CORE_FILES) $(BEAM_FILES)))))))
+ifeq ($(wildcard src/$(PROJECT).app.src),)
+ $(app_verbose) printf '$(subst %,%%,$(subst $(newline),\n,$(subst ','\'',$(call app_file,$(GITDESCRIBE),$(MODULES)))))' \
+ > ebin/$(PROJECT).app
+ $(verbose) if ! $(call erlang,$(call validate_app_file)); then \
+ echo "The .app file produced is invalid. Please verify the value of PROJECT_ENV." >&2; \
+ exit 1; \
+ fi
+else
+ $(verbose) if [ -z "$$(grep -e '^[^%]*{\s*modules\s*,' src/$(PROJECT).app.src)" ]; then \
+		echo "No empty {modules, []} entry found in $(PROJECT).app.src. Please consult the erlang.mk documentation for instructions." >&2; \
+ exit 1; \
+ fi
+ $(appsrc_verbose) cat src/$(PROJECT).app.src \
+ | sed "s/{[[:space:]]*modules[[:space:]]*,[[:space:]]*\[\]}/{modules, \[$(call comma_list,$(MODULES))\]}/" \
+ | sed "s/{id,[[:space:]]*\"git\"}/{id, \"$(subst /,\/,$(GITDESCRIBE))\"}/" \
+ > ebin/$(PROJECT).app
+endif
+ifneq ($(wildcard src/$(PROJECT).appup),)
+ $(verbose) cp src/$(PROJECT).appup ebin/
+endif
+
+clean:: clean-app
+
+clean-app:
+ $(gen_verbose) rm -rf $(PROJECT).d ebin/ priv/mibs/ $(XRL_ERL_FILES) $(YRL_ERL_FILES) \
+ $(addprefix include/,$(patsubst %.mib,%.hrl,$(notdir $(MIB_FILES)))) \
+ $(addprefix include/,$(patsubst %.asn1,%.hrl,$(notdir $(ASN1_FILES)))) \
+ $(addprefix include/,$(patsubst %.asn1,%.asn1db,$(notdir $(ASN1_FILES)))) \
+ $(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES))))
+
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Viktor Söderqvist <viktor@zuiderkwast.se>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: docs-deps
+
+# Configuration.
+
+ALL_DOC_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(DOC_DEPS))
+
+# Targets.
+
+$(foreach dep,$(DOC_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+doc-deps:
+else
+doc-deps: $(ALL_DOC_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_DOC_DEPS_DIRS) ; do $(MAKE) -C $$dep IS_DEP=1; done
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: rel-deps
+
+# Configuration.
+
+ALL_REL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(REL_DEPS))
+
+# Targets.
+
+$(foreach dep,$(REL_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+rel-deps:
+else
+rel-deps: $(ALL_REL_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_REL_DEPS_DIRS) ; do $(MAKE) -C $$dep; done
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: test-deps test-dir test-build clean-test-dir
+
+# Configuration.
+
+TEST_DIR ?= $(CURDIR)/test
+
+ALL_TEST_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(TEST_DEPS))
+
+TEST_ERLC_OPTS ?= +debug_info +warn_export_vars +warn_shadow_vars +warn_obsolete_guard
+TEST_ERLC_OPTS += -DTEST=1
+
+# Targets.
+
+$(foreach dep,$(TEST_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+test-deps:
+else
+test-deps: $(ALL_TEST_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_TEST_DEPS_DIRS) ; do \
+ if [ -z "$(strip $(FULL))" ] && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ else \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ fi \
+ done
+endif
+
+ifneq ($(wildcard $(TEST_DIR)),)
+test-dir: $(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build
+ @:
+
+test_erlc_verbose_0 = @echo " ERLC " $(filter-out $(patsubst %,%.erl,$(ERLC_EXCLUDE)),\
+ $(filter %.erl %.core,$(notdir $(FILES_TO_COMPILE))));
+test_erlc_verbose_2 = set -x;
+test_erlc_verbose = $(test_erlc_verbose_$(V))
+
+define compile_test_erl
+ $(test_erlc_verbose) erlc -v $(TEST_ERLC_OPTS) -o $(TEST_DIR) \
+ -pa ebin/ -I include/ $(1)
+endef
+
+ERL_TEST_FILES = $(call core_find,$(TEST_DIR)/,*.erl)
+$(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build: $(ERL_TEST_FILES) $(MAKEFILE_LIST)
+ $(eval FILES_TO_COMPILE := $(if $(filter $(MAKEFILE_LIST),$?),$(filter $(ERL_TEST_FILES),$^),$?))
+ $(if $(strip $(FILES_TO_COMPILE)),$(call compile_test_erl,$(FILES_TO_COMPILE)) && touch $@)
+endif
+
+test-build:: IS_TEST=1
+test-build:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build:: $(if $(wildcard src),$(if $(wildcard ebin/test),,clean)) $(if $(IS_APP),,deps test-deps)
+# We already compiled everything when IS_APP=1.
+ifndef IS_APP
+ifneq ($(wildcard src),)
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(verbose) $(MAKE) --no-print-directory app-build ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(gen_verbose) touch ebin/test
+endif
+ifneq ($(wildcard $(TEST_DIR)),)
+ $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+endif
+endif
+
+# Roughly the same as test-build, but when IS_APP=1.
+# We only care about compiling the current application.
+ifdef IS_APP
+test-build-app:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build-app:: deps test-deps
+ifneq ($(wildcard src),)
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(verbose) $(MAKE) --no-print-directory app-build ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(gen_verbose) touch ebin/test
+endif
+ifneq ($(wildcard $(TEST_DIR)),)
+ $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+endif
+endif
+
+clean:: clean-test-dir
+
+clean-test-dir:
+ifneq ($(wildcard $(TEST_DIR)/*.beam),)
+ $(gen_verbose) rm -f $(TEST_DIR)/*.beam $(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: rebar.config
+
+# We strip out -Werror because we don't want to fail due to
+# warnings when used as a dependency.
+
+compat_prepare_erlc_opts = $(shell echo "$1" | sed 's/, */,/g')
+
+define compat_convert_erlc_opts
+$(if $(filter-out -Werror,$1),\
+ $(if $(findstring +,$1),\
+ $(shell echo $1 | cut -b 2-)))
+endef
+
+define compat_erlc_opts_to_list
+[$(call comma_list,$(foreach o,$(call compat_prepare_erlc_opts,$1),$(call compat_convert_erlc_opts,$o)))]
+endef
+
+define compat_rebar_config
+{deps, [
+$(call comma_list,$(foreach d,$(DEPS),\
+ $(if $(filter hex,$(call dep_fetch,$d)),\
+ {$(call dep_name,$d)$(comma)"$(call dep_repo,$d)"},\
+ {$(call dep_name,$d)$(comma)".*"$(comma){git,"$(call dep_repo,$d)"$(comma)"$(call dep_commit,$d)"}})))
+]}.
+{erl_opts, $(call compat_erlc_opts_to_list,$(ERLC_OPTS))}.
+endef
+
+rebar.config:
+ $(gen_verbose) $(call core_render,compat_rebar_config,rebar.config)
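+
+# Illustrative output for "dep_cowlib = git https://github.com/ninenines/cowlib 2.9.1"
+# with the default ERLC_OPTS:
+#   {deps, [
+#   {cowlib,".*",{git,"https://github.com/ninenines/cowlib","2.9.1"}}
+#   ]}.
+#   {erl_opts, [debug_info,warn_export_vars,warn_shadow_vars,warn_obsolete_guard]}.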
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter asciideck,$(DEPS) $(DOC_DEPS)),asciideck)
+
+.PHONY: asciidoc asciidoc-guide asciidoc-manual install-asciidoc distclean-asciidoc-guide distclean-asciidoc-manual
+
+# Core targets.
+
+docs:: asciidoc
+
+distclean:: distclean-asciidoc-guide distclean-asciidoc-manual
+
+# Plugin-specific targets.
+
+asciidoc: asciidoc-guide asciidoc-manual
+
+# User guide.
+
+ifeq ($(wildcard doc/src/guide/book.asciidoc),)
+asciidoc-guide:
+else
+asciidoc-guide: distclean-asciidoc-guide doc-deps
+ a2x -v -f pdf doc/src/guide/book.asciidoc && mv doc/src/guide/book.pdf doc/guide.pdf
+ a2x -v -f chunked doc/src/guide/book.asciidoc && mv doc/src/guide/book.chunked/ doc/html/
+
+distclean-asciidoc-guide:
+ $(gen_verbose) rm -rf doc/html/ doc/guide.pdf
+endif
+
+# Man pages.
+
+ASCIIDOC_MANUAL_FILES := $(wildcard doc/src/manual/*.asciidoc)
+
+ifeq ($(ASCIIDOC_MANUAL_FILES),)
+asciidoc-manual:
+else
+
+# Configuration.
+
+MAN_INSTALL_PATH ?= /usr/local/share/man
+MAN_SECTIONS ?= 3 7
+MAN_PROJECT ?= $(shell echo $(PROJECT) | sed 's/^./\U&\E/')
+MAN_VERSION ?= $(PROJECT_VERSION)
+
+# Plugin-specific targets.
+
+define asciidoc2man.erl
+try
+ [begin
+ io:format(" ADOC ~s~n", [F]),
+ ok = asciideck:to_manpage(asciideck:parse_file(F), #{
+ compress => gzip,
+ outdir => filename:dirname(F),
+ extra2 => "$(MAN_PROJECT) $(MAN_VERSION)",
+ extra3 => "$(MAN_PROJECT) Function Reference"
+ })
+ end || F <- [$(shell echo $(addprefix $(comma)\",$(addsuffix \",$1)) | sed 's/^.//')]],
+ halt(0)
+catch C:E ->
+ io:format("Exception ~p:~p~nStacktrace: ~p~n", [C, E, erlang:get_stacktrace()]),
+ halt(1)
+end.
+endef
+
+asciidoc-manual:: doc-deps
+
+asciidoc-manual:: $(ASCIIDOC_MANUAL_FILES)
+ $(gen_verbose) $(call erlang,$(call asciidoc2man.erl,$?))
+ $(verbose) $(foreach s,$(MAN_SECTIONS),mkdir -p doc/man$s/ && mv doc/src/manual/*.$s.gz doc/man$s/;)
+
+install-docs:: install-asciidoc
+
+install-asciidoc: asciidoc-manual
+ $(foreach s,$(MAN_SECTIONS),\
+ mkdir -p $(MAN_INSTALL_PATH)/man$s/ && \
+ install -g `id -g` -o `id -u` -m 0644 doc/man$s/*.gz $(MAN_INSTALL_PATH)/man$s/;)
+
+distclean-asciidoc-manual:
+ $(gen_verbose) rm -rf $(addprefix doc/man,$(MAN_SECTIONS))
+endif
+endif
+
+# Copyright (c) 2014-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: bootstrap bootstrap-lib bootstrap-rel new list-templates
+
+# Core targets.
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Bootstrap targets:" \
+ " bootstrap Generate a skeleton of an OTP application" \
+ " bootstrap-lib Generate a skeleton of an OTP library" \
+ " bootstrap-rel Generate the files needed to build a release" \
+ " new-app in=NAME Create a new local OTP application NAME" \
+ " new-lib in=NAME Create a new local OTP library NAME" \
+ " new t=TPL n=NAME Generate a module NAME based on the template TPL" \
+ " new t=T n=N in=APP Generate a module NAME based on the template TPL in APP" \
+ " list-templates List available templates"
+
+# Bootstrap templates.
+
+define bs_appsrc
+{application, $p, [
+ {description, ""},
+ {vsn, "0.1.0"},
+ {id, "git"},
+ {modules, []},
+ {registered, []},
+ {applications, [
+ kernel,
+ stdlib
+ ]},
+ {mod, {$p_app, []}},
+ {env, []}
+]}.
+endef
+
+define bs_appsrc_lib
+{application, $p, [
+ {description, ""},
+ {vsn, "0.1.0"},
+ {id, "git"},
+ {modules, []},
+ {registered, []},
+ {applications, [
+ kernel,
+ stdlib
+ ]}
+]}.
+endef
+
+# To prevent autocompletion issues with ZSH, we add "include erlang.mk"
+# separately during the actual bootstrap.
+define bs_Makefile
+PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.1.0
+$(if $(SP),
+# Whitespace to be used when creating files from templates.
+SP = $(SP)
+)
+endef
+
+define bs_apps_Makefile
+PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.1.0
+$(if $(SP),
+# Whitespace to be used when creating files from templates.
+SP = $(SP)
+)
+# Make sure we know where the applications are located.
+ROOT_DIR ?= $(call core_relpath,$(dir $(ERLANG_MK_FILENAME)),$(APPS_DIR)/app)
+APPS_DIR ?= ..
+DEPS_DIR ?= $(call core_relpath,$(DEPS_DIR),$(APPS_DIR)/app)
+
+include $$(ROOT_DIR)/erlang.mk
+endef
+
+define bs_app
+-module($p_app).
+-behaviour(application).
+
+-export([start/2]).
+-export([stop/1]).
+
+start(_Type, _Args) ->
+ $p_sup:start_link().
+
+stop(_State) ->
+ ok.
+endef
+
+define bs_relx_config
+{release, {$p_release, "1"}, [$p, sasl, runtime_tools]}.
+{extended_start_script, true}.
+{sys_config, "config/sys.config"}.
+{vm_args, "config/vm.args"}.
+endef
+
+define bs_sys_config
+[
+].
+endef
+
+define bs_vm_args
+-name $p@127.0.0.1
+-setcookie $p
+-heart
+endef
+
+# Normal templates.
+
+define tpl_supervisor
+-module($(n)).
+-behaviour(supervisor).
+
+-export([start_link/0]).
+-export([init/1]).
+
+start_link() ->
+ supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+init([]) ->
+ Procs = [],
+ {ok, {{one_for_one, 1, 5}, Procs}}.
+endef
+
+define tpl_gen_server
+-module($(n)).
+-behaviour(gen_server).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_server.
+-export([init/1]).
+-export([handle_call/3]).
+-export([handle_cast/2]).
+-export([handle_info/2]).
+-export([terminate/2]).
+-export([code_change/3]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_server:start_link(?MODULE, [], []).
+
+%% gen_server.
+
+init([]) ->
+ {ok, #state{}}.
+
+handle_call(_Request, _From, State) ->
+ {reply, ignored, State}.
+
+handle_cast(_Msg, State) ->
+ {noreply, State}.
+
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+endef
+
+define tpl_module
+-module($(n)).
+-export([]).
+endef
+
+define tpl_cowboy_http
+-module($(n)).
+-behaviour(cowboy_http_handler).
+
+-export([init/3]).
+-export([handle/2]).
+-export([terminate/3]).
+
+-record(state, {
+}).
+
+init(_, Req, _Opts) ->
+ {ok, Req, #state{}}.
+
+handle(Req, State=#state{}) ->
+ {ok, Req2} = cowboy_req:reply(200, Req),
+ {ok, Req2, State}.
+
+terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_gen_fsm
+-module($(n)).
+-behaviour(gen_fsm).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_fsm.
+-export([init/1]).
+-export([state_name/2]).
+-export([handle_event/3]).
+-export([state_name/3]).
+-export([handle_sync_event/4]).
+-export([handle_info/3]).
+-export([terminate/3]).
+-export([code_change/4]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_fsm:start_link(?MODULE, [], []).
+
+%% gen_fsm.
+
+init([]) ->
+ {ok, state_name, #state{}}.
+
+state_name(_Event, StateData) ->
+ {next_state, state_name, StateData}.
+
+handle_event(_Event, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+state_name(_Event, _From, StateData) ->
+ {reply, ignored, state_name, StateData}.
+
+handle_sync_event(_Event, _From, StateName, StateData) ->
+ {reply, ignored, StateName, StateData}.
+
+handle_info(_Info, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+terminate(_Reason, _StateName, _StateData) ->
+ ok.
+
+code_change(_OldVsn, StateName, StateData, _Extra) ->
+ {ok, StateName, StateData}.
+endef
+
+define tpl_gen_statem
+-module($(n)).
+-behaviour(gen_statem).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_statem.
+-export([callback_mode/0]).
+-export([init/1]).
+-export([state_name/3]).
+-export([handle_event/4]).
+-export([terminate/3]).
+-export([code_change/4]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_statem:start_link(?MODULE, [], []).
+
+%% gen_statem.
+
+callback_mode() ->
+ state_functions.
+
+init([]) ->
+ {ok, state_name, #state{}}.
+
+state_name(_EventType, _EventData, StateData) ->
+ {next_state, state_name, StateData}.
+
+handle_event(_EventType, _EventData, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+terminate(_Reason, _StateName, _StateData) ->
+ ok.
+
+code_change(_OldVsn, StateName, StateData, _Extra) ->
+ {ok, StateName, StateData}.
+endef
+
+define tpl_cowboy_loop
+-module($(n)).
+-behaviour(cowboy_loop_handler).
+
+-export([init/3]).
+-export([info/3]).
+-export([terminate/3]).
+
+-record(state, {
+}).
+
+init(_, Req, _Opts) ->
+ {loop, Req, #state{}, 5000, hibernate}.
+
+info(_Info, Req, State) ->
+ {loop, Req, State, hibernate}.
+
+terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_cowboy_rest
+-module($(n)).
+
+-export([init/3]).
+-export([content_types_provided/2]).
+-export([get_html/2]).
+
+init(_, _Req, _Opts) ->
+ {upgrade, protocol, cowboy_rest}.
+
+content_types_provided(Req, State) ->
+ {[{{<<"text">>, <<"html">>, '*'}, get_html}], Req, State}.
+
+get_html(Req, State) ->
+ {<<"<html><body>This is REST!</body></html>">>, Req, State}.
+endef
+
+define tpl_cowboy_ws
+-module($(n)).
+-behaviour(cowboy_websocket_handler).
+
+-export([init/3]).
+-export([websocket_init/3]).
+-export([websocket_handle/3]).
+-export([websocket_info/3]).
+-export([websocket_terminate/3]).
+
+-record(state, {
+}).
+
+init(_, _, _) ->
+ {upgrade, protocol, cowboy_websocket}.
+
+websocket_init(_, Req, _Opts) ->
+ Req2 = cowboy_req:compact(Req),
+ {ok, Req2, #state{}}.
+
+websocket_handle({text, Data}, Req, State) ->
+ {reply, {text, Data}, Req, State};
+websocket_handle({binary, Data}, Req, State) ->
+ {reply, {binary, Data}, Req, State};
+websocket_handle(_Frame, Req, State) ->
+ {ok, Req, State}.
+
+websocket_info(_Info, Req, State) ->
+ {ok, Req, State}.
+
+websocket_terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_ranch_protocol
+-module($(n)).
+-behaviour(ranch_protocol).
+
+-export([start_link/4]).
+-export([init/4]).
+
+-type opts() :: [].
+-export_type([opts/0]).
+
+-record(state, {
+ socket :: inet:socket(),
+ transport :: module()
+}).
+
+start_link(Ref, Socket, Transport, Opts) ->
+ Pid = spawn_link(?MODULE, init, [Ref, Socket, Transport, Opts]),
+ {ok, Pid}.
+
+-spec init(ranch:ref(), inet:socket(), module(), opts()) -> ok.
+init(Ref, Socket, Transport, _Opts) ->
+ ok = ranch:accept_ack(Ref),
+ loop(#state{socket=Socket, transport=Transport}).
+
+loop(State) ->
+ loop(State).
+endef
+
+# Plugin-specific targets.
+
+ifndef WS
+ifdef SP
+WS = $(subst a,,a $(wordlist 1,$(SP),a a a a a a a a a a a a a a a a a a a a))
+else
+WS = $(tab)
+endif
+endif
+
+bootstrap:
+ifneq ($(wildcard src/),)
+ $(error Error: src/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(eval n := $(PROJECT)_sup)
+ $(verbose) $(call core_render,bs_Makefile,Makefile)
+ $(verbose) echo "include erlang.mk" >> Makefile
+ $(verbose) mkdir src/
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc,src/$(PROJECT).app.src)
+endif
+ $(verbose) $(call core_render,bs_app,src/$(PROJECT)_app.erl)
+ $(verbose) $(call core_render,tpl_supervisor,src/$(PROJECT)_sup.erl)
+
+bootstrap-lib:
+ifneq ($(wildcard src/),)
+ $(error Error: src/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(verbose) $(call core_render,bs_Makefile,Makefile)
+ $(verbose) echo "include erlang.mk" >> Makefile
+ $(verbose) mkdir src/
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc_lib,src/$(PROJECT).app.src)
+endif
+
+bootstrap-rel:
+ifneq ($(wildcard relx.config),)
+ $(error Error: relx.config already exists)
+endif
+ifneq ($(wildcard config/),)
+ $(error Error: config/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(verbose) $(call core_render,bs_relx_config,relx.config)
+ $(verbose) mkdir config/
+ $(verbose) $(call core_render,bs_sys_config,config/sys.config)
+ $(verbose) $(call core_render,bs_vm_args,config/vm.args)
+
+new-app:
+ifndef in
+ $(error Usage: $(MAKE) new-app in=APP)
+endif
+ifneq ($(wildcard $(APPS_DIR)/$in),)
+ $(error Error: Application $in already exists)
+endif
+ $(eval p := $(in))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(eval n := $(in)_sup)
+ $(verbose) mkdir -p $(APPS_DIR)/$p/src/
+ $(verbose) $(call core_render,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile)
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc,$(APPS_DIR)/$p/src/$p.app.src)
+endif
+ $(verbose) $(call core_render,bs_app,$(APPS_DIR)/$p/src/$p_app.erl)
+ $(verbose) $(call core_render,tpl_supervisor,$(APPS_DIR)/$p/src/$p_sup.erl)
+
+new-lib:
+ifndef in
+ $(error Usage: $(MAKE) new-lib in=APP)
+endif
+ifneq ($(wildcard $(APPS_DIR)/$in),)
+ $(error Error: Application $in already exists)
+endif
+ $(eval p := $(in))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(verbose) mkdir -p $(APPS_DIR)/$p/src/
+ $(verbose) $(call core_render,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile)
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc_lib,$(APPS_DIR)/$p/src/$p.app.src)
+endif
+
+new:
+ifeq ($(wildcard src/)$(in),)
+ $(error Error: src/ directory does not exist)
+endif
+ifndef t
+ $(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP])
+endif
+ifndef n
+ $(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP])
+endif
+ifdef in
+ $(verbose) $(call core_render,tpl_$(t),$(APPS_DIR)/$(in)/src/$(n).erl)
+else
+ $(verbose) $(call core_render,tpl_$(t),src/$(n).erl)
+endif
+
+list-templates:
+	$(verbose) echo Available templates:
+ $(verbose) printf " %s\n" $(sort $(patsubst tpl_%,%,$(filter tpl_%,$(.VARIABLES))))
+
+# Copyright (c) 2014-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: clean-c_src distclean-c_src-env
+
+# Configuration.
+
+C_SRC_DIR ?= $(CURDIR)/c_src
+C_SRC_ENV ?= $(C_SRC_DIR)/env.mk
+C_SRC_OUTPUT ?= $(CURDIR)/priv/$(PROJECT)
+C_SRC_TYPE ?= shared
+
+# System type and C compiler/flags.
+
+ifeq ($(PLATFORM),msys2)
+ C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?= .exe
+ C_SRC_OUTPUT_SHARED_EXTENSION ?= .dll
+else
+ C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?=
+ C_SRC_OUTPUT_SHARED_EXTENSION ?= .so
+endif
+
+ifeq ($(C_SRC_TYPE),shared)
+ C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_SHARED_EXTENSION)
+else
+ C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_EXECUTABLE_EXTENSION)
+endif
+
+ifeq ($(PLATFORM),msys2)
+# We hardcode the compiler used on MSYS2. The default CC=cc does
+# not produce working code. The "gcc" MSYS2 package also doesn't.
+ CC = /mingw64/bin/gcc
+ export CC
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),darwin)
+ CC ?= cc
+ CFLAGS ?= -O3 -std=c99 -arch x86_64 -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -arch x86_64 -Wall
+ LDFLAGS ?= -arch x86_64 -flat_namespace -undefined suppress
+else ifeq ($(PLATFORM),freebsd)
+ CC ?= cc
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),linux)
+ CC ?= gcc
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+endif
+
+ifneq ($(PLATFORM),msys2)
+ CFLAGS += -fPIC
+ CXXFLAGS += -fPIC
+endif
+
+CFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
+CXXFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
+
+LDLIBS += -L"$(ERL_INTERFACE_LIB_DIR)" -lei
+
+# Verbosity.
+
+c_verbose_0 = @echo " C " $(filter-out $(notdir $(MAKEFILE_LIST) $(C_SRC_ENV)),$(^F));
+c_verbose = $(c_verbose_$(V))
+
+cpp_verbose_0 = @echo " CPP " $(filter-out $(notdir $(MAKEFILE_LIST) $(C_SRC_ENV)),$(^F));
+cpp_verbose = $(cpp_verbose_$(V))
+
+link_verbose_0 = @echo " LD " $(@F);
+link_verbose = $(link_verbose_$(V))
+
+# Targets.
+
+ifeq ($(wildcard $(C_SRC_DIR)),)
+else ifneq ($(wildcard $(C_SRC_DIR)/Makefile),)
+app:: app-c_src
+
+test-build:: app-c_src
+
+app-c_src:
+ $(MAKE) -C $(C_SRC_DIR)
+
+clean::
+ $(MAKE) -C $(C_SRC_DIR) clean
+
+else
+
+ifeq ($(SOURCES),)
+SOURCES := $(sort $(foreach pat,*.c *.C *.cc *.cpp,$(call core_find,$(C_SRC_DIR)/,$(pat))))
+endif
+OBJECTS = $(addsuffix .o, $(basename $(SOURCES)))
+
+COMPILE_C = $(c_verbose) $(CC) $(CFLAGS) $(CPPFLAGS) -c
+COMPILE_CPP = $(cpp_verbose) $(CXX) $(CXXFLAGS) $(CPPFLAGS) -c
+
+app:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
+
+test-build:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
+
+$(C_SRC_OUTPUT_FILE): $(OBJECTS)
+ $(verbose) mkdir -p $(dir $@)
+ $(link_verbose) $(CC) $(OBJECTS) \
+ $(LDFLAGS) $(if $(filter $(C_SRC_TYPE),shared),-shared) $(LDLIBS) \
+ -o $(C_SRC_OUTPUT_FILE)
+
+$(OBJECTS): $(MAKEFILE_LIST) $(C_SRC_ENV)
+
+%.o: %.c
+ $(COMPILE_C) $(OUTPUT_OPTION) $<
+
+%.o: %.cc
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+%.o: %.C
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+%.o: %.cpp
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+clean:: clean-c_src
+
+clean-c_src:
+ $(gen_verbose) rm -f $(C_SRC_OUTPUT_FILE) $(OBJECTS)
+
+endif
+
+ifneq ($(wildcard $(C_SRC_DIR)),)
+ERL_ERTS_DIR = $(shell $(ERL) -eval 'io:format("~s~n", [code:lib_dir(erts)]), halt().')
+
+$(C_SRC_ENV):
+ $(verbose) $(ERL) -eval "file:write_file(\"$(call core_native_path,$(C_SRC_ENV))\", \
+ io_lib:format( \
+ \"# Generated by Erlang.mk. Edit at your own risk!~n~n\" \
+ \"ERTS_INCLUDE_DIR ?= ~s/erts-~s/include/~n\" \
+ \"ERL_INTERFACE_INCLUDE_DIR ?= ~s~n\" \
+ \"ERL_INTERFACE_LIB_DIR ?= ~s~n\" \
+ \"ERTS_DIR ?= $(ERL_ERTS_DIR)~n\", \
+ [code:root_dir(), erlang:system_info(version), \
+ code:lib_dir(erl_interface, include), \
+ code:lib_dir(erl_interface, lib)])), \
+ halt()."
+
+distclean:: distclean-c_src-env
+
+distclean-c_src-env:
+ $(gen_verbose) rm -f $(C_SRC_ENV)
+
+-include $(C_SRC_ENV)
+
+ifneq ($(ERL_ERTS_DIR),$(ERTS_DIR))
+$(shell rm -f $(C_SRC_ENV))
+endif
+endif
+
+# Templates.
+
+define bs_c_nif
+#include "erl_nif.h"
+
+static int loads = 0;
+
+static int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
+{
+ /* Initialize private data. */
+ *priv_data = NULL;
+
+ loads++;
+
+ return 0;
+}
+
+static int upgrade(ErlNifEnv* env, void** priv_data, void** old_priv_data, ERL_NIF_TERM load_info)
+{
+ /* Convert the private data to the new version. */
+ *priv_data = *old_priv_data;
+
+ loads++;
+
+ return 0;
+}
+
+static void unload(ErlNifEnv* env, void* priv_data)
+{
+ if (loads == 1) {
+ /* Destroy the private data. */
+ }
+
+ loads--;
+}
+
+static ERL_NIF_TERM hello(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ if (enif_is_atom(env, argv[0])) {
+ return enif_make_tuple2(env,
+ enif_make_atom(env, "hello"),
+ argv[0]);
+ }
+
+ return enif_make_tuple2(env,
+ enif_make_atom(env, "error"),
+ enif_make_atom(env, "badarg"));
+}
+
+static ErlNifFunc nif_funcs[] = {
+ {"hello", 1, hello}
+};
+
+ERL_NIF_INIT($n, nif_funcs, load, NULL, upgrade, unload)
+endef
+
+define bs_erl_nif
+-module($n).
+
+-export([hello/1]).
+
+-on_load(on_load/0).
+on_load() ->
+ PrivDir = case code:priv_dir(?MODULE) of
+ {error, _} ->
+ AppPath = filename:dirname(filename:dirname(code:which(?MODULE))),
+ filename:join(AppPath, "priv");
+ Path ->
+ Path
+ end,
+ erlang:load_nif(filename:join(PrivDir, atom_to_list(?MODULE)), 0).
+
+hello(_) ->
+ erlang:nif_error({not_loaded, ?MODULE}).
+endef
+
+new-nif:
+ifneq ($(wildcard $(C_SRC_DIR)/$n.c),)
+ $(error Error: $(C_SRC_DIR)/$n.c already exists)
+endif
+ifneq ($(wildcard src/$n.erl),)
+ $(error Error: src/$n.erl already exists)
+endif
+ifndef n
+ $(error Usage: $(MAKE) new-nif n=NAME [in=APP])
+endif
+ifdef in
+ $(verbose) $(MAKE) -C $(APPS_DIR)/$(in)/ new-nif n=$n in=
+else
+ $(verbose) mkdir -p $(C_SRC_DIR) src/
+ $(verbose) $(call core_render,bs_c_nif,$(C_SRC_DIR)/$n.c)
+ $(verbose) $(call core_render,bs_erl_nif,src/$n.erl)
+endif
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: ci ci-prepare ci-setup
+
+CI_OTP ?=
+CI_HIPE ?=
+CI_ERLLVM ?=
+
+ifeq ($(CI_VM),native)
+ERLC_OPTS += +native
+TEST_ERLC_OPTS += +native
+else ifeq ($(CI_VM),erllvm)
+ERLC_OPTS += +native +'{hipe, [to_llvm]}'
+TEST_ERLC_OPTS += +native +'{hipe, [to_llvm]}'
+endif
+
+ifeq ($(strip $(CI_OTP) $(CI_HIPE) $(CI_ERLLVM)),)
+ci::
+else
+
+ci:: $(addprefix ci-,$(CI_OTP) $(addsuffix -native,$(CI_HIPE)) $(addsuffix -erllvm,$(CI_ERLLVM)))
+
+ci-prepare: $(addprefix $(KERL_INSTALL_DIR)/,$(CI_OTP) $(addsuffix -native,$(CI_HIPE)))
+
+ci-setup::
+ $(verbose) :
+
+ci-extra::
+ $(verbose) :
+
+ci_verbose_0 = @echo " CI " $(1);
+ci_verbose = $(ci_verbose_$(V))
+
+define ci_target
+ci-$1: $(KERL_INSTALL_DIR)/$2
+ $(verbose) $(MAKE) --no-print-directory clean
+ $(ci_verbose) \
+ PATH="$(KERL_INSTALL_DIR)/$2/bin:$(PATH)" \
+ CI_OTP_RELEASE="$1" \
+ CT_OPTS="-label $1" \
+ CI_VM="$3" \
+ $(MAKE) ci-setup tests
+ $(verbose) $(MAKE) --no-print-directory ci-extra
+endef
+
+$(foreach otp,$(CI_OTP),$(eval $(call ci_target,$(otp),$(otp),otp)))
+$(foreach otp,$(CI_HIPE),$(eval $(call ci_target,$(otp)-native,$(otp)-native,native)))
+$(foreach otp,$(CI_ERLLVM),$(eval $(call ci_target,$(otp)-erllvm,$(otp)-native,erllvm)))
+
+$(foreach otp,$(filter-out $(ERLANG_OTP),$(CI_OTP)),$(eval $(call kerl_otp_target,$(otp))))
+$(foreach otp,$(filter-out $(ERLANG_HIPE),$(sort $(CI_HIPE) $(CI_ERLLVM))),$(eval $(call kerl_hipe_target,$(otp))))
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Continuous Integration targets:" \
+ " ci Run '$(MAKE) tests' on all configured Erlang versions." \
+ "" \
+ "The CI_OTP variable must be defined with the Erlang versions" \
+ "that must be tested. For example: CI_OTP = OTP-17.3.4 OTP-17.5.3"
+
+endif
+
+# Copyright (c) 2020, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifdef CONCUERROR_TESTS
+
+.PHONY: concuerror distclean-concuerror
+
+# Configuration
+
+CONCUERROR_LOGS_DIR ?= $(CURDIR)/logs
+CONCUERROR_OPTS ?=
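+# CONCUERROR_TESTS takes a list of module:test pairs, for example
+# (illustrative names): CONCUERROR_TESTS = my_module:my_test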
+
+# Core targets.
+
+check:: concuerror
+
+ifndef KEEP_LOGS
+distclean:: distclean-concuerror
+endif
+
+# Plugin-specific targets.
+
+$(ERLANG_MK_TMP)/Concuerror/bin/concuerror: | $(ERLANG_MK_TMP)
+ $(verbose) git clone https://github.com/parapluu/Concuerror $(ERLANG_MK_TMP)/Concuerror
+ $(verbose) $(MAKE) -C $(ERLANG_MK_TMP)/Concuerror
+
+$(CONCUERROR_LOGS_DIR):
+ $(verbose) mkdir -p $(CONCUERROR_LOGS_DIR)
+
+define concuerror_html_report
+<!DOCTYPE html>
+<html lang="en">
+<head>
+<meta charset="utf-8">
+<title>Concuerror HTML report</title>
+</head>
+<body>
+<h1>Concuerror HTML report</h1>
+<p>Generated on $(concuerror_date)</p>
+<ul>
+$(foreach t,$(concuerror_targets),<li><a href="$(t).txt">$(t)</a></li>)
+</ul>
+</body>
+</html>
+endef
+
+concuerror: $(addprefix concuerror-,$(subst :,-,$(CONCUERROR_TESTS)))
+ $(eval concuerror_date := $(shell date))
+ $(eval concuerror_targets := $^)
+ $(verbose) $(call core_render,concuerror_html_report,$(CONCUERROR_LOGS_DIR)/concuerror.html)
+
+define concuerror_target
+.PHONY: concuerror-$1-$2
+
+concuerror-$1-$2: test-build | $(ERLANG_MK_TMP)/Concuerror/bin/concuerror $(CONCUERROR_LOGS_DIR)
+ $(ERLANG_MK_TMP)/Concuerror/bin/concuerror \
+ --pa $(CURDIR)/ebin --pa $(TEST_DIR) \
+ -o $(CONCUERROR_LOGS_DIR)/concuerror-$1-$2.txt \
+ $$(CONCUERROR_OPTS) -m $1 -t $2
+endef
+
+$(foreach test,$(CONCUERROR_TESTS),$(eval $(call concuerror_target,$(firstword $(subst :, ,$(test))),$(lastword $(subst :, ,$(test))))))
+
+distclean-concuerror:
+ $(gen_verbose) rm -rf $(CONCUERROR_LOGS_DIR)
+
+endif
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: ct apps-ct distclean-ct
+
+# Configuration.
+
+CT_OPTS ?=
+
+ifneq ($(wildcard $(TEST_DIR)),)
+ifndef CT_SUITES
+CT_SUITES := $(sort $(subst _SUITE.erl,,$(notdir $(call core_find,$(TEST_DIR)/,*_SUITE.erl))))
+endif
+endif
+CT_SUITES ?=
+CT_LOGS_DIR ?= $(CURDIR)/logs
+
+# Core targets.
+
+tests:: ct
+
+ifndef KEEP_LOGS
+distclean:: distclean-ct
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Common_test targets:" \
+ " ct Run all the common_test suites for this project" \
+ "" \
+		"Each of your common_test suites has an associated target." \
+		"A suite named http_SUITE can be run using the ct-http target."
+
+# Plugin-specific targets.
+
+CT_RUN = ct_run \
+ -no_auto_compile \
+ -noinput \
+ -pa $(CURDIR)/ebin $(TEST_DIR) \
+ -dir $(TEST_DIR) \
+ -logdir $(CT_LOGS_DIR)
+
+ifeq ($(CT_SUITES),)
+ct: $(if $(IS_APP)$(ROOT_DIR),,apps-ct)
+else
+# We do not run tests if we are in an apps/* with no test directory.
+ifneq ($(IS_APP)$(wildcard $(TEST_DIR)),1)
+ct: test-build $(if $(IS_APP)$(ROOT_DIR),,apps-ct)
+ $(verbose) mkdir -p $(CT_LOGS_DIR)
+ $(gen_verbose) $(CT_RUN) -sname ct_$(PROJECT) -suite $(addsuffix _SUITE,$(CT_SUITES)) $(CT_OPTS)
+endif
+endif
+
+ifneq ($(ALL_APPS_DIRS),)
+define ct_app_target
+apps-ct-$1: test-build
+ $$(MAKE) -C $1 ct IS_APP=1
+endef
+
+$(foreach app,$(ALL_APPS_DIRS),$(eval $(call ct_app_target,$(app))))
+
+apps-ct: $(addprefix apps-ct-,$(ALL_APPS_DIRS))
+endif
+
+ifdef t
+ifeq (,$(findstring :,$t))
+CT_EXTRA = -group $t
+else
+t_words = $(subst :, ,$t)
+CT_EXTRA = -group $(firstword $(t_words)) -case $(lastword $(t_words))
+endif
+else
+ifdef c
+CT_EXTRA = -case $c
+else
+CT_EXTRA =
+endif
+endif
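+# Illustrative usage: "make ct-http t=my_group" runs a single group of
+# http_SUITE, and "make ct-http t=my_group:my_case" a single case in it.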
+
+define ct_suite_target
+ct-$(1): test-build
+ $(verbose) mkdir -p $(CT_LOGS_DIR)
+ $(gen_verbose_esc) $(CT_RUN) -sname ct_$(PROJECT) -suite $(addsuffix _SUITE,$(1)) $(CT_EXTRA) $(CT_OPTS)
+endef
+
+$(foreach test,$(CT_SUITES),$(eval $(call ct_suite_target,$(test))))
+
+distclean-ct:
+ $(gen_verbose) rm -rf $(CT_LOGS_DIR)
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: plt distclean-plt dialyze
+
+# Configuration.
+
+DIALYZER_PLT ?= $(CURDIR)/.$(PROJECT).plt
+export DIALYZER_PLT
+
+PLT_APPS ?=
+DIALYZER_DIRS ?= --src -r $(wildcard src) $(ALL_APPS_DIRS)
+DIALYZER_OPTS ?= -Werror_handling -Wrace_conditions -Wunmatched_returns # -Wunderspecs
+DIALYZER_PLT_OPTS ?=
+
+# Core targets.
+
+check:: dialyze
+
+distclean:: distclean-plt
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Dialyzer targets:" \
+ " plt Build a PLT file for this project" \
+ " dialyze Analyze the project using Dialyzer"
+
+# Plugin-specific targets.
+
+define filter_opts.erl
+ Opts = init:get_plain_arguments(),
+ {Filtered, _} = lists:foldl(fun
+ (O, {Os, true}) -> {[O|Os], false};
+ (O = "-D", {Os, _}) -> {[O|Os], true};
+ (O = [\\$$-, \\$$D, _ | _], {Os, _}) -> {[O|Os], false};
+ (O = "-I", {Os, _}) -> {[O|Os], true};
+ (O = [\\$$-, \\$$I, _ | _], {Os, _}) -> {[O|Os], false};
+ (O = "-pa", {Os, _}) -> {[O|Os], true};
+ (_, Acc) -> Acc
+ end, {[], false}, Opts),
+ io:format("~s~n", [string:join(lists:reverse(Filtered), " ")]),
+ halt().
+endef
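+# Illustrative: with ERLC_OPTS = -DTEST +debug_info -I include, the snippet
+# above keeps "-DTEST -I include" for Dialyzer and drops +debug_info.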
+
+# DIALYZER_PLT is a variable understood directly by Dialyzer.
+#
+# We append the path to erts at the end of the PLT. This works
+# because the PLT file is in the external term format and the
+# function binary_to_term/1 ignores any trailing data.
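+#
+# Illustrative shell session (assuming that behavior):
+#   1> B = term_to_binary(plt_data).
+#   2> binary_to_term(<<B/binary, "/path/to/erts">>).
+#   plt_data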
+$(DIALYZER_PLT): deps app
+ $(eval DEPS_LOG := $(shell test -f $(ERLANG_MK_TMP)/deps.log && \
+ while read p; do test -d $$p/ebin && echo $$p/ebin; done <$(ERLANG_MK_TMP)/deps.log))
+ $(verbose) dialyzer --build_plt $(DIALYZER_PLT_OPTS) --apps \
+ erts kernel stdlib $(PLT_APPS) $(OTP_DEPS) $(LOCAL_DEPS) $(DEPS_LOG) || test $$? -eq 2
+ $(verbose) $(ERL) -eval 'io:format("~n~s~n", [code:lib_dir(erts)]), halt().' >> $@
+
+plt: $(DIALYZER_PLT)
+
+distclean-plt:
+ $(gen_verbose) rm -f $(DIALYZER_PLT)
+
+ifneq ($(wildcard $(DIALYZER_PLT)),)
+dialyze: $(if $(filter --src,$(DIALYZER_DIRS)),,deps app)
+ $(verbose) if ! tail -n1 $(DIALYZER_PLT) | \
+ grep -q "^`$(ERL) -eval 'io:format("~s", [code:lib_dir(erts)]), halt().'`$$"; then \
+ rm $(DIALYZER_PLT); \
+ $(MAKE) plt; \
+ fi
+else
+dialyze: $(DIALYZER_PLT)
+endif
+ $(verbose) dialyzer --no_native `$(ERL) \
+ -eval "$(subst $(newline),,$(call escape_dquotes,$(call filter_opts.erl)))" \
+ -extra $(ERLC_OPTS)` $(DIALYZER_DIRS) $(DIALYZER_OPTS) $(if $(wildcard ebin/),-pa ebin/)
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-edoc edoc
+
+# Configuration.
+
+EDOC_OPTS ?=
+EDOC_SRC_DIRS ?=
+EDOC_OUTPUT ?= doc
+
+define edoc.erl
+ SrcPaths = lists:foldl(fun(P, Acc) ->
+ filelib:wildcard(atom_to_list(P) ++ "/{src,c_src}") ++ Acc
+ end, [], [$(call comma_list,$(patsubst %,'%',$(call core_native_path,$(EDOC_SRC_DIRS))))]),
+ DefaultOpts = [{dir, "$(EDOC_OUTPUT)"}, {source_path, SrcPaths}, {subpackages, false}],
+ edoc:application($(1), ".", [$(2)] ++ DefaultOpts),
+ halt(0).
+endef
+
+# Core targets.
+
+ifneq ($(strip $(EDOC_SRC_DIRS)$(wildcard doc/overview.edoc)),)
+docs:: edoc
+endif
+
+distclean:: distclean-edoc
+
+# Plugin-specific targets.
+
+edoc: distclean-edoc doc-deps
+ $(gen_verbose) $(call erlang,$(call edoc.erl,$(PROJECT),$(EDOC_OPTS)))
+
+distclean-edoc:
+ $(gen_verbose) rm -f $(EDOC_OUTPUT)/*.css $(EDOC_OUTPUT)/*.html $(EDOC_OUTPUT)/*.png $(EDOC_OUTPUT)/edoc-info
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Configuration.
+
+DTL_FULL_PATH ?=
+DTL_PATH ?= templates/
+DTL_PREFIX ?=
+DTL_SUFFIX ?= _dtl
+DTL_OPTS ?=
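+# Illustrative: with the defaults above, templates/foo.dtl is compiled to the
+# module foo_dtl; setting DTL_PREFIX = tpl_ would produce tpl_foo_dtl instead.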
+
+# Verbosity.
+
+dtl_verbose_0 = @echo " DTL " $(filter %.dtl,$(?F));
+dtl_verbose = $(dtl_verbose_$(V))
+
+# Core targets.
+
+DTL_PATH := $(abspath $(DTL_PATH))
+DTL_FILES := $(sort $(call core_find,$(DTL_PATH),*.dtl))
+
+ifneq ($(DTL_FILES),)
+
+DTL_NAMES = $(addprefix $(DTL_PREFIX),$(addsuffix $(DTL_SUFFIX),$(DTL_FILES:$(DTL_PATH)/%.dtl=%)))
+DTL_MODULES = $(if $(DTL_FULL_PATH),$(subst /,_,$(DTL_NAMES)),$(notdir $(DTL_NAMES)))
+BEAM_FILES += $(addsuffix .beam,$(addprefix ebin/,$(DTL_MODULES)))
+
+ifneq ($(words $(DTL_FILES)),0)
+# Rebuild templates when the Makefile changes.
+$(ERLANG_MK_TMP)/last-makefile-change-erlydtl: $(MAKEFILE_LIST) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(DTL_FILES); \
+ fi
+ $(verbose) touch $@
+
+ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change-erlydtl
+endif
+
+define erlydtl_compile.erl
+ [begin
+ Module0 = case "$(strip $(DTL_FULL_PATH))" of
+ "" ->
+ filename:basename(F, ".dtl");
+ _ ->
+ "$(call core_native_path,$(DTL_PATH))/" ++ F2 = filename:rootname(F, ".dtl"),
+ re:replace(F2, "/", "_", [{return, list}, global])
+ end,
+ Module = list_to_atom("$(DTL_PREFIX)" ++ string:to_lower(Module0) ++ "$(DTL_SUFFIX)"),
+ case erlydtl:compile(F, Module, [$(DTL_OPTS)] ++ [{out_dir, "ebin/"}, return_errors]) of
+ ok -> ok;
+ {ok, _} -> ok
+ end
+ end || F <- string:tokens("$(1)", " ")],
+ halt().
+endef
+
+ebin/$(PROJECT).app:: $(DTL_FILES) | ebin/
+ $(if $(strip $?),\
+ $(dtl_verbose) $(call erlang,$(call erlydtl_compile.erl,$(call core_native_path,$?)),\
+ -pa ebin/))
+
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, Dave Cottlehuber <dch@skunkwerks.at>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-escript escript escript-zip
+
+# Configuration.
+
+ESCRIPT_NAME ?= $(PROJECT)
+ESCRIPT_FILE ?= $(ESCRIPT_NAME)
+
+ESCRIPT_SHEBANG ?= /usr/bin/env escript
+ESCRIPT_COMMENT ?= This is an -*- erlang -*- file
+ESCRIPT_EMU_ARGS ?= -escript main $(ESCRIPT_NAME)
+
+ESCRIPT_ZIP ?= 7z a -tzip -mx=9 -mtc=off $(if $(filter-out 0,$(V)),,> /dev/null)
+ESCRIPT_ZIP_FILE ?= $(ERLANG_MK_TMP)/escript.zip
+
+# Core targets.
+
+distclean:: distclean-escript
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Escript targets:" \
+		"  escript     Build an executable escript archive"
+
+# Plugin-specific targets.
+
+escript-zip:: FULL=1
+escript-zip:: deps app
+ $(verbose) mkdir -p $(dir $(ESCRIPT_ZIP))
+ $(verbose) rm -f $(ESCRIPT_ZIP_FILE)
+ $(gen_verbose) cd .. && $(ESCRIPT_ZIP) $(ESCRIPT_ZIP_FILE) $(PROJECT)/ebin/*
+ifneq ($(DEPS),)
+ $(verbose) cd $(DEPS_DIR) && $(ESCRIPT_ZIP) $(ESCRIPT_ZIP_FILE) \
+ $(subst $(DEPS_DIR)/,,$(addsuffix /*,$(wildcard \
+ $(addsuffix /ebin,$(shell cat $(ERLANG_MK_TMP)/deps.log)))))
+endif
+
+escript:: escript-zip
+ $(gen_verbose) printf "%s\n" \
+ "#!$(ESCRIPT_SHEBANG)" \
+ "%% $(ESCRIPT_COMMENT)" \
+ "%%! $(ESCRIPT_EMU_ARGS)" > $(ESCRIPT_FILE)
+ $(verbose) cat $(ESCRIPT_ZIP_FILE) >> $(ESCRIPT_FILE)
+ $(verbose) chmod +x $(ESCRIPT_FILE)
+
+distclean-escript:
+ $(gen_verbose) rm -f $(ESCRIPT_FILE)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, Enrique Fernandez <enrique.fernandez@erlang-solutions.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: eunit apps-eunit
+
+# Configuration
+
+EUNIT_OPTS ?=
+EUNIT_ERL_OPTS ?=
+
+# Core targets.
+
+tests:: eunit
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "EUnit targets:" \
+ " eunit Run all the EUnit tests for this project"
+
+# Plugin-specific targets.
+
+define eunit.erl
+ $(call cover.erl)
+ CoverSetup(),
+ case eunit:test($1, [$(EUNIT_OPTS)]) of
+ ok -> ok;
+ error -> halt(2)
+ end,
+ CoverExport("$(call core_native_path,$(COVER_DATA_DIR))/eunit.coverdata"),
+ halt()
+endef
+
+EUNIT_ERL_OPTS += -pa $(TEST_DIR) $(CURDIR)/ebin
+
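+# Illustrative usage: "make eunit t=my_module" runs one module's tests;
+# "make eunit t=my_module:my_test" runs a single test function.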
+ifdef t
+ifeq (,$(findstring :,$(t)))
+eunit: test-build cover-data-dir
+ $(gen_verbose) $(call erlang,$(call eunit.erl,['$(t)']),$(EUNIT_ERL_OPTS))
+else
+eunit: test-build cover-data-dir
+ $(gen_verbose) $(call erlang,$(call eunit.erl,fun $(t)/0),$(EUNIT_ERL_OPTS))
+endif
+else
+EUNIT_EBIN_MODS = $(notdir $(basename $(ERL_FILES) $(BEAM_FILES)))
+EUNIT_TEST_MODS = $(notdir $(basename $(call core_find,$(TEST_DIR)/,*.erl)))
+
+EUNIT_MODS = $(foreach mod,$(EUNIT_EBIN_MODS) $(filter-out \
+ $(patsubst %,%_tests,$(EUNIT_EBIN_MODS)),$(EUNIT_TEST_MODS)),'$(mod)')
+
+eunit: test-build $(if $(IS_APP)$(ROOT_DIR),,apps-eunit) cover-data-dir
+ifneq ($(wildcard src/ $(TEST_DIR)),)
+ $(gen_verbose) $(call erlang,$(call eunit.erl,[$(call comma_list,$(EUNIT_MODS))]),$(EUNIT_ERL_OPTS))
+endif
+
+ifneq ($(ALL_APPS_DIRS),)
+apps-eunit: test-build
+ $(verbose) eunit_retcode=0 ; for app in $(ALL_APPS_DIRS); do $(MAKE) -C $$app eunit IS_APP=1; \
+ [ $$? -ne 0 ] && eunit_retcode=1 ; done ; \
+ exit $$eunit_retcode
+endif
+endif
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter proper,$(DEPS) $(TEST_DEPS)),proper)
+.PHONY: proper
+
+# Targets.
+
+tests:: proper
+
+define proper_check.erl
+ $(call cover.erl)
+ code:add_pathsa([
+ "$(call core_native_path,$(CURDIR)/ebin)",
+ "$(call core_native_path,$(DEPS_DIR)/*/ebin)",
+ "$(call core_native_path,$(TEST_DIR))"]),
+ Module = fun(M) ->
+ [true] =:= lists:usort([
+ case atom_to_list(F) of
+ "prop_" ++ _ ->
+ io:format("Testing ~p:~p/0~n", [M, F]),
+ proper:quickcheck(M:F(), nocolors);
+ _ ->
+ true
+ end
+ || {F, 0} <- M:module_info(exports)])
+ end,
+ try begin
+ CoverSetup(),
+ Res = case $(1) of
+ all -> [true] =:= lists:usort([Module(M) || M <- [$(call comma_list,$(3))]]);
+ module -> Module($(2));
+ function -> proper:quickcheck($(2), nocolors)
+ end,
+ CoverExport("$(COVER_DATA_DIR)/proper.coverdata"),
+ Res
+ end of
+ true -> halt(0);
+ _ -> halt(1)
+ catch error:undef ->
+ io:format("Undefined property or module?~n~p~n", [erlang:get_stacktrace()]),
+ halt(0)
+ end.
+endef
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+proper: test-build cover-data-dir
+ $(verbose) $(call erlang,$(call proper_check.erl,module,$(t)))
+else
+proper: test-build cover-data-dir
+ $(verbose) echo Testing $(t)/0
+ $(verbose) $(call erlang,$(call proper_check.erl,function,$(t)()))
+endif
+else
+proper: test-build cover-data-dir
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(wildcard ebin/*.beam) $(call core_find,$(TEST_DIR)/,*.beam))))))
+ $(gen_verbose) $(call erlang,$(call proper_check.erl,all,undefined,$(MODULES)))
+endif
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Verbosity.
+
+proto_verbose_0 = @echo " PROTO " $(filter %.proto,$(?F));
+proto_verbose = $(proto_verbose_$(V))
+
+# Core targets.
+
+ifneq ($(wildcard src/),)
+ifneq ($(filter gpb protobuffs,$(BUILD_DEPS) $(DEPS)),)
+PROTO_FILES := $(filter %.proto,$(ALL_SRC_FILES))
+ERL_FILES += $(addprefix src/,$(patsubst %.proto,%_pb.erl,$(notdir $(PROTO_FILES))))
+
+ifeq ($(PROTO_FILES),)
+$(ERLANG_MK_TMP)/last-makefile-change-protobuffs:
+ $(verbose) :
+else
+# Rebuild proto files when the Makefile changes.
+# We exclude $(PROJECT).d to avoid a circular dependency.
+$(ERLANG_MK_TMP)/last-makefile-change-protobuffs: $(filter-out $(PROJECT).d,$(MAKEFILE_LIST)) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(PROTO_FILES); \
+ fi
+ $(verbose) touch $@
+
+$(PROJECT).d:: $(ERLANG_MK_TMP)/last-makefile-change-protobuffs
+endif
+
+ifeq ($(filter gpb,$(BUILD_DEPS) $(DEPS)),)
+define compile_proto.erl
+ [begin
+ protobuffs_compile:generate_source(F, [
+ {output_include_dir, "./include"},
+ {output_src_dir, "./src"}])
+ end || F <- string:tokens("$1", " ")],
+ halt().
+endef
+else
+define compile_proto.erl
+ [begin
+ gpb_compile:file(F, [
+ {include_as_lib, true},
+ {module_name_suffix, "_pb"},
+ {o_hrl, "./include"},
+ {o_erl, "./src"}])
+ end || F <- string:tokens("$1", " ")],
+ halt().
+endef
+endif
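+# Illustrative: with gpb in DEPS, a src/foo.proto found among the sources is
+# compiled to src/foo_pb.erl and include/foo_pb.hrl when $(PROJECT).d rebuilds.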
+
+ifneq ($(PROTO_FILES),)
+$(PROJECT).d:: $(PROTO_FILES)
+ $(verbose) mkdir -p ebin/ include/
+ $(if $(strip $?),$(proto_verbose) $(call erlang,$(call compile_proto.erl,$?)))
+endif
+endif
+endif
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: relx-rel relx-relup distclean-relx-rel run
+
+# Configuration.
+
+RELX ?= $(ERLANG_MK_TMP)/relx
+RELX_CONFIG ?= $(CURDIR)/relx.config
+
+RELX_URL ?= https://erlang.mk/res/relx-v3.27.0
+RELX_OPTS ?=
+RELX_OUTPUT_DIR ?= _rel
+RELX_REL_EXT ?=
+RELX_TAR ?= 1
+
+ifdef SFX
+ RELX_TAR = 1
+endif
+
+ifeq ($(firstword $(RELX_OPTS)),-o)
+ RELX_OUTPUT_DIR = $(word 2,$(RELX_OPTS))
+else
+ RELX_OPTS += -o $(RELX_OUTPUT_DIR)
+endif
+
+# Core targets.
+
+ifeq ($(IS_DEP),)
+ifneq ($(wildcard $(RELX_CONFIG)),)
+rel:: relx-rel
+
+relup:: relx-relup
+endif
+endif
+
+distclean:: distclean-relx-rel
+
+# Plugin-specific targets.
+
+$(RELX): | $(ERLANG_MK_TMP)
+ $(gen_verbose) $(call core_http_get,$(RELX),$(RELX_URL))
+ $(verbose) chmod +x $(RELX)
+
+relx-rel: $(RELX) rel-deps app
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) release
+ $(verbose) $(MAKE) relx-post-rel
+ifeq ($(RELX_TAR),1)
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) tar
+endif
+
+relx-relup: $(RELX) rel-deps app
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) release
+ $(MAKE) relx-post-rel
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) relup $(if $(filter 1,$(RELX_TAR)),tar)
+
+distclean-relx-rel:
+ $(gen_verbose) rm -rf $(RELX_OUTPUT_DIR)
+
+# Default hooks.
+relx-post-rel::
+ $(verbose) :
+
+# Run target.
+
+ifeq ($(wildcard $(RELX_CONFIG)),)
+run::
+else
+
+define get_relx_release.erl
+ {ok, Config} = file:consult("$(call core_native_path,$(RELX_CONFIG))"),
+ {release, {Name, Vsn0}, _} = lists:keyfind(release, 1, Config),
+ Vsn = case Vsn0 of
+ {cmd, Cmd} -> os:cmd(Cmd);
+ semver -> "";
+ {semver, _} -> "";
+		_ -> Vsn0
+ end,
+ Extended = case lists:keyfind(extended_start_script, 1, Config) of
+ {_, true} -> "1";
+ _ -> ""
+ end,
+ io:format("~s ~s ~s", [Name, Vsn, Extended]),
+ halt(0).
+endef
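+# Illustrative relx.config matching the parser above (names are examples):
+#   {release, {my_app, "0.1.0"}, [my_app]}.
+#   {extended_start_script, true}.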
+
+RELX_REL := $(shell $(call erlang,$(get_relx_release.erl)))
+RELX_REL_NAME := $(word 1,$(RELX_REL))
+RELX_REL_VSN := $(word 2,$(RELX_REL))
+RELX_REL_CMD := $(if $(word 3,$(RELX_REL)),console)
+
+ifeq ($(PLATFORM),msys2)
+RELX_REL_EXT := .cmd
+endif
+
+run:: all
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) $(RELX_REL_CMD)
+
+ifdef RELOAD
+rel::
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) ping
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) \
+ eval "io:format(\"~p~n\", [c:lm()])"
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Relx targets:" \
+ " run Compile the project, build the release and run it"
+
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, M Robert Martin <rob@version2beta.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: shell
+
+# Configuration.
+
+SHELL_ERL ?= erl
+SHELL_PATHS ?= $(CURDIR)/ebin $(TEST_DIR)
+SHELL_OPTS ?=
+
+ALL_SHELL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(SHELL_DEPS))
+
+# Core targets
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Shell targets:" \
+		"  shell       Run an Erlang shell with SHELL_OPTS or a reasonable default"
+
+# Plugin-specific targets.
+
+$(foreach dep,$(SHELL_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+build-shell-deps:
+else
+build-shell-deps: $(ALL_SHELL_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_SHELL_DEPS_DIRS) ; do \
+ if [ -z "$(strip $(FULL))" ] && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ else \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ fi \
+ done
+endif
+
+shell:: build-shell-deps
+ $(gen_verbose) $(SHELL_ERL) -pa $(SHELL_PATHS) $(SHELL_OPTS)
+
+# Copyright 2017, Stanislaw Klekot <dozzie@jarowit.net>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-sphinx sphinx
+
+# Configuration.
+
+SPHINX_BUILD ?= sphinx-build
+SPHINX_SOURCE ?= doc
+SPHINX_CONFDIR ?=
+SPHINX_FORMATS ?= html
+SPHINX_DOCTREES ?= $(ERLANG_MK_TMP)/sphinx.doctrees
+SPHINX_OPTS ?=
+
+#sphinx_html_opts =
+#sphinx_html_output = html
+#sphinx_man_opts =
+#sphinx_man_output = man
+#sphinx_latex_opts =
+#sphinx_latex_output = latex
+
+# Helpers.
+
+sphinx_build_0 = @echo " SPHINX" $1; $(SPHINX_BUILD) -N -q
+sphinx_build_1 = $(SPHINX_BUILD) -N
+sphinx_build_2 = set -x; $(SPHINX_BUILD)
+sphinx_build = $(sphinx_build_$(V))
+
+define sphinx.build
+$(call sphinx_build,$1) -b $1 -d $(SPHINX_DOCTREES) $(if $(SPHINX_CONFDIR),-c $(SPHINX_CONFDIR)) $(SPHINX_OPTS) $(sphinx_$1_opts) -- $(SPHINX_SOURCE) $(call sphinx.output,$1)
+
+endef
+
+define sphinx.output
+$(if $(sphinx_$1_output),$(sphinx_$1_output),$1)
+endef
+
+# Targets.
+
+ifneq ($(wildcard $(if $(SPHINX_CONFDIR),$(SPHINX_CONFDIR),$(SPHINX_SOURCE))/conf.py),)
+docs:: sphinx
+distclean:: distclean-sphinx
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Sphinx targets:" \
+ " sphinx Generate Sphinx documentation." \
+ "" \
+		"ReST sources and the 'conf.py' file are expected in the directory pointed to by" \
+ "SPHINX_SOURCE ('doc' by default). SPHINX_FORMATS lists formats to build (only" \
+ "'html' format is generated by default); target directory can be specified by" \
+ 'setting sphinx_$${format}_output, for example: sphinx_html_output = output/html' \
+ "Additional Sphinx options can be set in SPHINX_OPTS."
+
+# Plugin-specific targets.
+
+sphinx:
+ $(foreach F,$(SPHINX_FORMATS),$(call sphinx.build,$F))
+
+distclean-sphinx:
+ $(gen_verbose) rm -rf $(filter-out $(SPHINX_SOURCE),$(foreach F,$(SPHINX_FORMATS),$(call sphinx.output,$F)))
+
+# Copyright (c) 2017, Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: show-ERL_LIBS show-ERLC_OPTS show-TEST_ERLC_OPTS
+
+show-ERL_LIBS:
+ @echo $(ERL_LIBS)
+
+show-ERLC_OPTS:
+ @$(foreach opt,$(ERLC_OPTS) -pa ebin -I include,echo "$(opt)";)
+
+show-TEST_ERLC_OPTS:
+ @$(foreach opt,$(TEST_ERLC_OPTS) -pa ebin -I include,echo "$(opt)";)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter triq,$(DEPS) $(TEST_DEPS)),triq)
+.PHONY: triq
+
+# Targets.
+
+tests:: triq
+
+define triq_check.erl
+ $(call cover.erl)
+ code:add_pathsa([
+ "$(call core_native_path,$(CURDIR)/ebin)",
+ "$(call core_native_path,$(DEPS_DIR)/*/ebin)",
+ "$(call core_native_path,$(TEST_DIR))"]),
+ try begin
+ CoverSetup(),
+ Res = case $(1) of
+ all -> [true] =:= lists:usort([triq:check(M) || M <- [$(call comma_list,$(3))]]);
+ module -> triq:check($(2));
+ function -> triq:check($(2))
+ end,
+ CoverExport("$(COVER_DATA_DIR)/triq.coverdata"),
+ Res
+ end of
+ true -> halt(0);
+ _ -> halt(1)
+ catch error:undef ->
+ io:format("Undefined property or module?~n~p~n", [erlang:get_stacktrace()]),
+ halt(0)
+ end.
+endef
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+triq: test-build cover-data-dir
+ $(verbose) $(call erlang,$(call triq_check.erl,module,$(t)))
+else
+triq: test-build cover-data-dir
+ $(verbose) echo Testing $(t)/0
+ $(verbose) $(call erlang,$(call triq_check.erl,function,$(t)()))
+endif
+else
+triq: test-build cover-data-dir
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(wildcard ebin/*.beam) $(call core_find,$(TEST_DIR)/,*.beam))))))
+ $(gen_verbose) $(call erlang,$(call triq_check.erl,all,undefined,$(MODULES)))
+endif
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Erlang Solutions Ltd.
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: xref distclean-xref
+
+# Configuration.
+
+ifeq ($(XREF_CONFIG),)
+ XREFR_ARGS :=
+else
+ XREFR_ARGS := -c $(XREF_CONFIG)
+endif
+
+XREFR ?= $(CURDIR)/xrefr
+export XREFR
+
+XREFR_URL ?= https://github.com/inaka/xref_runner/releases/download/1.1.0/xrefr
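+# Illustrative usage: "make xref XREF_CONFIG=xref.config" downloads xrefr on
+# first use and runs it with that configuration file.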
+
+# Core targets.
+
+help::
+ $(verbose) printf '%s\n' '' \
+ 'Xref targets:' \
+		'  xref        Run xrefr using $$XREF_CONFIG as the config file if defined'
+
+distclean:: distclean-xref
+
+# Plugin-specific targets.
+
+$(XREFR):
+ $(gen_verbose) $(call core_http_get,$(XREFR),$(XREFR_URL))
+ $(verbose) chmod +x $(XREFR)
+
+xref: deps app $(XREFR)
+ $(gen_verbose) $(XREFR) $(XREFR_ARGS)
+
+distclean-xref:
+ $(gen_verbose) rm -rf $(XREFR)
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Viktor Söderqvist <viktor@zuiderkwast.se>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+COVER_REPORT_DIR ?= cover
+COVER_DATA_DIR ?= $(COVER_REPORT_DIR)
+
+ifdef COVER
+COVER_APPS ?= $(notdir $(ALL_APPS_DIRS))
+COVER_DEPS ?=
+endif
+
+# Code coverage for Common Test.
+
+ifdef COVER
+ifdef CT_RUN
+ifneq ($(wildcard $(TEST_DIR)),)
+test-build:: $(TEST_DIR)/ct.cover.spec
+
+$(TEST_DIR)/ct.cover.spec: cover-data-dir
+ $(gen_verbose) printf "%s\n" \
+ "{incl_app, '$(PROJECT)', details}." \
+ "{incl_dirs, '$(PROJECT)', [\"$(call core_native_path,$(CURDIR)/ebin)\" \
+ $(foreach a,$(COVER_APPS),$(comma) \"$(call core_native_path,$(APPS_DIR)/$a/ebin)\") \
+ $(foreach d,$(COVER_DEPS),$(comma) \"$(call core_native_path,$(DEPS_DIR)/$d/ebin)\")]}." \
+ '{export,"$(call core_native_path,$(abspath $(COVER_DATA_DIR))/ct.coverdata)"}.' > $@
+
+CT_RUN += -cover $(TEST_DIR)/ct.cover.spec
+endif
+endif
+endif
+
+# Code coverage for other tools.
+
+ifdef COVER
+define cover.erl
+ CoverSetup = fun() ->
+ Dirs = ["$(call core_native_path,$(CURDIR)/ebin)"
+ $(foreach a,$(COVER_APPS),$(comma) "$(call core_native_path,$(APPS_DIR)/$a/ebin)")
+ $(foreach d,$(COVER_DEPS),$(comma) "$(call core_native_path,$(DEPS_DIR)/$d/ebin)")],
+ [begin
+ case filelib:is_dir(Dir) of
+ false -> false;
+ true ->
+ case cover:compile_beam_directory(Dir) of
+ {error, _} -> halt(1);
+ _ -> true
+ end
+ end
+ end || Dir <- Dirs]
+ end,
+ CoverExport = fun(Filename) -> cover:export(Filename) end,
+endef
+else
+define cover.erl
+ CoverSetup = fun() -> ok end,
+ CoverExport = fun(_) -> ok end,
+endef
+endif
+
+# Core targets
+
+ifdef COVER
+ifneq ($(COVER_REPORT_DIR),)
+tests::
+ $(verbose) $(MAKE) --no-print-directory cover-report
+endif
+
+cover-data-dir: | $(COVER_DATA_DIR)
+
+$(COVER_DATA_DIR):
+ $(verbose) mkdir -p $(COVER_DATA_DIR)
+else
+cover-data-dir:
+endif
+
+clean:: coverdata-clean
+
+ifneq ($(COVER_REPORT_DIR),)
+distclean:: cover-report-clean
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Cover targets:" \
+		"  cover-report  Generate an HTML coverage report from previously collected" \
+ " cover data." \
+ " all.coverdata Merge all coverdata files into all.coverdata." \
+ "" \
+ "If COVER=1 is set, coverage data is generated by the targets eunit and ct. The" \
+		"target tests additionally generates an HTML coverage report from the combined" \
+ "coverdata files from each of these testing tools. HTML reports can be disabled" \
+ "by setting COVER_REPORT_DIR to empty."
+
+# Plugin specific targets
+
+COVERDATA = $(filter-out $(COVER_DATA_DIR)/all.coverdata,$(wildcard $(COVER_DATA_DIR)/*.coverdata))
+
+.PHONY: coverdata-clean
+coverdata-clean:
+ $(gen_verbose) rm -f $(COVER_DATA_DIR)/*.coverdata $(TEST_DIR)/ct.cover.spec
+
+# Merge all coverdata files into one.
+define cover_export.erl
+ $(foreach f,$(COVERDATA),cover:import("$(f)") == ok orelse halt(1),)
+ cover:export("$(COVER_DATA_DIR)/$@"), halt(0).
+endef
+
+all.coverdata: $(COVERDATA) cover-data-dir
+ $(gen_verbose) $(call erlang,$(cover_export.erl))
+
+# These are only defined if COVER_REPORT_DIR is non-empty. Set COVER_REPORT_DIR to
+# empty if you want the coverdata files but not the HTML report.
+ifneq ($(COVER_REPORT_DIR),)
+
+.PHONY: cover-report-clean cover-report
+
+cover-report-clean:
+ $(gen_verbose) rm -rf $(COVER_REPORT_DIR)
+ifneq ($(COVER_REPORT_DIR),$(COVER_DATA_DIR))
+ $(if $(shell ls -A $(COVER_DATA_DIR)/),,$(verbose) rmdir $(COVER_DATA_DIR))
+endif
+
+ifeq ($(COVERDATA),)
+cover-report:
+else
+
+# Modules which include eunit.hrl always contain one line without coverage
+# because eunit defines test/0 which is never called. We compensate for this.
+EUNIT_HRL_MODS = $(subst $(space),$(comma),$(shell \
+ grep -H -e '^\s*-include.*include/eunit\.hrl"' src/*.erl \
+ | sed "s/^src\/\(.*\)\.erl:.*/'\1'/" | uniq))
+
+define cover_report.erl
+ $(foreach f,$(COVERDATA),cover:import("$(f)") == ok orelse halt(1),)
+ Ms = cover:imported_modules(),
+ [cover:analyse_to_file(M, "$(COVER_REPORT_DIR)/" ++ atom_to_list(M)
+ ++ ".COVER.html", [html]) || M <- Ms],
+ Report = [begin {ok, R} = cover:analyse(M, module), R end || M <- Ms],
+ EunitHrlMods = [$(EUNIT_HRL_MODS)],
+ Report1 = [{M, {Y, case lists:member(M, EunitHrlMods) of
+ true -> N - 1; false -> N end}} || {M, {Y, N}} <- Report],
+ TotalY = lists:sum([Y || {_, {Y, _}} <- Report1]),
+ TotalN = lists:sum([N || {_, {_, N}} <- Report1]),
+ Perc = fun(Y, N) -> case Y + N of 0 -> 100; S -> round(100 * Y / S) end end,
+ TotalPerc = Perc(TotalY, TotalN),
+ {ok, F} = file:open("$(COVER_REPORT_DIR)/index.html", [write]),
+ io:format(F, "<!DOCTYPE html><html>~n"
+ "<head><meta charset=\"UTF-8\">~n"
+ "<title>Coverage report</title></head>~n"
+ "<body>~n", []),
+ io:format(F, "<h1>Coverage</h1>~n<p>Total: ~p%</p>~n", [TotalPerc]),
+ io:format(F, "<table><tr><th>Module</th><th>Coverage</th></tr>~n", []),
+ [io:format(F, "<tr><td><a href=\"~p.COVER.html\">~p</a></td>"
+ "<td>~p%</td></tr>~n",
+ [M, M, Perc(Y, N)]) || {M, {Y, N}} <- Report1],
+ How = "$(subst $(space),$(comma)$(space),$(basename $(COVERDATA)))",
+ Date = "$(shell date -u "+%Y-%m-%dT%H:%M:%SZ")",
+ io:format(F, "</table>~n"
+ "<p>Generated using ~s and erlang.mk on ~s.</p>~n"
+ "</body></html>", [How, Date]),
+ halt().
+endef
+
+cover-report:
+ $(verbose) mkdir -p $(COVER_REPORT_DIR)
+ $(gen_verbose) $(call erlang,$(cover_report.erl))
+
+endif
+endif # ifneq ($(COVER_REPORT_DIR),)
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: sfx
+
+ifdef RELX_REL
+ifdef SFX
+
+# Configuration.
+
+SFX_ARCHIVE ?= $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/$(RELX_REL_NAME)-$(RELX_REL_VSN).tar.gz
+SFX_OUTPUT_FILE ?= $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME).run
+
+# Core targets.
+
+rel:: sfx
+
+# Plugin-specific targets.
+
+define sfx_stub
+#!/bin/sh
+
+TMPDIR=`mktemp -d`
+ARCHIVE=`awk '/^__ARCHIVE_BELOW__$$/ {print NR + 1; exit 0;}' $$0`
+FILENAME=$$(basename $$0)
+REL=$${FILENAME%.*}
+
+tail -n+$$ARCHIVE $$0 | tar -xzf - -C $$TMPDIR
+
+$$TMPDIR/bin/$$REL console
+RET=$$?
+
+rm -rf $$TMPDIR
+
+exit $$RET
+
+__ARCHIVE_BELOW__
+endef
+
+sfx:
+ $(verbose) $(call core_render,sfx_stub,$(SFX_OUTPUT_FILE))
+ $(gen_verbose) cat $(SFX_ARCHIVE) >> $(SFX_OUTPUT_FILE)
+ $(verbose) chmod +x $(SFX_OUTPUT_FILE)
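+# Illustrative usage: "make SFX=1 rel" builds the release tarball and wraps it
+# into $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME).run, a self-extracting script that
+# unpacks itself and boots the release console when executed.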
+
+endif
+endif
+
+# Copyright (c) 2013-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# External plugins.
+
+DEP_PLUGINS ?=
+
+$(foreach p,$(DEP_PLUGINS),\
+ $(eval $(if $(findstring /,$p),\
+ $(call core_dep_plugin,$p,$(firstword $(subst /, ,$p))),\
+ $(call core_dep_plugin,$p/plugins.mk,$p))))
+
+help:: help-plugins
+
+help-plugins::
+ $(verbose) :
+
+# Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015-2016, Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Fetch dependencies recursively (without building them).
+
+.PHONY: fetch-deps fetch-doc-deps fetch-rel-deps fetch-test-deps \
+ fetch-shell-deps
+
+.PHONY: $(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+fetch-deps: $(ERLANG_MK_RECURSIVE_DEPS_LIST)
+fetch-doc-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST)
+fetch-rel-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST)
+fetch-test-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST)
+fetch-shell-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+ifneq ($(SKIP_DEPS),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST):
+ $(verbose) :> $@
+else
+# By default, we fetch "normal" dependencies. They are also included no
+# matter the type of requested dependencies.
+#
+# $(ALL_DEPS_DIRS) includes $(BUILD_DEPS).
+
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_DOC_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_REL_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_TEST_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_SHELL_DEPS_DIRS)
+
+# Allow the use of fetch-deps and $(DEP_TYPES) to fetch multiple types of
+# dependencies with a single target.
+ifneq ($(filter doc,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_DOC_DEPS_DIRS)
+endif
+ifneq ($(filter rel,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_REL_DEPS_DIRS)
+endif
+ifneq ($(filter test,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_TEST_DEPS_DIRS)
+endif
+ifneq ($(filter shell,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_SHELL_DEPS_DIRS)
+endif
+
+ERLANG_MK_RECURSIVE_TMP_LIST := $(abspath $(ERLANG_MK_TMP)/recursive-tmp-deps-$(shell echo $$PPID).log)
+
+$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): | $(ERLANG_MK_TMP)
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_RECURSIVE_TMP_LIST)
+endif
+ $(verbose) touch $(ERLANG_MK_RECURSIVE_TMP_LIST)
+ $(verbose) set -e; for dep in $^ ; do \
+ if ! grep -qs ^$$dep$$ $(ERLANG_MK_RECURSIVE_TMP_LIST); then \
+ echo $$dep >> $(ERLANG_MK_RECURSIVE_TMP_LIST); \
+ if grep -qs -E "^[[:blank:]]*include[[:blank:]]+(erlang\.mk|.*/erlang\.mk|.*ERLANG_MK_FILENAME.*)$$" \
+ $$dep/GNUmakefile $$dep/makefile $$dep/Makefile; then \
+ $(MAKE) -C $$dep fetch-deps \
+ IS_DEP=1 \
+ ERLANG_MK_RECURSIVE_TMP_LIST=$(ERLANG_MK_RECURSIVE_TMP_LIST); \
+ fi \
+ fi \
+ done
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) sort < $(ERLANG_MK_RECURSIVE_TMP_LIST) | \
+ uniq > $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted
+ $(verbose) cmp -s $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted $@ \
+ || mv $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted $@
+ $(verbose) rm -f $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted
+ $(verbose) rm $(ERLANG_MK_RECURSIVE_TMP_LIST)
+endif
+endif # ifneq ($(SKIP_DEPS),)
+
+# List dependencies recursively.
+
+.PHONY: list-deps list-doc-deps list-rel-deps list-test-deps \
+ list-shell-deps
+
+list-deps: $(ERLANG_MK_RECURSIVE_DEPS_LIST)
+list-doc-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST)
+list-rel-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST)
+list-test-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST)
+list-shell-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+list-deps list-doc-deps list-rel-deps list-test-deps list-shell-deps:
+ $(verbose) cat $^
+
+# Query dependencies recursively.
+
+.PHONY: query-deps query-doc-deps query-rel-deps query-test-deps \
+ query-shell-deps
+
+QUERY ?= name fetch_method repo version
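+# Illustrative usage: 'make query-deps QUERY="name version"' prints one line
+# per dependency with only those fields.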
+
+define query_target
+$(1): $(2) clean-tmp-query.log
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(4)
+endif
+ $(verbose) $(foreach dep,$(3),\
+ echo $(PROJECT): $(foreach q,$(QUERY),$(call query_$(q),$(dep))) >> $(4) ;)
+ $(if $(filter-out query-deps,$(1)),,\
+ $(verbose) set -e; for dep in $(3) ; do \
+ if grep -qs ^$$$$dep$$$$ $(ERLANG_MK_TMP)/query.log; then \
+ :; \
+ else \
+ echo $$$$dep >> $(ERLANG_MK_TMP)/query.log; \
+ $(MAKE) -C $(DEPS_DIR)/$$$$dep $$@ QUERY="$(QUERY)" IS_DEP=1 || true; \
+ fi \
+ done)
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) touch $(4)
+ $(verbose) cat $(4)
+endif
+endef
+
+clean-tmp-query.log:
+ifeq ($(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_TMP)/query.log
+endif
+
+$(eval $(call query_target,query-deps,$(ERLANG_MK_RECURSIVE_DEPS_LIST),$(BUILD_DEPS) $(DEPS),$(ERLANG_MK_QUERY_DEPS_FILE)))
+$(eval $(call query_target,query-doc-deps,$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST),$(DOC_DEPS),$(ERLANG_MK_QUERY_DOC_DEPS_FILE)))
+$(eval $(call query_target,query-rel-deps,$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST),$(REL_DEPS),$(ERLANG_MK_QUERY_REL_DEPS_FILE)))
+$(eval $(call query_target,query-test-deps,$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST),$(TEST_DEPS),$(ERLANG_MK_QUERY_TEST_DEPS_FILE)))
+$(eval $(call query_target,query-shell-deps,$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST),$(SHELL_DEPS),$(ERLANG_MK_QUERY_SHELL_DEPS_FILE)))
diff --git a/deps/rabbitmq_web_stomp_examples/priv/bunny.html b/deps/rabbitmq_web_stomp_examples/priv/bunny.html
new file mode 100644
index 0000000000..593379d5fd
--- /dev/null
+++ b/deps/rabbitmq_web_stomp_examples/priv/bunny.html
@@ -0,0 +1,136 @@
+<!doctype html>
+<html><head>
+ <script src="https://code.jquery.com/jquery-3.4.1.min.js"></script>
+ <script src="stomp.js"></script>
+
+ <style>
+ #cnvs {
+ border: none;
+ -moz-border-radius: 4px;
+ cursor: url(pencil.cur),crosshair;
+ position: absolute;
+ overflow: hidden;
+ width: 100%;
+ height: 100%;
+ }
+ #cnvs:active {
+ cursor: url(pencil.cur),crosshair;
+ }
+ body {
+ overflow: hidden;
+ }
+ </style>
+ <title>RabbitMQ Web STOMP Examples: Bunny Drawing</title>
+ <link href="main.css" rel="stylesheet" type="text/css"/>
+</head><body lang="en">
+ <h1><a href="index.html">RabbitMQ Web STOMP Examples</a> > Bunny Drawing</h1>
+ <canvas id="cnvs"></canvas>
+<script>
+var send; var draw;
+send = draw = function(){};
+
+var lines = [];
+
+var canvas = document.getElementById('cnvs');
+
+if (canvas.getContext) {
+ var ctx = canvas.getContext('2d');
+
+ var img = new Image();
+ img.onload = function() {
+ ctx.drawImage(img, 230, 160);
+ };
+ img.src = 'bunny.png';
+
+ draw = function(p) {
+ ctx.beginPath();
+ ctx.moveTo(p.x1, p.y1);
+ ctx.lineTo(p.x2, p.y2);
+ ctx.stroke();
+ ctx.drawImage(img, 230, 160);
+ };
+
+ var do_resize = function() {
+ canvas.width = window.innerWidth;
+ canvas.height = window.innerHeight;
+
+ ctx.font = "bold 20px sans-serif";
+ ctx.fillStyle = "#444";
+ ctx.fillText("Draw wings on the bunny!", 260, 100);
+ ctx.font = "normal 16px sans-serif";
+ ctx.fillStyle = "#888";
+ ctx.fillText("(For more fun open a second browser)", 255, 130);
+
+ ctx.drawImage(img, 230, 160);
+
+ ctx.strokeStyle = "#fa0";
+ ctx.lineWidth = "10";
+ ctx.lineCap = "round";
+
+ $.map(lines, function (p) {
+ draw(p);
+ });
+ };
+
+ $(window).resize(do_resize);
+ $(do_resize);
+
+
+ var pos = $('#cnvs').position();
+ var prev = null;
+ $('#cnvs').mousedown(function(evt) {
+ evt.preventDefault();
+ evt.stopPropagation();
+ $('#cnvs').bind('mousemove', function(e) {
+ var curr = {x:e.pageX-pos.left, y:e.pageY-pos.top};
+ if (!prev) {
+ prev = curr;
+ return;
+ }
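+      // Only record a new segment once the pointer moved more than 8px from
+      // the previous point, throttling how often segments are published.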
+ if (Math.sqrt(Math.pow(prev.x - curr.x, 2) +
+ Math.pow(prev.y - curr.y, 2)) > 8) {
+ var p = {x1:prev.x, y1:prev.y, x2:curr.x, y2:curr.y}
+ lines.push(p);
+ draw(p);
+ send(JSON.stringify(p));
+ prev = curr;
+ }
+ });
+ });
+ $('html').mouseup(function() {
+ prev = null;
+ $('#cnvs').unbind('mousemove');
+ });
+}
+else {
+ document.write("Sorry - this demo requires a browser with canvas tag support.");
+}
+
+// Stomp.js boilerplate
+var ws = new WebSocket('ws://' + window.location.hostname + ':15674/ws');
+var client = Stomp.over(ws);
+
+client.debug = function() {
+ if (window.console && console.log && console.log.apply) {
+ console.log.apply(console, arguments);
+ }
+};
+
+send = function(data) {
+ client.send('/topic/bunny', {}, data);
+};
+
+var on_connect = function(x) {
+  var id = client.subscribe('/topic/bunny', function(d) {
+ var p = JSON.parse(d.body);
+ lines.push(p);
+    draw(p);
+ });
+};
+var on_error = function() {
+ console.log('error');
+};
+client.connect('guest', 'guest', on_connect, on_error, '/');
+
+</script>
+</body></html>
diff --git a/deps/rabbitmq_web_stomp_examples/priv/bunny.png b/deps/rabbitmq_web_stomp_examples/priv/bunny.png
new file mode 100644
index 0000000000..6c2284ba84
--- /dev/null
+++ b/deps/rabbitmq_web_stomp_examples/priv/bunny.png
Binary files differ
diff --git a/deps/rabbitmq_web_stomp_examples/priv/echo.html b/deps/rabbitmq_web_stomp_examples/priv/echo.html
new file mode 100644
index 0000000000..ce4013ceee
--- /dev/null
+++ b/deps/rabbitmq_web_stomp_examples/priv/echo.html
@@ -0,0 +1,105 @@
+<!DOCTYPE html>
+<html><head>
+ <script src="https://code.jquery.com/jquery-3.4.1.min.js"></script>
+ <script src="stomp.js"></script>
+ <style>
+ .box {
+ width: 440px;
+ float: left;
+ margin: 0 20px 0 20px;
+ }
+
+ .box div, .box input {
+ border: 1px solid;
+ -moz-border-radius: 4px;
+ border-radius: 4px;
+ width: 100%;
+ padding: 5px;
+ margin: 3px 0 10px 0;
+ }
+
+ .box div {
+ border-color: grey;
+ height: 300px;
+ overflow: auto;
+ }
+
+ div code {
+ display: block;
+ }
+
+ #first div code {
+ -moz-border-radius: 2px;
+ border-radius: 2px;
+ border: 1px solid #eee;
+ margin-bottom: 5px;
+ }
+
+ #second div {
+ font-size: 0.8em;
+ }
+ </style>
+ <title>RabbitMQ Web STOMP Examples : Echo Server</title>
+ <link href="main.css" rel="stylesheet" type="text/css"/>
+</head><body lang="en">
+ <h1><a href="index.html">RabbitMQ Web STOMP Examples</a> > Echo Server</h1>
+
+ <div id="first" class="box">
+ <h2>Received</h2>
+ <div></div>
+    <form><input autocomplete="off" value="Type here..."></form>
+ </div>
+
+ <div id="second" class="box">
+ <h2>Logs</h2>
+ <div></div>
+ </div>
+
+ <script>
+ var has_had_focus = false;
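+  // pipe() wires a box's <form> to the given send callback and returns a
+  // print function that appends messages to the box's <div>.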
+ var pipe = function(el_name, send) {
+ var div = $(el_name + ' div');
+ var inp = $(el_name + ' input');
+ var form = $(el_name + ' form');
+
+ var print = function(m, p) {
+ p = (p === undefined) ? '' : JSON.stringify(p);
+ div.append($("<code>").text(m + ' ' + p));
+ div.scrollTop(div.scrollTop() + 10000);
+ };
+
+ if (send) {
+ form.submit(function() {
+ send(inp.val());
+ inp.val('');
+ return false;
+ });
+ }
+ return print;
+ };
+
+ // Stomp.js boilerplate
+ var client = Stomp.client('ws://' + window.location.hostname + ':15674/ws');
+ client.debug = pipe('#second');
+
+ var print_first = pipe('#first', function(data) {
+ client.send('/topic/test', {"content-type":"text/plain"}, data);
+ });
+ var on_connect = function(x) {
+    var id = client.subscribe("/topic/test", function(d) {
+ print_first(d.body);
+ });
+ };
+ var on_error = function() {
+ console.log('error');
+ };
+ client.connect('guest', 'guest', on_connect, on_error, '/');
+
+ $('#first input').focus(function() {
+ if (!has_had_focus) {
+ has_had_focus = true;
+ $(this).val("");
+ }
+ });
+ </script>
+</body></html>
diff --git a/deps/rabbitmq_web_stomp_examples/priv/index.html b/deps/rabbitmq_web_stomp_examples/priv/index.html
new file mode 100644
index 0000000000..f722addd85
--- /dev/null
+++ b/deps/rabbitmq_web_stomp_examples/priv/index.html
@@ -0,0 +1,16 @@
+<!doctype html>
+<html lang="en">
+ <head>
+ <meta charset="utf-8">
+ <title>RabbitMQ Web STOMP Examples</title>
+ <link href="main.css" rel="stylesheet" type="text/css"/>
+ </head>
+ <body>
+ <h1>RabbitMQ Web STOMP Examples</h1>
+ <ul class="menu">
+ <li><a href="echo.html">Simple Echo Server</a></li>
+ <li><a href="bunny.html">Bunny Drawing</a></li>
+ <li><a href="temp-queue.html">Temporary Queue Example</a></li>
+ </ul>
+ </body>
+</html>
diff --git a/deps/rabbitmq_web_stomp_examples/priv/main.css b/deps/rabbitmq_web_stomp_examples/priv/main.css
new file mode 100644
index 0000000000..a5cdcda152
--- /dev/null
+++ b/deps/rabbitmq_web_stomp_examples/priv/main.css
@@ -0,0 +1,38 @@
+body {
+ font-family: "Arial";
+ color: #444;
+}
+
+h1, h2 {
+ color: #f60;
+ font-weight: normal;
+}
+
+h1 {
+ font-size: 1.5em;
+}
+
+h2 {
+ font-size: 1.2em;
+ margin: 0;
+}
+
+a {
+ color: #f60;
+ border: 1px solid #fda;
+ background: #fff0e0;
+ border-radius: 3px; -moz-border-radius: 3px;
+ padding: 2px;
+ text-decoration: none;
+ /* font-weight: bold; */
+}
+
+ul.menu {
+ list-style-type: none;
+ padding: 0;
+ margin: 0;
+}
+
+ul.menu li {
+ padding: 5px 0;
+}
diff --git a/deps/rabbitmq_web_stomp_examples/priv/pencil.cur b/deps/rabbitmq_web_stomp_examples/priv/pencil.cur
new file mode 100755
index 0000000000..a3e3598bf0
--- /dev/null
+++ b/deps/rabbitmq_web_stomp_examples/priv/pencil.cur
Binary files differ
diff --git a/deps/rabbitmq_web_stomp_examples/priv/stomp.js b/deps/rabbitmq_web_stomp_examples/priv/stomp.js
new file mode 100644
index 0000000000..8c3ca8fe26
--- /dev/null
+++ b/deps/rabbitmq_web_stomp_examples/priv/stomp.js
@@ -0,0 +1,501 @@
+// Generated by CoffeeScript 1.7.1
+
+/*
+ Stomp Over WebSocket http://jmesnil.net/stomp-websocket/doc/ | Apache License V2.0
+
+ Copyright (C) 2010-2013 [Jeff Mesnil](https://jmesnil.net/)
+ Copyright (C) 2012 [FuseSource, Inc.](https://fusesource.com)
+ */
+
+(function() {
+ var Byte, Client, Frame, Stomp,
+ __hasProp = {}.hasOwnProperty,
+ __slice = [].slice;
+
+ Byte = {
+ LF: '\x0A',
+ NULL: '\x00'
+ };
+
+ Frame = (function() {
+ var unmarshallSingle;
+
+ function Frame(command, headers, body) {
+ this.command = command;
+ this.headers = headers != null ? headers : {};
+ this.body = body != null ? body : '';
+ }
+
+ Frame.prototype.toString = function() {
+ var lines, name, skipContentLength, value, _ref;
+ lines = [this.command];
+ skipContentLength = this.headers['content-length'] === false ? true : false;
+ if (skipContentLength) {
+ delete this.headers['content-length'];
+ }
+ _ref = this.headers;
+ for (name in _ref) {
+ if (!__hasProp.call(_ref, name)) continue;
+ value = _ref[name];
+ lines.push("" + name + ":" + value);
+ }
+ if (this.body && !skipContentLength) {
+ lines.push("content-length:" + (Frame.sizeOfUTF8(this.body)));
+ }
+ lines.push(Byte.LF + this.body);
+ return lines.join(Byte.LF);
+ };
+
+ Frame.sizeOfUTF8 = function(s) {
+ if (s) {
+ return encodeURI(s).match(/%..|./g).length;
+ } else {
+ return 0;
+ }
+ };
+
+ unmarshallSingle = function(data) {
+ var body, chr, command, divider, headerLines, headers, i, idx, len, line, start, trim, _i, _j, _len, _ref, _ref1;
+ divider = data.search(RegExp("" + Byte.LF + Byte.LF));
+ headerLines = data.substring(0, divider).split(Byte.LF);
+ command = headerLines.shift();
+ headers = {};
+ trim = function(str) {
+ return str.replace(/^\s+|\s+$/g, '');
+ };
+ _ref = headerLines.reverse();
+ for (_i = 0, _len = _ref.length; _i < _len; _i++) {
+ line = _ref[_i];
+ idx = line.indexOf(':');
+ headers[trim(line.substring(0, idx))] = trim(line.substring(idx + 1));
+ }
+ body = '';
+ start = divider + 2;
+ if (headers['content-length']) {
+ len = parseInt(headers['content-length']);
+ body = ('' + data).substring(start, start + len);
+ } else {
+ chr = null;
+ for (i = _j = start, _ref1 = data.length; start <= _ref1 ? _j < _ref1 : _j > _ref1; i = start <= _ref1 ? ++_j : --_j) {
+ chr = data.charAt(i);
+ if (chr === Byte.NULL) {
+ break;
+ }
+ body += chr;
+ }
+ }
+ return new Frame(command, headers, body);
+ };
+
+ Frame.unmarshall = function(datas) {
+ var frame, frames, last_frame, r;
+ frames = datas.split(RegExp("" + Byte.NULL + Byte.LF + "*"));
+ r = {
+ frames: [],
+ partial: ''
+ };
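+      // Frames are NULL-delimited; anything after the last complete frame is
+      // kept in r.partial and prepended to the next websocket message.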
+ r.frames = (function() {
+ var _i, _len, _ref, _results;
+ _ref = frames.slice(0, -1);
+ _results = [];
+ for (_i = 0, _len = _ref.length; _i < _len; _i++) {
+ frame = _ref[_i];
+ _results.push(unmarshallSingle(frame));
+ }
+ return _results;
+ })();
+ last_frame = frames.slice(-1)[0];
+ if (last_frame === Byte.LF || (last_frame.search(RegExp("" + Byte.NULL + Byte.LF + "*$"))) !== -1) {
+ r.frames.push(unmarshallSingle(last_frame));
+ } else {
+ r.partial = last_frame;
+ }
+ return r;
+ };
+
+ Frame.marshall = function(command, headers, body) {
+ var frame;
+ frame = new Frame(command, headers, body);
+ return frame.toString() + Byte.NULL;
+ };
+
+ return Frame;
+
+ })();
+
+ Client = (function() {
+ var now;
+
+ function Client(ws) {
+ this.ws = ws;
+ this.ws.binaryType = "arraybuffer";
+ this.counter = 0;
+ this.connected = false;
+ this.heartbeat = {
+ outgoing: 10000,
+ incoming: 10000
+ };
+ this.maxWebSocketFrameSize = 16 * 1024;
+ this.subscriptions = {};
+ this.partialData = '';
+ }
+
+ Client.prototype.debug = function(message) {
+ var _ref;
+ return typeof window !== "undefined" && window !== null ? (_ref = window.console) != null ? _ref.log(message) : void 0 : void 0;
+ };
+
+ now = function() {
+ if (Date.now) {
+ return Date.now();
+ } else {
+        return new Date().valueOf();
+ }
+ };
+
+ Client.prototype._transmit = function(command, headers, body) {
+ var out;
+ out = Frame.marshall(command, headers, body);
+ if (typeof this.debug === "function") {
+ this.debug(">>> " + out);
+ }
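+      // Send the marshalled frame, splitting it into chunks of at most
+      // maxWebSocketFrameSize characters so large bodies fit the transport.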
+ while (true) {
+ if (out.length > this.maxWebSocketFrameSize) {
+ this.ws.send(out.substring(0, this.maxWebSocketFrameSize));
+ out = out.substring(this.maxWebSocketFrameSize);
+ if (typeof this.debug === "function") {
+ this.debug("remaining = " + out.length);
+ }
+ } else {
+ return this.ws.send(out);
+ }
+ }
+ };
+
+ Client.prototype._setupHeartbeat = function(headers) {
+ var serverIncoming, serverOutgoing, ttl, v, _ref, _ref1;
+ if ((_ref = headers.version) !== Stomp.VERSIONS.V1_1 && _ref !== Stomp.VERSIONS.V1_2) {
+ return;
+ }
+ _ref1 = (function() {
+ var _i, _len, _ref1, _results;
+ _ref1 = headers['heart-beat'].split(",");
+ _results = [];
+ for (_i = 0, _len = _ref1.length; _i < _len; _i++) {
+ v = _ref1[_i];
+ _results.push(parseInt(v));
+ }
+ return _results;
+ })(), serverOutgoing = _ref1[0], serverIncoming = _ref1[1];
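+      // Heart-beat negotiation: ping at max(client outgoing, server incoming)
+      // and expect server activity within max(client incoming, server outgoing).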
+ if (!(this.heartbeat.outgoing === 0 || serverIncoming === 0)) {
+ ttl = Math.max(this.heartbeat.outgoing, serverIncoming);
+ if (typeof this.debug === "function") {
+ this.debug("send PING every " + ttl + "ms");
+ }
+ this.pinger = Stomp.setInterval(ttl, (function(_this) {
+ return function() {
+ _this.ws.send(Byte.LF);
+ return typeof _this.debug === "function" ? _this.debug(">>> PING") : void 0;
+ };
+ })(this));
+ }
+ if (!(this.heartbeat.incoming === 0 || serverOutgoing === 0)) {
+ ttl = Math.max(this.heartbeat.incoming, serverOutgoing);
+ if (typeof this.debug === "function") {
+ this.debug("check PONG every " + ttl + "ms");
+ }
+ return this.ponger = Stomp.setInterval(ttl, (function(_this) {
+ return function() {
+ var delta;
+ delta = now() - _this.serverActivity;
+ if (delta > ttl * 2) {
+ if (typeof _this.debug === "function") {
+ _this.debug("did not receive server activity for the last " + delta + "ms");
+ }
+ return _this.ws.close();
+ }
+ };
+ })(this));
+ }
+ };
+
+ Client.prototype._parseConnect = function() {
+ var args, connectCallback, errorCallback, headers;
+ args = 1 <= arguments.length ? __slice.call(arguments, 0) : [];
+ headers = {};
+ switch (args.length) {
+ case 2:
+ headers = args[0], connectCallback = args[1];
+ break;
+ case 3:
+ if (args[1] instanceof Function) {
+ headers = args[0], connectCallback = args[1], errorCallback = args[2];
+ } else {
+ headers.login = args[0], headers.passcode = args[1], connectCallback = args[2];
+ }
+ break;
+ case 4:
+ headers.login = args[0], headers.passcode = args[1], connectCallback = args[2], errorCallback = args[3];
+ break;
+ default:
+ headers.login = args[0], headers.passcode = args[1], connectCallback = args[2], errorCallback = args[3], headers.host = args[4];
+ }
+ return [headers, connectCallback, errorCallback];
+ };
+
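+ // connect() is overloaded; the supported call forms, unpacked by
+ // _parseConnect, are:
+ //   connect(headers, connectCallback[, errorCallback])
+ //   connect(login, passcode, connectCallback[, errorCallback[, host]])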
+ Client.prototype.connect = function() {
+ var args, errorCallback, headers, out;
+ args = 1 <= arguments.length ? __slice.call(arguments, 0) : [];
+ out = this._parseConnect.apply(this, args);
+ headers = out[0], this.connectCallback = out[1], errorCallback = out[2];
+ if (typeof this.debug === "function") {
+ this.debug("Opening Web Socket...");
+ }
+ this.ws.onmessage = (function(_this) {
+ return function(evt) {
+ var arr, c, client, data, frame, messageID, onreceive, subscription, unmarshalledData, _i, _len, _ref, _results;
+ data = typeof ArrayBuffer !== 'undefined' && evt.data instanceof ArrayBuffer ? (arr = new Uint8Array(evt.data), typeof _this.debug === "function" ? _this.debug("--- got data length: " + arr.length) : void 0, ((function() {
+ var _i, _len, _results;
+ _results = [];
+ for (_i = 0, _len = arr.length; _i < _len; _i++) {
+ c = arr[_i];
+ _results.push(String.fromCharCode(c));
+ }
+ return _results;
+ })()).join('')) : evt.data;
+ _this.serverActivity = now();
+ if (data === Byte.LF) {
+ if (typeof _this.debug === "function") {
+ _this.debug("<<< PONG");
+ }
+ return;
+ }
+ if (typeof _this.debug === "function") {
+ _this.debug("<<< " + data);
+ }
+ unmarshalledData = Frame.unmarshall(_this.partialData + data);
+ _this.partialData = unmarshalledData.partial;
+ _ref = unmarshalledData.frames;
+ _results = [];
+ for (_i = 0, _len = _ref.length; _i < _len; _i++) {
+ frame = _ref[_i];
+ switch (frame.command) {
+ case "CONNECTED":
+ if (typeof _this.debug === "function") {
+ _this.debug("connected to server " + frame.headers.server);
+ }
+ _this.connected = true;
+ _this._setupHeartbeat(frame.headers);
+ _results.push(typeof _this.connectCallback === "function" ? _this.connectCallback(frame) : void 0);
+ break;
+ case "MESSAGE":
+ subscription = frame.headers.subscription;
+ onreceive = _this.subscriptions[subscription] || _this.onreceive;
+ if (onreceive) {
+ client = _this;
+ messageID = frame.headers["message-id"];
+ frame.ack = function(headers) {
+ if (headers == null) {
+ headers = {};
+ }
+ return client.ack(messageID, subscription, headers);
+ };
+ frame.nack = function(headers) {
+ if (headers == null) {
+ headers = {};
+ }
+ return client.nack(messageID, subscription, headers);
+ };
+ _results.push(onreceive(frame));
+ } else {
+ _results.push(typeof _this.debug === "function" ? _this.debug("Unhandled received MESSAGE: " + frame) : void 0);
+ }
+ break;
+ case "RECEIPT":
+ _results.push(typeof _this.onreceipt === "function" ? _this.onreceipt(frame) : void 0);
+ break;
+ case "ERROR":
+ _results.push(typeof errorCallback === "function" ? errorCallback(frame) : void 0);
+ break;
+ default:
+ _results.push(typeof _this.debug === "function" ? _this.debug("Unhandled frame: " + frame) : void 0);
+ }
+ }
+ return _results;
+ };
+ })(this);
+ this.ws.onclose = (function(_this) {
+ return function() {
+ var msg;
+ msg = "Whoops! Lost connection to " + _this.ws.url;
+ if (typeof _this.debug === "function") {
+ _this.debug(msg);
+ }
+ _this._cleanUp();
+ return typeof errorCallback === "function" ? errorCallback(msg) : void 0;
+ };
+ })(this);
+ return this.ws.onopen = (function(_this) {
+ return function() {
+ if (typeof _this.debug === "function") {
+ _this.debug('Web Socket Opened...');
+ }
+ headers["accept-version"] = Stomp.VERSIONS.supportedVersions();
+ headers["heart-beat"] = [_this.heartbeat.outgoing, _this.heartbeat.incoming].join(',');
+ return _this._transmit("CONNECT", headers);
+ };
+ })(this);
+ };
+
+ Client.prototype.disconnect = function(disconnectCallback, headers) {
+ if (headers == null) {
+ headers = {};
+ }
+ this._transmit("DISCONNECT", headers);
+ this.ws.onclose = null;
+ this.ws.close();
+ this._cleanUp();
+ return typeof disconnectCallback === "function" ? disconnectCallback() : void 0;
+ };
+
+ Client.prototype._cleanUp = function() {
+ this.connected = false;
+ if (this.pinger) {
+ Stomp.clearInterval(this.pinger);
+ }
+ if (this.ponger) {
+ return Stomp.clearInterval(this.ponger);
+ }
+ };
+
+ Client.prototype.send = function(destination, headers, body) {
+ if (headers == null) {
+ headers = {};
+ }
+ if (body == null) {
+ body = '';
+ }
+ headers.destination = destination;
+ return this._transmit("SEND", headers, body);
+ };
+
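+ // Subscribe to a destination; the returned handle exposes the
+ // (possibly generated) subscription id and an unsubscribe() shortcut.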
+ Client.prototype.subscribe = function(destination, callback, headers) {
+ var client;
+ if (headers == null) {
+ headers = {};
+ }
+ if (!headers.id) {
+ headers.id = "sub-" + this.counter++;
+ }
+ headers.destination = destination;
+ this.subscriptions[headers.id] = callback;
+ this._transmit("SUBSCRIBE", headers);
+ client = this;
+ return {
+ id: headers.id,
+ unsubscribe: function() {
+ return client.unsubscribe(headers.id);
+ }
+ };
+ };
+
+ Client.prototype.unsubscribe = function(id) {
+ delete this.subscriptions[id];
+ return this._transmit("UNSUBSCRIBE", {
+ id: id
+ });
+ };
+
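+ // Begin a transaction; the returned handle exposes the (possibly
+ // generated) transaction id plus commit() and abort() shortcuts.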
+ Client.prototype.begin = function(transaction) {
+ var client, txid;
+ txid = transaction || "tx-" + this.counter++;
+ this._transmit("BEGIN", {
+ transaction: txid
+ });
+ client = this;
+ return {
+ id: txid,
+ commit: function() {
+ return client.commit(txid);
+ },
+ abort: function() {
+ return client.abort(txid);
+ }
+ };
+ };
+
+ Client.prototype.commit = function(transaction) {
+ return this._transmit("COMMIT", {
+ transaction: transaction
+ });
+ };
+
+ Client.prototype.abort = function(transaction) {
+ return this._transmit("ABORT", {
+ transaction: transaction
+ });
+ };
+
+ Client.prototype.ack = function(messageID, subscription, headers) {
+ if (headers == null) {
+ headers = {};
+ }
+ headers["message-id"] = messageID;
+ headers.subscription = subscription;
+ return this._transmit("ACK", headers);
+ };
+
+ Client.prototype.nack = function(messageID, subscription, headers) {
+ if (headers == null) {
+ headers = {};
+ }
+ headers["message-id"] = messageID;
+ headers.subscription = subscription;
+ return this._transmit("NACK", headers);
+ };
+
+ return Client;
+
+ })();
+
+ Stomp = {
+ VERSIONS: {
+ V1_0: '1.0',
+ V1_1: '1.1',
+ V1_2: '1.2',
+ supportedVersions: function() {
+ return '1.1,1.0';
+ }
+ },
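+ // Offer both STOMP subprotocols during the WebSocket handshake; the
+ // server selects the one it will speak.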
+ client: function(url, protocols) {
+ var klass, ws;
+ if (protocols == null) {
+ protocols = ['v10.stomp', 'v11.stomp'];
+ }
+ klass = Stomp.WebSocketClass || WebSocket;
+ ws = new klass(url, protocols);
+ return new Client(ws);
+ },
+ over: function(ws) {
+ return new Client(ws);
+ },
+ Frame: Frame
+ };
+
+ if (typeof exports !== "undefined" && exports !== null) {
+ exports.Stomp = Stomp;
+ }
+
+ if (typeof window !== "undefined" && window !== null) {
+ Stomp.setInterval = function(interval, f) {
+ return window.setInterval(f, interval);
+ };
+ Stomp.clearInterval = function(id) {
+ return window.clearInterval(id);
+ };
+ window.Stomp = Stomp;
+ } else if (typeof exports === "undefined" || exports === null) {
+ self.Stomp = Stomp;
+ }
+
+}).call(this);
diff --git a/deps/rabbitmq_web_stomp_examples/priv/temp-queue.html b/deps/rabbitmq_web_stomp_examples/priv/temp-queue.html
new file mode 100644
index 0000000000..f19c6ed4bf
--- /dev/null
+++ b/deps/rabbitmq_web_stomp_examples/priv/temp-queue.html
@@ -0,0 +1,98 @@
+<!DOCTYPE html>
+<html><head>
+ <script src="https://code.jquery.com/jquery-3.4.1.min.js"></script>
+ <script src="stomp.js"></script>
+ <style>
+ .box {
+ width: 440px;
+ float: left;
+ margin: 0 20px 0 20px;
+ }
+
+ .box div, .box input {
+ border: 1px solid;
+ -moz-border-radius: 4px;
+ border-radius: 4px;
+ width: 100%;
+ padding: 5px;
+ margin: 3px 0 10px 0;
+ }
+
+ .box div {
+ border-color: grey;
+ height: 300px;
+ overflow: auto;
+ }
+
+ div code {
+ display: block;
+ }
+
+ #first div code {
+ -moz-border-radius: 2px;
+ border-radius: 2px;
+ border: 1px solid #eee;
+ margin-bottom: 5px;
+ }
+
+ #second div {
+ font-size: 0.8em;
+ }
+ </style>
+ <title>RabbitMQ Web STOMP Examples : Temporary Queue</title>
+ <link href="main.css" rel="stylesheet" type="text/css"/>
+</head><body lang="en">
+ <h1><a href="index.html">RabbitMQ Web STOMP Examples</a> > Temporary Queue</h1>
+
+ <p>When you type text in the form's input, the application will send a message to the <code>/queue/test</code> destination
+ with the <code>reply-to</code> header set to <code>/temp-queue/foo</code>.</p>
+ <p>The STOMP client sets a default <code>onreceive</code> callback to receive messages from this temporary queue and display the message's text.</p>
+ <p>Finally, the client subscribes to the <code>/queue/test</code> destination. When it receives a message from this destination, it reverses the message's
+ text and replies by sending the reversed text to the destination defined by the message's <code>reply-to</code> header.</p>
+
+ <div id="first" class="box">
+ <h2>Received</h2>
+ <div></div>
+ <form><input autocomplete="off" placeholder="Type here..."></form>
+ </div>
+
+ <div id="second" class="box">
+ <h2>Logs</h2>
+ <div></div>
+ </div>
+
+ <script>
+ var ws = new WebSocket('ws://' + window.location.hostname + ':15674/ws');
+ var client = Stomp.over(ws);
+
+ client.debug = function(e) {
+ $('#second div').append($("<code>").text(e));
+ };
+
+ // default receive callback to get message from temporary queues
+ client.onreceive = function(m) {
+ $('#first div').append($("<code>").text(m.body));
+ };
+
+ var on_connect = function(x) {
+ var id = client.subscribe("/queue/test", function(m) {
+ // reply by sending the reversed text to the temp queue defined in the "reply-to" header
+ var reversedText = m.body.split("").reverse().join("");
+ client.send(m.headers['reply-to'], {"content-type":"text/plain"}, reversedText);
+ });
+ };
+ var on_error = function() {
+ console.log('error');
+ };
+ client.connect('guest', 'guest', on_connect, on_error, '/');
+
+ $('#first form').submit(function() {
+ var text = $('#first form input').val();
+ if (text) {
+ client.send('/queue/test', {'reply-to': '/temp-queue/foo'}, text);
+ $('#first form input').val("");
+ }
+ return false;
+ });
+ </script>
+</body></html>
diff --git a/deps/rabbitmq_web_stomp_examples/rabbitmq-components.mk b/deps/rabbitmq_web_stomp_examples/rabbitmq-components.mk
new file mode 100644
index 0000000000..b2a3be8b35
--- /dev/null
+++ b/deps/rabbitmq_web_stomp_examples/rabbitmq-components.mk
@@ -0,0 +1,359 @@
+ifeq ($(.DEFAULT_GOAL),)
+# Define default goal to `all` because this file defines some targets
+# before the inclusion of erlang.mk, leading to the wrong target becoming
+# the default.
+.DEFAULT_GOAL = all
+endif
+
+# PROJECT_VERSION defaults to:
+# 1. the version exported by rabbitmq-server-release;
+# 2. the version stored in `git-revisions.txt`, if it exists;
+# 3. a version based on git-describe(1), if it is a Git clone;
+# 4. 0.0.0
+
+PROJECT_VERSION := $(RABBITMQ_VERSION)
+
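+# Note: in the shell snippet below, $(words $(PROJECT_DESCRIPTION) version)
+# computes the index of the word that immediately follows the project
+# description on the first line of git-revisions.txt, i.e. the version.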
+ifeq ($(PROJECT_VERSION),)
+PROJECT_VERSION := $(shell \
+if test -f git-revisions.txt; then \
+ head -n1 git-revisions.txt | \
+ awk '{print $$$(words $(PROJECT_DESCRIPTION) version);}'; \
+else \
+ (git describe --dirty --abbrev=7 --tags --always --first-parent \
+ 2>/dev/null || echo rabbitmq_v0_0_0) | \
+ sed -e 's/^rabbitmq_v//' -e 's/^v//' -e 's/_/./g' -e 's/-/+/' \
+ -e 's/-/./g'; \
+fi)
+endif
+
+# --------------------------------------------------------------------
+# RabbitMQ components.
+# --------------------------------------------------------------------
+
+# For RabbitMQ repositories, we want to checkout branches which match
+# the parent project. For instance, if the parent project is on a
+# release tag, dependencies must be on the same release tag. If the
+# parent project is on a topic branch, dependencies must be on the same
+# topic branch, or fall back to `stable` or `master`, whichever was the
+# base of the topic branch.
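+#
+# For example (illustrative refs): on a topic branch forked from master,
+#   current_rmq_ref = my-topic-branch
+#   base_rmq_ref    = master
+# so dep_rabbit below expands to
+#   git_rmq rabbitmq-server my-topic-branch master master
+# and each ref is tried in that order when the dependency is fetched.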
+
+dep_amqp_client = git_rmq rabbitmq-erlang-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_amqp10_client = git_rmq rabbitmq-amqp1.0-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_amqp10_common = git_rmq rabbitmq-amqp1.0-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbit = git_rmq rabbitmq-server $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbit_common = git_rmq rabbitmq-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_amqp1_0 = git_rmq rabbitmq-amqp1.0 $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_amqp = git_rmq rabbitmq-auth-backend-amqp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_cache = git_rmq rabbitmq-auth-backend-cache $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_http = git_rmq rabbitmq-auth-backend-http $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_ldap = git_rmq rabbitmq-auth-backend-ldap $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_oauth2 = git_rmq rabbitmq-auth-backend-oauth2 $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_mechanism_ssl = git_rmq rabbitmq-auth-mechanism-ssl $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_aws = git_rmq rabbitmq-aws $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_boot_steps_visualiser = git_rmq rabbitmq-boot-steps-visualiser $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_cli = git_rmq rabbitmq-cli $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_codegen = git_rmq rabbitmq-codegen $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_consistent_hash_exchange = git_rmq rabbitmq-consistent-hash-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_ct_client_helpers = git_rmq rabbitmq-ct-client-helpers $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_ct_helpers = git_rmq rabbitmq-ct-helpers $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_delayed_message_exchange = git_rmq rabbitmq-delayed-message-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_dotnet_client = git_rmq rabbitmq-dotnet-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_event_exchange = git_rmq rabbitmq-event-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_federation = git_rmq rabbitmq-federation $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_federation_management = git_rmq rabbitmq-federation-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_java_client = git_rmq rabbitmq-java-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_client = git_rmq rabbitmq-jms-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_cts = git_rmq rabbitmq-jms-cts $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_topic_exchange = git_rmq rabbitmq-jms-topic-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_lvc_exchange = git_rmq rabbitmq-lvc-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management = git_rmq rabbitmq-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_agent = git_rmq rabbitmq-management-agent $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_exchange = git_rmq rabbitmq-management-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_themes = git_rmq rabbitmq-management-themes $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_message_timestamp = git_rmq rabbitmq-message-timestamp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_metronome = git_rmq rabbitmq-metronome $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_mqtt = git_rmq rabbitmq-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_objc_client = git_rmq rabbitmq-objc-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_aws = git_rmq rabbitmq-peer-discovery-aws $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_common = git_rmq rabbitmq-peer-discovery-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_consul = git_rmq rabbitmq-peer-discovery-consul $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_etcd = git_rmq rabbitmq-peer-discovery-etcd $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_k8s = git_rmq rabbitmq-peer-discovery-k8s $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_prometheus = git_rmq rabbitmq-prometheus $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_random_exchange = git_rmq rabbitmq-random-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_recent_history_exchange = git_rmq rabbitmq-recent-history-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_routing_node_stamp = git_rmq rabbitmq-routing-node-stamp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_rtopic_exchange = git_rmq rabbitmq-rtopic-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_server_release = git_rmq rabbitmq-server-release $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_sharding = git_rmq rabbitmq-sharding $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_shovel = git_rmq rabbitmq-shovel $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_shovel_management = git_rmq rabbitmq-shovel-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_stomp = git_rmq rabbitmq-stomp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_stream = git_rmq rabbitmq-stream $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_toke = git_rmq rabbitmq-toke $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_top = git_rmq rabbitmq-top $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_tracing = git_rmq rabbitmq-tracing $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_trust_store = git_rmq rabbitmq-trust-store $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_test = git_rmq rabbitmq-test $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_dispatch = git_rmq rabbitmq-web-dispatch $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_stomp = git_rmq rabbitmq-web-stomp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_stomp_examples = git_rmq rabbitmq-web-stomp-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt = git_rmq rabbitmq-web-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt_examples = git_rmq rabbitmq-web-mqtt-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_website = git_rmq rabbitmq-website $(current_rmq_ref) $(base_rmq_ref) live master
+dep_toke = git_rmq toke $(current_rmq_ref) $(base_rmq_ref) master
+
+dep_rabbitmq_public_umbrella = git_rmq rabbitmq-public-umbrella $(current_rmq_ref) $(base_rmq_ref) master
+
+# Third-party dependencies version pinning.
+#
+# We do that in this file, which is copied in all projects, to ensure
+# all projects use the same versions. It avoids conflicts and makes it
+# possible to work with rabbitmq-public-umbrella.
+
+dep_accept = hex 0.3.5
+dep_cowboy = hex 2.8.0
+dep_cowlib = hex 2.9.1
+dep_jsx = hex 2.11.0
+dep_lager = hex 3.8.0
+dep_prometheus = git https://github.com/deadtrickster/prometheus.erl.git master
+dep_ra = git https://github.com/rabbitmq/ra.git master
+dep_ranch = hex 1.7.1
+dep_recon = hex 2.5.1
+dep_observer_cli = hex 1.5.4
+dep_stdout_formatter = hex 0.2.4
+dep_sysmon_handler = hex 1.3.0
+
+RABBITMQ_COMPONENTS = amqp_client \
+ amqp10_common \
+ amqp10_client \
+ rabbit \
+ rabbit_common \
+ rabbitmq_amqp1_0 \
+ rabbitmq_auth_backend_amqp \
+ rabbitmq_auth_backend_cache \
+ rabbitmq_auth_backend_http \
+ rabbitmq_auth_backend_ldap \
+ rabbitmq_auth_backend_oauth2 \
+ rabbitmq_auth_mechanism_ssl \
+ rabbitmq_aws \
+ rabbitmq_boot_steps_visualiser \
+ rabbitmq_cli \
+ rabbitmq_codegen \
+ rabbitmq_consistent_hash_exchange \
+ rabbitmq_ct_client_helpers \
+ rabbitmq_ct_helpers \
+ rabbitmq_delayed_message_exchange \
+ rabbitmq_dotnet_client \
+ rabbitmq_event_exchange \
+ rabbitmq_federation \
+ rabbitmq_federation_management \
+ rabbitmq_java_client \
+ rabbitmq_jms_client \
+ rabbitmq_jms_cts \
+ rabbitmq_jms_topic_exchange \
+ rabbitmq_lvc_exchange \
+ rabbitmq_management \
+ rabbitmq_management_agent \
+ rabbitmq_management_exchange \
+ rabbitmq_management_themes \
+ rabbitmq_message_timestamp \
+ rabbitmq_metronome \
+ rabbitmq_mqtt \
+ rabbitmq_objc_client \
+ rabbitmq_peer_discovery_aws \
+ rabbitmq_peer_discovery_common \
+ rabbitmq_peer_discovery_consul \
+ rabbitmq_peer_discovery_etcd \
+ rabbitmq_peer_discovery_k8s \
+ rabbitmq_prometheus \
+ rabbitmq_random_exchange \
+ rabbitmq_recent_history_exchange \
+ rabbitmq_routing_node_stamp \
+ rabbitmq_rtopic_exchange \
+ rabbitmq_server_release \
+ rabbitmq_sharding \
+ rabbitmq_shovel \
+ rabbitmq_shovel_management \
+ rabbitmq_stomp \
+ rabbitmq_stream \
+ rabbitmq_toke \
+ rabbitmq_top \
+ rabbitmq_tracing \
+ rabbitmq_trust_store \
+ rabbitmq_web_dispatch \
+ rabbitmq_web_mqtt \
+ rabbitmq_web_mqtt_examples \
+ rabbitmq_web_stomp \
+ rabbitmq_web_stomp_examples \
+ rabbitmq_website
+
+# Erlang.mk does not rebuild dependencies by default once they have been
+# compiled, except for those listed in the `$(FORCE_REBUILD)`
+# variable.
+#
+# We want all RabbitMQ components to always be rebuilt: this eases
+# the work on several components at the same time.
+
+FORCE_REBUILD = $(RABBITMQ_COMPONENTS)
+
+# Several components have a custom erlang.mk/build.config, mainly
+# to disable eunit. Therefore, we can't use the top-level project's
+# erlang.mk copy.
+NO_AUTOPATCH += $(RABBITMQ_COMPONENTS)
+
+ifeq ($(origin current_rmq_ref),undefined)
+ifneq ($(wildcard .git),)
+current_rmq_ref := $(shell (\
+ ref=$$(LANG=C git branch --list | awk '/^\* \(.*detached / {ref=$$0; sub(/.*detached [^ ]+ /, "", ref); sub(/\)$$/, "", ref); print ref; exit;} /^\* / {ref=$$0; sub(/^\* /, "", ref); print ref; exit}');\
+ if test "$$(git rev-parse --short HEAD)" != "$$ref"; then echo "$$ref"; fi))
+else
+current_rmq_ref := master
+endif
+endif
+export current_rmq_ref
+
+ifeq ($(origin base_rmq_ref),undefined)
+ifneq ($(wildcard .git),)
+possible_base_rmq_ref := master
+ifeq ($(possible_base_rmq_ref),$(current_rmq_ref))
+base_rmq_ref := $(current_rmq_ref)
+else
+base_rmq_ref := $(shell \
+ (git rev-parse --verify -q master >/dev/null && \
+ git rev-parse --verify -q $(possible_base_rmq_ref) >/dev/null && \
+ git merge-base --is-ancestor $$(git merge-base master HEAD) $(possible_base_rmq_ref) && \
+ echo $(possible_base_rmq_ref)) || \
+ echo master)
+endif
+else
+base_rmq_ref := master
+endif
+endif
+export base_rmq_ref
+
+# Repository URL selection.
+#
+# First, we infer other components' location from the current project
+# repository URL, if it's a Git repository:
+# - We take the "origin" remote URL as the base
+# - The current project name and repository name are replaced by the
+# target's properties:
+# eg. rabbitmq-common is replaced by rabbitmq-codegen
+# eg. rabbit_common is replaced by rabbitmq_codegen
+#
+# If cloning from this computed location fails, we fall back to the
+# RabbitMQ upstream, which is GitHub.
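+#
+# For example (illustrative URL, assuming the current project is rabbit):
+# with remote.origin.url set to https://example.com/team/rabbitmq-server.git,
+# the computed fetch location for rabbit_common becomes
+# https://example.com/team/rabbitmq-common.git.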
+
+# Macro to transform eg. "rabbit_common" to "rabbitmq-common".
+rmq_cmp_repo_name = $(word 2,$(dep_$(1)))
+
+# Upstream URL for the current project.
+RABBITMQ_COMPONENT_REPO_NAME := $(call rmq_cmp_repo_name,$(PROJECT))
+RABBITMQ_UPSTREAM_FETCH_URL ?= https://github.com/rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git
+RABBITMQ_UPSTREAM_PUSH_URL ?= git@github.com:rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git
+
+# Current URL for the current project. If this is not a Git clone,
+# default to the upstream Git repository.
+ifneq ($(wildcard .git),)
+git_origin_fetch_url := $(shell git config remote.origin.url)
+git_origin_push_url := $(shell git config remote.origin.pushurl || git config remote.origin.url)
+RABBITMQ_CURRENT_FETCH_URL ?= $(git_origin_fetch_url)
+RABBITMQ_CURRENT_PUSH_URL ?= $(git_origin_push_url)
+else
+RABBITMQ_CURRENT_FETCH_URL ?= $(RABBITMQ_UPSTREAM_FETCH_URL)
+RABBITMQ_CURRENT_PUSH_URL ?= $(RABBITMQ_UPSTREAM_PUSH_URL)
+endif
+
+# Macro to replace the following pattern:
+# 1. /foo.git -> /bar.git
+# 2. /foo -> /bar
+# 3. /foo/ -> /bar/
+subst_repo_name = $(patsubst %/$(1)/%,%/$(2)/%,$(patsubst %/$(1),%/$(2),$(patsubst %/$(1).git,%/$(2).git,$(3))))
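+# For example (illustrative):
+#   $(call subst_repo_name,rabbitmq-common,rabbitmq-codegen,https://github.com/rabbitmq/rabbitmq-common.git)
+#   => https://github.com/rabbitmq/rabbitmq-codegen.git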
+
+# Macro to replace both the project's name (eg. "rabbit_common") and
+# repository name (eg. "rabbitmq-common") by the target's equivalent.
+#
+# This macro is kept on one line because we don't want whitespaces in
+# the returned value, as it's used in $(dep_fetch_git_rmq) in a shell
+# single-quoted string.
+dep_rmq_repo = $(if $(dep_$(2)),$(call subst_repo_name,$(PROJECT),$(2),$(call subst_repo_name,$(RABBITMQ_COMPONENT_REPO_NAME),$(call rmq_cmp_repo_name,$(2)),$(1))),$(pkg_$(1)_repo))
+
+dep_rmq_commits = $(if $(dep_$(1)), \
+ $(wordlist 3,$(words $(dep_$(1))),$(dep_$(1))), \
+ $(pkg_$(1)_commit))
+
+define dep_fetch_git_rmq
+ fetch_url1='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_FETCH_URL),$(1))'; \
+ fetch_url2='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_FETCH_URL),$(1))'; \
+ if test "$$$$fetch_url1" != '$(RABBITMQ_CURRENT_FETCH_URL)' && \
+ git clone -q -n -- "$$$$fetch_url1" $(DEPS_DIR)/$(call dep_name,$(1)); then \
+ fetch_url="$$$$fetch_url1"; \
+ push_url='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_PUSH_URL),$(1))'; \
+ elif git clone -q -n -- "$$$$fetch_url2" $(DEPS_DIR)/$(call dep_name,$(1)); then \
+ fetch_url="$$$$fetch_url2"; \
+ push_url='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_PUSH_URL),$(1))'; \
+ fi; \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && ( \
+ $(foreach ref,$(call dep_rmq_commits,$(1)), \
+ git checkout -q $(ref) >/dev/null 2>&1 || \
+ ) \
+ (echo "error: no valid pathspec among: $(call dep_rmq_commits,$(1))" \
+ 1>&2 && false) ) && \
+ (test "$$$$fetch_url" = "$$$$push_url" || \
+ git remote set-url --push origin "$$$$push_url")
+endef
+
+# --------------------------------------------------------------------
+# Component distribution.
+# --------------------------------------------------------------------
+
+list-dist-deps::
+ @:
+
+prepare-dist::
+ @:
+
+# --------------------------------------------------------------------
+# Umbrella-specific settings.
+# --------------------------------------------------------------------
+
+# If the top-level project is a RabbitMQ component, we override
+# $(DEPS_DIR) for this project to point to the top-level's one.
+#
+# We also verify that the guessed DEPS_DIR is actually named `deps`,
+# to rule out any situation where it is a coincidence that we found a
+# `rabbitmq-components.mk` in an upper directory.
+
+possible_deps_dir_1 = $(abspath ..)
+possible_deps_dir_2 = $(abspath ../../..)
+
+ifeq ($(notdir $(possible_deps_dir_1)),deps)
+ifneq ($(wildcard $(possible_deps_dir_1)/../rabbitmq-components.mk),)
+deps_dir_overriden = 1
+DEPS_DIR ?= $(possible_deps_dir_1)
+DISABLE_DISTCLEAN = 1
+endif
+endif
+
+ifeq ($(deps_dir_overriden),)
+ifeq ($(notdir $(possible_deps_dir_2)),deps)
+ifneq ($(wildcard $(possible_deps_dir_2)/../rabbitmq-components.mk),)
+deps_dir_overriden = 1
+DEPS_DIR ?= $(possible_deps_dir_2)
+DISABLE_DISTCLEAN = 1
+endif
+endif
+endif
+
+ifneq ($(wildcard UMBRELLA.md),)
+DISABLE_DISTCLEAN = 1
+endif
+
+# We disable `make distclean` so $(DEPS_DIR) is not accidentally removed.
+
+ifeq ($(DISABLE_DISTCLEAN),1)
+ifneq ($(filter distclean distclean-deps,$(MAKECMDGOALS)),)
+SKIP_DEPS = 1
+endif
+endif
diff --git a/deps/rabbitmq_web_stomp_examples/src/rabbit_web_stomp_examples_app.erl b/deps/rabbitmq_web_stomp_examples/src/rabbit_web_stomp_examples_app.erl
new file mode 100644
index 0000000000..58011ed126
--- /dev/null
+++ b/deps/rabbitmq_web_stomp_examples/src/rabbit_web_stomp_examples_app.erl
@@ -0,0 +1,29 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_web_stomp_examples_app).
+
+-behaviour(application).
+-export([start/2,stop/1]).
+
+%% Dummy supervisor - see Ulf Wiger's comment at
+%% http://erlang.2086793.n4.nabble.com/initializing-library-applications-without-processes-td2094473.html
+-behaviour(supervisor).
+-export([init/1]).
+
+start(_Type, _StartArgs) ->
+ {ok, Listener} = application:get_env(rabbitmq_web_stomp_examples, listener),
+ {ok, _} = rabbit_web_dispatch:register_static_context(
+ web_stomp_examples, Listener, "web-stomp-examples", ?MODULE,
+ "priv", "WEB-STOMP: examples"),
+ supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+stop(_State) ->
+ rabbit_web_dispatch:unregister_context(web_stomp_examples),
+ ok.
+
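+%% No children are supervised; the supervisor exists only to give the
+%% library application a running process (see the note above).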
+init([]) -> {ok, {{one_for_one, 3, 10}, []}}.